diff -Nru temporal-1.21.5-1/Makefile temporal-1.22.5/Makefile --- temporal-1.21.5-1/Makefile 2023-10-17 22:37:26.000000000 +0000 +++ temporal-1.22.5/Makefile 2024-02-23 09:42:40.000000000 +0000 @@ -11,7 +11,7 @@ export GOMODCACHE := $(shell [ -d $(HOME)/go ] && echo $(HOME)/go/pkg/mod || mktemp --tmpdir -d tmp.go-mod-cacheXXX) export GOFLAGS := -mod=vendor -TEMPORAL_VERSION := 1.21.5-1 +TEMPORAL_VERSION := 1.22.5 TEMPORAL_GIT_URL := https://git.launchpad.net/~maas-committers/maas/+git/temporal ARTIFACTS := temporal-server temporal-sql-tool tdbg diff -Nru temporal-1.21.5-1/debian/changelog temporal-1.22.5/debian/changelog --- temporal-1.21.5-1/debian/changelog 2024-02-23 10:04:13.000000000 +0000 +++ temporal-1.22.5/debian/changelog 2024-02-23 10:04:15.000000000 +0000 @@ -1,3 +1,9 @@ +temporal (1.22.5-0ubuntu1) jammy; urgency=medium + + * Temporal v1.22.5 + + -- Anton Troyanov Fri, 23 Feb 2024 09:43:00 +0000 + temporal (1.21.5-1-0ubuntu1) jammy; urgency=medium * Temporal v1.21.5 diff -Nru temporal-1.21.5-1/src/.git/HEAD temporal-1.22.5/src/.git/HEAD --- temporal-1.21.5-1/src/.git/HEAD 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/.git/HEAD 2024-02-23 09:45:43.000000000 +0000 @@ -1 +1 @@ -1eaaaf8da577c6c50fcd242833efa3826a0cb876 +2787da350495fc2acb01b5456d5b2a12d7fbca2f diff -Nru temporal-1.21.5-1/src/.git/config temporal-1.22.5/src/.git/config --- temporal-1.21.5-1/src/.git/config 2023-09-29 14:03:03.000000000 +0000 +++ temporal-1.22.5/src/.git/config 2024-02-23 09:45:29.000000000 +0000 @@ -5,4 +5,4 @@ logallrefupdates = true [remote "origin"] url = https://git.launchpad.net/~maas-committers/maas/+git/temporal - fetch = +refs/tags/v1.21.5:refs/tags/v1.21.5 + fetch = +refs/tags/v1.22.5:refs/tags/v1.22.5 Binary files /tmp/tmp59wy0n4p/rKeWR6Ifus/temporal-1.21.5-1/src/.git/index and /tmp/tmp59wy0n4p/kO5fuMPlFY/temporal-1.22.5/src/.git/index differ diff -Nru temporal-1.21.5-1/src/.git/logs/HEAD temporal-1.22.5/src/.git/logs/HEAD --- temporal-1.21.5-1/src/.git/logs/HEAD 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/.git/logs/HEAD 2024-02-23 09:45:43.000000000 +0000 @@ -1 +1 @@ -0000000000000000000000000000000000000000 1eaaaf8da577c6c50fcd242833efa3826a0cb876 Ubuntu 1695996187 +0000 clone: from https://git.launchpad.net/~maas-committers/maas/+git/temporal +0000000000000000000000000000000000000000 2787da350495fc2acb01b5456d5b2a12d7fbca2f Ubuntu 1708681543 +0000 clone: from https://git.launchpad.net/~maas-committers/maas/+git/temporal Binary files /tmp/tmp59wy0n4p/rKeWR6Ifus/temporal-1.21.5-1/src/.git/objects/pack/pack-77fcf9e24450d44e927a5ecb1385e850fef5a4c4.idx and /tmp/tmp59wy0n4p/kO5fuMPlFY/temporal-1.22.5/src/.git/objects/pack/pack-77fcf9e24450d44e927a5ecb1385e850fef5a4c4.idx differ Binary files /tmp/tmp59wy0n4p/rKeWR6Ifus/temporal-1.21.5-1/src/.git/objects/pack/pack-77fcf9e24450d44e927a5ecb1385e850fef5a4c4.pack and /tmp/tmp59wy0n4p/kO5fuMPlFY/temporal-1.22.5/src/.git/objects/pack/pack-77fcf9e24450d44e927a5ecb1385e850fef5a4c4.pack differ Binary files /tmp/tmp59wy0n4p/rKeWR6Ifus/temporal-1.21.5-1/src/.git/objects/pack/pack-cb35619a78c14f44db1b12a962563241c8edaaa7.idx and /tmp/tmp59wy0n4p/kO5fuMPlFY/temporal-1.22.5/src/.git/objects/pack/pack-cb35619a78c14f44db1b12a962563241c8edaaa7.idx differ Binary files /tmp/tmp59wy0n4p/rKeWR6Ifus/temporal-1.21.5-1/src/.git/objects/pack/pack-cb35619a78c14f44db1b12a962563241c8edaaa7.pack and /tmp/tmp59wy0n4p/kO5fuMPlFY/temporal-1.22.5/src/.git/objects/pack/pack-cb35619a78c14f44db1b12a962563241c8edaaa7.pack differ diff -Nru 
temporal-1.21.5-1/src/.git/packed-refs temporal-1.22.5/src/.git/packed-refs --- temporal-1.21.5-1/src/.git/packed-refs 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/.git/packed-refs 2024-02-23 09:45:43.000000000 +0000 @@ -1,2 +1,2 @@ # pack-refs with: peeled fully-peeled sorted -1eaaaf8da577c6c50fcd242833efa3826a0cb876 refs/tags/v1.21.5 +2787da350495fc2acb01b5456d5b2a12d7fbca2f refs/tags/v1.22.5 diff -Nru temporal-1.21.5-1/src/.git/refs/tags/v1.21.5-rc3 temporal-1.22.5/src/.git/refs/tags/v1.21.5-rc3 --- temporal-1.21.5-1/src/.git/refs/tags/v1.21.5-rc3 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/.git/refs/tags/v1.21.5-rc3 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -1eaaaf8da577c6c50fcd242833efa3826a0cb876 diff -Nru temporal-1.21.5-1/src/.git/shallow temporal-1.22.5/src/.git/shallow --- temporal-1.21.5-1/src/.git/shallow 2023-09-29 14:03:03.000000000 +0000 +++ temporal-1.22.5/src/.git/shallow 2024-02-23 09:45:29.000000000 +0000 @@ -1 +1 @@ -1eaaaf8da577c6c50fcd242833efa3826a0cb876 +2787da350495fc2acb01b5456d5b2a12d7fbca2f diff -Nru temporal-1.21.5-1/src/.github/workflows/features-integration.yml temporal-1.22.5/src/.github/workflows/features-integration.yml --- temporal-1.21.5-1/src/.github/workflows/features-integration.yml 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/.github/workflows/features-integration.yml 2024-02-23 09:45:43.000000000 +0000 @@ -4,7 +4,7 @@ pull_request: push: branches: - - master + - main concurrency: # Auto-cancel existing runs in the PR when a new commit is pushed group: ${{ github.head_ref || github.run_id }} diff -Nru temporal-1.21.5-1/src/.github/workflows/golangci-lint.yml temporal-1.22.5/src/.github/workflows/golangci-lint.yml --- temporal-1.21.5-1/src/.github/workflows/golangci-lint.yml 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/.github/workflows/golangci-lint.yml 2024-02-23 09:45:43.000000000 +0000 @@ -8,17 +8,21 @@ name: lint runs-on: ubuntu-latest steps: - - uses: actions/setup-go@v3 - with: - go-version: 1.19 - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: fetch-depth: 0 + + - uses: actions/setup-go@v4 + with: + go-version-file: 'go.mod' + check-latest: true + - name: golangci-lint uses: golangci/golangci-lint-action@v3 with: - version: v1.51.2 + version: v1.53.3 args: --verbose --timeout 10m --fix=false --new-from-rev=HEAD~ --config=.golangci.yml + - name: check-is-dirty run: | if [[ -n $(git status --porcelain) ]]; then diff -Nru temporal-1.21.5-1/src/.github/workflows/goreleaser.yml temporal-1.22.5/src/.github/workflows/goreleaser.yml --- temporal-1.21.5-1/src/.github/workflows/goreleaser.yml 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/.github/workflows/goreleaser.yml 2024-02-23 09:45:43.000000000 +0000 @@ -1,24 +1,32 @@ name: goreleaser + on: release: types: - released + +permissions: + contents: write + jobs: goreleaser: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v2 + uses: actions/checkout@v4 with: fetch-depth: 0 + - name: Set up Go - uses: actions/setup-go@v2 + uses: actions/setup-go@v4 with: - go-version: 1.19 + go-version-file: 'go.mod' + check-latest: true + - name: Run GoReleaser - uses: goreleaser/goreleaser-action@v2 + uses: goreleaser/goreleaser-action@v5 with: version: latest - args: release + args: release --clean env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff -Nru temporal-1.21.5-1/src/.github/workflows/semgrep.yml temporal-1.22.5/src/.github/workflows/semgrep.yml --- 
temporal-1.21.5-1/src/.github/workflows/semgrep.yml 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/.github/workflows/semgrep.yml 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,23 @@ +on: + workflow_dispatch: {} + pull_request: {} + push: + branches: + - main + paths: + - .github/workflows/semgrep.yml + schedule: + # random HH:MM to avoid a load spike on GitHub Actions at 00:00 + - cron: 28 19 * * * +name: Semgrep +jobs: + semgrep: + name: Scan + runs-on: ubuntu-20.04 + env: + SEMGREP_APP_TOKEN: ${{ secrets.SEMGREP_APP_TOKEN }} + container: + image: returntocorp/semgrep + steps: + - uses: actions/checkout@v3 + - run: semgrep ci diff -Nru temporal-1.21.5-1/src/.github/workflows/trigger-publish.yml temporal-1.22.5/src/.github/workflows/trigger-publish.yml --- temporal-1.21.5-1/src/.github/workflows/trigger-publish.yml 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/.github/workflows/trigger-publish.yml 2024-02-23 09:45:43.000000000 +0000 @@ -3,7 +3,7 @@ on: push: branches: - - master + - main - release/* jobs: @@ -20,7 +20,7 @@ id: get_branch run: | echo "::set-output name=branch::${GITHUB_REF#refs/heads/}" - + - name: Generate a token id: generate_token uses: tibdex/github-app-token@b62528385c34dbc9f38e5f4225ac829252d1ea92 diff -Nru temporal-1.21.5-1/src/.golangci.yml temporal-1.22.5/src/.golangci.yml --- temporal-1.21.5-1/src/.golangci.yml 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/.golangci.yml 2024-02-23 09:45:43.000000000 +0000 @@ -9,6 +9,7 @@ - revive # revive supersedes golint, which is now archived - staticcheck - vet + - forbidigo run: skip-dirs: - ^api @@ -17,6 +18,10 @@ linters-settings: govet: fieldalignment: 0 + forbidigo: + forbid: + - p: ^time\.After$ + msg: "time.After may leak resources. Use time.NewTimer instead." revive: severity: error confidence: 0.8 @@ -25,12 +30,16 @@ # Disabled rules - name: add-constant disabled: true + - name: argument-limit + disabled: true - name: bare-return disabled: true - name: banned-characters disabled: true - name: bool-literal-in-expr disabled: true + - name: confusing-naming + disabled: true - name: empty-lines disabled: true - name: error-naming @@ -71,9 +80,6 @@ disabled: true # Rule tuning - - name: argument-limit - arguments: - - 10 - name: cognitive-complexity arguments: - 25 @@ -82,10 +88,12 @@ - 15 - name: function-result-limit arguments: - - 3 + - 4 - name: unhandled-error arguments: - - "fmt.Printf" + - "fmt.*" + - "bytes.Buffer.*" + - "strings.Builder.*" issues: # Exclude cyclomatic and cognitive complexity rules for functional tests in the `tests` root directory. exclude-rules: @@ -93,3 +101,6 @@ text: "(cyclomatic|cognitive)" linters: - revive + - path: _test\.go + linters: + - goerr113 diff -Nru temporal-1.21.5-1/src/Makefile temporal-1.22.5/src/Makefile --- temporal-1.21.5-1/src/Makefile 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/Makefile 2024-02-23 09:45:43.000000000 +0000 @@ -70,9 +70,7 @@ ALL_SRC += go.mod ALL_SCRIPTS := $(shell find . -name "*.sh") -MAIN_BRANCH = master -MERGE_BASE ?= $(shell git merge-base $(MAIN_BRANCH) HEAD) -MODIFIED_FILES := $(shell git diff --name-status $(MERGE_BASE) -- | cut -f2) +MAIN_BRANCH := main TEST_DIRS := $(sort $(dir $(filter %_test.go,$(ALL_SRC)))) FUNCTIONAL_TEST_ROOT := ./tests @@ -117,7 +115,7 @@ update-linters: @printf $(COLOR) "Install/update linters..." 
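The `.golangci.yml` hunk above adds a `forbidigo` pattern that bans `time.After`, with the message "time.After may leak resources. Use time.NewTimer instead." The following is a minimal Go sketch of the pattern the rule steers toward; it is illustrative only and not code from this diff. In the Go releases this server line targets, the timer allocated by `time.After` is not reclaimed until it fires, so a select that usually exits early on another case keeps the timer alive for the full duration, while `time.NewTimer` with a deferred `Stop` releases it immediately.

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// waitOrTimeout waits for ctx to be cancelled or for the timeout to elapse.
// Using time.NewTimer instead of time.After lets us stop the timer as soon as
// the other select case wins, so its resources are released right away rather
// than lingering until the timer would have fired.
func waitOrTimeout(ctx context.Context, timeout time.Duration) error {
	timer := time.NewTimer(timeout)
	defer timer.Stop() // release the timer even when ctx wins the select

	select {
	case <-ctx.Done():
		return ctx.Err()
	case <-timer.C:
		return fmt.Errorf("timed out after %v", timeout)
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	go func() {
		time.Sleep(10 * time.Millisecond)
		cancel()
	}()
	fmt.Println(waitOrTimeout(ctx, time.Second)) // prints "context canceled"
}
```

For a select that runs once this is mostly hygiene; in per-request or looped selects the unstopped timers from `time.After` accumulate until they expire, which is presumably what the new rule guards against in the server code.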
- @go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.51.2 + @go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.53.3 update-mockgen: @printf $(COLOR) "Install/update mockgen tool..." @@ -234,6 +232,8 @@ @printf $(COLOR) "Fix license header..." @go run ./cmd/tools/copyright/licensegen.go +goimports: MERGE_BASE ?= $(shell git merge-base $(MAIN_BRANCH) HEAD) +goimports: MODIFIED_FILES := $(shell git diff --name-status $(MERGE_BASE) -- | cut -f2) goimports: @printf $(COLOR) "Run goimports for modified files..." @printf "Merge base: $(MERGE_BASE)\n" diff -Nru temporal-1.21.5-1/src/README.md temporal-1.22.5/src/README.md --- temporal-1.21.5-1/src/README.md 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/README.md 2024-02-23 09:45:43.000000000 +0000 @@ -1,5 +1,5 @@ -[![Build status](https://badge.buildkite.com/98c157ed502d55722ef7f28e6470aa20702c76d6989a0a5a89.svg?branch=master)](https://buildkite.com/temporal/temporal-public) -[![Coverage Status](https://coveralls.io/repos/github/temporalio/temporal/badge.svg?branch=master)](https://coveralls.io/github/temporalio/temporal?branch=master) +[![Build status](https://badge.buildkite.com/98c157ed502d55722ef7f28e6470aa20702c76d6989a0a5a89.svg?branch=main)](https://buildkite.com/temporal/temporal-public) +[![Coverage Status](https://coveralls.io/repos/github/temporalio/temporal/badge.svg?branch=main)](https://coveralls.io/github/temporalio/temporal?branch=main) [![Discourse](https://img.shields.io/static/v1?label=Discourse&message=Get%20Help&color=informational)](https://community.temporal.io) [![Go Report Card][go-report-image]][go-report-url] @@ -25,14 +25,11 @@ Execute the following commands to start a pre-built image along with all the dependencies. ```bash -git clone https://github.com/temporalio/docker-compose.git -cd docker-compose -docker-compose up +brew install temporal +temporal server start-dev ``` -Refer to Temporal [docker-compose](https://github.com/temporalio/docker-compose) repo for more advanced options. - -For more details on Docker images refer to [docker-builds](https://github.com/temporalio/docker-builds) repo. +Refer to [Temporal CLI](https://docs.temporal.io/cli/#installation) documentation for more installation options. ### Run the Samples @@ -41,21 +38,20 @@ ### Use CLI -Use [Temporal's command line tool](https://docs.temporal.io/tctl-v1) `tctl` to interact with the local Temporal server. +Use [Temporal CLI](https://docs.temporal.io/cli/) to interact with the running Temporal server. ```bash -alias tctl="docker exec temporal-admin-tools tctl" -tctl namespace list -tctl workflow list +temporal operator namespace list +temporal workflow list ``` ### Use Temporal Web UI -Try [Temporal Web UI](https://github.com/temporalio/ui) by opening [http://localhost:8080](http://localhost:8080) for viewing your sample workflows executing on Temporal. +Try [Temporal Web UI](https://docs.temporal.io/web-ui) by opening [http://localhost:8233](http://localhost:8233) for viewing your sample workflows executing on Temporal. ## Repository -This repository contains the source code of the Temporal server. To implement Workflows, Activities and Workers, use [Go SDK](https://github.com/temporalio/sdk-go) or [Java SDK](https://github.com/temporalio/sdk-java). +This repository contains the source code of the Temporal server. To implement Workflows, Activities and Workers, use one of the [supported languages](https://docs.temporal.io/dev-guide/). 
## Contributing @@ -68,4 +64,4 @@ ## License -[MIT License](https://github.com/temporalio/temporal/blob/master/LICENSE) +[MIT License](https://github.com/temporalio/temporal/blob/main/LICENSE) diff -Nru temporal-1.21.5-1/src/api/adminservice/v1/request_response.pb.go temporal-1.22.5/src/api/adminservice/v1/request_response.pb.go --- temporal-1.21.5-1/src/api/adminservice/v1/request_response.pb.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/api/adminservice/v1/request_response.pb.go 2024-02-23 09:45:43.000000000 +0000 @@ -44,6 +44,8 @@ github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" v1 "go.temporal.io/api/common/v1" v16 "go.temporal.io/api/enums/v1" + v110 "go.temporal.io/api/namespace/v1" + v111 "go.temporal.io/api/replication/v1" v19 "go.temporal.io/api/version/v1" v17 "go.temporal.io/api/workflow/v1" v18 "go.temporal.io/server/api/cluster/v1" @@ -1818,6 +1820,7 @@ FailoverVersionIncrement int64 `protobuf:"varint,10,opt,name=failover_version_increment,json=failoverVersionIncrement,proto3" json:"failover_version_increment,omitempty"` InitialFailoverVersion int64 `protobuf:"varint,11,opt,name=initial_failover_version,json=initialFailoverVersion,proto3" json:"initial_failover_version,omitempty"` IsGlobalNamespaceEnabled bool `protobuf:"varint,12,opt,name=is_global_namespace_enabled,json=isGlobalNamespaceEnabled,proto3" json:"is_global_namespace_enabled,omitempty"` + Tags map[string]string `protobuf:"bytes,13,rep,name=tags,proto3" json:"tags,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } func (m *DescribeClusterResponse) Reset() { *m = DescribeClusterResponse{} } @@ -1936,6 +1939,13 @@ return false } +func (m *DescribeClusterResponse) GetTags() map[string]string { + if m != nil { + return m.Tags + } + return nil +} + type ListClustersRequest struct { PageSize int32 `protobuf:"varint,1,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` NextPageToken []byte `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` @@ -3332,6 +3342,174 @@ } } +type GetNamespaceRequest struct { + // Types that are valid to be assigned to Attributes: + // *GetNamespaceRequest_Namespace + // *GetNamespaceRequest_Id + Attributes isGetNamespaceRequest_Attributes `protobuf_oneof:"attributes"` +} + +func (m *GetNamespaceRequest) Reset() { *m = GetNamespaceRequest{} } +func (*GetNamespaceRequest) ProtoMessage() {} +func (*GetNamespaceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cc07c1a2abe7cb51, []int{57} +} +func (m *GetNamespaceRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetNamespaceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetNamespaceRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GetNamespaceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetNamespaceRequest.Merge(m, src) +} +func (m *GetNamespaceRequest) XXX_Size() int { + return m.Size() +} +func (m *GetNamespaceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetNamespaceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetNamespaceRequest proto.InternalMessageInfo + +type isGetNamespaceRequest_Attributes interface { + isGetNamespaceRequest_Attributes() + Equal(interface{}) bool + MarshalTo([]byte) (int, error) + Size() int +} + +type 
GetNamespaceRequest_Namespace struct { + Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3,oneof" json:"namespace,omitempty"` +} +type GetNamespaceRequest_Id struct { + Id string `protobuf:"bytes,2,opt,name=id,proto3,oneof" json:"id,omitempty"` +} + +func (*GetNamespaceRequest_Namespace) isGetNamespaceRequest_Attributes() {} +func (*GetNamespaceRequest_Id) isGetNamespaceRequest_Attributes() {} + +func (m *GetNamespaceRequest) GetAttributes() isGetNamespaceRequest_Attributes { + if m != nil { + return m.Attributes + } + return nil +} + +func (m *GetNamespaceRequest) GetNamespace() string { + if x, ok := m.GetAttributes().(*GetNamespaceRequest_Namespace); ok { + return x.Namespace + } + return "" +} + +func (m *GetNamespaceRequest) GetId() string { + if x, ok := m.GetAttributes().(*GetNamespaceRequest_Id); ok { + return x.Id + } + return "" +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*GetNamespaceRequest) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*GetNamespaceRequest_Namespace)(nil), + (*GetNamespaceRequest_Id)(nil), + } +} + +type GetNamespaceResponse struct { + Info *v110.NamespaceInfo `protobuf:"bytes,3,opt,name=info,proto3" json:"info,omitempty"` + Config *v110.NamespaceConfig `protobuf:"bytes,4,opt,name=config,proto3" json:"config,omitempty"` + ReplicationConfig *v111.NamespaceReplicationConfig `protobuf:"bytes,5,opt,name=replication_config,json=replicationConfig,proto3" json:"replication_config,omitempty"` + ConfigVersion int64 `protobuf:"varint,6,opt,name=config_version,json=configVersion,proto3" json:"config_version,omitempty"` + FailoverVersion int64 `protobuf:"varint,7,opt,name=failover_version,json=failoverVersion,proto3" json:"failover_version,omitempty"` + FailoverHistory []*v111.FailoverStatus `protobuf:"bytes,8,rep,name=failover_history,json=failoverHistory,proto3" json:"failover_history,omitempty"` +} + +func (m *GetNamespaceResponse) Reset() { *m = GetNamespaceResponse{} } +func (*GetNamespaceResponse) ProtoMessage() {} +func (*GetNamespaceResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_cc07c1a2abe7cb51, []int{58} +} +func (m *GetNamespaceResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetNamespaceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetNamespaceResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GetNamespaceResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetNamespaceResponse.Merge(m, src) +} +func (m *GetNamespaceResponse) XXX_Size() int { + return m.Size() +} +func (m *GetNamespaceResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetNamespaceResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetNamespaceResponse proto.InternalMessageInfo + +func (m *GetNamespaceResponse) GetInfo() *v110.NamespaceInfo { + if m != nil { + return m.Info + } + return nil +} + +func (m *GetNamespaceResponse) GetConfig() *v110.NamespaceConfig { + if m != nil { + return m.Config + } + return nil +} + +func (m *GetNamespaceResponse) GetReplicationConfig() *v111.NamespaceReplicationConfig { + if m != nil { + return m.ReplicationConfig + } + return nil +} + +func (m *GetNamespaceResponse) GetConfigVersion() int64 { + if m != nil { + return m.ConfigVersion + } + return 0 +} + +func (m *GetNamespaceResponse) GetFailoverVersion() int64 { + if m != nil { + return 
m.FailoverVersion + } + return 0 +} + +func (m *GetNamespaceResponse) GetFailoverHistory() []*v111.FailoverStatus { + if m != nil { + return m.FailoverHistory + } + return nil +} + func init() { proto.RegisterType((*RebuildMutableStateRequest)(nil), "temporal.server.api.adminservice.v1.RebuildMutableStateRequest") proto.RegisterType((*RebuildMutableStateResponse)(nil), "temporal.server.api.adminservice.v1.RebuildMutableStateResponse") @@ -3372,6 +3550,7 @@ proto.RegisterType((*DescribeClusterRequest)(nil), "temporal.server.api.adminservice.v1.DescribeClusterRequest") proto.RegisterType((*DescribeClusterResponse)(nil), "temporal.server.api.adminservice.v1.DescribeClusterResponse") proto.RegisterMapType((map[string]string)(nil), "temporal.server.api.adminservice.v1.DescribeClusterResponse.SupportedClientsEntry") + proto.RegisterMapType((map[string]string)(nil), "temporal.server.api.adminservice.v1.DescribeClusterResponse.TagsEntry") proto.RegisterType((*ListClustersRequest)(nil), "temporal.server.api.adminservice.v1.ListClustersRequest") proto.RegisterType((*ListClustersResponse)(nil), "temporal.server.api.adminservice.v1.ListClustersResponse") proto.RegisterType((*AddOrUpdateRemoteClusterRequest)(nil), "temporal.server.api.adminservice.v1.AddOrUpdateRemoteClusterRequest") @@ -3396,6 +3575,8 @@ proto.RegisterType((*DeleteWorkflowExecutionResponse)(nil), "temporal.server.api.adminservice.v1.DeleteWorkflowExecutionResponse") proto.RegisterType((*StreamWorkflowReplicationMessagesRequest)(nil), "temporal.server.api.adminservice.v1.StreamWorkflowReplicationMessagesRequest") proto.RegisterType((*StreamWorkflowReplicationMessagesResponse)(nil), "temporal.server.api.adminservice.v1.StreamWorkflowReplicationMessagesResponse") + proto.RegisterType((*GetNamespaceRequest)(nil), "temporal.server.api.adminservice.v1.GetNamespaceRequest") + proto.RegisterType((*GetNamespaceResponse)(nil), "temporal.server.api.adminservice.v1.GetNamespaceResponse") } func init() { @@ -3403,201 +3584,212 @@ } var fileDescriptor_cc07c1a2abe7cb51 = []byte{ - // 3095 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x3a, 0x4d, 0x6c, 0x1b, 0xd7, - 0xd1, 0x5a, 0xfe, 0x48, 0xe4, 0xe8, 0x7f, 0x2d, 0x59, 0x34, 0x15, 0x51, 0x0a, 0xe3, 0x38, 0xb2, - 0xbf, 0x84, 0xfa, 0xac, 0xb4, 0x8d, 0x93, 0xd4, 0x30, 0x24, 0xd9, 0x91, 0x95, 0x4a, 0xf9, 0x59, - 0x39, 0x76, 0x1b, 0x20, 0xd8, 0x2c, 0x77, 0x9f, 0xa8, 0x85, 0xb9, 0x3f, 0xd9, 0xf7, 0x48, 0x5b, - 0x01, 0xfa, 0x83, 0xa6, 0x45, 0xd1, 0x43, 0x51, 0x03, 0x45, 0x81, 0x20, 0xa7, 0x1e, 0xdb, 0xa2, - 0x45, 0x6f, 0xbd, 0xf7, 0xd6, 0x63, 0xd0, 0x5e, 0x82, 0x16, 0x68, 0x1a, 0xe5, 0xd2, 0x63, 0xce, - 0x3d, 0x15, 0xef, 0x6f, 0xff, 0xb8, 0xa4, 0xa8, 0xda, 0x4e, 0x81, 0xdc, 0xb8, 0xf3, 0x66, 0xe6, - 0xcd, 0x9b, 0xbf, 0x37, 0x33, 0x8f, 0xf0, 0x12, 0x41, 0x8e, 0xef, 0x05, 0x46, 0x7b, 0x0d, 0xa3, - 0xa0, 0x8b, 0x82, 0x35, 0xc3, 0xb7, 0xd7, 0x0c, 0xcb, 0xb1, 0x5d, 0xfa, 0x6d, 0x9b, 0x68, 0xad, - 0x7b, 0x79, 0x2d, 0x40, 0xef, 0x75, 0x10, 0x26, 0x7a, 0x80, 0xb0, 0xef, 0xb9, 0x18, 0x35, 0xfc, - 0xc0, 0x23, 0x9e, 0xfa, 0x94, 0xa4, 0x6d, 0x70, 0xda, 0x86, 0xe1, 0xdb, 0x8d, 0x38, 0x6d, 0xa3, - 0x7b, 0xb9, 0xba, 0xdc, 0xf2, 0xbc, 0x56, 0x1b, 0xad, 0x31, 0x92, 0x66, 0xe7, 0x60, 0x8d, 0xd8, - 0x0e, 0xc2, 0xc4, 0x70, 0x7c, 0xce, 0xa5, 0x5a, 0x4b, 0x23, 0x58, 0x9d, 0xc0, 0x20, 0xb6, 0xe7, - 0x8a, 0xf5, 0x27, 0x2d, 0xe4, 0x23, 0xd7, 0x42, 0xae, 0x69, 0x23, 0xbc, 0xd6, 0xf2, 0x5a, 0x1e, - 0x83, 0xb3, 0x5f, 0x02, 0xa5, 0x1e, 0x1e, 0x82, 0x4a, 0x8f, 0xdc, 0x8e, 
0x83, 0xa9, 0xd8, 0xa6, - 0xe7, 0x38, 0x21, 0x9b, 0x0b, 0xd9, 0x38, 0xc4, 0xc0, 0x77, 0xf5, 0xf7, 0x3a, 0xa8, 0x23, 0x0e, - 0x55, 0x3d, 0x9f, 0xc0, 0xe3, 0x2c, 0x28, 0xa2, 0x83, 0x30, 0x36, 0x5a, 0x12, 0xeb, 0xe9, 0x04, - 0x56, 0x17, 0x05, 0xd8, 0xce, 0x42, 0x4b, 0x6e, 0x7a, 0xcf, 0x0b, 0xee, 0x1e, 0xb4, 0xbd, 0x7b, - 0xbd, 0x78, 0xcf, 0x66, 0x59, 0xc1, 0x6c, 0x77, 0x30, 0x41, 0x41, 0x2f, 0xf6, 0xc5, 0x2c, 0xec, - 0xec, 0x53, 0x5f, 0x1a, 0x8c, 0xca, 0x77, 0x10, 0xb8, 0xcf, 0x0c, 0xc4, 0xa5, 0x8a, 0x1a, 0x24, - 0xed, 0xa1, 0x8d, 0x89, 0x17, 0x1c, 0xf5, 0x4a, 0xdb, 0xc8, 0xc2, 0x76, 0x0d, 0x07, 0x61, 0xdf, - 0xe0, 0xee, 0x95, 0xc4, 0xff, 0xff, 0x2c, 0xfc, 0x00, 0xf9, 0x6d, 0xdb, 0x64, 0x6e, 0xd1, 0x4b, - 0xf1, 0x62, 0x16, 0x85, 0x4f, 0x6d, 0x82, 0x09, 0x72, 0xf9, 0x1e, 0xe2, 0xa8, 0xba, 0x83, 0x88, - 0x61, 0x19, 0xc4, 0x10, 0xa4, 0xcf, 0x0f, 0x41, 0x8a, 0xee, 0x23, 0xb3, 0x43, 0x77, 0xc6, 0x82, - 0xe8, 0xda, 0x10, 0x44, 0xd2, 0xd6, 0xba, 0xd3, 0x21, 0x46, 0xb3, 0x8d, 0x74, 0x4c, 0x0c, 0x32, - 0x50, 0x25, 0x29, 0x06, 0x54, 0xdf, 0x62, 0xc3, 0xfa, 0x07, 0x0a, 0x54, 0x35, 0xd4, 0xec, 0xd8, - 0x6d, 0x6b, 0x8f, 0xb3, 0xdb, 0xa7, 0xdc, 0x34, 0x1e, 0x96, 0xea, 0x13, 0x50, 0x0e, 0xf5, 0x59, - 0x51, 0x56, 0x94, 0xd5, 0xb2, 0x16, 0x01, 0xd4, 0x6d, 0x28, 0x87, 0x27, 0xa8, 0xe4, 0x56, 0x94, - 0xd5, 0xf1, 0xf5, 0x8b, 0xa1, 0x00, 0x2c, 0x64, 0x85, 0xc7, 0x74, 0x2f, 0x37, 0xee, 0x08, 0xa9, - 0x6f, 0x48, 0x02, 0x2d, 0xa2, 0xad, 0x2f, 0xc1, 0x62, 0xa6, 0x10, 0x3c, 0x27, 0xd4, 0x7f, 0xa4, - 0xc0, 0xe2, 0x75, 0x84, 0xcd, 0xc0, 0x6e, 0xa2, 0xff, 0xa1, 0x94, 0x7f, 0xcc, 0xc1, 0x13, 0xd9, - 0x62, 0x70, 0x39, 0xd5, 0x73, 0x50, 0xc2, 0x87, 0x46, 0x60, 0xe9, 0xb6, 0x25, 0xc4, 0x18, 0x63, - 0xdf, 0x3b, 0x96, 0xfa, 0x24, 0x4c, 0x08, 0x37, 0xd6, 0x0d, 0xcb, 0x0a, 0x98, 0x1c, 0x65, 0x6d, - 0x5c, 0xc0, 0x36, 0x2c, 0x2b, 0x50, 0x0f, 0xe1, 0x8c, 0x69, 0x98, 0x87, 0x28, 0x69, 0xd7, 0x4a, - 0x9e, 0x49, 0x7c, 0xa5, 0x91, 0x95, 0x11, 0x63, 0x86, 0x8d, 0x4b, 0x9f, 0x10, 0x6e, 0x96, 0x31, - 0x8d, 0x83, 0x54, 0x17, 0xce, 0x52, 0x47, 0x6d, 0x1a, 0x38, 0xbd, 0x59, 0xe1, 0x21, 0x37, 0x9b, - 0x93, 0x7c, 0xe3, 0xd0, 0xfa, 0x5f, 0x14, 0xa8, 0x4a, 0xc5, 0xdd, 0xe4, 0x27, 0xbe, 0xe9, 0x61, - 0x22, 0xcd, 0x47, 0x75, 0xe3, 0x61, 0xc2, 0x14, 0x83, 0x30, 0x16, 0xaa, 0x1b, 0xa7, 0xb0, 0x0d, - 0x0e, 0x4a, 0x68, 0x96, 0xaa, 0xae, 0x18, 0x69, 0x36, 0x61, 0xfc, 0x7c, 0xda, 0xf8, 0xdf, 0x06, - 0x35, 0x8c, 0x97, 0xc8, 0x0b, 0x0a, 0xa7, 0xf5, 0x82, 0xd9, 0x7b, 0x69, 0x50, 0xfd, 0x1f, 0x31, - 0xa7, 0x4c, 0x1c, 0x4a, 0x38, 0xc3, 0x53, 0x30, 0xc9, 0x44, 0xc4, 0xba, 0xdb, 0x71, 0x9a, 0x28, - 0x60, 0xc7, 0x2a, 0x6a, 0x13, 0x1c, 0xf8, 0x1a, 0x83, 0xa9, 0x8b, 0x50, 0x96, 0xe7, 0xc2, 0x95, - 0xdc, 0x4a, 0x7e, 0xb5, 0xa8, 0x95, 0xc4, 0xc1, 0xb0, 0xfa, 0x0e, 0x4c, 0x87, 0x07, 0xd1, 0x99, - 0x15, 0x85, 0x33, 0x7c, 0x2d, 0xd3, 0x3e, 0x21, 0x2e, 0x3d, 0xc2, 0x6b, 0xf2, 0x63, 0x8b, 0xd2, - 0xed, 0xb8, 0x07, 0x9e, 0x36, 0xe5, 0x26, 0x60, 0x6a, 0x05, 0xc6, 0xa4, 0xc6, 0x8b, 0xdc, 0x59, - 0xc5, 0xe7, 0xab, 0x85, 0x52, 0x61, 0xa6, 0x58, 0x6f, 0xc0, 0xec, 0x56, 0xdb, 0xc3, 0x68, 0x9f, - 0xca, 0x23, 0x6d, 0x95, 0x76, 0xf1, 0xc8, 0x10, 0xf5, 0x39, 0x50, 0xe3, 0xf8, 0x22, 0x76, 0x9f, - 0x85, 0xe9, 0x6d, 0x44, 0x86, 0xe5, 0xf1, 0x2e, 0xcc, 0x44, 0xd8, 0x42, 0x91, 0xbb, 0x00, 0x02, - 0xdd, 0x3d, 0xf0, 0x18, 0xc1, 0xf8, 0xfa, 0x73, 0xc3, 0x78, 0x28, 0x63, 0xc3, 0x8e, 0xce, 0x95, - 0x4c, 0x7f, 0xd6, 0x7f, 0x96, 0x83, 0x85, 0x5d, 0x1b, 0x13, 0x61, 0xb2, 0x5b, 0x34, 0x17, 0x9e, - 0x2c, 0x98, 0xfa, 0x0a, 0x94, 0x4c, 0x83, 0xa0, 0x96, 0x17, 0x1c, 0x31, 0x07, 0x9c, 0x5a, 0xbf, - 
0x94, 0x29, 0x02, 0xbb, 0xd4, 0xe8, 0xe6, 0x94, 0xf1, 0x96, 0xa0, 0xd0, 0x42, 0x5a, 0xf5, 0x26, - 0x00, 0xab, 0x0b, 0x02, 0xc3, 0x6d, 0x49, 0x73, 0x5e, 0xcc, 0xe4, 0x24, 0x52, 0x83, 0xe4, 0xa5, - 0x51, 0x02, 0xad, 0x4c, 0xe4, 0x4f, 0x75, 0x09, 0xa0, 0x69, 0x10, 0xf3, 0x50, 0xc7, 0xf6, 0xfb, - 0x3c, 0x70, 0x8b, 0x5a, 0x99, 0x41, 0xf6, 0xed, 0xf7, 0x91, 0x7a, 0x01, 0xa6, 0x5d, 0x74, 0x9f, - 0xe8, 0xbe, 0xd1, 0x42, 0x3a, 0xf1, 0xee, 0x22, 0x97, 0x59, 0x79, 0x42, 0x9b, 0xa4, 0xe0, 0x37, - 0x8c, 0x16, 0xba, 0x45, 0x81, 0xf4, 0x02, 0xa8, 0xf4, 0xea, 0x43, 0xa8, 0xfe, 0x1a, 0x14, 0xd9, - 0x65, 0x51, 0x51, 0x56, 0xf2, 0x7d, 0x05, 0x4d, 0x95, 0x65, 0x5c, 0x5a, 0x4e, 0x97, 0x25, 0x45, - 0x2e, 0x4b, 0x8a, 0x0f, 0x73, 0x50, 0xa0, 0x74, 0x34, 0x17, 0x44, 0x3e, 0x1f, 0xa6, 0xd1, 0xf1, - 0x10, 0xb6, 0x63, 0xa9, 0xcb, 0x30, 0x1e, 0x86, 0xb4, 0x48, 0x07, 0x65, 0x0d, 0x24, 0x68, 0xc7, - 0x52, 0xe7, 0x61, 0x34, 0xe8, 0xb8, 0x74, 0x8d, 0xa7, 0x83, 0x62, 0xd0, 0x71, 0x77, 0x2c, 0x75, - 0x01, 0xc6, 0x98, 0xea, 0x6d, 0x8b, 0x69, 0x2b, 0xaf, 0x8d, 0xd2, 0xcf, 0x1d, 0x4b, 0xdd, 0x02, - 0xa6, 0x56, 0x9d, 0x1c, 0xf9, 0x88, 0x29, 0x69, 0x6a, 0xfd, 0xc2, 0xc9, 0xc6, 0xbd, 0x75, 0xe4, - 0x23, 0xad, 0x44, 0xc4, 0x2f, 0xf5, 0x2a, 0x94, 0x0f, 0xec, 0x00, 0xe9, 0xb4, 0x06, 0xad, 0x8c, - 0x32, 0xbb, 0x56, 0x1b, 0xbc, 0xfe, 0x6c, 0xc8, 0xfa, 0xb3, 0x71, 0x4b, 0x16, 0xa8, 0x9b, 0x85, - 0x07, 0x9f, 0x2e, 0x2b, 0x5a, 0x89, 0x92, 0x50, 0x20, 0x0d, 0x46, 0x51, 0xea, 0x55, 0xc6, 0x98, - 0x70, 0xf2, 0xb3, 0xfe, 0x37, 0x05, 0x66, 0x35, 0xe4, 0x78, 0x5d, 0xc4, 0x14, 0xfb, 0xe5, 0xb9, - 0x6a, 0x4c, 0x5f, 0xf9, 0x84, 0xbe, 0x76, 0x60, 0xba, 0x6b, 0x63, 0xbb, 0x69, 0xb7, 0x6d, 0x72, - 0xc4, 0x0f, 0x5c, 0x18, 0xf2, 0xc0, 0x53, 0x11, 0x21, 0x5d, 0xa2, 0x39, 0x23, 0x7e, 0x36, 0x91, - 0x33, 0x7e, 0x91, 0x87, 0x67, 0xb6, 0x11, 0xe9, 0x4d, 0xc3, 0xc6, 0x3d, 0xe1, 0xa6, 0xb7, 0xd7, - 0x63, 0x97, 0x47, 0xc2, 0x61, 0xca, 0xbd, 0x0e, 0xf3, 0xa8, 0x0a, 0x00, 0xf5, 0x3c, 0x4c, 0x61, - 0x62, 0x04, 0x44, 0x47, 0x5d, 0xe4, 0x92, 0x48, 0x31, 0x13, 0x0c, 0x7a, 0x83, 0x02, 0x77, 0x2c, - 0xb5, 0x01, 0x67, 0xe2, 0x58, 0xd2, 0xac, 0xdc, 0xe7, 0x66, 0x23, 0xd4, 0xdb, 0x7c, 0x41, 0x5d, - 0x81, 0x09, 0xe4, 0x5a, 0x11, 0xcf, 0x22, 0x43, 0x04, 0xe4, 0x5a, 0x92, 0xe3, 0x25, 0x98, 0x8d, - 0x30, 0x24, 0xbf, 0x51, 0x86, 0x36, 0x2d, 0xd1, 0x24, 0xb7, 0x4b, 0x30, 0xeb, 0x18, 0xf7, 0x6d, - 0xa7, 0xe3, 0xf0, 0xa0, 0x63, 0xd9, 0x61, 0x8c, 0x79, 0xc8, 0xb4, 0x58, 0xa0, 0x61, 0xd7, 0x2f, - 0x47, 0x94, 0x32, 0xa2, 0xf3, 0xd5, 0x42, 0x49, 0x99, 0xc9, 0xd5, 0x7f, 0x95, 0x83, 0xd5, 0x93, - 0xad, 0x22, 0x32, 0x47, 0x06, 0x6b, 0x25, 0x83, 0x35, 0xf5, 0x25, 0x59, 0x17, 0xb1, 0xdc, 0x85, - 0xf8, 0x35, 0x38, 0xbe, 0xbe, 0xd2, 0xcf, 0x42, 0xd7, 0x0d, 0x62, 0x6c, 0xb6, 0xbd, 0xa6, 0x36, - 0x25, 0x08, 0x37, 0x39, 0x9d, 0x7a, 0x07, 0xa6, 0x85, 0x6e, 0x74, 0xb1, 0x22, 0xf2, 0x6b, 0xe3, - 0xa4, 0xfc, 0x2a, 0x74, 0x27, 0x4e, 0xa1, 0x4d, 0x75, 0x13, 0xdf, 0xea, 0x2a, 0xcc, 0x48, 0x19, - 0x5d, 0xcf, 0x42, 0xec, 0xae, 0x2e, 0xac, 0xe4, 0x57, 0xf3, 0xa1, 0x08, 0xaf, 0x79, 0x16, 0xda, - 0xb1, 0x70, 0xfd, 0x81, 0x02, 0x4b, 0xdb, 0x88, 0x68, 0x51, 0x4b, 0xb1, 0xc7, 0xdb, 0x89, 0xf0, - 0x8a, 0xd9, 0x85, 0x51, 0xa6, 0x0d, 0x99, 0x52, 0xb3, 0xaf, 0xf2, 0x58, 0x4f, 0x42, 0xe5, 0x8b, - 0xf1, 0x63, 0x5a, 0xd3, 0x04, 0x0f, 0xea, 0xfc, 0xb2, 0xfb, 0xa0, 0x0e, 0x2f, 0xab, 0x4a, 0x01, - 0xa3, 0x35, 0x40, 0xfd, 0xa3, 0x1c, 0xd4, 0xfa, 0x89, 0x24, 0x6c, 0xf5, 0x5d, 0x98, 0xe2, 0xb9, - 0x44, 0xf4, 0x3e, 0x52, 0xb6, 0xdb, 0x43, 0xa5, 0xfb, 0xc1, 0xcc, 0xf9, 0x25, 0x2c, 0xa1, 0x37, - 0x5c, 0x12, 0x1c, 0x69, 
0xbc, 0x2e, 0x92, 0xb0, 0xea, 0x11, 0xa8, 0xbd, 0x48, 0xea, 0x0c, 0xe4, - 0xef, 0xa2, 0x23, 0x91, 0xdb, 0xe8, 0x4f, 0x75, 0x0f, 0x8a, 0x5d, 0xa3, 0xdd, 0x41, 0x22, 0x84, - 0x5f, 0x38, 0xa5, 0xe6, 0x42, 0xc9, 0x38, 0x97, 0x97, 0x72, 0x57, 0x94, 0xfa, 0x9f, 0x14, 0xb8, - 0xb0, 0x8d, 0x48, 0x58, 0x2c, 0x0d, 0x30, 0xdc, 0x8b, 0x70, 0xae, 0x6d, 0xb0, 0x41, 0x05, 0x09, - 0x6c, 0xd4, 0x45, 0xa1, 0xb6, 0x64, 0x06, 0xce, 0x6b, 0x67, 0x29, 0x82, 0x26, 0xd7, 0x05, 0x83, - 0x1d, 0x2b, 0x24, 0xf5, 0x03, 0xcf, 0x44, 0x18, 0x27, 0x49, 0x73, 0x11, 0xe9, 0x1b, 0x72, 0x3d, - 0x22, 0x4d, 0x1b, 0x38, 0xdf, 0x6b, 0xe0, 0xef, 0xb1, 0x5c, 0x39, 0xf8, 0x08, 0xc2, 0xd0, 0xfb, - 0x50, 0x8a, 0x99, 0xf8, 0xa1, 0x94, 0x18, 0x32, 0xaa, 0xbf, 0x0f, 0x2b, 0xdb, 0x88, 0x5c, 0xdf, - 0x7d, 0x73, 0x80, 0xf2, 0x6e, 0x8b, 0xaa, 0x87, 0x56, 0x70, 0xd2, 0xbb, 0x4e, 0xbb, 0x35, 0xbd, - 0x21, 0x78, 0x31, 0x47, 0xc4, 0x2f, 0x5c, 0xff, 0xb1, 0x02, 0x4f, 0x0e, 0xd8, 0x5c, 0x1c, 0xfb, - 0x5d, 0x98, 0x8d, 0xb1, 0xd5, 0xe3, 0x15, 0xcd, 0xf3, 0xff, 0x85, 0x10, 0xda, 0x4c, 0x90, 0x04, - 0xe0, 0xfa, 0x5f, 0x15, 0x98, 0xd3, 0x90, 0xe1, 0xfb, 0xed, 0x23, 0x96, 0x8c, 0x71, 0xbf, 0xdb, - 0xa9, 0xd0, 0x7b, 0x3b, 0x65, 0x77, 0x28, 0xb9, 0x87, 0xef, 0x50, 0xd4, 0x2b, 0x30, 0xca, 0xae, - 0x0c, 0x2c, 0xf2, 0xe0, 0xc9, 0x29, 0x55, 0xe0, 0x8b, 0x84, 0xbf, 0x00, 0xf3, 0xa9, 0x43, 0x89, - 0xfb, 0xf9, 0xdf, 0x39, 0xa8, 0x6e, 0x58, 0xd6, 0x3e, 0x32, 0x02, 0xf3, 0x70, 0x83, 0x90, 0xc0, - 0x6e, 0x76, 0x48, 0x64, 0xed, 0x1f, 0x2a, 0x30, 0x8b, 0xd9, 0x9a, 0x6e, 0x84, 0x8b, 0x42, 0xe1, - 0x6f, 0x0d, 0x95, 0x53, 0xfa, 0x33, 0x6f, 0xa4, 0xe1, 0x3c, 0xa5, 0xcc, 0xe0, 0x14, 0x98, 0x96, - 0xc7, 0xb6, 0x6b, 0xa1, 0xfb, 0xf1, 0xc4, 0x58, 0x66, 0x10, 0x1a, 0x2a, 0xea, 0xb3, 0xa0, 0xe2, - 0xbb, 0xb6, 0xaf, 0x63, 0xf3, 0x10, 0x39, 0x86, 0xde, 0xf1, 0x2d, 0xd9, 0x6b, 0x97, 0xb4, 0x19, - 0xba, 0xb2, 0xcf, 0x16, 0xde, 0x62, 0xf0, 0x64, 0x8f, 0x59, 0x48, 0xf5, 0x98, 0xd5, 0x36, 0xcc, - 0x67, 0x4a, 0x15, 0xcf, 0x61, 0x65, 0x9e, 0xc3, 0xae, 0xc6, 0x73, 0xd8, 0xd4, 0xfa, 0x33, 0x49, - 0x8b, 0x84, 0x15, 0xd9, 0x0e, 0x95, 0x13, 0x59, 0xb7, 0x29, 0x2a, 0xab, 0x33, 0x63, 0x39, 0x6b, - 0x09, 0x16, 0x33, 0xd5, 0x23, 0x6c, 0xf3, 0x53, 0x05, 0x96, 0x78, 0x49, 0xd5, 0xcf, 0x3c, 0xff, - 0xd7, 0xcf, 0x3a, 0xe5, 0xd3, 0xab, 0x71, 0x60, 0xf3, 0x5d, 0x5f, 0x81, 0x5a, 0x3f, 0x51, 0x84, - 0xb4, 0xdf, 0x81, 0x2a, 0xed, 0xf7, 0xfa, 0x48, 0x9a, 0xdc, 0x5c, 0x19, 0xb8, 0x79, 0x2e, 0xbd, - 0xf9, 0x47, 0xa3, 0xb0, 0x98, 0xc9, 0x5b, 0x64, 0x85, 0x0f, 0x14, 0x98, 0x35, 0x3b, 0x98, 0x78, - 0x4e, 0xaf, 0x97, 0x0e, 0x7d, 0xf3, 0xf5, 0xe3, 0xde, 0xd8, 0x62, 0x9c, 0x7b, 0xdc, 0xd4, 0x4c, - 0x81, 0x99, 0x14, 0xf8, 0x08, 0x13, 0x94, 0x90, 0x22, 0xf7, 0x88, 0xa4, 0xd8, 0x67, 0x9c, 0x7b, - 0x83, 0x25, 0x05, 0x56, 0x5b, 0x30, 0xe6, 0x18, 0xbe, 0x6f, 0xbb, 0xad, 0x4a, 0x9e, 0x6d, 0xbd, - 0xf7, 0xd0, 0x5b, 0xef, 0x71, 0x7e, 0x7c, 0x47, 0xc9, 0x5d, 0x75, 0x61, 0xd1, 0xb0, 0x2c, 0xbd, - 0x37, 0xe1, 0xf1, 0xe6, 0x9e, 0xb7, 0x11, 0x6b, 0xc9, 0xa8, 0x90, 0xc8, 0x99, 0x79, 0x8f, 0xdd, - 0x08, 0x15, 0xc3, 0xb2, 0x32, 0x57, 0x68, 0x68, 0x66, 0x5a, 0xe2, 0xb1, 0x84, 0x26, 0x4b, 0x04, - 0x59, 0x1a, 0x7f, 0x3c, 0xbb, 0xbd, 0x04, 0x13, 0x71, 0x25, 0x67, 0x6c, 0x32, 0x17, 0xdf, 0xa4, - 0x1c, 0x4f, 0x22, 0x2f, 0xc3, 0x59, 0x39, 0xbb, 0xda, 0xe2, 0xb5, 0x44, 0xec, 0xc6, 0x4a, 0x54, - 0x1c, 0x4a, 0x6f, 0xc5, 0xf1, 0x9b, 0x51, 0x58, 0xe8, 0xa1, 0x16, 0x51, 0xf5, 0x7d, 0x98, 0xc5, - 0x1d, 0xdf, 0xf7, 0x02, 0x82, 0x2c, 0xdd, 0x6c, 0xdb, 0xec, 0xfa, 0xe1, 0x41, 0xa5, 0x0d, 0xe5, - 0x53, 0x7d, 0x18, 0x37, 0xf6, 0x25, 0xd7, 0x2d, 
0xce, 0x54, 0xba, 0x72, 0x0a, 0xac, 0x3e, 0x0d, - 0x53, 0x9c, 0x7b, 0xd8, 0x28, 0xf1, 0xc3, 0x4f, 0x72, 0xa8, 0x6c, 0x93, 0xee, 0xc0, 0xb4, 0x83, - 0x9c, 0x26, 0x0a, 0xf0, 0xa1, 0xed, 0x73, 0xe7, 0x1b, 0xd4, 0x2c, 0xc8, 0xe7, 0x8c, 0xee, 0xe5, - 0xc6, 0x5e, 0x48, 0xc6, 0xa7, 0x6a, 0x4e, 0xe2, 0x9b, 0xe6, 0x2c, 0xa9, 0xbf, 0xf0, 0xbe, 0x2f, - 0x0b, 0x48, 0x46, 0x41, 0x57, 0xec, 0x51, 0x2f, 0xed, 0x1f, 0x65, 0xbb, 0xc1, 0xcb, 0x72, 0xd3, - 0xeb, 0xb8, 0x84, 0xf5, 0x7b, 0x45, 0x6d, 0x56, 0x2c, 0xb1, 0x8a, 0x79, 0x8b, 0x2e, 0xd0, 0x7c, - 0x1e, 0x1b, 0x7c, 0xe9, 0x74, 0x99, 0x77, 0x7c, 0x65, 0x6d, 0x26, 0xb6, 0xb0, 0x4f, 0xe1, 0xea, - 0x45, 0x98, 0x89, 0xf5, 0xee, 0x1c, 0xb7, 0xc4, 0x70, 0x63, 0x3d, 0x3d, 0x47, 0xdd, 0x86, 0x09, - 0xd9, 0x4f, 0x31, 0xfd, 0x94, 0x99, 0x7e, 0xce, 0x27, 0x3d, 0x55, 0x60, 0xc4, 0xba, 0x28, 0xa6, - 0x95, 0xf1, 0x6e, 0xf4, 0xa1, 0x7e, 0x13, 0xaa, 0x07, 0x86, 0xdd, 0xf6, 0x62, 0x46, 0xd1, 0x6d, - 0xd7, 0x0c, 0x90, 0x83, 0x5c, 0x52, 0x01, 0x56, 0x00, 0x57, 0x24, 0x46, 0xc8, 0x45, 0xac, 0xab, - 0x57, 0xa0, 0x62, 0xbb, 0x36, 0xb1, 0x8d, 0xb6, 0x9e, 0xe6, 0x52, 0x19, 0xe7, 0xc5, 0xb3, 0x58, - 0x7f, 0x25, 0xc9, 0x42, 0xbd, 0x0a, 0x8b, 0x36, 0xd6, 0x5b, 0x6d, 0xaf, 0x69, 0xb4, 0xf5, 0xa8, - 0x0c, 0x43, 0xae, 0xd1, 0x6c, 0x23, 0xab, 0x32, 0xc1, 0x2e, 0xfb, 0x8a, 0x8d, 0xb7, 0x19, 0x46, - 0x58, 0x41, 0xdf, 0xe0, 0xeb, 0xd5, 0x2d, 0x98, 0xcf, 0x74, 0xba, 0x53, 0x05, 0xda, 0xdb, 0x70, - 0x66, 0xd7, 0xc6, 0x44, 0x78, 0x73, 0x78, 0xb3, 0x2d, 0x42, 0x39, 0xea, 0xce, 0x79, 0x8f, 0x53, - 0xf2, 0x07, 0xb4, 0xe5, 0x99, 0x43, 0xb3, 0x9f, 0x2b, 0x30, 0x97, 0x64, 0x2e, 0x82, 0xf0, 0x75, - 0x28, 0x09, 0x87, 0x1a, 0x5c, 0xe7, 0xa6, 0xe6, 0xa5, 0x82, 0xcf, 0x9e, 0x78, 0xc7, 0xd2, 0x42, - 0x26, 0x43, 0x4b, 0xf4, 0x4b, 0x05, 0x96, 0x37, 0x2c, 0xeb, 0xf5, 0x80, 0xd7, 0x4d, 0xf4, 0xf2, - 0x27, 0xe9, 0x04, 0x73, 0x11, 0x66, 0x0e, 0x02, 0xcf, 0x25, 0xc8, 0xb5, 0x52, 0x13, 0xff, 0x69, - 0x09, 0x97, 0x53, 0xff, 0x6d, 0x58, 0xe1, 0xc6, 0xd2, 0x03, 0xc6, 0x49, 0x97, 0xa1, 0x63, 0x7a, - 0xae, 0x8b, 0xcc, 0xb0, 0x50, 0x2e, 0x69, 0x4b, 0x1c, 0x2f, 0xb1, 0xe1, 0x56, 0x88, 0x54, 0xaf, - 0xc3, 0x4a, 0x7f, 0xb1, 0x44, 0x29, 0x72, 0x0d, 0xaa, 0xbc, 0x58, 0xc9, 0x94, 0x7a, 0x88, 0xb4, - 0xc8, 0x1e, 0xb1, 0x32, 0x18, 0x44, 0x43, 0xad, 0x73, 0x31, 0x6b, 0x89, 0x34, 0x22, 0xf9, 0xef, - 0xc3, 0x3c, 0xeb, 0x11, 0x0f, 0x91, 0x11, 0x90, 0x26, 0x32, 0x88, 0x7e, 0xcf, 0x26, 0x87, 0xb6, - 0x2b, 0xfa, 0xb4, 0x73, 0x3d, 0x93, 0xb5, 0xeb, 0xe2, 0x29, 0x7b, 0xb3, 0xf0, 0xe1, 0xa7, 0xcb, - 0x8a, 0x76, 0x86, 0x52, 0xdf, 0x94, 0xc4, 0x77, 0x18, 0xad, 0xba, 0x0c, 0xe3, 0x81, 0x6f, 0x86, - 0x5a, 0x16, 0x93, 0xd2, 0xc0, 0x37, 0xa5, 0x82, 0x17, 0x60, 0x8c, 0xbd, 0xbc, 0x84, 0xa3, 0xd2, - 0x51, 0xfa, 0xc9, 0x46, 0xa2, 0x85, 0xc0, 0x6b, 0xf3, 0x5a, 0x77, 0x2a, 0x7e, 0x21, 0x67, 0xcd, - 0x0f, 0x13, 0x27, 0xd2, 0xbc, 0x36, 0xd2, 0x18, 0xb1, 0xfa, 0x0e, 0x54, 0x31, 0xc2, 0x2c, 0xdc, - 0xd9, 0xd4, 0x0b, 0x59, 0xba, 0x71, 0x40, 0x35, 0xc8, 0x46, 0x86, 0xc5, 0x21, 0x47, 0x86, 0x0b, - 0x82, 0xc7, 0x3e, 0x67, 0xb1, 0x41, 0x39, 0xb0, 0x91, 0x69, 0x22, 0x86, 0x46, 0x4f, 0x8e, 0xa1, - 0xb1, 0x2c, 0x8f, 0xfd, 0x48, 0x81, 0x6a, 0x96, 0x55, 0x44, 0x24, 0xdd, 0x82, 0x29, 0xc3, 0x24, - 0x76, 0x17, 0xe9, 0x22, 0xcd, 0x8b, 0x78, 0x7a, 0xee, 0xa4, 0x5b, 0x22, 0xa9, 0x93, 0x49, 0xce, - 0x44, 0x70, 0x1f, 0x3a, 0x9c, 0x7e, 0x9f, 0x83, 0x79, 0xde, 0xde, 0xa6, 0x1b, 0xea, 0x1b, 0x50, - 0x60, 0xd3, 0x6a, 0x85, 0xd9, 0xe7, 0xf2, 0x60, 0xfb, 0x5c, 0x47, 0x86, 0xb5, 0x8b, 0x08, 0x41, - 0xc1, 0x9b, 0x1d, 0x24, 0xea, 0x08, 0x46, 0x3e, 0xe8, 0x59, 0x8d, 0xde, 
0xa3, 0x5e, 0x27, 0x30, - 0xc3, 0xa0, 0x13, 0x1e, 0x32, 0xc9, 0xa1, 0xe2, 0x7c, 0xea, 0x0b, 0x34, 0x3b, 0x53, 0x0c, 0xaa, - 0x23, 0x1a, 0xd2, 0xb1, 0xd1, 0x06, 0x9f, 0x78, 0xce, 0x87, 0xeb, 0x37, 0xdc, 0xd8, 0x64, 0x23, - 0x73, 0x4e, 0x59, 0x1c, 0x7a, 0x4e, 0x39, 0x9a, 0xa5, 0xaf, 0x4f, 0x72, 0x70, 0x36, 0xad, 0x2f, - 0x61, 0xc8, 0x47, 0xa4, 0xb0, 0xcc, 0x51, 0x42, 0xee, 0x11, 0x8e, 0x12, 0xb2, 0xce, 0x9a, 0xcf, - 0x1a, 0x9c, 0x3a, 0x70, 0xb6, 0x47, 0x12, 0x59, 0x44, 0x3f, 0xd4, 0x78, 0x65, 0x2e, 0x2d, 0x12, - 0x7b, 0x36, 0xfb, 0xbb, 0x02, 0x0b, 0x6f, 0x74, 0x82, 0x16, 0xfa, 0x2a, 0x3a, 0x63, 0xbd, 0x0a, - 0x95, 0xde, 0xc3, 0x89, 0xbc, 0xfd, 0x87, 0x1c, 0x2c, 0xec, 0xa1, 0xaf, 0xe8, 0xc9, 0x1f, 0x4b, - 0x18, 0x6e, 0x42, 0xa5, 0x57, 0x61, 0xa7, 0x7b, 0x17, 0xa0, 0xb5, 0xcd, 0xa2, 0x86, 0x0e, 0x02, - 0x84, 0x0f, 0x65, 0x67, 0x97, 0x78, 0xaa, 0x4d, 0x0f, 0xd6, 0xf2, 0x8f, 0xef, 0xd9, 0x47, 0x4c, - 0xc3, 0x6a, 0xf0, 0x44, 0xb6, 0x40, 0x91, 0x9f, 0x2c, 0x69, 0x08, 0x23, 0xd7, 0x4a, 0x45, 0x55, - 0x5f, 0x99, 0x1f, 0xe1, 0xdb, 0xe6, 0xd3, 0x30, 0x95, 0x2c, 0x91, 0x44, 0xe7, 0x31, 0x19, 0xc4, - 0x6b, 0x91, 0x8c, 0x07, 0xac, 0x62, 0xc6, 0x03, 0xd6, 0x53, 0x30, 0xc9, 0xb1, 0x92, 0x4f, 0x4d, - 0x1c, 0xa9, 0xdf, 0xab, 0xd5, 0x58, 0xcf, 0xab, 0xd5, 0x32, 0x8c, 0x53, 0x0c, 0xc9, 0xa4, 0x14, - 0x22, 0x08, 0x16, 0x7c, 0x3c, 0x94, 0xad, 0x30, 0xa1, 0xd3, 0xdf, 0xe5, 0xa0, 0xb2, 0x8d, 0x08, - 0x05, 0xf2, 0x98, 0x89, 0xab, 0x73, 0xf0, 0xbf, 0x7e, 0x96, 0xc4, 0xc8, 0x99, 0xfd, 0x01, 0x4f, - 0x4e, 0x87, 0x88, 0x64, 0xa4, 0xee, 0xc2, 0x74, 0xb4, 0xcc, 0x5f, 0x7e, 0xf3, 0x2c, 0x88, 0xcf, - 0xf7, 0xe9, 0xc4, 0x23, 0x19, 0x68, 0xdc, 0x4e, 0x92, 0xf8, 0xa7, 0x5a, 0x83, 0x71, 0xc7, 0xe6, - 0x49, 0x38, 0x8a, 0xb8, 0xb2, 0x63, 0xf3, 0xac, 0x6a, 0xb1, 0x75, 0xe3, 0x7e, 0xb8, 0x5e, 0x14, - 0xeb, 0xc6, 0x7d, 0xb1, 0x9e, 0x7c, 0xcb, 0x1f, 0x1d, 0xe2, 0x2d, 0x3f, 0xb3, 0x98, 0x79, 0xa0, - 0xc0, 0xb9, 0x0c, 0x75, 0x89, 0xd0, 0xfb, 0x56, 0xf2, 0x31, 0xff, 0xeb, 0xc3, 0xb4, 0x04, 0x1b, - 0xed, 0xb6, 0x67, 0x1a, 0x04, 0x59, 0xe1, 0xf5, 0x70, 0xca, 0x87, 0xfd, 0x9f, 0x28, 0x50, 0xbb, - 0x8e, 0xda, 0x88, 0xa0, 0xde, 0x10, 0xfb, 0x72, 0xff, 0xbd, 0x75, 0x15, 0x96, 0xfb, 0x0a, 0x22, - 0x34, 0x54, 0x85, 0xd2, 0x3d, 0x23, 0x70, 0x6d, 0xb7, 0x25, 0x07, 0xa2, 0xe1, 0x77, 0xfd, 0xb7, - 0x0a, 0xac, 0xee, 0x93, 0x00, 0x19, 0x8e, 0xa4, 0x1f, 0xf0, 0xde, 0xe1, 0xc3, 0x59, 0x7c, 0xe4, - 0x9a, 0x7a, 0xfc, 0x86, 0xe6, 0x7f, 0xb0, 0x52, 0x06, 0xfc, 0xc1, 0x2a, 0x75, 0x39, 0xef, 0x1f, - 0xb9, 0x66, 0x6c, 0x0f, 0xf6, 0x57, 0xaa, 0x9b, 0x23, 0xda, 0x1c, 0xce, 0x80, 0x6f, 0x4e, 0x00, - 0x44, 0xf3, 0xc3, 0xfa, 0x87, 0x0a, 0x5c, 0x1c, 0x42, 0x58, 0x71, 0xec, 0x77, 0x7a, 0x9e, 0x85, - 0xae, 0x0d, 0x23, 0xdf, 0x00, 0xd6, 0x37, 0x47, 0xa2, 0x07, 0xa2, 0xa4, 0x68, 0x9b, 0xed, 0x8f, - 0x3f, 0xab, 0x8d, 0x7c, 0xf2, 0x59, 0x6d, 0xe4, 0x8b, 0xcf, 0x6a, 0xca, 0x0f, 0x8e, 0x6b, 0xca, - 0xaf, 0x8f, 0x6b, 0xca, 0x9f, 0x8f, 0x6b, 0xca, 0xc7, 0xc7, 0x35, 0xe5, 0x9f, 0xc7, 0x35, 0xe5, - 0x5f, 0xc7, 0xb5, 0x91, 0x2f, 0x8e, 0x6b, 0xca, 0x83, 0xcf, 0x6b, 0x23, 0x1f, 0x7f, 0x5e, 0x1b, - 0xf9, 0xe4, 0xf3, 0xda, 0xc8, 0xdb, 0xdf, 0x68, 0x79, 0x91, 0x48, 0xb6, 0x37, 0xe0, 0x1f, 0xc5, - 0x2f, 0xc7, 0xbf, 0x9b, 0xa3, 0xac, 0xad, 0x78, 0xfe, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xd2, - 0xe2, 0x79, 0x71, 0x8c, 0x2c, 0x00, 0x00, + // 3274 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x1a, 0x4b, 0x6c, 0x1b, 0xc7, + 0x55, 0xcb, 0x8f, 0x44, 0x3e, 0xfd, 0xd7, 0xb2, 0x45, 0x53, 0x11, 0xa5, 0x30, 0x8e, 0x23, 0x3b, + 0x09, 
0x55, 0x3b, 0x6d, 0xe3, 0x7c, 0x0c, 0x43, 0x92, 0x1d, 0x49, 0xa9, 0x94, 0xcf, 0xca, 0xb1, + 0xdb, 0x00, 0xc1, 0x66, 0xb8, 0x3b, 0xa2, 0x16, 0x26, 0x77, 0x99, 0x9d, 0x21, 0x6d, 0x05, 0xe8, + 0x07, 0x4d, 0x8b, 0xa2, 0x87, 0xa2, 0x06, 0x8a, 0x02, 0x41, 0x4e, 0x3d, 0x16, 0x41, 0x8b, 0xde, + 0x7a, 0x2c, 0xd0, 0x5b, 0x8f, 0x41, 0x7b, 0x09, 0x5a, 0xa0, 0x69, 0x94, 0x4b, 0x8f, 0x39, 0xf7, + 0x54, 0xcc, 0x6f, 0x7f, 0x5c, 0x52, 0x54, 0x6d, 0xa7, 0x40, 0x6e, 0xdc, 0x37, 0xef, 0xbd, 0x79, + 0xf3, 0x7e, 0xf3, 0xde, 0x1b, 0xc2, 0x8b, 0x14, 0xb7, 0xda, 0x9e, 0x8f, 0x9a, 0xab, 0x04, 0xfb, + 0x5d, 0xec, 0xaf, 0xa2, 0xb6, 0xb3, 0x8a, 0xec, 0x96, 0xe3, 0xb2, 0x6f, 0xc7, 0xc2, 0xab, 0xdd, + 0x4b, 0xab, 0x3e, 0x7e, 0xaf, 0x83, 0x09, 0x35, 0x7d, 0x4c, 0xda, 0x9e, 0x4b, 0x70, 0xad, 0xed, + 0x7b, 0xd4, 0xd3, 0x9f, 0x50, 0xb4, 0x35, 0x41, 0x5b, 0x43, 0x6d, 0xa7, 0x16, 0xa5, 0xad, 0x75, + 0x2f, 0x95, 0x97, 0x1a, 0x9e, 0xd7, 0x68, 0xe2, 0x55, 0x4e, 0x52, 0xef, 0xec, 0xaf, 0x52, 0xa7, + 0x85, 0x09, 0x45, 0xad, 0xb6, 0xe0, 0x52, 0xae, 0x24, 0x11, 0xec, 0x8e, 0x8f, 0xa8, 0xe3, 0xb9, + 0x72, 0xfd, 0x71, 0x1b, 0xb7, 0xb1, 0x6b, 0x63, 0xd7, 0x72, 0x30, 0x59, 0x6d, 0x78, 0x0d, 0x8f, + 0xc3, 0xf9, 0x2f, 0x89, 0x52, 0x0d, 0x0e, 0xc1, 0xa4, 0xc7, 0x6e, 0xa7, 0x45, 0x98, 0xd8, 0x96, + 0xd7, 0x6a, 0x05, 0x6c, 0xce, 0xa7, 0xe3, 0x50, 0x44, 0xee, 0x98, 0xef, 0x75, 0x70, 0x47, 0x1e, + 0xaa, 0x7c, 0x2e, 0x86, 0x27, 0x58, 0x30, 0xc4, 0x16, 0x26, 0x04, 0x35, 0x14, 0xd6, 0x93, 0x31, + 0xac, 0x2e, 0xf6, 0x89, 0x93, 0x86, 0x16, 0xdf, 0xf4, 0xae, 0xe7, 0xdf, 0xd9, 0x6f, 0x7a, 0x77, + 0x7b, 0xf1, 0x9e, 0x8a, 0xe1, 0xb9, 0xa8, 0x85, 0x49, 0x1b, 0x09, 0xdd, 0xc7, 0x11, 0x2f, 0xc4, + 0x10, 0x7d, 0xdc, 0x6e, 0x3a, 0x16, 0x57, 0x56, 0x2f, 0xea, 0x33, 0x69, 0x96, 0xb5, 0x9a, 0x1d, + 0x42, 0xb1, 0x3f, 0x88, 0x71, 0x04, 0x3b, 0x5d, 0x93, 0x17, 0x07, 0xa3, 0x8a, 0x1d, 0x7a, 0x0e, + 0x96, 0x86, 0xcb, 0x94, 0x3f, 0x48, 0xda, 0x03, 0x87, 0x50, 0xcf, 0x3f, 0xec, 0x95, 0xb6, 0x96, + 0x86, 0x3d, 0x40, 0x6d, 0xdf, 0x48, 0xc3, 0x1f, 0xa8, 0xbd, 0x17, 0xd2, 0x28, 0xda, 0xcc, 0xce, + 0x84, 0x62, 0x57, 0xec, 0x21, 0x8f, 0x6a, 0xb6, 0x30, 0x45, 0x36, 0xa2, 0x48, 0x92, 0x3e, 0x37, + 0x04, 0x29, 0xbe, 0x87, 0xad, 0x0e, 0xdb, 0x99, 0x48, 0xa2, 0x6b, 0x43, 0x10, 0x29, 0xff, 0x31, + 0x5b, 0x1d, 0x8a, 0xea, 0x4d, 0x6c, 0x12, 0x8a, 0xe8, 0x40, 0x95, 0x24, 0x18, 0x30, 0x7d, 0xcb, + 0x0d, 0xab, 0x1f, 0x68, 0x50, 0x36, 0x70, 0xbd, 0xe3, 0x34, 0xed, 0x5d, 0xc1, 0x6e, 0x8f, 0x71, + 0x33, 0x44, 0xa8, 0xeb, 0x8f, 0x41, 0x31, 0xd0, 0x67, 0x49, 0x5b, 0xd6, 0x56, 0x8a, 0x46, 0x08, + 0xd0, 0x37, 0xa1, 0x18, 0x9c, 0xa0, 0x94, 0x59, 0xd6, 0x56, 0xc6, 0x2f, 0x5f, 0x08, 0x04, 0xe0, + 0x69, 0x40, 0x7a, 0x4c, 0xf7, 0x52, 0xed, 0xb6, 0x94, 0xfa, 0x86, 0x22, 0x30, 0x42, 0xda, 0xea, + 0x22, 0x2c, 0xa4, 0x0a, 0x21, 0xf2, 0x4c, 0xf5, 0x27, 0x1a, 0x2c, 0x5c, 0xc7, 0xc4, 0xf2, 0x9d, + 0x3a, 0xfe, 0x3f, 0x4a, 0xf9, 0xc7, 0x0c, 0x3c, 0x96, 0x2e, 0x86, 0x90, 0x53, 0x3f, 0x0b, 0x05, + 0x72, 0x80, 0x7c, 0xdb, 0x74, 0x6c, 0x29, 0xc6, 0x18, 0xff, 0xde, 0xb6, 0xf5, 0xc7, 0x61, 0x42, + 0xba, 0xb1, 0x89, 0x6c, 0xdb, 0xe7, 0x72, 0x14, 0x8d, 0x71, 0x09, 0x5b, 0xb3, 0x6d, 0x5f, 0x3f, + 0x80, 0x53, 0x16, 0xb2, 0x0e, 0x70, 0xdc, 0xae, 0xa5, 0x2c, 0x97, 0xf8, 0x4a, 0x2d, 0x2d, 0xcb, + 0x46, 0x0c, 0x1b, 0x95, 0x3e, 0x26, 0xdc, 0x2c, 0x67, 0x1a, 0x05, 0xe9, 0x2e, 0x9c, 0x61, 0x8e, + 0x5a, 0x47, 0x24, 0xb9, 0x59, 0xee, 0x01, 0x37, 0x9b, 0x53, 0x7c, 0xa3, 0xd0, 0xea, 0x5f, 0x35, + 0x28, 0x2b, 0xc5, 0x6d, 0x89, 0x13, 0x6f, 0x79, 0x84, 0x2a, 0xf3, 0x31, 0xdd, 0x78, 0x84, 0x72, + 0xc5, 0x60, 0x42, 0xa4, 0xea, 
0xc6, 0x19, 0x6c, 0x4d, 0x80, 0x62, 0x9a, 0x65, 0xaa, 0xcb, 0x87, + 0x9a, 0x8d, 0x19, 0x3f, 0x9b, 0x34, 0xfe, 0x77, 0x41, 0x0f, 0xe2, 0x25, 0xf4, 0x82, 0xdc, 0x49, + 0xbd, 0x60, 0xf6, 0x6e, 0x12, 0x54, 0xfd, 0x67, 0xc4, 0x29, 0x63, 0x87, 0x92, 0xce, 0xf0, 0x04, + 0x4c, 0x72, 0x11, 0x89, 0xe9, 0x76, 0x5a, 0x75, 0xec, 0xf3, 0x63, 0xe5, 0x8d, 0x09, 0x01, 0x7c, + 0x8d, 0xc3, 0xf4, 0x05, 0x28, 0xaa, 0x73, 0x91, 0x52, 0x66, 0x39, 0xbb, 0x92, 0x37, 0x0a, 0xf2, + 0x60, 0x44, 0x7f, 0x07, 0xa6, 0x83, 0x83, 0x98, 0xdc, 0x8a, 0xd2, 0x19, 0xbe, 0x99, 0x6a, 0x9f, + 0x00, 0x97, 0x1d, 0xe1, 0x35, 0xf5, 0xb1, 0xc1, 0xe8, 0xb6, 0xdd, 0x7d, 0xcf, 0x98, 0x72, 0x63, + 0x30, 0xbd, 0x04, 0x63, 0x4a, 0xe3, 0x79, 0xe1, 0xac, 0xf2, 0xf3, 0xd5, 0x5c, 0x21, 0x37, 0x93, + 0xaf, 0xd6, 0x60, 0x76, 0xa3, 0xe9, 0x11, 0xbc, 0xc7, 0xe4, 0x51, 0xb6, 0x4a, 0xba, 0x78, 0x68, + 0x88, 0xea, 0x1c, 0xe8, 0x51, 0x7c, 0x19, 0xbb, 0xcf, 0xc0, 0xf4, 0x26, 0xa6, 0xc3, 0xf2, 0x78, + 0x17, 0x66, 0x42, 0x6c, 0xa9, 0xc8, 0x1d, 0x00, 0x89, 0xee, 0xee, 0x7b, 0x9c, 0x60, 0xfc, 0xf2, + 0xb3, 0xc3, 0x78, 0x28, 0x67, 0xc3, 0x8f, 0x2e, 0x94, 0xcc, 0x7e, 0x56, 0x7f, 0x91, 0x81, 0xf9, + 0x1d, 0x87, 0x50, 0x69, 0xb2, 0x9b, 0x2c, 0x17, 0x1e, 0x2f, 0x98, 0xfe, 0x0a, 0x14, 0x2c, 0x44, + 0x71, 0xc3, 0xf3, 0x0f, 0xb9, 0x03, 0x4e, 0x5d, 0xbe, 0x98, 0x2a, 0x02, 0xbf, 0xd4, 0xd8, 0xe6, + 0x8c, 0xf1, 0x86, 0xa4, 0x30, 0x02, 0x5a, 0x7d, 0x0b, 0x80, 0xd7, 0x1a, 0x3e, 0x72, 0x1b, 0xca, + 0x9c, 0x17, 0x52, 0x39, 0xc9, 0xd4, 0xa0, 0x78, 0x19, 0x8c, 0xc0, 0x28, 0x52, 0xf5, 0x53, 0x5f, + 0x04, 0xa8, 0x23, 0x6a, 0x1d, 0x98, 0xc4, 0x79, 0x5f, 0x04, 0x6e, 0xde, 0x28, 0x72, 0xc8, 0x9e, + 0xf3, 0x3e, 0xd6, 0xcf, 0xc3, 0xb4, 0x8b, 0xef, 0x51, 0xb3, 0x8d, 0x1a, 0xd8, 0xa4, 0xde, 0x1d, + 0xec, 0x72, 0x2b, 0x4f, 0x18, 0x93, 0x0c, 0xfc, 0x06, 0x6a, 0xe0, 0x9b, 0x0c, 0xc8, 0x2e, 0x80, + 0x52, 0xaf, 0x3e, 0xa4, 0xea, 0xaf, 0x41, 0x9e, 0x5f, 0x16, 0x25, 0x6d, 0x39, 0xdb, 0x57, 0xd0, + 0x44, 0xa9, 0x27, 0xa4, 0x15, 0x74, 0x69, 0x52, 0x64, 0xd2, 0xa4, 0xf8, 0x30, 0x03, 0x39, 0x46, + 0xc7, 0x72, 0x41, 0xe8, 0xf3, 0x41, 0x1a, 0x1d, 0x0f, 0x60, 0xdb, 0xb6, 0xbe, 0x04, 0xe3, 0x41, + 0x48, 0xcb, 0x74, 0x50, 0x34, 0x40, 0x81, 0xb6, 0x6d, 0xfd, 0x34, 0x8c, 0xfa, 0x1d, 0x97, 0xad, + 0x89, 0x74, 0x90, 0xf7, 0x3b, 0xee, 0xb6, 0xad, 0xcf, 0xc3, 0x18, 0x57, 0xbd, 0x63, 0x73, 0x6d, + 0x65, 0x8d, 0x51, 0xf6, 0xb9, 0x6d, 0xeb, 0x1b, 0xc0, 0xd5, 0x6a, 0xd2, 0xc3, 0x36, 0xe6, 0x4a, + 0x9a, 0xba, 0x7c, 0xfe, 0x78, 0xe3, 0xde, 0x3c, 0x6c, 0x63, 0xa3, 0x40, 0xe5, 0x2f, 0xfd, 0x2a, + 0x14, 0xf7, 0x1d, 0x1f, 0x9b, 0xac, 0xae, 0x2d, 0x8d, 0x72, 0xbb, 0x96, 0x6b, 0xa2, 0xa6, 0xad, + 0xa9, 0x9a, 0xb6, 0x76, 0x53, 0x15, 0xbd, 0xeb, 0xb9, 0xfb, 0x9f, 0x2d, 0x69, 0x46, 0x81, 0x91, + 0x30, 0x20, 0x0b, 0x46, 0x59, 0x3e, 0x96, 0xc6, 0xb8, 0x70, 0xea, 0xb3, 0xfa, 0x77, 0x0d, 0x66, + 0x0d, 0xdc, 0xf2, 0xba, 0x98, 0x2b, 0xf6, 0xab, 0x73, 0xd5, 0x88, 0xbe, 0xb2, 0x31, 0x7d, 0x6d, + 0xc3, 0x74, 0xd7, 0x21, 0x4e, 0xdd, 0x69, 0x3a, 0xf4, 0x50, 0x1c, 0x38, 0x37, 0xe4, 0x81, 0xa7, + 0x42, 0x42, 0xb6, 0xc4, 0x72, 0x46, 0xf4, 0x6c, 0x32, 0x67, 0xfc, 0x2a, 0x0b, 0x4f, 0x6d, 0x62, + 0xda, 0x9b, 0x86, 0xd1, 0x5d, 0xe9, 0xa6, 0xb7, 0x2e, 0x47, 0x2e, 0x8f, 0x98, 0xc3, 0x14, 0x7b, + 0x1d, 0xe6, 0x61, 0x15, 0x00, 0xfa, 0x39, 0x98, 0x22, 0x14, 0xf9, 0xd4, 0xc4, 0x5d, 0xec, 0xd2, + 0x50, 0x31, 0x13, 0x1c, 0x7a, 0x83, 0x01, 0xb7, 0x6d, 0xbd, 0x06, 0xa7, 0xa2, 0x58, 0xca, 0xac, + 0xc2, 0xe7, 0x66, 0x43, 0xd4, 0x5b, 0x62, 0x41, 0x5f, 0x86, 0x09, 0xec, 0xda, 0x21, 0xcf, 0x3c, + 0x47, 0x04, 0xec, 0xda, 0x8a, 0xe3, 0x45, 0x98, 0x0d, 
0x31, 0x14, 0xbf, 0x51, 0x8e, 0x36, 0xad, + 0xd0, 0x14, 0xb7, 0x8b, 0x30, 0xdb, 0x42, 0xf7, 0x9c, 0x56, 0xa7, 0x25, 0x82, 0x8e, 0x67, 0x87, + 0x31, 0xee, 0x21, 0xd3, 0x72, 0x81, 0x85, 0x5d, 0xbf, 0x1c, 0x51, 0x48, 0x89, 0xce, 0x57, 0x73, + 0x05, 0x6d, 0x26, 0x53, 0xfd, 0x4d, 0x06, 0x56, 0x8e, 0xb7, 0x8a, 0xcc, 0x1c, 0x29, 0xac, 0xb5, + 0x14, 0xd6, 0xcc, 0x97, 0x54, 0x5d, 0xc4, 0x73, 0x17, 0x16, 0xd7, 0xe0, 0xf8, 0xe5, 0xe5, 0x7e, + 0x16, 0xba, 0x8e, 0x28, 0x5a, 0x6f, 0x7a, 0x75, 0x63, 0x4a, 0x12, 0xae, 0x0b, 0x3a, 0xfd, 0x36, + 0x4c, 0x4b, 0xdd, 0x98, 0x72, 0x45, 0xe6, 0xd7, 0xda, 0x71, 0xf9, 0x55, 0xea, 0x4e, 0x9e, 0xc2, + 0x98, 0xea, 0xc6, 0xbe, 0xf5, 0x15, 0x98, 0x51, 0x32, 0xba, 0x9e, 0x8d, 0xf9, 0x5d, 0x9d, 0x5b, + 0xce, 0xae, 0x64, 0x03, 0x11, 0x5e, 0xf3, 0x6c, 0xbc, 0x6d, 0x93, 0xea, 0x7d, 0x0d, 0x16, 0x37, + 0x31, 0x35, 0xc2, 0x96, 0x62, 0x57, 0xb4, 0x13, 0xc1, 0x15, 0xb3, 0x03, 0xa3, 0x5c, 0x1b, 0x2a, + 0xa5, 0xa6, 0x5f, 0xe5, 0x91, 0x9e, 0x84, 0xc9, 0x17, 0xe1, 0xc7, 0xb5, 0x66, 0x48, 0x1e, 0xcc, + 0xf9, 0x55, 0xf7, 0xc1, 0x1c, 0x5e, 0x55, 0x95, 0x12, 0xc6, 0x6a, 0x80, 0xea, 0x47, 0x19, 0xa8, + 0xf4, 0x13, 0x49, 0xda, 0xea, 0xfb, 0x30, 0x25, 0x72, 0x89, 0xec, 0x7d, 0x94, 0x6c, 0xb7, 0x86, + 0x4a, 0xf7, 0x83, 0x99, 0x8b, 0x4b, 0x58, 0x41, 0x6f, 0xb8, 0xd4, 0x3f, 0x34, 0x44, 0x5d, 0xa4, + 0x60, 0xe5, 0x43, 0xd0, 0x7b, 0x91, 0xf4, 0x19, 0xc8, 0xde, 0xc1, 0x87, 0x32, 0xb7, 0xb1, 0x9f, + 0xfa, 0x2e, 0xe4, 0xbb, 0xa8, 0xd9, 0xc1, 0x32, 0x84, 0x9f, 0x3f, 0xa1, 0xe6, 0x02, 0xc9, 0x04, + 0x97, 0x17, 0x33, 0x57, 0xb4, 0xea, 0x9f, 0x35, 0x38, 0xbf, 0x89, 0x69, 0x50, 0x2c, 0x0d, 0x30, + 0xdc, 0x0b, 0x70, 0xb6, 0x89, 0xf8, 0xf0, 0x83, 0xfa, 0x0e, 0xee, 0xe2, 0x40, 0x5b, 0x2a, 0x03, + 0x67, 0x8d, 0x33, 0x0c, 0xc1, 0x50, 0xeb, 0x92, 0xc1, 0xb6, 0x1d, 0x90, 0xb6, 0x7d, 0xcf, 0xc2, + 0x84, 0xc4, 0x49, 0x33, 0x21, 0xe9, 0x1b, 0x6a, 0x3d, 0x24, 0x4d, 0x1a, 0x38, 0xdb, 0x6b, 0xe0, + 0x1f, 0xf0, 0x5c, 0x39, 0xf8, 0x08, 0xd2, 0xd0, 0x7b, 0x50, 0x88, 0x98, 0xf8, 0x81, 0x94, 0x18, + 0x30, 0xaa, 0xbe, 0x0f, 0xcb, 0x9b, 0x98, 0x5e, 0xdf, 0x79, 0x73, 0x80, 0xf2, 0x6e, 0xc9, 0xaa, + 0x87, 0x55, 0x70, 0xca, 0xbb, 0x4e, 0xba, 0x35, 0xbb, 0x21, 0x44, 0x31, 0x47, 0xe5, 0x2f, 0x52, + 0xfd, 0xa9, 0x06, 0x8f, 0x0f, 0xd8, 0x5c, 0x1e, 0xfb, 0x5d, 0x98, 0x8d, 0xb0, 0x35, 0xa3, 0x15, + 0xcd, 0x73, 0xff, 0x83, 0x10, 0xc6, 0x8c, 0x1f, 0x07, 0x90, 0xea, 0xdf, 0x34, 0x98, 0x33, 0x30, + 0x6a, 0xb7, 0x9b, 0x87, 0x3c, 0x19, 0x93, 0x7e, 0xb7, 0x53, 0xae, 0xf7, 0x76, 0x4a, 0xef, 0x50, + 0x32, 0x0f, 0xde, 0xa1, 0xe8, 0x57, 0x60, 0x94, 0x5f, 0x19, 0x44, 0xe6, 0xc1, 0xe3, 0x53, 0xaa, + 0xc4, 0x97, 0x09, 0x7f, 0x1e, 0x4e, 0x27, 0x0e, 0x25, 0xef, 0xe7, 0xff, 0x64, 0xa0, 0xbc, 0x66, + 0xdb, 0x7b, 0x18, 0xf9, 0xd6, 0xc1, 0x1a, 0xa5, 0xbe, 0x53, 0xef, 0xd0, 0xd0, 0xda, 0x3f, 0xd6, + 0x60, 0x96, 0xf0, 0x35, 0x13, 0x05, 0x8b, 0x52, 0xe1, 0x6f, 0x0d, 0x95, 0x53, 0xfa, 0x33, 0xaf, + 0x25, 0xe1, 0x22, 0xa5, 0xcc, 0x90, 0x04, 0x98, 0x95, 0xc7, 0x8e, 0x6b, 0xe3, 0x7b, 0xd1, 0xc4, + 0x58, 0xe4, 0x10, 0x16, 0x2a, 0xfa, 0x33, 0xa0, 0x93, 0x3b, 0x4e, 0xdb, 0x24, 0xd6, 0x01, 0x6e, + 0x21, 0xb3, 0xd3, 0xb6, 0x55, 0xaf, 0x5d, 0x30, 0x66, 0xd8, 0xca, 0x1e, 0x5f, 0x78, 0x8b, 0xc3, + 0xe3, 0x3d, 0x66, 0x2e, 0xd1, 0x63, 0x96, 0x9b, 0x70, 0x3a, 0x55, 0xaa, 0x68, 0x0e, 0x2b, 0x8a, + 0x1c, 0x76, 0x35, 0x9a, 0xc3, 0xa6, 0x2e, 0x3f, 0x15, 0xb7, 0x48, 0x50, 0x91, 0x6d, 0x33, 0x39, + 0xb1, 0x7d, 0x8b, 0xa1, 0xf2, 0x3a, 0x33, 0x92, 0xb3, 0x16, 0x61, 0x21, 0x55, 0x3d, 0xd2, 0x36, + 0x3f, 0xd7, 0x60, 0x51, 0x94, 0x54, 0xfd, 0xcc, 0xf3, 0x74, 0x3f, 0xeb, 0x14, 
0x4f, 0xae, 0xc6, + 0x81, 0xcd, 0x77, 0x75, 0x19, 0x2a, 0xfd, 0x44, 0x91, 0xd2, 0x7e, 0x0f, 0xca, 0xac, 0xdf, 0xeb, + 0x23, 0x69, 0x7c, 0x73, 0x6d, 0xe0, 0xe6, 0x99, 0xe4, 0xe6, 0x1f, 0x8d, 0xc2, 0x42, 0x2a, 0x6f, + 0x99, 0x15, 0x3e, 0xd0, 0x60, 0xd6, 0xea, 0x10, 0xea, 0xb5, 0x7a, 0xbd, 0x74, 0xe8, 0x9b, 0xaf, + 0x1f, 0xf7, 0xda, 0x06, 0xe7, 0xdc, 0xe3, 0xa6, 0x56, 0x02, 0xcc, 0xa5, 0x20, 0x87, 0x84, 0xe2, + 0x98, 0x14, 0x99, 0x87, 0x24, 0xc5, 0x1e, 0xe7, 0xdc, 0x1b, 0x2c, 0x09, 0xb0, 0xde, 0x80, 0xb1, + 0x16, 0x6a, 0xb7, 0x1d, 0xb7, 0x51, 0xca, 0xf2, 0xad, 0x77, 0x1f, 0x78, 0xeb, 0x5d, 0xc1, 0x4f, + 0xec, 0xa8, 0xb8, 0xeb, 0x2e, 0x2c, 0x20, 0xdb, 0x36, 0x7b, 0x13, 0x9e, 0x68, 0xee, 0x45, 0x1b, + 0xb1, 0x1a, 0x8f, 0x0a, 0x85, 0x9c, 0x9a, 0xf7, 0xf8, 0x8d, 0x50, 0x42, 0xb6, 0x9d, 0xba, 0xc2, + 0x42, 0x33, 0xd5, 0x12, 0x8f, 0x24, 0x34, 0x79, 0x22, 0x48, 0xd3, 0xf8, 0xa3, 0xd9, 0xed, 0x45, + 0x98, 0x88, 0x2a, 0x39, 0x65, 0x93, 0xb9, 0xe8, 0x26, 0xc5, 0x68, 0x12, 0x79, 0x09, 0xce, 0xa8, + 0xd9, 0xd5, 0x86, 0xa8, 0x25, 0x22, 0x37, 0x56, 0xac, 0xe2, 0xd0, 0x7a, 0x2b, 0x8e, 0x3f, 0x8d, + 0xc1, 0x7c, 0x0f, 0xb5, 0x8c, 0xaa, 0x1f, 0xc2, 0x2c, 0xe9, 0xb4, 0xdb, 0x9e, 0x4f, 0xb1, 0x6d, + 0x5a, 0x4d, 0x87, 0x5f, 0x3f, 0x22, 0xa8, 0x8c, 0xa1, 0x7c, 0xaa, 0x0f, 0xe3, 0xda, 0x9e, 0xe2, + 0xba, 0x21, 0x98, 0x2a, 0x57, 0x4e, 0x80, 0xf5, 0x27, 0x61, 0x4a, 0x70, 0x0f, 0x1a, 0x25, 0x71, + 0xf8, 0x49, 0x01, 0x55, 0x6d, 0xd2, 0x6d, 0x98, 0x6e, 0xe1, 0x56, 0x1d, 0xfb, 0xe4, 0xc0, 0x69, + 0x0b, 0xe7, 0x1b, 0xd4, 0x2c, 0xa8, 0xe7, 0x8c, 0xee, 0xa5, 0xda, 0x6e, 0x40, 0x26, 0xa6, 0x6a, + 0xad, 0xd8, 0x37, 0xcb, 0x59, 0x4a, 0x7f, 0xc1, 0x7d, 0x5f, 0x94, 0x90, 0x94, 0x82, 0x2e, 0xdf, + 0xa3, 0x5e, 0xd6, 0x3f, 0xaa, 0x76, 0x43, 0x94, 0xe5, 0x96, 0xd7, 0x71, 0x29, 0xef, 0xf7, 0xf2, + 0xc6, 0xac, 0x5c, 0xe2, 0x15, 0xf3, 0x06, 0x5b, 0x60, 0xf9, 0x3c, 0x32, 0xf8, 0x32, 0xd9, 0xb2, + 0xe8, 0xf8, 0x8a, 0xc6, 0x4c, 0x64, 0x61, 0x8f, 0xc1, 0xf5, 0x0b, 0x30, 0x13, 0xe9, 0xdd, 0x05, + 0x6e, 0x81, 0xe3, 0x46, 0x7a, 0x7a, 0x81, 0xba, 0x09, 0x13, 0xaa, 0x9f, 0xe2, 0xfa, 0x29, 0x72, + 0xfd, 0x9c, 0x8b, 0x7b, 0xaa, 0xc4, 0x88, 0x74, 0x51, 0x5c, 0x2b, 0xe3, 0xdd, 0xf0, 0x43, 0x7f, + 0x19, 0xca, 0xfb, 0xc8, 0x69, 0x7a, 0x11, 0xa3, 0x98, 0x8e, 0x6b, 0xf9, 0xb8, 0x85, 0x5d, 0x5a, + 0x02, 0x5e, 0x00, 0x97, 0x14, 0x46, 0xc0, 0x45, 0xae, 0xeb, 0x57, 0xa0, 0xe4, 0xb8, 0x0e, 0x75, + 0x50, 0xd3, 0x4c, 0x72, 0x29, 0x8d, 0x8b, 0xe2, 0x59, 0xae, 0xbf, 0x12, 0x67, 0xa1, 0x5f, 0x85, + 0x05, 0x87, 0x98, 0x8d, 0xa6, 0x57, 0x47, 0x4d, 0x33, 0x2c, 0xc3, 0xb0, 0x8b, 0xea, 0x4d, 0x6c, + 0x97, 0x26, 0xf8, 0x65, 0x5f, 0x72, 0xc8, 0x26, 0xc7, 0x08, 0x2a, 0xe8, 0x1b, 0x62, 0x5d, 0x7f, + 0x1b, 0x72, 0x14, 0x35, 0x48, 0x69, 0x92, 0x7b, 0xef, 0x2b, 0x0f, 0xe4, 0xbd, 0x37, 0x51, 0x43, + 0x7a, 0x2c, 0xe7, 0x59, 0xde, 0x80, 0xd3, 0xa9, 0x0e, 0x7d, 0x92, 0x20, 0x2e, 0x3f, 0x0f, 0xc5, + 0x80, 0xef, 0x89, 0xa2, 0xff, 0x6d, 0x38, 0xb5, 0xe3, 0x10, 0x2a, 0x85, 0x0c, 0xae, 0xdb, 0x05, + 0x28, 0x86, 0x23, 0x03, 0xd1, 0x78, 0x15, 0xda, 0x03, 0x66, 0x05, 0xa9, 0x93, 0xbc, 0x5f, 0x6a, + 0x30, 0x17, 0x67, 0x2e, 0x33, 0xc3, 0xeb, 0x50, 0x90, 0x5e, 0x3e, 0xb8, 0xf8, 0x4e, 0x0c, 0x71, + 0x25, 0x9f, 0x5d, 0xf9, 0xb8, 0x66, 0x04, 0x4c, 0x86, 0x96, 0xe8, 0xd7, 0x1a, 0x2c, 0xad, 0xd9, + 0xf6, 0xeb, 0xbe, 0x28, 0xe6, 0x58, 0x45, 0x42, 0x93, 0x59, 0xef, 0x02, 0xcc, 0xec, 0xfb, 0x9e, + 0x4b, 0xb1, 0x6b, 0x27, 0x9e, 0x21, 0xa6, 0x15, 0x5c, 0x3d, 0x45, 0x6c, 0xc2, 0xb2, 0xf0, 0x20, + 0xd3, 0xe7, 0x9c, 0x4c, 0x15, 0xcf, 0x96, 0xe7, 0xba, 0xd8, 0x0a, 0xaa, 0xf7, 0x82, 0xb1, 0x28, + 0xf0, 
0x62, 0x1b, 0x6e, 0x04, 0x48, 0xd5, 0x2a, 0x2c, 0xf7, 0x17, 0x4b, 0xd6, 0x47, 0xd7, 0xa0, + 0x2c, 0x2a, 0xa8, 0x54, 0xa9, 0x87, 0xc8, 0xd5, 0xfc, 0x65, 0x2d, 0x85, 0x41, 0x38, 0x69, 0x3b, + 0x1b, 0xb1, 0x96, 0xcc, 0x6d, 0x8a, 0xff, 0x1e, 0x9c, 0xe6, 0x8d, 0xeb, 0x01, 0x46, 0x3e, 0xad, + 0x63, 0x44, 0xcd, 0xbb, 0x0e, 0x3d, 0x70, 0x5c, 0xd9, 0x3c, 0x9e, 0xed, 0x19, 0xf7, 0x5d, 0x97, + 0x6f, 0xf6, 0xeb, 0xb9, 0x0f, 0x3f, 0x5b, 0xd2, 0x8c, 0x53, 0x8c, 0x7a, 0x4b, 0x11, 0xdf, 0xe6, + 0xb4, 0xfa, 0x12, 0x8c, 0xfb, 0x6d, 0x2b, 0xd0, 0xb2, 0x1c, 0xdf, 0xfa, 0x6d, 0x4b, 0x29, 0x78, + 0x1e, 0xc6, 0xf8, 0x73, 0x50, 0x30, 0xbf, 0x1d, 0x65, 0x9f, 0x7c, 0x4e, 0x9b, 0xf3, 0xbd, 0xa6, + 0x28, 0xc0, 0xa7, 0xa2, 0x55, 0x42, 0xda, 0x50, 0x33, 0x76, 0x22, 0xc3, 0x6b, 0x62, 0x83, 0x13, + 0xeb, 0xef, 0x40, 0x99, 0x60, 0xc2, 0x73, 0x10, 0x1f, 0xc5, 0x61, 0xdb, 0x44, 0xfb, 0x4c, 0x83, + 0x7c, 0x8e, 0x99, 0x1f, 0x72, 0x8e, 0x39, 0x2f, 0x79, 0xec, 0x09, 0x16, 0x6b, 0x8c, 0x03, 0x9f, + 0xe3, 0xc6, 0x62, 0x68, 0xf4, 0xf8, 0x18, 0x1a, 0x4b, 0xf3, 0xd8, 0x8f, 0x34, 0x28, 0xa7, 0x59, + 0x45, 0x46, 0xd2, 0x4d, 0x98, 0x42, 0x16, 0x75, 0xba, 0xd8, 0x94, 0x77, 0x8f, 0x8c, 0xa7, 0x67, + 0x8f, 0xbb, 0xba, 0xe2, 0x3a, 0x99, 0x14, 0x4c, 0x24, 0xf7, 0xa1, 0xc3, 0xe9, 0xf7, 0x19, 0x38, + 0x2d, 0x7a, 0xee, 0x64, 0x97, 0x7f, 0x03, 0x72, 0x7c, 0x84, 0xae, 0x71, 0xfb, 0x5c, 0x1a, 0x6c, + 0x9f, 0xeb, 0x18, 0xd9, 0x3b, 0x98, 0x52, 0xec, 0xbf, 0xd9, 0xc1, 0xb2, 0xb8, 0xe1, 0xe4, 0x83, + 0xde, 0xfa, 0xd8, 0xe5, 0xee, 0x75, 0x7c, 0x2b, 0x08, 0x3a, 0xe9, 0x21, 0x93, 0x02, 0x2a, 0xcf, + 0xa7, 0x3f, 0xcf, 0xae, 0x0c, 0x86, 0xc1, 0x74, 0xc4, 0x42, 0x3a, 0x32, 0x6f, 0x11, 0x63, 0xd8, + 0xd3, 0xc1, 0xfa, 0x0d, 0x37, 0x32, 0x6e, 0x49, 0x1d, 0x9e, 0xe6, 0x87, 0x1e, 0x9e, 0x8e, 0xa6, + 0xe9, 0xeb, 0xd3, 0x0c, 0x9c, 0x49, 0xea, 0x4b, 0x1a, 0xf2, 0x21, 0x29, 0x2c, 0x75, 0xbe, 0x91, + 0x79, 0x88, 0xf3, 0x8d, 0xb4, 0xb3, 0x66, 0xd3, 0xa6, 0xb9, 0x2d, 0x38, 0xd3, 0x23, 0x89, 0xaa, + 0xec, 0x1f, 0x68, 0xe6, 0x33, 0x97, 0x14, 0x89, 0xbf, 0xe5, 0xfd, 0x43, 0x83, 0xf9, 0x37, 0x3a, + 0x7e, 0x03, 0x7f, 0x1d, 0x9d, 0xb1, 0x5a, 0x86, 0x52, 0xef, 0xe1, 0x64, 0xde, 0xfe, 0x43, 0x06, + 0xe6, 0x77, 0xf1, 0xd7, 0xf4, 0xe4, 0x8f, 0x24, 0x0c, 0xd7, 0xa1, 0xd4, 0xab, 0xb0, 0x93, 0x3d, + 0x56, 0xb0, 0xda, 0x66, 0xc1, 0xc0, 0xfb, 0x3e, 0x26, 0x07, 0xaa, 0xdd, 0x8c, 0xbd, 0x1f, 0x27, + 0xa7, 0x7d, 0xd9, 0x47, 0xf7, 0x16, 0x25, 0x47, 0x74, 0x15, 0x78, 0x2c, 0x5d, 0xa0, 0xd0, 0x4f, + 0x16, 0x0d, 0x4c, 0xb0, 0x6b, 0x27, 0xa2, 0xaa, 0xaf, 0xcc, 0x0f, 0xf1, 0xc1, 0xf5, 0x49, 0x98, + 0x8a, 0x97, 0x48, 0xb2, 0x1d, 0x9a, 0xf4, 0xa3, 0xb5, 0x48, 0xca, 0xab, 0x5a, 0x3e, 0xe5, 0x55, + 0xed, 0x09, 0x98, 0x14, 0x58, 0xf1, 0xf7, 0x2f, 0x81, 0xd4, 0xef, 0x29, 0x6d, 0xac, 0xe7, 0x29, + 0x6d, 0x09, 0xc6, 0x19, 0x86, 0x62, 0x52, 0x08, 0x10, 0x24, 0x0b, 0x31, 0xb3, 0x4a, 0x57, 0x98, + 0xd4, 0xe9, 0xef, 0x32, 0x50, 0xda, 0xc4, 0x94, 0x01, 0x45, 0xcc, 0x44, 0xd5, 0x39, 0xf8, 0xaf, + 0x48, 0x8b, 0x72, 0x0e, 0xce, 0xff, 0x69, 0xa8, 0x46, 0x56, 0x54, 0x31, 0xd2, 0x77, 0x60, 0x3a, + 0x5c, 0x16, 0xcf, 0xd1, 0x59, 0x1e, 0xc4, 0xe7, 0xfa, 0x8c, 0x07, 0x42, 0x19, 0x58, 0xdc, 0x4e, + 0xd2, 0xe8, 0xa7, 0x5e, 0x81, 0xf1, 0x96, 0x23, 0x92, 0x70, 0x18, 0x71, 0xc5, 0x96, 0x23, 0xb2, + 0xaa, 0xcd, 0xd7, 0xd1, 0xbd, 0x60, 0x3d, 0x2f, 0xd7, 0xd1, 0x3d, 0xb9, 0x1e, 0xff, 0x83, 0xc1, + 0xe8, 0x10, 0x7f, 0x30, 0x48, 0x2d, 0x66, 0xee, 0x6b, 0x70, 0x36, 0x45, 0x5d, 0x32, 0xf4, 0xbe, + 0x13, 0xff, 0x87, 0xc1, 0xb7, 0x86, 0x69, 0x09, 0xd6, 0x9a, 0x4d, 0xcf, 0x42, 0x14, 0xdb, 0xc1, + 0xf5, 0x70, 0xc2, 0x7f, 0x1b, 
0xfc, 0x4c, 0x83, 0xca, 0x75, 0xdc, 0xc4, 0x14, 0xf7, 0x86, 0xd8, + 0x57, 0xfb, 0x97, 0xb2, 0xab, 0xb0, 0xd4, 0x57, 0x10, 0xa9, 0xa1, 0x32, 0x14, 0xee, 0x22, 0xdf, + 0x75, 0xdc, 0x86, 0x9a, 0xd2, 0x06, 0xdf, 0xd5, 0x8f, 0x35, 0x58, 0xd9, 0xa3, 0x3e, 0x46, 0x2d, + 0x45, 0x3f, 0xe0, 0x11, 0xa6, 0x0d, 0x67, 0xc8, 0xa1, 0x6b, 0x99, 0xd1, 0x1b, 0x5a, 0xfc, 0xeb, + 0x4b, 0x1b, 0xf0, 0xaf, 0xaf, 0xc4, 0xe5, 0xbc, 0x77, 0xe8, 0x5a, 0x91, 0x3d, 0xf8, 0xff, 0xbb, + 0xb6, 0x46, 0x8c, 0x39, 0x92, 0x02, 0x5f, 0x9f, 0x00, 0x08, 0x87, 0x9a, 0xd5, 0x0f, 0x35, 0xb8, + 0x30, 0x84, 0xb0, 0xf2, 0xd8, 0xef, 0xf4, 0xbc, 0x55, 0x5d, 0x1b, 0x46, 0xbe, 0x01, 0xac, 0xb7, + 0x46, 0xc2, 0x57, 0xab, 0x84, 0x68, 0x6f, 0xc1, 0xa9, 0xf8, 0x1b, 0x9a, 0xd0, 0x58, 0xa5, 0xc7, + 0x09, 0xb6, 0x46, 0xa2, 0x6e, 0x30, 0x03, 0x19, 0x95, 0x0f, 0xb7, 0x46, 0x8c, 0x8c, 0x63, 0x27, + 0xd8, 0x7e, 0x9c, 0x85, 0xb9, 0x38, 0x5f, 0x79, 0xb8, 0x97, 0x21, 0x17, 0x19, 0x39, 0xad, 0xc4, + 0x5d, 0x27, 0xfd, 0x7f, 0x5c, 0xdc, 0xcf, 0x39, 0x95, 0xbe, 0x0e, 0xa3, 0x96, 0xe7, 0xee, 0x3b, + 0x0d, 0x39, 0x2f, 0xbd, 0x38, 0x0c, 0xfd, 0x06, 0xa7, 0x30, 0x24, 0xa5, 0xbe, 0x0f, 0x7a, 0xd4, + 0x0f, 0x24, 0xbf, 0x7c, 0xf2, 0x51, 0x30, 0x45, 0xc3, 0x69, 0x2f, 0x8d, 0x92, 0x79, 0xb4, 0x0c, + 0x15, 0x20, 0x76, 0x07, 0x08, 0xde, 0x89, 0xbc, 0x3d, 0x29, 0xa0, 0x2a, 0x71, 0xb3, 0xfe, 0x3b, + 0x39, 0xdc, 0x11, 0xc9, 0x7b, 0x3a, 0x31, 0x18, 0xd2, 0x6f, 0x45, 0x50, 0xd5, 0x3b, 0x7f, 0x81, + 0x27, 0x8f, 0xa7, 0x07, 0xca, 0xad, 0xa6, 0x43, 0xcc, 0x35, 0x3b, 0x24, 0xe4, 0x2b, 0x5f, 0xf9, + 0xd7, 0x9b, 0x9f, 0x7c, 0x5e, 0x19, 0xf9, 0xf4, 0xf3, 0xca, 0xc8, 0x97, 0x9f, 0x57, 0xb4, 0x1f, + 0x1d, 0x55, 0xb4, 0xdf, 0x1e, 0x55, 0xb4, 0xbf, 0x1c, 0x55, 0xb4, 0x4f, 0x8e, 0x2a, 0xda, 0xbf, + 0x8e, 0x2a, 0xda, 0xbf, 0x8f, 0x2a, 0x23, 0x5f, 0x1e, 0x55, 0xb4, 0xfb, 0x5f, 0x54, 0x46, 0x3e, + 0xf9, 0xa2, 0x32, 0xf2, 0xe9, 0x17, 0x95, 0x91, 0xb7, 0xbf, 0xdd, 0xf0, 0xc2, 0x5d, 0x1d, 0x6f, + 0xc0, 0xdf, 0xe7, 0x5f, 0x8a, 0x7e, 0xd7, 0x47, 0x79, 0x6b, 0xf9, 0xdc, 0x7f, 0x03, 0x00, 0x00, + 0xff, 0xff, 0x59, 0x15, 0xdd, 0xbf, 0x79, 0x2f, 0x00, 0x00, } func (this *RebuildMutableStateRequest) Equal(that interface{}) bool { @@ -4630,6 +4822,14 @@ if this.IsGlobalNamespaceEnabled != that1.IsGlobalNamespaceEnabled { return false } + if len(this.Tags) != len(that1.Tags) { + return false + } + for i := range this.Tags { + if this.Tags[i] != that1.Tags[i] { + return false + } + } return true } func (this *ListClustersRequest) Equal(that interface{}) bool { @@ -5419,6 +5619,128 @@ } return true } +func (this *GetNamespaceRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*GetNamespaceRequest) + if !ok { + that2, ok := that.(GetNamespaceRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if that1.Attributes == nil { + if this.Attributes != nil { + return false + } + } else if this.Attributes == nil { + return false + } else if !this.Attributes.Equal(that1.Attributes) { + return false + } + return true +} +func (this *GetNamespaceRequest_Namespace) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*GetNamespaceRequest_Namespace) + if !ok { + that2, ok := that.(GetNamespaceRequest_Namespace) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Namespace != that1.Namespace { + return false + 
} + return true +} +func (this *GetNamespaceRequest_Id) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*GetNamespaceRequest_Id) + if !ok { + that2, ok := that.(GetNamespaceRequest_Id) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Id != that1.Id { + return false + } + return true +} +func (this *GetNamespaceResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*GetNamespaceResponse) + if !ok { + that2, ok := that.(GetNamespaceResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Info.Equal(that1.Info) { + return false + } + if !this.Config.Equal(that1.Config) { + return false + } + if !this.ReplicationConfig.Equal(that1.ReplicationConfig) { + return false + } + if this.ConfigVersion != that1.ConfigVersion { + return false + } + if this.FailoverVersion != that1.FailoverVersion { + return false + } + if len(this.FailoverHistory) != len(that1.FailoverHistory) { + return false + } + for i := range this.FailoverHistory { + if !this.FailoverHistory[i].Equal(that1.FailoverHistory[i]) { + return false + } + } + return true +} func (this *RebuildMutableStateRequest) GoString() string { if this == nil { return "nil" @@ -5884,7 +6206,7 @@ if this == nil { return "nil" } - s := make([]string, 0, 16) + s := make([]string, 0, 17) s = append(s, "&adminservice.DescribeClusterResponse{") keysForSupportedClients := make([]string, 0, len(this.SupportedClients)) for k, _ := range this.SupportedClients { @@ -5914,6 +6236,19 @@ s = append(s, "FailoverVersionIncrement: "+fmt.Sprintf("%#v", this.FailoverVersionIncrement)+",\n") s = append(s, "InitialFailoverVersion: "+fmt.Sprintf("%#v", this.InitialFailoverVersion)+",\n") s = append(s, "IsGlobalNamespaceEnabled: "+fmt.Sprintf("%#v", this.IsGlobalNamespaceEnabled)+",\n") + keysForTags := make([]string, 0, len(this.Tags)) + for k, _ := range this.Tags { + keysForTags = append(keysForTags, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForTags) + mapStringForTags := "map[string]string{" + for _, k := range keysForTags { + mapStringForTags += fmt.Sprintf("%#v: %#v,", k, this.Tags[k]) + } + mapStringForTags += "}" + if this.Tags != nil { + s = append(s, "Tags: "+mapStringForTags+",\n") + } s = append(s, "}") return strings.Join(s, "") } @@ -6228,6 +6563,57 @@ `Messages:` + fmt.Sprintf("%#v", this.Messages) + `}`}, ", ") return s } +func (this *GetNamespaceRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&adminservice.GetNamespaceRequest{") + if this.Attributes != nil { + s = append(s, "Attributes: "+fmt.Sprintf("%#v", this.Attributes)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *GetNamespaceRequest_Namespace) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&adminservice.GetNamespaceRequest_Namespace{` + + `Namespace:` + fmt.Sprintf("%#v", this.Namespace) + `}`}, ", ") + return s +} +func (this *GetNamespaceRequest_Id) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&adminservice.GetNamespaceRequest_Id{` + + `Id:` + fmt.Sprintf("%#v", this.Id) + `}`}, ", ") + return s +} +func (this *GetNamespaceResponse) GoString() string { + if this == nil { + return "nil" + } + s 
:= make([]string, 0, 10) + s = append(s, "&adminservice.GetNamespaceResponse{") + if this.Info != nil { + s = append(s, "Info: "+fmt.Sprintf("%#v", this.Info)+",\n") + } + if this.Config != nil { + s = append(s, "Config: "+fmt.Sprintf("%#v", this.Config)+",\n") + } + if this.ReplicationConfig != nil { + s = append(s, "ReplicationConfig: "+fmt.Sprintf("%#v", this.ReplicationConfig)+",\n") + } + s = append(s, "ConfigVersion: "+fmt.Sprintf("%#v", this.ConfigVersion)+",\n") + s = append(s, "FailoverVersion: "+fmt.Sprintf("%#v", this.FailoverVersion)+",\n") + if this.FailoverHistory != nil { + s = append(s, "FailoverHistory: "+fmt.Sprintf("%#v", this.FailoverHistory)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} func valueToGoStringRequestResponse(v interface{}, typ string) string { rv := reflect.ValueOf(v) if rv.IsNil() { @@ -7676,6 +8062,25 @@ _ = i var l int _ = l + if len(m.Tags) > 0 { + for k := range m.Tags { + v := m.Tags[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintRequestResponse(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintRequestResponse(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintRequestResponse(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x6a + } + } if m.IsGlobalNamespaceEnabled { i-- if m.IsGlobalNamespaceEnabled { @@ -8817,6 +9222,149 @@ } return len(dAtA) - i, nil } +func (m *GetNamespaceRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetNamespaceRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetNamespaceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Attributes != nil { + { + size := m.Attributes.Size() + i -= size + if _, err := m.Attributes.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + return len(dAtA) - i, nil +} + +func (m *GetNamespaceRequest_Namespace) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetNamespaceRequest_Namespace) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintRequestResponse(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} +func (m *GetNamespaceRequest_Id) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetNamespaceRequest_Id) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Id) + copy(dAtA[i:], m.Id) + i = encodeVarintRequestResponse(dAtA, i, uint64(len(m.Id))) + i-- + dAtA[i] = 0x12 + return len(dAtA) - i, nil +} +func (m *GetNamespaceResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetNamespaceResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetNamespaceResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.FailoverHistory) > 0 { + for iNdEx := len(m.FailoverHistory) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := 
m.FailoverHistory[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRequestResponse(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + } + if m.FailoverVersion != 0 { + i = encodeVarintRequestResponse(dAtA, i, uint64(m.FailoverVersion)) + i-- + dAtA[i] = 0x38 + } + if m.ConfigVersion != 0 { + i = encodeVarintRequestResponse(dAtA, i, uint64(m.ConfigVersion)) + i-- + dAtA[i] = 0x30 + } + if m.ReplicationConfig != nil { + { + size, err := m.ReplicationConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRequestResponse(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.Config != nil { + { + size, err := m.Config.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRequestResponse(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Info != nil { + { + size, err := m.Info.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRequestResponse(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} + func encodeVarintRequestResponse(dAtA []byte, offset int, v uint64) int { offset -= sovRequestResponse(v) base := offset @@ -9492,6 +10040,14 @@ if m.IsGlobalNamespaceEnabled { n += 2 } + if len(m.Tags) > 0 { + for k, v := range m.Tags { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovRequestResponse(uint64(len(k))) + 1 + len(v) + sovRequestResponse(uint64(len(v))) + n += mapEntrySize + 1 + sovRequestResponse(uint64(mapEntrySize)) + } + } return n } @@ -9965,6 +10521,70 @@ } return n } +func (m *GetNamespaceRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Attributes != nil { + n += m.Attributes.Size() + } + return n +} + +func (m *GetNamespaceRequest_Namespace) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Namespace) + n += 1 + l + sovRequestResponse(uint64(l)) + return n +} +func (m *GetNamespaceRequest_Id) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Id) + n += 1 + l + sovRequestResponse(uint64(l)) + return n +} +func (m *GetNamespaceResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Info != nil { + l = m.Info.Size() + n += 1 + l + sovRequestResponse(uint64(l)) + } + if m.Config != nil { + l = m.Config.Size() + n += 1 + l + sovRequestResponse(uint64(l)) + } + if m.ReplicationConfig != nil { + l = m.ReplicationConfig.Size() + n += 1 + l + sovRequestResponse(uint64(l)) + } + if m.ConfigVersion != 0 { + n += 1 + sovRequestResponse(uint64(m.ConfigVersion)) + } + if m.FailoverVersion != 0 { + n += 1 + sovRequestResponse(uint64(m.FailoverVersion)) + } + if len(m.FailoverHistory) > 0 { + for _, e := range m.FailoverHistory { + l = e.Size() + n += 1 + l + sovRequestResponse(uint64(l)) + } + } + return n +} func sovRequestResponse(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 @@ -10424,6 +11044,16 @@ mapStringForSupportedClients += fmt.Sprintf("%v: %v,", k, this.SupportedClients[k]) } mapStringForSupportedClients += "}" + keysForTags := make([]string, 0, len(this.Tags)) + for k, _ := range this.Tags { + keysForTags = append(keysForTags, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForTags) + mapStringForTags := "map[string]string{" + for _, k := range keysForTags { + mapStringForTags += fmt.Sprintf("%v: %v,", k, this.Tags[k]) + } + mapStringForTags += "}" s := 
strings.Join([]string{`&DescribeClusterResponse{`, `SupportedClients:` + mapStringForSupportedClients + `,`, `ServerVersion:` + fmt.Sprintf("%v", this.ServerVersion) + `,`, @@ -10437,6 +11067,7 @@ `FailoverVersionIncrement:` + fmt.Sprintf("%v", this.FailoverVersionIncrement) + `,`, `InitialFailoverVersion:` + fmt.Sprintf("%v", this.InitialFailoverVersion) + `,`, `IsGlobalNamespaceEnabled:` + fmt.Sprintf("%v", this.IsGlobalNamespaceEnabled) + `,`, + `Tags:` + mapStringForTags + `,`, `}`, }, "") return s @@ -10763,6 +11394,56 @@ }, "") return s } +func (this *GetNamespaceRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetNamespaceRequest{`, + `Attributes:` + fmt.Sprintf("%v", this.Attributes) + `,`, + `}`, + }, "") + return s +} +func (this *GetNamespaceRequest_Namespace) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetNamespaceRequest_Namespace{`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `}`, + }, "") + return s +} +func (this *GetNamespaceRequest_Id) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetNamespaceRequest_Id{`, + `Id:` + fmt.Sprintf("%v", this.Id) + `,`, + `}`, + }, "") + return s +} +func (this *GetNamespaceResponse) String() string { + if this == nil { + return "nil" + } + repeatedStringForFailoverHistory := "[]*FailoverStatus{" + for _, f := range this.FailoverHistory { + repeatedStringForFailoverHistory += strings.Replace(fmt.Sprintf("%v", f), "FailoverStatus", "v111.FailoverStatus", 1) + "," + } + repeatedStringForFailoverHistory += "}" + s := strings.Join([]string{`&GetNamespaceResponse{`, + `Info:` + strings.Replace(fmt.Sprintf("%v", this.Info), "NamespaceInfo", "v110.NamespaceInfo", 1) + `,`, + `Config:` + strings.Replace(fmt.Sprintf("%v", this.Config), "NamespaceConfig", "v110.NamespaceConfig", 1) + `,`, + `ReplicationConfig:` + strings.Replace(fmt.Sprintf("%v", this.ReplicationConfig), "NamespaceReplicationConfig", "v111.NamespaceReplicationConfig", 1) + `,`, + `ConfigVersion:` + fmt.Sprintf("%v", this.ConfigVersion) + `,`, + `FailoverVersion:` + fmt.Sprintf("%v", this.FailoverVersion) + `,`, + `FailoverHistory:` + repeatedStringForFailoverHistory + `,`, + `}`, + }, "") + return s +} func valueToStringRequestResponse(v interface{}) string { rv := reflect.ValueOf(v) if rv.IsNil() { @@ -15651,6 +16332,133 @@ } } m.IsGlobalNamespaceEnabled = bool(v != 0) + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRequestResponse + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRequestResponse + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRequestResponse + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Tags == nil { + m.Tags = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRequestResponse + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRequestResponse + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthRequestResponse + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthRequestResponse + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRequestResponse + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthRequestResponse + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthRequestResponse + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipRequestResponse(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRequestResponse + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Tags[mapkey] = mapvalue + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipRequestResponse(dAtA[iNdEx:]) @@ -18618,6 +19426,356 @@ iNdEx = postIndex default: iNdEx = preIndex + skippy, err := skipRequestResponse(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRequestResponse + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthRequestResponse + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetNamespaceRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRequestResponse + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetNamespaceRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetNamespaceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRequestResponse + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRequestResponse + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRequestResponse + } + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attributes = &GetNamespaceRequest_Namespace{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRequestResponse + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRequestResponse + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRequestResponse + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attributes = &GetNamespaceRequest_Id{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRequestResponse(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRequestResponse + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthRequestResponse + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetNamespaceResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRequestResponse + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetNamespaceResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetNamespaceResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRequestResponse + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRequestResponse + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRequestResponse + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Info == nil { + m.Info = &v110.NamespaceInfo{} + } + if err := m.Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRequestResponse + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRequestResponse + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRequestResponse + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Config == nil { + m.Config = &v110.NamespaceConfig{} + } + if err := m.Config.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field ReplicationConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRequestResponse + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRequestResponse + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRequestResponse + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ReplicationConfig == nil { + m.ReplicationConfig = &v111.NamespaceReplicationConfig{} + } + if err := m.ReplicationConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfigVersion", wireType) + } + m.ConfigVersion = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRequestResponse + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ConfigVersion |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FailoverVersion", wireType) + } + m.FailoverVersion = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRequestResponse + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.FailoverVersion |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FailoverHistory", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRequestResponse + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRequestResponse + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRequestResponse + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FailoverHistory = append(m.FailoverHistory, &v111.FailoverStatus{}) + if err := m.FailoverHistory[len(m.FailoverHistory)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex skippy, err := skipRequestResponse(dAtA[iNdEx:]) if err != nil { return err diff -Nru temporal-1.21.5-1/src/api/adminservice/v1/service.pb.go temporal-1.22.5/src/api/adminservice/v1/service.pb.go --- temporal-1.21.5-1/src/api/adminservice/v1/service.pb.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/api/adminservice/v1/service.pb.go 2024-02-23 09:45:43.000000000 +0000 @@ -54,63 +54,65 @@ } var fileDescriptor_cf5ca5e0c737570d = []byte{ - // 895 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x98, 0x4d, 0x6f, 0xd3, 0x48, - 0x18, 0xc7, 0x33, 0x97, 0xd5, 0x6a, 0xd4, 0x7d, 0xf3, 0xae, 0xf6, 0xa5, 0x07, 0xef, 0x6e, 0xf7, - 0xb2, 0xa7, 0x84, 0x16, 0x28, 0xf4, 0xbd, 0x69, 0x12, 0x52, 0x89, 0xa4, 0xd0, 0x84, 0x17, 0x89, - 0x0b, 0x9a, 0xc4, 0x4f, 0x5b, 0xab, 0x4e, 0x6c, 0x66, 0xc6, 0x29, 0x3d, 0xc1, 0x05, 0x09, 0x09, - 0x09, 0x81, 0x84, 0x84, 0x84, 0xc4, 0x09, 0x09, 0x81, 0xc4, 0x67, 0x40, 0xea, 0xad, 0xc7, 0x1e, - 0x7b, 0xa4, 0xe9, 0x85, 0x63, 0x3f, 0x02, 0x72, 0x9d, 0x99, 0xda, 0xc9, 0x50, 0xc6, 0x4e, 0x6f, - 0x4d, 0x3d, 0xbf, 0xff, 0xfc, 0xfc, 0xc4, 0x33, 
0xcf, 0x38, 0x78, 0x9c, 0x43, 0xcb, 0x73, 0x29, - 0x71, 0x72, 0x0c, 0x68, 0x07, 0x68, 0x8e, 0x78, 0x76, 0x8e, 0x58, 0x2d, 0xbb, 0x1d, 0x7c, 0xb6, - 0x9b, 0x90, 0xeb, 0x8c, 0xe7, 0x7a, 0x7f, 0x66, 0x3d, 0xea, 0x72, 0xd7, 0xf8, 0x4f, 0x20, 0xd9, - 0x10, 0xc9, 0x12, 0xcf, 0xce, 0x46, 0x91, 0x6c, 0x67, 0x7c, 0x74, 0x5a, 0x27, 0x97, 0xc2, 0x3d, - 0x1f, 0x18, 0xbf, 0x4b, 0x81, 0x79, 0x6e, 0x9b, 0xf5, 0x26, 0x98, 0xd8, 0x19, 0xc3, 0x23, 0xf9, - 0x60, 0x68, 0x3d, 0x1c, 0x6a, 0xbc, 0x42, 0xf8, 0xd7, 0x1a, 0x34, 0x7c, 0xdb, 0xb1, 0xaa, 0x3e, - 0x27, 0x0d, 0x07, 0xea, 0x9c, 0x70, 0x30, 0x16, 0xb2, 0x1a, 0x2a, 0x59, 0x05, 0x59, 0x0b, 0x27, - 0x1e, 0x5d, 0x4c, 0x1f, 0x10, 0x1a, 0x8f, 0x65, 0x8c, 0xd7, 0x08, 0xff, 0x56, 0x04, 0xd6, 0xa4, - 0x76, 0x03, 0x62, 0x76, 0x7a, 0xe1, 0x2a, 0x54, 0xe8, 0xe5, 0x87, 0x48, 0x90, 0x7e, 0x41, 0xf1, - 0xc4, 0x90, 0x65, 0x9b, 0x71, 0x97, 0x6e, 0x2f, 0xbb, 0x8c, 0x6b, 0x16, 0x4f, 0x41, 0x26, 0x2b, - 0x9e, 0x32, 0x40, 0xca, 0x6d, 0xe3, 0xef, 0xcb, 0xc0, 0xeb, 0x1b, 0x84, 0x5a, 0xc6, 0x05, 0xad, - 0x3c, 0x31, 0x5c, 0x58, 0x5c, 0x4c, 0x48, 0xc9, 0xa9, 0x1f, 0x60, 0x5c, 0x70, 0x5c, 0x06, 0xe1, - 0xe4, 0x93, 0x5a, 0x31, 0x27, 0x80, 0x98, 0xfe, 0x52, 0x62, 0x4e, 0x0a, 0x3c, 0x47, 0xf8, 0xe7, - 0x8a, 0xcd, 0x78, 0xaf, 0x32, 0x37, 0x08, 0xdb, 0x64, 0xc6, 0xac, 0x56, 0x5e, 0x3f, 0x26, 0x6c, - 0xe6, 0x52, 0xd2, 0xd1, 0xa2, 0xd4, 0xa0, 0xe5, 0x76, 0x20, 0xb8, 0xa0, 0x59, 0x94, 0x13, 0x20, - 0x59, 0x51, 0xa2, 0x9c, 0x14, 0xd8, 0x41, 0xf8, 0x9f, 0x32, 0xf0, 0xdb, 0x2e, 0xdd, 0x5c, 0x73, - 0xdc, 0xad, 0xd2, 0x7d, 0x68, 0xfa, 0xdc, 0x76, 0xdb, 0x35, 0xb2, 0xd5, 0x53, 0xbe, 0x35, 0x61, - 0x54, 0x74, 0xbf, 0xf3, 0x53, 0x63, 0x84, 0x6d, 0xf5, 0x8c, 0xd2, 0xe4, 0x3d, 0xbc, 0x41, 0xf8, - 0xf7, 0x32, 0xf0, 0x1a, 0x78, 0x8e, 0xdd, 0x24, 0xc1, 0xc0, 0x2a, 0x30, 0x46, 0xd6, 0x81, 0x19, - 0x4b, 0xba, 0x73, 0x29, 0x60, 0xe1, 0x5b, 0x18, 0x2a, 0x43, 0x5a, 0x7e, 0x44, 0xf8, 0xef, 0x32, - 0xf0, 0x15, 0xd2, 0x02, 0xe6, 0x91, 0x26, 0xa8, 0x74, 0xaf, 0xea, 0x4e, 0x75, 0x5a, 0x8a, 0xf0, - 0xae, 0x9c, 0x4d, 0x98, 0xbc, 0x81, 0x0f, 0x08, 0xff, 0x55, 0x06, 0x5e, 0xac, 0xac, 0xaa, 0xd4, - 0x4b, 0xba, 0xb3, 0xa9, 0x79, 0x21, 0x7d, 0x65, 0xd8, 0x18, 0xa9, 0xfb, 0x18, 0xe1, 0x1f, 0x6a, - 0x40, 0x3c, 0xcf, 0xd9, 0x2e, 0x75, 0xa0, 0xcd, 0x99, 0x31, 0xa5, 0xb9, 0x4c, 0x22, 0x8c, 0xd0, - 0x9a, 0x4e, 0x83, 0xc6, 0x5a, 0x42, 0xde, 0xb2, 0xea, 0x40, 0x68, 0x73, 0x23, 0xcf, 0x39, 0xb5, - 0x1b, 0x3e, 0x07, 0xa6, 0xd9, 0x12, 0x14, 0x64, 0xb2, 0x96, 0xa0, 0x0c, 0x88, 0xad, 0x9e, 0x70, - 0x6b, 0x18, 0xf0, 0x5b, 0x4a, 0xb0, 0xaf, 0x7c, 0x4d, 0xb1, 0x30, 0x54, 0x46, 0xac, 0x84, 0x41, - 0x53, 0x49, 0x57, 0x42, 0x05, 0x99, 0xac, 0x84, 0xca, 0x00, 0x29, 0xf7, 0x14, 0xe1, 0x9f, 0x44, - 0xdf, 0x2d, 0x38, 0x3e, 0xe3, 0x40, 0x8d, 0x99, 0x44, 0xdd, 0xba, 0x47, 0x09, 0xa9, 0xd9, 0x74, - 0xb0, 0x14, 0x7a, 0x84, 0xf0, 0x48, 0xd0, 0x75, 0x7a, 0x57, 0x98, 0x71, 0x59, 0xbb, 0x51, 0x09, - 0x44, 0xa8, 0x4c, 0xa5, 0x20, 0xa5, 0xc7, 0x4b, 0x84, 0x8d, 0xc8, 0xa5, 0x2a, 0xb4, 0x1a, 0x81, - 0xcd, 0x7c, 0xd2, 0xcc, 0x1e, 0x28, 0x9c, 0x16, 0x52, 0xf3, 0xd2, 0xec, 0x3d, 0xc2, 0x7f, 0xe6, - 0x2d, 0xeb, 0x1a, 0xbd, 0xe9, 0x59, 0xc7, 0xe7, 0xb7, 0x96, 0xcb, 0xe5, 0x77, 0x57, 0xd4, 0x5d, - 0x56, 0x4a, 0x5c, 0x58, 0x96, 0x86, 0x4c, 0x89, 0x3d, 0xfb, 0xe1, 0x02, 0x89, 0x6b, 0x2e, 0x24, - 0x58, 0x5a, 0x4a, 0xc3, 0xc5, 0xf4, 0x01, 0x52, 0xee, 0x09, 0xc2, 0x3f, 0x86, 0xdb, 0xb1, 0x6c, - 0x05, 0xd3, 0x09, 0xf6, 0xf0, 0xfe, 0xfd, 0x7f, 0x26, 0x15, 0x1b, 0x3b, 0xe3, 0x5d, 0xf7, 0xe9, - 0x3a, 0x44, 0x7d, 0xf4, 0x56, 0x53, 0x3f, 0x96, 0xec, 0x8c, 0x37, 0x48, 
0xc7, 0x9c, 0xaa, 0x90, - 0xca, 0xa9, 0x1f, 0x4b, 0xe6, 0x34, 0x48, 0xc7, 0x5e, 0xa2, 0x6a, 0xb0, 0x46, 0x81, 0x6d, 0x88, - 0x53, 0x56, 0x78, 0x1e, 0xd6, 0x7d, 0x24, 0x06, 0xd1, 0x64, 0x2f, 0x51, 0xea, 0x84, 0xbe, 0xa6, - 0xc4, 0xa0, 0x6d, 0x45, 0x9a, 0x7c, 0x68, 0xa8, 0xdb, 0x94, 0x54, 0x70, 0xd2, 0xa6, 0xa4, 0xce, - 0x90, 0x96, 0x2f, 0x10, 0xfe, 0xa5, 0x0c, 0x3c, 0xf8, 0xf7, 0xaa, 0x0f, 0x3e, 0x84, 0x82, 0x73, - 0xba, 0x8f, 0x70, 0x9c, 0x13, 0x6e, 0xf3, 0x69, 0x71, 0xa9, 0xf5, 0x16, 0xe1, 0x3f, 0x8a, 0xe0, - 0x00, 0x87, 0x81, 0x13, 0xb4, 0x51, 0xd0, 0xec, 0x2c, 0x4a, 0x5a, 0x28, 0x16, 0x87, 0x0b, 0x91, - 0xa2, 0xbb, 0x08, 0xff, 0x5b, 0xe7, 0x14, 0x48, 0x4b, 0x8c, 0x52, 0x9d, 0x2c, 0xf5, 0xde, 0x17, - 0xbe, 0x99, 0x23, 0xe4, 0x57, 0xce, 0x2a, 0x4e, 0xdc, 0xc6, 0xff, 0xe8, 0x1c, 0x5a, 0x72, 0xf6, - 0x0e, 0xcc, 0xcc, 0xfe, 0x81, 0x99, 0x39, 0x3a, 0x30, 0xd1, 0xc3, 0xae, 0x89, 0xde, 0x75, 0x4d, - 0xb4, 0xdb, 0x35, 0xd1, 0x5e, 0xd7, 0x44, 0x9f, 0xba, 0x26, 0xfa, 0xdc, 0x35, 0x33, 0x47, 0x5d, - 0x13, 0x3d, 0x3b, 0x34, 0x33, 0x7b, 0x87, 0x66, 0x66, 0xff, 0xd0, 0xcc, 0xdc, 0x99, 0x5c, 0x77, - 0x4f, 0x6c, 0x6c, 0xf7, 0x94, 0xdf, 0x6e, 0x66, 0xa2, 0x9f, 0x1b, 0xdf, 0x1d, 0xff, 0x70, 0x73, - 0xfe, 0x4b, 0x00, 0x00, 0x00, 0xff, 0xff, 0x1f, 0xf7, 0xa0, 0x23, 0x4e, 0x12, 0x00, 0x00, + // 914 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x98, 0xcb, 0x6b, 0x1b, 0x47, + 0x1c, 0xc7, 0x35, 0x97, 0x52, 0x06, 0xf7, 0xb5, 0x2d, 0x7d, 0xf8, 0xb0, 0x7d, 0xf8, 0xd2, 0x93, + 0x54, 0xbb, 0xad, 0x5b, 0xbf, 0x2d, 0x4b, 0xaa, 0x0c, 0x95, 0xdc, 0x5a, 0xea, 0x03, 0x7a, 0x29, + 0x23, 0xed, 0xcf, 0xf6, 0xe2, 0x95, 0x76, 0x33, 0x33, 0x2b, 0xc7, 0xa7, 0xe4, 0x12, 0x08, 0x04, + 0x42, 0x02, 0x81, 0x40, 0x20, 0xa7, 0x40, 0x48, 0x20, 0x7f, 0x43, 0x20, 0x37, 0xe7, 0xe6, 0xa3, + 0x8f, 0xb1, 0x7c, 0xc9, 0xd1, 0x7f, 0x42, 0x58, 0xaf, 0x66, 0xbc, 0x2b, 0x4d, 0x9c, 0x99, 0x95, + 0x6f, 0x96, 0x77, 0x3e, 0xdf, 0xf9, 0xec, 0x4f, 0x3b, 0xf3, 0x9b, 0x15, 0x9e, 0xe6, 0xd0, 0x09, + 0x7c, 0x4a, 0xbc, 0x02, 0x03, 0xda, 0x03, 0x5a, 0x20, 0x81, 0x5b, 0x20, 0x4e, 0xc7, 0xed, 0x46, + 0x9f, 0xdd, 0x36, 0x14, 0x7a, 0xd3, 0x85, 0xc1, 0x9f, 0xf9, 0x80, 0xfa, 0xdc, 0xb7, 0xa6, 0x04, + 0x92, 0x8f, 0x91, 0x3c, 0x09, 0xdc, 0x7c, 0x12, 0xc9, 0xf7, 0xa6, 0x27, 0xe7, 0x75, 0x72, 0x29, + 0x5c, 0x09, 0x81, 0xf1, 0xff, 0x29, 0xb0, 0xc0, 0xef, 0xb2, 0xc1, 0x04, 0x33, 0x2f, 0xa7, 0xf0, + 0x44, 0x31, 0x1a, 0xda, 0x8c, 0x87, 0x5a, 0x0f, 0x10, 0xfe, 0xb4, 0x01, 0xad, 0xd0, 0xf5, 0x9c, + 0x7a, 0xc8, 0x49, 0xcb, 0x83, 0x26, 0x27, 0x1c, 0xac, 0x95, 0xbc, 0x86, 0x4a, 0x5e, 0x41, 0x36, + 0xe2, 0x89, 0x27, 0x57, 0xb3, 0x07, 0xc4, 0xc6, 0xdf, 0xe5, 0xac, 0x87, 0x08, 0x7f, 0x56, 0x06, + 0xd6, 0xa6, 0x6e, 0x0b, 0x52, 0x76, 0x7a, 0xe1, 0x2a, 0x54, 0xe8, 0x15, 0xc7, 0x48, 0x90, 0x7e, + 0x51, 0xf1, 0xc4, 0x90, 0x75, 0x97, 0x71, 0x9f, 0xee, 0xaf, 0xfb, 0x8c, 0x6b, 0x16, 0x4f, 0x41, + 0x9a, 0x15, 0x4f, 0x19, 0x20, 0xe5, 0xf6, 0xf1, 0xfb, 0x55, 0xe0, 0xcd, 0x1d, 0x42, 0x1d, 0xeb, + 0x27, 0xad, 0x3c, 0x31, 0x5c, 0x58, 0xfc, 0x6c, 0x48, 0xc9, 0xa9, 0xaf, 0x61, 0x5c, 0xf2, 0x7c, + 0x06, 0xf1, 0xe4, 0xb3, 0x5a, 0x31, 0xe7, 0x80, 0x98, 0xfe, 0x17, 0x63, 0x4e, 0x0a, 0xdc, 0x45, + 0xf8, 0xe3, 0x9a, 0xcb, 0xf8, 0xa0, 0x32, 0x7f, 0x11, 0xb6, 0xcb, 0xac, 0x45, 0xad, 0xbc, 0x61, + 0x4c, 0xd8, 0x2c, 0x65, 0xa4, 0x93, 0x45, 0x69, 0x40, 0xc7, 0xef, 0x41, 0x74, 0x41, 0xb3, 0x28, + 0xe7, 0x80, 0x59, 0x51, 0x92, 0x9c, 0x14, 0x78, 0x81, 0xf0, 0x37, 0x55, 0xe0, 0xff, 0xfa, 0x74, + 0x77, 0xcb, 0xf3, 0xf7, 0x2a, 0x57, 0xa1, 0x1d, 0x72, 
0xd7, 0xef, 0x36, 0xc8, 0xde, 0x40, 0xf9, + 0x9f, 0x19, 0xab, 0xa6, 0xfb, 0x9d, 0x5f, 0x18, 0x23, 0x6c, 0xeb, 0x97, 0x94, 0x26, 0xef, 0xe1, + 0x11, 0xc2, 0x9f, 0x57, 0x81, 0x37, 0x20, 0xf0, 0xdc, 0x36, 0x89, 0x06, 0xd6, 0x81, 0x31, 0xb2, + 0x0d, 0xcc, 0x5a, 0xd3, 0x9d, 0x4b, 0x01, 0x0b, 0xdf, 0xd2, 0x58, 0x19, 0xd2, 0xf2, 0x39, 0xc2, + 0x5f, 0x57, 0x81, 0x6f, 0x90, 0x0e, 0xb0, 0x80, 0xb4, 0x41, 0xa5, 0xfb, 0xbb, 0xee, 0x54, 0x17, + 0xa5, 0x08, 0xef, 0xda, 0xe5, 0x84, 0xc9, 0x1b, 0x78, 0x86, 0xf0, 0x57, 0x55, 0xe0, 0xe5, 0xda, + 0xa6, 0x4a, 0xbd, 0xa2, 0x3b, 0x9b, 0x9a, 0x17, 0xd2, 0xbf, 0x8d, 0x1b, 0x23, 0x75, 0x6f, 0x22, + 0xfc, 0x41, 0x03, 0x48, 0x10, 0x78, 0xfb, 0x95, 0x1e, 0x74, 0x39, 0xb3, 0xe6, 0x34, 0x97, 0x49, + 0x82, 0x11, 0x5a, 0xf3, 0x59, 0xd0, 0x54, 0x4b, 0x28, 0x3a, 0x4e, 0x13, 0x08, 0x6d, 0xef, 0x14, + 0x39, 0xa7, 0x6e, 0x2b, 0xe4, 0xc0, 0x34, 0x5b, 0x82, 0x82, 0x34, 0x6b, 0x09, 0xca, 0x80, 0xd4, + 0xea, 0x89, 0xb7, 0x86, 0x11, 0xbf, 0x35, 0x83, 0x7d, 0xe5, 0x6d, 0x8a, 0xa5, 0xb1, 0x32, 0x52, + 0x25, 0x8c, 0x9a, 0x4a, 0xb6, 0x12, 0x2a, 0x48, 0xb3, 0x12, 0x2a, 0x03, 0xa4, 0xdc, 0x6d, 0x84, + 0x3f, 0x12, 0x7d, 0xb7, 0xe4, 0x85, 0x8c, 0x03, 0xb5, 0x16, 0x8c, 0xba, 0xf5, 0x80, 0x12, 0x52, + 0x8b, 0xd9, 0x60, 0x29, 0x74, 0x03, 0xe1, 0x89, 0xa8, 0xeb, 0x0c, 0xae, 0x30, 0xeb, 0x57, 0xed, + 0x46, 0x25, 0x10, 0xa1, 0x32, 0x97, 0x81, 0x94, 0x1e, 0xf7, 0x11, 0xb6, 0x12, 0x97, 0xea, 0xd0, + 0x69, 0x45, 0x36, 0xcb, 0xa6, 0x99, 0x03, 0x50, 0x38, 0xad, 0x64, 0xe6, 0xa5, 0xd9, 0x53, 0x84, + 0xbf, 0x2c, 0x3a, 0xce, 0x1f, 0xf4, 0xef, 0xc0, 0x39, 0x3b, 0xbf, 0x75, 0x7c, 0x2e, 0xbf, 0xbb, + 0xb2, 0xee, 0xb2, 0x52, 0xe2, 0xc2, 0xb2, 0x32, 0x66, 0x4a, 0xea, 0xd9, 0x8f, 0x17, 0x48, 0x5a, + 0x73, 0xc5, 0x60, 0x69, 0x29, 0x0d, 0x57, 0xb3, 0x07, 0x48, 0xb9, 0x5b, 0x08, 0x7f, 0x18, 0x6f, + 0xc7, 0xb2, 0x15, 0xcc, 0x1b, 0xec, 0xe1, 0xc3, 0xfb, 0xff, 0x42, 0x26, 0x36, 0x75, 0xc6, 0xfb, + 0x33, 0xa4, 0xdb, 0x90, 0xf4, 0xd1, 0x5b, 0x4d, 0xc3, 0x98, 0xd9, 0x19, 0x6f, 0x94, 0x4e, 0x39, + 0xd5, 0x21, 0x93, 0xd3, 0x30, 0x66, 0xe6, 0x34, 0x4a, 0xa7, 0x5e, 0xa2, 0x1a, 0xb0, 0x45, 0x81, + 0xed, 0x88, 0x53, 0x56, 0x7c, 0x1e, 0xd6, 0x7d, 0x24, 0x46, 0x51, 0xb3, 0x97, 0x28, 0x75, 0xc2, + 0x50, 0x53, 0x62, 0xd0, 0x75, 0x12, 0x4d, 0x3e, 0x36, 0xd4, 0x6d, 0x4a, 0x2a, 0xd8, 0xb4, 0x29, + 0xa9, 0x33, 0xa4, 0xe5, 0x3d, 0x84, 0x3f, 0xa9, 0x02, 0x8f, 0xfe, 0xbd, 0x19, 0x42, 0x08, 0xb1, + 0xe0, 0x92, 0xee, 0x23, 0x9c, 0xe6, 0x84, 0xdb, 0x72, 0x56, 0x5c, 0x6a, 0x3d, 0x46, 0xf8, 0x8b, + 0x32, 0x78, 0xc0, 0x61, 0xe4, 0x04, 0x6d, 0x95, 0x34, 0x3b, 0x8b, 0x92, 0x16, 0x8a, 0xe5, 0xf1, + 0x42, 0xa4, 0xe8, 0x01, 0xc2, 0xdf, 0x36, 0x39, 0x05, 0xd2, 0x11, 0xa3, 0x54, 0x27, 0x4b, 0xbd, + 0xf7, 0x85, 0x77, 0xe6, 0x08, 0xf9, 0x8d, 0xcb, 0x8a, 0x13, 0xb7, 0xf1, 0x3d, 0xfa, 0x01, 0x9d, + 0x75, 0xdc, 0xe4, 0x51, 0x5a, 0xb3, 0xe3, 0xa6, 0x4f, 0xdf, 0x26, 0x1d, 0x37, 0x4d, 0x0a, 0x97, + 0x35, 0xef, 0xf0, 0xd8, 0xce, 0x1d, 0x1d, 0xdb, 0xb9, 0xd3, 0x63, 0x1b, 0x5d, 0xef, 0xdb, 0xe8, + 0x49, 0xdf, 0x46, 0x07, 0x7d, 0x1b, 0x1d, 0xf6, 0x6d, 0xf4, 0xaa, 0x6f, 0xa3, 0xd7, 0x7d, 0x3b, + 0x77, 0xda, 0xb7, 0xd1, 0x9d, 0x13, 0x3b, 0x77, 0x78, 0x62, 0xe7, 0x8e, 0x4e, 0xec, 0xdc, 0x7f, + 0xb3, 0xdb, 0xfe, 0xf9, 0xa4, 0xae, 0x7f, 0xc1, 0x6f, 0x48, 0x0b, 0xc9, 0xcf, 0xad, 0xf7, 0xce, + 0x7e, 0x40, 0xfa, 0xf1, 0x4d, 0x00, 0x00, 0x00, 0xff, 0xff, 0x51, 0x48, 0x16, 0x5e, 0xd6, 0x12, + 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
@@ -185,6 +187,7 @@ // DeleteWorkflowExecution force deletes a workflow's visibility record, current & concrete execution record and history if possible DeleteWorkflowExecution(ctx context.Context, in *DeleteWorkflowExecutionRequest, opts ...grpc.CallOption) (*DeleteWorkflowExecutionResponse, error) StreamWorkflowReplicationMessages(ctx context.Context, opts ...grpc.CallOption) (AdminService_StreamWorkflowReplicationMessagesClient, error) + GetNamespace(ctx context.Context, in *GetNamespaceRequest, opts ...grpc.CallOption) (*GetNamespaceResponse, error) } type adminServiceClient struct { @@ -469,6 +472,15 @@ return m, nil } +func (c *adminServiceClient) GetNamespace(ctx context.Context, in *GetNamespaceRequest, opts ...grpc.CallOption) (*GetNamespaceResponse, error) { + out := new(GetNamespaceResponse) + err := c.cc.Invoke(ctx, "/temporal.server.api.adminservice.v1.AdminService/GetNamespace", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // AdminServiceServer is the server API for AdminService service. type AdminServiceServer interface { // RebuildMutableState attempts to rebuild mutable state according to persisted history events. @@ -531,6 +543,7 @@ // DeleteWorkflowExecution force deletes a workflow's visibility record, current & concrete execution record and history if possible DeleteWorkflowExecution(context.Context, *DeleteWorkflowExecutionRequest) (*DeleteWorkflowExecutionResponse, error) StreamWorkflowReplicationMessages(AdminService_StreamWorkflowReplicationMessagesServer) error + GetNamespace(context.Context, *GetNamespaceRequest) (*GetNamespaceResponse, error) } // UnimplementedAdminServiceServer can be embedded to have forward compatible implementations. @@ -621,6 +634,9 @@ func (*UnimplementedAdminServiceServer) StreamWorkflowReplicationMessages(srv AdminService_StreamWorkflowReplicationMessagesServer) error { return status.Errorf(codes.Unimplemented, "method StreamWorkflowReplicationMessages not implemented") } +func (*UnimplementedAdminServiceServer) GetNamespace(ctx context.Context, req *GetNamespaceRequest) (*GetNamespaceResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetNamespace not implemented") +} func RegisterAdminServiceServer(s *grpc.Server, srv AdminServiceServer) { s.RegisterService(&_AdminService_serviceDesc, srv) @@ -1138,6 +1154,24 @@ return m, nil } +func _AdminService_GetNamespace_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetNamespaceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AdminServiceServer).GetNamespace(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/temporal.server.api.adminservice.v1.AdminService/GetNamespace", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AdminServiceServer).GetNamespace(ctx, req.(*GetNamespaceRequest)) + } + return interceptor(ctx, in, info, handler) +} + var _AdminService_serviceDesc = grpc.ServiceDesc{ ServiceName: "temporal.server.api.adminservice.v1.AdminService", HandlerType: (*AdminServiceServer)(nil), @@ -1250,6 +1284,10 @@ MethodName: "DeleteWorkflowExecution", Handler: _AdminService_DeleteWorkflowExecution_Handler, }, + { + MethodName: "GetNamespace", + Handler: _AdminService_GetNamespace_Handler, + }, }, Streams: []grpc.StreamDesc{ { diff -Nru temporal-1.21.5-1/src/api/adminservicemock/v1/service.pb.mock.go 
temporal-1.22.5/src/api/adminservicemock/v1/service.pb.mock.go --- temporal-1.21.5-1/src/api/adminservicemock/v1/service.pb.mock.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/api/adminservicemock/v1/service.pb.mock.go 2024-02-23 09:45:43.000000000 +0000 @@ -241,6 +241,26 @@ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDLQReplicationMessages", reflect.TypeOf((*MockAdminServiceClient)(nil).GetDLQReplicationMessages), varargs...) } +// GetNamespace mocks base method. +func (m *MockAdminServiceClient) GetNamespace(ctx context.Context, in *adminservice.GetNamespaceRequest, opts ...grpc.CallOption) (*adminservice.GetNamespaceResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetNamespace", varargs...) + ret0, _ := ret[0].(*adminservice.GetNamespaceResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetNamespace indicates an expected call of GetNamespace. +func (mr *MockAdminServiceClientMockRecorder) GetNamespace(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNamespace", reflect.TypeOf((*MockAdminServiceClient)(nil).GetNamespace), varargs...) +} + // GetNamespaceReplicationMessages mocks base method. func (m *MockAdminServiceClient) GetNamespaceReplicationMessages(ctx context.Context, in *adminservice.GetNamespaceReplicationMessagesRequest, opts ...grpc.CallOption) (*adminservice.GetNamespaceReplicationMessagesResponse, error) { m.ctrl.T.Helper() @@ -703,7 +723,7 @@ } // RecvMsg mocks base method. -func (m_2 *MockAdminService_StreamWorkflowReplicationMessagesClient) RecvMsg(m interface{}) error { +func (m_2 *MockAdminService_StreamWorkflowReplicationMessagesClient) RecvMsg(m any) error { m_2.ctrl.T.Helper() ret := m_2.ctrl.Call(m_2, "RecvMsg", m) ret0, _ := ret[0].(error) @@ -731,7 +751,7 @@ } // SendMsg mocks base method. -func (m_2 *MockAdminService_StreamWorkflowReplicationMessagesClient) SendMsg(m interface{}) error { +func (m_2 *MockAdminService_StreamWorkflowReplicationMessagesClient) SendMsg(m any) error { m_2.ctrl.T.Helper() ret := m_2.ctrl.Call(m_2, "SendMsg", m) ret0, _ := ret[0].(error) @@ -916,6 +936,21 @@ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDLQReplicationMessages", reflect.TypeOf((*MockAdminServiceServer)(nil).GetDLQReplicationMessages), arg0, arg1) } +// GetNamespace mocks base method. +func (m *MockAdminServiceServer) GetNamespace(arg0 context.Context, arg1 *adminservice.GetNamespaceRequest) (*adminservice.GetNamespaceResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetNamespace", arg0, arg1) + ret0, _ := ret[0].(*adminservice.GetNamespaceResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetNamespace indicates an expected call of GetNamespace. +func (mr *MockAdminServiceServerMockRecorder) GetNamespace(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNamespace", reflect.TypeOf((*MockAdminServiceServer)(nil).GetNamespace), arg0, arg1) +} + // GetNamespaceReplicationMessages mocks base method. 
func (m *MockAdminServiceServer) GetNamespaceReplicationMessages(arg0 context.Context, arg1 *adminservice.GetNamespaceReplicationMessagesRequest) (*adminservice.GetNamespaceReplicationMessagesResponse, error) { m.ctrl.T.Helper() @@ -1253,7 +1288,7 @@ } // RecvMsg mocks base method. -func (m_2 *MockAdminService_StreamWorkflowReplicationMessagesServer) RecvMsg(m interface{}) error { +func (m_2 *MockAdminService_StreamWorkflowReplicationMessagesServer) RecvMsg(m any) error { m_2.ctrl.T.Helper() ret := m_2.ctrl.Call(m_2, "RecvMsg", m) ret0, _ := ret[0].(error) @@ -1295,7 +1330,7 @@ } // SendMsg mocks base method. -func (m_2 *MockAdminService_StreamWorkflowReplicationMessagesServer) SendMsg(m interface{}) error { +func (m_2 *MockAdminService_StreamWorkflowReplicationMessagesServer) SendMsg(m any) error { m_2.ctrl.T.Helper() ret := m_2.ctrl.Call(m_2, "SendMsg", m) ret0, _ := ret[0].(error) diff -Nru temporal-1.21.5-1/src/api/historyservice/v1/request_response.pb.go temporal-1.22.5/src/api/historyservice/v1/request_response.pb.go --- temporal-1.21.5-1/src/api/historyservice/v1/request_response.pb.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/api/historyservice/v1/request_response.pb.go 2024-02-23 09:45:43.000000000 +0000 @@ -48,13 +48,13 @@ v111 "go.temporal.io/api/history/v1" v110 "go.temporal.io/api/protocol/v1" v19 "go.temporal.io/api/query/v1" - v16 "go.temporal.io/api/taskqueue/v1" + v17 "go.temporal.io/api/taskqueue/v1" v112 "go.temporal.io/api/workflow/v1" v1 "go.temporal.io/api/workflowservice/v1" v116 "go.temporal.io/server/api/adminservice/v1" v15 "go.temporal.io/server/api/clock/v1" - v17 "go.temporal.io/server/api/enums/v1" - v18 "go.temporal.io/server/api/history/v1" + v18 "go.temporal.io/server/api/enums/v1" + v16 "go.temporal.io/server/api/history/v1" v114 "go.temporal.io/server/api/namespace/v1" v113 "go.temporal.io/server/api/persistence/v1" v115 "go.temporal.io/server/api/replication/v1" @@ -253,10 +253,11 @@ } type GetMutableStateRequest struct { - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - Execution *v14.WorkflowExecution `protobuf:"bytes,2,opt,name=execution,proto3" json:"execution,omitempty"` - ExpectedNextEventId int64 `protobuf:"varint,3,opt,name=expected_next_event_id,json=expectedNextEventId,proto3" json:"expected_next_event_id,omitempty"` - CurrentBranchToken []byte `protobuf:"bytes,4,opt,name=current_branch_token,json=currentBranchToken,proto3" json:"current_branch_token,omitempty"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + Execution *v14.WorkflowExecution `protobuf:"bytes,2,opt,name=execution,proto3" json:"execution,omitempty"` + ExpectedNextEventId int64 `protobuf:"varint,3,opt,name=expected_next_event_id,json=expectedNextEventId,proto3" json:"expected_next_event_id,omitempty"` + CurrentBranchToken []byte `protobuf:"bytes,4,opt,name=current_branch_token,json=currentBranchToken,proto3" json:"current_branch_token,omitempty"` + VersionHistoryItem *v16.VersionHistoryItem `protobuf:"bytes,5,opt,name=version_history_item,json=versionHistoryItem,proto3" json:"version_history_item,omitempty"` } func (m *GetMutableStateRequest) Reset() { *m = GetMutableStateRequest{} } @@ -319,21 +320,28 @@ return nil } +func (m *GetMutableStateRequest) GetVersionHistoryItem() *v16.VersionHistoryItem { + if m != nil { + return m.VersionHistoryItem + } + return nil +} + type GetMutableStateResponse struct { Execution 
*v14.WorkflowExecution `protobuf:"bytes,1,opt,name=execution,proto3" json:"execution,omitempty"` WorkflowType *v14.WorkflowType `protobuf:"bytes,2,opt,name=workflow_type,json=workflowType,proto3" json:"workflow_type,omitempty"` NextEventId int64 `protobuf:"varint,3,opt,name=next_event_id,json=nextEventId,proto3" json:"next_event_id,omitempty"` PreviousStartedEventId int64 `protobuf:"varint,4,opt,name=previous_started_event_id,json=previousStartedEventId,proto3" json:"previous_started_event_id,omitempty"` LastFirstEventId int64 `protobuf:"varint,5,opt,name=last_first_event_id,json=lastFirstEventId,proto3" json:"last_first_event_id,omitempty"` - TaskQueue *v16.TaskQueue `protobuf:"bytes,6,opt,name=task_queue,json=taskQueue,proto3" json:"task_queue,omitempty"` - StickyTaskQueue *v16.TaskQueue `protobuf:"bytes,7,opt,name=sticky_task_queue,json=stickyTaskQueue,proto3" json:"sticky_task_queue,omitempty"` + TaskQueue *v17.TaskQueue `protobuf:"bytes,6,opt,name=task_queue,json=taskQueue,proto3" json:"task_queue,omitempty"` + StickyTaskQueue *v17.TaskQueue `protobuf:"bytes,7,opt,name=sticky_task_queue,json=stickyTaskQueue,proto3" json:"sticky_task_queue,omitempty"` // (-- api-linter: core::0140::prepositions=disabled // aip.dev/not-precedent: "to" is used to indicate interval. --) StickyTaskQueueScheduleToStartTimeout *time.Duration `protobuf:"bytes,11,opt,name=sticky_task_queue_schedule_to_start_timeout,json=stickyTaskQueueScheduleToStartTimeout,proto3,stdduration" json:"sticky_task_queue_schedule_to_start_timeout,omitempty"` CurrentBranchToken []byte `protobuf:"bytes,13,opt,name=current_branch_token,json=currentBranchToken,proto3" json:"current_branch_token,omitempty"` - WorkflowState v17.WorkflowExecutionState `protobuf:"varint,15,opt,name=workflow_state,json=workflowState,proto3,enum=temporal.server.api.enums.v1.WorkflowExecutionState" json:"workflow_state,omitempty"` + WorkflowState v18.WorkflowExecutionState `protobuf:"varint,15,opt,name=workflow_state,json=workflowState,proto3,enum=temporal.server.api.enums.v1.WorkflowExecutionState" json:"workflow_state,omitempty"` WorkflowStatus v12.WorkflowExecutionStatus `protobuf:"varint,16,opt,name=workflow_status,json=workflowStatus,proto3,enum=temporal.api.enums.v1.WorkflowExecutionStatus" json:"workflow_status,omitempty"` - VersionHistories *v18.VersionHistories `protobuf:"bytes,17,opt,name=version_histories,json=versionHistories,proto3" json:"version_histories,omitempty"` + VersionHistories *v16.VersionHistories `protobuf:"bytes,17,opt,name=version_histories,json=versionHistories,proto3" json:"version_histories,omitempty"` IsStickyTaskQueueEnabled bool `protobuf:"varint,18,opt,name=is_sticky_task_queue_enabled,json=isStickyTaskQueueEnabled,proto3" json:"is_sticky_task_queue_enabled,omitempty"` LastFirstEventTxnId int64 `protobuf:"varint,19,opt,name=last_first_event_txn_id,json=lastFirstEventTxnId,proto3" json:"last_first_event_txn_id,omitempty"` FirstExecutionRunId string `protobuf:"bytes,20,opt,name=first_execution_run_id,json=firstExecutionRunId,proto3" json:"first_execution_run_id,omitempty"` @@ -409,14 +417,14 @@ return 0 } -func (m *GetMutableStateResponse) GetTaskQueue() *v16.TaskQueue { +func (m *GetMutableStateResponse) GetTaskQueue() *v17.TaskQueue { if m != nil { return m.TaskQueue } return nil } -func (m *GetMutableStateResponse) GetStickyTaskQueue() *v16.TaskQueue { +func (m *GetMutableStateResponse) GetStickyTaskQueue() *v17.TaskQueue { if m != nil { return m.StickyTaskQueue } @@ -437,11 +445,11 @@ return nil } -func (m 
*GetMutableStateResponse) GetWorkflowState() v17.WorkflowExecutionState { +func (m *GetMutableStateResponse) GetWorkflowState() v18.WorkflowExecutionState { if m != nil { return m.WorkflowState } - return v17.WORKFLOW_EXECUTION_STATE_UNSPECIFIED + return v18.WORKFLOW_EXECUTION_STATE_UNSPECIFIED } func (m *GetMutableStateResponse) GetWorkflowStatus() v12.WorkflowExecutionStatus { @@ -451,7 +459,7 @@ return v12.WORKFLOW_EXECUTION_STATUS_UNSPECIFIED } -func (m *GetMutableStateResponse) GetVersionHistories() *v18.VersionHistories { +func (m *GetMutableStateResponse) GetVersionHistories() *v16.VersionHistories { if m != nil { return m.VersionHistories } @@ -487,10 +495,11 @@ } type PollMutableStateRequest struct { - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - Execution *v14.WorkflowExecution `protobuf:"bytes,2,opt,name=execution,proto3" json:"execution,omitempty"` - ExpectedNextEventId int64 `protobuf:"varint,3,opt,name=expected_next_event_id,json=expectedNextEventId,proto3" json:"expected_next_event_id,omitempty"` - CurrentBranchToken []byte `protobuf:"bytes,4,opt,name=current_branch_token,json=currentBranchToken,proto3" json:"current_branch_token,omitempty"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + Execution *v14.WorkflowExecution `protobuf:"bytes,2,opt,name=execution,proto3" json:"execution,omitempty"` + ExpectedNextEventId int64 `protobuf:"varint,3,opt,name=expected_next_event_id,json=expectedNextEventId,proto3" json:"expected_next_event_id,omitempty"` + CurrentBranchToken []byte `protobuf:"bytes,4,opt,name=current_branch_token,json=currentBranchToken,proto3" json:"current_branch_token,omitempty"` + VersionHistoryItem *v16.VersionHistoryItem `protobuf:"bytes,5,opt,name=version_history_item,json=versionHistoryItem,proto3" json:"version_history_item,omitempty"` } func (m *PollMutableStateRequest) Reset() { *m = PollMutableStateRequest{} } @@ -553,20 +562,27 @@ return nil } +func (m *PollMutableStateRequest) GetVersionHistoryItem() *v16.VersionHistoryItem { + if m != nil { + return m.VersionHistoryItem + } + return nil +} + type PollMutableStateResponse struct { Execution *v14.WorkflowExecution `protobuf:"bytes,1,opt,name=execution,proto3" json:"execution,omitempty"` WorkflowType *v14.WorkflowType `protobuf:"bytes,2,opt,name=workflow_type,json=workflowType,proto3" json:"workflow_type,omitempty"` NextEventId int64 `protobuf:"varint,3,opt,name=next_event_id,json=nextEventId,proto3" json:"next_event_id,omitempty"` PreviousStartedEventId int64 `protobuf:"varint,4,opt,name=previous_started_event_id,json=previousStartedEventId,proto3" json:"previous_started_event_id,omitempty"` LastFirstEventId int64 `protobuf:"varint,5,opt,name=last_first_event_id,json=lastFirstEventId,proto3" json:"last_first_event_id,omitempty"` - TaskQueue *v16.TaskQueue `protobuf:"bytes,6,opt,name=task_queue,json=taskQueue,proto3" json:"task_queue,omitempty"` - StickyTaskQueue *v16.TaskQueue `protobuf:"bytes,7,opt,name=sticky_task_queue,json=stickyTaskQueue,proto3" json:"sticky_task_queue,omitempty"` + TaskQueue *v17.TaskQueue `protobuf:"bytes,6,opt,name=task_queue,json=taskQueue,proto3" json:"task_queue,omitempty"` + StickyTaskQueue *v17.TaskQueue `protobuf:"bytes,7,opt,name=sticky_task_queue,json=stickyTaskQueue,proto3" json:"sticky_task_queue,omitempty"` // (-- api-linter: core::0140::prepositions=disabled // aip.dev/not-precedent: "to" is used to indicate interval. 
--) StickyTaskQueueScheduleToStartTimeout *time.Duration `protobuf:"bytes,11,opt,name=sticky_task_queue_schedule_to_start_timeout,json=stickyTaskQueueScheduleToStartTimeout,proto3,stdduration" json:"sticky_task_queue_schedule_to_start_timeout,omitempty"` CurrentBranchToken []byte `protobuf:"bytes,12,opt,name=current_branch_token,json=currentBranchToken,proto3" json:"current_branch_token,omitempty"` - VersionHistories *v18.VersionHistories `protobuf:"bytes,14,opt,name=version_histories,json=versionHistories,proto3" json:"version_histories,omitempty"` - WorkflowState v17.WorkflowExecutionState `protobuf:"varint,15,opt,name=workflow_state,json=workflowState,proto3,enum=temporal.server.api.enums.v1.WorkflowExecutionState" json:"workflow_state,omitempty"` + VersionHistories *v16.VersionHistories `protobuf:"bytes,14,opt,name=version_histories,json=versionHistories,proto3" json:"version_histories,omitempty"` + WorkflowState v18.WorkflowExecutionState `protobuf:"varint,15,opt,name=workflow_state,json=workflowState,proto3,enum=temporal.server.api.enums.v1.WorkflowExecutionState" json:"workflow_state,omitempty"` WorkflowStatus v12.WorkflowExecutionStatus `protobuf:"varint,16,opt,name=workflow_status,json=workflowStatus,proto3,enum=temporal.api.enums.v1.WorkflowExecutionStatus" json:"workflow_status,omitempty"` LastFirstEventTxnId int64 `protobuf:"varint,17,opt,name=last_first_event_txn_id,json=lastFirstEventTxnId,proto3" json:"last_first_event_txn_id,omitempty"` FirstExecutionRunId string `protobuf:"bytes,18,opt,name=first_execution_run_id,json=firstExecutionRunId,proto3" json:"first_execution_run_id,omitempty"` @@ -639,14 +655,14 @@ return 0 } -func (m *PollMutableStateResponse) GetTaskQueue() *v16.TaskQueue { +func (m *PollMutableStateResponse) GetTaskQueue() *v17.TaskQueue { if m != nil { return m.TaskQueue } return nil } -func (m *PollMutableStateResponse) GetStickyTaskQueue() *v16.TaskQueue { +func (m *PollMutableStateResponse) GetStickyTaskQueue() *v17.TaskQueue { if m != nil { return m.StickyTaskQueue } @@ -667,18 +683,18 @@ return nil } -func (m *PollMutableStateResponse) GetVersionHistories() *v18.VersionHistories { +func (m *PollMutableStateResponse) GetVersionHistories() *v16.VersionHistories { if m != nil { return m.VersionHistories } return nil } -func (m *PollMutableStateResponse) GetWorkflowState() v17.WorkflowExecutionState { +func (m *PollMutableStateResponse) GetWorkflowState() v18.WorkflowExecutionState { if m != nil { return m.WorkflowState } - return v17.WORKFLOW_EXECUTION_STATE_UNSPECIFIED + return v18.WORKFLOW_EXECUTION_STATE_UNSPECIFIED } func (m *PollMutableStateResponse) GetWorkflowStatus() v12.WorkflowExecutionStatus { @@ -888,14 +904,15 @@ NextEventId int64 `protobuf:"varint,5,opt,name=next_event_id,json=nextEventId,proto3" json:"next_event_id,omitempty"` Attempt int32 `protobuf:"varint,6,opt,name=attempt,proto3" json:"attempt,omitempty"` StickyExecutionEnabled bool `protobuf:"varint,7,opt,name=sticky_execution_enabled,json=stickyExecutionEnabled,proto3" json:"sticky_execution_enabled,omitempty"` - TransientWorkflowTask *v18.TransientWorkflowTaskInfo `protobuf:"bytes,8,opt,name=transient_workflow_task,json=transientWorkflowTask,proto3" json:"transient_workflow_task,omitempty"` - WorkflowExecutionTaskQueue *v16.TaskQueue `protobuf:"bytes,9,opt,name=workflow_execution_task_queue,json=workflowExecutionTaskQueue,proto3" json:"workflow_execution_task_queue,omitempty"` + TransientWorkflowTask *v16.TransientWorkflowTaskInfo 
`protobuf:"bytes,8,opt,name=transient_workflow_task,json=transientWorkflowTask,proto3" json:"transient_workflow_task,omitempty"` + WorkflowExecutionTaskQueue *v17.TaskQueue `protobuf:"bytes,9,opt,name=workflow_execution_task_queue,json=workflowExecutionTaskQueue,proto3" json:"workflow_execution_task_queue,omitempty"` BranchToken []byte `protobuf:"bytes,11,opt,name=branch_token,json=branchToken,proto3" json:"branch_token,omitempty"` ScheduledTime *time.Time `protobuf:"bytes,12,opt,name=scheduled_time,json=scheduledTime,proto3,stdtime" json:"scheduled_time,omitempty"` StartedTime *time.Time `protobuf:"bytes,13,opt,name=started_time,json=startedTime,proto3,stdtime" json:"started_time,omitempty"` Queries map[string]*v19.WorkflowQuery `protobuf:"bytes,14,rep,name=queries,proto3" json:"queries,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` Clock *v15.VectorClock `protobuf:"bytes,15,opt,name=clock,proto3" json:"clock,omitempty"` Messages []*v110.Message `protobuf:"bytes,16,rep,name=messages,proto3" json:"messages,omitempty"` + Version int64 `protobuf:"varint,17,opt,name=version,proto3" json:"version,omitempty"` } func (m *RecordWorkflowTaskStartedResponse) Reset() { *m = RecordWorkflowTaskStartedResponse{} } @@ -979,14 +996,14 @@ return false } -func (m *RecordWorkflowTaskStartedResponse) GetTransientWorkflowTask() *v18.TransientWorkflowTaskInfo { +func (m *RecordWorkflowTaskStartedResponse) GetTransientWorkflowTask() *v16.TransientWorkflowTaskInfo { if m != nil { return m.TransientWorkflowTask } return nil } -func (m *RecordWorkflowTaskStartedResponse) GetWorkflowExecutionTaskQueue() *v16.TaskQueue { +func (m *RecordWorkflowTaskStartedResponse) GetWorkflowExecutionTaskQueue() *v17.TaskQueue { if m != nil { return m.WorkflowExecutionTaskQueue } @@ -1035,6 +1052,13 @@ return nil } +func (m *RecordWorkflowTaskStartedResponse) GetVersion() int64 { + if m != nil { + return m.Version + } + return 0 +} + type RecordActivityTaskStartedRequest struct { NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` WorkflowExecution *v14.WorkflowExecution `protobuf:"bytes,2,opt,name=workflow_execution,json=workflowExecution,proto3" json:"workflow_execution,omitempty"` @@ -1136,6 +1160,7 @@ WorkflowType *v14.WorkflowType `protobuf:"bytes,6,opt,name=workflow_type,json=workflowType,proto3" json:"workflow_type,omitempty"` WorkflowNamespace string `protobuf:"bytes,7,opt,name=workflow_namespace,json=workflowNamespace,proto3" json:"workflow_namespace,omitempty"` Clock *v15.VectorClock `protobuf:"bytes,8,opt,name=clock,proto3" json:"clock,omitempty"` + Version int64 `protobuf:"varint,9,opt,name=version,proto3" json:"version,omitempty"` } func (m *RecordActivityTaskStartedResponse) Reset() { *m = RecordActivityTaskStartedResponse{} } @@ -1226,6 +1251,13 @@ return nil } +func (m *RecordActivityTaskStartedResponse) GetVersion() int64 { + if m != nil { + return m.Version + } + return 0 +} + type RespondWorkflowTaskCompletedRequest struct { NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` CompleteRequest *v1.RespondWorkflowTaskCompletedRequest `protobuf:"bytes,2,opt,name=complete_request,json=completeRequest,proto3" json:"complete_request,omitempty"` @@ -1422,6 +1454,117 @@ var xxx_messageInfo_RespondWorkflowTaskFailedResponse proto.InternalMessageInfo +type IsWorkflowTaskValidRequest struct { + NamespaceId string 
`protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + Execution *v14.WorkflowExecution `protobuf:"bytes,2,opt,name=execution,proto3" json:"execution,omitempty"` + Clock *v15.VectorClock `protobuf:"bytes,3,opt,name=clock,proto3" json:"clock,omitempty"` + ScheduledEventId int64 `protobuf:"varint,4,opt,name=scheduled_event_id,json=scheduledEventId,proto3" json:"scheduled_event_id,omitempty"` +} + +func (m *IsWorkflowTaskValidRequest) Reset() { *m = IsWorkflowTaskValidRequest{} } +func (*IsWorkflowTaskValidRequest) ProtoMessage() {} +func (*IsWorkflowTaskValidRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_b8c78c1d460a3711, []int{16} +} +func (m *IsWorkflowTaskValidRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IsWorkflowTaskValidRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_IsWorkflowTaskValidRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *IsWorkflowTaskValidRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_IsWorkflowTaskValidRequest.Merge(m, src) +} +func (m *IsWorkflowTaskValidRequest) XXX_Size() int { + return m.Size() +} +func (m *IsWorkflowTaskValidRequest) XXX_DiscardUnknown() { + xxx_messageInfo_IsWorkflowTaskValidRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_IsWorkflowTaskValidRequest proto.InternalMessageInfo + +func (m *IsWorkflowTaskValidRequest) GetNamespaceId() string { + if m != nil { + return m.NamespaceId + } + return "" +} + +func (m *IsWorkflowTaskValidRequest) GetExecution() *v14.WorkflowExecution { + if m != nil { + return m.Execution + } + return nil +} + +func (m *IsWorkflowTaskValidRequest) GetClock() *v15.VectorClock { + if m != nil { + return m.Clock + } + return nil +} + +func (m *IsWorkflowTaskValidRequest) GetScheduledEventId() int64 { + if m != nil { + return m.ScheduledEventId + } + return 0 +} + +type IsWorkflowTaskValidResponse struct { + // whether matching service can call history service to start the workflow task + IsValid bool `protobuf:"varint,1,opt,name=is_valid,json=isValid,proto3" json:"is_valid,omitempty"` +} + +func (m *IsWorkflowTaskValidResponse) Reset() { *m = IsWorkflowTaskValidResponse{} } +func (*IsWorkflowTaskValidResponse) ProtoMessage() {} +func (*IsWorkflowTaskValidResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_b8c78c1d460a3711, []int{17} +} +func (m *IsWorkflowTaskValidResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IsWorkflowTaskValidResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_IsWorkflowTaskValidResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *IsWorkflowTaskValidResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_IsWorkflowTaskValidResponse.Merge(m, src) +} +func (m *IsWorkflowTaskValidResponse) XXX_Size() int { + return m.Size() +} +func (m *IsWorkflowTaskValidResponse) XXX_DiscardUnknown() { + xxx_messageInfo_IsWorkflowTaskValidResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_IsWorkflowTaskValidResponse proto.InternalMessageInfo + +func (m *IsWorkflowTaskValidResponse) GetIsValid() bool { + if m != nil { + return m.IsValid + } + return false +} + type 
RecordActivityTaskHeartbeatRequest struct { NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` HeartbeatRequest *v1.RecordActivityTaskHeartbeatRequest `protobuf:"bytes,2,opt,name=heartbeat_request,json=heartbeatRequest,proto3" json:"heartbeat_request,omitempty"` @@ -1430,7 +1573,7 @@ func (m *RecordActivityTaskHeartbeatRequest) Reset() { *m = RecordActivityTaskHeartbeatRequest{} } func (*RecordActivityTaskHeartbeatRequest) ProtoMessage() {} func (*RecordActivityTaskHeartbeatRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{16} + return fileDescriptor_b8c78c1d460a3711, []int{18} } func (m *RecordActivityTaskHeartbeatRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1480,7 +1623,7 @@ func (m *RecordActivityTaskHeartbeatResponse) Reset() { *m = RecordActivityTaskHeartbeatResponse{} } func (*RecordActivityTaskHeartbeatResponse) ProtoMessage() {} func (*RecordActivityTaskHeartbeatResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{17} + return fileDescriptor_b8c78c1d460a3711, []int{19} } func (m *RecordActivityTaskHeartbeatResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1524,7 +1667,7 @@ func (m *RespondActivityTaskCompletedRequest) Reset() { *m = RespondActivityTaskCompletedRequest{} } func (*RespondActivityTaskCompletedRequest) ProtoMessage() {} func (*RespondActivityTaskCompletedRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{18} + return fileDescriptor_b8c78c1d460a3711, []int{20} } func (m *RespondActivityTaskCompletedRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1573,7 +1716,7 @@ func (m *RespondActivityTaskCompletedResponse) Reset() { *m = RespondActivityTaskCompletedResponse{} } func (*RespondActivityTaskCompletedResponse) ProtoMessage() {} func (*RespondActivityTaskCompletedResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{19} + return fileDescriptor_b8c78c1d460a3711, []int{21} } func (m *RespondActivityTaskCompletedResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1610,7 +1753,7 @@ func (m *RespondActivityTaskFailedRequest) Reset() { *m = RespondActivityTaskFailedRequest{} } func (*RespondActivityTaskFailedRequest) ProtoMessage() {} func (*RespondActivityTaskFailedRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{20} + return fileDescriptor_b8c78c1d460a3711, []int{22} } func (m *RespondActivityTaskFailedRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1659,7 +1802,7 @@ func (m *RespondActivityTaskFailedResponse) Reset() { *m = RespondActivityTaskFailedResponse{} } func (*RespondActivityTaskFailedResponse) ProtoMessage() {} func (*RespondActivityTaskFailedResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{21} + return fileDescriptor_b8c78c1d460a3711, []int{23} } func (m *RespondActivityTaskFailedResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1696,7 +1839,7 @@ func (m *RespondActivityTaskCanceledRequest) Reset() { *m = RespondActivityTaskCanceledRequest{} } func (*RespondActivityTaskCanceledRequest) ProtoMessage() {} func (*RespondActivityTaskCanceledRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{22} + return fileDescriptor_b8c78c1d460a3711, []int{24} } func (m *RespondActivityTaskCanceledRequest) XXX_Unmarshal(b []byte) error { return 
m.Unmarshal(b) @@ -1745,7 +1888,7 @@ func (m *RespondActivityTaskCanceledResponse) Reset() { *m = RespondActivityTaskCanceledResponse{} } func (*RespondActivityTaskCanceledResponse) ProtoMessage() {} func (*RespondActivityTaskCanceledResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{23} + return fileDescriptor_b8c78c1d460a3711, []int{25} } func (m *RespondActivityTaskCanceledResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1774,6 +1917,117 @@ var xxx_messageInfo_RespondActivityTaskCanceledResponse proto.InternalMessageInfo +type IsActivityTaskValidRequest struct { + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + Execution *v14.WorkflowExecution `protobuf:"bytes,2,opt,name=execution,proto3" json:"execution,omitempty"` + Clock *v15.VectorClock `protobuf:"bytes,3,opt,name=clock,proto3" json:"clock,omitempty"` + ScheduledEventId int64 `protobuf:"varint,4,opt,name=scheduled_event_id,json=scheduledEventId,proto3" json:"scheduled_event_id,omitempty"` +} + +func (m *IsActivityTaskValidRequest) Reset() { *m = IsActivityTaskValidRequest{} } +func (*IsActivityTaskValidRequest) ProtoMessage() {} +func (*IsActivityTaskValidRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_b8c78c1d460a3711, []int{26} +} +func (m *IsActivityTaskValidRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IsActivityTaskValidRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_IsActivityTaskValidRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *IsActivityTaskValidRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_IsActivityTaskValidRequest.Merge(m, src) +} +func (m *IsActivityTaskValidRequest) XXX_Size() int { + return m.Size() +} +func (m *IsActivityTaskValidRequest) XXX_DiscardUnknown() { + xxx_messageInfo_IsActivityTaskValidRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_IsActivityTaskValidRequest proto.InternalMessageInfo + +func (m *IsActivityTaskValidRequest) GetNamespaceId() string { + if m != nil { + return m.NamespaceId + } + return "" +} + +func (m *IsActivityTaskValidRequest) GetExecution() *v14.WorkflowExecution { + if m != nil { + return m.Execution + } + return nil +} + +func (m *IsActivityTaskValidRequest) GetClock() *v15.VectorClock { + if m != nil { + return m.Clock + } + return nil +} + +func (m *IsActivityTaskValidRequest) GetScheduledEventId() int64 { + if m != nil { + return m.ScheduledEventId + } + return 0 +} + +type IsActivityTaskValidResponse struct { + // whether matching service can call history service to start the activity task + IsValid bool `protobuf:"varint,1,opt,name=is_valid,json=isValid,proto3" json:"is_valid,omitempty"` +} + +func (m *IsActivityTaskValidResponse) Reset() { *m = IsActivityTaskValidResponse{} } +func (*IsActivityTaskValidResponse) ProtoMessage() {} +func (*IsActivityTaskValidResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_b8c78c1d460a3711, []int{27} +} +func (m *IsActivityTaskValidResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IsActivityTaskValidResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_IsActivityTaskValidResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := 
m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *IsActivityTaskValidResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_IsActivityTaskValidResponse.Merge(m, src) +} +func (m *IsActivityTaskValidResponse) XXX_Size() int { + return m.Size() +} +func (m *IsActivityTaskValidResponse) XXX_DiscardUnknown() { + xxx_messageInfo_IsActivityTaskValidResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_IsActivityTaskValidResponse proto.InternalMessageInfo + +func (m *IsActivityTaskValidResponse) GetIsValid() bool { + if m != nil { + return m.IsValid + } + return false +} + type SignalWorkflowExecutionRequest struct { NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` SignalRequest *v1.SignalWorkflowExecutionRequest `protobuf:"bytes,2,opt,name=signal_request,json=signalRequest,proto3" json:"signal_request,omitempty"` @@ -1784,7 +2038,7 @@ func (m *SignalWorkflowExecutionRequest) Reset() { *m = SignalWorkflowExecutionRequest{} } func (*SignalWorkflowExecutionRequest) ProtoMessage() {} func (*SignalWorkflowExecutionRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{24} + return fileDescriptor_b8c78c1d460a3711, []int{28} } func (m *SignalWorkflowExecutionRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1847,7 +2101,7 @@ func (m *SignalWorkflowExecutionResponse) Reset() { *m = SignalWorkflowExecutionResponse{} } func (*SignalWorkflowExecutionResponse) ProtoMessage() {} func (*SignalWorkflowExecutionResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{25} + return fileDescriptor_b8c78c1d460a3711, []int{29} } func (m *SignalWorkflowExecutionResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1888,7 +2142,7 @@ } func (*SignalWithStartWorkflowExecutionRequest) ProtoMessage() {} func (*SignalWithStartWorkflowExecutionRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{26} + return fileDescriptor_b8c78c1d460a3711, []int{30} } func (m *SignalWithStartWorkflowExecutionRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1940,7 +2194,7 @@ } func (*SignalWithStartWorkflowExecutionResponse) ProtoMessage() {} func (*SignalWithStartWorkflowExecutionResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{27} + return fileDescriptor_b8c78c1d460a3711, []int{31} } func (m *SignalWithStartWorkflowExecutionResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1985,7 +2239,7 @@ func (m *RemoveSignalMutableStateRequest) Reset() { *m = RemoveSignalMutableStateRequest{} } func (*RemoveSignalMutableStateRequest) ProtoMessage() {} func (*RemoveSignalMutableStateRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{28} + return fileDescriptor_b8c78c1d460a3711, []int{32} } func (m *RemoveSignalMutableStateRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2041,7 +2295,7 @@ func (m *RemoveSignalMutableStateResponse) Reset() { *m = RemoveSignalMutableStateResponse{} } func (*RemoveSignalMutableStateResponse) ProtoMessage() {} func (*RemoveSignalMutableStateResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{29} + return fileDescriptor_b8c78c1d460a3711, []int{33} } func (m *RemoveSignalMutableStateResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2080,7 +2334,7 @@ func (m *TerminateWorkflowExecutionRequest) 
Reset() { *m = TerminateWorkflowExecutionRequest{} } func (*TerminateWorkflowExecutionRequest) ProtoMessage() {} func (*TerminateWorkflowExecutionRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{30} + return fileDescriptor_b8c78c1d460a3711, []int{34} } func (m *TerminateWorkflowExecutionRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2143,7 +2397,7 @@ func (m *TerminateWorkflowExecutionResponse) Reset() { *m = TerminateWorkflowExecutionResponse{} } func (*TerminateWorkflowExecutionResponse) ProtoMessage() {} func (*TerminateWorkflowExecutionResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{31} + return fileDescriptor_b8c78c1d460a3711, []int{35} } func (m *TerminateWorkflowExecutionResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2182,7 +2436,7 @@ func (m *DeleteWorkflowExecutionRequest) Reset() { *m = DeleteWorkflowExecutionRequest{} } func (*DeleteWorkflowExecutionRequest) ProtoMessage() {} func (*DeleteWorkflowExecutionRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{32} + return fileDescriptor_b8c78c1d460a3711, []int{36} } func (m *DeleteWorkflowExecutionRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2245,7 +2499,7 @@ func (m *DeleteWorkflowExecutionResponse) Reset() { *m = DeleteWorkflowExecutionResponse{} } func (*DeleteWorkflowExecutionResponse) ProtoMessage() {} func (*DeleteWorkflowExecutionResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{33} + return fileDescriptor_b8c78c1d460a3711, []int{37} } func (m *DeleteWorkflowExecutionResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2282,7 +2536,7 @@ func (m *ResetWorkflowExecutionRequest) Reset() { *m = ResetWorkflowExecutionRequest{} } func (*ResetWorkflowExecutionRequest) ProtoMessage() {} func (*ResetWorkflowExecutionRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{34} + return fileDescriptor_b8c78c1d460a3711, []int{38} } func (m *ResetWorkflowExecutionRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2332,7 +2586,7 @@ func (m *ResetWorkflowExecutionResponse) Reset() { *m = ResetWorkflowExecutionResponse{} } func (*ResetWorkflowExecutionResponse) ProtoMessage() {} func (*ResetWorkflowExecutionResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{35} + return fileDescriptor_b8c78c1d460a3711, []int{39} } func (m *ResetWorkflowExecutionResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2379,7 +2633,7 @@ func (m *RequestCancelWorkflowExecutionRequest) Reset() { *m = RequestCancelWorkflowExecutionRequest{} } func (*RequestCancelWorkflowExecutionRequest) ProtoMessage() {} func (*RequestCancelWorkflowExecutionRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{36} + return fileDescriptor_b8c78c1d460a3711, []int{40} } func (m *RequestCancelWorkflowExecutionRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2451,7 +2705,7 @@ } func (*RequestCancelWorkflowExecutionResponse) ProtoMessage() {} func (*RequestCancelWorkflowExecutionResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{37} + return fileDescriptor_b8c78c1d460a3711, []int{41} } func (m *RequestCancelWorkflowExecutionResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2491,7 +2745,7 @@ func (m *ScheduleWorkflowTaskRequest) Reset() { *m = 
ScheduleWorkflowTaskRequest{} } func (*ScheduleWorkflowTaskRequest) ProtoMessage() {} func (*ScheduleWorkflowTaskRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{38} + return fileDescriptor_b8c78c1d460a3711, []int{42} } func (m *ScheduleWorkflowTaskRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2561,7 +2815,7 @@ func (m *ScheduleWorkflowTaskResponse) Reset() { *m = ScheduleWorkflowTaskResponse{} } func (*ScheduleWorkflowTaskResponse) ProtoMessage() {} func (*ScheduleWorkflowTaskResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{39} + return fileDescriptor_b8c78c1d460a3711, []int{43} } func (m *ScheduleWorkflowTaskResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2601,7 +2855,7 @@ } func (*VerifyFirstWorkflowTaskScheduledRequest) ProtoMessage() {} func (*VerifyFirstWorkflowTaskScheduledRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{40} + return fileDescriptor_b8c78c1d460a3711, []int{44} } func (m *VerifyFirstWorkflowTaskScheduledRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2659,7 +2913,7 @@ } func (*VerifyFirstWorkflowTaskScheduledResponse) ProtoMessage() {} func (*VerifyFirstWorkflowTaskScheduledResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{41} + return fileDescriptor_b8c78c1d460a3711, []int{45} } func (m *VerifyFirstWorkflowTaskScheduledResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2696,9 +2950,9 @@ // child creates multiple runs through ContinueAsNew before finally completing. type RecordChildExecutionCompletedRequest struct { NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - WorkflowExecution *v14.WorkflowExecution `protobuf:"bytes,2,opt,name=workflow_execution,json=workflowExecution,proto3" json:"workflow_execution,omitempty"` + ParentExecution *v14.WorkflowExecution `protobuf:"bytes,2,opt,name=parent_execution,json=parentExecution,proto3" json:"parent_execution,omitempty"` ParentInitiatedId int64 `protobuf:"varint,3,opt,name=parent_initiated_id,json=parentInitiatedId,proto3" json:"parent_initiated_id,omitempty"` - CompletedExecution *v14.WorkflowExecution `protobuf:"bytes,4,opt,name=completed_execution,json=completedExecution,proto3" json:"completed_execution,omitempty"` + ChildExecution *v14.WorkflowExecution `protobuf:"bytes,4,opt,name=child_execution,json=childExecution,proto3" json:"child_execution,omitempty"` CompletionEvent *v111.HistoryEvent `protobuf:"bytes,5,opt,name=completion_event,json=completionEvent,proto3" json:"completion_event,omitempty"` Clock *v15.VectorClock `protobuf:"bytes,6,opt,name=clock,proto3" json:"clock,omitempty"` ParentInitiatedVersion int64 `protobuf:"varint,7,opt,name=parent_initiated_version,json=parentInitiatedVersion,proto3" json:"parent_initiated_version,omitempty"` @@ -2707,7 +2961,7 @@ func (m *RecordChildExecutionCompletedRequest) Reset() { *m = RecordChildExecutionCompletedRequest{} } func (*RecordChildExecutionCompletedRequest) ProtoMessage() {} func (*RecordChildExecutionCompletedRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{42} + return fileDescriptor_b8c78c1d460a3711, []int{46} } func (m *RecordChildExecutionCompletedRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2743,9 +2997,9 @@ return "" } -func (m *RecordChildExecutionCompletedRequest) GetWorkflowExecution() 
*v14.WorkflowExecution { +func (m *RecordChildExecutionCompletedRequest) GetParentExecution() *v14.WorkflowExecution { if m != nil { - return m.WorkflowExecution + return m.ParentExecution } return nil } @@ -2757,9 +3011,9 @@ return 0 } -func (m *RecordChildExecutionCompletedRequest) GetCompletedExecution() *v14.WorkflowExecution { +func (m *RecordChildExecutionCompletedRequest) GetChildExecution() *v14.WorkflowExecution { if m != nil { - return m.CompletedExecution + return m.ChildExecution } return nil } @@ -2791,7 +3045,7 @@ func (m *RecordChildExecutionCompletedResponse) Reset() { *m = RecordChildExecutionCompletedResponse{} } func (*RecordChildExecutionCompletedResponse) ProtoMessage() {} func (*RecordChildExecutionCompletedResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{43} + return fileDescriptor_b8c78c1d460a3711, []int{47} } func (m *RecordChildExecutionCompletedResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2834,7 +3088,7 @@ } func (*VerifyChildExecutionCompletionRecordedRequest) ProtoMessage() {} func (*VerifyChildExecutionCompletionRecordedRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{44} + return fileDescriptor_b8c78c1d460a3711, []int{48} } func (m *VerifyChildExecutionCompletionRecordedRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2913,7 +3167,7 @@ } func (*VerifyChildExecutionCompletionRecordedResponse) ProtoMessage() {} func (*VerifyChildExecutionCompletionRecordedResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{45} + return fileDescriptor_b8c78c1d460a3711, []int{49} } func (m *VerifyChildExecutionCompletionRecordedResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2950,7 +3204,7 @@ func (m *DescribeWorkflowExecutionRequest) Reset() { *m = DescribeWorkflowExecutionRequest{} } func (*DescribeWorkflowExecutionRequest) ProtoMessage() {} func (*DescribeWorkflowExecutionRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{46} + return fileDescriptor_b8c78c1d460a3711, []int{50} } func (m *DescribeWorkflowExecutionRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3004,7 +3258,7 @@ func (m *DescribeWorkflowExecutionResponse) Reset() { *m = DescribeWorkflowExecutionResponse{} } func (*DescribeWorkflowExecutionResponse) ProtoMessage() {} func (*DescribeWorkflowExecutionResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{47} + return fileDescriptor_b8c78c1d460a3711, []int{51} } func (m *DescribeWorkflowExecutionResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3071,7 +3325,7 @@ type ReplicateEventsV2Request struct { NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` WorkflowExecution *v14.WorkflowExecution `protobuf:"bytes,2,opt,name=workflow_execution,json=workflowExecution,proto3" json:"workflow_execution,omitempty"` - VersionHistoryItems []*v18.VersionHistoryItem `protobuf:"bytes,3,rep,name=version_history_items,json=versionHistoryItems,proto3" json:"version_history_items,omitempty"` + VersionHistoryItems []*v16.VersionHistoryItem `protobuf:"bytes,3,rep,name=version_history_items,json=versionHistoryItems,proto3" json:"version_history_items,omitempty"` Events *v14.DataBlob `protobuf:"bytes,4,opt,name=events,proto3" json:"events,omitempty"` // New run events does not need version history since there is no prior events. 
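// Editor's note, not part of the upstream hunks: the change above renames two
// RecordChildExecutionCompletedRequest fields while keeping their proto field
// numbers (2 and 4), so the binary protobuf encoding is unchanged, but the Go
// identifiers and JSON names do change and compiled callers must be updated.
// A minimal, hypothetical caller-side sketch of that rename (the req variable
// and where it comes from are assumed, not taken from this file):
//
//	// generated code up to v1.21.5
//	parent := req.GetWorkflowExecution()
//	child := req.GetCompletedExecution()
//
//	// generated code from v1.22.5 onwards
//	parent := req.GetParentExecution()
//	child := req.GetChildExecution()
//
// Both accessors stay nil-safe: as the getter bodies above show, they return
// nil when the receiver or the field is unset.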
NewRunEvents *v14.DataBlob `protobuf:"bytes,5,opt,name=new_run_events,json=newRunEvents,proto3" json:"new_run_events,omitempty"` @@ -3081,7 +3335,7 @@ func (m *ReplicateEventsV2Request) Reset() { *m = ReplicateEventsV2Request{} } func (*ReplicateEventsV2Request) ProtoMessage() {} func (*ReplicateEventsV2Request) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{48} + return fileDescriptor_b8c78c1d460a3711, []int{52} } func (m *ReplicateEventsV2Request) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3124,7 +3378,7 @@ return nil } -func (m *ReplicateEventsV2Request) GetVersionHistoryItems() []*v18.VersionHistoryItem { +func (m *ReplicateEventsV2Request) GetVersionHistoryItems() []*v16.VersionHistoryItem { if m != nil { return m.VersionHistoryItems } @@ -3158,7 +3412,7 @@ func (m *ReplicateEventsV2Response) Reset() { *m = ReplicateEventsV2Response{} } func (*ReplicateEventsV2Response) ProtoMessage() {} func (*ReplicateEventsV2Response) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{49} + return fileDescriptor_b8c78c1d460a3711, []int{53} } func (m *ReplicateEventsV2Response) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3196,7 +3450,7 @@ func (m *ReplicateWorkflowStateRequest) Reset() { *m = ReplicateWorkflowStateRequest{} } func (*ReplicateWorkflowStateRequest) ProtoMessage() {} func (*ReplicateWorkflowStateRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{50} + return fileDescriptor_b8c78c1d460a3711, []int{54} } func (m *ReplicateWorkflowStateRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3252,7 +3506,7 @@ func (m *ReplicateWorkflowStateResponse) Reset() { *m = ReplicateWorkflowStateResponse{} } func (*ReplicateWorkflowStateResponse) ProtoMessage() {} func (*ReplicateWorkflowStateResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{51} + return fileDescriptor_b8c78c1d460a3711, []int{55} } func (m *ReplicateWorkflowStateResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3290,7 +3544,7 @@ func (m *SyncShardStatusRequest) Reset() { *m = SyncShardStatusRequest{} } func (*SyncShardStatusRequest) ProtoMessage() {} func (*SyncShardStatusRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{52} + return fileDescriptor_b8c78c1d460a3711, []int{56} } func (m *SyncShardStatusRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3346,7 +3600,7 @@ func (m *SyncShardStatusResponse) Reset() { *m = SyncShardStatusResponse{} } func (*SyncShardStatusResponse) ProtoMessage() {} func (*SyncShardStatusResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{53} + return fileDescriptor_b8c78c1d460a3711, []int{57} } func (m *SyncShardStatusResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3389,14 +3643,14 @@ Attempt int32 `protobuf:"varint,11,opt,name=attempt,proto3" json:"attempt,omitempty"` LastFailure *v13.Failure `protobuf:"bytes,12,opt,name=last_failure,json=lastFailure,proto3" json:"last_failure,omitempty"` LastWorkerIdentity string `protobuf:"bytes,13,opt,name=last_worker_identity,json=lastWorkerIdentity,proto3" json:"last_worker_identity,omitempty"` - VersionHistory *v18.VersionHistory `protobuf:"bytes,14,opt,name=version_history,json=versionHistory,proto3" json:"version_history,omitempty"` + VersionHistory *v16.VersionHistory `protobuf:"bytes,14,opt,name=version_history,json=versionHistory,proto3" 
json:"version_history,omitempty"` BaseExecutionInfo *v11.BaseExecutionInfo `protobuf:"bytes,15,opt,name=base_execution_info,json=baseExecutionInfo,proto3" json:"base_execution_info,omitempty"` } func (m *SyncActivityRequest) Reset() { *m = SyncActivityRequest{} } func (*SyncActivityRequest) ProtoMessage() {} func (*SyncActivityRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{54} + return fileDescriptor_b8c78c1d460a3711, []int{58} } func (m *SyncActivityRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3516,7 +3770,7 @@ return "" } -func (m *SyncActivityRequest) GetVersionHistory() *v18.VersionHistory { +func (m *SyncActivityRequest) GetVersionHistory() *v16.VersionHistory { if m != nil { return m.VersionHistory } @@ -3536,7 +3790,7 @@ func (m *SyncActivityResponse) Reset() { *m = SyncActivityResponse{} } func (*SyncActivityResponse) ProtoMessage() {} func (*SyncActivityResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{55} + return fileDescriptor_b8c78c1d460a3711, []int{59} } func (m *SyncActivityResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3573,7 +3827,7 @@ func (m *DescribeMutableStateRequest) Reset() { *m = DescribeMutableStateRequest{} } func (*DescribeMutableStateRequest) ProtoMessage() {} func (*DescribeMutableStateRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{56} + return fileDescriptor_b8c78c1d460a3711, []int{60} } func (m *DescribeMutableStateRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3624,7 +3878,7 @@ func (m *DescribeMutableStateResponse) Reset() { *m = DescribeMutableStateResponse{} } func (*DescribeMutableStateResponse) ProtoMessage() {} func (*DescribeMutableStateResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{57} + return fileDescriptor_b8c78c1d460a3711, []int{61} } func (m *DescribeMutableStateResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3679,7 +3933,7 @@ func (m *DescribeHistoryHostRequest) Reset() { *m = DescribeHistoryHostRequest{} } func (*DescribeHistoryHostRequest) ProtoMessage() {} func (*DescribeHistoryHostRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{58} + return fileDescriptor_b8c78c1d460a3711, []int{62} } func (m *DescribeHistoryHostRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3746,7 +4000,7 @@ func (m *DescribeHistoryHostResponse) Reset() { *m = DescribeHistoryHostResponse{} } func (*DescribeHistoryHostResponse) ProtoMessage() {} func (*DescribeHistoryHostResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{59} + return fileDescriptor_b8c78c1d460a3711, []int{63} } func (m *DescribeHistoryHostResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3810,7 +4064,7 @@ func (m *CloseShardRequest) Reset() { *m = CloseShardRequest{} } func (*CloseShardRequest) ProtoMessage() {} func (*CloseShardRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{60} + return fileDescriptor_b8c78c1d460a3711, []int{64} } func (m *CloseShardRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3852,7 +4106,7 @@ func (m *CloseShardResponse) Reset() { *m = CloseShardResponse{} } func (*CloseShardResponse) ProtoMessage() {} func (*CloseShardResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{61} + return fileDescriptor_b8c78c1d460a3711, []int{65} } func 
(m *CloseShardResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3888,7 +4142,7 @@ func (m *GetShardRequest) Reset() { *m = GetShardRequest{} } func (*GetShardRequest) ProtoMessage() {} func (*GetShardRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{62} + return fileDescriptor_b8c78c1d460a3711, []int{66} } func (m *GetShardRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3931,7 +4185,7 @@ func (m *GetShardResponse) Reset() { *m = GetShardResponse{} } func (*GetShardResponse) ProtoMessage() {} func (*GetShardResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{63} + return fileDescriptor_b8c78c1d460a3711, []int{67} } func (m *GetShardResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3969,7 +4223,7 @@ type RemoveTaskRequest struct { ShardId int32 `protobuf:"varint,1,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"` - Category v17.TaskCategory `protobuf:"varint,2,opt,name=category,proto3,enum=temporal.server.api.enums.v1.TaskCategory" json:"category,omitempty"` + Category v18.TaskCategory `protobuf:"varint,2,opt,name=category,proto3,enum=temporal.server.api.enums.v1.TaskCategory" json:"category,omitempty"` TaskId int64 `protobuf:"varint,3,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` VisibilityTime *time.Time `protobuf:"bytes,4,opt,name=visibility_time,json=visibilityTime,proto3,stdtime" json:"visibility_time,omitempty"` } @@ -3977,7 +4231,7 @@ func (m *RemoveTaskRequest) Reset() { *m = RemoveTaskRequest{} } func (*RemoveTaskRequest) ProtoMessage() {} func (*RemoveTaskRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{64} + return fileDescriptor_b8c78c1d460a3711, []int{68} } func (m *RemoveTaskRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4013,11 +4267,11 @@ return 0 } -func (m *RemoveTaskRequest) GetCategory() v17.TaskCategory { +func (m *RemoveTaskRequest) GetCategory() v18.TaskCategory { if m != nil { return m.Category } - return v17.TASK_CATEGORY_UNSPECIFIED + return v18.TASK_CATEGORY_UNSPECIFIED } func (m *RemoveTaskRequest) GetTaskId() int64 { @@ -4040,7 +4294,7 @@ func (m *RemoveTaskResponse) Reset() { *m = RemoveTaskResponse{} } func (*RemoveTaskResponse) ProtoMessage() {} func (*RemoveTaskResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{65} + return fileDescriptor_b8c78c1d460a3711, []int{69} } func (m *RemoveTaskResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4077,7 +4331,7 @@ func (m *GetReplicationMessagesRequest) Reset() { *m = GetReplicationMessagesRequest{} } func (*GetReplicationMessagesRequest) ProtoMessage() {} func (*GetReplicationMessagesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{66} + return fileDescriptor_b8c78c1d460a3711, []int{70} } func (m *GetReplicationMessagesRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4127,7 +4381,7 @@ func (m *GetReplicationMessagesResponse) Reset() { *m = GetReplicationMessagesResponse{} } func (*GetReplicationMessagesResponse) ProtoMessage() {} func (*GetReplicationMessagesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{67} + return fileDescriptor_b8c78c1d460a3711, []int{71} } func (m *GetReplicationMessagesResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4170,7 +4424,7 @@ func (m *GetDLQReplicationMessagesRequest) Reset() { *m = 
GetDLQReplicationMessagesRequest{} } func (*GetDLQReplicationMessagesRequest) ProtoMessage() {} func (*GetDLQReplicationMessagesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{68} + return fileDescriptor_b8c78c1d460a3711, []int{72} } func (m *GetDLQReplicationMessagesRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4213,7 +4467,7 @@ func (m *GetDLQReplicationMessagesResponse) Reset() { *m = GetDLQReplicationMessagesResponse{} } func (*GetDLQReplicationMessagesResponse) ProtoMessage() {} func (*GetDLQReplicationMessagesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{69} + return fileDescriptor_b8c78c1d460a3711, []int{73} } func (m *GetDLQReplicationMessagesResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4257,7 +4511,7 @@ func (m *QueryWorkflowRequest) Reset() { *m = QueryWorkflowRequest{} } func (*QueryWorkflowRequest) ProtoMessage() {} func (*QueryWorkflowRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{70} + return fileDescriptor_b8c78c1d460a3711, []int{74} } func (m *QueryWorkflowRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4307,7 +4561,7 @@ func (m *QueryWorkflowResponse) Reset() { *m = QueryWorkflowResponse{} } func (*QueryWorkflowResponse) ProtoMessage() {} func (*QueryWorkflowResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{71} + return fileDescriptor_b8c78c1d460a3711, []int{75} } func (m *QueryWorkflowResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4351,7 +4605,7 @@ func (m *ReapplyEventsRequest) Reset() { *m = ReapplyEventsRequest{} } func (*ReapplyEventsRequest) ProtoMessage() {} func (*ReapplyEventsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{72} + return fileDescriptor_b8c78c1d460a3711, []int{76} } func (m *ReapplyEventsRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4400,7 +4654,7 @@ func (m *ReapplyEventsResponse) Reset() { *m = ReapplyEventsResponse{} } func (*ReapplyEventsResponse) ProtoMessage() {} func (*ReapplyEventsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{73} + return fileDescriptor_b8c78c1d460a3711, []int{77} } func (m *ReapplyEventsResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4430,7 +4684,7 @@ var xxx_messageInfo_ReapplyEventsResponse proto.InternalMessageInfo type GetDLQMessagesRequest struct { - Type v17.DeadLetterQueueType `protobuf:"varint,1,opt,name=type,proto3,enum=temporal.server.api.enums.v1.DeadLetterQueueType" json:"type,omitempty"` + Type v18.DeadLetterQueueType `protobuf:"varint,1,opt,name=type,proto3,enum=temporal.server.api.enums.v1.DeadLetterQueueType" json:"type,omitempty"` ShardId int32 `protobuf:"varint,2,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"` SourceCluster string `protobuf:"bytes,3,opt,name=source_cluster,json=sourceCluster,proto3" json:"source_cluster,omitempty"` InclusiveEndMessageId int64 `protobuf:"varint,4,opt,name=inclusive_end_message_id,json=inclusiveEndMessageId,proto3" json:"inclusive_end_message_id,omitempty"` @@ -4441,7 +4695,7 @@ func (m *GetDLQMessagesRequest) Reset() { *m = GetDLQMessagesRequest{} } func (*GetDLQMessagesRequest) ProtoMessage() {} func (*GetDLQMessagesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{74} + return fileDescriptor_b8c78c1d460a3711, []int{78} } func (m *GetDLQMessagesRequest) 
XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4470,11 +4724,11 @@ var xxx_messageInfo_GetDLQMessagesRequest proto.InternalMessageInfo -func (m *GetDLQMessagesRequest) GetType() v17.DeadLetterQueueType { +func (m *GetDLQMessagesRequest) GetType() v18.DeadLetterQueueType { if m != nil { return m.Type } - return v17.DEAD_LETTER_QUEUE_TYPE_UNSPECIFIED + return v18.DEAD_LETTER_QUEUE_TYPE_UNSPECIFIED } func (m *GetDLQMessagesRequest) GetShardId() int32 { @@ -4513,7 +4767,7 @@ } type GetDLQMessagesResponse struct { - Type v17.DeadLetterQueueType `protobuf:"varint,1,opt,name=type,proto3,enum=temporal.server.api.enums.v1.DeadLetterQueueType" json:"type,omitempty"` + Type v18.DeadLetterQueueType `protobuf:"varint,1,opt,name=type,proto3,enum=temporal.server.api.enums.v1.DeadLetterQueueType" json:"type,omitempty"` ReplicationTasks []*v115.ReplicationTask `protobuf:"bytes,2,rep,name=replication_tasks,json=replicationTasks,proto3" json:"replication_tasks,omitempty"` NextPageToken []byte `protobuf:"bytes,3,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` ReplicationTasksInfo []*v115.ReplicationTaskInfo `protobuf:"bytes,4,rep,name=replication_tasks_info,json=replicationTasksInfo,proto3" json:"replication_tasks_info,omitempty"` @@ -4522,7 +4776,7 @@ func (m *GetDLQMessagesResponse) Reset() { *m = GetDLQMessagesResponse{} } func (*GetDLQMessagesResponse) ProtoMessage() {} func (*GetDLQMessagesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{75} + return fileDescriptor_b8c78c1d460a3711, []int{79} } func (m *GetDLQMessagesResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4551,11 +4805,11 @@ var xxx_messageInfo_GetDLQMessagesResponse proto.InternalMessageInfo -func (m *GetDLQMessagesResponse) GetType() v17.DeadLetterQueueType { +func (m *GetDLQMessagesResponse) GetType() v18.DeadLetterQueueType { if m != nil { return m.Type } - return v17.DEAD_LETTER_QUEUE_TYPE_UNSPECIFIED + return v18.DEAD_LETTER_QUEUE_TYPE_UNSPECIFIED } func (m *GetDLQMessagesResponse) GetReplicationTasks() []*v115.ReplicationTask { @@ -4580,7 +4834,7 @@ } type PurgeDLQMessagesRequest struct { - Type v17.DeadLetterQueueType `protobuf:"varint,1,opt,name=type,proto3,enum=temporal.server.api.enums.v1.DeadLetterQueueType" json:"type,omitempty"` + Type v18.DeadLetterQueueType `protobuf:"varint,1,opt,name=type,proto3,enum=temporal.server.api.enums.v1.DeadLetterQueueType" json:"type,omitempty"` ShardId int32 `protobuf:"varint,2,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"` SourceCluster string `protobuf:"bytes,3,opt,name=source_cluster,json=sourceCluster,proto3" json:"source_cluster,omitempty"` InclusiveEndMessageId int64 `protobuf:"varint,4,opt,name=inclusive_end_message_id,json=inclusiveEndMessageId,proto3" json:"inclusive_end_message_id,omitempty"` @@ -4589,7 +4843,7 @@ func (m *PurgeDLQMessagesRequest) Reset() { *m = PurgeDLQMessagesRequest{} } func (*PurgeDLQMessagesRequest) ProtoMessage() {} func (*PurgeDLQMessagesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{76} + return fileDescriptor_b8c78c1d460a3711, []int{80} } func (m *PurgeDLQMessagesRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4618,11 +4872,11 @@ var xxx_messageInfo_PurgeDLQMessagesRequest proto.InternalMessageInfo -func (m *PurgeDLQMessagesRequest) GetType() v17.DeadLetterQueueType { +func (m *PurgeDLQMessagesRequest) GetType() v18.DeadLetterQueueType { if m != nil { return m.Type } - return 
v17.DEAD_LETTER_QUEUE_TYPE_UNSPECIFIED + return v18.DEAD_LETTER_QUEUE_TYPE_UNSPECIFIED } func (m *PurgeDLQMessagesRequest) GetShardId() int32 { @@ -4652,7 +4906,7 @@ func (m *PurgeDLQMessagesResponse) Reset() { *m = PurgeDLQMessagesResponse{} } func (*PurgeDLQMessagesResponse) ProtoMessage() {} func (*PurgeDLQMessagesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{77} + return fileDescriptor_b8c78c1d460a3711, []int{81} } func (m *PurgeDLQMessagesResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4682,7 +4936,7 @@ var xxx_messageInfo_PurgeDLQMessagesResponse proto.InternalMessageInfo type MergeDLQMessagesRequest struct { - Type v17.DeadLetterQueueType `protobuf:"varint,1,opt,name=type,proto3,enum=temporal.server.api.enums.v1.DeadLetterQueueType" json:"type,omitempty"` + Type v18.DeadLetterQueueType `protobuf:"varint,1,opt,name=type,proto3,enum=temporal.server.api.enums.v1.DeadLetterQueueType" json:"type,omitempty"` ShardId int32 `protobuf:"varint,2,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"` SourceCluster string `protobuf:"bytes,3,opt,name=source_cluster,json=sourceCluster,proto3" json:"source_cluster,omitempty"` InclusiveEndMessageId int64 `protobuf:"varint,4,opt,name=inclusive_end_message_id,json=inclusiveEndMessageId,proto3" json:"inclusive_end_message_id,omitempty"` @@ -4693,7 +4947,7 @@ func (m *MergeDLQMessagesRequest) Reset() { *m = MergeDLQMessagesRequest{} } func (*MergeDLQMessagesRequest) ProtoMessage() {} func (*MergeDLQMessagesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{78} + return fileDescriptor_b8c78c1d460a3711, []int{82} } func (m *MergeDLQMessagesRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4722,11 +4976,11 @@ var xxx_messageInfo_MergeDLQMessagesRequest proto.InternalMessageInfo -func (m *MergeDLQMessagesRequest) GetType() v17.DeadLetterQueueType { +func (m *MergeDLQMessagesRequest) GetType() v18.DeadLetterQueueType { if m != nil { return m.Type } - return v17.DEAD_LETTER_QUEUE_TYPE_UNSPECIFIED + return v18.DEAD_LETTER_QUEUE_TYPE_UNSPECIFIED } func (m *MergeDLQMessagesRequest) GetShardId() int32 { @@ -4771,7 +5025,7 @@ func (m *MergeDLQMessagesResponse) Reset() { *m = MergeDLQMessagesResponse{} } func (*MergeDLQMessagesResponse) ProtoMessage() {} func (*MergeDLQMessagesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{79} + return fileDescriptor_b8c78c1d460a3711, []int{83} } func (m *MergeDLQMessagesResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4815,7 +5069,7 @@ func (m *RefreshWorkflowTasksRequest) Reset() { *m = RefreshWorkflowTasksRequest{} } func (*RefreshWorkflowTasksRequest) ProtoMessage() {} func (*RefreshWorkflowTasksRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{80} + return fileDescriptor_b8c78c1d460a3711, []int{84} } func (m *RefreshWorkflowTasksRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4864,7 +5118,7 @@ func (m *RefreshWorkflowTasksResponse) Reset() { *m = RefreshWorkflowTasksResponse{} } func (*RefreshWorkflowTasksResponse) ProtoMessage() {} func (*RefreshWorkflowTasksResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{81} + return fileDescriptor_b8c78c1d460a3711, []int{85} } func (m *RefreshWorkflowTasksResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4903,7 +5157,7 @@ } func (*GenerateLastHistoryReplicationTasksRequest) ProtoMessage() 
{} func (*GenerateLastHistoryReplicationTasksRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{82} + return fileDescriptor_b8c78c1d460a3711, []int{86} } func (m *GenerateLastHistoryReplicationTasksRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4948,6 +5202,7 @@ type GenerateLastHistoryReplicationTasksResponse struct { StateTransitionCount int64 `protobuf:"varint,1,opt,name=state_transition_count,json=stateTransitionCount,proto3" json:"state_transition_count,omitempty"` + HistoryLength int64 `protobuf:"varint,2,opt,name=history_length,json=historyLength,proto3" json:"history_length,omitempty"` } func (m *GenerateLastHistoryReplicationTasksResponse) Reset() { @@ -4955,7 +5210,7 @@ } func (*GenerateLastHistoryReplicationTasksResponse) ProtoMessage() {} func (*GenerateLastHistoryReplicationTasksResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{83} + return fileDescriptor_b8c78c1d460a3711, []int{87} } func (m *GenerateLastHistoryReplicationTasksResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4991,6 +5246,13 @@ return 0 } +func (m *GenerateLastHistoryReplicationTasksResponse) GetHistoryLength() int64 { + if m != nil { + return m.HistoryLength + } + return 0 +} + type GetReplicationStatusRequest struct { // Remote cluster names to query for. If omit, will return for all remote clusters. RemoteClusters []string `protobuf:"bytes,1,rep,name=remote_clusters,json=remoteClusters,proto3" json:"remote_clusters,omitempty"` @@ -4999,7 +5261,7 @@ func (m *GetReplicationStatusRequest) Reset() { *m = GetReplicationStatusRequest{} } func (*GetReplicationStatusRequest) ProtoMessage() {} func (*GetReplicationStatusRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{84} + return fileDescriptor_b8c78c1d460a3711, []int{88} } func (m *GetReplicationStatusRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5042,7 +5304,7 @@ func (m *GetReplicationStatusResponse) Reset() { *m = GetReplicationStatusResponse{} } func (*GetReplicationStatusResponse) ProtoMessage() {} func (*GetReplicationStatusResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{85} + return fileDescriptor_b8c78c1d460a3711, []int{89} } func (m *GetReplicationStatusResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5092,7 +5354,7 @@ func (m *ShardReplicationStatus) Reset() { *m = ShardReplicationStatus{} } func (*ShardReplicationStatus) ProtoMessage() {} func (*ShardReplicationStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{86} + return fileDescriptor_b8c78c1d460a3711, []int{90} } func (m *ShardReplicationStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5172,7 +5434,7 @@ func (m *HandoverNamespaceInfo) Reset() { *m = HandoverNamespaceInfo{} } func (*HandoverNamespaceInfo) ProtoMessage() {} func (*HandoverNamespaceInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{87} + return fileDescriptor_b8c78c1d460a3711, []int{91} } func (m *HandoverNamespaceInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5218,7 +5480,7 @@ func (m *ShardReplicationStatusPerCluster) Reset() { *m = ShardReplicationStatusPerCluster{} } func (*ShardReplicationStatusPerCluster) ProtoMessage() {} func (*ShardReplicationStatusPerCluster) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{88} + return fileDescriptor_b8c78c1d460a3711, []int{92} 
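// Editor's note, not part of the upstream hunks: a few hunks above, v1.22.5
// adds a history_length field (tag 2) and a nil-safe GetHistoryLength()
// accessor to GenerateLastHistoryReplicationTasksResponse. A hypothetical
// sketch of reading both counters, assuming resp was returned by the
// corresponding history-service call (the RPC plumbing is not shown here):
//
//	transitions := resp.GetStateTransitionCount() // pre-existing field, tag 1
//	historyLen := resp.GetHistoryLength()         // new in this release, tag 2
//	log.Printf("state transitions=%d history length=%d", transitions, historyLen)
//
// Like the other generated getters in this file, both calls return 0 when
// resp is nil.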
} func (m *ShardReplicationStatusPerCluster) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5269,7 +5531,7 @@ func (m *RebuildMutableStateRequest) Reset() { *m = RebuildMutableStateRequest{} } func (*RebuildMutableStateRequest) ProtoMessage() {} func (*RebuildMutableStateRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{89} + return fileDescriptor_b8c78c1d460a3711, []int{93} } func (m *RebuildMutableStateRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5318,7 +5580,7 @@ func (m *RebuildMutableStateResponse) Reset() { *m = RebuildMutableStateResponse{} } func (*RebuildMutableStateResponse) ProtoMessage() {} func (*RebuildMutableStateResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{90} + return fileDescriptor_b8c78c1d460a3711, []int{94} } func (m *RebuildMutableStateResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5357,7 +5619,7 @@ func (m *DeleteWorkflowVisibilityRecordRequest) Reset() { *m = DeleteWorkflowVisibilityRecordRequest{} } func (*DeleteWorkflowVisibilityRecordRequest) ProtoMessage() {} func (*DeleteWorkflowVisibilityRecordRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{91} + return fileDescriptor_b8c78c1d460a3711, []int{95} } func (m *DeleteWorkflowVisibilityRecordRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5422,7 +5684,7 @@ } func (*DeleteWorkflowVisibilityRecordResponse) ProtoMessage() {} func (*DeleteWorkflowVisibilityRecordResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{92} + return fileDescriptor_b8c78c1d460a3711, []int{96} } func (m *DeleteWorkflowVisibilityRecordResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5462,7 +5724,7 @@ func (m *UpdateWorkflowExecutionRequest) Reset() { *m = UpdateWorkflowExecutionRequest{} } func (*UpdateWorkflowExecutionRequest) ProtoMessage() {} func (*UpdateWorkflowExecutionRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{93} + return fileDescriptor_b8c78c1d460a3711, []int{97} } func (m *UpdateWorkflowExecutionRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5512,7 +5774,7 @@ func (m *UpdateWorkflowExecutionResponse) Reset() { *m = UpdateWorkflowExecutionResponse{} } func (*UpdateWorkflowExecutionResponse) ProtoMessage() {} func (*UpdateWorkflowExecutionResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{94} + return fileDescriptor_b8c78c1d460a3711, []int{98} } func (m *UpdateWorkflowExecutionResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5560,7 +5822,7 @@ } func (*StreamWorkflowReplicationMessagesRequest) ProtoMessage() {} func (*StreamWorkflowReplicationMessagesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{95} + return fileDescriptor_b8c78c1d460a3711, []int{99} } func (m *StreamWorkflowReplicationMessagesRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5636,7 +5898,7 @@ } func (*StreamWorkflowReplicationMessagesResponse) ProtoMessage() {} func (*StreamWorkflowReplicationMessagesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{96} + return fileDescriptor_b8c78c1d460a3711, []int{100} } func (m *StreamWorkflowReplicationMessagesResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5708,7 +5970,7 @@ func (m *PollWorkflowExecutionUpdateRequest) Reset() { *m = 
PollWorkflowExecutionUpdateRequest{} } func (*PollWorkflowExecutionUpdateRequest) ProtoMessage() {} func (*PollWorkflowExecutionUpdateRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{97} + return fileDescriptor_b8c78c1d460a3711, []int{101} } func (m *PollWorkflowExecutionUpdateRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5758,7 +6020,7 @@ func (m *PollWorkflowExecutionUpdateResponse) Reset() { *m = PollWorkflowExecutionUpdateResponse{} } func (*PollWorkflowExecutionUpdateResponse) ProtoMessage() {} func (*PollWorkflowExecutionUpdateResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_b8c78c1d460a3711, []int{98} + return fileDescriptor_b8c78c1d460a3711, []int{102} } func (m *PollWorkflowExecutionUpdateResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5812,6 +6074,8 @@ proto.RegisterType((*RespondWorkflowTaskCompletedResponse)(nil), "temporal.server.api.historyservice.v1.RespondWorkflowTaskCompletedResponse") proto.RegisterType((*RespondWorkflowTaskFailedRequest)(nil), "temporal.server.api.historyservice.v1.RespondWorkflowTaskFailedRequest") proto.RegisterType((*RespondWorkflowTaskFailedResponse)(nil), "temporal.server.api.historyservice.v1.RespondWorkflowTaskFailedResponse") + proto.RegisterType((*IsWorkflowTaskValidRequest)(nil), "temporal.server.api.historyservice.v1.IsWorkflowTaskValidRequest") + proto.RegisterType((*IsWorkflowTaskValidResponse)(nil), "temporal.server.api.historyservice.v1.IsWorkflowTaskValidResponse") proto.RegisterType((*RecordActivityTaskHeartbeatRequest)(nil), "temporal.server.api.historyservice.v1.RecordActivityTaskHeartbeatRequest") proto.RegisterType((*RecordActivityTaskHeartbeatResponse)(nil), "temporal.server.api.historyservice.v1.RecordActivityTaskHeartbeatResponse") proto.RegisterType((*RespondActivityTaskCompletedRequest)(nil), "temporal.server.api.historyservice.v1.RespondActivityTaskCompletedRequest") @@ -5820,6 +6084,8 @@ proto.RegisterType((*RespondActivityTaskFailedResponse)(nil), "temporal.server.api.historyservice.v1.RespondActivityTaskFailedResponse") proto.RegisterType((*RespondActivityTaskCanceledRequest)(nil), "temporal.server.api.historyservice.v1.RespondActivityTaskCanceledRequest") proto.RegisterType((*RespondActivityTaskCanceledResponse)(nil), "temporal.server.api.historyservice.v1.RespondActivityTaskCanceledResponse") + proto.RegisterType((*IsActivityTaskValidRequest)(nil), "temporal.server.api.historyservice.v1.IsActivityTaskValidRequest") + proto.RegisterType((*IsActivityTaskValidResponse)(nil), "temporal.server.api.historyservice.v1.IsActivityTaskValidResponse") proto.RegisterType((*SignalWorkflowExecutionRequest)(nil), "temporal.server.api.historyservice.v1.SignalWorkflowExecutionRequest") proto.RegisterType((*SignalWorkflowExecutionResponse)(nil), "temporal.server.api.historyservice.v1.SignalWorkflowExecutionResponse") proto.RegisterType((*SignalWithStartWorkflowExecutionRequest)(nil), "temporal.server.api.historyservice.v1.SignalWithStartWorkflowExecutionRequest") @@ -5905,309 +6171,314 @@ } var fileDescriptor_b8c78c1d460a3711 = []byte{ - // 4819 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x3c, 0x49, 0x6c, 0x1c, 0xd9, - 0x75, 0x2a, 0x76, 0x37, 0xd9, 0x7c, 0x24, 0x7b, 0x29, 0x6e, 0x2d, 0x4a, 0x6a, 0x51, 0xa5, 0x8d, - 0xd2, 0x8c, 0x5a, 0xdb, 0xd8, 0x23, 0x2b, 0x9e, 0x19, 0x8b, 0xd4, 0x46, 0x41, 0x92, 0x39, 0x45, - 0x8e, 0x66, 0x32, 0x1e, 0xb9, 0xa6, 0x58, 0xf5, 0x49, 0x56, 0xd8, 0x5d, 
0xd5, 0xaa, 0x5f, 0x4d, - 0xb2, 0x27, 0x07, 0x07, 0x30, 0xb2, 0xd8, 0x87, 0x64, 0x80, 0x5c, 0x8c, 0xc0, 0xc9, 0x21, 0x40, - 0x12, 0x23, 0x40, 0x90, 0x43, 0x0e, 0x86, 0x0f, 0xbe, 0x24, 0x40, 0x10, 0x04, 0x39, 0x0c, 0x72, - 0xc9, 0x20, 0x01, 0xe2, 0x8c, 0x06, 0x41, 0x6c, 0x24, 0x07, 0x1f, 0x83, 0x20, 0x87, 0xe0, 0x6f, - 0xd5, 0xb5, 0xf6, 0x42, 0x4a, 0xd1, 0x78, 0x3c, 0x37, 0xf6, 0xff, 0xff, 0xbd, 0xff, 0xf6, 0xf7, - 0xff, 0xfb, 0xaf, 0x08, 0x5f, 0xf5, 0x50, 0xa3, 0xe9, 0xb8, 0x7a, 0xfd, 0x22, 0x46, 0xee, 0x0e, - 0x72, 0x2f, 0xea, 0x4d, 0xeb, 0xe2, 0x96, 0x85, 0x3d, 0xc7, 0x6d, 0x93, 0x11, 0xcb, 0x40, 0x17, - 0x77, 0x2e, 0x5f, 0x74, 0xd1, 0x93, 0x16, 0xc2, 0x9e, 0xe6, 0x22, 0xdc, 0x74, 0x6c, 0x8c, 0x6a, - 0x4d, 0xd7, 0xf1, 0x1c, 0xf9, 0xb4, 0x80, 0xae, 0x31, 0xe8, 0x9a, 0xde, 0xb4, 0x6a, 0x61, 0xe8, - 0xda, 0xce, 0xe5, 0xb9, 0xea, 0xa6, 0xe3, 0x6c, 0xd6, 0xd1, 0x45, 0x0a, 0xb4, 0xde, 0xda, 0xb8, - 0x68, 0xb6, 0x5c, 0xdd, 0xb3, 0x1c, 0x9b, 0xa1, 0x99, 0x3b, 0x1e, 0x9d, 0xf7, 0xac, 0x06, 0xc2, - 0x9e, 0xde, 0x68, 0xf2, 0x05, 0x27, 0x4c, 0xd4, 0x44, 0xb6, 0x89, 0x6c, 0xc3, 0x42, 0xf8, 0xe2, - 0xa6, 0xb3, 0xe9, 0xd0, 0x71, 0xfa, 0x17, 0x5f, 0x72, 0xca, 0x67, 0x84, 0x70, 0x60, 0x38, 0x8d, - 0x86, 0x63, 0x13, 0xca, 0x1b, 0x08, 0x63, 0x7d, 0x93, 0x13, 0x3c, 0x77, 0x3a, 0xb4, 0x8a, 0x53, - 0x1a, 0x5f, 0x76, 0x36, 0xb4, 0xcc, 0xd3, 0xf1, 0xf6, 0x93, 0x16, 0x6a, 0xa1, 0xf8, 0xc2, 0xf0, - 0xae, 0xc8, 0x6e, 0x35, 0x30, 0x59, 0xb4, 0xeb, 0xb8, 0xdb, 0x1b, 0x75, 0x67, 0x97, 0xaf, 0x3a, - 0x13, 0x5a, 0x25, 0x26, 0xe3, 0xd8, 0x4e, 0x86, 0xd6, 0x3d, 0x69, 0xa1, 0x24, 0xda, 0xc2, 0xc8, - 0xe8, 0x98, 0xe1, 0xd4, 0x7b, 0xb1, 0xba, 0xa1, 0x5b, 0xf5, 0x96, 0x9b, 0xc0, 0xc1, 0xf9, 0x24, - 0x03, 0x30, 0xea, 0x8e, 0xb1, 0x1d, 0x5f, 0xfb, 0x72, 0x17, 0x63, 0x89, 0xaf, 0x3e, 0x97, 0xb4, - 0xda, 0x17, 0x11, 0xd3, 0x10, 0x5f, 0xfa, 0x52, 0xd7, 0xa5, 0x11, 0x69, 0x9e, 0xed, 0xba, 0x98, - 0x28, 0x8b, 0x2f, 0xbc, 0x90, 0xb4, 0x30, 0x5d, 0xfa, 0xb5, 0xa4, 0xe5, 0xb6, 0xde, 0x40, 0xb8, - 0xa9, 0x1b, 0x09, 0x92, 0xbb, 0x94, 0xb4, 0xde, 0x45, 0xcd, 0xba, 0x65, 0x50, 0xe3, 0x8e, 0x43, - 0x5c, 0x4d, 0x82, 0x68, 0x22, 0x17, 0x5b, 0xd8, 0x43, 0x36, 0xdb, 0x03, 0xed, 0x21, 0xa3, 0x45, - 0xc0, 0x31, 0x07, 0x7a, 0xa3, 0x0f, 0x20, 0xc1, 0x94, 0xd6, 0x68, 0x79, 0xfa, 0x7a, 0x1d, 0x69, - 0xd8, 0xd3, 0x3d, 0xb1, 0xeb, 0x97, 0x13, 0xad, 0xaf, 0xa7, 0x73, 0xcf, 0x5d, 0x4f, 0xda, 0x58, - 0x37, 0x1b, 0x96, 0xdd, 0x13, 0x56, 0xf9, 0xd9, 0x30, 0x1c, 0x5b, 0xf5, 0x74, 0xd7, 0x7b, 0x9b, - 0x6f, 0x77, 0x4b, 0xb0, 0xa5, 0x32, 0x00, 0xf9, 0x04, 0x8c, 0xfb, 0xb2, 0xd5, 0x2c, 0xb3, 0x22, - 0xcd, 0x4b, 0x0b, 0xa3, 0xea, 0x98, 0x3f, 0xb6, 0x6c, 0xca, 0x06, 0x4c, 0x60, 0x82, 0x43, 0xe3, - 0x9b, 0x54, 0x86, 0xe6, 0xa5, 0x85, 0xb1, 0x2b, 0xaf, 0xfb, 0x8a, 0xa2, 0xe1, 0x26, 0xc2, 0x50, - 0x6d, 0xe7, 0x72, 0xad, 0xeb, 0xce, 0xea, 0x38, 0x45, 0x2a, 0xe8, 0xd8, 0x82, 0xe9, 0xa6, 0xee, - 0x22, 0xdb, 0xd3, 0x7c, 0xc9, 0x6b, 0x96, 0xbd, 0xe1, 0x54, 0x32, 0x74, 0xb3, 0x57, 0x6a, 0x49, - 0x21, 0xce, 0xb7, 0xc8, 0x9d, 0xcb, 0xb5, 0x15, 0x0a, 0xed, 0xef, 0xb2, 0x6c, 0x6f, 0x38, 0xea, - 0x64, 0x33, 0x3e, 0x28, 0x57, 0x60, 0x44, 0xf7, 0x08, 0x36, 0xaf, 0x92, 0x9d, 0x97, 0x16, 0x72, - 0xaa, 0xf8, 0x29, 0x37, 0x40, 0xf1, 0x35, 0xd8, 0xa1, 0x02, 0xed, 0x35, 0x2d, 0x16, 0x26, 0x35, - 0x12, 0x0f, 0x2b, 0x39, 0x4a, 0xd0, 0x5c, 0x8d, 0x05, 0xcb, 0x9a, 0x08, 0x96, 0xb5, 0x35, 0x11, - 0x2c, 0x17, 0xb3, 0x1f, 0xfe, 0xe4, 0xb8, 0xa4, 0x1e, 0xdf, 0x8d, 0x72, 0x7e, 0xcb, 0xc7, 0x44, - 0xd6, 0xca, 0x5b, 0x70, 0xd8, 0x70, 0x6c, 0xcf, 0xb2, 0x5b, 0x48, 0xd3, 0xb1, 0x66, 0xa3, 0x5d, - 
0xcd, 0xb2, 0x2d, 0xcf, 0xd2, 0x3d, 0xc7, 0xad, 0x0c, 0xcf, 0x4b, 0x0b, 0x85, 0x2b, 0x17, 0xc2, - 0x32, 0xa6, 0xde, 0x45, 0x98, 0x5d, 0xe2, 0x70, 0x37, 0xf0, 0x43, 0xb4, 0xbb, 0x2c, 0x80, 0xd4, - 0x19, 0x23, 0x71, 0x5c, 0x7e, 0x00, 0x65, 0x31, 0x63, 0x6a, 0x3c, 0x04, 0x55, 0x46, 0x28, 0x1f, - 0xf3, 0xe1, 0x1d, 0xf8, 0x24, 0xd9, 0xe3, 0x36, 0xfb, 0x53, 0x2d, 0xf9, 0xa0, 0x7c, 0x44, 0x7e, - 0x04, 0x33, 0x75, 0x1d, 0x7b, 0x9a, 0xe1, 0x34, 0x9a, 0x75, 0x44, 0x25, 0xe3, 0x22, 0xdc, 0xaa, - 0x7b, 0x95, 0x7c, 0x12, 0x4e, 0x1e, 0x62, 0xa8, 0x8e, 0xda, 0x75, 0x47, 0x37, 0xb1, 0x3a, 0x45, - 0xe0, 0x97, 0x7c, 0x70, 0x95, 0x42, 0xcb, 0xdf, 0x84, 0x23, 0x1b, 0x96, 0x8b, 0x3d, 0xcd, 0xd7, - 0x02, 0x89, 0x22, 0xda, 0xba, 0x6e, 0x6c, 0x3b, 0x1b, 0x1b, 0x95, 0x51, 0x8a, 0xfc, 0x70, 0x4c, - 0xf0, 0x37, 0x79, 0x16, 0x5b, 0xcc, 0x7e, 0x8f, 0xc8, 0xbd, 0x42, 0x71, 0x08, 0xb3, 0x5b, 0xd3, - 0xf1, 0xf6, 0x22, 0x43, 0x20, 0xbf, 0x07, 0x53, 0xd8, 0x69, 0xb9, 0x06, 0xd2, 0x76, 0x88, 0xdf, - 0x3a, 0xb6, 0x46, 0xf5, 0x55, 0x01, 0x8a, 0xf8, 0x7c, 0x1a, 0xd5, 0x04, 0x15, 0x72, 0x1f, 0x31, - 0x90, 0x55, 0x02, 0xa1, 0xca, 0x0c, 0x4f, 0x70, 0x4c, 0xf9, 0xa9, 0x04, 0xd5, 0x34, 0x8b, 0x67, - 0x4e, 0x29, 0x4f, 0xc3, 0xb0, 0xdb, 0xb2, 0x3b, 0x6e, 0x96, 0x73, 0x5b, 0xf6, 0xb2, 0x29, 0xbf, - 0x01, 0x39, 0x1a, 0xe9, 0xb9, 0x63, 0x9d, 0x4b, 0xb4, 0x75, 0xba, 0x82, 0x90, 0xf3, 0x08, 0x19, - 0x9e, 0xe3, 0x2e, 0x91, 0x9f, 0x2a, 0x83, 0x93, 0x6d, 0x98, 0x44, 0xfa, 0x26, 0x72, 0xc3, 0x82, - 0xe3, 0xae, 0xd3, 0xdb, 0x4f, 0x57, 0x9c, 0x7a, 0x3d, 0x28, 0xaf, 0x37, 0x49, 0x92, 0x15, 0x44, - 0xab, 0x65, 0x8a, 0x3a, 0x38, 0xaf, 0xfc, 0xa7, 0x04, 0x33, 0x77, 0x90, 0xf7, 0x80, 0x45, 0xb9, - 0x55, 0x12, 0xe4, 0x06, 0x88, 0x27, 0x77, 0x60, 0xd4, 0xf7, 0xae, 0x38, 0xcb, 0x71, 0xd9, 0x87, - 0x65, 0xd9, 0x81, 0x95, 0xaf, 0xc2, 0x0c, 0xda, 0x6b, 0x22, 0xc3, 0x43, 0xa6, 0x66, 0xa3, 0x3d, - 0x4f, 0x43, 0x3b, 0x24, 0x80, 0x58, 0x26, 0xe5, 0x3c, 0xa3, 0x4e, 0x8a, 0xd9, 0x87, 0x68, 0xcf, - 0xbb, 0x45, 0xe6, 0x96, 0x4d, 0xf9, 0x12, 0x4c, 0x19, 0x2d, 0x97, 0x46, 0x9a, 0x75, 0x57, 0xb7, - 0x8d, 0x2d, 0xcd, 0x73, 0xb6, 0x91, 0x4d, 0x63, 0xc1, 0xb8, 0x2a, 0xf3, 0xb9, 0x45, 0x3a, 0xb5, - 0x46, 0x66, 0x94, 0x1f, 0x8f, 0xc2, 0x6c, 0x8c, 0x5b, 0xae, 0xd1, 0x10, 0x2f, 0xd2, 0x01, 0x78, - 0x59, 0x86, 0x89, 0x8e, 0xf2, 0xda, 0x4d, 0xc4, 0x05, 0x73, 0xaa, 0x17, 0xb2, 0xb5, 0x76, 0x13, - 0xa9, 0xe3, 0xbb, 0x81, 0x5f, 0xb2, 0x02, 0x13, 0x49, 0xd2, 0x18, 0xb3, 0x03, 0x52, 0xf8, 0x0a, - 0x1c, 0x6e, 0xba, 0x68, 0xc7, 0x72, 0x5a, 0x58, 0xa3, 0x71, 0x18, 0x99, 0x9d, 0xf5, 0x59, 0xba, - 0x7e, 0x46, 0x2c, 0x58, 0x65, 0xf3, 0x02, 0xf4, 0x02, 0x4c, 0x52, 0xef, 0x67, 0xae, 0xea, 0x03, - 0xe5, 0x28, 0x50, 0x89, 0x4c, 0xdd, 0x26, 0x33, 0x62, 0xf9, 0x12, 0x00, 0xf5, 0x62, 0x7a, 0x72, - 0xa3, 0x61, 0x2d, 0xc6, 0x95, 0x7f, 0xb0, 0x23, 0x8c, 0x75, 0x0c, 0x70, 0xd4, 0x13, 0x7f, 0xca, - 0x2b, 0x50, 0xc6, 0x9e, 0x65, 0x6c, 0xb7, 0xb5, 0x00, 0xae, 0x91, 0x01, 0x70, 0x15, 0x19, 0xb8, - 0x3f, 0x20, 0xff, 0x3a, 0xbc, 0x14, 0xc3, 0xa8, 0x61, 0x63, 0x0b, 0x99, 0xad, 0x3a, 0xd2, 0x3c, - 0x87, 0x49, 0x85, 0x46, 0x7c, 0xa7, 0xe5, 0x55, 0xc6, 0xfa, 0x8b, 0x3d, 0xa7, 0x23, 0xdb, 0xac, - 0x72, 0x84, 0x6b, 0x0e, 0x15, 0xe2, 0x1a, 0xc3, 0x96, 0x6a, 0x83, 0x13, 0x69, 0x36, 0x28, 0x7f, - 0x03, 0x0a, 0xbe, 0x79, 0xd0, 0x43, 0x45, 0xa5, 0x48, 0x13, 0x44, 0x72, 0x5e, 0xf4, 0xf3, 0x44, - 0xcc, 0xe4, 0x98, 0xf5, 0xfa, 0xa6, 0x46, 0x7f, 0xca, 0x6f, 0x43, 0x31, 0x84, 0xbc, 0x85, 0x2b, - 0x25, 0x8a, 0xbd, 0x96, 0x92, 0x7e, 0x12, 0xd1, 0xb6, 0xb0, 0x5a, 0x08, 0xe2, 0x6d, 0x61, 0xf9, - 0x31, 0x94, 0x45, 0xa4, 
0x65, 0xc7, 0x53, 0x0b, 0xe1, 0x4a, 0x99, 0x8a, 0xf2, 0x52, 0xad, 0xcb, - 0x9d, 0x85, 0x85, 0x39, 0x0a, 0x78, 0x57, 0xc0, 0xa9, 0xa5, 0x9d, 0xc8, 0x88, 0xfc, 0x3a, 0x1c, - 0xb5, 0x88, 0xf9, 0x46, 0xd5, 0x88, 0x6c, 0xe2, 0xa8, 0x66, 0x45, 0x9e, 0x97, 0x16, 0xf2, 0x6a, - 0xc5, 0xc2, 0xab, 0x61, 0xad, 0xdc, 0x62, 0xf3, 0xf2, 0x2b, 0x30, 0x1b, 0xb3, 0x64, 0x6f, 0x8f, - 0xc6, 0xe7, 0x49, 0x16, 0x40, 0xc2, 0xd6, 0xbc, 0xb6, 0x47, 0xa2, 0xf5, 0x55, 0x98, 0xe1, 0x00, - 0xfe, 0x11, 0x81, 0x07, 0xf5, 0x29, 0x1a, 0xeb, 0x26, 0xe9, 0x6c, 0xc7, 0xc9, 0x69, 0x88, 0x7f, - 0x0f, 0xa6, 0x76, 0x69, 0x1a, 0x89, 0xa4, 0x9e, 0xe9, 0xc1, 0x53, 0xcf, 0x6e, 0x6c, 0xec, 0x5e, - 0x36, 0x9f, 0x2f, 0x8d, 0xde, 0xcb, 0xe6, 0x47, 0x4b, 0x70, 0x2f, 0x9b, 0x87, 0xd2, 0xd8, 0xbd, - 0x6c, 0x7e, 0xbc, 0x34, 0x71, 0x2f, 0x9b, 0x2f, 0x94, 0x8a, 0xca, 0x7f, 0x49, 0x30, 0x4b, 0x42, - 0xfc, 0x2f, 0x49, 0xb8, 0xfe, 0x83, 0x3c, 0x54, 0xe2, 0xec, 0x7e, 0x11, 0xaf, 0xbf, 0x88, 0xd7, - 0xcf, 0x3c, 0x5e, 0x8f, 0xa7, 0xc6, 0xeb, 0xc4, 0xc8, 0x57, 0x78, 0x66, 0x91, 0xef, 0x17, 0x33, - 0x1d, 0x74, 0x89, 0xb7, 0xe5, 0xfd, 0xc4, 0x5b, 0x39, 0x35, 0xde, 0x26, 0x46, 0xc4, 0x89, 0x52, - 0x41, 0xf9, 0x8e, 0x04, 0x47, 0x54, 0x84, 0x91, 0x17, 0x49, 0x09, 0x2f, 0x20, 0x1e, 0x2a, 0x55, - 0x38, 0x9a, 0x4c, 0x0a, 0x8b, 0x55, 0xca, 0x0f, 0x32, 0x30, 0xaf, 0x22, 0xc3, 0x71, 0xcd, 0xe0, - 0xe1, 0x9b, 0x7b, 0xf7, 0x00, 0x04, 0xbf, 0x03, 0x72, 0xfc, 0x5a, 0x3b, 0x38, 0xe5, 0xe5, 0xd8, - 0x7d, 0x56, 0x7e, 0x19, 0x64, 0xe1, 0x82, 0x66, 0x34, 0x7c, 0x95, 0xfc, 0x19, 0x11, 0x59, 0x66, - 0x61, 0x84, 0xfa, 0xae, 0x1f, 0xb1, 0x86, 0xc9, 0xcf, 0x65, 0x53, 0x3e, 0x06, 0x20, 0xea, 0x17, - 0x3c, 0x30, 0x8d, 0xaa, 0xa3, 0x7c, 0x64, 0xd9, 0x94, 0xdf, 0x87, 0xf1, 0xa6, 0x53, 0xaf, 0xfb, - 0xe5, 0x07, 0x16, 0x93, 0x5e, 0xdb, 0xef, 0xb5, 0x86, 0x55, 0x1f, 0xc6, 0x08, 0x4a, 0x21, 0x44, - 0xff, 0x02, 0x36, 0xb2, 0xbf, 0x0b, 0x98, 0xf2, 0x93, 0x3c, 0x9c, 0xe8, 0xa2, 0x2a, 0x9e, 0x7c, - 0x62, 0x39, 0x43, 0xda, 0x77, 0xce, 0xe8, 0x9a, 0x0f, 0x86, 0xba, 0xe6, 0x83, 0xc1, 0x94, 0xb6, - 0x00, 0xa5, 0x94, 0x7c, 0x53, 0xc0, 0x61, 0xbc, 0xb1, 0x34, 0x96, 0x8b, 0xa7, 0xb1, 0x40, 0xed, - 0x65, 0x38, 0x5c, 0x7b, 0xb9, 0x06, 0x15, 0x1e, 0xdf, 0x03, 0x95, 0x17, 0x7e, 0x8e, 0x1b, 0xa1, - 0xe7, 0xb8, 0x19, 0x36, 0xdf, 0xa9, 0xa6, 0xf0, 0x53, 0xdc, 0x13, 0x98, 0xf5, 0x5c, 0xdd, 0xc6, - 0x16, 0xd9, 0x36, 0x7c, 0x01, 0x66, 0xe5, 0x88, 0xaf, 0xf4, 0x0a, 0xb8, 0x6b, 0x02, 0x3c, 0xa8, - 0x3c, 0x5a, 0x40, 0x9a, 0xf6, 0x92, 0xa6, 0xe4, 0x4d, 0x38, 0x96, 0x50, 0x28, 0x0a, 0xa4, 0xba, - 0xd1, 0x01, 0x52, 0xdd, 0x5c, 0xcc, 0xaf, 0x3a, 0x59, 0xef, 0x04, 0x8c, 0x87, 0x12, 0xce, 0x18, - 0x4d, 0x38, 0x63, 0xeb, 0x81, 0x4c, 0x73, 0x07, 0x0a, 0x1d, 0x75, 0xd2, 0x02, 0xd5, 0x78, 0x9f, - 0x05, 0xaa, 0x09, 0x1f, 0x8e, 0x96, 0xa3, 0x96, 0x60, 0x5c, 0x68, 0x9a, 0xa2, 0x99, 0xe8, 0x13, - 0xcd, 0x18, 0x87, 0xa2, 0x48, 0x1c, 0x18, 0x79, 0xd2, 0x42, 0x3c, 0xdb, 0x65, 0x16, 0xc6, 0xae, - 0xbc, 0x55, 0xeb, 0xeb, 0x6d, 0xa2, 0xd6, 0xd3, 0x7b, 0x6a, 0x6f, 0x32, 0xbc, 0xb7, 0x6c, 0xcf, - 0x6d, 0xab, 0x62, 0x97, 0x8e, 0xeb, 0x16, 0xf7, 0x59, 0x3b, 0x79, 0x0d, 0xf2, 0xbc, 0x3a, 0x4c, - 0xd2, 0x1c, 0x21, 0xf9, 0x44, 0x58, 0x6d, 0xa2, 0xb4, 0x4f, 0xe0, 0x1f, 0xb0, 0x95, 0xaa, 0x0f, - 0x32, 0xf7, 0x3e, 0x8c, 0x07, 0x09, 0x93, 0x4b, 0x90, 0xd9, 0x46, 0x6d, 0x1e, 0x86, 0xc9, 0x9f, - 0xf2, 0x75, 0xc8, 0xed, 0xe8, 0xf5, 0x56, 0xca, 0x09, 0x91, 0xbe, 0x2e, 0x04, 0x9d, 0x9d, 0x60, - 0x6b, 0xab, 0x0c, 0xe4, 0xfa, 0xd0, 0x35, 0x89, 0xa5, 0xaf, 0x40, 0x32, 0xb8, 0x61, 0x78, 0xd6, - 0x8e, 0xe5, 0xb5, 0xbf, 0x48, 0x06, 0x83, 0x26, 
0x83, 0xa0, 0xe4, 0x9e, 0x63, 0x32, 0xf8, 0x9b, - 0xac, 0x48, 0x06, 0x89, 0xaa, 0xe2, 0xc9, 0xe0, 0x21, 0x14, 0x23, 0xe2, 0xe2, 0xe9, 0xe0, 0x74, - 0x98, 0x97, 0x40, 0x9c, 0x62, 0xe7, 0xbf, 0x36, 0x15, 0xa1, 0x5a, 0x08, 0x8b, 0x34, 0xe6, 0xbe, - 0x43, 0xfb, 0x71, 0xdf, 0x40, 0x7c, 0xce, 0x84, 0xe3, 0x33, 0x82, 0xaa, 0x38, 0x02, 0xf3, 0x21, - 0x2d, 0x12, 0x76, 0xb2, 0x7d, 0x6e, 0x78, 0x84, 0xe3, 0xb9, 0xc1, 0xd0, 0xac, 0x86, 0x82, 0xd0, - 0x03, 0x28, 0x6f, 0x21, 0xdd, 0xf5, 0xd6, 0x91, 0xee, 0x69, 0x26, 0xf2, 0x74, 0xab, 0x8e, 0x79, - 0xc5, 0xbd, 0x77, 0x55, 0xb9, 0xe4, 0x83, 0xde, 0x64, 0x90, 0xf1, 0x8c, 0x3b, 0xbc, 0xef, 0x8c, - 0x7b, 0x21, 0xe0, 0x38, 0xbe, 0x43, 0x51, 0x1b, 0x19, 0xed, 0x78, 0xc3, 0x43, 0x31, 0xd1, 0xb1, - 0xa2, 0xfc, 0x3e, 0xad, 0xe8, 0x47, 0x12, 0x9c, 0x64, 0xc6, 0x12, 0x8a, 0x8a, 0xbc, 0x68, 0x3e, - 0x90, 0xcf, 0x3b, 0x50, 0xe2, 0xa5, 0x7a, 0x14, 0x79, 0xc3, 0xb9, 0xd9, 0xd3, 0x6f, 0xfa, 0x20, - 0x41, 0x2d, 0x0a, 0xec, 0x7c, 0x40, 0xf9, 0xe1, 0x10, 0x9c, 0xea, 0x0e, 0xc8, 0x9d, 0x00, 0x77, - 0x4e, 0x17, 0xe2, 0xe5, 0x8a, 0x7b, 0xc1, 0xdd, 0x67, 0x95, 0x37, 0xc8, 0x55, 0x32, 0xec, 0x79, - 0x08, 0x0a, 0x3a, 0x77, 0x4c, 0x9a, 0xb3, 0x71, 0x65, 0x88, 0xc6, 0xfd, 0xd7, 0xf7, 0x1b, 0x44, - 0xf8, 0x46, 0x13, 0x7a, 0x60, 0x0a, 0x93, 0x7b, 0x8b, 0x4b, 0x8e, 0xf7, 0xfc, 0x02, 0xd8, 0x8e, - 0x95, 0x3b, 0xe8, 0x6c, 0xd0, 0xa7, 0x97, 0x4d, 0xe5, 0x2f, 0x25, 0x12, 0xe6, 0x63, 0x92, 0xbb, - 0xad, 0x5b, 0xf5, 0x81, 0x54, 0xbe, 0x05, 0x85, 0x0d, 0x0a, 0x13, 0x51, 0xf8, 0x8d, 0xfd, 0x28, - 0x3c, 0xb4, 0xbb, 0x3a, 0xb1, 0x11, 0xfc, 0xa9, 0x9c, 0x24, 0xc1, 0x2e, 0x15, 0x84, 0x5f, 0x65, - 0x7e, 0x24, 0x81, 0x12, 0x0f, 0x89, 0x77, 0x85, 0xbb, 0x0e, 0xc0, 0x58, 0x33, 0x18, 0x20, 0xc2, - 0xbc, 0x2d, 0xf5, 0xc1, 0x5b, 0x2f, 0x12, 0x02, 0x31, 0x44, 0x30, 0xb8, 0x42, 0xfc, 0xb0, 0x0b, - 0x1c, 0xb7, 0xaa, 0x73, 0x50, 0x32, 0x74, 0xdb, 0x40, 0x7e, 0x6a, 0x42, 0x8c, 0xfe, 0xbc, 0x5a, - 0x64, 0xe3, 0xaa, 0x18, 0x0e, 0xba, 0x76, 0x10, 0xe7, 0x0b, 0x72, 0xed, 0x6e, 0x24, 0xc4, 0x5d, - 0xfb, 0x8c, 0xef, 0xd9, 0x29, 0x70, 0x5c, 0xe3, 0x01, 0x43, 0x0e, 0x2e, 0xfc, 0xff, 0x37, 0xe4, - 0xd4, 0xdd, 0xd3, 0x0d, 0x39, 0x09, 0x84, 0xb3, 0xf5, 0x57, 0xd4, 0x90, 0xe3, 0xfc, 0x53, 0x0d, - 0x0f, 0xc4, 0xd8, 0xaf, 0x41, 0x21, 0x6c, 0x2f, 0x03, 0x58, 0x71, 0xaf, 0xfd, 0xd5, 0x89, 0x90, - 0xc9, 0x29, 0xa7, 0x93, 0xed, 0xcd, 0x07, 0xe2, 0xcc, 0xfd, 0xed, 0x10, 0x54, 0x57, 0xad, 0x4d, - 0x5b, 0xaf, 0x1f, 0xa4, 0x5d, 0x60, 0x03, 0x0a, 0x98, 0x22, 0x89, 0x30, 0xf6, 0x46, 0xef, 0x7e, - 0x81, 0xae, 0x7b, 0xab, 0x13, 0x0c, 0xad, 0x20, 0xc5, 0x82, 0x23, 0x68, 0xcf, 0x43, 0x2e, 0xd9, - 0x29, 0xe1, 0x48, 0x9b, 0x19, 0xf4, 0x48, 0x7b, 0x58, 0x60, 0x8b, 0x4d, 0xc9, 0x35, 0x98, 0x34, - 0xb6, 0xac, 0xba, 0xd9, 0xd9, 0xc7, 0xb1, 0xeb, 0x6d, 0x7a, 0xe2, 0xc9, 0xab, 0x65, 0x3a, 0x25, - 0x80, 0xbe, 0x6e, 0xd7, 0xdb, 0xca, 0x09, 0x38, 0x9e, 0xca, 0x0b, 0x97, 0xf5, 0x3f, 0x4a, 0x70, - 0x96, 0xaf, 0xb1, 0xbc, 0xad, 0x03, 0xf7, 0x68, 0x7c, 0x5b, 0x82, 0xc3, 0x5c, 0xea, 0xbb, 0x96, - 0xb7, 0xa5, 0x25, 0x35, 0x6c, 0xdc, 0xed, 0x57, 0x01, 0xbd, 0x08, 0x52, 0x67, 0x70, 0x78, 0xa1, - 0xb0, 0xb3, 0x1b, 0xb0, 0xd0, 0x1b, 0x45, 0xd7, 0xb7, 0x70, 0xe5, 0xc7, 0x12, 0x1c, 0x57, 0x51, - 0xc3, 0xd9, 0x41, 0x0c, 0xd3, 0x3e, 0x1f, 0x2d, 0x9e, 0xdf, 0x35, 0x27, 0x7c, 0x3f, 0xc9, 0x44, - 0xee, 0x27, 0x8a, 0x42, 0xc2, 0x5e, 0x1a, 0xf9, 0x42, 0xf7, 0x43, 0x70, 0x62, 0x0d, 0xb9, 0x0d, - 0xcb, 0xd6, 0x3d, 0x74, 0x10, 0xad, 0x3b, 0x50, 0xf6, 0x04, 0x9e, 0x88, 0xb2, 0x17, 0x7b, 0x2a, - 0xbb, 0x27, 0x05, 0x6a, 0xc9, 0x47, 0xfe, 0x0b, 0xe0, 0x73, 0xa7, 0x40, 
0xe9, 0xc6, 0x11, 0x17, - 0xfd, 0xff, 0x48, 0x50, 0xbd, 0x89, 0x48, 0xaa, 0x3a, 0x88, 0xdc, 0x9f, 0x9f, 0x75, 0x9d, 0x83, - 0x92, 0x8f, 0x99, 0x57, 0xfd, 0xf9, 0x71, 0xd1, 0xaf, 0xc9, 0xf3, 0xe7, 0x01, 0xfa, 0x28, 0x51, - 0x77, 0x30, 0x4a, 0x96, 0x90, 0xcc, 0xe6, 0xa2, 0x61, 0x29, 0x95, 0x77, 0x2e, 0x9f, 0x3f, 0x93, - 0xe0, 0x18, 0x2d, 0x4a, 0x1f, 0xb0, 0x61, 0x8c, 0x9d, 0x7c, 0x07, 0x6d, 0x18, 0xeb, 0xba, 0xb3, - 0x3a, 0x4e, 0x91, 0x8a, 0x58, 0xf3, 0x2a, 0x54, 0xd3, 0x96, 0x77, 0x8f, 0x30, 0xbf, 0x9f, 0x81, - 0xd3, 0x1c, 0x09, 0xcb, 0x80, 0x07, 0x61, 0xb5, 0x91, 0x92, 0xc5, 0x6f, 0xf7, 0xc1, 0x6b, 0x1f, - 0x24, 0x44, 0x12, 0xb9, 0xfc, 0x5a, 0xc0, 0xff, 0x78, 0xaf, 0x58, 0xbc, 0xd8, 0x52, 0x11, 0x4b, - 0x96, 0xc5, 0x0a, 0x51, 0x74, 0xe9, 0xe1, 0xbe, 0xd9, 0xe7, 0xef, 0xbe, 0xb9, 0x34, 0xf7, 0x5d, - 0x80, 0x33, 0xbd, 0x24, 0xc2, 0x4d, 0xf4, 0x67, 0x43, 0x70, 0x44, 0x14, 0x0d, 0x82, 0x57, 0x8e, - 0xcf, 0x84, 0xff, 0x5e, 0x85, 0x19, 0x0b, 0x6b, 0x09, 0x5d, 0x6c, 0x54, 0x37, 0x79, 0x75, 0xd2, - 0xc2, 0xb7, 0xa3, 0xed, 0x69, 0xf2, 0x3d, 0x18, 0x63, 0xb2, 0x62, 0x15, 0x83, 0xec, 0xa0, 0x15, - 0x03, 0xa0, 0xd0, 0xf4, 0x6f, 0xf9, 0x3e, 0x8c, 0xf3, 0x3e, 0x4a, 0x86, 0x2c, 0x37, 0x28, 0xb2, - 0x31, 0x06, 0x4e, 0x7f, 0x28, 0x55, 0x38, 0x9a, 0x2c, 0x6a, 0xae, 0x8b, 0xff, 0x90, 0xe0, 0xec, - 0x23, 0xe4, 0x5a, 0x1b, 0xed, 0x18, 0x57, 0x7e, 0x5d, 0xe7, 0x33, 0xa1, 0x17, 0xbf, 0x1c, 0x93, - 0xd9, 0x67, 0x39, 0xe6, 0x3c, 0x2c, 0xf4, 0x66, 0x94, 0x4b, 0xe5, 0x7f, 0x33, 0xe4, 0x92, 0x44, - 0xae, 0x8c, 0x4b, 0x44, 0x31, 0x3e, 0x15, 0xfb, 0xb9, 0xe0, 0x3d, 0x3f, 0x91, 0xd4, 0x80, 0xb7, - 0xc7, 0x06, 0x22, 0x89, 0x1f, 0x43, 0xca, 0x6c, 0xca, 0x8f, 0x20, 0xcb, 0xa6, 0xfc, 0x2e, 0x4c, - 0x8a, 0xcb, 0xa0, 0x79, 0x90, 0xa0, 0x21, 0xfb, 0x58, 0x3a, 0xb4, 0xac, 0xf8, 0xd7, 0x58, 0xfa, - 0xee, 0x43, 0xab, 0xa1, 0xb9, 0x41, 0xaa, 0xa1, 0xc5, 0x0e, 0x38, 0x2b, 0x87, 0xfa, 0x0a, 0x1f, - 0xde, 0xe7, 0xbb, 0xc0, 0x35, 0xa8, 0xc4, 0xc4, 0x23, 0x32, 0xf2, 0x08, 0x7f, 0x60, 0x0b, 0xcb, - 0x88, 0x27, 0x66, 0xe5, 0x2c, 0xc9, 0x2f, 0x5d, 0xb5, 0x2f, 0x92, 0x6d, 0x06, 0x2e, 0x30, 0xa3, - 0x4a, 0x5c, 0x49, 0x83, 0x1e, 0xc1, 0x33, 0x90, 0xc1, 0xac, 0x41, 0x29, 0xda, 0x48, 0x3d, 0xb8, - 0xb9, 0x14, 0x23, 0x8d, 0xd3, 0xb2, 0x0a, 0x45, 0x16, 0xa2, 0x0e, 0x70, 0xd8, 0x2b, 0x18, 0x21, - 0x2e, 0xd3, 0x0c, 0x30, 0x9b, 0x66, 0x80, 0xdd, 0x34, 0x92, 0xeb, 0xa6, 0x91, 0x03, 0x1b, 0x83, - 0x72, 0x09, 0x6a, 0xfd, 0x2a, 0x8a, 0xeb, 0xf6, 0x8f, 0x25, 0x98, 0xbf, 0x89, 0xb0, 0xe1, 0x5a, - 0xeb, 0x07, 0x3a, 0x6a, 0x7e, 0x03, 0x46, 0x06, 0x2d, 0x7c, 0xf4, 0xda, 0x56, 0x15, 0x18, 0x95, - 0xdf, 0xcb, 0xc2, 0x89, 0x2e, 0xab, 0xf9, 0x39, 0xea, 0x3d, 0x28, 0x75, 0x1e, 0x39, 0x0d, 0xc7, - 0xde, 0xb0, 0x36, 0x79, 0x91, 0xf6, 0x72, 0x32, 0x2d, 0x89, 0xea, 0x5f, 0xa2, 0x80, 0x6a, 0x11, - 0x85, 0x07, 0xe4, 0x4d, 0x98, 0x4d, 0x78, 0x4b, 0xa5, 0xad, 0xff, 0x8c, 0xe1, 0x8b, 0x03, 0x6c, - 0xc2, 0x1e, 0x6d, 0x77, 0x93, 0x86, 0xe5, 0xf7, 0x40, 0x6e, 0x22, 0xdb, 0xb4, 0xec, 0x4d, 0x8d, - 0x17, 0x6a, 0x2d, 0x84, 0x2b, 0x19, 0x5a, 0xfa, 0xbd, 0x90, 0xbe, 0xc7, 0x0a, 0x83, 0x11, 0x85, - 0x13, 0xba, 0x43, 0xb9, 0x19, 0x1a, 0xb4, 0x10, 0x96, 0xbf, 0x09, 0x25, 0x81, 0x9d, 0x9a, 0xb9, - 0x4b, 0x7b, 0xd4, 0x08, 0xee, 0xab, 0x3d, 0x71, 0x87, 0x8d, 0x8a, 0xee, 0x50, 0x6c, 0x06, 0xa6, - 0x5c, 0x64, 0xcb, 0x08, 0xa6, 0x05, 0xfe, 0xf0, 0xb9, 0x22, 0xd7, 0x4b, 0x13, 0x7c, 0x93, 0xd8, - 0xdb, 0xf6, 0x64, 0x33, 0x3e, 0xa1, 0xfc, 0x7b, 0x06, 0x2a, 0x2a, 0xff, 0x76, 0x06, 0xd1, 0x48, - 0x8a, 0x1f, 0x5d, 0xf9, 0x4c, 0xa4, 0xab, 0x0d, 0x98, 0x0e, 0x77, 0x54, 0xb5, 0x35, 0xcb, 0x43, - 
0x0d, 0xa1, 0xc1, 0x2b, 0x03, 0x75, 0x55, 0xb5, 0x97, 0x3d, 0xd4, 0x50, 0x27, 0x77, 0x62, 0x63, - 0x58, 0xbe, 0x06, 0xc3, 0x34, 0xff, 0x60, 0x9e, 0xd9, 0x52, 0x9f, 0x9d, 0x6e, 0xea, 0x9e, 0xbe, - 0x58, 0x77, 0xd6, 0x55, 0xbe, 0x5e, 0xbe, 0x0d, 0x05, 0x1b, 0xed, 0xd2, 0xe6, 0x24, 0x8e, 0x21, - 0xd7, 0x27, 0x86, 0x71, 0x1b, 0xed, 0xaa, 0x2d, 0x96, 0xb9, 0xb0, 0xbc, 0x0e, 0x93, 0xeb, 0x3a, - 0x46, 0x51, 0x6f, 0x60, 0xb1, 0xeb, 0x4a, 0xcf, 0x0f, 0x61, 0x16, 0x75, 0x8c, 0xc2, 0xc6, 0x54, - 0x5e, 0x8f, 0x0e, 0x29, 0x47, 0xe0, 0x70, 0x82, 0x9a, 0x79, 0xec, 0xfa, 0x7b, 0x7a, 0x09, 0xe4, - 0xb3, 0x6f, 0x07, 0x7b, 0xc3, 0x84, 0x25, 0x68, 0xb1, 0xfe, 0x33, 0x16, 0x10, 0xae, 0x25, 0x52, - 0x17, 0xf8, 0x4a, 0x2a, 0xa8, 0xee, 0x50, 0x6d, 0x24, 0xd2, 0x83, 0x76, 0x1a, 0x0a, 0x2e, 0x6a, - 0x38, 0x1e, 0xd2, 0x8c, 0x7a, 0x0b, 0x7b, 0xc8, 0xa5, 0x36, 0x34, 0xaa, 0x4e, 0xb0, 0xd1, 0x25, - 0x36, 0x18, 0xb3, 0xc8, 0x4c, 0xcc, 0x22, 0x95, 0x79, 0x72, 0x4f, 0x4c, 0xe6, 0x85, 0xb3, 0xfb, - 0x87, 0x12, 0xcc, 0xac, 0xb6, 0x6d, 0x63, 0x75, 0x4b, 0x77, 0x4d, 0xde, 0xba, 0xc6, 0xf9, 0x3c, - 0x0d, 0x05, 0xfe, 0xc5, 0x88, 0x20, 0x83, 0xd9, 0xfc, 0x04, 0x1b, 0x15, 0x64, 0x1c, 0x86, 0x3c, - 0x26, 0xc0, 0xa2, 0xf9, 0x26, 0xa7, 0x8e, 0xd0, 0xdf, 0xcb, 0xa6, 0x7c, 0x03, 0xc6, 0x58, 0x0f, - 0x1d, 0x7b, 0x24, 0xcd, 0xf4, 0xf9, 0x48, 0x0a, 0x0c, 0x88, 0x0c, 0x2b, 0x87, 0x61, 0x36, 0x46, - 0x1e, 0x27, 0xfd, 0x1f, 0x86, 0x61, 0x92, 0xcc, 0x89, 0xe8, 0x34, 0x80, 0xa7, 0x1e, 0x87, 0x31, - 0x5f, 0x85, 0x9c, 0xec, 0x51, 0x15, 0xc4, 0xd0, 0xb2, 0x19, 0xb8, 0x3e, 0x67, 0x82, 0x1f, 0xab, - 0x54, 0x60, 0x44, 0x24, 0x5d, 0x96, 0xa9, 0xc5, 0xcf, 0x94, 0x06, 0x80, 0x5c, 0x4a, 0x03, 0x40, - 0xbc, 0x6f, 0x65, 0x78, 0x7f, 0x7d, 0x2b, 0x49, 0x1d, 0x4a, 0x23, 0x89, 0x1d, 0x4a, 0xd1, 0x27, - 0xf2, 0xfc, 0x7e, 0x9e, 0xc8, 0x57, 0x78, 0x3b, 0x6d, 0xe7, 0x15, 0x8a, 0xe2, 0x1a, 0xed, 0x13, - 0x57, 0x99, 0x00, 0xfb, 0xaf, 0x47, 0x14, 0xe3, 0x75, 0x18, 0x11, 0x2f, 0xdd, 0xd0, 0xe7, 0x4b, - 0xb7, 0x00, 0x08, 0x3e, 0xd8, 0x8f, 0x85, 0x1f, 0xec, 0x97, 0x60, 0x9c, 0x35, 0x5b, 0xf2, 0xcf, - 0xbd, 0xc6, 0xfb, 0xfc, 0xdc, 0x6b, 0x8c, 0xf6, 0x60, 0xf2, 0x2f, 0xbd, 0x2e, 0x01, 0xfd, 0x52, - 0x4b, 0xe3, 0xbd, 0xeb, 0x96, 0x89, 0x6c, 0xcf, 0xf2, 0xda, 0xb4, 0x37, 0x68, 0x54, 0x95, 0xc9, - 0x1c, 0x6b, 0x51, 0x5f, 0xe6, 0x33, 0xf2, 0xdb, 0x50, 0x8c, 0x84, 0x69, 0xde, 0xf6, 0x5a, 0x1b, - 0x2c, 0x40, 0xab, 0x85, 0x70, 0x70, 0x4e, 0x8b, 0x8a, 0xc5, 0x67, 0x19, 0x15, 0x67, 0x60, 0x2a, - 0xec, 0x4d, 0xdc, 0xcd, 0xbe, 0x23, 0xc1, 0x11, 0x71, 0x4e, 0x7a, 0xc1, 0x5d, 0xf4, 0xca, 0x7f, - 0x4b, 0x70, 0x34, 0x99, 0x16, 0x7e, 0x5c, 0xdb, 0x82, 0x49, 0x43, 0x37, 0xb6, 0x50, 0xf8, 0x23, - 0xd4, 0x03, 0x07, 0xe8, 0x32, 0x45, 0x1a, 0x1c, 0x92, 0x6d, 0x98, 0x31, 0x75, 0x4f, 0xa7, 0x6a, - 0x09, 0x6f, 0x36, 0x74, 0xc0, 0xcd, 0xa6, 0x04, 0xde, 0xe0, 0xa8, 0xf2, 0x4f, 0x12, 0xcc, 0x09, - 0xd6, 0xb9, 0x59, 0xdc, 0x75, 0x70, 0xf0, 0xf5, 0x78, 0xcb, 0xc1, 0x9e, 0xa6, 0x9b, 0xa6, 0x8b, - 0x30, 0x16, 0x5a, 0x20, 0x63, 0x37, 0xd8, 0x50, 0xb7, 0x40, 0xdd, 0x3b, 0x95, 0xa4, 0x1c, 0x6e, - 0xb2, 0x07, 0x3f, 0xdc, 0x28, 0xff, 0x1a, 0x30, 0xb0, 0x10, 0x67, 0x5c, 0xa7, 0x27, 0x61, 0x82, - 0xd2, 0x89, 0x35, 0xbb, 0xd5, 0x58, 0xe7, 0x69, 0x28, 0xa7, 0x8e, 0xb3, 0xc1, 0x87, 0x74, 0x4c, - 0x3e, 0x02, 0xa3, 0x82, 0x39, 0xd6, 0xd2, 0x90, 0x53, 0xf3, 0x9c, 0x3b, 0x2c, 0x3f, 0x86, 0x62, - 0x87, 0x3d, 0xaa, 0xca, 0xae, 0x5f, 0xd6, 0xfa, 0x6b, 0x09, 0x0b, 0x7e, 0x57, 0xcb, 0x12, 0x81, - 0xa3, 0xce, 0x53, 0xb0, 0x43, 0x63, 0x34, 0x0e, 0x71, 0xb1, 0xb3, 0x96, 0x2d, 0xf1, 0xf3, 0x5e, - 0x36, 0x9f, 0x2d, 0xe5, 
0x94, 0x1a, 0x94, 0x97, 0xea, 0x0e, 0x46, 0x34, 0x89, 0x09, 0x85, 0x05, - 0xb5, 0x21, 0x85, 0xb4, 0xa1, 0x4c, 0x81, 0x1c, 0x5c, 0xcf, 0xfd, 0xf0, 0x65, 0x28, 0xde, 0x41, - 0x5e, 0xbf, 0x38, 0xde, 0x87, 0x52, 0x67, 0x35, 0x17, 0xe4, 0x7d, 0x00, 0xbe, 0x9c, 0x04, 0x0f, - 0xe6, 0x13, 0x17, 0xfa, 0x31, 0x53, 0x8a, 0x86, 0xb2, 0xce, 0x84, 0x4c, 0xe3, 0xc5, 0x3f, 0x4b, - 0x50, 0x66, 0xaf, 0x3d, 0xc1, 0x02, 0x64, 0x3a, 0x49, 0xf2, 0x6d, 0xc8, 0x93, 0x73, 0xc8, 0x26, - 0x09, 0x8b, 0x43, 0xb4, 0xa7, 0xfe, 0x7c, 0xf7, 0x8e, 0x7d, 0xf6, 0x4e, 0xcb, 0x20, 0x54, 0x1f, - 0x36, 0xd8, 0x3d, 0x97, 0x09, 0x75, 0xcf, 0x2d, 0x43, 0x71, 0xc7, 0xc2, 0xd6, 0xba, 0x55, 0xa7, - 0xdd, 0x2d, 0x83, 0xf4, 0x65, 0x15, 0x3a, 0x80, 0xf4, 0xd8, 0x31, 0x05, 0x72, 0x90, 0x37, 0xae, - 0x82, 0x0f, 0x25, 0x38, 0x76, 0x07, 0x79, 0x6a, 0xe7, 0xfb, 0x7a, 0xde, 0x13, 0xe9, 0x9f, 0x99, - 0xee, 0xc3, 0x30, 0x6d, 0x56, 0x25, 0x0e, 0x98, 0x49, 0x35, 0xb0, 0xc0, 0x07, 0xfa, 0xac, 0x1a, - 0xee, 0xff, 0xa4, 0x6d, 0xad, 0x2a, 0xc7, 0x41, 0xdc, 0x92, 0x1f, 0xbd, 0x68, 0xd7, 0x15, 0x3f, - 0xa7, 0x8c, 0xf1, 0x31, 0x62, 0x99, 0xca, 0xf7, 0x87, 0xa0, 0x9a, 0x46, 0x12, 0x57, 0xfb, 0xb7, - 0xa0, 0xc0, 0x54, 0xe2, 0xb7, 0x7a, 0x32, 0xda, 0xde, 0xe9, 0xb3, 0xcb, 0xa8, 0x3b, 0x7a, 0x66, - 0x1c, 0x62, 0x94, 0x35, 0xa8, 0x32, 0x7f, 0x15, 0x63, 0x73, 0x6d, 0x90, 0xe3, 0x8b, 0x82, 0xcd, - 0xa2, 0x39, 0xd6, 0x2c, 0xfa, 0x20, 0xdc, 0x2c, 0xfa, 0xea, 0x80, 0xb2, 0xf3, 0x29, 0xeb, 0xf4, - 0x8f, 0x2a, 0x1f, 0xc0, 0xfc, 0x1d, 0xe4, 0xdd, 0xbc, 0xff, 0x66, 0x17, 0x9d, 0x3d, 0xe2, 0x1f, - 0xfd, 0x10, 0xaf, 0x10, 0xb2, 0x19, 0x74, 0x6f, 0xff, 0x62, 0x49, 0xbf, 0x03, 0x22, 0x7f, 0x61, - 0xe5, 0x37, 0x25, 0x38, 0xd1, 0x65, 0x73, 0xae, 0x9d, 0xf7, 0xa1, 0x1c, 0x40, 0xcb, 0x7b, 0xb2, - 0xa4, 0xe8, 0xe5, 0xb9, 0x6f, 0x22, 0xd4, 0x92, 0x1b, 0x1e, 0xc0, 0xca, 0x77, 0x25, 0x98, 0xa2, - 0x8d, 0xb5, 0x22, 0x1a, 0x0f, 0x90, 0xb9, 0xbf, 0x1e, 0xad, 0xc0, 0x7c, 0xa9, 0x67, 0x05, 0x26, - 0x69, 0xab, 0x4e, 0xd5, 0x65, 0x1b, 0xa6, 0x23, 0x0b, 0xb8, 0x1c, 0x54, 0xc8, 0x47, 0xba, 0xe0, - 0xbe, 0x3c, 0xe8, 0x56, 0xbc, 0x15, 0xcd, 0xc7, 0xa3, 0xfc, 0xae, 0x04, 0x53, 0x2a, 0xd2, 0x9b, - 0xcd, 0x3a, 0xab, 0x94, 0xe2, 0x01, 0x38, 0x5f, 0x8d, 0x72, 0x9e, 0xdc, 0x49, 0x1f, 0xfc, 0x5f, - 0x14, 0x4c, 0x1d, 0xf1, 0xed, 0x3a, 0xdc, 0xcf, 0xc2, 0x74, 0x64, 0x01, 0xa7, 0xf4, 0x2f, 0x86, - 0x60, 0x9a, 0xd9, 0x4a, 0xd4, 0x3a, 0x6f, 0x41, 0xd6, 0xff, 0x5c, 0xa2, 0x10, 0x2c, 0x75, 0x24, - 0x45, 0xcc, 0x9b, 0x48, 0x37, 0xef, 0x23, 0xcf, 0x43, 0x2e, 0xed, 0xce, 0xa3, 0x9d, 0x9c, 0x14, - 0xbc, 0x5b, 0xf2, 0x8f, 0xdf, 0xf3, 0x32, 0x49, 0xf7, 0xbc, 0x57, 0xa1, 0x62, 0xd9, 0x64, 0x85, - 0xb5, 0x83, 0x34, 0x64, 0xfb, 0xe1, 0xa4, 0x53, 0xb6, 0x9c, 0xf6, 0xe7, 0x6f, 0xd9, 0xc2, 0xd9, - 0x97, 0x4d, 0xf9, 0x3c, 0x94, 0x1b, 0xfa, 0x9e, 0xd5, 0x68, 0x35, 0xb4, 0x26, 0x59, 0x8f, 0xad, - 0x0f, 0xd8, 0x3f, 0x92, 0xc8, 0xa9, 0x45, 0x3e, 0xb1, 0xa2, 0x6f, 0xa2, 0x55, 0xeb, 0x03, 0x24, - 0x9f, 0x81, 0x22, 0xfd, 0x8e, 0x82, 0x2e, 0x64, 0x6d, 0xff, 0xc3, 0xb4, 0xed, 0x9f, 0x7e, 0x5e, - 0x41, 0x96, 0xb1, 0xef, 0x1c, 0x3f, 0x1e, 0xa2, 0x1f, 0xe1, 0x87, 0xe4, 0xc5, 0x0d, 0xe9, 0x19, - 0x09, 0x2c, 0xd1, 0x2f, 0x87, 0x9e, 0xa1, 0x5f, 0x26, 0xf1, 0x9a, 0x49, 0xe0, 0x55, 0x6e, 0xc0, - 0x4c, 0x8c, 0x12, 0x96, 0xc2, 0xb3, 0x07, 0x8b, 0x55, 0x53, 0x51, 0x92, 0x68, 0x5e, 0xff, 0x17, - 0x09, 0x66, 0x57, 0x5a, 0xee, 0x26, 0xfa, 0x3c, 0x1a, 0xa3, 0x32, 0x07, 0x95, 0x38, 0x73, 0xa2, - 0x6d, 0x6f, 0x08, 0x66, 0x1f, 0xa0, 0xcf, 0x29, 0xe7, 0xcf, 0xc5, 0x0d, 0x17, 0xa1, 0x12, 0x17, - 0x18, 0xf7, 0xc3, 0x04, 0x1c, 0x52, 0x12, 0x8e, 
0xef, 0xd3, 0xaf, 0x12, 0x37, 0x5c, 0x84, 0xb7, - 0x82, 0xd5, 0xd8, 0x41, 0x62, 0xf5, 0xbb, 0xd1, 0x58, 0xfd, 0xb5, 0x3e, 0x63, 0x75, 0xea, 0xae, - 0x9d, 0x90, 0x4d, 0x3f, 0x54, 0x4c, 0x5a, 0xc7, 0x8d, 0xe6, 0x7b, 0x12, 0x9c, 0xbf, 0x83, 0x6c, - 0xe4, 0xea, 0x1e, 0xba, 0xaf, 0x63, 0xd1, 0xd3, 0x1c, 0xf1, 0x37, 0xfc, 0x22, 0x6e, 0xcb, 0x06, - 0xbc, 0xd4, 0x17, 0x65, 0x5c, 0x61, 0xaf, 0xc0, 0x0c, 0xbd, 0xc0, 0x6a, 0xec, 0xbb, 0x2f, 0xfe, - 0xe2, 0xd1, 0xe2, 0xdf, 0x66, 0x64, 0xd4, 0x29, 0x3a, 0xbb, 0xe6, 0x4f, 0x2e, 0x91, 0x39, 0xe5, - 0x36, 0x1c, 0x09, 0x1f, 0x10, 0xc3, 0x45, 0xc4, 0xb3, 0x50, 0x0c, 0xd7, 0x32, 0xd9, 0xe1, 0x66, - 0x54, 0x2d, 0x84, 0x8a, 0x99, 0x58, 0x69, 0xc1, 0xd1, 0x64, 0x3c, 0x9c, 0xba, 0xb7, 0x60, 0x98, - 0x5d, 0xf8, 0xf8, 0xe1, 0xe8, 0xb5, 0x3e, 0x4f, 0xaf, 0xfc, 0x0a, 0x14, 0x45, 0xcb, 0x91, 0x29, - 0x7f, 0x3d, 0x0c, 0x33, 0xc9, 0x4b, 0xba, 0x5d, 0x65, 0xbe, 0x04, 0xb3, 0x0d, 0x7d, 0x4f, 0x8b, - 0x86, 0xe5, 0xce, 0xf7, 0x87, 0x53, 0x0d, 0x7d, 0x2f, 0x1a, 0x72, 0x4d, 0xf9, 0x3e, 0x94, 0x18, - 0xc6, 0xba, 0x63, 0xe8, 0xf5, 0x7e, 0x8b, 0xa2, 0xc3, 0xe4, 0x86, 0x52, 0x91, 0x54, 0x76, 0x8a, - 0xbf, 0x4f, 0x40, 0x69, 0xe9, 0xec, 0x83, 0xb8, 0x68, 0x59, 0x42, 0x78, 0xf3, 0x40, 0xa2, 0xa9, - 0xa9, 0x21, 0xc5, 0xb0, 0x13, 0x7d, 0x44, 0x5b, 0xf2, 0x6f, 0x49, 0x30, 0xb9, 0xa5, 0xdb, 0xa6, - 0xb3, 0xc3, 0xef, 0x26, 0xd4, 0x78, 0xc9, 0xfd, 0x77, 0x90, 0xef, 0xde, 0x52, 0x08, 0xb8, 0xcb, - 0x11, 0xfb, 0x57, 0x6f, 0x4e, 0x84, 0xbc, 0x15, 0x9b, 0x90, 0x9b, 0x70, 0x2a, 0x51, 0x13, 0xd1, - 0x8b, 0x60, 0xbf, 0xf5, 0xd5, 0xf9, 0xb8, 0xe2, 0x1e, 0x85, 0xae, 0x86, 0x73, 0xdf, 0x95, 0x60, - 0x32, 0x41, 0x44, 0x09, 0x1f, 0xbf, 0x3d, 0x0e, 0xdf, 0x67, 0xee, 0x1c, 0x48, 0x2a, 0x2b, 0xc8, - 0xe5, 0xfb, 0x05, 0xee, 0x37, 0x73, 0xdf, 0x96, 0x60, 0x36, 0x45, 0x5c, 0x09, 0x04, 0xa9, 0x61, - 0x82, 0xbe, 0xda, 0x27, 0x41, 0xb1, 0x0d, 0xe8, 0xe9, 0x21, 0x70, 0xcb, 0x7a, 0x07, 0xa6, 0x13, - 0xd7, 0xc8, 0x6f, 0xc0, 0x51, 0xdf, 0x4a, 0x92, 0x9c, 0x85, 0x05, 0x96, 0xc3, 0x62, 0x4d, 0xcc, - 0x63, 0x94, 0x3f, 0x91, 0x60, 0xbe, 0x97, 0x3c, 0x64, 0x05, 0x26, 0x74, 0x63, 0x1b, 0x99, 0x11, - 0xb4, 0x63, 0x74, 0x90, 0xbb, 0xde, 0x63, 0x98, 0x0b, 0xac, 0x89, 0x5a, 0x47, 0xbf, 0xdf, 0x8b, - 0xcd, 0xfa, 0x28, 0xc3, 0x46, 0xa1, 0xfc, 0x8e, 0x04, 0x73, 0x2a, 0x5a, 0x6f, 0x59, 0x75, 0xf3, - 0x45, 0xd7, 0x48, 0x8f, 0x91, 0x74, 0x9a, 0x40, 0x09, 0xcf, 0x57, 0x3f, 0x1c, 0x82, 0xd3, 0xe1, - 0x46, 0xc8, 0x0e, 0x2b, 0xec, 0x21, 0xff, 0x45, 0xfc, 0x7b, 0x94, 0x15, 0x98, 0x0c, 0xbe, 0xa9, - 0xf1, 0x7f, 0x66, 0xd1, 0xf7, 0x8b, 0x51, 0x39, 0xf0, 0x80, 0xc6, 0xfe, 0x73, 0x45, 0x08, 0x23, - 0x6d, 0x07, 0x1d, 0xac, 0x20, 0xe4, 0x63, 0xa4, 0x95, 0x38, 0xaa, 0xe3, 0x05, 0x38, 0xd3, 0x4b, - 0x70, 0x5c, 0xc6, 0x7f, 0x24, 0x41, 0xf5, 0xad, 0xa6, 0x79, 0xc0, 0x06, 0xe7, 0x5f, 0x8d, 0x9e, - 0x6a, 0x7a, 0x7f, 0x44, 0xd0, 0x7d, 0xd3, 0xce, 0xa1, 0xe6, 0x5b, 0x70, 0x3c, 0x75, 0xa9, 0xdf, - 0xf8, 0x10, 0xbd, 0x8f, 0x7f, 0x6d, 0xff, 0xdb, 0xc7, 0x6e, 0xe6, 0x7f, 0x2e, 0xc1, 0xc2, 0xaa, - 0xe7, 0x22, 0xbd, 0xd1, 0xb9, 0xbe, 0xa7, 0x16, 0x68, 0x9a, 0x30, 0x83, 0xdb, 0xb6, 0x11, 0x8a, - 0x20, 0xbd, 0xeb, 0xfa, 0x91, 0x0b, 0xd0, 0x6a, 0xdb, 0x36, 0x22, 0x41, 0x04, 0xdd, 0x3d, 0xa4, - 0x4e, 0xe1, 0x84, 0xf1, 0xc5, 0x71, 0x00, 0xdd, 0xf3, 0x5c, 0x6b, 0xbd, 0xe5, 0x21, 0x4c, 0x8e, - 0x78, 0xe7, 0xfa, 0x20, 0x96, 0x0b, 0xee, 0x71, 0xe0, 0x9b, 0x6a, 0x29, 0xaa, 0xb7, 0x74, 0xfa, - 0xba, 0xa0, 0xbe, 0x7b, 0xa8, 0xf3, 0xcd, 0x75, 0x84, 0xb4, 0x3f, 0x95, 0x40, 0x09, 0xfe, 0xab, - 0x07, 0x5f, 0xe6, 0x4c, 0x15, 0x03, 0x58, 0xdb, 0xe3, 0xa8, 0xb5, 0x2d, 
0x0d, 0xf4, 0x3f, 0x26, - 0x92, 0x37, 0xee, 0x58, 0xdc, 0x6f, 0x4b, 0x70, 0xb2, 0xeb, 0x7a, 0xbf, 0x1c, 0x16, 0x35, 0xbb, - 0x9b, 0x07, 0xa3, 0x23, 0x6a, 0x7a, 0x8b, 0xcd, 0x8f, 0x3e, 0xa9, 0x1e, 0xfa, 0xf8, 0x93, 0xea, - 0xa1, 0x9f, 0x7f, 0x52, 0x95, 0x7e, 0xe3, 0x69, 0x55, 0xfa, 0xc1, 0xd3, 0xaa, 0xf4, 0x77, 0x4f, - 0xab, 0xd2, 0x47, 0x4f, 0xab, 0xd2, 0xbf, 0x3d, 0xad, 0x4a, 0x3f, 0x7d, 0x5a, 0x3d, 0xf4, 0xf3, - 0xa7, 0x55, 0xe9, 0xc3, 0x4f, 0xab, 0x87, 0x3e, 0xfa, 0xb4, 0x7a, 0xe8, 0xe3, 0x4f, 0xab, 0x87, - 0xde, 0xbd, 0xbe, 0xe9, 0x74, 0xe8, 0xb0, 0x9c, 0xae, 0xff, 0xa9, 0xf8, 0x57, 0xc2, 0x23, 0xeb, - 0xc3, 0x34, 0xca, 0x5c, 0xfd, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xb9, 0xa6, 0xf3, 0x9d, 0xe8, - 0x58, 0x00, 0x00, + // 4910 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x3c, 0x49, 0x6c, 0x1c, 0x57, + 0x76, 0x2a, 0x76, 0x37, 0xd9, 0x7c, 0x24, 0x7b, 0x29, 0x6e, 0x4d, 0x52, 0x6a, 0x51, 0xa5, 0x8d, + 0x92, 0xad, 0xd6, 0xe6, 0x19, 0x6b, 0x94, 0xb1, 0x3d, 0x22, 0xb5, 0x51, 0x90, 0x34, 0x74, 0x91, + 0x96, 0x9d, 0x19, 0x6b, 0xca, 0xc5, 0xae, 0x4f, 0xb2, 0xc2, 0xee, 0xaa, 0x56, 0xfd, 0x6a, 0x2e, + 0xce, 0x61, 0x02, 0x0c, 0xb2, 0x8c, 0x0f, 0x89, 0x81, 0x5c, 0x06, 0xc1, 0x24, 0x87, 0x00, 0x49, + 0x06, 0x01, 0x82, 0x1c, 0x72, 0x18, 0xe4, 0x30, 0x97, 0x1c, 0x82, 0x20, 0xc8, 0xc1, 0x99, 0x4b, + 0x8c, 0x04, 0x59, 0x2c, 0x23, 0xc8, 0xcc, 0xcd, 0xb7, 0x2c, 0xa7, 0xe0, 0x6f, 0xd5, 0xb5, 0xf6, + 0x2a, 0x45, 0x1e, 0xc7, 0xb7, 0xee, 0xff, 0xdf, 0x7b, 0xff, 0xfd, 0xb7, 0xfe, 0xe5, 0xfd, 0x82, + 0xaf, 0xbb, 0xa8, 0xde, 0xb0, 0x1d, 0xbd, 0x76, 0x11, 0x23, 0x67, 0x0f, 0x39, 0x17, 0xf5, 0x86, + 0x79, 0x71, 0xc7, 0xc4, 0xae, 0xed, 0x1c, 0x92, 0x16, 0xb3, 0x8a, 0x2e, 0xee, 0x5d, 0xbe, 0xe8, + 0xa0, 0x27, 0x4d, 0x84, 0x5d, 0xcd, 0x41, 0xb8, 0x61, 0x5b, 0x18, 0x55, 0x1a, 0x8e, 0xed, 0xda, + 0xf2, 0x69, 0x81, 0x5d, 0x61, 0xd8, 0x15, 0xbd, 0x61, 0x56, 0x82, 0xd8, 0x95, 0xbd, 0xcb, 0xf3, + 0xe5, 0x6d, 0xdb, 0xde, 0xae, 0xa1, 0x8b, 0x14, 0x69, 0xb3, 0xb9, 0x75, 0xd1, 0x68, 0x3a, 0xba, + 0x6b, 0xda, 0x16, 0x23, 0x33, 0x7f, 0x3c, 0xdc, 0xef, 0x9a, 0x75, 0x84, 0x5d, 0xbd, 0xde, 0xe0, + 0x00, 0x27, 0x0c, 0xd4, 0x40, 0x96, 0x81, 0xac, 0xaa, 0x89, 0xf0, 0xc5, 0x6d, 0x7b, 0xdb, 0xa6, + 0xed, 0xf4, 0x17, 0x07, 0x39, 0xe5, 0x4d, 0x84, 0xcc, 0xa0, 0x6a, 0xd7, 0xeb, 0xb6, 0x45, 0x38, + 0xaf, 0x23, 0x8c, 0xf5, 0x6d, 0xce, 0xf0, 0xfc, 0xe9, 0x00, 0x14, 0xe7, 0x34, 0x0a, 0x76, 0x36, + 0x00, 0xe6, 0xea, 0x78, 0xf7, 0x49, 0x13, 0x35, 0x51, 0x14, 0x30, 0x38, 0x2a, 0xb2, 0x9a, 0x75, + 0x4c, 0x80, 0xf6, 0x6d, 0x67, 0x77, 0xab, 0x66, 0xef, 0x73, 0xa8, 0x33, 0x01, 0x28, 0xd1, 0x19, + 0xa5, 0x76, 0x32, 0x00, 0xf7, 0xa4, 0x89, 0xe2, 0x78, 0x0b, 0x12, 0xa3, 0x6d, 0x55, 0xbb, 0xd6, + 0x69, 0xaa, 0x5b, 0xba, 0x59, 0x6b, 0x3a, 0x31, 0x33, 0x38, 0x1f, 0x67, 0x00, 0xd5, 0x9a, 0x5d, + 0xdd, 0x8d, 0xc2, 0xbe, 0xdc, 0xc6, 0x58, 0xa2, 0xd0, 0xe7, 0xe2, 0xa0, 0x3d, 0x11, 0x31, 0x0d, + 0x71, 0xd0, 0x97, 0xda, 0x82, 0x86, 0xa4, 0x79, 0xb6, 0x2d, 0x30, 0x51, 0x16, 0x07, 0xbc, 0x10, + 0x07, 0x98, 0x2c, 0xfd, 0x4a, 0x1c, 0xb8, 0xa5, 0xd7, 0x11, 0x6e, 0xe8, 0xd5, 0x18, 0xc9, 0x5d, + 0x8a, 0x83, 0x77, 0x50, 0xa3, 0x66, 0x56, 0xa9, 0x71, 0x47, 0x31, 0xae, 0xc6, 0x61, 0x34, 0x90, + 0x83, 0x4d, 0xec, 0x22, 0x8b, 0x8d, 0x81, 0x0e, 0x50, 0xb5, 0x49, 0xd0, 0x31, 0x47, 0x7a, 0xa3, + 0x0b, 0x24, 0x31, 0x29, 0xad, 0xde, 0x74, 0xf5, 0xcd, 0x1a, 0xd2, 0xb0, 0xab, 0xbb, 0x62, 0xd4, + 0xaf, 0xc6, 0x5a, 0x5f, 0x47, 0xe7, 0x9e, 0xbf, 0x1e, 0x37, 0xb0, 0x6e, 0xd4, 0x4d, 0xab, 0x23, + 0xae, 0xf2, 0xf3, 0x61, 0x38, 
0xb6, 0xee, 0xea, 0x8e, 0xfb, 0x36, 0x1f, 0xee, 0x96, 0x98, 0x96, + 0xca, 0x10, 0xe4, 0x13, 0x30, 0xee, 0xc9, 0x56, 0x33, 0x8d, 0x92, 0xb4, 0x28, 0x2d, 0x8d, 0xaa, + 0x63, 0x5e, 0xdb, 0xaa, 0x21, 0x57, 0x61, 0x02, 0x13, 0x1a, 0x1a, 0x1f, 0xa4, 0x34, 0xb4, 0x28, + 0x2d, 0x8d, 0x5d, 0x79, 0xdd, 0x53, 0x14, 0x0d, 0x37, 0xa1, 0x09, 0x55, 0xf6, 0x2e, 0x57, 0xda, + 0x8e, 0xac, 0x8e, 0x53, 0xa2, 0x82, 0x8f, 0x1d, 0x98, 0x6e, 0xe8, 0x0e, 0xb2, 0x5c, 0xcd, 0x93, + 0xbc, 0x66, 0x5a, 0x5b, 0x76, 0x29, 0x45, 0x07, 0x7b, 0xa5, 0x12, 0x17, 0xe2, 0x3c, 0x8b, 0xdc, + 0xbb, 0x5c, 0x59, 0xa3, 0xd8, 0xde, 0x28, 0xab, 0xd6, 0x96, 0xad, 0x4e, 0x36, 0xa2, 0x8d, 0x72, + 0x09, 0x46, 0x74, 0x97, 0x50, 0x73, 0x4b, 0xe9, 0x45, 0x69, 0x29, 0xa3, 0x8a, 0xbf, 0x72, 0x1d, + 0x14, 0x4f, 0x83, 0x2d, 0x2e, 0xd0, 0x41, 0xc3, 0x64, 0x61, 0x52, 0x23, 0xf1, 0xb0, 0x94, 0xa1, + 0x0c, 0xcd, 0x57, 0x58, 0xb0, 0xac, 0x88, 0x60, 0x59, 0xd9, 0x10, 0xc1, 0x72, 0x39, 0xfd, 0xe1, + 0xbf, 0x1e, 0x97, 0xd4, 0xe3, 0xfb, 0xe1, 0x99, 0xdf, 0xf2, 0x28, 0x11, 0x58, 0x79, 0x07, 0xe6, + 0xaa, 0xb6, 0xe5, 0x9a, 0x56, 0x13, 0x69, 0x3a, 0xd6, 0x2c, 0xb4, 0xaf, 0x99, 0x96, 0xe9, 0x9a, + 0xba, 0x6b, 0x3b, 0xa5, 0xe1, 0x45, 0x69, 0x29, 0x77, 0xe5, 0x42, 0x50, 0xc6, 0xd4, 0xbb, 0xc8, + 0x64, 0x57, 0x38, 0xde, 0x0d, 0xfc, 0x10, 0xed, 0xaf, 0x0a, 0x24, 0x75, 0xa6, 0x1a, 0xdb, 0x2e, + 0x3f, 0x80, 0xa2, 0xe8, 0x31, 0x34, 0x1e, 0x82, 0x4a, 0x23, 0x74, 0x1e, 0x8b, 0xc1, 0x11, 0x78, + 0x27, 0x19, 0xe3, 0x36, 0xfb, 0xa9, 0x16, 0x3c, 0x54, 0xde, 0x22, 0x3f, 0x82, 0x99, 0x9a, 0x8e, + 0x5d, 0xad, 0x6a, 0xd7, 0x1b, 0x35, 0x44, 0x25, 0xe3, 0x20, 0xdc, 0xac, 0xb9, 0xa5, 0x6c, 0x1c, + 0x4d, 0x1e, 0x62, 0xa8, 0x8e, 0x0e, 0x6b, 0xb6, 0x6e, 0x60, 0x75, 0x8a, 0xe0, 0xaf, 0x78, 0xe8, + 0x2a, 0xc5, 0x96, 0xbf, 0x03, 0x0b, 0x5b, 0xa6, 0x83, 0x5d, 0xcd, 0xd3, 0x02, 0x89, 0x22, 0xda, + 0xa6, 0x5e, 0xdd, 0xb5, 0xb7, 0xb6, 0x4a, 0xa3, 0x94, 0xf8, 0x5c, 0x44, 0xf0, 0x37, 0x79, 0x16, + 0x5b, 0x4e, 0xff, 0x80, 0xc8, 0xbd, 0x44, 0x69, 0x08, 0xb3, 0xdb, 0xd0, 0xf1, 0xee, 0x32, 0x23, + 0x20, 0xbf, 0x0b, 0x53, 0xd8, 0x6e, 0x3a, 0x55, 0xa4, 0xed, 0x11, 0xbf, 0xb5, 0x2d, 0x8d, 0xea, + 0xab, 0x04, 0x94, 0xf0, 0xf9, 0x24, 0xae, 0x09, 0x29, 0xe4, 0x3c, 0x62, 0x28, 0xeb, 0x04, 0x43, + 0x95, 0x19, 0x1d, 0x7f, 0x9b, 0xf2, 0x33, 0x09, 0xca, 0x49, 0x16, 0xcf, 0x9c, 0x52, 0x9e, 0x86, + 0x61, 0xa7, 0x69, 0xb5, 0xdc, 0x2c, 0xe3, 0x34, 0xad, 0x55, 0x43, 0x7e, 0x03, 0x32, 0x34, 0xd2, + 0x73, 0xc7, 0x3a, 0x17, 0x6b, 0xeb, 0x14, 0x82, 0xb0, 0xf3, 0x08, 0x55, 0x5d, 0xdb, 0x59, 0x21, + 0x7f, 0x55, 0x86, 0x27, 0x5b, 0x30, 0x89, 0xf4, 0x6d, 0xe4, 0x04, 0x05, 0xc7, 0x5d, 0xa7, 0xb3, + 0x9f, 0xae, 0xd9, 0xb5, 0x9a, 0x5f, 0x5e, 0x6f, 0x92, 0x24, 0x2b, 0x98, 0x56, 0x8b, 0x94, 0xb4, + 0xbf, 0x5f, 0xf9, 0xfb, 0x21, 0x98, 0xb9, 0x83, 0xdc, 0x07, 0x2c, 0xca, 0xad, 0x93, 0x20, 0xd7, + 0x43, 0x3c, 0xb9, 0x03, 0xa3, 0x9e, 0x77, 0x45, 0xa7, 0x1c, 0x95, 0x7d, 0x50, 0x96, 0x2d, 0x5c, + 0xf9, 0x2a, 0xcc, 0xa0, 0x83, 0x06, 0xaa, 0xba, 0xc8, 0xd0, 0x2c, 0x74, 0xe0, 0x6a, 0x68, 0x8f, + 0x04, 0x10, 0xd3, 0xa0, 0x33, 0x4f, 0xa9, 0x93, 0xa2, 0xf7, 0x21, 0x3a, 0x70, 0x6f, 0x91, 0xbe, + 0x55, 0x43, 0xbe, 0x04, 0x53, 0xd5, 0xa6, 0x43, 0x23, 0xcd, 0xa6, 0xa3, 0x5b, 0xd5, 0x1d, 0xcd, + 0xb5, 0x77, 0x91, 0x45, 0x63, 0xc1, 0xb8, 0x2a, 0xf3, 0xbe, 0x65, 0xda, 0xb5, 0x41, 0x7a, 0x64, + 0x03, 0xa6, 0x84, 0xbd, 0xf0, 0x24, 0xab, 0x99, 0x2e, 0xaa, 0xf3, 0x40, 0x70, 0xa5, 0xd2, 0x66, + 0xf1, 0xc5, 0xf4, 0x45, 0x71, 0xef, 0xb2, 0x96, 0x55, 0x17, 0xd5, 0x55, 0x79, 0x2f, 0xd2, 0xa6, + 0xfc, 0x64, 0x14, 0x66, 0x23, 0x32, 0xe5, 0x76, 0x13, 
0x90, 0x98, 0x34, 0x80, 0xc4, 0x56, 0x61, + 0xa2, 0x65, 0x22, 0x87, 0x0d, 0xc4, 0xc5, 0x7f, 0xaa, 0x13, 0xb1, 0x8d, 0xc3, 0x06, 0x52, 0xc7, + 0xf7, 0x7d, 0xff, 0x64, 0x05, 0x26, 0xe2, 0x64, 0x3e, 0x66, 0xf9, 0x64, 0xfd, 0x35, 0x98, 0x6b, + 0x38, 0x68, 0xcf, 0xb4, 0x9b, 0x58, 0xa3, 0xd1, 0x1e, 0x19, 0x2d, 0xf8, 0x34, 0x85, 0x9f, 0x11, + 0x00, 0xeb, 0xac, 0x5f, 0xa0, 0x5e, 0x80, 0x49, 0x1a, 0x63, 0x58, 0x40, 0xf0, 0x90, 0x32, 0x14, + 0xa9, 0x40, 0xba, 0x6e, 0x93, 0x1e, 0x01, 0xbe, 0x02, 0x40, 0x63, 0x05, 0x5d, 0x1f, 0xd2, 0xe0, + 0x19, 0x99, 0x95, 0xb7, 0x7c, 0x24, 0x13, 0x6b, 0x99, 0xf9, 0xa8, 0x2b, 0x7e, 0xca, 0x6b, 0x50, + 0xc4, 0xae, 0x59, 0xdd, 0x3d, 0xd4, 0x7c, 0xb4, 0x46, 0x7a, 0xa0, 0x95, 0x67, 0xe8, 0x5e, 0x83, + 0xfc, 0xab, 0xf0, 0x52, 0x84, 0xa2, 0x86, 0xab, 0x3b, 0xc8, 0x68, 0xd6, 0x90, 0xe6, 0xda, 0x4c, + 0x2a, 0x34, 0xaf, 0xd8, 0x4d, 0xb7, 0x34, 0xd6, 0x5d, 0x84, 0x3b, 0x1d, 0x1a, 0x66, 0x9d, 0x13, + 0xdc, 0xb0, 0xa9, 0x10, 0x37, 0x18, 0xb5, 0x44, 0x4b, 0x9f, 0x48, 0xb4, 0xf4, 0x6f, 0x43, 0xce, + 0x33, 0x0f, 0xba, 0x74, 0x29, 0xe5, 0x69, 0x1a, 0x8a, 0xcf, 0xbe, 0x5e, 0x36, 0x8a, 0x98, 0x1c, + 0xb3, 0x5e, 0xcf, 0xd4, 0xe8, 0x5f, 0xf9, 0x6d, 0xc8, 0x07, 0x88, 0x37, 0x71, 0xa9, 0x40, 0xa9, + 0x57, 0x12, 0x92, 0x5c, 0x2c, 0xd9, 0x26, 0x56, 0x73, 0x7e, 0xba, 0x4d, 0x2c, 0x3f, 0x86, 0x62, + 0xd0, 0x3f, 0x4d, 0x84, 0x4b, 0x45, 0x2a, 0xca, 0x4b, 0x3d, 0x39, 0xa7, 0x89, 0xb0, 0x5a, 0xd8, + 0x0b, 0xb5, 0xc8, 0xaf, 0xc3, 0x51, 0x93, 0x98, 0x6f, 0x58, 0x8d, 0xc8, 0x22, 0x8e, 0x6a, 0x94, + 0xe4, 0x45, 0x69, 0x29, 0xab, 0x96, 0x4c, 0xbc, 0x1e, 0xd4, 0xca, 0x2d, 0xd6, 0x2f, 0xbf, 0x02, + 0xb3, 0x11, 0x4b, 0x76, 0x0f, 0x68, 0x16, 0x98, 0x64, 0x61, 0x2a, 0x68, 0xcd, 0x1b, 0x07, 0x24, + 0x27, 0x5c, 0x85, 0x19, 0x8e, 0xe0, 0x2d, 0x44, 0x78, 0xea, 0x98, 0xa2, 0x11, 0x75, 0x92, 0xf6, + 0xb6, 0x9c, 0x9c, 0x26, 0x92, 0x77, 0x61, 0x6a, 0x9f, 0x26, 0xab, 0x50, 0x82, 0x9b, 0xee, 0x3d, + 0xc1, 0xed, 0x47, 0xda, 0xee, 0xa5, 0xb3, 0xd9, 0xc2, 0xe8, 0xbd, 0x74, 0x76, 0xb4, 0x00, 0xf7, + 0xd2, 0x59, 0x28, 0x8c, 0xdd, 0x4b, 0x67, 0xc7, 0x0b, 0x13, 0xf7, 0xd2, 0xd9, 0x5c, 0x21, 0xaf, + 0xfc, 0x74, 0x08, 0x66, 0x49, 0x22, 0xf9, 0x32, 0x29, 0x3c, 0xc3, 0xa4, 0xf0, 0x7b, 0x59, 0x28, + 0x45, 0x85, 0xfa, 0x65, 0x56, 0xf8, 0x32, 0x2b, 0x3c, 0xf3, 0xac, 0x30, 0x9e, 0x68, 0xea, 0xb1, + 0xf1, 0x35, 0xf7, 0xcc, 0xe2, 0xeb, 0x2f, 0x66, 0xd2, 0x69, 0x13, 0xd5, 0x8b, 0xfd, 0x44, 0x75, + 0x39, 0x31, 0xaa, 0xc7, 0xc6, 0xdd, 0x89, 0x42, 0x4e, 0xf9, 0xbe, 0x04, 0x0b, 0x2a, 0xc2, 0xc8, + 0x0d, 0x25, 0x9e, 0x17, 0x10, 0x75, 0x95, 0x32, 0x1c, 0x8d, 0x67, 0x85, 0xc5, 0x2a, 0xe5, 0x47, + 0x29, 0x58, 0x54, 0x51, 0xd5, 0x76, 0x0c, 0xff, 0x46, 0x82, 0x7b, 0x77, 0x0f, 0x0c, 0xbf, 0x03, + 0x72, 0x74, 0x8b, 0xde, 0x3b, 0xe7, 0xc5, 0xc8, 0xde, 0x5c, 0x7e, 0x19, 0x64, 0xe1, 0x82, 0x46, + 0x38, 0x7c, 0x15, 0xbc, 0x1e, 0x11, 0x59, 0x66, 0x61, 0x84, 0xfa, 0xae, 0x17, 0xb1, 0x86, 0xc9, + 0xdf, 0x55, 0x43, 0x3e, 0x06, 0x20, 0xce, 0x62, 0x78, 0x60, 0x1a, 0x55, 0x47, 0x79, 0xcb, 0xaa, + 0x21, 0xbf, 0x07, 0xe3, 0x0d, 0xbb, 0x56, 0xf3, 0x8e, 0x52, 0x58, 0x4c, 0x7a, 0xad, 0xdf, 0x2d, + 0x1a, 0x3b, 0x49, 0x19, 0x23, 0x24, 0x85, 0x10, 0xbd, 0xcd, 0xe4, 0x48, 0x7f, 0x9b, 0x49, 0xe5, + 0xbf, 0xb3, 0x70, 0xa2, 0x8d, 0xaa, 0x78, 0xf2, 0x89, 0xe4, 0x0c, 0xa9, 0xef, 0x9c, 0xd1, 0x36, + 0x1f, 0x0c, 0xb5, 0xcd, 0x07, 0xbd, 0x29, 0x6d, 0x09, 0x0a, 0x09, 0xf9, 0x26, 0x87, 0x83, 0x74, + 0x23, 0x69, 0x2c, 0x13, 0x4d, 0x63, 0xbe, 0x73, 0xa4, 0xe1, 0xe0, 0x39, 0xd2, 0x35, 0x28, 0xf1, + 0xf8, 0xee, 0x3b, 0x45, 0xe2, 0xab, 0xc5, 0x11, 0xba, 0x5a, 0x9c, 0x61, 0xfd, 
0xad, 0x93, 0x21, + 0xbe, 0x56, 0x7c, 0x02, 0xb3, 0xae, 0xa3, 0x5b, 0xd8, 0x24, 0xc3, 0x06, 0x37, 0xf3, 0xec, 0x68, + 0xe5, 0x6b, 0x9d, 0x02, 0xee, 0x86, 0x40, 0xf7, 0x2b, 0x8f, 0x1e, 0x86, 0x4d, 0xbb, 0x71, 0x5d, + 0xf2, 0x36, 0x1c, 0x8b, 0x39, 0xf4, 0xf2, 0xa5, 0xba, 0xd1, 0x1e, 0x52, 0xdd, 0x7c, 0xc4, 0xaf, + 0x5a, 0x59, 0xef, 0x04, 0x8c, 0x07, 0x12, 0xce, 0x18, 0x4d, 0x38, 0x63, 0x9b, 0xbe, 0x4c, 0x73, + 0x07, 0x72, 0x2d, 0x75, 0xd2, 0xc3, 0xb6, 0xf1, 0x2e, 0x0f, 0xdb, 0x26, 0x3c, 0x3c, 0x7a, 0xb4, + 0xb6, 0x02, 0xe3, 0x42, 0xd3, 0x94, 0xcc, 0x44, 0x97, 0x64, 0xc6, 0x38, 0x16, 0x25, 0x62, 0xc3, + 0xc8, 0x93, 0x26, 0xe2, 0xd9, 0x2e, 0xb5, 0x34, 0x76, 0xe5, 0xad, 0x4a, 0x57, 0xf7, 0x2c, 0x95, + 0x8e, 0xde, 0x53, 0x79, 0x93, 0xd1, 0xbd, 0x65, 0xb9, 0xce, 0xa1, 0x2a, 0x46, 0x69, 0xb9, 0x6e, + 0xbe, 0xcf, 0x73, 0xa0, 0xd7, 0x20, 0xcb, 0x4f, 0xba, 0x49, 0x9a, 0x23, 0x2c, 0x9f, 0x08, 0xaa, + 0x4d, 0x5c, 0x53, 0x10, 0xfc, 0x07, 0x0c, 0x52, 0xf5, 0x50, 0x88, 0x45, 0xf3, 0xec, 0xcc, 0x73, + 0x98, 0xf8, 0x3b, 0xff, 0x1e, 0x8c, 0xfb, 0x59, 0x96, 0x0b, 0x90, 0xda, 0x45, 0x87, 0x3c, 0x40, + 0x93, 0x9f, 0xf2, 0x75, 0xc8, 0xec, 0xe9, 0xb5, 0x66, 0xc2, 0xda, 0x91, 0xde, 0xa1, 0xf8, 0xc3, + 0x00, 0xa1, 0x76, 0xa8, 0x32, 0x94, 0xeb, 0x43, 0xd7, 0x24, 0x96, 0xd8, 0x7c, 0x69, 0xe2, 0x46, + 0xd5, 0x35, 0xf7, 0x4c, 0xf7, 0xf0, 0xcb, 0x34, 0xd1, 0x6b, 0x9a, 0xf0, 0x4b, 0xee, 0x39, 0xa6, + 0x89, 0x7f, 0x4e, 0x8b, 0x34, 0x11, 0xab, 0x2a, 0x9e, 0x26, 0x1e, 0x42, 0x3e, 0x24, 0x2e, 0x9e, + 0x28, 0x4e, 0x07, 0xe7, 0xe2, 0x8b, 0x60, 0x7c, 0xff, 0x43, 0x45, 0xa8, 0xe6, 0x82, 0x22, 0x8d, + 0x38, 0xf6, 0x50, 0x3f, 0x8e, 0xed, 0x8b, 0xdc, 0xa9, 0x60, 0xe4, 0x46, 0x50, 0x16, 0x8b, 0x63, + 0xde, 0xa4, 0x85, 0x02, 0x52, 0xba, 0xcb, 0x01, 0x17, 0x38, 0x9d, 0x1b, 0x8c, 0xcc, 0x7a, 0x20, + 0x3c, 0x3d, 0x80, 0xe2, 0x0e, 0xd2, 0x1d, 0x77, 0x13, 0xe9, 0xae, 0x66, 0x20, 0x57, 0x37, 0x6b, + 0x98, 0xef, 0x1c, 0x3b, 0x9f, 0x9d, 0x17, 0x3c, 0xd4, 0x9b, 0x0c, 0x33, 0x9a, 0x8b, 0x87, 0xfb, + 0xce, 0xc5, 0x17, 0x7c, 0x8e, 0xe3, 0x39, 0x14, 0xb5, 0x91, 0xd1, 0x96, 0x37, 0x3c, 0x14, 0x1d, + 0x2d, 0x2b, 0xca, 0xf6, 0x19, 0xb1, 0x7c, 0x21, 0x67, 0x34, 0x10, 0x72, 0x94, 0xbf, 0x94, 0xe0, + 0x24, 0x33, 0xa3, 0x40, 0x24, 0xe5, 0x97, 0x06, 0x3d, 0x45, 0x03, 0x1b, 0x0a, 0xfc, 0xaa, 0x02, + 0x85, 0xee, 0xb0, 0x6e, 0x76, 0xf4, 0xa8, 0x2e, 0x58, 0x50, 0xf3, 0x82, 0x3a, 0x6f, 0x50, 0x7e, + 0x3c, 0x04, 0xa7, 0xda, 0x23, 0x72, 0xf7, 0xc0, 0xad, 0x15, 0x89, 0xb8, 0xb9, 0xe3, 0xfe, 0x71, + 0xf7, 0x59, 0xe5, 0x1a, 0xb2, 0xfd, 0x0c, 0xfa, 0x24, 0x82, 0x9c, 0xce, 0x5d, 0x96, 0xe6, 0x79, + 0x5c, 0x1a, 0xa2, 0xb9, 0xe2, 0xf5, 0x7e, 0xc3, 0x0b, 0x1f, 0x68, 0x42, 0xf7, 0x75, 0x61, 0xb2, + 0xd7, 0x71, 0xc8, 0x96, 0xc0, 0x3b, 0x1f, 0x09, 0x1f, 0xc4, 0xd0, 0x5e, 0xbf, 0xb7, 0xaf, 0x1a, + 0xca, 0x9f, 0x4b, 0x24, 0x01, 0x44, 0x24, 0x77, 0x5b, 0x37, 0x6b, 0x3d, 0xa9, 0x7c, 0x07, 0x72, + 0x5b, 0x14, 0x27, 0xa4, 0xf0, 0x1b, 0xfd, 0x28, 0x3c, 0x30, 0xba, 0x3a, 0xb1, 0xe5, 0xff, 0xab, + 0x9c, 0x24, 0x61, 0x30, 0x11, 0x85, 0x6f, 0x7f, 0xfe, 0x53, 0x82, 0xf9, 0x55, 0xec, 0x07, 0x78, + 0xa4, 0xd7, 0x4c, 0xe3, 0x45, 0x9c, 0x8f, 0x79, 0x2e, 0x9b, 0xea, 0xd3, 0x65, 0xe3, 0x33, 0x60, + 0x3a, 0x3e, 0x03, 0x2a, 0xd7, 0x60, 0x21, 0x76, 0xe2, 0xdc, 0x16, 0xe7, 0x20, 0x6b, 0x62, 0x6d, + 0x8f, 0xb4, 0xd1, 0x59, 0x67, 0xd5, 0x11, 0x13, 0x53, 0x10, 0x12, 0x00, 0x94, 0x68, 0x82, 0xb9, + 0x2b, 0x82, 0x5f, 0x0f, 0xb2, 0x6b, 0xf8, 0xc3, 0x6d, 0xd0, 0x1e, 0x56, 0xba, 0xb0, 0x87, 0x4e, + 0x2c, 0xf8, 0x22, 0xb2, 0x30, 0x8a, 0x35, 0x12, 0xbb, 0xda, 0xe0, 0xf1, 0xd9, 0x9f, 0x83, 0x42, + 0x55, 
0xb7, 0xaa, 0xc8, 0x4b, 0xf4, 0x48, 0x48, 0x21, 0xcf, 0xda, 0x55, 0xd1, 0xec, 0x0f, 0x87, + 0x7e, 0x9a, 0x2f, 0x28, 0x1c, 0xb6, 0x63, 0x21, 0x1a, 0x0e, 0xcf, 0x78, 0xd1, 0x30, 0x01, 0x8f, + 0x7b, 0x89, 0xcf, 0xf9, 0xfd, 0x80, 0xff, 0xf7, 0xce, 0x9f, 0x38, 0x7a, 0xb2, 0xf3, 0xc7, 0xa1, + 0xf0, 0x69, 0xfd, 0x05, 0x35, 0xe4, 0xe8, 0xfc, 0xa9, 0x86, 0x7b, 0x9a, 0xd8, 0xaf, 0x40, 0x2e, + 0x68, 0x2f, 0x3d, 0x58, 0x71, 0xa7, 0xf1, 0xd5, 0x89, 0x80, 0xc9, 0x29, 0xa7, 0xe3, 0xed, 0xcd, + 0x43, 0x0a, 0x44, 0x36, 0x3f, 0xc8, 0xff, 0xab, 0xc8, 0x16, 0x33, 0xf1, 0xce, 0x91, 0xed, 0xaf, + 0x87, 0xa0, 0xbc, 0x6e, 0x6e, 0x5b, 0x7a, 0x6d, 0x90, 0xb2, 0x9c, 0x2d, 0xc8, 0x61, 0x4a, 0x24, + 0x64, 0x0c, 0x6f, 0x74, 0xae, 0xcb, 0x69, 0x3b, 0xb6, 0x3a, 0xc1, 0xc8, 0x0a, 0x56, 0x4c, 0x58, + 0x40, 0x07, 0x2e, 0x72, 0xc8, 0x48, 0x31, 0x9b, 0xaa, 0x54, 0xaf, 0x1a, 0x9b, 0x13, 0xd4, 0x22, + 0x5d, 0x72, 0x05, 0x26, 0xab, 0x3b, 0x66, 0xcd, 0x68, 0x8d, 0x63, 0x5b, 0xb5, 0x43, 0xaa, 0x81, + 0xac, 0x5a, 0xa4, 0x5d, 0x02, 0xe9, 0x9b, 0x56, 0xed, 0x50, 0x39, 0x01, 0xc7, 0x13, 0xe7, 0xc2, + 0xed, 0xf3, 0xa7, 0x12, 0x9c, 0xe5, 0x30, 0xa6, 0xbb, 0x33, 0x70, 0x2d, 0xd4, 0xf7, 0x24, 0x98, + 0xe3, 0x52, 0xdf, 0x37, 0xdd, 0x1d, 0x2d, 0xae, 0x30, 0xea, 0x6e, 0xb7, 0x0a, 0xe8, 0xc4, 0x90, + 0x3a, 0x83, 0x83, 0x80, 0xc2, 0x37, 0x6f, 0xc0, 0x52, 0x67, 0x12, 0x6d, 0x6b, 0x4e, 0x94, 0x9f, + 0x48, 0x70, 0x5c, 0x45, 0x75, 0x7b, 0x0f, 0x31, 0x4a, 0x7d, 0x5e, 0xdb, 0x3d, 0xbf, 0x8d, 0x76, + 0x70, 0x87, 0x9c, 0x0a, 0xed, 0x90, 0x15, 0x85, 0xa4, 0x8a, 0x24, 0xf6, 0x85, 0xee, 0x87, 0xe0, + 0xc4, 0x06, 0x72, 0xea, 0xa6, 0xa5, 0xbb, 0x68, 0x10, 0xad, 0xdb, 0x50, 0x74, 0x05, 0x9d, 0x90, + 0xb2, 0x97, 0x3b, 0x2a, 0xbb, 0x23, 0x07, 0x6a, 0xc1, 0x23, 0xfe, 0x0b, 0xe0, 0x73, 0xa7, 0x40, + 0x69, 0x37, 0x23, 0x2e, 0xfa, 0xff, 0x91, 0xa0, 0x7c, 0x13, 0x91, 0xf4, 0x3e, 0x88, 0xdc, 0x9f, + 0x9f, 0x75, 0x9d, 0x83, 0x82, 0x47, 0x59, 0x6c, 0x40, 0xd9, 0xb6, 0xc4, 0xbb, 0x2f, 0xe2, 0x57, + 0x57, 0xf4, 0xc2, 0xac, 0x66, 0x63, 0x14, 0x2f, 0x21, 0x99, 0xf5, 0x85, 0xc3, 0x52, 0xe2, 0xdc, + 0xb9, 0x7c, 0xfe, 0x44, 0x82, 0x63, 0xf4, 0xc2, 0x64, 0xc0, 0xc2, 0x4c, 0xb6, 0xc3, 0xea, 0xb5, + 0x30, 0xb3, 0xed, 0xc8, 0xea, 0x38, 0x25, 0x2a, 0x62, 0xcd, 0xab, 0x50, 0x4e, 0x02, 0x6f, 0x1f, + 0x61, 0x7e, 0x37, 0x05, 0xa7, 0x39, 0x11, 0xb6, 0x6a, 0x18, 0x64, 0xaa, 0xf5, 0x84, 0x95, 0xcf, + 0xed, 0x2e, 0xe6, 0xda, 0x05, 0x0b, 0xa1, 0xc5, 0x8f, 0xfc, 0x9a, 0xcf, 0xff, 0x78, 0x4d, 0x66, + 0xf4, 0xb8, 0xaf, 0x24, 0x40, 0x56, 0x05, 0x84, 0x38, 0xf6, 0xeb, 0xe0, 0xbe, 0xe9, 0xe7, 0xef, + 0xbe, 0x99, 0x24, 0xf7, 0x5d, 0x82, 0x33, 0x9d, 0x24, 0xc2, 0x4d, 0xf4, 0xe7, 0x43, 0xb0, 0x20, + 0x8e, 0xad, 0xfc, 0x1b, 0xb8, 0xcf, 0x85, 0xff, 0x5e, 0x85, 0x19, 0x13, 0x6b, 0x31, 0xd5, 0xa2, + 0x54, 0x37, 0x59, 0x75, 0xd2, 0xc4, 0xb7, 0xc3, 0x65, 0xa0, 0xf2, 0x3d, 0x18, 0x63, 0xb2, 0x62, + 0xcb, 0xc4, 0x74, 0xaf, 0xcb, 0x44, 0xa0, 0xd8, 0xf4, 0xb7, 0x7c, 0x1f, 0xc6, 0x79, 0xbd, 0x32, + 0x23, 0x96, 0xe9, 0x95, 0xd8, 0x18, 0x43, 0xa7, 0x7f, 0x94, 0x32, 0x1c, 0x8d, 0x17, 0x35, 0xd7, + 0xc5, 0x7f, 0x48, 0x70, 0xf6, 0x11, 0x72, 0xcc, 0xad, 0xc3, 0xc8, 0xac, 0xbc, 0x93, 0xc5, 0xcf, + 0x85, 0x5e, 0x06, 0x5d, 0x83, 0x2b, 0xe7, 0x61, 0xa9, 0xf3, 0x44, 0xb9, 0x54, 0x3e, 0x4b, 0x91, + 0x8d, 0x25, 0xd9, 0x66, 0xaf, 0x10, 0xc5, 0x78, 0x5c, 0xf4, 0xb3, 0x29, 0xde, 0x80, 0x42, 0xb8, + 0xfe, 0xbc, 0x77, 0x81, 0xe4, 0x43, 0xf5, 0xe6, 0xc4, 0x3b, 0x39, 0xd5, 0x56, 0x14, 0xf1, 0xe2, + 0x47, 0x91, 0x75, 0x79, 0xd1, 0x63, 0xd5, 0x90, 0x55, 0xc8, 0x33, 0x0b, 0x1d, 0x20, 0x58, 0xe4, + 0xaa, 0x01, 0x61, 0xc8, 0x6b, 
0xde, 0x76, 0x9f, 0xde, 0x43, 0xd2, 0x33, 0xf8, 0x4c, 0x2f, 0x67, + 0xf0, 0xf9, 0x16, 0x3a, 0x3b, 0x84, 0xf7, 0x94, 0x3c, 0xdc, 0xe7, 0x46, 0xeb, 0x1a, 0x94, 0x22, + 0x62, 0x11, 0x59, 0x78, 0x84, 0x5f, 0xf8, 0x06, 0x65, 0xc3, 0x93, 0xb1, 0x72, 0x96, 0xe4, 0x94, + 0xb6, 0x1a, 0x17, 0x09, 0x36, 0x05, 0x17, 0x98, 0x21, 0xc5, 0x42, 0xd2, 0x40, 0x47, 0xe8, 0x7c, + 0x0e, 0x8c, 0x24, 0x46, 0xe9, 0xa9, 0x41, 0x95, 0x9e, 0x60, 0x78, 0xe9, 0x24, 0xc3, 0x6b, 0xa7, + 0x91, 0x4c, 0x3b, 0x8d, 0x0c, 0x6c, 0x0c, 0xca, 0x25, 0xa8, 0x74, 0xab, 0x28, 0xae, 0xdb, 0x3f, + 0x94, 0x60, 0xf1, 0x26, 0xc2, 0x55, 0xc7, 0xdc, 0x1c, 0x68, 0x79, 0xf9, 0x6d, 0x18, 0xe9, 0xf5, + 0x80, 0xa8, 0xd3, 0xb0, 0xaa, 0xa0, 0xa8, 0xfc, 0x4e, 0x1a, 0x4e, 0xb4, 0x81, 0xe6, 0x6b, 0xa7, + 0x77, 0xa1, 0xd0, 0xba, 0x74, 0xaf, 0xda, 0xd6, 0x96, 0xb9, 0xcd, 0x2f, 0x00, 0x2e, 0xc7, 0xf3, + 0x12, 0xab, 0xfe, 0x15, 0x8a, 0xa8, 0xe6, 0x51, 0xb0, 0x41, 0xde, 0x86, 0xd9, 0x98, 0xbb, 0x7d, + 0xfa, 0xac, 0x86, 0x4d, 0xf8, 0x62, 0x0f, 0x83, 0xb0, 0x22, 0x82, 0xfd, 0xb8, 0x66, 0xf9, 0x5d, + 0x90, 0x1b, 0xc8, 0x32, 0x4c, 0x6b, 0x5b, 0xe3, 0x97, 0x00, 0x26, 0xc2, 0xa5, 0x14, 0xbd, 0x56, + 0xb8, 0x90, 0x3c, 0xc6, 0x1a, 0xc3, 0x11, 0x87, 0x28, 0x74, 0x84, 0x62, 0x23, 0xd0, 0x68, 0x22, + 0x2c, 0x7f, 0x07, 0x0a, 0x82, 0x3a, 0x35, 0x73, 0x87, 0x56, 0x66, 0x12, 0xda, 0x57, 0x3b, 0xd2, + 0x0e, 0x1a, 0x15, 0x1d, 0x21, 0xdf, 0xf0, 0x75, 0x39, 0xc8, 0x92, 0x11, 0x4c, 0x0b, 0xfa, 0xc1, + 0xb5, 0x44, 0xa6, 0x93, 0x26, 0xf8, 0x20, 0x91, 0x5a, 0x8b, 0xc9, 0x46, 0xb4, 0x43, 0xf9, 0xf7, + 0x14, 0x94, 0x54, 0xfe, 0x2e, 0x0d, 0xd1, 0x48, 0x8a, 0x1f, 0x5d, 0xf9, 0x5c, 0x64, 0xed, 0x2d, + 0x98, 0x8e, 0x2b, 0x66, 0x15, 0x1a, 0xec, 0xa7, 0x9a, 0x75, 0x32, 0x5a, 0xcd, 0x8a, 0xe5, 0x6b, + 0x30, 0x4c, 0xf3, 0x0f, 0xe6, 0x59, 0x2d, 0xf1, 0xb2, 0xf3, 0xa6, 0xee, 0xea, 0xcb, 0x35, 0x7b, + 0x53, 0xe5, 0xf0, 0xf2, 0x6d, 0xc8, 0x59, 0x68, 0x9f, 0x16, 0xcb, 0x71, 0x0a, 0x99, 0x2e, 0x29, + 0x8c, 0x5b, 0x68, 0x5f, 0x6d, 0xb2, 0xcc, 0x85, 0xe5, 0x4d, 0x98, 0xdc, 0xd4, 0x31, 0x0a, 0x7b, + 0xc3, 0x70, 0x9b, 0xaa, 0x5d, 0xbf, 0xbe, 0x97, 0x75, 0x8c, 0x82, 0xc6, 0x54, 0xdc, 0x0c, 0x37, + 0x29, 0x0b, 0x30, 0x17, 0xa3, 0x66, 0x1e, 0xbb, 0xfe, 0x96, 0x6e, 0xfc, 0x78, 0xef, 0xdb, 0xfe, + 0x5a, 0x45, 0x61, 0x09, 0x5a, 0xa4, 0x1e, 0x92, 0x05, 0x84, 0x6b, 0xb1, 0xdc, 0xf9, 0x5e, 0x20, + 0xfa, 0xd5, 0x1d, 0x38, 0x0f, 0x09, 0xd5, 0x44, 0x9e, 0x86, 0x9c, 0x83, 0xea, 0xb6, 0x8b, 0xb4, + 0x6a, 0xad, 0x89, 0x5d, 0xe4, 0x50, 0x1b, 0x1a, 0x55, 0x27, 0x58, 0xeb, 0x0a, 0x6b, 0x8c, 0x58, + 0x64, 0x2a, 0x62, 0x91, 0xca, 0x22, 0xd9, 0x1b, 0xc6, 0xcf, 0x85, 0x4f, 0xf7, 0xf7, 0x25, 0x98, + 0x59, 0x3f, 0xb4, 0xaa, 0xeb, 0x3b, 0xba, 0x63, 0xf0, 0x52, 0x4a, 0x3e, 0xcf, 0xd3, 0x90, 0xe3, + 0xaf, 0xb1, 0x04, 0x1b, 0xcc, 0xe6, 0x27, 0x58, 0xab, 0x60, 0x63, 0x0e, 0xb2, 0x98, 0x20, 0x8b, + 0x62, 0xb0, 0x8c, 0x3a, 0x42, 0xff, 0xaf, 0x1a, 0xf2, 0x0d, 0x18, 0x63, 0x35, 0x9d, 0xec, 0x6a, + 0x3e, 0xd5, 0xe5, 0xd5, 0x3c, 0x30, 0x24, 0xd2, 0xac, 0xcc, 0xc1, 0x6c, 0x84, 0x3d, 0xce, 0xfa, + 0xdf, 0x0d, 0xc3, 0x24, 0xe9, 0x13, 0xd1, 0xa9, 0x07, 0x4f, 0x3d, 0x0e, 0x63, 0x9e, 0x0a, 0x39, + 0xdb, 0xa3, 0x2a, 0x88, 0xa6, 0x55, 0xc3, 0xb7, 0x65, 0x4e, 0xf9, 0x1f, 0x82, 0xf9, 0x6e, 0xc3, + 0xd3, 0x81, 0xdb, 0xf0, 0x84, 0xa3, 0xe9, 0x4c, 0x42, 0xd9, 0x49, 0xb4, 0x8e, 0x6a, 0xb8, 0xbf, + 0x3a, 0xaa, 0xb8, 0x8a, 0xb9, 0x91, 0xd8, 0x8a, 0xb9, 0x70, 0x61, 0x46, 0xb6, 0x9f, 0xc2, 0x8c, + 0x35, 0x5e, 0xde, 0xdd, 0xba, 0xad, 0xa3, 0xb4, 0x46, 0xbb, 0xa4, 0x55, 0x24, 0xc8, 0xde, 0x2d, + 0x1b, 0xa5, 0x78, 0x1d, 0x46, 0x44, 0x7d, 0x05, 0x74, 
0x59, 0x5f, 0x21, 0x10, 0xfc, 0x65, 0x22, + 0x63, 0xc1, 0x32, 0x91, 0x15, 0x18, 0x67, 0xc5, 0xbf, 0xfc, 0x29, 0xe5, 0x78, 0x97, 0x4f, 0x29, + 0xc7, 0x68, 0x4d, 0x30, 0x7f, 0x45, 0x79, 0x09, 0xe8, 0x2b, 0x48, 0x8d, 0xbf, 0xd8, 0x30, 0x0d, + 0x64, 0xb9, 0xa6, 0x7b, 0x48, 0x6b, 0xd5, 0x46, 0x55, 0x99, 0xf4, 0xb1, 0x87, 0x19, 0xab, 0xbc, + 0x47, 0x7e, 0x1b, 0xf2, 0xa1, 0x30, 0xcd, 0xcb, 0xb0, 0x2b, 0xbd, 0x05, 0x68, 0x35, 0x17, 0x0c, + 0xce, 0x49, 0x51, 0x31, 0xff, 0x2c, 0xa3, 0xe2, 0x0c, 0x4c, 0x05, 0xbd, 0x89, 0xbb, 0xd9, 0xf7, + 0x25, 0x58, 0x10, 0xeb, 0xa4, 0x17, 0xfc, 0x76, 0x44, 0xf9, 0x2f, 0x09, 0x8e, 0xc6, 0xf3, 0xc2, + 0x97, 0x6b, 0x3b, 0x30, 0x59, 0xd5, 0xab, 0x3b, 0x28, 0xf8, 0xc0, 0x7b, 0xe0, 0x00, 0x5d, 0xa4, + 0x44, 0xfd, 0x4d, 0xb2, 0x05, 0x33, 0x86, 0xee, 0xea, 0x54, 0x2d, 0xc1, 0xc1, 0x86, 0x06, 0x1c, + 0x6c, 0x4a, 0xd0, 0xf5, 0xb7, 0x2a, 0xff, 0x20, 0xc1, 0xbc, 0x98, 0x3a, 0x37, 0x8b, 0xbb, 0x36, + 0xf6, 0xdf, 0xb2, 0xef, 0xd8, 0xd8, 0xd5, 0x74, 0xc3, 0x70, 0x10, 0xc6, 0x42, 0x0b, 0xa4, 0xed, + 0x06, 0x6b, 0x6a, 0x17, 0xa8, 0x3b, 0xa7, 0x92, 0x84, 0xc5, 0x4d, 0x7a, 0xf0, 0xc5, 0x8d, 0xf2, + 0x2f, 0x3e, 0x03, 0x0b, 0xcc, 0x8c, 0xeb, 0xf4, 0x24, 0x4c, 0x50, 0x3e, 0xb1, 0x66, 0x35, 0xeb, + 0x9b, 0x3c, 0x0d, 0x65, 0xd4, 0x71, 0xd6, 0xf8, 0x90, 0xb6, 0xc9, 0x0b, 0x30, 0x2a, 0x26, 0xc7, + 0xca, 0x65, 0x32, 0x6a, 0x96, 0xcf, 0x0e, 0xcb, 0x8f, 0x21, 0xdf, 0x9a, 0x1e, 0x55, 0x65, 0xdb, + 0x57, 0xeb, 0x1e, 0x2c, 0x99, 0x82, 0x57, 0x4b, 0xb5, 0x42, 0xf0, 0xa8, 0xf3, 0xe4, 0xac, 0x40, + 0x1b, 0x8d, 0x43, 0x5c, 0xec, 0xac, 0x50, 0x50, 0xfc, 0xbd, 0x97, 0xce, 0xa6, 0x0b, 0x19, 0xa5, + 0x02, 0xc5, 0x95, 0x9a, 0x8d, 0x11, 0x4d, 0x62, 0x42, 0x61, 0x7e, 0x6d, 0x48, 0x01, 0x6d, 0x28, + 0x53, 0x20, 0xfb, 0xe1, 0xb9, 0x1f, 0xbe, 0x0c, 0xf9, 0x3b, 0xc8, 0xed, 0x96, 0xc6, 0x7b, 0x50, + 0x68, 0x41, 0x73, 0x41, 0xde, 0x07, 0xe0, 0xe0, 0x24, 0x78, 0x30, 0x9f, 0xb8, 0xd0, 0x8d, 0x99, + 0x52, 0x32, 0x74, 0xea, 0x4c, 0xc8, 0x34, 0x5e, 0xfc, 0xa3, 0x04, 0x45, 0x76, 0xc3, 0xe3, 0x3f, + 0x74, 0x4c, 0x66, 0x49, 0xbe, 0x0d, 0x59, 0xb2, 0x0e, 0xd9, 0x26, 0x61, 0x71, 0x88, 0xbe, 0xf1, + 0x38, 0xdf, 0xfe, 0x05, 0x09, 0xbb, 0xcf, 0x66, 0x18, 0xaa, 0x87, 0xeb, 0xaf, 0xd9, 0x4c, 0x05, + 0x6a, 0x36, 0x57, 0x21, 0xbf, 0x67, 0x62, 0x73, 0xd3, 0xac, 0xd1, 0xca, 0xa9, 0x5e, 0xaa, 0x01, + 0x73, 0x2d, 0x44, 0xba, 0xec, 0x98, 0x02, 0xd9, 0x3f, 0x37, 0xae, 0x82, 0x0f, 0x25, 0x38, 0x76, + 0x07, 0xb9, 0x6a, 0xeb, 0xdb, 0x15, 0xbc, 0x46, 0xd7, 0x5b, 0x33, 0xdd, 0x87, 0x61, 0x5a, 0x3c, + 0x4d, 0x1c, 0x30, 0x95, 0x68, 0x60, 0xbe, 0x8f, 0x5f, 0xb0, 0x13, 0x70, 0xef, 0x2f, 0x2d, 0xb3, + 0x56, 0x39, 0x0d, 0xe2, 0x96, 0x7c, 0xe9, 0x45, 0x6b, 0xfd, 0xf8, 0x3a, 0x65, 0x8c, 0xb7, 0x11, + 0xcb, 0x54, 0x7e, 0x38, 0x04, 0xe5, 0x24, 0x96, 0xb8, 0xda, 0xbf, 0x0b, 0x39, 0xa6, 0x12, 0xaf, + 0xf4, 0x98, 0xf1, 0xf6, 0x4e, 0x97, 0x15, 0x6c, 0xed, 0xc9, 0x33, 0xe3, 0x10, 0xad, 0xac, 0x60, + 0x9a, 0xf9, 0xab, 0x68, 0x9b, 0x3f, 0x04, 0x39, 0x0a, 0xe4, 0x2f, 0x51, 0xce, 0xb0, 0x12, 0xe5, + 0x07, 0xc1, 0x12, 0xe5, 0x57, 0x7b, 0x94, 0x9d, 0xc7, 0x59, 0xab, 0x6a, 0x59, 0x79, 0x1f, 0x16, + 0xef, 0x20, 0xf7, 0xe6, 0xfd, 0x37, 0xdb, 0xe8, 0xec, 0x11, 0x7f, 0x84, 0x46, 0xbc, 0x42, 0xc8, + 0xa6, 0xd7, 0xb1, 0xbd, 0x8d, 0x25, 0x7d, 0x97, 0x46, 0x7e, 0x61, 0xe5, 0xd7, 0x25, 0x38, 0xd1, + 0x66, 0x70, 0xae, 0x9d, 0xf7, 0xa0, 0xe8, 0x23, 0xcb, 0xeb, 0xfd, 0xa4, 0xf0, 0xe6, 0xb9, 0x6b, + 0x26, 0xd4, 0x82, 0x13, 0x6c, 0xc0, 0xca, 0x07, 0x12, 0x4c, 0xd1, 0x72, 0x6e, 0x11, 0x8d, 0x7b, + 0xc8, 0xdc, 0xdf, 0x0c, 0x9f, 0xc0, 0x7c, 0xa5, 0xe3, 0x09, 0x4c, 0xdc, 0x50, 
0xad, 0x53, 0x97, + 0x5d, 0x98, 0x0e, 0x01, 0x70, 0x39, 0xa8, 0x90, 0x0d, 0x55, 0x58, 0x7e, 0xb5, 0xd7, 0xa1, 0x78, + 0x99, 0xa3, 0x47, 0x47, 0xf9, 0x6d, 0x09, 0xa6, 0x54, 0xa4, 0x37, 0x1a, 0x35, 0x76, 0x52, 0x8a, + 0x7b, 0x98, 0xf9, 0x7a, 0x78, 0xe6, 0xf1, 0x2f, 0x3b, 0xfc, 0xdf, 0x79, 0x61, 0xea, 0x88, 0x0e, + 0xd7, 0x9a, 0xfd, 0x2c, 0x4c, 0x87, 0x00, 0x38, 0xa7, 0x7f, 0x36, 0x04, 0xd3, 0xcc, 0x56, 0xc2, + 0xd6, 0x79, 0x0b, 0xd2, 0xde, 0xf3, 0x9d, 0x9c, 0xff, 0xa8, 0x23, 0x2e, 0x62, 0xde, 0x44, 0xba, + 0x71, 0x1f, 0xb9, 0x2e, 0x72, 0x68, 0xe5, 0x27, 0xad, 0x1f, 0xa6, 0xe8, 0xed, 0x92, 0x7f, 0x74, + 0x9f, 0x97, 0x8a, 0xdb, 0xe7, 0xbd, 0x0a, 0x25, 0xd3, 0x22, 0x10, 0xe6, 0x1e, 0xd2, 0x90, 0xe5, + 0x85, 0x93, 0xd6, 0xb1, 0xe5, 0xb4, 0xd7, 0x7f, 0xcb, 0x12, 0xce, 0xbe, 0x6a, 0xc8, 0xe7, 0xa1, + 0x58, 0xd7, 0x0f, 0xcc, 0x7a, 0xb3, 0xae, 0x35, 0x08, 0x3c, 0x36, 0xdf, 0x67, 0x1f, 0x69, 0xc9, + 0xa8, 0x79, 0xde, 0xb1, 0xa6, 0x6f, 0xa3, 0x75, 0xf3, 0x7d, 0x24, 0x9f, 0x81, 0x3c, 0x7d, 0xd7, + 0x43, 0x01, 0xd9, 0x33, 0x94, 0x61, 0xfa, 0x0c, 0x85, 0x3e, 0xf7, 0x21, 0x60, 0x34, 0x42, 0x2a, + 0x1f, 0xb3, 0x0f, 0x5c, 0x04, 0xe4, 0xc5, 0x0d, 0xe9, 0x19, 0x09, 0x2c, 0xd6, 0x2f, 0x87, 0x9e, + 0xa1, 0x5f, 0xc6, 0xcd, 0x35, 0x15, 0x33, 0x57, 0xb9, 0x0e, 0x33, 0x11, 0x4e, 0x58, 0x0a, 0x4f, + 0x0f, 0x16, 0xab, 0xa6, 0xc2, 0x2c, 0xd1, 0xbc, 0xfe, 0x4f, 0x12, 0xcc, 0xae, 0x35, 0x9d, 0x6d, + 0xf4, 0x45, 0x34, 0x46, 0x65, 0x1e, 0x4a, 0xd1, 0xc9, 0x89, 0xf2, 0xc6, 0x21, 0x98, 0x7d, 0x80, + 0xbe, 0xa0, 0x33, 0x7f, 0x2e, 0x6e, 0xb8, 0x0c, 0xa5, 0xa8, 0xc0, 0xb8, 0x1f, 0xc6, 0xd0, 0x90, + 0xe2, 0x68, 0xfc, 0x90, 0xbe, 0x92, 0xdd, 0x72, 0x10, 0xde, 0xf1, 0x9f, 0xc6, 0xf6, 0x12, 0xab, + 0xbf, 0x15, 0x8e, 0xd5, 0xdf, 0xe8, 0x32, 0x56, 0x27, 0x8e, 0xda, 0x0a, 0xd9, 0xf4, 0xe1, 0x6c, + 0x1c, 0x1c, 0x37, 0x9a, 0x1f, 0x48, 0x70, 0xfe, 0x0e, 0xb2, 0x90, 0xa3, 0xbb, 0xe8, 0xbe, 0x8e, + 0x45, 0xbd, 0x7c, 0xc8, 0xdf, 0xf0, 0x8b, 0xd8, 0x2d, 0x7f, 0x20, 0xc1, 0x4b, 0x5d, 0xb1, 0xc6, + 0x35, 0xf6, 0x0a, 0xcc, 0xd0, 0x1d, 0xac, 0xc6, 0x1e, 0x22, 0xf2, 0x2b, 0x8f, 0x26, 0x7f, 0x12, + 0x94, 0x52, 0xa7, 0x68, 0xef, 0x86, 0xd7, 0xb9, 0x42, 0xfa, 0x88, 0xdd, 0x8a, 0x33, 0xe9, 0x1a, + 0xb2, 0xb6, 0xdd, 0x1d, 0xfe, 0x24, 0x74, 0x82, 0xb7, 0xde, 0xa7, 0x8d, 0xca, 0x6d, 0x58, 0x08, + 0x2e, 0x24, 0x83, 0x87, 0x8d, 0x67, 0x21, 0x1f, 0x3c, 0xf3, 0x64, 0x8b, 0xa0, 0x51, 0x35, 0x17, + 0x38, 0xf4, 0xc4, 0x4a, 0x13, 0x8e, 0xc6, 0xd3, 0xe1, 0x93, 0x78, 0x0b, 0x86, 0xd9, 0xc6, 0x90, + 0x2f, 0xa2, 0x5e, 0xeb, 0x72, 0x95, 0xcb, 0xb7, 0x4a, 0x61, 0xb2, 0x9c, 0x98, 0xf2, 0x57, 0xc3, + 0x30, 0x13, 0x0f, 0xd2, 0x6e, 0xcb, 0xf3, 0x15, 0x98, 0xad, 0xeb, 0x07, 0x5a, 0x38, 0x7c, 0xb7, + 0xde, 0xcd, 0x4e, 0xd5, 0xf5, 0x83, 0x70, 0x68, 0x36, 0xe4, 0xfb, 0x50, 0x60, 0x14, 0x6b, 0x76, + 0x55, 0xaf, 0x75, 0x7b, 0x78, 0x3a, 0x4c, 0x76, 0x32, 0x25, 0x49, 0x65, 0xab, 0xfd, 0xfb, 0x04, + 0x95, 0x1e, 0xb1, 0xbd, 0x1f, 0x15, 0x2d, 0x4b, 0x1c, 0x6f, 0x0e, 0x24, 0x9a, 0x8a, 0x1a, 0x50, + 0x0c, 0x5b, 0xf9, 0x87, 0xb4, 0x25, 0xff, 0x86, 0x04, 0x93, 0x3b, 0xba, 0x65, 0xd8, 0x7b, 0x7c, + 0x0f, 0x43, 0x8d, 0x9c, 0xec, 0x93, 0x7b, 0x79, 0xaf, 0x99, 0xc0, 0xc0, 0x5d, 0x4e, 0xd8, 0xdb, + 0xa2, 0x73, 0x26, 0xe4, 0x9d, 0x48, 0x87, 0xdc, 0x80, 0x53, 0xb1, 0x9a, 0x08, 0x6f, 0x18, 0xbb, + 0x3d, 0x87, 0x5d, 0x8c, 0x2a, 0xee, 0x51, 0x60, 0x0b, 0x39, 0xff, 0x81, 0x04, 0x93, 0x31, 0x22, + 0x8a, 0x79, 0x9a, 0xf9, 0x38, 0xb8, 0xef, 0xb9, 0x33, 0x90, 0x54, 0xd6, 0x90, 0xc3, 0xc7, 0xf3, + 0xed, 0x83, 0xe6, 0xbf, 0x27, 0xc1, 0x6c, 0x82, 0xb8, 0x62, 0x18, 0x52, 0x83, 0x0c, 0x7d, 0xbd, + 0x4b, 
0x86, 0x22, 0x03, 0xd0, 0x55, 0x86, 0x6f, 0x37, 0xf6, 0x0e, 0x4c, 0xc7, 0xc2, 0xc8, 0x6f, + 0xc0, 0x51, 0xcf, 0x4a, 0xe2, 0x9c, 0x85, 0xc5, 0x9f, 0x39, 0x01, 0x13, 0xf1, 0x18, 0xe5, 0x8f, + 0x24, 0x58, 0xec, 0x24, 0x0f, 0x59, 0x81, 0x09, 0xbd, 0xba, 0x8b, 0x8c, 0x10, 0xd9, 0x31, 0xda, + 0xc8, 0x5d, 0xef, 0x31, 0xcc, 0xfb, 0x60, 0xc2, 0xd6, 0xd1, 0xed, 0x6b, 0xc6, 0x59, 0x8f, 0x64, + 0xd0, 0x28, 0x94, 0xdf, 0x92, 0x60, 0x5e, 0x45, 0x9b, 0x4d, 0xb3, 0x66, 0xbc, 0xe8, 0xb3, 0xd4, + 0x63, 0x24, 0xed, 0xc6, 0x70, 0xc2, 0xf3, 0xda, 0x8f, 0x87, 0xe0, 0x74, 0xb0, 0x48, 0xb2, 0x35, + 0x15, 0x76, 0xe1, 0xff, 0x22, 0x9e, 0x10, 0xac, 0xc1, 0xa4, 0xff, 0xee, 0x8d, 0x7f, 0x84, 0xa5, + 0xeb, 0x9b, 0xa5, 0xa2, 0xef, 0xa2, 0x8d, 0x7d, 0x71, 0x25, 0x40, 0x91, 0x96, 0x8a, 0xf6, 0x76, + 0x70, 0xe4, 0x51, 0xa4, 0x27, 0x76, 0x54, 0xc7, 0x4b, 0x70, 0xa6, 0x93, 0xe0, 0xb8, 0x8c, 0xff, + 0x40, 0x82, 0xf2, 0x5b, 0x0d, 0x63, 0xc0, 0xe2, 0xe7, 0x5f, 0x0e, 0xaf, 0x7e, 0x3a, 0x3f, 0x30, + 0x68, 0x3f, 0x68, 0x6b, 0xf1, 0xf3, 0x5d, 0x38, 0x9e, 0x08, 0xea, 0x15, 0x48, 0x84, 0xf7, 0xed, + 0xdf, 0xe8, 0x7f, 0xf8, 0xc8, 0x0e, 0xfe, 0x4f, 0x25, 0x58, 0x5a, 0x77, 0x1d, 0xa4, 0xd7, 0x5b, + 0xdb, 0xfc, 0xc4, 0x83, 0x9c, 0x06, 0xcc, 0xe0, 0x43, 0xab, 0x1a, 0x88, 0x20, 0x9d, 0xcf, 0xff, + 0x43, 0x1b, 0xa5, 0xf5, 0x43, 0xab, 0x1a, 0x0a, 0x22, 0xe8, 0xee, 0x11, 0x75, 0x0a, 0xc7, 0xb4, + 0x2f, 0x8f, 0x03, 0xe8, 0xae, 0xeb, 0x98, 0x9b, 0x4d, 0x17, 0x61, 0xb2, 0x14, 0x3c, 0xd7, 0x05, + 0xb3, 0x5c, 0x70, 0x8f, 0x7d, 0xdf, 0x02, 0x90, 0xc2, 0x7a, 0x4b, 0xe6, 0xaf, 0x0d, 0xe9, 0xbb, + 0x47, 0x5a, 0xdf, 0x0a, 0x08, 0xb1, 0xf6, 0xc7, 0x12, 0x28, 0xfe, 0x4f, 0x94, 0x78, 0x32, 0x67, + 0xaa, 0xe8, 0xc1, 0xda, 0x1e, 0x87, 0xad, 0x6d, 0xa5, 0xa7, 0x6f, 0xa3, 0xc4, 0x0f, 0xdc, 0xb2, + 0xb8, 0xdf, 0x94, 0xe0, 0x64, 0x5b, 0x78, 0xef, 0xd8, 0x2c, 0x6c, 0x76, 0x37, 0x07, 0xe3, 0x23, + 0x6c, 0x7a, 0xcb, 0x8d, 0x8f, 0x3e, 0x29, 0x1f, 0xf9, 0xf8, 0x93, 0xf2, 0x91, 0xcf, 0x3e, 0x29, + 0x4b, 0xbf, 0xf6, 0xb4, 0x2c, 0xfd, 0xe8, 0x69, 0x59, 0xfa, 0x9b, 0xa7, 0x65, 0xe9, 0xa3, 0xa7, + 0x65, 0xe9, 0xdf, 0x9e, 0x96, 0xa5, 0x9f, 0x3d, 0x2d, 0x1f, 0xf9, 0xec, 0x69, 0x59, 0xfa, 0xf0, + 0xd3, 0xf2, 0x91, 0x8f, 0x3e, 0x2d, 0x1f, 0xf9, 0xf8, 0xd3, 0xf2, 0x91, 0x6f, 0x5d, 0xdf, 0xb6, + 0x5b, 0x7c, 0x98, 0x76, 0xdb, 0xaf, 0x85, 0xff, 0x52, 0xb0, 0x65, 0x73, 0x98, 0x46, 0x99, 0xab, + 0xff, 0x1b, 0x00, 0x00, 0xff, 0xff, 0xc7, 0x8f, 0xf1, 0x0e, 0x6c, 0x5c, 0x00, 0x00, } func (this *StartWorkflowExecutionRequest) Equal(that interface{}) bool { @@ -6332,6 +6603,9 @@ if !bytes.Equal(this.CurrentBranchToken, that1.CurrentBranchToken) { return false } + if !this.VersionHistoryItem.Equal(that1.VersionHistoryItem) { + return false + } return true } func (this *GetMutableStateResponse) Equal(that interface{}) bool { @@ -6440,6 +6714,9 @@ if !bytes.Equal(this.CurrentBranchToken, that1.CurrentBranchToken) { return false } + if !this.VersionHistoryItem.Equal(that1.VersionHistoryItem) { + return false + } return true } func (this *PollMutableStateResponse) Equal(that interface{}) bool { @@ -6683,6 +6960,9 @@ return false } } + if this.Version != that1.Version { + return false + } return true } func (this *RecordActivityTaskStartedRequest) Equal(that interface{}) bool { @@ -6778,6 +7058,9 @@ if !this.Clock.Equal(that1.Clock) { return false } + if this.Version != that1.Version { + return false + } return true } func (this *RespondWorkflowTaskCompletedRequest) Equal(that interface{}) bool { @@ -6890,6 +7173,63 @@ } return true } +func (this *IsWorkflowTaskValidRequest) Equal(that 
interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*IsWorkflowTaskValidRequest) + if !ok { + that2, ok := that.(IsWorkflowTaskValidRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.NamespaceId != that1.NamespaceId { + return false + } + if !this.Execution.Equal(that1.Execution) { + return false + } + if !this.Clock.Equal(that1.Clock) { + return false + } + if this.ScheduledEventId != that1.ScheduledEventId { + return false + } + return true +} +func (this *IsWorkflowTaskValidResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*IsWorkflowTaskValidResponse) + if !ok { + that2, ok := that.(IsWorkflowTaskValidResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.IsValid != that1.IsValid { + return false + } + return true +} func (this *RecordActivityTaskHeartbeatRequest) Equal(that interface{}) bool { if that == nil { return this == nil @@ -7085,6 +7425,63 @@ } return true } +func (this *IsActivityTaskValidRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*IsActivityTaskValidRequest) + if !ok { + that2, ok := that.(IsActivityTaskValidRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.NamespaceId != that1.NamespaceId { + return false + } + if !this.Execution.Equal(that1.Execution) { + return false + } + if !this.Clock.Equal(that1.Clock) { + return false + } + if this.ScheduledEventId != that1.ScheduledEventId { + return false + } + return true +} +func (this *IsActivityTaskValidResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*IsActivityTaskValidResponse) + if !ok { + that2, ok := that.(IsActivityTaskValidResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.IsValid != that1.IsValid { + return false + } + return true +} func (this *SignalWorkflowExecutionRequest) Equal(that interface{}) bool { if that == nil { return this == nil @@ -7587,13 +7984,13 @@ if this.NamespaceId != that1.NamespaceId { return false } - if !this.WorkflowExecution.Equal(that1.WorkflowExecution) { + if !this.ParentExecution.Equal(that1.ParentExecution) { return false } if this.ParentInitiatedId != that1.ParentInitiatedId { return false } - if !this.CompletedExecution.Equal(that1.CompletedExecution) { + if !this.ChildExecution.Equal(that1.ChildExecution) { return false } if !this.CompletionEvent.Equal(that1.CompletionEvent) { @@ -8821,6 +9218,9 @@ if this.StateTransitionCount != that1.StateTransitionCount { return false } + if this.HistoryLength != that1.HistoryLength { + return false + } return true } func (this *GetReplicationStatusRequest) Equal(that interface{}) bool { @@ -9362,7 +9762,7 @@ if this == nil { return "nil" } - s := make([]string, 0, 8) + s := make([]string, 0, 9) s = append(s, "&historyservice.GetMutableStateRequest{") s = append(s, "NamespaceId: "+fmt.Sprintf("%#v", this.NamespaceId)+",\n") if this.Execution != nil { @@ -9370,6 +9770,9 @@ } s = append(s, "ExpectedNextEventId: "+fmt.Sprintf("%#v", this.ExpectedNextEventId)+",\n") s = append(s, 
"CurrentBranchToken: "+fmt.Sprintf("%#v", this.CurrentBranchToken)+",\n") + if this.VersionHistoryItem != nil { + s = append(s, "VersionHistoryItem: "+fmt.Sprintf("%#v", this.VersionHistoryItem)+",\n") + } s = append(s, "}") return strings.Join(s, "") } @@ -9414,7 +9817,7 @@ if this == nil { return "nil" } - s := make([]string, 0, 8) + s := make([]string, 0, 9) s = append(s, "&historyservice.PollMutableStateRequest{") s = append(s, "NamespaceId: "+fmt.Sprintf("%#v", this.NamespaceId)+",\n") if this.Execution != nil { @@ -9422,6 +9825,9 @@ } s = append(s, "ExpectedNextEventId: "+fmt.Sprintf("%#v", this.ExpectedNextEventId)+",\n") s = append(s, "CurrentBranchToken: "+fmt.Sprintf("%#v", this.CurrentBranchToken)+",\n") + if this.VersionHistoryItem != nil { + s = append(s, "VersionHistoryItem: "+fmt.Sprintf("%#v", this.VersionHistoryItem)+",\n") + } s = append(s, "}") return strings.Join(s, "") } @@ -9506,7 +9912,7 @@ if this == nil { return "nil" } - s := make([]string, 0, 19) + s := make([]string, 0, 20) s = append(s, "&historyservice.RecordWorkflowTaskStartedResponse{") if this.WorkflowType != nil { s = append(s, "WorkflowType: "+fmt.Sprintf("%#v", this.WorkflowType)+",\n") @@ -9545,6 +9951,7 @@ if this.Messages != nil { s = append(s, "Messages: "+fmt.Sprintf("%#v", this.Messages)+",\n") } + s = append(s, "Version: "+fmt.Sprintf("%#v", this.Version)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -9574,7 +9981,7 @@ if this == nil { return "nil" } - s := make([]string, 0, 12) + s := make([]string, 0, 13) s = append(s, "&historyservice.RecordActivityTaskStartedResponse{") if this.ScheduledEvent != nil { s = append(s, "ScheduledEvent: "+fmt.Sprintf("%#v", this.ScheduledEvent)+",\n") @@ -9592,6 +9999,7 @@ if this.Clock != nil { s = append(s, "Clock: "+fmt.Sprintf("%#v", this.Clock)+",\n") } + s = append(s, "Version: "+fmt.Sprintf("%#v", this.Version)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -9646,6 +10054,33 @@ s = append(s, "}") return strings.Join(s, "") } +func (this *IsWorkflowTaskValidRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&historyservice.IsWorkflowTaskValidRequest{") + s = append(s, "NamespaceId: "+fmt.Sprintf("%#v", this.NamespaceId)+",\n") + if this.Execution != nil { + s = append(s, "Execution: "+fmt.Sprintf("%#v", this.Execution)+",\n") + } + if this.Clock != nil { + s = append(s, "Clock: "+fmt.Sprintf("%#v", this.Clock)+",\n") + } + s = append(s, "ScheduledEventId: "+fmt.Sprintf("%#v", this.ScheduledEventId)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *IsWorkflowTaskValidResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&historyservice.IsWorkflowTaskValidResponse{") + s = append(s, "IsValid: "+fmt.Sprintf("%#v", this.IsValid)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} func (this *RecordActivityTaskHeartbeatRequest) GoString() string { if this == nil { return "nil" @@ -9735,6 +10170,33 @@ s = append(s, "}") return strings.Join(s, "") } +func (this *IsActivityTaskValidRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&historyservice.IsActivityTaskValidRequest{") + s = append(s, "NamespaceId: "+fmt.Sprintf("%#v", this.NamespaceId)+",\n") + if this.Execution != nil { + s = append(s, "Execution: "+fmt.Sprintf("%#v", this.Execution)+",\n") + } + if this.Clock != nil { + s = append(s, "Clock: "+fmt.Sprintf("%#v", 
this.Clock)+",\n") + } + s = append(s, "ScheduledEventId: "+fmt.Sprintf("%#v", this.ScheduledEventId)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *IsActivityTaskValidResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&historyservice.IsActivityTaskValidResponse{") + s = append(s, "IsValid: "+fmt.Sprintf("%#v", this.IsValid)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} func (this *SignalWorkflowExecutionRequest) GoString() string { if this == nil { return "nil" @@ -9968,12 +10430,12 @@ s := make([]string, 0, 11) s = append(s, "&historyservice.RecordChildExecutionCompletedRequest{") s = append(s, "NamespaceId: "+fmt.Sprintf("%#v", this.NamespaceId)+",\n") - if this.WorkflowExecution != nil { - s = append(s, "WorkflowExecution: "+fmt.Sprintf("%#v", this.WorkflowExecution)+",\n") + if this.ParentExecution != nil { + s = append(s, "ParentExecution: "+fmt.Sprintf("%#v", this.ParentExecution)+",\n") } s = append(s, "ParentInitiatedId: "+fmt.Sprintf("%#v", this.ParentInitiatedId)+",\n") - if this.CompletedExecution != nil { - s = append(s, "CompletedExecution: "+fmt.Sprintf("%#v", this.CompletedExecution)+",\n") + if this.ChildExecution != nil { + s = append(s, "ChildExecution: "+fmt.Sprintf("%#v", this.ChildExecution)+",\n") } if this.CompletionEvent != nil { s = append(s, "CompletionEvent: "+fmt.Sprintf("%#v", this.CompletionEvent)+",\n") @@ -10525,9 +10987,10 @@ if this == nil { return "nil" } - s := make([]string, 0, 5) + s := make([]string, 0, 6) s = append(s, "&historyservice.GenerateLastHistoryReplicationTasksResponse{") s = append(s, "StateTransitionCount: "+fmt.Sprintf("%#v", this.StateTransitionCount)+",\n") + s = append(s, "HistoryLength: "+fmt.Sprintf("%#v", this.HistoryLength)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -10951,6 +11414,18 @@ _ = i var l int _ = l + if m.VersionHistoryItem != nil { + { + size, err := m.VersionHistoryItem.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRequestResponse(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } if len(m.CurrentBranchToken) > 0 { i -= len(m.CurrentBranchToken) copy(dAtA[i:], m.CurrentBranchToken) @@ -11081,12 +11556,12 @@ dAtA[i] = 0x6a } if m.StickyTaskQueueScheduleToStartTimeout != nil { - n13, err13 := github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.StickyTaskQueueScheduleToStartTimeout, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.StickyTaskQueueScheduleToStartTimeout):]) - if err13 != nil { - return 0, err13 + n14, err14 := github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.StickyTaskQueueScheduleToStartTimeout, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.StickyTaskQueueScheduleToStartTimeout):]) + if err14 != nil { + return 0, err14 } - i -= n13 - i = encodeVarintRequestResponse(dAtA, i, uint64(n13)) + i -= n14 + i = encodeVarintRequestResponse(dAtA, i, uint64(n14)) i-- dAtA[i] = 0x5a } @@ -11176,6 +11651,18 @@ _ = i var l int _ = l + if m.VersionHistoryItem != nil { + { + size, err := m.VersionHistoryItem.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRequestResponse(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } if len(m.CurrentBranchToken) > 0 { i -= len(m.CurrentBranchToken) copy(dAtA[i:], m.CurrentBranchToken) @@ -11278,12 +11765,12 @@ dAtA[i] = 0x62 } if m.StickyTaskQueueScheduleToStartTimeout != nil { - n20, err20 := 
github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.StickyTaskQueueScheduleToStartTimeout, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.StickyTaskQueueScheduleToStartTimeout):]) - if err20 != nil { - return 0, err20 + n22, err22 := github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.StickyTaskQueueScheduleToStartTimeout, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.StickyTaskQueueScheduleToStartTimeout):]) + if err22 != nil { + return 0, err22 } - i -= n20 - i = encodeVarintRequestResponse(dAtA, i, uint64(n20)) + i -= n22 + i = encodeVarintRequestResponse(dAtA, i, uint64(n22)) i-- dAtA[i] = 0x5a } @@ -11521,6 +12008,13 @@ _ = i var l int _ = l + if m.Version != 0 { + i = encodeVarintRequestResponse(dAtA, i, uint64(m.Version)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x88 + } if len(m.Messages) > 0 { for iNdEx := len(m.Messages) - 1; iNdEx >= 0; iNdEx-- { { @@ -11576,22 +12070,22 @@ } } if m.StartedTime != nil { - n31, err31 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.StartedTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.StartedTime):]) - if err31 != nil { - return 0, err31 + n33, err33 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.StartedTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.StartedTime):]) + if err33 != nil { + return 0, err33 } - i -= n31 - i = encodeVarintRequestResponse(dAtA, i, uint64(n31)) + i -= n33 + i = encodeVarintRequestResponse(dAtA, i, uint64(n33)) i-- dAtA[i] = 0x6a } if m.ScheduledTime != nil { - n32, err32 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.ScheduledTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.ScheduledTime):]) - if err32 != nil { - return 0, err32 + n34, err34 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.ScheduledTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.ScheduledTime):]) + if err34 != nil { + return 0, err34 } - i -= n32 - i = encodeVarintRequestResponse(dAtA, i, uint64(n32)) + i -= n34 + i = encodeVarintRequestResponse(dAtA, i, uint64(n34)) i-- dAtA[i] = 0x62 } @@ -11779,6 +12273,11 @@ _ = i var l int _ = l + if m.Version != 0 { + i = encodeVarintRequestResponse(dAtA, i, uint64(m.Version)) + i-- + dAtA[i] = 0x48 + } if m.Clock != nil { { size, err := m.Clock.MarshalToSizedBuffer(dAtA[:i]) @@ -11823,12 +12322,12 @@ dAtA[i] = 0x2a } if m.CurrentAttemptScheduledTime != nil { - n42, err42 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.CurrentAttemptScheduledTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.CurrentAttemptScheduledTime):]) - if err42 != nil { - return 0, err42 + n44, err44 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.CurrentAttemptScheduledTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.CurrentAttemptScheduledTime):]) + if err44 != nil { + return 0, err44 } - i -= n42 - i = encodeVarintRequestResponse(dAtA, i, uint64(n42)) + i -= n44 + i = encodeVarintRequestResponse(dAtA, i, uint64(n44)) i-- dAtA[i] = 0x22 } @@ -11838,12 +12337,12 @@ dAtA[i] = 0x18 } if m.StartedTime != nil { - n43, err43 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.StartedTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.StartedTime):]) - if err43 != nil { - return 0, err43 + n45, err45 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.StartedTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.StartedTime):]) + if err45 != nil { + return 0, err45 } - i -= n43 - i = encodeVarintRequestResponse(dAtA, i, uint64(n43)) + i -= n45 + i = encodeVarintRequestResponse(dAtA, i, 
uint64(n45)) i-- dAtA[i] = 0x12 } @@ -12023,6 +12522,98 @@ return len(dAtA) - i, nil } +func (m *IsWorkflowTaskValidRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IsWorkflowTaskValidRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IsWorkflowTaskValidRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ScheduledEventId != 0 { + i = encodeVarintRequestResponse(dAtA, i, uint64(m.ScheduledEventId)) + i-- + dAtA[i] = 0x20 + } + if m.Clock != nil { + { + size, err := m.Clock.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRequestResponse(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Execution != nil { + { + size, err := m.Execution.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRequestResponse(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.NamespaceId) > 0 { + i -= len(m.NamespaceId) + copy(dAtA[i:], m.NamespaceId) + i = encodeVarintRequestResponse(dAtA, i, uint64(len(m.NamespaceId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *IsWorkflowTaskValidResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IsWorkflowTaskValidResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IsWorkflowTaskValidResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.IsValid { + i-- + if m.IsValid { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + func (m *RecordActivityTaskHeartbeatRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -12293,6 +12884,98 @@ return len(dAtA) - i, nil } +func (m *IsActivityTaskValidRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IsActivityTaskValidRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IsActivityTaskValidRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ScheduledEventId != 0 { + i = encodeVarintRequestResponse(dAtA, i, uint64(m.ScheduledEventId)) + i-- + dAtA[i] = 0x20 + } + if m.Clock != nil { + { + size, err := m.Clock.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRequestResponse(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Execution != nil { + { + size, err := m.Execution.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRequestResponse(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.NamespaceId) > 0 { + i -= len(m.NamespaceId) + copy(dAtA[i:], m.NamespaceId) + i = encodeVarintRequestResponse(dAtA, i, uint64(len(m.NamespaceId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m 
*IsActivityTaskValidResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IsActivityTaskValidResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IsActivityTaskValidResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.IsValid { + i-- + if m.IsValid { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + func (m *SignalWorkflowExecutionRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -13080,9 +13763,9 @@ i-- dAtA[i] = 0x2a } - if m.CompletedExecution != nil { + if m.ChildExecution != nil { { - size, err := m.CompletedExecution.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.ChildExecution.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -13097,9 +13780,9 @@ i-- dAtA[i] = 0x18 } - if m.WorkflowExecution != nil { + if m.ParentExecution != nil { { - size, err := m.WorkflowExecution.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.ParentExecution.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -13578,12 +14261,12 @@ var l int _ = l if m.StatusTime != nil { - n83, err83 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.StatusTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.StatusTime):]) - if err83 != nil { - return 0, err83 + n89, err89 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.StatusTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.StatusTime):]) + if err89 != nil { + return 0, err89 } - i -= n83 - i = encodeVarintRequestResponse(dAtA, i, uint64(n83)) + i -= n89 + i = encodeVarintRequestResponse(dAtA, i, uint64(n89)) i-- dAtA[i] = 0x1a } @@ -13706,22 +14389,22 @@ dAtA[i] = 0x52 } if m.LastHeartbeatTime != nil { - n88, err88 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.LastHeartbeatTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.LastHeartbeatTime):]) - if err88 != nil { - return 0, err88 + n94, err94 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.LastHeartbeatTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.LastHeartbeatTime):]) + if err94 != nil { + return 0, err94 } - i -= n88 - i = encodeVarintRequestResponse(dAtA, i, uint64(n88)) + i -= n94 + i = encodeVarintRequestResponse(dAtA, i, uint64(n94)) i-- dAtA[i] = 0x4a } if m.StartedTime != nil { - n89, err89 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.StartedTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.StartedTime):]) - if err89 != nil { - return 0, err89 + n95, err95 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.StartedTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.StartedTime):]) + if err95 != nil { + return 0, err95 } - i -= n89 - i = encodeVarintRequestResponse(dAtA, i, uint64(n89)) + i -= n95 + i = encodeVarintRequestResponse(dAtA, i, uint64(n95)) i-- dAtA[i] = 0x42 } @@ -13731,12 +14414,12 @@ dAtA[i] = 0x38 } if m.ScheduledTime != nil { - n90, err90 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.ScheduledTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.ScheduledTime):]) - if err90 != nil { - return 0, err90 + n96, err96 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.ScheduledTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.ScheduledTime):]) + if err96 != nil { 
+ return 0, err96 } - i -= n90 - i = encodeVarintRequestResponse(dAtA, i, uint64(n90)) + i -= n96 + i = encodeVarintRequestResponse(dAtA, i, uint64(n96)) i-- dAtA[i] = 0x32 } @@ -13980,21 +14663,21 @@ dAtA[i] = 0x1a } if len(m.ShardIds) > 0 { - dAtA97 := make([]byte, len(m.ShardIds)*10) - var j96 int + dAtA103 := make([]byte, len(m.ShardIds)*10) + var j102 int for _, num1 := range m.ShardIds { num := uint64(num1) for num >= 1<<7 { - dAtA97[j96] = uint8(uint64(num)&0x7f | 0x80) + dAtA103[j102] = uint8(uint64(num)&0x7f | 0x80) num >>= 7 - j96++ + j102++ } - dAtA97[j96] = uint8(num) - j96++ + dAtA103[j102] = uint8(num) + j102++ } - i -= j96 - copy(dAtA[i:], dAtA97[:j96]) - i = encodeVarintRequestResponse(dAtA, i, uint64(j96)) + i -= j102 + copy(dAtA[i:], dAtA103[:j102]) + i = encodeVarintRequestResponse(dAtA, i, uint64(j102)) i-- dAtA[i] = 0x12 } @@ -14141,12 +14824,12 @@ var l int _ = l if m.VisibilityTime != nil { - n99, err99 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.VisibilityTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.VisibilityTime):]) - if err99 != nil { - return 0, err99 + n105, err105 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.VisibilityTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.VisibilityTime):]) + if err105 != nil { + return 0, err105 } - i -= n99 - i = encodeVarintRequestResponse(dAtA, i, uint64(n99)) + i -= n105 + i = encodeVarintRequestResponse(dAtA, i, uint64(n105)) i-- dAtA[i] = 0x22 } @@ -14900,6 +15583,11 @@ _ = i var l int _ = l + if m.HistoryLength != 0 { + i = encodeVarintRequestResponse(dAtA, i, uint64(m.HistoryLength)) + i-- + dAtA[i] = 0x10 + } if m.StateTransitionCount != 0 { i = encodeVarintRequestResponse(dAtA, i, uint64(m.StateTransitionCount)) i-- @@ -14998,12 +15686,12 @@ var l int _ = l if m.MaxReplicationTaskVisibilityTime != nil { - n106, err106 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.MaxReplicationTaskVisibilityTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.MaxReplicationTaskVisibilityTime):]) - if err106 != nil { - return 0, err106 + n112, err112 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.MaxReplicationTaskVisibilityTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.MaxReplicationTaskVisibilityTime):]) + if err112 != nil { + return 0, err112 } - i -= n106 - i = encodeVarintRequestResponse(dAtA, i, uint64(n106)) + i -= n112 + i = encodeVarintRequestResponse(dAtA, i, uint64(n112)) i-- dAtA[i] = 0x32 } @@ -15060,12 +15748,12 @@ } } if m.ShardLocalTime != nil { - n109, err109 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.ShardLocalTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.ShardLocalTime):]) - if err109 != nil { - return 0, err109 + n115, err115 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.ShardLocalTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.ShardLocalTime):]) + if err115 != nil { + return 0, err115 } - i -= n109 - i = encodeVarintRequestResponse(dAtA, i, uint64(n109)) + i -= n115 + i = encodeVarintRequestResponse(dAtA, i, uint64(n115)) i-- dAtA[i] = 0x1a } @@ -15131,12 +15819,12 @@ var l int _ = l if m.AckedTaskVisibilityTime != nil { - n110, err110 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.AckedTaskVisibilityTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.AckedTaskVisibilityTime):]) - if err110 != nil { - return 0, err110 + n116, err116 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.AckedTaskVisibilityTime, 
dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.AckedTaskVisibilityTime):]) + if err116 != nil { + return 0, err116 } - i -= n110 - i = encodeVarintRequestResponse(dAtA, i, uint64(n110)) + i -= n116 + i = encodeVarintRequestResponse(dAtA, i, uint64(n116)) i-- dAtA[i] = 0x12 } @@ -15234,22 +15922,22 @@ var l int _ = l if m.WorkflowCloseTime != nil { - n112, err112 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.WorkflowCloseTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.WorkflowCloseTime):]) - if err112 != nil { - return 0, err112 + n118, err118 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.WorkflowCloseTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.WorkflowCloseTime):]) + if err118 != nil { + return 0, err118 } - i -= n112 - i = encodeVarintRequestResponse(dAtA, i, uint64(n112)) + i -= n118 + i = encodeVarintRequestResponse(dAtA, i, uint64(n118)) i-- dAtA[i] = 0x22 } if m.WorkflowStartTime != nil { - n113, err113 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.WorkflowStartTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.WorkflowStartTime):]) - if err113 != nil { - return 0, err113 + n119, err119 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.WorkflowStartTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.WorkflowStartTime):]) + if err119 != nil { + return 0, err119 } - i -= n113 - i = encodeVarintRequestResponse(dAtA, i, uint64(n113)) + i -= n119 + i = encodeVarintRequestResponse(dAtA, i, uint64(n119)) i-- dAtA[i] = 0x1a } @@ -15658,6 +16346,10 @@ if l > 0 { n += 1 + l + sovRequestResponse(uint64(l)) } + if m.VersionHistoryItem != nil { + l = m.VersionHistoryItem.Size() + n += 1 + l + sovRequestResponse(uint64(l)) + } return n } @@ -15748,6 +16440,10 @@ if l > 0 { n += 1 + l + sovRequestResponse(uint64(l)) } + if m.VersionHistoryItem != nil { + l = m.VersionHistoryItem.Size() + n += 1 + l + sovRequestResponse(uint64(l)) + } return n } @@ -15942,6 +16638,9 @@ n += 2 + l + sovRequestResponse(uint64(l)) } } + if m.Version != 0 { + n += 2 + sovRequestResponse(uint64(m.Version)) + } return n } @@ -16017,6 +16716,9 @@ l = m.Clock.Size() n += 1 + l + sovRequestResponse(uint64(l)) } + if m.Version != 0 { + n += 1 + sovRequestResponse(uint64(m.Version)) + } return n } @@ -16085,6 +16787,42 @@ return n } +func (m *IsWorkflowTaskValidRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.NamespaceId) + if l > 0 { + n += 1 + l + sovRequestResponse(uint64(l)) + } + if m.Execution != nil { + l = m.Execution.Size() + n += 1 + l + sovRequestResponse(uint64(l)) + } + if m.Clock != nil { + l = m.Clock.Size() + n += 1 + l + sovRequestResponse(uint64(l)) + } + if m.ScheduledEventId != 0 { + n += 1 + sovRequestResponse(uint64(m.ScheduledEventId)) + } + return n +} + +func (m *IsWorkflowTaskValidResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.IsValid { + n += 2 + } + return n +} + func (m *RecordActivityTaskHeartbeatRequest) Size() (n int) { if m == nil { return 0 @@ -16192,6 +16930,42 @@ return n } +func (m *IsActivityTaskValidRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.NamespaceId) + if l > 0 { + n += 1 + l + sovRequestResponse(uint64(l)) + } + if m.Execution != nil { + l = m.Execution.Size() + n += 1 + l + sovRequestResponse(uint64(l)) + } + if m.Clock != nil { + l = m.Clock.Size() + n += 1 + l + sovRequestResponse(uint64(l)) + } + if m.ScheduledEventId != 0 { + n += 1 + 
sovRequestResponse(uint64(m.ScheduledEventId)) + } + return n +} + +func (m *IsActivityTaskValidResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.IsValid { + n += 2 + } + return n +} + func (m *SignalWorkflowExecutionRequest) Size() (n int) { if m == nil { return 0 @@ -16493,15 +17267,15 @@ if l > 0 { n += 1 + l + sovRequestResponse(uint64(l)) } - if m.WorkflowExecution != nil { - l = m.WorkflowExecution.Size() + if m.ParentExecution != nil { + l = m.ParentExecution.Size() n += 1 + l + sovRequestResponse(uint64(l)) } if m.ParentInitiatedId != 0 { n += 1 + sovRequestResponse(uint64(m.ParentInitiatedId)) } - if m.CompletedExecution != nil { - l = m.CompletedExecution.Size() + if m.ChildExecution != nil { + l = m.ChildExecution.Size() n += 1 + l + sovRequestResponse(uint64(l)) } if m.CompletionEvent != nil { @@ -17265,6 +18039,9 @@ if m.StateTransitionCount != 0 { n += 1 + sovRequestResponse(uint64(m.StateTransitionCount)) } + if m.HistoryLength != 0 { + n += 1 + sovRequestResponse(uint64(m.HistoryLength)) + } return n } @@ -17589,6 +18366,7 @@ `Execution:` + strings.Replace(fmt.Sprintf("%v", this.Execution), "WorkflowExecution", "v14.WorkflowExecution", 1) + `,`, `ExpectedNextEventId:` + fmt.Sprintf("%v", this.ExpectedNextEventId) + `,`, `CurrentBranchToken:` + fmt.Sprintf("%v", this.CurrentBranchToken) + `,`, + `VersionHistoryItem:` + strings.Replace(fmt.Sprintf("%v", this.VersionHistoryItem), "VersionHistoryItem", "v16.VersionHistoryItem", 1) + `,`, `}`, }, "") return s @@ -17603,13 +18381,13 @@ `NextEventId:` + fmt.Sprintf("%v", this.NextEventId) + `,`, `PreviousStartedEventId:` + fmt.Sprintf("%v", this.PreviousStartedEventId) + `,`, `LastFirstEventId:` + fmt.Sprintf("%v", this.LastFirstEventId) + `,`, - `TaskQueue:` + strings.Replace(fmt.Sprintf("%v", this.TaskQueue), "TaskQueue", "v16.TaskQueue", 1) + `,`, - `StickyTaskQueue:` + strings.Replace(fmt.Sprintf("%v", this.StickyTaskQueue), "TaskQueue", "v16.TaskQueue", 1) + `,`, + `TaskQueue:` + strings.Replace(fmt.Sprintf("%v", this.TaskQueue), "TaskQueue", "v17.TaskQueue", 1) + `,`, + `StickyTaskQueue:` + strings.Replace(fmt.Sprintf("%v", this.StickyTaskQueue), "TaskQueue", "v17.TaskQueue", 1) + `,`, `StickyTaskQueueScheduleToStartTimeout:` + strings.Replace(fmt.Sprintf("%v", this.StickyTaskQueueScheduleToStartTimeout), "Duration", "types.Duration", 1) + `,`, `CurrentBranchToken:` + fmt.Sprintf("%v", this.CurrentBranchToken) + `,`, `WorkflowState:` + fmt.Sprintf("%v", this.WorkflowState) + `,`, `WorkflowStatus:` + fmt.Sprintf("%v", this.WorkflowStatus) + `,`, - `VersionHistories:` + strings.Replace(fmt.Sprintf("%v", this.VersionHistories), "VersionHistories", "v18.VersionHistories", 1) + `,`, + `VersionHistories:` + strings.Replace(fmt.Sprintf("%v", this.VersionHistories), "VersionHistories", "v16.VersionHistories", 1) + `,`, `IsStickyTaskQueueEnabled:` + fmt.Sprintf("%v", this.IsStickyTaskQueueEnabled) + `,`, `LastFirstEventTxnId:` + fmt.Sprintf("%v", this.LastFirstEventTxnId) + `,`, `FirstExecutionRunId:` + fmt.Sprintf("%v", this.FirstExecutionRunId) + `,`, @@ -17627,6 +18405,7 @@ `Execution:` + strings.Replace(fmt.Sprintf("%v", this.Execution), "WorkflowExecution", "v14.WorkflowExecution", 1) + `,`, `ExpectedNextEventId:` + fmt.Sprintf("%v", this.ExpectedNextEventId) + `,`, `CurrentBranchToken:` + fmt.Sprintf("%v", this.CurrentBranchToken) + `,`, + `VersionHistoryItem:` + strings.Replace(fmt.Sprintf("%v", this.VersionHistoryItem), "VersionHistoryItem", "v16.VersionHistoryItem", 1) + `,`, `}`, }, "") 
return s @@ -17641,11 +18420,11 @@ `NextEventId:` + fmt.Sprintf("%v", this.NextEventId) + `,`, `PreviousStartedEventId:` + fmt.Sprintf("%v", this.PreviousStartedEventId) + `,`, `LastFirstEventId:` + fmt.Sprintf("%v", this.LastFirstEventId) + `,`, - `TaskQueue:` + strings.Replace(fmt.Sprintf("%v", this.TaskQueue), "TaskQueue", "v16.TaskQueue", 1) + `,`, - `StickyTaskQueue:` + strings.Replace(fmt.Sprintf("%v", this.StickyTaskQueue), "TaskQueue", "v16.TaskQueue", 1) + `,`, + `TaskQueue:` + strings.Replace(fmt.Sprintf("%v", this.TaskQueue), "TaskQueue", "v17.TaskQueue", 1) + `,`, + `StickyTaskQueue:` + strings.Replace(fmt.Sprintf("%v", this.StickyTaskQueue), "TaskQueue", "v17.TaskQueue", 1) + `,`, `StickyTaskQueueScheduleToStartTimeout:` + strings.Replace(fmt.Sprintf("%v", this.StickyTaskQueueScheduleToStartTimeout), "Duration", "types.Duration", 1) + `,`, `CurrentBranchToken:` + fmt.Sprintf("%v", this.CurrentBranchToken) + `,`, - `VersionHistories:` + strings.Replace(fmt.Sprintf("%v", this.VersionHistories), "VersionHistories", "v18.VersionHistories", 1) + `,`, + `VersionHistories:` + strings.Replace(fmt.Sprintf("%v", this.VersionHistories), "VersionHistories", "v16.VersionHistories", 1) + `,`, `WorkflowState:` + fmt.Sprintf("%v", this.WorkflowState) + `,`, `WorkflowStatus:` + fmt.Sprintf("%v", this.WorkflowStatus) + `,`, `LastFirstEventTxnId:` + fmt.Sprintf("%v", this.LastFirstEventTxnId) + `,`, @@ -17717,14 +18496,15 @@ `NextEventId:` + fmt.Sprintf("%v", this.NextEventId) + `,`, `Attempt:` + fmt.Sprintf("%v", this.Attempt) + `,`, `StickyExecutionEnabled:` + fmt.Sprintf("%v", this.StickyExecutionEnabled) + `,`, - `TransientWorkflowTask:` + strings.Replace(fmt.Sprintf("%v", this.TransientWorkflowTask), "TransientWorkflowTaskInfo", "v18.TransientWorkflowTaskInfo", 1) + `,`, - `WorkflowExecutionTaskQueue:` + strings.Replace(fmt.Sprintf("%v", this.WorkflowExecutionTaskQueue), "TaskQueue", "v16.TaskQueue", 1) + `,`, + `TransientWorkflowTask:` + strings.Replace(fmt.Sprintf("%v", this.TransientWorkflowTask), "TransientWorkflowTaskInfo", "v16.TransientWorkflowTaskInfo", 1) + `,`, + `WorkflowExecutionTaskQueue:` + strings.Replace(fmt.Sprintf("%v", this.WorkflowExecutionTaskQueue), "TaskQueue", "v17.TaskQueue", 1) + `,`, `BranchToken:` + fmt.Sprintf("%v", this.BranchToken) + `,`, `ScheduledTime:` + strings.Replace(fmt.Sprintf("%v", this.ScheduledTime), "Timestamp", "types.Timestamp", 1) + `,`, `StartedTime:` + strings.Replace(fmt.Sprintf("%v", this.StartedTime), "Timestamp", "types.Timestamp", 1) + `,`, `Queries:` + mapStringForQueries + `,`, `Clock:` + strings.Replace(fmt.Sprintf("%v", this.Clock), "VectorClock", "v15.VectorClock", 1) + `,`, `Messages:` + repeatedStringForMessages + `,`, + `Version:` + fmt.Sprintf("%v", this.Version) + `,`, `}`, }, "") return s @@ -17758,6 +18538,7 @@ `WorkflowType:` + strings.Replace(fmt.Sprintf("%v", this.WorkflowType), "WorkflowType", "v14.WorkflowType", 1) + `,`, `WorkflowNamespace:` + fmt.Sprintf("%v", this.WorkflowNamespace) + `,`, `Clock:` + strings.Replace(fmt.Sprintf("%v", this.Clock), "VectorClock", "v15.VectorClock", 1) + `,`, + `Version:` + fmt.Sprintf("%v", this.Version) + `,`, `}`, }, "") return s @@ -17810,6 +18591,29 @@ }, "") return s } +func (this *IsWorkflowTaskValidRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IsWorkflowTaskValidRequest{`, + `NamespaceId:` + fmt.Sprintf("%v", this.NamespaceId) + `,`, + `Execution:` + strings.Replace(fmt.Sprintf("%v", this.Execution), "WorkflowExecution", 
"v14.WorkflowExecution", 1) + `,`, + `Clock:` + strings.Replace(fmt.Sprintf("%v", this.Clock), "VectorClock", "v15.VectorClock", 1) + `,`, + `ScheduledEventId:` + fmt.Sprintf("%v", this.ScheduledEventId) + `,`, + `}`, + }, "") + return s +} +func (this *IsWorkflowTaskValidResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IsWorkflowTaskValidResponse{`, + `IsValid:` + fmt.Sprintf("%v", this.IsValid) + `,`, + `}`, + }, "") + return s +} func (this *RecordActivityTaskHeartbeatRequest) String() string { if this == nil { return "nil" @@ -17891,6 +18695,29 @@ }, "") return s } +func (this *IsActivityTaskValidRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IsActivityTaskValidRequest{`, + `NamespaceId:` + fmt.Sprintf("%v", this.NamespaceId) + `,`, + `Execution:` + strings.Replace(fmt.Sprintf("%v", this.Execution), "WorkflowExecution", "v14.WorkflowExecution", 1) + `,`, + `Clock:` + strings.Replace(fmt.Sprintf("%v", this.Clock), "VectorClock", "v15.VectorClock", 1) + `,`, + `ScheduledEventId:` + fmt.Sprintf("%v", this.ScheduledEventId) + `,`, + `}`, + }, "") + return s +} +func (this *IsActivityTaskValidResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IsActivityTaskValidResponse{`, + `IsValid:` + fmt.Sprintf("%v", this.IsValid) + `,`, + `}`, + }, "") + return s +} func (this *SignalWorkflowExecutionRequest) String() string { if this == nil { return "nil" @@ -18093,9 +18920,9 @@ } s := strings.Join([]string{`&RecordChildExecutionCompletedRequest{`, `NamespaceId:` + fmt.Sprintf("%v", this.NamespaceId) + `,`, - `WorkflowExecution:` + strings.Replace(fmt.Sprintf("%v", this.WorkflowExecution), "WorkflowExecution", "v14.WorkflowExecution", 1) + `,`, + `ParentExecution:` + strings.Replace(fmt.Sprintf("%v", this.ParentExecution), "WorkflowExecution", "v14.WorkflowExecution", 1) + `,`, `ParentInitiatedId:` + fmt.Sprintf("%v", this.ParentInitiatedId) + `,`, - `CompletedExecution:` + strings.Replace(fmt.Sprintf("%v", this.CompletedExecution), "WorkflowExecution", "v14.WorkflowExecution", 1) + `,`, + `ChildExecution:` + strings.Replace(fmt.Sprintf("%v", this.ChildExecution), "WorkflowExecution", "v14.WorkflowExecution", 1) + `,`, `CompletionEvent:` + strings.Replace(fmt.Sprintf("%v", this.CompletionEvent), "HistoryEvent", "v111.HistoryEvent", 1) + `,`, `Clock:` + strings.Replace(fmt.Sprintf("%v", this.Clock), "VectorClock", "v15.VectorClock", 1) + `,`, `ParentInitiatedVersion:` + fmt.Sprintf("%v", this.ParentInitiatedVersion) + `,`, @@ -18177,7 +19004,7 @@ } repeatedStringForVersionHistoryItems := "[]*VersionHistoryItem{" for _, f := range this.VersionHistoryItems { - repeatedStringForVersionHistoryItems += strings.Replace(fmt.Sprintf("%v", f), "VersionHistoryItem", "v18.VersionHistoryItem", 1) + "," + repeatedStringForVersionHistoryItems += strings.Replace(fmt.Sprintf("%v", f), "VersionHistoryItem", "v16.VersionHistoryItem", 1) + "," } repeatedStringForVersionHistoryItems += "}" s := strings.Join([]string{`&ReplicateEventsV2Request{`, @@ -18260,7 +19087,7 @@ `Attempt:` + fmt.Sprintf("%v", this.Attempt) + `,`, `LastFailure:` + strings.Replace(fmt.Sprintf("%v", this.LastFailure), "Failure", "v13.Failure", 1) + `,`, `LastWorkerIdentity:` + fmt.Sprintf("%v", this.LastWorkerIdentity) + `,`, - `VersionHistory:` + strings.Replace(fmt.Sprintf("%v", this.VersionHistory), "VersionHistory", "v18.VersionHistory", 1) + `,`, + `VersionHistory:` + strings.Replace(fmt.Sprintf("%v", 
this.VersionHistory), "VersionHistory", "v16.VersionHistory", 1) + `,`, `BaseExecutionInfo:` + strings.Replace(fmt.Sprintf("%v", this.BaseExecutionInfo), "BaseExecutionInfo", "v11.BaseExecutionInfo", 1) + `,`, `}`, }, "") @@ -18613,6 +19440,7 @@ } s := strings.Join([]string{`&GenerateLastHistoryReplicationTasksResponse{`, `StateTransitionCount:` + fmt.Sprintf("%v", this.StateTransitionCount) + `,`, + `HistoryLength:` + fmt.Sprintf("%v", this.HistoryLength) + `,`, `}`, }, "") return s @@ -19512,6 +20340,42 @@ m.CurrentBranchToken = []byte{} } iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VersionHistoryItem", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRequestResponse + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRequestResponse + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRequestResponse + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.VersionHistoryItem == nil { + m.VersionHistoryItem = &v16.VersionHistoryItem{} + } + if err := m.VersionHistoryItem.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipRequestResponse(dAtA[iNdEx:]) @@ -19724,7 +20588,7 @@ return io.ErrUnexpectedEOF } if m.TaskQueue == nil { - m.TaskQueue = &v16.TaskQueue{} + m.TaskQueue = &v17.TaskQueue{} } if err := m.TaskQueue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -19760,7 +20624,7 @@ return io.ErrUnexpectedEOF } if m.StickyTaskQueue == nil { - m.StickyTaskQueue = &v16.TaskQueue{} + m.StickyTaskQueue = &v17.TaskQueue{} } if err := m.StickyTaskQueue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -19850,7 +20714,7 @@ } b := dAtA[iNdEx] iNdEx++ - m.WorkflowState |= v17.WorkflowExecutionState(b&0x7F) << shift + m.WorkflowState |= v18.WorkflowExecutionState(b&0x7F) << shift if b < 0x80 { break } @@ -19904,7 +20768,7 @@ return io.ErrUnexpectedEOF } if m.VersionHistories == nil { - m.VersionHistories = &v18.VersionHistories{} + m.VersionHistories = &v16.VersionHistories{} } if err := m.VersionHistories.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -20191,6 +21055,42 @@ m.CurrentBranchToken = []byte{} } iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VersionHistoryItem", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRequestResponse + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRequestResponse + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRequestResponse + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.VersionHistoryItem == nil { + m.VersionHistoryItem = &v16.VersionHistoryItem{} + } + if err := m.VersionHistoryItem.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipRequestResponse(dAtA[iNdEx:]) @@ -20403,7 +21303,7 @@ return io.ErrUnexpectedEOF } if m.TaskQueue == nil { - m.TaskQueue = &v16.TaskQueue{} + m.TaskQueue = &v17.TaskQueue{} } if err := m.TaskQueue.Unmarshal(dAtA[iNdEx:postIndex]); 
err != nil { return err @@ -20439,7 +21339,7 @@ return io.ErrUnexpectedEOF } if m.StickyTaskQueue == nil { - m.StickyTaskQueue = &v16.TaskQueue{} + m.StickyTaskQueue = &v17.TaskQueue{} } if err := m.StickyTaskQueue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -20545,7 +21445,7 @@ return io.ErrUnexpectedEOF } if m.VersionHistories == nil { - m.VersionHistories = &v18.VersionHistories{} + m.VersionHistories = &v16.VersionHistories{} } if err := m.VersionHistories.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -20565,7 +21465,7 @@ } b := dAtA[iNdEx] iNdEx++ - m.WorkflowState |= v17.WorkflowExecutionState(b&0x7F) << shift + m.WorkflowState |= v18.WorkflowExecutionState(b&0x7F) << shift if b < 0x80 { break } @@ -21311,7 +22211,7 @@ return io.ErrUnexpectedEOF } if m.TransientWorkflowTask == nil { - m.TransientWorkflowTask = &v18.TransientWorkflowTaskInfo{} + m.TransientWorkflowTask = &v16.TransientWorkflowTaskInfo{} } if err := m.TransientWorkflowTask.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -21347,7 +22247,7 @@ return io.ErrUnexpectedEOF } if m.WorkflowExecutionTaskQueue == nil { - m.WorkflowExecutionTaskQueue = &v16.TaskQueue{} + m.WorkflowExecutionTaskQueue = &v17.TaskQueue{} } if err := m.WorkflowExecutionTaskQueue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -21658,6 +22558,25 @@ return err } iNdEx = postIndex + case 17: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + m.Version = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRequestResponse + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Version |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipRequestResponse(dAtA[iNdEx:]) @@ -22241,6 +23160,25 @@ return err } iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + m.Version = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRequestResponse + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Version |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipRequestResponse(dAtA[iNdEx:]) @@ -22702,6 +23640,255 @@ } return nil } +func (m *IsWorkflowTaskValidRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRequestResponse + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IsWorkflowTaskValidRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IsWorkflowTaskValidRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamespaceId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRequestResponse + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := 
int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRequestResponse + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRequestResponse + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NamespaceId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Execution", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRequestResponse + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRequestResponse + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRequestResponse + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Execution == nil { + m.Execution = &v14.WorkflowExecution{} + } + if err := m.Execution.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Clock", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRequestResponse + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRequestResponse + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRequestResponse + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Clock == nil { + m.Clock = &v15.VectorClock{} + } + if err := m.Clock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ScheduledEventId", wireType) + } + m.ScheduledEventId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRequestResponse + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ScheduledEventId |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRequestResponse(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRequestResponse + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthRequestResponse + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IsWorkflowTaskValidResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRequestResponse + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IsWorkflowTaskValidResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IsWorkflowTaskValidResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IsValid", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { 
+ return ErrIntOverflowRequestResponse + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IsValid = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipRequestResponse(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRequestResponse + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthRequestResponse + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *RecordActivityTaskHeartbeatRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -23418,6 +24605,255 @@ } return nil } +func (m *IsActivityTaskValidRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRequestResponse + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IsActivityTaskValidRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IsActivityTaskValidRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamespaceId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRequestResponse + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRequestResponse + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRequestResponse + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NamespaceId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Execution", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRequestResponse + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRequestResponse + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRequestResponse + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Execution == nil { + m.Execution = &v14.WorkflowExecution{} + } + if err := m.Execution.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Clock", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRequestResponse + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRequestResponse + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRequestResponse + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if m.Clock == nil { + m.Clock = &v15.VectorClock{} + } + if err := m.Clock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ScheduledEventId", wireType) + } + m.ScheduledEventId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRequestResponse + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ScheduledEventId |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRequestResponse(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRequestResponse + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthRequestResponse + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IsActivityTaskValidResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRequestResponse + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IsActivityTaskValidResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IsActivityTaskValidResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IsValid", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRequestResponse + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IsValid = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipRequestResponse(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRequestResponse + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthRequestResponse + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *SignalWorkflowExecutionRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -25497,7 +26933,7 @@ iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field WorkflowExecution", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ParentExecution", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -25524,10 +26960,10 @@ if postIndex > l { return io.ErrUnexpectedEOF } - if m.WorkflowExecution == nil { - m.WorkflowExecution = &v14.WorkflowExecution{} + if m.ParentExecution == nil { + m.ParentExecution = &v14.WorkflowExecution{} } - if err := m.WorkflowExecution.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ParentExecution.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -25552,7 +26988,7 @@ } case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CompletedExecution", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field 
ChildExecution", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -25579,10 +27015,10 @@ if postIndex > l { return io.ErrUnexpectedEOF } - if m.CompletedExecution == nil { - m.CompletedExecution = &v14.WorkflowExecution{} + if m.ChildExecution == nil { + m.ChildExecution = &v14.WorkflowExecution{} } - if err := m.CompletedExecution.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ChildExecution.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -26514,7 +27950,7 @@ if postIndex > l { return io.ErrUnexpectedEOF } - m.VersionHistoryItems = append(m.VersionHistoryItems, &v18.VersionHistoryItem{}) + m.VersionHistoryItems = append(m.VersionHistoryItems, &v16.VersionHistoryItem{}) if err := m.VersionHistoryItems[len(m.VersionHistoryItems)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -27546,7 +28982,7 @@ return io.ErrUnexpectedEOF } if m.VersionHistory == nil { - m.VersionHistory = &v18.VersionHistory{} + m.VersionHistory = &v16.VersionHistory{} } if err := m.VersionHistory.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -28647,7 +30083,7 @@ } b := dAtA[iNdEx] iNdEx++ - m.Category |= v17.TaskCategory(b&0x7F) << shift + m.Category |= v18.TaskCategory(b&0x7F) << shift if b < 0x80 { break } @@ -29672,7 +31108,7 @@ } b := dAtA[iNdEx] iNdEx++ - m.Type |= v17.DeadLetterQueueType(b&0x7F) << shift + m.Type |= v18.DeadLetterQueueType(b&0x7F) << shift if b < 0x80 { break } @@ -29867,7 +31303,7 @@ } b := dAtA[iNdEx] iNdEx++ - m.Type |= v17.DeadLetterQueueType(b&0x7F) << shift + m.Type |= v18.DeadLetterQueueType(b&0x7F) << shift if b < 0x80 { break } @@ -30041,7 +31477,7 @@ } b := dAtA[iNdEx] iNdEx++ - m.Type |= v17.DeadLetterQueueType(b&0x7F) << shift + m.Type |= v18.DeadLetterQueueType(b&0x7F) << shift if b < 0x80 { break } @@ -30236,7 +31672,7 @@ } b := dAtA[iNdEx] iNdEx++ - m.Type |= v17.DeadLetterQueueType(b&0x7F) << shift + m.Type |= v18.DeadLetterQueueType(b&0x7F) << shift if b < 0x80 { break } @@ -30817,6 +32253,25 @@ if b < 0x80 { break } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HistoryLength", wireType) + } + m.HistoryLength = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRequestResponse + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HistoryLength |= int64(b&0x7F) << shift + if b < 0x80 { + break + } } default: iNdEx = preIndex diff -Nru temporal-1.21.5-1/src/api/historyservice/v1/service.pb.go temporal-1.22.5/src/api/historyservice/v1/service.pb.go --- temporal-1.21.5-1/src/api/historyservice/v1/service.pb.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/api/historyservice/v1/service.pb.go 2024-02-23 09:45:43.000000000 +0000 @@ -54,89 +54,92 @@ } var fileDescriptor_655983da427ae822 = []byte{ - // 1307 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x9a, 0xcd, 0x8b, 0x23, 0x45, - 0x18, 0xc6, 0x53, 0x17, 0x91, 0x42, 0x57, 0x6d, 0xc5, 0x8f, 0x55, 0x1b, 0x3f, 0x50, 0x3c, 0x65, - 0xdc, 0x5d, 0xd0, 0xfd, 0x98, 0x75, 0x77, 0x26, 0x33, 0x93, 0x99, 0xdd, 0x19, 0x9d, 0x49, 0x66, - 0x47, 0xf0, 0x22, 0x95, 0xe4, 0x9d, 0x49, 0x31, 0x3d, 0xe9, 0xb6, 0xaa, 0x12, 0xcd, 0x41, 0x10, - 0x3c, 0x09, 0x82, 0x1f, 0x20, 0x78, 0x12, 0x3c, 0x29, 0x82, 0x20, 0x08, 0x82, 0x20, 0x78, 0x12, - 0x3c, 0x88, 0xcc, 0xcd, 0x3d, 0x3a, 0x99, 0x8b, 0xc7, 0xfd, 0x13, 0x24, 0xe9, 0x54, 0x4d, 0xaa, - 0xbb, 0x3a, 0xa9, 0xea, 0xce, 0x6d, 0x37, 
0xa9, 0xe7, 0xd7, 0x4f, 0x7d, 0xe4, 0xad, 0x67, 0xde, - 0x04, 0x5f, 0x12, 0x70, 0x14, 0x85, 0x8c, 0x04, 0x0b, 0x1c, 0x58, 0x0f, 0xd8, 0x02, 0x89, 0xe8, - 0x42, 0x9b, 0x72, 0x11, 0xb2, 0xfe, 0xf0, 0x15, 0xda, 0x84, 0x85, 0xde, 0x85, 0x85, 0xf1, 0x3f, - 0xcb, 0x11, 0x0b, 0x45, 0xe8, 0xbd, 0x24, 0x45, 0xe5, 0x58, 0x54, 0x26, 0x11, 0x2d, 0xeb, 0xa2, - 0x72, 0xef, 0xc2, 0xf9, 0x45, 0x3b, 0x36, 0x83, 0xf7, 0xba, 0xc0, 0xc5, 0xbb, 0x0c, 0x78, 0x14, - 0x76, 0xf8, 0xf8, 0x21, 0x17, 0xbf, 0xb8, 0x89, 0xcf, 0xad, 0xc7, 0x83, 0xeb, 0xf1, 0x60, 0xef, - 0x3b, 0x84, 0x1f, 0xaf, 0x0b, 0xc2, 0xc4, 0xdb, 0x21, 0x3b, 0xdc, 0x0f, 0xc2, 0xf7, 0x57, 0x3f, - 0x80, 0x66, 0x57, 0xd0, 0xb0, 0xe3, 0xad, 0x94, 0xad, 0x3c, 0x95, 0xcd, 0xf2, 0x5a, 0x6c, 0xe1, - 0xfc, 0x6a, 0x41, 0x4a, 0x3c, 0x81, 0x17, 0x4a, 0xde, 0x97, 0x08, 0x3f, 0x54, 0x05, 0xb1, 0xd5, - 0x15, 0xa4, 0x11, 0x40, 0x5d, 0x10, 0x01, 0xde, 0x75, 0x4b, 0x78, 0x42, 0x27, 0xbd, 0xbd, 0x91, - 0x57, 0xae, 0x4c, 0x7d, 0x85, 0xf0, 0xc3, 0xdb, 0x61, 0x10, 0x68, 0xae, 0x6c, 0xb1, 0x49, 0xa1, - 0xb4, 0x75, 0x23, 0xb7, 0x5e, 0xf9, 0xfa, 0x16, 0xe1, 0xc7, 0x6a, 0xc0, 0x41, 0xd4, 0x05, 0x6d, - 0x1e, 0xf6, 0x77, 0x09, 0x3f, 0xdc, 0xe9, 0x42, 0x17, 0xbc, 0x65, 0x4b, 0xb6, 0x49, 0x2c, 0xfd, - 0x55, 0x0a, 0x31, 0x94, 0xc7, 0x9f, 0x10, 0x7e, 0xaa, 0x06, 0xcd, 0x90, 0xb5, 0xe4, 0xb6, 0x0f, - 0x47, 0x8d, 0xce, 0x01, 0xb4, 0xbc, 0xaa, 0xf5, 0x43, 0x32, 0x08, 0xd2, 0xed, 0x7a, 0x71, 0x90, - 0xc1, 0xf2, 0x52, 0x53, 0xd0, 0x1e, 0x15, 0xfd, 0xfc, 0x96, 0x0d, 0x84, 0x7c, 0x96, 0x8d, 0x20, - 0x65, 0xf9, 0x57, 0x84, 0x9f, 0x89, 0xff, 0xab, 0xcd, 0xad, 0x12, 0x1e, 0x45, 0x01, 0x0c, 0x5d, - 0xdf, 0xb2, 0xdf, 0xcd, 0x4c, 0x88, 0x34, 0x7e, 0x7b, 0x2e, 0xac, 0xc4, 0x72, 0xa7, 0x86, 0xae, - 0x11, 0x1a, 0x38, 0x2d, 0x77, 0x06, 0xc1, 0x7d, 0xb9, 0x33, 0x41, 0xca, 0xf2, 0x2f, 0x08, 0x3f, - 0x9d, 0xde, 0x96, 0x75, 0x20, 0x4c, 0x34, 0x80, 0x08, 0x6f, 0x23, 0xf7, 0xd6, 0x2a, 0x86, 0xb4, - 0x7d, 0x6b, 0x1e, 0x28, 0xd3, 0x39, 0x99, 0x1c, 0x9a, 0xfb, 0x9c, 0x18, 0x21, 0x39, 0xcf, 0x49, - 0x06, 0xcb, 0x74, 0x4e, 0x26, 0x87, 0xe6, 0x3b, 0x27, 0x69, 0x42, 0xce, 0x73, 0x62, 0x02, 0x25, - 0xce, 0x49, 0x7a, 0x76, 0xa4, 0xd3, 0x84, 0xa1, 0xe9, 0x8d, 0x02, 0x2b, 0x34, 0x66, 0xb8, 0x9f, - 0x93, 0x29, 0x28, 0x65, 0xfc, 0x07, 0x84, 0x9f, 0xa8, 0xd3, 0x83, 0x0e, 0x09, 0xd2, 0x89, 0xc1, - 0xfa, 0xae, 0x37, 0xeb, 0xa5, 0xe1, 0xb5, 0xa2, 0x18, 0x65, 0xf6, 0x0f, 0x84, 0x9f, 0x1b, 0x8f, - 0xa2, 0xa2, 0x9d, 0x91, 0x73, 0xde, 0x74, 0x7b, 0x5c, 0x26, 0x48, 0xda, 0x7f, 0x6b, 0x6e, 0x3c, - 0x35, 0x8f, 0x1f, 0x11, 0x7e, 0xb2, 0x06, 0x47, 0x61, 0x0f, 0x62, 0x91, 0x16, 0x37, 0xd6, 0xac, - 0xf7, 0xd7, 0x0c, 0x90, 0xbe, 0xab, 0x85, 0x39, 0xca, 0xef, 0xcf, 0x08, 0x9f, 0xdf, 0x05, 0x76, - 0x44, 0x3b, 0x44, 0x40, 0x7a, 0xc5, 0x6d, 0x3f, 0x48, 0xd9, 0x08, 0xe9, 0x79, 0x63, 0x0e, 0x24, - 0xed, 0x68, 0xaf, 0xc0, 0xb0, 0xb8, 0xe4, 0x3f, 0xda, 0x19, 0x7a, 0xd7, 0xa3, 0x9d, 0x89, 0x51, - 0x66, 0x87, 0xc1, 0x7d, 0x14, 0xb0, 0xf2, 0x07, 0x77, 0xb3, 0xdc, 0x35, 0xb8, 0x67, 0x51, 0x94, - 0xd3, 0xdf, 0x11, 0xf6, 0xc7, 0xd0, 0xb8, 0x9e, 0xa4, 0x1d, 0x6f, 0x5a, 0x3f, 0x6b, 0x1a, 0x46, - 0x3a, 0xdf, 0x9a, 0x13, 0x4d, 0x4b, 0xd3, 0xf5, 0x66, 0x1b, 0x5a, 0xdd, 0x00, 0x26, 0x6f, 0x7f, - 0xeb, 0x34, 0x6d, 0x12, 0xbb, 0xa6, 0x69, 0x33, 0x43, 0x2b, 0x75, 0x7b, 0xc0, 0xe8, 0x7e, 0x7f, - 0x8d, 0x32, 0x2e, 0xb4, 0x1c, 0x3b, 0x56, 0xb6, 0xac, 0x4b, 0xdd, 0x2c, 0x90, 0x6b, 0xa9, 0x9b, - 0xcd, 0x53, 0xf3, 0xf8, 0x0d, 0xe1, 0x67, 0xe3, 0xc4, 0x52, 0x69, 0xd3, 0xa0, 0xa5, 0xb6, 0xe3, - 0x2c, 0x88, 0xdc, 0x76, 0xca, 0x3d, 0x19, 0x14, 0x39, 0x83, 0xcd, 
0xf9, 0xc0, 0x94, 0xfd, 0x7f, - 0x10, 0x7e, 0x39, 0x9e, 0xad, 0x71, 0xec, 0xe8, 0x5c, 0x0d, 0x49, 0xd0, 0xf2, 0x76, 0x9d, 0x16, - 0x6f, 0x16, 0x4e, 0x4e, 0xe8, 0xce, 0x9c, 0xa9, 0x5a, 0xc8, 0x5a, 0x01, 0xde, 0x64, 0xb4, 0x61, - 0xa8, 0x8f, 0x55, 0xeb, 0xc2, 0x96, 0x41, 0x70, 0x0d, 0x59, 0x53, 0x40, 0xca, 0xf2, 0xd7, 0x08, - 0x3f, 0x52, 0x83, 0x28, 0xa0, 0x4d, 0x22, 0x60, 0xb5, 0x07, 0x1d, 0xc1, 0xf7, 0x2e, 0x7a, 0x37, - 0xac, 0xb7, 0x3c, 0xa1, 0x94, 0x16, 0x6f, 0xe6, 0x07, 0x24, 0xca, 0xf7, 0xf8, 0x7d, 0x39, 0x87, - 0xf8, 0x3e, 0x5f, 0x71, 0xc5, 0x6b, 0x72, 0xf7, 0xf2, 0x6d, 0xa6, 0x68, 0x7d, 0x97, 0x7a, 0xbf, - 0xd3, 0xac, 0xb7, 0x09, 0x6b, 0x0d, 0xdf, 0xec, 0x72, 0xeb, 0xbe, 0x4b, 0x42, 0xe7, 0xda, 0x77, - 0x49, 0xc9, 0x95, 0xa9, 0x4f, 0x10, 0x7e, 0x60, 0xf8, 0xae, 0x0c, 0xab, 0xde, 0x55, 0x07, 0xa4, - 0x14, 0x49, 0x3b, 0xd7, 0x72, 0x69, 0xb5, 0xdb, 0x41, 0x9e, 0x46, 0x2d, 0x98, 0x2d, 0x3b, 0x1e, - 0x65, 0x53, 0x28, 0xab, 0x14, 0x62, 0x28, 0x8f, 0xdf, 0x20, 0xfc, 0xa8, 0x1c, 0x32, 0xee, 0x00, - 0xae, 0x87, 0x5c, 0x78, 0x4b, 0x8e, 0xf8, 0x09, 0xad, 0x74, 0xb8, 0x5c, 0x04, 0xa1, 0x0c, 0x7e, - 0x8c, 0x30, 0xae, 0x04, 0x21, 0x87, 0xd1, 0x7e, 0x7b, 0x97, 0x2d, 0xa1, 0x67, 0x12, 0x69, 0xe7, - 0x4a, 0x0e, 0xa5, 0x72, 0xf1, 0x21, 0xbe, 0xbf, 0x0a, 0x22, 0xb6, 0xf0, 0x9a, 0x7d, 0x73, 0x50, - 0x33, 0xf0, 0xba, 0xb3, 0x4e, 0x5b, 0x84, 0x38, 0x5d, 0x8f, 0xd2, 0xc5, 0x65, 0xa7, 0x40, 0x3e, - 0x99, 0x29, 0xae, 0xe4, 0x50, 0x6a, 0xa5, 0xa9, 0x0a, 0x42, 0x16, 0x06, 0x1a, 0x76, 0xb6, 0x80, - 0x73, 0x72, 0x00, 0xdc, 0xba, 0x34, 0x99, 0xe5, 0xae, 0xa5, 0x29, 0x8b, 0xa2, 0x5d, 0x49, 0x55, - 0x10, 0x2b, 0x9b, 0x3b, 0x26, 0xb3, 0x55, 0xfb, 0xc7, 0x98, 0x09, 0xae, 0x57, 0xd2, 0x14, 0x90, - 0xb2, 0xfc, 0x29, 0xc2, 0x0f, 0xee, 0x74, 0x81, 0xf5, 0x65, 0xb9, 0xf5, 0x6c, 0xab, 0x8f, 0xa6, - 0x92, 0xd6, 0x16, 0xf3, 0x89, 0x35, 0x3b, 0x35, 0x20, 0x51, 0x14, 0xf4, 0xe3, 0x4b, 0xca, 0xda, - 0x8e, 0xa6, 0x72, 0xb5, 0x93, 0x10, 0x2b, 0x3b, 0x9f, 0x21, 0x7c, 0x2e, 0x5e, 0x45, 0xb5, 0x8b, - 0x8b, 0x4e, 0x8b, 0x9f, 0xdc, 0xba, 0xeb, 0x39, 0xd5, 0x7a, 0x83, 0xbf, 0xcb, 0x0e, 0x60, 0xd2, - 0x93, 0x75, 0x83, 0x3f, 0x21, 0x74, 0x6e, 0xf0, 0xa7, 0xf4, 0x9a, 0xaf, 0x2d, 0xc8, 0xe9, 0x2b, - 0x29, 0x74, 0xf5, 0x95, 0xd6, 0x27, 0xbe, 0x78, 0xd8, 0x67, 0xc0, 0xdb, 0x93, 0x49, 0x9f, 0x3b, - 0x7c, 0xf1, 0x90, 0x16, 0xbb, 0x7f, 0xf1, 0x60, 0x62, 0x28, 0x8f, 0x7f, 0x23, 0xfc, 0x62, 0x15, - 0x3a, 0xc0, 0x88, 0x80, 0x4d, 0xc2, 0xc5, 0xf8, 0x46, 0x9a, 0xf8, 0xe0, 0xc6, 0x96, 0x77, 0xac, - 0x0f, 0xcf, 0x4c, 0x96, 0x9c, 0x41, 0x6d, 0x9e, 0x48, 0x6d, 0xd1, 0xf5, 0x62, 0x39, 0xce, 0x69, - 0xcb, 0xb9, 0x2a, 0xad, 0x1e, 0xd6, 0x2a, 0x85, 0x18, 0x5a, 0x02, 0xa9, 0x41, 0xa3, 0x4b, 0x83, - 0x96, 0x16, 0x92, 0x96, 0xac, 0xf7, 0x34, 0xa5, 0x75, 0x4d, 0x20, 0x46, 0x84, 0xd6, 0xa6, 0xd0, - 0xdb, 0x2e, 0x7b, 0x94, 0xd3, 0x06, 0x0d, 0x46, 0x69, 0x6f, 0xf8, 0xe7, 0x90, 0x75, 0x9b, 0x62, - 0x3a, 0xc6, 0xb5, 0x4d, 0x31, 0x8b, 0xa6, 0xf5, 0xaf, 0xee, 0x44, 0x2d, 0x52, 0xa4, 0x7f, 0x95, - 0xa1, 0x77, 0xed, 0x5f, 0x65, 0x62, 0xb4, 0x06, 0xf8, 0x76, 0x18, 0xa4, 0xfb, 0x2e, 0xb1, 0xd4, - 0xba, 0x01, 0x3e, 0x85, 0xe1, 0xda, 0x00, 0x9f, 0x8a, 0x52, 0xc6, 0xff, 0x42, 0xf8, 0xf9, 0xba, - 0x60, 0x40, 0x8e, 0xce, 0xee, 0xd3, 0x74, 0xf8, 0xb0, 0x6e, 0x02, 0xcf, 0x22, 0xc9, 0x49, 0x6c, - 0xcf, 0x0f, 0x28, 0xa7, 0xf2, 0x0a, 0x7a, 0x15, 0x2d, 0x47, 0xc7, 0x27, 0x7e, 0xe9, 0xee, 0x89, - 0x5f, 0xba, 0x77, 0xe2, 0xa3, 0x8f, 0x06, 0x3e, 0xfa, 0x7e, 0xe0, 0xa3, 0x3f, 0x07, 0x3e, 0x3a, - 0x1e, 0xf8, 0xe8, 0xdf, 0x81, 0x8f, 0xfe, 0x1b, 0xf8, 0xa5, 0x7b, 0x03, 0x1f, 0x7d, 0x7e, 
0xea, - 0x97, 0x8e, 0x4f, 0xfd, 0xd2, 0xdd, 0x53, 0xbf, 0xf4, 0xce, 0xd5, 0x83, 0xf0, 0xcc, 0x0f, 0x0d, - 0xa7, 0xfe, 0x1a, 0xe1, 0x9a, 0xfe, 0x4a, 0xe3, 0xbe, 0xd1, 0x8f, 0x11, 0x2e, 0xfd, 0x1f, 0x00, - 0x00, 0xff, 0xff, 0xa1, 0x85, 0xab, 0xc0, 0x28, 0x21, 0x00, 0x00, + // 1346 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x9a, 0xcd, 0x8b, 0x23, 0xc5, + 0x1b, 0xc7, 0x53, 0x97, 0x1f, 0x3f, 0x0a, 0x5d, 0xb5, 0x15, 0x5f, 0x46, 0x6d, 0x7c, 0x41, 0xf1, + 0x94, 0x71, 0x77, 0x41, 0xf7, 0x65, 0xd6, 0x75, 0x92, 0x99, 0xc9, 0xcc, 0xee, 0x8c, 0xce, 0x24, + 0xb3, 0x23, 0x78, 0x91, 0x4a, 0xf2, 0xcc, 0xa4, 0x98, 0x4e, 0xba, 0xad, 0xaa, 0x44, 0x73, 0x10, + 0x04, 0x4f, 0x82, 0xa0, 0x08, 0x82, 0x27, 0xc1, 0x93, 0x22, 0x08, 0x82, 0x20, 0x08, 0x82, 0x27, + 0xc1, 0x83, 0xc8, 0x80, 0x07, 0xf7, 0xe8, 0x64, 0x2e, 0x1e, 0xf7, 0x4f, 0x90, 0xa4, 0x53, 0x35, + 0xa9, 0xee, 0xea, 0xa4, 0xaa, 0x93, 0xdb, 0x4c, 0x52, 0xdf, 0x4f, 0x7f, 0xab, 0xea, 0xe9, 0xa7, + 0x9f, 0x7a, 0x3a, 0xf8, 0xb2, 0x80, 0x76, 0x14, 0x32, 0x12, 0x2c, 0x73, 0x60, 0x3d, 0x60, 0xcb, + 0x24, 0xa2, 0xcb, 0x2d, 0xca, 0x45, 0xc8, 0xfa, 0xc3, 0x4f, 0x68, 0x03, 0x96, 0x7b, 0x17, 0x97, + 0xc7, 0x7f, 0x16, 0x23, 0x16, 0x8a, 0xd0, 0x7b, 0x41, 0x8a, 0x8a, 0xb1, 0xa8, 0x48, 0x22, 0x5a, + 0xd4, 0x45, 0xc5, 0xde, 0xc5, 0xa5, 0x15, 0x3b, 0x36, 0x83, 0x77, 0xbb, 0xc0, 0xc5, 0x3b, 0x0c, + 0x78, 0x14, 0x76, 0xf8, 0xf8, 0x22, 0x97, 0xfe, 0x2a, 0xe1, 0x0b, 0x9b, 0xf1, 0xe0, 0x5a, 0x3c, + 0xd8, 0xfb, 0x06, 0xe1, 0x47, 0x6b, 0x82, 0x30, 0xf1, 0x56, 0xc8, 0x8e, 0x0f, 0x83, 0xf0, 0xbd, + 0xf5, 0xf7, 0xa1, 0xd1, 0x15, 0x34, 0xec, 0x78, 0x6b, 0x45, 0x2b, 0x4f, 0x45, 0xb3, 0xbc, 0x1a, + 0x5b, 0x58, 0x5a, 0x9f, 0x93, 0x12, 0x4f, 0xe0, 0xb9, 0x82, 0xf7, 0x39, 0xc2, 0x0f, 0x54, 0x40, + 0xec, 0x74, 0x05, 0xa9, 0x07, 0x50, 0x13, 0x44, 0x80, 0x77, 0xc3, 0x12, 0x9e, 0xd0, 0x49, 0x6f, + 0xaf, 0xe5, 0x95, 0x2b, 0x53, 0x5f, 0x20, 0xfc, 0xe0, 0x6e, 0x18, 0x04, 0x9a, 0x2b, 0x5b, 0x6c, + 0x52, 0x28, 0x6d, 0xdd, 0xcc, 0xad, 0x57, 0xbe, 0xbe, 0x46, 0xf8, 0x91, 0x2a, 0x70, 0x10, 0x35, + 0x41, 0x1b, 0xc7, 0xfd, 0x7d, 0xc2, 0x8f, 0xf7, 0xba, 0xd0, 0x05, 0xaf, 0x64, 0xc9, 0x36, 0x89, + 0xa5, 0xbf, 0xf2, 0x5c, 0x0c, 0xe5, 0xf1, 0x07, 0x84, 0x9f, 0xa8, 0x42, 0x23, 0x64, 0x4d, 0xb9, + 0xed, 0xc3, 0x51, 0xa3, 0x38, 0x80, 0xa6, 0x57, 0xb1, 0xbe, 0x48, 0x06, 0x41, 0xba, 0xdd, 0x9c, + 0x1f, 0x64, 0xb0, 0xbc, 0xda, 0x10, 0xb4, 0x47, 0x45, 0x3f, 0xbf, 0x65, 0x03, 0x21, 0x9f, 0x65, + 0x23, 0x48, 0x59, 0xfe, 0x19, 0xe1, 0xa7, 0xe2, 0x7f, 0xb5, 0xb9, 0x95, 0xc3, 0x76, 0x14, 0xc0, + 0xd0, 0xf5, 0x2d, 0xfb, 0xdd, 0xcc, 0x84, 0x48, 0xe3, 0xb7, 0x17, 0xc2, 0x4a, 0x2c, 0x77, 0x6a, + 0xe8, 0x06, 0xa1, 0x81, 0xd3, 0x72, 0x67, 0x10, 0xdc, 0x97, 0x3b, 0x13, 0xa4, 0x2c, 0x7f, 0x85, + 0xf0, 0xc3, 0x5b, 0x7c, 0x72, 0xc8, 0x01, 0x09, 0x68, 0xd3, 0x5b, 0xb5, 0xbc, 0x86, 0x41, 0x2b, + 0x6d, 0x96, 0xe6, 0x41, 0x28, 0x83, 0x3f, 0x21, 0xfc, 0x64, 0x3a, 0x6e, 0x36, 0x81, 0x30, 0x51, + 0x07, 0x22, 0xbc, 0xad, 0xdc, 0xb1, 0xa7, 0x18, 0xd2, 0xf0, 0xad, 0x45, 0xa0, 0x4c, 0x81, 0x3c, + 0x39, 0x34, 0x77, 0x20, 0x1b, 0x21, 0x39, 0x03, 0x39, 0x83, 0x65, 0x0a, 0xe4, 0xc9, 0xa1, 0xf9, + 0x02, 0x39, 0x4d, 0xc8, 0x19, 0xc8, 0x26, 0x50, 0x22, 0x4e, 0xd2, 0xb3, 0x23, 0x9d, 0x06, 0x0c, + 0x4d, 0x6f, 0xcd, 0xb1, 0x42, 0x63, 0x86, 0x7b, 0x9c, 0x4c, 0x41, 0x25, 0xee, 0xc0, 0xc9, 0x41, + 0xae, 0x77, 0x60, 0x4a, 0xeb, 0x7e, 0x07, 0x1a, 0x10, 0xca, 0xe0, 0x77, 0x08, 0x3f, 0x56, 0xa3, + 0x47, 0x1d, 0x12, 0xa4, 0x6b, 0x2e, 0xeb, 0x6a, 0xc9, 0xac, 0x97, 0x46, 0x37, 0xe6, 0xc5, 0x28, + 
0xb3, 0xbf, 0x21, 0xfc, 0xcc, 0x78, 0x14, 0x15, 0xad, 0x8c, 0x4a, 0xf1, 0x0d, 0xb7, 0xcb, 0x65, + 0x82, 0xa4, 0xfd, 0x37, 0x17, 0xc6, 0x53, 0xf3, 0xf8, 0x1e, 0xe1, 0xc7, 0xab, 0xd0, 0x0e, 0x7b, + 0x10, 0x8b, 0xb4, 0x82, 0x6d, 0xc3, 0x3a, 0x00, 0xcd, 0x00, 0xe9, 0xbb, 0x32, 0x37, 0x47, 0xf9, + 0xfd, 0x11, 0xe1, 0xa5, 0x7d, 0x60, 0x6d, 0xda, 0x21, 0x02, 0xd2, 0x2b, 0x6e, 0x7b, 0xa7, 0x67, + 0x23, 0xa4, 0xe7, 0xad, 0x05, 0x90, 0xb4, 0xd0, 0x5e, 0x83, 0x61, 0xf6, 0xcb, 0x1f, 0xda, 0x19, + 0x7a, 0xd7, 0xd0, 0xce, 0xc4, 0x28, 0xb3, 0xc3, 0xa3, 0xcf, 0xa8, 0x44, 0xcd, 0x7f, 0xf4, 0x31, + 0xcb, 0x5d, 0x8f, 0x3e, 0x59, 0x14, 0xe5, 0xf4, 0x57, 0x84, 0xfd, 0x31, 0x34, 0x4e, 0x78, 0x69, + 0xc7, 0xdb, 0xd6, 0xd7, 0x9a, 0x86, 0x91, 0xce, 0x77, 0x16, 0x44, 0xd3, 0xce, 0x23, 0xb5, 0x46, + 0x0b, 0x9a, 0xdd, 0x00, 0x26, 0xab, 0x13, 0xeb, 0xf3, 0x88, 0x49, 0xec, 0x7a, 0x1e, 0x31, 0x33, + 0xb4, 0x54, 0x77, 0x00, 0x8c, 0x1e, 0xf6, 0x37, 0x28, 0xe3, 0x42, 0x3b, 0x09, 0x8c, 0x95, 0x4d, + 0xeb, 0x54, 0x37, 0x0b, 0xe4, 0x9a, 0xea, 0x66, 0xf3, 0xd4, 0x3c, 0x7e, 0x41, 0xf8, 0xe9, 0xb8, + 0xa4, 0x2a, 0xb7, 0x68, 0xd0, 0x54, 0xdb, 0x71, 0x5e, 0x29, 0xdd, 0x76, 0x2a, 0xcc, 0x32, 0x28, + 0x72, 0x06, 0xdb, 0x8b, 0x81, 0x29, 0xfb, 0x7f, 0x23, 0xfc, 0x62, 0x3c, 0x5b, 0xe3, 0xd8, 0x51, + 0x5c, 0x0d, 0x49, 0xd0, 0xf4, 0xf6, 0x9d, 0x16, 0x6f, 0x16, 0x4e, 0x4e, 0xe8, 0xce, 0x82, 0xa9, + 0x5a, 0x15, 0xb8, 0x06, 0xbc, 0xc1, 0x68, 0xdd, 0x90, 0x1f, 0x2b, 0xd6, 0x89, 0x2d, 0x83, 0xe0, + 0x5a, 0x05, 0x4e, 0x01, 0x29, 0xcb, 0x5f, 0x22, 0xfc, 0x50, 0x15, 0xa2, 0x80, 0x36, 0x88, 0x80, + 0xf5, 0x1e, 0x74, 0x04, 0x3f, 0xb8, 0xe4, 0xdd, 0xb4, 0xde, 0xf2, 0x84, 0x52, 0x5a, 0x7c, 0x3d, + 0x3f, 0x20, 0x91, 0xbe, 0xc7, 0xdf, 0xcb, 0x39, 0xc4, 0xcf, 0xf3, 0x35, 0x57, 0xbc, 0x26, 0x77, + 0x4f, 0xdf, 0x66, 0x8a, 0xd6, 0xb9, 0xaa, 0xf5, 0x3b, 0x8d, 0x5a, 0x8b, 0xb0, 0xe6, 0xf0, 0xcb, + 0x2e, 0xb7, 0xee, 0x5c, 0x25, 0x74, 0xae, 0x9d, 0xab, 0x94, 0x5c, 0x99, 0xfa, 0x18, 0xe1, 0xfb, + 0x86, 0xdf, 0xca, 0x4a, 0xd5, 0xbb, 0xe6, 0x80, 0x94, 0x22, 0x69, 0xe7, 0x7a, 0x2e, 0xad, 0xf6, + 0x74, 0x90, 0xd1, 0xa8, 0x15, 0x66, 0x25, 0xc7, 0x50, 0x36, 0x15, 0x65, 0xe5, 0xb9, 0x18, 0xda, + 0xb1, 0x42, 0x0e, 0x19, 0xf7, 0x50, 0x37, 0x43, 0x2e, 0xac, 0x8f, 0x15, 0x06, 0xad, 0xeb, 0xb1, + 0xc2, 0x88, 0x50, 0x06, 0x3f, 0x42, 0x18, 0x97, 0x83, 0x90, 0xc3, 0x68, 0xbf, 0xbd, 0x2b, 0x96, + 0xd0, 0x73, 0x89, 0xb4, 0x73, 0x35, 0x87, 0x52, 0xb9, 0xf8, 0x00, 0xff, 0xbf, 0x02, 0x22, 0xb6, + 0xf0, 0x8a, 0x7d, 0x7b, 0x55, 0x33, 0xf0, 0xaa, 0xb3, 0x4e, 0x5b, 0x84, 0xb8, 0xba, 0x1e, 0x55, + 0x17, 0x57, 0x9c, 0x0a, 0xf2, 0xc9, 0x9a, 0xe2, 0x6a, 0x0e, 0xa5, 0x96, 0x9a, 0x2a, 0x20, 0x64, + 0x62, 0xa0, 0x61, 0x67, 0x07, 0x38, 0x27, 0x47, 0xc0, 0xad, 0x53, 0x93, 0x59, 0xee, 0x9a, 0x9a, + 0xb2, 0x28, 0xda, 0x23, 0xa9, 0x02, 0x62, 0x6d, 0x7b, 0xcf, 0x64, 0xb6, 0x62, 0x7f, 0x19, 0x33, + 0xc1, 0xf5, 0x91, 0x34, 0x05, 0xa4, 0x2c, 0x7f, 0x82, 0xf0, 0xfd, 0x7b, 0x5d, 0x60, 0x7d, 0x99, + 0x6e, 0x3d, 0xdb, 0xec, 0xa3, 0xa9, 0xa4, 0xb5, 0x95, 0x7c, 0x62, 0xcd, 0x4e, 0x15, 0x48, 0x14, + 0x05, 0xfd, 0xf8, 0x21, 0x65, 0x6d, 0x47, 0x53, 0xb9, 0xda, 0x49, 0x88, 0x95, 0x9d, 0x4f, 0x11, + 0xbe, 0x10, 0xaf, 0xa2, 0xda, 0xc5, 0x15, 0xa7, 0xc5, 0x4f, 0x6e, 0xdd, 0x8d, 0x9c, 0x6a, 0xfd, + 0x15, 0x49, 0x97, 0x1d, 0xc1, 0xa4, 0x27, 0xeb, 0x57, 0x24, 0x09, 0xa1, 0xf3, 0x2b, 0x92, 0x94, + 0x5e, 0xf3, 0xb5, 0x03, 0x39, 0x7d, 0x25, 0x85, 0xae, 0xbe, 0xd2, 0xfa, 0xc4, 0xab, 0x9b, 0x43, + 0x06, 0xbc, 0x35, 0x59, 0xe9, 0x73, 0x87, 0x57, 0x37, 0x69, 0xb1, 0xfb, 0xab, 0x1b, 0x13, 0x43, + 0x79, 0xfc, 0x13, 0xe1, 
0xe7, 0x2b, 0xd0, 0x01, 0x46, 0x04, 0x6c, 0x13, 0x2e, 0xc6, 0x4f, 0xa4, + 0x89, 0x1b, 0x37, 0xb6, 0xbc, 0x67, 0x1d, 0x3c, 0x33, 0x59, 0x72, 0x06, 0xd5, 0x45, 0x22, 0xb5, + 0x45, 0xd7, 0x93, 0xe5, 0xb8, 0x4e, 0x2b, 0xe5, 0xca, 0xb4, 0x7a, 0xb1, 0x56, 0x9e, 0x8b, 0xa1, + 0x55, 0x20, 0x55, 0xa8, 0x77, 0x69, 0xd0, 0xd4, 0x8a, 0xa4, 0x55, 0xeb, 0x3d, 0x4d, 0x69, 0x5d, + 0x2b, 0x10, 0x23, 0x42, 0x6b, 0x53, 0xe8, 0x6d, 0x97, 0x03, 0xca, 0x69, 0x9d, 0x06, 0xa3, 0x6a, + 0x6f, 0x78, 0x1c, 0xb2, 0x6e, 0x53, 0x4c, 0xc7, 0xb8, 0xb6, 0x29, 0x66, 0xd1, 0xb4, 0xfe, 0xd5, + 0x9d, 0xa8, 0x49, 0xe6, 0xe9, 0x5f, 0x65, 0xe8, 0x5d, 0xfb, 0x57, 0x99, 0x18, 0xad, 0x43, 0xbf, + 0x1b, 0x06, 0xe9, 0xbe, 0x4b, 0x2c, 0xb5, 0xee, 0xd0, 0x4f, 0x61, 0xb8, 0x76, 0xe8, 0xa7, 0xa2, + 0x94, 0xf1, 0x3f, 0x10, 0x7e, 0xb6, 0x26, 0x18, 0x90, 0xf6, 0xf9, 0xf3, 0x34, 0x5d, 0x7c, 0x58, + 0x37, 0x81, 0x67, 0x91, 0xe4, 0x24, 0x76, 0x17, 0x07, 0x94, 0x53, 0x79, 0x09, 0xbd, 0x8c, 0x4a, + 0xd1, 0xc9, 0xa9, 0x5f, 0xb8, 0x7b, 0xea, 0x17, 0xee, 0x9d, 0xfa, 0xe8, 0xc3, 0x81, 0x8f, 0xbe, + 0x1d, 0xf8, 0xe8, 0xf7, 0x81, 0x8f, 0x4e, 0x06, 0x3e, 0xfa, 0x67, 0xe0, 0xa3, 0x7f, 0x07, 0x7e, + 0xe1, 0xde, 0xc0, 0x47, 0x9f, 0x9d, 0xf9, 0x85, 0x93, 0x33, 0xbf, 0x70, 0xf7, 0xcc, 0x2f, 0xbc, + 0x7d, 0xed, 0x28, 0x3c, 0xf7, 0x43, 0xc3, 0xa9, 0xbf, 0xe7, 0xb8, 0xae, 0x7f, 0x52, 0xff, 0xdf, + 0xe8, 0xe7, 0x1c, 0x97, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x6c, 0x5d, 0xf6, 0xc7, 0x6a, 0x22, + 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -187,6 +190,8 @@ // WorkflowTaskFailedEvent written to the history and a new WorkflowTask created. This API can be used by client to // either clear sticky task queue or report ny panics during WorkflowTask processing. RespondWorkflowTaskFailed(ctx context.Context, in *RespondWorkflowTaskFailedRequest, opts ...grpc.CallOption) (*RespondWorkflowTaskFailedResponse, error) + // IsWorkflowTaskValid is called by matching service checking whether the workflow task is valid. + IsWorkflowTaskValid(ctx context.Context, in *IsWorkflowTaskValidRequest, opts ...grpc.CallOption) (*IsWorkflowTaskValidResponse, error) // RecordActivityTaskHeartbeat is called by application worker while it is processing an ActivityTask. If worker fails // to heartbeat within 'heartbeatTimeoutSeconds' interval for the ActivityTask, then it will be marked as timedout and // 'ActivityTaskTimedOut' event will be written to the workflow history. Calling 'RecordActivityTaskHeartbeat' will @@ -211,6 +216,8 @@ // PollActivityTaskQueue API call for completion. It fails with 'EntityNotExistsError' if the taskToken is not valid // anymore due to activity timeout. RespondActivityTaskCanceled(ctx context.Context, in *RespondActivityTaskCanceledRequest, opts ...grpc.CallOption) (*RespondActivityTaskCanceledResponse, error) + // IsActivityTaskValid is called by matching service checking whether the workflow task is valid. + IsActivityTaskValid(ctx context.Context, in *IsActivityTaskValidRequest, opts ...grpc.CallOption) (*IsActivityTaskValidResponse, error) // SignalWorkflowExecution is used to send a signal event to running workflow execution. This results in // WorkflowExecutionSignaled event recorded in the history and a workflow task being created for the execution. 
SignalWorkflowExecution(ctx context.Context, in *SignalWorkflowExecutionRequest, opts ...grpc.CallOption) (*SignalWorkflowExecutionResponse, error) @@ -400,6 +407,15 @@ return out, nil } +func (c *historyServiceClient) IsWorkflowTaskValid(ctx context.Context, in *IsWorkflowTaskValidRequest, opts ...grpc.CallOption) (*IsWorkflowTaskValidResponse, error) { + out := new(IsWorkflowTaskValidResponse) + err := c.cc.Invoke(ctx, "/temporal.server.api.historyservice.v1.HistoryService/IsWorkflowTaskValid", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *historyServiceClient) RecordActivityTaskHeartbeat(ctx context.Context, in *RecordActivityTaskHeartbeatRequest, opts ...grpc.CallOption) (*RecordActivityTaskHeartbeatResponse, error) { out := new(RecordActivityTaskHeartbeatResponse) err := c.cc.Invoke(ctx, "/temporal.server.api.historyservice.v1.HistoryService/RecordActivityTaskHeartbeat", in, out, opts...) @@ -436,6 +452,15 @@ return out, nil } +func (c *historyServiceClient) IsActivityTaskValid(ctx context.Context, in *IsActivityTaskValidRequest, opts ...grpc.CallOption) (*IsActivityTaskValidResponse, error) { + out := new(IsActivityTaskValidResponse) + err := c.cc.Invoke(ctx, "/temporal.server.api.historyservice.v1.HistoryService/IsActivityTaskValid", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *historyServiceClient) SignalWorkflowExecution(ctx context.Context, in *SignalWorkflowExecutionRequest, opts ...grpc.CallOption) (*SignalWorkflowExecutionResponse, error) { out := new(SignalWorkflowExecutionResponse) err := c.cc.Invoke(ctx, "/temporal.server.api.historyservice.v1.HistoryService/SignalWorkflowExecution", in, out, opts...) @@ -820,6 +845,8 @@ // WorkflowTaskFailedEvent written to the history and a new WorkflowTask created. This API can be used by client to // either clear sticky task queue or report ny panics during WorkflowTask processing. RespondWorkflowTaskFailed(context.Context, *RespondWorkflowTaskFailedRequest) (*RespondWorkflowTaskFailedResponse, error) + // IsWorkflowTaskValid is called by matching service checking whether the workflow task is valid. + IsWorkflowTaskValid(context.Context, *IsWorkflowTaskValidRequest) (*IsWorkflowTaskValidResponse, error) // RecordActivityTaskHeartbeat is called by application worker while it is processing an ActivityTask. If worker fails // to heartbeat within 'heartbeatTimeoutSeconds' interval for the ActivityTask, then it will be marked as timedout and // 'ActivityTaskTimedOut' event will be written to the workflow history. Calling 'RecordActivityTaskHeartbeat' will @@ -844,6 +871,8 @@ // PollActivityTaskQueue API call for completion. It fails with 'EntityNotExistsError' if the taskToken is not valid // anymore due to activity timeout. RespondActivityTaskCanceled(context.Context, *RespondActivityTaskCanceledRequest) (*RespondActivityTaskCanceledResponse, error) + // IsActivityTaskValid is called by matching service checking whether the workflow task is valid. + IsActivityTaskValid(context.Context, *IsActivityTaskValidRequest) (*IsActivityTaskValidResponse, error) // SignalWorkflowExecution is used to send a signal event to running workflow execution. This results in // WorkflowExecutionSignaled event recorded in the history and a workflow task being created for the execution. 
SignalWorkflowExecution(context.Context, *SignalWorkflowExecutionRequest) (*SignalWorkflowExecutionResponse, error) @@ -981,6 +1010,9 @@ func (*UnimplementedHistoryServiceServer) RespondWorkflowTaskFailed(ctx context.Context, req *RespondWorkflowTaskFailedRequest) (*RespondWorkflowTaskFailedResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method RespondWorkflowTaskFailed not implemented") } +func (*UnimplementedHistoryServiceServer) IsWorkflowTaskValid(ctx context.Context, req *IsWorkflowTaskValidRequest) (*IsWorkflowTaskValidResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method IsWorkflowTaskValid not implemented") +} func (*UnimplementedHistoryServiceServer) RecordActivityTaskHeartbeat(ctx context.Context, req *RecordActivityTaskHeartbeatRequest) (*RecordActivityTaskHeartbeatResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method RecordActivityTaskHeartbeat not implemented") } @@ -993,6 +1025,9 @@ func (*UnimplementedHistoryServiceServer) RespondActivityTaskCanceled(ctx context.Context, req *RespondActivityTaskCanceledRequest) (*RespondActivityTaskCanceledResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method RespondActivityTaskCanceled not implemented") } +func (*UnimplementedHistoryServiceServer) IsActivityTaskValid(ctx context.Context, req *IsActivityTaskValidRequest) (*IsActivityTaskValidResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method IsActivityTaskValid not implemented") +} func (*UnimplementedHistoryServiceServer) SignalWorkflowExecution(ctx context.Context, req *SignalWorkflowExecutionRequest) (*SignalWorkflowExecutionResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method SignalWorkflowExecution not implemented") } @@ -1250,6 +1285,24 @@ return interceptor(ctx, in, info, handler) } +func _HistoryService_IsWorkflowTaskValid_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(IsWorkflowTaskValidRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(HistoryServiceServer).IsWorkflowTaskValid(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/temporal.server.api.historyservice.v1.HistoryService/IsWorkflowTaskValid", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(HistoryServiceServer).IsWorkflowTaskValid(ctx, req.(*IsWorkflowTaskValidRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _HistoryService_RecordActivityTaskHeartbeat_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(RecordActivityTaskHeartbeatRequest) if err := dec(in); err != nil { @@ -1322,6 +1375,24 @@ return interceptor(ctx, in, info, handler) } +func _HistoryService_IsActivityTaskValid_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(IsActivityTaskValidRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(HistoryServiceServer).IsActivityTaskValid(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/temporal.server.api.historyservice.v1.HistoryService/IsActivityTaskValid", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(HistoryServiceServer).IsActivityTaskValid(ctx, req.(*IsActivityTaskValidRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _HistoryService_SignalWorkflowExecution_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(SignalWorkflowExecutionRequest) if err := dec(in); err != nil { @@ -2015,6 +2086,10 @@ Handler: _HistoryService_RespondWorkflowTaskFailed_Handler, }, { + MethodName: "IsWorkflowTaskValid", + Handler: _HistoryService_IsWorkflowTaskValid_Handler, + }, + { MethodName: "RecordActivityTaskHeartbeat", Handler: _HistoryService_RecordActivityTaskHeartbeat_Handler, }, @@ -2031,6 +2106,10 @@ Handler: _HistoryService_RespondActivityTaskCanceled_Handler, }, { + MethodName: "IsActivityTaskValid", + Handler: _HistoryService_IsActivityTaskValid_Handler, + }, + { MethodName: "SignalWorkflowExecution", Handler: _HistoryService_SignalWorkflowExecution_Handler, }, diff -Nru temporal-1.21.5-1/src/api/historyservicemock/v1/service.pb.mock.go temporal-1.22.5/src/api/historyservicemock/v1/service.pb.mock.go --- temporal-1.21.5-1/src/api/historyservicemock/v1/service.pb.mock.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/api/historyservicemock/v1/service.pb.mock.go 2024-02-23 09:45:43.000000000 +0000 @@ -321,6 +321,46 @@ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetShard", reflect.TypeOf((*MockHistoryServiceClient)(nil).GetShard), varargs...) } +// IsActivityTaskValid mocks base method. +func (m *MockHistoryServiceClient) IsActivityTaskValid(ctx context.Context, in *historyservice.IsActivityTaskValidRequest, opts ...grpc.CallOption) (*historyservice.IsActivityTaskValidResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "IsActivityTaskValid", varargs...) + ret0, _ := ret[0].(*historyservice.IsActivityTaskValidResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// IsActivityTaskValid indicates an expected call of IsActivityTaskValid. +func (mr *MockHistoryServiceClientMockRecorder) IsActivityTaskValid(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsActivityTaskValid", reflect.TypeOf((*MockHistoryServiceClient)(nil).IsActivityTaskValid), varargs...) +} + +// IsWorkflowTaskValid mocks base method. +func (m *MockHistoryServiceClient) IsWorkflowTaskValid(ctx context.Context, in *historyservice.IsWorkflowTaskValidRequest, opts ...grpc.CallOption) (*historyservice.IsWorkflowTaskValidResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "IsWorkflowTaskValid", varargs...) + ret0, _ := ret[0].(*historyservice.IsWorkflowTaskValidResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// IsWorkflowTaskValid indicates an expected call of IsWorkflowTaskValid. +func (mr *MockHistoryServiceClientMockRecorder) IsWorkflowTaskValid(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsWorkflowTaskValid", reflect.TypeOf((*MockHistoryServiceClient)(nil).IsWorkflowTaskValid), varargs...) +} + // MergeDLQMessages mocks base method. 
func (m *MockHistoryServiceClient) MergeDLQMessages(ctx context.Context, in *historyservice.MergeDLQMessagesRequest, opts ...grpc.CallOption) (*historyservice.MergeDLQMessagesResponse, error) { m.ctrl.T.Helper() @@ -1103,7 +1143,7 @@ } // RecvMsg mocks base method. -func (m_2 *MockHistoryService_StreamWorkflowReplicationMessagesClient) RecvMsg(m interface{}) error { +func (m_2 *MockHistoryService_StreamWorkflowReplicationMessagesClient) RecvMsg(m any) error { m_2.ctrl.T.Helper() ret := m_2.ctrl.Call(m_2, "RecvMsg", m) ret0, _ := ret[0].(error) @@ -1131,7 +1171,7 @@ } // SendMsg mocks base method. -func (m_2 *MockHistoryService_StreamWorkflowReplicationMessagesClient) SendMsg(m interface{}) error { +func (m_2 *MockHistoryService_StreamWorkflowReplicationMessagesClient) SendMsg(m any) error { m_2.ctrl.T.Helper() ret := m_2.ctrl.Call(m_2, "SendMsg", m) ret0, _ := ret[0].(error) @@ -1376,6 +1416,36 @@ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetShard", reflect.TypeOf((*MockHistoryServiceServer)(nil).GetShard), arg0, arg1) } +// IsActivityTaskValid mocks base method. +func (m *MockHistoryServiceServer) IsActivityTaskValid(arg0 context.Context, arg1 *historyservice.IsActivityTaskValidRequest) (*historyservice.IsActivityTaskValidResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IsActivityTaskValid", arg0, arg1) + ret0, _ := ret[0].(*historyservice.IsActivityTaskValidResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// IsActivityTaskValid indicates an expected call of IsActivityTaskValid. +func (mr *MockHistoryServiceServerMockRecorder) IsActivityTaskValid(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsActivityTaskValid", reflect.TypeOf((*MockHistoryServiceServer)(nil).IsActivityTaskValid), arg0, arg1) +} + +// IsWorkflowTaskValid mocks base method. +func (m *MockHistoryServiceServer) IsWorkflowTaskValid(arg0 context.Context, arg1 *historyservice.IsWorkflowTaskValidRequest) (*historyservice.IsWorkflowTaskValidResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IsWorkflowTaskValid", arg0, arg1) + ret0, _ := ret[0].(*historyservice.IsWorkflowTaskValidResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// IsWorkflowTaskValid indicates an expected call of IsWorkflowTaskValid. +func (mr *MockHistoryServiceServerMockRecorder) IsWorkflowTaskValid(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsWorkflowTaskValid", reflect.TypeOf((*MockHistoryServiceServer)(nil).IsWorkflowTaskValid), arg0, arg1) +} + // MergeDLQMessages mocks base method. func (m *MockHistoryServiceServer) MergeDLQMessages(arg0 context.Context, arg1 *historyservice.MergeDLQMessagesRequest) (*historyservice.MergeDLQMessagesResponse, error) { m.ctrl.T.Helper() @@ -1953,7 +2023,7 @@ } // RecvMsg mocks base method. -func (m_2 *MockHistoryService_StreamWorkflowReplicationMessagesServer) RecvMsg(m interface{}) error { +func (m_2 *MockHistoryService_StreamWorkflowReplicationMessagesServer) RecvMsg(m any) error { m_2.ctrl.T.Helper() ret := m_2.ctrl.Call(m_2, "RecvMsg", m) ret0, _ := ret[0].(error) @@ -1995,7 +2065,7 @@ } // SendMsg mocks base method. 
-func (m_2 *MockHistoryService_StreamWorkflowReplicationMessagesServer) SendMsg(m interface{}) error { +func (m_2 *MockHistoryService_StreamWorkflowReplicationMessagesServer) SendMsg(m any) error { m_2.ctrl.T.Helper() ret := m_2.ctrl.Call(m_2, "SendMsg", m) ret0, _ := ret[0].(error) diff -Nru temporal-1.21.5-1/src/api/matchingservice/v1/request_response.pb.go temporal-1.22.5/src/api/matchingservice/v1/request_response.pb.go --- temporal-1.21.5-1/src/api/matchingservice/v1/request_response.pb.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/api/matchingservice/v1/request_response.pb.go 2024-02-23 09:45:43.000000000 +0000 @@ -1388,6 +1388,7 @@ // Types that are valid to be assigned to Operation: // *UpdateWorkerBuildIdCompatibilityRequest_ApplyPublicRequest_ // *UpdateWorkerBuildIdCompatibilityRequest_RemoveBuildIds_ + // *UpdateWorkerBuildIdCompatibilityRequest_PersistUnknownBuildId Operation isUpdateWorkerBuildIdCompatibilityRequest_Operation `protobuf_oneof:"operation"` } @@ -1438,11 +1439,16 @@ type UpdateWorkerBuildIdCompatibilityRequest_RemoveBuildIds_ struct { RemoveBuildIds *UpdateWorkerBuildIdCompatibilityRequest_RemoveBuildIds `protobuf:"bytes,4,opt,name=remove_build_ids,json=removeBuildIds,proto3,oneof" json:"remove_build_ids,omitempty"` } +type UpdateWorkerBuildIdCompatibilityRequest_PersistUnknownBuildId struct { + PersistUnknownBuildId string `protobuf:"bytes,5,opt,name=persist_unknown_build_id,json=persistUnknownBuildId,proto3,oneof" json:"persist_unknown_build_id,omitempty"` +} func (*UpdateWorkerBuildIdCompatibilityRequest_ApplyPublicRequest_) isUpdateWorkerBuildIdCompatibilityRequest_Operation() { } func (*UpdateWorkerBuildIdCompatibilityRequest_RemoveBuildIds_) isUpdateWorkerBuildIdCompatibilityRequest_Operation() { } +func (*UpdateWorkerBuildIdCompatibilityRequest_PersistUnknownBuildId) isUpdateWorkerBuildIdCompatibilityRequest_Operation() { +} func (m *UpdateWorkerBuildIdCompatibilityRequest) GetOperation() isUpdateWorkerBuildIdCompatibilityRequest_Operation { if m != nil { @@ -1479,11 +1485,19 @@ return nil } +func (m *UpdateWorkerBuildIdCompatibilityRequest) GetPersistUnknownBuildId() string { + if x, ok := m.GetOperation().(*UpdateWorkerBuildIdCompatibilityRequest_PersistUnknownBuildId); ok { + return x.PersistUnknownBuildId + } + return "" +} + // XXX_OneofWrappers is for the internal use of the proto package. 
func (*UpdateWorkerBuildIdCompatibilityRequest) XXX_OneofWrappers() []interface{} { return []interface{}{ (*UpdateWorkerBuildIdCompatibilityRequest_ApplyPublicRequest_)(nil), (*UpdateWorkerBuildIdCompatibilityRequest_RemoveBuildIds_)(nil), + (*UpdateWorkerBuildIdCompatibilityRequest_PersistUnknownBuildId)(nil), } } @@ -2412,159 +2426,160 @@ } var fileDescriptor_a429a3813476c583 = []byte{ - // 2418 bytes of a gzipped FileDescriptorProto + // 2442 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x5a, 0xcf, 0x73, 0x1b, 0x49, 0xf5, 0xf7, 0x58, 0xb2, 0x2d, 0x3d, 0xc9, 0xb6, 0x3c, 0xdf, 0x6c, 0x22, 0x3b, 0x8e, 0x6c, 0xcf, - 0x66, 0x13, 0x27, 0x95, 0x95, 0xbf, 0x31, 0x24, 0xb5, 0xbb, 0x90, 0x5d, 0x1c, 0xc7, 0x1b, 0x7b, + 0x66, 0x37, 0x4e, 0x2a, 0x2b, 0x7f, 0x63, 0x48, 0x6a, 0xb3, 0x90, 0x5d, 0x1c, 0xc7, 0x1b, 0x7b, 0x37, 0x59, 0x9c, 0x89, 0x13, 0xa8, 0x2c, 0xc5, 0x6c, 0x6b, 0xa6, 0x23, 0x0f, 0x1e, 0xcd, 0x4c, 0xa6, 0x7b, 0xa4, 0x98, 0x13, 0x67, 0xb8, 0x2c, 0x45, 0x15, 0x05, 0xc5, 0x9d, 0x02, 0xaa, 0x38, - 0xc1, 0x85, 0x3f, 0x80, 0x2a, 0x0e, 0x1c, 0x72, 0xdc, 0x1b, 0xc4, 0xa9, 0xa2, 0x28, 0xe0, 0xb0, - 0xfc, 0x03, 0x14, 0xd5, 0x3f, 0x66, 0x46, 0x3f, 0x46, 0x96, 0xec, 0x38, 0x2c, 0xc5, 0xcd, 0x7a, - 0xfd, 0xde, 0xeb, 0xf7, 0x5e, 0x7f, 0xde, 0xe7, 0x75, 0x4b, 0x86, 0x1b, 0x14, 0x37, 0x7c, 0x2f, - 0x40, 0xce, 0x0a, 0xc1, 0x41, 0x13, 0x07, 0x2b, 0xc8, 0xb7, 0x57, 0x1a, 0x88, 0x9a, 0xbb, 0xb6, - 0x5b, 0x67, 0x22, 0xdb, 0xc4, 0x2b, 0xcd, 0xab, 0x2b, 0x01, 0x7e, 0x12, 0x62, 0x42, 0x8d, 0x00, - 0x13, 0xdf, 0x73, 0x09, 0xae, 0xfa, 0x81, 0x47, 0x3d, 0xf5, 0x42, 0x64, 0x5e, 0x15, 0xe6, 0x55, - 0xe4, 0xdb, 0xd5, 0x2e, 0xf3, 0x6a, 0xf3, 0xea, 0x5c, 0xa5, 0xee, 0x79, 0x75, 0x07, 0xaf, 0x70, - 0xab, 0x5a, 0xf8, 0x78, 0xc5, 0x0a, 0x03, 0x44, 0x6d, 0xcf, 0x15, 0x7e, 0xe6, 0x16, 0xba, 0xd7, - 0xa9, 0xdd, 0xc0, 0x84, 0xa2, 0x86, 0x2f, 0x15, 0x96, 0x2c, 0xec, 0x63, 0xd7, 0xc2, 0xae, 0x69, - 0x63, 0xb2, 0x52, 0xf7, 0xea, 0x1e, 0x97, 0xf3, 0xbf, 0xa4, 0xca, 0xf9, 0x38, 0x15, 0x96, 0x83, - 0xe9, 0x35, 0x1a, 0x9e, 0xcb, 0x42, 0x6f, 0x60, 0x42, 0x50, 0x5d, 0x46, 0x3c, 0x77, 0xa1, 0x43, - 0x0b, 0xbb, 0x61, 0x83, 0x30, 0x25, 0x8a, 0xc8, 0x9e, 0xf1, 0x24, 0xc4, 0x61, 0xa4, 0x77, 0xb1, - 0x43, 0x8f, 0x2d, 0xf3, 0xd5, 0x5e, 0x87, 0xaf, 0x77, 0x28, 0x3e, 0x09, 0x71, 0xb0, 0x3f, 0x68, - 0x57, 0x2e, 0x33, 0x3d, 0xa7, 0x57, 0xef, 0x72, 0xda, 0x71, 0x98, 0x8e, 0x67, 0xee, 0xf5, 0xea, - 0x5e, 0x4c, 0xd3, 0xed, 0x48, 0x48, 0x2a, 0x5e, 0x49, 0x53, 0xdc, 0xb5, 0x09, 0xf5, 0xd2, 0x42, - 0xfd, 0x72, 0x9a, 0xb6, 0x8f, 0x03, 0x62, 0x13, 0x8a, 0x5d, 0x81, 0x86, 0xa4, 0x5a, 0x44, 0x5a, - 0x55, 0xd3, 0xac, 0x0e, 0xa9, 0xda, 0xf5, 0x8e, 0x82, 0xb4, 0xbc, 0x60, 0xef, 0xb1, 0xe3, 0xb5, - 0x06, 0x02, 0x4e, 0xfb, 0xbb, 0x02, 0xf3, 0xdb, 0x9e, 0xe3, 0x7c, 0x43, 0x5a, 0xec, 0x20, 0xb2, - 0x77, 0x8f, 0x6d, 0xa1, 0x0b, 0x7d, 0x75, 0x09, 0x8a, 0x2e, 0x6a, 0x60, 0xe2, 0x23, 0x13, 0x1b, - 0xb6, 0x55, 0x56, 0x16, 0x95, 0xe5, 0xbc, 0x5e, 0x88, 0x65, 0x5b, 0x96, 0x7a, 0x16, 0xf2, 0xbe, - 0xe7, 0x38, 0x38, 0x60, 0xeb, 0xa3, 0x7c, 0x3d, 0x27, 0x04, 0x5b, 0x96, 0xfa, 0x09, 0x14, 0xd9, - 0xdf, 0x86, 0xdc, 0xbf, 0x9c, 0x59, 0x54, 0x96, 0x0b, 0xab, 0x37, 0xe2, 0xfc, 0x38, 0xc2, 0xbb, - 0xe2, 0xad, 0x36, 0xaf, 0x56, 0x0f, 0x0b, 0x4a, 0x2f, 0x30, 0x97, 0x51, 0x84, 0x97, 0xa0, 0xf4, - 0xd8, 0x0b, 0x5a, 0x28, 0xb0, 0xb0, 0x65, 0x10, 0x2f, 0x0c, 0x4c, 0x5c, 0xce, 0xf2, 0x28, 0xa6, - 0x63, 0xf9, 0x7d, 0x2e, 0xd6, 0xfe, 0x98, 0x87, 0x73, 0x7d, 0x1c, 0x8b, 0xaa, 0xa8, 0xe7, 0x00, - 0xf8, 0x61, 0x50, 0x6f, 0x0f, 
0xbb, 0x3c, 0xd9, 0xa2, 0x9e, 0x67, 0x92, 0x1d, 0x26, 0x50, 0xbf, - 0x09, 0x6a, 0x14, 0xab, 0x81, 0x9f, 0x62, 0x33, 0x64, 0x3d, 0xc7, 0x73, 0x2e, 0xac, 0x5e, 0xea, - 0xcc, 0x49, 0x34, 0x0c, 0x4b, 0x25, 0xda, 0x6d, 0x23, 0x32, 0xd0, 0x67, 0x5a, 0xdd, 0x22, 0x75, + 0xc1, 0x65, 0xff, 0x00, 0xaa, 0x38, 0x70, 0xc8, 0x71, 0x6f, 0x10, 0xa7, 0x8a, 0xa2, 0x80, 0xc3, + 0xf2, 0x1f, 0x50, 0xfd, 0x63, 0x66, 0xf4, 0x63, 0x64, 0xc9, 0x8e, 0xc3, 0x52, 0xdc, 0xac, 0xd7, + 0xef, 0xbd, 0x7e, 0xef, 0xf5, 0xa7, 0x3f, 0xef, 0xb5, 0x64, 0xb8, 0x41, 0x71, 0xc3, 0xf7, 0x02, + 0xe4, 0xac, 0x10, 0x1c, 0x34, 0x71, 0xb0, 0x82, 0x7c, 0x7b, 0xa5, 0x81, 0xa8, 0xb9, 0x6b, 0xbb, + 0x75, 0x26, 0xb2, 0x4d, 0xbc, 0xd2, 0xbc, 0xb2, 0x12, 0xe0, 0x27, 0x21, 0x26, 0xd4, 0x08, 0x30, + 0xf1, 0x3d, 0x97, 0xe0, 0xaa, 0x1f, 0x78, 0xd4, 0x53, 0xdf, 0x8c, 0xcc, 0xab, 0xc2, 0xbc, 0x8a, + 0x7c, 0xbb, 0xda, 0x65, 0x5e, 0x6d, 0x5e, 0x99, 0xab, 0xd4, 0x3d, 0xaf, 0xee, 0xe0, 0x15, 0x6e, + 0x55, 0x0b, 0x1f, 0xaf, 0x58, 0x61, 0x80, 0xa8, 0xed, 0xb9, 0xc2, 0xcf, 0xdc, 0x42, 0xf7, 0x3a, + 0xb5, 0x1b, 0x98, 0x50, 0xd4, 0xf0, 0xa5, 0xc2, 0x92, 0x85, 0x7d, 0xec, 0x5a, 0xd8, 0x35, 0x6d, + 0x4c, 0x56, 0xea, 0x5e, 0xdd, 0xe3, 0x72, 0xfe, 0x97, 0x54, 0x39, 0x1f, 0xa7, 0xc2, 0x72, 0x30, + 0xbd, 0x46, 0xc3, 0x73, 0x59, 0xe8, 0x0d, 0x4c, 0x08, 0xaa, 0xcb, 0x88, 0xe7, 0xde, 0xec, 0xd0, + 0xc2, 0x6e, 0xd8, 0x20, 0x4c, 0x89, 0x22, 0xb2, 0x67, 0x3c, 0x09, 0x71, 0x18, 0xe9, 0x5d, 0xe8, + 0xd0, 0x63, 0xcb, 0x7c, 0xb5, 0xd7, 0xe1, 0xeb, 0x1d, 0x8a, 0x4f, 0x42, 0x1c, 0xec, 0x0f, 0xda, + 0x95, 0xcb, 0x4c, 0xcf, 0xe9, 0xd5, 0xbb, 0x94, 0x76, 0x1c, 0xa6, 0xe3, 0x99, 0x7b, 0xbd, 0xba, + 0x17, 0xd2, 0x74, 0x3b, 0x12, 0x92, 0x8a, 0x97, 0xd3, 0x14, 0x77, 0x6d, 0x42, 0xbd, 0xb4, 0x50, + 0xbf, 0x9a, 0xa6, 0xed, 0xe3, 0x80, 0xd8, 0x84, 0x62, 0x57, 0xa0, 0x21, 0xa9, 0x16, 0x91, 0x56, + 0xd5, 0x34, 0xab, 0x43, 0xaa, 0x76, 0xad, 0xa3, 0x20, 0x2d, 0x2f, 0xd8, 0x7b, 0xec, 0x78, 0xad, + 0x81, 0x80, 0xd3, 0xfe, 0xa1, 0xc0, 0xfc, 0xb6, 0xe7, 0x38, 0xdf, 0x92, 0x16, 0x3b, 0x88, 0xec, + 0xdd, 0x63, 0x5b, 0xe8, 0x42, 0x5f, 0x5d, 0x82, 0xa2, 0x8b, 0x1a, 0x98, 0xf8, 0xc8, 0xc4, 0x86, + 0x6d, 0x95, 0x95, 0x45, 0x65, 0x39, 0xaf, 0x17, 0x62, 0xd9, 0x96, 0xa5, 0x9e, 0x85, 0xbc, 0xef, + 0x39, 0x0e, 0x0e, 0xd8, 0xfa, 0x28, 0x5f, 0xcf, 0x09, 0xc1, 0x96, 0xa5, 0x7e, 0x02, 0x45, 0xf6, + 0xb7, 0x21, 0xf7, 0x2f, 0x67, 0x16, 0x95, 0xe5, 0xc2, 0xea, 0x8d, 0x38, 0x3f, 0x8e, 0xf0, 0xae, + 0x78, 0xab, 0xcd, 0x2b, 0xd5, 0xc3, 0x82, 0xd2, 0x0b, 0xcc, 0x65, 0x14, 0xe1, 0x45, 0x28, 0x3d, + 0xf6, 0x82, 0x16, 0x0a, 0x2c, 0x6c, 0x19, 0xc4, 0x0b, 0x03, 0x13, 0x97, 0xb3, 0x3c, 0x8a, 0xe9, + 0x58, 0x7e, 0x9f, 0x8b, 0xb5, 0x3f, 0xe5, 0xe1, 0x5c, 0x1f, 0xc7, 0xa2, 0x2a, 0xea, 0x39, 0x00, + 0x7e, 0x18, 0xd4, 0xdb, 0xc3, 0x2e, 0x4f, 0xb6, 0xa8, 0xe7, 0x99, 0x64, 0x87, 0x09, 0xd4, 0x6f, + 0x83, 0x1a, 0xc5, 0x6a, 0xe0, 0xa7, 0xd8, 0x0c, 0xd9, 0x9d, 0xe3, 0x39, 0x17, 0x56, 0x2f, 0x76, + 0xe6, 0x24, 0x2e, 0x0c, 0x4b, 0x25, 0xda, 0x6d, 0x23, 0x32, 0xd0, 0x67, 0x5a, 0xdd, 0x22, 0x75, 0x0b, 0x26, 0x63, 0xcf, 0x74, 0xdf, 0xc7, 0xb2, 0x50, 0xe7, 0x07, 0x39, 0xdd, 0xd9, 0xf7, 0xb1, - 0x5e, 0x6c, 0xb5, 0x7d, 0x52, 0xdf, 0x86, 0x59, 0x3f, 0xc0, 0x4d, 0xdb, 0x0b, 0x89, 0x41, 0x28, - 0x0a, 0x28, 0xb6, 0x0c, 0xdc, 0xc4, 0x2e, 0x65, 0xe7, 0xc3, 0x2a, 0x93, 0xd1, 0x4f, 0x47, 0x0a, - 0xf7, 0xc5, 0xfa, 0x06, 0x5b, 0xde, 0xb2, 0xd4, 0x65, 0x28, 0xf5, 0x58, 0x8c, 0x71, 0x8b, 0x29, - 0xd2, 0xa9, 0x59, 0x86, 0x09, 0x44, 0x59, 0x6c, 0xb4, 0x3c, 0xbe, 0xa8, 0x2c, 0x8f, 0xe9, 0xd1, - 0x47, 0x55, 0x83, 0x49, 0x17, 0x3f, 0xa5, 0x89, 0x83, 
0x09, 0xee, 0xa0, 0xc0, 0x84, 0x91, 0xf5, - 0x15, 0x50, 0x6b, 0xc8, 0xdc, 0x73, 0xbc, 0xba, 0x61, 0x7a, 0xa1, 0x4b, 0x8d, 0x5d, 0xdb, 0xa5, - 0xe5, 0x1c, 0x57, 0x2c, 0xc9, 0x95, 0x75, 0xb6, 0xb0, 0x69, 0xbb, 0x54, 0x7d, 0x0b, 0xca, 0x84, - 0xda, 0xe6, 0xde, 0x7e, 0x52, 0x73, 0x03, 0xbb, 0xa8, 0xe6, 0x60, 0xab, 0x9c, 0x5f, 0x54, 0x96, - 0x73, 0xfa, 0x69, 0xb1, 0x1e, 0x97, 0x73, 0x43, 0xac, 0xaa, 0xef, 0xc0, 0x18, 0x67, 0x90, 0x32, - 0xa4, 0x55, 0x93, 0x2f, 0xb5, 0x17, 0xf3, 0x1e, 0x13, 0xe8, 0xc2, 0x44, 0x7d, 0x02, 0x67, 0x68, - 0x80, 0x5c, 0x62, 0xb3, 0x34, 0x92, 0xb3, 0x41, 0x64, 0xaf, 0x5c, 0xe0, 0xde, 0xde, 0xae, 0xa6, - 0xb1, 0xb5, 0x24, 0x02, 0xe6, 0x76, 0x27, 0x32, 0x6f, 0xc7, 0xdb, 0x96, 0xfb, 0xd8, 0xd3, 0x5f, - 0xa3, 0x69, 0x4b, 0x6a, 0x1d, 0xce, 0xf5, 0xc2, 0xcb, 0x48, 0xd8, 0xa1, 0x5c, 0x4c, 0x4b, 0x23, - 0xa6, 0x05, 0xbe, 0x67, 0x0c, 0xe9, 0xb9, 0x1e, 0x90, 0xc5, 0x6b, 0xac, 0xab, 0x6b, 0x01, 0x72, - 0xcd, 0x5d, 0x09, 0xf4, 0x29, 0x0e, 0xf4, 0x82, 0x90, 0x09, 0xa8, 0xdf, 0x86, 0x29, 0x62, 0xee, - 0x62, 0x2b, 0x74, 0xb0, 0x65, 0xb0, 0xf1, 0x51, 0x9e, 0xe6, 0x9b, 0xcf, 0x55, 0xc5, 0x6c, 0xa9, - 0x46, 0xb3, 0xa5, 0xba, 0x13, 0xcd, 0x96, 0x9b, 0xd9, 0x4f, 0xff, 0xb4, 0xa0, 0xe8, 0x93, 0xb1, - 0x1d, 0x5b, 0x51, 0xd7, 0xa1, 0x18, 0x61, 0x8a, 0xbb, 0x29, 0x0d, 0xe9, 0xa6, 0x20, 0xad, 0xb8, - 0x13, 0x07, 0x26, 0xd8, 0xa9, 0xd8, 0x98, 0x94, 0x67, 0x16, 0x33, 0xcb, 0x85, 0x55, 0xbd, 0x3a, - 0xdc, 0xa8, 0xac, 0x1e, 0xda, 0xef, 0xd5, 0x7b, 0xc2, 0xe9, 0x86, 0x4b, 0x83, 0x7d, 0x3d, 0xda, - 0x42, 0xbd, 0x01, 0x39, 0x49, 0xaf, 0xa4, 0xac, 0xf2, 0xed, 0x96, 0x3a, 0x4b, 0x1e, 0x4d, 0x1c, + 0x5e, 0x6c, 0xb5, 0x7d, 0x52, 0xaf, 0xc3, 0xac, 0x1f, 0xe0, 0xa6, 0xed, 0x85, 0xc4, 0x20, 0x14, + 0x05, 0x14, 0x5b, 0x06, 0x6e, 0x62, 0x97, 0xb2, 0xf3, 0x61, 0x95, 0xc9, 0xe8, 0xa7, 0x23, 0x85, + 0xfb, 0x62, 0x7d, 0x83, 0x2d, 0x6f, 0x59, 0xea, 0x32, 0x94, 0x7a, 0x2c, 0xc6, 0xb8, 0xc5, 0x14, + 0xe9, 0xd4, 0x2c, 0xc3, 0x04, 0xa2, 0x2c, 0x36, 0x5a, 0x1e, 0x5f, 0x54, 0x96, 0xc7, 0xf4, 0xe8, + 0xa3, 0xaa, 0xc1, 0xa4, 0x8b, 0x9f, 0xd2, 0xc4, 0xc1, 0x04, 0x77, 0x50, 0x60, 0xc2, 0xc8, 0xfa, + 0x32, 0xa8, 0x35, 0x64, 0xee, 0x39, 0x5e, 0xdd, 0x30, 0xbd, 0xd0, 0xa5, 0xc6, 0xae, 0xed, 0xd2, + 0x72, 0x8e, 0x2b, 0x96, 0xe4, 0xca, 0x3a, 0x5b, 0xd8, 0xb4, 0x5d, 0xaa, 0xbe, 0x0d, 0x65, 0x42, + 0x6d, 0x73, 0x6f, 0x3f, 0xa9, 0xb9, 0x81, 0x5d, 0x54, 0x73, 0xb0, 0x55, 0xce, 0x2f, 0x2a, 0xcb, + 0x39, 0xfd, 0xb4, 0x58, 0x8f, 0xcb, 0xb9, 0x21, 0x56, 0xd5, 0x77, 0x60, 0x8c, 0x33, 0x48, 0x19, + 0xd2, 0xaa, 0xc9, 0x97, 0xda, 0x8b, 0x79, 0x8f, 0x09, 0x74, 0x61, 0xa2, 0x3e, 0x81, 0x33, 0x34, + 0x40, 0x2e, 0xb1, 0x59, 0x1a, 0xc9, 0xd9, 0x20, 0xb2, 0x57, 0x2e, 0x70, 0x6f, 0xd7, 0xab, 0x69, + 0x6c, 0x2d, 0x89, 0x80, 0xb9, 0xdd, 0x89, 0xcc, 0xdb, 0xf1, 0xb6, 0xe5, 0x3e, 0xf6, 0xf4, 0xd7, + 0x68, 0xda, 0x92, 0x5a, 0x87, 0x73, 0xbd, 0xf0, 0x32, 0x12, 0x76, 0x28, 0x17, 0xd3, 0xd2, 0x88, + 0x69, 0x81, 0xef, 0x19, 0x43, 0x7a, 0xae, 0x07, 0x64, 0xf1, 0x1a, 0xbb, 0xd5, 0xb5, 0x00, 0xb9, + 0xe6, 0xae, 0x04, 0xfa, 0x14, 0x07, 0x7a, 0x41, 0xc8, 0x04, 0xd4, 0x6f, 0xc3, 0x14, 0x31, 0x77, + 0xb1, 0x15, 0x3a, 0xd8, 0x32, 0x58, 0xfb, 0x28, 0x4f, 0xf3, 0xcd, 0xe7, 0xaa, 0xa2, 0xb7, 0x54, + 0xa3, 0xde, 0x52, 0xdd, 0x89, 0x7a, 0xcb, 0xcd, 0xec, 0xa7, 0x7f, 0x5e, 0x50, 0xf4, 0xc9, 0xd8, + 0x8e, 0xad, 0xa8, 0xeb, 0x50, 0x8c, 0x30, 0xc5, 0xdd, 0x94, 0x86, 0x74, 0x53, 0x90, 0x56, 0xdc, + 0x89, 0x03, 0x13, 0xec, 0x54, 0x6c, 0x4c, 0xca, 0x33, 0x8b, 0x99, 0xe5, 0xc2, 0xaa, 0x5e, 0x1d, + 0xae, 0x55, 0x56, 0x0f, 0xbd, 0xef, 0xd5, 0x7b, 0xc2, 0xe9, 0x86, 0x4b, 0x83, 
0x7d, 0x3d, 0xda, + 0x42, 0xbd, 0x01, 0x39, 0x49, 0xaf, 0xa4, 0xac, 0xf2, 0xed, 0x96, 0x3a, 0x4b, 0x1e, 0x75, 0x1c, 0xb6, 0xc1, 0x5d, 0xa1, 0xa9, 0xc7, 0x26, 0x73, 0x9f, 0x40, 0xb1, 0xdd, 0xaf, 0x5a, 0x82, 0xcc, 0x1e, 0xde, 0x97, 0xd4, 0xc9, 0xfe, 0x64, 0xb8, 0x6c, 0x22, 0x27, 0xc4, 0x92, 0x3a, 0x86, 0xc4, - 0x25, 0x37, 0x79, 0x67, 0xf4, 0x2d, 0xe5, 0x83, 0x6c, 0x6e, 0xb2, 0x34, 0x15, 0x93, 0xf7, 0x9a, + 0x25, 0x37, 0x79, 0x67, 0xf4, 0x6d, 0xe5, 0x83, 0x6c, 0x6e, 0xb2, 0x34, 0x15, 0x93, 0xf7, 0x9a, 0x49, 0xed, 0xa6, 0x4d, 0xf7, 0xff, 0xab, 0xc8, 0xbb, 0x5f, 0x50, 0xc7, 0x27, 0xef, 0x9c, 0x20, - 0xef, 0x14, 0xc7, 0x5f, 0x34, 0x79, 0x2f, 0x40, 0x01, 0xc9, 0xa8, 0x58, 0x19, 0x33, 0x3c, 0x01, + 0xef, 0x14, 0xc7, 0x5f, 0x36, 0x79, 0x2f, 0x40, 0x01, 0xc9, 0xa8, 0x58, 0x19, 0x33, 0x3c, 0x01, 0x88, 0x44, 0x5b, 0x16, 0x63, 0xf7, 0x58, 0x81, 0xb3, 0x7b, 0xf6, 0x70, 0x76, 0x8f, 0x73, 0xe4, - 0xec, 0x8e, 0xda, 0x3e, 0xa9, 0xd7, 0x61, 0xcc, 0x76, 0xfd, 0x90, 0x72, 0x5e, 0x2e, 0xac, 0x2e, - 0xf6, 0x73, 0xb1, 0x8d, 0xf6, 0x1d, 0x0f, 0x59, 0x44, 0x17, 0xea, 0x29, 0xfd, 0x3c, 0x7e, 0xbc, - 0x7e, 0x7e, 0x04, 0xb3, 0x91, 0xc0, 0xa0, 0x9e, 0x61, 0x3a, 0x1e, 0xc1, 0xdc, 0xa1, 0x17, 0x52, - 0xce, 0xf5, 0x85, 0xd5, 0xd9, 0x1e, 0x9f, 0xb7, 0xe4, 0xfd, 0xf4, 0x66, 0xf6, 0x27, 0xcc, 0xe5, - 0xe9, 0xc8, 0xc3, 0x8e, 0xb7, 0xce, 0xec, 0x77, 0x84, 0x79, 0x0f, 0x57, 0xe4, 0x8e, 0xc3, 0x15, - 0x3b, 0x70, 0x9a, 0x7f, 0xec, 0x8d, 0x2e, 0x3f, 0x5c, 0x74, 0xff, 0xc7, 0xcd, 0xbb, 0x42, 0xbb, - 0x03, 0x33, 0xbb, 0x18, 0x05, 0xb4, 0x86, 0x11, 0x8d, 0x1d, 0xc2, 0x70, 0x0e, 0x4b, 0xb1, 0x65, - 0xe4, 0xad, 0x6d, 0x7c, 0x16, 0x3a, 0xc7, 0x27, 0x86, 0x8a, 0x19, 0x06, 0x01, 0x1b, 0x3a, 0x52, - 0x64, 0x74, 0x9d, 0x5b, 0x71, 0xc8, 0xa2, 0x9c, 0x95, 0x7e, 0xd6, 0x84, 0x9b, 0xfb, 0x1d, 0xa7, - 0x78, 0xb7, 0x3d, 0x1d, 0x0b, 0x53, 0x64, 0x3b, 0xa4, 0x3c, 0x39, 0x24, 0xa4, 0x92, 0x7c, 0x6e, - 0x09, 0xcb, 0xde, 0xeb, 0xcb, 0xd4, 0xb1, 0xaf, 0x2f, 0x6f, 0xb6, 0xb5, 0x69, 0xcc, 0x54, 0x7c, - 0xf8, 0xe4, 0x93, 0xde, 0xfb, 0x28, 0x5a, 0x50, 0xaf, 0xc3, 0xf8, 0x2e, 0x46, 0x16, 0x0e, 0xe4, - 0x60, 0xa9, 0xf4, 0xdb, 0x72, 0x93, 0x6b, 0xe9, 0x52, 0x5b, 0xfb, 0x4b, 0x16, 0x4e, 0xaf, 0x59, - 0x56, 0xfb, 0x68, 0x38, 0x02, 0x6d, 0xde, 0x86, 0xfc, 0x4b, 0x50, 0x48, 0x62, 0xab, 0xae, 0x4b, - 0xce, 0x12, 0xf3, 0x3d, 0x73, 0x84, 0xf9, 0xce, 0x99, 0x4d, 0x8c, 0xf3, 0x2b, 0xa0, 0x26, 0x18, - 0xe9, 0xba, 0xea, 0x95, 0xe2, 0x95, 0xe8, 0xf2, 0xd5, 0xd5, 0xc0, 0xb2, 0x57, 0x24, 0xa2, 0xc7, - 0x8e, 0xdc, 0xc0, 0xfc, 0x0a, 0x19, 0xe1, 0x3a, 0x8d, 0xcf, 0xc7, 0x53, 0xf9, 0x5c, 0xfd, 0x1a, - 0x8c, 0x4b, 0x05, 0x46, 0x1a, 0x53, 0xab, 0xcb, 0xa9, 0x13, 0x9d, 0x3f, 0xc0, 0xa2, 0xc4, 0x85, - 0xa5, 0x2e, 0xed, 0xd4, 0xf7, 0x60, 0x8c, 0xbf, 0xe5, 0x64, 0x5f, 0x5f, 0x4a, 0x75, 0xc0, 0x35, - 0x98, 0x83, 0x87, 0xd8, 0xa4, 0x5e, 0xb0, 0xce, 0x3e, 0xea, 0xc2, 0x4e, 0x35, 0x61, 0xa6, 0xc9, - 0x5e, 0x62, 0x9e, 0x6b, 0x58, 0x76, 0x80, 0x19, 0xcd, 0x62, 0xd9, 0xd3, 0xd7, 0x53, 0x9d, 0xf5, - 0x1c, 0xc5, 0x43, 0x61, 0x7e, 0x2b, 0xb2, 0xd6, 0x4b, 0xcd, 0x2e, 0x89, 0x36, 0x0b, 0x67, 0x7a, - 0x70, 0x26, 0x06, 0x96, 0xf6, 0x0f, 0x81, 0xc1, 0xf6, 0x89, 0xf6, 0xc5, 0x63, 0x30, 0x7b, 0x92, - 0x18, 0x1c, 0x3b, 0x0e, 0x06, 0xc7, 0x4f, 0x1e, 0x83, 0x13, 0x83, 0x30, 0x98, 0xfb, 0x5f, 0xc6, - 0xe0, 0x07, 0xd9, 0x5c, 0xa6, 0x94, 0x95, 0x48, 0xec, 0x44, 0x9b, 0x44, 0xe2, 0xdf, 0x46, 0xe1, - 0x14, 0xbf, 0x65, 0x46, 0x40, 0x39, 0x02, 0x0e, 0x3b, 0xe1, 0x33, 0x7a, 0x3c, 0xf8, 0x3c, 0x82, - 0x49, 0x7e, 0xed, 0xed, 0xba, 0x6b, 0x5e, 0x1b, 0x78, 0xd7, 0x4c, 0x8b, 0x5a, 0x2f, 0x72, 0x5f, - 0x47, 0xbf, 
0x64, 0xa6, 0x9f, 0xc6, 0xd8, 0x09, 0x33, 0xc2, 0x2f, 0x15, 0x78, 0xad, 0x2b, 0x6c, - 0x79, 0x83, 0x5d, 0x87, 0x62, 0x54, 0x05, 0x12, 0x3a, 0x94, 0x57, 0x7b, 0x98, 0x81, 0x5c, 0x90, - 0xf9, 0x32, 0x23, 0xf5, 0x43, 0x98, 0x8a, 0x9c, 0x7c, 0x07, 0x9b, 0x14, 0x5b, 0x03, 0x5e, 0x19, - 0xe2, 0x75, 0x21, 0x75, 0xf5, 0xc9, 0x27, 0xed, 0x1f, 0xb5, 0x1f, 0x8d, 0xc2, 0xa2, 0x08, 0xcf, - 0xe2, 0x7a, 0x2c, 0xc5, 0x75, 0xaf, 0xe1, 0x3b, 0x98, 0x29, 0xff, 0x87, 0x41, 0x72, 0x06, 0x26, - 0xb8, 0x93, 0xf8, 0x8e, 0x3d, 0xce, 0x3e, 0x6e, 0x59, 0xaa, 0x0b, 0x33, 0x66, 0x14, 0x54, 0x8c, - 0x20, 0x41, 0x64, 0x6b, 0x03, 0x11, 0x34, 0x28, 0x3d, 0xbd, 0x64, 0x76, 0x49, 0xb4, 0xd7, 0x61, - 0xe9, 0x10, 0x2b, 0xd9, 0x53, 0xff, 0x54, 0x60, 0x7e, 0x1d, 0xb9, 0x26, 0x76, 0xbe, 0x1e, 0x52, - 0x42, 0x91, 0x6b, 0xd9, 0x6e, 0x7d, 0xbb, 0xed, 0xf1, 0x33, 0x44, 0xd9, 0xee, 0xc0, 0x74, 0x52, - 0x36, 0x71, 0xb3, 0x1a, 0xe5, 0x4c, 0xd5, 0x55, 0xbb, 0x0e, 0x8a, 0xe2, 0xc5, 0xe2, 0x37, 0xab, - 0x49, 0xda, 0xfe, 0xf1, 0x64, 0x2e, 0x1b, 0x1d, 0x2f, 0xc6, 0x6c, 0xe7, 0x8b, 0x51, 0x5b, 0x80, - 0x73, 0x7d, 0x52, 0x96, 0x45, 0xf9, 0x99, 0x02, 0xe5, 0x5b, 0x98, 0x98, 0x81, 0x5d, 0xc3, 0xc7, - 0x79, 0xaf, 0x7e, 0x0b, 0x8a, 0x16, 0x26, 0x66, 0x7c, 0xc8, 0xa3, 0xdd, 0x5f, 0xc5, 0xf4, 0x39, - 0xe4, 0x7e, 0x7b, 0xea, 0x05, 0xe6, 0x2e, 0x3a, 0xd7, 0xdf, 0x2a, 0x30, 0x9b, 0xa2, 0x29, 0xbb, - 0xf3, 0x3d, 0x98, 0x10, 0x89, 0x92, 0xb2, 0xc2, 0xbf, 0x15, 0x78, 0xe3, 0x90, 0xda, 0x6d, 0x8b, - 0x92, 0xb8, 0x8f, 0x3d, 0x3d, 0xb2, 0x52, 0x1f, 0xc2, 0x4c, 0xdb, 0x69, 0x12, 0x8a, 0x68, 0x48, - 0x64, 0x06, 0x97, 0x87, 0x39, 0x86, 0xfb, 0xdc, 0x42, 0x9f, 0xa6, 0x9d, 0x02, 0xed, 0xe7, 0x0a, - 0x54, 0xee, 0xd8, 0x84, 0xc6, 0x8a, 0xdb, 0x28, 0xa0, 0x36, 0x1b, 0x95, 0x24, 0x2a, 0xed, 0x3c, - 0xe4, 0x93, 0xcb, 0xb4, 0xa8, 0x6b, 0x22, 0xe8, 0x29, 0x7c, 0xe6, 0xd5, 0x34, 0xb0, 0xf6, 0xd3, - 0x51, 0x58, 0xe8, 0x1b, 0xa8, 0xac, 0xf2, 0x77, 0xa1, 0x92, 0xbc, 0x95, 0x93, 0x6a, 0xf9, 0xb1, - 0xa6, 0x2c, 0xfe, 0xb5, 0x61, 0x36, 0x8f, 0xfd, 0xdf, 0xc5, 0x14, 0x59, 0x88, 0x22, 0xfd, 0x2c, - 0xea, 0xfe, 0xfe, 0x20, 0x89, 0x81, 0xed, 0xdd, 0xf1, 0x4d, 0x5f, 0xef, 0xde, 0xa3, 0x2f, 0xb5, - 0x77, 0xab, 0xfb, 0x8b, 0xa8, 0x64, 0x6f, 0xed, 0x5f, 0x59, 0xb8, 0xf8, 0xc0, 0xb7, 0x10, 0xc5, - 0x6c, 0x2c, 0xe0, 0xe0, 0x66, 0x68, 0x3b, 0xd6, 0x96, 0xc5, 0x78, 0x05, 0x51, 0xbb, 0x66, 0x3b, - 0x36, 0xdd, 0x3f, 0x42, 0xa3, 0x9c, 0xeb, 0x39, 0xaf, 0x7c, 0x7b, 0x17, 0xff, 0x58, 0x81, 0x53, - 0xc8, 0xf7, 0x9d, 0x7d, 0xc3, 0x0f, 0x6b, 0x8e, 0x6d, 0x76, 0xcd, 0xdd, 0xda, 0xb0, 0x5f, 0xaf, - 0x0d, 0x19, 0x71, 0x75, 0x8d, 0xed, 0xb5, 0xcd, 0xb7, 0x92, 0xa2, 0xcd, 0x11, 0x5d, 0x45, 0x3d, - 0x52, 0xf5, 0xfb, 0x0a, 0x94, 0x02, 0xdc, 0xf0, 0x9a, 0xd8, 0xa8, 0x31, 0x7f, 0x86, 0x6d, 0x11, - 0x49, 0xe5, 0xdf, 0x3e, 0xe9, 0xa0, 0x74, 0xbe, 0x8f, 0xd4, 0x20, 0x9b, 0x23, 0xfa, 0x54, 0xd0, - 0x21, 0x99, 0x7b, 0x0a, 0x6a, 0x6f, 0xe0, 0x6a, 0x0d, 0x26, 0xa2, 0x6a, 0x89, 0x01, 0xbd, 0x39, - 0x90, 0x7e, 0x86, 0x8c, 0x48, 0x8f, 0x1c, 0xcf, 0x59, 0x30, 0xd5, 0x19, 0x9d, 0x7a, 0x0d, 0xce, - 0xec, 0xb9, 0x5e, 0xcb, 0x35, 0x42, 0x82, 0x03, 0x83, 0xe1, 0xc9, 0x90, 0x37, 0x0b, 0x1e, 0x45, - 0x46, 0x3f, 0xc5, 0x97, 0x1f, 0x10, 0x1c, 0xdc, 0x42, 0x14, 0xc9, 0x7b, 0x08, 0xa3, 0xeb, 0xa4, - 0x8e, 0x0c, 0xbd, 0x79, 0x3d, 0x57, 0x93, 0x3e, 0x6f, 0x16, 0x20, 0xef, 0xf9, 0x58, 0xdc, 0xaa, - 0xb5, 0xcb, 0xb0, 0x3c, 0x38, 0x4c, 0x49, 0xe3, 0xbf, 0x52, 0xe0, 0xfc, 0x6d, 0x4c, 0x4f, 0x04, - 0xa9, 0x46, 0x52, 0x4e, 0x41, 0x2b, 0x1b, 0x03, 0xcb, 0x39, 0xcc, 0xd6, 0x71, 0x2d, 0xb5, 0x1f, - 0x28, 0xf0, 0xc6, 0x00, 0x0b, 0xc9, 
0x3d, 0x35, 0xc8, 0x45, 0x3f, 0x90, 0xc9, 0xa3, 0x7d, 0xff, - 0x65, 0x63, 0x11, 0xde, 0xf4, 0xd8, 0xaf, 0xf6, 0xc3, 0x51, 0x38, 0x7b, 0x1b, 0x27, 0x14, 0x18, - 0x1d, 0xd8, 0xc9, 0xf5, 0x76, 0xca, 0xa5, 0x61, 0xec, 0xf8, 0x97, 0x86, 0x77, 0x61, 0xde, 0x41, - 0x84, 0x1a, 0xfd, 0xc0, 0x97, 0xe1, 0xe0, 0x2b, 0x33, 0x9d, 0x0f, 0xd3, 0x00, 0xa8, 0xc1, 0x64, - 0x0b, 0xd9, 0xd4, 0x70, 0x71, 0x8b, 0x1b, 0xf2, 0x66, 0xce, 0xe9, 0x05, 0x26, 0xfc, 0x08, 0xb7, - 0x98, 0xaa, 0xf6, 0x1b, 0x05, 0xe6, 0xd3, 0x6b, 0x22, 0x0f, 0xe6, 0x3a, 0x94, 0xdb, 0x52, 0xda, - 0x45, 0x24, 0x09, 0x84, 0x17, 0x28, 0xa7, 0x9f, 0x8a, 0xa3, 0xde, 0x44, 0x24, 0xb2, 0x57, 0x3f, - 0x86, 0x7c, 0xa2, 0x28, 0xd0, 0xf5, 0x6e, 0x2a, 0x8b, 0xb4, 0xfd, 0x22, 0x2b, 0x1e, 0x6a, 0x3c, - 0x78, 0x6c, 0xf5, 0x86, 0x94, 0x0b, 0xe5, 0x5f, 0xda, 0xef, 0x15, 0x78, 0x93, 0xd3, 0x43, 0x4a, - 0xdc, 0xbe, 0x63, 0x9b, 0xbc, 0xad, 0xf8, 0x8b, 0xf7, 0xe4, 0xce, 0x56, 0x6f, 0x4f, 0xa8, 0xe7, - 0x8d, 0xd4, 0x3f, 0xa1, 0xc3, 0xf2, 0xf8, 0x7f, 0xa8, 0x0e, 0x9b, 0x86, 0xc4, 0x30, 0x82, 0xa5, - 0xdb, 0x98, 0x4a, 0xc0, 0xc7, 0x66, 0x77, 0x91, 0xef, 0xdb, 0x6e, 0xfd, 0x08, 0xc9, 0xce, 0x42, - 0x2e, 0x22, 0x27, 0x99, 0xea, 0x84, 0xe4, 0x26, 0x6d, 0x03, 0xb4, 0xc3, 0xb6, 0x90, 0xb8, 0x58, - 0x80, 0x42, 0xdb, 0x8f, 0xe7, 0xfc, 0x66, 0x90, 0xd7, 0x21, 0x2e, 0x17, 0xd1, 0x7e, 0xad, 0xc0, - 0xd9, 0xf7, 0xbd, 0xc0, 0xc4, 0x0f, 0x5c, 0xf6, 0x54, 0x3a, 0xce, 0x95, 0xf3, 0xe8, 0xdd, 0x96, - 0x39, 0x76, 0xb7, 0x69, 0x37, 0x60, 0x3e, 0x3d, 0xdc, 0xe4, 0x37, 0x8e, 0x16, 0x22, 0x06, 0x5b, - 0xc4, 0x96, 0x84, 0x7e, 0xbe, 0x85, 0xc8, 0x1d, 0x2e, 0x60, 0xcf, 0xb5, 0x8a, 0x20, 0xf1, 0x57, - 0xc8, 0x2f, 0x1f, 0xf7, 0x62, 0xf0, 0xc4, 0x9a, 0x4a, 0xbd, 0x00, 0xd3, 0xf1, 0xbc, 0x32, 0x90, - 0xc5, 0xb2, 0xcc, 0xf2, 0x53, 0x9d, 0x8c, 0xa6, 0xd6, 0x1a, 0x13, 0xaa, 0x97, 0x61, 0x26, 0xd1, - 0x13, 0x63, 0xdb, 0x2a, 0x8f, 0x71, 0xcd, 0xe9, 0x48, 0x53, 0x4c, 0x50, 0x4b, 0x5b, 0x82, 0x85, - 0xbe, 0x45, 0x91, 0x88, 0xfe, 0x9d, 0xc2, 0x9e, 0x74, 0x02, 0xee, 0xaf, 0xb2, 0x76, 0xaf, 0xa2, - 0x7f, 0xcf, 0x83, 0x76, 0x58, 0xe8, 0x22, 0xc3, 0x9b, 0xc1, 0xb3, 0xe7, 0x95, 0x91, 0xcf, 0x9e, - 0x57, 0x46, 0x3e, 0x7f, 0x5e, 0x51, 0xbe, 0x77, 0x50, 0x51, 0x7e, 0x71, 0x50, 0x51, 0xfe, 0x70, - 0x50, 0x51, 0x9e, 0x1d, 0x54, 0x94, 0x3f, 0x1f, 0x54, 0x94, 0xbf, 0x1e, 0x54, 0x46, 0x3e, 0x3f, - 0xa8, 0x28, 0x9f, 0xbe, 0xa8, 0x8c, 0x3c, 0x7b, 0x51, 0x19, 0xf9, 0xec, 0x45, 0x65, 0xe4, 0xd1, - 0x57, 0xeb, 0x5e, 0x12, 0x9e, 0xed, 0x1d, 0xfe, 0x6f, 0x4d, 0x5f, 0xe9, 0x12, 0xd5, 0xc6, 0xf9, - 0x77, 0x77, 0x5f, 0xfa, 0x77, 0x00, 0x00, 0x00, 0xff, 0xff, 0x07, 0x3f, 0x1a, 0x76, 0x17, 0x25, - 0x00, 0x00, + 0xec, 0x8e, 0xda, 0x3e, 0xa9, 0xd7, 0x60, 0xcc, 0x76, 0xfd, 0x90, 0x72, 0x5e, 0x2e, 0xac, 0x2e, + 0xf6, 0x73, 0xb1, 0x8d, 0xf6, 0x1d, 0x0f, 0x59, 0x44, 0x17, 0xea, 0x29, 0xf7, 0x79, 0xfc, 0x78, + 0xf7, 0xf9, 0x11, 0xcc, 0x46, 0x02, 0x83, 0x7a, 0x86, 0xe9, 0x78, 0x04, 0x73, 0x87, 0x5e, 0x48, + 0x39, 0xd7, 0x17, 0x56, 0x67, 0x7b, 0x7c, 0xde, 0x92, 0xf3, 0xe9, 0xcd, 0xec, 0xcf, 0x98, 0xcb, + 0xd3, 0x91, 0x87, 0x1d, 0x6f, 0x9d, 0xd9, 0xef, 0x08, 0xf3, 0x1e, 0xae, 0xc8, 0x1d, 0x87, 0x2b, + 0x76, 0xe0, 0x34, 0xff, 0xd8, 0x1b, 0x5d, 0x7e, 0xb8, 0xe8, 0xfe, 0x8f, 0x9b, 0x77, 0x85, 0x76, + 0x07, 0x66, 0x76, 0x31, 0x0a, 0x68, 0x0d, 0x23, 0x1a, 0x3b, 0x84, 0xe1, 0x1c, 0x96, 0x62, 0xcb, + 0xc8, 0x5b, 0x5b, 0xfb, 0x2c, 0x74, 0xb6, 0x4f, 0x0c, 0x15, 0x33, 0x0c, 0x02, 0xd6, 0x74, 0xa4, + 0xc8, 0xe8, 0x3a, 0xb7, 0xe2, 0x90, 0x45, 0x39, 0x2b, 0xfd, 0xac, 0x09, 0x37, 0xf7, 0x3b, 0x4e, + 0xf1, 0x6e, 0x7b, 0x3a, 0x16, 0xa6, 0xc8, 0x76, 
0x48, 0x79, 0x72, 0x48, 0x48, 0x25, 0xf9, 0xdc, + 0x12, 0x96, 0xbd, 0xe3, 0xcb, 0xd4, 0xb1, 0xc7, 0x97, 0xb7, 0xda, 0xae, 0x69, 0xcc, 0x54, 0xbc, + 0xf9, 0xe4, 0x93, 0xbb, 0xf7, 0x51, 0xb4, 0xa0, 0x5e, 0x83, 0xf1, 0x5d, 0x8c, 0x2c, 0x1c, 0xc8, + 0xc6, 0x52, 0xe9, 0xb7, 0xe5, 0x26, 0xd7, 0xd2, 0xa5, 0xb6, 0xf6, 0xd7, 0x2c, 0x9c, 0x5e, 0xb3, + 0xac, 0xf6, 0xd6, 0x70, 0x04, 0xda, 0xbc, 0x0d, 0xf9, 0x97, 0xa0, 0x90, 0xc4, 0x56, 0x5d, 0x97, + 0x9c, 0x25, 0xfa, 0x7b, 0xe6, 0x08, 0xfd, 0x9d, 0x33, 0x9b, 0x68, 0xe7, 0x97, 0x41, 0x4d, 0x30, + 0xd2, 0x35, 0xea, 0x95, 0xe2, 0x95, 0x68, 0xf8, 0xea, 0xba, 0xc0, 0xf2, 0xae, 0x48, 0x44, 0x8f, + 0x1d, 0xf9, 0x02, 0xf3, 0x11, 0x32, 0xc2, 0x75, 0x1a, 0x9f, 0x8f, 0xa7, 0xf2, 0xb9, 0xfa, 0x0d, + 0x18, 0x97, 0x0a, 0x8c, 0x34, 0xa6, 0x56, 0x97, 0x53, 0x3b, 0x3a, 0x7f, 0x80, 0x45, 0x89, 0x0b, + 0x4b, 0x5d, 0xda, 0xa9, 0xef, 0xc1, 0x18, 0x7f, 0xcb, 0xc9, 0x7b, 0x7d, 0x31, 0xd5, 0x01, 0xd7, + 0x60, 0x0e, 0x1e, 0x62, 0x93, 0x7a, 0xc1, 0x3a, 0xfb, 0xa8, 0x0b, 0x3b, 0xd5, 0x84, 0x99, 0x26, + 0x7b, 0x89, 0x79, 0xae, 0x61, 0xd9, 0x01, 0x66, 0x34, 0x8b, 0xe5, 0x9d, 0xbe, 0x96, 0xea, 0xac, + 0xe7, 0x28, 0x1e, 0x0a, 0xf3, 0x5b, 0x91, 0xb5, 0x5e, 0x6a, 0x76, 0x49, 0xb4, 0x59, 0x38, 0xd3, + 0x83, 0x33, 0xd1, 0xb0, 0xb4, 0x7f, 0x0a, 0x0c, 0xb6, 0x77, 0xb4, 0x2f, 0x1f, 0x83, 0xd9, 0x93, + 0xc4, 0xe0, 0xd8, 0x71, 0x30, 0x38, 0x7e, 0xf2, 0x18, 0x9c, 0x18, 0x84, 0xc1, 0xdc, 0xff, 0x32, + 0x06, 0x3f, 0xc8, 0xe6, 0x32, 0xa5, 0xac, 0x44, 0x62, 0x27, 0xda, 0x24, 0x12, 0xff, 0x3e, 0x0a, + 0xa7, 0xf8, 0x94, 0x19, 0x01, 0xe5, 0x08, 0x38, 0xec, 0x84, 0xcf, 0xe8, 0xf1, 0xe0, 0xf3, 0x08, + 0x26, 0xf9, 0xd8, 0xdb, 0x35, 0x6b, 0x5e, 0x1d, 0x38, 0x6b, 0xa6, 0x45, 0xad, 0x17, 0xb9, 0xaf, + 0xa3, 0x0f, 0x99, 0xe9, 0xa7, 0x31, 0x76, 0xc2, 0x8c, 0xf0, 0x6b, 0x05, 0x5e, 0xeb, 0x0a, 0x5b, + 0x4e, 0xb0, 0xeb, 0x50, 0x8c, 0xaa, 0x40, 0x42, 0x87, 0xf2, 0x6a, 0x0f, 0xd3, 0x90, 0x0b, 0x32, + 0x5f, 0x66, 0xa4, 0x7e, 0x08, 0x53, 0x91, 0x93, 0xef, 0x61, 0x93, 0x62, 0x6b, 0xc0, 0x2b, 0x43, + 0xbc, 0x2e, 0xa4, 0xae, 0x3e, 0xf9, 0xa4, 0xfd, 0xa3, 0xf6, 0x93, 0x51, 0x58, 0x14, 0xe1, 0x59, + 0x5c, 0x8f, 0xa5, 0xb8, 0xee, 0x35, 0x7c, 0x07, 0x33, 0xe5, 0xff, 0x30, 0x48, 0xce, 0xc0, 0x04, + 0x77, 0x12, 0xcf, 0xd8, 0xe3, 0xec, 0xe3, 0x96, 0xa5, 0xba, 0x30, 0x63, 0x46, 0x41, 0xc5, 0x08, + 0x12, 0x44, 0xb6, 0x36, 0x10, 0x41, 0x83, 0xd2, 0xd3, 0x4b, 0x66, 0x97, 0x44, 0x7b, 0x1d, 0x96, + 0x0e, 0xb1, 0x92, 0x77, 0xea, 0x5f, 0x0a, 0xcc, 0xaf, 0x23, 0xd7, 0xc4, 0xce, 0x37, 0x43, 0x4a, + 0x28, 0x72, 0x2d, 0xdb, 0xad, 0x6f, 0xb7, 0x3d, 0x7e, 0x86, 0x28, 0xdb, 0x1d, 0x98, 0x4e, 0xca, + 0x26, 0x26, 0xab, 0x51, 0xce, 0x54, 0x5d, 0xb5, 0xeb, 0xa0, 0x28, 0x5e, 0x2c, 0x3e, 0x59, 0x4d, + 0xd2, 0xf6, 0x8f, 0x27, 0x33, 0x6c, 0x74, 0xbc, 0x18, 0xb3, 0x9d, 0x2f, 0x46, 0x6d, 0x01, 0xce, + 0xf5, 0x49, 0x59, 0x16, 0xe5, 0x17, 0x0a, 0x94, 0x6f, 0x61, 0x62, 0x06, 0x76, 0x0d, 0x1f, 0xe7, + 0xbd, 0xfa, 0x1d, 0x28, 0x5a, 0x98, 0x98, 0xf1, 0x21, 0x8f, 0x76, 0x7f, 0x15, 0xd3, 0xe7, 0x90, + 0xfb, 0xed, 0xa9, 0x17, 0x98, 0xbb, 0xe8, 0x5c, 0x7f, 0xaf, 0xc0, 0x6c, 0x8a, 0xa6, 0xbc, 0x9d, + 0xef, 0xc1, 0x84, 0x48, 0x94, 0x94, 0x15, 0xfe, 0xad, 0xc0, 0x1b, 0x87, 0xd4, 0x6e, 0x5b, 0x94, + 0xc4, 0x7d, 0xec, 0xe9, 0x91, 0x95, 0xfa, 0x10, 0x66, 0xda, 0x4e, 0x93, 0x50, 0x44, 0x43, 0x22, + 0x33, 0xb8, 0x34, 0xcc, 0x31, 0xdc, 0xe7, 0x16, 0xfa, 0x34, 0xed, 0x14, 0x68, 0xbf, 0x54, 0xa0, + 0x72, 0xc7, 0x26, 0x34, 0x56, 0xdc, 0x46, 0x01, 0xb5, 0x59, 0xab, 0x24, 0x51, 0x69, 0xe7, 0x21, + 0x9f, 0x0c, 0xd3, 0xa2, 0xae, 0x89, 0xa0, 0xa7, 0xf0, 0x99, 0x57, 0x73, 
0x81, 0xb5, 0x9f, 0x8f, + 0xc2, 0x42, 0xdf, 0x40, 0x65, 0x95, 0xbf, 0x0f, 0x95, 0xe4, 0xad, 0x9c, 0x54, 0xcb, 0x8f, 0x35, + 0x65, 0xf1, 0xaf, 0x0e, 0xb3, 0x79, 0xec, 0xff, 0x2e, 0xa6, 0xc8, 0x42, 0x14, 0xe9, 0x67, 0x51, + 0xf7, 0xf7, 0x07, 0x49, 0x0c, 0x6c, 0xef, 0x8e, 0x6f, 0xfa, 0x7a, 0xf7, 0x1e, 0x7d, 0xa9, 0xbd, + 0x5b, 0xdd, 0x5f, 0x44, 0x25, 0x7b, 0x6b, 0x9f, 0x8d, 0xc1, 0x85, 0x07, 0xbe, 0x85, 0x28, 0x66, + 0x6d, 0x01, 0x07, 0x37, 0x43, 0xdb, 0xb1, 0xb6, 0x2c, 0xc6, 0x2b, 0x88, 0xda, 0x35, 0xdb, 0xb1, + 0xe9, 0xfe, 0x11, 0x2e, 0xca, 0xb9, 0x9e, 0xf3, 0xca, 0xb7, 0xdf, 0xe2, 0x9f, 0x2a, 0x70, 0x0a, + 0xf9, 0xbe, 0xb3, 0x6f, 0xf8, 0x61, 0xcd, 0xb1, 0xcd, 0xae, 0xbe, 0x5b, 0x1b, 0xf6, 0xeb, 0xb5, + 0x21, 0x23, 0xae, 0xae, 0xb1, 0xbd, 0xb6, 0xf9, 0x56, 0x52, 0xb4, 0x39, 0xa2, 0xab, 0xa8, 0x47, + 0xaa, 0xfe, 0x50, 0x81, 0x52, 0x80, 0x1b, 0x5e, 0x13, 0x1b, 0x35, 0xe6, 0xcf, 0xb0, 0x2d, 0x22, + 0xa9, 0xfc, 0xbb, 0x27, 0x1d, 0x94, 0xce, 0xf7, 0x91, 0x1a, 0x64, 0x73, 0x44, 0x9f, 0x0a, 0x3a, + 0x24, 0xea, 0x75, 0x28, 0xcb, 0x9f, 0x6a, 0x8c, 0xd0, 0xdd, 0x73, 0xbd, 0x96, 0x1b, 0x07, 0xc5, + 0xa7, 0x82, 0xfc, 0xe6, 0x88, 0xfe, 0x9a, 0xd4, 0x78, 0x20, 0x14, 0xa4, 0xed, 0xdc, 0x53, 0x50, + 0x7b, 0x73, 0x56, 0x6b, 0x30, 0x11, 0x15, 0x5a, 0xf4, 0xf6, 0xcd, 0x81, 0xcc, 0x35, 0x64, 0x32, + 0x7a, 0xe4, 0x78, 0xce, 0x82, 0xa9, 0xce, 0xc4, 0xd4, 0xab, 0x70, 0x46, 0x04, 0x1f, 0x12, 0x1c, + 0x18, 0x0c, 0x8a, 0x86, 0x1c, 0x4a, 0x78, 0x14, 0x19, 0xfd, 0x14, 0x5f, 0x7e, 0x40, 0x70, 0x70, + 0x0b, 0x51, 0x24, 0x47, 0x18, 0xc6, 0xf4, 0xc9, 0x11, 0x30, 0xe0, 0xe7, 0xf5, 0x5c, 0x4d, 0xfa, + 0xbc, 0x59, 0x80, 0xbc, 0xe7, 0x63, 0x31, 0x90, 0x6b, 0x97, 0x60, 0x79, 0x70, 0x98, 0xb2, 0x03, + 0xfc, 0x46, 0x81, 0xf3, 0xb7, 0x31, 0x3d, 0x11, 0x90, 0x1b, 0x49, 0x39, 0x05, 0x23, 0x6d, 0x0c, + 0x2c, 0xe7, 0x30, 0x5b, 0xc7, 0xb5, 0xd4, 0x7e, 0xa4, 0xc0, 0x1b, 0x03, 0x2c, 0x24, 0x6d, 0xd5, + 0x20, 0x17, 0xfd, 0xb6, 0x26, 0x8f, 0xf6, 0xfd, 0x97, 0x8d, 0x45, 0x78, 0xd3, 0x63, 0xbf, 0xda, + 0x8f, 0x47, 0xe1, 0xec, 0x6d, 0x9c, 0xb0, 0x67, 0x74, 0x60, 0x27, 0x47, 0x0b, 0x29, 0xf3, 0xc6, + 0xd8, 0xf1, 0xe7, 0x8d, 0x77, 0x61, 0xde, 0x41, 0x84, 0x1a, 0xfd, 0xc0, 0x97, 0xe1, 0xe0, 0x2b, + 0x33, 0x9d, 0x0f, 0xd3, 0x00, 0xa8, 0xc1, 0x64, 0x0b, 0xd9, 0xd4, 0x70, 0x71, 0x8b, 0x1b, 0x72, + 0x1e, 0xc8, 0xe9, 0x05, 0x26, 0xfc, 0x08, 0xb7, 0x98, 0xaa, 0xf6, 0x3b, 0x05, 0xe6, 0xd3, 0x6b, + 0x22, 0x0f, 0xe6, 0x1a, 0x94, 0xdb, 0x52, 0xda, 0x45, 0x24, 0x09, 0x84, 0x17, 0x28, 0xa7, 0x9f, + 0x8a, 0xa3, 0xde, 0x44, 0x24, 0xb2, 0x57, 0x3f, 0x86, 0x7c, 0xa2, 0x28, 0xd0, 0xf5, 0x6e, 0x2a, + 0x01, 0xb5, 0xfd, 0x98, 0x2b, 0xde, 0x78, 0x3c, 0x78, 0x6c, 0xf5, 0x86, 0x94, 0x0b, 0xe5, 0x5f, + 0xda, 0x1f, 0x14, 0x78, 0x8b, 0xd3, 0x43, 0x4a, 0xdc, 0xbe, 0x63, 0x9b, 0xfc, 0x5a, 0xf1, 0xc7, + 0xf2, 0xc9, 0x9d, 0xad, 0xde, 0x9e, 0x50, 0xcf, 0xf3, 0xaa, 0x7f, 0x42, 0x87, 0xe5, 0xf1, 0xff, + 0x50, 0x1d, 0x36, 0x0d, 0x89, 0x61, 0x04, 0x4b, 0xb7, 0x31, 0x95, 0x80, 0x8f, 0xcd, 0xee, 0x22, + 0xdf, 0xb7, 0xdd, 0xfa, 0x11, 0x92, 0x9d, 0x85, 0x5c, 0x4c, 0xc5, 0x22, 0xd5, 0x09, 0xc9, 0x4d, + 0xda, 0x06, 0x68, 0x87, 0x6d, 0x21, 0x71, 0xb1, 0x00, 0x85, 0xb6, 0xdf, 0xdd, 0xf9, 0x50, 0x91, + 0xd7, 0x21, 0x2e, 0x17, 0xd1, 0x7e, 0xab, 0xc0, 0xd9, 0xf7, 0xbd, 0xc0, 0xc4, 0x0f, 0x5c, 0xf6, + 0xca, 0x3a, 0xce, 0xb4, 0x7a, 0xf4, 0xdb, 0x96, 0x39, 0xf6, 0x6d, 0xd3, 0x6e, 0xc0, 0x7c, 0x7a, + 0xb8, 0xc9, 0xcf, 0x23, 0x2d, 0x44, 0x0c, 0xb6, 0x88, 0x2d, 0x09, 0xfd, 0x7c, 0x0b, 0x91, 0x3b, + 0x5c, 0xc0, 0x5e, 0x7a, 0x15, 0x41, 0xe2, 0xaf, 0x90, 0x5f, 0x3e, 0xee, 0xc5, 0xe0, 0x89, 0x5d, + 
0x2a, 0xf5, 0x4d, 0x98, 0x8e, 0xfb, 0x95, 0x81, 0x2c, 0x96, 0x65, 0x96, 0x9f, 0xea, 0x64, 0xd4, + 0xb5, 0xd6, 0x98, 0x50, 0xbd, 0x04, 0x33, 0x89, 0x9e, 0xe8, 0xf8, 0xac, 0x9d, 0x33, 0xcd, 0xe9, + 0x48, 0x53, 0x74, 0x50, 0x4b, 0x5b, 0x82, 0x85, 0xbe, 0x45, 0x91, 0x88, 0xfe, 0x4c, 0x61, 0xaf, + 0x41, 0x01, 0xf7, 0x57, 0x59, 0xbb, 0x57, 0x71, 0x7f, 0xcf, 0x83, 0x76, 0x58, 0xe8, 0x22, 0xc3, + 0x9b, 0xc1, 0xb3, 0xe7, 0x95, 0x91, 0xcf, 0x9f, 0x57, 0x46, 0xbe, 0x78, 0x5e, 0x51, 0x7e, 0x70, + 0x50, 0x51, 0x7e, 0x75, 0x50, 0x51, 0xfe, 0x78, 0x50, 0x51, 0x9e, 0x1d, 0x54, 0x94, 0xbf, 0x1c, + 0x54, 0x94, 0xbf, 0x1d, 0x54, 0x46, 0xbe, 0x38, 0xa8, 0x28, 0x9f, 0xbe, 0xa8, 0x8c, 0x3c, 0x7b, + 0x51, 0x19, 0xf9, 0xfc, 0x45, 0x65, 0xe4, 0xd1, 0xd7, 0xeb, 0x5e, 0x12, 0x9e, 0xed, 0x1d, 0xfe, + 0x1f, 0x51, 0x5f, 0xeb, 0x12, 0xd5, 0xc6, 0xf9, 0xd7, 0x7e, 0x5f, 0xf9, 0x77, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xa5, 0xff, 0x36, 0x34, 0x52, 0x25, 0x00, 0x00, } func (this *PollWorkflowTaskQueueRequest) Equal(that interface{}) bool { @@ -3353,6 +3368,30 @@ } return true } +func (this *UpdateWorkerBuildIdCompatibilityRequest_PersistUnknownBuildId) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*UpdateWorkerBuildIdCompatibilityRequest_PersistUnknownBuildId) + if !ok { + that2, ok := that.(UpdateWorkerBuildIdCompatibilityRequest_PersistUnknownBuildId) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.PersistUnknownBuildId != that1.PersistUnknownBuildId { + return false + } + return true +} func (this *UpdateWorkerBuildIdCompatibilityRequest_ApplyPublicRequest) Equal(that interface{}) bool { if that == nil { return this == nil @@ -4156,7 +4195,7 @@ if this == nil { return "nil" } - s := make([]string, 0, 8) + s := make([]string, 0, 9) s = append(s, "&matchingservice.UpdateWorkerBuildIdCompatibilityRequest{") s = append(s, "NamespaceId: "+fmt.Sprintf("%#v", this.NamespaceId)+",\n") s = append(s, "TaskQueue: "+fmt.Sprintf("%#v", this.TaskQueue)+",\n") @@ -4182,6 +4221,14 @@ `RemoveBuildIds:` + fmt.Sprintf("%#v", this.RemoveBuildIds) + `}`}, ", ") return s } +func (this *UpdateWorkerBuildIdCompatibilityRequest_PersistUnknownBuildId) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&matchingservice.UpdateWorkerBuildIdCompatibilityRequest_PersistUnknownBuildId{` + + `PersistUnknownBuildId:` + fmt.Sprintf("%#v", this.PersistUnknownBuildId) + `}`}, ", ") + return s +} func (this *UpdateWorkerBuildIdCompatibilityRequest_ApplyPublicRequest) GoString() string { if this == nil { return "nil" @@ -5697,6 +5744,20 @@ } return len(dAtA) - i, nil } +func (m *UpdateWorkerBuildIdCompatibilityRequest_PersistUnknownBuildId) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UpdateWorkerBuildIdCompatibilityRequest_PersistUnknownBuildId) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.PersistUnknownBuildId) + copy(dAtA[i:], m.PersistUnknownBuildId) + i = encodeVarintRequestResponse(dAtA, i, uint64(len(m.PersistUnknownBuildId))) + i-- + dAtA[i] = 0x2a + return len(dAtA) - i, nil +} func (m *UpdateWorkerBuildIdCompatibilityRequest_ApplyPublicRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -6903,6 +6964,16 @@ } return n } +func (m *UpdateWorkerBuildIdCompatibilityRequest_PersistUnknownBuildId) Size() (n int) { + if m 
== nil { + return 0 + } + var l int + _ = l + l = len(m.PersistUnknownBuildId) + n += 1 + l + sovRequestResponse(uint64(l)) + return n +} func (m *UpdateWorkerBuildIdCompatibilityRequest_ApplyPublicRequest) Size() (n int) { if m == nil { return 0 @@ -7494,6 +7565,16 @@ }, "") return s } +func (this *UpdateWorkerBuildIdCompatibilityRequest_PersistUnknownBuildId) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateWorkerBuildIdCompatibilityRequest_PersistUnknownBuildId{`, + `PersistUnknownBuildId:` + fmt.Sprintf("%v", this.PersistUnknownBuildId) + `,`, + `}`, + }, "") + return s +} func (this *UpdateWorkerBuildIdCompatibilityRequest_ApplyPublicRequest) String() string { if this == nil { return "nil" @@ -11583,6 +11664,38 @@ } m.Operation = &UpdateWorkerBuildIdCompatibilityRequest_RemoveBuildIds_{v} iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PersistUnknownBuildId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRequestResponse + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRequestResponse + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRequestResponse + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Operation = &UpdateWorkerBuildIdCompatibilityRequest_PersistUnknownBuildId{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipRequestResponse(dAtA[iNdEx:]) diff -Nru temporal-1.21.5-1/src/api/matchingservice/v1/service.pb.go temporal-1.22.5/src/api/matchingservice/v1/service.pb.go --- temporal-1.21.5-1/src/api/matchingservice/v1/service.pb.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/api/matchingservice/v1/service.pb.go 2024-02-23 09:45:43.000000000 +0000 @@ -141,9 +141,9 @@ // ListTaskQueuePartitions returns a map of partitionKey and hostAddress for a task queue. ListTaskQueuePartitions(ctx context.Context, in *ListTaskQueuePartitionsRequest, opts ...grpc.CallOption) (*ListTaskQueuePartitionsResponse, error) // (-- api-linter: core::0134::response-message-name=disabled - // aip.dev/not-precedent: UpdateWorkerBuildIdOrdering RPC doesn't follow Google API format. --) + // aip.dev/not-precedent: UpdateWorkerBuildIdCompatibility RPC doesn't follow Google API format. --) // (-- api-linter: core::0134::method-signature=disabled - // aip.dev/not-precedent: UpdateWorkerBuildIdOrdering RPC doesn't follow Google API format. --) + // aip.dev/not-precedent: UpdateWorkerBuildIdCompatibility RPC doesn't follow Google API format. --) UpdateWorkerBuildIdCompatibility(ctx context.Context, in *UpdateWorkerBuildIdCompatibilityRequest, opts ...grpc.CallOption) (*UpdateWorkerBuildIdCompatibilityResponse, error) GetWorkerBuildIdCompatibility(ctx context.Context, in *GetWorkerBuildIdCompatibilityRequest, opts ...grpc.CallOption) (*GetWorkerBuildIdCompatibilityResponse, error) // Fetch user data for a task queue, this request should always be routed to the node holding the root partition of the workflow task queue. @@ -359,9 +359,9 @@ // ListTaskQueuePartitions returns a map of partitionKey and hostAddress for a task queue. 
ListTaskQueuePartitions(context.Context, *ListTaskQueuePartitionsRequest) (*ListTaskQueuePartitionsResponse, error) // (-- api-linter: core::0134::response-message-name=disabled - // aip.dev/not-precedent: UpdateWorkerBuildIdOrdering RPC doesn't follow Google API format. --) + // aip.dev/not-precedent: UpdateWorkerBuildIdCompatibility RPC doesn't follow Google API format. --) // (-- api-linter: core::0134::method-signature=disabled - // aip.dev/not-precedent: UpdateWorkerBuildIdOrdering RPC doesn't follow Google API format. --) + // aip.dev/not-precedent: UpdateWorkerBuildIdCompatibility RPC doesn't follow Google API format. --) UpdateWorkerBuildIdCompatibility(context.Context, *UpdateWorkerBuildIdCompatibilityRequest) (*UpdateWorkerBuildIdCompatibilityResponse, error) GetWorkerBuildIdCompatibility(context.Context, *GetWorkerBuildIdCompatibilityRequest) (*GetWorkerBuildIdCompatibilityResponse, error) // Fetch user data for a task queue, this request should always be routed to the node holding the root partition of the workflow task queue. diff -Nru temporal-1.21.5-1/src/api/persistence/v1/cluster_metadata.pb.go temporal-1.22.5/src/api/persistence/v1/cluster_metadata.pb.go --- temporal-1.21.5-1/src/api/persistence/v1/cluster_metadata.pb.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/api/persistence/v1/cluster_metadata.pb.go 2024-02-23 09:45:43.000000000 +0000 @@ -65,6 +65,7 @@ IsGlobalNamespaceEnabled bool `protobuf:"varint,9,opt,name=is_global_namespace_enabled,json=isGlobalNamespaceEnabled,proto3" json:"is_global_namespace_enabled,omitempty"` IsConnectionEnabled bool `protobuf:"varint,10,opt,name=is_connection_enabled,json=isConnectionEnabled,proto3" json:"is_connection_enabled,omitempty"` UseClusterIdMembership bool `protobuf:"varint,11,opt,name=use_cluster_id_membership,json=useClusterIdMembership,proto3" json:"use_cluster_id_membership,omitempty"` + Tags map[string]string `protobuf:"bytes,12,rep,name=tags,proto3" json:"tags,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } func (m *ClusterMetadata) Reset() { *m = ClusterMetadata{} } @@ -176,6 +177,13 @@ return false } +func (m *ClusterMetadata) GetTags() map[string]string { + if m != nil { + return m.Tags + } + return nil +} + type IndexSearchAttributes struct { CustomSearchAttributes map[string]v11.IndexedValueType `protobuf:"bytes,1,rep,name=custom_search_attributes,json=customSearchAttributes,proto3" json:"custom_search_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3,enum=temporal.api.enums.v1.IndexedValueType"` } @@ -222,6 +230,7 @@ func init() { proto.RegisterType((*ClusterMetadata)(nil), "temporal.server.api.persistence.v1.ClusterMetadata") proto.RegisterMapType((map[string]*IndexSearchAttributes)(nil), "temporal.server.api.persistence.v1.ClusterMetadata.IndexSearchAttributesEntry") + proto.RegisterMapType((map[string]string)(nil), "temporal.server.api.persistence.v1.ClusterMetadata.TagsEntry") proto.RegisterType((*IndexSearchAttributes)(nil), "temporal.server.api.persistence.v1.IndexSearchAttributes") proto.RegisterMapType((map[string]v11.IndexedValueType)(nil), "temporal.server.api.persistence.v1.IndexSearchAttributes.CustomSearchAttributesEntry") } @@ -231,49 +240,52 @@ } var fileDescriptor_1f4771d63f405884 = []byte{ - // 670 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x54, 0x4f, 0x4f, 0x13, 0x41, - 0x14, 0xef, 0x50, 0x41, 0x98, 0x12, 
0xd0, 0x25, 0xe0, 0x58, 0xe2, 0xa6, 0x12, 0x0d, 0x3d, 0x6d, - 0x43, 0xf5, 0x00, 0x2a, 0x07, 0x6c, 0x90, 0x70, 0x00, 0x93, 0xa2, 0x1c, 0xbc, 0x6c, 0xa6, 0xbb, - 0x0f, 0x3a, 0xba, 0x3b, 0xb3, 0x99, 0x99, 0xdd, 0xd8, 0x9b, 0x89, 0x89, 0x57, 0xfd, 0x02, 0xde, - 0x8d, 0x9f, 0xc4, 0x23, 0x47, 0x8e, 0x52, 0x2e, 0x1e, 0xf9, 0x08, 0x66, 0x67, 0x77, 0xdb, 0x42, - 0x8a, 0x1a, 0x6f, 0x3b, 0xef, 0xf7, 0x67, 0xde, 0xfc, 0xe6, 0xed, 0xe0, 0x0d, 0x0d, 0x61, 0x24, - 0x24, 0x0d, 0x1a, 0x0a, 0x64, 0x02, 0xb2, 0x41, 0x23, 0xd6, 0x88, 0x40, 0x2a, 0xa6, 0x34, 0x70, - 0x0f, 0x1a, 0xc9, 0x5a, 0xc3, 0x0b, 0x62, 0xa5, 0x41, 0xba, 0x21, 0x68, 0xea, 0x53, 0x4d, 0x9d, - 0x48, 0x0a, 0x2d, 0xac, 0x95, 0x42, 0xea, 0x64, 0x52, 0x87, 0x46, 0xcc, 0x19, 0x91, 0x3a, 0xc9, - 0x5a, 0x75, 0xc0, 0x31, 0xbe, 0xc0, 0xe3, 0x50, 0x19, 0x47, 0x11, 0x86, 0x82, 0x67, 0x3e, 0xd5, - 0x87, 0x97, 0x38, 0x49, 0x6a, 0x20, 0x78, 0xca, 0x0a, 0x41, 0x29, 0x7a, 0x0c, 0x19, 0x6d, 0xe5, - 0xfb, 0x14, 0x9e, 0x6f, 0x65, 0x9d, 0xec, 0xe5, 0x8d, 0x58, 0xf7, 0xf1, 0x6c, 0xd1, 0x1c, 0xa7, - 0x21, 0x10, 0x54, 0x43, 0xf5, 0x99, 0x76, 0x25, 0xaf, 0xed, 0xd3, 0x10, 0x2c, 0x07, 0x2f, 0x74, - 0x99, 0xd2, 0x42, 0xf6, 0x5c, 0xd5, 0xa5, 0xd2, 0x77, 0x3d, 0x11, 0x73, 0x4d, 0x26, 0x6a, 0xa8, - 0x3e, 0xd9, 0xbe, 0x9d, 0x43, 0x07, 0x29, 0xd2, 0x4a, 0x01, 0xeb, 0x1e, 0xc6, 0x85, 0x25, 0xf3, - 0x49, 0xd9, 0x18, 0xce, 0xe4, 0x95, 0x5d, 0xdf, 0xda, 0xc1, 0xb3, 0x79, 0x87, 0x2e, 0xe3, 0x47, - 0x82, 0xdc, 0xa8, 0xa1, 0x7a, 0xa5, 0xf9, 0xc0, 0x19, 0x64, 0x91, 0x86, 0x90, 0x33, 0x9c, 0x64, - 0xcd, 0x39, 0xcc, 0x3e, 0x77, 0xf9, 0x91, 0x68, 0x57, 0x92, 0xe1, 0xc2, 0xfa, 0x84, 0xf0, 0x1d, - 0xc6, 0x7d, 0x78, 0xef, 0x2a, 0xa0, 0xd2, 0xeb, 0xba, 0x54, 0x6b, 0xc9, 0x3a, 0xb1, 0x06, 0x45, - 0x26, 0x6b, 0xe5, 0x7a, 0xa5, 0xb9, 0xef, 0xfc, 0x3d, 0x60, 0xe7, 0x4a, 0x22, 0xce, 0x6e, 0x6a, - 0x79, 0x60, 0x1c, 0xb7, 0x06, 0x86, 0xdb, 0x5c, 0xcb, 0x5e, 0x7b, 0x91, 0x8d, 0xc3, 0xac, 0x55, - 0x3c, 0x5f, 0x1c, 0x98, 0xfa, 0xbe, 0x04, 0xa5, 0xc8, 0x94, 0x39, 0xf5, 0x5c, 0x5e, 0xde, 0xca, - 0xaa, 0xd6, 0x33, 0x5c, 0x3d, 0xa2, 0x2c, 0x10, 0x09, 0x48, 0x77, 0x98, 0x81, 0x27, 0x21, 0x04, - 0xae, 0xc9, 0xcd, 0x1a, 0xaa, 0x97, 0xdb, 0xa4, 0x60, 0x0c, 0xce, 0x9d, 0xe3, 0xd6, 0x3a, 0x26, - 0x8c, 0x33, 0xcd, 0x68, 0xe0, 0x5e, 0x75, 0x21, 0xd3, 0x46, 0xbb, 0x94, 0xe3, 0x2f, 0x2e, 0x5b, - 0x58, 0x9b, 0x78, 0x99, 0x29, 0xf7, 0x38, 0x10, 0x1d, 0x1a, 0x98, 0x6b, 0x56, 0x11, 0xf5, 0xc0, - 0x05, 0x4e, 0x3b, 0x01, 0xf8, 0x64, 0xa6, 0x86, 0xea, 0xd3, 0x6d, 0xc2, 0xd4, 0x8e, 0x61, 0xec, - 0x17, 0x84, 0xed, 0x0c, 0xb7, 0x9a, 0x78, 0x91, 0x29, 0xd7, 0x13, 0x9c, 0x83, 0xa7, 0xd3, 0x9e, - 0x0b, 0x21, 0x36, 0xc2, 0x05, 0xa6, 0x5a, 0x03, 0xac, 0xd0, 0x6c, 0xe0, 0xbb, 0xb1, 0x02, 0x77, - 0x38, 0x08, 0x6e, 0x08, 0x61, 0x07, 0xa4, 0xea, 0xb2, 0x88, 0x54, 0x8c, 0x6e, 0x29, 0x56, 0xd0, - 0x2a, 0xc6, 0x62, 0x6f, 0x80, 0x56, 0x3f, 0x22, 0x5c, 0xbd, 0xfe, 0x12, 0xac, 0x5b, 0xb8, 0xfc, - 0x0e, 0x7a, 0xf9, 0xa0, 0xa6, 0x9f, 0xd6, 0x4b, 0x3c, 0x99, 0xd0, 0x20, 0x06, 0x33, 0x92, 0x95, - 0xe6, 0xc6, 0xbf, 0xdc, 0xfa, 0xd8, 0x0d, 0xda, 0x99, 0xcf, 0x93, 0x89, 0x75, 0xb4, 0xf2, 0x75, - 0x02, 0x2f, 0x8e, 0x25, 0x59, 0x9f, 0x11, 0x26, 0x5e, 0xac, 0xb4, 0x08, 0xc7, 0x0c, 0x1e, 0x32, - 0x83, 0xf7, 0xfa, 0xbf, 0x5b, 0x70, 0x5a, 0xc6, 0x79, 0xfc, 0xfc, 0x2d, 0x79, 0x63, 0xc1, 0xaa, - 0xc4, 0xcb, 0x7f, 0x90, 0x8d, 0x49, 0x6c, 0x73, 0x34, 0xb1, 0xb9, 0xe6, 0xea, 0xe5, 0x9f, 0xcf, - 0x3c, 0x32, 0x83, 0x0e, 0xc1, 0x3f, 0x4c, 0xa9, 0xaf, 0x7a, 0x11, 0x8c, 0xe4, 0xf3, 0xfc, 0xed, - 0xc9, 0x99, 0x5d, 0x3a, 0x3d, 0xb3, 0x4b, 0x17, 0x67, 0x36, 
0xfa, 0xd0, 0xb7, 0xd1, 0xb7, 0xbe, - 0x8d, 0x7e, 0xf4, 0x6d, 0x74, 0xd2, 0xb7, 0xd1, 0xcf, 0xbe, 0x8d, 0x7e, 0xf5, 0xed, 0xd2, 0x45, - 0xdf, 0x46, 0x5f, 0xce, 0xed, 0xd2, 0xc9, 0xb9, 0x5d, 0x3a, 0x3d, 0xb7, 0x4b, 0x6f, 0x1e, 0x1f, - 0x8b, 0xe1, 0x5e, 0x4c, 0x5c, 0xff, 0x64, 0x3e, 0x1d, 0x59, 0x76, 0xa6, 0xcc, 0xfb, 0xf5, 0xe8, - 0x77, 0x00, 0x00, 0x00, 0xff, 0xff, 0x6e, 0x7d, 0x0e, 0x79, 0x6b, 0x05, 0x00, 0x00, + // 709 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x54, 0xcd, 0x4e, 0xdb, 0x4a, + 0x14, 0xce, 0x10, 0xfe, 0x32, 0x41, 0x70, 0xaf, 0xb9, 0x70, 0x7d, 0x83, 0xae, 0x95, 0xa2, 0x56, + 0x64, 0xe5, 0x88, 0xb4, 0x52, 0xa1, 0x2d, 0x0b, 0x1a, 0x51, 0xc4, 0x02, 0xaa, 0x06, 0xca, 0xa2, + 0x1b, 0x6b, 0x62, 0x1f, 0x92, 0x69, 0x6d, 0x8f, 0x35, 0x33, 0xb6, 0x9a, 0x5d, 0xa5, 0x4a, 0xdd, + 0xb6, 0x2f, 0xd0, 0x7d, 0x9f, 0xa0, 0xcf, 0xd0, 0x25, 0x4b, 0x96, 0x25, 0x6c, 0xba, 0xe4, 0x11, + 0x2a, 0x8f, 0x7f, 0x12, 0x90, 0x69, 0x2b, 0x76, 0xf6, 0x7c, 0x3f, 0xf3, 0x9d, 0x73, 0x66, 0x06, + 0x6f, 0x4a, 0xf0, 0x02, 0xc6, 0x89, 0xdb, 0x14, 0xc0, 0x23, 0xe0, 0x4d, 0x12, 0xd0, 0x66, 0x00, + 0x5c, 0x50, 0x21, 0xc1, 0xb7, 0xa1, 0x19, 0xad, 0x37, 0x6d, 0x37, 0x14, 0x12, 0xb8, 0xe5, 0x81, + 0x24, 0x0e, 0x91, 0xc4, 0x0c, 0x38, 0x93, 0x4c, 0x5b, 0xcd, 0xa4, 0x66, 0x22, 0x35, 0x49, 0x40, + 0xcd, 0x31, 0xa9, 0x19, 0xad, 0xd7, 0x72, 0x8e, 0xf2, 0x05, 0x3f, 0xf4, 0x84, 0x72, 0x64, 0x9e, + 0xc7, 0xfc, 0xc4, 0xa7, 0x76, 0xef, 0x0a, 0x27, 0x8a, 0x0d, 0x98, 0x1f, 0xb3, 0x3c, 0x10, 0x82, + 0xf4, 0x20, 0xa1, 0xad, 0x7e, 0x9d, 0xc1, 0x0b, 0xed, 0x24, 0xc9, 0x7e, 0x1a, 0x44, 0xbb, 0x83, + 0xe7, 0xb2, 0x70, 0x3e, 0xf1, 0x40, 0x47, 0x75, 0xd4, 0xa8, 0x74, 0xaa, 0xe9, 0xda, 0x01, 0xf1, + 0x40, 0x33, 0xf1, 0x62, 0x9f, 0x0a, 0xc9, 0xf8, 0xc0, 0x12, 0x7d, 0xc2, 0x1d, 0xcb, 0x66, 0xa1, + 0x2f, 0xf5, 0x89, 0x3a, 0x6a, 0x4c, 0x75, 0xfe, 0x4e, 0xa1, 0xc3, 0x18, 0x69, 0xc7, 0x80, 0xf6, + 0x3f, 0xc6, 0x99, 0x25, 0x75, 0xf4, 0xb2, 0x32, 0xac, 0xa4, 0x2b, 0x7b, 0x8e, 0xb6, 0x8b, 0xe7, + 0xd2, 0x84, 0x16, 0xf5, 0x4f, 0x98, 0x3e, 0x59, 0x47, 0x8d, 0x6a, 0xeb, 0xae, 0x99, 0xf7, 0x22, + 0x6e, 0x42, 0xca, 0x30, 0xa3, 0x75, 0xf3, 0x38, 0xf9, 0xdc, 0xf3, 0x4f, 0x58, 0xa7, 0x1a, 0x8d, + 0x7e, 0xb4, 0x0f, 0x08, 0xff, 0x4b, 0x7d, 0x07, 0xde, 0x5a, 0x02, 0x08, 0xb7, 0xfb, 0x16, 0x91, + 0x92, 0xd3, 0x6e, 0x28, 0x41, 0xe8, 0x53, 0xf5, 0x72, 0xa3, 0xda, 0x3a, 0x30, 0x7f, 0xdf, 0x60, + 0xf3, 0x5a, 0x47, 0xcc, 0xbd, 0xd8, 0xf2, 0x50, 0x39, 0x6e, 0xe7, 0x86, 0x3b, 0xbe, 0xe4, 0x83, + 0xce, 0x12, 0x2d, 0xc2, 0xb4, 0x35, 0xbc, 0x90, 0x15, 0x4c, 0x1c, 0x87, 0x83, 0x10, 0xfa, 0xb4, + 0xaa, 0x7a, 0x3e, 0x5d, 0xde, 0x4e, 0x56, 0xb5, 0x27, 0xb8, 0x76, 0x42, 0xa8, 0xcb, 0x22, 0xe0, + 0xd6, 0xa8, 0x07, 0x36, 0x07, 0x0f, 0x7c, 0xa9, 0xcf, 0xd4, 0x51, 0xa3, 0xdc, 0xd1, 0x33, 0x46, + 0x5e, 0x77, 0x8a, 0x6b, 0x1b, 0x58, 0xa7, 0x3e, 0x95, 0x94, 0xb8, 0xd6, 0x75, 0x17, 0x7d, 0x56, + 0x69, 0x97, 0x53, 0xfc, 0xd9, 0x55, 0x0b, 0x6d, 0x0b, 0xaf, 0x50, 0x61, 0xf5, 0x5c, 0xd6, 0x25, + 0xae, 0x1a, 0xb3, 0x08, 0x88, 0x0d, 0x16, 0xf8, 0xa4, 0xeb, 0x82, 0xa3, 0x57, 0xea, 0xa8, 0x31, + 0xdb, 0xd1, 0xa9, 0xd8, 0x55, 0x8c, 0x83, 0x8c, 0xb0, 0x93, 0xe0, 0x5a, 0x0b, 0x2f, 0x51, 0x61, + 0xd9, 0xcc, 0xf7, 0xc1, 0x96, 0x71, 0xe6, 0x4c, 0x88, 0x95, 0x70, 0x91, 0x8a, 0x76, 0x8e, 0x65, + 0x9a, 0x4d, 0xfc, 0x5f, 0x28, 0xc0, 0x1a, 0x1d, 0x04, 0xcb, 0x03, 0xaf, 0x0b, 0x5c, 0xf4, 0x69, + 0xa0, 0x57, 0x95, 0x6e, 0x39, 0x14, 0xd0, 0xce, 0x8e, 0xc5, 0x7e, 0x8e, 0x6a, 0x2f, 0xf0, 0xa4, + 0x24, 0x3d, 0xa1, 0xcf, 0xa9, 0x19, 0x6e, 0xdd, 
0x66, 0x86, 0x47, 0xa4, 0x97, 0x8e, 0x4c, 0x59, + 0xd5, 0xde, 0x23, 0x5c, 0xbb, 0x79, 0xae, 0xda, 0x5f, 0xb8, 0xfc, 0x06, 0x06, 0xe9, 0xd9, 0x8f, + 0x3f, 0xb5, 0xe7, 0x78, 0x2a, 0x22, 0x6e, 0x08, 0xea, 0x94, 0x57, 0x5b, 0x9b, 0x7f, 0x12, 0xa2, + 0x70, 0x83, 0x4e, 0xe2, 0xf3, 0x68, 0x62, 0x03, 0xd5, 0x1e, 0xe2, 0x4a, 0x1e, 0xac, 0x60, 0xcf, + 0x7f, 0xc6, 0xf7, 0xac, 0x8c, 0x09, 0x57, 0x3f, 0x4f, 0xe0, 0xa5, 0x42, 0x77, 0xed, 0x23, 0xc2, + 0xba, 0x1d, 0x0a, 0xc9, 0xbc, 0x82, 0x4b, 0x80, 0x54, 0x03, 0x5f, 0xde, 0x3a, 0xbb, 0xd9, 0x56, + 0xce, 0xc5, 0x77, 0x61, 0xd9, 0x2e, 0x04, 0x6b, 0x1c, 0xaf, 0xfc, 0x42, 0x56, 0x50, 0xf6, 0xd6, + 0x78, 0xd9, 0xf3, 0xad, 0xb5, 0xab, 0x0f, 0x81, 0x7a, 0xf0, 0xf2, 0x84, 0xe0, 0x1c, 0xc7, 0xd4, + 0xa3, 0x41, 0x00, 0x63, 0xfd, 0x79, 0xfa, 0xfa, 0xf4, 0xdc, 0x28, 0x9d, 0x9d, 0x1b, 0xa5, 0xcb, + 0x73, 0x03, 0xbd, 0x1b, 0x1a, 0xe8, 0xcb, 0xd0, 0x40, 0xdf, 0x86, 0x06, 0x3a, 0x1d, 0x1a, 0xe8, + 0xfb, 0xd0, 0x40, 0x3f, 0x86, 0x46, 0xe9, 0x72, 0x68, 0xa0, 0x4f, 0x17, 0x46, 0xe9, 0xf4, 0xc2, + 0x28, 0x9d, 0x5d, 0x18, 0xa5, 0x57, 0x0f, 0x7a, 0x6c, 0xb4, 0x17, 0x65, 0x37, 0x3f, 0xdf, 0x8f, + 0xc7, 0x7e, 0xbb, 0xd3, 0xea, 0x2d, 0xbd, 0xff, 0x33, 0x00, 0x00, 0xff, 0xff, 0x29, 0x7b, 0x71, + 0xe1, 0xf7, 0x05, 0x00, 0x00, } func (this *ClusterMetadata) Equal(that interface{}) bool { @@ -333,6 +345,14 @@ if this.UseClusterIdMembership != that1.UseClusterIdMembership { return false } + if len(this.Tags) != len(that1.Tags) { + return false + } + for i := range this.Tags { + if this.Tags[i] != that1.Tags[i] { + return false + } + } return true } func (this *IndexSearchAttributes) Equal(that interface{}) bool { @@ -368,7 +388,7 @@ if this == nil { return "nil" } - s := make([]string, 0, 15) + s := make([]string, 0, 16) s = append(s, "&persistence.ClusterMetadata{") s = append(s, "ClusterName: "+fmt.Sprintf("%#v", this.ClusterName)+",\n") s = append(s, "HistoryShardCount: "+fmt.Sprintf("%#v", this.HistoryShardCount)+",\n") @@ -395,6 +415,19 @@ s = append(s, "IsGlobalNamespaceEnabled: "+fmt.Sprintf("%#v", this.IsGlobalNamespaceEnabled)+",\n") s = append(s, "IsConnectionEnabled: "+fmt.Sprintf("%#v", this.IsConnectionEnabled)+",\n") s = append(s, "UseClusterIdMembership: "+fmt.Sprintf("%#v", this.UseClusterIdMembership)+",\n") + keysForTags := make([]string, 0, len(this.Tags)) + for k, _ := range this.Tags { + keysForTags = append(keysForTags, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForTags) + mapStringForTags := "map[string]string{" + for _, k := range keysForTags { + mapStringForTags += fmt.Sprintf("%#v: %#v,", k, this.Tags[k]) + } + mapStringForTags += "}" + if this.Tags != nil { + s = append(s, "Tags: "+mapStringForTags+",\n") + } s = append(s, "}") return strings.Join(s, "") } @@ -448,6 +481,25 @@ _ = i var l int _ = l + if len(m.Tags) > 0 { + for k := range m.Tags { + v := m.Tags[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintClusterMetadata(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintClusterMetadata(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintClusterMetadata(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x62 + } + } if m.UseClusterIdMembership { i-- if m.UseClusterIdMembership { @@ -659,6 +711,14 @@ if m.UseClusterIdMembership { n += 2 } + if len(m.Tags) > 0 { + for k, v := range m.Tags { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovClusterMetadata(uint64(len(k))) + 1 + len(v) + sovClusterMetadata(uint64(len(v))) + n += mapEntrySize + 1 + 
sovClusterMetadata(uint64(mapEntrySize)) + } + } return n } @@ -699,6 +759,16 @@ mapStringForIndexSearchAttributes += fmt.Sprintf("%v: %v,", k, this.IndexSearchAttributes[k]) } mapStringForIndexSearchAttributes += "}" + keysForTags := make([]string, 0, len(this.Tags)) + for k, _ := range this.Tags { + keysForTags = append(keysForTags, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForTags) + mapStringForTags := "map[string]string{" + for _, k := range keysForTags { + mapStringForTags += fmt.Sprintf("%v: %v,", k, this.Tags[k]) + } + mapStringForTags += "}" s := strings.Join([]string{`&ClusterMetadata{`, `ClusterName:` + fmt.Sprintf("%v", this.ClusterName) + `,`, `HistoryShardCount:` + fmt.Sprintf("%v", this.HistoryShardCount) + `,`, @@ -711,6 +781,7 @@ `IsGlobalNamespaceEnabled:` + fmt.Sprintf("%v", this.IsGlobalNamespaceEnabled) + `,`, `IsConnectionEnabled:` + fmt.Sprintf("%v", this.IsConnectionEnabled) + `,`, `UseClusterIdMembership:` + fmt.Sprintf("%v", this.UseClusterIdMembership) + `,`, + `Tags:` + mapStringForTags + `,`, `}`, }, "") return s @@ -1150,6 +1221,133 @@ } } m.UseClusterIdMembership = bool(v != 0) + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowClusterMetadata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthClusterMetadata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthClusterMetadata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Tags == nil { + m.Tags = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowClusterMetadata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowClusterMetadata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthClusterMetadata + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthClusterMetadata + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowClusterMetadata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthClusterMetadata + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthClusterMetadata + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = 
string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipClusterMetadata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthClusterMetadata + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Tags[mapkey] = mapvalue + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipClusterMetadata(dAtA[iNdEx:]) diff -Nru temporal-1.21.5-1/src/api/persistence/v1/executions.pb.go temporal-1.22.5/src/api/persistence/v1/executions.pb.go --- temporal-1.21.5-1/src/api/persistence/v1/executions.pb.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/api/persistence/v1/executions.pb.go 2024-02-23 09:45:43.000000000 +0000 @@ -73,13 +73,10 @@ Owner string `protobuf:"bytes,3,opt,name=owner,proto3" json:"owner,omitempty"` // (-- api-linter: core::0140::prepositions=disabled // aip.dev/not-precedent: "since" is needed here. --) - StolenSinceRenew int32 `protobuf:"varint,6,opt,name=stolen_since_renew,json=stolenSinceRenew,proto3" json:"stolen_since_renew,omitempty"` - UpdateTime *time.Time `protobuf:"bytes,7,opt,name=update_time,json=updateTime,proto3,stdtime" json:"update_time,omitempty"` - ReplicationDlqAckLevel map[string]int64 `protobuf:"bytes,13,rep,name=replication_dlq_ack_level,json=replicationDlqAckLevel,proto3" json:"replication_dlq_ack_level,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` - // Map from task category to ack levels of the corresponding queue processor - // Deprecated. Use queue_states instead. - QueueAckLevels map[int32]*QueueAckLevel `protobuf:"bytes,16,rep,name=queue_ack_levels,json=queueAckLevels,proto3" json:"queue_ack_levels,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - QueueStates map[int32]*QueueState `protobuf:"bytes,17,rep,name=queue_states,json=queueStates,proto3" json:"queue_states,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + StolenSinceRenew int32 `protobuf:"varint,6,opt,name=stolen_since_renew,json=stolenSinceRenew,proto3" json:"stolen_since_renew,omitempty"` + UpdateTime *time.Time `protobuf:"bytes,7,opt,name=update_time,json=updateTime,proto3,stdtime" json:"update_time,omitempty"` + ReplicationDlqAckLevel map[string]int64 `protobuf:"bytes,13,rep,name=replication_dlq_ack_level,json=replicationDlqAckLevel,proto3" json:"replication_dlq_ack_level,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` + QueueStates map[int32]*QueueState `protobuf:"bytes,17,rep,name=queue_states,json=queueStates,proto3" json:"queue_states,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } func (m *ShardInfo) Reset() { *m = ShardInfo{} } @@ -156,13 +153,6 @@ return nil } -func (m *ShardInfo) GetQueueAckLevels() map[int32]*QueueAckLevel { - if m != nil { - return m.QueueAckLevels - } - return nil -} - func (m *ShardInfo) GetQueueStates() map[int32]*QueueState { if m != nil { return m.QueueStates @@ -1619,27 +1609,28 @@ ScheduleToCloseTimeout *time.Duration `protobuf:"bytes,11,opt,name=schedule_to_close_timeout,json=scheduleToCloseTimeout,proto3,stdduration" json:"schedule_to_close_timeout,omitempty"` // (-- api-linter: core::0140::prepositions=disabled // aip.dev/not-precedent: "to" is used to indicate interval. 
--) - StartToCloseTimeout *time.Duration `protobuf:"bytes,12,opt,name=start_to_close_timeout,json=startToCloseTimeout,proto3,stdduration" json:"start_to_close_timeout,omitempty"` - HeartbeatTimeout *time.Duration `protobuf:"bytes,13,opt,name=heartbeat_timeout,json=heartbeatTimeout,proto3,stdduration" json:"heartbeat_timeout,omitempty"` - CancelRequested bool `protobuf:"varint,14,opt,name=cancel_requested,json=cancelRequested,proto3" json:"cancel_requested,omitempty"` - CancelRequestId int64 `protobuf:"varint,15,opt,name=cancel_request_id,json=cancelRequestId,proto3" json:"cancel_request_id,omitempty"` - TimerTaskStatus int32 `protobuf:"varint,16,opt,name=timer_task_status,json=timerTaskStatus,proto3" json:"timer_task_status,omitempty"` - Attempt int32 `protobuf:"varint,17,opt,name=attempt,proto3" json:"attempt,omitempty"` - TaskQueue string `protobuf:"bytes,18,opt,name=task_queue,json=taskQueue,proto3" json:"task_queue,omitempty"` - StartedIdentity string `protobuf:"bytes,19,opt,name=started_identity,json=startedIdentity,proto3" json:"started_identity,omitempty"` - HasRetryPolicy bool `protobuf:"varint,20,opt,name=has_retry_policy,json=hasRetryPolicy,proto3" json:"has_retry_policy,omitempty"` - RetryInitialInterval *time.Duration `protobuf:"bytes,21,opt,name=retry_initial_interval,json=retryInitialInterval,proto3,stdduration" json:"retry_initial_interval,omitempty"` - RetryMaximumInterval *time.Duration `protobuf:"bytes,22,opt,name=retry_maximum_interval,json=retryMaximumInterval,proto3,stdduration" json:"retry_maximum_interval,omitempty"` - RetryMaximumAttempts int32 `protobuf:"varint,23,opt,name=retry_maximum_attempts,json=retryMaximumAttempts,proto3" json:"retry_maximum_attempts,omitempty"` - RetryExpirationTime *time.Time `protobuf:"bytes,24,opt,name=retry_expiration_time,json=retryExpirationTime,proto3,stdtime" json:"retry_expiration_time,omitempty"` - RetryBackoffCoefficient float64 `protobuf:"fixed64,25,opt,name=retry_backoff_coefficient,json=retryBackoffCoefficient,proto3" json:"retry_backoff_coefficient,omitempty"` - RetryNonRetryableErrorTypes []string `protobuf:"bytes,26,rep,name=retry_non_retryable_error_types,json=retryNonRetryableErrorTypes,proto3" json:"retry_non_retryable_error_types,omitempty"` - RetryLastFailure *v18.Failure `protobuf:"bytes,27,opt,name=retry_last_failure,json=retryLastFailure,proto3" json:"retry_last_failure,omitempty"` - RetryLastWorkerIdentity string `protobuf:"bytes,28,opt,name=retry_last_worker_identity,json=retryLastWorkerIdentity,proto3" json:"retry_last_worker_identity,omitempty"` - ScheduledEventId int64 `protobuf:"varint,30,opt,name=scheduled_event_id,json=scheduledEventId,proto3" json:"scheduled_event_id,omitempty"` - LastHeartbeatDetails *v12.Payloads `protobuf:"bytes,31,opt,name=last_heartbeat_details,json=lastHeartbeatDetails,proto3" json:"last_heartbeat_details,omitempty"` - LastHeartbeatUpdateTime *time.Time `protobuf:"bytes,32,opt,name=last_heartbeat_update_time,json=lastHeartbeatUpdateTime,proto3,stdtime" json:"last_heartbeat_update_time,omitempty"` - UseCompatibleVersion bool `protobuf:"varint,33,opt,name=use_compatible_version,json=useCompatibleVersion,proto3" json:"use_compatible_version,omitempty"` + StartToCloseTimeout *time.Duration `protobuf:"bytes,12,opt,name=start_to_close_timeout,json=startToCloseTimeout,proto3,stdduration" json:"start_to_close_timeout,omitempty"` + HeartbeatTimeout *time.Duration `protobuf:"bytes,13,opt,name=heartbeat_timeout,json=heartbeatTimeout,proto3,stdduration" json:"heartbeat_timeout,omitempty"` + 
CancelRequested bool `protobuf:"varint,14,opt,name=cancel_requested,json=cancelRequested,proto3" json:"cancel_requested,omitempty"` + CancelRequestId int64 `protobuf:"varint,15,opt,name=cancel_request_id,json=cancelRequestId,proto3" json:"cancel_request_id,omitempty"` + TimerTaskStatus int32 `protobuf:"varint,16,opt,name=timer_task_status,json=timerTaskStatus,proto3" json:"timer_task_status,omitempty"` + Attempt int32 `protobuf:"varint,17,opt,name=attempt,proto3" json:"attempt,omitempty"` + TaskQueue string `protobuf:"bytes,18,opt,name=task_queue,json=taskQueue,proto3" json:"task_queue,omitempty"` + StartedIdentity string `protobuf:"bytes,19,opt,name=started_identity,json=startedIdentity,proto3" json:"started_identity,omitempty"` + HasRetryPolicy bool `protobuf:"varint,20,opt,name=has_retry_policy,json=hasRetryPolicy,proto3" json:"has_retry_policy,omitempty"` + RetryInitialInterval *time.Duration `protobuf:"bytes,21,opt,name=retry_initial_interval,json=retryInitialInterval,proto3,stdduration" json:"retry_initial_interval,omitempty"` + RetryMaximumInterval *time.Duration `protobuf:"bytes,22,opt,name=retry_maximum_interval,json=retryMaximumInterval,proto3,stdduration" json:"retry_maximum_interval,omitempty"` + RetryMaximumAttempts int32 `protobuf:"varint,23,opt,name=retry_maximum_attempts,json=retryMaximumAttempts,proto3" json:"retry_maximum_attempts,omitempty"` + RetryExpirationTime *time.Time `protobuf:"bytes,24,opt,name=retry_expiration_time,json=retryExpirationTime,proto3,stdtime" json:"retry_expiration_time,omitempty"` + RetryBackoffCoefficient float64 `protobuf:"fixed64,25,opt,name=retry_backoff_coefficient,json=retryBackoffCoefficient,proto3" json:"retry_backoff_coefficient,omitempty"` + RetryNonRetryableErrorTypes []string `protobuf:"bytes,26,rep,name=retry_non_retryable_error_types,json=retryNonRetryableErrorTypes,proto3" json:"retry_non_retryable_error_types,omitempty"` + RetryLastFailure *v18.Failure `protobuf:"bytes,27,opt,name=retry_last_failure,json=retryLastFailure,proto3" json:"retry_last_failure,omitempty"` + RetryLastWorkerIdentity string `protobuf:"bytes,28,opt,name=retry_last_worker_identity,json=retryLastWorkerIdentity,proto3" json:"retry_last_worker_identity,omitempty"` + ScheduledEventId int64 `protobuf:"varint,30,opt,name=scheduled_event_id,json=scheduledEventId,proto3" json:"scheduled_event_id,omitempty"` + LastHeartbeatDetails *v12.Payloads `protobuf:"bytes,31,opt,name=last_heartbeat_details,json=lastHeartbeatDetails,proto3" json:"last_heartbeat_details,omitempty"` + LastHeartbeatUpdateTime *time.Time `protobuf:"bytes,32,opt,name=last_heartbeat_update_time,json=lastHeartbeatUpdateTime,proto3,stdtime" json:"last_heartbeat_update_time,omitempty"` + UseCompatibleVersion bool `protobuf:"varint,33,opt,name=use_compatible_version,json=useCompatibleVersion,proto3" json:"use_compatible_version,omitempty"` + ActivityType *v12.ActivityType `protobuf:"bytes,34,opt,name=activity_type,json=activityType,proto3" json:"activity_type,omitempty"` } func (m *ActivityInfo) Reset() { *m = ActivityInfo{} } @@ -1884,6 +1875,13 @@ return false } +func (m *ActivityInfo) GetActivityType() *v12.ActivityType { + if m != nil { + return m.ActivityType + } + return nil +} + // timer_map column type TimerInfo struct { Version int64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` @@ -2291,7 +2289,6 @@ func init() { proto.RegisterType((*ShardInfo)(nil), "temporal.server.api.persistence.v1.ShardInfo") - proto.RegisterMapType((map[int32]*QueueAckLevel)(nil), 
"temporal.server.api.persistence.v1.ShardInfo.QueueAckLevelsEntry") proto.RegisterMapType((map[int32]*QueueState)(nil), "temporal.server.api.persistence.v1.ShardInfo.QueueStatesEntry") proto.RegisterMapType((map[string]int64)(nil), "temporal.server.api.persistence.v1.ShardInfo.ReplicationDlqAckLevelEntry") proto.RegisterType((*WorkflowExecutionInfo)(nil), "temporal.server.api.persistence.v1.WorkflowExecutionInfo") @@ -2319,242 +2316,241 @@ } var fileDescriptor_67a714d0e7ba9f37 = []byte{ - // 3751 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x3b, 0xcd, 0x73, 0xdb, 0xc6, - 0x77, 0xa6, 0x05, 0x49, 0xe0, 0x23, 0x45, 0x41, 0xd0, 0x17, 0xa4, 0xc8, 0x94, 0xcc, 0xd8, 0xfe, - 0xc9, 0x8e, 0x4d, 0xc5, 0xb2, 0x53, 0xe7, 0x97, 0xb4, 0x71, 0x25, 0x5a, 0xb6, 0xc9, 0x38, 0xb6, - 0x03, 0x29, 0x1f, 0x93, 0x26, 0xc3, 0x81, 0x80, 0x95, 0x84, 0x8a, 0x04, 0x68, 0x2c, 0x28, 0x99, - 0x99, 0x1e, 0x72, 0xe8, 0xf4, 0x9c, 0xde, 0xfa, 0x27, 0xf4, 0xd8, 0x4b, 0x6f, 0x3d, 0xf4, 0xd0, - 0x43, 0x4f, 0x9d, 0xdc, 0x9a, 0x5b, 0x1b, 0xe7, 0xd2, 0x4b, 0x27, 0x99, 0x1e, 0x7a, 0xee, 0xec, - 0xdb, 0x5d, 0x10, 0x00, 0x21, 0x89, 0x72, 0xe3, 0x43, 0x6e, 0xe2, 0xbe, 0xcf, 0xdd, 0x7d, 0xfb, - 0x3e, 0x21, 0xb8, 0x13, 0x92, 0x76, 0xc7, 0x0f, 0xac, 0xd6, 0x1a, 0x25, 0xc1, 0x11, 0x09, 0xd6, - 0xac, 0x8e, 0xbb, 0xd6, 0x21, 0x01, 0x75, 0x69, 0x48, 0x3c, 0x9b, 0xac, 0x1d, 0xdd, 0x5e, 0x23, - 0x2f, 0x89, 0xdd, 0x0d, 0x5d, 0xdf, 0xa3, 0xd5, 0x4e, 0xe0, 0x87, 0xbe, 0x5e, 0x91, 0x44, 0x55, - 0x4e, 0x54, 0xb5, 0x3a, 0x6e, 0x35, 0x46, 0x54, 0x3d, 0xba, 0xbd, 0x58, 0xde, 0xf7, 0xfd, 0xfd, - 0x16, 0x59, 0x43, 0x8a, 0xdd, 0xee, 0xde, 0x9a, 0xd3, 0x0d, 0x2c, 0xc6, 0x84, 0xf3, 0x58, 0x5c, - 0x4e, 0xc3, 0x43, 0xb7, 0x4d, 0x68, 0x68, 0xb5, 0x3b, 0x02, 0xe1, 0xb2, 0x43, 0x3a, 0xc4, 0x73, - 0x88, 0x67, 0xbb, 0x84, 0xae, 0xed, 0xfb, 0xfb, 0x3e, 0xae, 0xe3, 0x5f, 0x02, 0xe5, 0x4a, 0xa4, - 0x3c, 0xd3, 0xda, 0xf6, 0xdb, 0x6d, 0xdf, 0x63, 0x0a, 0xb7, 0x09, 0xa5, 0xd6, 0x3e, 0xc9, 0xc4, - 0x22, 0x5e, 0xb7, 0x4d, 0x19, 0xd2, 0xb1, 0x1f, 0x1c, 0xee, 0xb5, 0xfc, 0x63, 0x81, 0x75, 0x35, - 0x81, 0xb5, 0x67, 0xb9, 0xad, 0x6e, 0x40, 0x06, 0x99, 0x5d, 0x4b, 0xa0, 0x49, 0x1e, 0x83, 0x78, - 0x37, 0xb2, 0xce, 0xd5, 0x6e, 0xf9, 0xf6, 0xe1, 0x20, 0xee, 0xf5, 0x2c, 0xdc, 0x48, 0x4f, 0xbe, - 0x2d, 0x81, 0xfa, 0xce, 0xa9, 0xa8, 0xa9, 0x2d, 0xfd, 0xe1, 0x54, 0xe4, 0xd0, 0xa2, 0x87, 0x02, - 0xf1, 0xbd, 0xa1, 0xb8, 0x36, 0x19, 0x45, 0x33, 0xec, 0x75, 0xa4, 0xde, 0x37, 0xb3, 0xc8, 0x0e, - 0x5c, 0x1a, 0xfa, 0x41, 0x6f, 0x70, 0x97, 0x6b, 0x43, 0x58, 0xda, 0x8b, 0x2e, 0xe9, 0x12, 0x7a, - 0xda, 0x5e, 0xbb, 0x1d, 0xc7, 0x0a, 0x33, 0xee, 0xe5, 0x56, 0x16, 0xf2, 0x89, 0xd7, 0x53, 0xf9, - 0x9b, 0x71, 0xc8, 0x6f, 0x1f, 0x58, 0x81, 0x53, 0xf7, 0xf6, 0x7c, 0x7d, 0x01, 0x54, 0xca, 0x7e, - 0x34, 0x5d, 0xc7, 0xc8, 0xad, 0xe4, 0x56, 0x47, 0xcd, 0x71, 0xfc, 0x5d, 0x77, 0x18, 0x28, 0xb0, - 0xbc, 0x7d, 0xc2, 0x40, 0x17, 0x57, 0x72, 0xab, 0x23, 0xe6, 0x38, 0xfe, 0xae, 0x3b, 0xfa, 0x0c, - 0x8c, 0xfa, 0xc7, 0x1e, 0x09, 0x8c, 0x91, 0x95, 0xdc, 0x6a, 0xde, 0xe4, 0x3f, 0xf4, 0x9b, 0xa0, - 0xd3, 0xd0, 0x6f, 0x11, 0xaf, 0x49, 0x5d, 0xcf, 0x26, 0xcd, 0x80, 0x78, 0xe4, 0xd8, 0x18, 0x43, - 0xae, 0x1a, 0x87, 0x6c, 0x33, 0x80, 0xc9, 0xd6, 0xf5, 0x0d, 0x28, 0xf0, 0x1d, 0x35, 0x99, 0xf9, - 0x1b, 0xe3, 0x2b, 0xb9, 0xd5, 0xc2, 0xfa, 0x62, 0x95, 0xbf, 0x8d, 0xaa, 0x7c, 0x1b, 0xd5, 0x1d, - 0xf9, 0x36, 0x36, 0x95, 0xef, 0xff, 0x63, 0x39, 0x67, 0x02, 0x27, 0x62, 0xcb, 0xfa, 0x5f, 0xe7, - 0x60, 0x21, 0x20, 0x9d, 0x96, 0x6b, 0xe3, 0xf3, 0x6a, 0x3a, 0xad, 0x17, 
0x4d, 0xcb, 0x3e, 0x6c, - 0xb6, 0xc8, 0x11, 0x69, 0x19, 0x13, 0x2b, 0x23, 0xab, 0x85, 0xf5, 0x7a, 0xf5, 0xec, 0x17, 0x5b, - 0x8d, 0xce, 0xa3, 0x6a, 0xf6, 0xd9, 0x3d, 0x68, 0xbd, 0xd8, 0xb0, 0x0f, 0x9f, 0x30, 0x5e, 0x5b, - 0x5e, 0x18, 0xf4, 0xcc, 0xb9, 0x20, 0x13, 0xa8, 0x1f, 0x82, 0x86, 0xb7, 0xd7, 0x97, 0x4d, 0x0d, - 0x0d, 0x85, 0x6f, 0x9c, 0x4f, 0xf8, 0xa7, 0x8c, 0x8b, 0x64, 0x4b, 0xb9, 0xd0, 0xd2, 0x8b, 0xc4, - 0xa2, 0x6e, 0x41, 0x91, 0x0b, 0xa3, 0xa1, 0x15, 0x12, 0x6a, 0x4c, 0xa1, 0xa0, 0x8f, 0x5e, 0x43, - 0xd0, 0x36, 0x32, 0xe0, 0x52, 0x0a, 0x2f, 0xfa, 0x2b, 0x8b, 0x75, 0x78, 0xeb, 0x94, 0x63, 0xd0, - 0x35, 0x18, 0x39, 0x24, 0x3d, 0xb4, 0x96, 0xbc, 0xc9, 0xfe, 0x64, 0xe6, 0x70, 0x64, 0xb5, 0xba, - 0x44, 0x98, 0x09, 0xff, 0xf1, 0xc1, 0xc5, 0xf7, 0x73, 0x8b, 0x21, 0x4c, 0x67, 0x6c, 0x2a, 0xce, - 0x62, 0x94, 0xb3, 0x78, 0x14, 0x67, 0x51, 0x58, 0xbf, 0x3d, 0xcc, 0x7e, 0x12, 0x9c, 0xe3, 0x52, - 0x3d, 0xd0, 0xd2, 0x3b, 0xcc, 0x10, 0xf9, 0x20, 0x29, 0xb2, 0x3a, 0xb4, 0x48, 0x64, 0x1b, 0x93, - 0xd7, 0x50, 0x54, 0x45, 0x1b, 0x6d, 0x28, 0xea, 0xa8, 0x36, 0xd6, 0x50, 0x54, 0x55, 0xcb, 0x37, - 0x14, 0x35, 0xaf, 0x41, 0x43, 0x51, 0x41, 0x2b, 0x34, 0x14, 0xb5, 0xa0, 0x15, 0x1b, 0x8a, 0x5a, - 0xd4, 0x26, 0x1a, 0x8a, 0x5a, 0xd2, 0x26, 0x1b, 0x8a, 0x3a, 0xa9, 0x69, 0x95, 0xff, 0xbd, 0x06, - 0xb3, 0x5f, 0x88, 0x67, 0xba, 0x25, 0xe3, 0x0c, 0x3e, 0xca, 0xcb, 0x50, 0xf4, 0xac, 0x36, 0xa1, - 0x1d, 0xcb, 0x26, 0xf2, 0x61, 0xe6, 0xcd, 0x42, 0xb4, 0x56, 0x77, 0xf4, 0x65, 0x28, 0x44, 0xce, - 0x49, 0xbc, 0xcf, 0xbc, 0x09, 0x72, 0xa9, 0xee, 0xe8, 0x55, 0x98, 0xee, 0x58, 0x01, 0xf1, 0xc2, - 0x66, 0x82, 0x15, 0x7f, 0xb0, 0x53, 0x1c, 0xf4, 0x34, 0xc6, 0xf0, 0x26, 0xe8, 0x02, 0x3f, 0xce, - 0x57, 0x41, 0x74, 0x8d, 0x43, 0xbe, 0xe8, 0x73, 0xaf, 0xc0, 0x84, 0xc0, 0x0e, 0xba, 0x1e, 0x43, - 0x1c, 0xe5, 0x2a, 0xf2, 0x45, 0xb3, 0xeb, 0x25, 0x34, 0x70, 0x3d, 0x37, 0x74, 0xad, 0x90, 0xa0, - 0x97, 0x19, 0x43, 0x1b, 0x11, 0x1a, 0xd4, 0x25, 0xa4, 0xee, 0xe8, 0x7f, 0x84, 0x05, 0xdb, 0x6f, - 0x77, 0x5a, 0x04, 0xdf, 0x32, 0x39, 0x62, 0x94, 0xbb, 0x56, 0x68, 0x1f, 0x30, 0xaa, 0x71, 0xa4, - 0x9a, 0xeb, 0x23, 0x6c, 0x31, 0xf8, 0x26, 0x03, 0xd7, 0x1d, 0xfd, 0x12, 0x00, 0x7a, 0x68, 0xb4, - 0x62, 0x23, 0x8f, 0xba, 0xe4, 0xd9, 0x0a, 0xde, 0x17, 0xdb, 0x5b, 0xdf, 0x93, 0xf7, 0x3a, 0x04, - 0x8f, 0xc4, 0x00, 0xbe, 0x37, 0x09, 0xd9, 0xe9, 0x75, 0x08, 0x3b, 0x10, 0xfd, 0x1b, 0x58, 0x8c, - 0xb0, 0xa3, 0xf8, 0x8f, 0x4e, 0xca, 0xef, 0x86, 0x46, 0x01, 0x8d, 0x65, 0x61, 0xc0, 0x4f, 0x3d, - 0x10, 0x31, 0x7e, 0x53, 0xf9, 0x3b, 0xe6, 0xa6, 0x8c, 0xe3, 0xf4, 0xcd, 0xee, 0x70, 0x06, 0xfa, - 0xa7, 0x30, 0x13, 0xb1, 0x67, 0x87, 0x27, 0x19, 0x17, 0x87, 0x63, 0x1c, 0xed, 0xc4, 0xec, 0x46, - 0x2c, 0x77, 0xe1, 0x92, 0x43, 0xf6, 0xac, 0x6e, 0x2b, 0x76, 0x79, 0x3c, 0x62, 0x09, 0xde, 0x13, - 0xc3, 0xf1, 0x5e, 0x14, 0x5c, 0xe4, 0x45, 0xef, 0x58, 0xf4, 0x50, 0xca, 0x78, 0x07, 0xf4, 0x96, - 0x45, 0x43, 0x71, 0x2f, 0xc8, 0xdd, 0x75, 0x8c, 0x29, 0xbc, 0x96, 0x49, 0x06, 0xc1, 0x0b, 0x61, - 0x14, 0x75, 0x47, 0xbf, 0x05, 0xd3, 0x88, 0xbc, 0xe7, 0x06, 0x11, 0x89, 0xeb, 0x18, 0x3a, 0x62, - 0x6b, 0x0c, 0xf4, 0x90, 0x41, 0x90, 0xa4, 0xee, 0xe8, 0x1f, 0xc3, 0xdb, 0x88, 0x9e, 0x54, 0x9e, - 0x86, 0x56, 0xc0, 0x6c, 0x26, 0x22, 0x9f, 0x46, 0xf2, 0x32, 0x43, 0x8d, 0x6b, 0xb8, 0xcd, 0xf1, - 0x24, 0xb3, 0xfb, 0x00, 0x48, 0xc9, 0xc3, 0xca, 0xcc, 0x90, 0x61, 0x25, 0x8f, 0x34, 0x18, 0x55, - 0x1a, 0x80, 0x1a, 0x36, 0xe3, 0xd1, 0x69, 0x76, 0x48, 0x36, 0x25, 0x46, 0xf9, 0x59, 0x3f, 0x42, - 0xad, 0xc3, 0x6c, 0x72, 0x53, 0x47, 0xcc, 0x9f, 0xf8, 0x9e, 0x31, 0x87, 0x7b, 0x99, 0x3e, 0x8e, - 
0xed, 0xe3, 0x73, 0x0e, 0xd2, 0x1f, 0xc2, 0x4a, 0xea, 0x20, 0xec, 0x03, 0xe2, 0x74, 0x5b, 0xf1, - 0xa3, 0x98, 0x47, 0xf2, 0xa5, 0x38, 0xf9, 0xb6, 0xc4, 0x92, 0x07, 0xb1, 0x09, 0xe5, 0x33, 0x0e, - 0xd4, 0x40, 0x2e, 0x8b, 0xc7, 0x27, 0x1f, 0xe6, 0x76, 0x5a, 0x7f, 0x69, 0x51, 0x0b, 0xc3, 0x59, - 0x54, 0x62, 0x83, 0xd2, 0x94, 0x06, 0x0e, 0xc5, 0x0a, 0x99, 0xeb, 0x0d, 0x8d, 0x45, 0x74, 0xce, - 0x09, 0x9a, 0x0d, 0x0e, 0x4a, 0x3c, 0xca, 0xc4, 0x66, 0xf0, 0x7a, 0xde, 0x1a, 0xf2, 0x7a, 0xe6, - 0x33, 0xb6, 0x8a, 0xf7, 0x64, 0xc1, 0xd2, 0x49, 0x67, 0x8e, 0x02, 0x96, 0x86, 0x14, 0xb0, 0x90, - 0x79, 0x23, 0x28, 0x22, 0x80, 0xab, 0x49, 0x11, 0x7e, 0xe0, 0xee, 0xbb, 0x9e, 0xd5, 0x4a, 0xcb, - 0x2a, 0x0f, 0x29, 0xeb, 0x72, 0x5c, 0xd6, 0x33, 0xc1, 0x2c, 0x29, 0xf3, 0x1e, 0x18, 0x49, 0x99, - 0x01, 0x79, 0xd1, 0x25, 0x14, 0x2f, 0x7f, 0x19, 0xdd, 0xdf, 0x6c, 0x9c, 0x89, 0xc9, 0xa1, 0x75, - 0x47, 0xff, 0x3a, 0xee, 0x31, 0x65, 0xee, 0x6b, 0x3c, 0x58, 0xc9, 0xad, 0x96, 0x4e, 0x08, 0x94, - 0x98, 0x33, 0xb3, 0x10, 0x99, 0x70, 0x1e, 0xbd, 0x0e, 0x89, 0x79, 0x58, 0xb1, 0xa2, 0x3f, 0x4b, - 0x1f, 0x05, 0xed, 0xee, 0xef, 0x33, 0xb5, 0x6c, 0xdf, 0x0b, 0x5d, 0x8f, 0x65, 0x52, 0xb4, 0xc9, - 0x72, 0xc7, 0xad, 0x95, 0xdc, 0xaa, 0x6a, 0xae, 0x24, 0x0e, 0x95, 0xa3, 0xd6, 0x04, 0xe6, 0x06, - 0x7d, 0x4a, 0x8e, 0x07, 0x9f, 0x8c, 0x48, 0xc5, 0x9b, 0xd4, 0xfd, 0x96, 0x34, 0x77, 0x7b, 0x2c, - 0x51, 0x7a, 0x38, 0xf8, 0x64, 0x1e, 0x73, 0xac, 0x6d, 0xf7, 0x5b, 0xb2, 0xc9, 0x70, 0xf4, 0xeb, - 0xa0, 0xd9, 0x96, 0x67, 0x93, 0x96, 0x3c, 0x28, 0xe2, 0x18, 0x97, 0x50, 0x87, 0x49, 0xbe, 0x6e, - 0xca, 0x65, 0xfd, 0x06, 0x4c, 0x25, 0x51, 0xd9, 0x99, 0xae, 0xe0, 0x99, 0x26, 0x71, 0xeb, 0x88, - 0x4b, 0x43, 0xd7, 0x3e, 0xec, 0x35, 0x63, 0x51, 0xea, 0x32, 0xc7, 0xe5, 0x80, 0x9d, 0x28, 0x56, - 0xed, 0xc3, 0x8a, 0xc0, 0x95, 0x66, 0xd1, 0x0c, 0xfd, 0x66, 0xdf, 0xa3, 0xb1, 0xc7, 0x57, 0x19, - 0xee, 0xf1, 0x2d, 0x71, 0x46, 0xd2, 0x24, 0x76, 0xfc, 0x6d, 0xe9, 0xe3, 0xd8, 0x2b, 0x34, 0x60, - 0x5c, 0xbe, 0xbb, 0xb7, 0x79, 0xe2, 0x2f, 0x7e, 0xea, 0x9f, 0xc1, 0x5c, 0x40, 0xc2, 0xa0, 0x27, - 0xe2, 0x76, 0xab, 0xe9, 0x7a, 0x21, 0x09, 0x8e, 0xac, 0x96, 0x71, 0x65, 0x38, 0xc1, 0x33, 0x48, - 0xce, 0x63, 0x7b, 0xab, 0x2e, 0x88, 0xfb, 0x6c, 0xdb, 0xd6, 0x4b, 0xb7, 0xdd, 0x6d, 0xf7, 0xd9, - 0x5e, 0x3d, 0x0f, 0xdb, 0x4f, 0x38, 0x75, 0xc4, 0xf6, 0x6e, 0x9a, 0xad, 0xd8, 0x06, 0x35, 0xae, - 0xe1, 0xb6, 0x12, 0x54, 0xc2, 0x9d, 0x50, 0xfd, 0x03, 0x56, 0x39, 0x30, 0xaa, 0x5d, 0xcb, 0x3e, - 0xf4, 0xf7, 0xf6, 0x9a, 0xb6, 0x4f, 0xf6, 0xf6, 0x5c, 0xdb, 0x25, 0x5e, 0x68, 0xfc, 0x61, 0x25, - 0xb7, 0x9a, 0x33, 0xe7, 0x11, 0x61, 0x93, 0xc3, 0x6b, 0x7d, 0xb0, 0xde, 0x86, 0x4a, 0x46, 0x82, - 0x40, 0x5e, 0x76, 0x5c, 0xae, 0x2e, 0x7f, 0xc6, 0xab, 0x43, 0x3e, 0xe3, 0xe5, 0x81, 0x4c, 0x61, - 0x2b, 0xe2, 0x84, 0x8f, 0xf8, 0x01, 0x2c, 0x73, 0x55, 0x3d, 0xdf, 0x6b, 0xe2, 0x5f, 0xd6, 0x6e, - 0x8b, 0x34, 0x49, 0x10, 0xf8, 0x01, 0xbe, 0x4b, 0x6a, 0x5c, 0x5f, 0x19, 0x59, 0xcd, 0x9b, 0x6f, - 0x21, 0xf0, 0xa9, 0xef, 0x99, 0x12, 0x69, 0x8b, 0xe1, 0xb0, 0x27, 0x47, 0xf5, 0x55, 0xd0, 0x0e, - 0x2c, 0xca, 0xe9, 0x9b, 0x1d, 0xbf, 0xe5, 0xda, 0x3d, 0xe3, 0x06, 0x9a, 0x76, 0xe9, 0xc0, 0xa2, - 0x48, 0xf1, 0x1c, 0x57, 0xf5, 0xb7, 0x61, 0xc2, 0x0e, 0x7c, 0x2f, 0xb2, 0x3f, 0xe3, 0x1d, 0xb4, - 0xd4, 0x22, 0x5b, 0x94, 0xb6, 0xc4, 0x52, 0x54, 0xea, 0xee, 0x33, 0xef, 0x65, 0xfb, 0x5d, 0x2f, - 0x34, 0xaa, 0xf8, 0xba, 0x0a, 0x7c, 0xad, 0xc6, 0x96, 0xf4, 0xab, 0x50, 0xb2, 0xec, 0xd0, 0x3d, - 0x72, 0xc3, 0x9e, 0x40, 0x7a, 0x84, 0x48, 0x13, 0x72, 0x95, 0xa3, 0xad, 0xc3, 0xac, 0x7d, 0xe0, - 0xb6, 0x9c, 0xd8, 0x51, 
0x72, 0xec, 0xc7, 0x3c, 0x44, 0x22, 0x30, 0x3a, 0x1b, 0x4e, 0xb3, 0x0a, - 0x5a, 0x97, 0x92, 0x00, 0x0f, 0x3a, 0x10, 0xe8, 0x75, 0x44, 0x2f, 0xb1, 0x75, 0x76, 0x6c, 0x01, - 0xc7, 0xdc, 0x80, 0x4b, 0xf2, 0x7d, 0x8a, 0xe7, 0x4a, 0x5e, 0x86, 0x24, 0xe8, 0x2b, 0xde, 0xe0, - 0x31, 0x50, 0x20, 0xd5, 0x10, 0x67, 0x4b, 0xa0, 0x44, 0x0a, 0x8a, 0xad, 0xa6, 0x48, 0x3f, 0xe6, - 0x0a, 0x72, 0x60, 0x92, 0xe6, 0x32, 0x14, 0x45, 0xfa, 0xc0, 0x51, 0x3f, 0xe1, 0xc7, 0xc3, 0xd7, - 0x38, 0xca, 0xa7, 0x30, 0x65, 0x75, 0x43, 0xbf, 0x19, 0x10, 0x4a, 0xc2, 0x66, 0xc7, 0x77, 0xbd, - 0x90, 0x1a, 0x77, 0xd0, 0x68, 0xae, 0xf6, 0x3d, 0x2c, 0x73, 0xad, 0x51, 0x6f, 0xe3, 0xe8, 0x76, - 0xd5, 0x64, 0xd8, 0xcf, 0x11, 0xd9, 0x9c, 0x64, 0xf4, 0xb1, 0x05, 0xfd, 0xaf, 0x60, 0x8a, 0x12, - 0x2b, 0xb0, 0x0f, 0xd8, 0x1b, 0x08, 0xdc, 0xdd, 0x2e, 0xf3, 0x7b, 0x77, 0xb1, 0x40, 0x7c, 0x36, - 0x4c, 0x75, 0x93, 0x59, 0x8d, 0x54, 0xb7, 0x91, 0xe5, 0x46, 0xc4, 0x91, 0x57, 0x8c, 0x1a, 0x4d, - 0x2d, 0xeb, 0x5f, 0x80, 0xd2, 0x26, 0x6d, 0xdf, 0x78, 0x0f, 0x05, 0xd6, 0x5e, 0x5f, 0xe0, 0x27, - 0xa4, 0xed, 0x73, 0x21, 0xc8, 0x50, 0xff, 0x06, 0xa6, 0x44, 0xda, 0x24, 0xfc, 0xba, 0x4b, 0xa8, - 0xf1, 0x27, 0x78, 0x52, 0xef, 0x66, 0x4a, 0x11, 0xde, 0x9f, 0x49, 0x10, 0x49, 0xd5, 0x63, 0x49, - 0x67, 0x6a, 0x47, 0xa9, 0x15, 0xfd, 0x0e, 0xcc, 0x89, 0x3c, 0x35, 0x32, 0x40, 0x51, 0xd4, 0xdc, - 0x43, 0xc3, 0x9f, 0x46, 0x68, 0xa4, 0x22, 0x2f, 0x6e, 0xfe, 0x02, 0x26, 0xfb, 0xe8, 0xac, 0x14, - 0xa7, 0xc6, 0xfb, 0xa8, 0xd1, 0xfa, 0x30, 0xfb, 0x8e, 0x98, 0xb1, 0x52, 0x92, 0x9a, 0x25, 0x92, - 0xf8, 0x9d, 0xc8, 0x46, 0x98, 0x2a, 0x69, 0xd7, 0xf2, 0xc7, 0xf3, 0x66, 0x23, 0x66, 0x37, 0xed, - 0x54, 0xee, 0xc2, 0xfc, 0x40, 0x86, 0x1e, 0xbe, 0xc4, 0x5d, 0x7f, 0xc0, 0xcd, 0x3a, 0x99, 0xa5, - 0xef, 0xbc, 0x64, 0xbb, 0xbe, 0x0b, 0x73, 0xd8, 0x76, 0x68, 0x86, 0x81, 0xe5, 0x51, 0x37, 0xf6, - 0x58, 0x3f, 0x44, 0xa2, 0x19, 0x84, 0xee, 0x44, 0x40, 0x6e, 0xe9, 0x8f, 0xa0, 0x94, 0xac, 0xa3, - 0x8c, 0x3f, 0x1d, 0x72, 0x03, 0x13, 0x24, 0x5e, 0x3d, 0xe9, 0x6b, 0x30, 0xe3, 0x91, 0xe3, 0xc1, - 0x7b, 0xfa, 0x33, 0x5e, 0xd4, 0x7a, 0xe4, 0x38, 0x75, 0x4b, 0x4f, 0xa0, 0x28, 0x4a, 0x50, 0xec, - 0x3f, 0x1a, 0x1f, 0xa1, 0xdc, 0xeb, 0x99, 0x57, 0x84, 0x18, 0xdc, 0x64, 0xec, 0xd0, 0x0f, 0x6a, - 0xec, 0xa7, 0x2c, 0x68, 0xf1, 0x87, 0xfe, 0x3e, 0x18, 0x03, 0x05, 0xad, 0xcc, 0xe7, 0xef, 0xf3, - 0xfa, 0x34, 0x55, 0xd5, 0xca, 0x94, 0xfe, 0x0e, 0xcc, 0xd9, 0x2d, 0x9f, 0x8a, 0x73, 0xdb, 0x63, - 0x9e, 0x4b, 0x14, 0x50, 0x7f, 0x2e, 0x9c, 0x1c, 0x83, 0xee, 0x08, 0xa0, 0x28, 0xa2, 0xee, 0x81, - 0xc1, 0x89, 0x8e, 0x5c, 0xea, 0xee, 0xba, 0x2d, 0xe6, 0x47, 0x25, 0xd9, 0x06, 0x92, 0xcd, 0x22, - 0xfc, 0xf3, 0x08, 0x2c, 0x08, 0xef, 0x03, 0x08, 0x69, 0xec, 0xac, 0x37, 0x87, 0xad, 0x80, 0xb8, - 0x0e, 0xec, 0x9c, 0xb7, 0x60, 0x39, 0x5b, 0xb2, 0x28, 0xbf, 0x89, 0x63, 0xd4, 0x30, 0x74, 0x2c, - 0x65, 0x28, 0x50, 0x93, 0x38, 0xfa, 0x2e, 0x4c, 0xef, 0x5a, 0x94, 0xc4, 0xee, 0xcb, 0xf5, 0xf6, - 0x7c, 0xe3, 0xc9, 0x29, 0xef, 0x24, 0xee, 0xea, 0x36, 0x2d, 0x4a, 0x12, 0x8e, 0xc1, 0x9c, 0xda, - 0x4d, 0x2f, 0xe9, 0x5f, 0xf3, 0x6a, 0x9a, 0x04, 0xf2, 0x26, 0x9a, 0xb8, 0x27, 0xe3, 0x29, 0x0a, - 0xb9, 0x91, 0x74, 0xa4, 0xa2, 0x9f, 0x2c, 0x1c, 0x0f, 0x09, 0xc4, 0xf5, 0x6c, 0x33, 0x0a, 0x5e, - 0x58, 0x27, 0xd7, 0xf4, 0x76, 0xe4, 0xc6, 0x99, 0xe6, 0xd4, 0x78, 0x86, 0xae, 0xad, 0xf1, 0xfa, - 0xae, 0x8d, 0x97, 0x86, 0xec, 0x4f, 0xd9, 0x78, 0xeb, 0xf6, 0x57, 0x16, 0x1d, 0x98, 0xcd, 0x74, - 0xb6, 0x19, 0x2d, 0xb7, 0xf7, 0x92, 0xcd, 0xab, 0xe5, 0x93, 0x36, 0xfa, 0xdc, 0xea, 0xb5, 0x7c, - 0xcb, 0x89, 0x77, 0xc7, 0xbe, 0x84, 0x7c, 0xe4, 
0x61, 0x7f, 0x5b, 0xce, 0x2e, 0x68, 0xe9, 0x0d, - 0x66, 0x08, 0xb8, 0x9f, 0x14, 0x90, 0xfd, 0x1a, 0xf9, 0xb1, 0x30, 0x39, 0x7d, 0x8e, 0xc9, 0x96, - 0x1b, 0x6f, 0xb3, 0x45, 0xed, 0xb4, 0x86, 0xa2, 0x6a, 0xda, 0x54, 0x43, 0x51, 0x6f, 0x6a, 0xb7, - 0x1a, 0x8a, 0x7a, 0x4b, 0xab, 0x36, 0x14, 0x75, 0x4d, 0x7b, 0xb7, 0xa1, 0xa8, 0xef, 0x6a, 0xb7, - 0x1b, 0x8a, 0x7a, 0x5b, 0x5b, 0x6f, 0x28, 0xea, 0xba, 0x76, 0xa7, 0x72, 0x07, 0x4a, 0x49, 0x07, - 0xcc, 0xc2, 0x75, 0xbc, 0x62, 0x40, 0x6d, 0x47, 0xcc, 0xc2, 0x41, 0xbf, 0x3e, 0xa8, 0xfc, 0x92, - 0x83, 0xb9, 0x81, 0x3b, 0xc5, 0x4e, 0x20, 0x96, 0x02, 0x01, 0x61, 0x56, 0x12, 0x2b, 0x05, 0x72, - 0xa2, 0x14, 0x40, 0x40, 0xbf, 0x14, 0x98, 0x85, 0x31, 0xe1, 0xb4, 0x78, 0xcb, 0x6e, 0x34, 0x40, - 0x47, 0xd5, 0x80, 0x51, 0x74, 0x9d, 0xd8, 0x9f, 0x2b, 0xad, 0xdf, 0x1d, 0xae, 0xc4, 0x4a, 0xea, - 0x61, 0x72, 0x16, 0xfa, 0x43, 0x18, 0x63, 0x7f, 0x74, 0x29, 0x76, 0xef, 0x12, 0xf5, 0xda, 0xd9, - 0x5c, 0xba, 0xd4, 0x14, 0xd4, 0x95, 0xff, 0x19, 0x03, 0x2d, 0xe1, 0x92, 0x7e, 0xab, 0xd6, 0x64, - 0xff, 0x0c, 0x46, 0xe2, 0x67, 0x50, 0x83, 0x7c, 0xbf, 0xd4, 0xe4, 0xaa, 0x5f, 0x3b, 0xfd, 0x1c, - 0xa2, 0x12, 0x53, 0x0d, 0x65, 0x69, 0x59, 0x85, 0xe9, 0xd0, 0x0a, 0xf6, 0x49, 0xaa, 0xed, 0xc9, - 0xdb, 0x93, 0x53, 0x1c, 0x94, 0x6a, 0x7b, 0x0a, 0xfc, 0xb8, 0xce, 0x63, 0xbc, 0x35, 0xc8, 0x21, - 0xc9, 0xb6, 0xa7, 0xc0, 0x16, 0x1b, 0x18, 0xe7, 0xdb, 0xe7, 0x8b, 0x3c, 0xe6, 0x24, 0x7b, 0x91, - 0x6a, 0xba, 0x17, 0xf9, 0x21, 0x2c, 0x0a, 0x16, 0x3c, 0xeb, 0x8d, 0xc4, 0xfa, 0x5e, 0xab, 0x87, - 0xad, 0x4b, 0xd5, 0x9c, 0xe7, 0x18, 0x35, 0x86, 0x20, 0xa5, 0x3f, 0xf3, 0x5a, 0x3d, 0x9c, 0xb0, - 0x0c, 0x36, 0x83, 0x80, 0xb7, 0xd5, 0x68, 0xba, 0x01, 0x64, 0xc0, 0xb8, 0x0c, 0x4f, 0x05, 0x3e, - 0xbf, 0x11, 0x3f, 0xf5, 0x79, 0x18, 0x97, 0x91, 0xa4, 0x88, 0x90, 0xb1, 0x90, 0x87, 0x8e, 0x3a, - 0x4c, 0xc6, 0x7d, 0x3e, 0x8b, 0x1f, 0x13, 0xc3, 0xb6, 0xbe, 0xfa, 0x84, 0x18, 0x44, 0x6e, 0x82, - 0xee, 0x10, 0x16, 0x08, 0x9a, 0xd6, 0x5e, 0xc8, 0xb2, 0x74, 0x16, 0x2a, 0x8c, 0x49, 0xdc, 0xa0, - 0xc6, 0x21, 0x1b, 0x0c, 0x50, 0x63, 0xeb, 0xfa, 0xdf, 0xe6, 0x80, 0x07, 0x93, 0x78, 0xcb, 0x95, - 0xa9, 0xe8, 0x90, 0xd0, 0x72, 0x71, 0xa0, 0xc2, 0xd4, 0x78, 0x3a, 0x8c, 0xeb, 0x4d, 0x1b, 0x6d, - 0x15, 0x45, 0xf4, 0x1b, 0xb1, 0x16, 0x3d, 0x7c, 0xc0, 0xb9, 0x3e, 0xbe, 0x60, 0x2e, 0xd8, 0x27, - 0x01, 0x17, 0xbf, 0x86, 0x85, 0x13, 0x29, 0xf5, 0xfb, 0xb0, 0x64, 0x5b, 0x5e, 0x93, 0x1e, 0xba, - 0x9d, 0x78, 0x98, 0x64, 0xde, 0xdb, 0x65, 0x35, 0x6d, 0x0e, 0x37, 0xba, 0x60, 0x5b, 0xde, 0xf6, - 0xa1, 0xdb, 0xe9, 0x87, 0xc8, 0x0d, 0x81, 0xb0, 0x59, 0x82, 0x62, 0x7c, 0x83, 0xdc, 0x97, 0x55, - 0xfe, 0x51, 0x81, 0xe9, 0xd8, 0xf0, 0xe5, 0x77, 0xf3, 0xee, 0x62, 0xb6, 0x36, 0x9a, 0xb4, 0xb5, - 0x2b, 0x50, 0x4a, 0xb5, 0x81, 0xf9, 0x04, 0xa0, 0xb8, 0x17, 0x6f, 0x01, 0x57, 0x60, 0xc2, 0x23, - 0x2f, 0x63, 0x48, 0xbc, 0xe1, 0x5f, 0x60, 0x8b, 0x12, 0x27, 0xdb, 0xfa, 0xd5, 0x13, 0xac, 0xff, - 0x32, 0x14, 0x77, 0x03, 0xcb, 0xb3, 0x0f, 0x9a, 0xa1, 0x7f, 0x48, 0xf8, 0x13, 0x28, 0x9a, 0x05, - 0xbe, 0xb6, 0xc3, 0x96, 0x64, 0x3e, 0xc9, 0x0e, 0x25, 0x81, 0x3a, 0x81, 0xa8, 0x2c, 0x9f, 0x34, - 0xbb, 0xde, 0x66, 0x8c, 0x20, 0xf6, 0x6e, 0x26, 0xcf, 0x7a, 0x37, 0xda, 0x6b, 0xbe, 0x9b, 0x25, - 0x00, 0xa9, 0x94, 0x68, 0xb0, 0xe7, 0x4d, 0x95, 0xab, 0x52, 0x77, 0x52, 0x83, 0xa5, 0x68, 0xa4, - 0x54, 0xf9, 0xef, 0x11, 0xd0, 0x53, 0x89, 0xe0, 0xef, 0xdb, 0x6c, 0x62, 0x47, 0x3d, 0x76, 0xd6, - 0x51, 0x8f, 0xbf, 0xe6, 0x51, 0x27, 0x13, 0x65, 0xf5, 0xfc, 0x89, 0x72, 0x72, 0xd6, 0x90, 0x3f, - 0xff, 0xac, 0xe1, 0xb4, 0x1c, 0x1f, 0x4e, 0xc9, 0xf1, 0x2b, 0xbf, 0x28, 
0x30, 0x81, 0x6d, 0x8e, - 0xdf, 0xcd, 0x55, 0x6f, 0x41, 0x51, 0xf4, 0x2f, 0x39, 0x9f, 0x51, 0xe4, 0x53, 0x39, 0x21, 0x39, - 0x11, 0x5d, 0x4a, 0xe4, 0x51, 0x08, 0xfb, 0x3f, 0x74, 0x12, 0x1b, 0x1e, 0xc8, 0xde, 0x1d, 0xf2, - 0x1b, 0x43, 0x7e, 0xb7, 0x87, 0xcb, 0x9c, 0x44, 0x57, 0x0f, 0xd9, 0x47, 0xf3, 0x86, 0xd8, 0x62, - 0xdc, 0x30, 0xc7, 0x93, 0x86, 0x79, 0x1d, 0x22, 0x5f, 0x13, 0x0d, 0x2e, 0x54, 0xec, 0x34, 0x4e, - 0xca, 0x75, 0x39, 0xb4, 0x58, 0x00, 0x35, 0x72, 0x53, 0x79, 0xce, 0x85, 0x08, 0xef, 0x14, 0x33, - 0x6f, 0x38, 0xcb, 0xbc, 0x0b, 0xaf, 0x69, 0xde, 0x69, 0x0f, 0x58, 0x1c, 0xf4, 0x80, 0xd7, 0x41, - 0xb3, 0x5a, 0x01, 0xb1, 0x1c, 0x19, 0xb9, 0x88, 0x83, 0xde, 0x4f, 0x35, 0x27, 0xc5, 0xfa, 0x86, - 0x58, 0xae, 0xfc, 0xc3, 0x45, 0xd0, 0x64, 0xf0, 0x8a, 0x8c, 0x2e, 0xb6, 0x8d, 0x5c, 0x62, 0x1b, - 0x69, 0x6b, 0xbc, 0x78, 0xa6, 0x35, 0x8e, 0x9c, 0x62, 0x8d, 0xca, 0x89, 0xd6, 0x38, 0xfa, 0xff, - 0x77, 0x3c, 0x63, 0xc9, 0xfb, 0xfd, 0xed, 0xfc, 0x4b, 0xe5, 0x9f, 0x4a, 0x50, 0xdc, 0x10, 0xcd, - 0x4e, 0x3c, 0xae, 0x98, 0xd4, 0x5c, 0x52, 0xea, 0x3d, 0x30, 0xd2, 0xb1, 0x2d, 0x9a, 0x7d, 0xf3, - 0xaf, 0x2a, 0x66, 0x93, 0x11, 0x4e, 0x8e, 0xbe, 0x1f, 0x41, 0x29, 0x35, 0x3f, 0x52, 0x86, 0x6d, - 0xae, 0xd0, 0xc4, 0xac, 0x68, 0x15, 0xb4, 0x81, 0x01, 0x21, 0xf7, 0xc9, 0x25, 0x9a, 0x1c, 0x0a, - 0xd6, 0xa0, 0x98, 0x98, 0xbe, 0x0d, 0x7b, 0x3c, 0x05, 0x1a, 0x9b, 0xb8, 0x2d, 0x43, 0x21, 0xea, - 0x0e, 0x8b, 0x28, 0x9e, 0x37, 0x41, 0x2e, 0xf1, 0x3c, 0x3a, 0x56, 0x4e, 0x89, 0x99, 0x7e, 0x10, - 0x15, 0x52, 0x5f, 0xc1, 0xc2, 0xc9, 0x03, 0x12, 0x18, 0x6e, 0xa0, 0x30, 0x47, 0xb3, 0x47, 0x23, - 0x29, 0xde, 0xfd, 0x18, 0x71, 0x8e, 0x0f, 0x00, 0x62, 0xbc, 0x6b, 0x32, 0x5e, 0x30, 0xde, 0x3b, - 0xd8, 0x42, 0x63, 0xba, 0xa6, 0x19, 0x0f, 0xf9, 0x01, 0xc0, 0x34, 0x8f, 0x1e, 0x49, 0xae, 0x4f, - 0x60, 0xea, 0x80, 0x58, 0x41, 0xb8, 0x4b, 0xac, 0xf0, 0xbc, 0x53, 0x7f, 0x2d, 0xa2, 0x94, 0xdc, - 0xb2, 0xc6, 0x60, 0xa5, 0x73, 0x8c, 0xc1, 0x78, 0x6e, 0x94, 0x35, 0x06, 0xe3, 0x0d, 0x7b, 0x39, - 0xc0, 0x65, 0x35, 0xaa, 0xc6, 0x5d, 0x67, 0x28, 0x63, 0x19, 0x2f, 0x42, 0xe3, 0xd3, 0xa9, 0xa9, - 0xe4, 0x74, 0x2a, 0x59, 0x5f, 0xe9, 0xe9, 0xfa, 0xea, 0x7a, 0xdf, 0x8c, 0x5d, 0x87, 0x78, 0xa1, - 0x1b, 0xf6, 0xf0, 0xc3, 0x01, 0x1c, 0xb5, 0xe1, 0x7a, 0x5d, 0x2c, 0x67, 0x8e, 0x44, 0x66, 0x32, - 0x47, 0x22, 0x27, 0x4f, 0xc4, 0x66, 0xdf, 0xcc, 0x44, 0x6c, 0xee, 0xcd, 0x4c, 0xc4, 0xe6, 0x4f, - 0x99, 0x88, 0xed, 0xc0, 0x2c, 0xa7, 0x4a, 0x77, 0x9b, 0x8d, 0x21, 0x9f, 0xf7, 0x34, 0x92, 0xa7, - 0xfa, 0xcc, 0xa7, 0xce, 0xd9, 0x16, 0x4e, 0x9f, 0xb3, 0x0d, 0x31, 0xf8, 0x5a, 0x3c, 0x7b, 0xf0, - 0xf5, 0x14, 0x74, 0xce, 0x85, 0xf7, 0xbb, 0xf9, 0xd7, 0xad, 0xe2, 0x8b, 0x81, 0x95, 0x64, 0xf6, - 0x21, 0x80, 0x2c, 0x64, 0x3c, 0xe4, 0x7f, 0x9a, 0x1a, 0xd2, 0x3e, 0xb1, 0x68, 0x28, 0x56, 0x58, - 0x01, 0x1f, 0xe3, 0x27, 0x9a, 0x8f, 0x91, 0xa9, 0x2d, 0xa1, 0xa9, 0xcd, 0x47, 0x54, 0xbc, 0xd1, - 0x18, 0x99, 0x5c, 0x76, 0x09, 0x53, 0x3e, 0xa1, 0x84, 0xf9, 0x1c, 0xe6, 0x50, 0x48, 0xff, 0x69, - 0xcb, 0x6a, 0x78, 0x39, 0x4b, 0xfd, 0x81, 0xde, 0x1c, 0x35, 0x67, 0x18, 0xfd, 0x63, 0x49, 0x2e, - 0x6b, 0xd7, 0x6f, 0x60, 0x31, 0xc5, 0x37, 0xfe, 0xad, 0xcb, 0xca, 0xb0, 0x1f, 0x53, 0x24, 0x78, - 0xc7, 0x3e, 0x7a, 0xb9, 0x0b, 0x73, 0x5d, 0x4a, 0xb0, 0x59, 0x6c, 0x85, 0x2e, 0xbb, 0x32, 0x19, - 0xf4, 0x2e, 0xe3, 0xeb, 0x9a, 0xe9, 0x52, 0x52, 0x8b, 0x80, 0xa2, 0xe1, 0xda, 0x50, 0xd4, 0x11, - 0x4d, 0x69, 0x28, 0xea, 0x98, 0x36, 0xde, 0x50, 0xd4, 0x4b, 0x5a, 0xb9, 0xf2, 0x6f, 0x39, 0xc8, - 0x63, 0x8e, 0x7b, 0x46, 0xec, 0xcc, 0x8a, 0x5c, 0x17, 0x33, 0x23, 0xd7, 0x06, 0x14, 0xd0, 0xba, - 
0x45, 0x5c, 0x1f, 0x19, 0xf6, 0x9b, 0x53, 0x4e, 0x24, 0xe3, 0x56, 0xdc, 0x7d, 0x29, 0x28, 0x07, - 0x3d, 0x92, 0xf0, 0x5c, 0x0b, 0xa0, 0x72, 0x2f, 0x17, 0xb5, 0x9d, 0xc6, 0xf1, 0x77, 0xdd, 0xa9, - 0xfc, 0xbb, 0x02, 0x7a, 0x2d, 0x31, 0xce, 0x3c, 0x3b, 0x2b, 0xe8, 0x8f, 0x1a, 0xb2, 0xb3, 0x82, - 0x08, 0x9e, 0xc8, 0x0a, 0xb2, 0x8e, 0x64, 0x24, 0xf3, 0x48, 0xaa, 0x30, 0x2d, 0x31, 0xe3, 0xd9, - 0x98, 0x68, 0x98, 0x09, 0x50, 0xac, 0x05, 0x76, 0x05, 0x24, 0x07, 0x59, 0xa2, 0xf2, 0x66, 0x99, - 0x4c, 0x09, 0x78, 0x13, 0x2c, 0xb3, 0x25, 0xaa, 0x66, 0xb7, 0x44, 0x97, 0x20, 0x1f, 0xa5, 0x85, - 0x32, 0xce, 0x47, 0x0b, 0xe7, 0xfc, 0x76, 0xef, 0xcb, 0xe8, 0x9b, 0x43, 0x1e, 0x5b, 0x85, 0x57, - 0x2f, 0x60, 0x96, 0xb8, 0x7a, 0x42, 0xad, 0xf1, 0x5c, 0xce, 0x78, 0x28, 0xe1, 0xfe, 0x5e, 0x7e, - 0x9d, 0x18, 0x5b, 0x62, 0x7a, 0xa4, 0xaf, 0x22, 0xea, 0x9e, 0x69, 0xc9, 0x4b, 0xc0, 0x11, 0xcc, - 0x28, 0x9f, 0x38, 0x4d, 0x9c, 0x77, 0xe2, 0xc4, 0xe9, 0x06, 0xf2, 0xe7, 0xd2, 0x40, 0xfe, 0x1c, - 0x7d, 0x75, 0x3a, 0xae, 0xa9, 0x95, 0x7f, 0xce, 0xc1, 0x94, 0x19, 0x1f, 0x61, 0xbf, 0x29, 0xc3, - 0xca, 0x8c, 0xf7, 0x23, 0xd9, 0x9f, 0xbd, 0x64, 0x1f, 0x99, 0x92, 0x7d, 0x64, 0x95, 0x7f, 0xc9, - 0x01, 0x6c, 0xe3, 0x28, 0xfd, 0x4d, 0xe9, 0x9e, 0xcc, 0x28, 0x47, 0xd2, 0x19, 0x65, 0xb6, 0xba, - 0xe3, 0xd9, 0xea, 0xa6, 0xbe, 0xf9, 0xe5, 0x4e, 0x4b, 0xd5, 0xf2, 0x95, 0xef, 0x72, 0xa0, 0xd6, - 0x0e, 0x88, 0x7d, 0x48, 0xbb, 0xed, 0xf4, 0x26, 0x46, 0xfb, 0x9b, 0x78, 0x00, 0x63, 0x7b, 0x2d, - 0xeb, 0xc8, 0x0f, 0x50, 0xe5, 0xd2, 0xfa, 0xcd, 0xd3, 0x2b, 0x18, 0xc9, 0xf1, 0x21, 0xd2, 0x98, - 0x82, 0xb6, 0xff, 0xe1, 0xf5, 0x08, 0x96, 0x76, 0xfc, 0xc7, 0xe6, 0x5f, 0xfe, 0xf0, 0x53, 0xf9, - 0xc2, 0x8f, 0x3f, 0x95, 0x2f, 0xfc, 0xfa, 0x53, 0x39, 0xf7, 0xdd, 0xab, 0x72, 0xee, 0xef, 0x5f, - 0x95, 0x73, 0xff, 0xfa, 0xaa, 0x9c, 0xfb, 0xe1, 0x55, 0x39, 0xf7, 0x9f, 0xaf, 0xca, 0xb9, 0xff, - 0x7a, 0x55, 0xbe, 0xf0, 0xeb, 0xab, 0x72, 0xee, 0xfb, 0x9f, 0xcb, 0x17, 0x7e, 0xf8, 0xb9, 0x7c, - 0xe1, 0xc7, 0x9f, 0xcb, 0x17, 0xbe, 0xba, 0xbb, 0xef, 0xf7, 0x75, 0x70, 0xfd, 0x93, 0xff, 0x55, - 0xe1, 0xc3, 0xd8, 0xcf, 0xdd, 0x31, 0x74, 0x9a, 0x77, 0xfe, 0x2f, 0x00, 0x00, 0xff, 0xff, 0xb4, - 0xc4, 0xdc, 0x31, 0x4d, 0x33, 0x00, 0x00, + // 3737 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x3b, 0xcf, 0x77, 0xdb, 0x46, + 0x7a, 0xa6, 0x05, 0x49, 0xe0, 0x47, 0x8a, 0x82, 0xa0, 0x5f, 0x90, 0x22, 0x53, 0x32, 0x63, 0x67, + 0xe5, 0xc4, 0xa1, 0x62, 0xd9, 0x69, 0xb2, 0x49, 0xbb, 0xae, 0x44, 0xcb, 0x31, 0xb9, 0x8e, 0x9d, + 0x40, 0xda, 0x64, 0xdf, 0x36, 0x79, 0x7c, 0x10, 0x30, 0x92, 0x50, 0x81, 0x00, 0x8d, 0x01, 0x25, + 0x73, 0x5f, 0x0f, 0x7b, 0xe8, 0x7b, 0xbd, 0x6e, 0x6f, 0xfd, 0x13, 0xda, 0x5b, 0x2f, 0xbd, 0xf7, + 0xd0, 0x43, 0x4f, 0x7d, 0xb9, 0x75, 0x6f, 0x6d, 0x9c, 0x4b, 0x2f, 0x7d, 0x9b, 0xd7, 0x43, 0xcf, + 0x7d, 0xf3, 0xcd, 0x0c, 0x08, 0x80, 0xa0, 0x44, 0xb9, 0xc9, 0x21, 0x37, 0x71, 0xbe, 0x1f, 0xf3, + 0xcd, 0x37, 0xdf, 0x7c, 0x3f, 0x21, 0xb8, 0x1f, 0x91, 0x4e, 0x37, 0x08, 0x2d, 0x6f, 0x8b, 0x92, + 0xf0, 0x8c, 0x84, 0x5b, 0x56, 0xd7, 0xdd, 0xea, 0x92, 0x90, 0xba, 0x34, 0x22, 0xbe, 0x4d, 0xb6, + 0xce, 0xee, 0x6d, 0x91, 0x97, 0xc4, 0xee, 0x45, 0x6e, 0xe0, 0xd3, 0x7a, 0x37, 0x0c, 0xa2, 0x40, + 0xaf, 0x49, 0xa2, 0x3a, 0x27, 0xaa, 0x5b, 0x5d, 0xb7, 0x9e, 0x20, 0xaa, 0x9f, 0xdd, 0x5b, 0xad, + 0x1e, 0x07, 0xc1, 0xb1, 0x47, 0xb6, 0x90, 0xe2, 0xb0, 0x77, 0xb4, 0xe5, 0xf4, 0x42, 0x8b, 0x31, + 0xe1, 0x3c, 0x56, 0xd7, 0xb3, 0xf0, 0xc8, 0xed, 0x10, 0x1a, 0x59, 0x9d, 0xae, 0x40, 0xb8, 0xe9, + 0x90, 0x2e, 0xf1, 0x1d, 0xe2, 
0xdb, 0x2e, 0xa1, 0x5b, 0xc7, 0xc1, 0x71, 0x80, 0xeb, 0xf8, 0x97, + 0x40, 0xb9, 0x15, 0x0b, 0xcf, 0xa4, 0xb6, 0x83, 0x4e, 0x27, 0xf0, 0x99, 0xc0, 0x1d, 0x42, 0xa9, + 0x75, 0x4c, 0x72, 0xb1, 0x88, 0xdf, 0xeb, 0x50, 0x86, 0x74, 0x1e, 0x84, 0xa7, 0x47, 0x5e, 0x70, + 0x2e, 0xb0, 0x6e, 0xa7, 0xb0, 0x8e, 0x2c, 0xd7, 0xeb, 0x85, 0x64, 0x98, 0xd9, 0x5b, 0x29, 0x34, + 0xc9, 0x63, 0x18, 0xef, 0xed, 0x3c, 0xbd, 0xda, 0x5e, 0x60, 0x9f, 0x0e, 0xe3, 0xde, 0xc9, 0xc3, + 0x8d, 0xe5, 0xe4, 0xc7, 0x12, 0xa8, 0xef, 0x5c, 0x88, 0x9a, 0x39, 0xd2, 0xcf, 0x2e, 0x44, 0x8e, + 0x2c, 0x7a, 0x2a, 0x10, 0xdf, 0x1f, 0x8b, 0x6b, 0x9b, 0x51, 0xb4, 0xa3, 0x7e, 0x57, 0xca, 0x7d, + 0x37, 0x8f, 0xec, 0xc4, 0xa5, 0x51, 0x10, 0xf6, 0x87, 0x4f, 0xb9, 0x35, 0x86, 0xa5, 0xbd, 0xe8, + 0x91, 0x1e, 0xa1, 0x17, 0x9d, 0xb5, 0xd7, 0x75, 0xac, 0x28, 0xe7, 0x5e, 0xde, 0xcd, 0x43, 0x1e, + 0x79, 0x3d, 0xb5, 0x7f, 0x98, 0x84, 0xe2, 0xfe, 0x89, 0x15, 0x3a, 0x4d, 0xff, 0x28, 0xd0, 0x57, + 0x40, 0xa5, 0xec, 0x47, 0xdb, 0x75, 0x8c, 0xc2, 0x46, 0x61, 0x73, 0xd2, 0x9c, 0xc6, 0xdf, 0x4d, + 0x87, 0x81, 0x42, 0xcb, 0x3f, 0x26, 0x0c, 0x74, 0x7d, 0xa3, 0xb0, 0x39, 0x61, 0x4e, 0xe3, 0xef, + 0xa6, 0xa3, 0x2f, 0xc0, 0x64, 0x70, 0xee, 0x93, 0xd0, 0x98, 0xd8, 0x28, 0x6c, 0x16, 0x4d, 0xfe, + 0x43, 0xbf, 0x0b, 0x3a, 0x8d, 0x02, 0x8f, 0xf8, 0x6d, 0xea, 0xfa, 0x36, 0x69, 0x87, 0xc4, 0x27, + 0xe7, 0xc6, 0x14, 0x72, 0xd5, 0x38, 0x64, 0x9f, 0x01, 0x4c, 0xb6, 0xae, 0xef, 0x40, 0x89, 0x9f, + 0xa8, 0xcd, 0xcc, 0xdf, 0x98, 0xde, 0x28, 0x6c, 0x96, 0xb6, 0x57, 0xeb, 0xfc, 0x6d, 0xd4, 0xe5, + 0xdb, 0xa8, 0x1f, 0xc8, 0xb7, 0xb1, 0xab, 0xfc, 0xfe, 0x3f, 0xd6, 0x0b, 0x26, 0x70, 0x22, 0xb6, + 0xac, 0xff, 0x75, 0x01, 0x56, 0x42, 0xd2, 0xf5, 0x5c, 0x1b, 0x9f, 0x57, 0xdb, 0xf1, 0x5e, 0xb4, + 0x2d, 0xfb, 0xb4, 0xed, 0x91, 0x33, 0xe2, 0x19, 0x33, 0x1b, 0x13, 0x9b, 0xa5, 0xed, 0x66, 0xfd, + 0xf2, 0x17, 0x5b, 0x8f, 0xf5, 0x51, 0x37, 0x07, 0xec, 0x1e, 0x79, 0x2f, 0x76, 0xec, 0xd3, 0xa7, + 0x8c, 0xd7, 0x9e, 0x1f, 0x85, 0x7d, 0x73, 0x29, 0xcc, 0x05, 0xea, 0x16, 0x94, 0xf1, 0xf6, 0xda, + 0x34, 0xb2, 0x22, 0x42, 0x8d, 0x39, 0xdc, 0xf8, 0x17, 0x57, 0xdb, 0xf8, 0x73, 0xc6, 0x61, 0x1f, + 0x19, 0xf0, 0xdd, 0x4a, 0x2f, 0x06, 0x2b, 0xab, 0x4d, 0x78, 0xe3, 0x02, 0xc9, 0x74, 0x0d, 0x26, + 0x4e, 0x49, 0x1f, 0x2f, 0xb0, 0x68, 0xb2, 0x3f, 0xd9, 0x0d, 0x9d, 0x59, 0x5e, 0x8f, 0x88, 0x9b, + 0xe3, 0x3f, 0x3e, 0xba, 0xfe, 0x61, 0x61, 0xd5, 0x07, 0x2d, 0xbb, 0x57, 0x92, 0x7e, 0x92, 0xd3, + 0x3f, 0x4a, 0xd2, 0x97, 0xb6, 0xeb, 0xe3, 0x1c, 0x66, 0xc0, 0x36, 0xb1, 0x5f, 0x4b, 0x51, 0x15, + 0x6d, 0xb2, 0xa5, 0xa8, 0x93, 0xda, 0x54, 0x4b, 0x51, 0x55, 0xad, 0xd8, 0x52, 0xd4, 0xa2, 0x06, + 0x2d, 0x45, 0x05, 0xad, 0xd4, 0x52, 0xd4, 0x92, 0x56, 0x6e, 0x29, 0x6a, 0x59, 0x9b, 0x69, 0x29, + 0x6a, 0x45, 0x9b, 0x6d, 0x29, 0xea, 0xac, 0xa6, 0xb5, 0x14, 0x55, 0xd3, 0xe6, 0x6a, 0xff, 0xfb, + 0x16, 0x2c, 0x7e, 0x29, 0x2c, 0x79, 0x4f, 0xba, 0x62, 0xb4, 0xdb, 0x9b, 0x50, 0xf6, 0xad, 0x0e, + 0xa1, 0x5d, 0xcb, 0x26, 0xd2, 0x76, 0x8b, 0x66, 0x29, 0x5e, 0x6b, 0x3a, 0xfa, 0x3a, 0x94, 0xe2, + 0xf7, 0x2b, 0x4c, 0xb8, 0x68, 0x82, 0x5c, 0x6a, 0x3a, 0x7a, 0x1d, 0xe6, 0xbb, 0x56, 0x48, 0xfc, + 0xa8, 0x9d, 0x62, 0xc5, 0x6d, 0x7a, 0x8e, 0x83, 0x9e, 0x25, 0x18, 0xde, 0x05, 0x5d, 0xe0, 0x27, + 0xf9, 0x2a, 0x88, 0xae, 0x71, 0xc8, 0x97, 0x03, 0xee, 0x35, 0x98, 0x11, 0xd8, 0x61, 0xcf, 0x67, + 0x88, 0x93, 0x5c, 0x44, 0xbe, 0x68, 0xf6, 0xfc, 0x94, 0x04, 0xae, 0xef, 0x46, 0xae, 0x15, 0x11, + 0x7c, 0x88, 0x53, 0x78, 0x67, 0x42, 0x82, 0xa6, 0x84, 0x34, 0x1d, 0xfd, 0xe7, 0xb0, 0x62, 0x07, + 0x9d, 0xae, 0x47, 0xd0, 0xdc, 0xc9, 0x19, 0xa3, 0x3c, 
0xb4, 0x22, 0xfb, 0x84, 0x51, 0x4d, 0x23, + 0xd5, 0xd2, 0x00, 0x61, 0x8f, 0xc1, 0x77, 0x19, 0xb8, 0xe9, 0xe8, 0x37, 0x00, 0xd0, 0x89, 0xa1, + 0x55, 0x19, 0x45, 0x94, 0xa5, 0xc8, 0x56, 0xf0, 0xd6, 0xd8, 0xd9, 0x06, 0xce, 0xae, 0xdf, 0x25, + 0xa8, 0x12, 0x03, 0xf8, 0xd9, 0x24, 0xe4, 0xa0, 0xdf, 0x25, 0x4c, 0x21, 0xfa, 0xd7, 0xb0, 0x1a, + 0x63, 0xc7, 0x21, 0x12, 0xdf, 0x71, 0xd0, 0x8b, 0x8c, 0x12, 0x9a, 0xcc, 0xca, 0xd0, 0x53, 0x7e, + 0x24, 0xc2, 0xe0, 0xae, 0xf2, 0x77, 0xec, 0x25, 0x1b, 0xe7, 0xd9, 0x9b, 0x3d, 0xe0, 0x0c, 0xf4, + 0xcf, 0x61, 0x21, 0x66, 0xcf, 0x94, 0x27, 0x19, 0x97, 0xc7, 0x63, 0x1c, 0x9f, 0xc4, 0xec, 0xc5, + 0x2c, 0x0f, 0xe1, 0x86, 0x43, 0x8e, 0xac, 0x9e, 0x97, 0xb8, 0x3c, 0xee, 0xd4, 0x05, 0xef, 0x99, + 0xf1, 0x78, 0xaf, 0x0a, 0x2e, 0xf2, 0xa2, 0x0f, 0x2c, 0x7a, 0x2a, 0xf7, 0x78, 0x07, 0x74, 0xcf, + 0xa2, 0x91, 0xb8, 0x17, 0xe4, 0xee, 0x3a, 0xc6, 0x1c, 0x5e, 0xcb, 0x2c, 0x83, 0xe0, 0x85, 0x30, + 0x8a, 0xa6, 0xa3, 0xbf, 0x0b, 0xf3, 0x88, 0x7c, 0xe4, 0x86, 0x31, 0x89, 0xeb, 0x18, 0x3a, 0x62, + 0x6b, 0x0c, 0xf4, 0x98, 0x41, 0x90, 0xa4, 0xe9, 0xe8, 0xbf, 0x84, 0x37, 0x11, 0x3d, 0x2d, 0x3c, + 0x8d, 0xac, 0x90, 0xd9, 0x4c, 0x4c, 0x3e, 0x8f, 0xe4, 0x55, 0x86, 0x9a, 0x94, 0x70, 0x9f, 0xe3, + 0x49, 0x66, 0x0f, 0x01, 0x90, 0x92, 0x7b, 0xde, 0x85, 0x31, 0x3d, 0x6f, 0x11, 0x69, 0xd0, 0xf1, + 0xb6, 0x00, 0x25, 0x6c, 0x27, 0x1d, 0xf8, 0xe2, 0x98, 0x6c, 0x2a, 0x8c, 0xf2, 0x57, 0x03, 0x27, + 0xbe, 0x0d, 0x8b, 0xe9, 0x43, 0x9d, 0x31, 0xaf, 0x12, 0xf8, 0xc6, 0x12, 0x9e, 0x65, 0xfe, 0x3c, + 0x71, 0x8e, 0x2f, 0x38, 0x48, 0x7f, 0x0c, 0x1b, 0x19, 0x45, 0xd8, 0x27, 0xc4, 0xe9, 0x79, 0x49, + 0x55, 0x2c, 0x23, 0xf9, 0x5a, 0x92, 0x7c, 0x5f, 0x62, 0x49, 0x45, 0xec, 0x42, 0xf5, 0x12, 0x85, + 0x1a, 0xc8, 0x65, 0xf5, 0x7c, 0xb4, 0x32, 0xf7, 0xb3, 0xf2, 0x4b, 0x8b, 0x5a, 0x19, 0xcf, 0xa2, + 0x52, 0x07, 0x94, 0xa6, 0x34, 0xa4, 0x14, 0x2b, 0x62, 0x0e, 0x38, 0x32, 0x56, 0xd1, 0x45, 0xa7, + 0x68, 0x76, 0x38, 0x28, 0xf5, 0x28, 0x53, 0x87, 0xc1, 0xeb, 0x79, 0x63, 0xcc, 0xeb, 0x59, 0xce, + 0x39, 0x2a, 0xde, 0x93, 0x05, 0x6b, 0xa3, 0x74, 0x8e, 0x1b, 0xac, 0x8d, 0xb9, 0xc1, 0x4a, 0xee, + 0x8d, 0xe0, 0x16, 0x21, 0xdc, 0x4e, 0x6f, 0x11, 0x84, 0xee, 0xb1, 0xeb, 0x5b, 0x5e, 0x76, 0xaf, + 0xea, 0x98, 0x7b, 0xdd, 0x4c, 0xee, 0xf5, 0x5c, 0x30, 0x4b, 0xef, 0xf9, 0x01, 0x18, 0xe9, 0x3d, + 0x43, 0xf2, 0xa2, 0x47, 0x28, 0x5e, 0xfe, 0x3a, 0xba, 0xbf, 0xc5, 0x24, 0x13, 0x93, 0x43, 0x9b, + 0x8e, 0xfe, 0x55, 0xd2, 0x63, 0xca, 0xf4, 0xd0, 0x78, 0xb4, 0x51, 0xd8, 0xac, 0x8c, 0x08, 0x97, + 0x98, 0x56, 0xb2, 0x40, 0x99, 0x72, 0x1e, 0xfd, 0x2e, 0x49, 0x78, 0x58, 0xb1, 0xa2, 0x3f, 0xcf, + 0xaa, 0x82, 0xf6, 0x8e, 0x8f, 0x99, 0x58, 0x76, 0xe0, 0x47, 0xae, 0xdf, 0x23, 0x6d, 0x8b, 0xb6, + 0x59, 0x7a, 0xb5, 0xb7, 0x51, 0xd8, 0x54, 0xcd, 0x8d, 0x94, 0x52, 0x39, 0x6a, 0x43, 0x60, 0xee, + 0xd0, 0x67, 0xe4, 0x7c, 0xf8, 0xc9, 0x88, 0x6c, 0xb5, 0x4d, 0xdd, 0xdf, 0x92, 0xf6, 0x61, 0x9f, + 0x25, 0x2e, 0x8f, 0x87, 0x9f, 0xcc, 0x13, 0x8e, 0xb5, 0xef, 0xfe, 0x96, 0xec, 0x32, 0x1c, 0xfd, + 0x0e, 0x68, 0xb6, 0xe5, 0xdb, 0xc4, 0x93, 0x8a, 0x22, 0x8e, 0x71, 0x03, 0x65, 0x98, 0xe5, 0xeb, + 0xa6, 0x5c, 0xd6, 0xdf, 0x86, 0xb9, 0x34, 0x2a, 0xd3, 0xe9, 0x06, 0xea, 0x34, 0x8d, 0xdb, 0x44, + 0x5c, 0x1a, 0xb9, 0xf6, 0x69, 0xbf, 0x9d, 0x88, 0x52, 0x37, 0x39, 0x2e, 0x07, 0x1c, 0xc4, 0xb1, + 0xea, 0x18, 0x36, 0x04, 0xae, 0x34, 0x8b, 0x76, 0x14, 0xb4, 0x07, 0x1e, 0x8d, 0x3d, 0xbe, 0xda, + 0x78, 0x8f, 0x6f, 0x8d, 0x33, 0x92, 0x26, 0x71, 0x10, 0xec, 0x4b, 0x1f, 0xc7, 0x5e, 0xa1, 0x01, + 0xd3, 0xf2, 0xdd, 0xbd, 0xc9, 0x73, 0x63, 0xf1, 0x53, 0xff, 0x15, 0x2c, 0x85, 
0x24, 0x0a, 0xfb, + 0x22, 0x6e, 0x7b, 0x6d, 0xd7, 0x8f, 0x48, 0x78, 0x66, 0x79, 0xc6, 0xad, 0xf1, 0x36, 0x5e, 0x40, + 0x72, 0x1e, 0xdb, 0xbd, 0xa6, 0x20, 0x1e, 0xb0, 0xed, 0x58, 0x2f, 0xdd, 0x4e, 0xaf, 0x33, 0x60, + 0x7b, 0xfb, 0x2a, 0x6c, 0x3f, 0xe5, 0xd4, 0x31, 0xdb, 0x07, 0x59, 0xb6, 0xe2, 0x18, 0xd4, 0x78, + 0x0b, 0x8f, 0x95, 0xa2, 0x12, 0xee, 0x84, 0xea, 0x1f, 0xb1, 0xe4, 0x9a, 0x51, 0x1d, 0x5a, 0xf6, + 0x69, 0x70, 0x74, 0xd4, 0xb6, 0x03, 0x72, 0x74, 0xe4, 0xda, 0x2e, 0xf1, 0x23, 0xe3, 0x67, 0x1b, + 0x85, 0xcd, 0x82, 0xb9, 0x8c, 0x08, 0xbb, 0x1c, 0xde, 0x18, 0x80, 0xf5, 0x0e, 0xd4, 0x72, 0x12, + 0x04, 0xf2, 0xb2, 0xeb, 0x72, 0x71, 0xf9, 0x33, 0xde, 0x1c, 0xf3, 0x19, 0xaf, 0x0f, 0x65, 0x0a, + 0x7b, 0x31, 0x27, 0x7c, 0xc4, 0x8f, 0x60, 0x9d, 0x8b, 0xea, 0x07, 0x7e, 0x1b, 0xff, 0xb2, 0x0e, + 0x3d, 0xd2, 0x26, 0x61, 0x18, 0x84, 0xf8, 0x2e, 0xa9, 0x71, 0x67, 0x63, 0x62, 0xb3, 0x68, 0xbe, + 0x81, 0xc0, 0x67, 0x81, 0x6f, 0x4a, 0xa4, 0x3d, 0x86, 0xc3, 0x9e, 0x1c, 0xd5, 0x37, 0x41, 0x3b, + 0xb1, 0x28, 0xa7, 0x6f, 0x77, 0x03, 0xcf, 0xb5, 0xfb, 0xc6, 0xdb, 0x68, 0xda, 0x95, 0x13, 0x8b, + 0x22, 0xc5, 0x67, 0xb8, 0xaa, 0xbf, 0x09, 0x33, 0x76, 0x18, 0xf8, 0xb1, 0xfd, 0x19, 0xef, 0xa0, + 0xa5, 0x96, 0xd9, 0xa2, 0xb4, 0x25, 0x96, 0xa2, 0x52, 0xf7, 0x98, 0x79, 0x2f, 0x3b, 0xe8, 0xf9, + 0x91, 0x51, 0xc7, 0xd7, 0x55, 0xe2, 0x6b, 0x0d, 0xb6, 0xa4, 0xdf, 0x86, 0x8a, 0x65, 0x47, 0xee, + 0x99, 0x1b, 0xf5, 0x05, 0xd2, 0x27, 0x88, 0x34, 0x23, 0x57, 0x39, 0xda, 0x36, 0x2c, 0xda, 0x27, + 0xae, 0xe7, 0x24, 0x54, 0xc9, 0xb1, 0x9f, 0xf0, 0x10, 0x89, 0xc0, 0x58, 0x37, 0x9c, 0x66, 0x13, + 0xb4, 0x1e, 0x25, 0x21, 0x2a, 0x3a, 0x14, 0xe8, 0x4d, 0x44, 0xaf, 0xb0, 0x75, 0xa6, 0xb6, 0x90, + 0x63, 0xee, 0xc0, 0x0d, 0xf9, 0x3e, 0xc5, 0x73, 0x25, 0x2f, 0x23, 0x12, 0x0e, 0x04, 0x6f, 0xf1, + 0x18, 0x28, 0x90, 0x1a, 0x88, 0xb3, 0x27, 0x50, 0x62, 0x01, 0xc5, 0x51, 0x33, 0xa4, 0xbf, 0xe4, + 0x02, 0x72, 0x60, 0x9a, 0xe6, 0x26, 0x94, 0x45, 0xfa, 0xc0, 0x51, 0x3f, 0xe5, 0xea, 0xe1, 0x6b, + 0x1c, 0xe5, 0x73, 0x98, 0xb3, 0x7a, 0x51, 0xd0, 0x0e, 0x09, 0x25, 0x51, 0xbb, 0x1b, 0xb8, 0x7e, + 0x44, 0x8d, 0xfb, 0x68, 0x34, 0xb7, 0x07, 0x1e, 0x96, 0xb9, 0xd6, 0xb8, 0xfc, 0x3f, 0xbb, 0x57, + 0x37, 0x19, 0xf6, 0x67, 0x88, 0x6c, 0xce, 0x32, 0xfa, 0xc4, 0x82, 0xfe, 0x57, 0x30, 0x47, 0x89, + 0x15, 0xda, 0x27, 0xec, 0x0d, 0x84, 0xee, 0x61, 0x8f, 0xf9, 0xbd, 0x07, 0x58, 0xb0, 0x3d, 0x1f, + 0xa7, 0xc6, 0xc9, 0xad, 0x46, 0xea, 0xfb, 0xc8, 0x72, 0x27, 0xe6, 0xc8, 0x2b, 0x38, 0x8d, 0x66, + 0x96, 0xf5, 0x2f, 0x41, 0xe9, 0x90, 0x4e, 0x60, 0xbc, 0x8f, 0x1b, 0x36, 0x5e, 0x7f, 0xc3, 0x4f, + 0x49, 0x27, 0xe0, 0x9b, 0x20, 0x43, 0xfd, 0x6b, 0x98, 0x13, 0x69, 0x93, 0xf0, 0xeb, 0x2e, 0xa1, + 0xc6, 0x9f, 0xa0, 0xa6, 0xde, 0xcb, 0xdd, 0x45, 0x78, 0x7f, 0xb6, 0x83, 0x48, 0xaa, 0x9e, 0x48, + 0x3a, 0x53, 0x3b, 0xcb, 0xac, 0xe8, 0xf7, 0x61, 0x49, 0xe4, 0xa9, 0xb1, 0x01, 0x8a, 0xa2, 0xe6, + 0x03, 0x34, 0xfc, 0x79, 0x84, 0xc6, 0x22, 0xf2, 0xe2, 0xe6, 0x2f, 0x60, 0x76, 0x80, 0xce, 0x4a, + 0x63, 0x6a, 0x7c, 0x88, 0x12, 0x6d, 0x8f, 0x73, 0xee, 0x98, 0x19, 0x2b, 0x28, 0xa9, 0x59, 0x21, + 0xa9, 0xdf, 0xa9, 0x6c, 0x84, 0x89, 0x92, 0x75, 0x2d, 0x3f, 0xbf, 0x6a, 0x36, 0x62, 0xf6, 0xb2, + 0x4e, 0xe5, 0x01, 0x2c, 0x0f, 0x65, 0xe8, 0xd1, 0x4b, 0x3c, 0xf5, 0x47, 0xdc, 0xac, 0xd3, 0x59, + 0xfa, 0xc1, 0x4b, 0x76, 0xea, 0x07, 0xb0, 0x84, 0x6d, 0x80, 0x76, 0x14, 0x5a, 0x3e, 0x75, 0x13, + 0x8f, 0xf5, 0x63, 0x24, 0x5a, 0x40, 0xe8, 0x41, 0x0c, 0xe4, 0x96, 0xfe, 0x09, 0x54, 0xd2, 0x75, + 0x94, 0xf1, 0xa7, 0x63, 0x1e, 0x60, 0x86, 0x24, 0xab, 0x27, 0x7d, 0x0b, 0x16, 0x7c, 0x72, 0x3e, + 0x7c, 
0x4f, 0x7f, 0xc6, 0x8b, 0x5a, 0x9f, 0x9c, 0x67, 0x6e, 0xe9, 0x29, 0x94, 0x45, 0x09, 0x8a, + 0x2d, 0x3a, 0xe3, 0x17, 0xb8, 0xef, 0x9d, 0xdc, 0x2b, 0x42, 0x0c, 0x6e, 0x32, 0x76, 0x14, 0x84, + 0x0d, 0xf6, 0x53, 0x16, 0xb4, 0xf8, 0x43, 0xff, 0x10, 0x8c, 0xa1, 0x82, 0x56, 0xe6, 0xf3, 0x0f, + 0x79, 0x7d, 0x9a, 0xa9, 0x6a, 0x65, 0x4a, 0x7f, 0x1f, 0x96, 0x6c, 0x2f, 0xa0, 0x42, 0x6f, 0x47, + 0xcc, 0x73, 0x89, 0x02, 0xea, 0xcf, 0x85, 0x93, 0x63, 0xd0, 0x03, 0x01, 0x14, 0x45, 0xd4, 0x07, + 0x60, 0x70, 0xa2, 0x33, 0x97, 0xba, 0x87, 0xae, 0xc7, 0xfc, 0xa8, 0x24, 0xdb, 0x41, 0xb2, 0x45, + 0x84, 0x7f, 0x11, 0x83, 0x05, 0xe1, 0x43, 0x00, 0xb1, 0x1b, 0xd3, 0xf5, 0xee, 0xb8, 0x15, 0x10, + 0x97, 0x81, 0xe9, 0x79, 0x0f, 0xd6, 0xf3, 0x77, 0x16, 0xe5, 0x37, 0x71, 0x8c, 0x06, 0x86, 0x8e, + 0xb5, 0x1c, 0x01, 0x1a, 0x12, 0x47, 0x3f, 0x84, 0xf9, 0x43, 0x8b, 0x92, 0xc4, 0x7d, 0xb9, 0xfe, + 0x51, 0x60, 0x3c, 0xbd, 0xe0, 0x9d, 0x24, 0x5d, 0xdd, 0xae, 0x45, 0x49, 0xca, 0x31, 0x98, 0x73, + 0x87, 0xd9, 0x25, 0xfd, 0x2b, 0x5e, 0x4d, 0x93, 0x50, 0xde, 0x44, 0x1b, 0xcf, 0x64, 0x3c, 0xc3, + 0x4d, 0xde, 0x4e, 0x3b, 0x52, 0xd1, 0x72, 0x15, 0x8e, 0x87, 0x84, 0xe2, 0x7a, 0xf6, 0x19, 0x05, + 0x2f, 0xac, 0xd3, 0x6b, 0x7a, 0x27, 0x76, 0xe3, 0x4c, 0x72, 0x6a, 0x3c, 0x47, 0xd7, 0xd6, 0x7a, + 0x7d, 0xd7, 0xc6, 0x4b, 0x43, 0xf6, 0xa7, 0x6c, 0x84, 0xf5, 0x06, 0x2b, 0xab, 0x0e, 0x2c, 0xe6, + 0x3a, 0xdb, 0x9c, 0x16, 0xd8, 0xfb, 0xe9, 0x16, 0xd6, 0xfa, 0xa8, 0x83, 0x7e, 0x66, 0xf5, 0xbd, + 0xc0, 0x72, 0x92, 0x3d, 0xb2, 0x5f, 0x43, 0x31, 0xf6, 0xb0, 0x3f, 0x2c, 0x67, 0x17, 0xb4, 0xec, + 0x01, 0x73, 0x36, 0x78, 0x98, 0xde, 0x20, 0xff, 0x35, 0x72, 0xb5, 0xb0, 0x7d, 0x06, 0x1c, 0xd3, + 0x8d, 0x37, 0xde, 0x6c, 0xcb, 0x34, 0xd5, 0x5a, 0x8a, 0x7a, 0x57, 0x7b, 0xb7, 0xa5, 0xa8, 0xef, + 0x6a, 0xf5, 0x96, 0xa2, 0x6e, 0x69, 0xef, 0xb5, 0x14, 0xf5, 0x3d, 0xed, 0x5e, 0x4b, 0x51, 0xef, + 0x69, 0xdb, 0x2d, 0x45, 0xdd, 0xd6, 0xee, 0xd7, 0xee, 0x43, 0x25, 0xed, 0x80, 0x59, 0xb8, 0x4e, + 0x56, 0x0c, 0x28, 0xed, 0x84, 0x59, 0x3a, 0x19, 0xd4, 0x07, 0xb5, 0x3f, 0x16, 0x60, 0x69, 0xe8, + 0x4e, 0xb1, 0x1f, 0x88, 0xa5, 0x40, 0x48, 0x98, 0x95, 0x24, 0x4a, 0x81, 0x82, 0x28, 0x05, 0x10, + 0x30, 0x28, 0x05, 0x16, 0x61, 0x4a, 0x38, 0x2d, 0xde, 0xb2, 0x9b, 0x0c, 0xd1, 0x51, 0xb5, 0x60, + 0x12, 0x5d, 0x27, 0xf6, 0xe7, 0x2a, 0xdb, 0x0f, 0xc6, 0x2b, 0xb1, 0xd2, 0x72, 0x98, 0x9c, 0x85, + 0xfe, 0x18, 0xa6, 0xd8, 0x1f, 0x3d, 0x8a, 0xdd, 0xbb, 0x54, 0xbd, 0x76, 0x39, 0x97, 0x1e, 0x35, + 0x05, 0x75, 0xed, 0x7f, 0xa6, 0x40, 0x4b, 0xb9, 0xa4, 0x1f, 0xaa, 0x35, 0x39, 0xd0, 0xc1, 0x44, + 0x52, 0x07, 0x0d, 0x28, 0x0e, 0x4a, 0x4d, 0x2e, 0xfa, 0x5b, 0x17, 0xeb, 0x21, 0x2e, 0x31, 0xd5, + 0x48, 0x96, 0x96, 0x75, 0x98, 0x8f, 0xac, 0xf0, 0x98, 0x64, 0xda, 0x9e, 0xbc, 0x3d, 0x39, 0xc7, + 0x41, 0x99, 0xb6, 0xa7, 0xc0, 0x4f, 0xca, 0x3c, 0xc5, 0x5b, 0x83, 0x1c, 0x92, 0x6e, 0x7b, 0x0a, + 0x6c, 0x71, 0x80, 0x69, 0x7e, 0x7c, 0xbe, 0xc8, 0x63, 0x4e, 0xba, 0x17, 0xa9, 0x66, 0x7b, 0x91, + 0x1f, 0xc3, 0xaa, 0x60, 0xc1, 0xb3, 0xde, 0x78, 0xdb, 0xc0, 0xf7, 0xfa, 0xd8, 0xba, 0x54, 0xcd, + 0x65, 0x8e, 0xd1, 0x60, 0x08, 0x72, 0xf7, 0xe7, 0xbe, 0xd7, 0xc7, 0x21, 0xc4, 0x70, 0x33, 0x08, + 0x78, 0x5b, 0x8d, 0x66, 0x1b, 0x40, 0x06, 0x4c, 0xcb, 0xf0, 0x54, 0xe2, 0x23, 0x0e, 0xf1, 0x53, + 0x5f, 0x86, 0x69, 0x19, 0x49, 0xca, 0x08, 0x99, 0x8a, 0x78, 0xe8, 0x68, 0xc2, 0x6c, 0xd2, 0xe7, + 0xb3, 0xf8, 0x31, 0x33, 0x6e, 0xeb, 0x6b, 0x40, 0x88, 0x41, 0xe4, 0x2e, 0xe8, 0x0e, 0x61, 0x81, + 0xa0, 0x6d, 0x1d, 0x45, 0x2c, 0x4b, 0x67, 0xa1, 0xc2, 0x98, 0xc5, 0x03, 0x6a, 0x1c, 0xb2, 0xc3, + 0x00, 0x0d, 0xb6, 0xae, 0xff, 
0x6d, 0x01, 0x78, 0x30, 0x49, 0xb6, 0x5c, 0x99, 0x88, 0x0e, 0x89, + 0x2c, 0xd7, 0xa3, 0x86, 0x86, 0x62, 0x3c, 0x1b, 0xc7, 0xf5, 0x66, 0x8d, 0xb6, 0x8e, 0x5b, 0x0c, + 0x1a, 0xb1, 0x16, 0x3d, 0x7d, 0xc4, 0xb9, 0x3e, 0xb9, 0x66, 0xae, 0xd8, 0xa3, 0x80, 0xab, 0x5f, + 0xc1, 0xca, 0x48, 0x4a, 0xfd, 0x21, 0xac, 0xd9, 0x96, 0xdf, 0xa6, 0xa7, 0x6e, 0x37, 0x19, 0x26, + 0x99, 0xf7, 0x76, 0x59, 0x4d, 0x5b, 0xc0, 0x83, 0xae, 0xd8, 0x96, 0xbf, 0x7f, 0xea, 0x76, 0x07, + 0x21, 0x72, 0x47, 0x20, 0xec, 0x56, 0xa0, 0x9c, 0x3c, 0x20, 0xf7, 0x65, 0xb5, 0x7f, 0x52, 0x60, + 0x3e, 0x31, 0x0c, 0xf9, 0xc9, 0xbc, 0xbb, 0x84, 0xad, 0x4d, 0xa6, 0x6d, 0xed, 0x16, 0x54, 0x32, + 0x6d, 0x60, 0x3e, 0x01, 0x28, 0x1f, 0x25, 0x5b, 0xc0, 0x35, 0x98, 0xf1, 0xc9, 0xcb, 0x04, 0x12, + 0x6f, 0xf8, 0x97, 0xd8, 0xa2, 0xc4, 0xc9, 0xb7, 0x7e, 0x75, 0x84, 0xf5, 0xdf, 0x84, 0xf2, 0x61, + 0x68, 0xf9, 0xf6, 0x49, 0x3b, 0x0a, 0x4e, 0x09, 0x7f, 0x02, 0x65, 0xb3, 0xc4, 0xd7, 0x0e, 0xd8, + 0x92, 0xcc, 0x27, 0x99, 0x52, 0x52, 0xa8, 0x33, 0x88, 0xca, 0xf2, 0x49, 0xb3, 0xe7, 0xef, 0x26, + 0x08, 0x12, 0xef, 0x66, 0xf6, 0xb2, 0x77, 0xa3, 0xbd, 0xe6, 0xbb, 0x59, 0x03, 0x90, 0x42, 0x89, + 0x06, 0x7b, 0xd1, 0x54, 0xb9, 0x28, 0x4d, 0x27, 0x33, 0x5e, 0x8a, 0x07, 0x4b, 0xb5, 0xff, 0x9e, + 0x00, 0x3d, 0x93, 0x08, 0xfe, 0xb4, 0xcd, 0x26, 0xa1, 0xea, 0xa9, 0xcb, 0x54, 0x3d, 0xfd, 0x9a, + 0xaa, 0x4e, 0x27, 0xca, 0xea, 0xd5, 0x13, 0xe5, 0xf4, 0xac, 0xa1, 0x78, 0xf5, 0x59, 0xc3, 0x45, + 0x39, 0x3e, 0x5c, 0x90, 0xe3, 0xd7, 0xfe, 0xa8, 0xc0, 0x0c, 0xb6, 0x39, 0x7e, 0x32, 0x57, 0xbd, + 0x07, 0x65, 0xd1, 0xbf, 0xe4, 0x7c, 0x26, 0x91, 0x4f, 0x6d, 0x44, 0x72, 0x22, 0xba, 0x94, 0xc8, + 0xa3, 0x14, 0x0d, 0x7e, 0xe8, 0x24, 0x31, 0x3c, 0x90, 0xbd, 0x3b, 0xe4, 0x37, 0x85, 0xfc, 0xee, + 0x8d, 0x97, 0x39, 0x89, 0xae, 0x1e, 0xb2, 0x8f, 0xe7, 0x0d, 0x89, 0xc5, 0xa4, 0x61, 0x4e, 0xa7, + 0x0d, 0xf3, 0x0e, 0xc4, 0xbe, 0x26, 0x1e, 0x5c, 0xa8, 0xd8, 0x69, 0x9c, 0x95, 0xeb, 0x72, 0x68, + 0xb1, 0x02, 0x6a, 0xec, 0xa6, 0x8a, 0x9c, 0x0b, 0x11, 0xde, 0x29, 0x61, 0xde, 0x70, 0x99, 0x79, + 0x97, 0x5e, 0xd3, 0xbc, 0xb3, 0x1e, 0xb0, 0x3c, 0xec, 0x01, 0xef, 0x80, 0x66, 0x79, 0x21, 0xb1, + 0x1c, 0x19, 0xb9, 0x88, 0x83, 0xde, 0x4f, 0x35, 0x67, 0xc5, 0xfa, 0x8e, 0x58, 0xae, 0xfd, 0xe3, + 0x75, 0xd0, 0x64, 0xf0, 0x8a, 0x8d, 0x2e, 0x71, 0x8c, 0x42, 0xea, 0x18, 0x59, 0x6b, 0xbc, 0x7e, + 0xa9, 0x35, 0x4e, 0x5c, 0x60, 0x8d, 0xca, 0x48, 0x6b, 0x9c, 0xfc, 0xff, 0x3b, 0x9e, 0xa9, 0xf4, + 0xfd, 0xfe, 0x70, 0xfe, 0xa5, 0xf6, 0x37, 0xb3, 0x50, 0xde, 0x11, 0xcd, 0x4e, 0x54, 0x57, 0x62, + 0xd7, 0x42, 0x7a, 0xd7, 0x0f, 0xc0, 0xc8, 0xc6, 0xb6, 0x78, 0xf6, 0xcd, 0xbf, 0x72, 0x58, 0x4c, + 0x47, 0x38, 0x39, 0xfa, 0xfe, 0x04, 0x2a, 0x99, 0xf9, 0x91, 0x32, 0x6e, 0x73, 0x85, 0xa6, 0x66, + 0x45, 0x9b, 0xa0, 0x0d, 0x0d, 0x08, 0xb9, 0x4f, 0xae, 0xd0, 0xf4, 0x50, 0xb0, 0x01, 0xe5, 0xd4, + 0xf4, 0x6d, 0x5c, 0xf5, 0x94, 0x68, 0x62, 0xe2, 0xb6, 0x0e, 0xa5, 0xb8, 0x3b, 0x2c, 0xa2, 0x78, + 0xd1, 0x04, 0xb9, 0xc4, 0xf3, 0xe8, 0x44, 0x39, 0x25, 0x66, 0xfa, 0x61, 0x5c, 0x48, 0xfd, 0x06, + 0x56, 0x46, 0x0f, 0x48, 0x60, 0xbc, 0x81, 0xc2, 0x12, 0xcd, 0x1f, 0x8d, 0x64, 0x78, 0x0f, 0x62, + 0xc4, 0x15, 0x3e, 0x00, 0x48, 0xf0, 0x6e, 0xc8, 0x78, 0xc1, 0x78, 0x1f, 0x60, 0x0b, 0x8d, 0xc9, + 0x9a, 0x65, 0x3c, 0xe6, 0x07, 0x00, 0xf3, 0x3c, 0x7a, 0xa4, 0xb9, 0x3e, 0x85, 0xb9, 0x13, 0x62, + 0x85, 0xd1, 0x21, 0xb1, 0xa2, 0xab, 0x4e, 0xfd, 0xb5, 0x98, 0x52, 0x72, 0xcb, 0x1b, 0x83, 0x55, + 0xae, 0x30, 0x06, 0xe3, 0xb9, 0x51, 0xde, 0x18, 0x8c, 0x37, 0xec, 0xe5, 0x00, 0x97, 0xd5, 0xa8, + 0x1a, 0x77, 0x9d, 0x91, 0x8c, 0x65, 0xbc, 0x08, 0x4d, 
0x4e, 0xa7, 0xe6, 0xd2, 0xd3, 0xa9, 0x74, + 0x7d, 0xa5, 0x67, 0xeb, 0xab, 0x3b, 0x03, 0x33, 0x76, 0x1d, 0xe2, 0x47, 0x6e, 0xd4, 0xc7, 0x0f, + 0x07, 0x70, 0xd4, 0x86, 0xeb, 0x4d, 0xb1, 0x9c, 0x3b, 0x12, 0x59, 0xc8, 0x1d, 0x89, 0x8c, 0x9e, + 0x88, 0x2d, 0xfe, 0x38, 0x13, 0xb1, 0xa5, 0x1f, 0x67, 0x22, 0xb6, 0x7c, 0xc1, 0x44, 0xec, 0x00, + 0x16, 0x39, 0x55, 0xb6, 0xdb, 0x6c, 0x8c, 0xf9, 0xbc, 0xe7, 0x91, 0x3c, 0xd3, 0x67, 0xbe, 0x70, + 0xce, 0xb6, 0x72, 0xf1, 0x9c, 0x6d, 0x8c, 0xc1, 0xd7, 0xea, 0xe5, 0x83, 0xaf, 0x67, 0xa0, 0x73, + 0x2e, 0xbc, 0xdf, 0xcd, 0x3f, 0x00, 0x15, 0x5f, 0x0c, 0x6c, 0xa4, 0xb3, 0x0f, 0x01, 0x64, 0x21, + 0xe3, 0x31, 0xff, 0xd3, 0xd4, 0x90, 0xf6, 0xa9, 0x45, 0x23, 0xb1, 0xc2, 0x0a, 0xf8, 0x04, 0x3f, + 0xd1, 0x7c, 0x8c, 0x4d, 0x6d, 0x0d, 0x4d, 0x6d, 0x39, 0xa6, 0xe2, 0x8d, 0xc6, 0xd8, 0xe4, 0xf2, + 0x4b, 0x98, 0xea, 0x88, 0x12, 0xe6, 0x0b, 0x58, 0xc2, 0x4d, 0x06, 0x4f, 0x5b, 0x56, 0xc3, 0xeb, + 0x79, 0xe2, 0x0f, 0xf5, 0xe6, 0xa8, 0xb9, 0xc0, 0xe8, 0x9f, 0x48, 0x72, 0x59, 0xbb, 0x7e, 0x0d, + 0xab, 0x19, 0xbe, 0xc9, 0x6f, 0x5d, 0x36, 0xc6, 0xfd, 0x98, 0x22, 0xc5, 0x3b, 0xf1, 0xd1, 0xcb, + 0x03, 0x58, 0xea, 0x51, 0x82, 0xcd, 0x62, 0x2b, 0x72, 0xd9, 0x95, 0xc9, 0xa0, 0x77, 0x13, 0x5f, + 0xd7, 0x42, 0x8f, 0x92, 0x46, 0x0c, 0xfc, 0x22, 0x8e, 0xbb, 0xf1, 0x60, 0x90, 0x87, 0x76, 0x3e, + 0xe5, 0xbe, 0x35, 0xea, 0x8c, 0x32, 0xb0, 0x62, 0x60, 0x2f, 0x5b, 0x89, 0x5f, 0x2d, 0x45, 0x9d, + 0xd0, 0x94, 0x96, 0xa2, 0x4e, 0x69, 0xd3, 0x2d, 0x45, 0xbd, 0xa1, 0x55, 0x6b, 0xff, 0x56, 0x80, + 0x22, 0xa6, 0xcb, 0x97, 0x84, 0xe1, 0xbc, 0x20, 0x78, 0x3d, 0x37, 0x08, 0xee, 0x40, 0x09, 0x1f, + 0x8a, 0x48, 0x11, 0x26, 0xc6, 0xfd, 0xc2, 0x93, 0x13, 0xc9, 0x10, 0x98, 0xf4, 0x84, 0x0a, 0xee, + 0x83, 0xce, 0x4d, 0x38, 0xc1, 0x15, 0x50, 0xb9, 0xc3, 0x8c, 0x3b, 0x58, 0xd3, 0xf8, 0xbb, 0xe9, + 0xd4, 0xfe, 0x5d, 0x01, 0xbd, 0x91, 0x9a, 0x8c, 0x5e, 0x9e, 0x60, 0x0c, 0xa6, 0x16, 0xf9, 0x09, + 0x46, 0x0c, 0x4f, 0x25, 0x18, 0x79, 0x2a, 0x99, 0xc8, 0x55, 0x49, 0x1d, 0xe6, 0x25, 0x66, 0x32, + 0xb1, 0x13, 0xbd, 0x37, 0x01, 0x4a, 0x74, 0xd3, 0x6e, 0x81, 0xe4, 0x20, 0xab, 0x5d, 0xde, 0x77, + 0x93, 0xd9, 0x05, 0xef, 0xa7, 0xe5, 0x76, 0x57, 0xd5, 0xfc, 0xee, 0xea, 0x1a, 0x14, 0xe3, 0x0c, + 0x53, 0xa6, 0x0c, 0xf1, 0xc2, 0x15, 0x3f, 0x03, 0xfc, 0x75, 0xfc, 0xf9, 0x22, 0x0f, 0xd3, 0x22, + 0x40, 0x94, 0x30, 0xe1, 0xdc, 0x1c, 0x51, 0xb6, 0x7c, 0x26, 0xc7, 0x45, 0x94, 0xf0, 0xd0, 0x21, + 0x3f, 0x74, 0x4c, 0x2c, 0x31, 0x39, 0xb2, 0x57, 0x11, 0x37, 0xe2, 0xb4, 0xf4, 0x25, 0xe0, 0x34, + 0x67, 0x92, 0x0f, 0xaf, 0x66, 0xae, 0x3a, 0xbc, 0xe2, 0x74, 0x43, 0xa9, 0x78, 0x65, 0x28, 0x15, + 0x8f, 0x3f, 0x63, 0x9d, 0xd6, 0xd4, 0xda, 0x3f, 0x17, 0x60, 0xce, 0x4c, 0x4e, 0xc3, 0x7f, 0x2c, + 0xc3, 0xca, 0x4d, 0x1d, 0x26, 0xf2, 0xbf, 0xa0, 0xc9, 0x57, 0x99, 0x92, 0xaf, 0xb2, 0xda, 0xbf, + 0x14, 0x00, 0xf6, 0x71, 0x2a, 0xff, 0x63, 0xc9, 0x9e, 0x4e, 0x4e, 0x27, 0xb2, 0xc9, 0x69, 0xbe, + 0xb8, 0xd3, 0xf9, 0xe2, 0x66, 0x3e, 0x22, 0xe6, 0x4e, 0x4b, 0xd5, 0x8a, 0xb5, 0xdf, 0x15, 0x40, + 0x6d, 0x9c, 0x10, 0xfb, 0x94, 0xf6, 0x3a, 0xd9, 0x43, 0x4c, 0x0e, 0x0e, 0xf1, 0x08, 0xa6, 0x8e, + 0x3c, 0xeb, 0x2c, 0x08, 0x51, 0xe4, 0xca, 0xf6, 0xdd, 0x8b, 0x8b, 0x21, 0xc9, 0xf1, 0x31, 0xd2, + 0x98, 0x82, 0x76, 0xf0, 0x4d, 0xf5, 0x04, 0x56, 0x89, 0xfc, 0xc7, 0xee, 0x5f, 0x7e, 0xf3, 0x6d, + 0xf5, 0xda, 0x1f, 0xbe, 0xad, 0x5e, 0xfb, 0xfe, 0xdb, 0x6a, 0xe1, 0x77, 0xaf, 0xaa, 0x85, 0xbf, + 0x7f, 0x55, 0x2d, 0xfc, 0xeb, 0xab, 0x6a, 0xe1, 0x9b, 0x57, 0xd5, 0xc2, 0x7f, 0xbe, 0xaa, 0x16, + 0xfe, 0xeb, 0x55, 0xf5, 0xda, 0xf7, 0xaf, 0xaa, 0x85, 0xdf, 0x7f, 0x57, 0xbd, 
0xf6, 0xcd, 0x77, + 0xd5, 0x6b, 0x7f, 0xf8, 0xae, 0x7a, 0xed, 0x37, 0x0f, 0x8e, 0x83, 0x81, 0x0c, 0x6e, 0x30, 0xfa, + 0x1f, 0x03, 0x3e, 0x4e, 0xfc, 0x3c, 0x9c, 0x42, 0xa7, 0x79, 0xff, 0xff, 0x02, 0x00, 0x00, 0xff, + 0xff, 0xfe, 0xf1, 0x31, 0x13, 0xbb, 0x32, 0x00, 0x00, } func (this *ShardInfo) Equal(that interface{}) bool { @@ -2603,14 +2599,6 @@ return false } } - if len(this.QueueAckLevels) != len(that1.QueueAckLevels) { - return false - } - for i := range this.QueueAckLevels { - if !this.QueueAckLevels[i].Equal(that1.QueueAckLevels[i]) { - return false - } - } if len(this.QueueStates) != len(that1.QueueStates) { return false } @@ -3531,6 +3519,9 @@ if this.UseCompatibleVersion != that1.UseCompatibleVersion { return false } + if !this.ActivityType.Equal(that1.ActivityType) { + return false + } return true } func (this *TimerInfo) Equal(that interface{}) bool { @@ -3730,7 +3721,7 @@ if this == nil { return "nil" } - s := make([]string, 0, 12) + s := make([]string, 0, 11) s = append(s, "&persistence.ShardInfo{") s = append(s, "ShardId: "+fmt.Sprintf("%#v", this.ShardId)+",\n") s = append(s, "RangeId: "+fmt.Sprintf("%#v", this.RangeId)+",\n") @@ -3750,19 +3741,6 @@ if this.ReplicationDlqAckLevel != nil { s = append(s, "ReplicationDlqAckLevel: "+mapStringForReplicationDlqAckLevel+",\n") } - keysForQueueAckLevels := make([]int32, 0, len(this.QueueAckLevels)) - for k, _ := range this.QueueAckLevels { - keysForQueueAckLevels = append(keysForQueueAckLevels, k) - } - github_com_gogo_protobuf_sortkeys.Int32s(keysForQueueAckLevels) - mapStringForQueueAckLevels := "map[int32]*QueueAckLevel{" - for _, k := range keysForQueueAckLevels { - mapStringForQueueAckLevels += fmt.Sprintf("%#v: %#v,", k, this.QueueAckLevels[k]) - } - mapStringForQueueAckLevels += "}" - if this.QueueAckLevels != nil { - s = append(s, "QueueAckLevels: "+mapStringForQueueAckLevels+",\n") - } keysForQueueStates := make([]int32, 0, len(this.QueueStates)) for k, _ := range this.QueueStates { keysForQueueStates = append(keysForQueueStates, k) @@ -4055,7 +4033,7 @@ if this == nil { return "nil" } - s := make([]string, 0, 34) + s := make([]string, 0, 35) s = append(s, "&persistence.ActivityInfo{") s = append(s, "Version: "+fmt.Sprintf("%#v", this.Version)+",\n") s = append(s, "ScheduledEventBatchId: "+fmt.Sprintf("%#v", this.ScheduledEventBatchId)+",\n") @@ -4091,6 +4069,9 @@ } s = append(s, "LastHeartbeatUpdateTime: "+fmt.Sprintf("%#v", this.LastHeartbeatUpdateTime)+",\n") s = append(s, "UseCompatibleVersion: "+fmt.Sprintf("%#v", this.UseCompatibleVersion)+",\n") + if this.ActivityType != nil { + s = append(s, "ActivityType: "+fmt.Sprintf("%#v", this.ActivityType)+",\n") + } s = append(s, "}") return strings.Join(s, "") } @@ -4223,32 +4204,6 @@ dAtA[i] = 0x8a } } - if len(m.QueueAckLevels) > 0 { - for k := range m.QueueAckLevels { - v := m.QueueAckLevels[k] - baseI := i - if v != nil { - { - size, err := v.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintExecutions(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - i = encodeVarintExecutions(dAtA, i, uint64(k)) - i-- - dAtA[i] = 0x8 - i = encodeVarintExecutions(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x82 - } - } if len(m.ReplicationDlqAckLevel) > 0 { for k := range m.ReplicationDlqAckLevel { v := m.ReplicationDlqAckLevel[k] @@ -4267,12 +4222,12 @@ } } if m.UpdateTime != nil { - n3, err3 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.UpdateTime, 
dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.UpdateTime):]) - if err3 != nil { - return 0, err3 + n2, err2 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.UpdateTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.UpdateTime):]) + if err2 != nil { + return 0, err2 } - i -= n3 - i = encodeVarintExecutions(dAtA, i, uint64(n3)) + i -= n2 + i = encodeVarintExecutions(dAtA, i, uint64(n2)) i-- dAtA[i] = 0x3a } @@ -4458,12 +4413,12 @@ dAtA[i] = 0x98 } if m.CloseTime != nil { - n7, err7 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.CloseTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.CloseTime):]) - if err7 != nil { - return 0, err7 + n6, err6 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.CloseTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.CloseTime):]) + if err6 != nil { + return 0, err6 } - i -= n7 - i = encodeVarintExecutions(dAtA, i, uint64(n7)) + i -= n6 + i = encodeVarintExecutions(dAtA, i, uint64(n6)) i-- dAtA[i] = 0x4 i-- @@ -4514,12 +4469,12 @@ dAtA[i] = 0xea } if m.ExecutionTime != nil { - n9, err9 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.ExecutionTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.ExecutionTime):]) - if err9 != nil { - return 0, err9 + n8, err8 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.ExecutionTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.ExecutionTime):]) + if err8 != nil { + return 0, err8 } - i -= n9 - i = encodeVarintExecutions(dAtA, i, uint64(n9)) + i -= n8 + i = encodeVarintExecutions(dAtA, i, uint64(n8)) i-- dAtA[i] = 0x3 i-- @@ -4540,12 +4495,12 @@ dAtA[i] = 0xd0 } if m.WorkflowRunExpirationTime != nil { - n10, err10 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.WorkflowRunExpirationTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.WorkflowRunExpirationTime):]) - if err10 != nil { - return 0, err10 + n9, err9 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.WorkflowRunExpirationTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.WorkflowRunExpirationTime):]) + if err9 != nil { + return 0, err9 } - i -= n10 - i = encodeVarintExecutions(dAtA, i, uint64(n10)) + i -= n9 + i = encodeVarintExecutions(dAtA, i, uint64(n9)) i-- dAtA[i] = 0x3 i-- @@ -4698,12 +4653,12 @@ } } if m.WorkflowExecutionExpirationTime != nil { - n16, err16 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.WorkflowExecutionExpirationTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.WorkflowExecutionExpirationTime):]) - if err16 != nil { - return 0, err16 + n15, err15 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.WorkflowExecutionExpirationTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.WorkflowExecutionExpirationTime):]) + if err15 != nil { + return 0, err15 } - i -= n16 - i = encodeVarintExecutions(dAtA, i, uint64(n16)) + i -= n15 + i = encodeVarintExecutions(dAtA, i, uint64(n15)) i-- dAtA[i] = 0x2 i-- @@ -4725,24 +4680,24 @@ dAtA[i] = 0xb0 } if m.RetryMaximumInterval != nil { - n17, err17 := github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.RetryMaximumInterval, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.RetryMaximumInterval):]) - if err17 != nil { - return 0, err17 + n16, err16 := github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.RetryMaximumInterval, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.RetryMaximumInterval):]) + if err16 != nil { + return 0, err16 } - i -= n17 - i = encodeVarintExecutions(dAtA, i, uint64(n17)) + i -= n16 + i = encodeVarintExecutions(dAtA, i, 
uint64(n16)) i-- dAtA[i] = 0x2 i-- dAtA[i] = 0xaa } if m.RetryInitialInterval != nil { - n18, err18 := github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.RetryInitialInterval, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.RetryInitialInterval):]) - if err18 != nil { - return 0, err18 + n17, err17 := github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.RetryInitialInterval, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.RetryInitialInterval):]) + if err17 != nil { + return 0, err17 } - i -= n18 - i = encodeVarintExecutions(dAtA, i, uint64(n18)) + i -= n17 + i = encodeVarintExecutions(dAtA, i, uint64(n17)) i-- dAtA[i] = 0x2 i-- @@ -4756,12 +4711,12 @@ dAtA[i] = 0x98 } if m.StickyScheduleToStartTimeout != nil { - n19, err19 := github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.StickyScheduleToStartTimeout, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.StickyScheduleToStartTimeout):]) - if err19 != nil { - return 0, err19 + n18, err18 := github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.StickyScheduleToStartTimeout, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.StickyScheduleToStartTimeout):]) + if err18 != nil { + return 0, err18 } - i -= n19 - i = encodeVarintExecutions(dAtA, i, uint64(n19)) + i -= n18 + i = encodeVarintExecutions(dAtA, i, uint64(n18)) i-- dAtA[i] = 0x2 i-- @@ -4795,12 +4750,12 @@ dAtA[i] = 0xfa } if m.WorkflowTaskOriginalScheduledTime != nil { - n20, err20 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.WorkflowTaskOriginalScheduledTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.WorkflowTaskOriginalScheduledTime):]) - if err20 != nil { - return 0, err20 + n19, err19 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.WorkflowTaskOriginalScheduledTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.WorkflowTaskOriginalScheduledTime):]) + if err19 != nil { + return 0, err19 } - i -= n20 - i = encodeVarintExecutions(dAtA, i, uint64(n20)) + i -= n19 + i = encodeVarintExecutions(dAtA, i, uint64(n19)) i-- dAtA[i] = 0x1 i-- @@ -4819,24 +4774,24 @@ dAtA[i] = 0xe8 } if m.WorkflowTaskScheduledTime != nil { - n21, err21 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.WorkflowTaskScheduledTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.WorkflowTaskScheduledTime):]) - if err21 != nil { - return 0, err21 + n20, err20 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.WorkflowTaskScheduledTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.WorkflowTaskScheduledTime):]) + if err20 != nil { + return 0, err20 } - i -= n21 - i = encodeVarintExecutions(dAtA, i, uint64(n21)) + i -= n20 + i = encodeVarintExecutions(dAtA, i, uint64(n20)) i-- dAtA[i] = 0x1 i-- dAtA[i] = 0xe2 } if m.WorkflowTaskStartedTime != nil { - n22, err22 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.WorkflowTaskStartedTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.WorkflowTaskStartedTime):]) - if err22 != nil { - return 0, err22 + n21, err21 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.WorkflowTaskStartedTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.WorkflowTaskStartedTime):]) + if err21 != nil { + return 0, err21 } - i -= n22 - i = encodeVarintExecutions(dAtA, i, uint64(n22)) + i -= n21 + i = encodeVarintExecutions(dAtA, i, uint64(n21)) i-- dAtA[i] = 0x1 i-- @@ -4850,12 +4805,12 @@ dAtA[i] = 0xd0 } if m.WorkflowTaskTimeout != nil { - n23, err23 := github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.WorkflowTaskTimeout, 
dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.WorkflowTaskTimeout):]) - if err23 != nil { - return 0, err23 + n22, err22 := github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.WorkflowTaskTimeout, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.WorkflowTaskTimeout):]) + if err22 != nil { + return 0, err22 } - i -= n23 - i = encodeVarintExecutions(dAtA, i, uint64(n23)) + i -= n22 + i = encodeVarintExecutions(dAtA, i, uint64(n22)) i-- dAtA[i] = 0x1 i-- @@ -4883,24 +4838,24 @@ dAtA[i] = 0xb0 } if m.LastUpdateTime != nil { - n24, err24 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.LastUpdateTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.LastUpdateTime):]) - if err24 != nil { - return 0, err24 + n23, err23 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.LastUpdateTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.LastUpdateTime):]) + if err23 != nil { + return 0, err23 } - i -= n24 - i = encodeVarintExecutions(dAtA, i, uint64(n24)) + i -= n23 + i = encodeVarintExecutions(dAtA, i, uint64(n23)) i-- dAtA[i] = 0x1 i-- dAtA[i] = 0xaa } if m.StartTime != nil { - n25, err25 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.StartTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.StartTime):]) - if err25 != nil { - return 0, err25 + n24, err24 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.StartTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.StartTime):]) + if err24 != nil { + return 0, err24 } - i -= n25 - i = encodeVarintExecutions(dAtA, i, uint64(n25)) + i -= n24 + i = encodeVarintExecutions(dAtA, i, uint64(n24)) i-- dAtA[i] = 0x1 i-- @@ -4928,33 +4883,33 @@ dAtA[i] = 0x88 } if m.DefaultWorkflowTaskTimeout != nil { - n26, err26 := github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.DefaultWorkflowTaskTimeout, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.DefaultWorkflowTaskTimeout):]) + n25, err25 := github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.DefaultWorkflowTaskTimeout, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.DefaultWorkflowTaskTimeout):]) + if err25 != nil { + return 0, err25 + } + i -= n25 + i = encodeVarintExecutions(dAtA, i, uint64(n25)) + i-- + dAtA[i] = 0x6a + } + if m.WorkflowRunTimeout != nil { + n26, err26 := github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.WorkflowRunTimeout, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.WorkflowRunTimeout):]) if err26 != nil { return 0, err26 } i -= n26 i = encodeVarintExecutions(dAtA, i, uint64(n26)) i-- - dAtA[i] = 0x6a + dAtA[i] = 0x62 } - if m.WorkflowRunTimeout != nil { - n27, err27 := github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.WorkflowRunTimeout, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.WorkflowRunTimeout):]) + if m.WorkflowExecutionTimeout != nil { + n27, err27 := github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.WorkflowExecutionTimeout, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.WorkflowExecutionTimeout):]) if err27 != nil { return 0, err27 } i -= n27 i = encodeVarintExecutions(dAtA, i, uint64(n27)) i-- - dAtA[i] = 0x62 - } - if m.WorkflowExecutionTimeout != nil { - n28, err28 := github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.WorkflowExecutionTimeout, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.WorkflowExecutionTimeout):]) - if err28 != nil { - return 0, err28 - } - i -= n28 - i = encodeVarintExecutions(dAtA, i, uint64(n28)) - i-- dAtA[i] = 0x5a } if len(m.WorkflowTypeName) > 0 { @@ -5134,12 +5089,12 @@ dAtA[i] = 
0x78 } if m.VisibilityTime != nil { - n29, err29 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.VisibilityTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.VisibilityTime):]) - if err29 != nil { - return 0, err29 + n28, err28 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.VisibilityTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.VisibilityTime):]) + if err28 != nil { + return 0, err28 } - i -= n29 - i = encodeVarintExecutions(dAtA, i, uint64(n29)) + i -= n28 + i = encodeVarintExecutions(dAtA, i, uint64(n28)) i-- dAtA[i] = 0x6a } @@ -5311,12 +5266,12 @@ dAtA[i] = 0x8a } if m.VisibilityTime != nil { - n31, err31 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.VisibilityTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.VisibilityTime):]) - if err31 != nil { - return 0, err31 + n30, err30 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.VisibilityTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.VisibilityTime):]) + if err30 != nil { + return 0, err30 } - i -= n31 - i = encodeVarintExecutions(dAtA, i, uint64(n31)) + i -= n30 + i = encodeVarintExecutions(dAtA, i, uint64(n30)) i-- dAtA[i] = 0x1 i-- @@ -5416,33 +5371,33 @@ dAtA[i] = 0x50 } if m.StartTime != nil { - n32, err32 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.StartTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.StartTime):]) + n31, err31 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.StartTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.StartTime):]) + if err31 != nil { + return 0, err31 + } + i -= n31 + i = encodeVarintExecutions(dAtA, i, uint64(n31)) + i-- + dAtA[i] = 0x4a + } + if m.CloseTime != nil { + n32, err32 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.CloseTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.CloseTime):]) if err32 != nil { return 0, err32 } i -= n32 i = encodeVarintExecutions(dAtA, i, uint64(n32)) i-- - dAtA[i] = 0x4a + dAtA[i] = 0x42 } - if m.CloseTime != nil { - n33, err33 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.CloseTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.CloseTime):]) + if m.VisibilityTime != nil { + n33, err33 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.VisibilityTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.VisibilityTime):]) if err33 != nil { return 0, err33 } i -= n33 i = encodeVarintExecutions(dAtA, i, uint64(n33)) i-- - dAtA[i] = 0x42 - } - if m.VisibilityTime != nil { - n34, err34 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.VisibilityTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.VisibilityTime):]) - if err34 != nil { - return 0, err34 - } - i -= n34 - i = encodeVarintExecutions(dAtA, i, uint64(n34)) - i-- dAtA[i] = 0x3a } if m.TaskId != 0 { @@ -5522,12 +5477,12 @@ dAtA[i] = 0x62 } if m.VisibilityTime != nil { - n35, err35 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.VisibilityTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.VisibilityTime):]) - if err35 != nil { - return 0, err35 + n34, err34 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.VisibilityTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.VisibilityTime):]) + if err34 != nil { + return 0, err34 } - i -= n35 - i = encodeVarintExecutions(dAtA, i, uint64(n35)) + i -= n34 + i = encodeVarintExecutions(dAtA, i, uint64(n34)) i-- dAtA[i] = 0x5a } @@ -5611,12 +5566,12 @@ var l int _ = l if m.VisibilityTime != nil { - n36, err36 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.VisibilityTime, 
dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.VisibilityTime):]) - if err36 != nil { - return 0, err36 + n35, err35 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.VisibilityTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.VisibilityTime):]) + if err35 != nil { + return 0, err35 } - i -= n36 - i = encodeVarintExecutions(dAtA, i, uint64(n36)) + i -= n35 + i = encodeVarintExecutions(dAtA, i, uint64(n35)) i-- dAtA[i] = 0x3a } @@ -5679,6 +5634,20 @@ _ = i var l int _ = l + if m.ActivityType != nil { + { + size, err := m.ActivityType.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintExecutions(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x92 + } if m.UseCompatibleVersion { i-- if m.UseCompatibleVersion { @@ -6289,19 +6258,6 @@ n += mapEntrySize + 1 + sovExecutions(uint64(mapEntrySize)) } } - if len(m.QueueAckLevels) > 0 { - for k, v := range m.QueueAckLevels { - _ = k - _ = v - l = 0 - if v != nil { - l = v.Size() - l += 1 + sovExecutions(uint64(l)) - } - mapEntrySize := 1 + sovExecutions(uint64(k)) + l - n += mapEntrySize + 2 + sovExecutions(uint64(mapEntrySize)) - } - } if len(m.QueueStates) > 0 { for k, v := range m.QueueStates { _ = k @@ -7026,6 +6982,10 @@ if m.UseCompatibleVersion { n += 3 } + if m.ActivityType != nil { + l = m.ActivityType.Size() + n += 2 + l + sovExecutions(uint64(l)) + } return n } @@ -7190,16 +7150,6 @@ mapStringForReplicationDlqAckLevel += fmt.Sprintf("%v: %v,", k, this.ReplicationDlqAckLevel[k]) } mapStringForReplicationDlqAckLevel += "}" - keysForQueueAckLevels := make([]int32, 0, len(this.QueueAckLevels)) - for k, _ := range this.QueueAckLevels { - keysForQueueAckLevels = append(keysForQueueAckLevels, k) - } - github_com_gogo_protobuf_sortkeys.Int32s(keysForQueueAckLevels) - mapStringForQueueAckLevels := "map[int32]*QueueAckLevel{" - for _, k := range keysForQueueAckLevels { - mapStringForQueueAckLevels += fmt.Sprintf("%v: %v,", k, this.QueueAckLevels[k]) - } - mapStringForQueueAckLevels += "}" keysForQueueStates := make([]int32, 0, len(this.QueueStates)) for k, _ := range this.QueueStates { keysForQueueStates = append(keysForQueueStates, k) @@ -7217,7 +7167,6 @@ `StolenSinceRenew:` + fmt.Sprintf("%v", this.StolenSinceRenew) + `,`, `UpdateTime:` + strings.Replace(fmt.Sprintf("%v", this.UpdateTime), "Timestamp", "types.Timestamp", 1) + `,`, `ReplicationDlqAckLevel:` + mapStringForReplicationDlqAckLevel + `,`, - `QueueAckLevels:` + mapStringForQueueAckLevels + `,`, `QueueStates:` + mapStringForQueueStates + `,`, `}`, }, "") @@ -7512,6 +7461,7 @@ `LastHeartbeatDetails:` + strings.Replace(fmt.Sprintf("%v", this.LastHeartbeatDetails), "Payloads", "v12.Payloads", 1) + `,`, `LastHeartbeatUpdateTime:` + strings.Replace(fmt.Sprintf("%v", this.LastHeartbeatUpdateTime), "Timestamp", "types.Timestamp", 1) + `,`, `UseCompatibleVersion:` + fmt.Sprintf("%v", this.UseCompatibleVersion) + `,`, + `ActivityType:` + strings.Replace(fmt.Sprintf("%v", this.ActivityType), "ActivityType", "v12.ActivityType", 1) + `,`, `}`, }, "") return s @@ -7864,121 +7814,6 @@ } m.ReplicationDlqAckLevel[mapkey] = mapvalue iNdEx = postIndex - case 16: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field QueueAckLevels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowExecutions - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - 
} - } - if msglen < 0 { - return ErrInvalidLengthExecutions - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthExecutions - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.QueueAckLevels == nil { - m.QueueAckLevels = make(map[int32]*QueueAckLevel) - } - var mapkey int32 - var mapvalue *QueueAckLevel - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowExecutions - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowExecutions - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapkey |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowExecutions - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthExecutions - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthExecutions - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &QueueAckLevel{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipExecutions(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthExecutions - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.QueueAckLevels[mapkey] = mapvalue - iNdEx = postIndex case 17: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field QueueStates", wireType) @@ -13350,6 +13185,42 @@ } } m.UseCompatibleVersion = bool(v != 0) + case 34: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActivityType", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowExecutions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthExecutions + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthExecutions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ActivityType == nil { + m.ActivityType = &v12.ActivityType{} + } + if err := m.ActivityType.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipExecutions(dAtA[iNdEx:]) diff -Nru temporal-1.21.5-1/src/api/persistence/v1/queues.pb.go temporal-1.22.5/src/api/persistence/v1/queues.pb.go --- temporal-1.21.5-1/src/api/persistence/v1/queues.pb.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/api/persistence/v1/queues.pb.go 2024-02-23 09:45:43.000000000 +0000 @@ -50,57 +50,6 @@ // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package -type QueueAckLevel struct { - AckLevel int64 `protobuf:"varint,1,opt,name=ack_level,json=ackLevel,proto3" json:"ack_level,omitempty"` - ClusterAckLevel map[string]int64 `protobuf:"bytes,2,rep,name=cluster_ack_level,json=clusterAckLevel,proto3" json:"cluster_ack_level,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` -} - -func (m *QueueAckLevel) Reset() { *m = QueueAckLevel{} } -func (*QueueAckLevel) ProtoMessage() {} -func (*QueueAckLevel) Descriptor() ([]byte, []int) { - return fileDescriptor_b7fa5f143ac80378, []int{0} -} -func (m *QueueAckLevel) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueueAckLevel) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueueAckLevel.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueueAckLevel) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueueAckLevel.Merge(m, src) -} -func (m *QueueAckLevel) XXX_Size() int { - return m.Size() -} -func (m *QueueAckLevel) XXX_DiscardUnknown() { - xxx_messageInfo_QueueAckLevel.DiscardUnknown(m) -} - -var xxx_messageInfo_QueueAckLevel proto.InternalMessageInfo - -func (m *QueueAckLevel) GetAckLevel() int64 { - if m != nil { - return m.AckLevel - } - return 0 -} - -func (m *QueueAckLevel) GetClusterAckLevel() map[string]int64 { - if m != nil { - return m.ClusterAckLevel - } - return nil -} - type QueueState struct { ReaderStates map[int64]*QueueReaderState `protobuf:"bytes,1,rep,name=reader_states,json=readerStates,proto3" json:"reader_states,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` ExclusiveReaderHighWatermark *TaskKey `protobuf:"bytes,2,opt,name=exclusive_reader_high_watermark,json=exclusiveReaderHighWatermark,proto3" json:"exclusive_reader_high_watermark,omitempty"` @@ -109,7 +58,7 @@ func (m *QueueState) Reset() { *m = QueueState{} } func (*QueueState) ProtoMessage() {} func (*QueueState) Descriptor() ([]byte, []int) { - return fileDescriptor_b7fa5f143ac80378, []int{1} + return fileDescriptor_b7fa5f143ac80378, []int{0} } func (m *QueueState) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -159,7 +108,7 @@ func (m *QueueReaderState) Reset() { *m = QueueReaderState{} } func (*QueueReaderState) ProtoMessage() {} func (*QueueReaderState) Descriptor() ([]byte, []int) { - return fileDescriptor_b7fa5f143ac80378, []int{2} + return fileDescriptor_b7fa5f143ac80378, []int{1} } func (m *QueueReaderState) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -203,7 +152,7 @@ func (m *QueueSliceScope) Reset() { *m = QueueSliceScope{} } func (*QueueSliceScope) ProtoMessage() {} func (*QueueSliceScope) Descriptor() ([]byte, []int) { - return fileDescriptor_b7fa5f143ac80378, []int{3} + return fileDescriptor_b7fa5f143ac80378, []int{2} } func (m *QueueSliceScope) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -254,7 +203,7 @@ func (m *QueueSliceRange) Reset() { *m = QueueSliceRange{} } func (*QueueSliceRange) ProtoMessage() {} func (*QueueSliceRange) Descriptor() ([]byte, []int) { - return fileDescriptor_b7fa5f143ac80378, []int{4} + return fileDescriptor_b7fa5f143ac80378, []int{3} } func (m *QueueSliceRange) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -298,8 +247,6 @@ } func init() { - 
proto.RegisterType((*QueueAckLevel)(nil), "temporal.server.api.persistence.v1.QueueAckLevel") - proto.RegisterMapType((map[string]int64)(nil), "temporal.server.api.persistence.v1.QueueAckLevel.ClusterAckLevelEntry") proto.RegisterType((*QueueState)(nil), "temporal.server.api.persistence.v1.QueueState") proto.RegisterMapType((map[int64]*QueueReaderState)(nil), "temporal.server.api.persistence.v1.QueueState.ReaderStatesEntry") proto.RegisterType((*QueueReaderState)(nil), "temporal.server.api.persistence.v1.QueueReaderState") @@ -312,75 +259,39 @@ } var fileDescriptor_b7fa5f143ac80378 = []byte{ - // 542 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0xc1, 0x6e, 0xd3, 0x30, - 0x18, 0xc7, 0xe3, 0x56, 0x9b, 0xa8, 0xdb, 0x6a, 0x5b, 0xb4, 0x43, 0x55, 0x90, 0x99, 0x7a, 0x9a, - 0x84, 0x48, 0xb4, 0x76, 0x07, 0x04, 0x17, 0x18, 0x02, 0x01, 0x65, 0xd2, 0xc8, 0x90, 0x90, 0xb8, - 0x44, 0x26, 0xfb, 0xd4, 0x9a, 0xa4, 0x49, 0xb0, 0xdd, 0xd0, 0xde, 0x78, 0x04, 0x1e, 0x03, 0x1e, - 0x80, 0x77, 0xe0, 0xd8, 0xe3, 0x4e, 0x88, 0xa6, 0x1c, 0x38, 0xee, 0x11, 0x50, 0x9c, 0x26, 0x4d, - 0x07, 0x88, 0x8c, 0x5b, 0x3e, 0xdb, 0xff, 0xdf, 0xf7, 0xff, 0xfe, 0x75, 0x8d, 0x4d, 0x09, 0xa3, - 0x30, 0xe0, 0xd4, 0x33, 0x05, 0xf0, 0x08, 0xb8, 0x49, 0x43, 0x66, 0x86, 0xc0, 0x05, 0x13, 0x12, - 0x7c, 0x07, 0xcc, 0xe8, 0xc0, 0x7c, 0x37, 0x86, 0x31, 0x08, 0x23, 0xe4, 0x81, 0x0c, 0xf4, 0x4e, - 0x26, 0x30, 0x52, 0x81, 0x41, 0x43, 0x66, 0x14, 0x04, 0x46, 0x74, 0xd0, 0xee, 0x95, 0x80, 0x86, - 0x1c, 0xce, 0x98, 0x43, 0x65, 0x06, 0x6e, 0x1b, 0x25, 0x44, 0x92, 0x0a, 0x77, 0x79, 0xbe, 0xf3, - 0x03, 0xe1, 0xe6, 0x8b, 0xc4, 0xd9, 0x03, 0xc7, 0x7d, 0x0e, 0x11, 0x78, 0xfa, 0x75, 0x5c, 0xa3, - 0x8e, 0x6b, 0x7b, 0x49, 0xd1, 0x42, 0x7b, 0x68, 0xbf, 0x6a, 0x5d, 0xa3, 0xd9, 0x26, 0xc7, 0x3b, - 0x8e, 0x37, 0x16, 0x12, 0xb8, 0xbd, 0x3a, 0x54, 0xd9, 0xab, 0xee, 0xd7, 0xbb, 0x8f, 0x8d, 0x7f, - 0xcf, 0x64, 0xac, 0xb5, 0x32, 0x1e, 0xa6, 0xa8, 0xac, 0x7e, 0xe4, 0x4b, 0x3e, 0xb5, 0xb6, 0x9c, - 0xf5, 0xd5, 0xf6, 0x11, 0xde, 0xfd, 0xd3, 0x41, 0x7d, 0x1b, 0x57, 0x5d, 0x98, 0x2a, 0x8b, 0x35, - 0x2b, 0xf9, 0xd4, 0x77, 0xf1, 0x46, 0x44, 0xbd, 0x31, 0xb4, 0x2a, 0xca, 0x76, 0x5a, 0xdc, 0xad, - 0xdc, 0x41, 0x9d, 0x6f, 0x15, 0x8c, 0x55, 0xef, 0x53, 0x49, 0x25, 0xe8, 0x80, 0x9b, 0x1c, 0xe8, - 0x19, 0x70, 0x5b, 0x24, 0xb5, 0x68, 0x21, 0x35, 0xc2, 0xfd, 0xd2, 0x23, 0x28, 0x8c, 0x61, 0x29, - 0x86, 0xfa, 0x16, 0xa9, 0xf9, 0x06, 0x2f, 0x2c, 0xe9, 0x1c, 0xdf, 0x84, 0x49, 0x32, 0x0e, 0x8b, - 0xc0, 0x5e, 0x36, 0x1c, 0xb2, 0xc1, 0xd0, 0x7e, 0x4f, 0x25, 0xf0, 0x11, 0xe5, 0xae, 0x72, 0x5a, - 0xef, 0xde, 0x2a, 0xd3, 0xf8, 0x25, 0x15, 0x6e, 0x1f, 0xa6, 0xd6, 0x8d, 0x9c, 0x99, 0xf6, 0x7f, - 0xc2, 0x06, 0xc3, 0x57, 0x19, 0xb0, 0x3d, 0xc6, 0x3b, 0xbf, 0xd9, 0x2a, 0x46, 0x55, 0x4d, 0xa3, - 0x7a, 0x56, 0x8c, 0xaa, 0xde, 0x3d, 0x2c, 0x3d, 0x79, 0x01, 0x5e, 0x0c, 0xd8, 0xc6, 0xdb, 0x97, - 0xb7, 0xf5, 0x3e, 0xde, 0x14, 0x4e, 0x10, 0xe6, 0xf1, 0xf6, 0xca, 0xc7, 0xeb, 0x31, 0x07, 0x4e, - 0x13, 0xad, 0xb5, 0x44, 0x74, 0x3e, 0x23, 0xbc, 0x75, 0x69, 0x4f, 0x7f, 0x8a, 0x37, 0x38, 0xf5, - 0x07, 0xa0, 0x06, 0xbb, 0x32, 0xdf, 0x4a, 0xa4, 0x56, 0x4a, 0xd0, 0xfb, 0xb8, 0x96, 0xff, 0x97, - 0x96, 0x99, 0xdc, 0x2e, 0x83, 0x3b, 0xc9, 0x44, 0xd6, 0x4a, 0xdf, 0xf9, 0xb2, 0xe6, 0x55, 0xf5, - 0xd1, 0x4f, 0x70, 0x93, 0xf9, 0xd9, 0x5d, 0x18, 0x31, 0x7f, 0xe9, 0xf9, 0x4a, 0xbf, 0x7c, 0x23, - 0x27, 0x1c, 0x33, 0x3f, 0x21, 0xae, 0x6e, 0xd7, 0x88, 0x4e, 0xfe, 0xe7, 0x2e, 0x35, 0x72, 0xc2, - 0x31, 0x9d, 0x1c, 0xbd, 0x9d, 0xcd, 0x89, 0x76, 0x3e, 0x27, 
0xda, 0xc5, 0x9c, 0xa0, 0x0f, 0x31, - 0x41, 0x9f, 0x62, 0x82, 0xbe, 0xc6, 0x04, 0xcd, 0x62, 0x82, 0xbe, 0xc7, 0x04, 0xfd, 0x8c, 0x89, - 0x76, 0x11, 0x13, 0xf4, 0x71, 0x41, 0xb4, 0xd9, 0x82, 0x68, 0xe7, 0x0b, 0xa2, 0xbd, 0x3e, 0x1c, - 0x04, 0xab, 0x96, 0x2c, 0xf8, 0xfb, 0xc3, 0x73, 0xaf, 0x50, 0xbe, 0xd9, 0x54, 0xef, 0x4f, 0xef, - 0x57, 0x00, 0x00, 0x00, 0xff, 0xff, 0x2c, 0x23, 0x0d, 0xe4, 0x3b, 0x05, 0x00, 0x00, + // 465 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x93, 0x4f, 0x6b, 0x13, 0x41, + 0x18, 0xc6, 0x77, 0x12, 0x5a, 0x70, 0x9a, 0x62, 0x9d, 0x53, 0x08, 0x32, 0x86, 0x3d, 0x05, 0xc4, + 0x59, 0xda, 0xf4, 0x20, 0x7a, 0x11, 0x41, 0x50, 0x43, 0xa1, 0x4e, 0x05, 0xc1, 0x4b, 0x18, 0xb7, + 0x2f, 0xc9, 0x98, 0x64, 0x77, 0x9d, 0x99, 0x5d, 0x93, 0x9b, 0x1f, 0xc1, 0x8f, 0xa1, 0x1f, 0xc0, + 0xef, 0xe0, 0x31, 0xc7, 0x9e, 0xc4, 0x6c, 0x2e, 0x1e, 0xfb, 0x11, 0x64, 0xff, 0xaf, 0x15, 0x71, + 0xdb, 0xdb, 0xcc, 0xce, 0x3e, 0xbf, 0xe7, 0x99, 0x77, 0xde, 0x17, 0x3b, 0x06, 0x16, 0x81, 0xaf, + 0xc4, 0xdc, 0xd1, 0xa0, 0x22, 0x50, 0x8e, 0x08, 0xa4, 0x13, 0x80, 0xd2, 0x52, 0x1b, 0xf0, 0x5c, + 0x70, 0xa2, 0x43, 0xe7, 0x43, 0x08, 0x21, 0x68, 0x16, 0x28, 0xdf, 0xf8, 0xc4, 0x2e, 0x04, 0x2c, + 0x13, 0x30, 0x11, 0x48, 0x56, 0x13, 0xb0, 0xe8, 0xb0, 0x37, 0x6c, 0x00, 0x0d, 0x14, 0x9c, 0x4b, + 0x57, 0x98, 0x02, 0xdc, 0x63, 0x0d, 0x44, 0x46, 0xe8, 0x59, 0xfe, 0xbf, 0xfd, 0xa3, 0x85, 0xf1, + 0xab, 0x24, 0xd9, 0x99, 0x11, 0x06, 0x08, 0xe0, 0x7d, 0x05, 0xe2, 0x1c, 0xd4, 0x58, 0x27, 0x7b, + 0xdd, 0x45, 0xfd, 0xf6, 0x60, 0xef, 0xe8, 0x09, 0xfb, 0x7f, 0x5e, 0x56, 0x61, 0x18, 0x4f, 0x19, + 0xe9, 0x5a, 0x3f, 0xf3, 0x8c, 0x5a, 0xf1, 0x8e, 0xaa, 0x7d, 0x22, 0x0a, 0xdf, 0x83, 0xa5, 0x3b, + 0x0f, 0xb5, 0x8c, 0x60, 0x9c, 0x1b, 0x4e, 0xe5, 0x64, 0x3a, 0xfe, 0x28, 0x0c, 0xa8, 0x85, 0x50, + 0xb3, 0x6e, 0xab, 0x8f, 0x06, 0x7b, 0x47, 0xf7, 0x9b, 0x18, 0xbf, 0x16, 0x7a, 0x36, 0x82, 0x15, + 0xbf, 0x5b, 0x32, 0x33, 0xff, 0xe7, 0x72, 0x32, 0x7d, 0x53, 0x00, 0x7b, 0x21, 0xbe, 0xf3, 0x57, + 0x2c, 0x72, 0x80, 0xdb, 0x33, 0x58, 0x75, 0x51, 0x1f, 0x0d, 0xda, 0x3c, 0x59, 0x92, 0x97, 0x78, + 0x27, 0x12, 0xf3, 0x10, 0xf2, 0x00, 0xc7, 0x8d, 0x6f, 0x5e, 0x83, 0xf3, 0x0c, 0xf1, 0xa8, 0xf5, + 0x10, 0xd9, 0x63, 0x7c, 0x70, 0xf5, 0x98, 0x8c, 0xf0, 0xae, 0x76, 0xfd, 0xa0, 0x2c, 0xef, 0xb0, + 0x79, 0x79, 0xe7, 0xd2, 0x85, 0xb3, 0x44, 0xcb, 0x73, 0x84, 0xfd, 0x15, 0xe1, 0xdb, 0x57, 0xce, + 0xc8, 0x0b, 0xbc, 0xa3, 0x84, 0x37, 0x81, 0xf4, 0x62, 0xd7, 0xe6, 0xf3, 0x44, 0xca, 0x33, 0x02, + 0x19, 0xe1, 0x5b, 0x65, 0x93, 0xe5, 0x35, 0x79, 0xd0, 0x04, 0x77, 0x5a, 0x88, 0x78, 0xa5, 0xb7, + 0xbf, 0xfd, 0x91, 0x35, 0xf5, 0x21, 0xa7, 0x78, 0x5f, 0x7a, 0x45, 0x2f, 0x2c, 0xa4, 0x97, 0x67, + 0xbe, 0xd6, 0xcb, 0x77, 0x4a, 0xc2, 0x89, 0xf4, 0x12, 0x62, 0xd5, 0x5d, 0x0b, 0xb1, 0xbc, 0x49, + 0x2f, 0x75, 0x4a, 0xc2, 0x89, 0x58, 0x3e, 0x7d, 0xbf, 0xde, 0x50, 0xeb, 0x62, 0x43, 0xad, 0xcb, + 0x0d, 0x45, 0x9f, 0x62, 0x8a, 0xbe, 0xc4, 0x14, 0x7d, 0x8f, 0x29, 0x5a, 0xc7, 0x14, 0xfd, 0x8c, + 0x29, 0xfa, 0x15, 0x53, 0xeb, 0x32, 0xa6, 0xe8, 0xf3, 0x96, 0x5a, 0xeb, 0x2d, 0xb5, 0x2e, 0xb6, + 0xd4, 0x7a, 0x7b, 0x3c, 0xf1, 0x2b, 0x4b, 0xe9, 0xff, 0x7b, 0x22, 0x1f, 0xd7, 0xb6, 0xef, 0x76, + 0xd3, 0xc1, 0x1c, 0xfe, 0x0e, 0x00, 0x00, 0xff, 0xff, 0xa1, 0xfe, 0x92, 0xa2, 0x54, 0x04, 0x00, + 0x00, } -func (this *QueueAckLevel) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*QueueAckLevel) - if !ok { - that2, ok := that.(QueueAckLevel) - if ok { - that1 = &that2 - } else { - return 
false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.AckLevel != that1.AckLevel { - return false - } - if len(this.ClusterAckLevel) != len(that1.ClusterAckLevel) { - return false - } - for i := range this.ClusterAckLevel { - if this.ClusterAckLevel[i] != that1.ClusterAckLevel[i] { - return false - } - } - return true -} func (this *QueueState) Equal(that interface{}) bool { if that == nil { return this == nil @@ -496,29 +407,6 @@ } return true } -func (this *QueueAckLevel) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&persistence.QueueAckLevel{") - s = append(s, "AckLevel: "+fmt.Sprintf("%#v", this.AckLevel)+",\n") - keysForClusterAckLevel := make([]string, 0, len(this.ClusterAckLevel)) - for k, _ := range this.ClusterAckLevel { - keysForClusterAckLevel = append(keysForClusterAckLevel, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForClusterAckLevel) - mapStringForClusterAckLevel := "map[string]int64{" - for _, k := range keysForClusterAckLevel { - mapStringForClusterAckLevel += fmt.Sprintf("%#v: %#v,", k, this.ClusterAckLevel[k]) - } - mapStringForClusterAckLevel += "}" - if this.ClusterAckLevel != nil { - s = append(s, "ClusterAckLevel: "+mapStringForClusterAckLevel+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} func (this *QueueState) GoString() string { if this == nil { return "nil" @@ -594,51 +482,6 @@ pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) } -func (m *QueueAckLevel) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueueAckLevel) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueueAckLevel) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.ClusterAckLevel) > 0 { - for k := range m.ClusterAckLevel { - v := m.ClusterAckLevel[k] - baseI := i - i = encodeVarintQueues(dAtA, i, uint64(v)) - i-- - dAtA[i] = 0x10 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintQueues(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintQueues(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x12 - } - } - if m.AckLevel != 0 { - i = encodeVarintQueues(dAtA, i, uint64(m.AckLevel)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - func (m *QueueState) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -840,26 +683,6 @@ dAtA[offset] = uint8(v) return base } -func (m *QueueAckLevel) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.AckLevel != 0 { - n += 1 + sovQueues(uint64(m.AckLevel)) - } - if len(m.ClusterAckLevel) > 0 { - for k, v := range m.ClusterAckLevel { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovQueues(uint64(len(k))) + 1 + sovQueues(uint64(v)) - n += mapEntrySize + 1 + sovQueues(uint64(mapEntrySize)) - } - } - return n -} - func (m *QueueState) Size() (n int) { if m == nil { return 0 @@ -941,27 +764,6 @@ func sozQueues(x uint64) (n int) { return sovQueues(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } -func (this *QueueAckLevel) String() string { - if this == nil { - return "nil" - } - keysForClusterAckLevel := make([]string, 0, len(this.ClusterAckLevel)) - for k, _ := range this.ClusterAckLevel { - keysForClusterAckLevel = 
append(keysForClusterAckLevel, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForClusterAckLevel) - mapStringForClusterAckLevel := "map[string]int64{" - for _, k := range keysForClusterAckLevel { - mapStringForClusterAckLevel += fmt.Sprintf("%v: %v,", k, this.ClusterAckLevel[k]) - } - mapStringForClusterAckLevel += "}" - s := strings.Join([]string{`&QueueAckLevel{`, - `AckLevel:` + fmt.Sprintf("%v", this.AckLevel) + `,`, - `ClusterAckLevel:` + mapStringForClusterAckLevel + `,`, - `}`, - }, "") - return s -} func (this *QueueState) String() string { if this == nil { return "nil" @@ -1028,191 +830,6 @@ pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } -func (m *QueueAckLevel) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueues - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueueAckLevel: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueueAckLevel: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AckLevel", wireType) - } - m.AckLevel = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueues - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.AckLevel |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClusterAckLevel", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueues - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQueues - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQueues - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ClusterAckLevel == nil { - m.ClusterAckLevel = make(map[string]int64) - } - var mapkey string - var mapvalue int64 - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueues - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueues - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthQueues - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthQueues - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowQueues - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapvalue |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - } else { - iNdEx = entryPreIndex - skippy, err := skipQueues(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthQueues - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.ClusterAckLevel[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQueues(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthQueues - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthQueues - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func (m *QueueState) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff -Nru temporal-1.21.5-1/src/api/persistence/v1/task_queues.pb.go temporal-1.22.5/src/api/persistence/v1/task_queues.pb.go --- temporal-1.21.5-1/src/api/persistence/v1/task_queues.pb.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/api/persistence/v1/task_queues.pb.go 2024-02-23 09:45:43.000000000 +0000 @@ -153,8 +153,17 @@ type CompatibleVersionSet struct { // Set IDs are used internally by matching. // A set typically has one set ID and extra care is taken to enforce this. - // In split brain scenarios, there may be conflicting concurrent writes to the task queue versioning data, in which - // case a set might end up with more than one ID. + // In some situations, including: + // - Replication race between task queue user data and history events + // - Replication split-brain + later merge + // - Delayed user data propagation between partitions + // - Cross-task-queue activities/child workflows/CAN where the user has not set up parallel + // versioning data + // we have to guess the set id for a build id. If that happens, and then the build id is + // discovered to be in a different set, then the sets will be merged and both (or more) + // build ids will be preserved, so that we don't lose tasks. + // The first set id is considered the "primary", and the others are "demoted". Once a build + // id is demoted, it cannot be made the primary again. SetIds []string `protobuf:"bytes,1,rep,name=set_ids,json=setIds,proto3" json:"set_ids,omitempty"` // All the compatible versions, unordered except for the last element, which is considered the set "default". 
BuildIds []*BuildId `protobuf:"bytes,2,rep,name=build_ids,json=buildIds,proto3" json:"build_ids,omitempty"` diff -Nru temporal-1.21.5-1/src/api/token/v1/message.pb.go temporal-1.22.5/src/api/token/v1/message.pb.go --- temporal-1.21.5-1/src/api/token/v1/message.pb.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/api/token/v1/message.pb.go 2024-02-23 09:45:43.000000000 +0000 @@ -65,6 +65,7 @@ PersistenceToken []byte `protobuf:"bytes,6,opt,name=persistence_token,json=persistenceToken,proto3" json:"persistence_token,omitempty"` TransientWorkflowTask *v1.TransientWorkflowTaskInfo `protobuf:"bytes,7,opt,name=transient_workflow_task,json=transientWorkflowTask,proto3" json:"transient_workflow_task,omitempty"` BranchToken []byte `protobuf:"bytes,8,opt,name=branch_token,json=branchToken,proto3" json:"branch_token,omitempty"` + VersionHistoryItem *v1.VersionHistoryItem `protobuf:"bytes,10,opt,name=version_history_item,json=versionHistoryItem,proto3" json:"version_history_item,omitempty"` } func (m *HistoryContinuation) Reset() { *m = HistoryContinuation{} } @@ -148,6 +149,13 @@ return nil } +func (m *HistoryContinuation) GetVersionHistoryItem() *v1.VersionHistoryItem { + if m != nil { + return m.VersionHistoryItem + } + return nil +} + type RawHistoryContinuation struct { NamespaceId string `protobuf:"bytes,10,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` WorkflowId string `protobuf:"bytes,2,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` @@ -457,57 +465,59 @@ } var fileDescriptor_020fff7d28118bec = []byte{ - // 795 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x55, 0x41, 0x8f, 0xdb, 0x44, - 0x14, 0xce, 0x6c, 0xb2, 0xd9, 0xf8, 0xc5, 0x2d, 0x89, 0xab, 0xd2, 0x68, 0x05, 0xde, 0x6c, 0xe0, - 0x10, 0x4a, 0x65, 0xb3, 0x70, 0x42, 0x1c, 0x90, 0xba, 0x42, 0x6a, 0x7a, 0xab, 0x15, 0x81, 0x84, - 0x04, 0xd1, 0xac, 0x3d, 0xc9, 0x8e, 0x92, 0xcc, 0xb8, 0x33, 0x63, 0x2f, 0xb9, 0xf1, 0x13, 0xfa, - 0x33, 0xe0, 0x9f, 0x70, 0xdc, 0x63, 0x4f, 0xc0, 0x66, 0x2f, 0xdc, 0xe8, 0x4f, 0x40, 0x33, 0xf6, - 0x24, 0x66, 0x6b, 0x04, 0x07, 0x6e, 0x9e, 0xef, 0x7d, 0xef, 0xcd, 0x7b, 0xdf, 0x37, 0x79, 0x81, - 0xc7, 0x8a, 0xac, 0x53, 0x2e, 0xf0, 0x2a, 0x94, 0x44, 0xe4, 0x44, 0x84, 0x38, 0xa5, 0xa1, 0xe2, - 0x4b, 0xc2, 0xc2, 0xfc, 0x2c, 0x5c, 0x13, 0x29, 0xf1, 0x82, 0x04, 0xa9, 0xe0, 0x8a, 0x7b, 0xef, - 0x59, 0x6e, 0x50, 0x70, 0x03, 0x9c, 0xd2, 0xc0, 0x70, 0x83, 0xfc, 0xec, 0xf8, 0x64, 0xc1, 0xf9, - 0x62, 0x45, 0x42, 0xc3, 0xbd, 0xc8, 0xe6, 0xa1, 0xa2, 0x6b, 0x22, 0x15, 0x5e, 0xa7, 0x45, 0xfa, - 0xf1, 0x69, 0x42, 0x52, 0xc2, 0x12, 0xc2, 0x62, 0x4a, 0x64, 0xb8, 0xe0, 0x0b, 0x6e, 0x70, 0xf3, - 0x55, 0x52, 0x6a, 0xbb, 0x89, 0x57, 0x3c, 0x5e, 0xbe, 0xd5, 0xcd, 0xf1, 0x93, 0x3a, 0xee, 0x25, - 0x95, 0x8a, 0x8b, 0xcd, 0x5b, 0xec, 0xd1, 0x9f, 0x07, 0xf0, 0xe0, 0x59, 0x11, 0x3c, 0xe7, 0x4c, - 0x51, 0x96, 0x61, 0x45, 0x39, 0xf3, 0x1e, 0x42, 0x5b, 0x64, 0x6c, 0x46, 0x93, 0x01, 0x1a, 0xa2, - 0xb1, 0x13, 0x1d, 0x8a, 0x8c, 0x4d, 0x12, 0xef, 0x43, 0xb8, 0x3f, 0xa7, 0x42, 0xaa, 0x19, 0xc9, - 0x09, 0x53, 0x3a, 0x7c, 0x30, 0x44, 0xe3, 0x66, 0xe4, 0x1a, 0xf4, 0x2b, 0x0d, 0x4e, 0x12, 0x6f, - 0x04, 0xf7, 0x18, 0xf9, 0xa1, 0x42, 0x6a, 0x1a, 0x52, 0x57, 0x83, 0x96, 0x13, 0xc0, 0x03, 0x2a, - 0x67, 0x57, 0x5c, 0x2c, 0xe7, 0x2b, 0x7e, 0x35, 0x13, 0x19, 0x63, 0x94, 0x2d, 0x06, 0x87, 0x43, - 0x34, 0xee, 0x44, 0x7d, 0x2a, 0xbf, 0x29, 0x23, 0x51, 0x11, 0xf0, 0x3e, 0x86, 0x7e, 0x4a, 0x84, - 0xa4, 0x52, 0x11, 0x16, 0x93, 0x99, 0x91, 0x77, 
0xd0, 0x1e, 0xa2, 0xb1, 0x1b, 0xf5, 0x2a, 0x81, - 0xa9, 0xc6, 0xbd, 0x97, 0xf0, 0x48, 0x09, 0xcc, 0x24, 0xd5, 0xf7, 0xef, 0xee, 0x50, 0x58, 0x2e, - 0x07, 0x47, 0x43, 0x34, 0xee, 0x7e, 0xfa, 0x79, 0x50, 0xe7, 0x59, 0xa9, 0x52, 0x90, 0x9f, 0x05, - 0x53, 0x9b, 0x6e, 0xfb, 0x98, 0x62, 0xb9, 0x9c, 0xb0, 0x39, 0x8f, 0x1e, 0xaa, 0xba, 0x90, 0x77, - 0x0a, 0xee, 0x85, 0xc0, 0x2c, 0xbe, 0x2c, 0x5b, 0xeb, 0x98, 0xd6, 0xba, 0x05, 0x66, 0xba, 0x7a, - 0xde, 0xea, 0x38, 0x3d, 0x18, 0xfd, 0xdc, 0x84, 0x77, 0x23, 0x7c, 0x55, 0x27, 0xfa, 0x29, 0xb8, - 0x0c, 0xaf, 0x89, 0x4c, 0x71, 0x4c, 0xb4, 0x6c, 0x60, 0xa4, 0xef, 0xee, 0xb0, 0x49, 0xe2, 0x9d, - 0x40, 0x77, 0x37, 0x4f, 0xa9, 0xbe, 0x13, 0x81, 0x85, 0x26, 0x49, 0xc5, 0xb8, 0xe6, 0x1d, 0xe3, - 0xa4, 0xc2, 0xa2, 0xe2, 0x49, 0xab, 0x30, 0xce, 0xa0, 0x15, 0x53, 0xaa, 0xac, 0x5c, 0xeb, 0xca, - 0x99, 0x31, 0xa5, 0x19, 0xf5, 0xf7, 0xd4, 0xaf, 0x8b, 0x80, 0x37, 0x04, 0x97, 0xb0, 0x64, 0x5f, - 0xb3, 0x6d, 0x88, 0x40, 0x58, 0x62, 0x2b, 0x3e, 0x86, 0xfe, 0x9e, 0x61, 0xeb, 0x1d, 0x19, 0xda, - 0x3b, 0x96, 0x66, 0xab, 0xd5, 0x5a, 0xdc, 0xf9, 0x07, 0x8b, 0xbf, 0x83, 0x7e, 0x59, 0x6e, 0x56, - 0xd8, 0x46, 0x89, 0x1c, 0x38, 0xc6, 0xdc, 0x4f, 0xfe, 0xcd, 0xdc, 0xf2, 0xc2, 0x67, 0x36, 0x2f, - 0xea, 0xe5, 0x77, 0x90, 0xe7, 0xad, 0x0e, 0xea, 0x1d, 0x8c, 0x7e, 0x6d, 0x42, 0xcb, 0xba, 0xfb, - 0x37, 0x67, 0xd0, 0xff, 0xe7, 0xcc, 0x13, 0xf0, 0x64, 0x7c, 0x49, 0x92, 0x6c, 0x45, 0x92, 0xbb, - 0xee, 0xf4, 0x76, 0x11, 0xab, 0xe7, 0x00, 0x8e, 0xb0, 0xd2, 0xe3, 0x29, 0xe3, 0xca, 0x61, 0x64, - 0x8f, 0xfa, 0x7e, 0x1c, 0x2b, 0x9a, 0x53, 0xb5, 0xb1, 0x56, 0x38, 0x11, 0x58, 0x68, 0x92, 0x78, - 0x1f, 0xc0, 0xbd, 0xfd, 0x4f, 0x61, 0x93, 0x12, 0x63, 0x83, 0x13, 0xb9, 0x16, 0x9c, 0x6e, 0x52, - 0xa2, 0x49, 0xbb, 0x2a, 0x86, 0xd4, 0x29, 0x48, 0x16, 0x34, 0xa4, 0x2f, 0xe1, 0xd0, 0x2c, 0x9f, - 0x52, 0xef, 0x8f, 0x6a, 0xf5, 0x36, 0x8c, 0x42, 0xed, 0x58, 0x71, 0x71, 0xae, 0x8f, 0x51, 0x91, - 0xe7, 0x8d, 0xa1, 0x67, 0x1e, 0x53, 0x75, 0x62, 0x30, 0x13, 0xdf, 0x2f, 0xf1, 0xca, 0xbc, 0xf6, - 0xd5, 0x74, 0x0d, 0xc1, 0x1e, 0xbd, 0x73, 0x70, 0x6d, 0x0d, 0xbd, 0x51, 0x07, 0xae, 0xe9, 0xe5, - 0x38, 0x28, 0xd6, 0x6d, 0x60, 0xd7, 0x6d, 0x30, 0xb5, 0xeb, 0xf6, 0x69, 0xeb, 0xd5, 0x6f, 0x27, - 0x28, 0xea, 0x96, 0x59, 0x1a, 0x1f, 0xcd, 0xc1, 0x79, 0x91, 0x11, 0xb1, 0xf9, 0xaf, 0x26, 0xbf, - 0x0f, 0xa0, 0xb7, 0xc8, 0xec, 0x65, 0x46, 0x32, 0x52, 0x7a, 0xec, 0x68, 0xe4, 0x85, 0x06, 0xbc, - 0x47, 0x70, 0x64, 0xc2, 0x3b, 0x8f, 0xdb, 0xfa, 0x38, 0x49, 0x9e, 0x7e, 0x7f, 0x7d, 0xe3, 0x37, - 0x5e, 0xdf, 0xf8, 0x8d, 0x37, 0x37, 0x3e, 0xfa, 0x71, 0xeb, 0xa3, 0x9f, 0xb6, 0x3e, 0xfa, 0x65, - 0xeb, 0xa3, 0xeb, 0xad, 0x8f, 0x7e, 0xdf, 0xfa, 0xe8, 0x8f, 0xad, 0xdf, 0x78, 0xb3, 0xf5, 0xd1, - 0xab, 0x5b, 0xbf, 0x71, 0x7d, 0xeb, 0x37, 0x5e, 0xdf, 0xfa, 0x8d, 0x6f, 0xc7, 0x0b, 0xbe, 0x97, - 0x96, 0xf2, 0xba, 0xbf, 0xa2, 0x2f, 0xcc, 0xc7, 0x45, 0xdb, 0x8c, 0xfb, 0xd9, 0x5f, 0x01, 0x00, - 0x00, 0xff, 0xff, 0x5f, 0x35, 0x79, 0x84, 0xb7, 0x06, 0x00, 0x00, + // 822 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x95, 0x41, 0x8f, 0x1b, 0x35, + 0x14, 0xc7, 0x33, 0x4d, 0x36, 0x9b, 0xbc, 0xa4, 0x25, 0x99, 0x52, 0x1a, 0x45, 0x30, 0x9b, 0x0d, + 0x1c, 0x42, 0xa9, 0x26, 0x6c, 0x39, 0x21, 0x0e, 0x48, 0x5d, 0x21, 0x91, 0xde, 0x6a, 0x45, 0x20, + 0x21, 0x41, 0xe4, 0x9d, 0x79, 0xc9, 0x5a, 0x49, 0xec, 0xa9, 0xed, 0x99, 0x25, 0x37, 0x3e, 0x42, + 0x3f, 0x06, 0x7c, 0x13, 0x8e, 0x7b, 0x41, 0xea, 0x09, 0xd8, 0xec, 0x85, 0x63, 0x3f, 0x02, 0xb2, + 0x67, 0x9c, 0xa4, 0xbb, 0x41, 0x80, 0xd4, 0xdb, 0xf8, 
0xff, 0x7e, 0xb6, 0xdf, 0x7b, 0xff, 0x17, + 0x07, 0x1e, 0x69, 0x5c, 0x26, 0x42, 0xd2, 0xc5, 0x50, 0xa1, 0xcc, 0x50, 0x0e, 0x69, 0xc2, 0x86, + 0x5a, 0xcc, 0x91, 0x0f, 0xb3, 0x93, 0xe1, 0x12, 0x95, 0xa2, 0x33, 0x0c, 0x13, 0x29, 0xb4, 0xf0, + 0xdf, 0x77, 0x6c, 0x98, 0xb3, 0x21, 0x4d, 0x58, 0x68, 0xd9, 0x30, 0x3b, 0xe9, 0x1e, 0xcd, 0x84, + 0x98, 0x2d, 0x70, 0x68, 0xd9, 0xb3, 0x74, 0x3a, 0xd4, 0x6c, 0x89, 0x4a, 0xd3, 0x65, 0x92, 0x6f, + 0xef, 0x1e, 0xc7, 0x98, 0x20, 0x8f, 0x91, 0x47, 0x0c, 0xd5, 0x70, 0x26, 0x66, 0xc2, 0xea, 0xf6, + 0xab, 0x40, 0xf6, 0x66, 0x13, 0x2d, 0x44, 0x34, 0xbf, 0x95, 0x4d, 0xf7, 0xf1, 0x3e, 0xf6, 0x9c, + 0x29, 0x2d, 0xe4, 0xea, 0x16, 0xdd, 0xff, 0xad, 0x0c, 0xf7, 0xbf, 0xce, 0x83, 0xa7, 0x82, 0x6b, + 0xc6, 0x53, 0xaa, 0x99, 0xe0, 0xfe, 0x03, 0xa8, 0xca, 0x94, 0x4f, 0x58, 0xdc, 0xf1, 0x7a, 0xde, + 0xa0, 0x4e, 0x0e, 0x64, 0xca, 0x47, 0xb1, 0xff, 0x11, 0xdc, 0x9b, 0x32, 0xa9, 0xf4, 0x04, 0x33, + 0xe4, 0xda, 0x84, 0xef, 0xf4, 0xbc, 0x41, 0x99, 0x34, 0xad, 0xfa, 0x95, 0x11, 0x47, 0xb1, 0xdf, + 0x87, 0xbb, 0x1c, 0x7f, 0xdc, 0x81, 0xca, 0x16, 0x6a, 0x18, 0xd1, 0x31, 0x21, 0xdc, 0x67, 0x6a, + 0x72, 0x21, 0xe4, 0x7c, 0xba, 0x10, 0x17, 0x13, 0x99, 0x72, 0xce, 0xf8, 0xac, 0x73, 0xd0, 0xf3, + 0x06, 0x35, 0xd2, 0x66, 0xea, 0xdb, 0x22, 0x42, 0xf2, 0x80, 0xff, 0x09, 0xb4, 0x13, 0x94, 0x8a, + 0x29, 0x8d, 0x3c, 0xc2, 0x89, 0x6d, 0x6f, 0xa7, 0xda, 0xf3, 0x06, 0x4d, 0xd2, 0xda, 0x09, 0x8c, + 0x8d, 0xee, 0xbf, 0x80, 0x87, 0x5a, 0x52, 0xae, 0x98, 0xb9, 0x7f, 0x73, 0x87, 0xa6, 0x6a, 0xde, + 0x39, 0xec, 0x79, 0x83, 0xc6, 0x93, 0xcf, 0xc3, 0x7d, 0x9e, 0x15, 0x5d, 0x0a, 0xb3, 0x93, 0x70, + 0xec, 0xb6, 0xbb, 0x3c, 0xc6, 0x54, 0xcd, 0x47, 0x7c, 0x2a, 0xc8, 0x03, 0xbd, 0x2f, 0xe4, 0x1f, + 0x43, 0xf3, 0x4c, 0x52, 0x1e, 0x9d, 0x17, 0xa9, 0xd5, 0x6c, 0x6a, 0x8d, 0x5c, 0xcb, 0xb3, 0x8a, + 0xe1, 0xdd, 0xcc, 0x64, 0x2a, 0xf8, 0xa4, 0xb8, 0x69, 0xc2, 0x34, 0x2e, 0x3b, 0x60, 0x53, 0x7a, + 0xf2, 0x6f, 0x29, 0x7d, 0x93, 0xef, 0x2d, 0xdc, 0x1a, 0x69, 0x5c, 0x12, 0x3f, 0xbb, 0xa5, 0x3d, + 0xab, 0xd4, 0xea, 0x2d, 0xe8, 0xff, 0x52, 0x86, 0xf7, 0x08, 0xbd, 0xd8, 0x67, 0xed, 0x31, 0x34, + 0x39, 0x5d, 0xa2, 0x4a, 0x68, 0x84, 0xc6, 0x1c, 0xb0, 0x06, 0x37, 0x36, 0xda, 0x28, 0xf6, 0x8f, + 0xa0, 0xb1, 0xe9, 0x5a, 0xe1, 0x71, 0x9d, 0x80, 0x93, 0x46, 0xf1, 0xce, 0x78, 0x94, 0x6f, 0x8c, + 0x87, 0xd2, 0x54, 0xee, 0x38, 0x5f, 0xc9, 0xc7, 0xc3, 0xaa, 0x3b, 0xd6, 0xef, 0x52, 0x45, 0x0d, + 0xd6, 0xfa, 0x32, 0x69, 0x6f, 0xd1, 0xa2, 0x60, 0xbf, 0x07, 0x4d, 0xe4, 0xf1, 0xf6, 0xcc, 0xaa, + 0x05, 0x01, 0x79, 0xec, 0x4e, 0x7c, 0x04, 0xed, 0x2d, 0xe1, 0xce, 0x3b, 0xb4, 0xd8, 0x3b, 0x0e, + 0x73, 0xa7, 0xed, 0x1d, 0xa4, 0xda, 0x3f, 0x0c, 0xd2, 0xf7, 0xd0, 0x7e, 0xd3, 0x32, 0x86, 0xaa, + 0x53, 0xb7, 0x7e, 0x7d, 0xfa, 0xbf, 0xfc, 0x62, 0xa8, 0x48, 0x2b, 0xbb, 0xa1, 0x3c, 0xab, 0xd4, + 0xbc, 0xd6, 0x9d, 0xfe, 0xef, 0x65, 0xa8, 0xb8, 0x19, 0x7a, 0xc3, 0x19, 0xef, 0xed, 0x39, 0xf3, + 0x18, 0x7c, 0x15, 0x9d, 0x63, 0x9c, 0x2e, 0x30, 0xbe, 0xe9, 0x4e, 0x6b, 0x13, 0x71, 0xfd, 0xec, + 0xc0, 0x21, 0xd5, 0xa6, 0x3c, 0x6d, 0x5d, 0x39, 0x20, 0x6e, 0x69, 0xee, 0xa7, 0x91, 0x66, 0x19, + 0xd3, 0x2b, 0x67, 0x45, 0x9d, 0x80, 0x93, 0x46, 0xb1, 0xff, 0x21, 0xdc, 0xdd, 0xfe, 0xe0, 0x56, + 0x09, 0x5a, 0x1b, 0xea, 0xa4, 0xe9, 0xc4, 0xf1, 0x2a, 0x41, 0x03, 0x6d, 0x4e, 0xb1, 0x50, 0x2d, + 0x87, 0x9c, 0x68, 0xa1, 0x2f, 0xe1, 0xc0, 0x3e, 0x71, 0x45, 0xbf, 0x3f, 0xde, 0xdb, 0x6f, 0x4b, + 0xe4, 0xdd, 0x8e, 0xb4, 0x90, 0xa7, 0x66, 0x49, 0xf2, 0x7d, 0xfe, 0x00, 0x5a, 0x76, 0x98, 0x76, + 0x2b, 0x06, 0x5b, 0xf1, 0xbd, 0x42, 0xdf, 0xa9, 0xd7, 0x4d, 0x4d, 0xc3, 0x02, 
0x6e, 0xe9, 0x9f, + 0x42, 0xd3, 0x9d, 0x61, 0xde, 0xed, 0x4e, 0xd3, 0xe6, 0xd2, 0x0d, 0xf3, 0x47, 0x3d, 0x74, 0x8f, + 0x7a, 0x38, 0x76, 0x8f, 0xfa, 0xd3, 0xca, 0xcb, 0x3f, 0x8e, 0x3c, 0xd2, 0x28, 0x76, 0x19, 0xbd, + 0x3f, 0x85, 0xfa, 0xf3, 0x14, 0xe5, 0xea, 0xbf, 0x9a, 0xfc, 0x01, 0x80, 0x79, 0xab, 0x26, 0x2f, + 0x52, 0x4c, 0xb1, 0xf0, 0xb8, 0x6e, 0x94, 0xe7, 0x46, 0xf0, 0x1f, 0xc2, 0xa1, 0x0d, 0x6f, 0x3c, + 0xae, 0x9a, 0xe5, 0x28, 0x7e, 0xfa, 0xc3, 0xe5, 0x55, 0x50, 0x7a, 0x75, 0x15, 0x94, 0x5e, 0x5f, + 0x05, 0xde, 0x4f, 0xeb, 0xc0, 0xfb, 0x79, 0x1d, 0x78, 0xbf, 0xae, 0x03, 0xef, 0x72, 0x1d, 0x78, + 0x7f, 0xae, 0x03, 0xef, 0xaf, 0x75, 0x50, 0x7a, 0xbd, 0x0e, 0xbc, 0x97, 0xd7, 0x41, 0xe9, 0xf2, + 0x3a, 0x28, 0xbd, 0xba, 0x0e, 0x4a, 0xdf, 0x0d, 0x66, 0x62, 0xdb, 0x5a, 0x26, 0xf6, 0xfd, 0xe1, + 0x7d, 0x61, 0x3f, 0xce, 0xaa, 0xb6, 0xdc, 0xcf, 0xfe, 0x0e, 0x00, 0x00, 0xff, 0xff, 0xdb, 0x78, + 0xd0, 0xe3, 0x1d, 0x07, 0x00, 0x00, } func (this *HistoryContinuation) Equal(that interface{}) bool { @@ -550,6 +560,9 @@ if !bytes.Equal(this.BranchToken, that1.BranchToken) { return false } + if !this.VersionHistoryItem.Equal(that1.VersionHistoryItem) { + return false + } return true } func (this *RawHistoryContinuation) Equal(that interface{}) bool { @@ -695,7 +708,7 @@ if this == nil { return "nil" } - s := make([]string, 0, 11) + s := make([]string, 0, 12) s = append(s, "&token.HistoryContinuation{") s = append(s, "RunId: "+fmt.Sprintf("%#v", this.RunId)+",\n") s = append(s, "FirstEventId: "+fmt.Sprintf("%#v", this.FirstEventId)+",\n") @@ -706,6 +719,9 @@ s = append(s, "TransientWorkflowTask: "+fmt.Sprintf("%#v", this.TransientWorkflowTask)+",\n") } s = append(s, "BranchToken: "+fmt.Sprintf("%#v", this.BranchToken)+",\n") + if this.VersionHistoryItem != nil { + s = append(s, "VersionHistoryItem: "+fmt.Sprintf("%#v", this.VersionHistoryItem)+",\n") + } s = append(s, "}") return strings.Join(s, "") } @@ -792,6 +808,18 @@ _ = i var l int _ = l + if m.VersionHistoryItem != nil { + { + size, err := m.VersionHistoryItem.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMessage(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } if len(m.BranchToken) > 0 { i -= len(m.BranchToken) copy(dAtA[i:], m.BranchToken) @@ -952,12 +980,12 @@ var l int _ = l if m.StartedTime != nil { - n3, err3 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.StartedTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.StartedTime):]) - if err3 != nil { - return 0, err3 + n4, err4 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.StartedTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.StartedTime):]) + if err4 != nil { + return 0, err4 } - i -= n3 - i = encodeVarintMessage(dAtA, i, uint64(n3)) + i -= n4 + i = encodeVarintMessage(dAtA, i, uint64(n4)) i-- dAtA[i] = 0x62 } @@ -1124,6 +1152,10 @@ if l > 0 { n += 1 + l + sovMessage(uint64(l)) } + if m.VersionHistoryItem != nil { + l = m.VersionHistoryItem.Size() + n += 1 + l + sovMessage(uint64(l)) + } return n } @@ -1260,6 +1292,7 @@ `PersistenceToken:` + fmt.Sprintf("%v", this.PersistenceToken) + `,`, `TransientWorkflowTask:` + strings.Replace(fmt.Sprintf("%v", this.TransientWorkflowTask), "TransientWorkflowTaskInfo", "v1.TransientWorkflowTaskInfo", 1) + `,`, `BranchToken:` + fmt.Sprintf("%v", this.BranchToken) + `,`, + `VersionHistoryItem:` + strings.Replace(fmt.Sprintf("%v", this.VersionHistoryItem), "VersionHistoryItem", "v1.VersionHistoryItem", 1) + `,`, `}`, }, "") return s @@ -1546,6 +1579,42 @@ 
m.BranchToken = []byte{} } iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VersionHistoryItem", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMessage + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMessage + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.VersionHistoryItem == nil { + m.VersionHistoryItem = &v1.VersionHistoryItem{} + } + if err := m.VersionHistoryItem.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipMessage(dAtA[iNdEx:]) diff -Nru temporal-1.21.5-1/src/client/admin/client_gen.go temporal-1.22.5/src/client/admin/client_gen.go --- temporal-1.21.5-1/src/client/admin/client_gen.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/client/admin/client_gen.go 2024-02-23 09:45:43.000000000 +0000 @@ -123,6 +123,16 @@ return c.client.GetDLQReplicationMessages(ctx, request, opts...) } +func (c *clientImpl) GetNamespace( + ctx context.Context, + request *adminservice.GetNamespaceRequest, + opts ...grpc.CallOption, +) (*adminservice.GetNamespaceResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.GetNamespace(ctx, request, opts...) +} + func (c *clientImpl) GetNamespaceReplicationMessages( ctx context.Context, request *adminservice.GetNamespaceReplicationMessagesRequest, diff -Nru temporal-1.21.5-1/src/client/admin/metric_client.go temporal-1.22.5/src/client/admin/metric_client.go --- temporal-1.21.5-1/src/client/admin/metric_client.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/client/admin/metric_client.go 2024-02-23 09:45:43.000000000 +0000 @@ -97,7 +97,7 @@ opts ...grpc.CallOption, ) (_ adminservice.AdminService_StreamWorkflowReplicationMessagesClient, retError error) { - metricsHandler, startTime := c.startMetricsRecording(ctx, metrics.AdminStreamWorkflowReplicationMessagesScope) + metricsHandler, startTime := c.startMetricsRecording(ctx, metrics.AdminClientStreamWorkflowReplicationMessagesScope) defer func() { c.finishMetricsRecording(metricsHandler, startTime, retError) }() diff -Nru temporal-1.21.5-1/src/client/admin/metric_client_gen.go temporal-1.22.5/src/client/admin/metric_client_gen.go --- temporal-1.21.5-1/src/client/admin/metric_client_gen.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/client/admin/metric_client_gen.go 2024-02-23 09:45:43.000000000 +0000 @@ -161,6 +161,20 @@ return c.client.GetDLQReplicationMessages(ctx, request, opts...) } +func (c *metricClient) GetNamespace( + ctx context.Context, + request *adminservice.GetNamespaceRequest, + opts ...grpc.CallOption, +) (_ *adminservice.GetNamespaceResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, metrics.AdminClientGetNamespaceScope) + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.GetNamespace(ctx, request, opts...) 
+} + func (c *metricClient) GetNamespaceReplicationMessages( ctx context.Context, request *adminservice.GetNamespaceReplicationMessagesRequest, diff -Nru temporal-1.21.5-1/src/client/admin/retryable_client_gen.go temporal-1.22.5/src/client/admin/retryable_client_gen.go --- temporal-1.21.5-1/src/client/admin/retryable_client_gen.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/client/admin/retryable_client_gen.go 2024-02-23 09:45:43.000000000 +0000 @@ -170,6 +170,21 @@ return resp, err } +func (c *retryableClient) GetNamespace( + ctx context.Context, + request *adminservice.GetNamespaceRequest, + opts ...grpc.CallOption, +) (*adminservice.GetNamespaceResponse, error) { + var resp *adminservice.GetNamespaceResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.GetNamespace(ctx, request, opts...) + return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + func (c *retryableClient) GetNamespaceReplicationMessages( ctx context.Context, request *adminservice.GetNamespaceReplicationMessagesRequest, diff -Nru temporal-1.21.5-1/src/client/clientBean.go temporal-1.22.5/src/client/clientBean.go --- temporal-1.21.5-1/src/client/clientBean.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/client/clientBean.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,289 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
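The new GetNamespace admin RPC picks up the same generated wrappers as the existing calls: the metric client records start/finish timing around the call, and the retryable client captures the response in a closure and hands it to backoff.ThrottleRetryContext together with the client's policy and isRetryable predicate. A self-contained sketch of that retry shape, with the backoff helper and the RPC stubbed out (hypothetical names, and a fixed delay instead of the real policy):

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// callOnce stands in for a single RPC attempt; it is a hypothetical stub
// that fails twice and then succeeds.
func callOnce(_ context.Context, attempt int) (string, error) {
	if attempt < 3 {
		return "", errors.New("transient: try again")
	}
	return "ok", nil
}

// retry mirrors the shape of the generated retryable client methods: the
// result is captured by the closure, the retry helper only sees an error.
func retry(ctx context.Context, isRetryable func(error) bool) (string, error) {
	var resp string
	attempt := 0
	op := func(ctx context.Context) error {
		attempt++
		var err error
		resp, err = callOnce(ctx, attempt)
		return err
	}
	// Stand-in for backoff.ThrottleRetryContext: bounded attempts, fixed delay.
	var err error
	for i := 0; i < 5; i++ {
		if err = op(ctx); err == nil || !isRetryable(err) {
			break
		}
		time.Sleep(10 * time.Millisecond)
	}
	return resp, err
}

func main() {
	resp, err := retry(context.Background(), func(error) bool { return true })
	fmt.Println(resp, err) // "ok <nil>" after two retried attempts
}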
- -//go:generate mockgen -copyright_file ../LICENSE -package $GOPACKAGE -source $GOFILE -destination clientBean_mock.go - -package client - -import ( - "fmt" - "sync" - "sync/atomic" - - "go.temporal.io/api/serviceerror" - "go.temporal.io/api/workflowservice/v1" - "google.golang.org/grpc" - - "go.temporal.io/server/api/adminservice/v1" - "go.temporal.io/server/api/historyservice/v1" - "go.temporal.io/server/api/matchingservice/v1" - "go.temporal.io/server/client/admin" - "go.temporal.io/server/client/frontend" - "go.temporal.io/server/client/history" - "go.temporal.io/server/client/matching" - "go.temporal.io/server/common/cluster" -) - -type ( - // Bean is a collection of clients - Bean interface { - GetHistoryClient() historyservice.HistoryServiceClient - GetMatchingClient(namespaceIDToName NamespaceIDToNameFunc) (matchingservice.MatchingServiceClient, error) - GetFrontendClient() workflowservice.WorkflowServiceClient - GetRemoteAdminClient(string) (adminservice.AdminServiceClient, error) - SetRemoteAdminClient(string, adminservice.AdminServiceClient) - GetRemoteFrontendClient(string) (grpc.ClientConnInterface, workflowservice.WorkflowServiceClient, error) - } - - frontendClient struct { - connection grpc.ClientConnInterface - workflowservice.WorkflowServiceClient - } - - clientBeanImpl struct { - sync.Mutex - historyClient historyservice.HistoryServiceClient - matchingClient atomic.Value - clusterMetadata cluster.Metadata - factory Factory - - adminClientsLock sync.RWMutex - adminClients map[string]adminservice.AdminServiceClient - frontendClientsLock sync.RWMutex - frontendClients map[string]frontendClient - } -) - -// NewClientBean provides a collection of clients -func NewClientBean(factory Factory, clusterMetadata cluster.Metadata) (Bean, error) { - - historyClient, err := factory.NewHistoryClientWithTimeout(history.DefaultTimeout) - if err != nil { - return nil, err - } - - adminClients := map[string]adminservice.AdminServiceClient{} - frontendClients := map[string]frontendClient{} - - currentClusterName := clusterMetadata.GetCurrentClusterName() - // Init local cluster client with membership info - adminClient, err := factory.NewLocalAdminClientWithTimeout( - admin.DefaultTimeout, - admin.DefaultLargeTimeout, - ) - if err != nil { - return nil, err - } - conn, client, err := factory.NewLocalFrontendClientWithTimeout( - frontend.DefaultTimeout, - frontend.DefaultLongPollTimeout, - ) - if err != nil { - return nil, err - } - adminClients[currentClusterName] = adminClient - frontendClients[currentClusterName] = frontendClient{ - connection: conn, - WorkflowServiceClient: client, - } - - for clusterName, info := range clusterMetadata.GetAllClusterInfo() { - if !info.Enabled || clusterName == currentClusterName { - continue - } - adminClient = factory.NewRemoteAdminClientWithTimeout( - info.RPCAddress, - admin.DefaultTimeout, - admin.DefaultLargeTimeout, - ) - conn, client = factory.NewRemoteFrontendClientWithTimeout( - info.RPCAddress, - frontend.DefaultTimeout, - frontend.DefaultLongPollTimeout, - ) - adminClients[clusterName] = adminClient - frontendClients[clusterName] = frontendClient{ - connection: conn, - WorkflowServiceClient: client, - } - } - - bean := &clientBeanImpl{ - factory: factory, - historyClient: historyClient, - clusterMetadata: clusterMetadata, - adminClients: adminClients, - frontendClients: frontendClients, - } - bean.registerClientEviction() - return bean, nil -} - -func (h *clientBeanImpl) registerClientEviction() { - currentCluster := 
h.clusterMetadata.GetCurrentClusterName() - h.clusterMetadata.RegisterMetadataChangeCallback( - h, - func(oldClusterMetadata map[string]*cluster.ClusterInformation, newClusterMetadata map[string]*cluster.ClusterInformation) { - for clusterName := range newClusterMetadata { - if clusterName == currentCluster { - continue - } - h.adminClientsLock.Lock() - delete(h.adminClients, clusterName) - h.adminClientsLock.Unlock() - h.frontendClientsLock.Lock() - delete(h.frontendClients, clusterName) - h.frontendClientsLock.Unlock() - } - }) -} - -func (h *clientBeanImpl) GetHistoryClient() historyservice.HistoryServiceClient { - return h.historyClient -} - -func (h *clientBeanImpl) GetMatchingClient(namespaceIDToName NamespaceIDToNameFunc) (matchingservice.MatchingServiceClient, error) { - if client := h.matchingClient.Load(); client != nil { - return client.(matchingservice.MatchingServiceClient), nil - } - return h.lazyInitMatchingClient(namespaceIDToName) -} - -func (h *clientBeanImpl) GetFrontendClient() workflowservice.WorkflowServiceClient { - return h.frontendClients[h.clusterMetadata.GetCurrentClusterName()] -} - -func (h *clientBeanImpl) GetRemoteAdminClient(cluster string) (adminservice.AdminServiceClient, error) { - h.adminClientsLock.RLock() - client, ok := h.adminClients[cluster] - h.adminClientsLock.RUnlock() - if ok { - return client, nil - } - - clusterInfo, clusterFound := h.clusterMetadata.GetAllClusterInfo()[cluster] - if !clusterFound { - // We intentionally return internal error here. - // This error could only happen with internal mis-configuration. - // This can happen when a namespace is config for multiple clusters. But those clusters are not connected. - // We also have logic in task processing to drop tasks when namespace cluster exclude a local cluster. - return nil, &serviceerror.Internal{ - Message: fmt.Sprintf( - "Unknown cluster name: %v with given cluster information map: %v.", - cluster, - clusterInfo, - ), - } - } - - h.adminClientsLock.Lock() - defer h.adminClientsLock.Unlock() - client, ok = h.adminClients[cluster] - if ok { - return client, nil - } - - client = h.factory.NewRemoteAdminClientWithTimeout( - clusterInfo.RPCAddress, - admin.DefaultTimeout, - admin.DefaultLargeTimeout, - ) - h.adminClients[cluster] = client - return client, nil -} - -func (h *clientBeanImpl) SetRemoteAdminClient( - cluster string, - client adminservice.AdminServiceClient, -) { - h.adminClientsLock.Lock() - defer h.adminClientsLock.Unlock() - - h.adminClients[cluster] = client -} - -func (h *clientBeanImpl) GetRemoteFrontendClient(clusterName string) (grpc.ClientConnInterface, workflowservice.WorkflowServiceClient, error) { - h.frontendClientsLock.RLock() - client, ok := h.frontendClients[clusterName] - h.frontendClientsLock.RUnlock() - if ok { - return client.connection, client, nil - } - - clusterInfo, clusterFound := h.clusterMetadata.GetAllClusterInfo()[clusterName] - if !clusterFound { - // We intentionally return internal error here. - // This error could only happen with internal mis-configuration. - // This can happen when a namespace is config for multiple clusters. But those clusters are not connected. - // We also have logic in task processing to drop tasks when namespace cluster exclude a local cluster. 
- return nil, nil, &serviceerror.Internal{ - Message: fmt.Sprintf( - "Unknown clusterName name: %v with given clusterName information map: %v.", - clusterName, - clusterInfo, - ), - } - } - - h.frontendClientsLock.Lock() - defer h.frontendClientsLock.Unlock() - - client, ok = h.frontendClients[clusterName] - if ok { - return client.connection, client, nil - } - - conn, fClient := h.factory.NewRemoteFrontendClientWithTimeout( - clusterInfo.RPCAddress, - frontend.DefaultTimeout, - frontend.DefaultLongPollTimeout, - ) - client = frontendClient{ - connection: conn, - WorkflowServiceClient: fClient, - } - h.frontendClients[clusterName] = client - return client.connection, client, nil -} - -func (h *clientBeanImpl) setRemoteAdminClientLocked( - cluster string, - client adminservice.AdminServiceClient, -) { - h.adminClients[cluster] = client -} - -func (h *clientBeanImpl) lazyInitMatchingClient(namespaceIDToName NamespaceIDToNameFunc) (matchingservice.MatchingServiceClient, error) { - h.Lock() - defer h.Unlock() - if cached := h.matchingClient.Load(); cached != nil { - return cached.(matchingservice.MatchingServiceClient), nil - } - client, err := h.factory.NewMatchingClientWithTimeout(namespaceIDToName, matching.DefaultTimeout, matching.DefaultLongPollTimeout) - if err != nil { - return nil, err - } - h.matchingClient.Store(client) - return client, nil -} diff -Nru temporal-1.21.5-1/src/client/clientBean_mock.go temporal-1.22.5/src/client/clientBean_mock.go --- temporal-1.21.5-1/src/client/clientBean_mock.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/client/clientBean_mock.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,149 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Code generated by MockGen. DO NOT EDIT. -// Source: clientBean.go - -// Package client is a generated GoMock package. -package client - -import ( - reflect "reflect" - - gomock "github.com/golang/mock/gomock" - v1 "go.temporal.io/api/workflowservice/v1" - v10 "go.temporal.io/server/api/adminservice/v1" - v11 "go.temporal.io/server/api/historyservice/v1" - v12 "go.temporal.io/server/api/matchingservice/v1" - grpc "google.golang.org/grpc" -) - -// MockBean is a mock of Bean interface. -type MockBean struct { - ctrl *gomock.Controller - recorder *MockBeanMockRecorder -} - -// MockBeanMockRecorder is the mock recorder for MockBean. 
-type MockBeanMockRecorder struct { - mock *MockBean -} - -// NewMockBean creates a new mock instance. -func NewMockBean(ctrl *gomock.Controller) *MockBean { - mock := &MockBean{ctrl: ctrl} - mock.recorder = &MockBeanMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockBean) EXPECT() *MockBeanMockRecorder { - return m.recorder -} - -// GetFrontendClient mocks base method. -func (m *MockBean) GetFrontendClient() v1.WorkflowServiceClient { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetFrontendClient") - ret0, _ := ret[0].(v1.WorkflowServiceClient) - return ret0 -} - -// GetFrontendClient indicates an expected call of GetFrontendClient. -func (mr *MockBeanMockRecorder) GetFrontendClient() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFrontendClient", reflect.TypeOf((*MockBean)(nil).GetFrontendClient)) -} - -// GetHistoryClient mocks base method. -func (m *MockBean) GetHistoryClient() v11.HistoryServiceClient { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetHistoryClient") - ret0, _ := ret[0].(v11.HistoryServiceClient) - return ret0 -} - -// GetHistoryClient indicates an expected call of GetHistoryClient. -func (mr *MockBeanMockRecorder) GetHistoryClient() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHistoryClient", reflect.TypeOf((*MockBean)(nil).GetHistoryClient)) -} - -// GetMatchingClient mocks base method. -func (m *MockBean) GetMatchingClient(namespaceIDToName NamespaceIDToNameFunc) (v12.MatchingServiceClient, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetMatchingClient", namespaceIDToName) - ret0, _ := ret[0].(v12.MatchingServiceClient) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetMatchingClient indicates an expected call of GetMatchingClient. -func (mr *MockBeanMockRecorder) GetMatchingClient(namespaceIDToName interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMatchingClient", reflect.TypeOf((*MockBean)(nil).GetMatchingClient), namespaceIDToName) -} - -// GetRemoteAdminClient mocks base method. -func (m *MockBean) GetRemoteAdminClient(arg0 string) (v10.AdminServiceClient, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetRemoteAdminClient", arg0) - ret0, _ := ret[0].(v10.AdminServiceClient) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetRemoteAdminClient indicates an expected call of GetRemoteAdminClient. -func (mr *MockBeanMockRecorder) GetRemoteAdminClient(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRemoteAdminClient", reflect.TypeOf((*MockBean)(nil).GetRemoteAdminClient), arg0) -} - -// GetRemoteFrontendClient mocks base method. -func (m *MockBean) GetRemoteFrontendClient(arg0 string) (grpc.ClientConnInterface, v1.WorkflowServiceClient, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetRemoteFrontendClient", arg0) - ret0, _ := ret[0].(grpc.ClientConnInterface) - ret1, _ := ret[1].(v1.WorkflowServiceClient) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 -} - -// GetRemoteFrontendClient indicates an expected call of GetRemoteFrontendClient. 
-func (mr *MockBeanMockRecorder) GetRemoteFrontendClient(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRemoteFrontendClient", reflect.TypeOf((*MockBean)(nil).GetRemoteFrontendClient), arg0) -} - -// SetRemoteAdminClient mocks base method. -func (m *MockBean) SetRemoteAdminClient(arg0 string, arg1 v10.AdminServiceClient) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetRemoteAdminClient", arg0, arg1) -} - -// SetRemoteAdminClient indicates an expected call of SetRemoteAdminClient. -func (mr *MockBeanMockRecorder) SetRemoteAdminClient(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetRemoteAdminClient", reflect.TypeOf((*MockBean)(nil).SetRemoteAdminClient), arg0, arg1) -} diff -Nru temporal-1.21.5-1/src/client/clientFactory_mock.go temporal-1.22.5/src/client/clientFactory_mock.go --- temporal-1.21.5-1/src/client/clientFactory_mock.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/client/clientFactory_mock.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,196 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Code generated by MockGen. DO NOT EDIT. -// Source: clientfactory.go - -// Package client is a generated GoMock package. -package client - -import ( - reflect "reflect" - time "time" - - gomock "github.com/golang/mock/gomock" - v1 "go.temporal.io/api/workflowservice/v1" - v10 "go.temporal.io/server/api/adminservice/v1" - v11 "go.temporal.io/server/api/historyservice/v1" - v12 "go.temporal.io/server/api/matchingservice/v1" - common "go.temporal.io/server/common" - dynamicconfig "go.temporal.io/server/common/dynamicconfig" - log "go.temporal.io/server/common/log" - membership "go.temporal.io/server/common/membership" - metrics "go.temporal.io/server/common/metrics" - grpc "google.golang.org/grpc" -) - -// MockFactory is a mock of Factory interface. -type MockFactory struct { - ctrl *gomock.Controller - recorder *MockFactoryMockRecorder -} - -// MockFactoryMockRecorder is the mock recorder for MockFactory. -type MockFactoryMockRecorder struct { - mock *MockFactory -} - -// NewMockFactory creates a new mock instance. 
-func NewMockFactory(ctrl *gomock.Controller) *MockFactory { - mock := &MockFactory{ctrl: ctrl} - mock.recorder = &MockFactoryMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockFactory) EXPECT() *MockFactoryMockRecorder { - return m.recorder -} - -// NewHistoryClientWithTimeout mocks base method. -func (m *MockFactory) NewHistoryClientWithTimeout(timeout time.Duration) (v11.HistoryServiceClient, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewHistoryClientWithTimeout", timeout) - ret0, _ := ret[0].(v11.HistoryServiceClient) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// NewHistoryClientWithTimeout indicates an expected call of NewHistoryClientWithTimeout. -func (mr *MockFactoryMockRecorder) NewHistoryClientWithTimeout(timeout interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewHistoryClientWithTimeout", reflect.TypeOf((*MockFactory)(nil).NewHistoryClientWithTimeout), timeout) -} - -// NewLocalAdminClientWithTimeout mocks base method. -func (m *MockFactory) NewLocalAdminClientWithTimeout(timeout, largeTimeout time.Duration) (v10.AdminServiceClient, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewLocalAdminClientWithTimeout", timeout, largeTimeout) - ret0, _ := ret[0].(v10.AdminServiceClient) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// NewLocalAdminClientWithTimeout indicates an expected call of NewLocalAdminClientWithTimeout. -func (mr *MockFactoryMockRecorder) NewLocalAdminClientWithTimeout(timeout, largeTimeout interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewLocalAdminClientWithTimeout", reflect.TypeOf((*MockFactory)(nil).NewLocalAdminClientWithTimeout), timeout, largeTimeout) -} - -// NewLocalFrontendClientWithTimeout mocks base method. -func (m *MockFactory) NewLocalFrontendClientWithTimeout(timeout, longPollTimeout time.Duration) (grpc.ClientConnInterface, v1.WorkflowServiceClient, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewLocalFrontendClientWithTimeout", timeout, longPollTimeout) - ret0, _ := ret[0].(grpc.ClientConnInterface) - ret1, _ := ret[1].(v1.WorkflowServiceClient) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 -} - -// NewLocalFrontendClientWithTimeout indicates an expected call of NewLocalFrontendClientWithTimeout. -func (mr *MockFactoryMockRecorder) NewLocalFrontendClientWithTimeout(timeout, longPollTimeout interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewLocalFrontendClientWithTimeout", reflect.TypeOf((*MockFactory)(nil).NewLocalFrontendClientWithTimeout), timeout, longPollTimeout) -} - -// NewMatchingClientWithTimeout mocks base method. -func (m *MockFactory) NewMatchingClientWithTimeout(namespaceIDToName NamespaceIDToNameFunc, timeout, longPollTimeout time.Duration) (v12.MatchingServiceClient, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewMatchingClientWithTimeout", namespaceIDToName, timeout, longPollTimeout) - ret0, _ := ret[0].(v12.MatchingServiceClient) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// NewMatchingClientWithTimeout indicates an expected call of NewMatchingClientWithTimeout. 
-func (mr *MockFactoryMockRecorder) NewMatchingClientWithTimeout(namespaceIDToName, timeout, longPollTimeout interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewMatchingClientWithTimeout", reflect.TypeOf((*MockFactory)(nil).NewMatchingClientWithTimeout), namespaceIDToName, timeout, longPollTimeout) -} - -// NewRemoteAdminClientWithTimeout mocks base method. -func (m *MockFactory) NewRemoteAdminClientWithTimeout(rpcAddress string, timeout, largeTimeout time.Duration) v10.AdminServiceClient { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewRemoteAdminClientWithTimeout", rpcAddress, timeout, largeTimeout) - ret0, _ := ret[0].(v10.AdminServiceClient) - return ret0 -} - -// NewRemoteAdminClientWithTimeout indicates an expected call of NewRemoteAdminClientWithTimeout. -func (mr *MockFactoryMockRecorder) NewRemoteAdminClientWithTimeout(rpcAddress, timeout, largeTimeout interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewRemoteAdminClientWithTimeout", reflect.TypeOf((*MockFactory)(nil).NewRemoteAdminClientWithTimeout), rpcAddress, timeout, largeTimeout) -} - -// NewRemoteFrontendClientWithTimeout mocks base method. -func (m *MockFactory) NewRemoteFrontendClientWithTimeout(rpcAddress string, timeout, longPollTimeout time.Duration) (grpc.ClientConnInterface, v1.WorkflowServiceClient) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewRemoteFrontendClientWithTimeout", rpcAddress, timeout, longPollTimeout) - ret0, _ := ret[0].(grpc.ClientConnInterface) - ret1, _ := ret[1].(v1.WorkflowServiceClient) - return ret0, ret1 -} - -// NewRemoteFrontendClientWithTimeout indicates an expected call of NewRemoteFrontendClientWithTimeout. -func (mr *MockFactoryMockRecorder) NewRemoteFrontendClientWithTimeout(rpcAddress, timeout, longPollTimeout interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewRemoteFrontendClientWithTimeout", reflect.TypeOf((*MockFactory)(nil).NewRemoteFrontendClientWithTimeout), rpcAddress, timeout, longPollTimeout) -} - -// MockFactoryProvider is a mock of FactoryProvider interface. -type MockFactoryProvider struct { - ctrl *gomock.Controller - recorder *MockFactoryProviderMockRecorder -} - -// MockFactoryProviderMockRecorder is the mock recorder for MockFactoryProvider. -type MockFactoryProviderMockRecorder struct { - mock *MockFactoryProvider -} - -// NewMockFactoryProvider creates a new mock instance. -func NewMockFactoryProvider(ctrl *gomock.Controller) *MockFactoryProvider { - mock := &MockFactoryProvider{ctrl: ctrl} - mock.recorder = &MockFactoryProviderMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockFactoryProvider) EXPECT() *MockFactoryProviderMockRecorder { - return m.recorder -} - -// NewFactory mocks base method. -func (m *MockFactoryProvider) NewFactory(rpcFactory common.RPCFactory, monitor membership.Monitor, metricsHandler metrics.Handler, dc *dynamicconfig.Collection, numberOfHistoryShards int32, logger, throttledLogger log.Logger) Factory { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewFactory", rpcFactory, monitor, metricsHandler, dc, numberOfHistoryShards, logger, throttledLogger) - ret0, _ := ret[0].(Factory) - return ret0 -} - -// NewFactory indicates an expected call of NewFactory. 
-func (mr *MockFactoryProviderMockRecorder) NewFactory(rpcFactory, monitor, metricsHandler, dc, numberOfHistoryShards, logger, throttledLogger interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewFactory", reflect.TypeOf((*MockFactoryProvider)(nil).NewFactory), rpcFactory, monitor, metricsHandler, dc, numberOfHistoryShards, logger, throttledLogger) -} diff -Nru temporal-1.21.5-1/src/client/client_bean.go temporal-1.22.5/src/client/client_bean.go --- temporal-1.21.5-1/src/client/client_bean.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/client/client_bean.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,289 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
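With clientBean.go renamed to client_bean.go, the //go:generate mockgen directive regenerates the mock as client_bean_mock.go with the same MockBean and MockBeanMockRecorder types, so existing consumers of the mock keep working. A hypothetical test sketch showing the usual way the regenerated mock is wired up with gomock (the expectation and nil return value are illustrative only, not part of the change set):

package client_test

import (
	"testing"

	"github.com/golang/mock/gomock"

	"go.temporal.io/server/client"
)

// TestBeanConsumer is a hypothetical example of consuming the regenerated mock.
func TestBeanConsumer(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	bean := client.NewMockBean(ctrl)
	// Expect exactly one lookup of the history client; return nil for brevity.
	bean.EXPECT().GetHistoryClient().Return(nil).Times(1)

	if c := bean.GetHistoryClient(); c != nil {
		t.Fatalf("expected nil history client from the mock, got %v", c)
	}
}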
+ +//go:generate mockgen -copyright_file ../LICENSE -package $GOPACKAGE -source $GOFILE -destination client_bean_mock.go + +package client + +import ( + "fmt" + "sync" + "sync/atomic" + + "go.temporal.io/api/serviceerror" + "go.temporal.io/api/workflowservice/v1" + "google.golang.org/grpc" + + "go.temporal.io/server/api/adminservice/v1" + "go.temporal.io/server/api/historyservice/v1" + "go.temporal.io/server/api/matchingservice/v1" + "go.temporal.io/server/client/admin" + "go.temporal.io/server/client/frontend" + "go.temporal.io/server/client/history" + "go.temporal.io/server/client/matching" + "go.temporal.io/server/common/cluster" +) + +type ( + // Bean is a collection of clients + Bean interface { + GetHistoryClient() historyservice.HistoryServiceClient + GetMatchingClient(namespaceIDToName NamespaceIDToNameFunc) (matchingservice.MatchingServiceClient, error) + GetFrontendClient() workflowservice.WorkflowServiceClient + GetRemoteAdminClient(string) (adminservice.AdminServiceClient, error) + SetRemoteAdminClient(string, adminservice.AdminServiceClient) + GetRemoteFrontendClient(string) (grpc.ClientConnInterface, workflowservice.WorkflowServiceClient, error) + } + + frontendClient struct { + connection grpc.ClientConnInterface + workflowservice.WorkflowServiceClient + } + + clientBeanImpl struct { + sync.Mutex + historyClient historyservice.HistoryServiceClient + matchingClient atomic.Value + clusterMetadata cluster.Metadata + factory Factory + + adminClientsLock sync.RWMutex + adminClients map[string]adminservice.AdminServiceClient + frontendClientsLock sync.RWMutex + frontendClients map[string]frontendClient + } +) + +// NewClientBean provides a collection of clients +func NewClientBean(factory Factory, clusterMetadata cluster.Metadata) (Bean, error) { + + historyClient, err := factory.NewHistoryClientWithTimeout(history.DefaultTimeout) + if err != nil { + return nil, err + } + + adminClients := map[string]adminservice.AdminServiceClient{} + frontendClients := map[string]frontendClient{} + + currentClusterName := clusterMetadata.GetCurrentClusterName() + // Init local cluster client with membership info + adminClient, err := factory.NewLocalAdminClientWithTimeout( + admin.DefaultTimeout, + admin.DefaultLargeTimeout, + ) + if err != nil { + return nil, err + } + conn, client, err := factory.NewLocalFrontendClientWithTimeout( + frontend.DefaultTimeout, + frontend.DefaultLongPollTimeout, + ) + if err != nil { + return nil, err + } + adminClients[currentClusterName] = adminClient + frontendClients[currentClusterName] = frontendClient{ + connection: conn, + WorkflowServiceClient: client, + } + + for clusterName, info := range clusterMetadata.GetAllClusterInfo() { + if !info.Enabled || clusterName == currentClusterName { + continue + } + adminClient = factory.NewRemoteAdminClientWithTimeout( + info.RPCAddress, + admin.DefaultTimeout, + admin.DefaultLargeTimeout, + ) + conn, client = factory.NewRemoteFrontendClientWithTimeout( + info.RPCAddress, + frontend.DefaultTimeout, + frontend.DefaultLongPollTimeout, + ) + adminClients[clusterName] = adminClient + frontendClients[clusterName] = frontendClient{ + connection: conn, + WorkflowServiceClient: client, + } + } + + bean := &clientBeanImpl{ + factory: factory, + historyClient: historyClient, + clusterMetadata: clusterMetadata, + adminClients: adminClients, + frontendClients: frontendClients, + } + bean.registerClientEviction() + return bean, nil +} + +func (h *clientBeanImpl) registerClientEviction() { + currentCluster := 
h.clusterMetadata.GetCurrentClusterName() + h.clusterMetadata.RegisterMetadataChangeCallback( + h, + func(oldClusterMetadata map[string]*cluster.ClusterInformation, newClusterMetadata map[string]*cluster.ClusterInformation) { + for clusterName := range newClusterMetadata { + if clusterName == currentCluster { + continue + } + h.adminClientsLock.Lock() + delete(h.adminClients, clusterName) + h.adminClientsLock.Unlock() + h.frontendClientsLock.Lock() + delete(h.frontendClients, clusterName) + h.frontendClientsLock.Unlock() + } + }) +} + +func (h *clientBeanImpl) GetHistoryClient() historyservice.HistoryServiceClient { + return h.historyClient +} + +func (h *clientBeanImpl) GetMatchingClient(namespaceIDToName NamespaceIDToNameFunc) (matchingservice.MatchingServiceClient, error) { + if client := h.matchingClient.Load(); client != nil { + return client.(matchingservice.MatchingServiceClient), nil + } + return h.lazyInitMatchingClient(namespaceIDToName) +} + +func (h *clientBeanImpl) GetFrontendClient() workflowservice.WorkflowServiceClient { + return h.frontendClients[h.clusterMetadata.GetCurrentClusterName()] +} + +func (h *clientBeanImpl) GetRemoteAdminClient(cluster string) (adminservice.AdminServiceClient, error) { + h.adminClientsLock.RLock() + client, ok := h.adminClients[cluster] + h.adminClientsLock.RUnlock() + if ok { + return client, nil + } + + clusterInfo, clusterFound := h.clusterMetadata.GetAllClusterInfo()[cluster] + if !clusterFound { + // We intentionally return internal error here. + // This error could only happen with internal mis-configuration. + // This can happen when a namespace is config for multiple clusters. But those clusters are not connected. + // We also have logic in task processing to drop tasks when namespace cluster exclude a local cluster. + return nil, &serviceerror.Internal{ + Message: fmt.Sprintf( + "Unknown cluster name: %v with given cluster information map: %v.", + cluster, + clusterInfo, + ), + } + } + + h.adminClientsLock.Lock() + defer h.adminClientsLock.Unlock() + client, ok = h.adminClients[cluster] + if ok { + return client, nil + } + + client = h.factory.NewRemoteAdminClientWithTimeout( + clusterInfo.RPCAddress, + admin.DefaultTimeout, + admin.DefaultLargeTimeout, + ) + h.adminClients[cluster] = client + return client, nil +} + +func (h *clientBeanImpl) SetRemoteAdminClient( + cluster string, + client adminservice.AdminServiceClient, +) { + h.adminClientsLock.Lock() + defer h.adminClientsLock.Unlock() + + h.adminClients[cluster] = client +} + +func (h *clientBeanImpl) GetRemoteFrontendClient(clusterName string) (grpc.ClientConnInterface, workflowservice.WorkflowServiceClient, error) { + h.frontendClientsLock.RLock() + client, ok := h.frontendClients[clusterName] + h.frontendClientsLock.RUnlock() + if ok { + return client.connection, client, nil + } + + clusterInfo, clusterFound := h.clusterMetadata.GetAllClusterInfo()[clusterName] + if !clusterFound { + // We intentionally return internal error here. + // This error could only happen with internal mis-configuration. + // This can happen when a namespace is config for multiple clusters. But those clusters are not connected. + // We also have logic in task processing to drop tasks when namespace cluster exclude a local cluster. 
+ return nil, nil, &serviceerror.Internal{ + Message: fmt.Sprintf( + "Unknown clusterName name: %v with given clusterName information map: %v.", + clusterName, + clusterInfo, + ), + } + } + + h.frontendClientsLock.Lock() + defer h.frontendClientsLock.Unlock() + + client, ok = h.frontendClients[clusterName] + if ok { + return client.connection, client, nil + } + + conn, fClient := h.factory.NewRemoteFrontendClientWithTimeout( + clusterInfo.RPCAddress, + frontend.DefaultTimeout, + frontend.DefaultLongPollTimeout, + ) + client = frontendClient{ + connection: conn, + WorkflowServiceClient: fClient, + } + h.frontendClients[clusterName] = client + return client.connection, client, nil +} + +func (h *clientBeanImpl) setRemoteAdminClientLocked( + cluster string, + client adminservice.AdminServiceClient, +) { + h.adminClients[cluster] = client +} + +func (h *clientBeanImpl) lazyInitMatchingClient(namespaceIDToName NamespaceIDToNameFunc) (matchingservice.MatchingServiceClient, error) { + h.Lock() + defer h.Unlock() + if cached := h.matchingClient.Load(); cached != nil { + return cached.(matchingservice.MatchingServiceClient), nil + } + client, err := h.factory.NewMatchingClientWithTimeout(namespaceIDToName, matching.DefaultTimeout, matching.DefaultLongPollTimeout) + if err != nil { + return nil, err + } + h.matchingClient.Store(client) + return client, nil +} diff -Nru temporal-1.21.5-1/src/client/client_bean_mock.go temporal-1.22.5/src/client/client_bean_mock.go --- temporal-1.21.5-1/src/client/client_bean_mock.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/client/client_bean_mock.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,149 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Code generated by MockGen. DO NOT EDIT. +// Source: client_bean.go + +// Package client is a generated GoMock package. +package client + +import ( + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + v1 "go.temporal.io/api/workflowservice/v1" + v10 "go.temporal.io/server/api/adminservice/v1" + v11 "go.temporal.io/server/api/historyservice/v1" + v12 "go.temporal.io/server/api/matchingservice/v1" + grpc "google.golang.org/grpc" +) + +// MockBean is a mock of Bean interface. +type MockBean struct { + ctrl *gomock.Controller + recorder *MockBeanMockRecorder +} + +// MockBeanMockRecorder is the mock recorder for MockBean. 
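The lazy, double-checked initialization in clientBeanImpl.lazyInitMatchingClient above (a lock-free fast path through atomic.Value, then a re-check under the mutex before building) is a general Go pattern; a minimal, self-contained sketch with placeholder names (lazyClient, build are not from the patch):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// lazyClient caches an expensively constructed value, building it at most once.
type lazyClient struct {
	mu    sync.Mutex
	value atomic.Value // holds a *string in this sketch
	build func() (*string, error)
}

func (l *lazyClient) get() (*string, error) {
	// Fast path: no locking once a value has been stored.
	if v := l.value.Load(); v != nil {
		return v.(*string), nil
	}
	// Slow path: serialize builders and re-check before constructing.
	l.mu.Lock()
	defer l.mu.Unlock()
	if v := l.value.Load(); v != nil {
		return v.(*string), nil
	}
	built, err := l.build()
	if err != nil {
		return nil, err // nothing cached; the next caller retries
	}
	l.value.Store(built)
	return built, nil
}

func main() {
	calls := 0
	l := &lazyClient{build: func() (*string, error) {
		calls++
		s := "matching-client"
		return &s, nil
	}}
	a, _ := l.get()
	b, _ := l.get()
	fmt.Println(*a, *b, "built", calls, "time(s)") // built exactly once
}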
+type MockBeanMockRecorder struct { + mock *MockBean +} + +// NewMockBean creates a new mock instance. +func NewMockBean(ctrl *gomock.Controller) *MockBean { + mock := &MockBean{ctrl: ctrl} + mock.recorder = &MockBeanMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockBean) EXPECT() *MockBeanMockRecorder { + return m.recorder +} + +// GetFrontendClient mocks base method. +func (m *MockBean) GetFrontendClient() v1.WorkflowServiceClient { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetFrontendClient") + ret0, _ := ret[0].(v1.WorkflowServiceClient) + return ret0 +} + +// GetFrontendClient indicates an expected call of GetFrontendClient. +func (mr *MockBeanMockRecorder) GetFrontendClient() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFrontendClient", reflect.TypeOf((*MockBean)(nil).GetFrontendClient)) +} + +// GetHistoryClient mocks base method. +func (m *MockBean) GetHistoryClient() v11.HistoryServiceClient { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetHistoryClient") + ret0, _ := ret[0].(v11.HistoryServiceClient) + return ret0 +} + +// GetHistoryClient indicates an expected call of GetHistoryClient. +func (mr *MockBeanMockRecorder) GetHistoryClient() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHistoryClient", reflect.TypeOf((*MockBean)(nil).GetHistoryClient)) +} + +// GetMatchingClient mocks base method. +func (m *MockBean) GetMatchingClient(namespaceIDToName NamespaceIDToNameFunc) (v12.MatchingServiceClient, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetMatchingClient", namespaceIDToName) + ret0, _ := ret[0].(v12.MatchingServiceClient) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetMatchingClient indicates an expected call of GetMatchingClient. +func (mr *MockBeanMockRecorder) GetMatchingClient(namespaceIDToName interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMatchingClient", reflect.TypeOf((*MockBean)(nil).GetMatchingClient), namespaceIDToName) +} + +// GetRemoteAdminClient mocks base method. +func (m *MockBean) GetRemoteAdminClient(arg0 string) (v10.AdminServiceClient, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetRemoteAdminClient", arg0) + ret0, _ := ret[0].(v10.AdminServiceClient) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetRemoteAdminClient indicates an expected call of GetRemoteAdminClient. +func (mr *MockBeanMockRecorder) GetRemoteAdminClient(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRemoteAdminClient", reflect.TypeOf((*MockBean)(nil).GetRemoteAdminClient), arg0) +} + +// GetRemoteFrontendClient mocks base method. +func (m *MockBean) GetRemoteFrontendClient(arg0 string) (grpc.ClientConnInterface, v1.WorkflowServiceClient, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetRemoteFrontendClient", arg0) + ret0, _ := ret[0].(grpc.ClientConnInterface) + ret1, _ := ret[1].(v1.WorkflowServiceClient) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// GetRemoteFrontendClient indicates an expected call of GetRemoteFrontendClient. 
+func (mr *MockBeanMockRecorder) GetRemoteFrontendClient(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRemoteFrontendClient", reflect.TypeOf((*MockBean)(nil).GetRemoteFrontendClient), arg0) +} + +// SetRemoteAdminClient mocks base method. +func (m *MockBean) SetRemoteAdminClient(arg0 string, arg1 v10.AdminServiceClient) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetRemoteAdminClient", arg0, arg1) +} + +// SetRemoteAdminClient indicates an expected call of SetRemoteAdminClient. +func (mr *MockBeanMockRecorder) SetRemoteAdminClient(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetRemoteAdminClient", reflect.TypeOf((*MockBean)(nil).SetRemoteAdminClient), arg0, arg1) +} diff -Nru temporal-1.21.5-1/src/client/client_factory_mock.go temporal-1.22.5/src/client/client_factory_mock.go --- temporal-1.21.5-1/src/client/client_factory_mock.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/client/client_factory_mock.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,196 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Code generated by MockGen. DO NOT EDIT. +// Source: clientfactory.go + +// Package client is a generated GoMock package. +package client + +import ( + reflect "reflect" + time "time" + + gomock "github.com/golang/mock/gomock" + v1 "go.temporal.io/api/workflowservice/v1" + v10 "go.temporal.io/server/api/adminservice/v1" + v11 "go.temporal.io/server/api/historyservice/v1" + v12 "go.temporal.io/server/api/matchingservice/v1" + common "go.temporal.io/server/common" + dynamicconfig "go.temporal.io/server/common/dynamicconfig" + log "go.temporal.io/server/common/log" + membership "go.temporal.io/server/common/membership" + metrics "go.temporal.io/server/common/metrics" + grpc "google.golang.org/grpc" +) + +// MockFactory is a mock of Factory interface. +type MockFactory struct { + ctrl *gomock.Controller + recorder *MockFactoryMockRecorder +} + +// MockFactoryMockRecorder is the mock recorder for MockFactory. +type MockFactoryMockRecorder struct { + mock *MockFactory +} + +// NewMockFactory creates a new mock instance. 
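The MockBean above is a standard gomock-generated mock; a hedged sketch of driving it from a unit test (the cluster name and the stubbed error are illustrative, not taken from the patch):

package client

import (
	"testing"

	"github.com/golang/mock/gomock"
	"go.temporal.io/api/serviceerror"
)

func TestRemoteAdminClientLookup(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	bean := NewMockBean(ctrl)
	// Expect exactly one lookup of the remote cluster and stub its result.
	bean.EXPECT().
		GetRemoteAdminClient("cluster-b").
		Return(nil, serviceerror.NewInternal("cluster-b is not configured")).
		Times(1)

	if _, err := bean.GetRemoteAdminClient("cluster-b"); err == nil {
		t.Fatal("expected an error for an unconfigured remote cluster")
	}
}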
+func NewMockFactory(ctrl *gomock.Controller) *MockFactory { + mock := &MockFactory{ctrl: ctrl} + mock.recorder = &MockFactoryMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockFactory) EXPECT() *MockFactoryMockRecorder { + return m.recorder +} + +// NewHistoryClientWithTimeout mocks base method. +func (m *MockFactory) NewHistoryClientWithTimeout(timeout time.Duration) (v11.HistoryServiceClient, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewHistoryClientWithTimeout", timeout) + ret0, _ := ret[0].(v11.HistoryServiceClient) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NewHistoryClientWithTimeout indicates an expected call of NewHistoryClientWithTimeout. +func (mr *MockFactoryMockRecorder) NewHistoryClientWithTimeout(timeout interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewHistoryClientWithTimeout", reflect.TypeOf((*MockFactory)(nil).NewHistoryClientWithTimeout), timeout) +} + +// NewLocalAdminClientWithTimeout mocks base method. +func (m *MockFactory) NewLocalAdminClientWithTimeout(timeout, largeTimeout time.Duration) (v10.AdminServiceClient, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewLocalAdminClientWithTimeout", timeout, largeTimeout) + ret0, _ := ret[0].(v10.AdminServiceClient) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NewLocalAdminClientWithTimeout indicates an expected call of NewLocalAdminClientWithTimeout. +func (mr *MockFactoryMockRecorder) NewLocalAdminClientWithTimeout(timeout, largeTimeout interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewLocalAdminClientWithTimeout", reflect.TypeOf((*MockFactory)(nil).NewLocalAdminClientWithTimeout), timeout, largeTimeout) +} + +// NewLocalFrontendClientWithTimeout mocks base method. +func (m *MockFactory) NewLocalFrontendClientWithTimeout(timeout, longPollTimeout time.Duration) (grpc.ClientConnInterface, v1.WorkflowServiceClient, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewLocalFrontendClientWithTimeout", timeout, longPollTimeout) + ret0, _ := ret[0].(grpc.ClientConnInterface) + ret1, _ := ret[1].(v1.WorkflowServiceClient) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// NewLocalFrontendClientWithTimeout indicates an expected call of NewLocalFrontendClientWithTimeout. +func (mr *MockFactoryMockRecorder) NewLocalFrontendClientWithTimeout(timeout, longPollTimeout interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewLocalFrontendClientWithTimeout", reflect.TypeOf((*MockFactory)(nil).NewLocalFrontendClientWithTimeout), timeout, longPollTimeout) +} + +// NewMatchingClientWithTimeout mocks base method. +func (m *MockFactory) NewMatchingClientWithTimeout(namespaceIDToName NamespaceIDToNameFunc, timeout, longPollTimeout time.Duration) (v12.MatchingServiceClient, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewMatchingClientWithTimeout", namespaceIDToName, timeout, longPollTimeout) + ret0, _ := ret[0].(v12.MatchingServiceClient) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NewMatchingClientWithTimeout indicates an expected call of NewMatchingClientWithTimeout. 
+func (mr *MockFactoryMockRecorder) NewMatchingClientWithTimeout(namespaceIDToName, timeout, longPollTimeout interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewMatchingClientWithTimeout", reflect.TypeOf((*MockFactory)(nil).NewMatchingClientWithTimeout), namespaceIDToName, timeout, longPollTimeout) +} + +// NewRemoteAdminClientWithTimeout mocks base method. +func (m *MockFactory) NewRemoteAdminClientWithTimeout(rpcAddress string, timeout, largeTimeout time.Duration) v10.AdminServiceClient { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewRemoteAdminClientWithTimeout", rpcAddress, timeout, largeTimeout) + ret0, _ := ret[0].(v10.AdminServiceClient) + return ret0 +} + +// NewRemoteAdminClientWithTimeout indicates an expected call of NewRemoteAdminClientWithTimeout. +func (mr *MockFactoryMockRecorder) NewRemoteAdminClientWithTimeout(rpcAddress, timeout, largeTimeout interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewRemoteAdminClientWithTimeout", reflect.TypeOf((*MockFactory)(nil).NewRemoteAdminClientWithTimeout), rpcAddress, timeout, largeTimeout) +} + +// NewRemoteFrontendClientWithTimeout mocks base method. +func (m *MockFactory) NewRemoteFrontendClientWithTimeout(rpcAddress string, timeout, longPollTimeout time.Duration) (grpc.ClientConnInterface, v1.WorkflowServiceClient) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewRemoteFrontendClientWithTimeout", rpcAddress, timeout, longPollTimeout) + ret0, _ := ret[0].(grpc.ClientConnInterface) + ret1, _ := ret[1].(v1.WorkflowServiceClient) + return ret0, ret1 +} + +// NewRemoteFrontendClientWithTimeout indicates an expected call of NewRemoteFrontendClientWithTimeout. +func (mr *MockFactoryMockRecorder) NewRemoteFrontendClientWithTimeout(rpcAddress, timeout, longPollTimeout interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewRemoteFrontendClientWithTimeout", reflect.TypeOf((*MockFactory)(nil).NewRemoteFrontendClientWithTimeout), rpcAddress, timeout, longPollTimeout) +} + +// MockFactoryProvider is a mock of FactoryProvider interface. +type MockFactoryProvider struct { + ctrl *gomock.Controller + recorder *MockFactoryProviderMockRecorder +} + +// MockFactoryProviderMockRecorder is the mock recorder for MockFactoryProvider. +type MockFactoryProviderMockRecorder struct { + mock *MockFactoryProvider +} + +// NewMockFactoryProvider creates a new mock instance. +func NewMockFactoryProvider(ctrl *gomock.Controller) *MockFactoryProvider { + mock := &MockFactoryProvider{ctrl: ctrl} + mock.recorder = &MockFactoryProviderMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockFactoryProvider) EXPECT() *MockFactoryProviderMockRecorder { + return m.recorder +} + +// NewFactory mocks base method. +func (m *MockFactoryProvider) NewFactory(rpcFactory common.RPCFactory, monitor membership.Monitor, metricsHandler metrics.Handler, dc *dynamicconfig.Collection, numberOfHistoryShards int32, logger, throttledLogger log.Logger) Factory { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewFactory", rpcFactory, monitor, metricsHandler, dc, numberOfHistoryShards, logger, throttledLogger) + ret0, _ := ret[0].(Factory) + return ret0 +} + +// NewFactory indicates an expected call of NewFactory. 
+func (mr *MockFactoryProviderMockRecorder) NewFactory(rpcFactory, monitor, metricsHandler, dc, numberOfHistoryShards, logger, throttledLogger interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewFactory", reflect.TypeOf((*MockFactoryProvider)(nil).NewFactory), rpcFactory, monitor, metricsHandler, dc, numberOfHistoryShards, logger, throttledLogger) +} diff -Nru temporal-1.21.5-1/src/client/clientfactory.go temporal-1.22.5/src/client/clientfactory.go --- temporal-1.21.5-1/src/client/clientfactory.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/client/clientfactory.go 2024-02-23 09:45:43.000000000 +0000 @@ -22,7 +22,7 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. -//go:generate mockgen -copyright_file ../LICENSE -package $GOPACKAGE -source $GOFILE -destination clientFactory_mock.go +//go:generate mockgen -copyright_file ../LICENSE -package $GOPACKAGE -source $GOFILE -destination client_factory_mock.go package client diff -Nru temporal-1.21.5-1/src/client/history/client_gen.go temporal-1.22.5/src/client/history/client_gen.go --- temporal-1.21.5-1/src/client/history/client_gen.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/client/history/client_gen.go 2024-02-23 09:45:43.000000000 +0000 @@ -238,6 +238,46 @@ return response, nil } +func (c *clientImpl) IsActivityTaskValid( + ctx context.Context, + request *historyservice.IsActivityTaskValidRequest, + opts ...grpc.CallOption, +) (*historyservice.IsActivityTaskValidResponse, error) { + shardID := c.shardIDFromWorkflowID(request.NamespaceId, request.GetExecution().GetWorkflowId()) + var response *historyservice.IsActivityTaskValidResponse + op := func(ctx context.Context, client historyservice.HistoryServiceClient) error { + var err error + ctx, cancel := c.createContext(ctx) + defer cancel() + response, err = client.IsActivityTaskValid(ctx, request, opts...) + return err + } + if err := c.executeWithRedirect(ctx, shardID, op); err != nil { + return nil, err + } + return response, nil +} + +func (c *clientImpl) IsWorkflowTaskValid( + ctx context.Context, + request *historyservice.IsWorkflowTaskValidRequest, + opts ...grpc.CallOption, +) (*historyservice.IsWorkflowTaskValidResponse, error) { + shardID := c.shardIDFromWorkflowID(request.NamespaceId, request.GetExecution().GetWorkflowId()) + var response *historyservice.IsWorkflowTaskValidResponse + op := func(ctx context.Context, client historyservice.HistoryServiceClient) error { + var err error + ctx, cancel := c.createContext(ctx) + defer cancel() + response, err = client.IsWorkflowTaskValid(ctx, request, opts...) 
+ return err + } + if err := c.executeWithRedirect(ctx, shardID, op); err != nil { + return nil, err + } + return response, nil +} + func (c *clientImpl) MergeDLQMessages( ctx context.Context, request *historyservice.MergeDLQMessagesRequest, @@ -428,7 +468,7 @@ request *historyservice.RecordChildExecutionCompletedRequest, opts ...grpc.CallOption, ) (*historyservice.RecordChildExecutionCompletedResponse, error) { - shardID := c.shardIDFromWorkflowID(request.NamespaceId, request.GetWorkflowExecution().GetWorkflowId()) + shardID := c.shardIDFromWorkflowID(request.NamespaceId, request.GetParentExecution().GetWorkflowId()) var response *historyservice.RecordChildExecutionCompletedResponse op := func(ctx context.Context, client historyservice.HistoryServiceClient) error { var err error diff -Nru temporal-1.21.5-1/src/client/history/metric_client_gen.go temporal-1.22.5/src/client/history/metric_client_gen.go --- temporal-1.21.5-1/src/client/history/metric_client_gen.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/client/history/metric_client_gen.go 2024-02-23 09:45:43.000000000 +0000 @@ -217,6 +217,34 @@ return c.client.GetShard(ctx, request, opts...) } +func (c *metricClient) IsActivityTaskValid( + ctx context.Context, + request *historyservice.IsActivityTaskValidRequest, + opts ...grpc.CallOption, +) (_ *historyservice.IsActivityTaskValidResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, metrics.HistoryClientIsActivityTaskValidScope) + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.IsActivityTaskValid(ctx, request, opts...) +} + +func (c *metricClient) IsWorkflowTaskValid( + ctx context.Context, + request *historyservice.IsWorkflowTaskValidRequest, + opts ...grpc.CallOption, +) (_ *historyservice.IsWorkflowTaskValidResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, metrics.HistoryClientIsWorkflowTaskValidScope) + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.IsWorkflowTaskValid(ctx, request, opts...) +} + func (c *metricClient) MergeDLQMessages( ctx context.Context, request *historyservice.MergeDLQMessagesRequest, diff -Nru temporal-1.21.5-1/src/client/history/retryable_client_gen.go temporal-1.22.5/src/client/history/retryable_client_gen.go --- temporal-1.21.5-1/src/client/history/retryable_client_gen.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/client/history/retryable_client_gen.go 2024-02-23 09:45:43.000000000 +0000 @@ -230,6 +230,36 @@ return resp, err } +func (c *retryableClient) IsActivityTaskValid( + ctx context.Context, + request *historyservice.IsActivityTaskValidRequest, + opts ...grpc.CallOption, +) (*historyservice.IsActivityTaskValidResponse, error) { + var resp *historyservice.IsActivityTaskValidResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.IsActivityTaskValid(ctx, request, opts...) + return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + +func (c *retryableClient) IsWorkflowTaskValid( + ctx context.Context, + request *historyservice.IsWorkflowTaskValidRequest, + opts ...grpc.CallOption, +) (*historyservice.IsWorkflowTaskValidResponse, error) { + var resp *historyservice.IsWorkflowTaskValidResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.IsWorkflowTaskValid(ctx, request, opts...) 
+ return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + func (c *retryableClient) MergeDLQMessages( ctx context.Context, request *historyservice.MergeDLQMessagesRequest, diff -Nru temporal-1.21.5-1/src/client/matching/client.go temporal-1.22.5/src/client/matching/client.go --- temporal-1.21.5-1/src/client/matching/client.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/client/matching/client.go 2024-02-23 09:45:43.000000000 +0000 @@ -170,7 +170,7 @@ } func (c *clientImpl) QueryWorkflow(ctx context.Context, request *matchingservice.QueryWorkflowRequest, opts ...grpc.CallOption) (*matchingservice.QueryWorkflowResponse, error) { - partition := c.loadBalancer.PickReadPartition( + partition := c.loadBalancer.PickWritePartition( namespace.ID(request.GetNamespaceId()), *request.GetTaskQueue(), enumspb.TASK_QUEUE_TYPE_WORKFLOW, diff -Nru temporal-1.21.5-1/src/client/matching/loadbalancer.go temporal-1.22.5/src/client/matching/loadbalancer.go --- temporal-1.21.5-1/src/client/matching/loadbalancer.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/client/matching/loadbalancer.go 2024-02-23 09:45:43.000000000 +0000 @@ -85,7 +85,7 @@ nReadPartitions: dc.GetTaskQueuePartitionsProperty(dynamicconfig.MatchingNumTaskqueueReadPartitions), nWritePartitions: dc.GetTaskQueuePartitionsProperty(dynamicconfig.MatchingNumTaskqueueWritePartitions), forceReadPartition: dc.GetIntProperty(dynamicconfig.TestMatchingLBForceReadPartition, -1), - forceWritePartition: dc.GetIntProperty(dynamicconfig.TestMatchingLBForceReadPartition, -1), + forceWritePartition: dc.GetIntProperty(dynamicconfig.TestMatchingLBForceWritePartition, -1), } return lb } diff -Nru temporal-1.21.5-1/src/cmd/tools/rpcwrappers/main.go temporal-1.22.5/src/cmd/tools/rpcwrappers/main.go --- temporal-1.21.5-1/src/cmd/tools/rpcwrappers/main.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/cmd/tools/rpcwrappers/main.go 2024-02-23 09:45:43.000000000 +0000 @@ -48,6 +48,11 @@ clientType reflect.Type clientGenerator func(io.Writer, service) } + + fieldWithPath struct { + field *reflect.StructField + path string + } ) var ( @@ -109,6 +114,20 @@ "metricsClient.matching.PollWorkflowTaskQueue": true, "metricsClient.matching.QueryWorkflow": true, } + // Fields to ignore when looking for the routing fields in a request object. 
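Each generated retryable-client method above wraps the underlying RPC in a closure and hands it to backoff.ThrottleRetryContext; a simplified, self-contained stand-in for that helper (the exponential policy and the retryability check here are illustrative, not the server's actual backoff package):

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// retryContext retries op with exponential backoff until it succeeds,
// the error is not retryable, or the context is done.
func retryContext(ctx context.Context, op func(context.Context) error, isRetryable func(error) bool) error {
	backoff := 50 * time.Millisecond
	for {
		err := op(ctx)
		if err == nil || !isRetryable(err) {
			return err
		}
		select {
		case <-ctx.Done():
			return err
		case <-time.After(backoff):
		}
		if backoff < time.Second {
			backoff *= 2
		}
	}
}

func main() {
	attempts := 0
	op := func(ctx context.Context) error {
		attempts++
		if attempts < 3 {
			return errors.New("transient")
		}
		return nil
	}
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	fmt.Println(retryContext(ctx, op, func(error) bool { return true }), "after", attempts, "attempts")
}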
+ ignoreField = map[string]bool{ + // this is the workflow that sent a signal + "SignalWorkflowExecutionRequest.ExternalWorkflowExecution": true, + // this is the workflow that sent a cancel request + "RequestCancelWorkflowExecutionRequest.ExternalWorkflowExecution": true, + // this is the workflow that sent a terminate + "TerminateWorkflowExecutionRequest.ExternalWorkflowExecution": true, + // this is the parent for starting a child workflow + "StartWorkflowExecutionRequest.ParentExecutionInfo": true, + // these get routed to the parent + "RecordChildExecutionCompletedRequest.ChildExecution": true, + "VerifyChildExecutionCompletionRecordedRequest.ChildExecution": true, + } ) func panicIfErr(err error) { @@ -124,95 +143,117 @@ })) } -func pathToField(t reflect.Type, name string, path string, maxDepth int) string { - p, _ := findNestedField(t, name, path, maxDepth) - return p -} - -func findNestedField(t reflect.Type, name string, path string, maxDepth int) (string, *reflect.StructField) { +func findNestedField(t reflect.Type, name string, path string, maxDepth int) []fieldWithPath { if t.Kind() != reflect.Struct || maxDepth <= 0 { - return "", nil + return nil } + var out []fieldWithPath for i := 0; i < t.NumField(); i++ { f := t.Field(i) + if ignoreField[t.Name()+"."+f.Name] { + continue + } if f.Name == name { - return path + ".Get" + name + "()", &f + out = append(out, fieldWithPath{field: &f, path: path + ".Get" + name + "()"}) } ft := f.Type if ft.Kind() == reflect.Pointer { - if path, try := findNestedField(ft.Elem(), name, path+".Get"+f.Name+"()", maxDepth-1); try != nil { - return path, try - } + out = append(out, findNestedField(ft.Elem(), name, path+".Get"+f.Name+"()", maxDepth-1)...) } } - return "", nil + return out +} + +func findOneNestedField(t reflect.Type, name string, path string, maxDepth int) fieldWithPath { + fields := findNestedField(t, name, path, maxDepth) + if len(fields) == 0 { + panic(fmt.Sprintf("Couldn't find %s in %s", name, t)) + } else if len(fields) > 1 { + panic(fmt.Sprintf("Found more than one %s in %s (%v)", name, t, fields)) + } + return fields[0] } func makeGetHistoryClient(reqType reflect.Type) string { // this magically figures out how to get a HistoryServiceClient from a request t := reqType.Elem() // we know it's a pointer - if path := pathToField(t, "ShardId", "request", 1); path != "" { - return fmt.Sprintf("shardID := %s", path) - } - if path := pathToField(t, "WorkflowId", "request", 4); path != "" { - return fmt.Sprintf("shardID := c.shardIDFromWorkflowID(request.NamespaceId, %s)", path) - } - if path := pathToField(t, "TaskToken", "request", 2); path != "" { + + shardIdField := findNestedField(t, "ShardId", "request", 1) + workflowIdField := findNestedField(t, "WorkflowId", "request", 4) + taskTokenField := findNestedField(t, "TaskToken", "request", 2) + taskInfosField := findNestedField(t, "TaskInfos", "request", 1) + + found := len(shardIdField) + len(workflowIdField) + len(taskTokenField) + len(taskInfosField) + if found < 1 { + panic(fmt.Sprintf("Found no routing fields in %s", t)) + } else if found > 1 { + panic(fmt.Sprintf("Found more than one routing field in %s (%v, %v, %v, %v)", + t, shardIdField, workflowIdField, taskTokenField, taskInfosField)) + } + + switch { + case len(shardIdField) == 1: + return fmt.Sprintf("shardID := %s", shardIdField[0].path) + case len(workflowIdField) == 1: + return fmt.Sprintf("shardID := c.shardIDFromWorkflowID(request.NamespaceId, %s)", workflowIdField[0].path) + case len(taskTokenField) == 1: return 
fmt.Sprintf(`taskToken, err := c.tokenSerializer.Deserialize(%s) if err != nil { return nil, err } shardID := c.shardIDFromWorkflowID(request.NamespaceId, taskToken.GetWorkflowId()) -`, path) - } - // slice needs a tiny bit of extra handling for namespace - if path := pathToField(t, "TaskInfos", "request", 1); path != "" { +`, taskTokenField[0].path) + case len(taskInfosField) == 1: + p := taskInfosField[0].path + // slice needs a tiny bit of extra handling for namespace return fmt.Sprintf(`// All workflow IDs are in the same shard per request if len(%s) == 0 { return nil, serviceerror.NewInvalidArgument("missing TaskInfos") } - shardID := c.shardIDFromWorkflowID(%s[0].NamespaceId, %s[0].WorkflowId)`, path, path, path) + shardID := c.shardIDFromWorkflowID(%s[0].NamespaceId, %s[0].WorkflowId)`, p, p, p) + default: + panic("not reached") } - panic("I don't know how to get a client from a " + t.String()) } func makeGetMatchingClient(reqType reflect.Type) string { // this magically figures out how to get a MatchingServiceClient from a request t := reqType.Elem() // we know it's a pointer - nsIDPath := pathToField(t, "NamespaceId", "request", 1) - tqPath, tqField := findNestedField(t, "TaskQueue", "request", 2) + nsID := findOneNestedField(t, "NamespaceId", "request", 1) + var tq, tqt fieldWithPath - var tqtPath string switch t.Name() { case "GetBuildIdTaskQueueMappingRequest": // Pick a random node for this request, it's not associated with a specific task queue. - tqPath = "&taskqueuepb.TaskQueue{Name: fmt.Sprintf(\"not-applicable-%d\", rand.Int())}" - tqtPath = "enumspb.TASK_QUEUE_TYPE_UNSPECIFIED" - return fmt.Sprintf("client, err := c.getClientForTaskqueue(%s, %s, %s)", nsIDPath, tqPath, tqtPath) + tq = fieldWithPath{path: "&taskqueuepb.TaskQueue{Name: fmt.Sprintf(\"not-applicable-%d\", rand.Int())}"} + tqt = fieldWithPath{path: "enumspb.TASK_QUEUE_TYPE_UNSPECIFIED"} case "UpdateTaskQueueUserDataRequest", "ReplicateTaskQueueUserDataRequest": // Always route these requests to the same matching node by namespace. 
- tqPath = "&taskqueuepb.TaskQueue{Name: \"not-applicable\"}" - tqtPath = "enumspb.TASK_QUEUE_TYPE_UNSPECIFIED" - return fmt.Sprintf("client, err := c.getClientForTaskqueue(%s, %s, %s)", nsIDPath, tqPath, tqtPath) + tq = fieldWithPath{path: "&taskqueuepb.TaskQueue{Name: \"not-applicable\"}"} + tqt = fieldWithPath{path: "enumspb.TASK_QUEUE_TYPE_UNSPECIFIED"} case "GetWorkerBuildIdCompatibilityRequest", "UpdateWorkerBuildIdCompatibilityRequest", "RespondQueryTaskCompletedRequest", "ListTaskQueuePartitionsRequest", "ApplyTaskQueueUserDataReplicationEventRequest": - tqtPath = "enumspb.TASK_QUEUE_TYPE_WORKFLOW" + tq = findOneNestedField(t, "TaskQueue", "request", 2) + tqt = fieldWithPath{path: "enumspb.TASK_QUEUE_TYPE_WORKFLOW"} default: - tqtPath = pathToField(t, "TaskQueueType", "request", 2) + tq = findOneNestedField(t, "TaskQueue", "request", 2) + tqt = findOneNestedField(t, "TaskQueueType", "request", 2) } - if nsIDPath != "" && tqPath != "" && tqField != nil && tqtPath != "" { - // Some task queue fields are full messages, some are just strings - isTaskQueueMessage := tqField.Type == reflect.TypeOf((*taskqueue.TaskQueue)(nil)) - if !isTaskQueueMessage { - tqPath = fmt.Sprintf("&taskqueuepb.TaskQueue{Name: %s}", tqPath) + if nsID.path != "" && tq.path != "" && tqt.path != "" { + if tq.field != nil { + // Some task queue fields are full messages, some are just strings + isTaskQueueMessage := tq.field.Type == reflect.TypeOf((*taskqueue.TaskQueue)(nil)) + if !isTaskQueueMessage { + tq.path = fmt.Sprintf("&taskqueuepb.TaskQueue{Name: %s}", tq.path) + } } - return fmt.Sprintf("client, err := c.getClientForTaskqueue(%s, %s, %s)", nsIDPath, tqPath, tqtPath) + return fmt.Sprintf("client, err := c.getClientForTaskqueue(%s, %s, %s)", nsID.path, tq.path, tqt.path) } panic("I don't know how to get a client from a " + t.String()) diff -Nru temporal-1.21.5-1/src/common/api/metadata.go temporal-1.22.5/src/common/api/metadata.go --- temporal-1.21.5-1/src/common/api/metadata.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/api/metadata.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,166 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package api + +import "strings" + +type ( + // Describes the scope of a method (whole cluster or inividual namespace). 
+ Scope int32 + + // Describes what level of access is needed for a method. Note that this field is + // completely advisory. Any authorizer implementation may implement whatever logic it + // chooses, including ignoring this field. It is used by the "default" authorizer to check + // against roles in claims. + Access int32 + + MethodMetadata struct { + // Describes the scope of a method (whole cluster or inividual namespace). + Scope Scope + // Describes what level of access is needed for a method (advisory). + Access Access + } +) + +const ( + // Represents a missing Scope value. + ScopeUnknown Scope = iota + // Method affects a single namespace. The request message must contain a string field named "Namespace". + ScopeNamespace + // Method affects the whole cluster. The request message must _not_ contain any field named "Namespace". + ScopeCluster +) + +const ( + // Represents a missing Access value. + AccessUnknown Access = iota + // Method is read-only and should be accessible to readers. + AccessReadOnly + // Method is a normal write method. + AccessWrite + // Method is an administrative operation. + AccessAdmin +) + +const ( + WorkflowServicePrefix = "/temporal.api.workflowservice.v1.WorkflowService/" + OperatorServicePrefix = "/temporal.api.operatorservice.v1.OperatorService/" + AdminServicePrefix = "/temporal.server.api.adminservice.v1.AdminService/" +) + +var ( + workflowServiceMetadata = map[string]MethodMetadata{ + "RegisterNamespace": MethodMetadata{Scope: ScopeNamespace, Access: AccessAdmin}, + "DescribeNamespace": MethodMetadata{Scope: ScopeNamespace, Access: AccessReadOnly}, + "ListNamespaces": MethodMetadata{Scope: ScopeCluster, Access: AccessReadOnly}, + "UpdateNamespace": MethodMetadata{Scope: ScopeNamespace, Access: AccessAdmin}, + "DeprecateNamespace": MethodMetadata{Scope: ScopeNamespace, Access: AccessAdmin}, + "StartWorkflowExecution": MethodMetadata{Scope: ScopeNamespace, Access: AccessWrite}, + "GetWorkflowExecutionHistory": MethodMetadata{Scope: ScopeNamespace, Access: AccessReadOnly}, + "GetWorkflowExecutionHistoryReverse": MethodMetadata{Scope: ScopeNamespace, Access: AccessReadOnly}, + "PollWorkflowTaskQueue": MethodMetadata{Scope: ScopeNamespace, Access: AccessWrite}, + "RespondWorkflowTaskCompleted": MethodMetadata{Scope: ScopeNamespace, Access: AccessWrite}, + "RespondWorkflowTaskFailed": MethodMetadata{Scope: ScopeNamespace, Access: AccessWrite}, + "PollActivityTaskQueue": MethodMetadata{Scope: ScopeNamespace, Access: AccessWrite}, + "RecordActivityTaskHeartbeat": MethodMetadata{Scope: ScopeNamespace, Access: AccessWrite}, + "RecordActivityTaskHeartbeatById": MethodMetadata{Scope: ScopeNamespace, Access: AccessWrite}, + "RespondActivityTaskCompleted": MethodMetadata{Scope: ScopeNamespace, Access: AccessWrite}, + "RespondActivityTaskCompletedById": MethodMetadata{Scope: ScopeNamespace, Access: AccessWrite}, + "RespondActivityTaskFailed": MethodMetadata{Scope: ScopeNamespace, Access: AccessWrite}, + "RespondActivityTaskFailedById": MethodMetadata{Scope: ScopeNamespace, Access: AccessWrite}, + "RespondActivityTaskCanceled": MethodMetadata{Scope: ScopeNamespace, Access: AccessWrite}, + "RespondActivityTaskCanceledById": MethodMetadata{Scope: ScopeNamespace, Access: AccessWrite}, + "RequestCancelWorkflowExecution": MethodMetadata{Scope: ScopeNamespace, Access: AccessWrite}, + "SignalWorkflowExecution": MethodMetadata{Scope: ScopeNamespace, Access: AccessWrite}, + "SignalWithStartWorkflowExecution": MethodMetadata{Scope: ScopeNamespace, Access: AccessWrite}, + 
"ResetWorkflowExecution": MethodMetadata{Scope: ScopeNamespace, Access: AccessWrite}, + "TerminateWorkflowExecution": MethodMetadata{Scope: ScopeNamespace, Access: AccessWrite}, + "DeleteWorkflowExecution": MethodMetadata{Scope: ScopeNamespace, Access: AccessWrite}, + "ListOpenWorkflowExecutions": MethodMetadata{Scope: ScopeNamespace, Access: AccessReadOnly}, + "ListClosedWorkflowExecutions": MethodMetadata{Scope: ScopeNamespace, Access: AccessReadOnly}, + "ListWorkflowExecutions": MethodMetadata{Scope: ScopeNamespace, Access: AccessReadOnly}, + "ListArchivedWorkflowExecutions": MethodMetadata{Scope: ScopeNamespace, Access: AccessReadOnly}, + "ScanWorkflowExecutions": MethodMetadata{Scope: ScopeNamespace, Access: AccessReadOnly}, + "CountWorkflowExecutions": MethodMetadata{Scope: ScopeNamespace, Access: AccessReadOnly}, + "GetSearchAttributes": MethodMetadata{Scope: ScopeCluster, Access: AccessReadOnly}, + "RespondQueryTaskCompleted": MethodMetadata{Scope: ScopeNamespace, Access: AccessWrite}, + "ResetStickyTaskQueue": MethodMetadata{Scope: ScopeNamespace, Access: AccessWrite}, + "QueryWorkflow": MethodMetadata{Scope: ScopeNamespace, Access: AccessReadOnly}, + "DescribeWorkflowExecution": MethodMetadata{Scope: ScopeNamespace, Access: AccessReadOnly}, + "DescribeTaskQueue": MethodMetadata{Scope: ScopeNamespace, Access: AccessReadOnly}, + "GetClusterInfo": MethodMetadata{Scope: ScopeCluster, Access: AccessReadOnly}, + "GetSystemInfo": MethodMetadata{Scope: ScopeCluster, Access: AccessReadOnly}, + "ListTaskQueuePartitions": MethodMetadata{Scope: ScopeNamespace, Access: AccessReadOnly}, + "CreateSchedule": MethodMetadata{Scope: ScopeNamespace, Access: AccessWrite}, + "DescribeSchedule": MethodMetadata{Scope: ScopeNamespace, Access: AccessReadOnly}, + "UpdateSchedule": MethodMetadata{Scope: ScopeNamespace, Access: AccessWrite}, + "PatchSchedule": MethodMetadata{Scope: ScopeNamespace, Access: AccessWrite}, + "ListScheduleMatchingTimes": MethodMetadata{Scope: ScopeNamespace, Access: AccessReadOnly}, + "DeleteSchedule": MethodMetadata{Scope: ScopeNamespace, Access: AccessWrite}, + "ListSchedules": MethodMetadata{Scope: ScopeNamespace, Access: AccessReadOnly}, + "UpdateWorkerBuildIdCompatibility": MethodMetadata{Scope: ScopeNamespace, Access: AccessWrite}, + "GetWorkerBuildIdCompatibility": MethodMetadata{Scope: ScopeNamespace, Access: AccessReadOnly}, + "GetWorkerTaskReachability": MethodMetadata{Scope: ScopeNamespace, Access: AccessReadOnly}, + "UpdateWorkflowExecution": MethodMetadata{Scope: ScopeNamespace, Access: AccessWrite}, + "PollWorkflowExecutionUpdate": MethodMetadata{Scope: ScopeNamespace, Access: AccessReadOnly}, + "StartBatchOperation": MethodMetadata{Scope: ScopeNamespace, Access: AccessWrite}, + "StopBatchOperation": MethodMetadata{Scope: ScopeNamespace, Access: AccessWrite}, + "DescribeBatchOperation": MethodMetadata{Scope: ScopeNamespace, Access: AccessReadOnly}, + "ListBatchOperations": MethodMetadata{Scope: ScopeNamespace, Access: AccessReadOnly}, + } + operatorServiceMetadata = map[string]MethodMetadata{ + "AddSearchAttributes": MethodMetadata{Scope: ScopeNamespace, Access: AccessAdmin}, + "RemoveSearchAttributes": MethodMetadata{Scope: ScopeNamespace, Access: AccessAdmin}, + "ListSearchAttributes": MethodMetadata{Scope: ScopeNamespace, Access: AccessReadOnly}, + "DeleteNamespace": MethodMetadata{Scope: ScopeNamespace, Access: AccessAdmin}, + "AddOrUpdateRemoteCluster": MethodMetadata{Scope: ScopeCluster, Access: AccessAdmin}, + "RemoveRemoteCluster": MethodMetadata{Scope: 
ScopeCluster, Access: AccessAdmin}, + "ListClusters": MethodMetadata{Scope: ScopeCluster, Access: AccessAdmin}, + } +) + +// GetMethodMetadata gets metadata for a given API method in one of the services exported by +// frontend (WorkflowService, OperatorService, AdminService). +func GetMethodMetadata(fullApiName string) MethodMetadata { + switch { + case strings.HasPrefix(fullApiName, WorkflowServicePrefix): + return workflowServiceMetadata[MethodName(fullApiName)] + case strings.HasPrefix(fullApiName, OperatorServicePrefix): + return operatorServiceMetadata[MethodName(fullApiName)] + case strings.HasPrefix(fullApiName, AdminServicePrefix): + return MethodMetadata{Scope: ScopeCluster, Access: AccessAdmin} + default: + return MethodMetadata{Scope: ScopeUnknown, Access: AccessUnknown} + } +} + +// BaseName returns just the method name from a fullly qualified name. +func MethodName(fullApiName string) string { + index := strings.LastIndex(fullApiName, "/") + if index > -1 { + return fullApiName[index+1:] + } + return fullApiName +} diff -Nru temporal-1.21.5-1/src/common/api/metadata_test.go temporal-1.22.5/src/common/api/metadata_test.go --- temporal-1.21.5-1/src/common/api/metadata_test.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/api/metadata_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,115 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
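The new common/api package above assigns every frontend RPC a Scope and an Access level that authorizers can consult; a small sketch of looking a method up by its fully qualified gRPC name (the read-only policy is purely illustrative):

package main

import (
	"fmt"

	"go.temporal.io/server/common/api"
)

// allowReadOnly is an illustrative policy: permit only read-only, namespace-scoped calls.
func allowReadOnly(fullMethod string) bool {
	md := api.GetMethodMetadata(fullMethod)
	return md.Scope == api.ScopeNamespace && md.Access == api.AccessReadOnly
}

func main() {
	describe := api.WorkflowServicePrefix + "DescribeWorkflowExecution"
	start := api.WorkflowServicePrefix + "StartWorkflowExecution"
	fmt.Println(api.MethodName(describe), allowReadOnly(describe)) // DescribeWorkflowExecution true
	fmt.Println(api.MethodName(start), allowReadOnly(start))       // StartWorkflowExecution false
}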
+ +package api + +import ( + "reflect" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.temporal.io/api/operatorservice/v1" + "go.temporal.io/api/workflowservice/v1" + "golang.org/x/exp/maps" +) + +func TestWorkflowServiceMetadata(t *testing.T) { + tp := reflect.TypeOf((*workflowservice.WorkflowServiceServer)(nil)).Elem() + checkService(t, tp, workflowServiceMetadata) +} + +func TestOperatorServiceMetadata(t *testing.T) { + tp := reflect.TypeOf((*operatorservice.OperatorServiceServer)(nil)).Elem() + checkService(t, tp, operatorServiceMetadata) +} + +func checkService(t *testing.T, tp reflect.Type, m map[string]MethodMetadata) { + methods := getMethodNames(tp) + require.ElementsMatch(t, methods, maps.Keys(m), + "If you're adding a new method to Workflow/OperatorService, please add metadata for it in metadata.go") + + for _, method := range methods { + refMethod, ok := tp.MethodByName(method) + require.True(t, ok) + + checkNamespace := false + hasNamespace := false + namespaceIsString := false + + if refMethod.Type.NumIn() >= 2 { + // not streaming + checkNamespace = true + requestType := refMethod.Type.In(1).Elem() + var nsField reflect.StructField + nsField, hasNamespace = requestType.FieldByName("Namespace") + if hasNamespace { + namespaceIsString = nsField.Type == reflect.TypeOf("string") + } + } + + md := m[method] + switch md.Scope { + case ScopeNamespace: + if checkNamespace { + assert.Truef(t, namespaceIsString, "%s with ScopeNamespace should have a Namespace field that is a string", method) + } + case ScopeCluster: + if checkNamespace { + assert.Falsef(t, hasNamespace, "%s with ScopeCluster should not have a Namespace field", method) + } + default: + t.Error("unknown Scope for", method) + } + + switch md.Access { + case AccessReadOnly, AccessWrite, AccessAdmin: + default: + t.Error("unknown Access for", method) + } + } +} + +func TestGetMethodMetadata(t *testing.T) { + md := GetMethodMetadata("/temporal.api.workflowservice.v1.WorkflowService/RespondActivityTaskCompleted") + assert.Equal(t, ScopeNamespace, md.Scope) + assert.Equal(t, AccessWrite, md.Access) + + // all AdminService is cluster/admin + md = GetMethodMetadata("/temporal.server.api.adminservice.v1.AdminService/CloseShard") + assert.Equal(t, ScopeCluster, md.Scope) + assert.Equal(t, AccessAdmin, md.Access) + + md = GetMethodMetadata("/OtherService/Method1") + assert.Equal(t, ScopeUnknown, md.Scope) + assert.Equal(t, AccessUnknown, md.Access) +} + +func getMethodNames(tp reflect.Type) []string { + var out []string + for i := 0; i < tp.NumMethod(); i++ { + out = append(out, tp.Method(i).Name) + } + return out +} diff -Nru temporal-1.21.5-1/src/common/archiver/URI.go temporal-1.22.5/src/common/archiver/URI.go --- temporal-1.21.5-1/src/common/archiver/URI.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/archiver/URI.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,103 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package archiver - -import ( - "net/url" -) - -type ( - // URI identifies the archival resource to which records are written to and read from. - URI interface { - Scheme() string - Path() string - Hostname() string - Port() string - Username() string - Password() string - String() string - Opaque() string - Query() map[string][]string - } - - uri struct { - url *url.URL - } -) - -// NewURI constructs a new archiver URI from string. -func NewURI(s string) (URI, error) { - url, err := url.ParseRequestURI(s) - if err != nil { - return nil, err - } - return &uri{url: url}, nil -} - -func (u *uri) Scheme() string { - return u.url.Scheme -} - -func (u *uri) Path() string { - return u.url.Path -} - -func (u *uri) Hostname() string { - return u.url.Hostname() -} - -func (u *uri) Port() string { - return u.url.Port() -} - -func (u *uri) Username() string { - if u.url.User == nil { - return "" - } - return u.url.User.Username() -} - -func (u *uri) Password() string { - if u.url.User == nil { - return "" - } - password, exist := u.url.User.Password() - if !exist { - return "" - } - return password -} - -func (u *uri) Opaque() string { - return u.url.Opaque -} - -func (u *uri) Query() map[string][]string { - return u.url.Query() -} - -func (u *uri) String() string { - return u.url.String() -} diff -Nru temporal-1.21.5-1/src/common/archiver/URI_test.go temporal-1.22.5/src/common/archiver/URI_test.go --- temporal-1.21.5-1/src/common/archiver/URI_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/archiver/URI_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,152 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package archiver - -import ( - "testing" - - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" -) - -type ( - URISuite struct { - *require.Assertions - suite.Suite - } -) - -func TestURISuite(t *testing.T) { - suite.Run(t, new(URISuite)) -} - -func (s *URISuite) SetupTest() { - s.Assertions = require.New(s.T()) -} - -func (s *URISuite) TestURI() { - testCases := []struct { - URIString string - valid bool - scheme string - path string - hostname string - port string - username string - password string - opaque string - query map[string][]string - }{ - { - URIString: "", - valid: false, - }, - { - URIString: "some random string", - valid: false, - }, - { - URIString: "mailto:a@b.com", - valid: true, - scheme: "mailto", - opaque: "a@b.com", - }, - { - URIString: "test://", - valid: true, - scheme: "test", - }, - { - URIString: "http://example.com/path", - valid: true, - scheme: "http", - hostname: "example.com", - path: "/path", - }, - { - URIString: "http://example.com/path with space", - valid: true, - scheme: "http", - hostname: "example.com", - path: "/path with space", - }, - { - URIString: "https://localhost:8080?key1=value1&key1=value2&key2=value3", - valid: true, - scheme: "https", - hostname: "localhost", - port: "8080", - query: map[string][]string{ - "key1": {"value1", "value2"}, - "key2": {"value3"}, - }, - }, - { - URIString: "file:///absolute/path/to/dir", - valid: true, - scheme: "file", - path: "/absolute/path/to/dir", - }, - { - URIString: "test://person:password@host/path", - valid: true, - scheme: "test", - hostname: "host", - path: "/path", - username: "person", - password: "password", - }, - { - URIString: "test:opaque?key1=value1&key1=value2&key2=value3", - valid: true, - scheme: "test", - opaque: "opaque", - query: map[string][]string{ - "key1": {"value1", "value2"}, - "key2": {"value3"}, - }, - }, - } - - for _, tc := range testCases { - URI, err := NewURI(tc.URIString) - if !tc.valid { - s.Error(err) - continue - } - - s.NoError(err) - s.Equal(tc.scheme, URI.Scheme()) - s.Equal(tc.path, URI.Path()) - s.Equal(tc.hostname, URI.Hostname()) - s.Equal(tc.port, URI.Port()) - s.Equal(tc.username, URI.Username()) - s.Equal(tc.password, URI.Password()) - s.Equal(tc.opaque, URI.Opaque()) - if tc.query != nil { - s.Equal(tc.query, URI.Query()) - } - } -} diff -Nru temporal-1.21.5-1/src/common/archiver/archivalMetadata.go temporal-1.22.5/src/common/archiver/archivalMetadata.go --- temporal-1.21.5-1/src/common/archiver/archivalMetadata.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/archiver/archivalMetadata.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,236 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -//go:generate mockgen -copyright_file ../../LICENSE -package $GOPACKAGE -source $GOFILE -destination archivalMetadata_mock.go - -package archiver - -import ( - "fmt" - "strings" - - enumspb "go.temporal.io/api/enums/v1" - - "go.temporal.io/server/common/config" - - "go.temporal.io/server/common/dynamicconfig" -) - -type ( - // ArchivalMetadata provides cluster level archival information - ArchivalMetadata interface { - GetHistoryConfig() ArchivalConfig - GetVisibilityConfig() ArchivalConfig - } - - // ArchivalConfig is an immutable representation of the archival configuration of the cluster - // This config is determined at cluster startup time - ArchivalConfig interface { - ClusterConfiguredForArchival() bool - GetClusterState() ArchivalState - ReadEnabled() bool - GetNamespaceDefaultState() enumspb.ArchivalState - GetNamespaceDefaultURI() string - StaticClusterState() ArchivalState - } - - archivalMetadata struct { - historyConfig ArchivalConfig - visibilityConfig ArchivalConfig - } - - archivalConfig struct { - staticClusterState ArchivalState - dynamicClusterState dynamicconfig.StringPropertyFn - enableRead dynamicconfig.BoolPropertyFn - namespaceDefaultState enumspb.ArchivalState - namespaceDefaultURI string - } - - // ArchivalState represents the archival state of the cluster - ArchivalState int -) - -func (a *archivalConfig) StaticClusterState() ArchivalState { - return a.staticClusterState -} - -const ( - // ArchivalDisabled means this cluster is not configured to handle archival - ArchivalDisabled ArchivalState = iota - // ArchivalPaused means this cluster is configured to handle archival but is currently not archiving - // This state is not yet implemented, as of now ArchivalPaused is treated the same way as ArchivalDisabled - ArchivalPaused - // ArchivalEnabled means this cluster is currently archiving - ArchivalEnabled -) - -// NewArchivalMetadata constructs a new ArchivalMetadata -func NewArchivalMetadata( - dc *dynamicconfig.Collection, - historyState string, - historyReadEnabled bool, - visibilityState string, - visibilityReadEnabled bool, - namespaceDefaults *config.ArchivalNamespaceDefaults, -) ArchivalMetadata { - historyConfig := NewArchivalConfig( - historyState, - dc.GetStringProperty(dynamicconfig.HistoryArchivalState, historyState), - dc.GetBoolProperty(dynamicconfig.EnableReadFromHistoryArchival, historyReadEnabled), - namespaceDefaults.History.State, - namespaceDefaults.History.URI, - ) - - 
visibilityConfig := NewArchivalConfig( - visibilityState, - dc.GetStringProperty(dynamicconfig.VisibilityArchivalState, visibilityState), - dc.GetBoolProperty(dynamicconfig.EnableReadFromVisibilityArchival, visibilityReadEnabled), - namespaceDefaults.Visibility.State, - namespaceDefaults.Visibility.URI, - ) - - return &archivalMetadata{ - historyConfig: historyConfig, - visibilityConfig: visibilityConfig, - } -} - -func (metadata *archivalMetadata) GetHistoryConfig() ArchivalConfig { - return metadata.historyConfig -} - -func (metadata *archivalMetadata) GetVisibilityConfig() ArchivalConfig { - return metadata.visibilityConfig -} - -// NewArchivalConfig constructs a new valid ArchivalConfig -func NewArchivalConfig( - staticClusterStateStr string, - dynamicClusterState dynamicconfig.StringPropertyFn, - enableRead dynamicconfig.BoolPropertyFn, - namespaceDefaultStateStr string, - namespaceDefaultURI string, -) ArchivalConfig { - staticClusterState, err := getClusterArchivalState(staticClusterStateStr) - if err != nil { - panic(err) - } - namespaceDefaultState, err := getNamespaceArchivalState(namespaceDefaultStateStr) - if err != nil { - panic(err) - } - - return &archivalConfig{ - staticClusterState: staticClusterState, - dynamicClusterState: dynamicClusterState, - enableRead: enableRead, - namespaceDefaultState: namespaceDefaultState, - namespaceDefaultURI: namespaceDefaultURI, - } -} - -// NewDisabledArchvialConfig returns an ArchivalConfig where archival is disabled for both the cluster and the namespace -func NewDisabledArchvialConfig() ArchivalConfig { - return &archivalConfig{ - staticClusterState: ArchivalDisabled, - dynamicClusterState: nil, - enableRead: nil, - namespaceDefaultState: enumspb.ARCHIVAL_STATE_DISABLED, - namespaceDefaultURI: "", - } -} - -// NewEnabledArchivalConfig returns an ArchivalConfig where archival is enabled for both the cluster and the namespace -func NewEnabledArchivalConfig() ArchivalConfig { - return &archivalConfig{ - staticClusterState: ArchivalEnabled, - dynamicClusterState: dynamicconfig.GetStringPropertyFn("enabled"), - enableRead: dynamicconfig.GetBoolPropertyFn(true), - namespaceDefaultState: enumspb.ARCHIVAL_STATE_ENABLED, - namespaceDefaultURI: "some-uri", - } -} - -// ClusterConfiguredForArchival returns true if cluster is configured to handle archival, false otherwise -func (a *archivalConfig) ClusterConfiguredForArchival() bool { - return a.GetClusterState() == ArchivalEnabled -} - -func (a *archivalConfig) GetClusterState() ArchivalState { - // Only check dynamic config when archival is enabled in static config. - // If archival is disabled in static config, there will be no provider section in the static config - // and the archiver provider can not create any archiver. Therefore, in that case, - // even dynamic config says archival is enabled, we should ignore that. - // Only when archival is enabled in static config, should we check if there's any difference between static config and dynamic config. 
- if a.staticClusterState != ArchivalEnabled { - return a.staticClusterState - } - - dynamicStateStr := a.dynamicClusterState() - dynamicState, err := getClusterArchivalState(dynamicStateStr) - if err != nil { - return ArchivalDisabled - } - return dynamicState -} - -func (a *archivalConfig) ReadEnabled() bool { - if !a.ClusterConfiguredForArchival() { - return false - } - return a.enableRead() -} - -func (a *archivalConfig) GetNamespaceDefaultState() enumspb.ArchivalState { - return a.namespaceDefaultState -} - -func (a *archivalConfig) GetNamespaceDefaultURI() string { - return a.namespaceDefaultURI -} - -func getClusterArchivalState(str string) (ArchivalState, error) { - str = strings.TrimSpace(strings.ToLower(str)) - switch str { - case "", config.ArchivalDisabled: - return ArchivalDisabled, nil - case config.ArchivalPaused: - return ArchivalPaused, nil - case config.ArchivalEnabled: - return ArchivalEnabled, nil - } - return ArchivalDisabled, fmt.Errorf("invalid archival state of %v for cluster, valid states are: {\"\", \"disabled\", \"paused\", \"enabled\"}", str) -} - -func getNamespaceArchivalState(str string) (enumspb.ArchivalState, error) { - str = strings.TrimSpace(strings.ToLower(str)) - switch str { - case "", config.ArchivalDisabled: - return enumspb.ARCHIVAL_STATE_DISABLED, nil - case config.ArchivalEnabled: - return enumspb.ARCHIVAL_STATE_ENABLED, nil - } - return enumspb.ARCHIVAL_STATE_DISABLED, fmt.Errorf("invalid archival state of %v for namespace, valid states are: {\"\", \"disabled\", \"enabled\"}", str) -} diff -Nru temporal-1.21.5-1/src/common/archiver/archivalMetadata_mock.go temporal-1.22.5/src/common/archiver/archivalMetadata_mock.go --- temporal-1.21.5-1/src/common/archiver/archivalMetadata_mock.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/archiver/archivalMetadata_mock.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,194 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Code generated by MockGen. DO NOT EDIT. -// Source: archivalMetadata.go - -// Package archiver is a generated GoMock package. -package archiver - -import ( - reflect "reflect" - - gomock "github.com/golang/mock/gomock" - v1 "go.temporal.io/api/enums/v1" -) - -// MockArchivalMetadata is a mock of ArchivalMetadata interface. 
-type MockArchivalMetadata struct { - ctrl *gomock.Controller - recorder *MockArchivalMetadataMockRecorder -} - -// MockArchivalMetadataMockRecorder is the mock recorder for MockArchivalMetadata. -type MockArchivalMetadataMockRecorder struct { - mock *MockArchivalMetadata -} - -// NewMockArchivalMetadata creates a new mock instance. -func NewMockArchivalMetadata(ctrl *gomock.Controller) *MockArchivalMetadata { - mock := &MockArchivalMetadata{ctrl: ctrl} - mock.recorder = &MockArchivalMetadataMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockArchivalMetadata) EXPECT() *MockArchivalMetadataMockRecorder { - return m.recorder -} - -// GetHistoryConfig mocks base method. -func (m *MockArchivalMetadata) GetHistoryConfig() ArchivalConfig { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetHistoryConfig") - ret0, _ := ret[0].(ArchivalConfig) - return ret0 -} - -// GetHistoryConfig indicates an expected call of GetHistoryConfig. -func (mr *MockArchivalMetadataMockRecorder) GetHistoryConfig() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHistoryConfig", reflect.TypeOf((*MockArchivalMetadata)(nil).GetHistoryConfig)) -} - -// GetVisibilityConfig mocks base method. -func (m *MockArchivalMetadata) GetVisibilityConfig() ArchivalConfig { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetVisibilityConfig") - ret0, _ := ret[0].(ArchivalConfig) - return ret0 -} - -// GetVisibilityConfig indicates an expected call of GetVisibilityConfig. -func (mr *MockArchivalMetadataMockRecorder) GetVisibilityConfig() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetVisibilityConfig", reflect.TypeOf((*MockArchivalMetadata)(nil).GetVisibilityConfig)) -} - -// MockArchivalConfig is a mock of ArchivalConfig interface. -type MockArchivalConfig struct { - ctrl *gomock.Controller - recorder *MockArchivalConfigMockRecorder -} - -// MockArchivalConfigMockRecorder is the mock recorder for MockArchivalConfig. -type MockArchivalConfigMockRecorder struct { - mock *MockArchivalConfig -} - -// NewMockArchivalConfig creates a new mock instance. -func NewMockArchivalConfig(ctrl *gomock.Controller) *MockArchivalConfig { - mock := &MockArchivalConfig{ctrl: ctrl} - mock.recorder = &MockArchivalConfigMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockArchivalConfig) EXPECT() *MockArchivalConfigMockRecorder { - return m.recorder -} - -// ClusterConfiguredForArchival mocks base method. -func (m *MockArchivalConfig) ClusterConfiguredForArchival() bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ClusterConfiguredForArchival") - ret0, _ := ret[0].(bool) - return ret0 -} - -// ClusterConfiguredForArchival indicates an expected call of ClusterConfiguredForArchival. -func (mr *MockArchivalConfigMockRecorder) ClusterConfiguredForArchival() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClusterConfiguredForArchival", reflect.TypeOf((*MockArchivalConfig)(nil).ClusterConfiguredForArchival)) -} - -// GetClusterState mocks base method. -func (m *MockArchivalConfig) GetClusterState() ArchivalState { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetClusterState") - ret0, _ := ret[0].(ArchivalState) - return ret0 -} - -// GetClusterState indicates an expected call of GetClusterState. 
-func (mr *MockArchivalConfigMockRecorder) GetClusterState() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetClusterState", reflect.TypeOf((*MockArchivalConfig)(nil).GetClusterState)) -} - -// GetNamespaceDefaultState mocks base method. -func (m *MockArchivalConfig) GetNamespaceDefaultState() v1.ArchivalState { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetNamespaceDefaultState") - ret0, _ := ret[0].(v1.ArchivalState) - return ret0 -} - -// GetNamespaceDefaultState indicates an expected call of GetNamespaceDefaultState. -func (mr *MockArchivalConfigMockRecorder) GetNamespaceDefaultState() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNamespaceDefaultState", reflect.TypeOf((*MockArchivalConfig)(nil).GetNamespaceDefaultState)) -} - -// GetNamespaceDefaultURI mocks base method. -func (m *MockArchivalConfig) GetNamespaceDefaultURI() string { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetNamespaceDefaultURI") - ret0, _ := ret[0].(string) - return ret0 -} - -// GetNamespaceDefaultURI indicates an expected call of GetNamespaceDefaultURI. -func (mr *MockArchivalConfigMockRecorder) GetNamespaceDefaultURI() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNamespaceDefaultURI", reflect.TypeOf((*MockArchivalConfig)(nil).GetNamespaceDefaultURI)) -} - -// ReadEnabled mocks base method. -func (m *MockArchivalConfig) ReadEnabled() bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ReadEnabled") - ret0, _ := ret[0].(bool) - return ret0 -} - -// ReadEnabled indicates an expected call of ReadEnabled. -func (mr *MockArchivalConfigMockRecorder) ReadEnabled() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadEnabled", reflect.TypeOf((*MockArchivalConfig)(nil).ReadEnabled)) -} - -// StaticClusterState mocks base method. -func (m *MockArchivalConfig) StaticClusterState() ArchivalState { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "StaticClusterState") - ret0, _ := ret[0].(ArchivalState) - return ret0 -} - -// StaticClusterState indicates an expected call of StaticClusterState. -func (mr *MockArchivalConfigMockRecorder) StaticClusterState() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StaticClusterState", reflect.TypeOf((*MockArchivalConfig)(nil).StaticClusterState)) -} diff -Nru temporal-1.21.5-1/src/common/archiver/archival_metadata.go temporal-1.22.5/src/common/archiver/archival_metadata.go --- temporal-1.21.5-1/src/common/archiver/archival_metadata.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/archiver/archival_metadata.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,236 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +//go:generate mockgen -copyright_file ../../LICENSE -package $GOPACKAGE -source $GOFILE -destination archival_metadata_mock.go + +package archiver + +import ( + "fmt" + "strings" + + enumspb "go.temporal.io/api/enums/v1" + + "go.temporal.io/server/common/config" + + "go.temporal.io/server/common/dynamicconfig" +) + +type ( + // ArchivalMetadata provides cluster level archival information + ArchivalMetadata interface { + GetHistoryConfig() ArchivalConfig + GetVisibilityConfig() ArchivalConfig + } + + // ArchivalConfig is an immutable representation of the archival configuration of the cluster + // This config is determined at cluster startup time + ArchivalConfig interface { + ClusterConfiguredForArchival() bool + GetClusterState() ArchivalState + ReadEnabled() bool + GetNamespaceDefaultState() enumspb.ArchivalState + GetNamespaceDefaultURI() string + StaticClusterState() ArchivalState + } + + archivalMetadata struct { + historyConfig ArchivalConfig + visibilityConfig ArchivalConfig + } + + archivalConfig struct { + staticClusterState ArchivalState + dynamicClusterState dynamicconfig.StringPropertyFn + enableRead dynamicconfig.BoolPropertyFn + namespaceDefaultState enumspb.ArchivalState + namespaceDefaultURI string + } + + // ArchivalState represents the archival state of the cluster + ArchivalState int +) + +func (a *archivalConfig) StaticClusterState() ArchivalState { + return a.staticClusterState +} + +const ( + // ArchivalDisabled means this cluster is not configured to handle archival + ArchivalDisabled ArchivalState = iota + // ArchivalPaused means this cluster is configured to handle archival but is currently not archiving + // This state is not yet implemented, as of now ArchivalPaused is treated the same way as ArchivalDisabled + ArchivalPaused + // ArchivalEnabled means this cluster is currently archiving + ArchivalEnabled +) + +// NewArchivalMetadata constructs a new ArchivalMetadata +func NewArchivalMetadata( + dc *dynamicconfig.Collection, + historyState string, + historyReadEnabled bool, + visibilityState string, + visibilityReadEnabled bool, + namespaceDefaults *config.ArchivalNamespaceDefaults, +) ArchivalMetadata { + historyConfig := NewArchivalConfig( + historyState, + dc.GetStringProperty(dynamicconfig.HistoryArchivalState, historyState), + dc.GetBoolProperty(dynamicconfig.EnableReadFromHistoryArchival, historyReadEnabled), + namespaceDefaults.History.State, + namespaceDefaults.History.URI, + ) + + visibilityConfig := NewArchivalConfig( + visibilityState, + dc.GetStringProperty(dynamicconfig.VisibilityArchivalState, visibilityState), + dc.GetBoolProperty(dynamicconfig.EnableReadFromVisibilityArchival, visibilityReadEnabled), + namespaceDefaults.Visibility.State, + namespaceDefaults.Visibility.URI, + ) + + return &archivalMetadata{ + historyConfig: historyConfig, + visibilityConfig: visibilityConfig, + } +} + +func (metadata *archivalMetadata) GetHistoryConfig() ArchivalConfig { + return metadata.historyConfig +} + +func (metadata *archivalMetadata) GetVisibilityConfig() ArchivalConfig 
{ + return metadata.visibilityConfig +} + +// NewArchivalConfig constructs a new valid ArchivalConfig +func NewArchivalConfig( + staticClusterStateStr string, + dynamicClusterState dynamicconfig.StringPropertyFn, + enableRead dynamicconfig.BoolPropertyFn, + namespaceDefaultStateStr string, + namespaceDefaultURI string, +) ArchivalConfig { + staticClusterState, err := getClusterArchivalState(staticClusterStateStr) + if err != nil { + panic(err) + } + namespaceDefaultState, err := getNamespaceArchivalState(namespaceDefaultStateStr) + if err != nil { + panic(err) + } + + return &archivalConfig{ + staticClusterState: staticClusterState, + dynamicClusterState: dynamicClusterState, + enableRead: enableRead, + namespaceDefaultState: namespaceDefaultState, + namespaceDefaultURI: namespaceDefaultURI, + } +} + +// NewDisabledArchvialConfig returns an ArchivalConfig where archival is disabled for both the cluster and the namespace +func NewDisabledArchvialConfig() ArchivalConfig { + return &archivalConfig{ + staticClusterState: ArchivalDisabled, + dynamicClusterState: nil, + enableRead: nil, + namespaceDefaultState: enumspb.ARCHIVAL_STATE_DISABLED, + namespaceDefaultURI: "", + } +} + +// NewEnabledArchivalConfig returns an ArchivalConfig where archival is enabled for both the cluster and the namespace +func NewEnabledArchivalConfig() ArchivalConfig { + return &archivalConfig{ + staticClusterState: ArchivalEnabled, + dynamicClusterState: dynamicconfig.GetStringPropertyFn("enabled"), + enableRead: dynamicconfig.GetBoolPropertyFn(true), + namespaceDefaultState: enumspb.ARCHIVAL_STATE_ENABLED, + namespaceDefaultURI: "some-uri", + } +} + +// ClusterConfiguredForArchival returns true if cluster is configured to handle archival, false otherwise +func (a *archivalConfig) ClusterConfiguredForArchival() bool { + return a.GetClusterState() == ArchivalEnabled +} + +func (a *archivalConfig) GetClusterState() ArchivalState { + // Only check dynamic config when archival is enabled in static config. + // If archival is disabled in static config, there will be no provider section in the static config + // and the archiver provider can not create any archiver. Therefore, in that case, + // even dynamic config says archival is enabled, we should ignore that. + // Only when archival is enabled in static config, should we check if there's any difference between static config and dynamic config. 
+ if a.staticClusterState != ArchivalEnabled { + return a.staticClusterState + } + + dynamicStateStr := a.dynamicClusterState() + dynamicState, err := getClusterArchivalState(dynamicStateStr) + if err != nil { + return ArchivalDisabled + } + return dynamicState +} + +func (a *archivalConfig) ReadEnabled() bool { + if !a.ClusterConfiguredForArchival() { + return false + } + return a.enableRead() +} + +func (a *archivalConfig) GetNamespaceDefaultState() enumspb.ArchivalState { + return a.namespaceDefaultState +} + +func (a *archivalConfig) GetNamespaceDefaultURI() string { + return a.namespaceDefaultURI +} + +func getClusterArchivalState(str string) (ArchivalState, error) { + str = strings.TrimSpace(strings.ToLower(str)) + switch str { + case "", config.ArchivalDisabled: + return ArchivalDisabled, nil + case config.ArchivalPaused: + return ArchivalPaused, nil + case config.ArchivalEnabled: + return ArchivalEnabled, nil + } + return ArchivalDisabled, fmt.Errorf("invalid archival state of %v for cluster, valid states are: {\"\", \"disabled\", \"paused\", \"enabled\"}", str) +} + +func getNamespaceArchivalState(str string) (enumspb.ArchivalState, error) { + str = strings.TrimSpace(strings.ToLower(str)) + switch str { + case "", config.ArchivalDisabled: + return enumspb.ARCHIVAL_STATE_DISABLED, nil + case config.ArchivalEnabled: + return enumspb.ARCHIVAL_STATE_ENABLED, nil + } + return enumspb.ARCHIVAL_STATE_DISABLED, fmt.Errorf("invalid archival state of %v for namespace, valid states are: {\"\", \"disabled\", \"enabled\"}", str) +} diff -Nru temporal-1.21.5-1/src/common/archiver/archival_metadata_mock.go temporal-1.22.5/src/common/archiver/archival_metadata_mock.go --- temporal-1.21.5-1/src/common/archiver/archival_metadata_mock.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/archiver/archival_metadata_mock.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,194 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Code generated by MockGen. DO NOT EDIT. +// Source: archival_metadata.go + +// Package archiver is a generated GoMock package. +package archiver + +import ( + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + v1 "go.temporal.io/api/enums/v1" +) + +// MockArchivalMetadata is a mock of ArchivalMetadata interface. 
+type MockArchivalMetadata struct { + ctrl *gomock.Controller + recorder *MockArchivalMetadataMockRecorder +} + +// MockArchivalMetadataMockRecorder is the mock recorder for MockArchivalMetadata. +type MockArchivalMetadataMockRecorder struct { + mock *MockArchivalMetadata +} + +// NewMockArchivalMetadata creates a new mock instance. +func NewMockArchivalMetadata(ctrl *gomock.Controller) *MockArchivalMetadata { + mock := &MockArchivalMetadata{ctrl: ctrl} + mock.recorder = &MockArchivalMetadataMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockArchivalMetadata) EXPECT() *MockArchivalMetadataMockRecorder { + return m.recorder +} + +// GetHistoryConfig mocks base method. +func (m *MockArchivalMetadata) GetHistoryConfig() ArchivalConfig { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetHistoryConfig") + ret0, _ := ret[0].(ArchivalConfig) + return ret0 +} + +// GetHistoryConfig indicates an expected call of GetHistoryConfig. +func (mr *MockArchivalMetadataMockRecorder) GetHistoryConfig() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHistoryConfig", reflect.TypeOf((*MockArchivalMetadata)(nil).GetHistoryConfig)) +} + +// GetVisibilityConfig mocks base method. +func (m *MockArchivalMetadata) GetVisibilityConfig() ArchivalConfig { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetVisibilityConfig") + ret0, _ := ret[0].(ArchivalConfig) + return ret0 +} + +// GetVisibilityConfig indicates an expected call of GetVisibilityConfig. +func (mr *MockArchivalMetadataMockRecorder) GetVisibilityConfig() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetVisibilityConfig", reflect.TypeOf((*MockArchivalMetadata)(nil).GetVisibilityConfig)) +} + +// MockArchivalConfig is a mock of ArchivalConfig interface. +type MockArchivalConfig struct { + ctrl *gomock.Controller + recorder *MockArchivalConfigMockRecorder +} + +// MockArchivalConfigMockRecorder is the mock recorder for MockArchivalConfig. +type MockArchivalConfigMockRecorder struct { + mock *MockArchivalConfig +} + +// NewMockArchivalConfig creates a new mock instance. +func NewMockArchivalConfig(ctrl *gomock.Controller) *MockArchivalConfig { + mock := &MockArchivalConfig{ctrl: ctrl} + mock.recorder = &MockArchivalConfigMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockArchivalConfig) EXPECT() *MockArchivalConfigMockRecorder { + return m.recorder +} + +// ClusterConfiguredForArchival mocks base method. +func (m *MockArchivalConfig) ClusterConfiguredForArchival() bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClusterConfiguredForArchival") + ret0, _ := ret[0].(bool) + return ret0 +} + +// ClusterConfiguredForArchival indicates an expected call of ClusterConfiguredForArchival. +func (mr *MockArchivalConfigMockRecorder) ClusterConfiguredForArchival() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClusterConfiguredForArchival", reflect.TypeOf((*MockArchivalConfig)(nil).ClusterConfiguredForArchival)) +} + +// GetClusterState mocks base method. +func (m *MockArchivalConfig) GetClusterState() ArchivalState { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetClusterState") + ret0, _ := ret[0].(ArchivalState) + return ret0 +} + +// GetClusterState indicates an expected call of GetClusterState. 
+func (mr *MockArchivalConfigMockRecorder) GetClusterState() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetClusterState", reflect.TypeOf((*MockArchivalConfig)(nil).GetClusterState)) +} + +// GetNamespaceDefaultState mocks base method. +func (m *MockArchivalConfig) GetNamespaceDefaultState() v1.ArchivalState { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetNamespaceDefaultState") + ret0, _ := ret[0].(v1.ArchivalState) + return ret0 +} + +// GetNamespaceDefaultState indicates an expected call of GetNamespaceDefaultState. +func (mr *MockArchivalConfigMockRecorder) GetNamespaceDefaultState() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNamespaceDefaultState", reflect.TypeOf((*MockArchivalConfig)(nil).GetNamespaceDefaultState)) +} + +// GetNamespaceDefaultURI mocks base method. +func (m *MockArchivalConfig) GetNamespaceDefaultURI() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetNamespaceDefaultURI") + ret0, _ := ret[0].(string) + return ret0 +} + +// GetNamespaceDefaultURI indicates an expected call of GetNamespaceDefaultURI. +func (mr *MockArchivalConfigMockRecorder) GetNamespaceDefaultURI() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNamespaceDefaultURI", reflect.TypeOf((*MockArchivalConfig)(nil).GetNamespaceDefaultURI)) +} + +// ReadEnabled mocks base method. +func (m *MockArchivalConfig) ReadEnabled() bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ReadEnabled") + ret0, _ := ret[0].(bool) + return ret0 +} + +// ReadEnabled indicates an expected call of ReadEnabled. +func (mr *MockArchivalConfigMockRecorder) ReadEnabled() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadEnabled", reflect.TypeOf((*MockArchivalConfig)(nil).ReadEnabled)) +} + +// StaticClusterState mocks base method. +func (m *MockArchivalConfig) StaticClusterState() ArchivalState { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StaticClusterState") + ret0, _ := ret[0].(ArchivalState) + return ret0 +} + +// StaticClusterState indicates an expected call of StaticClusterState. +func (mr *MockArchivalConfigMockRecorder) StaticClusterState() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StaticClusterState", reflect.TypeOf((*MockArchivalConfig)(nil).StaticClusterState)) +} diff -Nru temporal-1.21.5-1/src/common/archiver/filestore/historyArchiver.go temporal-1.22.5/src/common/archiver/filestore/historyArchiver.go --- temporal-1.21.5-1/src/common/archiver/filestore/historyArchiver.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/archiver/filestore/historyArchiver.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,319 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Filestore History Archiver will archive workflow histories to local disk. - -// Each Archive() request results in a file named in the format of -// hash(namespaceID, workflowID, runID)_version.history being created in the specified -// directory. Workflow histories stored in that file are encoded in JSON format. - -// The Get() method retrieves the archived histories from the directory specified in the -// URI. It optionally takes in a NextPageToken which specifies the workflow close failover -// version and the index of the first history batch that should be returned. Instead of -// NextPageToken, caller can also provide a close failover version, in which case, Get() method -// will return history batches starting from the beginning of that history version. If neither -// of NextPageToken or close failover version is specified, the highest close failover version -// will be picked. 
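The removed doc-comment above describes how the filestore history archiver names its files and how Get() pages through archived history with NextPageToken. As an editorial illustration only (not part of the upstream patch), the following minimal Go sketch shows how a caller might drive that paging loop, mirroring the TestGet_Success_SmallPageSize test further down in this diff. Constructor names, request fields, and import paths are taken from the code in the hunks; the archive directory, namespace ID, workflow ID, and run ID are placeholder values.

package main

import (
	"context"
	"fmt"

	"go.temporal.io/server/common/archiver"
	"go.temporal.io/server/common/archiver/filestore"
	"go.temporal.io/server/common/config"
	"go.temporal.io/server/common/log"
)

func main() {
	// Same bootstrap container and file/dir modes the tests in this diff use.
	container := &archiver.HistoryBootstrapContainer{Logger: log.NewNoopLogger()}
	histArchiver, err := filestore.NewHistoryArchiver(container, &config.FilestoreArchiver{
		FileMode: "0666",
		DirMode:  "0766",
	})
	if err != nil {
		panic(err)
	}

	// The URI scheme must be "file"; the path is the archive directory
	// (placeholder value here).
	uri, err := archiver.NewURI("file:///var/lib/temporal/archive")
	if err != nil {
		panic(err)
	}

	request := &archiver.GetHistoryRequest{
		NamespaceID: "some-namespace-id",
		WorkflowID:  "some-workflow-id",
		RunID:       "some-run-id",
		PageSize:    10,
	}

	// Keep calling Get until no NextPageToken is returned, as the removed
	// doc-comment describes: omitting both NextPageToken and a close
	// failover version makes the archiver pick the highest version.
	for {
		response, err := histArchiver.Get(context.Background(), uri, request)
		if err != nil {
			panic(err)
		}
		for _, batch := range response.HistoryBatches {
			fmt.Printf("got batch with %d events\n", len(batch.Events))
		}
		if response.NextPageToken == nil {
			break
		}
		request.NextPageToken = response.NextPageToken
	}
}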
- -package filestore - -import ( - "context" - "errors" - "os" - "path" - "strconv" - - historypb "go.temporal.io/api/history/v1" - "go.temporal.io/api/serviceerror" - - "go.temporal.io/server/common" - "go.temporal.io/server/common/archiver" - "go.temporal.io/server/common/codec" - "go.temporal.io/server/common/config" - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/log/tag" -) - -const ( - // URIScheme is the scheme for the filestore implementation - URIScheme = "file" - - errEncodeHistory = "failed to encode history batches" - errMakeDirectory = "failed to make directory" - errWriteFile = "failed to write history to file" - - targetHistoryBlobSize = 2 * 1024 * 1024 // 2MB -) - -var ( - errInvalidFileMode = errors.New("invalid file mode") - errInvalidDirMode = errors.New("invalid directory mode") -) - -type ( - historyArchiver struct { - container *archiver.HistoryBootstrapContainer - fileMode os.FileMode - dirMode os.FileMode - - // only set in test code - historyIterator archiver.HistoryIterator - } - - getHistoryToken struct { - CloseFailoverVersion int64 - NextBatchIdx int - } -) - -// NewHistoryArchiver creates a new archiver.HistoryArchiver based on filestore -func NewHistoryArchiver( - container *archiver.HistoryBootstrapContainer, - config *config.FilestoreArchiver, -) (archiver.HistoryArchiver, error) { - return newHistoryArchiver(container, config, nil) -} - -func newHistoryArchiver( - container *archiver.HistoryBootstrapContainer, - config *config.FilestoreArchiver, - historyIterator archiver.HistoryIterator, -) (*historyArchiver, error) { - fileMode, err := strconv.ParseUint(config.FileMode, 0, 32) - if err != nil { - return nil, errInvalidFileMode - } - dirMode, err := strconv.ParseUint(config.DirMode, 0, 32) - if err != nil { - return nil, errInvalidDirMode - } - return &historyArchiver{ - container: container, - fileMode: os.FileMode(fileMode), - dirMode: os.FileMode(dirMode), - historyIterator: historyIterator, - }, nil -} - -func (h *historyArchiver) Archive( - ctx context.Context, - URI archiver.URI, - request *archiver.ArchiveHistoryRequest, - opts ...archiver.ArchiveOption, -) (err error) { - featureCatalog := archiver.GetFeatureCatalog(opts...) 
- defer func() { - if err != nil && !common.IsPersistenceTransientError(err) && featureCatalog.NonRetryableError != nil { - err = featureCatalog.NonRetryableError() - } - }() - - logger := archiver.TagLoggerWithArchiveHistoryRequestAndURI(h.container.Logger, request, URI.String()) - - if err := h.ValidateURI(URI); err != nil { - logger.Error(archiver.ArchiveNonRetryableErrorMsg, tag.ArchivalArchiveFailReason(archiver.ErrReasonInvalidURI), tag.Error(err)) - return err - } - - if err := archiver.ValidateHistoryArchiveRequest(request); err != nil { - logger.Error(archiver.ArchiveNonRetryableErrorMsg, tag.ArchivalArchiveFailReason(archiver.ErrReasonInvalidArchiveRequest), tag.Error(err)) - return err - } - - historyIterator := h.historyIterator - if historyIterator == nil { // will only be set by testing code - historyIterator = archiver.NewHistoryIterator(request, h.container.ExecutionManager, targetHistoryBlobSize) - } - - var historyBatches []*historypb.History - for historyIterator.HasNext() { - historyBlob, err := historyIterator.Next(ctx) - if err != nil { - if _, isNotFound := err.(*serviceerror.NotFound); isNotFound { - // workflow history no longer exists, may due to duplicated archival signal - // this may happen even in the middle of iterating history as two archival signals - // can be processed concurrently. - logger.Info(archiver.ArchiveSkippedInfoMsg) - return nil - } - - logger = log.With(logger, tag.ArchivalArchiveFailReason(archiver.ErrReasonReadHistory), tag.Error(err)) - if !common.IsPersistenceTransientError(err) { - logger.Error(archiver.ArchiveNonRetryableErrorMsg) - } else { - logger.Error(archiver.ArchiveTransientErrorMsg) - } - return err - } - - if historyMutated(request, historyBlob.Body, historyBlob.Header.IsLast) { - logger.Error(archiver.ArchiveNonRetryableErrorMsg, tag.ArchivalArchiveFailReason(archiver.ErrReasonHistoryMutated)) - return archiver.ErrHistoryMutated - } - - historyBatches = append(historyBatches, historyBlob.Body...) 
- } - - encoder := codec.NewJSONPBEncoder() - encodedHistoryBatches, err := encoder.EncodeHistories(historyBatches) - if err != nil { - logger.Error(archiver.ArchiveNonRetryableErrorMsg, tag.ArchivalArchiveFailReason(errEncodeHistory), tag.Error(err)) - return err - } - - dirPath := URI.Path() - if err = mkdirAll(dirPath, h.dirMode); err != nil { - logger.Error(archiver.ArchiveNonRetryableErrorMsg, tag.ArchivalArchiveFailReason(errMakeDirectory), tag.Error(err)) - return err - } - - filename := constructHistoryFilename(request.NamespaceID, request.WorkflowID, request.RunID, request.CloseFailoverVersion) - if err := writeFile(path.Join(dirPath, filename), encodedHistoryBatches, h.fileMode); err != nil { - logger.Error(archiver.ArchiveNonRetryableErrorMsg, tag.ArchivalArchiveFailReason(errWriteFile), tag.Error(err)) - return err - } - - return nil -} - -func (h *historyArchiver) Get( - ctx context.Context, - URI archiver.URI, - request *archiver.GetHistoryRequest, -) (*archiver.GetHistoryResponse, error) { - if err := h.ValidateURI(URI); err != nil { - return nil, serviceerror.NewInvalidArgument(archiver.ErrInvalidURI.Error()) - } - - if err := archiver.ValidateGetRequest(request); err != nil { - return nil, serviceerror.NewInvalidArgument(archiver.ErrInvalidGetHistoryRequest.Error()) - } - - dirPath := URI.Path() - exists, err := directoryExists(dirPath) - if err != nil { - return nil, serviceerror.NewInternal(err.Error()) - } - if !exists { - return nil, serviceerror.NewNotFound(archiver.ErrHistoryNotExist.Error()) - } - - var token *getHistoryToken - if request.NextPageToken != nil { - token, err = deserializeGetHistoryToken(request.NextPageToken) - if err != nil { - return nil, serviceerror.NewInvalidArgument(archiver.ErrNextPageTokenCorrupted.Error()) - } - } else if request.CloseFailoverVersion != nil { - token = &getHistoryToken{ - CloseFailoverVersion: *request.CloseFailoverVersion, - NextBatchIdx: 0, - } - } else { - highestVersion, err := getHighestVersion(dirPath, request) - if err != nil { - return nil, serviceerror.NewInternal(err.Error()) - } - token = &getHistoryToken{ - CloseFailoverVersion: *highestVersion, - NextBatchIdx: 0, - } - } - - filename := constructHistoryFilename(request.NamespaceID, request.WorkflowID, request.RunID, token.CloseFailoverVersion) - filepath := path.Join(dirPath, filename) - exists, err = fileExists(filepath) - if err != nil { - return nil, serviceerror.NewInternal(err.Error()) - } - if !exists { - return nil, serviceerror.NewNotFound(archiver.ErrHistoryNotExist.Error()) - } - - encodedHistoryBatches, err := readFile(filepath) - if err != nil { - return nil, serviceerror.NewInternal(err.Error()) - } - - encoder := codec.NewJSONPBEncoder() - historyBatches, err := encoder.DecodeHistories(encodedHistoryBatches) - if err != nil { - return nil, serviceerror.NewInternal(err.Error()) - } - historyBatches = historyBatches[token.NextBatchIdx:] - - response := &archiver.GetHistoryResponse{} - numOfEvents := 0 - numOfBatches := 0 - for _, batch := range historyBatches { - response.HistoryBatches = append(response.HistoryBatches, batch) - numOfBatches++ - numOfEvents += len(batch.Events) - if numOfEvents >= request.PageSize { - break - } - } - - if numOfBatches < len(historyBatches) { - token.NextBatchIdx += numOfBatches - nextToken, err := serializeToken(token) - if err != nil { - return nil, serviceerror.NewInternal(err.Error()) - } - response.NextPageToken = nextToken - } - - return response, nil -} - -func (h *historyArchiver) ValidateURI(URI archiver.URI) 
error { - if URI.Scheme() != URIScheme { - return archiver.ErrURISchemeMismatch - } - - return validateDirPath(URI.Path()) -} - -func getHighestVersion(dirPath string, request *archiver.GetHistoryRequest) (*int64, error) { - filenames, err := listFilesByPrefix(dirPath, constructHistoryFilenamePrefix(request.NamespaceID, request.WorkflowID, request.RunID)) - if err != nil { - return nil, err - } - - var highestVersion *int64 - for _, filename := range filenames { - version, err := extractCloseFailoverVersion(filename) - if err != nil { - continue - } - if highestVersion == nil || version > *highestVersion { - highestVersion = &version - } - } - if highestVersion == nil { - return nil, archiver.ErrHistoryNotExist - } - return highestVersion, nil -} diff -Nru temporal-1.21.5-1/src/common/archiver/filestore/historyArchiver_test.go temporal-1.22.5/src/common/archiver/filestore/historyArchiver_test.go --- temporal-1.21.5-1/src/common/archiver/filestore/historyArchiver_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/archiver/filestore/historyArchiver_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,646 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- -package filestore - -import ( - "context" - "errors" - "os" - "path" - "testing" - "time" - - enumspb "go.temporal.io/api/enums/v1" - - "go.temporal.io/server/tests/testutils" - - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - historypb "go.temporal.io/api/history/v1" - "go.temporal.io/api/serviceerror" - - archiverspb "go.temporal.io/server/api/archiver/v1" - "go.temporal.io/server/common" - "go.temporal.io/server/common/archiver" - "go.temporal.io/server/common/config" - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/primitives/timestamp" -) - -const ( - testNamespaceID = "test-namespace-id" - testNamespace = "test-namespace" - testWorkflowID = "test-workflow-id" - testRunID = "test-run-id" - testNextEventID = 1800 - testCloseFailoverVersion = int64(100) - testPageSize = 100 - - testFileModeStr = "0666" - testDirModeStr = "0766" -) - -var ( - testBranchToken = []byte{1, 2, 3} -) - -type historyArchiverSuite struct { - *require.Assertions - suite.Suite - - container *archiver.HistoryBootstrapContainer - testArchivalURI archiver.URI - testGetDirectory string - historyBatchesV1 []*historypb.History - historyBatchesV100 []*historypb.History -} - -func TestHistoryArchiverSuite(t *testing.T) { - suite.Run(t, new(historyArchiverSuite)) -} - -func (s *historyArchiverSuite) SetupSuite() { - var err error - s.testGetDirectory, err = os.MkdirTemp("", "TestGet") - s.Require().NoError(err) - s.setupHistoryDirectory() - s.testArchivalURI, err = archiver.NewURI("file:///a/b/c") - s.Require().NoError(err) -} - -func (s *historyArchiverSuite) TearDownSuite() { - if err := os.RemoveAll(s.testGetDirectory); err != nil { - s.Fail("Failed to remove test directory %v: %v", s.testGetDirectory, err) - } -} - -func (s *historyArchiverSuite) SetupTest() { - s.Assertions = require.New(s.T()) - s.container = &archiver.HistoryBootstrapContainer{ - Logger: log.NewNoopLogger(), - } -} - -func (s *historyArchiverSuite) TestValidateURI() { - testCases := []struct { - URI string - expectedErr error - }{ - { - URI: "wrongscheme:///a/b/c", - expectedErr: archiver.ErrURISchemeMismatch, - }, - { - URI: "file://", - expectedErr: errEmptyDirectoryPath, - }, - { - URI: "file:///a/b/c", - expectedErr: nil, - }, - } - - historyArchiver := s.newTestHistoryArchiver(nil) - for _, tc := range testCases { - URI, err := archiver.NewURI(tc.URI) - s.NoError(err) - s.Equal(tc.expectedErr, historyArchiver.ValidateURI(URI)) - } -} - -func (s *historyArchiverSuite) TestArchive_Fail_InvalidURI() { - historyArchiver := s.newTestHistoryArchiver(nil) - request := &archiver.ArchiveHistoryRequest{ - NamespaceID: testNamespaceID, - Namespace: testNamespace, - WorkflowID: testWorkflowID, - RunID: testRunID, - BranchToken: testBranchToken, - NextEventID: testNextEventID, - CloseFailoverVersion: testCloseFailoverVersion, - } - URI, err := archiver.NewURI("wrongscheme://") - s.NoError(err) - err = historyArchiver.Archive(context.Background(), URI, request) - s.Error(err) -} - -func (s *historyArchiverSuite) TestArchive_Fail_InvalidRequest() { - historyArchiver := s.newTestHistoryArchiver(nil) - request := &archiver.ArchiveHistoryRequest{ - NamespaceID: testNamespaceID, - Namespace: testNamespace, - WorkflowID: "", // an invalid request - RunID: testRunID, - BranchToken: testBranchToken, - NextEventID: testNextEventID, - CloseFailoverVersion: testCloseFailoverVersion, - } - err := historyArchiver.Archive(context.Background(), s.testArchivalURI, request) - 
s.Error(err) -} - -func (s *historyArchiverSuite) TestArchive_Fail_ErrorOnReadHistory() { - mockCtrl := gomock.NewController(s.T()) - defer mockCtrl.Finish() - historyIterator := archiver.NewMockHistoryIterator(mockCtrl) - gomock.InOrder( - historyIterator.EXPECT().HasNext().Return(true), - historyIterator.EXPECT().Next(gomock.Any()).Return(nil, errors.New("some random error")), - ) - - historyArchiver := s.newTestHistoryArchiver(historyIterator) - request := &archiver.ArchiveHistoryRequest{ - NamespaceID: testNamespaceID, - Namespace: testNamespace, - WorkflowID: testWorkflowID, - RunID: testRunID, - BranchToken: testBranchToken, - NextEventID: testNextEventID, - CloseFailoverVersion: testCloseFailoverVersion, - } - err := historyArchiver.Archive(context.Background(), s.testArchivalURI, request) - s.Error(err) -} - -func (s *historyArchiverSuite) TestArchive_Fail_TimeoutWhenReadingHistory() { - mockCtrl := gomock.NewController(s.T()) - defer mockCtrl.Finish() - historyIterator := archiver.NewMockHistoryIterator(mockCtrl) - gomock.InOrder( - historyIterator.EXPECT().HasNext().Return(true), - historyIterator.EXPECT().Next(gomock.Any()).Return(nil, serviceerror.NewResourceExhausted(enumspb.RESOURCE_EXHAUSTED_CAUSE_RPS_LIMIT, "")), - ) - - historyArchiver := s.newTestHistoryArchiver(historyIterator) - request := &archiver.ArchiveHistoryRequest{ - NamespaceID: testNamespaceID, - Namespace: testNamespace, - WorkflowID: testWorkflowID, - RunID: testRunID, - BranchToken: testBranchToken, - NextEventID: testNextEventID, - CloseFailoverVersion: testCloseFailoverVersion, - } - err := historyArchiver.Archive(getCanceledContext(), s.testArchivalURI, request) - s.Error(err) -} - -func (s *historyArchiverSuite) TestArchive_Fail_HistoryMutated() { - mockCtrl := gomock.NewController(s.T()) - defer mockCtrl.Finish() - historyIterator := archiver.NewMockHistoryIterator(mockCtrl) - historyBatches := []*historypb.History{ - { - Events: []*historypb.HistoryEvent{ - { - EventId: common.FirstEventID + 1, - EventTime: timestamp.TimePtr(time.Now().UTC()), - Version: testCloseFailoverVersion + 1, - }, - }, - }, - } - historyBlob := &archiverspb.HistoryBlob{ - Header: &archiverspb.HistoryBlobHeader{ - IsLast: true, - }, - Body: historyBatches, - } - gomock.InOrder( - historyIterator.EXPECT().HasNext().Return(true), - historyIterator.EXPECT().Next(gomock.Any()).Return(historyBlob, nil), - ) - - historyArchiver := s.newTestHistoryArchiver(historyIterator) - request := &archiver.ArchiveHistoryRequest{ - NamespaceID: testNamespaceID, - Namespace: testNamespace, - WorkflowID: testWorkflowID, - RunID: testRunID, - BranchToken: testBranchToken, - NextEventID: testNextEventID, - CloseFailoverVersion: testCloseFailoverVersion, - } - err := historyArchiver.Archive(context.Background(), s.testArchivalURI, request) - s.Error(err) -} - -func (s *historyArchiverSuite) TestArchive_Fail_NonRetryableErrorOption() { - mockCtrl := gomock.NewController(s.T()) - defer mockCtrl.Finish() - historyIterator := archiver.NewMockHistoryIterator(mockCtrl) - gomock.InOrder( - historyIterator.EXPECT().HasNext().Return(true), - historyIterator.EXPECT().Next(gomock.Any()).Return(nil, errors.New("some random error")), - ) - - historyArchiver := s.newTestHistoryArchiver(historyIterator) - request := &archiver.ArchiveHistoryRequest{ - NamespaceID: testNamespaceID, - Namespace: testNamespace, - WorkflowID: testWorkflowID, - RunID: testRunID, - BranchToken: testBranchToken, - NextEventID: testNextEventID, - CloseFailoverVersion: 
testCloseFailoverVersion, - } - nonRetryableErr := errors.New("some non-retryable error") - err := historyArchiver.Archive(context.Background(), s.testArchivalURI, request, archiver.GetNonRetryableErrorOption(nonRetryableErr)) - s.Equal(nonRetryableErr, err) -} - -func (s *historyArchiverSuite) TestArchive_Skip() { - mockCtrl := gomock.NewController(s.T()) - defer mockCtrl.Finish() - historyIterator := archiver.NewMockHistoryIterator(mockCtrl) - historyBlob := &archiverspb.HistoryBlob{ - Header: &archiverspb.HistoryBlobHeader{ - IsLast: false, - }, - Body: []*historypb.History{ - { - Events: []*historypb.HistoryEvent{ - { - EventId: common.FirstEventID, - EventTime: timestamp.TimePtr(time.Now().UTC()), - Version: testCloseFailoverVersion, - }, - }, - }, - }, - } - gomock.InOrder( - historyIterator.EXPECT().HasNext().Return(true), - historyIterator.EXPECT().Next(gomock.Any()).Return(historyBlob, nil), - historyIterator.EXPECT().HasNext().Return(true), - historyIterator.EXPECT().Next(gomock.Any()).Return(nil, serviceerror.NewNotFound("workflow not found")), - ) - - historyArchiver := s.newTestHistoryArchiver(historyIterator) - request := &archiver.ArchiveHistoryRequest{ - NamespaceID: testNamespaceID, - Namespace: testNamespace, - WorkflowID: testWorkflowID, - RunID: testRunID, - BranchToken: testBranchToken, - NextEventID: testNextEventID, - CloseFailoverVersion: testCloseFailoverVersion, - } - err := historyArchiver.Archive(context.Background(), s.testArchivalURI, request) - s.NoError(err) -} - -func (s *historyArchiverSuite) TestArchive_Success() { - mockCtrl := gomock.NewController(s.T()) - defer mockCtrl.Finish() - historyIterator := archiver.NewMockHistoryIterator(mockCtrl) - historyBatches := []*historypb.History{ - { - Events: []*historypb.HistoryEvent{ - { - EventId: common.FirstEventID + 1, - EventTime: timestamp.TimePtr(time.Now().UTC()), - Version: testCloseFailoverVersion, - }, - { - EventId: common.FirstEventID + 2, - EventTime: timestamp.TimePtr(time.Now().UTC()), - Version: testCloseFailoverVersion, - }, - }, - }, - { - Events: []*historypb.HistoryEvent{ - { - EventId: testNextEventID - 1, - EventTime: timestamp.TimePtr(time.Now().UTC()), - Version: testCloseFailoverVersion, - }, - }, - }, - } - historyBlob := &archiverspb.HistoryBlob{ - Header: &archiverspb.HistoryBlobHeader{ - IsLast: true, - }, - Body: historyBatches, - } - gomock.InOrder( - historyIterator.EXPECT().HasNext().Return(true), - historyIterator.EXPECT().Next(gomock.Any()).Return(historyBlob, nil), - historyIterator.EXPECT().HasNext().Return(false), - ) - - dir := testutils.MkdirTemp(s.T(), "", "TestArchiveSingleRead") - - historyArchiver := s.newTestHistoryArchiver(historyIterator) - request := &archiver.ArchiveHistoryRequest{ - NamespaceID: testNamespaceID, - Namespace: testNamespace, - WorkflowID: testWorkflowID, - RunID: testRunID, - BranchToken: testBranchToken, - NextEventID: testNextEventID, - CloseFailoverVersion: testCloseFailoverVersion, - } - URI, err := archiver.NewURI("file://" + dir) - s.NoError(err) - err = historyArchiver.Archive(context.Background(), URI, request) - s.NoError(err) - - expectedFilename := constructHistoryFilename(testNamespaceID, testWorkflowID, testRunID, testCloseFailoverVersion) - s.assertFileExists(path.Join(dir, expectedFilename)) -} - -func (s *historyArchiverSuite) TestGet_Fail_InvalidURI() { - historyArchiver := s.newTestHistoryArchiver(nil) - request := &archiver.GetHistoryRequest{ - NamespaceID: testNamespaceID, - WorkflowID: testWorkflowID, - RunID: testRunID, - 
PageSize: 100, - } - URI, err := archiver.NewURI("wrongscheme://") - s.NoError(err) - response, err := historyArchiver.Get(context.Background(), URI, request) - s.Nil(response) - s.Error(err) -} - -func (s *historyArchiverSuite) TestGet_Fail_InvalidRequest() { - historyArchiver := s.newTestHistoryArchiver(nil) - request := &archiver.GetHistoryRequest{ - NamespaceID: testNamespaceID, - WorkflowID: testWorkflowID, - RunID: testRunID, - PageSize: 0, // pageSize should be greater than 0 - } - response, err := historyArchiver.Get(context.Background(), s.testArchivalURI, request) - s.Nil(response) - s.Error(err) - s.IsType(&serviceerror.InvalidArgument{}, err) -} - -func (s *historyArchiverSuite) TestGet_Fail_DirectoryNotExist() { - historyArchiver := s.newTestHistoryArchiver(nil) - request := &archiver.GetHistoryRequest{ - NamespaceID: testNamespaceID, - WorkflowID: testWorkflowID, - RunID: testRunID, - PageSize: testPageSize, - } - response, err := historyArchiver.Get(context.Background(), s.testArchivalURI, request) - s.Nil(response) - s.Error(err) - s.IsType(&serviceerror.NotFound{}, err) -} - -func (s *historyArchiverSuite) TestGet_Fail_InvalidToken() { - historyArchiver := s.newTestHistoryArchiver(nil) - request := &archiver.GetHistoryRequest{ - NamespaceID: testNamespaceID, - WorkflowID: testWorkflowID, - RunID: testRunID, - PageSize: testPageSize, - NextPageToken: []byte{'r', 'a', 'n', 'd', 'o', 'm'}, - } - URI, err := archiver.NewURI("file:///") - s.NoError(err) - response, err := historyArchiver.Get(context.Background(), URI, request) - s.Nil(response) - s.Error(err) - s.IsType(&serviceerror.InvalidArgument{}, err) -} - -func (s *historyArchiverSuite) TestGet_Fail_FileNotExist() { - historyArchiver := s.newTestHistoryArchiver(nil) - testCloseFailoverVersion := testCloseFailoverVersion - request := &archiver.GetHistoryRequest{ - NamespaceID: testNamespaceID, - WorkflowID: testWorkflowID, - RunID: testRunID, - PageSize: testPageSize, - CloseFailoverVersion: &testCloseFailoverVersion, - } - URI, err := archiver.NewURI("file:///") - s.NoError(err) - response, err := historyArchiver.Get(context.Background(), URI, request) - s.Nil(response) - s.Error(err) - s.IsType(&serviceerror.NotFound{}, err) -} - -func (s *historyArchiverSuite) TestGet_Success_PickHighestVersion() { - historyArchiver := s.newTestHistoryArchiver(nil) - request := &archiver.GetHistoryRequest{ - NamespaceID: testNamespaceID, - WorkflowID: testWorkflowID, - RunID: testRunID, - PageSize: testPageSize, - } - URI, err := archiver.NewURI("file://" + s.testGetDirectory) - s.NoError(err) - response, err := historyArchiver.Get(context.Background(), URI, request) - s.NoError(err) - s.Nil(response.NextPageToken) - s.Equal(s.historyBatchesV100, response.HistoryBatches) -} - -func (s *historyArchiverSuite) TestGet_Success_UseProvidedVersion() { - historyArchiver := s.newTestHistoryArchiver(nil) - testCloseFailoverVersion := int64(1) - request := &archiver.GetHistoryRequest{ - NamespaceID: testNamespaceID, - WorkflowID: testWorkflowID, - RunID: testRunID, - PageSize: testPageSize, - CloseFailoverVersion: &testCloseFailoverVersion, - } - URI, err := archiver.NewURI("file://" + s.testGetDirectory) - s.NoError(err) - response, err := historyArchiver.Get(context.Background(), URI, request) - s.NoError(err) - s.Nil(response.NextPageToken) - s.Equal(s.historyBatchesV1, response.HistoryBatches) -} - -func (s *historyArchiverSuite) TestGet_Success_SmallPageSize() { - historyArchiver := s.newTestHistoryArchiver(nil) - testCloseFailoverVersion 
:= int64(100) - request := &archiver.GetHistoryRequest{ - NamespaceID: testNamespaceID, - WorkflowID: testWorkflowID, - RunID: testRunID, - PageSize: 1, - CloseFailoverVersion: &testCloseFailoverVersion, - } - var combinedHistory []*historypb.History - - URI, err := archiver.NewURI("file://" + s.testGetDirectory) - s.NoError(err) - response, err := historyArchiver.Get(context.Background(), URI, request) - s.NoError(err) - s.NotNil(response) - s.NotNil(response.NextPageToken) - s.NotNil(response.HistoryBatches) - s.Len(response.HistoryBatches, 1) - combinedHistory = append(combinedHistory, response.HistoryBatches...) - - request.NextPageToken = response.NextPageToken - response, err = historyArchiver.Get(context.Background(), URI, request) - s.NoError(err) - s.NotNil(response) - s.Nil(response.NextPageToken) - s.NotNil(response.HistoryBatches) - s.Len(response.HistoryBatches, 1) - combinedHistory = append(combinedHistory, response.HistoryBatches...) - - s.Equal(s.historyBatchesV100, combinedHistory) -} - -func (s *historyArchiverSuite) TestArchiveAndGet() { - mockCtrl := gomock.NewController(s.T()) - defer mockCtrl.Finish() - historyIterator := archiver.NewMockHistoryIterator(mockCtrl) - historyBlob := &archiverspb.HistoryBlob{ - Header: &archiverspb.HistoryBlobHeader{ - IsLast: true, - }, - Body: s.historyBatchesV100, - } - gomock.InOrder( - historyIterator.EXPECT().HasNext().Return(true), - historyIterator.EXPECT().Next(gomock.Any()).Return(historyBlob, nil), - historyIterator.EXPECT().HasNext().Return(false), - ) - - dir := testutils.MkdirTemp(s.T(), "", "TestArchiveAndGet") - - historyArchiver := s.newTestHistoryArchiver(historyIterator) - archiveRequest := &archiver.ArchiveHistoryRequest{ - NamespaceID: testNamespaceID, - Namespace: testNamespace, - WorkflowID: testWorkflowID, - RunID: testRunID, - BranchToken: testBranchToken, - NextEventID: testNextEventID, - CloseFailoverVersion: testCloseFailoverVersion, - } - URI, err := archiver.NewURI("file://" + dir) - s.NoError(err) - err = historyArchiver.Archive(context.Background(), URI, archiveRequest) - s.NoError(err) - - expectedFilename := constructHistoryFilename(testNamespaceID, testWorkflowID, testRunID, testCloseFailoverVersion) - s.assertFileExists(path.Join(dir, expectedFilename)) - - getRequest := &archiver.GetHistoryRequest{ - NamespaceID: testNamespaceID, - WorkflowID: testWorkflowID, - RunID: testRunID, - PageSize: testPageSize, - } - response, err := historyArchiver.Get(context.Background(), URI, getRequest) - s.NoError(err) - s.NotNil(response) - s.Nil(response.NextPageToken) - s.Equal(s.historyBatchesV100, response.HistoryBatches) -} - -func (s *historyArchiverSuite) newTestHistoryArchiver(historyIterator archiver.HistoryIterator) *historyArchiver { - config := &config.FilestoreArchiver{ - FileMode: testFileModeStr, - DirMode: testDirModeStr, - } - archiver, err := newHistoryArchiver(s.container, config, historyIterator) - s.NoError(err) - return archiver -} - -func (s *historyArchiverSuite) setupHistoryDirectory() { - now := time.Date(2020, 8, 22, 1, 2, 3, 4, time.UTC) - s.historyBatchesV1 = []*historypb.History{ - { - Events: []*historypb.HistoryEvent{ - { - EventId: testNextEventID - 1, - EventTime: &now, - Version: 1, - }, - }, - }, - } - - s.historyBatchesV100 = []*historypb.History{ - { - Events: []*historypb.HistoryEvent{ - { - EventId: common.FirstEventID + 1, - EventTime: &now, - Version: testCloseFailoverVersion, - }, - { - EventId: common.FirstEventID + 1, - EventTime: &now, - Version: testCloseFailoverVersion, - 
}, - }, - }, - { - Events: []*historypb.HistoryEvent{ - { - EventId: testNextEventID - 1, - EventTime: &now, - Version: testCloseFailoverVersion, - }, - }, - }, - } - - s.writeHistoryBatchesForGetTest(s.historyBatchesV1, int64(1)) - s.writeHistoryBatchesForGetTest(s.historyBatchesV100, testCloseFailoverVersion) -} - -func (s *historyArchiverSuite) writeHistoryBatchesForGetTest(historyBatches []*historypb.History, version int64) { - data, err := encodeHistories(historyBatches) - s.Require().NoError(err) - filename := constructHistoryFilename(testNamespaceID, testWorkflowID, testRunID, version) - err = writeFile(path.Join(s.testGetDirectory, filename), data, testFileMode) - s.Require().NoError(err) -} - -func (s *historyArchiverSuite) assertFileExists(filepath string) { - exists, err := fileExists(filepath) - s.NoError(err) - s.True(exists) -} - -func getCanceledContext() context.Context { - ctx, cancel := context.WithCancel(context.Background()) - cancel() - return ctx -} diff -Nru temporal-1.21.5-1/src/common/archiver/filestore/history_archiver.go temporal-1.22.5/src/common/archiver/filestore/history_archiver.go --- temporal-1.21.5-1/src/common/archiver/filestore/history_archiver.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/archiver/filestore/history_archiver.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,319 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Filestore History Archiver will archive workflow histories to local disk. + +// Each Archive() request results in a file named in the format of +// hash(namespaceID, workflowID, runID)_version.history being created in the specified +// directory. Workflow histories stored in that file are encoded in JSON format. + +// The Get() method retrieves the archived histories from the directory specified in the +// URI. It optionally takes in a NextPageToken which specifies the workflow close failover +// version and the index of the first history batch that should be returned. Instead of +// NextPageToken, caller can also provide a close failover version, in which case, Get() method +// will return history batches starting from the beginning of that history version. If neither +// of NextPageToken or close failover version is specified, the highest close failover version +// will be picked. 
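To make the naming convention described in the comment above concrete, here is a minimal, self-contained sketch that is not taken from this diff: exampleHistoryFilename and the FNV-1a hash are assumptions chosen purely for illustration and are not the package's actual constructHistoryFilename helper.

package main

import (
	"fmt"
	"hash/fnv"
)

// exampleHistoryFilename is a hypothetical stand-in for constructHistoryFilename:
// it hashes the three identifiers and appends the close failover version, giving
// the "hash(namespaceID, workflowID, runID)_version.history" shape described above.
func exampleHistoryFilename(namespaceID, workflowID, runID string, version int64) string {
	h := fnv.New64a()
	h.Write([]byte(namespaceID))
	h.Write([]byte(workflowID))
	h.Write([]byte(runID))
	return fmt.Sprintf("%d_%d.history", h.Sum64(), version)
}

func main() {
	fmt.Println(exampleHistoryFilename("test-namespace-id", "test-workflow-id", "test-run-id", 100))
}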
+ +package filestore + +import ( + "context" + "errors" + "os" + "path" + "strconv" + + historypb "go.temporal.io/api/history/v1" + "go.temporal.io/api/serviceerror" + + "go.temporal.io/server/common" + "go.temporal.io/server/common/archiver" + "go.temporal.io/server/common/codec" + "go.temporal.io/server/common/config" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/log/tag" +) + +const ( + // URIScheme is the scheme for the filestore implementation + URIScheme = "file" + + errEncodeHistory = "failed to encode history batches" + errMakeDirectory = "failed to make directory" + errWriteFile = "failed to write history to file" + + targetHistoryBlobSize = 2 * 1024 * 1024 // 2MB +) + +var ( + errInvalidFileMode = errors.New("invalid file mode") + errInvalidDirMode = errors.New("invalid directory mode") +) + +type ( + historyArchiver struct { + container *archiver.HistoryBootstrapContainer + fileMode os.FileMode + dirMode os.FileMode + + // only set in test code + historyIterator archiver.HistoryIterator + } + + getHistoryToken struct { + CloseFailoverVersion int64 + NextBatchIdx int + } +) + +// NewHistoryArchiver creates a new archiver.HistoryArchiver based on filestore +func NewHistoryArchiver( + container *archiver.HistoryBootstrapContainer, + config *config.FilestoreArchiver, +) (archiver.HistoryArchiver, error) { + return newHistoryArchiver(container, config, nil) +} + +func newHistoryArchiver( + container *archiver.HistoryBootstrapContainer, + config *config.FilestoreArchiver, + historyIterator archiver.HistoryIterator, +) (*historyArchiver, error) { + fileMode, err := strconv.ParseUint(config.FileMode, 0, 32) + if err != nil { + return nil, errInvalidFileMode + } + dirMode, err := strconv.ParseUint(config.DirMode, 0, 32) + if err != nil { + return nil, errInvalidDirMode + } + return &historyArchiver{ + container: container, + fileMode: os.FileMode(fileMode), + dirMode: os.FileMode(dirMode), + historyIterator: historyIterator, + }, nil +} + +func (h *historyArchiver) Archive( + ctx context.Context, + URI archiver.URI, + request *archiver.ArchiveHistoryRequest, + opts ...archiver.ArchiveOption, +) (err error) { + featureCatalog := archiver.GetFeatureCatalog(opts...) 
+ defer func() { + if err != nil && !common.IsPersistenceTransientError(err) && featureCatalog.NonRetryableError != nil { + err = featureCatalog.NonRetryableError() + } + }() + + logger := archiver.TagLoggerWithArchiveHistoryRequestAndURI(h.container.Logger, request, URI.String()) + + if err := h.ValidateURI(URI); err != nil { + logger.Error(archiver.ArchiveNonRetryableErrorMsg, tag.ArchivalArchiveFailReason(archiver.ErrReasonInvalidURI), tag.Error(err)) + return err + } + + if err := archiver.ValidateHistoryArchiveRequest(request); err != nil { + logger.Error(archiver.ArchiveNonRetryableErrorMsg, tag.ArchivalArchiveFailReason(archiver.ErrReasonInvalidArchiveRequest), tag.Error(err)) + return err + } + + historyIterator := h.historyIterator + if historyIterator == nil { // will only be set by testing code + historyIterator = archiver.NewHistoryIterator(request, h.container.ExecutionManager, targetHistoryBlobSize) + } + + var historyBatches []*historypb.History + for historyIterator.HasNext() { + historyBlob, err := historyIterator.Next(ctx) + if err != nil { + if _, isNotFound := err.(*serviceerror.NotFound); isNotFound { + // workflow history no longer exists, may due to duplicated archival signal + // this may happen even in the middle of iterating history as two archival signals + // can be processed concurrently. + logger.Info(archiver.ArchiveSkippedInfoMsg) + return nil + } + + logger = log.With(logger, tag.ArchivalArchiveFailReason(archiver.ErrReasonReadHistory), tag.Error(err)) + if !common.IsPersistenceTransientError(err) { + logger.Error(archiver.ArchiveNonRetryableErrorMsg) + } else { + logger.Error(archiver.ArchiveTransientErrorMsg) + } + return err + } + + if historyMutated(request, historyBlob.Body, historyBlob.Header.IsLast) { + logger.Error(archiver.ArchiveNonRetryableErrorMsg, tag.ArchivalArchiveFailReason(archiver.ErrReasonHistoryMutated)) + return archiver.ErrHistoryMutated + } + + historyBatches = append(historyBatches, historyBlob.Body...) 
+ } + + encoder := codec.NewJSONPBEncoder() + encodedHistoryBatches, err := encoder.EncodeHistories(historyBatches) + if err != nil { + logger.Error(archiver.ArchiveNonRetryableErrorMsg, tag.ArchivalArchiveFailReason(errEncodeHistory), tag.Error(err)) + return err + } + + dirPath := URI.Path() + if err = mkdirAll(dirPath, h.dirMode); err != nil { + logger.Error(archiver.ArchiveNonRetryableErrorMsg, tag.ArchivalArchiveFailReason(errMakeDirectory), tag.Error(err)) + return err + } + + filename := constructHistoryFilename(request.NamespaceID, request.WorkflowID, request.RunID, request.CloseFailoverVersion) + if err := writeFile(path.Join(dirPath, filename), encodedHistoryBatches, h.fileMode); err != nil { + logger.Error(archiver.ArchiveNonRetryableErrorMsg, tag.ArchivalArchiveFailReason(errWriteFile), tag.Error(err)) + return err + } + + return nil +} + +func (h *historyArchiver) Get( + ctx context.Context, + URI archiver.URI, + request *archiver.GetHistoryRequest, +) (*archiver.GetHistoryResponse, error) { + if err := h.ValidateURI(URI); err != nil { + return nil, serviceerror.NewInvalidArgument(archiver.ErrInvalidURI.Error()) + } + + if err := archiver.ValidateGetRequest(request); err != nil { + return nil, serviceerror.NewInvalidArgument(archiver.ErrInvalidGetHistoryRequest.Error()) + } + + dirPath := URI.Path() + exists, err := directoryExists(dirPath) + if err != nil { + return nil, serviceerror.NewInternal(err.Error()) + } + if !exists { + return nil, serviceerror.NewNotFound(archiver.ErrHistoryNotExist.Error()) + } + + var token *getHistoryToken + if request.NextPageToken != nil { + token, err = deserializeGetHistoryToken(request.NextPageToken) + if err != nil { + return nil, serviceerror.NewInvalidArgument(archiver.ErrNextPageTokenCorrupted.Error()) + } + } else if request.CloseFailoverVersion != nil { + token = &getHistoryToken{ + CloseFailoverVersion: *request.CloseFailoverVersion, + NextBatchIdx: 0, + } + } else { + highestVersion, err := getHighestVersion(dirPath, request) + if err != nil { + return nil, serviceerror.NewInternal(err.Error()) + } + token = &getHistoryToken{ + CloseFailoverVersion: *highestVersion, + NextBatchIdx: 0, + } + } + + filename := constructHistoryFilename(request.NamespaceID, request.WorkflowID, request.RunID, token.CloseFailoverVersion) + filepath := path.Join(dirPath, filename) + exists, err = fileExists(filepath) + if err != nil { + return nil, serviceerror.NewInternal(err.Error()) + } + if !exists { + return nil, serviceerror.NewNotFound(archiver.ErrHistoryNotExist.Error()) + } + + encodedHistoryBatches, err := readFile(filepath) + if err != nil { + return nil, serviceerror.NewInternal(err.Error()) + } + + encoder := codec.NewJSONPBEncoder() + historyBatches, err := encoder.DecodeHistories(encodedHistoryBatches) + if err != nil { + return nil, serviceerror.NewInternal(err.Error()) + } + historyBatches = historyBatches[token.NextBatchIdx:] + + response := &archiver.GetHistoryResponse{} + numOfEvents := 0 + numOfBatches := 0 + for _, batch := range historyBatches { + response.HistoryBatches = append(response.HistoryBatches, batch) + numOfBatches++ + numOfEvents += len(batch.Events) + if numOfEvents >= request.PageSize { + break + } + } + + if numOfBatches < len(historyBatches) { + token.NextBatchIdx += numOfBatches + nextToken, err := serializeToken(token) + if err != nil { + return nil, serviceerror.NewInternal(err.Error()) + } + response.NextPageToken = nextToken + } + + return response, nil +} + +func (h *historyArchiver) ValidateURI(URI archiver.URI) 
error { + if URI.Scheme() != URIScheme { + return archiver.ErrURISchemeMismatch + } + + return validateDirPath(URI.Path()) +} + +func getHighestVersion(dirPath string, request *archiver.GetHistoryRequest) (*int64, error) { + filenames, err := listFilesByPrefix(dirPath, constructHistoryFilenamePrefix(request.NamespaceID, request.WorkflowID, request.RunID)) + if err != nil { + return nil, err + } + + var highestVersion *int64 + for _, filename := range filenames { + version, err := extractCloseFailoverVersion(filename) + if err != nil { + continue + } + if highestVersion == nil || version > *highestVersion { + highestVersion = &version + } + } + if highestVersion == nil { + return nil, archiver.ErrHistoryNotExist + } + return highestVersion, nil +} diff -Nru temporal-1.21.5-1/src/common/archiver/filestore/history_archiver_test.go temporal-1.22.5/src/common/archiver/filestore/history_archiver_test.go --- temporal-1.21.5-1/src/common/archiver/filestore/history_archiver_test.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/archiver/filestore/history_archiver_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,646 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
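The getHighestVersion helper above selects which archive file to read by parsing the version suffix out of each filename. The standalone sketch below shows one way such a "hash_version.history" suffix can be recovered; exampleExtractVersion is an assumed name for illustration only and is not the package's extractCloseFailoverVersion, which lives in a file outside this hunk.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// exampleExtractVersion strips the ".history" extension and parses the digits
// after the last underscore, mirroring the filename layout used by the
// filestore history archiver.
func exampleExtractVersion(filename string) (int64, error) {
	base := strings.TrimSuffix(filename, ".history")
	idx := strings.LastIndex(base, "_")
	if idx < 0 {
		return 0, fmt.Errorf("unexpected history filename: %s", filename)
	}
	return strconv.ParseInt(base[idx+1:], 10, 64)
}

func main() {
	v, err := exampleExtractVersion("9223372036854775807_100.history")
	fmt.Println(v, err) // 100 <nil>
}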
+ +package filestore + +import ( + "context" + "errors" + "os" + "path" + "testing" + "time" + + enumspb "go.temporal.io/api/enums/v1" + + "go.temporal.io/server/tests/testutils" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + historypb "go.temporal.io/api/history/v1" + "go.temporal.io/api/serviceerror" + + archiverspb "go.temporal.io/server/api/archiver/v1" + "go.temporal.io/server/common" + "go.temporal.io/server/common/archiver" + "go.temporal.io/server/common/config" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/primitives/timestamp" +) + +const ( + testNamespaceID = "test-namespace-id" + testNamespace = "test-namespace" + testWorkflowID = "test-workflow-id" + testRunID = "test-run-id" + testNextEventID = 1800 + testCloseFailoverVersion = int64(100) + testPageSize = 100 + + testFileModeStr = "0666" + testDirModeStr = "0766" +) + +var ( + testBranchToken = []byte{1, 2, 3} +) + +type historyArchiverSuite struct { + *require.Assertions + suite.Suite + + container *archiver.HistoryBootstrapContainer + testArchivalURI archiver.URI + testGetDirectory string + historyBatchesV1 []*historypb.History + historyBatchesV100 []*historypb.History +} + +func TestHistoryArchiverSuite(t *testing.T) { + suite.Run(t, new(historyArchiverSuite)) +} + +func (s *historyArchiverSuite) SetupSuite() { + var err error + s.testGetDirectory, err = os.MkdirTemp("", "TestGet") + s.Require().NoError(err) + s.setupHistoryDirectory() + s.testArchivalURI, err = archiver.NewURI("file:///a/b/c") + s.Require().NoError(err) +} + +func (s *historyArchiverSuite) TearDownSuite() { + if err := os.RemoveAll(s.testGetDirectory); err != nil { + s.Fail("Failed to remove test directory %v: %v", s.testGetDirectory, err) + } +} + +func (s *historyArchiverSuite) SetupTest() { + s.Assertions = require.New(s.T()) + s.container = &archiver.HistoryBootstrapContainer{ + Logger: log.NewNoopLogger(), + } +} + +func (s *historyArchiverSuite) TestValidateURI() { + testCases := []struct { + URI string + expectedErr error + }{ + { + URI: "wrongscheme:///a/b/c", + expectedErr: archiver.ErrURISchemeMismatch, + }, + { + URI: "file://", + expectedErr: errEmptyDirectoryPath, + }, + { + URI: "file:///a/b/c", + expectedErr: nil, + }, + } + + historyArchiver := s.newTestHistoryArchiver(nil) + for _, tc := range testCases { + URI, err := archiver.NewURI(tc.URI) + s.NoError(err) + s.Equal(tc.expectedErr, historyArchiver.ValidateURI(URI)) + } +} + +func (s *historyArchiverSuite) TestArchive_Fail_InvalidURI() { + historyArchiver := s.newTestHistoryArchiver(nil) + request := &archiver.ArchiveHistoryRequest{ + NamespaceID: testNamespaceID, + Namespace: testNamespace, + WorkflowID: testWorkflowID, + RunID: testRunID, + BranchToken: testBranchToken, + NextEventID: testNextEventID, + CloseFailoverVersion: testCloseFailoverVersion, + } + URI, err := archiver.NewURI("wrongscheme://") + s.NoError(err) + err = historyArchiver.Archive(context.Background(), URI, request) + s.Error(err) +} + +func (s *historyArchiverSuite) TestArchive_Fail_InvalidRequest() { + historyArchiver := s.newTestHistoryArchiver(nil) + request := &archiver.ArchiveHistoryRequest{ + NamespaceID: testNamespaceID, + Namespace: testNamespace, + WorkflowID: "", // an invalid request + RunID: testRunID, + BranchToken: testBranchToken, + NextEventID: testNextEventID, + CloseFailoverVersion: testCloseFailoverVersion, + } + err := historyArchiver.Archive(context.Background(), s.testArchivalURI, request) + 
s.Error(err) +} + +func (s *historyArchiverSuite) TestArchive_Fail_ErrorOnReadHistory() { + mockCtrl := gomock.NewController(s.T()) + defer mockCtrl.Finish() + historyIterator := archiver.NewMockHistoryIterator(mockCtrl) + gomock.InOrder( + historyIterator.EXPECT().HasNext().Return(true), + historyIterator.EXPECT().Next(gomock.Any()).Return(nil, errors.New("some random error")), + ) + + historyArchiver := s.newTestHistoryArchiver(historyIterator) + request := &archiver.ArchiveHistoryRequest{ + NamespaceID: testNamespaceID, + Namespace: testNamespace, + WorkflowID: testWorkflowID, + RunID: testRunID, + BranchToken: testBranchToken, + NextEventID: testNextEventID, + CloseFailoverVersion: testCloseFailoverVersion, + } + err := historyArchiver.Archive(context.Background(), s.testArchivalURI, request) + s.Error(err) +} + +func (s *historyArchiverSuite) TestArchive_Fail_TimeoutWhenReadingHistory() { + mockCtrl := gomock.NewController(s.T()) + defer mockCtrl.Finish() + historyIterator := archiver.NewMockHistoryIterator(mockCtrl) + gomock.InOrder( + historyIterator.EXPECT().HasNext().Return(true), + historyIterator.EXPECT().Next(gomock.Any()).Return(nil, serviceerror.NewResourceExhausted(enumspb.RESOURCE_EXHAUSTED_CAUSE_RPS_LIMIT, "")), + ) + + historyArchiver := s.newTestHistoryArchiver(historyIterator) + request := &archiver.ArchiveHistoryRequest{ + NamespaceID: testNamespaceID, + Namespace: testNamespace, + WorkflowID: testWorkflowID, + RunID: testRunID, + BranchToken: testBranchToken, + NextEventID: testNextEventID, + CloseFailoverVersion: testCloseFailoverVersion, + } + err := historyArchiver.Archive(getCanceledContext(), s.testArchivalURI, request) + s.Error(err) +} + +func (s *historyArchiverSuite) TestArchive_Fail_HistoryMutated() { + mockCtrl := gomock.NewController(s.T()) + defer mockCtrl.Finish() + historyIterator := archiver.NewMockHistoryIterator(mockCtrl) + historyBatches := []*historypb.History{ + { + Events: []*historypb.HistoryEvent{ + { + EventId: common.FirstEventID + 1, + EventTime: timestamp.TimePtr(time.Now().UTC()), + Version: testCloseFailoverVersion + 1, + }, + }, + }, + } + historyBlob := &archiverspb.HistoryBlob{ + Header: &archiverspb.HistoryBlobHeader{ + IsLast: true, + }, + Body: historyBatches, + } + gomock.InOrder( + historyIterator.EXPECT().HasNext().Return(true), + historyIterator.EXPECT().Next(gomock.Any()).Return(historyBlob, nil), + ) + + historyArchiver := s.newTestHistoryArchiver(historyIterator) + request := &archiver.ArchiveHistoryRequest{ + NamespaceID: testNamespaceID, + Namespace: testNamespace, + WorkflowID: testWorkflowID, + RunID: testRunID, + BranchToken: testBranchToken, + NextEventID: testNextEventID, + CloseFailoverVersion: testCloseFailoverVersion, + } + err := historyArchiver.Archive(context.Background(), s.testArchivalURI, request) + s.Error(err) +} + +func (s *historyArchiverSuite) TestArchive_Fail_NonRetryableErrorOption() { + mockCtrl := gomock.NewController(s.T()) + defer mockCtrl.Finish() + historyIterator := archiver.NewMockHistoryIterator(mockCtrl) + gomock.InOrder( + historyIterator.EXPECT().HasNext().Return(true), + historyIterator.EXPECT().Next(gomock.Any()).Return(nil, errors.New("some random error")), + ) + + historyArchiver := s.newTestHistoryArchiver(historyIterator) + request := &archiver.ArchiveHistoryRequest{ + NamespaceID: testNamespaceID, + Namespace: testNamespace, + WorkflowID: testWorkflowID, + RunID: testRunID, + BranchToken: testBranchToken, + NextEventID: testNextEventID, + CloseFailoverVersion: 
testCloseFailoverVersion, + } + nonRetryableErr := errors.New("some non-retryable error") + err := historyArchiver.Archive(context.Background(), s.testArchivalURI, request, archiver.GetNonRetryableErrorOption(nonRetryableErr)) + s.Equal(nonRetryableErr, err) +} + +func (s *historyArchiverSuite) TestArchive_Skip() { + mockCtrl := gomock.NewController(s.T()) + defer mockCtrl.Finish() + historyIterator := archiver.NewMockHistoryIterator(mockCtrl) + historyBlob := &archiverspb.HistoryBlob{ + Header: &archiverspb.HistoryBlobHeader{ + IsLast: false, + }, + Body: []*historypb.History{ + { + Events: []*historypb.HistoryEvent{ + { + EventId: common.FirstEventID, + EventTime: timestamp.TimePtr(time.Now().UTC()), + Version: testCloseFailoverVersion, + }, + }, + }, + }, + } + gomock.InOrder( + historyIterator.EXPECT().HasNext().Return(true), + historyIterator.EXPECT().Next(gomock.Any()).Return(historyBlob, nil), + historyIterator.EXPECT().HasNext().Return(true), + historyIterator.EXPECT().Next(gomock.Any()).Return(nil, serviceerror.NewNotFound("workflow not found")), + ) + + historyArchiver := s.newTestHistoryArchiver(historyIterator) + request := &archiver.ArchiveHistoryRequest{ + NamespaceID: testNamespaceID, + Namespace: testNamespace, + WorkflowID: testWorkflowID, + RunID: testRunID, + BranchToken: testBranchToken, + NextEventID: testNextEventID, + CloseFailoverVersion: testCloseFailoverVersion, + } + err := historyArchiver.Archive(context.Background(), s.testArchivalURI, request) + s.NoError(err) +} + +func (s *historyArchiverSuite) TestArchive_Success() { + mockCtrl := gomock.NewController(s.T()) + defer mockCtrl.Finish() + historyIterator := archiver.NewMockHistoryIterator(mockCtrl) + historyBatches := []*historypb.History{ + { + Events: []*historypb.HistoryEvent{ + { + EventId: common.FirstEventID + 1, + EventTime: timestamp.TimePtr(time.Now().UTC()), + Version: testCloseFailoverVersion, + }, + { + EventId: common.FirstEventID + 2, + EventTime: timestamp.TimePtr(time.Now().UTC()), + Version: testCloseFailoverVersion, + }, + }, + }, + { + Events: []*historypb.HistoryEvent{ + { + EventId: testNextEventID - 1, + EventTime: timestamp.TimePtr(time.Now().UTC()), + Version: testCloseFailoverVersion, + }, + }, + }, + } + historyBlob := &archiverspb.HistoryBlob{ + Header: &archiverspb.HistoryBlobHeader{ + IsLast: true, + }, + Body: historyBatches, + } + gomock.InOrder( + historyIterator.EXPECT().HasNext().Return(true), + historyIterator.EXPECT().Next(gomock.Any()).Return(historyBlob, nil), + historyIterator.EXPECT().HasNext().Return(false), + ) + + dir := testutils.MkdirTemp(s.T(), "", "TestArchiveSingleRead") + + historyArchiver := s.newTestHistoryArchiver(historyIterator) + request := &archiver.ArchiveHistoryRequest{ + NamespaceID: testNamespaceID, + Namespace: testNamespace, + WorkflowID: testWorkflowID, + RunID: testRunID, + BranchToken: testBranchToken, + NextEventID: testNextEventID, + CloseFailoverVersion: testCloseFailoverVersion, + } + URI, err := archiver.NewURI("file://" + dir) + s.NoError(err) + err = historyArchiver.Archive(context.Background(), URI, request) + s.NoError(err) + + expectedFilename := constructHistoryFilename(testNamespaceID, testWorkflowID, testRunID, testCloseFailoverVersion) + s.assertFileExists(path.Join(dir, expectedFilename)) +} + +func (s *historyArchiverSuite) TestGet_Fail_InvalidURI() { + historyArchiver := s.newTestHistoryArchiver(nil) + request := &archiver.GetHistoryRequest{ + NamespaceID: testNamespaceID, + WorkflowID: testWorkflowID, + RunID: testRunID, + 
PageSize: 100, + } + URI, err := archiver.NewURI("wrongscheme://") + s.NoError(err) + response, err := historyArchiver.Get(context.Background(), URI, request) + s.Nil(response) + s.Error(err) +} + +func (s *historyArchiverSuite) TestGet_Fail_InvalidRequest() { + historyArchiver := s.newTestHistoryArchiver(nil) + request := &archiver.GetHistoryRequest{ + NamespaceID: testNamespaceID, + WorkflowID: testWorkflowID, + RunID: testRunID, + PageSize: 0, // pageSize should be greater than 0 + } + response, err := historyArchiver.Get(context.Background(), s.testArchivalURI, request) + s.Nil(response) + s.Error(err) + s.IsType(&serviceerror.InvalidArgument{}, err) +} + +func (s *historyArchiverSuite) TestGet_Fail_DirectoryNotExist() { + historyArchiver := s.newTestHistoryArchiver(nil) + request := &archiver.GetHistoryRequest{ + NamespaceID: testNamespaceID, + WorkflowID: testWorkflowID, + RunID: testRunID, + PageSize: testPageSize, + } + response, err := historyArchiver.Get(context.Background(), s.testArchivalURI, request) + s.Nil(response) + s.Error(err) + s.IsType(&serviceerror.NotFound{}, err) +} + +func (s *historyArchiverSuite) TestGet_Fail_InvalidToken() { + historyArchiver := s.newTestHistoryArchiver(nil) + request := &archiver.GetHistoryRequest{ + NamespaceID: testNamespaceID, + WorkflowID: testWorkflowID, + RunID: testRunID, + PageSize: testPageSize, + NextPageToken: []byte{'r', 'a', 'n', 'd', 'o', 'm'}, + } + URI, err := archiver.NewURI("file:///") + s.NoError(err) + response, err := historyArchiver.Get(context.Background(), URI, request) + s.Nil(response) + s.Error(err) + s.IsType(&serviceerror.InvalidArgument{}, err) +} + +func (s *historyArchiverSuite) TestGet_Fail_FileNotExist() { + historyArchiver := s.newTestHistoryArchiver(nil) + testCloseFailoverVersion := testCloseFailoverVersion + request := &archiver.GetHistoryRequest{ + NamespaceID: testNamespaceID, + WorkflowID: testWorkflowID, + RunID: testRunID, + PageSize: testPageSize, + CloseFailoverVersion: &testCloseFailoverVersion, + } + URI, err := archiver.NewURI("file:///") + s.NoError(err) + response, err := historyArchiver.Get(context.Background(), URI, request) + s.Nil(response) + s.Error(err) + s.IsType(&serviceerror.NotFound{}, err) +} + +func (s *historyArchiverSuite) TestGet_Success_PickHighestVersion() { + historyArchiver := s.newTestHistoryArchiver(nil) + request := &archiver.GetHistoryRequest{ + NamespaceID: testNamespaceID, + WorkflowID: testWorkflowID, + RunID: testRunID, + PageSize: testPageSize, + } + URI, err := archiver.NewURI("file://" + s.testGetDirectory) + s.NoError(err) + response, err := historyArchiver.Get(context.Background(), URI, request) + s.NoError(err) + s.Nil(response.NextPageToken) + s.Equal(s.historyBatchesV100, response.HistoryBatches) +} + +func (s *historyArchiverSuite) TestGet_Success_UseProvidedVersion() { + historyArchiver := s.newTestHistoryArchiver(nil) + testCloseFailoverVersion := int64(1) + request := &archiver.GetHistoryRequest{ + NamespaceID: testNamespaceID, + WorkflowID: testWorkflowID, + RunID: testRunID, + PageSize: testPageSize, + CloseFailoverVersion: &testCloseFailoverVersion, + } + URI, err := archiver.NewURI("file://" + s.testGetDirectory) + s.NoError(err) + response, err := historyArchiver.Get(context.Background(), URI, request) + s.NoError(err) + s.Nil(response.NextPageToken) + s.Equal(s.historyBatchesV1, response.HistoryBatches) +} + +func (s *historyArchiverSuite) TestGet_Success_SmallPageSize() { + historyArchiver := s.newTestHistoryArchiver(nil) + testCloseFailoverVersion 
:= int64(100) + request := &archiver.GetHistoryRequest{ + NamespaceID: testNamespaceID, + WorkflowID: testWorkflowID, + RunID: testRunID, + PageSize: 1, + CloseFailoverVersion: &testCloseFailoverVersion, + } + var combinedHistory []*historypb.History + + URI, err := archiver.NewURI("file://" + s.testGetDirectory) + s.NoError(err) + response, err := historyArchiver.Get(context.Background(), URI, request) + s.NoError(err) + s.NotNil(response) + s.NotNil(response.NextPageToken) + s.NotNil(response.HistoryBatches) + s.Len(response.HistoryBatches, 1) + combinedHistory = append(combinedHistory, response.HistoryBatches...) + + request.NextPageToken = response.NextPageToken + response, err = historyArchiver.Get(context.Background(), URI, request) + s.NoError(err) + s.NotNil(response) + s.Nil(response.NextPageToken) + s.NotNil(response.HistoryBatches) + s.Len(response.HistoryBatches, 1) + combinedHistory = append(combinedHistory, response.HistoryBatches...) + + s.Equal(s.historyBatchesV100, combinedHistory) +} + +func (s *historyArchiverSuite) TestArchiveAndGet() { + mockCtrl := gomock.NewController(s.T()) + defer mockCtrl.Finish() + historyIterator := archiver.NewMockHistoryIterator(mockCtrl) + historyBlob := &archiverspb.HistoryBlob{ + Header: &archiverspb.HistoryBlobHeader{ + IsLast: true, + }, + Body: s.historyBatchesV100, + } + gomock.InOrder( + historyIterator.EXPECT().HasNext().Return(true), + historyIterator.EXPECT().Next(gomock.Any()).Return(historyBlob, nil), + historyIterator.EXPECT().HasNext().Return(false), + ) + + dir := testutils.MkdirTemp(s.T(), "", "TestArchiveAndGet") + + historyArchiver := s.newTestHistoryArchiver(historyIterator) + archiveRequest := &archiver.ArchiveHistoryRequest{ + NamespaceID: testNamespaceID, + Namespace: testNamespace, + WorkflowID: testWorkflowID, + RunID: testRunID, + BranchToken: testBranchToken, + NextEventID: testNextEventID, + CloseFailoverVersion: testCloseFailoverVersion, + } + URI, err := archiver.NewURI("file://" + dir) + s.NoError(err) + err = historyArchiver.Archive(context.Background(), URI, archiveRequest) + s.NoError(err) + + expectedFilename := constructHistoryFilename(testNamespaceID, testWorkflowID, testRunID, testCloseFailoverVersion) + s.assertFileExists(path.Join(dir, expectedFilename)) + + getRequest := &archiver.GetHistoryRequest{ + NamespaceID: testNamespaceID, + WorkflowID: testWorkflowID, + RunID: testRunID, + PageSize: testPageSize, + } + response, err := historyArchiver.Get(context.Background(), URI, getRequest) + s.NoError(err) + s.NotNil(response) + s.Nil(response.NextPageToken) + s.Equal(s.historyBatchesV100, response.HistoryBatches) +} + +func (s *historyArchiverSuite) newTestHistoryArchiver(historyIterator archiver.HistoryIterator) *historyArchiver { + config := &config.FilestoreArchiver{ + FileMode: testFileModeStr, + DirMode: testDirModeStr, + } + archiver, err := newHistoryArchiver(s.container, config, historyIterator) + s.NoError(err) + return archiver +} + +func (s *historyArchiverSuite) setupHistoryDirectory() { + now := time.Date(2020, 8, 22, 1, 2, 3, 4, time.UTC) + s.historyBatchesV1 = []*historypb.History{ + { + Events: []*historypb.HistoryEvent{ + { + EventId: testNextEventID - 1, + EventTime: &now, + Version: 1, + }, + }, + }, + } + + s.historyBatchesV100 = []*historypb.History{ + { + Events: []*historypb.HistoryEvent{ + { + EventId: common.FirstEventID + 1, + EventTime: &now, + Version: testCloseFailoverVersion, + }, + { + EventId: common.FirstEventID + 1, + EventTime: &now, + Version: testCloseFailoverVersion, + 
}, + }, + }, + { + Events: []*historypb.HistoryEvent{ + { + EventId: testNextEventID - 1, + EventTime: &now, + Version: testCloseFailoverVersion, + }, + }, + }, + } + + s.writeHistoryBatchesForGetTest(s.historyBatchesV1, int64(1)) + s.writeHistoryBatchesForGetTest(s.historyBatchesV100, testCloseFailoverVersion) +} + +func (s *historyArchiverSuite) writeHistoryBatchesForGetTest(historyBatches []*historypb.History, version int64) { + data, err := encodeHistories(historyBatches) + s.Require().NoError(err) + filename := constructHistoryFilename(testNamespaceID, testWorkflowID, testRunID, version) + err = writeFile(path.Join(s.testGetDirectory, filename), data, testFileMode) + s.Require().NoError(err) +} + +func (s *historyArchiverSuite) assertFileExists(filepath string) { + exists, err := fileExists(filepath) + s.NoError(err) + s.True(exists) +} + +func getCanceledContext() context.Context { + ctx, cancel := context.WithCancel(context.Background()) + cancel() + return ctx +} diff -Nru temporal-1.21.5-1/src/common/archiver/filestore/queryParser.go temporal-1.22.5/src/common/archiver/filestore/queryParser.go --- temporal-1.21.5-1/src/common/archiver/filestore/queryParser.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/archiver/filestore/queryParser.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,279 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
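As a runnable aside, the pre-canceled context trick used by getCanceledContext and TestArchive_Fail_TimeoutWhenReadingHistory above can be reproduced on its own; the canceledContext name here is illustrative, not part of the diff.

package main

import (
	"context"
	"fmt"
)

// canceledContext mirrors the getCanceledContext test helper above: the context
// is canceled before it is returned, so any call that honors ctx observes a
// cancellation immediately, without waiting for a real timeout in the test.
func canceledContext() context.Context {
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	return ctx
}

func main() {
	fmt.Println(canceledContext().Err()) // context.Canceled
}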
- -//go:generate mockgen -copyright_file ../../../LICENSE -package $GOPACKAGE -source queryParser.go -destination queryParser_mock.go -mock_names Interface=MockQueryParser - -package filestore - -import ( - "errors" - "fmt" - "strconv" - "strings" - "time" - - "github.com/xwb1989/sqlparser" - enumspb "go.temporal.io/api/enums/v1" - - "go.temporal.io/server/common/convert" - "go.temporal.io/server/common/primitives/timestamp" - "go.temporal.io/server/common/util" -) - -type ( - // QueryParser parses a limited SQL where clause into a struct - QueryParser interface { - Parse(query string) (*parsedQuery, error) - } - - queryParser struct{} - - parsedQuery struct { - earliestCloseTime time.Time - latestCloseTime time.Time - workflowID *string - runID *string - workflowTypeName *string - status *enumspb.WorkflowExecutionStatus - emptyResult bool - } -) - -// All allowed fields for filtering -const ( - WorkflowID = "WorkflowId" - RunID = "RunId" - WorkflowType = "WorkflowType" - CloseTime = "CloseTime" - // Field name can't be just "Status" because it is reserved keyword in MySQL parser. - ExecutionStatus = "ExecutionStatus" -) - -const ( - queryTemplate = "select * from dummy where %s" - - defaultDateTimeFormat = time.RFC3339 -) - -// NewQueryParser creates a new query parser for filestore -func NewQueryParser() QueryParser { - return &queryParser{} -} - -func (p *queryParser) Parse(query string) (*parsedQuery, error) { - parsedQuery := &parsedQuery{ - earliestCloseTime: time.Time{}, - latestCloseTime: time.Now().UTC(), - } - if strings.TrimSpace(query) == "" { - return parsedQuery, nil - } - stmt, err := sqlparser.Parse(fmt.Sprintf(queryTemplate, query)) - if err != nil { - return nil, err - } - whereExpr := stmt.(*sqlparser.Select).Where.Expr - if err := p.convertWhereExpr(whereExpr, parsedQuery); err != nil { - return nil, err - } - return parsedQuery, nil -} - -func (p *queryParser) convertWhereExpr(expr sqlparser.Expr, parsedQuery *parsedQuery) error { - if expr == nil { - return errors.New("where expression is nil") - } - - switch expr := expr.(type) { - case *sqlparser.ComparisonExpr: - return p.convertComparisonExpr(expr, parsedQuery) - case *sqlparser.AndExpr: - return p.convertAndExpr(expr, parsedQuery) - case *sqlparser.ParenExpr: - return p.convertParenExpr(expr, parsedQuery) - default: - return errors.New("only comparison and \"and\" expression is supported") - } -} - -func (p *queryParser) convertParenExpr(parenExpr *sqlparser.ParenExpr, parsedQuery *parsedQuery) error { - return p.convertWhereExpr(parenExpr.Expr, parsedQuery) -} - -func (p *queryParser) convertAndExpr(andExpr *sqlparser.AndExpr, parsedQuery *parsedQuery) error { - if err := p.convertWhereExpr(andExpr.Left, parsedQuery); err != nil { - return err - } - return p.convertWhereExpr(andExpr.Right, parsedQuery) -} - -func (p *queryParser) convertComparisonExpr(compExpr *sqlparser.ComparisonExpr, parsedQuery *parsedQuery) error { - colName, ok := compExpr.Left.(*sqlparser.ColName) - if !ok { - return fmt.Errorf("invalid filter name: %s", sqlparser.String(compExpr.Left)) - } - colNameStr := sqlparser.String(colName) - op := compExpr.Operator - valExpr, ok := compExpr.Right.(*sqlparser.SQLVal) - if !ok { - return fmt.Errorf("invalid value: %s", sqlparser.String(compExpr.Right)) - } - valStr := sqlparser.String(valExpr) - - switch colNameStr { - case WorkflowID: - val, err := extractStringValue(valStr) - if err != nil { - return err - } - if op != "=" { - return fmt.Errorf("only operation = is support for %s", WorkflowID) - 
} - if parsedQuery.workflowID != nil && *parsedQuery.workflowID != val { - parsedQuery.emptyResult = true - return nil - } - parsedQuery.workflowID = convert.StringPtr(val) - case RunID: - val, err := extractStringValue(valStr) - if err != nil { - return err - } - if op != "=" { - return fmt.Errorf("only operation = is support for %s", RunID) - } - if parsedQuery.runID != nil && *parsedQuery.runID != val { - parsedQuery.emptyResult = true - return nil - } - parsedQuery.runID = convert.StringPtr(val) - case WorkflowType: - val, err := extractStringValue(valStr) - if err != nil { - return err - } - if op != "=" { - return fmt.Errorf("only operation = is support for %s", WorkflowType) - } - if parsedQuery.workflowTypeName != nil && *parsedQuery.workflowTypeName != val { - parsedQuery.emptyResult = true - return nil - } - parsedQuery.workflowTypeName = convert.StringPtr(val) - case ExecutionStatus: - val, err := extractStringValue(valStr) - if err != nil { - // if failed to extract string value, it means user input close status as a number - val = valStr - } - if op != "=" { - return fmt.Errorf("only operation = is support for %s", ExecutionStatus) - } - status, err := convertStatusStr(val) - if err != nil { - return err - } - if parsedQuery.status != nil && *parsedQuery.status != status { - parsedQuery.emptyResult = true - return nil - } - parsedQuery.status = &status - case CloseTime: - timestamp, err := convertToTime(valStr) - if err != nil { - return err - } - return p.convertCloseTime(timestamp, op, parsedQuery) - default: - return fmt.Errorf("unknown filter name: %s", colNameStr) - } - - return nil -} - -func (p *queryParser) convertCloseTime(timestamp time.Time, op string, parsedQuery *parsedQuery) error { - switch op { - case "=": - if err := p.convertCloseTime(timestamp, ">=", parsedQuery); err != nil { - return err - } - if err := p.convertCloseTime(timestamp, "<=", parsedQuery); err != nil { - return err - } - case "<": - parsedQuery.latestCloseTime = util.MinTime(parsedQuery.latestCloseTime, timestamp.Add(-1*time.Nanosecond)) - case "<=": - parsedQuery.latestCloseTime = util.MinTime(parsedQuery.latestCloseTime, timestamp) - case ">": - parsedQuery.earliestCloseTime = util.MaxTime(parsedQuery.earliestCloseTime, timestamp.Add(1*time.Nanosecond)) - case ">=": - parsedQuery.earliestCloseTime = util.MaxTime(parsedQuery.earliestCloseTime, timestamp) - default: - return fmt.Errorf("operator %s is not supported for close time", op) - } - return nil -} - -func convertToTime(timeStr string) (time.Time, error) { - ts, err := strconv.ParseInt(timeStr, 10, 64) - if err == nil { - return timestamp.UnixOrZeroTime(ts), nil - } - timestampStr, err := extractStringValue(timeStr) - if err != nil { - return time.Time{}, err - } - parsedTime, err := time.Parse(defaultDateTimeFormat, timestampStr) - if err != nil { - return time.Time{}, err - } - return parsedTime, nil -} - -func convertStatusStr(statusStr string) (enumspb.WorkflowExecutionStatus, error) { - statusStr = strings.ToLower(strings.TrimSpace(statusStr)) - switch statusStr { - case "completed", convert.Int32ToString(int32(enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED)): - return enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED, nil - case "failed", convert.Int32ToString(int32(enumspb.WORKFLOW_EXECUTION_STATUS_FAILED)): - return enumspb.WORKFLOW_EXECUTION_STATUS_FAILED, nil - case "canceled", convert.Int32ToString(int32(enumspb.WORKFLOW_EXECUTION_STATUS_CANCELED)): - return enumspb.WORKFLOW_EXECUTION_STATUS_CANCELED, nil - case "terminated", 
convert.Int32ToString(int32(enumspb.WORKFLOW_EXECUTION_STATUS_TERMINATED)): - return enumspb.WORKFLOW_EXECUTION_STATUS_TERMINATED, nil - case "continuedasnew", "continued_as_new", convert.Int32ToString(int32(enumspb.WORKFLOW_EXECUTION_STATUS_CONTINUED_AS_NEW)): - return enumspb.WORKFLOW_EXECUTION_STATUS_CONTINUED_AS_NEW, nil - case "timedout", "timed_out", convert.Int32ToString(int32(enumspb.WORKFLOW_EXECUTION_STATUS_TIMED_OUT)): - return enumspb.WORKFLOW_EXECUTION_STATUS_TIMED_OUT, nil - default: - return 0, fmt.Errorf("unknown workflow close status: %s", statusStr) - } -} - -func extractStringValue(s string) (string, error) { - if len(s) >= 2 && s[0] == '\'' && s[len(s)-1] == '\'' { - return s[1 : len(s)-1], nil - } - return "", fmt.Errorf("value %s is not a string value", s) -} diff -Nru temporal-1.21.5-1/src/common/archiver/filestore/queryParser_mock.go temporal-1.22.5/src/common/archiver/filestore/queryParser_mock.go --- temporal-1.21.5-1/src/common/archiver/filestore/queryParser_mock.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/archiver/filestore/queryParser_mock.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,73 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Code generated by MockGen. DO NOT EDIT. -// Source: queryParser.go - -// Package filestore is a generated GoMock package. -package filestore - -import ( - reflect "reflect" - - gomock "github.com/golang/mock/gomock" -) - -// MockQueryParser is a mock of QueryParser interface. -type MockQueryParser struct { - ctrl *gomock.Controller - recorder *MockQueryParserMockRecorder -} - -// MockQueryParserMockRecorder is the mock recorder for MockQueryParser. -type MockQueryParserMockRecorder struct { - mock *MockQueryParser -} - -// NewMockQueryParser creates a new mock instance. -func NewMockQueryParser(ctrl *gomock.Controller) *MockQueryParser { - mock := &MockQueryParser{ctrl: ctrl} - mock.recorder = &MockQueryParserMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockQueryParser) EXPECT() *MockQueryParserMockRecorder { - return m.recorder -} - -// Parse mocks base method. 
-func (m *MockQueryParser) Parse(query string) (*parsedQuery, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Parse", query) - ret0, _ := ret[0].(*parsedQuery) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Parse indicates an expected call of Parse. -func (mr *MockQueryParserMockRecorder) Parse(query interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Parse", reflect.TypeOf((*MockQueryParser)(nil).Parse), query) -} diff -Nru temporal-1.21.5-1/src/common/archiver/filestore/queryParser_test.go temporal-1.22.5/src/common/archiver/filestore/queryParser_test.go --- temporal-1.21.5-1/src/common/archiver/filestore/queryParser_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/archiver/filestore/queryParser_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,367 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- -package filestore - -import ( - "testing" - "time" - - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - enumspb "go.temporal.io/api/enums/v1" - - "go.temporal.io/server/common/convert" -) - -type queryParserSuite struct { - *require.Assertions - suite.Suite - - parser QueryParser -} - -func TestQueryParserSuite(t *testing.T) { - suite.Run(t, new(queryParserSuite)) -} - -func (s *queryParserSuite) SetupTest() { - s.Assertions = require.New(s.T()) - s.parser = NewQueryParser() -} - -func (s *queryParserSuite) TestParseWorkflowID_RunID_WorkflowType() { - testCases := []struct { - query string - expectErr bool - parsedQuery *parsedQuery - }{ - { - query: "WorkflowId = \"random workflowID\"", - expectErr: false, - parsedQuery: &parsedQuery{ - workflowID: convert.StringPtr("random workflowID"), - }, - }, - { - query: "WorkflowId = \"random workflowID\" and WorkflowId = \"random workflowID\"", - expectErr: false, - parsedQuery: &parsedQuery{ - workflowID: convert.StringPtr("random workflowID"), - }, - }, - { - query: "RunId = \"random runID\"", - expectErr: false, - parsedQuery: &parsedQuery{ - runID: convert.StringPtr("random runID"), - }, - }, - { - query: "WorkflowType = \"random typeName\"", - expectErr: false, - parsedQuery: &parsedQuery{ - workflowTypeName: convert.StringPtr("random typeName"), - }, - }, - { - query: "WorkflowId = 'random workflowID'", - expectErr: false, - parsedQuery: &parsedQuery{ - workflowID: convert.StringPtr("random workflowID"), - }, - }, - { - query: "WorkflowType = 'random typeName' and WorkflowType = \"another typeName\"", - expectErr: false, - parsedQuery: &parsedQuery{ - emptyResult: true, - }, - }, - { - query: "WorkflowType = 'random typeName' and (WorkflowId = \"random workflowID\" and RunId='random runID')", - expectErr: false, - parsedQuery: &parsedQuery{ - workflowID: convert.StringPtr("random workflowID"), - runID: convert.StringPtr("random runID"), - workflowTypeName: convert.StringPtr("random typeName"), - }, - }, - { - query: "runId = random workflowID", - expectErr: true, - }, - { - query: "WorkflowId = \"random workflowID\" or WorkflowId = \"another workflowID\"", - expectErr: true, - }, - { - query: "WorkflowId = \"random workflowID\" or runId = \"random runID\"", - expectErr: true, - }, - { - query: "workflowid = \"random workflowID\"", - expectErr: true, - }, - { - query: "runId > \"random workflowID\"", - expectErr: true, - }, - } - - for _, tc := range testCases { - parsedQuery, err := s.parser.Parse(tc.query) - if tc.expectErr { - s.Error(err) - continue - } - s.NoError(err) - s.Equal(tc.parsedQuery.emptyResult, parsedQuery.emptyResult) - if !tc.parsedQuery.emptyResult { - s.Equal(tc.parsedQuery.workflowID, parsedQuery.workflowID) - s.Equal(tc.parsedQuery.runID, parsedQuery.runID) - s.Equal(tc.parsedQuery.workflowTypeName, parsedQuery.workflowTypeName) - } - } -} - -func (s *queryParserSuite) TestParseCloseStatus() { - testCases := []struct { - query string - expectErr bool - parsedQuery *parsedQuery - }{ - { - query: "ExecutionStatus = \"Completed\"", - expectErr: false, - parsedQuery: &parsedQuery{ - status: toWorkflowExecutionStatusPtr(enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED), - }, - }, - { - query: "ExecutionStatus = \"failed\"", - expectErr: false, - parsedQuery: &parsedQuery{ - status: toWorkflowExecutionStatusPtr(enumspb.WORKFLOW_EXECUTION_STATUS_FAILED), - }, - }, - { - query: "ExecutionStatus = \"canceled\"", - expectErr: false, - parsedQuery: &parsedQuery{ - status: 
toWorkflowExecutionStatusPtr(enumspb.WORKFLOW_EXECUTION_STATUS_CANCELED), - }, - }, - { - query: "ExecutionStatus = \"terminated\"", - expectErr: false, - parsedQuery: &parsedQuery{ - status: toWorkflowExecutionStatusPtr(enumspb.WORKFLOW_EXECUTION_STATUS_TERMINATED), - }, - }, - { - query: "ExecutionStatus = 'continuedasnew'", - expectErr: false, - parsedQuery: &parsedQuery{ - status: toWorkflowExecutionStatusPtr(enumspb.WORKFLOW_EXECUTION_STATUS_CONTINUED_AS_NEW), - }, - }, - { - query: "ExecutionStatus = 'TIMED_OUT'", - expectErr: false, - parsedQuery: &parsedQuery{ - status: toWorkflowExecutionStatusPtr(enumspb.WORKFLOW_EXECUTION_STATUS_TIMED_OUT), - }, - }, - { - query: "ExecutionStatus = 'Failed' and ExecutionStatus = \"Failed\"", - expectErr: false, - parsedQuery: &parsedQuery{ - status: toWorkflowExecutionStatusPtr(enumspb.WORKFLOW_EXECUTION_STATUS_FAILED), - }, - }, - { - query: "(ExecutionStatus = 'Timedout' and ExecutionStatus = \"canceled\")", - expectErr: false, - parsedQuery: &parsedQuery{ - emptyResult: true, - }, - }, - { - query: "status = \"Failed\"", - expectErr: true, - }, - { - query: "ExecutionStatus = \"Failed\" or ExecutionStatus = \"Failed\"", - expectErr: true, - }, - { - query: "ExecutionStatus = \"unknown\"", - expectErr: true, - }, - { - query: "ExecutionStatus > \"Failed\"", - expectErr: true, - }, - { - query: "ExecutionStatus = 3", - expectErr: false, - parsedQuery: &parsedQuery{ - status: toWorkflowExecutionStatusPtr(enumspb.WORKFLOW_EXECUTION_STATUS_FAILED), - }, - }, - { - query: "CloseStatus = 10", - expectErr: true, - }, - } - - for _, tc := range testCases { - parsedQuery, err := s.parser.Parse(tc.query) - if tc.expectErr { - s.Error(err) - continue - } - s.NoError(err) - s.Equal(tc.parsedQuery.emptyResult, parsedQuery.emptyResult) - if !tc.parsedQuery.emptyResult { - s.EqualValues(tc.parsedQuery.status, parsedQuery.status) - } - } -} - -func (s *queryParserSuite) TestParseCloseTime() { - testCases := []struct { - query string - expectErr bool - parsedQuery *parsedQuery - }{ - { - query: "CloseTime <= 1000", - expectErr: false, - parsedQuery: &parsedQuery{ - earliestCloseTime: time.Time{}, - latestCloseTime: time.Unix(0, 1000), - }, - }, - { - query: "CloseTime < 2000 and CloseTime <= 1000 and CloseTime > 300", - expectErr: false, - parsedQuery: &parsedQuery{ - earliestCloseTime: time.Unix(0, 301), - latestCloseTime: time.Unix(0, 1000), - }, - }, - { - query: "CloseTime = 2000 and (CloseTime > 1000 and CloseTime <= 9999)", - expectErr: false, - parsedQuery: &parsedQuery{ - earliestCloseTime: time.Unix(0, 2000), - latestCloseTime: time.Unix(0, 2000), - }, - }, - { - query: "CloseTime <= \"2019-01-01T11:11:11Z\" and CloseTime >= 1000000", - expectErr: false, - parsedQuery: &parsedQuery{ - earliestCloseTime: time.Unix(0, 1000000), - latestCloseTime: time.Date(2019, 01, 01, 11, 11, 11, 0, time.UTC), - }, - }, - { - query: "closeTime = 2000", - expectErr: true, - }, - { - query: "CloseTime > \"2019-01-01 00:00:00\"", - expectErr: true, - }, - { - query: "ExecutionStatus > 2000 or ExecutionStatus < 1000", - expectErr: true, - }, - } - - for i, tc := range testCases { - parsedQuery, err := s.parser.Parse(tc.query) - if tc.expectErr { - s.Error(err) - continue - } - s.NoError(err, "case %d", i) - s.Equal(tc.parsedQuery.emptyResult, parsedQuery.emptyResult, "case %d", i) - if !tc.parsedQuery.emptyResult { - s.True(tc.parsedQuery.earliestCloseTime.Equal(parsedQuery.earliestCloseTime), "case %d", i) - 
s.True(tc.parsedQuery.latestCloseTime.Equal(parsedQuery.latestCloseTime), "case %d", i) - } - } -} - -func (s *queryParserSuite) TestParse() { - testCases := []struct { - query string - expectErr bool - parsedQuery *parsedQuery - }{ - { - query: "CloseTime <= \"2019-01-01T11:11:11Z\" and WorkflowId = 'random workflowID'", - expectErr: false, - parsedQuery: &parsedQuery{ - earliestCloseTime: time.Time{}, - latestCloseTime: time.Date(2019, 01, 01, 11, 11, 11, 0, time.UTC), - workflowID: convert.StringPtr("random workflowID"), - }, - }, - { - query: "CloseTime > 1999 and CloseTime < 10000 and RunId = 'random runID' and ExecutionStatus = 'Failed'", - expectErr: false, - parsedQuery: &parsedQuery{ - earliestCloseTime: time.Unix(0, 2000).UTC(), - latestCloseTime: time.Unix(0, 9999).UTC(), - runID: convert.StringPtr("random runID"), - status: toWorkflowExecutionStatusPtr(enumspb.WORKFLOW_EXECUTION_STATUS_FAILED), - }, - }, - { - query: "CloseTime > 2001 and CloseTime < 10000 and (RunId = 'random runID') and ExecutionStatus = 'Failed' and (RunId = 'another ID')", - expectErr: false, - parsedQuery: &parsedQuery{ - emptyResult: true, - }, - }, - } - - for i, tc := range testCases { - parsedQuery, err := s.parser.Parse(tc.query) - if tc.expectErr { - s.Error(err) - continue - } - s.NoError(err, "case %d", i) - s.Equal(tc.parsedQuery.emptyResult, parsedQuery.emptyResult, "case %d", i) - if !tc.parsedQuery.emptyResult { - s.Equal(tc.parsedQuery, parsedQuery, "case %d", i) - } - } -} diff -Nru temporal-1.21.5-1/src/common/archiver/filestore/query_parser.go temporal-1.22.5/src/common/archiver/filestore/query_parser.go --- temporal-1.21.5-1/src/common/archiver/filestore/query_parser.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/archiver/filestore/query_parser.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,279 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +//go:generate mockgen -copyright_file ../../../LICENSE -package $GOPACKAGE -source query_parser.go -destination query_parser_mock.go -mock_names Interface=MockQueryParser + +package filestore + +import ( + "errors" + "fmt" + "strconv" + "strings" + "time" + + "github.com/xwb1989/sqlparser" + enumspb "go.temporal.io/api/enums/v1" + + "go.temporal.io/server/common/convert" + "go.temporal.io/server/common/primitives/timestamp" + "go.temporal.io/server/common/util" +) + +type ( + // QueryParser parses a limited SQL where clause into a struct + QueryParser interface { + Parse(query string) (*parsedQuery, error) + } + + queryParser struct{} + + parsedQuery struct { + earliestCloseTime time.Time + latestCloseTime time.Time + workflowID *string + runID *string + workflowTypeName *string + status *enumspb.WorkflowExecutionStatus + emptyResult bool + } +) + +// All allowed fields for filtering +const ( + WorkflowID = "WorkflowId" + RunID = "RunId" + WorkflowType = "WorkflowType" + CloseTime = "CloseTime" + // Field name can't be just "Status" because it is reserved keyword in MySQL parser. + ExecutionStatus = "ExecutionStatus" +) + +const ( + queryTemplate = "select * from dummy where %s" + + defaultDateTimeFormat = time.RFC3339 +) + +// NewQueryParser creates a new query parser for filestore +func NewQueryParser() QueryParser { + return &queryParser{} +} + +func (p *queryParser) Parse(query string) (*parsedQuery, error) { + parsedQuery := &parsedQuery{ + earliestCloseTime: time.Time{}, + latestCloseTime: time.Now().UTC(), + } + if strings.TrimSpace(query) == "" { + return parsedQuery, nil + } + stmt, err := sqlparser.Parse(fmt.Sprintf(queryTemplate, query)) + if err != nil { + return nil, err + } + whereExpr := stmt.(*sqlparser.Select).Where.Expr + if err := p.convertWhereExpr(whereExpr, parsedQuery); err != nil { + return nil, err + } + return parsedQuery, nil +} + +func (p *queryParser) convertWhereExpr(expr sqlparser.Expr, parsedQuery *parsedQuery) error { + if expr == nil { + return errors.New("where expression is nil") + } + + switch expr := expr.(type) { + case *sqlparser.ComparisonExpr: + return p.convertComparisonExpr(expr, parsedQuery) + case *sqlparser.AndExpr: + return p.convertAndExpr(expr, parsedQuery) + case *sqlparser.ParenExpr: + return p.convertParenExpr(expr, parsedQuery) + default: + return errors.New("only comparison and \"and\" expression is supported") + } +} + +func (p *queryParser) convertParenExpr(parenExpr *sqlparser.ParenExpr, parsedQuery *parsedQuery) error { + return p.convertWhereExpr(parenExpr.Expr, parsedQuery) +} + +func (p *queryParser) convertAndExpr(andExpr *sqlparser.AndExpr, parsedQuery *parsedQuery) error { + if err := p.convertWhereExpr(andExpr.Left, parsedQuery); err != nil { + return err + } + return p.convertWhereExpr(andExpr.Right, parsedQuery) +} + +func (p *queryParser) convertComparisonExpr(compExpr *sqlparser.ComparisonExpr, parsedQuery *parsedQuery) error { + colName, ok := compExpr.Left.(*sqlparser.ColName) + if !ok { + return fmt.Errorf("invalid filter name: %s", sqlparser.String(compExpr.Left)) + } + colNameStr := sqlparser.String(colName) + op := compExpr.Operator + valExpr, ok := compExpr.Right.(*sqlparser.SQLVal) + if !ok { + return fmt.Errorf("invalid value: %s", sqlparser.String(compExpr.Right)) + } + valStr := sqlparser.String(valExpr) + + switch colNameStr { + case WorkflowID: + val, err := extractStringValue(valStr) + if err != nil { + return err + } + if op != "=" { + return fmt.Errorf("only operation = is support for %s", WorkflowID) 
+ } + if parsedQuery.workflowID != nil && *parsedQuery.workflowID != val { + parsedQuery.emptyResult = true + return nil + } + parsedQuery.workflowID = convert.StringPtr(val) + case RunID: + val, err := extractStringValue(valStr) + if err != nil { + return err + } + if op != "=" { + return fmt.Errorf("only operation = is support for %s", RunID) + } + if parsedQuery.runID != nil && *parsedQuery.runID != val { + parsedQuery.emptyResult = true + return nil + } + parsedQuery.runID = convert.StringPtr(val) + case WorkflowType: + val, err := extractStringValue(valStr) + if err != nil { + return err + } + if op != "=" { + return fmt.Errorf("only operation = is support for %s", WorkflowType) + } + if parsedQuery.workflowTypeName != nil && *parsedQuery.workflowTypeName != val { + parsedQuery.emptyResult = true + return nil + } + parsedQuery.workflowTypeName = convert.StringPtr(val) + case ExecutionStatus: + val, err := extractStringValue(valStr) + if err != nil { + // if failed to extract string value, it means user input close status as a number + val = valStr + } + if op != "=" { + return fmt.Errorf("only operation = is support for %s", ExecutionStatus) + } + status, err := convertStatusStr(val) + if err != nil { + return err + } + if parsedQuery.status != nil && *parsedQuery.status != status { + parsedQuery.emptyResult = true + return nil + } + parsedQuery.status = &status + case CloseTime: + timestamp, err := convertToTime(valStr) + if err != nil { + return err + } + return p.convertCloseTime(timestamp, op, parsedQuery) + default: + return fmt.Errorf("unknown filter name: %s", colNameStr) + } + + return nil +} + +func (p *queryParser) convertCloseTime(timestamp time.Time, op string, parsedQuery *parsedQuery) error { + switch op { + case "=": + if err := p.convertCloseTime(timestamp, ">=", parsedQuery); err != nil { + return err + } + if err := p.convertCloseTime(timestamp, "<=", parsedQuery); err != nil { + return err + } + case "<": + parsedQuery.latestCloseTime = util.MinTime(parsedQuery.latestCloseTime, timestamp.Add(-1*time.Nanosecond)) + case "<=": + parsedQuery.latestCloseTime = util.MinTime(parsedQuery.latestCloseTime, timestamp) + case ">": + parsedQuery.earliestCloseTime = util.MaxTime(parsedQuery.earliestCloseTime, timestamp.Add(1*time.Nanosecond)) + case ">=": + parsedQuery.earliestCloseTime = util.MaxTime(parsedQuery.earliestCloseTime, timestamp) + default: + return fmt.Errorf("operator %s is not supported for close time", op) + } + return nil +} + +func convertToTime(timeStr string) (time.Time, error) { + ts, err := strconv.ParseInt(timeStr, 10, 64) + if err == nil { + return timestamp.UnixOrZeroTime(ts), nil + } + timestampStr, err := extractStringValue(timeStr) + if err != nil { + return time.Time{}, err + } + parsedTime, err := time.Parse(defaultDateTimeFormat, timestampStr) + if err != nil { + return time.Time{}, err + } + return parsedTime, nil +} + +func convertStatusStr(statusStr string) (enumspb.WorkflowExecutionStatus, error) { + statusStr = strings.ToLower(strings.TrimSpace(statusStr)) + switch statusStr { + case "completed", convert.Int32ToString(int32(enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED)): + return enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED, nil + case "failed", convert.Int32ToString(int32(enumspb.WORKFLOW_EXECUTION_STATUS_FAILED)): + return enumspb.WORKFLOW_EXECUTION_STATUS_FAILED, nil + case "canceled", convert.Int32ToString(int32(enumspb.WORKFLOW_EXECUTION_STATUS_CANCELED)): + return enumspb.WORKFLOW_EXECUTION_STATUS_CANCELED, nil + case "terminated", 
convert.Int32ToString(int32(enumspb.WORKFLOW_EXECUTION_STATUS_TERMINATED)): + return enumspb.WORKFLOW_EXECUTION_STATUS_TERMINATED, nil + case "continuedasnew", "continued_as_new", convert.Int32ToString(int32(enumspb.WORKFLOW_EXECUTION_STATUS_CONTINUED_AS_NEW)): + return enumspb.WORKFLOW_EXECUTION_STATUS_CONTINUED_AS_NEW, nil + case "timedout", "timed_out", convert.Int32ToString(int32(enumspb.WORKFLOW_EXECUTION_STATUS_TIMED_OUT)): + return enumspb.WORKFLOW_EXECUTION_STATUS_TIMED_OUT, nil + default: + return 0, fmt.Errorf("unknown workflow close status: %s", statusStr) + } +} + +func extractStringValue(s string) (string, error) { + if len(s) >= 2 && s[0] == '\'' && s[len(s)-1] == '\'' { + return s[1 : len(s)-1], nil + } + return "", fmt.Errorf("value %s is not a string value", s) +} diff -Nru temporal-1.21.5-1/src/common/archiver/filestore/query_parser_mock.go temporal-1.22.5/src/common/archiver/filestore/query_parser_mock.go --- temporal-1.21.5-1/src/common/archiver/filestore/query_parser_mock.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/archiver/filestore/query_parser_mock.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,73 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Code generated by MockGen. DO NOT EDIT. +// Source: query_parser.go + +// Package filestore is a generated GoMock package. +package filestore + +import ( + reflect "reflect" + + gomock "github.com/golang/mock/gomock" +) + +// MockQueryParser is a mock of QueryParser interface. +type MockQueryParser struct { + ctrl *gomock.Controller + recorder *MockQueryParserMockRecorder +} + +// MockQueryParserMockRecorder is the mock recorder for MockQueryParser. +type MockQueryParserMockRecorder struct { + mock *MockQueryParser +} + +// NewMockQueryParser creates a new mock instance. +func NewMockQueryParser(ctrl *gomock.Controller) *MockQueryParser { + mock := &MockQueryParser{ctrl: ctrl} + mock.recorder = &MockQueryParserMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockQueryParser) EXPECT() *MockQueryParserMockRecorder { + return m.recorder +} + +// Parse mocks base method. 
+func (m *MockQueryParser) Parse(query string) (*parsedQuery, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Parse", query) + ret0, _ := ret[0].(*parsedQuery) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Parse indicates an expected call of Parse. +func (mr *MockQueryParserMockRecorder) Parse(query interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Parse", reflect.TypeOf((*MockQueryParser)(nil).Parse), query) +} diff -Nru temporal-1.21.5-1/src/common/archiver/filestore/query_parser_test.go temporal-1.22.5/src/common/archiver/filestore/query_parser_test.go --- temporal-1.21.5-1/src/common/archiver/filestore/query_parser_test.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/archiver/filestore/query_parser_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,367 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package filestore + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + enumspb "go.temporal.io/api/enums/v1" + + "go.temporal.io/server/common/convert" +) + +type queryParserSuite struct { + *require.Assertions + suite.Suite + + parser QueryParser +} + +func TestQueryParserSuite(t *testing.T) { + suite.Run(t, new(queryParserSuite)) +} + +func (s *queryParserSuite) SetupTest() { + s.Assertions = require.New(s.T()) + s.parser = NewQueryParser() +} + +func (s *queryParserSuite) TestParseWorkflowID_RunID_WorkflowType() { + testCases := []struct { + query string + expectErr bool + parsedQuery *parsedQuery + }{ + { + query: "WorkflowId = \"random workflowID\"", + expectErr: false, + parsedQuery: &parsedQuery{ + workflowID: convert.StringPtr("random workflowID"), + }, + }, + { + query: "WorkflowId = \"random workflowID\" and WorkflowId = \"random workflowID\"", + expectErr: false, + parsedQuery: &parsedQuery{ + workflowID: convert.StringPtr("random workflowID"), + }, + }, + { + query: "RunId = \"random runID\"", + expectErr: false, + parsedQuery: &parsedQuery{ + runID: convert.StringPtr("random runID"), + }, + }, + { + query: "WorkflowType = \"random typeName\"", + expectErr: false, + parsedQuery: &parsedQuery{ + workflowTypeName: convert.StringPtr("random typeName"), + }, + }, + { + query: "WorkflowId = 'random workflowID'", + expectErr: false, + parsedQuery: &parsedQuery{ + workflowID: convert.StringPtr("random workflowID"), + }, + }, + { + query: "WorkflowType = 'random typeName' and WorkflowType = \"another typeName\"", + expectErr: false, + parsedQuery: &parsedQuery{ + emptyResult: true, + }, + }, + { + query: "WorkflowType = 'random typeName' and (WorkflowId = \"random workflowID\" and RunId='random runID')", + expectErr: false, + parsedQuery: &parsedQuery{ + workflowID: convert.StringPtr("random workflowID"), + runID: convert.StringPtr("random runID"), + workflowTypeName: convert.StringPtr("random typeName"), + }, + }, + { + query: "runId = random workflowID", + expectErr: true, + }, + { + query: "WorkflowId = \"random workflowID\" or WorkflowId = \"another workflowID\"", + expectErr: true, + }, + { + query: "WorkflowId = \"random workflowID\" or runId = \"random runID\"", + expectErr: true, + }, + { + query: "workflowid = \"random workflowID\"", + expectErr: true, + }, + { + query: "runId > \"random workflowID\"", + expectErr: true, + }, + } + + for _, tc := range testCases { + parsedQuery, err := s.parser.Parse(tc.query) + if tc.expectErr { + s.Error(err) + continue + } + s.NoError(err) + s.Equal(tc.parsedQuery.emptyResult, parsedQuery.emptyResult) + if !tc.parsedQuery.emptyResult { + s.Equal(tc.parsedQuery.workflowID, parsedQuery.workflowID) + s.Equal(tc.parsedQuery.runID, parsedQuery.runID) + s.Equal(tc.parsedQuery.workflowTypeName, parsedQuery.workflowTypeName) + } + } +} + +func (s *queryParserSuite) TestParseCloseStatus() { + testCases := []struct { + query string + expectErr bool + parsedQuery *parsedQuery + }{ + { + query: "ExecutionStatus = \"Completed\"", + expectErr: false, + parsedQuery: &parsedQuery{ + status: toWorkflowExecutionStatusPtr(enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED), + }, + }, + { + query: "ExecutionStatus = \"failed\"", + expectErr: false, + parsedQuery: &parsedQuery{ + status: toWorkflowExecutionStatusPtr(enumspb.WORKFLOW_EXECUTION_STATUS_FAILED), + }, + }, + { + query: "ExecutionStatus = \"canceled\"", + expectErr: false, + parsedQuery: &parsedQuery{ + status: 
toWorkflowExecutionStatusPtr(enumspb.WORKFLOW_EXECUTION_STATUS_CANCELED), + }, + }, + { + query: "ExecutionStatus = \"terminated\"", + expectErr: false, + parsedQuery: &parsedQuery{ + status: toWorkflowExecutionStatusPtr(enumspb.WORKFLOW_EXECUTION_STATUS_TERMINATED), + }, + }, + { + query: "ExecutionStatus = 'continuedasnew'", + expectErr: false, + parsedQuery: &parsedQuery{ + status: toWorkflowExecutionStatusPtr(enumspb.WORKFLOW_EXECUTION_STATUS_CONTINUED_AS_NEW), + }, + }, + { + query: "ExecutionStatus = 'TIMED_OUT'", + expectErr: false, + parsedQuery: &parsedQuery{ + status: toWorkflowExecutionStatusPtr(enumspb.WORKFLOW_EXECUTION_STATUS_TIMED_OUT), + }, + }, + { + query: "ExecutionStatus = 'Failed' and ExecutionStatus = \"Failed\"", + expectErr: false, + parsedQuery: &parsedQuery{ + status: toWorkflowExecutionStatusPtr(enumspb.WORKFLOW_EXECUTION_STATUS_FAILED), + }, + }, + { + query: "(ExecutionStatus = 'Timedout' and ExecutionStatus = \"canceled\")", + expectErr: false, + parsedQuery: &parsedQuery{ + emptyResult: true, + }, + }, + { + query: "status = \"Failed\"", + expectErr: true, + }, + { + query: "ExecutionStatus = \"Failed\" or ExecutionStatus = \"Failed\"", + expectErr: true, + }, + { + query: "ExecutionStatus = \"unknown\"", + expectErr: true, + }, + { + query: "ExecutionStatus > \"Failed\"", + expectErr: true, + }, + { + query: "ExecutionStatus = 3", + expectErr: false, + parsedQuery: &parsedQuery{ + status: toWorkflowExecutionStatusPtr(enumspb.WORKFLOW_EXECUTION_STATUS_FAILED), + }, + }, + { + query: "CloseStatus = 10", + expectErr: true, + }, + } + + for _, tc := range testCases { + parsedQuery, err := s.parser.Parse(tc.query) + if tc.expectErr { + s.Error(err) + continue + } + s.NoError(err) + s.Equal(tc.parsedQuery.emptyResult, parsedQuery.emptyResult) + if !tc.parsedQuery.emptyResult { + s.EqualValues(tc.parsedQuery.status, parsedQuery.status) + } + } +} + +func (s *queryParserSuite) TestParseCloseTime() { + testCases := []struct { + query string + expectErr bool + parsedQuery *parsedQuery + }{ + { + query: "CloseTime <= 1000", + expectErr: false, + parsedQuery: &parsedQuery{ + earliestCloseTime: time.Time{}, + latestCloseTime: time.Unix(0, 1000), + }, + }, + { + query: "CloseTime < 2000 and CloseTime <= 1000 and CloseTime > 300", + expectErr: false, + parsedQuery: &parsedQuery{ + earliestCloseTime: time.Unix(0, 301), + latestCloseTime: time.Unix(0, 1000), + }, + }, + { + query: "CloseTime = 2000 and (CloseTime > 1000 and CloseTime <= 9999)", + expectErr: false, + parsedQuery: &parsedQuery{ + earliestCloseTime: time.Unix(0, 2000), + latestCloseTime: time.Unix(0, 2000), + }, + }, + { + query: "CloseTime <= \"2019-01-01T11:11:11Z\" and CloseTime >= 1000000", + expectErr: false, + parsedQuery: &parsedQuery{ + earliestCloseTime: time.Unix(0, 1000000), + latestCloseTime: time.Date(2019, 01, 01, 11, 11, 11, 0, time.UTC), + }, + }, + { + query: "closeTime = 2000", + expectErr: true, + }, + { + query: "CloseTime > \"2019-01-01 00:00:00\"", + expectErr: true, + }, + { + query: "ExecutionStatus > 2000 or ExecutionStatus < 1000", + expectErr: true, + }, + } + + for i, tc := range testCases { + parsedQuery, err := s.parser.Parse(tc.query) + if tc.expectErr { + s.Error(err) + continue + } + s.NoError(err, "case %d", i) + s.Equal(tc.parsedQuery.emptyResult, parsedQuery.emptyResult, "case %d", i) + if !tc.parsedQuery.emptyResult { + s.True(tc.parsedQuery.earliestCloseTime.Equal(parsedQuery.earliestCloseTime), "case %d", i) + 
s.True(tc.parsedQuery.latestCloseTime.Equal(parsedQuery.latestCloseTime), "case %d", i) + } + } +} + +func (s *queryParserSuite) TestParse() { + testCases := []struct { + query string + expectErr bool + parsedQuery *parsedQuery + }{ + { + query: "CloseTime <= \"2019-01-01T11:11:11Z\" and WorkflowId = 'random workflowID'", + expectErr: false, + parsedQuery: &parsedQuery{ + earliestCloseTime: time.Time{}, + latestCloseTime: time.Date(2019, 01, 01, 11, 11, 11, 0, time.UTC), + workflowID: convert.StringPtr("random workflowID"), + }, + }, + { + query: "CloseTime > 1999 and CloseTime < 10000 and RunId = 'random runID' and ExecutionStatus = 'Failed'", + expectErr: false, + parsedQuery: &parsedQuery{ + earliestCloseTime: time.Unix(0, 2000).UTC(), + latestCloseTime: time.Unix(0, 9999).UTC(), + runID: convert.StringPtr("random runID"), + status: toWorkflowExecutionStatusPtr(enumspb.WORKFLOW_EXECUTION_STATUS_FAILED), + }, + }, + { + query: "CloseTime > 2001 and CloseTime < 10000 and (RunId = 'random runID') and ExecutionStatus = 'Failed' and (RunId = 'another ID')", + expectErr: false, + parsedQuery: &parsedQuery{ + emptyResult: true, + }, + }, + } + + for i, tc := range testCases { + parsedQuery, err := s.parser.Parse(tc.query) + if tc.expectErr { + s.Error(err) + continue + } + s.NoError(err, "case %d", i) + s.Equal(tc.parsedQuery.emptyResult, parsedQuery.emptyResult, "case %d", i) + if !tc.parsedQuery.emptyResult { + s.Equal(tc.parsedQuery, parsedQuery, "case %d", i) + } + } +} diff -Nru temporal-1.21.5-1/src/common/archiver/filestore/visibilityArchiver.go temporal-1.22.5/src/common/archiver/filestore/visibilityArchiver.go --- temporal-1.21.5-1/src/common/archiver/filestore/visibilityArchiver.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/archiver/filestore/visibilityArchiver.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,366 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- -package filestore - -import ( - "context" - "fmt" - "os" - "path" - "sort" - "strconv" - "strings" - "time" - - commonpb "go.temporal.io/api/common/v1" - "go.temporal.io/api/serviceerror" - workflowpb "go.temporal.io/api/workflow/v1" - - archiverspb "go.temporal.io/server/api/archiver/v1" - "go.temporal.io/server/common/archiver" - "go.temporal.io/server/common/config" - "go.temporal.io/server/common/log/tag" - "go.temporal.io/server/common/primitives/timestamp" - "go.temporal.io/server/common/searchattribute" -) - -const ( - errEncodeVisibilityRecord = "failed to encode visibility record" -) - -type ( - visibilityArchiver struct { - container *archiver.VisibilityBootstrapContainer - fileMode os.FileMode - dirMode os.FileMode - queryParser QueryParser - } - - queryVisibilityToken struct { - LastCloseTime time.Time - LastRunID string - } - - queryVisibilityRequest struct { - namespaceID string - pageSize int - nextPageToken []byte - parsedQuery *parsedQuery - } -) - -// NewVisibilityArchiver creates a new archiver.VisibilityArchiver based on filestore -func NewVisibilityArchiver( - container *archiver.VisibilityBootstrapContainer, - config *config.FilestoreArchiver, -) (archiver.VisibilityArchiver, error) { - fileMode, err := strconv.ParseUint(config.FileMode, 0, 32) - if err != nil { - return nil, errInvalidFileMode - } - dirMode, err := strconv.ParseUint(config.DirMode, 0, 32) - if err != nil { - return nil, errInvalidDirMode - } - return &visibilityArchiver{ - container: container, - fileMode: os.FileMode(fileMode), - dirMode: os.FileMode(dirMode), - queryParser: NewQueryParser(), - }, nil -} - -func (v *visibilityArchiver) Archive( - ctx context.Context, - URI archiver.URI, - request *archiverspb.VisibilityRecord, - opts ...archiver.ArchiveOption, -) (err error) { - featureCatalog := archiver.GetFeatureCatalog(opts...) 
- defer func() { - if err != nil && featureCatalog.NonRetryableError != nil { - err = featureCatalog.NonRetryableError() - } - }() - - logger := archiver.TagLoggerWithArchiveVisibilityRequestAndURI(v.container.Logger, request, URI.String()) - - if err := v.ValidateURI(URI); err != nil { - logger.Error(archiver.ArchiveNonRetryableErrorMsg, tag.ArchivalArchiveFailReason(archiver.ErrReasonInvalidURI), tag.Error(err)) - return err - } - - if err := archiver.ValidateVisibilityArchivalRequest(request); err != nil { - logger.Error(archiver.ArchiveNonRetryableErrorMsg, tag.ArchivalArchiveFailReason(archiver.ErrReasonInvalidArchiveRequest), tag.Error(err)) - return err - } - - dirPath := path.Join(URI.Path(), request.GetNamespaceId()) - if err = mkdirAll(dirPath, v.dirMode); err != nil { - logger.Error(archiver.ArchiveNonRetryableErrorMsg, tag.ArchivalArchiveFailReason(errMakeDirectory), tag.Error(err)) - return err - } - - encodedVisibilityRecord, err := encode(request) - if err != nil { - logger.Error(archiver.ArchiveNonRetryableErrorMsg, tag.ArchivalArchiveFailReason(errEncodeVisibilityRecord), tag.Error(err)) - return err - } - - // The filename has the format: closeTimestamp_hash(runID).visibility - // This format allows the archiver to sort all records without reading the file contents - filename := constructVisibilityFilename(request.CloseTime, request.GetRunId()) - if err := writeFile(path.Join(dirPath, filename), encodedVisibilityRecord, v.fileMode); err != nil { - logger.Error(archiver.ArchiveNonRetryableErrorMsg, tag.ArchivalArchiveFailReason(errWriteFile), tag.Error(err)) - return err - } - - return nil -} - -func (v *visibilityArchiver) Query( - ctx context.Context, - URI archiver.URI, - request *archiver.QueryVisibilityRequest, - saTypeMap searchattribute.NameTypeMap, -) (*archiver.QueryVisibilityResponse, error) { - if err := v.ValidateURI(URI); err != nil { - return nil, serviceerror.NewInvalidArgument(archiver.ErrInvalidURI.Error()) - } - - if err := archiver.ValidateQueryRequest(request); err != nil { - return nil, serviceerror.NewInvalidArgument(archiver.ErrInvalidQueryVisibilityRequest.Error()) - } - - parsedQuery, err := v.queryParser.Parse(request.Query) - if err != nil { - return nil, serviceerror.NewInvalidArgument(err.Error()) - } - - if parsedQuery.emptyResult { - return &archiver.QueryVisibilityResponse{}, nil - } - - return v.query( - ctx, - URI, - &queryVisibilityRequest{ - namespaceID: request.NamespaceID, - pageSize: request.PageSize, - nextPageToken: request.NextPageToken, - parsedQuery: parsedQuery, - }, - saTypeMap, - ) -} - -func (v *visibilityArchiver) query( - ctx context.Context, - URI archiver.URI, - request *queryVisibilityRequest, - saTypeMap searchattribute.NameTypeMap, -) (*archiver.QueryVisibilityResponse, error) { - var token *queryVisibilityToken - if request.nextPageToken != nil { - var err error - token, err = deserializeQueryVisibilityToken(request.nextPageToken) - if err != nil { - return nil, serviceerror.NewInvalidArgument(archiver.ErrNextPageTokenCorrupted.Error()) - } - } - - dirPath := path.Join(URI.Path(), request.namespaceID) - exists, err := directoryExists(dirPath) - if err != nil { - return nil, serviceerror.NewInternal(err.Error()) - } - if !exists { - return &archiver.QueryVisibilityResponse{}, nil - } - - files, err := listFiles(dirPath) - if err != nil { - return nil, serviceerror.NewInternal(err.Error()) - } - - files, err = sortAndFilterFiles(files, token) - if err != nil { - return nil, serviceerror.NewInternal(err.Error()) - } - if 
len(files) == 0 { - return &archiver.QueryVisibilityResponse{}, nil - } - - response := &archiver.QueryVisibilityResponse{} - for idx, file := range files { - encodedRecord, err := readFile(path.Join(dirPath, file)) - if err != nil { - return nil, serviceerror.NewInternal(err.Error()) - } - - record, err := decodeVisibilityRecord(encodedRecord) - if err != nil { - return nil, serviceerror.NewInternal(err.Error()) - } - - if record.CloseTime.Before(request.parsedQuery.earliestCloseTime) { - break - } - - if matchQuery(record, request.parsedQuery) { - executionInfo, err := convertToExecutionInfo(record, saTypeMap) - if err != nil { - return nil, serviceerror.NewInternal(err.Error()) - } - - response.Executions = append(response.Executions, executionInfo) - if len(response.Executions) == request.pageSize { - if idx != len(files) { - newToken := &queryVisibilityToken{ - LastCloseTime: timestamp.TimeValue(record.CloseTime), - LastRunID: record.GetRunId(), - } - encodedToken, err := serializeToken(newToken) - if err != nil { - return nil, serviceerror.NewInternal(err.Error()) - } - response.NextPageToken = encodedToken - } - break - } - } - } - - return response, nil -} - -func (v *visibilityArchiver) ValidateURI(URI archiver.URI) error { - if URI.Scheme() != URIScheme { - return archiver.ErrURISchemeMismatch - } - - return validateDirPath((URI.Path())) -} - -type parsedVisFilename struct { - name string - closeTime time.Time - hashedRunID string -} - -// sortAndFilterFiles sort visibility record file names based on close timestamp (desc) and use hashed runID to break ties. -// if a nextPageToken is give, it only returns filenames that have a smaller close timestamp -func sortAndFilterFiles(filenames []string, token *queryVisibilityToken) ([]string, error) { - var parsedFilenames []*parsedVisFilename - for _, name := range filenames { - pieces := strings.FieldsFunc(name, func(r rune) bool { - return r == '_' || r == '.' 
- }) - if len(pieces) != 3 { - return nil, fmt.Errorf("failed to parse visibility filename %s", name) - } - - closeTime, err := strconv.ParseInt(pieces[0], 10, 64) - if err != nil { - return nil, fmt.Errorf("failed to parse visibility filename %s", name) - } - parsedFilenames = append(parsedFilenames, &parsedVisFilename{ - name: name, - closeTime: timestamp.UnixOrZeroTime(closeTime), - hashedRunID: pieces[1], - }) - } - - sort.Slice(parsedFilenames, func(i, j int) bool { - if parsedFilenames[i].closeTime.Equal(parsedFilenames[j].closeTime) { - return parsedFilenames[i].hashedRunID > parsedFilenames[j].hashedRunID - } - return parsedFilenames[i].closeTime.After(parsedFilenames[j].closeTime) - }) - - startIdx := 0 - if token != nil { - LastHashedRunID := hash(token.LastRunID) - startIdx = sort.Search(len(parsedFilenames), func(i int) bool { - if parsedFilenames[i].closeTime.Equal(token.LastCloseTime) { - return parsedFilenames[i].hashedRunID < LastHashedRunID - } - return parsedFilenames[i].closeTime.Before(token.LastCloseTime) - }) - } - - if startIdx == len(parsedFilenames) { - return []string{}, nil - } - - var filteredFilenames []string - for _, parsedFilename := range parsedFilenames[startIdx:] { - filteredFilenames = append(filteredFilenames, parsedFilename.name) - } - return filteredFilenames, nil -} - -func matchQuery(record *archiverspb.VisibilityRecord, query *parsedQuery) bool { - if record.CloseTime.Before(query.earliestCloseTime) || record.CloseTime.After(query.latestCloseTime) { - return false - } - if query.workflowID != nil && record.GetWorkflowId() != *query.workflowID { - return false - } - if query.runID != nil && record.GetRunId() != *query.runID { - return false - } - if query.workflowTypeName != nil && record.WorkflowTypeName != *query.workflowTypeName { - return false - } - if query.status != nil && record.Status != *query.status { - return false - } - return true -} - -func convertToExecutionInfo(record *archiverspb.VisibilityRecord, saTypeMap searchattribute.NameTypeMap) (*workflowpb.WorkflowExecutionInfo, error) { - searchAttributes, err := searchattribute.Parse(record.SearchAttributes, &saTypeMap) - if err != nil { - return nil, err - } - - return &workflowpb.WorkflowExecutionInfo{ - Execution: &commonpb.WorkflowExecution{ - WorkflowId: record.GetWorkflowId(), - RunId: record.GetRunId(), - }, - Type: &commonpb.WorkflowType{ - Name: record.WorkflowTypeName, - }, - StartTime: record.StartTime, - ExecutionTime: record.ExecutionTime, - CloseTime: record.CloseTime, - Status: record.Status, - HistoryLength: record.HistoryLength, - Memo: record.Memo, - SearchAttributes: searchAttributes, - }, nil -} diff -Nru temporal-1.21.5-1/src/common/archiver/filestore/visibilityArchiver_test.go temporal-1.22.5/src/common/archiver/filestore/visibilityArchiver_test.go --- temporal-1.21.5-1/src/common/archiver/filestore/visibilityArchiver_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/archiver/filestore/visibilityArchiver_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,672 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package filestore - -import ( - "context" - "errors" - "os" - "path" - "testing" - "time" - - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - commonpb "go.temporal.io/api/common/v1" - enumspb "go.temporal.io/api/enums/v1" - "go.temporal.io/api/serviceerror" - workflowpb "go.temporal.io/api/workflow/v1" - - "go.temporal.io/server/common/searchattribute" - - archiverspb "go.temporal.io/server/api/archiver/v1" - "go.temporal.io/server/common/archiver" - "go.temporal.io/server/common/codec" - "go.temporal.io/server/common/config" - "go.temporal.io/server/common/convert" - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/payload" - "go.temporal.io/server/common/primitives/timestamp" - "go.temporal.io/server/tests/testutils" -) - -const ( - testWorkflowTypeName = "test-workflow-type" -) - -type visibilityArchiverSuite struct { - *require.Assertions - suite.Suite - - container *archiver.VisibilityBootstrapContainer - testArchivalURI archiver.URI - testQueryDirectory string - visibilityRecords []*archiverspb.VisibilityRecord - - controller *gomock.Controller -} - -func TestVisibilityArchiverSuite(t *testing.T) { - suite.Run(t, new(visibilityArchiverSuite)) -} - -func (s *visibilityArchiverSuite) SetupSuite() { - var err error - s.testQueryDirectory, err = os.MkdirTemp("", "TestQuery") - s.Require().NoError(err) - s.setupVisibilityDirectory() - s.testArchivalURI, err = archiver.NewURI("file:///a/b/c") - s.Require().NoError(err) -} - -func (s *visibilityArchiverSuite) TearDownSuite() { - if err := os.RemoveAll(s.testQueryDirectory); err != nil { - s.Fail("Failed to remove test query directory %v: %v", s.testQueryDirectory, err) - } -} - -func (s *visibilityArchiverSuite) SetupTest() { - s.Assertions = require.New(s.T()) - s.container = &archiver.VisibilityBootstrapContainer{ - Logger: log.NewNoopLogger(), - } - s.controller = gomock.NewController(s.T()) -} - -func (s *visibilityArchiverSuite) TearDownTest() { - s.controller.Finish() -} - -func (s *visibilityArchiverSuite) TestValidateURI() { - testCases := []struct { - URI string - expectedErr error - }{ - { - URI: "wrongscheme:///a/b/c", - expectedErr: archiver.ErrURISchemeMismatch, - }, - { - URI: "file://", - expectedErr: errEmptyDirectoryPath, - }, - { - URI: "file:///a/b/c", - expectedErr: nil, - }, - } - - visibilityArchiver := s.newTestVisibilityArchiver() - for _, tc := range testCases 
{ - URI, err := archiver.NewURI(tc.URI) - s.NoError(err) - s.Equal(tc.expectedErr, visibilityArchiver.ValidateURI(URI)) - } -} - -func (s *visibilityArchiverSuite) TestArchive_Fail_InvalidURI() { - visibilityArchiver := s.newTestVisibilityArchiver() - URI, err := archiver.NewURI("wrongscheme://") - s.NoError(err) - request := &archiverspb.VisibilityRecord{ - Namespace: testNamespace, - NamespaceId: testNamespaceID, - WorkflowId: testWorkflowID, - RunId: testRunID, - WorkflowTypeName: testWorkflowTypeName, - StartTime: timestamp.TimeNowPtrUtc(), - ExecutionTime: nil, // workflow without backoff - CloseTime: timestamp.TimeNowPtrUtc(), - Status: enumspb.WORKFLOW_EXECUTION_STATUS_FAILED, - HistoryLength: int64(101), - } - err = visibilityArchiver.Archive(context.Background(), URI, request) - s.Error(err) -} - -func (s *visibilityArchiverSuite) TestArchive_Fail_InvalidRequest() { - visibilityArchiver := s.newTestVisibilityArchiver() - err := visibilityArchiver.Archive(context.Background(), s.testArchivalURI, &archiverspb.VisibilityRecord{}) - s.Error(err) -} - -func (s *visibilityArchiverSuite) TestArchive_Fail_NonRetryableErrorOption() { - visibilityArchiver := s.newTestVisibilityArchiver() - nonRetryableErr := errors.New("some non-retryable error") - err := visibilityArchiver.Archive( - context.Background(), - s.testArchivalURI, - &archiverspb.VisibilityRecord{}, - archiver.GetNonRetryableErrorOption(nonRetryableErr), - ) - s.Equal(nonRetryableErr, err) -} - -func (s *visibilityArchiverSuite) TestArchive_Success() { - dir := testutils.MkdirTemp(s.T(), "", "TestVisibilityArchive") - - visibilityArchiver := s.newTestVisibilityArchiver() - closeTimestamp := timestamp.TimeNowPtrUtc() - request := &archiverspb.VisibilityRecord{ - NamespaceId: testNamespaceID, - Namespace: testNamespace, - WorkflowId: testWorkflowID, - RunId: testRunID, - WorkflowTypeName: testWorkflowTypeName, - StartTime: timestamp.TimePtr(closeTimestamp.Add(-time.Hour)), - ExecutionTime: nil, // workflow without backoff - CloseTime: closeTimestamp, - Status: enumspb.WORKFLOW_EXECUTION_STATUS_FAILED, - HistoryLength: int64(101), - Memo: &commonpb.Memo{ - Fields: map[string]*commonpb.Payload{ - "testFields": payload.EncodeBytes([]byte{1, 2, 3}), - }, - }, - SearchAttributes: map[string]string{ - "testAttribute": "456", - }, - } - URI, err := archiver.NewURI("file://" + dir) - s.NoError(err) - err = visibilityArchiver.Archive(context.Background(), URI, request) - s.NoError(err) - - expectedFilename := constructVisibilityFilename(closeTimestamp, testRunID) - filepath := path.Join(dir, testNamespaceID, expectedFilename) - s.assertFileExists(filepath) - - data, err := readFile(filepath) - s.NoError(err) - - archivedRecord := &archiverspb.VisibilityRecord{} - encoder := codec.NewJSONPBEncoder() - err = encoder.Decode(data, archivedRecord) - s.NoError(err) - s.Equal(request, archivedRecord) -} - -func (s *visibilityArchiverSuite) TestMatchQuery() { - testCases := []struct { - query *parsedQuery - record *archiverspb.VisibilityRecord - shouldMatch bool - }{ - { - query: &parsedQuery{ - earliestCloseTime: time.Unix(0, 1000), - latestCloseTime: time.Unix(0, 12345), - }, - record: &archiverspb.VisibilityRecord{ - CloseTime: timestamp.UnixOrZeroTimePtr(1999), - }, - shouldMatch: true, - }, - { - query: &parsedQuery{ - earliestCloseTime: time.Unix(0, 1000), - latestCloseTime: time.Unix(0, 12345), - }, - record: &archiverspb.VisibilityRecord{ - CloseTime: timestamp.UnixOrZeroTimePtr(999), - }, - shouldMatch: false, - }, - { - query: 
&parsedQuery{ - earliestCloseTime: time.Unix(0, 1000), - latestCloseTime: time.Unix(0, 12345), - workflowID: convert.StringPtr("random workflowID"), - }, - record: &archiverspb.VisibilityRecord{ - CloseTime: timestamp.UnixOrZeroTimePtr(2000), - }, - shouldMatch: false, - }, - { - query: &parsedQuery{ - earliestCloseTime: time.Unix(0, 1000), - latestCloseTime: time.Unix(0, 12345), - workflowID: convert.StringPtr("random workflowID"), - runID: convert.StringPtr("random runID"), - }, - record: &archiverspb.VisibilityRecord{ - CloseTime: timestamp.UnixOrZeroTimePtr(12345), - WorkflowId: "random workflowID", - RunId: "random runID", - WorkflowTypeName: "random type name", - }, - shouldMatch: true, - }, - { - query: &parsedQuery{ - earliestCloseTime: time.Unix(0, 1000), - latestCloseTime: time.Unix(0, 12345), - workflowTypeName: convert.StringPtr("some random type name"), - }, - record: &archiverspb.VisibilityRecord{ - CloseTime: timestamp.UnixOrZeroTimePtr(12345), - }, - shouldMatch: false, - }, - { - query: &parsedQuery{ - earliestCloseTime: time.Unix(0, 1000), - latestCloseTime: time.Unix(0, 12345), - workflowTypeName: convert.StringPtr("some random type name"), - status: toWorkflowExecutionStatusPtr(enumspb.WORKFLOW_EXECUTION_STATUS_CONTINUED_AS_NEW), - }, - record: &archiverspb.VisibilityRecord{ - CloseTime: timestamp.UnixOrZeroTimePtr(12345), - Status: enumspb.WORKFLOW_EXECUTION_STATUS_CONTINUED_AS_NEW, - WorkflowTypeName: "some random type name", - }, - shouldMatch: true, - }, - } - - for _, tc := range testCases { - s.Equal(tc.shouldMatch, matchQuery(tc.record, tc.query)) - } -} - -func (s *visibilityArchiverSuite) TestSortAndFilterFiles() { - testCases := []struct { - filenames []string - token *queryVisibilityToken - expectedResult []string - }{ - { - filenames: []string{"9_12345.vis", "5_0.vis", "9_54321.vis", "1000_654.vis", "1000_78.vis"}, - expectedResult: []string{"1000_78.vis", "1000_654.vis", "9_54321.vis", "9_12345.vis", "5_0.vis"}, - }, - { - filenames: []string{"9_12345.vis", "5_0.vis", "9_54321.vis", "1000_654.vis", "1000_78.vis"}, - token: &queryVisibilityToken{ - LastCloseTime: time.Unix(0, 3), - }, - expectedResult: []string{}, - }, - { - filenames: []string{"9_12345.vis", "5_0.vis", "9_54321.vis", "1000_654.vis", "1000_78.vis"}, - token: &queryVisibilityToken{ - LastCloseTime: time.Unix(0, 999), - }, - expectedResult: []string{"9_54321.vis", "9_12345.vis", "5_0.vis"}, - }, - { - filenames: []string{"9_12345.vis", "5_0.vis", "9_54321.vis", "1000_654.vis", "1000_78.vis"}, - token: &queryVisibilityToken{ - LastCloseTime: time.Unix(0, 5).UTC(), - }, - expectedResult: []string{"5_0.vis"}, - }, - } - - for i, tc := range testCases { - result, err := sortAndFilterFiles(tc.filenames, tc.token) - s.NoError(err, "case %d", i) - s.Equal(tc.expectedResult, result, "case %d", i) - } -} - -func (s *visibilityArchiverSuite) TestQuery_Fail_InvalidURI() { - visibilityArchiver := s.newTestVisibilityArchiver() - URI, err := archiver.NewURI("wrongscheme://") - s.NoError(err) - request := &archiver.QueryVisibilityRequest{ - NamespaceID: testNamespaceID, - PageSize: 1, - } - response, err := visibilityArchiver.Query(context.Background(), URI, request, searchattribute.TestNameTypeMap) - s.Error(err) - s.Nil(response) -} - -func (s *visibilityArchiverSuite) TestQuery_Fail_InvalidRequest() { - visibilityArchiver := s.newTestVisibilityArchiver() - response, err := visibilityArchiver.Query(context.Background(), s.testArchivalURI, &archiver.QueryVisibilityRequest{}, searchattribute.TestNameTypeMap) 
- s.Error(err) - s.Nil(response) -} - -func (s *visibilityArchiverSuite) TestQuery_Fail_InvalidQuery() { - visibilityArchiver := s.newTestVisibilityArchiver() - mockParser := NewMockQueryParser(s.controller) - mockParser.EXPECT().Parse(gomock.Any()).Return(nil, errors.New("invalid query")) - visibilityArchiver.queryParser = mockParser - response, err := visibilityArchiver.Query(context.Background(), s.testArchivalURI, &archiver.QueryVisibilityRequest{ - NamespaceID: "some random namespaceID", - PageSize: 10, - Query: "some invalid query", - }, searchattribute.TestNameTypeMap) - s.Error(err) - s.Nil(response) -} - -func (s *visibilityArchiverSuite) TestQuery_Success_DirectoryNotExist() { - visibilityArchiver := s.newTestVisibilityArchiver() - mockParser := NewMockQueryParser(s.controller) - mockParser.EXPECT().Parse(gomock.Any()).Return(&parsedQuery{ - earliestCloseTime: time.Unix(0, 1), - latestCloseTime: time.Unix(0, 101), - }, nil) - visibilityArchiver.queryParser = mockParser - request := &archiver.QueryVisibilityRequest{ - NamespaceID: testNamespaceID, - Query: "parsed by mockParser", - PageSize: 1, - } - response, err := visibilityArchiver.Query(context.Background(), s.testArchivalURI, request, searchattribute.TestNameTypeMap) - s.NoError(err) - s.NotNil(response) - s.Empty(response.Executions) - s.Empty(response.NextPageToken) -} - -func (s *visibilityArchiverSuite) TestQuery_Fail_InvalidToken() { - visibilityArchiver := s.newTestVisibilityArchiver() - mockParser := NewMockQueryParser(s.controller) - mockParser.EXPECT().Parse(gomock.Any()).Return(&parsedQuery{ - earliestCloseTime: time.Unix(0, 1), - latestCloseTime: time.Unix(0, 101), - }, nil) - visibilityArchiver.queryParser = mockParser - request := &archiver.QueryVisibilityRequest{ - NamespaceID: testNamespaceID, - Query: "parsed by mockParser", - PageSize: 1, - NextPageToken: []byte{1, 2, 3}, - } - response, err := visibilityArchiver.Query(context.Background(), s.testArchivalURI, request, searchattribute.TestNameTypeMap) - s.Error(err) - s.Nil(response) -} - -func (s *visibilityArchiverSuite) TestQuery_Success_NoNextPageToken() { - visibilityArchiver := s.newTestVisibilityArchiver() - mockParser := NewMockQueryParser(s.controller) - mockParser.EXPECT().Parse(gomock.Any()).Return(&parsedQuery{ - earliestCloseTime: time.Unix(0, 1), - latestCloseTime: time.Unix(0, 10001), - workflowID: convert.StringPtr(testWorkflowID), - }, nil) - visibilityArchiver.queryParser = mockParser - request := &archiver.QueryVisibilityRequest{ - NamespaceID: testNamespaceID, - PageSize: 10, - Query: "parsed by mockParser", - } - URI, err := archiver.NewURI("file://" + s.testQueryDirectory) - s.NoError(err) - response, err := visibilityArchiver.Query(context.Background(), URI, request, searchattribute.TestNameTypeMap) - s.NoError(err) - s.NotNil(response) - s.Nil(response.NextPageToken) - s.Len(response.Executions, 1) - ei, err := convertToExecutionInfo(s.visibilityRecords[0], searchattribute.TestNameTypeMap) - s.NoError(err) - s.Equal(ei, response.Executions[0]) -} - -func (s *visibilityArchiverSuite) TestQuery_Success_SmallPageSize() { - visibilityArchiver := s.newTestVisibilityArchiver() - mockParser := NewMockQueryParser(s.controller) - mockParser.EXPECT().Parse(gomock.Any()).Return(&parsedQuery{ - earliestCloseTime: time.Unix(0, 1), - latestCloseTime: time.Unix(0, 10001), - status: toWorkflowExecutionStatusPtr(enumspb.WORKFLOW_EXECUTION_STATUS_FAILED), - }, nil).AnyTimes() - visibilityArchiver.queryParser = mockParser - request := 
&archiver.QueryVisibilityRequest{ - NamespaceID: testNamespaceID, - PageSize: 2, - Query: "parsed by mockParser", - } - URI, err := archiver.NewURI("file://" + s.testQueryDirectory) - s.NoError(err) - response, err := visibilityArchiver.Query(context.Background(), URI, request, searchattribute.TestNameTypeMap) - s.NoError(err) - s.NotNil(response) - s.NotNil(response.NextPageToken) - s.Len(response.Executions, 2) - ei, err := convertToExecutionInfo(s.visibilityRecords[0], searchattribute.TestNameTypeMap) - s.NoError(err) - s.Equal(ei, response.Executions[0]) - ei, err = convertToExecutionInfo(s.visibilityRecords[1], searchattribute.TestNameTypeMap) - s.NoError(err) - s.Equal(ei, response.Executions[1]) - - request.NextPageToken = response.NextPageToken - response, err = visibilityArchiver.Query(context.Background(), URI, request, searchattribute.TestNameTypeMap) - s.NoError(err) - s.NotNil(response) - s.Nil(response.NextPageToken) - s.Len(response.Executions, 1) - ei, err = convertToExecutionInfo(s.visibilityRecords[3], searchattribute.TestNameTypeMap) - s.NoError(err) - s.Equal(ei, response.Executions[0]) -} - -func (s *visibilityArchiverSuite) TestArchiveAndQuery() { - dir := testutils.MkdirTemp(s.T(), "", "TestArchiveAndQuery") - - visibilityArchiver := s.newTestVisibilityArchiver() - mockParser := NewMockQueryParser(s.controller) - mockParser.EXPECT().Parse(gomock.Any()).Return(&parsedQuery{ - earliestCloseTime: time.Unix(0, 10), - latestCloseTime: time.Unix(0, 10001), - status: toWorkflowExecutionStatusPtr(enumspb.WORKFLOW_EXECUTION_STATUS_FAILED), - }, nil).AnyTimes() - visibilityArchiver.queryParser = mockParser - URI, err := archiver.NewURI("file://" + dir) - s.NoError(err) - for _, record := range s.visibilityRecords { - err := visibilityArchiver.Archive(context.Background(), URI, (*archiverspb.VisibilityRecord)(record)) - s.NoError(err) - } - - request := &archiver.QueryVisibilityRequest{ - NamespaceID: testNamespaceID, - PageSize: 1, - Query: "parsed by mockParser", - } - executions := []*workflowpb.WorkflowExecutionInfo{} - for len(executions) == 0 || request.NextPageToken != nil { - response, err := visibilityArchiver.Query(context.Background(), URI, request, searchattribute.TestNameTypeMap) - s.NoError(err) - s.NotNil(response) - executions = append(executions, response.Executions...) 
- request.NextPageToken = response.NextPageToken - } - s.Len(executions, 2) - ei, err := convertToExecutionInfo(s.visibilityRecords[0], searchattribute.TestNameTypeMap) - s.NoError(err) - s.Equal(ei, executions[0]) - ei, err = convertToExecutionInfo(s.visibilityRecords[1], searchattribute.TestNameTypeMap) - s.NoError(err) - s.Equal(ei, executions[1]) -} - -func (s *visibilityArchiverSuite) TestQuery_EmptyQuery_InvalidNamespace() { - URI := s.testArchivalURI - - visibilityArchiver := s.newTestVisibilityArchiver() - mockParser := NewMockQueryParser(s.controller) - mockParser.EXPECT().Parse(gomock.Any()).Return(&parsedQuery{ - earliestCloseTime: time.Unix(0, 10), - latestCloseTime: time.Unix(0, 10001), - status: toWorkflowExecutionStatusPtr(enumspb.WORKFLOW_EXECUTION_STATUS_FAILED), - }, nil).AnyTimes() - visibilityArchiver.queryParser = mockParser - req := &archiver.QueryVisibilityRequest{ - NamespaceID: "", - PageSize: 1, - NextPageToken: nil, - Query: "", - } - _, err := visibilityArchiver.Query(context.Background(), URI, req, searchattribute.TestNameTypeMap) - - var svcErr *serviceerror.InvalidArgument - - s.ErrorAs(err, &svcErr) -} - -func (s *visibilityArchiverSuite) TestQuery_EmptyQuery_ZeroPageSize() { - visibilityArchiver := s.newTestVisibilityArchiver() - - req := &archiver.QueryVisibilityRequest{ - NamespaceID: testNamespaceID, - PageSize: 0, - NextPageToken: nil, - Query: "", - } - _, err := visibilityArchiver.Query(context.Background(), s.testArchivalURI, req, searchattribute.TestNameTypeMap) - - var svcErr *serviceerror.InvalidArgument - - s.ErrorAs(err, &svcErr) -} - -func (s *visibilityArchiverSuite) TestQuery_EmptyQuery_Pagination() { - dir := testutils.MkdirTemp(s.T(), "", "TestQuery_EmptyQuery_Pagination") - - visibilityArchiver := s.newTestVisibilityArchiver() - URI, err := archiver.NewURI("file://" + dir) - s.NoError(err) - for _, record := range s.visibilityRecords { - err := visibilityArchiver.Archive(context.Background(), URI, record) - s.NoError(err) - } - - request := &archiver.QueryVisibilityRequest{ - NamespaceID: testNamespaceID, - PageSize: 1, - Query: "", - } - var executions []*workflowpb.WorkflowExecutionInfo - for len(executions) == 0 || request.NextPageToken != nil { - response, err := visibilityArchiver.Query(context.Background(), URI, request, searchattribute.TestNameTypeMap) - s.NoError(err) - s.NotNil(response) - executions = append(executions, response.Executions...) 
- request.NextPageToken = response.NextPageToken - } - s.Len(executions, 4) -} - -func (s *visibilityArchiverSuite) newTestVisibilityArchiver() *visibilityArchiver { - config := &config.FilestoreArchiver{ - FileMode: testFileModeStr, - DirMode: testDirModeStr, - } - archiver, err := NewVisibilityArchiver(s.container, config) - s.NoError(err) - return archiver.(*visibilityArchiver) -} - -func (s *visibilityArchiverSuite) setupVisibilityDirectory() { - s.visibilityRecords = []*archiverspb.VisibilityRecord{ - { - NamespaceId: testNamespaceID, - Namespace: testNamespace, - WorkflowId: testWorkflowID, - RunId: testRunID, - WorkflowTypeName: testWorkflowTypeName, - StartTime: timestamp.UnixOrZeroTimePtr(1), - CloseTime: timestamp.UnixOrZeroTimePtr(10000), - Status: enumspb.WORKFLOW_EXECUTION_STATUS_FAILED, - HistoryLength: 101, - }, - { - NamespaceId: testNamespaceID, - Namespace: testNamespace, - WorkflowId: "some random workflow ID", - RunId: "some random run ID", - WorkflowTypeName: testWorkflowTypeName, - StartTime: timestamp.UnixOrZeroTimePtr(2), - ExecutionTime: nil, - CloseTime: timestamp.UnixOrZeroTimePtr(1000), - Status: enumspb.WORKFLOW_EXECUTION_STATUS_FAILED, - HistoryLength: 123, - }, - { - NamespaceId: testNamespaceID, - Namespace: testNamespace, - WorkflowId: "another workflow ID", - RunId: "another run ID", - WorkflowTypeName: testWorkflowTypeName, - StartTime: timestamp.UnixOrZeroTimePtr(3), - ExecutionTime: nil, - CloseTime: timestamp.UnixOrZeroTimePtr(10), - Status: enumspb.WORKFLOW_EXECUTION_STATUS_CONTINUED_AS_NEW, - HistoryLength: 456, - }, - { - NamespaceId: testNamespaceID, - Namespace: testNamespace, - WorkflowId: "and another workflow ID", - RunId: "and another run ID", - WorkflowTypeName: testWorkflowTypeName, - StartTime: timestamp.UnixOrZeroTimePtr(3), - ExecutionTime: nil, - CloseTime: timestamp.UnixOrZeroTimePtr(5), - Status: enumspb.WORKFLOW_EXECUTION_STATUS_FAILED, - HistoryLength: 456, - }, - { - NamespaceId: "some random namespace ID", - Namespace: "some random namespace name", - WorkflowId: "another workflow ID", - RunId: "another run ID", - WorkflowTypeName: testWorkflowTypeName, - StartTime: timestamp.UnixOrZeroTimePtr(3), - ExecutionTime: nil, - CloseTime: timestamp.UnixOrZeroTimePtr(10000), - Status: enumspb.WORKFLOW_EXECUTION_STATUS_CONTINUED_AS_NEW, - HistoryLength: 456, - }, - } - - for _, record := range s.visibilityRecords { - s.writeVisibilityRecordForQueryTest(record) - } -} - -func (s *visibilityArchiverSuite) writeVisibilityRecordForQueryTest(record *archiverspb.VisibilityRecord) { - data, err := encode(record) - s.Require().NoError(err) - filename := constructVisibilityFilename(record.CloseTime, record.GetRunId()) - s.Require().NoError(os.MkdirAll(path.Join(s.testQueryDirectory, record.GetNamespaceId()), testDirMode)) - err = writeFile(path.Join(s.testQueryDirectory, record.GetNamespaceId(), filename), data, testFileMode) - s.Require().NoError(err) -} - -func (s *visibilityArchiverSuite) assertFileExists(filepath string) { - exists, err := fileExists(filepath) - s.NoError(err) - s.True(exists) -} diff -Nru temporal-1.21.5-1/src/common/archiver/filestore/visibility_archiver.go temporal-1.22.5/src/common/archiver/filestore/visibility_archiver.go --- temporal-1.21.5-1/src/common/archiver/filestore/visibility_archiver.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/archiver/filestore/visibility_archiver.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,366 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies 
Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package filestore + +import ( + "context" + "fmt" + "os" + "path" + "sort" + "strconv" + "strings" + "time" + + commonpb "go.temporal.io/api/common/v1" + "go.temporal.io/api/serviceerror" + workflowpb "go.temporal.io/api/workflow/v1" + + archiverspb "go.temporal.io/server/api/archiver/v1" + "go.temporal.io/server/common/archiver" + "go.temporal.io/server/common/config" + "go.temporal.io/server/common/log/tag" + "go.temporal.io/server/common/primitives/timestamp" + "go.temporal.io/server/common/searchattribute" +) + +const ( + errEncodeVisibilityRecord = "failed to encode visibility record" +) + +type ( + visibilityArchiver struct { + container *archiver.VisibilityBootstrapContainer + fileMode os.FileMode + dirMode os.FileMode + queryParser QueryParser + } + + queryVisibilityToken struct { + LastCloseTime time.Time + LastRunID string + } + + queryVisibilityRequest struct { + namespaceID string + pageSize int + nextPageToken []byte + parsedQuery *parsedQuery + } +) + +// NewVisibilityArchiver creates a new archiver.VisibilityArchiver based on filestore +func NewVisibilityArchiver( + container *archiver.VisibilityBootstrapContainer, + config *config.FilestoreArchiver, +) (archiver.VisibilityArchiver, error) { + fileMode, err := strconv.ParseUint(config.FileMode, 0, 32) + if err != nil { + return nil, errInvalidFileMode + } + dirMode, err := strconv.ParseUint(config.DirMode, 0, 32) + if err != nil { + return nil, errInvalidDirMode + } + return &visibilityArchiver{ + container: container, + fileMode: os.FileMode(fileMode), + dirMode: os.FileMode(dirMode), + queryParser: NewQueryParser(), + }, nil +} + +func (v *visibilityArchiver) Archive( + ctx context.Context, + URI archiver.URI, + request *archiverspb.VisibilityRecord, + opts ...archiver.ArchiveOption, +) (err error) { + featureCatalog := archiver.GetFeatureCatalog(opts...) 
+ defer func() { + if err != nil && featureCatalog.NonRetryableError != nil { + err = featureCatalog.NonRetryableError() + } + }() + + logger := archiver.TagLoggerWithArchiveVisibilityRequestAndURI(v.container.Logger, request, URI.String()) + + if err := v.ValidateURI(URI); err != nil { + logger.Error(archiver.ArchiveNonRetryableErrorMsg, tag.ArchivalArchiveFailReason(archiver.ErrReasonInvalidURI), tag.Error(err)) + return err + } + + if err := archiver.ValidateVisibilityArchivalRequest(request); err != nil { + logger.Error(archiver.ArchiveNonRetryableErrorMsg, tag.ArchivalArchiveFailReason(archiver.ErrReasonInvalidArchiveRequest), tag.Error(err)) + return err + } + + dirPath := path.Join(URI.Path(), request.GetNamespaceId()) + if err = mkdirAll(dirPath, v.dirMode); err != nil { + logger.Error(archiver.ArchiveNonRetryableErrorMsg, tag.ArchivalArchiveFailReason(errMakeDirectory), tag.Error(err)) + return err + } + + encodedVisibilityRecord, err := encode(request) + if err != nil { + logger.Error(archiver.ArchiveNonRetryableErrorMsg, tag.ArchivalArchiveFailReason(errEncodeVisibilityRecord), tag.Error(err)) + return err + } + + // The filename has the format: closeTimestamp_hash(runID).visibility + // This format allows the archiver to sort all records without reading the file contents + filename := constructVisibilityFilename(request.CloseTime, request.GetRunId()) + if err := writeFile(path.Join(dirPath, filename), encodedVisibilityRecord, v.fileMode); err != nil { + logger.Error(archiver.ArchiveNonRetryableErrorMsg, tag.ArchivalArchiveFailReason(errWriteFile), tag.Error(err)) + return err + } + + return nil +} + +func (v *visibilityArchiver) Query( + ctx context.Context, + URI archiver.URI, + request *archiver.QueryVisibilityRequest, + saTypeMap searchattribute.NameTypeMap, +) (*archiver.QueryVisibilityResponse, error) { + if err := v.ValidateURI(URI); err != nil { + return nil, serviceerror.NewInvalidArgument(archiver.ErrInvalidURI.Error()) + } + + if err := archiver.ValidateQueryRequest(request); err != nil { + return nil, serviceerror.NewInvalidArgument(archiver.ErrInvalidQueryVisibilityRequest.Error()) + } + + parsedQuery, err := v.queryParser.Parse(request.Query) + if err != nil { + return nil, serviceerror.NewInvalidArgument(err.Error()) + } + + if parsedQuery.emptyResult { + return &archiver.QueryVisibilityResponse{}, nil + } + + return v.query( + ctx, + URI, + &queryVisibilityRequest{ + namespaceID: request.NamespaceID, + pageSize: request.PageSize, + nextPageToken: request.NextPageToken, + parsedQuery: parsedQuery, + }, + saTypeMap, + ) +} + +func (v *visibilityArchiver) query( + ctx context.Context, + URI archiver.URI, + request *queryVisibilityRequest, + saTypeMap searchattribute.NameTypeMap, +) (*archiver.QueryVisibilityResponse, error) { + var token *queryVisibilityToken + if request.nextPageToken != nil { + var err error + token, err = deserializeQueryVisibilityToken(request.nextPageToken) + if err != nil { + return nil, serviceerror.NewInvalidArgument(archiver.ErrNextPageTokenCorrupted.Error()) + } + } + + dirPath := path.Join(URI.Path(), request.namespaceID) + exists, err := directoryExists(dirPath) + if err != nil { + return nil, serviceerror.NewInternal(err.Error()) + } + if !exists { + return &archiver.QueryVisibilityResponse{}, nil + } + + files, err := listFiles(dirPath) + if err != nil { + return nil, serviceerror.NewInternal(err.Error()) + } + + files, err = sortAndFilterFiles(files, token) + if err != nil { + return nil, serviceerror.NewInternal(err.Error()) + } + if 
len(files) == 0 { + return &archiver.QueryVisibilityResponse{}, nil + } + + response := &archiver.QueryVisibilityResponse{} + for idx, file := range files { + encodedRecord, err := readFile(path.Join(dirPath, file)) + if err != nil { + return nil, serviceerror.NewInternal(err.Error()) + } + + record, err := decodeVisibilityRecord(encodedRecord) + if err != nil { + return nil, serviceerror.NewInternal(err.Error()) + } + + if record.CloseTime.Before(request.parsedQuery.earliestCloseTime) { + break + } + + if matchQuery(record, request.parsedQuery) { + executionInfo, err := convertToExecutionInfo(record, saTypeMap) + if err != nil { + return nil, serviceerror.NewInternal(err.Error()) + } + + response.Executions = append(response.Executions, executionInfo) + if len(response.Executions) == request.pageSize { + if idx != len(files) { + newToken := &queryVisibilityToken{ + LastCloseTime: timestamp.TimeValue(record.CloseTime), + LastRunID: record.GetRunId(), + } + encodedToken, err := serializeToken(newToken) + if err != nil { + return nil, serviceerror.NewInternal(err.Error()) + } + response.NextPageToken = encodedToken + } + break + } + } + } + + return response, nil +} + +func (v *visibilityArchiver) ValidateURI(URI archiver.URI) error { + if URI.Scheme() != URIScheme { + return archiver.ErrURISchemeMismatch + } + + return validateDirPath((URI.Path())) +} + +type parsedVisFilename struct { + name string + closeTime time.Time + hashedRunID string +} + +// sortAndFilterFiles sort visibility record file names based on close timestamp (desc) and use hashed runID to break ties. +// if a nextPageToken is give, it only returns filenames that have a smaller close timestamp +func sortAndFilterFiles(filenames []string, token *queryVisibilityToken) ([]string, error) { + var parsedFilenames []*parsedVisFilename + for _, name := range filenames { + pieces := strings.FieldsFunc(name, func(r rune) bool { + return r == '_' || r == '.' 
+ }) + if len(pieces) != 3 { + return nil, fmt.Errorf("failed to parse visibility filename %s", name) + } + + closeTime, err := strconv.ParseInt(pieces[0], 10, 64) + if err != nil { + return nil, fmt.Errorf("failed to parse visibility filename %s", name) + } + parsedFilenames = append(parsedFilenames, &parsedVisFilename{ + name: name, + closeTime: timestamp.UnixOrZeroTime(closeTime), + hashedRunID: pieces[1], + }) + } + + sort.Slice(parsedFilenames, func(i, j int) bool { + if parsedFilenames[i].closeTime.Equal(parsedFilenames[j].closeTime) { + return parsedFilenames[i].hashedRunID > parsedFilenames[j].hashedRunID + } + return parsedFilenames[i].closeTime.After(parsedFilenames[j].closeTime) + }) + + startIdx := 0 + if token != nil { + LastHashedRunID := hash(token.LastRunID) + startIdx = sort.Search(len(parsedFilenames), func(i int) bool { + if parsedFilenames[i].closeTime.Equal(token.LastCloseTime) { + return parsedFilenames[i].hashedRunID < LastHashedRunID + } + return parsedFilenames[i].closeTime.Before(token.LastCloseTime) + }) + } + + if startIdx == len(parsedFilenames) { + return []string{}, nil + } + + var filteredFilenames []string + for _, parsedFilename := range parsedFilenames[startIdx:] { + filteredFilenames = append(filteredFilenames, parsedFilename.name) + } + return filteredFilenames, nil +} + +func matchQuery(record *archiverspb.VisibilityRecord, query *parsedQuery) bool { + if record.CloseTime.Before(query.earliestCloseTime) || record.CloseTime.After(query.latestCloseTime) { + return false + } + if query.workflowID != nil && record.GetWorkflowId() != *query.workflowID { + return false + } + if query.runID != nil && record.GetRunId() != *query.runID { + return false + } + if query.workflowTypeName != nil && record.WorkflowTypeName != *query.workflowTypeName { + return false + } + if query.status != nil && record.Status != *query.status { + return false + } + return true +} + +func convertToExecutionInfo(record *archiverspb.VisibilityRecord, saTypeMap searchattribute.NameTypeMap) (*workflowpb.WorkflowExecutionInfo, error) { + searchAttributes, err := searchattribute.Parse(record.SearchAttributes, &saTypeMap) + if err != nil { + return nil, err + } + + return &workflowpb.WorkflowExecutionInfo{ + Execution: &commonpb.WorkflowExecution{ + WorkflowId: record.GetWorkflowId(), + RunId: record.GetRunId(), + }, + Type: &commonpb.WorkflowType{ + Name: record.WorkflowTypeName, + }, + StartTime: record.StartTime, + ExecutionTime: record.ExecutionTime, + CloseTime: record.CloseTime, + Status: record.Status, + HistoryLength: record.HistoryLength, + Memo: record.Memo, + SearchAttributes: searchAttributes, + }, nil +} diff -Nru temporal-1.21.5-1/src/common/archiver/filestore/visibility_archiver_test.go temporal-1.22.5/src/common/archiver/filestore/visibility_archiver_test.go --- temporal-1.21.5-1/src/common/archiver/filestore/visibility_archiver_test.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/archiver/filestore/visibility_archiver_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,672 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package filestore + +import ( + "context" + "errors" + "os" + "path" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" + "go.temporal.io/api/serviceerror" + workflowpb "go.temporal.io/api/workflow/v1" + + "go.temporal.io/server/common/searchattribute" + + archiverspb "go.temporal.io/server/api/archiver/v1" + "go.temporal.io/server/common/archiver" + "go.temporal.io/server/common/codec" + "go.temporal.io/server/common/config" + "go.temporal.io/server/common/convert" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/payload" + "go.temporal.io/server/common/primitives/timestamp" + "go.temporal.io/server/tests/testutils" +) + +const ( + testWorkflowTypeName = "test-workflow-type" +) + +type visibilityArchiverSuite struct { + *require.Assertions + suite.Suite + + container *archiver.VisibilityBootstrapContainer + testArchivalURI archiver.URI + testQueryDirectory string + visibilityRecords []*archiverspb.VisibilityRecord + + controller *gomock.Controller +} + +func TestVisibilityArchiverSuite(t *testing.T) { + suite.Run(t, new(visibilityArchiverSuite)) +} + +func (s *visibilityArchiverSuite) SetupSuite() { + var err error + s.testQueryDirectory, err = os.MkdirTemp("", "TestQuery") + s.Require().NoError(err) + s.setupVisibilityDirectory() + s.testArchivalURI, err = archiver.NewURI("file:///a/b/c") + s.Require().NoError(err) +} + +func (s *visibilityArchiverSuite) TearDownSuite() { + if err := os.RemoveAll(s.testQueryDirectory); err != nil { + s.Fail("Failed to remove test query directory %v: %v", s.testQueryDirectory, err) + } +} + +func (s *visibilityArchiverSuite) SetupTest() { + s.Assertions = require.New(s.T()) + s.container = &archiver.VisibilityBootstrapContainer{ + Logger: log.NewNoopLogger(), + } + s.controller = gomock.NewController(s.T()) +} + +func (s *visibilityArchiverSuite) TearDownTest() { + s.controller.Finish() +} + +func (s *visibilityArchiverSuite) TestValidateURI() { + testCases := []struct { + URI string + expectedErr error + }{ + { + URI: "wrongscheme:///a/b/c", + expectedErr: archiver.ErrURISchemeMismatch, + }, + { + URI: "file://", + expectedErr: errEmptyDirectoryPath, + }, + { + URI: "file:///a/b/c", + expectedErr: nil, + }, + } + + visibilityArchiver := s.newTestVisibilityArchiver() + for _, tc := range testCases 
{ + URI, err := archiver.NewURI(tc.URI) + s.NoError(err) + s.Equal(tc.expectedErr, visibilityArchiver.ValidateURI(URI)) + } +} + +func (s *visibilityArchiverSuite) TestArchive_Fail_InvalidURI() { + visibilityArchiver := s.newTestVisibilityArchiver() + URI, err := archiver.NewURI("wrongscheme://") + s.NoError(err) + request := &archiverspb.VisibilityRecord{ + Namespace: testNamespace, + NamespaceId: testNamespaceID, + WorkflowId: testWorkflowID, + RunId: testRunID, + WorkflowTypeName: testWorkflowTypeName, + StartTime: timestamp.TimeNowPtrUtc(), + ExecutionTime: nil, // workflow without backoff + CloseTime: timestamp.TimeNowPtrUtc(), + Status: enumspb.WORKFLOW_EXECUTION_STATUS_FAILED, + HistoryLength: int64(101), + } + err = visibilityArchiver.Archive(context.Background(), URI, request) + s.Error(err) +} + +func (s *visibilityArchiverSuite) TestArchive_Fail_InvalidRequest() { + visibilityArchiver := s.newTestVisibilityArchiver() + err := visibilityArchiver.Archive(context.Background(), s.testArchivalURI, &archiverspb.VisibilityRecord{}) + s.Error(err) +} + +func (s *visibilityArchiverSuite) TestArchive_Fail_NonRetryableErrorOption() { + visibilityArchiver := s.newTestVisibilityArchiver() + nonRetryableErr := errors.New("some non-retryable error") + err := visibilityArchiver.Archive( + context.Background(), + s.testArchivalURI, + &archiverspb.VisibilityRecord{}, + archiver.GetNonRetryableErrorOption(nonRetryableErr), + ) + s.Equal(nonRetryableErr, err) +} + +func (s *visibilityArchiverSuite) TestArchive_Success() { + dir := testutils.MkdirTemp(s.T(), "", "TestVisibilityArchive") + + visibilityArchiver := s.newTestVisibilityArchiver() + closeTimestamp := timestamp.TimeNowPtrUtc() + request := &archiverspb.VisibilityRecord{ + NamespaceId: testNamespaceID, + Namespace: testNamespace, + WorkflowId: testWorkflowID, + RunId: testRunID, + WorkflowTypeName: testWorkflowTypeName, + StartTime: timestamp.TimePtr(closeTimestamp.Add(-time.Hour)), + ExecutionTime: nil, // workflow without backoff + CloseTime: closeTimestamp, + Status: enumspb.WORKFLOW_EXECUTION_STATUS_FAILED, + HistoryLength: int64(101), + Memo: &commonpb.Memo{ + Fields: map[string]*commonpb.Payload{ + "testFields": payload.EncodeBytes([]byte{1, 2, 3}), + }, + }, + SearchAttributes: map[string]string{ + "testAttribute": "456", + }, + } + URI, err := archiver.NewURI("file://" + dir) + s.NoError(err) + err = visibilityArchiver.Archive(context.Background(), URI, request) + s.NoError(err) + + expectedFilename := constructVisibilityFilename(closeTimestamp, testRunID) + filepath := path.Join(dir, testNamespaceID, expectedFilename) + s.assertFileExists(filepath) + + data, err := readFile(filepath) + s.NoError(err) + + archivedRecord := &archiverspb.VisibilityRecord{} + encoder := codec.NewJSONPBEncoder() + err = encoder.Decode(data, archivedRecord) + s.NoError(err) + s.Equal(request, archivedRecord) +} + +func (s *visibilityArchiverSuite) TestMatchQuery() { + testCases := []struct { + query *parsedQuery + record *archiverspb.VisibilityRecord + shouldMatch bool + }{ + { + query: &parsedQuery{ + earliestCloseTime: time.Unix(0, 1000), + latestCloseTime: time.Unix(0, 12345), + }, + record: &archiverspb.VisibilityRecord{ + CloseTime: timestamp.UnixOrZeroTimePtr(1999), + }, + shouldMatch: true, + }, + { + query: &parsedQuery{ + earliestCloseTime: time.Unix(0, 1000), + latestCloseTime: time.Unix(0, 12345), + }, + record: &archiverspb.VisibilityRecord{ + CloseTime: timestamp.UnixOrZeroTimePtr(999), + }, + shouldMatch: false, + }, + { + query: 
&parsedQuery{ + earliestCloseTime: time.Unix(0, 1000), + latestCloseTime: time.Unix(0, 12345), + workflowID: convert.StringPtr("random workflowID"), + }, + record: &archiverspb.VisibilityRecord{ + CloseTime: timestamp.UnixOrZeroTimePtr(2000), + }, + shouldMatch: false, + }, + { + query: &parsedQuery{ + earliestCloseTime: time.Unix(0, 1000), + latestCloseTime: time.Unix(0, 12345), + workflowID: convert.StringPtr("random workflowID"), + runID: convert.StringPtr("random runID"), + }, + record: &archiverspb.VisibilityRecord{ + CloseTime: timestamp.UnixOrZeroTimePtr(12345), + WorkflowId: "random workflowID", + RunId: "random runID", + WorkflowTypeName: "random type name", + }, + shouldMatch: true, + }, + { + query: &parsedQuery{ + earliestCloseTime: time.Unix(0, 1000), + latestCloseTime: time.Unix(0, 12345), + workflowTypeName: convert.StringPtr("some random type name"), + }, + record: &archiverspb.VisibilityRecord{ + CloseTime: timestamp.UnixOrZeroTimePtr(12345), + }, + shouldMatch: false, + }, + { + query: &parsedQuery{ + earliestCloseTime: time.Unix(0, 1000), + latestCloseTime: time.Unix(0, 12345), + workflowTypeName: convert.StringPtr("some random type name"), + status: toWorkflowExecutionStatusPtr(enumspb.WORKFLOW_EXECUTION_STATUS_CONTINUED_AS_NEW), + }, + record: &archiverspb.VisibilityRecord{ + CloseTime: timestamp.UnixOrZeroTimePtr(12345), + Status: enumspb.WORKFLOW_EXECUTION_STATUS_CONTINUED_AS_NEW, + WorkflowTypeName: "some random type name", + }, + shouldMatch: true, + }, + } + + for _, tc := range testCases { + s.Equal(tc.shouldMatch, matchQuery(tc.record, tc.query)) + } +} + +func (s *visibilityArchiverSuite) TestSortAndFilterFiles() { + testCases := []struct { + filenames []string + token *queryVisibilityToken + expectedResult []string + }{ + { + filenames: []string{"9_12345.vis", "5_0.vis", "9_54321.vis", "1000_654.vis", "1000_78.vis"}, + expectedResult: []string{"1000_78.vis", "1000_654.vis", "9_54321.vis", "9_12345.vis", "5_0.vis"}, + }, + { + filenames: []string{"9_12345.vis", "5_0.vis", "9_54321.vis", "1000_654.vis", "1000_78.vis"}, + token: &queryVisibilityToken{ + LastCloseTime: time.Unix(0, 3), + }, + expectedResult: []string{}, + }, + { + filenames: []string{"9_12345.vis", "5_0.vis", "9_54321.vis", "1000_654.vis", "1000_78.vis"}, + token: &queryVisibilityToken{ + LastCloseTime: time.Unix(0, 999), + }, + expectedResult: []string{"9_54321.vis", "9_12345.vis", "5_0.vis"}, + }, + { + filenames: []string{"9_12345.vis", "5_0.vis", "9_54321.vis", "1000_654.vis", "1000_78.vis"}, + token: &queryVisibilityToken{ + LastCloseTime: time.Unix(0, 5).UTC(), + }, + expectedResult: []string{"5_0.vis"}, + }, + } + + for i, tc := range testCases { + result, err := sortAndFilterFiles(tc.filenames, tc.token) + s.NoError(err, "case %d", i) + s.Equal(tc.expectedResult, result, "case %d", i) + } +} + +func (s *visibilityArchiverSuite) TestQuery_Fail_InvalidURI() { + visibilityArchiver := s.newTestVisibilityArchiver() + URI, err := archiver.NewURI("wrongscheme://") + s.NoError(err) + request := &archiver.QueryVisibilityRequest{ + NamespaceID: testNamespaceID, + PageSize: 1, + } + response, err := visibilityArchiver.Query(context.Background(), URI, request, searchattribute.TestNameTypeMap) + s.Error(err) + s.Nil(response) +} + +func (s *visibilityArchiverSuite) TestQuery_Fail_InvalidRequest() { + visibilityArchiver := s.newTestVisibilityArchiver() + response, err := visibilityArchiver.Query(context.Background(), s.testArchivalURI, &archiver.QueryVisibilityRequest{}, searchattribute.TestNameTypeMap) 
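TestSortAndFilterFiles above exercises the naming convention noted in the Archive comment of visibility_archiver.go: each record is written as a close timestamp and a hashed run ID joined by an underscore, so a directory listing can be ordered by close time (newest first, hashed run ID breaking ties) without reading any file contents. The following is a minimal, self-contained sketch of that ordering; parseName is an illustrative stand-in, not the package's own constructVisibilityFilename or hash helpers.

package main

import (
	"fmt"
	"sort"
	"strconv"
	"strings"
)

// parseName splits an archived filename of the form "closeTime_hashedRunID.vis"
// into its close timestamp and hashed run ID (illustrative helper only).
func parseName(name string) (int64, string, error) {
	pieces := strings.FieldsFunc(name, func(r rune) bool { return r == '_' || r == '.' })
	if len(pieces) != 3 {
		return 0, "", fmt.Errorf("unexpected filename %q", name)
	}
	closeTime, err := strconv.ParseInt(pieces[0], 10, 64)
	return closeTime, pieces[1], err
}

func main() {
	names := []string{"9_12345.vis", "5_0.vis", "9_54321.vis", "1000_654.vis", "1000_78.vis"}
	sort.Slice(names, func(i, j int) bool {
		ti, hi, _ := parseName(names[i])
		tj, hj, _ := parseName(names[j])
		if ti == tj {
			return hi > hj // ties broken by the hashed run ID, descending
		}
		return ti > tj // newest close time first
	})
	fmt.Println(names) // [1000_78.vis 1000_654.vis 9_54321.vis 9_12345.vis 5_0.vis]
}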
+ s.Error(err) + s.Nil(response) +} + +func (s *visibilityArchiverSuite) TestQuery_Fail_InvalidQuery() { + visibilityArchiver := s.newTestVisibilityArchiver() + mockParser := NewMockQueryParser(s.controller) + mockParser.EXPECT().Parse(gomock.Any()).Return(nil, errors.New("invalid query")) + visibilityArchiver.queryParser = mockParser + response, err := visibilityArchiver.Query(context.Background(), s.testArchivalURI, &archiver.QueryVisibilityRequest{ + NamespaceID: "some random namespaceID", + PageSize: 10, + Query: "some invalid query", + }, searchattribute.TestNameTypeMap) + s.Error(err) + s.Nil(response) +} + +func (s *visibilityArchiverSuite) TestQuery_Success_DirectoryNotExist() { + visibilityArchiver := s.newTestVisibilityArchiver() + mockParser := NewMockQueryParser(s.controller) + mockParser.EXPECT().Parse(gomock.Any()).Return(&parsedQuery{ + earliestCloseTime: time.Unix(0, 1), + latestCloseTime: time.Unix(0, 101), + }, nil) + visibilityArchiver.queryParser = mockParser + request := &archiver.QueryVisibilityRequest{ + NamespaceID: testNamespaceID, + Query: "parsed by mockParser", + PageSize: 1, + } + response, err := visibilityArchiver.Query(context.Background(), s.testArchivalURI, request, searchattribute.TestNameTypeMap) + s.NoError(err) + s.NotNil(response) + s.Empty(response.Executions) + s.Empty(response.NextPageToken) +} + +func (s *visibilityArchiverSuite) TestQuery_Fail_InvalidToken() { + visibilityArchiver := s.newTestVisibilityArchiver() + mockParser := NewMockQueryParser(s.controller) + mockParser.EXPECT().Parse(gomock.Any()).Return(&parsedQuery{ + earliestCloseTime: time.Unix(0, 1), + latestCloseTime: time.Unix(0, 101), + }, nil) + visibilityArchiver.queryParser = mockParser + request := &archiver.QueryVisibilityRequest{ + NamespaceID: testNamespaceID, + Query: "parsed by mockParser", + PageSize: 1, + NextPageToken: []byte{1, 2, 3}, + } + response, err := visibilityArchiver.Query(context.Background(), s.testArchivalURI, request, searchattribute.TestNameTypeMap) + s.Error(err) + s.Nil(response) +} + +func (s *visibilityArchiverSuite) TestQuery_Success_NoNextPageToken() { + visibilityArchiver := s.newTestVisibilityArchiver() + mockParser := NewMockQueryParser(s.controller) + mockParser.EXPECT().Parse(gomock.Any()).Return(&parsedQuery{ + earliestCloseTime: time.Unix(0, 1), + latestCloseTime: time.Unix(0, 10001), + workflowID: convert.StringPtr(testWorkflowID), + }, nil) + visibilityArchiver.queryParser = mockParser + request := &archiver.QueryVisibilityRequest{ + NamespaceID: testNamespaceID, + PageSize: 10, + Query: "parsed by mockParser", + } + URI, err := archiver.NewURI("file://" + s.testQueryDirectory) + s.NoError(err) + response, err := visibilityArchiver.Query(context.Background(), URI, request, searchattribute.TestNameTypeMap) + s.NoError(err) + s.NotNil(response) + s.Nil(response.NextPageToken) + s.Len(response.Executions, 1) + ei, err := convertToExecutionInfo(s.visibilityRecords[0], searchattribute.TestNameTypeMap) + s.NoError(err) + s.Equal(ei, response.Executions[0]) +} + +func (s *visibilityArchiverSuite) TestQuery_Success_SmallPageSize() { + visibilityArchiver := s.newTestVisibilityArchiver() + mockParser := NewMockQueryParser(s.controller) + mockParser.EXPECT().Parse(gomock.Any()).Return(&parsedQuery{ + earliestCloseTime: time.Unix(0, 1), + latestCloseTime: time.Unix(0, 10001), + status: toWorkflowExecutionStatusPtr(enumspb.WORKFLOW_EXECUTION_STATUS_FAILED), + }, nil).AnyTimes() + visibilityArchiver.queryParser = mockParser + request := 
&archiver.QueryVisibilityRequest{ + NamespaceID: testNamespaceID, + PageSize: 2, + Query: "parsed by mockParser", + } + URI, err := archiver.NewURI("file://" + s.testQueryDirectory) + s.NoError(err) + response, err := visibilityArchiver.Query(context.Background(), URI, request, searchattribute.TestNameTypeMap) + s.NoError(err) + s.NotNil(response) + s.NotNil(response.NextPageToken) + s.Len(response.Executions, 2) + ei, err := convertToExecutionInfo(s.visibilityRecords[0], searchattribute.TestNameTypeMap) + s.NoError(err) + s.Equal(ei, response.Executions[0]) + ei, err = convertToExecutionInfo(s.visibilityRecords[1], searchattribute.TestNameTypeMap) + s.NoError(err) + s.Equal(ei, response.Executions[1]) + + request.NextPageToken = response.NextPageToken + response, err = visibilityArchiver.Query(context.Background(), URI, request, searchattribute.TestNameTypeMap) + s.NoError(err) + s.NotNil(response) + s.Nil(response.NextPageToken) + s.Len(response.Executions, 1) + ei, err = convertToExecutionInfo(s.visibilityRecords[3], searchattribute.TestNameTypeMap) + s.NoError(err) + s.Equal(ei, response.Executions[0]) +} + +func (s *visibilityArchiverSuite) TestArchiveAndQuery() { + dir := testutils.MkdirTemp(s.T(), "", "TestArchiveAndQuery") + + visibilityArchiver := s.newTestVisibilityArchiver() + mockParser := NewMockQueryParser(s.controller) + mockParser.EXPECT().Parse(gomock.Any()).Return(&parsedQuery{ + earliestCloseTime: time.Unix(0, 10), + latestCloseTime: time.Unix(0, 10001), + status: toWorkflowExecutionStatusPtr(enumspb.WORKFLOW_EXECUTION_STATUS_FAILED), + }, nil).AnyTimes() + visibilityArchiver.queryParser = mockParser + URI, err := archiver.NewURI("file://" + dir) + s.NoError(err) + for _, record := range s.visibilityRecords { + err := visibilityArchiver.Archive(context.Background(), URI, (*archiverspb.VisibilityRecord)(record)) + s.NoError(err) + } + + request := &archiver.QueryVisibilityRequest{ + NamespaceID: testNamespaceID, + PageSize: 1, + Query: "parsed by mockParser", + } + executions := []*workflowpb.WorkflowExecutionInfo{} + for len(executions) == 0 || request.NextPageToken != nil { + response, err := visibilityArchiver.Query(context.Background(), URI, request, searchattribute.TestNameTypeMap) + s.NoError(err) + s.NotNil(response) + executions = append(executions, response.Executions...) 
+ request.NextPageToken = response.NextPageToken + } + s.Len(executions, 2) + ei, err := convertToExecutionInfo(s.visibilityRecords[0], searchattribute.TestNameTypeMap) + s.NoError(err) + s.Equal(ei, executions[0]) + ei, err = convertToExecutionInfo(s.visibilityRecords[1], searchattribute.TestNameTypeMap) + s.NoError(err) + s.Equal(ei, executions[1]) +} + +func (s *visibilityArchiverSuite) TestQuery_EmptyQuery_InvalidNamespace() { + URI := s.testArchivalURI + + visibilityArchiver := s.newTestVisibilityArchiver() + mockParser := NewMockQueryParser(s.controller) + mockParser.EXPECT().Parse(gomock.Any()).Return(&parsedQuery{ + earliestCloseTime: time.Unix(0, 10), + latestCloseTime: time.Unix(0, 10001), + status: toWorkflowExecutionStatusPtr(enumspb.WORKFLOW_EXECUTION_STATUS_FAILED), + }, nil).AnyTimes() + visibilityArchiver.queryParser = mockParser + req := &archiver.QueryVisibilityRequest{ + NamespaceID: "", + PageSize: 1, + NextPageToken: nil, + Query: "", + } + _, err := visibilityArchiver.Query(context.Background(), URI, req, searchattribute.TestNameTypeMap) + + var svcErr *serviceerror.InvalidArgument + + s.ErrorAs(err, &svcErr) +} + +func (s *visibilityArchiverSuite) TestQuery_EmptyQuery_ZeroPageSize() { + visibilityArchiver := s.newTestVisibilityArchiver() + + req := &archiver.QueryVisibilityRequest{ + NamespaceID: testNamespaceID, + PageSize: 0, + NextPageToken: nil, + Query: "", + } + _, err := visibilityArchiver.Query(context.Background(), s.testArchivalURI, req, searchattribute.TestNameTypeMap) + + var svcErr *serviceerror.InvalidArgument + + s.ErrorAs(err, &svcErr) +} + +func (s *visibilityArchiverSuite) TestQuery_EmptyQuery_Pagination() { + dir := testutils.MkdirTemp(s.T(), "", "TestQuery_EmptyQuery_Pagination") + + visibilityArchiver := s.newTestVisibilityArchiver() + URI, err := archiver.NewURI("file://" + dir) + s.NoError(err) + for _, record := range s.visibilityRecords { + err := visibilityArchiver.Archive(context.Background(), URI, record) + s.NoError(err) + } + + request := &archiver.QueryVisibilityRequest{ + NamespaceID: testNamespaceID, + PageSize: 1, + Query: "", + } + var executions []*workflowpb.WorkflowExecutionInfo + for len(executions) == 0 || request.NextPageToken != nil { + response, err := visibilityArchiver.Query(context.Background(), URI, request, searchattribute.TestNameTypeMap) + s.NoError(err) + s.NotNil(response) + executions = append(executions, response.Executions...) 
+ request.NextPageToken = response.NextPageToken + } + s.Len(executions, 4) +} + +func (s *visibilityArchiverSuite) newTestVisibilityArchiver() *visibilityArchiver { + config := &config.FilestoreArchiver{ + FileMode: testFileModeStr, + DirMode: testDirModeStr, + } + archiver, err := NewVisibilityArchiver(s.container, config) + s.NoError(err) + return archiver.(*visibilityArchiver) +} + +func (s *visibilityArchiverSuite) setupVisibilityDirectory() { + s.visibilityRecords = []*archiverspb.VisibilityRecord{ + { + NamespaceId: testNamespaceID, + Namespace: testNamespace, + WorkflowId: testWorkflowID, + RunId: testRunID, + WorkflowTypeName: testWorkflowTypeName, + StartTime: timestamp.UnixOrZeroTimePtr(1), + CloseTime: timestamp.UnixOrZeroTimePtr(10000), + Status: enumspb.WORKFLOW_EXECUTION_STATUS_FAILED, + HistoryLength: 101, + }, + { + NamespaceId: testNamespaceID, + Namespace: testNamespace, + WorkflowId: "some random workflow ID", + RunId: "some random run ID", + WorkflowTypeName: testWorkflowTypeName, + StartTime: timestamp.UnixOrZeroTimePtr(2), + ExecutionTime: nil, + CloseTime: timestamp.UnixOrZeroTimePtr(1000), + Status: enumspb.WORKFLOW_EXECUTION_STATUS_FAILED, + HistoryLength: 123, + }, + { + NamespaceId: testNamespaceID, + Namespace: testNamespace, + WorkflowId: "another workflow ID", + RunId: "another run ID", + WorkflowTypeName: testWorkflowTypeName, + StartTime: timestamp.UnixOrZeroTimePtr(3), + ExecutionTime: nil, + CloseTime: timestamp.UnixOrZeroTimePtr(10), + Status: enumspb.WORKFLOW_EXECUTION_STATUS_CONTINUED_AS_NEW, + HistoryLength: 456, + }, + { + NamespaceId: testNamespaceID, + Namespace: testNamespace, + WorkflowId: "and another workflow ID", + RunId: "and another run ID", + WorkflowTypeName: testWorkflowTypeName, + StartTime: timestamp.UnixOrZeroTimePtr(3), + ExecutionTime: nil, + CloseTime: timestamp.UnixOrZeroTimePtr(5), + Status: enumspb.WORKFLOW_EXECUTION_STATUS_FAILED, + HistoryLength: 456, + }, + { + NamespaceId: "some random namespace ID", + Namespace: "some random namespace name", + WorkflowId: "another workflow ID", + RunId: "another run ID", + WorkflowTypeName: testWorkflowTypeName, + StartTime: timestamp.UnixOrZeroTimePtr(3), + ExecutionTime: nil, + CloseTime: timestamp.UnixOrZeroTimePtr(10000), + Status: enumspb.WORKFLOW_EXECUTION_STATUS_CONTINUED_AS_NEW, + HistoryLength: 456, + }, + } + + for _, record := range s.visibilityRecords { + s.writeVisibilityRecordForQueryTest(record) + } +} + +func (s *visibilityArchiverSuite) writeVisibilityRecordForQueryTest(record *archiverspb.VisibilityRecord) { + data, err := encode(record) + s.Require().NoError(err) + filename := constructVisibilityFilename(record.CloseTime, record.GetRunId()) + s.Require().NoError(os.MkdirAll(path.Join(s.testQueryDirectory, record.GetNamespaceId()), testDirMode)) + err = writeFile(path.Join(s.testQueryDirectory, record.GetNamespaceId(), filename), data, testFileMode) + s.Require().NoError(err) +} + +func (s *visibilityArchiverSuite) assertFileExists(filepath string) { + exists, err := fileExists(filepath) + s.NoError(err) + s.True(exists) +} diff -Nru temporal-1.21.5-1/src/common/archiver/gcloud/connector/clientDelegate.go temporal-1.22.5/src/common/archiver/gcloud/connector/clientDelegate.go --- temporal-1.21.5-1/src/common/archiver/gcloud/connector/clientDelegate.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/archiver/gcloud/connector/clientDelegate.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,228 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal 
Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -//go:generate mockgen -copyright_file ../../../../LICENSE -package $GOPACKAGE -source $GOFILE -destination clientDelegate_mock.go - -package connector - -import ( - "context" - "os" - - "cloud.google.com/go/storage" - "golang.org/x/oauth2/google" - "google.golang.org/api/option" -) - -type ( - // GcloudStorageClient is an interface that expose some methods from gcloud storage client - GcloudStorageClient interface { - Bucket(URI string) BucketHandleWrapper - } - - clientDelegate struct { - nativeClient *storage.Client - } -) - -type ( - // BucketHandleWrapper is an interface that expose some methods from gcloud storage bucket - BucketHandleWrapper interface { - Object(name string) ObjectHandleWrapper - Objects(ctx context.Context, q *storage.Query) ObjectIteratorWrapper - Attrs(ctx context.Context) (*storage.BucketAttrs, error) - } - - bucketDelegate struct { - bucket *storage.BucketHandle - } -) - -type ( - // ObjectHandleWrapper is an interface that expose some methods from gcloud storage object - ObjectHandleWrapper interface { - NewWriter(ctx context.Context) WriterWrapper - NewReader(ctx context.Context) (ReaderWrapper, error) - Attrs(ctx context.Context) (*storage.ObjectAttrs, error) - } - - objectDelegate struct { - object *storage.ObjectHandle - } -) - -type ( - // WriterWrapper is an interface that expose some methods from gcloud storage writer - WriterWrapper interface { - Close() error - Write(p []byte) (n int, err error) - CloseWithError(err error) error - } - - writerDelegate struct { - writer *storage.Writer - } -) - -type ( - // ReaderWrapper is an interface that expose some methods from gcloud storage reader - ReaderWrapper interface { - Close() error - Read(p []byte) (int, error) - } - - readerDelegate struct { - reader *storage.Reader - } -) - -type ( - // ObjectIteratorWrapper is an interface that expose some methods from gcloud storage objectIterator - ObjectIteratorWrapper interface { - Next() (*storage.ObjectAttrs, error) - } -) - -func newDefaultClientDelegate(ctx context.Context) (*clientDelegate, error) { - nativeClient, err := storage.NewClient(ctx) - return &clientDelegate{nativeClient: nativeClient}, err -} - -func newClientDelegateWithCredentials(ctx context.Context, credentialsPath string) (*clientDelegate, error) { - - jsonKey, err := os.ReadFile(credentialsPath) - if err != nil { - return 
newDefaultClientDelegate(ctx) - } - - conf, err := google.JWTConfigFromJSON(jsonKey, storage.ScopeFullControl) - if err != nil { - return newDefaultClientDelegate(ctx) - } - - nativeClient, err := storage.NewClient(ctx, option.WithTokenSource(conf.TokenSource(ctx))) - return &clientDelegate{nativeClient: nativeClient}, err -} - -// Bucket returns a BucketHandle, which provides operations on the named bucket. -// This call does not perform any network operations. -// -// The supplied name must contain only lowercase letters, numbers, dashes, -// underscores, and dots. The full specification for valid bucket names can be -// found at: -// -// https://cloud.google.com/storage/docs/bucket-naming -func (c *clientDelegate) Bucket(bucketName string) BucketHandleWrapper { - return &bucketDelegate{bucket: c.nativeClient.Bucket(bucketName)} -} - -// Object returns an ObjectHandle, which provides operations on the named object. -// This call does not perform any network operations. -// -// name must consist entirely of valid UTF-8-encoded runes. The full specification -// for valid object names can be found at: -// -// https://cloud.google.com/storage/docs/bucket-naming -func (b *bucketDelegate) Object(name string) ObjectHandleWrapper { - return &objectDelegate{object: b.bucket.Object(name)} -} - -// Objects returns an iterator over the objects in the bucket that match the Query q. -// If q is nil, no filtering is done. -func (b *bucketDelegate) Objects(ctx context.Context, q *storage.Query) ObjectIteratorWrapper { - return b.bucket.Objects(ctx, q) -} - -// Attrs returns the metadata for the bucket. -func (b *bucketDelegate) Attrs(ctx context.Context) (*storage.BucketAttrs, error) { - return b.bucket.Attrs(ctx) -} - -// NewWriter returns a storage Writer that writes to the GCS object -// associated with this ObjectHandle. -// -// A new object will be created unless an object with this name already exists. -// Otherwise any previous object with the same name will be replaced. -// The object will not be available (and any previous object will remain) -// until Close has been called. -// -// Attributes can be set on the object by modifying the returned Writer's -// ObjectAttrs field before the first call to Write. If no ContentType -// attribute is specified, the content type will be automatically sniffed -// using net/http.DetectContentType. -// -// It is the caller's responsibility to call Close when writing is done. To -// stop writing without saving the data, cancel the context. -func (o *objectDelegate) NewWriter(ctx context.Context) WriterWrapper { - return &writerDelegate{writer: o.object.NewWriter(ctx)} -} - -// NewReader creates a new Reader to read the contents of the -// object. -// ErrObjectNotExist will be returned if the object is not found. -// -// The caller must call Close on the returned Reader when done reading. -func (o *objectDelegate) NewReader(ctx context.Context) (ReaderWrapper, error) { - r, err := o.object.NewReader(ctx) - return &readerDelegate{reader: r}, err -} - -func (o *objectDelegate) Attrs(ctx context.Context) (attrs *storage.ObjectAttrs, err error) { - return o.object.Attrs(ctx) -} - -// Close completes the write operation and flushes any buffered data. -// If Close doesn't return an error, metadata about the written object -// can be retrieved by calling Attrs. -func (w *writerDelegate) Close() error { - return w.writer.Close() -} - -// Write appends to w. It implements the io.Writer interface. 
-// -// Since writes happen asynchronously, Write may return a nil -// error even though the write failed (or will fail). Always -// use the error returned from Writer.Close to determine if -// the upload was successful. -func (w *writerDelegate) Write(p []byte) (int, error) { - return w.writer.Write(p) -} - -// CloseWithError aborts the write operation with the provided error. -// CloseWithError always returns nil. -// -// Deprecated: cancel the context passed to NewWriter instead. -func (w *writerDelegate) CloseWithError(err error) error { - return w.writer.CloseWithError(err) -} - -// Close closes the Reader. It must be called when done reading. -func (r *readerDelegate) Close() error { - return r.reader.Close() -} - -func (r *readerDelegate) Read(p []byte) (int, error) { - return r.reader.Read(p) - -} diff -Nru temporal-1.21.5-1/src/common/archiver/gcloud/connector/clientDelegate_mock.go temporal-1.22.5/src/common/archiver/gcloud/connector/clientDelegate_mock.go --- temporal-1.21.5-1/src/common/archiver/gcloud/connector/clientDelegate_mock.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/archiver/gcloud/connector/clientDelegate_mock.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,363 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Code generated by MockGen. DO NOT EDIT. -// Source: clientDelegate.go - -// Package connector is a generated GoMock package. -package connector - -import ( - context "context" - reflect "reflect" - - storage "cloud.google.com/go/storage" - gomock "github.com/golang/mock/gomock" -) - -// MockGcloudStorageClient is a mock of GcloudStorageClient interface. -type MockGcloudStorageClient struct { - ctrl *gomock.Controller - recorder *MockGcloudStorageClientMockRecorder -} - -// MockGcloudStorageClientMockRecorder is the mock recorder for MockGcloudStorageClient. -type MockGcloudStorageClientMockRecorder struct { - mock *MockGcloudStorageClient -} - -// NewMockGcloudStorageClient creates a new mock instance. -func NewMockGcloudStorageClient(ctrl *gomock.Controller) *MockGcloudStorageClient { - mock := &MockGcloudStorageClient{ctrl: ctrl} - mock.recorder = &MockGcloudStorageClientMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. 
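The mockgen output above (deleted here and regenerated for the renamed source file) is consumed through the usual gomock EXPECT pattern, the same way the filestore tests earlier in this diff drive NewMockQueryParser. A rough usage sketch follows, assuming a test in a connector_test package; the bucket name is arbitrary.

package connector_test

import (
	"testing"

	"github.com/golang/mock/gomock"

	"go.temporal.io/server/common/archiver/gcloud/connector"
)

func TestBucketExpectation(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	// The generated mocks satisfy the wrapper interfaces, so code under test
	// never has to reach a real GCS endpoint.
	client := connector.NewMockGcloudStorageClient(ctrl)
	bucket := connector.NewMockBucketHandleWrapper(ctrl)
	client.EXPECT().Bucket("example-archival-bucket").Return(bucket)

	if got := client.Bucket("example-archival-bucket"); got != bucket {
		t.Fatal("expected the stubbed bucket handle")
	}
}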
-func (m *MockGcloudStorageClient) EXPECT() *MockGcloudStorageClientMockRecorder { - return m.recorder -} - -// Bucket mocks base method. -func (m *MockGcloudStorageClient) Bucket(URI string) BucketHandleWrapper { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Bucket", URI) - ret0, _ := ret[0].(BucketHandleWrapper) - return ret0 -} - -// Bucket indicates an expected call of Bucket. -func (mr *MockGcloudStorageClientMockRecorder) Bucket(URI interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Bucket", reflect.TypeOf((*MockGcloudStorageClient)(nil).Bucket), URI) -} - -// MockBucketHandleWrapper is a mock of BucketHandleWrapper interface. -type MockBucketHandleWrapper struct { - ctrl *gomock.Controller - recorder *MockBucketHandleWrapperMockRecorder -} - -// MockBucketHandleWrapperMockRecorder is the mock recorder for MockBucketHandleWrapper. -type MockBucketHandleWrapperMockRecorder struct { - mock *MockBucketHandleWrapper -} - -// NewMockBucketHandleWrapper creates a new mock instance. -func NewMockBucketHandleWrapper(ctrl *gomock.Controller) *MockBucketHandleWrapper { - mock := &MockBucketHandleWrapper{ctrl: ctrl} - mock.recorder = &MockBucketHandleWrapperMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockBucketHandleWrapper) EXPECT() *MockBucketHandleWrapperMockRecorder { - return m.recorder -} - -// Attrs mocks base method. -func (m *MockBucketHandleWrapper) Attrs(ctx context.Context) (*storage.BucketAttrs, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Attrs", ctx) - ret0, _ := ret[0].(*storage.BucketAttrs) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Attrs indicates an expected call of Attrs. -func (mr *MockBucketHandleWrapperMockRecorder) Attrs(ctx interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Attrs", reflect.TypeOf((*MockBucketHandleWrapper)(nil).Attrs), ctx) -} - -// Object mocks base method. -func (m *MockBucketHandleWrapper) Object(name string) ObjectHandleWrapper { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Object", name) - ret0, _ := ret[0].(ObjectHandleWrapper) - return ret0 -} - -// Object indicates an expected call of Object. -func (mr *MockBucketHandleWrapperMockRecorder) Object(name interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Object", reflect.TypeOf((*MockBucketHandleWrapper)(nil).Object), name) -} - -// Objects mocks base method. -func (m *MockBucketHandleWrapper) Objects(ctx context.Context, q *storage.Query) ObjectIteratorWrapper { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Objects", ctx, q) - ret0, _ := ret[0].(ObjectIteratorWrapper) - return ret0 -} - -// Objects indicates an expected call of Objects. -func (mr *MockBucketHandleWrapperMockRecorder) Objects(ctx, q interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Objects", reflect.TypeOf((*MockBucketHandleWrapper)(nil).Objects), ctx, q) -} - -// MockObjectHandleWrapper is a mock of ObjectHandleWrapper interface. -type MockObjectHandleWrapper struct { - ctrl *gomock.Controller - recorder *MockObjectHandleWrapperMockRecorder -} - -// MockObjectHandleWrapperMockRecorder is the mock recorder for MockObjectHandleWrapper. -type MockObjectHandleWrapperMockRecorder struct { - mock *MockObjectHandleWrapper -} - -// NewMockObjectHandleWrapper creates a new mock instance. 
-func NewMockObjectHandleWrapper(ctrl *gomock.Controller) *MockObjectHandleWrapper { - mock := &MockObjectHandleWrapper{ctrl: ctrl} - mock.recorder = &MockObjectHandleWrapperMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockObjectHandleWrapper) EXPECT() *MockObjectHandleWrapperMockRecorder { - return m.recorder -} - -// Attrs mocks base method. -func (m *MockObjectHandleWrapper) Attrs(ctx context.Context) (*storage.ObjectAttrs, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Attrs", ctx) - ret0, _ := ret[0].(*storage.ObjectAttrs) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Attrs indicates an expected call of Attrs. -func (mr *MockObjectHandleWrapperMockRecorder) Attrs(ctx interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Attrs", reflect.TypeOf((*MockObjectHandleWrapper)(nil).Attrs), ctx) -} - -// NewReader mocks base method. -func (m *MockObjectHandleWrapper) NewReader(ctx context.Context) (ReaderWrapper, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewReader", ctx) - ret0, _ := ret[0].(ReaderWrapper) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// NewReader indicates an expected call of NewReader. -func (mr *MockObjectHandleWrapperMockRecorder) NewReader(ctx interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewReader", reflect.TypeOf((*MockObjectHandleWrapper)(nil).NewReader), ctx) -} - -// NewWriter mocks base method. -func (m *MockObjectHandleWrapper) NewWriter(ctx context.Context) WriterWrapper { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewWriter", ctx) - ret0, _ := ret[0].(WriterWrapper) - return ret0 -} - -// NewWriter indicates an expected call of NewWriter. -func (mr *MockObjectHandleWrapperMockRecorder) NewWriter(ctx interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewWriter", reflect.TypeOf((*MockObjectHandleWrapper)(nil).NewWriter), ctx) -} - -// MockWriterWrapper is a mock of WriterWrapper interface. -type MockWriterWrapper struct { - ctrl *gomock.Controller - recorder *MockWriterWrapperMockRecorder -} - -// MockWriterWrapperMockRecorder is the mock recorder for MockWriterWrapper. -type MockWriterWrapperMockRecorder struct { - mock *MockWriterWrapper -} - -// NewMockWriterWrapper creates a new mock instance. -func NewMockWriterWrapper(ctrl *gomock.Controller) *MockWriterWrapper { - mock := &MockWriterWrapper{ctrl: ctrl} - mock.recorder = &MockWriterWrapperMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockWriterWrapper) EXPECT() *MockWriterWrapperMockRecorder { - return m.recorder -} - -// Close mocks base method. -func (m *MockWriterWrapper) Close() error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Close") - ret0, _ := ret[0].(error) - return ret0 -} - -// Close indicates an expected call of Close. -func (mr *MockWriterWrapperMockRecorder) Close() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockWriterWrapper)(nil).Close)) -} - -// CloseWithError mocks base method. -func (m *MockWriterWrapper) CloseWithError(err error) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CloseWithError", err) - ret0, _ := ret[0].(error) - return ret0 -} - -// CloseWithError indicates an expected call of CloseWithError. 
-func (mr *MockWriterWrapperMockRecorder) CloseWithError(err interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloseWithError", reflect.TypeOf((*MockWriterWrapper)(nil).CloseWithError), err) -} - -// Write mocks base method. -func (m *MockWriterWrapper) Write(p []byte) (int, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Write", p) - ret0, _ := ret[0].(int) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Write indicates an expected call of Write. -func (mr *MockWriterWrapperMockRecorder) Write(p interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Write", reflect.TypeOf((*MockWriterWrapper)(nil).Write), p) -} - -// MockReaderWrapper is a mock of ReaderWrapper interface. -type MockReaderWrapper struct { - ctrl *gomock.Controller - recorder *MockReaderWrapperMockRecorder -} - -// MockReaderWrapperMockRecorder is the mock recorder for MockReaderWrapper. -type MockReaderWrapperMockRecorder struct { - mock *MockReaderWrapper -} - -// NewMockReaderWrapper creates a new mock instance. -func NewMockReaderWrapper(ctrl *gomock.Controller) *MockReaderWrapper { - mock := &MockReaderWrapper{ctrl: ctrl} - mock.recorder = &MockReaderWrapperMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockReaderWrapper) EXPECT() *MockReaderWrapperMockRecorder { - return m.recorder -} - -// Close mocks base method. -func (m *MockReaderWrapper) Close() error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Close") - ret0, _ := ret[0].(error) - return ret0 -} - -// Close indicates an expected call of Close. -func (mr *MockReaderWrapperMockRecorder) Close() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockReaderWrapper)(nil).Close)) -} - -// Read mocks base method. -func (m *MockReaderWrapper) Read(p []byte) (int, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Read", p) - ret0, _ := ret[0].(int) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Read indicates an expected call of Read. -func (mr *MockReaderWrapperMockRecorder) Read(p interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Read", reflect.TypeOf((*MockReaderWrapper)(nil).Read), p) -} - -// MockObjectIteratorWrapper is a mock of ObjectIteratorWrapper interface. -type MockObjectIteratorWrapper struct { - ctrl *gomock.Controller - recorder *MockObjectIteratorWrapperMockRecorder -} - -// MockObjectIteratorWrapperMockRecorder is the mock recorder for MockObjectIteratorWrapper. -type MockObjectIteratorWrapperMockRecorder struct { - mock *MockObjectIteratorWrapper -} - -// NewMockObjectIteratorWrapper creates a new mock instance. -func NewMockObjectIteratorWrapper(ctrl *gomock.Controller) *MockObjectIteratorWrapper { - mock := &MockObjectIteratorWrapper{ctrl: ctrl} - mock.recorder = &MockObjectIteratorWrapperMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockObjectIteratorWrapper) EXPECT() *MockObjectIteratorWrapperMockRecorder { - return m.recorder -} - -// Next mocks base method. 
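ObjectIteratorWrapper, mocked below, wraps the gcloud storage object iterator, whose Next is conventionally drained until it returns iterator.Done from google.golang.org/api/iterator. A small sketch of a caller written against the wrapper; drainObjects is an illustrative name, not a function in this package.

package connector_test

import (
	"google.golang.org/api/iterator"

	"go.temporal.io/server/common/archiver/gcloud/connector"
)

// drainObjects collects object names until the iterator reports iterator.Done,
// mirroring how the wrapped *storage.ObjectIterator signals exhaustion.
func drainObjects(it connector.ObjectIteratorWrapper) ([]string, error) {
	var names []string
	for {
		attrs, err := it.Next()
		if err == iterator.Done {
			return names, nil
		}
		if err != nil {
			return nil, err
		}
		names = append(names, attrs.Name)
	}
}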
-func (m *MockObjectIteratorWrapper) Next() (*storage.ObjectAttrs, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Next") - ret0, _ := ret[0].(*storage.ObjectAttrs) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Next indicates an expected call of Next. -func (mr *MockObjectIteratorWrapperMockRecorder) Next() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Next", reflect.TypeOf((*MockObjectIteratorWrapper)(nil).Next)) -} diff -Nru temporal-1.21.5-1/src/common/archiver/gcloud/connector/client_delegate.go temporal-1.22.5/src/common/archiver/gcloud/connector/client_delegate.go --- temporal-1.21.5-1/src/common/archiver/gcloud/connector/client_delegate.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/archiver/gcloud/connector/client_delegate.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,228 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
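client_delegate.go carries the old clientDelegate.go content forward under a snake_case name, and its go:generate directive (next) now writes client_delegate_mock.go to match. The wrapper interfaces exist so callers depend on GcloudStorageClient rather than the concrete *storage.Client; a hedged sketch of such a caller follows (putObject is an illustrative name, not part of the connector package).

package connector_test

import (
	"context"

	"go.temporal.io/server/common/archiver/gcloud/connector"
)

// putObject writes data to bucket/name through the wrapper interfaces only,
// so it can be unit-tested with the generated mocks instead of real GCS.
func putObject(ctx context.Context, client connector.GcloudStorageClient, bucket, name string, data []byte) error {
	w := client.Bucket(bucket).Object(name).NewWriter(ctx)
	if _, err := w.Write(data); err != nil {
		_ = w.CloseWithError(err) // deprecated upstream, but still part of WriterWrapper
		return err
	}
	// As the Write documentation below notes, writes are asynchronous:
	// only the error from Close reports whether the upload succeeded.
	return w.Close()
}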
+ +//go:generate mockgen -copyright_file ../../../../LICENSE -package $GOPACKAGE -source $GOFILE -destination client_delegate_mock.go + +package connector + +import ( + "context" + "os" + + "cloud.google.com/go/storage" + "golang.org/x/oauth2/google" + "google.golang.org/api/option" +) + +type ( + // GcloudStorageClient is an interface that expose some methods from gcloud storage client + GcloudStorageClient interface { + Bucket(URI string) BucketHandleWrapper + } + + clientDelegate struct { + nativeClient *storage.Client + } +) + +type ( + // BucketHandleWrapper is an interface that expose some methods from gcloud storage bucket + BucketHandleWrapper interface { + Object(name string) ObjectHandleWrapper + Objects(ctx context.Context, q *storage.Query) ObjectIteratorWrapper + Attrs(ctx context.Context) (*storage.BucketAttrs, error) + } + + bucketDelegate struct { + bucket *storage.BucketHandle + } +) + +type ( + // ObjectHandleWrapper is an interface that expose some methods from gcloud storage object + ObjectHandleWrapper interface { + NewWriter(ctx context.Context) WriterWrapper + NewReader(ctx context.Context) (ReaderWrapper, error) + Attrs(ctx context.Context) (*storage.ObjectAttrs, error) + } + + objectDelegate struct { + object *storage.ObjectHandle + } +) + +type ( + // WriterWrapper is an interface that expose some methods from gcloud storage writer + WriterWrapper interface { + Close() error + Write(p []byte) (n int, err error) + CloseWithError(err error) error + } + + writerDelegate struct { + writer *storage.Writer + } +) + +type ( + // ReaderWrapper is an interface that expose some methods from gcloud storage reader + ReaderWrapper interface { + Close() error + Read(p []byte) (int, error) + } + + readerDelegate struct { + reader *storage.Reader + } +) + +type ( + // ObjectIteratorWrapper is an interface that expose some methods from gcloud storage objectIterator + ObjectIteratorWrapper interface { + Next() (*storage.ObjectAttrs, error) + } +) + +func newDefaultClientDelegate(ctx context.Context) (*clientDelegate, error) { + nativeClient, err := storage.NewClient(ctx) + return &clientDelegate{nativeClient: nativeClient}, err +} + +func newClientDelegateWithCredentials(ctx context.Context, credentialsPath string) (*clientDelegate, error) { + + jsonKey, err := os.ReadFile(credentialsPath) + if err != nil { + return newDefaultClientDelegate(ctx) + } + + conf, err := google.JWTConfigFromJSON(jsonKey, storage.ScopeFullControl) + if err != nil { + return newDefaultClientDelegate(ctx) + } + + nativeClient, err := storage.NewClient(ctx, option.WithTokenSource(conf.TokenSource(ctx))) + return &clientDelegate{nativeClient: nativeClient}, err +} + +// Bucket returns a BucketHandle, which provides operations on the named bucket. +// This call does not perform any network operations. +// +// The supplied name must contain only lowercase letters, numbers, dashes, +// underscores, and dots. The full specification for valid bucket names can be +// found at: +// +// https://cloud.google.com/storage/docs/bucket-naming +func (c *clientDelegate) Bucket(bucketName string) BucketHandleWrapper { + return &bucketDelegate{bucket: c.nativeClient.Bucket(bucketName)} +} + +// Object returns an ObjectHandle, which provides operations on the named object. +// This call does not perform any network operations. +// +// name must consist entirely of valid UTF-8-encoded runes. 
The full specification +// for valid object names can be found at: +// +// https://cloud.google.com/storage/docs/bucket-naming +func (b *bucketDelegate) Object(name string) ObjectHandleWrapper { + return &objectDelegate{object: b.bucket.Object(name)} +} + +// Objects returns an iterator over the objects in the bucket that match the Query q. +// If q is nil, no filtering is done. +func (b *bucketDelegate) Objects(ctx context.Context, q *storage.Query) ObjectIteratorWrapper { + return b.bucket.Objects(ctx, q) +} + +// Attrs returns the metadata for the bucket. +func (b *bucketDelegate) Attrs(ctx context.Context) (*storage.BucketAttrs, error) { + return b.bucket.Attrs(ctx) +} + +// NewWriter returns a storage Writer that writes to the GCS object +// associated with this ObjectHandle. +// +// A new object will be created unless an object with this name already exists. +// Otherwise any previous object with the same name will be replaced. +// The object will not be available (and any previous object will remain) +// until Close has been called. +// +// Attributes can be set on the object by modifying the returned Writer's +// ObjectAttrs field before the first call to Write. If no ContentType +// attribute is specified, the content type will be automatically sniffed +// using net/http.DetectContentType. +// +// It is the caller's responsibility to call Close when writing is done. To +// stop writing without saving the data, cancel the context. +func (o *objectDelegate) NewWriter(ctx context.Context) WriterWrapper { + return &writerDelegate{writer: o.object.NewWriter(ctx)} +} + +// NewReader creates a new Reader to read the contents of the +// object. +// ErrObjectNotExist will be returned if the object is not found. +// +// The caller must call Close on the returned Reader when done reading. +func (o *objectDelegate) NewReader(ctx context.Context) (ReaderWrapper, error) { + r, err := o.object.NewReader(ctx) + return &readerDelegate{reader: r}, err +} + +func (o *objectDelegate) Attrs(ctx context.Context) (attrs *storage.ObjectAttrs, err error) { + return o.object.Attrs(ctx) +} + +// Close completes the write operation and flushes any buffered data. +// If Close doesn't return an error, metadata about the written object +// can be retrieved by calling Attrs. +func (w *writerDelegate) Close() error { + return w.writer.Close() +} + +// Write appends to w. It implements the io.Writer interface. +// +// Since writes happen asynchronously, Write may return a nil +// error even though the write failed (or will fail). Always +// use the error returned from Writer.Close to determine if +// the upload was successful. +func (w *writerDelegate) Write(p []byte) (int, error) { + return w.writer.Write(p) +} + +// CloseWithError aborts the write operation with the provided error. +// CloseWithError always returns nil. +// +// Deprecated: cancel the context passed to NewWriter instead. +func (w *writerDelegate) CloseWithError(err error) error { + return w.writer.CloseWithError(err) +} + +// Close closes the Reader. It must be called when done reading. 
+func (r *readerDelegate) Close() error { + return r.reader.Close() +} + +func (r *readerDelegate) Read(p []byte) (int, error) { + return r.reader.Read(p) + +} diff -Nru temporal-1.21.5-1/src/common/archiver/gcloud/connector/client_delegate_mock.go temporal-1.22.5/src/common/archiver/gcloud/connector/client_delegate_mock.go --- temporal-1.21.5-1/src/common/archiver/gcloud/connector/client_delegate_mock.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/archiver/gcloud/connector/client_delegate_mock.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,363 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Code generated by MockGen. DO NOT EDIT. +// Source: client_delegate.go + +// Package connector is a generated GoMock package. +package connector + +import ( + context "context" + reflect "reflect" + + storage "cloud.google.com/go/storage" + gomock "github.com/golang/mock/gomock" +) + +// MockGcloudStorageClient is a mock of GcloudStorageClient interface. +type MockGcloudStorageClient struct { + ctrl *gomock.Controller + recorder *MockGcloudStorageClientMockRecorder +} + +// MockGcloudStorageClientMockRecorder is the mock recorder for MockGcloudStorageClient. +type MockGcloudStorageClientMockRecorder struct { + mock *MockGcloudStorageClient +} + +// NewMockGcloudStorageClient creates a new mock instance. +func NewMockGcloudStorageClient(ctrl *gomock.Controller) *MockGcloudStorageClient { + mock := &MockGcloudStorageClient{ctrl: ctrl} + mock.recorder = &MockGcloudStorageClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockGcloudStorageClient) EXPECT() *MockGcloudStorageClientMockRecorder { + return m.recorder +} + +// Bucket mocks base method. +func (m *MockGcloudStorageClient) Bucket(URI string) BucketHandleWrapper { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Bucket", URI) + ret0, _ := ret[0].(BucketHandleWrapper) + return ret0 +} + +// Bucket indicates an expected call of Bucket. +func (mr *MockGcloudStorageClientMockRecorder) Bucket(URI interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Bucket", reflect.TypeOf((*MockGcloudStorageClient)(nil).Bucket), URI) +} + +// MockBucketHandleWrapper is a mock of BucketHandleWrapper interface. 
+type MockBucketHandleWrapper struct { + ctrl *gomock.Controller + recorder *MockBucketHandleWrapperMockRecorder +} + +// MockBucketHandleWrapperMockRecorder is the mock recorder for MockBucketHandleWrapper. +type MockBucketHandleWrapperMockRecorder struct { + mock *MockBucketHandleWrapper +} + +// NewMockBucketHandleWrapper creates a new mock instance. +func NewMockBucketHandleWrapper(ctrl *gomock.Controller) *MockBucketHandleWrapper { + mock := &MockBucketHandleWrapper{ctrl: ctrl} + mock.recorder = &MockBucketHandleWrapperMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockBucketHandleWrapper) EXPECT() *MockBucketHandleWrapperMockRecorder { + return m.recorder +} + +// Attrs mocks base method. +func (m *MockBucketHandleWrapper) Attrs(ctx context.Context) (*storage.BucketAttrs, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Attrs", ctx) + ret0, _ := ret[0].(*storage.BucketAttrs) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Attrs indicates an expected call of Attrs. +func (mr *MockBucketHandleWrapperMockRecorder) Attrs(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Attrs", reflect.TypeOf((*MockBucketHandleWrapper)(nil).Attrs), ctx) +} + +// Object mocks base method. +func (m *MockBucketHandleWrapper) Object(name string) ObjectHandleWrapper { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Object", name) + ret0, _ := ret[0].(ObjectHandleWrapper) + return ret0 +} + +// Object indicates an expected call of Object. +func (mr *MockBucketHandleWrapperMockRecorder) Object(name interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Object", reflect.TypeOf((*MockBucketHandleWrapper)(nil).Object), name) +} + +// Objects mocks base method. +func (m *MockBucketHandleWrapper) Objects(ctx context.Context, q *storage.Query) ObjectIteratorWrapper { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Objects", ctx, q) + ret0, _ := ret[0].(ObjectIteratorWrapper) + return ret0 +} + +// Objects indicates an expected call of Objects. +func (mr *MockBucketHandleWrapperMockRecorder) Objects(ctx, q interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Objects", reflect.TypeOf((*MockBucketHandleWrapper)(nil).Objects), ctx, q) +} + +// MockObjectHandleWrapper is a mock of ObjectHandleWrapper interface. +type MockObjectHandleWrapper struct { + ctrl *gomock.Controller + recorder *MockObjectHandleWrapperMockRecorder +} + +// MockObjectHandleWrapperMockRecorder is the mock recorder for MockObjectHandleWrapper. +type MockObjectHandleWrapperMockRecorder struct { + mock *MockObjectHandleWrapper +} + +// NewMockObjectHandleWrapper creates a new mock instance. +func NewMockObjectHandleWrapper(ctrl *gomock.Controller) *MockObjectHandleWrapper { + mock := &MockObjectHandleWrapper{ctrl: ctrl} + mock.recorder = &MockObjectHandleWrapperMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockObjectHandleWrapper) EXPECT() *MockObjectHandleWrapperMockRecorder { + return m.recorder +} + +// Attrs mocks base method. 
+func (m *MockObjectHandleWrapper) Attrs(ctx context.Context) (*storage.ObjectAttrs, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Attrs", ctx) + ret0, _ := ret[0].(*storage.ObjectAttrs) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Attrs indicates an expected call of Attrs. +func (mr *MockObjectHandleWrapperMockRecorder) Attrs(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Attrs", reflect.TypeOf((*MockObjectHandleWrapper)(nil).Attrs), ctx) +} + +// NewReader mocks base method. +func (m *MockObjectHandleWrapper) NewReader(ctx context.Context) (ReaderWrapper, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewReader", ctx) + ret0, _ := ret[0].(ReaderWrapper) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NewReader indicates an expected call of NewReader. +func (mr *MockObjectHandleWrapperMockRecorder) NewReader(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewReader", reflect.TypeOf((*MockObjectHandleWrapper)(nil).NewReader), ctx) +} + +// NewWriter mocks base method. +func (m *MockObjectHandleWrapper) NewWriter(ctx context.Context) WriterWrapper { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewWriter", ctx) + ret0, _ := ret[0].(WriterWrapper) + return ret0 +} + +// NewWriter indicates an expected call of NewWriter. +func (mr *MockObjectHandleWrapperMockRecorder) NewWriter(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewWriter", reflect.TypeOf((*MockObjectHandleWrapper)(nil).NewWriter), ctx) +} + +// MockWriterWrapper is a mock of WriterWrapper interface. +type MockWriterWrapper struct { + ctrl *gomock.Controller + recorder *MockWriterWrapperMockRecorder +} + +// MockWriterWrapperMockRecorder is the mock recorder for MockWriterWrapper. +type MockWriterWrapperMockRecorder struct { + mock *MockWriterWrapper +} + +// NewMockWriterWrapper creates a new mock instance. +func NewMockWriterWrapper(ctrl *gomock.Controller) *MockWriterWrapper { + mock := &MockWriterWrapper{ctrl: ctrl} + mock.recorder = &MockWriterWrapperMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockWriterWrapper) EXPECT() *MockWriterWrapperMockRecorder { + return m.recorder +} + +// Close mocks base method. +func (m *MockWriterWrapper) Close() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Close") + ret0, _ := ret[0].(error) + return ret0 +} + +// Close indicates an expected call of Close. +func (mr *MockWriterWrapperMockRecorder) Close() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockWriterWrapper)(nil).Close)) +} + +// CloseWithError mocks base method. +func (m *MockWriterWrapper) CloseWithError(err error) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CloseWithError", err) + ret0, _ := ret[0].(error) + return ret0 +} + +// CloseWithError indicates an expected call of CloseWithError. +func (mr *MockWriterWrapperMockRecorder) CloseWithError(err interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloseWithError", reflect.TypeOf((*MockWriterWrapper)(nil).CloseWithError), err) +} + +// Write mocks base method. 
+func (m *MockWriterWrapper) Write(p []byte) (int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Write", p) + ret0, _ := ret[0].(int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Write indicates an expected call of Write. +func (mr *MockWriterWrapperMockRecorder) Write(p interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Write", reflect.TypeOf((*MockWriterWrapper)(nil).Write), p) +} + +// MockReaderWrapper is a mock of ReaderWrapper interface. +type MockReaderWrapper struct { + ctrl *gomock.Controller + recorder *MockReaderWrapperMockRecorder +} + +// MockReaderWrapperMockRecorder is the mock recorder for MockReaderWrapper. +type MockReaderWrapperMockRecorder struct { + mock *MockReaderWrapper +} + +// NewMockReaderWrapper creates a new mock instance. +func NewMockReaderWrapper(ctrl *gomock.Controller) *MockReaderWrapper { + mock := &MockReaderWrapper{ctrl: ctrl} + mock.recorder = &MockReaderWrapperMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockReaderWrapper) EXPECT() *MockReaderWrapperMockRecorder { + return m.recorder +} + +// Close mocks base method. +func (m *MockReaderWrapper) Close() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Close") + ret0, _ := ret[0].(error) + return ret0 +} + +// Close indicates an expected call of Close. +func (mr *MockReaderWrapperMockRecorder) Close() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockReaderWrapper)(nil).Close)) +} + +// Read mocks base method. +func (m *MockReaderWrapper) Read(p []byte) (int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Read", p) + ret0, _ := ret[0].(int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Read indicates an expected call of Read. +func (mr *MockReaderWrapperMockRecorder) Read(p interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Read", reflect.TypeOf((*MockReaderWrapper)(nil).Read), p) +} + +// MockObjectIteratorWrapper is a mock of ObjectIteratorWrapper interface. +type MockObjectIteratorWrapper struct { + ctrl *gomock.Controller + recorder *MockObjectIteratorWrapperMockRecorder +} + +// MockObjectIteratorWrapperMockRecorder is the mock recorder for MockObjectIteratorWrapper. +type MockObjectIteratorWrapperMockRecorder struct { + mock *MockObjectIteratorWrapper +} + +// NewMockObjectIteratorWrapper creates a new mock instance. +func NewMockObjectIteratorWrapper(ctrl *gomock.Controller) *MockObjectIteratorWrapper { + mock := &MockObjectIteratorWrapper{ctrl: ctrl} + mock.recorder = &MockObjectIteratorWrapperMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockObjectIteratorWrapper) EXPECT() *MockObjectIteratorWrapperMockRecorder { + return m.recorder +} + +// Next mocks base method. +func (m *MockObjectIteratorWrapper) Next() (*storage.ObjectAttrs, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Next") + ret0, _ := ret[0].(*storage.ObjectAttrs) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Next indicates an expected call of Next. 
+func (mr *MockObjectIteratorWrapperMockRecorder) Next() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Next", reflect.TypeOf((*MockObjectIteratorWrapper)(nil).Next)) +} diff -Nru temporal-1.21.5-1/src/common/archiver/gcloud/historyArchiver.go temporal-1.22.5/src/common/archiver/gcloud/historyArchiver.go --- temporal-1.21.5-1/src/common/archiver/gcloud/historyArchiver.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/archiver/gcloud/historyArchiver.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,407 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
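As a hedged illustration, not part of the packaging diff, of how the regenerated gomock wrappers above are typically stubbed, here is a sketch of a unit test that drives an upload through the mocked client chain; the test name, bucket, and object key are invented:

package connector_test // hypothetical example, not in the tree

import (
	"context"
	"testing"

	"github.com/golang/mock/gomock"

	"go.temporal.io/server/common/archiver/gcloud/connector"
)

// TestUploadThroughWrappers stubs the chain Bucket -> Object -> NewWriter and
// checks that a small payload can be written and the writer closed cleanly.
func TestUploadThroughWrappers(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	writer := connector.NewMockWriterWrapper(ctrl)
	object := connector.NewMockObjectHandleWrapper(ctrl)
	bucket := connector.NewMockBucketHandleWrapper(ctrl)
	client := connector.NewMockGcloudStorageClient(ctrl)

	client.EXPECT().Bucket("my-bucket").Return(bucket)
	bucket.EXPECT().Object("history.part0").Return(object)
	object.EXPECT().NewWriter(gomock.Any()).Return(writer)
	writer.EXPECT().Write([]byte("data")).Return(4, nil)
	writer.EXPECT().Close().Return(nil)

	w := client.Bucket("my-bucket").Object("history.part0").NewWriter(context.Background())
	if _, err := w.Write([]byte("data")); err != nil {
		t.Fatal(err)
	}
	if err := w.Close(); err != nil {
		t.Fatal(err)
	}
}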
- -package gcloud - -import ( - "context" - "encoding/binary" - "errors" - "path/filepath" - "time" - - historypb "go.temporal.io/api/history/v1" - "go.temporal.io/api/serviceerror" - - "go.temporal.io/server/common" - "go.temporal.io/server/common/archiver" - "go.temporal.io/server/common/archiver/gcloud/connector" - "go.temporal.io/server/common/codec" - "go.temporal.io/server/common/config" - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/log/tag" - "go.temporal.io/server/common/metrics" - "go.temporal.io/server/common/persistence" -) - -var ( - errUploadNonRetryable = errors.New("upload non-retryable error") -) - -const ( - // URIScheme is the scheme for the gcloud storage implementation - URIScheme = "gs" - - targetHistoryBlobSize = 2 * 1024 * 1024 // 2MB - errEncodeHistory = "failed to encode history batches" - errBucketHistory = "failed to get google storage bucket handle" - errWriteFile = "failed to write history to google storage" -) - -type historyArchiver struct { - container *archiver.HistoryBootstrapContainer - gcloudStorage connector.Client - - // only set in test code - historyIterator archiver.HistoryIterator -} - -type progress struct { - CurrentPageNumber int - IteratorState []byte -} - -type getHistoryToken struct { - CloseFailoverVersion int64 - HighestPart int - CurrentPart int - BatchIdxOffset int -} - -// NewHistoryArchiver creates a new gcloud storage HistoryArchiver -func NewHistoryArchiver( - container *archiver.HistoryBootstrapContainer, - config *config.GstorageArchiver, -) (archiver.HistoryArchiver, error) { - storage, err := connector.NewClient(context.Background(), config) - if err == nil { - return newHistoryArchiver(container, nil, storage), nil - } - return nil, err -} - -func newHistoryArchiver(container *archiver.HistoryBootstrapContainer, historyIterator archiver.HistoryIterator, storage connector.Client) archiver.HistoryArchiver { - return &historyArchiver{ - container: container, - gcloudStorage: storage, - historyIterator: historyIterator, - } -} - -// Archive is used to archive a workflow history. When the context expires the method should stop trying to archive. -// Implementors are free to archive however they want, including implementing retries of sub-operations. The URI defines -// the resource that histories should be archived into. The implementor gets to determine how to interpret the URI. -// The Archive method may or may not be automatically retried by the caller. The ArchiveOptions are used -// to interact with these retries including giving the implementor the ability to cancel retries and record progress -// between retry attempts. -// This method will be invoked after a workflow passes its retention period. -func (h *historyArchiver) Archive(ctx context.Context, URI archiver.URI, request *archiver.ArchiveHistoryRequest, opts ...archiver.ArchiveOption) (err error) { - handler := h.container.MetricsHandler.WithTags(metrics.OperationTag(metrics.HistoryArchiverScope), metrics.NamespaceTag(request.Namespace)) - featureCatalog := archiver.GetFeatureCatalog(opts...) 
- startTime := time.Now().UTC() - defer func() { - handler.Timer(metrics.ServiceLatency.GetMetricName()).Record(time.Since(startTime)) - if err != nil { - - if err.Error() != errUploadNonRetryable.Error() { - handler.Counter(metrics.HistoryArchiverArchiveTransientErrorCount.GetMetricName()).Record(1) - return - } - - handler.Counter(metrics.HistoryArchiverArchiveNonRetryableErrorCount.GetMetricName()).Record(1) - if featureCatalog.NonRetryableError != nil { - err = featureCatalog.NonRetryableError() - } - - } - }() - - logger := archiver.TagLoggerWithArchiveHistoryRequestAndURI(h.container.Logger, request, URI.String()) - - if err := h.ValidateURI(URI); err != nil { - logger.Error(archiver.ArchiveNonRetryableErrorMsg, tag.ArchivalArchiveFailReason(archiver.ErrReasonInvalidURI), tag.Error(err)) - return errUploadNonRetryable - } - - if err := archiver.ValidateHistoryArchiveRequest(request); err != nil { - logger.Error(archiver.ArchiveNonRetryableErrorMsg, tag.ArchivalArchiveFailReason(archiver.ErrReasonInvalidArchiveRequest), tag.Error(err)) - return errUploadNonRetryable - } - - var totalUploadSize int64 - historyIterator := h.historyIterator - var progress progress - if historyIterator == nil { // will only be set by testing code - historyIterator, _ = loadHistoryIterator(ctx, request, h.container.ExecutionManager, featureCatalog, &progress) - } - - encoder := codec.NewJSONPBEncoder() - - for historyIterator.HasNext() { - part := progress.CurrentPageNumber - historyBlob, err := historyIterator.Next(ctx) - if err != nil { - if _, isNotFound := err.(*serviceerror.NotFound); isNotFound { - // workflow history no longer exists, may due to duplicated archival signal - // this may happen even in the middle of iterating history as two archival signals - // can be processed concurrently. 
- logger.Info(archiver.ArchiveSkippedInfoMsg) - handler.Counter(metrics.HistoryArchiverDuplicateArchivalsCount.GetMetricName()).Record(1) - return nil - } - - logger = log.With(logger, tag.ArchivalArchiveFailReason(archiver.ErrReasonReadHistory), tag.Error(err)) - if !common.IsPersistenceTransientError(err) { - logger.Error(archiver.ArchiveNonRetryableErrorMsg) - return errUploadNonRetryable - } - logger.Error(archiver.ArchiveTransientErrorMsg) - return err - } - - if historyMutated(request, historyBlob.Body, historyBlob.Header.IsLast) { - logger.Error(archiver.ArchiveNonRetryableErrorMsg, tag.ArchivalArchiveFailReason(archiver.ErrReasonHistoryMutated)) - return archiver.ErrHistoryMutated - } - - encodedHistoryPart, err := encoder.EncodeHistories(historyBlob.Body) - if err != nil { - logger.Error(archiver.ArchiveNonRetryableErrorMsg, tag.ArchivalArchiveFailReason(errEncodeHistory), tag.Error(err)) - return errUploadNonRetryable - } - - filename := constructHistoryFilenameMultipart(request.NamespaceID, request.WorkflowID, request.RunID, request.CloseFailoverVersion, part) - if exist, _ := h.gcloudStorage.Exist(ctx, URI, filename); !exist { - if err := h.gcloudStorage.Upload(ctx, URI, filename, encodedHistoryPart); err != nil { - logger.Error(archiver.ArchiveTransientErrorMsg, tag.ArchivalArchiveFailReason(errWriteFile), tag.Error(err)) - handler.Counter(metrics.HistoryArchiverArchiveTransientErrorCount.GetMetricName()).Record(1) - return err - } - - totalUploadSize = totalUploadSize + int64(binary.Size(encodedHistoryPart)) - } - - if err := saveHistoryIteratorState(ctx, featureCatalog, historyIterator, part, &progress); err != nil { - return err - } - } - - handler.Counter(metrics.HistoryArchiverTotalUploadSize.GetMetricName()).Record(totalUploadSize) - handler.Counter(metrics.HistoryArchiverHistorySize.GetMetricName()).Record(totalUploadSize) - handler.Counter(metrics.HistoryArchiverArchiveSuccessCount.GetMetricName()).Record(1) - return -} - -// Get is used to access an archived history. When context expires method should stop trying to fetch history. -// The URI identifies the resource from which history should be accessed and it is up to the implementor to interpret this URI. -// This method should thrift errors - see filestore as an example. 
-func (h *historyArchiver) Get(ctx context.Context, URI archiver.URI, request *archiver.GetHistoryRequest) (*archiver.GetHistoryResponse, error) { - - err := h.ValidateURI(URI) - if err != nil { - return nil, serviceerror.NewInvalidArgument(archiver.ErrInvalidURI.Error()) - } - - if err := archiver.ValidateGetRequest(request); err != nil { - return nil, serviceerror.NewInvalidArgument(archiver.ErrInvalidGetHistoryRequest.Error()) - } - - var token *getHistoryToken - if request.NextPageToken != nil { - token, err = deserializeGetHistoryToken(request.NextPageToken) - if err != nil { - return nil, serviceerror.NewInvalidArgument(archiver.ErrNextPageTokenCorrupted.Error()) - } - } else { - highestVersion, historyhighestPart, historyCurrentPart, err := h.getHighestVersion(ctx, URI, request) - if err != nil { - return nil, serviceerror.NewUnavailable(err.Error()) - } - if highestVersion == nil { - return nil, serviceerror.NewNotFound(archiver.ErrHistoryNotExist.Error()) - } - token = &getHistoryToken{ - CloseFailoverVersion: *highestVersion, - HighestPart: *historyhighestPart, - CurrentPart: *historyCurrentPart, - BatchIdxOffset: 0, - } - } - - response := &archiver.GetHistoryResponse{} - response.HistoryBatches = []*historypb.History{} - numOfEvents := 0 - encoder := codec.NewJSONPBEncoder() - -outer: - for token.CurrentPart <= token.HighestPart { - - filename := constructHistoryFilenameMultipart(request.NamespaceID, request.WorkflowID, request.RunID, token.CloseFailoverVersion, token.CurrentPart) - encodedHistoryBatches, err := h.gcloudStorage.Get(ctx, URI, filename) - if err != nil { - return nil, serviceerror.NewUnavailable(err.Error()) - } - if encodedHistoryBatches == nil { - return nil, serviceerror.NewInternal("Fail retrieving history file: " + URI.String() + "/" + filename) - } - - batches, err := encoder.DecodeHistories(encodedHistoryBatches) - if err != nil { - return nil, serviceerror.NewInternal(err.Error()) - } - // trim the batches in the beginning based on token.BatchIdxOffset - batches = batches[token.BatchIdxOffset:] - - for idx, batch := range batches { - response.HistoryBatches = append(response.HistoryBatches, batch) - token.BatchIdxOffset++ - numOfEvents += len(batch.Events) - - if numOfEvents >= request.PageSize { - if idx == len(batches)-1 { - // handle the edge case where page size is meeted after adding the last batch - token.BatchIdxOffset = 0 - token.CurrentPart++ - } - break outer - } - } - - // reset the offset to 0 as we will read a new page - token.BatchIdxOffset = 0 - token.CurrentPart++ - - } - - if token.CurrentPart <= token.HighestPart { - nextToken, err := serializeToken(token) - if err != nil { - return nil, serviceerror.NewInternal(err.Error()) - } - response.NextPageToken = nextToken - } - - return response, nil -} - -// ValidateURI is used to define what a valid URI for an implementation is. 
-func (h *historyArchiver) ValidateURI(URI archiver.URI) (err error) { - - if err = h.validateURI(URI); err == nil { - _, err = h.gcloudStorage.Exist(context.Background(), URI, "") - } - - return -} - -func (h *historyArchiver) validateURI(URI archiver.URI) (err error) { - if URI.Scheme() != URIScheme { - return archiver.ErrURISchemeMismatch - } - - if URI.Path() == "" || URI.Hostname() == "" { - return archiver.ErrInvalidURI - } - - return -} - -func historyMutated(request *archiver.ArchiveHistoryRequest, historyBatches []*historypb.History, isLast bool) bool { - lastBatch := historyBatches[len(historyBatches)-1].Events - lastEvent := lastBatch[len(lastBatch)-1] - lastFailoverVersion := lastEvent.GetVersion() - if lastFailoverVersion > request.CloseFailoverVersion { - return true - } - - if !isLast { - return false - } - lastEventID := lastEvent.GetEventId() - return lastFailoverVersion != request.CloseFailoverVersion || lastEventID+1 != request.NextEventID -} - -func (h *historyArchiver) getHighestVersion(ctx context.Context, URI archiver.URI, request *archiver.GetHistoryRequest) (*int64, *int, *int, error) { - - filenames, err := h.gcloudStorage.Query(ctx, URI, constructHistoryFilenamePrefix(request.NamespaceID, request.WorkflowID, request.RunID)) - - if err != nil { - return nil, nil, nil, err - } - - var highestVersion *int64 - var highestVersionPart *int - var lowestVersionPart *int - - for _, filename := range filenames { - version, partVersionID, err := extractCloseFailoverVersion(filepath.Base(filename)) - if err != nil || (request.CloseFailoverVersion != nil && version != *request.CloseFailoverVersion) { - continue - } - - if highestVersion == nil || version > *highestVersion { - highestVersion = &version - highestVersionPart = new(int) - lowestVersionPart = new(int) - } - - if *highestVersion == version { - if highestVersionPart == nil || partVersionID > *highestVersionPart { - highestVersionPart = &partVersionID - } - - if lowestVersionPart == nil || partVersionID < *lowestVersionPart { - lowestVersionPart = &partVersionID - } - } - - } - - return highestVersion, highestVersionPart, lowestVersionPart, nil -} - -func loadHistoryIterator(ctx context.Context, request *archiver.ArchiveHistoryRequest, executionManager persistence.ExecutionManager, featureCatalog *archiver.ArchiveFeatureCatalog, progress *progress) (historyIterator archiver.HistoryIterator, err error) { - - defer func() { - if err != nil || historyIterator == nil { - historyIterator, err = archiver.NewHistoryIteratorFromState(request, executionManager, targetHistoryBlobSize, nil) - } - }() - - if featureCatalog.ProgressManager != nil { - if featureCatalog.ProgressManager.HasProgress(ctx) { - err = featureCatalog.ProgressManager.LoadProgress(ctx, &progress) - if err == nil { - historyIterator, err = archiver.NewHistoryIteratorFromState(request, executionManager, targetHistoryBlobSize, progress.IteratorState) - } - } - - } - return -} - -func saveHistoryIteratorState(ctx context.Context, featureCatalog *archiver.ArchiveFeatureCatalog, historyIterator archiver.HistoryIterator, currentPartNum int, progress *progress) (err error) { - var state []byte - if featureCatalog.ProgressManager != nil { - state, err = historyIterator.GetState() - if err == nil { - progress.CurrentPageNumber = currentPartNum + 1 - progress.IteratorState = state - - err = featureCatalog.ProgressManager.RecordProgress(ctx, progress) - } - } - - return err -} diff -Nru temporal-1.21.5-1/src/common/archiver/gcloud/historyArchiver_test.go 
temporal-1.22.5/src/common/archiver/gcloud/historyArchiver_test.go --- temporal-1.21.5-1/src/common/archiver/gcloud/historyArchiver_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/archiver/gcloud/historyArchiver_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,588 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package gcloud - -import ( - "context" - "errors" - "testing" - "time" - - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - enumspb "go.temporal.io/api/enums/v1" - historypb "go.temporal.io/api/history/v1" - "go.temporal.io/api/serviceerror" - - archiverspb "go.temporal.io/server/api/archiver/v1" - "go.temporal.io/server/common" - "go.temporal.io/server/common/archiver" - "go.temporal.io/server/common/archiver/gcloud/connector" - "go.temporal.io/server/common/convert" - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/metrics" - "go.temporal.io/server/common/primitives/timestamp" -) - -const ( - testNamespaceID = "test-namespace-id" - testNamespace = "test-namespace" - testWorkflowID = "test-workflow-id" - testRunID = "test-run-id" - testNextEventID = 1800 - testCloseFailoverVersion = 100 - testPageSize = 100 - exampleHistoryRecord = `[{"events":[{"eventId":1,"eventTime": "2020-07-30T00:30:03.082421843Z","eventType":"WorkflowExecutionStarted","version":-24,"taskId":5242897,"workflowExecutionStartedEventAttributes":{"workflowType":{"name":"MobileOnlyWorkflow::processMobileOnly"},"taskQueue":{"name":"MobileOnly"},"input":null,"workflowExecutionTimeout":"300s","workflowTaskTimeout":"60s","originalExecutionRunId":"1fd5d4c8-1590-4a0a-8027-535e8729de8e","identity":"","firstExecutionRunId":"1fd5d4c8-1590-4a0a-8027-535e8729de8e","attempt":1,"firstWorkflowTaskBackoff":"0s"}}]}]` - twoEventsExampleHistoryRecord = `[{"events":[{"eventId":1,"eventTime": 
"2020-07-30T00:30:03.082421843Z","eventType":"WorkflowExecutionStarted","version":-24,"taskId":5242897,"workflowExecutionStartedEventAttributes":{"workflowType":{"name":"MobileOnlyWorkflow::processMobileOnly"},"taskQueue":{"name":"MobileOnly"},"input":null,"workflowExecutionTimeout":"300s","workflowTaskTimeout":"60s","originalExecutionRunId":"1fd5d4c8-1590-4a0a-8027-535e8729de8e","identity":"","firstExecutionRunId":"1fd5d4c8-1590-4a0a-8027-535e8729de8e","attempt":1,"firstWorkflowTaskBackoff":"0s"}},{"eventId":2,"eventTime": "2020-07-30T00:30:03.082421843Z","eventType":"WorkflowExecutionStarted","version":-24,"taskId":5242897,"workflowExecutionStartedEventAttributes":{"workflowType":{"name":"MobileOnlyWorkflow::processMobileOnly"},"taskQueue":{"name":"MobileOnly"},"input":null,"workflowExecutionTimeout":"300s","workflowTaskTimeout":"60s","originalExecutionRunId":"1fd5d4c8-1590-4a0a-8027-535e8729de8e","identity":"","firstExecutionRunId":"1fd5d4c8-1590-4a0a-8027-535e8729de8e","attempt":1,"firstWorkflowTaskBackoff":"0s"}}]}]` -) - -var ( - testBranchToken = []byte{1, 2, 3} -) - -func (h *historyArchiverSuite) SetupTest() { - h.Assertions = require.New(h.T()) - h.controller = gomock.NewController(h.T()) - h.container = &archiver.HistoryBootstrapContainer{ - Logger: log.NewNoopLogger(), - MetricsHandler: metrics.NoopMetricsHandler, - } - h.testArchivalURI, _ = archiver.NewURI("gs://my-bucket-cad/temporal_archival/development") -} - -func (h *historyArchiverSuite) TearDownTest() { - h.controller.Finish() -} - -func TestHistoryArchiverSuite(t *testing.T) { - suite.Run(t, new(historyArchiverSuite)) -} - -type historyArchiverSuite struct { - *require.Assertions - suite.Suite - - controller *gomock.Controller - - container *archiver.HistoryBootstrapContainer - testArchivalURI archiver.URI -} - -func getCanceledContext() context.Context { - ctx, cancel := context.WithCancel(context.Background()) - cancel() - return ctx -} - -func (h *historyArchiverSuite) TestValidateURI() { - ctx := context.Background() - testCases := []struct { - URI string - expectedErr error - }{ - { - URI: "wrongscheme:///a/b/c", - expectedErr: archiver.ErrURISchemeMismatch, - }, - { - URI: "gs:my-bucket-cad/temporal_archival/development", - expectedErr: archiver.ErrInvalidURI, - }, - { - URI: "gs://", - expectedErr: archiver.ErrInvalidURI, - }, - { - URI: "gs://my-bucket-cad", - expectedErr: archiver.ErrInvalidURI, - }, - { - URI: "gs:/my-bucket-cad/temporal_archival/development", - expectedErr: archiver.ErrInvalidURI, - }, - { - URI: "gs://my-bucket-cad/temporal_archival/development", - expectedErr: nil, - }, - } - - storageWrapper := connector.NewMockClient(h.controller) - storageWrapper.EXPECT().Exist(ctx, gomock.Any(), "").Return(false, nil) - historyArchiver := new(historyArchiver) - historyArchiver.gcloudStorage = storageWrapper - for _, tc := range testCases { - URI, err := archiver.NewURI(tc.URI) - h.NoError(err) - h.Equal(tc.expectedErr, historyArchiver.ValidateURI(URI)) - } -} - -func (h *historyArchiverSuite) TestArchive_Fail_InvalidURI() { - mockStorageClient := connector.NewMockGcloudStorageClient(h.controller) - storageWrapper, _ := connector.NewClientWithParams(mockStorageClient) - - historyIterator := archiver.NewMockHistoryIterator(h.controller) - - historyArchiver := newHistoryArchiver(h.container, historyIterator, storageWrapper) - request := &archiver.ArchiveHistoryRequest{ - NamespaceID: testNamespaceID, - Namespace: testNamespace, - WorkflowID: testWorkflowID, - RunID: testRunID, - BranchToken: 
testBranchToken, - NextEventID: testNextEventID, - CloseFailoverVersion: testCloseFailoverVersion, - } - URI, err := archiver.NewURI("wrongscheme://") - h.NoError(err) - err = historyArchiver.Archive(context.Background(), URI, request) - h.Error(err) -} - -func (h *historyArchiverSuite) TestArchive_Fail_InvalidRequest() { - ctx := context.Background() - storageWrapper := connector.NewMockClient(h.controller) - storageWrapper.EXPECT().Exist(ctx, h.testArchivalURI, "").Return(true, nil) - - historyIterator := archiver.NewMockHistoryIterator(h.controller) - - historyArchiver := newHistoryArchiver(h.container, historyIterator, storageWrapper) - request := &archiver.ArchiveHistoryRequest{ - NamespaceID: testNamespaceID, - Namespace: testNamespace, - WorkflowID: "", - RunID: testRunID, - BranchToken: testBranchToken, - NextEventID: testNextEventID, - CloseFailoverVersion: testCloseFailoverVersion, - } - - err := historyArchiver.Archive(ctx, h.testArchivalURI, request) - h.Error(err) -} - -func (h *historyArchiverSuite) TestArchive_Fail_ErrorOnReadHistory() { - ctx := context.Background() - storageWrapper := connector.NewMockClient(h.controller) - storageWrapper.EXPECT().Exist(ctx, h.testArchivalURI, "").Return(true, nil) - - historyIterator := archiver.NewMockHistoryIterator(h.controller) - gomock.InOrder( - historyIterator.EXPECT().HasNext().Return(true), - historyIterator.EXPECT().Next(gomock.Any()).Return(nil, errors.New("some random error")), - ) - - historyArchiver := newHistoryArchiver(h.container, historyIterator, storageWrapper) - request := &archiver.ArchiveHistoryRequest{ - NamespaceID: testNamespaceID, - Namespace: testNamespace, - WorkflowID: testWorkflowID, - RunID: testRunID, - BranchToken: testBranchToken, - NextEventID: testNextEventID, - CloseFailoverVersion: testCloseFailoverVersion, - } - err := historyArchiver.Archive(ctx, h.testArchivalURI, request) - h.Error(err) -} - -func (h *historyArchiverSuite) TestArchive_Fail_TimeoutWhenReadingHistory() { - - ctx := getCanceledContext() - storageWrapper := connector.NewMockClient(h.controller) - storageWrapper.EXPECT().Exist(gomock.Any(), gomock.Any(), "").Return(true, nil) - - historyIterator := archiver.NewMockHistoryIterator(h.controller) - gomock.InOrder( - historyIterator.EXPECT().HasNext().Return(true), - historyIterator.EXPECT().Next(gomock.Any()).Return(nil, serviceerror.NewResourceExhausted(enumspb.RESOURCE_EXHAUSTED_CAUSE_RPS_LIMIT, "")), - ) - - historyArchiver := newHistoryArchiver(h.container, historyIterator, storageWrapper) - request := &archiver.ArchiveHistoryRequest{ - NamespaceID: testNamespaceID, - Namespace: testNamespace, - WorkflowID: testWorkflowID, - RunID: testRunID, - BranchToken: testBranchToken, - NextEventID: testNextEventID, - CloseFailoverVersion: testCloseFailoverVersion, - } - err := historyArchiver.Archive(ctx, h.testArchivalURI, request) - h.Error(err) -} - -func (h *historyArchiverSuite) TestArchive_Fail_HistoryMutated() { - ctx := context.Background() - storageWrapper := connector.NewMockClient(h.controller) - storageWrapper.EXPECT().Exist(ctx, h.testArchivalURI, "").Return(true, nil) - - historyIterator := archiver.NewMockHistoryIterator(h.controller) - historyBatches := []*historypb.History{ - { - Events: []*historypb.HistoryEvent{ - { - EventId: common.FirstEventID + 1, - EventTime: timestamp.TimePtr(time.Now().UTC()), - Version: testCloseFailoverVersion + 1, - }, - }, - }, - } - historyBlob := &archiverspb.HistoryBlob{ - Header: &archiverspb.HistoryBlobHeader{ - IsLast: true, - }, - Body: 
historyBatches, - } - gomock.InOrder( - historyIterator.EXPECT().HasNext().Return(true), - historyIterator.EXPECT().Next(gomock.Any()).Return(historyBlob, nil), - ) - - historyArchiver := newHistoryArchiver(h.container, historyIterator, storageWrapper) - request := &archiver.ArchiveHistoryRequest{ - NamespaceID: testNamespaceID, - Namespace: testNamespace, - WorkflowID: testWorkflowID, - RunID: testRunID, - BranchToken: testBranchToken, - NextEventID: testNextEventID, - CloseFailoverVersion: testCloseFailoverVersion, - } - err := historyArchiver.Archive(ctx, h.testArchivalURI, request) - h.Error(err) -} - -func (h *historyArchiverSuite) TestArchive_Fail_NonRetryableErrorOption() { - - ctx := context.Background() - storageWrapper := connector.NewMockClient(h.controller) - storageWrapper.EXPECT().Exist(ctx, h.testArchivalURI, "").Return(true, nil) - - historyIterator := archiver.NewMockHistoryIterator(h.controller) - gomock.InOrder( - historyIterator.EXPECT().HasNext().Return(true), - historyIterator.EXPECT().Next(gomock.Any()).Return(nil, errors.New("upload non-retryable error")), - ) - - historyArchiver := newHistoryArchiver(h.container, historyIterator, storageWrapper) - request := &archiver.ArchiveHistoryRequest{ - NamespaceID: testNamespaceID, - Namespace: testNamespace, - WorkflowID: testWorkflowID, - RunID: testRunID, - BranchToken: testBranchToken, - NextEventID: testNextEventID, - CloseFailoverVersion: testCloseFailoverVersion, - } - err := historyArchiver.Archive(ctx, h.testArchivalURI, request, archiver.GetNonRetryableErrorOption(errUploadNonRetryable)) - h.Equal(errUploadNonRetryable, err) -} - -func (h *historyArchiverSuite) TestArchive_Skip() { - ctx := context.Background() - - storageWrapper := connector.NewMockClient(h.controller) - storageWrapper.EXPECT().Exist(ctx, h.testArchivalURI, "").Return(true, nil) - storageWrapper.EXPECT().Exist(ctx, h.testArchivalURI, gomock.Any()).Return(false, nil) - storageWrapper.EXPECT().Upload(ctx, h.testArchivalURI, gomock.Any(), gomock.Any()).Return(nil) - - historyIterator := archiver.NewMockHistoryIterator(h.controller) - historyBlob := &archiverspb.HistoryBlob{ - Header: &archiverspb.HistoryBlobHeader{ - IsLast: false, - }, - Body: []*historypb.History{ - { - Events: []*historypb.HistoryEvent{ - { - EventId: common.FirstEventID, - EventTime: timestamp.TimePtr(time.Now().UTC()), - Version: testCloseFailoverVersion, - }, - }, - }, - }, - } - gomock.InOrder( - historyIterator.EXPECT().HasNext().Return(true), - historyIterator.EXPECT().Next(gomock.Any()).Return(historyBlob, nil), - historyIterator.EXPECT().HasNext().Return(true), - historyIterator.EXPECT().Next(gomock.Any()).Return(nil, serviceerror.NewNotFound("workflow not found")), - ) - - historyArchiver := newHistoryArchiver(h.container, historyIterator, storageWrapper) - request := &archiver.ArchiveHistoryRequest{ - NamespaceID: testNamespaceID, - Namespace: testNamespace, - WorkflowID: testWorkflowID, - RunID: testRunID, - BranchToken: testBranchToken, - NextEventID: testNextEventID, - CloseFailoverVersion: testCloseFailoverVersion, - } - err := historyArchiver.Archive(ctx, h.testArchivalURI, request) - h.NoError(err) -} - -func (h *historyArchiverSuite) TestArchive_Success() { - - ctx := context.Background() - - storageWrapper := connector.NewMockClient(h.controller) - storageWrapper.EXPECT().Exist(ctx, h.testArchivalURI, gomock.Any()).Return(false, nil).Times(2) - storageWrapper.EXPECT().Upload(ctx, h.testArchivalURI, gomock.Any(), gomock.Any()).Return(nil) - - historyIterator := 
archiver.NewMockHistoryIterator(h.controller) - historyBatches := []*historypb.History{ - { - Events: []*historypb.HistoryEvent{ - { - EventId: common.FirstEventID + 1, - EventTime: timestamp.TimePtr(time.Now().UTC()), - Version: testCloseFailoverVersion, - }, - { - EventId: common.FirstEventID + 2, - EventTime: timestamp.TimePtr(time.Now().UTC()), - Version: testCloseFailoverVersion, - }, - }, - }, - { - Events: []*historypb.HistoryEvent{ - { - EventId: testNextEventID - 1, - EventTime: timestamp.TimePtr(time.Now().UTC()), - Version: testCloseFailoverVersion, - }, - }, - }, - } - historyBlob := &archiverspb.HistoryBlob{ - Header: &archiverspb.HistoryBlobHeader{ - IsLast: true, - }, - Body: historyBatches, - } - gomock.InOrder( - historyIterator.EXPECT().HasNext().Return(true), - historyIterator.EXPECT().Next(gomock.Any()).Return(historyBlob, nil), - historyIterator.EXPECT().HasNext().Return(false), - ) - - historyArchiver := newHistoryArchiver(h.container, historyIterator, storageWrapper) - - request := &archiver.ArchiveHistoryRequest{ - NamespaceID: testNamespaceID, - Namespace: testNamespace, - WorkflowID: testWorkflowID, - RunID: testRunID, - BranchToken: testBranchToken, - NextEventID: testNextEventID, - CloseFailoverVersion: testCloseFailoverVersion, - } - - err := historyArchiver.Archive(ctx, h.testArchivalURI, request) - h.NoError(err) -} - -func (h *historyArchiverSuite) TestGet_Fail_InvalidURI() { - ctx := context.Background() - mockStorageClient := connector.NewMockGcloudStorageClient(h.controller) - storageWrapper, _ := connector.NewClientWithParams(mockStorageClient) - historyIterator := archiver.NewMockHistoryIterator(h.controller) - historyArchiver := newHistoryArchiver(h.container, historyIterator, storageWrapper) - - request := &archiver.GetHistoryRequest{ - NamespaceID: testNamespaceID, - WorkflowID: testWorkflowID, - RunID: testRunID, - PageSize: 100, - } - URI, err := archiver.NewURI("wrongscheme://") - h.NoError(err) - response, err := historyArchiver.Get(ctx, URI, request) - h.Nil(response) - h.Error(err) -} - -func (h *historyArchiverSuite) TestGet_Fail_InvalidToken() { - ctx := context.Background() - mockStorageClient := connector.NewMockGcloudStorageClient(h.controller) - storageWrapper, _ := connector.NewClientWithParams(mockStorageClient) - historyIterator := archiver.NewMockHistoryIterator(h.controller) - historyArchiver := newHistoryArchiver(h.container, historyIterator, storageWrapper) - request := &archiver.GetHistoryRequest{ - NamespaceID: testNamespaceID, - WorkflowID: testWorkflowID, - RunID: testRunID, - PageSize: testPageSize, - NextPageToken: []byte{'r', 'a', 'n', 'd', 'o', 'm'}, - } - URI, err := archiver.NewURI("gs:///") - h.NoError(err) - response, err := historyArchiver.Get(ctx, URI, request) - h.Nil(response) - h.Error(err) - h.IsType(&serviceerror.InvalidArgument{}, err) -} - -func (h *historyArchiverSuite) TestGet_Success_PickHighestVersion() { - ctx := context.Background() - storageWrapper := connector.NewMockClient(h.controller) - storageWrapper.EXPECT().Exist(ctx, h.testArchivalURI, "").Return(true, nil) - storageWrapper.EXPECT().Query(ctx, h.testArchivalURI, gomock.Any()).Return([]string{"905702227796330300141628222723188294514017512010591354159_-24_0.history", "905702227796330300141628222723188294514017512010591354159_-25_0.history"}, nil) - storageWrapper.EXPECT().Get(ctx, h.testArchivalURI, "141323698701063509081739672280485489488911532452831150339470_-24_0.history").Return([]byte(exampleHistoryRecord), nil) - historyIterator := 
archiver.NewMockHistoryIterator(h.controller) - historyArchiver := newHistoryArchiver(h.container, historyIterator, storageWrapper) - request := &archiver.GetHistoryRequest{ - NamespaceID: testNamespaceID, - WorkflowID: testWorkflowID, - RunID: testRunID, - PageSize: testPageSize, - } - - response, err := historyArchiver.Get(ctx, h.testArchivalURI, request) - h.NoError(err) - h.Nil(response.NextPageToken) -} - -func (h *historyArchiverSuite) TestGet_Success_UseProvidedVersion() { - - ctx := context.Background() - storageWrapper := connector.NewMockClient(h.controller) - storageWrapper.EXPECT().Exist(ctx, h.testArchivalURI, "").Return(true, nil) - storageWrapper.EXPECT().Query(ctx, h.testArchivalURI, "141323698701063509081739672280485489488911532452831150339470").Return([]string{"905702227796330300141628222723188294514017512010591354159_-24_0.history", "905702227796330300141628222723188294514017512010591354159_-25_0.history"}, nil) - storageWrapper.EXPECT().Get(ctx, h.testArchivalURI, "141323698701063509081739672280485489488911532452831150339470_-25_0.history").Return([]byte(exampleHistoryRecord), nil) - historyIterator := archiver.NewMockHistoryIterator(h.controller) - historyArchiver := newHistoryArchiver(h.container, historyIterator, storageWrapper) - request := &archiver.GetHistoryRequest{ - NamespaceID: testNamespaceID, - WorkflowID: testWorkflowID, - RunID: testRunID, - PageSize: testPageSize, - CloseFailoverVersion: convert.Int64Ptr(-25), - } - - response, err := historyArchiver.Get(ctx, h.testArchivalURI, request) - h.NoError(err) - h.Nil(response.NextPageToken) -} - -func (h *historyArchiverSuite) TestGet_Success_PageSize() { - - ctx := context.Background() - storageWrapper := connector.NewMockClient(h.controller) - storageWrapper.EXPECT().Exist(ctx, h.testArchivalURI, "").Return(true, nil) - storageWrapper.EXPECT().Query(ctx, h.testArchivalURI, "141323698701063509081739672280485489488911532452831150339470").Return([]string{"905702227796330300141628222723188294514017512010591354159_-24_0.history", "905702227796330300141628222723188294514017512010591354159_-24_1.history", "905702227796330300141628222723188294514017512010591354159_-24_2.history", "905702227796330300141628222723188294514017512010591354159_-24_3.history", "905702227796330300141628222723188294514017512010591354159_-25_0.history"}, nil) - storageWrapper.EXPECT().Get(ctx, h.testArchivalURI, "141323698701063509081739672280485489488911532452831150339470_-24_0.history").Return([]byte(exampleHistoryRecord), nil) - storageWrapper.EXPECT().Get(ctx, h.testArchivalURI, "141323698701063509081739672280485489488911532452831150339470_-24_1.history").Return([]byte(exampleHistoryRecord), nil) - - historyIterator := archiver.NewMockHistoryIterator(h.controller) - historyArchiver := newHistoryArchiver(h.container, historyIterator, storageWrapper) - request := &archiver.GetHistoryRequest{ - NamespaceID: testNamespaceID, - WorkflowID: testWorkflowID, - RunID: testRunID, - PageSize: 2, - } - - response, err := historyArchiver.Get(ctx, h.testArchivalURI, request) - h.NoError(err) - h.NotNil(response.NextPageToken) - h.EqualValues(len(response.HistoryBatches), 2) -} - -func (h *historyArchiverSuite) TestGet_Success_FromToken() { - - ctx := context.Background() - storageWrapper := connector.NewMockClient(h.controller) - storageWrapper.EXPECT().Exist(ctx, h.testArchivalURI, "").Return(true, nil) - storageWrapper.EXPECT().Get(ctx, h.testArchivalURI, 
"141323698701063509081739672280485489488911532452831150339470_-24_2.history").Return([]byte(exampleHistoryRecord), nil) - storageWrapper.EXPECT().Get(ctx, h.testArchivalURI, "141323698701063509081739672280485489488911532452831150339470_-24_3.history").Return([]byte(twoEventsExampleHistoryRecord), nil) - storageWrapper.EXPECT().Get(ctx, h.testArchivalURI, "141323698701063509081739672280485489488911532452831150339470_-24_4.history").Return([]byte(exampleHistoryRecord), nil) - - historyIterator := archiver.NewMockHistoryIterator(h.controller) - historyArchiver := newHistoryArchiver(h.container, historyIterator, storageWrapper) - - token := &getHistoryToken{ - CloseFailoverVersion: -24, - HighestPart: 5, - CurrentPart: 2, - BatchIdxOffset: 0, - } - - nextPageToken, err := serializeToken(token) - h.NoError(err) - - request := &archiver.GetHistoryRequest{ - NamespaceID: testNamespaceID, - WorkflowID: testWorkflowID, - RunID: testRunID, - PageSize: 4, - NextPageToken: nextPageToken, - } - - h.NoError(err) - response, err := historyArchiver.Get(ctx, h.testArchivalURI, request) - h.NoError(err) - h.NotNil(response.NextPageToken) - - token, err = deserializeGetHistoryToken(response.NextPageToken) - h.NoError(err) - - h.EqualValues(5, token.HighestPart) - h.EqualValues(5, token.CurrentPart) - h.EqualValues(3, len(response.HistoryBatches)) - numOfEvents := 0 - for _, batch := range response.HistoryBatches { - numOfEvents += len(batch.Events) - } - - h.EqualValues(4, numOfEvents) -} - -func (h *historyArchiverSuite) TestGet_NoHistory() { - - ctx := context.Background() - storageWrapper := connector.NewMockClient(h.controller) - storageWrapper.EXPECT().Exist(ctx, h.testArchivalURI, "").Return(true, nil) - storageWrapper.EXPECT().Query(ctx, h.testArchivalURI, "141323698701063509081739672280485489488911532452831150339470").Return([]string{}, nil) - - historyIterator := archiver.NewMockHistoryIterator(h.controller) - historyArchiver := newHistoryArchiver(h.container, historyIterator, storageWrapper) - request := &archiver.GetHistoryRequest{ - NamespaceID: testNamespaceID, - WorkflowID: testWorkflowID, - RunID: testRunID, - PageSize: 2, - } - - _, err := historyArchiver.Get(ctx, h.testArchivalURI, request) - h.Assert().IsType(&serviceerror.NotFound{}, err) -} diff -Nru temporal-1.21.5-1/src/common/archiver/gcloud/history_archiver.go temporal-1.22.5/src/common/archiver/gcloud/history_archiver.go --- temporal-1.21.5-1/src/common/archiver/gcloud/history_archiver.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/archiver/gcloud/history_archiver.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,407 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package gcloud + +import ( + "context" + "encoding/binary" + "errors" + "path/filepath" + "time" + + historypb "go.temporal.io/api/history/v1" + "go.temporal.io/api/serviceerror" + + "go.temporal.io/server/common" + "go.temporal.io/server/common/archiver" + "go.temporal.io/server/common/archiver/gcloud/connector" + "go.temporal.io/server/common/codec" + "go.temporal.io/server/common/config" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/log/tag" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/persistence" +) + +var ( + errUploadNonRetryable = errors.New("upload non-retryable error") +) + +const ( + // URIScheme is the scheme for the gcloud storage implementation + URIScheme = "gs" + + targetHistoryBlobSize = 2 * 1024 * 1024 // 2MB + errEncodeHistory = "failed to encode history batches" + errBucketHistory = "failed to get google storage bucket handle" + errWriteFile = "failed to write history to google storage" +) + +type historyArchiver struct { + container *archiver.HistoryBootstrapContainer + gcloudStorage connector.Client + + // only set in test code + historyIterator archiver.HistoryIterator +} + +type progress struct { + CurrentPageNumber int + IteratorState []byte +} + +type getHistoryToken struct { + CloseFailoverVersion int64 + HighestPart int + CurrentPart int + BatchIdxOffset int +} + +// NewHistoryArchiver creates a new gcloud storage HistoryArchiver +func NewHistoryArchiver( + container *archiver.HistoryBootstrapContainer, + config *config.GstorageArchiver, +) (archiver.HistoryArchiver, error) { + storage, err := connector.NewClient(context.Background(), config) + if err == nil { + return newHistoryArchiver(container, nil, storage), nil + } + return nil, err +} + +func newHistoryArchiver(container *archiver.HistoryBootstrapContainer, historyIterator archiver.HistoryIterator, storage connector.Client) archiver.HistoryArchiver { + return &historyArchiver{ + container: container, + gcloudStorage: storage, + historyIterator: historyIterator, + } +} + +// Archive is used to archive a workflow history. When the context expires the method should stop trying to archive. +// Implementors are free to archive however they want, including implementing retries of sub-operations. The URI defines +// the resource that histories should be archived into. The implementor gets to determine how to interpret the URI. +// The Archive method may or may not be automatically retried by the caller. The ArchiveOptions are used +// to interact with these retries including giving the implementor the ability to cancel retries and record progress +// between retry attempts. +// This method will be invoked after a workflow passes its retention period. 
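The defer block in Archive below splits failures into transient errors (counted and returned so the caller may retry) and errUploadNonRetryable (optionally mapped through the NonRetryableError feature option so the caller can stop retrying). A minimal caller-side sketch of that contract follows; it is illustrative only and not part of this patch — the function name, attempt count, and fixed backoff are assumptions, while archiver.GetNonRetryableErrorOption and the Archive signature are taken from the code and tests in this file set.

// Sketch of a caller honouring the transient vs non-retryable split.
package archivalexample

import (
	"context"
	"errors"
	"time"

	"go.temporal.io/server/common/archiver"
)

func archiveWithRetry(ctx context.Context, a archiver.HistoryArchiver, uri archiver.URI, req *archiver.ArchiveHistoryRequest) error {
	// Sentinel the archiver will surface when the failure is permanent.
	permanent := errors.New("history archival failed permanently")
	var err error
	for attempt := 0; attempt < 3; attempt++ {
		// GetNonRetryableErrorOption asks Archive to report non-retryable
		// failures as the `permanent` sentinel instead of the raw error.
		err = a.Archive(ctx, uri, req, archiver.GetNonRetryableErrorOption(permanent))
		if err == nil || errors.Is(err, permanent) {
			return err // success, or a failure that retrying cannot fix
		}
		time.Sleep(time.Second) // transient error (e.g. storage hiccup): back off and retry
	}
	return err
}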
+func (h *historyArchiver) Archive(ctx context.Context, URI archiver.URI, request *archiver.ArchiveHistoryRequest, opts ...archiver.ArchiveOption) (err error) { + handler := h.container.MetricsHandler.WithTags(metrics.OperationTag(metrics.HistoryArchiverScope), metrics.NamespaceTag(request.Namespace)) + featureCatalog := archiver.GetFeatureCatalog(opts...) + startTime := time.Now().UTC() + defer func() { + handler.Timer(metrics.ServiceLatency.GetMetricName()).Record(time.Since(startTime)) + if err != nil { + + if err.Error() != errUploadNonRetryable.Error() { + handler.Counter(metrics.HistoryArchiverArchiveTransientErrorCount.GetMetricName()).Record(1) + return + } + + handler.Counter(metrics.HistoryArchiverArchiveNonRetryableErrorCount.GetMetricName()).Record(1) + if featureCatalog.NonRetryableError != nil { + err = featureCatalog.NonRetryableError() + } + + } + }() + + logger := archiver.TagLoggerWithArchiveHistoryRequestAndURI(h.container.Logger, request, URI.String()) + + if err := h.ValidateURI(URI); err != nil { + logger.Error(archiver.ArchiveNonRetryableErrorMsg, tag.ArchivalArchiveFailReason(archiver.ErrReasonInvalidURI), tag.Error(err)) + return errUploadNonRetryable + } + + if err := archiver.ValidateHistoryArchiveRequest(request); err != nil { + logger.Error(archiver.ArchiveNonRetryableErrorMsg, tag.ArchivalArchiveFailReason(archiver.ErrReasonInvalidArchiveRequest), tag.Error(err)) + return errUploadNonRetryable + } + + var totalUploadSize int64 + historyIterator := h.historyIterator + var progress progress + if historyIterator == nil { // will only be set by testing code + historyIterator, _ = loadHistoryIterator(ctx, request, h.container.ExecutionManager, featureCatalog, &progress) + } + + encoder := codec.NewJSONPBEncoder() + + for historyIterator.HasNext() { + part := progress.CurrentPageNumber + historyBlob, err := historyIterator.Next(ctx) + if err != nil { + if _, isNotFound := err.(*serviceerror.NotFound); isNotFound { + // workflow history no longer exists, may due to duplicated archival signal + // this may happen even in the middle of iterating history as two archival signals + // can be processed concurrently. 
+ logger.Info(archiver.ArchiveSkippedInfoMsg) + handler.Counter(metrics.HistoryArchiverDuplicateArchivalsCount.GetMetricName()).Record(1) + return nil + } + + logger = log.With(logger, tag.ArchivalArchiveFailReason(archiver.ErrReasonReadHistory), tag.Error(err)) + if !common.IsPersistenceTransientError(err) { + logger.Error(archiver.ArchiveNonRetryableErrorMsg) + return errUploadNonRetryable + } + logger.Error(archiver.ArchiveTransientErrorMsg) + return err + } + + if historyMutated(request, historyBlob.Body, historyBlob.Header.IsLast) { + logger.Error(archiver.ArchiveNonRetryableErrorMsg, tag.ArchivalArchiveFailReason(archiver.ErrReasonHistoryMutated)) + return archiver.ErrHistoryMutated + } + + encodedHistoryPart, err := encoder.EncodeHistories(historyBlob.Body) + if err != nil { + logger.Error(archiver.ArchiveNonRetryableErrorMsg, tag.ArchivalArchiveFailReason(errEncodeHistory), tag.Error(err)) + return errUploadNonRetryable + } + + filename := constructHistoryFilenameMultipart(request.NamespaceID, request.WorkflowID, request.RunID, request.CloseFailoverVersion, part) + if exist, _ := h.gcloudStorage.Exist(ctx, URI, filename); !exist { + if err := h.gcloudStorage.Upload(ctx, URI, filename, encodedHistoryPart); err != nil { + logger.Error(archiver.ArchiveTransientErrorMsg, tag.ArchivalArchiveFailReason(errWriteFile), tag.Error(err)) + handler.Counter(metrics.HistoryArchiverArchiveTransientErrorCount.GetMetricName()).Record(1) + return err + } + + totalUploadSize = totalUploadSize + int64(binary.Size(encodedHistoryPart)) + } + + if err := saveHistoryIteratorState(ctx, featureCatalog, historyIterator, part, &progress); err != nil { + return err + } + } + + handler.Counter(metrics.HistoryArchiverTotalUploadSize.GetMetricName()).Record(totalUploadSize) + handler.Counter(metrics.HistoryArchiverHistorySize.GetMetricName()).Record(totalUploadSize) + handler.Counter(metrics.HistoryArchiverArchiveSuccessCount.GetMetricName()).Record(1) + return +} + +// Get is used to access an archived history. When context expires method should stop trying to fetch history. +// The URI identifies the resource from which history should be accessed and it is up to the implementor to interpret this URI. +// This method should thrift errors - see filestore as an example. 
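Get pages through the archived parts using the getHistoryToken declared above: CloseFailoverVersion pins the history version, CurrentPart and HighestPart track which numbered .history object is being read, and BatchIdxOffset records how far into a part the previous page stopped. The serialized token is opaque to callers; the sketch below mirrors those fields and assumes a JSON encoding purely for illustration — serializeToken and deserializeGetHistoryToken are defined elsewhere and may encode differently.

package main

import (
	"encoding/json"
	"fmt"
)

// Mirrors the fields Get uses to resume pagination; illustrative only.
type getHistoryToken struct {
	CloseFailoverVersion int64
	HighestPart          int
	CurrentPart          int
	BatchIdxOffset       int
}

func main() {
	token := getHistoryToken{CloseFailoverVersion: -24, HighestPart: 5, CurrentPart: 2}
	blob, _ := json.Marshal(token) // what request.NextPageToken could carry
	var next getHistoryToken
	_ = json.Unmarshal(blob, &next)
	fmt.Printf("resume at part %d of %d, batch offset %d\n", next.CurrentPart, next.HighestPart, next.BatchIdxOffset)
}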
+func (h *historyArchiver) Get(ctx context.Context, URI archiver.URI, request *archiver.GetHistoryRequest) (*archiver.GetHistoryResponse, error) { + + err := h.ValidateURI(URI) + if err != nil { + return nil, serviceerror.NewInvalidArgument(archiver.ErrInvalidURI.Error()) + } + + if err := archiver.ValidateGetRequest(request); err != nil { + return nil, serviceerror.NewInvalidArgument(archiver.ErrInvalidGetHistoryRequest.Error()) + } + + var token *getHistoryToken + if request.NextPageToken != nil { + token, err = deserializeGetHistoryToken(request.NextPageToken) + if err != nil { + return nil, serviceerror.NewInvalidArgument(archiver.ErrNextPageTokenCorrupted.Error()) + } + } else { + highestVersion, historyhighestPart, historyCurrentPart, err := h.getHighestVersion(ctx, URI, request) + if err != nil { + return nil, serviceerror.NewUnavailable(err.Error()) + } + if highestVersion == nil { + return nil, serviceerror.NewNotFound(archiver.ErrHistoryNotExist.Error()) + } + token = &getHistoryToken{ + CloseFailoverVersion: *highestVersion, + HighestPart: *historyhighestPart, + CurrentPart: *historyCurrentPart, + BatchIdxOffset: 0, + } + } + + response := &archiver.GetHistoryResponse{} + response.HistoryBatches = []*historypb.History{} + numOfEvents := 0 + encoder := codec.NewJSONPBEncoder() + +outer: + for token.CurrentPart <= token.HighestPart { + + filename := constructHistoryFilenameMultipart(request.NamespaceID, request.WorkflowID, request.RunID, token.CloseFailoverVersion, token.CurrentPart) + encodedHistoryBatches, err := h.gcloudStorage.Get(ctx, URI, filename) + if err != nil { + return nil, serviceerror.NewUnavailable(err.Error()) + } + if encodedHistoryBatches == nil { + return nil, serviceerror.NewInternal("Fail retrieving history file: " + URI.String() + "/" + filename) + } + + batches, err := encoder.DecodeHistories(encodedHistoryBatches) + if err != nil { + return nil, serviceerror.NewInternal(err.Error()) + } + // trim the batches in the beginning based on token.BatchIdxOffset + batches = batches[token.BatchIdxOffset:] + + for idx, batch := range batches { + response.HistoryBatches = append(response.HistoryBatches, batch) + token.BatchIdxOffset++ + numOfEvents += len(batch.Events) + + if numOfEvents >= request.PageSize { + if idx == len(batches)-1 { + // handle the edge case where page size is meeted after adding the last batch + token.BatchIdxOffset = 0 + token.CurrentPart++ + } + break outer + } + } + + // reset the offset to 0 as we will read a new page + token.BatchIdxOffset = 0 + token.CurrentPart++ + + } + + if token.CurrentPart <= token.HighestPart { + nextToken, err := serializeToken(token) + if err != nil { + return nil, serviceerror.NewInternal(err.Error()) + } + response.NextPageToken = nextToken + } + + return response, nil +} + +// ValidateURI is used to define what a valid URI for an implementation is. 
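validateURI below only accepts archival locations of the form gs://bucket/path: the scheme must match URIScheme ("gs") and both the bucket (hostname) and the object path must be non-empty, after which ValidateURI additionally probes the bucket via Exist. The standalone sketch below approximates that shape check with net/url; it is not the actual archiver.NewURI/validateURI code, and the helper name is an assumption.

package main

import (
	"fmt"
	"net/url"
)

// Roughly mirrors the checks in validateURI: gs scheme, non-empty bucket and path.
func looksLikeGcloudArchivalURI(raw string) bool {
	u, err := url.Parse(raw)
	if err != nil {
		return false
	}
	return u.Scheme == "gs" && u.Hostname() != "" && u.Path != ""
}

func main() {
	for _, raw := range []string{
		"gs://my-bucket-cad/temporal_archival/development", // valid
		"gs://my-bucket-cad",                                // rejected: no object path
		"wrongscheme:///a/b/c",                              // rejected: wrong scheme
	} {
		fmt.Println(raw, looksLikeGcloudArchivalURI(raw))
	}
}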
+func (h *historyArchiver) ValidateURI(URI archiver.URI) (err error) { + + if err = h.validateURI(URI); err == nil { + _, err = h.gcloudStorage.Exist(context.Background(), URI, "") + } + + return +} + +func (h *historyArchiver) validateURI(URI archiver.URI) (err error) { + if URI.Scheme() != URIScheme { + return archiver.ErrURISchemeMismatch + } + + if URI.Path() == "" || URI.Hostname() == "" { + return archiver.ErrInvalidURI + } + + return +} + +func historyMutated(request *archiver.ArchiveHistoryRequest, historyBatches []*historypb.History, isLast bool) bool { + lastBatch := historyBatches[len(historyBatches)-1].Events + lastEvent := lastBatch[len(lastBatch)-1] + lastFailoverVersion := lastEvent.GetVersion() + if lastFailoverVersion > request.CloseFailoverVersion { + return true + } + + if !isLast { + return false + } + lastEventID := lastEvent.GetEventId() + return lastFailoverVersion != request.CloseFailoverVersion || lastEventID+1 != request.NextEventID +} + +func (h *historyArchiver) getHighestVersion(ctx context.Context, URI archiver.URI, request *archiver.GetHistoryRequest) (*int64, *int, *int, error) { + + filenames, err := h.gcloudStorage.Query(ctx, URI, constructHistoryFilenamePrefix(request.NamespaceID, request.WorkflowID, request.RunID)) + + if err != nil { + return nil, nil, nil, err + } + + var highestVersion *int64 + var highestVersionPart *int + var lowestVersionPart *int + + for _, filename := range filenames { + version, partVersionID, err := extractCloseFailoverVersion(filepath.Base(filename)) + if err != nil || (request.CloseFailoverVersion != nil && version != *request.CloseFailoverVersion) { + continue + } + + if highestVersion == nil || version > *highestVersion { + highestVersion = &version + highestVersionPart = new(int) + lowestVersionPart = new(int) + } + + if *highestVersion == version { + if highestVersionPart == nil || partVersionID > *highestVersionPart { + highestVersionPart = &partVersionID + } + + if lowestVersionPart == nil || partVersionID < *lowestVersionPart { + lowestVersionPart = &partVersionID + } + } + + } + + return highestVersion, highestVersionPart, lowestVersionPart, nil +} + +func loadHistoryIterator(ctx context.Context, request *archiver.ArchiveHistoryRequest, executionManager persistence.ExecutionManager, featureCatalog *archiver.ArchiveFeatureCatalog, progress *progress) (historyIterator archiver.HistoryIterator, err error) { + + defer func() { + if err != nil || historyIterator == nil { + historyIterator, err = archiver.NewHistoryIteratorFromState(request, executionManager, targetHistoryBlobSize, nil) + } + }() + + if featureCatalog.ProgressManager != nil { + if featureCatalog.ProgressManager.HasProgress(ctx) { + err = featureCatalog.ProgressManager.LoadProgress(ctx, &progress) + if err == nil { + historyIterator, err = archiver.NewHistoryIteratorFromState(request, executionManager, targetHistoryBlobSize, progress.IteratorState) + } + } + + } + return +} + +func saveHistoryIteratorState(ctx context.Context, featureCatalog *archiver.ArchiveFeatureCatalog, historyIterator archiver.HistoryIterator, currentPartNum int, progress *progress) (err error) { + var state []byte + if featureCatalog.ProgressManager != nil { + state, err = historyIterator.GetState() + if err == nil { + progress.CurrentPageNumber = currentPartNum + 1 + progress.IteratorState = state + + err = featureCatalog.ProgressManager.RecordProgress(ctx, progress) + } + } + + return err +} diff -Nru temporal-1.21.5-1/src/common/archiver/gcloud/history_archiver_test.go 
temporal-1.22.5/src/common/archiver/gcloud/history_archiver_test.go --- temporal-1.21.5-1/src/common/archiver/gcloud/history_archiver_test.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/archiver/gcloud/history_archiver_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,588 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package gcloud + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + enumspb "go.temporal.io/api/enums/v1" + historypb "go.temporal.io/api/history/v1" + "go.temporal.io/api/serviceerror" + + archiverspb "go.temporal.io/server/api/archiver/v1" + "go.temporal.io/server/common" + "go.temporal.io/server/common/archiver" + "go.temporal.io/server/common/archiver/gcloud/connector" + "go.temporal.io/server/common/convert" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/primitives/timestamp" +) + +const ( + testNamespaceID = "test-namespace-id" + testNamespace = "test-namespace" + testWorkflowID = "test-workflow-id" + testRunID = "test-run-id" + testNextEventID = 1800 + testCloseFailoverVersion = 100 + testPageSize = 100 + exampleHistoryRecord = `[{"events":[{"eventId":1,"eventTime": "2020-07-30T00:30:03.082421843Z","eventType":"WorkflowExecutionStarted","version":-24,"taskId":5242897,"workflowExecutionStartedEventAttributes":{"workflowType":{"name":"MobileOnlyWorkflow::processMobileOnly"},"taskQueue":{"name":"MobileOnly"},"input":null,"workflowExecutionTimeout":"300s","workflowTaskTimeout":"60s","originalExecutionRunId":"1fd5d4c8-1590-4a0a-8027-535e8729de8e","identity":"","firstExecutionRunId":"1fd5d4c8-1590-4a0a-8027-535e8729de8e","attempt":1,"firstWorkflowTaskBackoff":"0s"}}]}]` + twoEventsExampleHistoryRecord = `[{"events":[{"eventId":1,"eventTime": 
"2020-07-30T00:30:03.082421843Z","eventType":"WorkflowExecutionStarted","version":-24,"taskId":5242897,"workflowExecutionStartedEventAttributes":{"workflowType":{"name":"MobileOnlyWorkflow::processMobileOnly"},"taskQueue":{"name":"MobileOnly"},"input":null,"workflowExecutionTimeout":"300s","workflowTaskTimeout":"60s","originalExecutionRunId":"1fd5d4c8-1590-4a0a-8027-535e8729de8e","identity":"","firstExecutionRunId":"1fd5d4c8-1590-4a0a-8027-535e8729de8e","attempt":1,"firstWorkflowTaskBackoff":"0s"}},{"eventId":2,"eventTime": "2020-07-30T00:30:03.082421843Z","eventType":"WorkflowExecutionStarted","version":-24,"taskId":5242897,"workflowExecutionStartedEventAttributes":{"workflowType":{"name":"MobileOnlyWorkflow::processMobileOnly"},"taskQueue":{"name":"MobileOnly"},"input":null,"workflowExecutionTimeout":"300s","workflowTaskTimeout":"60s","originalExecutionRunId":"1fd5d4c8-1590-4a0a-8027-535e8729de8e","identity":"","firstExecutionRunId":"1fd5d4c8-1590-4a0a-8027-535e8729de8e","attempt":1,"firstWorkflowTaskBackoff":"0s"}}]}]` +) + +var ( + testBranchToken = []byte{1, 2, 3} +) + +func (h *historyArchiverSuite) SetupTest() { + h.Assertions = require.New(h.T()) + h.controller = gomock.NewController(h.T()) + h.container = &archiver.HistoryBootstrapContainer{ + Logger: log.NewNoopLogger(), + MetricsHandler: metrics.NoopMetricsHandler, + } + h.testArchivalURI, _ = archiver.NewURI("gs://my-bucket-cad/temporal_archival/development") +} + +func (h *historyArchiverSuite) TearDownTest() { + h.controller.Finish() +} + +func TestHistoryArchiverSuite(t *testing.T) { + suite.Run(t, new(historyArchiverSuite)) +} + +type historyArchiverSuite struct { + *require.Assertions + suite.Suite + + controller *gomock.Controller + + container *archiver.HistoryBootstrapContainer + testArchivalURI archiver.URI +} + +func getCanceledContext() context.Context { + ctx, cancel := context.WithCancel(context.Background()) + cancel() + return ctx +} + +func (h *historyArchiverSuite) TestValidateURI() { + ctx := context.Background() + testCases := []struct { + URI string + expectedErr error + }{ + { + URI: "wrongscheme:///a/b/c", + expectedErr: archiver.ErrURISchemeMismatch, + }, + { + URI: "gs:my-bucket-cad/temporal_archival/development", + expectedErr: archiver.ErrInvalidURI, + }, + { + URI: "gs://", + expectedErr: archiver.ErrInvalidURI, + }, + { + URI: "gs://my-bucket-cad", + expectedErr: archiver.ErrInvalidURI, + }, + { + URI: "gs:/my-bucket-cad/temporal_archival/development", + expectedErr: archiver.ErrInvalidURI, + }, + { + URI: "gs://my-bucket-cad/temporal_archival/development", + expectedErr: nil, + }, + } + + storageWrapper := connector.NewMockClient(h.controller) + storageWrapper.EXPECT().Exist(ctx, gomock.Any(), "").Return(false, nil) + historyArchiver := new(historyArchiver) + historyArchiver.gcloudStorage = storageWrapper + for _, tc := range testCases { + URI, err := archiver.NewURI(tc.URI) + h.NoError(err) + h.Equal(tc.expectedErr, historyArchiver.ValidateURI(URI)) + } +} + +func (h *historyArchiverSuite) TestArchive_Fail_InvalidURI() { + mockStorageClient := connector.NewMockGcloudStorageClient(h.controller) + storageWrapper, _ := connector.NewClientWithParams(mockStorageClient) + + historyIterator := archiver.NewMockHistoryIterator(h.controller) + + historyArchiver := newHistoryArchiver(h.container, historyIterator, storageWrapper) + request := &archiver.ArchiveHistoryRequest{ + NamespaceID: testNamespaceID, + Namespace: testNamespace, + WorkflowID: testWorkflowID, + RunID: testRunID, + BranchToken: 
testBranchToken, + NextEventID: testNextEventID, + CloseFailoverVersion: testCloseFailoverVersion, + } + URI, err := archiver.NewURI("wrongscheme://") + h.NoError(err) + err = historyArchiver.Archive(context.Background(), URI, request) + h.Error(err) +} + +func (h *historyArchiverSuite) TestArchive_Fail_InvalidRequest() { + ctx := context.Background() + storageWrapper := connector.NewMockClient(h.controller) + storageWrapper.EXPECT().Exist(ctx, h.testArchivalURI, "").Return(true, nil) + + historyIterator := archiver.NewMockHistoryIterator(h.controller) + + historyArchiver := newHistoryArchiver(h.container, historyIterator, storageWrapper) + request := &archiver.ArchiveHistoryRequest{ + NamespaceID: testNamespaceID, + Namespace: testNamespace, + WorkflowID: "", + RunID: testRunID, + BranchToken: testBranchToken, + NextEventID: testNextEventID, + CloseFailoverVersion: testCloseFailoverVersion, + } + + err := historyArchiver.Archive(ctx, h.testArchivalURI, request) + h.Error(err) +} + +func (h *historyArchiverSuite) TestArchive_Fail_ErrorOnReadHistory() { + ctx := context.Background() + storageWrapper := connector.NewMockClient(h.controller) + storageWrapper.EXPECT().Exist(ctx, h.testArchivalURI, "").Return(true, nil) + + historyIterator := archiver.NewMockHistoryIterator(h.controller) + gomock.InOrder( + historyIterator.EXPECT().HasNext().Return(true), + historyIterator.EXPECT().Next(gomock.Any()).Return(nil, errors.New("some random error")), + ) + + historyArchiver := newHistoryArchiver(h.container, historyIterator, storageWrapper) + request := &archiver.ArchiveHistoryRequest{ + NamespaceID: testNamespaceID, + Namespace: testNamespace, + WorkflowID: testWorkflowID, + RunID: testRunID, + BranchToken: testBranchToken, + NextEventID: testNextEventID, + CloseFailoverVersion: testCloseFailoverVersion, + } + err := historyArchiver.Archive(ctx, h.testArchivalURI, request) + h.Error(err) +} + +func (h *historyArchiverSuite) TestArchive_Fail_TimeoutWhenReadingHistory() { + + ctx := getCanceledContext() + storageWrapper := connector.NewMockClient(h.controller) + storageWrapper.EXPECT().Exist(gomock.Any(), gomock.Any(), "").Return(true, nil) + + historyIterator := archiver.NewMockHistoryIterator(h.controller) + gomock.InOrder( + historyIterator.EXPECT().HasNext().Return(true), + historyIterator.EXPECT().Next(gomock.Any()).Return(nil, serviceerror.NewResourceExhausted(enumspb.RESOURCE_EXHAUSTED_CAUSE_RPS_LIMIT, "")), + ) + + historyArchiver := newHistoryArchiver(h.container, historyIterator, storageWrapper) + request := &archiver.ArchiveHistoryRequest{ + NamespaceID: testNamespaceID, + Namespace: testNamespace, + WorkflowID: testWorkflowID, + RunID: testRunID, + BranchToken: testBranchToken, + NextEventID: testNextEventID, + CloseFailoverVersion: testCloseFailoverVersion, + } + err := historyArchiver.Archive(ctx, h.testArchivalURI, request) + h.Error(err) +} + +func (h *historyArchiverSuite) TestArchive_Fail_HistoryMutated() { + ctx := context.Background() + storageWrapper := connector.NewMockClient(h.controller) + storageWrapper.EXPECT().Exist(ctx, h.testArchivalURI, "").Return(true, nil) + + historyIterator := archiver.NewMockHistoryIterator(h.controller) + historyBatches := []*historypb.History{ + { + Events: []*historypb.HistoryEvent{ + { + EventId: common.FirstEventID + 1, + EventTime: timestamp.TimePtr(time.Now().UTC()), + Version: testCloseFailoverVersion + 1, + }, + }, + }, + } + historyBlob := &archiverspb.HistoryBlob{ + Header: &archiverspb.HistoryBlobHeader{ + IsLast: true, + }, + Body: 
historyBatches, + } + gomock.InOrder( + historyIterator.EXPECT().HasNext().Return(true), + historyIterator.EXPECT().Next(gomock.Any()).Return(historyBlob, nil), + ) + + historyArchiver := newHistoryArchiver(h.container, historyIterator, storageWrapper) + request := &archiver.ArchiveHistoryRequest{ + NamespaceID: testNamespaceID, + Namespace: testNamespace, + WorkflowID: testWorkflowID, + RunID: testRunID, + BranchToken: testBranchToken, + NextEventID: testNextEventID, + CloseFailoverVersion: testCloseFailoverVersion, + } + err := historyArchiver.Archive(ctx, h.testArchivalURI, request) + h.Error(err) +} + +func (h *historyArchiverSuite) TestArchive_Fail_NonRetryableErrorOption() { + + ctx := context.Background() + storageWrapper := connector.NewMockClient(h.controller) + storageWrapper.EXPECT().Exist(ctx, h.testArchivalURI, "").Return(true, nil) + + historyIterator := archiver.NewMockHistoryIterator(h.controller) + gomock.InOrder( + historyIterator.EXPECT().HasNext().Return(true), + historyIterator.EXPECT().Next(gomock.Any()).Return(nil, errors.New("upload non-retryable error")), + ) + + historyArchiver := newHistoryArchiver(h.container, historyIterator, storageWrapper) + request := &archiver.ArchiveHistoryRequest{ + NamespaceID: testNamespaceID, + Namespace: testNamespace, + WorkflowID: testWorkflowID, + RunID: testRunID, + BranchToken: testBranchToken, + NextEventID: testNextEventID, + CloseFailoverVersion: testCloseFailoverVersion, + } + err := historyArchiver.Archive(ctx, h.testArchivalURI, request, archiver.GetNonRetryableErrorOption(errUploadNonRetryable)) + h.Equal(errUploadNonRetryable, err) +} + +func (h *historyArchiverSuite) TestArchive_Skip() { + ctx := context.Background() + + storageWrapper := connector.NewMockClient(h.controller) + storageWrapper.EXPECT().Exist(ctx, h.testArchivalURI, "").Return(true, nil) + storageWrapper.EXPECT().Exist(ctx, h.testArchivalURI, gomock.Any()).Return(false, nil) + storageWrapper.EXPECT().Upload(ctx, h.testArchivalURI, gomock.Any(), gomock.Any()).Return(nil) + + historyIterator := archiver.NewMockHistoryIterator(h.controller) + historyBlob := &archiverspb.HistoryBlob{ + Header: &archiverspb.HistoryBlobHeader{ + IsLast: false, + }, + Body: []*historypb.History{ + { + Events: []*historypb.HistoryEvent{ + { + EventId: common.FirstEventID, + EventTime: timestamp.TimePtr(time.Now().UTC()), + Version: testCloseFailoverVersion, + }, + }, + }, + }, + } + gomock.InOrder( + historyIterator.EXPECT().HasNext().Return(true), + historyIterator.EXPECT().Next(gomock.Any()).Return(historyBlob, nil), + historyIterator.EXPECT().HasNext().Return(true), + historyIterator.EXPECT().Next(gomock.Any()).Return(nil, serviceerror.NewNotFound("workflow not found")), + ) + + historyArchiver := newHistoryArchiver(h.container, historyIterator, storageWrapper) + request := &archiver.ArchiveHistoryRequest{ + NamespaceID: testNamespaceID, + Namespace: testNamespace, + WorkflowID: testWorkflowID, + RunID: testRunID, + BranchToken: testBranchToken, + NextEventID: testNextEventID, + CloseFailoverVersion: testCloseFailoverVersion, + } + err := historyArchiver.Archive(ctx, h.testArchivalURI, request) + h.NoError(err) +} + +func (h *historyArchiverSuite) TestArchive_Success() { + + ctx := context.Background() + + storageWrapper := connector.NewMockClient(h.controller) + storageWrapper.EXPECT().Exist(ctx, h.testArchivalURI, gomock.Any()).Return(false, nil).Times(2) + storageWrapper.EXPECT().Upload(ctx, h.testArchivalURI, gomock.Any(), gomock.Any()).Return(nil) + + historyIterator := 
archiver.NewMockHistoryIterator(h.controller) + historyBatches := []*historypb.History{ + { + Events: []*historypb.HistoryEvent{ + { + EventId: common.FirstEventID + 1, + EventTime: timestamp.TimePtr(time.Now().UTC()), + Version: testCloseFailoverVersion, + }, + { + EventId: common.FirstEventID + 2, + EventTime: timestamp.TimePtr(time.Now().UTC()), + Version: testCloseFailoverVersion, + }, + }, + }, + { + Events: []*historypb.HistoryEvent{ + { + EventId: testNextEventID - 1, + EventTime: timestamp.TimePtr(time.Now().UTC()), + Version: testCloseFailoverVersion, + }, + }, + }, + } + historyBlob := &archiverspb.HistoryBlob{ + Header: &archiverspb.HistoryBlobHeader{ + IsLast: true, + }, + Body: historyBatches, + } + gomock.InOrder( + historyIterator.EXPECT().HasNext().Return(true), + historyIterator.EXPECT().Next(gomock.Any()).Return(historyBlob, nil), + historyIterator.EXPECT().HasNext().Return(false), + ) + + historyArchiver := newHistoryArchiver(h.container, historyIterator, storageWrapper) + + request := &archiver.ArchiveHistoryRequest{ + NamespaceID: testNamespaceID, + Namespace: testNamespace, + WorkflowID: testWorkflowID, + RunID: testRunID, + BranchToken: testBranchToken, + NextEventID: testNextEventID, + CloseFailoverVersion: testCloseFailoverVersion, + } + + err := historyArchiver.Archive(ctx, h.testArchivalURI, request) + h.NoError(err) +} + +func (h *historyArchiverSuite) TestGet_Fail_InvalidURI() { + ctx := context.Background() + mockStorageClient := connector.NewMockGcloudStorageClient(h.controller) + storageWrapper, _ := connector.NewClientWithParams(mockStorageClient) + historyIterator := archiver.NewMockHistoryIterator(h.controller) + historyArchiver := newHistoryArchiver(h.container, historyIterator, storageWrapper) + + request := &archiver.GetHistoryRequest{ + NamespaceID: testNamespaceID, + WorkflowID: testWorkflowID, + RunID: testRunID, + PageSize: 100, + } + URI, err := archiver.NewURI("wrongscheme://") + h.NoError(err) + response, err := historyArchiver.Get(ctx, URI, request) + h.Nil(response) + h.Error(err) +} + +func (h *historyArchiverSuite) TestGet_Fail_InvalidToken() { + ctx := context.Background() + mockStorageClient := connector.NewMockGcloudStorageClient(h.controller) + storageWrapper, _ := connector.NewClientWithParams(mockStorageClient) + historyIterator := archiver.NewMockHistoryIterator(h.controller) + historyArchiver := newHistoryArchiver(h.container, historyIterator, storageWrapper) + request := &archiver.GetHistoryRequest{ + NamespaceID: testNamespaceID, + WorkflowID: testWorkflowID, + RunID: testRunID, + PageSize: testPageSize, + NextPageToken: []byte{'r', 'a', 'n', 'd', 'o', 'm'}, + } + URI, err := archiver.NewURI("gs:///") + h.NoError(err) + response, err := historyArchiver.Get(ctx, URI, request) + h.Nil(response) + h.Error(err) + h.IsType(&serviceerror.InvalidArgument{}, err) +} + +func (h *historyArchiverSuite) TestGet_Success_PickHighestVersion() { + ctx := context.Background() + storageWrapper := connector.NewMockClient(h.controller) + storageWrapper.EXPECT().Exist(ctx, h.testArchivalURI, "").Return(true, nil) + storageWrapper.EXPECT().Query(ctx, h.testArchivalURI, gomock.Any()).Return([]string{"905702227796330300141628222723188294514017512010591354159_-24_0.history", "905702227796330300141628222723188294514017512010591354159_-25_0.history"}, nil) + storageWrapper.EXPECT().Get(ctx, h.testArchivalURI, "141323698701063509081739672280485489488911532452831150339470_-24_0.history").Return([]byte(exampleHistoryRecord), nil) + historyIterator := 
archiver.NewMockHistoryIterator(h.controller) + historyArchiver := newHistoryArchiver(h.container, historyIterator, storageWrapper) + request := &archiver.GetHistoryRequest{ + NamespaceID: testNamespaceID, + WorkflowID: testWorkflowID, + RunID: testRunID, + PageSize: testPageSize, + } + + response, err := historyArchiver.Get(ctx, h.testArchivalURI, request) + h.NoError(err) + h.Nil(response.NextPageToken) +} + +func (h *historyArchiverSuite) TestGet_Success_UseProvidedVersion() { + + ctx := context.Background() + storageWrapper := connector.NewMockClient(h.controller) + storageWrapper.EXPECT().Exist(ctx, h.testArchivalURI, "").Return(true, nil) + storageWrapper.EXPECT().Query(ctx, h.testArchivalURI, "141323698701063509081739672280485489488911532452831150339470").Return([]string{"905702227796330300141628222723188294514017512010591354159_-24_0.history", "905702227796330300141628222723188294514017512010591354159_-25_0.history"}, nil) + storageWrapper.EXPECT().Get(ctx, h.testArchivalURI, "141323698701063509081739672280485489488911532452831150339470_-25_0.history").Return([]byte(exampleHistoryRecord), nil) + historyIterator := archiver.NewMockHistoryIterator(h.controller) + historyArchiver := newHistoryArchiver(h.container, historyIterator, storageWrapper) + request := &archiver.GetHistoryRequest{ + NamespaceID: testNamespaceID, + WorkflowID: testWorkflowID, + RunID: testRunID, + PageSize: testPageSize, + CloseFailoverVersion: convert.Int64Ptr(-25), + } + + response, err := historyArchiver.Get(ctx, h.testArchivalURI, request) + h.NoError(err) + h.Nil(response.NextPageToken) +} + +func (h *historyArchiverSuite) TestGet_Success_PageSize() { + + ctx := context.Background() + storageWrapper := connector.NewMockClient(h.controller) + storageWrapper.EXPECT().Exist(ctx, h.testArchivalURI, "").Return(true, nil) + storageWrapper.EXPECT().Query(ctx, h.testArchivalURI, "141323698701063509081739672280485489488911532452831150339470").Return([]string{"905702227796330300141628222723188294514017512010591354159_-24_0.history", "905702227796330300141628222723188294514017512010591354159_-24_1.history", "905702227796330300141628222723188294514017512010591354159_-24_2.history", "905702227796330300141628222723188294514017512010591354159_-24_3.history", "905702227796330300141628222723188294514017512010591354159_-25_0.history"}, nil) + storageWrapper.EXPECT().Get(ctx, h.testArchivalURI, "141323698701063509081739672280485489488911532452831150339470_-24_0.history").Return([]byte(exampleHistoryRecord), nil) + storageWrapper.EXPECT().Get(ctx, h.testArchivalURI, "141323698701063509081739672280485489488911532452831150339470_-24_1.history").Return([]byte(exampleHistoryRecord), nil) + + historyIterator := archiver.NewMockHistoryIterator(h.controller) + historyArchiver := newHistoryArchiver(h.container, historyIterator, storageWrapper) + request := &archiver.GetHistoryRequest{ + NamespaceID: testNamespaceID, + WorkflowID: testWorkflowID, + RunID: testRunID, + PageSize: 2, + } + + response, err := historyArchiver.Get(ctx, h.testArchivalURI, request) + h.NoError(err) + h.NotNil(response.NextPageToken) + h.EqualValues(len(response.HistoryBatches), 2) +} + +func (h *historyArchiverSuite) TestGet_Success_FromToken() { + + ctx := context.Background() + storageWrapper := connector.NewMockClient(h.controller) + storageWrapper.EXPECT().Exist(ctx, h.testArchivalURI, "").Return(true, nil) + storageWrapper.EXPECT().Get(ctx, h.testArchivalURI, 
"141323698701063509081739672280485489488911532452831150339470_-24_2.history").Return([]byte(exampleHistoryRecord), nil) + storageWrapper.EXPECT().Get(ctx, h.testArchivalURI, "141323698701063509081739672280485489488911532452831150339470_-24_3.history").Return([]byte(twoEventsExampleHistoryRecord), nil) + storageWrapper.EXPECT().Get(ctx, h.testArchivalURI, "141323698701063509081739672280485489488911532452831150339470_-24_4.history").Return([]byte(exampleHistoryRecord), nil) + + historyIterator := archiver.NewMockHistoryIterator(h.controller) + historyArchiver := newHistoryArchiver(h.container, historyIterator, storageWrapper) + + token := &getHistoryToken{ + CloseFailoverVersion: -24, + HighestPart: 5, + CurrentPart: 2, + BatchIdxOffset: 0, + } + + nextPageToken, err := serializeToken(token) + h.NoError(err) + + request := &archiver.GetHistoryRequest{ + NamespaceID: testNamespaceID, + WorkflowID: testWorkflowID, + RunID: testRunID, + PageSize: 4, + NextPageToken: nextPageToken, + } + + h.NoError(err) + response, err := historyArchiver.Get(ctx, h.testArchivalURI, request) + h.NoError(err) + h.NotNil(response.NextPageToken) + + token, err = deserializeGetHistoryToken(response.NextPageToken) + h.NoError(err) + + h.EqualValues(5, token.HighestPart) + h.EqualValues(5, token.CurrentPart) + h.EqualValues(3, len(response.HistoryBatches)) + numOfEvents := 0 + for _, batch := range response.HistoryBatches { + numOfEvents += len(batch.Events) + } + + h.EqualValues(4, numOfEvents) +} + +func (h *historyArchiverSuite) TestGet_NoHistory() { + + ctx := context.Background() + storageWrapper := connector.NewMockClient(h.controller) + storageWrapper.EXPECT().Exist(ctx, h.testArchivalURI, "").Return(true, nil) + storageWrapper.EXPECT().Query(ctx, h.testArchivalURI, "141323698701063509081739672280485489488911532452831150339470").Return([]string{}, nil) + + historyIterator := archiver.NewMockHistoryIterator(h.controller) + historyArchiver := newHistoryArchiver(h.container, historyIterator, storageWrapper) + request := &archiver.GetHistoryRequest{ + NamespaceID: testNamespaceID, + WorkflowID: testWorkflowID, + RunID: testRunID, + PageSize: 2, + } + + _, err := historyArchiver.Get(ctx, h.testArchivalURI, request) + h.Assert().IsType(&serviceerror.NotFound{}, err) +} diff -Nru temporal-1.21.5-1/src/common/archiver/gcloud/queryParser.go temporal-1.22.5/src/common/archiver/gcloud/queryParser.go --- temporal-1.21.5-1/src/common/archiver/gcloud/queryParser.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/archiver/gcloud/queryParser.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,253 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -//go:generate mockgen -copyright_file ../../../LICENSE -package $GOPACKAGE -source queryParser.go -destination queryParser_mock.go -mock_names Interface=MockQueryParser - -package gcloud - -import ( - "errors" - "fmt" - "time" - - "github.com/xwb1989/sqlparser" - - "go.temporal.io/server/common/convert" -) - -type ( - // QueryParser parses a limited SQL where clause into a struct - QueryParser interface { - Parse(query string) (*parsedQuery, error) - } - - queryParser struct{} - - parsedQuery struct { - workflowID *string - workflowType *string - startTime time.Time - closeTime time.Time - searchPrecision *string - runID *string - emptyResult bool - } -) - -// All allowed fields for filtering -const ( - WorkflowID = "WorkflowId" - RunID = "RunId" - WorkflowType = "WorkflowType" - CloseTime = "CloseTime" - StartTime = "StartTime" - SearchPrecision = "SearchPrecision" -) - -// Precision specific values -const ( - PrecisionDay = "Day" - PrecisionHour = "Hour" - PrecisionMinute = "Minute" - PrecisionSecond = "Second" -) - -const ( - queryTemplate = "select * from dummy where %s" - - defaultDateTimeFormat = time.RFC3339 -) - -// NewQueryParser creates a new query parser for filestore -func NewQueryParser() QueryParser { - return &queryParser{} -} - -func (p *queryParser) Parse(query string) (*parsedQuery, error) { - stmt, err := sqlparser.Parse(fmt.Sprintf(queryTemplate, query)) - if err != nil { - return nil, err - } - whereExpr := stmt.(*sqlparser.Select).Where.Expr - parsedQuery := &parsedQuery{} - if err := p.convertWhereExpr(whereExpr, parsedQuery); err != nil { - return nil, err - } - - if (parsedQuery.closeTime.IsZero() && parsedQuery.startTime.IsZero()) || (!parsedQuery.closeTime.IsZero() && !parsedQuery.startTime.IsZero()) { - return nil, errors.New("requires a StartTime or CloseTime") - } - - if parsedQuery.searchPrecision == nil { - return nil, errors.New("SearchPrecision is required when searching for a StartTime or CloseTime") - } - - return parsedQuery, nil -} - -func (p *queryParser) convertWhereExpr(expr sqlparser.Expr, parsedQuery *parsedQuery) error { - if expr == nil { - return errors.New("where expression is nil") - } - - switch expr := expr.(type) { - case *sqlparser.ComparisonExpr: - return p.convertComparisonExpr(expr, parsedQuery) - case *sqlparser.AndExpr: - return p.convertAndExpr(expr, parsedQuery) - case *sqlparser.ParenExpr: - return p.convertParenExpr(expr, parsedQuery) - default: - return errors.New("only comparison and \"and\" expression is supported") - } -} - -func (p *queryParser) convertParenExpr(parenExpr *sqlparser.ParenExpr, parsedQuery *parsedQuery) error { - return p.convertWhereExpr(parenExpr.Expr, parsedQuery) -} - -func (p *queryParser) convertAndExpr(andExpr *sqlparser.AndExpr, parsedQuery *parsedQuery) error { - if err := p.convertWhereExpr(andExpr.Left, parsedQuery); err != nil { - return err - } - return p.convertWhereExpr(andExpr.Right, parsedQuery) -} - -func (p *queryParser) convertComparisonExpr(compExpr *sqlparser.ComparisonExpr, parsedQuery *parsedQuery) 
error { - colName, ok := compExpr.Left.(*sqlparser.ColName) - if !ok { - return fmt.Errorf("invalid filter name: %s", sqlparser.String(compExpr.Left)) - } - colNameStr := sqlparser.String(colName) - op := compExpr.Operator - valExpr, ok := compExpr.Right.(*sqlparser.SQLVal) - if !ok { - return fmt.Errorf("invalid value: %s", sqlparser.String(compExpr.Right)) - } - valStr := sqlparser.String(valExpr) - - switch colNameStr { - case WorkflowID: - val, err := extractStringValue(valStr) - if err != nil { - return err - } - if op != "=" { - return fmt.Errorf("only operation = is support for %s", WorkflowID) - } - if parsedQuery.workflowID != nil && *parsedQuery.workflowID != val { - parsedQuery.emptyResult = true - return nil - } - parsedQuery.workflowID = convert.StringPtr(val) - case RunID: - val, err := extractStringValue(valStr) - if err != nil { - return err - } - if op != "=" { - return fmt.Errorf("only operation = is support for %s", RunID) - } - if parsedQuery.runID != nil && *parsedQuery.runID != val { - parsedQuery.emptyResult = true - return nil - } - parsedQuery.runID = convert.StringPtr(val) - case CloseTime: - closeTime, err := convertToTime(valStr) - if err != nil { - return err - } - if op != "=" { - return fmt.Errorf("only operation = is support for %s", CloseTime) - } - parsedQuery.closeTime = closeTime - - case StartTime: - startTime, err := convertToTime(valStr) - if err != nil { - return err - } - if op != "=" { - return fmt.Errorf("only operation = is support for %s", CloseTime) - } - parsedQuery.startTime = startTime - case WorkflowType: - val, err := extractStringValue(valStr) - if err != nil { - return err - } - if op != "=" { - return fmt.Errorf("only operation = is support for %s", WorkflowType) - } - if parsedQuery.workflowType != nil && *parsedQuery.workflowType != val { - parsedQuery.emptyResult = true - return nil - } - parsedQuery.workflowType = convert.StringPtr(val) - case SearchPrecision: - val, err := extractStringValue(valStr) - if err != nil { - return err - } - if op != "=" { - return fmt.Errorf("only operation = is support for %s", SearchPrecision) - } - if parsedQuery.searchPrecision != nil && *parsedQuery.searchPrecision != val { - return fmt.Errorf("only one expression is allowed for %s", SearchPrecision) - } - switch val { - case PrecisionDay: - case PrecisionHour: - case PrecisionMinute: - case PrecisionSecond: - default: - return fmt.Errorf("invalid value for %s: %s", SearchPrecision, val) - } - parsedQuery.searchPrecision = convert.StringPtr(val) - default: - return fmt.Errorf("unknown filter name: %s", colNameStr) - } - - return nil -} - -func convertToTime(timeStr string) (time.Time, error) { - timestampStr, err := extractStringValue(timeStr) - if err != nil { - return time.Time{}, err - } - parsedTime, err := time.Parse(defaultDateTimeFormat, timestampStr) - if err != nil { - return time.Time{}, err - } - return parsedTime, nil -} - -func extractStringValue(s string) (string, error) { - if len(s) >= 2 && s[0] == '\'' && s[len(s)-1] == '\'' { - return s[1 : len(s)-1], nil - } - return "", fmt.Errorf("value %s is not a string value", s) -} diff -Nru temporal-1.21.5-1/src/common/archiver/gcloud/queryParser_mock.go temporal-1.22.5/src/common/archiver/gcloud/queryParser_mock.go --- temporal-1.21.5-1/src/common/archiver/gcloud/queryParser_mock.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/archiver/gcloud/queryParser_mock.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,73 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 
Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Code generated by MockGen. DO NOT EDIT. -// Source: queryParser.go - -// Package gcloud is a generated GoMock package. -package gcloud - -import ( - reflect "reflect" - - gomock "github.com/golang/mock/gomock" -) - -// MockQueryParser is a mock of QueryParser interface. -type MockQueryParser struct { - ctrl *gomock.Controller - recorder *MockQueryParserMockRecorder -} - -// MockQueryParserMockRecorder is the mock recorder for MockQueryParser. -type MockQueryParserMockRecorder struct { - mock *MockQueryParser -} - -// NewMockQueryParser creates a new mock instance. -func NewMockQueryParser(ctrl *gomock.Controller) *MockQueryParser { - mock := &MockQueryParser{ctrl: ctrl} - mock.recorder = &MockQueryParserMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockQueryParser) EXPECT() *MockQueryParserMockRecorder { - return m.recorder -} - -// Parse mocks base method. -func (m *MockQueryParser) Parse(query string) (*parsedQuery, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Parse", query) - ret0, _ := ret[0].(*parsedQuery) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Parse indicates an expected call of Parse. -func (mr *MockQueryParserMockRecorder) Parse(query interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Parse", reflect.TypeOf((*MockQueryParser)(nil).Parse), query) -} diff -Nru temporal-1.21.5-1/src/common/archiver/gcloud/query_parser.go temporal-1.22.5/src/common/archiver/gcloud/query_parser.go --- temporal-1.21.5-1/src/common/archiver/gcloud/query_parser.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/archiver/gcloud/query_parser.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,253 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +//go:generate mockgen -copyright_file ../../../LICENSE -package $GOPACKAGE -source query_parser.go -destination query_parser_mock.go -mock_names Interface=MockQueryParser + +package gcloud + +import ( + "errors" + "fmt" + "time" + + "github.com/xwb1989/sqlparser" + + "go.temporal.io/server/common/convert" +) + +type ( + // QueryParser parses a limited SQL where clause into a struct + QueryParser interface { + Parse(query string) (*parsedQuery, error) + } + + queryParser struct{} + + parsedQuery struct { + workflowID *string + workflowType *string + startTime time.Time + closeTime time.Time + searchPrecision *string + runID *string + emptyResult bool + } +) + +// All allowed fields for filtering +const ( + WorkflowID = "WorkflowId" + RunID = "RunId" + WorkflowType = "WorkflowType" + CloseTime = "CloseTime" + StartTime = "StartTime" + SearchPrecision = "SearchPrecision" +) + +// Precision specific values +const ( + PrecisionDay = "Day" + PrecisionHour = "Hour" + PrecisionMinute = "Minute" + PrecisionSecond = "Second" +) + +const ( + queryTemplate = "select * from dummy where %s" + + defaultDateTimeFormat = time.RFC3339 +) + +// NewQueryParser creates a new query parser for filestore +func NewQueryParser() QueryParser { + return &queryParser{} +} + +func (p *queryParser) Parse(query string) (*parsedQuery, error) { + stmt, err := sqlparser.Parse(fmt.Sprintf(queryTemplate, query)) + if err != nil { + return nil, err + } + whereExpr := stmt.(*sqlparser.Select).Where.Expr + parsedQuery := &parsedQuery{} + if err := p.convertWhereExpr(whereExpr, parsedQuery); err != nil { + return nil, err + } + + if (parsedQuery.closeTime.IsZero() && parsedQuery.startTime.IsZero()) || (!parsedQuery.closeTime.IsZero() && !parsedQuery.startTime.IsZero()) { + return nil, errors.New("requires a StartTime or CloseTime") + } + + if parsedQuery.searchPrecision == nil { + return nil, errors.New("SearchPrecision is required when searching for a StartTime or CloseTime") + } + + return parsedQuery, nil +} + +func (p *queryParser) convertWhereExpr(expr sqlparser.Expr, parsedQuery *parsedQuery) error { + if expr == nil { + return errors.New("where expression is nil") + } + + switch expr := expr.(type) { + case *sqlparser.ComparisonExpr: + return p.convertComparisonExpr(expr, parsedQuery) + case *sqlparser.AndExpr: + return p.convertAndExpr(expr, parsedQuery) + case *sqlparser.ParenExpr: + return p.convertParenExpr(expr, parsedQuery) + default: + 
return errors.New("only comparison and \"and\" expression is supported") + } +} + +func (p *queryParser) convertParenExpr(parenExpr *sqlparser.ParenExpr, parsedQuery *parsedQuery) error { + return p.convertWhereExpr(parenExpr.Expr, parsedQuery) +} + +func (p *queryParser) convertAndExpr(andExpr *sqlparser.AndExpr, parsedQuery *parsedQuery) error { + if err := p.convertWhereExpr(andExpr.Left, parsedQuery); err != nil { + return err + } + return p.convertWhereExpr(andExpr.Right, parsedQuery) +} + +func (p *queryParser) convertComparisonExpr(compExpr *sqlparser.ComparisonExpr, parsedQuery *parsedQuery) error { + colName, ok := compExpr.Left.(*sqlparser.ColName) + if !ok { + return fmt.Errorf("invalid filter name: %s", sqlparser.String(compExpr.Left)) + } + colNameStr := sqlparser.String(colName) + op := compExpr.Operator + valExpr, ok := compExpr.Right.(*sqlparser.SQLVal) + if !ok { + return fmt.Errorf("invalid value: %s", sqlparser.String(compExpr.Right)) + } + valStr := sqlparser.String(valExpr) + + switch colNameStr { + case WorkflowID: + val, err := extractStringValue(valStr) + if err != nil { + return err + } + if op != "=" { + return fmt.Errorf("only operation = is support for %s", WorkflowID) + } + if parsedQuery.workflowID != nil && *parsedQuery.workflowID != val { + parsedQuery.emptyResult = true + return nil + } + parsedQuery.workflowID = convert.StringPtr(val) + case RunID: + val, err := extractStringValue(valStr) + if err != nil { + return err + } + if op != "=" { + return fmt.Errorf("only operation = is support for %s", RunID) + } + if parsedQuery.runID != nil && *parsedQuery.runID != val { + parsedQuery.emptyResult = true + return nil + } + parsedQuery.runID = convert.StringPtr(val) + case CloseTime: + closeTime, err := convertToTime(valStr) + if err != nil { + return err + } + if op != "=" { + return fmt.Errorf("only operation = is support for %s", CloseTime) + } + parsedQuery.closeTime = closeTime + + case StartTime: + startTime, err := convertToTime(valStr) + if err != nil { + return err + } + if op != "=" { + return fmt.Errorf("only operation = is support for %s", CloseTime) + } + parsedQuery.startTime = startTime + case WorkflowType: + val, err := extractStringValue(valStr) + if err != nil { + return err + } + if op != "=" { + return fmt.Errorf("only operation = is support for %s", WorkflowType) + } + if parsedQuery.workflowType != nil && *parsedQuery.workflowType != val { + parsedQuery.emptyResult = true + return nil + } + parsedQuery.workflowType = convert.StringPtr(val) + case SearchPrecision: + val, err := extractStringValue(valStr) + if err != nil { + return err + } + if op != "=" { + return fmt.Errorf("only operation = is support for %s", SearchPrecision) + } + if parsedQuery.searchPrecision != nil && *parsedQuery.searchPrecision != val { + return fmt.Errorf("only one expression is allowed for %s", SearchPrecision) + } + switch val { + case PrecisionDay: + case PrecisionHour: + case PrecisionMinute: + case PrecisionSecond: + default: + return fmt.Errorf("invalid value for %s: %s", SearchPrecision, val) + } + parsedQuery.searchPrecision = convert.StringPtr(val) + default: + return fmt.Errorf("unknown filter name: %s", colNameStr) + } + + return nil +} + +func convertToTime(timeStr string) (time.Time, error) { + timestampStr, err := extractStringValue(timeStr) + if err != nil { + return time.Time{}, err + } + parsedTime, err := time.Parse(defaultDateTimeFormat, timestampStr) + if err != nil { + return time.Time{}, err + } + return parsedTime, nil +} + +func 
extractStringValue(s string) (string, error) { + if len(s) >= 2 && s[0] == '\'' && s[len(s)-1] == '\'' { + return s[1 : len(s)-1], nil + } + return "", fmt.Errorf("value %s is not a string value", s) +} diff -Nru temporal-1.21.5-1/src/common/archiver/gcloud/query_parser_mock.go temporal-1.22.5/src/common/archiver/gcloud/query_parser_mock.go --- temporal-1.21.5-1/src/common/archiver/gcloud/query_parser_mock.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/archiver/gcloud/query_parser_mock.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,73 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Code generated by MockGen. DO NOT EDIT. +// Source: query_parser.go + +// Package gcloud is a generated GoMock package. +package gcloud + +import ( + reflect "reflect" + + gomock "github.com/golang/mock/gomock" +) + +// MockQueryParser is a mock of QueryParser interface. +type MockQueryParser struct { + ctrl *gomock.Controller + recorder *MockQueryParserMockRecorder +} + +// MockQueryParserMockRecorder is the mock recorder for MockQueryParser. +type MockQueryParserMockRecorder struct { + mock *MockQueryParser +} + +// NewMockQueryParser creates a new mock instance. +func NewMockQueryParser(ctrl *gomock.Controller) *MockQueryParser { + mock := &MockQueryParser{ctrl: ctrl} + mock.recorder = &MockQueryParserMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockQueryParser) EXPECT() *MockQueryParserMockRecorder { + return m.recorder +} + +// Parse mocks base method. +func (m *MockQueryParser) Parse(query string) (*parsedQuery, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Parse", query) + ret0, _ := ret[0].(*parsedQuery) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Parse indicates an expected call of Parse. 
+func (mr *MockQueryParserMockRecorder) Parse(query interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Parse", reflect.TypeOf((*MockQueryParser)(nil).Parse), query) +} diff -Nru temporal-1.21.5-1/src/common/archiver/gcloud/visibilityArchiver.go temporal-1.22.5/src/common/archiver/gcloud/visibilityArchiver.go --- temporal-1.21.5-1/src/common/archiver/gcloud/visibilityArchiver.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/archiver/gcloud/visibilityArchiver.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,334 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
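For orientation: the query parser introduced in the hunks above accepts a small SQL-like filter grammar. Only '=' comparisons are allowed; the recognized filters are WorkflowID, RunID, WorkflowType, CloseTime, StartTime and SearchPrecision; values must be single-quoted strings; expressions may only be combined with AND; and SearchPrecision is restricted to Day, Hour, Minute or Second. Below is a minimal, hedged sketch of driving the parser (it would have to live inside package gcloud because parsedQuery is unexported; the wrapper function name is hypothetical, and the query string is the one used by the tests later in this diff). In the test suite the concrete parser is swapped for the generated MockQueryParser via visibilityArchiver.queryParser, which is why the test requests carry placeholder query strings such as "parsed by mockParser".

// Illustrative sketch only, not part of the upstream patch.
func exampleParseQuery() error {
    p := NewQueryParser()
    parsed, err := p.Parse("WorkflowType='type::example' AND CloseTime='2020-02-05T11:00:00Z' AND SearchPrecision='Day'")
    if err != nil {
        // e.g. an unknown filter name, an operator other than '=',
        // or a value that is not a single-quoted string.
        return err
    }
    // parsed carries workflowType, closeTime and searchPrecision, which
    // drive the prefix-based lookup in the visibility archiver below.
    _ = parsed
    return nil
}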
- -package gcloud - -import ( - "context" - "errors" - "fmt" - "path/filepath" - "strings" - "time" - - "go.temporal.io/api/serviceerror" - - archiverspb "go.temporal.io/server/api/archiver/v1" - "go.temporal.io/server/common/archiver" - "go.temporal.io/server/common/archiver/gcloud/connector" - "go.temporal.io/server/common/config" - "go.temporal.io/server/common/log/tag" - "go.temporal.io/server/common/metrics" - "go.temporal.io/server/common/primitives/timestamp" - "go.temporal.io/server/common/searchattribute" -) - -const ( - errEncodeVisibilityRecord = "failed to encode visibility record" - indexKeyStartTimeout = "startTimeout" - indexKeyCloseTimeout = "closeTimeout" - timeoutInSeconds = 5 -) - -var ( - errRetryable = errors.New("retryable error") -) - -type ( - visibilityArchiver struct { - container *archiver.VisibilityBootstrapContainer - gcloudStorage connector.Client - queryParser QueryParser - } - - queryVisibilityToken struct { - Offset int - } - - queryVisibilityRequest struct { - namespaceID string - pageSize int - nextPageToken []byte - parsedQuery *parsedQuery - } -) - -func newVisibilityArchiver(container *archiver.VisibilityBootstrapContainer, storage connector.Client) *visibilityArchiver { - return &visibilityArchiver{ - container: container, - gcloudStorage: storage, - queryParser: NewQueryParser(), - } -} - -// NewVisibilityArchiver creates a new archiver.VisibilityArchiver based on filestore -func NewVisibilityArchiver(container *archiver.VisibilityBootstrapContainer, config *config.GstorageArchiver) (archiver.VisibilityArchiver, error) { - storage, err := connector.NewClient(context.Background(), config) - return newVisibilityArchiver(container, storage), err -} - -// Archive is used to archive one workflow visibility record. -// Check the Archive() method of the HistoryArchiver interface in Step 2 for parameters' meaning and requirements. -// The only difference is that the ArchiveOption parameter won't include an option for recording process. -// Please make sure your implementation is lossless. If any in-memory batching mechanism is used, then those batched records will be lost during server restarts. -// This method will be invoked when workflow closes. Note that because of conflict resolution, it is possible for a workflow to through the closing process multiple times, which means that this method can be invoked more than once after a workflow closes. -func (v *visibilityArchiver) Archive(ctx context.Context, URI archiver.URI, request *archiverspb.VisibilityRecord, opts ...archiver.ArchiveOption) (err error) { - handler := v.container.MetricsHandler.WithTags(metrics.OperationTag(metrics.HistoryArchiverScope), metrics.NamespaceTag(request.Namespace)) - featureCatalog := archiver.GetFeatureCatalog(opts...) 
- startTime := time.Now().UTC() - defer func() { - handler.Timer(metrics.ServiceLatency.GetMetricName()).Record(time.Since(startTime)) - if err != nil { - if isRetryableError(err) { - handler.Counter(metrics.VisibilityArchiverArchiveTransientErrorCount.GetMetricName()).Record(1) - } else { - handler.Counter(metrics.VisibilityArchiverArchiveNonRetryableErrorCount.GetMetricName()).Record(1) - if featureCatalog.NonRetryableError != nil { - err = featureCatalog.NonRetryableError() - } - } - } - }() - - logger := archiver.TagLoggerWithArchiveVisibilityRequestAndURI(v.container.Logger, request, URI.String()) - - if err := v.ValidateURI(URI); err != nil { - if isRetryableError(err) { - logger.Error(archiver.ArchiveTransientErrorMsg, tag.ArchivalArchiveFailReason(archiver.ErrReasonInvalidURI), tag.Error(err)) - return err - } - logger.Error(archiver.ArchiveNonRetryableErrorMsg, tag.ArchivalArchiveFailReason(archiver.ErrReasonInvalidURI), tag.Error(err)) - return err - } - - if err := archiver.ValidateVisibilityArchivalRequest(request); err != nil { - logger.Error(archiver.ArchiveNonRetryableErrorMsg, tag.ArchivalArchiveFailReason(archiver.ErrReasonInvalidArchiveRequest), tag.Error(err)) - return err - } - - encodedVisibilityRecord, err := encode(request) - if err != nil { - logger.Error(archiver.ArchiveNonRetryableErrorMsg, tag.ArchivalArchiveFailReason(errEncodeVisibilityRecord), tag.Error(err)) - return err - } - - // The filename has the format: closeTimestamp_hash(runID).visibility - // This format allows the archiver to sort all records without reading the file contents - filename := constructVisibilityFilename(request.GetNamespaceId(), request.WorkflowTypeName, request.GetWorkflowId(), request.GetRunId(), indexKeyCloseTimeout, timestamp.TimeValue(request.CloseTime)) - if err := v.gcloudStorage.Upload(ctx, URI, filename, encodedVisibilityRecord); err != nil { - logger.Error(archiver.ArchiveTransientErrorMsg, tag.ArchivalArchiveFailReason(errWriteFile), tag.Error(err)) - return errRetryable - } - - filename = constructVisibilityFilename(request.GetNamespaceId(), request.WorkflowTypeName, request.GetWorkflowId(), request.GetRunId(), indexKeyStartTimeout, timestamp.TimeValue(request.StartTime)) - if err := v.gcloudStorage.Upload(ctx, URI, filename, encodedVisibilityRecord); err != nil { - logger.Error(archiver.ArchiveTransientErrorMsg, tag.ArchivalArchiveFailReason(errWriteFile), tag.Error(err)) - return errRetryable - } - - handler.Counter(metrics.VisibilityArchiveSuccessCount.GetMetricName()).Record(1) - return nil -} - -// Query is used to retrieve archived visibility records. -// Check the Get() method of the HistoryArchiver interface in Step 2 for parameters' meaning and requirements. -// The request includes a string field called query, which describes what kind of visibility records should be returned. For example, it can be some SQL-like syntax query string. -// Your implementation is responsible for parsing and validating the query, and also returning all visibility records that match the query. -// Currently the maximum context timeout passed into the method is 3 minutes, so it's ok if this method takes a long time to run. 
-func (v *visibilityArchiver) Query( - ctx context.Context, - URI archiver.URI, - request *archiver.QueryVisibilityRequest, - saTypeMap searchattribute.NameTypeMap, -) (*archiver.QueryVisibilityResponse, error) { - - if err := v.ValidateURI(URI); err != nil { - return nil, &serviceerror.InvalidArgument{Message: archiver.ErrInvalidURI.Error()} - } - - if err := archiver.ValidateQueryRequest(request); err != nil { - return nil, &serviceerror.InvalidArgument{Message: archiver.ErrInvalidQueryVisibilityRequest.Error()} - } - - if strings.TrimSpace(request.Query) == "" { - return v.queryAll(ctx, URI, request, saTypeMap) - } - - parsedQuery, err := v.queryParser.Parse(request.Query) - if err != nil { - return nil, &serviceerror.InvalidArgument{Message: err.Error()} - } - - if parsedQuery.emptyResult { - return &archiver.QueryVisibilityResponse{}, nil - } - - return v.query( - ctx, - URI, - &queryVisibilityRequest{ - namespaceID: request.NamespaceID, - pageSize: request.PageSize, - nextPageToken: request.NextPageToken, - parsedQuery: parsedQuery, - }, - saTypeMap, - ) -} - -func (v *visibilityArchiver) query( - ctx context.Context, - uri archiver.URI, - request *queryVisibilityRequest, - saTypeMap searchattribute.NameTypeMap, -) (*archiver.QueryVisibilityResponse, error) { - prefix := constructVisibilityFilenamePrefix(request.namespaceID, indexKeyCloseTimeout) - if !request.parsedQuery.closeTime.IsZero() { - prefix = constructTimeBasedSearchKey( - request.namespaceID, - indexKeyCloseTimeout, - request.parsedQuery.closeTime, - *request.parsedQuery.searchPrecision, - ) - } - - if !request.parsedQuery.startTime.IsZero() { - prefix = constructTimeBasedSearchKey( - request.namespaceID, - indexKeyStartTimeout, - request.parsedQuery.startTime, - *request.parsedQuery.searchPrecision, - ) - } - - return v.queryPrefix(ctx, uri, request, saTypeMap, prefix) -} - -func (v *visibilityArchiver) queryAll( - ctx context.Context, - URI archiver.URI, - request *archiver.QueryVisibilityRequest, - saTypeMap searchattribute.NameTypeMap, -) (*archiver.QueryVisibilityResponse, error) { - - return v.queryPrefix(ctx, URI, &queryVisibilityRequest{ - namespaceID: request.NamespaceID, - pageSize: request.PageSize, - nextPageToken: request.NextPageToken, - parsedQuery: &parsedQuery{}, - }, saTypeMap, request.NamespaceID) -} - -func (v *visibilityArchiver) queryPrefix(ctx context.Context, uri archiver.URI, request *queryVisibilityRequest, saTypeMap searchattribute.NameTypeMap, prefix string) (*archiver.QueryVisibilityResponse, error) { - token, err := v.parseToken(request.nextPageToken) - if err != nil { - return nil, err - } - - filters := make([]connector.Precondition, 0) - if request.parsedQuery.workflowID != nil { - filters = append(filters, newWorkflowIDPrecondition(hash(*request.parsedQuery.workflowID))) - } - - if request.parsedQuery.runID != nil { - filters = append(filters, newWorkflowIDPrecondition(hash(*request.parsedQuery.runID))) - } - - if request.parsedQuery.workflowType != nil { - filters = append(filters, newWorkflowIDPrecondition(hash(*request.parsedQuery.workflowType))) - } - - filenames, completed, currentCursorPos, err := v.gcloudStorage.QueryWithFilters(ctx, uri, prefix, request.pageSize, token.Offset, filters) - if err != nil { - return nil, &serviceerror.InvalidArgument{Message: err.Error()} - } - - response := &archiver.QueryVisibilityResponse{} - for _, file := range filenames { - encodedRecord, err := v.gcloudStorage.Get(ctx, uri, fmt.Sprintf("%s/%s", request.namespaceID, filepath.Base(file))) - if 
err != nil { - return nil, &serviceerror.InvalidArgument{Message: err.Error()} - } - - record, err := decodeVisibilityRecord(encodedRecord) - if err != nil { - return nil, &serviceerror.InvalidArgument{Message: err.Error()} - } - - executionInfo, err := convertToExecutionInfo(record, saTypeMap) - if err != nil { - return nil, serviceerror.NewInternal(err.Error()) - } - response.Executions = append(response.Executions, executionInfo) - } - - if !completed { - newToken := &queryVisibilityToken{ - Offset: currentCursorPos, - } - encodedToken, err := serializeToken(newToken) - if err != nil { - return nil, &serviceerror.InvalidArgument{Message: err.Error()} - } - response.NextPageToken = encodedToken - } - - return response, nil -} - -func (v *visibilityArchiver) parseToken(nextPageToken []byte) (*queryVisibilityToken, error) { - token := new(queryVisibilityToken) - if nextPageToken != nil { - var err error - token, err = deserializeQueryVisibilityToken(nextPageToken) - if err != nil { - return nil, &serviceerror.InvalidArgument{Message: archiver.ErrNextPageTokenCorrupted.Error()} - } - } - return token, nil -} - -// ValidateURI is used to define what a valid URI for an implementation is. -func (v *visibilityArchiver) ValidateURI(URI archiver.URI) (err error) { - ctx, cancel := context.WithTimeout(context.Background(), timeoutInSeconds*time.Second) - defer cancel() - - if err = v.validateURI(URI); err == nil { - _, err = v.gcloudStorage.Exist(ctx, URI, "") - } - - return -} - -func (v *visibilityArchiver) validateURI(URI archiver.URI) (err error) { - if URI.Scheme() != URIScheme { - return archiver.ErrURISchemeMismatch - } - - if URI.Path() == "" || URI.Hostname() == "" { - return archiver.ErrInvalidURI - } - - return -} diff -Nru temporal-1.21.5-1/src/common/archiver/gcloud/visibilityArchiver_test.go temporal-1.22.5/src/common/archiver/gcloud/visibilityArchiver_test.go --- temporal-1.21.5-1/src/common/archiver/gcloud/visibilityArchiver_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/archiver/gcloud/visibilityArchiver_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,467 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- -package gcloud - -import ( - "context" - "errors" - "testing" - "time" - - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - enumspb "go.temporal.io/api/enums/v1" - "go.temporal.io/api/serviceerror" - "go.temporal.io/api/workflow/v1" - - "go.temporal.io/server/common/searchattribute" - - archiverspb "go.temporal.io/server/api/archiver/v1" - "go.temporal.io/server/common/archiver" - "go.temporal.io/server/common/archiver/gcloud/connector" - "go.temporal.io/server/common/convert" - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/metrics" - "go.temporal.io/server/common/primitives/timestamp" -) - -const ( - testWorkflowTypeName = "test-workflow-type" - exampleVisibilityRecord = `{"namespaceId":"test-namespace-id","namespace":"test-namespace","workflowId":"test-workflow-id","runId":"test-run-id","workflowTypeName":"test-workflow-type","startTime":"2020-02-05T09:56:14.804475Z","closeTime":"2020-02-05T09:56:15.946478Z","status":"Completed","historyLength":36,"memo":null,"searchAttributes":null,"historyArchivalUri":"gs://my-bucket-cad/temporal_archival/development"}` - exampleVisibilityRecord2 = `{"namespaceId":"test-namespace-id","namespace":"test-namespace", -"workflowId":"test-workflow-id2","runId":"test-run-id","workflowTypeName":"test-workflow-type", -"startTime":"2020-02-05T09:56:14.804475Z","closeTime":"2020-02-05T09:56:15.946478Z","status":"Completed","historyLength":36,"memo":null,"searchAttributes":null,"historyArchivalUri":"gs://my-bucket-cad/temporal_archival/development"}` -) - -func (s *visibilityArchiverSuite) SetupTest() { - s.Assertions = require.New(s.T()) - s.controller = gomock.NewController(s.T()) - s.container = &archiver.VisibilityBootstrapContainer{ - Logger: log.NewNoopLogger(), - MetricsHandler: metrics.NoopMetricsHandler, - } - s.expectedVisibilityRecords = []*archiverspb.VisibilityRecord{ - { - NamespaceId: testNamespaceID, - Namespace: testNamespace, - WorkflowId: testWorkflowID, - RunId: testRunID, - WorkflowTypeName: testWorkflowTypeName, - StartTime: timestamp.UnixOrZeroTimePtr(1580896574804475000), - CloseTime: timestamp.UnixOrZeroTimePtr(1580896575946478000), - Status: enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED, - HistoryLength: 36, - }, - } -} - -func (s *visibilityArchiverSuite) TearDownTest() { - s.controller.Finish() -} - -func TestVisibilityArchiverSuiteSuite(t *testing.T) { - suite.Run(t, new(visibilityArchiverSuite)) -} - -type visibilityArchiverSuite struct { - *require.Assertions - suite.Suite - controller *gomock.Controller - container *archiver.VisibilityBootstrapContainer - expectedVisibilityRecords []*archiverspb.VisibilityRecord -} - -func (s *visibilityArchiverSuite) TestValidateVisibilityURI() { - testCases := []struct { - URI string - expectedErr error - }{ - { - URI: "wrongscheme:///a/b/c", - expectedErr: archiver.ErrURISchemeMismatch, - }, - { - URI: "gs:my-bucket-cad/temporal_archival/visibility", - expectedErr: archiver.ErrInvalidURI, - }, - { - URI: "gs://", - expectedErr: archiver.ErrInvalidURI, - }, - { - URI: "gs://my-bucket-cad", - expectedErr: archiver.ErrInvalidURI, - }, - { - URI: "gs:/my-bucket-cad/temporal_archival/visibility", - expectedErr: archiver.ErrInvalidURI, - }, - { - URI: "gs://my-bucket-cad/temporal_archival/visibility", - expectedErr: nil, - }, - } - - storageWrapper := connector.NewMockClient(s.controller) - storageWrapper.EXPECT().Exist(gomock.Any(), gomock.Any(), "").Return(false, nil) - visibilityArchiver := new(visibilityArchiver) - 
visibilityArchiver.gcloudStorage = storageWrapper - for _, tc := range testCases { - URI, err := archiver.NewURI(tc.URI) - s.NoError(err) - s.Equal(tc.expectedErr, visibilityArchiver.ValidateURI(URI)) - } -} - -func (s *visibilityArchiverSuite) TestArchive_Fail_InvalidVisibilityURI() { - ctx := context.Background() - URI, err := archiver.NewURI("wrongscheme://") - s.NoError(err) - storageWrapper := connector.NewMockClient(s.controller) - - visibilityArchiver := newVisibilityArchiver(s.container, storageWrapper) - s.NoError(err) - request := &archiverspb.VisibilityRecord{ - NamespaceId: testNamespaceID, - Namespace: testNamespace, - WorkflowId: testWorkflowID, - RunId: testRunID, - } - - err = visibilityArchiver.Archive(ctx, URI, request) - s.Error(err) -} - -func (s *visibilityArchiverSuite) TestQuery_Fail_InvalidVisibilityURI() { - ctx := context.Background() - URI, err := archiver.NewURI("wrongscheme://") - s.NoError(err) - storageWrapper := connector.NewMockClient(s.controller) - - visibilityArchiver := newVisibilityArchiver(s.container, storageWrapper) - s.NoError(err) - request := &archiver.QueryVisibilityRequest{ - NamespaceID: testNamespaceID, - PageSize: 10, - Query: "WorkflowType='type::example' AND CloseTime='2020-02-05T11:00:00Z' AND SearchPrecision='Day'", - } - - _, err = visibilityArchiver.Query(ctx, URI, request, searchattribute.TestNameTypeMap) - s.Error(err) -} - -func (s *visibilityArchiverSuite) TestVisibilityArchive() { - ctx := context.Background() - URI, err := archiver.NewURI("gs://my-bucket-cad/temporal_archival/visibility") - s.NoError(err) - storageWrapper := connector.NewMockClient(s.controller) - storageWrapper.EXPECT().Exist(gomock.Any(), URI, gomock.Any()).Return(false, nil) - storageWrapper.EXPECT().Upload(gomock.Any(), URI, gomock.Any(), gomock.Any()).Return(nil).Times(2) - - visibilityArchiver := newVisibilityArchiver(s.container, storageWrapper) - s.NoError(err) - - request := &archiverspb.VisibilityRecord{ - Namespace: testNamespace, - NamespaceId: testNamespaceID, - WorkflowId: testWorkflowID, - RunId: testRunID, - WorkflowTypeName: testWorkflowTypeName, - StartTime: timestamp.TimeNowPtrUtc(), - ExecutionTime: nil, // workflow without backoff - CloseTime: timestamp.TimeNowPtrUtc(), - Status: enumspb.WORKFLOW_EXECUTION_STATUS_FAILED, - HistoryLength: int64(101), - } - - err = visibilityArchiver.Archive(ctx, URI, request) - s.NoError(err) -} - -func (s *visibilityArchiverSuite) TestQuery_Fail_InvalidQuery() { - ctx := context.Background() - URI, err := archiver.NewURI("gs://my-bucket-cad/temporal_archival/visibility") - s.NoError(err) - storageWrapper := connector.NewMockClient(s.controller) - storageWrapper.EXPECT().Exist(gomock.Any(), URI, gomock.Any()).Return(false, nil) - visibilityArchiver := newVisibilityArchiver(s.container, storageWrapper) - s.NoError(err) - - mockParser := NewMockQueryParser(s.controller) - mockParser.EXPECT().Parse(gomock.Any()).Return(nil, errors.New("invalid query")) - visibilityArchiver.queryParser = mockParser - response, err := visibilityArchiver.Query(ctx, URI, &archiver.QueryVisibilityRequest{ - NamespaceID: "some random namespaceID", - PageSize: 10, - Query: "some invalid query", - }, searchattribute.TestNameTypeMap) - s.Error(err) - s.Nil(response) -} - -func (s *visibilityArchiverSuite) TestQuery_Fail_InvalidToken() { - URI, err := archiver.NewURI("gs://my-bucket-cad/temporal_archival/visibility") - s.NoError(err) - storageWrapper := connector.NewMockClient(s.controller) - storageWrapper.EXPECT().Exist(gomock.Any(), 
URI, gomock.Any()).Return(false, nil) - visibilityArchiver := newVisibilityArchiver(s.container, storageWrapper) - s.NoError(err) - - mockParser := NewMockQueryParser(s.controller) - startTime, _ := time.Parse(time.RFC3339, "2019-10-04T11:00:00+00:00") - closeTime := startTime.Add(time.Hour) - precision := PrecisionDay - mockParser.EXPECT().Parse(gomock.Any()).Return(&parsedQuery{ - closeTime: closeTime, - startTime: startTime, - searchPrecision: &precision, - }, nil) - visibilityArchiver.queryParser = mockParser - request := &archiver.QueryVisibilityRequest{ - NamespaceID: testNamespaceID, - Query: "parsed by mockParser", - PageSize: 1, - NextPageToken: []byte{1, 2, 3}, - } - response, err := visibilityArchiver.Query(context.Background(), URI, request, searchattribute.TestNameTypeMap) - s.Error(err) - s.Nil(response) -} - -func (s *visibilityArchiverSuite) TestQuery_Success_NoNextPageToken() { - ctx := context.Background() - URI, err := archiver.NewURI("gs://my-bucket-cad/temporal_archival/visibility") - s.NoError(err) - storageWrapper := connector.NewMockClient(s.controller) - storageWrapper.EXPECT().Exist(gomock.Any(), URI, gomock.Any()).Return(false, nil) - storageWrapper.EXPECT().QueryWithFilters(gomock.Any(), URI, gomock.Any(), 10, 0, gomock.Any()).Return([]string{"closeTimeout_2020-02-05T09:56:14Z_test-workflow-id_MobileOnlyWorkflow::processMobileOnly_test-run-id.visibility"}, true, 1, nil) - storageWrapper.EXPECT().Get(gomock.Any(), URI, "test-namespace-id/closeTimeout_2020-02-05T09:56:14Z_test-workflow-id_MobileOnlyWorkflow::processMobileOnly_test-run-id.visibility").Return([]byte(exampleVisibilityRecord), nil) - - visibilityArchiver := newVisibilityArchiver(s.container, storageWrapper) - s.NoError(err) - - mockParser := NewMockQueryParser(s.controller) - dayPrecision := "Day" - closeTime, _ := time.Parse(time.RFC3339, "2019-10-04T11:00:00+00:00") - mockParser.EXPECT().Parse(gomock.Any()).Return(&parsedQuery{ - closeTime: closeTime, - searchPrecision: &dayPrecision, - workflowType: convert.StringPtr("MobileOnlyWorkflow::processMobileOnly"), - workflowID: convert.StringPtr(testWorkflowID), - runID: convert.StringPtr(testRunID), - }, nil) - visibilityArchiver.queryParser = mockParser - request := &archiver.QueryVisibilityRequest{ - NamespaceID: testNamespaceID, - PageSize: 10, - Query: "parsed by mockParser", - } - - response, err := visibilityArchiver.Query(ctx, URI, request, searchattribute.TestNameTypeMap) - s.NoError(err) - s.NotNil(response) - s.Nil(response.NextPageToken) - s.Len(response.Executions, 1) - ei, err := convertToExecutionInfo(s.expectedVisibilityRecords[0], searchattribute.TestNameTypeMap) - s.NoError(err) - s.Equal(ei, response.Executions[0]) -} - -func (s *visibilityArchiverSuite) TestQuery_Success_SmallPageSize() { - - pageSize := 2 - ctx := context.Background() - URI, err := archiver.NewURI("gs://my-bucket-cad/temporal_archival/visibility") - s.NoError(err) - storageWrapper := connector.NewMockClient(s.controller) - storageWrapper.EXPECT().Exist(gomock.Any(), URI, gomock.Any()).Return(false, nil).Times(2) - storageWrapper.EXPECT().QueryWithFilters(gomock.Any(), URI, gomock.Any(), pageSize, 0, gomock.Any()).Return([]string{"closeTimeout_2020-02-05T09:56:14Z_test-workflow-id_MobileOnlyWorkflow::processMobileOnly_test-run-id.visibility", "closeTimeout_2020-02-05T09:56:15Z_test-workflow-id_MobileOnlyWorkflow::processMobileOnly_test-run-id.visibility"}, false, 1, nil) - storageWrapper.EXPECT().QueryWithFilters(gomock.Any(), URI, gomock.Any(), pageSize, 1, 
gomock.Any()).Return([]string{"closeTimeout_2020-02-05T09:56:16Z_test-workflow-id_MobileOnlyWorkflow::processMobileOnly_test-run-id.visibility"}, true, 2, nil) - storageWrapper.EXPECT().Get(gomock.Any(), URI, "test-namespace-id/closeTimeout_2020-02-05T09:56:14Z_test-workflow-id_MobileOnlyWorkflow::processMobileOnly_test-run-id.visibility").Return([]byte(exampleVisibilityRecord), nil) - storageWrapper.EXPECT().Get(gomock.Any(), URI, "test-namespace-id/closeTimeout_2020-02-05T09:56:15Z_test-workflow-id_MobileOnlyWorkflow::processMobileOnly_test-run-id.visibility").Return([]byte(exampleVisibilityRecord), nil) - storageWrapper.EXPECT().Get(gomock.Any(), URI, "test-namespace-id/closeTimeout_2020-02-05T09:56:16Z_test-workflow-id_MobileOnlyWorkflow::processMobileOnly_test-run-id.visibility").Return([]byte(exampleVisibilityRecord), nil) - - visibilityArchiver := newVisibilityArchiver(s.container, storageWrapper) - s.NoError(err) - - mockParser := NewMockQueryParser(s.controller) - dayPrecision := "Day" - closeTime, _ := time.Parse(time.RFC3339, "2019-10-04T11:00:00+00:00") - mockParser.EXPECT().Parse(gomock.Any()).Return(&parsedQuery{ - closeTime: closeTime, - searchPrecision: &dayPrecision, - workflowType: convert.StringPtr("MobileOnlyWorkflow::processMobileOnly"), - workflowID: convert.StringPtr(testWorkflowID), - runID: convert.StringPtr(testRunID), - }, nil).AnyTimes() - visibilityArchiver.queryParser = mockParser - request := &archiver.QueryVisibilityRequest{ - NamespaceID: testNamespaceID, - PageSize: pageSize, - Query: "parsed by mockParser", - } - - response, err := visibilityArchiver.Query(ctx, URI, request, searchattribute.TestNameTypeMap) - s.NoError(err) - s.NotNil(response) - s.NotNil(response.NextPageToken) - s.Len(response.Executions, 2) - ei, err := convertToExecutionInfo(s.expectedVisibilityRecords[0], searchattribute.TestNameTypeMap) - s.NoError(err) - s.Equal(ei, response.Executions[0]) - ei, err = convertToExecutionInfo(s.expectedVisibilityRecords[0], searchattribute.TestNameTypeMap) - s.NoError(err) - s.Equal(ei, response.Executions[1]) - - request.NextPageToken = response.NextPageToken - response, err = visibilityArchiver.Query(ctx, URI, request, searchattribute.TestNameTypeMap) - s.NoError(err) - s.NotNil(response) - s.Nil(response.NextPageToken) - s.Len(response.Executions, 1) - ei, err = convertToExecutionInfo(s.expectedVisibilityRecords[0], searchattribute.TestNameTypeMap) - s.NoError(err) - s.Equal(ei, response.Executions[0]) -} - -func (s *visibilityArchiverSuite) TestQuery_EmptyQuery_InvalidNamespace() { - URI, err := archiver.NewURI("gs://my-bucket-cad/temporal_archival/visibility") - s.NoError(err) - storageWrapper := connector.NewMockClient(s.controller) - storageWrapper.EXPECT().Exist(gomock.Any(), URI, gomock.Any()).Return(false, nil) - arc := newVisibilityArchiver(s.container, storageWrapper) - req := &archiver.QueryVisibilityRequest{ - NamespaceID: "", - PageSize: 1, - NextPageToken: nil, - Query: "", - } - _, err = arc.Query(context.Background(), URI, req, searchattribute.TestNameTypeMap) - - var svcErr *serviceerror.InvalidArgument - - s.ErrorAs(err, &svcErr) -} - -func (s *visibilityArchiverSuite) TestQuery_EmptyQuery_ZeroPageSize() { - URI, err := archiver.NewURI("gs://my-bucket-cad/temporal_archival/visibility") - s.NoError(err) - storageWrapper := connector.NewMockClient(s.controller) - storageWrapper.EXPECT().Exist(gomock.Any(), URI, gomock.Any()).Return(false, nil) - arc := newVisibilityArchiver(s.container, storageWrapper) - - req := 
&archiver.QueryVisibilityRequest{ - NamespaceID: testNamespaceID, - PageSize: 0, - NextPageToken: nil, - Query: "", - } - _, err = arc.Query(context.Background(), URI, req, searchattribute.TestNameTypeMap) - - var svcErr *serviceerror.InvalidArgument - - s.ErrorAs(err, &svcErr) -} - -func (s *visibilityArchiverSuite) TestQuery_EmptyQuery_Pagination() { - URI, err := archiver.NewURI("gs://my-bucket-cad/temporal_archival/visibility") - s.NoError(err) - storageWrapper := connector.NewMockClient(s.controller) - storageWrapper.EXPECT().Exist(gomock.Any(), URI, gomock.Any()).Return(true, nil).Times(2) - storageWrapper.EXPECT().QueryWithFilters( - gomock.Any(), - URI, - gomock.Any(), - 1, - 0, - gomock.Any(), - ).Return( - []string{"closeTimeout_2020-02-05T09:56:14Z_test-workflow-id_MobileOnlyWorkflow::processMobileOnly_test-run-id.visibility"}, - false, - 1, - nil, - ) - storageWrapper.EXPECT().QueryWithFilters( - gomock.Any(), - URI, - gomock.Any(), - 1, - 1, - gomock.Any(), - ).Return( - []string{"closeTimeout_2020-02-05T09:56:14Z_test-workflow-id2_MobileOnlyWorkflow::processMobileOnly_test-run" + - "-id.visibility"}, - true, - 2, - nil, - ) - storageWrapper.EXPECT().Get( - gomock.Any(), - URI, - "test-namespace-id/closeTimeout_2020-02-05T09:56:14Z_test-workflow-id_MobileOnlyWorkflow::processMobileOnly_test-run-id.visibility", - ).Return([]byte(exampleVisibilityRecord), nil) - storageWrapper.EXPECT().Get(gomock.Any(), URI, - "test-namespace-id/closeTimeout_2020-02-05T09:56:14Z_test-workflow-id2_MobileOnlyWorkflow"+ - "::processMobileOnly_test-run-id.visibility").Return([]byte(exampleVisibilityRecord2), nil) - - arc := newVisibilityArchiver(s.container, storageWrapper) - - response := &archiver.QueryVisibilityResponse{ - Executions: nil, - NextPageToken: nil, - } - - limit := 10 - executions := make(map[string]*workflow.WorkflowExecutionInfo, limit) - - numPages := 2 - for i := 0; i < numPages; i++ { - req := &archiver.QueryVisibilityRequest{ - NamespaceID: testNamespaceID, - PageSize: 1, - NextPageToken: response.NextPageToken, - Query: "", - } - response, err = arc.Query(context.Background(), URI, req, searchattribute.TestNameTypeMap) - s.NoError(err) - s.NotNil(response) - s.Len(response.Executions, 1) - - s.Equal( - i == numPages-1, - response.NextPageToken == nil, - "should have no next page token on the last iteration", - ) - - for _, execution := range response.Executions { - key := execution.Execution.GetWorkflowId() + - "/" + execution.Execution.GetRunId() + - "/" + execution.CloseTime.String() - executions[key] = execution - } - } - s.Len(executions, 2, "there should be exactly 2 unique executions") -} diff -Nru temporal-1.21.5-1/src/common/archiver/gcloud/visibility_archiver.go temporal-1.22.5/src/common/archiver/gcloud/visibility_archiver.go --- temporal-1.21.5-1/src/common/archiver/gcloud/visibility_archiver.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/archiver/gcloud/visibility_archiver.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,334 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package gcloud + +import ( + "context" + "errors" + "fmt" + "path/filepath" + "strings" + "time" + + "go.temporal.io/api/serviceerror" + + archiverspb "go.temporal.io/server/api/archiver/v1" + "go.temporal.io/server/common/archiver" + "go.temporal.io/server/common/archiver/gcloud/connector" + "go.temporal.io/server/common/config" + "go.temporal.io/server/common/log/tag" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/primitives/timestamp" + "go.temporal.io/server/common/searchattribute" +) + +const ( + errEncodeVisibilityRecord = "failed to encode visibility record" + indexKeyStartTimeout = "startTimeout" + indexKeyCloseTimeout = "closeTimeout" + timeoutInSeconds = 5 +) + +var ( + errRetryable = errors.New("retryable error") +) + +type ( + visibilityArchiver struct { + container *archiver.VisibilityBootstrapContainer + gcloudStorage connector.Client + queryParser QueryParser + } + + queryVisibilityToken struct { + Offset int + } + + queryVisibilityRequest struct { + namespaceID string + pageSize int + nextPageToken []byte + parsedQuery *parsedQuery + } +) + +func newVisibilityArchiver(container *archiver.VisibilityBootstrapContainer, storage connector.Client) *visibilityArchiver { + return &visibilityArchiver{ + container: container, + gcloudStorage: storage, + queryParser: NewQueryParser(), + } +} + +// NewVisibilityArchiver creates a new archiver.VisibilityArchiver based on filestore +func NewVisibilityArchiver(container *archiver.VisibilityBootstrapContainer, config *config.GstorageArchiver) (archiver.VisibilityArchiver, error) { + storage, err := connector.NewClient(context.Background(), config) + return newVisibilityArchiver(container, storage), err +} + +// Archive is used to archive one workflow visibility record. +// Check the Archive() method of the HistoryArchiver interface in Step 2 for parameters' meaning and requirements. +// The only difference is that the ArchiveOption parameter won't include an option for recording process. +// Please make sure your implementation is lossless. If any in-memory batching mechanism is used, then those batched records will be lost during server restarts. +// This method will be invoked when workflow closes. Note that because of conflict resolution, it is possible for a workflow to through the closing process multiple times, which means that this method can be invoked more than once after a workflow closes. 
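As a hedged illustration of the write path implemented just below (not part of the upstream patch): a caller hands Archive a populated visibility record and a gs:// URI, and the encoded record is uploaded twice, once under a close-time index and once under a start-time index. Field values here are borrowed from the test fixtures further down; the wrapper function name is hypothetical and the snippet assumes it sits in package gcloud, since visibilityArchiver is unexported.

// Illustrative sketch only, not part of the upstream patch.
func exampleArchive(ctx context.Context, v *visibilityArchiver) error {
    uri, err := archiver.NewURI("gs://my-bucket-cad/temporal_archival/visibility")
    if err != nil {
        return err
    }
    record := &archiverspb.VisibilityRecord{
        NamespaceId:      "test-namespace-id",
        Namespace:        "test-namespace",
        WorkflowId:       "test-workflow-id",
        RunId:            "test-run-id",
        WorkflowTypeName: "test-workflow-type",
        StartTime:        timestamp.TimeNowPtrUtc(),
        CloseTime:        timestamp.TimeNowPtrUtc(),
        Status:           enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED,
        HistoryLength:    36,
    }
    // Archive encodes the record and uploads it under both the
    // closeTimeout_... and startTimeout_... filenames (see below).
    return v.Archive(ctx, uri, record)
}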
+func (v *visibilityArchiver) Archive(ctx context.Context, URI archiver.URI, request *archiverspb.VisibilityRecord, opts ...archiver.ArchiveOption) (err error) { + handler := v.container.MetricsHandler.WithTags(metrics.OperationTag(metrics.HistoryArchiverScope), metrics.NamespaceTag(request.Namespace)) + featureCatalog := archiver.GetFeatureCatalog(opts...) + startTime := time.Now().UTC() + defer func() { + handler.Timer(metrics.ServiceLatency.GetMetricName()).Record(time.Since(startTime)) + if err != nil { + if isRetryableError(err) { + handler.Counter(metrics.VisibilityArchiverArchiveTransientErrorCount.GetMetricName()).Record(1) + } else { + handler.Counter(metrics.VisibilityArchiverArchiveNonRetryableErrorCount.GetMetricName()).Record(1) + if featureCatalog.NonRetryableError != nil { + err = featureCatalog.NonRetryableError() + } + } + } + }() + + logger := archiver.TagLoggerWithArchiveVisibilityRequestAndURI(v.container.Logger, request, URI.String()) + + if err := v.ValidateURI(URI); err != nil { + if isRetryableError(err) { + logger.Error(archiver.ArchiveTransientErrorMsg, tag.ArchivalArchiveFailReason(archiver.ErrReasonInvalidURI), tag.Error(err)) + return err + } + logger.Error(archiver.ArchiveNonRetryableErrorMsg, tag.ArchivalArchiveFailReason(archiver.ErrReasonInvalidURI), tag.Error(err)) + return err + } + + if err := archiver.ValidateVisibilityArchivalRequest(request); err != nil { + logger.Error(archiver.ArchiveNonRetryableErrorMsg, tag.ArchivalArchiveFailReason(archiver.ErrReasonInvalidArchiveRequest), tag.Error(err)) + return err + } + + encodedVisibilityRecord, err := encode(request) + if err != nil { + logger.Error(archiver.ArchiveNonRetryableErrorMsg, tag.ArchivalArchiveFailReason(errEncodeVisibilityRecord), tag.Error(err)) + return err + } + + // The filename has the format: closeTimestamp_hash(runID).visibility + // This format allows the archiver to sort all records without reading the file contents + filename := constructVisibilityFilename(request.GetNamespaceId(), request.WorkflowTypeName, request.GetWorkflowId(), request.GetRunId(), indexKeyCloseTimeout, timestamp.TimeValue(request.CloseTime)) + if err := v.gcloudStorage.Upload(ctx, URI, filename, encodedVisibilityRecord); err != nil { + logger.Error(archiver.ArchiveTransientErrorMsg, tag.ArchivalArchiveFailReason(errWriteFile), tag.Error(err)) + return errRetryable + } + + filename = constructVisibilityFilename(request.GetNamespaceId(), request.WorkflowTypeName, request.GetWorkflowId(), request.GetRunId(), indexKeyStartTimeout, timestamp.TimeValue(request.StartTime)) + if err := v.gcloudStorage.Upload(ctx, URI, filename, encodedVisibilityRecord); err != nil { + logger.Error(archiver.ArchiveTransientErrorMsg, tag.ArchivalArchiveFailReason(errWriteFile), tag.Error(err)) + return errRetryable + } + + handler.Counter(metrics.VisibilityArchiveSuccessCount.GetMetricName()).Record(1) + return nil +} + +// Query is used to retrieve archived visibility records. +// Check the Get() method of the HistoryArchiver interface in Step 2 for parameters' meaning and requirements. +// The request includes a string field called query, which describes what kind of visibility records should be returned. For example, it can be some SQL-like syntax query string. +// Your implementation is responsible for parsing and validating the query, and also returning all visibility records that match the query. +// Currently the maximum context timeout passed into the method is 3 minutes, so it's ok if this method takes a long time to run. 
+func (v *visibilityArchiver) Query( + ctx context.Context, + URI archiver.URI, + request *archiver.QueryVisibilityRequest, + saTypeMap searchattribute.NameTypeMap, +) (*archiver.QueryVisibilityResponse, error) { + + if err := v.ValidateURI(URI); err != nil { + return nil, &serviceerror.InvalidArgument{Message: archiver.ErrInvalidURI.Error()} + } + + if err := archiver.ValidateQueryRequest(request); err != nil { + return nil, &serviceerror.InvalidArgument{Message: archiver.ErrInvalidQueryVisibilityRequest.Error()} + } + + if strings.TrimSpace(request.Query) == "" { + return v.queryAll(ctx, URI, request, saTypeMap) + } + + parsedQuery, err := v.queryParser.Parse(request.Query) + if err != nil { + return nil, &serviceerror.InvalidArgument{Message: err.Error()} + } + + if parsedQuery.emptyResult { + return &archiver.QueryVisibilityResponse{}, nil + } + + return v.query( + ctx, + URI, + &queryVisibilityRequest{ + namespaceID: request.NamespaceID, + pageSize: request.PageSize, + nextPageToken: request.NextPageToken, + parsedQuery: parsedQuery, + }, + saTypeMap, + ) +} + +func (v *visibilityArchiver) query( + ctx context.Context, + uri archiver.URI, + request *queryVisibilityRequest, + saTypeMap searchattribute.NameTypeMap, +) (*archiver.QueryVisibilityResponse, error) { + prefix := constructVisibilityFilenamePrefix(request.namespaceID, indexKeyCloseTimeout) + if !request.parsedQuery.closeTime.IsZero() { + prefix = constructTimeBasedSearchKey( + request.namespaceID, + indexKeyCloseTimeout, + request.parsedQuery.closeTime, + *request.parsedQuery.searchPrecision, + ) + } + + if !request.parsedQuery.startTime.IsZero() { + prefix = constructTimeBasedSearchKey( + request.namespaceID, + indexKeyStartTimeout, + request.parsedQuery.startTime, + *request.parsedQuery.searchPrecision, + ) + } + + return v.queryPrefix(ctx, uri, request, saTypeMap, prefix) +} + +func (v *visibilityArchiver) queryAll( + ctx context.Context, + URI archiver.URI, + request *archiver.QueryVisibilityRequest, + saTypeMap searchattribute.NameTypeMap, +) (*archiver.QueryVisibilityResponse, error) { + + return v.queryPrefix(ctx, URI, &queryVisibilityRequest{ + namespaceID: request.NamespaceID, + pageSize: request.PageSize, + nextPageToken: request.NextPageToken, + parsedQuery: &parsedQuery{}, + }, saTypeMap, request.NamespaceID) +} + +func (v *visibilityArchiver) queryPrefix(ctx context.Context, uri archiver.URI, request *queryVisibilityRequest, saTypeMap searchattribute.NameTypeMap, prefix string) (*archiver.QueryVisibilityResponse, error) { + token, err := v.parseToken(request.nextPageToken) + if err != nil { + return nil, err + } + + filters := make([]connector.Precondition, 0) + if request.parsedQuery.workflowID != nil { + filters = append(filters, newWorkflowIDPrecondition(hash(*request.parsedQuery.workflowID))) + } + + if request.parsedQuery.runID != nil { + filters = append(filters, newWorkflowIDPrecondition(hash(*request.parsedQuery.runID))) + } + + if request.parsedQuery.workflowType != nil { + filters = append(filters, newWorkflowIDPrecondition(hash(*request.parsedQuery.workflowType))) + } + + filenames, completed, currentCursorPos, err := v.gcloudStorage.QueryWithFilters(ctx, uri, prefix, request.pageSize, token.Offset, filters) + if err != nil { + return nil, &serviceerror.InvalidArgument{Message: err.Error()} + } + + response := &archiver.QueryVisibilityResponse{} + for _, file := range filenames { + encodedRecord, err := v.gcloudStorage.Get(ctx, uri, fmt.Sprintf("%s/%s", request.namespaceID, filepath.Base(file))) + if 
err != nil { + return nil, &serviceerror.InvalidArgument{Message: err.Error()} + } + + record, err := decodeVisibilityRecord(encodedRecord) + if err != nil { + return nil, &serviceerror.InvalidArgument{Message: err.Error()} + } + + executionInfo, err := convertToExecutionInfo(record, saTypeMap) + if err != nil { + return nil, serviceerror.NewInternal(err.Error()) + } + response.Executions = append(response.Executions, executionInfo) + } + + if !completed { + newToken := &queryVisibilityToken{ + Offset: currentCursorPos, + } + encodedToken, err := serializeToken(newToken) + if err != nil { + return nil, &serviceerror.InvalidArgument{Message: err.Error()} + } + response.NextPageToken = encodedToken + } + + return response, nil +} + +func (v *visibilityArchiver) parseToken(nextPageToken []byte) (*queryVisibilityToken, error) { + token := new(queryVisibilityToken) + if nextPageToken != nil { + var err error + token, err = deserializeQueryVisibilityToken(nextPageToken) + if err != nil { + return nil, &serviceerror.InvalidArgument{Message: archiver.ErrNextPageTokenCorrupted.Error()} + } + } + return token, nil +} + +// ValidateURI is used to define what a valid URI for an implementation is. +func (v *visibilityArchiver) ValidateURI(URI archiver.URI) (err error) { + ctx, cancel := context.WithTimeout(context.Background(), timeoutInSeconds*time.Second) + defer cancel() + + if err = v.validateURI(URI); err == nil { + _, err = v.gcloudStorage.Exist(ctx, URI, "") + } + + return +} + +func (v *visibilityArchiver) validateURI(URI archiver.URI) (err error) { + if URI.Scheme() != URIScheme { + return archiver.ErrURISchemeMismatch + } + + if URI.Path() == "" || URI.Hostname() == "" { + return archiver.ErrInvalidURI + } + + return +} diff -Nru temporal-1.21.5-1/src/common/archiver/gcloud/visibility_archiver_test.go temporal-1.22.5/src/common/archiver/gcloud/visibility_archiver_test.go --- temporal-1.21.5-1/src/common/archiver/gcloud/visibility_archiver_test.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/archiver/gcloud/visibility_archiver_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,467 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
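Before the accompanying test suite, a sketch of the read path under the same assumptions (illustrative only, in-package because visibilityArchiver is unexported, helper name hypothetical): an empty query string falls back to queryAll over the namespace prefix, and callers page through results by feeding NextPageToken back into the next request, mirroring TestQuery_EmptyQuery_Pagination below.

// Illustrative sketch only, not part of the upstream patch.
func exampleQueryAll(ctx context.Context, v *visibilityArchiver, saTypeMap searchattribute.NameTypeMap) error {
    uri, err := archiver.NewURI("gs://my-bucket-cad/temporal_archival/visibility")
    if err != nil {
        return err
    }
    var token []byte
    for {
        resp, err := v.Query(ctx, uri, &archiver.QueryVisibilityRequest{
            NamespaceID:   "test-namespace-id",
            PageSize:      10,
            NextPageToken: token,
            Query:         "", // empty query uses the queryAll path
        }, saTypeMap)
        if err != nil {
            return err
        }
        for _, execution := range resp.Executions {
            // each entry is a *workflow.WorkflowExecutionInfo with
            // workflow id, run id, close time, and so on.
            _ = execution
        }
        if resp.NextPageToken == nil {
            return nil
        }
        token = resp.NextPageToken
    }
}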
+ +package gcloud + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + enumspb "go.temporal.io/api/enums/v1" + "go.temporal.io/api/serviceerror" + "go.temporal.io/api/workflow/v1" + + "go.temporal.io/server/common/searchattribute" + + archiverspb "go.temporal.io/server/api/archiver/v1" + "go.temporal.io/server/common/archiver" + "go.temporal.io/server/common/archiver/gcloud/connector" + "go.temporal.io/server/common/convert" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/primitives/timestamp" +) + +const ( + testWorkflowTypeName = "test-workflow-type" + exampleVisibilityRecord = `{"namespaceId":"test-namespace-id","namespace":"test-namespace","workflowId":"test-workflow-id","runId":"test-run-id","workflowTypeName":"test-workflow-type","startTime":"2020-02-05T09:56:14.804475Z","closeTime":"2020-02-05T09:56:15.946478Z","status":"Completed","historyLength":36,"memo":null,"searchAttributes":null,"historyArchivalUri":"gs://my-bucket-cad/temporal_archival/development"}` + exampleVisibilityRecord2 = `{"namespaceId":"test-namespace-id","namespace":"test-namespace", +"workflowId":"test-workflow-id2","runId":"test-run-id","workflowTypeName":"test-workflow-type", +"startTime":"2020-02-05T09:56:14.804475Z","closeTime":"2020-02-05T09:56:15.946478Z","status":"Completed","historyLength":36,"memo":null,"searchAttributes":null,"historyArchivalUri":"gs://my-bucket-cad/temporal_archival/development"}` +) + +func (s *visibilityArchiverSuite) SetupTest() { + s.Assertions = require.New(s.T()) + s.controller = gomock.NewController(s.T()) + s.container = &archiver.VisibilityBootstrapContainer{ + Logger: log.NewNoopLogger(), + MetricsHandler: metrics.NoopMetricsHandler, + } + s.expectedVisibilityRecords = []*archiverspb.VisibilityRecord{ + { + NamespaceId: testNamespaceID, + Namespace: testNamespace, + WorkflowId: testWorkflowID, + RunId: testRunID, + WorkflowTypeName: testWorkflowTypeName, + StartTime: timestamp.UnixOrZeroTimePtr(1580896574804475000), + CloseTime: timestamp.UnixOrZeroTimePtr(1580896575946478000), + Status: enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED, + HistoryLength: 36, + }, + } +} + +func (s *visibilityArchiverSuite) TearDownTest() { + s.controller.Finish() +} + +func TestVisibilityArchiverSuiteSuite(t *testing.T) { + suite.Run(t, new(visibilityArchiverSuite)) +} + +type visibilityArchiverSuite struct { + *require.Assertions + suite.Suite + controller *gomock.Controller + container *archiver.VisibilityBootstrapContainer + expectedVisibilityRecords []*archiverspb.VisibilityRecord +} + +func (s *visibilityArchiverSuite) TestValidateVisibilityURI() { + testCases := []struct { + URI string + expectedErr error + }{ + { + URI: "wrongscheme:///a/b/c", + expectedErr: archiver.ErrURISchemeMismatch, + }, + { + URI: "gs:my-bucket-cad/temporal_archival/visibility", + expectedErr: archiver.ErrInvalidURI, + }, + { + URI: "gs://", + expectedErr: archiver.ErrInvalidURI, + }, + { + URI: "gs://my-bucket-cad", + expectedErr: archiver.ErrInvalidURI, + }, + { + URI: "gs:/my-bucket-cad/temporal_archival/visibility", + expectedErr: archiver.ErrInvalidURI, + }, + { + URI: "gs://my-bucket-cad/temporal_archival/visibility", + expectedErr: nil, + }, + } + + storageWrapper := connector.NewMockClient(s.controller) + storageWrapper.EXPECT().Exist(gomock.Any(), gomock.Any(), "").Return(false, nil) + visibilityArchiver := new(visibilityArchiver) + 
visibilityArchiver.gcloudStorage = storageWrapper + for _, tc := range testCases { + URI, err := archiver.NewURI(tc.URI) + s.NoError(err) + s.Equal(tc.expectedErr, visibilityArchiver.ValidateURI(URI)) + } +} + +func (s *visibilityArchiverSuite) TestArchive_Fail_InvalidVisibilityURI() { + ctx := context.Background() + URI, err := archiver.NewURI("wrongscheme://") + s.NoError(err) + storageWrapper := connector.NewMockClient(s.controller) + + visibilityArchiver := newVisibilityArchiver(s.container, storageWrapper) + s.NoError(err) + request := &archiverspb.VisibilityRecord{ + NamespaceId: testNamespaceID, + Namespace: testNamespace, + WorkflowId: testWorkflowID, + RunId: testRunID, + } + + err = visibilityArchiver.Archive(ctx, URI, request) + s.Error(err) +} + +func (s *visibilityArchiverSuite) TestQuery_Fail_InvalidVisibilityURI() { + ctx := context.Background() + URI, err := archiver.NewURI("wrongscheme://") + s.NoError(err) + storageWrapper := connector.NewMockClient(s.controller) + + visibilityArchiver := newVisibilityArchiver(s.container, storageWrapper) + s.NoError(err) + request := &archiver.QueryVisibilityRequest{ + NamespaceID: testNamespaceID, + PageSize: 10, + Query: "WorkflowType='type::example' AND CloseTime='2020-02-05T11:00:00Z' AND SearchPrecision='Day'", + } + + _, err = visibilityArchiver.Query(ctx, URI, request, searchattribute.TestNameTypeMap) + s.Error(err) +} + +func (s *visibilityArchiverSuite) TestVisibilityArchive() { + ctx := context.Background() + URI, err := archiver.NewURI("gs://my-bucket-cad/temporal_archival/visibility") + s.NoError(err) + storageWrapper := connector.NewMockClient(s.controller) + storageWrapper.EXPECT().Exist(gomock.Any(), URI, gomock.Any()).Return(false, nil) + storageWrapper.EXPECT().Upload(gomock.Any(), URI, gomock.Any(), gomock.Any()).Return(nil).Times(2) + + visibilityArchiver := newVisibilityArchiver(s.container, storageWrapper) + s.NoError(err) + + request := &archiverspb.VisibilityRecord{ + Namespace: testNamespace, + NamespaceId: testNamespaceID, + WorkflowId: testWorkflowID, + RunId: testRunID, + WorkflowTypeName: testWorkflowTypeName, + StartTime: timestamp.TimeNowPtrUtc(), + ExecutionTime: nil, // workflow without backoff + CloseTime: timestamp.TimeNowPtrUtc(), + Status: enumspb.WORKFLOW_EXECUTION_STATUS_FAILED, + HistoryLength: int64(101), + } + + err = visibilityArchiver.Archive(ctx, URI, request) + s.NoError(err) +} + +func (s *visibilityArchiverSuite) TestQuery_Fail_InvalidQuery() { + ctx := context.Background() + URI, err := archiver.NewURI("gs://my-bucket-cad/temporal_archival/visibility") + s.NoError(err) + storageWrapper := connector.NewMockClient(s.controller) + storageWrapper.EXPECT().Exist(gomock.Any(), URI, gomock.Any()).Return(false, nil) + visibilityArchiver := newVisibilityArchiver(s.container, storageWrapper) + s.NoError(err) + + mockParser := NewMockQueryParser(s.controller) + mockParser.EXPECT().Parse(gomock.Any()).Return(nil, errors.New("invalid query")) + visibilityArchiver.queryParser = mockParser + response, err := visibilityArchiver.Query(ctx, URI, &archiver.QueryVisibilityRequest{ + NamespaceID: "some random namespaceID", + PageSize: 10, + Query: "some invalid query", + }, searchattribute.TestNameTypeMap) + s.Error(err) + s.Nil(response) +} + +func (s *visibilityArchiverSuite) TestQuery_Fail_InvalidToken() { + URI, err := archiver.NewURI("gs://my-bucket-cad/temporal_archival/visibility") + s.NoError(err) + storageWrapper := connector.NewMockClient(s.controller) + storageWrapper.EXPECT().Exist(gomock.Any(), 
URI, gomock.Any()).Return(false, nil) + visibilityArchiver := newVisibilityArchiver(s.container, storageWrapper) + s.NoError(err) + + mockParser := NewMockQueryParser(s.controller) + startTime, _ := time.Parse(time.RFC3339, "2019-10-04T11:00:00+00:00") + closeTime := startTime.Add(time.Hour) + precision := PrecisionDay + mockParser.EXPECT().Parse(gomock.Any()).Return(&parsedQuery{ + closeTime: closeTime, + startTime: startTime, + searchPrecision: &precision, + }, nil) + visibilityArchiver.queryParser = mockParser + request := &archiver.QueryVisibilityRequest{ + NamespaceID: testNamespaceID, + Query: "parsed by mockParser", + PageSize: 1, + NextPageToken: []byte{1, 2, 3}, + } + response, err := visibilityArchiver.Query(context.Background(), URI, request, searchattribute.TestNameTypeMap) + s.Error(err) + s.Nil(response) +} + +func (s *visibilityArchiverSuite) TestQuery_Success_NoNextPageToken() { + ctx := context.Background() + URI, err := archiver.NewURI("gs://my-bucket-cad/temporal_archival/visibility") + s.NoError(err) + storageWrapper := connector.NewMockClient(s.controller) + storageWrapper.EXPECT().Exist(gomock.Any(), URI, gomock.Any()).Return(false, nil) + storageWrapper.EXPECT().QueryWithFilters(gomock.Any(), URI, gomock.Any(), 10, 0, gomock.Any()).Return([]string{"closeTimeout_2020-02-05T09:56:14Z_test-workflow-id_MobileOnlyWorkflow::processMobileOnly_test-run-id.visibility"}, true, 1, nil) + storageWrapper.EXPECT().Get(gomock.Any(), URI, "test-namespace-id/closeTimeout_2020-02-05T09:56:14Z_test-workflow-id_MobileOnlyWorkflow::processMobileOnly_test-run-id.visibility").Return([]byte(exampleVisibilityRecord), nil) + + visibilityArchiver := newVisibilityArchiver(s.container, storageWrapper) + s.NoError(err) + + mockParser := NewMockQueryParser(s.controller) + dayPrecision := "Day" + closeTime, _ := time.Parse(time.RFC3339, "2019-10-04T11:00:00+00:00") + mockParser.EXPECT().Parse(gomock.Any()).Return(&parsedQuery{ + closeTime: closeTime, + searchPrecision: &dayPrecision, + workflowType: convert.StringPtr("MobileOnlyWorkflow::processMobileOnly"), + workflowID: convert.StringPtr(testWorkflowID), + runID: convert.StringPtr(testRunID), + }, nil) + visibilityArchiver.queryParser = mockParser + request := &archiver.QueryVisibilityRequest{ + NamespaceID: testNamespaceID, + PageSize: 10, + Query: "parsed by mockParser", + } + + response, err := visibilityArchiver.Query(ctx, URI, request, searchattribute.TestNameTypeMap) + s.NoError(err) + s.NotNil(response) + s.Nil(response.NextPageToken) + s.Len(response.Executions, 1) + ei, err := convertToExecutionInfo(s.expectedVisibilityRecords[0], searchattribute.TestNameTypeMap) + s.NoError(err) + s.Equal(ei, response.Executions[0]) +} + +func (s *visibilityArchiverSuite) TestQuery_Success_SmallPageSize() { + + pageSize := 2 + ctx := context.Background() + URI, err := archiver.NewURI("gs://my-bucket-cad/temporal_archival/visibility") + s.NoError(err) + storageWrapper := connector.NewMockClient(s.controller) + storageWrapper.EXPECT().Exist(gomock.Any(), URI, gomock.Any()).Return(false, nil).Times(2) + storageWrapper.EXPECT().QueryWithFilters(gomock.Any(), URI, gomock.Any(), pageSize, 0, gomock.Any()).Return([]string{"closeTimeout_2020-02-05T09:56:14Z_test-workflow-id_MobileOnlyWorkflow::processMobileOnly_test-run-id.visibility", "closeTimeout_2020-02-05T09:56:15Z_test-workflow-id_MobileOnlyWorkflow::processMobileOnly_test-run-id.visibility"}, false, 1, nil) + storageWrapper.EXPECT().QueryWithFilters(gomock.Any(), URI, gomock.Any(), pageSize, 1, 
gomock.Any()).Return([]string{"closeTimeout_2020-02-05T09:56:16Z_test-workflow-id_MobileOnlyWorkflow::processMobileOnly_test-run-id.visibility"}, true, 2, nil) + storageWrapper.EXPECT().Get(gomock.Any(), URI, "test-namespace-id/closeTimeout_2020-02-05T09:56:14Z_test-workflow-id_MobileOnlyWorkflow::processMobileOnly_test-run-id.visibility").Return([]byte(exampleVisibilityRecord), nil) + storageWrapper.EXPECT().Get(gomock.Any(), URI, "test-namespace-id/closeTimeout_2020-02-05T09:56:15Z_test-workflow-id_MobileOnlyWorkflow::processMobileOnly_test-run-id.visibility").Return([]byte(exampleVisibilityRecord), nil) + storageWrapper.EXPECT().Get(gomock.Any(), URI, "test-namespace-id/closeTimeout_2020-02-05T09:56:16Z_test-workflow-id_MobileOnlyWorkflow::processMobileOnly_test-run-id.visibility").Return([]byte(exampleVisibilityRecord), nil) + + visibilityArchiver := newVisibilityArchiver(s.container, storageWrapper) + s.NoError(err) + + mockParser := NewMockQueryParser(s.controller) + dayPrecision := "Day" + closeTime, _ := time.Parse(time.RFC3339, "2019-10-04T11:00:00+00:00") + mockParser.EXPECT().Parse(gomock.Any()).Return(&parsedQuery{ + closeTime: closeTime, + searchPrecision: &dayPrecision, + workflowType: convert.StringPtr("MobileOnlyWorkflow::processMobileOnly"), + workflowID: convert.StringPtr(testWorkflowID), + runID: convert.StringPtr(testRunID), + }, nil).AnyTimes() + visibilityArchiver.queryParser = mockParser + request := &archiver.QueryVisibilityRequest{ + NamespaceID: testNamespaceID, + PageSize: pageSize, + Query: "parsed by mockParser", + } + + response, err := visibilityArchiver.Query(ctx, URI, request, searchattribute.TestNameTypeMap) + s.NoError(err) + s.NotNil(response) + s.NotNil(response.NextPageToken) + s.Len(response.Executions, 2) + ei, err := convertToExecutionInfo(s.expectedVisibilityRecords[0], searchattribute.TestNameTypeMap) + s.NoError(err) + s.Equal(ei, response.Executions[0]) + ei, err = convertToExecutionInfo(s.expectedVisibilityRecords[0], searchattribute.TestNameTypeMap) + s.NoError(err) + s.Equal(ei, response.Executions[1]) + + request.NextPageToken = response.NextPageToken + response, err = visibilityArchiver.Query(ctx, URI, request, searchattribute.TestNameTypeMap) + s.NoError(err) + s.NotNil(response) + s.Nil(response.NextPageToken) + s.Len(response.Executions, 1) + ei, err = convertToExecutionInfo(s.expectedVisibilityRecords[0], searchattribute.TestNameTypeMap) + s.NoError(err) + s.Equal(ei, response.Executions[0]) +} + +func (s *visibilityArchiverSuite) TestQuery_EmptyQuery_InvalidNamespace() { + URI, err := archiver.NewURI("gs://my-bucket-cad/temporal_archival/visibility") + s.NoError(err) + storageWrapper := connector.NewMockClient(s.controller) + storageWrapper.EXPECT().Exist(gomock.Any(), URI, gomock.Any()).Return(false, nil) + arc := newVisibilityArchiver(s.container, storageWrapper) + req := &archiver.QueryVisibilityRequest{ + NamespaceID: "", + PageSize: 1, + NextPageToken: nil, + Query: "", + } + _, err = arc.Query(context.Background(), URI, req, searchattribute.TestNameTypeMap) + + var svcErr *serviceerror.InvalidArgument + + s.ErrorAs(err, &svcErr) +} + +func (s *visibilityArchiverSuite) TestQuery_EmptyQuery_ZeroPageSize() { + URI, err := archiver.NewURI("gs://my-bucket-cad/temporal_archival/visibility") + s.NoError(err) + storageWrapper := connector.NewMockClient(s.controller) + storageWrapper.EXPECT().Exist(gomock.Any(), URI, gomock.Any()).Return(false, nil) + arc := newVisibilityArchiver(s.container, storageWrapper) + + req := 
&archiver.QueryVisibilityRequest{ + NamespaceID: testNamespaceID, + PageSize: 0, + NextPageToken: nil, + Query: "", + } + _, err = arc.Query(context.Background(), URI, req, searchattribute.TestNameTypeMap) + + var svcErr *serviceerror.InvalidArgument + + s.ErrorAs(err, &svcErr) +} + +func (s *visibilityArchiverSuite) TestQuery_EmptyQuery_Pagination() { + URI, err := archiver.NewURI("gs://my-bucket-cad/temporal_archival/visibility") + s.NoError(err) + storageWrapper := connector.NewMockClient(s.controller) + storageWrapper.EXPECT().Exist(gomock.Any(), URI, gomock.Any()).Return(true, nil).Times(2) + storageWrapper.EXPECT().QueryWithFilters( + gomock.Any(), + URI, + gomock.Any(), + 1, + 0, + gomock.Any(), + ).Return( + []string{"closeTimeout_2020-02-05T09:56:14Z_test-workflow-id_MobileOnlyWorkflow::processMobileOnly_test-run-id.visibility"}, + false, + 1, + nil, + ) + storageWrapper.EXPECT().QueryWithFilters( + gomock.Any(), + URI, + gomock.Any(), + 1, + 1, + gomock.Any(), + ).Return( + []string{"closeTimeout_2020-02-05T09:56:14Z_test-workflow-id2_MobileOnlyWorkflow::processMobileOnly_test-run" + + "-id.visibility"}, + true, + 2, + nil, + ) + storageWrapper.EXPECT().Get( + gomock.Any(), + URI, + "test-namespace-id/closeTimeout_2020-02-05T09:56:14Z_test-workflow-id_MobileOnlyWorkflow::processMobileOnly_test-run-id.visibility", + ).Return([]byte(exampleVisibilityRecord), nil) + storageWrapper.EXPECT().Get(gomock.Any(), URI, + "test-namespace-id/closeTimeout_2020-02-05T09:56:14Z_test-workflow-id2_MobileOnlyWorkflow"+ + "::processMobileOnly_test-run-id.visibility").Return([]byte(exampleVisibilityRecord2), nil) + + arc := newVisibilityArchiver(s.container, storageWrapper) + + response := &archiver.QueryVisibilityResponse{ + Executions: nil, + NextPageToken: nil, + } + + limit := 10 + executions := make(map[string]*workflow.WorkflowExecutionInfo, limit) + + numPages := 2 + for i := 0; i < numPages; i++ { + req := &archiver.QueryVisibilityRequest{ + NamespaceID: testNamespaceID, + PageSize: 1, + NextPageToken: response.NextPageToken, + Query: "", + } + response, err = arc.Query(context.Background(), URI, req, searchattribute.TestNameTypeMap) + s.NoError(err) + s.NotNil(response) + s.Len(response.Executions, 1) + + s.Equal( + i == numPages-1, + response.NextPageToken == nil, + "should have no next page token on the last iteration", + ) + + for _, execution := range response.Executions { + key := execution.Execution.GetWorkflowId() + + "/" + execution.Execution.GetRunId() + + "/" + execution.CloseTime.String() + executions[key] = execution + } + } + s.Len(executions, 2, "there should be exactly 2 unique executions") +} diff -Nru temporal-1.21.5-1/src/common/archiver/historyIterator.go temporal-1.22.5/src/common/archiver/historyIterator.go --- temporal-1.21.5-1/src/common/archiver/historyIterator.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/archiver/historyIterator.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,273 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -//go:generate mockgen -copyright_file ../../LICENSE -package $GOPACKAGE -source $GOFILE -destination historyIterator_mock.go - -package archiver - -import ( - "bytes" - "context" - "encoding/json" - "errors" - - "github.com/gogo/protobuf/jsonpb" - "github.com/gogo/protobuf/proto" - historypb "go.temporal.io/api/history/v1" - "go.temporal.io/api/serviceerror" - - archiverspb "go.temporal.io/server/api/archiver/v1" - "go.temporal.io/server/common" - "go.temporal.io/server/common/persistence" -) - -const ( - historyPageSize = 250 -) - -type ( - // HistoryIterator is used to get history batches - HistoryIterator interface { - Next(context.Context) (*archiverspb.HistoryBlob, error) - HasNext() bool - GetState() ([]byte, error) - } - - historyIteratorState struct { - NextEventID int64 - FinishedIteration bool - } - - historyIterator struct { - historyIteratorState - - request *ArchiveHistoryRequest - executionManager persistence.ExecutionManager - sizeEstimator SizeEstimator - historyPageSize int - targetHistoryBlobSize int - } -) - -var ( - errIteratorDepleted = errors.New("iterator is depleted") -) - -// NewHistoryIterator returns a new HistoryIterator -func NewHistoryIterator( - request *ArchiveHistoryRequest, - executionManager persistence.ExecutionManager, - targetHistoryBlobSize int, -) HistoryIterator { - return newHistoryIterator(request, executionManager, targetHistoryBlobSize) -} - -// NewHistoryIteratorFromState returns a new HistoryIterator with specified state -func NewHistoryIteratorFromState( - request *ArchiveHistoryRequest, - executionManager persistence.ExecutionManager, - targetHistoryBlobSize int, - initialState []byte, -) (HistoryIterator, error) { - it := newHistoryIterator(request, executionManager, targetHistoryBlobSize) - if initialState == nil { - return it, nil - } - if err := it.reset(initialState); err != nil { - return nil, err - } - return it, nil -} - -func newHistoryIterator( - request *ArchiveHistoryRequest, - executionManager persistence.ExecutionManager, - targetHistoryBlobSize int, -) *historyIterator { - return &historyIterator{ - historyIteratorState: historyIteratorState{ - NextEventID: common.FirstEventID, - FinishedIteration: false, - }, - request: request, - executionManager: executionManager, - historyPageSize: historyPageSize, - targetHistoryBlobSize: targetHistoryBlobSize, - sizeEstimator: NewJSONSizeEstimator(), - } -} - -func (i *historyIterator) Next( - ctx 
context.Context, -) (*archiverspb.HistoryBlob, error) { - if !i.HasNext() { - return nil, errIteratorDepleted - } - - historyBatches, newIterState, err := i.readHistoryBatches(ctx, i.NextEventID) - if err != nil { - return nil, err - } - - i.historyIteratorState = newIterState - firstEvent := historyBatches[0].Events[0] - lastBatch := historyBatches[len(historyBatches)-1] - lastEvent := lastBatch.Events[len(lastBatch.Events)-1] - eventCount := int64(0) - for _, batch := range historyBatches { - eventCount += int64(len(batch.Events)) - } - header := &archiverspb.HistoryBlobHeader{ - Namespace: i.request.Namespace, - NamespaceId: i.request.NamespaceID, - WorkflowId: i.request.WorkflowID, - RunId: i.request.RunID, - IsLast: i.FinishedIteration, - FirstFailoverVersion: firstEvent.Version, - LastFailoverVersion: lastEvent.Version, - FirstEventId: firstEvent.EventId, - LastEventId: lastEvent.EventId, - EventCount: eventCount, - } - - return &archiverspb.HistoryBlob{ - Header: header, - Body: historyBatches, - }, nil -} - -// HasNext returns true if there are more items to iterate over. -func (i *historyIterator) HasNext() bool { - return !i.FinishedIteration -} - -// GetState returns the encoded iterator state -func (i *historyIterator) GetState() ([]byte, error) { - return json.Marshal(i.historyIteratorState) -} - -func (i *historyIterator) readHistoryBatches( - ctx context.Context, - firstEventID int64, -) ([]*historypb.History, historyIteratorState, error) { - size := 0 - targetSize := i.targetHistoryBlobSize - var historyBatches []*historypb.History - newIterState := historyIteratorState{} - for size < targetSize { - currHistoryBatches, err := i.readHistory(ctx, firstEventID) - if _, isNotFound := err.(*serviceerror.NotFound); isNotFound && firstEventID != common.FirstEventID { - newIterState.FinishedIteration = true - return historyBatches, newIterState, nil - } - if err != nil { - return nil, newIterState, err - } - for idx, batch := range currHistoryBatches { - historyBatchSize, err := i.sizeEstimator.EstimateSize(batch) - if err != nil { - return nil, newIterState, err - } - size += historyBatchSize - historyBatches = append(historyBatches, batch) - firstEventID = batch.Events[len(batch.Events)-1].EventId + 1 - - // In case targetSize is satisfied before reaching the end of current set of batches, return immediately. - // Otherwise, we need to look ahead to see if there's more history batches. - if size >= targetSize && idx != len(currHistoryBatches)-1 { - newIterState.FinishedIteration = false - newIterState.NextEventID = firstEventID - return historyBatches, newIterState, nil - } - } - } - - // If you are here, it means the target size is met after adding the last batch of read history. - // We need to check if there's more history batches. 
- _, err := i.readHistory(ctx, firstEventID) - if _, isNotFound := err.(*serviceerror.NotFound); isNotFound && firstEventID != common.FirstEventID { - newIterState.FinishedIteration = true - return historyBatches, newIterState, nil - } - if err != nil { - return nil, newIterState, err - } - newIterState.FinishedIteration = false - newIterState.NextEventID = firstEventID - return historyBatches, newIterState, nil -} - -func (i *historyIterator) readHistory(ctx context.Context, firstEventID int64) ([]*historypb.History, error) { - req := &persistence.ReadHistoryBranchRequest{ - BranchToken: i.request.BranchToken, - MinEventID: firstEventID, - MaxEventID: common.EndEventID, - PageSize: i.historyPageSize, - ShardID: i.request.ShardID, - } - historyBatches, _, _, err := persistence.ReadFullPageEventsByBatch(ctx, i.executionManager, req) - return historyBatches, err -} - -// reset resets iterator to a certain state given its encoded representation -// if it returns an error, the operation will have no effect on the iterator -func (i *historyIterator) reset(stateToken []byte) error { - var iteratorState historyIteratorState - if err := json.Unmarshal(stateToken, &iteratorState); err != nil { - return err - } - i.historyIteratorState = iteratorState - return nil -} - -type ( - // SizeEstimator is used to estimate the size of any object - SizeEstimator interface { - EstimateSize(v interface{}) (int, error) - } - - jsonSizeEstimator struct { - marshaler jsonpb.Marshaler - } -) - -func (e *jsonSizeEstimator) EstimateSize(v interface{}) (int, error) { - // jsonpb must be used for proto structs. - if protoMessage, ok := v.(proto.Message); ok { - var buf bytes.Buffer - err := e.marshaler.Marshal(&buf, protoMessage) - return buf.Len(), err - } - - data, err := json.Marshal(v) - if err != nil { - return 0, err - } - return len(data), nil -} - -// NewJSONSizeEstimator returns a new SizeEstimator which uses json encoding to estimate size -func NewJSONSizeEstimator() SizeEstimator { - return &jsonSizeEstimator{} -} diff -Nru temporal-1.21.5-1/src/common/archiver/historyIterator_mock.go temporal-1.22.5/src/common/archiver/historyIterator_mock.go --- temporal-1.21.5-1/src/common/archiver/historyIterator_mock.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/archiver/historyIterator_mock.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,142 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Code generated by MockGen. DO NOT EDIT. -// Source: historyIterator.go - -// Package archiver is a generated GoMock package. -package archiver - -import ( - context "context" - reflect "reflect" - - gomock "github.com/golang/mock/gomock" - archiver "go.temporal.io/server/api/archiver/v1" -) - -// MockHistoryIterator is a mock of HistoryIterator interface. -type MockHistoryIterator struct { - ctrl *gomock.Controller - recorder *MockHistoryIteratorMockRecorder -} - -// MockHistoryIteratorMockRecorder is the mock recorder for MockHistoryIterator. -type MockHistoryIteratorMockRecorder struct { - mock *MockHistoryIterator -} - -// NewMockHistoryIterator creates a new mock instance. -func NewMockHistoryIterator(ctrl *gomock.Controller) *MockHistoryIterator { - mock := &MockHistoryIterator{ctrl: ctrl} - mock.recorder = &MockHistoryIteratorMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockHistoryIterator) EXPECT() *MockHistoryIteratorMockRecorder { - return m.recorder -} - -// GetState mocks base method. -func (m *MockHistoryIterator) GetState() ([]byte, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetState") - ret0, _ := ret[0].([]byte) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetState indicates an expected call of GetState. -func (mr *MockHistoryIteratorMockRecorder) GetState() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetState", reflect.TypeOf((*MockHistoryIterator)(nil).GetState)) -} - -// HasNext mocks base method. -func (m *MockHistoryIterator) HasNext() bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "HasNext") - ret0, _ := ret[0].(bool) - return ret0 -} - -// HasNext indicates an expected call of HasNext. -func (mr *MockHistoryIteratorMockRecorder) HasNext() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HasNext", reflect.TypeOf((*MockHistoryIterator)(nil).HasNext)) -} - -// Next mocks base method. -func (m *MockHistoryIterator) Next(arg0 context.Context) (*archiver.HistoryBlob, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Next", arg0) - ret0, _ := ret[0].(*archiver.HistoryBlob) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Next indicates an expected call of Next. -func (mr *MockHistoryIteratorMockRecorder) Next(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Next", reflect.TypeOf((*MockHistoryIterator)(nil).Next), arg0) -} - -// MockSizeEstimator is a mock of SizeEstimator interface. -type MockSizeEstimator struct { - ctrl *gomock.Controller - recorder *MockSizeEstimatorMockRecorder -} - -// MockSizeEstimatorMockRecorder is the mock recorder for MockSizeEstimator. -type MockSizeEstimatorMockRecorder struct { - mock *MockSizeEstimator -} - -// NewMockSizeEstimator creates a new mock instance. -func NewMockSizeEstimator(ctrl *gomock.Controller) *MockSizeEstimator { - mock := &MockSizeEstimator{ctrl: ctrl} - mock.recorder = &MockSizeEstimatorMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. 
-func (m *MockSizeEstimator) EXPECT() *MockSizeEstimatorMockRecorder { - return m.recorder -} - -// EstimateSize mocks base method. -func (m *MockSizeEstimator) EstimateSize(v interface{}) (int, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "EstimateSize", v) - ret0, _ := ret[0].(int) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// EstimateSize indicates an expected call of EstimateSize. -func (mr *MockSizeEstimatorMockRecorder) EstimateSize(v interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EstimateSize", reflect.TypeOf((*MockSizeEstimator)(nil).EstimateSize), v) -} diff -Nru temporal-1.21.5-1/src/common/archiver/historyIterator_test.go temporal-1.22.5/src/common/archiver/historyIterator_test.go --- temporal-1.21.5-1/src/common/archiver/historyIterator_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/archiver/historyIterator_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,737 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- -package archiver - -import ( - "context" - "errors" - "testing" - "time" - - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - enumspb "go.temporal.io/api/enums/v1" - historypb "go.temporal.io/api/history/v1" - "go.temporal.io/api/serviceerror" - taskqueuepb "go.temporal.io/api/taskqueue/v1" - - archiverspb "go.temporal.io/server/api/archiver/v1" - "go.temporal.io/server/common" - "go.temporal.io/server/common/persistence" - "go.temporal.io/server/common/primitives/timestamp" -) - -const ( - testNamespaceID = "test-namespace-id" - testNamespace = "test-namespace" - testWorkflowID = "test-workflow-id" - testRunID = "test-run-id" - testShardID = int32(1) - testNextEventID = 1800 - testCloseFailoverVersion = 100 - testDefaultPersistencePageSize = 250 - testDefaultTargetHistoryBlobSize = 2 * 1024 * 124 - testDefaultHistoryEventSize = 50 -) - -var ( - testBranchToken = []byte{1, 2, 3} -) - -type ( - HistoryIteratorSuite struct { - *require.Assertions - suite.Suite - - controller *gomock.Controller - mockExecutionMgr *persistence.MockExecutionManager - } - - page struct { - firstbatchIdx int - numBatches int - firstEventFailoverVersion int64 - lastEventFailoverVersion int64 - } - - testSizeEstimator struct{} -) - -func (e *testSizeEstimator) EstimateSize(v interface{}) (int, error) { - historyBatch, ok := v.(*historypb.History) - if !ok { - return -1, errors.New("test size estimator only estimate the size of history batches") - } - return testDefaultHistoryEventSize * len(historyBatch.Events), nil -} - -func newTestSizeEstimator() SizeEstimator { - return &testSizeEstimator{} -} - -func TestHistoryIteratorSuite(t *testing.T) { - suite.Run(t, new(HistoryIteratorSuite)) -} - -func (s *HistoryIteratorSuite) SetupTest() { - s.Assertions = require.New(s.T()) - s.controller = gomock.NewController(s.T()) - s.mockExecutionMgr = persistence.NewMockExecutionManager(s.controller) -} - -func (s *HistoryIteratorSuite) TearDownTest() { - s.controller.Finish() -} - -func (s *HistoryIteratorSuite) TestReadHistory_Failed_EventsV2() { - s.mockExecutionMgr.EXPECT().ReadHistoryBranchByBatch(gomock.Any(), gomock.Any()).Return(nil, errors.New("got error reading history branch")) - itr := s.constructTestHistoryIterator(s.mockExecutionMgr, testDefaultTargetHistoryBlobSize, nil) - history, err := itr.readHistory(context.Background(), common.FirstEventID) - s.Error(err) - s.Nil(history) -} - -func (s *HistoryIteratorSuite) TestReadHistory_Success_EventsV2() { - resp := persistence.ReadHistoryBranchByBatchResponse{ - History: []*historypb.History{}, - NextPageToken: []byte{}, - } - s.mockExecutionMgr.EXPECT().ReadHistoryBranchByBatch(gomock.Any(), gomock.Any()).Return(&resp, nil) - itr := s.constructTestHistoryIterator(s.mockExecutionMgr, testDefaultTargetHistoryBlobSize, nil) - history, err := itr.readHistory(context.Background(), common.FirstEventID) - s.NoError(err) - s.Len(history, 0) -} - -// In the following test: -// batchInfo represents # of events for each history batch. -// page represents the metadata of the set of history batches that should be requested by the iterator -// and returned by the history manager. Each page specifies the index of the first history batch it should -// return, # of batches to return and first/last event failover version for the set of batches returned. -// Note that is possible that a history batch is contained in multiple pages. 
- -func (s *HistoryIteratorSuite) TestReadHistoryBatches_Fail_FirstCallToReadHistoryGivesError() { - batchInfo := []int{1} - pages := []page{ - { - firstbatchIdx: 0, - numBatches: 1, - firstEventFailoverVersion: 1, - lastEventFailoverVersion: 1, - }, - } - s.initMockExecutionManager(batchInfo, 0, false, pages...) - itr := s.constructTestHistoryIterator(s.mockExecutionMgr, testDefaultTargetHistoryBlobSize, nil) - startingIteratorState := s.copyIteratorState(itr) - events, nextIterState, err := itr.readHistoryBatches(context.Background(), common.FirstEventID) - s.Error(err) - s.Nil(events) - s.False(nextIterState.FinishedIteration) - s.Zero(nextIterState.NextEventID) - s.assertStateMatches(startingIteratorState, itr) -} - -func (s *HistoryIteratorSuite) TestReadHistoryBatches_Fail_NonFirstCallToReadHistoryGivesError() { - batchInfo := []int{1, 1} - pages := []page{ - { - firstbatchIdx: 0, - numBatches: 1, - firstEventFailoverVersion: 1, - lastEventFailoverVersion: 1, - }, - { - firstbatchIdx: 1, - numBatches: 1, - firstEventFailoverVersion: 1, - lastEventFailoverVersion: 1, - }, - } - s.initMockExecutionManager(batchInfo, 1, false, pages...) - itr := s.constructTestHistoryIterator(s.mockExecutionMgr, testDefaultTargetHistoryBlobSize, nil) - startingIteratorState := s.copyIteratorState(itr) - events, nextIterState, err := itr.readHistoryBatches(context.Background(), common.FirstEventID) - s.Error(err) - s.Nil(events) - s.False(nextIterState.FinishedIteration) - s.Zero(nextIterState.NextEventID) - s.assertStateMatches(startingIteratorState, itr) -} - -func (s *HistoryIteratorSuite) TestReadHistoryBatches_Success_ReadToHistoryEnd() { - batchInfo := []int{1, 2, 1, 1, 1, 3, 3, 1, 3} - pages := []page{ - { - firstbatchIdx: 0, - numBatches: 3, - firstEventFailoverVersion: 1, - lastEventFailoverVersion: 1, - }, - { - firstbatchIdx: 3, - numBatches: 2, - firstEventFailoverVersion: 1, - lastEventFailoverVersion: 1, - }, - { - firstbatchIdx: 5, - numBatches: 4, - firstEventFailoverVersion: 1, - lastEventFailoverVersion: 1, - }, - } - s.initMockExecutionManager(batchInfo, -1, true, pages...) - // ensure target history batches size is greater than total history length to ensure all of history is read - itr := s.constructTestHistoryIterator(s.mockExecutionMgr, 20*testDefaultHistoryEventSize, nil) - startingIteratorState := s.copyIteratorState(itr) - history, nextIterState, err := itr.readHistoryBatches(context.Background(), common.FirstEventID) - s.NoError(err) - s.NotNil(history) - s.Len(history, 9) - s.True(nextIterState.FinishedIteration) - s.Zero(nextIterState.NextEventID) - s.assertStateMatches(startingIteratorState, itr) -} - -func (s *HistoryIteratorSuite) TestReadHistoryBatches_Success_TargetSizeSatisfiedWithoutReadingToEnd() { - batchInfo := []int{1, 2, 1, 1, 1, 3, 3, 1, 3} - pages := []page{ - { - firstbatchIdx: 0, - numBatches: 3, - firstEventFailoverVersion: 1, - lastEventFailoverVersion: 1, - }, - { - firstbatchIdx: 3, - numBatches: 2, - firstEventFailoverVersion: 1, - lastEventFailoverVersion: 1, - }, - { - firstbatchIdx: 5, - numBatches: 4, - firstEventFailoverVersion: 1, - lastEventFailoverVersion: 1, - }, - } - s.initMockExecutionManager(batchInfo, -1, false, pages...) 
- // ensure target history batches is smaller than full length of history so that not all of history is read - itr := s.constructTestHistoryIterator(s.mockExecutionMgr, 11*testDefaultHistoryEventSize, nil) - startingIteratorState := s.copyIteratorState(itr) - history, nextIterState, err := itr.readHistoryBatches(context.Background(), common.FirstEventID) - s.NoError(err) - s.NotNil(history) - s.Len(history, 7) - s.False(nextIterState.FinishedIteration) - s.Equal(int64(13), nextIterState.NextEventID) - s.assertStateMatches(startingIteratorState, itr) -} - -func (s *HistoryIteratorSuite) TestReadHistoryBatches_Success_ReadExactlyToHistoryEnd() { - batchInfo := []int{1, 2, 1, 1, 1, 3, 3, 1, 3} - pages := []page{ - { - firstbatchIdx: 0, - numBatches: 3, - firstEventFailoverVersion: 1, - lastEventFailoverVersion: 1, - }, - { - firstbatchIdx: 3, - numBatches: 2, - firstEventFailoverVersion: 1, - lastEventFailoverVersion: 1, - }, - { - firstbatchIdx: 5, - numBatches: 4, - firstEventFailoverVersion: 1, - lastEventFailoverVersion: 1, - }, - } - s.initMockExecutionManager(batchInfo, -1, true, pages...) - // ensure target history batches size is equal to the full length of history so that all of history is read - itr := s.constructTestHistoryIterator(s.mockExecutionMgr, 16*testDefaultHistoryEventSize, nil) - startingIteratorState := s.copyIteratorState(itr) - history, nextIterState, err := itr.readHistoryBatches(context.Background(), common.FirstEventID) - s.NoError(err) - s.NotNil(history) - s.Len(history, 9) - s.True(nextIterState.FinishedIteration) - s.Zero(nextIterState.NextEventID) - s.assertStateMatches(startingIteratorState, itr) -} - -func (s *HistoryIteratorSuite) TestReadHistoryBatches_Success_ReadPageMultipleTimes() { - batchInfo := []int{1, 3, 2} - pages := []page{ - { - firstbatchIdx: 0, - numBatches: 3, - firstEventFailoverVersion: 1, - lastEventFailoverVersion: 1, - }, - { - firstbatchIdx: 2, - numBatches: 1, - firstEventFailoverVersion: 1, - lastEventFailoverVersion: 1, - }, - } - s.initMockExecutionManager(batchInfo, -1, true, pages...) - // ensure target history batches is very small so that one page needs multiple read - itr := s.constructTestHistoryIterator(s.mockExecutionMgr, 2*testDefaultHistoryEventSize, nil) - startingIteratorState := s.copyIteratorState(itr) - history, nextIterState, err := itr.readHistoryBatches(context.Background(), common.FirstEventID) - s.NoError(err) - s.NotNil(history) - s.Len(history, 2) - s.False(nextIterState.FinishedIteration) - s.Equal(int64(5), nextIterState.NextEventID) - s.assertStateMatches(startingIteratorState, itr) - - history, nextIterState, err = itr.readHistoryBatches(context.Background(), nextIterState.NextEventID) - s.NoError(err) - s.NotNil(history) - s.Len(history, 1) - s.True(nextIterState.FinishedIteration) - s.Zero(nextIterState.NextEventID) - s.assertStateMatches(startingIteratorState, itr) -} - -func (s *HistoryIteratorSuite) TestNext_Fail_IteratorDepleted() { - batchInfo := []int{1, 3, 2, 1, 2, 3, 4} - pages := []page{ - { - firstbatchIdx: 0, - numBatches: 2, - firstEventFailoverVersion: 1, - lastEventFailoverVersion: 1, - }, - { - firstbatchIdx: 2, - numBatches: 1, - firstEventFailoverVersion: 1, - lastEventFailoverVersion: 2, - }, - { - firstbatchIdx: 3, - numBatches: 4, - firstEventFailoverVersion: 2, - lastEventFailoverVersion: 5, - }, - } - s.initMockExecutionManager(batchInfo, -1, true, pages...) 
- // set target history batches such that a single call to next will read all of history - itr := s.constructTestHistoryIterator(s.mockExecutionMgr, 16*testDefaultHistoryEventSize, nil) - blob, err := itr.Next(context.Background()) - s.Nil(err) - - expectedIteratorState := historyIteratorState{ - // when iteration is finished page token is not advanced - FinishedIteration: true, - NextEventID: 0, - } - s.assertStateMatches(expectedIteratorState, itr) - s.NotNil(blob) - expectedHeader := &archiverspb.HistoryBlobHeader{ - Namespace: testNamespace, - NamespaceId: testNamespaceID, - WorkflowId: testWorkflowID, - RunId: testRunID, - IsLast: true, - FirstFailoverVersion: 1, - LastFailoverVersion: 5, - FirstEventId: common.FirstEventID, - LastEventId: 16, - EventCount: 16, - } - s.Equal(expectedHeader, blob.Header) - s.Len(blob.Body, 7) - s.NoError(err) - s.False(itr.HasNext()) - - blob, err = itr.Next(context.Background()) - s.Equal(err, errIteratorDepleted) - s.Nil(blob) - s.assertStateMatches(expectedIteratorState, itr) -} - -func (s *HistoryIteratorSuite) TestNext_Fail_ReturnErrOnSecondCallToNext() { - batchInfo := []int{1, 3, 2, 1, 3, 2} - pages := []page{ - { - firstbatchIdx: 0, - numBatches: 2, - firstEventFailoverVersion: 1, - lastEventFailoverVersion: 1, - }, - { - firstbatchIdx: 2, - numBatches: 1, - firstEventFailoverVersion: 1, - lastEventFailoverVersion: 1, - }, - { - firstbatchIdx: 3, - numBatches: 2, - firstEventFailoverVersion: 1, - lastEventFailoverVersion: 1, - }, - { - firstbatchIdx: 5, - numBatches: 1, - firstEventFailoverVersion: 1, - lastEventFailoverVersion: 1, - }, - } - s.initMockExecutionManager(batchInfo, 3, false, pages...) - // set target blob size such that the first two pages are read for blob one without error, third page will return error - itr := s.constructTestHistoryIterator(s.mockExecutionMgr, 6*testDefaultHistoryEventSize, nil) - blob, err := itr.Next(context.Background()) - s.NoError(err) - expectedIteratorState := historyIteratorState{ - FinishedIteration: false, - NextEventID: 7, - } - s.assertStateMatches(expectedIteratorState, itr) - s.NotNil(blob) - expectedHeader := &archiverspb.HistoryBlobHeader{ - Namespace: testNamespace, - NamespaceId: testNamespaceID, - WorkflowId: testWorkflowID, - RunId: testRunID, - IsLast: false, - FirstFailoverVersion: 1, - LastFailoverVersion: 1, - FirstEventId: common.FirstEventID, - LastEventId: 6, - EventCount: 6, - } - s.Equal(expectedHeader, blob.Header) - s.NoError(err) - s.True(itr.HasNext()) - - blob, err = itr.Next(context.Background()) - s.Error(err) - s.Nil(blob) - s.assertStateMatches(expectedIteratorState, itr) -} - -func (s *HistoryIteratorSuite) TestNext_Success_TenCallsToNext() { - var batchInfo []int - for i := 0; i < 100; i++ { - batchInfo = append(batchInfo, []int{1, 2, 3, 4, 4, 3, 2, 1}...) - } - var pages []page - for i := 0; i < 100; i++ { - p := page{ - firstbatchIdx: i * 8, - numBatches: 8, - firstEventFailoverVersion: 1, - lastEventFailoverVersion: 1, - } - pages = append(pages, p) - } - s.initMockExecutionManager(batchInfo, -1, true, pages...) 
- // set target blob size size such that every 10 persistence pages is one group of history batches - itr := s.constructTestHistoryIterator(s.mockExecutionMgr, 20*10*testDefaultHistoryEventSize, nil) - expectedIteratorState := historyIteratorState{ - FinishedIteration: false, - NextEventID: common.FirstEventID, - } - for i := 0; i < 10; i++ { - s.assertStateMatches(expectedIteratorState, itr) - s.True(itr.HasNext()) - blob, err := itr.Next(context.Background()) - s.NoError(err) - s.NotNil(blob) - expectedHeader := &archiverspb.HistoryBlobHeader{ - Namespace: testNamespace, - NamespaceId: testNamespaceID, - WorkflowId: testWorkflowID, - RunId: testRunID, - IsLast: false, - FirstFailoverVersion: 1, - LastFailoverVersion: 1, - FirstEventId: common.FirstEventID + int64(i*200), - LastEventId: int64(200 + (i * 200)), - EventCount: 200, - } - if i == 9 { - expectedHeader.IsLast = true - } - s.Equal(expectedHeader, blob.Header) - - if i < 9 { - expectedIteratorState.FinishedIteration = false - expectedIteratorState.NextEventID = int64(200*(i+1) + 1) - } else { - expectedIteratorState.NextEventID = 0 - expectedIteratorState.FinishedIteration = true - } - } - s.assertStateMatches(expectedIteratorState, itr) - s.False(itr.HasNext()) -} - -func (s *HistoryIteratorSuite) TestNext_Success_SameHistoryDifferentPage() { - batchInfo := []int{2, 4, 4, 3, 2, 1, 1, 2} - pages := []page{ - { - firstbatchIdx: 0, - numBatches: 3, - firstEventFailoverVersion: 1, - lastEventFailoverVersion: 1, - }, - { - firstbatchIdx: 2, - numBatches: 1, - firstEventFailoverVersion: 1, - lastEventFailoverVersion: 1, - }, - { - firstbatchIdx: 3, - numBatches: 2, - firstEventFailoverVersion: 1, - lastEventFailoverVersion: 1, - }, - { - firstbatchIdx: 4, - numBatches: 1, - firstEventFailoverVersion: 1, - lastEventFailoverVersion: 1, - }, - { - firstbatchIdx: 5, - numBatches: 3, - firstEventFailoverVersion: 1, - lastEventFailoverVersion: 1, - }, - } - eventsPerRead := 6 - targetBlobSize := eventsPerRead * testDefaultHistoryEventSize - s.initMockExecutionManager(batchInfo, -1, true, pages...) - itr1 := s.constructTestHistoryIterator(s.mockExecutionMgr, targetBlobSize, nil) - - pages = []page{ - { - firstbatchIdx: 0, - numBatches: 1, - firstEventFailoverVersion: 1, - lastEventFailoverVersion: 1, - }, - { - firstbatchIdx: 1, - numBatches: 3, - firstEventFailoverVersion: 1, - lastEventFailoverVersion: 1, - }, - { - firstbatchIdx: 2, - numBatches: 1, - firstEventFailoverVersion: 1, - lastEventFailoverVersion: 1, - }, - { - firstbatchIdx: 3, - numBatches: 5, - firstEventFailoverVersion: 1, - lastEventFailoverVersion: 1, - }, - { - firstbatchIdx: 4, - numBatches: 4, - firstEventFailoverVersion: 1, - lastEventFailoverVersion: 1, - }, - } - s.initMockExecutionManager(batchInfo, -1, true, pages...) 
- itr2 := s.constructTestHistoryIterator(s.mockExecutionMgr, targetBlobSize, nil) - - totalPages := 3 - expectedFirstEventID := []int64{1, 7, 14} - for i := 0; i != totalPages; i++ { - s.True(itr1.HasNext()) - history1, err := itr1.Next(context.Background()) - s.NoError(err) - - s.True(itr2.HasNext()) - history2, err := itr2.Next(context.Background()) - s.NoError(err) - - s.Equal(history1.Header, history2.Header) - s.Equal(len(history1.Body), len(history2.Body)) - s.Equal(expectedFirstEventID[i], history1.Body[0].Events[0].GetEventId()) - s.Equal(expectedFirstEventID[i], history2.Body[0].Events[0].GetEventId()) - } - expectedIteratorState := historyIteratorState{ - NextEventID: 0, - FinishedIteration: true, - } - s.assertStateMatches(expectedIteratorState, itr1) - s.assertStateMatches(expectedIteratorState, itr2) - s.False(itr1.HasNext()) - s.False(itr2.HasNext()) -} - -func (s *HistoryIteratorSuite) TestNewIteratorWithState() { - itr := s.constructTestHistoryIterator(nil, testDefaultTargetHistoryBlobSize, nil) - testIteratorState := historyIteratorState{ - FinishedIteration: true, - NextEventID: 4, - } - itr.historyIteratorState = testIteratorState - stateToken, err := itr.GetState() - s.NoError(err) - - newItr := s.constructTestHistoryIterator(nil, testDefaultTargetHistoryBlobSize, stateToken) - s.assertStateMatches(testIteratorState, newItr) -} - -func (s *HistoryIteratorSuite) initMockExecutionManager(batchInfo []int, returnErrorOnPage int, addNotExistCall bool, pages ...page) { - firstEventIDs := []int64{common.FirstEventID} - for i, batchSize := range batchInfo { - firstEventIDs = append(firstEventIDs, firstEventIDs[i]+int64(batchSize)) - } - - testShardId := testShardID - for i, p := range pages { - req := &persistence.ReadHistoryBranchRequest{ - BranchToken: testBranchToken, - MinEventID: firstEventIDs[p.firstbatchIdx], - MaxEventID: common.EndEventID, - PageSize: testDefaultPersistencePageSize, - ShardID: testShardId, - } - if returnErrorOnPage == i { - s.mockExecutionMgr.EXPECT().ReadHistoryBranchByBatch(gomock.Any(), req).Return(nil, errors.New("got error getting workflow execution history")) - return - } - - resp := &persistence.ReadHistoryBranchByBatchResponse{ - History: s.constructHistoryBatches(batchInfo, p, firstEventIDs[p.firstbatchIdx]), - } - s.mockExecutionMgr.EXPECT().ReadHistoryBranchByBatch(gomock.Any(), req).Return(resp, nil).MaxTimes(2) - } - - if addNotExistCall { - req := &persistence.ReadHistoryBranchRequest{ - BranchToken: testBranchToken, - MinEventID: firstEventIDs[len(firstEventIDs)-1], - MaxEventID: common.EndEventID, - PageSize: testDefaultPersistencePageSize, - ShardID: testShardId, - } - s.mockExecutionMgr.EXPECT().ReadHistoryBranchByBatch(gomock.Any(), req).Return(nil, serviceerror.NewNotFound("Reach the end")) - } -} - -func (s *HistoryIteratorSuite) copyIteratorState(itr *historyIterator) historyIteratorState { - return itr.historyIteratorState -} - -func (s *HistoryIteratorSuite) assertStateMatches(expected historyIteratorState, itr *historyIterator) { - s.Equal(expected.NextEventID, itr.NextEventID) - s.Equal(expected.FinishedIteration, itr.FinishedIteration) -} - -func (s *HistoryIteratorSuite) constructHistoryBatches(batchInfo []int, page page, firstEventID int64) []*historypb.History { - var batches []*historypb.History - eventsID := firstEventID - for batchIdx, numEvents := range batchInfo[page.firstbatchIdx : page.firstbatchIdx+page.numBatches] { - var events []*historypb.HistoryEvent - for i := 0; i < numEvents; i++ { - event := 
&historypb.HistoryEvent{ - EventId: eventsID, - Version: page.firstEventFailoverVersion, - } - eventsID++ - if batchIdx == page.numBatches-1 { - event.Version = page.lastEventFailoverVersion - } - events = append(events, event) - } - batches = append(batches, &historypb.History{ - Events: events, - }) - } - return batches -} - -func (s *HistoryIteratorSuite) constructTestHistoryIterator( - mockExecutionMgr *persistence.MockExecutionManager, - targetHistoryBlobSize int, - initialState []byte, -) *historyIterator { - request := &ArchiveHistoryRequest{ - ShardID: testShardID, - NamespaceID: testNamespaceID, - Namespace: testNamespace, - WorkflowID: testWorkflowID, - RunID: testRunID, - BranchToken: testBranchToken, - NextEventID: testNextEventID, - CloseFailoverVersion: testCloseFailoverVersion, - } - itr := newHistoryIterator(request, mockExecutionMgr, targetHistoryBlobSize) - if initialState != nil { - err := itr.reset(initialState) - s.NoError(err) - } - itr.sizeEstimator = newTestSizeEstimator() - return itr -} -func (s *HistoryIteratorSuite) TestJSONSizeEstimator() { - e := NewJSONSizeEstimator() - - historyEvent := &historypb.HistoryEvent{ - EventId: 1, - EventTime: timestamp.TimePtr(time.Date(1978, 8, 22, 12, 59, 59, 999999, time.UTC)), - TaskId: 1, - Version: 1, - } - historyEvent.EventType = enumspb.EVENT_TYPE_WORKFLOW_TASK_SCHEDULED - historyEvent.Attributes = &historypb.HistoryEvent_WorkflowTaskScheduledEventAttributes{WorkflowTaskScheduledEventAttributes: &historypb.WorkflowTaskScheduledEventAttributes{ - TaskQueue: &taskqueuepb.TaskQueue{ - Name: "taskQueue", - Kind: enumspb.TASK_QUEUE_KIND_NORMAL, - }, - StartToCloseTimeout: timestamp.DurationPtr(10 * time.Second), - Attempt: 1, - }} - - h := &historypb.History{ - Events: []*historypb.HistoryEvent{ - historyEvent, - }, - } - - size, err := e.EstimateSize(h) - s.NoError(err) - s.Equal(266, size) -} diff -Nru temporal-1.21.5-1/src/common/archiver/history_iterator.go temporal-1.22.5/src/common/archiver/history_iterator.go --- temporal-1.21.5-1/src/common/archiver/history_iterator.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/archiver/history_iterator.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,273 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +//go:generate mockgen -copyright_file ../../LICENSE -package $GOPACKAGE -source $GOFILE -destination history_iterator_mock.go + +package archiver + +import ( + "bytes" + "context" + "encoding/json" + "errors" + + "github.com/gogo/protobuf/jsonpb" + "github.com/gogo/protobuf/proto" + historypb "go.temporal.io/api/history/v1" + "go.temporal.io/api/serviceerror" + + archiverspb "go.temporal.io/server/api/archiver/v1" + "go.temporal.io/server/common" + "go.temporal.io/server/common/persistence" +) + +const ( + historyPageSize = 250 +) + +type ( + // HistoryIterator is used to get history batches + HistoryIterator interface { + Next(context.Context) (*archiverspb.HistoryBlob, error) + HasNext() bool + GetState() ([]byte, error) + } + + historyIteratorState struct { + NextEventID int64 + FinishedIteration bool + } + + historyIterator struct { + historyIteratorState + + request *ArchiveHistoryRequest + executionManager persistence.ExecutionManager + sizeEstimator SizeEstimator + historyPageSize int + targetHistoryBlobSize int + } +) + +var ( + errIteratorDepleted = errors.New("iterator is depleted") +) + +// NewHistoryIterator returns a new HistoryIterator +func NewHistoryIterator( + request *ArchiveHistoryRequest, + executionManager persistence.ExecutionManager, + targetHistoryBlobSize int, +) HistoryIterator { + return newHistoryIterator(request, executionManager, targetHistoryBlobSize) +} + +// NewHistoryIteratorFromState returns a new HistoryIterator with specified state +func NewHistoryIteratorFromState( + request *ArchiveHistoryRequest, + executionManager persistence.ExecutionManager, + targetHistoryBlobSize int, + initialState []byte, +) (HistoryIterator, error) { + it := newHistoryIterator(request, executionManager, targetHistoryBlobSize) + if initialState == nil { + return it, nil + } + if err := it.reset(initialState); err != nil { + return nil, err + } + return it, nil +} + +func newHistoryIterator( + request *ArchiveHistoryRequest, + executionManager persistence.ExecutionManager, + targetHistoryBlobSize int, +) *historyIterator { + return &historyIterator{ + historyIteratorState: historyIteratorState{ + NextEventID: common.FirstEventID, + FinishedIteration: false, + }, + request: request, + executionManager: executionManager, + historyPageSize: historyPageSize, + targetHistoryBlobSize: targetHistoryBlobSize, + sizeEstimator: NewJSONSizeEstimator(), + } +} + +func (i *historyIterator) Next( + ctx context.Context, +) (*archiverspb.HistoryBlob, error) { + if !i.HasNext() { + return nil, errIteratorDepleted + } + + historyBatches, newIterState, err := i.readHistoryBatches(ctx, i.NextEventID) + if err != nil { + return nil, err + } + + i.historyIteratorState = newIterState + firstEvent := historyBatches[0].Events[0] + lastBatch := historyBatches[len(historyBatches)-1] + lastEvent := lastBatch.Events[len(lastBatch.Events)-1] + eventCount := int64(0) + for _, batch := range historyBatches { + eventCount += int64(len(batch.Events)) + } + header := &archiverspb.HistoryBlobHeader{ + Namespace: i.request.Namespace, + NamespaceId: i.request.NamespaceID, + WorkflowId: i.request.WorkflowID, + RunId: i.request.RunID, + IsLast: i.FinishedIteration, + FirstFailoverVersion: firstEvent.Version, + LastFailoverVersion: lastEvent.Version, + FirstEventId: firstEvent.EventId, + LastEventId: lastEvent.EventId, + EventCount: eventCount, + } + + return &archiverspb.HistoryBlob{ + Header: header, + Body: historyBatches, + }, nil +} + +// HasNext returns true if there are more items to iterate over. 
+func (i *historyIterator) HasNext() bool { + return !i.FinishedIteration +} + +// GetState returns the encoded iterator state +func (i *historyIterator) GetState() ([]byte, error) { + return json.Marshal(i.historyIteratorState) +} + +func (i *historyIterator) readHistoryBatches( + ctx context.Context, + firstEventID int64, +) ([]*historypb.History, historyIteratorState, error) { + size := 0 + targetSize := i.targetHistoryBlobSize + var historyBatches []*historypb.History + newIterState := historyIteratorState{} + for size < targetSize { + currHistoryBatches, err := i.readHistory(ctx, firstEventID) + if _, isNotFound := err.(*serviceerror.NotFound); isNotFound && firstEventID != common.FirstEventID { + newIterState.FinishedIteration = true + return historyBatches, newIterState, nil + } + if err != nil { + return nil, newIterState, err + } + for idx, batch := range currHistoryBatches { + historyBatchSize, err := i.sizeEstimator.EstimateSize(batch) + if err != nil { + return nil, newIterState, err + } + size += historyBatchSize + historyBatches = append(historyBatches, batch) + firstEventID = batch.Events[len(batch.Events)-1].EventId + 1 + + // In case targetSize is satisfied before reaching the end of current set of batches, return immediately. + // Otherwise, we need to look ahead to see if there's more history batches. + if size >= targetSize && idx != len(currHistoryBatches)-1 { + newIterState.FinishedIteration = false + newIterState.NextEventID = firstEventID + return historyBatches, newIterState, nil + } + } + } + + // If you are here, it means the target size is met after adding the last batch of read history. + // We need to check if there's more history batches. + _, err := i.readHistory(ctx, firstEventID) + if _, isNotFound := err.(*serviceerror.NotFound); isNotFound && firstEventID != common.FirstEventID { + newIterState.FinishedIteration = true + return historyBatches, newIterState, nil + } + if err != nil { + return nil, newIterState, err + } + newIterState.FinishedIteration = false + newIterState.NextEventID = firstEventID + return historyBatches, newIterState, nil +} + +func (i *historyIterator) readHistory(ctx context.Context, firstEventID int64) ([]*historypb.History, error) { + req := &persistence.ReadHistoryBranchRequest{ + BranchToken: i.request.BranchToken, + MinEventID: firstEventID, + MaxEventID: common.EndEventID, + PageSize: i.historyPageSize, + ShardID: i.request.ShardID, + } + historyBatches, _, _, err := persistence.ReadFullPageEventsByBatch(ctx, i.executionManager, req) + return historyBatches, err +} + +// reset resets iterator to a certain state given its encoded representation +// if it returns an error, the operation will have no effect on the iterator +func (i *historyIterator) reset(stateToken []byte) error { + var iteratorState historyIteratorState + if err := json.Unmarshal(stateToken, &iteratorState); err != nil { + return err + } + i.historyIteratorState = iteratorState + return nil +} + +type ( + // SizeEstimator is used to estimate the size of any object + SizeEstimator interface { + EstimateSize(v interface{}) (int, error) + } + + jsonSizeEstimator struct { + marshaler jsonpb.Marshaler + } +) + +func (e *jsonSizeEstimator) EstimateSize(v interface{}) (int, error) { + // jsonpb must be used for proto structs. 
+ if protoMessage, ok := v.(proto.Message); ok { + var buf bytes.Buffer + err := e.marshaler.Marshal(&buf, protoMessage) + return buf.Len(), err + } + + data, err := json.Marshal(v) + if err != nil { + return 0, err + } + return len(data), nil +} + +// NewJSONSizeEstimator returns a new SizeEstimator which uses json encoding to estimate size +func NewJSONSizeEstimator() SizeEstimator { + return &jsonSizeEstimator{} +} diff -Nru temporal-1.21.5-1/src/common/archiver/history_iterator_mock.go temporal-1.22.5/src/common/archiver/history_iterator_mock.go --- temporal-1.21.5-1/src/common/archiver/history_iterator_mock.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/archiver/history_iterator_mock.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,142 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Code generated by MockGen. DO NOT EDIT. +// Source: history_iterator.go + +// Package archiver is a generated GoMock package. +package archiver + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + archiver "go.temporal.io/server/api/archiver/v1" +) + +// MockHistoryIterator is a mock of HistoryIterator interface. +type MockHistoryIterator struct { + ctrl *gomock.Controller + recorder *MockHistoryIteratorMockRecorder +} + +// MockHistoryIteratorMockRecorder is the mock recorder for MockHistoryIterator. +type MockHistoryIteratorMockRecorder struct { + mock *MockHistoryIterator +} + +// NewMockHistoryIterator creates a new mock instance. +func NewMockHistoryIterator(ctrl *gomock.Controller) *MockHistoryIterator { + mock := &MockHistoryIterator{ctrl: ctrl} + mock.recorder = &MockHistoryIteratorMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockHistoryIterator) EXPECT() *MockHistoryIteratorMockRecorder { + return m.recorder +} + +// GetState mocks base method. +func (m *MockHistoryIterator) GetState() ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetState") + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetState indicates an expected call of GetState. 
+func (mr *MockHistoryIteratorMockRecorder) GetState() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetState", reflect.TypeOf((*MockHistoryIterator)(nil).GetState)) +} + +// HasNext mocks base method. +func (m *MockHistoryIterator) HasNext() bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "HasNext") + ret0, _ := ret[0].(bool) + return ret0 +} + +// HasNext indicates an expected call of HasNext. +func (mr *MockHistoryIteratorMockRecorder) HasNext() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HasNext", reflect.TypeOf((*MockHistoryIterator)(nil).HasNext)) +} + +// Next mocks base method. +func (m *MockHistoryIterator) Next(arg0 context.Context) (*archiver.HistoryBlob, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Next", arg0) + ret0, _ := ret[0].(*archiver.HistoryBlob) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Next indicates an expected call of Next. +func (mr *MockHistoryIteratorMockRecorder) Next(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Next", reflect.TypeOf((*MockHistoryIterator)(nil).Next), arg0) +} + +// MockSizeEstimator is a mock of SizeEstimator interface. +type MockSizeEstimator struct { + ctrl *gomock.Controller + recorder *MockSizeEstimatorMockRecorder +} + +// MockSizeEstimatorMockRecorder is the mock recorder for MockSizeEstimator. +type MockSizeEstimatorMockRecorder struct { + mock *MockSizeEstimator +} + +// NewMockSizeEstimator creates a new mock instance. +func NewMockSizeEstimator(ctrl *gomock.Controller) *MockSizeEstimator { + mock := &MockSizeEstimator{ctrl: ctrl} + mock.recorder = &MockSizeEstimatorMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockSizeEstimator) EXPECT() *MockSizeEstimatorMockRecorder { + return m.recorder +} + +// EstimateSize mocks base method. +func (m *MockSizeEstimator) EstimateSize(v interface{}) (int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "EstimateSize", v) + ret0, _ := ret[0].(int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// EstimateSize indicates an expected call of EstimateSize. +func (mr *MockSizeEstimatorMockRecorder) EstimateSize(v interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EstimateSize", reflect.TypeOf((*MockSizeEstimator)(nil).EstimateSize), v) +} diff -Nru temporal-1.21.5-1/src/common/archiver/history_iterator_test.go temporal-1.22.5/src/common/archiver/history_iterator_test.go --- temporal-1.21.5-1/src/common/archiver/history_iterator_test.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/archiver/history_iterator_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,737 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package archiver + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + enumspb "go.temporal.io/api/enums/v1" + historypb "go.temporal.io/api/history/v1" + "go.temporal.io/api/serviceerror" + taskqueuepb "go.temporal.io/api/taskqueue/v1" + + archiverspb "go.temporal.io/server/api/archiver/v1" + "go.temporal.io/server/common" + "go.temporal.io/server/common/persistence" + "go.temporal.io/server/common/primitives/timestamp" +) + +const ( + testNamespaceID = "test-namespace-id" + testNamespace = "test-namespace" + testWorkflowID = "test-workflow-id" + testRunID = "test-run-id" + testShardID = int32(1) + testNextEventID = 1800 + testCloseFailoverVersion = 100 + testDefaultPersistencePageSize = 250 + testDefaultTargetHistoryBlobSize = 2 * 1024 * 124 + testDefaultHistoryEventSize = 50 +) + +var ( + testBranchToken = []byte{1, 2, 3} +) + +type ( + HistoryIteratorSuite struct { + *require.Assertions + suite.Suite + + controller *gomock.Controller + mockExecutionMgr *persistence.MockExecutionManager + } + + page struct { + firstbatchIdx int + numBatches int + firstEventFailoverVersion int64 + lastEventFailoverVersion int64 + } + + testSizeEstimator struct{} +) + +func (e *testSizeEstimator) EstimateSize(v interface{}) (int, error) { + historyBatch, ok := v.(*historypb.History) + if !ok { + return -1, errors.New("test size estimator only estimate the size of history batches") + } + return testDefaultHistoryEventSize * len(historyBatch.Events), nil +} + +func newTestSizeEstimator() SizeEstimator { + return &testSizeEstimator{} +} + +func TestHistoryIteratorSuite(t *testing.T) { + suite.Run(t, new(HistoryIteratorSuite)) +} + +func (s *HistoryIteratorSuite) SetupTest() { + s.Assertions = require.New(s.T()) + s.controller = gomock.NewController(s.T()) + s.mockExecutionMgr = persistence.NewMockExecutionManager(s.controller) +} + +func (s *HistoryIteratorSuite) TearDownTest() { + s.controller.Finish() +} + +func (s *HistoryIteratorSuite) TestReadHistory_Failed_EventsV2() { + s.mockExecutionMgr.EXPECT().ReadHistoryBranchByBatch(gomock.Any(), gomock.Any()).Return(nil, errors.New("got error reading history branch")) + itr := s.constructTestHistoryIterator(s.mockExecutionMgr, testDefaultTargetHistoryBlobSize, nil) + history, err := itr.readHistory(context.Background(), common.FirstEventID) + s.Error(err) + 
s.Nil(history) +} + +func (s *HistoryIteratorSuite) TestReadHistory_Success_EventsV2() { + resp := persistence.ReadHistoryBranchByBatchResponse{ + History: []*historypb.History{}, + NextPageToken: []byte{}, + } + s.mockExecutionMgr.EXPECT().ReadHistoryBranchByBatch(gomock.Any(), gomock.Any()).Return(&resp, nil) + itr := s.constructTestHistoryIterator(s.mockExecutionMgr, testDefaultTargetHistoryBlobSize, nil) + history, err := itr.readHistory(context.Background(), common.FirstEventID) + s.NoError(err) + s.Len(history, 0) +} + +// In the following test: +// batchInfo represents # of events for each history batch. +// page represents the metadata of the set of history batches that should be requested by the iterator +// and returned by the history manager. Each page specifies the index of the first history batch it should +// return, # of batches to return and first/last event failover version for the set of batches returned. +// Note that is possible that a history batch is contained in multiple pages. + +func (s *HistoryIteratorSuite) TestReadHistoryBatches_Fail_FirstCallToReadHistoryGivesError() { + batchInfo := []int{1} + pages := []page{ + { + firstbatchIdx: 0, + numBatches: 1, + firstEventFailoverVersion: 1, + lastEventFailoverVersion: 1, + }, + } + s.initMockExecutionManager(batchInfo, 0, false, pages...) + itr := s.constructTestHistoryIterator(s.mockExecutionMgr, testDefaultTargetHistoryBlobSize, nil) + startingIteratorState := s.copyIteratorState(itr) + events, nextIterState, err := itr.readHistoryBatches(context.Background(), common.FirstEventID) + s.Error(err) + s.Nil(events) + s.False(nextIterState.FinishedIteration) + s.Zero(nextIterState.NextEventID) + s.assertStateMatches(startingIteratorState, itr) +} + +func (s *HistoryIteratorSuite) TestReadHistoryBatches_Fail_NonFirstCallToReadHistoryGivesError() { + batchInfo := []int{1, 1} + pages := []page{ + { + firstbatchIdx: 0, + numBatches: 1, + firstEventFailoverVersion: 1, + lastEventFailoverVersion: 1, + }, + { + firstbatchIdx: 1, + numBatches: 1, + firstEventFailoverVersion: 1, + lastEventFailoverVersion: 1, + }, + } + s.initMockExecutionManager(batchInfo, 1, false, pages...) + itr := s.constructTestHistoryIterator(s.mockExecutionMgr, testDefaultTargetHistoryBlobSize, nil) + startingIteratorState := s.copyIteratorState(itr) + events, nextIterState, err := itr.readHistoryBatches(context.Background(), common.FirstEventID) + s.Error(err) + s.Nil(events) + s.False(nextIterState.FinishedIteration) + s.Zero(nextIterState.NextEventID) + s.assertStateMatches(startingIteratorState, itr) +} + +func (s *HistoryIteratorSuite) TestReadHistoryBatches_Success_ReadToHistoryEnd() { + batchInfo := []int{1, 2, 1, 1, 1, 3, 3, 1, 3} + pages := []page{ + { + firstbatchIdx: 0, + numBatches: 3, + firstEventFailoverVersion: 1, + lastEventFailoverVersion: 1, + }, + { + firstbatchIdx: 3, + numBatches: 2, + firstEventFailoverVersion: 1, + lastEventFailoverVersion: 1, + }, + { + firstbatchIdx: 5, + numBatches: 4, + firstEventFailoverVersion: 1, + lastEventFailoverVersion: 1, + }, + } + s.initMockExecutionManager(batchInfo, -1, true, pages...) 
+ // ensure target history batches size is greater than total history length to ensure all of history is read + itr := s.constructTestHistoryIterator(s.mockExecutionMgr, 20*testDefaultHistoryEventSize, nil) + startingIteratorState := s.copyIteratorState(itr) + history, nextIterState, err := itr.readHistoryBatches(context.Background(), common.FirstEventID) + s.NoError(err) + s.NotNil(history) + s.Len(history, 9) + s.True(nextIterState.FinishedIteration) + s.Zero(nextIterState.NextEventID) + s.assertStateMatches(startingIteratorState, itr) +} + +func (s *HistoryIteratorSuite) TestReadHistoryBatches_Success_TargetSizeSatisfiedWithoutReadingToEnd() { + batchInfo := []int{1, 2, 1, 1, 1, 3, 3, 1, 3} + pages := []page{ + { + firstbatchIdx: 0, + numBatches: 3, + firstEventFailoverVersion: 1, + lastEventFailoverVersion: 1, + }, + { + firstbatchIdx: 3, + numBatches: 2, + firstEventFailoverVersion: 1, + lastEventFailoverVersion: 1, + }, + { + firstbatchIdx: 5, + numBatches: 4, + firstEventFailoverVersion: 1, + lastEventFailoverVersion: 1, + }, + } + s.initMockExecutionManager(batchInfo, -1, false, pages...) + // ensure target history batches is smaller than full length of history so that not all of history is read + itr := s.constructTestHistoryIterator(s.mockExecutionMgr, 11*testDefaultHistoryEventSize, nil) + startingIteratorState := s.copyIteratorState(itr) + history, nextIterState, err := itr.readHistoryBatches(context.Background(), common.FirstEventID) + s.NoError(err) + s.NotNil(history) + s.Len(history, 7) + s.False(nextIterState.FinishedIteration) + s.Equal(int64(13), nextIterState.NextEventID) + s.assertStateMatches(startingIteratorState, itr) +} + +func (s *HistoryIteratorSuite) TestReadHistoryBatches_Success_ReadExactlyToHistoryEnd() { + batchInfo := []int{1, 2, 1, 1, 1, 3, 3, 1, 3} + pages := []page{ + { + firstbatchIdx: 0, + numBatches: 3, + firstEventFailoverVersion: 1, + lastEventFailoverVersion: 1, + }, + { + firstbatchIdx: 3, + numBatches: 2, + firstEventFailoverVersion: 1, + lastEventFailoverVersion: 1, + }, + { + firstbatchIdx: 5, + numBatches: 4, + firstEventFailoverVersion: 1, + lastEventFailoverVersion: 1, + }, + } + s.initMockExecutionManager(batchInfo, -1, true, pages...) + // ensure target history batches size is equal to the full length of history so that all of history is read + itr := s.constructTestHistoryIterator(s.mockExecutionMgr, 16*testDefaultHistoryEventSize, nil) + startingIteratorState := s.copyIteratorState(itr) + history, nextIterState, err := itr.readHistoryBatches(context.Background(), common.FirstEventID) + s.NoError(err) + s.NotNil(history) + s.Len(history, 9) + s.True(nextIterState.FinishedIteration) + s.Zero(nextIterState.NextEventID) + s.assertStateMatches(startingIteratorState, itr) +} + +func (s *HistoryIteratorSuite) TestReadHistoryBatches_Success_ReadPageMultipleTimes() { + batchInfo := []int{1, 3, 2} + pages := []page{ + { + firstbatchIdx: 0, + numBatches: 3, + firstEventFailoverVersion: 1, + lastEventFailoverVersion: 1, + }, + { + firstbatchIdx: 2, + numBatches: 1, + firstEventFailoverVersion: 1, + lastEventFailoverVersion: 1, + }, + } + s.initMockExecutionManager(batchInfo, -1, true, pages...) 
+ // ensure target history batches is very small so that one page needs multiple read + itr := s.constructTestHistoryIterator(s.mockExecutionMgr, 2*testDefaultHistoryEventSize, nil) + startingIteratorState := s.copyIteratorState(itr) + history, nextIterState, err := itr.readHistoryBatches(context.Background(), common.FirstEventID) + s.NoError(err) + s.NotNil(history) + s.Len(history, 2) + s.False(nextIterState.FinishedIteration) + s.Equal(int64(5), nextIterState.NextEventID) + s.assertStateMatches(startingIteratorState, itr) + + history, nextIterState, err = itr.readHistoryBatches(context.Background(), nextIterState.NextEventID) + s.NoError(err) + s.NotNil(history) + s.Len(history, 1) + s.True(nextIterState.FinishedIteration) + s.Zero(nextIterState.NextEventID) + s.assertStateMatches(startingIteratorState, itr) +} + +func (s *HistoryIteratorSuite) TestNext_Fail_IteratorDepleted() { + batchInfo := []int{1, 3, 2, 1, 2, 3, 4} + pages := []page{ + { + firstbatchIdx: 0, + numBatches: 2, + firstEventFailoverVersion: 1, + lastEventFailoverVersion: 1, + }, + { + firstbatchIdx: 2, + numBatches: 1, + firstEventFailoverVersion: 1, + lastEventFailoverVersion: 2, + }, + { + firstbatchIdx: 3, + numBatches: 4, + firstEventFailoverVersion: 2, + lastEventFailoverVersion: 5, + }, + } + s.initMockExecutionManager(batchInfo, -1, true, pages...) + // set target history batches such that a single call to next will read all of history + itr := s.constructTestHistoryIterator(s.mockExecutionMgr, 16*testDefaultHistoryEventSize, nil) + blob, err := itr.Next(context.Background()) + s.Nil(err) + + expectedIteratorState := historyIteratorState{ + // when iteration is finished page token is not advanced + FinishedIteration: true, + NextEventID: 0, + } + s.assertStateMatches(expectedIteratorState, itr) + s.NotNil(blob) + expectedHeader := &archiverspb.HistoryBlobHeader{ + Namespace: testNamespace, + NamespaceId: testNamespaceID, + WorkflowId: testWorkflowID, + RunId: testRunID, + IsLast: true, + FirstFailoverVersion: 1, + LastFailoverVersion: 5, + FirstEventId: common.FirstEventID, + LastEventId: 16, + EventCount: 16, + } + s.Equal(expectedHeader, blob.Header) + s.Len(blob.Body, 7) + s.NoError(err) + s.False(itr.HasNext()) + + blob, err = itr.Next(context.Background()) + s.Equal(err, errIteratorDepleted) + s.Nil(blob) + s.assertStateMatches(expectedIteratorState, itr) +} + +func (s *HistoryIteratorSuite) TestNext_Fail_ReturnErrOnSecondCallToNext() { + batchInfo := []int{1, 3, 2, 1, 3, 2} + pages := []page{ + { + firstbatchIdx: 0, + numBatches: 2, + firstEventFailoverVersion: 1, + lastEventFailoverVersion: 1, + }, + { + firstbatchIdx: 2, + numBatches: 1, + firstEventFailoverVersion: 1, + lastEventFailoverVersion: 1, + }, + { + firstbatchIdx: 3, + numBatches: 2, + firstEventFailoverVersion: 1, + lastEventFailoverVersion: 1, + }, + { + firstbatchIdx: 5, + numBatches: 1, + firstEventFailoverVersion: 1, + lastEventFailoverVersion: 1, + }, + } + s.initMockExecutionManager(batchInfo, 3, false, pages...) 
+ // set target blob size such that the first two pages are read for blob one without error, third page will return error + itr := s.constructTestHistoryIterator(s.mockExecutionMgr, 6*testDefaultHistoryEventSize, nil) + blob, err := itr.Next(context.Background()) + s.NoError(err) + expectedIteratorState := historyIteratorState{ + FinishedIteration: false, + NextEventID: 7, + } + s.assertStateMatches(expectedIteratorState, itr) + s.NotNil(blob) + expectedHeader := &archiverspb.HistoryBlobHeader{ + Namespace: testNamespace, + NamespaceId: testNamespaceID, + WorkflowId: testWorkflowID, + RunId: testRunID, + IsLast: false, + FirstFailoverVersion: 1, + LastFailoverVersion: 1, + FirstEventId: common.FirstEventID, + LastEventId: 6, + EventCount: 6, + } + s.Equal(expectedHeader, blob.Header) + s.NoError(err) + s.True(itr.HasNext()) + + blob, err = itr.Next(context.Background()) + s.Error(err) + s.Nil(blob) + s.assertStateMatches(expectedIteratorState, itr) +} + +func (s *HistoryIteratorSuite) TestNext_Success_TenCallsToNext() { + var batchInfo []int + for i := 0; i < 100; i++ { + batchInfo = append(batchInfo, []int{1, 2, 3, 4, 4, 3, 2, 1}...) + } + var pages []page + for i := 0; i < 100; i++ { + p := page{ + firstbatchIdx: i * 8, + numBatches: 8, + firstEventFailoverVersion: 1, + lastEventFailoverVersion: 1, + } + pages = append(pages, p) + } + s.initMockExecutionManager(batchInfo, -1, true, pages...) + // set target blob size size such that every 10 persistence pages is one group of history batches + itr := s.constructTestHistoryIterator(s.mockExecutionMgr, 20*10*testDefaultHistoryEventSize, nil) + expectedIteratorState := historyIteratorState{ + FinishedIteration: false, + NextEventID: common.FirstEventID, + } + for i := 0; i < 10; i++ { + s.assertStateMatches(expectedIteratorState, itr) + s.True(itr.HasNext()) + blob, err := itr.Next(context.Background()) + s.NoError(err) + s.NotNil(blob) + expectedHeader := &archiverspb.HistoryBlobHeader{ + Namespace: testNamespace, + NamespaceId: testNamespaceID, + WorkflowId: testWorkflowID, + RunId: testRunID, + IsLast: false, + FirstFailoverVersion: 1, + LastFailoverVersion: 1, + FirstEventId: common.FirstEventID + int64(i*200), + LastEventId: int64(200 + (i * 200)), + EventCount: 200, + } + if i == 9 { + expectedHeader.IsLast = true + } + s.Equal(expectedHeader, blob.Header) + + if i < 9 { + expectedIteratorState.FinishedIteration = false + expectedIteratorState.NextEventID = int64(200*(i+1) + 1) + } else { + expectedIteratorState.NextEventID = 0 + expectedIteratorState.FinishedIteration = true + } + } + s.assertStateMatches(expectedIteratorState, itr) + s.False(itr.HasNext()) +} + +func (s *HistoryIteratorSuite) TestNext_Success_SameHistoryDifferentPage() { + batchInfo := []int{2, 4, 4, 3, 2, 1, 1, 2} + pages := []page{ + { + firstbatchIdx: 0, + numBatches: 3, + firstEventFailoverVersion: 1, + lastEventFailoverVersion: 1, + }, + { + firstbatchIdx: 2, + numBatches: 1, + firstEventFailoverVersion: 1, + lastEventFailoverVersion: 1, + }, + { + firstbatchIdx: 3, + numBatches: 2, + firstEventFailoverVersion: 1, + lastEventFailoverVersion: 1, + }, + { + firstbatchIdx: 4, + numBatches: 1, + firstEventFailoverVersion: 1, + lastEventFailoverVersion: 1, + }, + { + firstbatchIdx: 5, + numBatches: 3, + firstEventFailoverVersion: 1, + lastEventFailoverVersion: 1, + }, + } + eventsPerRead := 6 + targetBlobSize := eventsPerRead * testDefaultHistoryEventSize + s.initMockExecutionManager(batchInfo, -1, true, pages...) 
+ itr1 := s.constructTestHistoryIterator(s.mockExecutionMgr, targetBlobSize, nil) + + pages = []page{ + { + firstbatchIdx: 0, + numBatches: 1, + firstEventFailoverVersion: 1, + lastEventFailoverVersion: 1, + }, + { + firstbatchIdx: 1, + numBatches: 3, + firstEventFailoverVersion: 1, + lastEventFailoverVersion: 1, + }, + { + firstbatchIdx: 2, + numBatches: 1, + firstEventFailoverVersion: 1, + lastEventFailoverVersion: 1, + }, + { + firstbatchIdx: 3, + numBatches: 5, + firstEventFailoverVersion: 1, + lastEventFailoverVersion: 1, + }, + { + firstbatchIdx: 4, + numBatches: 4, + firstEventFailoverVersion: 1, + lastEventFailoverVersion: 1, + }, + } + s.initMockExecutionManager(batchInfo, -1, true, pages...) + itr2 := s.constructTestHistoryIterator(s.mockExecutionMgr, targetBlobSize, nil) + + totalPages := 3 + expectedFirstEventID := []int64{1, 7, 14} + for i := 0; i != totalPages; i++ { + s.True(itr1.HasNext()) + history1, err := itr1.Next(context.Background()) + s.NoError(err) + + s.True(itr2.HasNext()) + history2, err := itr2.Next(context.Background()) + s.NoError(err) + + s.Equal(history1.Header, history2.Header) + s.Equal(len(history1.Body), len(history2.Body)) + s.Equal(expectedFirstEventID[i], history1.Body[0].Events[0].GetEventId()) + s.Equal(expectedFirstEventID[i], history2.Body[0].Events[0].GetEventId()) + } + expectedIteratorState := historyIteratorState{ + NextEventID: 0, + FinishedIteration: true, + } + s.assertStateMatches(expectedIteratorState, itr1) + s.assertStateMatches(expectedIteratorState, itr2) + s.False(itr1.HasNext()) + s.False(itr2.HasNext()) +} + +func (s *HistoryIteratorSuite) TestNewIteratorWithState() { + itr := s.constructTestHistoryIterator(nil, testDefaultTargetHistoryBlobSize, nil) + testIteratorState := historyIteratorState{ + FinishedIteration: true, + NextEventID: 4, + } + itr.historyIteratorState = testIteratorState + stateToken, err := itr.GetState() + s.NoError(err) + + newItr := s.constructTestHistoryIterator(nil, testDefaultTargetHistoryBlobSize, stateToken) + s.assertStateMatches(testIteratorState, newItr) +} + +func (s *HistoryIteratorSuite) initMockExecutionManager(batchInfo []int, returnErrorOnPage int, addNotExistCall bool, pages ...page) { + firstEventIDs := []int64{common.FirstEventID} + for i, batchSize := range batchInfo { + firstEventIDs = append(firstEventIDs, firstEventIDs[i]+int64(batchSize)) + } + + testShardId := testShardID + for i, p := range pages { + req := &persistence.ReadHistoryBranchRequest{ + BranchToken: testBranchToken, + MinEventID: firstEventIDs[p.firstbatchIdx], + MaxEventID: common.EndEventID, + PageSize: testDefaultPersistencePageSize, + ShardID: testShardId, + } + if returnErrorOnPage == i { + s.mockExecutionMgr.EXPECT().ReadHistoryBranchByBatch(gomock.Any(), req).Return(nil, errors.New("got error getting workflow execution history")) + return + } + + resp := &persistence.ReadHistoryBranchByBatchResponse{ + History: s.constructHistoryBatches(batchInfo, p, firstEventIDs[p.firstbatchIdx]), + } + s.mockExecutionMgr.EXPECT().ReadHistoryBranchByBatch(gomock.Any(), req).Return(resp, nil).MaxTimes(2) + } + + if addNotExistCall { + req := &persistence.ReadHistoryBranchRequest{ + BranchToken: testBranchToken, + MinEventID: firstEventIDs[len(firstEventIDs)-1], + MaxEventID: common.EndEventID, + PageSize: testDefaultPersistencePageSize, + ShardID: testShardId, + } + s.mockExecutionMgr.EXPECT().ReadHistoryBranchByBatch(gomock.Any(), req).Return(nil, serviceerror.NewNotFound("Reach the end")) + } +} + +func (s *HistoryIteratorSuite) 
copyIteratorState(itr *historyIterator) historyIteratorState { + return itr.historyIteratorState +} + +func (s *HistoryIteratorSuite) assertStateMatches(expected historyIteratorState, itr *historyIterator) { + s.Equal(expected.NextEventID, itr.NextEventID) + s.Equal(expected.FinishedIteration, itr.FinishedIteration) +} + +func (s *HistoryIteratorSuite) constructHistoryBatches(batchInfo []int, page page, firstEventID int64) []*historypb.History { + var batches []*historypb.History + eventsID := firstEventID + for batchIdx, numEvents := range batchInfo[page.firstbatchIdx : page.firstbatchIdx+page.numBatches] { + var events []*historypb.HistoryEvent + for i := 0; i < numEvents; i++ { + event := &historypb.HistoryEvent{ + EventId: eventsID, + Version: page.firstEventFailoverVersion, + } + eventsID++ + if batchIdx == page.numBatches-1 { + event.Version = page.lastEventFailoverVersion + } + events = append(events, event) + } + batches = append(batches, &historypb.History{ + Events: events, + }) + } + return batches +} + +func (s *HistoryIteratorSuite) constructTestHistoryIterator( + mockExecutionMgr *persistence.MockExecutionManager, + targetHistoryBlobSize int, + initialState []byte, +) *historyIterator { + request := &ArchiveHistoryRequest{ + ShardID: testShardID, + NamespaceID: testNamespaceID, + Namespace: testNamespace, + WorkflowID: testWorkflowID, + RunID: testRunID, + BranchToken: testBranchToken, + NextEventID: testNextEventID, + CloseFailoverVersion: testCloseFailoverVersion, + } + itr := newHistoryIterator(request, mockExecutionMgr, targetHistoryBlobSize) + if initialState != nil { + err := itr.reset(initialState) + s.NoError(err) + } + itr.sizeEstimator = newTestSizeEstimator() + return itr +} +func (s *HistoryIteratorSuite) TestJSONSizeEstimator() { + e := NewJSONSizeEstimator() + + historyEvent := &historypb.HistoryEvent{ + EventId: 1, + EventTime: timestamp.TimePtr(time.Date(1978, 8, 22, 12, 59, 59, 999999, time.UTC)), + TaskId: 1, + Version: 1, + } + historyEvent.EventType = enumspb.EVENT_TYPE_WORKFLOW_TASK_SCHEDULED + historyEvent.Attributes = &historypb.HistoryEvent_WorkflowTaskScheduledEventAttributes{WorkflowTaskScheduledEventAttributes: &historypb.WorkflowTaskScheduledEventAttributes{ + TaskQueue: &taskqueuepb.TaskQueue{ + Name: "taskQueue", + Kind: enumspb.TASK_QUEUE_KIND_NORMAL, + }, + StartToCloseTimeout: timestamp.DurationPtr(10 * time.Second), + Attempt: 1, + }} + + h := &historypb.History{ + Events: []*historypb.HistoryEvent{ + historyEvent, + }, + } + + size, err := e.EstimateSize(h) + s.NoError(err) + s.Equal(266, size) +} diff -Nru temporal-1.21.5-1/src/common/archiver/s3store/historyArchiver.go temporal-1.22.5/src/common/archiver/s3store/historyArchiver.go --- temporal-1.21.5-1/src/common/archiver/s3store/historyArchiver.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/archiver/s3store/historyArchiver.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,420 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// S3 History Archiver will archive workflow histories to amazon s3 - -package s3store - -import ( - "context" - "encoding/binary" - "errors" - "strconv" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/aws/aws-sdk-go/service/s3/s3iface" - "go.temporal.io/api/serviceerror" - - archiverspb "go.temporal.io/server/api/archiver/v1" - "go.temporal.io/server/common" - "go.temporal.io/server/common/archiver" - "go.temporal.io/server/common/codec" - "go.temporal.io/server/common/config" - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/log/tag" - "go.temporal.io/server/common/metrics" - "go.temporal.io/server/common/persistence" -) - -const ( - // URIScheme is the scheme for the s3 implementation - URIScheme = "s3" - errEncodeHistory = "failed to encode history batches" - errWriteKey = "failed to write history to s3" - defaultBlobstoreTimeout = time.Minute - targetHistoryBlobSize = 2 * 1024 * 1024 // 2MB -) - -var ( - errNoBucketSpecified = errors.New("no bucket specified") - errBucketNotExists = errors.New("requested bucket does not exist") - errEmptyAwsRegion = errors.New("empty aws region") -) - -type ( - historyArchiver struct { - container *archiver.HistoryBootstrapContainer - s3cli s3iface.S3API - // only set in test code - historyIterator archiver.HistoryIterator - } - - getHistoryToken struct { - CloseFailoverVersion int64 - BatchIdx int - } - - uploadProgress struct { - BatchIdx int - IteratorState []byte - uploadedSize int64 - historySize int64 - } -) - -// NewHistoryArchiver creates a new archiver.HistoryArchiver based on s3 -func NewHistoryArchiver( - container *archiver.HistoryBootstrapContainer, - config *config.S3Archiver, -) (archiver.HistoryArchiver, error) { - return newHistoryArchiver(container, config, nil) -} - -func newHistoryArchiver( - container *archiver.HistoryBootstrapContainer, - config *config.S3Archiver, - historyIterator archiver.HistoryIterator, -) (*historyArchiver, error) { - if len(config.Region) == 0 { - return nil, errEmptyAwsRegion - } - s3Config := &aws.Config{ - Endpoint: config.Endpoint, - Region: aws.String(config.Region), - S3ForcePathStyle: aws.Bool(config.S3ForcePathStyle), - } - sess, err := session.NewSession(s3Config) - if err != nil { - return nil, err - } - - return 
&historyArchiver{ - container: container, - s3cli: s3.New(sess), - historyIterator: historyIterator, - }, nil -} -func (h *historyArchiver) Archive( - ctx context.Context, - URI archiver.URI, - request *archiver.ArchiveHistoryRequest, - opts ...archiver.ArchiveOption, -) (err error) { - handler := h.container.MetricsHandler.WithTags(metrics.OperationTag(metrics.HistoryArchiverScope), metrics.NamespaceTag(request.Namespace)) - featureCatalog := archiver.GetFeatureCatalog(opts...) - startTime := time.Now().UTC() - defer func() { - handler.Timer(metrics.ServiceLatency.GetMetricName()).Record(time.Since(startTime)) - if err != nil { - if common.IsPersistenceTransientError(err) || isRetryableError(err) { - handler.Counter(metrics.HistoryArchiverArchiveTransientErrorCount.GetMetricName()).Record(1) - } else { - handler.Counter(metrics.HistoryArchiverArchiveNonRetryableErrorCount.GetMetricName()).Record(1) - if featureCatalog.NonRetryableError != nil { - err = featureCatalog.NonRetryableError() - } - } - } - }() - - logger := archiver.TagLoggerWithArchiveHistoryRequestAndURI(h.container.Logger, request, URI.String()) - - if err := SoftValidateURI(URI); err != nil { - logger.Error(archiver.ArchiveNonRetryableErrorMsg, tag.ArchivalArchiveFailReason(archiver.ErrReasonInvalidURI), tag.Error(err)) - return err - } - - if err := archiver.ValidateHistoryArchiveRequest(request); err != nil { - logger.Error(archiver.ArchiveNonRetryableErrorMsg, tag.ArchivalArchiveFailReason(archiver.ErrReasonInvalidArchiveRequest), tag.Error(err)) - return err - } - - var progress uploadProgress - historyIterator := h.historyIterator - if historyIterator == nil { // will only be set by testing code - historyIterator = loadHistoryIterator(ctx, request, h.container.ExecutionManager, featureCatalog, &progress) - } - for historyIterator.HasNext() { - historyBlob, err := historyIterator.Next(ctx) - if err != nil { - if _, isNotFound := err.(*serviceerror.NotFound); isNotFound { - // workflow history no longer exists, may due to duplicated archival signal - // this may happen even in the middle of iterating history as two archival signals - // can be processed concurrently. 
- logger.Info(archiver.ArchiveSkippedInfoMsg) - handler.Counter(metrics.HistoryArchiverDuplicateArchivalsCount.GetMetricName()).Record(1) - return nil - } - - logger := log.With(logger, tag.ArchivalArchiveFailReason(archiver.ErrReasonReadHistory), tag.Error(err)) - if common.IsPersistenceTransientError(err) { - logger.Error(archiver.ArchiveTransientErrorMsg) - } else { - logger.Error(archiver.ArchiveNonRetryableErrorMsg) - } - return err - } - - if historyMutated(request, historyBlob.Body, historyBlob.Header.IsLast) { - logger.Error(archiver.ArchiveNonRetryableErrorMsg, tag.ArchivalArchiveFailReason(archiver.ErrReasonHistoryMutated)) - return archiver.ErrHistoryMutated - } - - encoder := codec.NewJSONPBEncoder() - encodedHistoryBlob, err := encoder.Encode(historyBlob) - if err != nil { - logger.Error(archiver.ArchiveNonRetryableErrorMsg, tag.ArchivalArchiveFailReason(errEncodeHistory), tag.Error(err)) - return err - } - key := constructHistoryKey(URI.Path(), request.NamespaceID, request.WorkflowID, request.RunID, request.CloseFailoverVersion, progress.BatchIdx) - - exists, err := KeyExists(ctx, h.s3cli, URI, key) - if err != nil { - if isRetryableError(err) { - logger.Error(archiver.ArchiveTransientErrorMsg, tag.ArchivalArchiveFailReason(errWriteKey), tag.Error(err)) - } else { - logger.Error(archiver.ArchiveNonRetryableErrorMsg, tag.ArchivalArchiveFailReason(errWriteKey), tag.Error(err)) - } - return err - } - blobSize := int64(binary.Size(encodedHistoryBlob)) - if exists { - handler.Counter(metrics.HistoryArchiverBlobExistsCount.GetMetricName()).Record(1) - } else { - if err := Upload(ctx, h.s3cli, URI, key, encodedHistoryBlob); err != nil { - if isRetryableError(err) { - logger.Error(archiver.ArchiveTransientErrorMsg, tag.ArchivalArchiveFailReason(errWriteKey), tag.Error(err)) - } else { - logger.Error(archiver.ArchiveNonRetryableErrorMsg, tag.ArchivalArchiveFailReason(errWriteKey), tag.Error(err)) - } - return err - } - progress.uploadedSize += blobSize - handler.Histogram(metrics.HistoryArchiverBlobSize.GetMetricName(), metrics.HistoryArchiverBlobSize.GetMetricUnit()).Record(blobSize) - } - - progress.historySize += blobSize - progress.BatchIdx = progress.BatchIdx + 1 - saveHistoryIteratorState(ctx, featureCatalog, historyIterator, &progress) - } - - handler.Histogram(metrics.HistoryArchiverTotalUploadSize.GetMetricName(), metrics.HistoryArchiverTotalUploadSize.GetMetricUnit()).Record(progress.uploadedSize) - handler.Histogram(metrics.HistoryArchiverHistorySize.GetMetricName(), metrics.HistoryArchiverHistorySize.GetMetricUnit()).Record(progress.historySize) - handler.Counter(metrics.HistoryArchiverArchiveSuccessCount.GetMetricName()).Record(1) - return nil -} - -func loadHistoryIterator(ctx context.Context, request *archiver.ArchiveHistoryRequest, executionManager persistence.ExecutionManager, featureCatalog *archiver.ArchiveFeatureCatalog, progress *uploadProgress) (historyIterator archiver.HistoryIterator) { - if featureCatalog.ProgressManager != nil { - if featureCatalog.ProgressManager.HasProgress(ctx) { - err := featureCatalog.ProgressManager.LoadProgress(ctx, progress) - if err == nil { - historyIterator, err := archiver.NewHistoryIteratorFromState(request, executionManager, targetHistoryBlobSize, progress.IteratorState) - if err == nil { - return historyIterator - } - } - progress.IteratorState = nil - progress.BatchIdx = 0 - progress.historySize = 0 - progress.uploadedSize = 0 - } - } - return archiver.NewHistoryIterator(request, executionManager, targetHistoryBlobSize) -} - 
-func saveHistoryIteratorState(ctx context.Context, featureCatalog *archiver.ArchiveFeatureCatalog, historyIterator archiver.HistoryIterator, progress *uploadProgress) { - // Saving history state is a best effort operation. Ignore errors and continue - if featureCatalog.ProgressManager != nil { - state, err := historyIterator.GetState() - if err != nil { - return - } - progress.IteratorState = state - err = featureCatalog.ProgressManager.RecordProgress(ctx, progress) - if err != nil { - return - } - } -} - -func (h *historyArchiver) Get( - ctx context.Context, - URI archiver.URI, - request *archiver.GetHistoryRequest, -) (*archiver.GetHistoryResponse, error) { - if err := SoftValidateURI(URI); err != nil { - return nil, serviceerror.NewInvalidArgument(archiver.ErrInvalidURI.Error()) - } - - if err := archiver.ValidateGetRequest(request); err != nil { - return nil, serviceerror.NewInvalidArgument(archiver.ErrInvalidGetHistoryRequest.Error()) - } - - var err error - var token *getHistoryToken - if request.NextPageToken != nil { - token, err = deserializeGetHistoryToken(request.NextPageToken) - if err != nil { - return nil, serviceerror.NewInvalidArgument(archiver.ErrNextPageTokenCorrupted.Error()) - } - } else if request.CloseFailoverVersion != nil { - token = &getHistoryToken{ - CloseFailoverVersion: *request.CloseFailoverVersion, - } - } else { - highestVersion, err := h.getHighestVersion(ctx, URI, request) - if err != nil { - if err == archiver.ErrHistoryNotExist { - return nil, serviceerror.NewNotFound(err.Error()) - } - return nil, serviceerror.NewInvalidArgument(err.Error()) - } - token = &getHistoryToken{ - CloseFailoverVersion: *highestVersion, - } - } - encoder := codec.NewJSONPBEncoder() - response := &archiver.GetHistoryResponse{} - numOfEvents := 0 - isTruncated := false - for { - if numOfEvents >= request.PageSize { - isTruncated = true - break - } - key := constructHistoryKey(URI.Path(), request.NamespaceID, request.WorkflowID, request.RunID, token.CloseFailoverVersion, token.BatchIdx) - - encodedRecord, err := Download(ctx, h.s3cli, URI, key) - if err != nil { - if isRetryableError(err) { - return nil, serviceerror.NewUnavailable(err.Error()) - } - switch err.(type) { - case *serviceerror.InvalidArgument, *serviceerror.Unavailable, *serviceerror.NotFound: - return nil, err - default: - return nil, serviceerror.NewInternal(err.Error()) - } - } - - historyBlob := archiverspb.HistoryBlob{} - err = encoder.Decode(encodedRecord, &historyBlob) - if err != nil { - return nil, serviceerror.NewInternal(err.Error()) - } - - for _, batch := range historyBlob.Body { - response.HistoryBatches = append(response.HistoryBatches, batch) - numOfEvents += len(batch.Events) - } - - if historyBlob.Header.IsLast { - break - } - token.BatchIdx++ - } - - if isTruncated { - nextToken, err := SerializeToken(token) - if err != nil { - return nil, serviceerror.NewInternal(err.Error()) - } - response.NextPageToken = nextToken - } - - return response, nil -} - -func (h *historyArchiver) ValidateURI(URI archiver.URI) error { - err := SoftValidateURI(URI) - if err != nil { - return err - } - return BucketExists(context.TODO(), h.s3cli, URI) -} - -func (h *historyArchiver) getHighestVersion(ctx context.Context, URI archiver.URI, request *archiver.GetHistoryRequest) (*int64, error) { - ctx, cancel := ensureContextTimeout(ctx) - defer cancel() - var prefix = constructHistoryKeyPrefix(URI.Path(), request.NamespaceID, request.WorkflowID, request.RunID) + "/" - results, err := h.s3cli.ListObjectsV2WithContext(ctx, 
&s3.ListObjectsV2Input{ - Bucket: aws.String(URI.Hostname()), - Prefix: aws.String(prefix), - Delimiter: aws.String("/"), - }) - if err != nil { - if aerr, ok := err.(awserr.Error); ok && aerr.Code() == s3.ErrCodeNoSuchBucket { - return nil, serviceerror.NewInvalidArgument(errBucketNotExists.Error()) - } - return nil, err - } - var highestVersion *int64 - - for _, v := range results.CommonPrefixes { - var version int64 - version, err = strconv.ParseInt(strings.Replace(strings.Replace(*v.Prefix, prefix, "", 1), "/", "", 1), 10, 64) - if err != nil { - continue - } - if highestVersion == nil || version > *highestVersion { - highestVersion = &version - } - } - if highestVersion == nil { - return nil, archiver.ErrHistoryNotExist - } - return highestVersion, nil -} - -func isRetryableError(err error) bool { - if err == nil { - return false - } - if aerr, ok := err.(awserr.Error); ok { - return isStatusCodeRetryable(aerr) || request.IsErrorRetryable(aerr) || request.IsErrorThrottle(aerr) - } - return false -} - -func isStatusCodeRetryable(err error) bool { - if aerr, ok := err.(awserr.Error); ok { - if rerr, ok := err.(awserr.RequestFailure); ok { - if rerr.StatusCode() == 429 { - return true - } - if rerr.StatusCode() >= 500 && rerr.StatusCode() != 501 { - return true - } - } - return isStatusCodeRetryable(aerr.OrigErr()) - } - return false -} diff -Nru temporal-1.21.5-1/src/common/archiver/s3store/historyArchiver_test.go temporal-1.22.5/src/common/archiver/s3store/historyArchiver_test.go --- temporal-1.21.5-1/src/common/archiver/s3store/historyArchiver_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/archiver/s3store/historyArchiver_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,794 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- -package s3store - -import ( - "bytes" - "context" - "errors" - "fmt" - "io" - "sort" - "strconv" - "strings" - "testing" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - enumspb "go.temporal.io/api/enums/v1" - historypb "go.temporal.io/api/history/v1" - "go.temporal.io/api/serviceerror" - - archiverspb "go.temporal.io/server/api/archiver/v1" - "go.temporal.io/server/common" - "go.temporal.io/server/common/archiver" - "go.temporal.io/server/common/archiver/s3store/mocks" - "go.temporal.io/server/common/codec" - "go.temporal.io/server/common/convert" - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/metrics" - "go.temporal.io/server/common/primitives/timestamp" -) - -const ( - testNamespaceID = "test-namespace-id" - testNamespace = "test-namespace" - testWorkflowID = "test-workflow-id" - testRunID = "test-run-id" - testNextEventID = 1800 - testCloseFailoverVersion = int64(100) - testPageSize = 100 - testBucket = "test-bucket" - testBucketURI = "s3://test-bucket" -) - -var testBranchToken = []byte{1, 2, 3} - -type historyArchiverSuite struct { - *require.Assertions - suite.Suite - s3cli *mocks.MockS3API - container *archiver.HistoryBootstrapContainer - testArchivalURI archiver.URI - historyBatchesV1 []*archiverspb.HistoryBlob - historyBatchesV100 []*archiverspb.HistoryBlob - controller *gomock.Controller -} - -func TestHistoryArchiverSuite(t *testing.T) { - suite.Run(t, new(historyArchiverSuite)) -} - -func (s *historyArchiverSuite) SetupSuite() { - var err error - s.testArchivalURI, err = archiver.NewURI(testBucketURI) - s.Require().NoError(err) -} - -func (s *historyArchiverSuite) TearDownSuite() { -} - -func (s *historyArchiverSuite) SetupTest() { - s.Assertions = require.New(s.T()) - s.container = &archiver.HistoryBootstrapContainer{ - Logger: log.NewNoopLogger(), - MetricsHandler: metrics.NoopMetricsHandler, - } - - s.controller = gomock.NewController(s.T()) - s.s3cli = mocks.NewMockS3API(s.controller) - setupFsEmulation(s.s3cli) - s.setupHistoryDirectory() -} - -func setupFsEmulation(s3cli *mocks.MockS3API) { - fs := make(map[string][]byte) - - putObjectFn := func(_ aws.Context, input *s3.PutObjectInput, _ ...request.Option) (*s3.PutObjectOutput, error) { - buf := new(bytes.Buffer) - if _, err := buf.ReadFrom(input.Body); err != nil { - return nil, err - } - fs[*input.Bucket+*input.Key] = buf.Bytes() - return &s3.PutObjectOutput{}, nil - } - - s3cli.EXPECT().ListObjectsV2WithContext(gomock.Any(), gomock.Any()).DoAndReturn( - func(_ context.Context, input *s3.ListObjectsV2Input, opts ...request.Option) (*s3.ListObjectsV2Output, error) { - objects := make([]*s3.Object, 0) - commonPrefixMap := map[string]bool{} - for k := range fs { - if strings.HasPrefix(k, *input.Bucket+*input.Prefix) { - key := k[len(*input.Bucket):] - keyWithoutPrefix := key[len(*input.Prefix):] - index := strings.Index(keyWithoutPrefix, "/") - if index == -1 || input.Delimiter == nil { - objects = append(objects, &s3.Object{ - Key: aws.String(key), - }) - } else { - commonPrefixMap[key[:len(*input.Prefix)+index]] = true - } - } - } - commonPrefixes := make([]*s3.CommonPrefix, 0) - for k := range commonPrefixMap { - commonPrefixes = append(commonPrefixes, &s3.CommonPrefix{ - Prefix: aws.String(k), - }) - } - - sort.SliceStable(objects, func(i, j int) bool { - return 
*objects[i].Key < *objects[j].Key - }) - maxKeys := 1000 - if input.MaxKeys != nil { - maxKeys = int(*input.MaxKeys) - } - start := 0 - if input.ContinuationToken != nil { - start, _ = strconv.Atoi(*input.ContinuationToken) - } - - if input.StartAfter != nil { - for k, v := range objects { - if *input.StartAfter == *v.Key { - start = k + 1 - } - } - } - - isTruncated := false - var nextContinuationToken *string - if len(objects) > start+maxKeys { - isTruncated = true - nextContinuationToken = convert.StringPtr(fmt.Sprintf("%d", start+maxKeys)) - objects = objects[start : start+maxKeys] - } else { - objects = objects[start:] - } - - if input.StartAfter != nil { - for k, v := range commonPrefixes { - if *input.StartAfter == *v.Prefix { - start = k + 1 - } - } - } - - if len(commonPrefixes) > start+maxKeys { - isTruncated = true - nextContinuationToken = convert.StringPtr(fmt.Sprintf("%d", start+maxKeys)) - commonPrefixes = commonPrefixes[start : start+maxKeys] - } else if len(commonPrefixes) > 0 { - commonPrefixes = commonPrefixes[start:] - } - - return &s3.ListObjectsV2Output{ - CommonPrefixes: commonPrefixes, - Contents: objects, - IsTruncated: &isTruncated, - NextContinuationToken: nextContinuationToken, - }, nil - }).AnyTimes() - s3cli.EXPECT().PutObjectWithContext(gomock.Any(), gomock.Any()).DoAndReturn(putObjectFn).AnyTimes() - - s3cli.EXPECT().HeadObjectWithContext(gomock.Any(), gomock.Any()).DoAndReturn( - func(ctx aws.Context, input *s3.HeadObjectInput, options ...request.Option) (*s3.HeadObjectOutput, error) { - _, ok := fs[*input.Bucket+*input.Key] - if !ok { - return nil, awserr.New("NotFound", "", nil) - } - - return &s3.HeadObjectOutput{}, nil - }).AnyTimes() - - s3cli.EXPECT().GetObjectWithContext(gomock.Any(), gomock.Any()).DoAndReturn( - func(ctx aws.Context, input *s3.GetObjectInput, options ...request.Option) (*s3.GetObjectOutput, error) { - _, ok := fs[*input.Bucket+*input.Key] - if !ok { - return nil, awserr.New(s3.ErrCodeNoSuchKey, "", nil) - } - - return &s3.GetObjectOutput{ - Body: io.NopCloser(bytes.NewReader(fs[*input.Bucket+*input.Key])), - }, nil - }).AnyTimes() -} - -func (s *historyArchiverSuite) TestValidateURI() { - testCases := []struct { - URI string - expectedErr error - }{ - { - URI: "wrongscheme:///a/b/c", - expectedErr: archiver.ErrURISchemeMismatch, - }, - { - URI: "s3://", - expectedErr: errNoBucketSpecified, - }, - { - URI: "s3://bucket/a/b/c", - expectedErr: errBucketNotExists, - }, - { - URI: testBucketURI, - expectedErr: nil, - }, - } - - s.s3cli.EXPECT().HeadBucketWithContext(gomock.Any(), gomock.Any()).DoAndReturn( - func(ctx aws.Context, input *s3.HeadBucketInput, options ...request.Option) (*s3.HeadBucketOutput, error) { - if *input.Bucket != s.testArchivalURI.Hostname() { - return nil, awserr.New("NotFound", "", nil) - } - - return &s3.HeadBucketOutput{}, nil - }).AnyTimes() - - historyArchiver := s.newTestHistoryArchiver(nil) - for _, tc := range testCases { - URI, err := archiver.NewURI(tc.URI) - s.NoError(err) - s.Equal(tc.expectedErr, historyArchiver.ValidateURI(URI)) - } -} - -func (s *historyArchiverSuite) TestArchive_Fail_InvalidURI() { - historyArchiver := s.newTestHistoryArchiver(nil) - request := &archiver.ArchiveHistoryRequest{ - NamespaceID: testNamespaceID, - Namespace: testNamespace, - WorkflowID: testWorkflowID, - RunID: testRunID, - BranchToken: testBranchToken, - NextEventID: testNextEventID, - CloseFailoverVersion: testCloseFailoverVersion, - } - URI, err := archiver.NewURI("wrongscheme://") - s.NoError(err) - err = 
historyArchiver.Archive(context.Background(), URI, request) - s.Error(err) -} - -func (s *historyArchiverSuite) TestArchive_Fail_InvalidRequest() { - historyArchiver := s.newTestHistoryArchiver(nil) - request := &archiver.ArchiveHistoryRequest{ - NamespaceID: testNamespaceID, - Namespace: testNamespace, - WorkflowID: "", // an invalid request - RunID: testRunID, - BranchToken: testBranchToken, - NextEventID: testNextEventID, - CloseFailoverVersion: testCloseFailoverVersion, - } - err := historyArchiver.Archive(context.Background(), s.testArchivalURI, request) - s.Error(err) -} - -func (s *historyArchiverSuite) TestArchive_Fail_ErrorOnReadHistory() { - historyIterator := archiver.NewMockHistoryIterator(s.controller) - gomock.InOrder( - historyIterator.EXPECT().HasNext().Return(true), - historyIterator.EXPECT().Next(gomock.Any()).Return(nil, errors.New("some random error")), - ) - - historyArchiver := s.newTestHistoryArchiver(historyIterator) - request := &archiver.ArchiveHistoryRequest{ - NamespaceID: testNamespaceID, - Namespace: testNamespace, - WorkflowID: testWorkflowID, - RunID: testRunID, - BranchToken: testBranchToken, - NextEventID: testNextEventID, - CloseFailoverVersion: testCloseFailoverVersion, - } - err := historyArchiver.Archive(context.Background(), s.testArchivalURI, request) - s.Error(err) -} - -func (s *historyArchiverSuite) TestArchive_Fail_TimeoutWhenReadingHistory() { - historyIterator := archiver.NewMockHistoryIterator(s.controller) - gomock.InOrder( - historyIterator.EXPECT().HasNext().Return(true), - historyIterator.EXPECT().Next(gomock.Any()).Return(nil, serviceerror.NewResourceExhausted(enumspb.RESOURCE_EXHAUSTED_CAUSE_RPS_LIMIT, "")), - ) - - historyArchiver := s.newTestHistoryArchiver(historyIterator) - request := &archiver.ArchiveHistoryRequest{ - NamespaceID: testNamespaceID, - Namespace: testNamespace, - WorkflowID: testWorkflowID, - RunID: testRunID, - BranchToken: testBranchToken, - NextEventID: testNextEventID, - CloseFailoverVersion: testCloseFailoverVersion, - } - err := historyArchiver.Archive(getCanceledContext(), s.testArchivalURI, request) - s.Error(err) -} - -func (s *historyArchiverSuite) TestArchive_Fail_HistoryMutated() { - historyIterator := archiver.NewMockHistoryIterator(s.controller) - historyBatches := []*historypb.History{ - { - Events: []*historypb.HistoryEvent{ - { - EventId: common.FirstEventID + 1, - EventTime: timestamp.TimePtr(time.Now().UTC()), - Version: testCloseFailoverVersion + 1, - }, - }, - }, - } - historyBlob := &archiverspb.HistoryBlob{ - Header: &archiverspb.HistoryBlobHeader{ - IsLast: true, - }, - Body: historyBatches, - } - gomock.InOrder( - historyIterator.EXPECT().HasNext().Return(true), - historyIterator.EXPECT().Next(gomock.Any()).Return(historyBlob, nil), - ) - - historyArchiver := s.newTestHistoryArchiver(historyIterator) - request := &archiver.ArchiveHistoryRequest{ - NamespaceID: testNamespaceID, - Namespace: testNamespace, - WorkflowID: testWorkflowID, - RunID: testRunID, - BranchToken: testBranchToken, - NextEventID: testNextEventID, - CloseFailoverVersion: testCloseFailoverVersion, - } - err := historyArchiver.Archive(context.Background(), s.testArchivalURI, request) - s.Error(err) -} - -func (s *historyArchiverSuite) TestArchive_Fail_NonRetryableErrorOption() { - historyIterator := archiver.NewMockHistoryIterator(s.controller) - gomock.InOrder( - historyIterator.EXPECT().HasNext().Return(true), - historyIterator.EXPECT().Next(gomock.Any()).Return(nil, errors.New("some random error")), - ) - - historyArchiver 
:= s.newTestHistoryArchiver(historyIterator) - request := &archiver.ArchiveHistoryRequest{ - NamespaceID: testNamespaceID, - Namespace: testNamespace, - WorkflowID: testWorkflowID, - RunID: testRunID, - BranchToken: testBranchToken, - NextEventID: testNextEventID, - CloseFailoverVersion: testCloseFailoverVersion, - } - nonRetryableErr := errors.New("some non-retryable error") - err := historyArchiver.Archive(context.Background(), s.testArchivalURI, request, archiver.GetNonRetryableErrorOption(nonRetryableErr)) - s.Equal(nonRetryableErr, err) -} - -func (s *historyArchiverSuite) TestArchive_Skip() { - historyIterator := archiver.NewMockHistoryIterator(s.controller) - historyBlob := &archiverspb.HistoryBlob{ - Header: &archiverspb.HistoryBlobHeader{ - IsLast: false, - }, - Body: []*historypb.History{ - { - Events: []*historypb.HistoryEvent{ - { - EventId: common.FirstEventID, - EventTime: timestamp.TimePtr(time.Now().UTC()), - Version: testCloseFailoverVersion, - }, - }, - }, - }, - } - gomock.InOrder( - historyIterator.EXPECT().HasNext().Return(true), - historyIterator.EXPECT().Next(gomock.Any()).Return(historyBlob, nil), - historyIterator.EXPECT().HasNext().Return(true), - historyIterator.EXPECT().Next(gomock.Any()).Return(nil, serviceerror.NewNotFound("workflow not found")), - ) - - historyArchiver := s.newTestHistoryArchiver(historyIterator) - request := &archiver.ArchiveHistoryRequest{ - NamespaceID: testNamespaceID, - Namespace: testNamespace, - WorkflowID: testWorkflowID, - RunID: testRunID, - BranchToken: testBranchToken, - NextEventID: testNextEventID, - CloseFailoverVersion: testCloseFailoverVersion, - } - URI, err := archiver.NewURI(testBucketURI + "/TestArchive_Skip") - s.NoError(err) - err = historyArchiver.Archive(context.Background(), URI, request) - s.NoError(err) - - expectedkey := constructHistoryKey("", testNamespaceID, testWorkflowID, testRunID, testCloseFailoverVersion, 0) - s.assertKeyExists(expectedkey) -} - -func (s *historyArchiverSuite) TestArchive_Success() { - historyIterator := archiver.NewMockHistoryIterator(s.controller) - historyBatches := []*historypb.History{ - { - Events: []*historypb.HistoryEvent{ - { - EventId: common.FirstEventID + 1, - EventTime: timestamp.TimePtr(time.Now().UTC()), - Version: testCloseFailoverVersion, - }, - { - EventId: common.FirstEventID + 2, - EventTime: timestamp.TimePtr(time.Now().UTC()), - Version: testCloseFailoverVersion, - }, - }, - }, - { - Events: []*historypb.HistoryEvent{ - { - EventId: testNextEventID - 1, - EventTime: timestamp.TimePtr(time.Now().UTC()), - Version: testCloseFailoverVersion, - }, - }, - }, - } - historyBlob := &archiverspb.HistoryBlob{ - Header: &archiverspb.HistoryBlobHeader{ - IsLast: true, - }, - Body: historyBatches, - } - gomock.InOrder( - historyIterator.EXPECT().HasNext().Return(true), - historyIterator.EXPECT().Next(gomock.Any()).Return(historyBlob, nil), - historyIterator.EXPECT().HasNext().Return(false), - ) - - historyArchiver := s.newTestHistoryArchiver(historyIterator) - request := &archiver.ArchiveHistoryRequest{ - NamespaceID: testNamespaceID, - Namespace: testNamespace, - WorkflowID: testWorkflowID, - RunID: testRunID, - BranchToken: testBranchToken, - NextEventID: testNextEventID, - CloseFailoverVersion: testCloseFailoverVersion, - } - URI, err := archiver.NewURI(testBucketURI + "/TestArchive_Success") - s.NoError(err) - err = historyArchiver.Archive(context.Background(), URI, request) - s.NoError(err) - - expectedkey := constructHistoryKey("", testNamespaceID, testWorkflowID, testRunID, 
testCloseFailoverVersion, 0) - s.assertKeyExists(expectedkey) -} - -func (s *historyArchiverSuite) TestGet_Fail_InvalidURI() { - historyArchiver := s.newTestHistoryArchiver(nil) - request := &archiver.GetHistoryRequest{ - NamespaceID: testNamespaceID, - WorkflowID: testWorkflowID, - RunID: testRunID, - PageSize: 100, - } - URI, err := archiver.NewURI("wrongscheme://") - s.NoError(err) - response, err := historyArchiver.Get(context.Background(), URI, request) - s.Nil(response) - s.Error(err) -} - -func (s *historyArchiverSuite) TestGet_Fail_InvalidRequest() { - historyArchiver := s.newTestHistoryArchiver(nil) - request := &archiver.GetHistoryRequest{ - NamespaceID: testNamespaceID, - WorkflowID: testWorkflowID, - RunID: testRunID, - PageSize: 0, // pageSize should be greater than 0 - } - response, err := historyArchiver.Get(context.Background(), s.testArchivalURI, request) - s.Nil(response) - s.Error(err) - s.IsType(&serviceerror.InvalidArgument{}, err) -} - -func (s *historyArchiverSuite) TestGet_Fail_InvalidToken() { - historyArchiver := s.newTestHistoryArchiver(nil) - request := &archiver.GetHistoryRequest{ - NamespaceID: testNamespaceID, - WorkflowID: testWorkflowID, - RunID: testRunID, - PageSize: testPageSize, - NextPageToken: []byte{'r', 'a', 'n', 'd', 'o', 'm'}, - } - URI, err := archiver.NewURI(testBucketURI) - s.NoError(err) - response, err := historyArchiver.Get(context.Background(), URI, request) - s.Nil(response) - s.Error(err) - s.IsType(&serviceerror.InvalidArgument{}, err) -} - -func (s *historyArchiverSuite) TestGet_Fail_KeyNotExist() { - historyArchiver := s.newTestHistoryArchiver(nil) - testCloseFailoverVersion := testCloseFailoverVersion - request := &archiver.GetHistoryRequest{ - NamespaceID: testNamespaceID, - WorkflowID: testWorkflowID, - RunID: testRunID, - PageSize: testPageSize, - CloseFailoverVersion: &testCloseFailoverVersion, - } - URI, err := archiver.NewURI("s3://test-bucket/non-existent") - s.NoError(err) - response, err := historyArchiver.Get(context.Background(), URI, request) - s.Nil(response) - s.Error(err) - s.IsType(&serviceerror.NotFound{}, err) -} - -func (s *historyArchiverSuite) TestGet_Success_PickHighestVersion() { - historyArchiver := s.newTestHistoryArchiver(nil) - request := &archiver.GetHistoryRequest{ - NamespaceID: testNamespaceID, - WorkflowID: testWorkflowID, - RunID: testRunID, - PageSize: testPageSize, - } - URI, err := archiver.NewURI(testBucketURI) - s.NoError(err) - response, err := historyArchiver.Get(context.Background(), URI, request) - s.NoError(err) - s.Nil(response.NextPageToken) - s.Equal(append(s.historyBatchesV100[0].Body, s.historyBatchesV100[1].Body...), response.HistoryBatches) -} - -func (s *historyArchiverSuite) TestGet_Success_UseProvidedVersion() { - historyArchiver := s.newTestHistoryArchiver(nil) - testCloseFailoverVersion := int64(1) - request := &archiver.GetHistoryRequest{ - NamespaceID: testNamespaceID, - WorkflowID: testWorkflowID, - RunID: testRunID, - PageSize: testPageSize, - CloseFailoverVersion: &testCloseFailoverVersion, - } - URI, err := archiver.NewURI(testBucketURI) - s.NoError(err) - response, err := historyArchiver.Get(context.Background(), URI, request) - s.NoError(err) - s.Nil(response.NextPageToken) - s.Equal(s.historyBatchesV1[0].Body, response.HistoryBatches) -} - -func (s *historyArchiverSuite) TestGet_Success_SmallPageSize() { - historyArchiver := s.newTestHistoryArchiver(nil) - testCloseFailoverVersion := testCloseFailoverVersion - request := &archiver.GetHistoryRequest{ - NamespaceID: 
testNamespaceID, - WorkflowID: testWorkflowID, - RunID: testRunID, - PageSize: 1, - CloseFailoverVersion: &testCloseFailoverVersion, - } - var combinedHistory []*historypb.History - - URI, err := archiver.NewURI(testBucketURI) - s.NoError(err) - response, err := historyArchiver.Get(context.Background(), URI, request) - s.NoError(err) - s.NotNil(response) - s.NotNil(response.NextPageToken) - s.NotNil(response.HistoryBatches) - s.Len(response.HistoryBatches, 1) - combinedHistory = append(combinedHistory, response.HistoryBatches...) - - request.NextPageToken = response.NextPageToken - response, err = historyArchiver.Get(context.Background(), URI, request) - s.NoError(err) - s.NotNil(response) - s.Nil(response.NextPageToken) - s.NotNil(response.HistoryBatches) - s.Len(response.HistoryBatches, 1) - combinedHistory = append(combinedHistory, response.HistoryBatches...) - - s.Equal(append(s.historyBatchesV100[0].Body, s.historyBatchesV100[1].Body...), combinedHistory) -} - -func (s *historyArchiverSuite) TestGet_EmptyHistory_ReturnsNotFoundError() { - historyIterator := archiver.NewMockHistoryIterator(s.controller) - historyArchiver := s.newTestHistoryArchiver(historyIterator) - URI, err := archiver.NewURI(testBucketURI + "/TestArchiveAndGet") - s.NoError(err) - getRequest := &archiver.GetHistoryRequest{ - NamespaceID: testNamespaceID, - WorkflowID: testWorkflowID, - RunID: testRunID, - PageSize: testPageSize, - } - response, err := historyArchiver.Get(context.Background(), URI, getRequest) - s.Error(err) - s.Nil(response) - s.IsType(&serviceerror.NotFound{}, err) -} - -func (s *historyArchiverSuite) TestArchiveAndGet() { - historyIterator := archiver.NewMockHistoryIterator(s.controller) - gomock.InOrder( - historyIterator.EXPECT().HasNext().Return(true), - historyIterator.EXPECT().Next(gomock.Any()).Return(s.historyBatchesV100[0], nil), - historyIterator.EXPECT().HasNext().Return(true), - historyIterator.EXPECT().Next(gomock.Any()).Return(s.historyBatchesV100[1], nil), - historyIterator.EXPECT().HasNext().Return(false), - ) - - historyArchiver := s.newTestHistoryArchiver(historyIterator) - archiveRequest := &archiver.ArchiveHistoryRequest{ - NamespaceID: testNamespaceID, - Namespace: testNamespace, - WorkflowID: testWorkflowID, - RunID: testRunID, - BranchToken: testBranchToken, - NextEventID: testNextEventID, - CloseFailoverVersion: testCloseFailoverVersion, - } - URI, err := archiver.NewURI(testBucketURI + "/TestArchiveAndGet") - s.NoError(err) - err = historyArchiver.Archive(context.Background(), URI, archiveRequest) - s.NoError(err) - - getRequest := &archiver.GetHistoryRequest{ - NamespaceID: testNamespaceID, - WorkflowID: testWorkflowID, - RunID: testRunID, - PageSize: testPageSize, - } - response, err := historyArchiver.Get(context.Background(), URI, getRequest) - s.NoError(err) - s.NotNil(response) - s.Nil(response.NextPageToken) - s.Equal(append(s.historyBatchesV100[0].Body, s.historyBatchesV100[1].Body...), response.HistoryBatches) -} - -func (s *historyArchiverSuite) newTestHistoryArchiver(historyIterator archiver.HistoryIterator) *historyArchiver { - // config := &config.S3Archiver{} - // archiver, err := newHistoryArchiver(s.container, config, historyIterator) - archiver := &historyArchiver{ - container: s.container, - s3cli: s.s3cli, - historyIterator: historyIterator, - } - return archiver -} - -func (s *historyArchiverSuite) setupHistoryDirectory() { - now := time.Date(2020, 8, 22, 1, 2, 3, 4, time.UTC) - - s.historyBatchesV1 = []*archiverspb.HistoryBlob{ - { - Header: 
&archiverspb.HistoryBlobHeader{ - IsLast: true, - }, - Body: []*historypb.History{ - { - Events: []*historypb.HistoryEvent{ - { - EventId: testNextEventID - 1, - EventTime: &now, - Version: 1, - }, - }, - }, - }, - }, - } - - s.historyBatchesV100 = []*archiverspb.HistoryBlob{ - { - Header: &archiverspb.HistoryBlobHeader{ - IsLast: false, - }, - Body: []*historypb.History{ - { - Events: []*historypb.HistoryEvent{ - { - EventId: common.FirstEventID + 1, - EventTime: &now, - Version: testCloseFailoverVersion, - }, - { - EventId: common.FirstEventID + 1, - EventTime: &now, - Version: testCloseFailoverVersion, - }, - }, - }, - }, - }, - { - Header: &archiverspb.HistoryBlobHeader{ - IsLast: true, - }, - Body: []*historypb.History{ - { - Events: []*historypb.HistoryEvent{ - { - EventId: testNextEventID - 1, - EventTime: &now, - Version: testCloseFailoverVersion, - }, - }, - }, - }, - }, - } - - s.writeHistoryBatchesForGetTest(s.historyBatchesV1, int64(1)) - s.writeHistoryBatchesForGetTest(s.historyBatchesV100, testCloseFailoverVersion) -} - -func (s *historyArchiverSuite) writeHistoryBatchesForGetTest(historyBatches []*archiverspb.HistoryBlob, version int64) { - for i, batch := range historyBatches { - encoder := codec.NewJSONPBEncoder() - data, err := encoder.Encode(batch) - s.Require().NoError(err) - key := constructHistoryKey("", testNamespaceID, testWorkflowID, testRunID, version, i) - _, err = s.s3cli.PutObjectWithContext(context.Background(), &s3.PutObjectInput{ - Bucket: aws.String(testBucket), - Key: aws.String(key), - Body: bytes.NewReader(data), - }) - s.Require().NoError(err) - } -} - -func (s *historyArchiverSuite) assertKeyExists(key string) { - _, err := s.s3cli.GetObjectWithContext(context.Background(), &s3.GetObjectInput{ - Bucket: aws.String(testBucket), - Key: aws.String(key), - }) - s.NoError(err) -} - -func getCanceledContext() context.Context { - ctx, cancel := context.WithCancel(context.Background()) - cancel() - return ctx -} diff -Nru temporal-1.21.5-1/src/common/archiver/s3store/history_archiver.go temporal-1.22.5/src/common/archiver/s3store/history_archiver.go --- temporal-1.21.5-1/src/common/archiver/s3store/history_archiver.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/archiver/s3store/history_archiver.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,420 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// S3 History Archiver will archive workflow histories to amazon s3 + +package s3store + +import ( + "context" + "encoding/binary" + "errors" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/aws/aws-sdk-go/service/s3/s3iface" + "go.temporal.io/api/serviceerror" + + archiverspb "go.temporal.io/server/api/archiver/v1" + "go.temporal.io/server/common" + "go.temporal.io/server/common/archiver" + "go.temporal.io/server/common/codec" + "go.temporal.io/server/common/config" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/log/tag" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/persistence" +) + +const ( + // URIScheme is the scheme for the s3 implementation + URIScheme = "s3" + errEncodeHistory = "failed to encode history batches" + errWriteKey = "failed to write history to s3" + defaultBlobstoreTimeout = time.Minute + targetHistoryBlobSize = 2 * 1024 * 1024 // 2MB +) + +var ( + errNoBucketSpecified = errors.New("no bucket specified") + errBucketNotExists = errors.New("requested bucket does not exist") + errEmptyAwsRegion = errors.New("empty aws region") +) + +type ( + historyArchiver struct { + container *archiver.HistoryBootstrapContainer + s3cli s3iface.S3API + // only set in test code + historyIterator archiver.HistoryIterator + } + + getHistoryToken struct { + CloseFailoverVersion int64 + BatchIdx int + } + + uploadProgress struct { + BatchIdx int + IteratorState []byte + uploadedSize int64 + historySize int64 + } +) + +// NewHistoryArchiver creates a new archiver.HistoryArchiver based on s3 +func NewHistoryArchiver( + container *archiver.HistoryBootstrapContainer, + config *config.S3Archiver, +) (archiver.HistoryArchiver, error) { + return newHistoryArchiver(container, config, nil) +} + +func newHistoryArchiver( + container *archiver.HistoryBootstrapContainer, + config *config.S3Archiver, + historyIterator archiver.HistoryIterator, +) (*historyArchiver, error) { + if len(config.Region) == 0 { + return nil, errEmptyAwsRegion + } + s3Config := &aws.Config{ + Endpoint: config.Endpoint, + Region: aws.String(config.Region), + S3ForcePathStyle: aws.Bool(config.S3ForcePathStyle), + } + sess, err := session.NewSession(s3Config) + if err != nil { + return nil, err + } + + return &historyArchiver{ + container: container, + s3cli: s3.New(sess), + historyIterator: historyIterator, + }, nil +} +func (h *historyArchiver) Archive( + ctx context.Context, + URI archiver.URI, + request *archiver.ArchiveHistoryRequest, + opts ...archiver.ArchiveOption, +) (err error) { + handler := h.container.MetricsHandler.WithTags(metrics.OperationTag(metrics.HistoryArchiverScope), metrics.NamespaceTag(request.Namespace)) + featureCatalog := archiver.GetFeatureCatalog(opts...) 
+ startTime := time.Now().UTC() + defer func() { + handler.Timer(metrics.ServiceLatency.GetMetricName()).Record(time.Since(startTime)) + if err != nil { + if common.IsPersistenceTransientError(err) || isRetryableError(err) { + handler.Counter(metrics.HistoryArchiverArchiveTransientErrorCount.GetMetricName()).Record(1) + } else { + handler.Counter(metrics.HistoryArchiverArchiveNonRetryableErrorCount.GetMetricName()).Record(1) + if featureCatalog.NonRetryableError != nil { + err = featureCatalog.NonRetryableError() + } + } + } + }() + + logger := archiver.TagLoggerWithArchiveHistoryRequestAndURI(h.container.Logger, request, URI.String()) + + if err := SoftValidateURI(URI); err != nil { + logger.Error(archiver.ArchiveNonRetryableErrorMsg, tag.ArchivalArchiveFailReason(archiver.ErrReasonInvalidURI), tag.Error(err)) + return err + } + + if err := archiver.ValidateHistoryArchiveRequest(request); err != nil { + logger.Error(archiver.ArchiveNonRetryableErrorMsg, tag.ArchivalArchiveFailReason(archiver.ErrReasonInvalidArchiveRequest), tag.Error(err)) + return err + } + + var progress uploadProgress + historyIterator := h.historyIterator + if historyIterator == nil { // will only be set by testing code + historyIterator = loadHistoryIterator(ctx, request, h.container.ExecutionManager, featureCatalog, &progress) + } + for historyIterator.HasNext() { + historyBlob, err := historyIterator.Next(ctx) + if err != nil { + if _, isNotFound := err.(*serviceerror.NotFound); isNotFound { + // workflow history no longer exists, may due to duplicated archival signal + // this may happen even in the middle of iterating history as two archival signals + // can be processed concurrently. + logger.Info(archiver.ArchiveSkippedInfoMsg) + handler.Counter(metrics.HistoryArchiverDuplicateArchivalsCount.GetMetricName()).Record(1) + return nil + } + + logger := log.With(logger, tag.ArchivalArchiveFailReason(archiver.ErrReasonReadHistory), tag.Error(err)) + if common.IsPersistenceTransientError(err) { + logger.Error(archiver.ArchiveTransientErrorMsg) + } else { + logger.Error(archiver.ArchiveNonRetryableErrorMsg) + } + return err + } + + if historyMutated(request, historyBlob.Body, historyBlob.Header.IsLast) { + logger.Error(archiver.ArchiveNonRetryableErrorMsg, tag.ArchivalArchiveFailReason(archiver.ErrReasonHistoryMutated)) + return archiver.ErrHistoryMutated + } + + encoder := codec.NewJSONPBEncoder() + encodedHistoryBlob, err := encoder.Encode(historyBlob) + if err != nil { + logger.Error(archiver.ArchiveNonRetryableErrorMsg, tag.ArchivalArchiveFailReason(errEncodeHistory), tag.Error(err)) + return err + } + key := constructHistoryKey(URI.Path(), request.NamespaceID, request.WorkflowID, request.RunID, request.CloseFailoverVersion, progress.BatchIdx) + + exists, err := KeyExists(ctx, h.s3cli, URI, key) + if err != nil { + if isRetryableError(err) { + logger.Error(archiver.ArchiveTransientErrorMsg, tag.ArchivalArchiveFailReason(errWriteKey), tag.Error(err)) + } else { + logger.Error(archiver.ArchiveNonRetryableErrorMsg, tag.ArchivalArchiveFailReason(errWriteKey), tag.Error(err)) + } + return err + } + blobSize := int64(binary.Size(encodedHistoryBlob)) + if exists { + handler.Counter(metrics.HistoryArchiverBlobExistsCount.GetMetricName()).Record(1) + } else { + if err := Upload(ctx, h.s3cli, URI, key, encodedHistoryBlob); err != nil { + if isRetryableError(err) { + logger.Error(archiver.ArchiveTransientErrorMsg, tag.ArchivalArchiveFailReason(errWriteKey), tag.Error(err)) + } else { + 
logger.Error(archiver.ArchiveNonRetryableErrorMsg, tag.ArchivalArchiveFailReason(errWriteKey), tag.Error(err)) + } + return err + } + progress.uploadedSize += blobSize + handler.Histogram(metrics.HistoryArchiverBlobSize.GetMetricName(), metrics.HistoryArchiverBlobSize.GetMetricUnit()).Record(blobSize) + } + + progress.historySize += blobSize + progress.BatchIdx = progress.BatchIdx + 1 + saveHistoryIteratorState(ctx, featureCatalog, historyIterator, &progress) + } + + handler.Histogram(metrics.HistoryArchiverTotalUploadSize.GetMetricName(), metrics.HistoryArchiverTotalUploadSize.GetMetricUnit()).Record(progress.uploadedSize) + handler.Histogram(metrics.HistoryArchiverHistorySize.GetMetricName(), metrics.HistoryArchiverHistorySize.GetMetricUnit()).Record(progress.historySize) + handler.Counter(metrics.HistoryArchiverArchiveSuccessCount.GetMetricName()).Record(1) + return nil +} + +func loadHistoryIterator(ctx context.Context, request *archiver.ArchiveHistoryRequest, executionManager persistence.ExecutionManager, featureCatalog *archiver.ArchiveFeatureCatalog, progress *uploadProgress) (historyIterator archiver.HistoryIterator) { + if featureCatalog.ProgressManager != nil { + if featureCatalog.ProgressManager.HasProgress(ctx) { + err := featureCatalog.ProgressManager.LoadProgress(ctx, progress) + if err == nil { + historyIterator, err := archiver.NewHistoryIteratorFromState(request, executionManager, targetHistoryBlobSize, progress.IteratorState) + if err == nil { + return historyIterator + } + } + progress.IteratorState = nil + progress.BatchIdx = 0 + progress.historySize = 0 + progress.uploadedSize = 0 + } + } + return archiver.NewHistoryIterator(request, executionManager, targetHistoryBlobSize) +} + +func saveHistoryIteratorState(ctx context.Context, featureCatalog *archiver.ArchiveFeatureCatalog, historyIterator archiver.HistoryIterator, progress *uploadProgress) { + // Saving history state is a best effort operation. 
Ignore errors and continue + if featureCatalog.ProgressManager != nil { + state, err := historyIterator.GetState() + if err != nil { + return + } + progress.IteratorState = state + err = featureCatalog.ProgressManager.RecordProgress(ctx, progress) + if err != nil { + return + } + } +} + +func (h *historyArchiver) Get( + ctx context.Context, + URI archiver.URI, + request *archiver.GetHistoryRequest, +) (*archiver.GetHistoryResponse, error) { + if err := SoftValidateURI(URI); err != nil { + return nil, serviceerror.NewInvalidArgument(archiver.ErrInvalidURI.Error()) + } + + if err := archiver.ValidateGetRequest(request); err != nil { + return nil, serviceerror.NewInvalidArgument(archiver.ErrInvalidGetHistoryRequest.Error()) + } + + var err error + var token *getHistoryToken + if request.NextPageToken != nil { + token, err = deserializeGetHistoryToken(request.NextPageToken) + if err != nil { + return nil, serviceerror.NewInvalidArgument(archiver.ErrNextPageTokenCorrupted.Error()) + } + } else if request.CloseFailoverVersion != nil { + token = &getHistoryToken{ + CloseFailoverVersion: *request.CloseFailoverVersion, + } + } else { + highestVersion, err := h.getHighestVersion(ctx, URI, request) + if err != nil { + if err == archiver.ErrHistoryNotExist { + return nil, serviceerror.NewNotFound(err.Error()) + } + return nil, serviceerror.NewInvalidArgument(err.Error()) + } + token = &getHistoryToken{ + CloseFailoverVersion: *highestVersion, + } + } + encoder := codec.NewJSONPBEncoder() + response := &archiver.GetHistoryResponse{} + numOfEvents := 0 + isTruncated := false + for { + if numOfEvents >= request.PageSize { + isTruncated = true + break + } + key := constructHistoryKey(URI.Path(), request.NamespaceID, request.WorkflowID, request.RunID, token.CloseFailoverVersion, token.BatchIdx) + + encodedRecord, err := Download(ctx, h.s3cli, URI, key) + if err != nil { + if isRetryableError(err) { + return nil, serviceerror.NewUnavailable(err.Error()) + } + switch err.(type) { + case *serviceerror.InvalidArgument, *serviceerror.Unavailable, *serviceerror.NotFound: + return nil, err + default: + return nil, serviceerror.NewInternal(err.Error()) + } + } + + historyBlob := archiverspb.HistoryBlob{} + err = encoder.Decode(encodedRecord, &historyBlob) + if err != nil { + return nil, serviceerror.NewInternal(err.Error()) + } + + for _, batch := range historyBlob.Body { + response.HistoryBatches = append(response.HistoryBatches, batch) + numOfEvents += len(batch.Events) + } + + if historyBlob.Header.IsLast { + break + } + token.BatchIdx++ + } + + if isTruncated { + nextToken, err := SerializeToken(token) + if err != nil { + return nil, serviceerror.NewInternal(err.Error()) + } + response.NextPageToken = nextToken + } + + return response, nil +} + +func (h *historyArchiver) ValidateURI(URI archiver.URI) error { + err := SoftValidateURI(URI) + if err != nil { + return err + } + return BucketExists(context.TODO(), h.s3cli, URI) +} + +func (h *historyArchiver) getHighestVersion(ctx context.Context, URI archiver.URI, request *archiver.GetHistoryRequest) (*int64, error) { + ctx, cancel := ensureContextTimeout(ctx) + defer cancel() + var prefix = constructHistoryKeyPrefix(URI.Path(), request.NamespaceID, request.WorkflowID, request.RunID) + "/" + results, err := h.s3cli.ListObjectsV2WithContext(ctx, &s3.ListObjectsV2Input{ + Bucket: aws.String(URI.Hostname()), + Prefix: aws.String(prefix), + Delimiter: aws.String("/"), + }) + if err != nil { + if aerr, ok := err.(awserr.Error); ok && aerr.Code() == 
s3.ErrCodeNoSuchBucket { + return nil, serviceerror.NewInvalidArgument(errBucketNotExists.Error()) + } + return nil, err + } + var highestVersion *int64 + + for _, v := range results.CommonPrefixes { + var version int64 + version, err = strconv.ParseInt(strings.Replace(strings.Replace(*v.Prefix, prefix, "", 1), "/", "", 1), 10, 64) + if err != nil { + continue + } + if highestVersion == nil || version > *highestVersion { + highestVersion = &version + } + } + if highestVersion == nil { + return nil, archiver.ErrHistoryNotExist + } + return highestVersion, nil +} + +func isRetryableError(err error) bool { + if err == nil { + return false + } + if aerr, ok := err.(awserr.Error); ok { + return isStatusCodeRetryable(aerr) || request.IsErrorRetryable(aerr) || request.IsErrorThrottle(aerr) + } + return false +} + +func isStatusCodeRetryable(err error) bool { + if aerr, ok := err.(awserr.Error); ok { + if rerr, ok := err.(awserr.RequestFailure); ok { + if rerr.StatusCode() == 429 { + return true + } + if rerr.StatusCode() >= 500 && rerr.StatusCode() != 501 { + return true + } + } + return isStatusCodeRetryable(aerr.OrigErr()) + } + return false +} diff -Nru temporal-1.21.5-1/src/common/archiver/s3store/history_archiver_test.go temporal-1.22.5/src/common/archiver/s3store/history_archiver_test.go --- temporal-1.21.5-1/src/common/archiver/s3store/history_archiver_test.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/archiver/s3store/history_archiver_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,794 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package s3store + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "sort" + "strconv" + "strings" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + enumspb "go.temporal.io/api/enums/v1" + historypb "go.temporal.io/api/history/v1" + "go.temporal.io/api/serviceerror" + + archiverspb "go.temporal.io/server/api/archiver/v1" + "go.temporal.io/server/common" + "go.temporal.io/server/common/archiver" + "go.temporal.io/server/common/archiver/s3store/mocks" + "go.temporal.io/server/common/codec" + "go.temporal.io/server/common/convert" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/primitives/timestamp" +) + +const ( + testNamespaceID = "test-namespace-id" + testNamespace = "test-namespace" + testWorkflowID = "test-workflow-id" + testRunID = "test-run-id" + testNextEventID = 1800 + testCloseFailoverVersion = int64(100) + testPageSize = 100 + testBucket = "test-bucket" + testBucketURI = "s3://test-bucket" +) + +var testBranchToken = []byte{1, 2, 3} + +type historyArchiverSuite struct { + *require.Assertions + suite.Suite + s3cli *mocks.MockS3API + container *archiver.HistoryBootstrapContainer + testArchivalURI archiver.URI + historyBatchesV1 []*archiverspb.HistoryBlob + historyBatchesV100 []*archiverspb.HistoryBlob + controller *gomock.Controller +} + +func TestHistoryArchiverSuite(t *testing.T) { + suite.Run(t, new(historyArchiverSuite)) +} + +func (s *historyArchiverSuite) SetupSuite() { + var err error + s.testArchivalURI, err = archiver.NewURI(testBucketURI) + s.Require().NoError(err) +} + +func (s *historyArchiverSuite) TearDownSuite() { +} + +func (s *historyArchiverSuite) SetupTest() { + s.Assertions = require.New(s.T()) + s.container = &archiver.HistoryBootstrapContainer{ + Logger: log.NewNoopLogger(), + MetricsHandler: metrics.NoopMetricsHandler, + } + + s.controller = gomock.NewController(s.T()) + s.s3cli = mocks.NewMockS3API(s.controller) + setupFsEmulation(s.s3cli) + s.setupHistoryDirectory() +} + +func setupFsEmulation(s3cli *mocks.MockS3API) { + fs := make(map[string][]byte) + + putObjectFn := func(_ aws.Context, input *s3.PutObjectInput, _ ...request.Option) (*s3.PutObjectOutput, error) { + buf := new(bytes.Buffer) + if _, err := buf.ReadFrom(input.Body); err != nil { + return nil, err + } + fs[*input.Bucket+*input.Key] = buf.Bytes() + return &s3.PutObjectOutput{}, nil + } + + s3cli.EXPECT().ListObjectsV2WithContext(gomock.Any(), gomock.Any()).DoAndReturn( + func(_ context.Context, input *s3.ListObjectsV2Input, opts ...request.Option) (*s3.ListObjectsV2Output, error) { + objects := make([]*s3.Object, 0) + commonPrefixMap := map[string]bool{} + for k := range fs { + if strings.HasPrefix(k, *input.Bucket+*input.Prefix) { + key := k[len(*input.Bucket):] + keyWithoutPrefix := key[len(*input.Prefix):] + index := strings.Index(keyWithoutPrefix, "/") + if index == -1 || input.Delimiter == nil { + objects = append(objects, &s3.Object{ + Key: aws.String(key), + }) + } else { + commonPrefixMap[key[:len(*input.Prefix)+index]] = true + } + } + } + commonPrefixes := make([]*s3.CommonPrefix, 0) + for k := range commonPrefixMap { + commonPrefixes = append(commonPrefixes, &s3.CommonPrefix{ + Prefix: aws.String(k), + }) + } + + sort.SliceStable(objects, func(i, j int) bool { + return 
*objects[i].Key < *objects[j].Key + }) + maxKeys := 1000 + if input.MaxKeys != nil { + maxKeys = int(*input.MaxKeys) + } + start := 0 + if input.ContinuationToken != nil { + start, _ = strconv.Atoi(*input.ContinuationToken) + } + + if input.StartAfter != nil { + for k, v := range objects { + if *input.StartAfter == *v.Key { + start = k + 1 + } + } + } + + isTruncated := false + var nextContinuationToken *string + if len(objects) > start+maxKeys { + isTruncated = true + nextContinuationToken = convert.StringPtr(fmt.Sprintf("%d", start+maxKeys)) + objects = objects[start : start+maxKeys] + } else { + objects = objects[start:] + } + + if input.StartAfter != nil { + for k, v := range commonPrefixes { + if *input.StartAfter == *v.Prefix { + start = k + 1 + } + } + } + + if len(commonPrefixes) > start+maxKeys { + isTruncated = true + nextContinuationToken = convert.StringPtr(fmt.Sprintf("%d", start+maxKeys)) + commonPrefixes = commonPrefixes[start : start+maxKeys] + } else if len(commonPrefixes) > 0 { + commonPrefixes = commonPrefixes[start:] + } + + return &s3.ListObjectsV2Output{ + CommonPrefixes: commonPrefixes, + Contents: objects, + IsTruncated: &isTruncated, + NextContinuationToken: nextContinuationToken, + }, nil + }).AnyTimes() + s3cli.EXPECT().PutObjectWithContext(gomock.Any(), gomock.Any()).DoAndReturn(putObjectFn).AnyTimes() + + s3cli.EXPECT().HeadObjectWithContext(gomock.Any(), gomock.Any()).DoAndReturn( + func(ctx aws.Context, input *s3.HeadObjectInput, options ...request.Option) (*s3.HeadObjectOutput, error) { + _, ok := fs[*input.Bucket+*input.Key] + if !ok { + return nil, awserr.New("NotFound", "", nil) + } + + return &s3.HeadObjectOutput{}, nil + }).AnyTimes() + + s3cli.EXPECT().GetObjectWithContext(gomock.Any(), gomock.Any()).DoAndReturn( + func(ctx aws.Context, input *s3.GetObjectInput, options ...request.Option) (*s3.GetObjectOutput, error) { + _, ok := fs[*input.Bucket+*input.Key] + if !ok { + return nil, awserr.New(s3.ErrCodeNoSuchKey, "", nil) + } + + return &s3.GetObjectOutput{ + Body: io.NopCloser(bytes.NewReader(fs[*input.Bucket+*input.Key])), + }, nil + }).AnyTimes() +} + +func (s *historyArchiverSuite) TestValidateURI() { + testCases := []struct { + URI string + expectedErr error + }{ + { + URI: "wrongscheme:///a/b/c", + expectedErr: archiver.ErrURISchemeMismatch, + }, + { + URI: "s3://", + expectedErr: errNoBucketSpecified, + }, + { + URI: "s3://bucket/a/b/c", + expectedErr: errBucketNotExists, + }, + { + URI: testBucketURI, + expectedErr: nil, + }, + } + + s.s3cli.EXPECT().HeadBucketWithContext(gomock.Any(), gomock.Any()).DoAndReturn( + func(ctx aws.Context, input *s3.HeadBucketInput, options ...request.Option) (*s3.HeadBucketOutput, error) { + if *input.Bucket != s.testArchivalURI.Hostname() { + return nil, awserr.New("NotFound", "", nil) + } + + return &s3.HeadBucketOutput{}, nil + }).AnyTimes() + + historyArchiver := s.newTestHistoryArchiver(nil) + for _, tc := range testCases { + URI, err := archiver.NewURI(tc.URI) + s.NoError(err) + s.Equal(tc.expectedErr, historyArchiver.ValidateURI(URI)) + } +} + +func (s *historyArchiverSuite) TestArchive_Fail_InvalidURI() { + historyArchiver := s.newTestHistoryArchiver(nil) + request := &archiver.ArchiveHistoryRequest{ + NamespaceID: testNamespaceID, + Namespace: testNamespace, + WorkflowID: testWorkflowID, + RunID: testRunID, + BranchToken: testBranchToken, + NextEventID: testNextEventID, + CloseFailoverVersion: testCloseFailoverVersion, + } + URI, err := archiver.NewURI("wrongscheme://") + s.NoError(err) + err = 
historyArchiver.Archive(context.Background(), URI, request) + s.Error(err) +} + +func (s *historyArchiverSuite) TestArchive_Fail_InvalidRequest() { + historyArchiver := s.newTestHistoryArchiver(nil) + request := &archiver.ArchiveHistoryRequest{ + NamespaceID: testNamespaceID, + Namespace: testNamespace, + WorkflowID: "", // an invalid request + RunID: testRunID, + BranchToken: testBranchToken, + NextEventID: testNextEventID, + CloseFailoverVersion: testCloseFailoverVersion, + } + err := historyArchiver.Archive(context.Background(), s.testArchivalURI, request) + s.Error(err) +} + +func (s *historyArchiverSuite) TestArchive_Fail_ErrorOnReadHistory() { + historyIterator := archiver.NewMockHistoryIterator(s.controller) + gomock.InOrder( + historyIterator.EXPECT().HasNext().Return(true), + historyIterator.EXPECT().Next(gomock.Any()).Return(nil, errors.New("some random error")), + ) + + historyArchiver := s.newTestHistoryArchiver(historyIterator) + request := &archiver.ArchiveHistoryRequest{ + NamespaceID: testNamespaceID, + Namespace: testNamespace, + WorkflowID: testWorkflowID, + RunID: testRunID, + BranchToken: testBranchToken, + NextEventID: testNextEventID, + CloseFailoverVersion: testCloseFailoverVersion, + } + err := historyArchiver.Archive(context.Background(), s.testArchivalURI, request) + s.Error(err) +} + +func (s *historyArchiverSuite) TestArchive_Fail_TimeoutWhenReadingHistory() { + historyIterator := archiver.NewMockHistoryIterator(s.controller) + gomock.InOrder( + historyIterator.EXPECT().HasNext().Return(true), + historyIterator.EXPECT().Next(gomock.Any()).Return(nil, serviceerror.NewResourceExhausted(enumspb.RESOURCE_EXHAUSTED_CAUSE_RPS_LIMIT, "")), + ) + + historyArchiver := s.newTestHistoryArchiver(historyIterator) + request := &archiver.ArchiveHistoryRequest{ + NamespaceID: testNamespaceID, + Namespace: testNamespace, + WorkflowID: testWorkflowID, + RunID: testRunID, + BranchToken: testBranchToken, + NextEventID: testNextEventID, + CloseFailoverVersion: testCloseFailoverVersion, + } + err := historyArchiver.Archive(getCanceledContext(), s.testArchivalURI, request) + s.Error(err) +} + +func (s *historyArchiverSuite) TestArchive_Fail_HistoryMutated() { + historyIterator := archiver.NewMockHistoryIterator(s.controller) + historyBatches := []*historypb.History{ + { + Events: []*historypb.HistoryEvent{ + { + EventId: common.FirstEventID + 1, + EventTime: timestamp.TimePtr(time.Now().UTC()), + Version: testCloseFailoverVersion + 1, + }, + }, + }, + } + historyBlob := &archiverspb.HistoryBlob{ + Header: &archiverspb.HistoryBlobHeader{ + IsLast: true, + }, + Body: historyBatches, + } + gomock.InOrder( + historyIterator.EXPECT().HasNext().Return(true), + historyIterator.EXPECT().Next(gomock.Any()).Return(historyBlob, nil), + ) + + historyArchiver := s.newTestHistoryArchiver(historyIterator) + request := &archiver.ArchiveHistoryRequest{ + NamespaceID: testNamespaceID, + Namespace: testNamespace, + WorkflowID: testWorkflowID, + RunID: testRunID, + BranchToken: testBranchToken, + NextEventID: testNextEventID, + CloseFailoverVersion: testCloseFailoverVersion, + } + err := historyArchiver.Archive(context.Background(), s.testArchivalURI, request) + s.Error(err) +} + +func (s *historyArchiverSuite) TestArchive_Fail_NonRetryableErrorOption() { + historyIterator := archiver.NewMockHistoryIterator(s.controller) + gomock.InOrder( + historyIterator.EXPECT().HasNext().Return(true), + historyIterator.EXPECT().Next(gomock.Any()).Return(nil, errors.New("some random error")), + ) + + historyArchiver 
:= s.newTestHistoryArchiver(historyIterator) + request := &archiver.ArchiveHistoryRequest{ + NamespaceID: testNamespaceID, + Namespace: testNamespace, + WorkflowID: testWorkflowID, + RunID: testRunID, + BranchToken: testBranchToken, + NextEventID: testNextEventID, + CloseFailoverVersion: testCloseFailoverVersion, + } + nonRetryableErr := errors.New("some non-retryable error") + err := historyArchiver.Archive(context.Background(), s.testArchivalURI, request, archiver.GetNonRetryableErrorOption(nonRetryableErr)) + s.Equal(nonRetryableErr, err) +} + +func (s *historyArchiverSuite) TestArchive_Skip() { + historyIterator := archiver.NewMockHistoryIterator(s.controller) + historyBlob := &archiverspb.HistoryBlob{ + Header: &archiverspb.HistoryBlobHeader{ + IsLast: false, + }, + Body: []*historypb.History{ + { + Events: []*historypb.HistoryEvent{ + { + EventId: common.FirstEventID, + EventTime: timestamp.TimePtr(time.Now().UTC()), + Version: testCloseFailoverVersion, + }, + }, + }, + }, + } + gomock.InOrder( + historyIterator.EXPECT().HasNext().Return(true), + historyIterator.EXPECT().Next(gomock.Any()).Return(historyBlob, nil), + historyIterator.EXPECT().HasNext().Return(true), + historyIterator.EXPECT().Next(gomock.Any()).Return(nil, serviceerror.NewNotFound("workflow not found")), + ) + + historyArchiver := s.newTestHistoryArchiver(historyIterator) + request := &archiver.ArchiveHistoryRequest{ + NamespaceID: testNamespaceID, + Namespace: testNamespace, + WorkflowID: testWorkflowID, + RunID: testRunID, + BranchToken: testBranchToken, + NextEventID: testNextEventID, + CloseFailoverVersion: testCloseFailoverVersion, + } + URI, err := archiver.NewURI(testBucketURI + "/TestArchive_Skip") + s.NoError(err) + err = historyArchiver.Archive(context.Background(), URI, request) + s.NoError(err) + + expectedkey := constructHistoryKey("", testNamespaceID, testWorkflowID, testRunID, testCloseFailoverVersion, 0) + s.assertKeyExists(expectedkey) +} + +func (s *historyArchiverSuite) TestArchive_Success() { + historyIterator := archiver.NewMockHistoryIterator(s.controller) + historyBatches := []*historypb.History{ + { + Events: []*historypb.HistoryEvent{ + { + EventId: common.FirstEventID + 1, + EventTime: timestamp.TimePtr(time.Now().UTC()), + Version: testCloseFailoverVersion, + }, + { + EventId: common.FirstEventID + 2, + EventTime: timestamp.TimePtr(time.Now().UTC()), + Version: testCloseFailoverVersion, + }, + }, + }, + { + Events: []*historypb.HistoryEvent{ + { + EventId: testNextEventID - 1, + EventTime: timestamp.TimePtr(time.Now().UTC()), + Version: testCloseFailoverVersion, + }, + }, + }, + } + historyBlob := &archiverspb.HistoryBlob{ + Header: &archiverspb.HistoryBlobHeader{ + IsLast: true, + }, + Body: historyBatches, + } + gomock.InOrder( + historyIterator.EXPECT().HasNext().Return(true), + historyIterator.EXPECT().Next(gomock.Any()).Return(historyBlob, nil), + historyIterator.EXPECT().HasNext().Return(false), + ) + + historyArchiver := s.newTestHistoryArchiver(historyIterator) + request := &archiver.ArchiveHistoryRequest{ + NamespaceID: testNamespaceID, + Namespace: testNamespace, + WorkflowID: testWorkflowID, + RunID: testRunID, + BranchToken: testBranchToken, + NextEventID: testNextEventID, + CloseFailoverVersion: testCloseFailoverVersion, + } + URI, err := archiver.NewURI(testBucketURI + "/TestArchive_Success") + s.NoError(err) + err = historyArchiver.Archive(context.Background(), URI, request) + s.NoError(err) + + expectedkey := constructHistoryKey("", testNamespaceID, testWorkflowID, testRunID, 
testCloseFailoverVersion, 0) + s.assertKeyExists(expectedkey) +} + +func (s *historyArchiverSuite) TestGet_Fail_InvalidURI() { + historyArchiver := s.newTestHistoryArchiver(nil) + request := &archiver.GetHistoryRequest{ + NamespaceID: testNamespaceID, + WorkflowID: testWorkflowID, + RunID: testRunID, + PageSize: 100, + } + URI, err := archiver.NewURI("wrongscheme://") + s.NoError(err) + response, err := historyArchiver.Get(context.Background(), URI, request) + s.Nil(response) + s.Error(err) +} + +func (s *historyArchiverSuite) TestGet_Fail_InvalidRequest() { + historyArchiver := s.newTestHistoryArchiver(nil) + request := &archiver.GetHistoryRequest{ + NamespaceID: testNamespaceID, + WorkflowID: testWorkflowID, + RunID: testRunID, + PageSize: 0, // pageSize should be greater than 0 + } + response, err := historyArchiver.Get(context.Background(), s.testArchivalURI, request) + s.Nil(response) + s.Error(err) + s.IsType(&serviceerror.InvalidArgument{}, err) +} + +func (s *historyArchiverSuite) TestGet_Fail_InvalidToken() { + historyArchiver := s.newTestHistoryArchiver(nil) + request := &archiver.GetHistoryRequest{ + NamespaceID: testNamespaceID, + WorkflowID: testWorkflowID, + RunID: testRunID, + PageSize: testPageSize, + NextPageToken: []byte{'r', 'a', 'n', 'd', 'o', 'm'}, + } + URI, err := archiver.NewURI(testBucketURI) + s.NoError(err) + response, err := historyArchiver.Get(context.Background(), URI, request) + s.Nil(response) + s.Error(err) + s.IsType(&serviceerror.InvalidArgument{}, err) +} + +func (s *historyArchiverSuite) TestGet_Fail_KeyNotExist() { + historyArchiver := s.newTestHistoryArchiver(nil) + testCloseFailoverVersion := testCloseFailoverVersion + request := &archiver.GetHistoryRequest{ + NamespaceID: testNamespaceID, + WorkflowID: testWorkflowID, + RunID: testRunID, + PageSize: testPageSize, + CloseFailoverVersion: &testCloseFailoverVersion, + } + URI, err := archiver.NewURI("s3://test-bucket/non-existent") + s.NoError(err) + response, err := historyArchiver.Get(context.Background(), URI, request) + s.Nil(response) + s.Error(err) + s.IsType(&serviceerror.NotFound{}, err) +} + +func (s *historyArchiverSuite) TestGet_Success_PickHighestVersion() { + historyArchiver := s.newTestHistoryArchiver(nil) + request := &archiver.GetHistoryRequest{ + NamespaceID: testNamespaceID, + WorkflowID: testWorkflowID, + RunID: testRunID, + PageSize: testPageSize, + } + URI, err := archiver.NewURI(testBucketURI) + s.NoError(err) + response, err := historyArchiver.Get(context.Background(), URI, request) + s.NoError(err) + s.Nil(response.NextPageToken) + s.Equal(append(s.historyBatchesV100[0].Body, s.historyBatchesV100[1].Body...), response.HistoryBatches) +} + +func (s *historyArchiverSuite) TestGet_Success_UseProvidedVersion() { + historyArchiver := s.newTestHistoryArchiver(nil) + testCloseFailoverVersion := int64(1) + request := &archiver.GetHistoryRequest{ + NamespaceID: testNamespaceID, + WorkflowID: testWorkflowID, + RunID: testRunID, + PageSize: testPageSize, + CloseFailoverVersion: &testCloseFailoverVersion, + } + URI, err := archiver.NewURI(testBucketURI) + s.NoError(err) + response, err := historyArchiver.Get(context.Background(), URI, request) + s.NoError(err) + s.Nil(response.NextPageToken) + s.Equal(s.historyBatchesV1[0].Body, response.HistoryBatches) +} + +func (s *historyArchiverSuite) TestGet_Success_SmallPageSize() { + historyArchiver := s.newTestHistoryArchiver(nil) + testCloseFailoverVersion := testCloseFailoverVersion + request := &archiver.GetHistoryRequest{ + NamespaceID: 
testNamespaceID, + WorkflowID: testWorkflowID, + RunID: testRunID, + PageSize: 1, + CloseFailoverVersion: &testCloseFailoverVersion, + } + var combinedHistory []*historypb.History + + URI, err := archiver.NewURI(testBucketURI) + s.NoError(err) + response, err := historyArchiver.Get(context.Background(), URI, request) + s.NoError(err) + s.NotNil(response) + s.NotNil(response.NextPageToken) + s.NotNil(response.HistoryBatches) + s.Len(response.HistoryBatches, 1) + combinedHistory = append(combinedHistory, response.HistoryBatches...) + + request.NextPageToken = response.NextPageToken + response, err = historyArchiver.Get(context.Background(), URI, request) + s.NoError(err) + s.NotNil(response) + s.Nil(response.NextPageToken) + s.NotNil(response.HistoryBatches) + s.Len(response.HistoryBatches, 1) + combinedHistory = append(combinedHistory, response.HistoryBatches...) + + s.Equal(append(s.historyBatchesV100[0].Body, s.historyBatchesV100[1].Body...), combinedHistory) +} + +func (s *historyArchiverSuite) TestGet_EmptyHistory_ReturnsNotFoundError() { + historyIterator := archiver.NewMockHistoryIterator(s.controller) + historyArchiver := s.newTestHistoryArchiver(historyIterator) + URI, err := archiver.NewURI(testBucketURI + "/TestArchiveAndGet") + s.NoError(err) + getRequest := &archiver.GetHistoryRequest{ + NamespaceID: testNamespaceID, + WorkflowID: testWorkflowID, + RunID: testRunID, + PageSize: testPageSize, + } + response, err := historyArchiver.Get(context.Background(), URI, getRequest) + s.Error(err) + s.Nil(response) + s.IsType(&serviceerror.NotFound{}, err) +} + +func (s *historyArchiverSuite) TestArchiveAndGet() { + historyIterator := archiver.NewMockHistoryIterator(s.controller) + gomock.InOrder( + historyIterator.EXPECT().HasNext().Return(true), + historyIterator.EXPECT().Next(gomock.Any()).Return(s.historyBatchesV100[0], nil), + historyIterator.EXPECT().HasNext().Return(true), + historyIterator.EXPECT().Next(gomock.Any()).Return(s.historyBatchesV100[1], nil), + historyIterator.EXPECT().HasNext().Return(false), + ) + + historyArchiver := s.newTestHistoryArchiver(historyIterator) + archiveRequest := &archiver.ArchiveHistoryRequest{ + NamespaceID: testNamespaceID, + Namespace: testNamespace, + WorkflowID: testWorkflowID, + RunID: testRunID, + BranchToken: testBranchToken, + NextEventID: testNextEventID, + CloseFailoverVersion: testCloseFailoverVersion, + } + URI, err := archiver.NewURI(testBucketURI + "/TestArchiveAndGet") + s.NoError(err) + err = historyArchiver.Archive(context.Background(), URI, archiveRequest) + s.NoError(err) + + getRequest := &archiver.GetHistoryRequest{ + NamespaceID: testNamespaceID, + WorkflowID: testWorkflowID, + RunID: testRunID, + PageSize: testPageSize, + } + response, err := historyArchiver.Get(context.Background(), URI, getRequest) + s.NoError(err) + s.NotNil(response) + s.Nil(response.NextPageToken) + s.Equal(append(s.historyBatchesV100[0].Body, s.historyBatchesV100[1].Body...), response.HistoryBatches) +} + +func (s *historyArchiverSuite) newTestHistoryArchiver(historyIterator archiver.HistoryIterator) *historyArchiver { + // config := &config.S3Archiver{} + // archiver, err := newHistoryArchiver(s.container, config, historyIterator) + archiver := &historyArchiver{ + container: s.container, + s3cli: s.s3cli, + historyIterator: historyIterator, + } + return archiver +} + +func (s *historyArchiverSuite) setupHistoryDirectory() { + now := time.Date(2020, 8, 22, 1, 2, 3, 4, time.UTC) + + s.historyBatchesV1 = []*archiverspb.HistoryBlob{ + { + Header: 
&archiverspb.HistoryBlobHeader{ + IsLast: true, + }, + Body: []*historypb.History{ + { + Events: []*historypb.HistoryEvent{ + { + EventId: testNextEventID - 1, + EventTime: &now, + Version: 1, + }, + }, + }, + }, + }, + } + + s.historyBatchesV100 = []*archiverspb.HistoryBlob{ + { + Header: &archiverspb.HistoryBlobHeader{ + IsLast: false, + }, + Body: []*historypb.History{ + { + Events: []*historypb.HistoryEvent{ + { + EventId: common.FirstEventID + 1, + EventTime: &now, + Version: testCloseFailoverVersion, + }, + { + EventId: common.FirstEventID + 1, + EventTime: &now, + Version: testCloseFailoverVersion, + }, + }, + }, + }, + }, + { + Header: &archiverspb.HistoryBlobHeader{ + IsLast: true, + }, + Body: []*historypb.History{ + { + Events: []*historypb.HistoryEvent{ + { + EventId: testNextEventID - 1, + EventTime: &now, + Version: testCloseFailoverVersion, + }, + }, + }, + }, + }, + } + + s.writeHistoryBatchesForGetTest(s.historyBatchesV1, int64(1)) + s.writeHistoryBatchesForGetTest(s.historyBatchesV100, testCloseFailoverVersion) +} + +func (s *historyArchiverSuite) writeHistoryBatchesForGetTest(historyBatches []*archiverspb.HistoryBlob, version int64) { + for i, batch := range historyBatches { + encoder := codec.NewJSONPBEncoder() + data, err := encoder.Encode(batch) + s.Require().NoError(err) + key := constructHistoryKey("", testNamespaceID, testWorkflowID, testRunID, version, i) + _, err = s.s3cli.PutObjectWithContext(context.Background(), &s3.PutObjectInput{ + Bucket: aws.String(testBucket), + Key: aws.String(key), + Body: bytes.NewReader(data), + }) + s.Require().NoError(err) + } +} + +func (s *historyArchiverSuite) assertKeyExists(key string) { + _, err := s.s3cli.GetObjectWithContext(context.Background(), &s3.GetObjectInput{ + Bucket: aws.String(testBucket), + Key: aws.String(key), + }) + s.NoError(err) +} + +func getCanceledContext() context.Context { + ctx, cancel := context.WithCancel(context.Background()) + cancel() + return ctx +} diff -Nru temporal-1.21.5-1/src/common/archiver/s3store/mocks/S3API.go temporal-1.22.5/src/common/archiver/s3store/mocks/S3API.go --- temporal-1.21.5-1/src/common/archiver/s3store/mocks/S3API.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/archiver/s3store/mocks/S3API.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,5207 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Code generated by MockGen. DO NOT EDIT. - -// Package mocks is a generated GoMock package. -package mocks - -import ( - reflect "reflect" - - aws "github.com/aws/aws-sdk-go/aws" - request "github.com/aws/aws-sdk-go/aws/request" - s3 "github.com/aws/aws-sdk-go/service/s3" - gomock "github.com/golang/mock/gomock" -) - -// MockS3API is a mock of S3API interface. -type MockS3API struct { - ctrl *gomock.Controller - recorder *MockS3APIMockRecorder -} - -// MockS3APIMockRecorder is the mock recorder for MockS3API. -type MockS3APIMockRecorder struct { - mock *MockS3API -} - -// NewMockS3API creates a new mock instance. -func NewMockS3API(ctrl *gomock.Controller) *MockS3API { - mock := &MockS3API{ctrl: ctrl} - mock.recorder = &MockS3APIMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockS3API) EXPECT() *MockS3APIMockRecorder { - return m.recorder -} - -// AbortMultipartUpload mocks base method. -func (m *MockS3API) AbortMultipartUpload(arg0 *s3.AbortMultipartUploadInput) (*s3.AbortMultipartUploadOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AbortMultipartUpload", arg0) - ret0, _ := ret[0].(*s3.AbortMultipartUploadOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// AbortMultipartUpload indicates an expected call of AbortMultipartUpload. -func (mr *MockS3APIMockRecorder) AbortMultipartUpload(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AbortMultipartUpload", reflect.TypeOf((*MockS3API)(nil).AbortMultipartUpload), arg0) -} - -// AbortMultipartUploadRequest mocks base method. -func (m *MockS3API) AbortMultipartUploadRequest(arg0 *s3.AbortMultipartUploadInput) (*request.Request, *s3.AbortMultipartUploadOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AbortMultipartUploadRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.AbortMultipartUploadOutput) - return ret0, ret1 -} - -// AbortMultipartUploadRequest indicates an expected call of AbortMultipartUploadRequest. -func (mr *MockS3APIMockRecorder) AbortMultipartUploadRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AbortMultipartUploadRequest", reflect.TypeOf((*MockS3API)(nil).AbortMultipartUploadRequest), arg0) -} - -// AbortMultipartUploadWithContext mocks base method. -func (m *MockS3API) AbortMultipartUploadWithContext(arg0 aws.Context, arg1 *s3.AbortMultipartUploadInput, arg2 ...request.Option) (*s3.AbortMultipartUploadOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "AbortMultipartUploadWithContext", varargs...) - ret0, _ := ret[0].(*s3.AbortMultipartUploadOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// AbortMultipartUploadWithContext indicates an expected call of AbortMultipartUploadWithContext. -func (mr *MockS3APIMockRecorder) AbortMultipartUploadWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) 
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AbortMultipartUploadWithContext", reflect.TypeOf((*MockS3API)(nil).AbortMultipartUploadWithContext), varargs...) -} - -// CompleteMultipartUpload mocks base method. -func (m *MockS3API) CompleteMultipartUpload(arg0 *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CompleteMultipartUpload", arg0) - ret0, _ := ret[0].(*s3.CompleteMultipartUploadOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// CompleteMultipartUpload indicates an expected call of CompleteMultipartUpload. -func (mr *MockS3APIMockRecorder) CompleteMultipartUpload(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CompleteMultipartUpload", reflect.TypeOf((*MockS3API)(nil).CompleteMultipartUpload), arg0) -} - -// CompleteMultipartUploadRequest mocks base method. -func (m *MockS3API) CompleteMultipartUploadRequest(arg0 *s3.CompleteMultipartUploadInput) (*request.Request, *s3.CompleteMultipartUploadOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CompleteMultipartUploadRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.CompleteMultipartUploadOutput) - return ret0, ret1 -} - -// CompleteMultipartUploadRequest indicates an expected call of CompleteMultipartUploadRequest. -func (mr *MockS3APIMockRecorder) CompleteMultipartUploadRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CompleteMultipartUploadRequest", reflect.TypeOf((*MockS3API)(nil).CompleteMultipartUploadRequest), arg0) -} - -// CompleteMultipartUploadWithContext mocks base method. -func (m *MockS3API) CompleteMultipartUploadWithContext(arg0 aws.Context, arg1 *s3.CompleteMultipartUploadInput, arg2 ...request.Option) (*s3.CompleteMultipartUploadOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "CompleteMultipartUploadWithContext", varargs...) - ret0, _ := ret[0].(*s3.CompleteMultipartUploadOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// CompleteMultipartUploadWithContext indicates an expected call of CompleteMultipartUploadWithContext. -func (mr *MockS3APIMockRecorder) CompleteMultipartUploadWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CompleteMultipartUploadWithContext", reflect.TypeOf((*MockS3API)(nil).CompleteMultipartUploadWithContext), varargs...) -} - -// CopyObject mocks base method. -func (m *MockS3API) CopyObject(arg0 *s3.CopyObjectInput) (*s3.CopyObjectOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CopyObject", arg0) - ret0, _ := ret[0].(*s3.CopyObjectOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// CopyObject indicates an expected call of CopyObject. -func (mr *MockS3APIMockRecorder) CopyObject(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CopyObject", reflect.TypeOf((*MockS3API)(nil).CopyObject), arg0) -} - -// CopyObjectRequest mocks base method. 
-func (m *MockS3API) CopyObjectRequest(arg0 *s3.CopyObjectInput) (*request.Request, *s3.CopyObjectOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CopyObjectRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.CopyObjectOutput) - return ret0, ret1 -} - -// CopyObjectRequest indicates an expected call of CopyObjectRequest. -func (mr *MockS3APIMockRecorder) CopyObjectRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CopyObjectRequest", reflect.TypeOf((*MockS3API)(nil).CopyObjectRequest), arg0) -} - -// CopyObjectWithContext mocks base method. -func (m *MockS3API) CopyObjectWithContext(arg0 aws.Context, arg1 *s3.CopyObjectInput, arg2 ...request.Option) (*s3.CopyObjectOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "CopyObjectWithContext", varargs...) - ret0, _ := ret[0].(*s3.CopyObjectOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// CopyObjectWithContext indicates an expected call of CopyObjectWithContext. -func (mr *MockS3APIMockRecorder) CopyObjectWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CopyObjectWithContext", reflect.TypeOf((*MockS3API)(nil).CopyObjectWithContext), varargs...) -} - -// CreateBucket mocks base method. -func (m *MockS3API) CreateBucket(arg0 *s3.CreateBucketInput) (*s3.CreateBucketOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CreateBucket", arg0) - ret0, _ := ret[0].(*s3.CreateBucketOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// CreateBucket indicates an expected call of CreateBucket. -func (mr *MockS3APIMockRecorder) CreateBucket(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateBucket", reflect.TypeOf((*MockS3API)(nil).CreateBucket), arg0) -} - -// CreateBucketRequest mocks base method. -func (m *MockS3API) CreateBucketRequest(arg0 *s3.CreateBucketInput) (*request.Request, *s3.CreateBucketOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CreateBucketRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.CreateBucketOutput) - return ret0, ret1 -} - -// CreateBucketRequest indicates an expected call of CreateBucketRequest. -func (mr *MockS3APIMockRecorder) CreateBucketRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateBucketRequest", reflect.TypeOf((*MockS3API)(nil).CreateBucketRequest), arg0) -} - -// CreateBucketWithContext mocks base method. -func (m *MockS3API) CreateBucketWithContext(arg0 aws.Context, arg1 *s3.CreateBucketInput, arg2 ...request.Option) (*s3.CreateBucketOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "CreateBucketWithContext", varargs...) - ret0, _ := ret[0].(*s3.CreateBucketOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// CreateBucketWithContext indicates an expected call of CreateBucketWithContext. -func (mr *MockS3APIMockRecorder) CreateBucketWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) 
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateBucketWithContext", reflect.TypeOf((*MockS3API)(nil).CreateBucketWithContext), varargs...) -} - -// CreateMultipartUpload mocks base method. -func (m *MockS3API) CreateMultipartUpload(arg0 *s3.CreateMultipartUploadInput) (*s3.CreateMultipartUploadOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CreateMultipartUpload", arg0) - ret0, _ := ret[0].(*s3.CreateMultipartUploadOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// CreateMultipartUpload indicates an expected call of CreateMultipartUpload. -func (mr *MockS3APIMockRecorder) CreateMultipartUpload(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateMultipartUpload", reflect.TypeOf((*MockS3API)(nil).CreateMultipartUpload), arg0) -} - -// CreateMultipartUploadRequest mocks base method. -func (m *MockS3API) CreateMultipartUploadRequest(arg0 *s3.CreateMultipartUploadInput) (*request.Request, *s3.CreateMultipartUploadOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CreateMultipartUploadRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.CreateMultipartUploadOutput) - return ret0, ret1 -} - -// CreateMultipartUploadRequest indicates an expected call of CreateMultipartUploadRequest. -func (mr *MockS3APIMockRecorder) CreateMultipartUploadRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateMultipartUploadRequest", reflect.TypeOf((*MockS3API)(nil).CreateMultipartUploadRequest), arg0) -} - -// CreateMultipartUploadWithContext mocks base method. -func (m *MockS3API) CreateMultipartUploadWithContext(arg0 aws.Context, arg1 *s3.CreateMultipartUploadInput, arg2 ...request.Option) (*s3.CreateMultipartUploadOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "CreateMultipartUploadWithContext", varargs...) - ret0, _ := ret[0].(*s3.CreateMultipartUploadOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// CreateMultipartUploadWithContext indicates an expected call of CreateMultipartUploadWithContext. -func (mr *MockS3APIMockRecorder) CreateMultipartUploadWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateMultipartUploadWithContext", reflect.TypeOf((*MockS3API)(nil).CreateMultipartUploadWithContext), varargs...) -} - -// DeleteBucket mocks base method. -func (m *MockS3API) DeleteBucket(arg0 *s3.DeleteBucketInput) (*s3.DeleteBucketOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteBucket", arg0) - ret0, _ := ret[0].(*s3.DeleteBucketOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DeleteBucket indicates an expected call of DeleteBucket. -func (mr *MockS3APIMockRecorder) DeleteBucket(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucket", reflect.TypeOf((*MockS3API)(nil).DeleteBucket), arg0) -} - -// DeleteBucketAnalyticsConfiguration mocks base method. 
-func (m *MockS3API) DeleteBucketAnalyticsConfiguration(arg0 *s3.DeleteBucketAnalyticsConfigurationInput) (*s3.DeleteBucketAnalyticsConfigurationOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteBucketAnalyticsConfiguration", arg0) - ret0, _ := ret[0].(*s3.DeleteBucketAnalyticsConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DeleteBucketAnalyticsConfiguration indicates an expected call of DeleteBucketAnalyticsConfiguration. -func (mr *MockS3APIMockRecorder) DeleteBucketAnalyticsConfiguration(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketAnalyticsConfiguration", reflect.TypeOf((*MockS3API)(nil).DeleteBucketAnalyticsConfiguration), arg0) -} - -// DeleteBucketAnalyticsConfigurationRequest mocks base method. -func (m *MockS3API) DeleteBucketAnalyticsConfigurationRequest(arg0 *s3.DeleteBucketAnalyticsConfigurationInput) (*request.Request, *s3.DeleteBucketAnalyticsConfigurationOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteBucketAnalyticsConfigurationRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.DeleteBucketAnalyticsConfigurationOutput) - return ret0, ret1 -} - -// DeleteBucketAnalyticsConfigurationRequest indicates an expected call of DeleteBucketAnalyticsConfigurationRequest. -func (mr *MockS3APIMockRecorder) DeleteBucketAnalyticsConfigurationRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketAnalyticsConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).DeleteBucketAnalyticsConfigurationRequest), arg0) -} - -// DeleteBucketAnalyticsConfigurationWithContext mocks base method. -func (m *MockS3API) DeleteBucketAnalyticsConfigurationWithContext(arg0 aws.Context, arg1 *s3.DeleteBucketAnalyticsConfigurationInput, arg2 ...request.Option) (*s3.DeleteBucketAnalyticsConfigurationOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "DeleteBucketAnalyticsConfigurationWithContext", varargs...) - ret0, _ := ret[0].(*s3.DeleteBucketAnalyticsConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DeleteBucketAnalyticsConfigurationWithContext indicates an expected call of DeleteBucketAnalyticsConfigurationWithContext. -func (mr *MockS3APIMockRecorder) DeleteBucketAnalyticsConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketAnalyticsConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).DeleteBucketAnalyticsConfigurationWithContext), varargs...) -} - -// DeleteBucketCors mocks base method. -func (m *MockS3API) DeleteBucketCors(arg0 *s3.DeleteBucketCorsInput) (*s3.DeleteBucketCorsOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteBucketCors", arg0) - ret0, _ := ret[0].(*s3.DeleteBucketCorsOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DeleteBucketCors indicates an expected call of DeleteBucketCors. -func (mr *MockS3APIMockRecorder) DeleteBucketCors(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketCors", reflect.TypeOf((*MockS3API)(nil).DeleteBucketCors), arg0) -} - -// DeleteBucketCorsRequest mocks base method. 
-func (m *MockS3API) DeleteBucketCorsRequest(arg0 *s3.DeleteBucketCorsInput) (*request.Request, *s3.DeleteBucketCorsOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteBucketCorsRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.DeleteBucketCorsOutput) - return ret0, ret1 -} - -// DeleteBucketCorsRequest indicates an expected call of DeleteBucketCorsRequest. -func (mr *MockS3APIMockRecorder) DeleteBucketCorsRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketCorsRequest", reflect.TypeOf((*MockS3API)(nil).DeleteBucketCorsRequest), arg0) -} - -// DeleteBucketCorsWithContext mocks base method. -func (m *MockS3API) DeleteBucketCorsWithContext(arg0 aws.Context, arg1 *s3.DeleteBucketCorsInput, arg2 ...request.Option) (*s3.DeleteBucketCorsOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "DeleteBucketCorsWithContext", varargs...) - ret0, _ := ret[0].(*s3.DeleteBucketCorsOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DeleteBucketCorsWithContext indicates an expected call of DeleteBucketCorsWithContext. -func (mr *MockS3APIMockRecorder) DeleteBucketCorsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketCorsWithContext", reflect.TypeOf((*MockS3API)(nil).DeleteBucketCorsWithContext), varargs...) -} - -// DeleteBucketEncryption mocks base method. -func (m *MockS3API) DeleteBucketEncryption(arg0 *s3.DeleteBucketEncryptionInput) (*s3.DeleteBucketEncryptionOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteBucketEncryption", arg0) - ret0, _ := ret[0].(*s3.DeleteBucketEncryptionOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DeleteBucketEncryption indicates an expected call of DeleteBucketEncryption. -func (mr *MockS3APIMockRecorder) DeleteBucketEncryption(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketEncryption", reflect.TypeOf((*MockS3API)(nil).DeleteBucketEncryption), arg0) -} - -// DeleteBucketEncryptionRequest mocks base method. -func (m *MockS3API) DeleteBucketEncryptionRequest(arg0 *s3.DeleteBucketEncryptionInput) (*request.Request, *s3.DeleteBucketEncryptionOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteBucketEncryptionRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.DeleteBucketEncryptionOutput) - return ret0, ret1 -} - -// DeleteBucketEncryptionRequest indicates an expected call of DeleteBucketEncryptionRequest. -func (mr *MockS3APIMockRecorder) DeleteBucketEncryptionRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketEncryptionRequest", reflect.TypeOf((*MockS3API)(nil).DeleteBucketEncryptionRequest), arg0) -} - -// DeleteBucketEncryptionWithContext mocks base method. -func (m *MockS3API) DeleteBucketEncryptionWithContext(arg0 aws.Context, arg1 *s3.DeleteBucketEncryptionInput, arg2 ...request.Option) (*s3.DeleteBucketEncryptionOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "DeleteBucketEncryptionWithContext", varargs...) 
- ret0, _ := ret[0].(*s3.DeleteBucketEncryptionOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DeleteBucketEncryptionWithContext indicates an expected call of DeleteBucketEncryptionWithContext. -func (mr *MockS3APIMockRecorder) DeleteBucketEncryptionWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketEncryptionWithContext", reflect.TypeOf((*MockS3API)(nil).DeleteBucketEncryptionWithContext), varargs...) -} - -// DeleteBucketIntelligentTieringConfiguration mocks base method. -func (m *MockS3API) DeleteBucketIntelligentTieringConfiguration(arg0 *s3.DeleteBucketIntelligentTieringConfigurationInput) (*s3.DeleteBucketIntelligentTieringConfigurationOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteBucketIntelligentTieringConfiguration", arg0) - ret0, _ := ret[0].(*s3.DeleteBucketIntelligentTieringConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DeleteBucketIntelligentTieringConfiguration indicates an expected call of DeleteBucketIntelligentTieringConfiguration. -func (mr *MockS3APIMockRecorder) DeleteBucketIntelligentTieringConfiguration(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketIntelligentTieringConfiguration", reflect.TypeOf((*MockS3API)(nil).DeleteBucketIntelligentTieringConfiguration), arg0) -} - -// DeleteBucketIntelligentTieringConfigurationRequest mocks base method. -func (m *MockS3API) DeleteBucketIntelligentTieringConfigurationRequest(arg0 *s3.DeleteBucketIntelligentTieringConfigurationInput) (*request.Request, *s3.DeleteBucketIntelligentTieringConfigurationOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteBucketIntelligentTieringConfigurationRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.DeleteBucketIntelligentTieringConfigurationOutput) - return ret0, ret1 -} - -// DeleteBucketIntelligentTieringConfigurationRequest indicates an expected call of DeleteBucketIntelligentTieringConfigurationRequest. -func (mr *MockS3APIMockRecorder) DeleteBucketIntelligentTieringConfigurationRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketIntelligentTieringConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).DeleteBucketIntelligentTieringConfigurationRequest), arg0) -} - -// DeleteBucketIntelligentTieringConfigurationWithContext mocks base method. -func (m *MockS3API) DeleteBucketIntelligentTieringConfigurationWithContext(arg0 aws.Context, arg1 *s3.DeleteBucketIntelligentTieringConfigurationInput, arg2 ...request.Option) (*s3.DeleteBucketIntelligentTieringConfigurationOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "DeleteBucketIntelligentTieringConfigurationWithContext", varargs...) - ret0, _ := ret[0].(*s3.DeleteBucketIntelligentTieringConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DeleteBucketIntelligentTieringConfigurationWithContext indicates an expected call of DeleteBucketIntelligentTieringConfigurationWithContext. 
-func (mr *MockS3APIMockRecorder) DeleteBucketIntelligentTieringConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketIntelligentTieringConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).DeleteBucketIntelligentTieringConfigurationWithContext), varargs...) -} - -// DeleteBucketInventoryConfiguration mocks base method. -func (m *MockS3API) DeleteBucketInventoryConfiguration(arg0 *s3.DeleteBucketInventoryConfigurationInput) (*s3.DeleteBucketInventoryConfigurationOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteBucketInventoryConfiguration", arg0) - ret0, _ := ret[0].(*s3.DeleteBucketInventoryConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DeleteBucketInventoryConfiguration indicates an expected call of DeleteBucketInventoryConfiguration. -func (mr *MockS3APIMockRecorder) DeleteBucketInventoryConfiguration(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketInventoryConfiguration", reflect.TypeOf((*MockS3API)(nil).DeleteBucketInventoryConfiguration), arg0) -} - -// DeleteBucketInventoryConfigurationRequest mocks base method. -func (m *MockS3API) DeleteBucketInventoryConfigurationRequest(arg0 *s3.DeleteBucketInventoryConfigurationInput) (*request.Request, *s3.DeleteBucketInventoryConfigurationOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteBucketInventoryConfigurationRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.DeleteBucketInventoryConfigurationOutput) - return ret0, ret1 -} - -// DeleteBucketInventoryConfigurationRequest indicates an expected call of DeleteBucketInventoryConfigurationRequest. -func (mr *MockS3APIMockRecorder) DeleteBucketInventoryConfigurationRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketInventoryConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).DeleteBucketInventoryConfigurationRequest), arg0) -} - -// DeleteBucketInventoryConfigurationWithContext mocks base method. -func (m *MockS3API) DeleteBucketInventoryConfigurationWithContext(arg0 aws.Context, arg1 *s3.DeleteBucketInventoryConfigurationInput, arg2 ...request.Option) (*s3.DeleteBucketInventoryConfigurationOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "DeleteBucketInventoryConfigurationWithContext", varargs...) - ret0, _ := ret[0].(*s3.DeleteBucketInventoryConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DeleteBucketInventoryConfigurationWithContext indicates an expected call of DeleteBucketInventoryConfigurationWithContext. -func (mr *MockS3APIMockRecorder) DeleteBucketInventoryConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketInventoryConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).DeleteBucketInventoryConfigurationWithContext), varargs...) -} - -// DeleteBucketLifecycle mocks base method. 
-func (m *MockS3API) DeleteBucketLifecycle(arg0 *s3.DeleteBucketLifecycleInput) (*s3.DeleteBucketLifecycleOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteBucketLifecycle", arg0) - ret0, _ := ret[0].(*s3.DeleteBucketLifecycleOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DeleteBucketLifecycle indicates an expected call of DeleteBucketLifecycle. -func (mr *MockS3APIMockRecorder) DeleteBucketLifecycle(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketLifecycle", reflect.TypeOf((*MockS3API)(nil).DeleteBucketLifecycle), arg0) -} - -// DeleteBucketLifecycleRequest mocks base method. -func (m *MockS3API) DeleteBucketLifecycleRequest(arg0 *s3.DeleteBucketLifecycleInput) (*request.Request, *s3.DeleteBucketLifecycleOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteBucketLifecycleRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.DeleteBucketLifecycleOutput) - return ret0, ret1 -} - -// DeleteBucketLifecycleRequest indicates an expected call of DeleteBucketLifecycleRequest. -func (mr *MockS3APIMockRecorder) DeleteBucketLifecycleRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketLifecycleRequest", reflect.TypeOf((*MockS3API)(nil).DeleteBucketLifecycleRequest), arg0) -} - -// DeleteBucketLifecycleWithContext mocks base method. -func (m *MockS3API) DeleteBucketLifecycleWithContext(arg0 aws.Context, arg1 *s3.DeleteBucketLifecycleInput, arg2 ...request.Option) (*s3.DeleteBucketLifecycleOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "DeleteBucketLifecycleWithContext", varargs...) - ret0, _ := ret[0].(*s3.DeleteBucketLifecycleOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DeleteBucketLifecycleWithContext indicates an expected call of DeleteBucketLifecycleWithContext. -func (mr *MockS3APIMockRecorder) DeleteBucketLifecycleWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketLifecycleWithContext", reflect.TypeOf((*MockS3API)(nil).DeleteBucketLifecycleWithContext), varargs...) -} - -// DeleteBucketMetricsConfiguration mocks base method. -func (m *MockS3API) DeleteBucketMetricsConfiguration(arg0 *s3.DeleteBucketMetricsConfigurationInput) (*s3.DeleteBucketMetricsConfigurationOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteBucketMetricsConfiguration", arg0) - ret0, _ := ret[0].(*s3.DeleteBucketMetricsConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DeleteBucketMetricsConfiguration indicates an expected call of DeleteBucketMetricsConfiguration. -func (mr *MockS3APIMockRecorder) DeleteBucketMetricsConfiguration(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketMetricsConfiguration", reflect.TypeOf((*MockS3API)(nil).DeleteBucketMetricsConfiguration), arg0) -} - -// DeleteBucketMetricsConfigurationRequest mocks base method. 
-func (m *MockS3API) DeleteBucketMetricsConfigurationRequest(arg0 *s3.DeleteBucketMetricsConfigurationInput) (*request.Request, *s3.DeleteBucketMetricsConfigurationOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteBucketMetricsConfigurationRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.DeleteBucketMetricsConfigurationOutput) - return ret0, ret1 -} - -// DeleteBucketMetricsConfigurationRequest indicates an expected call of DeleteBucketMetricsConfigurationRequest. -func (mr *MockS3APIMockRecorder) DeleteBucketMetricsConfigurationRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketMetricsConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).DeleteBucketMetricsConfigurationRequest), arg0) -} - -// DeleteBucketMetricsConfigurationWithContext mocks base method. -func (m *MockS3API) DeleteBucketMetricsConfigurationWithContext(arg0 aws.Context, arg1 *s3.DeleteBucketMetricsConfigurationInput, arg2 ...request.Option) (*s3.DeleteBucketMetricsConfigurationOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "DeleteBucketMetricsConfigurationWithContext", varargs...) - ret0, _ := ret[0].(*s3.DeleteBucketMetricsConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DeleteBucketMetricsConfigurationWithContext indicates an expected call of DeleteBucketMetricsConfigurationWithContext. -func (mr *MockS3APIMockRecorder) DeleteBucketMetricsConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketMetricsConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).DeleteBucketMetricsConfigurationWithContext), varargs...) -} - -// DeleteBucketOwnershipControls mocks base method. -func (m *MockS3API) DeleteBucketOwnershipControls(arg0 *s3.DeleteBucketOwnershipControlsInput) (*s3.DeleteBucketOwnershipControlsOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteBucketOwnershipControls", arg0) - ret0, _ := ret[0].(*s3.DeleteBucketOwnershipControlsOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DeleteBucketOwnershipControls indicates an expected call of DeleteBucketOwnershipControls. -func (mr *MockS3APIMockRecorder) DeleteBucketOwnershipControls(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketOwnershipControls", reflect.TypeOf((*MockS3API)(nil).DeleteBucketOwnershipControls), arg0) -} - -// DeleteBucketOwnershipControlsRequest mocks base method. -func (m *MockS3API) DeleteBucketOwnershipControlsRequest(arg0 *s3.DeleteBucketOwnershipControlsInput) (*request.Request, *s3.DeleteBucketOwnershipControlsOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteBucketOwnershipControlsRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.DeleteBucketOwnershipControlsOutput) - return ret0, ret1 -} - -// DeleteBucketOwnershipControlsRequest indicates an expected call of DeleteBucketOwnershipControlsRequest. 
-func (mr *MockS3APIMockRecorder) DeleteBucketOwnershipControlsRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketOwnershipControlsRequest", reflect.TypeOf((*MockS3API)(nil).DeleteBucketOwnershipControlsRequest), arg0) -} - -// DeleteBucketOwnershipControlsWithContext mocks base method. -func (m *MockS3API) DeleteBucketOwnershipControlsWithContext(arg0 aws.Context, arg1 *s3.DeleteBucketOwnershipControlsInput, arg2 ...request.Option) (*s3.DeleteBucketOwnershipControlsOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "DeleteBucketOwnershipControlsWithContext", varargs...) - ret0, _ := ret[0].(*s3.DeleteBucketOwnershipControlsOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DeleteBucketOwnershipControlsWithContext indicates an expected call of DeleteBucketOwnershipControlsWithContext. -func (mr *MockS3APIMockRecorder) DeleteBucketOwnershipControlsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketOwnershipControlsWithContext", reflect.TypeOf((*MockS3API)(nil).DeleteBucketOwnershipControlsWithContext), varargs...) -} - -// DeleteBucketPolicy mocks base method. -func (m *MockS3API) DeleteBucketPolicy(arg0 *s3.DeleteBucketPolicyInput) (*s3.DeleteBucketPolicyOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteBucketPolicy", arg0) - ret0, _ := ret[0].(*s3.DeleteBucketPolicyOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DeleteBucketPolicy indicates an expected call of DeleteBucketPolicy. -func (mr *MockS3APIMockRecorder) DeleteBucketPolicy(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketPolicy", reflect.TypeOf((*MockS3API)(nil).DeleteBucketPolicy), arg0) -} - -// DeleteBucketPolicyRequest mocks base method. -func (m *MockS3API) DeleteBucketPolicyRequest(arg0 *s3.DeleteBucketPolicyInput) (*request.Request, *s3.DeleteBucketPolicyOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteBucketPolicyRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.DeleteBucketPolicyOutput) - return ret0, ret1 -} - -// DeleteBucketPolicyRequest indicates an expected call of DeleteBucketPolicyRequest. -func (mr *MockS3APIMockRecorder) DeleteBucketPolicyRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketPolicyRequest", reflect.TypeOf((*MockS3API)(nil).DeleteBucketPolicyRequest), arg0) -} - -// DeleteBucketPolicyWithContext mocks base method. -func (m *MockS3API) DeleteBucketPolicyWithContext(arg0 aws.Context, arg1 *s3.DeleteBucketPolicyInput, arg2 ...request.Option) (*s3.DeleteBucketPolicyOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "DeleteBucketPolicyWithContext", varargs...) - ret0, _ := ret[0].(*s3.DeleteBucketPolicyOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DeleteBucketPolicyWithContext indicates an expected call of DeleteBucketPolicyWithContext. 
-func (mr *MockS3APIMockRecorder) DeleteBucketPolicyWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketPolicyWithContext", reflect.TypeOf((*MockS3API)(nil).DeleteBucketPolicyWithContext), varargs...) -} - -// DeleteBucketReplication mocks base method. -func (m *MockS3API) DeleteBucketReplication(arg0 *s3.DeleteBucketReplicationInput) (*s3.DeleteBucketReplicationOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteBucketReplication", arg0) - ret0, _ := ret[0].(*s3.DeleteBucketReplicationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DeleteBucketReplication indicates an expected call of DeleteBucketReplication. -func (mr *MockS3APIMockRecorder) DeleteBucketReplication(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketReplication", reflect.TypeOf((*MockS3API)(nil).DeleteBucketReplication), arg0) -} - -// DeleteBucketReplicationRequest mocks base method. -func (m *MockS3API) DeleteBucketReplicationRequest(arg0 *s3.DeleteBucketReplicationInput) (*request.Request, *s3.DeleteBucketReplicationOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteBucketReplicationRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.DeleteBucketReplicationOutput) - return ret0, ret1 -} - -// DeleteBucketReplicationRequest indicates an expected call of DeleteBucketReplicationRequest. -func (mr *MockS3APIMockRecorder) DeleteBucketReplicationRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketReplicationRequest", reflect.TypeOf((*MockS3API)(nil).DeleteBucketReplicationRequest), arg0) -} - -// DeleteBucketReplicationWithContext mocks base method. -func (m *MockS3API) DeleteBucketReplicationWithContext(arg0 aws.Context, arg1 *s3.DeleteBucketReplicationInput, arg2 ...request.Option) (*s3.DeleteBucketReplicationOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "DeleteBucketReplicationWithContext", varargs...) - ret0, _ := ret[0].(*s3.DeleteBucketReplicationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DeleteBucketReplicationWithContext indicates an expected call of DeleteBucketReplicationWithContext. -func (mr *MockS3APIMockRecorder) DeleteBucketReplicationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketReplicationWithContext", reflect.TypeOf((*MockS3API)(nil).DeleteBucketReplicationWithContext), varargs...) -} - -// DeleteBucketRequest mocks base method. -func (m *MockS3API) DeleteBucketRequest(arg0 *s3.DeleteBucketInput) (*request.Request, *s3.DeleteBucketOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteBucketRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.DeleteBucketOutput) - return ret0, ret1 -} - -// DeleteBucketRequest indicates an expected call of DeleteBucketRequest. 
-func (mr *MockS3APIMockRecorder) DeleteBucketRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketRequest", reflect.TypeOf((*MockS3API)(nil).DeleteBucketRequest), arg0) -} - -// DeleteBucketTagging mocks base method. -func (m *MockS3API) DeleteBucketTagging(arg0 *s3.DeleteBucketTaggingInput) (*s3.DeleteBucketTaggingOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteBucketTagging", arg0) - ret0, _ := ret[0].(*s3.DeleteBucketTaggingOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DeleteBucketTagging indicates an expected call of DeleteBucketTagging. -func (mr *MockS3APIMockRecorder) DeleteBucketTagging(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketTagging", reflect.TypeOf((*MockS3API)(nil).DeleteBucketTagging), arg0) -} - -// DeleteBucketTaggingRequest mocks base method. -func (m *MockS3API) DeleteBucketTaggingRequest(arg0 *s3.DeleteBucketTaggingInput) (*request.Request, *s3.DeleteBucketTaggingOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteBucketTaggingRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.DeleteBucketTaggingOutput) - return ret0, ret1 -} - -// DeleteBucketTaggingRequest indicates an expected call of DeleteBucketTaggingRequest. -func (mr *MockS3APIMockRecorder) DeleteBucketTaggingRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketTaggingRequest", reflect.TypeOf((*MockS3API)(nil).DeleteBucketTaggingRequest), arg0) -} - -// DeleteBucketTaggingWithContext mocks base method. -func (m *MockS3API) DeleteBucketTaggingWithContext(arg0 aws.Context, arg1 *s3.DeleteBucketTaggingInput, arg2 ...request.Option) (*s3.DeleteBucketTaggingOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "DeleteBucketTaggingWithContext", varargs...) - ret0, _ := ret[0].(*s3.DeleteBucketTaggingOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DeleteBucketTaggingWithContext indicates an expected call of DeleteBucketTaggingWithContext. -func (mr *MockS3APIMockRecorder) DeleteBucketTaggingWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketTaggingWithContext", reflect.TypeOf((*MockS3API)(nil).DeleteBucketTaggingWithContext), varargs...) -} - -// DeleteBucketWebsite mocks base method. -func (m *MockS3API) DeleteBucketWebsite(arg0 *s3.DeleteBucketWebsiteInput) (*s3.DeleteBucketWebsiteOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteBucketWebsite", arg0) - ret0, _ := ret[0].(*s3.DeleteBucketWebsiteOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DeleteBucketWebsite indicates an expected call of DeleteBucketWebsite. -func (mr *MockS3APIMockRecorder) DeleteBucketWebsite(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketWebsite", reflect.TypeOf((*MockS3API)(nil).DeleteBucketWebsite), arg0) -} - -// DeleteBucketWebsiteRequest mocks base method. 
-func (m *MockS3API) DeleteBucketWebsiteRequest(arg0 *s3.DeleteBucketWebsiteInput) (*request.Request, *s3.DeleteBucketWebsiteOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteBucketWebsiteRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.DeleteBucketWebsiteOutput) - return ret0, ret1 -} - -// DeleteBucketWebsiteRequest indicates an expected call of DeleteBucketWebsiteRequest. -func (mr *MockS3APIMockRecorder) DeleteBucketWebsiteRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketWebsiteRequest", reflect.TypeOf((*MockS3API)(nil).DeleteBucketWebsiteRequest), arg0) -} - -// DeleteBucketWebsiteWithContext mocks base method. -func (m *MockS3API) DeleteBucketWebsiteWithContext(arg0 aws.Context, arg1 *s3.DeleteBucketWebsiteInput, arg2 ...request.Option) (*s3.DeleteBucketWebsiteOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "DeleteBucketWebsiteWithContext", varargs...) - ret0, _ := ret[0].(*s3.DeleteBucketWebsiteOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DeleteBucketWebsiteWithContext indicates an expected call of DeleteBucketWebsiteWithContext. -func (mr *MockS3APIMockRecorder) DeleteBucketWebsiteWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketWebsiteWithContext", reflect.TypeOf((*MockS3API)(nil).DeleteBucketWebsiteWithContext), varargs...) -} - -// DeleteBucketWithContext mocks base method. -func (m *MockS3API) DeleteBucketWithContext(arg0 aws.Context, arg1 *s3.DeleteBucketInput, arg2 ...request.Option) (*s3.DeleteBucketOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "DeleteBucketWithContext", varargs...) - ret0, _ := ret[0].(*s3.DeleteBucketOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DeleteBucketWithContext indicates an expected call of DeleteBucketWithContext. -func (mr *MockS3APIMockRecorder) DeleteBucketWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketWithContext", reflect.TypeOf((*MockS3API)(nil).DeleteBucketWithContext), varargs...) -} - -// DeleteObject mocks base method. -func (m *MockS3API) DeleteObject(arg0 *s3.DeleteObjectInput) (*s3.DeleteObjectOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteObject", arg0) - ret0, _ := ret[0].(*s3.DeleteObjectOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DeleteObject indicates an expected call of DeleteObject. -func (mr *MockS3APIMockRecorder) DeleteObject(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteObject", reflect.TypeOf((*MockS3API)(nil).DeleteObject), arg0) -} - -// DeleteObjectRequest mocks base method. 
-func (m *MockS3API) DeleteObjectRequest(arg0 *s3.DeleteObjectInput) (*request.Request, *s3.DeleteObjectOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteObjectRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.DeleteObjectOutput) - return ret0, ret1 -} - -// DeleteObjectRequest indicates an expected call of DeleteObjectRequest. -func (mr *MockS3APIMockRecorder) DeleteObjectRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteObjectRequest", reflect.TypeOf((*MockS3API)(nil).DeleteObjectRequest), arg0) -} - -// DeleteObjectTagging mocks base method. -func (m *MockS3API) DeleteObjectTagging(arg0 *s3.DeleteObjectTaggingInput) (*s3.DeleteObjectTaggingOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteObjectTagging", arg0) - ret0, _ := ret[0].(*s3.DeleteObjectTaggingOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DeleteObjectTagging indicates an expected call of DeleteObjectTagging. -func (mr *MockS3APIMockRecorder) DeleteObjectTagging(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteObjectTagging", reflect.TypeOf((*MockS3API)(nil).DeleteObjectTagging), arg0) -} - -// DeleteObjectTaggingRequest mocks base method. -func (m *MockS3API) DeleteObjectTaggingRequest(arg0 *s3.DeleteObjectTaggingInput) (*request.Request, *s3.DeleteObjectTaggingOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteObjectTaggingRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.DeleteObjectTaggingOutput) - return ret0, ret1 -} - -// DeleteObjectTaggingRequest indicates an expected call of DeleteObjectTaggingRequest. -func (mr *MockS3APIMockRecorder) DeleteObjectTaggingRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteObjectTaggingRequest", reflect.TypeOf((*MockS3API)(nil).DeleteObjectTaggingRequest), arg0) -} - -// DeleteObjectTaggingWithContext mocks base method. -func (m *MockS3API) DeleteObjectTaggingWithContext(arg0 aws.Context, arg1 *s3.DeleteObjectTaggingInput, arg2 ...request.Option) (*s3.DeleteObjectTaggingOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "DeleteObjectTaggingWithContext", varargs...) - ret0, _ := ret[0].(*s3.DeleteObjectTaggingOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DeleteObjectTaggingWithContext indicates an expected call of DeleteObjectTaggingWithContext. -func (mr *MockS3APIMockRecorder) DeleteObjectTaggingWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteObjectTaggingWithContext", reflect.TypeOf((*MockS3API)(nil).DeleteObjectTaggingWithContext), varargs...) -} - -// DeleteObjectWithContext mocks base method. -func (m *MockS3API) DeleteObjectWithContext(arg0 aws.Context, arg1 *s3.DeleteObjectInput, arg2 ...request.Option) (*s3.DeleteObjectOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "DeleteObjectWithContext", varargs...) 
- ret0, _ := ret[0].(*s3.DeleteObjectOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DeleteObjectWithContext indicates an expected call of DeleteObjectWithContext. -func (mr *MockS3APIMockRecorder) DeleteObjectWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteObjectWithContext", reflect.TypeOf((*MockS3API)(nil).DeleteObjectWithContext), varargs...) -} - -// DeleteObjects mocks base method. -func (m *MockS3API) DeleteObjects(arg0 *s3.DeleteObjectsInput) (*s3.DeleteObjectsOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteObjects", arg0) - ret0, _ := ret[0].(*s3.DeleteObjectsOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DeleteObjects indicates an expected call of DeleteObjects. -func (mr *MockS3APIMockRecorder) DeleteObjects(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteObjects", reflect.TypeOf((*MockS3API)(nil).DeleteObjects), arg0) -} - -// DeleteObjectsRequest mocks base method. -func (m *MockS3API) DeleteObjectsRequest(arg0 *s3.DeleteObjectsInput) (*request.Request, *s3.DeleteObjectsOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteObjectsRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.DeleteObjectsOutput) - return ret0, ret1 -} - -// DeleteObjectsRequest indicates an expected call of DeleteObjectsRequest. -func (mr *MockS3APIMockRecorder) DeleteObjectsRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteObjectsRequest", reflect.TypeOf((*MockS3API)(nil).DeleteObjectsRequest), arg0) -} - -// DeleteObjectsWithContext mocks base method. -func (m *MockS3API) DeleteObjectsWithContext(arg0 aws.Context, arg1 *s3.DeleteObjectsInput, arg2 ...request.Option) (*s3.DeleteObjectsOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "DeleteObjectsWithContext", varargs...) - ret0, _ := ret[0].(*s3.DeleteObjectsOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DeleteObjectsWithContext indicates an expected call of DeleteObjectsWithContext. -func (mr *MockS3APIMockRecorder) DeleteObjectsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteObjectsWithContext", reflect.TypeOf((*MockS3API)(nil).DeleteObjectsWithContext), varargs...) -} - -// DeletePublicAccessBlock mocks base method. -func (m *MockS3API) DeletePublicAccessBlock(arg0 *s3.DeletePublicAccessBlockInput) (*s3.DeletePublicAccessBlockOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeletePublicAccessBlock", arg0) - ret0, _ := ret[0].(*s3.DeletePublicAccessBlockOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DeletePublicAccessBlock indicates an expected call of DeletePublicAccessBlock. -func (mr *MockS3APIMockRecorder) DeletePublicAccessBlock(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeletePublicAccessBlock", reflect.TypeOf((*MockS3API)(nil).DeletePublicAccessBlock), arg0) -} - -// DeletePublicAccessBlockRequest mocks base method. 
-func (m *MockS3API) DeletePublicAccessBlockRequest(arg0 *s3.DeletePublicAccessBlockInput) (*request.Request, *s3.DeletePublicAccessBlockOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeletePublicAccessBlockRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.DeletePublicAccessBlockOutput) - return ret0, ret1 -} - -// DeletePublicAccessBlockRequest indicates an expected call of DeletePublicAccessBlockRequest. -func (mr *MockS3APIMockRecorder) DeletePublicAccessBlockRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeletePublicAccessBlockRequest", reflect.TypeOf((*MockS3API)(nil).DeletePublicAccessBlockRequest), arg0) -} - -// DeletePublicAccessBlockWithContext mocks base method. -func (m *MockS3API) DeletePublicAccessBlockWithContext(arg0 aws.Context, arg1 *s3.DeletePublicAccessBlockInput, arg2 ...request.Option) (*s3.DeletePublicAccessBlockOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "DeletePublicAccessBlockWithContext", varargs...) - ret0, _ := ret[0].(*s3.DeletePublicAccessBlockOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DeletePublicAccessBlockWithContext indicates an expected call of DeletePublicAccessBlockWithContext. -func (mr *MockS3APIMockRecorder) DeletePublicAccessBlockWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeletePublicAccessBlockWithContext", reflect.TypeOf((*MockS3API)(nil).DeletePublicAccessBlockWithContext), varargs...) -} - -// GetBucketAccelerateConfiguration mocks base method. -func (m *MockS3API) GetBucketAccelerateConfiguration(arg0 *s3.GetBucketAccelerateConfigurationInput) (*s3.GetBucketAccelerateConfigurationOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketAccelerateConfiguration", arg0) - ret0, _ := ret[0].(*s3.GetBucketAccelerateConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketAccelerateConfiguration indicates an expected call of GetBucketAccelerateConfiguration. -func (mr *MockS3APIMockRecorder) GetBucketAccelerateConfiguration(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketAccelerateConfiguration", reflect.TypeOf((*MockS3API)(nil).GetBucketAccelerateConfiguration), arg0) -} - -// GetBucketAccelerateConfigurationRequest mocks base method. -func (m *MockS3API) GetBucketAccelerateConfigurationRequest(arg0 *s3.GetBucketAccelerateConfigurationInput) (*request.Request, *s3.GetBucketAccelerateConfigurationOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketAccelerateConfigurationRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.GetBucketAccelerateConfigurationOutput) - return ret0, ret1 -} - -// GetBucketAccelerateConfigurationRequest indicates an expected call of GetBucketAccelerateConfigurationRequest. -func (mr *MockS3APIMockRecorder) GetBucketAccelerateConfigurationRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketAccelerateConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketAccelerateConfigurationRequest), arg0) -} - -// GetBucketAccelerateConfigurationWithContext mocks base method. 
-func (m *MockS3API) GetBucketAccelerateConfigurationWithContext(arg0 aws.Context, arg1 *s3.GetBucketAccelerateConfigurationInput, arg2 ...request.Option) (*s3.GetBucketAccelerateConfigurationOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetBucketAccelerateConfigurationWithContext", varargs...) - ret0, _ := ret[0].(*s3.GetBucketAccelerateConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketAccelerateConfigurationWithContext indicates an expected call of GetBucketAccelerateConfigurationWithContext. -func (mr *MockS3APIMockRecorder) GetBucketAccelerateConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketAccelerateConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketAccelerateConfigurationWithContext), varargs...) -} - -// GetBucketAcl mocks base method. -func (m *MockS3API) GetBucketAcl(arg0 *s3.GetBucketAclInput) (*s3.GetBucketAclOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketAcl", arg0) - ret0, _ := ret[0].(*s3.GetBucketAclOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketAcl indicates an expected call of GetBucketAcl. -func (mr *MockS3APIMockRecorder) GetBucketAcl(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketAcl", reflect.TypeOf((*MockS3API)(nil).GetBucketAcl), arg0) -} - -// GetBucketAclRequest mocks base method. -func (m *MockS3API) GetBucketAclRequest(arg0 *s3.GetBucketAclInput) (*request.Request, *s3.GetBucketAclOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketAclRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.GetBucketAclOutput) - return ret0, ret1 -} - -// GetBucketAclRequest indicates an expected call of GetBucketAclRequest. -func (mr *MockS3APIMockRecorder) GetBucketAclRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketAclRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketAclRequest), arg0) -} - -// GetBucketAclWithContext mocks base method. -func (m *MockS3API) GetBucketAclWithContext(arg0 aws.Context, arg1 *s3.GetBucketAclInput, arg2 ...request.Option) (*s3.GetBucketAclOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetBucketAclWithContext", varargs...) - ret0, _ := ret[0].(*s3.GetBucketAclOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketAclWithContext indicates an expected call of GetBucketAclWithContext. -func (mr *MockS3APIMockRecorder) GetBucketAclWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketAclWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketAclWithContext), varargs...) -} - -// GetBucketAnalyticsConfiguration mocks base method. 
-func (m *MockS3API) GetBucketAnalyticsConfiguration(arg0 *s3.GetBucketAnalyticsConfigurationInput) (*s3.GetBucketAnalyticsConfigurationOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketAnalyticsConfiguration", arg0) - ret0, _ := ret[0].(*s3.GetBucketAnalyticsConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketAnalyticsConfiguration indicates an expected call of GetBucketAnalyticsConfiguration. -func (mr *MockS3APIMockRecorder) GetBucketAnalyticsConfiguration(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketAnalyticsConfiguration", reflect.TypeOf((*MockS3API)(nil).GetBucketAnalyticsConfiguration), arg0) -} - -// GetBucketAnalyticsConfigurationRequest mocks base method. -func (m *MockS3API) GetBucketAnalyticsConfigurationRequest(arg0 *s3.GetBucketAnalyticsConfigurationInput) (*request.Request, *s3.GetBucketAnalyticsConfigurationOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketAnalyticsConfigurationRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.GetBucketAnalyticsConfigurationOutput) - return ret0, ret1 -} - -// GetBucketAnalyticsConfigurationRequest indicates an expected call of GetBucketAnalyticsConfigurationRequest. -func (mr *MockS3APIMockRecorder) GetBucketAnalyticsConfigurationRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketAnalyticsConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketAnalyticsConfigurationRequest), arg0) -} - -// GetBucketAnalyticsConfigurationWithContext mocks base method. -func (m *MockS3API) GetBucketAnalyticsConfigurationWithContext(arg0 aws.Context, arg1 *s3.GetBucketAnalyticsConfigurationInput, arg2 ...request.Option) (*s3.GetBucketAnalyticsConfigurationOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetBucketAnalyticsConfigurationWithContext", varargs...) - ret0, _ := ret[0].(*s3.GetBucketAnalyticsConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketAnalyticsConfigurationWithContext indicates an expected call of GetBucketAnalyticsConfigurationWithContext. -func (mr *MockS3APIMockRecorder) GetBucketAnalyticsConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketAnalyticsConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketAnalyticsConfigurationWithContext), varargs...) -} - -// GetBucketCors mocks base method. -func (m *MockS3API) GetBucketCors(arg0 *s3.GetBucketCorsInput) (*s3.GetBucketCorsOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketCors", arg0) - ret0, _ := ret[0].(*s3.GetBucketCorsOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketCors indicates an expected call of GetBucketCors. -func (mr *MockS3APIMockRecorder) GetBucketCors(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketCors", reflect.TypeOf((*MockS3API)(nil).GetBucketCors), arg0) -} - -// GetBucketCorsRequest mocks base method. 
-func (m *MockS3API) GetBucketCorsRequest(arg0 *s3.GetBucketCorsInput) (*request.Request, *s3.GetBucketCorsOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketCorsRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.GetBucketCorsOutput) - return ret0, ret1 -} - -// GetBucketCorsRequest indicates an expected call of GetBucketCorsRequest. -func (mr *MockS3APIMockRecorder) GetBucketCorsRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketCorsRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketCorsRequest), arg0) -} - -// GetBucketCorsWithContext mocks base method. -func (m *MockS3API) GetBucketCorsWithContext(arg0 aws.Context, arg1 *s3.GetBucketCorsInput, arg2 ...request.Option) (*s3.GetBucketCorsOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetBucketCorsWithContext", varargs...) - ret0, _ := ret[0].(*s3.GetBucketCorsOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketCorsWithContext indicates an expected call of GetBucketCorsWithContext. -func (mr *MockS3APIMockRecorder) GetBucketCorsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketCorsWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketCorsWithContext), varargs...) -} - -// GetBucketEncryption mocks base method. -func (m *MockS3API) GetBucketEncryption(arg0 *s3.GetBucketEncryptionInput) (*s3.GetBucketEncryptionOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketEncryption", arg0) - ret0, _ := ret[0].(*s3.GetBucketEncryptionOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketEncryption indicates an expected call of GetBucketEncryption. -func (mr *MockS3APIMockRecorder) GetBucketEncryption(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketEncryption", reflect.TypeOf((*MockS3API)(nil).GetBucketEncryption), arg0) -} - -// GetBucketEncryptionRequest mocks base method. -func (m *MockS3API) GetBucketEncryptionRequest(arg0 *s3.GetBucketEncryptionInput) (*request.Request, *s3.GetBucketEncryptionOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketEncryptionRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.GetBucketEncryptionOutput) - return ret0, ret1 -} - -// GetBucketEncryptionRequest indicates an expected call of GetBucketEncryptionRequest. -func (mr *MockS3APIMockRecorder) GetBucketEncryptionRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketEncryptionRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketEncryptionRequest), arg0) -} - -// GetBucketEncryptionWithContext mocks base method. -func (m *MockS3API) GetBucketEncryptionWithContext(arg0 aws.Context, arg1 *s3.GetBucketEncryptionInput, arg2 ...request.Option) (*s3.GetBucketEncryptionOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetBucketEncryptionWithContext", varargs...) 
- ret0, _ := ret[0].(*s3.GetBucketEncryptionOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketEncryptionWithContext indicates an expected call of GetBucketEncryptionWithContext. -func (mr *MockS3APIMockRecorder) GetBucketEncryptionWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketEncryptionWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketEncryptionWithContext), varargs...) -} - -// GetBucketIntelligentTieringConfiguration mocks base method. -func (m *MockS3API) GetBucketIntelligentTieringConfiguration(arg0 *s3.GetBucketIntelligentTieringConfigurationInput) (*s3.GetBucketIntelligentTieringConfigurationOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketIntelligentTieringConfiguration", arg0) - ret0, _ := ret[0].(*s3.GetBucketIntelligentTieringConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketIntelligentTieringConfiguration indicates an expected call of GetBucketIntelligentTieringConfiguration. -func (mr *MockS3APIMockRecorder) GetBucketIntelligentTieringConfiguration(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketIntelligentTieringConfiguration", reflect.TypeOf((*MockS3API)(nil).GetBucketIntelligentTieringConfiguration), arg0) -} - -// GetBucketIntelligentTieringConfigurationRequest mocks base method. -func (m *MockS3API) GetBucketIntelligentTieringConfigurationRequest(arg0 *s3.GetBucketIntelligentTieringConfigurationInput) (*request.Request, *s3.GetBucketIntelligentTieringConfigurationOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketIntelligentTieringConfigurationRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.GetBucketIntelligentTieringConfigurationOutput) - return ret0, ret1 -} - -// GetBucketIntelligentTieringConfigurationRequest indicates an expected call of GetBucketIntelligentTieringConfigurationRequest. -func (mr *MockS3APIMockRecorder) GetBucketIntelligentTieringConfigurationRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketIntelligentTieringConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketIntelligentTieringConfigurationRequest), arg0) -} - -// GetBucketIntelligentTieringConfigurationWithContext mocks base method. -func (m *MockS3API) GetBucketIntelligentTieringConfigurationWithContext(arg0 aws.Context, arg1 *s3.GetBucketIntelligentTieringConfigurationInput, arg2 ...request.Option) (*s3.GetBucketIntelligentTieringConfigurationOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetBucketIntelligentTieringConfigurationWithContext", varargs...) - ret0, _ := ret[0].(*s3.GetBucketIntelligentTieringConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketIntelligentTieringConfigurationWithContext indicates an expected call of GetBucketIntelligentTieringConfigurationWithContext. -func (mr *MockS3APIMockRecorder) GetBucketIntelligentTieringConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) 
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketIntelligentTieringConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketIntelligentTieringConfigurationWithContext), varargs...) -} - -// GetBucketInventoryConfiguration mocks base method. -func (m *MockS3API) GetBucketInventoryConfiguration(arg0 *s3.GetBucketInventoryConfigurationInput) (*s3.GetBucketInventoryConfigurationOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketInventoryConfiguration", arg0) - ret0, _ := ret[0].(*s3.GetBucketInventoryConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketInventoryConfiguration indicates an expected call of GetBucketInventoryConfiguration. -func (mr *MockS3APIMockRecorder) GetBucketInventoryConfiguration(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketInventoryConfiguration", reflect.TypeOf((*MockS3API)(nil).GetBucketInventoryConfiguration), arg0) -} - -// GetBucketInventoryConfigurationRequest mocks base method. -func (m *MockS3API) GetBucketInventoryConfigurationRequest(arg0 *s3.GetBucketInventoryConfigurationInput) (*request.Request, *s3.GetBucketInventoryConfigurationOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketInventoryConfigurationRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.GetBucketInventoryConfigurationOutput) - return ret0, ret1 -} - -// GetBucketInventoryConfigurationRequest indicates an expected call of GetBucketInventoryConfigurationRequest. -func (mr *MockS3APIMockRecorder) GetBucketInventoryConfigurationRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketInventoryConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketInventoryConfigurationRequest), arg0) -} - -// GetBucketInventoryConfigurationWithContext mocks base method. -func (m *MockS3API) GetBucketInventoryConfigurationWithContext(arg0 aws.Context, arg1 *s3.GetBucketInventoryConfigurationInput, arg2 ...request.Option) (*s3.GetBucketInventoryConfigurationOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetBucketInventoryConfigurationWithContext", varargs...) - ret0, _ := ret[0].(*s3.GetBucketInventoryConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketInventoryConfigurationWithContext indicates an expected call of GetBucketInventoryConfigurationWithContext. -func (mr *MockS3APIMockRecorder) GetBucketInventoryConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketInventoryConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketInventoryConfigurationWithContext), varargs...) -} - -// GetBucketLifecycle mocks base method. -func (m *MockS3API) GetBucketLifecycle(arg0 *s3.GetBucketLifecycleInput) (*s3.GetBucketLifecycleOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketLifecycle", arg0) - ret0, _ := ret[0].(*s3.GetBucketLifecycleOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketLifecycle indicates an expected call of GetBucketLifecycle. 
-func (mr *MockS3APIMockRecorder) GetBucketLifecycle(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketLifecycle", reflect.TypeOf((*MockS3API)(nil).GetBucketLifecycle), arg0) -} - -// GetBucketLifecycleConfiguration mocks base method. -func (m *MockS3API) GetBucketLifecycleConfiguration(arg0 *s3.GetBucketLifecycleConfigurationInput) (*s3.GetBucketLifecycleConfigurationOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketLifecycleConfiguration", arg0) - ret0, _ := ret[0].(*s3.GetBucketLifecycleConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketLifecycleConfiguration indicates an expected call of GetBucketLifecycleConfiguration. -func (mr *MockS3APIMockRecorder) GetBucketLifecycleConfiguration(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketLifecycleConfiguration", reflect.TypeOf((*MockS3API)(nil).GetBucketLifecycleConfiguration), arg0) -} - -// GetBucketLifecycleConfigurationRequest mocks base method. -func (m *MockS3API) GetBucketLifecycleConfigurationRequest(arg0 *s3.GetBucketLifecycleConfigurationInput) (*request.Request, *s3.GetBucketLifecycleConfigurationOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketLifecycleConfigurationRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.GetBucketLifecycleConfigurationOutput) - return ret0, ret1 -} - -// GetBucketLifecycleConfigurationRequest indicates an expected call of GetBucketLifecycleConfigurationRequest. -func (mr *MockS3APIMockRecorder) GetBucketLifecycleConfigurationRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketLifecycleConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketLifecycleConfigurationRequest), arg0) -} - -// GetBucketLifecycleConfigurationWithContext mocks base method. -func (m *MockS3API) GetBucketLifecycleConfigurationWithContext(arg0 aws.Context, arg1 *s3.GetBucketLifecycleConfigurationInput, arg2 ...request.Option) (*s3.GetBucketLifecycleConfigurationOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetBucketLifecycleConfigurationWithContext", varargs...) - ret0, _ := ret[0].(*s3.GetBucketLifecycleConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketLifecycleConfigurationWithContext indicates an expected call of GetBucketLifecycleConfigurationWithContext. -func (mr *MockS3APIMockRecorder) GetBucketLifecycleConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketLifecycleConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketLifecycleConfigurationWithContext), varargs...) -} - -// GetBucketLifecycleRequest mocks base method. -func (m *MockS3API) GetBucketLifecycleRequest(arg0 *s3.GetBucketLifecycleInput) (*request.Request, *s3.GetBucketLifecycleOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketLifecycleRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.GetBucketLifecycleOutput) - return ret0, ret1 -} - -// GetBucketLifecycleRequest indicates an expected call of GetBucketLifecycleRequest. 
-func (mr *MockS3APIMockRecorder) GetBucketLifecycleRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketLifecycleRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketLifecycleRequest), arg0) -} - -// GetBucketLifecycleWithContext mocks base method. -func (m *MockS3API) GetBucketLifecycleWithContext(arg0 aws.Context, arg1 *s3.GetBucketLifecycleInput, arg2 ...request.Option) (*s3.GetBucketLifecycleOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetBucketLifecycleWithContext", varargs...) - ret0, _ := ret[0].(*s3.GetBucketLifecycleOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketLifecycleWithContext indicates an expected call of GetBucketLifecycleWithContext. -func (mr *MockS3APIMockRecorder) GetBucketLifecycleWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketLifecycleWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketLifecycleWithContext), varargs...) -} - -// GetBucketLocation mocks base method. -func (m *MockS3API) GetBucketLocation(arg0 *s3.GetBucketLocationInput) (*s3.GetBucketLocationOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketLocation", arg0) - ret0, _ := ret[0].(*s3.GetBucketLocationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketLocation indicates an expected call of GetBucketLocation. -func (mr *MockS3APIMockRecorder) GetBucketLocation(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketLocation", reflect.TypeOf((*MockS3API)(nil).GetBucketLocation), arg0) -} - -// GetBucketLocationRequest mocks base method. -func (m *MockS3API) GetBucketLocationRequest(arg0 *s3.GetBucketLocationInput) (*request.Request, *s3.GetBucketLocationOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketLocationRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.GetBucketLocationOutput) - return ret0, ret1 -} - -// GetBucketLocationRequest indicates an expected call of GetBucketLocationRequest. -func (mr *MockS3APIMockRecorder) GetBucketLocationRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketLocationRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketLocationRequest), arg0) -} - -// GetBucketLocationWithContext mocks base method. -func (m *MockS3API) GetBucketLocationWithContext(arg0 aws.Context, arg1 *s3.GetBucketLocationInput, arg2 ...request.Option) (*s3.GetBucketLocationOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetBucketLocationWithContext", varargs...) - ret0, _ := ret[0].(*s3.GetBucketLocationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketLocationWithContext indicates an expected call of GetBucketLocationWithContext. -func (mr *MockS3APIMockRecorder) GetBucketLocationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) 
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketLocationWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketLocationWithContext), varargs...) -} - -// GetBucketLogging mocks base method. -func (m *MockS3API) GetBucketLogging(arg0 *s3.GetBucketLoggingInput) (*s3.GetBucketLoggingOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketLogging", arg0) - ret0, _ := ret[0].(*s3.GetBucketLoggingOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketLogging indicates an expected call of GetBucketLogging. -func (mr *MockS3APIMockRecorder) GetBucketLogging(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketLogging", reflect.TypeOf((*MockS3API)(nil).GetBucketLogging), arg0) -} - -// GetBucketLoggingRequest mocks base method. -func (m *MockS3API) GetBucketLoggingRequest(arg0 *s3.GetBucketLoggingInput) (*request.Request, *s3.GetBucketLoggingOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketLoggingRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.GetBucketLoggingOutput) - return ret0, ret1 -} - -// GetBucketLoggingRequest indicates an expected call of GetBucketLoggingRequest. -func (mr *MockS3APIMockRecorder) GetBucketLoggingRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketLoggingRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketLoggingRequest), arg0) -} - -// GetBucketLoggingWithContext mocks base method. -func (m *MockS3API) GetBucketLoggingWithContext(arg0 aws.Context, arg1 *s3.GetBucketLoggingInput, arg2 ...request.Option) (*s3.GetBucketLoggingOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetBucketLoggingWithContext", varargs...) - ret0, _ := ret[0].(*s3.GetBucketLoggingOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketLoggingWithContext indicates an expected call of GetBucketLoggingWithContext. -func (mr *MockS3APIMockRecorder) GetBucketLoggingWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketLoggingWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketLoggingWithContext), varargs...) -} - -// GetBucketMetricsConfiguration mocks base method. -func (m *MockS3API) GetBucketMetricsConfiguration(arg0 *s3.GetBucketMetricsConfigurationInput) (*s3.GetBucketMetricsConfigurationOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketMetricsConfiguration", arg0) - ret0, _ := ret[0].(*s3.GetBucketMetricsConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketMetricsConfiguration indicates an expected call of GetBucketMetricsConfiguration. -func (mr *MockS3APIMockRecorder) GetBucketMetricsConfiguration(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketMetricsConfiguration", reflect.TypeOf((*MockS3API)(nil).GetBucketMetricsConfiguration), arg0) -} - -// GetBucketMetricsConfigurationRequest mocks base method. 
-func (m *MockS3API) GetBucketMetricsConfigurationRequest(arg0 *s3.GetBucketMetricsConfigurationInput) (*request.Request, *s3.GetBucketMetricsConfigurationOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketMetricsConfigurationRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.GetBucketMetricsConfigurationOutput) - return ret0, ret1 -} - -// GetBucketMetricsConfigurationRequest indicates an expected call of GetBucketMetricsConfigurationRequest. -func (mr *MockS3APIMockRecorder) GetBucketMetricsConfigurationRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketMetricsConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketMetricsConfigurationRequest), arg0) -} - -// GetBucketMetricsConfigurationWithContext mocks base method. -func (m *MockS3API) GetBucketMetricsConfigurationWithContext(arg0 aws.Context, arg1 *s3.GetBucketMetricsConfigurationInput, arg2 ...request.Option) (*s3.GetBucketMetricsConfigurationOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetBucketMetricsConfigurationWithContext", varargs...) - ret0, _ := ret[0].(*s3.GetBucketMetricsConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketMetricsConfigurationWithContext indicates an expected call of GetBucketMetricsConfigurationWithContext. -func (mr *MockS3APIMockRecorder) GetBucketMetricsConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketMetricsConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketMetricsConfigurationWithContext), varargs...) -} - -// GetBucketNotification mocks base method. -func (m *MockS3API) GetBucketNotification(arg0 *s3.GetBucketNotificationConfigurationRequest) (*s3.NotificationConfigurationDeprecated, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketNotification", arg0) - ret0, _ := ret[0].(*s3.NotificationConfigurationDeprecated) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketNotification indicates an expected call of GetBucketNotification. -func (mr *MockS3APIMockRecorder) GetBucketNotification(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketNotification", reflect.TypeOf((*MockS3API)(nil).GetBucketNotification), arg0) -} - -// GetBucketNotificationConfiguration mocks base method. -func (m *MockS3API) GetBucketNotificationConfiguration(arg0 *s3.GetBucketNotificationConfigurationRequest) (*s3.NotificationConfiguration, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketNotificationConfiguration", arg0) - ret0, _ := ret[0].(*s3.NotificationConfiguration) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketNotificationConfiguration indicates an expected call of GetBucketNotificationConfiguration. -func (mr *MockS3APIMockRecorder) GetBucketNotificationConfiguration(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketNotificationConfiguration", reflect.TypeOf((*MockS3API)(nil).GetBucketNotificationConfiguration), arg0) -} - -// GetBucketNotificationConfigurationRequest mocks base method. 
-func (m *MockS3API) GetBucketNotificationConfigurationRequest(arg0 *s3.GetBucketNotificationConfigurationRequest) (*request.Request, *s3.NotificationConfiguration) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketNotificationConfigurationRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.NotificationConfiguration) - return ret0, ret1 -} - -// GetBucketNotificationConfigurationRequest indicates an expected call of GetBucketNotificationConfigurationRequest. -func (mr *MockS3APIMockRecorder) GetBucketNotificationConfigurationRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketNotificationConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketNotificationConfigurationRequest), arg0) -} - -// GetBucketNotificationConfigurationWithContext mocks base method. -func (m *MockS3API) GetBucketNotificationConfigurationWithContext(arg0 aws.Context, arg1 *s3.GetBucketNotificationConfigurationRequest, arg2 ...request.Option) (*s3.NotificationConfiguration, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetBucketNotificationConfigurationWithContext", varargs...) - ret0, _ := ret[0].(*s3.NotificationConfiguration) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketNotificationConfigurationWithContext indicates an expected call of GetBucketNotificationConfigurationWithContext. -func (mr *MockS3APIMockRecorder) GetBucketNotificationConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketNotificationConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketNotificationConfigurationWithContext), varargs...) -} - -// GetBucketNotificationRequest mocks base method. -func (m *MockS3API) GetBucketNotificationRequest(arg0 *s3.GetBucketNotificationConfigurationRequest) (*request.Request, *s3.NotificationConfigurationDeprecated) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketNotificationRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.NotificationConfigurationDeprecated) - return ret0, ret1 -} - -// GetBucketNotificationRequest indicates an expected call of GetBucketNotificationRequest. -func (mr *MockS3APIMockRecorder) GetBucketNotificationRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketNotificationRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketNotificationRequest), arg0) -} - -// GetBucketNotificationWithContext mocks base method. -func (m *MockS3API) GetBucketNotificationWithContext(arg0 aws.Context, arg1 *s3.GetBucketNotificationConfigurationRequest, arg2 ...request.Option) (*s3.NotificationConfigurationDeprecated, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetBucketNotificationWithContext", varargs...) - ret0, _ := ret[0].(*s3.NotificationConfigurationDeprecated) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketNotificationWithContext indicates an expected call of GetBucketNotificationWithContext. 
-func (mr *MockS3APIMockRecorder) GetBucketNotificationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketNotificationWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketNotificationWithContext), varargs...) -} - -// GetBucketOwnershipControls mocks base method. -func (m *MockS3API) GetBucketOwnershipControls(arg0 *s3.GetBucketOwnershipControlsInput) (*s3.GetBucketOwnershipControlsOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketOwnershipControls", arg0) - ret0, _ := ret[0].(*s3.GetBucketOwnershipControlsOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketOwnershipControls indicates an expected call of GetBucketOwnershipControls. -func (mr *MockS3APIMockRecorder) GetBucketOwnershipControls(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketOwnershipControls", reflect.TypeOf((*MockS3API)(nil).GetBucketOwnershipControls), arg0) -} - -// GetBucketOwnershipControlsRequest mocks base method. -func (m *MockS3API) GetBucketOwnershipControlsRequest(arg0 *s3.GetBucketOwnershipControlsInput) (*request.Request, *s3.GetBucketOwnershipControlsOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketOwnershipControlsRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.GetBucketOwnershipControlsOutput) - return ret0, ret1 -} - -// GetBucketOwnershipControlsRequest indicates an expected call of GetBucketOwnershipControlsRequest. -func (mr *MockS3APIMockRecorder) GetBucketOwnershipControlsRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketOwnershipControlsRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketOwnershipControlsRequest), arg0) -} - -// GetBucketOwnershipControlsWithContext mocks base method. -func (m *MockS3API) GetBucketOwnershipControlsWithContext(arg0 aws.Context, arg1 *s3.GetBucketOwnershipControlsInput, arg2 ...request.Option) (*s3.GetBucketOwnershipControlsOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetBucketOwnershipControlsWithContext", varargs...) - ret0, _ := ret[0].(*s3.GetBucketOwnershipControlsOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketOwnershipControlsWithContext indicates an expected call of GetBucketOwnershipControlsWithContext. -func (mr *MockS3APIMockRecorder) GetBucketOwnershipControlsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketOwnershipControlsWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketOwnershipControlsWithContext), varargs...) -} - -// GetBucketPolicy mocks base method. -func (m *MockS3API) GetBucketPolicy(arg0 *s3.GetBucketPolicyInput) (*s3.GetBucketPolicyOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketPolicy", arg0) - ret0, _ := ret[0].(*s3.GetBucketPolicyOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketPolicy indicates an expected call of GetBucketPolicy. 
-func (mr *MockS3APIMockRecorder) GetBucketPolicy(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketPolicy", reflect.TypeOf((*MockS3API)(nil).GetBucketPolicy), arg0) -} - -// GetBucketPolicyRequest mocks base method. -func (m *MockS3API) GetBucketPolicyRequest(arg0 *s3.GetBucketPolicyInput) (*request.Request, *s3.GetBucketPolicyOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketPolicyRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.GetBucketPolicyOutput) - return ret0, ret1 -} - -// GetBucketPolicyRequest indicates an expected call of GetBucketPolicyRequest. -func (mr *MockS3APIMockRecorder) GetBucketPolicyRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketPolicyRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketPolicyRequest), arg0) -} - -// GetBucketPolicyStatus mocks base method. -func (m *MockS3API) GetBucketPolicyStatus(arg0 *s3.GetBucketPolicyStatusInput) (*s3.GetBucketPolicyStatusOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketPolicyStatus", arg0) - ret0, _ := ret[0].(*s3.GetBucketPolicyStatusOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketPolicyStatus indicates an expected call of GetBucketPolicyStatus. -func (mr *MockS3APIMockRecorder) GetBucketPolicyStatus(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketPolicyStatus", reflect.TypeOf((*MockS3API)(nil).GetBucketPolicyStatus), arg0) -} - -// GetBucketPolicyStatusRequest mocks base method. -func (m *MockS3API) GetBucketPolicyStatusRequest(arg0 *s3.GetBucketPolicyStatusInput) (*request.Request, *s3.GetBucketPolicyStatusOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketPolicyStatusRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.GetBucketPolicyStatusOutput) - return ret0, ret1 -} - -// GetBucketPolicyStatusRequest indicates an expected call of GetBucketPolicyStatusRequest. -func (mr *MockS3APIMockRecorder) GetBucketPolicyStatusRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketPolicyStatusRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketPolicyStatusRequest), arg0) -} - -// GetBucketPolicyStatusWithContext mocks base method. -func (m *MockS3API) GetBucketPolicyStatusWithContext(arg0 aws.Context, arg1 *s3.GetBucketPolicyStatusInput, arg2 ...request.Option) (*s3.GetBucketPolicyStatusOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetBucketPolicyStatusWithContext", varargs...) - ret0, _ := ret[0].(*s3.GetBucketPolicyStatusOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketPolicyStatusWithContext indicates an expected call of GetBucketPolicyStatusWithContext. -func (mr *MockS3APIMockRecorder) GetBucketPolicyStatusWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketPolicyStatusWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketPolicyStatusWithContext), varargs...) -} - -// GetBucketPolicyWithContext mocks base method. 
-func (m *MockS3API) GetBucketPolicyWithContext(arg0 aws.Context, arg1 *s3.GetBucketPolicyInput, arg2 ...request.Option) (*s3.GetBucketPolicyOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetBucketPolicyWithContext", varargs...) - ret0, _ := ret[0].(*s3.GetBucketPolicyOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketPolicyWithContext indicates an expected call of GetBucketPolicyWithContext. -func (mr *MockS3APIMockRecorder) GetBucketPolicyWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketPolicyWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketPolicyWithContext), varargs...) -} - -// GetBucketReplication mocks base method. -func (m *MockS3API) GetBucketReplication(arg0 *s3.GetBucketReplicationInput) (*s3.GetBucketReplicationOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketReplication", arg0) - ret0, _ := ret[0].(*s3.GetBucketReplicationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketReplication indicates an expected call of GetBucketReplication. -func (mr *MockS3APIMockRecorder) GetBucketReplication(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketReplication", reflect.TypeOf((*MockS3API)(nil).GetBucketReplication), arg0) -} - -// GetBucketReplicationRequest mocks base method. -func (m *MockS3API) GetBucketReplicationRequest(arg0 *s3.GetBucketReplicationInput) (*request.Request, *s3.GetBucketReplicationOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketReplicationRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.GetBucketReplicationOutput) - return ret0, ret1 -} - -// GetBucketReplicationRequest indicates an expected call of GetBucketReplicationRequest. -func (mr *MockS3APIMockRecorder) GetBucketReplicationRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketReplicationRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketReplicationRequest), arg0) -} - -// GetBucketReplicationWithContext mocks base method. -func (m *MockS3API) GetBucketReplicationWithContext(arg0 aws.Context, arg1 *s3.GetBucketReplicationInput, arg2 ...request.Option) (*s3.GetBucketReplicationOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetBucketReplicationWithContext", varargs...) - ret0, _ := ret[0].(*s3.GetBucketReplicationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketReplicationWithContext indicates an expected call of GetBucketReplicationWithContext. -func (mr *MockS3APIMockRecorder) GetBucketReplicationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketReplicationWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketReplicationWithContext), varargs...) -} - -// GetBucketRequestPayment mocks base method. 
-func (m *MockS3API) GetBucketRequestPayment(arg0 *s3.GetBucketRequestPaymentInput) (*s3.GetBucketRequestPaymentOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketRequestPayment", arg0) - ret0, _ := ret[0].(*s3.GetBucketRequestPaymentOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketRequestPayment indicates an expected call of GetBucketRequestPayment. -func (mr *MockS3APIMockRecorder) GetBucketRequestPayment(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketRequestPayment", reflect.TypeOf((*MockS3API)(nil).GetBucketRequestPayment), arg0) -} - -// GetBucketRequestPaymentRequest mocks base method. -func (m *MockS3API) GetBucketRequestPaymentRequest(arg0 *s3.GetBucketRequestPaymentInput) (*request.Request, *s3.GetBucketRequestPaymentOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketRequestPaymentRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.GetBucketRequestPaymentOutput) - return ret0, ret1 -} - -// GetBucketRequestPaymentRequest indicates an expected call of GetBucketRequestPaymentRequest. -func (mr *MockS3APIMockRecorder) GetBucketRequestPaymentRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketRequestPaymentRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketRequestPaymentRequest), arg0) -} - -// GetBucketRequestPaymentWithContext mocks base method. -func (m *MockS3API) GetBucketRequestPaymentWithContext(arg0 aws.Context, arg1 *s3.GetBucketRequestPaymentInput, arg2 ...request.Option) (*s3.GetBucketRequestPaymentOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetBucketRequestPaymentWithContext", varargs...) - ret0, _ := ret[0].(*s3.GetBucketRequestPaymentOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketRequestPaymentWithContext indicates an expected call of GetBucketRequestPaymentWithContext. -func (mr *MockS3APIMockRecorder) GetBucketRequestPaymentWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketRequestPaymentWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketRequestPaymentWithContext), varargs...) -} - -// GetBucketTagging mocks base method. -func (m *MockS3API) GetBucketTagging(arg0 *s3.GetBucketTaggingInput) (*s3.GetBucketTaggingOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketTagging", arg0) - ret0, _ := ret[0].(*s3.GetBucketTaggingOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketTagging indicates an expected call of GetBucketTagging. -func (mr *MockS3APIMockRecorder) GetBucketTagging(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketTagging", reflect.TypeOf((*MockS3API)(nil).GetBucketTagging), arg0) -} - -// GetBucketTaggingRequest mocks base method. 
-func (m *MockS3API) GetBucketTaggingRequest(arg0 *s3.GetBucketTaggingInput) (*request.Request, *s3.GetBucketTaggingOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketTaggingRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.GetBucketTaggingOutput) - return ret0, ret1 -} - -// GetBucketTaggingRequest indicates an expected call of GetBucketTaggingRequest. -func (mr *MockS3APIMockRecorder) GetBucketTaggingRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketTaggingRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketTaggingRequest), arg0) -} - -// GetBucketTaggingWithContext mocks base method. -func (m *MockS3API) GetBucketTaggingWithContext(arg0 aws.Context, arg1 *s3.GetBucketTaggingInput, arg2 ...request.Option) (*s3.GetBucketTaggingOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetBucketTaggingWithContext", varargs...) - ret0, _ := ret[0].(*s3.GetBucketTaggingOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketTaggingWithContext indicates an expected call of GetBucketTaggingWithContext. -func (mr *MockS3APIMockRecorder) GetBucketTaggingWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketTaggingWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketTaggingWithContext), varargs...) -} - -// GetBucketVersioning mocks base method. -func (m *MockS3API) GetBucketVersioning(arg0 *s3.GetBucketVersioningInput) (*s3.GetBucketVersioningOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketVersioning", arg0) - ret0, _ := ret[0].(*s3.GetBucketVersioningOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketVersioning indicates an expected call of GetBucketVersioning. -func (mr *MockS3APIMockRecorder) GetBucketVersioning(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketVersioning", reflect.TypeOf((*MockS3API)(nil).GetBucketVersioning), arg0) -} - -// GetBucketVersioningRequest mocks base method. -func (m *MockS3API) GetBucketVersioningRequest(arg0 *s3.GetBucketVersioningInput) (*request.Request, *s3.GetBucketVersioningOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketVersioningRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.GetBucketVersioningOutput) - return ret0, ret1 -} - -// GetBucketVersioningRequest indicates an expected call of GetBucketVersioningRequest. -func (mr *MockS3APIMockRecorder) GetBucketVersioningRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketVersioningRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketVersioningRequest), arg0) -} - -// GetBucketVersioningWithContext mocks base method. -func (m *MockS3API) GetBucketVersioningWithContext(arg0 aws.Context, arg1 *s3.GetBucketVersioningInput, arg2 ...request.Option) (*s3.GetBucketVersioningOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetBucketVersioningWithContext", varargs...) 
- ret0, _ := ret[0].(*s3.GetBucketVersioningOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketVersioningWithContext indicates an expected call of GetBucketVersioningWithContext. -func (mr *MockS3APIMockRecorder) GetBucketVersioningWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketVersioningWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketVersioningWithContext), varargs...) -} - -// GetBucketWebsite mocks base method. -func (m *MockS3API) GetBucketWebsite(arg0 *s3.GetBucketWebsiteInput) (*s3.GetBucketWebsiteOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketWebsite", arg0) - ret0, _ := ret[0].(*s3.GetBucketWebsiteOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketWebsite indicates an expected call of GetBucketWebsite. -func (mr *MockS3APIMockRecorder) GetBucketWebsite(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketWebsite", reflect.TypeOf((*MockS3API)(nil).GetBucketWebsite), arg0) -} - -// GetBucketWebsiteRequest mocks base method. -func (m *MockS3API) GetBucketWebsiteRequest(arg0 *s3.GetBucketWebsiteInput) (*request.Request, *s3.GetBucketWebsiteOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketWebsiteRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.GetBucketWebsiteOutput) - return ret0, ret1 -} - -// GetBucketWebsiteRequest indicates an expected call of GetBucketWebsiteRequest. -func (mr *MockS3APIMockRecorder) GetBucketWebsiteRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketWebsiteRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketWebsiteRequest), arg0) -} - -// GetBucketWebsiteWithContext mocks base method. -func (m *MockS3API) GetBucketWebsiteWithContext(arg0 aws.Context, arg1 *s3.GetBucketWebsiteInput, arg2 ...request.Option) (*s3.GetBucketWebsiteOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetBucketWebsiteWithContext", varargs...) - ret0, _ := ret[0].(*s3.GetBucketWebsiteOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketWebsiteWithContext indicates an expected call of GetBucketWebsiteWithContext. -func (mr *MockS3APIMockRecorder) GetBucketWebsiteWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketWebsiteWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketWebsiteWithContext), varargs...) -} - -// GetObject mocks base method. -func (m *MockS3API) GetObject(arg0 *s3.GetObjectInput) (*s3.GetObjectOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetObject", arg0) - ret0, _ := ret[0].(*s3.GetObjectOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetObject indicates an expected call of GetObject. -func (mr *MockS3APIMockRecorder) GetObject(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObject", reflect.TypeOf((*MockS3API)(nil).GetObject), arg0) -} - -// GetObjectAcl mocks base method. 
-func (m *MockS3API) GetObjectAcl(arg0 *s3.GetObjectAclInput) (*s3.GetObjectAclOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetObjectAcl", arg0) - ret0, _ := ret[0].(*s3.GetObjectAclOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetObjectAcl indicates an expected call of GetObjectAcl. -func (mr *MockS3APIMockRecorder) GetObjectAcl(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectAcl", reflect.TypeOf((*MockS3API)(nil).GetObjectAcl), arg0) -} - -// GetObjectAclRequest mocks base method. -func (m *MockS3API) GetObjectAclRequest(arg0 *s3.GetObjectAclInput) (*request.Request, *s3.GetObjectAclOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetObjectAclRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.GetObjectAclOutput) - return ret0, ret1 -} - -// GetObjectAclRequest indicates an expected call of GetObjectAclRequest. -func (mr *MockS3APIMockRecorder) GetObjectAclRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectAclRequest", reflect.TypeOf((*MockS3API)(nil).GetObjectAclRequest), arg0) -} - -// GetObjectAclWithContext mocks base method. -func (m *MockS3API) GetObjectAclWithContext(arg0 aws.Context, arg1 *s3.GetObjectAclInput, arg2 ...request.Option) (*s3.GetObjectAclOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetObjectAclWithContext", varargs...) - ret0, _ := ret[0].(*s3.GetObjectAclOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetObjectAclWithContext indicates an expected call of GetObjectAclWithContext. -func (mr *MockS3APIMockRecorder) GetObjectAclWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectAclWithContext", reflect.TypeOf((*MockS3API)(nil).GetObjectAclWithContext), varargs...) -} - -// GetObjectAttributes mocks base method. -func (m *MockS3API) GetObjectAttributes(arg0 *s3.GetObjectAttributesInput) (*s3.GetObjectAttributesOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetObjectAttributes", arg0) - ret0, _ := ret[0].(*s3.GetObjectAttributesOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetObjectAttributes indicates an expected call of GetObjectAttributes. -func (mr *MockS3APIMockRecorder) GetObjectAttributes(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectAttributes", reflect.TypeOf((*MockS3API)(nil).GetObjectAttributes), arg0) -} - -// GetObjectAttributesRequest mocks base method. -func (m *MockS3API) GetObjectAttributesRequest(arg0 *s3.GetObjectAttributesInput) (*request.Request, *s3.GetObjectAttributesOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetObjectAttributesRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.GetObjectAttributesOutput) - return ret0, ret1 -} - -// GetObjectAttributesRequest indicates an expected call of GetObjectAttributesRequest. 
-func (mr *MockS3APIMockRecorder) GetObjectAttributesRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectAttributesRequest", reflect.TypeOf((*MockS3API)(nil).GetObjectAttributesRequest), arg0) -} - -// GetObjectAttributesWithContext mocks base method. -func (m *MockS3API) GetObjectAttributesWithContext(arg0 aws.Context, arg1 *s3.GetObjectAttributesInput, arg2 ...request.Option) (*s3.GetObjectAttributesOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetObjectAttributesWithContext", varargs...) - ret0, _ := ret[0].(*s3.GetObjectAttributesOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetObjectAttributesWithContext indicates an expected call of GetObjectAttributesWithContext. -func (mr *MockS3APIMockRecorder) GetObjectAttributesWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectAttributesWithContext", reflect.TypeOf((*MockS3API)(nil).GetObjectAttributesWithContext), varargs...) -} - -// GetObjectLegalHold mocks base method. -func (m *MockS3API) GetObjectLegalHold(arg0 *s3.GetObjectLegalHoldInput) (*s3.GetObjectLegalHoldOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetObjectLegalHold", arg0) - ret0, _ := ret[0].(*s3.GetObjectLegalHoldOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetObjectLegalHold indicates an expected call of GetObjectLegalHold. -func (mr *MockS3APIMockRecorder) GetObjectLegalHold(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectLegalHold", reflect.TypeOf((*MockS3API)(nil).GetObjectLegalHold), arg0) -} - -// GetObjectLegalHoldRequest mocks base method. -func (m *MockS3API) GetObjectLegalHoldRequest(arg0 *s3.GetObjectLegalHoldInput) (*request.Request, *s3.GetObjectLegalHoldOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetObjectLegalHoldRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.GetObjectLegalHoldOutput) - return ret0, ret1 -} - -// GetObjectLegalHoldRequest indicates an expected call of GetObjectLegalHoldRequest. -func (mr *MockS3APIMockRecorder) GetObjectLegalHoldRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectLegalHoldRequest", reflect.TypeOf((*MockS3API)(nil).GetObjectLegalHoldRequest), arg0) -} - -// GetObjectLegalHoldWithContext mocks base method. -func (m *MockS3API) GetObjectLegalHoldWithContext(arg0 aws.Context, arg1 *s3.GetObjectLegalHoldInput, arg2 ...request.Option) (*s3.GetObjectLegalHoldOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetObjectLegalHoldWithContext", varargs...) - ret0, _ := ret[0].(*s3.GetObjectLegalHoldOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetObjectLegalHoldWithContext indicates an expected call of GetObjectLegalHoldWithContext. -func (mr *MockS3APIMockRecorder) GetObjectLegalHoldWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) 
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectLegalHoldWithContext", reflect.TypeOf((*MockS3API)(nil).GetObjectLegalHoldWithContext), varargs...) -} - -// GetObjectLockConfiguration mocks base method. -func (m *MockS3API) GetObjectLockConfiguration(arg0 *s3.GetObjectLockConfigurationInput) (*s3.GetObjectLockConfigurationOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetObjectLockConfiguration", arg0) - ret0, _ := ret[0].(*s3.GetObjectLockConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetObjectLockConfiguration indicates an expected call of GetObjectLockConfiguration. -func (mr *MockS3APIMockRecorder) GetObjectLockConfiguration(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectLockConfiguration", reflect.TypeOf((*MockS3API)(nil).GetObjectLockConfiguration), arg0) -} - -// GetObjectLockConfigurationRequest mocks base method. -func (m *MockS3API) GetObjectLockConfigurationRequest(arg0 *s3.GetObjectLockConfigurationInput) (*request.Request, *s3.GetObjectLockConfigurationOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetObjectLockConfigurationRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.GetObjectLockConfigurationOutput) - return ret0, ret1 -} - -// GetObjectLockConfigurationRequest indicates an expected call of GetObjectLockConfigurationRequest. -func (mr *MockS3APIMockRecorder) GetObjectLockConfigurationRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectLockConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).GetObjectLockConfigurationRequest), arg0) -} - -// GetObjectLockConfigurationWithContext mocks base method. -func (m *MockS3API) GetObjectLockConfigurationWithContext(arg0 aws.Context, arg1 *s3.GetObjectLockConfigurationInput, arg2 ...request.Option) (*s3.GetObjectLockConfigurationOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetObjectLockConfigurationWithContext", varargs...) - ret0, _ := ret[0].(*s3.GetObjectLockConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetObjectLockConfigurationWithContext indicates an expected call of GetObjectLockConfigurationWithContext. -func (mr *MockS3APIMockRecorder) GetObjectLockConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectLockConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).GetObjectLockConfigurationWithContext), varargs...) -} - -// GetObjectRequest mocks base method. -func (m *MockS3API) GetObjectRequest(arg0 *s3.GetObjectInput) (*request.Request, *s3.GetObjectOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetObjectRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.GetObjectOutput) - return ret0, ret1 -} - -// GetObjectRequest indicates an expected call of GetObjectRequest. -func (mr *MockS3APIMockRecorder) GetObjectRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectRequest", reflect.TypeOf((*MockS3API)(nil).GetObjectRequest), arg0) -} - -// GetObjectRetention mocks base method. 
-func (m *MockS3API) GetObjectRetention(arg0 *s3.GetObjectRetentionInput) (*s3.GetObjectRetentionOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetObjectRetention", arg0) - ret0, _ := ret[0].(*s3.GetObjectRetentionOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetObjectRetention indicates an expected call of GetObjectRetention. -func (mr *MockS3APIMockRecorder) GetObjectRetention(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectRetention", reflect.TypeOf((*MockS3API)(nil).GetObjectRetention), arg0) -} - -// GetObjectRetentionRequest mocks base method. -func (m *MockS3API) GetObjectRetentionRequest(arg0 *s3.GetObjectRetentionInput) (*request.Request, *s3.GetObjectRetentionOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetObjectRetentionRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.GetObjectRetentionOutput) - return ret0, ret1 -} - -// GetObjectRetentionRequest indicates an expected call of GetObjectRetentionRequest. -func (mr *MockS3APIMockRecorder) GetObjectRetentionRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectRetentionRequest", reflect.TypeOf((*MockS3API)(nil).GetObjectRetentionRequest), arg0) -} - -// GetObjectRetentionWithContext mocks base method. -func (m *MockS3API) GetObjectRetentionWithContext(arg0 aws.Context, arg1 *s3.GetObjectRetentionInput, arg2 ...request.Option) (*s3.GetObjectRetentionOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetObjectRetentionWithContext", varargs...) - ret0, _ := ret[0].(*s3.GetObjectRetentionOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetObjectRetentionWithContext indicates an expected call of GetObjectRetentionWithContext. -func (mr *MockS3APIMockRecorder) GetObjectRetentionWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectRetentionWithContext", reflect.TypeOf((*MockS3API)(nil).GetObjectRetentionWithContext), varargs...) -} - -// GetObjectTagging mocks base method. -func (m *MockS3API) GetObjectTagging(arg0 *s3.GetObjectTaggingInput) (*s3.GetObjectTaggingOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetObjectTagging", arg0) - ret0, _ := ret[0].(*s3.GetObjectTaggingOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetObjectTagging indicates an expected call of GetObjectTagging. -func (mr *MockS3APIMockRecorder) GetObjectTagging(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectTagging", reflect.TypeOf((*MockS3API)(nil).GetObjectTagging), arg0) -} - -// GetObjectTaggingRequest mocks base method. -func (m *MockS3API) GetObjectTaggingRequest(arg0 *s3.GetObjectTaggingInput) (*request.Request, *s3.GetObjectTaggingOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetObjectTaggingRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.GetObjectTaggingOutput) - return ret0, ret1 -} - -// GetObjectTaggingRequest indicates an expected call of GetObjectTaggingRequest. 
-func (mr *MockS3APIMockRecorder) GetObjectTaggingRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectTaggingRequest", reflect.TypeOf((*MockS3API)(nil).GetObjectTaggingRequest), arg0) -} - -// GetObjectTaggingWithContext mocks base method. -func (m *MockS3API) GetObjectTaggingWithContext(arg0 aws.Context, arg1 *s3.GetObjectTaggingInput, arg2 ...request.Option) (*s3.GetObjectTaggingOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetObjectTaggingWithContext", varargs...) - ret0, _ := ret[0].(*s3.GetObjectTaggingOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetObjectTaggingWithContext indicates an expected call of GetObjectTaggingWithContext. -func (mr *MockS3APIMockRecorder) GetObjectTaggingWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectTaggingWithContext", reflect.TypeOf((*MockS3API)(nil).GetObjectTaggingWithContext), varargs...) -} - -// GetObjectTorrent mocks base method. -func (m *MockS3API) GetObjectTorrent(arg0 *s3.GetObjectTorrentInput) (*s3.GetObjectTorrentOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetObjectTorrent", arg0) - ret0, _ := ret[0].(*s3.GetObjectTorrentOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetObjectTorrent indicates an expected call of GetObjectTorrent. -func (mr *MockS3APIMockRecorder) GetObjectTorrent(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectTorrent", reflect.TypeOf((*MockS3API)(nil).GetObjectTorrent), arg0) -} - -// GetObjectTorrentRequest mocks base method. -func (m *MockS3API) GetObjectTorrentRequest(arg0 *s3.GetObjectTorrentInput) (*request.Request, *s3.GetObjectTorrentOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetObjectTorrentRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.GetObjectTorrentOutput) - return ret0, ret1 -} - -// GetObjectTorrentRequest indicates an expected call of GetObjectTorrentRequest. -func (mr *MockS3APIMockRecorder) GetObjectTorrentRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectTorrentRequest", reflect.TypeOf((*MockS3API)(nil).GetObjectTorrentRequest), arg0) -} - -// GetObjectTorrentWithContext mocks base method. -func (m *MockS3API) GetObjectTorrentWithContext(arg0 aws.Context, arg1 *s3.GetObjectTorrentInput, arg2 ...request.Option) (*s3.GetObjectTorrentOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetObjectTorrentWithContext", varargs...) - ret0, _ := ret[0].(*s3.GetObjectTorrentOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetObjectTorrentWithContext indicates an expected call of GetObjectTorrentWithContext. -func (mr *MockS3APIMockRecorder) GetObjectTorrentWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectTorrentWithContext", reflect.TypeOf((*MockS3API)(nil).GetObjectTorrentWithContext), varargs...) 
-} - -// GetObjectWithContext mocks base method. -func (m *MockS3API) GetObjectWithContext(arg0 aws.Context, arg1 *s3.GetObjectInput, arg2 ...request.Option) (*s3.GetObjectOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetObjectWithContext", varargs...) - ret0, _ := ret[0].(*s3.GetObjectOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetObjectWithContext indicates an expected call of GetObjectWithContext. -func (mr *MockS3APIMockRecorder) GetObjectWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectWithContext", reflect.TypeOf((*MockS3API)(nil).GetObjectWithContext), varargs...) -} - -// GetPublicAccessBlock mocks base method. -func (m *MockS3API) GetPublicAccessBlock(arg0 *s3.GetPublicAccessBlockInput) (*s3.GetPublicAccessBlockOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetPublicAccessBlock", arg0) - ret0, _ := ret[0].(*s3.GetPublicAccessBlockOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetPublicAccessBlock indicates an expected call of GetPublicAccessBlock. -func (mr *MockS3APIMockRecorder) GetPublicAccessBlock(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPublicAccessBlock", reflect.TypeOf((*MockS3API)(nil).GetPublicAccessBlock), arg0) -} - -// GetPublicAccessBlockRequest mocks base method. -func (m *MockS3API) GetPublicAccessBlockRequest(arg0 *s3.GetPublicAccessBlockInput) (*request.Request, *s3.GetPublicAccessBlockOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetPublicAccessBlockRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.GetPublicAccessBlockOutput) - return ret0, ret1 -} - -// GetPublicAccessBlockRequest indicates an expected call of GetPublicAccessBlockRequest. -func (mr *MockS3APIMockRecorder) GetPublicAccessBlockRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPublicAccessBlockRequest", reflect.TypeOf((*MockS3API)(nil).GetPublicAccessBlockRequest), arg0) -} - -// GetPublicAccessBlockWithContext mocks base method. -func (m *MockS3API) GetPublicAccessBlockWithContext(arg0 aws.Context, arg1 *s3.GetPublicAccessBlockInput, arg2 ...request.Option) (*s3.GetPublicAccessBlockOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetPublicAccessBlockWithContext", varargs...) - ret0, _ := ret[0].(*s3.GetPublicAccessBlockOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetPublicAccessBlockWithContext indicates an expected call of GetPublicAccessBlockWithContext. -func (mr *MockS3APIMockRecorder) GetPublicAccessBlockWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPublicAccessBlockWithContext", reflect.TypeOf((*MockS3API)(nil).GetPublicAccessBlockWithContext), varargs...) -} - -// HeadBucket mocks base method. 
-func (m *MockS3API) HeadBucket(arg0 *s3.HeadBucketInput) (*s3.HeadBucketOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "HeadBucket", arg0) - ret0, _ := ret[0].(*s3.HeadBucketOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// HeadBucket indicates an expected call of HeadBucket. -func (mr *MockS3APIMockRecorder) HeadBucket(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HeadBucket", reflect.TypeOf((*MockS3API)(nil).HeadBucket), arg0) -} - -// HeadBucketRequest mocks base method. -func (m *MockS3API) HeadBucketRequest(arg0 *s3.HeadBucketInput) (*request.Request, *s3.HeadBucketOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "HeadBucketRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.HeadBucketOutput) - return ret0, ret1 -} - -// HeadBucketRequest indicates an expected call of HeadBucketRequest. -func (mr *MockS3APIMockRecorder) HeadBucketRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HeadBucketRequest", reflect.TypeOf((*MockS3API)(nil).HeadBucketRequest), arg0) -} - -// HeadBucketWithContext mocks base method. -func (m *MockS3API) HeadBucketWithContext(arg0 aws.Context, arg1 *s3.HeadBucketInput, arg2 ...request.Option) (*s3.HeadBucketOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "HeadBucketWithContext", varargs...) - ret0, _ := ret[0].(*s3.HeadBucketOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// HeadBucketWithContext indicates an expected call of HeadBucketWithContext. -func (mr *MockS3APIMockRecorder) HeadBucketWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HeadBucketWithContext", reflect.TypeOf((*MockS3API)(nil).HeadBucketWithContext), varargs...) -} - -// HeadObject mocks base method. -func (m *MockS3API) HeadObject(arg0 *s3.HeadObjectInput) (*s3.HeadObjectOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "HeadObject", arg0) - ret0, _ := ret[0].(*s3.HeadObjectOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// HeadObject indicates an expected call of HeadObject. -func (mr *MockS3APIMockRecorder) HeadObject(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HeadObject", reflect.TypeOf((*MockS3API)(nil).HeadObject), arg0) -} - -// HeadObjectRequest mocks base method. -func (m *MockS3API) HeadObjectRequest(arg0 *s3.HeadObjectInput) (*request.Request, *s3.HeadObjectOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "HeadObjectRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.HeadObjectOutput) - return ret0, ret1 -} - -// HeadObjectRequest indicates an expected call of HeadObjectRequest. -func (mr *MockS3APIMockRecorder) HeadObjectRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HeadObjectRequest", reflect.TypeOf((*MockS3API)(nil).HeadObjectRequest), arg0) -} - -// HeadObjectWithContext mocks base method. 
-func (m *MockS3API) HeadObjectWithContext(arg0 aws.Context, arg1 *s3.HeadObjectInput, arg2 ...request.Option) (*s3.HeadObjectOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "HeadObjectWithContext", varargs...) - ret0, _ := ret[0].(*s3.HeadObjectOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// HeadObjectWithContext indicates an expected call of HeadObjectWithContext. -func (mr *MockS3APIMockRecorder) HeadObjectWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HeadObjectWithContext", reflect.TypeOf((*MockS3API)(nil).HeadObjectWithContext), varargs...) -} - -// ListBucketAnalyticsConfigurations mocks base method. -func (m *MockS3API) ListBucketAnalyticsConfigurations(arg0 *s3.ListBucketAnalyticsConfigurationsInput) (*s3.ListBucketAnalyticsConfigurationsOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListBucketAnalyticsConfigurations", arg0) - ret0, _ := ret[0].(*s3.ListBucketAnalyticsConfigurationsOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListBucketAnalyticsConfigurations indicates an expected call of ListBucketAnalyticsConfigurations. -func (mr *MockS3APIMockRecorder) ListBucketAnalyticsConfigurations(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListBucketAnalyticsConfigurations", reflect.TypeOf((*MockS3API)(nil).ListBucketAnalyticsConfigurations), arg0) -} - -// ListBucketAnalyticsConfigurationsRequest mocks base method. -func (m *MockS3API) ListBucketAnalyticsConfigurationsRequest(arg0 *s3.ListBucketAnalyticsConfigurationsInput) (*request.Request, *s3.ListBucketAnalyticsConfigurationsOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListBucketAnalyticsConfigurationsRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.ListBucketAnalyticsConfigurationsOutput) - return ret0, ret1 -} - -// ListBucketAnalyticsConfigurationsRequest indicates an expected call of ListBucketAnalyticsConfigurationsRequest. -func (mr *MockS3APIMockRecorder) ListBucketAnalyticsConfigurationsRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListBucketAnalyticsConfigurationsRequest", reflect.TypeOf((*MockS3API)(nil).ListBucketAnalyticsConfigurationsRequest), arg0) -} - -// ListBucketAnalyticsConfigurationsWithContext mocks base method. -func (m *MockS3API) ListBucketAnalyticsConfigurationsWithContext(arg0 aws.Context, arg1 *s3.ListBucketAnalyticsConfigurationsInput, arg2 ...request.Option) (*s3.ListBucketAnalyticsConfigurationsOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "ListBucketAnalyticsConfigurationsWithContext", varargs...) - ret0, _ := ret[0].(*s3.ListBucketAnalyticsConfigurationsOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListBucketAnalyticsConfigurationsWithContext indicates an expected call of ListBucketAnalyticsConfigurationsWithContext. -func (mr *MockS3APIMockRecorder) ListBucketAnalyticsConfigurationsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) 
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListBucketAnalyticsConfigurationsWithContext", reflect.TypeOf((*MockS3API)(nil).ListBucketAnalyticsConfigurationsWithContext), varargs...) -} - -// ListBucketIntelligentTieringConfigurations mocks base method. -func (m *MockS3API) ListBucketIntelligentTieringConfigurations(arg0 *s3.ListBucketIntelligentTieringConfigurationsInput) (*s3.ListBucketIntelligentTieringConfigurationsOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListBucketIntelligentTieringConfigurations", arg0) - ret0, _ := ret[0].(*s3.ListBucketIntelligentTieringConfigurationsOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListBucketIntelligentTieringConfigurations indicates an expected call of ListBucketIntelligentTieringConfigurations. -func (mr *MockS3APIMockRecorder) ListBucketIntelligentTieringConfigurations(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListBucketIntelligentTieringConfigurations", reflect.TypeOf((*MockS3API)(nil).ListBucketIntelligentTieringConfigurations), arg0) -} - -// ListBucketIntelligentTieringConfigurationsRequest mocks base method. -func (m *MockS3API) ListBucketIntelligentTieringConfigurationsRequest(arg0 *s3.ListBucketIntelligentTieringConfigurationsInput) (*request.Request, *s3.ListBucketIntelligentTieringConfigurationsOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListBucketIntelligentTieringConfigurationsRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.ListBucketIntelligentTieringConfigurationsOutput) - return ret0, ret1 -} - -// ListBucketIntelligentTieringConfigurationsRequest indicates an expected call of ListBucketIntelligentTieringConfigurationsRequest. -func (mr *MockS3APIMockRecorder) ListBucketIntelligentTieringConfigurationsRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListBucketIntelligentTieringConfigurationsRequest", reflect.TypeOf((*MockS3API)(nil).ListBucketIntelligentTieringConfigurationsRequest), arg0) -} - -// ListBucketIntelligentTieringConfigurationsWithContext mocks base method. -func (m *MockS3API) ListBucketIntelligentTieringConfigurationsWithContext(arg0 aws.Context, arg1 *s3.ListBucketIntelligentTieringConfigurationsInput, arg2 ...request.Option) (*s3.ListBucketIntelligentTieringConfigurationsOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "ListBucketIntelligentTieringConfigurationsWithContext", varargs...) - ret0, _ := ret[0].(*s3.ListBucketIntelligentTieringConfigurationsOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListBucketIntelligentTieringConfigurationsWithContext indicates an expected call of ListBucketIntelligentTieringConfigurationsWithContext. -func (mr *MockS3APIMockRecorder) ListBucketIntelligentTieringConfigurationsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListBucketIntelligentTieringConfigurationsWithContext", reflect.TypeOf((*MockS3API)(nil).ListBucketIntelligentTieringConfigurationsWithContext), varargs...) -} - -// ListBucketInventoryConfigurations mocks base method. 
-func (m *MockS3API) ListBucketInventoryConfigurations(arg0 *s3.ListBucketInventoryConfigurationsInput) (*s3.ListBucketInventoryConfigurationsOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListBucketInventoryConfigurations", arg0) - ret0, _ := ret[0].(*s3.ListBucketInventoryConfigurationsOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListBucketInventoryConfigurations indicates an expected call of ListBucketInventoryConfigurations. -func (mr *MockS3APIMockRecorder) ListBucketInventoryConfigurations(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListBucketInventoryConfigurations", reflect.TypeOf((*MockS3API)(nil).ListBucketInventoryConfigurations), arg0) -} - -// ListBucketInventoryConfigurationsRequest mocks base method. -func (m *MockS3API) ListBucketInventoryConfigurationsRequest(arg0 *s3.ListBucketInventoryConfigurationsInput) (*request.Request, *s3.ListBucketInventoryConfigurationsOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListBucketInventoryConfigurationsRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.ListBucketInventoryConfigurationsOutput) - return ret0, ret1 -} - -// ListBucketInventoryConfigurationsRequest indicates an expected call of ListBucketInventoryConfigurationsRequest. -func (mr *MockS3APIMockRecorder) ListBucketInventoryConfigurationsRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListBucketInventoryConfigurationsRequest", reflect.TypeOf((*MockS3API)(nil).ListBucketInventoryConfigurationsRequest), arg0) -} - -// ListBucketInventoryConfigurationsWithContext mocks base method. -func (m *MockS3API) ListBucketInventoryConfigurationsWithContext(arg0 aws.Context, arg1 *s3.ListBucketInventoryConfigurationsInput, arg2 ...request.Option) (*s3.ListBucketInventoryConfigurationsOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "ListBucketInventoryConfigurationsWithContext", varargs...) - ret0, _ := ret[0].(*s3.ListBucketInventoryConfigurationsOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListBucketInventoryConfigurationsWithContext indicates an expected call of ListBucketInventoryConfigurationsWithContext. -func (mr *MockS3APIMockRecorder) ListBucketInventoryConfigurationsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListBucketInventoryConfigurationsWithContext", reflect.TypeOf((*MockS3API)(nil).ListBucketInventoryConfigurationsWithContext), varargs...) -} - -// ListBucketMetricsConfigurations mocks base method. -func (m *MockS3API) ListBucketMetricsConfigurations(arg0 *s3.ListBucketMetricsConfigurationsInput) (*s3.ListBucketMetricsConfigurationsOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListBucketMetricsConfigurations", arg0) - ret0, _ := ret[0].(*s3.ListBucketMetricsConfigurationsOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListBucketMetricsConfigurations indicates an expected call of ListBucketMetricsConfigurations. 
-func (mr *MockS3APIMockRecorder) ListBucketMetricsConfigurations(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListBucketMetricsConfigurations", reflect.TypeOf((*MockS3API)(nil).ListBucketMetricsConfigurations), arg0) -} - -// ListBucketMetricsConfigurationsRequest mocks base method. -func (m *MockS3API) ListBucketMetricsConfigurationsRequest(arg0 *s3.ListBucketMetricsConfigurationsInput) (*request.Request, *s3.ListBucketMetricsConfigurationsOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListBucketMetricsConfigurationsRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.ListBucketMetricsConfigurationsOutput) - return ret0, ret1 -} - -// ListBucketMetricsConfigurationsRequest indicates an expected call of ListBucketMetricsConfigurationsRequest. -func (mr *MockS3APIMockRecorder) ListBucketMetricsConfigurationsRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListBucketMetricsConfigurationsRequest", reflect.TypeOf((*MockS3API)(nil).ListBucketMetricsConfigurationsRequest), arg0) -} - -// ListBucketMetricsConfigurationsWithContext mocks base method. -func (m *MockS3API) ListBucketMetricsConfigurationsWithContext(arg0 aws.Context, arg1 *s3.ListBucketMetricsConfigurationsInput, arg2 ...request.Option) (*s3.ListBucketMetricsConfigurationsOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "ListBucketMetricsConfigurationsWithContext", varargs...) - ret0, _ := ret[0].(*s3.ListBucketMetricsConfigurationsOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListBucketMetricsConfigurationsWithContext indicates an expected call of ListBucketMetricsConfigurationsWithContext. -func (mr *MockS3APIMockRecorder) ListBucketMetricsConfigurationsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListBucketMetricsConfigurationsWithContext", reflect.TypeOf((*MockS3API)(nil).ListBucketMetricsConfigurationsWithContext), varargs...) -} - -// ListBuckets mocks base method. -func (m *MockS3API) ListBuckets(arg0 *s3.ListBucketsInput) (*s3.ListBucketsOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListBuckets", arg0) - ret0, _ := ret[0].(*s3.ListBucketsOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListBuckets indicates an expected call of ListBuckets. -func (mr *MockS3APIMockRecorder) ListBuckets(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListBuckets", reflect.TypeOf((*MockS3API)(nil).ListBuckets), arg0) -} - -// ListBucketsRequest mocks base method. -func (m *MockS3API) ListBucketsRequest(arg0 *s3.ListBucketsInput) (*request.Request, *s3.ListBucketsOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListBucketsRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.ListBucketsOutput) - return ret0, ret1 -} - -// ListBucketsRequest indicates an expected call of ListBucketsRequest. 
-func (mr *MockS3APIMockRecorder) ListBucketsRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListBucketsRequest", reflect.TypeOf((*MockS3API)(nil).ListBucketsRequest), arg0) -} - -// ListBucketsWithContext mocks base method. -func (m *MockS3API) ListBucketsWithContext(arg0 aws.Context, arg1 *s3.ListBucketsInput, arg2 ...request.Option) (*s3.ListBucketsOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "ListBucketsWithContext", varargs...) - ret0, _ := ret[0].(*s3.ListBucketsOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListBucketsWithContext indicates an expected call of ListBucketsWithContext. -func (mr *MockS3APIMockRecorder) ListBucketsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListBucketsWithContext", reflect.TypeOf((*MockS3API)(nil).ListBucketsWithContext), varargs...) -} - -// ListMultipartUploads mocks base method. -func (m *MockS3API) ListMultipartUploads(arg0 *s3.ListMultipartUploadsInput) (*s3.ListMultipartUploadsOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListMultipartUploads", arg0) - ret0, _ := ret[0].(*s3.ListMultipartUploadsOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListMultipartUploads indicates an expected call of ListMultipartUploads. -func (mr *MockS3APIMockRecorder) ListMultipartUploads(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListMultipartUploads", reflect.TypeOf((*MockS3API)(nil).ListMultipartUploads), arg0) -} - -// ListMultipartUploadsPages mocks base method. -func (m *MockS3API) ListMultipartUploadsPages(arg0 *s3.ListMultipartUploadsInput, arg1 func(*s3.ListMultipartUploadsOutput, bool) bool) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListMultipartUploadsPages", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// ListMultipartUploadsPages indicates an expected call of ListMultipartUploadsPages. -func (mr *MockS3APIMockRecorder) ListMultipartUploadsPages(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListMultipartUploadsPages", reflect.TypeOf((*MockS3API)(nil).ListMultipartUploadsPages), arg0, arg1) -} - -// ListMultipartUploadsPagesWithContext mocks base method. -func (m *MockS3API) ListMultipartUploadsPagesWithContext(arg0 aws.Context, arg1 *s3.ListMultipartUploadsInput, arg2 func(*s3.ListMultipartUploadsOutput, bool) bool, arg3 ...request.Option) error { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1, arg2} - for _, a := range arg3 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "ListMultipartUploadsPagesWithContext", varargs...) - ret0, _ := ret[0].(error) - return ret0 -} - -// ListMultipartUploadsPagesWithContext indicates an expected call of ListMultipartUploadsPagesWithContext. -func (mr *MockS3APIMockRecorder) ListMultipartUploadsPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1, arg2}, arg3...) 
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListMultipartUploadsPagesWithContext", reflect.TypeOf((*MockS3API)(nil).ListMultipartUploadsPagesWithContext), varargs...) -} - -// ListMultipartUploadsRequest mocks base method. -func (m *MockS3API) ListMultipartUploadsRequest(arg0 *s3.ListMultipartUploadsInput) (*request.Request, *s3.ListMultipartUploadsOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListMultipartUploadsRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.ListMultipartUploadsOutput) - return ret0, ret1 -} - -// ListMultipartUploadsRequest indicates an expected call of ListMultipartUploadsRequest. -func (mr *MockS3APIMockRecorder) ListMultipartUploadsRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListMultipartUploadsRequest", reflect.TypeOf((*MockS3API)(nil).ListMultipartUploadsRequest), arg0) -} - -// ListMultipartUploadsWithContext mocks base method. -func (m *MockS3API) ListMultipartUploadsWithContext(arg0 aws.Context, arg1 *s3.ListMultipartUploadsInput, arg2 ...request.Option) (*s3.ListMultipartUploadsOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "ListMultipartUploadsWithContext", varargs...) - ret0, _ := ret[0].(*s3.ListMultipartUploadsOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListMultipartUploadsWithContext indicates an expected call of ListMultipartUploadsWithContext. -func (mr *MockS3APIMockRecorder) ListMultipartUploadsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListMultipartUploadsWithContext", reflect.TypeOf((*MockS3API)(nil).ListMultipartUploadsWithContext), varargs...) -} - -// ListObjectVersions mocks base method. -func (m *MockS3API) ListObjectVersions(arg0 *s3.ListObjectVersionsInput) (*s3.ListObjectVersionsOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListObjectVersions", arg0) - ret0, _ := ret[0].(*s3.ListObjectVersionsOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListObjectVersions indicates an expected call of ListObjectVersions. -func (mr *MockS3APIMockRecorder) ListObjectVersions(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjectVersions", reflect.TypeOf((*MockS3API)(nil).ListObjectVersions), arg0) -} - -// ListObjectVersionsPages mocks base method. -func (m *MockS3API) ListObjectVersionsPages(arg0 *s3.ListObjectVersionsInput, arg1 func(*s3.ListObjectVersionsOutput, bool) bool) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListObjectVersionsPages", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// ListObjectVersionsPages indicates an expected call of ListObjectVersionsPages. -func (mr *MockS3APIMockRecorder) ListObjectVersionsPages(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjectVersionsPages", reflect.TypeOf((*MockS3API)(nil).ListObjectVersionsPages), arg0, arg1) -} - -// ListObjectVersionsPagesWithContext mocks base method. 
-func (m *MockS3API) ListObjectVersionsPagesWithContext(arg0 aws.Context, arg1 *s3.ListObjectVersionsInput, arg2 func(*s3.ListObjectVersionsOutput, bool) bool, arg3 ...request.Option) error { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1, arg2} - for _, a := range arg3 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "ListObjectVersionsPagesWithContext", varargs...) - ret0, _ := ret[0].(error) - return ret0 -} - -// ListObjectVersionsPagesWithContext indicates an expected call of ListObjectVersionsPagesWithContext. -func (mr *MockS3APIMockRecorder) ListObjectVersionsPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1, arg2}, arg3...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjectVersionsPagesWithContext", reflect.TypeOf((*MockS3API)(nil).ListObjectVersionsPagesWithContext), varargs...) -} - -// ListObjectVersionsRequest mocks base method. -func (m *MockS3API) ListObjectVersionsRequest(arg0 *s3.ListObjectVersionsInput) (*request.Request, *s3.ListObjectVersionsOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListObjectVersionsRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.ListObjectVersionsOutput) - return ret0, ret1 -} - -// ListObjectVersionsRequest indicates an expected call of ListObjectVersionsRequest. -func (mr *MockS3APIMockRecorder) ListObjectVersionsRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjectVersionsRequest", reflect.TypeOf((*MockS3API)(nil).ListObjectVersionsRequest), arg0) -} - -// ListObjectVersionsWithContext mocks base method. -func (m *MockS3API) ListObjectVersionsWithContext(arg0 aws.Context, arg1 *s3.ListObjectVersionsInput, arg2 ...request.Option) (*s3.ListObjectVersionsOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "ListObjectVersionsWithContext", varargs...) - ret0, _ := ret[0].(*s3.ListObjectVersionsOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListObjectVersionsWithContext indicates an expected call of ListObjectVersionsWithContext. -func (mr *MockS3APIMockRecorder) ListObjectVersionsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjectVersionsWithContext", reflect.TypeOf((*MockS3API)(nil).ListObjectVersionsWithContext), varargs...) -} - -// ListObjects mocks base method. -func (m *MockS3API) ListObjects(arg0 *s3.ListObjectsInput) (*s3.ListObjectsOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListObjects", arg0) - ret0, _ := ret[0].(*s3.ListObjectsOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListObjects indicates an expected call of ListObjects. -func (mr *MockS3APIMockRecorder) ListObjects(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjects", reflect.TypeOf((*MockS3API)(nil).ListObjects), arg0) -} - -// ListObjectsPages mocks base method. 
-func (m *MockS3API) ListObjectsPages(arg0 *s3.ListObjectsInput, arg1 func(*s3.ListObjectsOutput, bool) bool) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListObjectsPages", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// ListObjectsPages indicates an expected call of ListObjectsPages. -func (mr *MockS3APIMockRecorder) ListObjectsPages(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjectsPages", reflect.TypeOf((*MockS3API)(nil).ListObjectsPages), arg0, arg1) -} - -// ListObjectsPagesWithContext mocks base method. -func (m *MockS3API) ListObjectsPagesWithContext(arg0 aws.Context, arg1 *s3.ListObjectsInput, arg2 func(*s3.ListObjectsOutput, bool) bool, arg3 ...request.Option) error { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1, arg2} - for _, a := range arg3 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "ListObjectsPagesWithContext", varargs...) - ret0, _ := ret[0].(error) - return ret0 -} - -// ListObjectsPagesWithContext indicates an expected call of ListObjectsPagesWithContext. -func (mr *MockS3APIMockRecorder) ListObjectsPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1, arg2}, arg3...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjectsPagesWithContext", reflect.TypeOf((*MockS3API)(nil).ListObjectsPagesWithContext), varargs...) -} - -// ListObjectsRequest mocks base method. -func (m *MockS3API) ListObjectsRequest(arg0 *s3.ListObjectsInput) (*request.Request, *s3.ListObjectsOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListObjectsRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.ListObjectsOutput) - return ret0, ret1 -} - -// ListObjectsRequest indicates an expected call of ListObjectsRequest. -func (mr *MockS3APIMockRecorder) ListObjectsRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjectsRequest", reflect.TypeOf((*MockS3API)(nil).ListObjectsRequest), arg0) -} - -// ListObjectsV2 mocks base method. -func (m *MockS3API) ListObjectsV2(arg0 *s3.ListObjectsV2Input) (*s3.ListObjectsV2Output, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListObjectsV2", arg0) - ret0, _ := ret[0].(*s3.ListObjectsV2Output) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListObjectsV2 indicates an expected call of ListObjectsV2. -func (mr *MockS3APIMockRecorder) ListObjectsV2(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjectsV2", reflect.TypeOf((*MockS3API)(nil).ListObjectsV2), arg0) -} - -// ListObjectsV2Pages mocks base method. -func (m *MockS3API) ListObjectsV2Pages(arg0 *s3.ListObjectsV2Input, arg1 func(*s3.ListObjectsV2Output, bool) bool) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListObjectsV2Pages", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// ListObjectsV2Pages indicates an expected call of ListObjectsV2Pages. -func (mr *MockS3APIMockRecorder) ListObjectsV2Pages(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjectsV2Pages", reflect.TypeOf((*MockS3API)(nil).ListObjectsV2Pages), arg0, arg1) -} - -// ListObjectsV2PagesWithContext mocks base method. 
-func (m *MockS3API) ListObjectsV2PagesWithContext(arg0 aws.Context, arg1 *s3.ListObjectsV2Input, arg2 func(*s3.ListObjectsV2Output, bool) bool, arg3 ...request.Option) error { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1, arg2} - for _, a := range arg3 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "ListObjectsV2PagesWithContext", varargs...) - ret0, _ := ret[0].(error) - return ret0 -} - -// ListObjectsV2PagesWithContext indicates an expected call of ListObjectsV2PagesWithContext. -func (mr *MockS3APIMockRecorder) ListObjectsV2PagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1, arg2}, arg3...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjectsV2PagesWithContext", reflect.TypeOf((*MockS3API)(nil).ListObjectsV2PagesWithContext), varargs...) -} - -// ListObjectsV2Request mocks base method. -func (m *MockS3API) ListObjectsV2Request(arg0 *s3.ListObjectsV2Input) (*request.Request, *s3.ListObjectsV2Output) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListObjectsV2Request", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.ListObjectsV2Output) - return ret0, ret1 -} - -// ListObjectsV2Request indicates an expected call of ListObjectsV2Request. -func (mr *MockS3APIMockRecorder) ListObjectsV2Request(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjectsV2Request", reflect.TypeOf((*MockS3API)(nil).ListObjectsV2Request), arg0) -} - -// ListObjectsV2WithContext mocks base method. -func (m *MockS3API) ListObjectsV2WithContext(arg0 aws.Context, arg1 *s3.ListObjectsV2Input, arg2 ...request.Option) (*s3.ListObjectsV2Output, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "ListObjectsV2WithContext", varargs...) - ret0, _ := ret[0].(*s3.ListObjectsV2Output) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListObjectsV2WithContext indicates an expected call of ListObjectsV2WithContext. -func (mr *MockS3APIMockRecorder) ListObjectsV2WithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjectsV2WithContext", reflect.TypeOf((*MockS3API)(nil).ListObjectsV2WithContext), varargs...) -} - -// ListObjectsWithContext mocks base method. -func (m *MockS3API) ListObjectsWithContext(arg0 aws.Context, arg1 *s3.ListObjectsInput, arg2 ...request.Option) (*s3.ListObjectsOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "ListObjectsWithContext", varargs...) - ret0, _ := ret[0].(*s3.ListObjectsOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListObjectsWithContext indicates an expected call of ListObjectsWithContext. -func (mr *MockS3APIMockRecorder) ListObjectsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjectsWithContext", reflect.TypeOf((*MockS3API)(nil).ListObjectsWithContext), varargs...) -} - -// ListParts mocks base method. 
-func (m *MockS3API) ListParts(arg0 *s3.ListPartsInput) (*s3.ListPartsOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListParts", arg0) - ret0, _ := ret[0].(*s3.ListPartsOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListParts indicates an expected call of ListParts. -func (mr *MockS3APIMockRecorder) ListParts(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListParts", reflect.TypeOf((*MockS3API)(nil).ListParts), arg0) -} - -// ListPartsPages mocks base method. -func (m *MockS3API) ListPartsPages(arg0 *s3.ListPartsInput, arg1 func(*s3.ListPartsOutput, bool) bool) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListPartsPages", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// ListPartsPages indicates an expected call of ListPartsPages. -func (mr *MockS3APIMockRecorder) ListPartsPages(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPartsPages", reflect.TypeOf((*MockS3API)(nil).ListPartsPages), arg0, arg1) -} - -// ListPartsPagesWithContext mocks base method. -func (m *MockS3API) ListPartsPagesWithContext(arg0 aws.Context, arg1 *s3.ListPartsInput, arg2 func(*s3.ListPartsOutput, bool) bool, arg3 ...request.Option) error { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1, arg2} - for _, a := range arg3 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "ListPartsPagesWithContext", varargs...) - ret0, _ := ret[0].(error) - return ret0 -} - -// ListPartsPagesWithContext indicates an expected call of ListPartsPagesWithContext. -func (mr *MockS3APIMockRecorder) ListPartsPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1, arg2}, arg3...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPartsPagesWithContext", reflect.TypeOf((*MockS3API)(nil).ListPartsPagesWithContext), varargs...) -} - -// ListPartsRequest mocks base method. -func (m *MockS3API) ListPartsRequest(arg0 *s3.ListPartsInput) (*request.Request, *s3.ListPartsOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListPartsRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.ListPartsOutput) - return ret0, ret1 -} - -// ListPartsRequest indicates an expected call of ListPartsRequest. -func (mr *MockS3APIMockRecorder) ListPartsRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPartsRequest", reflect.TypeOf((*MockS3API)(nil).ListPartsRequest), arg0) -} - -// ListPartsWithContext mocks base method. -func (m *MockS3API) ListPartsWithContext(arg0 aws.Context, arg1 *s3.ListPartsInput, arg2 ...request.Option) (*s3.ListPartsOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "ListPartsWithContext", varargs...) - ret0, _ := ret[0].(*s3.ListPartsOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListPartsWithContext indicates an expected call of ListPartsWithContext. -func (mr *MockS3APIMockRecorder) ListPartsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) 
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPartsWithContext", reflect.TypeOf((*MockS3API)(nil).ListPartsWithContext), varargs...) -} - -// PutBucketAccelerateConfiguration mocks base method. -func (m *MockS3API) PutBucketAccelerateConfiguration(arg0 *s3.PutBucketAccelerateConfigurationInput) (*s3.PutBucketAccelerateConfigurationOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketAccelerateConfiguration", arg0) - ret0, _ := ret[0].(*s3.PutBucketAccelerateConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketAccelerateConfiguration indicates an expected call of PutBucketAccelerateConfiguration. -func (mr *MockS3APIMockRecorder) PutBucketAccelerateConfiguration(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketAccelerateConfiguration", reflect.TypeOf((*MockS3API)(nil).PutBucketAccelerateConfiguration), arg0) -} - -// PutBucketAccelerateConfigurationRequest mocks base method. -func (m *MockS3API) PutBucketAccelerateConfigurationRequest(arg0 *s3.PutBucketAccelerateConfigurationInput) (*request.Request, *s3.PutBucketAccelerateConfigurationOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketAccelerateConfigurationRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.PutBucketAccelerateConfigurationOutput) - return ret0, ret1 -} - -// PutBucketAccelerateConfigurationRequest indicates an expected call of PutBucketAccelerateConfigurationRequest. -func (mr *MockS3APIMockRecorder) PutBucketAccelerateConfigurationRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketAccelerateConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketAccelerateConfigurationRequest), arg0) -} - -// PutBucketAccelerateConfigurationWithContext mocks base method. -func (m *MockS3API) PutBucketAccelerateConfigurationWithContext(arg0 aws.Context, arg1 *s3.PutBucketAccelerateConfigurationInput, arg2 ...request.Option) (*s3.PutBucketAccelerateConfigurationOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "PutBucketAccelerateConfigurationWithContext", varargs...) - ret0, _ := ret[0].(*s3.PutBucketAccelerateConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketAccelerateConfigurationWithContext indicates an expected call of PutBucketAccelerateConfigurationWithContext. -func (mr *MockS3APIMockRecorder) PutBucketAccelerateConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketAccelerateConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketAccelerateConfigurationWithContext), varargs...) -} - -// PutBucketAcl mocks base method. -func (m *MockS3API) PutBucketAcl(arg0 *s3.PutBucketAclInput) (*s3.PutBucketAclOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketAcl", arg0) - ret0, _ := ret[0].(*s3.PutBucketAclOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketAcl indicates an expected call of PutBucketAcl. 
-func (mr *MockS3APIMockRecorder) PutBucketAcl(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketAcl", reflect.TypeOf((*MockS3API)(nil).PutBucketAcl), arg0) -} - -// PutBucketAclRequest mocks base method. -func (m *MockS3API) PutBucketAclRequest(arg0 *s3.PutBucketAclInput) (*request.Request, *s3.PutBucketAclOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketAclRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.PutBucketAclOutput) - return ret0, ret1 -} - -// PutBucketAclRequest indicates an expected call of PutBucketAclRequest. -func (mr *MockS3APIMockRecorder) PutBucketAclRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketAclRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketAclRequest), arg0) -} - -// PutBucketAclWithContext mocks base method. -func (m *MockS3API) PutBucketAclWithContext(arg0 aws.Context, arg1 *s3.PutBucketAclInput, arg2 ...request.Option) (*s3.PutBucketAclOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "PutBucketAclWithContext", varargs...) - ret0, _ := ret[0].(*s3.PutBucketAclOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketAclWithContext indicates an expected call of PutBucketAclWithContext. -func (mr *MockS3APIMockRecorder) PutBucketAclWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketAclWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketAclWithContext), varargs...) -} - -// PutBucketAnalyticsConfiguration mocks base method. -func (m *MockS3API) PutBucketAnalyticsConfiguration(arg0 *s3.PutBucketAnalyticsConfigurationInput) (*s3.PutBucketAnalyticsConfigurationOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketAnalyticsConfiguration", arg0) - ret0, _ := ret[0].(*s3.PutBucketAnalyticsConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketAnalyticsConfiguration indicates an expected call of PutBucketAnalyticsConfiguration. -func (mr *MockS3APIMockRecorder) PutBucketAnalyticsConfiguration(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketAnalyticsConfiguration", reflect.TypeOf((*MockS3API)(nil).PutBucketAnalyticsConfiguration), arg0) -} - -// PutBucketAnalyticsConfigurationRequest mocks base method. -func (m *MockS3API) PutBucketAnalyticsConfigurationRequest(arg0 *s3.PutBucketAnalyticsConfigurationInput) (*request.Request, *s3.PutBucketAnalyticsConfigurationOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketAnalyticsConfigurationRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.PutBucketAnalyticsConfigurationOutput) - return ret0, ret1 -} - -// PutBucketAnalyticsConfigurationRequest indicates an expected call of PutBucketAnalyticsConfigurationRequest. 
-func (mr *MockS3APIMockRecorder) PutBucketAnalyticsConfigurationRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketAnalyticsConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketAnalyticsConfigurationRequest), arg0) -} - -// PutBucketAnalyticsConfigurationWithContext mocks base method. -func (m *MockS3API) PutBucketAnalyticsConfigurationWithContext(arg0 aws.Context, arg1 *s3.PutBucketAnalyticsConfigurationInput, arg2 ...request.Option) (*s3.PutBucketAnalyticsConfigurationOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "PutBucketAnalyticsConfigurationWithContext", varargs...) - ret0, _ := ret[0].(*s3.PutBucketAnalyticsConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketAnalyticsConfigurationWithContext indicates an expected call of PutBucketAnalyticsConfigurationWithContext. -func (mr *MockS3APIMockRecorder) PutBucketAnalyticsConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketAnalyticsConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketAnalyticsConfigurationWithContext), varargs...) -} - -// PutBucketCors mocks base method. -func (m *MockS3API) PutBucketCors(arg0 *s3.PutBucketCorsInput) (*s3.PutBucketCorsOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketCors", arg0) - ret0, _ := ret[0].(*s3.PutBucketCorsOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketCors indicates an expected call of PutBucketCors. -func (mr *MockS3APIMockRecorder) PutBucketCors(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketCors", reflect.TypeOf((*MockS3API)(nil).PutBucketCors), arg0) -} - -// PutBucketCorsRequest mocks base method. -func (m *MockS3API) PutBucketCorsRequest(arg0 *s3.PutBucketCorsInput) (*request.Request, *s3.PutBucketCorsOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketCorsRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.PutBucketCorsOutput) - return ret0, ret1 -} - -// PutBucketCorsRequest indicates an expected call of PutBucketCorsRequest. -func (mr *MockS3APIMockRecorder) PutBucketCorsRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketCorsRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketCorsRequest), arg0) -} - -// PutBucketCorsWithContext mocks base method. -func (m *MockS3API) PutBucketCorsWithContext(arg0 aws.Context, arg1 *s3.PutBucketCorsInput, arg2 ...request.Option) (*s3.PutBucketCorsOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "PutBucketCorsWithContext", varargs...) - ret0, _ := ret[0].(*s3.PutBucketCorsOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketCorsWithContext indicates an expected call of PutBucketCorsWithContext. -func (mr *MockS3APIMockRecorder) PutBucketCorsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) 
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketCorsWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketCorsWithContext), varargs...) -} - -// PutBucketEncryption mocks base method. -func (m *MockS3API) PutBucketEncryption(arg0 *s3.PutBucketEncryptionInput) (*s3.PutBucketEncryptionOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketEncryption", arg0) - ret0, _ := ret[0].(*s3.PutBucketEncryptionOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketEncryption indicates an expected call of PutBucketEncryption. -func (mr *MockS3APIMockRecorder) PutBucketEncryption(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketEncryption", reflect.TypeOf((*MockS3API)(nil).PutBucketEncryption), arg0) -} - -// PutBucketEncryptionRequest mocks base method. -func (m *MockS3API) PutBucketEncryptionRequest(arg0 *s3.PutBucketEncryptionInput) (*request.Request, *s3.PutBucketEncryptionOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketEncryptionRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.PutBucketEncryptionOutput) - return ret0, ret1 -} - -// PutBucketEncryptionRequest indicates an expected call of PutBucketEncryptionRequest. -func (mr *MockS3APIMockRecorder) PutBucketEncryptionRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketEncryptionRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketEncryptionRequest), arg0) -} - -// PutBucketEncryptionWithContext mocks base method. -func (m *MockS3API) PutBucketEncryptionWithContext(arg0 aws.Context, arg1 *s3.PutBucketEncryptionInput, arg2 ...request.Option) (*s3.PutBucketEncryptionOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "PutBucketEncryptionWithContext", varargs...) - ret0, _ := ret[0].(*s3.PutBucketEncryptionOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketEncryptionWithContext indicates an expected call of PutBucketEncryptionWithContext. -func (mr *MockS3APIMockRecorder) PutBucketEncryptionWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketEncryptionWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketEncryptionWithContext), varargs...) -} - -// PutBucketIntelligentTieringConfiguration mocks base method. -func (m *MockS3API) PutBucketIntelligentTieringConfiguration(arg0 *s3.PutBucketIntelligentTieringConfigurationInput) (*s3.PutBucketIntelligentTieringConfigurationOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketIntelligentTieringConfiguration", arg0) - ret0, _ := ret[0].(*s3.PutBucketIntelligentTieringConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketIntelligentTieringConfiguration indicates an expected call of PutBucketIntelligentTieringConfiguration. -func (mr *MockS3APIMockRecorder) PutBucketIntelligentTieringConfiguration(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketIntelligentTieringConfiguration", reflect.TypeOf((*MockS3API)(nil).PutBucketIntelligentTieringConfiguration), arg0) -} - -// PutBucketIntelligentTieringConfigurationRequest mocks base method. 
-func (m *MockS3API) PutBucketIntelligentTieringConfigurationRequest(arg0 *s3.PutBucketIntelligentTieringConfigurationInput) (*request.Request, *s3.PutBucketIntelligentTieringConfigurationOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketIntelligentTieringConfigurationRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.PutBucketIntelligentTieringConfigurationOutput) - return ret0, ret1 -} - -// PutBucketIntelligentTieringConfigurationRequest indicates an expected call of PutBucketIntelligentTieringConfigurationRequest. -func (mr *MockS3APIMockRecorder) PutBucketIntelligentTieringConfigurationRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketIntelligentTieringConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketIntelligentTieringConfigurationRequest), arg0) -} - -// PutBucketIntelligentTieringConfigurationWithContext mocks base method. -func (m *MockS3API) PutBucketIntelligentTieringConfigurationWithContext(arg0 aws.Context, arg1 *s3.PutBucketIntelligentTieringConfigurationInput, arg2 ...request.Option) (*s3.PutBucketIntelligentTieringConfigurationOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "PutBucketIntelligentTieringConfigurationWithContext", varargs...) - ret0, _ := ret[0].(*s3.PutBucketIntelligentTieringConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketIntelligentTieringConfigurationWithContext indicates an expected call of PutBucketIntelligentTieringConfigurationWithContext. -func (mr *MockS3APIMockRecorder) PutBucketIntelligentTieringConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketIntelligentTieringConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketIntelligentTieringConfigurationWithContext), varargs...) -} - -// PutBucketInventoryConfiguration mocks base method. -func (m *MockS3API) PutBucketInventoryConfiguration(arg0 *s3.PutBucketInventoryConfigurationInput) (*s3.PutBucketInventoryConfigurationOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketInventoryConfiguration", arg0) - ret0, _ := ret[0].(*s3.PutBucketInventoryConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketInventoryConfiguration indicates an expected call of PutBucketInventoryConfiguration. -func (mr *MockS3APIMockRecorder) PutBucketInventoryConfiguration(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketInventoryConfiguration", reflect.TypeOf((*MockS3API)(nil).PutBucketInventoryConfiguration), arg0) -} - -// PutBucketInventoryConfigurationRequest mocks base method. -func (m *MockS3API) PutBucketInventoryConfigurationRequest(arg0 *s3.PutBucketInventoryConfigurationInput) (*request.Request, *s3.PutBucketInventoryConfigurationOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketInventoryConfigurationRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.PutBucketInventoryConfigurationOutput) - return ret0, ret1 -} - -// PutBucketInventoryConfigurationRequest indicates an expected call of PutBucketInventoryConfigurationRequest. 
-func (mr *MockS3APIMockRecorder) PutBucketInventoryConfigurationRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketInventoryConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketInventoryConfigurationRequest), arg0) -} - -// PutBucketInventoryConfigurationWithContext mocks base method. -func (m *MockS3API) PutBucketInventoryConfigurationWithContext(arg0 aws.Context, arg1 *s3.PutBucketInventoryConfigurationInput, arg2 ...request.Option) (*s3.PutBucketInventoryConfigurationOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "PutBucketInventoryConfigurationWithContext", varargs...) - ret0, _ := ret[0].(*s3.PutBucketInventoryConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketInventoryConfigurationWithContext indicates an expected call of PutBucketInventoryConfigurationWithContext. -func (mr *MockS3APIMockRecorder) PutBucketInventoryConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketInventoryConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketInventoryConfigurationWithContext), varargs...) -} - -// PutBucketLifecycle mocks base method. -func (m *MockS3API) PutBucketLifecycle(arg0 *s3.PutBucketLifecycleInput) (*s3.PutBucketLifecycleOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketLifecycle", arg0) - ret0, _ := ret[0].(*s3.PutBucketLifecycleOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketLifecycle indicates an expected call of PutBucketLifecycle. -func (mr *MockS3APIMockRecorder) PutBucketLifecycle(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketLifecycle", reflect.TypeOf((*MockS3API)(nil).PutBucketLifecycle), arg0) -} - -// PutBucketLifecycleConfiguration mocks base method. -func (m *MockS3API) PutBucketLifecycleConfiguration(arg0 *s3.PutBucketLifecycleConfigurationInput) (*s3.PutBucketLifecycleConfigurationOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketLifecycleConfiguration", arg0) - ret0, _ := ret[0].(*s3.PutBucketLifecycleConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketLifecycleConfiguration indicates an expected call of PutBucketLifecycleConfiguration. -func (mr *MockS3APIMockRecorder) PutBucketLifecycleConfiguration(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketLifecycleConfiguration", reflect.TypeOf((*MockS3API)(nil).PutBucketLifecycleConfiguration), arg0) -} - -// PutBucketLifecycleConfigurationRequest mocks base method. -func (m *MockS3API) PutBucketLifecycleConfigurationRequest(arg0 *s3.PutBucketLifecycleConfigurationInput) (*request.Request, *s3.PutBucketLifecycleConfigurationOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketLifecycleConfigurationRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.PutBucketLifecycleConfigurationOutput) - return ret0, ret1 -} - -// PutBucketLifecycleConfigurationRequest indicates an expected call of PutBucketLifecycleConfigurationRequest. 
-func (mr *MockS3APIMockRecorder) PutBucketLifecycleConfigurationRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketLifecycleConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketLifecycleConfigurationRequest), arg0) -} - -// PutBucketLifecycleConfigurationWithContext mocks base method. -func (m *MockS3API) PutBucketLifecycleConfigurationWithContext(arg0 aws.Context, arg1 *s3.PutBucketLifecycleConfigurationInput, arg2 ...request.Option) (*s3.PutBucketLifecycleConfigurationOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "PutBucketLifecycleConfigurationWithContext", varargs...) - ret0, _ := ret[0].(*s3.PutBucketLifecycleConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketLifecycleConfigurationWithContext indicates an expected call of PutBucketLifecycleConfigurationWithContext. -func (mr *MockS3APIMockRecorder) PutBucketLifecycleConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketLifecycleConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketLifecycleConfigurationWithContext), varargs...) -} - -// PutBucketLifecycleRequest mocks base method. -func (m *MockS3API) PutBucketLifecycleRequest(arg0 *s3.PutBucketLifecycleInput) (*request.Request, *s3.PutBucketLifecycleOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketLifecycleRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.PutBucketLifecycleOutput) - return ret0, ret1 -} - -// PutBucketLifecycleRequest indicates an expected call of PutBucketLifecycleRequest. -func (mr *MockS3APIMockRecorder) PutBucketLifecycleRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketLifecycleRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketLifecycleRequest), arg0) -} - -// PutBucketLifecycleWithContext mocks base method. -func (m *MockS3API) PutBucketLifecycleWithContext(arg0 aws.Context, arg1 *s3.PutBucketLifecycleInput, arg2 ...request.Option) (*s3.PutBucketLifecycleOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "PutBucketLifecycleWithContext", varargs...) - ret0, _ := ret[0].(*s3.PutBucketLifecycleOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketLifecycleWithContext indicates an expected call of PutBucketLifecycleWithContext. -func (mr *MockS3APIMockRecorder) PutBucketLifecycleWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketLifecycleWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketLifecycleWithContext), varargs...) -} - -// PutBucketLogging mocks base method. 
-func (m *MockS3API) PutBucketLogging(arg0 *s3.PutBucketLoggingInput) (*s3.PutBucketLoggingOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketLogging", arg0) - ret0, _ := ret[0].(*s3.PutBucketLoggingOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketLogging indicates an expected call of PutBucketLogging. -func (mr *MockS3APIMockRecorder) PutBucketLogging(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketLogging", reflect.TypeOf((*MockS3API)(nil).PutBucketLogging), arg0) -} - -// PutBucketLoggingRequest mocks base method. -func (m *MockS3API) PutBucketLoggingRequest(arg0 *s3.PutBucketLoggingInput) (*request.Request, *s3.PutBucketLoggingOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketLoggingRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.PutBucketLoggingOutput) - return ret0, ret1 -} - -// PutBucketLoggingRequest indicates an expected call of PutBucketLoggingRequest. -func (mr *MockS3APIMockRecorder) PutBucketLoggingRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketLoggingRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketLoggingRequest), arg0) -} - -// PutBucketLoggingWithContext mocks base method. -func (m *MockS3API) PutBucketLoggingWithContext(arg0 aws.Context, arg1 *s3.PutBucketLoggingInput, arg2 ...request.Option) (*s3.PutBucketLoggingOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "PutBucketLoggingWithContext", varargs...) - ret0, _ := ret[0].(*s3.PutBucketLoggingOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketLoggingWithContext indicates an expected call of PutBucketLoggingWithContext. -func (mr *MockS3APIMockRecorder) PutBucketLoggingWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketLoggingWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketLoggingWithContext), varargs...) -} - -// PutBucketMetricsConfiguration mocks base method. -func (m *MockS3API) PutBucketMetricsConfiguration(arg0 *s3.PutBucketMetricsConfigurationInput) (*s3.PutBucketMetricsConfigurationOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketMetricsConfiguration", arg0) - ret0, _ := ret[0].(*s3.PutBucketMetricsConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketMetricsConfiguration indicates an expected call of PutBucketMetricsConfiguration. -func (mr *MockS3APIMockRecorder) PutBucketMetricsConfiguration(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketMetricsConfiguration", reflect.TypeOf((*MockS3API)(nil).PutBucketMetricsConfiguration), arg0) -} - -// PutBucketMetricsConfigurationRequest mocks base method. 
-func (m *MockS3API) PutBucketMetricsConfigurationRequest(arg0 *s3.PutBucketMetricsConfigurationInput) (*request.Request, *s3.PutBucketMetricsConfigurationOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketMetricsConfigurationRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.PutBucketMetricsConfigurationOutput) - return ret0, ret1 -} - -// PutBucketMetricsConfigurationRequest indicates an expected call of PutBucketMetricsConfigurationRequest. -func (mr *MockS3APIMockRecorder) PutBucketMetricsConfigurationRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketMetricsConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketMetricsConfigurationRequest), arg0) -} - -// PutBucketMetricsConfigurationWithContext mocks base method. -func (m *MockS3API) PutBucketMetricsConfigurationWithContext(arg0 aws.Context, arg1 *s3.PutBucketMetricsConfigurationInput, arg2 ...request.Option) (*s3.PutBucketMetricsConfigurationOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "PutBucketMetricsConfigurationWithContext", varargs...) - ret0, _ := ret[0].(*s3.PutBucketMetricsConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketMetricsConfigurationWithContext indicates an expected call of PutBucketMetricsConfigurationWithContext. -func (mr *MockS3APIMockRecorder) PutBucketMetricsConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketMetricsConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketMetricsConfigurationWithContext), varargs...) -} - -// PutBucketNotification mocks base method. -func (m *MockS3API) PutBucketNotification(arg0 *s3.PutBucketNotificationInput) (*s3.PutBucketNotificationOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketNotification", arg0) - ret0, _ := ret[0].(*s3.PutBucketNotificationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketNotification indicates an expected call of PutBucketNotification. -func (mr *MockS3APIMockRecorder) PutBucketNotification(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketNotification", reflect.TypeOf((*MockS3API)(nil).PutBucketNotification), arg0) -} - -// PutBucketNotificationConfiguration mocks base method. -func (m *MockS3API) PutBucketNotificationConfiguration(arg0 *s3.PutBucketNotificationConfigurationInput) (*s3.PutBucketNotificationConfigurationOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketNotificationConfiguration", arg0) - ret0, _ := ret[0].(*s3.PutBucketNotificationConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketNotificationConfiguration indicates an expected call of PutBucketNotificationConfiguration. -func (mr *MockS3APIMockRecorder) PutBucketNotificationConfiguration(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketNotificationConfiguration", reflect.TypeOf((*MockS3API)(nil).PutBucketNotificationConfiguration), arg0) -} - -// PutBucketNotificationConfigurationRequest mocks base method. 
-func (m *MockS3API) PutBucketNotificationConfigurationRequest(arg0 *s3.PutBucketNotificationConfigurationInput) (*request.Request, *s3.PutBucketNotificationConfigurationOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketNotificationConfigurationRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.PutBucketNotificationConfigurationOutput) - return ret0, ret1 -} - -// PutBucketNotificationConfigurationRequest indicates an expected call of PutBucketNotificationConfigurationRequest. -func (mr *MockS3APIMockRecorder) PutBucketNotificationConfigurationRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketNotificationConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketNotificationConfigurationRequest), arg0) -} - -// PutBucketNotificationConfigurationWithContext mocks base method. -func (m *MockS3API) PutBucketNotificationConfigurationWithContext(arg0 aws.Context, arg1 *s3.PutBucketNotificationConfigurationInput, arg2 ...request.Option) (*s3.PutBucketNotificationConfigurationOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "PutBucketNotificationConfigurationWithContext", varargs...) - ret0, _ := ret[0].(*s3.PutBucketNotificationConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketNotificationConfigurationWithContext indicates an expected call of PutBucketNotificationConfigurationWithContext. -func (mr *MockS3APIMockRecorder) PutBucketNotificationConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketNotificationConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketNotificationConfigurationWithContext), varargs...) -} - -// PutBucketNotificationRequest mocks base method. -func (m *MockS3API) PutBucketNotificationRequest(arg0 *s3.PutBucketNotificationInput) (*request.Request, *s3.PutBucketNotificationOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketNotificationRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.PutBucketNotificationOutput) - return ret0, ret1 -} - -// PutBucketNotificationRequest indicates an expected call of PutBucketNotificationRequest. -func (mr *MockS3APIMockRecorder) PutBucketNotificationRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketNotificationRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketNotificationRequest), arg0) -} - -// PutBucketNotificationWithContext mocks base method. -func (m *MockS3API) PutBucketNotificationWithContext(arg0 aws.Context, arg1 *s3.PutBucketNotificationInput, arg2 ...request.Option) (*s3.PutBucketNotificationOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "PutBucketNotificationWithContext", varargs...) - ret0, _ := ret[0].(*s3.PutBucketNotificationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketNotificationWithContext indicates an expected call of PutBucketNotificationWithContext. 
-func (mr *MockS3APIMockRecorder) PutBucketNotificationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketNotificationWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketNotificationWithContext), varargs...) -} - -// PutBucketOwnershipControls mocks base method. -func (m *MockS3API) PutBucketOwnershipControls(arg0 *s3.PutBucketOwnershipControlsInput) (*s3.PutBucketOwnershipControlsOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketOwnershipControls", arg0) - ret0, _ := ret[0].(*s3.PutBucketOwnershipControlsOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketOwnershipControls indicates an expected call of PutBucketOwnershipControls. -func (mr *MockS3APIMockRecorder) PutBucketOwnershipControls(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketOwnershipControls", reflect.TypeOf((*MockS3API)(nil).PutBucketOwnershipControls), arg0) -} - -// PutBucketOwnershipControlsRequest mocks base method. -func (m *MockS3API) PutBucketOwnershipControlsRequest(arg0 *s3.PutBucketOwnershipControlsInput) (*request.Request, *s3.PutBucketOwnershipControlsOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketOwnershipControlsRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.PutBucketOwnershipControlsOutput) - return ret0, ret1 -} - -// PutBucketOwnershipControlsRequest indicates an expected call of PutBucketOwnershipControlsRequest. -func (mr *MockS3APIMockRecorder) PutBucketOwnershipControlsRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketOwnershipControlsRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketOwnershipControlsRequest), arg0) -} - -// PutBucketOwnershipControlsWithContext mocks base method. -func (m *MockS3API) PutBucketOwnershipControlsWithContext(arg0 aws.Context, arg1 *s3.PutBucketOwnershipControlsInput, arg2 ...request.Option) (*s3.PutBucketOwnershipControlsOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "PutBucketOwnershipControlsWithContext", varargs...) - ret0, _ := ret[0].(*s3.PutBucketOwnershipControlsOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketOwnershipControlsWithContext indicates an expected call of PutBucketOwnershipControlsWithContext. -func (mr *MockS3APIMockRecorder) PutBucketOwnershipControlsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketOwnershipControlsWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketOwnershipControlsWithContext), varargs...) -} - -// PutBucketPolicy mocks base method. -func (m *MockS3API) PutBucketPolicy(arg0 *s3.PutBucketPolicyInput) (*s3.PutBucketPolicyOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketPolicy", arg0) - ret0, _ := ret[0].(*s3.PutBucketPolicyOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketPolicy indicates an expected call of PutBucketPolicy. 
-func (mr *MockS3APIMockRecorder) PutBucketPolicy(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketPolicy", reflect.TypeOf((*MockS3API)(nil).PutBucketPolicy), arg0) -} - -// PutBucketPolicyRequest mocks base method. -func (m *MockS3API) PutBucketPolicyRequest(arg0 *s3.PutBucketPolicyInput) (*request.Request, *s3.PutBucketPolicyOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketPolicyRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.PutBucketPolicyOutput) - return ret0, ret1 -} - -// PutBucketPolicyRequest indicates an expected call of PutBucketPolicyRequest. -func (mr *MockS3APIMockRecorder) PutBucketPolicyRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketPolicyRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketPolicyRequest), arg0) -} - -// PutBucketPolicyWithContext mocks base method. -func (m *MockS3API) PutBucketPolicyWithContext(arg0 aws.Context, arg1 *s3.PutBucketPolicyInput, arg2 ...request.Option) (*s3.PutBucketPolicyOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "PutBucketPolicyWithContext", varargs...) - ret0, _ := ret[0].(*s3.PutBucketPolicyOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketPolicyWithContext indicates an expected call of PutBucketPolicyWithContext. -func (mr *MockS3APIMockRecorder) PutBucketPolicyWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketPolicyWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketPolicyWithContext), varargs...) -} - -// PutBucketReplication mocks base method. -func (m *MockS3API) PutBucketReplication(arg0 *s3.PutBucketReplicationInput) (*s3.PutBucketReplicationOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketReplication", arg0) - ret0, _ := ret[0].(*s3.PutBucketReplicationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketReplication indicates an expected call of PutBucketReplication. -func (mr *MockS3APIMockRecorder) PutBucketReplication(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketReplication", reflect.TypeOf((*MockS3API)(nil).PutBucketReplication), arg0) -} - -// PutBucketReplicationRequest mocks base method. -func (m *MockS3API) PutBucketReplicationRequest(arg0 *s3.PutBucketReplicationInput) (*request.Request, *s3.PutBucketReplicationOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketReplicationRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.PutBucketReplicationOutput) - return ret0, ret1 -} - -// PutBucketReplicationRequest indicates an expected call of PutBucketReplicationRequest. -func (mr *MockS3APIMockRecorder) PutBucketReplicationRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketReplicationRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketReplicationRequest), arg0) -} - -// PutBucketReplicationWithContext mocks base method. 
-func (m *MockS3API) PutBucketReplicationWithContext(arg0 aws.Context, arg1 *s3.PutBucketReplicationInput, arg2 ...request.Option) (*s3.PutBucketReplicationOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "PutBucketReplicationWithContext", varargs...) - ret0, _ := ret[0].(*s3.PutBucketReplicationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketReplicationWithContext indicates an expected call of PutBucketReplicationWithContext. -func (mr *MockS3APIMockRecorder) PutBucketReplicationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketReplicationWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketReplicationWithContext), varargs...) -} - -// PutBucketRequestPayment mocks base method. -func (m *MockS3API) PutBucketRequestPayment(arg0 *s3.PutBucketRequestPaymentInput) (*s3.PutBucketRequestPaymentOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketRequestPayment", arg0) - ret0, _ := ret[0].(*s3.PutBucketRequestPaymentOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketRequestPayment indicates an expected call of PutBucketRequestPayment. -func (mr *MockS3APIMockRecorder) PutBucketRequestPayment(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketRequestPayment", reflect.TypeOf((*MockS3API)(nil).PutBucketRequestPayment), arg0) -} - -// PutBucketRequestPaymentRequest mocks base method. -func (m *MockS3API) PutBucketRequestPaymentRequest(arg0 *s3.PutBucketRequestPaymentInput) (*request.Request, *s3.PutBucketRequestPaymentOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketRequestPaymentRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.PutBucketRequestPaymentOutput) - return ret0, ret1 -} - -// PutBucketRequestPaymentRequest indicates an expected call of PutBucketRequestPaymentRequest. -func (mr *MockS3APIMockRecorder) PutBucketRequestPaymentRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketRequestPaymentRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketRequestPaymentRequest), arg0) -} - -// PutBucketRequestPaymentWithContext mocks base method. -func (m *MockS3API) PutBucketRequestPaymentWithContext(arg0 aws.Context, arg1 *s3.PutBucketRequestPaymentInput, arg2 ...request.Option) (*s3.PutBucketRequestPaymentOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "PutBucketRequestPaymentWithContext", varargs...) - ret0, _ := ret[0].(*s3.PutBucketRequestPaymentOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketRequestPaymentWithContext indicates an expected call of PutBucketRequestPaymentWithContext. -func (mr *MockS3APIMockRecorder) PutBucketRequestPaymentWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketRequestPaymentWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketRequestPaymentWithContext), varargs...) -} - -// PutBucketTagging mocks base method. 
-func (m *MockS3API) PutBucketTagging(arg0 *s3.PutBucketTaggingInput) (*s3.PutBucketTaggingOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketTagging", arg0) - ret0, _ := ret[0].(*s3.PutBucketTaggingOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketTagging indicates an expected call of PutBucketTagging. -func (mr *MockS3APIMockRecorder) PutBucketTagging(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketTagging", reflect.TypeOf((*MockS3API)(nil).PutBucketTagging), arg0) -} - -// PutBucketTaggingRequest mocks base method. -func (m *MockS3API) PutBucketTaggingRequest(arg0 *s3.PutBucketTaggingInput) (*request.Request, *s3.PutBucketTaggingOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketTaggingRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.PutBucketTaggingOutput) - return ret0, ret1 -} - -// PutBucketTaggingRequest indicates an expected call of PutBucketTaggingRequest. -func (mr *MockS3APIMockRecorder) PutBucketTaggingRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketTaggingRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketTaggingRequest), arg0) -} - -// PutBucketTaggingWithContext mocks base method. -func (m *MockS3API) PutBucketTaggingWithContext(arg0 aws.Context, arg1 *s3.PutBucketTaggingInput, arg2 ...request.Option) (*s3.PutBucketTaggingOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "PutBucketTaggingWithContext", varargs...) - ret0, _ := ret[0].(*s3.PutBucketTaggingOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketTaggingWithContext indicates an expected call of PutBucketTaggingWithContext. -func (mr *MockS3APIMockRecorder) PutBucketTaggingWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketTaggingWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketTaggingWithContext), varargs...) -} - -// PutBucketVersioning mocks base method. -func (m *MockS3API) PutBucketVersioning(arg0 *s3.PutBucketVersioningInput) (*s3.PutBucketVersioningOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketVersioning", arg0) - ret0, _ := ret[0].(*s3.PutBucketVersioningOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketVersioning indicates an expected call of PutBucketVersioning. -func (mr *MockS3APIMockRecorder) PutBucketVersioning(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketVersioning", reflect.TypeOf((*MockS3API)(nil).PutBucketVersioning), arg0) -} - -// PutBucketVersioningRequest mocks base method. -func (m *MockS3API) PutBucketVersioningRequest(arg0 *s3.PutBucketVersioningInput) (*request.Request, *s3.PutBucketVersioningOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketVersioningRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.PutBucketVersioningOutput) - return ret0, ret1 -} - -// PutBucketVersioningRequest indicates an expected call of PutBucketVersioningRequest. 
-func (mr *MockS3APIMockRecorder) PutBucketVersioningRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketVersioningRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketVersioningRequest), arg0) -} - -// PutBucketVersioningWithContext mocks base method. -func (m *MockS3API) PutBucketVersioningWithContext(arg0 aws.Context, arg1 *s3.PutBucketVersioningInput, arg2 ...request.Option) (*s3.PutBucketVersioningOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "PutBucketVersioningWithContext", varargs...) - ret0, _ := ret[0].(*s3.PutBucketVersioningOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketVersioningWithContext indicates an expected call of PutBucketVersioningWithContext. -func (mr *MockS3APIMockRecorder) PutBucketVersioningWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketVersioningWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketVersioningWithContext), varargs...) -} - -// PutBucketWebsite mocks base method. -func (m *MockS3API) PutBucketWebsite(arg0 *s3.PutBucketWebsiteInput) (*s3.PutBucketWebsiteOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketWebsite", arg0) - ret0, _ := ret[0].(*s3.PutBucketWebsiteOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketWebsite indicates an expected call of PutBucketWebsite. -func (mr *MockS3APIMockRecorder) PutBucketWebsite(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketWebsite", reflect.TypeOf((*MockS3API)(nil).PutBucketWebsite), arg0) -} - -// PutBucketWebsiteRequest mocks base method. -func (m *MockS3API) PutBucketWebsiteRequest(arg0 *s3.PutBucketWebsiteInput) (*request.Request, *s3.PutBucketWebsiteOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketWebsiteRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.PutBucketWebsiteOutput) - return ret0, ret1 -} - -// PutBucketWebsiteRequest indicates an expected call of PutBucketWebsiteRequest. -func (mr *MockS3APIMockRecorder) PutBucketWebsiteRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketWebsiteRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketWebsiteRequest), arg0) -} - -// PutBucketWebsiteWithContext mocks base method. -func (m *MockS3API) PutBucketWebsiteWithContext(arg0 aws.Context, arg1 *s3.PutBucketWebsiteInput, arg2 ...request.Option) (*s3.PutBucketWebsiteOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "PutBucketWebsiteWithContext", varargs...) - ret0, _ := ret[0].(*s3.PutBucketWebsiteOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketWebsiteWithContext indicates an expected call of PutBucketWebsiteWithContext. -func (mr *MockS3APIMockRecorder) PutBucketWebsiteWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) 
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketWebsiteWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketWebsiteWithContext), varargs...) -} - -// PutObject mocks base method. -func (m *MockS3API) PutObject(arg0 *s3.PutObjectInput) (*s3.PutObjectOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutObject", arg0) - ret0, _ := ret[0].(*s3.PutObjectOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutObject indicates an expected call of PutObject. -func (mr *MockS3APIMockRecorder) PutObject(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObject", reflect.TypeOf((*MockS3API)(nil).PutObject), arg0) -} - -// PutObjectAcl mocks base method. -func (m *MockS3API) PutObjectAcl(arg0 *s3.PutObjectAclInput) (*s3.PutObjectAclOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutObjectAcl", arg0) - ret0, _ := ret[0].(*s3.PutObjectAclOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutObjectAcl indicates an expected call of PutObjectAcl. -func (mr *MockS3APIMockRecorder) PutObjectAcl(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectAcl", reflect.TypeOf((*MockS3API)(nil).PutObjectAcl), arg0) -} - -// PutObjectAclRequest mocks base method. -func (m *MockS3API) PutObjectAclRequest(arg0 *s3.PutObjectAclInput) (*request.Request, *s3.PutObjectAclOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutObjectAclRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.PutObjectAclOutput) - return ret0, ret1 -} - -// PutObjectAclRequest indicates an expected call of PutObjectAclRequest. -func (mr *MockS3APIMockRecorder) PutObjectAclRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectAclRequest", reflect.TypeOf((*MockS3API)(nil).PutObjectAclRequest), arg0) -} - -// PutObjectAclWithContext mocks base method. -func (m *MockS3API) PutObjectAclWithContext(arg0 aws.Context, arg1 *s3.PutObjectAclInput, arg2 ...request.Option) (*s3.PutObjectAclOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "PutObjectAclWithContext", varargs...) - ret0, _ := ret[0].(*s3.PutObjectAclOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutObjectAclWithContext indicates an expected call of PutObjectAclWithContext. -func (mr *MockS3APIMockRecorder) PutObjectAclWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectAclWithContext", reflect.TypeOf((*MockS3API)(nil).PutObjectAclWithContext), varargs...) -} - -// PutObjectLegalHold mocks base method. -func (m *MockS3API) PutObjectLegalHold(arg0 *s3.PutObjectLegalHoldInput) (*s3.PutObjectLegalHoldOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutObjectLegalHold", arg0) - ret0, _ := ret[0].(*s3.PutObjectLegalHoldOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutObjectLegalHold indicates an expected call of PutObjectLegalHold. 
-func (mr *MockS3APIMockRecorder) PutObjectLegalHold(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectLegalHold", reflect.TypeOf((*MockS3API)(nil).PutObjectLegalHold), arg0) -} - -// PutObjectLegalHoldRequest mocks base method. -func (m *MockS3API) PutObjectLegalHoldRequest(arg0 *s3.PutObjectLegalHoldInput) (*request.Request, *s3.PutObjectLegalHoldOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutObjectLegalHoldRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.PutObjectLegalHoldOutput) - return ret0, ret1 -} - -// PutObjectLegalHoldRequest indicates an expected call of PutObjectLegalHoldRequest. -func (mr *MockS3APIMockRecorder) PutObjectLegalHoldRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectLegalHoldRequest", reflect.TypeOf((*MockS3API)(nil).PutObjectLegalHoldRequest), arg0) -} - -// PutObjectLegalHoldWithContext mocks base method. -func (m *MockS3API) PutObjectLegalHoldWithContext(arg0 aws.Context, arg1 *s3.PutObjectLegalHoldInput, arg2 ...request.Option) (*s3.PutObjectLegalHoldOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "PutObjectLegalHoldWithContext", varargs...) - ret0, _ := ret[0].(*s3.PutObjectLegalHoldOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutObjectLegalHoldWithContext indicates an expected call of PutObjectLegalHoldWithContext. -func (mr *MockS3APIMockRecorder) PutObjectLegalHoldWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectLegalHoldWithContext", reflect.TypeOf((*MockS3API)(nil).PutObjectLegalHoldWithContext), varargs...) -} - -// PutObjectLockConfiguration mocks base method. -func (m *MockS3API) PutObjectLockConfiguration(arg0 *s3.PutObjectLockConfigurationInput) (*s3.PutObjectLockConfigurationOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutObjectLockConfiguration", arg0) - ret0, _ := ret[0].(*s3.PutObjectLockConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutObjectLockConfiguration indicates an expected call of PutObjectLockConfiguration. -func (mr *MockS3APIMockRecorder) PutObjectLockConfiguration(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectLockConfiguration", reflect.TypeOf((*MockS3API)(nil).PutObjectLockConfiguration), arg0) -} - -// PutObjectLockConfigurationRequest mocks base method. -func (m *MockS3API) PutObjectLockConfigurationRequest(arg0 *s3.PutObjectLockConfigurationInput) (*request.Request, *s3.PutObjectLockConfigurationOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutObjectLockConfigurationRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.PutObjectLockConfigurationOutput) - return ret0, ret1 -} - -// PutObjectLockConfigurationRequest indicates an expected call of PutObjectLockConfigurationRequest. 
-func (mr *MockS3APIMockRecorder) PutObjectLockConfigurationRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectLockConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).PutObjectLockConfigurationRequest), arg0) -} - -// PutObjectLockConfigurationWithContext mocks base method. -func (m *MockS3API) PutObjectLockConfigurationWithContext(arg0 aws.Context, arg1 *s3.PutObjectLockConfigurationInput, arg2 ...request.Option) (*s3.PutObjectLockConfigurationOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "PutObjectLockConfigurationWithContext", varargs...) - ret0, _ := ret[0].(*s3.PutObjectLockConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutObjectLockConfigurationWithContext indicates an expected call of PutObjectLockConfigurationWithContext. -func (mr *MockS3APIMockRecorder) PutObjectLockConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectLockConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).PutObjectLockConfigurationWithContext), varargs...) -} - -// PutObjectRequest mocks base method. -func (m *MockS3API) PutObjectRequest(arg0 *s3.PutObjectInput) (*request.Request, *s3.PutObjectOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutObjectRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.PutObjectOutput) - return ret0, ret1 -} - -// PutObjectRequest indicates an expected call of PutObjectRequest. -func (mr *MockS3APIMockRecorder) PutObjectRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectRequest", reflect.TypeOf((*MockS3API)(nil).PutObjectRequest), arg0) -} - -// PutObjectRetention mocks base method. -func (m *MockS3API) PutObjectRetention(arg0 *s3.PutObjectRetentionInput) (*s3.PutObjectRetentionOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutObjectRetention", arg0) - ret0, _ := ret[0].(*s3.PutObjectRetentionOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutObjectRetention indicates an expected call of PutObjectRetention. -func (mr *MockS3APIMockRecorder) PutObjectRetention(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectRetention", reflect.TypeOf((*MockS3API)(nil).PutObjectRetention), arg0) -} - -// PutObjectRetentionRequest mocks base method. -func (m *MockS3API) PutObjectRetentionRequest(arg0 *s3.PutObjectRetentionInput) (*request.Request, *s3.PutObjectRetentionOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutObjectRetentionRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.PutObjectRetentionOutput) - return ret0, ret1 -} - -// PutObjectRetentionRequest indicates an expected call of PutObjectRetentionRequest. -func (mr *MockS3APIMockRecorder) PutObjectRetentionRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectRetentionRequest", reflect.TypeOf((*MockS3API)(nil).PutObjectRetentionRequest), arg0) -} - -// PutObjectRetentionWithContext mocks base method. 
-func (m *MockS3API) PutObjectRetentionWithContext(arg0 aws.Context, arg1 *s3.PutObjectRetentionInput, arg2 ...request.Option) (*s3.PutObjectRetentionOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "PutObjectRetentionWithContext", varargs...) - ret0, _ := ret[0].(*s3.PutObjectRetentionOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutObjectRetentionWithContext indicates an expected call of PutObjectRetentionWithContext. -func (mr *MockS3APIMockRecorder) PutObjectRetentionWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectRetentionWithContext", reflect.TypeOf((*MockS3API)(nil).PutObjectRetentionWithContext), varargs...) -} - -// PutObjectTagging mocks base method. -func (m *MockS3API) PutObjectTagging(arg0 *s3.PutObjectTaggingInput) (*s3.PutObjectTaggingOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutObjectTagging", arg0) - ret0, _ := ret[0].(*s3.PutObjectTaggingOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutObjectTagging indicates an expected call of PutObjectTagging. -func (mr *MockS3APIMockRecorder) PutObjectTagging(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectTagging", reflect.TypeOf((*MockS3API)(nil).PutObjectTagging), arg0) -} - -// PutObjectTaggingRequest mocks base method. -func (m *MockS3API) PutObjectTaggingRequest(arg0 *s3.PutObjectTaggingInput) (*request.Request, *s3.PutObjectTaggingOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutObjectTaggingRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.PutObjectTaggingOutput) - return ret0, ret1 -} - -// PutObjectTaggingRequest indicates an expected call of PutObjectTaggingRequest. -func (mr *MockS3APIMockRecorder) PutObjectTaggingRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectTaggingRequest", reflect.TypeOf((*MockS3API)(nil).PutObjectTaggingRequest), arg0) -} - -// PutObjectTaggingWithContext mocks base method. -func (m *MockS3API) PutObjectTaggingWithContext(arg0 aws.Context, arg1 *s3.PutObjectTaggingInput, arg2 ...request.Option) (*s3.PutObjectTaggingOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "PutObjectTaggingWithContext", varargs...) - ret0, _ := ret[0].(*s3.PutObjectTaggingOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutObjectTaggingWithContext indicates an expected call of PutObjectTaggingWithContext. -func (mr *MockS3APIMockRecorder) PutObjectTaggingWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectTaggingWithContext", reflect.TypeOf((*MockS3API)(nil).PutObjectTaggingWithContext), varargs...) -} - -// PutObjectWithContext mocks base method. 
-func (m *MockS3API) PutObjectWithContext(arg0 aws.Context, arg1 *s3.PutObjectInput, arg2 ...request.Option) (*s3.PutObjectOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "PutObjectWithContext", varargs...) - ret0, _ := ret[0].(*s3.PutObjectOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutObjectWithContext indicates an expected call of PutObjectWithContext. -func (mr *MockS3APIMockRecorder) PutObjectWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectWithContext", reflect.TypeOf((*MockS3API)(nil).PutObjectWithContext), varargs...) -} - -// PutPublicAccessBlock mocks base method. -func (m *MockS3API) PutPublicAccessBlock(arg0 *s3.PutPublicAccessBlockInput) (*s3.PutPublicAccessBlockOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutPublicAccessBlock", arg0) - ret0, _ := ret[0].(*s3.PutPublicAccessBlockOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutPublicAccessBlock indicates an expected call of PutPublicAccessBlock. -func (mr *MockS3APIMockRecorder) PutPublicAccessBlock(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutPublicAccessBlock", reflect.TypeOf((*MockS3API)(nil).PutPublicAccessBlock), arg0) -} - -// PutPublicAccessBlockRequest mocks base method. -func (m *MockS3API) PutPublicAccessBlockRequest(arg0 *s3.PutPublicAccessBlockInput) (*request.Request, *s3.PutPublicAccessBlockOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutPublicAccessBlockRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.PutPublicAccessBlockOutput) - return ret0, ret1 -} - -// PutPublicAccessBlockRequest indicates an expected call of PutPublicAccessBlockRequest. -func (mr *MockS3APIMockRecorder) PutPublicAccessBlockRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutPublicAccessBlockRequest", reflect.TypeOf((*MockS3API)(nil).PutPublicAccessBlockRequest), arg0) -} - -// PutPublicAccessBlockWithContext mocks base method. -func (m *MockS3API) PutPublicAccessBlockWithContext(arg0 aws.Context, arg1 *s3.PutPublicAccessBlockInput, arg2 ...request.Option) (*s3.PutPublicAccessBlockOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "PutPublicAccessBlockWithContext", varargs...) - ret0, _ := ret[0].(*s3.PutPublicAccessBlockOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutPublicAccessBlockWithContext indicates an expected call of PutPublicAccessBlockWithContext. -func (mr *MockS3APIMockRecorder) PutPublicAccessBlockWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutPublicAccessBlockWithContext", reflect.TypeOf((*MockS3API)(nil).PutPublicAccessBlockWithContext), varargs...) -} - -// RestoreObject mocks base method. 
-func (m *MockS3API) RestoreObject(arg0 *s3.RestoreObjectInput) (*s3.RestoreObjectOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RestoreObject", arg0) - ret0, _ := ret[0].(*s3.RestoreObjectOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// RestoreObject indicates an expected call of RestoreObject. -func (mr *MockS3APIMockRecorder) RestoreObject(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RestoreObject", reflect.TypeOf((*MockS3API)(nil).RestoreObject), arg0) -} - -// RestoreObjectRequest mocks base method. -func (m *MockS3API) RestoreObjectRequest(arg0 *s3.RestoreObjectInput) (*request.Request, *s3.RestoreObjectOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RestoreObjectRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.RestoreObjectOutput) - return ret0, ret1 -} - -// RestoreObjectRequest indicates an expected call of RestoreObjectRequest. -func (mr *MockS3APIMockRecorder) RestoreObjectRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RestoreObjectRequest", reflect.TypeOf((*MockS3API)(nil).RestoreObjectRequest), arg0) -} - -// RestoreObjectWithContext mocks base method. -func (m *MockS3API) RestoreObjectWithContext(arg0 aws.Context, arg1 *s3.RestoreObjectInput, arg2 ...request.Option) (*s3.RestoreObjectOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "RestoreObjectWithContext", varargs...) - ret0, _ := ret[0].(*s3.RestoreObjectOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// RestoreObjectWithContext indicates an expected call of RestoreObjectWithContext. -func (mr *MockS3APIMockRecorder) RestoreObjectWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RestoreObjectWithContext", reflect.TypeOf((*MockS3API)(nil).RestoreObjectWithContext), varargs...) -} - -// SelectObjectContent mocks base method. -func (m *MockS3API) SelectObjectContent(arg0 *s3.SelectObjectContentInput) (*s3.SelectObjectContentOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SelectObjectContent", arg0) - ret0, _ := ret[0].(*s3.SelectObjectContentOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// SelectObjectContent indicates an expected call of SelectObjectContent. -func (mr *MockS3APIMockRecorder) SelectObjectContent(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SelectObjectContent", reflect.TypeOf((*MockS3API)(nil).SelectObjectContent), arg0) -} - -// SelectObjectContentRequest mocks base method. -func (m *MockS3API) SelectObjectContentRequest(arg0 *s3.SelectObjectContentInput) (*request.Request, *s3.SelectObjectContentOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SelectObjectContentRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.SelectObjectContentOutput) - return ret0, ret1 -} - -// SelectObjectContentRequest indicates an expected call of SelectObjectContentRequest. 
-func (mr *MockS3APIMockRecorder) SelectObjectContentRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SelectObjectContentRequest", reflect.TypeOf((*MockS3API)(nil).SelectObjectContentRequest), arg0) -} - -// SelectObjectContentWithContext mocks base method. -func (m *MockS3API) SelectObjectContentWithContext(arg0 aws.Context, arg1 *s3.SelectObjectContentInput, arg2 ...request.Option) (*s3.SelectObjectContentOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "SelectObjectContentWithContext", varargs...) - ret0, _ := ret[0].(*s3.SelectObjectContentOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// SelectObjectContentWithContext indicates an expected call of SelectObjectContentWithContext. -func (mr *MockS3APIMockRecorder) SelectObjectContentWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SelectObjectContentWithContext", reflect.TypeOf((*MockS3API)(nil).SelectObjectContentWithContext), varargs...) -} - -// UploadPart mocks base method. -func (m *MockS3API) UploadPart(arg0 *s3.UploadPartInput) (*s3.UploadPartOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UploadPart", arg0) - ret0, _ := ret[0].(*s3.UploadPartOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// UploadPart indicates an expected call of UploadPart. -func (mr *MockS3APIMockRecorder) UploadPart(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UploadPart", reflect.TypeOf((*MockS3API)(nil).UploadPart), arg0) -} - -// UploadPartCopy mocks base method. -func (m *MockS3API) UploadPartCopy(arg0 *s3.UploadPartCopyInput) (*s3.UploadPartCopyOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UploadPartCopy", arg0) - ret0, _ := ret[0].(*s3.UploadPartCopyOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// UploadPartCopy indicates an expected call of UploadPartCopy. -func (mr *MockS3APIMockRecorder) UploadPartCopy(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UploadPartCopy", reflect.TypeOf((*MockS3API)(nil).UploadPartCopy), arg0) -} - -// UploadPartCopyRequest mocks base method. -func (m *MockS3API) UploadPartCopyRequest(arg0 *s3.UploadPartCopyInput) (*request.Request, *s3.UploadPartCopyOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UploadPartCopyRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.UploadPartCopyOutput) - return ret0, ret1 -} - -// UploadPartCopyRequest indicates an expected call of UploadPartCopyRequest. -func (mr *MockS3APIMockRecorder) UploadPartCopyRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UploadPartCopyRequest", reflect.TypeOf((*MockS3API)(nil).UploadPartCopyRequest), arg0) -} - -// UploadPartCopyWithContext mocks base method. -func (m *MockS3API) UploadPartCopyWithContext(arg0 aws.Context, arg1 *s3.UploadPartCopyInput, arg2 ...request.Option) (*s3.UploadPartCopyOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "UploadPartCopyWithContext", varargs...) 
- ret0, _ := ret[0].(*s3.UploadPartCopyOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// UploadPartCopyWithContext indicates an expected call of UploadPartCopyWithContext. -func (mr *MockS3APIMockRecorder) UploadPartCopyWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UploadPartCopyWithContext", reflect.TypeOf((*MockS3API)(nil).UploadPartCopyWithContext), varargs...) -} - -// UploadPartRequest mocks base method. -func (m *MockS3API) UploadPartRequest(arg0 *s3.UploadPartInput) (*request.Request, *s3.UploadPartOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UploadPartRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.UploadPartOutput) - return ret0, ret1 -} - -// UploadPartRequest indicates an expected call of UploadPartRequest. -func (mr *MockS3APIMockRecorder) UploadPartRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UploadPartRequest", reflect.TypeOf((*MockS3API)(nil).UploadPartRequest), arg0) -} - -// UploadPartWithContext mocks base method. -func (m *MockS3API) UploadPartWithContext(arg0 aws.Context, arg1 *s3.UploadPartInput, arg2 ...request.Option) (*s3.UploadPartOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "UploadPartWithContext", varargs...) - ret0, _ := ret[0].(*s3.UploadPartOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// UploadPartWithContext indicates an expected call of UploadPartWithContext. -func (mr *MockS3APIMockRecorder) UploadPartWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UploadPartWithContext", reflect.TypeOf((*MockS3API)(nil).UploadPartWithContext), varargs...) -} - -// WaitUntilBucketExists mocks base method. -func (m *MockS3API) WaitUntilBucketExists(arg0 *s3.HeadBucketInput) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "WaitUntilBucketExists", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// WaitUntilBucketExists indicates an expected call of WaitUntilBucketExists. -func (mr *MockS3APIMockRecorder) WaitUntilBucketExists(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitUntilBucketExists", reflect.TypeOf((*MockS3API)(nil).WaitUntilBucketExists), arg0) -} - -// WaitUntilBucketExistsWithContext mocks base method. -func (m *MockS3API) WaitUntilBucketExistsWithContext(arg0 aws.Context, arg1 *s3.HeadBucketInput, arg2 ...request.WaiterOption) error { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "WaitUntilBucketExistsWithContext", varargs...) - ret0, _ := ret[0].(error) - return ret0 -} - -// WaitUntilBucketExistsWithContext indicates an expected call of WaitUntilBucketExistsWithContext. -func (mr *MockS3APIMockRecorder) WaitUntilBucketExistsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) 
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitUntilBucketExistsWithContext", reflect.TypeOf((*MockS3API)(nil).WaitUntilBucketExistsWithContext), varargs...) -} - -// WaitUntilBucketNotExists mocks base method. -func (m *MockS3API) WaitUntilBucketNotExists(arg0 *s3.HeadBucketInput) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "WaitUntilBucketNotExists", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// WaitUntilBucketNotExists indicates an expected call of WaitUntilBucketNotExists. -func (mr *MockS3APIMockRecorder) WaitUntilBucketNotExists(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitUntilBucketNotExists", reflect.TypeOf((*MockS3API)(nil).WaitUntilBucketNotExists), arg0) -} - -// WaitUntilBucketNotExistsWithContext mocks base method. -func (m *MockS3API) WaitUntilBucketNotExistsWithContext(arg0 aws.Context, arg1 *s3.HeadBucketInput, arg2 ...request.WaiterOption) error { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "WaitUntilBucketNotExistsWithContext", varargs...) - ret0, _ := ret[0].(error) - return ret0 -} - -// WaitUntilBucketNotExistsWithContext indicates an expected call of WaitUntilBucketNotExistsWithContext. -func (mr *MockS3APIMockRecorder) WaitUntilBucketNotExistsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitUntilBucketNotExistsWithContext", reflect.TypeOf((*MockS3API)(nil).WaitUntilBucketNotExistsWithContext), varargs...) -} - -// WaitUntilObjectExists mocks base method. -func (m *MockS3API) WaitUntilObjectExists(arg0 *s3.HeadObjectInput) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "WaitUntilObjectExists", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// WaitUntilObjectExists indicates an expected call of WaitUntilObjectExists. -func (mr *MockS3APIMockRecorder) WaitUntilObjectExists(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitUntilObjectExists", reflect.TypeOf((*MockS3API)(nil).WaitUntilObjectExists), arg0) -} - -// WaitUntilObjectExistsWithContext mocks base method. -func (m *MockS3API) WaitUntilObjectExistsWithContext(arg0 aws.Context, arg1 *s3.HeadObjectInput, arg2 ...request.WaiterOption) error { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "WaitUntilObjectExistsWithContext", varargs...) - ret0, _ := ret[0].(error) - return ret0 -} - -// WaitUntilObjectExistsWithContext indicates an expected call of WaitUntilObjectExistsWithContext. -func (mr *MockS3APIMockRecorder) WaitUntilObjectExistsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitUntilObjectExistsWithContext", reflect.TypeOf((*MockS3API)(nil).WaitUntilObjectExistsWithContext), varargs...) -} - -// WaitUntilObjectNotExists mocks base method. 
-func (m *MockS3API) WaitUntilObjectNotExists(arg0 *s3.HeadObjectInput) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "WaitUntilObjectNotExists", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// WaitUntilObjectNotExists indicates an expected call of WaitUntilObjectNotExists. -func (mr *MockS3APIMockRecorder) WaitUntilObjectNotExists(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitUntilObjectNotExists", reflect.TypeOf((*MockS3API)(nil).WaitUntilObjectNotExists), arg0) -} - -// WaitUntilObjectNotExistsWithContext mocks base method. -func (m *MockS3API) WaitUntilObjectNotExistsWithContext(arg0 aws.Context, arg1 *s3.HeadObjectInput, arg2 ...request.WaiterOption) error { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "WaitUntilObjectNotExistsWithContext", varargs...) - ret0, _ := ret[0].(error) - return ret0 -} - -// WaitUntilObjectNotExistsWithContext indicates an expected call of WaitUntilObjectNotExistsWithContext. -func (mr *MockS3APIMockRecorder) WaitUntilObjectNotExistsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitUntilObjectNotExistsWithContext", reflect.TypeOf((*MockS3API)(nil).WaitUntilObjectNotExistsWithContext), varargs...) -} - -// WriteGetObjectResponse mocks base method. -func (m *MockS3API) WriteGetObjectResponse(arg0 *s3.WriteGetObjectResponseInput) (*s3.WriteGetObjectResponseOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "WriteGetObjectResponse", arg0) - ret0, _ := ret[0].(*s3.WriteGetObjectResponseOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// WriteGetObjectResponse indicates an expected call of WriteGetObjectResponse. -func (mr *MockS3APIMockRecorder) WriteGetObjectResponse(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteGetObjectResponse", reflect.TypeOf((*MockS3API)(nil).WriteGetObjectResponse), arg0) -} - -// WriteGetObjectResponseRequest mocks base method. -func (m *MockS3API) WriteGetObjectResponseRequest(arg0 *s3.WriteGetObjectResponseInput) (*request.Request, *s3.WriteGetObjectResponseOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "WriteGetObjectResponseRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.WriteGetObjectResponseOutput) - return ret0, ret1 -} - -// WriteGetObjectResponseRequest indicates an expected call of WriteGetObjectResponseRequest. -func (mr *MockS3APIMockRecorder) WriteGetObjectResponseRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteGetObjectResponseRequest", reflect.TypeOf((*MockS3API)(nil).WriteGetObjectResponseRequest), arg0) -} - -// WriteGetObjectResponseWithContext mocks base method. -func (m *MockS3API) WriteGetObjectResponseWithContext(arg0 aws.Context, arg1 *s3.WriteGetObjectResponseInput, arg2 ...request.Option) (*s3.WriteGetObjectResponseOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "WriteGetObjectResponseWithContext", varargs...) 
- ret0, _ := ret[0].(*s3.WriteGetObjectResponseOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// WriteGetObjectResponseWithContext indicates an expected call of WriteGetObjectResponseWithContext. -func (mr *MockS3APIMockRecorder) WriteGetObjectResponseWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteGetObjectResponseWithContext", reflect.TypeOf((*MockS3API)(nil).WriteGetObjectResponseWithContext), varargs...) -} diff -Nru temporal-1.21.5-1/src/common/archiver/s3store/mocks/s3api.go temporal-1.22.5/src/common/archiver/s3store/mocks/s3api.go --- temporal-1.21.5-1/src/common/archiver/s3store/mocks/s3api.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/archiver/s3store/mocks/s3api.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,5207 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Code generated by MockGen. DO NOT EDIT. + +// Package mocks is a generated GoMock package. +package mocks + +import ( + reflect "reflect" + + aws "github.com/aws/aws-sdk-go/aws" + request "github.com/aws/aws-sdk-go/aws/request" + s3 "github.com/aws/aws-sdk-go/service/s3" + gomock "github.com/golang/mock/gomock" +) + +// MockS3API is a mock of S3API interface. +type MockS3API struct { + ctrl *gomock.Controller + recorder *MockS3APIMockRecorder +} + +// MockS3APIMockRecorder is the mock recorder for MockS3API. +type MockS3APIMockRecorder struct { + mock *MockS3API +} + +// NewMockS3API creates a new mock instance. +func NewMockS3API(ctrl *gomock.Controller) *MockS3API { + mock := &MockS3API{ctrl: ctrl} + mock.recorder = &MockS3APIMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockS3API) EXPECT() *MockS3APIMockRecorder { + return m.recorder +} + +// AbortMultipartUpload mocks base method. +func (m *MockS3API) AbortMultipartUpload(arg0 *s3.AbortMultipartUploadInput) (*s3.AbortMultipartUploadOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AbortMultipartUpload", arg0) + ret0, _ := ret[0].(*s3.AbortMultipartUploadOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// AbortMultipartUpload indicates an expected call of AbortMultipartUpload. 
+func (mr *MockS3APIMockRecorder) AbortMultipartUpload(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AbortMultipartUpload", reflect.TypeOf((*MockS3API)(nil).AbortMultipartUpload), arg0) +} + +// AbortMultipartUploadRequest mocks base method. +func (m *MockS3API) AbortMultipartUploadRequest(arg0 *s3.AbortMultipartUploadInput) (*request.Request, *s3.AbortMultipartUploadOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AbortMultipartUploadRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.AbortMultipartUploadOutput) + return ret0, ret1 +} + +// AbortMultipartUploadRequest indicates an expected call of AbortMultipartUploadRequest. +func (mr *MockS3APIMockRecorder) AbortMultipartUploadRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AbortMultipartUploadRequest", reflect.TypeOf((*MockS3API)(nil).AbortMultipartUploadRequest), arg0) +} + +// AbortMultipartUploadWithContext mocks base method. +func (m *MockS3API) AbortMultipartUploadWithContext(arg0 aws.Context, arg1 *s3.AbortMultipartUploadInput, arg2 ...request.Option) (*s3.AbortMultipartUploadOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "AbortMultipartUploadWithContext", varargs...) + ret0, _ := ret[0].(*s3.AbortMultipartUploadOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// AbortMultipartUploadWithContext indicates an expected call of AbortMultipartUploadWithContext. +func (mr *MockS3APIMockRecorder) AbortMultipartUploadWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AbortMultipartUploadWithContext", reflect.TypeOf((*MockS3API)(nil).AbortMultipartUploadWithContext), varargs...) +} + +// CompleteMultipartUpload mocks base method. +func (m *MockS3API) CompleteMultipartUpload(arg0 *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CompleteMultipartUpload", arg0) + ret0, _ := ret[0].(*s3.CompleteMultipartUploadOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CompleteMultipartUpload indicates an expected call of CompleteMultipartUpload. +func (mr *MockS3APIMockRecorder) CompleteMultipartUpload(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CompleteMultipartUpload", reflect.TypeOf((*MockS3API)(nil).CompleteMultipartUpload), arg0) +} + +// CompleteMultipartUploadRequest mocks base method. +func (m *MockS3API) CompleteMultipartUploadRequest(arg0 *s3.CompleteMultipartUploadInput) (*request.Request, *s3.CompleteMultipartUploadOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CompleteMultipartUploadRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.CompleteMultipartUploadOutput) + return ret0, ret1 +} + +// CompleteMultipartUploadRequest indicates an expected call of CompleteMultipartUploadRequest. 
+func (mr *MockS3APIMockRecorder) CompleteMultipartUploadRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CompleteMultipartUploadRequest", reflect.TypeOf((*MockS3API)(nil).CompleteMultipartUploadRequest), arg0) +} + +// CompleteMultipartUploadWithContext mocks base method. +func (m *MockS3API) CompleteMultipartUploadWithContext(arg0 aws.Context, arg1 *s3.CompleteMultipartUploadInput, arg2 ...request.Option) (*s3.CompleteMultipartUploadOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "CompleteMultipartUploadWithContext", varargs...) + ret0, _ := ret[0].(*s3.CompleteMultipartUploadOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CompleteMultipartUploadWithContext indicates an expected call of CompleteMultipartUploadWithContext. +func (mr *MockS3APIMockRecorder) CompleteMultipartUploadWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CompleteMultipartUploadWithContext", reflect.TypeOf((*MockS3API)(nil).CompleteMultipartUploadWithContext), varargs...) +} + +// CopyObject mocks base method. +func (m *MockS3API) CopyObject(arg0 *s3.CopyObjectInput) (*s3.CopyObjectOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CopyObject", arg0) + ret0, _ := ret[0].(*s3.CopyObjectOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CopyObject indicates an expected call of CopyObject. +func (mr *MockS3APIMockRecorder) CopyObject(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CopyObject", reflect.TypeOf((*MockS3API)(nil).CopyObject), arg0) +} + +// CopyObjectRequest mocks base method. +func (m *MockS3API) CopyObjectRequest(arg0 *s3.CopyObjectInput) (*request.Request, *s3.CopyObjectOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CopyObjectRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.CopyObjectOutput) + return ret0, ret1 +} + +// CopyObjectRequest indicates an expected call of CopyObjectRequest. +func (mr *MockS3APIMockRecorder) CopyObjectRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CopyObjectRequest", reflect.TypeOf((*MockS3API)(nil).CopyObjectRequest), arg0) +} + +// CopyObjectWithContext mocks base method. +func (m *MockS3API) CopyObjectWithContext(arg0 aws.Context, arg1 *s3.CopyObjectInput, arg2 ...request.Option) (*s3.CopyObjectOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "CopyObjectWithContext", varargs...) + ret0, _ := ret[0].(*s3.CopyObjectOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CopyObjectWithContext indicates an expected call of CopyObjectWithContext. +func (mr *MockS3APIMockRecorder) CopyObjectWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CopyObjectWithContext", reflect.TypeOf((*MockS3API)(nil).CopyObjectWithContext), varargs...) +} + +// CreateBucket mocks base method. 
+func (m *MockS3API) CreateBucket(arg0 *s3.CreateBucketInput) (*s3.CreateBucketOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateBucket", arg0) + ret0, _ := ret[0].(*s3.CreateBucketOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateBucket indicates an expected call of CreateBucket. +func (mr *MockS3APIMockRecorder) CreateBucket(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateBucket", reflect.TypeOf((*MockS3API)(nil).CreateBucket), arg0) +} + +// CreateBucketRequest mocks base method. +func (m *MockS3API) CreateBucketRequest(arg0 *s3.CreateBucketInput) (*request.Request, *s3.CreateBucketOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateBucketRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.CreateBucketOutput) + return ret0, ret1 +} + +// CreateBucketRequest indicates an expected call of CreateBucketRequest. +func (mr *MockS3APIMockRecorder) CreateBucketRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateBucketRequest", reflect.TypeOf((*MockS3API)(nil).CreateBucketRequest), arg0) +} + +// CreateBucketWithContext mocks base method. +func (m *MockS3API) CreateBucketWithContext(arg0 aws.Context, arg1 *s3.CreateBucketInput, arg2 ...request.Option) (*s3.CreateBucketOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "CreateBucketWithContext", varargs...) + ret0, _ := ret[0].(*s3.CreateBucketOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateBucketWithContext indicates an expected call of CreateBucketWithContext. +func (mr *MockS3APIMockRecorder) CreateBucketWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateBucketWithContext", reflect.TypeOf((*MockS3API)(nil).CreateBucketWithContext), varargs...) +} + +// CreateMultipartUpload mocks base method. +func (m *MockS3API) CreateMultipartUpload(arg0 *s3.CreateMultipartUploadInput) (*s3.CreateMultipartUploadOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateMultipartUpload", arg0) + ret0, _ := ret[0].(*s3.CreateMultipartUploadOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateMultipartUpload indicates an expected call of CreateMultipartUpload. +func (mr *MockS3APIMockRecorder) CreateMultipartUpload(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateMultipartUpload", reflect.TypeOf((*MockS3API)(nil).CreateMultipartUpload), arg0) +} + +// CreateMultipartUploadRequest mocks base method. +func (m *MockS3API) CreateMultipartUploadRequest(arg0 *s3.CreateMultipartUploadInput) (*request.Request, *s3.CreateMultipartUploadOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateMultipartUploadRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.CreateMultipartUploadOutput) + return ret0, ret1 +} + +// CreateMultipartUploadRequest indicates an expected call of CreateMultipartUploadRequest. 
+func (mr *MockS3APIMockRecorder) CreateMultipartUploadRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateMultipartUploadRequest", reflect.TypeOf((*MockS3API)(nil).CreateMultipartUploadRequest), arg0) +} + +// CreateMultipartUploadWithContext mocks base method. +func (m *MockS3API) CreateMultipartUploadWithContext(arg0 aws.Context, arg1 *s3.CreateMultipartUploadInput, arg2 ...request.Option) (*s3.CreateMultipartUploadOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "CreateMultipartUploadWithContext", varargs...) + ret0, _ := ret[0].(*s3.CreateMultipartUploadOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateMultipartUploadWithContext indicates an expected call of CreateMultipartUploadWithContext. +func (mr *MockS3APIMockRecorder) CreateMultipartUploadWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateMultipartUploadWithContext", reflect.TypeOf((*MockS3API)(nil).CreateMultipartUploadWithContext), varargs...) +} + +// DeleteBucket mocks base method. +func (m *MockS3API) DeleteBucket(arg0 *s3.DeleteBucketInput) (*s3.DeleteBucketOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteBucket", arg0) + ret0, _ := ret[0].(*s3.DeleteBucketOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteBucket indicates an expected call of DeleteBucket. +func (mr *MockS3APIMockRecorder) DeleteBucket(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucket", reflect.TypeOf((*MockS3API)(nil).DeleteBucket), arg0) +} + +// DeleteBucketAnalyticsConfiguration mocks base method. +func (m *MockS3API) DeleteBucketAnalyticsConfiguration(arg0 *s3.DeleteBucketAnalyticsConfigurationInput) (*s3.DeleteBucketAnalyticsConfigurationOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteBucketAnalyticsConfiguration", arg0) + ret0, _ := ret[0].(*s3.DeleteBucketAnalyticsConfigurationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteBucketAnalyticsConfiguration indicates an expected call of DeleteBucketAnalyticsConfiguration. +func (mr *MockS3APIMockRecorder) DeleteBucketAnalyticsConfiguration(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketAnalyticsConfiguration", reflect.TypeOf((*MockS3API)(nil).DeleteBucketAnalyticsConfiguration), arg0) +} + +// DeleteBucketAnalyticsConfigurationRequest mocks base method. +func (m *MockS3API) DeleteBucketAnalyticsConfigurationRequest(arg0 *s3.DeleteBucketAnalyticsConfigurationInput) (*request.Request, *s3.DeleteBucketAnalyticsConfigurationOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteBucketAnalyticsConfigurationRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.DeleteBucketAnalyticsConfigurationOutput) + return ret0, ret1 +} + +// DeleteBucketAnalyticsConfigurationRequest indicates an expected call of DeleteBucketAnalyticsConfigurationRequest. 
+func (mr *MockS3APIMockRecorder) DeleteBucketAnalyticsConfigurationRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketAnalyticsConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).DeleteBucketAnalyticsConfigurationRequest), arg0) +} + +// DeleteBucketAnalyticsConfigurationWithContext mocks base method. +func (m *MockS3API) DeleteBucketAnalyticsConfigurationWithContext(arg0 aws.Context, arg1 *s3.DeleteBucketAnalyticsConfigurationInput, arg2 ...request.Option) (*s3.DeleteBucketAnalyticsConfigurationOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeleteBucketAnalyticsConfigurationWithContext", varargs...) + ret0, _ := ret[0].(*s3.DeleteBucketAnalyticsConfigurationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteBucketAnalyticsConfigurationWithContext indicates an expected call of DeleteBucketAnalyticsConfigurationWithContext. +func (mr *MockS3APIMockRecorder) DeleteBucketAnalyticsConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketAnalyticsConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).DeleteBucketAnalyticsConfigurationWithContext), varargs...) +} + +// DeleteBucketCors mocks base method. +func (m *MockS3API) DeleteBucketCors(arg0 *s3.DeleteBucketCorsInput) (*s3.DeleteBucketCorsOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteBucketCors", arg0) + ret0, _ := ret[0].(*s3.DeleteBucketCorsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteBucketCors indicates an expected call of DeleteBucketCors. +func (mr *MockS3APIMockRecorder) DeleteBucketCors(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketCors", reflect.TypeOf((*MockS3API)(nil).DeleteBucketCors), arg0) +} + +// DeleteBucketCorsRequest mocks base method. +func (m *MockS3API) DeleteBucketCorsRequest(arg0 *s3.DeleteBucketCorsInput) (*request.Request, *s3.DeleteBucketCorsOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteBucketCorsRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.DeleteBucketCorsOutput) + return ret0, ret1 +} + +// DeleteBucketCorsRequest indicates an expected call of DeleteBucketCorsRequest. +func (mr *MockS3APIMockRecorder) DeleteBucketCorsRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketCorsRequest", reflect.TypeOf((*MockS3API)(nil).DeleteBucketCorsRequest), arg0) +} + +// DeleteBucketCorsWithContext mocks base method. +func (m *MockS3API) DeleteBucketCorsWithContext(arg0 aws.Context, arg1 *s3.DeleteBucketCorsInput, arg2 ...request.Option) (*s3.DeleteBucketCorsOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeleteBucketCorsWithContext", varargs...) + ret0, _ := ret[0].(*s3.DeleteBucketCorsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteBucketCorsWithContext indicates an expected call of DeleteBucketCorsWithContext. 
+func (mr *MockS3APIMockRecorder) DeleteBucketCorsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketCorsWithContext", reflect.TypeOf((*MockS3API)(nil).DeleteBucketCorsWithContext), varargs...) +} + +// DeleteBucketEncryption mocks base method. +func (m *MockS3API) DeleteBucketEncryption(arg0 *s3.DeleteBucketEncryptionInput) (*s3.DeleteBucketEncryptionOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteBucketEncryption", arg0) + ret0, _ := ret[0].(*s3.DeleteBucketEncryptionOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteBucketEncryption indicates an expected call of DeleteBucketEncryption. +func (mr *MockS3APIMockRecorder) DeleteBucketEncryption(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketEncryption", reflect.TypeOf((*MockS3API)(nil).DeleteBucketEncryption), arg0) +} + +// DeleteBucketEncryptionRequest mocks base method. +func (m *MockS3API) DeleteBucketEncryptionRequest(arg0 *s3.DeleteBucketEncryptionInput) (*request.Request, *s3.DeleteBucketEncryptionOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteBucketEncryptionRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.DeleteBucketEncryptionOutput) + return ret0, ret1 +} + +// DeleteBucketEncryptionRequest indicates an expected call of DeleteBucketEncryptionRequest. +func (mr *MockS3APIMockRecorder) DeleteBucketEncryptionRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketEncryptionRequest", reflect.TypeOf((*MockS3API)(nil).DeleteBucketEncryptionRequest), arg0) +} + +// DeleteBucketEncryptionWithContext mocks base method. +func (m *MockS3API) DeleteBucketEncryptionWithContext(arg0 aws.Context, arg1 *s3.DeleteBucketEncryptionInput, arg2 ...request.Option) (*s3.DeleteBucketEncryptionOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeleteBucketEncryptionWithContext", varargs...) + ret0, _ := ret[0].(*s3.DeleteBucketEncryptionOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteBucketEncryptionWithContext indicates an expected call of DeleteBucketEncryptionWithContext. +func (mr *MockS3APIMockRecorder) DeleteBucketEncryptionWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketEncryptionWithContext", reflect.TypeOf((*MockS3API)(nil).DeleteBucketEncryptionWithContext), varargs...) +} + +// DeleteBucketIntelligentTieringConfiguration mocks base method. +func (m *MockS3API) DeleteBucketIntelligentTieringConfiguration(arg0 *s3.DeleteBucketIntelligentTieringConfigurationInput) (*s3.DeleteBucketIntelligentTieringConfigurationOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteBucketIntelligentTieringConfiguration", arg0) + ret0, _ := ret[0].(*s3.DeleteBucketIntelligentTieringConfigurationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteBucketIntelligentTieringConfiguration indicates an expected call of DeleteBucketIntelligentTieringConfiguration. 
+func (mr *MockS3APIMockRecorder) DeleteBucketIntelligentTieringConfiguration(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketIntelligentTieringConfiguration", reflect.TypeOf((*MockS3API)(nil).DeleteBucketIntelligentTieringConfiguration), arg0) +} + +// DeleteBucketIntelligentTieringConfigurationRequest mocks base method. +func (m *MockS3API) DeleteBucketIntelligentTieringConfigurationRequest(arg0 *s3.DeleteBucketIntelligentTieringConfigurationInput) (*request.Request, *s3.DeleteBucketIntelligentTieringConfigurationOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteBucketIntelligentTieringConfigurationRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.DeleteBucketIntelligentTieringConfigurationOutput) + return ret0, ret1 +} + +// DeleteBucketIntelligentTieringConfigurationRequest indicates an expected call of DeleteBucketIntelligentTieringConfigurationRequest. +func (mr *MockS3APIMockRecorder) DeleteBucketIntelligentTieringConfigurationRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketIntelligentTieringConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).DeleteBucketIntelligentTieringConfigurationRequest), arg0) +} + +// DeleteBucketIntelligentTieringConfigurationWithContext mocks base method. +func (m *MockS3API) DeleteBucketIntelligentTieringConfigurationWithContext(arg0 aws.Context, arg1 *s3.DeleteBucketIntelligentTieringConfigurationInput, arg2 ...request.Option) (*s3.DeleteBucketIntelligentTieringConfigurationOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeleteBucketIntelligentTieringConfigurationWithContext", varargs...) + ret0, _ := ret[0].(*s3.DeleteBucketIntelligentTieringConfigurationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteBucketIntelligentTieringConfigurationWithContext indicates an expected call of DeleteBucketIntelligentTieringConfigurationWithContext. +func (mr *MockS3APIMockRecorder) DeleteBucketIntelligentTieringConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketIntelligentTieringConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).DeleteBucketIntelligentTieringConfigurationWithContext), varargs...) +} + +// DeleteBucketInventoryConfiguration mocks base method. +func (m *MockS3API) DeleteBucketInventoryConfiguration(arg0 *s3.DeleteBucketInventoryConfigurationInput) (*s3.DeleteBucketInventoryConfigurationOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteBucketInventoryConfiguration", arg0) + ret0, _ := ret[0].(*s3.DeleteBucketInventoryConfigurationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteBucketInventoryConfiguration indicates an expected call of DeleteBucketInventoryConfiguration. +func (mr *MockS3APIMockRecorder) DeleteBucketInventoryConfiguration(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketInventoryConfiguration", reflect.TypeOf((*MockS3API)(nil).DeleteBucketInventoryConfiguration), arg0) +} + +// DeleteBucketInventoryConfigurationRequest mocks base method. 
+func (m *MockS3API) DeleteBucketInventoryConfigurationRequest(arg0 *s3.DeleteBucketInventoryConfigurationInput) (*request.Request, *s3.DeleteBucketInventoryConfigurationOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteBucketInventoryConfigurationRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.DeleteBucketInventoryConfigurationOutput) + return ret0, ret1 +} + +// DeleteBucketInventoryConfigurationRequest indicates an expected call of DeleteBucketInventoryConfigurationRequest. +func (mr *MockS3APIMockRecorder) DeleteBucketInventoryConfigurationRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketInventoryConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).DeleteBucketInventoryConfigurationRequest), arg0) +} + +// DeleteBucketInventoryConfigurationWithContext mocks base method. +func (m *MockS3API) DeleteBucketInventoryConfigurationWithContext(arg0 aws.Context, arg1 *s3.DeleteBucketInventoryConfigurationInput, arg2 ...request.Option) (*s3.DeleteBucketInventoryConfigurationOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeleteBucketInventoryConfigurationWithContext", varargs...) + ret0, _ := ret[0].(*s3.DeleteBucketInventoryConfigurationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteBucketInventoryConfigurationWithContext indicates an expected call of DeleteBucketInventoryConfigurationWithContext. +func (mr *MockS3APIMockRecorder) DeleteBucketInventoryConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketInventoryConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).DeleteBucketInventoryConfigurationWithContext), varargs...) +} + +// DeleteBucketLifecycle mocks base method. +func (m *MockS3API) DeleteBucketLifecycle(arg0 *s3.DeleteBucketLifecycleInput) (*s3.DeleteBucketLifecycleOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteBucketLifecycle", arg0) + ret0, _ := ret[0].(*s3.DeleteBucketLifecycleOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteBucketLifecycle indicates an expected call of DeleteBucketLifecycle. +func (mr *MockS3APIMockRecorder) DeleteBucketLifecycle(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketLifecycle", reflect.TypeOf((*MockS3API)(nil).DeleteBucketLifecycle), arg0) +} + +// DeleteBucketLifecycleRequest mocks base method. +func (m *MockS3API) DeleteBucketLifecycleRequest(arg0 *s3.DeleteBucketLifecycleInput) (*request.Request, *s3.DeleteBucketLifecycleOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteBucketLifecycleRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.DeleteBucketLifecycleOutput) + return ret0, ret1 +} + +// DeleteBucketLifecycleRequest indicates an expected call of DeleteBucketLifecycleRequest. +func (mr *MockS3APIMockRecorder) DeleteBucketLifecycleRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketLifecycleRequest", reflect.TypeOf((*MockS3API)(nil).DeleteBucketLifecycleRequest), arg0) +} + +// DeleteBucketLifecycleWithContext mocks base method. 
+func (m *MockS3API) DeleteBucketLifecycleWithContext(arg0 aws.Context, arg1 *s3.DeleteBucketLifecycleInput, arg2 ...request.Option) (*s3.DeleteBucketLifecycleOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeleteBucketLifecycleWithContext", varargs...) + ret0, _ := ret[0].(*s3.DeleteBucketLifecycleOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteBucketLifecycleWithContext indicates an expected call of DeleteBucketLifecycleWithContext. +func (mr *MockS3APIMockRecorder) DeleteBucketLifecycleWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketLifecycleWithContext", reflect.TypeOf((*MockS3API)(nil).DeleteBucketLifecycleWithContext), varargs...) +} + +// DeleteBucketMetricsConfiguration mocks base method. +func (m *MockS3API) DeleteBucketMetricsConfiguration(arg0 *s3.DeleteBucketMetricsConfigurationInput) (*s3.DeleteBucketMetricsConfigurationOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteBucketMetricsConfiguration", arg0) + ret0, _ := ret[0].(*s3.DeleteBucketMetricsConfigurationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteBucketMetricsConfiguration indicates an expected call of DeleteBucketMetricsConfiguration. +func (mr *MockS3APIMockRecorder) DeleteBucketMetricsConfiguration(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketMetricsConfiguration", reflect.TypeOf((*MockS3API)(nil).DeleteBucketMetricsConfiguration), arg0) +} + +// DeleteBucketMetricsConfigurationRequest mocks base method. +func (m *MockS3API) DeleteBucketMetricsConfigurationRequest(arg0 *s3.DeleteBucketMetricsConfigurationInput) (*request.Request, *s3.DeleteBucketMetricsConfigurationOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteBucketMetricsConfigurationRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.DeleteBucketMetricsConfigurationOutput) + return ret0, ret1 +} + +// DeleteBucketMetricsConfigurationRequest indicates an expected call of DeleteBucketMetricsConfigurationRequest. +func (mr *MockS3APIMockRecorder) DeleteBucketMetricsConfigurationRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketMetricsConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).DeleteBucketMetricsConfigurationRequest), arg0) +} + +// DeleteBucketMetricsConfigurationWithContext mocks base method. +func (m *MockS3API) DeleteBucketMetricsConfigurationWithContext(arg0 aws.Context, arg1 *s3.DeleteBucketMetricsConfigurationInput, arg2 ...request.Option) (*s3.DeleteBucketMetricsConfigurationOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeleteBucketMetricsConfigurationWithContext", varargs...) + ret0, _ := ret[0].(*s3.DeleteBucketMetricsConfigurationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteBucketMetricsConfigurationWithContext indicates an expected call of DeleteBucketMetricsConfigurationWithContext. 
+func (mr *MockS3APIMockRecorder) DeleteBucketMetricsConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketMetricsConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).DeleteBucketMetricsConfigurationWithContext), varargs...) +} + +// DeleteBucketOwnershipControls mocks base method. +func (m *MockS3API) DeleteBucketOwnershipControls(arg0 *s3.DeleteBucketOwnershipControlsInput) (*s3.DeleteBucketOwnershipControlsOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteBucketOwnershipControls", arg0) + ret0, _ := ret[0].(*s3.DeleteBucketOwnershipControlsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteBucketOwnershipControls indicates an expected call of DeleteBucketOwnershipControls. +func (mr *MockS3APIMockRecorder) DeleteBucketOwnershipControls(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketOwnershipControls", reflect.TypeOf((*MockS3API)(nil).DeleteBucketOwnershipControls), arg0) +} + +// DeleteBucketOwnershipControlsRequest mocks base method. +func (m *MockS3API) DeleteBucketOwnershipControlsRequest(arg0 *s3.DeleteBucketOwnershipControlsInput) (*request.Request, *s3.DeleteBucketOwnershipControlsOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteBucketOwnershipControlsRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.DeleteBucketOwnershipControlsOutput) + return ret0, ret1 +} + +// DeleteBucketOwnershipControlsRequest indicates an expected call of DeleteBucketOwnershipControlsRequest. +func (mr *MockS3APIMockRecorder) DeleteBucketOwnershipControlsRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketOwnershipControlsRequest", reflect.TypeOf((*MockS3API)(nil).DeleteBucketOwnershipControlsRequest), arg0) +} + +// DeleteBucketOwnershipControlsWithContext mocks base method. +func (m *MockS3API) DeleteBucketOwnershipControlsWithContext(arg0 aws.Context, arg1 *s3.DeleteBucketOwnershipControlsInput, arg2 ...request.Option) (*s3.DeleteBucketOwnershipControlsOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeleteBucketOwnershipControlsWithContext", varargs...) + ret0, _ := ret[0].(*s3.DeleteBucketOwnershipControlsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteBucketOwnershipControlsWithContext indicates an expected call of DeleteBucketOwnershipControlsWithContext. +func (mr *MockS3APIMockRecorder) DeleteBucketOwnershipControlsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketOwnershipControlsWithContext", reflect.TypeOf((*MockS3API)(nil).DeleteBucketOwnershipControlsWithContext), varargs...) +} + +// DeleteBucketPolicy mocks base method. 
+func (m *MockS3API) DeleteBucketPolicy(arg0 *s3.DeleteBucketPolicyInput) (*s3.DeleteBucketPolicyOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteBucketPolicy", arg0) + ret0, _ := ret[0].(*s3.DeleteBucketPolicyOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteBucketPolicy indicates an expected call of DeleteBucketPolicy. +func (mr *MockS3APIMockRecorder) DeleteBucketPolicy(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketPolicy", reflect.TypeOf((*MockS3API)(nil).DeleteBucketPolicy), arg0) +} + +// DeleteBucketPolicyRequest mocks base method. +func (m *MockS3API) DeleteBucketPolicyRequest(arg0 *s3.DeleteBucketPolicyInput) (*request.Request, *s3.DeleteBucketPolicyOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteBucketPolicyRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.DeleteBucketPolicyOutput) + return ret0, ret1 +} + +// DeleteBucketPolicyRequest indicates an expected call of DeleteBucketPolicyRequest. +func (mr *MockS3APIMockRecorder) DeleteBucketPolicyRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketPolicyRequest", reflect.TypeOf((*MockS3API)(nil).DeleteBucketPolicyRequest), arg0) +} + +// DeleteBucketPolicyWithContext mocks base method. +func (m *MockS3API) DeleteBucketPolicyWithContext(arg0 aws.Context, arg1 *s3.DeleteBucketPolicyInput, arg2 ...request.Option) (*s3.DeleteBucketPolicyOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeleteBucketPolicyWithContext", varargs...) + ret0, _ := ret[0].(*s3.DeleteBucketPolicyOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteBucketPolicyWithContext indicates an expected call of DeleteBucketPolicyWithContext. +func (mr *MockS3APIMockRecorder) DeleteBucketPolicyWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketPolicyWithContext", reflect.TypeOf((*MockS3API)(nil).DeleteBucketPolicyWithContext), varargs...) +} + +// DeleteBucketReplication mocks base method. +func (m *MockS3API) DeleteBucketReplication(arg0 *s3.DeleteBucketReplicationInput) (*s3.DeleteBucketReplicationOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteBucketReplication", arg0) + ret0, _ := ret[0].(*s3.DeleteBucketReplicationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteBucketReplication indicates an expected call of DeleteBucketReplication. +func (mr *MockS3APIMockRecorder) DeleteBucketReplication(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketReplication", reflect.TypeOf((*MockS3API)(nil).DeleteBucketReplication), arg0) +} + +// DeleteBucketReplicationRequest mocks base method. 
+func (m *MockS3API) DeleteBucketReplicationRequest(arg0 *s3.DeleteBucketReplicationInput) (*request.Request, *s3.DeleteBucketReplicationOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteBucketReplicationRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.DeleteBucketReplicationOutput) + return ret0, ret1 +} + +// DeleteBucketReplicationRequest indicates an expected call of DeleteBucketReplicationRequest. +func (mr *MockS3APIMockRecorder) DeleteBucketReplicationRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketReplicationRequest", reflect.TypeOf((*MockS3API)(nil).DeleteBucketReplicationRequest), arg0) +} + +// DeleteBucketReplicationWithContext mocks base method. +func (m *MockS3API) DeleteBucketReplicationWithContext(arg0 aws.Context, arg1 *s3.DeleteBucketReplicationInput, arg2 ...request.Option) (*s3.DeleteBucketReplicationOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeleteBucketReplicationWithContext", varargs...) + ret0, _ := ret[0].(*s3.DeleteBucketReplicationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteBucketReplicationWithContext indicates an expected call of DeleteBucketReplicationWithContext. +func (mr *MockS3APIMockRecorder) DeleteBucketReplicationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketReplicationWithContext", reflect.TypeOf((*MockS3API)(nil).DeleteBucketReplicationWithContext), varargs...) +} + +// DeleteBucketRequest mocks base method. +func (m *MockS3API) DeleteBucketRequest(arg0 *s3.DeleteBucketInput) (*request.Request, *s3.DeleteBucketOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteBucketRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.DeleteBucketOutput) + return ret0, ret1 +} + +// DeleteBucketRequest indicates an expected call of DeleteBucketRequest. +func (mr *MockS3APIMockRecorder) DeleteBucketRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketRequest", reflect.TypeOf((*MockS3API)(nil).DeleteBucketRequest), arg0) +} + +// DeleteBucketTagging mocks base method. +func (m *MockS3API) DeleteBucketTagging(arg0 *s3.DeleteBucketTaggingInput) (*s3.DeleteBucketTaggingOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteBucketTagging", arg0) + ret0, _ := ret[0].(*s3.DeleteBucketTaggingOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteBucketTagging indicates an expected call of DeleteBucketTagging. +func (mr *MockS3APIMockRecorder) DeleteBucketTagging(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketTagging", reflect.TypeOf((*MockS3API)(nil).DeleteBucketTagging), arg0) +} + +// DeleteBucketTaggingRequest mocks base method. 
+func (m *MockS3API) DeleteBucketTaggingRequest(arg0 *s3.DeleteBucketTaggingInput) (*request.Request, *s3.DeleteBucketTaggingOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteBucketTaggingRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.DeleteBucketTaggingOutput) + return ret0, ret1 +} + +// DeleteBucketTaggingRequest indicates an expected call of DeleteBucketTaggingRequest. +func (mr *MockS3APIMockRecorder) DeleteBucketTaggingRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketTaggingRequest", reflect.TypeOf((*MockS3API)(nil).DeleteBucketTaggingRequest), arg0) +} + +// DeleteBucketTaggingWithContext mocks base method. +func (m *MockS3API) DeleteBucketTaggingWithContext(arg0 aws.Context, arg1 *s3.DeleteBucketTaggingInput, arg2 ...request.Option) (*s3.DeleteBucketTaggingOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeleteBucketTaggingWithContext", varargs...) + ret0, _ := ret[0].(*s3.DeleteBucketTaggingOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteBucketTaggingWithContext indicates an expected call of DeleteBucketTaggingWithContext. +func (mr *MockS3APIMockRecorder) DeleteBucketTaggingWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketTaggingWithContext", reflect.TypeOf((*MockS3API)(nil).DeleteBucketTaggingWithContext), varargs...) +} + +// DeleteBucketWebsite mocks base method. +func (m *MockS3API) DeleteBucketWebsite(arg0 *s3.DeleteBucketWebsiteInput) (*s3.DeleteBucketWebsiteOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteBucketWebsite", arg0) + ret0, _ := ret[0].(*s3.DeleteBucketWebsiteOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteBucketWebsite indicates an expected call of DeleteBucketWebsite. +func (mr *MockS3APIMockRecorder) DeleteBucketWebsite(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketWebsite", reflect.TypeOf((*MockS3API)(nil).DeleteBucketWebsite), arg0) +} + +// DeleteBucketWebsiteRequest mocks base method. +func (m *MockS3API) DeleteBucketWebsiteRequest(arg0 *s3.DeleteBucketWebsiteInput) (*request.Request, *s3.DeleteBucketWebsiteOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteBucketWebsiteRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.DeleteBucketWebsiteOutput) + return ret0, ret1 +} + +// DeleteBucketWebsiteRequest indicates an expected call of DeleteBucketWebsiteRequest. +func (mr *MockS3APIMockRecorder) DeleteBucketWebsiteRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketWebsiteRequest", reflect.TypeOf((*MockS3API)(nil).DeleteBucketWebsiteRequest), arg0) +} + +// DeleteBucketWebsiteWithContext mocks base method. +func (m *MockS3API) DeleteBucketWebsiteWithContext(arg0 aws.Context, arg1 *s3.DeleteBucketWebsiteInput, arg2 ...request.Option) (*s3.DeleteBucketWebsiteOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeleteBucketWebsiteWithContext", varargs...) 
+ ret0, _ := ret[0].(*s3.DeleteBucketWebsiteOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteBucketWebsiteWithContext indicates an expected call of DeleteBucketWebsiteWithContext. +func (mr *MockS3APIMockRecorder) DeleteBucketWebsiteWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketWebsiteWithContext", reflect.TypeOf((*MockS3API)(nil).DeleteBucketWebsiteWithContext), varargs...) +} + +// DeleteBucketWithContext mocks base method. +func (m *MockS3API) DeleteBucketWithContext(arg0 aws.Context, arg1 *s3.DeleteBucketInput, arg2 ...request.Option) (*s3.DeleteBucketOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeleteBucketWithContext", varargs...) + ret0, _ := ret[0].(*s3.DeleteBucketOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteBucketWithContext indicates an expected call of DeleteBucketWithContext. +func (mr *MockS3APIMockRecorder) DeleteBucketWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketWithContext", reflect.TypeOf((*MockS3API)(nil).DeleteBucketWithContext), varargs...) +} + +// DeleteObject mocks base method. +func (m *MockS3API) DeleteObject(arg0 *s3.DeleteObjectInput) (*s3.DeleteObjectOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteObject", arg0) + ret0, _ := ret[0].(*s3.DeleteObjectOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteObject indicates an expected call of DeleteObject. +func (mr *MockS3APIMockRecorder) DeleteObject(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteObject", reflect.TypeOf((*MockS3API)(nil).DeleteObject), arg0) +} + +// DeleteObjectRequest mocks base method. +func (m *MockS3API) DeleteObjectRequest(arg0 *s3.DeleteObjectInput) (*request.Request, *s3.DeleteObjectOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteObjectRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.DeleteObjectOutput) + return ret0, ret1 +} + +// DeleteObjectRequest indicates an expected call of DeleteObjectRequest. +func (mr *MockS3APIMockRecorder) DeleteObjectRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteObjectRequest", reflect.TypeOf((*MockS3API)(nil).DeleteObjectRequest), arg0) +} + +// DeleteObjectTagging mocks base method. +func (m *MockS3API) DeleteObjectTagging(arg0 *s3.DeleteObjectTaggingInput) (*s3.DeleteObjectTaggingOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteObjectTagging", arg0) + ret0, _ := ret[0].(*s3.DeleteObjectTaggingOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteObjectTagging indicates an expected call of DeleteObjectTagging. +func (mr *MockS3APIMockRecorder) DeleteObjectTagging(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteObjectTagging", reflect.TypeOf((*MockS3API)(nil).DeleteObjectTagging), arg0) +} + +// DeleteObjectTaggingRequest mocks base method. 
+func (m *MockS3API) DeleteObjectTaggingRequest(arg0 *s3.DeleteObjectTaggingInput) (*request.Request, *s3.DeleteObjectTaggingOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteObjectTaggingRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.DeleteObjectTaggingOutput) + return ret0, ret1 +} + +// DeleteObjectTaggingRequest indicates an expected call of DeleteObjectTaggingRequest. +func (mr *MockS3APIMockRecorder) DeleteObjectTaggingRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteObjectTaggingRequest", reflect.TypeOf((*MockS3API)(nil).DeleteObjectTaggingRequest), arg0) +} + +// DeleteObjectTaggingWithContext mocks base method. +func (m *MockS3API) DeleteObjectTaggingWithContext(arg0 aws.Context, arg1 *s3.DeleteObjectTaggingInput, arg2 ...request.Option) (*s3.DeleteObjectTaggingOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeleteObjectTaggingWithContext", varargs...) + ret0, _ := ret[0].(*s3.DeleteObjectTaggingOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteObjectTaggingWithContext indicates an expected call of DeleteObjectTaggingWithContext. +func (mr *MockS3APIMockRecorder) DeleteObjectTaggingWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteObjectTaggingWithContext", reflect.TypeOf((*MockS3API)(nil).DeleteObjectTaggingWithContext), varargs...) +} + +// DeleteObjectWithContext mocks base method. +func (m *MockS3API) DeleteObjectWithContext(arg0 aws.Context, arg1 *s3.DeleteObjectInput, arg2 ...request.Option) (*s3.DeleteObjectOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeleteObjectWithContext", varargs...) + ret0, _ := ret[0].(*s3.DeleteObjectOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteObjectWithContext indicates an expected call of DeleteObjectWithContext. +func (mr *MockS3APIMockRecorder) DeleteObjectWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteObjectWithContext", reflect.TypeOf((*MockS3API)(nil).DeleteObjectWithContext), varargs...) +} + +// DeleteObjects mocks base method. +func (m *MockS3API) DeleteObjects(arg0 *s3.DeleteObjectsInput) (*s3.DeleteObjectsOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteObjects", arg0) + ret0, _ := ret[0].(*s3.DeleteObjectsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteObjects indicates an expected call of DeleteObjects. +func (mr *MockS3APIMockRecorder) DeleteObjects(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteObjects", reflect.TypeOf((*MockS3API)(nil).DeleteObjects), arg0) +} + +// DeleteObjectsRequest mocks base method. 
+func (m *MockS3API) DeleteObjectsRequest(arg0 *s3.DeleteObjectsInput) (*request.Request, *s3.DeleteObjectsOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteObjectsRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.DeleteObjectsOutput) + return ret0, ret1 +} + +// DeleteObjectsRequest indicates an expected call of DeleteObjectsRequest. +func (mr *MockS3APIMockRecorder) DeleteObjectsRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteObjectsRequest", reflect.TypeOf((*MockS3API)(nil).DeleteObjectsRequest), arg0) +} + +// DeleteObjectsWithContext mocks base method. +func (m *MockS3API) DeleteObjectsWithContext(arg0 aws.Context, arg1 *s3.DeleteObjectsInput, arg2 ...request.Option) (*s3.DeleteObjectsOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeleteObjectsWithContext", varargs...) + ret0, _ := ret[0].(*s3.DeleteObjectsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteObjectsWithContext indicates an expected call of DeleteObjectsWithContext. +func (mr *MockS3APIMockRecorder) DeleteObjectsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteObjectsWithContext", reflect.TypeOf((*MockS3API)(nil).DeleteObjectsWithContext), varargs...) +} + +// DeletePublicAccessBlock mocks base method. +func (m *MockS3API) DeletePublicAccessBlock(arg0 *s3.DeletePublicAccessBlockInput) (*s3.DeletePublicAccessBlockOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeletePublicAccessBlock", arg0) + ret0, _ := ret[0].(*s3.DeletePublicAccessBlockOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeletePublicAccessBlock indicates an expected call of DeletePublicAccessBlock. +func (mr *MockS3APIMockRecorder) DeletePublicAccessBlock(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeletePublicAccessBlock", reflect.TypeOf((*MockS3API)(nil).DeletePublicAccessBlock), arg0) +} + +// DeletePublicAccessBlockRequest mocks base method. +func (m *MockS3API) DeletePublicAccessBlockRequest(arg0 *s3.DeletePublicAccessBlockInput) (*request.Request, *s3.DeletePublicAccessBlockOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeletePublicAccessBlockRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.DeletePublicAccessBlockOutput) + return ret0, ret1 +} + +// DeletePublicAccessBlockRequest indicates an expected call of DeletePublicAccessBlockRequest. +func (mr *MockS3APIMockRecorder) DeletePublicAccessBlockRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeletePublicAccessBlockRequest", reflect.TypeOf((*MockS3API)(nil).DeletePublicAccessBlockRequest), arg0) +} + +// DeletePublicAccessBlockWithContext mocks base method. +func (m *MockS3API) DeletePublicAccessBlockWithContext(arg0 aws.Context, arg1 *s3.DeletePublicAccessBlockInput, arg2 ...request.Option) (*s3.DeletePublicAccessBlockOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeletePublicAccessBlockWithContext", varargs...) 
+ ret0, _ := ret[0].(*s3.DeletePublicAccessBlockOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeletePublicAccessBlockWithContext indicates an expected call of DeletePublicAccessBlockWithContext. +func (mr *MockS3APIMockRecorder) DeletePublicAccessBlockWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeletePublicAccessBlockWithContext", reflect.TypeOf((*MockS3API)(nil).DeletePublicAccessBlockWithContext), varargs...) +} + +// GetBucketAccelerateConfiguration mocks base method. +func (m *MockS3API) GetBucketAccelerateConfiguration(arg0 *s3.GetBucketAccelerateConfigurationInput) (*s3.GetBucketAccelerateConfigurationOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketAccelerateConfiguration", arg0) + ret0, _ := ret[0].(*s3.GetBucketAccelerateConfigurationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketAccelerateConfiguration indicates an expected call of GetBucketAccelerateConfiguration. +func (mr *MockS3APIMockRecorder) GetBucketAccelerateConfiguration(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketAccelerateConfiguration", reflect.TypeOf((*MockS3API)(nil).GetBucketAccelerateConfiguration), arg0) +} + +// GetBucketAccelerateConfigurationRequest mocks base method. +func (m *MockS3API) GetBucketAccelerateConfigurationRequest(arg0 *s3.GetBucketAccelerateConfigurationInput) (*request.Request, *s3.GetBucketAccelerateConfigurationOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketAccelerateConfigurationRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.GetBucketAccelerateConfigurationOutput) + return ret0, ret1 +} + +// GetBucketAccelerateConfigurationRequest indicates an expected call of GetBucketAccelerateConfigurationRequest. +func (mr *MockS3APIMockRecorder) GetBucketAccelerateConfigurationRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketAccelerateConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketAccelerateConfigurationRequest), arg0) +} + +// GetBucketAccelerateConfigurationWithContext mocks base method. +func (m *MockS3API) GetBucketAccelerateConfigurationWithContext(arg0 aws.Context, arg1 *s3.GetBucketAccelerateConfigurationInput, arg2 ...request.Option) (*s3.GetBucketAccelerateConfigurationOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetBucketAccelerateConfigurationWithContext", varargs...) + ret0, _ := ret[0].(*s3.GetBucketAccelerateConfigurationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketAccelerateConfigurationWithContext indicates an expected call of GetBucketAccelerateConfigurationWithContext. +func (mr *MockS3APIMockRecorder) GetBucketAccelerateConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketAccelerateConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketAccelerateConfigurationWithContext), varargs...) +} + +// GetBucketAcl mocks base method. 
+func (m *MockS3API) GetBucketAcl(arg0 *s3.GetBucketAclInput) (*s3.GetBucketAclOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketAcl", arg0) + ret0, _ := ret[0].(*s3.GetBucketAclOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketAcl indicates an expected call of GetBucketAcl. +func (mr *MockS3APIMockRecorder) GetBucketAcl(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketAcl", reflect.TypeOf((*MockS3API)(nil).GetBucketAcl), arg0) +} + +// GetBucketAclRequest mocks base method. +func (m *MockS3API) GetBucketAclRequest(arg0 *s3.GetBucketAclInput) (*request.Request, *s3.GetBucketAclOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketAclRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.GetBucketAclOutput) + return ret0, ret1 +} + +// GetBucketAclRequest indicates an expected call of GetBucketAclRequest. +func (mr *MockS3APIMockRecorder) GetBucketAclRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketAclRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketAclRequest), arg0) +} + +// GetBucketAclWithContext mocks base method. +func (m *MockS3API) GetBucketAclWithContext(arg0 aws.Context, arg1 *s3.GetBucketAclInput, arg2 ...request.Option) (*s3.GetBucketAclOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetBucketAclWithContext", varargs...) + ret0, _ := ret[0].(*s3.GetBucketAclOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketAclWithContext indicates an expected call of GetBucketAclWithContext. +func (mr *MockS3APIMockRecorder) GetBucketAclWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketAclWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketAclWithContext), varargs...) +} + +// GetBucketAnalyticsConfiguration mocks base method. +func (m *MockS3API) GetBucketAnalyticsConfiguration(arg0 *s3.GetBucketAnalyticsConfigurationInput) (*s3.GetBucketAnalyticsConfigurationOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketAnalyticsConfiguration", arg0) + ret0, _ := ret[0].(*s3.GetBucketAnalyticsConfigurationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketAnalyticsConfiguration indicates an expected call of GetBucketAnalyticsConfiguration. +func (mr *MockS3APIMockRecorder) GetBucketAnalyticsConfiguration(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketAnalyticsConfiguration", reflect.TypeOf((*MockS3API)(nil).GetBucketAnalyticsConfiguration), arg0) +} + +// GetBucketAnalyticsConfigurationRequest mocks base method. +func (m *MockS3API) GetBucketAnalyticsConfigurationRequest(arg0 *s3.GetBucketAnalyticsConfigurationInput) (*request.Request, *s3.GetBucketAnalyticsConfigurationOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketAnalyticsConfigurationRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.GetBucketAnalyticsConfigurationOutput) + return ret0, ret1 +} + +// GetBucketAnalyticsConfigurationRequest indicates an expected call of GetBucketAnalyticsConfigurationRequest. 
+func (mr *MockS3APIMockRecorder) GetBucketAnalyticsConfigurationRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketAnalyticsConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketAnalyticsConfigurationRequest), arg0) +} + +// GetBucketAnalyticsConfigurationWithContext mocks base method. +func (m *MockS3API) GetBucketAnalyticsConfigurationWithContext(arg0 aws.Context, arg1 *s3.GetBucketAnalyticsConfigurationInput, arg2 ...request.Option) (*s3.GetBucketAnalyticsConfigurationOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetBucketAnalyticsConfigurationWithContext", varargs...) + ret0, _ := ret[0].(*s3.GetBucketAnalyticsConfigurationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketAnalyticsConfigurationWithContext indicates an expected call of GetBucketAnalyticsConfigurationWithContext. +func (mr *MockS3APIMockRecorder) GetBucketAnalyticsConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketAnalyticsConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketAnalyticsConfigurationWithContext), varargs...) +} + +// GetBucketCors mocks base method. +func (m *MockS3API) GetBucketCors(arg0 *s3.GetBucketCorsInput) (*s3.GetBucketCorsOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketCors", arg0) + ret0, _ := ret[0].(*s3.GetBucketCorsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketCors indicates an expected call of GetBucketCors. +func (mr *MockS3APIMockRecorder) GetBucketCors(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketCors", reflect.TypeOf((*MockS3API)(nil).GetBucketCors), arg0) +} + +// GetBucketCorsRequest mocks base method. +func (m *MockS3API) GetBucketCorsRequest(arg0 *s3.GetBucketCorsInput) (*request.Request, *s3.GetBucketCorsOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketCorsRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.GetBucketCorsOutput) + return ret0, ret1 +} + +// GetBucketCorsRequest indicates an expected call of GetBucketCorsRequest. +func (mr *MockS3APIMockRecorder) GetBucketCorsRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketCorsRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketCorsRequest), arg0) +} + +// GetBucketCorsWithContext mocks base method. +func (m *MockS3API) GetBucketCorsWithContext(arg0 aws.Context, arg1 *s3.GetBucketCorsInput, arg2 ...request.Option) (*s3.GetBucketCorsOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetBucketCorsWithContext", varargs...) + ret0, _ := ret[0].(*s3.GetBucketCorsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketCorsWithContext indicates an expected call of GetBucketCorsWithContext. +func (mr *MockS3APIMockRecorder) GetBucketCorsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) 
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketCorsWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketCorsWithContext), varargs...) +} + +// GetBucketEncryption mocks base method. +func (m *MockS3API) GetBucketEncryption(arg0 *s3.GetBucketEncryptionInput) (*s3.GetBucketEncryptionOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketEncryption", arg0) + ret0, _ := ret[0].(*s3.GetBucketEncryptionOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketEncryption indicates an expected call of GetBucketEncryption. +func (mr *MockS3APIMockRecorder) GetBucketEncryption(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketEncryption", reflect.TypeOf((*MockS3API)(nil).GetBucketEncryption), arg0) +} + +// GetBucketEncryptionRequest mocks base method. +func (m *MockS3API) GetBucketEncryptionRequest(arg0 *s3.GetBucketEncryptionInput) (*request.Request, *s3.GetBucketEncryptionOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketEncryptionRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.GetBucketEncryptionOutput) + return ret0, ret1 +} + +// GetBucketEncryptionRequest indicates an expected call of GetBucketEncryptionRequest. +func (mr *MockS3APIMockRecorder) GetBucketEncryptionRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketEncryptionRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketEncryptionRequest), arg0) +} + +// GetBucketEncryptionWithContext mocks base method. +func (m *MockS3API) GetBucketEncryptionWithContext(arg0 aws.Context, arg1 *s3.GetBucketEncryptionInput, arg2 ...request.Option) (*s3.GetBucketEncryptionOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetBucketEncryptionWithContext", varargs...) + ret0, _ := ret[0].(*s3.GetBucketEncryptionOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketEncryptionWithContext indicates an expected call of GetBucketEncryptionWithContext. +func (mr *MockS3APIMockRecorder) GetBucketEncryptionWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketEncryptionWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketEncryptionWithContext), varargs...) +} + +// GetBucketIntelligentTieringConfiguration mocks base method. +func (m *MockS3API) GetBucketIntelligentTieringConfiguration(arg0 *s3.GetBucketIntelligentTieringConfigurationInput) (*s3.GetBucketIntelligentTieringConfigurationOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketIntelligentTieringConfiguration", arg0) + ret0, _ := ret[0].(*s3.GetBucketIntelligentTieringConfigurationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketIntelligentTieringConfiguration indicates an expected call of GetBucketIntelligentTieringConfiguration. +func (mr *MockS3APIMockRecorder) GetBucketIntelligentTieringConfiguration(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketIntelligentTieringConfiguration", reflect.TypeOf((*MockS3API)(nil).GetBucketIntelligentTieringConfiguration), arg0) +} + +// GetBucketIntelligentTieringConfigurationRequest mocks base method. 
+func (m *MockS3API) GetBucketIntelligentTieringConfigurationRequest(arg0 *s3.GetBucketIntelligentTieringConfigurationInput) (*request.Request, *s3.GetBucketIntelligentTieringConfigurationOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketIntelligentTieringConfigurationRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.GetBucketIntelligentTieringConfigurationOutput) + return ret0, ret1 +} + +// GetBucketIntelligentTieringConfigurationRequest indicates an expected call of GetBucketIntelligentTieringConfigurationRequest. +func (mr *MockS3APIMockRecorder) GetBucketIntelligentTieringConfigurationRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketIntelligentTieringConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketIntelligentTieringConfigurationRequest), arg0) +} + +// GetBucketIntelligentTieringConfigurationWithContext mocks base method. +func (m *MockS3API) GetBucketIntelligentTieringConfigurationWithContext(arg0 aws.Context, arg1 *s3.GetBucketIntelligentTieringConfigurationInput, arg2 ...request.Option) (*s3.GetBucketIntelligentTieringConfigurationOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetBucketIntelligentTieringConfigurationWithContext", varargs...) + ret0, _ := ret[0].(*s3.GetBucketIntelligentTieringConfigurationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketIntelligentTieringConfigurationWithContext indicates an expected call of GetBucketIntelligentTieringConfigurationWithContext. +func (mr *MockS3APIMockRecorder) GetBucketIntelligentTieringConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketIntelligentTieringConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketIntelligentTieringConfigurationWithContext), varargs...) +} + +// GetBucketInventoryConfiguration mocks base method. +func (m *MockS3API) GetBucketInventoryConfiguration(arg0 *s3.GetBucketInventoryConfigurationInput) (*s3.GetBucketInventoryConfigurationOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketInventoryConfiguration", arg0) + ret0, _ := ret[0].(*s3.GetBucketInventoryConfigurationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketInventoryConfiguration indicates an expected call of GetBucketInventoryConfiguration. +func (mr *MockS3APIMockRecorder) GetBucketInventoryConfiguration(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketInventoryConfiguration", reflect.TypeOf((*MockS3API)(nil).GetBucketInventoryConfiguration), arg0) +} + +// GetBucketInventoryConfigurationRequest mocks base method. +func (m *MockS3API) GetBucketInventoryConfigurationRequest(arg0 *s3.GetBucketInventoryConfigurationInput) (*request.Request, *s3.GetBucketInventoryConfigurationOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketInventoryConfigurationRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.GetBucketInventoryConfigurationOutput) + return ret0, ret1 +} + +// GetBucketInventoryConfigurationRequest indicates an expected call of GetBucketInventoryConfigurationRequest. 
+func (mr *MockS3APIMockRecorder) GetBucketInventoryConfigurationRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketInventoryConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketInventoryConfigurationRequest), arg0) +} + +// GetBucketInventoryConfigurationWithContext mocks base method. +func (m *MockS3API) GetBucketInventoryConfigurationWithContext(arg0 aws.Context, arg1 *s3.GetBucketInventoryConfigurationInput, arg2 ...request.Option) (*s3.GetBucketInventoryConfigurationOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetBucketInventoryConfigurationWithContext", varargs...) + ret0, _ := ret[0].(*s3.GetBucketInventoryConfigurationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketInventoryConfigurationWithContext indicates an expected call of GetBucketInventoryConfigurationWithContext. +func (mr *MockS3APIMockRecorder) GetBucketInventoryConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketInventoryConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketInventoryConfigurationWithContext), varargs...) +} + +// GetBucketLifecycle mocks base method. +func (m *MockS3API) GetBucketLifecycle(arg0 *s3.GetBucketLifecycleInput) (*s3.GetBucketLifecycleOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketLifecycle", arg0) + ret0, _ := ret[0].(*s3.GetBucketLifecycleOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketLifecycle indicates an expected call of GetBucketLifecycle. +func (mr *MockS3APIMockRecorder) GetBucketLifecycle(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketLifecycle", reflect.TypeOf((*MockS3API)(nil).GetBucketLifecycle), arg0) +} + +// GetBucketLifecycleConfiguration mocks base method. +func (m *MockS3API) GetBucketLifecycleConfiguration(arg0 *s3.GetBucketLifecycleConfigurationInput) (*s3.GetBucketLifecycleConfigurationOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketLifecycleConfiguration", arg0) + ret0, _ := ret[0].(*s3.GetBucketLifecycleConfigurationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketLifecycleConfiguration indicates an expected call of GetBucketLifecycleConfiguration. +func (mr *MockS3APIMockRecorder) GetBucketLifecycleConfiguration(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketLifecycleConfiguration", reflect.TypeOf((*MockS3API)(nil).GetBucketLifecycleConfiguration), arg0) +} + +// GetBucketLifecycleConfigurationRequest mocks base method. +func (m *MockS3API) GetBucketLifecycleConfigurationRequest(arg0 *s3.GetBucketLifecycleConfigurationInput) (*request.Request, *s3.GetBucketLifecycleConfigurationOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketLifecycleConfigurationRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.GetBucketLifecycleConfigurationOutput) + return ret0, ret1 +} + +// GetBucketLifecycleConfigurationRequest indicates an expected call of GetBucketLifecycleConfigurationRequest. 
+func (mr *MockS3APIMockRecorder) GetBucketLifecycleConfigurationRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketLifecycleConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketLifecycleConfigurationRequest), arg0) +} + +// GetBucketLifecycleConfigurationWithContext mocks base method. +func (m *MockS3API) GetBucketLifecycleConfigurationWithContext(arg0 aws.Context, arg1 *s3.GetBucketLifecycleConfigurationInput, arg2 ...request.Option) (*s3.GetBucketLifecycleConfigurationOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetBucketLifecycleConfigurationWithContext", varargs...) + ret0, _ := ret[0].(*s3.GetBucketLifecycleConfigurationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketLifecycleConfigurationWithContext indicates an expected call of GetBucketLifecycleConfigurationWithContext. +func (mr *MockS3APIMockRecorder) GetBucketLifecycleConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketLifecycleConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketLifecycleConfigurationWithContext), varargs...) +} + +// GetBucketLifecycleRequest mocks base method. +func (m *MockS3API) GetBucketLifecycleRequest(arg0 *s3.GetBucketLifecycleInput) (*request.Request, *s3.GetBucketLifecycleOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketLifecycleRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.GetBucketLifecycleOutput) + return ret0, ret1 +} + +// GetBucketLifecycleRequest indicates an expected call of GetBucketLifecycleRequest. +func (mr *MockS3APIMockRecorder) GetBucketLifecycleRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketLifecycleRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketLifecycleRequest), arg0) +} + +// GetBucketLifecycleWithContext mocks base method. +func (m *MockS3API) GetBucketLifecycleWithContext(arg0 aws.Context, arg1 *s3.GetBucketLifecycleInput, arg2 ...request.Option) (*s3.GetBucketLifecycleOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetBucketLifecycleWithContext", varargs...) + ret0, _ := ret[0].(*s3.GetBucketLifecycleOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketLifecycleWithContext indicates an expected call of GetBucketLifecycleWithContext. +func (mr *MockS3APIMockRecorder) GetBucketLifecycleWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketLifecycleWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketLifecycleWithContext), varargs...) +} + +// GetBucketLocation mocks base method. 
+func (m *MockS3API) GetBucketLocation(arg0 *s3.GetBucketLocationInput) (*s3.GetBucketLocationOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketLocation", arg0) + ret0, _ := ret[0].(*s3.GetBucketLocationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketLocation indicates an expected call of GetBucketLocation. +func (mr *MockS3APIMockRecorder) GetBucketLocation(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketLocation", reflect.TypeOf((*MockS3API)(nil).GetBucketLocation), arg0) +} + +// GetBucketLocationRequest mocks base method. +func (m *MockS3API) GetBucketLocationRequest(arg0 *s3.GetBucketLocationInput) (*request.Request, *s3.GetBucketLocationOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketLocationRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.GetBucketLocationOutput) + return ret0, ret1 +} + +// GetBucketLocationRequest indicates an expected call of GetBucketLocationRequest. +func (mr *MockS3APIMockRecorder) GetBucketLocationRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketLocationRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketLocationRequest), arg0) +} + +// GetBucketLocationWithContext mocks base method. +func (m *MockS3API) GetBucketLocationWithContext(arg0 aws.Context, arg1 *s3.GetBucketLocationInput, arg2 ...request.Option) (*s3.GetBucketLocationOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetBucketLocationWithContext", varargs...) + ret0, _ := ret[0].(*s3.GetBucketLocationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketLocationWithContext indicates an expected call of GetBucketLocationWithContext. +func (mr *MockS3APIMockRecorder) GetBucketLocationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketLocationWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketLocationWithContext), varargs...) +} + +// GetBucketLogging mocks base method. +func (m *MockS3API) GetBucketLogging(arg0 *s3.GetBucketLoggingInput) (*s3.GetBucketLoggingOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketLogging", arg0) + ret0, _ := ret[0].(*s3.GetBucketLoggingOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketLogging indicates an expected call of GetBucketLogging. +func (mr *MockS3APIMockRecorder) GetBucketLogging(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketLogging", reflect.TypeOf((*MockS3API)(nil).GetBucketLogging), arg0) +} + +// GetBucketLoggingRequest mocks base method. +func (m *MockS3API) GetBucketLoggingRequest(arg0 *s3.GetBucketLoggingInput) (*request.Request, *s3.GetBucketLoggingOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketLoggingRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.GetBucketLoggingOutput) + return ret0, ret1 +} + +// GetBucketLoggingRequest indicates an expected call of GetBucketLoggingRequest. 
+func (mr *MockS3APIMockRecorder) GetBucketLoggingRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketLoggingRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketLoggingRequest), arg0) +} + +// GetBucketLoggingWithContext mocks base method. +func (m *MockS3API) GetBucketLoggingWithContext(arg0 aws.Context, arg1 *s3.GetBucketLoggingInput, arg2 ...request.Option) (*s3.GetBucketLoggingOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetBucketLoggingWithContext", varargs...) + ret0, _ := ret[0].(*s3.GetBucketLoggingOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketLoggingWithContext indicates an expected call of GetBucketLoggingWithContext. +func (mr *MockS3APIMockRecorder) GetBucketLoggingWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketLoggingWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketLoggingWithContext), varargs...) +} + +// GetBucketMetricsConfiguration mocks base method. +func (m *MockS3API) GetBucketMetricsConfiguration(arg0 *s3.GetBucketMetricsConfigurationInput) (*s3.GetBucketMetricsConfigurationOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketMetricsConfiguration", arg0) + ret0, _ := ret[0].(*s3.GetBucketMetricsConfigurationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketMetricsConfiguration indicates an expected call of GetBucketMetricsConfiguration. +func (mr *MockS3APIMockRecorder) GetBucketMetricsConfiguration(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketMetricsConfiguration", reflect.TypeOf((*MockS3API)(nil).GetBucketMetricsConfiguration), arg0) +} + +// GetBucketMetricsConfigurationRequest mocks base method. +func (m *MockS3API) GetBucketMetricsConfigurationRequest(arg0 *s3.GetBucketMetricsConfigurationInput) (*request.Request, *s3.GetBucketMetricsConfigurationOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketMetricsConfigurationRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.GetBucketMetricsConfigurationOutput) + return ret0, ret1 +} + +// GetBucketMetricsConfigurationRequest indicates an expected call of GetBucketMetricsConfigurationRequest. +func (mr *MockS3APIMockRecorder) GetBucketMetricsConfigurationRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketMetricsConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketMetricsConfigurationRequest), arg0) +} + +// GetBucketMetricsConfigurationWithContext mocks base method. +func (m *MockS3API) GetBucketMetricsConfigurationWithContext(arg0 aws.Context, arg1 *s3.GetBucketMetricsConfigurationInput, arg2 ...request.Option) (*s3.GetBucketMetricsConfigurationOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetBucketMetricsConfigurationWithContext", varargs...) 
+ ret0, _ := ret[0].(*s3.GetBucketMetricsConfigurationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketMetricsConfigurationWithContext indicates an expected call of GetBucketMetricsConfigurationWithContext. +func (mr *MockS3APIMockRecorder) GetBucketMetricsConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketMetricsConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketMetricsConfigurationWithContext), varargs...) +} + +// GetBucketNotification mocks base method. +func (m *MockS3API) GetBucketNotification(arg0 *s3.GetBucketNotificationConfigurationRequest) (*s3.NotificationConfigurationDeprecated, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketNotification", arg0) + ret0, _ := ret[0].(*s3.NotificationConfigurationDeprecated) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketNotification indicates an expected call of GetBucketNotification. +func (mr *MockS3APIMockRecorder) GetBucketNotification(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketNotification", reflect.TypeOf((*MockS3API)(nil).GetBucketNotification), arg0) +} + +// GetBucketNotificationConfiguration mocks base method. +func (m *MockS3API) GetBucketNotificationConfiguration(arg0 *s3.GetBucketNotificationConfigurationRequest) (*s3.NotificationConfiguration, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketNotificationConfiguration", arg0) + ret0, _ := ret[0].(*s3.NotificationConfiguration) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketNotificationConfiguration indicates an expected call of GetBucketNotificationConfiguration. +func (mr *MockS3APIMockRecorder) GetBucketNotificationConfiguration(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketNotificationConfiguration", reflect.TypeOf((*MockS3API)(nil).GetBucketNotificationConfiguration), arg0) +} + +// GetBucketNotificationConfigurationRequest mocks base method. +func (m *MockS3API) GetBucketNotificationConfigurationRequest(arg0 *s3.GetBucketNotificationConfigurationRequest) (*request.Request, *s3.NotificationConfiguration) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketNotificationConfigurationRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.NotificationConfiguration) + return ret0, ret1 +} + +// GetBucketNotificationConfigurationRequest indicates an expected call of GetBucketNotificationConfigurationRequest. +func (mr *MockS3APIMockRecorder) GetBucketNotificationConfigurationRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketNotificationConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketNotificationConfigurationRequest), arg0) +} + +// GetBucketNotificationConfigurationWithContext mocks base method. +func (m *MockS3API) GetBucketNotificationConfigurationWithContext(arg0 aws.Context, arg1 *s3.GetBucketNotificationConfigurationRequest, arg2 ...request.Option) (*s3.NotificationConfiguration, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetBucketNotificationConfigurationWithContext", varargs...) 
+ ret0, _ := ret[0].(*s3.NotificationConfiguration) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketNotificationConfigurationWithContext indicates an expected call of GetBucketNotificationConfigurationWithContext. +func (mr *MockS3APIMockRecorder) GetBucketNotificationConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketNotificationConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketNotificationConfigurationWithContext), varargs...) +} + +// GetBucketNotificationRequest mocks base method. +func (m *MockS3API) GetBucketNotificationRequest(arg0 *s3.GetBucketNotificationConfigurationRequest) (*request.Request, *s3.NotificationConfigurationDeprecated) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketNotificationRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.NotificationConfigurationDeprecated) + return ret0, ret1 +} + +// GetBucketNotificationRequest indicates an expected call of GetBucketNotificationRequest. +func (mr *MockS3APIMockRecorder) GetBucketNotificationRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketNotificationRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketNotificationRequest), arg0) +} + +// GetBucketNotificationWithContext mocks base method. +func (m *MockS3API) GetBucketNotificationWithContext(arg0 aws.Context, arg1 *s3.GetBucketNotificationConfigurationRequest, arg2 ...request.Option) (*s3.NotificationConfigurationDeprecated, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetBucketNotificationWithContext", varargs...) + ret0, _ := ret[0].(*s3.NotificationConfigurationDeprecated) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketNotificationWithContext indicates an expected call of GetBucketNotificationWithContext. +func (mr *MockS3APIMockRecorder) GetBucketNotificationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketNotificationWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketNotificationWithContext), varargs...) +} + +// GetBucketOwnershipControls mocks base method. +func (m *MockS3API) GetBucketOwnershipControls(arg0 *s3.GetBucketOwnershipControlsInput) (*s3.GetBucketOwnershipControlsOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketOwnershipControls", arg0) + ret0, _ := ret[0].(*s3.GetBucketOwnershipControlsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketOwnershipControls indicates an expected call of GetBucketOwnershipControls. +func (mr *MockS3APIMockRecorder) GetBucketOwnershipControls(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketOwnershipControls", reflect.TypeOf((*MockS3API)(nil).GetBucketOwnershipControls), arg0) +} + +// GetBucketOwnershipControlsRequest mocks base method. 
+func (m *MockS3API) GetBucketOwnershipControlsRequest(arg0 *s3.GetBucketOwnershipControlsInput) (*request.Request, *s3.GetBucketOwnershipControlsOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketOwnershipControlsRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.GetBucketOwnershipControlsOutput) + return ret0, ret1 +} + +// GetBucketOwnershipControlsRequest indicates an expected call of GetBucketOwnershipControlsRequest. +func (mr *MockS3APIMockRecorder) GetBucketOwnershipControlsRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketOwnershipControlsRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketOwnershipControlsRequest), arg0) +} + +// GetBucketOwnershipControlsWithContext mocks base method. +func (m *MockS3API) GetBucketOwnershipControlsWithContext(arg0 aws.Context, arg1 *s3.GetBucketOwnershipControlsInput, arg2 ...request.Option) (*s3.GetBucketOwnershipControlsOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetBucketOwnershipControlsWithContext", varargs...) + ret0, _ := ret[0].(*s3.GetBucketOwnershipControlsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketOwnershipControlsWithContext indicates an expected call of GetBucketOwnershipControlsWithContext. +func (mr *MockS3APIMockRecorder) GetBucketOwnershipControlsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketOwnershipControlsWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketOwnershipControlsWithContext), varargs...) +} + +// GetBucketPolicy mocks base method. +func (m *MockS3API) GetBucketPolicy(arg0 *s3.GetBucketPolicyInput) (*s3.GetBucketPolicyOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketPolicy", arg0) + ret0, _ := ret[0].(*s3.GetBucketPolicyOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketPolicy indicates an expected call of GetBucketPolicy. +func (mr *MockS3APIMockRecorder) GetBucketPolicy(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketPolicy", reflect.TypeOf((*MockS3API)(nil).GetBucketPolicy), arg0) +} + +// GetBucketPolicyRequest mocks base method. +func (m *MockS3API) GetBucketPolicyRequest(arg0 *s3.GetBucketPolicyInput) (*request.Request, *s3.GetBucketPolicyOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketPolicyRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.GetBucketPolicyOutput) + return ret0, ret1 +} + +// GetBucketPolicyRequest indicates an expected call of GetBucketPolicyRequest. +func (mr *MockS3APIMockRecorder) GetBucketPolicyRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketPolicyRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketPolicyRequest), arg0) +} + +// GetBucketPolicyStatus mocks base method. 
+func (m *MockS3API) GetBucketPolicyStatus(arg0 *s3.GetBucketPolicyStatusInput) (*s3.GetBucketPolicyStatusOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketPolicyStatus", arg0) + ret0, _ := ret[0].(*s3.GetBucketPolicyStatusOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketPolicyStatus indicates an expected call of GetBucketPolicyStatus. +func (mr *MockS3APIMockRecorder) GetBucketPolicyStatus(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketPolicyStatus", reflect.TypeOf((*MockS3API)(nil).GetBucketPolicyStatus), arg0) +} + +// GetBucketPolicyStatusRequest mocks base method. +func (m *MockS3API) GetBucketPolicyStatusRequest(arg0 *s3.GetBucketPolicyStatusInput) (*request.Request, *s3.GetBucketPolicyStatusOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketPolicyStatusRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.GetBucketPolicyStatusOutput) + return ret0, ret1 +} + +// GetBucketPolicyStatusRequest indicates an expected call of GetBucketPolicyStatusRequest. +func (mr *MockS3APIMockRecorder) GetBucketPolicyStatusRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketPolicyStatusRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketPolicyStatusRequest), arg0) +} + +// GetBucketPolicyStatusWithContext mocks base method. +func (m *MockS3API) GetBucketPolicyStatusWithContext(arg0 aws.Context, arg1 *s3.GetBucketPolicyStatusInput, arg2 ...request.Option) (*s3.GetBucketPolicyStatusOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetBucketPolicyStatusWithContext", varargs...) + ret0, _ := ret[0].(*s3.GetBucketPolicyStatusOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketPolicyStatusWithContext indicates an expected call of GetBucketPolicyStatusWithContext. +func (mr *MockS3APIMockRecorder) GetBucketPolicyStatusWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketPolicyStatusWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketPolicyStatusWithContext), varargs...) +} + +// GetBucketPolicyWithContext mocks base method. +func (m *MockS3API) GetBucketPolicyWithContext(arg0 aws.Context, arg1 *s3.GetBucketPolicyInput, arg2 ...request.Option) (*s3.GetBucketPolicyOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetBucketPolicyWithContext", varargs...) + ret0, _ := ret[0].(*s3.GetBucketPolicyOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketPolicyWithContext indicates an expected call of GetBucketPolicyWithContext. +func (mr *MockS3APIMockRecorder) GetBucketPolicyWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketPolicyWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketPolicyWithContext), varargs...) +} + +// GetBucketReplication mocks base method. 
+func (m *MockS3API) GetBucketReplication(arg0 *s3.GetBucketReplicationInput) (*s3.GetBucketReplicationOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketReplication", arg0) + ret0, _ := ret[0].(*s3.GetBucketReplicationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketReplication indicates an expected call of GetBucketReplication. +func (mr *MockS3APIMockRecorder) GetBucketReplication(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketReplication", reflect.TypeOf((*MockS3API)(nil).GetBucketReplication), arg0) +} + +// GetBucketReplicationRequest mocks base method. +func (m *MockS3API) GetBucketReplicationRequest(arg0 *s3.GetBucketReplicationInput) (*request.Request, *s3.GetBucketReplicationOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketReplicationRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.GetBucketReplicationOutput) + return ret0, ret1 +} + +// GetBucketReplicationRequest indicates an expected call of GetBucketReplicationRequest. +func (mr *MockS3APIMockRecorder) GetBucketReplicationRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketReplicationRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketReplicationRequest), arg0) +} + +// GetBucketReplicationWithContext mocks base method. +func (m *MockS3API) GetBucketReplicationWithContext(arg0 aws.Context, arg1 *s3.GetBucketReplicationInput, arg2 ...request.Option) (*s3.GetBucketReplicationOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetBucketReplicationWithContext", varargs...) + ret0, _ := ret[0].(*s3.GetBucketReplicationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketReplicationWithContext indicates an expected call of GetBucketReplicationWithContext. +func (mr *MockS3APIMockRecorder) GetBucketReplicationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketReplicationWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketReplicationWithContext), varargs...) +} + +// GetBucketRequestPayment mocks base method. +func (m *MockS3API) GetBucketRequestPayment(arg0 *s3.GetBucketRequestPaymentInput) (*s3.GetBucketRequestPaymentOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketRequestPayment", arg0) + ret0, _ := ret[0].(*s3.GetBucketRequestPaymentOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketRequestPayment indicates an expected call of GetBucketRequestPayment. +func (mr *MockS3APIMockRecorder) GetBucketRequestPayment(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketRequestPayment", reflect.TypeOf((*MockS3API)(nil).GetBucketRequestPayment), arg0) +} + +// GetBucketRequestPaymentRequest mocks base method. 
+func (m *MockS3API) GetBucketRequestPaymentRequest(arg0 *s3.GetBucketRequestPaymentInput) (*request.Request, *s3.GetBucketRequestPaymentOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketRequestPaymentRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.GetBucketRequestPaymentOutput) + return ret0, ret1 +} + +// GetBucketRequestPaymentRequest indicates an expected call of GetBucketRequestPaymentRequest. +func (mr *MockS3APIMockRecorder) GetBucketRequestPaymentRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketRequestPaymentRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketRequestPaymentRequest), arg0) +} + +// GetBucketRequestPaymentWithContext mocks base method. +func (m *MockS3API) GetBucketRequestPaymentWithContext(arg0 aws.Context, arg1 *s3.GetBucketRequestPaymentInput, arg2 ...request.Option) (*s3.GetBucketRequestPaymentOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetBucketRequestPaymentWithContext", varargs...) + ret0, _ := ret[0].(*s3.GetBucketRequestPaymentOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketRequestPaymentWithContext indicates an expected call of GetBucketRequestPaymentWithContext. +func (mr *MockS3APIMockRecorder) GetBucketRequestPaymentWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketRequestPaymentWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketRequestPaymentWithContext), varargs...) +} + +// GetBucketTagging mocks base method. +func (m *MockS3API) GetBucketTagging(arg0 *s3.GetBucketTaggingInput) (*s3.GetBucketTaggingOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketTagging", arg0) + ret0, _ := ret[0].(*s3.GetBucketTaggingOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketTagging indicates an expected call of GetBucketTagging. +func (mr *MockS3APIMockRecorder) GetBucketTagging(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketTagging", reflect.TypeOf((*MockS3API)(nil).GetBucketTagging), arg0) +} + +// GetBucketTaggingRequest mocks base method. +func (m *MockS3API) GetBucketTaggingRequest(arg0 *s3.GetBucketTaggingInput) (*request.Request, *s3.GetBucketTaggingOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketTaggingRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.GetBucketTaggingOutput) + return ret0, ret1 +} + +// GetBucketTaggingRequest indicates an expected call of GetBucketTaggingRequest. +func (mr *MockS3APIMockRecorder) GetBucketTaggingRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketTaggingRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketTaggingRequest), arg0) +} + +// GetBucketTaggingWithContext mocks base method. +func (m *MockS3API) GetBucketTaggingWithContext(arg0 aws.Context, arg1 *s3.GetBucketTaggingInput, arg2 ...request.Option) (*s3.GetBucketTaggingOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetBucketTaggingWithContext", varargs...) 
+ ret0, _ := ret[0].(*s3.GetBucketTaggingOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketTaggingWithContext indicates an expected call of GetBucketTaggingWithContext. +func (mr *MockS3APIMockRecorder) GetBucketTaggingWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketTaggingWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketTaggingWithContext), varargs...) +} + +// GetBucketVersioning mocks base method. +func (m *MockS3API) GetBucketVersioning(arg0 *s3.GetBucketVersioningInput) (*s3.GetBucketVersioningOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketVersioning", arg0) + ret0, _ := ret[0].(*s3.GetBucketVersioningOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketVersioning indicates an expected call of GetBucketVersioning. +func (mr *MockS3APIMockRecorder) GetBucketVersioning(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketVersioning", reflect.TypeOf((*MockS3API)(nil).GetBucketVersioning), arg0) +} + +// GetBucketVersioningRequest mocks base method. +func (m *MockS3API) GetBucketVersioningRequest(arg0 *s3.GetBucketVersioningInput) (*request.Request, *s3.GetBucketVersioningOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketVersioningRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.GetBucketVersioningOutput) + return ret0, ret1 +} + +// GetBucketVersioningRequest indicates an expected call of GetBucketVersioningRequest. +func (mr *MockS3APIMockRecorder) GetBucketVersioningRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketVersioningRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketVersioningRequest), arg0) +} + +// GetBucketVersioningWithContext mocks base method. +func (m *MockS3API) GetBucketVersioningWithContext(arg0 aws.Context, arg1 *s3.GetBucketVersioningInput, arg2 ...request.Option) (*s3.GetBucketVersioningOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetBucketVersioningWithContext", varargs...) + ret0, _ := ret[0].(*s3.GetBucketVersioningOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketVersioningWithContext indicates an expected call of GetBucketVersioningWithContext. +func (mr *MockS3APIMockRecorder) GetBucketVersioningWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketVersioningWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketVersioningWithContext), varargs...) +} + +// GetBucketWebsite mocks base method. +func (m *MockS3API) GetBucketWebsite(arg0 *s3.GetBucketWebsiteInput) (*s3.GetBucketWebsiteOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketWebsite", arg0) + ret0, _ := ret[0].(*s3.GetBucketWebsiteOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketWebsite indicates an expected call of GetBucketWebsite. 
+func (mr *MockS3APIMockRecorder) GetBucketWebsite(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketWebsite", reflect.TypeOf((*MockS3API)(nil).GetBucketWebsite), arg0) +} + +// GetBucketWebsiteRequest mocks base method. +func (m *MockS3API) GetBucketWebsiteRequest(arg0 *s3.GetBucketWebsiteInput) (*request.Request, *s3.GetBucketWebsiteOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketWebsiteRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.GetBucketWebsiteOutput) + return ret0, ret1 +} + +// GetBucketWebsiteRequest indicates an expected call of GetBucketWebsiteRequest. +func (mr *MockS3APIMockRecorder) GetBucketWebsiteRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketWebsiteRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketWebsiteRequest), arg0) +} + +// GetBucketWebsiteWithContext mocks base method. +func (m *MockS3API) GetBucketWebsiteWithContext(arg0 aws.Context, arg1 *s3.GetBucketWebsiteInput, arg2 ...request.Option) (*s3.GetBucketWebsiteOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetBucketWebsiteWithContext", varargs...) + ret0, _ := ret[0].(*s3.GetBucketWebsiteOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketWebsiteWithContext indicates an expected call of GetBucketWebsiteWithContext. +func (mr *MockS3APIMockRecorder) GetBucketWebsiteWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketWebsiteWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketWebsiteWithContext), varargs...) +} + +// GetObject mocks base method. +func (m *MockS3API) GetObject(arg0 *s3.GetObjectInput) (*s3.GetObjectOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetObject", arg0) + ret0, _ := ret[0].(*s3.GetObjectOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetObject indicates an expected call of GetObject. +func (mr *MockS3APIMockRecorder) GetObject(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObject", reflect.TypeOf((*MockS3API)(nil).GetObject), arg0) +} + +// GetObjectAcl mocks base method. +func (m *MockS3API) GetObjectAcl(arg0 *s3.GetObjectAclInput) (*s3.GetObjectAclOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetObjectAcl", arg0) + ret0, _ := ret[0].(*s3.GetObjectAclOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetObjectAcl indicates an expected call of GetObjectAcl. +func (mr *MockS3APIMockRecorder) GetObjectAcl(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectAcl", reflect.TypeOf((*MockS3API)(nil).GetObjectAcl), arg0) +} + +// GetObjectAclRequest mocks base method. +func (m *MockS3API) GetObjectAclRequest(arg0 *s3.GetObjectAclInput) (*request.Request, *s3.GetObjectAclOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetObjectAclRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.GetObjectAclOutput) + return ret0, ret1 +} + +// GetObjectAclRequest indicates an expected call of GetObjectAclRequest. 
+func (mr *MockS3APIMockRecorder) GetObjectAclRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectAclRequest", reflect.TypeOf((*MockS3API)(nil).GetObjectAclRequest), arg0) +} + +// GetObjectAclWithContext mocks base method. +func (m *MockS3API) GetObjectAclWithContext(arg0 aws.Context, arg1 *s3.GetObjectAclInput, arg2 ...request.Option) (*s3.GetObjectAclOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetObjectAclWithContext", varargs...) + ret0, _ := ret[0].(*s3.GetObjectAclOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetObjectAclWithContext indicates an expected call of GetObjectAclWithContext. +func (mr *MockS3APIMockRecorder) GetObjectAclWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectAclWithContext", reflect.TypeOf((*MockS3API)(nil).GetObjectAclWithContext), varargs...) +} + +// GetObjectAttributes mocks base method. +func (m *MockS3API) GetObjectAttributes(arg0 *s3.GetObjectAttributesInput) (*s3.GetObjectAttributesOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetObjectAttributes", arg0) + ret0, _ := ret[0].(*s3.GetObjectAttributesOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetObjectAttributes indicates an expected call of GetObjectAttributes. +func (mr *MockS3APIMockRecorder) GetObjectAttributes(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectAttributes", reflect.TypeOf((*MockS3API)(nil).GetObjectAttributes), arg0) +} + +// GetObjectAttributesRequest mocks base method. +func (m *MockS3API) GetObjectAttributesRequest(arg0 *s3.GetObjectAttributesInput) (*request.Request, *s3.GetObjectAttributesOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetObjectAttributesRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.GetObjectAttributesOutput) + return ret0, ret1 +} + +// GetObjectAttributesRequest indicates an expected call of GetObjectAttributesRequest. +func (mr *MockS3APIMockRecorder) GetObjectAttributesRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectAttributesRequest", reflect.TypeOf((*MockS3API)(nil).GetObjectAttributesRequest), arg0) +} + +// GetObjectAttributesWithContext mocks base method. +func (m *MockS3API) GetObjectAttributesWithContext(arg0 aws.Context, arg1 *s3.GetObjectAttributesInput, arg2 ...request.Option) (*s3.GetObjectAttributesOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetObjectAttributesWithContext", varargs...) + ret0, _ := ret[0].(*s3.GetObjectAttributesOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetObjectAttributesWithContext indicates an expected call of GetObjectAttributesWithContext. +func (mr *MockS3APIMockRecorder) GetObjectAttributesWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) 
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectAttributesWithContext", reflect.TypeOf((*MockS3API)(nil).GetObjectAttributesWithContext), varargs...) +} + +// GetObjectLegalHold mocks base method. +func (m *MockS3API) GetObjectLegalHold(arg0 *s3.GetObjectLegalHoldInput) (*s3.GetObjectLegalHoldOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetObjectLegalHold", arg0) + ret0, _ := ret[0].(*s3.GetObjectLegalHoldOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetObjectLegalHold indicates an expected call of GetObjectLegalHold. +func (mr *MockS3APIMockRecorder) GetObjectLegalHold(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectLegalHold", reflect.TypeOf((*MockS3API)(nil).GetObjectLegalHold), arg0) +} + +// GetObjectLegalHoldRequest mocks base method. +func (m *MockS3API) GetObjectLegalHoldRequest(arg0 *s3.GetObjectLegalHoldInput) (*request.Request, *s3.GetObjectLegalHoldOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetObjectLegalHoldRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.GetObjectLegalHoldOutput) + return ret0, ret1 +} + +// GetObjectLegalHoldRequest indicates an expected call of GetObjectLegalHoldRequest. +func (mr *MockS3APIMockRecorder) GetObjectLegalHoldRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectLegalHoldRequest", reflect.TypeOf((*MockS3API)(nil).GetObjectLegalHoldRequest), arg0) +} + +// GetObjectLegalHoldWithContext mocks base method. +func (m *MockS3API) GetObjectLegalHoldWithContext(arg0 aws.Context, arg1 *s3.GetObjectLegalHoldInput, arg2 ...request.Option) (*s3.GetObjectLegalHoldOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetObjectLegalHoldWithContext", varargs...) + ret0, _ := ret[0].(*s3.GetObjectLegalHoldOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetObjectLegalHoldWithContext indicates an expected call of GetObjectLegalHoldWithContext. +func (mr *MockS3APIMockRecorder) GetObjectLegalHoldWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectLegalHoldWithContext", reflect.TypeOf((*MockS3API)(nil).GetObjectLegalHoldWithContext), varargs...) +} + +// GetObjectLockConfiguration mocks base method. +func (m *MockS3API) GetObjectLockConfiguration(arg0 *s3.GetObjectLockConfigurationInput) (*s3.GetObjectLockConfigurationOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetObjectLockConfiguration", arg0) + ret0, _ := ret[0].(*s3.GetObjectLockConfigurationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetObjectLockConfiguration indicates an expected call of GetObjectLockConfiguration. +func (mr *MockS3APIMockRecorder) GetObjectLockConfiguration(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectLockConfiguration", reflect.TypeOf((*MockS3API)(nil).GetObjectLockConfiguration), arg0) +} + +// GetObjectLockConfigurationRequest mocks base method. 
+func (m *MockS3API) GetObjectLockConfigurationRequest(arg0 *s3.GetObjectLockConfigurationInput) (*request.Request, *s3.GetObjectLockConfigurationOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetObjectLockConfigurationRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.GetObjectLockConfigurationOutput) + return ret0, ret1 +} + +// GetObjectLockConfigurationRequest indicates an expected call of GetObjectLockConfigurationRequest. +func (mr *MockS3APIMockRecorder) GetObjectLockConfigurationRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectLockConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).GetObjectLockConfigurationRequest), arg0) +} + +// GetObjectLockConfigurationWithContext mocks base method. +func (m *MockS3API) GetObjectLockConfigurationWithContext(arg0 aws.Context, arg1 *s3.GetObjectLockConfigurationInput, arg2 ...request.Option) (*s3.GetObjectLockConfigurationOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetObjectLockConfigurationWithContext", varargs...) + ret0, _ := ret[0].(*s3.GetObjectLockConfigurationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetObjectLockConfigurationWithContext indicates an expected call of GetObjectLockConfigurationWithContext. +func (mr *MockS3APIMockRecorder) GetObjectLockConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectLockConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).GetObjectLockConfigurationWithContext), varargs...) +} + +// GetObjectRequest mocks base method. +func (m *MockS3API) GetObjectRequest(arg0 *s3.GetObjectInput) (*request.Request, *s3.GetObjectOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetObjectRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.GetObjectOutput) + return ret0, ret1 +} + +// GetObjectRequest indicates an expected call of GetObjectRequest. +func (mr *MockS3APIMockRecorder) GetObjectRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectRequest", reflect.TypeOf((*MockS3API)(nil).GetObjectRequest), arg0) +} + +// GetObjectRetention mocks base method. +func (m *MockS3API) GetObjectRetention(arg0 *s3.GetObjectRetentionInput) (*s3.GetObjectRetentionOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetObjectRetention", arg0) + ret0, _ := ret[0].(*s3.GetObjectRetentionOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetObjectRetention indicates an expected call of GetObjectRetention. +func (mr *MockS3APIMockRecorder) GetObjectRetention(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectRetention", reflect.TypeOf((*MockS3API)(nil).GetObjectRetention), arg0) +} + +// GetObjectRetentionRequest mocks base method. 
+func (m *MockS3API) GetObjectRetentionRequest(arg0 *s3.GetObjectRetentionInput) (*request.Request, *s3.GetObjectRetentionOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetObjectRetentionRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.GetObjectRetentionOutput) + return ret0, ret1 +} + +// GetObjectRetentionRequest indicates an expected call of GetObjectRetentionRequest. +func (mr *MockS3APIMockRecorder) GetObjectRetentionRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectRetentionRequest", reflect.TypeOf((*MockS3API)(nil).GetObjectRetentionRequest), arg0) +} + +// GetObjectRetentionWithContext mocks base method. +func (m *MockS3API) GetObjectRetentionWithContext(arg0 aws.Context, arg1 *s3.GetObjectRetentionInput, arg2 ...request.Option) (*s3.GetObjectRetentionOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetObjectRetentionWithContext", varargs...) + ret0, _ := ret[0].(*s3.GetObjectRetentionOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetObjectRetentionWithContext indicates an expected call of GetObjectRetentionWithContext. +func (mr *MockS3APIMockRecorder) GetObjectRetentionWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectRetentionWithContext", reflect.TypeOf((*MockS3API)(nil).GetObjectRetentionWithContext), varargs...) +} + +// GetObjectTagging mocks base method. +func (m *MockS3API) GetObjectTagging(arg0 *s3.GetObjectTaggingInput) (*s3.GetObjectTaggingOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetObjectTagging", arg0) + ret0, _ := ret[0].(*s3.GetObjectTaggingOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetObjectTagging indicates an expected call of GetObjectTagging. +func (mr *MockS3APIMockRecorder) GetObjectTagging(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectTagging", reflect.TypeOf((*MockS3API)(nil).GetObjectTagging), arg0) +} + +// GetObjectTaggingRequest mocks base method. +func (m *MockS3API) GetObjectTaggingRequest(arg0 *s3.GetObjectTaggingInput) (*request.Request, *s3.GetObjectTaggingOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetObjectTaggingRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.GetObjectTaggingOutput) + return ret0, ret1 +} + +// GetObjectTaggingRequest indicates an expected call of GetObjectTaggingRequest. +func (mr *MockS3APIMockRecorder) GetObjectTaggingRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectTaggingRequest", reflect.TypeOf((*MockS3API)(nil).GetObjectTaggingRequest), arg0) +} + +// GetObjectTaggingWithContext mocks base method. +func (m *MockS3API) GetObjectTaggingWithContext(arg0 aws.Context, arg1 *s3.GetObjectTaggingInput, arg2 ...request.Option) (*s3.GetObjectTaggingOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetObjectTaggingWithContext", varargs...) 
+ ret0, _ := ret[0].(*s3.GetObjectTaggingOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetObjectTaggingWithContext indicates an expected call of GetObjectTaggingWithContext. +func (mr *MockS3APIMockRecorder) GetObjectTaggingWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectTaggingWithContext", reflect.TypeOf((*MockS3API)(nil).GetObjectTaggingWithContext), varargs...) +} + +// GetObjectTorrent mocks base method. +func (m *MockS3API) GetObjectTorrent(arg0 *s3.GetObjectTorrentInput) (*s3.GetObjectTorrentOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetObjectTorrent", arg0) + ret0, _ := ret[0].(*s3.GetObjectTorrentOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetObjectTorrent indicates an expected call of GetObjectTorrent. +func (mr *MockS3APIMockRecorder) GetObjectTorrent(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectTorrent", reflect.TypeOf((*MockS3API)(nil).GetObjectTorrent), arg0) +} + +// GetObjectTorrentRequest mocks base method. +func (m *MockS3API) GetObjectTorrentRequest(arg0 *s3.GetObjectTorrentInput) (*request.Request, *s3.GetObjectTorrentOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetObjectTorrentRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.GetObjectTorrentOutput) + return ret0, ret1 +} + +// GetObjectTorrentRequest indicates an expected call of GetObjectTorrentRequest. +func (mr *MockS3APIMockRecorder) GetObjectTorrentRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectTorrentRequest", reflect.TypeOf((*MockS3API)(nil).GetObjectTorrentRequest), arg0) +} + +// GetObjectTorrentWithContext mocks base method. +func (m *MockS3API) GetObjectTorrentWithContext(arg0 aws.Context, arg1 *s3.GetObjectTorrentInput, arg2 ...request.Option) (*s3.GetObjectTorrentOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetObjectTorrentWithContext", varargs...) + ret0, _ := ret[0].(*s3.GetObjectTorrentOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetObjectTorrentWithContext indicates an expected call of GetObjectTorrentWithContext. +func (mr *MockS3APIMockRecorder) GetObjectTorrentWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectTorrentWithContext", reflect.TypeOf((*MockS3API)(nil).GetObjectTorrentWithContext), varargs...) +} + +// GetObjectWithContext mocks base method. +func (m *MockS3API) GetObjectWithContext(arg0 aws.Context, arg1 *s3.GetObjectInput, arg2 ...request.Option) (*s3.GetObjectOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetObjectWithContext", varargs...) + ret0, _ := ret[0].(*s3.GetObjectOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetObjectWithContext indicates an expected call of GetObjectWithContext. 
+func (mr *MockS3APIMockRecorder) GetObjectWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectWithContext", reflect.TypeOf((*MockS3API)(nil).GetObjectWithContext), varargs...) +} + +// GetPublicAccessBlock mocks base method. +func (m *MockS3API) GetPublicAccessBlock(arg0 *s3.GetPublicAccessBlockInput) (*s3.GetPublicAccessBlockOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPublicAccessBlock", arg0) + ret0, _ := ret[0].(*s3.GetPublicAccessBlockOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPublicAccessBlock indicates an expected call of GetPublicAccessBlock. +func (mr *MockS3APIMockRecorder) GetPublicAccessBlock(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPublicAccessBlock", reflect.TypeOf((*MockS3API)(nil).GetPublicAccessBlock), arg0) +} + +// GetPublicAccessBlockRequest mocks base method. +func (m *MockS3API) GetPublicAccessBlockRequest(arg0 *s3.GetPublicAccessBlockInput) (*request.Request, *s3.GetPublicAccessBlockOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPublicAccessBlockRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.GetPublicAccessBlockOutput) + return ret0, ret1 +} + +// GetPublicAccessBlockRequest indicates an expected call of GetPublicAccessBlockRequest. +func (mr *MockS3APIMockRecorder) GetPublicAccessBlockRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPublicAccessBlockRequest", reflect.TypeOf((*MockS3API)(nil).GetPublicAccessBlockRequest), arg0) +} + +// GetPublicAccessBlockWithContext mocks base method. +func (m *MockS3API) GetPublicAccessBlockWithContext(arg0 aws.Context, arg1 *s3.GetPublicAccessBlockInput, arg2 ...request.Option) (*s3.GetPublicAccessBlockOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetPublicAccessBlockWithContext", varargs...) + ret0, _ := ret[0].(*s3.GetPublicAccessBlockOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPublicAccessBlockWithContext indicates an expected call of GetPublicAccessBlockWithContext. +func (mr *MockS3APIMockRecorder) GetPublicAccessBlockWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPublicAccessBlockWithContext", reflect.TypeOf((*MockS3API)(nil).GetPublicAccessBlockWithContext), varargs...) +} + +// HeadBucket mocks base method. +func (m *MockS3API) HeadBucket(arg0 *s3.HeadBucketInput) (*s3.HeadBucketOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "HeadBucket", arg0) + ret0, _ := ret[0].(*s3.HeadBucketOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// HeadBucket indicates an expected call of HeadBucket. +func (mr *MockS3APIMockRecorder) HeadBucket(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HeadBucket", reflect.TypeOf((*MockS3API)(nil).HeadBucket), arg0) +} + +// HeadBucketRequest mocks base method. 
+func (m *MockS3API) HeadBucketRequest(arg0 *s3.HeadBucketInput) (*request.Request, *s3.HeadBucketOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "HeadBucketRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.HeadBucketOutput) + return ret0, ret1 +} + +// HeadBucketRequest indicates an expected call of HeadBucketRequest. +func (mr *MockS3APIMockRecorder) HeadBucketRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HeadBucketRequest", reflect.TypeOf((*MockS3API)(nil).HeadBucketRequest), arg0) +} + +// HeadBucketWithContext mocks base method. +func (m *MockS3API) HeadBucketWithContext(arg0 aws.Context, arg1 *s3.HeadBucketInput, arg2 ...request.Option) (*s3.HeadBucketOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "HeadBucketWithContext", varargs...) + ret0, _ := ret[0].(*s3.HeadBucketOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// HeadBucketWithContext indicates an expected call of HeadBucketWithContext. +func (mr *MockS3APIMockRecorder) HeadBucketWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HeadBucketWithContext", reflect.TypeOf((*MockS3API)(nil).HeadBucketWithContext), varargs...) +} + +// HeadObject mocks base method. +func (m *MockS3API) HeadObject(arg0 *s3.HeadObjectInput) (*s3.HeadObjectOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "HeadObject", arg0) + ret0, _ := ret[0].(*s3.HeadObjectOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// HeadObject indicates an expected call of HeadObject. +func (mr *MockS3APIMockRecorder) HeadObject(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HeadObject", reflect.TypeOf((*MockS3API)(nil).HeadObject), arg0) +} + +// HeadObjectRequest mocks base method. +func (m *MockS3API) HeadObjectRequest(arg0 *s3.HeadObjectInput) (*request.Request, *s3.HeadObjectOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "HeadObjectRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.HeadObjectOutput) + return ret0, ret1 +} + +// HeadObjectRequest indicates an expected call of HeadObjectRequest. +func (mr *MockS3APIMockRecorder) HeadObjectRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HeadObjectRequest", reflect.TypeOf((*MockS3API)(nil).HeadObjectRequest), arg0) +} + +// HeadObjectWithContext mocks base method. +func (m *MockS3API) HeadObjectWithContext(arg0 aws.Context, arg1 *s3.HeadObjectInput, arg2 ...request.Option) (*s3.HeadObjectOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "HeadObjectWithContext", varargs...) + ret0, _ := ret[0].(*s3.HeadObjectOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// HeadObjectWithContext indicates an expected call of HeadObjectWithContext. +func (mr *MockS3APIMockRecorder) HeadObjectWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) 
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HeadObjectWithContext", reflect.TypeOf((*MockS3API)(nil).HeadObjectWithContext), varargs...) +} + +// ListBucketAnalyticsConfigurations mocks base method. +func (m *MockS3API) ListBucketAnalyticsConfigurations(arg0 *s3.ListBucketAnalyticsConfigurationsInput) (*s3.ListBucketAnalyticsConfigurationsOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListBucketAnalyticsConfigurations", arg0) + ret0, _ := ret[0].(*s3.ListBucketAnalyticsConfigurationsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListBucketAnalyticsConfigurations indicates an expected call of ListBucketAnalyticsConfigurations. +func (mr *MockS3APIMockRecorder) ListBucketAnalyticsConfigurations(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListBucketAnalyticsConfigurations", reflect.TypeOf((*MockS3API)(nil).ListBucketAnalyticsConfigurations), arg0) +} + +// ListBucketAnalyticsConfigurationsRequest mocks base method. +func (m *MockS3API) ListBucketAnalyticsConfigurationsRequest(arg0 *s3.ListBucketAnalyticsConfigurationsInput) (*request.Request, *s3.ListBucketAnalyticsConfigurationsOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListBucketAnalyticsConfigurationsRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.ListBucketAnalyticsConfigurationsOutput) + return ret0, ret1 +} + +// ListBucketAnalyticsConfigurationsRequest indicates an expected call of ListBucketAnalyticsConfigurationsRequest. +func (mr *MockS3APIMockRecorder) ListBucketAnalyticsConfigurationsRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListBucketAnalyticsConfigurationsRequest", reflect.TypeOf((*MockS3API)(nil).ListBucketAnalyticsConfigurationsRequest), arg0) +} + +// ListBucketAnalyticsConfigurationsWithContext mocks base method. +func (m *MockS3API) ListBucketAnalyticsConfigurationsWithContext(arg0 aws.Context, arg1 *s3.ListBucketAnalyticsConfigurationsInput, arg2 ...request.Option) (*s3.ListBucketAnalyticsConfigurationsOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListBucketAnalyticsConfigurationsWithContext", varargs...) + ret0, _ := ret[0].(*s3.ListBucketAnalyticsConfigurationsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListBucketAnalyticsConfigurationsWithContext indicates an expected call of ListBucketAnalyticsConfigurationsWithContext. +func (mr *MockS3APIMockRecorder) ListBucketAnalyticsConfigurationsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListBucketAnalyticsConfigurationsWithContext", reflect.TypeOf((*MockS3API)(nil).ListBucketAnalyticsConfigurationsWithContext), varargs...) +} + +// ListBucketIntelligentTieringConfigurations mocks base method. 
+func (m *MockS3API) ListBucketIntelligentTieringConfigurations(arg0 *s3.ListBucketIntelligentTieringConfigurationsInput) (*s3.ListBucketIntelligentTieringConfigurationsOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListBucketIntelligentTieringConfigurations", arg0) + ret0, _ := ret[0].(*s3.ListBucketIntelligentTieringConfigurationsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListBucketIntelligentTieringConfigurations indicates an expected call of ListBucketIntelligentTieringConfigurations. +func (mr *MockS3APIMockRecorder) ListBucketIntelligentTieringConfigurations(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListBucketIntelligentTieringConfigurations", reflect.TypeOf((*MockS3API)(nil).ListBucketIntelligentTieringConfigurations), arg0) +} + +// ListBucketIntelligentTieringConfigurationsRequest mocks base method. +func (m *MockS3API) ListBucketIntelligentTieringConfigurationsRequest(arg0 *s3.ListBucketIntelligentTieringConfigurationsInput) (*request.Request, *s3.ListBucketIntelligentTieringConfigurationsOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListBucketIntelligentTieringConfigurationsRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.ListBucketIntelligentTieringConfigurationsOutput) + return ret0, ret1 +} + +// ListBucketIntelligentTieringConfigurationsRequest indicates an expected call of ListBucketIntelligentTieringConfigurationsRequest. +func (mr *MockS3APIMockRecorder) ListBucketIntelligentTieringConfigurationsRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListBucketIntelligentTieringConfigurationsRequest", reflect.TypeOf((*MockS3API)(nil).ListBucketIntelligentTieringConfigurationsRequest), arg0) +} + +// ListBucketIntelligentTieringConfigurationsWithContext mocks base method. +func (m *MockS3API) ListBucketIntelligentTieringConfigurationsWithContext(arg0 aws.Context, arg1 *s3.ListBucketIntelligentTieringConfigurationsInput, arg2 ...request.Option) (*s3.ListBucketIntelligentTieringConfigurationsOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListBucketIntelligentTieringConfigurationsWithContext", varargs...) + ret0, _ := ret[0].(*s3.ListBucketIntelligentTieringConfigurationsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListBucketIntelligentTieringConfigurationsWithContext indicates an expected call of ListBucketIntelligentTieringConfigurationsWithContext. +func (mr *MockS3APIMockRecorder) ListBucketIntelligentTieringConfigurationsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListBucketIntelligentTieringConfigurationsWithContext", reflect.TypeOf((*MockS3API)(nil).ListBucketIntelligentTieringConfigurationsWithContext), varargs...) +} + +// ListBucketInventoryConfigurations mocks base method. 
+func (m *MockS3API) ListBucketInventoryConfigurations(arg0 *s3.ListBucketInventoryConfigurationsInput) (*s3.ListBucketInventoryConfigurationsOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListBucketInventoryConfigurations", arg0) + ret0, _ := ret[0].(*s3.ListBucketInventoryConfigurationsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListBucketInventoryConfigurations indicates an expected call of ListBucketInventoryConfigurations. +func (mr *MockS3APIMockRecorder) ListBucketInventoryConfigurations(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListBucketInventoryConfigurations", reflect.TypeOf((*MockS3API)(nil).ListBucketInventoryConfigurations), arg0) +} + +// ListBucketInventoryConfigurationsRequest mocks base method. +func (m *MockS3API) ListBucketInventoryConfigurationsRequest(arg0 *s3.ListBucketInventoryConfigurationsInput) (*request.Request, *s3.ListBucketInventoryConfigurationsOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListBucketInventoryConfigurationsRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.ListBucketInventoryConfigurationsOutput) + return ret0, ret1 +} + +// ListBucketInventoryConfigurationsRequest indicates an expected call of ListBucketInventoryConfigurationsRequest. +func (mr *MockS3APIMockRecorder) ListBucketInventoryConfigurationsRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListBucketInventoryConfigurationsRequest", reflect.TypeOf((*MockS3API)(nil).ListBucketInventoryConfigurationsRequest), arg0) +} + +// ListBucketInventoryConfigurationsWithContext mocks base method. +func (m *MockS3API) ListBucketInventoryConfigurationsWithContext(arg0 aws.Context, arg1 *s3.ListBucketInventoryConfigurationsInput, arg2 ...request.Option) (*s3.ListBucketInventoryConfigurationsOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListBucketInventoryConfigurationsWithContext", varargs...) + ret0, _ := ret[0].(*s3.ListBucketInventoryConfigurationsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListBucketInventoryConfigurationsWithContext indicates an expected call of ListBucketInventoryConfigurationsWithContext. +func (mr *MockS3APIMockRecorder) ListBucketInventoryConfigurationsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListBucketInventoryConfigurationsWithContext", reflect.TypeOf((*MockS3API)(nil).ListBucketInventoryConfigurationsWithContext), varargs...) +} + +// ListBucketMetricsConfigurations mocks base method. +func (m *MockS3API) ListBucketMetricsConfigurations(arg0 *s3.ListBucketMetricsConfigurationsInput) (*s3.ListBucketMetricsConfigurationsOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListBucketMetricsConfigurations", arg0) + ret0, _ := ret[0].(*s3.ListBucketMetricsConfigurationsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListBucketMetricsConfigurations indicates an expected call of ListBucketMetricsConfigurations. 
+func (mr *MockS3APIMockRecorder) ListBucketMetricsConfigurations(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListBucketMetricsConfigurations", reflect.TypeOf((*MockS3API)(nil).ListBucketMetricsConfigurations), arg0) +} + +// ListBucketMetricsConfigurationsRequest mocks base method. +func (m *MockS3API) ListBucketMetricsConfigurationsRequest(arg0 *s3.ListBucketMetricsConfigurationsInput) (*request.Request, *s3.ListBucketMetricsConfigurationsOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListBucketMetricsConfigurationsRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.ListBucketMetricsConfigurationsOutput) + return ret0, ret1 +} + +// ListBucketMetricsConfigurationsRequest indicates an expected call of ListBucketMetricsConfigurationsRequest. +func (mr *MockS3APIMockRecorder) ListBucketMetricsConfigurationsRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListBucketMetricsConfigurationsRequest", reflect.TypeOf((*MockS3API)(nil).ListBucketMetricsConfigurationsRequest), arg0) +} + +// ListBucketMetricsConfigurationsWithContext mocks base method. +func (m *MockS3API) ListBucketMetricsConfigurationsWithContext(arg0 aws.Context, arg1 *s3.ListBucketMetricsConfigurationsInput, arg2 ...request.Option) (*s3.ListBucketMetricsConfigurationsOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListBucketMetricsConfigurationsWithContext", varargs...) + ret0, _ := ret[0].(*s3.ListBucketMetricsConfigurationsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListBucketMetricsConfigurationsWithContext indicates an expected call of ListBucketMetricsConfigurationsWithContext. +func (mr *MockS3APIMockRecorder) ListBucketMetricsConfigurationsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListBucketMetricsConfigurationsWithContext", reflect.TypeOf((*MockS3API)(nil).ListBucketMetricsConfigurationsWithContext), varargs...) +} + +// ListBuckets mocks base method. +func (m *MockS3API) ListBuckets(arg0 *s3.ListBucketsInput) (*s3.ListBucketsOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListBuckets", arg0) + ret0, _ := ret[0].(*s3.ListBucketsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListBuckets indicates an expected call of ListBuckets. +func (mr *MockS3APIMockRecorder) ListBuckets(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListBuckets", reflect.TypeOf((*MockS3API)(nil).ListBuckets), arg0) +} + +// ListBucketsRequest mocks base method. +func (m *MockS3API) ListBucketsRequest(arg0 *s3.ListBucketsInput) (*request.Request, *s3.ListBucketsOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListBucketsRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.ListBucketsOutput) + return ret0, ret1 +} + +// ListBucketsRequest indicates an expected call of ListBucketsRequest. 
+func (mr *MockS3APIMockRecorder) ListBucketsRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListBucketsRequest", reflect.TypeOf((*MockS3API)(nil).ListBucketsRequest), arg0) +} + +// ListBucketsWithContext mocks base method. +func (m *MockS3API) ListBucketsWithContext(arg0 aws.Context, arg1 *s3.ListBucketsInput, arg2 ...request.Option) (*s3.ListBucketsOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListBucketsWithContext", varargs...) + ret0, _ := ret[0].(*s3.ListBucketsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListBucketsWithContext indicates an expected call of ListBucketsWithContext. +func (mr *MockS3APIMockRecorder) ListBucketsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListBucketsWithContext", reflect.TypeOf((*MockS3API)(nil).ListBucketsWithContext), varargs...) +} + +// ListMultipartUploads mocks base method. +func (m *MockS3API) ListMultipartUploads(arg0 *s3.ListMultipartUploadsInput) (*s3.ListMultipartUploadsOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListMultipartUploads", arg0) + ret0, _ := ret[0].(*s3.ListMultipartUploadsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListMultipartUploads indicates an expected call of ListMultipartUploads. +func (mr *MockS3APIMockRecorder) ListMultipartUploads(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListMultipartUploads", reflect.TypeOf((*MockS3API)(nil).ListMultipartUploads), arg0) +} + +// ListMultipartUploadsPages mocks base method. +func (m *MockS3API) ListMultipartUploadsPages(arg0 *s3.ListMultipartUploadsInput, arg1 func(*s3.ListMultipartUploadsOutput, bool) bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListMultipartUploadsPages", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ListMultipartUploadsPages indicates an expected call of ListMultipartUploadsPages. +func (mr *MockS3APIMockRecorder) ListMultipartUploadsPages(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListMultipartUploadsPages", reflect.TypeOf((*MockS3API)(nil).ListMultipartUploadsPages), arg0, arg1) +} + +// ListMultipartUploadsPagesWithContext mocks base method. +func (m *MockS3API) ListMultipartUploadsPagesWithContext(arg0 aws.Context, arg1 *s3.ListMultipartUploadsInput, arg2 func(*s3.ListMultipartUploadsOutput, bool) bool, arg3 ...request.Option) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1, arg2} + for _, a := range arg3 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListMultipartUploadsPagesWithContext", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// ListMultipartUploadsPagesWithContext indicates an expected call of ListMultipartUploadsPagesWithContext. +func (mr *MockS3APIMockRecorder) ListMultipartUploadsPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1, arg2}, arg3...) 
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListMultipartUploadsPagesWithContext", reflect.TypeOf((*MockS3API)(nil).ListMultipartUploadsPagesWithContext), varargs...) +} + +// ListMultipartUploadsRequest mocks base method. +func (m *MockS3API) ListMultipartUploadsRequest(arg0 *s3.ListMultipartUploadsInput) (*request.Request, *s3.ListMultipartUploadsOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListMultipartUploadsRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.ListMultipartUploadsOutput) + return ret0, ret1 +} + +// ListMultipartUploadsRequest indicates an expected call of ListMultipartUploadsRequest. +func (mr *MockS3APIMockRecorder) ListMultipartUploadsRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListMultipartUploadsRequest", reflect.TypeOf((*MockS3API)(nil).ListMultipartUploadsRequest), arg0) +} + +// ListMultipartUploadsWithContext mocks base method. +func (m *MockS3API) ListMultipartUploadsWithContext(arg0 aws.Context, arg1 *s3.ListMultipartUploadsInput, arg2 ...request.Option) (*s3.ListMultipartUploadsOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListMultipartUploadsWithContext", varargs...) + ret0, _ := ret[0].(*s3.ListMultipartUploadsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListMultipartUploadsWithContext indicates an expected call of ListMultipartUploadsWithContext. +func (mr *MockS3APIMockRecorder) ListMultipartUploadsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListMultipartUploadsWithContext", reflect.TypeOf((*MockS3API)(nil).ListMultipartUploadsWithContext), varargs...) +} + +// ListObjectVersions mocks base method. +func (m *MockS3API) ListObjectVersions(arg0 *s3.ListObjectVersionsInput) (*s3.ListObjectVersionsOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListObjectVersions", arg0) + ret0, _ := ret[0].(*s3.ListObjectVersionsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListObjectVersions indicates an expected call of ListObjectVersions. +func (mr *MockS3APIMockRecorder) ListObjectVersions(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjectVersions", reflect.TypeOf((*MockS3API)(nil).ListObjectVersions), arg0) +} + +// ListObjectVersionsPages mocks base method. +func (m *MockS3API) ListObjectVersionsPages(arg0 *s3.ListObjectVersionsInput, arg1 func(*s3.ListObjectVersionsOutput, bool) bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListObjectVersionsPages", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ListObjectVersionsPages indicates an expected call of ListObjectVersionsPages. +func (mr *MockS3APIMockRecorder) ListObjectVersionsPages(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjectVersionsPages", reflect.TypeOf((*MockS3API)(nil).ListObjectVersionsPages), arg0, arg1) +} + +// ListObjectVersionsPagesWithContext mocks base method. 
+func (m *MockS3API) ListObjectVersionsPagesWithContext(arg0 aws.Context, arg1 *s3.ListObjectVersionsInput, arg2 func(*s3.ListObjectVersionsOutput, bool) bool, arg3 ...request.Option) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1, arg2} + for _, a := range arg3 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListObjectVersionsPagesWithContext", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// ListObjectVersionsPagesWithContext indicates an expected call of ListObjectVersionsPagesWithContext. +func (mr *MockS3APIMockRecorder) ListObjectVersionsPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1, arg2}, arg3...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjectVersionsPagesWithContext", reflect.TypeOf((*MockS3API)(nil).ListObjectVersionsPagesWithContext), varargs...) +} + +// ListObjectVersionsRequest mocks base method. +func (m *MockS3API) ListObjectVersionsRequest(arg0 *s3.ListObjectVersionsInput) (*request.Request, *s3.ListObjectVersionsOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListObjectVersionsRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.ListObjectVersionsOutput) + return ret0, ret1 +} + +// ListObjectVersionsRequest indicates an expected call of ListObjectVersionsRequest. +func (mr *MockS3APIMockRecorder) ListObjectVersionsRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjectVersionsRequest", reflect.TypeOf((*MockS3API)(nil).ListObjectVersionsRequest), arg0) +} + +// ListObjectVersionsWithContext mocks base method. +func (m *MockS3API) ListObjectVersionsWithContext(arg0 aws.Context, arg1 *s3.ListObjectVersionsInput, arg2 ...request.Option) (*s3.ListObjectVersionsOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListObjectVersionsWithContext", varargs...) + ret0, _ := ret[0].(*s3.ListObjectVersionsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListObjectVersionsWithContext indicates an expected call of ListObjectVersionsWithContext. +func (mr *MockS3APIMockRecorder) ListObjectVersionsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjectVersionsWithContext", reflect.TypeOf((*MockS3API)(nil).ListObjectVersionsWithContext), varargs...) +} + +// ListObjects mocks base method. +func (m *MockS3API) ListObjects(arg0 *s3.ListObjectsInput) (*s3.ListObjectsOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListObjects", arg0) + ret0, _ := ret[0].(*s3.ListObjectsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListObjects indicates an expected call of ListObjects. +func (mr *MockS3APIMockRecorder) ListObjects(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjects", reflect.TypeOf((*MockS3API)(nil).ListObjects), arg0) +} + +// ListObjectsPages mocks base method. 
+func (m *MockS3API) ListObjectsPages(arg0 *s3.ListObjectsInput, arg1 func(*s3.ListObjectsOutput, bool) bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListObjectsPages", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ListObjectsPages indicates an expected call of ListObjectsPages. +func (mr *MockS3APIMockRecorder) ListObjectsPages(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjectsPages", reflect.TypeOf((*MockS3API)(nil).ListObjectsPages), arg0, arg1) +} + +// ListObjectsPagesWithContext mocks base method. +func (m *MockS3API) ListObjectsPagesWithContext(arg0 aws.Context, arg1 *s3.ListObjectsInput, arg2 func(*s3.ListObjectsOutput, bool) bool, arg3 ...request.Option) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1, arg2} + for _, a := range arg3 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListObjectsPagesWithContext", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// ListObjectsPagesWithContext indicates an expected call of ListObjectsPagesWithContext. +func (mr *MockS3APIMockRecorder) ListObjectsPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1, arg2}, arg3...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjectsPagesWithContext", reflect.TypeOf((*MockS3API)(nil).ListObjectsPagesWithContext), varargs...) +} + +// ListObjectsRequest mocks base method. +func (m *MockS3API) ListObjectsRequest(arg0 *s3.ListObjectsInput) (*request.Request, *s3.ListObjectsOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListObjectsRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.ListObjectsOutput) + return ret0, ret1 +} + +// ListObjectsRequest indicates an expected call of ListObjectsRequest. +func (mr *MockS3APIMockRecorder) ListObjectsRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjectsRequest", reflect.TypeOf((*MockS3API)(nil).ListObjectsRequest), arg0) +} + +// ListObjectsV2 mocks base method. +func (m *MockS3API) ListObjectsV2(arg0 *s3.ListObjectsV2Input) (*s3.ListObjectsV2Output, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListObjectsV2", arg0) + ret0, _ := ret[0].(*s3.ListObjectsV2Output) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListObjectsV2 indicates an expected call of ListObjectsV2. +func (mr *MockS3APIMockRecorder) ListObjectsV2(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjectsV2", reflect.TypeOf((*MockS3API)(nil).ListObjectsV2), arg0) +} + +// ListObjectsV2Pages mocks base method. +func (m *MockS3API) ListObjectsV2Pages(arg0 *s3.ListObjectsV2Input, arg1 func(*s3.ListObjectsV2Output, bool) bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListObjectsV2Pages", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ListObjectsV2Pages indicates an expected call of ListObjectsV2Pages. +func (mr *MockS3APIMockRecorder) ListObjectsV2Pages(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjectsV2Pages", reflect.TypeOf((*MockS3API)(nil).ListObjectsV2Pages), arg0, arg1) +} + +// ListObjectsV2PagesWithContext mocks base method. 
+func (m *MockS3API) ListObjectsV2PagesWithContext(arg0 aws.Context, arg1 *s3.ListObjectsV2Input, arg2 func(*s3.ListObjectsV2Output, bool) bool, arg3 ...request.Option) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1, arg2} + for _, a := range arg3 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListObjectsV2PagesWithContext", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// ListObjectsV2PagesWithContext indicates an expected call of ListObjectsV2PagesWithContext. +func (mr *MockS3APIMockRecorder) ListObjectsV2PagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1, arg2}, arg3...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjectsV2PagesWithContext", reflect.TypeOf((*MockS3API)(nil).ListObjectsV2PagesWithContext), varargs...) +} + +// ListObjectsV2Request mocks base method. +func (m *MockS3API) ListObjectsV2Request(arg0 *s3.ListObjectsV2Input) (*request.Request, *s3.ListObjectsV2Output) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListObjectsV2Request", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.ListObjectsV2Output) + return ret0, ret1 +} + +// ListObjectsV2Request indicates an expected call of ListObjectsV2Request. +func (mr *MockS3APIMockRecorder) ListObjectsV2Request(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjectsV2Request", reflect.TypeOf((*MockS3API)(nil).ListObjectsV2Request), arg0) +} + +// ListObjectsV2WithContext mocks base method. +func (m *MockS3API) ListObjectsV2WithContext(arg0 aws.Context, arg1 *s3.ListObjectsV2Input, arg2 ...request.Option) (*s3.ListObjectsV2Output, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListObjectsV2WithContext", varargs...) + ret0, _ := ret[0].(*s3.ListObjectsV2Output) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListObjectsV2WithContext indicates an expected call of ListObjectsV2WithContext. +func (mr *MockS3APIMockRecorder) ListObjectsV2WithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjectsV2WithContext", reflect.TypeOf((*MockS3API)(nil).ListObjectsV2WithContext), varargs...) +} + +// ListObjectsWithContext mocks base method. +func (m *MockS3API) ListObjectsWithContext(arg0 aws.Context, arg1 *s3.ListObjectsInput, arg2 ...request.Option) (*s3.ListObjectsOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListObjectsWithContext", varargs...) + ret0, _ := ret[0].(*s3.ListObjectsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListObjectsWithContext indicates an expected call of ListObjectsWithContext. +func (mr *MockS3APIMockRecorder) ListObjectsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjectsWithContext", reflect.TypeOf((*MockS3API)(nil).ListObjectsWithContext), varargs...) +} + +// ListParts mocks base method. 
+func (m *MockS3API) ListParts(arg0 *s3.ListPartsInput) (*s3.ListPartsOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListParts", arg0) + ret0, _ := ret[0].(*s3.ListPartsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListParts indicates an expected call of ListParts. +func (mr *MockS3APIMockRecorder) ListParts(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListParts", reflect.TypeOf((*MockS3API)(nil).ListParts), arg0) +} + +// ListPartsPages mocks base method. +func (m *MockS3API) ListPartsPages(arg0 *s3.ListPartsInput, arg1 func(*s3.ListPartsOutput, bool) bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListPartsPages", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ListPartsPages indicates an expected call of ListPartsPages. +func (mr *MockS3APIMockRecorder) ListPartsPages(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPartsPages", reflect.TypeOf((*MockS3API)(nil).ListPartsPages), arg0, arg1) +} + +// ListPartsPagesWithContext mocks base method. +func (m *MockS3API) ListPartsPagesWithContext(arg0 aws.Context, arg1 *s3.ListPartsInput, arg2 func(*s3.ListPartsOutput, bool) bool, arg3 ...request.Option) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1, arg2} + for _, a := range arg3 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListPartsPagesWithContext", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// ListPartsPagesWithContext indicates an expected call of ListPartsPagesWithContext. +func (mr *MockS3APIMockRecorder) ListPartsPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1, arg2}, arg3...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPartsPagesWithContext", reflect.TypeOf((*MockS3API)(nil).ListPartsPagesWithContext), varargs...) +} + +// ListPartsRequest mocks base method. +func (m *MockS3API) ListPartsRequest(arg0 *s3.ListPartsInput) (*request.Request, *s3.ListPartsOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListPartsRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.ListPartsOutput) + return ret0, ret1 +} + +// ListPartsRequest indicates an expected call of ListPartsRequest. +func (mr *MockS3APIMockRecorder) ListPartsRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPartsRequest", reflect.TypeOf((*MockS3API)(nil).ListPartsRequest), arg0) +} + +// ListPartsWithContext mocks base method. +func (m *MockS3API) ListPartsWithContext(arg0 aws.Context, arg1 *s3.ListPartsInput, arg2 ...request.Option) (*s3.ListPartsOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListPartsWithContext", varargs...) + ret0, _ := ret[0].(*s3.ListPartsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListPartsWithContext indicates an expected call of ListPartsWithContext. +func (mr *MockS3APIMockRecorder) ListPartsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) 
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPartsWithContext", reflect.TypeOf((*MockS3API)(nil).ListPartsWithContext), varargs...) +} + +// PutBucketAccelerateConfiguration mocks base method. +func (m *MockS3API) PutBucketAccelerateConfiguration(arg0 *s3.PutBucketAccelerateConfigurationInput) (*s3.PutBucketAccelerateConfigurationOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketAccelerateConfiguration", arg0) + ret0, _ := ret[0].(*s3.PutBucketAccelerateConfigurationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketAccelerateConfiguration indicates an expected call of PutBucketAccelerateConfiguration. +func (mr *MockS3APIMockRecorder) PutBucketAccelerateConfiguration(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketAccelerateConfiguration", reflect.TypeOf((*MockS3API)(nil).PutBucketAccelerateConfiguration), arg0) +} + +// PutBucketAccelerateConfigurationRequest mocks base method. +func (m *MockS3API) PutBucketAccelerateConfigurationRequest(arg0 *s3.PutBucketAccelerateConfigurationInput) (*request.Request, *s3.PutBucketAccelerateConfigurationOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketAccelerateConfigurationRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.PutBucketAccelerateConfigurationOutput) + return ret0, ret1 +} + +// PutBucketAccelerateConfigurationRequest indicates an expected call of PutBucketAccelerateConfigurationRequest. +func (mr *MockS3APIMockRecorder) PutBucketAccelerateConfigurationRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketAccelerateConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketAccelerateConfigurationRequest), arg0) +} + +// PutBucketAccelerateConfigurationWithContext mocks base method. +func (m *MockS3API) PutBucketAccelerateConfigurationWithContext(arg0 aws.Context, arg1 *s3.PutBucketAccelerateConfigurationInput, arg2 ...request.Option) (*s3.PutBucketAccelerateConfigurationOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PutBucketAccelerateConfigurationWithContext", varargs...) + ret0, _ := ret[0].(*s3.PutBucketAccelerateConfigurationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketAccelerateConfigurationWithContext indicates an expected call of PutBucketAccelerateConfigurationWithContext. +func (mr *MockS3APIMockRecorder) PutBucketAccelerateConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketAccelerateConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketAccelerateConfigurationWithContext), varargs...) +} + +// PutBucketAcl mocks base method. +func (m *MockS3API) PutBucketAcl(arg0 *s3.PutBucketAclInput) (*s3.PutBucketAclOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketAcl", arg0) + ret0, _ := ret[0].(*s3.PutBucketAclOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketAcl indicates an expected call of PutBucketAcl. 
+func (mr *MockS3APIMockRecorder) PutBucketAcl(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketAcl", reflect.TypeOf((*MockS3API)(nil).PutBucketAcl), arg0) +} + +// PutBucketAclRequest mocks base method. +func (m *MockS3API) PutBucketAclRequest(arg0 *s3.PutBucketAclInput) (*request.Request, *s3.PutBucketAclOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketAclRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.PutBucketAclOutput) + return ret0, ret1 +} + +// PutBucketAclRequest indicates an expected call of PutBucketAclRequest. +func (mr *MockS3APIMockRecorder) PutBucketAclRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketAclRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketAclRequest), arg0) +} + +// PutBucketAclWithContext mocks base method. +func (m *MockS3API) PutBucketAclWithContext(arg0 aws.Context, arg1 *s3.PutBucketAclInput, arg2 ...request.Option) (*s3.PutBucketAclOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PutBucketAclWithContext", varargs...) + ret0, _ := ret[0].(*s3.PutBucketAclOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketAclWithContext indicates an expected call of PutBucketAclWithContext. +func (mr *MockS3APIMockRecorder) PutBucketAclWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketAclWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketAclWithContext), varargs...) +} + +// PutBucketAnalyticsConfiguration mocks base method. +func (m *MockS3API) PutBucketAnalyticsConfiguration(arg0 *s3.PutBucketAnalyticsConfigurationInput) (*s3.PutBucketAnalyticsConfigurationOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketAnalyticsConfiguration", arg0) + ret0, _ := ret[0].(*s3.PutBucketAnalyticsConfigurationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketAnalyticsConfiguration indicates an expected call of PutBucketAnalyticsConfiguration. +func (mr *MockS3APIMockRecorder) PutBucketAnalyticsConfiguration(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketAnalyticsConfiguration", reflect.TypeOf((*MockS3API)(nil).PutBucketAnalyticsConfiguration), arg0) +} + +// PutBucketAnalyticsConfigurationRequest mocks base method. +func (m *MockS3API) PutBucketAnalyticsConfigurationRequest(arg0 *s3.PutBucketAnalyticsConfigurationInput) (*request.Request, *s3.PutBucketAnalyticsConfigurationOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketAnalyticsConfigurationRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.PutBucketAnalyticsConfigurationOutput) + return ret0, ret1 +} + +// PutBucketAnalyticsConfigurationRequest indicates an expected call of PutBucketAnalyticsConfigurationRequest. 
+func (mr *MockS3APIMockRecorder) PutBucketAnalyticsConfigurationRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketAnalyticsConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketAnalyticsConfigurationRequest), arg0) +} + +// PutBucketAnalyticsConfigurationWithContext mocks base method. +func (m *MockS3API) PutBucketAnalyticsConfigurationWithContext(arg0 aws.Context, arg1 *s3.PutBucketAnalyticsConfigurationInput, arg2 ...request.Option) (*s3.PutBucketAnalyticsConfigurationOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PutBucketAnalyticsConfigurationWithContext", varargs...) + ret0, _ := ret[0].(*s3.PutBucketAnalyticsConfigurationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketAnalyticsConfigurationWithContext indicates an expected call of PutBucketAnalyticsConfigurationWithContext. +func (mr *MockS3APIMockRecorder) PutBucketAnalyticsConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketAnalyticsConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketAnalyticsConfigurationWithContext), varargs...) +} + +// PutBucketCors mocks base method. +func (m *MockS3API) PutBucketCors(arg0 *s3.PutBucketCorsInput) (*s3.PutBucketCorsOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketCors", arg0) + ret0, _ := ret[0].(*s3.PutBucketCorsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketCors indicates an expected call of PutBucketCors. +func (mr *MockS3APIMockRecorder) PutBucketCors(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketCors", reflect.TypeOf((*MockS3API)(nil).PutBucketCors), arg0) +} + +// PutBucketCorsRequest mocks base method. +func (m *MockS3API) PutBucketCorsRequest(arg0 *s3.PutBucketCorsInput) (*request.Request, *s3.PutBucketCorsOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketCorsRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.PutBucketCorsOutput) + return ret0, ret1 +} + +// PutBucketCorsRequest indicates an expected call of PutBucketCorsRequest. +func (mr *MockS3APIMockRecorder) PutBucketCorsRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketCorsRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketCorsRequest), arg0) +} + +// PutBucketCorsWithContext mocks base method. +func (m *MockS3API) PutBucketCorsWithContext(arg0 aws.Context, arg1 *s3.PutBucketCorsInput, arg2 ...request.Option) (*s3.PutBucketCorsOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PutBucketCorsWithContext", varargs...) + ret0, _ := ret[0].(*s3.PutBucketCorsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketCorsWithContext indicates an expected call of PutBucketCorsWithContext. +func (mr *MockS3APIMockRecorder) PutBucketCorsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) 
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketCorsWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketCorsWithContext), varargs...) +} + +// PutBucketEncryption mocks base method. +func (m *MockS3API) PutBucketEncryption(arg0 *s3.PutBucketEncryptionInput) (*s3.PutBucketEncryptionOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketEncryption", arg0) + ret0, _ := ret[0].(*s3.PutBucketEncryptionOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketEncryption indicates an expected call of PutBucketEncryption. +func (mr *MockS3APIMockRecorder) PutBucketEncryption(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketEncryption", reflect.TypeOf((*MockS3API)(nil).PutBucketEncryption), arg0) +} + +// PutBucketEncryptionRequest mocks base method. +func (m *MockS3API) PutBucketEncryptionRequest(arg0 *s3.PutBucketEncryptionInput) (*request.Request, *s3.PutBucketEncryptionOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketEncryptionRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.PutBucketEncryptionOutput) + return ret0, ret1 +} + +// PutBucketEncryptionRequest indicates an expected call of PutBucketEncryptionRequest. +func (mr *MockS3APIMockRecorder) PutBucketEncryptionRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketEncryptionRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketEncryptionRequest), arg0) +} + +// PutBucketEncryptionWithContext mocks base method. +func (m *MockS3API) PutBucketEncryptionWithContext(arg0 aws.Context, arg1 *s3.PutBucketEncryptionInput, arg2 ...request.Option) (*s3.PutBucketEncryptionOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PutBucketEncryptionWithContext", varargs...) + ret0, _ := ret[0].(*s3.PutBucketEncryptionOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketEncryptionWithContext indicates an expected call of PutBucketEncryptionWithContext. +func (mr *MockS3APIMockRecorder) PutBucketEncryptionWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketEncryptionWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketEncryptionWithContext), varargs...) +} + +// PutBucketIntelligentTieringConfiguration mocks base method. +func (m *MockS3API) PutBucketIntelligentTieringConfiguration(arg0 *s3.PutBucketIntelligentTieringConfigurationInput) (*s3.PutBucketIntelligentTieringConfigurationOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketIntelligentTieringConfiguration", arg0) + ret0, _ := ret[0].(*s3.PutBucketIntelligentTieringConfigurationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketIntelligentTieringConfiguration indicates an expected call of PutBucketIntelligentTieringConfiguration. +func (mr *MockS3APIMockRecorder) PutBucketIntelligentTieringConfiguration(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketIntelligentTieringConfiguration", reflect.TypeOf((*MockS3API)(nil).PutBucketIntelligentTieringConfiguration), arg0) +} + +// PutBucketIntelligentTieringConfigurationRequest mocks base method. 
+func (m *MockS3API) PutBucketIntelligentTieringConfigurationRequest(arg0 *s3.PutBucketIntelligentTieringConfigurationInput) (*request.Request, *s3.PutBucketIntelligentTieringConfigurationOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketIntelligentTieringConfigurationRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.PutBucketIntelligentTieringConfigurationOutput) + return ret0, ret1 +} + +// PutBucketIntelligentTieringConfigurationRequest indicates an expected call of PutBucketIntelligentTieringConfigurationRequest. +func (mr *MockS3APIMockRecorder) PutBucketIntelligentTieringConfigurationRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketIntelligentTieringConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketIntelligentTieringConfigurationRequest), arg0) +} + +// PutBucketIntelligentTieringConfigurationWithContext mocks base method. +func (m *MockS3API) PutBucketIntelligentTieringConfigurationWithContext(arg0 aws.Context, arg1 *s3.PutBucketIntelligentTieringConfigurationInput, arg2 ...request.Option) (*s3.PutBucketIntelligentTieringConfigurationOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PutBucketIntelligentTieringConfigurationWithContext", varargs...) + ret0, _ := ret[0].(*s3.PutBucketIntelligentTieringConfigurationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketIntelligentTieringConfigurationWithContext indicates an expected call of PutBucketIntelligentTieringConfigurationWithContext. +func (mr *MockS3APIMockRecorder) PutBucketIntelligentTieringConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketIntelligentTieringConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketIntelligentTieringConfigurationWithContext), varargs...) +} + +// PutBucketInventoryConfiguration mocks base method. +func (m *MockS3API) PutBucketInventoryConfiguration(arg0 *s3.PutBucketInventoryConfigurationInput) (*s3.PutBucketInventoryConfigurationOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketInventoryConfiguration", arg0) + ret0, _ := ret[0].(*s3.PutBucketInventoryConfigurationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketInventoryConfiguration indicates an expected call of PutBucketInventoryConfiguration. +func (mr *MockS3APIMockRecorder) PutBucketInventoryConfiguration(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketInventoryConfiguration", reflect.TypeOf((*MockS3API)(nil).PutBucketInventoryConfiguration), arg0) +} + +// PutBucketInventoryConfigurationRequest mocks base method. +func (m *MockS3API) PutBucketInventoryConfigurationRequest(arg0 *s3.PutBucketInventoryConfigurationInput) (*request.Request, *s3.PutBucketInventoryConfigurationOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketInventoryConfigurationRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.PutBucketInventoryConfigurationOutput) + return ret0, ret1 +} + +// PutBucketInventoryConfigurationRequest indicates an expected call of PutBucketInventoryConfigurationRequest. 
+func (mr *MockS3APIMockRecorder) PutBucketInventoryConfigurationRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketInventoryConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketInventoryConfigurationRequest), arg0) +} + +// PutBucketInventoryConfigurationWithContext mocks base method. +func (m *MockS3API) PutBucketInventoryConfigurationWithContext(arg0 aws.Context, arg1 *s3.PutBucketInventoryConfigurationInput, arg2 ...request.Option) (*s3.PutBucketInventoryConfigurationOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PutBucketInventoryConfigurationWithContext", varargs...) + ret0, _ := ret[0].(*s3.PutBucketInventoryConfigurationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketInventoryConfigurationWithContext indicates an expected call of PutBucketInventoryConfigurationWithContext. +func (mr *MockS3APIMockRecorder) PutBucketInventoryConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketInventoryConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketInventoryConfigurationWithContext), varargs...) +} + +// PutBucketLifecycle mocks base method. +func (m *MockS3API) PutBucketLifecycle(arg0 *s3.PutBucketLifecycleInput) (*s3.PutBucketLifecycleOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketLifecycle", arg0) + ret0, _ := ret[0].(*s3.PutBucketLifecycleOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketLifecycle indicates an expected call of PutBucketLifecycle. +func (mr *MockS3APIMockRecorder) PutBucketLifecycle(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketLifecycle", reflect.TypeOf((*MockS3API)(nil).PutBucketLifecycle), arg0) +} + +// PutBucketLifecycleConfiguration mocks base method. +func (m *MockS3API) PutBucketLifecycleConfiguration(arg0 *s3.PutBucketLifecycleConfigurationInput) (*s3.PutBucketLifecycleConfigurationOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketLifecycleConfiguration", arg0) + ret0, _ := ret[0].(*s3.PutBucketLifecycleConfigurationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketLifecycleConfiguration indicates an expected call of PutBucketLifecycleConfiguration. +func (mr *MockS3APIMockRecorder) PutBucketLifecycleConfiguration(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketLifecycleConfiguration", reflect.TypeOf((*MockS3API)(nil).PutBucketLifecycleConfiguration), arg0) +} + +// PutBucketLifecycleConfigurationRequest mocks base method. +func (m *MockS3API) PutBucketLifecycleConfigurationRequest(arg0 *s3.PutBucketLifecycleConfigurationInput) (*request.Request, *s3.PutBucketLifecycleConfigurationOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketLifecycleConfigurationRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.PutBucketLifecycleConfigurationOutput) + return ret0, ret1 +} + +// PutBucketLifecycleConfigurationRequest indicates an expected call of PutBucketLifecycleConfigurationRequest. 
+func (mr *MockS3APIMockRecorder) PutBucketLifecycleConfigurationRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketLifecycleConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketLifecycleConfigurationRequest), arg0) +} + +// PutBucketLifecycleConfigurationWithContext mocks base method. +func (m *MockS3API) PutBucketLifecycleConfigurationWithContext(arg0 aws.Context, arg1 *s3.PutBucketLifecycleConfigurationInput, arg2 ...request.Option) (*s3.PutBucketLifecycleConfigurationOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PutBucketLifecycleConfigurationWithContext", varargs...) + ret0, _ := ret[0].(*s3.PutBucketLifecycleConfigurationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketLifecycleConfigurationWithContext indicates an expected call of PutBucketLifecycleConfigurationWithContext. +func (mr *MockS3APIMockRecorder) PutBucketLifecycleConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketLifecycleConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketLifecycleConfigurationWithContext), varargs...) +} + +// PutBucketLifecycleRequest mocks base method. +func (m *MockS3API) PutBucketLifecycleRequest(arg0 *s3.PutBucketLifecycleInput) (*request.Request, *s3.PutBucketLifecycleOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketLifecycleRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.PutBucketLifecycleOutput) + return ret0, ret1 +} + +// PutBucketLifecycleRequest indicates an expected call of PutBucketLifecycleRequest. +func (mr *MockS3APIMockRecorder) PutBucketLifecycleRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketLifecycleRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketLifecycleRequest), arg0) +} + +// PutBucketLifecycleWithContext mocks base method. +func (m *MockS3API) PutBucketLifecycleWithContext(arg0 aws.Context, arg1 *s3.PutBucketLifecycleInput, arg2 ...request.Option) (*s3.PutBucketLifecycleOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PutBucketLifecycleWithContext", varargs...) + ret0, _ := ret[0].(*s3.PutBucketLifecycleOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketLifecycleWithContext indicates an expected call of PutBucketLifecycleWithContext. +func (mr *MockS3APIMockRecorder) PutBucketLifecycleWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketLifecycleWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketLifecycleWithContext), varargs...) +} + +// PutBucketLogging mocks base method. 
+func (m *MockS3API) PutBucketLogging(arg0 *s3.PutBucketLoggingInput) (*s3.PutBucketLoggingOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketLogging", arg0) + ret0, _ := ret[0].(*s3.PutBucketLoggingOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketLogging indicates an expected call of PutBucketLogging. +func (mr *MockS3APIMockRecorder) PutBucketLogging(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketLogging", reflect.TypeOf((*MockS3API)(nil).PutBucketLogging), arg0) +} + +// PutBucketLoggingRequest mocks base method. +func (m *MockS3API) PutBucketLoggingRequest(arg0 *s3.PutBucketLoggingInput) (*request.Request, *s3.PutBucketLoggingOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketLoggingRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.PutBucketLoggingOutput) + return ret0, ret1 +} + +// PutBucketLoggingRequest indicates an expected call of PutBucketLoggingRequest. +func (mr *MockS3APIMockRecorder) PutBucketLoggingRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketLoggingRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketLoggingRequest), arg0) +} + +// PutBucketLoggingWithContext mocks base method. +func (m *MockS3API) PutBucketLoggingWithContext(arg0 aws.Context, arg1 *s3.PutBucketLoggingInput, arg2 ...request.Option) (*s3.PutBucketLoggingOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PutBucketLoggingWithContext", varargs...) + ret0, _ := ret[0].(*s3.PutBucketLoggingOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketLoggingWithContext indicates an expected call of PutBucketLoggingWithContext. +func (mr *MockS3APIMockRecorder) PutBucketLoggingWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketLoggingWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketLoggingWithContext), varargs...) +} + +// PutBucketMetricsConfiguration mocks base method. +func (m *MockS3API) PutBucketMetricsConfiguration(arg0 *s3.PutBucketMetricsConfigurationInput) (*s3.PutBucketMetricsConfigurationOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketMetricsConfiguration", arg0) + ret0, _ := ret[0].(*s3.PutBucketMetricsConfigurationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketMetricsConfiguration indicates an expected call of PutBucketMetricsConfiguration. +func (mr *MockS3APIMockRecorder) PutBucketMetricsConfiguration(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketMetricsConfiguration", reflect.TypeOf((*MockS3API)(nil).PutBucketMetricsConfiguration), arg0) +} + +// PutBucketMetricsConfigurationRequest mocks base method. 
+func (m *MockS3API) PutBucketMetricsConfigurationRequest(arg0 *s3.PutBucketMetricsConfigurationInput) (*request.Request, *s3.PutBucketMetricsConfigurationOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketMetricsConfigurationRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.PutBucketMetricsConfigurationOutput) + return ret0, ret1 +} + +// PutBucketMetricsConfigurationRequest indicates an expected call of PutBucketMetricsConfigurationRequest. +func (mr *MockS3APIMockRecorder) PutBucketMetricsConfigurationRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketMetricsConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketMetricsConfigurationRequest), arg0) +} + +// PutBucketMetricsConfigurationWithContext mocks base method. +func (m *MockS3API) PutBucketMetricsConfigurationWithContext(arg0 aws.Context, arg1 *s3.PutBucketMetricsConfigurationInput, arg2 ...request.Option) (*s3.PutBucketMetricsConfigurationOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PutBucketMetricsConfigurationWithContext", varargs...) + ret0, _ := ret[0].(*s3.PutBucketMetricsConfigurationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketMetricsConfigurationWithContext indicates an expected call of PutBucketMetricsConfigurationWithContext. +func (mr *MockS3APIMockRecorder) PutBucketMetricsConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketMetricsConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketMetricsConfigurationWithContext), varargs...) +} + +// PutBucketNotification mocks base method. +func (m *MockS3API) PutBucketNotification(arg0 *s3.PutBucketNotificationInput) (*s3.PutBucketNotificationOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketNotification", arg0) + ret0, _ := ret[0].(*s3.PutBucketNotificationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketNotification indicates an expected call of PutBucketNotification. +func (mr *MockS3APIMockRecorder) PutBucketNotification(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketNotification", reflect.TypeOf((*MockS3API)(nil).PutBucketNotification), arg0) +} + +// PutBucketNotificationConfiguration mocks base method. +func (m *MockS3API) PutBucketNotificationConfiguration(arg0 *s3.PutBucketNotificationConfigurationInput) (*s3.PutBucketNotificationConfigurationOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketNotificationConfiguration", arg0) + ret0, _ := ret[0].(*s3.PutBucketNotificationConfigurationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketNotificationConfiguration indicates an expected call of PutBucketNotificationConfiguration. +func (mr *MockS3APIMockRecorder) PutBucketNotificationConfiguration(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketNotificationConfiguration", reflect.TypeOf((*MockS3API)(nil).PutBucketNotificationConfiguration), arg0) +} + +// PutBucketNotificationConfigurationRequest mocks base method. 
+func (m *MockS3API) PutBucketNotificationConfigurationRequest(arg0 *s3.PutBucketNotificationConfigurationInput) (*request.Request, *s3.PutBucketNotificationConfigurationOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketNotificationConfigurationRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.PutBucketNotificationConfigurationOutput) + return ret0, ret1 +} + +// PutBucketNotificationConfigurationRequest indicates an expected call of PutBucketNotificationConfigurationRequest. +func (mr *MockS3APIMockRecorder) PutBucketNotificationConfigurationRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketNotificationConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketNotificationConfigurationRequest), arg0) +} + +// PutBucketNotificationConfigurationWithContext mocks base method. +func (m *MockS3API) PutBucketNotificationConfigurationWithContext(arg0 aws.Context, arg1 *s3.PutBucketNotificationConfigurationInput, arg2 ...request.Option) (*s3.PutBucketNotificationConfigurationOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PutBucketNotificationConfigurationWithContext", varargs...) + ret0, _ := ret[0].(*s3.PutBucketNotificationConfigurationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketNotificationConfigurationWithContext indicates an expected call of PutBucketNotificationConfigurationWithContext. +func (mr *MockS3APIMockRecorder) PutBucketNotificationConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketNotificationConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketNotificationConfigurationWithContext), varargs...) +} + +// PutBucketNotificationRequest mocks base method. +func (m *MockS3API) PutBucketNotificationRequest(arg0 *s3.PutBucketNotificationInput) (*request.Request, *s3.PutBucketNotificationOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketNotificationRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.PutBucketNotificationOutput) + return ret0, ret1 +} + +// PutBucketNotificationRequest indicates an expected call of PutBucketNotificationRequest. +func (mr *MockS3APIMockRecorder) PutBucketNotificationRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketNotificationRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketNotificationRequest), arg0) +} + +// PutBucketNotificationWithContext mocks base method. +func (m *MockS3API) PutBucketNotificationWithContext(arg0 aws.Context, arg1 *s3.PutBucketNotificationInput, arg2 ...request.Option) (*s3.PutBucketNotificationOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PutBucketNotificationWithContext", varargs...) + ret0, _ := ret[0].(*s3.PutBucketNotificationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketNotificationWithContext indicates an expected call of PutBucketNotificationWithContext. 
+func (mr *MockS3APIMockRecorder) PutBucketNotificationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketNotificationWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketNotificationWithContext), varargs...) +} + +// PutBucketOwnershipControls mocks base method. +func (m *MockS3API) PutBucketOwnershipControls(arg0 *s3.PutBucketOwnershipControlsInput) (*s3.PutBucketOwnershipControlsOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketOwnershipControls", arg0) + ret0, _ := ret[0].(*s3.PutBucketOwnershipControlsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketOwnershipControls indicates an expected call of PutBucketOwnershipControls. +func (mr *MockS3APIMockRecorder) PutBucketOwnershipControls(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketOwnershipControls", reflect.TypeOf((*MockS3API)(nil).PutBucketOwnershipControls), arg0) +} + +// PutBucketOwnershipControlsRequest mocks base method. +func (m *MockS3API) PutBucketOwnershipControlsRequest(arg0 *s3.PutBucketOwnershipControlsInput) (*request.Request, *s3.PutBucketOwnershipControlsOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketOwnershipControlsRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.PutBucketOwnershipControlsOutput) + return ret0, ret1 +} + +// PutBucketOwnershipControlsRequest indicates an expected call of PutBucketOwnershipControlsRequest. +func (mr *MockS3APIMockRecorder) PutBucketOwnershipControlsRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketOwnershipControlsRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketOwnershipControlsRequest), arg0) +} + +// PutBucketOwnershipControlsWithContext mocks base method. +func (m *MockS3API) PutBucketOwnershipControlsWithContext(arg0 aws.Context, arg1 *s3.PutBucketOwnershipControlsInput, arg2 ...request.Option) (*s3.PutBucketOwnershipControlsOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PutBucketOwnershipControlsWithContext", varargs...) + ret0, _ := ret[0].(*s3.PutBucketOwnershipControlsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketOwnershipControlsWithContext indicates an expected call of PutBucketOwnershipControlsWithContext. +func (mr *MockS3APIMockRecorder) PutBucketOwnershipControlsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketOwnershipControlsWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketOwnershipControlsWithContext), varargs...) +} + +// PutBucketPolicy mocks base method. +func (m *MockS3API) PutBucketPolicy(arg0 *s3.PutBucketPolicyInput) (*s3.PutBucketPolicyOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketPolicy", arg0) + ret0, _ := ret[0].(*s3.PutBucketPolicyOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketPolicy indicates an expected call of PutBucketPolicy. 
+func (mr *MockS3APIMockRecorder) PutBucketPolicy(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketPolicy", reflect.TypeOf((*MockS3API)(nil).PutBucketPolicy), arg0) +} + +// PutBucketPolicyRequest mocks base method. +func (m *MockS3API) PutBucketPolicyRequest(arg0 *s3.PutBucketPolicyInput) (*request.Request, *s3.PutBucketPolicyOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketPolicyRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.PutBucketPolicyOutput) + return ret0, ret1 +} + +// PutBucketPolicyRequest indicates an expected call of PutBucketPolicyRequest. +func (mr *MockS3APIMockRecorder) PutBucketPolicyRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketPolicyRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketPolicyRequest), arg0) +} + +// PutBucketPolicyWithContext mocks base method. +func (m *MockS3API) PutBucketPolicyWithContext(arg0 aws.Context, arg1 *s3.PutBucketPolicyInput, arg2 ...request.Option) (*s3.PutBucketPolicyOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PutBucketPolicyWithContext", varargs...) + ret0, _ := ret[0].(*s3.PutBucketPolicyOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketPolicyWithContext indicates an expected call of PutBucketPolicyWithContext. +func (mr *MockS3APIMockRecorder) PutBucketPolicyWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketPolicyWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketPolicyWithContext), varargs...) +} + +// PutBucketReplication mocks base method. +func (m *MockS3API) PutBucketReplication(arg0 *s3.PutBucketReplicationInput) (*s3.PutBucketReplicationOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketReplication", arg0) + ret0, _ := ret[0].(*s3.PutBucketReplicationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketReplication indicates an expected call of PutBucketReplication. +func (mr *MockS3APIMockRecorder) PutBucketReplication(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketReplication", reflect.TypeOf((*MockS3API)(nil).PutBucketReplication), arg0) +} + +// PutBucketReplicationRequest mocks base method. +func (m *MockS3API) PutBucketReplicationRequest(arg0 *s3.PutBucketReplicationInput) (*request.Request, *s3.PutBucketReplicationOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketReplicationRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.PutBucketReplicationOutput) + return ret0, ret1 +} + +// PutBucketReplicationRequest indicates an expected call of PutBucketReplicationRequest. +func (mr *MockS3APIMockRecorder) PutBucketReplicationRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketReplicationRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketReplicationRequest), arg0) +} + +// PutBucketReplicationWithContext mocks base method. 
+func (m *MockS3API) PutBucketReplicationWithContext(arg0 aws.Context, arg1 *s3.PutBucketReplicationInput, arg2 ...request.Option) (*s3.PutBucketReplicationOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PutBucketReplicationWithContext", varargs...) + ret0, _ := ret[0].(*s3.PutBucketReplicationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketReplicationWithContext indicates an expected call of PutBucketReplicationWithContext. +func (mr *MockS3APIMockRecorder) PutBucketReplicationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketReplicationWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketReplicationWithContext), varargs...) +} + +// PutBucketRequestPayment mocks base method. +func (m *MockS3API) PutBucketRequestPayment(arg0 *s3.PutBucketRequestPaymentInput) (*s3.PutBucketRequestPaymentOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketRequestPayment", arg0) + ret0, _ := ret[0].(*s3.PutBucketRequestPaymentOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketRequestPayment indicates an expected call of PutBucketRequestPayment. +func (mr *MockS3APIMockRecorder) PutBucketRequestPayment(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketRequestPayment", reflect.TypeOf((*MockS3API)(nil).PutBucketRequestPayment), arg0) +} + +// PutBucketRequestPaymentRequest mocks base method. +func (m *MockS3API) PutBucketRequestPaymentRequest(arg0 *s3.PutBucketRequestPaymentInput) (*request.Request, *s3.PutBucketRequestPaymentOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketRequestPaymentRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.PutBucketRequestPaymentOutput) + return ret0, ret1 +} + +// PutBucketRequestPaymentRequest indicates an expected call of PutBucketRequestPaymentRequest. +func (mr *MockS3APIMockRecorder) PutBucketRequestPaymentRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketRequestPaymentRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketRequestPaymentRequest), arg0) +} + +// PutBucketRequestPaymentWithContext mocks base method. +func (m *MockS3API) PutBucketRequestPaymentWithContext(arg0 aws.Context, arg1 *s3.PutBucketRequestPaymentInput, arg2 ...request.Option) (*s3.PutBucketRequestPaymentOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PutBucketRequestPaymentWithContext", varargs...) + ret0, _ := ret[0].(*s3.PutBucketRequestPaymentOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketRequestPaymentWithContext indicates an expected call of PutBucketRequestPaymentWithContext. +func (mr *MockS3APIMockRecorder) PutBucketRequestPaymentWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketRequestPaymentWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketRequestPaymentWithContext), varargs...) +} + +// PutBucketTagging mocks base method. 
+func (m *MockS3API) PutBucketTagging(arg0 *s3.PutBucketTaggingInput) (*s3.PutBucketTaggingOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketTagging", arg0) + ret0, _ := ret[0].(*s3.PutBucketTaggingOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketTagging indicates an expected call of PutBucketTagging. +func (mr *MockS3APIMockRecorder) PutBucketTagging(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketTagging", reflect.TypeOf((*MockS3API)(nil).PutBucketTagging), arg0) +} + +// PutBucketTaggingRequest mocks base method. +func (m *MockS3API) PutBucketTaggingRequest(arg0 *s3.PutBucketTaggingInput) (*request.Request, *s3.PutBucketTaggingOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketTaggingRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.PutBucketTaggingOutput) + return ret0, ret1 +} + +// PutBucketTaggingRequest indicates an expected call of PutBucketTaggingRequest. +func (mr *MockS3APIMockRecorder) PutBucketTaggingRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketTaggingRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketTaggingRequest), arg0) +} + +// PutBucketTaggingWithContext mocks base method. +func (m *MockS3API) PutBucketTaggingWithContext(arg0 aws.Context, arg1 *s3.PutBucketTaggingInput, arg2 ...request.Option) (*s3.PutBucketTaggingOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PutBucketTaggingWithContext", varargs...) + ret0, _ := ret[0].(*s3.PutBucketTaggingOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketTaggingWithContext indicates an expected call of PutBucketTaggingWithContext. +func (mr *MockS3APIMockRecorder) PutBucketTaggingWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketTaggingWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketTaggingWithContext), varargs...) +} + +// PutBucketVersioning mocks base method. +func (m *MockS3API) PutBucketVersioning(arg0 *s3.PutBucketVersioningInput) (*s3.PutBucketVersioningOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketVersioning", arg0) + ret0, _ := ret[0].(*s3.PutBucketVersioningOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketVersioning indicates an expected call of PutBucketVersioning. +func (mr *MockS3APIMockRecorder) PutBucketVersioning(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketVersioning", reflect.TypeOf((*MockS3API)(nil).PutBucketVersioning), arg0) +} + +// PutBucketVersioningRequest mocks base method. +func (m *MockS3API) PutBucketVersioningRequest(arg0 *s3.PutBucketVersioningInput) (*request.Request, *s3.PutBucketVersioningOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketVersioningRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.PutBucketVersioningOutput) + return ret0, ret1 +} + +// PutBucketVersioningRequest indicates an expected call of PutBucketVersioningRequest. 
+func (mr *MockS3APIMockRecorder) PutBucketVersioningRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketVersioningRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketVersioningRequest), arg0) +} + +// PutBucketVersioningWithContext mocks base method. +func (m *MockS3API) PutBucketVersioningWithContext(arg0 aws.Context, arg1 *s3.PutBucketVersioningInput, arg2 ...request.Option) (*s3.PutBucketVersioningOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PutBucketVersioningWithContext", varargs...) + ret0, _ := ret[0].(*s3.PutBucketVersioningOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketVersioningWithContext indicates an expected call of PutBucketVersioningWithContext. +func (mr *MockS3APIMockRecorder) PutBucketVersioningWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketVersioningWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketVersioningWithContext), varargs...) +} + +// PutBucketWebsite mocks base method. +func (m *MockS3API) PutBucketWebsite(arg0 *s3.PutBucketWebsiteInput) (*s3.PutBucketWebsiteOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketWebsite", arg0) + ret0, _ := ret[0].(*s3.PutBucketWebsiteOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketWebsite indicates an expected call of PutBucketWebsite. +func (mr *MockS3APIMockRecorder) PutBucketWebsite(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketWebsite", reflect.TypeOf((*MockS3API)(nil).PutBucketWebsite), arg0) +} + +// PutBucketWebsiteRequest mocks base method. +func (m *MockS3API) PutBucketWebsiteRequest(arg0 *s3.PutBucketWebsiteInput) (*request.Request, *s3.PutBucketWebsiteOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketWebsiteRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.PutBucketWebsiteOutput) + return ret0, ret1 +} + +// PutBucketWebsiteRequest indicates an expected call of PutBucketWebsiteRequest. +func (mr *MockS3APIMockRecorder) PutBucketWebsiteRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketWebsiteRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketWebsiteRequest), arg0) +} + +// PutBucketWebsiteWithContext mocks base method. +func (m *MockS3API) PutBucketWebsiteWithContext(arg0 aws.Context, arg1 *s3.PutBucketWebsiteInput, arg2 ...request.Option) (*s3.PutBucketWebsiteOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PutBucketWebsiteWithContext", varargs...) + ret0, _ := ret[0].(*s3.PutBucketWebsiteOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketWebsiteWithContext indicates an expected call of PutBucketWebsiteWithContext. +func (mr *MockS3APIMockRecorder) PutBucketWebsiteWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) 
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketWebsiteWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketWebsiteWithContext), varargs...) +} + +// PutObject mocks base method. +func (m *MockS3API) PutObject(arg0 *s3.PutObjectInput) (*s3.PutObjectOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutObject", arg0) + ret0, _ := ret[0].(*s3.PutObjectOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutObject indicates an expected call of PutObject. +func (mr *MockS3APIMockRecorder) PutObject(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObject", reflect.TypeOf((*MockS3API)(nil).PutObject), arg0) +} + +// PutObjectAcl mocks base method. +func (m *MockS3API) PutObjectAcl(arg0 *s3.PutObjectAclInput) (*s3.PutObjectAclOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutObjectAcl", arg0) + ret0, _ := ret[0].(*s3.PutObjectAclOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutObjectAcl indicates an expected call of PutObjectAcl. +func (mr *MockS3APIMockRecorder) PutObjectAcl(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectAcl", reflect.TypeOf((*MockS3API)(nil).PutObjectAcl), arg0) +} + +// PutObjectAclRequest mocks base method. +func (m *MockS3API) PutObjectAclRequest(arg0 *s3.PutObjectAclInput) (*request.Request, *s3.PutObjectAclOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutObjectAclRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.PutObjectAclOutput) + return ret0, ret1 +} + +// PutObjectAclRequest indicates an expected call of PutObjectAclRequest. +func (mr *MockS3APIMockRecorder) PutObjectAclRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectAclRequest", reflect.TypeOf((*MockS3API)(nil).PutObjectAclRequest), arg0) +} + +// PutObjectAclWithContext mocks base method. +func (m *MockS3API) PutObjectAclWithContext(arg0 aws.Context, arg1 *s3.PutObjectAclInput, arg2 ...request.Option) (*s3.PutObjectAclOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PutObjectAclWithContext", varargs...) + ret0, _ := ret[0].(*s3.PutObjectAclOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutObjectAclWithContext indicates an expected call of PutObjectAclWithContext. +func (mr *MockS3APIMockRecorder) PutObjectAclWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectAclWithContext", reflect.TypeOf((*MockS3API)(nil).PutObjectAclWithContext), varargs...) +} + +// PutObjectLegalHold mocks base method. +func (m *MockS3API) PutObjectLegalHold(arg0 *s3.PutObjectLegalHoldInput) (*s3.PutObjectLegalHoldOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutObjectLegalHold", arg0) + ret0, _ := ret[0].(*s3.PutObjectLegalHoldOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutObjectLegalHold indicates an expected call of PutObjectLegalHold. 
+func (mr *MockS3APIMockRecorder) PutObjectLegalHold(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectLegalHold", reflect.TypeOf((*MockS3API)(nil).PutObjectLegalHold), arg0) +} + +// PutObjectLegalHoldRequest mocks base method. +func (m *MockS3API) PutObjectLegalHoldRequest(arg0 *s3.PutObjectLegalHoldInput) (*request.Request, *s3.PutObjectLegalHoldOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutObjectLegalHoldRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.PutObjectLegalHoldOutput) + return ret0, ret1 +} + +// PutObjectLegalHoldRequest indicates an expected call of PutObjectLegalHoldRequest. +func (mr *MockS3APIMockRecorder) PutObjectLegalHoldRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectLegalHoldRequest", reflect.TypeOf((*MockS3API)(nil).PutObjectLegalHoldRequest), arg0) +} + +// PutObjectLegalHoldWithContext mocks base method. +func (m *MockS3API) PutObjectLegalHoldWithContext(arg0 aws.Context, arg1 *s3.PutObjectLegalHoldInput, arg2 ...request.Option) (*s3.PutObjectLegalHoldOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PutObjectLegalHoldWithContext", varargs...) + ret0, _ := ret[0].(*s3.PutObjectLegalHoldOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutObjectLegalHoldWithContext indicates an expected call of PutObjectLegalHoldWithContext. +func (mr *MockS3APIMockRecorder) PutObjectLegalHoldWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectLegalHoldWithContext", reflect.TypeOf((*MockS3API)(nil).PutObjectLegalHoldWithContext), varargs...) +} + +// PutObjectLockConfiguration mocks base method. +func (m *MockS3API) PutObjectLockConfiguration(arg0 *s3.PutObjectLockConfigurationInput) (*s3.PutObjectLockConfigurationOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutObjectLockConfiguration", arg0) + ret0, _ := ret[0].(*s3.PutObjectLockConfigurationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutObjectLockConfiguration indicates an expected call of PutObjectLockConfiguration. +func (mr *MockS3APIMockRecorder) PutObjectLockConfiguration(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectLockConfiguration", reflect.TypeOf((*MockS3API)(nil).PutObjectLockConfiguration), arg0) +} + +// PutObjectLockConfigurationRequest mocks base method. +func (m *MockS3API) PutObjectLockConfigurationRequest(arg0 *s3.PutObjectLockConfigurationInput) (*request.Request, *s3.PutObjectLockConfigurationOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutObjectLockConfigurationRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.PutObjectLockConfigurationOutput) + return ret0, ret1 +} + +// PutObjectLockConfigurationRequest indicates an expected call of PutObjectLockConfigurationRequest. 
+func (mr *MockS3APIMockRecorder) PutObjectLockConfigurationRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectLockConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).PutObjectLockConfigurationRequest), arg0) +} + +// PutObjectLockConfigurationWithContext mocks base method. +func (m *MockS3API) PutObjectLockConfigurationWithContext(arg0 aws.Context, arg1 *s3.PutObjectLockConfigurationInput, arg2 ...request.Option) (*s3.PutObjectLockConfigurationOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PutObjectLockConfigurationWithContext", varargs...) + ret0, _ := ret[0].(*s3.PutObjectLockConfigurationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutObjectLockConfigurationWithContext indicates an expected call of PutObjectLockConfigurationWithContext. +func (mr *MockS3APIMockRecorder) PutObjectLockConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectLockConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).PutObjectLockConfigurationWithContext), varargs...) +} + +// PutObjectRequest mocks base method. +func (m *MockS3API) PutObjectRequest(arg0 *s3.PutObjectInput) (*request.Request, *s3.PutObjectOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutObjectRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.PutObjectOutput) + return ret0, ret1 +} + +// PutObjectRequest indicates an expected call of PutObjectRequest. +func (mr *MockS3APIMockRecorder) PutObjectRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectRequest", reflect.TypeOf((*MockS3API)(nil).PutObjectRequest), arg0) +} + +// PutObjectRetention mocks base method. +func (m *MockS3API) PutObjectRetention(arg0 *s3.PutObjectRetentionInput) (*s3.PutObjectRetentionOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutObjectRetention", arg0) + ret0, _ := ret[0].(*s3.PutObjectRetentionOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutObjectRetention indicates an expected call of PutObjectRetention. +func (mr *MockS3APIMockRecorder) PutObjectRetention(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectRetention", reflect.TypeOf((*MockS3API)(nil).PutObjectRetention), arg0) +} + +// PutObjectRetentionRequest mocks base method. +func (m *MockS3API) PutObjectRetentionRequest(arg0 *s3.PutObjectRetentionInput) (*request.Request, *s3.PutObjectRetentionOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutObjectRetentionRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.PutObjectRetentionOutput) + return ret0, ret1 +} + +// PutObjectRetentionRequest indicates an expected call of PutObjectRetentionRequest. +func (mr *MockS3APIMockRecorder) PutObjectRetentionRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectRetentionRequest", reflect.TypeOf((*MockS3API)(nil).PutObjectRetentionRequest), arg0) +} + +// PutObjectRetentionWithContext mocks base method. 
+func (m *MockS3API) PutObjectRetentionWithContext(arg0 aws.Context, arg1 *s3.PutObjectRetentionInput, arg2 ...request.Option) (*s3.PutObjectRetentionOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PutObjectRetentionWithContext", varargs...) + ret0, _ := ret[0].(*s3.PutObjectRetentionOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutObjectRetentionWithContext indicates an expected call of PutObjectRetentionWithContext. +func (mr *MockS3APIMockRecorder) PutObjectRetentionWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectRetentionWithContext", reflect.TypeOf((*MockS3API)(nil).PutObjectRetentionWithContext), varargs...) +} + +// PutObjectTagging mocks base method. +func (m *MockS3API) PutObjectTagging(arg0 *s3.PutObjectTaggingInput) (*s3.PutObjectTaggingOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutObjectTagging", arg0) + ret0, _ := ret[0].(*s3.PutObjectTaggingOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutObjectTagging indicates an expected call of PutObjectTagging. +func (mr *MockS3APIMockRecorder) PutObjectTagging(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectTagging", reflect.TypeOf((*MockS3API)(nil).PutObjectTagging), arg0) +} + +// PutObjectTaggingRequest mocks base method. +func (m *MockS3API) PutObjectTaggingRequest(arg0 *s3.PutObjectTaggingInput) (*request.Request, *s3.PutObjectTaggingOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutObjectTaggingRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.PutObjectTaggingOutput) + return ret0, ret1 +} + +// PutObjectTaggingRequest indicates an expected call of PutObjectTaggingRequest. +func (mr *MockS3APIMockRecorder) PutObjectTaggingRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectTaggingRequest", reflect.TypeOf((*MockS3API)(nil).PutObjectTaggingRequest), arg0) +} + +// PutObjectTaggingWithContext mocks base method. +func (m *MockS3API) PutObjectTaggingWithContext(arg0 aws.Context, arg1 *s3.PutObjectTaggingInput, arg2 ...request.Option) (*s3.PutObjectTaggingOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PutObjectTaggingWithContext", varargs...) + ret0, _ := ret[0].(*s3.PutObjectTaggingOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutObjectTaggingWithContext indicates an expected call of PutObjectTaggingWithContext. +func (mr *MockS3APIMockRecorder) PutObjectTaggingWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectTaggingWithContext", reflect.TypeOf((*MockS3API)(nil).PutObjectTaggingWithContext), varargs...) +} + +// PutObjectWithContext mocks base method. 
+func (m *MockS3API) PutObjectWithContext(arg0 aws.Context, arg1 *s3.PutObjectInput, arg2 ...request.Option) (*s3.PutObjectOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PutObjectWithContext", varargs...) + ret0, _ := ret[0].(*s3.PutObjectOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutObjectWithContext indicates an expected call of PutObjectWithContext. +func (mr *MockS3APIMockRecorder) PutObjectWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectWithContext", reflect.TypeOf((*MockS3API)(nil).PutObjectWithContext), varargs...) +} + +// PutPublicAccessBlock mocks base method. +func (m *MockS3API) PutPublicAccessBlock(arg0 *s3.PutPublicAccessBlockInput) (*s3.PutPublicAccessBlockOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutPublicAccessBlock", arg0) + ret0, _ := ret[0].(*s3.PutPublicAccessBlockOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutPublicAccessBlock indicates an expected call of PutPublicAccessBlock. +func (mr *MockS3APIMockRecorder) PutPublicAccessBlock(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutPublicAccessBlock", reflect.TypeOf((*MockS3API)(nil).PutPublicAccessBlock), arg0) +} + +// PutPublicAccessBlockRequest mocks base method. +func (m *MockS3API) PutPublicAccessBlockRequest(arg0 *s3.PutPublicAccessBlockInput) (*request.Request, *s3.PutPublicAccessBlockOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutPublicAccessBlockRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.PutPublicAccessBlockOutput) + return ret0, ret1 +} + +// PutPublicAccessBlockRequest indicates an expected call of PutPublicAccessBlockRequest. +func (mr *MockS3APIMockRecorder) PutPublicAccessBlockRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutPublicAccessBlockRequest", reflect.TypeOf((*MockS3API)(nil).PutPublicAccessBlockRequest), arg0) +} + +// PutPublicAccessBlockWithContext mocks base method. +func (m *MockS3API) PutPublicAccessBlockWithContext(arg0 aws.Context, arg1 *s3.PutPublicAccessBlockInput, arg2 ...request.Option) (*s3.PutPublicAccessBlockOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PutPublicAccessBlockWithContext", varargs...) + ret0, _ := ret[0].(*s3.PutPublicAccessBlockOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutPublicAccessBlockWithContext indicates an expected call of PutPublicAccessBlockWithContext. +func (mr *MockS3APIMockRecorder) PutPublicAccessBlockWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutPublicAccessBlockWithContext", reflect.TypeOf((*MockS3API)(nil).PutPublicAccessBlockWithContext), varargs...) +} + +// RestoreObject mocks base method. 
+func (m *MockS3API) RestoreObject(arg0 *s3.RestoreObjectInput) (*s3.RestoreObjectOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RestoreObject", arg0) + ret0, _ := ret[0].(*s3.RestoreObjectOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// RestoreObject indicates an expected call of RestoreObject. +func (mr *MockS3APIMockRecorder) RestoreObject(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RestoreObject", reflect.TypeOf((*MockS3API)(nil).RestoreObject), arg0) +} + +// RestoreObjectRequest mocks base method. +func (m *MockS3API) RestoreObjectRequest(arg0 *s3.RestoreObjectInput) (*request.Request, *s3.RestoreObjectOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RestoreObjectRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.RestoreObjectOutput) + return ret0, ret1 +} + +// RestoreObjectRequest indicates an expected call of RestoreObjectRequest. +func (mr *MockS3APIMockRecorder) RestoreObjectRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RestoreObjectRequest", reflect.TypeOf((*MockS3API)(nil).RestoreObjectRequest), arg0) +} + +// RestoreObjectWithContext mocks base method. +func (m *MockS3API) RestoreObjectWithContext(arg0 aws.Context, arg1 *s3.RestoreObjectInput, arg2 ...request.Option) (*s3.RestoreObjectOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "RestoreObjectWithContext", varargs...) + ret0, _ := ret[0].(*s3.RestoreObjectOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// RestoreObjectWithContext indicates an expected call of RestoreObjectWithContext. +func (mr *MockS3APIMockRecorder) RestoreObjectWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RestoreObjectWithContext", reflect.TypeOf((*MockS3API)(nil).RestoreObjectWithContext), varargs...) +} + +// SelectObjectContent mocks base method. +func (m *MockS3API) SelectObjectContent(arg0 *s3.SelectObjectContentInput) (*s3.SelectObjectContentOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SelectObjectContent", arg0) + ret0, _ := ret[0].(*s3.SelectObjectContentOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SelectObjectContent indicates an expected call of SelectObjectContent. +func (mr *MockS3APIMockRecorder) SelectObjectContent(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SelectObjectContent", reflect.TypeOf((*MockS3API)(nil).SelectObjectContent), arg0) +} + +// SelectObjectContentRequest mocks base method. +func (m *MockS3API) SelectObjectContentRequest(arg0 *s3.SelectObjectContentInput) (*request.Request, *s3.SelectObjectContentOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SelectObjectContentRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.SelectObjectContentOutput) + return ret0, ret1 +} + +// SelectObjectContentRequest indicates an expected call of SelectObjectContentRequest. 
+func (mr *MockS3APIMockRecorder) SelectObjectContentRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SelectObjectContentRequest", reflect.TypeOf((*MockS3API)(nil).SelectObjectContentRequest), arg0) +} + +// SelectObjectContentWithContext mocks base method. +func (m *MockS3API) SelectObjectContentWithContext(arg0 aws.Context, arg1 *s3.SelectObjectContentInput, arg2 ...request.Option) (*s3.SelectObjectContentOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "SelectObjectContentWithContext", varargs...) + ret0, _ := ret[0].(*s3.SelectObjectContentOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SelectObjectContentWithContext indicates an expected call of SelectObjectContentWithContext. +func (mr *MockS3APIMockRecorder) SelectObjectContentWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SelectObjectContentWithContext", reflect.TypeOf((*MockS3API)(nil).SelectObjectContentWithContext), varargs...) +} + +// UploadPart mocks base method. +func (m *MockS3API) UploadPart(arg0 *s3.UploadPartInput) (*s3.UploadPartOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UploadPart", arg0) + ret0, _ := ret[0].(*s3.UploadPartOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UploadPart indicates an expected call of UploadPart. +func (mr *MockS3APIMockRecorder) UploadPart(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UploadPart", reflect.TypeOf((*MockS3API)(nil).UploadPart), arg0) +} + +// UploadPartCopy mocks base method. +func (m *MockS3API) UploadPartCopy(arg0 *s3.UploadPartCopyInput) (*s3.UploadPartCopyOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UploadPartCopy", arg0) + ret0, _ := ret[0].(*s3.UploadPartCopyOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UploadPartCopy indicates an expected call of UploadPartCopy. +func (mr *MockS3APIMockRecorder) UploadPartCopy(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UploadPartCopy", reflect.TypeOf((*MockS3API)(nil).UploadPartCopy), arg0) +} + +// UploadPartCopyRequest mocks base method. +func (m *MockS3API) UploadPartCopyRequest(arg0 *s3.UploadPartCopyInput) (*request.Request, *s3.UploadPartCopyOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UploadPartCopyRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.UploadPartCopyOutput) + return ret0, ret1 +} + +// UploadPartCopyRequest indicates an expected call of UploadPartCopyRequest. +func (mr *MockS3APIMockRecorder) UploadPartCopyRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UploadPartCopyRequest", reflect.TypeOf((*MockS3API)(nil).UploadPartCopyRequest), arg0) +} + +// UploadPartCopyWithContext mocks base method. +func (m *MockS3API) UploadPartCopyWithContext(arg0 aws.Context, arg1 *s3.UploadPartCopyInput, arg2 ...request.Option) (*s3.UploadPartCopyOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "UploadPartCopyWithContext", varargs...) 
+ ret0, _ := ret[0].(*s3.UploadPartCopyOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UploadPartCopyWithContext indicates an expected call of UploadPartCopyWithContext. +func (mr *MockS3APIMockRecorder) UploadPartCopyWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UploadPartCopyWithContext", reflect.TypeOf((*MockS3API)(nil).UploadPartCopyWithContext), varargs...) +} + +// UploadPartRequest mocks base method. +func (m *MockS3API) UploadPartRequest(arg0 *s3.UploadPartInput) (*request.Request, *s3.UploadPartOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UploadPartRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.UploadPartOutput) + return ret0, ret1 +} + +// UploadPartRequest indicates an expected call of UploadPartRequest. +func (mr *MockS3APIMockRecorder) UploadPartRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UploadPartRequest", reflect.TypeOf((*MockS3API)(nil).UploadPartRequest), arg0) +} + +// UploadPartWithContext mocks base method. +func (m *MockS3API) UploadPartWithContext(arg0 aws.Context, arg1 *s3.UploadPartInput, arg2 ...request.Option) (*s3.UploadPartOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "UploadPartWithContext", varargs...) + ret0, _ := ret[0].(*s3.UploadPartOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UploadPartWithContext indicates an expected call of UploadPartWithContext. +func (mr *MockS3APIMockRecorder) UploadPartWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UploadPartWithContext", reflect.TypeOf((*MockS3API)(nil).UploadPartWithContext), varargs...) +} + +// WaitUntilBucketExists mocks base method. +func (m *MockS3API) WaitUntilBucketExists(arg0 *s3.HeadBucketInput) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WaitUntilBucketExists", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// WaitUntilBucketExists indicates an expected call of WaitUntilBucketExists. +func (mr *MockS3APIMockRecorder) WaitUntilBucketExists(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitUntilBucketExists", reflect.TypeOf((*MockS3API)(nil).WaitUntilBucketExists), arg0) +} + +// WaitUntilBucketExistsWithContext mocks base method. +func (m *MockS3API) WaitUntilBucketExistsWithContext(arg0 aws.Context, arg1 *s3.HeadBucketInput, arg2 ...request.WaiterOption) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "WaitUntilBucketExistsWithContext", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// WaitUntilBucketExistsWithContext indicates an expected call of WaitUntilBucketExistsWithContext. +func (mr *MockS3APIMockRecorder) WaitUntilBucketExistsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) 
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitUntilBucketExistsWithContext", reflect.TypeOf((*MockS3API)(nil).WaitUntilBucketExistsWithContext), varargs...) +} + +// WaitUntilBucketNotExists mocks base method. +func (m *MockS3API) WaitUntilBucketNotExists(arg0 *s3.HeadBucketInput) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WaitUntilBucketNotExists", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// WaitUntilBucketNotExists indicates an expected call of WaitUntilBucketNotExists. +func (mr *MockS3APIMockRecorder) WaitUntilBucketNotExists(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitUntilBucketNotExists", reflect.TypeOf((*MockS3API)(nil).WaitUntilBucketNotExists), arg0) +} + +// WaitUntilBucketNotExistsWithContext mocks base method. +func (m *MockS3API) WaitUntilBucketNotExistsWithContext(arg0 aws.Context, arg1 *s3.HeadBucketInput, arg2 ...request.WaiterOption) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "WaitUntilBucketNotExistsWithContext", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// WaitUntilBucketNotExistsWithContext indicates an expected call of WaitUntilBucketNotExistsWithContext. +func (mr *MockS3APIMockRecorder) WaitUntilBucketNotExistsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitUntilBucketNotExistsWithContext", reflect.TypeOf((*MockS3API)(nil).WaitUntilBucketNotExistsWithContext), varargs...) +} + +// WaitUntilObjectExists mocks base method. +func (m *MockS3API) WaitUntilObjectExists(arg0 *s3.HeadObjectInput) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WaitUntilObjectExists", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// WaitUntilObjectExists indicates an expected call of WaitUntilObjectExists. +func (mr *MockS3APIMockRecorder) WaitUntilObjectExists(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitUntilObjectExists", reflect.TypeOf((*MockS3API)(nil).WaitUntilObjectExists), arg0) +} + +// WaitUntilObjectExistsWithContext mocks base method. +func (m *MockS3API) WaitUntilObjectExistsWithContext(arg0 aws.Context, arg1 *s3.HeadObjectInput, arg2 ...request.WaiterOption) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "WaitUntilObjectExistsWithContext", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// WaitUntilObjectExistsWithContext indicates an expected call of WaitUntilObjectExistsWithContext. +func (mr *MockS3APIMockRecorder) WaitUntilObjectExistsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitUntilObjectExistsWithContext", reflect.TypeOf((*MockS3API)(nil).WaitUntilObjectExistsWithContext), varargs...) +} + +// WaitUntilObjectNotExists mocks base method. 
+func (m *MockS3API) WaitUntilObjectNotExists(arg0 *s3.HeadObjectInput) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WaitUntilObjectNotExists", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// WaitUntilObjectNotExists indicates an expected call of WaitUntilObjectNotExists. +func (mr *MockS3APIMockRecorder) WaitUntilObjectNotExists(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitUntilObjectNotExists", reflect.TypeOf((*MockS3API)(nil).WaitUntilObjectNotExists), arg0) +} + +// WaitUntilObjectNotExistsWithContext mocks base method. +func (m *MockS3API) WaitUntilObjectNotExistsWithContext(arg0 aws.Context, arg1 *s3.HeadObjectInput, arg2 ...request.WaiterOption) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "WaitUntilObjectNotExistsWithContext", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// WaitUntilObjectNotExistsWithContext indicates an expected call of WaitUntilObjectNotExistsWithContext. +func (mr *MockS3APIMockRecorder) WaitUntilObjectNotExistsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitUntilObjectNotExistsWithContext", reflect.TypeOf((*MockS3API)(nil).WaitUntilObjectNotExistsWithContext), varargs...) +} + +// WriteGetObjectResponse mocks base method. +func (m *MockS3API) WriteGetObjectResponse(arg0 *s3.WriteGetObjectResponseInput) (*s3.WriteGetObjectResponseOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WriteGetObjectResponse", arg0) + ret0, _ := ret[0].(*s3.WriteGetObjectResponseOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WriteGetObjectResponse indicates an expected call of WriteGetObjectResponse. +func (mr *MockS3APIMockRecorder) WriteGetObjectResponse(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteGetObjectResponse", reflect.TypeOf((*MockS3API)(nil).WriteGetObjectResponse), arg0) +} + +// WriteGetObjectResponseRequest mocks base method. +func (m *MockS3API) WriteGetObjectResponseRequest(arg0 *s3.WriteGetObjectResponseInput) (*request.Request, *s3.WriteGetObjectResponseOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WriteGetObjectResponseRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.WriteGetObjectResponseOutput) + return ret0, ret1 +} + +// WriteGetObjectResponseRequest indicates an expected call of WriteGetObjectResponseRequest. +func (mr *MockS3APIMockRecorder) WriteGetObjectResponseRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteGetObjectResponseRequest", reflect.TypeOf((*MockS3API)(nil).WriteGetObjectResponseRequest), arg0) +} + +// WriteGetObjectResponseWithContext mocks base method. +func (m *MockS3API) WriteGetObjectResponseWithContext(arg0 aws.Context, arg1 *s3.WriteGetObjectResponseInput, arg2 ...request.Option) (*s3.WriteGetObjectResponseOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "WriteGetObjectResponseWithContext", varargs...) 
+ ret0, _ := ret[0].(*s3.WriteGetObjectResponseOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WriteGetObjectResponseWithContext indicates an expected call of WriteGetObjectResponseWithContext. +func (mr *MockS3APIMockRecorder) WriteGetObjectResponseWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteGetObjectResponseWithContext", reflect.TypeOf((*MockS3API)(nil).WriteGetObjectResponseWithContext), varargs...) +} diff -Nru temporal-1.21.5-1/src/common/archiver/s3store/queryParser.go temporal-1.22.5/src/common/archiver/s3store/queryParser.go --- temporal-1.21.5-1/src/common/archiver/s3store/queryParser.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/archiver/s3store/queryParser.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,246 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
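[Editorial aside, not part of the patch] The generated MockS3API methods above follow the usual mockgen pattern: each AWS S3 call gets a mock method plus a recorder method for registering expectations. A minimal sketch of how such a mock is typically driven from a test is shown below; the package name, the NewMockS3API constructor and the EXPECT wiring are assumed from standard mockgen output rather than taken from this diff.

// Illustrative only: assumes the generated mock lives in a package named
// "mocks" and that mockgen produced the usual NewMockS3API constructor
// and EXPECT() recorder accessor.
package mocks

import (
	"testing"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/golang/mock/gomock"
)

func TestPutObjectWithContextExpectation(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	mockS3 := NewMockS3API(ctrl)

	// Record the expected call; gomock.Any() matches the context and the
	// input, and the variadic request.Option values are simply omitted.
	mockS3.EXPECT().
		PutObjectWithContext(gomock.Any(), gomock.Any()).
		Return(&s3.PutObjectOutput{}, nil)

	out, err := mockS3.PutObjectWithContext(
		aws.BackgroundContext(),
		&s3.PutObjectInput{
			Bucket: aws.String("archival-bucket"), // hypothetical bucket name
			Key:    aws.String("workflow/history"), // hypothetical key
		},
	)
	if err != nil || out == nil {
		t.Fatalf("unexpected result: %v, %v", out, err)
	}
}

Using gomock.Any() keeps the expectation independent of the exact request options, which matters for the ...WithContext variants shown above, since they accept variadic request.Option values that are flattened into the recorded call.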
- -//go:generate mockgen -copyright_file ../../../LICENSE -package $GOPACKAGE -source queryParser.go -destination queryParser_mock.go -mock_names Interface=MockQueryParser - -package s3store - -import ( - "errors" - "fmt" - "strconv" - "time" - - "github.com/xwb1989/sqlparser" - - "go.temporal.io/server/common/convert" - "go.temporal.io/server/common/primitives/timestamp" -) - -type ( - // QueryParser parses a limited SQL where clause into a struct - QueryParser interface { - Parse(query string) (*parsedQuery, error) - } - - queryParser struct{} - - parsedQuery struct { - workflowTypeName *string - workflowID *string - startTime *time.Time - closeTime *time.Time - searchPrecision *string - } -) - -// All allowed fields for filtering -const ( - WorkflowTypeName = "WorkflowTypeName" - WorkflowID = "WorkflowId" - StartTime = "StartTime" - CloseTime = "CloseTime" - SearchPrecision = "SearchPrecision" -) - -// Precision specific values -const ( - PrecisionDay = "Day" - PrecisionHour = "Hour" - PrecisionMinute = "Minute" - PrecisionSecond = "Second" -) -const ( - queryTemplate = "select * from dummy where %s" - defaultDateTimeFormat = time.RFC3339 -) - -// NewQueryParser creates a new query parser for filestore -func NewQueryParser() QueryParser { - return &queryParser{} -} - -func (p *queryParser) Parse(query string) (*parsedQuery, error) { - stmt, err := sqlparser.Parse(fmt.Sprintf(queryTemplate, query)) - if err != nil { - return nil, err - } - whereExpr := stmt.(*sqlparser.Select).Where.Expr - parsedQuery := &parsedQuery{} - if err := p.convertWhereExpr(whereExpr, parsedQuery); err != nil { - return nil, err - } - if parsedQuery.workflowID == nil && parsedQuery.workflowTypeName == nil { - return nil, errors.New("WorkflowId or WorkflowTypeName is required in query") - } - if parsedQuery.workflowID != nil && parsedQuery.workflowTypeName != nil { - return nil, errors.New("only one of WorkflowId or WorkflowTypeName can be specified in a query") - } - if parsedQuery.closeTime != nil && parsedQuery.startTime != nil { - return nil, errors.New("only one of StartTime or CloseTime can be specified in a query") - } - if (parsedQuery.closeTime != nil || parsedQuery.startTime != nil) && parsedQuery.searchPrecision == nil { - return nil, errors.New("SearchPrecision is required when searching for a StartTime or CloseTime") - } - - if parsedQuery.closeTime == nil && parsedQuery.startTime == nil && parsedQuery.searchPrecision != nil { - return nil, errors.New("SearchPrecision requires a StartTime or CloseTime") - } - return parsedQuery, nil -} - -func (p *queryParser) convertWhereExpr(expr sqlparser.Expr, parsedQuery *parsedQuery) error { - if expr == nil { - return errors.New("where expression is nil") - } - - switch expr := expr.(type) { - case *sqlparser.ComparisonExpr: - return p.convertComparisonExpr(expr, parsedQuery) - case *sqlparser.AndExpr: - return p.convertAndExpr(expr, parsedQuery) - case *sqlparser.ParenExpr: - return p.convertParenExpr(expr, parsedQuery) - default: - return errors.New("only comparison and \"and\" expression is supported") - } -} - -func (p *queryParser) convertParenExpr(parenExpr *sqlparser.ParenExpr, parsedQuery *parsedQuery) error { - return p.convertWhereExpr(parenExpr.Expr, parsedQuery) -} - -func (p *queryParser) convertAndExpr(andExpr *sqlparser.AndExpr, parsedQuery *parsedQuery) error { - if err := p.convertWhereExpr(andExpr.Left, parsedQuery); err != nil { - return err - } - return p.convertWhereExpr(andExpr.Right, parsedQuery) -} - -func (p *queryParser) 
convertComparisonExpr(compExpr *sqlparser.ComparisonExpr, parsedQuery *parsedQuery) error { - colName, ok := compExpr.Left.(*sqlparser.ColName) - if !ok { - return fmt.Errorf("invalid filter name: %s", sqlparser.String(compExpr.Left)) - } - colNameStr := sqlparser.String(colName) - op := compExpr.Operator - valExpr, ok := compExpr.Right.(*sqlparser.SQLVal) - if !ok { - return fmt.Errorf("invalid value: %s", sqlparser.String(compExpr.Right)) - } - valStr := sqlparser.String(valExpr) - - switch colNameStr { - case WorkflowTypeName: - val, err := extractStringValue(valStr) - if err != nil { - return err - } - if op != "=" { - return fmt.Errorf("only operation = is support for %s", WorkflowTypeName) - } - if parsedQuery.workflowTypeName != nil { - return fmt.Errorf("can not query %s multiple times", WorkflowTypeName) - } - parsedQuery.workflowTypeName = convert.StringPtr(val) - case WorkflowID: - val, err := extractStringValue(valStr) - if err != nil { - return err - } - if op != "=" { - return fmt.Errorf("only operation = is support for %s", WorkflowID) - } - if parsedQuery.workflowID != nil { - return fmt.Errorf("can not query %s multiple times", WorkflowID) - } - parsedQuery.workflowID = convert.StringPtr(val) - case CloseTime: - timestamp, err := convertToTime(valStr) - if err != nil { - return err - } - if op != "=" { - return fmt.Errorf("only operation = is support for %s", CloseTime) - } - parsedQuery.closeTime = ×tamp - case StartTime: - timestamp, err := convertToTime(valStr) - if err != nil { - return err - } - if op != "=" { - return fmt.Errorf("only operation = is support for %s", CloseTime) - } - parsedQuery.startTime = ×tamp - case SearchPrecision: - val, err := extractStringValue(valStr) - if err != nil { - return err - } - if op != "=" { - return fmt.Errorf("only operation = is support for %s", SearchPrecision) - } - if parsedQuery.searchPrecision != nil && *parsedQuery.searchPrecision != val { - return fmt.Errorf("only one expression is allowed for %s", SearchPrecision) - } - switch val { - case PrecisionDay: - case PrecisionHour: - case PrecisionMinute: - case PrecisionSecond: - default: - return fmt.Errorf("invalid value for %s: %s", SearchPrecision, val) - } - parsedQuery.searchPrecision = convert.StringPtr(val) - - default: - return fmt.Errorf("unknown filter name: %s", colNameStr) - } - - return nil -} - -func convertToTime(timeStr string) (time.Time, error) { - ts, err := strconv.ParseInt(timeStr, 10, 64) - if err == nil { - return timestamp.UnixOrZeroTime(ts), nil - } - timestampStr, err := extractStringValue(timeStr) - if err != nil { - return time.Time{}, err - } - parsedTime, err := time.Parse(defaultDateTimeFormat, timestampStr) - if err != nil { - return time.Time{}, err - } - return parsedTime, nil -} - -func extractStringValue(s string) (string, error) { - if len(s) >= 2 && s[0] == '\'' && s[len(s)-1] == '\'' { - return s[1 : len(s)-1], nil - } - return "", fmt.Errorf("value %s is not a string value", s) -} diff -Nru temporal-1.21.5-1/src/common/archiver/s3store/queryParser_mock.go temporal-1.22.5/src/common/archiver/s3store/queryParser_mock.go --- temporal-1.21.5-1/src/common/archiver/s3store/queryParser_mock.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/archiver/s3store/queryParser_mock.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,73 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Code generated by MockGen. DO NOT EDIT. -// Source: queryParser.go - -// Package s3store is a generated GoMock package. -package s3store - -import ( - reflect "reflect" - - gomock "github.com/golang/mock/gomock" -) - -// MockQueryParser is a mock of QueryParser interface. -type MockQueryParser struct { - ctrl *gomock.Controller - recorder *MockQueryParserMockRecorder -} - -// MockQueryParserMockRecorder is the mock recorder for MockQueryParser. -type MockQueryParserMockRecorder struct { - mock *MockQueryParser -} - -// NewMockQueryParser creates a new mock instance. -func NewMockQueryParser(ctrl *gomock.Controller) *MockQueryParser { - mock := &MockQueryParser{ctrl: ctrl} - mock.recorder = &MockQueryParserMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockQueryParser) EXPECT() *MockQueryParserMockRecorder { - return m.recorder -} - -// Parse mocks base method. -func (m *MockQueryParser) Parse(query string) (*parsedQuery, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Parse", query) - ret0, _ := ret[0].(*parsedQuery) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Parse indicates an expected call of Parse. -func (mr *MockQueryParserMockRecorder) Parse(query interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Parse", reflect.TypeOf((*MockQueryParser)(nil).Parse), query) -} diff -Nru temporal-1.21.5-1/src/common/archiver/s3store/queryParser_test.go temporal-1.22.5/src/common/archiver/s3store/queryParser_test.go --- temporal-1.21.5-1/src/common/archiver/s3store/queryParser_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/archiver/s3store/queryParser_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,276 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package s3store - -import ( - "testing" - "time" - - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - - "go.temporal.io/server/common/convert" - "go.temporal.io/server/common/primitives/timestamp" -) - -type queryParserSuite struct { - *require.Assertions - suite.Suite - - parser QueryParser -} - -func TestQueryParserSuite(t *testing.T) { - suite.Run(t, new(queryParserSuite)) -} - -func (s *queryParserSuite) SetupTest() { - s.Assertions = require.New(s.T()) - s.parser = NewQueryParser() -} - -func (s *queryParserSuite) TestParseWorkflowIDAndWorkflowTypeName() { - testCases := []struct { - query string - expectErr bool - parsedQuery *parsedQuery - }{ - { - query: "WorkflowId = \"random workflowID\"", - expectErr: false, - parsedQuery: &parsedQuery{ - workflowID: convert.StringPtr("random workflowID"), - }, - }, - { - query: "WorkflowTypeName = \"random workflowTypeName\"", - expectErr: false, - parsedQuery: &parsedQuery{ - workflowTypeName: convert.StringPtr("random workflowTypeName"), - }, - }, - { - query: "WorkflowId = \"random workflowID\" and WorkflowTypeName = \"random workflowTypeName\"", - expectErr: true, - }, - { - query: "WorkflowId = \"random workflowID\" and WorkflowId = \"random workflowID\"", - expectErr: true, - }, - { - query: "RunId = \"random runID\"", - expectErr: true, - }, - { - query: "WorkflowId = 'random workflowID'", - expectErr: false, - parsedQuery: &parsedQuery{ - workflowID: convert.StringPtr("random workflowID"), - }, - }, - { - query: "(WorkflowId = \"random workflowID\")", - expectErr: false, - parsedQuery: &parsedQuery{ - workflowID: convert.StringPtr("random workflowID"), - }, - }, - { - query: "runId = random workflowID", - expectErr: true, - }, - { - query: "WorkflowId = \"random workflowID\" or WorkflowId = \"another workflowID\"", - expectErr: true, - }, - { - query: "WorkflowId = \"random workflowID\" or runId = \"random runID\"", - expectErr: true, - }, - { - query: "workflowid = \"random workflowID\"", - expectErr: true, - }, - { - query: "runId > \"random workflowID\"", - expectErr: true, - }, - } - - for _, tc := range testCases { - parsedQuery, err := s.parser.Parse(tc.query) - if tc.expectErr { - s.Error(err) - continue - } - s.NoError(err) - s.Equal(tc.parsedQuery.workflowID, parsedQuery.workflowID) - s.Equal(tc.parsedQuery.workflowTypeName, parsedQuery.workflowTypeName) - - } -} - -func (s *queryParserSuite) TestParsePrecision() { - 
commonQueryPart := "WorkflowId = \"random workflowID\" AND " - testCases := []struct { - query string - expectErr bool - parsedQuery *parsedQuery - }{ - { - query: commonQueryPart + "CloseTime = 1000 and SearchPrecision = 'Day'", - expectErr: false, - parsedQuery: &parsedQuery{ - searchPrecision: convert.StringPtr(PrecisionDay), - }, - }, - { - query: commonQueryPart + "CloseTime = 1000 and SearchPrecision = 'Hour'", - expectErr: false, - parsedQuery: &parsedQuery{ - searchPrecision: convert.StringPtr(PrecisionHour), - }, - }, - { - query: commonQueryPart + "CloseTime = 1000 and SearchPrecision = 'Minute'", - expectErr: false, - parsedQuery: &parsedQuery{ - searchPrecision: convert.StringPtr(PrecisionMinute), - }, - }, - { - query: commonQueryPart + "StartTime = 1000 and SearchPrecision = 'Second'", - expectErr: false, - parsedQuery: &parsedQuery{ - searchPrecision: convert.StringPtr(PrecisionSecond), - }, - }, - { - query: commonQueryPart + "SearchPrecision = 'Second'", - expectErr: true, - }, - { - query: commonQueryPart + "SearchPrecision = 'Invalid string'", - expectErr: true, - }, - } - - for _, tc := range testCases { - parsedQuery, err := s.parser.Parse(tc.query) - if tc.expectErr { - s.Error(err) - continue - } - s.NoError(err) - s.Equal(tc.parsedQuery.searchPrecision, parsedQuery.searchPrecision) - } -} - -func (s *queryParserSuite) TestParseCloseTime() { - commonQueryPart := "WorkflowId = \"random workflowID\" AND SearchPrecision = 'Day' AND " - - testCases := []struct { - query string - expectErr bool - parsedQuery *parsedQuery - }{ - { - query: commonQueryPart + "CloseTime = 1000", - expectErr: false, - parsedQuery: &parsedQuery{ - closeTime: timestamp.TimePtr(time.Unix(0, 1000).UTC()), - }, - }, - { - query: commonQueryPart + "CloseTime = \"2019-01-01T11:11:11Z\"", - expectErr: false, - parsedQuery: &parsedQuery{ - closeTime: timestamp.TimePtr(time.Date(2019, 1, 1, 11, 11, 11, 0, time.UTC)), - }, - }, - { - query: commonQueryPart + "closeTime = 2000", - expectErr: true, - }, - { - query: commonQueryPart + "CloseTime > \"2019-01-01 00:00:00\"", - expectErr: true, - }, - } - - for _, tc := range testCases { - parsedQuery, err := s.parser.Parse(tc.query) - if tc.expectErr { - s.Error(err) - continue - } - s.NoError(err) - s.Equal(tc.parsedQuery.closeTime, parsedQuery.closeTime) - - } -} - -func (s *queryParserSuite) TestParseStartTime() { - commonQueryPart := "WorkflowId = \"random workflowID\" AND SearchPrecision = 'Day' AND " - - testCases := []struct { - query string - expectErr bool - parsedQuery *parsedQuery - }{ - { - query: commonQueryPart + "StartTime = 1000", - expectErr: false, - parsedQuery: &parsedQuery{ - startTime: timestamp.TimePtr(time.Unix(0, 1000)), - }, - }, - { - query: commonQueryPart + "StartTime = \"2019-01-01T11:11:11Z\"", - expectErr: false, - parsedQuery: &parsedQuery{ - startTime: timestamp.TimePtr(time.Date(2019, 1, 1, 11, 11, 11, 0, time.UTC)), - }, - }, - { - query: commonQueryPart + "startTime = 2000", - expectErr: true, - }, - { - query: commonQueryPart + "StartTime > \"2019-01-01 00:00:00\"", - expectErr: true, - }, - } - - for _, tc := range testCases { - parsedQuery, err := s.parser.Parse(tc.query) - if tc.expectErr { - s.Error(err) - continue - } - s.NoError(err) - s.Equal(tc.parsedQuery.closeTime, parsedQuery.closeTime) - } -} diff -Nru temporal-1.21.5-1/src/common/archiver/s3store/query_parser.go temporal-1.22.5/src/common/archiver/s3store/query_parser.go --- temporal-1.21.5-1/src/common/archiver/s3store/query_parser.go 1970-01-01 
00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/archiver/s3store/query_parser.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,246 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +//go:generate mockgen -copyright_file ../../../LICENSE -package $GOPACKAGE -source query_parser.go -destination query_parser_mock.go -mock_names Interface=MockQueryParser + +package s3store + +import ( + "errors" + "fmt" + "strconv" + "time" + + "github.com/xwb1989/sqlparser" + + "go.temporal.io/server/common/convert" + "go.temporal.io/server/common/primitives/timestamp" +) + +type ( + // QueryParser parses a limited SQL where clause into a struct + QueryParser interface { + Parse(query string) (*parsedQuery, error) + } + + queryParser struct{} + + parsedQuery struct { + workflowTypeName *string + workflowID *string + startTime *time.Time + closeTime *time.Time + searchPrecision *string + } +) + +// All allowed fields for filtering +const ( + WorkflowTypeName = "WorkflowTypeName" + WorkflowID = "WorkflowId" + StartTime = "StartTime" + CloseTime = "CloseTime" + SearchPrecision = "SearchPrecision" +) + +// Precision specific values +const ( + PrecisionDay = "Day" + PrecisionHour = "Hour" + PrecisionMinute = "Minute" + PrecisionSecond = "Second" +) +const ( + queryTemplate = "select * from dummy where %s" + defaultDateTimeFormat = time.RFC3339 +) + +// NewQueryParser creates a new query parser for filestore +func NewQueryParser() QueryParser { + return &queryParser{} +} + +func (p *queryParser) Parse(query string) (*parsedQuery, error) { + stmt, err := sqlparser.Parse(fmt.Sprintf(queryTemplate, query)) + if err != nil { + return nil, err + } + whereExpr := stmt.(*sqlparser.Select).Where.Expr + parsedQuery := &parsedQuery{} + if err := p.convertWhereExpr(whereExpr, parsedQuery); err != nil { + return nil, err + } + if parsedQuery.workflowID == nil && parsedQuery.workflowTypeName == nil { + return nil, errors.New("WorkflowId or WorkflowTypeName is required in query") + } + if parsedQuery.workflowID != nil && parsedQuery.workflowTypeName != nil { + return nil, errors.New("only one of WorkflowId or WorkflowTypeName can be specified in a query") + } + if parsedQuery.closeTime != nil && parsedQuery.startTime != nil { + return nil, errors.New("only one of StartTime or CloseTime can be specified in a query") + } + if (parsedQuery.closeTime != 
nil || parsedQuery.startTime != nil) && parsedQuery.searchPrecision == nil { + return nil, errors.New("SearchPrecision is required when searching for a StartTime or CloseTime") + } + + if parsedQuery.closeTime == nil && parsedQuery.startTime == nil && parsedQuery.searchPrecision != nil { + return nil, errors.New("SearchPrecision requires a StartTime or CloseTime") + } + return parsedQuery, nil +} + +func (p *queryParser) convertWhereExpr(expr sqlparser.Expr, parsedQuery *parsedQuery) error { + if expr == nil { + return errors.New("where expression is nil") + } + + switch expr := expr.(type) { + case *sqlparser.ComparisonExpr: + return p.convertComparisonExpr(expr, parsedQuery) + case *sqlparser.AndExpr: + return p.convertAndExpr(expr, parsedQuery) + case *sqlparser.ParenExpr: + return p.convertParenExpr(expr, parsedQuery) + default: + return errors.New("only comparison and \"and\" expression is supported") + } +} + +func (p *queryParser) convertParenExpr(parenExpr *sqlparser.ParenExpr, parsedQuery *parsedQuery) error { + return p.convertWhereExpr(parenExpr.Expr, parsedQuery) +} + +func (p *queryParser) convertAndExpr(andExpr *sqlparser.AndExpr, parsedQuery *parsedQuery) error { + if err := p.convertWhereExpr(andExpr.Left, parsedQuery); err != nil { + return err + } + return p.convertWhereExpr(andExpr.Right, parsedQuery) +} + +func (p *queryParser) convertComparisonExpr(compExpr *sqlparser.ComparisonExpr, parsedQuery *parsedQuery) error { + colName, ok := compExpr.Left.(*sqlparser.ColName) + if !ok { + return fmt.Errorf("invalid filter name: %s", sqlparser.String(compExpr.Left)) + } + colNameStr := sqlparser.String(colName) + op := compExpr.Operator + valExpr, ok := compExpr.Right.(*sqlparser.SQLVal) + if !ok { + return fmt.Errorf("invalid value: %s", sqlparser.String(compExpr.Right)) + } + valStr := sqlparser.String(valExpr) + + switch colNameStr { + case WorkflowTypeName: + val, err := extractStringValue(valStr) + if err != nil { + return err + } + if op != "=" { + return fmt.Errorf("only operation = is support for %s", WorkflowTypeName) + } + if parsedQuery.workflowTypeName != nil { + return fmt.Errorf("can not query %s multiple times", WorkflowTypeName) + } + parsedQuery.workflowTypeName = convert.StringPtr(val) + case WorkflowID: + val, err := extractStringValue(valStr) + if err != nil { + return err + } + if op != "=" { + return fmt.Errorf("only operation = is support for %s", WorkflowID) + } + if parsedQuery.workflowID != nil { + return fmt.Errorf("can not query %s multiple times", WorkflowID) + } + parsedQuery.workflowID = convert.StringPtr(val) + case CloseTime: + timestamp, err := convertToTime(valStr) + if err != nil { + return err + } + if op != "=" { + return fmt.Errorf("only operation = is support for %s", CloseTime) + } + parsedQuery.closeTime = ×tamp + case StartTime: + timestamp, err := convertToTime(valStr) + if err != nil { + return err + } + if op != "=" { + return fmt.Errorf("only operation = is support for %s", CloseTime) + } + parsedQuery.startTime = ×tamp + case SearchPrecision: + val, err := extractStringValue(valStr) + if err != nil { + return err + } + if op != "=" { + return fmt.Errorf("only operation = is support for %s", SearchPrecision) + } + if parsedQuery.searchPrecision != nil && *parsedQuery.searchPrecision != val { + return fmt.Errorf("only one expression is allowed for %s", SearchPrecision) + } + switch val { + case PrecisionDay: + case PrecisionHour: + case PrecisionMinute: + case PrecisionSecond: + default: + return fmt.Errorf("invalid value for %s: 
%s", SearchPrecision, val) + } + parsedQuery.searchPrecision = convert.StringPtr(val) + + default: + return fmt.Errorf("unknown filter name: %s", colNameStr) + } + + return nil +} + +func convertToTime(timeStr string) (time.Time, error) { + ts, err := strconv.ParseInt(timeStr, 10, 64) + if err == nil { + return timestamp.UnixOrZeroTime(ts), nil + } + timestampStr, err := extractStringValue(timeStr) + if err != nil { + return time.Time{}, err + } + parsedTime, err := time.Parse(defaultDateTimeFormat, timestampStr) + if err != nil { + return time.Time{}, err + } + return parsedTime, nil +} + +func extractStringValue(s string) (string, error) { + if len(s) >= 2 && s[0] == '\'' && s[len(s)-1] == '\'' { + return s[1 : len(s)-1], nil + } + return "", fmt.Errorf("value %s is not a string value", s) +} diff -Nru temporal-1.21.5-1/src/common/archiver/s3store/query_parser_mock.go temporal-1.22.5/src/common/archiver/s3store/query_parser_mock.go --- temporal-1.21.5-1/src/common/archiver/s3store/query_parser_mock.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/archiver/s3store/query_parser_mock.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,73 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Code generated by MockGen. DO NOT EDIT. +// Source: query_parser.go + +// Package s3store is a generated GoMock package. +package s3store + +import ( + reflect "reflect" + + gomock "github.com/golang/mock/gomock" +) + +// MockQueryParser is a mock of QueryParser interface. +type MockQueryParser struct { + ctrl *gomock.Controller + recorder *MockQueryParserMockRecorder +} + +// MockQueryParserMockRecorder is the mock recorder for MockQueryParser. +type MockQueryParserMockRecorder struct { + mock *MockQueryParser +} + +// NewMockQueryParser creates a new mock instance. +func NewMockQueryParser(ctrl *gomock.Controller) *MockQueryParser { + mock := &MockQueryParser{ctrl: ctrl} + mock.recorder = &MockQueryParserMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockQueryParser) EXPECT() *MockQueryParserMockRecorder { + return m.recorder +} + +// Parse mocks base method. 
+func (m *MockQueryParser) Parse(query string) (*parsedQuery, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Parse", query) + ret0, _ := ret[0].(*parsedQuery) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Parse indicates an expected call of Parse. +func (mr *MockQueryParserMockRecorder) Parse(query interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Parse", reflect.TypeOf((*MockQueryParser)(nil).Parse), query) +} diff -Nru temporal-1.21.5-1/src/common/archiver/s3store/query_parser_test.go temporal-1.22.5/src/common/archiver/s3store/query_parser_test.go --- temporal-1.21.5-1/src/common/archiver/s3store/query_parser_test.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/archiver/s3store/query_parser_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,276 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package s3store + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + "go.temporal.io/server/common/convert" + "go.temporal.io/server/common/primitives/timestamp" +) + +type queryParserSuite struct { + *require.Assertions + suite.Suite + + parser QueryParser +} + +func TestQueryParserSuite(t *testing.T) { + suite.Run(t, new(queryParserSuite)) +} + +func (s *queryParserSuite) SetupTest() { + s.Assertions = require.New(s.T()) + s.parser = NewQueryParser() +} + +func (s *queryParserSuite) TestParseWorkflowIDAndWorkflowTypeName() { + testCases := []struct { + query string + expectErr bool + parsedQuery *parsedQuery + }{ + { + query: "WorkflowId = \"random workflowID\"", + expectErr: false, + parsedQuery: &parsedQuery{ + workflowID: convert.StringPtr("random workflowID"), + }, + }, + { + query: "WorkflowTypeName = \"random workflowTypeName\"", + expectErr: false, + parsedQuery: &parsedQuery{ + workflowTypeName: convert.StringPtr("random workflowTypeName"), + }, + }, + { + query: "WorkflowId = \"random workflowID\" and WorkflowTypeName = \"random workflowTypeName\"", + expectErr: true, + }, + { + query: "WorkflowId = \"random workflowID\" and WorkflowId = \"random workflowID\"", + expectErr: true, + }, + { + query: "RunId = \"random runID\"", + expectErr: true, + }, + { + query: "WorkflowId = 'random workflowID'", + expectErr: false, + parsedQuery: &parsedQuery{ + workflowID: convert.StringPtr("random workflowID"), + }, + }, + { + query: "(WorkflowId = \"random workflowID\")", + expectErr: false, + parsedQuery: &parsedQuery{ + workflowID: convert.StringPtr("random workflowID"), + }, + }, + { + query: "runId = random workflowID", + expectErr: true, + }, + { + query: "WorkflowId = \"random workflowID\" or WorkflowId = \"another workflowID\"", + expectErr: true, + }, + { + query: "WorkflowId = \"random workflowID\" or runId = \"random runID\"", + expectErr: true, + }, + { + query: "workflowid = \"random workflowID\"", + expectErr: true, + }, + { + query: "runId > \"random workflowID\"", + expectErr: true, + }, + } + + for _, tc := range testCases { + parsedQuery, err := s.parser.Parse(tc.query) + if tc.expectErr { + s.Error(err) + continue + } + s.NoError(err) + s.Equal(tc.parsedQuery.workflowID, parsedQuery.workflowID) + s.Equal(tc.parsedQuery.workflowTypeName, parsedQuery.workflowTypeName) + + } +} + +func (s *queryParserSuite) TestParsePrecision() { + commonQueryPart := "WorkflowId = \"random workflowID\" AND " + testCases := []struct { + query string + expectErr bool + parsedQuery *parsedQuery + }{ + { + query: commonQueryPart + "CloseTime = 1000 and SearchPrecision = 'Day'", + expectErr: false, + parsedQuery: &parsedQuery{ + searchPrecision: convert.StringPtr(PrecisionDay), + }, + }, + { + query: commonQueryPart + "CloseTime = 1000 and SearchPrecision = 'Hour'", + expectErr: false, + parsedQuery: &parsedQuery{ + searchPrecision: convert.StringPtr(PrecisionHour), + }, + }, + { + query: commonQueryPart + "CloseTime = 1000 and SearchPrecision = 'Minute'", + expectErr: false, + parsedQuery: &parsedQuery{ + searchPrecision: convert.StringPtr(PrecisionMinute), + }, + }, + { + query: commonQueryPart + "StartTime = 1000 and SearchPrecision = 'Second'", + expectErr: false, + parsedQuery: &parsedQuery{ + searchPrecision: convert.StringPtr(PrecisionSecond), + }, + }, + { + query: commonQueryPart + "SearchPrecision = 'Second'", + expectErr: true, + }, + { + query: commonQueryPart + "SearchPrecision = 'Invalid string'", + expectErr: true, + 
}, + } + + for _, tc := range testCases { + parsedQuery, err := s.parser.Parse(tc.query) + if tc.expectErr { + s.Error(err) + continue + } + s.NoError(err) + s.Equal(tc.parsedQuery.searchPrecision, parsedQuery.searchPrecision) + } +} + +func (s *queryParserSuite) TestParseCloseTime() { + commonQueryPart := "WorkflowId = \"random workflowID\" AND SearchPrecision = 'Day' AND " + + testCases := []struct { + query string + expectErr bool + parsedQuery *parsedQuery + }{ + { + query: commonQueryPart + "CloseTime = 1000", + expectErr: false, + parsedQuery: &parsedQuery{ + closeTime: timestamp.TimePtr(time.Unix(0, 1000).UTC()), + }, + }, + { + query: commonQueryPart + "CloseTime = \"2019-01-01T11:11:11Z\"", + expectErr: false, + parsedQuery: &parsedQuery{ + closeTime: timestamp.TimePtr(time.Date(2019, 1, 1, 11, 11, 11, 0, time.UTC)), + }, + }, + { + query: commonQueryPart + "closeTime = 2000", + expectErr: true, + }, + { + query: commonQueryPart + "CloseTime > \"2019-01-01 00:00:00\"", + expectErr: true, + }, + } + + for _, tc := range testCases { + parsedQuery, err := s.parser.Parse(tc.query) + if tc.expectErr { + s.Error(err) + continue + } + s.NoError(err) + s.Equal(tc.parsedQuery.closeTime, parsedQuery.closeTime) + + } +} + +func (s *queryParserSuite) TestParseStartTime() { + commonQueryPart := "WorkflowId = \"random workflowID\" AND SearchPrecision = 'Day' AND " + + testCases := []struct { + query string + expectErr bool + parsedQuery *parsedQuery + }{ + { + query: commonQueryPart + "StartTime = 1000", + expectErr: false, + parsedQuery: &parsedQuery{ + startTime: timestamp.TimePtr(time.Unix(0, 1000)), + }, + }, + { + query: commonQueryPart + "StartTime = \"2019-01-01T11:11:11Z\"", + expectErr: false, + parsedQuery: &parsedQuery{ + startTime: timestamp.TimePtr(time.Date(2019, 1, 1, 11, 11, 11, 0, time.UTC)), + }, + }, + { + query: commonQueryPart + "startTime = 2000", + expectErr: true, + }, + { + query: commonQueryPart + "StartTime > \"2019-01-01 00:00:00\"", + expectErr: true, + }, + } + + for _, tc := range testCases { + parsedQuery, err := s.parser.Parse(tc.query) + if tc.expectErr { + s.Error(err) + continue + } + s.NoError(err) + s.Equal(tc.parsedQuery.closeTime, parsedQuery.closeTime) + } +} diff -Nru temporal-1.21.5-1/src/common/archiver/s3store/visibilityArchiver.go temporal-1.22.5/src/common/archiver/s3store/visibilityArchiver.go --- temporal-1.21.5-1/src/common/archiver/s3store/visibilityArchiver.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/archiver/s3store/visibilityArchiver.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,382 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package s3store - -import ( - "context" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/aws/aws-sdk-go/service/s3/s3iface" - "go.temporal.io/api/serviceerror" - workflowpb "go.temporal.io/api/workflow/v1" - - "go.temporal.io/server/common/searchattribute" - - archiverspb "go.temporal.io/server/api/archiver/v1" - "go.temporal.io/server/common/archiver" - "go.temporal.io/server/common/config" - "go.temporal.io/server/common/log/tag" - "go.temporal.io/server/common/metrics" - "go.temporal.io/server/common/primitives/timestamp" -) - -type ( - visibilityArchiver struct { - container *archiver.VisibilityBootstrapContainer - s3cli s3iface.S3API - queryParser QueryParser - } - - queryVisibilityRequest struct { - namespaceID string - pageSize int - nextPageToken []byte - parsedQuery *parsedQuery - } - - indexToArchive struct { - primaryIndex string - primaryIndexValue string - secondaryIndex string - secondaryIndexTimestamp time.Time - } -) - -const ( - errEncodeVisibilityRecord = "failed to encode visibility record" - secondaryIndexKeyStartTimeout = "startTimeout" - secondaryIndexKeyCloseTimeout = "closeTimeout" - primaryIndexKeyWorkflowTypeName = "workflowTypeName" - primaryIndexKeyWorkflowID = "workflowID" -) - -// NewVisibilityArchiver creates a new archiver.VisibilityArchiver based on s3 -func NewVisibilityArchiver( - container *archiver.VisibilityBootstrapContainer, - config *config.S3Archiver, -) (archiver.VisibilityArchiver, error) { - return newVisibilityArchiver(container, config) -} - -func newVisibilityArchiver( - container *archiver.VisibilityBootstrapContainer, - config *config.S3Archiver) (*visibilityArchiver, error) { - s3Config := &aws.Config{ - Endpoint: config.Endpoint, - Region: aws.String(config.Region), - S3ForcePathStyle: aws.Bool(config.S3ForcePathStyle), - } - sess, err := session.NewSession(s3Config) - if err != nil { - return nil, err - } - return &visibilityArchiver{ - container: container, - s3cli: s3.New(sess), - queryParser: NewQueryParser(), - }, nil -} - -func (v *visibilityArchiver) Archive( - ctx context.Context, - URI archiver.URI, - request *archiverspb.VisibilityRecord, - opts ...archiver.ArchiveOption, -) (err error) { - handler := v.container.MetricsHandler.WithTags(metrics.OperationTag(metrics.VisibilityArchiverScope), metrics.NamespaceTag(request.Namespace)) - featureCatalog := archiver.GetFeatureCatalog(opts...) 
- startTime := time.Now().UTC() - logger := archiver.TagLoggerWithArchiveVisibilityRequestAndURI(v.container.Logger, request, URI.String()) - archiveFailReason := "" - defer func() { - handler.Timer(metrics.ServiceLatency.GetMetricName()).Record(time.Since(startTime)) - if err != nil { - if isRetryableError(err) { - handler.Counter(metrics.VisibilityArchiverArchiveTransientErrorCount.GetMetricName()).Record(1) - logger.Error(archiver.ArchiveTransientErrorMsg, tag.ArchivalArchiveFailReason(archiveFailReason), tag.Error(err)) - } else { - handler.Counter(metrics.VisibilityArchiverArchiveNonRetryableErrorCount.GetMetricName()).Record(1) - logger.Error(archiver.ArchiveNonRetryableErrorMsg, tag.ArchivalArchiveFailReason(archiveFailReason), tag.Error(err)) - if featureCatalog.NonRetryableError != nil { - err = featureCatalog.NonRetryableError() - } - } - } - }() - - if err := SoftValidateURI(URI); err != nil { - archiveFailReason = archiver.ErrReasonInvalidURI - return err - } - - if err := archiver.ValidateVisibilityArchivalRequest(request); err != nil { - archiveFailReason = archiver.ErrReasonInvalidArchiveRequest - return err - } - - encodedVisibilityRecord, err := Encode(request) - if err != nil { - archiveFailReason = errEncodeVisibilityRecord - return err - } - indexes := createIndexesToArchive(request) - // Upload archive to all indexes - for _, element := range indexes { - key := constructTimestampIndex(URI.Path(), request.GetNamespaceId(), element.primaryIndex, element.primaryIndexValue, element.secondaryIndex, element.secondaryIndexTimestamp, request.GetRunId()) - if err := Upload(ctx, v.s3cli, URI, key, encodedVisibilityRecord); err != nil { - archiveFailReason = errWriteKey - return err - } - } - handler.Counter(metrics.VisibilityArchiveSuccessCount.GetMetricName()).Record(1) - return nil -} - -func createIndexesToArchive(request *archiverspb.VisibilityRecord) []indexToArchive { - return []indexToArchive{ - {primaryIndexKeyWorkflowTypeName, request.WorkflowTypeName, secondaryIndexKeyCloseTimeout, timestamp.TimeValue(request.CloseTime)}, - {primaryIndexKeyWorkflowTypeName, request.WorkflowTypeName, secondaryIndexKeyStartTimeout, timestamp.TimeValue(request.StartTime)}, - {primaryIndexKeyWorkflowID, request.GetWorkflowId(), secondaryIndexKeyCloseTimeout, timestamp.TimeValue(request.CloseTime)}, - {primaryIndexKeyWorkflowID, request.GetWorkflowId(), secondaryIndexKeyStartTimeout, timestamp.TimeValue(request.StartTime)}, - } -} - -func (v *visibilityArchiver) Query( - ctx context.Context, - URI archiver.URI, - request *archiver.QueryVisibilityRequest, - saTypeMap searchattribute.NameTypeMap, -) (*archiver.QueryVisibilityResponse, error) { - - if err := SoftValidateURI(URI); err != nil { - return nil, serviceerror.NewInvalidArgument(archiver.ErrInvalidURI.Error()) - } - - if err := archiver.ValidateQueryRequest(request); err != nil { - return nil, serviceerror.NewInvalidArgument(archiver.ErrInvalidQueryVisibilityRequest.Error()) - } - - if strings.TrimSpace(request.Query) == "" { - return v.queryAll(ctx, URI, request, saTypeMap) - } - - parsedQuery, err := v.queryParser.Parse(request.Query) - if err != nil { - return nil, serviceerror.NewInvalidArgument(err.Error()) - } - - return v.query( - ctx, - URI, - &queryVisibilityRequest{ - namespaceID: request.NamespaceID, - pageSize: request.PageSize, - nextPageToken: request.NextPageToken, - parsedQuery: parsedQuery, - }, - saTypeMap, - ) -} - -// queryAll returns all workflow executions in the archive. 
-func (v *visibilityArchiver) queryAll( - ctx context.Context, - uri archiver.URI, - request *archiver.QueryVisibilityRequest, - saTypeMap searchattribute.NameTypeMap, -) (*archiver.QueryVisibilityResponse, error) { - // remaining is the number of workflow executions left to return before we reach pageSize. - remaining := request.PageSize - nextPageToken := request.NextPageToken - var executions []*workflowpb.WorkflowExecutionInfo - // We need to loop because the number of workflow executions returned by each call to query may be fewer than - // pageSize. This is because we may have to skip some workflow executions after querying S3 (client-side filtering) - // because there are 2 entries in S3 for each workflow execution indexed by workflowTypeName (one for closeTimeout - // and one for startTimeout), and we only want to return one entry per workflow execution. See - // createIndexesToArchive for a list of all indexes. - for { - searchPrefix := constructVisibilitySearchPrefix(uri.Path(), request.NamespaceID) - // We suffix searchPrefix with workflowTypeName because the data in S3 is duplicated across combinations of 2 - // different primary indices (workflowID and workflowTypeName) and 2 different secondary indices (closeTimeout - // and startTimeout). We only want to return one entry per workflow execution, but the full path to the S3 key - // is ////, and we don't have - // the primaryIndexValue when we make the call to query, so we can only specify the primaryIndexKey. - searchPrefix += "/" + primaryIndexKeyWorkflowTypeName - // The pageSize we supply here is actually the maximum number of keys to fetch from S3. For each execution, - // there should be 2 keys in S3 for this prefix, so you might think that we should multiply the pageSize by 2. - // However, if we do that, we may end up returning more than pageSize workflow executions to the end user of - // this API. This is because we aren't guaranteed that both keys for a given workflow execution will be returned - // in the same call. For example, if the user supplies a pageSize of 1, and we specify a maximum number of keys - // of 2 to S3, we may get back entries from S3 for 2 different workflow executions. You might think that we can - // just truncate this result to 1 workflow execution, but then the nextPageToken would be incorrect. So, we may - // need to make multiple calls to S3 to get the correct number of workflow executions, which will probably make - // this API call slower. - res, err := v.queryPrefix(ctx, uri, &queryVisibilityRequest{ - namespaceID: request.NamespaceID, - pageSize: remaining, - nextPageToken: nextPageToken, - parsedQuery: &parsedQuery{}, - }, saTypeMap, searchPrefix, func(key string) bool { - // We only want to return entries for the closeTimeout secondary index, which will always be of the form: - // .../closeTimeout//, so we split the key on "/" and check that the third-to-last - // element is "closeTimeout". - elements := strings.Split(key, "/") - return len(elements) >= 3 && elements[len(elements)-3] == secondaryIndexKeyCloseTimeout - }) - if err != nil { - return nil, err - } - nextPageToken = res.NextPageToken - executions = append(executions, res.Executions...) 
- remaining -= len(res.Executions) - if len(nextPageToken) == 0 || remaining <= 0 { - break - } - } - return &archiver.QueryVisibilityResponse{ - Executions: executions, - NextPageToken: nextPageToken, - }, nil -} - -func (v *visibilityArchiver) query( - ctx context.Context, - URI archiver.URI, - request *queryVisibilityRequest, - saTypeMap searchattribute.NameTypeMap, -) (*archiver.QueryVisibilityResponse, error) { - primaryIndex := primaryIndexKeyWorkflowTypeName - primaryIndexValue := request.parsedQuery.workflowTypeName - if request.parsedQuery.workflowID != nil { - primaryIndex = primaryIndexKeyWorkflowID - primaryIndexValue = request.parsedQuery.workflowID - } - - prefix := constructIndexedVisibilitySearchPrefix( - URI.Path(), - request.namespaceID, - primaryIndex, - *primaryIndexValue, - secondaryIndexKeyCloseTimeout, - ) + "/" - if request.parsedQuery.closeTime != nil { - prefix = constructTimeBasedSearchKey( - URI.Path(), - request.namespaceID, - primaryIndex, - *primaryIndexValue, - secondaryIndexKeyCloseTimeout, - *request.parsedQuery.closeTime, - *request.parsedQuery.searchPrecision, - ) - } - if request.parsedQuery.startTime != nil { - prefix = constructTimeBasedSearchKey( - URI.Path(), - request.namespaceID, - primaryIndex, - *primaryIndexValue, - secondaryIndexKeyStartTimeout, - *request.parsedQuery.startTime, - *request.parsedQuery.searchPrecision, - ) - } - - return v.queryPrefix(ctx, URI, request, saTypeMap, prefix, nil) -} - -// queryPrefix returns all workflow executions in the archive that match the given prefix. The keyFilter function is an -// optional filter that can be used to further filter the results. If keyFilter returns false for a given key, that key -// will be skipped, and the object will not be downloaded from S3 or included in the results. 
-func (v *visibilityArchiver) queryPrefix( - ctx context.Context, - uri archiver.URI, - request *queryVisibilityRequest, - saTypeMap searchattribute.NameTypeMap, - prefix string, - keyFilter func(key string) bool, -) (*archiver.QueryVisibilityResponse, error) { - ctx, cancel := ensureContextTimeout(ctx) - defer cancel() - - var token *string - - if request.nextPageToken != nil { - token = deserializeQueryVisibilityToken(request.nextPageToken) - } - results, err := v.s3cli.ListObjectsV2WithContext(ctx, &s3.ListObjectsV2Input{ - Bucket: aws.String(uri.Hostname()), - Prefix: aws.String(prefix), - MaxKeys: aws.Int64(int64(request.pageSize)), - ContinuationToken: token, - }) - if err != nil { - if isRetryableError(err) { - return nil, serviceerror.NewUnavailable(err.Error()) - } - return nil, serviceerror.NewInvalidArgument(err.Error()) - } - if len(results.Contents) == 0 { - return &archiver.QueryVisibilityResponse{}, nil - } - - response := &archiver.QueryVisibilityResponse{} - if *results.IsTruncated { - response.NextPageToken = serializeQueryVisibilityToken(*results.NextContinuationToken) - } - for _, item := range results.Contents { - if keyFilter != nil && !keyFilter(*item.Key) { - continue - } - - encodedRecord, err := Download(ctx, v.s3cli, uri, *item.Key) - if err != nil { - return nil, serviceerror.NewUnavailable(err.Error()) - } - - record, err := decodeVisibilityRecord(encodedRecord) - if err != nil { - return nil, serviceerror.NewInternal(err.Error()) - } - executionInfo, err := convertToExecutionInfo(record, saTypeMap) - if err != nil { - return nil, serviceerror.NewInternal(err.Error()) - } - response.Executions = append(response.Executions, executionInfo) - } - return response, nil -} - -func (v *visibilityArchiver) ValidateURI(URI archiver.URI) error { - err := SoftValidateURI(URI) - if err != nil { - return err - } - return BucketExists(context.TODO(), v.s3cli, URI) -} diff -Nru temporal-1.21.5-1/src/common/archiver/s3store/visibilityArchiver_test.go temporal-1.22.5/src/common/archiver/s3store/visibilityArchiver_test.go --- temporal-1.21.5-1/src/common/archiver/s3store/visibilityArchiver_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/archiver/s3store/visibilityArchiver_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,708 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- -package s3store - -import ( - "context" - "errors" - "fmt" - "testing" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - enumspb "go.temporal.io/api/enums/v1" - "go.temporal.io/api/serviceerror" - - "go.temporal.io/server/common/searchattribute" - - archiverspb "go.temporal.io/server/api/archiver/v1" - "go.temporal.io/server/common/archiver" - "go.temporal.io/server/common/archiver/s3store/mocks" - "go.temporal.io/server/common/codec" - "go.temporal.io/server/common/convert" - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/metrics" - "go.temporal.io/server/common/payload" - "go.temporal.io/server/common/primitives/timestamp" - - commonpb "go.temporal.io/api/common/v1" - workflowpb "go.temporal.io/api/workflow/v1" -) - -type visibilityArchiverSuite struct { - *require.Assertions - suite.Suite - s3cli *mocks.MockS3API - - container *archiver.VisibilityBootstrapContainer - visibilityRecords []*archiverspb.VisibilityRecord - - controller *gomock.Controller - testArchivalURI archiver.URI -} - -func TestVisibilityArchiverSuite(t *testing.T) { - suite.Run(t, new(visibilityArchiverSuite)) -} - -func (s *visibilityArchiverSuite) TestValidateURI() { - testCases := []struct { - URI string - expectedErr error - }{ - { - URI: "wrongscheme:///a/b/c", - expectedErr: archiver.ErrURISchemeMismatch, - }, - { - URI: "s3://", - expectedErr: errNoBucketSpecified, - }, - { - URI: "s3:///test", - expectedErr: errNoBucketSpecified, - }, - { - URI: "s3://bucket/a/b/c", - expectedErr: errBucketNotExists, - }, - { - URI: testBucketURI, - expectedErr: nil, - }, - } - - s.s3cli.EXPECT().HeadBucketWithContext(gomock.Any(), gomock.Any()).DoAndReturn( - func(ctx aws.Context, input *s3.HeadBucketInput, options ...request.Option) (*s3.HeadBucketOutput, error) { - if *input.Bucket != s.testArchivalURI.Hostname() { - return nil, awserr.New("NotFound", "", nil) - } - - return &s3.HeadBucketOutput{}, nil - }).AnyTimes() - - visibilityArchiver := s.newTestVisibilityArchiver() - for _, tc := range testCases { - URI, err := archiver.NewURI(tc.URI) - s.NoError(err) - s.Equal(tc.expectedErr, visibilityArchiver.ValidateURI(URI)) - } -} - -func (s *visibilityArchiverSuite) newTestVisibilityArchiver() *visibilityArchiver { - return &visibilityArchiver{ - container: s.container, - s3cli: s.s3cli, - queryParser: NewQueryParser(), - } -} - -const ( - testWorkflowTypeName = "test-workflow-type" -) - -func (s *visibilityArchiverSuite) SetupSuite() { - var err error - - s.testArchivalURI, err = archiver.NewURI(testBucketURI) - s.Require().NoError(err) - s.container = &archiver.VisibilityBootstrapContainer{ - Logger: log.NewNoopLogger(), - MetricsHandler: metrics.NoopMetricsHandler, - } -} - -func (s *visibilityArchiverSuite) TearDownSuite() { -} - -func (s *visibilityArchiverSuite) SetupTest() { - s.Assertions = require.New(s.T()) - s.controller = gomock.NewController(s.T()) - - s.s3cli = mocks.NewMockS3API(s.controller) - setupFsEmulation(s.s3cli) - s.setupVisibilityDirectory() -} - -func (s *visibilityArchiverSuite) TearDownTest() { - s.controller.Finish() -} - -func (s *visibilityArchiverSuite) TestArchive_Fail_InvalidURI() { - visibilityArchiver := s.newTestVisibilityArchiver() - URI, err := archiver.NewURI("wrongscheme://") - s.NoError(err) - request := &archiverspb.VisibilityRecord{ 
- Namespace: testNamespace, - NamespaceId: testNamespaceID, - WorkflowId: testWorkflowID, - RunId: testRunID, - WorkflowTypeName: testWorkflowTypeName, - StartTime: timestamp.TimeNowPtrUtc(), - ExecutionTime: nil, // workflow without backoff - CloseTime: timestamp.TimeNowPtrUtc(), - Status: enumspb.WORKFLOW_EXECUTION_STATUS_FAILED, - HistoryLength: int64(101), - } - err = visibilityArchiver.Archive(context.Background(), URI, request) - s.Error(err) -} - -func (s *visibilityArchiverSuite) TestArchive_Fail_InvalidRequest() { - visibilityArchiver := s.newTestVisibilityArchiver() - err := visibilityArchiver.Archive(context.Background(), s.testArchivalURI, &archiverspb.VisibilityRecord{}) - s.Error(err) -} - -func (s *visibilityArchiverSuite) TestArchive_Fail_NonRetryableErrorOption() { - visibilityArchiver := s.newTestVisibilityArchiver() - nonRetryableErr := errors.New("some non-retryable error") - err := visibilityArchiver.Archive( - context.Background(), - s.testArchivalURI, - &archiverspb.VisibilityRecord{ - NamespaceId: testNamespaceID, - }, - archiver.GetNonRetryableErrorOption(nonRetryableErr), - ) - s.Equal(nonRetryableErr, err) -} - -func (s *visibilityArchiverSuite) TestArchive_Success() { - visibilityArchiver := s.newTestVisibilityArchiver() - closeTimestamp := timestamp.TimeNowPtrUtc() - request := &archiverspb.VisibilityRecord{ - NamespaceId: testNamespaceID, - Namespace: testNamespace, - WorkflowId: testWorkflowID, - RunId: testRunID, - WorkflowTypeName: testWorkflowTypeName, - StartTime: timestamp.TimePtr(closeTimestamp.Add(-time.Hour)), - ExecutionTime: nil, // workflow without backoff - CloseTime: closeTimestamp, - Status: enumspb.WORKFLOW_EXECUTION_STATUS_FAILED, - HistoryLength: int64(101), - Memo: &commonpb.Memo{ - Fields: map[string]*commonpb.Payload{ - "testFields": payload.EncodeBytes([]byte{1, 2, 3}), - }, - }, - SearchAttributes: map[string]string{ - "testAttribute": "456", - }, - } - URI, err := archiver.NewURI(testBucketURI + "/test-archive-success") - s.NoError(err) - err = visibilityArchiver.Archive(context.Background(), URI, request) - s.NoError(err) - - expectedKey := constructTimestampIndex(URI.Path(), testNamespaceID, primaryIndexKeyWorkflowID, testWorkflowID, secondaryIndexKeyCloseTimeout, timestamp.TimeValue(closeTimestamp), testRunID) - data, err := Download(context.Background(), visibilityArchiver.s3cli, URI, expectedKey) - s.NoError(err, expectedKey) - - archivedRecord := &archiverspb.VisibilityRecord{} - encoder := codec.NewJSONPBEncoder() - err = encoder.Decode(data, archivedRecord) - s.NoError(err) - s.Equal(request, archivedRecord) -} - -func (s *visibilityArchiverSuite) TestQuery_Fail_InvalidURI() { - visibilityArchiver := s.newTestVisibilityArchiver() - URI, err := archiver.NewURI("wrongscheme://") - s.NoError(err) - request := &archiver.QueryVisibilityRequest{ - NamespaceID: testNamespaceID, - PageSize: 1, - } - response, err := visibilityArchiver.Query(context.Background(), URI, request, searchattribute.TestNameTypeMap) - s.Error(err) - s.Nil(response) -} - -func (s *visibilityArchiverSuite) TestQuery_Fail_InvalidRequest() { - visibilityArchiver := s.newTestVisibilityArchiver() - response, err := visibilityArchiver.Query(context.Background(), s.testArchivalURI, &archiver.QueryVisibilityRequest{}, searchattribute.TestNameTypeMap) - s.Error(err) - s.Nil(response) -} - -func (s *visibilityArchiverSuite) TestQuery_Fail_InvalidQuery() { - visibilityArchiver := s.newTestVisibilityArchiver() - mockParser := NewMockQueryParser(s.controller) - 
mockParser.EXPECT().Parse(gomock.Any()).Return(nil, errors.New("invalid query")) - visibilityArchiver.queryParser = mockParser - response, err := visibilityArchiver.Query(context.Background(), s.testArchivalURI, &archiver.QueryVisibilityRequest{ - NamespaceID: "some random namespaceID", - PageSize: 10, - Query: "some invalid query", - }, searchattribute.TestNameTypeMap) - s.Error(err) - s.Nil(response) -} - -func (s *visibilityArchiverSuite) TestQuery_Success_DirectoryNotExist() { - visibilityArchiver := s.newTestVisibilityArchiver() - mockParser := NewMockQueryParser(s.controller) - mockParser.EXPECT().Parse(gomock.Any()).Return(&parsedQuery{ - workflowID: convert.StringPtr(testWorkflowID), - closeTime: &time.Time{}, - searchPrecision: convert.StringPtr(PrecisionSecond), - }, nil) - visibilityArchiver.queryParser = mockParser - request := &archiver.QueryVisibilityRequest{ - NamespaceID: testNamespaceID, - Query: "parsed by mockParser", - PageSize: 1, - } - response, err := visibilityArchiver.Query(context.Background(), s.testArchivalURI, request, searchattribute.TestNameTypeMap) - s.NoError(err) - s.NotNil(response) - s.Empty(response.Executions) - s.Empty(response.NextPageToken) -} - -func (s *visibilityArchiverSuite) TestQuery_Success_NoNextPageToken() { - visibilityArchiver := s.newTestVisibilityArchiver() - mockParser := NewMockQueryParser(s.controller) - mockParser.EXPECT().Parse(gomock.Any()).Return(&parsedQuery{ - closeTime: timestamp.TimePtr(time.Unix(0, int64(1*time.Hour)).UTC()), - searchPrecision: convert.StringPtr(PrecisionHour), - workflowID: convert.StringPtr(testWorkflowID), - }, nil) - visibilityArchiver.queryParser = mockParser - request := &archiver.QueryVisibilityRequest{ - NamespaceID: testNamespaceID, - PageSize: 10, - Query: "parsed by mockParser", - } - URI, err := archiver.NewURI(testBucketURI) - s.NoError(err) - response, err := visibilityArchiver.Query(context.Background(), URI, request, searchattribute.TestNameTypeMap) - s.NoError(err) - s.NotNil(response) - s.Nil(response.NextPageToken) - s.Len(response.Executions, 2) - ei, err := convertToExecutionInfo(s.visibilityRecords[0], searchattribute.TestNameTypeMap) - s.NoError(err) - s.Equal(response.Executions[0], ei) -} - -func (s *visibilityArchiverSuite) TestQuery_Success_SmallPageSize() { - visibilityArchiver := s.newTestVisibilityArchiver() - mockParser := NewMockQueryParser(s.controller) - mockParser.EXPECT().Parse(gomock.Any()).Return(&parsedQuery{ - closeTime: timestamp.TimePtr(time.Unix(0, 0).UTC()), - searchPrecision: convert.StringPtr(PrecisionDay), - workflowID: convert.StringPtr(testWorkflowID), - }, nil).AnyTimes() - visibilityArchiver.queryParser = mockParser - request := &archiver.QueryVisibilityRequest{ - NamespaceID: testNamespaceID, - PageSize: 2, - Query: "parsed by mockParser", - } - URI, err := archiver.NewURI(testBucketURI) - s.NoError(err) - response, err := visibilityArchiver.Query(context.Background(), URI, request, searchattribute.TestNameTypeMap) - s.NoError(err) - s.NotNil(response) - s.NotNil(response.NextPageToken) - s.Len(response.Executions, 2) - ei, err := convertToExecutionInfo(s.visibilityRecords[0], searchattribute.TestNameTypeMap) - s.NoError(err) - s.Equal(ei, response.Executions[0]) - ei, err = convertToExecutionInfo(s.visibilityRecords[1], searchattribute.TestNameTypeMap) - s.NoError(err) - s.Equal(ei, response.Executions[1]) - - request.NextPageToken = response.NextPageToken - response, err = visibilityArchiver.Query(context.Background(), URI, request, 
searchattribute.TestNameTypeMap) - s.NoError(err) - s.NotNil(response) - s.Nil(response.NextPageToken) - s.Len(response.Executions, 1) - ei, err = convertToExecutionInfo(s.visibilityRecords[2], searchattribute.TestNameTypeMap) - s.NoError(err) - s.Equal(ei, response.Executions[0]) -} - -func (s *visibilityArchiverSuite) TestQuery_EmptyQuery_InvalidNamespace() { - arc := archiver.VisibilityArchiver(s.newTestVisibilityArchiver()) - uri, err := archiver.NewURI(testBucketURI) - s.NoError(err) - req := &archiver.QueryVisibilityRequest{ - NamespaceID: "", - PageSize: 1, - NextPageToken: nil, - Query: "", - } - _, err = arc.Query(context.Background(), uri, req, searchattribute.TestNameTypeMap) - - var svcErr *serviceerror.InvalidArgument - - s.ErrorAs(err, &svcErr) -} - -func (s *visibilityArchiverSuite) TestQuery_EmptyQuery_ZeroPageSize() { - arc := archiver.VisibilityArchiver(s.newTestVisibilityArchiver()) - - uri, err := archiver.NewURI(testBucketURI) - s.NoError(err) - - req := &archiver.QueryVisibilityRequest{ - NamespaceID: testNamespaceID, - PageSize: 0, - NextPageToken: nil, - Query: "", - } - _, err = arc.Query(context.Background(), uri, req, searchattribute.TestNameTypeMap) - - var svcErr *serviceerror.InvalidArgument - - s.ErrorAs(err, &svcErr) -} - -func (s *visibilityArchiverSuite) TestQuery_EmptyQuery_Pagination() { - arc := archiver.VisibilityArchiver(s.newTestVisibilityArchiver()) - uri, err := archiver.NewURI(testBucketURI) - s.NoError(err) - - executions := make(map[string]*workflowpb.WorkflowExecutionInfo, len(s.visibilityRecords)) - var nextPageToken []byte - - for { - req := &archiver.QueryVisibilityRequest{ - NamespaceID: testNamespaceID, - PageSize: 1, - NextPageToken: nextPageToken, - Query: "", - } - response, err := arc.Query(context.Background(), uri, req, searchattribute.TestNameTypeMap) - s.NoError(err) - s.NotNil(response) - nextPageToken = response.NextPageToken - for _, execution := range response.Executions { - key := execution.Execution.GetWorkflowId() + - "/" + execution.Execution.GetRunId() + - "/" + execution.CloseTime.String() - if executions[key] != nil { - s.Fail("duplicate key", key) - } - executions[key] = execution - } - if len(nextPageToken) == 0 { - break - } - } - s.Len(executions, len(s.visibilityRecords)) -} - -type precisionTest struct { - day int - hour int - minute int - second int - precision string -} - -func (s *visibilityArchiverSuite) TestArchiveAndQueryPrecisions() { - precisionTests := []*precisionTest{ - { - day: 1, - hour: 0, - minute: 0, - second: 0, - precision: PrecisionDay, - }, - { - day: 1, - hour: 1, - minute: 0, - second: 0, - precision: PrecisionDay, - }, - { - day: 2, - hour: 1, - minute: 0, - second: 0, - precision: PrecisionHour, - }, - { - day: 2, - hour: 1, - minute: 30, - second: 0, - precision: PrecisionHour, - }, - { - day: 3, - hour: 2, - minute: 1, - second: 0, - precision: PrecisionMinute, - }, - { - day: 3, - hour: 2, - minute: 1, - second: 30, - precision: PrecisionMinute, - }, - { - day: 4, - hour: 3, - minute: 2, - second: 1, - precision: PrecisionSecond, - }, - { - day: 4, - hour: 3, - minute: 2, - second: 1, - precision: PrecisionSecond, - }, - { - day: 4, - hour: 3, - minute: 2, - second: 2, - precision: PrecisionSecond, - }, - { - day: 4, - hour: 3, - minute: 2, - second: 2, - precision: PrecisionSecond, - }, - } - visibilityArchiver := s.newTestVisibilityArchiver() - URI, err := archiver.NewURI(testBucketURI + "/archive-and-query-precision") - s.NoError(err) - - for i, testData := range precisionTests { - 
record := archiverspb.VisibilityRecord{ - NamespaceId: testNamespaceID, - Namespace: testNamespace, - WorkflowId: testWorkflowID, - RunId: fmt.Sprintf("%s-%d", testRunID, i), - WorkflowTypeName: testWorkflowTypeName, - StartTime: timestamp.TimePtr(time.Date(2000, 1, testData.day, testData.hour, testData.minute, testData.second, 0, time.UTC)), - CloseTime: timestamp.TimePtr(time.Date(2000, 1, testData.day, testData.hour, testData.minute, testData.second, 0, time.UTC)), - Status: enumspb.WORKFLOW_EXECUTION_STATUS_FAILED, - HistoryLength: 101, - } - err := visibilityArchiver.Archive(context.Background(), URI, &record) - s.NoError(err, "case %d", i) - } - - request := &archiver.QueryVisibilityRequest{ - NamespaceID: testNamespaceID, - PageSize: 100, - Query: "parsed by mockParser", - } - - for i, testData := range precisionTests { - mockParser := NewMockQueryParser(s.controller) - mockParser.EXPECT().Parse(gomock.Any()).Return(&parsedQuery{ - closeTime: timestamp.TimePtr(time.Date(2000, 1, testData.day, testData.hour, testData.minute, testData.second, 0, time.UTC)), - searchPrecision: convert.StringPtr(testData.precision), - workflowID: convert.StringPtr(testWorkflowID), - }, nil).AnyTimes() - visibilityArchiver.queryParser = mockParser - - response, err := visibilityArchiver.Query(context.Background(), URI, request, searchattribute.TestNameTypeMap) - s.NoError(err) - s.NotNil(response) - s.Len(response.Executions, 2, "Iteration ", i) - - mockParser = NewMockQueryParser(s.controller) - mockParser.EXPECT().Parse(gomock.Any()).Return(&parsedQuery{ - startTime: timestamp.TimePtr(time.Date(2000, 1, testData.day, testData.hour, testData.minute, testData.second, 0, time.UTC)), - searchPrecision: convert.StringPtr(testData.precision), - workflowID: convert.StringPtr(testWorkflowID), - }, nil).AnyTimes() - visibilityArchiver.queryParser = mockParser - - response, err = visibilityArchiver.Query(context.Background(), URI, request, searchattribute.TestNameTypeMap) - s.NoError(err) - s.NotNil(response) - s.Len(response.Executions, 2, "Iteration ", i) - - mockParser = NewMockQueryParser(s.controller) - mockParser.EXPECT().Parse(gomock.Any()).Return(&parsedQuery{ - closeTime: timestamp.TimePtr(time.Date(2000, 1, testData.day, testData.hour, testData.minute, testData.second, 0, time.UTC)), - searchPrecision: convert.StringPtr(testData.precision), - workflowTypeName: convert.StringPtr(testWorkflowTypeName), - }, nil).AnyTimes() - visibilityArchiver.queryParser = mockParser - - response, err = visibilityArchiver.Query(context.Background(), URI, request, searchattribute.TestNameTypeMap) - s.NoError(err) - s.NotNil(response) - s.Len(response.Executions, 2, "Iteration ", i) - - mockParser = NewMockQueryParser(s.controller) - mockParser.EXPECT().Parse(gomock.Any()).Return(&parsedQuery{ - startTime: timestamp.TimePtr(time.Date(2000, 1, testData.day, testData.hour, testData.minute, testData.second, 0, time.UTC)), - searchPrecision: convert.StringPtr(testData.precision), - workflowTypeName: convert.StringPtr(testWorkflowTypeName), - }, nil).AnyTimes() - visibilityArchiver.queryParser = mockParser - - response, err = visibilityArchiver.Query(context.Background(), URI, request, searchattribute.TestNameTypeMap) - s.NoError(err) - s.NotNil(response) - s.Len(response.Executions, 2, "Iteration ", i) - } -} - -func (s *visibilityArchiverSuite) TestArchiveAndQuery() { - visibilityArchiver := s.newTestVisibilityArchiver() - URI, err := archiver.NewURI(testBucketURI + "/archive-and-query") - s.NoError(err) - for _, record := 
range s.visibilityRecords { - err := visibilityArchiver.Archive(context.Background(), URI, (*archiverspb.VisibilityRecord)(record)) - s.NoError(err) - } - - mockParser := NewMockQueryParser(s.controller) - mockParser.EXPECT().Parse(gomock.Any()).Return(&parsedQuery{ - workflowID: convert.StringPtr(testWorkflowID), - }, nil).AnyTimes() - visibilityArchiver.queryParser = mockParser - request := &archiver.QueryVisibilityRequest{ - NamespaceID: testNamespaceID, - PageSize: 1, - Query: "parsed by mockParser", - } - executions := []*workflowpb.WorkflowExecutionInfo{} - first := true - for first || request.NextPageToken != nil { - response, err := visibilityArchiver.Query(context.Background(), URI, request, searchattribute.TestNameTypeMap) - s.NoError(err) - s.NotNil(response) - executions = append(executions, response.Executions...) - request.NextPageToken = response.NextPageToken - first = false - } - s.Len(executions, 3) - ei, err := convertToExecutionInfo(s.visibilityRecords[0], searchattribute.TestNameTypeMap) - s.NoError(err) - s.Equal(ei, executions[0]) - ei, err = convertToExecutionInfo(s.visibilityRecords[1], searchattribute.TestNameTypeMap) - s.NoError(err) - s.Equal(ei, executions[1]) - ei, err = convertToExecutionInfo(s.visibilityRecords[2], searchattribute.TestNameTypeMap) - s.NoError(err) - s.Equal(ei, executions[2]) - - mockParser = NewMockQueryParser(s.controller) - mockParser.EXPECT().Parse(gomock.Any()).Return(&parsedQuery{ - workflowTypeName: convert.StringPtr(testWorkflowTypeName), - }, nil).AnyTimes() - visibilityArchiver.queryParser = mockParser - request = &archiver.QueryVisibilityRequest{ - NamespaceID: testNamespaceID, - PageSize: 1, - Query: "parsed by mockParser", - } - executions = []*workflowpb.WorkflowExecutionInfo{} - first = true - for first || request.NextPageToken != nil { - response, err := visibilityArchiver.Query(context.Background(), URI, request, searchattribute.TestNameTypeMap) - s.NoError(err) - s.NotNil(response) - executions = append(executions, response.Executions...) 
- request.NextPageToken = response.NextPageToken - first = false - } - s.Len(executions, 3) - ei, err = convertToExecutionInfo(s.visibilityRecords[0], searchattribute.TestNameTypeMap) - s.NoError(err) - s.Equal(ei, executions[0]) - ei, err = convertToExecutionInfo(s.visibilityRecords[1], searchattribute.TestNameTypeMap) - s.NoError(err) - s.Equal(ei, executions[1]) - ei, err = convertToExecutionInfo(s.visibilityRecords[2], searchattribute.TestNameTypeMap) - s.NoError(err) - s.Equal(ei, executions[2]) -} - -func (s *visibilityArchiverSuite) setupVisibilityDirectory() { - s.visibilityRecords = []*archiverspb.VisibilityRecord{ - { - NamespaceId: testNamespaceID, - Namespace: testNamespace, - WorkflowId: testWorkflowID, - RunId: testRunID, - WorkflowTypeName: testWorkflowTypeName, - StartTime: timestamp.UnixOrZeroTimePtr(1), - CloseTime: timestamp.UnixOrZeroTimePtr(int64(time.Hour)), - Status: enumspb.WORKFLOW_EXECUTION_STATUS_FAILED, - HistoryLength: 101, - }, - { - NamespaceId: testNamespaceID, - Namespace: testNamespace, - WorkflowId: testWorkflowID, - RunId: testRunID + "1", - WorkflowTypeName: testWorkflowTypeName, - StartTime: timestamp.UnixOrZeroTimePtr(1), - CloseTime: timestamp.UnixOrZeroTimePtr(int64(time.Hour + 30*time.Minute)), - Status: enumspb.WORKFLOW_EXECUTION_STATUS_FAILED, - HistoryLength: 101, - }, - { - NamespaceId: testNamespaceID, - Namespace: testNamespace, - WorkflowId: testWorkflowID, - RunId: testRunID + "1", - WorkflowTypeName: testWorkflowTypeName, - StartTime: timestamp.UnixOrZeroTimePtr(1), - CloseTime: timestamp.UnixOrZeroTimePtr(int64(3 * time.Hour)), - Status: enumspb.WORKFLOW_EXECUTION_STATUS_FAILED, - HistoryLength: 101, - }, - } - visibilityArchiver := s.newTestVisibilityArchiver() - for _, record := range s.visibilityRecords { - s.writeVisibilityRecordForQueryTest(visibilityArchiver, record) - } -} - -func (s *visibilityArchiverSuite) writeVisibilityRecordForQueryTest(visibilityArchiver *visibilityArchiver, record *archiverspb.VisibilityRecord) { - err := visibilityArchiver.Archive(context.Background(), s.testArchivalURI, record) - s.Require().NoError(err) -} diff -Nru temporal-1.21.5-1/src/common/archiver/s3store/visibility_archiver.go temporal-1.22.5/src/common/archiver/s3store/visibility_archiver.go --- temporal-1.21.5-1/src/common/archiver/s3store/visibility_archiver.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/archiver/s3store/visibility_archiver.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,382 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package s3store + +import ( + "context" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/aws/aws-sdk-go/service/s3/s3iface" + "go.temporal.io/api/serviceerror" + workflowpb "go.temporal.io/api/workflow/v1" + + "go.temporal.io/server/common/searchattribute" + + archiverspb "go.temporal.io/server/api/archiver/v1" + "go.temporal.io/server/common/archiver" + "go.temporal.io/server/common/config" + "go.temporal.io/server/common/log/tag" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/primitives/timestamp" +) + +type ( + visibilityArchiver struct { + container *archiver.VisibilityBootstrapContainer + s3cli s3iface.S3API + queryParser QueryParser + } + + queryVisibilityRequest struct { + namespaceID string + pageSize int + nextPageToken []byte + parsedQuery *parsedQuery + } + + indexToArchive struct { + primaryIndex string + primaryIndexValue string + secondaryIndex string + secondaryIndexTimestamp time.Time + } +) + +const ( + errEncodeVisibilityRecord = "failed to encode visibility record" + secondaryIndexKeyStartTimeout = "startTimeout" + secondaryIndexKeyCloseTimeout = "closeTimeout" + primaryIndexKeyWorkflowTypeName = "workflowTypeName" + primaryIndexKeyWorkflowID = "workflowID" +) + +// NewVisibilityArchiver creates a new archiver.VisibilityArchiver based on s3 +func NewVisibilityArchiver( + container *archiver.VisibilityBootstrapContainer, + config *config.S3Archiver, +) (archiver.VisibilityArchiver, error) { + return newVisibilityArchiver(container, config) +} + +func newVisibilityArchiver( + container *archiver.VisibilityBootstrapContainer, + config *config.S3Archiver) (*visibilityArchiver, error) { + s3Config := &aws.Config{ + Endpoint: config.Endpoint, + Region: aws.String(config.Region), + S3ForcePathStyle: aws.Bool(config.S3ForcePathStyle), + } + sess, err := session.NewSession(s3Config) + if err != nil { + return nil, err + } + return &visibilityArchiver{ + container: container, + s3cli: s3.New(sess), + queryParser: NewQueryParser(), + }, nil +} + +func (v *visibilityArchiver) Archive( + ctx context.Context, + URI archiver.URI, + request *archiverspb.VisibilityRecord, + opts ...archiver.ArchiveOption, +) (err error) { + handler := v.container.MetricsHandler.WithTags(metrics.OperationTag(metrics.VisibilityArchiverScope), metrics.NamespaceTag(request.Namespace)) + featureCatalog := archiver.GetFeatureCatalog(opts...) 
+ startTime := time.Now().UTC() + logger := archiver.TagLoggerWithArchiveVisibilityRequestAndURI(v.container.Logger, request, URI.String()) + archiveFailReason := "" + defer func() { + handler.Timer(metrics.ServiceLatency.GetMetricName()).Record(time.Since(startTime)) + if err != nil { + if isRetryableError(err) { + handler.Counter(metrics.VisibilityArchiverArchiveTransientErrorCount.GetMetricName()).Record(1) + logger.Error(archiver.ArchiveTransientErrorMsg, tag.ArchivalArchiveFailReason(archiveFailReason), tag.Error(err)) + } else { + handler.Counter(metrics.VisibilityArchiverArchiveNonRetryableErrorCount.GetMetricName()).Record(1) + logger.Error(archiver.ArchiveNonRetryableErrorMsg, tag.ArchivalArchiveFailReason(archiveFailReason), tag.Error(err)) + if featureCatalog.NonRetryableError != nil { + err = featureCatalog.NonRetryableError() + } + } + } + }() + + if err := SoftValidateURI(URI); err != nil { + archiveFailReason = archiver.ErrReasonInvalidURI + return err + } + + if err := archiver.ValidateVisibilityArchivalRequest(request); err != nil { + archiveFailReason = archiver.ErrReasonInvalidArchiveRequest + return err + } + + encodedVisibilityRecord, err := Encode(request) + if err != nil { + archiveFailReason = errEncodeVisibilityRecord + return err + } + indexes := createIndexesToArchive(request) + // Upload archive to all indexes + for _, element := range indexes { + key := constructTimestampIndex(URI.Path(), request.GetNamespaceId(), element.primaryIndex, element.primaryIndexValue, element.secondaryIndex, element.secondaryIndexTimestamp, request.GetRunId()) + if err := Upload(ctx, v.s3cli, URI, key, encodedVisibilityRecord); err != nil { + archiveFailReason = errWriteKey + return err + } + } + handler.Counter(metrics.VisibilityArchiveSuccessCount.GetMetricName()).Record(1) + return nil +} + +func createIndexesToArchive(request *archiverspb.VisibilityRecord) []indexToArchive { + return []indexToArchive{ + {primaryIndexKeyWorkflowTypeName, request.WorkflowTypeName, secondaryIndexKeyCloseTimeout, timestamp.TimeValue(request.CloseTime)}, + {primaryIndexKeyWorkflowTypeName, request.WorkflowTypeName, secondaryIndexKeyStartTimeout, timestamp.TimeValue(request.StartTime)}, + {primaryIndexKeyWorkflowID, request.GetWorkflowId(), secondaryIndexKeyCloseTimeout, timestamp.TimeValue(request.CloseTime)}, + {primaryIndexKeyWorkflowID, request.GetWorkflowId(), secondaryIndexKeyStartTimeout, timestamp.TimeValue(request.StartTime)}, + } +} + +func (v *visibilityArchiver) Query( + ctx context.Context, + URI archiver.URI, + request *archiver.QueryVisibilityRequest, + saTypeMap searchattribute.NameTypeMap, +) (*archiver.QueryVisibilityResponse, error) { + + if err := SoftValidateURI(URI); err != nil { + return nil, serviceerror.NewInvalidArgument(archiver.ErrInvalidURI.Error()) + } + + if err := archiver.ValidateQueryRequest(request); err != nil { + return nil, serviceerror.NewInvalidArgument(archiver.ErrInvalidQueryVisibilityRequest.Error()) + } + + if strings.TrimSpace(request.Query) == "" { + return v.queryAll(ctx, URI, request, saTypeMap) + } + + parsedQuery, err := v.queryParser.Parse(request.Query) + if err != nil { + return nil, serviceerror.NewInvalidArgument(err.Error()) + } + + return v.query( + ctx, + URI, + &queryVisibilityRequest{ + namespaceID: request.NamespaceID, + pageSize: request.PageSize, + nextPageToken: request.NextPageToken, + parsedQuery: parsedQuery, + }, + saTypeMap, + ) +} + +// queryAll returns all workflow executions in the archive. 
+func (v *visibilityArchiver) queryAll( + ctx context.Context, + uri archiver.URI, + request *archiver.QueryVisibilityRequest, + saTypeMap searchattribute.NameTypeMap, +) (*archiver.QueryVisibilityResponse, error) { + // remaining is the number of workflow executions left to return before we reach pageSize. + remaining := request.PageSize + nextPageToken := request.NextPageToken + var executions []*workflowpb.WorkflowExecutionInfo + // We need to loop because the number of workflow executions returned by each call to query may be fewer than + // pageSize. This is because we may have to skip some workflow executions after querying S3 (client-side filtering) + // because there are 2 entries in S3 for each workflow execution indexed by workflowTypeName (one for closeTimeout + // and one for startTimeout), and we only want to return one entry per workflow execution. See + // createIndexesToArchive for a list of all indexes. + for { + searchPrefix := constructVisibilitySearchPrefix(uri.Path(), request.NamespaceID) + // We suffix searchPrefix with workflowTypeName because the data in S3 is duplicated across combinations of 2 + // different primary indices (workflowID and workflowTypeName) and 2 different secondary indices (closeTimeout + // and startTimeout). We only want to return one entry per workflow execution, but the full path to the S3 key + // is ////, and we don't have + // the primaryIndexValue when we make the call to query, so we can only specify the primaryIndexKey. + searchPrefix += "/" + primaryIndexKeyWorkflowTypeName + // The pageSize we supply here is actually the maximum number of keys to fetch from S3. For each execution, + // there should be 2 keys in S3 for this prefix, so you might think that we should multiply the pageSize by 2. + // However, if we do that, we may end up returning more than pageSize workflow executions to the end user of + // this API. This is because we aren't guaranteed that both keys for a given workflow execution will be returned + // in the same call. For example, if the user supplies a pageSize of 1, and we specify a maximum number of keys + // of 2 to S3, we may get back entries from S3 for 2 different workflow executions. You might think that we can + // just truncate this result to 1 workflow execution, but then the nextPageToken would be incorrect. So, we may + // need to make multiple calls to S3 to get the correct number of workflow executions, which will probably make + // this API call slower. + res, err := v.queryPrefix(ctx, uri, &queryVisibilityRequest{ + namespaceID: request.NamespaceID, + pageSize: remaining, + nextPageToken: nextPageToken, + parsedQuery: &parsedQuery{}, + }, saTypeMap, searchPrefix, func(key string) bool { + // We only want to return entries for the closeTimeout secondary index, which will always be of the form: + // .../closeTimeout//, so we split the key on "/" and check that the third-to-last + // element is "closeTimeout". + elements := strings.Split(key, "/") + return len(elements) >= 3 && elements[len(elements)-3] == secondaryIndexKeyCloseTimeout + }) + if err != nil { + return nil, err + } + nextPageToken = res.NextPageToken + executions = append(executions, res.Executions...) 
+ remaining -= len(res.Executions) + if len(nextPageToken) == 0 || remaining <= 0 { + break + } + } + return &archiver.QueryVisibilityResponse{ + Executions: executions, + NextPageToken: nextPageToken, + }, nil +} + +func (v *visibilityArchiver) query( + ctx context.Context, + URI archiver.URI, + request *queryVisibilityRequest, + saTypeMap searchattribute.NameTypeMap, +) (*archiver.QueryVisibilityResponse, error) { + primaryIndex := primaryIndexKeyWorkflowTypeName + primaryIndexValue := request.parsedQuery.workflowTypeName + if request.parsedQuery.workflowID != nil { + primaryIndex = primaryIndexKeyWorkflowID + primaryIndexValue = request.parsedQuery.workflowID + } + + prefix := constructIndexedVisibilitySearchPrefix( + URI.Path(), + request.namespaceID, + primaryIndex, + *primaryIndexValue, + secondaryIndexKeyCloseTimeout, + ) + "/" + if request.parsedQuery.closeTime != nil { + prefix = constructTimeBasedSearchKey( + URI.Path(), + request.namespaceID, + primaryIndex, + *primaryIndexValue, + secondaryIndexKeyCloseTimeout, + *request.parsedQuery.closeTime, + *request.parsedQuery.searchPrecision, + ) + } + if request.parsedQuery.startTime != nil { + prefix = constructTimeBasedSearchKey( + URI.Path(), + request.namespaceID, + primaryIndex, + *primaryIndexValue, + secondaryIndexKeyStartTimeout, + *request.parsedQuery.startTime, + *request.parsedQuery.searchPrecision, + ) + } + + return v.queryPrefix(ctx, URI, request, saTypeMap, prefix, nil) +} + +// queryPrefix returns all workflow executions in the archive that match the given prefix. The keyFilter function is an +// optional filter that can be used to further filter the results. If keyFilter returns false for a given key, that key +// will be skipped, and the object will not be downloaded from S3 or included in the results. 
+func (v *visibilityArchiver) queryPrefix( + ctx context.Context, + uri archiver.URI, + request *queryVisibilityRequest, + saTypeMap searchattribute.NameTypeMap, + prefix string, + keyFilter func(key string) bool, +) (*archiver.QueryVisibilityResponse, error) { + ctx, cancel := ensureContextTimeout(ctx) + defer cancel() + + var token *string + + if request.nextPageToken != nil { + token = deserializeQueryVisibilityToken(request.nextPageToken) + } + results, err := v.s3cli.ListObjectsV2WithContext(ctx, &s3.ListObjectsV2Input{ + Bucket: aws.String(uri.Hostname()), + Prefix: aws.String(prefix), + MaxKeys: aws.Int64(int64(request.pageSize)), + ContinuationToken: token, + }) + if err != nil { + if isRetryableError(err) { + return nil, serviceerror.NewUnavailable(err.Error()) + } + return nil, serviceerror.NewInvalidArgument(err.Error()) + } + if len(results.Contents) == 0 { + return &archiver.QueryVisibilityResponse{}, nil + } + + response := &archiver.QueryVisibilityResponse{} + if *results.IsTruncated { + response.NextPageToken = serializeQueryVisibilityToken(*results.NextContinuationToken) + } + for _, item := range results.Contents { + if keyFilter != nil && !keyFilter(*item.Key) { + continue + } + + encodedRecord, err := Download(ctx, v.s3cli, uri, *item.Key) + if err != nil { + return nil, serviceerror.NewUnavailable(err.Error()) + } + + record, err := decodeVisibilityRecord(encodedRecord) + if err != nil { + return nil, serviceerror.NewInternal(err.Error()) + } + executionInfo, err := convertToExecutionInfo(record, saTypeMap) + if err != nil { + return nil, serviceerror.NewInternal(err.Error()) + } + response.Executions = append(response.Executions, executionInfo) + } + return response, nil +} + +func (v *visibilityArchiver) ValidateURI(URI archiver.URI) error { + err := SoftValidateURI(URI) + if err != nil { + return err + } + return BucketExists(context.TODO(), v.s3cli, URI) +} diff -Nru temporal-1.21.5-1/src/common/archiver/s3store/visibility_archiver_test.go temporal-1.22.5/src/common/archiver/s3store/visibility_archiver_test.go --- temporal-1.21.5-1/src/common/archiver/s3store/visibility_archiver_test.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/archiver/s3store/visibility_archiver_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,708 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
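Editor's note: the keyFilter closure that queryAll passes to queryPrefix encodes the "closeTimeout entries only" rule against the key layout described in the comments above. A standalone sketch of that predicate; the "closeTimeout" literal is taken from the comments in this diff, while the example key components are placeholders:

package main

import (
	"fmt"
	"strings"
)

// keepCloseTimeoutOnly mirrors the keyFilter used by queryAll: keep a key only if
// its third-to-last path element is the closeTimeout secondary index.
func keepCloseTimeoutOnly(key string) bool {
	elements := strings.Split(key, "/")
	return len(elements) >= 3 && elements[len(elements)-3] == "closeTimeout"
}

func main() {
	// Placeholder keys following the layout described in the archiver comments:
	// .../<primaryIndexKey>/<primaryIndexValue>/<secondaryIndexKey>/<secondaryIndexValue>/<runID>
	closeKey := "archival/ns-id/workflowTypeName/my-type/closeTimeout/2024-02-23T09:45:43Z/run-id"
	startKey := "archival/ns-id/workflowTypeName/my-type/startTimeout/2024-02-23T09:45:43Z/run-id"
	fmt.Println(keepCloseTimeoutOnly(closeKey), keepCloseTimeoutOnly(startKey)) // true false
}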
+ +package s3store + +import ( + "context" + "errors" + "fmt" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + enumspb "go.temporal.io/api/enums/v1" + "go.temporal.io/api/serviceerror" + + "go.temporal.io/server/common/searchattribute" + + archiverspb "go.temporal.io/server/api/archiver/v1" + "go.temporal.io/server/common/archiver" + "go.temporal.io/server/common/archiver/s3store/mocks" + "go.temporal.io/server/common/codec" + "go.temporal.io/server/common/convert" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/payload" + "go.temporal.io/server/common/primitives/timestamp" + + commonpb "go.temporal.io/api/common/v1" + workflowpb "go.temporal.io/api/workflow/v1" +) + +type visibilityArchiverSuite struct { + *require.Assertions + suite.Suite + s3cli *mocks.MockS3API + + container *archiver.VisibilityBootstrapContainer + visibilityRecords []*archiverspb.VisibilityRecord + + controller *gomock.Controller + testArchivalURI archiver.URI +} + +func TestVisibilityArchiverSuite(t *testing.T) { + suite.Run(t, new(visibilityArchiverSuite)) +} + +func (s *visibilityArchiverSuite) TestValidateURI() { + testCases := []struct { + URI string + expectedErr error + }{ + { + URI: "wrongscheme:///a/b/c", + expectedErr: archiver.ErrURISchemeMismatch, + }, + { + URI: "s3://", + expectedErr: errNoBucketSpecified, + }, + { + URI: "s3:///test", + expectedErr: errNoBucketSpecified, + }, + { + URI: "s3://bucket/a/b/c", + expectedErr: errBucketNotExists, + }, + { + URI: testBucketURI, + expectedErr: nil, + }, + } + + s.s3cli.EXPECT().HeadBucketWithContext(gomock.Any(), gomock.Any()).DoAndReturn( + func(ctx aws.Context, input *s3.HeadBucketInput, options ...request.Option) (*s3.HeadBucketOutput, error) { + if *input.Bucket != s.testArchivalURI.Hostname() { + return nil, awserr.New("NotFound", "", nil) + } + + return &s3.HeadBucketOutput{}, nil + }).AnyTimes() + + visibilityArchiver := s.newTestVisibilityArchiver() + for _, tc := range testCases { + URI, err := archiver.NewURI(tc.URI) + s.NoError(err) + s.Equal(tc.expectedErr, visibilityArchiver.ValidateURI(URI)) + } +} + +func (s *visibilityArchiverSuite) newTestVisibilityArchiver() *visibilityArchiver { + return &visibilityArchiver{ + container: s.container, + s3cli: s.s3cli, + queryParser: NewQueryParser(), + } +} + +const ( + testWorkflowTypeName = "test-workflow-type" +) + +func (s *visibilityArchiverSuite) SetupSuite() { + var err error + + s.testArchivalURI, err = archiver.NewURI(testBucketURI) + s.Require().NoError(err) + s.container = &archiver.VisibilityBootstrapContainer{ + Logger: log.NewNoopLogger(), + MetricsHandler: metrics.NoopMetricsHandler, + } +} + +func (s *visibilityArchiverSuite) TearDownSuite() { +} + +func (s *visibilityArchiverSuite) SetupTest() { + s.Assertions = require.New(s.T()) + s.controller = gomock.NewController(s.T()) + + s.s3cli = mocks.NewMockS3API(s.controller) + setupFsEmulation(s.s3cli) + s.setupVisibilityDirectory() +} + +func (s *visibilityArchiverSuite) TearDownTest() { + s.controller.Finish() +} + +func (s *visibilityArchiverSuite) TestArchive_Fail_InvalidURI() { + visibilityArchiver := s.newTestVisibilityArchiver() + URI, err := archiver.NewURI("wrongscheme://") + s.NoError(err) + request := &archiverspb.VisibilityRecord{ 
+ Namespace: testNamespace, + NamespaceId: testNamespaceID, + WorkflowId: testWorkflowID, + RunId: testRunID, + WorkflowTypeName: testWorkflowTypeName, + StartTime: timestamp.TimeNowPtrUtc(), + ExecutionTime: nil, // workflow without backoff + CloseTime: timestamp.TimeNowPtrUtc(), + Status: enumspb.WORKFLOW_EXECUTION_STATUS_FAILED, + HistoryLength: int64(101), + } + err = visibilityArchiver.Archive(context.Background(), URI, request) + s.Error(err) +} + +func (s *visibilityArchiverSuite) TestArchive_Fail_InvalidRequest() { + visibilityArchiver := s.newTestVisibilityArchiver() + err := visibilityArchiver.Archive(context.Background(), s.testArchivalURI, &archiverspb.VisibilityRecord{}) + s.Error(err) +} + +func (s *visibilityArchiverSuite) TestArchive_Fail_NonRetryableErrorOption() { + visibilityArchiver := s.newTestVisibilityArchiver() + nonRetryableErr := errors.New("some non-retryable error") + err := visibilityArchiver.Archive( + context.Background(), + s.testArchivalURI, + &archiverspb.VisibilityRecord{ + NamespaceId: testNamespaceID, + }, + archiver.GetNonRetryableErrorOption(nonRetryableErr), + ) + s.Equal(nonRetryableErr, err) +} + +func (s *visibilityArchiverSuite) TestArchive_Success() { + visibilityArchiver := s.newTestVisibilityArchiver() + closeTimestamp := timestamp.TimeNowPtrUtc() + request := &archiverspb.VisibilityRecord{ + NamespaceId: testNamespaceID, + Namespace: testNamespace, + WorkflowId: testWorkflowID, + RunId: testRunID, + WorkflowTypeName: testWorkflowTypeName, + StartTime: timestamp.TimePtr(closeTimestamp.Add(-time.Hour)), + ExecutionTime: nil, // workflow without backoff + CloseTime: closeTimestamp, + Status: enumspb.WORKFLOW_EXECUTION_STATUS_FAILED, + HistoryLength: int64(101), + Memo: &commonpb.Memo{ + Fields: map[string]*commonpb.Payload{ + "testFields": payload.EncodeBytes([]byte{1, 2, 3}), + }, + }, + SearchAttributes: map[string]string{ + "testAttribute": "456", + }, + } + URI, err := archiver.NewURI(testBucketURI + "/test-archive-success") + s.NoError(err) + err = visibilityArchiver.Archive(context.Background(), URI, request) + s.NoError(err) + + expectedKey := constructTimestampIndex(URI.Path(), testNamespaceID, primaryIndexKeyWorkflowID, testWorkflowID, secondaryIndexKeyCloseTimeout, timestamp.TimeValue(closeTimestamp), testRunID) + data, err := Download(context.Background(), visibilityArchiver.s3cli, URI, expectedKey) + s.NoError(err, expectedKey) + + archivedRecord := &archiverspb.VisibilityRecord{} + encoder := codec.NewJSONPBEncoder() + err = encoder.Decode(data, archivedRecord) + s.NoError(err) + s.Equal(request, archivedRecord) +} + +func (s *visibilityArchiverSuite) TestQuery_Fail_InvalidURI() { + visibilityArchiver := s.newTestVisibilityArchiver() + URI, err := archiver.NewURI("wrongscheme://") + s.NoError(err) + request := &archiver.QueryVisibilityRequest{ + NamespaceID: testNamespaceID, + PageSize: 1, + } + response, err := visibilityArchiver.Query(context.Background(), URI, request, searchattribute.TestNameTypeMap) + s.Error(err) + s.Nil(response) +} + +func (s *visibilityArchiverSuite) TestQuery_Fail_InvalidRequest() { + visibilityArchiver := s.newTestVisibilityArchiver() + response, err := visibilityArchiver.Query(context.Background(), s.testArchivalURI, &archiver.QueryVisibilityRequest{}, searchattribute.TestNameTypeMap) + s.Error(err) + s.Nil(response) +} + +func (s *visibilityArchiverSuite) TestQuery_Fail_InvalidQuery() { + visibilityArchiver := s.newTestVisibilityArchiver() + mockParser := NewMockQueryParser(s.controller) + 
mockParser.EXPECT().Parse(gomock.Any()).Return(nil, errors.New("invalid query")) + visibilityArchiver.queryParser = mockParser + response, err := visibilityArchiver.Query(context.Background(), s.testArchivalURI, &archiver.QueryVisibilityRequest{ + NamespaceID: "some random namespaceID", + PageSize: 10, + Query: "some invalid query", + }, searchattribute.TestNameTypeMap) + s.Error(err) + s.Nil(response) +} + +func (s *visibilityArchiverSuite) TestQuery_Success_DirectoryNotExist() { + visibilityArchiver := s.newTestVisibilityArchiver() + mockParser := NewMockQueryParser(s.controller) + mockParser.EXPECT().Parse(gomock.Any()).Return(&parsedQuery{ + workflowID: convert.StringPtr(testWorkflowID), + closeTime: &time.Time{}, + searchPrecision: convert.StringPtr(PrecisionSecond), + }, nil) + visibilityArchiver.queryParser = mockParser + request := &archiver.QueryVisibilityRequest{ + NamespaceID: testNamespaceID, + Query: "parsed by mockParser", + PageSize: 1, + } + response, err := visibilityArchiver.Query(context.Background(), s.testArchivalURI, request, searchattribute.TestNameTypeMap) + s.NoError(err) + s.NotNil(response) + s.Empty(response.Executions) + s.Empty(response.NextPageToken) +} + +func (s *visibilityArchiverSuite) TestQuery_Success_NoNextPageToken() { + visibilityArchiver := s.newTestVisibilityArchiver() + mockParser := NewMockQueryParser(s.controller) + mockParser.EXPECT().Parse(gomock.Any()).Return(&parsedQuery{ + closeTime: timestamp.TimePtr(time.Unix(0, int64(1*time.Hour)).UTC()), + searchPrecision: convert.StringPtr(PrecisionHour), + workflowID: convert.StringPtr(testWorkflowID), + }, nil) + visibilityArchiver.queryParser = mockParser + request := &archiver.QueryVisibilityRequest{ + NamespaceID: testNamespaceID, + PageSize: 10, + Query: "parsed by mockParser", + } + URI, err := archiver.NewURI(testBucketURI) + s.NoError(err) + response, err := visibilityArchiver.Query(context.Background(), URI, request, searchattribute.TestNameTypeMap) + s.NoError(err) + s.NotNil(response) + s.Nil(response.NextPageToken) + s.Len(response.Executions, 2) + ei, err := convertToExecutionInfo(s.visibilityRecords[0], searchattribute.TestNameTypeMap) + s.NoError(err) + s.Equal(response.Executions[0], ei) +} + +func (s *visibilityArchiverSuite) TestQuery_Success_SmallPageSize() { + visibilityArchiver := s.newTestVisibilityArchiver() + mockParser := NewMockQueryParser(s.controller) + mockParser.EXPECT().Parse(gomock.Any()).Return(&parsedQuery{ + closeTime: timestamp.TimePtr(time.Unix(0, 0).UTC()), + searchPrecision: convert.StringPtr(PrecisionDay), + workflowID: convert.StringPtr(testWorkflowID), + }, nil).AnyTimes() + visibilityArchiver.queryParser = mockParser + request := &archiver.QueryVisibilityRequest{ + NamespaceID: testNamespaceID, + PageSize: 2, + Query: "parsed by mockParser", + } + URI, err := archiver.NewURI(testBucketURI) + s.NoError(err) + response, err := visibilityArchiver.Query(context.Background(), URI, request, searchattribute.TestNameTypeMap) + s.NoError(err) + s.NotNil(response) + s.NotNil(response.NextPageToken) + s.Len(response.Executions, 2) + ei, err := convertToExecutionInfo(s.visibilityRecords[0], searchattribute.TestNameTypeMap) + s.NoError(err) + s.Equal(ei, response.Executions[0]) + ei, err = convertToExecutionInfo(s.visibilityRecords[1], searchattribute.TestNameTypeMap) + s.NoError(err) + s.Equal(ei, response.Executions[1]) + + request.NextPageToken = response.NextPageToken + response, err = visibilityArchiver.Query(context.Background(), URI, request, 
searchattribute.TestNameTypeMap) + s.NoError(err) + s.NotNil(response) + s.Nil(response.NextPageToken) + s.Len(response.Executions, 1) + ei, err = convertToExecutionInfo(s.visibilityRecords[2], searchattribute.TestNameTypeMap) + s.NoError(err) + s.Equal(ei, response.Executions[0]) +} + +func (s *visibilityArchiverSuite) TestQuery_EmptyQuery_InvalidNamespace() { + arc := archiver.VisibilityArchiver(s.newTestVisibilityArchiver()) + uri, err := archiver.NewURI(testBucketURI) + s.NoError(err) + req := &archiver.QueryVisibilityRequest{ + NamespaceID: "", + PageSize: 1, + NextPageToken: nil, + Query: "", + } + _, err = arc.Query(context.Background(), uri, req, searchattribute.TestNameTypeMap) + + var svcErr *serviceerror.InvalidArgument + + s.ErrorAs(err, &svcErr) +} + +func (s *visibilityArchiverSuite) TestQuery_EmptyQuery_ZeroPageSize() { + arc := archiver.VisibilityArchiver(s.newTestVisibilityArchiver()) + + uri, err := archiver.NewURI(testBucketURI) + s.NoError(err) + + req := &archiver.QueryVisibilityRequest{ + NamespaceID: testNamespaceID, + PageSize: 0, + NextPageToken: nil, + Query: "", + } + _, err = arc.Query(context.Background(), uri, req, searchattribute.TestNameTypeMap) + + var svcErr *serviceerror.InvalidArgument + + s.ErrorAs(err, &svcErr) +} + +func (s *visibilityArchiverSuite) TestQuery_EmptyQuery_Pagination() { + arc := archiver.VisibilityArchiver(s.newTestVisibilityArchiver()) + uri, err := archiver.NewURI(testBucketURI) + s.NoError(err) + + executions := make(map[string]*workflowpb.WorkflowExecutionInfo, len(s.visibilityRecords)) + var nextPageToken []byte + + for { + req := &archiver.QueryVisibilityRequest{ + NamespaceID: testNamespaceID, + PageSize: 1, + NextPageToken: nextPageToken, + Query: "", + } + response, err := arc.Query(context.Background(), uri, req, searchattribute.TestNameTypeMap) + s.NoError(err) + s.NotNil(response) + nextPageToken = response.NextPageToken + for _, execution := range response.Executions { + key := execution.Execution.GetWorkflowId() + + "/" + execution.Execution.GetRunId() + + "/" + execution.CloseTime.String() + if executions[key] != nil { + s.Fail("duplicate key", key) + } + executions[key] = execution + } + if len(nextPageToken) == 0 { + break + } + } + s.Len(executions, len(s.visibilityRecords)) +} + +type precisionTest struct { + day int + hour int + minute int + second int + precision string +} + +func (s *visibilityArchiverSuite) TestArchiveAndQueryPrecisions() { + precisionTests := []*precisionTest{ + { + day: 1, + hour: 0, + minute: 0, + second: 0, + precision: PrecisionDay, + }, + { + day: 1, + hour: 1, + minute: 0, + second: 0, + precision: PrecisionDay, + }, + { + day: 2, + hour: 1, + minute: 0, + second: 0, + precision: PrecisionHour, + }, + { + day: 2, + hour: 1, + minute: 30, + second: 0, + precision: PrecisionHour, + }, + { + day: 3, + hour: 2, + minute: 1, + second: 0, + precision: PrecisionMinute, + }, + { + day: 3, + hour: 2, + minute: 1, + second: 30, + precision: PrecisionMinute, + }, + { + day: 4, + hour: 3, + minute: 2, + second: 1, + precision: PrecisionSecond, + }, + { + day: 4, + hour: 3, + minute: 2, + second: 1, + precision: PrecisionSecond, + }, + { + day: 4, + hour: 3, + minute: 2, + second: 2, + precision: PrecisionSecond, + }, + { + day: 4, + hour: 3, + minute: 2, + second: 2, + precision: PrecisionSecond, + }, + } + visibilityArchiver := s.newTestVisibilityArchiver() + URI, err := archiver.NewURI(testBucketURI + "/archive-and-query-precision") + s.NoError(err) + + for i, testData := range precisionTests { + 
record := archiverspb.VisibilityRecord{ + NamespaceId: testNamespaceID, + Namespace: testNamespace, + WorkflowId: testWorkflowID, + RunId: fmt.Sprintf("%s-%d", testRunID, i), + WorkflowTypeName: testWorkflowTypeName, + StartTime: timestamp.TimePtr(time.Date(2000, 1, testData.day, testData.hour, testData.minute, testData.second, 0, time.UTC)), + CloseTime: timestamp.TimePtr(time.Date(2000, 1, testData.day, testData.hour, testData.minute, testData.second, 0, time.UTC)), + Status: enumspb.WORKFLOW_EXECUTION_STATUS_FAILED, + HistoryLength: 101, + } + err := visibilityArchiver.Archive(context.Background(), URI, &record) + s.NoError(err, "case %d", i) + } + + request := &archiver.QueryVisibilityRequest{ + NamespaceID: testNamespaceID, + PageSize: 100, + Query: "parsed by mockParser", + } + + for i, testData := range precisionTests { + mockParser := NewMockQueryParser(s.controller) + mockParser.EXPECT().Parse(gomock.Any()).Return(&parsedQuery{ + closeTime: timestamp.TimePtr(time.Date(2000, 1, testData.day, testData.hour, testData.minute, testData.second, 0, time.UTC)), + searchPrecision: convert.StringPtr(testData.precision), + workflowID: convert.StringPtr(testWorkflowID), + }, nil).AnyTimes() + visibilityArchiver.queryParser = mockParser + + response, err := visibilityArchiver.Query(context.Background(), URI, request, searchattribute.TestNameTypeMap) + s.NoError(err) + s.NotNil(response) + s.Len(response.Executions, 2, "Iteration ", i) + + mockParser = NewMockQueryParser(s.controller) + mockParser.EXPECT().Parse(gomock.Any()).Return(&parsedQuery{ + startTime: timestamp.TimePtr(time.Date(2000, 1, testData.day, testData.hour, testData.minute, testData.second, 0, time.UTC)), + searchPrecision: convert.StringPtr(testData.precision), + workflowID: convert.StringPtr(testWorkflowID), + }, nil).AnyTimes() + visibilityArchiver.queryParser = mockParser + + response, err = visibilityArchiver.Query(context.Background(), URI, request, searchattribute.TestNameTypeMap) + s.NoError(err) + s.NotNil(response) + s.Len(response.Executions, 2, "Iteration ", i) + + mockParser = NewMockQueryParser(s.controller) + mockParser.EXPECT().Parse(gomock.Any()).Return(&parsedQuery{ + closeTime: timestamp.TimePtr(time.Date(2000, 1, testData.day, testData.hour, testData.minute, testData.second, 0, time.UTC)), + searchPrecision: convert.StringPtr(testData.precision), + workflowTypeName: convert.StringPtr(testWorkflowTypeName), + }, nil).AnyTimes() + visibilityArchiver.queryParser = mockParser + + response, err = visibilityArchiver.Query(context.Background(), URI, request, searchattribute.TestNameTypeMap) + s.NoError(err) + s.NotNil(response) + s.Len(response.Executions, 2, "Iteration ", i) + + mockParser = NewMockQueryParser(s.controller) + mockParser.EXPECT().Parse(gomock.Any()).Return(&parsedQuery{ + startTime: timestamp.TimePtr(time.Date(2000, 1, testData.day, testData.hour, testData.minute, testData.second, 0, time.UTC)), + searchPrecision: convert.StringPtr(testData.precision), + workflowTypeName: convert.StringPtr(testWorkflowTypeName), + }, nil).AnyTimes() + visibilityArchiver.queryParser = mockParser + + response, err = visibilityArchiver.Query(context.Background(), URI, request, searchattribute.TestNameTypeMap) + s.NoError(err) + s.NotNil(response) + s.Len(response.Executions, 2, "Iteration ", i) + } +} + +func (s *visibilityArchiverSuite) TestArchiveAndQuery() { + visibilityArchiver := s.newTestVisibilityArchiver() + URI, err := archiver.NewURI(testBucketURI + "/archive-and-query") + s.NoError(err) + for _, record := 
range s.visibilityRecords { + err := visibilityArchiver.Archive(context.Background(), URI, (*archiverspb.VisibilityRecord)(record)) + s.NoError(err) + } + + mockParser := NewMockQueryParser(s.controller) + mockParser.EXPECT().Parse(gomock.Any()).Return(&parsedQuery{ + workflowID: convert.StringPtr(testWorkflowID), + }, nil).AnyTimes() + visibilityArchiver.queryParser = mockParser + request := &archiver.QueryVisibilityRequest{ + NamespaceID: testNamespaceID, + PageSize: 1, + Query: "parsed by mockParser", + } + executions := []*workflowpb.WorkflowExecutionInfo{} + first := true + for first || request.NextPageToken != nil { + response, err := visibilityArchiver.Query(context.Background(), URI, request, searchattribute.TestNameTypeMap) + s.NoError(err) + s.NotNil(response) + executions = append(executions, response.Executions...) + request.NextPageToken = response.NextPageToken + first = false + } + s.Len(executions, 3) + ei, err := convertToExecutionInfo(s.visibilityRecords[0], searchattribute.TestNameTypeMap) + s.NoError(err) + s.Equal(ei, executions[0]) + ei, err = convertToExecutionInfo(s.visibilityRecords[1], searchattribute.TestNameTypeMap) + s.NoError(err) + s.Equal(ei, executions[1]) + ei, err = convertToExecutionInfo(s.visibilityRecords[2], searchattribute.TestNameTypeMap) + s.NoError(err) + s.Equal(ei, executions[2]) + + mockParser = NewMockQueryParser(s.controller) + mockParser.EXPECT().Parse(gomock.Any()).Return(&parsedQuery{ + workflowTypeName: convert.StringPtr(testWorkflowTypeName), + }, nil).AnyTimes() + visibilityArchiver.queryParser = mockParser + request = &archiver.QueryVisibilityRequest{ + NamespaceID: testNamespaceID, + PageSize: 1, + Query: "parsed by mockParser", + } + executions = []*workflowpb.WorkflowExecutionInfo{} + first = true + for first || request.NextPageToken != nil { + response, err := visibilityArchiver.Query(context.Background(), URI, request, searchattribute.TestNameTypeMap) + s.NoError(err) + s.NotNil(response) + executions = append(executions, response.Executions...) 
+ request.NextPageToken = response.NextPageToken + first = false + } + s.Len(executions, 3) + ei, err = convertToExecutionInfo(s.visibilityRecords[0], searchattribute.TestNameTypeMap) + s.NoError(err) + s.Equal(ei, executions[0]) + ei, err = convertToExecutionInfo(s.visibilityRecords[1], searchattribute.TestNameTypeMap) + s.NoError(err) + s.Equal(ei, executions[1]) + ei, err = convertToExecutionInfo(s.visibilityRecords[2], searchattribute.TestNameTypeMap) + s.NoError(err) + s.Equal(ei, executions[2]) +} + +func (s *visibilityArchiverSuite) setupVisibilityDirectory() { + s.visibilityRecords = []*archiverspb.VisibilityRecord{ + { + NamespaceId: testNamespaceID, + Namespace: testNamespace, + WorkflowId: testWorkflowID, + RunId: testRunID, + WorkflowTypeName: testWorkflowTypeName, + StartTime: timestamp.UnixOrZeroTimePtr(1), + CloseTime: timestamp.UnixOrZeroTimePtr(int64(time.Hour)), + Status: enumspb.WORKFLOW_EXECUTION_STATUS_FAILED, + HistoryLength: 101, + }, + { + NamespaceId: testNamespaceID, + Namespace: testNamespace, + WorkflowId: testWorkflowID, + RunId: testRunID + "1", + WorkflowTypeName: testWorkflowTypeName, + StartTime: timestamp.UnixOrZeroTimePtr(1), + CloseTime: timestamp.UnixOrZeroTimePtr(int64(time.Hour + 30*time.Minute)), + Status: enumspb.WORKFLOW_EXECUTION_STATUS_FAILED, + HistoryLength: 101, + }, + { + NamespaceId: testNamespaceID, + Namespace: testNamespace, + WorkflowId: testWorkflowID, + RunId: testRunID + "1", + WorkflowTypeName: testWorkflowTypeName, + StartTime: timestamp.UnixOrZeroTimePtr(1), + CloseTime: timestamp.UnixOrZeroTimePtr(int64(3 * time.Hour)), + Status: enumspb.WORKFLOW_EXECUTION_STATUS_FAILED, + HistoryLength: 101, + }, + } + visibilityArchiver := s.newTestVisibilityArchiver() + for _, record := range s.visibilityRecords { + s.writeVisibilityRecordForQueryTest(visibilityArchiver, record) + } +} + +func (s *visibilityArchiverSuite) writeVisibilityRecordForQueryTest(visibilityArchiver *visibilityArchiver, record *archiverspb.VisibilityRecord) { + err := visibilityArchiver.Archive(context.Background(), s.testArchivalURI, record) + s.Require().NoError(err) +} diff -Nru temporal-1.21.5-1/src/common/archiver/uri.go temporal-1.22.5/src/common/archiver/uri.go --- temporal-1.21.5-1/src/common/archiver/uri.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/archiver/uri.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,103 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package archiver + +import ( + "net/url" +) + +type ( + // URI identifies the archival resource to which records are written to and read from. + URI interface { + Scheme() string + Path() string + Hostname() string + Port() string + Username() string + Password() string + String() string + Opaque() string + Query() map[string][]string + } + + uri struct { + url *url.URL + } +) + +// NewURI constructs a new archiver URI from string. +func NewURI(s string) (URI, error) { + url, err := url.ParseRequestURI(s) + if err != nil { + return nil, err + } + return &uri{url: url}, nil +} + +func (u *uri) Scheme() string { + return u.url.Scheme +} + +func (u *uri) Path() string { + return u.url.Path +} + +func (u *uri) Hostname() string { + return u.url.Hostname() +} + +func (u *uri) Port() string { + return u.url.Port() +} + +func (u *uri) Username() string { + if u.url.User == nil { + return "" + } + return u.url.User.Username() +} + +func (u *uri) Password() string { + if u.url.User == nil { + return "" + } + password, exist := u.url.User.Password() + if !exist { + return "" + } + return password +} + +func (u *uri) Opaque() string { + return u.url.Opaque +} + +func (u *uri) Query() map[string][]string { + return u.url.Query() +} + +func (u *uri) String() string { + return u.url.String() +} diff -Nru temporal-1.21.5-1/src/common/archiver/uri_test.go temporal-1.22.5/src/common/archiver/uri_test.go --- temporal-1.21.5-1/src/common/archiver/uri_test.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/archiver/uri_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,152 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
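Editor's note: the archiver.URI type introduced above is a thin wrapper around url.ParseRequestURI, so scheme, host, path, user info and query are read straight from the parsed URL. A small usage sketch for an S3-style archival URI (bucket and prefix are placeholder values):

package main

import (
	"fmt"

	"go.temporal.io/server/common/archiver"
)

func main() {
	// NewURI fails if the string does not parse as a request URI
	// (see the invalid cases exercised in uri_test.go above).
	uri, err := archiver.NewURI("s3://my-archival-bucket/temporal/visibility")
	if err != nil {
		panic(err)
	}
	fmt.Println(uri.Scheme())   // s3
	fmt.Println(uri.Hostname()) // my-archival-bucket
	fmt.Println(uri.Path())     // /temporal/visibility
}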
+ +package archiver + +import ( + "testing" + + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +type ( + URISuite struct { + *require.Assertions + suite.Suite + } +) + +func TestURISuite(t *testing.T) { + suite.Run(t, new(URISuite)) +} + +func (s *URISuite) SetupTest() { + s.Assertions = require.New(s.T()) +} + +func (s *URISuite) TestURI() { + testCases := []struct { + URIString string + valid bool + scheme string + path string + hostname string + port string + username string + password string + opaque string + query map[string][]string + }{ + { + URIString: "", + valid: false, + }, + { + URIString: "some random string", + valid: false, + }, + { + URIString: "mailto:a@b.com", + valid: true, + scheme: "mailto", + opaque: "a@b.com", + }, + { + URIString: "test://", + valid: true, + scheme: "test", + }, + { + URIString: "http://example.com/path", + valid: true, + scheme: "http", + hostname: "example.com", + path: "/path", + }, + { + URIString: "http://example.com/path with space", + valid: true, + scheme: "http", + hostname: "example.com", + path: "/path with space", + }, + { + URIString: "https://localhost:8080?key1=value1&key1=value2&key2=value3", + valid: true, + scheme: "https", + hostname: "localhost", + port: "8080", + query: map[string][]string{ + "key1": {"value1", "value2"}, + "key2": {"value3"}, + }, + }, + { + URIString: "file:///absolute/path/to/dir", + valid: true, + scheme: "file", + path: "/absolute/path/to/dir", + }, + { + URIString: "test://person:password@host/path", + valid: true, + scheme: "test", + hostname: "host", + path: "/path", + username: "person", + password: "password", + }, + { + URIString: "test:opaque?key1=value1&key1=value2&key2=value3", + valid: true, + scheme: "test", + opaque: "opaque", + query: map[string][]string{ + "key1": {"value1", "value2"}, + "key2": {"value3"}, + }, + }, + } + + for _, tc := range testCases { + URI, err := NewURI(tc.URIString) + if !tc.valid { + s.Error(err) + continue + } + + s.NoError(err) + s.Equal(tc.scheme, URI.Scheme()) + s.Equal(tc.path, URI.Path()) + s.Equal(tc.hostname, URI.Hostname()) + s.Equal(tc.port, URI.Port()) + s.Equal(tc.username, URI.Username()) + s.Equal(tc.password, URI.Password()) + s.Equal(tc.opaque, URI.Opaque()) + if tc.query != nil { + s.Equal(tc.query, URI.Query()) + } + } +} diff -Nru temporal-1.21.5-1/src/common/auth/tlsConfigHelper.go temporal-1.22.5/src/common/auth/tlsConfigHelper.go --- temporal-1.21.5-1/src/common/auth/tlsConfigHelper.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/auth/tlsConfigHelper.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,97 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package auth - -import ( - "crypto/tls" - "crypto/x509" - - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/log/tag" -) - -// Helper methods for creating tls.Config structs to ensure MinVersion is 1.3 - -func NewEmptyTLSConfig() *tls.Config { - return &tls.Config{ - MinVersion: tls.VersionTLS12, - NextProtos: []string{ - "h2", - }, - } -} - -func NewTLSConfigForServer( - serverName string, - enableHostVerification bool, -) *tls.Config { - c := NewEmptyTLSConfig() - c.ServerName = serverName - c.InsecureSkipVerify = !enableHostVerification - return c -} - -func NewDynamicTLSClientConfig( - getCert func() (*tls.Certificate, error), - rootCAs *x509.CertPool, - serverName string, - enableHostVerification bool, -) *tls.Config { - c := NewTLSConfigForServer(serverName, enableHostVerification) - - if getCert != nil { - c.GetClientCertificate = func(info *tls.CertificateRequestInfo) (*tls.Certificate, error) { - return getCert() - } - } - c.RootCAs = rootCAs - - return c -} - -func NewTLSConfigWithCertsAndCAs( - clientAuth tls.ClientAuthType, - certificates []tls.Certificate, - clientCAs *x509.CertPool, - logger log.Logger, -) *tls.Config { - c := NewEmptyTLSConfig() - c.ClientAuth = clientAuth - c.Certificates = certificates - c.ClientCAs = clientCAs - c.VerifyConnection = func(state tls.ConnectionState) error { - logger.Debug("successfully established incoming TLS connection", tag.ServerName(state.ServerName), tag.Name(tlsCN(state))) - return nil - } - return c -} - -func tlsCN(state tls.ConnectionState) string { - - if len(state.PeerCertificates) == 0 { - return "" - } - return state.PeerCertificates[0].Subject.CommonName -} diff -Nru temporal-1.21.5-1/src/common/auth/tls_config_helper.go temporal-1.22.5/src/common/auth/tls_config_helper.go --- temporal-1.21.5-1/src/common/auth/tls_config_helper.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/auth/tls_config_helper.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,97 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package auth + +import ( + "crypto/tls" + "crypto/x509" + + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/log/tag" +) + +// Helper methods for creating tls.Config structs to ensure MinVersion is 1.3 + +func NewEmptyTLSConfig() *tls.Config { + return &tls.Config{ + MinVersion: tls.VersionTLS12, + NextProtos: []string{ + "h2", + }, + } +} + +func NewTLSConfigForServer( + serverName string, + enableHostVerification bool, +) *tls.Config { + c := NewEmptyTLSConfig() + c.ServerName = serverName + c.InsecureSkipVerify = !enableHostVerification + return c +} + +func NewDynamicTLSClientConfig( + getCert func() (*tls.Certificate, error), + rootCAs *x509.CertPool, + serverName string, + enableHostVerification bool, +) *tls.Config { + c := NewTLSConfigForServer(serverName, enableHostVerification) + + if getCert != nil { + c.GetClientCertificate = func(info *tls.CertificateRequestInfo) (*tls.Certificate, error) { + return getCert() + } + } + c.RootCAs = rootCAs + + return c +} + +func NewTLSConfigWithCertsAndCAs( + clientAuth tls.ClientAuthType, + certificates []tls.Certificate, + clientCAs *x509.CertPool, + logger log.Logger, +) *tls.Config { + c := NewEmptyTLSConfig() + c.ClientAuth = clientAuth + c.Certificates = certificates + c.ClientCAs = clientCAs + c.VerifyConnection = func(state tls.ConnectionState) error { + logger.Debug("successfully established incoming TLS connection", tag.ServerName(state.ServerName), tag.Name(tlsCN(state))) + return nil + } + return c +} + +func tlsCN(state tls.ConnectionState) string { + + if len(state.PeerCertificates) == 0 { + return "" + } + return state.PeerCertificates[0].Subject.CommonName +} diff -Nru temporal-1.21.5-1/src/common/authorization/default_authorizer.go temporal-1.22.5/src/common/authorization/default_authorizer.go --- temporal-1.21.5-1/src/common/authorization/default_authorizer.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/authorization/default_authorizer.go 2024-02-23 09:45:43.000000000 +0000 @@ -26,7 +26,8 @@ import ( "context" - "strings" + + "go.temporal.io/server/common/api" ) type ( @@ -34,11 +35,6 @@ } ) -const ( - operatorServicePrefix = "/temporal.api.operatorservice.v1.OperatorService/" - adminServicePrefix = "/temporal.server.api.adminservice.v1.AdminService/" -) - var _ Authorizer = (*defaultAuthorizer)(nil) // NewDefaultAuthorizer creates a default authorizer @@ -53,9 +49,9 @@ // Rules: // // Health check APIs are allowed to everyone. -// System Admin is allowed to access all APIs on all namespaces. -// System Writer is allowed to access non admin APIs on all namespaces. -// System Reader is allowed to access readonly APIs on all namespaces. +// System Admin is allowed to access all APIs on all namespaces and cluster-level. +// System Writer is allowed to access non admin APIs on all namespaces and cluster-level. +// System Reader is allowed to access readonly APIs on all namespaces and cluster-level. // Namespace Admin is allowed to access all APIs on their namespaces. // Namespace Writer is allowed to access non admin APIs on their namespaces. // Namespace Reader is allowed to access non admin readonly APIs on their namespaces. 
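Editor's note: the rewritten Authorize (next hunk) drops the hand-maintained API lists. Each method's metadata supplies a scope, which selects the claims that apply, and an access level, which maps to the minimum required role. A condensed sketch of that decision using local stand-in types; the Role/Access names mirror identifiers used in this diff, but the numeric values here are illustrative only:

package main

import "fmt"

// Local stand-ins for the roles and access levels used by the default authorizer.
type Role int

const (
	RoleUndefined Role = 0
	RoleReader    Role = 1
	RoleWriter    Role = 2
	RoleAdmin     Role = 4
)

type Access int

const (
	AccessReadOnly Access = iota
	AccessWrite
	AccessAdmin
)

// requiredRole mirrors getRequiredRole in the diff: read-only APIs need Reader,
// write APIs need Writer, everything else needs Admin.
func requiredRole(a Access) Role {
	switch a {
	case AccessReadOnly:
		return RoleReader
	case AccessWrite:
		return RoleWriter
	default:
		return RoleAdmin
	}
}

func main() {
	// A namespace-scoped call: system-level claims are combined with the
	// namespace-level role, as in claims.System | claims.Namespaces[ns].
	systemRole, namespaceRole := RoleUndefined, RoleWriter
	hasRole := systemRole | namespaceRole
	fmt.Println(hasRole >= requiredRole(AccessWrite)) // true:  a namespace writer may call write APIs
	fmt.Println(hasRole >= requiredRole(AccessAdmin)) // false: admin APIs are still denied
}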
@@ -65,59 +61,38 @@ if IsHealthCheckAPI(target.APIName) { return resultAllow, nil } - if claims == nil { return resultDeny, nil } - // System Admin is allowed for everything - if claims.System >= RoleAdmin { - return resultAllow, nil - } - - // admin service means admin / operator service - isAdminService := strings.HasPrefix(target.APIName, adminServicePrefix) || strings.HasPrefix(target.APIName, operatorServicePrefix) - - // System Writer is allowed for non admin service APIs - if claims.System >= RoleWriter && !isAdminService { - return resultAllow, nil - } - api := ApiName(target.APIName) - readOnlyNamespaceAPI := IsReadOnlyNamespaceAPI(api) - readOnlyGlobalAPI := IsReadOnlyGlobalAPI(api) - // System Reader is allowed for all read only APIs - if claims.System >= RoleReader && (readOnlyNamespaceAPI || readOnlyGlobalAPI) { - return resultAllow, nil - } + metadata := api.GetMethodMetadata(target.APIName) - // Below are for non system roles. - role, found := claims.Namespaces[target.Namespace] - if !found || role == RoleUndefined { + var hasRole Role + switch metadata.Scope { + case api.ScopeCluster: + hasRole = claims.System + case api.ScopeNamespace: + // Note: system-level claims apply across all namespaces. + // Note: if claims.Namespace is nil or target.Namespace is not found, the lookup will return zero. + hasRole = claims.System | claims.Namespaces[target.Namespace] + default: return resultDeny, nil } - if isAdminService { - // for admin service APIs, only RoleAdmin of given namespace can access - if role >= RoleAdmin { - return resultAllow, nil - } - } else { - // for non admin service APIs - if role >= RoleWriter { - return resultAllow, nil - } - if role >= RoleReader && readOnlyNamespaceAPI { - return resultAllow, nil - } + if hasRole >= getRequiredRole(metadata.Access) { + return resultAllow, nil } - return resultDeny, nil } -func ApiName(api string) string { - index := strings.LastIndex(api, "/") - if index > -1 { - return api[index+1:] +// Convert from api.Access to Role +func getRequiredRole(access api.Access) Role { + switch access { + case api.AccessReadOnly: + return RoleReader + case api.AccessWrite: + return RoleWriter + default: + return RoleAdmin } - return api } diff -Nru temporal-1.21.5-1/src/common/authorization/default_authorizer_test.go temporal-1.22.5/src/common/authorization/default_authorizer_test.go --- temporal-1.21.5-1/src/common/authorization/default_authorizer_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/authorization/default_authorizer_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -66,21 +66,17 @@ claimsSystemReader = Claims{ System: RoleReader, } - targetFooBar = CallTarget{ - APIName: "Foo", + targetNamespaceWriteBar = CallTarget{ + APIName: "/temporal.api.workflowservice.v1.WorkflowService/RespondWorkflowTaskCompleted", Namespace: "bar", } - targetFooBAR = CallTarget{ - APIName: "Foo", + targetNamespaceWriteBAR = CallTarget{ + APIName: "/temporal.api.workflowservice.v1.WorkflowService/RespondWorkflowTaskCompleted", Namespace: "BAR", } - targetListNamespaces = CallTarget{ - APIName: "/temporal.api.workflowservice.v1.WorkflowService/ListNamespaces", - Namespace: "BAR", - } - targetDescribeNamespace = CallTarget{ - APIName: "/temporal.api.workflowservice.v1.WorkflowService/DescribeNamespace", - Namespace: "BAR", + targetOperatorNamespaceRead = CallTarget{ + APIName: "/temporal.api.operatorservice.v1.OperatorService/ListSearchAttributes", + Namespace: testNamespace, } targetGrpcHealthCheck = CallTarget{ APIName: 
"/grpc.health.v1.Health/Check", @@ -91,16 +87,12 @@ Namespace: "", } targetStartWorkflow = CallTarget{ - Namespace: testNamespace, APIName: "/temporal.api.workflowservice.v1.WorkflowService/StartWorkflowExecution", + Namespace: testNamespace, } targetAdminAPI = CallTarget{ - Namespace: testNamespace, APIName: "/temporal.server.api.adminservice.v1.AdminService/AddSearchAttributes", - } - targetAdminReadonlyAPI = CallTarget{ Namespace: testNamespace, - APIName: "/temporal.server.api.adminservice.v1.AdminService/GetSearchAttributes", } ) @@ -137,44 +129,40 @@ Decision Decision }{ // SystemAdmin is allowed on everything - {"SystemAdminOnFooBar", claimsSystemAdmin, targetFooBar, DecisionAllow}, + {"SystemAdminOnFooBar", claimsSystemAdmin, targetNamespaceWriteBar, DecisionAllow}, {"SystemAdminOnAdminAPI", claimsSystemAdmin, targetAdminAPI, DecisionAllow}, - {"SystemAdminOnReadonlyAPI", claimsSystemAdmin, targetAdminReadonlyAPI, DecisionAllow}, {"SystemAdminOnStartWorkflow", claimsSystemAdmin, targetStartWorkflow, DecisionAllow}, // SystemWriter is allowed on all read only APIs and non-admin APIs on every namespaces - {"SystemWriterOnFooBar", claimsSystemWriter, targetFooBar, DecisionAllow}, + {"SystemWriterOnFooBar", claimsSystemWriter, targetNamespaceWriteBar, DecisionAllow}, {"SystemWriterOnAdminAPI", claimsSystemWriter, targetAdminAPI, DecisionDeny}, - {"SystemWriterOnReadonlyAPI", claimsSystemWriter, targetAdminReadonlyAPI, DecisionAllow}, {"SystemWriterOnStartWorkflow", claimsSystemWriter, targetStartWorkflow, DecisionAllow}, // SystemReader is allowed on all read only APIs and blocked - {"SystemReaderOnFooBar", claimsSystemReader, targetFooBar, DecisionDeny}, + {"SystemReaderOnFooBar", claimsSystemReader, targetNamespaceWriteBar, DecisionDeny}, {"SystemReaderOnAdminAPI", claimsSystemReader, targetAdminAPI, DecisionDeny}, - {"SystemReaderOnReadonlyAPI", claimsSystemReader, targetAdminReadonlyAPI, DecisionAllow}, {"SystemReaderOnStartWorkflow", claimsSystemReader, targetStartWorkflow, DecisionDeny}, // NamespaceAdmin is allowed on admin service to their own namespaces (test-namespace) - {"NamespaceAdminOnAdminAPI", claimsNamespaceAdmin, targetAdminAPI, DecisionAllow}, - {"NamespaceAdminOnReadonlyAPI", claimsNamespaceAdmin, targetAdminReadonlyAPI, DecisionAllow}, + {"NamespaceAdminOnAdminAPI", claimsNamespaceAdmin, targetAdminAPI, DecisionDeny}, {"NamespaceAdminOnStartWorkflow", claimsNamespaceAdmin, targetStartWorkflow, DecisionAllow}, - {"NamespaceAdminOnFooBar", claimsNamespaceAdmin, targetFooBar, DecisionDeny}, // namespace mismatch + {"NamespaceAdminOnFooBar", claimsNamespaceAdmin, targetNamespaceWriteBar, DecisionDeny}, // namespace mismatch - {"BarAdminOnFooBar", claimsBarAdmin, targetFooBar, DecisionAllow}, - {"BarAdminOnFooBAR", claimsBarAdmin, targetFooBAR, DecisionDeny}, // namespace case mismatch + {"BarAdminOnFooBar", claimsBarAdmin, targetNamespaceWriteBar, DecisionAllow}, + {"BarAdminOnFooBAR", claimsBarAdmin, targetNamespaceWriteBAR, DecisionDeny}, // namespace case mismatch // NamespaceWriter is not allowed on admin APIs {"NamespaceWriterOnAdminAPI", claimsNamespaceWriter, targetAdminAPI, DecisionDeny}, - {"NamespaceWriterOnReadonlyAPI", claimsNamespaceWriter, targetAdminReadonlyAPI, DecisionDeny}, {"NamespaceWriterOnStartWorkflow", claimsNamespaceWriter, targetStartWorkflow, DecisionAllow}, - {"NamespaceWriterOnFooBar", claimsNamespaceWriter, targetFooBar, DecisionDeny}, // namespace mismatch + {"NamespaceWriterOnOperatorNamespaceRead", claimsNamespaceWriter, 
targetOperatorNamespaceRead, DecisionAllow}, + {"NamespaceWriterOnFooBar", claimsNamespaceWriter, targetNamespaceWriteBar, DecisionDeny}, // namespace mismatch // NamespaceReader is allowed on read-only APIs on non admin service {"NamespaceReaderOnAdminAPI", claimsNamespaceReader, targetAdminAPI, DecisionDeny}, - {"NamespaceReaderOnReadonlyAPI", claimsNamespaceReader, targetAdminReadonlyAPI, DecisionDeny}, {"NamespaceReaderOnStartWorkflow", claimsNamespaceReader, targetStartWorkflow, DecisionDeny}, - {"NamespaceReaderOnFooBar", claimsNamespaceReader, targetFooBar, DecisionDeny}, // namespace mismatch + {"NamespaceReaderOnFooBar", claimsNamespaceReader, targetNamespaceWriteBar, DecisionDeny}, // namespace mismatch {"NamespaceReaderOnListWorkflow", claimsNamespaceReader, targetGetSystemInfo, DecisionAllow}, + {"NamespaceReaderOnOperatorNamespaceRead", claimsNamespaceReader, targetOperatorNamespaceRead, DecisionAllow}, // healthcheck allowed to everyone {"RoleNoneOnGetSystemInfo", claimsNone, targetGetSystemInfo, DecisionAllow}, diff -Nru temporal-1.21.5-1/src/common/authorization/frontend_api.go temporal-1.22.5/src/common/authorization/frontend_api.go --- temporal-1.21.5-1/src/common/authorization/frontend_api.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/authorization/frontend_api.go 2024-02-23 09:45:43.000000000 +0000 @@ -24,50 +24,23 @@ package authorization -var readOnlyNamespaceAPI = map[string]struct{}{ - "DescribeNamespace": {}, - "GetWorkflowExecutionHistory": {}, - "GetWorkflowExecutionHistoryReverse": {}, - "ListOpenWorkflowExecutions": {}, - "ListClosedWorkflowExecutions": {}, - "ListWorkflowExecutions": {}, - "ListArchivedWorkflowExecutions": {}, - "ScanWorkflowExecutions": {}, - "CountWorkflowExecutions": {}, - "QueryWorkflow": {}, - "DescribeWorkflowExecution": {}, - "DescribeTaskQueue": {}, - "ListTaskQueuePartitions": {}, - "DescribeSchedule": {}, - "ListSchedules": {}, - "ListScheduleMatchingTimes": {}, - "DescribeBatchOperation": {}, - "ListBatchOperations": {}, - "GetWorkerBuildIdCompatibility": {}, - "GetWorkerTaskReachability": {}, -} - -var readOnlyGlobalAPI = map[string]struct{}{ - "ListNamespaces": {}, - "GetSearchAttributes": {}, - "GetClusterInfo": {}, - "GetSystemInfo": {}, -} +import "go.temporal.io/server/common/api" -// note that these use the fully-qualified name var healthCheckAPI = map[string]struct{}{ "/grpc.health.v1.Health/Check": {}, "/temporal.api.workflowservice.v1.WorkflowService/GetSystemInfo": {}, } -func IsReadOnlyNamespaceAPI(api string) bool { - _, found := readOnlyNamespaceAPI[api] - return found +func IsReadOnlyNamespaceAPI(workflowServiceMethod string) bool { + fullApiName := api.WorkflowServicePrefix + workflowServiceMethod + metadata := api.GetMethodMetadata(fullApiName) + return metadata.Scope == api.ScopeNamespace && metadata.Access == api.AccessReadOnly } -func IsReadOnlyGlobalAPI(api string) bool { - _, found := readOnlyGlobalAPI[api] - return found +func IsReadOnlyGlobalAPI(workflowServiceMethod string) bool { + fullApiName := api.WorkflowServicePrefix + workflowServiceMethod + metadata := api.GetMethodMetadata(fullApiName) + return metadata.Scope == api.ScopeCluster && metadata.Access == api.AccessReadOnly } func IsHealthCheckAPI(fullApi string) bool { diff -Nru temporal-1.21.5-1/src/common/backoff/retry_test.go temporal-1.22.5/src/common/backoff/retry_test.go --- temporal-1.21.5-1/src/common/backoff/retry_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/backoff/retry_test.go 2024-02-23 
09:45:43.000000000 +0000 @@ -234,7 +234,7 @@ throttleRetryPolicy = testThrottleRetryPolicy policy := NewExponentialRetryPolicy(10 * time.Millisecond). - WithMaximumAttempts(1) + WithMaximumAttempts(2) // test if throttle retry policy is used on resource exhausted error attempt := 1 diff -Nru temporal-1.21.5-1/src/common/backoff/retrypolicy.go temporal-1.22.5/src/common/backoff/retrypolicy.go --- temporal-1.21.5-1/src/common/backoff/retrypolicy.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/backoff/retrypolicy.go 2024-02-23 09:45:43.000000000 +0000 @@ -162,7 +162,8 @@ // ComputeNextDelay returns the next delay interval. This is used by Retrier to delay calling the operation again func (p *ExponentialRetryPolicy) ComputeNextDelay(elapsedTime time.Duration, numAttempts int) time.Duration { // Check to see if we ran out of maximum number of attempts - if p.maximumAttempts != noMaximumAttempts && numAttempts > p.maximumAttempts { + // NOTE: if maxAttempts is X, return done when numAttempts == X, otherwise there will be attempt X+1 + if p.maximumAttempts != noMaximumAttempts && numAttempts >= p.maximumAttempts { return done } diff -Nru temporal-1.21.5-1/src/common/backoff/retrypolicy_test.go temporal-1.22.5/src/common/backoff/retrypolicy_test.go --- temporal-1.21.5-1/src/common/backoff/retrypolicy_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/backoff/retrypolicy_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -113,16 +113,18 @@ } func (s *RetryPolicySuite) TestNumberOfAttempts() { + maxAttempts := 5 policy := createPolicy(time.Second). - WithMaximumAttempts(5) + WithMaximumAttempts(maxAttempts) r, _ := createRetrier(policy) var next time.Duration - for i := 0; i < 6; i++ { + for i := 0; i < maxAttempts-1; i++ { next = r.NextBackOff() + s.NotEqual(done, next) } - s.Equal(done, next) + s.Equal(done, r.NextBackOff()) } // Test to make sure relative maximum interval for each retry is honoured diff -Nru temporal-1.21.5-1/src/common/cache/cache.go temporal-1.22.5/src/common/cache/cache.go --- temporal-1.21.5-1/src/common/cache/cache.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/cache/cache.go 2024-02-23 09:45:43.000000000 +0000 @@ -26,6 +26,8 @@ import ( "time" + + "go.temporal.io/server/common/clock" ) // A Cache is a generalized interface to a cache. See cache.LRU for a specific @@ -51,7 +53,8 @@ // Iterator returns the iterator of the cache Iterator() Iterator - // Size returns the number of entries currently stored in the Cache + // Size returns current size of the Cache, the size definition is implementation of SizeGetter interface + // for the entry size, if the entry does not implement SizeGetter interface, the size is 1 Size() int } @@ -61,18 +64,15 @@ // are older than the TTL will not be returned. TTL time.Duration - // InitialCapacity controls the initial capacity of the cache - InitialCapacity int - // Pin prevents in-use objects from getting evicted. Pin bool + + // TimeSource is an optional clock to use for time-skipping and testing. If this is nil, a real clock will be used. 
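Editor's note on the retrypolicy.go change above: with the corrected bound, WithMaximumAttempts(n) now yields exactly n attempts instead of n+1. A small sketch of what a caller observes (assuming, as the code suggests, that the unexported "done" sentinel is non-positive):

package main

import (
	"fmt"
	"time"

	"go.temporal.io/server/common/backoff"
)

func main() {
	// With the corrected check, two attempts are allowed and the policy reports
	// "done" once numAttempts reaches the configured maximum.
	policy := backoff.NewExponentialRetryPolicy(10 * time.Millisecond).
		WithMaximumAttempts(2)

	fmt.Println(policy.ComputeNextDelay(0, 1)) // a real backoff interval: attempt 2 will happen
	fmt.Println(policy.ComputeNextDelay(0, 2)) // the "done" sentinel (assumed non-positive): no attempt 3
}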
+ TimeSource clock.TimeSource } // SimpleOptions provides options that can be used to configure SimpleCache type SimpleOptions struct { - // InitialCapacity controls the initial capacity of the cache - InitialCapacity int - // RemovedFunc is an optional function called when an element // is scheduled for deletion RemovedFunc RemovedFunc diff -Nru temporal-1.21.5-1/src/common/cache/lru.go temporal-1.22.5/src/common/cache/lru.go --- temporal-1.21.5-1/src/common/cache/lru.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/cache/lru.go 2024-02-23 09:45:43.000000000 +0000 @@ -29,22 +29,30 @@ "errors" "sync" "time" + + "go.temporal.io/server/common/clock" ) var ( // ErrCacheFull is returned if Put fails due to cache being filled with pinned elements - ErrCacheFull = errors.New("Cache capacity is fully occupied with pinned elements") + ErrCacheFull = errors.New("cache capacity is fully occupied with pinned elements") + // ErrCacheItemTooLarge is returned if Put fails due to item size being larger than max cache capacity + ErrCacheItemTooLarge = errors.New("cache item size is larger than max cache capacity") ) +const emptyEntrySize = 0 + // lru is a concurrent fixed size cache that evicts elements in lru order type ( lru struct { - mut sync.Mutex - byAccess *list.List - byKey map[interface{}]*list.Element - maxSize int - ttl time.Duration - pin bool + mut sync.Mutex + byAccess *list.List + byKey map[interface{}]*list.Element + maxSize int + currSize int + ttl time.Duration + pin bool + timeSource clock.TimeSource } iteratorImpl struct { @@ -58,6 +66,7 @@ createTime time.Time value interface{} refCount int + size int } ) @@ -83,6 +92,7 @@ entry = &entryImpl{ key: entry.key, value: entry.value, + size: entry.size, createTime: entry.createTime, } it.prepareNext() @@ -109,7 +119,7 @@ c.mut.Lock() iterator := &iteratorImpl{ lru: c, - createTime: time.Now().UTC(), + createTime: c.timeSource.Now().UTC(), nextItem: c.byAccess.Front(), } iterator.prepareNext() @@ -124,6 +134,10 @@ return entry.value } +func (entry *entryImpl) Size() int { + return entry.size +} + func (entry *entryImpl) CreateTime() time.Time { return entry.createTime } @@ -133,13 +147,19 @@ if opts == nil { opts = &Options{} } + timeSource := opts.TimeSource + if timeSource == nil { + timeSource = clock.NewRealTimeSource() + } return &lru{ - byAccess: list.New(), - byKey: make(map[interface{}]*list.Element, opts.InitialCapacity), - ttl: opts.TTL, - maxSize: maxSize, - pin: opts.Pin, + byAccess: list.New(), + byKey: make(map[interface{}]*list.Element), + ttl: opts.TTL, + maxSize: maxSize, + currSize: 0, + pin: opts.Pin, + timeSource: timeSource, } } @@ -149,14 +169,6 @@ return New(maxSize, nil) } -// NewLRUWithInitialCapacity creates a new LRU cache with an initial capacity -// and a max size -func NewLRUWithInitialCapacity(initialCapacity, maxSize int) Cache { - return New(maxSize, &Options{ - InitialCapacity: initialCapacity, - }) -} - // Get retrieves the value stored under the given key func (c *lru) Get(key interface{}) interface{} { if c.maxSize == 0 { // @@ -172,7 +184,7 @@ entry := element.Value.(*entryImpl) - if c.isEntryExpired(entry, time.Now().UTC()) { + if c.isEntryExpired(entry, c.timeSource.Now().UTC()) { // Entry has expired c.deleteInternal(element) return nil @@ -239,12 +251,15 @@ entry.refCount-- } -// Size returns the number of entries currently in the lru, useful if cache is not full +// Size returns the current size of the lru, useful if cache is not full. 
This size is calculated by summing +// the size of all entries in the cache, where each entry's size is the size of its value. +// The size of a value is determined by its SizeGetter implementation; if the value does not +// implement SizeGetter, its size is 1. func (c *lru) Size() int { c.mut.Lock() defer c.mut.Unlock() - return len(c.byKey) + return c.currSize } // Put puts a new value associated with a given key, returning the existing value (if present) @@ -253,77 +268,124 @@ if c.maxSize == 0 { return nil, nil } + newEntrySize := getSize(value) + if newEntrySize > c.maxSize { + return nil, ErrCacheItemTooLarge + } + c.mut.Lock() defer c.mut.Unlock() elt := c.byKey[key] + // If the entry exists, check if it has expired or update the value if elt != nil { - entry := elt.Value.(*entryImpl) - if c.isEntryExpired(entry, time.Now().UTC()) { - // Entry has expired - c.deleteInternal(elt) - } else { - existing := entry.value + existingEntry := elt.Value.(*entryImpl) + if !c.isEntryExpired(existingEntry, time.Now().UTC()) { + existingVal := existingEntry.value if allowUpdate { - entry.value = value - if c.ttl != 0 { - entry.createTime = time.Now().UTC() + newCacheSize := c.calculateNewCacheSize(newEntrySize, existingEntry.Size()) + if newCacheSize > c.maxSize { + c.tryEvictUntilEnoughSpaceWithSkipEntry(newEntrySize, existingEntry) + // calculate again after eviction + newCacheSize = c.calculateNewCacheSize(newEntrySize, existingEntry.Size()) + if newCacheSize > c.maxSize { + // This should never happen since allowUpdate is always **true** for non-pinned cache, + // and if all entries are not pinned(ref==0), then the cache should never be full as long as + // new entry's size is less than max size. + // However, to prevent any unexpected behavior, it checks the cache size again. + return nil, ErrCacheFull + } } + existingEntry.value = value + existingEntry.size = newEntrySize + c.currSize = newCacheSize + c.updateEntryTTL(existingEntry) } + c.updateEntryRefCount(existingEntry) c.byAccess.MoveToFront(elt) - if c.pin { - entry.refCount++ - } - return existing, nil + return existingVal, nil } - } - entry := &entryImpl{ - key: key, - value: value, + // Entry has expired + c.deleteInternal(elt) } - if c.pin { - entry.refCount++ - } + c.tryEvictUntilEnoughSpaceWithSkipEntry(newEntrySize, nil) - if c.ttl != 0 { - entry.createTime = time.Now().UTC() + // check if the new entry can fit in the cache + newCacheSize := c.calculateNewCacheSize(newEntrySize, emptyEntrySize) + if newCacheSize > c.maxSize { + return nil, ErrCacheFull } - if len(c.byKey) >= c.maxSize { - c.evictOnceInternal() - } - if len(c.byKey) >= c.maxSize { - return nil, ErrCacheFull + entry := &entryImpl{ + key: key, + value: value, + size: newEntrySize, } + c.updateEntryTTL(entry) + c.updateEntryRefCount(entry) element := c.byAccess.PushFront(entry) c.byKey[key] = element + c.currSize = newCacheSize return nil, nil } +func (c *lru) calculateNewCacheSize(newEntrySize int, existingEntrySize int) int { + return c.currSize - existingEntrySize + newEntrySize +} + func (c *lru) deleteInternal(element *list.Element) { entry := c.byAccess.Remove(element).(*entryImpl) + c.currSize -= entry.Size() delete(c.byKey, entry.key) } -func (c *lru) evictOnceInternal() { +// tryEvictUntilEnoughSpaceWithSkipEntry tries to evict entries until there is enough space for the new entry, +// skipping the existing entry because it is being updated in place.
+func (c *lru) tryEvictUntilEnoughSpaceWithSkipEntry(newEntrySize int, existingEntry *entryImpl) { element := c.byAccess.Back() - for element != nil { + existingEntrySize := 0 + if existingEntry != nil { + existingEntrySize = existingEntry.Size() + } + + for c.calculateNewCacheSize(newEntrySize, existingEntrySize) > c.maxSize && element != nil { entry := element.Value.(*entryImpl) - if entry.refCount == 0 { - c.deleteInternal(element) - return + if existingEntry != nil && entry.key == existingEntry.key { + element = element.Prev() + continue } + element = c.tryEvictAndGetPreviousElement(entry, element) + } +} - // entry.refCount > 0 - // skip, entry still being referenced - element = element.Prev() +func (c *lru) tryEvictAndGetPreviousElement(entry *entryImpl, element *list.Element) *list.Element { + if entry.refCount == 0 { + elementPrev := element.Prev() + // currSize will be updated within deleteInternal + c.deleteInternal(element) + return elementPrev } + // entry.refCount > 0 + // skip, entry still being referenced + return element.Prev() } func (c *lru) isEntryExpired(entry *entryImpl, currentTime time.Time) bool { return entry.refCount == 0 && !entry.createTime.IsZero() && currentTime.After(entry.createTime.Add(c.ttl)) } + +func (c *lru) updateEntryTTL(entry *entryImpl) { + if c.ttl != 0 { + entry.createTime = c.timeSource.Now().UTC() + } +} + +func (c *lru) updateEntryRefCount(entry *entryImpl) { + if c.pin { + entry.refCount++ + } +} diff -Nru temporal-1.21.5-1/src/common/cache/lru_test.go temporal-1.22.5/src/common/cache/lru_test.go --- temporal-1.21.5-1/src/common/cache/lru_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/cache/lru_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -25,19 +25,34 @@ package cache import ( + "math/rand" "sync" "testing" "time" + "github.com/google/uuid" "github.com/stretchr/testify/assert" + "go.temporal.io/server/common/clock" ) -type keyType struct { - dummyString string - dummyInt int +type ( + keyType struct { + dummyString string + dummyInt int + } + + testEntryWithCacheSize struct { + cacheSize int + } +) + +func (c *testEntryWithCacheSize) CacheSize() int { + return c.cacheSize } func TestLRU(t *testing.T) { + t.Parallel() + cache := NewLRU(4) cache.Put("A", "Foo") @@ -56,22 +71,28 @@ cache.Put("A", "Foo2") assert.Equal(t, "Foo2", cache.Get("A")) + assert.Equal(t, 4, cache.Size()) cache.Put("E", "Epsi") assert.Equal(t, "Epsi", cache.Get("E")) assert.Equal(t, "Foo2", cache.Get("A")) assert.Nil(t, cache.Get("B")) // Oldest, should be evicted + assert.Equal(t, 4, cache.Size()) // Access C, D is now LRU cache.Get("C") cache.Put("F", "Felp") assert.Nil(t, cache.Get("D")) + assert.Equal(t, 4, cache.Size()) cache.Delete("A") assert.Nil(t, cache.Get("A")) + assert.Equal(t, 3, cache.Size()) } func TestGenerics(t *testing.T) { + t.Parallel() + key := keyType{ dummyString: "some random key", dummyInt: 59, @@ -90,20 +111,31 @@ dummyString: "some other random key", dummyInt: 56, })) + assert.Equal(t, 1, cache.Size()) + + cache.Put(key, "some other random value") + assert.Equal(t, "some other random value", cache.Get(key)) + assert.Equal(t, 1, cache.Size()) } func TestLRUWithTTL(t *testing.T) { + t.Parallel() + + timeSource := clock.NewEventTimeSource() cache := New(5, &Options{ - TTL: time.Millisecond * 100, + TTL: time.Millisecond * 100, + TimeSource: timeSource, }) cache.Put("A", "foo") assert.Equal(t, "foo", cache.Get("A")) - time.Sleep(time.Millisecond * 300) + timeSource.Advance(time.Millisecond * 300) assert.Nil(t, 
cache.Get("A")) assert.Equal(t, 0, cache.Size()) } func TestLRUCacheConcurrentAccess(t *testing.T) { + t.Parallel() + cache := NewLRU(5) values := map[string]string{ "A": "foo", @@ -155,105 +187,140 @@ } func TestTTL(t *testing.T) { + t.Parallel() + + timeSource := clock.NewEventTimeSource() cache := New(5, &Options{ - TTL: time.Millisecond * 50, + TTL: time.Millisecond * 50, + TimeSource: timeSource, }) cache.Put("A", t) assert.Equal(t, t, cache.Get("A")) - time.Sleep(time.Millisecond * 100) + timeSource.Advance(time.Millisecond * 100) assert.Nil(t, cache.Get("A")) } func TestTTLWithPin(t *testing.T) { + t.Parallel() + + timeSource := clock.NewEventTimeSource() cache := New(5, &Options{ - TTL: time.Millisecond * 50, - Pin: true, + TTL: time.Millisecond * 50, + Pin: true, + TimeSource: timeSource, }) _, err := cache.PutIfNotExist("A", t) assert.NoError(t, err) assert.Equal(t, t, cache.Get("A")) - time.Sleep(time.Millisecond * 100) + assert.Equal(t, 1, cache.Size()) + timeSource.Advance(time.Millisecond * 100) assert.Equal(t, t, cache.Get("A")) + assert.Equal(t, 1, cache.Size()) // release 3 time since put if not exist also increase the counter cache.Release("A") cache.Release("A") cache.Release("A") assert.Nil(t, cache.Get("A")) + assert.Equal(t, 0, cache.Size()) } func TestMaxSizeWithPin_MidItem(t *testing.T) { + t.Parallel() + + timeSource := clock.NewEventTimeSource() cache := New(2, &Options{ - TTL: time.Millisecond * 50, - Pin: true, + TTL: time.Millisecond * 50, + Pin: true, + TimeSource: timeSource, }) _, err := cache.PutIfNotExist("A", t) assert.NoError(t, err) + assert.Equal(t, 1, cache.Size()) _, err = cache.PutIfNotExist("B", t) assert.NoError(t, err) + assert.Equal(t, 2, cache.Size()) _, err = cache.PutIfNotExist("C", t) assert.Error(t, err) + assert.Equal(t, 2, cache.Size()) assert.Equal(t, t, cache.Get("A")) cache.Release("A") // get will also increase the ref count assert.Equal(t, t, cache.Get("B")) cache.Release("B") // get will also increase the ref count + assert.Equal(t, 2, cache.Size()) cache.Release("B") // B's ref count is 0 _, err = cache.PutIfNotExist("C", t) assert.NoError(t, err) assert.Equal(t, t, cache.Get("C")) cache.Release("C") // get will also increase the ref count + assert.Equal(t, 2, cache.Size()) cache.Release("A") // A's ref count is 0 cache.Release("C") // C's ref count is 0 + assert.Equal(t, 2, cache.Size()) - time.Sleep(time.Millisecond * 100) + timeSource.Advance(time.Millisecond * 100) assert.Nil(t, cache.Get("A")) assert.Nil(t, cache.Get("B")) assert.Nil(t, cache.Get("C")) + assert.Equal(t, 0, cache.Size()) } func TestMaxSizeWithPin_LastItem(t *testing.T) { + t.Parallel() + + timeSource := clock.NewEventTimeSource() cache := New(2, &Options{ - TTL: time.Millisecond * 50, - Pin: true, + TTL: time.Millisecond * 50, + Pin: true, + TimeSource: timeSource, }) _, err := cache.PutIfNotExist("A", t) assert.NoError(t, err) + assert.Equal(t, 1, cache.Size()) _, err = cache.PutIfNotExist("B", t) assert.NoError(t, err) + assert.Equal(t, 2, cache.Size()) _, err = cache.PutIfNotExist("C", t) assert.Error(t, err) + assert.Equal(t, 2, cache.Size()) assert.Equal(t, t, cache.Get("A")) cache.Release("A") // get will also increase the ref count assert.Equal(t, t, cache.Get("B")) cache.Release("B") // get will also increase the ref count + assert.Equal(t, 2, cache.Size()) cache.Release("A") // A's ref count is 0 _, err = cache.PutIfNotExist("C", t) assert.NoError(t, err) assert.Equal(t, t, cache.Get("C")) cache.Release("C") // get will also increase the ref count + 
assert.Equal(t, 2, cache.Size()) cache.Release("B") // B's ref count is 0 cache.Release("C") // C's ref count is 0 + assert.Equal(t, 2, cache.Size()) - time.Sleep(time.Millisecond * 100) + timeSource.Advance(time.Millisecond * 100) assert.Nil(t, cache.Get("A")) assert.Nil(t, cache.Get("B")) assert.Nil(t, cache.Get("C")) + assert.Equal(t, 0, cache.Size()) } func TestIterator(t *testing.T) { + t.Parallel() + expected := map[string]string{ "A": "Alpha", "B": "Beta", @@ -287,6 +354,8 @@ } func TestZeroSizeCache(t *testing.T) { + t.Parallel() + cache := NewLRU(0) _, err := cache.PutIfNotExist("A", t) assert.NoError(t, err) @@ -302,3 +371,247 @@ assert.Nil(t, err) assert.Equal(t, 0, cache.Size()) } + +func TestCache_ItemSizeTooLarge(t *testing.T) { + t.Parallel() + + maxTotalBytes := 10 + cache := NewLRU(maxTotalBytes) + + res := cache.Put(uuid.New(), &testEntryWithCacheSize{maxTotalBytes}) + assert.Equal(t, res, nil) + assert.Equal(t, 10, cache.Size()) + + res, err := cache.PutIfNotExist(uuid.New(), &testEntryWithCacheSize{maxTotalBytes + 1}) + assert.Equal(t, err, ErrCacheItemTooLarge) + assert.Equal(t, res, nil) + assert.Equal(t, 10, cache.Size()) + +} + +func TestCache_ItemHasCacheSizeDefined(t *testing.T) { + t.Parallel() + + maxTotalBytes := 10 + cache := NewLRU(maxTotalBytes) + + numPuts := rand.Intn(1024) + + startWG := sync.WaitGroup{} + endWG := sync.WaitGroup{} + + startWG.Add(numPuts) + endWG.Add(numPuts) + + go func() { + startWG.Wait() + assert.True(t, cache.Size() < maxTotalBytes) + }() + for i := 0; i < numPuts; i++ { + go func() { + defer endWG.Done() + + startWG.Wait() + key := uuid.New() + cache.Put(key, &testEntryWithCacheSize{rand.Int()}) + }() + startWG.Done() + } + + endWG.Wait() +} + +func TestCache_ItemHasCacheSizeDefined_PutWithNewKeys(t *testing.T) { + t.Parallel() + + maxTotalBytes := 10 + cache := NewLRU(maxTotalBytes) + + // Put with new key and value size greater than cache size, should not be added to cache + cache.Put(uuid.New(), &testEntryWithCacheSize{15}) + assert.Equal(t, 0, cache.Size()) + + // Put with new key and value size less than cache size, should be added to cache + cache.Put(uuid.New(), &testEntryWithCacheSize{5}) + assert.Equal(t, 5, cache.Size()) + + // Put with new key and value size less than cache size, should evict 0 ref items and added to cache + cache.Put(uuid.New(), &testEntryWithCacheSize{10}) + assert.Equal(t, 10, cache.Size()) + + // Put with new key and value size less than cache size, should evict 0 ref items until enough spaces and added to cache + cache.Put(uuid.New(), &testEntryWithCacheSize{3}) + assert.Equal(t, 3, cache.Size()) + cache.Put(uuid.New(), &testEntryWithCacheSize{7}) + assert.Equal(t, 10, cache.Size()) +} + +func TestCache_ItemHasCacheSizeDefined_PutWithSameKeyAndDifferentSizes(t *testing.T) { + t.Parallel() + + maxTotalBytes := 10 + cache := NewLRU(maxTotalBytes) + + key1 := "A" + cache.Put(key1, &testEntryWithCacheSize{4}) + assert.Equal(t, 4, cache.Size()) + + key2 := "B" + cache.Put(key2, &testEntryWithCacheSize{4}) + // 4 + 4 = 8 < 10 should not evict any items + assert.Equal(t, 8, cache.Size()) + // put same key with smaller size, should not evict any items + cache.Put(key2, &testEntryWithCacheSize{3}) + assert.Equal(t, cache.Get(key1), &testEntryWithCacheSize{4}) + // 8 - 4 + 3 = 7 < 10, should not evict any items + assert.Equal(t, 7, cache.Size()) + + // put same key with larger size, but below cache size, should not evict any items + cache.Put(key2, &testEntryWithCacheSize{6}) + // 7 - 3 + 6 = 10 =< 10, should 
not evict any items + assert.Equal(t, 10, cache.Size()) + // get key1 after to make it the most recently used + assert.Equal(t, cache.Get(key2), &testEntryWithCacheSize{6}) + assert.Equal(t, cache.Get(key1), &testEntryWithCacheSize{4}) + + // put same key with larger size, but take all cache size, should evict all items + cache.Put(key2, &testEntryWithCacheSize{10}) + // 10 - 4 - 6 + 10 = 10 =< 10, should evict all items + assert.Equal(t, 10, cache.Size()) + assert.Equal(t, cache.Get(key1), nil) + assert.Equal(t, cache.Get(key2), &testEntryWithCacheSize{10}) +} + +func TestCache_ItemHasCacheSizeDefined_PutWithSameKey(t *testing.T) { + t.Parallel() + + maxTotalBytes := 10 + cache := NewLRU(maxTotalBytes) + + key := uuid.New() + + // Put with same key and value size greater than cache size, should not be added to cache + cache.Put(key, &testEntryWithCacheSize{15}) + assert.Equal(t, 0, cache.Size()) + + // Put with same key and value size less than cache size, should be added to cache + cache.Put(key, &testEntryWithCacheSize{5}) + assert.Equal(t, 5, cache.Size()) + + // Put with same key and value size less than cache size, should be evicted until enough space and added to cache + cache.Put(key, &testEntryWithCacheSize{10}) + assert.Equal(t, 10, cache.Size()) + + // Put with same key and value size less than cache size, should be evicted until enough space and added to cache + cache.Put(key, &testEntryWithCacheSize{3}) + assert.Equal(t, 3, cache.Size()) + cache.Put(key, &testEntryWithCacheSize{7}) + assert.Equal(t, 7, cache.Size()) +} + +func TestCache_ItemHasCacheSizeDefined_PutIfNotExistWithNewKeys(t *testing.T) { + t.Parallel() + + maxTotalBytes := 10 + cache := NewLRU(maxTotalBytes) + + // PutIfNotExist with new keys with size greater than cache size, should return error and not add to cache + val, err := cache.PutIfNotExist(uuid.New(), &testEntryWithCacheSize{15}) + assert.Equal(t, ErrCacheItemTooLarge, err) + assert.Nil(t, val) + assert.Equal(t, 0, cache.Size()) + + // PutIfNotExist with new keys with size less than cache size, should add to cache + val, err = cache.PutIfNotExist(uuid.New(), &testEntryWithCacheSize{5}) + assert.NoError(t, err) + assert.Equal(t, &testEntryWithCacheSize{5}, val) + assert.Equal(t, 5, cache.Size()) + + // PutIfNotExist with new keys with size less than cache size, should evict item and add to cache + val, err = cache.PutIfNotExist(uuid.New(), &testEntryWithCacheSize{10}) + assert.NoError(t, err) + assert.Equal(t, &testEntryWithCacheSize{10}, val) + assert.Equal(t, 10, cache.Size()) + + // PutIfNotExist with new keys with size less than cache size, should evict item and add to cache + val, err = cache.PutIfNotExist(uuid.New(), &testEntryWithCacheSize{5}) + assert.NoError(t, err) + assert.Equal(t, &testEntryWithCacheSize{5}, val) + assert.Equal(t, 5, cache.Size()) +} + +func TestCache_ItemHasCacheSizeDefined_PutIfNotExistWithSameKey(t *testing.T) { + t.Parallel() + + maxTotalBytes := 10 + cache := NewLRU(maxTotalBytes) + key := uuid.New().String() + + // PutIfNotExist with new keys with size greater than cache size, should return error and not add to cache + val, err := cache.PutIfNotExist(key, &testEntryWithCacheSize{15}) + assert.Equal(t, ErrCacheItemTooLarge, err) + assert.Nil(t, val) + assert.Equal(t, 0, cache.Size()) + + // PutIfNotExist with new keys with size less than cache size, should add to cache + val, err = cache.PutIfNotExist(key, &testEntryWithCacheSize{5}) + assert.NoError(t, err) + assert.Equal(t, &testEntryWithCacheSize{5}, val) + 
assert.Equal(t, 5, cache.Size()) + + // PutIfNotExist with same keys with size less than cache size, should not be added to cache + val, err = cache.PutIfNotExist(key, &testEntryWithCacheSize{10}) + assert.NoError(t, err) + assert.Equal(t, &testEntryWithCacheSize{5}, val) + assert.Equal(t, 5, cache.Size()) +} + +func TestCache_PutIfNotExistWithNewKeys_Pin(t *testing.T) { + t.Parallel() + + maxTotalBytes := 10 + cache := New(maxTotalBytes, &Options{Pin: true}) + + val, err := cache.PutIfNotExist(uuid.New(), &testEntryWithCacheSize{15}) + assert.Equal(t, ErrCacheItemTooLarge, err) + assert.Nil(t, val) + assert.Equal(t, 0, cache.Size()) + + val, err = cache.PutIfNotExist(uuid.New(), &testEntryWithCacheSize{3}) + assert.NoError(t, err) + assert.Equal(t, &testEntryWithCacheSize{3}, val) + assert.Equal(t, 3, cache.Size()) + + val, err = cache.PutIfNotExist(uuid.New(), &testEntryWithCacheSize{7}) + assert.NoError(t, err) + assert.Equal(t, &testEntryWithCacheSize{7}, val) + assert.Equal(t, 10, cache.Size()) + + val, err = cache.PutIfNotExist(uuid.New(), &testEntryWithCacheSize{8}) + assert.Equal(t, ErrCacheFull, err) + assert.Nil(t, val) + assert.Equal(t, 10, cache.Size()) +} + +func TestCache_PutIfNotExistWithSameKeys_Pin(t *testing.T) { + t.Parallel() + + maxTotalBytes := 10 + cache := New(maxTotalBytes, &Options{Pin: true}) + + key := uuid.New() + val, err := cache.PutIfNotExist(key, &testEntryWithCacheSize{15}) + assert.Equal(t, ErrCacheItemTooLarge, err) + assert.Nil(t, val) + assert.Equal(t, 0, cache.Size()) + + val, err = cache.PutIfNotExist(key, &testEntryWithCacheSize{3}) + assert.NoError(t, err) + assert.Equal(t, &testEntryWithCacheSize{3}, val) + assert.Equal(t, 3, cache.Size()) + + val, err = cache.PutIfNotExist(key, &testEntryWithCacheSize{7}) + assert.NoError(t, err) + assert.Equal(t, &testEntryWithCacheSize{3}, val) + assert.Equal(t, 3, cache.Size()) +} diff -Nru temporal-1.21.5-1/src/common/cache/simple.go temporal-1.22.5/src/common/cache/simple.go --- temporal-1.21.5-1/src/common/cache/simple.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/cache/simple.go 2024-02-23 09:45:43.000000000 +0000 @@ -102,7 +102,7 @@ } return &simple{ iterateList: list.New(), - accessMap: make(map[interface{}]*list.Element, opts.InitialCapacity), + accessMap: make(map[interface{}]*list.Element), rmFunc: opts.RemovedFunc, } } diff -Nru temporal-1.21.5-1/src/common/cache/size_getter.go temporal-1.22.5/src/common/cache/size_getter.go --- temporal-1.21.5-1/src/common/cache/size_getter.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/cache/size_getter.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,42 @@ +// The MIT License +// +// Copyright (c) 2023 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +//go:generate mockgen -copyright_file ../../LICENSE -package $GOPACKAGE -source $GOFILE -destination size_getter_mock.go + +package cache + +// SizeGetter is an interface that can be implemented by cache entries to provide their size +type ( + SizeGetter interface { + CacheSize() int + } +) + +func getSize(value interface{}) int { + if v, ok := value.(SizeGetter); ok { + return v.CacheSize() + } + // if the object does not implement the CacheSize() method, assume the cache is count-limited and use a size of 1 + return 1 +} diff -Nru temporal-1.21.5-1/src/common/cache/size_getter_mock.go temporal-1.22.5/src/common/cache/size_getter_mock.go --- temporal-1.21.5-1/src/common/cache/size_getter_mock.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/cache/size_getter_mock.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,72 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Code generated by MockGen. DO NOT EDIT. +// Source: size_getter.go + +// Package cache is a generated GoMock package. +package cache + +import ( + reflect "reflect" + + gomock "github.com/golang/mock/gomock" +) + +// MockSizeGetter is a mock of SizeGetter interface. +type MockSizeGetter struct { + ctrl *gomock.Controller + recorder *MockSizeGetterMockRecorder +} + +// MockSizeGetterMockRecorder is the mock recorder for MockSizeGetter. +type MockSizeGetterMockRecorder struct { + mock *MockSizeGetter +} + +// NewMockSizeGetter creates a new mock instance. +func NewMockSizeGetter(ctrl *gomock.Controller) *MockSizeGetter { + mock := &MockSizeGetter{ctrl: ctrl} + mock.recorder = &MockSizeGetterMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockSizeGetter) EXPECT() *MockSizeGetterMockRecorder { + return m.recorder +} + +// CacheSize mocks base method.
+func (m *MockSizeGetter) CacheSize() int { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CacheSize") + ret0, _ := ret[0].(int) + return ret0 +} + +// CacheSize indicates an expected call of CacheSize. +func (mr *MockSizeGetterMockRecorder) CacheSize() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CacheSize", reflect.TypeOf((*MockSizeGetter)(nil).CacheSize)) +} diff -Nru temporal-1.21.5-1/src/common/clientCache.go temporal-1.22.5/src/common/clientCache.go --- temporal-1.21.5-1/src/common/clientCache.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/clientCache.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,117 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- -package common - -import ( - "sync" -) - -type ( - // ClientCache store initialized clients - ClientCache interface { - GetClientForKey(key string) (interface{}, error) - GetClientForClientKey(clientKey string) (interface{}, error) - GetAllClients() ([]interface{}, error) - } - - keyResolver interface { - Lookup(key string) (string, error) - GetAllAddresses() ([]string, error) - } - - clientProvider func(string) (interface{}, error) - - clientCacheImpl struct { - keyResolver keyResolver - clientProvider clientProvider - - cacheLock sync.RWMutex - clients map[string]interface{} - } -) - -// NewClientCache creates a new client cache based on membership -func NewClientCache( - keyResolver keyResolver, - clientProvider clientProvider, -) ClientCache { - - return &clientCacheImpl{ - keyResolver: keyResolver, - clientProvider: clientProvider, - - clients: make(map[string]interface{}), - } -} - -func (c *clientCacheImpl) GetClientForKey(key string) (interface{}, error) { - clientKey, err := c.keyResolver.Lookup(key) - if err != nil { - return nil, err - } - - return c.GetClientForClientKey(clientKey) -} - -func (c *clientCacheImpl) GetClientForClientKey(clientKey string) (interface{}, error) { - c.cacheLock.RLock() - client, ok := c.clients[clientKey] - c.cacheLock.RUnlock() - if ok { - return client, nil - } - - c.cacheLock.Lock() - defer c.cacheLock.Unlock() - - client, ok = c.clients[clientKey] - if ok { - return client, nil - } - - client, err := c.clientProvider(clientKey) - if err != nil { - return nil, err - } - c.clients[clientKey] = client - return client, nil -} - -func (c *clientCacheImpl) GetAllClients() ([]interface{}, error) { - var result []interface{} - allAddresses, err := c.keyResolver.GetAllAddresses() - if err != nil { - return nil, err - } - for _, addr := range allAddresses { - client, err := c.GetClientForClientKey(addr) - if err != nil { - return nil, err - } - result = append(result, client) - } - - return result, nil -} diff -Nru temporal-1.21.5-1/src/common/client_cache.go temporal-1.22.5/src/common/client_cache.go --- temporal-1.21.5-1/src/common/client_cache.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/client_cache.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,117 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package common + +import ( + "sync" +) + +type ( + // ClientCache store initialized clients + ClientCache interface { + GetClientForKey(key string) (interface{}, error) + GetClientForClientKey(clientKey string) (interface{}, error) + GetAllClients() ([]interface{}, error) + } + + keyResolver interface { + Lookup(key string) (string, error) + GetAllAddresses() ([]string, error) + } + + clientProvider func(string) (interface{}, error) + + clientCacheImpl struct { + keyResolver keyResolver + clientProvider clientProvider + + cacheLock sync.RWMutex + clients map[string]interface{} + } +) + +// NewClientCache creates a new client cache based on membership +func NewClientCache( + keyResolver keyResolver, + clientProvider clientProvider, +) ClientCache { + + return &clientCacheImpl{ + keyResolver: keyResolver, + clientProvider: clientProvider, + + clients: make(map[string]interface{}), + } +} + +func (c *clientCacheImpl) GetClientForKey(key string) (interface{}, error) { + clientKey, err := c.keyResolver.Lookup(key) + if err != nil { + return nil, err + } + + return c.GetClientForClientKey(clientKey) +} + +func (c *clientCacheImpl) GetClientForClientKey(clientKey string) (interface{}, error) { + c.cacheLock.RLock() + client, ok := c.clients[clientKey] + c.cacheLock.RUnlock() + if ok { + return client, nil + } + + c.cacheLock.Lock() + defer c.cacheLock.Unlock() + + client, ok = c.clients[clientKey] + if ok { + return client, nil + } + + client, err := c.clientProvider(clientKey) + if err != nil { + return nil, err + } + c.clients[clientKey] = client + return client, nil +} + +func (c *clientCacheImpl) GetAllClients() ([]interface{}, error) { + var result []interface{} + allAddresses, err := c.keyResolver.GetAllAddresses() + if err != nil { + return nil, err + } + for _, addr := range allAddresses { + client, err := c.GetClientForClientKey(addr) + if err != nil { + return nil, err + } + result = append(result, client) + } + + return result, nil +} diff -Nru temporal-1.21.5-1/src/common/clock/event_time_source.go temporal-1.22.5/src/common/clock/event_time_source.go --- temporal-1.21.5-1/src/common/clock/event_time_source.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/clock/event_time_source.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,162 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package clock + +import ( + "sync" + "time" +) + +type ( + // EventTimeSource is a fake TimeSource. Unlike other fake clock implementations, the methods are synchronous, so + // when you call Advance or Update, all triggered timers from AfterFunc will fire before the method returns, in the + // same goroutine. + EventTimeSource struct { + mu sync.RWMutex + now time.Time + timers []*fakeTimer + } + + // fakeTimer is a fake implementation of [Timer]. + fakeTimer struct { + // need a link to the parent timeSource for synchronization + timeSource *EventTimeSource + // deadline for when the timer should fire + deadline time.Time + // callback to call when the timer fires + callback func() + // done is true if the timer has fired or been stopped + done bool + // index of the timer in the parent timeSource + index int + } +) + +// NewEventTimeSource returns a EventTimeSource with the current time set to Unix zero: 1970-01-01 00:00:00 +0000 UTC. +func NewEventTimeSource() *EventTimeSource { + return &EventTimeSource{ + now: time.Unix(0, 0), + } +} + +// Now return the current time. +func (ts *EventTimeSource) Now() time.Time { + ts.mu.RLock() + defer ts.mu.RUnlock() + + return ts.now +} + +// AfterFunc return a timer that will fire after the specified duration. It is important to note that the timeSource is +// locked while the callback is called. This means that you must be cautious about calling any other mutating methods on +// the timeSource from within the callback. Doing so will probably result in a deadlock. To avoid this, you may want to +// wrap all such calls in a goroutine. If the duration is non-positive, the callback will fire immediately before +// AfterFunc returns. +func (ts *EventTimeSource) AfterFunc(d time.Duration, f func()) Timer { + ts.mu.Lock() + defer ts.mu.Unlock() + + if d < 0 { + d = 0 + } + t := &fakeTimer{timeSource: ts, deadline: ts.now.Add(d), callback: f} + t.index = len(ts.timers) + ts.timers = append(ts.timers, t) + ts.fireTimers() + + return t +} + +// Update the fake current time. It returns the timeSource so that you can chain calls like this: +// timeSource := NewEventTimeSource().Update(time.Now()) +func (ts *EventTimeSource) Update(now time.Time) *EventTimeSource { + ts.mu.Lock() + defer ts.mu.Unlock() + + ts.now = now + ts.fireTimers() + return ts +} + +// Advance the timer by the specified duration. +func (ts *EventTimeSource) Advance(d time.Duration) { + ts.mu.Lock() + defer ts.mu.Unlock() + + ts.now = ts.now.Add(d) + ts.fireTimers() +} + +// fireTimers fires all timers that are ready. +func (ts *EventTimeSource) fireTimers() { + n := 0 + for _, t := range ts.timers { + if t.deadline.After(ts.now) { + ts.timers[n] = t + t.index = n + n++ + } else { + t.callback() + t.done = true + } + } + ts.timers = ts.timers[:n] +} + +// Reset the timer to fire after the specified duration. Returns true if the timer was active. +func (t *fakeTimer) Reset(d time.Duration) bool { + t.timeSource.mu.Lock() + defer t.timeSource.mu.Unlock() + + if t.done { + return false + } + + t.deadline = t.timeSource.now.Add(d) + t.timeSource.fireTimers() + return true +} + +// Stop the timer. Returns true if the timer was active. 
+func (t *fakeTimer) Stop() bool { + t.timeSource.mu.Lock() + defer t.timeSource.mu.Unlock() + + if t.done { + return false + } + + i := t.index + timers := t.timeSource.timers + + timers[i] = timers[len(timers)-1] // swap with last timer + timers[i].index = i // update index of swapped timer + timers = timers[:len(timers)-1] // shrink list + + t.timeSource.timers = timers + t.done = true // ensure that the timer is not reused + + return true +} diff -Nru temporal-1.21.5-1/src/common/clock/event_time_source_test.go temporal-1.22.5/src/common/clock/event_time_source_test.go --- temporal-1.21.5-1/src/common/clock/event_time_source_test.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/clock/event_time_source_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,200 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package clock_test + +import ( + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "go.temporal.io/server/common/clock" +) + +// event is a helper to verify how many times a callback was triggered. Because callbacks are triggered synchronously +// with calls to EventTimeSource.Advance, we don't need any further synchronization. +type event struct { + t *testing.T + count int +} + +// Fire is the callback to be triggered. +func (e *event) Fire() { + e.count++ +} + +// AssertFiredOnce asserts that the callback was triggered exactly once. +func (e *event) AssertFiredOnce(msg string) { + e.t.Helper() + assert.Equal(e.t, 1, e.count, msg) +} + +// AssertNotFired asserts that the callback was not triggered. +func (e *event) AssertNotFired(msg string) { + e.t.Helper() + assert.Zero(e.t, e.count, msg) +} + +func ExampleEventTimeSource() { + // Create a new fake timeSource. + source := clock.NewEventTimeSource() + + // Create a timer which fires after 1 second. + source.AfterFunc(time.Second, func() { + fmt.Println("timer fired") + }) + + // Advance the time source by 1 second. + fmt.Println("advancing time source by 1 second") + source.Advance(time.Second) + fmt.Println("time source advanced") + + // Output: + // advancing time source by 1 second + // timer fired + // time source advanced +} + +func TestEventTimeSource_AfterFunc(t *testing.T) { + t.Parallel() + + // Create a new fake time source and an event to fire. 
+ source := clock.NewEventTimeSource() + ev := event{t: t} + + // Create a timer which fires after 2ns. + source.AfterFunc(2, ev.Fire) + + // Advance the time source by 1ns. + source.Advance(1) + ev.AssertNotFired( + "Advancing the time source should not fire the timer if its deadline still has not been reached", + ) + + // Advance the time source by 1ns more. + source.Advance(1) + ev.AssertFiredOnce("Advancing a time source past a timer's deadline should fire the timer") +} + +func TestEventTimeSource_AfterFunc_Reset(t *testing.T) { + t.Parallel() + + // Create a new fake time source and two events to fire. + source := clock.NewEventTimeSource() + ev1 := event{t: t} + ev2 := event{t: t} + + // Create a timer for each event which fires after 2ns. + timer := source.AfterFunc(2, ev1.Fire) + source.AfterFunc(2, ev2.Fire) + + // Advance the time source by 1ns and verify that neither timer has fired. + source.Advance(1) + ev1.AssertNotFired("Timer should not fire before deadline") + ev2.AssertNotFired("Timer should not fire before deadline") + + // Reset the first timer to fire after an additional 2ns. + assert.True(t, timer.Reset(2), "`Reset` should return true if the timer was not already stopped") + + // Advance the time source by 1ns and verify that the first timer has not fired but the second timer has. + source.Advance(1) + ev1.AssertNotFired("Timer which was reset should not fire after original deadline but before new deadline") + ev2.AssertFiredOnce("Timer which was not reset should fire after deadline") + + // Advance the time source by 1ns more and verify that the reset timer has fired. + source.Advance(1) + ev1.AssertFiredOnce("The reset timer should fire after its new deadline") + + // Reset the first timer and advance the time source past the new deadline to verify that the timer does not fire + // again. + assert.False(t, timer.Reset(1), "`Reset` should return false if the timer was already stopped") + source.Advance(1) + ev1.AssertFiredOnce("The timer should never fire twice, even if it was reset") +} + +func TestEventTimeSource_AfterFunc_Stop(t *testing.T) { + t.Parallel() + + // Create a new fake time source and two events to fire. + source := clock.NewEventTimeSource() + ev1 := event{t: t} + ev2 := event{t: t} + + // Create a timer for each event which fires after 1ns. + timer := source.AfterFunc(1, ev1.Fire) + source.AfterFunc(1, ev2.Fire) + + // Stop the first timer. + assert.True(t, timer.Stop(), "`Stop` should return true if the timer was not already stopped") + + // Advance the time source by 1ns and verify that the first timer has not fired and the second timer has. + source.Advance(1) + ev1.AssertNotFired("A timer should not fire if it was already stopped") + ev2.AssertFiredOnce("A timer which was not stopped should fire after its deadline") + + // Verify that subsequent calls to `Stop` return false. + assert.False(t, timer.Stop(), "`Stop` return false if the timer was already stopped") +} + +func TestEventTimeSource_AfterFunc_NegativeDelay(t *testing.T) { + t.Parallel() + + // Create a new fake time source and one event to fire. + source := clock.NewEventTimeSource() + ev1 := event{t: t} + + // Create a timer which fires after -1ns. This should fire immediately. + timer := source.AfterFunc(-1, ev1.Fire) + + // Verify that the timer has fired. + ev1.AssertFiredOnce("A timer with a negative delay should fire immediately") + + // Verify that the timer is stopped. 
+ assert.False(t, timer.Stop(), "`Stop` should return false if the timer was already stopped") +} + +func TestEventTimeSource_Update(t *testing.T) { + t.Parallel() + + // Create a new fake time source and two events to fire. + source := clock.NewEventTimeSource() + ev1 := event{t: t} + ev2 := event{t: t} + + // Create a timer for each event which fires after 1ns. + source.AfterFunc(1, ev1.Fire) + source.AfterFunc(1, ev2.Fire) + + // Verify that the time source starts at Unix epoch. + assert.Equal( + t, time.Unix(0, 0), source.Now(), "The fake time source should start at the unix epoch", + ) + + // Update to move the time source forward by 1ns. + source.Update(time.Unix(0, 1)) + assert.Equal(t, time.Unix(0, 1), source.Now()) + ev1.AssertFiredOnce("Timer should fire after deadline") + ev2.AssertFiredOnce("Timer should fire after deadline") +} diff -Nru temporal-1.21.5-1/src/common/clock/hybrid_logical_clock/hybrid_logical_clock.go temporal-1.22.5/src/common/clock/hybrid_logical_clock/hybrid_logical_clock.go --- temporal-1.21.5-1/src/common/clock/hybrid_logical_clock/hybrid_logical_clock.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/clock/hybrid_logical_clock/hybrid_logical_clock.go 2024-02-23 09:45:43.000000000 +0000 @@ -68,7 +68,7 @@ return 0 } -// Compare 2 clocks, returns 0 if a == b, -1 if a > b, 1 if a < b +// Compare 2 Clocks, returns 0 if a == b, -1 if a > b, 1 if a < b func Compare(a Clock, b Clock) int { if a.WallClock == b.WallClock { if a.Version == b.Version { @@ -89,7 +89,7 @@ return Compare(a, b) > 0 } -// Max returns the maximum of two clocks +// Max returns the maximum of two Clocks func Max(a Clock, b Clock) Clock { if Compare(a, b) > 0 { return b @@ -97,7 +97,7 @@ return a } -// Min returns the minimum of two clocks +// Min returns the minimum of two Clocks func Min(a Clock, b Clock) Clock { if Compare(a, b) > 0 { return a @@ -105,17 +105,36 @@ return b } -// Equal returns whether two clocks are equal +// Equal returns whether two Clocks are equal func Equal(a Clock, b Clock) bool { return Compare(a, b) == 0 } -// Ptr returns a pointer to a clock (to ease inlining the APIs in this package). +// Ptr returns a pointer to a Clock (to ease inlining the APIs in this package). func Ptr(c Clock) *Clock { return &c } -// UTC returns UTC time of a clock in millisecond resolution. +// UTC returns a Time from a Clock in millisecond resolution. The Time's Location is set to UTC. func UTC(c Clock) time.Time { return time.Unix(c.WallClock/1000, c.WallClock%1000*1000000).UTC() } + +// UTCPtr returns a Time from a *Clock in millisecond resolution. The Time's Location is set to UTC. +// If the argument is nil, it returns the Unix epoch. +func UTCPtr(c *Clock) time.Time { + if c == nil { + return UTC(Clock{}) + } + return UTC(*c) +} + +// Since returns time.Since(UTC(c)) +func Since(c Clock) time.Duration { + return time.Since(UTC(c)) +} + +// SincePtr returns time.Since(UTCPtr(c)) +func SincePtr(c *Clock) time.Duration { + return time.Since(UTCPtr(c)) +} diff -Nru temporal-1.21.5-1/src/common/clock/time_source.go temporal-1.22.5/src/common/clock/time_source.go --- temporal-1.21.5-1/src/common/clock/time_source.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/clock/time_source.go 2024-02-23 09:45:43.000000000 +0000 @@ -22,58 +22,44 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. +// Package clock provides extensions to the [time] package.
package clock import ( - "sync/atomic" "time" - - // clockwork is not currently used but it is useful to have the option to use this in testing code - // this comment is needed to stop lint from complaining about this _ import - _ "github.com/jonboulle/clockwork" ) type ( - // TimeSource is an interface for any - // entity that provides the current - // time. Its primarily used to mock - // out timesources in unit test + // TimeSource is an interface to make it easier to test code that uses time. TimeSource interface { Now() time.Time + AfterFunc(d time.Duration, f func()) Timer } - // RealTimeSource serves real wall-clock time - RealTimeSource struct{} - - // EventTimeSource serves fake controlled time - EventTimeSource struct { - now int64 + // Timer is a timer returned by TimeSource.AfterFunc. Unlike the timers returned by [time.NewTimer] or time.Ticker, + // this timer does not have a channel. That is because the callback already reacts to the timer firing. + Timer interface { + // Reset changes the expiration time of the timer. It returns true if the timer had been active, false if the + // timer had expired or been stopped. + Reset(d time.Duration) bool + // Stop prevents the Timer from firing. It returns true if the call stops the timer, false if the timer has + // already expired or been stopped. + Stop() bool } + // RealTimeSource is a timeSource that uses the real wall timeSource time. The zero value is valid. + RealTimeSource struct{} ) -// NewRealTimeSource returns a time source that servers -// real wall clock time -func NewRealTimeSource() *RealTimeSource { - return &RealTimeSource{} +// NewRealTimeSource returns a timeSource that uses the real wall timeSource time. +func NewRealTimeSource() RealTimeSource { + return RealTimeSource{} } -// Now return the real current time -func (ts *RealTimeSource) Now() time.Time { +// Now returns the current time, with the location set to UTC. +func (ts RealTimeSource) Now() time.Time { return time.Now().UTC() } -// NewEventTimeSource returns a time source that servers -// fake controlled time -func NewEventTimeSource() *EventTimeSource { - return &EventTimeSource{} -} - -// Now return the fake current time -func (ts *EventTimeSource) Now() time.Time { - return time.Unix(0, atomic.LoadInt64(&ts.now)).UTC() -} - -// Update update the fake current time -func (ts *EventTimeSource) Update(now time.Time) *EventTimeSource { - atomic.StoreInt64(&ts.now, now.UnixNano()) - return ts +// AfterFunc is a pass-through to time.AfterFunc. +func (ts RealTimeSource) AfterFunc(d time.Duration, f func()) Timer { + return time.AfterFunc(d, f) } diff -Nru temporal-1.21.5-1/src/common/clock/time_source_test.go temporal-1.22.5/src/common/clock/time_source_test.go --- temporal-1.21.5-1/src/common/clock/time_source_test.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/clock/time_source_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,53 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package clock_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "go.temporal.io/server/common/clock" +) + +func TestNewRealClock_Now(t *testing.T) { + t.Parallel() + + source := clock.NewRealTimeSource() + location := source.Now().Location() + assert.Equal(t, "UTC", location.String()) +} + +func TestNewRealClock_AfterFunc(t *testing.T) { + t.Parallel() + + source := clock.NewRealTimeSource() + ch := make(chan struct{}) + timer := source.AfterFunc(0, func() { + close(ch) + }) + + <-ch + assert.False(t, timer.Stop()) +} diff -Nru temporal-1.21.5-1/src/common/cluster/metadata.go temporal-1.22.5/src/common/cluster/metadata.go --- temporal-1.21.5-1/src/common/cluster/metadata.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/cluster/metadata.go 2024-02-23 09:45:43.000000000 +0000 @@ -35,6 +35,8 @@ "sync/atomic" "time" + "golang.org/x/exp/maps" + "go.temporal.io/server/common" "go.temporal.io/server/common/collection" "go.temporal.io/server/common/dynamicconfig" @@ -55,7 +57,6 @@ type ( Metadata interface { - common.Daemon common.Pingable // IsGlobalNamespaceEnabled whether the global namespace is enabled, @@ -81,6 +82,8 @@ GetFailoverVersionIncrement() int64 RegisterMetadataChangeCallback(callbackId any, cb CallbackFn) UnRegisterMetadataChangeCallback(callbackId any) + Start() + Stop() } CallbackFn func(oldClusterMetadata map[string]*ClusterInformation, newClusterMetadata map[string]*ClusterInformation) @@ -97,6 +100,8 @@ CurrentClusterName string `yaml:"currentClusterName"` // ClusterInformation contains all cluster names to corresponding information about that cluster ClusterInformation map[string]ClusterInformation `yaml:"clusterInformation"` + // Tag contains customized tag about the current cluster + Tags map[string]string `yaml:"tags"` } // ClusterInformation contains the information about each cluster which participated in cross DC @@ -106,8 +111,9 @@ // Address indicate the remote service address(Host:Port). Host can be DNS name. RPCAddress string `yaml:"rpcAddress"` // Cluster ID allows to explicitly set the ID of the cluster. Optional. - ClusterID string `yaml:"-"` - ShardCount int32 `yaml:"-"` // Ignore this field when loading config. + ClusterID string `yaml:"-"` + ShardCount int32 `yaml:"-"` // Ignore this field when loading config. + Tags map[string]string `yaml:"-"` // Ignore this field. Use cluster.Config.Tags for customized tags. 
// private field to track cluster information updates version int64 } @@ -429,9 +435,12 @@ case <-timer.C: for err := m.refreshClusterMetadata(ctx); err != nil; err = m.refreshClusterMetadata(ctx) { m.logger.Error("Error refreshing remote cluster metadata", tag.Error(err)) + refreshTimer := time.NewTimer(m.refreshDuration() / 2) + select { - case <-time.After(m.refreshDuration() / 2): + case <-refreshTimer.C: case <-ctx.Done(): + refreshTimer.Stop() return nil } } @@ -459,12 +468,14 @@ InitialFailoverVersion: newClusterInfo.InitialFailoverVersion, RPCAddress: newClusterInfo.RPCAddress, ShardCount: newClusterInfo.ShardCount, + Tags: newClusterInfo.Tags, version: newClusterInfo.version, } } else if newClusterInfo.version > oldClusterInfo.version { if newClusterInfo.Enabled == oldClusterInfo.Enabled && newClusterInfo.RPCAddress == oldClusterInfo.RPCAddress && - newClusterInfo.InitialFailoverVersion == oldClusterInfo.InitialFailoverVersion { + newClusterInfo.InitialFailoverVersion == oldClusterInfo.InitialFailoverVersion && + maps.Equal(newClusterInfo.Tags, oldClusterInfo.Tags) { // key cluster info does not change continue } @@ -474,6 +485,7 @@ InitialFailoverVersion: oldClusterInfo.InitialFailoverVersion, RPCAddress: oldClusterInfo.RPCAddress, ShardCount: oldClusterInfo.ShardCount, + Tags: oldClusterInfo.Tags, version: oldClusterInfo.version, } newEntries[clusterName] = &ClusterInformation{ @@ -481,6 +493,7 @@ InitialFailoverVersion: newClusterInfo.InitialFailoverVersion, RPCAddress: newClusterInfo.RPCAddress, ShardCount: newClusterInfo.ShardCount, + Tags: newClusterInfo.Tags, version: newClusterInfo.version, } } @@ -585,6 +598,7 @@ InitialFailoverVersion: getClusterResp.GetInitialFailoverVersion(), RPCAddress: getClusterResp.GetClusterAddress(), ShardCount: getClusterResp.GetHistoryShardCount(), + Tags: getClusterResp.GetTags(), version: getClusterResp.Version, } } diff -Nru temporal-1.21.5-1/src/common/cluster/metadata_test.go temporal-1.22.5/src/common/cluster/metadata_test.go --- temporal-1.21.5-1/src/common/cluster/metadata_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/cluster/metadata_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -53,6 +53,7 @@ failoverVersionIncrement int64 clusterName string secondClusterName string + thirdClusterName string } ) @@ -77,6 +78,7 @@ s.failoverVersionIncrement = 100 s.clusterName = uuid.New() s.secondClusterName = uuid.New() + s.thirdClusterName = uuid.New() clusterInfo := map[string]ClusterInformation{ s.clusterName: { @@ -93,6 +95,13 @@ ShardCount: 2, version: 1, }, + s.thirdClusterName: { + Enabled: true, + InitialFailoverVersion: int64(5), + RPCAddress: uuid.New(), + ShardCount: 1, + version: 1, + }, } s.metadata = NewMetadata( s.isGlobalNamespaceEnabled, @@ -143,7 +152,7 @@ s.metadata.RegisterMetadataChangeCallback( s, func(oldClusterMetadata map[string]*ClusterInformation, newClusterMetadata map[string]*ClusterInformation) { - s.Equal(2, len(newClusterMetadata)) + s.Equal(3, len(newClusterMetadata)) }) s.metadata.UnRegisterMetadataChangeCallback(s) @@ -166,12 +175,20 @@ newMetadata, ok = newClusterMetadata[s.secondClusterName] s.True(ok) s.Nil(newMetadata) + + oldMetadata, ok = oldClusterMetadata[s.thirdClusterName] + s.True(ok) + s.NotNil(oldMetadata) + newMetadata, ok = newClusterMetadata[s.thirdClusterName] + s.True(ok) + s.NotNil(newMetadata) } s.mockClusterMetadataStore.EXPECT().ListClusterMetadata(gomock.Any(), gomock.Any()).Return( &persistence.ListClusterMetadataResponse{ ClusterMetadata: 
[]*persistence.GetClusterMetadataResponse{ { + // No change and not include in callback ClusterMetadata: persistencespb.ClusterMetadata{ ClusterName: s.clusterName, IsConnectionEnabled: true, @@ -182,12 +199,26 @@ Version: 1, }, { + // Updated, included in callback + ClusterMetadata: persistencespb.ClusterMetadata{ + ClusterName: s.thirdClusterName, + IsConnectionEnabled: true, + InitialFailoverVersion: 1, + HistoryShardCount: 1, + ClusterAddress: uuid.New(), + Tags: map[string]string{"test": "test"}, + }, + Version: 2, + }, + { + // Newly added, included in callback ClusterMetadata: persistencespb.ClusterMetadata{ ClusterName: id, IsConnectionEnabled: true, InitialFailoverVersion: 2, HistoryShardCount: 2, ClusterAddress: uuid.New(), + Tags: map[string]string{"test": "test"}, }, Version: 2, }, @@ -195,6 +226,9 @@ }, nil) err := s.metadata.refreshClusterMetadata(context.Background()) s.NoError(err) + clusterInfo := s.metadata.GetAllClusterInfo() + s.Equal("test", clusterInfo[s.thirdClusterName].Tags["test"]) + s.Equal("test", clusterInfo[id].Tags["test"]) } func (s *metadataSuite) Test_ListAllClusterMetadataFromDB_Success() { diff -Nru temporal-1.21.5-1/src/common/collection/pagingIterator.go temporal-1.22.5/src/common/collection/pagingIterator.go --- temporal-1.21.5-1/src/common/collection/pagingIterator.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/collection/pagingIterator.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,127 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
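The metadata hunks above thread a new per-cluster Tags map from cluster.Config (yaml key `tags`) through ClusterInformation, the persistence response, and the refresh loop, where maps.Equal now decides whether a metadata-change callback fires. A minimal, self-contained sketch of that comparison; the cluster address and tag values are illustrative assumptions, not taken from the patch:

package main

import (
	"fmt"

	"golang.org/x/exp/maps"
)

// Stand-in for the fields of cluster.ClusterInformation that the patched
// refreshClusterMetadata compares when deciding whether a cluster changed.
type clusterInfo struct {
	Enabled                bool
	InitialFailoverVersion int64
	RPCAddress             string
	Tags                   map[string]string
}

func main() {
	oldInfo := clusterInfo{
		Enabled:                true,
		InitialFailoverVersion: 1,
		RPCAddress:             "10.0.0.1:7233",                        // assumed address
		Tags:                   map[string]string{"region": "eu-west"}, // assumed tag
	}
	newInfo := oldInfo
	newInfo.Tags = map[string]string{"region": "us-east"} // hypothetical tag update

	// Mirrors the new condition in the patch: with everything else equal, a
	// differing Tags map alone is now enough to mark the entry as changed
	// and trigger the registered metadata-change callbacks.
	unchanged := newInfo.Enabled == oldInfo.Enabled &&
		newInfo.RPCAddress == oldInfo.RPCAddress &&
		newInfo.InitialFailoverVersion == oldInfo.InitialFailoverVersion &&
		maps.Equal(newInfo.Tags, oldInfo.Tags)
	fmt.Println("cluster info unchanged:", unchanged) // false
}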
- -package collection - -type ( - // PaginationFn is the function which get a page of results - PaginationFn[V any] func(paginationToken []byte) ([]V, []byte, error) - - // PagingIteratorImpl is the implementation of PagingIterator - PagingIteratorImpl[V any] struct { - paginationFn PaginationFn[V] - pageToken []byte - pageErr error - pageItems []V - nextPageItemIndex int - } -) - -// NewPagingIterator create a new paging iterator -func NewPagingIterator[V any]( - paginationFn PaginationFn[V], -) Iterator[V] { - iter := &PagingIteratorImpl[V]{ - paginationFn: paginationFn, - pageToken: nil, - pageErr: nil, - pageItems: nil, - nextPageItemIndex: 0, - } - iter.getNextPage() // this will initialize the paging iterator - return iter -} - -// NewPagingIteratorWithToken create a new paging iterator with initial token -func NewPagingIteratorWithToken[V any]( - paginationFn PaginationFn[V], - pageToken []byte, -) Iterator[V] { - iter := &PagingIteratorImpl[V]{ - paginationFn: paginationFn, - pageToken: pageToken, - pageErr: nil, - pageItems: nil, - nextPageItemIndex: 0, - } - iter.getNextPage() // this will initialize the paging iterator - return iter -} - -// HasNext return whether has next item or err -func (iter *PagingIteratorImpl[V]) HasNext() bool { - // pagination encounters error - if iter.pageErr != nil { - return true - } - - // still have local cached item to return - if iter.nextPageItemIndex < len(iter.pageItems) { - return true - } - - if len(iter.pageToken) != 0 { - iter.getNextPage() - return iter.HasNext() - } - - return false -} - -// Next return next item or err -func (iter *PagingIteratorImpl[V]) Next() (V, error) { - if !iter.HasNext() { - panic("HistoryEventIterator Next() called without checking HasNext()") - } - - if iter.pageErr != nil { - err := iter.pageErr - iter.pageErr = nil - var v V - return v, err - } - - // we have cached events - if iter.nextPageItemIndex < len(iter.pageItems) { - index := iter.nextPageItemIndex - iter.nextPageItemIndex++ - return iter.pageItems[index], nil - } - - panic("HistoryEventIterator Next() should return either a history event or a err") -} - -func (iter *PagingIteratorImpl[V]) getNextPage() { - items, token, err := iter.paginationFn(iter.pageToken) - if err == nil { - iter.pageItems = items - iter.pageToken = token - iter.pageErr = nil - } else { - iter.pageItems = nil - iter.pageToken = nil - iter.pageErr = err - } - iter.nextPageItemIndex = 0 -} diff -Nru temporal-1.21.5-1/src/common/collection/pagingIterator_test.go temporal-1.22.5/src/common/collection/pagingIterator_test.go --- temporal-1.21.5-1/src/common/collection/pagingIterator_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/collection/pagingIterator_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,162 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package collection - -import ( - "errors" - "testing" - - "github.com/stretchr/testify/suite" -) - -type ( - pagingIteratorSuite struct { - suite.Suite - } -) - -func TestPagingIteratorSuite(t *testing.T) { - s := new(pagingIteratorSuite) - suite.Run(t, s) -} - -func (s *pagingIteratorSuite) SetupSuite() { -} - -func (s *pagingIteratorSuite) TearDownSuite() { - -} - -func (s *pagingIteratorSuite) SetupTest() { - -} - -func (s *pagingIteratorSuite) TearDownTest() { - -} - -func (s *pagingIteratorSuite) TestIteration_NoErr() { - phase := 0 - outputs := [][]int{ - {1, 2, 3, 4, 5}, - {}, - {6}, - {}, - } - tokens := [][]byte{ - []byte("some random token 1"), - []byte("some random token 2"), - []byte("some random token 3"), - []byte(nil), - } - pagingFn := func(token []byte) ([]int, []byte, error) { - switch phase { - case 0: - s.Equal(0, len(token)) - defer func() { phase++ }() - return outputs[phase], tokens[phase], nil - case 1: - s.Equal(tokens[0], token) - defer func() { phase++ }() - return outputs[phase], tokens[phase], nil - case 2: - s.Equal(tokens[1], token) - defer func() { phase++ }() - return outputs[phase], tokens[phase], nil - case 3: - s.Equal(tokens[2], token) - defer func() { phase++ }() - return outputs[phase], tokens[phase], nil - default: - panic("should not reach here during test") - } - } - - result := []int{} - ite := NewPagingIterator(pagingFn) - for ite.HasNext() { - num, err := ite.Next() - s.Nil(err) - result = append(result, num) - } - s.Equal([]int{1, 2, 3, 4, 5, 6}, result) -} - -func (s *pagingIteratorSuite) TestIteration_Err_Beginging() { - phase := 0 - ite := NewPagingIterator(func(token []byte) ([]interface{}, []byte, error) { - switch phase { - case 0: - defer func() { phase++ }() - return nil, nil, errors.New("some random error") - default: - panic("should not reach here during test") - } - }) - - s.True(ite.HasNext()) - item, err := ite.Next() - s.Nil(item) - s.NotNil(err) - s.False(ite.HasNext()) -} - -func (s *pagingIteratorSuite) TestIteration_Err_NotBegining() { - - phase := 0 - outputs := [][]interface{}{ - {1, 2, 3, 4, 5}, - } - tokens := [][]byte{ - []byte("some random token 1"), - } - pagingFn := func(token []byte) ([]interface{}, []byte, error) { - switch phase { - case 0: - s.Equal(0, len(token)) - defer func() { phase++ }() - return outputs[phase], tokens[phase], nil - case 1: - s.Equal(tokens[0], token) - defer func() { phase++ }() - return nil, nil, errors.New("some random error") - default: - panic("should not reach here during test") - } - } - - result := []int{} - ite := NewPagingIterator(pagingFn) - for ite.HasNext() { - item, err := ite.Next() - if err != nil { - break - } - num, ok := item.(int) - s.True(ok) - result = append(result, num) - } - s.Equal([]int{1, 2, 3, 4, 5}, result) -} diff -Nru temporal-1.21.5-1/src/common/collection/paging_iterator.go temporal-1.22.5/src/common/collection/paging_iterator.go --- temporal-1.21.5-1/src/common/collection/paging_iterator.go 1970-01-01 00:00:00.000000000 +0000 +++ 
temporal-1.22.5/src/common/collection/paging_iterator.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,127 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package collection + +type ( + // PaginationFn is the function which get a page of results + PaginationFn[V any] func(paginationToken []byte) ([]V, []byte, error) + + // PagingIteratorImpl is the implementation of PagingIterator + PagingIteratorImpl[V any] struct { + paginationFn PaginationFn[V] + pageToken []byte + pageErr error + pageItems []V + nextPageItemIndex int + } +) + +// NewPagingIterator create a new paging iterator +func NewPagingIterator[V any]( + paginationFn PaginationFn[V], +) Iterator[V] { + iter := &PagingIteratorImpl[V]{ + paginationFn: paginationFn, + pageToken: nil, + pageErr: nil, + pageItems: nil, + nextPageItemIndex: 0, + } + iter.getNextPage() // this will initialize the paging iterator + return iter +} + +// NewPagingIteratorWithToken create a new paging iterator with initial token +func NewPagingIteratorWithToken[V any]( + paginationFn PaginationFn[V], + pageToken []byte, +) Iterator[V] { + iter := &PagingIteratorImpl[V]{ + paginationFn: paginationFn, + pageToken: pageToken, + pageErr: nil, + pageItems: nil, + nextPageItemIndex: 0, + } + iter.getNextPage() // this will initialize the paging iterator + return iter +} + +// HasNext return whether has next item or err +func (iter *PagingIteratorImpl[V]) HasNext() bool { + // pagination encounters error + if iter.pageErr != nil { + return true + } + + // still have local cached item to return + if iter.nextPageItemIndex < len(iter.pageItems) { + return true + } + + if len(iter.pageToken) != 0 { + iter.getNextPage() + return iter.HasNext() + } + + return false +} + +// Next return next item or err +func (iter *PagingIteratorImpl[V]) Next() (V, error) { + if !iter.HasNext() { + panic("HistoryEventIterator Next() called without checking HasNext()") + } + + if iter.pageErr != nil { + err := iter.pageErr + iter.pageErr = nil + var v V + return v, err + } + + // we have cached events + if iter.nextPageItemIndex < len(iter.pageItems) { + index := iter.nextPageItemIndex + iter.nextPageItemIndex++ + return iter.pageItems[index], nil + } + + panic("HistoryEventIterator Next() should return either a history event or a err") +} + +func (iter *PagingIteratorImpl[V]) getNextPage() { + items, token, err 
:= iter.paginationFn(iter.pageToken) + if err == nil { + iter.pageItems = items + iter.pageToken = token + iter.pageErr = nil + } else { + iter.pageItems = nil + iter.pageToken = nil + iter.pageErr = err + } + iter.nextPageItemIndex = 0 +} diff -Nru temporal-1.21.5-1/src/common/collection/paging_iterator_test.go temporal-1.22.5/src/common/collection/paging_iterator_test.go --- temporal-1.21.5-1/src/common/collection/paging_iterator_test.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/collection/paging_iterator_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,162 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
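The pagingIterator files above are renamed to snake_case with their content unchanged. For orientation, a small self-contained sketch of how the iterator is driven: a PaginationFn returns one page of items plus a continuation token (an empty token ends iteration), and callers pump HasNext/Next, checking the error from Next because a failed page fetch is still reported as "has next". The page contents and tokens below are made up for illustration.

package main

import (
	"fmt"

	"go.temporal.io/server/common/collection"
)

func main() {
	pages := [][]string{{"a", "b"}, {"c"}}
	pageIdx := 0

	// PaginationFn: return the next page and the token for the page after it;
	// a nil/empty token tells the iterator there is nothing left to fetch.
	fetch := func(token []byte) ([]string, []byte, error) {
		page := pages[pageIdx]
		pageIdx++
		if pageIdx < len(pages) {
			return page, []byte("more"), nil
		}
		return page, nil, nil
	}

	iter := collection.NewPagingIterator[string](fetch)
	for iter.HasNext() {
		item, err := iter.Next()
		if err != nil {
			// An error from the PaginationFn surfaces here exactly once;
			// HasNext returns false afterwards.
			fmt.Println("pagination failed:", err)
			break
		}
		fmt.Println(item) // a, b, c
	}
}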
+ +package collection + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/suite" +) + +type ( + pagingIteratorSuite struct { + suite.Suite + } +) + +func TestPagingIteratorSuite(t *testing.T) { + s := new(pagingIteratorSuite) + suite.Run(t, s) +} + +func (s *pagingIteratorSuite) SetupSuite() { +} + +func (s *pagingIteratorSuite) TearDownSuite() { + +} + +func (s *pagingIteratorSuite) SetupTest() { + +} + +func (s *pagingIteratorSuite) TearDownTest() { + +} + +func (s *pagingIteratorSuite) TestIteration_NoErr() { + phase := 0 + outputs := [][]int{ + {1, 2, 3, 4, 5}, + {}, + {6}, + {}, + } + tokens := [][]byte{ + []byte("some random token 1"), + []byte("some random token 2"), + []byte("some random token 3"), + []byte(nil), + } + pagingFn := func(token []byte) ([]int, []byte, error) { + switch phase { + case 0: + s.Equal(0, len(token)) + defer func() { phase++ }() + return outputs[phase], tokens[phase], nil + case 1: + s.Equal(tokens[0], token) + defer func() { phase++ }() + return outputs[phase], tokens[phase], nil + case 2: + s.Equal(tokens[1], token) + defer func() { phase++ }() + return outputs[phase], tokens[phase], nil + case 3: + s.Equal(tokens[2], token) + defer func() { phase++ }() + return outputs[phase], tokens[phase], nil + default: + panic("should not reach here during test") + } + } + + result := []int{} + ite := NewPagingIterator(pagingFn) + for ite.HasNext() { + num, err := ite.Next() + s.Nil(err) + result = append(result, num) + } + s.Equal([]int{1, 2, 3, 4, 5, 6}, result) +} + +func (s *pagingIteratorSuite) TestIteration_Err_Beginging() { + phase := 0 + ite := NewPagingIterator(func(token []byte) ([]interface{}, []byte, error) { + switch phase { + case 0: + defer func() { phase++ }() + return nil, nil, errors.New("some random error") + default: + panic("should not reach here during test") + } + }) + + s.True(ite.HasNext()) + item, err := ite.Next() + s.Nil(item) + s.NotNil(err) + s.False(ite.HasNext()) +} + +func (s *pagingIteratorSuite) TestIteration_Err_NotBegining() { + + phase := 0 + outputs := [][]interface{}{ + {1, 2, 3, 4, 5}, + } + tokens := [][]byte{ + []byte("some random token 1"), + } + pagingFn := func(token []byte) ([]interface{}, []byte, error) { + switch phase { + case 0: + s.Equal(0, len(token)) + defer func() { phase++ }() + return outputs[phase], tokens[phase], nil + case 1: + s.Equal(tokens[0], token) + defer func() { phase++ }() + return nil, nil, errors.New("some random error") + default: + panic("should not reach here during test") + } + } + + result := []int{} + ite := NewPagingIterator(pagingFn) + for ite.HasNext() { + item, err := ite.Next() + if err != nil { + break + } + num, ok := item.(int) + s.True(ok) + result = append(result, num) + } + s.Equal([]int{1, 2, 3, 4, 5}, result) +} diff -Nru temporal-1.21.5-1/src/common/collection/priorityQueue.go temporal-1.22.5/src/common/collection/priorityQueue.go --- temporal-1.21.5-1/src/common/collection/priorityQueue.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/collection/priorityQueue.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,114 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package collection - -import ( - "container/heap" -) - -type ( - priorityQueueImpl[T any] struct { - compareLess func(this T, other T) bool - items []T - } -) - -// NewPriorityQueue create a new priority queue -func NewPriorityQueue[T any]( - compareLess func(this T, other T) bool, -) Queue[T] { - return &priorityQueueImpl[T]{ - compareLess: compareLess, - } -} - -// NewPriorityQueueWithItems creats a new priority queue -// with the provided list of items. -// PriorityQueue will take ownership of the passed in items, -// so caller should stop modifying it. -// The complexity is O(n) where n is the number of items -func NewPriorityQueueWithItems[T any]( - compareLess func(this T, other T) bool, - items []T, -) Queue[T] { - pq := &priorityQueueImpl[T]{ - compareLess: compareLess, - items: items, - } - heap.Init(pq) - return pq -} - -// Peek returns the top item of the priority queue -func (pq *priorityQueueImpl[T]) Peek() T { - if pq.IsEmpty() { - panic("Cannot peek item because priority queue is empty") - } - return pq.items[0] -} - -// Add push an item to priority queue -func (pq *priorityQueueImpl[T]) Add(item T) { - heap.Push(pq, item) -} - -// Remove pop an item from priority queue -func (pq *priorityQueueImpl[T]) Remove() T { - return heap.Pop(pq).(T) -} - -// IsEmpty indicate if the priority queue is empty -func (pq *priorityQueueImpl[T]) IsEmpty() bool { - return pq.Len() == 0 -} - -// below are the functions used by heap.Interface and go internal heap implementation - -// Len implements sort.Interface -func (pq *priorityQueueImpl[T]) Len() int { - return len(pq.items) -} - -// Less implements sort.Interface -func (pq *priorityQueueImpl[T]) Less(i, j int) bool { - return pq.compareLess(pq.items[i], pq.items[j]) -} - -// Swap implements sort.Interface -func (pq *priorityQueueImpl[T]) Swap(i, j int) { - pq.items[i], pq.items[j] = pq.items[j], pq.items[i] -} - -// Push push an item to priority queue, used by go internal heap implementation -func (pq *priorityQueueImpl[T]) Push(item interface{}) { - pq.items = append(pq.items, item.(T)) -} - -// Pop pop an item from priority queue, used by go internal heap implementation -func (pq *priorityQueueImpl[T]) Pop() interface{} { - pqItem := pq.items[pq.Len()-1] - pq.items = pq.items[0 : pq.Len()-1] - return pqItem -} diff -Nru temporal-1.21.5-1/src/common/collection/priorityQueue_test.go temporal-1.22.5/src/common/collection/priorityQueue_test.go --- 
temporal-1.21.5-1/src/common/collection/priorityQueue_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/collection/priorityQueue_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,137 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package collection - -import ( - "math/rand" - "sort" - "testing" - - "github.com/stretchr/testify/suite" -) - -type ( - PriorityQueueSuite struct { - suite.Suite - pq Queue[*testPriorityQueueItem] - } - - testPriorityQueueItem struct { - value int - } -) - -func testPriorityQueueItemCompareLess(this *testPriorityQueueItem, that *testPriorityQueueItem) bool { - return this.value < that.value -} - -func TestPriorityQueueSuite(t *testing.T) { - suite.Run(t, new(PriorityQueueSuite)) -} - -func (s *PriorityQueueSuite) SetupTest() { - s.pq = NewPriorityQueue(testPriorityQueueItemCompareLess) -} - -func (s *PriorityQueueSuite) TestNewPriorityQueueWithItems() { - items := []*testPriorityQueueItem{ - {value: 10}, - {value: 3}, - {value: 5}, - {value: 4}, - {value: 1}, - {value: 16}, - {value: -10}, - } - s.pq = NewPriorityQueueWithItems( - testPriorityQueueItemCompareLess, - items, - ) - - expected := []int{-10, 1, 3, 4, 5, 10, 16} - result := []int{} - - for !s.pq.IsEmpty() { - result = append(result, s.pq.Remove().value) - } - s.Equal(expected, result) -} - -func (s *PriorityQueueSuite) TestInsertAndPop() { - s.pq.Add(&testPriorityQueueItem{10}) - s.pq.Add(&testPriorityQueueItem{3}) - s.pq.Add(&testPriorityQueueItem{5}) - s.pq.Add(&testPriorityQueueItem{4}) - s.pq.Add(&testPriorityQueueItem{1}) - s.pq.Add(&testPriorityQueueItem{16}) - s.pq.Add(&testPriorityQueueItem{-10}) - - expected := []int{-10, 1, 3, 4, 5, 10, 16} - result := []int{} - - for !s.pq.IsEmpty() { - result = append(result, s.pq.Remove().value) - } - s.Equal(expected, result) - - s.pq.Add(&testPriorityQueueItem{1000}) - s.pq.Add(&testPriorityQueueItem{1233}) - s.pq.Remove() // remove 1000 - s.pq.Add(&testPriorityQueueItem{4}) - s.pq.Add(&testPriorityQueueItem{18}) - s.pq.Add(&testPriorityQueueItem{192}) - s.pq.Add(&testPriorityQueueItem{255}) - s.pq.Remove() // remove 4 - s.pq.Remove() // remove 18 - s.pq.Add(&testPriorityQueueItem{59}) - s.pq.Add(&testPriorityQueueItem{727}) - - expected = []int{59, 192, 255, 727, 1233} - result = []int{} - - for !s.pq.IsEmpty() { - result = append(result, 
s.pq.Remove().value) - } - s.Equal(expected, result) -} - -func (s *PriorityQueueSuite) TestRandomNumber() { - for round := 0; round < 1000; round++ { - - expected := []int{} - result := []int{} - for i := 0; i < 1000; i++ { - num := rand.Int() - s.pq.Add(&testPriorityQueueItem{num}) - expected = append(expected, num) - } - sort.Ints(expected) - - for !s.pq.IsEmpty() { - result = append(result, s.pq.Remove().value) - } - s.Equal(expected, result) - } -} diff -Nru temporal-1.21.5-1/src/common/collection/priority_queue.go temporal-1.22.5/src/common/collection/priority_queue.go --- temporal-1.21.5-1/src/common/collection/priority_queue.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/collection/priority_queue.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,114 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package collection + +import ( + "container/heap" +) + +type ( + priorityQueueImpl[T any] struct { + compareLess func(this T, other T) bool + items []T + } +) + +// NewPriorityQueue create a new priority queue +func NewPriorityQueue[T any]( + compareLess func(this T, other T) bool, +) Queue[T] { + return &priorityQueueImpl[T]{ + compareLess: compareLess, + } +} + +// NewPriorityQueueWithItems creats a new priority queue +// with the provided list of items. +// PriorityQueue will take ownership of the passed in items, +// so caller should stop modifying it. 
+// The complexity is O(n) where n is the number of items +func NewPriorityQueueWithItems[T any]( + compareLess func(this T, other T) bool, + items []T, +) Queue[T] { + pq := &priorityQueueImpl[T]{ + compareLess: compareLess, + items: items, + } + heap.Init(pq) + return pq +} + +// Peek returns the top item of the priority queue +func (pq *priorityQueueImpl[T]) Peek() T { + if pq.IsEmpty() { + panic("Cannot peek item because priority queue is empty") + } + return pq.items[0] +} + +// Add push an item to priority queue +func (pq *priorityQueueImpl[T]) Add(item T) { + heap.Push(pq, item) +} + +// Remove pop an item from priority queue +func (pq *priorityQueueImpl[T]) Remove() T { + return heap.Pop(pq).(T) +} + +// IsEmpty indicate if the priority queue is empty +func (pq *priorityQueueImpl[T]) IsEmpty() bool { + return pq.Len() == 0 +} + +// below are the functions used by heap.Interface and go internal heap implementation + +// Len implements sort.Interface +func (pq *priorityQueueImpl[T]) Len() int { + return len(pq.items) +} + +// Less implements sort.Interface +func (pq *priorityQueueImpl[T]) Less(i, j int) bool { + return pq.compareLess(pq.items[i], pq.items[j]) +} + +// Swap implements sort.Interface +func (pq *priorityQueueImpl[T]) Swap(i, j int) { + pq.items[i], pq.items[j] = pq.items[j], pq.items[i] +} + +// Push push an item to priority queue, used by go internal heap implementation +func (pq *priorityQueueImpl[T]) Push(item interface{}) { + pq.items = append(pq.items, item.(T)) +} + +// Pop pop an item from priority queue, used by go internal heap implementation +func (pq *priorityQueueImpl[T]) Pop() interface{} { + pqItem := pq.items[pq.Len()-1] + pq.items = pq.items[0 : pq.Len()-1] + return pqItem +} diff -Nru temporal-1.21.5-1/src/common/collection/priority_queue_test.go temporal-1.22.5/src/common/collection/priority_queue_test.go --- temporal-1.21.5-1/src/common/collection/priority_queue_test.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/collection/priority_queue_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,137 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
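Likewise, priorityQueue.go is renamed to priority_queue.go with identical content. A short usage sketch of the Queue API it exposes (the ints are arbitrary example values):

package main

import (
	"fmt"

	"go.temporal.io/server/common/collection"
)

func main() {
	// compareLess defines the ordering; the "least" item is always at the top.
	pq := collection.NewPriorityQueue(func(a, b int) bool { return a < b })

	for _, v := range []int{5, 1, 4} {
		pq.Add(v)
	}

	fmt.Println(pq.Peek()) // 1: smallest item, left in place
	for !pq.IsEmpty() {
		fmt.Println(pq.Remove()) // 1, 4, 5
	}
}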
+ +package collection + +import ( + "math/rand" + "sort" + "testing" + + "github.com/stretchr/testify/suite" +) + +type ( + PriorityQueueSuite struct { + suite.Suite + pq Queue[*testPriorityQueueItem] + } + + testPriorityQueueItem struct { + value int + } +) + +func testPriorityQueueItemCompareLess(this *testPriorityQueueItem, that *testPriorityQueueItem) bool { + return this.value < that.value +} + +func TestPriorityQueueSuite(t *testing.T) { + suite.Run(t, new(PriorityQueueSuite)) +} + +func (s *PriorityQueueSuite) SetupTest() { + s.pq = NewPriorityQueue(testPriorityQueueItemCompareLess) +} + +func (s *PriorityQueueSuite) TestNewPriorityQueueWithItems() { + items := []*testPriorityQueueItem{ + {value: 10}, + {value: 3}, + {value: 5}, + {value: 4}, + {value: 1}, + {value: 16}, + {value: -10}, + } + s.pq = NewPriorityQueueWithItems( + testPriorityQueueItemCompareLess, + items, + ) + + expected := []int{-10, 1, 3, 4, 5, 10, 16} + result := []int{} + + for !s.pq.IsEmpty() { + result = append(result, s.pq.Remove().value) + } + s.Equal(expected, result) +} + +func (s *PriorityQueueSuite) TestInsertAndPop() { + s.pq.Add(&testPriorityQueueItem{10}) + s.pq.Add(&testPriorityQueueItem{3}) + s.pq.Add(&testPriorityQueueItem{5}) + s.pq.Add(&testPriorityQueueItem{4}) + s.pq.Add(&testPriorityQueueItem{1}) + s.pq.Add(&testPriorityQueueItem{16}) + s.pq.Add(&testPriorityQueueItem{-10}) + + expected := []int{-10, 1, 3, 4, 5, 10, 16} + result := []int{} + + for !s.pq.IsEmpty() { + result = append(result, s.pq.Remove().value) + } + s.Equal(expected, result) + + s.pq.Add(&testPriorityQueueItem{1000}) + s.pq.Add(&testPriorityQueueItem{1233}) + s.pq.Remove() // remove 1000 + s.pq.Add(&testPriorityQueueItem{4}) + s.pq.Add(&testPriorityQueueItem{18}) + s.pq.Add(&testPriorityQueueItem{192}) + s.pq.Add(&testPriorityQueueItem{255}) + s.pq.Remove() // remove 4 + s.pq.Remove() // remove 18 + s.pq.Add(&testPriorityQueueItem{59}) + s.pq.Add(&testPriorityQueueItem{727}) + + expected = []int{59, 192, 255, 727, 1233} + result = []int{} + + for !s.pq.IsEmpty() { + result = append(result, s.pq.Remove().value) + } + s.Equal(expected, result) +} + +func (s *PriorityQueueSuite) TestRandomNumber() { + for round := 0; round < 1000; round++ { + + expected := []int{} + result := []int{} + for i := 0; i < 1000; i++ { + num := rand.Int() + s.pq.Add(&testPriorityQueueItem{num}) + expected = append(expected, num) + } + sort.Ints(expected) + + for !s.pq.IsEmpty() { + result = append(result, s.pq.Remove().value) + } + s.Equal(expected, result) + } +} diff -Nru temporal-1.21.5-1/src/common/config/config.go temporal-1.22.5/src/common/config/config.go --- temporal-1.21.5-1/src/common/config/config.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/config/config.go 2024-02-23 09:45:43.000000000 +0000 @@ -85,7 +85,7 @@ // RPC contains the rpc config items RPC struct { - // GRPCPort is the port on which gRPC will listen + // GRPCPort is the port on which gRPC will listen GRPCPort int `yaml:"grpcPort"` // Port used for membership listener MembershipPort int `yaml:"membershipPort"` @@ -95,6 +95,12 @@ // check net.ParseIP for supported syntax, only IPv4 is supported, // mutually exclusive with `BindOnLocalHost` option BindOnIP string `yaml:"bindOnIP"` + // HTTPPort is the port on which HTTP will listen. If unset/0, HTTP will be + // disabled. This setting only applies to the frontend service. 
+ HTTPPort int `yaml:"httpPort"` + // HTTPAdditionalForwardedHeaders adds additional headers to the default set + // forwarded from HTTP to gRPC. + HTTPAdditionalForwardedHeaders []string `yaml:"httpAdditionalForwardedHeaders"` } // Global contains config items that apply process-wide to all services diff -Nru temporal-1.21.5-1/src/common/constants.go temporal-1.22.5/src/common/constants.go --- temporal-1.21.5-1/src/common/constants.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/constants.go 2024-02-23 09:45:43.000000000 +0000 @@ -102,3 +102,8 @@ // DefaultQueueReaderID is the default readerID when loading history tasks DefaultQueueReaderID int64 = 0 ) + +const ( + // DefaultOperatorRPSRatio is the default percentage of rate limit that should be used for operator priority requests + DefaultOperatorRPSRatio float64 = 0.2 +) diff -Nru temporal-1.21.5-1/src/common/daemon.go temporal-1.22.5/src/common/daemon.go --- temporal-1.21.5-1/src/common/daemon.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/daemon.go 2024-02-23 09:45:43.000000000 +0000 @@ -34,12 +34,3 @@ // DaemonStatusStopped coroutine pool stopped DaemonStatusStopped int32 = 2 ) - -type ( - // Daemon is the base interfaces implemented by - // background tasks within Temporal - Daemon interface { - Start() - Stop() - } -) diff -Nru temporal-1.21.5-1/src/common/deadlock/deadlock.go temporal-1.22.5/src/common/deadlock/deadlock.go --- temporal-1.21.5-1/src/common/deadlock/deadlock.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/deadlock/deadlock.go 2024-02-23 09:45:43.000000000 +0000 @@ -154,9 +154,11 @@ // unbuffered channel). lc.ping(ctx, []common.Pingable{lc.root}) + timer := time.NewTimer(lc.dd.config.Interval()) select { - case <-time.After(lc.dd.config.Interval()): + case <-timer.C: case <-ctx.Done(): + timer.Stop() return ctx.Err() } } diff -Nru temporal-1.21.5-1/src/common/defaultRetrySettings.go temporal-1.22.5/src/common/defaultRetrySettings.go --- temporal-1.21.5-1/src/common/defaultRetrySettings.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/defaultRetrySettings.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,37 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
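The config.go hunk above gives the RPC section two new fields for the frontend HTTP listener. A minimal sketch of populating them from Go; the port numbers and header name are assumptions chosen for illustration, not values mandated by the patch:

package main

import (
	"fmt"

	"go.temporal.io/server/common/config"
)

func main() {
	rpc := config.RPC{
		GRPCPort:       7233, // assumed ports
		MembershipPort: 6933,
		// New in this release: 0 (the default) leaves HTTP disabled; a
		// non-zero port enables the listener on the frontend service only.
		HTTPPort: 7243,
		// Headers forwarded from HTTP to gRPC in addition to the default set.
		HTTPAdditionalForwardedHeaders: []string{"X-Example-Tenant"},
	}
	fmt.Printf("%+v\n", rpc)
}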
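Two hunks in this patch, the cluster metadata refresh loop and the deadlock detector above, swap time.After for an explicit time.NewTimer. The motivation: the timer behind time.After cannot be stopped by the caller, so when ctx.Done() wins the select it keeps holding its resources until it fires, whereas an explicit timer can be stopped on the spot. A self-contained sketch of the resulting shape (function and variable names are placeholders):

package main

import (
	"context"
	"fmt"
	"time"
)

// waitOrCancel shows the select/timer shape adopted in the patch: wait for
// the interval, but stop the timer promptly if the context is cancelled.
func waitOrCancel(ctx context.Context, interval time.Duration) error {
	timer := time.NewTimer(interval)
	select {
	case <-timer.C:
		return nil // interval elapsed; the caller continues its loop
	case <-ctx.Done():
		timer.Stop() // release the timer now rather than when it expires
		return ctx.Err()
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // simulate shutdown winning the race
	fmt.Println(waitOrCancel(ctx, time.Second)) // context canceled
}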
- -package common - -import "time" - -// DefaultRetrySettings indicates what the "default" retry settings -// are if it is not specified on an Activity or for any unset fields -// if a policy is explicitly set on a workflow -type DefaultRetrySettings struct { - InitialInterval time.Duration - MaximumIntervalCoefficient float64 - BackoffCoefficient float64 - MaximumAttempts int32 -} diff -Nru temporal-1.21.5-1/src/common/default_retry_settings.go temporal-1.22.5/src/common/default_retry_settings.go --- temporal-1.21.5-1/src/common/default_retry_settings.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/default_retry_settings.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,37 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package common + +import "time" + +// DefaultRetrySettings indicates what the "default" retry settings +// are if it is not specified on an Activity or for any unset fields +// if a policy is explicitly set on a workflow +type DefaultRetrySettings struct { + InitialInterval time.Duration + MaximumIntervalCoefficient float64 + BackoffCoefficient float64 + MaximumAttempts int32 +} diff -Nru temporal-1.21.5-1/src/common/dynamicconfig/constants.go temporal-1.22.5/src/common/dynamicconfig/constants.go --- temporal-1.21.5-1/src/common/dynamicconfig/constants.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/dynamicconfig/constants.go 2024-02-23 09:45:43.000000000 +0000 @@ -119,6 +119,9 @@ // ShardPerNsRPSWarnPercent is the per-shard per-namespace RPS limit for warning as a percentage of ShardRPSWarnLimit // these warning are not emitted if the value is set to 0 or less ShardPerNsRPSWarnPercent = "system.shardPerNsRPSWarnPercent" + // OperatorRPSRatio is the percentage of the rate limit provided to priority rate limiters that should be used for + // operator API calls (highest priority). 
Should be >0.0 and <= 1.0 (defaults to 20% if not specified) + OperatorRPSRatio = "system.operatorRPSRatio" // Whether the deadlock detector should dump goroutines DeadlockDumpGoroutines = "system.deadlock.DumpGoroutines" @@ -237,8 +240,10 @@ FrontendVisibilityMaxPageSize = "frontend.visibilityMaxPageSize" // FrontendHistoryMaxPageSize is default max size for GetWorkflowExecutionHistory in one page FrontendHistoryMaxPageSize = "frontend.historyMaxPageSize" - // FrontendRPS is workflow rate limit per second + // FrontendRPS is workflow rate limit per second per-instance FrontendRPS = "frontend.rps" + // FrontendGlobalRPS is workflow rate limit per second for the whole cluster + FrontendGlobalRPS = "frontend.globalRPS" // FrontendNamespaceReplicationInducingAPIsRPS limits the per second request rate for namespace replication inducing // APIs (e.g. RegisterNamespace, UpdateNamespace, UpdateWorkerBuildIdCompatibility). // This config is EXPERIMENTAL and may be changed or removed in a later release. @@ -247,8 +252,20 @@ FrontendMaxNamespaceRPSPerInstance = "frontend.namespaceRPS" // FrontendMaxNamespaceBurstPerInstance is workflow namespace burst limit FrontendMaxNamespaceBurstPerInstance = "frontend.namespaceBurst" - // FrontendMaxNamespaceCountPerInstance limits concurrent task queue polls per namespace per instance - FrontendMaxNamespaceCountPerInstance = "frontend.namespaceCount" + // FrontendMaxConcurrentLongRunningRequestsPerInstance limits concurrent long-running requests per-instance, + // per-API. Example requests include long-poll requests, and `Query` requests (which need to wait for WFTs). The + // limit is applied individually to each API method. This value is ignored if + // FrontendGlobalMaxConcurrentLongRunningRequests is greater than zero. Warning: setting this to zero will cause all + // long-running requests to fail. The name `frontend.namespaceCount` is kept for backwards compatibility with + // existing deployments even though it is a bit of a misnomer. This does not limit the number of namespaces; it is a + // per-_namespace_ limit on the _count_ of long-running requests. Requests are only throttled when the limit is + // exceeded, not when it is only reached. + FrontendMaxConcurrentLongRunningRequestsPerInstance = "frontend.namespaceCount" + // FrontendGlobalMaxConcurrentLongRunningRequests limits concurrent long-running requests across all frontend + // instances in the cluster, for a given namespace, per-API method. If this is set to 0 (the default), then it is + // ignored. The name `frontend.globalNamespaceCount` is kept for consistency with the per-instance limit name, + // `frontend.namespaceCount`. + FrontendGlobalMaxConcurrentLongRunningRequests = "frontend.globalNamespaceCount" // FrontendMaxNamespaceVisibilityRPSPerInstance is namespace rate limit per second for visibility APIs. // This config is EXPERIMENTAL and may be changed or removed in a later release. FrontendMaxNamespaceVisibilityRPSPerInstance = "frontend.namespaceRPS.visibility" @@ -481,15 +498,17 @@ // HistoryCacheNonUserContextLockTimeout controls how long non-user call (callerType != API or Operator) // will wait on workflow lock acquisition. Requires service restart to take effect. 
HistoryCacheNonUserContextLockTimeout = "history.cacheNonUserContextLockTimeout" + // EnableAPIGetCurrentRunIDLock controls if a lock should be acquired before getting current run ID for API requests + EnableAPIGetCurrentRunIDLock = "history.enableAPIGetCurrentRunIDLock" // HistoryStartupMembershipJoinDelay is the duration a history instance waits // before joining membership after starting. HistoryStartupMembershipJoinDelay = "history.startupMembershipJoinDelay" // HistoryShutdownDrainDuration is the duration of traffic drain during shutdown HistoryShutdownDrainDuration = "history.shutdownDrainDuration" - // EventsCacheInitialSize is initial size of events cache - EventsCacheInitialSize = "history.eventsCacheInitialSize" - // EventsCacheMaxSize is max size of events cache - EventsCacheMaxSize = "history.eventsCacheMaxSize" + // XDCCacheMaxSizeBytes is max size of events cache in bytes + XDCCacheMaxSizeBytes = "history.xdcCacheMaxSizeBytes" + // EventsCacheMaxSizeBytes is max size of events cache in bytes + EventsCacheMaxSizeBytes = "history.eventsCacheMaxSizeBytes" // EventsCacheTTL is TTL of events cache EventsCacheTTL = "history.eventsCacheTTL" // AcquireShardInterval is interval that timer used to acquire shard @@ -682,8 +701,6 @@ ArchivalProcessorArchiveDelay = "history.archivalProcessorArchiveDelay" // ArchivalBackendMaxRPS is the maximum rate of requests per second to the archival backend ArchivalBackendMaxRPS = "history.archivalBackendMaxRPS" - // DurableArchivalEnabled is the flag to enable durable archival - DurableArchivalEnabled = "history.durableArchivalEnabled" // WorkflowExecutionMaxInFlightUpdates is the max number of updates that can be in-flight (admitted but not yet completed) for any given workflow execution. WorkflowExecutionMaxInFlightUpdates = "history.maxInFlightUpdates" @@ -803,6 +820,9 @@ ReplicationBypassCorruptedData = "history.ReplicationBypassCorruptedData" // ReplicationEnableDLQMetrics is the flag to emit DLQ metrics ReplicationEnableDLQMetrics = "history.ReplicationEnableDLQMetrics" + // HistoryTaskDLQInteralErrors causes history task processing to send tasks failing with serviceerror.Internal to + // the dlq (or will drop them if not enabled) + HistoryTaskDropInternalErrors = "history.TaskDLQInternalErrors" // ReplicationStreamSyncStatusDuration sync replication status duration ReplicationStreamSyncStatusDuration = "history.ReplicationStreamSyncStatusDuration" diff -Nru temporal-1.21.5-1/src/common/dynamicconfig/shared_constants.go temporal-1.22.5/src/common/dynamicconfig/shared_constants.go --- temporal-1.21.5-1/src/common/dynamicconfig/shared_constants.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/dynamicconfig/shared_constants.go 2024-02-23 09:45:43.000000000 +0000 @@ -68,6 +68,12 @@ // dynamicRateLimitIncreaseStepSizeKey the amount the rate limit multiplier is increased when the system is healthy. 
should be between 0 and 1 dynamicRateLimitIncreaseStepSizeKey = "rateIncreaseStepSize" dynamicRateLimitIncreaseStepSizeDefault = 0.1 + // dynamicRateLimitMultiMinKey is the minimum the rate limit multiplier can be reduced to + dynamicRateLimitMultiMinKey = "rateMultiMin" + dynamicRateLimitMultiMinDefault = 0.8 + // dynamicRateLimitMultiMaxKey is the maximum the rate limit multiplier can be increased to + dynamicRateLimitMultiMaxKey = "rateMultiMax" + dynamicRateLimitMultiMaxDefault = 1.0 ) var DefaultDynamicRateLimitingParams = map[string]interface{}{ @@ -77,4 +83,6 @@ dynamicRateLimitErrorThresholdKey: dynamicRateLimitErrorThresholdDefault, dynamicRateLimitBackoffStepSizeKey: dynamicRateLimitBackoffStepSizeDefault, dynamicRateLimitIncreaseStepSizeKey: dynamicRateLimitIncreaseStepSizeDefault, + dynamicRateLimitMultiMinKey: dynamicRateLimitMultiMinDefault, + dynamicRateLimitMultiMaxKey: dynamicRateLimitMultiMaxDefault, } diff -Nru temporal-1.21.5-1/src/common/flusher/flusher.go temporal-1.22.5/src/common/flusher/flusher.go --- temporal-1.21.5-1/src/common/flusher/flusher.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/flusher/flusher.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,47 +0,0 @@ -// The MIT License -// -// Copyright (c) 2022 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package flusher - -import ( - "go.temporal.io/server/common" - "go.temporal.io/server/common/future" -) - -type ( - Writer[T any] interface { - Write(items []T) error - } - - Flusher[T any] interface { - common.Daemon - Buffer(item T) future.Future[struct{}] - Flush() - } - - FlushItem[T any] struct { - Item T - Future *future.FutureImpl[struct{}] - } -) diff -Nru temporal-1.21.5-1/src/common/flusher/flusher_impl.go temporal-1.22.5/src/common/flusher/flusher_impl.go --- temporal-1.21.5-1/src/common/flusher/flusher_impl.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/flusher/flusher_impl.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,301 +0,0 @@ -// The MIT License -// -// Copyright (c) 2022 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package flusher - -import ( - "sync" - "sync/atomic" - "time" - - "go.temporal.io/api/serviceerror" - - "go.temporal.io/server/common" - "go.temporal.io/server/common/channel" - "go.temporal.io/server/common/future" - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/log/tag" -) - -var ( - ErrFull = serviceerror.NewUnavailable("flush buffer is full") - ErrShutdown = serviceerror.NewUnavailable("flush buffer is shutdown") -) - -type ( - //Type Generic Flush Buffer that is size bound and time bound. - //The Flush Buffer will flush after a configurable amount of time as well as once the buffer reaches a configurable capacity. - //The number of flush buffers can also be configured. - //Starts with x free buffers, once a free buffer reaches capacity or if the timer is up, the free buffer will get switched to a full buffer. - //A full buffer will get flushed in the background and switched back to a free buffer. - //When a free buffer switches to a full buffer, another free buffer will take its place if there are any available at the moment. 
- flusherImpl[T any] struct { - status int32 - flushTimeout time.Duration - bufferCapacity int - numBuffer int - flushNotifierChan chan struct{} - logger log.Logger - shutdownChan channel.ShutdownOnce - writer Writer[T] - - sync.Mutex - flushTimer *time.Timer - flushBufferPointer *[]FlushItem[T] - - flushBuffer []FlushItem[T] - fullBufferChan chan []FlushItem[T] - freeBufferChan chan []FlushItem[T] - } -) - -func NewFlusher[T any]( - bufferCapacity int, - numBuffer int, - flushTimeout time.Duration, - writer Writer[T], - logger log.Logger, -) *flusherImpl[T] { - if bufferCapacity < 1 { - panic("bufferCapacity must be >= 1") - } else if numBuffer < 2 { - panic("numBuffer must be 2= 1") - } - - flushTimer := time.NewTimer(flushTimeout) - flushTimer.Stop() // Stop the timer after creation since we only want timer to start running upon first Item insertion - - freeBufferChan := make(chan []FlushItem[T], numBuffer) - fullBufferChan := make(chan []FlushItem[T], numBuffer) - for i := 0; i < numBuffer-1; i++ { // -1 since flushBuffer counts as the first free buffer - freeBufferChan <- make([]FlushItem[T], 0, bufferCapacity) - } - return &flusherImpl[T]{ - status: common.DaemonStatusInitialized, - flushTimeout: flushTimeout, // time waited after first Item insertion before flushing the buffer - numBuffer: numBuffer, // no of total flush buffers - bufferCapacity: bufferCapacity, // buffer size, will flush a buffer once no of items added to the buffer nears this limit - flushTimer: flushTimer, - flushNotifierChan: make(chan struct{}, 1), - writer: writer, - flushBuffer: make([]FlushItem[T], 0, bufferCapacity), - freeBufferChan: freeBufferChan, - fullBufferChan: fullBufferChan, - logger: logger, - shutdownChan: channel.NewShutdownOnce(), - } -} - -func (f *flusherImpl[T]) Start() { - if !atomic.CompareAndSwapInt32( - &f.status, - common.DaemonStatusInitialized, - common.DaemonStatusStarted, - ) { - return - } - - go f.timeEventLoop() - go f.flushEventLoop() -} - -func (f *flusherImpl[T]) Stop() { - if !atomic.CompareAndSwapInt32( - &f.status, - common.DaemonStatusStarted, - common.DaemonStatusStopped, - ) { - return - } - - f.shutdownChan.Shutdown() - - f.Lock() - defer f.Unlock() - - f.cancel(f.flushBuffer) - f.flushBuffer = nil - f.stopTimerLocked() -FreeBufferLoop: - for { - select { - case <-f.freeBufferChan: - // noop - default: - break FreeBufferLoop - } - } -FullBufferLoop: - for { - select { - case buffer := <-f.fullBufferChan: - f.cancel(buffer) - default: - break FullBufferLoop - } - } -} - -func (f *flusherImpl[T]) flushEventLoop() { -Loop: - for { - select { - case fullBuffer := <-f.fullBufferChan: - f.flush(fullBuffer) - freeBuffer := fullBuffer[:0] - f.freeBufferChan <- freeBuffer - case <-f.shutdownChan.Channel(): - f.Stop() - break Loop - } - } -} - -func (f *flusherImpl[T]) timeEventLoop() { -Loop: - for { - select { - case <-f.flushTimer.C: - f.Lock() - if &f.flushBuffer == f.flushBufferPointer { - f.pushDirtyBufferLocked() - f.stopTimerLocked() - } - f.Unlock() - case <-f.shutdownChan.Channel(): - f.Stop() - break Loop - } - } -} - -func (f *flusherImpl[T]) pullCleanBufferLocked() []FlushItem[T] { - var newFreeBuffer []FlushItem[T] - select { - case freeBuffer := <-f.freeBufferChan: - newFreeBuffer = freeBuffer - default: - newFreeBuffer = nil // set to nil to indicate no available flush buffer - } - return newFreeBuffer -} - -func (f *flusherImpl[T]) pushDirtyBufferLocked() { - freeBuffer := f.pullCleanBufferLocked() - fullBuffer := f.flushBuffer - f.flushBuffer = freeBuffer - 
f.fullBufferChan <- fullBuffer -} - -func (f *flusherImpl[T]) Buffer(item T) future.Future[struct{}] { - if f.shutdownChan.IsShutdown() { - return future.NewReadyFuture[struct{}](struct{}{}, ErrShutdown) - } - - flushItem := FlushItem[T]{ - Item: item, - Future: future.NewFuture[struct{}](), - } - f.Lock() - defer f.Unlock() - - if f.shutdownChan.IsShutdown() { - return future.NewReadyFuture[struct{}](struct{}{}, ErrShutdown) - } - - if f.flushBuffer != nil { // nil check to make sure there is a usable flush buffer - f.appendLocked(flushItem) - } else { - newFlushBuffer := f.pullCleanBufferLocked() - if newFlushBuffer != nil { // nil check to make sure there is a usable flush buffer - f.flushBuffer = newFlushBuffer - f.appendLocked(flushItem) - } else { - flushItem.Future.Set(struct{}{}, ErrFull) - } - } - - return flushItem.Future -} - -func (f *flusherImpl[T]) Flush() { - if f.shutdownChan.IsShutdown() { - return - } - - f.Lock() - defer f.Unlock() - - if f.shutdownChan.IsShutdown() { - return - } - - if len(f.flushBuffer) == 0 { - // nothing to flush - return - } - f.stopTimerLocked() - f.pushDirtyBufferLocked() -} - -func (f *flusherImpl[T]) appendLocked(flushItem FlushItem[T]) { - if len(f.flushBuffer) == 0 { // start timer if it's first Item insertion - f.startTimerLocked() - } - f.flushBuffer = append(f.flushBuffer, flushItem) - if len(f.flushBuffer) >= f.bufferCapacity { - f.stopTimerLocked() - f.pushDirtyBufferLocked() - } -} - -func (f *flusherImpl[T]) flush(flushBuffer []FlushItem[T]) { - items := make([]T, len(flushBuffer)) - for i := 0; i < len(items); i++ { - items[i] = flushBuffer[i].Item - } - err := f.writer.Write(items) - if err != nil { - f.logger.Error("Flusher failed to write", tag.Error(err)) - } - for _, flushItem := range flushBuffer { - flushItem.Future.Set(struct{}{}, err) - } -} - -func (f *flusherImpl[T]) cancel(flushBuffer []FlushItem[T]) { - for _, flushItem := range flushBuffer { - flushItem.Future.Set(struct{}{}, ErrShutdown) - } -} - -func (f *flusherImpl[T]) startTimerLocked() { - f.flushTimer.Reset(f.flushTimeout) - f.flushBufferPointer = &f.flushBuffer -} - -func (f *flusherImpl[T]) stopTimerLocked() { - f.flushTimer.Stop() - f.flushBufferPointer = nil -} diff -Nru temporal-1.21.5-1/src/common/flusher/flusher_test.go temporal-1.22.5/src/common/flusher/flusher_test.go --- temporal-1.21.5-1/src/common/flusher/flusher_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/flusher/flusher_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,367 +0,0 @@ -// The MIT License -// -// Copyright (c) 2022 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package flusher - -import ( - "context" - "math/rand" - "sync" - "testing" - "time" - - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - - "go.temporal.io/server/common/log" -) - -type ( - flusherSuite struct { - *require.Assertions - suite.Suite - - ctx context.Context - } - - fakeTask struct { - id int64 - } - fakeWriter struct { - sync.Mutex - tasks []*fakeTask - } -) - -func TestFlusher(t *testing.T) { - fs := new(flusherSuite) - suite.Run(t, fs) -} - -func (s *flusherSuite) SetupSuite() { - rand.Seed(time.Now().UnixNano()) - - s.Assertions = require.New(s.T()) -} - -func (s *flusherSuite) TearDownSuite() { - -} - -func (s *flusherSuite) SetupTest() { - s.ctx = context.Background() -} - -func (s *flusherSuite) TearDownTest() { - -} - -func (s *flusherSuite) TestBuffer_Buffer() { - bufferCapacity := 2 - numBuffer := 2 - flushTimeout := time.Minute - writer := &fakeWriter{} - flushBuffer := NewFlusher[*fakeTask]( - bufferCapacity, - numBuffer, - flushTimeout, - writer, - log.NewTestLogger(), - ) - - task := newFakeTask() - fut := flushBuffer.Buffer(task) - s.False(fut.Ready()) - - flushBuffer.Lock() - defer flushBuffer.Unlock() - s.Equal(1, len(flushBuffer.flushBuffer)) - s.Equal(task, flushBuffer.flushBuffer[0].Item) - s.Equal(&flushBuffer.flushBuffer, flushBuffer.flushBufferPointer) - s.Equal(1, len(flushBuffer.freeBufferChan)) - s.Equal(0, len(flushBuffer.fullBufferChan)) - - s.Equal([]*fakeTask{}, writer.Get()) -} - -func (s *flusherSuite) TestBuffer_Switch() { - bufferCapacity := 2 - numBuffer := 2 - flushTimeout := time.Minute - writer := &fakeWriter{} - flushBuffer := NewFlusher[*fakeTask]( - bufferCapacity, - numBuffer, - flushTimeout, - writer, - log.NewTestLogger(), - ) - - task0 := newFakeTask() - task1 := newFakeTask() - task2 := newFakeTask() - fut0 := flushBuffer.Buffer(task0) - fut1 := flushBuffer.Buffer(task1) - fut2 := flushBuffer.Buffer(task2) - s.False(fut0.Ready()) - s.False(fut1.Ready()) - s.False(fut2.Ready()) - - flushBuffer.Lock() - defer flushBuffer.Unlock() - s.Equal(1, len(flushBuffer.flushBuffer)) - s.Equal(task2, flushBuffer.flushBuffer[0].Item) - s.Equal(&flushBuffer.flushBuffer, flushBuffer.flushBufferPointer) - s.Equal(0, len(flushBuffer.freeBufferChan)) - s.Equal(1, len(flushBuffer.fullBufferChan)) - buffer := <-flushBuffer.fullBufferChan - s.Equal(task0, buffer[0].Item) - s.Equal(task1, buffer[1].Item) - s.Equal([]*fakeTask{}, writer.Get()) -} - -func (s *flusherSuite) TestBuffer_Timer() { - bufferCapacity := 2 - numBuffer := 2 - flushTimeout := time.Millisecond - writer := &fakeWriter{} - flushBuffer := NewFlusher[*fakeTask]( - bufferCapacity, - numBuffer, - flushTimeout, - writer, - log.NewTestLogger(), - ) - flushBuffer.Start() - defer flushBuffer.Stop() - - task := newFakeTask() - fut := flushBuffer.Buffer(task) - _, err := fut.Get(s.ctx) - s.NoError(err) - - flushBuffer.Lock() - s.Equal(0, len(flushBuffer.flushBuffer)) - s.Nil(flushBuffer.flushBufferPointer) - s.Equal(1, len(flushBuffer.freeBufferChan)) - s.Equal(0, len(flushBuffer.fullBufferChan)) - flushBuffer.Unlock() - - s.Equal([]*fakeTask{task}, writer.Get()) -} - -func (s *flusherSuite) TestBuffer_Flush() { - bufferCapacity := 2 - numBuffer := 2 - flushTimeout := time.Minute - writer := 
&fakeWriter{} - flushBuffer := NewFlusher[*fakeTask]( - bufferCapacity, - numBuffer, - flushTimeout, - writer, - log.NewTestLogger(), - ) - flushBuffer.Start() - defer flushBuffer.Stop() - - task := newFakeTask() - fut := flushBuffer.Buffer(task) - flushBuffer.Flush() - _, err := fut.Get(s.ctx) - s.NoError(err) - - flushBuffer.Lock() - s.Equal(0, len(flushBuffer.flushBuffer)) - s.Nil(flushBuffer.flushBufferPointer) - s.Equal(1, len(flushBuffer.freeBufferChan)) - s.Equal(0, len(flushBuffer.fullBufferChan)) - flushBuffer.Unlock() - - s.Equal([]*fakeTask{task}, writer.Get()) -} - -func (s *flusherSuite) TestBuffer_Full() { - bufferCapacity := 1 - numBuffer := 2 - flushTimeout := time.Minute - writer := &fakeWriter{} - flushBuffer := NewFlusher[*fakeTask]( - bufferCapacity, - numBuffer, - flushTimeout, - writer, - log.NewTestLogger(), - ) - - task0 := newFakeTask() - task1 := newFakeTask() - task2 := newFakeTask() - fut0 := flushBuffer.Buffer(task0) - fut1 := flushBuffer.Buffer(task1) - fut2 := flushBuffer.Buffer(task2) - s.False(fut0.Ready()) - s.False(fut1.Ready()) - _, err := fut2.Get(s.ctx) - s.Equal(ErrFull, err) - - flushBuffer.Lock() - defer flushBuffer.Unlock() - s.Equal(0, len(flushBuffer.flushBuffer)) - s.Nil(flushBuffer.flushBufferPointer) - s.Equal(0, len(flushBuffer.freeBufferChan)) - s.Equal(2, len(flushBuffer.fullBufferChan)) - buffer := <-flushBuffer.fullBufferChan - s.Equal(task0, buffer[0].Item) - buffer = <-flushBuffer.fullBufferChan - s.Equal(task1, buffer[0].Item) - s.Equal([]*fakeTask{}, writer.Get()) -} - -func (s *flusherSuite) TestBuffer_Shutdown() { - bufferCapacity := 2 - numBuffer := 2 - flushTimeout := time.Millisecond - writer := &fakeWriter{} - flushBuffer := NewFlusher[*fakeTask]( - bufferCapacity, - numBuffer, - flushTimeout, - writer, - log.NewTestLogger(), - ) - flushBuffer.Start() - - task0 := newFakeTask() - task1 := newFakeTask() - task2 := newFakeTask() - fut0 := flushBuffer.Buffer(task0) - fut1 := flushBuffer.Buffer(task1) - fut2 := flushBuffer.Buffer(task2) - - flushedTasks := []*fakeTask{} - flushBuffer.Stop() - _, err := fut0.Get(s.ctx) - if err == nil { - flushedTasks = append(flushedTasks, task0) - } - _, err = fut1.Get(s.ctx) - if err == nil { - flushedTasks = append(flushedTasks, task1) - } - _, err = fut2.Get(s.ctx) - s.Equal(ErrShutdown, err) - - flushBuffer.Lock() - s.Nil(flushBuffer.flushBuffer) - s.Nil(flushBuffer.flushBufferPointer) - // should not test the len of flushBuffer.freeBufferChan since this buffer is managed async-ly - s.Equal(0, len(flushBuffer.fullBufferChan)) - flushBuffer.Unlock() - - s.Equal(flushedTasks, writer.Get()) -} - -func (s *flusherSuite) TestBuffer_Concurrent() { - bufferCapacity := 128 - numBuffer := 2 - flushTimeout := 4 * time.Millisecond - writer := &fakeWriter{} - flushBuffer := NewFlusher[*fakeTask]( - bufferCapacity, - numBuffer, - flushTimeout, - writer, - log.NewTestLogger(), - ) - flushBuffer.Start() - defer flushBuffer.Stop() - - numTaskProducer := bufferCapacity * 2 - numTaskPerProducer := 64 - - startWaitGroup := sync.WaitGroup{} - endWaitGroup := sync.WaitGroup{} - - startWaitGroup.Add(numTaskProducer) - endWaitGroup.Add(numTaskProducer) - for i := 0; i < numTaskProducer; i++ { - go func() { - startWaitGroup.Wait() - defer endWaitGroup.Done() - - for i := 0; i < numTaskPerProducer; i++ { - task := newFakeTask() - for { - fut := flushBuffer.Buffer(task) - _, err := fut.Get(s.ctx) - if err != nil { - time.Sleep(time.Millisecond) - } else { - break - } - } - } - }() - startWaitGroup.Done() - } - 
endWaitGroup.Wait() - - flushBuffer.Lock() - s.Equal(0, len(flushBuffer.flushBuffer)) - s.Nil(flushBuffer.flushBufferPointer) - s.Equal(1, len(flushBuffer.freeBufferChan)) - s.Equal(0, len(flushBuffer.fullBufferChan)) - flushBuffer.Unlock() - - s.Equal(numTaskProducer*numTaskPerProducer, len(writer.Get())) -} - -func (w *fakeWriter) Write( - tasks []*fakeTask, -) error { - w.Lock() - defer w.Unlock() - w.tasks = append(w.tasks, tasks...) - return nil -} - -func (w *fakeWriter) Get() []*fakeTask { - w.Lock() - defer w.Unlock() - tasks := w.tasks - w.tasks = nil - - if tasks != nil { - return tasks - } else { - return []*fakeTask{} - } -} - -func newFakeTask() *fakeTask { - return &fakeTask{ - id: rand.Int63(), - } -} diff -Nru temporal-1.21.5-1/src/common/future/future_test.go temporal-1.22.5/src/common/future/future_test.go --- temporal-1.21.5-1/src/common/future/future_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/future/future_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -26,6 +26,7 @@ import ( "context" + "runtime" "sync" "testing" @@ -179,6 +180,7 @@ startWG.Wait() for !s.future.Ready() { + runtime.Gosched() } value, err := s.future.Get(ctx) diff -Nru temporal-1.21.5-1/src/common/headers/caller_info.go temporal-1.22.5/src/common/headers/caller_info.go --- temporal-1.21.5-1/src/common/headers/caller_info.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/headers/caller_info.go 2024-02-23 09:45:43.000000000 +0000 @@ -31,6 +31,7 @@ ) const ( + CallerTypeOperator = "operator" CallerTypeAPI = "api" CallerTypeBackground = "background" CallerTypePreemptable = "preemptable" @@ -113,7 +114,7 @@ ) context.Context { return setIncomingMD(ctx, map[string]string{ callerNameHeaderName: info.CallerName, - callerTypeHeaderName: info.CallerType, + CallerTypeHeaderName: info.CallerType, callOriginHeaderName: info.CallOrigin, }) } @@ -133,7 +134,7 @@ ctx context.Context, callerType string, ) context.Context { - return setIncomingMD(ctx, map[string]string{callerTypeHeaderName: callerType}) + return setIncomingMD(ctx, map[string]string{CallerTypeHeaderName: callerType}) } // SetOrigin set call origin in the context. 
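
Illustrative sketch, not part of the patch: the caller_info.go hunks above export CallerTypeHeaderName and add a CallerTypeOperator caller type. A minimal round trip through these helpers is sketched below; GetCallerInfo and the constants appear by name in the diff, while the SetCallerType function name is assumed from the modified hunk.

package main

import (
	"context"
	"fmt"

	"go.temporal.io/server/common/headers"
)

func main() {
	// Tag a simulated incoming request context as an operator call,
	// then read it back through GetCallerInfo.
	ctx := headers.SetCallerType(context.Background(), headers.CallerTypeOperator)
	info := headers.GetCallerInfo(ctx)
	fmt.Println(info.CallerType) // expected: "operator"
}
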
@@ -168,7 +169,7 @@ func GetCallerInfo( ctx context.Context, ) CallerInfo { - values := GetValues(ctx, callerNameHeaderName, callerTypeHeaderName, callOriginHeaderName) + values := GetValues(ctx, callerNameHeaderName, CallerTypeHeaderName, callOriginHeaderName) return CallerInfo{ CallerName: values[0], CallerType: values[1], diff -Nru temporal-1.21.5-1/src/common/headers/caller_info_test.go temporal-1.22.5/src/common/headers/caller_info_test.go --- temporal-1.21.5-1/src/common/headers/caller_info_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/headers/caller_info_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -127,7 +127,7 @@ s.True(ok) s.Equal(existingValue, md.Get(existingKey)[0]) s.Equal(callerName, md.Get(callerNameHeaderName)[0]) - s.Equal(callerType, md.Get(callerTypeHeaderName)[0]) + s.Equal(callerType, md.Get(CallerTypeHeaderName)[0]) s.Equal(callOrigin, md.Get(callOriginHeaderName)[0]) s.Len(md, 4) } @@ -146,7 +146,7 @@ md, ok := metadata.FromIncomingContext(ctx) s.True(ok) s.Equal(callerName, md.Get(callerNameHeaderName)[0]) - s.Equal(callerType, md.Get(callerTypeHeaderName)[0]) + s.Equal(callerType, md.Get(CallerTypeHeaderName)[0]) s.Equal(callOrigin, md.Get(callOriginHeaderName)[0]) s.Len(md, 3) } @@ -169,7 +169,7 @@ md, ok := metadata.FromIncomingContext(ctx) s.True(ok) s.Equal(callerName, md.Get(callerNameHeaderName)[0]) - s.Equal(callerType, md.Get(callerTypeHeaderName)[0]) + s.Equal(callerType, md.Get(CallerTypeHeaderName)[0]) s.Equal(callOrigin, md.Get(callOriginHeaderName)[0]) s.Len(md, 3) } @@ -187,7 +187,7 @@ md, ok := metadata.FromIncomingContext(ctx) s.True(ok) s.Equal(callerName, md.Get(callerNameHeaderName)[0]) - s.Equal(callerType, md.Get(callerTypeHeaderName)[0]) + s.Equal(callerType, md.Get(CallerTypeHeaderName)[0]) s.Empty(md.Get(callOriginHeaderName)) s.Len(md, 2) } diff -Nru temporal-1.21.5-1/src/common/headers/headers.go temporal-1.22.5/src/common/headers/headers.go --- temporal-1.21.5-1/src/common/headers/headers.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/headers/headers.go 2024-02-23 09:45:43.000000000 +0000 @@ -38,7 +38,7 @@ SupportedFeaturesHeaderDelim = "," callerNameHeaderName = "caller-name" - callerTypeHeaderName = "caller-type" + CallerTypeHeaderName = "caller-type" callOriginHeaderName = "call-initiation" ) @@ -50,7 +50,7 @@ SupportedServerVersionsHeaderName, SupportedFeaturesHeaderName, callerNameHeaderName, - callerTypeHeaderName, + CallerTypeHeaderName, callOriginHeaderName, } ) diff -Nru temporal-1.21.5-1/src/common/headers/versionChecker.go temporal-1.22.5/src/common/headers/versionChecker.go --- temporal-1.21.5-1/src/common/headers/versionChecker.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/headers/versionChecker.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,186 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package headers - -import ( - "context" - "fmt" - "strings" - - "github.com/blang/semver/v4" - "golang.org/x/exp/slices" - "google.golang.org/grpc/metadata" - - "go.temporal.io/api/serviceerror" -) - -const ( - ClientNameServer = "temporal-server" - ClientNameGoSDK = "temporal-go" - ClientNameJavaSDK = "temporal-java" - ClientNamePHPSDK = "temporal-php" - ClientNameTypeScriptSDK = "temporal-typescript" - ClientNamePythonSDK = "temporal-python" - ClientNameCLI = "temporal-cli" - ClientNameUI = "temporal-ui" - - ServerVersion = "1.21.5" - - // SupportedServerVersions is used by CLI and inter role communication. - SupportedServerVersions = ">=1.0.0 <2.0.0" - - // FeatureFollowsNextRunID means that the client supports following next execution run id for - // completed/failed/timedout completion events when getting the final result of a workflow. - FeatureFollowsNextRunID = "follows-next-run-id" -) - -var ( - // AllFeatures contains all known features. This list is used as the value of the supported - // features header for internal server requests. There is an assumption that if a feature is - // defined, then the server itself supports it. - AllFeatures = strings.Join([]string{ - FeatureFollowsNextRunID, - }, SupportedFeaturesHeaderDelim) - - SupportedClients = map[string]string{ - ClientNameGoSDK: "<2.0.0", - ClientNameJavaSDK: "<2.0.0", - ClientNamePHPSDK: "<2.0.0", - ClientNameTypeScriptSDK: "<2.0.0", - ClientNameCLI: "<2.0.0", - ClientNameServer: "<2.0.0", - ClientNameUI: "<3.0.0", - } - - internalVersionHeaderPairs = []string{ - ClientNameHeaderName, ClientNameServer, - ClientVersionHeaderName, ServerVersion, - SupportedServerVersionsHeaderName, SupportedServerVersions, - SupportedFeaturesHeaderName, AllFeatures, - } -) - -type ( - // VersionChecker is used to check client/server compatibility and client's capabilities - VersionChecker interface { - ClientSupported(ctx context.Context) error - ClientSupportsFeature(ctx context.Context, feature string) bool - } - - versionChecker struct { - supportedClients map[string]string - supportedClientsRange map[string]semver.Range - serverVersion semver.Version - } -) - -// NewDefaultVersionChecker constructs a new VersionChecker using default versions from const. 
-func NewDefaultVersionChecker() *versionChecker { - return NewVersionChecker(SupportedClients, ServerVersion) -} - -// NewVersionChecker constructs a new VersionChecker -func NewVersionChecker(supportedClients map[string]string, serverVersion string) *versionChecker { - return &versionChecker{ - serverVersion: semver.MustParse(serverVersion), - supportedClients: supportedClients, - supportedClientsRange: mustParseRanges(supportedClients), - } -} - -// GetClientNameAndVersion extracts SDK name and version from context headers -func GetClientNameAndVersion(ctx context.Context) (string, string) { - headers := GetValues(ctx, ClientNameHeaderName, ClientVersionHeaderName) - clientName := headers[0] - clientVersion := headers[1] - return clientName, clientVersion -} - -// SetVersions sets headers for internal communications. -func SetVersions(ctx context.Context) context.Context { - return metadata.AppendToOutgoingContext(ctx, internalVersionHeaderPairs...) -} - -// SetVersionsForTests sets headers as they would be received from the client. -// Must be used in tests only. -func SetVersionsForTests(ctx context.Context, clientVersion, clientName, supportedServerVersions, supportedFeatures string) context.Context { - return metadata.NewIncomingContext(ctx, metadata.New(map[string]string{ - ClientNameHeaderName: clientName, - ClientVersionHeaderName: clientVersion, - SupportedServerVersionsHeaderName: supportedServerVersions, - SupportedFeaturesHeaderName: supportedFeatures, - })) -} - -// ClientSupported returns an error if client is unsupported, nil otherwise. -func (vc *versionChecker) ClientSupported(ctx context.Context) error { - - headers := GetValues(ctx, ClientNameHeaderName, ClientVersionHeaderName, SupportedServerVersionsHeaderName) - clientName := headers[0] - clientVersion := headers[1] - supportedServerVersions := headers[2] - - // Validate client version only if it is provided and server knows about this client. - if clientName != "" && clientVersion != "" { - if supportedClientRange, ok := vc.supportedClientsRange[clientName]; ok { - clientVersionParsed, parseErr := semver.Parse(clientVersion) - if parseErr != nil { - return serviceerror.NewInvalidArgument(fmt.Sprintf("Unable to parse client version: %v", parseErr)) - } - if !supportedClientRange(clientVersionParsed) { - return serviceerror.NewClientVersionNotSupported(clientVersion, clientName, vc.supportedClients[clientName]) - } - } - } - - // Validate supported server version if it is provided. - if supportedServerVersions != "" { - supportedServerVersionsParsed, parseErr := semver.ParseRange(supportedServerVersions) - if parseErr != nil { - return serviceerror.NewInvalidArgument(fmt.Sprintf("Unable to parse supported server versions: %v", parseErr)) - } - if !supportedServerVersionsParsed(vc.serverVersion) { - return serviceerror.NewServerVersionNotSupported(vc.serverVersion.String(), supportedServerVersions) - } - } - - return nil -} - -// ClientSupportsFeature returns true if the client reports support for the -// given feature (which should be one of the Feature... constants above). 
-func (vc *versionChecker) ClientSupportsFeature(ctx context.Context, feature string) bool { - headers := GetValues(ctx, SupportedFeaturesHeaderName) - clientFeatures := strings.Split(headers[0], SupportedFeaturesHeaderDelim) - return slices.Contains(clientFeatures, feature) -} - -func mustParseRanges(ranges map[string]string) map[string]semver.Range { - out := make(map[string]semver.Range, len(ranges)) - for c, r := range ranges { - out[c] = semver.MustParseRange(r) - } - return out -} diff -Nru temporal-1.21.5-1/src/common/headers/versionChecker_test.go temporal-1.22.5/src/common/headers/versionChecker_test.go --- temporal-1.21.5-1/src/common/headers/versionChecker_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/headers/versionChecker_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,141 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- -package headers - -import ( - "context" - "strings" - "testing" - - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - "go.temporal.io/api/serviceerror" -) - -type ( - VersionCheckerSuite struct { - *require.Assertions - suite.Suite - } -) - -func TestVersionCheckerSuite(t *testing.T) { - suite.Run(t, new(VersionCheckerSuite)) -} - -func (s *VersionCheckerSuite) SetupTest() { - s.Assertions = require.New(s.T()) -} - -func (s *VersionCheckerSuite) TestClientSupported() { - serverVersion := "22.8.78" - myFeature := "my-new-feature-flag" - - testCases := []struct { - callContext context.Context - expectErr bool - supportsMyFeature bool - }{ - { - callContext: context.Background(), - expectErr: false, - }, - { - callContext: s.constructCallContext("", "unknown-client", "", ""), - expectErr: false, - }, - { - callContext: s.constructCallContext("0.0.0", "", "", ""), - expectErr: false, - }, - { - callContext: s.constructCallContext("0.0.0", "unknown-client", "", ""), - expectErr: false, - }, - { - callContext: s.constructCallContext("malformed-version", ClientNameGoSDK, "", ""), - expectErr: true, - }, - { - callContext: s.constructCallContext("3.0.1", ClientNameGoSDK, "", ""), - expectErr: true, - }, - { - callContext: s.constructCallContext("2.4.5", ClientNameGoSDK, "", ""), - expectErr: false, - }, - { - callContext: s.constructCallContext("2.4.5", ClientNameGoSDK, "<23.1.0", ""), - expectErr: false, - }, - { - callContext: s.constructCallContext("2.4.5", ClientNameGoSDK, ">23.1.0", ""), - expectErr: true, - }, - { - callContext: s.constructCallContext("2.4.5", ClientNameGoSDK, "<1.0.0 >=3.5.6 || >22.0.0", ""), - expectErr: false, - }, - { - callContext: s.constructCallContext("", ClientNameGoSDK, "<1.0.0 >=3.5.6 || >22.0.0", ""), - expectErr: false, - }, - { - callContext: s.constructCallContext("2.4.5", ClientNameGoSDK, "", myFeature), - supportsMyFeature: true, - }, - { - callContext: s.constructCallContext("2.4.5", ClientNameGoSDK, "", - strings.Join([]string{"another-feature", myFeature, "third-feature"}, SupportedFeaturesHeaderDelim)), - supportsMyFeature: true, - }, - } - - versionChecker := NewVersionChecker(map[string]string{ - ClientNameGoSDK: "<3.0.0", - }, serverVersion) - - for caseIndex, tc := range testCases { - err := versionChecker.ClientSupported(tc.callContext) - if tc.expectErr { - s.Errorf(err, "Case #%d", caseIndex) - switch err.(type) { - case *serviceerror.InvalidArgument, *serviceerror.ClientVersionNotSupported, *serviceerror.ServerVersionNotSupported: - default: - s.Fail("error has wrong type: %T", err) - } - } else { - s.NoErrorf(err, "Case #%d", caseIndex) - } - - if tc.callContext != nil { - s.Equal(tc.supportsMyFeature, versionChecker.ClientSupportsFeature(tc.callContext, myFeature)) - } - } -} - -func (s *VersionCheckerSuite) constructCallContext(clientVersion, clientName, supportedServerVersions, supportedFeatures string) context.Context { - return SetVersionsForTests(context.Background(), clientVersion, clientName, supportedServerVersions, supportedFeatures) -} diff -Nru temporal-1.21.5-1/src/common/headers/version_checker.go temporal-1.22.5/src/common/headers/version_checker.go --- temporal-1.21.5-1/src/common/headers/version_checker.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/headers/version_checker.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,187 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package headers + +import ( + "context" + "fmt" + "strings" + + "github.com/blang/semver/v4" + "golang.org/x/exp/slices" + "google.golang.org/grpc/metadata" + + "go.temporal.io/api/serviceerror" +) + +const ( + ClientNameServer = "temporal-server" + ClientNameServerHTTP = "temporal-server-http" + ClientNameGoSDK = "temporal-go" + ClientNameJavaSDK = "temporal-java" + ClientNamePHPSDK = "temporal-php" + ClientNameTypeScriptSDK = "temporal-typescript" + ClientNamePythonSDK = "temporal-python" + ClientNameCLI = "temporal-cli" + ClientNameUI = "temporal-ui" + + ServerVersion = "1.22.5" + + // SupportedServerVersions is used by CLI and inter role communication. + SupportedServerVersions = ">=1.0.0 <2.0.0" + + // FeatureFollowsNextRunID means that the client supports following next execution run id for + // completed/failed/timedout completion events when getting the final result of a workflow. + FeatureFollowsNextRunID = "follows-next-run-id" +) + +var ( + // AllFeatures contains all known features. This list is used as the value of the supported + // features header for internal server requests. There is an assumption that if a feature is + // defined, then the server itself supports it. + AllFeatures = strings.Join([]string{ + FeatureFollowsNextRunID, + }, SupportedFeaturesHeaderDelim) + + SupportedClients = map[string]string{ + ClientNameGoSDK: "<2.0.0", + ClientNameJavaSDK: "<2.0.0", + ClientNamePHPSDK: "<2.0.0", + ClientNameTypeScriptSDK: "<2.0.0", + ClientNameCLI: "<2.0.0", + ClientNameServer: "<2.0.0", + ClientNameUI: "<3.0.0", + } + + internalVersionHeaderPairs = []string{ + ClientNameHeaderName, ClientNameServer, + ClientVersionHeaderName, ServerVersion, + SupportedServerVersionsHeaderName, SupportedServerVersions, + SupportedFeaturesHeaderName, AllFeatures, + } +) + +type ( + // VersionChecker is used to check client/server compatibility and client's capabilities + VersionChecker interface { + ClientSupported(ctx context.Context) error + ClientSupportsFeature(ctx context.Context, feature string) bool + } + + versionChecker struct { + supportedClients map[string]string + supportedClientsRange map[string]semver.Range + serverVersion semver.Version + } +) + +// NewDefaultVersionChecker constructs a new VersionChecker using default versions from const. 
+func NewDefaultVersionChecker() *versionChecker { + return NewVersionChecker(SupportedClients, ServerVersion) +} + +// NewVersionChecker constructs a new VersionChecker +func NewVersionChecker(supportedClients map[string]string, serverVersion string) *versionChecker { + return &versionChecker{ + serverVersion: semver.MustParse(serverVersion), + supportedClients: supportedClients, + supportedClientsRange: mustParseRanges(supportedClients), + } +} + +// GetClientNameAndVersion extracts SDK name and version from context headers +func GetClientNameAndVersion(ctx context.Context) (string, string) { + headers := GetValues(ctx, ClientNameHeaderName, ClientVersionHeaderName) + clientName := headers[0] + clientVersion := headers[1] + return clientName, clientVersion +} + +// SetVersions sets headers for internal communications. +func SetVersions(ctx context.Context) context.Context { + return metadata.AppendToOutgoingContext(ctx, internalVersionHeaderPairs...) +} + +// SetVersionsForTests sets headers as they would be received from the client. +// Must be used in tests only. +func SetVersionsForTests(ctx context.Context, clientVersion, clientName, supportedServerVersions, supportedFeatures string) context.Context { + return metadata.NewIncomingContext(ctx, metadata.New(map[string]string{ + ClientNameHeaderName: clientName, + ClientVersionHeaderName: clientVersion, + SupportedServerVersionsHeaderName: supportedServerVersions, + SupportedFeaturesHeaderName: supportedFeatures, + })) +} + +// ClientSupported returns an error if client is unsupported, nil otherwise. +func (vc *versionChecker) ClientSupported(ctx context.Context) error { + + headers := GetValues(ctx, ClientNameHeaderName, ClientVersionHeaderName, SupportedServerVersionsHeaderName) + clientName := headers[0] + clientVersion := headers[1] + supportedServerVersions := headers[2] + + // Validate client version only if it is provided and server knows about this client. + if clientName != "" && clientVersion != "" { + if supportedClientRange, ok := vc.supportedClientsRange[clientName]; ok { + clientVersionParsed, parseErr := semver.Parse(clientVersion) + if parseErr != nil { + return serviceerror.NewInvalidArgument(fmt.Sprintf("Unable to parse client version: %v", parseErr)) + } + if !supportedClientRange(clientVersionParsed) { + return serviceerror.NewClientVersionNotSupported(clientVersion, clientName, vc.supportedClients[clientName]) + } + } + } + + // Validate supported server version if it is provided. + if supportedServerVersions != "" { + supportedServerVersionsParsed, parseErr := semver.ParseRange(supportedServerVersions) + if parseErr != nil { + return serviceerror.NewInvalidArgument(fmt.Sprintf("Unable to parse supported server versions: %v", parseErr)) + } + if !supportedServerVersionsParsed(vc.serverVersion) { + return serviceerror.NewServerVersionNotSupported(vc.serverVersion.String(), supportedServerVersions) + } + } + + return nil +} + +// ClientSupportsFeature returns true if the client reports support for the +// given feature (which should be one of the Feature... constants above). 
+func (vc *versionChecker) ClientSupportsFeature(ctx context.Context, feature string) bool { + headers := GetValues(ctx, SupportedFeaturesHeaderName) + clientFeatures := strings.Split(headers[0], SupportedFeaturesHeaderDelim) + return slices.Contains(clientFeatures, feature) +} + +func mustParseRanges(ranges map[string]string) map[string]semver.Range { + out := make(map[string]semver.Range, len(ranges)) + for c, r := range ranges { + out[c] = semver.MustParseRange(r) + } + return out +} diff -Nru temporal-1.21.5-1/src/common/headers/version_checker_test.go temporal-1.22.5/src/common/headers/version_checker_test.go --- temporal-1.21.5-1/src/common/headers/version_checker_test.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/headers/version_checker_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,141 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
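
Illustrative sketch, not part of the patch: before the re-added test file below, this is how the new version_checker.go is typically driven, using only the constructors and context helpers shown above (SetVersionsForTests is a test-only helper).

package main

import (
	"context"
	"fmt"

	"go.temporal.io/server/common/headers"
)

func main() {
	// Accept Go SDK clients below 3.0.0 against the 1.22.5 server version.
	vc := headers.NewVersionChecker(map[string]string{
		headers.ClientNameGoSDK: "<3.0.0",
	}, headers.ServerVersion)

	// Fake the headers a client would send on an incoming request.
	ctx := headers.SetVersionsForTests(
		context.Background(),
		"2.4.5",                         // client version
		headers.ClientNameGoSDK,         // client name
		headers.SupportedServerVersions, // server versions the client accepts
		headers.FeatureFollowsNextRunID, // features the client supports
	)

	fmt.Println(vc.ClientSupported(ctx))                                        // <nil>
	fmt.Println(vc.ClientSupportsFeature(ctx, headers.FeatureFollowsNextRunID)) // true
}
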
+ +package headers + +import ( + "context" + "strings" + "testing" + + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "go.temporal.io/api/serviceerror" +) + +type ( + VersionCheckerSuite struct { + *require.Assertions + suite.Suite + } +) + +func TestVersionCheckerSuite(t *testing.T) { + suite.Run(t, new(VersionCheckerSuite)) +} + +func (s *VersionCheckerSuite) SetupTest() { + s.Assertions = require.New(s.T()) +} + +func (s *VersionCheckerSuite) TestClientSupported() { + serverVersion := "22.8.78" + myFeature := "my-new-feature-flag" + + testCases := []struct { + callContext context.Context + expectErr bool + supportsMyFeature bool + }{ + { + callContext: context.Background(), + expectErr: false, + }, + { + callContext: s.constructCallContext("", "unknown-client", "", ""), + expectErr: false, + }, + { + callContext: s.constructCallContext("0.0.0", "", "", ""), + expectErr: false, + }, + { + callContext: s.constructCallContext("0.0.0", "unknown-client", "", ""), + expectErr: false, + }, + { + callContext: s.constructCallContext("malformed-version", ClientNameGoSDK, "", ""), + expectErr: true, + }, + { + callContext: s.constructCallContext("3.0.1", ClientNameGoSDK, "", ""), + expectErr: true, + }, + { + callContext: s.constructCallContext("2.4.5", ClientNameGoSDK, "", ""), + expectErr: false, + }, + { + callContext: s.constructCallContext("2.4.5", ClientNameGoSDK, "<23.1.0", ""), + expectErr: false, + }, + { + callContext: s.constructCallContext("2.4.5", ClientNameGoSDK, ">23.1.0", ""), + expectErr: true, + }, + { + callContext: s.constructCallContext("2.4.5", ClientNameGoSDK, "<1.0.0 >=3.5.6 || >22.0.0", ""), + expectErr: false, + }, + { + callContext: s.constructCallContext("", ClientNameGoSDK, "<1.0.0 >=3.5.6 || >22.0.0", ""), + expectErr: false, + }, + { + callContext: s.constructCallContext("2.4.5", ClientNameGoSDK, "", myFeature), + supportsMyFeature: true, + }, + { + callContext: s.constructCallContext("2.4.5", ClientNameGoSDK, "", + strings.Join([]string{"another-feature", myFeature, "third-feature"}, SupportedFeaturesHeaderDelim)), + supportsMyFeature: true, + }, + } + + versionChecker := NewVersionChecker(map[string]string{ + ClientNameGoSDK: "<3.0.0", + }, serverVersion) + + for caseIndex, tc := range testCases { + err := versionChecker.ClientSupported(tc.callContext) + if tc.expectErr { + s.Errorf(err, "Case #%d", caseIndex) + switch err.(type) { + case *serviceerror.InvalidArgument, *serviceerror.ClientVersionNotSupported, *serviceerror.ServerVersionNotSupported: + default: + s.Fail("error has wrong type: %T", err) + } + } else { + s.NoErrorf(err, "Case #%d", caseIndex) + } + + if tc.callContext != nil { + s.Equal(tc.supportsMyFeature, versionChecker.ClientSupportsFeature(tc.callContext, myFeature)) + } + } +} + +func (s *VersionCheckerSuite) constructCallContext(clientVersion, clientName, supportedServerVersions, supportedFeatures string) context.Context { + return SetVersionsForTests(context.Background(), clientVersion, clientName, supportedServerVersions, supportedFeatures) +} diff -Nru temporal-1.21.5-1/src/common/locks/id_mutex.go temporal-1.22.5/src/common/locks/id_mutex.go --- temporal-1.21.5-1/src/common/locks/id_mutex.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/locks/id_mutex.go 2024-02-23 09:45:43.000000000 +0000 @@ -108,6 +108,7 @@ shard := idMutex.shards[idMutex.getShardIndex(identifier)] shard.Lock() + defer shard.Unlock() mutexInfo, ok := shard.mutexInfos[identifier] if !ok { panic("cannot find workflow 
lock") @@ -118,7 +119,6 @@ } else { mutexInfo.waitCount-- } - shard.Unlock() } func (idMutex *idMutexImpl) getShardIndex(key interface{}) uint32 { diff -Nru temporal-1.21.5-1/src/common/membership/grpc_resolver_test.go temporal-1.22.5/src/common/membership/grpc_resolver_test.go --- temporal-1.21.5-1/src/common/membership/grpc_resolver_test.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/membership/grpc_resolver_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,127 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package membership + +import ( + "context" + "net" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.temporal.io/server/common/primitives" + "go.temporal.io/server/internal/nettest" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" +) + +func TestGRPCBuilder(t *testing.T) { + t.Parallel() + + // There's a lot of channel stuff in this test, so we use a context to make sure we don't hang forever if something + // goes wrong. + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + ctrl := gomock.NewController(t) + sr := NewMockServiceResolver(ctrl) + + // On the first call to [ServiceResolver.Members], return an empty list of members + sr.EXPECT().Members().Return([]HostInfo{}) + // Once our resolver registers a listener to membership changes, get a hold of the channel it's listening on. + sr.EXPECT().AddListener(gomock.Any(), gomock.Any()).Do(func(_ string, ch chan<- *ChangedEvent) { + // Return a single member on the next call to [ServiceResolver.Members]. This simulates a temporary network + // partition where we can't find any hosts for the frontend for a short period of time, but then we get a host. + sr.EXPECT().Members().Return([]HostInfo{ + NewHostInfoFromAddress("localhost:1234"), + }).MinTimes(1) // MinTimes(1) because we don't control when ResolveNow is called + + // After the first call to [ServiceResolver.Members] returns an empty list, expect our resolver to request a + // refresh of the members list. 
When it does, notify the listener that the members list has changed + sr.EXPECT().RequestRefresh().Do(func() { + select { + case <-ctx.Done(): + case ch <- &ChangedEvent{}: + } + }) + }) + + monitor := NewMockMonitor(ctrl) + monitor.EXPECT().GetResolver(primitives.FrontendService).Return(sr, nil) + + // Start a fake local server and then dial it. + serverErrs := make(chan error) + p := nettest.NewPipe() + + // This is our fake server. It accepts a connection and then immediately closes it. + go func() { + conn, _ := p.Accept(ctx.Done()) + serverErrs <- conn.Close() + }() + + // This is where we invoke the code under test. We dial the frontend service. The URL should use our custom + // protocol, and then our resolver should resolve this to the localhost:1234 address. + resolverBuilder := &grpcBuilder{} + resolverBuilder.monitor.Store(monitor) + + url := (&GRPCResolver{}).MakeURL(primitives.FrontendService) + assert.Equal(t, "membership://frontend", url) + + // dialedAddress is the actual address that the gRPC framework dialed after resolving the URL using our resolver. + var dialedAddress string + + conn, err := grpc.Dial( + url, + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithResolvers(resolverBuilder), + grpc.WithContextDialer(func(ctx context.Context, s string) (net.Conn, error) { + dialedAddress = s + return p.Connect(ctx.Done()) + }), + ) + require.NoError(t, err) + + require.NoError(t, <-serverErrs) + + // The gRPC library calls [resolver.Resolver.Close] when the connection is closed in a background goroutine, so we + // can't synchronously assert that [ServiceResolver.RemoveListener] was called right after the connection is closed. + // Instead, we use a channel to signal that the listener was removed. + listenerRemoved := make(chan struct{}) + + sr.EXPECT().RemoveListener(gomock.Any()).Do(func(string) { + close(listenerRemoved) + }) + assert.NoError(t, conn.Close()) + select { + case <-ctx.Done(): + t.Fatal("timed out waiting for resolver to be removed") + case <-listenerRemoved: + } + + // Verify that the address we dialed was the address of the single host in the members list. 
+ assert.Equal(t, "localhost:1234", dialedAddress) +} diff -Nru temporal-1.21.5-1/src/common/membership/ringpop/monitor_test.go temporal-1.22.5/src/common/membership/ringpop/monitor_test.go --- temporal-1.21.5-1/src/common/membership/ringpop/monitor_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/membership/ringpop/monitor_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -77,12 +77,14 @@ s.T().Log("Killing host 1") testService.KillHost(testService.hostUUIDs[1]) + timer := time.NewTimer(time.Minute) select { case e := <-listenCh: + timer.Stop() s.Equal(1, len(e.HostsRemoved), "ringpop monitor event does not report the removed host") s.Equal(testService.hostAddrs[1], e.HostsRemoved[0].GetAddress(), "ringpop monitor reported that a wrong host was removed") s.Nil(e.HostsAdded, "Unexpected host reported to be added by ringpop monitor") - case <-time.After(time.Minute): + case <-timer.C: s.Fail("Timed out waiting for failure to be detected by ringpop") } diff -Nru temporal-1.21.5-1/src/common/membership/ringpop/ringpop.go temporal-1.22.5/src/common/membership/ringpop/ringpop.go --- temporal-1.21.5-1/src/common/membership/ringpop/ringpop.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/membership/ringpop/ringpop.go 2024-02-23 09:45:43.000000000 +0000 @@ -45,7 +45,7 @@ ) type ( - // service is a wrapper around ringpop.Ringpop that implements common.Daemon + // service is a wrapper around ringpop.Ringpop service struct { status int32 *ringpop.Ringpop diff -Nru temporal-1.21.5-1/src/common/metrics/config.go temporal-1.22.5/src/common/metrics/config.go --- temporal-1.21.5-1/src/common/metrics/config.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/metrics/config.go 2024-02-23 09:45:43.000000000 +0000 @@ -29,7 +29,7 @@ "fmt" "time" - "github.com/cactus/go-statsd-client/statsd" + "github.com/cactus/go-statsd-client/v5/statsd" prom "github.com/prometheus/client_golang/prometheus" "github.com/uber-go/tally/v4" "github.com/uber-go/tally/v4/m3" @@ -448,9 +448,9 @@ } // MetricsHandlerFromConfig is used at startup to construct a MetricsHandler -func MetricsHandlerFromConfig(logger log.Logger, c *Config) Handler { +func MetricsHandlerFromConfig(logger log.Logger, c *Config) (Handler, error) { if c == nil { - return NoopMetricsHandler + return NoopMetricsHandler, nil } setDefaultPerUnitHistogramBoundaries(&c.ClientConfig) @@ -467,7 +467,7 @@ return NewTallyMetricsHandler( c.ClientConfig, NewScope(logger, c), - ) + ), nil } func configExcludeTags(cfg ClientConfig) map[string]map[string]struct{} { diff -Nru temporal-1.21.5-1/src/common/metrics/config_test.go temporal-1.22.5/src/common/metrics/config_test.go --- temporal-1.21.5-1/src/common/metrics/config_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/metrics/config_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -28,6 +28,7 @@ "testing" "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" "github.com/uber-go/tally/v4" @@ -160,3 +161,54 @@ s.Equal(test.expectResult, config.PerUnitHistogramBoundaries) } } + +func TestMetricsHandlerFromConfig(t *testing.T) { + t.Parallel() + + logger := log.NewTestLogger() + + for _, c := range []struct { + name string + cfg *Config + expectedType interface{} + }{ + { + name: "nil config", + cfg: nil, + expectedType: &noopMetricsHandler{}, + }, + { + name: "tally", + cfg: &Config{ + Prometheus: &PrometheusConfig{ + Framework: FrameworkTally, + 
ListenAddress: "localhost:0", + }, + }, + expectedType: &tallyMetricsHandler{}, + }, + { + name: "opentelemetry", + cfg: &Config{ + Prometheus: &PrometheusConfig{ + Framework: FrameworkOpentelemetry, + ListenAddress: "localhost:0", + }, + }, + expectedType: &otelMetricsHandler{}, + }, + } { + c := c + t.Run(c.name, func(t *testing.T) { + t.Parallel() + + handler, err := MetricsHandlerFromConfig(logger, c.cfg) + require.NoError(t, err) + t.Cleanup(func() { + handler.Stop(logger) + }) + assert.IsType(t, c.expectedType, handler) + }) + } + +} diff -Nru temporal-1.21.5-1/src/common/metrics/defs.go temporal-1.22.5/src/common/metrics/defs.go --- temporal-1.21.5-1/src/common/metrics/defs.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/metrics/defs.go 2024-02-23 09:45:43.000000000 +0000 @@ -31,8 +31,9 @@ // metricDefinition contains the definition for a metric metricDefinition struct { - name string - unit MetricUnit + name string + description string + unit MetricUnit } ) @@ -52,26 +53,22 @@ return md.unit } -func NewTimerDef(name string) metricDefinition { - return metricDefinition{name: name, unit: Milliseconds} +func NewTimerDef(name string, opts ...Option) metricDefinition { + return globalRegistry.register(name, append(opts, WithUnit(Milliseconds))...) } -func NewBytesHistogramDef(name string) metricDefinition { - return metricDefinition{name: name, unit: Bytes} +func NewBytesHistogramDef(name string, opts ...Option) metricDefinition { + return globalRegistry.register(name, append(opts, WithUnit(Bytes))...) } -func NewDimensionlessHistogramDef(name string) metricDefinition { - return metricDefinition{name: name, unit: Dimensionless} +func NewDimensionlessHistogramDef(name string, opts ...Option) metricDefinition { + return globalRegistry.register(name, append(opts, WithUnit(Dimensionless))...) } -func NewTimeHistogramDef(name string) metricDefinition { - return metricDefinition{name: name, unit: Milliseconds} +func NewCounterDef(name string, opts ...Option) metricDefinition { + return globalRegistry.register(name, opts...) } -func NewCounterDef(name string) metricDefinition { - return metricDefinition{name: name} -} - -func NewGaugeDef(name string) metricDefinition { - return metricDefinition{name: name} +func NewGaugeDef(name string, opts ...Option) metricDefinition { + return globalRegistry.register(name, opts...) 
} diff -Nru temporal-1.21.5-1/src/common/metrics/metric_defs.go temporal-1.22.5/src/common/metrics/metric_defs.go --- temporal-1.21.5-1/src/common/metrics/metric_defs.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/metrics/metric_defs.go 2024-02-23 09:45:43.000000000 +0000 @@ -40,6 +40,7 @@ visibilityTypeTagName = "visibility_type" ErrorTypeTagName = "error_type" httpStatusTagName = "http_status" + versionedTagName = "versioned" resourceExhaustedTag = "resource_exhausted_cause" standardVisibilityTagValue = "standard_visibility" advancedVisibilityTagValue = "advanced_visibility" @@ -113,6 +114,8 @@ AdminClientGetReplicationMessagesScope = "AdminClientGetReplicationMessages" // AdminClientGetNamespaceReplicationMessagesScope tracks RPC calls to admin service AdminClientGetNamespaceReplicationMessagesScope = "AdminClientGetNamespaceReplicationMessages" + // AdminClientGetNamespaceScope tracks RPC calls to admin service + AdminClientGetNamespaceScope = "AdminClientGetNamespace" // AdminClientGetDLQReplicationMessagesScope tracks RPC calls to admin service AdminClientGetDLQReplicationMessagesScope = "AdminClientGetDLQReplicationMessages" // AdminClientReapplyEventsScope tracks RPC calls to admin service @@ -131,79 +134,8 @@ AdminClientGetTaskQueueTasksScope = "AdminClientGetTaskQueueTasks" // AdminClientDeleteWorkflowExecutionScope tracks RPC calls to admin service AdminClientDeleteWorkflowExecutionScope = "AdminClientDeleteWorkflowExecution" - - // AdminDescribeHistoryHostScope is the metric scope for admin.AdminDescribeHistoryHost - AdminDescribeHistoryHostScope = "AdminDescribeHistoryHost" - // AdminAddSearchAttributesScope is the metric scope for admin.AdminAddSearchAttributes - AdminAddSearchAttributesScope = "AdminAddSearchAttributes" - // AdminRemoveSearchAttributesScope is the metric scope for admin.AdminRemoveSearchAttributes - AdminRemoveSearchAttributesScope = "AdminRemoveSearchAttributes" - // AdminGetSearchAttributesScope is the metric scope for admin.AdminGetSearchAttributes - AdminGetSearchAttributesScope = "AdminGetSearchAttributes" - // AdminRebuildMutableStateScope is the metric scope for admin.AdminRebuildMutableState - AdminRebuildMutableStateScope = "AdminRebuildMutableState" - // AdminDescribeMutableStateScope is the metric scope for admin.AdminDescribeMutableState - AdminDescribeMutableStateScope = "AdminDescribeMutableState" - // AdminGetWorkflowExecutionRawHistoryV2Scope is the metric scope for admin.GetWorkflowExecutionRawHistoryScope - AdminGetWorkflowExecutionRawHistoryV2Scope = "AdminGetWorkflowExecutionRawHistoryV2" - // AdminGetReplicationMessagesScope is the metric scope for admin.GetReplicationMessages - AdminGetReplicationMessagesScope = "AdminGetReplicationMessages" - // AdminGetNamespaceReplicationMessagesScope is the metric scope for admin.GetNamespaceReplicationMessages - AdminGetNamespaceReplicationMessagesScope = "AdminGetNamespaceReplicationMessages" - // AdminGetDLQReplicationMessagesScope is the metric scope for admin.GetDLQReplicationMessages - AdminGetDLQReplicationMessagesScope = "AdminGetDLQReplicationMessages" - // AdminReapplyEventsScope is the metric scope for admin.ReapplyEvents - AdminReapplyEventsScope = "AdminReapplyEvents" - // AdminRefreshWorkflowTasksScope is the metric scope for admin.RefreshWorkflowTasks - AdminRefreshWorkflowTasksScope = "AdminRefreshWorkflowTasks" - // AdminResendReplicationTasksScope is the metric scope for admin.ResendReplicationTasks - AdminResendReplicationTasksScope = 
"AdminResendReplicationTasks" - // AdminGetTaskQueueTasksScope is the metric scope for admin.GetTaskQueueTasks - AdminGetTaskQueueTasksScope = "AdminGetTaskQueueTasks" - // AdminRemoveTaskScope is the metric scope for admin.AdminRemoveTask - AdminRemoveTaskScope = "AdminRemoveTask" - // AdminCloseShardScope is the metric scope for admin.AdminCloseShard - AdminCloseShardScope = "AdminCloseShard" - // AdminGetShardScope is the metric scope for admin.AdminGetShard - AdminGetShardScope = "AdminGetShard" - // AdminListHistoryTasksScope is the metric scope for admin.ListHistoryTasks - AdminListHistoryTasksScope = "AdminListHistoryTasks" - // AdminGetDLQMessagesScope is the metric scope for admin.AdminGetDLQMessages - AdminGetDLQMessagesScope = "AdminGetDLQMessages" - // AdminPurgeDLQMessagesScope is the metric scope for admin.AdminPurgeDLQMessages - AdminPurgeDLQMessagesScope = "AdminPurgeDLQMessages" - // AdminMergeDLQMessagesScope is the metric scope for admin.AdminMergeDLQMessages - AdminMergeDLQMessagesScope = "AdminMergeDLQMessages" - // AdminListClusterMembersScope is the metric scope for admin.AdminListClusterMembers - AdminListClusterMembersScope = "AdminListClusterMembers" - // AdminDescribeClusterScope is the metric scope for admin.AdminDescribeCluster - AdminDescribeClusterScope = "AdminDescribeCluster" - // AdminListClustersScope is the metric scope for admin.AdminListClusters - AdminListClustersScope = "AdminListClusters" - // AdminAddOrUpdateRemoteClusterScope is the metric scope for admin.AdminAddOrUpdateRemoteCluster - AdminAddOrUpdateRemoteClusterScope = "AdminAddOrUpdateRemoteCluster" - // AdminRemoveRemoteClusterScope is the metric scope for admin.AdminRemoveRemoteCluster - AdminRemoveRemoteClusterScope = "AdminRemoveRemoteCluster" - // AdminDeleteWorkflowExecutionScope is the metric scope for admin.AdminDeleteWorkflowExecution - AdminDeleteWorkflowExecutionScope = "AdminDeleteWorkflowExecution" - // AdminStreamWorkflowReplicationMessagesScope is the metric scope for admin.AdminStreamReplicationMessages - AdminStreamWorkflowReplicationMessagesScope = "AdminStreamWorkflowReplicationMessages" - - // OperatorAddSearchAttributesScope is the metric scope for operator.AddSearchAttributes - OperatorAddSearchAttributesScope - // OperatorRemoveSearchAttributesScope is the metric scope for operator.RemoveSearchAttributes - OperatorRemoveSearchAttributesScope = "OperatorRemoveSearchAttributes" - // OperatorListSearchAttributesScope is the metric scope for operator.ListSearchAttributes - OperatorListSearchAttributesScope = "OperatorListSearchAttributes" - OperatorDeleteNamespaceScope = "OperatorDeleteNamespace" - // OperatorAddOrUpdateRemoteClusterScope is the metric scope for operator.AddOrUpdateRemoteCluster - OperatorAddOrUpdateRemoteClusterScope = "OperatorAddOrUpdateRemoteCluster" - // OperatorRemoveRemoteClusterScope is the metric scope for operator.RemoveRemoteCluster - OperatorRemoveRemoteClusterScope = "OperatorRemoveRemoteCluster" - // OperatorListClustersScope is the metric scope for operator.OperatorListClusters - OperatorListClustersScope = "OperatorListClusters" - // OperatorDeleteWorkflowExecutionScope is the metric scope for operator.DeleteWorkflowExecution - OperatorDeleteWorkflowExecutionScope = "OperatorDeleteWorkflowExecution" + // AdminClientStreamWorkflowReplicationMessagesScope tracks RPC calls to admin service + AdminClientStreamWorkflowReplicationMessagesScope = "AdminClientStreamWorkflowReplicationMessages" ) // Frontend Client Operations @@ -420,6 +352,10 @@ 
HistoryClientGetDLQReplicationMessagesScope = "HistoryClientGetDLQReplicationMessages" // HistoryClientGetShardScope tracks RPC calls to history service HistoryClientGetShardScope = "HistoryClientGetShard" + // HistoryClientIsActivityTaskValidScope tracks RPC calls to history service + HistoryClientIsActivityTaskValidScope = "HistoryClientIsActivityTaskValid" + // HistoryClientIsWorkflowTaskValidScope tracks RPC calls to history service + HistoryClientIsWorkflowTaskValidScope = "HistoryClientIsWorkflowTaskValid" // HistoryClientRebuildMutableStateScope tracks RPC calls to history service HistoryClientRebuildMutableStateScope = "HistoryClientRebuildMutableState" // HistoryClientRemoveTaskScope tracks RPC calls to history service @@ -707,18 +643,16 @@ // Frontend Scope const ( - // FrontendStartWorkflowExecutionScope is the metric scope for frontend.StartWorkflowExecution - FrontendStartWorkflowExecutionScope = "StartWorkflowExecution" + // AdminGetWorkflowExecutionRawHistoryV2Scope is the metric scope for admin.GetWorkflowExecutionRawHistoryScope + AdminGetWorkflowExecutionRawHistoryV2Scope = "AdminGetWorkflowExecutionRawHistoryV2" + + // OperatorAddSearchAttributesScope is the metric scope for operator.AddSearchAttributes + OperatorAddSearchAttributesScope = "OperatorAddSearchAttributes" + // OperatorDeleteNamespaceScope is the metric scope for operator.OperatorDeleteNamespace + OperatorDeleteNamespaceScope = "OperatorDeleteNamespace" + // FrontendPollWorkflowTaskQueueScope is the metric scope for frontend.PollWorkflowTaskQueue FrontendPollWorkflowTaskQueueScope = "PollWorkflowTaskQueue" - // FrontendPollActivityTaskQueueScope is the metric scope for frontend.PollActivityTaskQueue - FrontendPollActivityTaskQueueScope = "PollActivityTaskQueue" - // FrontendRecordActivityTaskHeartbeatScope is the metric scope for frontend.RecordActivityTaskHeartbeat - FrontendRecordActivityTaskHeartbeatScope = "RecordActivityTaskHeartbeat" - // FrontendRecordActivityTaskHeartbeatByIdScope is the metric scope for frontend.RecordActivityTaskHeartbeatById - FrontendRecordActivityTaskHeartbeatByIdScope = "RecordActivityTaskHeartbeatById" - // FrontendRespondWorkflowTaskCompletedScope is the metric scope for frontend.RespondWorkflowTaskCompleted - FrontendRespondWorkflowTaskCompletedScope = "RespondWorkflowTaskCompleted" // FrontendRespondWorkflowTaskFailedScope is the metric scope for frontend.RespondWorkflowTaskFailed FrontendRespondWorkflowTaskFailedScope = "RespondWorkflowTaskFailed" // FrontendRespondQueryTaskCompletedScope is the metric scope for frontend.RespondQueryTaskCompleted @@ -745,10 +679,6 @@ FrontendGetWorkflowExecutionRawHistoryScope = "GetWorkflowExecutionRawHistory" // FrontendPollForWorkflowExecutionRawHistoryScope is the metric scope for frontend.GetWorkflowExecutionRawHistory FrontendPollForWorkflowExecutionRawHistoryScope = "PollForWorkflowExecutionRawHistory" - // FrontendSignalWorkflowExecutionScope is the metric scope for frontend.SignalWorkflowExecution - FrontendSignalWorkflowExecutionScope = "SignalWorkflowExecution" - // FrontendSignalWithStartWorkflowExecutionScope is the metric scope for frontend.SignalWithStartWorkflowExecution - FrontendSignalWithStartWorkflowExecutionScope = "SignalWithStartWorkflowExecution" // FrontendTerminateWorkflowExecutionScope is the metric scope for frontend.TerminateWorkflowExecution FrontendTerminateWorkflowExecutionScope = "TerminateWorkflowExecution" // FrontendDeleteWorkflowExecutionScope is the metric scope for frontend.DeleteWorkflowExecution @@ 
-775,8 +705,6 @@ FrontendUpdateNamespaceScope = "UpdateNamespace" // FrontendDeprecateNamespaceScope is the metric scope for frontend.DeprecateNamespace FrontendDeprecateNamespaceScope = "DeprecateNamespace" - // FrontendQueryWorkflowScope is the metric scope for frontend.QueryWorkflow - FrontendQueryWorkflowScope = "QueryWorkflow" // FrontendDescribeWorkflowExecutionScope is the metric scope for frontend.DescribeWorkflowExecution FrontendDescribeWorkflowExecutionScope = "DescribeWorkflowExecution" // FrontendDescribeTaskQueueScope is the metric scope for frontend.DescribeTaskQueue @@ -787,8 +715,6 @@ FrontendResetStickyTaskQueueScope = "ResetStickyTaskQueue" // FrontendListNamespacesScope is the metric scope for frontend.ListNamespace FrontendListNamespacesScope = "ListNamespaces" - // FrontendResetWorkflowExecutionScope is the metric scope for frontend.ResetWorkflowExecution - FrontendResetWorkflowExecutionScope = "ResetWorkflowExecution" // FrontendGetSearchAttributesScope is the metric scope for frontend.GetSearchAttributes FrontendGetSearchAttributesScope = "GetSearchAttributes" // FrontendGetClusterInfoScope is the metric scope for frontend.GetClusterInfo @@ -1217,56 +1143,66 @@ ) var ( - ServiceRequests = NewCounterDef("service_requests") - ServicePendingRequests = NewGaugeDef("service_pending_requests") - ServiceFailures = NewCounterDef("service_errors") - ServiceErrorWithType = NewCounterDef("service_error_with_type") - ServiceCriticalFailures = NewCounterDef("service_errors_critical") - ServiceLatency = NewTimerDef("service_latency") - ServiceLatencyNoUserLatency = NewTimerDef("service_latency_nouserlatency") - ServiceLatencyUserLatency = NewTimerDef("service_latency_userlatency") - ServiceErrInvalidArgumentCounter = NewCounterDef("service_errors_invalid_argument") - ServiceErrNamespaceNotActiveCounter = NewCounterDef("service_errors_namespace_not_active") - ServiceErrResourceExhaustedCounter = NewCounterDef("service_errors_resource_exhausted") - ServiceErrNotFoundCounter = NewCounterDef("service_errors_entity_not_found") - ServiceErrExecutionAlreadyStartedCounter = NewCounterDef("service_errors_execution_already_started") - ServiceErrNamespaceAlreadyExistsCounter = NewCounterDef("service_errors_namespace_already_exists") - ServiceErrCancellationAlreadyRequestedCounter = NewCounterDef("service_errors_cancellation_already_requested") - ServiceErrQueryFailedCounter = NewCounterDef("service_errors_query_failed") - ServiceErrContextCancelledCounter = NewCounterDef("service_errors_context_cancelled") - ServiceErrContextTimeoutCounter = NewCounterDef("service_errors_context_timeout") - ServiceErrRetryTaskCounter = NewCounterDef("service_errors_retry_task") - ServiceErrBadBinaryCounter = NewCounterDef("service_errors_bad_binary") - ServiceErrClientVersionNotSupportedCounter = NewCounterDef("service_errors_client_version_not_supported") - ServiceErrIncompleteHistoryCounter = NewCounterDef("service_errors_incomplete_history") - ServiceErrNonDeterministicCounter = NewCounterDef("service_errors_nondeterministic") - ServiceErrUnauthorizedCounter = NewCounterDef("service_errors_unauthorized") - ServiceErrAuthorizeFailedCounter = NewCounterDef("service_errors_authorize_failed") - ActionCounter = NewCounterDef("action") - TlsCertsExpired = NewGaugeDef("certificates_expired") - TlsCertsExpiring = NewGaugeDef("certificates_expiring") - ServiceAuthorizationLatency = NewTimerDef("service_authorization_latency") - EventBlobSize = NewBytesHistogramDef("event_blob_size") - 
NamespaceCachePrepareCallbacksLatency = NewTimerDef("namespace_cache_prepare_callbacks_latency") - NamespaceCacheCallbacksLatency = NewTimerDef("namespace_cache_callbacks_latency") - LockRequests = NewCounterDef("lock_requests") - LockFailures = NewCounterDef("lock_failures") - LockLatency = NewTimerDef("lock_latency") - ClientRequests = NewCounterDef("client_requests") - ClientFailures = NewCounterDef("client_errors") - ClientLatency = NewTimerDef("client_latency") - ClientRedirectionRequests = NewCounterDef("client_redirection_requests") - ClientRedirectionFailures = NewCounterDef("client_redirection_errors") - ClientRedirectionLatency = NewTimerDef("client_redirection_latency") - StateTransitionCount = NewDimensionlessHistogramDef("state_transition_count") - HistorySize = NewBytesHistogramDef("history_size") - HistoryCount = NewDimensionlessHistogramDef("history_count") - SearchAttributesSize = NewBytesHistogramDef("search_attributes_size") - MemoSize = NewBytesHistogramDef("memo_size") - TooManyPendingChildWorkflows = NewCounterDef("wf_too_many_pending_child_workflows") - TooManyPendingActivities = NewCounterDef("wf_too_many_pending_activities") - TooManyPendingCancelRequests = NewCounterDef("wf_too_many_pending_cancel_requests") - TooManyPendingSignalsToExternalWorkflows = NewCounterDef("wf_too_many_pending_external_workflow_signals") + ServiceRequests = NewCounterDef( + "service_requests", + WithDescription("The number of RPC requests received by the service."), + ) + ServicePendingRequests = NewGaugeDef("service_pending_requests") + ServiceFailures = NewCounterDef("service_errors") + ServicePanic = NewCounterDef("service_panics") + ServiceErrorWithType = NewCounterDef("service_error_with_type") + ServiceLatency = NewTimerDef("service_latency") + ServiceLatencyNoUserLatency = NewTimerDef("service_latency_nouserlatency") + ServiceLatencyUserLatency = NewTimerDef("service_latency_userlatency") + ServiceErrInvalidArgumentCounter = NewCounterDef("service_errors_invalid_argument") + ServiceErrNamespaceNotActiveCounter = NewCounterDef("service_errors_namespace_not_active") + ServiceErrResourceExhaustedCounter = NewCounterDef("service_errors_resource_exhausted") + ServiceErrNotFoundCounter = NewCounterDef("service_errors_entity_not_found") + ServiceErrExecutionAlreadyStartedCounter = NewCounterDef("service_errors_execution_already_started") + ServiceErrContextTimeoutCounter = NewCounterDef("service_errors_context_timeout") + ServiceErrRetryTaskCounter = NewCounterDef("service_errors_retry_task") + ServiceErrIncompleteHistoryCounter = NewCounterDef("service_errors_incomplete_history") + ServiceErrNonDeterministicCounter = NewCounterDef("service_errors_nondeterministic") + ServiceErrUnauthorizedCounter = NewCounterDef("service_errors_unauthorized") + ServiceErrAuthorizeFailedCounter = NewCounterDef("service_errors_authorize_failed") + ActionCounter = NewCounterDef("action") + TlsCertsExpired = NewGaugeDef("certificates_expired") + TlsCertsExpiring = NewGaugeDef("certificates_expiring") + ServiceAuthorizationLatency = NewTimerDef("service_authorization_latency") + EventBlobSize = NewBytesHistogramDef("event_blob_size") + LockRequests = NewCounterDef("lock_requests") + LockFailures = NewCounterDef("lock_failures") + LockLatency = NewTimerDef("lock_latency") + ClientRequests = NewCounterDef( + "client_requests", + WithDescription("The number of requests sent by the client to an individual service, keyed by `service_role` and `operation`."), + ) + ClientFailures = NewCounterDef("client_errors") + 
ClientLatency = NewTimerDef("client_latency") + ClientRedirectionRequests = NewCounterDef("client_redirection_requests") + ClientRedirectionFailures = NewCounterDef("client_redirection_errors") + ClientRedirectionLatency = NewTimerDef("client_redirection_latency") + StateTransitionCount = NewDimensionlessHistogramDef("state_transition_count") + HistorySize = NewBytesHistogramDef("history_size") + HistoryCount = NewDimensionlessHistogramDef("history_count") + SearchAttributesSize = NewBytesHistogramDef("search_attributes_size") + MemoSize = NewBytesHistogramDef("memo_size") + TooManyPendingChildWorkflows = NewCounterDef( + "wf_too_many_pending_child_workflows", + WithDescription("The number of Workflow Tasks failed because they would cause the limit on the number of pending child workflows to be exceeded. See https://t.mp/limits for more information."), + ) + TooManyPendingActivities = NewCounterDef( + "wf_too_many_pending_activities", + WithDescription("The number of Workflow Tasks failed because they would cause the limit on the number of pending activities to be exceeded. See https://t.mp/limits for more information."), + ) + TooManyPendingCancelRequests = NewCounterDef( + "wf_too_many_pending_cancel_requests", + WithDescription("The number of Workflow Tasks failed because they would cause the limit on the number of pending cancel requests to be exceeded. See https://t.mp/limits for more information."), + ) + TooManyPendingSignalsToExternalWorkflows = NewCounterDef( + "wf_too_many_pending_external_workflow_signals", + WithDescription("The number of Workflow Tasks failed because they would cause the limit on the number of pending signals to external workflows to be exceeded. See https://t.mp/limits for more information."), + ) // Frontend AddSearchAttributesWorkflowSuccessCount = NewCounterDef("add_search_attributes_workflow_success") @@ -1277,6 +1213,10 @@ VersionCheckFailedCount = NewCounterDef("version_check_failed") VersionCheckRequestFailedCount = NewCounterDef("version_check_request_failed") VersionCheckLatency = NewTimerDef("version_check_latency") + HTTPServiceRequests = NewCounterDef( + "http_service_requests", + WithDescription("The number of HTTP requests received by the service."), + ) // History CacheRequests = NewCounterDef("cache_requests") @@ -1290,59 +1230,87 @@ // ArchivalTaskInvalidURI is emitted by the archival queue task executor when the history or visibility URI for an // archival task is not a valid URI. // We may emit this metric several times for a single task if the task is retried. 
- ArchivalTaskInvalidURI = NewCounterDef("archival_task_invalid_uri") - ArchiverClientSendSignalCount = NewCounterDef("archiver_client_sent_signal") - ArchiverClientSendSignalFailureCount = NewCounterDef("archiver_client_send_signal_error") - ArchiverClientHistoryRequestCount = NewCounterDef("archiver_client_history_request") - ArchiverClientHistoryInlineArchiveAttemptCount = NewCounterDef("archiver_client_history_inline_archive_attempt") - ArchiverClientHistoryInlineArchiveFailureCount = NewCounterDef("archiver_client_history_inline_archive_failure") - ArchiverClientVisibilityRequestCount = NewCounterDef("archiver_client_visibility_request") - ArchiverClientVisibilityInlineArchiveAttemptCount = NewCounterDef("archiver_client_visibility_inline_archive_attempt") - ArchiverClientVisibilityInlineArchiveFailureCount = NewCounterDef("archiver_client_visibility_inline_archive_failure") - ArchiverArchiveLatency = NewTimerDef("archiver_archive_latency") - ArchiverArchiveTargetLatency = NewTimerDef("archiver_archive_target_latency") - ShardContextClosedCounter = NewCounterDef("shard_closed_count") - ShardContextCreatedCounter = NewCounterDef("sharditem_created_count") - ShardContextRemovedCounter = NewCounterDef("sharditem_removed_count") - ShardContextAcquisitionLatency = NewTimerDef("sharditem_acquisition_latency") - ShardInfoReplicationPendingTasksTimer = NewDimensionlessHistogramDef("shardinfo_replication_pending_task") - ShardInfoTransferActivePendingTasksTimer = NewDimensionlessHistogramDef("shardinfo_transfer_active_pending_task") - ShardInfoTransferStandbyPendingTasksTimer = NewDimensionlessHistogramDef("shardinfo_transfer_standby_pending_task") - ShardInfoTimerActivePendingTasksTimer = NewDimensionlessHistogramDef("shardinfo_timer_active_pending_task") - ShardInfoTimerStandbyPendingTasksTimer = NewDimensionlessHistogramDef("shardinfo_timer_standby_pending_task") - ShardInfoVisibilityPendingTasksTimer = NewDimensionlessHistogramDef("shardinfo_visibility_pending_task") - ShardInfoReplicationLagHistogram = NewDimensionlessHistogramDef("shardinfo_replication_lag") - ShardInfoTransferLagHistogram = NewDimensionlessHistogramDef("shardinfo_transfer_lag") - ShardInfoTimerLagTimer = NewTimerDef("shardinfo_timer_lag") - ShardInfoVisibilityLagHistogram = NewDimensionlessHistogramDef("shardinfo_visibility_lag") - ShardInfoImmediateQueueLagHistogram = NewDimensionlessHistogramDef("shardinfo_immediate_queue_lag") - ShardInfoScheduledQueueLagTimer = NewTimerDef("shardinfo_scheduled_queue_lag") - SyncShardFromRemoteCounter = NewCounterDef("syncshard_remote_count") - SyncShardFromRemoteFailure = NewCounterDef("syncshard_remote_failed") - TaskRequests = NewCounterDef("task_requests") - TaskLoadLatency = NewTimerDef("task_latency_load") // latency from task generation to task loading (persistence scheduleToStart) - TaskScheduleLatency = NewTimerDef("task_latency_schedule") // latency from task submission to in-memory queue to processing (in-memory scheduleToStart) - TaskProcessingLatency = NewTimerDef("task_latency_processing") // latency for processing task one time - TaskLatency = NewTimerDef("task_latency") // task in-memory latency across multiple attempts - TaskQueueLatency = NewTimerDef("task_latency_queue") // task e2e latency - TaskAttempt = NewDimensionlessHistogramDef("task_attempt") - TaskFailures = NewCounterDef("task_errors") - TaskDiscarded = NewCounterDef("task_errors_discarded") - TaskSkipped = NewCounterDef("task_skipped") - TaskVersionMisMatch = NewCounterDef("task_errors_version_mismatch") 
- TasksDependencyTaskNotCompleted = NewCounterDef("task_dependency_task_not_completed") - TaskStandbyRetryCounter = NewCounterDef("task_errors_standby_retry_counter") - TaskWorkflowBusyCounter = NewCounterDef("task_errors_workflow_busy") - TaskNotActiveCounter = NewCounterDef("task_errors_not_active_counter") - TaskLimitExceededCounter = NewCounterDef("task_errors_limit_exceeded_counter") - TaskNamespaceHandoverCounter = NewCounterDef("task_errors_namespace_handover") - TaskThrottledCounter = NewCounterDef("task_errors_throttled") - TaskCorruptionCounter = NewCounterDef("task_errors_corruption") - TaskScheduleToStartLatency = NewTimerDef("task_schedule_to_start_latency") - TransferTaskMissingEventCounter = NewCounterDef("transfer_task_missing_event_counter") - TaskBatchCompleteCounter = NewCounterDef("task_batch_complete_counter") - TaskReschedulerPendingTasks = NewDimensionlessHistogramDef("task_rescheduler_pending_tasks") - PendingTasksCounter = NewDimensionlessHistogramDef("pending_tasks") + ArchivalTaskInvalidURI = NewCounterDef("archival_task_invalid_uri") + ArchiverClientSendSignalCount = NewCounterDef("archiver_client_sent_signal") + ArchiverClientSendSignalFailureCount = NewCounterDef("archiver_client_send_signal_error") + ArchiverClientHistoryRequestCount = NewCounterDef("archiver_client_history_request") + ArchiverClientHistoryInlineArchiveAttemptCount = NewCounterDef("archiver_client_history_inline_archive_attempt") + ArchiverClientHistoryInlineArchiveFailureCount = NewCounterDef("archiver_client_history_inline_archive_failure") + ArchiverClientVisibilityRequestCount = NewCounterDef("archiver_client_visibility_request") + ArchiverClientVisibilityInlineArchiveAttemptCount = NewCounterDef("archiver_client_visibility_inline_archive_attempt") + ArchiverClientVisibilityInlineArchiveFailureCount = NewCounterDef("archiver_client_visibility_inline_archive_failure") + ArchiverArchiveLatency = NewTimerDef("archiver_archive_latency") + ArchiverArchiveTargetLatency = NewTimerDef("archiver_archive_target_latency") + ShardContextClosedCounter = NewCounterDef("shard_closed_count") + ShardContextCreatedCounter = NewCounterDef("sharditem_created_count") + ShardContextRemovedCounter = NewCounterDef("sharditem_removed_count") + ShardContextAcquisitionLatency = NewTimerDef("sharditem_acquisition_latency") + ShardInfoImmediateQueueLagHistogram = NewDimensionlessHistogramDef( + "shardinfo_immediate_queue_lag", + WithDescription("A histogram across history shards for the difference between the smallest taskID of pending history tasks and the last generated history task ID."), + ) + ShardInfoScheduledQueueLagTimer = NewTimerDef( + "shardinfo_scheduled_queue_lag", + WithDescription("A histogram across history shards for the difference between the earliest scheduled time of pending history tasks and current time."), + ) + SyncShardFromRemoteCounter = NewCounterDef("syncshard_remote_count") + SyncShardFromRemoteFailure = NewCounterDef("syncshard_remote_failed") + TaskRequests = NewCounterDef( + "task_requests", + WithDescription("The number of history tasks processed."), + ) + TaskLoadLatency = NewTimerDef( + "task_latency_load", + WithDescription("Latency from history task generation to loading into memory (persistence schedule to start latency)."), + ) + TaskScheduleLatency = NewTimerDef( + "task_latency_schedule", + WithDescription("Latency from history task loading to start processing (in-memory schedule to start latency)."), + ) + TaskProcessingLatency = NewTimerDef( + "task_latency_processing", + 
WithDescription("Latency for processing a history task one time."), + ) + TaskLatency = NewTimerDef( + "task_latency", + WithDescription("Latency for procsssing and completing a history task. This latency is across all attempts but excludes any latencies related to workflow lock or user qutoa limit."), + ) + TaskQueueLatency = NewTimerDef( + "task_latency_queue", + WithDescription("End-to-end latency for processing and completing a history task, from task generation to completion."), + ) + TaskAttempt = NewDimensionlessHistogramDef( + "task_attempt", + WithDescription("The number of attempts took to complete a history task."), + ) + TaskFailures = NewCounterDef( + "task_errors", + WithDescription("The number of unexpected history task processing errors."), + ) + TaskDiscarded = NewCounterDef("task_errors_discarded") + TaskSkipped = NewCounterDef("task_skipped") + TaskVersionMisMatch = NewCounterDef("task_errors_version_mismatch") + TasksDependencyTaskNotCompleted = NewCounterDef("task_dependency_task_not_completed") + TaskStandbyRetryCounter = NewCounterDef("task_errors_standby_retry_counter") + TaskWorkflowBusyCounter = NewCounterDef( + "task_errors_workflow_busy", + WithDescription("The number of history task processing errors caused by failing to acquire workflow lock within the configured timeout (history.cacheNonUserContextLockTimeout)."), + ) + TaskNotActiveCounter = NewCounterDef("task_errors_not_active_counter") + TaskNamespaceHandoverCounter = NewCounterDef("task_errors_namespace_handover") + TaskInternalErrorCounter = NewCounterDef("task_errors_internal") + TaskThrottledCounter = NewCounterDef( + "task_errors_throttled", + WithDescription("The number of history task processing errors caused by resource exhausted errors, excluding workflow busy case."), + ) + TaskCorruptionCounter = NewCounterDef("task_errors_corruption") + TaskScheduleToStartLatency = NewTimerDef("task_schedule_to_start_latency") + TaskBatchCompleteCounter = NewCounterDef("task_batch_complete_counter") + TaskReschedulerPendingTasks = NewDimensionlessHistogramDef("task_rescheduler_pending_tasks") + PendingTasksCounter = NewDimensionlessHistogramDef( + "pending_tasks", + WithDescription("A histogram across history shards for the number of in-memory pending history tasks."), + ) TaskSchedulerThrottled = NewCounterDef("task_scheduler_throttled") QueueScheduleLatency = NewTimerDef("queue_latency_schedule") // latency for scheduling 100 tasks in one task channel QueueReaderCountHistogram = NewDimensionlessHistogramDef("queue_reader_count") @@ -1390,7 +1358,6 @@ AutoResetPointCorruptionCounter = NewCounterDef("auto_reset_point_corruption") ConcurrencyUpdateFailureCounter = NewCounterDef("concurrency_update_failure") ServiceErrShardOwnershipLostCounter = NewCounterDef("service_errors_shard_ownership_lost") - ServiceErrTaskAlreadyStartedCounter = NewCounterDef("service_errors_task_already_started") HeartbeatTimeoutCounter = NewCounterDef("heartbeat_timeout") ScheduleToStartTimeoutCounter = NewCounterDef("schedule_to_start_timeout") StartToCloseTimeoutCounter = NewCounterDef("start_to_close_timeout") @@ -1406,78 +1373,66 @@ CompleteWorkflowTaskWithStickyEnabledCounter = NewCounterDef("complete_workflow_task_sticky_enabled_count") CompleteWorkflowTaskWithStickyDisabledCounter = NewCounterDef("complete_workflow_task_sticky_disabled_count") WorkflowTaskHeartbeatTimeoutCounter = NewCounterDef("workflow_task_heartbeat_timeout_count") - EmptyReplicationEventsCounter = NewCounterDef("empty_replication_events") 
DuplicateReplicationEventsCounter = NewCounterDef("duplicate_replication_events") - StaleReplicationEventsCounter = NewCounterDef("stale_replication_events") - ReplicationEventsSizeTimer = NewTimerDef("replication_events_size") - BufferReplicationTaskTimer = NewTimerDef("buffer_replication_tasks") - UnbufferReplicationTaskTimer = NewTimerDef("unbuffer_replication_tasks") - HistoryConflictsCounter = NewCounterDef("history_conflicts") - CompleteTaskFailedCounter = NewCounterDef("complete_task_fail_count") AcquireLockFailedCounter = NewCounterDef("acquire_lock_failed") WorkflowContextCleared = NewCounterDef("workflow_context_cleared") - MutableStateSize = NewBytesHistogramDef("mutable_state_size") - ExecutionInfoSize = NewBytesHistogramDef("execution_info_size") - ExecutionStateSize = NewBytesHistogramDef("execution_state_size") - ActivityInfoSize = NewBytesHistogramDef("activity_info_size") - TimerInfoSize = NewBytesHistogramDef("timer_info_size") - ChildInfoSize = NewBytesHistogramDef("child_info_size") - RequestCancelInfoSize = NewBytesHistogramDef("request_cancel_info_size") - SignalInfoSize = NewBytesHistogramDef("signal_info_size") - SignalRequestIDSize = NewBytesHistogramDef("signal_request_id_size") - BufferedEventsSize = NewBytesHistogramDef("buffered_events_size") - ActivityInfoCount = NewDimensionlessHistogramDef("activity_info_count") - TimerInfoCount = NewDimensionlessHistogramDef("timer_info_count") - ChildInfoCount = NewDimensionlessHistogramDef("child_info_count") - SignalInfoCount = NewDimensionlessHistogramDef("signal_info_count") - RequestCancelInfoCount = NewDimensionlessHistogramDef("request_cancel_info_count") - SignalRequestIDCount = NewDimensionlessHistogramDef("signal_request_id_count") - BufferedEventsCount = NewDimensionlessHistogramDef("buffered_events_count") - TaskCount = NewDimensionlessHistogramDef("task_count") - TotalActivityCount = NewDimensionlessHistogramDef("total_activity_count") - TotalUserTimerCount = NewDimensionlessHistogramDef("total_user_timer_count") - TotalChildExecutionCount = NewDimensionlessHistogramDef("total_child_execution_count") - TotalRequestCancelExternalCount = NewDimensionlessHistogramDef("total_request_cancel_external_count") - TotalSignalExternalCount = NewDimensionlessHistogramDef("total_signal_external_count") - TotalSignalCount = NewDimensionlessHistogramDef("total_signal_count") - WorkflowRetryBackoffTimerCount = NewCounterDef("workflow_retry_backoff_timer") - WorkflowCronBackoffTimerCount = NewCounterDef("workflow_cron_backoff_timer") - WorkflowDelayedStartBackoffTimerCount = NewCounterDef("workflow_delayed_start_backoff_timer") - WorkflowCleanupDeleteCount = NewCounterDef("workflow_cleanup_delete") - WorkflowCleanupArchiveCount = NewCounterDef("workflow_cleanup_archive") - WorkflowCleanupNopCount = NewCounterDef("workflow_cleanup_nop") - WorkflowCleanupDeleteHistoryInlineCount = NewCounterDef("workflow_cleanup_delete_history_inline") - WorkflowSuccessCount = NewCounterDef("workflow_success") - WorkflowCancelCount = NewCounterDef("workflow_cancel") - WorkflowFailedCount = NewCounterDef("workflow_failed") - WorkflowTimeoutCount = NewCounterDef("workflow_timeout") - WorkflowTerminateCount = NewCounterDef("workflow_terminate") - WorkflowContinuedAsNewCount = NewCounterDef("workflow_continued_as_new") - LastRetrievedMessageID = NewGaugeDef("last_retrieved_message_id") - LastProcessedMessageID = NewGaugeDef("last_processed_message_id") - ReplicationTasksSend = NewCounterDef("replication_tasks_send") - ReplicationTasksRecv = 
NewCounterDef("replication_tasks_recv") - ReplicationTasksRecvBacklog = NewDimensionlessHistogramDef("replication_tasks_recv_backlog") - ReplicationTasksSkipped = NewCounterDef("replication_tasks_skipped") - ReplicationTasksApplied = NewCounterDef("replication_tasks_applied") - ReplicationTasksFailed = NewCounterDef("replication_tasks_failed") + MutableStateSize = NewBytesHistogramDef( + "mutable_state_size", + WithDescription("The size of an individual Workflow Execution's state, emitted each time a workflow execution is retrieved or updated."), + ) + ExecutionInfoSize = NewBytesHistogramDef("execution_info_size") + ExecutionStateSize = NewBytesHistogramDef("execution_state_size") + ActivityInfoSize = NewBytesHistogramDef("activity_info_size") + TimerInfoSize = NewBytesHistogramDef("timer_info_size") + ChildInfoSize = NewBytesHistogramDef("child_info_size") + RequestCancelInfoSize = NewBytesHistogramDef("request_cancel_info_size") + SignalInfoSize = NewBytesHistogramDef("signal_info_size") + SignalRequestIDSize = NewBytesHistogramDef("signal_request_id_size") + BufferedEventsSize = NewBytesHistogramDef("buffered_events_size") + ActivityInfoCount = NewDimensionlessHistogramDef("activity_info_count") + TimerInfoCount = NewDimensionlessHistogramDef("timer_info_count") + ChildInfoCount = NewDimensionlessHistogramDef("child_info_count") + SignalInfoCount = NewDimensionlessHistogramDef("signal_info_count") + RequestCancelInfoCount = NewDimensionlessHistogramDef("request_cancel_info_count") + SignalRequestIDCount = NewDimensionlessHistogramDef("signal_request_id_count") + BufferedEventsCount = NewDimensionlessHistogramDef("buffered_events_count") + TaskCount = NewDimensionlessHistogramDef("task_count") + TotalActivityCount = NewDimensionlessHistogramDef("total_activity_count") + TotalUserTimerCount = NewDimensionlessHistogramDef("total_user_timer_count") + TotalChildExecutionCount = NewDimensionlessHistogramDef("total_child_execution_count") + TotalRequestCancelExternalCount = NewDimensionlessHistogramDef("total_request_cancel_external_count") + TotalSignalExternalCount = NewDimensionlessHistogramDef("total_signal_external_count") + TotalSignalCount = NewDimensionlessHistogramDef("total_signal_count") + WorkflowRetryBackoffTimerCount = NewCounterDef("workflow_retry_backoff_timer") + WorkflowCronBackoffTimerCount = NewCounterDef("workflow_cron_backoff_timer") + WorkflowDelayedStartBackoffTimerCount = NewCounterDef("workflow_delayed_start_backoff_timer") + WorkflowCleanupDeleteCount = NewCounterDef("workflow_cleanup_delete") + WorkflowCleanupArchiveCount = NewCounterDef("workflow_cleanup_archive") + WorkflowCleanupNopCount = NewCounterDef("workflow_cleanup_nop") + WorkflowCleanupDeleteHistoryInlineCount = NewCounterDef("workflow_cleanup_delete_history_inline") + WorkflowSuccessCount = NewCounterDef("workflow_success") + WorkflowCancelCount = NewCounterDef("workflow_cancel") + WorkflowFailedCount = NewCounterDef("workflow_failed") + WorkflowTimeoutCount = NewCounterDef("workflow_timeout") + WorkflowTerminateCount = NewCounterDef("workflow_terminate") + WorkflowContinuedAsNewCount = NewCounterDef("workflow_continued_as_new") + ReplicationTasksSend = NewCounterDef("replication_tasks_send") + ReplicationTasksRecv = NewCounterDef("replication_tasks_recv") + ReplicationTasksRecvBacklog = NewDimensionlessHistogramDef("replication_tasks_recv_backlog") + ReplicationTasksSkipped = NewCounterDef("replication_tasks_skipped") + ReplicationTasksApplied = NewCounterDef("replication_tasks_applied") + 
ReplicationTasksFailed = NewCounterDef("replication_tasks_failed") // ReplicationTasksLag is a heuristic for how far behind the remote DC is for a given cluster. It measures the // difference between task IDs so its unit should be "tasks". - // It currently has units of "ms", which is incorrect. See https://github.com/temporalio/temporal/issues/4483. - ReplicationTasksLag = NewTimeHistogramDef("replication_tasks_lag") + ReplicationTasksLag = NewDimensionlessHistogramDef("replication_tasks_lag") // ReplicationTasksFetched records the number of tasks fetched by the poller. - // It has the same unit issue as ReplicationTasksLag. - ReplicationTasksFetched = NewTimeHistogramDef("replication_tasks_fetched") - // ReplicationTasksReturned is the same as ReplicationTasksFetched. - ReplicationTasksReturned = NewTimeHistogramDef("replication_tasks_returned") + ReplicationTasksFetched = NewDimensionlessHistogramDef("replication_tasks_fetched") ReplicationLatency = NewTimerDef("replication_latency") - ReplicationTasksAppliedLatency = NewTimerDef("replication_tasks_applied_latency") ReplicationDLQFailed = NewCounterDef("replication_dlq_enqueue_failed") ReplicationDLQMaxLevelGauge = NewGaugeDef("replication_dlq_max_level") ReplicationDLQAckLevelGauge = NewGaugeDef("replication_dlq_ack_level") ReplicationNonEmptyDLQCount = NewCounterDef("replication_dlq_non_empty") - GetReplicationMessagesForShardLatency = NewTimerDef("get_replication_messages_for_shard") - GetDLQReplicationMessagesLatency = NewTimerDef("get_dlq_replication_messages") + ReplicationOutlierNamespace = NewCounterDef("replication_outlier_namespace") EventReapplySkippedCount = NewCounterDef("event_reapply_skipped_count") DirectQueryDispatchLatency = NewTimerDef("direct_query_dispatch_latency") DirectQueryDispatchStickyLatency = NewTimerDef("direct_query_dispatch_sticky_latency") @@ -1489,10 +1444,8 @@ DirectQueryDispatchTimeoutBeforeNonStickyCount = NewCounterDef("direct_query_dispatch_timeout_before_non_sticky") WorkflowTaskQueryLatency = NewTimerDef("workflow_task_query_latency") ConsistentQueryTimeoutCount = NewCounterDef("consistent_query_timeout") - QueryBeforeFirstWorkflowTaskCount = NewCounterDef("query_before_first_workflow_task") QueryBufferExceededCount = NewCounterDef("query_buffer_exceeded") QueryRegistryInvalidStateCount = NewCounterDef("query_registry_invalid_state") - WorkerNotSupportsConsistentQueryCount = NewCounterDef("worker_not_supports_consistent_query") WorkflowTaskTimeoutOverrideCount = NewCounterDef("workflow_task_timeout_overrides") WorkflowRunTimeoutOverrideCount = NewCounterDef("workflow_run_timeout_overrides") ReplicationTaskCleanupCount = NewCounterDef("replication_task_cleanup_count") @@ -1509,6 +1462,7 @@ InorderBufferedEventsCounter = NewCounterDef("inordered_buffered_events") ShardLingerSuccess = NewTimerDef("shard_linger_success") ShardLingerTimeouts = NewCounterDef("shard_linger_timeouts") + DynamicRateLimiterMultiplier = NewGaugeDef("dynamic_rate_limit_multiplier") // Matching MatchingClientForwardedCounter = NewCounterDef("forwarded") @@ -1526,15 +1480,7 @@ BufferThrottlePerTaskQueueCounter = NewCounterDef("buffer_throttle_count") ExpiredTasksPerTaskQueueCounter = NewCounterDef("tasks_expired") ForwardedPerTaskQueueCounter = NewCounterDef("forwarded_per_tl") - ForwardTaskCallsPerTaskQueue = NewCounterDef("forward_task_calls") ForwardTaskErrorsPerTaskQueue = NewCounterDef("forward_task_errors") - ForwardQueryCallsPerTaskQueue = NewCounterDef("forward_query_calls") - ForwardQueryErrorsPerTaskQueue = 
NewCounterDef("forward_query_errors") - ForwardPollCallsPerTaskQueue = NewCounterDef("forward_poll_calls") - ForwardPollErrorsPerTaskQueue = NewCounterDef("forward_poll_errors") - ForwardTaskLatencyPerTaskQueue = NewTimerDef("forward_task_latency") - ForwardQueryLatencyPerTaskQueue = NewTimerDef("forward_query_latency") - ForwardPollLatencyPerTaskQueue = NewTimerDef("forward_poll_latency") LocalToLocalMatchPerTaskQueueCounter = NewCounterDef("local_to_local_matches") LocalToRemoteMatchPerTaskQueueCounter = NewCounterDef("local_to_remote_matches") RemoteToLocalMatchPerTaskQueueCounter = NewCounterDef("remote_to_local_matches") @@ -1546,116 +1492,136 @@ TaskWriteLatencyPerTaskQueue = NewTimerDef("task_write_latency") TaskLagPerTaskQueueGauge = NewGaugeDef("task_lag_per_tl") NoRecentPollerTasksPerTaskQueueCounter = NewCounterDef("no_poller_tasks") + UnknownBuildPollsCounter = NewCounterDef("unknown_build_polls") + UnknownBuildTasksCounter = NewCounterDef("unknown_build_tasks") // Worker - ExecutorTasksDoneCount = NewCounterDef("executor_done") - ExecutorTasksErrCount = NewCounterDef("executor_err") - ExecutorTasksDeferredCount = NewCounterDef("executor_deferred") - ExecutorTasksDroppedCount = NewCounterDef("executor_dropped") - StartedCount = NewCounterDef("started") - StoppedCount = NewCounterDef("stopped") - ScanDuration = NewTimerDef("scan_duration") - TaskProcessedCount = NewGaugeDef("task_processed") - TaskDeletedCount = NewGaugeDef("task_deleted") - TaskQueueProcessedCount = NewGaugeDef("taskqueue_processed") - TaskQueueDeletedCount = NewGaugeDef("taskqueue_deleted") - TaskQueueOutstandingCount = NewGaugeDef("taskqueue_outstanding") - HistoryArchiverArchiveNonRetryableErrorCount = NewCounterDef("history_archiver_archive_non_retryable_error") - HistoryArchiverArchiveTransientErrorCount = NewCounterDef("history_archiver_archive_transient_error") - HistoryArchiverArchiveSuccessCount = NewCounterDef("history_archiver_archive_success") - HistoryArchiverHistoryMutatedCount = NewCounterDef("history_archiver_history_mutated") - HistoryArchiverTotalUploadSize = NewBytesHistogramDef("history_archiver_total_upload_size") - HistoryArchiverHistorySize = NewBytesHistogramDef("history_archiver_history_size") - HistoryArchiverDuplicateArchivalsCount = NewCounterDef("history_archiver_duplicate_archivals") - HistoryArchiverBlobExistsCount = NewCounterDef("history_archiver_blob_exists") - HistoryArchiverBlobSize = NewBytesHistogramDef("history_archiver_blob_size") - HistoryArchiverRunningDeterministicConstructionCheckCount = NewCounterDef("history_archiver_running_deterministic_construction_check") - HistoryArchiverDeterministicConstructionCheckFailedCount = NewCounterDef("history_archiver_deterministic_construction_check_failed") - HistoryArchiverRunningBlobIntegrityCheckCount = NewCounterDef("history_archiver_running_blob_integrity_check") - HistoryArchiverBlobIntegrityCheckFailedCount = NewCounterDef("history_archiver_blob_integrity_check_failed") - HistoryWorkflowExecutionCacheLatency = NewTimerDef("history_workflow_execution_cache_latency") - VisibilityArchiverArchiveNonRetryableErrorCount = NewCounterDef("visibility_archiver_archive_non_retryable_error") - VisibilityArchiverArchiveTransientErrorCount = NewCounterDef("visibility_archiver_archive_transient_error") - VisibilityArchiveSuccessCount = NewCounterDef("visibility_archiver_archive_success") - HistoryScavengerSuccessCount = NewCounterDef("scavenger_success") - HistoryScavengerErrorCount = NewCounterDef("scavenger_errors") - 
HistoryScavengerSkipCount = NewCounterDef("scavenger_skips") - ExecutionsOutstandingCount = NewGaugeDef("executions_outstanding") - ArchiverNonRetryableErrorCount = NewCounterDef("archiver_non_retryable_error") - ArchiverStartedCount = NewCounterDef("archiver_started") - ArchiverStoppedCount = NewCounterDef("archiver_stopped") - ArchiverCoroutineStartedCount = NewCounterDef("archiver_coroutine_started") - ArchiverCoroutineStoppedCount = NewCounterDef("archiver_coroutine_stopped") - ArchiverHandleHistoryRequestLatency = NewTimerDef("archiver_handle_history_request_latency") - ArchiverHandleVisibilityRequestLatency = NewTimerDef("archiver_handle_visibility_request_latency") - ArchiverUploadWithRetriesLatency = NewTimerDef("archiver_upload_with_retries_latency") - ArchiverDeleteWithRetriesLatency = NewTimerDef("archiver_delete_with_retries_latency") - ArchiverUploadFailedAllRetriesCount = NewCounterDef("archiver_upload_failed_all_retries") - ArchiverUploadSuccessCount = NewCounterDef("archiver_upload_success") - ArchiverDeleteFailedAllRetriesCount = NewCounterDef("archiver_delete_failed_all_retries") - ArchiverDeleteSuccessCount = NewCounterDef("archiver_delete_success") - ArchiverHandleVisibilityFailedAllRetiresCount = NewCounterDef("archiver_handle_visibility_failed_all_retries") - ArchiverHandleVisibilitySuccessCount = NewCounterDef("archiver_handle_visibility_success") - ArchiverBacklogSizeGauge = NewGaugeDef("archiver_backlog_size") - ArchiverPumpTimeoutCount = NewCounterDef("archiver_pump_timeout") - ArchiverPumpSignalThresholdCount = NewCounterDef("archiver_pump_signal_threshold") - ArchiverPumpTimeoutWithoutSignalsCount = NewCounterDef("archiver_pump_timeout_without_signals") - ArchiverPumpSignalChannelClosedCount = NewCounterDef("archiver_pump_signal_channel_closed") - ArchiverWorkflowStartedCount = NewCounterDef("archiver_workflow_started") - ArchiverNumPumpedRequestsCount = NewCounterDef("archiver_num_pumped_requests") - ArchiverNumHandledRequestsCount = NewCounterDef("archiver_num_handled_requests") - ArchiverPumpedNotEqualHandledCount = NewCounterDef("archiver_pumped_not_equal_handled") - ArchiverHandleAllRequestsLatency = NewTimerDef("archiver_handle_all_requests_latency") - ArchiverWorkflowStoppingCount = NewCounterDef("archiver_workflow_stopping") - ScavengerValidationRequestsCount = NewCounterDef("scavenger_validation_requests") - ScavengerValidationFailuresCount = NewCounterDef("scavenger_validation_failures") - ScavengerValidationSkipsCount = NewCounterDef("scavenger_validation_skips") - AddSearchAttributesFailuresCount = NewCounterDef("add_search_attributes_failures") - DeleteNamespaceSuccessCount = NewCounterDef("delete_namespace_success") - RenameNamespaceSuccessCount = NewCounterDef("rename_namespace_success") - DeleteExecutionsSuccessCount = NewCounterDef("delete_executions_success") - DeleteNamespaceFailuresCount = NewCounterDef("delete_namespace_failures") - UpdateNamespaceFailuresCount = NewCounterDef("update_namespace_failures") - RenameNamespaceFailuresCount = NewCounterDef("rename_namespace_failures") - ReadNamespaceFailuresCount = NewCounterDef("read_namespace_failures") - ListExecutionsFailuresCount = NewCounterDef("list_executions_failures") - CountExecutionsFailuresCount = NewCounterDef("count_executions_failures") - DeleteExecutionFailuresCount = NewCounterDef("delete_execution_failures") - DeleteExecutionNotFoundCount = NewCounterDef("delete_execution_not_found") - RateLimiterFailuresCount = NewCounterDef("rate_limiter_failures") - BatcherProcessorSuccess = 
NewCounterDef("batcher_processor_requests") - BatcherProcessorFailures = NewCounterDef("batcher_processor_errors") - BatcherOperationFailures = NewCounterDef("batcher_operation_errors") - ElasticsearchBulkProcessorRequests = NewCounterDef("elasticsearch_bulk_processor_requests") - ElasticsearchBulkProcessorQueuedRequests = NewDimensionlessHistogramDef("elasticsearch_bulk_processor_queued_requests") - ElasticsearchBulkProcessorFailures = NewCounterDef("elasticsearch_bulk_processor_errors") - ElasticsearchBulkProcessorCorruptedData = NewCounterDef("elasticsearch_bulk_processor_corrupted_data") - ElasticsearchBulkProcessorDuplicateRequest = NewCounterDef("elasticsearch_bulk_processor_duplicate_request") - ElasticsearchBulkProcessorRequestLatency = NewTimerDef("elasticsearch_bulk_processor_request_latency") - ElasticsearchBulkProcessorCommitLatency = NewTimerDef("elasticsearch_bulk_processor_commit_latency") - ElasticsearchBulkProcessorWaitAddLatency = NewTimerDef("elasticsearch_bulk_processor_wait_add_latency") - ElasticsearchBulkProcessorWaitStartLatency = NewTimerDef("elasticsearch_bulk_processor_wait_start_latency") - ElasticsearchBulkProcessorBulkSize = NewDimensionlessHistogramDef("elasticsearch_bulk_processor_bulk_size") - ElasticsearchBulkProcessorBulkResquestTookLatency = NewTimerDef("elasticsearch_bulk_processor_bulk_request_took_latency") - ElasticsearchDocumentParseFailuresCount = NewCounterDef("elasticsearch_document_parse_failures_counter") - ElasticsearchDocumentGenerateFailuresCount = NewCounterDef("elasticsearch_document_generate_failures_counter") - ElasticsearchCustomOrderByClauseCount = NewCounterDef("elasticsearch_custom_order_by_clause_counter") - CatchUpReadyShardCountGauge = NewGaugeDef("catchup_ready_shard_count") - HandoverReadyShardCountGauge = NewGaugeDef("handover_ready_shard_count") - ReplicatorMessages = NewCounterDef("replicator_messages") - ReplicatorFailures = NewCounterDef("replicator_errors") - ReplicatorLatency = NewTimerDef("replicator_latency") - ReplicatorDLQFailures = NewCounterDef("replicator_dlq_enqueue_fails") - NamespaceReplicationEnqueueDLQCount = NewCounterDef("namespace_replication_dlq_enqueue_requests") - ParentClosePolicyProcessorSuccess = NewCounterDef("parent_close_policy_processor_requests") - ParentClosePolicyProcessorFailures = NewCounterDef("parent_close_policy_processor_errors") - ScheduleMissedCatchupWindow = NewCounterDef("schedule_missed_catchup_window") - ScheduleRateLimited = NewCounterDef("schedule_rate_limited") - ScheduleBufferOverruns = NewCounterDef("schedule_buffer_overruns") - ScheduleActionSuccess = NewCounterDef("schedule_action_success") - ScheduleActionErrors = NewCounterDef("schedule_action_errors") - ScheduleCancelWorkflowErrors = NewCounterDef("schedule_cancel_workflow_errors") - ScheduleTerminateWorkflowErrors = NewCounterDef("schedule_terminate_workflow_errors") + ExecutorTasksDoneCount = NewCounterDef("executor_done") + ExecutorTasksErrCount = NewCounterDef("executor_err") + ExecutorTasksDeferredCount = NewCounterDef("executor_deferred") + ExecutorTasksDroppedCount = NewCounterDef("executor_dropped") + StartedCount = NewCounterDef("started") + StoppedCount = NewCounterDef("stopped") + TaskProcessedCount = NewGaugeDef("task_processed") + TaskDeletedCount = NewGaugeDef("task_deleted") + TaskQueueProcessedCount = NewGaugeDef("taskqueue_processed") + TaskQueueDeletedCount = NewGaugeDef("taskqueue_deleted") + TaskQueueOutstandingCount = NewGaugeDef("taskqueue_outstanding") + HistoryArchiverArchiveNonRetryableErrorCount = 
NewCounterDef("history_archiver_archive_non_retryable_error") + HistoryArchiverArchiveTransientErrorCount = NewCounterDef("history_archiver_archive_transient_error") + HistoryArchiverArchiveSuccessCount = NewCounterDef("history_archiver_archive_success") + HistoryArchiverTotalUploadSize = NewBytesHistogramDef("history_archiver_total_upload_size") + HistoryArchiverHistorySize = NewBytesHistogramDef("history_archiver_history_size") + HistoryArchiverDuplicateArchivalsCount = NewCounterDef("history_archiver_duplicate_archivals") + HistoryArchiverBlobExistsCount = NewCounterDef("history_archiver_blob_exists") + HistoryArchiverBlobSize = NewBytesHistogramDef("history_archiver_blob_size") + HistoryWorkflowExecutionCacheLatency = NewTimerDef("history_workflow_execution_cache_latency") + VisibilityArchiverArchiveNonRetryableErrorCount = NewCounterDef("visibility_archiver_archive_non_retryable_error") + VisibilityArchiverArchiveTransientErrorCount = NewCounterDef("visibility_archiver_archive_transient_error") + VisibilityArchiveSuccessCount = NewCounterDef("visibility_archiver_archive_success") + HistoryScavengerSuccessCount = NewCounterDef("scavenger_success") + HistoryScavengerErrorCount = NewCounterDef("scavenger_errors") + HistoryScavengerSkipCount = NewCounterDef("scavenger_skips") + ExecutionsOutstandingCount = NewGaugeDef("executions_outstanding") + ArchiverNonRetryableErrorCount = NewCounterDef("archiver_non_retryable_error") + ArchiverStartedCount = NewCounterDef("archiver_started") + ArchiverStoppedCount = NewCounterDef("archiver_stopped") + ArchiverCoroutineStartedCount = NewCounterDef("archiver_coroutine_started") + ArchiverCoroutineStoppedCount = NewCounterDef("archiver_coroutine_stopped") + ArchiverHandleHistoryRequestLatency = NewTimerDef("archiver_handle_history_request_latency") + ArchiverHandleVisibilityRequestLatency = NewTimerDef("archiver_handle_visibility_request_latency") + ArchiverUploadWithRetriesLatency = NewTimerDef("archiver_upload_with_retries_latency") + ArchiverDeleteWithRetriesLatency = NewTimerDef("archiver_delete_with_retries_latency") + ArchiverUploadFailedAllRetriesCount = NewCounterDef("archiver_upload_failed_all_retries") + ArchiverUploadSuccessCount = NewCounterDef("archiver_upload_success") + ArchiverDeleteFailedAllRetriesCount = NewCounterDef("archiver_delete_failed_all_retries") + ArchiverDeleteSuccessCount = NewCounterDef("archiver_delete_success") + ArchiverHandleVisibilityFailedAllRetiresCount = NewCounterDef("archiver_handle_visibility_failed_all_retries") + ArchiverHandleVisibilitySuccessCount = NewCounterDef("archiver_handle_visibility_success") + ArchiverBacklogSizeGauge = NewGaugeDef("archiver_backlog_size") + ArchiverPumpTimeoutCount = NewCounterDef("archiver_pump_timeout") + ArchiverPumpSignalThresholdCount = NewCounterDef("archiver_pump_signal_threshold") + ArchiverPumpTimeoutWithoutSignalsCount = NewCounterDef("archiver_pump_timeout_without_signals") + ArchiverPumpSignalChannelClosedCount = NewCounterDef("archiver_pump_signal_channel_closed") + ArchiverWorkflowStartedCount = NewCounterDef("archiver_workflow_started") + ArchiverNumPumpedRequestsCount = NewCounterDef("archiver_num_pumped_requests") + ArchiverNumHandledRequestsCount = NewCounterDef("archiver_num_handled_requests") + ArchiverPumpedNotEqualHandledCount = NewCounterDef("archiver_pumped_not_equal_handled") + ArchiverHandleAllRequestsLatency = NewTimerDef("archiver_handle_all_requests_latency") + ArchiverWorkflowStoppingCount = NewCounterDef("archiver_workflow_stopping") + 
ScavengerValidationRequestsCount = NewCounterDef("scavenger_validation_requests") + ScavengerValidationFailuresCount = NewCounterDef("scavenger_validation_failures") + ScavengerValidationSkipsCount = NewCounterDef("scavenger_validation_skips") + AddSearchAttributesFailuresCount = NewCounterDef("add_search_attributes_failures") + DeleteNamespaceSuccessCount = NewCounterDef("delete_namespace_success") + RenameNamespaceSuccessCount = NewCounterDef("rename_namespace_success") + DeleteExecutionsSuccessCount = NewCounterDef("delete_executions_success") + DeleteNamespaceFailuresCount = NewCounterDef("delete_namespace_failures") + UpdateNamespaceFailuresCount = NewCounterDef("update_namespace_failures") + RenameNamespaceFailuresCount = NewCounterDef("rename_namespace_failures") + ReadNamespaceFailuresCount = NewCounterDef("read_namespace_failures") + ListExecutionsFailuresCount = NewCounterDef("list_executions_failures") + CountExecutionsFailuresCount = NewCounterDef("count_executions_failures") + DeleteExecutionFailuresCount = NewCounterDef("delete_execution_failures") + DeleteExecutionNotFoundCount = NewCounterDef("delete_execution_not_found") + RateLimiterFailuresCount = NewCounterDef("rate_limiter_failures") + BatcherProcessorSuccess = NewCounterDef( + "batcher_processor_requests", + WithDescription("The number of individual workflow execution tasks successfully processed by the batch request processor"), + ) + BatcherProcessorFailures = NewCounterDef("batcher_processor_errors") + BatcherOperationFailures = NewCounterDef("batcher_operation_errors") + ElasticsearchBulkProcessorRequests = NewCounterDef("elasticsearch_bulk_processor_requests") + ElasticsearchBulkProcessorQueuedRequests = NewDimensionlessHistogramDef("elasticsearch_bulk_processor_queued_requests") + ElasticsearchBulkProcessorFailures = NewCounterDef("elasticsearch_bulk_processor_errors") + ElasticsearchBulkProcessorCorruptedData = NewCounterDef("elasticsearch_bulk_processor_corrupted_data") + ElasticsearchBulkProcessorDuplicateRequest = NewCounterDef("elasticsearch_bulk_processor_duplicate_request") + ElasticsearchBulkProcessorRequestLatency = NewTimerDef("elasticsearch_bulk_processor_request_latency") + ElasticsearchBulkProcessorCommitLatency = NewTimerDef("elasticsearch_bulk_processor_commit_latency") + ElasticsearchBulkProcessorWaitAddLatency = NewTimerDef("elasticsearch_bulk_processor_wait_add_latency") + ElasticsearchBulkProcessorWaitStartLatency = NewTimerDef("elasticsearch_bulk_processor_wait_start_latency") + ElasticsearchBulkProcessorBulkSize = NewDimensionlessHistogramDef("elasticsearch_bulk_processor_bulk_size") + ElasticsearchBulkProcessorBulkResquestTookLatency = NewTimerDef("elasticsearch_bulk_processor_bulk_request_took_latency") + ElasticsearchDocumentParseFailuresCount = NewCounterDef("elasticsearch_document_parse_failures_counter") + ElasticsearchDocumentGenerateFailuresCount = NewCounterDef("elasticsearch_document_generate_failures_counter") + ElasticsearchCustomOrderByClauseCount = NewCounterDef("elasticsearch_custom_order_by_clause_counter") + CatchUpReadyShardCountGauge = NewGaugeDef("catchup_ready_shard_count") + HandoverReadyShardCountGauge = NewGaugeDef("handover_ready_shard_count") + ReplicatorMessages = NewCounterDef("replicator_messages") + ReplicatorFailures = NewCounterDef("replicator_errors") + ReplicatorLatency = NewTimerDef("replicator_latency") + ReplicatorDLQFailures = NewCounterDef("replicator_dlq_enqueue_fails") + NamespaceReplicationEnqueueDLQCount = 
NewCounterDef("namespace_replication_dlq_enqueue_requests") + ParentClosePolicyProcessorSuccess = NewCounterDef("parent_close_policy_processor_requests") + ParentClosePolicyProcessorFailures = NewCounterDef("parent_close_policy_processor_errors") + ScheduleMissedCatchupWindow = NewCounterDef( + "schedule_missed_catchup_window", + WithDescription("The number of times a schedule missed an action due to the configured catchup window"), + ) + ScheduleRateLimited = NewCounterDef( + "schedule_rate_limited", + WithDescription("The number of times a schedule action was delayed by more than 1s due to rate limiting"), + ) + ScheduleBufferOverruns = NewCounterDef( + "schedule_buffer_overruns", + WithDescription("The number of schedule actions that were dropped due to the action buffer being full"), + ) + ScheduleActionSuccess = NewCounterDef( + "schedule_action_success", + WithDescription("The number of schedule actions that were successfully taken by a schedule"), + ) + ScheduleActionErrors = NewCounterDef( + "schedule_action_errors", + WithDescription("The number of schedule actions that failed to start"), + ) + ScheduleCancelWorkflowErrors = NewCounterDef( + "schedule_cancel_workflow_errors", + WithDescription("The number of times a schedule got an error trying to cancel a previous run"), + ) + ScheduleTerminateWorkflowErrors = NewCounterDef( + "schedule_terminate_workflow_errors", + WithDescription("The number of times a schedule got an error trying to terminate a previous run"), + ) // Force replication EncounterZombieWorkflowCount = NewCounterDef("encounter_zombie_workflow_count") @@ -1674,25 +1640,24 @@ NamespaceReplicationDLQMaxLevelGauge = NewGaugeDef("namespace_dlq_max_level") // Persistence - PersistenceRequests = NewCounterDef("persistence_requests") - PersistenceFailures = NewCounterDef("persistence_errors") - PersistenceErrorWithType = NewCounterDef("persistence_error_with_type") - PersistenceLatency = NewTimerDef("persistence_latency") - PersistenceShardRPS = NewDimensionlessHistogramDef("persistence_shard_rps") - PersistenceErrShardExistsCounter = NewCounterDef("persistence_errors_shard_exists") - PersistenceErrShardOwnershipLostCounter = NewCounterDef("persistence_errors_shard_ownership_lost") - PersistenceErrConditionFailedCounter = NewCounterDef("persistence_errors_condition_failed") - PersistenceErrCurrentWorkflowConditionFailedCounter = NewCounterDef("persistence_errors_current_workflow_condition_failed") - PersistenceErrWorkflowConditionFailedCounter = NewCounterDef("persistence_errors_workflow_condition_failed") - PersistenceErrTimeoutCounter = NewCounterDef("persistence_errors_timeout") - PersistenceErrBusyCounter = NewCounterDef("persistence_errors_busy") - PersistenceErrEntityNotExistsCounter = NewCounterDef("persistence_errors_entity_not_exists") - PersistenceErrNamespaceAlreadyExistsCounter = NewCounterDef("persistence_errors_namespace_already_exists") - PersistenceErrBadRequestCounter = NewCounterDef("persistence_errors_bad_request") - PersistenceErrResourceExhaustedCounter = NewCounterDef("persistence_errors_resource_exhausted") - VisibilityPersistenceRequests = NewCounterDef("visibility_persistence_requests") - VisibilityPersistenceErrorWithType = NewCounterDef("visibility_persistence_error_with_type") - VisibilityPersistenceFailures = NewCounterDef("visibility_persistence_errors") - VisibilityPersistenceResourceExhausted = NewCounterDef("visibility_persistence_resource_exhausted") - VisibilityPersistenceLatency = NewTimerDef("visibility_persistence_latency") + 
PersistenceRequests = NewCounterDef( + "persistence_requests", + WithDescription("Persistence requests, keyed by `operation`"), + ) + PersistenceFailures = NewCounterDef("persistence_errors") + PersistenceErrorWithType = NewCounterDef( + "persistence_error_with_type", + WithDescription("Persistence errors, keyed by `error_type`"), + ) + PersistenceLatency = NewTimerDef( + "persistence_latency", + WithDescription("Persistence latency, keyed by `operation`"), + ) + PersistenceShardRPS = NewDimensionlessHistogramDef("persistence_shard_rps") + PersistenceErrResourceExhaustedCounter = NewCounterDef("persistence_errors_resource_exhausted") + VisibilityPersistenceRequests = NewCounterDef("visibility_persistence_requests") + VisibilityPersistenceErrorWithType = NewCounterDef("visibility_persistence_error_with_type") + VisibilityPersistenceFailures = NewCounterDef("visibility_persistence_errors") + VisibilityPersistenceResourceExhausted = NewCounterDef("visibility_persistence_resource_exhausted") + VisibilityPersistenceLatency = NewTimerDef("visibility_persistence_latency") ) diff -Nru temporal-1.21.5-1/src/common/metrics/metricstest/capture_handler.go temporal-1.22.5/src/common/metrics/metricstest/capture_handler.go --- temporal-1.21.5-1/src/common/metrics/metricstest/capture_handler.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/metrics/metricstest/capture_handler.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,146 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package metricstest + +import ( + "sync" + "time" + + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/metrics" +) + +// CapturedRecording is a single recording. Fields here should not be mutated. +type CapturedRecording struct { + Value any + Tags map[string]string + Unit metrics.MetricUnit +} + +// Capture is a specific capture instance. +type Capture struct { + recordings map[string][]*CapturedRecording + recordingsLock sync.RWMutex +} + +// Snapshot returns a copy of all metrics recorded, keyed by name. 
+func (c *Capture) Snapshot() map[string][]*CapturedRecording { + c.recordingsLock.RLock() + defer c.recordingsLock.RUnlock() + ret := make(map[string][]*CapturedRecording, len(c.recordings)) + for k, v := range c.recordings { + recs := make([]*CapturedRecording, len(v)) + copy(recs, v) + ret[k] = recs + } + return ret +} + +func (c *Capture) record(name string, r *CapturedRecording) { + c.recordingsLock.Lock() + defer c.recordingsLock.Unlock() + c.recordings[name] = append(c.recordings[name], r) +} + +// CaptureHandler is a [metrics.Handler] that captures each metric recording. +type CaptureHandler struct { + tags []metrics.Tag + captures map[*Capture]struct{} + capturesLock *sync.RWMutex +} + +var _ metrics.Handler = (*CaptureHandler)(nil) + +// NewCaptureHandler creates a new [metrics.Handler] that captures. +func NewCaptureHandler() *CaptureHandler { + return &CaptureHandler{ + captures: map[*Capture]struct{}{}, + capturesLock: &sync.RWMutex{}, + } +} + +// StartCapture returns a started capture. StopCapture should be called on +// complete. +func (c *CaptureHandler) StartCapture() *Capture { + capture := &Capture{recordings: map[string][]*CapturedRecording{}} + c.capturesLock.Lock() + defer c.capturesLock.Unlock() + c.captures[capture] = struct{}{} + return capture +} + +// StopCapture stops capturing metrics for the given capture instance. +func (c *CaptureHandler) StopCapture(capture *Capture) { + c.capturesLock.Lock() + defer c.capturesLock.Unlock() + delete(c.captures, capture) +} + +// WithTags implements [metrics.Handler.WithTags]. +func (c *CaptureHandler) WithTags(tags ...metrics.Tag) metrics.Handler { + return &CaptureHandler{ + tags: append(append(make([]metrics.Tag, 0, len(c.tags)+len(tags)), c.tags...), tags...), + captures: c.captures, + capturesLock: c.capturesLock, + } +} + +func (c *CaptureHandler) record(name string, v any, unit metrics.MetricUnit, tags ...metrics.Tag) { + rec := &CapturedRecording{Value: v, Tags: make(map[string]string, len(c.tags)+len(tags)), Unit: unit} + for _, tag := range c.tags { + rec.Tags[tag.Key()] = tag.Value() + } + for _, tag := range tags { + rec.Tags[tag.Key()] = tag.Value() + } + c.capturesLock.RLock() + defer c.capturesLock.RUnlock() + for c := range c.captures { + c.record(name, rec) + } +} + +// Counter implements [metrics.Handler.Counter]. +func (c *CaptureHandler) Counter(name string) metrics.CounterIface { + return metrics.CounterFunc(func(v int64, tags ...metrics.Tag) { c.record(name, v, "", tags...) }) +} + +// Gauge implements [metrics.Handler.Gauge]. +func (c *CaptureHandler) Gauge(name string) metrics.GaugeIface { + return metrics.GaugeFunc(func(v float64, tags ...metrics.Tag) { c.record(name, v, "", tags...) }) +} + +// Timer implements [metrics.Handler.Timer]. +func (c *CaptureHandler) Timer(name string) metrics.TimerIface { + return metrics.TimerFunc(func(v time.Duration, tags ...metrics.Tag) { c.record(name, v, "", tags...) }) +} + +// Histogram implements [metrics.Handler.Histogram]. +func (c *CaptureHandler) Histogram(name string, unit metrics.MetricUnit) metrics.HistogramIface { + return metrics.HistogramFunc(func(v int64, tags ...metrics.Tag) { c.record(name, v, unit, tags...) }) +} + +// Stop implements [metrics.Handler.Stop]. 
+func (*CaptureHandler) Stop(log.Logger) {} diff -Nru temporal-1.21.5-1/src/common/metrics/metricstest/metricstest.go temporal-1.22.5/src/common/metrics/metricstest/metricstest.go --- temporal-1.21.5-1/src/common/metrics/metricstest/metricstest.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/metrics/metricstest/metricstest.go 2024-02-23 09:45:43.000000000 +0000 @@ -109,7 +109,10 @@ ) meter := provider.Meter("temporal") - otelHandler := metrics.NewOtelMetricsHandler(logger, &otelProvider{meter: meter}, clientConfig) + otelHandler, err := metrics.NewOtelMetricsHandler(logger, &otelProvider{meter: meter}, clientConfig) + if err != nil { + return nil, err + } metricsHandler := &Handler{ Handler: otelHandler, reg: registry, diff -Nru temporal-1.21.5-1/src/common/metrics/option.go temporal-1.22.5/src/common/metrics/option.go --- temporal-1.21.5-1/src/common/metrics/option.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/metrics/option.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,45 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package metrics + +// Option is used to configure a metric definition. Note that options are currently only supported when using the +// Prometheus reporter with the OpenTelemetry framework. +type Option interface { + apply(m *metricDefinition) +} + +// WithDescription sets the description, or "help text", of a metric. See [ServiceRequests] for an example. +type WithDescription string + +func (h WithDescription) apply(m *metricDefinition) { + m.description = string(h) +} + +// WithUnit sets the unit of a metric. See NewBytesHistogramDef for an example. +type WithUnit MetricUnit + +func (h WithUnit) apply(m *metricDefinition) { + m.unit = MetricUnit(h) +} diff -Nru temporal-1.21.5-1/src/common/metrics/otel_metrics_handler.go temporal-1.22.5/src/common/metrics/otel_metrics_handler.go --- temporal-1.21.5-1/src/common/metrics/otel_metrics_handler.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/metrics/otel_metrics_handler.go 2024-02-23 09:45:43.000000000 +0000 @@ -26,6 +26,8 @@ import ( "context" + "fmt" + "sync" "time" "go.opentelemetry.io/otel/attribute" @@ -36,91 +38,165 @@ ) // otelMetricsHandler is an adapter around an OpenTelemetry [metric.Meter] that implements the [Handler] interface. 
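The two new files above give Temporal per-metric descriptions/units (common/metrics/option.go) and an in-memory capture handler for tests (common/metrics/metricstest/capture_handler.go). As a quick orientation, here is a minimal hedged sketch of how they might be combined, using only the APIs visible in this diff (NewCounterDef, WithDescription, GetMetricName, NewCaptureHandler, StartCapture/StopCapture, Snapshot); the metric name, package name and test are invented for illustration and are not part of the Temporal source.

package example

import (
	"testing"

	"go.temporal.io/server/common/metrics"
	"go.temporal.io/server/common/metrics/metricstest"
)

// exampleRequests is a hypothetical metric definition; the real ones live in
// metric_defs.go. WithDescription feeds the catalog that the OTel handler
// later turns into help text.
var exampleRequests = metrics.NewCounterDef(
	"example_requests",
	metrics.WithDescription("Example requests, keyed by `operation`"),
)

func TestExampleRequestsAreCaptured(t *testing.T) {
	handler := metricstest.NewCaptureHandler()
	capture := handler.StartCapture()
	defer handler.StopCapture(capture)

	// Record through the Handler interface, the same pattern production code
	// uses (compare CapturePanic later in this diff).
	handler.Counter(exampleRequests.GetMetricName()).Record(1)

	recs := capture.Snapshot()[exampleRequests.GetMetricName()]
	if len(recs) != 1 || recs[0].Value.(int64) != 1 {
		t.Fatalf("unexpected recordings: %#v", recs)
	}
}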
-type otelMetricsHandler struct { - l log.Logger - tags []Tag - provider OpenTelemetryProvider - excludeTags excludeTags -} +type ( + otelMetricsHandler struct { + l log.Logger + set attribute.Set + provider OpenTelemetryProvider + excludeTags map[string]map[string]struct{} + catalog catalog + gauges *sync.Map // string -> *gaugeAdapter. note: shared between multiple otelMetricsHandlers + } + + // This is to work around the lack of synchronous gauge: + // https://github.com/open-telemetry/opentelemetry-specification/issues/2318 + // Basically, otel gauges only support getting a value with a callback, they can't store a + // value for us. So we have to store it ourselves and supply it to a callback. + gaugeAdapter struct { + lock sync.Mutex + values map[attribute.Distinct]gaugeValue + } + gaugeValue struct { + value float64 + // In practice, we can use attribute.Set itself as the map key in gaugeAdapter and it + // works, but according to the API we should use attribute.Distinct. But we can't get a + // Set back from a Distinct, so we have to store the set here also. + set attribute.Set + } + gaugeAdapterGauge struct { + omp *otelMetricsHandler + adapter *gaugeAdapter + } +) var _ Handler = (*otelMetricsHandler)(nil) -func NewOtelMetricsHandler(l log.Logger, o OpenTelemetryProvider, cfg ClientConfig) *otelMetricsHandler { +// NewOtelMetricsHandler returns a new Handler that uses the provided OpenTelemetry [metric.Meter] to record metrics. +// This OTel handler supports metric descriptions for metrics registered with the New*Def functions. However, those +// functions must be called before this constructor. Otherwise, the descriptions will be empty. This is because the +// OTel metric descriptions are generated from the globalRegistry. You may also record metrics that are not registered +// via the New*Def functions. In that case, the metric description will be the OTel default (the metric name itself). +func NewOtelMetricsHandler( + l log.Logger, + o OpenTelemetryProvider, + cfg ClientConfig, +) (*otelMetricsHandler, error) { + c, err := globalRegistry.buildCatalog() + if err != nil { + return nil, fmt.Errorf("failed to build metrics catalog: %w", err) + } return &otelMetricsHandler{ l: l, + set: makeInitialSet(cfg.Tags), provider: o, excludeTags: configExcludeTags(cfg), - } + catalog: c, + gauges: new(sync.Map), + }, nil } // WithTags creates a new Handler with the provided Tag list. // Tags are merged with the existing tags. func (omp *otelMetricsHandler) WithTags(tags ...Tag) Handler { - return &otelMetricsHandler{ - provider: omp.provider, - excludeTags: omp.excludeTags, - tags: append(omp.tags, tags...), - } + newHandler := *omp + newHandler.set = newHandler.makeSet(tags) + return &newHandler } // Counter obtains a counter for the given name. func (omp *otelMetricsHandler) Counter(counter string) CounterIface { - c, err := omp.provider.GetMeter().Int64Counter(counter) + opts := addOptions(omp, counterOptions{}, counter) + c, err := omp.provider.GetMeter().Int64Counter(counter, opts...) if err != nil { omp.l.Error("error getting metric", tag.NewStringTag("MetricName", counter), tag.Error(err)) return CounterFunc(func(i int64, t ...Tag) {}) } return CounterFunc(func(i int64, t ...Tag) { - option := metric.WithAttributes(tagsToAttributes(omp.tags, t, omp.excludeTags)...) 
+ option := metric.WithAttributeSet(omp.makeSet(t)) c.Add(context.Background(), i, option) }) } +func (omp *otelMetricsHandler) getGaugeAdapter(gauge string) (*gaugeAdapter, error) { + if v, ok := omp.gauges.Load(gauge); ok { + return v.(*gaugeAdapter), nil + } + adapter := &gaugeAdapter{ + values: make(map[attribute.Distinct]gaugeValue), + } + if v, wasLoaded := omp.gauges.LoadOrStore(gauge, adapter); wasLoaded { + return v.(*gaugeAdapter), nil + } + + opts := addOptions(omp, gaugeOptions{ + metric.WithFloat64Callback(adapter.callback), + }, gauge) + // Register the gauge with otel. It will call our callback when it wants to read the values. + _, err := omp.provider.GetMeter().Float64ObservableGauge(gauge, opts...) + if err != nil { + omp.gauges.Delete(gauge) + omp.l.Error("error getting metric", tag.NewStringTag("MetricName", gauge), tag.Error(err)) + return nil, err + } + + return adapter, nil +} + // Gauge obtains a gauge for the given name. func (omp *otelMetricsHandler) Gauge(gauge string) GaugeIface { - c, err := omp.provider.GetMeter().Float64ObservableGauge(gauge) + adapter, err := omp.getGaugeAdapter(gauge) if err != nil { - omp.l.Error("error getting metric", tag.NewStringTag("MetricName", gauge), tag.Error(err)) return GaugeFunc(func(i float64, t ...Tag) {}) } + return &gaugeAdapterGauge{ + omp: omp, + adapter: adapter, + } +} - return GaugeFunc(func(i float64, t ...Tag) { - _, err = omp.provider.GetMeter().RegisterCallback(func(ctx context.Context, o metric.Observer) error { - option := metric.WithAttributes(tagsToAttributes(omp.tags, t, omp.excludeTags)...) - o.ObserveFloat64(c, i, option) - return nil - }, c) - if err != nil { - omp.l.Error("error setting callback metric update", tag.NewStringTag("MetricName", gauge), tag.Error(err)) - } - }) +func (a *gaugeAdapter) callback(ctx context.Context, o metric.Float64Observer) error { + a.lock.Lock() + defer a.lock.Unlock() + for _, v := range a.values { + o.Observe(v.value, metric.WithAttributeSet(v.set)) + } + return nil +} + +func (g *gaugeAdapterGauge) Record(v float64, tags ...Tag) { + set := g.omp.makeSet(tags) + g.adapter.lock.Lock() + defer g.adapter.lock.Unlock() + g.adapter.values[set.Equivalent()] = gaugeValue{value: v, set: set} } // Timer obtains a timer for the given name. func (omp *otelMetricsHandler) Timer(timer string) TimerIface { - c, err := omp.provider.GetMeter().Int64Histogram(timer, metric.WithUnit(Milliseconds)) + opts := addOptions(omp, histogramOptions{metric.WithUnit(Milliseconds)}, timer) + c, err := omp.provider.GetMeter().Int64Histogram(timer, opts...) if err != nil { omp.l.Error("error getting metric", tag.NewStringTag("MetricName", timer), tag.Error(err)) return TimerFunc(func(i time.Duration, t ...Tag) {}) } return TimerFunc(func(i time.Duration, t ...Tag) { - option := metric.WithAttributes(tagsToAttributes(omp.tags, t, omp.excludeTags)...) + option := metric.WithAttributeSet(omp.makeSet(t)) c.Record(context.Background(), i.Milliseconds(), option) }) } // Histogram obtains a histogram for the given name. func (omp *otelMetricsHandler) Histogram(histogram string, unit MetricUnit) HistogramIface { - c, err := omp.provider.GetMeter().Int64Histogram(histogram, metric.WithUnit(string(unit))) + opts := addOptions(omp, histogramOptions{metric.WithUnit(string(unit))}, histogram) + c, err := omp.provider.GetMeter().Int64Histogram(histogram, opts...) 
if err != nil { omp.l.Error("error getting metric", tag.NewStringTag("MetricName", histogram), tag.Error(err)) return HistogramFunc(func(i int64, t ...Tag) {}) } return HistogramFunc(func(i int64, t ...Tag) { - option := metric.WithAttributes(tagsToAttributes(omp.tags, t, omp.excludeTags)...) + option := metric.WithAttributeSet(omp.makeSet(t)) c.Record(context.Background(), i, option) }) } @@ -129,27 +205,38 @@ omp.provider.Stop(l) } -// tagsToAttributes helper to merge registred tags and additional tags converting to attribute.KeyValue struct -func tagsToAttributes(t1 []Tag, t2 []Tag, e excludeTags) []attribute.KeyValue { - var attrs []attribute.KeyValue - - convert := func(tag Tag) attribute.KeyValue { - if vals, ok := e[tag.Key()]; ok { - if _, ok := vals[tag.Value()]; !ok { - return attribute.String(tag.Key(), tagExcludedValue) - } +// makeSet returns an otel attribute.Set with the given tags merged with the +// otelMetricsHandler's tags. +func (omp *otelMetricsHandler) makeSet(tags []Tag) attribute.Set { + if len(tags) == 0 { + return omp.set + } + attrs := make([]attribute.KeyValue, 0, omp.set.Len()+len(tags)) + for i := omp.set.Iter(); i.Next(); { + attrs = append(attrs, i.Attribute()) + } + for _, t := range tags { + attrs = append(attrs, omp.convertTag(t)) + } + return attribute.NewSet(attrs...) +} + +func (omp *otelMetricsHandler) convertTag(tag Tag) attribute.KeyValue { + if vals, ok := omp.excludeTags[tag.Key()]; ok { + if _, ok := vals[tag.Value()]; !ok { + return attribute.String(tag.Key(), tagExcludedValue) } - - return attribute.String(tag.Key(), tag.Value()) } + return attribute.String(tag.Key(), tag.Value()) +} - for i := range t1 { - attrs = append(attrs, convert(t1[i])) +func makeInitialSet(tags map[string]string) attribute.Set { + if len(tags) == 0 { + return *attribute.EmptySet() } - - for i := range t2 { - attrs = append(attrs, convert(t2[i])) + var attrs []attribute.KeyValue + for k, v := range tags { + attrs = append(attrs, attribute.String(k, v)) } - - return attrs + return attribute.NewSet(attrs...) 
} diff -Nru temporal-1.21.5-1/src/common/metrics/otel_metrics_handler_test.go temporal-1.22.5/src/common/metrics/otel_metrics_handler_test.go --- temporal-1.21.5-1/src/common/metrics/otel_metrics_handler_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/metrics/otel_metrics_handler_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -34,6 +34,7 @@ "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric" sdkmetrics "go.opentelemetry.io/otel/sdk/metric" @@ -101,11 +102,17 @@ ), ), ) - p := NewOtelMetricsHandler(log.NewTestLogger(), &testProvider{meter: provider.Meter("test")}, defaultConfig) + + p, err := NewOtelMetricsHandler( + log.NewTestLogger(), + &testProvider{meter: provider.Meter("test")}, + defaultConfig, + ) + require.NoError(t, err) recordMetrics(p) var got metricdata.ResourceMetrics - err := rdr.Collect(ctx, &got) + err = rdr.Collect(ctx, &got) assert.Nil(t, err) want := []metricdata.Metrics{ @@ -257,7 +264,8 @@ meter := erroneousMeter{err: testErr} provider := &testProvider{meter: meter} cfg := ClientConfig{} - handler := NewOtelMetricsHandler(logger, provider, cfg) + handler, err := NewOtelMetricsHandler(logger, provider, cfg) + require.NoError(t, err) msg := "error getting metric" errTag := tag.Error(testErr) diff -Nru temporal-1.21.5-1/src/common/metrics/otel_options.go temporal-1.22.5/src/common/metrics/otel_options.go --- temporal-1.21.5-1/src/common/metrics/otel_options.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/metrics/otel_options.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,73 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package metrics + +import ( + "go.opentelemetry.io/otel/metric" +) + +type ( + // optionSet represents a slice of metric options. We need it to be able to add options of the + // [metric.InstrumentOption] type to slices which may be of any other type implemented by metric.InstrumentOption. + // Normally, you could do something like `T metric.InstrumentOption` here, but the type dependency here is reversed. + // We need a generic type T that is implemented *by* metric.InstrumentOption, not the other way around. 
+ // This is the only solution which avoids duplicating all the logic of the addOptions function without relying on + // reflection, an error-prone type assertion, or a type switch with a runtime error for unhandled cases. + optionSet[T any] interface { + addOption(option metric.InstrumentOption) T + } + counterOptions []metric.Int64CounterOption + gaugeOptions []metric.Float64ObservableGaugeOption + histogramOptions []metric.Int64HistogramOption +) + +func addOptions[T optionSet[T]](omp *otelMetricsHandler, opts T, metricName string) T { + metricDef, ok := omp.catalog.getMetric(metricName) + if !ok { + return opts + } + + if description := metricDef.description; description != "" { + opts = opts.addOption(metric.WithDescription(description)) + } + + if unit := metricDef.unit; unit != "" { + opts = opts.addOption(metric.WithUnit(string(unit))) + } + + return opts +} + +func (opts counterOptions) addOption(option metric.InstrumentOption) counterOptions { + return append(opts, option) +} + +func (opts gaugeOptions) addOption(option metric.InstrumentOption) gaugeOptions { + return append(opts, option) +} + +func (opts histogramOptions) addOption(option metric.InstrumentOption) histogramOptions { + return append(opts, option) +} diff -Nru temporal-1.21.5-1/src/common/metrics/otel_options_test.go temporal-1.22.5/src/common/metrics/otel_options_test.go --- temporal-1.21.5-1/src/common/metrics/otel_options_test.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/metrics/otel_options_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,106 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
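The optionSet[T] constraint defined above is easy to misread: the type parameter must provide an addOption method that returns the parameter's own type, which is what lets the single generic addOptions helper append a shared option value to several otherwise unrelated slice types. Below is a self-contained sketch of the same pattern with invented stand-in types (not the real OTel option types); it only illustrates the shape of the constraint that otel_options.go relies on.

package main

import "fmt"

// Two distinct per-instrument option interfaces, standing in for
// metric.Int64CounterOption and metric.Int64HistogramOption.
type counterOption interface{ counter() }
type histogramOption interface{ histogram() }

// sharedOption plays the role of metric.InstrumentOption: it satisfies both
// per-instrument interfaces, so it can be appended to either slice type.
type sharedOption struct{ description string }

func (sharedOption) counter()   {}
func (sharedOption) histogram() {}

type counterOptions []counterOption
type histogramOptions []histogramOption

// optionSet mirrors the constraint above: T must be able to absorb a shared
// option and hand back its own type.
type optionSet[T any] interface{ addOption(o sharedOption) T }

func (o counterOptions) addOption(opt sharedOption) counterOptions { return append(o, opt) }

func (o histogramOptions) addOption(opt sharedOption) histogramOptions { return append(o, opt) }

// addCommon is the analogue of addOptions: one generic helper that decorates
// any per-instrument option slice without reflection or a type switch.
func addCommon[T optionSet[T]](opts T, description string) T {
	if description != "" {
		opts = opts.addOption(sharedOption{description: description})
	}
	return opts
}

func main() {
	c := addCommon(counterOptions{}, "counter help text")
	h := addCommon(histogramOptions{}, "histogram help text")
	fmt.Println(len(c), len(h)) // prints: 1 1
}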
+ +package metrics + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/otel/metric" +) + +type testCase struct { + name string + catalog map[string]metricDefinition + expectedOpts []metric.InstrumentOption +} + +func TestAddOptions(t *testing.T) { + t.Parallel() + + metricName := "foo" + inputOpts := []metric.InstrumentOption{ + metric.WithDescription("foo description"), + metric.WithUnit(Milliseconds), + } + for _, c := range []testCase{ + { + name: "missing metric", + catalog: map[string]metricDefinition{}, + expectedOpts: inputOpts, + }, + { + name: "empty metric definition", + catalog: map[string]metricDefinition{ + metricName: {}, + }, + expectedOpts: inputOpts, + }, + { + name: "opts overwritten", + catalog: map[string]metricDefinition{ + metricName: { + description: "bar description", + unit: Bytes, + }, + }, + expectedOpts: []metric.InstrumentOption{ + metric.WithDescription("foo description"), + metric.WithUnit(Milliseconds), + metric.WithDescription("bar description"), + metric.WithUnit(Bytes), + }, + }, + } { + c := c + t.Run(c.name, func(t *testing.T) { + t.Parallel() + + handler := &otelMetricsHandler{catalog: c.catalog} + var ( + counter counterOptions + gauge gaugeOptions + hist histogramOptions + ) + for _, opt := range inputOpts { + counter = append(counter, opt.(metric.Int64CounterOption)) + gauge = append(gauge, opt.(metric.Float64ObservableGaugeOption)) + hist = append(hist, opt.(metric.Int64HistogramOption)) + } + counter = addOptions(handler, counter, metricName) + gauge = addOptions(handler, gauge, metricName) + hist = addOptions(handler, hist, metricName) + require.Len(t, counter, len(c.expectedOpts)) + require.Len(t, gauge, len(c.expectedOpts)) + require.Len(t, hist, len(c.expectedOpts)) + for i, opt := range c.expectedOpts { + assert.Equal(t, opt, counter[i]) + assert.Equal(t, opt, gauge[i]) + assert.Equal(t, opt, hist[i]) + } + }) + } +} diff -Nru temporal-1.21.5-1/src/common/metrics/panic.go temporal-1.22.5/src/common/metrics/panic.go --- temporal-1.21.5-1/src/common/metrics/panic.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/metrics/panic.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,56 @@ +// The MIT License + +// +// Copyright (c) 2023 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2023 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package metrics + +import ( + "fmt" + "runtime/debug" + + "go.temporal.io/api/serviceerror" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/log/tag" +) + +// CapturePanic is used to capture panic, it will emit the service panic metric, log the panic and also return the error through pointer. +// If the panic value is not error then a default error is returned +// We have to use pointer is because in golang: "recover return nil if was not called directly by a deferred function." +// And we have to set the returned error otherwise our handler will return nil as error which is incorrect +func CapturePanic(logger log.Logger, metricHandler Handler, retError *error) { + //revive:disable-next-line:defer + if panicObj := recover(); panicObj != nil { + err, ok := panicObj.(error) + if !ok { + err = serviceerror.NewInternal(fmt.Sprintf("panic: %v", panicObj)) + } + + st := string(debug.Stack()) + + logger.Error("Panic is captured", tag.SysStackTrace(st), tag.Error(err)) + + metricHandler.Counter(ServicePanic.GetMetricName()).Record(1) + *retError = serviceerror.NewInternal(err.Error()) + } +} diff -Nru temporal-1.21.5-1/src/common/metrics/registry.go temporal-1.22.5/src/common/metrics/registry.go --- temporal-1.21.5-1/src/common/metrics/registry.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/metrics/registry.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,111 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package metrics + +import ( + "errors" + "fmt" + "sync" +) + +type ( + // registry tracks a list of metricDefinition objects added with register and then builds a catalog + // of them using the buildCatalog method. See globalRegistry for more. + registry struct { + sync.Mutex + definitions []metricDefinition + } + // catalog is a map of metric name to definition. It should not be modified after it is built. + catalog map[string]metricDefinition +) + +var ( + // globalRegistry tracks metrics defined via the New*Def methods. We use a global variable here so that clients may + // continue to refer to package-level metrics like metrics.ServiceRequests, while still allowing us to iterate over + // all metrics defined in the package to register them with the metrics system. The sequence through which metrics + // are registered, sampled and scraped is as follows: + // + // 1. 
When the metrics package is initialized, statements calling New*Def are executed to define metrics, + which adds them to the global registry. + // 2. Before a Handler object is constructed, one of this package's fx provider functions will call registry.buildCatalog to + build the catalog for these metrics. + // 3. The constructed catalog is passed to the Handler so that it knows the metadata for all defined metrics. + // 4. Clients call methods on the Handler to obtain metric objects like Handler.Counter and Handler.Timer. + // 5. Those methods retrieve the metadata from the catalog and use it to construct the metric object using a + third-party metrics library, e.g. OpenTelemetry. This is where most of the work happens. + // 6. Clients record a metric using that metrics object, e.g. by calling CounterFunc, and the sample is recorded. + // 7. At some point, the /metrics endpoint is scraped, and the Prometheus handler we register will iterate over all + the aggregated samples and metrics and write them to the response. The metric metadata we passed to the + third-party metrics library in step 5 is used here and rendered in the response as comments like: + # HELP <metric_name> <description>. + globalRegistry registry + // errMetricAlreadyExists is returned by registry.buildCatalog when it finds two metrics with the same name. + errMetricAlreadyExists = errors.New("metric already exists") +) + +// register adds a metric definition to the list of pending metric definitions. This method is thread-safe. +func (c *registry) register(name string, opts ...Option) metricDefinition { + c.Lock() + defer c.Unlock() + + d := metricDefinition{ + name: name, + description: "", + unit: "", + } + for _, opt := range opts { + opt.apply(&d) + } + + c.definitions = append(c.definitions, d) + + return d +} + +// buildCatalog builds a catalog from the list of pending metric definitions. It is safe to call this method multiple +// times. This method is thread-safe. +func (c *registry) buildCatalog() (catalog, error) { + c.Lock() + defer c.Unlock() + + r := make(catalog, len(c.definitions)) + for _, d := range c.definitions { + if original, ok := r[d.name]; ok { + return nil, fmt.Errorf( + "%w: metric %q already defined with %+v. Cannot redefine with %+v", + errMetricAlreadyExists, d.name, original, d, + ) + } + + r[d.name] = d + } + + return r, nil +} + +func (c catalog) getMetric(name string) (metricDefinition, bool) { + def, ok := c[name] + return def, ok +} diff -Nru temporal-1.21.5-1/src/common/metrics/registry_test.go temporal-1.22.5/src/common/metrics/registry_test.go --- temporal-1.21.5-1/src/common/metrics/registry_test.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/metrics/registry_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,56 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software.
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package metrics + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestRegistryBuildCatalog_Ok(t *testing.T) { + t.Parallel() + + r := registry{} + r.register("foo", WithDescription("foo description")) + r.register("bar", WithDescription("bar description")) + c, err := r.buildCatalog() + require.Nil(t, err) + require.Equal(t, 2, len(c)) + require.Equal(t, "foo description", c["foo"].description) + require.Equal(t, "bar description", c["bar"].description) +} + +func TestRegistryBuildCatalog_ErrMetricAlreadyExists(t *testing.T) { + t.Parallel() + + b := registry{} + b.register("foo", WithDescription("foo description")) + b.register("foo", WithDescription("bar description")) + _, err := b.buildCatalog() + assert.ErrorIs(t, err, errMetricAlreadyExists) + assert.ErrorContains(t, err, "foo") +} diff -Nru temporal-1.21.5-1/src/common/metrics/tags.go temporal-1.22.5/src/common/metrics/tags.go --- temporal-1.21.5-1/src/common/metrics/tags.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/metrics/tags.go 2024-02-23 09:45:43.000000000 +0000 @@ -250,6 +250,11 @@ return &tagImpl{key: visibilityTypeTagName, value: value} } +// VersionedTag represents whether a loaded task queue manager represents a specific version set. +func VersionedTag(versioned bool) Tag { + return &tagImpl{key: versionedTagName, value: strconv.FormatBool(versioned)} +} + func ServiceErrorTypeTag(err error) Tag { return &tagImpl{key: ErrorTypeTagName, value: strings.TrimPrefix(fmt.Sprintf(getType, err), errorPrefix)} } diff -Nru temporal-1.21.5-1/src/common/metrics/tally/statsd/reporter.go temporal-1.22.5/src/common/metrics/tally/statsd/reporter.go --- temporal-1.21.5-1/src/common/metrics/tally/statsd/reporter.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/metrics/tally/statsd/reporter.go 2024-02-23 09:45:43.000000000 +0000 @@ -29,7 +29,7 @@ "strings" "time" - "github.com/cactus/go-statsd-client/statsd" + "github.com/cactus/go-statsd-client/v5/statsd" "github.com/uber-go/tally/v4" tallystatsdreporter "github.com/uber-go/tally/v4/statsd" ) diff -Nru temporal-1.21.5-1/src/common/namespace/archivalConfigStateMachine.go temporal-1.22.5/src/common/namespace/archivalConfigStateMachine.go --- temporal-1.21.5-1/src/common/namespace/archivalConfigStateMachine.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/namespace/archivalConfigStateMachine.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,242 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
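Stepping back to common/metrics/panic.go above: its doc comment prescribes a specific call pattern, a direct defer that is handed a pointer to a named return value so the recovered panic can be surfaced as an Internal service error. A minimal sketch of that pattern, assuming only the CapturePanic signature shown in this diff; doWork and its body are invented for illustration.

package example

import (
	"go.temporal.io/server/common/log"
	"go.temporal.io/server/common/metrics"
)

// doWork is a hypothetical handler method. CapturePanic must be deferred
// directly, and the named return value lets it replace the error (and bump
// the service panic counter) when a panic is recovered.
func doWork(logger log.Logger, handler metrics.Handler) (retErr error) {
	defer metrics.CapturePanic(logger, handler, &retErr)

	// ... business logic that may panic ...
	return nil
}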
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package namespace - -import ( - enumspb "go.temporal.io/api/enums/v1" - "go.temporal.io/api/serviceerror" -) - -// namespaceArchivalConfigStateMachine is only used by namespaceHandler. -// It is simply meant to simplify the logic around archival namespace state changes. -// Logically this class can be thought of as part of namespaceHandler. - -type ( - // ArchivalConfigState represents the state of archival config - // the only invalid state is {URI="", state=enabled} - // once URI is set it is immutable - ArchivalConfigState struct { - State enumspb.ArchivalState - URI string - } - - // ArchivalConfigEvent represents a change request to archival config state - // the only restriction placed on events is that defaultURI is not empty - // state can be nil, enabled, or disabled (nil indicates no update by user is being attempted) - ArchivalConfigEvent struct { - DefaultURI string - URI string - State enumspb.ArchivalState - } -) - -// the following errors represents impossible code states that should never occur -var ( - errInvalidState = serviceerror.NewInvalidArgument("Encountered illegal state: archival is enabled but URI is not set (should be impossible)") - errInvalidEvent = serviceerror.NewInvalidArgument("Encountered illegal event: default URI is not set (should be impossible)") - errCannotHandleStateChange = serviceerror.NewInvalidArgument("Encountered current state and event that cannot be handled (should be impossible)") - errURIUpdate = serviceerror.NewInvalidArgument("Cannot update existing archival URI") -) - -func NeverEnabledState() *ArchivalConfigState { - return &ArchivalConfigState{ - URI: "", - State: enumspb.ARCHIVAL_STATE_DISABLED, - } -} - -func (e *ArchivalConfigEvent) Validate() error { - if len(e.DefaultURI) == 0 { - return errInvalidEvent - } - return nil -} - -func (s *ArchivalConfigState) validate() error { - if s.State == enumspb.ARCHIVAL_STATE_ENABLED && len(s.URI) == 0 { - return errInvalidState - } - return nil -} - -func (s *ArchivalConfigState) GetNextState( - e *ArchivalConfigEvent, - URIValidationFunc func(URI string) error, -) (nextState *ArchivalConfigState, changed bool, err error) { - defer func() { - // ensure that any existing URI name was not mutated - if nextState != nil && len(s.URI) != 0 && s.URI != nextState.URI { - nextState = nil - changed = false - err = errCannotHandleStateChange - return - } - - // ensure that next state is valid - if nextState != nil { - if 
nextStateErr := nextState.validate(); nextStateErr != nil { - nextState = nil - changed = false - err = nextStateErr - return - } - } - - if nextState != nil && nextState.URI != "" { - if validateURIErr := URIValidationFunc(nextState.URI); validateURIErr != nil { - nextState = nil - changed = false - err = validateURIErr - return - } - } - }() - - if s == nil || e == nil { - return nil, false, errCannotHandleStateChange - } - if err := s.validate(); err != nil { - return nil, false, err - } - if err := e.Validate(); err != nil { - return nil, false, err - } - - /** - At this point state and event are both non-nil and valid. - - State can be any one of the following: - {state=enabled, URI="foo"} - {state=disabled, URI="foo"} - {state=disabled, URI=""} - - Event can be any one of the following: - {state=enabled, URI="foo", defaultURI="bar"} - {state=enabled, URI="", defaultURI="bar"} - {state=disabled, URI="foo", defaultURI="bar"} - {state=disabled, URI="", defaultURI="bar"} - {state=nil, URI="foo", defaultURI="bar"} - {state=nil, URI="", defaultURI="bar"} - */ - - stateURISet := len(s.URI) != 0 - eventURISet := len(e.URI) != 0 - - // factor this case out to ensure that URI is immutable - if stateURISet && eventURISet && s.URI != e.URI { - return nil, false, errURIUpdate - } - - // state 1 - if s.State == enumspb.ARCHIVAL_STATE_ENABLED && stateURISet { - if e.State == enumspb.ARCHIVAL_STATE_ENABLED && eventURISet { - return s, false, nil - } - if e.State == enumspb.ARCHIVAL_STATE_ENABLED && !eventURISet { - return s, false, nil - } - if e.State == enumspb.ARCHIVAL_STATE_DISABLED && eventURISet { - return &ArchivalConfigState{ - State: enumspb.ARCHIVAL_STATE_DISABLED, - URI: s.URI, - }, true, nil - } - if e.State == enumspb.ARCHIVAL_STATE_DISABLED && !eventURISet { - return &ArchivalConfigState{ - State: enumspb.ARCHIVAL_STATE_DISABLED, - URI: s.URI, - }, true, nil - } - if e.State == enumspb.ARCHIVAL_STATE_UNSPECIFIED && eventURISet { - return s, false, nil - } - if e.State == enumspb.ARCHIVAL_STATE_UNSPECIFIED && !eventURISet { - return s, false, nil - } - } - - // state 2 - if s.State == enumspb.ARCHIVAL_STATE_DISABLED && stateURISet { - if e.State == enumspb.ARCHIVAL_STATE_ENABLED && eventURISet { - return &ArchivalConfigState{ - URI: s.URI, - State: enumspb.ARCHIVAL_STATE_ENABLED, - }, true, nil - } - if e.State == enumspb.ARCHIVAL_STATE_ENABLED && !eventURISet { - return &ArchivalConfigState{ - State: enumspb.ARCHIVAL_STATE_ENABLED, - URI: s.URI, - }, true, nil - } - if e.State == enumspb.ARCHIVAL_STATE_DISABLED && eventURISet { - return s, false, nil - } - if e.State == enumspb.ARCHIVAL_STATE_DISABLED && !eventURISet { - return s, false, nil - } - if e.State == enumspb.ARCHIVAL_STATE_UNSPECIFIED && eventURISet { - return s, false, nil - } - if e.State == enumspb.ARCHIVAL_STATE_UNSPECIFIED && !eventURISet { - return s, false, nil - } - } - - // state 3 - if s.State == enumspb.ARCHIVAL_STATE_DISABLED && !stateURISet { - if e.State == enumspb.ARCHIVAL_STATE_ENABLED && eventURISet { - return &ArchivalConfigState{ - State: enumspb.ARCHIVAL_STATE_ENABLED, - URI: e.URI, - }, true, nil - } - if e.State == enumspb.ARCHIVAL_STATE_ENABLED && !eventURISet { - return &ArchivalConfigState{ - State: enumspb.ARCHIVAL_STATE_ENABLED, - URI: e.DefaultURI, - }, true, nil - } - if e.State == enumspb.ARCHIVAL_STATE_DISABLED && eventURISet { - return &ArchivalConfigState{ - State: enumspb.ARCHIVAL_STATE_DISABLED, - URI: e.URI, - }, true, nil - } - if e.State == enumspb.ARCHIVAL_STATE_DISABLED && !eventURISet 
{ - return s, false, nil - } - if e.State == enumspb.ARCHIVAL_STATE_UNSPECIFIED && eventURISet { - return &ArchivalConfigState{ - State: enumspb.ARCHIVAL_STATE_DISABLED, - URI: e.URI, - }, true, nil - } - if e.State == enumspb.ARCHIVAL_STATE_UNSPECIFIED && !eventURISet { - return s, false, nil - } - } - return nil, false, errCannotHandleStateChange -} diff -Nru temporal-1.21.5-1/src/common/namespace/archival_config_state_machine.go temporal-1.22.5/src/common/namespace/archival_config_state_machine.go --- temporal-1.21.5-1/src/common/namespace/archival_config_state_machine.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/namespace/archival_config_state_machine.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,242 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package namespace + +import ( + enumspb "go.temporal.io/api/enums/v1" + "go.temporal.io/api/serviceerror" +) + +// namespaceArchivalConfigStateMachine is only used by namespaceHandler. +// It is simply meant to simplify the logic around archival namespace state changes. +// Logically this class can be thought of as part of namespaceHandler. 
+ +type ( + // ArchivalConfigState represents the state of archival config + // the only invalid state is {URI="", state=enabled} + // once URI is set it is immutable + ArchivalConfigState struct { + State enumspb.ArchivalState + URI string + } + + // ArchivalConfigEvent represents a change request to archival config state + // the only restriction placed on events is that defaultURI is not empty + // state can be nil, enabled, or disabled (nil indicates no update by user is being attempted) + ArchivalConfigEvent struct { + DefaultURI string + URI string + State enumspb.ArchivalState + } +) + +// the following errors represents impossible code states that should never occur +var ( + errInvalidState = serviceerror.NewInvalidArgument("Encountered illegal state: archival is enabled but URI is not set (should be impossible)") + errInvalidEvent = serviceerror.NewInvalidArgument("Encountered illegal event: default URI is not set (should be impossible)") + errCannotHandleStateChange = serviceerror.NewInvalidArgument("Encountered current state and event that cannot be handled (should be impossible)") + errURIUpdate = serviceerror.NewInvalidArgument("Cannot update existing archival URI") +) + +func NeverEnabledState() *ArchivalConfigState { + return &ArchivalConfigState{ + URI: "", + State: enumspb.ARCHIVAL_STATE_DISABLED, + } +} + +func (e *ArchivalConfigEvent) Validate() error { + if len(e.DefaultURI) == 0 { + return errInvalidEvent + } + return nil +} + +func (s *ArchivalConfigState) validate() error { + if s.State == enumspb.ARCHIVAL_STATE_ENABLED && len(s.URI) == 0 { + return errInvalidState + } + return nil +} + +func (s *ArchivalConfigState) GetNextState( + e *ArchivalConfigEvent, + URIValidationFunc func(URI string) error, +) (nextState *ArchivalConfigState, changed bool, err error) { + defer func() { + // ensure that any existing URI name was not mutated + if nextState != nil && len(s.URI) != 0 && s.URI != nextState.URI { + nextState = nil + changed = false + err = errCannotHandleStateChange + return + } + + // ensure that next state is valid + if nextState != nil { + if nextStateErr := nextState.validate(); nextStateErr != nil { + nextState = nil + changed = false + err = nextStateErr + return + } + } + + if nextState != nil && nextState.URI != "" { + if validateURIErr := URIValidationFunc(nextState.URI); validateURIErr != nil { + nextState = nil + changed = false + err = validateURIErr + return + } + } + }() + + if s == nil || e == nil { + return nil, false, errCannotHandleStateChange + } + if err := s.validate(); err != nil { + return nil, false, err + } + if err := e.Validate(); err != nil { + return nil, false, err + } + + /** + At this point state and event are both non-nil and valid. 
+ + State can be any one of the following: + {state=enabled, URI="foo"} + {state=disabled, URI="foo"} + {state=disabled, URI=""} + + Event can be any one of the following: + {state=enabled, URI="foo", defaultURI="bar"} + {state=enabled, URI="", defaultURI="bar"} + {state=disabled, URI="foo", defaultURI="bar"} + {state=disabled, URI="", defaultURI="bar"} + {state=nil, URI="foo", defaultURI="bar"} + {state=nil, URI="", defaultURI="bar"} + */ + + stateURISet := len(s.URI) != 0 + eventURISet := len(e.URI) != 0 + + // factor this case out to ensure that URI is immutable + if stateURISet && eventURISet && s.URI != e.URI { + return nil, false, errURIUpdate + } + + // state 1 + if s.State == enumspb.ARCHIVAL_STATE_ENABLED && stateURISet { + if e.State == enumspb.ARCHIVAL_STATE_ENABLED && eventURISet { + return s, false, nil + } + if e.State == enumspb.ARCHIVAL_STATE_ENABLED && !eventURISet { + return s, false, nil + } + if e.State == enumspb.ARCHIVAL_STATE_DISABLED && eventURISet { + return &ArchivalConfigState{ + State: enumspb.ARCHIVAL_STATE_DISABLED, + URI: s.URI, + }, true, nil + } + if e.State == enumspb.ARCHIVAL_STATE_DISABLED && !eventURISet { + return &ArchivalConfigState{ + State: enumspb.ARCHIVAL_STATE_DISABLED, + URI: s.URI, + }, true, nil + } + if e.State == enumspb.ARCHIVAL_STATE_UNSPECIFIED && eventURISet { + return s, false, nil + } + if e.State == enumspb.ARCHIVAL_STATE_UNSPECIFIED && !eventURISet { + return s, false, nil + } + } + + // state 2 + if s.State == enumspb.ARCHIVAL_STATE_DISABLED && stateURISet { + if e.State == enumspb.ARCHIVAL_STATE_ENABLED && eventURISet { + return &ArchivalConfigState{ + URI: s.URI, + State: enumspb.ARCHIVAL_STATE_ENABLED, + }, true, nil + } + if e.State == enumspb.ARCHIVAL_STATE_ENABLED && !eventURISet { + return &ArchivalConfigState{ + State: enumspb.ARCHIVAL_STATE_ENABLED, + URI: s.URI, + }, true, nil + } + if e.State == enumspb.ARCHIVAL_STATE_DISABLED && eventURISet { + return s, false, nil + } + if e.State == enumspb.ARCHIVAL_STATE_DISABLED && !eventURISet { + return s, false, nil + } + if e.State == enumspb.ARCHIVAL_STATE_UNSPECIFIED && eventURISet { + return s, false, nil + } + if e.State == enumspb.ARCHIVAL_STATE_UNSPECIFIED && !eventURISet { + return s, false, nil + } + } + + // state 3 + if s.State == enumspb.ARCHIVAL_STATE_DISABLED && !stateURISet { + if e.State == enumspb.ARCHIVAL_STATE_ENABLED && eventURISet { + return &ArchivalConfigState{ + State: enumspb.ARCHIVAL_STATE_ENABLED, + URI: e.URI, + }, true, nil + } + if e.State == enumspb.ARCHIVAL_STATE_ENABLED && !eventURISet { + return &ArchivalConfigState{ + State: enumspb.ARCHIVAL_STATE_ENABLED, + URI: e.DefaultURI, + }, true, nil + } + if e.State == enumspb.ARCHIVAL_STATE_DISABLED && eventURISet { + return &ArchivalConfigState{ + State: enumspb.ARCHIVAL_STATE_DISABLED, + URI: e.URI, + }, true, nil + } + if e.State == enumspb.ARCHIVAL_STATE_DISABLED && !eventURISet { + return s, false, nil + } + if e.State == enumspb.ARCHIVAL_STATE_UNSPECIFIED && eventURISet { + return &ArchivalConfigState{ + State: enumspb.ARCHIVAL_STATE_DISABLED, + URI: e.URI, + }, true, nil + } + if e.State == enumspb.ARCHIVAL_STATE_UNSPECIFIED && !eventURISet { + return s, false, nil + } + } + return nil, false, errCannotHandleStateChange +} diff -Nru temporal-1.21.5-1/src/common/namespace/attrValidator.go temporal-1.22.5/src/common/namespace/attrValidator.go --- temporal-1.21.5-1/src/common/namespace/attrValidator.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/namespace/attrValidator.go 
1970-01-01 00:00:00.000000000 +0000 @@ -1,129 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package namespace - -import ( - "fmt" - - enumspb "go.temporal.io/api/enums/v1" - "go.temporal.io/api/serviceerror" - - persistencespb "go.temporal.io/server/api/persistence/v1" - "go.temporal.io/server/common/cluster" -) - -type ( - // AttrValidatorImpl is namespace attr validator - AttrValidatorImpl struct { - clusterMetadata cluster.Metadata - } -) - -// NewAttrValidator create a new namespace attr validator -func NewAttrValidator( - clusterMetadata cluster.Metadata, -) *AttrValidatorImpl { - - return &AttrValidatorImpl{ - clusterMetadata: clusterMetadata, - } -} - -func (d *AttrValidatorImpl) ValidateNamespaceConfig(config *persistencespb.NamespaceConfig) error { - if config.HistoryArchivalState == enumspb.ARCHIVAL_STATE_ENABLED && len(config.HistoryArchivalUri) == 0 { - return errInvalidArchivalConfig - } - if config.VisibilityArchivalState == enumspb.ARCHIVAL_STATE_ENABLED && len(config.VisibilityArchivalUri) == 0 { - return errInvalidArchivalConfig - } - return nil -} - -func (d *AttrValidatorImpl) ValidateNamespaceReplicationConfigForLocalNamespace( - replicationConfig *persistencespb.NamespaceReplicationConfig, -) error { - - activeCluster := replicationConfig.ActiveClusterName - clusters := replicationConfig.Clusters - - if err := d.validateClusterName(activeCluster); err != nil { - return err - } - for _, clusterName := range clusters { - if err := d.validateClusterName(clusterName); err != nil { - return err - } - } - - if activeCluster != d.clusterMetadata.GetCurrentClusterName() { - return serviceerror.NewInvalidArgument("Invalid local namespace active cluster") - } - - if len(clusters) != 1 || clusters[0] != activeCluster { - return serviceerror.NewInvalidArgument("Invalid local namespace clusters") - } - - return nil -} - -func (d *AttrValidatorImpl) ValidateNamespaceReplicationConfigForGlobalNamespace( - replicationConfig *persistencespb.NamespaceReplicationConfig, -) error { - - activeCluster := replicationConfig.ActiveClusterName - clusters := replicationConfig.Clusters - - if err := d.validateClusterName(activeCluster); err != nil { - return err - } - for _, clusterName := range clusters { - if err := d.validateClusterName(clusterName); err != nil { - return err - } - } - - activeClusterInClusters 
:= false - for _, clusterName := range clusters { - if clusterName == activeCluster { - activeClusterInClusters = true - break - } - } - if !activeClusterInClusters { - return errActiveClusterNotInClusters - } - - return nil -} - -func (d *AttrValidatorImpl) validateClusterName( - clusterName string, -) error { - - if info, ok := d.clusterMetadata.GetAllClusterInfo()[clusterName]; !ok || !info.Enabled { - return serviceerror.NewInvalidArgument(fmt.Sprintf("Invalid cluster name: %v", clusterName)) - } - return nil -} diff -Nru temporal-1.21.5-1/src/common/namespace/attrValidator_test.go temporal-1.22.5/src/common/namespace/attrValidator_test.go --- temporal-1.21.5-1/src/common/namespace/attrValidator_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/namespace/attrValidator_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,186 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
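Note that attrValidator.go and attrValidator_test.go are not being dropped: the same content is re-added below under the snake_case names attr_validator.go and attr_validator_test.go, so this is a pure file rename. For orientation, a hedged sketch of how the validator is typically driven; validateLocalReplication is an invented helper, and a real caller obtains cluster.Metadata from the service container.

package example

import (
	persistencespb "go.temporal.io/server/api/persistence/v1"
	"go.temporal.io/server/common/cluster"
	"go.temporal.io/server/common/namespace"
)

// validateLocalReplication checks that a local namespace names exactly one
// cluster and that it is also the active (current) cluster, per
// ValidateNamespaceReplicationConfigForLocalNamespace.
func validateLocalReplication(meta cluster.Metadata, activeCluster string) error {
	v := namespace.NewAttrValidator(meta)
	return v.ValidateNamespaceReplicationConfigForLocalNamespace(
		&persistencespb.NamespaceReplicationConfig{
			ActiveClusterName: activeCluster,
			Clusters:          []string{activeCluster},
		},
	)
}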
- -package namespace - -import ( - "testing" - - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/suite" - "go.temporal.io/api/serviceerror" - - persistencespb "go.temporal.io/server/api/persistence/v1" - "go.temporal.io/server/common/cluster" -) - -type ( - attrValidatorSuite struct { - suite.Suite - - controller *gomock.Controller - mockClusterMetadata *cluster.MockMetadata - - validator *AttrValidatorImpl - } -) - -func TestAttrValidatorSuite(t *testing.T) { - s := new(attrValidatorSuite) - suite.Run(t, s) -} - -func (s *attrValidatorSuite) SetupSuite() { -} - -func (s *attrValidatorSuite) TearDownSuite() { -} - -func (s *attrValidatorSuite) SetupTest() { - s.controller = gomock.NewController(s.T()) - s.mockClusterMetadata = cluster.NewMockMetadata(s.controller) - - s.validator = NewAttrValidator(s.mockClusterMetadata) -} - -func (s *attrValidatorSuite) TearDownTest() { - s.controller.Finish() -} - -func (s *attrValidatorSuite) TestClusterName() { - s.mockClusterMetadata.EXPECT().GetAllClusterInfo().Return( - cluster.TestAllClusterInfo, - ).AnyTimes() - - err := s.validator.validateClusterName("some random foo bar") - s.IsType(&serviceerror.InvalidArgument{}, err) - - err = s.validator.validateClusterName(cluster.TestCurrentClusterName) - s.NoError(err) - - err = s.validator.validateClusterName(cluster.TestAlternativeClusterName) - s.NoError(err) -} - -func (s *attrValidatorSuite) TestValidateNamespaceReplicationConfigForLocalNamespace() { - s.mockClusterMetadata.EXPECT().GetCurrentClusterName().Return( - cluster.TestCurrentClusterName, - ).AnyTimes() - s.mockClusterMetadata.EXPECT().GetAllClusterInfo().Return( - cluster.TestAllClusterInfo, - ).AnyTimes() - - err := s.validator.ValidateNamespaceReplicationConfigForLocalNamespace( - &persistencespb.NamespaceReplicationConfig{ - ActiveClusterName: cluster.TestAlternativeClusterName, - Clusters: []string{ - cluster.TestAlternativeClusterName, - }, - }, - ) - s.IsType(&serviceerror.InvalidArgument{}, err) - - err = s.validator.ValidateNamespaceReplicationConfigForLocalNamespace( - &persistencespb.NamespaceReplicationConfig{ - ActiveClusterName: cluster.TestAlternativeClusterName, - Clusters: []string{ - cluster.TestCurrentClusterName, - cluster.TestAlternativeClusterName, - }, - }, - ) - s.IsType(&serviceerror.InvalidArgument{}, err) - - err = s.validator.ValidateNamespaceReplicationConfigForLocalNamespace( - &persistencespb.NamespaceReplicationConfig{ - ActiveClusterName: cluster.TestCurrentClusterName, - Clusters: []string{ - cluster.TestCurrentClusterName, - cluster.TestAlternativeClusterName, - }, - }, - ) - s.IsType(&serviceerror.InvalidArgument{}, err) - - err = s.validator.ValidateNamespaceReplicationConfigForLocalNamespace( - &persistencespb.NamespaceReplicationConfig{ - ActiveClusterName: cluster.TestCurrentClusterName, - Clusters: []string{ - cluster.TestCurrentClusterName, - }, - }, - ) - s.NoError(err) -} - -func (s *attrValidatorSuite) TestValidateNamespaceReplicationConfigForGlobalNamespace() { - s.mockClusterMetadata.EXPECT().GetCurrentClusterName().Return( - cluster.TestCurrentClusterName, - ).AnyTimes() - s.mockClusterMetadata.EXPECT().GetAllClusterInfo().Return( - cluster.TestAllClusterInfo, - ).AnyTimes() - - err := s.validator.ValidateNamespaceReplicationConfigForGlobalNamespace( - &persistencespb.NamespaceReplicationConfig{ - ActiveClusterName: cluster.TestCurrentClusterName, - Clusters: []string{ - cluster.TestCurrentClusterName, - }, - }, - ) - s.NoError(err) - - err = 
s.validator.ValidateNamespaceReplicationConfigForGlobalNamespace( - &persistencespb.NamespaceReplicationConfig{ - ActiveClusterName: cluster.TestAlternativeClusterName, - Clusters: []string{ - cluster.TestAlternativeClusterName, - }, - }, - ) - s.NoError(err) - - err = s.validator.ValidateNamespaceReplicationConfigForGlobalNamespace( - &persistencespb.NamespaceReplicationConfig{ - ActiveClusterName: cluster.TestAlternativeClusterName, - Clusters: []string{ - cluster.TestCurrentClusterName, - cluster.TestAlternativeClusterName, - }, - }, - ) - s.NoError(err) - - err = s.validator.ValidateNamespaceReplicationConfigForGlobalNamespace( - &persistencespb.NamespaceReplicationConfig{ - ActiveClusterName: cluster.TestCurrentClusterName, - Clusters: []string{ - cluster.TestCurrentClusterName, - cluster.TestAlternativeClusterName, - }, - }, - ) - s.NoError(err) -} diff -Nru temporal-1.21.5-1/src/common/namespace/attr_validator.go temporal-1.22.5/src/common/namespace/attr_validator.go --- temporal-1.21.5-1/src/common/namespace/attr_validator.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/namespace/attr_validator.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,129 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package namespace + +import ( + "fmt" + + enumspb "go.temporal.io/api/enums/v1" + "go.temporal.io/api/serviceerror" + + persistencespb "go.temporal.io/server/api/persistence/v1" + "go.temporal.io/server/common/cluster" +) + +type ( + // AttrValidatorImpl is namespace attr validator + AttrValidatorImpl struct { + clusterMetadata cluster.Metadata + } +) + +// NewAttrValidator create a new namespace attr validator +func NewAttrValidator( + clusterMetadata cluster.Metadata, +) *AttrValidatorImpl { + + return &AttrValidatorImpl{ + clusterMetadata: clusterMetadata, + } +} + +func (d *AttrValidatorImpl) ValidateNamespaceConfig(config *persistencespb.NamespaceConfig) error { + if config.HistoryArchivalState == enumspb.ARCHIVAL_STATE_ENABLED && len(config.HistoryArchivalUri) == 0 { + return errInvalidArchivalConfig + } + if config.VisibilityArchivalState == enumspb.ARCHIVAL_STATE_ENABLED && len(config.VisibilityArchivalUri) == 0 { + return errInvalidArchivalConfig + } + return nil +} + +func (d *AttrValidatorImpl) ValidateNamespaceReplicationConfigForLocalNamespace( + replicationConfig *persistencespb.NamespaceReplicationConfig, +) error { + + activeCluster := replicationConfig.ActiveClusterName + clusters := replicationConfig.Clusters + + if err := d.validateClusterName(activeCluster); err != nil { + return err + } + for _, clusterName := range clusters { + if err := d.validateClusterName(clusterName); err != nil { + return err + } + } + + if activeCluster != d.clusterMetadata.GetCurrentClusterName() { + return serviceerror.NewInvalidArgument("Invalid local namespace active cluster") + } + + if len(clusters) != 1 || clusters[0] != activeCluster { + return serviceerror.NewInvalidArgument("Invalid local namespace clusters") + } + + return nil +} + +func (d *AttrValidatorImpl) ValidateNamespaceReplicationConfigForGlobalNamespace( + replicationConfig *persistencespb.NamespaceReplicationConfig, +) error { + + activeCluster := replicationConfig.ActiveClusterName + clusters := replicationConfig.Clusters + + if err := d.validateClusterName(activeCluster); err != nil { + return err + } + for _, clusterName := range clusters { + if err := d.validateClusterName(clusterName); err != nil { + return err + } + } + + activeClusterInClusters := false + for _, clusterName := range clusters { + if clusterName == activeCluster { + activeClusterInClusters = true + break + } + } + if !activeClusterInClusters { + return errActiveClusterNotInClusters + } + + return nil +} + +func (d *AttrValidatorImpl) validateClusterName( + clusterName string, +) error { + + if info, ok := d.clusterMetadata.GetAllClusterInfo()[clusterName]; !ok || !info.Enabled { + return serviceerror.NewInvalidArgument(fmt.Sprintf("Invalid cluster name: %v", clusterName)) + } + return nil +} diff -Nru temporal-1.21.5-1/src/common/namespace/attr_validator_test.go temporal-1.22.5/src/common/namespace/attr_validator_test.go --- temporal-1.21.5-1/src/common/namespace/attr_validator_test.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/namespace/attr_validator_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,186 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package namespace + +import ( + "testing" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/suite" + "go.temporal.io/api/serviceerror" + + persistencespb "go.temporal.io/server/api/persistence/v1" + "go.temporal.io/server/common/cluster" +) + +type ( + attrValidatorSuite struct { + suite.Suite + + controller *gomock.Controller + mockClusterMetadata *cluster.MockMetadata + + validator *AttrValidatorImpl + } +) + +func TestAttrValidatorSuite(t *testing.T) { + s := new(attrValidatorSuite) + suite.Run(t, s) +} + +func (s *attrValidatorSuite) SetupSuite() { +} + +func (s *attrValidatorSuite) TearDownSuite() { +} + +func (s *attrValidatorSuite) SetupTest() { + s.controller = gomock.NewController(s.T()) + s.mockClusterMetadata = cluster.NewMockMetadata(s.controller) + + s.validator = NewAttrValidator(s.mockClusterMetadata) +} + +func (s *attrValidatorSuite) TearDownTest() { + s.controller.Finish() +} + +func (s *attrValidatorSuite) TestClusterName() { + s.mockClusterMetadata.EXPECT().GetAllClusterInfo().Return( + cluster.TestAllClusterInfo, + ).AnyTimes() + + err := s.validator.validateClusterName("some random foo bar") + s.IsType(&serviceerror.InvalidArgument{}, err) + + err = s.validator.validateClusterName(cluster.TestCurrentClusterName) + s.NoError(err) + + err = s.validator.validateClusterName(cluster.TestAlternativeClusterName) + s.NoError(err) +} + +func (s *attrValidatorSuite) TestValidateNamespaceReplicationConfigForLocalNamespace() { + s.mockClusterMetadata.EXPECT().GetCurrentClusterName().Return( + cluster.TestCurrentClusterName, + ).AnyTimes() + s.mockClusterMetadata.EXPECT().GetAllClusterInfo().Return( + cluster.TestAllClusterInfo, + ).AnyTimes() + + err := s.validator.ValidateNamespaceReplicationConfigForLocalNamespace( + &persistencespb.NamespaceReplicationConfig{ + ActiveClusterName: cluster.TestAlternativeClusterName, + Clusters: []string{ + cluster.TestAlternativeClusterName, + }, + }, + ) + s.IsType(&serviceerror.InvalidArgument{}, err) + + err = s.validator.ValidateNamespaceReplicationConfigForLocalNamespace( + &persistencespb.NamespaceReplicationConfig{ + ActiveClusterName: cluster.TestAlternativeClusterName, + Clusters: []string{ + cluster.TestCurrentClusterName, + cluster.TestAlternativeClusterName, + }, + }, + ) + s.IsType(&serviceerror.InvalidArgument{}, err) + + err = s.validator.ValidateNamespaceReplicationConfigForLocalNamespace( + &persistencespb.NamespaceReplicationConfig{ + 
ActiveClusterName: cluster.TestCurrentClusterName, + Clusters: []string{ + cluster.TestCurrentClusterName, + cluster.TestAlternativeClusterName, + }, + }, + ) + s.IsType(&serviceerror.InvalidArgument{}, err) + + err = s.validator.ValidateNamespaceReplicationConfigForLocalNamespace( + &persistencespb.NamespaceReplicationConfig{ + ActiveClusterName: cluster.TestCurrentClusterName, + Clusters: []string{ + cluster.TestCurrentClusterName, + }, + }, + ) + s.NoError(err) +} + +func (s *attrValidatorSuite) TestValidateNamespaceReplicationConfigForGlobalNamespace() { + s.mockClusterMetadata.EXPECT().GetCurrentClusterName().Return( + cluster.TestCurrentClusterName, + ).AnyTimes() + s.mockClusterMetadata.EXPECT().GetAllClusterInfo().Return( + cluster.TestAllClusterInfo, + ).AnyTimes() + + err := s.validator.ValidateNamespaceReplicationConfigForGlobalNamespace( + &persistencespb.NamespaceReplicationConfig{ + ActiveClusterName: cluster.TestCurrentClusterName, + Clusters: []string{ + cluster.TestCurrentClusterName, + }, + }, + ) + s.NoError(err) + + err = s.validator.ValidateNamespaceReplicationConfigForGlobalNamespace( + &persistencespb.NamespaceReplicationConfig{ + ActiveClusterName: cluster.TestAlternativeClusterName, + Clusters: []string{ + cluster.TestAlternativeClusterName, + }, + }, + ) + s.NoError(err) + + err = s.validator.ValidateNamespaceReplicationConfigForGlobalNamespace( + &persistencespb.NamespaceReplicationConfig{ + ActiveClusterName: cluster.TestAlternativeClusterName, + Clusters: []string{ + cluster.TestCurrentClusterName, + cluster.TestAlternativeClusterName, + }, + }, + ) + s.NoError(err) + + err = s.validator.ValidateNamespaceReplicationConfigForGlobalNamespace( + &persistencespb.NamespaceReplicationConfig{ + ActiveClusterName: cluster.TestCurrentClusterName, + Clusters: []string{ + cluster.TestCurrentClusterName, + cluster.TestAlternativeClusterName, + }, + }, + ) + s.NoError(err) +} diff -Nru temporal-1.21.5-1/src/common/namespace/dlqMessageHandler.go temporal-1.22.5/src/common/namespace/dlqMessageHandler.go --- temporal-1.21.5-1/src/common/namespace/dlqMessageHandler.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/namespace/dlqMessageHandler.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,172 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- -//go:generate mockgen -copyright_file ../../LICENSE -package $GOPACKAGE -source $GOFILE -destination dlqMessageHandler_mock.go - -package namespace - -import ( - "context" - - "go.temporal.io/api/serviceerror" - - replicationspb "go.temporal.io/server/api/replication/v1" - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/log/tag" - "go.temporal.io/server/common/persistence" -) - -type ( - // DLQMessageHandler is the interface handles namespace DLQ messages - DLQMessageHandler interface { - Read(ctx context.Context, lastMessageID int64, pageSize int, pageToken []byte) ([]*replicationspb.ReplicationTask, []byte, error) - Purge(ctx context.Context, lastMessageID int64) error - Merge(ctx context.Context, lastMessageID int64, pageSize int, pageToken []byte) ([]byte, error) - } - - dlqMessageHandlerImpl struct { - replicationHandler ReplicationTaskExecutor - namespaceReplicationQueue persistence.NamespaceReplicationQueue - logger log.Logger - } -) - -// NewDLQMessageHandler returns a DLQTaskHandler instance -func NewDLQMessageHandler( - replicationHandler ReplicationTaskExecutor, - namespaceReplicationQueue persistence.NamespaceReplicationQueue, - logger log.Logger, -) DLQMessageHandler { - return &dlqMessageHandlerImpl{ - replicationHandler: replicationHandler, - namespaceReplicationQueue: namespaceReplicationQueue, - logger: logger, - } -} - -// ReadMessages reads namespace replication DLQ messages -func (d *dlqMessageHandlerImpl) Read( - ctx context.Context, - lastMessageID int64, - pageSize int, - pageToken []byte, -) ([]*replicationspb.ReplicationTask, []byte, error) { - - ackLevel, err := d.namespaceReplicationQueue.GetDLQAckLevel(ctx) - if err != nil { - return nil, nil, err - } - - return d.namespaceReplicationQueue.GetMessagesFromDLQ( - ctx, - ackLevel, - lastMessageID, - pageSize, - pageToken, - ) -} - -// PurgeMessages purges namespace replication DLQ messages -func (d *dlqMessageHandlerImpl) Purge( - ctx context.Context, - lastMessageID int64, -) error { - - ackLevel, err := d.namespaceReplicationQueue.GetDLQAckLevel(ctx) - if err != nil { - return err - } - - if err := d.namespaceReplicationQueue.RangeDeleteMessagesFromDLQ( - ctx, - ackLevel, - lastMessageID, - ); err != nil { - return err - } - - if err := d.namespaceReplicationQueue.UpdateDLQAckLevel( - ctx, - lastMessageID, - ); err != nil { - d.logger.Error("Failed to update DLQ ack level after purging messages", tag.Error(err)) - } - - return nil -} - -// MergeMessages merges namespace replication DLQ messages -func (d *dlqMessageHandlerImpl) Merge( - ctx context.Context, - lastMessageID int64, - pageSize int, - pageToken []byte, -) ([]byte, error) { - - ackLevel, err := d.namespaceReplicationQueue.GetDLQAckLevel(ctx) - if err != nil { - return nil, err - } - - messages, token, err := d.namespaceReplicationQueue.GetMessagesFromDLQ( - ctx, - ackLevel, - lastMessageID, - pageSize, - pageToken, - ) - if err != nil { - return nil, err - } - - var ackedMessageID int64 - for _, message := range messages { - namespaceTask := message.GetNamespaceTaskAttributes() - if namespaceTask == nil { - return nil, serviceerror.NewInternal("Encounter non namespace replication task in namespace replication queue.") - } - - if err := d.replicationHandler.Execute( - ctx, - namespaceTask, - ); err != nil { - return nil, err - } - ackedMessageID = message.SourceTaskId - } - - if err := d.namespaceReplicationQueue.RangeDeleteMessagesFromDLQ( - ctx, - ackLevel, - ackedMessageID, - ); err != nil { - d.logger.Error("failed to delete 
merged tasks on merging namespace DLQ message", tag.Error(err)) - return nil, err - } - if err := d.namespaceReplicationQueue.UpdateDLQAckLevel(ctx, ackedMessageID); err != nil { - d.logger.Error("failed to update ack level on merging namespace DLQ message", tag.Error(err)) - } - - return token, nil -} diff -Nru temporal-1.21.5-1/src/common/namespace/dlqMessageHandler_mock.go temporal-1.22.5/src/common/namespace/dlqMessageHandler_mock.go --- temporal-1.21.5-1/src/common/namespace/dlqMessageHandler_mock.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/namespace/dlqMessageHandler_mock.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,105 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Code generated by MockGen. DO NOT EDIT. -// Source: dlqMessageHandler.go - -// Package namespace is a generated GoMock package. -package namespace - -import ( - context "context" - reflect "reflect" - - gomock "github.com/golang/mock/gomock" - repication "go.temporal.io/server/api/replication/v1" -) - -// MockDLQMessageHandler is a mock of DLQMessageHandler interface. -type MockDLQMessageHandler struct { - ctrl *gomock.Controller - recorder *MockDLQMessageHandlerMockRecorder -} - -// MockDLQMessageHandlerMockRecorder is the mock recorder for MockDLQMessageHandler. -type MockDLQMessageHandlerMockRecorder struct { - mock *MockDLQMessageHandler -} - -// NewMockDLQMessageHandler creates a new mock instance. -func NewMockDLQMessageHandler(ctrl *gomock.Controller) *MockDLQMessageHandler { - mock := &MockDLQMessageHandler{ctrl: ctrl} - mock.recorder = &MockDLQMessageHandlerMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockDLQMessageHandler) EXPECT() *MockDLQMessageHandlerMockRecorder { - return m.recorder -} - -// Merge mocks base method. -func (m *MockDLQMessageHandler) Merge(ctx context.Context, lastMessageID int64, pageSize int, pageToken []byte) ([]byte, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Merge", ctx, lastMessageID, pageSize, pageToken) - ret0, _ := ret[0].([]byte) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Merge indicates an expected call of Merge. 
-func (mr *MockDLQMessageHandlerMockRecorder) Merge(ctx, lastMessageID, pageSize, pageToken interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Merge", reflect.TypeOf((*MockDLQMessageHandler)(nil).Merge), ctx, lastMessageID, pageSize, pageToken) -} - -// Purge mocks base method. -func (m *MockDLQMessageHandler) Purge(ctx context.Context, lastMessageID int64) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Purge", ctx, lastMessageID) - ret0, _ := ret[0].(error) - return ret0 -} - -// Purge indicates an expected call of Purge. -func (mr *MockDLQMessageHandlerMockRecorder) Purge(ctx, lastMessageID interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Purge", reflect.TypeOf((*MockDLQMessageHandler)(nil).Purge), ctx, lastMessageID) -} - -// Read mocks base method. -func (m *MockDLQMessageHandler) Read(ctx context.Context, lastMessageID int64, pageSize int, pageToken []byte) ([]*repication.ReplicationTask, []byte, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Read", ctx, lastMessageID, pageSize, pageToken) - ret0, _ := ret[0].([]*repication.ReplicationTask) - ret1, _ := ret[1].([]byte) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 -} - -// Read indicates an expected call of Read. -func (mr *MockDLQMessageHandlerMockRecorder) Read(ctx, lastMessageID, pageSize, pageToken interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Read", reflect.TypeOf((*MockDLQMessageHandler)(nil).Read), ctx, lastMessageID, pageSize, pageToken) -} diff -Nru temporal-1.21.5-1/src/common/namespace/dlqMessageHandler_test.go temporal-1.22.5/src/common/namespace/dlqMessageHandler_test.go --- temporal-1.21.5-1/src/common/namespace/dlqMessageHandler_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/namespace/dlqMessageHandler_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,378 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- -package namespace - -import ( - "context" - "fmt" - "testing" - - "github.com/golang/mock/gomock" - "github.com/pborman/uuid" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - - enumsspb "go.temporal.io/server/api/enums/v1" - replicationspb "go.temporal.io/server/api/replication/v1" - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/persistence" -) - -type ( - dlqMessageHandlerSuite struct { - suite.Suite - - *require.Assertions - controller *gomock.Controller - - mockReplicationTaskExecutor *MockReplicationTaskExecutor - mockReplicationQueue *persistence.MockNamespaceReplicationQueue - dlqMessageHandler *dlqMessageHandlerImpl - } -) - -func TestDLQMessageHandlerSuite(t *testing.T) { - s := new(dlqMessageHandlerSuite) - suite.Run(t, s) -} - -func (s *dlqMessageHandlerSuite) SetupSuite() { -} - -func (s *dlqMessageHandlerSuite) TearDownSuite() { - -} - -func (s *dlqMessageHandlerSuite) SetupTest() { - s.Assertions = require.New(s.T()) - s.controller = gomock.NewController(s.T()) - - logger := log.NewTestLogger() - s.mockReplicationTaskExecutor = NewMockReplicationTaskExecutor(s.controller) - s.mockReplicationQueue = persistence.NewMockNamespaceReplicationQueue(s.controller) - - s.dlqMessageHandler = NewDLQMessageHandler( - s.mockReplicationTaskExecutor, - s.mockReplicationQueue, - logger, - ).(*dlqMessageHandlerImpl) -} - -func (s *dlqMessageHandlerSuite) TearDownTest() { -} - -func (s *dlqMessageHandlerSuite) TestReadMessages() { - ackLevel := int64(10) - lastMessageID := int64(20) - pageSize := 100 - pageToken := []byte{} - - tasks := []*replicationspb.ReplicationTask{ - { - TaskType: enumsspb.REPLICATION_TASK_TYPE_NAMESPACE_TASK, - SourceTaskId: 1, - }, - } - s.mockReplicationQueue.EXPECT().GetDLQAckLevel(gomock.Any()).Return(ackLevel, nil) - s.mockReplicationQueue.EXPECT().GetMessagesFromDLQ(gomock.Any(), ackLevel, lastMessageID, pageSize, pageToken). - Return(tasks, nil, nil) - - resp, token, err := s.dlqMessageHandler.Read(context.Background(), lastMessageID, pageSize, pageToken) - - s.NoError(err) - s.Equal(tasks, resp) - s.Nil(token) -} - -func (s *dlqMessageHandlerSuite) TestReadMessages_ThrowErrorOnGetDLQAckLevel() { - lastMessageID := int64(20) - pageSize := 100 - pageToken := []byte{} - - tasks := []*replicationspb.ReplicationTask{ - { - TaskType: enumsspb.REPLICATION_TASK_TYPE_NAMESPACE_TASK, - SourceTaskId: 1, - }, - } - testError := fmt.Errorf("test") - s.mockReplicationQueue.EXPECT().GetDLQAckLevel(gomock.Any()).Return(int64(-1), testError) - s.mockReplicationQueue.EXPECT().GetMessagesFromDLQ(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). - Return(tasks, nil, nil).Times(0) - - _, _, err := s.dlqMessageHandler.Read(context.Background(), lastMessageID, pageSize, pageToken) - - s.Equal(testError, err) -} - -func (s *dlqMessageHandlerSuite) TestReadMessages_ThrowErrorOnReadMessages() { - ackLevel := int64(10) - lastMessageID := int64(20) - pageSize := 100 - pageToken := []byte{} - - testError := fmt.Errorf("test") - s.mockReplicationQueue.EXPECT().GetDLQAckLevel(gomock.Any()).Return(ackLevel, nil) - s.mockReplicationQueue.EXPECT().GetMessagesFromDLQ(gomock.Any(), ackLevel, lastMessageID, pageSize, pageToken). 
- Return(nil, nil, testError) - - _, _, err := s.dlqMessageHandler.Read(context.Background(), lastMessageID, pageSize, pageToken) - - s.Equal(testError, err) -} - -func (s *dlqMessageHandlerSuite) TestPurgeMessages() { - ackLevel := int64(10) - lastMessageID := int64(20) - - s.mockReplicationQueue.EXPECT().GetDLQAckLevel(gomock.Any()).Return(ackLevel, nil) - s.mockReplicationQueue.EXPECT().RangeDeleteMessagesFromDLQ(gomock.Any(), ackLevel, lastMessageID).Return(nil) - s.mockReplicationQueue.EXPECT().UpdateDLQAckLevel(gomock.Any(), lastMessageID).Return(nil) - err := s.dlqMessageHandler.Purge(context.Background(), lastMessageID) - - s.NoError(err) -} - -func (s *dlqMessageHandlerSuite) TestPurgeMessages_ThrowErrorOnGetDLQAckLevel() { - lastMessageID := int64(20) - testError := fmt.Errorf("test") - - s.mockReplicationQueue.EXPECT().GetDLQAckLevel(gomock.Any()).Return(int64(-1), testError) - s.mockReplicationQueue.EXPECT().RangeDeleteMessagesFromDLQ(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).Times(0) - s.mockReplicationQueue.EXPECT().UpdateDLQAckLevel(gomock.Any(), gomock.Any()).Times(0) - err := s.dlqMessageHandler.Purge(context.Background(), lastMessageID) - - s.Equal(testError, err) -} - -func (s *dlqMessageHandlerSuite) TestPurgeMessages_ThrowErrorOnPurgeMessages() { - ackLevel := int64(10) - lastMessageID := int64(20) - testError := fmt.Errorf("test") - - s.mockReplicationQueue.EXPECT().GetDLQAckLevel(gomock.Any()).Return(ackLevel, nil) - s.mockReplicationQueue.EXPECT().RangeDeleteMessagesFromDLQ(gomock.Any(), ackLevel, lastMessageID).Return(testError) - s.mockReplicationQueue.EXPECT().UpdateDLQAckLevel(gomock.Any(), gomock.Any()).Times(0) - err := s.dlqMessageHandler.Purge(context.Background(), lastMessageID) - - s.Equal(testError, err) -} - -func (s *dlqMessageHandlerSuite) TestMergeMessages() { - ackLevel := int64(10) - lastMessageID := int64(20) - pageSize := 100 - pageToken := []byte{} - messageID := int64(11) - - namespaceAttribute := &replicationspb.NamespaceTaskAttributes{ - Id: uuid.New(), - } - - tasks := []*replicationspb.ReplicationTask{ - { - TaskType: enumsspb.REPLICATION_TASK_TYPE_NAMESPACE_TASK, - SourceTaskId: messageID, - Attributes: &replicationspb.ReplicationTask_NamespaceTaskAttributes{ - NamespaceTaskAttributes: namespaceAttribute, - }, - }, - } - s.mockReplicationQueue.EXPECT().GetDLQAckLevel(gomock.Any()).Return(ackLevel, nil) - s.mockReplicationQueue.EXPECT().GetMessagesFromDLQ(gomock.Any(), ackLevel, lastMessageID, pageSize, pageToken). 
- Return(tasks, nil, nil) - s.mockReplicationTaskExecutor.EXPECT().Execute(gomock.Any(), namespaceAttribute).Return(nil) - s.mockReplicationQueue.EXPECT().UpdateDLQAckLevel(gomock.Any(), messageID).Return(nil) - s.mockReplicationQueue.EXPECT().RangeDeleteMessagesFromDLQ(gomock.Any(), ackLevel, messageID).Return(nil) - - token, err := s.dlqMessageHandler.Merge(context.Background(), lastMessageID, pageSize, pageToken) - s.NoError(err) - s.Nil(token) -} - -func (s *dlqMessageHandlerSuite) TestMergeMessages_ThrowErrorOnGetDLQAckLevel() { - lastMessageID := int64(20) - pageSize := 100 - pageToken := []byte{} - messageID := int64(11) - testError := fmt.Errorf("test") - namespaceAttribute := &replicationspb.NamespaceTaskAttributes{ - Id: uuid.New(), - } - - tasks := []*replicationspb.ReplicationTask{ - { - TaskType: enumsspb.REPLICATION_TASK_TYPE_NAMESPACE_TASK, - SourceTaskId: int64(messageID), - Attributes: &replicationspb.ReplicationTask_NamespaceTaskAttributes{ - NamespaceTaskAttributes: namespaceAttribute, - }, - }, - } - s.mockReplicationQueue.EXPECT().GetDLQAckLevel(gomock.Any()).Return(int64(-1), testError) - s.mockReplicationQueue.EXPECT().GetMessagesFromDLQ(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). - Return(tasks, nil, nil).Times(0) - s.mockReplicationTaskExecutor.EXPECT().Execute(gomock.Any(), gomock.Any()).Times(0) - s.mockReplicationQueue.EXPECT().DeleteMessageFromDLQ(gomock.Any(), gomock.Any()).Times(0) - s.mockReplicationQueue.EXPECT().UpdateDLQAckLevel(gomock.Any(), gomock.Any()).Times(0) - - token, err := s.dlqMessageHandler.Merge(context.Background(), lastMessageID, pageSize, pageToken) - s.Equal(testError, err) - s.Nil(token) -} - -func (s *dlqMessageHandlerSuite) TestMergeMessages_ThrowErrorOnGetDLQMessages() { - ackLevel := int64(10) - lastMessageID := int64(20) - pageSize := 100 - pageToken := []byte{} - testError := fmt.Errorf("test") - - s.mockReplicationQueue.EXPECT().GetDLQAckLevel(gomock.Any()).Return(ackLevel, nil) - s.mockReplicationQueue.EXPECT().GetMessagesFromDLQ(gomock.Any(), ackLevel, lastMessageID, pageSize, pageToken). 
- Return(nil, nil, testError) - s.mockReplicationTaskExecutor.EXPECT().Execute(gomock.Any(), gomock.Any()).Times(0) - s.mockReplicationQueue.EXPECT().DeleteMessageFromDLQ(gomock.Any(), gomock.Any()).Times(0) - s.mockReplicationQueue.EXPECT().UpdateDLQAckLevel(gomock.Any(), gomock.Any()).Times(0) - - token, err := s.dlqMessageHandler.Merge(context.Background(), lastMessageID, pageSize, pageToken) - s.Equal(testError, err) - s.Nil(token) -} - -func (s *dlqMessageHandlerSuite) TestMergeMessages_ThrowErrorOnHandleReceivingTask() { - ackLevel := int64(10) - lastMessageID := int64(20) - pageSize := 100 - pageToken := []byte{} - messageID1 := int64(11) - messageID2 := int64(12) - testError := fmt.Errorf("test") - namespaceAttribute1 := &replicationspb.NamespaceTaskAttributes{ - Id: uuid.New(), - } - namespaceAttribute2 := &replicationspb.NamespaceTaskAttributes{ - Id: uuid.New(), - } - tasks := []*replicationspb.ReplicationTask{ - { - TaskType: enumsspb.REPLICATION_TASK_TYPE_NAMESPACE_TASK, - SourceTaskId: messageID1, - Attributes: &replicationspb.ReplicationTask_NamespaceTaskAttributes{ - NamespaceTaskAttributes: namespaceAttribute1, - }, - }, - { - TaskType: enumsspb.REPLICATION_TASK_TYPE_NAMESPACE_TASK, - SourceTaskId: messageID2, - Attributes: &replicationspb.ReplicationTask_NamespaceTaskAttributes{ - NamespaceTaskAttributes: namespaceAttribute2, - }, - }, - } - s.mockReplicationQueue.EXPECT().GetDLQAckLevel(gomock.Any()).Return(ackLevel, nil) - s.mockReplicationQueue.EXPECT().GetMessagesFromDLQ(gomock.Any(), ackLevel, lastMessageID, pageSize, pageToken). - Return(tasks, nil, nil) - s.mockReplicationTaskExecutor.EXPECT().Execute(gomock.Any(), namespaceAttribute1).Return(nil) - s.mockReplicationTaskExecutor.EXPECT().Execute(gomock.Any(), namespaceAttribute2).Return(testError) - - token, err := s.dlqMessageHandler.Merge(context.Background(), lastMessageID, pageSize, pageToken) - s.Equal(testError, err) - s.Nil(token) -} - -func (s *dlqMessageHandlerSuite) TestMergeMessages_ThrowErrorOnDeleteMessages() { - ackLevel := int64(10) - lastMessageID := int64(20) - pageSize := 100 - pageToken := []byte{} - messageID1 := int64(11) - messageID2 := int64(12) - testError := fmt.Errorf("test") - namespaceAttribute1 := &replicationspb.NamespaceTaskAttributes{ - Id: uuid.New(), - } - namespaceAttribute2 := &replicationspb.NamespaceTaskAttributes{ - Id: uuid.New(), - } - tasks := []*replicationspb.ReplicationTask{ - { - TaskType: enumsspb.REPLICATION_TASK_TYPE_NAMESPACE_TASK, - SourceTaskId: messageID1, - Attributes: &replicationspb.ReplicationTask_NamespaceTaskAttributes{ - NamespaceTaskAttributes: namespaceAttribute1, - }, - }, - { - TaskType: enumsspb.REPLICATION_TASK_TYPE_NAMESPACE_TASK, - SourceTaskId: messageID2, - Attributes: &replicationspb.ReplicationTask_NamespaceTaskAttributes{ - NamespaceTaskAttributes: namespaceAttribute2, - }, - }, - } - s.mockReplicationQueue.EXPECT().GetDLQAckLevel(gomock.Any()).Return(ackLevel, nil) - s.mockReplicationQueue.EXPECT().GetMessagesFromDLQ(gomock.Any(), ackLevel, lastMessageID, pageSize, pageToken). 
- Return(tasks, nil, nil) - s.mockReplicationTaskExecutor.EXPECT().Execute(gomock.Any(), namespaceAttribute1).Return(nil) - s.mockReplicationTaskExecutor.EXPECT().Execute(gomock.Any(), namespaceAttribute2).Return(nil) - s.mockReplicationQueue.EXPECT().RangeDeleteMessagesFromDLQ(gomock.Any(), ackLevel, messageID2).Return(testError) - - token, err := s.dlqMessageHandler.Merge(context.Background(), lastMessageID, pageSize, pageToken) - s.Error(err) - s.Nil(token) -} - -func (s *dlqMessageHandlerSuite) TestMergeMessages_IgnoreErrorOnUpdateDLQAckLevel() { - ackLevel := int64(10) - lastMessageID := int64(20) - pageSize := 100 - pageToken := []byte{} - messageID := int64(11) - testError := fmt.Errorf("test") - namespaceAttribute := &replicationspb.NamespaceTaskAttributes{ - Id: uuid.New(), - } - - tasks := []*replicationspb.ReplicationTask{ - { - TaskType: enumsspb.REPLICATION_TASK_TYPE_NAMESPACE_TASK, - SourceTaskId: messageID, - Attributes: &replicationspb.ReplicationTask_NamespaceTaskAttributes{ - NamespaceTaskAttributes: namespaceAttribute, - }, - }, - } - s.mockReplicationQueue.EXPECT().GetDLQAckLevel(gomock.Any()).Return(ackLevel, nil) - s.mockReplicationQueue.EXPECT().GetMessagesFromDLQ(gomock.Any(), ackLevel, lastMessageID, pageSize, pageToken). - Return(tasks, nil, nil) - s.mockReplicationTaskExecutor.EXPECT().Execute(gomock.Any(), namespaceAttribute).Return(nil) - s.mockReplicationQueue.EXPECT().RangeDeleteMessagesFromDLQ(gomock.Any(), ackLevel, messageID).Return(nil) - s.mockReplicationQueue.EXPECT().UpdateDLQAckLevel(gomock.Any(), messageID).Return(testError) - - token, err := s.dlqMessageHandler.Merge(context.Background(), lastMessageID, pageSize, pageToken) - s.NoError(err) - s.Nil(token) -} diff -Nru temporal-1.21.5-1/src/common/namespace/dlq_message_handler.go temporal-1.22.5/src/common/namespace/dlq_message_handler.go --- temporal-1.21.5-1/src/common/namespace/dlq_message_handler.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/namespace/dlq_message_handler.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,172 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +//go:generate mockgen -copyright_file ../../LICENSE -package $GOPACKAGE -source $GOFILE -destination dlq_message_handler_mock.go + +package namespace + +import ( + "context" + + "go.temporal.io/api/serviceerror" + + replicationspb "go.temporal.io/server/api/replication/v1" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/log/tag" + "go.temporal.io/server/common/persistence" +) + +type ( + // DLQMessageHandler is the interface handles namespace DLQ messages + DLQMessageHandler interface { + Read(ctx context.Context, lastMessageID int64, pageSize int, pageToken []byte) ([]*replicationspb.ReplicationTask, []byte, error) + Purge(ctx context.Context, lastMessageID int64) error + Merge(ctx context.Context, lastMessageID int64, pageSize int, pageToken []byte) ([]byte, error) + } + + dlqMessageHandlerImpl struct { + replicationHandler ReplicationTaskExecutor + namespaceReplicationQueue persistence.NamespaceReplicationQueue + logger log.Logger + } +) + +// NewDLQMessageHandler returns a DLQTaskHandler instance +func NewDLQMessageHandler( + replicationHandler ReplicationTaskExecutor, + namespaceReplicationQueue persistence.NamespaceReplicationQueue, + logger log.Logger, +) DLQMessageHandler { + return &dlqMessageHandlerImpl{ + replicationHandler: replicationHandler, + namespaceReplicationQueue: namespaceReplicationQueue, + logger: logger, + } +} + +// ReadMessages reads namespace replication DLQ messages +func (d *dlqMessageHandlerImpl) Read( + ctx context.Context, + lastMessageID int64, + pageSize int, + pageToken []byte, +) ([]*replicationspb.ReplicationTask, []byte, error) { + + ackLevel, err := d.namespaceReplicationQueue.GetDLQAckLevel(ctx) + if err != nil { + return nil, nil, err + } + + return d.namespaceReplicationQueue.GetMessagesFromDLQ( + ctx, + ackLevel, + lastMessageID, + pageSize, + pageToken, + ) +} + +// PurgeMessages purges namespace replication DLQ messages +func (d *dlqMessageHandlerImpl) Purge( + ctx context.Context, + lastMessageID int64, +) error { + + ackLevel, err := d.namespaceReplicationQueue.GetDLQAckLevel(ctx) + if err != nil { + return err + } + + if err := d.namespaceReplicationQueue.RangeDeleteMessagesFromDLQ( + ctx, + ackLevel, + lastMessageID, + ); err != nil { + return err + } + + if err := d.namespaceReplicationQueue.UpdateDLQAckLevel( + ctx, + lastMessageID, + ); err != nil { + d.logger.Error("Failed to update DLQ ack level after purging messages", tag.Error(err)) + } + + return nil +} + +// MergeMessages merges namespace replication DLQ messages +func (d *dlqMessageHandlerImpl) Merge( + ctx context.Context, + lastMessageID int64, + pageSize int, + pageToken []byte, +) ([]byte, error) { + + ackLevel, err := d.namespaceReplicationQueue.GetDLQAckLevel(ctx) + if err != nil { + return nil, err + } + + messages, token, err := d.namespaceReplicationQueue.GetMessagesFromDLQ( + ctx, + ackLevel, + lastMessageID, + pageSize, + pageToken, + ) + if err != nil { + return nil, err + } + + var ackedMessageID int64 + for _, message := range messages { + namespaceTask := message.GetNamespaceTaskAttributes() + if namespaceTask == nil { + return nil, serviceerror.NewInternal("Encounter non namespace replication task in namespace replication queue.") + } + + if err := d.replicationHandler.Execute( + ctx, + namespaceTask, + ); err != nil { + return nil, err + } + ackedMessageID = message.SourceTaskId + } + + if err := d.namespaceReplicationQueue.RangeDeleteMessagesFromDLQ( + ctx, + ackLevel, + ackedMessageID, + ); err != nil { + d.logger.Error("failed to delete 
merged tasks on merging namespace DLQ message", tag.Error(err)) + return nil, err + } + if err := d.namespaceReplicationQueue.UpdateDLQAckLevel(ctx, ackedMessageID); err != nil { + d.logger.Error("failed to update ack level on merging namespace DLQ message", tag.Error(err)) + } + + return token, nil +} diff -Nru temporal-1.21.5-1/src/common/namespace/dlq_message_handler_mock.go temporal-1.22.5/src/common/namespace/dlq_message_handler_mock.go --- temporal-1.21.5-1/src/common/namespace/dlq_message_handler_mock.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/namespace/dlq_message_handler_mock.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,105 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Code generated by MockGen. DO NOT EDIT. +// Source: dlq_message_handler.go + +// Package namespace is a generated GoMock package. +package namespace + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + repication "go.temporal.io/server/api/replication/v1" +) + +// MockDLQMessageHandler is a mock of DLQMessageHandler interface. +type MockDLQMessageHandler struct { + ctrl *gomock.Controller + recorder *MockDLQMessageHandlerMockRecorder +} + +// MockDLQMessageHandlerMockRecorder is the mock recorder for MockDLQMessageHandler. +type MockDLQMessageHandlerMockRecorder struct { + mock *MockDLQMessageHandler +} + +// NewMockDLQMessageHandler creates a new mock instance. +func NewMockDLQMessageHandler(ctrl *gomock.Controller) *MockDLQMessageHandler { + mock := &MockDLQMessageHandler{ctrl: ctrl} + mock.recorder = &MockDLQMessageHandlerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockDLQMessageHandler) EXPECT() *MockDLQMessageHandlerMockRecorder { + return m.recorder +} + +// Merge mocks base method. +func (m *MockDLQMessageHandler) Merge(ctx context.Context, lastMessageID int64, pageSize int, pageToken []byte) ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Merge", ctx, lastMessageID, pageSize, pageToken) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Merge indicates an expected call of Merge. 
+func (mr *MockDLQMessageHandlerMockRecorder) Merge(ctx, lastMessageID, pageSize, pageToken interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Merge", reflect.TypeOf((*MockDLQMessageHandler)(nil).Merge), ctx, lastMessageID, pageSize, pageToken) +} + +// Purge mocks base method. +func (m *MockDLQMessageHandler) Purge(ctx context.Context, lastMessageID int64) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Purge", ctx, lastMessageID) + ret0, _ := ret[0].(error) + return ret0 +} + +// Purge indicates an expected call of Purge. +func (mr *MockDLQMessageHandlerMockRecorder) Purge(ctx, lastMessageID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Purge", reflect.TypeOf((*MockDLQMessageHandler)(nil).Purge), ctx, lastMessageID) +} + +// Read mocks base method. +func (m *MockDLQMessageHandler) Read(ctx context.Context, lastMessageID int64, pageSize int, pageToken []byte) ([]*repication.ReplicationTask, []byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Read", ctx, lastMessageID, pageSize, pageToken) + ret0, _ := ret[0].([]*repication.ReplicationTask) + ret1, _ := ret[1].([]byte) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// Read indicates an expected call of Read. +func (mr *MockDLQMessageHandlerMockRecorder) Read(ctx, lastMessageID, pageSize, pageToken interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Read", reflect.TypeOf((*MockDLQMessageHandler)(nil).Read), ctx, lastMessageID, pageSize, pageToken) +} diff -Nru temporal-1.21.5-1/src/common/namespace/dlq_message_handler_test.go temporal-1.22.5/src/common/namespace/dlq_message_handler_test.go --- temporal-1.21.5-1/src/common/namespace/dlq_message_handler_test.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/namespace/dlq_message_handler_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,378 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package namespace + +import ( + "context" + "fmt" + "testing" + + "github.com/golang/mock/gomock" + "github.com/pborman/uuid" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + enumsspb "go.temporal.io/server/api/enums/v1" + replicationspb "go.temporal.io/server/api/replication/v1" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/persistence" +) + +type ( + dlqMessageHandlerSuite struct { + suite.Suite + + *require.Assertions + controller *gomock.Controller + + mockReplicationTaskExecutor *MockReplicationTaskExecutor + mockReplicationQueue *persistence.MockNamespaceReplicationQueue + dlqMessageHandler *dlqMessageHandlerImpl + } +) + +func TestDLQMessageHandlerSuite(t *testing.T) { + s := new(dlqMessageHandlerSuite) + suite.Run(t, s) +} + +func (s *dlqMessageHandlerSuite) SetupSuite() { +} + +func (s *dlqMessageHandlerSuite) TearDownSuite() { + +} + +func (s *dlqMessageHandlerSuite) SetupTest() { + s.Assertions = require.New(s.T()) + s.controller = gomock.NewController(s.T()) + + logger := log.NewTestLogger() + s.mockReplicationTaskExecutor = NewMockReplicationTaskExecutor(s.controller) + s.mockReplicationQueue = persistence.NewMockNamespaceReplicationQueue(s.controller) + + s.dlqMessageHandler = NewDLQMessageHandler( + s.mockReplicationTaskExecutor, + s.mockReplicationQueue, + logger, + ).(*dlqMessageHandlerImpl) +} + +func (s *dlqMessageHandlerSuite) TearDownTest() { +} + +func (s *dlqMessageHandlerSuite) TestReadMessages() { + ackLevel := int64(10) + lastMessageID := int64(20) + pageSize := 100 + pageToken := []byte{} + + tasks := []*replicationspb.ReplicationTask{ + { + TaskType: enumsspb.REPLICATION_TASK_TYPE_NAMESPACE_TASK, + SourceTaskId: 1, + }, + } + s.mockReplicationQueue.EXPECT().GetDLQAckLevel(gomock.Any()).Return(ackLevel, nil) + s.mockReplicationQueue.EXPECT().GetMessagesFromDLQ(gomock.Any(), ackLevel, lastMessageID, pageSize, pageToken). + Return(tasks, nil, nil) + + resp, token, err := s.dlqMessageHandler.Read(context.Background(), lastMessageID, pageSize, pageToken) + + s.NoError(err) + s.Equal(tasks, resp) + s.Nil(token) +} + +func (s *dlqMessageHandlerSuite) TestReadMessages_ThrowErrorOnGetDLQAckLevel() { + lastMessageID := int64(20) + pageSize := 100 + pageToken := []byte{} + + tasks := []*replicationspb.ReplicationTask{ + { + TaskType: enumsspb.REPLICATION_TASK_TYPE_NAMESPACE_TASK, + SourceTaskId: 1, + }, + } + testError := fmt.Errorf("test") + s.mockReplicationQueue.EXPECT().GetDLQAckLevel(gomock.Any()).Return(int64(-1), testError) + s.mockReplicationQueue.EXPECT().GetMessagesFromDLQ(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). + Return(tasks, nil, nil).Times(0) + + _, _, err := s.dlqMessageHandler.Read(context.Background(), lastMessageID, pageSize, pageToken) + + s.Equal(testError, err) +} + +func (s *dlqMessageHandlerSuite) TestReadMessages_ThrowErrorOnReadMessages() { + ackLevel := int64(10) + lastMessageID := int64(20) + pageSize := 100 + pageToken := []byte{} + + testError := fmt.Errorf("test") + s.mockReplicationQueue.EXPECT().GetDLQAckLevel(gomock.Any()).Return(ackLevel, nil) + s.mockReplicationQueue.EXPECT().GetMessagesFromDLQ(gomock.Any(), ackLevel, lastMessageID, pageSize, pageToken). 
+ Return(nil, nil, testError) + + _, _, err := s.dlqMessageHandler.Read(context.Background(), lastMessageID, pageSize, pageToken) + + s.Equal(testError, err) +} + +func (s *dlqMessageHandlerSuite) TestPurgeMessages() { + ackLevel := int64(10) + lastMessageID := int64(20) + + s.mockReplicationQueue.EXPECT().GetDLQAckLevel(gomock.Any()).Return(ackLevel, nil) + s.mockReplicationQueue.EXPECT().RangeDeleteMessagesFromDLQ(gomock.Any(), ackLevel, lastMessageID).Return(nil) + s.mockReplicationQueue.EXPECT().UpdateDLQAckLevel(gomock.Any(), lastMessageID).Return(nil) + err := s.dlqMessageHandler.Purge(context.Background(), lastMessageID) + + s.NoError(err) +} + +func (s *dlqMessageHandlerSuite) TestPurgeMessages_ThrowErrorOnGetDLQAckLevel() { + lastMessageID := int64(20) + testError := fmt.Errorf("test") + + s.mockReplicationQueue.EXPECT().GetDLQAckLevel(gomock.Any()).Return(int64(-1), testError) + s.mockReplicationQueue.EXPECT().RangeDeleteMessagesFromDLQ(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).Times(0) + s.mockReplicationQueue.EXPECT().UpdateDLQAckLevel(gomock.Any(), gomock.Any()).Times(0) + err := s.dlqMessageHandler.Purge(context.Background(), lastMessageID) + + s.Equal(testError, err) +} + +func (s *dlqMessageHandlerSuite) TestPurgeMessages_ThrowErrorOnPurgeMessages() { + ackLevel := int64(10) + lastMessageID := int64(20) + testError := fmt.Errorf("test") + + s.mockReplicationQueue.EXPECT().GetDLQAckLevel(gomock.Any()).Return(ackLevel, nil) + s.mockReplicationQueue.EXPECT().RangeDeleteMessagesFromDLQ(gomock.Any(), ackLevel, lastMessageID).Return(testError) + s.mockReplicationQueue.EXPECT().UpdateDLQAckLevel(gomock.Any(), gomock.Any()).Times(0) + err := s.dlqMessageHandler.Purge(context.Background(), lastMessageID) + + s.Equal(testError, err) +} + +func (s *dlqMessageHandlerSuite) TestMergeMessages() { + ackLevel := int64(10) + lastMessageID := int64(20) + pageSize := 100 + pageToken := []byte{} + messageID := int64(11) + + namespaceAttribute := &replicationspb.NamespaceTaskAttributes{ + Id: uuid.New(), + } + + tasks := []*replicationspb.ReplicationTask{ + { + TaskType: enumsspb.REPLICATION_TASK_TYPE_NAMESPACE_TASK, + SourceTaskId: messageID, + Attributes: &replicationspb.ReplicationTask_NamespaceTaskAttributes{ + NamespaceTaskAttributes: namespaceAttribute, + }, + }, + } + s.mockReplicationQueue.EXPECT().GetDLQAckLevel(gomock.Any()).Return(ackLevel, nil) + s.mockReplicationQueue.EXPECT().GetMessagesFromDLQ(gomock.Any(), ackLevel, lastMessageID, pageSize, pageToken). 
+ Return(tasks, nil, nil) + s.mockReplicationTaskExecutor.EXPECT().Execute(gomock.Any(), namespaceAttribute).Return(nil) + s.mockReplicationQueue.EXPECT().UpdateDLQAckLevel(gomock.Any(), messageID).Return(nil) + s.mockReplicationQueue.EXPECT().RangeDeleteMessagesFromDLQ(gomock.Any(), ackLevel, messageID).Return(nil) + + token, err := s.dlqMessageHandler.Merge(context.Background(), lastMessageID, pageSize, pageToken) + s.NoError(err) + s.Nil(token) +} + +func (s *dlqMessageHandlerSuite) TestMergeMessages_ThrowErrorOnGetDLQAckLevel() { + lastMessageID := int64(20) + pageSize := 100 + pageToken := []byte{} + messageID := int64(11) + testError := fmt.Errorf("test") + namespaceAttribute := &replicationspb.NamespaceTaskAttributes{ + Id: uuid.New(), + } + + tasks := []*replicationspb.ReplicationTask{ + { + TaskType: enumsspb.REPLICATION_TASK_TYPE_NAMESPACE_TASK, + SourceTaskId: int64(messageID), + Attributes: &replicationspb.ReplicationTask_NamespaceTaskAttributes{ + NamespaceTaskAttributes: namespaceAttribute, + }, + }, + } + s.mockReplicationQueue.EXPECT().GetDLQAckLevel(gomock.Any()).Return(int64(-1), testError) + s.mockReplicationQueue.EXPECT().GetMessagesFromDLQ(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). + Return(tasks, nil, nil).Times(0) + s.mockReplicationTaskExecutor.EXPECT().Execute(gomock.Any(), gomock.Any()).Times(0) + s.mockReplicationQueue.EXPECT().DeleteMessageFromDLQ(gomock.Any(), gomock.Any()).Times(0) + s.mockReplicationQueue.EXPECT().UpdateDLQAckLevel(gomock.Any(), gomock.Any()).Times(0) + + token, err := s.dlqMessageHandler.Merge(context.Background(), lastMessageID, pageSize, pageToken) + s.Equal(testError, err) + s.Nil(token) +} + +func (s *dlqMessageHandlerSuite) TestMergeMessages_ThrowErrorOnGetDLQMessages() { + ackLevel := int64(10) + lastMessageID := int64(20) + pageSize := 100 + pageToken := []byte{} + testError := fmt.Errorf("test") + + s.mockReplicationQueue.EXPECT().GetDLQAckLevel(gomock.Any()).Return(ackLevel, nil) + s.mockReplicationQueue.EXPECT().GetMessagesFromDLQ(gomock.Any(), ackLevel, lastMessageID, pageSize, pageToken). 
+ Return(nil, nil, testError) + s.mockReplicationTaskExecutor.EXPECT().Execute(gomock.Any(), gomock.Any()).Times(0) + s.mockReplicationQueue.EXPECT().DeleteMessageFromDLQ(gomock.Any(), gomock.Any()).Times(0) + s.mockReplicationQueue.EXPECT().UpdateDLQAckLevel(gomock.Any(), gomock.Any()).Times(0) + + token, err := s.dlqMessageHandler.Merge(context.Background(), lastMessageID, pageSize, pageToken) + s.Equal(testError, err) + s.Nil(token) +} + +func (s *dlqMessageHandlerSuite) TestMergeMessages_ThrowErrorOnHandleReceivingTask() { + ackLevel := int64(10) + lastMessageID := int64(20) + pageSize := 100 + pageToken := []byte{} + messageID1 := int64(11) + messageID2 := int64(12) + testError := fmt.Errorf("test") + namespaceAttribute1 := &replicationspb.NamespaceTaskAttributes{ + Id: uuid.New(), + } + namespaceAttribute2 := &replicationspb.NamespaceTaskAttributes{ + Id: uuid.New(), + } + tasks := []*replicationspb.ReplicationTask{ + { + TaskType: enumsspb.REPLICATION_TASK_TYPE_NAMESPACE_TASK, + SourceTaskId: messageID1, + Attributes: &replicationspb.ReplicationTask_NamespaceTaskAttributes{ + NamespaceTaskAttributes: namespaceAttribute1, + }, + }, + { + TaskType: enumsspb.REPLICATION_TASK_TYPE_NAMESPACE_TASK, + SourceTaskId: messageID2, + Attributes: &replicationspb.ReplicationTask_NamespaceTaskAttributes{ + NamespaceTaskAttributes: namespaceAttribute2, + }, + }, + } + s.mockReplicationQueue.EXPECT().GetDLQAckLevel(gomock.Any()).Return(ackLevel, nil) + s.mockReplicationQueue.EXPECT().GetMessagesFromDLQ(gomock.Any(), ackLevel, lastMessageID, pageSize, pageToken). + Return(tasks, nil, nil) + s.mockReplicationTaskExecutor.EXPECT().Execute(gomock.Any(), namespaceAttribute1).Return(nil) + s.mockReplicationTaskExecutor.EXPECT().Execute(gomock.Any(), namespaceAttribute2).Return(testError) + + token, err := s.dlqMessageHandler.Merge(context.Background(), lastMessageID, pageSize, pageToken) + s.Equal(testError, err) + s.Nil(token) +} + +func (s *dlqMessageHandlerSuite) TestMergeMessages_ThrowErrorOnDeleteMessages() { + ackLevel := int64(10) + lastMessageID := int64(20) + pageSize := 100 + pageToken := []byte{} + messageID1 := int64(11) + messageID2 := int64(12) + testError := fmt.Errorf("test") + namespaceAttribute1 := &replicationspb.NamespaceTaskAttributes{ + Id: uuid.New(), + } + namespaceAttribute2 := &replicationspb.NamespaceTaskAttributes{ + Id: uuid.New(), + } + tasks := []*replicationspb.ReplicationTask{ + { + TaskType: enumsspb.REPLICATION_TASK_TYPE_NAMESPACE_TASK, + SourceTaskId: messageID1, + Attributes: &replicationspb.ReplicationTask_NamespaceTaskAttributes{ + NamespaceTaskAttributes: namespaceAttribute1, + }, + }, + { + TaskType: enumsspb.REPLICATION_TASK_TYPE_NAMESPACE_TASK, + SourceTaskId: messageID2, + Attributes: &replicationspb.ReplicationTask_NamespaceTaskAttributes{ + NamespaceTaskAttributes: namespaceAttribute2, + }, + }, + } + s.mockReplicationQueue.EXPECT().GetDLQAckLevel(gomock.Any()).Return(ackLevel, nil) + s.mockReplicationQueue.EXPECT().GetMessagesFromDLQ(gomock.Any(), ackLevel, lastMessageID, pageSize, pageToken). 
+ Return(tasks, nil, nil) + s.mockReplicationTaskExecutor.EXPECT().Execute(gomock.Any(), namespaceAttribute1).Return(nil) + s.mockReplicationTaskExecutor.EXPECT().Execute(gomock.Any(), namespaceAttribute2).Return(nil) + s.mockReplicationQueue.EXPECT().RangeDeleteMessagesFromDLQ(gomock.Any(), ackLevel, messageID2).Return(testError) + + token, err := s.dlqMessageHandler.Merge(context.Background(), lastMessageID, pageSize, pageToken) + s.Error(err) + s.Nil(token) +} + +func (s *dlqMessageHandlerSuite) TestMergeMessages_IgnoreErrorOnUpdateDLQAckLevel() { + ackLevel := int64(10) + lastMessageID := int64(20) + pageSize := 100 + pageToken := []byte{} + messageID := int64(11) + testError := fmt.Errorf("test") + namespaceAttribute := &replicationspb.NamespaceTaskAttributes{ + Id: uuid.New(), + } + + tasks := []*replicationspb.ReplicationTask{ + { + TaskType: enumsspb.REPLICATION_TASK_TYPE_NAMESPACE_TASK, + SourceTaskId: messageID, + Attributes: &replicationspb.ReplicationTask_NamespaceTaskAttributes{ + NamespaceTaskAttributes: namespaceAttribute, + }, + }, + } + s.mockReplicationQueue.EXPECT().GetDLQAckLevel(gomock.Any()).Return(ackLevel, nil) + s.mockReplicationQueue.EXPECT().GetMessagesFromDLQ(gomock.Any(), ackLevel, lastMessageID, pageSize, pageToken). + Return(tasks, nil, nil) + s.mockReplicationTaskExecutor.EXPECT().Execute(gomock.Any(), namespaceAttribute).Return(nil) + s.mockReplicationQueue.EXPECT().RangeDeleteMessagesFromDLQ(gomock.Any(), ackLevel, messageID).Return(nil) + s.mockReplicationQueue.EXPECT().UpdateDLQAckLevel(gomock.Any(), messageID).Return(testError) + + token, err := s.dlqMessageHandler.Merge(context.Background(), lastMessageID, pageSize, pageToken) + s.NoError(err) + s.Nil(token) +} diff -Nru temporal-1.21.5-1/src/common/namespace/registry.go temporal-1.22.5/src/common/namespace/registry.go --- temporal-1.21.5-1/src/common/namespace/registry.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/namespace/registry.go 2024-02-23 09:45:43.000000000 +0000 @@ -58,9 +58,8 @@ ) const ( - cacheInitialSize = 10 * 1024 - cacheMaxSize = 64 * 1024 - cacheTTL = 0 // 0 means infinity + cacheMaxSize = 64 * 1024 + cacheTTL = 0 // 0 means infinity // CacheRefreshFailureRetryInterval is the wait time // if refreshment encounters error CacheRefreshFailureRetryInterval = 1 * time.Second @@ -78,12 +77,10 @@ var ( cacheOpts = cache.Options{ - InitialCapacity: cacheInitialSize, - TTL: cacheTTL, + TTL: cacheTTL, } readthroughNotFoundCacheOpts = cache.Options{ - InitialCapacity: cacheInitialSize, - TTL: readthroughCacheTTL, + TTL: readthroughCacheTTL, } ) @@ -132,7 +129,6 @@ // Registry provides access to Namespace objects by name or by ID. Registry interface { - common.Daemon common.Pingable GetNamespace(name Name) (*Namespace, error) GetNamespaceByID(id ID) (*Namespace, error) @@ -147,6 +143,8 @@ // GetCustomSearchAttributesMapper is a temporary solution to be able to get search attributes // with from persistence if forceSearchAttributesCacheRefreshOnRead is true. 
GetCustomSearchAttributesMapper(name Name) (CustomSearchAttributesMapper, error) + Start() + Stop() } registry struct { @@ -393,9 +391,11 @@ return nil default: r.logger.Error("Error refreshing namespace cache", tag.Error(err)) + timer := time.NewTimer(CacheRefreshFailureRetryInterval) select { - case <-time.After(CacheRefreshFailureRetryInterval): + case <-timer.C: case <-ctx.Done(): + timer.Stop() return nil } } diff -Nru temporal-1.21.5-1/src/common/namespace/replicationTaskExecutor.go temporal-1.22.5/src/common/namespace/replicationTaskExecutor.go --- temporal-1.21.5-1/src/common/namespace/replicationTaskExecutor.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/namespace/replicationTaskExecutor.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,357 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- -//go:generate mockgen -copyright_file ../../LICENSE -package $GOPACKAGE -source $GOFILE -destination replicationTaskHandler_mock.go - -package namespace - -import ( - "context" - - enumspb "go.temporal.io/api/enums/v1" - replicationpb "go.temporal.io/api/replication/v1" - "go.temporal.io/api/serviceerror" - - enumsspb "go.temporal.io/server/api/enums/v1" - persistencespb "go.temporal.io/server/api/persistence/v1" - replicationspb "go.temporal.io/server/api/replication/v1" - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/persistence" -) - -var ( - // ErrEmptyNamespaceReplicationTask is the error to indicate empty replication task - ErrEmptyNamespaceReplicationTask = serviceerror.NewInvalidArgument("empty namespace replication task") - // ErrInvalidNamespaceOperation is the error to indicate empty namespace operation attribute - ErrInvalidNamespaceOperation = serviceerror.NewInvalidArgument("invalid namespace operation attribute") - // ErrInvalidNamespaceID is the error to indicate empty rID attribute - ErrInvalidNamespaceID = serviceerror.NewInvalidArgument("invalid namespace ID attribute") - // ErrInvalidNamespaceInfo is the error to indicate empty info attribute - ErrInvalidNamespaceInfo = serviceerror.NewInvalidArgument("invalid namespace info attribute") - // ErrInvalidNamespaceConfig is the error to indicate empty config attribute - ErrInvalidNamespaceConfig = serviceerror.NewInvalidArgument("invalid namespace config attribute") - // ErrInvalidNamespaceReplicationConfig is the error to indicate empty replication config attribute - ErrInvalidNamespaceReplicationConfig = serviceerror.NewInvalidArgument("invalid namespace replication config attribute") - // ErrInvalidNamespaceConfigVersion is the error to indicate empty config version attribute - ErrInvalidNamespaceConfigVersion = serviceerror.NewInvalidArgument("invalid namespace config version attribute") - // ErrInvalidNamespaceFailoverVersion is the error to indicate empty failover version attribute - ErrInvalidNamespaceFailoverVersion = serviceerror.NewInvalidArgument("invalid namespace failover version attribute") - // ErrInvalidNamespaceState is the error to indicate invalid namespace state - ErrInvalidNamespaceState = serviceerror.NewInvalidArgument("invalid namespace state attribute") - // ErrNameUUIDCollision is the error to indicate namespace name / UUID collision - ErrNameUUIDCollision = serviceerror.NewInvalidArgument("namespace replication encountered name / UUID collision") -) - -// NOTE: the counterpart of namespace replication transmission logic is in service/fropntend package - -type ( - // ReplicationTaskExecutor is the interface which is to execute namespace replication task - ReplicationTaskExecutor interface { - Execute(ctx context.Context, task *replicationspb.NamespaceTaskAttributes) error - } - - namespaceReplicationTaskExecutorImpl struct { - currentCluster string - metadataManager persistence.MetadataManager - logger log.Logger - } -) - -// NewReplicationTaskExecutor create a new instance of namespace replicator -func NewReplicationTaskExecutor( - currentCluster string, - metadataManagerV2 persistence.MetadataManager, - logger log.Logger, -) ReplicationTaskExecutor { - - return &namespaceReplicationTaskExecutorImpl{ - currentCluster: currentCluster, - metadataManager: metadataManagerV2, - logger: logger, - } -} - -// Execute handles receiving of the namespace replication task -func (h *namespaceReplicationTaskExecutorImpl) Execute( - ctx context.Context, - task 
*replicationspb.NamespaceTaskAttributes, -) error { - if err := h.validateNamespaceReplicationTask(task); err != nil { - return err - } - if shouldProcess, err := h.shouldProcessTask(ctx, task); !shouldProcess || err != nil { - return err - } - - switch task.GetNamespaceOperation() { - case enumsspb.NAMESPACE_OPERATION_CREATE: - return h.handleNamespaceCreationReplicationTask(ctx, task) - case enumsspb.NAMESPACE_OPERATION_UPDATE: - return h.handleNamespaceUpdateReplicationTask(ctx, task) - default: - return ErrInvalidNamespaceOperation - } -} - -func checkClusterIncludedInReplicationConfig(clusterName string, repCfg []*replicationpb.ClusterReplicationConfig) bool { - for _, cluster := range repCfg { - if clusterName == cluster.ClusterName { - return true - } - } - return false -} - -func (h *namespaceReplicationTaskExecutorImpl) shouldProcessTask(ctx context.Context, task *replicationspb.NamespaceTaskAttributes) (bool, error) { - resp, err := h.metadataManager.GetNamespace(ctx, &persistence.GetNamespaceRequest{ - Name: task.Info.GetName(), - }) - switch err.(type) { - case nil: - if resp.Namespace.Info.Id != task.GetId() { - return false, ErrNameUUIDCollision - } - - return true, nil - case *serviceerror.NamespaceNotFound: - return checkClusterIncludedInReplicationConfig(h.currentCluster, task.ReplicationConfig.Clusters), nil - default: - // return the original err - return false, err - } -} - -// handleNamespaceCreationReplicationTask handles the namespace creation replication task -func (h *namespaceReplicationTaskExecutorImpl) handleNamespaceCreationReplicationTask( - ctx context.Context, - task *replicationspb.NamespaceTaskAttributes, -) error { - // task already validated - err := h.validateNamespaceStatus(task.Info.State) - if err != nil { - return err - } - - request := &persistence.CreateNamespaceRequest{ - Namespace: &persistencespb.NamespaceDetail{ - Info: &persistencespb.NamespaceInfo{ - Id: task.GetId(), - Name: task.Info.GetName(), - State: task.Info.GetState(), - Description: task.Info.GetDescription(), - Owner: task.Info.GetOwnerEmail(), - Data: task.Info.Data, - }, - Config: &persistencespb.NamespaceConfig{ - Retention: task.Config.GetWorkflowExecutionRetentionTtl(), - HistoryArchivalState: task.Config.GetHistoryArchivalState(), - HistoryArchivalUri: task.Config.GetHistoryArchivalUri(), - VisibilityArchivalState: task.Config.GetVisibilityArchivalState(), - VisibilityArchivalUri: task.Config.GetVisibilityArchivalUri(), - CustomSearchAttributeAliases: task.Config.GetCustomSearchAttributeAliases(), - }, - ReplicationConfig: &persistencespb.NamespaceReplicationConfig{ - ActiveClusterName: task.ReplicationConfig.GetActiveClusterName(), - Clusters: ConvertClusterReplicationConfigFromProto(task.ReplicationConfig.Clusters), - }, - ConfigVersion: task.GetConfigVersion(), - FailoverVersion: task.GetFailoverVersion(), - }, - IsGlobalNamespace: true, // local namespace will not be replicated - } - - _, err = h.metadataManager.CreateNamespace(ctx, request) - if err != nil { - // SQL and Cassandra handle namespace UUID collision differently - // here, whenever seeing a error replicating a namespace - // do a check if there is a name / UUID collision - - recordExists := true - resp, getErr := h.metadataManager.GetNamespace(ctx, &persistence.GetNamespaceRequest{ - Name: task.Info.GetName(), - }) - switch getErr.(type) { - case nil: - if resp.Namespace.Info.Id != task.GetId() { - return ErrNameUUIDCollision - } - case *serviceerror.NamespaceNotFound: - // no check is necessary - 
recordExists = false - default: - // return the original err - return err - } - - resp, getErr = h.metadataManager.GetNamespace(ctx, &persistence.GetNamespaceRequest{ - ID: task.GetId(), - }) - switch getErr.(type) { - case nil: - if resp.Namespace.Info.Name != task.Info.GetName() { - return ErrNameUUIDCollision - } - case *serviceerror.NamespaceNotFound: - // no check is necessary - recordExists = false - default: - // return the original err - return err - } - - if recordExists { - // name -> id & id -> name check pass, this is duplication request - return nil - } - return err - } - - return err -} - -// handleNamespaceUpdateReplicationTask handles the namespace update replication task -func (h *namespaceReplicationTaskExecutorImpl) handleNamespaceUpdateReplicationTask( - ctx context.Context, - task *replicationspb.NamespaceTaskAttributes, -) error { - // task already validated - err := h.validateNamespaceStatus(task.Info.State) - if err != nil { - return err - } - - // first we need to get the current notification version since we need to it for conditional update - metadata, err := h.metadataManager.GetMetadata(ctx) - if err != nil { - return err - } - notificationVersion := metadata.NotificationVersion - - // plus, we need to check whether the config version is <= the config version set in the input - // plus, we need to check whether the failover version is <= the failover version set in the input - resp, err := h.metadataManager.GetNamespace(ctx, &persistence.GetNamespaceRequest{ - Name: task.Info.GetName(), - }) - if err != nil { - if _, isNotFound := err.(*serviceerror.NamespaceNotFound); isNotFound { - // this can happen if the create namespace replication task is to processed. - // e.g. new cluster which does not have anything - return h.handleNamespaceCreationReplicationTask(ctx, task) - } - return err - } - - recordUpdated := false - request := &persistence.UpdateNamespaceRequest{ - Namespace: resp.Namespace, - NotificationVersion: notificationVersion, - IsGlobalNamespace: resp.IsGlobalNamespace, - } - - if resp.Namespace.ConfigVersion < task.GetConfigVersion() { - recordUpdated = true - request.Namespace.Info = &persistencespb.NamespaceInfo{ - Id: task.GetId(), - Name: task.Info.GetName(), - State: task.Info.GetState(), - Description: task.Info.GetDescription(), - Owner: task.Info.GetOwnerEmail(), - Data: task.Info.Data, - } - request.Namespace.Config = &persistencespb.NamespaceConfig{ - Retention: task.Config.GetWorkflowExecutionRetentionTtl(), - HistoryArchivalState: task.Config.GetHistoryArchivalState(), - HistoryArchivalUri: task.Config.GetHistoryArchivalUri(), - VisibilityArchivalState: task.Config.GetVisibilityArchivalState(), - VisibilityArchivalUri: task.Config.GetVisibilityArchivalUri(), - CustomSearchAttributeAliases: task.Config.GetCustomSearchAttributeAliases(), - } - if task.Config.GetBadBinaries() != nil { - request.Namespace.Config.BadBinaries = task.Config.GetBadBinaries() - } - request.Namespace.ReplicationConfig.Clusters = ConvertClusterReplicationConfigFromProto(task.ReplicationConfig.Clusters) - request.Namespace.ConfigVersion = task.GetConfigVersion() - } - if resp.Namespace.FailoverVersion < task.GetFailoverVersion() { - recordUpdated = true - request.Namespace.ReplicationConfig.ActiveClusterName = task.ReplicationConfig.GetActiveClusterName() - request.Namespace.FailoverVersion = task.GetFailoverVersion() - request.Namespace.FailoverNotificationVersion = notificationVersion - request.Namespace.ReplicationConfig.FailoverHistory = 
convertFailoverHistoryToPersistenceProto(task.GetFailoverHistory()) - } - - if !recordUpdated { - return nil - } - - return h.metadataManager.UpdateNamespace(ctx, request) -} - -func (h *namespaceReplicationTaskExecutorImpl) validateNamespaceReplicationTask(task *replicationspb.NamespaceTaskAttributes) error { - if task == nil { - return ErrEmptyNamespaceReplicationTask - } - - if task.Id == "" { - return ErrInvalidNamespaceID - } else if task.Info == nil { - return ErrInvalidNamespaceInfo - } else if task.Config == nil { - return ErrInvalidNamespaceConfig - } else if task.ReplicationConfig == nil { - return ErrInvalidNamespaceReplicationConfig - } - return nil -} - -func ConvertClusterReplicationConfigFromProto( - input []*replicationpb.ClusterReplicationConfig, -) []string { - var output []string - for _, cluster := range input { - clusterName := cluster.GetClusterName() - output = append(output, clusterName) - } - return output -} - -func convertFailoverHistoryToPersistenceProto(failoverHistory []*replicationpb.FailoverStatus) []*persistencespb.FailoverStatus { - var persistencePb []*persistencespb.FailoverStatus - for _, status := range failoverHistory { - persistencePb = append(persistencePb, &persistencespb.FailoverStatus{ - FailoverTime: status.GetFailoverTime(), - FailoverVersion: status.GetFailoverVersion(), - }) - } - return persistencePb -} - -func (h *namespaceReplicationTaskExecutorImpl) validateNamespaceStatus(input enumspb.NamespaceState) error { - switch input { - case enumspb.NAMESPACE_STATE_REGISTERED, enumspb.NAMESPACE_STATE_DEPRECATED: - return nil - default: - return ErrInvalidNamespaceState - } -} diff -Nru temporal-1.21.5-1/src/common/namespace/replicationTaskExecutor_test.go temporal-1.22.5/src/common/namespace/replicationTaskExecutor_test.go --- temporal-1.21.5-1/src/common/namespace/replicationTaskExecutor_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/namespace/replicationTaskExecutor_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,732 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- -package namespace - -import ( - "context" - "errors" - "testing" - "time" - - "github.com/golang/mock/gomock" - "github.com/pborman/uuid" - "github.com/stretchr/testify/suite" - enumspb "go.temporal.io/api/enums/v1" - namespacepb "go.temporal.io/api/namespace/v1" - replicationpb "go.temporal.io/api/replication/v1" - "go.temporal.io/api/serviceerror" - - enumsspb "go.temporal.io/server/api/enums/v1" - persistencespb "go.temporal.io/server/api/persistence/v1" - replicationspb "go.temporal.io/server/api/replication/v1" - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/persistence" -) - -type ( - namespaceReplicationTaskExecutorSuite struct { - suite.Suite - controller *gomock.Controller - - mockMetadataMgr *persistence.MockMetadataManager - namespaceReplicator *namespaceReplicationTaskExecutorImpl - } -) - -func TestNamespaceReplicationTaskExecutorSuite(t *testing.T) { - s := new(namespaceReplicationTaskExecutorSuite) - suite.Run(t, s) -} - -func (s *namespaceReplicationTaskExecutorSuite) SetupSuite() { -} - -func (s *namespaceReplicationTaskExecutorSuite) TearDownSuite() { - -} - -func (s *namespaceReplicationTaskExecutorSuite) SetupTest() { - s.controller = gomock.NewController(s.T()) - s.mockMetadataMgr = persistence.NewMockMetadataManager(s.controller) - logger := log.NewTestLogger() - s.namespaceReplicator = NewReplicationTaskExecutor( - "some random standby cluster name", - s.mockMetadataMgr, - logger, - ).(*namespaceReplicationTaskExecutorImpl) -} - -func (s *namespaceReplicationTaskExecutorSuite) TearDownTest() { - s.controller.Finish() -} - -func (s *namespaceReplicationTaskExecutorSuite) TestExecute_RegisterNamespaceTask_NameUUIDCollision() { - operation := enumsspb.NAMESPACE_OPERATION_CREATE - id := uuid.New() - name := "some random namespace test name" - state := enumspb.NAMESPACE_STATE_REGISTERED - description := "some random test description" - ownerEmail := "some random test owner" - data := map[string]string{"k": "v"} - retention := 10 * time.Hour * 24 - historyArchivalState := enumspb.ARCHIVAL_STATE_ENABLED - historyArchivalURI := "some random history archival uri" - visibilityArchivalState := enumspb.ARCHIVAL_STATE_ENABLED - visibilityArchivalURI := "some random visibility archival uri" - clusterActive := "some random active cluster name" - clusterStandby := "some random standby cluster name" - configVersion := int64(0) - failoverVersion := int64(59) - clusters := []*replicationpb.ClusterReplicationConfig{ - { - ClusterName: clusterActive, - }, - { - ClusterName: clusterStandby, - }, - } - - task := &replicationspb.NamespaceTaskAttributes{ - NamespaceOperation: operation, - Id: id, - Info: &namespacepb.NamespaceInfo{ - Name: name, - State: state, - Description: description, - OwnerEmail: ownerEmail, - Data: data, - }, - Config: &namespacepb.NamespaceConfig{ - WorkflowExecutionRetentionTtl: &retention, - HistoryArchivalState: historyArchivalState, - HistoryArchivalUri: historyArchivalURI, - VisibilityArchivalState: visibilityArchivalState, - VisibilityArchivalUri: visibilityArchivalURI, - }, - ReplicationConfig: &replicationpb.NamespaceReplicationConfig{ - ActiveClusterName: clusterActive, - Clusters: clusters, - }, - ConfigVersion: configVersion, - FailoverVersion: failoverVersion, - } - s.mockMetadataMgr.EXPECT().GetNamespace(gomock.Any(), &persistence.GetNamespaceRequest{ - Name: name, - }).Return(&persistence.GetNamespaceResponse{Namespace: &persistencespb.NamespaceDetail{ - Info: &persistencespb.NamespaceInfo{ - Id: uuid.New(), - }, - }}, nil) - 
task.Id = uuid.New() - task.Info.Name = name - err := s.namespaceReplicator.Execute(context.Background(), task) - s.NotNil(err) - s.IsType(&serviceerror.InvalidArgument{}, err) - - task.Id = id - task.Info.Name = "other random namespace test name" - var count int - s.mockMetadataMgr.EXPECT().GetNamespace(gomock.Any(), &persistence.GetNamespaceRequest{ - Name: task.Info.Name, - }).DoAndReturn(func(_ context.Context, request *persistence.GetNamespaceRequest) (*persistence.GetNamespaceResponse, error) { - nsID := id - if count != 0 { - nsID = uuid.New() - } - count++ - return &persistence.GetNamespaceResponse{Namespace: &persistencespb.NamespaceDetail{ - Info: &persistencespb.NamespaceInfo{ - Id: nsID, - }, - }}, nil - }).Times(2) - s.mockMetadataMgr.EXPECT().CreateNamespace(gomock.Any(), gomock.Any()).Return(nil, errors.New("test")) - err = s.namespaceReplicator.Execute(context.Background(), task) - s.NotNil(err) - s.IsType(&serviceerror.InvalidArgument{}, err) -} - -func (s *namespaceReplicationTaskExecutorSuite) TestExecute_RegisterNamespaceTask_Success() { - operation := enumsspb.NAMESPACE_OPERATION_CREATE - id := uuid.New() - name := "some random namespace test name" - state := enumspb.NAMESPACE_STATE_REGISTERED - description := "some random test description" - ownerEmail := "some random test owner" - data := map[string]string{"k": "v"} - retention := 10 * time.Hour * 24 - historyArchivalState := enumspb.ARCHIVAL_STATE_ENABLED - historyArchivalURI := "some random history archival uri" - visibilityArchivalState := enumspb.ARCHIVAL_STATE_ENABLED - visibilityArchivalURI := "some random visibility archival uri" - clusterActive := "some random active cluster name" - clusterStandby := "some random standby cluster name" - configVersion := int64(0) - failoverVersion := int64(59) - clusters := []*replicationpb.ClusterReplicationConfig{ - { - ClusterName: clusterActive, - }, - { - ClusterName: clusterStandby, - }, - } - - task := &replicationspb.NamespaceTaskAttributes{ - NamespaceOperation: operation, - Id: id, - Info: &namespacepb.NamespaceInfo{ - Name: name, - State: state, - Description: description, - OwnerEmail: ownerEmail, - Data: data, - }, - Config: &namespacepb.NamespaceConfig{ - WorkflowExecutionRetentionTtl: &retention, - HistoryArchivalState: historyArchivalState, - HistoryArchivalUri: historyArchivalURI, - VisibilityArchivalState: visibilityArchivalState, - VisibilityArchivalUri: visibilityArchivalURI, - }, - ReplicationConfig: &replicationpb.NamespaceReplicationConfig{ - ActiveClusterName: clusterActive, - Clusters: clusters, - }, - ConfigVersion: configVersion, - FailoverVersion: failoverVersion, - } - - s.mockMetadataMgr.EXPECT().GetNamespace(gomock.Any(), &persistence.GetNamespaceRequest{Name: name}).Return( - nil, &serviceerror.NamespaceNotFound{}).Times(1) - s.mockMetadataMgr.EXPECT().CreateNamespace(gomock.Any(), &persistence.CreateNamespaceRequest{ - Namespace: &persistencespb.NamespaceDetail{ - Info: &persistencespb.NamespaceInfo{ - Id: id, - State: task.Info.State, - Name: task.Info.Name, - Description: task.Info.Description, - Owner: task.Info.OwnerEmail, - Data: task.Info.Data, - }, - Config: &persistencespb.NamespaceConfig{ - Retention: task.Config.WorkflowExecutionRetentionTtl, - HistoryArchivalState: task.Config.HistoryArchivalState, - HistoryArchivalUri: task.Config.HistoryArchivalUri, - VisibilityArchivalState: task.Config.VisibilityArchivalState, - VisibilityArchivalUri: task.Config.VisibilityArchivalUri, - }, - ReplicationConfig: 
&persistencespb.NamespaceReplicationConfig{ - ActiveClusterName: task.ReplicationConfig.ActiveClusterName, - Clusters: []string{clusterActive, clusterStandby}, - }, - ConfigVersion: configVersion, - FailoverNotificationVersion: 0, - FailoverVersion: failoverVersion, - }, - IsGlobalNamespace: true, - }) - err := s.namespaceReplicator.Execute(context.Background(), task) - s.Nil(err) -} - -func (s *namespaceReplicationTaskExecutorSuite) TestExecute_RegisterNamespaceTask_Duplicate() { - name := uuid.New() - id := uuid.New() - clusterActive := "some random active cluster name" - clusterStandby := "some random standby cluster name" - clusters := []*replicationpb.ClusterReplicationConfig{ - { - ClusterName: clusterActive, - }, - { - ClusterName: clusterStandby, - }, - } - task := &replicationspb.NamespaceTaskAttributes{ - Id: id, - NamespaceOperation: enumsspb.NAMESPACE_OPERATION_CREATE, - Info: &namespacepb.NamespaceInfo{ - Name: name, - State: enumspb.NAMESPACE_STATE_REGISTERED, - }, - Config: &namespacepb.NamespaceConfig{}, - ReplicationConfig: &replicationpb.NamespaceReplicationConfig{ - ActiveClusterName: clusterActive, - Clusters: clusters, - }, - } - s.mockMetadataMgr.EXPECT().GetNamespace(gomock.Any(), &persistence.GetNamespaceRequest{ - Name: name, - }).Return(&persistence.GetNamespaceResponse{Namespace: &persistencespb.NamespaceDetail{ - Info: &persistencespb.NamespaceInfo{ - Id: id, - }, - }}, nil).Times(2) - s.mockMetadataMgr.EXPECT().GetNamespace(gomock.Any(), &persistence.GetNamespaceRequest{ - ID: id, - }).Return(&persistence.GetNamespaceResponse{Namespace: &persistencespb.NamespaceDetail{ - Info: &persistencespb.NamespaceInfo{ - Name: name, - }, - }}, nil).Times(1) - s.mockMetadataMgr.EXPECT().CreateNamespace(gomock.Any(), gomock.Any()).Return(nil, errors.New("test")) - err := s.namespaceReplicator.Execute(context.Background(), task) - s.Nil(err) -} - -func (s *namespaceReplicationTaskExecutorSuite) TestExecute_UpdateNamespaceTask_NamespaceNotExist() { - operation := enumsspb.NAMESPACE_OPERATION_UPDATE - id := uuid.New() - name := "some random namespace test name" - state := enumspb.NAMESPACE_STATE_REGISTERED - description := "some random test description" - ownerEmail := "some random test owner" - retention := 10 * time.Hour * 24 - historyArchivalState := enumspb.ARCHIVAL_STATE_ENABLED - historyArchivalURI := "some random history archival uri" - visibilityArchivalState := enumspb.ARCHIVAL_STATE_ENABLED - visibilityArchivalURI := "some random visibility archival uri" - clusterActive := "some random active cluster name" - clusterStandby := "some random standby cluster name" - configVersion := int64(12) - failoverVersion := int64(59) - namespaceData := map[string]string{"k1": "v1", "k2": "v2"} - clusters := []*replicationpb.ClusterReplicationConfig{ - { - ClusterName: clusterActive, - }, - { - ClusterName: clusterStandby, - }, - } - - updateTask := &replicationspb.NamespaceTaskAttributes{ - NamespaceOperation: operation, - Id: id, - Info: &namespacepb.NamespaceInfo{ - Name: name, - State: state, - Description: description, - OwnerEmail: ownerEmail, - Data: namespaceData, - }, - Config: &namespacepb.NamespaceConfig{ - WorkflowExecutionRetentionTtl: &retention, - HistoryArchivalState: historyArchivalState, - HistoryArchivalUri: historyArchivalURI, - VisibilityArchivalState: visibilityArchivalState, - VisibilityArchivalUri: visibilityArchivalURI, - }, - ReplicationConfig: &replicationpb.NamespaceReplicationConfig{ - ActiveClusterName: clusterActive, - Clusters: clusters, - }, - 
ConfigVersion: configVersion, - FailoverVersion: failoverVersion, - } - - s.mockMetadataMgr.EXPECT().GetMetadata(gomock.Any()).Return(&persistence.GetMetadataResponse{NotificationVersion: 0}, nil) - s.mockMetadataMgr.EXPECT().GetNamespace(gomock.Any(), &persistence.GetNamespaceRequest{Name: name}).Return( - nil, &serviceerror.NamespaceNotFound{}).Times(2) - s.mockMetadataMgr.EXPECT().CreateNamespace(gomock.Any(), &persistence.CreateNamespaceRequest{ - Namespace: &persistencespb.NamespaceDetail{ - Info: &persistencespb.NamespaceInfo{ - Id: id, - State: updateTask.Info.State, - Name: updateTask.Info.Name, - Description: updateTask.Info.Description, - Owner: updateTask.Info.OwnerEmail, - Data: updateTask.Info.Data, - }, - Config: &persistencespb.NamespaceConfig{ - Retention: updateTask.Config.WorkflowExecutionRetentionTtl, - HistoryArchivalState: updateTask.Config.HistoryArchivalState, - HistoryArchivalUri: updateTask.Config.HistoryArchivalUri, - VisibilityArchivalState: updateTask.Config.VisibilityArchivalState, - VisibilityArchivalUri: updateTask.Config.VisibilityArchivalUri, - }, - ReplicationConfig: &persistencespb.NamespaceReplicationConfig{ - ActiveClusterName: updateTask.ReplicationConfig.ActiveClusterName, - Clusters: []string{clusterActive, clusterStandby}, - }, - ConfigVersion: configVersion, - FailoverNotificationVersion: 0, - FailoverVersion: failoverVersion, - }, - IsGlobalNamespace: true, - }) - err := s.namespaceReplicator.Execute(context.Background(), updateTask) - s.Nil(err) -} - -func (s *namespaceReplicationTaskExecutorSuite) TestExecute_UpdateNamespaceTask_UpdateConfig_UpdateActiveCluster() { - id := uuid.New() - name := "some random namespace test name" - updateOperation := enumsspb.NAMESPACE_OPERATION_UPDATE - updateState := enumspb.NAMESPACE_STATE_DEPRECATED - updateDescription := "other random namespace test description" - updateOwnerEmail := "other random namespace test owner" - updatedData := map[string]string{"k": "v1"} - updateRetention := 122 * time.Hour * 24 - updateHistoryArchivalState := enumspb.ARCHIVAL_STATE_DISABLED - updateHistoryArchivalURI := "some updated history archival uri" - updateVisibilityArchivalState := enumspb.ARCHIVAL_STATE_DISABLED - updateVisibilityArchivalURI := "some updated visibility archival uri" - updateClusterActive := "other random active cluster name" - updateClusterStandby := "other random standby cluster name" - updateConfigVersion := int64(1) - updateFailoverVersion := int64(59) - failoverTime := time.Now() - failoverHistory := []*replicationpb.FailoverStatus{ - { - FailoverTime: &failoverTime, - FailoverVersion: 999, - }, - } - updateClusters := []*replicationpb.ClusterReplicationConfig{ - { - ClusterName: updateClusterActive, - }, - { - ClusterName: updateClusterStandby, - }, - } - updateTask := &replicationspb.NamespaceTaskAttributes{ - NamespaceOperation: updateOperation, - Id: id, - Info: &namespacepb.NamespaceInfo{ - Name: name, - State: updateState, - Description: updateDescription, - OwnerEmail: updateOwnerEmail, - Data: updatedData, - }, - Config: &namespacepb.NamespaceConfig{ - WorkflowExecutionRetentionTtl: &updateRetention, - HistoryArchivalState: updateHistoryArchivalState, - HistoryArchivalUri: updateHistoryArchivalURI, - VisibilityArchivalState: updateVisibilityArchivalState, - VisibilityArchivalUri: updateVisibilityArchivalURI, - }, - ReplicationConfig: &replicationpb.NamespaceReplicationConfig{ - ActiveClusterName: updateClusterActive, - Clusters: updateClusters, - }, - ConfigVersion: updateConfigVersion, - 
FailoverVersion: updateFailoverVersion, - FailoverHistory: failoverHistory, - } - - s.namespaceReplicator.currentCluster = updateClusterStandby - s.mockMetadataMgr.EXPECT().GetNamespace(gomock.Any(), &persistence.GetNamespaceRequest{ - Name: name, - }).Return(&persistence.GetNamespaceResponse{Namespace: &persistencespb.NamespaceDetail{ - Info: &persistencespb.NamespaceInfo{ - Id: id, - }, - ReplicationConfig: &persistencespb.NamespaceReplicationConfig{}, - }}, nil).Times(2) - s.mockMetadataMgr.EXPECT().GetMetadata(gomock.Any()).Return(&persistence.GetMetadataResponse{ - NotificationVersion: updateFailoverVersion, - }, nil).Times(1) - s.mockMetadataMgr.EXPECT().UpdateNamespace(gomock.Any(), &persistence.UpdateNamespaceRequest{ - Namespace: &persistencespb.NamespaceDetail{ - Info: &persistencespb.NamespaceInfo{ - Id: id, - State: updateTask.Info.State, - Name: updateTask.Info.Name, - Description: updateTask.Info.Description, - Owner: updateTask.Info.OwnerEmail, - Data: updateTask.Info.Data, - }, - Config: &persistencespb.NamespaceConfig{ - Retention: updateTask.Config.WorkflowExecutionRetentionTtl, - HistoryArchivalState: updateTask.Config.HistoryArchivalState, - HistoryArchivalUri: updateTask.Config.HistoryArchivalUri, - VisibilityArchivalState: updateTask.Config.VisibilityArchivalState, - VisibilityArchivalUri: updateTask.Config.VisibilityArchivalUri, - }, - ReplicationConfig: &persistencespb.NamespaceReplicationConfig{ - ActiveClusterName: updateTask.ReplicationConfig.ActiveClusterName, - Clusters: []string{updateClusterActive, updateClusterStandby}, - FailoverHistory: convertFailoverHistoryToPersistenceProto(failoverHistory), - }, - ConfigVersion: updateConfigVersion, - FailoverNotificationVersion: updateFailoverVersion, - FailoverVersion: updateFailoverVersion, - }, - IsGlobalNamespace: false, - NotificationVersion: updateFailoverVersion, - }) - err := s.namespaceReplicator.Execute(context.Background(), updateTask) - s.Nil(err) -} - -func (s *namespaceReplicationTaskExecutorSuite) TestExecute_UpdateNamespaceTask_UpdateConfig_NoUpdateActiveCluster() { - id := uuid.New() - name := "some random namespace test name" - updateOperation := enumsspb.NAMESPACE_OPERATION_UPDATE - updateState := enumspb.NAMESPACE_STATE_DEPRECATED - updateDescription := "other random namespace test description" - updateOwnerEmail := "other random namespace test owner" - updatedData := map[string]string{"k": "v1"} - updateRetention := 122 * time.Hour * 24 - updateHistoryArchivalState := enumspb.ARCHIVAL_STATE_DISABLED - updateHistoryArchivalURI := "some updated history archival uri" - updateVisibilityArchivalState := enumspb.ARCHIVAL_STATE_DISABLED - updateVisibilityArchivalURI := "some updated visibility archival uri" - updateClusterActive := "other random active cluster name" - updateClusterStandby := "other random standby cluster name" - updateConfigVersion := int64(1) - updateFailoverVersion := int64(59) - updateClusters := []*replicationpb.ClusterReplicationConfig{ - { - ClusterName: updateClusterActive, - }, - { - ClusterName: updateClusterStandby, - }, - } - updateTask := &replicationspb.NamespaceTaskAttributes{ - NamespaceOperation: updateOperation, - Id: id, - Info: &namespacepb.NamespaceInfo{ - Name: name, - State: updateState, - Description: updateDescription, - OwnerEmail: updateOwnerEmail, - Data: updatedData, - }, - Config: &namespacepb.NamespaceConfig{ - WorkflowExecutionRetentionTtl: &updateRetention, - HistoryArchivalState: updateHistoryArchivalState, - HistoryArchivalUri: updateHistoryArchivalURI, 
- VisibilityArchivalState: updateVisibilityArchivalState, - VisibilityArchivalUri: updateVisibilityArchivalURI, - }, - ReplicationConfig: &replicationpb.NamespaceReplicationConfig{ - ActiveClusterName: updateClusterActive, - Clusters: updateClusters, - }, - ConfigVersion: updateConfigVersion, - FailoverVersion: updateFailoverVersion, - } - - s.namespaceReplicator.currentCluster = updateClusterStandby - s.mockMetadataMgr.EXPECT().GetNamespace(gomock.Any(), &persistence.GetNamespaceRequest{ - Name: name, - }).Return(&persistence.GetNamespaceResponse{Namespace: &persistencespb.NamespaceDetail{ - Info: &persistencespb.NamespaceInfo{ - Id: id, - }, - ReplicationConfig: &persistencespb.NamespaceReplicationConfig{}, - FailoverVersion: updateFailoverVersion + 1, - }}, nil).Times(2) - s.mockMetadataMgr.EXPECT().GetMetadata(gomock.Any()).Return(&persistence.GetMetadataResponse{ - NotificationVersion: updateFailoverVersion, - }, nil).Times(1) - s.mockMetadataMgr.EXPECT().UpdateNamespace(gomock.Any(), &persistence.UpdateNamespaceRequest{ - Namespace: &persistencespb.NamespaceDetail{ - Info: &persistencespb.NamespaceInfo{ - Id: id, - State: updateTask.Info.State, - Name: updateTask.Info.Name, - Description: updateTask.Info.Description, - Owner: updateTask.Info.OwnerEmail, - Data: updateTask.Info.Data, - }, - Config: &persistencespb.NamespaceConfig{ - Retention: updateTask.Config.WorkflowExecutionRetentionTtl, - HistoryArchivalState: updateTask.Config.HistoryArchivalState, - HistoryArchivalUri: updateTask.Config.HistoryArchivalUri, - VisibilityArchivalState: updateTask.Config.VisibilityArchivalState, - VisibilityArchivalUri: updateTask.Config.VisibilityArchivalUri, - }, - ReplicationConfig: &persistencespb.NamespaceReplicationConfig{ - Clusters: []string{updateClusterActive, updateClusterStandby}, - }, - ConfigVersion: updateConfigVersion, - FailoverNotificationVersion: 0, - FailoverVersion: updateFailoverVersion + 1, - }, - IsGlobalNamespace: false, - NotificationVersion: updateFailoverVersion, - }) - err := s.namespaceReplicator.Execute(context.Background(), updateTask) - s.Nil(err) -} - -func (s *namespaceReplicationTaskExecutorSuite) TestExecute_UpdateNamespaceTask_NoUpdateConfig_UpdateActiveCluster() { - id := uuid.New() - name := "some random namespace test name" - updateOperation := enumsspb.NAMESPACE_OPERATION_UPDATE - updateState := enumspb.NAMESPACE_STATE_DEPRECATED - updateDescription := "other random namespace test description" - updateOwnerEmail := "other random namespace test owner" - updatedData := map[string]string{"k": "v1"} - updateRetention := 122 * time.Hour * 24 - updateHistoryArchivalState := enumspb.ARCHIVAL_STATE_DISABLED - updateHistoryArchivalURI := "some updated history archival uri" - updateVisibilityArchivalState := enumspb.ARCHIVAL_STATE_DISABLED - updateVisibilityArchivalURI := "some updated visibility archival uri" - updateClusterActive := "other random active cluster name" - updateClusterStandby := "other random standby cluster name" - updateConfigVersion := int64(1) - updateFailoverVersion := int64(59) - updateClusters := []*replicationpb.ClusterReplicationConfig{ - { - ClusterName: updateClusterActive, - }, - { - ClusterName: updateClusterStandby, - }, - } - updateTask := &replicationspb.NamespaceTaskAttributes{ - NamespaceOperation: updateOperation, - Id: id, - Info: &namespacepb.NamespaceInfo{ - Name: name, - State: updateState, - Description: updateDescription, - OwnerEmail: updateOwnerEmail, - Data: updatedData, - }, - Config: &namespacepb.NamespaceConfig{ - 
WorkflowExecutionRetentionTtl: &updateRetention, - HistoryArchivalState: updateHistoryArchivalState, - HistoryArchivalUri: updateHistoryArchivalURI, - VisibilityArchivalState: updateVisibilityArchivalState, - VisibilityArchivalUri: updateVisibilityArchivalURI, - }, - ReplicationConfig: &replicationpb.NamespaceReplicationConfig{ - ActiveClusterName: updateClusterActive, - Clusters: updateClusters, - }, - ConfigVersion: updateConfigVersion, - FailoverVersion: updateFailoverVersion, - } - - s.namespaceReplicator.currentCluster = updateClusterStandby - s.mockMetadataMgr.EXPECT().GetNamespace(gomock.Any(), &persistence.GetNamespaceRequest{ - Name: name, - }).Return(&persistence.GetNamespaceResponse{Namespace: &persistencespb.NamespaceDetail{ - Info: &persistencespb.NamespaceInfo{ - Id: id, - }, - ReplicationConfig: &persistencespb.NamespaceReplicationConfig{}, - ConfigVersion: updateConfigVersion + 1, - }}, nil).Times(2) - s.mockMetadataMgr.EXPECT().GetMetadata(gomock.Any()).Return(&persistence.GetMetadataResponse{ - NotificationVersion: updateFailoverVersion, - }, nil).Times(1) - s.mockMetadataMgr.EXPECT().UpdateNamespace(gomock.Any(), &persistence.UpdateNamespaceRequest{ - Namespace: &persistencespb.NamespaceDetail{ - Info: &persistencespb.NamespaceInfo{ - Id: id, - }, - ReplicationConfig: &persistencespb.NamespaceReplicationConfig{ - ActiveClusterName: updateClusterActive, - }, - ConfigVersion: updateConfigVersion + 1, - FailoverNotificationVersion: updateFailoverVersion, - FailoverVersion: updateFailoverVersion, - }, - IsGlobalNamespace: false, - NotificationVersion: updateFailoverVersion, - }) - err := s.namespaceReplicator.Execute(context.Background(), updateTask) - s.Nil(err) -} - -func (s *namespaceReplicationTaskExecutorSuite) TestExecute_UpdateNamespaceTask_NoUpdateConfig_NoUpdateActiveCluster() { - id := uuid.New() - name := "some random namespace test name" - updateOperation := enumsspb.NAMESPACE_OPERATION_UPDATE - updateState := enumspb.NAMESPACE_STATE_DEPRECATED - updateDescription := "other random namespace test description" - updateOwnerEmail := "other random namespace test owner" - updatedData := map[string]string{"k": "v1"} - updateRetention := 122 * time.Hour * 24 - updateHistoryArchivalState := enumspb.ARCHIVAL_STATE_DISABLED - updateHistoryArchivalURI := "some updated history archival uri" - updateVisibilityArchivalState := enumspb.ARCHIVAL_STATE_DISABLED - updateVisibilityArchivalURI := "some updated visibility archival uri" - updateClusterActive := "other random active cluster name" - updateClusterStandby := "other random standby cluster name" - updateConfigVersion := int64(1) - updateFailoverVersion := int64(59) - updateClusters := []*replicationpb.ClusterReplicationConfig{ - { - ClusterName: updateClusterActive, - }, - { - ClusterName: updateClusterStandby, - }, - } - updateTask := &replicationspb.NamespaceTaskAttributes{ - NamespaceOperation: updateOperation, - Id: id, - Info: &namespacepb.NamespaceInfo{ - Name: name, - State: updateState, - Description: updateDescription, - OwnerEmail: updateOwnerEmail, - Data: updatedData, - }, - Config: &namespacepb.NamespaceConfig{ - WorkflowExecutionRetentionTtl: &updateRetention, - HistoryArchivalState: updateHistoryArchivalState, - HistoryArchivalUri: updateHistoryArchivalURI, - VisibilityArchivalState: updateVisibilityArchivalState, - VisibilityArchivalUri: updateVisibilityArchivalURI, - }, - ReplicationConfig: &replicationpb.NamespaceReplicationConfig{ - ActiveClusterName: updateClusterActive, - Clusters: updateClusters, - }, 
- ConfigVersion: updateConfigVersion, - FailoverVersion: updateFailoverVersion, - } - - s.namespaceReplicator.currentCluster = updateClusterStandby - s.mockMetadataMgr.EXPECT().GetNamespace(gomock.Any(), &persistence.GetNamespaceRequest{ - Name: name, - }).Return(&persistence.GetNamespaceResponse{Namespace: &persistencespb.NamespaceDetail{ - Info: &persistencespb.NamespaceInfo{ - Id: id, - }, - ReplicationConfig: &persistencespb.NamespaceReplicationConfig{}, - ConfigVersion: updateConfigVersion + 1, - FailoverVersion: updateFailoverVersion + 1, - }}, nil).Times(2) - s.mockMetadataMgr.EXPECT().GetMetadata(gomock.Any()).Return(&persistence.GetMetadataResponse{ - NotificationVersion: updateFailoverVersion, - }, nil).Times(1) - - s.mockMetadataMgr.EXPECT().UpdateNamespace(gomock.Any(), gomock.Any()).Times(0) - err := s.namespaceReplicator.Execute(context.Background(), updateTask) - s.Nil(err) -} diff -Nru temporal-1.21.5-1/src/common/namespace/replicationTaskHandler_mock.go temporal-1.22.5/src/common/namespace/replicationTaskHandler_mock.go --- temporal-1.21.5-1/src/common/namespace/replicationTaskHandler_mock.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/namespace/replicationTaskHandler_mock.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,74 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Code generated by MockGen. DO NOT EDIT. -// Source: replicationTaskExecutor.go - -// Package namespace is a generated GoMock package. -package namespace - -import ( - context "context" - reflect "reflect" - - gomock "github.com/golang/mock/gomock" - repication "go.temporal.io/server/api/replication/v1" -) - -// MockReplicationTaskExecutor is a mock of ReplicationTaskExecutor interface. -type MockReplicationTaskExecutor struct { - ctrl *gomock.Controller - recorder *MockReplicationTaskExecutorMockRecorder -} - -// MockReplicationTaskExecutorMockRecorder is the mock recorder for MockReplicationTaskExecutor. -type MockReplicationTaskExecutorMockRecorder struct { - mock *MockReplicationTaskExecutor -} - -// NewMockReplicationTaskExecutor creates a new mock instance. 
-func NewMockReplicationTaskExecutor(ctrl *gomock.Controller) *MockReplicationTaskExecutor { - mock := &MockReplicationTaskExecutor{ctrl: ctrl} - mock.recorder = &MockReplicationTaskExecutorMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockReplicationTaskExecutor) EXPECT() *MockReplicationTaskExecutorMockRecorder { - return m.recorder -} - -// Execute mocks base method. -func (m *MockReplicationTaskExecutor) Execute(ctx context.Context, task *repication.NamespaceTaskAttributes) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Execute", ctx, task) - ret0, _ := ret[0].(error) - return ret0 -} - -// Execute indicates an expected call of Execute. -func (mr *MockReplicationTaskExecutorMockRecorder) Execute(ctx, task interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Execute", reflect.TypeOf((*MockReplicationTaskExecutor)(nil).Execute), ctx, task) -} diff -Nru temporal-1.21.5-1/src/common/namespace/replication_task_executor.go temporal-1.22.5/src/common/namespace/replication_task_executor.go --- temporal-1.21.5-1/src/common/namespace/replication_task_executor.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/namespace/replication_task_executor.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,357 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +//go:generate mockgen -copyright_file ../../LICENSE -package $GOPACKAGE -source $GOFILE -destination replication_task_handler_mock.go + +package namespace + +import ( + "context" + + enumspb "go.temporal.io/api/enums/v1" + replicationpb "go.temporal.io/api/replication/v1" + "go.temporal.io/api/serviceerror" + + enumsspb "go.temporal.io/server/api/enums/v1" + persistencespb "go.temporal.io/server/api/persistence/v1" + replicationspb "go.temporal.io/server/api/replication/v1" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/persistence" +) + +var ( + // ErrEmptyNamespaceReplicationTask is the error to indicate empty replication task + ErrEmptyNamespaceReplicationTask = serviceerror.NewInvalidArgument("empty namespace replication task") + // ErrInvalidNamespaceOperation is the error to indicate empty namespace operation attribute + ErrInvalidNamespaceOperation = serviceerror.NewInvalidArgument("invalid namespace operation attribute") + // ErrInvalidNamespaceID is the error to indicate empty rID attribute + ErrInvalidNamespaceID = serviceerror.NewInvalidArgument("invalid namespace ID attribute") + // ErrInvalidNamespaceInfo is the error to indicate empty info attribute + ErrInvalidNamespaceInfo = serviceerror.NewInvalidArgument("invalid namespace info attribute") + // ErrInvalidNamespaceConfig is the error to indicate empty config attribute + ErrInvalidNamespaceConfig = serviceerror.NewInvalidArgument("invalid namespace config attribute") + // ErrInvalidNamespaceReplicationConfig is the error to indicate empty replication config attribute + ErrInvalidNamespaceReplicationConfig = serviceerror.NewInvalidArgument("invalid namespace replication config attribute") + // ErrInvalidNamespaceConfigVersion is the error to indicate empty config version attribute + ErrInvalidNamespaceConfigVersion = serviceerror.NewInvalidArgument("invalid namespace config version attribute") + // ErrInvalidNamespaceFailoverVersion is the error to indicate empty failover version attribute + ErrInvalidNamespaceFailoverVersion = serviceerror.NewInvalidArgument("invalid namespace failover version attribute") + // ErrInvalidNamespaceState is the error to indicate invalid namespace state + ErrInvalidNamespaceState = serviceerror.NewInvalidArgument("invalid namespace state attribute") + // ErrNameUUIDCollision is the error to indicate namespace name / UUID collision + ErrNameUUIDCollision = serviceerror.NewInvalidArgument("namespace replication encountered name / UUID collision") +) + +// NOTE: the counterpart of namespace replication transmission logic is in service/fropntend package + +type ( + // ReplicationTaskExecutor is the interface which is to execute namespace replication task + ReplicationTaskExecutor interface { + Execute(ctx context.Context, task *replicationspb.NamespaceTaskAttributes) error + } + + namespaceReplicationTaskExecutorImpl struct { + currentCluster string + metadataManager persistence.MetadataManager + logger log.Logger + } +) + +// NewReplicationTaskExecutor create a new instance of namespace replicator +func NewReplicationTaskExecutor( + currentCluster string, + metadataManagerV2 persistence.MetadataManager, + logger log.Logger, +) ReplicationTaskExecutor { + + return &namespaceReplicationTaskExecutorImpl{ + currentCluster: currentCluster, + metadataManager: metadataManagerV2, + logger: logger, + } +} + +// Execute handles receiving of the namespace replication task +func (h *namespaceReplicationTaskExecutorImpl) Execute( + ctx context.Context, + task 
*replicationspb.NamespaceTaskAttributes, +) error { + if err := h.validateNamespaceReplicationTask(task); err != nil { + return err + } + if shouldProcess, err := h.shouldProcessTask(ctx, task); !shouldProcess || err != nil { + return err + } + + switch task.GetNamespaceOperation() { + case enumsspb.NAMESPACE_OPERATION_CREATE: + return h.handleNamespaceCreationReplicationTask(ctx, task) + case enumsspb.NAMESPACE_OPERATION_UPDATE: + return h.handleNamespaceUpdateReplicationTask(ctx, task) + default: + return ErrInvalidNamespaceOperation + } +} + +func checkClusterIncludedInReplicationConfig(clusterName string, repCfg []*replicationpb.ClusterReplicationConfig) bool { + for _, cluster := range repCfg { + if clusterName == cluster.ClusterName { + return true + } + } + return false +} + +func (h *namespaceReplicationTaskExecutorImpl) shouldProcessTask(ctx context.Context, task *replicationspb.NamespaceTaskAttributes) (bool, error) { + resp, err := h.metadataManager.GetNamespace(ctx, &persistence.GetNamespaceRequest{ + Name: task.Info.GetName(), + }) + switch err.(type) { + case nil: + if resp.Namespace.Info.Id != task.GetId() { + return false, ErrNameUUIDCollision + } + + return true, nil + case *serviceerror.NamespaceNotFound: + return checkClusterIncludedInReplicationConfig(h.currentCluster, task.ReplicationConfig.Clusters), nil + default: + // return the original err + return false, err + } +} + +// handleNamespaceCreationReplicationTask handles the namespace creation replication task +func (h *namespaceReplicationTaskExecutorImpl) handleNamespaceCreationReplicationTask( + ctx context.Context, + task *replicationspb.NamespaceTaskAttributes, +) error { + // task already validated + err := h.validateNamespaceStatus(task.Info.State) + if err != nil { + return err + } + + request := &persistence.CreateNamespaceRequest{ + Namespace: &persistencespb.NamespaceDetail{ + Info: &persistencespb.NamespaceInfo{ + Id: task.GetId(), + Name: task.Info.GetName(), + State: task.Info.GetState(), + Description: task.Info.GetDescription(), + Owner: task.Info.GetOwnerEmail(), + Data: task.Info.Data, + }, + Config: &persistencespb.NamespaceConfig{ + Retention: task.Config.GetWorkflowExecutionRetentionTtl(), + HistoryArchivalState: task.Config.GetHistoryArchivalState(), + HistoryArchivalUri: task.Config.GetHistoryArchivalUri(), + VisibilityArchivalState: task.Config.GetVisibilityArchivalState(), + VisibilityArchivalUri: task.Config.GetVisibilityArchivalUri(), + CustomSearchAttributeAliases: task.Config.GetCustomSearchAttributeAliases(), + }, + ReplicationConfig: &persistencespb.NamespaceReplicationConfig{ + ActiveClusterName: task.ReplicationConfig.GetActiveClusterName(), + Clusters: ConvertClusterReplicationConfigFromProto(task.ReplicationConfig.Clusters), + }, + ConfigVersion: task.GetConfigVersion(), + FailoverVersion: task.GetFailoverVersion(), + }, + IsGlobalNamespace: true, // local namespace will not be replicated + } + + _, err = h.metadataManager.CreateNamespace(ctx, request) + if err != nil { + // SQL and Cassandra handle namespace UUID collision differently + // here, whenever seeing a error replicating a namespace + // do a check if there is a name / UUID collision + + recordExists := true + resp, getErr := h.metadataManager.GetNamespace(ctx, &persistence.GetNamespaceRequest{ + Name: task.Info.GetName(), + }) + switch getErr.(type) { + case nil: + if resp.Namespace.Info.Id != task.GetId() { + return ErrNameUUIDCollision + } + case *serviceerror.NamespaceNotFound: + // no check is necessary + 
recordExists = false + default: + // return the original err + return err + } + + resp, getErr = h.metadataManager.GetNamespace(ctx, &persistence.GetNamespaceRequest{ + ID: task.GetId(), + }) + switch getErr.(type) { + case nil: + if resp.Namespace.Info.Name != task.Info.GetName() { + return ErrNameUUIDCollision + } + case *serviceerror.NamespaceNotFound: + // no check is necessary + recordExists = false + default: + // return the original err + return err + } + + if recordExists { + // name -> id & id -> name check pass, this is duplication request + return nil + } + return err + } + + return err +} + +// handleNamespaceUpdateReplicationTask handles the namespace update replication task +func (h *namespaceReplicationTaskExecutorImpl) handleNamespaceUpdateReplicationTask( + ctx context.Context, + task *replicationspb.NamespaceTaskAttributes, +) error { + // task already validated + err := h.validateNamespaceStatus(task.Info.State) + if err != nil { + return err + } + + // first we need to get the current notification version since we need to it for conditional update + metadata, err := h.metadataManager.GetMetadata(ctx) + if err != nil { + return err + } + notificationVersion := metadata.NotificationVersion + + // plus, we need to check whether the config version is <= the config version set in the input + // plus, we need to check whether the failover version is <= the failover version set in the input + resp, err := h.metadataManager.GetNamespace(ctx, &persistence.GetNamespaceRequest{ + Name: task.Info.GetName(), + }) + if err != nil { + if _, isNotFound := err.(*serviceerror.NamespaceNotFound); isNotFound { + // this can happen if the create namespace replication task is to processed. + // e.g. new cluster which does not have anything + return h.handleNamespaceCreationReplicationTask(ctx, task) + } + return err + } + + recordUpdated := false + request := &persistence.UpdateNamespaceRequest{ + Namespace: resp.Namespace, + NotificationVersion: notificationVersion, + IsGlobalNamespace: resp.IsGlobalNamespace, + } + + if resp.Namespace.ConfigVersion < task.GetConfigVersion() { + recordUpdated = true + request.Namespace.Info = &persistencespb.NamespaceInfo{ + Id: task.GetId(), + Name: task.Info.GetName(), + State: task.Info.GetState(), + Description: task.Info.GetDescription(), + Owner: task.Info.GetOwnerEmail(), + Data: task.Info.Data, + } + request.Namespace.Config = &persistencespb.NamespaceConfig{ + Retention: task.Config.GetWorkflowExecutionRetentionTtl(), + HistoryArchivalState: task.Config.GetHistoryArchivalState(), + HistoryArchivalUri: task.Config.GetHistoryArchivalUri(), + VisibilityArchivalState: task.Config.GetVisibilityArchivalState(), + VisibilityArchivalUri: task.Config.GetVisibilityArchivalUri(), + CustomSearchAttributeAliases: task.Config.GetCustomSearchAttributeAliases(), + } + if task.Config.GetBadBinaries() != nil { + request.Namespace.Config.BadBinaries = task.Config.GetBadBinaries() + } + request.Namespace.ReplicationConfig.Clusters = ConvertClusterReplicationConfigFromProto(task.ReplicationConfig.Clusters) + request.Namespace.ConfigVersion = task.GetConfigVersion() + } + if resp.Namespace.FailoverVersion < task.GetFailoverVersion() { + recordUpdated = true + request.Namespace.ReplicationConfig.ActiveClusterName = task.ReplicationConfig.GetActiveClusterName() + request.Namespace.FailoverVersion = task.GetFailoverVersion() + request.Namespace.FailoverNotificationVersion = notificationVersion + request.Namespace.ReplicationConfig.FailoverHistory = 
convertFailoverHistoryToPersistenceProto(task.GetFailoverHistory()) + } + + if !recordUpdated { + return nil + } + + return h.metadataManager.UpdateNamespace(ctx, request) +} + +func (h *namespaceReplicationTaskExecutorImpl) validateNamespaceReplicationTask(task *replicationspb.NamespaceTaskAttributes) error { + if task == nil { + return ErrEmptyNamespaceReplicationTask + } + + if task.Id == "" { + return ErrInvalidNamespaceID + } else if task.Info == nil { + return ErrInvalidNamespaceInfo + } else if task.Config == nil { + return ErrInvalidNamespaceConfig + } else if task.ReplicationConfig == nil { + return ErrInvalidNamespaceReplicationConfig + } + return nil +} + +func ConvertClusterReplicationConfigFromProto( + input []*replicationpb.ClusterReplicationConfig, +) []string { + var output []string + for _, cluster := range input { + clusterName := cluster.GetClusterName() + output = append(output, clusterName) + } + return output +} + +func convertFailoverHistoryToPersistenceProto(failoverHistory []*replicationpb.FailoverStatus) []*persistencespb.FailoverStatus { + var persistencePb []*persistencespb.FailoverStatus + for _, status := range failoverHistory { + persistencePb = append(persistencePb, &persistencespb.FailoverStatus{ + FailoverTime: status.GetFailoverTime(), + FailoverVersion: status.GetFailoverVersion(), + }) + } + return persistencePb +} + +func (h *namespaceReplicationTaskExecutorImpl) validateNamespaceStatus(input enumspb.NamespaceState) error { + switch input { + case enumspb.NAMESPACE_STATE_REGISTERED, enumspb.NAMESPACE_STATE_DEPRECATED: + return nil + default: + return ErrInvalidNamespaceState + } +} diff -Nru temporal-1.21.5-1/src/common/namespace/replication_task_executor_test.go temporal-1.22.5/src/common/namespace/replication_task_executor_test.go --- temporal-1.21.5-1/src/common/namespace/replication_task_executor_test.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/namespace/replication_task_executor_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,732 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package namespace + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/pborman/uuid" + "github.com/stretchr/testify/suite" + enumspb "go.temporal.io/api/enums/v1" + namespacepb "go.temporal.io/api/namespace/v1" + replicationpb "go.temporal.io/api/replication/v1" + "go.temporal.io/api/serviceerror" + + enumsspb "go.temporal.io/server/api/enums/v1" + persistencespb "go.temporal.io/server/api/persistence/v1" + replicationspb "go.temporal.io/server/api/replication/v1" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/persistence" +) + +type ( + namespaceReplicationTaskExecutorSuite struct { + suite.Suite + controller *gomock.Controller + + mockMetadataMgr *persistence.MockMetadataManager + namespaceReplicator *namespaceReplicationTaskExecutorImpl + } +) + +func TestNamespaceReplicationTaskExecutorSuite(t *testing.T) { + s := new(namespaceReplicationTaskExecutorSuite) + suite.Run(t, s) +} + +func (s *namespaceReplicationTaskExecutorSuite) SetupSuite() { +} + +func (s *namespaceReplicationTaskExecutorSuite) TearDownSuite() { + +} + +func (s *namespaceReplicationTaskExecutorSuite) SetupTest() { + s.controller = gomock.NewController(s.T()) + s.mockMetadataMgr = persistence.NewMockMetadataManager(s.controller) + logger := log.NewTestLogger() + s.namespaceReplicator = NewReplicationTaskExecutor( + "some random standby cluster name", + s.mockMetadataMgr, + logger, + ).(*namespaceReplicationTaskExecutorImpl) +} + +func (s *namespaceReplicationTaskExecutorSuite) TearDownTest() { + s.controller.Finish() +} + +func (s *namespaceReplicationTaskExecutorSuite) TestExecute_RegisterNamespaceTask_NameUUIDCollision() { + operation := enumsspb.NAMESPACE_OPERATION_CREATE + id := uuid.New() + name := "some random namespace test name" + state := enumspb.NAMESPACE_STATE_REGISTERED + description := "some random test description" + ownerEmail := "some random test owner" + data := map[string]string{"k": "v"} + retention := 10 * time.Hour * 24 + historyArchivalState := enumspb.ARCHIVAL_STATE_ENABLED + historyArchivalURI := "some random history archival uri" + visibilityArchivalState := enumspb.ARCHIVAL_STATE_ENABLED + visibilityArchivalURI := "some random visibility archival uri" + clusterActive := "some random active cluster name" + clusterStandby := "some random standby cluster name" + configVersion := int64(0) + failoverVersion := int64(59) + clusters := []*replicationpb.ClusterReplicationConfig{ + { + ClusterName: clusterActive, + }, + { + ClusterName: clusterStandby, + }, + } + + task := &replicationspb.NamespaceTaskAttributes{ + NamespaceOperation: operation, + Id: id, + Info: &namespacepb.NamespaceInfo{ + Name: name, + State: state, + Description: description, + OwnerEmail: ownerEmail, + Data: data, + }, + Config: &namespacepb.NamespaceConfig{ + WorkflowExecutionRetentionTtl: &retention, + HistoryArchivalState: historyArchivalState, + HistoryArchivalUri: historyArchivalURI, + VisibilityArchivalState: visibilityArchivalState, + VisibilityArchivalUri: visibilityArchivalURI, + }, + ReplicationConfig: &replicationpb.NamespaceReplicationConfig{ + ActiveClusterName: clusterActive, + Clusters: clusters, + }, + ConfigVersion: configVersion, + FailoverVersion: failoverVersion, + } + s.mockMetadataMgr.EXPECT().GetNamespace(gomock.Any(), &persistence.GetNamespaceRequest{ + Name: name, + }).Return(&persistence.GetNamespaceResponse{Namespace: &persistencespb.NamespaceDetail{ + Info: &persistencespb.NamespaceInfo{ + Id: uuid.New(), + }, + }}, nil) + 
task.Id = uuid.New() + task.Info.Name = name + err := s.namespaceReplicator.Execute(context.Background(), task) + s.NotNil(err) + s.IsType(&serviceerror.InvalidArgument{}, err) + + task.Id = id + task.Info.Name = "other random namespace test name" + var count int + s.mockMetadataMgr.EXPECT().GetNamespace(gomock.Any(), &persistence.GetNamespaceRequest{ + Name: task.Info.Name, + }).DoAndReturn(func(_ context.Context, request *persistence.GetNamespaceRequest) (*persistence.GetNamespaceResponse, error) { + nsID := id + if count != 0 { + nsID = uuid.New() + } + count++ + return &persistence.GetNamespaceResponse{Namespace: &persistencespb.NamespaceDetail{ + Info: &persistencespb.NamespaceInfo{ + Id: nsID, + }, + }}, nil + }).Times(2) + s.mockMetadataMgr.EXPECT().CreateNamespace(gomock.Any(), gomock.Any()).Return(nil, errors.New("test")) + err = s.namespaceReplicator.Execute(context.Background(), task) + s.NotNil(err) + s.IsType(&serviceerror.InvalidArgument{}, err) +} + +func (s *namespaceReplicationTaskExecutorSuite) TestExecute_RegisterNamespaceTask_Success() { + operation := enumsspb.NAMESPACE_OPERATION_CREATE + id := uuid.New() + name := "some random namespace test name" + state := enumspb.NAMESPACE_STATE_REGISTERED + description := "some random test description" + ownerEmail := "some random test owner" + data := map[string]string{"k": "v"} + retention := 10 * time.Hour * 24 + historyArchivalState := enumspb.ARCHIVAL_STATE_ENABLED + historyArchivalURI := "some random history archival uri" + visibilityArchivalState := enumspb.ARCHIVAL_STATE_ENABLED + visibilityArchivalURI := "some random visibility archival uri" + clusterActive := "some random active cluster name" + clusterStandby := "some random standby cluster name" + configVersion := int64(0) + failoverVersion := int64(59) + clusters := []*replicationpb.ClusterReplicationConfig{ + { + ClusterName: clusterActive, + }, + { + ClusterName: clusterStandby, + }, + } + + task := &replicationspb.NamespaceTaskAttributes{ + NamespaceOperation: operation, + Id: id, + Info: &namespacepb.NamespaceInfo{ + Name: name, + State: state, + Description: description, + OwnerEmail: ownerEmail, + Data: data, + }, + Config: &namespacepb.NamespaceConfig{ + WorkflowExecutionRetentionTtl: &retention, + HistoryArchivalState: historyArchivalState, + HistoryArchivalUri: historyArchivalURI, + VisibilityArchivalState: visibilityArchivalState, + VisibilityArchivalUri: visibilityArchivalURI, + }, + ReplicationConfig: &replicationpb.NamespaceReplicationConfig{ + ActiveClusterName: clusterActive, + Clusters: clusters, + }, + ConfigVersion: configVersion, + FailoverVersion: failoverVersion, + } + + s.mockMetadataMgr.EXPECT().GetNamespace(gomock.Any(), &persistence.GetNamespaceRequest{Name: name}).Return( + nil, &serviceerror.NamespaceNotFound{}).Times(1) + s.mockMetadataMgr.EXPECT().CreateNamespace(gomock.Any(), &persistence.CreateNamespaceRequest{ + Namespace: &persistencespb.NamespaceDetail{ + Info: &persistencespb.NamespaceInfo{ + Id: id, + State: task.Info.State, + Name: task.Info.Name, + Description: task.Info.Description, + Owner: task.Info.OwnerEmail, + Data: task.Info.Data, + }, + Config: &persistencespb.NamespaceConfig{ + Retention: task.Config.WorkflowExecutionRetentionTtl, + HistoryArchivalState: task.Config.HistoryArchivalState, + HistoryArchivalUri: task.Config.HistoryArchivalUri, + VisibilityArchivalState: task.Config.VisibilityArchivalState, + VisibilityArchivalUri: task.Config.VisibilityArchivalUri, + }, + ReplicationConfig: 
&persistencespb.NamespaceReplicationConfig{ + ActiveClusterName: task.ReplicationConfig.ActiveClusterName, + Clusters: []string{clusterActive, clusterStandby}, + }, + ConfigVersion: configVersion, + FailoverNotificationVersion: 0, + FailoverVersion: failoverVersion, + }, + IsGlobalNamespace: true, + }) + err := s.namespaceReplicator.Execute(context.Background(), task) + s.Nil(err) +} + +func (s *namespaceReplicationTaskExecutorSuite) TestExecute_RegisterNamespaceTask_Duplicate() { + name := uuid.New() + id := uuid.New() + clusterActive := "some random active cluster name" + clusterStandby := "some random standby cluster name" + clusters := []*replicationpb.ClusterReplicationConfig{ + { + ClusterName: clusterActive, + }, + { + ClusterName: clusterStandby, + }, + } + task := &replicationspb.NamespaceTaskAttributes{ + Id: id, + NamespaceOperation: enumsspb.NAMESPACE_OPERATION_CREATE, + Info: &namespacepb.NamespaceInfo{ + Name: name, + State: enumspb.NAMESPACE_STATE_REGISTERED, + }, + Config: &namespacepb.NamespaceConfig{}, + ReplicationConfig: &replicationpb.NamespaceReplicationConfig{ + ActiveClusterName: clusterActive, + Clusters: clusters, + }, + } + s.mockMetadataMgr.EXPECT().GetNamespace(gomock.Any(), &persistence.GetNamespaceRequest{ + Name: name, + }).Return(&persistence.GetNamespaceResponse{Namespace: &persistencespb.NamespaceDetail{ + Info: &persistencespb.NamespaceInfo{ + Id: id, + }, + }}, nil).Times(2) + s.mockMetadataMgr.EXPECT().GetNamespace(gomock.Any(), &persistence.GetNamespaceRequest{ + ID: id, + }).Return(&persistence.GetNamespaceResponse{Namespace: &persistencespb.NamespaceDetail{ + Info: &persistencespb.NamespaceInfo{ + Name: name, + }, + }}, nil).Times(1) + s.mockMetadataMgr.EXPECT().CreateNamespace(gomock.Any(), gomock.Any()).Return(nil, errors.New("test")) + err := s.namespaceReplicator.Execute(context.Background(), task) + s.Nil(err) +} + +func (s *namespaceReplicationTaskExecutorSuite) TestExecute_UpdateNamespaceTask_NamespaceNotExist() { + operation := enumsspb.NAMESPACE_OPERATION_UPDATE + id := uuid.New() + name := "some random namespace test name" + state := enumspb.NAMESPACE_STATE_REGISTERED + description := "some random test description" + ownerEmail := "some random test owner" + retention := 10 * time.Hour * 24 + historyArchivalState := enumspb.ARCHIVAL_STATE_ENABLED + historyArchivalURI := "some random history archival uri" + visibilityArchivalState := enumspb.ARCHIVAL_STATE_ENABLED + visibilityArchivalURI := "some random visibility archival uri" + clusterActive := "some random active cluster name" + clusterStandby := "some random standby cluster name" + configVersion := int64(12) + failoverVersion := int64(59) + namespaceData := map[string]string{"k1": "v1", "k2": "v2"} + clusters := []*replicationpb.ClusterReplicationConfig{ + { + ClusterName: clusterActive, + }, + { + ClusterName: clusterStandby, + }, + } + + updateTask := &replicationspb.NamespaceTaskAttributes{ + NamespaceOperation: operation, + Id: id, + Info: &namespacepb.NamespaceInfo{ + Name: name, + State: state, + Description: description, + OwnerEmail: ownerEmail, + Data: namespaceData, + }, + Config: &namespacepb.NamespaceConfig{ + WorkflowExecutionRetentionTtl: &retention, + HistoryArchivalState: historyArchivalState, + HistoryArchivalUri: historyArchivalURI, + VisibilityArchivalState: visibilityArchivalState, + VisibilityArchivalUri: visibilityArchivalURI, + }, + ReplicationConfig: &replicationpb.NamespaceReplicationConfig{ + ActiveClusterName: clusterActive, + Clusters: clusters, + }, + 
ConfigVersion: configVersion, + FailoverVersion: failoverVersion, + } + + s.mockMetadataMgr.EXPECT().GetMetadata(gomock.Any()).Return(&persistence.GetMetadataResponse{NotificationVersion: 0}, nil) + s.mockMetadataMgr.EXPECT().GetNamespace(gomock.Any(), &persistence.GetNamespaceRequest{Name: name}).Return( + nil, &serviceerror.NamespaceNotFound{}).Times(2) + s.mockMetadataMgr.EXPECT().CreateNamespace(gomock.Any(), &persistence.CreateNamespaceRequest{ + Namespace: &persistencespb.NamespaceDetail{ + Info: &persistencespb.NamespaceInfo{ + Id: id, + State: updateTask.Info.State, + Name: updateTask.Info.Name, + Description: updateTask.Info.Description, + Owner: updateTask.Info.OwnerEmail, + Data: updateTask.Info.Data, + }, + Config: &persistencespb.NamespaceConfig{ + Retention: updateTask.Config.WorkflowExecutionRetentionTtl, + HistoryArchivalState: updateTask.Config.HistoryArchivalState, + HistoryArchivalUri: updateTask.Config.HistoryArchivalUri, + VisibilityArchivalState: updateTask.Config.VisibilityArchivalState, + VisibilityArchivalUri: updateTask.Config.VisibilityArchivalUri, + }, + ReplicationConfig: &persistencespb.NamespaceReplicationConfig{ + ActiveClusterName: updateTask.ReplicationConfig.ActiveClusterName, + Clusters: []string{clusterActive, clusterStandby}, + }, + ConfigVersion: configVersion, + FailoverNotificationVersion: 0, + FailoverVersion: failoverVersion, + }, + IsGlobalNamespace: true, + }) + err := s.namespaceReplicator.Execute(context.Background(), updateTask) + s.Nil(err) +} + +func (s *namespaceReplicationTaskExecutorSuite) TestExecute_UpdateNamespaceTask_UpdateConfig_UpdateActiveCluster() { + id := uuid.New() + name := "some random namespace test name" + updateOperation := enumsspb.NAMESPACE_OPERATION_UPDATE + updateState := enumspb.NAMESPACE_STATE_DEPRECATED + updateDescription := "other random namespace test description" + updateOwnerEmail := "other random namespace test owner" + updatedData := map[string]string{"k": "v1"} + updateRetention := 122 * time.Hour * 24 + updateHistoryArchivalState := enumspb.ARCHIVAL_STATE_DISABLED + updateHistoryArchivalURI := "some updated history archival uri" + updateVisibilityArchivalState := enumspb.ARCHIVAL_STATE_DISABLED + updateVisibilityArchivalURI := "some updated visibility archival uri" + updateClusterActive := "other random active cluster name" + updateClusterStandby := "other random standby cluster name" + updateConfigVersion := int64(1) + updateFailoverVersion := int64(59) + failoverTime := time.Now() + failoverHistory := []*replicationpb.FailoverStatus{ + { + FailoverTime: &failoverTime, + FailoverVersion: 999, + }, + } + updateClusters := []*replicationpb.ClusterReplicationConfig{ + { + ClusterName: updateClusterActive, + }, + { + ClusterName: updateClusterStandby, + }, + } + updateTask := &replicationspb.NamespaceTaskAttributes{ + NamespaceOperation: updateOperation, + Id: id, + Info: &namespacepb.NamespaceInfo{ + Name: name, + State: updateState, + Description: updateDescription, + OwnerEmail: updateOwnerEmail, + Data: updatedData, + }, + Config: &namespacepb.NamespaceConfig{ + WorkflowExecutionRetentionTtl: &updateRetention, + HistoryArchivalState: updateHistoryArchivalState, + HistoryArchivalUri: updateHistoryArchivalURI, + VisibilityArchivalState: updateVisibilityArchivalState, + VisibilityArchivalUri: updateVisibilityArchivalURI, + }, + ReplicationConfig: &replicationpb.NamespaceReplicationConfig{ + ActiveClusterName: updateClusterActive, + Clusters: updateClusters, + }, + ConfigVersion: updateConfigVersion, + 
FailoverVersion: updateFailoverVersion, + FailoverHistory: failoverHistory, + } + + s.namespaceReplicator.currentCluster = updateClusterStandby + s.mockMetadataMgr.EXPECT().GetNamespace(gomock.Any(), &persistence.GetNamespaceRequest{ + Name: name, + }).Return(&persistence.GetNamespaceResponse{Namespace: &persistencespb.NamespaceDetail{ + Info: &persistencespb.NamespaceInfo{ + Id: id, + }, + ReplicationConfig: &persistencespb.NamespaceReplicationConfig{}, + }}, nil).Times(2) + s.mockMetadataMgr.EXPECT().GetMetadata(gomock.Any()).Return(&persistence.GetMetadataResponse{ + NotificationVersion: updateFailoverVersion, + }, nil).Times(1) + s.mockMetadataMgr.EXPECT().UpdateNamespace(gomock.Any(), &persistence.UpdateNamespaceRequest{ + Namespace: &persistencespb.NamespaceDetail{ + Info: &persistencespb.NamespaceInfo{ + Id: id, + State: updateTask.Info.State, + Name: updateTask.Info.Name, + Description: updateTask.Info.Description, + Owner: updateTask.Info.OwnerEmail, + Data: updateTask.Info.Data, + }, + Config: &persistencespb.NamespaceConfig{ + Retention: updateTask.Config.WorkflowExecutionRetentionTtl, + HistoryArchivalState: updateTask.Config.HistoryArchivalState, + HistoryArchivalUri: updateTask.Config.HistoryArchivalUri, + VisibilityArchivalState: updateTask.Config.VisibilityArchivalState, + VisibilityArchivalUri: updateTask.Config.VisibilityArchivalUri, + }, + ReplicationConfig: &persistencespb.NamespaceReplicationConfig{ + ActiveClusterName: updateTask.ReplicationConfig.ActiveClusterName, + Clusters: []string{updateClusterActive, updateClusterStandby}, + FailoverHistory: convertFailoverHistoryToPersistenceProto(failoverHistory), + }, + ConfigVersion: updateConfigVersion, + FailoverNotificationVersion: updateFailoverVersion, + FailoverVersion: updateFailoverVersion, + }, + IsGlobalNamespace: false, + NotificationVersion: updateFailoverVersion, + }) + err := s.namespaceReplicator.Execute(context.Background(), updateTask) + s.Nil(err) +} + +func (s *namespaceReplicationTaskExecutorSuite) TestExecute_UpdateNamespaceTask_UpdateConfig_NoUpdateActiveCluster() { + id := uuid.New() + name := "some random namespace test name" + updateOperation := enumsspb.NAMESPACE_OPERATION_UPDATE + updateState := enumspb.NAMESPACE_STATE_DEPRECATED + updateDescription := "other random namespace test description" + updateOwnerEmail := "other random namespace test owner" + updatedData := map[string]string{"k": "v1"} + updateRetention := 122 * time.Hour * 24 + updateHistoryArchivalState := enumspb.ARCHIVAL_STATE_DISABLED + updateHistoryArchivalURI := "some updated history archival uri" + updateVisibilityArchivalState := enumspb.ARCHIVAL_STATE_DISABLED + updateVisibilityArchivalURI := "some updated visibility archival uri" + updateClusterActive := "other random active cluster name" + updateClusterStandby := "other random standby cluster name" + updateConfigVersion := int64(1) + updateFailoverVersion := int64(59) + updateClusters := []*replicationpb.ClusterReplicationConfig{ + { + ClusterName: updateClusterActive, + }, + { + ClusterName: updateClusterStandby, + }, + } + updateTask := &replicationspb.NamespaceTaskAttributes{ + NamespaceOperation: updateOperation, + Id: id, + Info: &namespacepb.NamespaceInfo{ + Name: name, + State: updateState, + Description: updateDescription, + OwnerEmail: updateOwnerEmail, + Data: updatedData, + }, + Config: &namespacepb.NamespaceConfig{ + WorkflowExecutionRetentionTtl: &updateRetention, + HistoryArchivalState: updateHistoryArchivalState, + HistoryArchivalUri: updateHistoryArchivalURI, 
+ VisibilityArchivalState: updateVisibilityArchivalState, + VisibilityArchivalUri: updateVisibilityArchivalURI, + }, + ReplicationConfig: &replicationpb.NamespaceReplicationConfig{ + ActiveClusterName: updateClusterActive, + Clusters: updateClusters, + }, + ConfigVersion: updateConfigVersion, + FailoverVersion: updateFailoverVersion, + } + + s.namespaceReplicator.currentCluster = updateClusterStandby + s.mockMetadataMgr.EXPECT().GetNamespace(gomock.Any(), &persistence.GetNamespaceRequest{ + Name: name, + }).Return(&persistence.GetNamespaceResponse{Namespace: &persistencespb.NamespaceDetail{ + Info: &persistencespb.NamespaceInfo{ + Id: id, + }, + ReplicationConfig: &persistencespb.NamespaceReplicationConfig{}, + FailoverVersion: updateFailoverVersion + 1, + }}, nil).Times(2) + s.mockMetadataMgr.EXPECT().GetMetadata(gomock.Any()).Return(&persistence.GetMetadataResponse{ + NotificationVersion: updateFailoverVersion, + }, nil).Times(1) + s.mockMetadataMgr.EXPECT().UpdateNamespace(gomock.Any(), &persistence.UpdateNamespaceRequest{ + Namespace: &persistencespb.NamespaceDetail{ + Info: &persistencespb.NamespaceInfo{ + Id: id, + State: updateTask.Info.State, + Name: updateTask.Info.Name, + Description: updateTask.Info.Description, + Owner: updateTask.Info.OwnerEmail, + Data: updateTask.Info.Data, + }, + Config: &persistencespb.NamespaceConfig{ + Retention: updateTask.Config.WorkflowExecutionRetentionTtl, + HistoryArchivalState: updateTask.Config.HistoryArchivalState, + HistoryArchivalUri: updateTask.Config.HistoryArchivalUri, + VisibilityArchivalState: updateTask.Config.VisibilityArchivalState, + VisibilityArchivalUri: updateTask.Config.VisibilityArchivalUri, + }, + ReplicationConfig: &persistencespb.NamespaceReplicationConfig{ + Clusters: []string{updateClusterActive, updateClusterStandby}, + }, + ConfigVersion: updateConfigVersion, + FailoverNotificationVersion: 0, + FailoverVersion: updateFailoverVersion + 1, + }, + IsGlobalNamespace: false, + NotificationVersion: updateFailoverVersion, + }) + err := s.namespaceReplicator.Execute(context.Background(), updateTask) + s.Nil(err) +} + +func (s *namespaceReplicationTaskExecutorSuite) TestExecute_UpdateNamespaceTask_NoUpdateConfig_UpdateActiveCluster() { + id := uuid.New() + name := "some random namespace test name" + updateOperation := enumsspb.NAMESPACE_OPERATION_UPDATE + updateState := enumspb.NAMESPACE_STATE_DEPRECATED + updateDescription := "other random namespace test description" + updateOwnerEmail := "other random namespace test owner" + updatedData := map[string]string{"k": "v1"} + updateRetention := 122 * time.Hour * 24 + updateHistoryArchivalState := enumspb.ARCHIVAL_STATE_DISABLED + updateHistoryArchivalURI := "some updated history archival uri" + updateVisibilityArchivalState := enumspb.ARCHIVAL_STATE_DISABLED + updateVisibilityArchivalURI := "some updated visibility archival uri" + updateClusterActive := "other random active cluster name" + updateClusterStandby := "other random standby cluster name" + updateConfigVersion := int64(1) + updateFailoverVersion := int64(59) + updateClusters := []*replicationpb.ClusterReplicationConfig{ + { + ClusterName: updateClusterActive, + }, + { + ClusterName: updateClusterStandby, + }, + } + updateTask := &replicationspb.NamespaceTaskAttributes{ + NamespaceOperation: updateOperation, + Id: id, + Info: &namespacepb.NamespaceInfo{ + Name: name, + State: updateState, + Description: updateDescription, + OwnerEmail: updateOwnerEmail, + Data: updatedData, + }, + Config: &namespacepb.NamespaceConfig{ + 
WorkflowExecutionRetentionTtl: &updateRetention, + HistoryArchivalState: updateHistoryArchivalState, + HistoryArchivalUri: updateHistoryArchivalURI, + VisibilityArchivalState: updateVisibilityArchivalState, + VisibilityArchivalUri: updateVisibilityArchivalURI, + }, + ReplicationConfig: &replicationpb.NamespaceReplicationConfig{ + ActiveClusterName: updateClusterActive, + Clusters: updateClusters, + }, + ConfigVersion: updateConfigVersion, + FailoverVersion: updateFailoverVersion, + } + + s.namespaceReplicator.currentCluster = updateClusterStandby + s.mockMetadataMgr.EXPECT().GetNamespace(gomock.Any(), &persistence.GetNamespaceRequest{ + Name: name, + }).Return(&persistence.GetNamespaceResponse{Namespace: &persistencespb.NamespaceDetail{ + Info: &persistencespb.NamespaceInfo{ + Id: id, + }, + ReplicationConfig: &persistencespb.NamespaceReplicationConfig{}, + ConfigVersion: updateConfigVersion + 1, + }}, nil).Times(2) + s.mockMetadataMgr.EXPECT().GetMetadata(gomock.Any()).Return(&persistence.GetMetadataResponse{ + NotificationVersion: updateFailoverVersion, + }, nil).Times(1) + s.mockMetadataMgr.EXPECT().UpdateNamespace(gomock.Any(), &persistence.UpdateNamespaceRequest{ + Namespace: &persistencespb.NamespaceDetail{ + Info: &persistencespb.NamespaceInfo{ + Id: id, + }, + ReplicationConfig: &persistencespb.NamespaceReplicationConfig{ + ActiveClusterName: updateClusterActive, + }, + ConfigVersion: updateConfigVersion + 1, + FailoverNotificationVersion: updateFailoverVersion, + FailoverVersion: updateFailoverVersion, + }, + IsGlobalNamespace: false, + NotificationVersion: updateFailoverVersion, + }) + err := s.namespaceReplicator.Execute(context.Background(), updateTask) + s.Nil(err) +} + +func (s *namespaceReplicationTaskExecutorSuite) TestExecute_UpdateNamespaceTask_NoUpdateConfig_NoUpdateActiveCluster() { + id := uuid.New() + name := "some random namespace test name" + updateOperation := enumsspb.NAMESPACE_OPERATION_UPDATE + updateState := enumspb.NAMESPACE_STATE_DEPRECATED + updateDescription := "other random namespace test description" + updateOwnerEmail := "other random namespace test owner" + updatedData := map[string]string{"k": "v1"} + updateRetention := 122 * time.Hour * 24 + updateHistoryArchivalState := enumspb.ARCHIVAL_STATE_DISABLED + updateHistoryArchivalURI := "some updated history archival uri" + updateVisibilityArchivalState := enumspb.ARCHIVAL_STATE_DISABLED + updateVisibilityArchivalURI := "some updated visibility archival uri" + updateClusterActive := "other random active cluster name" + updateClusterStandby := "other random standby cluster name" + updateConfigVersion := int64(1) + updateFailoverVersion := int64(59) + updateClusters := []*replicationpb.ClusterReplicationConfig{ + { + ClusterName: updateClusterActive, + }, + { + ClusterName: updateClusterStandby, + }, + } + updateTask := &replicationspb.NamespaceTaskAttributes{ + NamespaceOperation: updateOperation, + Id: id, + Info: &namespacepb.NamespaceInfo{ + Name: name, + State: updateState, + Description: updateDescription, + OwnerEmail: updateOwnerEmail, + Data: updatedData, + }, + Config: &namespacepb.NamespaceConfig{ + WorkflowExecutionRetentionTtl: &updateRetention, + HistoryArchivalState: updateHistoryArchivalState, + HistoryArchivalUri: updateHistoryArchivalURI, + VisibilityArchivalState: updateVisibilityArchivalState, + VisibilityArchivalUri: updateVisibilityArchivalURI, + }, + ReplicationConfig: &replicationpb.NamespaceReplicationConfig{ + ActiveClusterName: updateClusterActive, + Clusters: updateClusters, + }, 
+ ConfigVersion: updateConfigVersion, + FailoverVersion: updateFailoverVersion, + } + + s.namespaceReplicator.currentCluster = updateClusterStandby + s.mockMetadataMgr.EXPECT().GetNamespace(gomock.Any(), &persistence.GetNamespaceRequest{ + Name: name, + }).Return(&persistence.GetNamespaceResponse{Namespace: &persistencespb.NamespaceDetail{ + Info: &persistencespb.NamespaceInfo{ + Id: id, + }, + ReplicationConfig: &persistencespb.NamespaceReplicationConfig{}, + ConfigVersion: updateConfigVersion + 1, + FailoverVersion: updateFailoverVersion + 1, + }}, nil).Times(2) + s.mockMetadataMgr.EXPECT().GetMetadata(gomock.Any()).Return(&persistence.GetMetadataResponse{ + NotificationVersion: updateFailoverVersion, + }, nil).Times(1) + + s.mockMetadataMgr.EXPECT().UpdateNamespace(gomock.Any(), gomock.Any()).Times(0) + err := s.namespaceReplicator.Execute(context.Background(), updateTask) + s.Nil(err) +} diff -Nru temporal-1.21.5-1/src/common/namespace/replication_task_handler_mock.go temporal-1.22.5/src/common/namespace/replication_task_handler_mock.go --- temporal-1.21.5-1/src/common/namespace/replication_task_handler_mock.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/namespace/replication_task_handler_mock.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,74 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Code generated by MockGen. DO NOT EDIT. +// Source: replication_task_executor.go + +// Package namespace is a generated GoMock package. +package namespace + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + repication "go.temporal.io/server/api/replication/v1" +) + +// MockReplicationTaskExecutor is a mock of ReplicationTaskExecutor interface. +type MockReplicationTaskExecutor struct { + ctrl *gomock.Controller + recorder *MockReplicationTaskExecutorMockRecorder +} + +// MockReplicationTaskExecutorMockRecorder is the mock recorder for MockReplicationTaskExecutor. +type MockReplicationTaskExecutorMockRecorder struct { + mock *MockReplicationTaskExecutor +} + +// NewMockReplicationTaskExecutor creates a new mock instance. 
+func NewMockReplicationTaskExecutor(ctrl *gomock.Controller) *MockReplicationTaskExecutor { + mock := &MockReplicationTaskExecutor{ctrl: ctrl} + mock.recorder = &MockReplicationTaskExecutorMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockReplicationTaskExecutor) EXPECT() *MockReplicationTaskExecutorMockRecorder { + return m.recorder +} + +// Execute mocks base method. +func (m *MockReplicationTaskExecutor) Execute(ctx context.Context, task *repication.NamespaceTaskAttributes) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Execute", ctx, task) + ret0, _ := ret[0].(error) + return ret0 +} + +// Execute indicates an expected call of Execute. +func (mr *MockReplicationTaskExecutorMockRecorder) Execute(ctx, task interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Execute", reflect.TypeOf((*MockReplicationTaskExecutor)(nil).Execute), ctx, task) +} diff -Nru temporal-1.21.5-1/src/common/namespace/transmissionTaskHandler.go temporal-1.22.5/src/common/namespace/transmissionTaskHandler.go --- temporal-1.21.5-1/src/common/namespace/transmissionTaskHandler.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/namespace/transmissionTaskHandler.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,163 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- -package namespace - -import ( - "context" - - enumspb "go.temporal.io/api/enums/v1" - namespacepb "go.temporal.io/api/namespace/v1" - replicationpb "go.temporal.io/api/replication/v1" - - enumsspb "go.temporal.io/server/api/enums/v1" - persistencespb "go.temporal.io/server/api/persistence/v1" - replicationspb "go.temporal.io/server/api/replication/v1" - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/persistence" -) - -// NOTE: the counterpart of namespace replication receiving logic is in service/worker package - -type ( - // Replicator is the interface which can replicate the namespace - Replicator interface { - HandleTransmissionTask( - ctx context.Context, - namespaceOperation enumsspb.NamespaceOperation, - info *persistencespb.NamespaceInfo, - config *persistencespb.NamespaceConfig, - replicationConfig *persistencespb.NamespaceReplicationConfig, - replicationClusterListUpdated bool, - configVersion int64, - failoverVersion int64, - isGlobalNamespace bool, - failoverHistoy []*persistencespb.FailoverStatus, - ) error - } - - namespaceReplicatorImpl struct { - namespaceReplicationQueue persistence.NamespaceReplicationQueue - logger log.Logger - } -) - -// NewNamespaceReplicator create a new instance of namespace replicator -func NewNamespaceReplicator( - namespaceReplicationQueue persistence.NamespaceReplicationQueue, - logger log.Logger, -) Replicator { - return &namespaceReplicatorImpl{ - namespaceReplicationQueue: namespaceReplicationQueue, - logger: logger, - } -} - -// HandleTransmissionTask handle transmission of the namespace replication task -func (namespaceReplicator *namespaceReplicatorImpl) HandleTransmissionTask( - ctx context.Context, - namespaceOperation enumsspb.NamespaceOperation, - info *persistencespb.NamespaceInfo, - config *persistencespb.NamespaceConfig, - replicationConfig *persistencespb.NamespaceReplicationConfig, - replicationClusterListUpdated bool, - configVersion int64, - failoverVersion int64, - isGlobalNamespace bool, - failoverHistoy []*persistencespb.FailoverStatus, -) error { - - if !isGlobalNamespace { - return nil - } - if len(replicationConfig.Clusters) <= 1 && !replicationClusterListUpdated { - return nil - } - if info.State == enumspb.NAMESPACE_STATE_DELETED { - // Don't replicate deleted namespace changes. 
- return nil - } - - taskType := enumsspb.REPLICATION_TASK_TYPE_NAMESPACE_TASK - task := &replicationspb.ReplicationTask_NamespaceTaskAttributes{ - NamespaceTaskAttributes: &replicationspb.NamespaceTaskAttributes{ - NamespaceOperation: namespaceOperation, - Id: info.Id, - Info: &namespacepb.NamespaceInfo{ - Name: info.Name, - State: info.State, - Description: info.Description, - OwnerEmail: info.Owner, - Data: info.Data, - }, - Config: &namespacepb.NamespaceConfig{ - WorkflowExecutionRetentionTtl: config.Retention, - HistoryArchivalState: config.HistoryArchivalState, - HistoryArchivalUri: config.HistoryArchivalUri, - VisibilityArchivalState: config.VisibilityArchivalState, - VisibilityArchivalUri: config.VisibilityArchivalUri, - BadBinaries: config.BadBinaries, - CustomSearchAttributeAliases: config.CustomSearchAttributeAliases, - }, - ReplicationConfig: &replicationpb.NamespaceReplicationConfig{ - ActiveClusterName: replicationConfig.ActiveClusterName, - Clusters: convertClusterReplicationConfigToProto(replicationConfig.Clusters), - }, - ConfigVersion: configVersion, - FailoverVersion: failoverVersion, - FailoverHistory: convertFailoverHistoryToReplicationProto(failoverHistoy), - }, - } - - return namespaceReplicator.namespaceReplicationQueue.Publish( - ctx, - &replicationspb.ReplicationTask{ - TaskType: taskType, - Attributes: task, - }) -} - -func convertClusterReplicationConfigToProto( - input []string, -) []*replicationpb.ClusterReplicationConfig { - output := make([]*replicationpb.ClusterReplicationConfig, 0, len(input)) - for _, clusterName := range input { - output = append(output, &replicationpb.ClusterReplicationConfig{ClusterName: clusterName}) - } - return output -} - -func convertFailoverHistoryToReplicationProto( - failoverHistoy []*persistencespb.FailoverStatus, -) []*replicationpb.FailoverStatus { - var replicationProto []*replicationpb.FailoverStatus - for _, failoverStatus := range failoverHistoy { - replicationProto = append(replicationProto, &replicationpb.FailoverStatus{ - FailoverTime: failoverStatus.GetFailoverTime(), - FailoverVersion: failoverStatus.GetFailoverVersion(), - }) - } - - return replicationProto -} diff -Nru temporal-1.21.5-1/src/common/namespace/transmissionTaskHandler_test.go temporal-1.22.5/src/common/namespace/transmissionTaskHandler_test.go --- temporal-1.21.5-1/src/common/namespace/transmissionTaskHandler_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/namespace/transmissionTaskHandler_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,465 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package namespace - -import ( - "context" - "testing" - "time" - - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/suite" - enumspb "go.temporal.io/api/enums/v1" - namespacepb "go.temporal.io/api/namespace/v1" - replicationpb "go.temporal.io/api/replication/v1" - - enumsspb "go.temporal.io/server/api/enums/v1" - persistencespb "go.temporal.io/server/api/persistence/v1" - replicationspb "go.temporal.io/server/api/replication/v1" - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/persistence" - "go.temporal.io/server/common/primitives" -) - -type ( - transmissionTaskSuite struct { - suite.Suite - - controller *gomock.Controller - - namespaceReplicator *namespaceReplicatorImpl - namespaceReplicationQueue *persistence.MockNamespaceReplicationQueue - } -) - -func TestTransmissionTaskSuite(t *testing.T) { - s := new(transmissionTaskSuite) - suite.Run(t, s) -} - -func (s *transmissionTaskSuite) SetupSuite() { -} - -func (s *transmissionTaskSuite) TearDownSuite() { - -} - -func (s *transmissionTaskSuite) SetupTest() { - s.controller = gomock.NewController(s.T()) - s.namespaceReplicationQueue = persistence.NewMockNamespaceReplicationQueue(s.controller) - s.namespaceReplicator = NewNamespaceReplicator( - s.namespaceReplicationQueue, - log.NewTestLogger(), - ).(*namespaceReplicatorImpl) -} - -func (s *transmissionTaskSuite) TearDownTest() { - s.controller.Finish() -} - -func (s *transmissionTaskSuite) TestHandleTransmissionTask_RegisterNamespaceTask_IsGlobalNamespace() { - taskType := enumsspb.REPLICATION_TASK_TYPE_NAMESPACE_TASK - id := primitives.NewUUID().String() - name := "some random namespace test name" - state := enumspb.NAMESPACE_STATE_REGISTERED - description := "some random test description" - ownerEmail := "some random test owner" - data := map[string]string{"k": "v"} - retention := 10 * time.Hour * 24 - historyArchivalState := enumspb.ARCHIVAL_STATE_ENABLED - historyArchivalURI := "some random history archival uri" - visibilityArchivalState := enumspb.ARCHIVAL_STATE_ENABLED - visibilityArchivalURI := "some random visibility archival uri" - clusterActive := "some random active cluster name" - clusterStandby := "some random standby cluster name" - configVersion := int64(0) - failoverVersion := int64(59) - clusters := []string{clusterActive, clusterStandby} - - namespaceOperation := enumsspb.NAMESPACE_OPERATION_CREATE - info := &persistencespb.NamespaceInfo{ - Id: id, - Name: name, - State: enumspb.NAMESPACE_STATE_REGISTERED, - Description: description, - Owner: ownerEmail, - Data: data, - } - config := &persistencespb.NamespaceConfig{ - Retention: &retention, - HistoryArchivalState: historyArchivalState, - HistoryArchivalUri: historyArchivalURI, - VisibilityArchivalState: visibilityArchivalState, - VisibilityArchivalUri: visibilityArchivalURI, - BadBinaries: &namespacepb.BadBinaries{Binaries: map[string]*namespacepb.BadBinaryInfo{}}, - } - replicationConfig := &persistencespb.NamespaceReplicationConfig{ - ActiveClusterName: clusterActive, - Clusters: clusters, - } - isGlobalNamespace := true - - s.namespaceReplicationQueue.EXPECT().Publish(gomock.Any(), &replicationspb.ReplicationTask{ - TaskType: taskType, - Attributes: &replicationspb.ReplicationTask_NamespaceTaskAttributes{ - 
NamespaceTaskAttributes: &replicationspb.NamespaceTaskAttributes{ - NamespaceOperation: namespaceOperation, - Id: id, - Info: &namespacepb.NamespaceInfo{ - Name: name, - State: state, - Description: description, - OwnerEmail: ownerEmail, - Data: data, - }, - Config: &namespacepb.NamespaceConfig{ - WorkflowExecutionRetentionTtl: &retention, - HistoryArchivalState: historyArchivalState, - HistoryArchivalUri: historyArchivalURI, - VisibilityArchivalState: visibilityArchivalState, - VisibilityArchivalUri: visibilityArchivalURI, - BadBinaries: &namespacepb.BadBinaries{Binaries: map[string]*namespacepb.BadBinaryInfo{}}, - }, - ReplicationConfig: &replicationpb.NamespaceReplicationConfig{ - ActiveClusterName: clusterActive, - Clusters: convertClusterReplicationConfigToProto(clusters), - }, - ConfigVersion: configVersion, - FailoverVersion: failoverVersion, - }, - }, - }).Return(nil) - - err := s.namespaceReplicator.HandleTransmissionTask( - context.Background(), - namespaceOperation, - info, - config, - replicationConfig, - true, - configVersion, - failoverVersion, - isGlobalNamespace, - nil, - ) - s.Nil(err) -} - -func (s *transmissionTaskSuite) TestHandleTransmissionTask_RegisterNamespaceTask_NotGlobalNamespace() { - id := primitives.NewUUID().String() - name := "some random namespace test name" - description := "some random test description" - ownerEmail := "some random test owner" - data := map[string]string{"k": "v"} - retention := 10 * time.Hour * 24 - historyArchivalState := enumspb.ARCHIVAL_STATE_ENABLED - historyArchivalURI := "some random history archival uri" - visibilityArchivalState := enumspb.ARCHIVAL_STATE_ENABLED - visibilityArchivalURI := "some random visibility archival uri" - clusterActive := "some random active cluster name" - clusterStandby := "some random standby cluster name" - configVersion := int64(0) - failoverVersion := int64(59) - clusters := []string{clusterActive, clusterStandby} - - namespaceOperation := enumsspb.NAMESPACE_OPERATION_CREATE - info := &persistencespb.NamespaceInfo{ - Id: id, - Name: name, - State: enumspb.NAMESPACE_STATE_REGISTERED, - Description: description, - Owner: ownerEmail, - Data: data, - } - config := &persistencespb.NamespaceConfig{ - Retention: &retention, - HistoryArchivalState: historyArchivalState, - HistoryArchivalUri: historyArchivalURI, - VisibilityArchivalState: visibilityArchivalState, - VisibilityArchivalUri: visibilityArchivalURI, - BadBinaries: &namespacepb.BadBinaries{}, - } - replicationConfig := &persistencespb.NamespaceReplicationConfig{ - ActiveClusterName: clusterActive, - Clusters: clusters, - } - isGlobalNamespace := false - - err := s.namespaceReplicator.HandleTransmissionTask( - context.Background(), - namespaceOperation, - info, - config, - replicationConfig, - true, - configVersion, - failoverVersion, - isGlobalNamespace, - nil, - ) - s.Nil(err) -} - -func (s *transmissionTaskSuite) TestHandleTransmissionTask_UpdateNamespaceTask_IsGlobalNamespace() { - taskType := enumsspb.REPLICATION_TASK_TYPE_NAMESPACE_TASK - id := primitives.NewUUID().String() - name := "some random namespace test name" - state := enumspb.NAMESPACE_STATE_DEPRECATED - description := "some random test description" - ownerEmail := "some random test owner" - data := map[string]string{"k": "v"} - retention := 10 * time.Hour * 24 - historyArchivalState := enumspb.ARCHIVAL_STATE_ENABLED - historyArchivalURI := "some random history archival uri" - visibilityArchivalState := enumspb.ARCHIVAL_STATE_ENABLED - visibilityArchivalURI := "some random visibility 
archival uri" - clusterActive := "some random active cluster name" - clusterStandby := "some random standby cluster name" - configVersion := int64(0) - failoverVersion := int64(59) - clusters := []string{clusterActive, clusterStandby} - - namespaceOperation := enumsspb.NAMESPACE_OPERATION_UPDATE - info := &persistencespb.NamespaceInfo{ - Id: id, - Name: name, - State: enumspb.NAMESPACE_STATE_DEPRECATED, - Description: description, - Owner: ownerEmail, - Data: data, - } - config := &persistencespb.NamespaceConfig{ - Retention: &retention, - HistoryArchivalState: historyArchivalState, - HistoryArchivalUri: historyArchivalURI, - VisibilityArchivalState: visibilityArchivalState, - VisibilityArchivalUri: visibilityArchivalURI, - BadBinaries: &namespacepb.BadBinaries{Binaries: map[string]*namespacepb.BadBinaryInfo{}}, - } - replicationConfig := &persistencespb.NamespaceReplicationConfig{ - ActiveClusterName: clusterActive, - Clusters: clusters, - } - isGlobalNamespace := true - - s.namespaceReplicationQueue.EXPECT().Publish(gomock.Any(), &replicationspb.ReplicationTask{ - TaskType: taskType, - Attributes: &replicationspb.ReplicationTask_NamespaceTaskAttributes{ - NamespaceTaskAttributes: &replicationspb.NamespaceTaskAttributes{ - NamespaceOperation: namespaceOperation, - Id: id, - Info: &namespacepb.NamespaceInfo{ - Name: name, - State: state, - Description: description, - OwnerEmail: ownerEmail, - Data: data, - }, - Config: &namespacepb.NamespaceConfig{ - WorkflowExecutionRetentionTtl: &retention, - HistoryArchivalState: historyArchivalState, - HistoryArchivalUri: historyArchivalURI, - VisibilityArchivalState: visibilityArchivalState, - VisibilityArchivalUri: visibilityArchivalURI, - BadBinaries: &namespacepb.BadBinaries{Binaries: map[string]*namespacepb.BadBinaryInfo{}}, - }, - ReplicationConfig: &replicationpb.NamespaceReplicationConfig{ - ActiveClusterName: clusterActive, - Clusters: convertClusterReplicationConfigToProto(clusters), - }, - ConfigVersion: configVersion, - FailoverVersion: failoverVersion}, - }, - }).Return(nil) - - err := s.namespaceReplicator.HandleTransmissionTask( - context.Background(), - namespaceOperation, - info, - config, - replicationConfig, - true, - configVersion, - failoverVersion, - isGlobalNamespace, - nil, - ) - s.Nil(err) -} - -func (s *transmissionTaskSuite) TestHandleTransmissionTask_UpdateNamespaceTask_NotGlobalNamespace() { - id := primitives.NewUUID().String() - name := "some random namespace test name" - description := "some random test description" - ownerEmail := "some random test owner" - data := map[string]string{"k": "v"} - retention := 10 * time.Hour * 24 - historyArchivalState := enumspb.ARCHIVAL_STATE_ENABLED - historyArchivalURI := "some random history archival uri" - visibilityArchivalState := enumspb.ARCHIVAL_STATE_ENABLED - visibilityArchivalURI := "some random visibility archival uri" - clusterActive := "some random active cluster name" - clusterStandby := "some random standby cluster name" - configVersion := int64(0) - failoverVersion := int64(59) - clusters := []string{clusterActive, clusterStandby} - - namespaceOperation := enumsspb.NAMESPACE_OPERATION_UPDATE - info := &persistencespb.NamespaceInfo{ - Id: id, - Name: name, - State: enumspb.NAMESPACE_STATE_DEPRECATED, - Description: description, - Owner: ownerEmail, - Data: data, - } - config := &persistencespb.NamespaceConfig{ - Retention: &retention, - HistoryArchivalState: historyArchivalState, - HistoryArchivalUri: historyArchivalURI, - VisibilityArchivalState: visibilityArchivalState, 
- VisibilityArchivalUri: visibilityArchivalURI, - } - replicationConfig := &persistencespb.NamespaceReplicationConfig{ - ActiveClusterName: clusterActive, - Clusters: clusters, - } - isGlobalNamespace := false - - err := s.namespaceReplicator.HandleTransmissionTask( - context.Background(), - namespaceOperation, - info, - config, - replicationConfig, - true, - configVersion, - failoverVersion, - isGlobalNamespace, - nil, - ) - s.Nil(err) -} - -func (s *transmissionTaskSuite) TestHandleTransmissionTask_UpdateNamespaceTask_ReplicationClusterListUpdated() { - taskType := enumsspb.REPLICATION_TASK_TYPE_NAMESPACE_TASK - id := primitives.NewUUID().String() - name := "some random namespace test name" - state := enumspb.NAMESPACE_STATE_DEPRECATED - description := "some random test description" - ownerEmail := "some random test owner" - data := map[string]string{"k": "v"} - retention := 10 * time.Hour * 24 - historyArchivalState := enumspb.ARCHIVAL_STATE_ENABLED - historyArchivalURI := "some random history archival uri" - visibilityArchivalState := enumspb.ARCHIVAL_STATE_ENABLED - visibilityArchivalURI := "some random visibility archival uri" - clusterActive := "some random active cluster name" - configVersion := int64(0) - failoverVersion := int64(59) - singleClusterList := []string{clusterActive} - - namespaceOperation := enumsspb.NAMESPACE_OPERATION_UPDATE - info := &persistencespb.NamespaceInfo{ - Id: id, - Name: name, - State: enumspb.NAMESPACE_STATE_DEPRECATED, - Description: description, - Owner: ownerEmail, - Data: data, - } - config := &persistencespb.NamespaceConfig{ - Retention: &retention, - HistoryArchivalState: historyArchivalState, - HistoryArchivalUri: historyArchivalURI, - VisibilityArchivalState: visibilityArchivalState, - VisibilityArchivalUri: visibilityArchivalURI, - BadBinaries: &namespacepb.BadBinaries{Binaries: map[string]*namespacepb.BadBinaryInfo{}}, - } - replicationConfig := &persistencespb.NamespaceReplicationConfig{ - ActiveClusterName: clusterActive, - Clusters: singleClusterList, - } - - isGlobalNamespace := true - - s.namespaceReplicationQueue.EXPECT().Publish(gomock.Any(), &replicationspb.ReplicationTask{ - TaskType: taskType, - Attributes: &replicationspb.ReplicationTask_NamespaceTaskAttributes{ - NamespaceTaskAttributes: &replicationspb.NamespaceTaskAttributes{ - NamespaceOperation: namespaceOperation, - Id: id, - Info: &namespacepb.NamespaceInfo{ - Name: name, - State: state, - Description: description, - OwnerEmail: ownerEmail, - Data: data, - }, - Config: &namespacepb.NamespaceConfig{ - WorkflowExecutionRetentionTtl: &retention, - HistoryArchivalState: historyArchivalState, - HistoryArchivalUri: historyArchivalURI, - VisibilityArchivalState: visibilityArchivalState, - VisibilityArchivalUri: visibilityArchivalURI, - BadBinaries: &namespacepb.BadBinaries{Binaries: map[string]*namespacepb.BadBinaryInfo{}}, - }, - ReplicationConfig: &replicationpb.NamespaceReplicationConfig{ - ActiveClusterName: clusterActive, - Clusters: convertClusterReplicationConfigToProto(singleClusterList), - }, - ConfigVersion: configVersion, - FailoverVersion: failoverVersion}, - }, - }).Return(nil).Times(1) - - err := s.namespaceReplicator.HandleTransmissionTask( - context.Background(), - namespaceOperation, - info, - config, - replicationConfig, - true, - configVersion, - failoverVersion, - isGlobalNamespace, - nil, - ) - s.Nil(err) - - err = s.namespaceReplicator.HandleTransmissionTask( - context.Background(), - namespaceOperation, - info, - config, - replicationConfig, - false, - 
configVersion, - failoverVersion, - isGlobalNamespace, - nil, - ) - s.Nil(err) -} diff -Nru temporal-1.21.5-1/src/common/namespace/transmission_task_handler.go temporal-1.22.5/src/common/namespace/transmission_task_handler.go --- temporal-1.21.5-1/src/common/namespace/transmission_task_handler.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/namespace/transmission_task_handler.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,163 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package namespace + +import ( + "context" + + enumspb "go.temporal.io/api/enums/v1" + namespacepb "go.temporal.io/api/namespace/v1" + replicationpb "go.temporal.io/api/replication/v1" + + enumsspb "go.temporal.io/server/api/enums/v1" + persistencespb "go.temporal.io/server/api/persistence/v1" + replicationspb "go.temporal.io/server/api/replication/v1" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/persistence" +) + +// NOTE: the counterpart of namespace replication receiving logic is in service/worker package + +type ( + // Replicator is the interface which can replicate the namespace + Replicator interface { + HandleTransmissionTask( + ctx context.Context, + namespaceOperation enumsspb.NamespaceOperation, + info *persistencespb.NamespaceInfo, + config *persistencespb.NamespaceConfig, + replicationConfig *persistencespb.NamespaceReplicationConfig, + replicationClusterListUpdated bool, + configVersion int64, + failoverVersion int64, + isGlobalNamespace bool, + failoverHistoy []*persistencespb.FailoverStatus, + ) error + } + + namespaceReplicatorImpl struct { + namespaceReplicationQueue persistence.NamespaceReplicationQueue + logger log.Logger + } +) + +// NewNamespaceReplicator create a new instance of namespace replicator +func NewNamespaceReplicator( + namespaceReplicationQueue persistence.NamespaceReplicationQueue, + logger log.Logger, +) Replicator { + return &namespaceReplicatorImpl{ + namespaceReplicationQueue: namespaceReplicationQueue, + logger: logger, + } +} + +// HandleTransmissionTask handle transmission of the namespace replication task +func (namespaceReplicator *namespaceReplicatorImpl) HandleTransmissionTask( + ctx context.Context, + namespaceOperation enumsspb.NamespaceOperation, + info *persistencespb.NamespaceInfo, + config *persistencespb.NamespaceConfig, + replicationConfig 
*persistencespb.NamespaceReplicationConfig, + replicationClusterListUpdated bool, + configVersion int64, + failoverVersion int64, + isGlobalNamespace bool, + failoverHistoy []*persistencespb.FailoverStatus, +) error { + + if !isGlobalNamespace { + return nil + } + if len(replicationConfig.Clusters) <= 1 && !replicationClusterListUpdated { + return nil + } + if info.State == enumspb.NAMESPACE_STATE_DELETED { + // Don't replicate deleted namespace changes. + return nil + } + + taskType := enumsspb.REPLICATION_TASK_TYPE_NAMESPACE_TASK + task := &replicationspb.ReplicationTask_NamespaceTaskAttributes{ + NamespaceTaskAttributes: &replicationspb.NamespaceTaskAttributes{ + NamespaceOperation: namespaceOperation, + Id: info.Id, + Info: &namespacepb.NamespaceInfo{ + Name: info.Name, + State: info.State, + Description: info.Description, + OwnerEmail: info.Owner, + Data: info.Data, + }, + Config: &namespacepb.NamespaceConfig{ + WorkflowExecutionRetentionTtl: config.Retention, + HistoryArchivalState: config.HistoryArchivalState, + HistoryArchivalUri: config.HistoryArchivalUri, + VisibilityArchivalState: config.VisibilityArchivalState, + VisibilityArchivalUri: config.VisibilityArchivalUri, + BadBinaries: config.BadBinaries, + CustomSearchAttributeAliases: config.CustomSearchAttributeAliases, + }, + ReplicationConfig: &replicationpb.NamespaceReplicationConfig{ + ActiveClusterName: replicationConfig.ActiveClusterName, + Clusters: convertClusterReplicationConfigToProto(replicationConfig.Clusters), + }, + ConfigVersion: configVersion, + FailoverVersion: failoverVersion, + FailoverHistory: convertFailoverHistoryToReplicationProto(failoverHistoy), + }, + } + + return namespaceReplicator.namespaceReplicationQueue.Publish( + ctx, + &replicationspb.ReplicationTask{ + TaskType: taskType, + Attributes: task, + }) +} + +func convertClusterReplicationConfigToProto( + input []string, +) []*replicationpb.ClusterReplicationConfig { + output := make([]*replicationpb.ClusterReplicationConfig, 0, len(input)) + for _, clusterName := range input { + output = append(output, &replicationpb.ClusterReplicationConfig{ClusterName: clusterName}) + } + return output +} + +func convertFailoverHistoryToReplicationProto( + failoverHistoy []*persistencespb.FailoverStatus, +) []*replicationpb.FailoverStatus { + var replicationProto []*replicationpb.FailoverStatus + for _, failoverStatus := range failoverHistoy { + replicationProto = append(replicationProto, &replicationpb.FailoverStatus{ + FailoverTime: failoverStatus.GetFailoverTime(), + FailoverVersion: failoverStatus.GetFailoverVersion(), + }) + } + + return replicationProto +} diff -Nru temporal-1.21.5-1/src/common/namespace/transmission_task_handler_test.go temporal-1.22.5/src/common/namespace/transmission_task_handler_test.go --- temporal-1.21.5-1/src/common/namespace/transmission_task_handler_test.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/namespace/transmission_task_handler_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,465 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. 
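The new transmission_task_handler.go above publishes a namespace replication task only when three gates pass. As an illustrative, self-contained sketch (outside the packaged sources; the helper name shouldReplicate and its plain bool/int parameters are assumptions made for illustration, not the server's API), the gating reduces to:

package main

import "fmt"

// shouldReplicate mirrors the early returns in HandleTransmissionTask: only
// global namespaces that are not deleted, and that either span more than one
// cluster or just had their cluster list updated, produce a replication task.
func shouldReplicate(isGlobal, deleted bool, clusterCount int, clusterListUpdated bool) bool {
	if !isGlobal {
		return false
	}
	if clusterCount <= 1 && !clusterListUpdated {
		return false
	}
	if deleted {
		return false
	}
	return true
}

func main() {
	fmt.Println(shouldReplicate(true, false, 2, false)) // true: global namespace on two clusters
	fmt.Println(shouldReplicate(true, false, 1, false)) // false: single cluster, list unchanged
	fmt.Println(shouldReplicate(true, false, 1, true))  // true: cluster list was just updated
	fmt.Println(shouldReplicate(false, false, 2, true)) // false: local namespace
}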
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package namespace + +import ( + "context" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/suite" + enumspb "go.temporal.io/api/enums/v1" + namespacepb "go.temporal.io/api/namespace/v1" + replicationpb "go.temporal.io/api/replication/v1" + + enumsspb "go.temporal.io/server/api/enums/v1" + persistencespb "go.temporal.io/server/api/persistence/v1" + replicationspb "go.temporal.io/server/api/replication/v1" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/persistence" + "go.temporal.io/server/common/primitives" +) + +type ( + transmissionTaskSuite struct { + suite.Suite + + controller *gomock.Controller + + namespaceReplicator *namespaceReplicatorImpl + namespaceReplicationQueue *persistence.MockNamespaceReplicationQueue + } +) + +func TestTransmissionTaskSuite(t *testing.T) { + s := new(transmissionTaskSuite) + suite.Run(t, s) +} + +func (s *transmissionTaskSuite) SetupSuite() { +} + +func (s *transmissionTaskSuite) TearDownSuite() { + +} + +func (s *transmissionTaskSuite) SetupTest() { + s.controller = gomock.NewController(s.T()) + s.namespaceReplicationQueue = persistence.NewMockNamespaceReplicationQueue(s.controller) + s.namespaceReplicator = NewNamespaceReplicator( + s.namespaceReplicationQueue, + log.NewTestLogger(), + ).(*namespaceReplicatorImpl) +} + +func (s *transmissionTaskSuite) TearDownTest() { + s.controller.Finish() +} + +func (s *transmissionTaskSuite) TestHandleTransmissionTask_RegisterNamespaceTask_IsGlobalNamespace() { + taskType := enumsspb.REPLICATION_TASK_TYPE_NAMESPACE_TASK + id := primitives.NewUUID().String() + name := "some random namespace test name" + state := enumspb.NAMESPACE_STATE_REGISTERED + description := "some random test description" + ownerEmail := "some random test owner" + data := map[string]string{"k": "v"} + retention := 10 * time.Hour * 24 + historyArchivalState := enumspb.ARCHIVAL_STATE_ENABLED + historyArchivalURI := "some random history archival uri" + visibilityArchivalState := enumspb.ARCHIVAL_STATE_ENABLED + visibilityArchivalURI := "some random visibility archival uri" + clusterActive := "some random active cluster name" + clusterStandby := "some random standby cluster name" + configVersion := int64(0) + failoverVersion := int64(59) + clusters := []string{clusterActive, clusterStandby} + + namespaceOperation := enumsspb.NAMESPACE_OPERATION_CREATE + info := &persistencespb.NamespaceInfo{ + Id: id, + Name: name, + 
State: enumspb.NAMESPACE_STATE_REGISTERED, + Description: description, + Owner: ownerEmail, + Data: data, + } + config := &persistencespb.NamespaceConfig{ + Retention: &retention, + HistoryArchivalState: historyArchivalState, + HistoryArchivalUri: historyArchivalURI, + VisibilityArchivalState: visibilityArchivalState, + VisibilityArchivalUri: visibilityArchivalURI, + BadBinaries: &namespacepb.BadBinaries{Binaries: map[string]*namespacepb.BadBinaryInfo{}}, + } + replicationConfig := &persistencespb.NamespaceReplicationConfig{ + ActiveClusterName: clusterActive, + Clusters: clusters, + } + isGlobalNamespace := true + + s.namespaceReplicationQueue.EXPECT().Publish(gomock.Any(), &replicationspb.ReplicationTask{ + TaskType: taskType, + Attributes: &replicationspb.ReplicationTask_NamespaceTaskAttributes{ + NamespaceTaskAttributes: &replicationspb.NamespaceTaskAttributes{ + NamespaceOperation: namespaceOperation, + Id: id, + Info: &namespacepb.NamespaceInfo{ + Name: name, + State: state, + Description: description, + OwnerEmail: ownerEmail, + Data: data, + }, + Config: &namespacepb.NamespaceConfig{ + WorkflowExecutionRetentionTtl: &retention, + HistoryArchivalState: historyArchivalState, + HistoryArchivalUri: historyArchivalURI, + VisibilityArchivalState: visibilityArchivalState, + VisibilityArchivalUri: visibilityArchivalURI, + BadBinaries: &namespacepb.BadBinaries{Binaries: map[string]*namespacepb.BadBinaryInfo{}}, + }, + ReplicationConfig: &replicationpb.NamespaceReplicationConfig{ + ActiveClusterName: clusterActive, + Clusters: convertClusterReplicationConfigToProto(clusters), + }, + ConfigVersion: configVersion, + FailoverVersion: failoverVersion, + }, + }, + }).Return(nil) + + err := s.namespaceReplicator.HandleTransmissionTask( + context.Background(), + namespaceOperation, + info, + config, + replicationConfig, + true, + configVersion, + failoverVersion, + isGlobalNamespace, + nil, + ) + s.Nil(err) +} + +func (s *transmissionTaskSuite) TestHandleTransmissionTask_RegisterNamespaceTask_NotGlobalNamespace() { + id := primitives.NewUUID().String() + name := "some random namespace test name" + description := "some random test description" + ownerEmail := "some random test owner" + data := map[string]string{"k": "v"} + retention := 10 * time.Hour * 24 + historyArchivalState := enumspb.ARCHIVAL_STATE_ENABLED + historyArchivalURI := "some random history archival uri" + visibilityArchivalState := enumspb.ARCHIVAL_STATE_ENABLED + visibilityArchivalURI := "some random visibility archival uri" + clusterActive := "some random active cluster name" + clusterStandby := "some random standby cluster name" + configVersion := int64(0) + failoverVersion := int64(59) + clusters := []string{clusterActive, clusterStandby} + + namespaceOperation := enumsspb.NAMESPACE_OPERATION_CREATE + info := &persistencespb.NamespaceInfo{ + Id: id, + Name: name, + State: enumspb.NAMESPACE_STATE_REGISTERED, + Description: description, + Owner: ownerEmail, + Data: data, + } + config := &persistencespb.NamespaceConfig{ + Retention: &retention, + HistoryArchivalState: historyArchivalState, + HistoryArchivalUri: historyArchivalURI, + VisibilityArchivalState: visibilityArchivalState, + VisibilityArchivalUri: visibilityArchivalURI, + BadBinaries: &namespacepb.BadBinaries{}, + } + replicationConfig := &persistencespb.NamespaceReplicationConfig{ + ActiveClusterName: clusterActive, + Clusters: clusters, + } + isGlobalNamespace := false + + err := s.namespaceReplicator.HandleTransmissionTask( + context.Background(), + namespaceOperation, + 
info, + config, + replicationConfig, + true, + configVersion, + failoverVersion, + isGlobalNamespace, + nil, + ) + s.Nil(err) +} + +func (s *transmissionTaskSuite) TestHandleTransmissionTask_UpdateNamespaceTask_IsGlobalNamespace() { + taskType := enumsspb.REPLICATION_TASK_TYPE_NAMESPACE_TASK + id := primitives.NewUUID().String() + name := "some random namespace test name" + state := enumspb.NAMESPACE_STATE_DEPRECATED + description := "some random test description" + ownerEmail := "some random test owner" + data := map[string]string{"k": "v"} + retention := 10 * time.Hour * 24 + historyArchivalState := enumspb.ARCHIVAL_STATE_ENABLED + historyArchivalURI := "some random history archival uri" + visibilityArchivalState := enumspb.ARCHIVAL_STATE_ENABLED + visibilityArchivalURI := "some random visibility archival uri" + clusterActive := "some random active cluster name" + clusterStandby := "some random standby cluster name" + configVersion := int64(0) + failoverVersion := int64(59) + clusters := []string{clusterActive, clusterStandby} + + namespaceOperation := enumsspb.NAMESPACE_OPERATION_UPDATE + info := &persistencespb.NamespaceInfo{ + Id: id, + Name: name, + State: enumspb.NAMESPACE_STATE_DEPRECATED, + Description: description, + Owner: ownerEmail, + Data: data, + } + config := &persistencespb.NamespaceConfig{ + Retention: &retention, + HistoryArchivalState: historyArchivalState, + HistoryArchivalUri: historyArchivalURI, + VisibilityArchivalState: visibilityArchivalState, + VisibilityArchivalUri: visibilityArchivalURI, + BadBinaries: &namespacepb.BadBinaries{Binaries: map[string]*namespacepb.BadBinaryInfo{}}, + } + replicationConfig := &persistencespb.NamespaceReplicationConfig{ + ActiveClusterName: clusterActive, + Clusters: clusters, + } + isGlobalNamespace := true + + s.namespaceReplicationQueue.EXPECT().Publish(gomock.Any(), &replicationspb.ReplicationTask{ + TaskType: taskType, + Attributes: &replicationspb.ReplicationTask_NamespaceTaskAttributes{ + NamespaceTaskAttributes: &replicationspb.NamespaceTaskAttributes{ + NamespaceOperation: namespaceOperation, + Id: id, + Info: &namespacepb.NamespaceInfo{ + Name: name, + State: state, + Description: description, + OwnerEmail: ownerEmail, + Data: data, + }, + Config: &namespacepb.NamespaceConfig{ + WorkflowExecutionRetentionTtl: &retention, + HistoryArchivalState: historyArchivalState, + HistoryArchivalUri: historyArchivalURI, + VisibilityArchivalState: visibilityArchivalState, + VisibilityArchivalUri: visibilityArchivalURI, + BadBinaries: &namespacepb.BadBinaries{Binaries: map[string]*namespacepb.BadBinaryInfo{}}, + }, + ReplicationConfig: &replicationpb.NamespaceReplicationConfig{ + ActiveClusterName: clusterActive, + Clusters: convertClusterReplicationConfigToProto(clusters), + }, + ConfigVersion: configVersion, + FailoverVersion: failoverVersion}, + }, + }).Return(nil) + + err := s.namespaceReplicator.HandleTransmissionTask( + context.Background(), + namespaceOperation, + info, + config, + replicationConfig, + true, + configVersion, + failoverVersion, + isGlobalNamespace, + nil, + ) + s.Nil(err) +} + +func (s *transmissionTaskSuite) TestHandleTransmissionTask_UpdateNamespaceTask_NotGlobalNamespace() { + id := primitives.NewUUID().String() + name := "some random namespace test name" + description := "some random test description" + ownerEmail := "some random test owner" + data := map[string]string{"k": "v"} + retention := 10 * time.Hour * 24 + historyArchivalState := enumspb.ARCHIVAL_STATE_ENABLED + historyArchivalURI := "some random 
history archival uri" + visibilityArchivalState := enumspb.ARCHIVAL_STATE_ENABLED + visibilityArchivalURI := "some random visibility archival uri" + clusterActive := "some random active cluster name" + clusterStandby := "some random standby cluster name" + configVersion := int64(0) + failoverVersion := int64(59) + clusters := []string{clusterActive, clusterStandby} + + namespaceOperation := enumsspb.NAMESPACE_OPERATION_UPDATE + info := &persistencespb.NamespaceInfo{ + Id: id, + Name: name, + State: enumspb.NAMESPACE_STATE_DEPRECATED, + Description: description, + Owner: ownerEmail, + Data: data, + } + config := &persistencespb.NamespaceConfig{ + Retention: &retention, + HistoryArchivalState: historyArchivalState, + HistoryArchivalUri: historyArchivalURI, + VisibilityArchivalState: visibilityArchivalState, + VisibilityArchivalUri: visibilityArchivalURI, + } + replicationConfig := &persistencespb.NamespaceReplicationConfig{ + ActiveClusterName: clusterActive, + Clusters: clusters, + } + isGlobalNamespace := false + + err := s.namespaceReplicator.HandleTransmissionTask( + context.Background(), + namespaceOperation, + info, + config, + replicationConfig, + true, + configVersion, + failoverVersion, + isGlobalNamespace, + nil, + ) + s.Nil(err) +} + +func (s *transmissionTaskSuite) TestHandleTransmissionTask_UpdateNamespaceTask_ReplicationClusterListUpdated() { + taskType := enumsspb.REPLICATION_TASK_TYPE_NAMESPACE_TASK + id := primitives.NewUUID().String() + name := "some random namespace test name" + state := enumspb.NAMESPACE_STATE_DEPRECATED + description := "some random test description" + ownerEmail := "some random test owner" + data := map[string]string{"k": "v"} + retention := 10 * time.Hour * 24 + historyArchivalState := enumspb.ARCHIVAL_STATE_ENABLED + historyArchivalURI := "some random history archival uri" + visibilityArchivalState := enumspb.ARCHIVAL_STATE_ENABLED + visibilityArchivalURI := "some random visibility archival uri" + clusterActive := "some random active cluster name" + configVersion := int64(0) + failoverVersion := int64(59) + singleClusterList := []string{clusterActive} + + namespaceOperation := enumsspb.NAMESPACE_OPERATION_UPDATE + info := &persistencespb.NamespaceInfo{ + Id: id, + Name: name, + State: enumspb.NAMESPACE_STATE_DEPRECATED, + Description: description, + Owner: ownerEmail, + Data: data, + } + config := &persistencespb.NamespaceConfig{ + Retention: &retention, + HistoryArchivalState: historyArchivalState, + HistoryArchivalUri: historyArchivalURI, + VisibilityArchivalState: visibilityArchivalState, + VisibilityArchivalUri: visibilityArchivalURI, + BadBinaries: &namespacepb.BadBinaries{Binaries: map[string]*namespacepb.BadBinaryInfo{}}, + } + replicationConfig := &persistencespb.NamespaceReplicationConfig{ + ActiveClusterName: clusterActive, + Clusters: singleClusterList, + } + + isGlobalNamespace := true + + s.namespaceReplicationQueue.EXPECT().Publish(gomock.Any(), &replicationspb.ReplicationTask{ + TaskType: taskType, + Attributes: &replicationspb.ReplicationTask_NamespaceTaskAttributes{ + NamespaceTaskAttributes: &replicationspb.NamespaceTaskAttributes{ + NamespaceOperation: namespaceOperation, + Id: id, + Info: &namespacepb.NamespaceInfo{ + Name: name, + State: state, + Description: description, + OwnerEmail: ownerEmail, + Data: data, + }, + Config: &namespacepb.NamespaceConfig{ + WorkflowExecutionRetentionTtl: &retention, + HistoryArchivalState: historyArchivalState, + HistoryArchivalUri: historyArchivalURI, + VisibilityArchivalState: 
visibilityArchivalState, + VisibilityArchivalUri: visibilityArchivalURI, + BadBinaries: &namespacepb.BadBinaries{Binaries: map[string]*namespacepb.BadBinaryInfo{}}, + }, + ReplicationConfig: &replicationpb.NamespaceReplicationConfig{ + ActiveClusterName: clusterActive, + Clusters: convertClusterReplicationConfigToProto(singleClusterList), + }, + ConfigVersion: configVersion, + FailoverVersion: failoverVersion}, + }, + }).Return(nil).Times(1) + + err := s.namespaceReplicator.HandleTransmissionTask( + context.Background(), + namespaceOperation, + info, + config, + replicationConfig, + true, + configVersion, + failoverVersion, + isGlobalNamespace, + nil, + ) + s.Nil(err) + + err = s.namespaceReplicator.HandleTransmissionTask( + context.Background(), + namespaceOperation, + info, + config, + replicationConfig, + false, + configVersion, + failoverVersion, + isGlobalNamespace, + nil, + ) + s.Nil(err) +} diff -Nru temporal-1.21.5-1/src/common/payload/payload.go temporal-1.22.5/src/common/payload/payload.go --- temporal-1.21.5-1/src/common/payload/payload.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/payload/payload.go 2024-02-23 09:45:43.000000000 +0000 @@ -25,7 +25,8 @@ package payload import ( - "github.com/gogo/protobuf/proto" + "bytes" + commonpb "go.temporal.io/api/common/v1" "go.temporal.io/sdk/converter" "go.temporal.io/server/common/util" @@ -94,7 +95,7 @@ } res := util.CloneMapNonNil(dst) for k, v := range src { - if proto.Equal(v, nilPayload) || proto.Equal(v, emptySlicePayload) { + if isEqual(v, nilPayload) || isEqual(v, emptySlicePayload) { delete(res, k) } else { res[k] = v @@ -102,3 +103,14 @@ } return res } + +// isEqual returns true if both have the same encoding and data. +// It does not take additional metadata into consideration. +// Note that data equality it's not the same as semantic equality, ie., +// `[]` and `[ ]` are semantically the same, but different not data-wise. +// Only use if you know that the data is encoded the same way. 
+func isEqual(a, b *commonpb.Payload) bool { + aEnc := a.GetMetadata()[converter.MetadataEncoding] + bEnc := a.GetMetadata()[converter.MetadataEncoding] + return bytes.Equal(aEnc, bEnc) && bytes.Equal(a.GetData(), b.GetData()) +} diff -Nru temporal-1.21.5-1/src/common/payload/payload_test.go temporal-1.22.5/src/common/payload/payload_test.go --- temporal-1.21.5-1/src/common/payload/payload_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/payload/payload_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -38,56 +38,56 @@ } func TestToString(t *testing.T) { - assert := assert.New(t) + s := assert.New(t) var result string p := EncodeString("str") result = ToString(p) - assert.Equal(`"str"`, result) + s.Equal(`"str"`, result) p, err := Encode(10) - assert.NoError(err) + s.NoError(err) result = ToString(p) - assert.Equal("10", result) + s.Equal("10", result) p, err = Encode([]byte{41, 42, 43}) - assert.NoError(err) + s.NoError(err) result = ToString(p) - assert.Equal("KSor", result) + s.Equal("KSor", result) p, err = Encode(&testStruct{ Int: 10, String: "str", Bytes: []byte{51, 52, 53}, }) - assert.NoError(err) + s.NoError(err) result = ToString(p) - assert.Equal(`{"Int":10,"String":"str","Bytes":"MzQ1"}`, result) + s.Equal(`{"Int":10,"String":"str","Bytes":"MzQ1"}`, result) p, err = Encode(nil) - assert.NoError(err) + s.NoError(err) result = ToString(p) - assert.Equal("nil", result) + s.Equal("nil", result) result = ToString(nil) - assert.Equal("", result) + s.Equal("", result) } func TestMergeMapOfPayload(t *testing.T) { - assert := assert.New(t) + s := assert.New(t) var currentMap map[string]*commonpb.Payload var newMap map[string]*commonpb.Payload resultMap := MergeMapOfPayload(currentMap, newMap) - assert.Equal(newMap, resultMap) + s.Equal(newMap, resultMap) newMap = make(map[string]*commonpb.Payload) resultMap = MergeMapOfPayload(currentMap, newMap) - assert.Equal(newMap, resultMap) + s.Equal(newMap, resultMap) newMap = map[string]*commonpb.Payload{"key": EncodeString("val")} resultMap = MergeMapOfPayload(currentMap, newMap) - assert.Equal(newMap, resultMap) + s.Equal(newMap, resultMap) newMap = map[string]*commonpb.Payload{ "key": EncodeString("val"), @@ -95,11 +95,11 @@ "emptyArray": emptySlicePayload, } resultMap = MergeMapOfPayload(currentMap, newMap) - assert.Equal(map[string]*commonpb.Payload{"key": EncodeString("val")}, resultMap) + s.Equal(map[string]*commonpb.Payload{"key": EncodeString("val")}, resultMap) currentMap = map[string]*commonpb.Payload{"number": EncodeString("1")} resultMap = MergeMapOfPayload(currentMap, newMap) - assert.Equal( + s.Equal( map[string]*commonpb.Payload{"number": EncodeString("1"), "key": EncodeString("val")}, resultMap, ) @@ -107,10 +107,35 @@ newValue, _ := Encode(nil) newMap = map[string]*commonpb.Payload{"number": newValue} resultMap = MergeMapOfPayload(currentMap, newMap) - assert.Equal(0, len(resultMap)) + s.Equal(0, len(resultMap)) newValue, _ = Encode([]int{}) + newValue.Metadata["key"] = []byte("foo") newMap = map[string]*commonpb.Payload{"number": newValue} resultMap = MergeMapOfPayload(currentMap, newMap) - assert.Equal(0, len(resultMap)) + s.Equal(0, len(resultMap)) +} + +func TestIsEqual(t *testing.T) { + s := assert.New(t) + + a, _ := Encode(nil) + b, _ := Encode(nil) + s.True(isEqual(a, b)) + + a, _ = Encode([]string{}) + b, _ = Encode([]string{}) + s.True(isEqual(a, b)) + + a.Metadata["key"] = []byte("foo") + b.Metadata["key"] = []byte("bar") + s.True(isEqual(a, b)) + + a, _ = Encode(nil) + b, _ = Encode([]string{}) 
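The isEqual helper above replaces proto.Equal in MergeMapOfPayload: only the encoding metadata entry and the raw data bytes decide equality, other metadata keys are ignored, and the comparison is byte-wise rather than semantic (so `[]` and `[ ]` differ). Note that as diffed the helper reads the encoding from `a` for both operands; the accompanying tests suggest each payload's own encoding is meant, which is what this self-contained sketch does (the `payload` struct and the literal "encoding" key are stand-ins for commonpb.Payload and converter.MetadataEncoding):

package main

import (
	"bytes"
	"fmt"
)

// payload is a hypothetical stand-in for commonpb.Payload, used only in this sketch.
type payload struct {
	Metadata map[string][]byte
	Data     []byte
}

// isEqualSketch compares only the "encoding" metadata entry and the raw data
// bytes; all other metadata keys are ignored, and data equality is byte-wise.
func isEqualSketch(a, b payload) bool {
	return bytes.Equal(a.Metadata["encoding"], b.Metadata["encoding"]) &&
		bytes.Equal(a.Data, b.Data)
}

func main() {
	a := payload{Metadata: map[string][]byte{"encoding": []byte("json/plain"), "k": []byte("foo")}, Data: []byte("[]")}
	b := payload{Metadata: map[string][]byte{"encoding": []byte("json/plain"), "k": []byte("bar")}, Data: []byte("[]")}
	fmt.Println(isEqualSketch(a, b)) // true: extra metadata keys are ignored
	b.Data = []byte("[ ]")
	fmt.Println(isEqualSketch(a, b)) // false: byte-wise comparison, "[]" != "[ ]"
}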
+ s.False(isEqual(a, b)) + + a, _ = Encode([]string{}) + b, _ = Encode("foo") + s.False(isEqual(a, b)) } diff -Nru temporal-1.21.5-1/src/common/persistence/client/factory.go temporal-1.22.5/src/common/persistence/client/factory.go --- temporal-1.21.5-1/src/common/persistence/client/factory.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/client/factory.go 2024-02-23 09:45:43.000000000 +0000 @@ -64,6 +64,7 @@ dataStoreFactory DataStoreFactory config *config.Persistence serializer serialization.Serializer + eventBlobCache p.XDCCache metricsHandler metrics.Handler logger log.Logger clusterName string @@ -84,6 +85,7 @@ cfg *config.Persistence, ratelimiter quotas.RequestRateLimiter, serializer serialization.Serializer, + eventBlobCache p.XDCCache, clusterName string, metricsHandler metrics.Handler, logger log.Logger, @@ -93,6 +95,7 @@ dataStoreFactory: dataStoreFactory, config: cfg, serializer: serializer, + eventBlobCache: eventBlobCache, metricsHandler: metricsHandler, logger: logger, clusterName: clusterName, @@ -182,7 +185,7 @@ return nil, err } - result := p.NewExecutionManager(store, f.serializer, f.logger, f.config.TransactionSizeLimit) + result := p.NewExecutionManager(store, f.serializer, f.eventBlobCache, f.logger, f.config.TransactionSizeLimit) if f.ratelimiter != nil { result = p.NewExecutionPersistenceRateLimitedClient(result, f.ratelimiter, f.logger) } diff -Nru temporal-1.21.5-1/src/common/persistence/client/fx.go temporal-1.22.5/src/common/persistence/client/fx.go --- temporal-1.21.5-1/src/common/persistence/client/fx.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/client/fx.go 2024-02-23 09:45:43.000000000 +0000 @@ -45,6 +45,7 @@ PersistenceNamespaceMaxQps dynamicconfig.IntPropertyFnWithNamespaceFilter PersistencePerShardNamespaceMaxQPS dynamicconfig.IntPropertyFnWithNamespaceFilter EnablePriorityRateLimiting dynamicconfig.BoolPropertyFn + OperatorRPSRatio dynamicconfig.FloatPropertyFn DynamicRateLimitingParams dynamicconfig.MapPropertyFn @@ -54,11 +55,13 @@ fx.In DataStoreFactory DataStoreFactory + EventBlobCache persistence.XDCCache Cfg *config.Persistence PersistenceMaxQPS PersistenceMaxQps PersistenceNamespaceMaxQPS PersistenceNamespaceMaxQps PersistencePerShardNamespaceMaxQPS PersistencePerShardNamespaceMaxQPS EnablePriorityRateLimiting EnablePriorityRateLimiting + OperatorRPSRatio OperatorRPSRatio ClusterName ClusterName ServiceName primitives.ServiceName MetricsHandler metrics.Handler @@ -75,12 +78,22 @@ fx.Provide(ClusterNameProvider), fx.Provide(DataStoreFactoryProvider), fx.Provide(HealthSignalAggregatorProvider), + fx.Provide(EventBlobCacheProvider), ) func ClusterNameProvider(config *cluster.Config) ClusterName { return ClusterName(config.CurrentClusterName) } +func EventBlobCacheProvider( + dc *dynamicconfig.Collection, +) persistence.XDCCache { + return persistence.NewEventsBlobCache( + dc.GetIntProperty(dynamicconfig.XDCCacheMaxSizeBytes, 8*1024*1024)(), + 20*time.Second, + ) +} + func FactoryProvider( params NewFactoryParams, ) Factory { @@ -92,8 +105,10 @@ params.PersistenceMaxQPS, params.PersistencePerShardNamespaceMaxQPS, RequestPriorityFn, + params.OperatorRPSRatio, params.HealthSignals, params.DynamicRateLimitingParams, + params.MetricsHandler, params.Logger, ) } else { @@ -106,6 +121,7 @@ params.Cfg, requestRatelimiter, serialization.NewSerializer(), + params.EventBlobCache, string(params.ClusterName), params.MetricsHandler, params.Logger, diff -Nru 
temporal-1.21.5-1/src/common/persistence/client/health_request_rate_limiter.go temporal-1.22.5/src/common/persistence/client/health_request_rate_limiter.go --- temporal-1.21.5-1/src/common/persistence/client/health_request_rate_limiter.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/client/health_request_rate_limiter.go 2024-02-23 09:45:43.000000000 +0000 @@ -33,21 +33,21 @@ "go.temporal.io/server/common/log" "go.temporal.io/server/common/log/tag" + "go.temporal.io/server/common/metrics" "go.temporal.io/server/common/persistence" "go.temporal.io/server/common/primitives/timestamp" "go.temporal.io/server/common/quotas" ) const ( - DefaultRefreshInterval = 10 * time.Second - DefaultRateBurstRatio = 1.0 - DefaultMinRateMultiplier = 0.1 - DefaultMaxRateMultiplier = 1.0 + DefaultRefreshInterval = 10 * time.Second + DefaultRateBurstRatio = 1.0 + DefaultInitialRateMultiplier = 1.0 ) type ( HealthRequestRateLimiterImpl struct { - enabled *atomic.Bool + enabled atomic.Bool params DynamicRateLimitingParams // dynamic config map curOptions dynamicRateLimitingOptions // current dynamic config values (updated on refresh) @@ -59,11 +59,10 @@ rateFn quotas.RateFn rateToBurstRatio float64 - minRateMultiplier float64 - maxRateMultiplier float64 curRateMultiplier float64 - logger log.Logger + metricsHandler metrics.Handler + logger log.Logger } dynamicRateLimitingOptions struct { @@ -80,6 +79,9 @@ // when the system is healthy and current rate < max rate, the current rate multiplier will be // increased by this amount RateIncreaseStepSize float64 + + RateMultiMax float64 + RateMultiMin float64 } ) @@ -89,19 +91,19 @@ healthSignals persistence.HealthSignalAggregator, rateFn quotas.RateFn, params DynamicRateLimitingParams, + metricsHandler metrics.Handler, logger log.Logger, ) *HealthRequestRateLimiterImpl { limiter := &HealthRequestRateLimiterImpl{ - enabled: &atomic.Bool{}, + enabled: atomic.Bool{}, rateLimiter: quotas.NewRateLimiter(rateFn(), int(DefaultRateBurstRatio*rateFn())), healthSignals: healthSignals, rateFn: rateFn, params: params, refreshTimer: time.NewTicker(DefaultRefreshInterval), rateToBurstRatio: DefaultRateBurstRatio, - minRateMultiplier: DefaultMinRateMultiplier, - maxRateMultiplier: DefaultMaxRateMultiplier, - curRateMultiplier: DefaultMaxRateMultiplier, + curRateMultiplier: DefaultInitialRateMultiplier, + metricsHandler: metricsHandler, logger: logger, } limiter.refreshDynamicParams() @@ -149,17 +151,18 @@ func (rl *HealthRequestRateLimiterImpl) refreshRate() { if rl.latencyThresholdExceeded() || rl.errorThresholdExceeded() { // limit exceeded, do backoff - rl.curRateMultiplier = math.Max(rl.minRateMultiplier, rl.curRateMultiplier-rl.curOptions.RateBackoffStepSize) - rl.rateLimiter.SetRate(rl.curRateMultiplier * rl.rateFn()) - rl.rateLimiter.SetBurst(int(rl.rateToBurstRatio * rl.rateFn())) + rl.curRateMultiplier = math.Max(rl.curOptions.RateMultiMin, rl.curRateMultiplier-rl.curOptions.RateBackoffStepSize) + rl.metricsHandler.Gauge(metrics.DynamicRateLimiterMultiplier.GetMetricName()).Record(rl.curRateMultiplier) rl.logger.Info("Health threshold exceeded, reducing rate limit.", tag.NewFloat64("newMulti", rl.curRateMultiplier), tag.NewFloat64("newRate", rl.rateLimiter.Rate()), tag.NewFloat64("latencyAvg", rl.healthSignals.AverageLatency()), tag.NewFloat64("errorRatio", rl.healthSignals.ErrorRatio())) - } else if rl.curRateMultiplier < rl.maxRateMultiplier { + } else if rl.curRateMultiplier < rl.curOptions.RateMultiMax { // already doing backoff and under 
thresholds, increase limit - rl.curRateMultiplier = math.Min(rl.maxRateMultiplier, rl.curRateMultiplier+rl.curOptions.RateIncreaseStepSize) - rl.rateLimiter.SetRate(rl.curRateMultiplier * rl.rateFn()) - rl.rateLimiter.SetBurst(int(rl.rateToBurstRatio * rl.rateFn())) + rl.curRateMultiplier = math.Min(rl.curOptions.RateMultiMax, rl.curRateMultiplier+rl.curOptions.RateIncreaseStepSize) + rl.metricsHandler.Gauge(metrics.DynamicRateLimiterMultiplier.GetMetricName()).Record(rl.curRateMultiplier) rl.logger.Info("System healthy, increasing rate limit.", tag.NewFloat64("newMulti", rl.curRateMultiplier), tag.NewFloat64("newRate", rl.rateLimiter.Rate()), tag.NewFloat64("latencyAvg", rl.healthSignals.AverageLatency()), tag.NewFloat64("errorRatio", rl.healthSignals.ErrorRatio())) } + // Always set rate to pickup changes to underlying rate limit dynamic config + rl.rateLimiter.SetRate(rl.curRateMultiplier * rl.rateFn()) + rl.rateLimiter.SetBurst(int(rl.rateToBurstRatio * rl.rateFn())) } func (rl *HealthRequestRateLimiterImpl) refreshDynamicParams() { diff -Nru temporal-1.21.5-1/src/common/persistence/client/quotas.go temporal-1.22.5/src/common/persistence/client/quotas.go --- temporal-1.21.5-1/src/common/persistence/client/quotas.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/client/quotas.go 2024-02-23 09:45:43.000000000 +0000 @@ -27,6 +27,7 @@ import ( "go.temporal.io/server/common/headers" "go.temporal.io/server/common/log" + "go.temporal.io/server/common/metrics" p "go.temporal.io/server/common/persistence" "go.temporal.io/server/common/quotas" "go.temporal.io/server/service/history/tasks" @@ -41,29 +42,30 @@ var ( CallerTypeDefaultPriority = map[string]int{ - headers.CallerTypeAPI: 1, - headers.CallerTypeBackground: 3, - headers.CallerTypePreemptable: 4, + headers.CallerTypeOperator: 0, + headers.CallerTypeAPI: 2, + headers.CallerTypeBackground: 4, + headers.CallerTypePreemptable: 5, } APITypeCallOriginPriorityOverride = map[string]int{ - "StartWorkflowExecution": 0, - "SignalWithStartWorkflowExecution": 0, - "SignalWorkflowExecution": 0, - "RequestCancelWorkflowExecution": 0, - "TerminateWorkflowExecution": 0, - "GetWorkflowExecutionHistory": 0, - "UpdateWorkflowExecution": 0, + "StartWorkflowExecution": 1, + "SignalWithStartWorkflowExecution": 1, + "SignalWorkflowExecution": 1, + "RequestCancelWorkflowExecution": 1, + "TerminateWorkflowExecution": 1, + "GetWorkflowExecutionHistory": 1, + "UpdateWorkflowExecution": 1, } BackgroundTypeAPIPriorityOverride = map[string]int{ - "GetOrCreateShard": 0, - "UpdateShard": 0, + "GetOrCreateShard": 1, + "UpdateShard": 1, // This is a preprequisite for checkpointing queue process progress - p.ConstructHistoryTaskAPI("RangeCompleteHistoryTasks", tasks.CategoryTransfer): 0, - p.ConstructHistoryTaskAPI("RangeCompleteHistoryTasks", tasks.CategoryTimer): 0, - p.ConstructHistoryTaskAPI("RangeCompleteHistoryTasks", tasks.CategoryVisibility): 0, + p.ConstructHistoryTaskAPI("RangeCompleteHistoryTasks", tasks.CategoryTransfer): 1, + p.ConstructHistoryTaskAPI("RangeCompleteHistoryTasks", tasks.CategoryTimer): 1, + p.ConstructHistoryTaskAPI("RangeCompleteHistoryTasks", tasks.CategoryVisibility): 1, // Task resource isolation assumes task can always be loaded. // When one namespace has high load, all task processing goroutines @@ -73,12 +75,12 @@ // NOTE: we also don't want task loading to consume all persistence request tokens, // and blocks all other operations. This is done by setting the queue host rps limit // dynamic config. 
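The reworked refreshRate above takes its multiplier bounds from the dynamic options (RateMultiMin/RateMultiMax), records the multiplier as a gauge, and re-applies rate and burst on every refresh so that changes to the underlying dynamic rate limit are picked up even when the multiplier does not move. A minimal sketch of the multiplier adjustment alone (plain floats; the names nextMultiplier, opts and unhealthy are assumptions for illustration):

package main

import (
	"fmt"
	"math"
)

type opts struct {
	RateBackoffStepSize  float64
	RateIncreaseStepSize float64
	RateMultiMin         float64
	RateMultiMax         float64
}

// nextMultiplier mirrors the adjustment in refreshRate: back off toward
// RateMultiMin when health thresholds are exceeded, otherwise creep back
// up toward RateMultiMax.
func nextMultiplier(cur float64, unhealthy bool, o opts) float64 {
	if unhealthy {
		return math.Max(o.RateMultiMin, cur-o.RateBackoffStepSize)
	}
	if cur < o.RateMultiMax {
		return math.Min(o.RateMultiMax, cur+o.RateIncreaseStepSize)
	}
	return cur
}

func main() {
	o := opts{RateBackoffStepSize: 0.3, RateIncreaseStepSize: 0.1, RateMultiMin: 0.1, RateMultiMax: 1.0}
	m := 1.0
	m = nextMultiplier(m, true, o)  // ~0.7 after one unhealthy refresh
	m = nextMultiplier(m, true, o)  // ~0.4
	m = nextMultiplier(m, false, o) // ~0.5 once healthy again
	fmt.Println(m)
	// The effective limit is then multiplier * rateFn(), re-applied on every
	// refresh so dynamic-config changes to rateFn take effect immediately.
}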
- p.ConstructHistoryTaskAPI("GetHistoryTasks", tasks.CategoryTransfer): 2, - p.ConstructHistoryTaskAPI("GetHistoryTasks", tasks.CategoryTimer): 2, - p.ConstructHistoryTaskAPI("GetHistoryTasks", tasks.CategoryVisibility): 2, + p.ConstructHistoryTaskAPI("GetHistoryTasks", tasks.CategoryTransfer): 3, + p.ConstructHistoryTaskAPI("GetHistoryTasks", tasks.CategoryTimer): 3, + p.ConstructHistoryTaskAPI("GetHistoryTasks", tasks.CategoryVisibility): 3, } - RequestPrioritiesOrdered = []int{0, 1, 2, 3, 4} + RequestPrioritiesOrdered = []int{0, 1, 2, 3, 4, 5} ) func NewPriorityRateLimiter( @@ -86,21 +88,23 @@ hostMaxQPS PersistenceMaxQps, perShardNamespaceMaxQPS PersistencePerShardNamespaceMaxQPS, requestPriorityFn quotas.RequestPriorityFn, + operatorRPSRatio OperatorRPSRatio, healthSignals p.HealthSignalAggregator, dynamicParams DynamicRateLimitingParams, + metricsHandler metrics.Handler, logger log.Logger, ) quotas.RequestRateLimiter { hostRateFn := func() float64 { return float64(hostMaxQPS()) } return quotas.NewMultiRequestRateLimiter( // per shardID+namespaceID rate limiters - newPerShardPerNamespacePriorityRateLimiter(perShardNamespaceMaxQPS, hostMaxQPS, requestPriorityFn), + newPerShardPerNamespacePriorityRateLimiter(perShardNamespaceMaxQPS, hostMaxQPS, requestPriorityFn, operatorRPSRatio), // per namespaceID rate limiters - newPriorityNamespaceRateLimiter(namespaceMaxQPS, hostMaxQPS, requestPriorityFn), + newPriorityNamespaceRateLimiter(namespaceMaxQPS, hostMaxQPS, requestPriorityFn, operatorRPSRatio), // host-level dynamic rate limiter - newPriorityDynamicRateLimiter(hostRateFn, requestPriorityFn, healthSignals, dynamicParams, logger), + newPriorityDynamicRateLimiter(hostRateFn, requestPriorityFn, operatorRPSRatio, healthSignals, dynamicParams, metricsHandler, logger), // basic host-level rate limiter - newPriorityRateLimiter(hostRateFn, requestPriorityFn), + newPriorityRateLimiter(hostRateFn, requestPriorityFn, operatorRPSRatio), ) } @@ -108,6 +112,7 @@ perShardNamespaceMaxQPS PersistencePerShardNamespaceMaxQPS, hostMaxQPS PersistenceMaxQps, requestPriorityFn quotas.RequestPriorityFn, + operatorRPSRatio OperatorRPSRatio, ) quotas.RequestRateLimiter { return quotas.NewMapRequestRateLimiter(func(req quotas.Request) quotas.RequestRateLimiter { if hasCaller(req) && hasCallerSegment(req) { @@ -118,6 +123,7 @@ return float64(perShardNamespaceMaxQPS(req.Caller)) }, requestPriorityFn, + operatorRPSRatio, ) } return quotas.NoopRequestRateLimiter @@ -137,6 +143,7 @@ namespaceMaxQPS PersistenceNamespaceMaxQps, hostMaxQPS PersistenceMaxQps, requestPriorityFn quotas.RequestPriorityFn, + operatorRPSRatio OperatorRPSRatio, ) quotas.RequestRateLimiter { return quotas.NewNamespaceRequestRateLimiter(func(req quotas.Request) quotas.RequestRateLimiter { if hasCaller(req) { @@ -154,6 +161,7 @@ return namespaceQPS }, requestPriorityFn, + operatorRPSRatio, ) } return quotas.NoopRequestRateLimiter @@ -163,10 +171,15 @@ func newPriorityRateLimiter( rateFn quotas.RateFn, requestPriorityFn quotas.RequestPriorityFn, + operatorRPSRatio OperatorRPSRatio, ) quotas.RequestRateLimiter { rateLimiters := make(map[int]quotas.RequestRateLimiter) for priority := range RequestPrioritiesOrdered { - rateLimiters[priority] = quotas.NewRequestRateLimiterAdapter(quotas.NewDefaultOutgoingRateLimiter(rateFn)) + if priority == CallerTypeDefaultPriority[headers.CallerTypeOperator] { + rateLimiters[priority] = quotas.NewRequestRateLimiterAdapter(quotas.NewDefaultOutgoingRateLimiter(operatorRateFn(rateFn, operatorRPSRatio))) + } else { + 
rateLimiters[priority] = quotas.NewRequestRateLimiterAdapter(quotas.NewDefaultOutgoingRateLimiter(rateFn)) + } } return quotas.NewPriorityRateLimiter( @@ -178,14 +191,20 @@ func newPriorityDynamicRateLimiter( rateFn quotas.RateFn, requestPriorityFn quotas.RequestPriorityFn, + operatorRPSRatio OperatorRPSRatio, healthSignals p.HealthSignalAggregator, dynamicParams DynamicRateLimitingParams, + metricsHandler metrics.Handler, logger log.Logger, ) quotas.RequestRateLimiter { rateLimiters := make(map[int]quotas.RequestRateLimiter) for priority := range RequestPrioritiesOrdered { // TODO: refactor this so dynamic rate adjustment is global for all priorities - rateLimiters[priority] = NewHealthRequestRateLimiterImpl(healthSignals, rateFn, dynamicParams, logger) + if priority == CallerTypeDefaultPriority[headers.CallerTypeOperator] { + rateLimiters[priority] = NewHealthRequestRateLimiterImpl(healthSignals, operatorRateFn(rateFn, operatorRPSRatio), dynamicParams, metricsHandler, logger) + } else { + rateLimiters[priority] = NewHealthRequestRateLimiterImpl(healthSignals, rateFn, dynamicParams, metricsHandler, logger) + } } return quotas.NewPriorityRateLimiter( @@ -211,6 +230,8 @@ func RequestPriorityFn(req quotas.Request) int { switch req.CallerType { + case headers.CallerTypeOperator: + return CallerTypeDefaultPriority[req.CallerType] case headers.CallerTypeAPI: if priority, ok := APITypeCallOriginPriorityOverride[req.Initiation]; ok { return priority @@ -224,8 +245,14 @@ case headers.CallerTypePreemptable: return CallerTypeDefaultPriority[req.CallerType] default: - // default requests to high priority to be consistent with existing behavior - return RequestPrioritiesOrdered[0] + // default requests to API priority to be consistent with existing behavior + return CallerTypeDefaultPriority[headers.CallerTypeAPI] + } +} + +func operatorRateFn(rateFn quotas.RateFn, operatorRPSRatio OperatorRPSRatio) quotas.RateFn { + return func() float64 { + return operatorRPSRatio() * rateFn() } } diff -Nru temporal-1.21.5-1/src/common/persistence/client/quotas_test.go temporal-1.22.5/src/common/persistence/client/quotas_test.go --- temporal-1.21.5-1/src/common/persistence/client/quotas_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/client/quotas_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -31,6 +31,7 @@ "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + "go.temporal.io/server/common/headers" "go.temporal.io/server/common/quotas" "golang.org/x/exp/slices" @@ -104,12 +105,13 @@ } func (s *quotasSuite) TestPriorityNamespaceRateLimiter_DoesLimit() { - var namespaceMaxRPS = func(namespace string) int { return 1 } - var hostMaxRPS = func() int { return 1 } + namespaceMaxRPS := func(namespace string) int { return 1 } + hostMaxRPS := func() int { return 1 } + operatorRPSRatioFn := func() float64 { return 0.2 } - var limiter = newPriorityNamespaceRateLimiter(namespaceMaxRPS, hostMaxRPS, RequestPriorityFn) + limiter := newPriorityNamespaceRateLimiter(namespaceMaxRPS, hostMaxRPS, RequestPriorityFn, operatorRPSRatioFn) - var request = quotas.NewRequest( + request := quotas.NewRequest( "test-api", 1, "test-namespace", @@ -131,12 +133,13 @@ } func (s *quotasSuite) TestPerShardNamespaceRateLimiter_DoesLimit() { - var perShardNamespaceMaxRPS = func(namespace string) int { return 1 } - var hostMaxRPS = func() int { return 1 } + perShardNamespaceMaxRPS := func(namespace string) int { return 1 } + hostMaxRPS := func() int { return 1 } + operatorRPSRatioFn := func() 
float64 { return 0.2 } - var limiter = newPerShardPerNamespacePriorityRateLimiter(perShardNamespaceMaxRPS, hostMaxRPS, RequestPriorityFn) + limiter := newPerShardPerNamespacePriorityRateLimiter(perShardNamespaceMaxRPS, hostMaxRPS, RequestPriorityFn, operatorRPSRatioFn) - var request = quotas.NewRequest( + request := quotas.NewRequest( "test-api", 1, "test-namespace", @@ -156,3 +159,36 @@ s.True(wasLimited) } + +func (s *quotasSuite) TestOperatorPrioritized() { + rateFn := func() float64 { return 5 } + operatorRPSRatioFn := func() float64 { return 0.2 } + limiter := newPriorityRateLimiter(rateFn, RequestPriorityFn, operatorRPSRatioFn) + + operatorRequest := quotas.NewRequest( + "DescribeWorkflowExecution", + 1, + "test-namespace", + headers.CallerTypeOperator, + -1, + "DescribeWorkflowExecution") + + apiRequest := quotas.NewRequest( + "DescribeWorkflowExecution", + 1, + "test-namespace", + headers.CallerTypeAPI, + -1, + "DescribeWorkflowExecution") + + requestTime := time.Now() + wasLimited := false + + for i := 0; i < 6; i++ { + if !limiter.Allow(requestTime, apiRequest) { + wasLimited = true + s.True(limiter.Allow(requestTime, operatorRequest)) + } + } + s.True(wasLimited) +} diff -Nru temporal-1.21.5-1/src/common/persistence/client/targeted_fault_injection.go temporal-1.22.5/src/common/persistence/client/targeted_fault_injection.go --- temporal-1.21.5-1/src/common/persistence/client/targeted_fault_injection.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/client/targeted_fault_injection.go 2024-02-23 09:45:43.000000000 +0000 @@ -32,6 +32,9 @@ "strings" "time" + enumspb "go.temporal.io/api/enums/v1" + "go.temporal.io/api/serviceerror" + "go.temporal.io/server/common/config" "go.temporal.io/server/common/persistence" ) @@ -42,24 +45,24 @@ methods := make(map[string]ErrorGenerator, len(cfg.Methods)) for methodName, methodConfig := range cfg.Methods { var faultWeights []FaultWeight - methodErrorRate := 0.0 - for errorName, errorRate := range methodConfig.Errors { - err := getErrorFromName(errorName) + methodErrRate := 0.0 + for errName, errRate := range methodConfig.Errors { + err := newError(errName, errRate) faultWeights = append(faultWeights, FaultWeight{ errFactory: func(data string) error { return err }, - weight: errorRate, + weight: errRate, }) - methodErrorRate += errorRate + methodErrRate += errRate } - errorGenerator := NewDefaultErrorGenerator(methodErrorRate, faultWeights) + errGenerator := NewDefaultErrorGenerator(methodErrRate, faultWeights) seed := methodConfig.Seed if seed == 0 { seed = time.Now().UnixNano() } - errorGenerator.r = rand.New(rand.NewSource(seed)) - methods[methodName] = errorGenerator + errGenerator.r = rand.New(rand.NewSource(seed)) + methods[methodName] = errGenerator } return &dataStoreErrorGenerator{MethodErrorGenerators: methods} } @@ -95,16 +98,20 @@ return methodErrorGenerator.Generate() } -// getErrorFromName returns an error based on the provided name. If the name is not recognized, then this method will +// newError returns an error based on the provided name. If the name is not recognized, then this method will // panic. 
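Together, the quotas.go and quotas_test.go changes above give headers.CallerTypeOperator its own top priority (0), shift the remaining priorities down, and size the operator bucket as OperatorRPSRatio times the normal rate, which TestOperatorPrioritized exercises. A small sketch of that rate derivation (the closure names below are illustrative, not the package's identifiers):

package main

import "fmt"

// operatorRate mirrors operatorRateFn: operator-priority requests get a
// configurable fraction of the host rate rather than the full rate.
func operatorRate(rateFn func() float64, operatorRPSRatio func() float64) func() float64 {
	return func() float64 {
		return operatorRPSRatio() * rateFn()
	}
}

func main() {
	hostRate := func() float64 { return 5 } // e.g. the host-level persistence max QPS
	ratio := func() float64 { return 0.2 }  // e.g. the OperatorRPSRatio dynamic config
	opRate := operatorRate(hostRate, ratio)
	fmt.Println(opRate()) // 1: the operator bucket is sized at 20% of the host rate,
	// but sits at priority 0 so it is served before API/background/preemptable traffic
}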
-func getErrorFromName(name string) error { - switch name { - case "ShardOwnershipLostError": - return &persistence.ShardOwnershipLostError{} - case "DeadlineExceededError": - return context.DeadlineExceeded +func newError(errName string, errRate float64) error { + switch errName { + case "ShardOwnershipLost": + return &persistence.ShardOwnershipLostError{Msg: fmt.Sprintf("fault injection error (%f): persistence.ShardOwnershipLostError", errRate)} + case "DeadlineExceeded": + return fmt.Errorf("fault injection error (%f): %w", errRate, context.DeadlineExceeded) + case "ResourceExhausted": + return serviceerror.NewResourceExhausted(enumspb.RESOURCE_EXHAUSTED_CAUSE_SYSTEM_OVERLOADED, fmt.Sprintf("fault injection error (%f): serviceerror.ResourceExhausted", errRate)) + case "Unavailable": + return serviceerror.NewUnavailable(fmt.Sprintf("fault injection error (%f): serviceerror.Unavailable", errRate)) default: - panic(fmt.Sprintf("unknown error type: %v", name)) + panic(fmt.Sprintf("unknown error type: %v", errName)) } } diff -Nru temporal-1.21.5-1/src/common/persistence/clusterMetadata.go temporal-1.22.5/src/common/persistence/clusterMetadata.go --- temporal-1.21.5-1/src/common/persistence/clusterMetadata.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/clusterMetadata.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,41 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
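newError above replaces getErrorFromName: the recognized names lose their "Error" suffix, "ResourceExhausted" and "Unavailable" become injectable, and each injected error message now embeds the configured rate. A sketch of the per-method error table the fault-injection store consumes (method names and rates below are made-up examples; only the four error names come from the code):

package main

import "fmt"

// methodErrors sketches the cfg.Methods -> Errors mapping consumed by the
// fault-injection data store. Any error name other than the four recognized
// by newError causes a panic at construction time.
var methodErrors = map[string]map[string]float64{
	"UpdateWorkflowExecution": {
		"ResourceExhausted": 0.02,
		"Unavailable":       0.01,
	},
	"GetWorkflowExecution": {
		"DeadlineExceeded": 0.05,
	},
}

func main() {
	for method, errs := range methodErrors {
		total := 0.0
		for name, rate := range errs {
			// Each entry becomes a FaultWeight; the per-method injection rate
			// is the sum of the configured error rates.
			fmt.Printf("%s: inject %s at weight %.2f\n", method, name, rate)
			total += rate
		}
		fmt.Printf("%s: overall injection rate %.2f\n", method, total)
	}
}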
- -package persistence - -// GetOrUseDefaultActiveCluster return the current cluster name or use the input if valid -func GetOrUseDefaultActiveCluster(currentClusterName string, activeClusterName string) string { - if len(activeClusterName) == 0 { - return currentClusterName - } - return activeClusterName -} - -// GetOrUseDefaultClusters return the current cluster or use the input if valid -func GetOrUseDefaultClusters(currentClusterName string, clusters []string) []string { - if len(clusters) == 0 { - return []string{currentClusterName} - } - return clusters -} diff -Nru temporal-1.21.5-1/src/common/persistence/clusterMetadataStore.go temporal-1.22.5/src/common/persistence/clusterMetadataStore.go --- temporal-1.21.5-1/src/common/persistence/clusterMetadataStore.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/clusterMetadataStore.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,248 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- -package persistence - -import ( - "context" - "errors" - - enumspb "go.temporal.io/api/enums/v1" - "go.temporal.io/api/serviceerror" - - persistencespb "go.temporal.io/server/api/persistence/v1" - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/persistence/serialization" -) - -const ( - clusterMetadataEncoding = enumspb.ENCODING_TYPE_PROTO3 -) - -var ( - // ErrInvalidMembershipExpiry is used when upserting new cluster membership with an invalid duration - ErrInvalidMembershipExpiry = errors.New("membershipExpiry duration should be atleast 1 second") - - // ErrIncompleteMembershipUpsert is used when upserting new cluster membership with missing fields - ErrIncompleteMembershipUpsert = errors.New("membership upserts require all fields") -) - -type ( - // clusterMetadataManagerImpl implements MetadataManager based on MetadataStore and Serializer - clusterMetadataManagerImpl struct { - serializer serialization.Serializer - persistence ClusterMetadataStore - currentClusterName string - logger log.Logger - } -) - -var _ ClusterMetadataManager = (*clusterMetadataManagerImpl)(nil) - -// NewClusterMetadataManagerImpl returns new ClusterMetadataManager -func NewClusterMetadataManagerImpl( - persistence ClusterMetadataStore, - serializer serialization.Serializer, - currentClusterName string, - logger log.Logger, -) ClusterMetadataManager { - return &clusterMetadataManagerImpl{ - serializer: serializer, - persistence: persistence, - currentClusterName: currentClusterName, - logger: logger, - } -} - -func (m *clusterMetadataManagerImpl) GetName() string { - return m.persistence.GetName() -} - -func (m *clusterMetadataManagerImpl) Close() { - m.persistence.Close() -} - -func (m *clusterMetadataManagerImpl) GetClusterMembers( - ctx context.Context, - request *GetClusterMembersRequest, -) (*GetClusterMembersResponse, error) { - return m.persistence.GetClusterMembers(ctx, request) -} - -func (m *clusterMetadataManagerImpl) UpsertClusterMembership( - ctx context.Context, - request *UpsertClusterMembershipRequest, -) error { - if request.RecordExpiry.Seconds() < 1 { - return ErrInvalidMembershipExpiry - } - if request.Role == All { - return ErrIncompleteMembershipUpsert - } - if request.RPCAddress == nil { - return ErrIncompleteMembershipUpsert - } - if request.RPCPort == 0 { - return ErrIncompleteMembershipUpsert - } - if request.SessionStart.IsZero() { - return ErrIncompleteMembershipUpsert - } - - return m.persistence.UpsertClusterMembership(ctx, request) -} - -func (m *clusterMetadataManagerImpl) PruneClusterMembership( - ctx context.Context, - request *PruneClusterMembershipRequest, -) error { - return m.persistence.PruneClusterMembership(ctx, request) -} - -func (m *clusterMetadataManagerImpl) ListClusterMetadata( - ctx context.Context, - request *ListClusterMetadataRequest, -) (*ListClusterMetadataResponse, error) { - resp, err := m.persistence.ListClusterMetadata(ctx, &InternalListClusterMetadataRequest{ - PageSize: request.PageSize, - NextPageToken: request.NextPageToken, - }) - if err != nil { - return nil, err - } - - clusterMetadata := make([]*GetClusterMetadataResponse, 0, len(resp.ClusterMetadata)) - for _, cm := range resp.ClusterMetadata { - res, err := m.convertInternalGetClusterMetadataResponse(cm) - if err != nil { - return nil, err - } - clusterMetadata = append(clusterMetadata, res) - } - return &ListClusterMetadataResponse{ClusterMetadata: clusterMetadata, NextPageToken: resp.NextPageToken}, nil -} - -func (m *clusterMetadataManagerImpl) GetCurrentClusterMetadata( 
- ctx context.Context, -) (*GetClusterMetadataResponse, error) { - resp, err := m.persistence.GetClusterMetadata(ctx, &InternalGetClusterMetadataRequest{ClusterName: m.currentClusterName}) - if err != nil { - return nil, err - } - - mcm, err := m.serializer.DeserializeClusterMetadata(resp.ClusterMetadata) - if err != nil { - return nil, err - } - return &GetClusterMetadataResponse{ClusterMetadata: *mcm, Version: resp.Version}, nil -} - -func (m *clusterMetadataManagerImpl) GetClusterMetadata( - ctx context.Context, - request *GetClusterMetadataRequest, -) (*GetClusterMetadataResponse, error) { - resp, err := m.persistence.GetClusterMetadata(ctx, &InternalGetClusterMetadataRequest{ClusterName: request.ClusterName}) - if err != nil { - return nil, err - } - - mcm, err := m.serializer.DeserializeClusterMetadata(resp.ClusterMetadata) - if err != nil { - return nil, err - } - return &GetClusterMetadataResponse{ClusterMetadata: *mcm, Version: resp.Version}, nil -} - -func (m *clusterMetadataManagerImpl) SaveClusterMetadata( - ctx context.Context, - request *SaveClusterMetadataRequest, -) (bool, error) { - mcm, err := m.serializer.SerializeClusterMetadata(&request.ClusterMetadata, clusterMetadataEncoding) - if err != nil { - return false, err - } - - oldClusterMetadata, err := m.GetClusterMetadata(ctx, &GetClusterMetadataRequest{ClusterName: request.GetClusterName()}) - if _, isNotFound := err.(*serviceerror.NotFound); isNotFound { - return m.persistence.SaveClusterMetadata(ctx, &InternalSaveClusterMetadataRequest{ - ClusterName: request.ClusterName, - ClusterMetadata: mcm, - Version: request.Version, - }) - } - if err != nil { - return false, err - } - if immutableFieldsChanged(oldClusterMetadata.ClusterMetadata, request.ClusterMetadata) { - return false, nil - } - - return m.persistence.SaveClusterMetadata(ctx, &InternalSaveClusterMetadataRequest{ - ClusterName: request.ClusterName, - ClusterMetadata: mcm, - Version: request.Version, - }) -} - -func (m *clusterMetadataManagerImpl) DeleteClusterMetadata( - ctx context.Context, - request *DeleteClusterMetadataRequest, -) error { - if request.ClusterName == m.currentClusterName { - return serviceerror.NewInvalidArgument("Cannot delete current cluster metadata") - } - - return m.persistence.DeleteClusterMetadata(ctx, &InternalDeleteClusterMetadataRequest{ClusterName: request.ClusterName}) -} - -func (m *clusterMetadataManagerImpl) convertInternalGetClusterMetadataResponse( - resp *InternalGetClusterMetadataResponse, -) (*GetClusterMetadataResponse, error) { - mcm, err := m.serializer.DeserializeClusterMetadata(resp.ClusterMetadata) - if err != nil { - return nil, err - } - - return &GetClusterMetadataResponse{ - ClusterMetadata: *mcm, - Version: resp.Version, - }, nil -} - -// immutableFieldsChanged returns true if any of immutable fields changed. 
-func immutableFieldsChanged(old persistencespb.ClusterMetadata, cur persistencespb.ClusterMetadata) bool { - if (old.ClusterName != "" && old.ClusterName != cur.ClusterName) || - (old.ClusterId != "" && old.ClusterId != cur.ClusterId) || - (old.HistoryShardCount != 0 && old.HistoryShardCount != cur.HistoryShardCount) || - (old.IsGlobalNamespaceEnabled && !cur.IsGlobalNamespaceEnabled) { - return true - } - if old.IsGlobalNamespaceEnabled { - if (old.FailoverVersionIncrement != 0 && old.FailoverVersionIncrement != cur.FailoverVersionIncrement) || - (old.InitialFailoverVersion != 0 && old.InitialFailoverVersion != cur.InitialFailoverVersion) { - return true - } - } - return false -} diff -Nru temporal-1.21.5-1/src/common/persistence/cluster_metadata.go temporal-1.22.5/src/common/persistence/cluster_metadata.go --- temporal-1.21.5-1/src/common/persistence/cluster_metadata.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/cluster_metadata.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,41 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package persistence + +// GetOrUseDefaultActiveCluster return the current cluster name or use the input if valid +func GetOrUseDefaultActiveCluster(currentClusterName string, activeClusterName string) string { + if len(activeClusterName) == 0 { + return currentClusterName + } + return activeClusterName +} + +// GetOrUseDefaultClusters return the current cluster or use the input if valid +func GetOrUseDefaultClusters(currentClusterName string, clusters []string) []string { + if len(clusters) == 0 { + return []string{currentClusterName} + } + return clusters +} diff -Nru temporal-1.21.5-1/src/common/persistence/cluster_metadata_store.go temporal-1.22.5/src/common/persistence/cluster_metadata_store.go --- temporal-1.21.5-1/src/common/persistence/cluster_metadata_store.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/cluster_metadata_store.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,248 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. 
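clusterMetadataStore.go is removed here and re-added below as cluster_metadata_store.go (a rename to snake_case); SaveClusterMetadata keeps rejecting, by returning false without an error, any update that touches an immutable field. A runnable sketch of that check with a plain struct standing in for persistencespb.ClusterMetadata (field subset only):

package main

import "fmt"

// meta is a hypothetical stand-in for the fields of persistencespb.ClusterMetadata
// that immutableFieldsChanged inspects.
type meta struct {
	ClusterName              string
	ClusterId                string
	HistoryShardCount        int32
	IsGlobalNamespaceEnabled bool
	FailoverVersionIncrement int64
	InitialFailoverVersion   int64
}

// immutableFieldsChanged mirrors the check in cluster_metadata_store.go: once
// set, these fields may not change, and global namespaces cannot be disabled.
func immutableFieldsChanged(old, cur meta) bool {
	if (old.ClusterName != "" && old.ClusterName != cur.ClusterName) ||
		(old.ClusterId != "" && old.ClusterId != cur.ClusterId) ||
		(old.HistoryShardCount != 0 && old.HistoryShardCount != cur.HistoryShardCount) ||
		(old.IsGlobalNamespaceEnabled && !cur.IsGlobalNamespaceEnabled) {
		return true
	}
	if old.IsGlobalNamespaceEnabled {
		if (old.FailoverVersionIncrement != 0 && old.FailoverVersionIncrement != cur.FailoverVersionIncrement) ||
			(old.InitialFailoverVersion != 0 && old.InitialFailoverVersion != cur.InitialFailoverVersion) {
			return true
		}
	}
	return false
}

func main() {
	old := meta{ClusterName: "active", HistoryShardCount: 512, IsGlobalNamespaceEnabled: true}
	cur := old
	cur.HistoryShardCount = 1024
	fmt.Println(immutableFieldsChanged(old, cur)) // true: SaveClusterMetadata would return (false, nil)
}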
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package persistence + +import ( + "context" + "errors" + + enumspb "go.temporal.io/api/enums/v1" + "go.temporal.io/api/serviceerror" + + persistencespb "go.temporal.io/server/api/persistence/v1" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/persistence/serialization" +) + +const ( + clusterMetadataEncoding = enumspb.ENCODING_TYPE_PROTO3 +) + +var ( + // ErrInvalidMembershipExpiry is used when upserting new cluster membership with an invalid duration + ErrInvalidMembershipExpiry = errors.New("membershipExpiry duration should be atleast 1 second") + + // ErrIncompleteMembershipUpsert is used when upserting new cluster membership with missing fields + ErrIncompleteMembershipUpsert = errors.New("membership upserts require all fields") +) + +type ( + // clusterMetadataManagerImpl implements MetadataManager based on MetadataStore and Serializer + clusterMetadataManagerImpl struct { + serializer serialization.Serializer + persistence ClusterMetadataStore + currentClusterName string + logger log.Logger + } +) + +var _ ClusterMetadataManager = (*clusterMetadataManagerImpl)(nil) + +// NewClusterMetadataManagerImpl returns new ClusterMetadataManager +func NewClusterMetadataManagerImpl( + persistence ClusterMetadataStore, + serializer serialization.Serializer, + currentClusterName string, + logger log.Logger, +) ClusterMetadataManager { + return &clusterMetadataManagerImpl{ + serializer: serializer, + persistence: persistence, + currentClusterName: currentClusterName, + logger: logger, + } +} + +func (m *clusterMetadataManagerImpl) GetName() string { + return m.persistence.GetName() +} + +func (m *clusterMetadataManagerImpl) Close() { + m.persistence.Close() +} + +func (m *clusterMetadataManagerImpl) GetClusterMembers( + ctx context.Context, + request *GetClusterMembersRequest, +) (*GetClusterMembersResponse, error) { + return m.persistence.GetClusterMembers(ctx, request) +} + +func (m *clusterMetadataManagerImpl) UpsertClusterMembership( + ctx context.Context, + request *UpsertClusterMembershipRequest, +) error { + if request.RecordExpiry.Seconds() < 1 { + return ErrInvalidMembershipExpiry + } + if request.Role == All { + return ErrIncompleteMembershipUpsert + } + if request.RPCAddress == nil { + return ErrIncompleteMembershipUpsert + } + if request.RPCPort == 0 { + return ErrIncompleteMembershipUpsert + } + if request.SessionStart.IsZero() { + return ErrIncompleteMembershipUpsert + } + + return 
m.persistence.UpsertClusterMembership(ctx, request) +} + +func (m *clusterMetadataManagerImpl) PruneClusterMembership( + ctx context.Context, + request *PruneClusterMembershipRequest, +) error { + return m.persistence.PruneClusterMembership(ctx, request) +} + +func (m *clusterMetadataManagerImpl) ListClusterMetadata( + ctx context.Context, + request *ListClusterMetadataRequest, +) (*ListClusterMetadataResponse, error) { + resp, err := m.persistence.ListClusterMetadata(ctx, &InternalListClusterMetadataRequest{ + PageSize: request.PageSize, + NextPageToken: request.NextPageToken, + }) + if err != nil { + return nil, err + } + + clusterMetadata := make([]*GetClusterMetadataResponse, 0, len(resp.ClusterMetadata)) + for _, cm := range resp.ClusterMetadata { + res, err := m.convertInternalGetClusterMetadataResponse(cm) + if err != nil { + return nil, err + } + clusterMetadata = append(clusterMetadata, res) + } + return &ListClusterMetadataResponse{ClusterMetadata: clusterMetadata, NextPageToken: resp.NextPageToken}, nil +} + +func (m *clusterMetadataManagerImpl) GetCurrentClusterMetadata( + ctx context.Context, +) (*GetClusterMetadataResponse, error) { + resp, err := m.persistence.GetClusterMetadata(ctx, &InternalGetClusterMetadataRequest{ClusterName: m.currentClusterName}) + if err != nil { + return nil, err + } + + mcm, err := m.serializer.DeserializeClusterMetadata(resp.ClusterMetadata) + if err != nil { + return nil, err + } + return &GetClusterMetadataResponse{ClusterMetadata: *mcm, Version: resp.Version}, nil +} + +func (m *clusterMetadataManagerImpl) GetClusterMetadata( + ctx context.Context, + request *GetClusterMetadataRequest, +) (*GetClusterMetadataResponse, error) { + resp, err := m.persistence.GetClusterMetadata(ctx, &InternalGetClusterMetadataRequest{ClusterName: request.ClusterName}) + if err != nil { + return nil, err + } + + mcm, err := m.serializer.DeserializeClusterMetadata(resp.ClusterMetadata) + if err != nil { + return nil, err + } + return &GetClusterMetadataResponse{ClusterMetadata: *mcm, Version: resp.Version}, nil +} + +func (m *clusterMetadataManagerImpl) SaveClusterMetadata( + ctx context.Context, + request *SaveClusterMetadataRequest, +) (bool, error) { + mcm, err := m.serializer.SerializeClusterMetadata(&request.ClusterMetadata, clusterMetadataEncoding) + if err != nil { + return false, err + } + + oldClusterMetadata, err := m.GetClusterMetadata(ctx, &GetClusterMetadataRequest{ClusterName: request.GetClusterName()}) + if _, isNotFound := err.(*serviceerror.NotFound); isNotFound { + return m.persistence.SaveClusterMetadata(ctx, &InternalSaveClusterMetadataRequest{ + ClusterName: request.ClusterName, + ClusterMetadata: mcm, + Version: request.Version, + }) + } + if err != nil { + return false, err + } + if immutableFieldsChanged(oldClusterMetadata.ClusterMetadata, request.ClusterMetadata) { + return false, nil + } + + return m.persistence.SaveClusterMetadata(ctx, &InternalSaveClusterMetadataRequest{ + ClusterName: request.ClusterName, + ClusterMetadata: mcm, + Version: request.Version, + }) +} + +func (m *clusterMetadataManagerImpl) DeleteClusterMetadata( + ctx context.Context, + request *DeleteClusterMetadataRequest, +) error { + if request.ClusterName == m.currentClusterName { + return serviceerror.NewInvalidArgument("Cannot delete current cluster metadata") + } + + return m.persistence.DeleteClusterMetadata(ctx, &InternalDeleteClusterMetadataRequest{ClusterName: request.ClusterName}) +} + +func (m *clusterMetadataManagerImpl) 
convertInternalGetClusterMetadataResponse( + resp *InternalGetClusterMetadataResponse, +) (*GetClusterMetadataResponse, error) { + mcm, err := m.serializer.DeserializeClusterMetadata(resp.ClusterMetadata) + if err != nil { + return nil, err + } + + return &GetClusterMetadataResponse{ + ClusterMetadata: *mcm, + Version: resp.Version, + }, nil +} + +// immutableFieldsChanged returns true if any of immutable fields changed. +func immutableFieldsChanged(old persistencespb.ClusterMetadata, cur persistencespb.ClusterMetadata) bool { + if (old.ClusterName != "" && old.ClusterName != cur.ClusterName) || + (old.ClusterId != "" && old.ClusterId != cur.ClusterId) || + (old.HistoryShardCount != 0 && old.HistoryShardCount != cur.HistoryShardCount) || + (old.IsGlobalNamespaceEnabled && !cur.IsGlobalNamespaceEnabled) { + return true + } + if old.IsGlobalNamespaceEnabled { + if (old.FailoverVersionIncrement != 0 && old.FailoverVersionIncrement != cur.FailoverVersionIncrement) || + (old.InitialFailoverVersion != 0 && old.InitialFailoverVersion != cur.InitialFailoverVersion) { + return true + } + } + return false +} diff -Nru temporal-1.21.5-1/src/common/persistence/dataInterfaces.go temporal-1.22.5/src/common/persistence/dataInterfaces.go --- temporal-1.21.5-1/src/common/persistence/dataInterfaces.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/dataInterfaces.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,1299 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
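Editorial aside, not part of the patch: the SaveClusterMetadata path added in the new cluster_metadata_store.go above refuses to overwrite fields it treats as write-once (cluster name, cluster id, history shard count, the global-namespace flag). A minimal sketch of that guard, assuming it sits alongside the code above in the persistence package (immutableFieldsChanged is unexported) and using only the persistencespb fields the file already references:

func exampleImmutableGuard() {
	// Hypothetical values for illustration only.
	old := persistencespb.ClusterMetadata{ClusterName: "active", HistoryShardCount: 4}
	cur := persistencespb.ClusterMetadata{ClusterName: "active", HistoryShardCount: 8}

	// true: the shard count of an existing record may not change, so
	// SaveClusterMetadata would return (false, nil) instead of persisting it.
	changedShards := immutableFieldsChanged(old, cur)

	// false: populating fields that were still zero-valued is permitted,
	// which is how the record is filled in on first save.
	firstWrite := immutableFieldsChanged(persistencespb.ClusterMetadata{}, cur)

	_, _ = changedShards, firstWrite
}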
- -//go:generate mockgen -copyright_file ../../LICENSE -package $GOPACKAGE -source $GOFILE -destination dataInterfaces_mock.go - -package persistence - -import ( - "context" - "fmt" - "net" - "strings" - "time" - - "github.com/pborman/uuid" - commonpb "go.temporal.io/api/common/v1" - enumspb "go.temporal.io/api/enums/v1" - historypb "go.temporal.io/api/history/v1" - - enumsspb "go.temporal.io/server/api/enums/v1" - persistencespb "go.temporal.io/server/api/persistence/v1" - "go.temporal.io/server/service/history/tasks" -) - -// CreateWorkflowMode workflow creation mode -type CreateWorkflowMode int - -// QueueType is an enum that represents various queue types in persistence -type QueueType int32 - -// Queue types used in queue table -// Use positive numbers for queue type -// Negative numbers are reserved for DLQ - -const ( - NamespaceReplicationQueueType QueueType = iota + 1 -) - -// Create Workflow Execution Mode -const ( - // CreateWorkflowModeBrandNew fail if current record exists - // Only applicable for CreateWorkflowExecution - CreateWorkflowModeBrandNew CreateWorkflowMode = iota - // CreateWorkflowModeUpdateCurrent update current record only if workflow is closed - // Only applicable for CreateWorkflowExecution - CreateWorkflowModeUpdateCurrent - // CreateWorkflowModeBypassCurrent do not update current record since workflow is in zombie state - // applicable for CreateWorkflowExecution, UpdateWorkflowExecution - CreateWorkflowModeBypassCurrent -) - -// UpdateWorkflowMode update mode -type UpdateWorkflowMode int - -// Update Workflow Execution Mode -const ( - // UpdateWorkflowModeUpdateCurrent update workflow, including current record - // NOTE: update on current record is a condition update - UpdateWorkflowModeUpdateCurrent UpdateWorkflowMode = iota - // UpdateWorkflowModeBypassCurrent update workflow, without current record - // NOTE: current record CANNOT point to the workflow to be updated - UpdateWorkflowModeBypassCurrent -) - -// ConflictResolveWorkflowMode conflict resolve mode -type ConflictResolveWorkflowMode int - -// Conflict Resolve Workflow Mode -const ( - // ConflictResolveWorkflowModeUpdateCurrent conflict resolve workflow, including current record - // NOTE: update on current record is a condition update - ConflictResolveWorkflowModeUpdateCurrent ConflictResolveWorkflowMode = iota - // ConflictResolveWorkflowModeBypassCurrent conflict resolve workflow, without current record - // NOTE: current record CANNOT point to the workflow to be updated - ConflictResolveWorkflowModeBypassCurrent -) - -// UnknownNumRowsAffected is returned when the number of rows that an API affected cannot be determined -const UnknownNumRowsAffected = -1 - -const ( - // InitialFailoverNotificationVersion is the initial failover version for a namespace - InitialFailoverNotificationVersion int64 = 0 -) - -const numItemsInGarbageInfo = 3 - -const ScheduledTaskMinPrecision = time.Millisecond - -type ( - // InvalidPersistenceRequestError represents invalid request to persistence - InvalidPersistenceRequestError struct { - Msg string - } - - // AppendHistoryTimeoutError represents a failed insert to history tree / node request - AppendHistoryTimeoutError struct { - Msg string - } - - // CurrentWorkflowConditionFailedError represents a failed conditional update for current workflow record - CurrentWorkflowConditionFailedError struct { - Msg string - RequestID string - RunID string - State enumsspb.WorkflowExecutionState - Status enumspb.WorkflowExecutionStatus - LastWriteVersion int64 - } - - // 
WorkflowConditionFailedError represents a failed conditional update for workflow record - WorkflowConditionFailedError struct { - Msg string - NextEventID int64 - DBRecordVersion int64 - } - - // ConditionFailedError represents a failed conditional update for execution record - ConditionFailedError struct { - Msg string - } - - // ShardAlreadyExistError is returned when conditionally creating a shard fails - ShardAlreadyExistError struct { - Msg string - } - - // ShardOwnershipLostError is returned when conditional update fails due to RangeID for the shard - ShardOwnershipLostError struct { - ShardID int32 - Msg string - } - - // TimeoutError is returned when a write operation fails due to a timeout - TimeoutError struct { - Msg string - } - - // TransactionSizeLimitError is returned when the transaction size is too large - TransactionSizeLimitError struct { - Msg string - } - - // TaskQueueKey is the struct used to identity TaskQueues - TaskQueueKey struct { - NamespaceID string - TaskQueueName string - TaskQueueType enumspb.TaskQueueType - } - - // GetOrCreateShardRequest is used to get shard information, or supply - // initial information to create a shard in executions table - GetOrCreateShardRequest struct { - ShardID int32 - InitialShardInfo *persistencespb.ShardInfo // optional, zero value will be used if missing - LifecycleContext context.Context // cancelled when shard is unloaded - } - - // GetOrCreateShardResponse is the response to GetOrCreateShard - GetOrCreateShardResponse struct { - ShardInfo *persistencespb.ShardInfo - } - - // UpdateShardRequest is used to update shard information - UpdateShardRequest struct { - ShardInfo *persistencespb.ShardInfo - PreviousRangeID int64 - } - - // AssertShardOwnershipRequest is used to assert shard ownership - AssertShardOwnershipRequest struct { - ShardID int32 - RangeID int64 - } - - // AddHistoryTasksRequest is used to write new tasks - AddHistoryTasksRequest struct { - ShardID int32 - RangeID int64 - - NamespaceID string - WorkflowID string - RunID string - - Tasks map[tasks.Category][]tasks.Task - } - - // CreateWorkflowExecutionRequest is used to write a new workflow execution - CreateWorkflowExecutionRequest struct { - ShardID int32 - RangeID int64 - - Mode CreateWorkflowMode - - PreviousRunID string - PreviousLastWriteVersion int64 - - NewWorkflowSnapshot WorkflowSnapshot - NewWorkflowEvents []*WorkflowEvents - } - - // CreateWorkflowExecutionResponse is the response to CreateWorkflowExecutionRequest - CreateWorkflowExecutionResponse struct { - NewMutableStateStats MutableStateStatistics - } - - // UpdateWorkflowExecutionRequest is used to update a workflow execution - UpdateWorkflowExecutionRequest struct { - ShardID int32 - RangeID int64 - - Mode UpdateWorkflowMode - - UpdateWorkflowMutation WorkflowMutation - UpdateWorkflowEvents []*WorkflowEvents - NewWorkflowSnapshot *WorkflowSnapshot - NewWorkflowEvents []*WorkflowEvents - } - - // UpdateWorkflowExecutionResponse is response for UpdateWorkflowExecutionRequest - UpdateWorkflowExecutionResponse struct { - UpdateMutableStateStats MutableStateStatistics - NewMutableStateStats *MutableStateStatistics - } - - // ConflictResolveWorkflowExecutionRequest is used to reset workflow execution state for a single run - ConflictResolveWorkflowExecutionRequest struct { - ShardID int32 - RangeID int64 - - Mode ConflictResolveWorkflowMode - - // workflow to be resetted - ResetWorkflowSnapshot WorkflowSnapshot - ResetWorkflowEvents []*WorkflowEvents - - // maybe new workflow - 
NewWorkflowSnapshot *WorkflowSnapshot - NewWorkflowEvents []*WorkflowEvents - - // current workflow - CurrentWorkflowMutation *WorkflowMutation - CurrentWorkflowEvents []*WorkflowEvents - } - - ConflictResolveWorkflowExecutionResponse struct { - ResetMutableStateStats MutableStateStatistics - NewMutableStateStats *MutableStateStatistics - CurrentMutableStateStats *MutableStateStatistics - } - - // GetCurrentExecutionRequest is used to retrieve the current RunId for an execution - GetCurrentExecutionRequest struct { - ShardID int32 - NamespaceID string - WorkflowID string - } - - // GetCurrentExecutionResponse is the response to GetCurrentExecution - GetCurrentExecutionResponse struct { - StartRequestID string - RunID string - State enumsspb.WorkflowExecutionState - Status enumspb.WorkflowExecutionStatus - } - - // GetWorkflowExecutionRequest is used to retrieve the info of a workflow execution - GetWorkflowExecutionRequest struct { - ShardID int32 - NamespaceID string - WorkflowID string - RunID string - } - - // GetWorkflowExecutionResponse is the response to GetWorkflowExecutionRequest - GetWorkflowExecutionResponse struct { - State *persistencespb.WorkflowMutableState - DBRecordVersion int64 - MutableStateStats MutableStateStatistics - } - - // SetWorkflowExecutionRequest is used to overwrite the info of a workflow execution - SetWorkflowExecutionRequest struct { - ShardID int32 - RangeID int64 - - SetWorkflowSnapshot WorkflowSnapshot - } - - // SetWorkflowExecutionResponse is the response to SetWorkflowExecutionRequest - SetWorkflowExecutionResponse struct { - } - - // ListConcreteExecutionsRequest is request to ListConcreteExecutions - ListConcreteExecutionsRequest struct { - ShardID int32 - PageSize int - PageToken []byte - } - - // ListConcreteExecutionsResponse is response to ListConcreteExecutions - ListConcreteExecutionsResponse struct { - States []*persistencespb.WorkflowMutableState - PageToken []byte - } - - // WorkflowEvents is used as generic workflow history events transaction container - WorkflowEvents struct { - NamespaceID string - WorkflowID string - RunID string - BranchToken []byte - PrevTxnID int64 - TxnID int64 - Events []*historypb.HistoryEvent - } - - // WorkflowMutation is used as generic workflow execution state mutation - WorkflowMutation struct { - ExecutionInfo *persistencespb.WorkflowExecutionInfo - ExecutionState *persistencespb.WorkflowExecutionState - // TODO deprecate NextEventID in favor of DBRecordVersion - NextEventID int64 - - UpsertActivityInfos map[int64]*persistencespb.ActivityInfo - DeleteActivityInfos map[int64]struct{} - UpsertTimerInfos map[string]*persistencespb.TimerInfo - DeleteTimerInfos map[string]struct{} - UpsertChildExecutionInfos map[int64]*persistencespb.ChildExecutionInfo - DeleteChildExecutionInfos map[int64]struct{} - UpsertRequestCancelInfos map[int64]*persistencespb.RequestCancelInfo - DeleteRequestCancelInfos map[int64]struct{} - UpsertSignalInfos map[int64]*persistencespb.SignalInfo - DeleteSignalInfos map[int64]struct{} - UpsertSignalRequestedIDs map[string]struct{} - DeleteSignalRequestedIDs map[string]struct{} - NewBufferedEvents []*historypb.HistoryEvent - ClearBufferedEvents bool - - Tasks map[tasks.Category][]tasks.Task - - // TODO deprecate Condition in favor of DBRecordVersion - Condition int64 - DBRecordVersion int64 - Checksum *persistencespb.Checksum - } - - // WorkflowSnapshot is used as generic workflow execution state snapshot - WorkflowSnapshot struct { - ExecutionInfo *persistencespb.WorkflowExecutionInfo - 
ExecutionState *persistencespb.WorkflowExecutionState - // TODO deprecate NextEventID in favor of DBRecordVersion - NextEventID int64 - - ActivityInfos map[int64]*persistencespb.ActivityInfo - TimerInfos map[string]*persistencespb.TimerInfo - ChildExecutionInfos map[int64]*persistencespb.ChildExecutionInfo - RequestCancelInfos map[int64]*persistencespb.RequestCancelInfo - SignalInfos map[int64]*persistencespb.SignalInfo - SignalRequestedIDs map[string]struct{} - - Tasks map[tasks.Category][]tasks.Task - - // TODO deprecate Condition in favor of DBRecordVersion - Condition int64 - DBRecordVersion int64 - Checksum *persistencespb.Checksum - } - - // DeleteWorkflowExecutionRequest is used to delete a workflow execution - DeleteWorkflowExecutionRequest struct { - ShardID int32 - NamespaceID string - WorkflowID string - RunID string - } - - // DeleteCurrentWorkflowExecutionRequest is used to delete the current workflow execution - DeleteCurrentWorkflowExecutionRequest struct { - ShardID int32 - NamespaceID string - WorkflowID string - RunID string - } - - // RegisterHistoryTaskReaderRequest is a hint for underlying persistence implementation - // that a new queue reader is created by queue processing logic - RegisterHistoryTaskReaderRequest struct { - ShardID int32 - ShardOwner string - TaskCategory tasks.Category - ReaderID int64 - } - - // UnregisterHistoryTaskReaderRequest is a hint for underlying persistence implementation - // that queue processing logic is done using an existing queue reader - UnregisterHistoryTaskReaderRequest RegisterHistoryTaskReaderRequest - - // UpdateHistoryTaskReaderProgressRequest is a hint for underlying persistence implementation - // that a certain queue reader's process and the fact that it won't try to load tasks with - // key less than InclusiveMinPendingTaskKey - UpdateHistoryTaskReaderProgressRequest struct { - ShardID int32 - ShardOwner string - TaskCategory tasks.Category - ReaderID int64 - InclusiveMinPendingTaskKey tasks.Key - } - - // GetHistoryTasksRequest is used to get a range of history tasks - // Either max TaskID or FireTime is required depending on the - // task category type. Min TaskID or FireTime is optional. - GetHistoryTasksRequest struct { - ShardID int32 - TaskCategory tasks.Category - ReaderID int64 - InclusiveMinTaskKey tasks.Key - ExclusiveMaxTaskKey tasks.Key - BatchSize int - NextPageToken []byte - } - - // GetHistoryTasksResponse is the response for GetHistoryTasks - GetHistoryTasksResponse struct { - Tasks []tasks.Task - NextPageToken []byte - } - - // CompleteHistoryTaskRequest delete one history task - CompleteHistoryTaskRequest struct { - ShardID int32 - TaskCategory tasks.Category - TaskKey tasks.Key - } - - // RangeCompleteHistoryTasksRequest deletes a range of history tasks - // Either max TaskID or FireTime is required depending on the - // task category type. Min TaskID or FireTime is optional. 
- RangeCompleteHistoryTasksRequest struct { - ShardID int32 - TaskCategory tasks.Category - InclusiveMinTaskKey tasks.Key - ExclusiveMaxTaskKey tasks.Key - } - - // GetReplicationTasksRequest is used to read tasks from the replication task queue - GetReplicationTasksRequest struct { - ShardID int32 - MinTaskID int64 - MaxTaskID int64 - BatchSize int - NextPageToken []byte - } - - // PutReplicationTaskToDLQRequest is used to put a replication task to dlq - PutReplicationTaskToDLQRequest struct { - ShardID int32 - SourceClusterName string - TaskInfo *persistencespb.ReplicationTaskInfo - } - - // GetReplicationTasksFromDLQRequest is used to get replication tasks from dlq - GetReplicationTasksFromDLQRequest struct { - GetHistoryTasksRequest - - SourceClusterName string - } - - // DeleteReplicationTaskFromDLQRequest is used to delete replication task from DLQ - DeleteReplicationTaskFromDLQRequest struct { - CompleteHistoryTaskRequest - - SourceClusterName string - } - - // RangeDeleteReplicationTaskFromDLQRequest is used to delete replication tasks from DLQ - RangeDeleteReplicationTaskFromDLQRequest struct { - RangeCompleteHistoryTasksRequest - - SourceClusterName string - } - - // CreateTaskQueueRequest create a new task queue - CreateTaskQueueRequest struct { - RangeID int64 - TaskQueueInfo *persistencespb.TaskQueueInfo - } - - // CreateTaskQueueResponse is the response to CreateTaskQueue - CreateTaskQueueResponse struct { - } - - // UpdateTaskQueueRequest is used to update task queue implementation information - UpdateTaskQueueRequest struct { - RangeID int64 - TaskQueueInfo *persistencespb.TaskQueueInfo - - PrevRangeID int64 - } - - // UpdateTaskQueueResponse is the response to UpdateTaskQueue - UpdateTaskQueueResponse struct { - } - - // GetTaskQueueRequest get the target task queue - GetTaskQueueRequest struct { - NamespaceID string - TaskQueue string - TaskType enumspb.TaskQueueType - } - - // GetTaskQueueResponse is the response to GetTaskQueue - GetTaskQueueResponse struct { - RangeID int64 - TaskQueueInfo *persistencespb.TaskQueueInfo - } - - // GetTaskQueueUserDataRequest is the input type for the GetTaskQueueUserData API - GetTaskQueueUserDataRequest struct { - NamespaceID string - TaskQueue string - } - - // GetTaskQueueUserDataResponse is the output type for the GetTaskQueueUserData API - GetTaskQueueUserDataResponse struct { - UserData *persistencespb.VersionedTaskQueueUserData - } - - // UpdateTaskQueueUserDataRequest is the input type for the UpdateTaskQueueUserData API - UpdateTaskQueueUserDataRequest struct { - NamespaceID string - TaskQueue string - UserData *persistencespb.VersionedTaskQueueUserData - BuildIdsAdded []string - BuildIdsRemoved []string - } - - ListTaskQueueUserDataEntriesRequest struct { - NamespaceID string - PageSize int - NextPageToken []byte - } - - TaskQueueUserDataEntry struct { - TaskQueue string - UserData *persistencespb.VersionedTaskQueueUserData - } - - ListTaskQueueUserDataEntriesResponse struct { - NextPageToken []byte - Entries []*TaskQueueUserDataEntry - } - - GetTaskQueuesByBuildIdRequest struct { - NamespaceID string - BuildID string - } - - CountTaskQueuesByBuildIdRequest struct { - NamespaceID string - BuildID string - } - - // ListTaskQueueRequest contains the request params needed to invoke ListTaskQueue API - ListTaskQueueRequest struct { - PageSize int - PageToken []byte - } - - // ListTaskQueueResponse is the response from ListTaskQueue API - ListTaskQueueResponse struct { - Items []*PersistedTaskQueueInfo - NextPageToken []byte - } - 
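Editorial aside, not part of the patch: the paged request/response pairs in the interfaces above (ListTaskQueue, ListNamespaces, ListClusterMetadata, and so on) all follow the same token-driven pattern. A minimal sketch, assuming a TaskManager instance is available and using the ListTaskQueueRequest/ListTaskQueueResponse types shown above; the page size is a hypothetical value:

func listAllTaskQueues(ctx context.Context, mgr TaskManager) ([]*PersistedTaskQueueInfo, error) {
	var (
		items []*PersistedTaskQueueInfo
		token []byte // empty slice requests the first page
	)
	for {
		resp, err := mgr.ListTaskQueue(ctx, &ListTaskQueueRequest{
			PageSize:  100,
			PageToken: token,
		})
		if err != nil {
			return nil, err
		}
		items = append(items, resp.Items...)
		if len(resp.NextPageToken) == 0 {
			// An empty token means the last page has been reached.
			return items, nil
		}
		token = resp.NextPageToken
	}
}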
- // DeleteTaskQueueRequest contains the request params needed to invoke DeleteTaskQueue API - DeleteTaskQueueRequest struct { - TaskQueue *TaskQueueKey - RangeID int64 - } - - // CreateTasksRequest is used to create a new task for a workflow execution - CreateTasksRequest struct { - TaskQueueInfo *PersistedTaskQueueInfo - Tasks []*persistencespb.AllocatedTaskInfo - } - - // CreateTasksResponse is the response to CreateTasksRequest - CreateTasksResponse struct { - } - - PersistedTaskQueueInfo struct { - Data *persistencespb.TaskQueueInfo - RangeID int64 - } - - // GetTasksRequest is used to retrieve tasks of a task queue - GetTasksRequest struct { - NamespaceID string - TaskQueue string - TaskType enumspb.TaskQueueType - InclusiveMinTaskID int64 - ExclusiveMaxTaskID int64 - PageSize int - NextPageToken []byte - } - - // GetTasksResponse is the response to GetTasksRequests - GetTasksResponse struct { - Tasks []*persistencespb.AllocatedTaskInfo - NextPageToken []byte - } - - // CompleteTaskRequest is used to complete a task - CompleteTaskRequest struct { - TaskQueue *TaskQueueKey - TaskID int64 - } - - // CompleteTasksLessThanRequest contains the request params needed to invoke CompleteTasksLessThan API - CompleteTasksLessThanRequest struct { - NamespaceID string - TaskQueueName string - TaskType enumspb.TaskQueueType - ExclusiveMaxTaskID int64 // Tasks less than this ID will be completed - Limit int // Limit on the max number of tasks that can be completed. Required param - } - - // CreateNamespaceRequest is used to create the namespace - CreateNamespaceRequest struct { - Namespace *persistencespb.NamespaceDetail - IsGlobalNamespace bool - } - - // CreateNamespaceResponse is the response for CreateNamespace - CreateNamespaceResponse struct { - ID string - } - - // GetNamespaceRequest is used to read namespace - GetNamespaceRequest struct { - ID string - Name string - } - - // GetNamespaceResponse is the response for GetNamespace - GetNamespaceResponse struct { - Namespace *persistencespb.NamespaceDetail - IsGlobalNamespace bool - NotificationVersion int64 - } - - // UpdateNamespaceRequest is used to update namespace - UpdateNamespaceRequest struct { - Namespace *persistencespb.NamespaceDetail - IsGlobalNamespace bool - NotificationVersion int64 - } - - // RenameNamespaceRequest is used to rename namespace. 
- RenameNamespaceRequest struct { - PreviousName string - NewName string - } - - // DeleteNamespaceRequest is used to delete namespace entry from namespaces table - DeleteNamespaceRequest struct { - ID string - } - - // DeleteNamespaceByNameRequest is used to delete namespace entry from namespaces_by_name table - DeleteNamespaceByNameRequest struct { - Name string - } - - // ListNamespacesRequest is used to list namespaces - ListNamespacesRequest struct { - PageSize int - NextPageToken []byte - IncludeDeleted bool - } - - // ListNamespacesResponse is the response for GetNamespace - ListNamespacesResponse struct { - Namespaces []*GetNamespaceResponse - NextPageToken []byte - } - - // GetMetadataResponse is the response for GetMetadata - GetMetadataResponse struct { - NotificationVersion int64 - } - - // MutableStateStatistics is the size stats for MutableState - MutableStateStatistics struct { - TotalSize int - HistoryStatistics *HistoryStatistics - - // Breakdown of size into more granular stats - ExecutionInfoSize int - ExecutionStateSize int - - ActivityInfoSize int - TimerInfoSize int - ChildInfoSize int - RequestCancelInfoSize int - SignalInfoSize int - SignalRequestIDSize int - BufferedEventsSize int - // UpdateInfoSize is included in ExecutionInfoSize - - // Item count for various information captured within mutable state - ActivityInfoCount int - TimerInfoCount int - ChildInfoCount int - RequestCancelInfoCount int - SignalInfoCount int - SignalRequestIDCount int - BufferedEventsCount int - TaskCountByCategory map[string]int - UpdateInfoCount int - - // Total item count for various information captured within mutable state - TotalActivityCount int64 - TotalUserTimerCount int64 - TotalChildExecutionCount int64 - TotalRequestCancelExternalCount int64 - TotalSignalExternalCount int64 - TotalSignalCount int64 - TotalUpdateCount int64 - } - - HistoryStatistics struct { - SizeDiff int - CountDiff int - } - - // AppendHistoryNodesRequest is used to append a batch of history nodes - AppendHistoryNodesRequest struct { - // The shard to get history node data - ShardID int32 - // true if this is the first append request to the branch - IsNewBranch bool - // the info for clean up data in background - Info string - // The branch to be appended - BranchToken []byte - // The batch of events to be appended. The first eventID will become the nodeID of this batch - Events []*historypb.HistoryEvent - // TransactionID for events before these events. For events chaining - PrevTransactionID int64 - // requested TransactionID for this write operation. For the same eventID, the node with larger TransactionID always wins - TransactionID int64 - } - - // AppendHistoryNodesResponse is a response to AppendHistoryNodesRequest - AppendHistoryNodesResponse struct { - // the size of the event data that has been appended - Size int - } - - // AppendRawHistoryNodesRequest is used to append a batch of raw history nodes - AppendRawHistoryNodesRequest struct { - // The shard to get history node data - ShardID int32 - // true if this is the first append request to the branch - IsNewBranch bool - // the info for clean up data in background - Info string - // The branch to be appended - BranchToken []byte - // The batch of events to be appended. The first eventID will become the nodeID of this batch - History *commonpb.DataBlob - // TransactionID for events before these events. For events chaining - PrevTransactionID int64 - // requested TransactionID for this write operation. 
For the same eventID, the node with larger TransactionID always wins - TransactionID int64 - // NodeID is the first event id. - NodeID int64 - } - - // ReadHistoryBranchRequest is used to read a history branch - ReadHistoryBranchRequest struct { - // The shard to get history branch data - ShardID int32 - // The branch to be read - BranchToken []byte - // Get the history nodes from MinEventID. Inclusive. - MinEventID int64 - // Get the history nodes upto MaxEventID. Exclusive. - MaxEventID int64 - // Maximum number of batches of events per page. Not that number of events in a batch >=1, it is not number of events per page. - // However for a single page, it is also possible that the returned events is less than PageSize (event zero events) due to stale events. - PageSize int - // Token to continue reading next page of history append transactions. Pass in empty slice for first page - NextPageToken []byte - } - - // ReadHistoryBranchResponse is the response to ReadHistoryBranchRequest - ReadHistoryBranchResponse struct { - // History events - HistoryEvents []*historypb.HistoryEvent - // Token to read next page if there are more events beyond page size. - // Use this to set NextPageToken on ReadHistoryBranchRequest to read the next page. - // Empty means we have reached the last page, not need to continue - NextPageToken []byte - // Size of history read from store - Size int - } - - // ReadHistoryBranchRequest is used to read a history branch - ReadHistoryBranchReverseRequest struct { - // The shard to get history branch data - ShardID int32 - // The branch to be read - BranchToken []byte - // Get the history nodes upto MaxEventID. Exclusive. - MaxEventID int64 - // Maximum number of batches of events per page. Not that number of events in a batch >=1, it is not number of events per page. - // However for a single page, it is also possible that the returned events is less than PageSize (event zero events) due to stale events. - PageSize int - // LastFirstTransactionID specified in mutable state. Only used for reading in reverse order. - LastFirstTransactionID int64 - // Token to continue reading next page of history append transactions. Pass in empty slice for first page - NextPageToken []byte - } - - // ReadHistoryBranchResponse is the response to ReadHistoryBranchRequest - ReadHistoryBranchReverseResponse struct { - // History events - HistoryEvents []*historypb.HistoryEvent - // Token to read next page if there are more events beyond page size. - // Use this to set NextPageToken on ReadHistoryBranchRequest to read the next page. - // Empty means we have reached the last page, not need to continue - NextPageToken []byte - // Size of history read from store - Size int - } - - // ReadHistoryBranchByBatchResponse is the response to ReadHistoryBranchRequest - ReadHistoryBranchByBatchResponse struct { - // History events by batch - History []*historypb.History - // TransactionID for relevant History batch - TransactionIDs []int64 - // Token to read next page if there are more events beyond page size. - // Use this to set NextPageToken on ReadHistoryBranchRequest to read the next page. 
- // Empty means we have reached the last page, not need to continue - NextPageToken []byte - // Size of history read from store - Size int - } - - // ReadRawHistoryBranchResponse is the response to ReadHistoryBranchRequest - ReadRawHistoryBranchResponse struct { - // HistoryEventBlobs history event blobs - HistoryEventBlobs []*commonpb.DataBlob - // NodeIDs is the first event id of each history blob - NodeIDs []int64 - // Token to read next page if there are more events beyond page size. - // Use this to set NextPageToken on ReadHistoryBranchRequest to read the next page. - // Empty means we have reached the last page, not need to continue - NextPageToken []byte - // Size of history read from store - Size int - } - - // ForkHistoryBranchRequest is used to fork a history branch - ForkHistoryBranchRequest struct { - // The shard to get history branch data - ShardID int32 - // The namespace performing the fork - NamespaceID string - // The base branch to fork from - ForkBranchToken []byte - // The nodeID to fork from, the new branch will start from ( inclusive ), the base branch will stop at(exclusive) - // Application must provide a void forking nodeID, it must be a valid nodeID in that branch. A valid nodeID is the firstEventID of a valid batch of events. - // And ForkNodeID > 1 because forking from 1 doesn't make any sense. - ForkNodeID int64 - // the info for clean up data in background - Info string - } - - // ForkHistoryBranchResponse is the response to ForkHistoryBranchRequest - ForkHistoryBranchResponse struct { - // branchToken to represent the new branch - NewBranchToken []byte - } - - // CompleteForkBranchRequest is used to complete forking - CompleteForkBranchRequest struct { - // the new branch returned from ForkHistoryBranchRequest - BranchToken []byte - // true means the fork is success, will update the flag, otherwise will delete the new branch - Success bool - // The shard to update history branch data - ShardID *int - } - - // DeleteHistoryBranchRequest is used to remove a history branch - DeleteHistoryBranchRequest struct { - // The shard to delete history branch data - ShardID int32 - // branch to be deleted - BranchToken []byte - } - - // TrimHistoryBranchRequest is used to validate & trim a history branch - TrimHistoryBranchRequest struct { - // The shard to delete history branch data - ShardID int32 - // branch to be validated & trimmed - BranchToken []byte - // known valid node ID - NodeID int64 - // known valid transaction ID - TransactionID int64 - } - - // TrimHistoryBranchResponse is the response to TrimHistoryBranchRequest - TrimHistoryBranchResponse struct { - } - - // GetHistoryTreeRequest is used to retrieve branch info of a history tree - GetHistoryTreeRequest struct { - // A UUID of a tree - TreeID string - // Get data from this shard - ShardID int32 - } - - // HistoryBranchDetail contains detailed information of a branch - HistoryBranchDetail struct { - BranchToken []byte - ForkTime *time.Time - Info string - } - - // GetHistoryTreeResponse is a response to GetHistoryTreeRequest - GetHistoryTreeResponse struct { - // all branches of a tree - BranchTokens [][]byte - } - - // GetAllHistoryTreeBranchesRequest is a request of GetAllHistoryTreeBranches - GetAllHistoryTreeBranchesRequest struct { - // pagination token - NextPageToken []byte - // maximum number of branches returned per page - PageSize int - } - - // GetAllHistoryTreeBranchesResponse is a response to GetAllHistoryTreeBranches - GetAllHistoryTreeBranchesResponse struct { - // pagination token - 
NextPageToken []byte - // all branches of all trees - Branches []HistoryBranchDetail - } - - // ListClusterMetadataRequest is the request to ListClusterMetadata - ListClusterMetadataRequest struct { - PageSize int - NextPageToken []byte - } - - // ListClusterMetadataResponse is the response to ListClusterMetadata - ListClusterMetadataResponse struct { - ClusterMetadata []*GetClusterMetadataResponse - NextPageToken []byte - } - - // GetClusterMetadataRequest is the request to GetClusterMetadata - GetClusterMetadataRequest struct { - ClusterName string - } - - // GetClusterMetadataResponse is the response to GetClusterMetadata - GetClusterMetadataResponse struct { - persistencespb.ClusterMetadata - Version int64 - } - - // SaveClusterMetadataRequest is the request to SaveClusterMetadata - SaveClusterMetadataRequest struct { - persistencespb.ClusterMetadata - Version int64 - } - - // DeleteClusterMetadataRequest is the request to DeleteClusterMetadata - DeleteClusterMetadataRequest struct { - ClusterName string - } - - // GetClusterMembersRequest is the request to GetClusterMembers - GetClusterMembersRequest struct { - LastHeartbeatWithin time.Duration - RPCAddressEquals net.IP - HostIDEquals uuid.UUID - RoleEquals ServiceType - SessionStartedAfter time.Time - NextPageToken []byte - PageSize int - } - - // GetClusterMembersResponse is the response to GetClusterMembers - GetClusterMembersResponse struct { - ActiveMembers []*ClusterMember - NextPageToken []byte - } - - // ClusterMember is used as a response to GetClusterMembers - ClusterMember struct { - Role ServiceType - HostID uuid.UUID - RPCAddress net.IP - RPCPort uint16 - SessionStart time.Time - LastHeartbeat time.Time - RecordExpiry time.Time - } - - // UpsertClusterMembershipRequest is the request to UpsertClusterMembership - UpsertClusterMembershipRequest struct { - Role ServiceType - HostID uuid.UUID - RPCAddress net.IP - RPCPort uint16 - SessionStart time.Time - RecordExpiry time.Duration - } - - // PruneClusterMembershipRequest is the request to PruneClusterMembership - PruneClusterMembershipRequest struct { - MaxRecordsPruned int - } - - // Closeable is an interface for any entity that supports a close operation to release resources - // TODO: allow this method to return errors - Closeable interface { - Close() - } - - // ShardManager is used to manage all shards - ShardManager interface { - Closeable - GetName() string - - GetOrCreateShard(ctx context.Context, request *GetOrCreateShardRequest) (*GetOrCreateShardResponse, error) - UpdateShard(ctx context.Context, request *UpdateShardRequest) error - AssertShardOwnership(ctx context.Context, request *AssertShardOwnershipRequest) error - } - - // ExecutionManager is used to manage workflow executions - ExecutionManager interface { - Closeable - GetName() string - GetHistoryBranchUtil() HistoryBranchUtil - - CreateWorkflowExecution(ctx context.Context, request *CreateWorkflowExecutionRequest) (*CreateWorkflowExecutionResponse, error) - UpdateWorkflowExecution(ctx context.Context, request *UpdateWorkflowExecutionRequest) (*UpdateWorkflowExecutionResponse, error) - ConflictResolveWorkflowExecution(ctx context.Context, request *ConflictResolveWorkflowExecutionRequest) (*ConflictResolveWorkflowExecutionResponse, error) - DeleteWorkflowExecution(ctx context.Context, request *DeleteWorkflowExecutionRequest) error - DeleteCurrentWorkflowExecution(ctx context.Context, request *DeleteCurrentWorkflowExecutionRequest) error - GetCurrentExecution(ctx context.Context, request 
*GetCurrentExecutionRequest) (*GetCurrentExecutionResponse, error) - GetWorkflowExecution(ctx context.Context, request *GetWorkflowExecutionRequest) (*GetWorkflowExecutionResponse, error) - SetWorkflowExecution(ctx context.Context, request *SetWorkflowExecutionRequest) (*SetWorkflowExecutionResponse, error) - - // Scan operations - - ListConcreteExecutions(ctx context.Context, request *ListConcreteExecutionsRequest) (*ListConcreteExecutionsResponse, error) - - // Tasks related APIs - - // Hints for persistence implementaion regarding hisotry task readers - RegisterHistoryTaskReader(ctx context.Context, request *RegisterHistoryTaskReaderRequest) error - UnregisterHistoryTaskReader(ctx context.Context, request *UnregisterHistoryTaskReaderRequest) - UpdateHistoryTaskReaderProgress(ctx context.Context, request *UpdateHistoryTaskReaderProgressRequest) - - AddHistoryTasks(ctx context.Context, request *AddHistoryTasksRequest) error - GetHistoryTasks(ctx context.Context, request *GetHistoryTasksRequest) (*GetHistoryTasksResponse, error) - CompleteHistoryTask(ctx context.Context, request *CompleteHistoryTaskRequest) error - RangeCompleteHistoryTasks(ctx context.Context, request *RangeCompleteHistoryTasksRequest) error - - PutReplicationTaskToDLQ(ctx context.Context, request *PutReplicationTaskToDLQRequest) error - GetReplicationTasksFromDLQ(ctx context.Context, request *GetReplicationTasksFromDLQRequest) (*GetHistoryTasksResponse, error) - DeleteReplicationTaskFromDLQ(ctx context.Context, request *DeleteReplicationTaskFromDLQRequest) error - RangeDeleteReplicationTaskFromDLQ(ctx context.Context, request *RangeDeleteReplicationTaskFromDLQRequest) error - IsReplicationDLQEmpty(ctx context.Context, request *GetReplicationTasksFromDLQRequest) (bool, error) - - // The below are history V2 APIs - // V2 regards history events growing as a tree, decoupled from workflow concepts - // For Temporal, treeID is new runID, except for fork(reset), treeID will be the runID that it forks from. 
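Editorial aside, not part of the patch: a sketch of the history V2 branching flow the comment above describes, using the ForkHistoryBranch and ReadHistoryBranch methods listed just below and the request types defined earlier in this file. The shard id, fork node id, event range, and info string are hypothetical; historypb is the go.temporal.io/api/history/v1 package already imported by the file.

func forkAndRead(ctx context.Context, em ExecutionManager, shardID int32, nsID string, base []byte) ([]*historypb.HistoryEvent, error) {
	forked, err := em.ForkHistoryBranch(ctx, &ForkHistoryBranchRequest{
		ShardID:         shardID,
		NamespaceID:     nsID,
		ForkBranchToken: base,
		ForkNodeID:      5, // must be a valid nodeID (> 1) on the base branch
		Info:            "reset-example",
	})
	if err != nil {
		return nil, err
	}
	resp, err := em.ReadHistoryBranch(ctx, &ReadHistoryBranchRequest{
		ShardID:     shardID,
		BranchToken: forked.NewBranchToken,
		MinEventID:  1, // inclusive
		MaxEventID:  5, // exclusive
		PageSize:    100,
	})
	if err != nil {
		return nil, err
	}
	return resp.HistoryEvents, nil
}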
- - // AppendHistoryNodes add a node to history node table - AppendHistoryNodes(ctx context.Context, request *AppendHistoryNodesRequest) (*AppendHistoryNodesResponse, error) - // AppendRawHistoryNodes add a node of raw histories to history node table - AppendRawHistoryNodes(ctx context.Context, request *AppendRawHistoryNodesRequest) (*AppendHistoryNodesResponse, error) - // ReadHistoryBranch returns history node data for a branch - ReadHistoryBranch(ctx context.Context, request *ReadHistoryBranchRequest) (*ReadHistoryBranchResponse, error) - // ReadHistoryBranchByBatch returns history node data for a branch ByBatch - ReadHistoryBranchByBatch(ctx context.Context, request *ReadHistoryBranchRequest) (*ReadHistoryBranchByBatchResponse, error) - // ReadHistoryBranchReverse returns history node data for a branch - ReadHistoryBranchReverse(ctx context.Context, request *ReadHistoryBranchReverseRequest) (*ReadHistoryBranchReverseResponse, error) - // ReadRawHistoryBranch returns history node raw data for a branch ByBatch - // NOTE: this API should only be used by 3+DC - ReadRawHistoryBranch(ctx context.Context, request *ReadHistoryBranchRequest) (*ReadRawHistoryBranchResponse, error) - // ForkHistoryBranch forks a new branch from a old branch - ForkHistoryBranch(ctx context.Context, request *ForkHistoryBranchRequest) (*ForkHistoryBranchResponse, error) - // DeleteHistoryBranch removes a branch - // If this is the last branch to delete, it will also remove the root node - DeleteHistoryBranch(ctx context.Context, request *DeleteHistoryBranchRequest) error - // TrimHistoryBranch validate & trim a history branch - TrimHistoryBranch(ctx context.Context, request *TrimHistoryBranchRequest) (*TrimHistoryBranchResponse, error) - // GetHistoryTree returns all branch information of a tree - GetHistoryTree(ctx context.Context, request *GetHistoryTreeRequest) (*GetHistoryTreeResponse, error) - // GetAllHistoryTreeBranches returns all branches of all trees - GetAllHistoryTreeBranches(ctx context.Context, request *GetAllHistoryTreeBranchesRequest) (*GetAllHistoryTreeBranchesResponse, error) - } - - // TaskManager is used to manage tasks and task queues - TaskManager interface { - Closeable - GetName() string - CreateTaskQueue(ctx context.Context, request *CreateTaskQueueRequest) (*CreateTaskQueueResponse, error) - UpdateTaskQueue(ctx context.Context, request *UpdateTaskQueueRequest) (*UpdateTaskQueueResponse, error) - GetTaskQueue(ctx context.Context, request *GetTaskQueueRequest) (*GetTaskQueueResponse, error) - ListTaskQueue(ctx context.Context, request *ListTaskQueueRequest) (*ListTaskQueueResponse, error) - DeleteTaskQueue(ctx context.Context, request *DeleteTaskQueueRequest) error - CreateTasks(ctx context.Context, request *CreateTasksRequest) (*CreateTasksResponse, error) - GetTasks(ctx context.Context, request *GetTasksRequest) (*GetTasksResponse, error) - CompleteTask(ctx context.Context, request *CompleteTaskRequest) error - // CompleteTasksLessThan completes tasks less than or equal to the given task id - // This API takes a limit parameter which specifies the count of maxRows that - // can be deleted. This parameter may be ignored by the underlying storage, but - // its mandatory to specify it. On success this method returns the number of rows - // actually deleted. If the underlying storage doesn't support "limit", all rows - // less than or equal to taskID will be deleted. 
- // On success, this method returns either: - // - UnknownNumRowsAffected (this means all rows below value are deleted) - // - number of rows deleted, which may be equal to limit - CompleteTasksLessThan(ctx context.Context, request *CompleteTasksLessThanRequest) (int, error) - - // GetTaskQueueUserData gets versioned user data. - // This data would only exist if a user uses APIs that generate it, such as the worker versioning related APIs. - // The caller should be prepared to gracefully handle the "NotFound" service error. - GetTaskQueueUserData(ctx context.Context, request *GetTaskQueueUserDataRequest) (*GetTaskQueueUserDataResponse, error) - // UpdateTaskQueueUserData updates the user data for a given task queue. - // The request takes the _current_ known version along with the data to update. - // The caller should +1 increment the cached version number if this call succeeds. - // Fails with ConditionFailedError if the user data was updated concurrently. - UpdateTaskQueueUserData(ctx context.Context, request *UpdateTaskQueueUserDataRequest) error - ListTaskQueueUserDataEntries(ctx context.Context, request *ListTaskQueueUserDataEntriesRequest) (*ListTaskQueueUserDataEntriesResponse, error) - GetTaskQueuesByBuildId(ctx context.Context, request *GetTaskQueuesByBuildIdRequest) ([]string, error) - CountTaskQueuesByBuildId(ctx context.Context, request *CountTaskQueuesByBuildIdRequest) (int, error) - } - - // MetadataManager is used to manage metadata CRUD for namespace entities - MetadataManager interface { - Closeable - GetName() string - CreateNamespace(ctx context.Context, request *CreateNamespaceRequest) (*CreateNamespaceResponse, error) - GetNamespace(ctx context.Context, request *GetNamespaceRequest) (*GetNamespaceResponse, error) - UpdateNamespace(ctx context.Context, request *UpdateNamespaceRequest) error - RenameNamespace(ctx context.Context, request *RenameNamespaceRequest) error - DeleteNamespace(ctx context.Context, request *DeleteNamespaceRequest) error - DeleteNamespaceByName(ctx context.Context, request *DeleteNamespaceByNameRequest) error - ListNamespaces(ctx context.Context, request *ListNamespacesRequest) (*ListNamespacesResponse, error) - GetMetadata(ctx context.Context) (*GetMetadataResponse, error) - InitializeSystemNamespaces(ctx context.Context, currentClusterName string) error - } - - // ClusterMetadataManager is used to manage cluster-wide metadata and configuration - ClusterMetadataManager interface { - Closeable - GetName() string - GetClusterMembers(ctx context.Context, request *GetClusterMembersRequest) (*GetClusterMembersResponse, error) - UpsertClusterMembership(ctx context.Context, request *UpsertClusterMembershipRequest) error - PruneClusterMembership(ctx context.Context, request *PruneClusterMembershipRequest) error - ListClusterMetadata(ctx context.Context, request *ListClusterMetadataRequest) (*ListClusterMetadataResponse, error) - GetCurrentClusterMetadata(ctx context.Context) (*GetClusterMetadataResponse, error) - GetClusterMetadata(ctx context.Context, request *GetClusterMetadataRequest) (*GetClusterMetadataResponse, error) - SaveClusterMetadata(ctx context.Context, request *SaveClusterMetadataRequest) (bool, error) - DeleteClusterMetadata(ctx context.Context, request *DeleteClusterMetadataRequest) error - } -) - -func (e *InvalidPersistenceRequestError) Error() string { - return e.Msg -} - -func (e *AppendHistoryTimeoutError) Error() string { - return e.Msg -} - -func (e *CurrentWorkflowConditionFailedError) Error() string { - return e.Msg -} - -func 
(e *WorkflowConditionFailedError) Error() string { - return e.Msg -} - -func (e *ConditionFailedError) Error() string { - return e.Msg -} - -func (e *ShardAlreadyExistError) Error() string { - return e.Msg -} - -func (e *ShardOwnershipLostError) Error() string { - return e.Msg -} - -func (e *TimeoutError) Error() string { - return e.Msg -} - -func (e *TransactionSizeLimitError) Error() string { - return e.Msg -} - -func IsConflictErr(err error) bool { - switch err.(type) { - case *CurrentWorkflowConditionFailedError, - *WorkflowConditionFailedError, - *ConditionFailedError: - return true - } - return false -} - -// UnixMilliseconds returns t as a Unix time, the number of milliseconds elapsed since January 1, 1970 UTC. -// It should be used for all CQL timestamp. -func UnixMilliseconds(t time.Time) int64 { - // Handling zero time separately because UnixNano is undefined for zero times. - if t.IsZero() { - return 0 - } - - unixNano := t.UnixNano() - if unixNano < 0 { - // Time is before January 1, 1970 UTC - return 0 - } - return unixNano / int64(time.Millisecond) -} - -// BuildHistoryGarbageCleanupInfo combine the workflow identity information into a string -func BuildHistoryGarbageCleanupInfo(namespaceID, workflowID, runID string) string { - return fmt.Sprintf("%v:%v:%v", namespaceID, workflowID, runID) -} - -// SplitHistoryGarbageCleanupInfo returns workflow identity information -func SplitHistoryGarbageCleanupInfo(info string) (namespaceID, workflowID, runID string, err error) { - ss := strings.Split(info, ":") - // workflowID can contain ":" so len(ss) can be greater than 3 - if len(ss) < numItemsInGarbageInfo { - return "", "", "", fmt.Errorf("not able to split info for %s", info) - } - namespaceID = ss[0] - runID = ss[len(ss)-1] - workflowEnd := len(info) - len(runID) - 1 - workflowID = info[len(namespaceID)+1 : workflowEnd] - return -} - -type ServiceType int - -const ( - All ServiceType = iota - Frontend - History - Matching - Worker - InternalFrontend -) diff -Nru temporal-1.21.5-1/src/common/persistence/dataInterfaces_mock.go temporal-1.22.5/src/common/persistence/dataInterfaces_mock.go --- temporal-1.21.5-1/src/common/persistence/dataInterfaces_mock.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/dataInterfaces_mock.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,1291 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Code generated by MockGen. DO NOT EDIT. -// Source: dataInterfaces.go - -// Package persistence is a generated GoMock package. -package persistence - -import ( - context "context" - reflect "reflect" - - gomock "github.com/golang/mock/gomock" -) - -// MockCloseable is a mock of Closeable interface. -type MockCloseable struct { - ctrl *gomock.Controller - recorder *MockCloseableMockRecorder -} - -// MockCloseableMockRecorder is the mock recorder for MockCloseable. -type MockCloseableMockRecorder struct { - mock *MockCloseable -} - -// NewMockCloseable creates a new mock instance. -func NewMockCloseable(ctrl *gomock.Controller) *MockCloseable { - mock := &MockCloseable{ctrl: ctrl} - mock.recorder = &MockCloseableMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockCloseable) EXPECT() *MockCloseableMockRecorder { - return m.recorder -} - -// Close mocks base method. -func (m *MockCloseable) Close() { - m.ctrl.T.Helper() - m.ctrl.Call(m, "Close") -} - -// Close indicates an expected call of Close. -func (mr *MockCloseableMockRecorder) Close() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockCloseable)(nil).Close)) -} - -// MockShardManager is a mock of ShardManager interface. -type MockShardManager struct { - ctrl *gomock.Controller - recorder *MockShardManagerMockRecorder -} - -// MockShardManagerMockRecorder is the mock recorder for MockShardManager. -type MockShardManagerMockRecorder struct { - mock *MockShardManager -} - -// NewMockShardManager creates a new mock instance. -func NewMockShardManager(ctrl *gomock.Controller) *MockShardManager { - mock := &MockShardManager{ctrl: ctrl} - mock.recorder = &MockShardManagerMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockShardManager) EXPECT() *MockShardManagerMockRecorder { - return m.recorder -} - -// AssertShardOwnership mocks base method. -func (m *MockShardManager) AssertShardOwnership(ctx context.Context, request *AssertShardOwnershipRequest) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AssertShardOwnership", ctx, request) - ret0, _ := ret[0].(error) - return ret0 -} - -// AssertShardOwnership indicates an expected call of AssertShardOwnership. -func (mr *MockShardManagerMockRecorder) AssertShardOwnership(ctx, request interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AssertShardOwnership", reflect.TypeOf((*MockShardManager)(nil).AssertShardOwnership), ctx, request) -} - -// Close mocks base method. -func (m *MockShardManager) Close() { - m.ctrl.T.Helper() - m.ctrl.Call(m, "Close") -} - -// Close indicates an expected call of Close. -func (mr *MockShardManagerMockRecorder) Close() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockShardManager)(nil).Close)) -} - -// GetName mocks base method. -func (m *MockShardManager) GetName() string { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetName") - ret0, _ := ret[0].(string) - return ret0 -} - -// GetName indicates an expected call of GetName. 
-func (mr *MockShardManagerMockRecorder) GetName() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetName", reflect.TypeOf((*MockShardManager)(nil).GetName)) -} - -// GetOrCreateShard mocks base method. -func (m *MockShardManager) GetOrCreateShard(ctx context.Context, request *GetOrCreateShardRequest) (*GetOrCreateShardResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetOrCreateShard", ctx, request) - ret0, _ := ret[0].(*GetOrCreateShardResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetOrCreateShard indicates an expected call of GetOrCreateShard. -func (mr *MockShardManagerMockRecorder) GetOrCreateShard(ctx, request interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetOrCreateShard", reflect.TypeOf((*MockShardManager)(nil).GetOrCreateShard), ctx, request) -} - -// UpdateShard mocks base method. -func (m *MockShardManager) UpdateShard(ctx context.Context, request *UpdateShardRequest) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UpdateShard", ctx, request) - ret0, _ := ret[0].(error) - return ret0 -} - -// UpdateShard indicates an expected call of UpdateShard. -func (mr *MockShardManagerMockRecorder) UpdateShard(ctx, request interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateShard", reflect.TypeOf((*MockShardManager)(nil).UpdateShard), ctx, request) -} - -// MockExecutionManager is a mock of ExecutionManager interface. -type MockExecutionManager struct { - ctrl *gomock.Controller - recorder *MockExecutionManagerMockRecorder -} - -// MockExecutionManagerMockRecorder is the mock recorder for MockExecutionManager. -type MockExecutionManagerMockRecorder struct { - mock *MockExecutionManager -} - -// NewMockExecutionManager creates a new mock instance. -func NewMockExecutionManager(ctrl *gomock.Controller) *MockExecutionManager { - mock := &MockExecutionManager{ctrl: ctrl} - mock.recorder = &MockExecutionManagerMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockExecutionManager) EXPECT() *MockExecutionManagerMockRecorder { - return m.recorder -} - -// AddHistoryTasks mocks base method. -func (m *MockExecutionManager) AddHistoryTasks(ctx context.Context, request *AddHistoryTasksRequest) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AddHistoryTasks", ctx, request) - ret0, _ := ret[0].(error) - return ret0 -} - -// AddHistoryTasks indicates an expected call of AddHistoryTasks. -func (mr *MockExecutionManagerMockRecorder) AddHistoryTasks(ctx, request interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddHistoryTasks", reflect.TypeOf((*MockExecutionManager)(nil).AddHistoryTasks), ctx, request) -} - -// AppendHistoryNodes mocks base method. -func (m *MockExecutionManager) AppendHistoryNodes(ctx context.Context, request *AppendHistoryNodesRequest) (*AppendHistoryNodesResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AppendHistoryNodes", ctx, request) - ret0, _ := ret[0].(*AppendHistoryNodesResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// AppendHistoryNodes indicates an expected call of AppendHistoryNodes. 
-func (mr *MockExecutionManagerMockRecorder) AppendHistoryNodes(ctx, request interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AppendHistoryNodes", reflect.TypeOf((*MockExecutionManager)(nil).AppendHistoryNodes), ctx, request) -} - -// AppendRawHistoryNodes mocks base method. -func (m *MockExecutionManager) AppendRawHistoryNodes(ctx context.Context, request *AppendRawHistoryNodesRequest) (*AppendHistoryNodesResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AppendRawHistoryNodes", ctx, request) - ret0, _ := ret[0].(*AppendHistoryNodesResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// AppendRawHistoryNodes indicates an expected call of AppendRawHistoryNodes. -func (mr *MockExecutionManagerMockRecorder) AppendRawHistoryNodes(ctx, request interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AppendRawHistoryNodes", reflect.TypeOf((*MockExecutionManager)(nil).AppendRawHistoryNodes), ctx, request) -} - -// Close mocks base method. -func (m *MockExecutionManager) Close() { - m.ctrl.T.Helper() - m.ctrl.Call(m, "Close") -} - -// Close indicates an expected call of Close. -func (mr *MockExecutionManagerMockRecorder) Close() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockExecutionManager)(nil).Close)) -} - -// CompleteHistoryTask mocks base method. -func (m *MockExecutionManager) CompleteHistoryTask(ctx context.Context, request *CompleteHistoryTaskRequest) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CompleteHistoryTask", ctx, request) - ret0, _ := ret[0].(error) - return ret0 -} - -// CompleteHistoryTask indicates an expected call of CompleteHistoryTask. -func (mr *MockExecutionManagerMockRecorder) CompleteHistoryTask(ctx, request interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CompleteHistoryTask", reflect.TypeOf((*MockExecutionManager)(nil).CompleteHistoryTask), ctx, request) -} - -// ConflictResolveWorkflowExecution mocks base method. -func (m *MockExecutionManager) ConflictResolveWorkflowExecution(ctx context.Context, request *ConflictResolveWorkflowExecutionRequest) (*ConflictResolveWorkflowExecutionResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ConflictResolveWorkflowExecution", ctx, request) - ret0, _ := ret[0].(*ConflictResolveWorkflowExecutionResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ConflictResolveWorkflowExecution indicates an expected call of ConflictResolveWorkflowExecution. -func (mr *MockExecutionManagerMockRecorder) ConflictResolveWorkflowExecution(ctx, request interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ConflictResolveWorkflowExecution", reflect.TypeOf((*MockExecutionManager)(nil).ConflictResolveWorkflowExecution), ctx, request) -} - -// CreateWorkflowExecution mocks base method. -func (m *MockExecutionManager) CreateWorkflowExecution(ctx context.Context, request *CreateWorkflowExecutionRequest) (*CreateWorkflowExecutionResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CreateWorkflowExecution", ctx, request) - ret0, _ := ret[0].(*CreateWorkflowExecutionResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// CreateWorkflowExecution indicates an expected call of CreateWorkflowExecution. 
-func (mr *MockExecutionManagerMockRecorder) CreateWorkflowExecution(ctx, request interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateWorkflowExecution", reflect.TypeOf((*MockExecutionManager)(nil).CreateWorkflowExecution), ctx, request) -} - -// DeleteCurrentWorkflowExecution mocks base method. -func (m *MockExecutionManager) DeleteCurrentWorkflowExecution(ctx context.Context, request *DeleteCurrentWorkflowExecutionRequest) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteCurrentWorkflowExecution", ctx, request) - ret0, _ := ret[0].(error) - return ret0 -} - -// DeleteCurrentWorkflowExecution indicates an expected call of DeleteCurrentWorkflowExecution. -func (mr *MockExecutionManagerMockRecorder) DeleteCurrentWorkflowExecution(ctx, request interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteCurrentWorkflowExecution", reflect.TypeOf((*MockExecutionManager)(nil).DeleteCurrentWorkflowExecution), ctx, request) -} - -// DeleteHistoryBranch mocks base method. -func (m *MockExecutionManager) DeleteHistoryBranch(ctx context.Context, request *DeleteHistoryBranchRequest) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteHistoryBranch", ctx, request) - ret0, _ := ret[0].(error) - return ret0 -} - -// DeleteHistoryBranch indicates an expected call of DeleteHistoryBranch. -func (mr *MockExecutionManagerMockRecorder) DeleteHistoryBranch(ctx, request interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteHistoryBranch", reflect.TypeOf((*MockExecutionManager)(nil).DeleteHistoryBranch), ctx, request) -} - -// DeleteReplicationTaskFromDLQ mocks base method. -func (m *MockExecutionManager) DeleteReplicationTaskFromDLQ(ctx context.Context, request *DeleteReplicationTaskFromDLQRequest) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteReplicationTaskFromDLQ", ctx, request) - ret0, _ := ret[0].(error) - return ret0 -} - -// DeleteReplicationTaskFromDLQ indicates an expected call of DeleteReplicationTaskFromDLQ. -func (mr *MockExecutionManagerMockRecorder) DeleteReplicationTaskFromDLQ(ctx, request interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteReplicationTaskFromDLQ", reflect.TypeOf((*MockExecutionManager)(nil).DeleteReplicationTaskFromDLQ), ctx, request) -} - -// DeleteWorkflowExecution mocks base method. -func (m *MockExecutionManager) DeleteWorkflowExecution(ctx context.Context, request *DeleteWorkflowExecutionRequest) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteWorkflowExecution", ctx, request) - ret0, _ := ret[0].(error) - return ret0 -} - -// DeleteWorkflowExecution indicates an expected call of DeleteWorkflowExecution. -func (mr *MockExecutionManagerMockRecorder) DeleteWorkflowExecution(ctx, request interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteWorkflowExecution", reflect.TypeOf((*MockExecutionManager)(nil).DeleteWorkflowExecution), ctx, request) -} - -// ForkHistoryBranch mocks base method. 
-func (m *MockExecutionManager) ForkHistoryBranch(ctx context.Context, request *ForkHistoryBranchRequest) (*ForkHistoryBranchResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ForkHistoryBranch", ctx, request) - ret0, _ := ret[0].(*ForkHistoryBranchResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ForkHistoryBranch indicates an expected call of ForkHistoryBranch. -func (mr *MockExecutionManagerMockRecorder) ForkHistoryBranch(ctx, request interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ForkHistoryBranch", reflect.TypeOf((*MockExecutionManager)(nil).ForkHistoryBranch), ctx, request) -} - -// GetAllHistoryTreeBranches mocks base method. -func (m *MockExecutionManager) GetAllHistoryTreeBranches(ctx context.Context, request *GetAllHistoryTreeBranchesRequest) (*GetAllHistoryTreeBranchesResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetAllHistoryTreeBranches", ctx, request) - ret0, _ := ret[0].(*GetAllHistoryTreeBranchesResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetAllHistoryTreeBranches indicates an expected call of GetAllHistoryTreeBranches. -func (mr *MockExecutionManagerMockRecorder) GetAllHistoryTreeBranches(ctx, request interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAllHistoryTreeBranches", reflect.TypeOf((*MockExecutionManager)(nil).GetAllHistoryTreeBranches), ctx, request) -} - -// GetCurrentExecution mocks base method. -func (m *MockExecutionManager) GetCurrentExecution(ctx context.Context, request *GetCurrentExecutionRequest) (*GetCurrentExecutionResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetCurrentExecution", ctx, request) - ret0, _ := ret[0].(*GetCurrentExecutionResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetCurrentExecution indicates an expected call of GetCurrentExecution. -func (mr *MockExecutionManagerMockRecorder) GetCurrentExecution(ctx, request interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentExecution", reflect.TypeOf((*MockExecutionManager)(nil).GetCurrentExecution), ctx, request) -} - -// GetHistoryBranchUtil mocks base method. -func (m *MockExecutionManager) GetHistoryBranchUtil() HistoryBranchUtil { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetHistoryBranchUtil") - ret0, _ := ret[0].(HistoryBranchUtil) - return ret0 -} - -// GetHistoryBranchUtil indicates an expected call of GetHistoryBranchUtil. -func (mr *MockExecutionManagerMockRecorder) GetHistoryBranchUtil() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHistoryBranchUtil", reflect.TypeOf((*MockExecutionManager)(nil).GetHistoryBranchUtil)) -} - -// GetHistoryTasks mocks base method. -func (m *MockExecutionManager) GetHistoryTasks(ctx context.Context, request *GetHistoryTasksRequest) (*GetHistoryTasksResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetHistoryTasks", ctx, request) - ret0, _ := ret[0].(*GetHistoryTasksResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetHistoryTasks indicates an expected call of GetHistoryTasks. 
-func (mr *MockExecutionManagerMockRecorder) GetHistoryTasks(ctx, request interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHistoryTasks", reflect.TypeOf((*MockExecutionManager)(nil).GetHistoryTasks), ctx, request) -} - -// GetHistoryTree mocks base method. -func (m *MockExecutionManager) GetHistoryTree(ctx context.Context, request *GetHistoryTreeRequest) (*GetHistoryTreeResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetHistoryTree", ctx, request) - ret0, _ := ret[0].(*GetHistoryTreeResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetHistoryTree indicates an expected call of GetHistoryTree. -func (mr *MockExecutionManagerMockRecorder) GetHistoryTree(ctx, request interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHistoryTree", reflect.TypeOf((*MockExecutionManager)(nil).GetHistoryTree), ctx, request) -} - -// GetName mocks base method. -func (m *MockExecutionManager) GetName() string { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetName") - ret0, _ := ret[0].(string) - return ret0 -} - -// GetName indicates an expected call of GetName. -func (mr *MockExecutionManagerMockRecorder) GetName() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetName", reflect.TypeOf((*MockExecutionManager)(nil).GetName)) -} - -// GetReplicationTasksFromDLQ mocks base method. -func (m *MockExecutionManager) GetReplicationTasksFromDLQ(ctx context.Context, request *GetReplicationTasksFromDLQRequest) (*GetHistoryTasksResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetReplicationTasksFromDLQ", ctx, request) - ret0, _ := ret[0].(*GetHistoryTasksResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetReplicationTasksFromDLQ indicates an expected call of GetReplicationTasksFromDLQ. -func (mr *MockExecutionManagerMockRecorder) GetReplicationTasksFromDLQ(ctx, request interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetReplicationTasksFromDLQ", reflect.TypeOf((*MockExecutionManager)(nil).GetReplicationTasksFromDLQ), ctx, request) -} - -// GetWorkflowExecution mocks base method. -func (m *MockExecutionManager) GetWorkflowExecution(ctx context.Context, request *GetWorkflowExecutionRequest) (*GetWorkflowExecutionResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetWorkflowExecution", ctx, request) - ret0, _ := ret[0].(*GetWorkflowExecutionResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetWorkflowExecution indicates an expected call of GetWorkflowExecution. -func (mr *MockExecutionManagerMockRecorder) GetWorkflowExecution(ctx, request interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetWorkflowExecution", reflect.TypeOf((*MockExecutionManager)(nil).GetWorkflowExecution), ctx, request) -} - -// IsReplicationDLQEmpty mocks base method. -func (m *MockExecutionManager) IsReplicationDLQEmpty(ctx context.Context, request *GetReplicationTasksFromDLQRequest) (bool, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "IsReplicationDLQEmpty", ctx, request) - ret0, _ := ret[0].(bool) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// IsReplicationDLQEmpty indicates an expected call of IsReplicationDLQEmpty. 
-func (mr *MockExecutionManagerMockRecorder) IsReplicationDLQEmpty(ctx, request interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsReplicationDLQEmpty", reflect.TypeOf((*MockExecutionManager)(nil).IsReplicationDLQEmpty), ctx, request) -} - -// ListConcreteExecutions mocks base method. -func (m *MockExecutionManager) ListConcreteExecutions(ctx context.Context, request *ListConcreteExecutionsRequest) (*ListConcreteExecutionsResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListConcreteExecutions", ctx, request) - ret0, _ := ret[0].(*ListConcreteExecutionsResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListConcreteExecutions indicates an expected call of ListConcreteExecutions. -func (mr *MockExecutionManagerMockRecorder) ListConcreteExecutions(ctx, request interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListConcreteExecutions", reflect.TypeOf((*MockExecutionManager)(nil).ListConcreteExecutions), ctx, request) -} - -// PutReplicationTaskToDLQ mocks base method. -func (m *MockExecutionManager) PutReplicationTaskToDLQ(ctx context.Context, request *PutReplicationTaskToDLQRequest) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutReplicationTaskToDLQ", ctx, request) - ret0, _ := ret[0].(error) - return ret0 -} - -// PutReplicationTaskToDLQ indicates an expected call of PutReplicationTaskToDLQ. -func (mr *MockExecutionManagerMockRecorder) PutReplicationTaskToDLQ(ctx, request interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutReplicationTaskToDLQ", reflect.TypeOf((*MockExecutionManager)(nil).PutReplicationTaskToDLQ), ctx, request) -} - -// RangeCompleteHistoryTasks mocks base method. -func (m *MockExecutionManager) RangeCompleteHistoryTasks(ctx context.Context, request *RangeCompleteHistoryTasksRequest) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RangeCompleteHistoryTasks", ctx, request) - ret0, _ := ret[0].(error) - return ret0 -} - -// RangeCompleteHistoryTasks indicates an expected call of RangeCompleteHistoryTasks. -func (mr *MockExecutionManagerMockRecorder) RangeCompleteHistoryTasks(ctx, request interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RangeCompleteHistoryTasks", reflect.TypeOf((*MockExecutionManager)(nil).RangeCompleteHistoryTasks), ctx, request) -} - -// RangeDeleteReplicationTaskFromDLQ mocks base method. -func (m *MockExecutionManager) RangeDeleteReplicationTaskFromDLQ(ctx context.Context, request *RangeDeleteReplicationTaskFromDLQRequest) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RangeDeleteReplicationTaskFromDLQ", ctx, request) - ret0, _ := ret[0].(error) - return ret0 -} - -// RangeDeleteReplicationTaskFromDLQ indicates an expected call of RangeDeleteReplicationTaskFromDLQ. -func (mr *MockExecutionManagerMockRecorder) RangeDeleteReplicationTaskFromDLQ(ctx, request interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RangeDeleteReplicationTaskFromDLQ", reflect.TypeOf((*MockExecutionManager)(nil).RangeDeleteReplicationTaskFromDLQ), ctx, request) -} - -// ReadHistoryBranch mocks base method. 
-func (m *MockExecutionManager) ReadHistoryBranch(ctx context.Context, request *ReadHistoryBranchRequest) (*ReadHistoryBranchResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ReadHistoryBranch", ctx, request) - ret0, _ := ret[0].(*ReadHistoryBranchResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ReadHistoryBranch indicates an expected call of ReadHistoryBranch. -func (mr *MockExecutionManagerMockRecorder) ReadHistoryBranch(ctx, request interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadHistoryBranch", reflect.TypeOf((*MockExecutionManager)(nil).ReadHistoryBranch), ctx, request) -} - -// ReadHistoryBranchByBatch mocks base method. -func (m *MockExecutionManager) ReadHistoryBranchByBatch(ctx context.Context, request *ReadHistoryBranchRequest) (*ReadHistoryBranchByBatchResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ReadHistoryBranchByBatch", ctx, request) - ret0, _ := ret[0].(*ReadHistoryBranchByBatchResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ReadHistoryBranchByBatch indicates an expected call of ReadHistoryBranchByBatch. -func (mr *MockExecutionManagerMockRecorder) ReadHistoryBranchByBatch(ctx, request interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadHistoryBranchByBatch", reflect.TypeOf((*MockExecutionManager)(nil).ReadHistoryBranchByBatch), ctx, request) -} - -// ReadHistoryBranchReverse mocks base method. -func (m *MockExecutionManager) ReadHistoryBranchReverse(ctx context.Context, request *ReadHistoryBranchReverseRequest) (*ReadHistoryBranchReverseResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ReadHistoryBranchReverse", ctx, request) - ret0, _ := ret[0].(*ReadHistoryBranchReverseResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ReadHistoryBranchReverse indicates an expected call of ReadHistoryBranchReverse. -func (mr *MockExecutionManagerMockRecorder) ReadHistoryBranchReverse(ctx, request interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadHistoryBranchReverse", reflect.TypeOf((*MockExecutionManager)(nil).ReadHistoryBranchReverse), ctx, request) -} - -// ReadRawHistoryBranch mocks base method. -func (m *MockExecutionManager) ReadRawHistoryBranch(ctx context.Context, request *ReadHistoryBranchRequest) (*ReadRawHistoryBranchResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ReadRawHistoryBranch", ctx, request) - ret0, _ := ret[0].(*ReadRawHistoryBranchResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ReadRawHistoryBranch indicates an expected call of ReadRawHistoryBranch. -func (mr *MockExecutionManagerMockRecorder) ReadRawHistoryBranch(ctx, request interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadRawHistoryBranch", reflect.TypeOf((*MockExecutionManager)(nil).ReadRawHistoryBranch), ctx, request) -} - -// RegisterHistoryTaskReader mocks base method. -func (m *MockExecutionManager) RegisterHistoryTaskReader(ctx context.Context, request *RegisterHistoryTaskReaderRequest) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RegisterHistoryTaskReader", ctx, request) - ret0, _ := ret[0].(error) - return ret0 -} - -// RegisterHistoryTaskReader indicates an expected call of RegisterHistoryTaskReader. 
-func (mr *MockExecutionManagerMockRecorder) RegisterHistoryTaskReader(ctx, request interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterHistoryTaskReader", reflect.TypeOf((*MockExecutionManager)(nil).RegisterHistoryTaskReader), ctx, request) -} - -// SetWorkflowExecution mocks base method. -func (m *MockExecutionManager) SetWorkflowExecution(ctx context.Context, request *SetWorkflowExecutionRequest) (*SetWorkflowExecutionResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SetWorkflowExecution", ctx, request) - ret0, _ := ret[0].(*SetWorkflowExecutionResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// SetWorkflowExecution indicates an expected call of SetWorkflowExecution. -func (mr *MockExecutionManagerMockRecorder) SetWorkflowExecution(ctx, request interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetWorkflowExecution", reflect.TypeOf((*MockExecutionManager)(nil).SetWorkflowExecution), ctx, request) -} - -// TrimHistoryBranch mocks base method. -func (m *MockExecutionManager) TrimHistoryBranch(ctx context.Context, request *TrimHistoryBranchRequest) (*TrimHistoryBranchResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "TrimHistoryBranch", ctx, request) - ret0, _ := ret[0].(*TrimHistoryBranchResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// TrimHistoryBranch indicates an expected call of TrimHistoryBranch. -func (mr *MockExecutionManagerMockRecorder) TrimHistoryBranch(ctx, request interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TrimHistoryBranch", reflect.TypeOf((*MockExecutionManager)(nil).TrimHistoryBranch), ctx, request) -} - -// UnregisterHistoryTaskReader mocks base method. -func (m *MockExecutionManager) UnregisterHistoryTaskReader(ctx context.Context, request *UnregisterHistoryTaskReaderRequest) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "UnregisterHistoryTaskReader", ctx, request) -} - -// UnregisterHistoryTaskReader indicates an expected call of UnregisterHistoryTaskReader. -func (mr *MockExecutionManagerMockRecorder) UnregisterHistoryTaskReader(ctx, request interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UnregisterHistoryTaskReader", reflect.TypeOf((*MockExecutionManager)(nil).UnregisterHistoryTaskReader), ctx, request) -} - -// UpdateHistoryTaskReaderProgress mocks base method. -func (m *MockExecutionManager) UpdateHistoryTaskReaderProgress(ctx context.Context, request *UpdateHistoryTaskReaderProgressRequest) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "UpdateHistoryTaskReaderProgress", ctx, request) -} - -// UpdateHistoryTaskReaderProgress indicates an expected call of UpdateHistoryTaskReaderProgress. -func (mr *MockExecutionManagerMockRecorder) UpdateHistoryTaskReaderProgress(ctx, request interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateHistoryTaskReaderProgress", reflect.TypeOf((*MockExecutionManager)(nil).UpdateHistoryTaskReaderProgress), ctx, request) -} - -// UpdateWorkflowExecution mocks base method. 
-func (m *MockExecutionManager) UpdateWorkflowExecution(ctx context.Context, request *UpdateWorkflowExecutionRequest) (*UpdateWorkflowExecutionResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UpdateWorkflowExecution", ctx, request) - ret0, _ := ret[0].(*UpdateWorkflowExecutionResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// UpdateWorkflowExecution indicates an expected call of UpdateWorkflowExecution. -func (mr *MockExecutionManagerMockRecorder) UpdateWorkflowExecution(ctx, request interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateWorkflowExecution", reflect.TypeOf((*MockExecutionManager)(nil).UpdateWorkflowExecution), ctx, request) -} - -// MockTaskManager is a mock of TaskManager interface. -type MockTaskManager struct { - ctrl *gomock.Controller - recorder *MockTaskManagerMockRecorder -} - -// MockTaskManagerMockRecorder is the mock recorder for MockTaskManager. -type MockTaskManagerMockRecorder struct { - mock *MockTaskManager -} - -// NewMockTaskManager creates a new mock instance. -func NewMockTaskManager(ctrl *gomock.Controller) *MockTaskManager { - mock := &MockTaskManager{ctrl: ctrl} - mock.recorder = &MockTaskManagerMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockTaskManager) EXPECT() *MockTaskManagerMockRecorder { - return m.recorder -} - -// Close mocks base method. -func (m *MockTaskManager) Close() { - m.ctrl.T.Helper() - m.ctrl.Call(m, "Close") -} - -// Close indicates an expected call of Close. -func (mr *MockTaskManagerMockRecorder) Close() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockTaskManager)(nil).Close)) -} - -// CompleteTask mocks base method. -func (m *MockTaskManager) CompleteTask(ctx context.Context, request *CompleteTaskRequest) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CompleteTask", ctx, request) - ret0, _ := ret[0].(error) - return ret0 -} - -// CompleteTask indicates an expected call of CompleteTask. -func (mr *MockTaskManagerMockRecorder) CompleteTask(ctx, request interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CompleteTask", reflect.TypeOf((*MockTaskManager)(nil).CompleteTask), ctx, request) -} - -// CompleteTasksLessThan mocks base method. -func (m *MockTaskManager) CompleteTasksLessThan(ctx context.Context, request *CompleteTasksLessThanRequest) (int, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CompleteTasksLessThan", ctx, request) - ret0, _ := ret[0].(int) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// CompleteTasksLessThan indicates an expected call of CompleteTasksLessThan. -func (mr *MockTaskManagerMockRecorder) CompleteTasksLessThan(ctx, request interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CompleteTasksLessThan", reflect.TypeOf((*MockTaskManager)(nil).CompleteTasksLessThan), ctx, request) -} - -// CountTaskQueuesByBuildId mocks base method. -func (m *MockTaskManager) CountTaskQueuesByBuildId(ctx context.Context, request *CountTaskQueuesByBuildIdRequest) (int, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CountTaskQueuesByBuildId", ctx, request) - ret0, _ := ret[0].(int) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// CountTaskQueuesByBuildId indicates an expected call of CountTaskQueuesByBuildId. 
-func (mr *MockTaskManagerMockRecorder) CountTaskQueuesByBuildId(ctx, request interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CountTaskQueuesByBuildId", reflect.TypeOf((*MockTaskManager)(nil).CountTaskQueuesByBuildId), ctx, request) -} - -// CreateTaskQueue mocks base method. -func (m *MockTaskManager) CreateTaskQueue(ctx context.Context, request *CreateTaskQueueRequest) (*CreateTaskQueueResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CreateTaskQueue", ctx, request) - ret0, _ := ret[0].(*CreateTaskQueueResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// CreateTaskQueue indicates an expected call of CreateTaskQueue. -func (mr *MockTaskManagerMockRecorder) CreateTaskQueue(ctx, request interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateTaskQueue", reflect.TypeOf((*MockTaskManager)(nil).CreateTaskQueue), ctx, request) -} - -// CreateTasks mocks base method. -func (m *MockTaskManager) CreateTasks(ctx context.Context, request *CreateTasksRequest) (*CreateTasksResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CreateTasks", ctx, request) - ret0, _ := ret[0].(*CreateTasksResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// CreateTasks indicates an expected call of CreateTasks. -func (mr *MockTaskManagerMockRecorder) CreateTasks(ctx, request interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateTasks", reflect.TypeOf((*MockTaskManager)(nil).CreateTasks), ctx, request) -} - -// DeleteTaskQueue mocks base method. -func (m *MockTaskManager) DeleteTaskQueue(ctx context.Context, request *DeleteTaskQueueRequest) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteTaskQueue", ctx, request) - ret0, _ := ret[0].(error) - return ret0 -} - -// DeleteTaskQueue indicates an expected call of DeleteTaskQueue. -func (mr *MockTaskManagerMockRecorder) DeleteTaskQueue(ctx, request interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteTaskQueue", reflect.TypeOf((*MockTaskManager)(nil).DeleteTaskQueue), ctx, request) -} - -// GetName mocks base method. -func (m *MockTaskManager) GetName() string { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetName") - ret0, _ := ret[0].(string) - return ret0 -} - -// GetName indicates an expected call of GetName. -func (mr *MockTaskManagerMockRecorder) GetName() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetName", reflect.TypeOf((*MockTaskManager)(nil).GetName)) -} - -// GetTaskQueue mocks base method. -func (m *MockTaskManager) GetTaskQueue(ctx context.Context, request *GetTaskQueueRequest) (*GetTaskQueueResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetTaskQueue", ctx, request) - ret0, _ := ret[0].(*GetTaskQueueResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetTaskQueue indicates an expected call of GetTaskQueue. -func (mr *MockTaskManagerMockRecorder) GetTaskQueue(ctx, request interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTaskQueue", reflect.TypeOf((*MockTaskManager)(nil).GetTaskQueue), ctx, request) -} - -// GetTaskQueueUserData mocks base method. 
-func (m *MockTaskManager) GetTaskQueueUserData(ctx context.Context, request *GetTaskQueueUserDataRequest) (*GetTaskQueueUserDataResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetTaskQueueUserData", ctx, request) - ret0, _ := ret[0].(*GetTaskQueueUserDataResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetTaskQueueUserData indicates an expected call of GetTaskQueueUserData. -func (mr *MockTaskManagerMockRecorder) GetTaskQueueUserData(ctx, request interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTaskQueueUserData", reflect.TypeOf((*MockTaskManager)(nil).GetTaskQueueUserData), ctx, request) -} - -// GetTaskQueuesByBuildId mocks base method. -func (m *MockTaskManager) GetTaskQueuesByBuildId(ctx context.Context, request *GetTaskQueuesByBuildIdRequest) ([]string, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetTaskQueuesByBuildId", ctx, request) - ret0, _ := ret[0].([]string) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetTaskQueuesByBuildId indicates an expected call of GetTaskQueuesByBuildId. -func (mr *MockTaskManagerMockRecorder) GetTaskQueuesByBuildId(ctx, request interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTaskQueuesByBuildId", reflect.TypeOf((*MockTaskManager)(nil).GetTaskQueuesByBuildId), ctx, request) -} - -// GetTasks mocks base method. -func (m *MockTaskManager) GetTasks(ctx context.Context, request *GetTasksRequest) (*GetTasksResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetTasks", ctx, request) - ret0, _ := ret[0].(*GetTasksResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetTasks indicates an expected call of GetTasks. -func (mr *MockTaskManagerMockRecorder) GetTasks(ctx, request interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTasks", reflect.TypeOf((*MockTaskManager)(nil).GetTasks), ctx, request) -} - -// ListTaskQueue mocks base method. -func (m *MockTaskManager) ListTaskQueue(ctx context.Context, request *ListTaskQueueRequest) (*ListTaskQueueResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListTaskQueue", ctx, request) - ret0, _ := ret[0].(*ListTaskQueueResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListTaskQueue indicates an expected call of ListTaskQueue. -func (mr *MockTaskManagerMockRecorder) ListTaskQueue(ctx, request interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListTaskQueue", reflect.TypeOf((*MockTaskManager)(nil).ListTaskQueue), ctx, request) -} - -// ListTaskQueueUserDataEntries mocks base method. -func (m *MockTaskManager) ListTaskQueueUserDataEntries(ctx context.Context, request *ListTaskQueueUserDataEntriesRequest) (*ListTaskQueueUserDataEntriesResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListTaskQueueUserDataEntries", ctx, request) - ret0, _ := ret[0].(*ListTaskQueueUserDataEntriesResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListTaskQueueUserDataEntries indicates an expected call of ListTaskQueueUserDataEntries. 
-func (mr *MockTaskManagerMockRecorder) ListTaskQueueUserDataEntries(ctx, request interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListTaskQueueUserDataEntries", reflect.TypeOf((*MockTaskManager)(nil).ListTaskQueueUserDataEntries), ctx, request) -} - -// UpdateTaskQueue mocks base method. -func (m *MockTaskManager) UpdateTaskQueue(ctx context.Context, request *UpdateTaskQueueRequest) (*UpdateTaskQueueResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UpdateTaskQueue", ctx, request) - ret0, _ := ret[0].(*UpdateTaskQueueResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// UpdateTaskQueue indicates an expected call of UpdateTaskQueue. -func (mr *MockTaskManagerMockRecorder) UpdateTaskQueue(ctx, request interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateTaskQueue", reflect.TypeOf((*MockTaskManager)(nil).UpdateTaskQueue), ctx, request) -} - -// UpdateTaskQueueUserData mocks base method. -func (m *MockTaskManager) UpdateTaskQueueUserData(ctx context.Context, request *UpdateTaskQueueUserDataRequest) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UpdateTaskQueueUserData", ctx, request) - ret0, _ := ret[0].(error) - return ret0 -} - -// UpdateTaskQueueUserData indicates an expected call of UpdateTaskQueueUserData. -func (mr *MockTaskManagerMockRecorder) UpdateTaskQueueUserData(ctx, request interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateTaskQueueUserData", reflect.TypeOf((*MockTaskManager)(nil).UpdateTaskQueueUserData), ctx, request) -} - -// MockMetadataManager is a mock of MetadataManager interface. -type MockMetadataManager struct { - ctrl *gomock.Controller - recorder *MockMetadataManagerMockRecorder -} - -// MockMetadataManagerMockRecorder is the mock recorder for MockMetadataManager. -type MockMetadataManagerMockRecorder struct { - mock *MockMetadataManager -} - -// NewMockMetadataManager creates a new mock instance. -func NewMockMetadataManager(ctrl *gomock.Controller) *MockMetadataManager { - mock := &MockMetadataManager{ctrl: ctrl} - mock.recorder = &MockMetadataManagerMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockMetadataManager) EXPECT() *MockMetadataManagerMockRecorder { - return m.recorder -} - -// Close mocks base method. -func (m *MockMetadataManager) Close() { - m.ctrl.T.Helper() - m.ctrl.Call(m, "Close") -} - -// Close indicates an expected call of Close. -func (mr *MockMetadataManagerMockRecorder) Close() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockMetadataManager)(nil).Close)) -} - -// CreateNamespace mocks base method. -func (m *MockMetadataManager) CreateNamespace(ctx context.Context, request *CreateNamespaceRequest) (*CreateNamespaceResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CreateNamespace", ctx, request) - ret0, _ := ret[0].(*CreateNamespaceResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// CreateNamespace indicates an expected call of CreateNamespace. 
-func (mr *MockMetadataManagerMockRecorder) CreateNamespace(ctx, request interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateNamespace", reflect.TypeOf((*MockMetadataManager)(nil).CreateNamespace), ctx, request) -} - -// DeleteNamespace mocks base method. -func (m *MockMetadataManager) DeleteNamespace(ctx context.Context, request *DeleteNamespaceRequest) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteNamespace", ctx, request) - ret0, _ := ret[0].(error) - return ret0 -} - -// DeleteNamespace indicates an expected call of DeleteNamespace. -func (mr *MockMetadataManagerMockRecorder) DeleteNamespace(ctx, request interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteNamespace", reflect.TypeOf((*MockMetadataManager)(nil).DeleteNamespace), ctx, request) -} - -// DeleteNamespaceByName mocks base method. -func (m *MockMetadataManager) DeleteNamespaceByName(ctx context.Context, request *DeleteNamespaceByNameRequest) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteNamespaceByName", ctx, request) - ret0, _ := ret[0].(error) - return ret0 -} - -// DeleteNamespaceByName indicates an expected call of DeleteNamespaceByName. -func (mr *MockMetadataManagerMockRecorder) DeleteNamespaceByName(ctx, request interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteNamespaceByName", reflect.TypeOf((*MockMetadataManager)(nil).DeleteNamespaceByName), ctx, request) -} - -// GetMetadata mocks base method. -func (m *MockMetadataManager) GetMetadata(ctx context.Context) (*GetMetadataResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetMetadata", ctx) - ret0, _ := ret[0].(*GetMetadataResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetMetadata indicates an expected call of GetMetadata. -func (mr *MockMetadataManagerMockRecorder) GetMetadata(ctx interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMetadata", reflect.TypeOf((*MockMetadataManager)(nil).GetMetadata), ctx) -} - -// GetName mocks base method. -func (m *MockMetadataManager) GetName() string { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetName") - ret0, _ := ret[0].(string) - return ret0 -} - -// GetName indicates an expected call of GetName. -func (mr *MockMetadataManagerMockRecorder) GetName() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetName", reflect.TypeOf((*MockMetadataManager)(nil).GetName)) -} - -// GetNamespace mocks base method. -func (m *MockMetadataManager) GetNamespace(ctx context.Context, request *GetNamespaceRequest) (*GetNamespaceResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetNamespace", ctx, request) - ret0, _ := ret[0].(*GetNamespaceResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetNamespace indicates an expected call of GetNamespace. -func (mr *MockMetadataManagerMockRecorder) GetNamespace(ctx, request interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNamespace", reflect.TypeOf((*MockMetadataManager)(nil).GetNamespace), ctx, request) -} - -// InitializeSystemNamespaces mocks base method. 
-func (m *MockMetadataManager) InitializeSystemNamespaces(ctx context.Context, currentClusterName string) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "InitializeSystemNamespaces", ctx, currentClusterName) - ret0, _ := ret[0].(error) - return ret0 -} - -// InitializeSystemNamespaces indicates an expected call of InitializeSystemNamespaces. -func (mr *MockMetadataManagerMockRecorder) InitializeSystemNamespaces(ctx, currentClusterName interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InitializeSystemNamespaces", reflect.TypeOf((*MockMetadataManager)(nil).InitializeSystemNamespaces), ctx, currentClusterName) -} - -// ListNamespaces mocks base method. -func (m *MockMetadataManager) ListNamespaces(ctx context.Context, request *ListNamespacesRequest) (*ListNamespacesResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListNamespaces", ctx, request) - ret0, _ := ret[0].(*ListNamespacesResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListNamespaces indicates an expected call of ListNamespaces. -func (mr *MockMetadataManagerMockRecorder) ListNamespaces(ctx, request interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListNamespaces", reflect.TypeOf((*MockMetadataManager)(nil).ListNamespaces), ctx, request) -} - -// RenameNamespace mocks base method. -func (m *MockMetadataManager) RenameNamespace(ctx context.Context, request *RenameNamespaceRequest) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RenameNamespace", ctx, request) - ret0, _ := ret[0].(error) - return ret0 -} - -// RenameNamespace indicates an expected call of RenameNamespace. -func (mr *MockMetadataManagerMockRecorder) RenameNamespace(ctx, request interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RenameNamespace", reflect.TypeOf((*MockMetadataManager)(nil).RenameNamespace), ctx, request) -} - -// UpdateNamespace mocks base method. -func (m *MockMetadataManager) UpdateNamespace(ctx context.Context, request *UpdateNamespaceRequest) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UpdateNamespace", ctx, request) - ret0, _ := ret[0].(error) - return ret0 -} - -// UpdateNamespace indicates an expected call of UpdateNamespace. -func (mr *MockMetadataManagerMockRecorder) UpdateNamespace(ctx, request interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateNamespace", reflect.TypeOf((*MockMetadataManager)(nil).UpdateNamespace), ctx, request) -} - -// MockClusterMetadataManager is a mock of ClusterMetadataManager interface. -type MockClusterMetadataManager struct { - ctrl *gomock.Controller - recorder *MockClusterMetadataManagerMockRecorder -} - -// MockClusterMetadataManagerMockRecorder is the mock recorder for MockClusterMetadataManager. -type MockClusterMetadataManagerMockRecorder struct { - mock *MockClusterMetadataManager -} - -// NewMockClusterMetadataManager creates a new mock instance. -func NewMockClusterMetadataManager(ctrl *gomock.Controller) *MockClusterMetadataManager { - mock := &MockClusterMetadataManager{ctrl: ctrl} - mock.recorder = &MockClusterMetadataManagerMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockClusterMetadataManager) EXPECT() *MockClusterMetadataManagerMockRecorder { - return m.recorder -} - -// Close mocks base method. 
-func (m *MockClusterMetadataManager) Close() { - m.ctrl.T.Helper() - m.ctrl.Call(m, "Close") -} - -// Close indicates an expected call of Close. -func (mr *MockClusterMetadataManagerMockRecorder) Close() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockClusterMetadataManager)(nil).Close)) -} - -// DeleteClusterMetadata mocks base method. -func (m *MockClusterMetadataManager) DeleteClusterMetadata(ctx context.Context, request *DeleteClusterMetadataRequest) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteClusterMetadata", ctx, request) - ret0, _ := ret[0].(error) - return ret0 -} - -// DeleteClusterMetadata indicates an expected call of DeleteClusterMetadata. -func (mr *MockClusterMetadataManagerMockRecorder) DeleteClusterMetadata(ctx, request interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteClusterMetadata", reflect.TypeOf((*MockClusterMetadataManager)(nil).DeleteClusterMetadata), ctx, request) -} - -// GetClusterMembers mocks base method. -func (m *MockClusterMetadataManager) GetClusterMembers(ctx context.Context, request *GetClusterMembersRequest) (*GetClusterMembersResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetClusterMembers", ctx, request) - ret0, _ := ret[0].(*GetClusterMembersResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetClusterMembers indicates an expected call of GetClusterMembers. -func (mr *MockClusterMetadataManagerMockRecorder) GetClusterMembers(ctx, request interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetClusterMembers", reflect.TypeOf((*MockClusterMetadataManager)(nil).GetClusterMembers), ctx, request) -} - -// GetClusterMetadata mocks base method. -func (m *MockClusterMetadataManager) GetClusterMetadata(ctx context.Context, request *GetClusterMetadataRequest) (*GetClusterMetadataResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetClusterMetadata", ctx, request) - ret0, _ := ret[0].(*GetClusterMetadataResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetClusterMetadata indicates an expected call of GetClusterMetadata. -func (mr *MockClusterMetadataManagerMockRecorder) GetClusterMetadata(ctx, request interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetClusterMetadata", reflect.TypeOf((*MockClusterMetadataManager)(nil).GetClusterMetadata), ctx, request) -} - -// GetCurrentClusterMetadata mocks base method. -func (m *MockClusterMetadataManager) GetCurrentClusterMetadata(ctx context.Context) (*GetClusterMetadataResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetCurrentClusterMetadata", ctx) - ret0, _ := ret[0].(*GetClusterMetadataResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetCurrentClusterMetadata indicates an expected call of GetCurrentClusterMetadata. -func (mr *MockClusterMetadataManagerMockRecorder) GetCurrentClusterMetadata(ctx interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentClusterMetadata", reflect.TypeOf((*MockClusterMetadataManager)(nil).GetCurrentClusterMetadata), ctx) -} - -// GetName mocks base method. 
-func (m *MockClusterMetadataManager) GetName() string { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetName") - ret0, _ := ret[0].(string) - return ret0 -} - -// GetName indicates an expected call of GetName. -func (mr *MockClusterMetadataManagerMockRecorder) GetName() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetName", reflect.TypeOf((*MockClusterMetadataManager)(nil).GetName)) -} - -// ListClusterMetadata mocks base method. -func (m *MockClusterMetadataManager) ListClusterMetadata(ctx context.Context, request *ListClusterMetadataRequest) (*ListClusterMetadataResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListClusterMetadata", ctx, request) - ret0, _ := ret[0].(*ListClusterMetadataResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListClusterMetadata indicates an expected call of ListClusterMetadata. -func (mr *MockClusterMetadataManagerMockRecorder) ListClusterMetadata(ctx, request interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListClusterMetadata", reflect.TypeOf((*MockClusterMetadataManager)(nil).ListClusterMetadata), ctx, request) -} - -// PruneClusterMembership mocks base method. -func (m *MockClusterMetadataManager) PruneClusterMembership(ctx context.Context, request *PruneClusterMembershipRequest) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PruneClusterMembership", ctx, request) - ret0, _ := ret[0].(error) - return ret0 -} - -// PruneClusterMembership indicates an expected call of PruneClusterMembership. -func (mr *MockClusterMetadataManagerMockRecorder) PruneClusterMembership(ctx, request interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PruneClusterMembership", reflect.TypeOf((*MockClusterMetadataManager)(nil).PruneClusterMembership), ctx, request) -} - -// SaveClusterMetadata mocks base method. -func (m *MockClusterMetadataManager) SaveClusterMetadata(ctx context.Context, request *SaveClusterMetadataRequest) (bool, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SaveClusterMetadata", ctx, request) - ret0, _ := ret[0].(bool) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// SaveClusterMetadata indicates an expected call of SaveClusterMetadata. -func (mr *MockClusterMetadataManagerMockRecorder) SaveClusterMetadata(ctx, request interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveClusterMetadata", reflect.TypeOf((*MockClusterMetadataManager)(nil).SaveClusterMetadata), ctx, request) -} - -// UpsertClusterMembership mocks base method. -func (m *MockClusterMetadataManager) UpsertClusterMembership(ctx context.Context, request *UpsertClusterMembershipRequest) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UpsertClusterMembership", ctx, request) - ret0, _ := ret[0].(error) - return ret0 -} - -// UpsertClusterMembership indicates an expected call of UpsertClusterMembership. 
-func (mr *MockClusterMetadataManagerMockRecorder) UpsertClusterMembership(ctx, request interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpsertClusterMembership", reflect.TypeOf((*MockClusterMetadataManager)(nil).UpsertClusterMembership), ctx, request) -} diff -Nru temporal-1.21.5-1/src/common/persistence/data_interfaces.go temporal-1.22.5/src/common/persistence/data_interfaces.go --- temporal-1.21.5-1/src/common/persistence/data_interfaces.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/data_interfaces.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,1299 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +//go:generate mockgen -copyright_file ../../LICENSE -package $GOPACKAGE -source $GOFILE -destination data_interfaces_mock.go + +package persistence + +import ( + "context" + "fmt" + "net" + "strings" + "time" + + "github.com/pborman/uuid" + commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" + historypb "go.temporal.io/api/history/v1" + + enumsspb "go.temporal.io/server/api/enums/v1" + persistencespb "go.temporal.io/server/api/persistence/v1" + "go.temporal.io/server/service/history/tasks" +) + +// CreateWorkflowMode workflow creation mode +type CreateWorkflowMode int + +// QueueType is an enum that represents various queue types in persistence +type QueueType int32 + +// Queue types used in queue table +// Use positive numbers for queue type +// Negative numbers are reserved for DLQ + +const ( + NamespaceReplicationQueueType QueueType = iota + 1 +) + +// Create Workflow Execution Mode +const ( + // CreateWorkflowModeBrandNew fail if current record exists + // Only applicable for CreateWorkflowExecution + CreateWorkflowModeBrandNew CreateWorkflowMode = iota + // CreateWorkflowModeUpdateCurrent update current record only if workflow is closed + // Only applicable for CreateWorkflowExecution + CreateWorkflowModeUpdateCurrent + // CreateWorkflowModeBypassCurrent do not update current record since workflow is in zombie state + // applicable for CreateWorkflowExecution, UpdateWorkflowExecution + CreateWorkflowModeBypassCurrent +) + +// UpdateWorkflowMode update mode +type UpdateWorkflowMode int + +// Update Workflow Execution Mode +const ( + // UpdateWorkflowModeUpdateCurrent update workflow, including current record + // NOTE: update on current record is a condition update + UpdateWorkflowModeUpdateCurrent UpdateWorkflowMode = iota + // UpdateWorkflowModeBypassCurrent update workflow, without current record + // NOTE: current record CANNOT point to the workflow to be updated + UpdateWorkflowModeBypassCurrent +) + +// ConflictResolveWorkflowMode conflict resolve mode +type ConflictResolveWorkflowMode int + +// Conflict Resolve Workflow Mode +const ( + // ConflictResolveWorkflowModeUpdateCurrent conflict resolve workflow, including current record + // NOTE: update on current record is a condition update + ConflictResolveWorkflowModeUpdateCurrent ConflictResolveWorkflowMode = iota + // ConflictResolveWorkflowModeBypassCurrent conflict resolve workflow, without current record + // NOTE: current record CANNOT point to the workflow to be updated + ConflictResolveWorkflowModeBypassCurrent +) + +// UnknownNumRowsAffected is returned when the number of rows that an API affected cannot be determined +const UnknownNumRowsAffected = -1 + +const ( + // InitialFailoverNotificationVersion is the initial failover version for a namespace + InitialFailoverNotificationVersion int64 = 0 +) + +const numItemsInGarbageInfo = 3 + +const ScheduledTaskMinPrecision = time.Millisecond + +type ( + // InvalidPersistenceRequestError represents invalid request to persistence + InvalidPersistenceRequestError struct { + Msg string + } + + // AppendHistoryTimeoutError represents a failed insert to history tree / node request + AppendHistoryTimeoutError struct { + Msg string + } + + // CurrentWorkflowConditionFailedError represents a failed conditional update for current workflow record + CurrentWorkflowConditionFailedError struct { + Msg string + RequestID string + RunID string + State enumsspb.WorkflowExecutionState + Status enumspb.WorkflowExecutionStatus + LastWriteVersion int64 + } + + // 
WorkflowConditionFailedError represents a failed conditional update for workflow record + WorkflowConditionFailedError struct { + Msg string + NextEventID int64 + DBRecordVersion int64 + } + + // ConditionFailedError represents a failed conditional update for execution record + ConditionFailedError struct { + Msg string + } + + // ShardAlreadyExistError is returned when conditionally creating a shard fails + ShardAlreadyExistError struct { + Msg string + } + + // ShardOwnershipLostError is returned when conditional update fails due to RangeID for the shard + ShardOwnershipLostError struct { + ShardID int32 + Msg string + } + + // TimeoutError is returned when a write operation fails due to a timeout + TimeoutError struct { + Msg string + } + + // TransactionSizeLimitError is returned when the transaction size is too large + TransactionSizeLimitError struct { + Msg string + } + + // TaskQueueKey is the struct used to identity TaskQueues + TaskQueueKey struct { + NamespaceID string + TaskQueueName string + TaskQueueType enumspb.TaskQueueType + } + + // GetOrCreateShardRequest is used to get shard information, or supply + // initial information to create a shard in executions table + GetOrCreateShardRequest struct { + ShardID int32 + InitialShardInfo *persistencespb.ShardInfo // optional, zero value will be used if missing + LifecycleContext context.Context // cancelled when shard is unloaded + } + + // GetOrCreateShardResponse is the response to GetOrCreateShard + GetOrCreateShardResponse struct { + ShardInfo *persistencespb.ShardInfo + } + + // UpdateShardRequest is used to update shard information + UpdateShardRequest struct { + ShardInfo *persistencespb.ShardInfo + PreviousRangeID int64 + } + + // AssertShardOwnershipRequest is used to assert shard ownership + AssertShardOwnershipRequest struct { + ShardID int32 + RangeID int64 + } + + // AddHistoryTasksRequest is used to write new tasks + AddHistoryTasksRequest struct { + ShardID int32 + RangeID int64 + + NamespaceID string + WorkflowID string + RunID string + + Tasks map[tasks.Category][]tasks.Task + } + + // CreateWorkflowExecutionRequest is used to write a new workflow execution + CreateWorkflowExecutionRequest struct { + ShardID int32 + RangeID int64 + + Mode CreateWorkflowMode + + PreviousRunID string + PreviousLastWriteVersion int64 + + NewWorkflowSnapshot WorkflowSnapshot + NewWorkflowEvents []*WorkflowEvents + } + + // CreateWorkflowExecutionResponse is the response to CreateWorkflowExecutionRequest + CreateWorkflowExecutionResponse struct { + NewMutableStateStats MutableStateStatistics + } + + // UpdateWorkflowExecutionRequest is used to update a workflow execution + UpdateWorkflowExecutionRequest struct { + ShardID int32 + RangeID int64 + + Mode UpdateWorkflowMode + + UpdateWorkflowMutation WorkflowMutation + UpdateWorkflowEvents []*WorkflowEvents + NewWorkflowSnapshot *WorkflowSnapshot + NewWorkflowEvents []*WorkflowEvents + } + + // UpdateWorkflowExecutionResponse is response for UpdateWorkflowExecutionRequest + UpdateWorkflowExecutionResponse struct { + UpdateMutableStateStats MutableStateStatistics + NewMutableStateStats *MutableStateStatistics + } + + // ConflictResolveWorkflowExecutionRequest is used to reset workflow execution state for a single run + ConflictResolveWorkflowExecutionRequest struct { + ShardID int32 + RangeID int64 + + Mode ConflictResolveWorkflowMode + + // workflow to be resetted + ResetWorkflowSnapshot WorkflowSnapshot + ResetWorkflowEvents []*WorkflowEvents + + // maybe new workflow + 
NewWorkflowSnapshot *WorkflowSnapshot + NewWorkflowEvents []*WorkflowEvents + + // current workflow + CurrentWorkflowMutation *WorkflowMutation + CurrentWorkflowEvents []*WorkflowEvents + } + + ConflictResolveWorkflowExecutionResponse struct { + ResetMutableStateStats MutableStateStatistics + NewMutableStateStats *MutableStateStatistics + CurrentMutableStateStats *MutableStateStatistics + } + + // GetCurrentExecutionRequest is used to retrieve the current RunId for an execution + GetCurrentExecutionRequest struct { + ShardID int32 + NamespaceID string + WorkflowID string + } + + // GetCurrentExecutionResponse is the response to GetCurrentExecution + GetCurrentExecutionResponse struct { + StartRequestID string + RunID string + State enumsspb.WorkflowExecutionState + Status enumspb.WorkflowExecutionStatus + } + + // GetWorkflowExecutionRequest is used to retrieve the info of a workflow execution + GetWorkflowExecutionRequest struct { + ShardID int32 + NamespaceID string + WorkflowID string + RunID string + } + + // GetWorkflowExecutionResponse is the response to GetWorkflowExecutionRequest + GetWorkflowExecutionResponse struct { + State *persistencespb.WorkflowMutableState + DBRecordVersion int64 + MutableStateStats MutableStateStatistics + } + + // SetWorkflowExecutionRequest is used to overwrite the info of a workflow execution + SetWorkflowExecutionRequest struct { + ShardID int32 + RangeID int64 + + SetWorkflowSnapshot WorkflowSnapshot + } + + // SetWorkflowExecutionResponse is the response to SetWorkflowExecutionRequest + SetWorkflowExecutionResponse struct { + } + + // ListConcreteExecutionsRequest is request to ListConcreteExecutions + ListConcreteExecutionsRequest struct { + ShardID int32 + PageSize int + PageToken []byte + } + + // ListConcreteExecutionsResponse is response to ListConcreteExecutions + ListConcreteExecutionsResponse struct { + States []*persistencespb.WorkflowMutableState + PageToken []byte + } + + // WorkflowEvents is used as generic workflow history events transaction container + WorkflowEvents struct { + NamespaceID string + WorkflowID string + RunID string + BranchToken []byte + PrevTxnID int64 + TxnID int64 + Events []*historypb.HistoryEvent + } + + // WorkflowMutation is used as generic workflow execution state mutation + WorkflowMutation struct { + ExecutionInfo *persistencespb.WorkflowExecutionInfo + ExecutionState *persistencespb.WorkflowExecutionState + // TODO deprecate NextEventID in favor of DBRecordVersion + NextEventID int64 + + UpsertActivityInfos map[int64]*persistencespb.ActivityInfo + DeleteActivityInfos map[int64]struct{} + UpsertTimerInfos map[string]*persistencespb.TimerInfo + DeleteTimerInfos map[string]struct{} + UpsertChildExecutionInfos map[int64]*persistencespb.ChildExecutionInfo + DeleteChildExecutionInfos map[int64]struct{} + UpsertRequestCancelInfos map[int64]*persistencespb.RequestCancelInfo + DeleteRequestCancelInfos map[int64]struct{} + UpsertSignalInfos map[int64]*persistencespb.SignalInfo + DeleteSignalInfos map[int64]struct{} + UpsertSignalRequestedIDs map[string]struct{} + DeleteSignalRequestedIDs map[string]struct{} + NewBufferedEvents []*historypb.HistoryEvent + ClearBufferedEvents bool + + Tasks map[tasks.Category][]tasks.Task + + // TODO deprecate Condition in favor of DBRecordVersion + Condition int64 + DBRecordVersion int64 + Checksum *persistencespb.Checksum + } + + // WorkflowSnapshot is used as generic workflow execution state snapshot + WorkflowSnapshot struct { + ExecutionInfo *persistencespb.WorkflowExecutionInfo + 
ExecutionState *persistencespb.WorkflowExecutionState + // TODO deprecate NextEventID in favor of DBRecordVersion + NextEventID int64 + + ActivityInfos map[int64]*persistencespb.ActivityInfo + TimerInfos map[string]*persistencespb.TimerInfo + ChildExecutionInfos map[int64]*persistencespb.ChildExecutionInfo + RequestCancelInfos map[int64]*persistencespb.RequestCancelInfo + SignalInfos map[int64]*persistencespb.SignalInfo + SignalRequestedIDs map[string]struct{} + + Tasks map[tasks.Category][]tasks.Task + + // TODO deprecate Condition in favor of DBRecordVersion + Condition int64 + DBRecordVersion int64 + Checksum *persistencespb.Checksum + } + + // DeleteWorkflowExecutionRequest is used to delete a workflow execution + DeleteWorkflowExecutionRequest struct { + ShardID int32 + NamespaceID string + WorkflowID string + RunID string + } + + // DeleteCurrentWorkflowExecutionRequest is used to delete the current workflow execution + DeleteCurrentWorkflowExecutionRequest struct { + ShardID int32 + NamespaceID string + WorkflowID string + RunID string + } + + // RegisterHistoryTaskReaderRequest is a hint for underlying persistence implementation + // that a new queue reader is created by queue processing logic + RegisterHistoryTaskReaderRequest struct { + ShardID int32 + ShardOwner string + TaskCategory tasks.Category + ReaderID int64 + } + + // UnregisterHistoryTaskReaderRequest is a hint for underlying persistence implementation + // that queue processing logic is done using an existing queue reader + UnregisterHistoryTaskReaderRequest RegisterHistoryTaskReaderRequest + + // UpdateHistoryTaskReaderProgressRequest is a hint for underlying persistence implementation + // that a certain queue reader's process and the fact that it won't try to load tasks with + // key less than InclusiveMinPendingTaskKey + UpdateHistoryTaskReaderProgressRequest struct { + ShardID int32 + ShardOwner string + TaskCategory tasks.Category + ReaderID int64 + InclusiveMinPendingTaskKey tasks.Key + } + + // GetHistoryTasksRequest is used to get a range of history tasks + // Either max TaskID or FireTime is required depending on the + // task category type. Min TaskID or FireTime is optional. + GetHistoryTasksRequest struct { + ShardID int32 + TaskCategory tasks.Category + ReaderID int64 + InclusiveMinTaskKey tasks.Key + ExclusiveMaxTaskKey tasks.Key + BatchSize int + NextPageToken []byte + } + + // GetHistoryTasksResponse is the response for GetHistoryTasks + GetHistoryTasksResponse struct { + Tasks []tasks.Task + NextPageToken []byte + } + + // CompleteHistoryTaskRequest delete one history task + CompleteHistoryTaskRequest struct { + ShardID int32 + TaskCategory tasks.Category + TaskKey tasks.Key + } + + // RangeCompleteHistoryTasksRequest deletes a range of history tasks + // Either max TaskID or FireTime is required depending on the + // task category type. Min TaskID or FireTime is optional. 
+ RangeCompleteHistoryTasksRequest struct { + ShardID int32 + TaskCategory tasks.Category + InclusiveMinTaskKey tasks.Key + ExclusiveMaxTaskKey tasks.Key + } + + // GetReplicationTasksRequest is used to read tasks from the replication task queue + GetReplicationTasksRequest struct { + ShardID int32 + MinTaskID int64 + MaxTaskID int64 + BatchSize int + NextPageToken []byte + } + + // PutReplicationTaskToDLQRequest is used to put a replication task to dlq + PutReplicationTaskToDLQRequest struct { + ShardID int32 + SourceClusterName string + TaskInfo *persistencespb.ReplicationTaskInfo + } + + // GetReplicationTasksFromDLQRequest is used to get replication tasks from dlq + GetReplicationTasksFromDLQRequest struct { + GetHistoryTasksRequest + + SourceClusterName string + } + + // DeleteReplicationTaskFromDLQRequest is used to delete replication task from DLQ + DeleteReplicationTaskFromDLQRequest struct { + CompleteHistoryTaskRequest + + SourceClusterName string + } + + // RangeDeleteReplicationTaskFromDLQRequest is used to delete replication tasks from DLQ + RangeDeleteReplicationTaskFromDLQRequest struct { + RangeCompleteHistoryTasksRequest + + SourceClusterName string + } + + // CreateTaskQueueRequest create a new task queue + CreateTaskQueueRequest struct { + RangeID int64 + TaskQueueInfo *persistencespb.TaskQueueInfo + } + + // CreateTaskQueueResponse is the response to CreateTaskQueue + CreateTaskQueueResponse struct { + } + + // UpdateTaskQueueRequest is used to update task queue implementation information + UpdateTaskQueueRequest struct { + RangeID int64 + TaskQueueInfo *persistencespb.TaskQueueInfo + + PrevRangeID int64 + } + + // UpdateTaskQueueResponse is the response to UpdateTaskQueue + UpdateTaskQueueResponse struct { + } + + // GetTaskQueueRequest get the target task queue + GetTaskQueueRequest struct { + NamespaceID string + TaskQueue string + TaskType enumspb.TaskQueueType + } + + // GetTaskQueueResponse is the response to GetTaskQueue + GetTaskQueueResponse struct { + RangeID int64 + TaskQueueInfo *persistencespb.TaskQueueInfo + } + + // GetTaskQueueUserDataRequest is the input type for the GetTaskQueueUserData API + GetTaskQueueUserDataRequest struct { + NamespaceID string + TaskQueue string + } + + // GetTaskQueueUserDataResponse is the output type for the GetTaskQueueUserData API + GetTaskQueueUserDataResponse struct { + UserData *persistencespb.VersionedTaskQueueUserData + } + + // UpdateTaskQueueUserDataRequest is the input type for the UpdateTaskQueueUserData API + UpdateTaskQueueUserDataRequest struct { + NamespaceID string + TaskQueue string + UserData *persistencespb.VersionedTaskQueueUserData + BuildIdsAdded []string + BuildIdsRemoved []string + } + + ListTaskQueueUserDataEntriesRequest struct { + NamespaceID string + PageSize int + NextPageToken []byte + } + + TaskQueueUserDataEntry struct { + TaskQueue string + UserData *persistencespb.VersionedTaskQueueUserData + } + + ListTaskQueueUserDataEntriesResponse struct { + NextPageToken []byte + Entries []*TaskQueueUserDataEntry + } + + GetTaskQueuesByBuildIdRequest struct { + NamespaceID string + BuildID string + } + + CountTaskQueuesByBuildIdRequest struct { + NamespaceID string + BuildID string + } + + // ListTaskQueueRequest contains the request params needed to invoke ListTaskQueue API + ListTaskQueueRequest struct { + PageSize int + PageToken []byte + } + + // ListTaskQueueResponse is the response from ListTaskQueue API + ListTaskQueueResponse struct { + Items []*PersistedTaskQueueInfo + NextPageToken []byte + } + 
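The list and paging request/response types above share a simple contract: the caller supplies PageSize plus the token returned by the previous page, and keeps iterating until the returned token is empty. A minimal caller-side sketch of that loop, assuming a hypothetical package and helper name, a TaskManager value mgr (the interface itself is declared further down in this file), an arbitrary page size of 100, and that ListTaskQueue follows the same empty-token-ends-paging convention the other paging responses in this file document:

package exampleusage // hypothetical package, for illustration only

import (
    "context"

    "go.temporal.io/server/common/persistence"
)

// listAllTaskQueues pages through every task queue. The helper name, mgr,
// and the page size are assumptions made for this sketch.
func listAllTaskQueues(ctx context.Context, mgr persistence.TaskManager) ([]*persistence.PersistedTaskQueueInfo, error) {
    var all []*persistence.PersistedTaskQueueInfo
    var token []byte // nil on the first iteration, i.e. first page
    for {
        resp, err := mgr.ListTaskQueue(ctx, &persistence.ListTaskQueueRequest{
            PageSize:  100,
            PageToken: token,
        })
        if err != nil {
            return nil, err
        }
        all = append(all, resp.Items...)
        if len(resp.NextPageToken) == 0 { // empty token: assume last page
            return all, nil
        }
        token = resp.NextPageToken
    }
}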
+ // DeleteTaskQueueRequest contains the request params needed to invoke DeleteTaskQueue API + DeleteTaskQueueRequest struct { + TaskQueue *TaskQueueKey + RangeID int64 + } + + // CreateTasksRequest is used to create a new task for a workflow execution + CreateTasksRequest struct { + TaskQueueInfo *PersistedTaskQueueInfo + Tasks []*persistencespb.AllocatedTaskInfo + } + + // CreateTasksResponse is the response to CreateTasksRequest + CreateTasksResponse struct { + } + + PersistedTaskQueueInfo struct { + Data *persistencespb.TaskQueueInfo + RangeID int64 + } + + // GetTasksRequest is used to retrieve tasks of a task queue + GetTasksRequest struct { + NamespaceID string + TaskQueue string + TaskType enumspb.TaskQueueType + InclusiveMinTaskID int64 + ExclusiveMaxTaskID int64 + PageSize int + NextPageToken []byte + } + + // GetTasksResponse is the response to GetTasksRequests + GetTasksResponse struct { + Tasks []*persistencespb.AllocatedTaskInfo + NextPageToken []byte + } + + // CompleteTaskRequest is used to complete a task + CompleteTaskRequest struct { + TaskQueue *TaskQueueKey + TaskID int64 + } + + // CompleteTasksLessThanRequest contains the request params needed to invoke CompleteTasksLessThan API + CompleteTasksLessThanRequest struct { + NamespaceID string + TaskQueueName string + TaskType enumspb.TaskQueueType + ExclusiveMaxTaskID int64 // Tasks less than this ID will be completed + Limit int // Limit on the max number of tasks that can be completed. Required param + } + + // CreateNamespaceRequest is used to create the namespace + CreateNamespaceRequest struct { + Namespace *persistencespb.NamespaceDetail + IsGlobalNamespace bool + } + + // CreateNamespaceResponse is the response for CreateNamespace + CreateNamespaceResponse struct { + ID string + } + + // GetNamespaceRequest is used to read namespace + GetNamespaceRequest struct { + ID string + Name string + } + + // GetNamespaceResponse is the response for GetNamespace + GetNamespaceResponse struct { + Namespace *persistencespb.NamespaceDetail + IsGlobalNamespace bool + NotificationVersion int64 + } + + // UpdateNamespaceRequest is used to update namespace + UpdateNamespaceRequest struct { + Namespace *persistencespb.NamespaceDetail + IsGlobalNamespace bool + NotificationVersion int64 + } + + // RenameNamespaceRequest is used to rename namespace. 
+ RenameNamespaceRequest struct { + PreviousName string + NewName string + } + + // DeleteNamespaceRequest is used to delete namespace entry from namespaces table + DeleteNamespaceRequest struct { + ID string + } + + // DeleteNamespaceByNameRequest is used to delete namespace entry from namespaces_by_name table + DeleteNamespaceByNameRequest struct { + Name string + } + + // ListNamespacesRequest is used to list namespaces + ListNamespacesRequest struct { + PageSize int + NextPageToken []byte + IncludeDeleted bool + } + + // ListNamespacesResponse is the response for GetNamespace + ListNamespacesResponse struct { + Namespaces []*GetNamespaceResponse + NextPageToken []byte + } + + // GetMetadataResponse is the response for GetMetadata + GetMetadataResponse struct { + NotificationVersion int64 + } + + // MutableStateStatistics is the size stats for MutableState + MutableStateStatistics struct { + TotalSize int + HistoryStatistics *HistoryStatistics + + // Breakdown of size into more granular stats + ExecutionInfoSize int + ExecutionStateSize int + + ActivityInfoSize int + TimerInfoSize int + ChildInfoSize int + RequestCancelInfoSize int + SignalInfoSize int + SignalRequestIDSize int + BufferedEventsSize int + // UpdateInfoSize is included in ExecutionInfoSize + + // Item count for various information captured within mutable state + ActivityInfoCount int + TimerInfoCount int + ChildInfoCount int + RequestCancelInfoCount int + SignalInfoCount int + SignalRequestIDCount int + BufferedEventsCount int + TaskCountByCategory map[string]int + UpdateInfoCount int + + // Total item count for various information captured within mutable state + TotalActivityCount int64 + TotalUserTimerCount int64 + TotalChildExecutionCount int64 + TotalRequestCancelExternalCount int64 + TotalSignalExternalCount int64 + TotalSignalCount int64 + TotalUpdateCount int64 + } + + HistoryStatistics struct { + SizeDiff int + CountDiff int + } + + // AppendHistoryNodesRequest is used to append a batch of history nodes + AppendHistoryNodesRequest struct { + // The shard to get history node data + ShardID int32 + // true if this is the first append request to the branch + IsNewBranch bool + // the info for clean up data in background + Info string + // The branch to be appended + BranchToken []byte + // The batch of events to be appended. The first eventID will become the nodeID of this batch + Events []*historypb.HistoryEvent + // TransactionID for events before these events. For events chaining + PrevTransactionID int64 + // requested TransactionID for this write operation. For the same eventID, the node with larger TransactionID always wins + TransactionID int64 + } + + // AppendHistoryNodesResponse is a response to AppendHistoryNodesRequest + AppendHistoryNodesResponse struct { + // the size of the event data that has been appended + Size int + } + + // AppendRawHistoryNodesRequest is used to append a batch of raw history nodes + AppendRawHistoryNodesRequest struct { + // The shard to get history node data + ShardID int32 + // true if this is the first append request to the branch + IsNewBranch bool + // the info for clean up data in background + Info string + // The branch to be appended + BranchToken []byte + // The batch of events to be appended. The first eventID will become the nodeID of this batch + History *commonpb.DataBlob + // TransactionID for events before these events. For events chaining + PrevTransactionID int64 + // requested TransactionID for this write operation. 
For the same eventID, the node with larger TransactionID always wins + TransactionID int64 + // NodeID is the first event id. + NodeID int64 + } + + // ReadHistoryBranchRequest is used to read a history branch + ReadHistoryBranchRequest struct { + // The shard to get history branch data + ShardID int32 + // The branch to be read + BranchToken []byte + // Get the history nodes from MinEventID. Inclusive. + MinEventID int64 + // Get the history nodes upto MaxEventID. Exclusive. + MaxEventID int64 + // Maximum number of batches of events per page. Not that number of events in a batch >=1, it is not number of events per page. + // However for a single page, it is also possible that the returned events is less than PageSize (event zero events) due to stale events. + PageSize int + // Token to continue reading next page of history append transactions. Pass in empty slice for first page + NextPageToken []byte + } + + // ReadHistoryBranchResponse is the response to ReadHistoryBranchRequest + ReadHistoryBranchResponse struct { + // History events + HistoryEvents []*historypb.HistoryEvent + // Token to read next page if there are more events beyond page size. + // Use this to set NextPageToken on ReadHistoryBranchRequest to read the next page. + // Empty means we have reached the last page, not need to continue + NextPageToken []byte + // Size of history read from store + Size int + } + + // ReadHistoryBranchRequest is used to read a history branch + ReadHistoryBranchReverseRequest struct { + // The shard to get history branch data + ShardID int32 + // The branch to be read + BranchToken []byte + // Get the history nodes upto MaxEventID. Exclusive. + MaxEventID int64 + // Maximum number of batches of events per page. Not that number of events in a batch >=1, it is not number of events per page. + // However for a single page, it is also possible that the returned events is less than PageSize (event zero events) due to stale events. + PageSize int + // LastFirstTransactionID specified in mutable state. Only used for reading in reverse order. + LastFirstTransactionID int64 + // Token to continue reading next page of history append transactions. Pass in empty slice for first page + NextPageToken []byte + } + + // ReadHistoryBranchResponse is the response to ReadHistoryBranchRequest + ReadHistoryBranchReverseResponse struct { + // History events + HistoryEvents []*historypb.HistoryEvent + // Token to read next page if there are more events beyond page size. + // Use this to set NextPageToken on ReadHistoryBranchRequest to read the next page. + // Empty means we have reached the last page, not need to continue + NextPageToken []byte + // Size of history read from store + Size int + } + + // ReadHistoryBranchByBatchResponse is the response to ReadHistoryBranchRequest + ReadHistoryBranchByBatchResponse struct { + // History events by batch + History []*historypb.History + // TransactionID for relevant History batch + TransactionIDs []int64 + // Token to read next page if there are more events beyond page size. + // Use this to set NextPageToken on ReadHistoryBranchRequest to read the next page. 
+ // Empty means we have reached the last page, not need to continue + NextPageToken []byte + // Size of history read from store + Size int + } + + // ReadRawHistoryBranchResponse is the response to ReadHistoryBranchRequest + ReadRawHistoryBranchResponse struct { + // HistoryEventBlobs history event blobs + HistoryEventBlobs []*commonpb.DataBlob + // NodeIDs is the first event id of each history blob + NodeIDs []int64 + // Token to read next page if there are more events beyond page size. + // Use this to set NextPageToken on ReadHistoryBranchRequest to read the next page. + // Empty means we have reached the last page, not need to continue + NextPageToken []byte + // Size of history read from store + Size int + } + + // ForkHistoryBranchRequest is used to fork a history branch + ForkHistoryBranchRequest struct { + // The shard to get history branch data + ShardID int32 + // The namespace performing the fork + NamespaceID string + // The base branch to fork from + ForkBranchToken []byte + // The nodeID to fork from, the new branch will start from ( inclusive ), the base branch will stop at(exclusive) + // Application must provide a void forking nodeID, it must be a valid nodeID in that branch. A valid nodeID is the firstEventID of a valid batch of events. + // And ForkNodeID > 1 because forking from 1 doesn't make any sense. + ForkNodeID int64 + // the info for clean up data in background + Info string + } + + // ForkHistoryBranchResponse is the response to ForkHistoryBranchRequest + ForkHistoryBranchResponse struct { + // branchToken to represent the new branch + NewBranchToken []byte + } + + // CompleteForkBranchRequest is used to complete forking + CompleteForkBranchRequest struct { + // the new branch returned from ForkHistoryBranchRequest + BranchToken []byte + // true means the fork is success, will update the flag, otherwise will delete the new branch + Success bool + // The shard to update history branch data + ShardID *int + } + + // DeleteHistoryBranchRequest is used to remove a history branch + DeleteHistoryBranchRequest struct { + // The shard to delete history branch data + ShardID int32 + // branch to be deleted + BranchToken []byte + } + + // TrimHistoryBranchRequest is used to validate & trim a history branch + TrimHistoryBranchRequest struct { + // The shard to delete history branch data + ShardID int32 + // branch to be validated & trimmed + BranchToken []byte + // known valid node ID + NodeID int64 + // known valid transaction ID + TransactionID int64 + } + + // TrimHistoryBranchResponse is the response to TrimHistoryBranchRequest + TrimHistoryBranchResponse struct { + } + + // GetHistoryTreeRequest is used to retrieve branch info of a history tree + GetHistoryTreeRequest struct { + // A UUID of a tree + TreeID string + // Get data from this shard + ShardID int32 + } + + // HistoryBranchDetail contains detailed information of a branch + HistoryBranchDetail struct { + BranchToken []byte + ForkTime *time.Time + Info string + } + + // GetHistoryTreeResponse is a response to GetHistoryTreeRequest + GetHistoryTreeResponse struct { + // all branches of a tree + BranchTokens [][]byte + } + + // GetAllHistoryTreeBranchesRequest is a request of GetAllHistoryTreeBranches + GetAllHistoryTreeBranchesRequest struct { + // pagination token + NextPageToken []byte + // maximum number of branches returned per page + PageSize int + } + + // GetAllHistoryTreeBranchesResponse is a response to GetAllHistoryTreeBranches + GetAllHistoryTreeBranchesResponse struct { + // pagination token + 
NextPageToken []byte + // all branches of all trees + Branches []HistoryBranchDetail + } + + // ListClusterMetadataRequest is the request to ListClusterMetadata + ListClusterMetadataRequest struct { + PageSize int + NextPageToken []byte + } + + // ListClusterMetadataResponse is the response to ListClusterMetadata + ListClusterMetadataResponse struct { + ClusterMetadata []*GetClusterMetadataResponse + NextPageToken []byte + } + + // GetClusterMetadataRequest is the request to GetClusterMetadata + GetClusterMetadataRequest struct { + ClusterName string + } + + // GetClusterMetadataResponse is the response to GetClusterMetadata + GetClusterMetadataResponse struct { + persistencespb.ClusterMetadata + Version int64 + } + + // SaveClusterMetadataRequest is the request to SaveClusterMetadata + SaveClusterMetadataRequest struct { + persistencespb.ClusterMetadata + Version int64 + } + + // DeleteClusterMetadataRequest is the request to DeleteClusterMetadata + DeleteClusterMetadataRequest struct { + ClusterName string + } + + // GetClusterMembersRequest is the request to GetClusterMembers + GetClusterMembersRequest struct { + LastHeartbeatWithin time.Duration + RPCAddressEquals net.IP + HostIDEquals uuid.UUID + RoleEquals ServiceType + SessionStartedAfter time.Time + NextPageToken []byte + PageSize int + } + + // GetClusterMembersResponse is the response to GetClusterMembers + GetClusterMembersResponse struct { + ActiveMembers []*ClusterMember + NextPageToken []byte + } + + // ClusterMember is used as a response to GetClusterMembers + ClusterMember struct { + Role ServiceType + HostID uuid.UUID + RPCAddress net.IP + RPCPort uint16 + SessionStart time.Time + LastHeartbeat time.Time + RecordExpiry time.Time + } + + // UpsertClusterMembershipRequest is the request to UpsertClusterMembership + UpsertClusterMembershipRequest struct { + Role ServiceType + HostID uuid.UUID + RPCAddress net.IP + RPCPort uint16 + SessionStart time.Time + RecordExpiry time.Duration + } + + // PruneClusterMembershipRequest is the request to PruneClusterMembership + PruneClusterMembershipRequest struct { + MaxRecordsPruned int + } + + // Closeable is an interface for any entity that supports a close operation to release resources + // TODO: allow this method to return errors + Closeable interface { + Close() + } + + // ShardManager is used to manage all shards + ShardManager interface { + Closeable + GetName() string + + GetOrCreateShard(ctx context.Context, request *GetOrCreateShardRequest) (*GetOrCreateShardResponse, error) + UpdateShard(ctx context.Context, request *UpdateShardRequest) error + AssertShardOwnership(ctx context.Context, request *AssertShardOwnershipRequest) error + } + + // ExecutionManager is used to manage workflow executions + ExecutionManager interface { + Closeable + GetName() string + GetHistoryBranchUtil() HistoryBranchUtil + + CreateWorkflowExecution(ctx context.Context, request *CreateWorkflowExecutionRequest) (*CreateWorkflowExecutionResponse, error) + UpdateWorkflowExecution(ctx context.Context, request *UpdateWorkflowExecutionRequest) (*UpdateWorkflowExecutionResponse, error) + ConflictResolveWorkflowExecution(ctx context.Context, request *ConflictResolveWorkflowExecutionRequest) (*ConflictResolveWorkflowExecutionResponse, error) + DeleteWorkflowExecution(ctx context.Context, request *DeleteWorkflowExecutionRequest) error + DeleteCurrentWorkflowExecution(ctx context.Context, request *DeleteCurrentWorkflowExecutionRequest) error + GetCurrentExecution(ctx context.Context, request 
*GetCurrentExecutionRequest) (*GetCurrentExecutionResponse, error) + GetWorkflowExecution(ctx context.Context, request *GetWorkflowExecutionRequest) (*GetWorkflowExecutionResponse, error) + SetWorkflowExecution(ctx context.Context, request *SetWorkflowExecutionRequest) (*SetWorkflowExecutionResponse, error) + + // Scan operations + + ListConcreteExecutions(ctx context.Context, request *ListConcreteExecutionsRequest) (*ListConcreteExecutionsResponse, error) + + // Tasks related APIs + + // Hints for persistence implementation regarding history task readers + RegisterHistoryTaskReader(ctx context.Context, request *RegisterHistoryTaskReaderRequest) error + UnregisterHistoryTaskReader(ctx context.Context, request *UnregisterHistoryTaskReaderRequest) + UpdateHistoryTaskReaderProgress(ctx context.Context, request *UpdateHistoryTaskReaderProgressRequest) + + AddHistoryTasks(ctx context.Context, request *AddHistoryTasksRequest) error + GetHistoryTasks(ctx context.Context, request *GetHistoryTasksRequest) (*GetHistoryTasksResponse, error) + CompleteHistoryTask(ctx context.Context, request *CompleteHistoryTaskRequest) error + RangeCompleteHistoryTasks(ctx context.Context, request *RangeCompleteHistoryTasksRequest) error + + PutReplicationTaskToDLQ(ctx context.Context, request *PutReplicationTaskToDLQRequest) error + GetReplicationTasksFromDLQ(ctx context.Context, request *GetReplicationTasksFromDLQRequest) (*GetHistoryTasksResponse, error) + DeleteReplicationTaskFromDLQ(ctx context.Context, request *DeleteReplicationTaskFromDLQRequest) error + RangeDeleteReplicationTaskFromDLQ(ctx context.Context, request *RangeDeleteReplicationTaskFromDLQRequest) error + IsReplicationDLQEmpty(ctx context.Context, request *GetReplicationTasksFromDLQRequest) (bool, error) + + // The below are history V2 APIs + // V2 regards history events growing as a tree, decoupled from workflow concepts + // For Temporal, treeID is new runID, except for fork(reset), treeID will be the runID that it forks from. 
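The paging semantics documented on ReadHistoryBranchRequest above (MinEventID inclusive, MaxEventID exclusive, empty NextPageToken on the last page) can be exercised with a small loop against the V2 history APIs listed below. A minimal sketch, assuming a hypothetical package and helper name, an ExecutionManager value execMgr, a branch token obtained elsewhere, and an arbitrary page size:

package exampleusage // hypothetical package, for illustration only

import (
    "context"

    historypb "go.temporal.io/api/history/v1"

    "go.temporal.io/server/common/persistence"
)

// readFullBranch drains one history branch page by page. The helper name,
// execMgr, and the page size are assumptions made for this sketch.
func readFullBranch(ctx context.Context, execMgr persistence.ExecutionManager, shardID int32, branchToken []byte, nextEventID int64) ([]*historypb.HistoryEvent, error) {
    var events []*historypb.HistoryEvent
    req := &persistence.ReadHistoryBranchRequest{
        ShardID:     shardID,
        BranchToken: branchToken,
        MinEventID:  1,           // inclusive lower bound
        MaxEventID:  nextEventID, // exclusive upper bound
        PageSize:    256,         // maximum number of event batches per page
    }
    for {
        resp, err := execMgr.ReadHistoryBranch(ctx, req)
        if err != nil {
            return nil, err
        }
        events = append(events, resp.HistoryEvents...)
        if len(resp.NextPageToken) == 0 { // empty token: last page reached
            return events, nil
        }
        req.NextPageToken = resp.NextPageToken
    }
}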
+ + // AppendHistoryNodes add a node to history node table + AppendHistoryNodes(ctx context.Context, request *AppendHistoryNodesRequest) (*AppendHistoryNodesResponse, error) + // AppendRawHistoryNodes add a node of raw histories to history node table + AppendRawHistoryNodes(ctx context.Context, request *AppendRawHistoryNodesRequest) (*AppendHistoryNodesResponse, error) + // ReadHistoryBranch returns history node data for a branch + ReadHistoryBranch(ctx context.Context, request *ReadHistoryBranchRequest) (*ReadHistoryBranchResponse, error) + // ReadHistoryBranchByBatch returns history node data for a branch ByBatch + ReadHistoryBranchByBatch(ctx context.Context, request *ReadHistoryBranchRequest) (*ReadHistoryBranchByBatchResponse, error) + // ReadHistoryBranchReverse returns history node data for a branch + ReadHistoryBranchReverse(ctx context.Context, request *ReadHistoryBranchReverseRequest) (*ReadHistoryBranchReverseResponse, error) + // ReadRawHistoryBranch returns history node raw data for a branch ByBatch + // NOTE: this API should only be used by 3+DC + ReadRawHistoryBranch(ctx context.Context, request *ReadHistoryBranchRequest) (*ReadRawHistoryBranchResponse, error) + // ForkHistoryBranch forks a new branch from a old branch + ForkHistoryBranch(ctx context.Context, request *ForkHistoryBranchRequest) (*ForkHistoryBranchResponse, error) + // DeleteHistoryBranch removes a branch + // If this is the last branch to delete, it will also remove the root node + DeleteHistoryBranch(ctx context.Context, request *DeleteHistoryBranchRequest) error + // TrimHistoryBranch validate & trim a history branch + TrimHistoryBranch(ctx context.Context, request *TrimHistoryBranchRequest) (*TrimHistoryBranchResponse, error) + // GetHistoryTree returns all branch information of a tree + GetHistoryTree(ctx context.Context, request *GetHistoryTreeRequest) (*GetHistoryTreeResponse, error) + // GetAllHistoryTreeBranches returns all branches of all trees + GetAllHistoryTreeBranches(ctx context.Context, request *GetAllHistoryTreeBranchesRequest) (*GetAllHistoryTreeBranchesResponse, error) + } + + // TaskManager is used to manage tasks and task queues + TaskManager interface { + Closeable + GetName() string + CreateTaskQueue(ctx context.Context, request *CreateTaskQueueRequest) (*CreateTaskQueueResponse, error) + UpdateTaskQueue(ctx context.Context, request *UpdateTaskQueueRequest) (*UpdateTaskQueueResponse, error) + GetTaskQueue(ctx context.Context, request *GetTaskQueueRequest) (*GetTaskQueueResponse, error) + ListTaskQueue(ctx context.Context, request *ListTaskQueueRequest) (*ListTaskQueueResponse, error) + DeleteTaskQueue(ctx context.Context, request *DeleteTaskQueueRequest) error + CreateTasks(ctx context.Context, request *CreateTasksRequest) (*CreateTasksResponse, error) + GetTasks(ctx context.Context, request *GetTasksRequest) (*GetTasksResponse, error) + CompleteTask(ctx context.Context, request *CompleteTaskRequest) error + // CompleteTasksLessThan completes tasks less than or equal to the given task id + // This API takes a limit parameter which specifies the count of maxRows that + // can be deleted. This parameter may be ignored by the underlying storage, but + // its mandatory to specify it. On success this method returns the number of rows + // actually deleted. If the underlying storage doesn't support "limit", all rows + // less than or equal to taskID will be deleted. 
+ // On success, this method returns either: + // - UnknownNumRowsAffected (this means all rows below value are deleted) + // - number of rows deleted, which may be equal to limit + CompleteTasksLessThan(ctx context.Context, request *CompleteTasksLessThanRequest) (int, error) + + // GetTaskQueueUserData gets versioned user data. + // This data would only exist if a user uses APIs that generate it, such as the worker versioning related APIs. + // The caller should be prepared to gracefully handle the "NotFound" service error. + GetTaskQueueUserData(ctx context.Context, request *GetTaskQueueUserDataRequest) (*GetTaskQueueUserDataResponse, error) + // UpdateTaskQueueUserData updates the user data for a given task queue. + // The request takes the _current_ known version along with the data to update. + // The caller should +1 increment the cached version number if this call succeeds. + // Fails with ConditionFailedError if the user data was updated concurrently. + UpdateTaskQueueUserData(ctx context.Context, request *UpdateTaskQueueUserDataRequest) error + ListTaskQueueUserDataEntries(ctx context.Context, request *ListTaskQueueUserDataEntriesRequest) (*ListTaskQueueUserDataEntriesResponse, error) + GetTaskQueuesByBuildId(ctx context.Context, request *GetTaskQueuesByBuildIdRequest) ([]string, error) + CountTaskQueuesByBuildId(ctx context.Context, request *CountTaskQueuesByBuildIdRequest) (int, error) + } + + // MetadataManager is used to manage metadata CRUD for namespace entities + MetadataManager interface { + Closeable + GetName() string + CreateNamespace(ctx context.Context, request *CreateNamespaceRequest) (*CreateNamespaceResponse, error) + GetNamespace(ctx context.Context, request *GetNamespaceRequest) (*GetNamespaceResponse, error) + UpdateNamespace(ctx context.Context, request *UpdateNamespaceRequest) error + RenameNamespace(ctx context.Context, request *RenameNamespaceRequest) error + DeleteNamespace(ctx context.Context, request *DeleteNamespaceRequest) error + DeleteNamespaceByName(ctx context.Context, request *DeleteNamespaceByNameRequest) error + ListNamespaces(ctx context.Context, request *ListNamespacesRequest) (*ListNamespacesResponse, error) + GetMetadata(ctx context.Context) (*GetMetadataResponse, error) + InitializeSystemNamespaces(ctx context.Context, currentClusterName string) error + } + + // ClusterMetadataManager is used to manage cluster-wide metadata and configuration + ClusterMetadataManager interface { + Closeable + GetName() string + GetClusterMembers(ctx context.Context, request *GetClusterMembersRequest) (*GetClusterMembersResponse, error) + UpsertClusterMembership(ctx context.Context, request *UpsertClusterMembershipRequest) error + PruneClusterMembership(ctx context.Context, request *PruneClusterMembershipRequest) error + ListClusterMetadata(ctx context.Context, request *ListClusterMetadataRequest) (*ListClusterMetadataResponse, error) + GetCurrentClusterMetadata(ctx context.Context) (*GetClusterMetadataResponse, error) + GetClusterMetadata(ctx context.Context, request *GetClusterMetadataRequest) (*GetClusterMetadataResponse, error) + SaveClusterMetadata(ctx context.Context, request *SaveClusterMetadataRequest) (bool, error) + DeleteClusterMetadata(ctx context.Context, request *DeleteClusterMetadataRequest) error + } +) + +func (e *InvalidPersistenceRequestError) Error() string { + return e.Msg +} + +func (e *AppendHistoryTimeoutError) Error() string { + return e.Msg +} + +func (e *CurrentWorkflowConditionFailedError) Error() string { + return e.Msg +} + +func 
(e *WorkflowConditionFailedError) Error() string { + return e.Msg +} + +func (e *ConditionFailedError) Error() string { + return e.Msg +} + +func (e *ShardAlreadyExistError) Error() string { + return e.Msg +} + +func (e *ShardOwnershipLostError) Error() string { + return e.Msg +} + +func (e *TimeoutError) Error() string { + return e.Msg +} + +func (e *TransactionSizeLimitError) Error() string { + return e.Msg +} + +func IsConflictErr(err error) bool { + switch err.(type) { + case *CurrentWorkflowConditionFailedError, + *WorkflowConditionFailedError, + *ConditionFailedError: + return true + } + return false +} + +// UnixMilliseconds returns t as a Unix time, the number of milliseconds elapsed since January 1, 1970 UTC. +// It should be used for all CQL timestamp. +func UnixMilliseconds(t time.Time) int64 { + // Handling zero time separately because UnixNano is undefined for zero times. + if t.IsZero() { + return 0 + } + + unixNano := t.UnixNano() + if unixNano < 0 { + // Time is before January 1, 1970 UTC + return 0 + } + return unixNano / int64(time.Millisecond) +} + +// BuildHistoryGarbageCleanupInfo combine the workflow identity information into a string +func BuildHistoryGarbageCleanupInfo(namespaceID, workflowID, runID string) string { + return fmt.Sprintf("%v:%v:%v", namespaceID, workflowID, runID) +} + +// SplitHistoryGarbageCleanupInfo returns workflow identity information +func SplitHistoryGarbageCleanupInfo(info string) (namespaceID, workflowID, runID string, err error) { + ss := strings.Split(info, ":") + // workflowID can contain ":" so len(ss) can be greater than 3 + if len(ss) < numItemsInGarbageInfo { + return "", "", "", fmt.Errorf("not able to split info for %s", info) + } + namespaceID = ss[0] + runID = ss[len(ss)-1] + workflowEnd := len(info) - len(runID) - 1 + workflowID = info[len(namespaceID)+1 : workflowEnd] + return +} + +type ServiceType int + +const ( + All ServiceType = iota + Frontend + History + Matching + Worker + InternalFrontend +) diff -Nru temporal-1.21.5-1/src/common/persistence/data_interfaces_mock.go temporal-1.22.5/src/common/persistence/data_interfaces_mock.go --- temporal-1.21.5-1/src/common/persistence/data_interfaces_mock.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/data_interfaces_mock.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,1291 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Code generated by MockGen. DO NOT EDIT. +// Source: data_interfaces.go + +// Package persistence is a generated GoMock package. +package persistence + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" +) + +// MockCloseable is a mock of Closeable interface. +type MockCloseable struct { + ctrl *gomock.Controller + recorder *MockCloseableMockRecorder +} + +// MockCloseableMockRecorder is the mock recorder for MockCloseable. +type MockCloseableMockRecorder struct { + mock *MockCloseable +} + +// NewMockCloseable creates a new mock instance. +func NewMockCloseable(ctrl *gomock.Controller) *MockCloseable { + mock := &MockCloseable{ctrl: ctrl} + mock.recorder = &MockCloseableMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockCloseable) EXPECT() *MockCloseableMockRecorder { + return m.recorder +} + +// Close mocks base method. +func (m *MockCloseable) Close() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Close") +} + +// Close indicates an expected call of Close. +func (mr *MockCloseableMockRecorder) Close() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockCloseable)(nil).Close)) +} + +// MockShardManager is a mock of ShardManager interface. +type MockShardManager struct { + ctrl *gomock.Controller + recorder *MockShardManagerMockRecorder +} + +// MockShardManagerMockRecorder is the mock recorder for MockShardManager. +type MockShardManagerMockRecorder struct { + mock *MockShardManager +} + +// NewMockShardManager creates a new mock instance. +func NewMockShardManager(ctrl *gomock.Controller) *MockShardManager { + mock := &MockShardManager{ctrl: ctrl} + mock.recorder = &MockShardManagerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockShardManager) EXPECT() *MockShardManagerMockRecorder { + return m.recorder +} + +// AssertShardOwnership mocks base method. +func (m *MockShardManager) AssertShardOwnership(ctx context.Context, request *AssertShardOwnershipRequest) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AssertShardOwnership", ctx, request) + ret0, _ := ret[0].(error) + return ret0 +} + +// AssertShardOwnership indicates an expected call of AssertShardOwnership. +func (mr *MockShardManagerMockRecorder) AssertShardOwnership(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AssertShardOwnership", reflect.TypeOf((*MockShardManager)(nil).AssertShardOwnership), ctx, request) +} + +// Close mocks base method. +func (m *MockShardManager) Close() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Close") +} + +// Close indicates an expected call of Close. +func (mr *MockShardManagerMockRecorder) Close() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockShardManager)(nil).Close)) +} + +// GetName mocks base method. +func (m *MockShardManager) GetName() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetName") + ret0, _ := ret[0].(string) + return ret0 +} + +// GetName indicates an expected call of GetName. 
+func (mr *MockShardManagerMockRecorder) GetName() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetName", reflect.TypeOf((*MockShardManager)(nil).GetName)) +} + +// GetOrCreateShard mocks base method. +func (m *MockShardManager) GetOrCreateShard(ctx context.Context, request *GetOrCreateShardRequest) (*GetOrCreateShardResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetOrCreateShard", ctx, request) + ret0, _ := ret[0].(*GetOrCreateShardResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetOrCreateShard indicates an expected call of GetOrCreateShard. +func (mr *MockShardManagerMockRecorder) GetOrCreateShard(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetOrCreateShard", reflect.TypeOf((*MockShardManager)(nil).GetOrCreateShard), ctx, request) +} + +// UpdateShard mocks base method. +func (m *MockShardManager) UpdateShard(ctx context.Context, request *UpdateShardRequest) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateShard", ctx, request) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateShard indicates an expected call of UpdateShard. +func (mr *MockShardManagerMockRecorder) UpdateShard(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateShard", reflect.TypeOf((*MockShardManager)(nil).UpdateShard), ctx, request) +} + +// MockExecutionManager is a mock of ExecutionManager interface. +type MockExecutionManager struct { + ctrl *gomock.Controller + recorder *MockExecutionManagerMockRecorder +} + +// MockExecutionManagerMockRecorder is the mock recorder for MockExecutionManager. +type MockExecutionManagerMockRecorder struct { + mock *MockExecutionManager +} + +// NewMockExecutionManager creates a new mock instance. +func NewMockExecutionManager(ctrl *gomock.Controller) *MockExecutionManager { + mock := &MockExecutionManager{ctrl: ctrl} + mock.recorder = &MockExecutionManagerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockExecutionManager) EXPECT() *MockExecutionManagerMockRecorder { + return m.recorder +} + +// AddHistoryTasks mocks base method. +func (m *MockExecutionManager) AddHistoryTasks(ctx context.Context, request *AddHistoryTasksRequest) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AddHistoryTasks", ctx, request) + ret0, _ := ret[0].(error) + return ret0 +} + +// AddHistoryTasks indicates an expected call of AddHistoryTasks. +func (mr *MockExecutionManagerMockRecorder) AddHistoryTasks(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddHistoryTasks", reflect.TypeOf((*MockExecutionManager)(nil).AddHistoryTasks), ctx, request) +} + +// AppendHistoryNodes mocks base method. +func (m *MockExecutionManager) AppendHistoryNodes(ctx context.Context, request *AppendHistoryNodesRequest) (*AppendHistoryNodesResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AppendHistoryNodes", ctx, request) + ret0, _ := ret[0].(*AppendHistoryNodesResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// AppendHistoryNodes indicates an expected call of AppendHistoryNodes. 
+func (mr *MockExecutionManagerMockRecorder) AppendHistoryNodes(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AppendHistoryNodes", reflect.TypeOf((*MockExecutionManager)(nil).AppendHistoryNodes), ctx, request) +} + +// AppendRawHistoryNodes mocks base method. +func (m *MockExecutionManager) AppendRawHistoryNodes(ctx context.Context, request *AppendRawHistoryNodesRequest) (*AppendHistoryNodesResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AppendRawHistoryNodes", ctx, request) + ret0, _ := ret[0].(*AppendHistoryNodesResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// AppendRawHistoryNodes indicates an expected call of AppendRawHistoryNodes. +func (mr *MockExecutionManagerMockRecorder) AppendRawHistoryNodes(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AppendRawHistoryNodes", reflect.TypeOf((*MockExecutionManager)(nil).AppendRawHistoryNodes), ctx, request) +} + +// Close mocks base method. +func (m *MockExecutionManager) Close() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Close") +} + +// Close indicates an expected call of Close. +func (mr *MockExecutionManagerMockRecorder) Close() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockExecutionManager)(nil).Close)) +} + +// CompleteHistoryTask mocks base method. +func (m *MockExecutionManager) CompleteHistoryTask(ctx context.Context, request *CompleteHistoryTaskRequest) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CompleteHistoryTask", ctx, request) + ret0, _ := ret[0].(error) + return ret0 +} + +// CompleteHistoryTask indicates an expected call of CompleteHistoryTask. +func (mr *MockExecutionManagerMockRecorder) CompleteHistoryTask(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CompleteHistoryTask", reflect.TypeOf((*MockExecutionManager)(nil).CompleteHistoryTask), ctx, request) +} + +// ConflictResolveWorkflowExecution mocks base method. +func (m *MockExecutionManager) ConflictResolveWorkflowExecution(ctx context.Context, request *ConflictResolveWorkflowExecutionRequest) (*ConflictResolveWorkflowExecutionResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ConflictResolveWorkflowExecution", ctx, request) + ret0, _ := ret[0].(*ConflictResolveWorkflowExecutionResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ConflictResolveWorkflowExecution indicates an expected call of ConflictResolveWorkflowExecution. +func (mr *MockExecutionManagerMockRecorder) ConflictResolveWorkflowExecution(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ConflictResolveWorkflowExecution", reflect.TypeOf((*MockExecutionManager)(nil).ConflictResolveWorkflowExecution), ctx, request) +} + +// CreateWorkflowExecution mocks base method. +func (m *MockExecutionManager) CreateWorkflowExecution(ctx context.Context, request *CreateWorkflowExecutionRequest) (*CreateWorkflowExecutionResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateWorkflowExecution", ctx, request) + ret0, _ := ret[0].(*CreateWorkflowExecutionResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateWorkflowExecution indicates an expected call of CreateWorkflowExecution. 
+func (mr *MockExecutionManagerMockRecorder) CreateWorkflowExecution(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateWorkflowExecution", reflect.TypeOf((*MockExecutionManager)(nil).CreateWorkflowExecution), ctx, request) +} + +// DeleteCurrentWorkflowExecution mocks base method. +func (m *MockExecutionManager) DeleteCurrentWorkflowExecution(ctx context.Context, request *DeleteCurrentWorkflowExecutionRequest) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteCurrentWorkflowExecution", ctx, request) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteCurrentWorkflowExecution indicates an expected call of DeleteCurrentWorkflowExecution. +func (mr *MockExecutionManagerMockRecorder) DeleteCurrentWorkflowExecution(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteCurrentWorkflowExecution", reflect.TypeOf((*MockExecutionManager)(nil).DeleteCurrentWorkflowExecution), ctx, request) +} + +// DeleteHistoryBranch mocks base method. +func (m *MockExecutionManager) DeleteHistoryBranch(ctx context.Context, request *DeleteHistoryBranchRequest) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteHistoryBranch", ctx, request) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteHistoryBranch indicates an expected call of DeleteHistoryBranch. +func (mr *MockExecutionManagerMockRecorder) DeleteHistoryBranch(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteHistoryBranch", reflect.TypeOf((*MockExecutionManager)(nil).DeleteHistoryBranch), ctx, request) +} + +// DeleteReplicationTaskFromDLQ mocks base method. +func (m *MockExecutionManager) DeleteReplicationTaskFromDLQ(ctx context.Context, request *DeleteReplicationTaskFromDLQRequest) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteReplicationTaskFromDLQ", ctx, request) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteReplicationTaskFromDLQ indicates an expected call of DeleteReplicationTaskFromDLQ. +func (mr *MockExecutionManagerMockRecorder) DeleteReplicationTaskFromDLQ(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteReplicationTaskFromDLQ", reflect.TypeOf((*MockExecutionManager)(nil).DeleteReplicationTaskFromDLQ), ctx, request) +} + +// DeleteWorkflowExecution mocks base method. +func (m *MockExecutionManager) DeleteWorkflowExecution(ctx context.Context, request *DeleteWorkflowExecutionRequest) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteWorkflowExecution", ctx, request) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteWorkflowExecution indicates an expected call of DeleteWorkflowExecution. +func (mr *MockExecutionManagerMockRecorder) DeleteWorkflowExecution(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteWorkflowExecution", reflect.TypeOf((*MockExecutionManager)(nil).DeleteWorkflowExecution), ctx, request) +} + +// ForkHistoryBranch mocks base method. 
+func (m *MockExecutionManager) ForkHistoryBranch(ctx context.Context, request *ForkHistoryBranchRequest) (*ForkHistoryBranchResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ForkHistoryBranch", ctx, request) + ret0, _ := ret[0].(*ForkHistoryBranchResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ForkHistoryBranch indicates an expected call of ForkHistoryBranch. +func (mr *MockExecutionManagerMockRecorder) ForkHistoryBranch(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ForkHistoryBranch", reflect.TypeOf((*MockExecutionManager)(nil).ForkHistoryBranch), ctx, request) +} + +// GetAllHistoryTreeBranches mocks base method. +func (m *MockExecutionManager) GetAllHistoryTreeBranches(ctx context.Context, request *GetAllHistoryTreeBranchesRequest) (*GetAllHistoryTreeBranchesResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAllHistoryTreeBranches", ctx, request) + ret0, _ := ret[0].(*GetAllHistoryTreeBranchesResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAllHistoryTreeBranches indicates an expected call of GetAllHistoryTreeBranches. +func (mr *MockExecutionManagerMockRecorder) GetAllHistoryTreeBranches(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAllHistoryTreeBranches", reflect.TypeOf((*MockExecutionManager)(nil).GetAllHistoryTreeBranches), ctx, request) +} + +// GetCurrentExecution mocks base method. +func (m *MockExecutionManager) GetCurrentExecution(ctx context.Context, request *GetCurrentExecutionRequest) (*GetCurrentExecutionResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetCurrentExecution", ctx, request) + ret0, _ := ret[0].(*GetCurrentExecutionResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetCurrentExecution indicates an expected call of GetCurrentExecution. +func (mr *MockExecutionManagerMockRecorder) GetCurrentExecution(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentExecution", reflect.TypeOf((*MockExecutionManager)(nil).GetCurrentExecution), ctx, request) +} + +// GetHistoryBranchUtil mocks base method. +func (m *MockExecutionManager) GetHistoryBranchUtil() HistoryBranchUtil { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetHistoryBranchUtil") + ret0, _ := ret[0].(HistoryBranchUtil) + return ret0 +} + +// GetHistoryBranchUtil indicates an expected call of GetHistoryBranchUtil. +func (mr *MockExecutionManagerMockRecorder) GetHistoryBranchUtil() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHistoryBranchUtil", reflect.TypeOf((*MockExecutionManager)(nil).GetHistoryBranchUtil)) +} + +// GetHistoryTasks mocks base method. +func (m *MockExecutionManager) GetHistoryTasks(ctx context.Context, request *GetHistoryTasksRequest) (*GetHistoryTasksResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetHistoryTasks", ctx, request) + ret0, _ := ret[0].(*GetHistoryTasksResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetHistoryTasks indicates an expected call of GetHistoryTasks. 
+func (mr *MockExecutionManagerMockRecorder) GetHistoryTasks(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHistoryTasks", reflect.TypeOf((*MockExecutionManager)(nil).GetHistoryTasks), ctx, request) +} + +// GetHistoryTree mocks base method. +func (m *MockExecutionManager) GetHistoryTree(ctx context.Context, request *GetHistoryTreeRequest) (*GetHistoryTreeResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetHistoryTree", ctx, request) + ret0, _ := ret[0].(*GetHistoryTreeResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetHistoryTree indicates an expected call of GetHistoryTree. +func (mr *MockExecutionManagerMockRecorder) GetHistoryTree(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHistoryTree", reflect.TypeOf((*MockExecutionManager)(nil).GetHistoryTree), ctx, request) +} + +// GetName mocks base method. +func (m *MockExecutionManager) GetName() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetName") + ret0, _ := ret[0].(string) + return ret0 +} + +// GetName indicates an expected call of GetName. +func (mr *MockExecutionManagerMockRecorder) GetName() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetName", reflect.TypeOf((*MockExecutionManager)(nil).GetName)) +} + +// GetReplicationTasksFromDLQ mocks base method. +func (m *MockExecutionManager) GetReplicationTasksFromDLQ(ctx context.Context, request *GetReplicationTasksFromDLQRequest) (*GetHistoryTasksResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetReplicationTasksFromDLQ", ctx, request) + ret0, _ := ret[0].(*GetHistoryTasksResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetReplicationTasksFromDLQ indicates an expected call of GetReplicationTasksFromDLQ. +func (mr *MockExecutionManagerMockRecorder) GetReplicationTasksFromDLQ(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetReplicationTasksFromDLQ", reflect.TypeOf((*MockExecutionManager)(nil).GetReplicationTasksFromDLQ), ctx, request) +} + +// GetWorkflowExecution mocks base method. +func (m *MockExecutionManager) GetWorkflowExecution(ctx context.Context, request *GetWorkflowExecutionRequest) (*GetWorkflowExecutionResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetWorkflowExecution", ctx, request) + ret0, _ := ret[0].(*GetWorkflowExecutionResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetWorkflowExecution indicates an expected call of GetWorkflowExecution. +func (mr *MockExecutionManagerMockRecorder) GetWorkflowExecution(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetWorkflowExecution", reflect.TypeOf((*MockExecutionManager)(nil).GetWorkflowExecution), ctx, request) +} + +// IsReplicationDLQEmpty mocks base method. +func (m *MockExecutionManager) IsReplicationDLQEmpty(ctx context.Context, request *GetReplicationTasksFromDLQRequest) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IsReplicationDLQEmpty", ctx, request) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// IsReplicationDLQEmpty indicates an expected call of IsReplicationDLQEmpty. 
+func (mr *MockExecutionManagerMockRecorder) IsReplicationDLQEmpty(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsReplicationDLQEmpty", reflect.TypeOf((*MockExecutionManager)(nil).IsReplicationDLQEmpty), ctx, request) +} + +// ListConcreteExecutions mocks base method. +func (m *MockExecutionManager) ListConcreteExecutions(ctx context.Context, request *ListConcreteExecutionsRequest) (*ListConcreteExecutionsResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListConcreteExecutions", ctx, request) + ret0, _ := ret[0].(*ListConcreteExecutionsResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListConcreteExecutions indicates an expected call of ListConcreteExecutions. +func (mr *MockExecutionManagerMockRecorder) ListConcreteExecutions(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListConcreteExecutions", reflect.TypeOf((*MockExecutionManager)(nil).ListConcreteExecutions), ctx, request) +} + +// PutReplicationTaskToDLQ mocks base method. +func (m *MockExecutionManager) PutReplicationTaskToDLQ(ctx context.Context, request *PutReplicationTaskToDLQRequest) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutReplicationTaskToDLQ", ctx, request) + ret0, _ := ret[0].(error) + return ret0 +} + +// PutReplicationTaskToDLQ indicates an expected call of PutReplicationTaskToDLQ. +func (mr *MockExecutionManagerMockRecorder) PutReplicationTaskToDLQ(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutReplicationTaskToDLQ", reflect.TypeOf((*MockExecutionManager)(nil).PutReplicationTaskToDLQ), ctx, request) +} + +// RangeCompleteHistoryTasks mocks base method. +func (m *MockExecutionManager) RangeCompleteHistoryTasks(ctx context.Context, request *RangeCompleteHistoryTasksRequest) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RangeCompleteHistoryTasks", ctx, request) + ret0, _ := ret[0].(error) + return ret0 +} + +// RangeCompleteHistoryTasks indicates an expected call of RangeCompleteHistoryTasks. +func (mr *MockExecutionManagerMockRecorder) RangeCompleteHistoryTasks(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RangeCompleteHistoryTasks", reflect.TypeOf((*MockExecutionManager)(nil).RangeCompleteHistoryTasks), ctx, request) +} + +// RangeDeleteReplicationTaskFromDLQ mocks base method. +func (m *MockExecutionManager) RangeDeleteReplicationTaskFromDLQ(ctx context.Context, request *RangeDeleteReplicationTaskFromDLQRequest) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RangeDeleteReplicationTaskFromDLQ", ctx, request) + ret0, _ := ret[0].(error) + return ret0 +} + +// RangeDeleteReplicationTaskFromDLQ indicates an expected call of RangeDeleteReplicationTaskFromDLQ. +func (mr *MockExecutionManagerMockRecorder) RangeDeleteReplicationTaskFromDLQ(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RangeDeleteReplicationTaskFromDLQ", reflect.TypeOf((*MockExecutionManager)(nil).RangeDeleteReplicationTaskFromDLQ), ctx, request) +} + +// ReadHistoryBranch mocks base method. 
+func (m *MockExecutionManager) ReadHistoryBranch(ctx context.Context, request *ReadHistoryBranchRequest) (*ReadHistoryBranchResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ReadHistoryBranch", ctx, request) + ret0, _ := ret[0].(*ReadHistoryBranchResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ReadHistoryBranch indicates an expected call of ReadHistoryBranch. +func (mr *MockExecutionManagerMockRecorder) ReadHistoryBranch(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadHistoryBranch", reflect.TypeOf((*MockExecutionManager)(nil).ReadHistoryBranch), ctx, request) +} + +// ReadHistoryBranchByBatch mocks base method. +func (m *MockExecutionManager) ReadHistoryBranchByBatch(ctx context.Context, request *ReadHistoryBranchRequest) (*ReadHistoryBranchByBatchResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ReadHistoryBranchByBatch", ctx, request) + ret0, _ := ret[0].(*ReadHistoryBranchByBatchResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ReadHistoryBranchByBatch indicates an expected call of ReadHistoryBranchByBatch. +func (mr *MockExecutionManagerMockRecorder) ReadHistoryBranchByBatch(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadHistoryBranchByBatch", reflect.TypeOf((*MockExecutionManager)(nil).ReadHistoryBranchByBatch), ctx, request) +} + +// ReadHistoryBranchReverse mocks base method. +func (m *MockExecutionManager) ReadHistoryBranchReverse(ctx context.Context, request *ReadHistoryBranchReverseRequest) (*ReadHistoryBranchReverseResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ReadHistoryBranchReverse", ctx, request) + ret0, _ := ret[0].(*ReadHistoryBranchReverseResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ReadHistoryBranchReverse indicates an expected call of ReadHistoryBranchReverse. +func (mr *MockExecutionManagerMockRecorder) ReadHistoryBranchReverse(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadHistoryBranchReverse", reflect.TypeOf((*MockExecutionManager)(nil).ReadHistoryBranchReverse), ctx, request) +} + +// ReadRawHistoryBranch mocks base method. +func (m *MockExecutionManager) ReadRawHistoryBranch(ctx context.Context, request *ReadHistoryBranchRequest) (*ReadRawHistoryBranchResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ReadRawHistoryBranch", ctx, request) + ret0, _ := ret[0].(*ReadRawHistoryBranchResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ReadRawHistoryBranch indicates an expected call of ReadRawHistoryBranch. +func (mr *MockExecutionManagerMockRecorder) ReadRawHistoryBranch(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadRawHistoryBranch", reflect.TypeOf((*MockExecutionManager)(nil).ReadRawHistoryBranch), ctx, request) +} + +// RegisterHistoryTaskReader mocks base method. +func (m *MockExecutionManager) RegisterHistoryTaskReader(ctx context.Context, request *RegisterHistoryTaskReaderRequest) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RegisterHistoryTaskReader", ctx, request) + ret0, _ := ret[0].(error) + return ret0 +} + +// RegisterHistoryTaskReader indicates an expected call of RegisterHistoryTaskReader. 
+func (mr *MockExecutionManagerMockRecorder) RegisterHistoryTaskReader(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterHistoryTaskReader", reflect.TypeOf((*MockExecutionManager)(nil).RegisterHistoryTaskReader), ctx, request) +} + +// SetWorkflowExecution mocks base method. +func (m *MockExecutionManager) SetWorkflowExecution(ctx context.Context, request *SetWorkflowExecutionRequest) (*SetWorkflowExecutionResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetWorkflowExecution", ctx, request) + ret0, _ := ret[0].(*SetWorkflowExecutionResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SetWorkflowExecution indicates an expected call of SetWorkflowExecution. +func (mr *MockExecutionManagerMockRecorder) SetWorkflowExecution(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetWorkflowExecution", reflect.TypeOf((*MockExecutionManager)(nil).SetWorkflowExecution), ctx, request) +} + +// TrimHistoryBranch mocks base method. +func (m *MockExecutionManager) TrimHistoryBranch(ctx context.Context, request *TrimHistoryBranchRequest) (*TrimHistoryBranchResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "TrimHistoryBranch", ctx, request) + ret0, _ := ret[0].(*TrimHistoryBranchResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// TrimHistoryBranch indicates an expected call of TrimHistoryBranch. +func (mr *MockExecutionManagerMockRecorder) TrimHistoryBranch(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TrimHistoryBranch", reflect.TypeOf((*MockExecutionManager)(nil).TrimHistoryBranch), ctx, request) +} + +// UnregisterHistoryTaskReader mocks base method. +func (m *MockExecutionManager) UnregisterHistoryTaskReader(ctx context.Context, request *UnregisterHistoryTaskReaderRequest) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "UnregisterHistoryTaskReader", ctx, request) +} + +// UnregisterHistoryTaskReader indicates an expected call of UnregisterHistoryTaskReader. +func (mr *MockExecutionManagerMockRecorder) UnregisterHistoryTaskReader(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UnregisterHistoryTaskReader", reflect.TypeOf((*MockExecutionManager)(nil).UnregisterHistoryTaskReader), ctx, request) +} + +// UpdateHistoryTaskReaderProgress mocks base method. +func (m *MockExecutionManager) UpdateHistoryTaskReaderProgress(ctx context.Context, request *UpdateHistoryTaskReaderProgressRequest) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "UpdateHistoryTaskReaderProgress", ctx, request) +} + +// UpdateHistoryTaskReaderProgress indicates an expected call of UpdateHistoryTaskReaderProgress. +func (mr *MockExecutionManagerMockRecorder) UpdateHistoryTaskReaderProgress(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateHistoryTaskReaderProgress", reflect.TypeOf((*MockExecutionManager)(nil).UpdateHistoryTaskReaderProgress), ctx, request) +} + +// UpdateWorkflowExecution mocks base method. 
+func (m *MockExecutionManager) UpdateWorkflowExecution(ctx context.Context, request *UpdateWorkflowExecutionRequest) (*UpdateWorkflowExecutionResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateWorkflowExecution", ctx, request) + ret0, _ := ret[0].(*UpdateWorkflowExecutionResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateWorkflowExecution indicates an expected call of UpdateWorkflowExecution. +func (mr *MockExecutionManagerMockRecorder) UpdateWorkflowExecution(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateWorkflowExecution", reflect.TypeOf((*MockExecutionManager)(nil).UpdateWorkflowExecution), ctx, request) +} + +// MockTaskManager is a mock of TaskManager interface. +type MockTaskManager struct { + ctrl *gomock.Controller + recorder *MockTaskManagerMockRecorder +} + +// MockTaskManagerMockRecorder is the mock recorder for MockTaskManager. +type MockTaskManagerMockRecorder struct { + mock *MockTaskManager +} + +// NewMockTaskManager creates a new mock instance. +func NewMockTaskManager(ctrl *gomock.Controller) *MockTaskManager { + mock := &MockTaskManager{ctrl: ctrl} + mock.recorder = &MockTaskManagerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockTaskManager) EXPECT() *MockTaskManagerMockRecorder { + return m.recorder +} + +// Close mocks base method. +func (m *MockTaskManager) Close() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Close") +} + +// Close indicates an expected call of Close. +func (mr *MockTaskManagerMockRecorder) Close() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockTaskManager)(nil).Close)) +} + +// CompleteTask mocks base method. +func (m *MockTaskManager) CompleteTask(ctx context.Context, request *CompleteTaskRequest) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CompleteTask", ctx, request) + ret0, _ := ret[0].(error) + return ret0 +} + +// CompleteTask indicates an expected call of CompleteTask. +func (mr *MockTaskManagerMockRecorder) CompleteTask(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CompleteTask", reflect.TypeOf((*MockTaskManager)(nil).CompleteTask), ctx, request) +} + +// CompleteTasksLessThan mocks base method. +func (m *MockTaskManager) CompleteTasksLessThan(ctx context.Context, request *CompleteTasksLessThanRequest) (int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CompleteTasksLessThan", ctx, request) + ret0, _ := ret[0].(int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CompleteTasksLessThan indicates an expected call of CompleteTasksLessThan. +func (mr *MockTaskManagerMockRecorder) CompleteTasksLessThan(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CompleteTasksLessThan", reflect.TypeOf((*MockTaskManager)(nil).CompleteTasksLessThan), ctx, request) +} + +// CountTaskQueuesByBuildId mocks base method. +func (m *MockTaskManager) CountTaskQueuesByBuildId(ctx context.Context, request *CountTaskQueuesByBuildIdRequest) (int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CountTaskQueuesByBuildId", ctx, request) + ret0, _ := ret[0].(int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CountTaskQueuesByBuildId indicates an expected call of CountTaskQueuesByBuildId. 
+func (mr *MockTaskManagerMockRecorder) CountTaskQueuesByBuildId(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CountTaskQueuesByBuildId", reflect.TypeOf((*MockTaskManager)(nil).CountTaskQueuesByBuildId), ctx, request) +} + +// CreateTaskQueue mocks base method. +func (m *MockTaskManager) CreateTaskQueue(ctx context.Context, request *CreateTaskQueueRequest) (*CreateTaskQueueResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateTaskQueue", ctx, request) + ret0, _ := ret[0].(*CreateTaskQueueResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateTaskQueue indicates an expected call of CreateTaskQueue. +func (mr *MockTaskManagerMockRecorder) CreateTaskQueue(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateTaskQueue", reflect.TypeOf((*MockTaskManager)(nil).CreateTaskQueue), ctx, request) +} + +// CreateTasks mocks base method. +func (m *MockTaskManager) CreateTasks(ctx context.Context, request *CreateTasksRequest) (*CreateTasksResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateTasks", ctx, request) + ret0, _ := ret[0].(*CreateTasksResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateTasks indicates an expected call of CreateTasks. +func (mr *MockTaskManagerMockRecorder) CreateTasks(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateTasks", reflect.TypeOf((*MockTaskManager)(nil).CreateTasks), ctx, request) +} + +// DeleteTaskQueue mocks base method. +func (m *MockTaskManager) DeleteTaskQueue(ctx context.Context, request *DeleteTaskQueueRequest) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteTaskQueue", ctx, request) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteTaskQueue indicates an expected call of DeleteTaskQueue. +func (mr *MockTaskManagerMockRecorder) DeleteTaskQueue(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteTaskQueue", reflect.TypeOf((*MockTaskManager)(nil).DeleteTaskQueue), ctx, request) +} + +// GetName mocks base method. +func (m *MockTaskManager) GetName() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetName") + ret0, _ := ret[0].(string) + return ret0 +} + +// GetName indicates an expected call of GetName. +func (mr *MockTaskManagerMockRecorder) GetName() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetName", reflect.TypeOf((*MockTaskManager)(nil).GetName)) +} + +// GetTaskQueue mocks base method. +func (m *MockTaskManager) GetTaskQueue(ctx context.Context, request *GetTaskQueueRequest) (*GetTaskQueueResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetTaskQueue", ctx, request) + ret0, _ := ret[0].(*GetTaskQueueResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetTaskQueue indicates an expected call of GetTaskQueue. +func (mr *MockTaskManagerMockRecorder) GetTaskQueue(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTaskQueue", reflect.TypeOf((*MockTaskManager)(nil).GetTaskQueue), ctx, request) +} + +// GetTaskQueueUserData mocks base method. 
+func (m *MockTaskManager) GetTaskQueueUserData(ctx context.Context, request *GetTaskQueueUserDataRequest) (*GetTaskQueueUserDataResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetTaskQueueUserData", ctx, request) + ret0, _ := ret[0].(*GetTaskQueueUserDataResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetTaskQueueUserData indicates an expected call of GetTaskQueueUserData. +func (mr *MockTaskManagerMockRecorder) GetTaskQueueUserData(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTaskQueueUserData", reflect.TypeOf((*MockTaskManager)(nil).GetTaskQueueUserData), ctx, request) +} + +// GetTaskQueuesByBuildId mocks base method. +func (m *MockTaskManager) GetTaskQueuesByBuildId(ctx context.Context, request *GetTaskQueuesByBuildIdRequest) ([]string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetTaskQueuesByBuildId", ctx, request) + ret0, _ := ret[0].([]string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetTaskQueuesByBuildId indicates an expected call of GetTaskQueuesByBuildId. +func (mr *MockTaskManagerMockRecorder) GetTaskQueuesByBuildId(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTaskQueuesByBuildId", reflect.TypeOf((*MockTaskManager)(nil).GetTaskQueuesByBuildId), ctx, request) +} + +// GetTasks mocks base method. +func (m *MockTaskManager) GetTasks(ctx context.Context, request *GetTasksRequest) (*GetTasksResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetTasks", ctx, request) + ret0, _ := ret[0].(*GetTasksResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetTasks indicates an expected call of GetTasks. +func (mr *MockTaskManagerMockRecorder) GetTasks(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTasks", reflect.TypeOf((*MockTaskManager)(nil).GetTasks), ctx, request) +} + +// ListTaskQueue mocks base method. +func (m *MockTaskManager) ListTaskQueue(ctx context.Context, request *ListTaskQueueRequest) (*ListTaskQueueResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListTaskQueue", ctx, request) + ret0, _ := ret[0].(*ListTaskQueueResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListTaskQueue indicates an expected call of ListTaskQueue. +func (mr *MockTaskManagerMockRecorder) ListTaskQueue(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListTaskQueue", reflect.TypeOf((*MockTaskManager)(nil).ListTaskQueue), ctx, request) +} + +// ListTaskQueueUserDataEntries mocks base method. +func (m *MockTaskManager) ListTaskQueueUserDataEntries(ctx context.Context, request *ListTaskQueueUserDataEntriesRequest) (*ListTaskQueueUserDataEntriesResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListTaskQueueUserDataEntries", ctx, request) + ret0, _ := ret[0].(*ListTaskQueueUserDataEntriesResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListTaskQueueUserDataEntries indicates an expected call of ListTaskQueueUserDataEntries. 
+func (mr *MockTaskManagerMockRecorder) ListTaskQueueUserDataEntries(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListTaskQueueUserDataEntries", reflect.TypeOf((*MockTaskManager)(nil).ListTaskQueueUserDataEntries), ctx, request) +} + +// UpdateTaskQueue mocks base method. +func (m *MockTaskManager) UpdateTaskQueue(ctx context.Context, request *UpdateTaskQueueRequest) (*UpdateTaskQueueResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateTaskQueue", ctx, request) + ret0, _ := ret[0].(*UpdateTaskQueueResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateTaskQueue indicates an expected call of UpdateTaskQueue. +func (mr *MockTaskManagerMockRecorder) UpdateTaskQueue(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateTaskQueue", reflect.TypeOf((*MockTaskManager)(nil).UpdateTaskQueue), ctx, request) +} + +// UpdateTaskQueueUserData mocks base method. +func (m *MockTaskManager) UpdateTaskQueueUserData(ctx context.Context, request *UpdateTaskQueueUserDataRequest) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateTaskQueueUserData", ctx, request) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateTaskQueueUserData indicates an expected call of UpdateTaskQueueUserData. +func (mr *MockTaskManagerMockRecorder) UpdateTaskQueueUserData(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateTaskQueueUserData", reflect.TypeOf((*MockTaskManager)(nil).UpdateTaskQueueUserData), ctx, request) +} + +// MockMetadataManager is a mock of MetadataManager interface. +type MockMetadataManager struct { + ctrl *gomock.Controller + recorder *MockMetadataManagerMockRecorder +} + +// MockMetadataManagerMockRecorder is the mock recorder for MockMetadataManager. +type MockMetadataManagerMockRecorder struct { + mock *MockMetadataManager +} + +// NewMockMetadataManager creates a new mock instance. +func NewMockMetadataManager(ctrl *gomock.Controller) *MockMetadataManager { + mock := &MockMetadataManager{ctrl: ctrl} + mock.recorder = &MockMetadataManagerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockMetadataManager) EXPECT() *MockMetadataManagerMockRecorder { + return m.recorder +} + +// Close mocks base method. +func (m *MockMetadataManager) Close() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Close") +} + +// Close indicates an expected call of Close. +func (mr *MockMetadataManagerMockRecorder) Close() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockMetadataManager)(nil).Close)) +} + +// CreateNamespace mocks base method. +func (m *MockMetadataManager) CreateNamespace(ctx context.Context, request *CreateNamespaceRequest) (*CreateNamespaceResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateNamespace", ctx, request) + ret0, _ := ret[0].(*CreateNamespaceResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateNamespace indicates an expected call of CreateNamespace. 
+func (mr *MockMetadataManagerMockRecorder) CreateNamespace(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateNamespace", reflect.TypeOf((*MockMetadataManager)(nil).CreateNamespace), ctx, request) +} + +// DeleteNamespace mocks base method. +func (m *MockMetadataManager) DeleteNamespace(ctx context.Context, request *DeleteNamespaceRequest) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteNamespace", ctx, request) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteNamespace indicates an expected call of DeleteNamespace. +func (mr *MockMetadataManagerMockRecorder) DeleteNamespace(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteNamespace", reflect.TypeOf((*MockMetadataManager)(nil).DeleteNamespace), ctx, request) +} + +// DeleteNamespaceByName mocks base method. +func (m *MockMetadataManager) DeleteNamespaceByName(ctx context.Context, request *DeleteNamespaceByNameRequest) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteNamespaceByName", ctx, request) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteNamespaceByName indicates an expected call of DeleteNamespaceByName. +func (mr *MockMetadataManagerMockRecorder) DeleteNamespaceByName(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteNamespaceByName", reflect.TypeOf((*MockMetadataManager)(nil).DeleteNamespaceByName), ctx, request) +} + +// GetMetadata mocks base method. +func (m *MockMetadataManager) GetMetadata(ctx context.Context) (*GetMetadataResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetMetadata", ctx) + ret0, _ := ret[0].(*GetMetadataResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetMetadata indicates an expected call of GetMetadata. +func (mr *MockMetadataManagerMockRecorder) GetMetadata(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMetadata", reflect.TypeOf((*MockMetadataManager)(nil).GetMetadata), ctx) +} + +// GetName mocks base method. +func (m *MockMetadataManager) GetName() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetName") + ret0, _ := ret[0].(string) + return ret0 +} + +// GetName indicates an expected call of GetName. +func (mr *MockMetadataManagerMockRecorder) GetName() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetName", reflect.TypeOf((*MockMetadataManager)(nil).GetName)) +} + +// GetNamespace mocks base method. +func (m *MockMetadataManager) GetNamespace(ctx context.Context, request *GetNamespaceRequest) (*GetNamespaceResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetNamespace", ctx, request) + ret0, _ := ret[0].(*GetNamespaceResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetNamespace indicates an expected call of GetNamespace. +func (mr *MockMetadataManagerMockRecorder) GetNamespace(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNamespace", reflect.TypeOf((*MockMetadataManager)(nil).GetNamespace), ctx, request) +} + +// InitializeSystemNamespaces mocks base method. 
+func (m *MockMetadataManager) InitializeSystemNamespaces(ctx context.Context, currentClusterName string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "InitializeSystemNamespaces", ctx, currentClusterName) + ret0, _ := ret[0].(error) + return ret0 +} + +// InitializeSystemNamespaces indicates an expected call of InitializeSystemNamespaces. +func (mr *MockMetadataManagerMockRecorder) InitializeSystemNamespaces(ctx, currentClusterName interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InitializeSystemNamespaces", reflect.TypeOf((*MockMetadataManager)(nil).InitializeSystemNamespaces), ctx, currentClusterName) +} + +// ListNamespaces mocks base method. +func (m *MockMetadataManager) ListNamespaces(ctx context.Context, request *ListNamespacesRequest) (*ListNamespacesResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListNamespaces", ctx, request) + ret0, _ := ret[0].(*ListNamespacesResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListNamespaces indicates an expected call of ListNamespaces. +func (mr *MockMetadataManagerMockRecorder) ListNamespaces(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListNamespaces", reflect.TypeOf((*MockMetadataManager)(nil).ListNamespaces), ctx, request) +} + +// RenameNamespace mocks base method. +func (m *MockMetadataManager) RenameNamespace(ctx context.Context, request *RenameNamespaceRequest) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RenameNamespace", ctx, request) + ret0, _ := ret[0].(error) + return ret0 +} + +// RenameNamespace indicates an expected call of RenameNamespace. +func (mr *MockMetadataManagerMockRecorder) RenameNamespace(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RenameNamespace", reflect.TypeOf((*MockMetadataManager)(nil).RenameNamespace), ctx, request) +} + +// UpdateNamespace mocks base method. +func (m *MockMetadataManager) UpdateNamespace(ctx context.Context, request *UpdateNamespaceRequest) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateNamespace", ctx, request) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateNamespace indicates an expected call of UpdateNamespace. +func (mr *MockMetadataManagerMockRecorder) UpdateNamespace(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateNamespace", reflect.TypeOf((*MockMetadataManager)(nil).UpdateNamespace), ctx, request) +} + +// MockClusterMetadataManager is a mock of ClusterMetadataManager interface. +type MockClusterMetadataManager struct { + ctrl *gomock.Controller + recorder *MockClusterMetadataManagerMockRecorder +} + +// MockClusterMetadataManagerMockRecorder is the mock recorder for MockClusterMetadataManager. +type MockClusterMetadataManagerMockRecorder struct { + mock *MockClusterMetadataManager +} + +// NewMockClusterMetadataManager creates a new mock instance. +func NewMockClusterMetadataManager(ctrl *gomock.Controller) *MockClusterMetadataManager { + mock := &MockClusterMetadataManager{ctrl: ctrl} + mock.recorder = &MockClusterMetadataManagerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockClusterMetadataManager) EXPECT() *MockClusterMetadataManagerMockRecorder { + return m.recorder +} + +// Close mocks base method. 
+func (m *MockClusterMetadataManager) Close() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Close") +} + +// Close indicates an expected call of Close. +func (mr *MockClusterMetadataManagerMockRecorder) Close() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockClusterMetadataManager)(nil).Close)) +} + +// DeleteClusterMetadata mocks base method. +func (m *MockClusterMetadataManager) DeleteClusterMetadata(ctx context.Context, request *DeleteClusterMetadataRequest) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteClusterMetadata", ctx, request) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteClusterMetadata indicates an expected call of DeleteClusterMetadata. +func (mr *MockClusterMetadataManagerMockRecorder) DeleteClusterMetadata(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteClusterMetadata", reflect.TypeOf((*MockClusterMetadataManager)(nil).DeleteClusterMetadata), ctx, request) +} + +// GetClusterMembers mocks base method. +func (m *MockClusterMetadataManager) GetClusterMembers(ctx context.Context, request *GetClusterMembersRequest) (*GetClusterMembersResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetClusterMembers", ctx, request) + ret0, _ := ret[0].(*GetClusterMembersResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetClusterMembers indicates an expected call of GetClusterMembers. +func (mr *MockClusterMetadataManagerMockRecorder) GetClusterMembers(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetClusterMembers", reflect.TypeOf((*MockClusterMetadataManager)(nil).GetClusterMembers), ctx, request) +} + +// GetClusterMetadata mocks base method. +func (m *MockClusterMetadataManager) GetClusterMetadata(ctx context.Context, request *GetClusterMetadataRequest) (*GetClusterMetadataResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetClusterMetadata", ctx, request) + ret0, _ := ret[0].(*GetClusterMetadataResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetClusterMetadata indicates an expected call of GetClusterMetadata. +func (mr *MockClusterMetadataManagerMockRecorder) GetClusterMetadata(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetClusterMetadata", reflect.TypeOf((*MockClusterMetadataManager)(nil).GetClusterMetadata), ctx, request) +} + +// GetCurrentClusterMetadata mocks base method. +func (m *MockClusterMetadataManager) GetCurrentClusterMetadata(ctx context.Context) (*GetClusterMetadataResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetCurrentClusterMetadata", ctx) + ret0, _ := ret[0].(*GetClusterMetadataResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetCurrentClusterMetadata indicates an expected call of GetCurrentClusterMetadata. +func (mr *MockClusterMetadataManagerMockRecorder) GetCurrentClusterMetadata(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentClusterMetadata", reflect.TypeOf((*MockClusterMetadataManager)(nil).GetCurrentClusterMetadata), ctx) +} + +// GetName mocks base method. 
+func (m *MockClusterMetadataManager) GetName() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetName") + ret0, _ := ret[0].(string) + return ret0 +} + +// GetName indicates an expected call of GetName. +func (mr *MockClusterMetadataManagerMockRecorder) GetName() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetName", reflect.TypeOf((*MockClusterMetadataManager)(nil).GetName)) +} + +// ListClusterMetadata mocks base method. +func (m *MockClusterMetadataManager) ListClusterMetadata(ctx context.Context, request *ListClusterMetadataRequest) (*ListClusterMetadataResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListClusterMetadata", ctx, request) + ret0, _ := ret[0].(*ListClusterMetadataResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListClusterMetadata indicates an expected call of ListClusterMetadata. +func (mr *MockClusterMetadataManagerMockRecorder) ListClusterMetadata(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListClusterMetadata", reflect.TypeOf((*MockClusterMetadataManager)(nil).ListClusterMetadata), ctx, request) +} + +// PruneClusterMembership mocks base method. +func (m *MockClusterMetadataManager) PruneClusterMembership(ctx context.Context, request *PruneClusterMembershipRequest) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PruneClusterMembership", ctx, request) + ret0, _ := ret[0].(error) + return ret0 +} + +// PruneClusterMembership indicates an expected call of PruneClusterMembership. +func (mr *MockClusterMetadataManagerMockRecorder) PruneClusterMembership(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PruneClusterMembership", reflect.TypeOf((*MockClusterMetadataManager)(nil).PruneClusterMembership), ctx, request) +} + +// SaveClusterMetadata mocks base method. +func (m *MockClusterMetadataManager) SaveClusterMetadata(ctx context.Context, request *SaveClusterMetadataRequest) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SaveClusterMetadata", ctx, request) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SaveClusterMetadata indicates an expected call of SaveClusterMetadata. +func (mr *MockClusterMetadataManagerMockRecorder) SaveClusterMetadata(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveClusterMetadata", reflect.TypeOf((*MockClusterMetadataManager)(nil).SaveClusterMetadata), ctx, request) +} + +// UpsertClusterMembership mocks base method. +func (m *MockClusterMetadataManager) UpsertClusterMembership(ctx context.Context, request *UpsertClusterMembershipRequest) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpsertClusterMembership", ctx, request) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpsertClusterMembership indicates an expected call of UpsertClusterMembership. 
+func (mr *MockClusterMetadataManagerMockRecorder) UpsertClusterMembership(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpsertClusterMembership", reflect.TypeOf((*MockClusterMetadataManager)(nil).UpsertClusterMembership), ctx, request) +} diff -Nru temporal-1.21.5-1/src/common/persistence/execution_manager.go temporal-1.22.5/src/common/persistence/execution_manager.go --- temporal-1.21.5-1/src/common/persistence/execution_manager.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/execution_manager.go 2024-02-23 09:45:43.000000000 +0000 @@ -37,6 +37,7 @@ historyspb "go.temporal.io/server/api/history/v1" persistencespb "go.temporal.io/server/api/persistence/v1" "go.temporal.io/server/common" + "go.temporal.io/server/common/definition" "go.temporal.io/server/common/dynamicconfig" "go.temporal.io/server/common/log" "go.temporal.io/server/common/log/tag" @@ -49,6 +50,7 @@ // executionManagerImpl implements ExecutionManager based on ExecutionStore, statsComputer and Serializer executionManagerImpl struct { serializer serialization.Serializer + eventBlobCache XDCCache persistence ExecutionStore logger log.Logger pagingTokenSerializer *jsonHistoryTokenSerializer @@ -62,12 +64,13 @@ func NewExecutionManager( persistence ExecutionStore, serializer serialization.Serializer, + eventBlobCache XDCCache, logger log.Logger, transactionSizeLimit dynamicconfig.IntPropertyFn, ) ExecutionManager { - return &executionManagerImpl{ serializer: serializer, + eventBlobCache: eventBlobCache, persistence: persistence, logger: logger, pagingTokenSerializer: newJSONHistoryTokenSerializer(), @@ -91,10 +94,16 @@ ) (*CreateWorkflowExecutionResponse, error) { newSnapshot := request.NewWorkflowSnapshot - newWorkflowNewEvents, newHistoryDiff, err := m.serializeWorkflowEventBatches(ctx, request.ShardID, request.NewWorkflowEvents) + newWorkflowXDCKVs, newWorkflowNewEvents, newHistoryDiff, err := m.serializeWorkflowEventBatches( + ctx, + request.ShardID, + request.NewWorkflowSnapshot.ExecutionInfo, + request.NewWorkflowEvents, + ) if err != nil { return nil, err } + newSnapshot.ExecutionInfo.ExecutionStats.HistorySize += int64(newHistoryDiff.SizeDiff) if err := ValidateCreateWorkflowModeState( @@ -128,6 +137,7 @@ if _, err := m.persistence.CreateWorkflowExecution(ctx, newRequest); err != nil { return nil, err } + m.addXDCCacheKV(newWorkflowXDCKVs) return &CreateWorkflowExecutionResponse{ NewMutableStateStats: *statusOfInternalWorkflowSnapshot( serializedNewWorkflowSnapshot, @@ -144,17 +154,30 @@ updateMutation := request.UpdateWorkflowMutation newSnapshot := request.NewWorkflowSnapshot - updateWorkflowNewEvents, updateWorkflowHistoryDiff, err := m.serializeWorkflowEventBatches(ctx, request.ShardID, request.UpdateWorkflowEvents) + updateWorkflowXDCKVs, updateWorkflowNewEvents, updateWorkflowHistoryDiff, err := m.serializeWorkflowEventBatches( + ctx, + request.ShardID, + request.UpdateWorkflowMutation.ExecutionInfo, + request.UpdateWorkflowEvents, + ) if err != nil { return nil, err } updateMutation.ExecutionInfo.ExecutionStats.HistorySize += int64(updateWorkflowHistoryDiff.SizeDiff) - newWorkflowNewEvents, newWorkflowHistoryDiff, err := m.serializeWorkflowEventBatches(ctx, request.ShardID, request.NewWorkflowEvents) - if err != nil { - return nil, err - } + var newWorkflowXDCKVs map[XDCCacheKey]XDCCacheValue + var newWorkflowNewEvents []*InternalAppendHistoryNodesRequest + var newWorkflowHistoryDiff *HistoryStatistics 
if newSnapshot != nil { + newWorkflowXDCKVs, newWorkflowNewEvents, newWorkflowHistoryDiff, err = m.serializeWorkflowEventBatches( + ctx, + request.ShardID, + request.NewWorkflowSnapshot.ExecutionInfo, + request.NewWorkflowEvents, + ) + if err != nil { + return nil, err + } newSnapshot.ExecutionInfo.ExecutionStats.HistorySize += int64(newWorkflowHistoryDiff.SizeDiff) } @@ -199,6 +222,8 @@ err = m.persistence.UpdateWorkflowExecution(ctx, newRequest) switch err.(type) { case nil: + m.addXDCCacheKV(updateWorkflowXDCKVs) + m.addXDCCacheKV(newWorkflowXDCKVs) return &UpdateWorkflowExecutionResponse{ UpdateMutableStateStats: *statusOfInternalWorkflowMutation( &newRequest.UpdateWorkflowMutation, @@ -234,25 +259,46 @@ newSnapshot := request.NewWorkflowSnapshot currentMutation := request.CurrentWorkflowMutation - resetWorkflowEventsNewEvents, resetWorkflowHistoryDiff, err := m.serializeWorkflowEventBatches(ctx, request.ShardID, request.ResetWorkflowEvents) + resetWorkflowXDCKVs, resetWorkflowEvents, resetWorkflowHistoryDiff, err := m.serializeWorkflowEventBatches( + ctx, + request.ShardID, + request.ResetWorkflowSnapshot.ExecutionInfo, + request.ResetWorkflowEvents, + ) if err != nil { return nil, err } resetSnapshot.ExecutionInfo.ExecutionStats.HistorySize += int64(resetWorkflowHistoryDiff.SizeDiff) - newWorkflowEventsNewEvents, newWorkflowHistoryDiff, err := m.serializeWorkflowEventBatches(ctx, request.ShardID, request.NewWorkflowEvents) - if err != nil { - return nil, err - } + var newWorkflowXDCKVs map[XDCCacheKey]XDCCacheValue + var newWorkflowEvents []*InternalAppendHistoryNodesRequest + var newWorkflowHistoryDiff *HistoryStatistics if newSnapshot != nil { + newWorkflowXDCKVs, newWorkflowEvents, newWorkflowHistoryDiff, err = m.serializeWorkflowEventBatches( + ctx, + request.ShardID, + request.NewWorkflowSnapshot.ExecutionInfo, + request.NewWorkflowEvents, + ) + if err != nil { + return nil, err + } newSnapshot.ExecutionInfo.ExecutionStats.HistorySize += int64(newWorkflowHistoryDiff.SizeDiff) } - currentWorkflowEventsNewEvents, currentWorkflowHistoryDiff, err := m.serializeWorkflowEventBatches(ctx, request.ShardID, request.CurrentWorkflowEvents) - if err != nil { - return nil, err - } + var currentWorkflowXDCKVs map[XDCCacheKey]XDCCacheValue + var currentWorkflowEvents []*InternalAppendHistoryNodesRequest + var currentWorkflowHistoryDiff *HistoryStatistics if currentMutation != nil { + currentWorkflowXDCKVs, currentWorkflowEvents, currentWorkflowHistoryDiff, err = m.serializeWorkflowEventBatches( + ctx, + request.ShardID, + request.CurrentWorkflowMutation.ExecutionInfo, + request.CurrentWorkflowEvents, + ) + if err != nil { + return nil, err + } currentMutation.ExecutionInfo.ExecutionStats.HistorySize += int64(currentWorkflowHistoryDiff.SizeDiff) } @@ -291,18 +337,21 @@ Mode: request.Mode, ResetWorkflowSnapshot: *serializedResetWorkflowSnapshot, - ResetWorkflowEventsNewEvents: resetWorkflowEventsNewEvents, + ResetWorkflowEventsNewEvents: resetWorkflowEvents, NewWorkflowSnapshot: serializedNewWorkflowMutation, - NewWorkflowEventsNewEvents: newWorkflowEventsNewEvents, + NewWorkflowEventsNewEvents: newWorkflowEvents, CurrentWorkflowMutation: serializedCurrentWorkflowMutation, - CurrentWorkflowEventsNewEvents: currentWorkflowEventsNewEvents, + CurrentWorkflowEventsNewEvents: currentWorkflowEvents, } err = m.persistence.ConflictResolveWorkflowExecution(ctx, newRequest) switch err.(type) { case nil: + m.addXDCCacheKV(resetWorkflowXDCKVs) + m.addXDCCacheKV(newWorkflowXDCKVs) + 
m.addXDCCacheKV(currentWorkflowXDCKVs) return &ConflictResolveWorkflowExecutionResponse{ ResetMutableStateStats: *statusOfInternalWorkflowSnapshot( &newRequest.ResetWorkflowSnapshot, @@ -394,25 +443,56 @@ func (m *executionManagerImpl) serializeWorkflowEventBatches( ctx context.Context, shardID int32, + executionInfo *persistencespb.WorkflowExecutionInfo, eventBatches []*WorkflowEvents, -) ([]*InternalAppendHistoryNodesRequest, *HistoryStatistics, error) { +) (map[XDCCacheKey]XDCCacheValue, []*InternalAppendHistoryNodesRequest, *HistoryStatistics, error) { var historyStatistics HistoryStatistics if len(eventBatches) == 0 { - return nil, &historyStatistics, nil + return nil, nil, &historyStatistics, nil } + xdcKVs := make(map[XDCCacheKey]XDCCacheValue, len(eventBatches)) workflowNewEvents := make([]*InternalAppendHistoryNodesRequest, 0, len(eventBatches)) for _, workflowEvents := range eventBatches { newEvents, err := m.serializeWorkflowEvents(ctx, shardID, workflowEvents) if err != nil { - return nil, nil, err + return nil, nil, nil, err } + versionHistoryItems, _, baseWorkflowInfo, err := GetXDCCacheValue( + executionInfo, + workflowEvents.Events[0].EventId, + workflowEvents.Events[0].Version, + ) + if err != nil { + return nil, nil, nil, err + } + xdcKVs[NewXDCCacheKey( + definition.NewWorkflowKey(workflowEvents.NamespaceID, workflowEvents.WorkflowID, workflowEvents.RunID), + workflowEvents.Events[0].EventId, + workflowEvents.Events[len(workflowEvents.Events)-1].EventId+1, + workflowEvents.Events[0].Version, + )] = NewXDCCacheValue( + baseWorkflowInfo, + versionHistoryItems, + newEvents.Node.Events, + ) newEvents.ShardID = shardID workflowNewEvents = append(workflowNewEvents, newEvents) historyStatistics.SizeDiff += len(newEvents.Node.Events.Data) historyStatistics.CountDiff += len(workflowEvents.Events) } - return workflowNewEvents, &historyStatistics, nil + return xdcKVs, workflowNewEvents, &historyStatistics, nil +} + +func (m *executionManagerImpl) addXDCCacheKV( + xdcKVs map[XDCCacheKey]XDCCacheValue, +) { + if m.eventBlobCache == nil { + return + } + for k, v := range xdcKVs { + m.eventBlobCache.Put(k, v) + } } func (m *executionManagerImpl) DeserializeBufferedEvents( // unexport diff -Nru temporal-1.21.5-1/src/common/persistence/health_signal_aggregator.go temporal-1.22.5/src/common/persistence/health_signal_aggregator.go --- temporal-1.21.5-1/src/common/persistence/health_signal_aggregator.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/health_signal_aggregator.go 2024-02-23 09:45:43.000000000 +0000 @@ -43,10 +43,11 @@ type ( HealthSignalAggregator interface { - common.Daemon Record(callerSegment int32, namespace string, latency time.Duration, err error) AverageLatency() float64 ErrorRatio() float64 + Start() + Stop() } HealthSignalAggregatorImpl struct { diff -Nru temporal-1.21.5-1/src/common/persistence/jsonHistoryTokenSerializer.go temporal-1.22.5/src/common/persistence/jsonHistoryTokenSerializer.go --- temporal-1.21.5-1/src/common/persistence/jsonHistoryTokenSerializer.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/jsonHistoryTokenSerializer.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,92 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package persistence - -import "encoding/json" - -type ( - jsonHistoryTokenSerializer struct{} - - // historyPagingToken is used to serialize/deserialize pagination token for ReadHistoryBranchRequest - historyPagingToken struct { - LastEventID int64 - // the pagination token passing to persistence - StoreToken []byte - // recording which branchRange it is reading - CurrentRangeIndex int - FinalRangeIndex int - - // LastNodeID is the last known node ID attached to a history node - LastNodeID int64 - // LastTransactionID is the last known transaction ID attached to a history node - LastTransactionID int64 - } -) - -const notStartedIndex = -1 - -// newJSONHistoryTokenSerializer creates a new instance of TaskTokenSerializer -func newJSONHistoryTokenSerializer() *jsonHistoryTokenSerializer { - return &jsonHistoryTokenSerializer{} -} - -func (t *historyPagingToken) SetRangeIndexes( - current int, - final int, -) { - - t.CurrentRangeIndex = current - t.FinalRangeIndex = final -} - -func (j *jsonHistoryTokenSerializer) Serialize( - token *historyPagingToken, -) ([]byte, error) { - - data, err := json.Marshal(token) - return data, err -} - -func (j *jsonHistoryTokenSerializer) Deserialize( - data []byte, - defaultLastEventID int64, - defaultLastNodeID int64, - defaultLastTransactionID int64, -) (*historyPagingToken, error) { - - if len(data) == 0 { - token := historyPagingToken{ - LastEventID: defaultLastEventID, - CurrentRangeIndex: notStartedIndex, - LastNodeID: defaultLastNodeID, - LastTransactionID: defaultLastTransactionID, - } - return &token, nil - } - - token := historyPagingToken{} - err := json.Unmarshal(data, &token) - return &token, err -} diff -Nru temporal-1.21.5-1/src/common/persistence/json_history_token_serializer.go temporal-1.22.5/src/common/persistence/json_history_token_serializer.go --- temporal-1.21.5-1/src/common/persistence/json_history_token_serializer.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/json_history_token_serializer.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,92 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package persistence + +import "encoding/json" + +type ( + jsonHistoryTokenSerializer struct{} + + // historyPagingToken is used to serialize/deserialize pagination token for ReadHistoryBranchRequest + historyPagingToken struct { + LastEventID int64 + // the pagination token passing to persistence + StoreToken []byte + // recording which branchRange it is reading + CurrentRangeIndex int + FinalRangeIndex int + + // LastNodeID is the last known node ID attached to a history node + LastNodeID int64 + // LastTransactionID is the last known transaction ID attached to a history node + LastTransactionID int64 + } +) + +const notStartedIndex = -1 + +// newJSONHistoryTokenSerializer creates a new instance of TaskTokenSerializer +func newJSONHistoryTokenSerializer() *jsonHistoryTokenSerializer { + return &jsonHistoryTokenSerializer{} +} + +func (t *historyPagingToken) SetRangeIndexes( + current int, + final int, +) { + + t.CurrentRangeIndex = current + t.FinalRangeIndex = final +} + +func (j *jsonHistoryTokenSerializer) Serialize( + token *historyPagingToken, +) ([]byte, error) { + + data, err := json.Marshal(token) + return data, err +} + +func (j *jsonHistoryTokenSerializer) Deserialize( + data []byte, + defaultLastEventID int64, + defaultLastNodeID int64, + defaultLastTransactionID int64, +) (*historyPagingToken, error) { + + if len(data) == 0 { + token := historyPagingToken{ + LastEventID: defaultLastEventID, + CurrentRangeIndex: notStartedIndex, + LastNodeID: defaultLastNodeID, + LastTransactionID: defaultLastTransactionID, + } + return &token, nil + } + + token := historyPagingToken{} + err := json.Unmarshal(data, &token) + return &token, err +} diff -Nru temporal-1.21.5-1/src/common/persistence/mock/store_mock.go temporal-1.22.5/src/common/persistence/mock/store_mock.go --- temporal-1.21.5-1/src/common/persistence/mock/store_mock.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/mock/store_mock.go 2024-02-23 09:45:43.000000000 +0000 @@ -23,7 +23,7 @@ // THE SOFTWARE. // Code generated by MockGen. DO NOT EDIT. -// Source: persistenceInterface.go +// Source: persistence_interface.go // Package mock is a generated GoMock package. 
package mock diff -Nru temporal-1.21.5-1/src/common/persistence/namespaceReplicationQueue.go temporal-1.22.5/src/common/persistence/namespaceReplicationQueue.go --- temporal-1.21.5-1/src/common/persistence/namespaceReplicationQueue.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/namespaceReplicationQueue.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,440 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -//go:generate mockgen -copyright_file ../../LICENSE -package $GOPACKAGE -source $GOFILE -destination namespaceReplicationQueue_mock.go - -package persistence - -import ( - "context" - "fmt" - "sync/atomic" - "time" - - commonpb "go.temporal.io/api/common/v1" - enumspb "go.temporal.io/api/enums/v1" - - "go.temporal.io/server/api/persistence/v1" - "go.temporal.io/server/internal/goro" - - replicationspb "go.temporal.io/server/api/replication/v1" - "go.temporal.io/server/common" - "go.temporal.io/server/common/convert" - "go.temporal.io/server/common/headers" - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/log/tag" - "go.temporal.io/server/common/metrics" - "go.temporal.io/server/common/persistence/serialization" -) - -const ( - purgeInterval = 5 * time.Minute - localNamespaceReplicationCluster = "namespaceReplication" -) - -var _ NamespaceReplicationQueue = (*namespaceReplicationQueueImpl)(nil) - -// NewNamespaceReplicationQueue creates a new NamespaceReplicationQueue instance -func NewNamespaceReplicationQueue( - queue Queue, - serializer serialization.Serializer, - clusterName string, - metricsHandler metrics.Handler, - logger log.Logger, -) (NamespaceReplicationQueue, error) { - - blob, err := serializer.QueueMetadataToBlob( - &persistence.QueueMetadata{ - ClusterAckLevels: make(map[string]int64), - }, enumspb.ENCODING_TYPE_PROTO3) - if err != nil { - return nil, err - } - err = queue.Init(context.TODO(), blob) - if err != nil { - return nil, err - } - - return &namespaceReplicationQueueImpl{ - queue: queue, - clusterName: clusterName, - metricsHandler: metricsHandler, - logger: logger, - ackNotificationChan: make(chan bool), - done: make(chan bool), - status: common.DaemonStatusInitialized, - serializer: serializer, - }, nil -} - -type ( - namespaceReplicationQueueImpl struct { - queue Queue - clusterName string - metricsHandler metrics.Handler - logger 
log.Logger - ackLevelUpdated bool - ackNotificationChan chan bool - done chan bool - status int32 - gorogrp goro.Group - serializer serialization.Serializer - } - - // NamespaceReplicationQueue is used to publish and list namespace replication tasks - NamespaceReplicationQueue interface { - common.Daemon - Publish(ctx context.Context, task *replicationspb.ReplicationTask) error - GetReplicationMessages( - ctx context.Context, - lastMessageID int64, - maxCount int, - ) ([]*replicationspb.ReplicationTask, int64, error) - UpdateAckLevel(ctx context.Context, lastProcessedMessageID int64, clusterName string) error - GetAckLevels(ctx context.Context) (map[string]int64, error) - - PublishToDLQ(ctx context.Context, task *replicationspb.ReplicationTask) error - GetMessagesFromDLQ( - ctx context.Context, - firstMessageID int64, - lastMessageID int64, - pageSize int, - pageToken []byte, - ) ([]*replicationspb.ReplicationTask, []byte, error) - UpdateDLQAckLevel(ctx context.Context, lastProcessedMessageID int64) error - GetDLQAckLevel(ctx context.Context) (int64, error) - - RangeDeleteMessagesFromDLQ(ctx context.Context, firstMessageID int64, lastMessageID int64) error - DeleteMessageFromDLQ(ctx context.Context, messageID int64) error - } -) - -func (q *namespaceReplicationQueueImpl) Start() { - if !atomic.CompareAndSwapInt32(&q.status, common.DaemonStatusInitialized, common.DaemonStatusStarted) { - return - } - - q.gorogrp.Go(q.purgeProcessor) -} - -func (q *namespaceReplicationQueueImpl) Stop() { - if !atomic.CompareAndSwapInt32(&q.status, common.DaemonStatusStarted, common.DaemonStatusStopped) { - return - } - close(q.done) - - q.gorogrp.Cancel() -} - -func (q *namespaceReplicationQueueImpl) Publish(ctx context.Context, task *replicationspb.ReplicationTask) error { - blob, err := q.serializer.ReplicationTaskToBlob(task, enumspb.ENCODING_TYPE_PROTO3) - if err != nil { - return fmt.Errorf("failed to encode message: %v", err) - } - return q.queue.EnqueueMessage(ctx, *blob) -} - -func (q *namespaceReplicationQueueImpl) PublishToDLQ(ctx context.Context, task *replicationspb.ReplicationTask) error { - blob, err := q.serializer.ReplicationTaskToBlob(task, enumspb.ENCODING_TYPE_PROTO3) - if err != nil { - return fmt.Errorf("failed to encode message: %v", err) - } - messageID, err := q.queue.EnqueueMessageToDLQ(ctx, *blob) - if err != nil { - return err - } - - q.metricsHandler.Gauge(metrics.NamespaceReplicationDLQMaxLevelGauge.GetMetricName()). 
- Record(float64(messageID), metrics.OperationTag(metrics.PersistenceNamespaceReplicationQueueScope)) - return nil -} - -func (q *namespaceReplicationQueueImpl) GetReplicationMessages( - ctx context.Context, - lastMessageID int64, - pageSize int, -) ([]*replicationspb.ReplicationTask, int64, error) { - - messages, err := q.queue.ReadMessages(ctx, lastMessageID, pageSize) - if err != nil { - return nil, lastMessageID, err - } - - replicationTasks := make([]*replicationspb.ReplicationTask, 0, len(messages)) - for _, message := range messages { - replicationTask, err := q.serializer.ReplicationTaskFromBlob(NewDataBlob(message.Data, message.Encoding)) - if err != nil { - return nil, lastMessageID, fmt.Errorf("failed to decode task: %v", err) - } - - lastMessageID = message.ID - replicationTasks = append(replicationTasks, replicationTask) - } - - return replicationTasks, lastMessageID, nil -} - -func (q *namespaceReplicationQueueImpl) UpdateAckLevel( - ctx context.Context, - lastProcessedMessageID int64, - clusterName string, -) error { - return q.updateAckLevelWithRetry(ctx, lastProcessedMessageID, clusterName, false) -} - -func (q *namespaceReplicationQueueImpl) updateAckLevelWithRetry( - ctx context.Context, - lastProcessedMessageID int64, - clusterName string, - isDLQ bool, -) error { -conditionFailedRetry: - for { - err := q.updateAckLevel(ctx, lastProcessedMessageID, clusterName, isDLQ) - switch err.(type) { - case *ConditionFailedError: - continue conditionFailedRetry - } - - return err - } -} - -func (q *namespaceReplicationQueueImpl) updateAckLevel( - ctx context.Context, - lastProcessedMessageID int64, - clusterName string, - isDLQ bool, -) error { - var ackLevelErr error - var internalMetadata *InternalQueueMetadata - if isDLQ { - internalMetadata, ackLevelErr = q.queue.GetDLQAckLevels(ctx) - } else { - internalMetadata, ackLevelErr = q.queue.GetAckLevels(ctx) - } - - if ackLevelErr != nil { - return ackLevelErr - } - - ackLevels, err := q.ackLevelsFromBlob(internalMetadata.Blob) - if err != nil { - return err - } - - // Ignore possibly delayed message - if ack, ok := ackLevels[clusterName]; ok && ack > lastProcessedMessageID { - return nil - } - - // TODO remove this block in 1.12.x - delete(ackLevels, "") - // TODO remove this block in 1.12.x - - // update ack level - ackLevels[clusterName] = lastProcessedMessageID - blob, err := q.serializer.QueueMetadataToBlob(&persistence.QueueMetadata{ - ClusterAckLevels: ackLevels, - }, enumspb.ENCODING_TYPE_PROTO3) - if err != nil { - return err - } - - internalMetadata.Blob = blob - if isDLQ { - err = q.queue.UpdateDLQAckLevel(ctx, internalMetadata) - } else { - err = q.queue.UpdateAckLevel(ctx, internalMetadata) - } - if err != nil { - return fmt.Errorf("failed to update ack level: %v", err) - } - - select { - case q.ackNotificationChan <- true: - default: - } - - return nil -} - -func (q *namespaceReplicationQueueImpl) GetAckLevels( - ctx context.Context, -) (map[string]int64, error) { - metadata, err := q.queue.GetAckLevels(ctx) - if err != nil { - return nil, err - } - return q.ackLevelsFromBlob(metadata.Blob) -} - -func (q *namespaceReplicationQueueImpl) ackLevelsFromBlob(blob *commonpb.DataBlob) (map[string]int64, error) { - if blob == nil { - return make(map[string]int64), nil - } - - metadata, err := q.serializer.QueueMetadataFromBlob(blob) - if err != nil { - return nil, err - } - ackLevels := metadata.ClusterAckLevels - if ackLevels == nil { - ackLevels = make(map[string]int64) - } - return ackLevels, nil -} - -func (q 
*namespaceReplicationQueueImpl) GetMessagesFromDLQ( - ctx context.Context, - firstMessageID int64, - lastMessageID int64, - pageSize int, - pageToken []byte, -) ([]*replicationspb.ReplicationTask, []byte, error) { - - messages, token, err := q.queue.ReadMessagesFromDLQ(ctx, firstMessageID, lastMessageID, pageSize, pageToken) - if err != nil { - return nil, nil, err - } - - var replicationTasks []*replicationspb.ReplicationTask - for _, message := range messages { - replicationTask, err := q.serializer.ReplicationTaskFromBlob(NewDataBlob(message.Data, message.Encoding)) - if err != nil { - return nil, nil, fmt.Errorf("failed to decode dlq task: %v", err) - } - - // Overwrite to local cluster message id - replicationTask.SourceTaskId = message.ID - replicationTasks = append(replicationTasks, replicationTask) - } - - return replicationTasks, token, nil -} - -func (q *namespaceReplicationQueueImpl) UpdateDLQAckLevel( - ctx context.Context, - lastProcessedMessageID int64, -) error { - return q.updateAckLevelWithRetry(ctx, lastProcessedMessageID, localNamespaceReplicationCluster, true) -} - -func (q *namespaceReplicationQueueImpl) GetDLQAckLevel( - ctx context.Context, -) (int64, error) { - metadata, err := q.queue.GetDLQAckLevels(ctx) - if err != nil { - return EmptyQueueMessageID, err - } - dlqMetadata, err := q.ackLevelsFromBlob(metadata.Blob) - if err != nil { - return EmptyQueueMessageID, err - } - - ackLevel, ok := dlqMetadata[localNamespaceReplicationCluster] - if !ok { - return EmptyQueueMessageID, nil - } - return ackLevel, nil -} - -func (q *namespaceReplicationQueueImpl) RangeDeleteMessagesFromDLQ( - ctx context.Context, - firstMessageID int64, - lastMessageID int64, -) error { - - return q.queue.RangeDeleteMessagesFromDLQ( - ctx, - firstMessageID, - lastMessageID, - ) -} - -func (q *namespaceReplicationQueueImpl) DeleteMessageFromDLQ( - ctx context.Context, - messageID int64, -) error { - - return q.queue.DeleteMessageFromDLQ(ctx, messageID) -} - -func (q *namespaceReplicationQueueImpl) purgeAckedMessages( - ctx context.Context, -) error { - ackLevelByCluster, err := q.GetAckLevels(ctx) - if err != nil { - return fmt.Errorf("failed to purge messages: %v", err) - } - - if len(ackLevelByCluster) == 0 { - return nil - } - - var minAckLevel *int64 - for _, ackLevel := range ackLevelByCluster { - if minAckLevel == nil || ackLevel < *minAckLevel { - minAckLevel = convert.Int64Ptr(ackLevel) - } - } - if minAckLevel == nil { - return nil - } - - err = q.queue.DeleteMessagesBefore(ctx, *minAckLevel) - if err != nil { - return fmt.Errorf("failed to purge messages: %v", err) - } - q.metricsHandler.Gauge(metrics.NamespaceReplicationTaskAckLevelGauge.GetMetricName()). 
- Record(float64(*minAckLevel), metrics.OperationTag(metrics.PersistenceNamespaceReplicationQueueScope)) - return nil -} - -func (q *namespaceReplicationQueueImpl) purgeProcessor( - ctx context.Context, -) error { - ctx = headers.SetCallerInfo(ctx, headers.SystemPreemptableCallerInfo) - - ticker := time.NewTicker(purgeInterval) - defer ticker.Stop() - - for { - select { - case <-q.done: - return nil - case <-ticker.C: - if q.ackLevelUpdated { - err := q.purgeAckedMessages(ctx) - if err != nil { - q.logger.Warn("Failed to purge acked namespace replication messages.", tag.Error(err)) - } else { - q.ackLevelUpdated = false - } - } - case <-q.ackNotificationChan: - q.ackLevelUpdated = true - } - } -} diff -Nru temporal-1.21.5-1/src/common/persistence/namespaceReplicationQueue_mock.go temporal-1.22.5/src/common/persistence/namespaceReplicationQueue_mock.go --- temporal-1.21.5-1/src/common/persistence/namespaceReplicationQueue_mock.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/namespaceReplicationQueue_mock.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,230 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Code generated by MockGen. DO NOT EDIT. -// Source: namespaceReplicationQueue.go - -// Package persistence is a generated GoMock package. -package persistence - -import ( - context "context" - reflect "reflect" - - gomock "github.com/golang/mock/gomock" - repication "go.temporal.io/server/api/replication/v1" -) - -// MockNamespaceReplicationQueue is a mock of NamespaceReplicationQueue interface. -type MockNamespaceReplicationQueue struct { - ctrl *gomock.Controller - recorder *MockNamespaceReplicationQueueMockRecorder -} - -// MockNamespaceReplicationQueueMockRecorder is the mock recorder for MockNamespaceReplicationQueue. -type MockNamespaceReplicationQueueMockRecorder struct { - mock *MockNamespaceReplicationQueue -} - -// NewMockNamespaceReplicationQueue creates a new mock instance. -func NewMockNamespaceReplicationQueue(ctrl *gomock.Controller) *MockNamespaceReplicationQueue { - mock := &MockNamespaceReplicationQueue{ctrl: ctrl} - mock.recorder = &MockNamespaceReplicationQueueMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. 
-func (m *MockNamespaceReplicationQueue) EXPECT() *MockNamespaceReplicationQueueMockRecorder { - return m.recorder -} - -// DeleteMessageFromDLQ mocks base method. -func (m *MockNamespaceReplicationQueue) DeleteMessageFromDLQ(ctx context.Context, messageID int64) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteMessageFromDLQ", ctx, messageID) - ret0, _ := ret[0].(error) - return ret0 -} - -// DeleteMessageFromDLQ indicates an expected call of DeleteMessageFromDLQ. -func (mr *MockNamespaceReplicationQueueMockRecorder) DeleteMessageFromDLQ(ctx, messageID interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteMessageFromDLQ", reflect.TypeOf((*MockNamespaceReplicationQueue)(nil).DeleteMessageFromDLQ), ctx, messageID) -} - -// GetAckLevels mocks base method. -func (m *MockNamespaceReplicationQueue) GetAckLevels(ctx context.Context) (map[string]int64, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetAckLevels", ctx) - ret0, _ := ret[0].(map[string]int64) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetAckLevels indicates an expected call of GetAckLevels. -func (mr *MockNamespaceReplicationQueueMockRecorder) GetAckLevels(ctx interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAckLevels", reflect.TypeOf((*MockNamespaceReplicationQueue)(nil).GetAckLevels), ctx) -} - -// GetDLQAckLevel mocks base method. -func (m *MockNamespaceReplicationQueue) GetDLQAckLevel(ctx context.Context) (int64, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetDLQAckLevel", ctx) - ret0, _ := ret[0].(int64) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetDLQAckLevel indicates an expected call of GetDLQAckLevel. -func (mr *MockNamespaceReplicationQueueMockRecorder) GetDLQAckLevel(ctx interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDLQAckLevel", reflect.TypeOf((*MockNamespaceReplicationQueue)(nil).GetDLQAckLevel), ctx) -} - -// GetMessagesFromDLQ mocks base method. -func (m *MockNamespaceReplicationQueue) GetMessagesFromDLQ(ctx context.Context, firstMessageID, lastMessageID int64, pageSize int, pageToken []byte) ([]*repication.ReplicationTask, []byte, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetMessagesFromDLQ", ctx, firstMessageID, lastMessageID, pageSize, pageToken) - ret0, _ := ret[0].([]*repication.ReplicationTask) - ret1, _ := ret[1].([]byte) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 -} - -// GetMessagesFromDLQ indicates an expected call of GetMessagesFromDLQ. -func (mr *MockNamespaceReplicationQueueMockRecorder) GetMessagesFromDLQ(ctx, firstMessageID, lastMessageID, pageSize, pageToken interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMessagesFromDLQ", reflect.TypeOf((*MockNamespaceReplicationQueue)(nil).GetMessagesFromDLQ), ctx, firstMessageID, lastMessageID, pageSize, pageToken) -} - -// GetReplicationMessages mocks base method. 
-func (m *MockNamespaceReplicationQueue) GetReplicationMessages(ctx context.Context, lastMessageID int64, maxCount int) ([]*repication.ReplicationTask, int64, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetReplicationMessages", ctx, lastMessageID, maxCount) - ret0, _ := ret[0].([]*repication.ReplicationTask) - ret1, _ := ret[1].(int64) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 -} - -// GetReplicationMessages indicates an expected call of GetReplicationMessages. -func (mr *MockNamespaceReplicationQueueMockRecorder) GetReplicationMessages(ctx, lastMessageID, maxCount interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetReplicationMessages", reflect.TypeOf((*MockNamespaceReplicationQueue)(nil).GetReplicationMessages), ctx, lastMessageID, maxCount) -} - -// Publish mocks base method. -func (m *MockNamespaceReplicationQueue) Publish(ctx context.Context, task *repication.ReplicationTask) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Publish", ctx, task) - ret0, _ := ret[0].(error) - return ret0 -} - -// Publish indicates an expected call of Publish. -func (mr *MockNamespaceReplicationQueueMockRecorder) Publish(ctx, task interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Publish", reflect.TypeOf((*MockNamespaceReplicationQueue)(nil).Publish), ctx, task) -} - -// PublishToDLQ mocks base method. -func (m *MockNamespaceReplicationQueue) PublishToDLQ(ctx context.Context, task *repication.ReplicationTask) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PublishToDLQ", ctx, task) - ret0, _ := ret[0].(error) - return ret0 -} - -// PublishToDLQ indicates an expected call of PublishToDLQ. -func (mr *MockNamespaceReplicationQueueMockRecorder) PublishToDLQ(ctx, task interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PublishToDLQ", reflect.TypeOf((*MockNamespaceReplicationQueue)(nil).PublishToDLQ), ctx, task) -} - -// RangeDeleteMessagesFromDLQ mocks base method. -func (m *MockNamespaceReplicationQueue) RangeDeleteMessagesFromDLQ(ctx context.Context, firstMessageID, lastMessageID int64) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RangeDeleteMessagesFromDLQ", ctx, firstMessageID, lastMessageID) - ret0, _ := ret[0].(error) - return ret0 -} - -// RangeDeleteMessagesFromDLQ indicates an expected call of RangeDeleteMessagesFromDLQ. -func (mr *MockNamespaceReplicationQueueMockRecorder) RangeDeleteMessagesFromDLQ(ctx, firstMessageID, lastMessageID interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RangeDeleteMessagesFromDLQ", reflect.TypeOf((*MockNamespaceReplicationQueue)(nil).RangeDeleteMessagesFromDLQ), ctx, firstMessageID, lastMessageID) -} - -// Start mocks base method. -func (m *MockNamespaceReplicationQueue) Start() { - m.ctrl.T.Helper() - m.ctrl.Call(m, "Start") -} - -// Start indicates an expected call of Start. -func (mr *MockNamespaceReplicationQueueMockRecorder) Start() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Start", reflect.TypeOf((*MockNamespaceReplicationQueue)(nil).Start)) -} - -// Stop mocks base method. -func (m *MockNamespaceReplicationQueue) Stop() { - m.ctrl.T.Helper() - m.ctrl.Call(m, "Stop") -} - -// Stop indicates an expected call of Stop. 
-func (mr *MockNamespaceReplicationQueueMockRecorder) Stop() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stop", reflect.TypeOf((*MockNamespaceReplicationQueue)(nil).Stop)) -} - -// UpdateAckLevel mocks base method. -func (m *MockNamespaceReplicationQueue) UpdateAckLevel(ctx context.Context, lastProcessedMessageID int64, clusterName string) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UpdateAckLevel", ctx, lastProcessedMessageID, clusterName) - ret0, _ := ret[0].(error) - return ret0 -} - -// UpdateAckLevel indicates an expected call of UpdateAckLevel. -func (mr *MockNamespaceReplicationQueueMockRecorder) UpdateAckLevel(ctx, lastProcessedMessageID, clusterName interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateAckLevel", reflect.TypeOf((*MockNamespaceReplicationQueue)(nil).UpdateAckLevel), ctx, lastProcessedMessageID, clusterName) -} - -// UpdateDLQAckLevel mocks base method. -func (m *MockNamespaceReplicationQueue) UpdateDLQAckLevel(ctx context.Context, lastProcessedMessageID int64) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UpdateDLQAckLevel", ctx, lastProcessedMessageID) - ret0, _ := ret[0].(error) - return ret0 -} - -// UpdateDLQAckLevel indicates an expected call of UpdateDLQAckLevel. -func (mr *MockNamespaceReplicationQueueMockRecorder) UpdateDLQAckLevel(ctx, lastProcessedMessageID interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateDLQAckLevel", reflect.TypeOf((*MockNamespaceReplicationQueue)(nil).UpdateDLQAckLevel), ctx, lastProcessedMessageID) -} diff -Nru temporal-1.21.5-1/src/common/persistence/namespace_replication_queue.go temporal-1.22.5/src/common/persistence/namespace_replication_queue.go --- temporal-1.21.5-1/src/common/persistence/namespace_replication_queue.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/namespace_replication_queue.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,441 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
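These hunks carry out the same rename for the namespace replication queue: namespaceReplicationQueue.go and namespaceReplicationQueue_mock.go are removed above and re-added below as namespace_replication_queue.go and namespace_replication_queue_mock.go, with the //go:generate destination and the mock's "// Source:" comment updated to match. The one substantive change visible in the move is that the NamespaceReplicationQueue interface now declares Start() and Stop() directly instead of embedding common.Daemon. For orientation, the retry shape used by updateAckLevelWithRetry is sketched below; this is not part of the diff, it assumes the persistence package context shown in the hunks, and retryOnConflict and attempt are invented names.

    // retryOnConflict repeats a queue-metadata update for as long as the store
    // reports a compare-and-set conflict, mirroring updateAckLevelWithRetry: the
    // ack levels are re-read inside each attempt, so losing the race is harmless.
    func retryOnConflict(attempt func() error) error {
        for {
            err := attempt()
            if _, conflict := err.(*ConditionFailedError); conflict {
                continue // another writer advanced the metadata first; try again
            }
            return err // nil on success, or any non-conflict failure
        }
    }

In the real code the attempt is updateAckLevel, which re-fetches the ack levels, ignores stale (already-acked) updates, and writes the new metadata blob back, so the loop ends once a write lands or a non-conflict error surfaces.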
+ +//go:generate mockgen -copyright_file ../../LICENSE -package $GOPACKAGE -source $GOFILE -destination namespace_replication_queue_mock.go + +package persistence + +import ( + "context" + "fmt" + "sync/atomic" + "time" + + commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" + + "go.temporal.io/server/api/persistence/v1" + "go.temporal.io/server/internal/goro" + + replicationspb "go.temporal.io/server/api/replication/v1" + "go.temporal.io/server/common" + "go.temporal.io/server/common/convert" + "go.temporal.io/server/common/headers" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/log/tag" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/persistence/serialization" +) + +const ( + purgeInterval = 5 * time.Minute + localNamespaceReplicationCluster = "namespaceReplication" +) + +var _ NamespaceReplicationQueue = (*namespaceReplicationQueueImpl)(nil) + +// NewNamespaceReplicationQueue creates a new NamespaceReplicationQueue instance +func NewNamespaceReplicationQueue( + queue Queue, + serializer serialization.Serializer, + clusterName string, + metricsHandler metrics.Handler, + logger log.Logger, +) (NamespaceReplicationQueue, error) { + + blob, err := serializer.QueueMetadataToBlob( + &persistence.QueueMetadata{ + ClusterAckLevels: make(map[string]int64), + }, enumspb.ENCODING_TYPE_PROTO3) + if err != nil { + return nil, err + } + err = queue.Init(context.TODO(), blob) + if err != nil { + return nil, err + } + + return &namespaceReplicationQueueImpl{ + queue: queue, + clusterName: clusterName, + metricsHandler: metricsHandler, + logger: logger, + ackNotificationChan: make(chan bool), + done: make(chan bool), + status: common.DaemonStatusInitialized, + serializer: serializer, + }, nil +} + +type ( + namespaceReplicationQueueImpl struct { + queue Queue + clusterName string + metricsHandler metrics.Handler + logger log.Logger + ackLevelUpdated bool + ackNotificationChan chan bool + done chan bool + status int32 + gorogrp goro.Group + serializer serialization.Serializer + } + + // NamespaceReplicationQueue is used to publish and list namespace replication tasks + NamespaceReplicationQueue interface { + Publish(ctx context.Context, task *replicationspb.ReplicationTask) error + GetReplicationMessages( + ctx context.Context, + lastMessageID int64, + maxCount int, + ) ([]*replicationspb.ReplicationTask, int64, error) + UpdateAckLevel(ctx context.Context, lastProcessedMessageID int64, clusterName string) error + GetAckLevels(ctx context.Context) (map[string]int64, error) + + PublishToDLQ(ctx context.Context, task *replicationspb.ReplicationTask) error + GetMessagesFromDLQ( + ctx context.Context, + firstMessageID int64, + lastMessageID int64, + pageSize int, + pageToken []byte, + ) ([]*replicationspb.ReplicationTask, []byte, error) + UpdateDLQAckLevel(ctx context.Context, lastProcessedMessageID int64) error + GetDLQAckLevel(ctx context.Context) (int64, error) + + RangeDeleteMessagesFromDLQ(ctx context.Context, firstMessageID int64, lastMessageID int64) error + DeleteMessageFromDLQ(ctx context.Context, messageID int64) error + Start() + Stop() + } +) + +func (q *namespaceReplicationQueueImpl) Start() { + if !atomic.CompareAndSwapInt32(&q.status, common.DaemonStatusInitialized, common.DaemonStatusStarted) { + return + } + + q.gorogrp.Go(q.purgeProcessor) +} + +func (q *namespaceReplicationQueueImpl) Stop() { + if !atomic.CompareAndSwapInt32(&q.status, common.DaemonStatusStarted, common.DaemonStatusStopped) { + return + } + 
close(q.done) + + q.gorogrp.Cancel() +} + +func (q *namespaceReplicationQueueImpl) Publish(ctx context.Context, task *replicationspb.ReplicationTask) error { + blob, err := q.serializer.ReplicationTaskToBlob(task, enumspb.ENCODING_TYPE_PROTO3) + if err != nil { + return fmt.Errorf("failed to encode message: %v", err) + } + return q.queue.EnqueueMessage(ctx, *blob) +} + +func (q *namespaceReplicationQueueImpl) PublishToDLQ(ctx context.Context, task *replicationspb.ReplicationTask) error { + blob, err := q.serializer.ReplicationTaskToBlob(task, enumspb.ENCODING_TYPE_PROTO3) + if err != nil { + return fmt.Errorf("failed to encode message: %v", err) + } + messageID, err := q.queue.EnqueueMessageToDLQ(ctx, *blob) + if err != nil { + return err + } + + q.metricsHandler.Gauge(metrics.NamespaceReplicationDLQMaxLevelGauge.GetMetricName()). + Record(float64(messageID), metrics.OperationTag(metrics.PersistenceNamespaceReplicationQueueScope)) + return nil +} + +func (q *namespaceReplicationQueueImpl) GetReplicationMessages( + ctx context.Context, + lastMessageID int64, + pageSize int, +) ([]*replicationspb.ReplicationTask, int64, error) { + + messages, err := q.queue.ReadMessages(ctx, lastMessageID, pageSize) + if err != nil { + return nil, lastMessageID, err + } + + replicationTasks := make([]*replicationspb.ReplicationTask, 0, len(messages)) + for _, message := range messages { + replicationTask, err := q.serializer.ReplicationTaskFromBlob(NewDataBlob(message.Data, message.Encoding)) + if err != nil { + return nil, lastMessageID, fmt.Errorf("failed to decode task: %v", err) + } + + lastMessageID = message.ID + replicationTasks = append(replicationTasks, replicationTask) + } + + return replicationTasks, lastMessageID, nil +} + +func (q *namespaceReplicationQueueImpl) UpdateAckLevel( + ctx context.Context, + lastProcessedMessageID int64, + clusterName string, +) error { + return q.updateAckLevelWithRetry(ctx, lastProcessedMessageID, clusterName, false) +} + +func (q *namespaceReplicationQueueImpl) updateAckLevelWithRetry( + ctx context.Context, + lastProcessedMessageID int64, + clusterName string, + isDLQ bool, +) error { +conditionFailedRetry: + for { + err := q.updateAckLevel(ctx, lastProcessedMessageID, clusterName, isDLQ) + switch err.(type) { + case *ConditionFailedError: + continue conditionFailedRetry + } + + return err + } +} + +func (q *namespaceReplicationQueueImpl) updateAckLevel( + ctx context.Context, + lastProcessedMessageID int64, + clusterName string, + isDLQ bool, +) error { + var ackLevelErr error + var internalMetadata *InternalQueueMetadata + if isDLQ { + internalMetadata, ackLevelErr = q.queue.GetDLQAckLevels(ctx) + } else { + internalMetadata, ackLevelErr = q.queue.GetAckLevels(ctx) + } + + if ackLevelErr != nil { + return ackLevelErr + } + + ackLevels, err := q.ackLevelsFromBlob(internalMetadata.Blob) + if err != nil { + return err + } + + // Ignore possibly delayed message + if ack, ok := ackLevels[clusterName]; ok && ack > lastProcessedMessageID { + return nil + } + + // TODO remove this block in 1.12.x + delete(ackLevels, "") + // TODO remove this block in 1.12.x + + // update ack level + ackLevels[clusterName] = lastProcessedMessageID + blob, err := q.serializer.QueueMetadataToBlob(&persistence.QueueMetadata{ + ClusterAckLevels: ackLevels, + }, enumspb.ENCODING_TYPE_PROTO3) + if err != nil { + return err + } + + internalMetadata.Blob = blob + if isDLQ { + err = q.queue.UpdateDLQAckLevel(ctx, internalMetadata) + } else { + err = q.queue.UpdateAckLevel(ctx, internalMetadata) + 
} + if err != nil { + return fmt.Errorf("failed to update ack level: %v", err) + } + + select { + case q.ackNotificationChan <- true: + default: + } + + return nil +} + +func (q *namespaceReplicationQueueImpl) GetAckLevels( + ctx context.Context, +) (map[string]int64, error) { + metadata, err := q.queue.GetAckLevels(ctx) + if err != nil { + return nil, err + } + return q.ackLevelsFromBlob(metadata.Blob) +} + +func (q *namespaceReplicationQueueImpl) ackLevelsFromBlob(blob *commonpb.DataBlob) (map[string]int64, error) { + if blob == nil { + return make(map[string]int64), nil + } + + metadata, err := q.serializer.QueueMetadataFromBlob(blob) + if err != nil { + return nil, err + } + ackLevels := metadata.ClusterAckLevels + if ackLevels == nil { + ackLevels = make(map[string]int64) + } + return ackLevels, nil +} + +func (q *namespaceReplicationQueueImpl) GetMessagesFromDLQ( + ctx context.Context, + firstMessageID int64, + lastMessageID int64, + pageSize int, + pageToken []byte, +) ([]*replicationspb.ReplicationTask, []byte, error) { + + messages, token, err := q.queue.ReadMessagesFromDLQ(ctx, firstMessageID, lastMessageID, pageSize, pageToken) + if err != nil { + return nil, nil, err + } + + var replicationTasks []*replicationspb.ReplicationTask + for _, message := range messages { + replicationTask, err := q.serializer.ReplicationTaskFromBlob(NewDataBlob(message.Data, message.Encoding)) + if err != nil { + return nil, nil, fmt.Errorf("failed to decode dlq task: %v", err) + } + + // Overwrite to local cluster message id + replicationTask.SourceTaskId = message.ID + replicationTasks = append(replicationTasks, replicationTask) + } + + return replicationTasks, token, nil +} + +func (q *namespaceReplicationQueueImpl) UpdateDLQAckLevel( + ctx context.Context, + lastProcessedMessageID int64, +) error { + return q.updateAckLevelWithRetry(ctx, lastProcessedMessageID, localNamespaceReplicationCluster, true) +} + +func (q *namespaceReplicationQueueImpl) GetDLQAckLevel( + ctx context.Context, +) (int64, error) { + metadata, err := q.queue.GetDLQAckLevels(ctx) + if err != nil { + return EmptyQueueMessageID, err + } + dlqMetadata, err := q.ackLevelsFromBlob(metadata.Blob) + if err != nil { + return EmptyQueueMessageID, err + } + + ackLevel, ok := dlqMetadata[localNamespaceReplicationCluster] + if !ok { + return EmptyQueueMessageID, nil + } + return ackLevel, nil +} + +func (q *namespaceReplicationQueueImpl) RangeDeleteMessagesFromDLQ( + ctx context.Context, + firstMessageID int64, + lastMessageID int64, +) error { + + return q.queue.RangeDeleteMessagesFromDLQ( + ctx, + firstMessageID, + lastMessageID, + ) +} + +func (q *namespaceReplicationQueueImpl) DeleteMessageFromDLQ( + ctx context.Context, + messageID int64, +) error { + + return q.queue.DeleteMessageFromDLQ(ctx, messageID) +} + +func (q *namespaceReplicationQueueImpl) purgeAckedMessages( + ctx context.Context, +) error { + ackLevelByCluster, err := q.GetAckLevels(ctx) + if err != nil { + return fmt.Errorf("failed to purge messages: %v", err) + } + + if len(ackLevelByCluster) == 0 { + return nil + } + + var minAckLevel *int64 + for _, ackLevel := range ackLevelByCluster { + if minAckLevel == nil || ackLevel < *minAckLevel { + minAckLevel = convert.Int64Ptr(ackLevel) + } + } + if minAckLevel == nil { + return nil + } + + err = q.queue.DeleteMessagesBefore(ctx, *minAckLevel) + if err != nil { + return fmt.Errorf("failed to purge messages: %v", err) + } + q.metricsHandler.Gauge(metrics.NamespaceReplicationTaskAckLevelGauge.GetMetricName()). 
+ Record(float64(*minAckLevel), metrics.OperationTag(metrics.PersistenceNamespaceReplicationQueueScope)) + return nil +} + +func (q *namespaceReplicationQueueImpl) purgeProcessor( + ctx context.Context, +) error { + ctx = headers.SetCallerInfo(ctx, headers.SystemPreemptableCallerInfo) + + ticker := time.NewTicker(purgeInterval) + defer ticker.Stop() + + for { + select { + case <-q.done: + return nil + case <-ticker.C: + if q.ackLevelUpdated { + err := q.purgeAckedMessages(ctx) + if err != nil { + q.logger.Warn("Failed to purge acked namespace replication messages.", tag.Error(err)) + } else { + q.ackLevelUpdated = false + } + } + case <-q.ackNotificationChan: + q.ackLevelUpdated = true + } + } +} diff -Nru temporal-1.21.5-1/src/common/persistence/namespace_replication_queue_mock.go temporal-1.22.5/src/common/persistence/namespace_replication_queue_mock.go --- temporal-1.21.5-1/src/common/persistence/namespace_replication_queue_mock.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/namespace_replication_queue_mock.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,230 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Code generated by MockGen. DO NOT EDIT. +// Source: namespace_replication_queue.go + +// Package persistence is a generated GoMock package. +package persistence + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + repication "go.temporal.io/server/api/replication/v1" +) + +// MockNamespaceReplicationQueue is a mock of NamespaceReplicationQueue interface. +type MockNamespaceReplicationQueue struct { + ctrl *gomock.Controller + recorder *MockNamespaceReplicationQueueMockRecorder +} + +// MockNamespaceReplicationQueueMockRecorder is the mock recorder for MockNamespaceReplicationQueue. +type MockNamespaceReplicationQueueMockRecorder struct { + mock *MockNamespaceReplicationQueue +} + +// NewMockNamespaceReplicationQueue creates a new mock instance. +func NewMockNamespaceReplicationQueue(ctrl *gomock.Controller) *MockNamespaceReplicationQueue { + mock := &MockNamespaceReplicationQueue{ctrl: ctrl} + mock.recorder = &MockNamespaceReplicationQueueMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. 
+func (m *MockNamespaceReplicationQueue) EXPECT() *MockNamespaceReplicationQueueMockRecorder { + return m.recorder +} + +// DeleteMessageFromDLQ mocks base method. +func (m *MockNamespaceReplicationQueue) DeleteMessageFromDLQ(ctx context.Context, messageID int64) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteMessageFromDLQ", ctx, messageID) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteMessageFromDLQ indicates an expected call of DeleteMessageFromDLQ. +func (mr *MockNamespaceReplicationQueueMockRecorder) DeleteMessageFromDLQ(ctx, messageID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteMessageFromDLQ", reflect.TypeOf((*MockNamespaceReplicationQueue)(nil).DeleteMessageFromDLQ), ctx, messageID) +} + +// GetAckLevels mocks base method. +func (m *MockNamespaceReplicationQueue) GetAckLevels(ctx context.Context) (map[string]int64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAckLevels", ctx) + ret0, _ := ret[0].(map[string]int64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAckLevels indicates an expected call of GetAckLevels. +func (mr *MockNamespaceReplicationQueueMockRecorder) GetAckLevels(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAckLevels", reflect.TypeOf((*MockNamespaceReplicationQueue)(nil).GetAckLevels), ctx) +} + +// GetDLQAckLevel mocks base method. +func (m *MockNamespaceReplicationQueue) GetDLQAckLevel(ctx context.Context) (int64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetDLQAckLevel", ctx) + ret0, _ := ret[0].(int64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetDLQAckLevel indicates an expected call of GetDLQAckLevel. +func (mr *MockNamespaceReplicationQueueMockRecorder) GetDLQAckLevel(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDLQAckLevel", reflect.TypeOf((*MockNamespaceReplicationQueue)(nil).GetDLQAckLevel), ctx) +} + +// GetMessagesFromDLQ mocks base method. +func (m *MockNamespaceReplicationQueue) GetMessagesFromDLQ(ctx context.Context, firstMessageID, lastMessageID int64, pageSize int, pageToken []byte) ([]*repication.ReplicationTask, []byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetMessagesFromDLQ", ctx, firstMessageID, lastMessageID, pageSize, pageToken) + ret0, _ := ret[0].([]*repication.ReplicationTask) + ret1, _ := ret[1].([]byte) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// GetMessagesFromDLQ indicates an expected call of GetMessagesFromDLQ. +func (mr *MockNamespaceReplicationQueueMockRecorder) GetMessagesFromDLQ(ctx, firstMessageID, lastMessageID, pageSize, pageToken interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMessagesFromDLQ", reflect.TypeOf((*MockNamespaceReplicationQueue)(nil).GetMessagesFromDLQ), ctx, firstMessageID, lastMessageID, pageSize, pageToken) +} + +// GetReplicationMessages mocks base method. 
+func (m *MockNamespaceReplicationQueue) GetReplicationMessages(ctx context.Context, lastMessageID int64, maxCount int) ([]*repication.ReplicationTask, int64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetReplicationMessages", ctx, lastMessageID, maxCount) + ret0, _ := ret[0].([]*repication.ReplicationTask) + ret1, _ := ret[1].(int64) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// GetReplicationMessages indicates an expected call of GetReplicationMessages. +func (mr *MockNamespaceReplicationQueueMockRecorder) GetReplicationMessages(ctx, lastMessageID, maxCount interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetReplicationMessages", reflect.TypeOf((*MockNamespaceReplicationQueue)(nil).GetReplicationMessages), ctx, lastMessageID, maxCount) +} + +// Publish mocks base method. +func (m *MockNamespaceReplicationQueue) Publish(ctx context.Context, task *repication.ReplicationTask) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Publish", ctx, task) + ret0, _ := ret[0].(error) + return ret0 +} + +// Publish indicates an expected call of Publish. +func (mr *MockNamespaceReplicationQueueMockRecorder) Publish(ctx, task interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Publish", reflect.TypeOf((*MockNamespaceReplicationQueue)(nil).Publish), ctx, task) +} + +// PublishToDLQ mocks base method. +func (m *MockNamespaceReplicationQueue) PublishToDLQ(ctx context.Context, task *repication.ReplicationTask) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PublishToDLQ", ctx, task) + ret0, _ := ret[0].(error) + return ret0 +} + +// PublishToDLQ indicates an expected call of PublishToDLQ. +func (mr *MockNamespaceReplicationQueueMockRecorder) PublishToDLQ(ctx, task interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PublishToDLQ", reflect.TypeOf((*MockNamespaceReplicationQueue)(nil).PublishToDLQ), ctx, task) +} + +// RangeDeleteMessagesFromDLQ mocks base method. +func (m *MockNamespaceReplicationQueue) RangeDeleteMessagesFromDLQ(ctx context.Context, firstMessageID, lastMessageID int64) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RangeDeleteMessagesFromDLQ", ctx, firstMessageID, lastMessageID) + ret0, _ := ret[0].(error) + return ret0 +} + +// RangeDeleteMessagesFromDLQ indicates an expected call of RangeDeleteMessagesFromDLQ. +func (mr *MockNamespaceReplicationQueueMockRecorder) RangeDeleteMessagesFromDLQ(ctx, firstMessageID, lastMessageID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RangeDeleteMessagesFromDLQ", reflect.TypeOf((*MockNamespaceReplicationQueue)(nil).RangeDeleteMessagesFromDLQ), ctx, firstMessageID, lastMessageID) +} + +// Start mocks base method. +func (m *MockNamespaceReplicationQueue) Start() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Start") +} + +// Start indicates an expected call of Start. +func (mr *MockNamespaceReplicationQueueMockRecorder) Start() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Start", reflect.TypeOf((*MockNamespaceReplicationQueue)(nil).Start)) +} + +// Stop mocks base method. +func (m *MockNamespaceReplicationQueue) Stop() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Stop") +} + +// Stop indicates an expected call of Stop. 
+func (mr *MockNamespaceReplicationQueueMockRecorder) Stop() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stop", reflect.TypeOf((*MockNamespaceReplicationQueue)(nil).Stop)) +} + +// UpdateAckLevel mocks base method. +func (m *MockNamespaceReplicationQueue) UpdateAckLevel(ctx context.Context, lastProcessedMessageID int64, clusterName string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateAckLevel", ctx, lastProcessedMessageID, clusterName) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateAckLevel indicates an expected call of UpdateAckLevel. +func (mr *MockNamespaceReplicationQueueMockRecorder) UpdateAckLevel(ctx, lastProcessedMessageID, clusterName interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateAckLevel", reflect.TypeOf((*MockNamespaceReplicationQueue)(nil).UpdateAckLevel), ctx, lastProcessedMessageID, clusterName) +} + +// UpdateDLQAckLevel mocks base method. +func (m *MockNamespaceReplicationQueue) UpdateDLQAckLevel(ctx context.Context, lastProcessedMessageID int64) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateDLQAckLevel", ctx, lastProcessedMessageID) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateDLQAckLevel indicates an expected call of UpdateDLQAckLevel. +func (mr *MockNamespaceReplicationQueueMockRecorder) UpdateDLQAckLevel(ctx, lastProcessedMessageID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateDLQAckLevel", reflect.TypeOf((*MockNamespaceReplicationQueue)(nil).UpdateDLQAckLevel), ctx, lastProcessedMessageID) +} diff -Nru temporal-1.21.5-1/src/common/persistence/operationModeValidator.go temporal-1.22.5/src/common/persistence/operationModeValidator.go --- temporal-1.21.5-1/src/common/persistence/operationModeValidator.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/operationModeValidator.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,413 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package persistence - -import ( - "fmt" - - "go.temporal.io/api/serviceerror" - - enumsspb "go.temporal.io/server/api/enums/v1" -) - -// NOTE: when modifying this file, plz make each case clear, -// do not combine cases together. 
-// The idea for this file is to test whether current record -// points to a zombie record. - -// ValidateCreateWorkflowModeState validate workflow creation mode & workflow state -func ValidateCreateWorkflowModeState( - mode CreateWorkflowMode, - newWorkflowSnapshot WorkflowSnapshot, -) error { - - workflowState := newWorkflowSnapshot.ExecutionState.State - if err := checkWorkflowState(workflowState); err != nil { - return err - } - - switch mode { - case CreateWorkflowModeBrandNew, - CreateWorkflowModeUpdateCurrent: - if workflowState == enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE { - return newInvalidCreateWorkflowMode( - mode, - workflowState, - ) - } - return nil - - case CreateWorkflowModeBypassCurrent: - if workflowState == enumsspb.WORKFLOW_EXECUTION_STATE_CREATED || - workflowState == enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING { - return newInvalidCreateWorkflowMode( - mode, - workflowState, - ) - } - return nil - - default: - return serviceerror.NewInternal(fmt.Sprintf("unknown mode: %v", mode)) - } -} - -// ValidateUpdateWorkflowModeState validate workflow update mode & workflow state -func ValidateUpdateWorkflowModeState( - mode UpdateWorkflowMode, - currentWorkflowMutation WorkflowMutation, - newWorkflowSnapshot *WorkflowSnapshot, -) error { - - currentWorkflowState := currentWorkflowMutation.ExecutionState.State - if err := checkWorkflowState(currentWorkflowState); err != nil { - return err - } - var newWorkflowState *enumsspb.WorkflowExecutionState - if newWorkflowSnapshot != nil { - newWorkflowState = &newWorkflowSnapshot.ExecutionState.State - if err := checkWorkflowState(*newWorkflowState); err != nil { - return err - } - } - - switch mode { - case UpdateWorkflowModeUpdateCurrent: - // update current record - // 1. current workflow only -> - // current workflow cannot be zombie - // 2. current workflow & new workflow -> - // current workflow cannot be created / running, - // new workflow cannot be zombie - - // case 1 - if newWorkflowState == nil { - if currentWorkflowState == enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE { - return newInvalidUpdateWorkflowMode(mode, currentWorkflowState) - } - return nil - } - - // case 2 - if currentWorkflowState == enumsspb.WORKFLOW_EXECUTION_STATE_CREATED || - currentWorkflowState == enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING || - *newWorkflowState == enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE { - return newInvalidUpdateWorkflowWithNewMode(mode, currentWorkflowState, *newWorkflowState) - } - return nil - - case UpdateWorkflowModeBypassCurrent: - // bypass current record - // 1. current workflow only -> - // current workflow cannot be created / running - // 2. 
current workflow & new workflow -> - // current workflow cannot be created / running, - // new workflow cannot be created / running - - // case 1 - if newWorkflowState == nil { - if currentWorkflowState == enumsspb.WORKFLOW_EXECUTION_STATE_CREATED || - currentWorkflowState == enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING { - return newInvalidUpdateWorkflowMode(mode, currentWorkflowState) - } - return nil - } - - // case 2 - if currentWorkflowState == enumsspb.WORKFLOW_EXECUTION_STATE_CREATED || - currentWorkflowState == enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING || - *newWorkflowState == enumsspb.WORKFLOW_EXECUTION_STATE_CREATED || - *newWorkflowState == enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING { - return newInvalidUpdateWorkflowWithNewMode( - mode, - currentWorkflowState, - *newWorkflowState, - ) - } - return nil - - default: - return serviceerror.NewInternal(fmt.Sprintf("unknown mode: %v", mode)) - } -} - -// ValidateConflictResolveWorkflowModeState validate workflow conflict resolve mode & workflow state -func ValidateConflictResolveWorkflowModeState( - mode ConflictResolveWorkflowMode, - resetWorkflowSnapshot WorkflowSnapshot, - newWorkflowSnapshot *WorkflowSnapshot, - currentWorkflowMutation *WorkflowMutation, -) error { - - resetWorkflowState := resetWorkflowSnapshot.ExecutionState.State - if err := checkWorkflowState(resetWorkflowState); err != nil { - return err - } - var newWorkflowState *enumsspb.WorkflowExecutionState - if newWorkflowSnapshot != nil { - newWorkflowState = &newWorkflowSnapshot.ExecutionState.State - if err := checkWorkflowState(*newWorkflowState); err != nil { - return err - } - } - var currentWorkflowState *enumsspb.WorkflowExecutionState - if currentWorkflowMutation != nil { - currentWorkflowState = ¤tWorkflowMutation.ExecutionState.State - if err := checkWorkflowState(*currentWorkflowState); err != nil { - return err - } - } - - switch mode { - case ConflictResolveWorkflowModeUpdateCurrent: - // update current record - // 1. reset workflow only -> - // reset workflow cannot be zombie - // 2. reset workflow & new workflow -> - // reset workflow cannot be created / running / zombie, - // new workflow cannot be zombie - // 3. current workflow & reset workflow -> - // current workflow cannot be created / running, - // reset workflow cannot be zombie - // 4. 
current workflow & reset workflow & new workflow -> - // current workflow cannot be created / running, - // reset workflow cannot be created / running / zombie, - // new workflow cannot be zombie - - // TODO remove case 1 & 2 support once 2DC is deprecated - // it is ok that currentWorkflowMutation is null, only for 2 DC case - // NDC should always require current workflow for CAS - // Note: current workflow mutation can be in zombie state, for the update - - // case 1 & 2 - if currentWorkflowState == nil { - // case 1 - if newWorkflowState == nil { - if resetWorkflowState == enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE { - return newInvalidConflictResolveWorkflowMode( - mode, - resetWorkflowState, - ) - } - return nil - } - - // case 2 - if resetWorkflowState == enumsspb.WORKFLOW_EXECUTION_STATE_CREATED || - resetWorkflowState == enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING || - resetWorkflowState == enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE || - *newWorkflowState == enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE { - return newInvalidConflictResolveWorkflowWithNewMode( - mode, - resetWorkflowState, - *newWorkflowState, - ) - } - return nil - } - - // case 3 & 4 - // case 3 - if newWorkflowState == nil { - if *currentWorkflowState == enumsspb.WORKFLOW_EXECUTION_STATE_CREATED || - *currentWorkflowState == enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING || - resetWorkflowState == enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE { - return newInvalidConflictResolveWorkflowWithCurrentMode( - mode, - resetWorkflowState, - *currentWorkflowState, - ) - } - return nil - } - - // case 4 - if *currentWorkflowState == enumsspb.WORKFLOW_EXECUTION_STATE_CREATED || - *currentWorkflowState == enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING || - resetWorkflowState == enumsspb.WORKFLOW_EXECUTION_STATE_CREATED || - resetWorkflowState == enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING || - resetWorkflowState == enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE || - *newWorkflowState == enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE { - return newInvalidConflictResolveWorkflowWithCurrentWithNewMode( - mode, - resetWorkflowState, - *newWorkflowState, - *currentWorkflowState, - ) - } - return nil - - case ConflictResolveWorkflowModeBypassCurrent: - // bypass current record - // * current workflow cannot be set - // 1. reset workflow only -> - // reset workflow cannot be created / running - // 2. 
reset workflow & new workflow -> - // reset workflow cannot be created / running / zombie, - // new workflow cannot be created / running / completed - - // precondition - if currentWorkflowMutation != nil { - return serviceerror.NewInternal(fmt.Sprintf("Invalid workflow conflict resolve mode %v, encountered current workflow", mode)) - } - - // case 1 - if newWorkflowState == nil { - if resetWorkflowState == enumsspb.WORKFLOW_EXECUTION_STATE_CREATED || - resetWorkflowState == enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING { - return newInvalidConflictResolveWorkflowMode( - mode, - resetWorkflowState, - ) - } - return nil - } - - // case 2 - if resetWorkflowState == enumsspb.WORKFLOW_EXECUTION_STATE_CREATED || - resetWorkflowState == enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING || - resetWorkflowState == enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE || - *newWorkflowState == enumsspb.WORKFLOW_EXECUTION_STATE_CREATED || - *newWorkflowState == enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING || - *newWorkflowState == enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED { - return newInvalidConflictResolveWorkflowWithNewMode( - mode, - resetWorkflowState, - *newWorkflowState, - ) - } - return nil - - default: - return serviceerror.NewInternal(fmt.Sprintf("unknown mode: %v", mode)) - } -} - -func checkWorkflowState(state enumsspb.WorkflowExecutionState) error { - switch state { - case enumsspb.WORKFLOW_EXECUTION_STATE_CREATED, - enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, - enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE, - enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED, - enumsspb.WORKFLOW_EXECUTION_STATE_CORRUPTED: - return nil - default: - return serviceerror.NewInternal(fmt.Sprintf("unknown workflow state: %v", state)) - } -} - -func newInvalidCreateWorkflowMode( - mode CreateWorkflowMode, - workflowState enumsspb.WorkflowExecutionState, -) error { - return serviceerror.NewInternal(fmt.Sprintf( - "Invalid workflow create mode %v, state: %v", - mode, - workflowState, - ), - ) -} - -func newInvalidUpdateWorkflowMode( - mode UpdateWorkflowMode, - currentWorkflowState enumsspb.WorkflowExecutionState, -) error { - return serviceerror.NewInternal(fmt.Sprintf( - "Invalid workflow update mode %v, state: %v", - mode, - currentWorkflowState, - ), - ) -} - -func newInvalidUpdateWorkflowWithNewMode( - mode UpdateWorkflowMode, - currentWorkflowState enumsspb.WorkflowExecutionState, - newWorkflowState enumsspb.WorkflowExecutionState, -) error { - return serviceerror.NewInternal(fmt.Sprintf( - "Invalid workflow update mode %v, current state: %v, new state: %v", - mode, - currentWorkflowState, - newWorkflowState, - ), - ) -} - -func newInvalidConflictResolveWorkflowMode( - mode ConflictResolveWorkflowMode, - resetWorkflowState enumsspb.WorkflowExecutionState, -) error { - return serviceerror.NewInternal(fmt.Sprintf( - "Invalid workflow conflict resolve mode %v, reset state: %v", - mode, - resetWorkflowState, - ), - ) -} - -func newInvalidConflictResolveWorkflowWithNewMode( - mode ConflictResolveWorkflowMode, - resetWorkflowState enumsspb.WorkflowExecutionState, - newWorkflowState enumsspb.WorkflowExecutionState, -) error { - return serviceerror.NewInternal(fmt.Sprintf( - "Invalid workflow conflict resolve mode %v, reset state: %v, new state: %v", - mode, - resetWorkflowState, - newWorkflowState, - ), - ) -} - -func newInvalidConflictResolveWorkflowWithCurrentMode( - mode ConflictResolveWorkflowMode, - resetWorkflowState enumsspb.WorkflowExecutionState, - currentWorkflowState enumsspb.WorkflowExecutionState, -) error { - return 
serviceerror.NewInternal(fmt.Sprintf( - "Invalid workflow conflict resolve mode %v, reset state: %v, current state: %v", - mode, - resetWorkflowState, - currentWorkflowState, - ), - ) -} - -func newInvalidConflictResolveWorkflowWithCurrentWithNewMode( - mode ConflictResolveWorkflowMode, - resetWorkflowState enumsspb.WorkflowExecutionState, - newWorkflowState enumsspb.WorkflowExecutionState, - currentWorkflowState enumsspb.WorkflowExecutionState, -) error { - return serviceerror.NewInternal(fmt.Sprintf( - "Invalid workflow conflict resolve mode %v, reset state: %v, new state: %v, current state: %v", - mode, - resetWorkflowState, - newWorkflowState, - currentWorkflowState, - ), - ) -} diff -Nru temporal-1.21.5-1/src/common/persistence/operationModeValidator_test.go temporal-1.22.5/src/common/persistence/operationModeValidator_test.go --- temporal-1.21.5-1/src/common/persistence/operationModeValidator_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/operationModeValidator_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,420 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
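Both the removed test file below and its snake_case replacement later in this diff use the same testify layout: a struct that embeds suite.Suite, per-state expectations kept in map[enumsspb.WorkflowExecutionState]bool tables, and a single TestXxx function that hands the suite to suite.Run. The following is a minimal, hedged sketch of that shape; ExampleSuite and exampleValidate are illustrative stand-ins, not Temporal code.

package persistence

import (
	"fmt"
	"testing"

	"github.com/stretchr/testify/suite"
)

// exampleValidate is a stand-in for the real Validate*ModeState functions,
// used only to show the shape of the table-driven tests in this diff.
func exampleValidate(state string) error {
	if state == "zombie" {
		return fmt.Errorf("invalid state: %v", state)
	}
	return nil
}

// ExampleSuite mirrors the structure of validateOperationWorkflowModeStateSuite.
type ExampleSuite struct {
	suite.Suite
}

// TestExampleSuite is the single go-test entry point; suite.Run invokes every
// method on the suite whose name starts with "Test".
func TestExampleSuite(t *testing.T) {
	suite.Run(t, new(ExampleSuite))
}

// TestTableDriven walks a state->expectError map, the same pattern the real
// tests use with map[enumsspb.WorkflowExecutionState]bool.
func (s *ExampleSuite) TestTableDriven() {
	stateToError := map[string]bool{
		"created": false,
		"running": false,
		"zombie":  true,
	}
	for state, expectError := range stateToError {
		err := exampleValidate(state)
		if expectError {
			s.Error(err)
		} else {
			s.NoError(err)
		}
	}
}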
- -package persistence - -import ( - "fmt" - "testing" - - "github.com/stretchr/testify/suite" - - enumsspb "go.temporal.io/server/api/enums/v1" - persistencespb "go.temporal.io/server/api/persistence/v1" -) - -type ( - validateOperationWorkflowModeStateSuite struct { - suite.Suite - } -) - -func TestValidateOperationWorkflowModeStateSuite(t *testing.T) { - s := new(validateOperationWorkflowModeStateSuite) - suite.Run(t, s) -} - -func (s *validateOperationWorkflowModeStateSuite) SetupSuite() { -} - -func (s *validateOperationWorkflowModeStateSuite) TearDownSuite() { - -} - -func (s *validateOperationWorkflowModeStateSuite) SetupTest() { - -} - -func (s *validateOperationWorkflowModeStateSuite) TearDownTest() { - -} - -func (s *validateOperationWorkflowModeStateSuite) TestCreateMode_UpdateCurrent() { - - stateToError := map[enumsspb.WorkflowExecutionState]bool{ - enumsspb.WORKFLOW_EXECUTION_STATE_CREATED: false, - enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING: false, - enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED: false, - enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE: true, - } - - creatModes := []CreateWorkflowMode{ - CreateWorkflowModeBrandNew, - CreateWorkflowModeUpdateCurrent, - } - - for state, expectError := range stateToError { - testSnapshot := s.newTestWorkflowSnapshot(state) - for _, createMode := range creatModes { - err := ValidateCreateWorkflowModeState(createMode, testSnapshot) - if !expectError { - s.NoError(err, err) - } else { - s.Error(err, err) - } - } - } -} - -func (s *validateOperationWorkflowModeStateSuite) TestCreateMode_BypassCurrent() { - - stateToError := map[enumsspb.WorkflowExecutionState]bool{ - enumsspb.WORKFLOW_EXECUTION_STATE_CREATED: true, - enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING: true, - enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED: false, - enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE: false, - } - - for state, expectError := range stateToError { - testSnapshot := s.newTestWorkflowSnapshot(state) - err := ValidateCreateWorkflowModeState(CreateWorkflowModeBypassCurrent, testSnapshot) - if !expectError { - s.NoError(err, err) - } else { - s.Error(err, err) - } - } -} - -func (s *validateOperationWorkflowModeStateSuite) TestUpdateMode_UpdateCurrent() { - - // only current workflow - stateToError := map[enumsspb.WorkflowExecutionState]bool{ - enumsspb.WORKFLOW_EXECUTION_STATE_CREATED: false, - enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING: false, - enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED: false, - enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE: true, - } - for state, expectError := range stateToError { - testCurrentMutation := s.newTestWorkflowMutation(state) - err := ValidateUpdateWorkflowModeState( - UpdateWorkflowModeUpdateCurrent, - testCurrentMutation, - nil, - ) - if !expectError { - s.NoError(err, err) - } else { - s.Error(err, err) - } - } - - // current workflow & new workflow - currentStateToError := map[enumsspb.WorkflowExecutionState]bool{ - enumsspb.WORKFLOW_EXECUTION_STATE_CREATED: true, - enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING: true, - enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED: false, - enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE: false, - } - newStateToError := map[enumsspb.WorkflowExecutionState]bool{ - enumsspb.WORKFLOW_EXECUTION_STATE_CREATED: false, - enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING: false, - enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED: false, - enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE: true, - } - for currentState, currentExpectError := range currentStateToError { - for newState, newExpectError := range newStateToError { - testCurrentMutation 
:= s.newTestWorkflowMutation(currentState) - testNewSnapshot := s.newTestWorkflowSnapshot(newState) - err := ValidateUpdateWorkflowModeState( - UpdateWorkflowModeUpdateCurrent, - testCurrentMutation, - &testNewSnapshot, - ) - if currentExpectError || newExpectError { - s.Error(err, err) - } else { - s.NoError(err, err) - } - } - } -} - -func (s *validateOperationWorkflowModeStateSuite) TestUpdateMode_BypassCurrent() { - - // only current workflow - stateToError := map[enumsspb.WorkflowExecutionState]bool{ - enumsspb.WORKFLOW_EXECUTION_STATE_CREATED: true, - enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING: true, - enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED: false, - enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE: false, - } - for state, expectError := range stateToError { - testMutation := s.newTestWorkflowMutation(state) - err := ValidateUpdateWorkflowModeState( - UpdateWorkflowModeBypassCurrent, - testMutation, - nil, - ) - if !expectError { - s.NoError(err, err) - } else { - s.Error(err, err) - } - } - - // current workflow & new workflow - currentStateToError := map[enumsspb.WorkflowExecutionState]bool{ - enumsspb.WORKFLOW_EXECUTION_STATE_CREATED: true, - enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING: true, - enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED: false, - enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE: false, - } - newStateToError := map[enumsspb.WorkflowExecutionState]bool{ - enumsspb.WORKFLOW_EXECUTION_STATE_CREATED: true, - enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING: true, - enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED: false, - enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE: false, - } - for currentState, currentExpectError := range currentStateToError { - for newState, newExpectError := range newStateToError { - testCurrentMutation := s.newTestWorkflowMutation(currentState) - testNewSnapshot := s.newTestWorkflowSnapshot(newState) - err := ValidateUpdateWorkflowModeState( - UpdateWorkflowModeBypassCurrent, - testCurrentMutation, - &testNewSnapshot, - ) - if currentExpectError || newExpectError { - s.Error(err, err) - } else { - s.NoError(err, err) - } - } - } -} - -func (s *validateOperationWorkflowModeStateSuite) TestConflictResolveMode_UpdateCurrent() { - - // only reset workflow - stateToError := map[enumsspb.WorkflowExecutionState]bool{ - enumsspb.WORKFLOW_EXECUTION_STATE_CREATED: false, - enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING: false, - enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED: false, - enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE: true, - } - for state, expectError := range stateToError { - testSnapshot := s.newTestWorkflowSnapshot(state) - err := ValidateConflictResolveWorkflowModeState( - ConflictResolveWorkflowModeUpdateCurrent, - testSnapshot, - nil, - nil, - ) - if !expectError { - s.NoError(err, err) - } else { - s.Error(err, err) - } - } - - // reset workflow & new workflow - resetStateToError := map[enumsspb.WorkflowExecutionState]bool{ - enumsspb.WORKFLOW_EXECUTION_STATE_CREATED: true, - enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING: true, - enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED: false, - enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE: true, - } - newStateToError := map[enumsspb.WorkflowExecutionState]bool{ - enumsspb.WORKFLOW_EXECUTION_STATE_CREATED: false, - enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING: false, - enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED: false, - enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE: true, - } - for resetState, resetExpectError := range resetStateToError { - for newState, newExpectError := range newStateToError { - testResetSnapshot := 
s.newTestWorkflowSnapshot(resetState) - testNewSnapshot := s.newTestWorkflowSnapshot(newState) - err := ValidateConflictResolveWorkflowModeState( - ConflictResolveWorkflowModeUpdateCurrent, - testResetSnapshot, - &testNewSnapshot, - nil, - ) - if resetExpectError || newExpectError { - s.Error(err, err) - } else { - s.NoError(err, err) - } - } - } - - // reset workflow & current workflow - resetStateToError = map[enumsspb.WorkflowExecutionState]bool{ - enumsspb.WORKFLOW_EXECUTION_STATE_CREATED: false, - enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING: false, - enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED: false, - enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE: true, - } - currentStateToError := map[enumsspb.WorkflowExecutionState]bool{ - enumsspb.WORKFLOW_EXECUTION_STATE_CREATED: true, - enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING: true, - enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED: false, - enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE: false, - } - for resetState, resetExpectError := range resetStateToError { - for currentState, currentExpectError := range currentStateToError { - testResetSnapshot := s.newTestWorkflowSnapshot(resetState) - testCurrentSnapshot := s.newTestWorkflowMutation(currentState) - err := ValidateConflictResolveWorkflowModeState( - ConflictResolveWorkflowModeUpdateCurrent, - testResetSnapshot, - nil, - &testCurrentSnapshot, - ) - if resetExpectError || currentExpectError { - s.Error(err, err) - } else { - s.NoError(err, err) - } - } - } - - // reset workflow & new workflow & current workflow - resetStateToError = map[enumsspb.WorkflowExecutionState]bool{ - enumsspb.WORKFLOW_EXECUTION_STATE_CREATED: true, - enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING: true, - enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED: false, - enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE: true, - } - newStateToError = map[enumsspb.WorkflowExecutionState]bool{ - enumsspb.WORKFLOW_EXECUTION_STATE_CREATED: false, - enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING: false, - enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED: false, - enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE: true, - } - currentStateToError = map[enumsspb.WorkflowExecutionState]bool{ - enumsspb.WORKFLOW_EXECUTION_STATE_CREATED: true, - enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING: true, - enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED: false, - enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE: false, - } - for resetState, resetExpectError := range resetStateToError { - for newState, newExpectError := range newStateToError { - for currentState, currentExpectError := range currentStateToError { - testResetSnapshot := s.newTestWorkflowSnapshot(resetState) - testNewSnapshot := s.newTestWorkflowSnapshot(newState) - testCurrentSnapshot := s.newTestWorkflowMutation(currentState) - err := ValidateConflictResolveWorkflowModeState( - ConflictResolveWorkflowModeUpdateCurrent, - testResetSnapshot, - &testNewSnapshot, - &testCurrentSnapshot, - ) - if resetExpectError || newExpectError || currentExpectError { - s.Error(err, err) - } else { - s.NoError(err, err) - } - } - } - } -} - -func (s *validateOperationWorkflowModeStateSuite) TestConflictResolveMode_BypassCurrent() { - - // only reset workflow - stateToError := map[enumsspb.WorkflowExecutionState]bool{ - enumsspb.WORKFLOW_EXECUTION_STATE_CREATED: true, - enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING: true, - enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED: false, - enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE: false, - } - for state, expectError := range stateToError { - testSnapshot := s.newTestWorkflowSnapshot(state) - err := 
ValidateConflictResolveWorkflowModeState( - ConflictResolveWorkflowModeBypassCurrent, - testSnapshot, - nil, - nil, - ) - if !expectError { - s.NoError(err, err) - } else { - s.Error(err, err) - } - } - - // reset workflow & new workflow - resetStateToError := map[enumsspb.WorkflowExecutionState]bool{ - enumsspb.WORKFLOW_EXECUTION_STATE_CREATED: true, - enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING: true, - enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED: false, - enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE: true, - } - newStateToError := map[enumsspb.WorkflowExecutionState]bool{ - enumsspb.WORKFLOW_EXECUTION_STATE_CREATED: true, - enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING: true, - enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED: true, - enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE: false, - } - for resetState, resetExpectError := range resetStateToError { - for newState, newExpectError := range newStateToError { - testResetSnapshot := s.newTestWorkflowSnapshot(resetState) - testNewSnapshot := s.newTestWorkflowSnapshot(newState) - err := ValidateConflictResolveWorkflowModeState( - ConflictResolveWorkflowModeBypassCurrent, - testResetSnapshot, - &testNewSnapshot, - nil, - ) - if resetExpectError || newExpectError { - if err == nil { - fmt.Print("##") - } - s.Error(err, err) - } else { - s.NoError(err, err) - } - } - } -} - -func (s *validateOperationWorkflowModeStateSuite) newTestWorkflowSnapshot( - state enumsspb.WorkflowExecutionState, -) WorkflowSnapshot { - return WorkflowSnapshot{ - ExecutionInfo: &persistencespb.WorkflowExecutionInfo{}, - ExecutionState: &persistencespb.WorkflowExecutionState{State: state}, - } -} - -func (s *validateOperationWorkflowModeStateSuite) newTestWorkflowMutation( - state enumsspb.WorkflowExecutionState, -) WorkflowMutation { - return WorkflowMutation{ - ExecutionInfo: &persistencespb.WorkflowExecutionInfo{}, - ExecutionState: &persistencespb.WorkflowExecutionState{State: state}, - } -} diff -Nru temporal-1.21.5-1/src/common/persistence/operation_mode_validator.go temporal-1.22.5/src/common/persistence/operation_mode_validator.go --- temporal-1.21.5-1/src/common/persistence/operation_mode_validator.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/operation_mode_validator.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,413 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
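The new operation_mode_validator.go introduced below encodes which workflow execution states are acceptable for each create, update, and conflict-resolve persistence mode. The following is a rough usage sketch, not the actual call site: exampleCreateValidation is hypothetical, while the mode constants, the WorkflowSnapshot shape, and the ValidateCreateWorkflowModeState signature are taken from this diff.

package persistence

import (
	"fmt"

	enumsspb "go.temporal.io/server/api/enums/v1"
	persistencespb "go.temporal.io/server/api/persistence/v1"
)

// exampleCreateValidation is a hypothetical call site showing how a
// persistence-layer caller could validate a snapshot before writing it.
func exampleCreateValidation() {
	// A snapshot whose execution state is ZOMBIE, built the same way the
	// tests in this diff build theirs.
	zombie := WorkflowSnapshot{
		ExecutionInfo:  &persistencespb.WorkflowExecutionInfo{},
		ExecutionState: &persistencespb.WorkflowExecutionState{State: enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE},
	}

	// Brand-new creation must not point the current record at a zombie run,
	// so the validator returns an Internal service error here.
	if err := ValidateCreateWorkflowModeState(CreateWorkflowModeBrandNew, zombie); err != nil {
		fmt.Println("rejected:", err)
	}

	// Bypassing the current record allows zombie runs; only CREATED and
	// RUNNING are rejected in that mode, so this call returns nil.
	if err := ValidateCreateWorkflowModeState(CreateWorkflowModeBypassCurrent, zombie); err == nil {
		fmt.Println("accepted")
	}
}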
+ +package persistence + +import ( + "fmt" + + "go.temporal.io/api/serviceerror" + + enumsspb "go.temporal.io/server/api/enums/v1" +) + +// NOTE: when modifying this file, plz make each case clear, +// do not combine cases together. +// The idea for this file is to test whether current record +// points to a zombie record. + +// ValidateCreateWorkflowModeState validate workflow creation mode & workflow state +func ValidateCreateWorkflowModeState( + mode CreateWorkflowMode, + newWorkflowSnapshot WorkflowSnapshot, +) error { + + workflowState := newWorkflowSnapshot.ExecutionState.State + if err := checkWorkflowState(workflowState); err != nil { + return err + } + + switch mode { + case CreateWorkflowModeBrandNew, + CreateWorkflowModeUpdateCurrent: + if workflowState == enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE { + return newInvalidCreateWorkflowMode( + mode, + workflowState, + ) + } + return nil + + case CreateWorkflowModeBypassCurrent: + if workflowState == enumsspb.WORKFLOW_EXECUTION_STATE_CREATED || + workflowState == enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING { + return newInvalidCreateWorkflowMode( + mode, + workflowState, + ) + } + return nil + + default: + return serviceerror.NewInternal(fmt.Sprintf("unknown mode: %v", mode)) + } +} + +// ValidateUpdateWorkflowModeState validate workflow update mode & workflow state +func ValidateUpdateWorkflowModeState( + mode UpdateWorkflowMode, + currentWorkflowMutation WorkflowMutation, + newWorkflowSnapshot *WorkflowSnapshot, +) error { + + currentWorkflowState := currentWorkflowMutation.ExecutionState.State + if err := checkWorkflowState(currentWorkflowState); err != nil { + return err + } + var newWorkflowState *enumsspb.WorkflowExecutionState + if newWorkflowSnapshot != nil { + newWorkflowState = &newWorkflowSnapshot.ExecutionState.State + if err := checkWorkflowState(*newWorkflowState); err != nil { + return err + } + } + + switch mode { + case UpdateWorkflowModeUpdateCurrent: + // update current record + // 1. current workflow only -> + // current workflow cannot be zombie + // 2. current workflow & new workflow -> + // current workflow cannot be created / running, + // new workflow cannot be zombie + + // case 1 + if newWorkflowState == nil { + if currentWorkflowState == enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE { + return newInvalidUpdateWorkflowMode(mode, currentWorkflowState) + } + return nil + } + + // case 2 + if currentWorkflowState == enumsspb.WORKFLOW_EXECUTION_STATE_CREATED || + currentWorkflowState == enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING || + *newWorkflowState == enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE { + return newInvalidUpdateWorkflowWithNewMode(mode, currentWorkflowState, *newWorkflowState) + } + return nil + + case UpdateWorkflowModeBypassCurrent: + // bypass current record + // 1. current workflow only -> + // current workflow cannot be created / running + // 2. 
current workflow & new workflow -> + // current workflow cannot be created / running, + // new workflow cannot be created / running + + // case 1 + if newWorkflowState == nil { + if currentWorkflowState == enumsspb.WORKFLOW_EXECUTION_STATE_CREATED || + currentWorkflowState == enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING { + return newInvalidUpdateWorkflowMode(mode, currentWorkflowState) + } + return nil + } + + // case 2 + if currentWorkflowState == enumsspb.WORKFLOW_EXECUTION_STATE_CREATED || + currentWorkflowState == enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING || + *newWorkflowState == enumsspb.WORKFLOW_EXECUTION_STATE_CREATED || + *newWorkflowState == enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING { + return newInvalidUpdateWorkflowWithNewMode( + mode, + currentWorkflowState, + *newWorkflowState, + ) + } + return nil + + default: + return serviceerror.NewInternal(fmt.Sprintf("unknown mode: %v", mode)) + } +} + +// ValidateConflictResolveWorkflowModeState validate workflow conflict resolve mode & workflow state +func ValidateConflictResolveWorkflowModeState( + mode ConflictResolveWorkflowMode, + resetWorkflowSnapshot WorkflowSnapshot, + newWorkflowSnapshot *WorkflowSnapshot, + currentWorkflowMutation *WorkflowMutation, +) error { + + resetWorkflowState := resetWorkflowSnapshot.ExecutionState.State + if err := checkWorkflowState(resetWorkflowState); err != nil { + return err + } + var newWorkflowState *enumsspb.WorkflowExecutionState + if newWorkflowSnapshot != nil { + newWorkflowState = &newWorkflowSnapshot.ExecutionState.State + if err := checkWorkflowState(*newWorkflowState); err != nil { + return err + } + } + var currentWorkflowState *enumsspb.WorkflowExecutionState + if currentWorkflowMutation != nil { + currentWorkflowState = &currentWorkflowMutation.ExecutionState.State + if err := checkWorkflowState(*currentWorkflowState); err != nil { + return err + } + } + + switch mode { + case ConflictResolveWorkflowModeUpdateCurrent: + // update current record + // 1. reset workflow only -> + // reset workflow cannot be zombie + // 2. reset workflow & new workflow -> + // reset workflow cannot be created / running / zombie, + // new workflow cannot be zombie + // 3. current workflow & reset workflow -> + // current workflow cannot be created / running, + // reset workflow cannot be zombie + // 4.
current workflow & reset workflow & new workflow -> + // current workflow cannot be created / running, + // reset workflow cannot be created / running / zombie, + // new workflow cannot be zombie + + // TODO remove case 1 & 2 support once 2DC is deprecated + // it is ok that currentWorkflowMutation is null, only for 2 DC case + // NDC should always require current workflow for CAS + // Note: current workflow mutation can be in zombie state, for the update + + // case 1 & 2 + if currentWorkflowState == nil { + // case 1 + if newWorkflowState == nil { + if resetWorkflowState == enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE { + return newInvalidConflictResolveWorkflowMode( + mode, + resetWorkflowState, + ) + } + return nil + } + + // case 2 + if resetWorkflowState == enumsspb.WORKFLOW_EXECUTION_STATE_CREATED || + resetWorkflowState == enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING || + resetWorkflowState == enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE || + *newWorkflowState == enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE { + return newInvalidConflictResolveWorkflowWithNewMode( + mode, + resetWorkflowState, + *newWorkflowState, + ) + } + return nil + } + + // case 3 & 4 + // case 3 + if newWorkflowState == nil { + if *currentWorkflowState == enumsspb.WORKFLOW_EXECUTION_STATE_CREATED || + *currentWorkflowState == enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING || + resetWorkflowState == enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE { + return newInvalidConflictResolveWorkflowWithCurrentMode( + mode, + resetWorkflowState, + *currentWorkflowState, + ) + } + return nil + } + + // case 4 + if *currentWorkflowState == enumsspb.WORKFLOW_EXECUTION_STATE_CREATED || + *currentWorkflowState == enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING || + resetWorkflowState == enumsspb.WORKFLOW_EXECUTION_STATE_CREATED || + resetWorkflowState == enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING || + resetWorkflowState == enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE || + *newWorkflowState == enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE { + return newInvalidConflictResolveWorkflowWithCurrentWithNewMode( + mode, + resetWorkflowState, + *newWorkflowState, + *currentWorkflowState, + ) + } + return nil + + case ConflictResolveWorkflowModeBypassCurrent: + // bypass current record + // * current workflow cannot be set + // 1. reset workflow only -> + // reset workflow cannot be created / running + // 2. 
reset workflow & new workflow -> + // reset workflow cannot be created / running / zombie, + // new workflow cannot be created / running / completed + + // precondition + if currentWorkflowMutation != nil { + return serviceerror.NewInternal(fmt.Sprintf("Invalid workflow conflict resolve mode %v, encountered current workflow", mode)) + } + + // case 1 + if newWorkflowState == nil { + if resetWorkflowState == enumsspb.WORKFLOW_EXECUTION_STATE_CREATED || + resetWorkflowState == enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING { + return newInvalidConflictResolveWorkflowMode( + mode, + resetWorkflowState, + ) + } + return nil + } + + // case 2 + if resetWorkflowState == enumsspb.WORKFLOW_EXECUTION_STATE_CREATED || + resetWorkflowState == enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING || + resetWorkflowState == enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE || + *newWorkflowState == enumsspb.WORKFLOW_EXECUTION_STATE_CREATED || + *newWorkflowState == enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING || + *newWorkflowState == enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED { + return newInvalidConflictResolveWorkflowWithNewMode( + mode, + resetWorkflowState, + *newWorkflowState, + ) + } + return nil + + default: + return serviceerror.NewInternal(fmt.Sprintf("unknown mode: %v", mode)) + } +} + +func checkWorkflowState(state enumsspb.WorkflowExecutionState) error { + switch state { + case enumsspb.WORKFLOW_EXECUTION_STATE_CREATED, + enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, + enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE, + enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED, + enumsspb.WORKFLOW_EXECUTION_STATE_CORRUPTED: + return nil + default: + return serviceerror.NewInternal(fmt.Sprintf("unknown workflow state: %v", state)) + } +} + +func newInvalidCreateWorkflowMode( + mode CreateWorkflowMode, + workflowState enumsspb.WorkflowExecutionState, +) error { + return serviceerror.NewInternal(fmt.Sprintf( + "Invalid workflow create mode %v, state: %v", + mode, + workflowState, + ), + ) +} + +func newInvalidUpdateWorkflowMode( + mode UpdateWorkflowMode, + currentWorkflowState enumsspb.WorkflowExecutionState, +) error { + return serviceerror.NewInternal(fmt.Sprintf( + "Invalid workflow update mode %v, state: %v", + mode, + currentWorkflowState, + ), + ) +} + +func newInvalidUpdateWorkflowWithNewMode( + mode UpdateWorkflowMode, + currentWorkflowState enumsspb.WorkflowExecutionState, + newWorkflowState enumsspb.WorkflowExecutionState, +) error { + return serviceerror.NewInternal(fmt.Sprintf( + "Invalid workflow update mode %v, current state: %v, new state: %v", + mode, + currentWorkflowState, + newWorkflowState, + ), + ) +} + +func newInvalidConflictResolveWorkflowMode( + mode ConflictResolveWorkflowMode, + resetWorkflowState enumsspb.WorkflowExecutionState, +) error { + return serviceerror.NewInternal(fmt.Sprintf( + "Invalid workflow conflict resolve mode %v, reset state: %v", + mode, + resetWorkflowState, + ), + ) +} + +func newInvalidConflictResolveWorkflowWithNewMode( + mode ConflictResolveWorkflowMode, + resetWorkflowState enumsspb.WorkflowExecutionState, + newWorkflowState enumsspb.WorkflowExecutionState, +) error { + return serviceerror.NewInternal(fmt.Sprintf( + "Invalid workflow conflict resolve mode %v, reset state: %v, new state: %v", + mode, + resetWorkflowState, + newWorkflowState, + ), + ) +} + +func newInvalidConflictResolveWorkflowWithCurrentMode( + mode ConflictResolveWorkflowMode, + resetWorkflowState enumsspb.WorkflowExecutionState, + currentWorkflowState enumsspb.WorkflowExecutionState, +) error { + return 
serviceerror.NewInternal(fmt.Sprintf( + "Invalid workflow conflict resolve mode %v, reset state: %v, current state: %v", + mode, + resetWorkflowState, + currentWorkflowState, + ), + ) +} + +func newInvalidConflictResolveWorkflowWithCurrentWithNewMode( + mode ConflictResolveWorkflowMode, + resetWorkflowState enumsspb.WorkflowExecutionState, + newWorkflowState enumsspb.WorkflowExecutionState, + currentWorkflowState enumsspb.WorkflowExecutionState, +) error { + return serviceerror.NewInternal(fmt.Sprintf( + "Invalid workflow conflict resolve mode %v, reset state: %v, new state: %v, current state: %v", + mode, + resetWorkflowState, + newWorkflowState, + currentWorkflowState, + ), + ) +} diff -Nru temporal-1.21.5-1/src/common/persistence/operation_mode_validator_test.go temporal-1.22.5/src/common/persistence/operation_mode_validator_test.go --- temporal-1.21.5-1/src/common/persistence/operation_mode_validator_test.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/operation_mode_validator_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,420 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
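Before the renamed test file continues below, a short illustration of the update-mode rules defined in the validator above: UpdateWorkflowModeBypassCurrent rejects a current mutation that is still CREATED or RUNNING, while UpdateWorkflowModeUpdateCurrent accepts it when no new run is attached. This is a hedged sketch only; exampleUpdateValidation is a hypothetical helper, not part of the package.

package persistence

import (
	"errors"

	enumsspb "go.temporal.io/server/api/enums/v1"
	persistencespb "go.temporal.io/server/api/persistence/v1"
)

// exampleUpdateValidation walks the two update modes with the same kind of
// RUNNING mutation the tests in this diff construct.
func exampleUpdateValidation() error {
	running := WorkflowMutation{
		ExecutionInfo:  &persistencespb.WorkflowExecutionInfo{},
		ExecutionState: &persistencespb.WorkflowExecutionState{State: enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING},
	}

	// Bypass-current with a RUNNING mutation is invalid; the validator
	// returns an Internal error rather than nil.
	if err := ValidateUpdateWorkflowModeState(UpdateWorkflowModeBypassCurrent, running, nil); err == nil {
		return errors.New("expected bypass-current of a running workflow to be rejected")
	}

	// Update-current with the same RUNNING mutation and no new run is the
	// ordinary path and validates cleanly.
	return ValidateUpdateWorkflowModeState(UpdateWorkflowModeUpdateCurrent, running, nil)
}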
+ +package persistence + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/suite" + + enumsspb "go.temporal.io/server/api/enums/v1" + persistencespb "go.temporal.io/server/api/persistence/v1" +) + +type ( + validateOperationWorkflowModeStateSuite struct { + suite.Suite + } +) + +func TestValidateOperationWorkflowModeStateSuite(t *testing.T) { + s := new(validateOperationWorkflowModeStateSuite) + suite.Run(t, s) +} + +func (s *validateOperationWorkflowModeStateSuite) SetupSuite() { +} + +func (s *validateOperationWorkflowModeStateSuite) TearDownSuite() { + +} + +func (s *validateOperationWorkflowModeStateSuite) SetupTest() { + +} + +func (s *validateOperationWorkflowModeStateSuite) TearDownTest() { + +} + +func (s *validateOperationWorkflowModeStateSuite) TestCreateMode_UpdateCurrent() { + + stateToError := map[enumsspb.WorkflowExecutionState]bool{ + enumsspb.WORKFLOW_EXECUTION_STATE_CREATED: false, + enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING: false, + enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED: false, + enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE: true, + } + + creatModes := []CreateWorkflowMode{ + CreateWorkflowModeBrandNew, + CreateWorkflowModeUpdateCurrent, + } + + for state, expectError := range stateToError { + testSnapshot := s.newTestWorkflowSnapshot(state) + for _, createMode := range creatModes { + err := ValidateCreateWorkflowModeState(createMode, testSnapshot) + if !expectError { + s.NoError(err, err) + } else { + s.Error(err, err) + } + } + } +} + +func (s *validateOperationWorkflowModeStateSuite) TestCreateMode_BypassCurrent() { + + stateToError := map[enumsspb.WorkflowExecutionState]bool{ + enumsspb.WORKFLOW_EXECUTION_STATE_CREATED: true, + enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING: true, + enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED: false, + enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE: false, + } + + for state, expectError := range stateToError { + testSnapshot := s.newTestWorkflowSnapshot(state) + err := ValidateCreateWorkflowModeState(CreateWorkflowModeBypassCurrent, testSnapshot) + if !expectError { + s.NoError(err, err) + } else { + s.Error(err, err) + } + } +} + +func (s *validateOperationWorkflowModeStateSuite) TestUpdateMode_UpdateCurrent() { + + // only current workflow + stateToError := map[enumsspb.WorkflowExecutionState]bool{ + enumsspb.WORKFLOW_EXECUTION_STATE_CREATED: false, + enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING: false, + enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED: false, + enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE: true, + } + for state, expectError := range stateToError { + testCurrentMutation := s.newTestWorkflowMutation(state) + err := ValidateUpdateWorkflowModeState( + UpdateWorkflowModeUpdateCurrent, + testCurrentMutation, + nil, + ) + if !expectError { + s.NoError(err, err) + } else { + s.Error(err, err) + } + } + + // current workflow & new workflow + currentStateToError := map[enumsspb.WorkflowExecutionState]bool{ + enumsspb.WORKFLOW_EXECUTION_STATE_CREATED: true, + enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING: true, + enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED: false, + enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE: false, + } + newStateToError := map[enumsspb.WorkflowExecutionState]bool{ + enumsspb.WORKFLOW_EXECUTION_STATE_CREATED: false, + enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING: false, + enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED: false, + enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE: true, + } + for currentState, currentExpectError := range currentStateToError { + for newState, newExpectError := range newStateToError { + testCurrentMutation 
:= s.newTestWorkflowMutation(currentState) + testNewSnapshot := s.newTestWorkflowSnapshot(newState) + err := ValidateUpdateWorkflowModeState( + UpdateWorkflowModeUpdateCurrent, + testCurrentMutation, + &testNewSnapshot, + ) + if currentExpectError || newExpectError { + s.Error(err, err) + } else { + s.NoError(err, err) + } + } + } +} + +func (s *validateOperationWorkflowModeStateSuite) TestUpdateMode_BypassCurrent() { + + // only current workflow + stateToError := map[enumsspb.WorkflowExecutionState]bool{ + enumsspb.WORKFLOW_EXECUTION_STATE_CREATED: true, + enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING: true, + enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED: false, + enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE: false, + } + for state, expectError := range stateToError { + testMutation := s.newTestWorkflowMutation(state) + err := ValidateUpdateWorkflowModeState( + UpdateWorkflowModeBypassCurrent, + testMutation, + nil, + ) + if !expectError { + s.NoError(err, err) + } else { + s.Error(err, err) + } + } + + // current workflow & new workflow + currentStateToError := map[enumsspb.WorkflowExecutionState]bool{ + enumsspb.WORKFLOW_EXECUTION_STATE_CREATED: true, + enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING: true, + enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED: false, + enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE: false, + } + newStateToError := map[enumsspb.WorkflowExecutionState]bool{ + enumsspb.WORKFLOW_EXECUTION_STATE_CREATED: true, + enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING: true, + enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED: false, + enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE: false, + } + for currentState, currentExpectError := range currentStateToError { + for newState, newExpectError := range newStateToError { + testCurrentMutation := s.newTestWorkflowMutation(currentState) + testNewSnapshot := s.newTestWorkflowSnapshot(newState) + err := ValidateUpdateWorkflowModeState( + UpdateWorkflowModeBypassCurrent, + testCurrentMutation, + &testNewSnapshot, + ) + if currentExpectError || newExpectError { + s.Error(err, err) + } else { + s.NoError(err, err) + } + } + } +} + +func (s *validateOperationWorkflowModeStateSuite) TestConflictResolveMode_UpdateCurrent() { + + // only reset workflow + stateToError := map[enumsspb.WorkflowExecutionState]bool{ + enumsspb.WORKFLOW_EXECUTION_STATE_CREATED: false, + enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING: false, + enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED: false, + enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE: true, + } + for state, expectError := range stateToError { + testSnapshot := s.newTestWorkflowSnapshot(state) + err := ValidateConflictResolveWorkflowModeState( + ConflictResolveWorkflowModeUpdateCurrent, + testSnapshot, + nil, + nil, + ) + if !expectError { + s.NoError(err, err) + } else { + s.Error(err, err) + } + } + + // reset workflow & new workflow + resetStateToError := map[enumsspb.WorkflowExecutionState]bool{ + enumsspb.WORKFLOW_EXECUTION_STATE_CREATED: true, + enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING: true, + enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED: false, + enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE: true, + } + newStateToError := map[enumsspb.WorkflowExecutionState]bool{ + enumsspb.WORKFLOW_EXECUTION_STATE_CREATED: false, + enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING: false, + enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED: false, + enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE: true, + } + for resetState, resetExpectError := range resetStateToError { + for newState, newExpectError := range newStateToError { + testResetSnapshot := 
s.newTestWorkflowSnapshot(resetState) + testNewSnapshot := s.newTestWorkflowSnapshot(newState) + err := ValidateConflictResolveWorkflowModeState( + ConflictResolveWorkflowModeUpdateCurrent, + testResetSnapshot, + &testNewSnapshot, + nil, + ) + if resetExpectError || newExpectError { + s.Error(err, err) + } else { + s.NoError(err, err) + } + } + } + + // reset workflow & current workflow + resetStateToError = map[enumsspb.WorkflowExecutionState]bool{ + enumsspb.WORKFLOW_EXECUTION_STATE_CREATED: false, + enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING: false, + enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED: false, + enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE: true, + } + currentStateToError := map[enumsspb.WorkflowExecutionState]bool{ + enumsspb.WORKFLOW_EXECUTION_STATE_CREATED: true, + enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING: true, + enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED: false, + enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE: false, + } + for resetState, resetExpectError := range resetStateToError { + for currentState, currentExpectError := range currentStateToError { + testResetSnapshot := s.newTestWorkflowSnapshot(resetState) + testCurrentSnapshot := s.newTestWorkflowMutation(currentState) + err := ValidateConflictResolveWorkflowModeState( + ConflictResolveWorkflowModeUpdateCurrent, + testResetSnapshot, + nil, + &testCurrentSnapshot, + ) + if resetExpectError || currentExpectError { + s.Error(err, err) + } else { + s.NoError(err, err) + } + } + } + + // reset workflow & new workflow & current workflow + resetStateToError = map[enumsspb.WorkflowExecutionState]bool{ + enumsspb.WORKFLOW_EXECUTION_STATE_CREATED: true, + enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING: true, + enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED: false, + enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE: true, + } + newStateToError = map[enumsspb.WorkflowExecutionState]bool{ + enumsspb.WORKFLOW_EXECUTION_STATE_CREATED: false, + enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING: false, + enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED: false, + enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE: true, + } + currentStateToError = map[enumsspb.WorkflowExecutionState]bool{ + enumsspb.WORKFLOW_EXECUTION_STATE_CREATED: true, + enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING: true, + enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED: false, + enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE: false, + } + for resetState, resetExpectError := range resetStateToError { + for newState, newExpectError := range newStateToError { + for currentState, currentExpectError := range currentStateToError { + testResetSnapshot := s.newTestWorkflowSnapshot(resetState) + testNewSnapshot := s.newTestWorkflowSnapshot(newState) + testCurrentSnapshot := s.newTestWorkflowMutation(currentState) + err := ValidateConflictResolveWorkflowModeState( + ConflictResolveWorkflowModeUpdateCurrent, + testResetSnapshot, + &testNewSnapshot, + &testCurrentSnapshot, + ) + if resetExpectError || newExpectError || currentExpectError { + s.Error(err, err) + } else { + s.NoError(err, err) + } + } + } + } +} + +func (s *validateOperationWorkflowModeStateSuite) TestConflictResolveMode_BypassCurrent() { + + // only reset workflow + stateToError := map[enumsspb.WorkflowExecutionState]bool{ + enumsspb.WORKFLOW_EXECUTION_STATE_CREATED: true, + enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING: true, + enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED: false, + enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE: false, + } + for state, expectError := range stateToError { + testSnapshot := s.newTestWorkflowSnapshot(state) + err := 
ValidateConflictResolveWorkflowModeState( + ConflictResolveWorkflowModeBypassCurrent, + testSnapshot, + nil, + nil, + ) + if !expectError { + s.NoError(err, err) + } else { + s.Error(err, err) + } + } + + // reset workflow & new workflow + resetStateToError := map[enumsspb.WorkflowExecutionState]bool{ + enumsspb.WORKFLOW_EXECUTION_STATE_CREATED: true, + enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING: true, + enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED: false, + enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE: true, + } + newStateToError := map[enumsspb.WorkflowExecutionState]bool{ + enumsspb.WORKFLOW_EXECUTION_STATE_CREATED: true, + enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING: true, + enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED: true, + enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE: false, + } + for resetState, resetExpectError := range resetStateToError { + for newState, newExpectError := range newStateToError { + testResetSnapshot := s.newTestWorkflowSnapshot(resetState) + testNewSnapshot := s.newTestWorkflowSnapshot(newState) + err := ValidateConflictResolveWorkflowModeState( + ConflictResolveWorkflowModeBypassCurrent, + testResetSnapshot, + &testNewSnapshot, + nil, + ) + if resetExpectError || newExpectError { + if err == nil { + fmt.Print("##") + } + s.Error(err, err) + } else { + s.NoError(err, err) + } + } + } +} + +func (s *validateOperationWorkflowModeStateSuite) newTestWorkflowSnapshot( + state enumsspb.WorkflowExecutionState, +) WorkflowSnapshot { + return WorkflowSnapshot{ + ExecutionInfo: &persistencespb.WorkflowExecutionInfo{}, + ExecutionState: &persistencespb.WorkflowExecutionState{State: state}, + } +} + +func (s *validateOperationWorkflowModeStateSuite) newTestWorkflowMutation( + state enumsspb.WorkflowExecutionState, +) WorkflowMutation { + return WorkflowMutation{ + ExecutionInfo: &persistencespb.WorkflowExecutionInfo{}, + ExecutionState: &persistencespb.WorkflowExecutionState{State: state}, + } +} diff -Nru temporal-1.21.5-1/src/common/persistence/persistence-tests/clusterMetadataManagerTest.go temporal-1.22.5/src/common/persistence/persistence-tests/clusterMetadataManagerTest.go --- temporal-1.21.5-1/src/common/persistence/persistence-tests/clusterMetadataManagerTest.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/persistence-tests/clusterMetadataManagerTest.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,487 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package persistencetests - -import ( - "context" - "net" - "time" - - "github.com/pborman/uuid" - "github.com/stretchr/testify/require" - "go.temporal.io/api/serviceerror" - versionpb "go.temporal.io/api/version/v1" - - persistencespb "go.temporal.io/server/api/persistence/v1" - "go.temporal.io/server/common/debug" - p "go.temporal.io/server/common/persistence" - "go.temporal.io/server/common/primitives" -) - -type ( - // ClusterMetadataManagerSuite runs tests that cover the ClusterMetadata read/write scenarios - ClusterMetadataManagerSuite struct { - TestBase - // override suite.Suite.Assertions with require.Assertions; this means that s.NotNil(nil) will stop the test, - // not merely log an error - *require.Assertions - - ctx context.Context - cancel context.CancelFunc - } -) - -// SetupSuite implementation -func (s *ClusterMetadataManagerSuite) SetupSuite() { -} - -// SetupTest implementation -func (s *ClusterMetadataManagerSuite) SetupTest() { - // Have to define our overridden assertions in the test setup. If we did it earlier, s.T() will return nil - s.Assertions = require.New(s.T()) - s.ctx, s.cancel = context.WithTimeout(context.Background(), 30*time.Second*debug.TimeoutMultiplier) -} - -// TearDownTest implementation -func (s *ClusterMetadataManagerSuite) TearDownTest() { - s.cancel() -} - -// TearDownSuite implementation -func (s *ClusterMetadataManagerSuite) TearDownSuite() { - s.TearDownWorkflowStore() -} - -// TestClusterMembershipEmptyInitially verifies the GetClusterMembers() works with an initial empty table -func (s *ClusterMetadataManagerSuite) TestClusterMembershipEmptyInitially() { - resp, err := s.ClusterMetadataManager.GetClusterMembers(s.ctx, &p.GetClusterMembersRequest{LastHeartbeatWithin: time.Minute * 10}) - s.Nil(err) - s.NotNil(resp) - s.Empty(resp.ActiveMembers) -} - -// TestClusterMembershipUpsertCanRead verifies that we can UpsertClusterMembership and read our result -func (s *ClusterMetadataManagerSuite) TestClusterMembershipUpsertCanReadAny() { - req := &p.UpsertClusterMembershipRequest{ - HostID: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}, - RPCAddress: net.ParseIP("127.0.0.2"), - RPCPort: 123, - Role: p.Frontend, - SessionStart: time.Now().UTC(), - RecordExpiry: time.Second, - } - - err := s.ClusterMetadataManager.UpsertClusterMembership(s.ctx, req) - s.Nil(err) - - resp, err := s.ClusterMetadataManager.GetClusterMembers(s.ctx, &p.GetClusterMembersRequest{}) - - s.Nil(err) - s.NotNil(resp) - s.NotEmpty(resp.ActiveMembers) -} - -// TestClusterMembershipUpsertCanRead verifies that we can UpsertClusterMembership and read our result -func (s *ClusterMetadataManagerSuite) TestClusterMembershipUpsertCanPageRead() { - // Expire previous records - // Todo: MetaMgr should provide api to clear all members - time.Sleep(time.Second * 3) - err := s.ClusterMetadataManager.PruneClusterMembership(s.ctx, &p.PruneClusterMembershipRequest{MaxRecordsPruned: 100}) - s.Nil(err) - - expectedIds := make(map[string]int, 100) - for i := 0; i < 100; i++ { - hostID := primitives.NewUUID().Downcast() - expectedIds[primitives.UUIDString(hostID)]++ - req := &p.UpsertClusterMembershipRequest{ - HostID: hostID, - RPCAddress: net.ParseIP("127.0.0.2"), - RPCPort: 123, - Role: p.Frontend, - SessionStart: 
time.Now().UTC(), - RecordExpiry: 3 * time.Second, - } - - err := s.ClusterMetadataManager.UpsertClusterMembership(s.ctx, req) - s.NoError(err) - } - - hostCount := 0 - var nextPageToken []byte - for { - resp, err := s.ClusterMetadataManager.GetClusterMembers(s.ctx, &p.GetClusterMembersRequest{PageSize: 9, NextPageToken: nextPageToken}) - s.NoError(err) - nextPageToken = resp.NextPageToken - for _, member := range resp.ActiveMembers { - expectedIds[primitives.UUIDString(member.HostID)]-- - hostCount++ - } - - if nextPageToken == nil { - break - } - } - - s.Equal(100, hostCount) - for id, val := range expectedIds { - s.Zero(val, "identifier was either not found in db, or shouldn't be there - "+id) - } - - time.Sleep(time.Second * 3) - err = s.ClusterMetadataManager.PruneClusterMembership(s.ctx, &p.PruneClusterMembershipRequest{MaxRecordsPruned: 1000}) - s.NoError(err) -} - -func (s *ClusterMetadataManagerSuite) validateUpsert(req *p.UpsertClusterMembershipRequest, resp *p.GetClusterMembersResponse, err error) { - s.Nil(err) - s.NotNil(resp) - s.NotEmpty(resp.ActiveMembers) - s.Equal(len(resp.ActiveMembers), 1) - // Have to round to 1 second due to SQL implementations. Cassandra truncates at 1ms. - s.Equal(resp.ActiveMembers[0].SessionStart.Round(time.Second), req.SessionStart.Round(time.Second)) - s.Equal(resp.ActiveMembers[0].RPCAddress.String(), req.RPCAddress.String()) - s.Equal(resp.ActiveMembers[0].RPCPort, req.RPCPort) - s.True(resp.ActiveMembers[0].RecordExpiry.After(time.Now().UTC())) - s.Equal(resp.ActiveMembers[0].HostID, req.HostID) - s.Equal(resp.ActiveMembers[0].Role, req.Role) -} - -// TestClusterMembershipReadFiltersCorrectly verifies that we can UpsertClusterMembership and read our result using filters -func (s *ClusterMetadataManagerSuite) TestClusterMembershipReadFiltersCorrectly() { - now := time.Now().UTC() - req := &p.UpsertClusterMembershipRequest{ - HostID: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}, - RPCAddress: net.ParseIP("127.0.0.2"), - RPCPort: 123, - Role: p.Frontend, - SessionStart: now, - RecordExpiry: time.Second * 4, - } - - err := s.ClusterMetadataManager.UpsertClusterMembership(s.ctx, req) - s.Nil(err) - - resp, err := s.ClusterMetadataManager.GetClusterMembers( - s.ctx, - &p.GetClusterMembersRequest{LastHeartbeatWithin: time.Minute * 10, HostIDEquals: req.HostID}, - ) - - s.validateUpsert(req, resp, err) - - time.Sleep(time.Second * 1) - resp, err = s.ClusterMetadataManager.GetClusterMembers( - s.ctx, - &p.GetClusterMembersRequest{LastHeartbeatWithin: time.Millisecond, HostIDEquals: req.HostID}, - ) - - s.Nil(err) - s.NotNil(resp) - s.Empty(resp.ActiveMembers) - - resp, err = s.ClusterMetadataManager.GetClusterMembers( - s.ctx, - &p.GetClusterMembersRequest{RoleEquals: p.Matching}, - ) - - s.Nil(err) - s.NotNil(resp) - s.Empty(resp.ActiveMembers) - - resp, err = s.ClusterMetadataManager.GetClusterMembers( - s.ctx, - &p.GetClusterMembersRequest{SessionStartedAfter: time.Now().UTC()}, - ) - - s.Nil(err) - s.NotNil(resp) - s.Empty(resp.ActiveMembers) - - resp, err = s.ClusterMetadataManager.GetClusterMembers( - s.ctx, - &p.GetClusterMembersRequest{SessionStartedAfter: now.Add(-time.Minute), RPCAddressEquals: req.RPCAddress, HostIDEquals: req.HostID}, - ) - - s.validateUpsert(req, resp, err) - - time.Sleep(time.Second * 3) - err = s.ClusterMetadataManager.PruneClusterMembership(s.ctx, &p.PruneClusterMembershipRequest{MaxRecordsPruned: 1000}) - s.NoError(err) -} - -// TestClusterMembershipUpsertExpiresCorrectly verifies RecordExpiry functions 
properly for ClusterMembership records -func (s *ClusterMetadataManagerSuite) TestClusterMembershipUpsertExpiresCorrectly() { - req := &p.UpsertClusterMembershipRequest{ - HostID: uuid.NewUUID(), - RPCAddress: net.ParseIP("127.0.0.2"), - RPCPort: 123, - Role: p.Frontend, - SessionStart: time.Now().UTC(), - RecordExpiry: time.Second, - } - - err := s.ClusterMetadataManager.UpsertClusterMembership(s.ctx, req) - s.NoError(err) - - err = s.ClusterMetadataManager.PruneClusterMembership(s.ctx, &p.PruneClusterMembershipRequest{MaxRecordsPruned: 100}) - s.NoError(err) - - resp, err := s.ClusterMetadataManager.GetClusterMembers( - s.ctx, - &p.GetClusterMembersRequest{LastHeartbeatWithin: time.Minute * 10, HostIDEquals: req.HostID}, - ) - - s.NoError(err) - s.NotNil(resp) - s.NotEmpty(resp.ActiveMembers) - s.Equal(len(resp.ActiveMembers), 1) - // Have to round to 1 second due to SQL implementations. Cassandra truncates at 1ms. - s.Equal(resp.ActiveMembers[0].SessionStart.Round(time.Second), req.SessionStart.Round(time.Second)) - s.Equal(resp.ActiveMembers[0].RPCAddress.String(), req.RPCAddress.String()) - s.Equal(resp.ActiveMembers[0].RPCPort, req.RPCPort) - s.True(resp.ActiveMembers[0].RecordExpiry.After(time.Now().UTC())) - s.Equal(resp.ActiveMembers[0].HostID, req.HostID) - s.Equal(resp.ActiveMembers[0].Role, req.Role) - - time.Sleep(time.Second * 2) - - err = s.ClusterMetadataManager.PruneClusterMembership(s.ctx, &p.PruneClusterMembershipRequest{MaxRecordsPruned: 100}) - s.Nil(err) - - resp, err = s.ClusterMetadataManager.GetClusterMembers( - s.ctx, - &p.GetClusterMembersRequest{LastHeartbeatWithin: time.Minute * 10}, - ) - - s.Nil(err) - s.NotNil(resp) - s.Empty(resp.ActiveMembers) -} - -// TestClusterMembershipUpsertInvalidExpiry verifies we cannot specify a non-positive RecordExpiry duration -func (s *ClusterMetadataManagerSuite) TestClusterMembershipUpsertInvalidExpiry() { - req := &p.UpsertClusterMembershipRequest{ - HostID: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}, - RPCAddress: net.ParseIP("127.0.0.2"), - RPCPort: 123, - Role: p.Frontend, - SessionStart: time.Now().UTC(), - RecordExpiry: time.Second * 0, - } - - err := s.ClusterMetadataManager.UpsertClusterMembership(s.ctx, req) - s.NotNil(err) - s.IsType(err, p.ErrInvalidMembershipExpiry) -} - -// TestInitImmutableMetadataReadWrite runs through the various cases of ClusterMetadata behavior -// Cases: -// 1 - Get, no data persisted -// 2 - Init, no data persisted -// 3 - Get, data persisted -// 4 - Init, data persisted -// 5 - Update, add version info and make sure it's persisted and can be retrieved. -// 6 - Delete, no data persisted -func (s *ClusterMetadataManagerSuite) TestInitImmutableMetadataReadWrite() { - clusterNameToPersist := "testing" - historyShardsToPersist := int32(43) - clusterIdToPersist := "12345" - clusterAddress := "cluster-address" - failoverVersionIncrement := int64(10) - initialFailoverVersion := int64(1) - - // Case 1 - Get, mo data persisted - // Fetch the persisted values, there should be nothing on start. - // This doesn't error on no row found, but returns an empty record. 
- getResp, err := s.ClusterMetadataManager.GetClusterMetadata(s.ctx, &p.GetClusterMetadataRequest{ClusterName: clusterNameToPersist}) - - // Validate they match our initializations - s.NotNil(err) - s.IsType(&serviceerror.NotFound{}, err) - s.Nil(getResp) - - // Case 2 - Init, no data persisted yet - // First commit, this should be persisted - initialResp, err := s.ClusterMetadataManager.SaveClusterMetadata( - s.ctx, - &p.SaveClusterMetadataRequest{ - ClusterMetadata: persistencespb.ClusterMetadata{ - ClusterName: clusterNameToPersist, - HistoryShardCount: historyShardsToPersist, - ClusterId: clusterIdToPersist, - ClusterAddress: clusterAddress, - FailoverVersionIncrement: failoverVersionIncrement, - InitialFailoverVersion: initialFailoverVersion, - IsGlobalNamespaceEnabled: true, - IsConnectionEnabled: true, - }}) - - s.Nil(err) - s.True(initialResp) // request should be applied as this is first initialize - - // Case 3 - Get, data persisted - // Fetch the persisted values - getResp, err = s.ClusterMetadataManager.GetClusterMetadata(s.ctx, &p.GetClusterMetadataRequest{ClusterName: clusterNameToPersist}) - - // Validate they match our initializations - s.Nil(err) - s.True(getResp != nil) - s.Equal(clusterNameToPersist, getResp.ClusterName) - s.Equal(historyShardsToPersist, getResp.HistoryShardCount) - s.Equal(clusterIdToPersist, getResp.ClusterId) - s.Equal(clusterAddress, getResp.ClusterAddress) - s.Equal(failoverVersionIncrement, getResp.FailoverVersionIncrement) - s.Equal(initialFailoverVersion, getResp.InitialFailoverVersion) - s.True(getResp.IsGlobalNamespaceEnabled) - s.True(getResp.IsConnectionEnabled) - - // Case 4 - Init, data persisted - // Attempt to overwrite with new values - secondResp, err := s.ClusterMetadataManager.SaveClusterMetadata(s.ctx, &p.SaveClusterMetadataRequest{ - ClusterMetadata: persistencespb.ClusterMetadata{ - ClusterName: clusterNameToPersist, - HistoryShardCount: int32(77), - }}) - - s.Nil(err) - s.False(secondResp) // Should not have applied, and should match values from first request - - // Refetch persisted - getResp, err = s.ClusterMetadataManager.GetClusterMetadata(s.ctx, &p.GetClusterMetadataRequest{ClusterName: clusterNameToPersist}) - - // Validate they match our initial values - s.Nil(err) - s.NotNil(getResp) - s.Equal(clusterNameToPersist, getResp.ClusterName) - s.Equal(historyShardsToPersist, getResp.HistoryShardCount) - s.Equal(clusterIdToPersist, getResp.ClusterId) - s.Equal(clusterAddress, getResp.ClusterAddress) - s.Equal(failoverVersionIncrement, getResp.FailoverVersionIncrement) - s.Equal(initialFailoverVersion, getResp.InitialFailoverVersion) - s.True(getResp.IsGlobalNamespaceEnabled) - s.True(getResp.IsConnectionEnabled) - - // Case 5 - Update version info - getResp.VersionInfo = &versionpb.VersionInfo{ - Current: &versionpb.ReleaseInfo{ - Version: "1.0", - }, - } - thirdResp, err := s.ClusterMetadataManager.SaveClusterMetadata(s.ctx, &p.SaveClusterMetadataRequest{ - ClusterMetadata: getResp.ClusterMetadata, - Version: getResp.Version, - }) - s.Nil(err) - s.True(thirdResp) - getResp, err = s.ClusterMetadataManager.GetClusterMetadata(s.ctx, &p.GetClusterMetadataRequest{ClusterName: clusterNameToPersist}) - s.Nil(err) - s.NotNil(getResp) - s.Equal("1.0", getResp.ClusterMetadata.VersionInfo.Current.Version) - - // Case 6 - Delete Cluster Metadata - err = s.ClusterMetadataManager.DeleteClusterMetadata(s.ctx, &p.DeleteClusterMetadataRequest{ClusterName: clusterNameToPersist}) - s.Nil(err) - getResp, err = 
s.ClusterMetadataManager.GetClusterMetadata(s.ctx, &p.GetClusterMetadataRequest{ClusterName: clusterNameToPersist}) - - // Validate they match our initializations - s.NotNil(err) - s.IsType(&serviceerror.NotFound{}, err) - s.Nil(getResp) - - // Case 7 - Update current cluster metadata - clusterNameToPersist = "active" - initialResp, err = s.ClusterMetadataManager.SaveClusterMetadata( - s.ctx, - &p.SaveClusterMetadataRequest{ - ClusterMetadata: persistencespb.ClusterMetadata{ - ClusterName: clusterNameToPersist, - HistoryShardCount: historyShardsToPersist, - ClusterId: clusterIdToPersist, - ClusterAddress: clusterAddress, - FailoverVersionIncrement: failoverVersionIncrement, - InitialFailoverVersion: initialFailoverVersion, - IsGlobalNamespaceEnabled: true, - IsConnectionEnabled: true, - }}) - s.Nil(err) - s.True(initialResp) - - // Case 8 - Get, data persisted - // Fetch the persisted values - getResp, err = s.ClusterMetadataManager.GetClusterMetadata(s.ctx, &p.GetClusterMetadataRequest{ClusterName: clusterNameToPersist}) - - // Validate they match our initializations - s.Nil(err) - s.True(getResp != nil) - s.Equal(clusterNameToPersist, getResp.ClusterName) - s.Equal(historyShardsToPersist, getResp.HistoryShardCount) - s.Equal(clusterIdToPersist, getResp.ClusterId) - s.Equal(clusterAddress, getResp.ClusterAddress) - s.Equal(failoverVersionIncrement, getResp.FailoverVersionIncrement) - s.Equal(initialFailoverVersion, getResp.InitialFailoverVersion) - s.True(getResp.IsGlobalNamespaceEnabled) - s.True(getResp.IsConnectionEnabled) - - // Case 9 - Update current cluster metadata - getResp.VersionInfo = &versionpb.VersionInfo{ - Current: &versionpb.ReleaseInfo{ - Version: "2.0", - }, - } - applied, err := s.ClusterMetadataManager.SaveClusterMetadata(s.ctx, &p.SaveClusterMetadataRequest{ - ClusterMetadata: getResp.ClusterMetadata, - Version: getResp.Version, - }) - s.True(applied) - s.NoError(err) - - // Case 10 - Get, data persisted - // Fetch the persisted values - getResp, err = s.ClusterMetadataManager.GetClusterMetadata(s.ctx, &p.GetClusterMetadataRequest{ClusterName: clusterNameToPersist}) - s.NoError(err) - s.Equal("2.0", getResp.ClusterMetadata.VersionInfo.Current.Version) - - // Case 11 - List - _, err = s.ClusterMetadataManager.SaveClusterMetadata( - s.ctx, - &p.SaveClusterMetadataRequest{ - ClusterMetadata: persistencespb.ClusterMetadata{ - ClusterName: clusterNameToPersist + "2", - HistoryShardCount: historyShardsToPersist, - ClusterId: clusterIdToPersist, - ClusterAddress: clusterAddress, - FailoverVersionIncrement: failoverVersionIncrement, - InitialFailoverVersion: initialFailoverVersion, - IsGlobalNamespaceEnabled: true, - IsConnectionEnabled: true, - }}) - s.NoError(err) - - resp, err := s.ClusterMetadataManager.ListClusterMetadata(s.ctx, &p.ListClusterMetadataRequest{PageSize: 1}) - s.NoError(err) - s.Equal(1, len(resp.ClusterMetadata)) - resp, err = s.ClusterMetadataManager.ListClusterMetadata(s.ctx, &p.ListClusterMetadataRequest{PageSize: 1, NextPageToken: resp.NextPageToken}) - s.NoError(err) - s.Equal(1, len(resp.ClusterMetadata)) -} diff -Nru temporal-1.21.5-1/src/common/persistence/persistence-tests/cluster_metadata_manager.go temporal-1.22.5/src/common/persistence/persistence-tests/cluster_metadata_manager.go --- temporal-1.21.5-1/src/common/persistence/persistence-tests/cluster_metadata_manager.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/persistence-tests/cluster_metadata_manager.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,487 
@@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package persistencetests + +import ( + "context" + "net" + "time" + + "github.com/pborman/uuid" + "github.com/stretchr/testify/require" + "go.temporal.io/api/serviceerror" + versionpb "go.temporal.io/api/version/v1" + + persistencespb "go.temporal.io/server/api/persistence/v1" + "go.temporal.io/server/common/debug" + p "go.temporal.io/server/common/persistence" + "go.temporal.io/server/common/primitives" +) + +type ( + // ClusterMetadataManagerSuite runs tests that cover the ClusterMetadata read/write scenarios + ClusterMetadataManagerSuite struct { + TestBase + // override suite.Suite.Assertions with require.Assertions; this means that s.NotNil(nil) will stop the test, + // not merely log an error + *require.Assertions + + ctx context.Context + cancel context.CancelFunc + } +) + +// SetupSuite implementation +func (s *ClusterMetadataManagerSuite) SetupSuite() { +} + +// SetupTest implementation +func (s *ClusterMetadataManagerSuite) SetupTest() { + // Have to define our overridden assertions in the test setup. 
If we did it earlier, s.T() will return nil + s.Assertions = require.New(s.T()) + s.ctx, s.cancel = context.WithTimeout(context.Background(), 30*time.Second*debug.TimeoutMultiplier) +} + +// TearDownTest implementation +func (s *ClusterMetadataManagerSuite) TearDownTest() { + s.cancel() +} + +// TearDownSuite implementation +func (s *ClusterMetadataManagerSuite) TearDownSuite() { + s.TearDownWorkflowStore() +} + +// TestClusterMembershipEmptyInitially verifies the GetClusterMembers() works with an initial empty table +func (s *ClusterMetadataManagerSuite) TestClusterMembershipEmptyInitially() { + resp, err := s.ClusterMetadataManager.GetClusterMembers(s.ctx, &p.GetClusterMembersRequest{LastHeartbeatWithin: time.Minute * 10}) + s.Nil(err) + s.NotNil(resp) + s.Empty(resp.ActiveMembers) +} + +// TestClusterMembershipUpsertCanRead verifies that we can UpsertClusterMembership and read our result +func (s *ClusterMetadataManagerSuite) TestClusterMembershipUpsertCanReadAny() { + req := &p.UpsertClusterMembershipRequest{ + HostID: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}, + RPCAddress: net.ParseIP("127.0.0.2"), + RPCPort: 123, + Role: p.Frontend, + SessionStart: time.Now().UTC(), + RecordExpiry: time.Second, + } + + err := s.ClusterMetadataManager.UpsertClusterMembership(s.ctx, req) + s.Nil(err) + + resp, err := s.ClusterMetadataManager.GetClusterMembers(s.ctx, &p.GetClusterMembersRequest{}) + + s.Nil(err) + s.NotNil(resp) + s.NotEmpty(resp.ActiveMembers) +} + +// TestClusterMembershipUpsertCanRead verifies that we can UpsertClusterMembership and read our result +func (s *ClusterMetadataManagerSuite) TestClusterMembershipUpsertCanPageRead() { + // Expire previous records + // Todo: MetaMgr should provide api to clear all members + time.Sleep(time.Second * 3) + err := s.ClusterMetadataManager.PruneClusterMembership(s.ctx, &p.PruneClusterMembershipRequest{MaxRecordsPruned: 100}) + s.Nil(err) + + expectedIds := make(map[string]int, 100) + for i := 0; i < 100; i++ { + hostID := primitives.NewUUID().Downcast() + expectedIds[primitives.UUIDString(hostID)]++ + req := &p.UpsertClusterMembershipRequest{ + HostID: hostID, + RPCAddress: net.ParseIP("127.0.0.2"), + RPCPort: 123, + Role: p.Frontend, + SessionStart: time.Now().UTC(), + RecordExpiry: 3 * time.Second, + } + + err := s.ClusterMetadataManager.UpsertClusterMembership(s.ctx, req) + s.NoError(err) + } + + hostCount := 0 + var nextPageToken []byte + for { + resp, err := s.ClusterMetadataManager.GetClusterMembers(s.ctx, &p.GetClusterMembersRequest{PageSize: 9, NextPageToken: nextPageToken}) + s.NoError(err) + nextPageToken = resp.NextPageToken + for _, member := range resp.ActiveMembers { + expectedIds[primitives.UUIDString(member.HostID)]-- + hostCount++ + } + + if nextPageToken == nil { + break + } + } + + s.Equal(100, hostCount) + for id, val := range expectedIds { + s.Zero(val, "identifier was either not found in db, or shouldn't be there - "+id) + } + + time.Sleep(time.Second * 3) + err = s.ClusterMetadataManager.PruneClusterMembership(s.ctx, &p.PruneClusterMembershipRequest{MaxRecordsPruned: 1000}) + s.NoError(err) +} + +func (s *ClusterMetadataManagerSuite) validateUpsert(req *p.UpsertClusterMembershipRequest, resp *p.GetClusterMembersResponse, err error) { + s.Nil(err) + s.NotNil(resp) + s.NotEmpty(resp.ActiveMembers) + s.Equal(len(resp.ActiveMembers), 1) + // Have to round to 1 second due to SQL implementations. Cassandra truncates at 1ms. 
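The page-read test above drains the membership table nine records at a time, feeding each response's NextPageToken back into the next GetClusterMembersRequest until the token comes back nil. A minimal sketch of that paging loop outside the suite, assuming an already-constructed persistence ClusterMetadataManager and the ClusterMember type exposed by the same package (the helper name is illustrative):

package example

import (
    "context"

    p "go.temporal.io/server/common/persistence"
)

// listAllActiveMembers pages through the cluster membership records the same way
// TestClusterMembershipUpsertCanPageRead does, accumulating every active member.
func listAllActiveMembers(ctx context.Context, mgr p.ClusterMetadataManager, pageSize int) ([]*p.ClusterMember, error) {
    var members []*p.ClusterMember
    var token []byte
    for {
        resp, err := mgr.GetClusterMembers(ctx, &p.GetClusterMembersRequest{
            PageSize:      pageSize,
            NextPageToken: token,
        })
        if err != nil {
            return nil, err
        }
        members = append(members, resp.ActiveMembers...)
        token = resp.NextPageToken
        if token == nil { // no further pages
            break
        }
    }
    return members, nil
}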
+ s.Equal(resp.ActiveMembers[0].SessionStart.Round(time.Second), req.SessionStart.Round(time.Second)) + s.Equal(resp.ActiveMembers[0].RPCAddress.String(), req.RPCAddress.String()) + s.Equal(resp.ActiveMembers[0].RPCPort, req.RPCPort) + s.True(resp.ActiveMembers[0].RecordExpiry.After(time.Now().UTC())) + s.Equal(resp.ActiveMembers[0].HostID, req.HostID) + s.Equal(resp.ActiveMembers[0].Role, req.Role) +} + +// TestClusterMembershipReadFiltersCorrectly verifies that we can UpsertClusterMembership and read our result using filters +func (s *ClusterMetadataManagerSuite) TestClusterMembershipReadFiltersCorrectly() { + now := time.Now().UTC() + req := &p.UpsertClusterMembershipRequest{ + HostID: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}, + RPCAddress: net.ParseIP("127.0.0.2"), + RPCPort: 123, + Role: p.Frontend, + SessionStart: now, + RecordExpiry: time.Second * 4, + } + + err := s.ClusterMetadataManager.UpsertClusterMembership(s.ctx, req) + s.Nil(err) + + resp, err := s.ClusterMetadataManager.GetClusterMembers( + s.ctx, + &p.GetClusterMembersRequest{LastHeartbeatWithin: time.Minute * 10, HostIDEquals: req.HostID}, + ) + + s.validateUpsert(req, resp, err) + + time.Sleep(time.Second * 1) + resp, err = s.ClusterMetadataManager.GetClusterMembers( + s.ctx, + &p.GetClusterMembersRequest{LastHeartbeatWithin: time.Millisecond, HostIDEquals: req.HostID}, + ) + + s.Nil(err) + s.NotNil(resp) + s.Empty(resp.ActiveMembers) + + resp, err = s.ClusterMetadataManager.GetClusterMembers( + s.ctx, + &p.GetClusterMembersRequest{RoleEquals: p.Matching}, + ) + + s.Nil(err) + s.NotNil(resp) + s.Empty(resp.ActiveMembers) + + resp, err = s.ClusterMetadataManager.GetClusterMembers( + s.ctx, + &p.GetClusterMembersRequest{SessionStartedAfter: time.Now().UTC()}, + ) + + s.Nil(err) + s.NotNil(resp) + s.Empty(resp.ActiveMembers) + + resp, err = s.ClusterMetadataManager.GetClusterMembers( + s.ctx, + &p.GetClusterMembersRequest{SessionStartedAfter: now.Add(-time.Minute), RPCAddressEquals: req.RPCAddress, HostIDEquals: req.HostID}, + ) + + s.validateUpsert(req, resp, err) + + time.Sleep(time.Second * 3) + err = s.ClusterMetadataManager.PruneClusterMembership(s.ctx, &p.PruneClusterMembershipRequest{MaxRecordsPruned: 1000}) + s.NoError(err) +} + +// TestClusterMembershipUpsertExpiresCorrectly verifies RecordExpiry functions properly for ClusterMembership records +func (s *ClusterMetadataManagerSuite) TestClusterMembershipUpsertExpiresCorrectly() { + req := &p.UpsertClusterMembershipRequest{ + HostID: uuid.NewUUID(), + RPCAddress: net.ParseIP("127.0.0.2"), + RPCPort: 123, + Role: p.Frontend, + SessionStart: time.Now().UTC(), + RecordExpiry: time.Second, + } + + err := s.ClusterMetadataManager.UpsertClusterMembership(s.ctx, req) + s.NoError(err) + + err = s.ClusterMetadataManager.PruneClusterMembership(s.ctx, &p.PruneClusterMembershipRequest{MaxRecordsPruned: 100}) + s.NoError(err) + + resp, err := s.ClusterMetadataManager.GetClusterMembers( + s.ctx, + &p.GetClusterMembersRequest{LastHeartbeatWithin: time.Minute * 10, HostIDEquals: req.HostID}, + ) + + s.NoError(err) + s.NotNil(resp) + s.NotEmpty(resp.ActiveMembers) + s.Equal(len(resp.ActiveMembers), 1) + // Have to round to 1 second due to SQL implementations. Cassandra truncates at 1ms. 
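TestClusterMembershipReadFiltersCorrectly relies on the request filters (LastHeartbeatWithin, HostIDEquals, RPCAddressEquals, RoleEquals, SessionStartedAfter) being combined, so a record comes back only when it satisfies every filter passed. A short sketch of a filtered lookup under the same assumptions, with purely illustrative values:

package example

import (
    "context"
    "net"
    "time"

    p "go.temporal.io/server/common/persistence"
)

// findFrontendMember returns a single frontend member by host ID, ignoring records
// whose last heartbeat is older than ten minutes.
func findFrontendMember(ctx context.Context, mgr p.ClusterMetadataManager, hostID []byte) (*p.ClusterMember, error) {
    resp, err := mgr.GetClusterMembers(ctx, &p.GetClusterMembersRequest{
        LastHeartbeatWithin: 10 * time.Minute,
        RoleEquals:          p.Frontend,
        HostIDEquals:        hostID,
        RPCAddressEquals:    net.ParseIP("127.0.0.2"), // illustrative address, matching the test fixture
    })
    if err != nil {
        return nil, err
    }
    if len(resp.ActiveMembers) == 0 {
        return nil, nil // nothing matched every filter
    }
    return resp.ActiveMembers[0], nil
}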
+ s.Equal(resp.ActiveMembers[0].SessionStart.Round(time.Second), req.SessionStart.Round(time.Second)) + s.Equal(resp.ActiveMembers[0].RPCAddress.String(), req.RPCAddress.String()) + s.Equal(resp.ActiveMembers[0].RPCPort, req.RPCPort) + s.True(resp.ActiveMembers[0].RecordExpiry.After(time.Now().UTC())) + s.Equal(resp.ActiveMembers[0].HostID, req.HostID) + s.Equal(resp.ActiveMembers[0].Role, req.Role) + + time.Sleep(time.Second * 2) + + err = s.ClusterMetadataManager.PruneClusterMembership(s.ctx, &p.PruneClusterMembershipRequest{MaxRecordsPruned: 100}) + s.Nil(err) + + resp, err = s.ClusterMetadataManager.GetClusterMembers( + s.ctx, + &p.GetClusterMembersRequest{LastHeartbeatWithin: time.Minute * 10}, + ) + + s.Nil(err) + s.NotNil(resp) + s.Empty(resp.ActiveMembers) +} + +// TestClusterMembershipUpsertInvalidExpiry verifies we cannot specify a non-positive RecordExpiry duration +func (s *ClusterMetadataManagerSuite) TestClusterMembershipUpsertInvalidExpiry() { + req := &p.UpsertClusterMembershipRequest{ + HostID: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}, + RPCAddress: net.ParseIP("127.0.0.2"), + RPCPort: 123, + Role: p.Frontend, + SessionStart: time.Now().UTC(), + RecordExpiry: time.Second * 0, + } + + err := s.ClusterMetadataManager.UpsertClusterMembership(s.ctx, req) + s.NotNil(err) + s.IsType(err, p.ErrInvalidMembershipExpiry) +} + +// TestInitImmutableMetadataReadWrite runs through the various cases of ClusterMetadata behavior +// Cases: +// 1 - Get, no data persisted +// 2 - Init, no data persisted +// 3 - Get, data persisted +// 4 - Init, data persisted +// 5 - Update, add version info and make sure it's persisted and can be retrieved. +// 6 - Delete, no data persisted +func (s *ClusterMetadataManagerSuite) TestInitImmutableMetadataReadWrite() { + clusterNameToPersist := "testing" + historyShardsToPersist := int32(43) + clusterIdToPersist := "12345" + clusterAddress := "cluster-address" + failoverVersionIncrement := int64(10) + initialFailoverVersion := int64(1) + + // Case 1 - Get, mo data persisted + // Fetch the persisted values, there should be nothing on start. + // This doesn't error on no row found, but returns an empty record. 
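The TestInitImmutableMetadataReadWrite cases that follow hinge on SaveClusterMetadata acting as a conditional write: the first save for a cluster name is applied and returns true, a later save that does not carry the current Version returns false, and updates must echo the Version read back from GetClusterMetadata. A sketch of that read-modify-write cycle, assuming an initialized ClusterMetadataManager (the helper name and error text are illustrative):

package example

import (
    "context"
    "fmt"

    versionpb "go.temporal.io/api/version/v1"

    p "go.temporal.io/server/common/persistence"
)

// bumpReportedVersion updates the advertised server version for a cluster using the
// optimistic-concurrency pattern the test exercises.
func bumpReportedVersion(ctx context.Context, mgr p.ClusterMetadataManager, clusterName, version string) error {
    // Read the current record; its Version field is the token for the conditional write.
    getResp, err := mgr.GetClusterMetadata(ctx, &p.GetClusterMetadataRequest{ClusterName: clusterName})
    if err != nil {
        return err
    }

    getResp.VersionInfo = &versionpb.VersionInfo{
        Current: &versionpb.ReleaseInfo{Version: version},
    }

    applied, err := mgr.SaveClusterMetadata(ctx, &p.SaveClusterMetadataRequest{
        ClusterMetadata: getResp.ClusterMetadata,
        Version:         getResp.Version,
    })
    if err != nil {
        return err
    }
    if !applied {
        // Another writer updated the record first; callers can re-read and retry.
        return fmt.Errorf("cluster metadata for %q changed concurrently", clusterName)
    }
    return nil
}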
+ getResp, err := s.ClusterMetadataManager.GetClusterMetadata(s.ctx, &p.GetClusterMetadataRequest{ClusterName: clusterNameToPersist}) + + // Validate they match our initializations + s.NotNil(err) + s.IsType(&serviceerror.NotFound{}, err) + s.Nil(getResp) + + // Case 2 - Init, no data persisted yet + // First commit, this should be persisted + initialResp, err := s.ClusterMetadataManager.SaveClusterMetadata( + s.ctx, + &p.SaveClusterMetadataRequest{ + ClusterMetadata: persistencespb.ClusterMetadata{ + ClusterName: clusterNameToPersist, + HistoryShardCount: historyShardsToPersist, + ClusterId: clusterIdToPersist, + ClusterAddress: clusterAddress, + FailoverVersionIncrement: failoverVersionIncrement, + InitialFailoverVersion: initialFailoverVersion, + IsGlobalNamespaceEnabled: true, + IsConnectionEnabled: true, + }}) + + s.Nil(err) + s.True(initialResp) // request should be applied as this is first initialize + + // Case 3 - Get, data persisted + // Fetch the persisted values + getResp, err = s.ClusterMetadataManager.GetClusterMetadata(s.ctx, &p.GetClusterMetadataRequest{ClusterName: clusterNameToPersist}) + + // Validate they match our initializations + s.Nil(err) + s.True(getResp != nil) + s.Equal(clusterNameToPersist, getResp.ClusterName) + s.Equal(historyShardsToPersist, getResp.HistoryShardCount) + s.Equal(clusterIdToPersist, getResp.ClusterId) + s.Equal(clusterAddress, getResp.ClusterAddress) + s.Equal(failoverVersionIncrement, getResp.FailoverVersionIncrement) + s.Equal(initialFailoverVersion, getResp.InitialFailoverVersion) + s.True(getResp.IsGlobalNamespaceEnabled) + s.True(getResp.IsConnectionEnabled) + + // Case 4 - Init, data persisted + // Attempt to overwrite with new values + secondResp, err := s.ClusterMetadataManager.SaveClusterMetadata(s.ctx, &p.SaveClusterMetadataRequest{ + ClusterMetadata: persistencespb.ClusterMetadata{ + ClusterName: clusterNameToPersist, + HistoryShardCount: int32(77), + }}) + + s.Nil(err) + s.False(secondResp) // Should not have applied, and should match values from first request + + // Refetch persisted + getResp, err = s.ClusterMetadataManager.GetClusterMetadata(s.ctx, &p.GetClusterMetadataRequest{ClusterName: clusterNameToPersist}) + + // Validate they match our initial values + s.Nil(err) + s.NotNil(getResp) + s.Equal(clusterNameToPersist, getResp.ClusterName) + s.Equal(historyShardsToPersist, getResp.HistoryShardCount) + s.Equal(clusterIdToPersist, getResp.ClusterId) + s.Equal(clusterAddress, getResp.ClusterAddress) + s.Equal(failoverVersionIncrement, getResp.FailoverVersionIncrement) + s.Equal(initialFailoverVersion, getResp.InitialFailoverVersion) + s.True(getResp.IsGlobalNamespaceEnabled) + s.True(getResp.IsConnectionEnabled) + + // Case 5 - Update version info + getResp.VersionInfo = &versionpb.VersionInfo{ + Current: &versionpb.ReleaseInfo{ + Version: "1.0", + }, + } + thirdResp, err := s.ClusterMetadataManager.SaveClusterMetadata(s.ctx, &p.SaveClusterMetadataRequest{ + ClusterMetadata: getResp.ClusterMetadata, + Version: getResp.Version, + }) + s.Nil(err) + s.True(thirdResp) + getResp, err = s.ClusterMetadataManager.GetClusterMetadata(s.ctx, &p.GetClusterMetadataRequest{ClusterName: clusterNameToPersist}) + s.Nil(err) + s.NotNil(getResp) + s.Equal("1.0", getResp.ClusterMetadata.VersionInfo.Current.Version) + + // Case 6 - Delete Cluster Metadata + err = s.ClusterMetadataManager.DeleteClusterMetadata(s.ctx, &p.DeleteClusterMetadataRequest{ClusterName: clusterNameToPersist}) + s.Nil(err) + getResp, err = 
s.ClusterMetadataManager.GetClusterMetadata(s.ctx, &p.GetClusterMetadataRequest{ClusterName: clusterNameToPersist}) + + // Validate they match our initializations + s.NotNil(err) + s.IsType(&serviceerror.NotFound{}, err) + s.Nil(getResp) + + // Case 7 - Update current cluster metadata + clusterNameToPersist = "active" + initialResp, err = s.ClusterMetadataManager.SaveClusterMetadata( + s.ctx, + &p.SaveClusterMetadataRequest{ + ClusterMetadata: persistencespb.ClusterMetadata{ + ClusterName: clusterNameToPersist, + HistoryShardCount: historyShardsToPersist, + ClusterId: clusterIdToPersist, + ClusterAddress: clusterAddress, + FailoverVersionIncrement: failoverVersionIncrement, + InitialFailoverVersion: initialFailoverVersion, + IsGlobalNamespaceEnabled: true, + IsConnectionEnabled: true, + }}) + s.Nil(err) + s.True(initialResp) + + // Case 8 - Get, data persisted + // Fetch the persisted values + getResp, err = s.ClusterMetadataManager.GetClusterMetadata(s.ctx, &p.GetClusterMetadataRequest{ClusterName: clusterNameToPersist}) + + // Validate they match our initializations + s.Nil(err) + s.True(getResp != nil) + s.Equal(clusterNameToPersist, getResp.ClusterName) + s.Equal(historyShardsToPersist, getResp.HistoryShardCount) + s.Equal(clusterIdToPersist, getResp.ClusterId) + s.Equal(clusterAddress, getResp.ClusterAddress) + s.Equal(failoverVersionIncrement, getResp.FailoverVersionIncrement) + s.Equal(initialFailoverVersion, getResp.InitialFailoverVersion) + s.True(getResp.IsGlobalNamespaceEnabled) + s.True(getResp.IsConnectionEnabled) + + // Case 9 - Update current cluster metadata + getResp.VersionInfo = &versionpb.VersionInfo{ + Current: &versionpb.ReleaseInfo{ + Version: "2.0", + }, + } + applied, err := s.ClusterMetadataManager.SaveClusterMetadata(s.ctx, &p.SaveClusterMetadataRequest{ + ClusterMetadata: getResp.ClusterMetadata, + Version: getResp.Version, + }) + s.True(applied) + s.NoError(err) + + // Case 10 - Get, data persisted + // Fetch the persisted values + getResp, err = s.ClusterMetadataManager.GetClusterMetadata(s.ctx, &p.GetClusterMetadataRequest{ClusterName: clusterNameToPersist}) + s.NoError(err) + s.Equal("2.0", getResp.ClusterMetadata.VersionInfo.Current.Version) + + // Case 11 - List + _, err = s.ClusterMetadataManager.SaveClusterMetadata( + s.ctx, + &p.SaveClusterMetadataRequest{ + ClusterMetadata: persistencespb.ClusterMetadata{ + ClusterName: clusterNameToPersist + "2", + HistoryShardCount: historyShardsToPersist, + ClusterId: clusterIdToPersist, + ClusterAddress: clusterAddress, + FailoverVersionIncrement: failoverVersionIncrement, + InitialFailoverVersion: initialFailoverVersion, + IsGlobalNamespaceEnabled: true, + IsConnectionEnabled: true, + }}) + s.NoError(err) + + resp, err := s.ClusterMetadataManager.ListClusterMetadata(s.ctx, &p.ListClusterMetadataRequest{PageSize: 1}) + s.NoError(err) + s.Equal(1, len(resp.ClusterMetadata)) + resp, err = s.ClusterMetadataManager.ListClusterMetadata(s.ctx, &p.ListClusterMetadataRequest{PageSize: 1, NextPageToken: resp.NextPageToken}) + s.NoError(err) + s.Equal(1, len(resp.ClusterMetadata)) +} diff -Nru temporal-1.21.5-1/src/common/persistence/persistence-tests/historyV2PersistenceTest.go temporal-1.22.5/src/common/persistence/persistence-tests/historyV2PersistenceTest.go --- temporal-1.21.5-1/src/common/persistence/persistence-tests/historyV2PersistenceTest.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/persistence-tests/historyV2PersistenceTest.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,904 +0,0 
@@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package persistencetests - -import ( - "context" - "math/rand" - "reflect" - "sync" - "sync/atomic" - "time" - - "github.com/pborman/uuid" - "github.com/stretchr/testify/require" - enumspb "go.temporal.io/api/enums/v1" - historypb "go.temporal.io/api/history/v1" - "go.temporal.io/api/serviceerror" - - "go.temporal.io/server/common/debug" - "go.temporal.io/server/common/persistence/serialization" - - persistencespb "go.temporal.io/server/api/persistence/v1" - "go.temporal.io/server/common/backoff" - p "go.temporal.io/server/common/persistence" -) - -type ( - // HistoryV2PersistenceSuite contains history persistence tests - HistoryV2PersistenceSuite struct { - // suite.Suite - TestBase - // override suite.Suite.Assertions with require.Assertions; this means that s.NotNil(nil) will stop the test, - // not merely log an error - *require.Assertions - - ctx context.Context - cancel context.CancelFunc - } -) - -const testForkRunID = "11220000-0000-f000-f000-000000000000" - -var ( - historyTestRetryPolicy = backoff.NewExponentialRetryPolicy(time.Millisecond * 50). - WithMaximumInterval(time.Second * 3). - WithExpirationInterval(time.Second * 30) -) - -func isConditionFail(err error) bool { - switch err.(type) { - case *p.ConditionFailedError: - return true - default: - return false - } -} - -// SetupSuite implementation -func (s *HistoryV2PersistenceSuite) SetupSuite() { -} - -// TearDownSuite implementation -func (s *HistoryV2PersistenceSuite) TearDownSuite() { - s.TearDownWorkflowStore() -} - -// SetupTest implementation -func (s *HistoryV2PersistenceSuite) SetupTest() { - // Have to define our overridden assertions in the test setup. 
If we did it earlier, s.T() will return nil - s.Assertions = require.New(s.T()) - - s.ctx, s.cancel = context.WithTimeout(context.Background(), 30*time.Second*debug.TimeoutMultiplier) -} - -// TearDownTest implementation -func (s *HistoryV2PersistenceSuite) TearDownTest() { - s.cancel() -} - -// TestGenUUIDs testing uuid.New() can generate unique UUID -func (s *HistoryV2PersistenceSuite) TestGenUUIDs() { - wg := sync.WaitGroup{} - m := sync.Map{} - concurrency := 1000 - for i := 0; i < concurrency; i++ { - wg.Add(1) - go func() { - defer wg.Done() - u := uuid.New() - m.Store(u, true) - }() - } - wg.Wait() - cnt := 0 - m.Range(func(k, v interface{}) bool { - cnt++ - return true - }) - s.Equal(concurrency, cnt) -} - -// TestScanAllTrees test -func (s *HistoryV2PersistenceSuite) TestScanAllTrees() { - resp, err := s.ExecutionManager.GetAllHistoryTreeBranches(s.ctx, &p.GetAllHistoryTreeBranchesRequest{ - PageSize: 1, - }) - s.Nil(err) - s.Equal(0, len(resp.Branches), "some trees were leaked in other tests") - - trees := map[string]bool{} - totalTrees := 1002 - pgSize := 100 - - for i := 0; i < totalTrees; i++ { - treeID := uuid.NewRandom().String() - bi, err := s.newHistoryBranch(treeID) - s.Nil(err) - - events := s.genRandomEvents([]int64{1, 2, 3}, 1) - err = s.appendNewBranchAndFirstNode(bi, events, 1, "branchInfo") - s.Nil(err) - trees[string(treeID)] = true - } - - var pgToken []byte - for { - resp, err := s.ExecutionManager.GetAllHistoryTreeBranches(s.ctx, &p.GetAllHistoryTreeBranchesRequest{ - PageSize: pgSize, - NextPageToken: pgToken, - }) - s.Nil(err) - for _, br := range resp.Branches { - branch, err := serialization.HistoryBranchFromBlob(br.BranchToken, enumspb.ENCODING_TYPE_PROTO3.String()) - s.NoError(err) - uuidTreeId := branch.TreeId - if trees[uuidTreeId] { - delete(trees, uuidTreeId) - - s.True(br.ForkTime.UnixNano() > 0) - s.True(len(branch.BranchId) > 0) - s.Equal("branchInfo", br.Info) - } else { - s.Fail("treeID not found", branch.TreeId) - } - } - - if len(resp.NextPageToken) == 0 { - break - } - pgToken = resp.NextPageToken - } - - s.Equal(0, len(trees)) -} - -// TestReadBranchByPagination test -func (s *HistoryV2PersistenceSuite) TestReadBranchByPagination() { - treeID := uuid.NewRandom().String() - bi, err := s.newHistoryBranch(treeID) - s.Nil(err) - - historyW := &historypb.History{} - events := s.genRandomEvents([]int64{1, 2, 3}, 0) - err = s.appendNewBranchAndFirstNode(bi, events, 1, "branchInfo") - s.Nil(err) - historyW.Events = events - - events = s.genRandomEvents([]int64{4}, 0) - err = s.appendNewNode(bi, events, 2) - s.Nil(err) - historyW.Events = append(historyW.Events, events...) - - events = s.genRandomEvents([]int64{5, 6, 7, 8}, 4) - err = s.appendNewNode(bi, events, 6) - s.Nil(err) - historyW.Events = append(historyW.Events, events...) - - // stale event batch - events = s.genRandomEvents([]int64{6, 7, 8}, 1) - err = s.appendNewNode(bi, events, 3) - s.Nil(err) - // stale event batch - events = s.genRandomEvents([]int64{6, 7, 8}, 2) - err = s.appendNewNode(bi, events, 4) - s.Nil(err) - // stale event batch - events = s.genRandomEvents([]int64{6, 7, 8}, 3) - err = s.appendNewNode(bi, events, 5) - s.Nil(err) - - events = s.genRandomEvents([]int64{9}, 4) - err = s.appendNewNode(bi, events, 7) - s.Nil(err) - historyW.Events = append(historyW.Events, events...) 
- - // Start to read from middle, should not return error, but the first batch should be ignored by application layer - req := &p.ReadHistoryBranchRequest{ - BranchToken: bi, - MinEventID: 6, - MaxEventID: 10, - PageSize: 4, - NextPageToken: nil, - ShardID: s.ShardInfo.GetShardId(), - } - // first page - resp, err := s.ExecutionManager.ReadHistoryBranch(s.ctx, req) - s.Nil(err) - s.Equal(4, len(resp.HistoryEvents)) - s.Equal(int64(6), resp.HistoryEvents[0].GetEventId()) - - events = s.genRandomEvents([]int64{10}, 4) - err = s.appendNewNode(bi, events, 8) - s.Nil(err) - historyW.Events = append(historyW.Events, events...) - - events = s.genRandomEvents([]int64{11}, 4) - err = s.appendNewNode(bi, events, 9) - s.Nil(err) - historyW.Events = append(historyW.Events, events...) - - events = s.genRandomEvents([]int64{12}, 4) - err = s.appendNewNode(bi, events, 10) - s.Nil(err) - historyW.Events = append(historyW.Events, events...) - - events = s.genRandomEvents([]int64{13, 14, 15}, 4) - err = s.appendNewNode(bi, events, 11) - s.Nil(err) - // we don't append this batch because we will fork from 13 - // historyW.Events = append(historyW.Events, events...) - - // fork from here - bi2, err := s.fork(bi, 13) - s.Nil(err) - - events = s.genRandomEvents([]int64{13}, 4) - err = s.appendNewNode(bi2, events, 12) - s.Nil(err) - historyW.Events = append(historyW.Events, events...) - - events = s.genRandomEvents([]int64{14}, 4) - err = s.appendNewNode(bi2, events, 13) - s.Nil(err) - historyW.Events = append(historyW.Events, events...) - - events = s.genRandomEvents([]int64{15, 16, 17}, 4) - err = s.appendNewNode(bi2, events, 14) - s.Nil(err) - historyW.Events = append(historyW.Events, events...) - - events = s.genRandomEvents([]int64{18, 19, 20}, 4) - err = s.appendNewNode(bi2, events, 15) - s.Nil(err) - historyW.Events = append(historyW.Events, events...) - - // read branch to verify - historyR := &historypb.History{} - - req = &p.ReadHistoryBranchRequest{ - BranchToken: bi2, - MinEventID: 1, - MaxEventID: 21, - PageSize: 3, - NextPageToken: nil, - ShardID: s.ShardInfo.GetShardId(), - } - - // first page - resp, err = s.ExecutionManager.ReadHistoryBranch(s.ctx, req) - s.Nil(err) - - s.Equal(8, len(resp.HistoryEvents)) - historyR.Events = append(historyR.Events, resp.HistoryEvents...) - req.NextPageToken = resp.NextPageToken - - // this page is all stale batches - // doe to difference in Cassandra / MySQL pagination - // the stale event batch may get returned - resp, err = s.ExecutionManager.ReadHistoryBranch(s.ctx, req) - s.Nil(err) - historyR.Events = append(historyR.Events, resp.HistoryEvents...) - req.NextPageToken = resp.NextPageToken - if len(resp.HistoryEvents) == 0 { - // second page - resp, err = s.ExecutionManager.ReadHistoryBranch(s.ctx, req) - s.Nil(err) - s.Equal(3, len(resp.HistoryEvents)) - historyR.Events = append(historyR.Events, resp.HistoryEvents...) - req.NextPageToken = resp.NextPageToken - } else if len(resp.HistoryEvents) == 3 { - // no op - } else { - s.Fail("should either return 0 (Cassandra) or 3 (MySQL) events") - } - - // 3rd page, since we fork from nodeID=13, we can only see one batch of 12 here - resp, err = s.ExecutionManager.ReadHistoryBranch(s.ctx, req) - s.Nil(err) - s.Equal(1, len(resp.HistoryEvents)) - historyR.Events = append(historyR.Events, resp.HistoryEvents...) 
- req.NextPageToken = resp.NextPageToken - - // 4th page, 13~17 - resp, err = s.ExecutionManager.ReadHistoryBranch(s.ctx, req) - s.Nil(err) - s.Equal(5, len(resp.HistoryEvents)) - historyR.Events = append(historyR.Events, resp.HistoryEvents...) - req.NextPageToken = resp.NextPageToken - - // last page: one batch of 18-20 - // We have only one page left and the page size is set to one. In this case, - // persistence may or may not return a nextPageToken. - // If it does return a token, we need to ensure that if the token returned is used - // to get history again, no error and history events should be returned. - req.PageSize = 1 - resp, err = s.ExecutionManager.ReadHistoryBranch(s.ctx, req) - s.Nil(err) - s.Equal(3, len(resp.HistoryEvents)) - historyR.Events = append(historyR.Events, resp.HistoryEvents...) - req.NextPageToken = resp.NextPageToken - if len(resp.NextPageToken) != 0 { - resp, err = s.ExecutionManager.ReadHistoryBranch(s.ctx, req) - s.Nil(err) - s.Equal(0, len(resp.HistoryEvents)) - } - - s.True(reflect.DeepEqual(historyW, historyR)) - s.Equal(0, len(resp.NextPageToken)) - - // MinEventID is in the middle of the last batch and this is the first request (NextPageToken - // is empty), the call should return an error. - req.MinEventID = 19 - req.NextPageToken = nil - _, err = s.ExecutionManager.ReadHistoryBranch(s.ctx, req) - s.IsType(&serviceerror.NotFound{}, err) - - err = s.deleteHistoryBranch(bi2) - s.Nil(err) - err = s.deleteHistoryBranch(bi) - s.Nil(err) - branches := s.descTree(treeID) - s.Equal(0, len(branches)) -} - -// TestConcurrentlyCreateAndAppendBranches test -func (s *HistoryV2PersistenceSuite) TestConcurrentlyCreateAndAppendBranches() { - treeID := uuid.NewRandom().String() - wg := sync.WaitGroup{} - concurrency := 1 - m := sync.Map{} - - // test create new branch along with appending new nodes - for i := 0; i < concurrency; i++ { - wg.Add(1) - go func(idx int) { - defer wg.Done() - bi, err := s.newHistoryBranch(treeID) - s.Nil(err) - historyW := &historypb.History{} - m.Store(idx, bi) - - events := s.genRandomEvents([]int64{1, 2, 3}, 1) - err = s.appendNewBranchAndFirstNode(bi, events, 1, "branchInfo") - s.Nil(err) - historyW.Events = events - - events = s.genRandomEvents([]int64{4}, 1) - err = s.appendNewNode(bi, events, 2) - s.Nil(err) - historyW.Events = append(historyW.Events, events...) - - events = s.genRandomEvents([]int64{5, 6, 7, 8}, 1) - err = s.appendNewNode(bi, events, 3) - s.Nil(err) - historyW.Events = append(historyW.Events, events...) - - events = s.genRandomEvents([]int64{9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20}, 1) - err = s.appendNewNode(bi, events, 4000) - s.Nil(err) - historyW.Events = append(historyW.Events, events...) 
- - // read branch to verify - historyR := &historypb.History{} - events = s.read(bi, 1, 21) - s.Equal(20, len(events)) - historyR.Events = events - - s.True(reflect.DeepEqual(historyW, historyR)) - }(i) - } - - wg.Wait() - branches := s.descTree(treeID) - s.Equal(concurrency, len(branches)) - - wg = sync.WaitGroup{} - // test appending nodes(override and new nodes) on each branch concurrently - for i := 0; i < concurrency; i++ { - wg.Add(1) - go func(idx int) { - defer wg.Done() - - branch := s.getBranchByKey(m, idx) - - // override with smaller txn_id - events := s.genRandomEvents([]int64{5}, 1) - err := s.appendNewNode(branch, events, 0) - s.Nil(err) - // it shouldn't change anything - events = s.read(branch, 1, 25) - s.Equal(20, len(events)) - - // override with greatest txn_id - events = s.genRandomEvents([]int64{5}, 1) - err = s.appendNewNode(branch, events, 3000) - s.Nil(err) - - // read to verify override success, at this point history is corrupted, missing 6/7/8, so we should only see 5 events - events = s.read(branch, 1, 6) - s.Equal(5, len(events)) - _, err = s.readWithError(branch, 1, 25) - _, ok := err.(*serviceerror.DataLoss) - s.Equal(true, ok) - - // override with even larger txn_id and same version - events = s.genRandomEvents([]int64{5, 6}, 1) - err = s.appendNewNode(branch, events, 3001) - s.Nil(err) - - // read to verify override success, at this point history is corrupted, missing 7/8, so we should only see 6 events - events = s.read(branch, 1, 7) - s.Equal(6, len(events)) - _, err = s.readWithError(branch, 1, 25) - _, ok = err.(*serviceerror.DataLoss) - s.Equal(true, ok) - - // override more with larger txn_id, this would fix the corrupted hole so that we cna get 20 events again - events = s.genRandomEvents([]int64{7, 8}, 1) - err = s.appendNewNode(branch, events, 3002) - s.Nil(err) - - // read to verify override - events = s.read(branch, 1, 25) - s.Equal(20, len(events)) - events = s.genRandomEvents([]int64{9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23}, 1) - err = s.appendNewNode(branch, events, 4001) - s.Nil(err) - events = s.read(branch, 1, 25) - s.Equal(23, len(events)) - }(i) - } - - wg.Wait() - // Finally lets clean up all branches - m.Range(func(k, v interface{}) bool { - br := v.([]byte) - // delete old branches along with create new branches - err := s.deleteHistoryBranch(br) - s.Nil(err) - return true - }) - - branches = s.descTree(treeID) - s.Equal(0, len(branches)) -} - -// TestConcurrentlyForkAndAppendBranches test -func (s *HistoryV2PersistenceSuite) TestConcurrentlyForkAndAppendBranches() { - treeID := uuid.NewRandom().String() - wg := sync.WaitGroup{} - concurrency := 10 - masterBr, err := s.newHistoryBranch(treeID) - s.Nil(err) - branches := s.descTree(treeID) - s.Equal(0, len(branches)) - - // append first batch to master branch - eids := []int64{} - for i := int64(1); i <= int64(concurrency)+1; i++ { - eids = append(eids, i) - } - events := s.genRandomEvents(eids, 1) - err = s.appendNewBranchAndFirstNode(masterBr, events[0:1], 1, "masterbr") - s.Nil(err) - - readEvents := s.read(masterBr, 1, int64(concurrency)+2) - s.Nil(err) - s.Equal(1, len(readEvents)) - - branches = s.descTree(treeID) - s.Equal(1, len(branches)) - mbrID := branches[0].BranchId - - txn := int64(1) - getTxnLock := sync.Mutex{} - reserveTxn := func(count int) int64 { - getTxnLock.Lock() - defer getTxnLock.Unlock() - - ret := txn - txn += int64(count) - return ret - } - - err = s.appendOneByOne(masterBr, events[1:], reserveTxn(len(events[1:]))) - s.Nil(err) - events = 
s.read(masterBr, 1, int64(concurrency)+2) - s.Nil(err) - s.Equal((concurrency)+1, len(events)) - - level1ID := sync.Map{} - level1Br := sync.Map{} - // test forking from master branch and append nodes - for i := 0; i < concurrency; i++ { - wg.Add(1) - go func(idx int) { - defer wg.Done() - - forkNodeID := rand.Int63n(int64(concurrency)) + 2 - level1ID.Store(idx, forkNodeID) - - bi, err := s.fork(masterBr, forkNodeID) - s.Nil(err) - level1Br.Store(idx, bi) - - // cannot append to ancestors - events := s.genRandomEvents([]int64{forkNodeID - 1}, 1) - err = s.appendNewNode(bi, events, reserveTxn(1)) - _, ok := err.(*p.InvalidPersistenceRequestError) - s.Equal(true, ok) - - // append second batch to first level - eids := make([]int64, 0) - for i := forkNodeID; i <= int64(concurrency)*2+1; i++ { - eids = append(eids, i) - } - events = s.genRandomEvents(eids, 1) - - err = s.appendNewNode(bi, events[0:1], reserveTxn(1)) - s.Nil(err) - - err = s.appendOneByOne(bi, events[1:], reserveTxn(len(events[1:]))) - s.Nil(err) - - events = s.read(bi, 1, int64(concurrency)*2+2) - s.Nil(err) - s.Equal((concurrency)*2+1, len(events)) - - if idx == 0 { - err = s.deleteHistoryBranch(bi) - s.Nil(err) - } - - }(i) - } - - wg.Wait() - branches = s.descTree(treeID) - s.Equal(concurrency, len(branches)) - forkOnLevel1 := int32(0) - level2Br := sync.Map{} - wg = sync.WaitGroup{} - - // test forking for second level of branch - for i := 1; i < concurrency; i++ { - wg.Add(1) - go func(idx int) { - defer wg.Done() - - // Event we fork from level1 branch, it is possible that the new branch will fork from master branch - forkNodeID := rand.Int63n(int64(concurrency)*2) + 2 - forkBr := s.getBranchByKey(level1Br, idx) - lastForkNodeID := s.getIDByKey(level1ID, idx) - - if forkNodeID > lastForkNodeID { - atomic.AddInt32(&forkOnLevel1, int32(1)) - } - - bi, err := s.fork(forkBr, forkNodeID) - s.Nil(err) - level2Br.Store(idx, bi) - - // append second batch to second level - eids := make([]int64, 0) - for i := forkNodeID; i <= int64(concurrency)*3+1; i++ { - eids = append(eids, i) - } - events := s.genRandomEvents(eids, 1) - err = s.appendNewNode(bi, events[0:1], reserveTxn(1)) - s.Nil(err) - err = s.appendOneByOne(bi, events[1:], reserveTxn(len(events[1:]))) - s.Nil(err) - events = s.read(bi, 1, int64(concurrency)*3+2) - s.Nil(err) - s.Equal((concurrency)*3+1, len(events)) - - // try override last event - events = s.genRandomEvents([]int64{int64(concurrency)*3 + 1}, 1) - err = s.appendNewNode(bi, events, reserveTxn(1)) - s.Nil(err) - events = s.read(bi, 1, int64(concurrency)*3+2) - s.Nil(err) - s.Equal((concurrency)*3+1, len(events)) - - // test fork and newBranch concurrently - bi, err = s.newHistoryBranch(treeID) - s.Nil(err) - level2Br.Store(concurrency+idx, bi) - - events = s.genRandomEvents([]int64{1}, 1) - err = s.appendNewBranchAndFirstNode(bi, events, reserveTxn(1), "newbr") - s.Nil(err) - - }(i) - } - - wg.Wait() - branches = s.descTree(treeID) - s.Equal(concurrency*3-2, len(branches)) - actualForkOnLevel1 := int32(0) - masterCnt := 0 - for _, b := range branches { - if len(b.Ancestors) == 2 { - actualForkOnLevel1++ - } else if len(b.Ancestors) == 0 { - masterCnt++ - } else { - s.Equal(1, len(b.Ancestors)) - s.Equal(mbrID, b.Ancestors[0].GetBranchId()) - } - } - s.Equal(forkOnLevel1, actualForkOnLevel1) - s.Equal(concurrency, masterCnt) - - // Finally lets clean up all branches - level1Br.Range(func(k, v interface{}) bool { - br := v.([]byte) - // delete old branches along with create new branches - err := 
s.deleteHistoryBranch(br) - s.Nil(err) - - return true - }) - level2Br.Range(func(k, v interface{}) bool { - br := v.([]byte) - // delete old branches along with create new branches - err := s.deleteHistoryBranch(br) - s.Nil(err) - - return true - }) - err = s.deleteHistoryBranch(masterBr) - s.Nil(err) - - branches = s.descTree(treeID) - s.Equal(0, len(branches)) - -} - -// TestTreeInfoCompatibility test -func (s *HistoryV2PersistenceSuite) TestTreeInfoCompatibility() { - serializer := serialization.NewSerializer() - treeID := uuid.NewRandom().String() - branchID := uuid.NewRandom().String() - originalBranch := &persistencespb.HistoryBranch{ - TreeId: treeID, - BranchId: branchID, - } - originalToken, err := serializer.HistoryBranchToBlob(originalBranch, enumspb.ENCODING_TYPE_PROTO3) - s.NoError(err) - - // Forward compatibility -> infer missing branch token - blob, err := serializer.HistoryTreeInfoToBlob( - &persistencespb.HistoryTreeInfo{ - BranchInfo: originalBranch, - // NOTE: Intentionally missing BranchToken - }, - enumspb.ENCODING_TYPE_PROTO3, - ) - s.NoError(err) - info, err := p.ToHistoryTreeInfo(serializer, blob) - s.NoError(err) - s.Equal(originalToken.Data, info.BranchToken) - s.Equal(originalBranch, info.BranchInfo) - - // Backward compatibility -> infer missing branch info - blob, err = serializer.HistoryTreeInfoToBlob( - &persistencespb.HistoryTreeInfo{ - BranchToken: originalToken.Data, - // NOTE: Intentionally missing BranchInfo - }, - enumspb.ENCODING_TYPE_PROTO3, - ) - s.NoError(err) - info, err = p.ToHistoryTreeInfo(serializer, blob) - s.NoError(err) - s.Equal(originalToken.Data, info.BranchToken) - s.Equal(originalBranch, info.BranchInfo) -} - -func (s *HistoryV2PersistenceSuite) getBranchByKey(m sync.Map, k int) []byte { - v, ok := m.Load(k) - s.Equal(true, ok) - br := v.([]byte) - return br -} - -func (s *HistoryV2PersistenceSuite) getIDByKey(m sync.Map, k int) int64 { - v, ok := m.Load(k) - s.Equal(true, ok) - id := v.(int64) - return id -} - -func (s *HistoryV2PersistenceSuite) genRandomEvents(eventIDs []int64, version int64) []*historypb.HistoryEvent { - var events []*historypb.HistoryEvent - - now := time.Date(2020, 8, 22, 0, 0, 0, 0, time.UTC) - for _, eid := range eventIDs { - e := &historypb.HistoryEvent{EventId: eid, Version: version, EventTime: &now} - events = append(events, e) - } - - return events -} - -// persistence helper -func (s *HistoryV2PersistenceSuite) newHistoryBranch(treeID string) ([]byte, error) { - return s.ExecutionManager.GetHistoryBranchUtil().NewHistoryBranch( - uuid.New(), - treeID, - nil, - []*persistencespb.HistoryBranchRange{}, - nil, - nil, - nil, - ) -} - -// persistence helper -func (s *HistoryV2PersistenceSuite) deleteHistoryBranch(branch []byte) error { - - op := func() error { - return s.ExecutionManager.DeleteHistoryBranch(s.ctx, &p.DeleteHistoryBranchRequest{ - BranchToken: branch, - ShardID: s.ShardInfo.GetShardId(), - }) - } - - return backoff.ThrottleRetry(op, historyTestRetryPolicy, isConditionFail) -} - -// persistence helper -func (s *HistoryV2PersistenceSuite) descTree(treeID string) []*persistencespb.HistoryBranch { - resp, err := s.ExecutionManager.GetHistoryTree(s.ctx, &p.GetHistoryTreeRequest{ - TreeID: treeID, - ShardID: s.ShardInfo.GetShardId(), - }) - s.Nil(err) - branches, err := s.toHistoryBranches(resp.BranchTokens) - s.NoError(err) - return branches -} - -func (s *HistoryV2PersistenceSuite) toHistoryBranches(branchTokens [][]byte) ([]*persistencespb.HistoryBranch, error) { - branches := 
make([]*persistencespb.HistoryBranch, len(branchTokens)) - for i, b := range branchTokens { - branch, err := serialization.HistoryBranchFromBlob(b, enumspb.ENCODING_TYPE_PROTO3.String()) - if err != nil { - return nil, err - } - branches[i] = branch - } - return branches, nil -} - -// persistence helper -func (s *HistoryV2PersistenceSuite) read(branch []byte, minID, maxID int64) []*historypb.HistoryEvent { - res, err := s.readWithError(branch, minID, maxID) - s.Nil(err) - return res -} - -func (s *HistoryV2PersistenceSuite) readWithError(branch []byte, minID, maxID int64) ([]*historypb.HistoryEvent, error) { - - // use small page size to enforce pagination - randPageSize := 2 - res := make([]*historypb.HistoryEvent, 0) - token := []byte{} - for { - resp, err := s.ExecutionManager.ReadHistoryBranch(s.ctx, &p.ReadHistoryBranchRequest{ - BranchToken: branch, - MinEventID: minID, - MaxEventID: maxID, - PageSize: randPageSize, - NextPageToken: token, - ShardID: s.ShardInfo.GetShardId(), - }) - if err != nil { - return nil, err - } - if len(resp.HistoryEvents) > 0 { - s.True(resp.Size > 0) - } - res = append(res, resp.HistoryEvents...) - token = resp.NextPageToken - if len(token) == 0 { - break - } - } - - return res, nil -} - -func (s *HistoryV2PersistenceSuite) appendOneByOne(branch []byte, events []*historypb.HistoryEvent, txnID int64) error { - for index, e := range events { - err := s.append(branch, []*historypb.HistoryEvent{e}, txnID+int64(index), false, "") - if err != nil { - return err - } - } - return nil -} - -func (s *HistoryV2PersistenceSuite) appendNewNode(branch []byte, events []*historypb.HistoryEvent, txnID int64) error { - return s.append(branch, events, txnID, false, "") -} - -func (s *HistoryV2PersistenceSuite) appendNewBranchAndFirstNode(branch []byte, events []*historypb.HistoryEvent, txnID int64, branchInfo string) error { - return s.append(branch, events, txnID, true, branchInfo) -} - -// persistence helper -func (s *HistoryV2PersistenceSuite) append(branch []byte, events []*historypb.HistoryEvent, txnID int64, isNewBranch bool, branchInfo string) error { - - var resp *p.AppendHistoryNodesResponse - - op := func() error { - var err error - resp, err = s.ExecutionManager.AppendHistoryNodes(s.ctx, &p.AppendHistoryNodesRequest{ - IsNewBranch: isNewBranch, - Info: branchInfo, - BranchToken: branch, - Events: events, - TransactionID: txnID, - ShardID: s.ShardInfo.GetShardId(), - }) - return err - } - - err := backoff.ThrottleRetry(op, historyTestRetryPolicy, isConditionFail) - if err != nil { - return err - } - s.True(resp.Size > 0) - - return err -} - -// persistence helper -func (s *HistoryV2PersistenceSuite) fork(forkBranch []byte, forkNodeID int64) ([]byte, error) { - - bi := []byte{} - - op := func() error { - var err error - resp, err := s.ExecutionManager.ForkHistoryBranch(s.ctx, &p.ForkHistoryBranchRequest{ - ForkBranchToken: forkBranch, - ForkNodeID: forkNodeID, - Info: testForkRunID, - ShardID: s.ShardInfo.GetShardId(), - NamespaceID: uuid.New(), - }) - if resp != nil { - bi = resp.NewBranchToken - } - return err - } - - err := backoff.ThrottleRetry(op, historyTestRetryPolicy, isConditionFail) - return bi, err -} diff -Nru temporal-1.21.5-1/src/common/persistence/persistence-tests/history_v2_persistence.go temporal-1.22.5/src/common/persistence/persistence-tests/history_v2_persistence.go --- temporal-1.21.5-1/src/common/persistence/persistence-tests/history_v2_persistence.go 1970-01-01 00:00:00.000000000 +0000 +++ 
temporal-1.22.5/src/common/persistence/persistence-tests/history_v2_persistence.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,904 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package persistencetests + +import ( + "context" + "math/rand" + "reflect" + "sync" + "sync/atomic" + "time" + + "github.com/pborman/uuid" + "github.com/stretchr/testify/require" + enumspb "go.temporal.io/api/enums/v1" + historypb "go.temporal.io/api/history/v1" + "go.temporal.io/api/serviceerror" + + "go.temporal.io/server/common/debug" + "go.temporal.io/server/common/persistence/serialization" + + persistencespb "go.temporal.io/server/api/persistence/v1" + "go.temporal.io/server/common/backoff" + p "go.temporal.io/server/common/persistence" +) + +type ( + // HistoryV2PersistenceSuite contains history persistence tests + HistoryV2PersistenceSuite struct { + // suite.Suite + TestBase + // override suite.Suite.Assertions with require.Assertions; this means that s.NotNil(nil) will stop the test, + // not merely log an error + *require.Assertions + + ctx context.Context + cancel context.CancelFunc + } +) + +const testForkRunID = "11220000-0000-f000-f000-000000000000" + +var ( + historyTestRetryPolicy = backoff.NewExponentialRetryPolicy(time.Millisecond * 50). + WithMaximumInterval(time.Second * 3). + WithExpirationInterval(time.Second * 30) +) + +func isConditionFail(err error) bool { + switch err.(type) { + case *p.ConditionFailedError: + return true + default: + return false + } +} + +// SetupSuite implementation +func (s *HistoryV2PersistenceSuite) SetupSuite() { +} + +// TearDownSuite implementation +func (s *HistoryV2PersistenceSuite) TearDownSuite() { + s.TearDownWorkflowStore() +} + +// SetupTest implementation +func (s *HistoryV2PersistenceSuite) SetupTest() { + // Have to define our overridden assertions in the test setup. 
If we did it earlier, s.T() will return nil + s.Assertions = require.New(s.T()) + + s.ctx, s.cancel = context.WithTimeout(context.Background(), 30*time.Second*debug.TimeoutMultiplier) +} + +// TearDownTest implementation +func (s *HistoryV2PersistenceSuite) TearDownTest() { + s.cancel() +} + +// TestGenUUIDs testing uuid.New() can generate unique UUID +func (s *HistoryV2PersistenceSuite) TestGenUUIDs() { + wg := sync.WaitGroup{} + m := sync.Map{} + concurrency := 1000 + for i := 0; i < concurrency; i++ { + wg.Add(1) + go func() { + defer wg.Done() + u := uuid.New() + m.Store(u, true) + }() + } + wg.Wait() + cnt := 0 + m.Range(func(k, v interface{}) bool { + cnt++ + return true + }) + s.Equal(concurrency, cnt) +} + +// TestScanAllTrees test +func (s *HistoryV2PersistenceSuite) TestScanAllTrees() { + resp, err := s.ExecutionManager.GetAllHistoryTreeBranches(s.ctx, &p.GetAllHistoryTreeBranchesRequest{ + PageSize: 1, + }) + s.Nil(err) + s.Equal(0, len(resp.Branches), "some trees were leaked in other tests") + + trees := map[string]bool{} + totalTrees := 1002 + pgSize := 100 + + for i := 0; i < totalTrees; i++ { + treeID := uuid.NewRandom().String() + bi, err := s.newHistoryBranch(treeID) + s.Nil(err) + + events := s.genRandomEvents([]int64{1, 2, 3}, 1) + err = s.appendNewBranchAndFirstNode(bi, events, 1, "branchInfo") + s.Nil(err) + trees[string(treeID)] = true + } + + var pgToken []byte + for { + resp, err := s.ExecutionManager.GetAllHistoryTreeBranches(s.ctx, &p.GetAllHistoryTreeBranchesRequest{ + PageSize: pgSize, + NextPageToken: pgToken, + }) + s.Nil(err) + for _, br := range resp.Branches { + branch, err := serialization.HistoryBranchFromBlob(br.BranchToken, enumspb.ENCODING_TYPE_PROTO3.String()) + s.NoError(err) + uuidTreeId := branch.TreeId + if trees[uuidTreeId] { + delete(trees, uuidTreeId) + + s.True(br.ForkTime.UnixNano() > 0) + s.True(len(branch.BranchId) > 0) + s.Equal("branchInfo", br.Info) + } else { + s.Fail("treeID not found", branch.TreeId) + } + } + + if len(resp.NextPageToken) == 0 { + break + } + pgToken = resp.NextPageToken + } + + s.Equal(0, len(trees)) +} + +// TestReadBranchByPagination test +func (s *HistoryV2PersistenceSuite) TestReadBranchByPagination() { + treeID := uuid.NewRandom().String() + bi, err := s.newHistoryBranch(treeID) + s.Nil(err) + + historyW := &historypb.History{} + events := s.genRandomEvents([]int64{1, 2, 3}, 0) + err = s.appendNewBranchAndFirstNode(bi, events, 1, "branchInfo") + s.Nil(err) + historyW.Events = events + + events = s.genRandomEvents([]int64{4}, 0) + err = s.appendNewNode(bi, events, 2) + s.Nil(err) + historyW.Events = append(historyW.Events, events...) + + events = s.genRandomEvents([]int64{5, 6, 7, 8}, 4) + err = s.appendNewNode(bi, events, 6) + s.Nil(err) + historyW.Events = append(historyW.Events, events...) + + // stale event batch + events = s.genRandomEvents([]int64{6, 7, 8}, 1) + err = s.appendNewNode(bi, events, 3) + s.Nil(err) + // stale event batch + events = s.genRandomEvents([]int64{6, 7, 8}, 2) + err = s.appendNewNode(bi, events, 4) + s.Nil(err) + // stale event batch + events = s.genRandomEvents([]int64{6, 7, 8}, 3) + err = s.appendNewNode(bi, events, 5) + s.Nil(err) + + events = s.genRandomEvents([]int64{9}, 4) + err = s.appendNewNode(bi, events, 7) + s.Nil(err) + historyW.Events = append(historyW.Events, events...) 
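The persistence helpers further down in this file wrap every append, fork and delete in backoff.ThrottleRetry with the historyTestRetryPolicy declared above, retrying only while the store reports *p.ConditionFailedError. The same pattern in isolation, with an illustrative wrapper name and the policy values copied from the suite:

package example

import (
    "context"
    "time"

    "go.temporal.io/server/common/backoff"
    p "go.temporal.io/server/common/persistence"
)

// retryPolicy mirrors historyTestRetryPolicy: exponential backoff starting at 50ms,
// capped at 3s per interval, giving up after 30s overall.
var retryPolicy = backoff.NewExponentialRetryPolicy(50 * time.Millisecond).
    WithMaximumInterval(3 * time.Second).
    WithExpirationInterval(30 * time.Second)

// appendWithRetry keeps retrying an append while the store reports a condition failure,
// e.g. when a concurrent writer won a conditional update.
func appendWithRetry(ctx context.Context, mgr p.ExecutionManager, req *p.AppendHistoryNodesRequest) (*p.AppendHistoryNodesResponse, error) {
    var resp *p.AppendHistoryNodesResponse
    op := func() error {
        var err error
        resp, err = mgr.AppendHistoryNodes(ctx, req)
        return err
    }
    isRetryable := func(err error) bool {
        _, ok := err.(*p.ConditionFailedError)
        return ok
    }
    err := backoff.ThrottleRetry(op, retryPolicy, isRetryable)
    return resp, err
}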
+ + // Start to read from middle, should not return error, but the first batch should be ignored by application layer + req := &p.ReadHistoryBranchRequest{ + BranchToken: bi, + MinEventID: 6, + MaxEventID: 10, + PageSize: 4, + NextPageToken: nil, + ShardID: s.ShardInfo.GetShardId(), + } + // first page + resp, err := s.ExecutionManager.ReadHistoryBranch(s.ctx, req) + s.Nil(err) + s.Equal(4, len(resp.HistoryEvents)) + s.Equal(int64(6), resp.HistoryEvents[0].GetEventId()) + + events = s.genRandomEvents([]int64{10}, 4) + err = s.appendNewNode(bi, events, 8) + s.Nil(err) + historyW.Events = append(historyW.Events, events...) + + events = s.genRandomEvents([]int64{11}, 4) + err = s.appendNewNode(bi, events, 9) + s.Nil(err) + historyW.Events = append(historyW.Events, events...) + + events = s.genRandomEvents([]int64{12}, 4) + err = s.appendNewNode(bi, events, 10) + s.Nil(err) + historyW.Events = append(historyW.Events, events...) + + events = s.genRandomEvents([]int64{13, 14, 15}, 4) + err = s.appendNewNode(bi, events, 11) + s.Nil(err) + // we don't append this batch because we will fork from 13 + // historyW.Events = append(historyW.Events, events...) + + // fork from here + bi2, err := s.fork(bi, 13) + s.Nil(err) + + events = s.genRandomEvents([]int64{13}, 4) + err = s.appendNewNode(bi2, events, 12) + s.Nil(err) + historyW.Events = append(historyW.Events, events...) + + events = s.genRandomEvents([]int64{14}, 4) + err = s.appendNewNode(bi2, events, 13) + s.Nil(err) + historyW.Events = append(historyW.Events, events...) + + events = s.genRandomEvents([]int64{15, 16, 17}, 4) + err = s.appendNewNode(bi2, events, 14) + s.Nil(err) + historyW.Events = append(historyW.Events, events...) + + events = s.genRandomEvents([]int64{18, 19, 20}, 4) + err = s.appendNewNode(bi2, events, 15) + s.Nil(err) + historyW.Events = append(historyW.Events, events...) + + // read branch to verify + historyR := &historypb.History{} + + req = &p.ReadHistoryBranchRequest{ + BranchToken: bi2, + MinEventID: 1, + MaxEventID: 21, + PageSize: 3, + NextPageToken: nil, + ShardID: s.ShardInfo.GetShardId(), + } + + // first page + resp, err = s.ExecutionManager.ReadHistoryBranch(s.ctx, req) + s.Nil(err) + + s.Equal(8, len(resp.HistoryEvents)) + historyR.Events = append(historyR.Events, resp.HistoryEvents...) + req.NextPageToken = resp.NextPageToken + + // this page is all stale batches + // due to the difference in Cassandra / MySQL pagination + // the stale event batch may get returned + resp, err = s.ExecutionManager.ReadHistoryBranch(s.ctx, req) + s.Nil(err) + historyR.Events = append(historyR.Events, resp.HistoryEvents...) + req.NextPageToken = resp.NextPageToken + if len(resp.HistoryEvents) == 0 { + // second page + resp, err = s.ExecutionManager.ReadHistoryBranch(s.ctx, req) + s.Nil(err) + s.Equal(3, len(resp.HistoryEvents)) + historyR.Events = append(historyR.Events, resp.HistoryEvents...) + req.NextPageToken = resp.NextPageToken + } else if len(resp.HistoryEvents) == 3 { + // no op + } else { + s.Fail("should either return 0 (Cassandra) or 3 (MySQL) events") + } + + // 3rd page, since we fork from nodeID=13, we can only see one batch of 12 here + resp, err = s.ExecutionManager.ReadHistoryBranch(s.ctx, req) + s.Nil(err) + s.Equal(1, len(resp.HistoryEvents)) + historyR.Events = append(historyR.Events, resp.HistoryEvents...)
+ req.NextPageToken = resp.NextPageToken + + // 4th page, 13~17 + resp, err = s.ExecutionManager.ReadHistoryBranch(s.ctx, req) + s.Nil(err) + s.Equal(5, len(resp.HistoryEvents)) + historyR.Events = append(historyR.Events, resp.HistoryEvents...) + req.NextPageToken = resp.NextPageToken + + // last page: one batch of 18-20 + // We have only one page left and the page size is set to one. In this case, + // persistence may or may not return a nextPageToken. + // If it does return a token, we need to ensure that if the token returned is used + // to get history again, no error and history events should be returned. + req.PageSize = 1 + resp, err = s.ExecutionManager.ReadHistoryBranch(s.ctx, req) + s.Nil(err) + s.Equal(3, len(resp.HistoryEvents)) + historyR.Events = append(historyR.Events, resp.HistoryEvents...) + req.NextPageToken = resp.NextPageToken + if len(resp.NextPageToken) != 0 { + resp, err = s.ExecutionManager.ReadHistoryBranch(s.ctx, req) + s.Nil(err) + s.Equal(0, len(resp.HistoryEvents)) + } + + s.True(reflect.DeepEqual(historyW, historyR)) + s.Equal(0, len(resp.NextPageToken)) + + // MinEventID is in the middle of the last batch and this is the first request (NextPageToken + // is empty), the call should return an error. + req.MinEventID = 19 + req.NextPageToken = nil + _, err = s.ExecutionManager.ReadHistoryBranch(s.ctx, req) + s.IsType(&serviceerror.NotFound{}, err) + + err = s.deleteHistoryBranch(bi2) + s.Nil(err) + err = s.deleteHistoryBranch(bi) + s.Nil(err) + branches := s.descTree(treeID) + s.Equal(0, len(branches)) +} + +// TestConcurrentlyCreateAndAppendBranches test +func (s *HistoryV2PersistenceSuite) TestConcurrentlyCreateAndAppendBranches() { + treeID := uuid.NewRandom().String() + wg := sync.WaitGroup{} + concurrency := 1 + m := sync.Map{} + + // test create new branch along with appending new nodes + for i := 0; i < concurrency; i++ { + wg.Add(1) + go func(idx int) { + defer wg.Done() + bi, err := s.newHistoryBranch(treeID) + s.Nil(err) + historyW := &historypb.History{} + m.Store(idx, bi) + + events := s.genRandomEvents([]int64{1, 2, 3}, 1) + err = s.appendNewBranchAndFirstNode(bi, events, 1, "branchInfo") + s.Nil(err) + historyW.Events = events + + events = s.genRandomEvents([]int64{4}, 1) + err = s.appendNewNode(bi, events, 2) + s.Nil(err) + historyW.Events = append(historyW.Events, events...) + + events = s.genRandomEvents([]int64{5, 6, 7, 8}, 1) + err = s.appendNewNode(bi, events, 3) + s.Nil(err) + historyW.Events = append(historyW.Events, events...) + + events = s.genRandomEvents([]int64{9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20}, 1) + err = s.appendNewNode(bi, events, 4000) + s.Nil(err) + historyW.Events = append(historyW.Events, events...) 
+ + // read branch to verify + historyR := &historypb.History{} + events = s.read(bi, 1, 21) + s.Equal(20, len(events)) + historyR.Events = events + + s.True(reflect.DeepEqual(historyW, historyR)) + }(i) + } + + wg.Wait() + branches := s.descTree(treeID) + s.Equal(concurrency, len(branches)) + + wg = sync.WaitGroup{} + // test appending nodes (override and new nodes) on each branch concurrently + for i := 0; i < concurrency; i++ { + wg.Add(1) + go func(idx int) { + defer wg.Done() + + branch := s.getBranchByKey(m, idx) + + // override with smaller txn_id + events := s.genRandomEvents([]int64{5}, 1) + err := s.appendNewNode(branch, events, 0) + s.Nil(err) + // it shouldn't change anything + events = s.read(branch, 1, 25) + s.Equal(20, len(events)) + + // override with greatest txn_id + events = s.genRandomEvents([]int64{5}, 1) + err = s.appendNewNode(branch, events, 3000) + s.Nil(err) + + // read to verify override success, at this point history is corrupted, missing 6/7/8, so we should only see 5 events + events = s.read(branch, 1, 6) + s.Equal(5, len(events)) + _, err = s.readWithError(branch, 1, 25) + _, ok := err.(*serviceerror.DataLoss) + s.Equal(true, ok) + + // override with even larger txn_id and same version + events = s.genRandomEvents([]int64{5, 6}, 1) + err = s.appendNewNode(branch, events, 3001) + s.Nil(err) + + // read to verify override success, at this point history is corrupted, missing 7/8, so we should only see 6 events + events = s.read(branch, 1, 7) + s.Equal(6, len(events)) + _, err = s.readWithError(branch, 1, 25) + _, ok = err.(*serviceerror.DataLoss) + s.Equal(true, ok) + + // override more with larger txn_id, this would fix the corrupted hole so that we can get 20 events again + events = s.genRandomEvents([]int64{7, 8}, 1) + err = s.appendNewNode(branch, events, 3002) + s.Nil(err) + + // read to verify override + events = s.read(branch, 1, 25) + s.Equal(20, len(events)) + events = s.genRandomEvents([]int64{9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23}, 1) + err = s.appendNewNode(branch, events, 4001) + s.Nil(err) + events = s.read(branch, 1, 25) + s.Equal(23, len(events)) + }(i) + } + + wg.Wait() + // Finally let's clean up all branches + m.Range(func(k, v interface{}) bool { + br := v.([]byte) + // delete old branches along with create new branches + err := s.deleteHistoryBranch(br) + s.Nil(err) + return true + }) + + branches = s.descTree(treeID) + s.Equal(0, len(branches)) +} + +// TestConcurrentlyForkAndAppendBranches test +func (s *HistoryV2PersistenceSuite) TestConcurrentlyForkAndAppendBranches() { + treeID := uuid.NewRandom().String() + wg := sync.WaitGroup{} + concurrency := 10 + masterBr, err := s.newHistoryBranch(treeID) + s.Nil(err) + branches := s.descTree(treeID) + s.Equal(0, len(branches)) + + // append first batch to master branch + eids := []int64{} + for i := int64(1); i <= int64(concurrency)+1; i++ { + eids = append(eids, i) + } + events := s.genRandomEvents(eids, 1) + err = s.appendNewBranchAndFirstNode(masterBr, events[0:1], 1, "masterbr") + s.Nil(err) + + readEvents := s.read(masterBr, 1, int64(concurrency)+2) + s.Nil(err) + s.Equal(1, len(readEvents)) + + branches = s.descTree(treeID) + s.Equal(1, len(branches)) + mbrID := branches[0].BranchId + + txn := int64(1) + getTxnLock := sync.Mutex{} + reserveTxn := func(count int) int64 { + getTxnLock.Lock() + defer getTxnLock.Unlock() + + ret := txn + txn += int64(count) + return ret + } + + err = s.appendOneByOne(masterBr, events[1:], reserveTxn(len(events[1:]))) + s.Nil(err) + events =
s.read(masterBr, 1, int64(concurrency)+2) + s.Nil(err) + s.Equal((concurrency)+1, len(events)) + + level1ID := sync.Map{} + level1Br := sync.Map{} + // test forking from master branch and append nodes + for i := 0; i < concurrency; i++ { + wg.Add(1) + go func(idx int) { + defer wg.Done() + + forkNodeID := rand.Int63n(int64(concurrency)) + 2 + level1ID.Store(idx, forkNodeID) + + bi, err := s.fork(masterBr, forkNodeID) + s.Nil(err) + level1Br.Store(idx, bi) + + // cannot append to ancestors + events := s.genRandomEvents([]int64{forkNodeID - 1}, 1) + err = s.appendNewNode(bi, events, reserveTxn(1)) + _, ok := err.(*p.InvalidPersistenceRequestError) + s.Equal(true, ok) + + // append second batch to first level + eids := make([]int64, 0) + for i := forkNodeID; i <= int64(concurrency)*2+1; i++ { + eids = append(eids, i) + } + events = s.genRandomEvents(eids, 1) + + err = s.appendNewNode(bi, events[0:1], reserveTxn(1)) + s.Nil(err) + + err = s.appendOneByOne(bi, events[1:], reserveTxn(len(events[1:]))) + s.Nil(err) + + events = s.read(bi, 1, int64(concurrency)*2+2) + s.Nil(err) + s.Equal((concurrency)*2+1, len(events)) + + if idx == 0 { + err = s.deleteHistoryBranch(bi) + s.Nil(err) + } + + }(i) + } + + wg.Wait() + branches = s.descTree(treeID) + s.Equal(concurrency, len(branches)) + forkOnLevel1 := int32(0) + level2Br := sync.Map{} + wg = sync.WaitGroup{} + + // test forking for second level of branch + for i := 1; i < concurrency; i++ { + wg.Add(1) + go func(idx int) { + defer wg.Done() + + // Even though we fork from the level1 branch, it is possible that the new branch will fork from the master branch + forkNodeID := rand.Int63n(int64(concurrency)*2) + 2 + forkBr := s.getBranchByKey(level1Br, idx) + lastForkNodeID := s.getIDByKey(level1ID, idx) + + if forkNodeID > lastForkNodeID { + atomic.AddInt32(&forkOnLevel1, int32(1)) + } + + bi, err := s.fork(forkBr, forkNodeID) + s.Nil(err) + level2Br.Store(idx, bi) + + // append second batch to second level + eids := make([]int64, 0) + for i := forkNodeID; i <= int64(concurrency)*3+1; i++ { + eids = append(eids, i) + } + events := s.genRandomEvents(eids, 1) + err = s.appendNewNode(bi, events[0:1], reserveTxn(1)) + s.Nil(err) + err = s.appendOneByOne(bi, events[1:], reserveTxn(len(events[1:]))) + s.Nil(err) + events = s.read(bi, 1, int64(concurrency)*3+2) + s.Nil(err) + s.Equal((concurrency)*3+1, len(events)) + + // try override last event + events = s.genRandomEvents([]int64{int64(concurrency)*3 + 1}, 1) + err = s.appendNewNode(bi, events, reserveTxn(1)) + s.Nil(err) + events = s.read(bi, 1, int64(concurrency)*3+2) + s.Nil(err) + s.Equal((concurrency)*3+1, len(events)) + + // test fork and newBranch concurrently + bi, err = s.newHistoryBranch(treeID) + s.Nil(err) + level2Br.Store(concurrency+idx, bi) + + events = s.genRandomEvents([]int64{1}, 1) + err = s.appendNewBranchAndFirstNode(bi, events, reserveTxn(1), "newbr") + s.Nil(err) + + }(i) + } + + wg.Wait() + branches = s.descTree(treeID) + s.Equal(concurrency*3-2, len(branches)) + actualForkOnLevel1 := int32(0) + masterCnt := 0 + for _, b := range branches { + if len(b.Ancestors) == 2 { + actualForkOnLevel1++ + } else if len(b.Ancestors) == 0 { + masterCnt++ + } else { + s.Equal(1, len(b.Ancestors)) + s.Equal(mbrID, b.Ancestors[0].GetBranchId()) + } + } + s.Equal(forkOnLevel1, actualForkOnLevel1) + s.Equal(concurrency, masterCnt) + + // Finally let's clean up all branches + level1Br.Range(func(k, v interface{}) bool { + br := v.([]byte) + // delete old branches along with create new branches + err :=
s.deleteHistoryBranch(br) + s.Nil(err) + + return true + }) + level2Br.Range(func(k, v interface{}) bool { + br := v.([]byte) + // delete old branches along with create new branches + err := s.deleteHistoryBranch(br) + s.Nil(err) + + return true + }) + err = s.deleteHistoryBranch(masterBr) + s.Nil(err) + + branches = s.descTree(treeID) + s.Equal(0, len(branches)) + +} + +// TestTreeInfoCompatibility test +func (s *HistoryV2PersistenceSuite) TestTreeInfoCompatibility() { + serializer := serialization.NewSerializer() + treeID := uuid.NewRandom().String() + branchID := uuid.NewRandom().String() + originalBranch := &persistencespb.HistoryBranch{ + TreeId: treeID, + BranchId: branchID, + } + originalToken, err := serializer.HistoryBranchToBlob(originalBranch, enumspb.ENCODING_TYPE_PROTO3) + s.NoError(err) + + // Forward compatibility -> infer missing branch token + blob, err := serializer.HistoryTreeInfoToBlob( + &persistencespb.HistoryTreeInfo{ + BranchInfo: originalBranch, + // NOTE: Intentionally missing BranchToken + }, + enumspb.ENCODING_TYPE_PROTO3, + ) + s.NoError(err) + info, err := p.ToHistoryTreeInfo(serializer, blob) + s.NoError(err) + s.Equal(originalToken.Data, info.BranchToken) + s.Equal(originalBranch, info.BranchInfo) + + // Backward compatibility -> infer missing branch info + blob, err = serializer.HistoryTreeInfoToBlob( + &persistencespb.HistoryTreeInfo{ + BranchToken: originalToken.Data, + // NOTE: Intentionally missing BranchInfo + }, + enumspb.ENCODING_TYPE_PROTO3, + ) + s.NoError(err) + info, err = p.ToHistoryTreeInfo(serializer, blob) + s.NoError(err) + s.Equal(originalToken.Data, info.BranchToken) + s.Equal(originalBranch, info.BranchInfo) +} + +func (s *HistoryV2PersistenceSuite) getBranchByKey(m sync.Map, k int) []byte { + v, ok := m.Load(k) + s.Equal(true, ok) + br := v.([]byte) + return br +} + +func (s *HistoryV2PersistenceSuite) getIDByKey(m sync.Map, k int) int64 { + v, ok := m.Load(k) + s.Equal(true, ok) + id := v.(int64) + return id +} + +func (s *HistoryV2PersistenceSuite) genRandomEvents(eventIDs []int64, version int64) []*historypb.HistoryEvent { + var events []*historypb.HistoryEvent + + now := time.Date(2020, 8, 22, 0, 0, 0, 0, time.UTC) + for _, eid := range eventIDs { + e := &historypb.HistoryEvent{EventId: eid, Version: version, EventTime: &now} + events = append(events, e) + } + + return events +} + +// persistence helper +func (s *HistoryV2PersistenceSuite) newHistoryBranch(treeID string) ([]byte, error) { + return s.ExecutionManager.GetHistoryBranchUtil().NewHistoryBranch( + uuid.New(), + treeID, + nil, + []*persistencespb.HistoryBranchRange{}, + nil, + nil, + nil, + ) +} + +// persistence helper +func (s *HistoryV2PersistenceSuite) deleteHistoryBranch(branch []byte) error { + + op := func() error { + return s.ExecutionManager.DeleteHistoryBranch(s.ctx, &p.DeleteHistoryBranchRequest{ + BranchToken: branch, + ShardID: s.ShardInfo.GetShardId(), + }) + } + + return backoff.ThrottleRetry(op, historyTestRetryPolicy, isConditionFail) +} + +// persistence helper +func (s *HistoryV2PersistenceSuite) descTree(treeID string) []*persistencespb.HistoryBranch { + resp, err := s.ExecutionManager.GetHistoryTree(s.ctx, &p.GetHistoryTreeRequest{ + TreeID: treeID, + ShardID: s.ShardInfo.GetShardId(), + }) + s.Nil(err) + branches, err := s.toHistoryBranches(resp.BranchTokens) + s.NoError(err) + return branches +} + +func (s *HistoryV2PersistenceSuite) toHistoryBranches(branchTokens [][]byte) ([]*persistencespb.HistoryBranch, error) { + branches := 
make([]*persistencespb.HistoryBranch, len(branchTokens)) + for i, b := range branchTokens { + branch, err := serialization.HistoryBranchFromBlob(b, enumspb.ENCODING_TYPE_PROTO3.String()) + if err != nil { + return nil, err + } + branches[i] = branch + } + return branches, nil +} + +// persistence helper +func (s *HistoryV2PersistenceSuite) read(branch []byte, minID, maxID int64) []*historypb.HistoryEvent { + res, err := s.readWithError(branch, minID, maxID) + s.Nil(err) + return res +} + +func (s *HistoryV2PersistenceSuite) readWithError(branch []byte, minID, maxID int64) ([]*historypb.HistoryEvent, error) { + + // use small page size to enforce pagination + randPageSize := 2 + res := make([]*historypb.HistoryEvent, 0) + token := []byte{} + for { + resp, err := s.ExecutionManager.ReadHistoryBranch(s.ctx, &p.ReadHistoryBranchRequest{ + BranchToken: branch, + MinEventID: minID, + MaxEventID: maxID, + PageSize: randPageSize, + NextPageToken: token, + ShardID: s.ShardInfo.GetShardId(), + }) + if err != nil { + return nil, err + } + if len(resp.HistoryEvents) > 0 { + s.True(resp.Size > 0) + } + res = append(res, resp.HistoryEvents...) + token = resp.NextPageToken + if len(token) == 0 { + break + } + } + + return res, nil +} + +func (s *HistoryV2PersistenceSuite) appendOneByOne(branch []byte, events []*historypb.HistoryEvent, txnID int64) error { + for index, e := range events { + err := s.append(branch, []*historypb.HistoryEvent{e}, txnID+int64(index), false, "") + if err != nil { + return err + } + } + return nil +} + +func (s *HistoryV2PersistenceSuite) appendNewNode(branch []byte, events []*historypb.HistoryEvent, txnID int64) error { + return s.append(branch, events, txnID, false, "") +} + +func (s *HistoryV2PersistenceSuite) appendNewBranchAndFirstNode(branch []byte, events []*historypb.HistoryEvent, txnID int64, branchInfo string) error { + return s.append(branch, events, txnID, true, branchInfo) +} + +// persistence helper +func (s *HistoryV2PersistenceSuite) append(branch []byte, events []*historypb.HistoryEvent, txnID int64, isNewBranch bool, branchInfo string) error { + + var resp *p.AppendHistoryNodesResponse + + op := func() error { + var err error + resp, err = s.ExecutionManager.AppendHistoryNodes(s.ctx, &p.AppendHistoryNodesRequest{ + IsNewBranch: isNewBranch, + Info: branchInfo, + BranchToken: branch, + Events: events, + TransactionID: txnID, + ShardID: s.ShardInfo.GetShardId(), + }) + return err + } + + err := backoff.ThrottleRetry(op, historyTestRetryPolicy, isConditionFail) + if err != nil { + return err + } + s.True(resp.Size > 0) + + return err +} + +// persistence helper +func (s *HistoryV2PersistenceSuite) fork(forkBranch []byte, forkNodeID int64) ([]byte, error) { + + bi := []byte{} + + op := func() error { + var err error + resp, err := s.ExecutionManager.ForkHistoryBranch(s.ctx, &p.ForkHistoryBranchRequest{ + ForkBranchToken: forkBranch, + ForkNodeID: forkNodeID, + Info: testForkRunID, + ShardID: s.ShardInfo.GetShardId(), + NamespaceID: uuid.New(), + }) + if resp != nil { + bi = resp.NewBranchToken + } + return err + } + + err := backoff.ThrottleRetry(op, historyTestRetryPolicy, isConditionFail) + return bi, err +} diff -Nru temporal-1.21.5-1/src/common/persistence/persistence-tests/metadataPersistenceV2Test.go temporal-1.22.5/src/common/persistence/persistence-tests/metadataPersistenceV2Test.go --- temporal-1.21.5-1/src/common/persistence/persistence-tests/metadataPersistenceV2Test.go 2023-09-29 14:03:07.000000000 +0000 +++ 
temporal-1.22.5/src/common/persistence/persistence-tests/metadataPersistenceV2Test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,1514 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package persistencetests - -import ( - "context" - "fmt" - "reflect" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" - - "github.com/pborman/uuid" - "github.com/stretchr/testify/require" - enumspb "go.temporal.io/api/enums/v1" - namespacepb "go.temporal.io/api/namespace/v1" - "go.temporal.io/api/serviceerror" - - persistencespb "go.temporal.io/server/api/persistence/v1" - "go.temporal.io/server/common/cluster" - "go.temporal.io/server/common/debug" - p "go.temporal.io/server/common/persistence" - "go.temporal.io/server/common/persistence/cassandra" - "go.temporal.io/server/common/primitives/timestamp" -) - -type ( - // MetadataPersistenceSuiteV2 is test of the V2 version of metadata persistence - MetadataPersistenceSuiteV2 struct { - TestBase - // override suite.Suite.Assertions with require.Assertions; this means that s.NotNil(nil) will stop the test, - // not merely log an error - *require.Assertions - - ctx context.Context - cancel context.CancelFunc - } -) - -// SetupSuite implementation -func (m *MetadataPersistenceSuiteV2) SetupSuite() { -} - -// SetupTest implementation -func (m *MetadataPersistenceSuiteV2) SetupTest() { - // Have to define our overridden assertions in the test setup. 
If we did it earlier, s.T() will return nil - m.Assertions = require.New(m.T()) - m.ctx, m.cancel = context.WithTimeout(context.Background(), 30*time.Second*debug.TimeoutMultiplier) - - // cleanup the namespace created - var token []byte - pageSize := 10 -ListLoop: - for { - resp, err := m.ListNamespaces(pageSize, token) - m.NoError(err) - token = resp.NextPageToken - for _, n := range resp.Namespaces { - m.NoError(m.DeleteNamespace(n.Namespace.Info.Id, "")) - } - if len(token) == 0 { - break ListLoop - } - } -} - -// TearDownTest implementation -func (m *MetadataPersistenceSuiteV2) TearDownTest() { - m.cancel() -} - -// TearDownSuite implementation -func (m *MetadataPersistenceSuiteV2) TearDownSuite() { - m.TearDownWorkflowStore() -} - -// Partial namespace creation is only relevant for Cassandra, the following tests will only run when the underlying cluster is cassandra -func (m *MetadataPersistenceSuiteV2) createPartialNamespace(id string, name string) { - // only add the namespace to namespaces_by_id table and not namespaces table - const constNamespacePartition = 0 - const templateCreateNamespaceQuery = `INSERT INTO namespaces_by_id (` + - `id, name) ` + - `VALUES(?, ?) IF NOT EXISTS` - query := m.DefaultTestCluster.(*cassandra.TestCluster).GetSession().Query(templateCreateNamespaceQuery, id, name).WithContext(context.Background()) - err := query.Exec() - m.NoError(err) - -} - -func (m *MetadataPersistenceSuiteV2) truncatePartialNamespace() { - query := m.DefaultTestCluster.(*cassandra.TestCluster).GetSession().Query("TRUNCATE namespaces_by_id").WithContext(context.Background()) - err := query.Exec() - m.NoError(err) - - query = m.DefaultTestCluster.(*cassandra.TestCluster).GetSession().Query("TRUNCATE namespaces").WithContext(context.Background()) - err = query.Exec() - m.NoError(err) -} - -func (m *MetadataPersistenceSuiteV2) TestCreateWithPartialNamespaceSameNameSameID() { - // This is only relevant for cassandra - switch m.DefaultTestCluster.(type) { - case *cassandra.TestCluster: - default: - return - } - id := uuid.New() - name := "create-partial-namespace-test-name" - m.createPartialNamespace(id, name) - - state := enumspb.NAMESPACE_STATE_REGISTERED - description := "create-namespace-test-description" - owner := "create-namespace-test-owner" - data := map[string]string{"k1": "v1"} - retention := int32(10) - historyArchivalState := enumspb.ARCHIVAL_STATE_ENABLED - historyArchivalURI := "test://history/uri" - visibilityArchivalState := enumspb.ARCHIVAL_STATE_ENABLED - visibilityArchivalURI := "test://visibility/uri" - badBinaries := &namespacepb.BadBinaries{Binaries: map[string]*namespacepb.BadBinaryInfo{}} - isGlobalNamespace := false - configVersion := int64(0) - failoverVersion := int64(0) - - resp0, err0 := m.CreateNamespace( - &persistencespb.NamespaceInfo{ - Id: id, - Name: name, - State: state, - Description: description, - Owner: owner, - Data: data, - }, - &persistencespb.NamespaceConfig{ - Retention: timestamp.DurationFromDays(retention), - HistoryArchivalState: historyArchivalState, - HistoryArchivalUri: historyArchivalURI, - VisibilityArchivalState: visibilityArchivalState, - VisibilityArchivalUri: visibilityArchivalURI, - BadBinaries: badBinaries, - }, - &persistencespb.NamespaceReplicationConfig{}, - isGlobalNamespace, - configVersion, - failoverVersion, - ) - m.NoError(err0) - m.NotNil(resp0) - m.EqualValues(id, resp0.ID) - - // for namespace which do not have replication config set, will default to - // use current cluster as active, with current cluster as all 
clusters - resp1, err1 := m.GetNamespace(id, "") - m.NoError(err1) - m.NotNil(resp1) - m.EqualValues(id, resp1.Namespace.Info.Id) - m.Equal(name, resp1.Namespace.Info.Name) - m.Equal(state, resp1.Namespace.Info.State) - m.Equal(description, resp1.Namespace.Info.Description) - m.Equal(owner, resp1.Namespace.Info.Owner) - m.Equal(data, resp1.Namespace.Info.Data) - m.EqualValues(time.Duration(retention)*time.Hour*24, *resp1.Namespace.Config.Retention) - m.Equal(historyArchivalState, resp1.Namespace.Config.HistoryArchivalState) - m.Equal(historyArchivalURI, resp1.Namespace.Config.HistoryArchivalUri) - m.Equal(visibilityArchivalState, resp1.Namespace.Config.VisibilityArchivalState) - m.Equal(visibilityArchivalURI, resp1.Namespace.Config.VisibilityArchivalUri) - m.Equal(badBinaries, resp1.Namespace.Config.BadBinaries) - m.Equal(cluster.TestCurrentClusterName, resp1.Namespace.ReplicationConfig.ActiveClusterName) - m.Equal(1, len(resp1.Namespace.ReplicationConfig.Clusters)) - m.Equal(isGlobalNamespace, resp1.IsGlobalNamespace) - m.Equal(configVersion, resp1.Namespace.ConfigVersion) - m.Equal(failoverVersion, resp1.Namespace.FailoverVersion) - m.True(resp1.Namespace.ReplicationConfig.Clusters[0] == cluster.TestCurrentClusterName) - m.Equal(p.InitialFailoverNotificationVersion, resp1.Namespace.FailoverNotificationVersion) - m.truncatePartialNamespace() -} - -func (m *MetadataPersistenceSuiteV2) TestCreateWithPartialNamespaceSameNameDifferentID() { - // This is only relevant for cassandra - switch m.DefaultTestCluster.(type) { - case *cassandra.TestCluster: - default: - return - } - - id := uuid.New() - partialID := uuid.New() - name := "create-partial-namespace-test-name" - m.createPartialNamespace(partialID, name) - state := enumspb.NAMESPACE_STATE_REGISTERED - description := "create-namespace-test-description" - owner := "create-namespace-test-owner" - data := map[string]string{"k1": "v1"} - retention := int32(10) - historyArchivalState := enumspb.ARCHIVAL_STATE_ENABLED - historyArchivalURI := "test://history/uri" - visibilityArchivalState := enumspb.ARCHIVAL_STATE_ENABLED - visibilityArchivalURI := "test://visibility/uri" - badBinaries := &namespacepb.BadBinaries{Binaries: map[string]*namespacepb.BadBinaryInfo{}} - isGlobalNamespace := false - configVersion := int64(0) - failoverVersion := int64(0) - - resp0, err0 := m.CreateNamespace( - &persistencespb.NamespaceInfo{ - Id: id, - Name: name, - State: state, - Description: description, - Owner: owner, - Data: data, - }, - &persistencespb.NamespaceConfig{ - Retention: timestamp.DurationFromDays(retention), - HistoryArchivalState: historyArchivalState, - HistoryArchivalUri: historyArchivalURI, - VisibilityArchivalState: visibilityArchivalState, - VisibilityArchivalUri: visibilityArchivalURI, - BadBinaries: badBinaries, - }, - &persistencespb.NamespaceReplicationConfig{}, - isGlobalNamespace, - configVersion, - failoverVersion, - ) - m.NoError(err0) - m.NotNil(resp0) - m.EqualValues(id, resp0.ID) - - // for namespace which do not have replication config set, will default to - // use current cluster as active, with current cluster as all clusters - resp1, err1 := m.GetNamespace(id, "") - m.NoError(err1) - m.NotNil(resp1) - m.EqualValues(id, resp1.Namespace.Info.Id) - m.Equal(name, resp1.Namespace.Info.Name) - m.Equal(state, resp1.Namespace.Info.State) - m.Equal(description, resp1.Namespace.Info.Description) - m.Equal(owner, resp1.Namespace.Info.Owner) - m.Equal(data, resp1.Namespace.Info.Data) - m.EqualValues(time.Duration(retention)*time.Hour*24, 
*resp1.Namespace.Config.Retention) - m.Equal(historyArchivalState, resp1.Namespace.Config.HistoryArchivalState) - m.Equal(historyArchivalURI, resp1.Namespace.Config.HistoryArchivalUri) - m.Equal(visibilityArchivalState, resp1.Namespace.Config.VisibilityArchivalState) - m.Equal(visibilityArchivalURI, resp1.Namespace.Config.VisibilityArchivalUri) - m.Equal(badBinaries, resp1.Namespace.Config.BadBinaries) - m.Equal(cluster.TestCurrentClusterName, resp1.Namespace.ReplicationConfig.ActiveClusterName) - m.Equal(1, len(resp1.Namespace.ReplicationConfig.Clusters)) - m.Equal(isGlobalNamespace, resp1.IsGlobalNamespace) - m.Equal(configVersion, resp1.Namespace.ConfigVersion) - m.Equal(failoverVersion, resp1.Namespace.FailoverVersion) - m.True(resp1.Namespace.ReplicationConfig.Clusters[0] == cluster.TestCurrentClusterName) - m.Equal(p.InitialFailoverNotificationVersion, resp1.Namespace.FailoverNotificationVersion) - m.truncatePartialNamespace() -} - -func (m *MetadataPersistenceSuiteV2) TestCreateWithPartialNamespaceDifferentNameSameID() { - // This is only relevant for cassandra - switch m.DefaultTestCluster.(type) { - case *cassandra.TestCluster: - default: - return - } - id := uuid.New() - name := "create-namespace-test-name-for-partial-test" - partialName := "create-partial-namespace-test-name" - m.createPartialNamespace(id, partialName) - state := enumspb.NAMESPACE_STATE_REGISTERED - description := "create-namespace-test-description" - owner := "create-namespace-test-owner" - data := map[string]string{"k1": "v1"} - retention := int32(10) - historyArchivalState := enumspb.ARCHIVAL_STATE_ENABLED - historyArchivalURI := "test://history/uri" - visibilityArchivalState := enumspb.ARCHIVAL_STATE_ENABLED - visibilityArchivalURI := "test://visibility/uri" - badBinaries := &namespacepb.BadBinaries{Binaries: map[string]*namespacepb.BadBinaryInfo{}} - isGlobalNamespace := false - configVersion := int64(0) - failoverVersion := int64(0) - - resp0, err0 := m.CreateNamespace( - &persistencespb.NamespaceInfo{ - Id: id, - Name: name, - State: state, - Description: description, - Owner: owner, - Data: data, - }, - &persistencespb.NamespaceConfig{ - Retention: timestamp.DurationFromDays(retention), - HistoryArchivalState: historyArchivalState, - HistoryArchivalUri: historyArchivalURI, - VisibilityArchivalState: visibilityArchivalState, - VisibilityArchivalUri: visibilityArchivalURI, - BadBinaries: badBinaries, - }, - &persistencespb.NamespaceReplicationConfig{}, - isGlobalNamespace, - configVersion, - failoverVersion, - ) - m.Error(err0) - m.IsType(&serviceerror.NamespaceAlreadyExists{}, err0) - m.Nil(resp0) - m.truncatePartialNamespace() -} - -// TestCreateNamespace test -func (m *MetadataPersistenceSuiteV2) TestCreateNamespace() { - id := uuid.New() - name := "create-namespace-test-name-for-partial-test" - state := enumspb.NAMESPACE_STATE_REGISTERED - description := "create-namespace-test-description" - owner := "create-namespace-test-owner" - data := map[string]string{"k1": "v1"} - retention := int32(10) - historyArchivalState := enumspb.ARCHIVAL_STATE_ENABLED - historyArchivalURI := "test://history/uri" - visibilityArchivalState := enumspb.ARCHIVAL_STATE_ENABLED - visibilityArchivalURI := "test://visibility/uri" - badBinaries := &namespacepb.BadBinaries{map[string]*namespacepb.BadBinaryInfo{}} - isGlobalNamespace := false - configVersion := int64(0) - failoverVersion := int64(0) - - resp0, err0 := m.CreateNamespace( - &persistencespb.NamespaceInfo{ - Id: id, - Name: name, - State: state, - Description: 
description, - Owner: owner, - Data: data, - }, - &persistencespb.NamespaceConfig{ - Retention: timestamp.DurationFromDays(retention), - HistoryArchivalState: historyArchivalState, - HistoryArchivalUri: historyArchivalURI, - VisibilityArchivalState: visibilityArchivalState, - VisibilityArchivalUri: visibilityArchivalURI, - BadBinaries: badBinaries, - }, - &persistencespb.NamespaceReplicationConfig{}, - isGlobalNamespace, - configVersion, - failoverVersion, - ) - m.NoError(err0) - m.NotNil(resp0) - m.EqualValues(id, resp0.ID) - - // for namespace which do not have replication config set, will default to - // use current cluster as active, with current cluster as all clusters - resp1, err1 := m.GetNamespace(id, "") - m.NoError(err1) - m.NotNil(resp1) - m.EqualValues(id, resp1.Namespace.Info.Id) - m.Equal(name, resp1.Namespace.Info.Name) - m.Equal(state, resp1.Namespace.Info.State) - m.Equal(description, resp1.Namespace.Info.Description) - m.Equal(owner, resp1.Namespace.Info.Owner) - m.Equal(data, resp1.Namespace.Info.Data) - m.EqualValues(time.Duration(retention)*time.Hour*24, *resp1.Namespace.Config.Retention) - m.Equal(historyArchivalState, resp1.Namespace.Config.HistoryArchivalState) - m.Equal(historyArchivalURI, resp1.Namespace.Config.HistoryArchivalUri) - m.Equal(visibilityArchivalState, resp1.Namespace.Config.VisibilityArchivalState) - m.Equal(visibilityArchivalURI, resp1.Namespace.Config.VisibilityArchivalUri) - m.Equal(badBinaries, resp1.Namespace.Config.BadBinaries) - m.Equal(cluster.TestCurrentClusterName, resp1.Namespace.ReplicationConfig.ActiveClusterName) - m.Equal(1, len(resp1.Namespace.ReplicationConfig.Clusters)) - m.Equal(isGlobalNamespace, resp1.IsGlobalNamespace) - m.Equal(configVersion, resp1.Namespace.ConfigVersion) - m.Equal(failoverVersion, resp1.Namespace.FailoverVersion) - m.True(resp1.Namespace.ReplicationConfig.Clusters[0] == cluster.TestCurrentClusterName) - m.Equal(p.InitialFailoverNotificationVersion, resp1.Namespace.FailoverNotificationVersion) - - resp2, err2 := m.CreateNamespace( - &persistencespb.NamespaceInfo{ - Id: uuid.New(), - Name: name, - State: state, - Description: "fail", - Owner: "fail", - Data: map[string]string{}, - }, - &persistencespb.NamespaceConfig{ - Retention: timestamp.DurationFromDays(100), - HistoryArchivalState: enumspb.ARCHIVAL_STATE_DISABLED, - HistoryArchivalUri: "", - VisibilityArchivalState: enumspb.ARCHIVAL_STATE_DISABLED, - VisibilityArchivalUri: "", - }, - &persistencespb.NamespaceReplicationConfig{}, - isGlobalNamespace, - configVersion, - failoverVersion, - ) - m.Error(err2) - m.IsType(&serviceerror.NamespaceAlreadyExists{}, err2) - m.Nil(resp2) -} - -// TestGetNamespace test -func (m *MetadataPersistenceSuiteV2) TestGetNamespace() { - id := uuid.New() - name := "get-namespace-test-name" - state := enumspb.NAMESPACE_STATE_REGISTERED - description := "get-namespace-test-description" - owner := "get-namespace-test-owner" - data := map[string]string{"k1": "v1"} - retention := int32(10) - historyArchivalState := enumspb.ARCHIVAL_STATE_ENABLED - historyArchivalURI := "test://history/uri" - visibilityArchivalState := enumspb.ARCHIVAL_STATE_ENABLED - visibilityArchivalURI := "test://visibility/uri" - - clusterActive := "some random active cluster name" - clusterStandby := "some random standby cluster name" - configVersion := int64(11) - failoverVersion := int64(59) - isGlobalNamespace := true - clusters := []string{clusterActive, clusterStandby} - - resp0, err0 := m.GetNamespace("", "does-not-exist") - m.Nil(resp0) - m.Error(err0) - 
m.IsType(&serviceerror.NamespaceNotFound{}, err0) - testBinaries := &namespacepb.BadBinaries{ - Binaries: map[string]*namespacepb.BadBinaryInfo{ - "abc": { - Reason: "test-reason", - Operator: "test-operator", - CreateTime: timestamp.TimePtr(time.Date(2020, 8, 22, 0, 0, 0, 0, time.UTC)), - }, - }, - } - - resp1, err1 := m.CreateNamespace( - &persistencespb.NamespaceInfo{ - Id: id, - Name: name, - State: state, - Description: description, - Owner: owner, - Data: data, - }, - &persistencespb.NamespaceConfig{ - Retention: timestamp.DurationFromDays(retention), - HistoryArchivalState: historyArchivalState, - HistoryArchivalUri: historyArchivalURI, - VisibilityArchivalState: visibilityArchivalState, - VisibilityArchivalUri: visibilityArchivalURI, - BadBinaries: testBinaries, - }, - &persistencespb.NamespaceReplicationConfig{ - ActiveClusterName: clusterActive, - Clusters: clusters, - }, - isGlobalNamespace, - configVersion, - failoverVersion, - ) - m.NoError(err1) - m.NotNil(resp1) - m.EqualValues(id, resp1.ID) - - resp2, err2 := m.GetNamespace(id, "") - m.NoError(err2) - m.NotNil(resp2) - m.EqualValues(id, resp2.Namespace.Info.Id) - m.Equal(name, resp2.Namespace.Info.Name) - m.Equal(state, resp2.Namespace.Info.State) - m.Equal(description, resp2.Namespace.Info.Description) - m.Equal(owner, resp2.Namespace.Info.Owner) - m.Equal(data, resp2.Namespace.Info.Data) - m.EqualValues(time.Duration(retention)*time.Hour*24, *resp2.Namespace.Config.Retention) - m.Equal(historyArchivalState, resp2.Namespace.Config.HistoryArchivalState) - m.Equal(historyArchivalURI, resp2.Namespace.Config.HistoryArchivalUri) - m.Equal(visibilityArchivalState, resp2.Namespace.Config.VisibilityArchivalState) - m.Equal(visibilityArchivalURI, resp2.Namespace.Config.VisibilityArchivalUri) - m.True(reflect.DeepEqual(testBinaries, resp2.Namespace.Config.BadBinaries)) - m.Equal(clusterActive, resp2.Namespace.ReplicationConfig.ActiveClusterName) - m.Equal(len(clusters), len(resp2.Namespace.ReplicationConfig.Clusters)) - for index := range clusters { - m.Equal(clusters[index], resp2.Namespace.ReplicationConfig.Clusters[index]) - } - m.Equal(isGlobalNamespace, resp2.IsGlobalNamespace) - m.Equal(configVersion, resp2.Namespace.ConfigVersion) - m.Equal(failoverVersion, resp2.Namespace.FailoverVersion) - m.Equal(p.InitialFailoverNotificationVersion, resp2.Namespace.FailoverNotificationVersion) - - resp3, err3 := m.GetNamespace("", name) - m.NoError(err3) - m.NotNil(resp3) - m.EqualValues(id, resp3.Namespace.Info.Id) - m.Equal(name, resp3.Namespace.Info.Name) - m.Equal(state, resp3.Namespace.Info.State) - m.Equal(description, resp3.Namespace.Info.Description) - m.Equal(owner, resp3.Namespace.Info.Owner) - m.Equal(data, resp3.Namespace.Info.Data) - m.EqualValues(time.Duration(retention)*time.Hour*24, *resp3.Namespace.Config.Retention) - m.Equal(historyArchivalState, resp3.Namespace.Config.HistoryArchivalState) - m.Equal(historyArchivalURI, resp3.Namespace.Config.HistoryArchivalUri) - m.Equal(visibilityArchivalState, resp3.Namespace.Config.VisibilityArchivalState) - m.Equal(visibilityArchivalURI, resp3.Namespace.Config.VisibilityArchivalUri) - m.Equal(clusterActive, resp3.Namespace.ReplicationConfig.ActiveClusterName) - m.Equal(len(clusters), len(resp3.Namespace.ReplicationConfig.Clusters)) - for index := range clusters { - m.Equal(clusters[index], resp3.Namespace.ReplicationConfig.Clusters[index]) - } - m.Equal(isGlobalNamespace, resp3.IsGlobalNamespace) - m.Equal(configVersion, resp3.Namespace.ConfigVersion) - m.Equal(failoverVersion, 
resp3.Namespace.FailoverVersion) - m.Equal(p.InitialFailoverNotificationVersion, resp3.Namespace.FailoverNotificationVersion) - - resp4, err4 := m.GetNamespace(id, name) - m.Error(err4) - m.IsType(&serviceerror.InvalidArgument{}, err4) - m.Nil(resp4) - - resp5, err5 := m.GetNamespace("", "") - m.Nil(resp5) - m.IsType(&serviceerror.InvalidArgument{}, err5) -} - -// TestConcurrentCreateNamespace test -func (m *MetadataPersistenceSuiteV2) TestConcurrentCreateNamespace() { - id := uuid.New() - - name := "concurrent-create-namespace-test-name" - state := enumspb.NAMESPACE_STATE_REGISTERED - description := "concurrent-create-namespace-test-description" - owner := "create-namespace-test-owner" - retention := int32(10) - historyArchivalState := enumspb.ARCHIVAL_STATE_ENABLED - historyArchivalURI := "test://history/uri" - visibilityArchivalState := enumspb.ARCHIVAL_STATE_ENABLED - visibilityArchivalURI := "test://visibility/uri" - - clusterActive := "some random active cluster name" - clusterStandby := "some random standby cluster name" - configVersion := int64(10) - failoverVersion := int64(59) - isGlobalNamespace := true - clusters := []string{clusterActive, clusterStandby} - - testBinaries := &namespacepb.BadBinaries{ - Binaries: map[string]*namespacepb.BadBinaryInfo{ - "abc": { - Reason: "test-reason", - Operator: "test-operator", - CreateTime: timestamp.TimePtr(time.Date(2020, 8, 22, 0, 0, 0, 0, time.UTC)), - }, - }, - } - concurrency := 16 - successCount := int32(0) - var wg sync.WaitGroup - for i := 1; i <= concurrency; i++ { - newValue := fmt.Sprintf("v-%v", i) - wg.Add(1) - go func(data map[string]string) { - _, err1 := m.CreateNamespace( - &persistencespb.NamespaceInfo{ - Id: id, - Name: name, - State: state, - Description: description, - Owner: owner, - Data: data, - }, - &persistencespb.NamespaceConfig{ - Retention: timestamp.DurationFromDays(retention), - HistoryArchivalState: historyArchivalState, - HistoryArchivalUri: historyArchivalURI, - VisibilityArchivalState: visibilityArchivalState, - VisibilityArchivalUri: visibilityArchivalURI, - BadBinaries: testBinaries, - }, - &persistencespb.NamespaceReplicationConfig{ - ActiveClusterName: clusterActive, - Clusters: clusters, - }, - isGlobalNamespace, - configVersion, - failoverVersion, - ) - if err1 == nil { - atomic.AddInt32(&successCount, 1) - } - wg.Done() - }(map[string]string{"k0": newValue}) - } - wg.Wait() - m.Equal(int32(1), successCount) - - resp, err3 := m.GetNamespace("", name) - m.NoError(err3) - m.NotNil(resp) - m.Equal(name, resp.Namespace.Info.Name) - m.Equal(state, resp.Namespace.Info.State) - m.Equal(description, resp.Namespace.Info.Description) - m.Equal(owner, resp.Namespace.Info.Owner) - m.EqualValues(time.Duration(retention)*time.Hour*24, *resp.Namespace.Config.Retention) - m.Equal(historyArchivalState, resp.Namespace.Config.HistoryArchivalState) - m.Equal(historyArchivalURI, resp.Namespace.Config.HistoryArchivalUri) - m.Equal(visibilityArchivalState, resp.Namespace.Config.VisibilityArchivalState) - m.Equal(visibilityArchivalURI, resp.Namespace.Config.VisibilityArchivalUri) - m.True(reflect.DeepEqual(testBinaries, resp.Namespace.Config.BadBinaries)) - m.Equal(clusterActive, resp.Namespace.ReplicationConfig.ActiveClusterName) - m.Equal(len(clusters), len(resp.Namespace.ReplicationConfig.Clusters)) - for index := range clusters { - m.Equal(clusters[index], resp.Namespace.ReplicationConfig.Clusters[index]) - } - m.Equal(isGlobalNamespace, resp.IsGlobalNamespace) - m.Equal(configVersion, resp.Namespace.ConfigVersion) - 
m.Equal(failoverVersion, resp.Namespace.FailoverVersion) - - // check namespace data - ss := strings.Split(resp.Namespace.Info.Data["k0"], "-") - m.Equal(2, len(ss)) - vi, err := strconv.Atoi(ss[1]) - m.NoError(err) - m.Equal(true, vi > 0 && vi <= concurrency) -} - -// TestConcurrentUpdateNamespace test -func (m *MetadataPersistenceSuiteV2) TestConcurrentUpdateNamespace() { - id := uuid.New() - name := "concurrent-update-namespace-test-name" - state := enumspb.NAMESPACE_STATE_REGISTERED - description := "update-namespace-test-description" - owner := "update-namespace-test-owner" - data := map[string]string{"k1": "v1"} - retention := int32(10) - historyArchivalState := enumspb.ARCHIVAL_STATE_ENABLED - historyArchivalURI := "test://history/uri" - visibilityArchivalState := enumspb.ARCHIVAL_STATE_ENABLED - visibilityArchivalURI := "test://visibility/uri" - badBinaries := &namespacepb.BadBinaries{map[string]*namespacepb.BadBinaryInfo{}} - - clusterActive := "some random active cluster name" - clusterStandby := "some random standby cluster name" - configVersion := int64(10) - failoverVersion := int64(59) - isGlobalNamespace := true - clusters := []string{clusterActive, clusterStandby} - - resp1, err1 := m.CreateNamespace( - &persistencespb.NamespaceInfo{ - Id: id, - Name: name, - State: state, - Description: description, - Owner: owner, - Data: data, - }, - &persistencespb.NamespaceConfig{ - Retention: timestamp.DurationFromDays(retention), - HistoryArchivalState: historyArchivalState, - HistoryArchivalUri: historyArchivalURI, - VisibilityArchivalState: visibilityArchivalState, - VisibilityArchivalUri: visibilityArchivalURI, - BadBinaries: badBinaries, - }, - &persistencespb.NamespaceReplicationConfig{ - ActiveClusterName: clusterActive, - Clusters: clusters, - }, - isGlobalNamespace, - configVersion, - failoverVersion, - ) - m.NoError(err1) - m.EqualValues(id, resp1.ID) - - resp2, err2 := m.GetNamespace(id, "") - m.NoError(err2) - m.Equal(badBinaries, resp2.Namespace.Config.BadBinaries) - metadata, err := m.MetadataManager.GetMetadata(m.ctx) - m.NoError(err) - notificationVersion := metadata.NotificationVersion - - testBinaries := &namespacepb.BadBinaries{ - Binaries: map[string]*namespacepb.BadBinaryInfo{ - "abc": { - Reason: "test-reason", - Operator: "test-operator", - CreateTime: timestamp.TimePtr(time.Date(2020, 8, 22, 0, 0, 0, 0, time.UTC)), - }, - }, - } - concurrency := 16 - successCount := int32(0) - var wg sync.WaitGroup - for i := 1; i <= concurrency; i++ { - newValue := fmt.Sprintf("v-%v", i) - wg.Add(1) - go func(updatedData map[string]string) { - err3 := m.UpdateNamespace( - &persistencespb.NamespaceInfo{ - Id: resp2.Namespace.Info.Id, - Name: resp2.Namespace.Info.Name, - State: resp2.Namespace.Info.State, - Description: resp2.Namespace.Info.Description, - Owner: resp2.Namespace.Info.Owner, - Data: updatedData, - }, - &persistencespb.NamespaceConfig{ - Retention: resp2.Namespace.Config.Retention, - HistoryArchivalState: resp2.Namespace.Config.HistoryArchivalState, - HistoryArchivalUri: resp2.Namespace.Config.HistoryArchivalUri, - VisibilityArchivalState: resp2.Namespace.Config.VisibilityArchivalState, - VisibilityArchivalUri: resp2.Namespace.Config.VisibilityArchivalUri, - BadBinaries: testBinaries, - }, - &persistencespb.NamespaceReplicationConfig{ - ActiveClusterName: resp2.Namespace.ReplicationConfig.ActiveClusterName, - Clusters: resp2.Namespace.ReplicationConfig.Clusters, - }, - resp2.Namespace.ConfigVersion, - resp2.Namespace.FailoverVersion, - 
resp2.Namespace.FailoverNotificationVersion, - &time.Time{}, - notificationVersion, - isGlobalNamespace, - ) - if err3 == nil { - atomic.AddInt32(&successCount, 1) - } - wg.Done() - }(map[string]string{"k0": newValue}) - } - wg.Wait() - m.Equal(int32(1), successCount) - - resp3, err3 := m.GetNamespace("", name) - m.NoError(err3) - m.NotNil(resp3) - m.EqualValues(id, resp3.Namespace.Info.Id) - m.Equal(name, resp3.Namespace.Info.Name) - m.Equal(state, resp3.Namespace.Info.State) - m.Equal(isGlobalNamespace, resp3.IsGlobalNamespace) - m.Equal(description, resp3.Namespace.Info.Description) - m.Equal(owner, resp3.Namespace.Info.Owner) - - m.EqualValues(time.Duration(retention)*time.Hour*24, *resp3.Namespace.Config.Retention) - m.Equal(historyArchivalState, resp3.Namespace.Config.HistoryArchivalState) - m.Equal(historyArchivalURI, resp3.Namespace.Config.HistoryArchivalUri) - m.Equal(visibilityArchivalState, resp3.Namespace.Config.VisibilityArchivalState) - m.Equal(visibilityArchivalURI, resp3.Namespace.Config.VisibilityArchivalUri) - m.True(reflect.DeepEqual(testBinaries, resp3.Namespace.Config.BadBinaries)) - m.Equal(clusterActive, resp3.Namespace.ReplicationConfig.ActiveClusterName) - m.Equal(len(clusters), len(resp3.Namespace.ReplicationConfig.Clusters)) - for index := range clusters { - m.Equal(clusters[index], resp3.Namespace.ReplicationConfig.Clusters[index]) - } - m.Equal(isGlobalNamespace, resp3.IsGlobalNamespace) - m.Equal(configVersion, resp3.Namespace.ConfigVersion) - m.Equal(failoverVersion, resp3.Namespace.FailoverVersion) - - // check namespace data - ss := strings.Split(resp3.Namespace.Info.Data["k0"], "-") - m.Equal(2, len(ss)) - vi, err := strconv.Atoi(ss[1]) - m.NoError(err) - m.Equal(true, vi > 0 && vi <= concurrency) -} - -// TestUpdateNamespace test -func (m *MetadataPersistenceSuiteV2) TestUpdateNamespace() { - id := uuid.New() - name := "update-namespace-test-name" - state := enumspb.NAMESPACE_STATE_REGISTERED - description := "update-namespace-test-description" - owner := "update-namespace-test-owner" - data := map[string]string{"k1": "v1"} - retention := int32(10) - historyArchivalState := enumspb.ARCHIVAL_STATE_ENABLED - historyArchivalURI := "test://history/uri" - visibilityArchivalState := enumspb.ARCHIVAL_STATE_ENABLED - visibilityArchivalURI := "test://visibility/uri" - - clusterActive := "some random active cluster name" - clusterStandby := "some random standby cluster name" - configVersion := int64(10) - failoverVersion := int64(59) - failoverEndTime := time.Now().UTC() - isGlobalNamespace := true - clusters := []string{clusterActive, clusterStandby} - - resp1, err1 := m.CreateNamespace( - &persistencespb.NamespaceInfo{ - Id: id, - Name: name, - State: state, - Description: description, - Owner: owner, - Data: data, - }, - &persistencespb.NamespaceConfig{ - Retention: timestamp.DurationFromDays(retention), - HistoryArchivalState: historyArchivalState, - HistoryArchivalUri: historyArchivalURI, - VisibilityArchivalState: visibilityArchivalState, - VisibilityArchivalUri: visibilityArchivalURI, - }, - &persistencespb.NamespaceReplicationConfig{ - ActiveClusterName: clusterActive, - Clusters: clusters, - }, - isGlobalNamespace, - configVersion, - failoverVersion, - ) - m.NoError(err1) - m.EqualValues(id, resp1.ID) - - resp2, err2 := m.GetNamespace(id, "") - m.NoError(err2) - metadata, err := m.MetadataManager.GetMetadata(m.ctx) - m.NoError(err) - notificationVersion := metadata.NotificationVersion - - updatedState := enumspb.NAMESPACE_STATE_DEPRECATED - 
updatedDescription := "description-updated" - updatedOwner := "owner-updated" - // This will overriding the previous key-value pair - updatedData := map[string]string{"k1": "v2"} - updatedRetention := timestamp.DurationFromDays(20) - updatedHistoryArchivalState := enumspb.ARCHIVAL_STATE_DISABLED - updatedHistoryArchivalURI := "" - updatedVisibilityArchivalState := enumspb.ARCHIVAL_STATE_DISABLED - updatedVisibilityArchivalURI := "" - - updateClusterActive := "other random active cluster name" - updateClusterStandby := "other random standby cluster name" - updateConfigVersion := int64(12) - updateFailoverVersion := int64(28) - updateFailoverNotificationVersion := int64(14) - updateClusters := []string{updateClusterActive, updateClusterStandby} - - testBinaries := &namespacepb.BadBinaries{ - Binaries: map[string]*namespacepb.BadBinaryInfo{ - "abc": { - Reason: "test-reason", - Operator: "test-operator", - CreateTime: timestamp.TimePtr(time.Date(2020, 8, 22, 0, 0, 0, 0, time.UTC)), - }, - }, - } - - err3 := m.UpdateNamespace( - &persistencespb.NamespaceInfo{ - Id: resp2.Namespace.Info.Id, - Name: resp2.Namespace.Info.Name, - State: updatedState, - Description: updatedDescription, - Owner: updatedOwner, - Data: updatedData, - }, - &persistencespb.NamespaceConfig{ - Retention: updatedRetention, - HistoryArchivalState: updatedHistoryArchivalState, - HistoryArchivalUri: updatedHistoryArchivalURI, - VisibilityArchivalState: updatedVisibilityArchivalState, - VisibilityArchivalUri: updatedVisibilityArchivalURI, - BadBinaries: testBinaries, - }, - &persistencespb.NamespaceReplicationConfig{ - ActiveClusterName: updateClusterActive, - Clusters: updateClusters, - }, - updateConfigVersion, - updateFailoverVersion, - updateFailoverNotificationVersion, - &failoverEndTime, - notificationVersion, - isGlobalNamespace, - ) - m.NoError(err3) - - resp4, err4 := m.GetNamespace("", name) - m.NoError(err4) - m.NotNil(resp4) - m.EqualValues(id, resp4.Namespace.Info.Id) - m.Equal(name, resp4.Namespace.Info.Name) - m.Equal(isGlobalNamespace, resp4.IsGlobalNamespace) - m.Equal(updatedState, resp4.Namespace.Info.State) - m.Equal(updatedDescription, resp4.Namespace.Info.Description) - m.Equal(updatedOwner, resp4.Namespace.Info.Owner) - m.Equal(updatedData, resp4.Namespace.Info.Data) - m.EqualValues(*updatedRetention, *resp4.Namespace.Config.Retention) - m.Equal(updatedHistoryArchivalState, resp4.Namespace.Config.HistoryArchivalState) - m.Equal(updatedHistoryArchivalURI, resp4.Namespace.Config.HistoryArchivalUri) - m.Equal(updatedVisibilityArchivalState, resp4.Namespace.Config.VisibilityArchivalState) - m.Equal(updatedVisibilityArchivalURI, resp4.Namespace.Config.VisibilityArchivalUri) - m.True(reflect.DeepEqual(testBinaries, resp4.Namespace.Config.BadBinaries)) - m.Equal(updateClusterActive, resp4.Namespace.ReplicationConfig.ActiveClusterName) - m.Equal(len(updateClusters), len(resp4.Namespace.ReplicationConfig.Clusters)) - for index := range clusters { - m.Equal(updateClusters[index], resp4.Namespace.ReplicationConfig.Clusters[index]) - } - m.Equal(updateConfigVersion, resp4.Namespace.ConfigVersion) - m.Equal(updateFailoverVersion, resp4.Namespace.FailoverVersion) - m.Equal(updateFailoverNotificationVersion, resp4.Namespace.FailoverNotificationVersion) - m.Equal(notificationVersion, resp4.NotificationVersion) - m.EqualTimes(failoverEndTime, *resp4.Namespace.FailoverEndTime) - - resp5, err5 := m.GetNamespace(id, "") - m.NoError(err5) - m.NotNil(resp5) - m.EqualValues(id, resp5.Namespace.Info.Id) - m.Equal(name, 
resp5.Namespace.Info.Name) - m.Equal(isGlobalNamespace, resp5.IsGlobalNamespace) - m.Equal(updatedState, resp5.Namespace.Info.State) - m.Equal(updatedDescription, resp5.Namespace.Info.Description) - m.Equal(updatedOwner, resp5.Namespace.Info.Owner) - m.Equal(updatedData, resp5.Namespace.Info.Data) - m.EqualValues(*updatedRetention, *resp5.Namespace.Config.Retention) - m.Equal(updatedHistoryArchivalState, resp5.Namespace.Config.HistoryArchivalState) - m.Equal(updatedHistoryArchivalURI, resp5.Namespace.Config.HistoryArchivalUri) - m.Equal(updatedVisibilityArchivalState, resp5.Namespace.Config.VisibilityArchivalState) - m.Equal(updatedVisibilityArchivalURI, resp5.Namespace.Config.VisibilityArchivalUri) - m.Equal(updateClusterActive, resp5.Namespace.ReplicationConfig.ActiveClusterName) - m.Equal(len(updateClusters), len(resp5.Namespace.ReplicationConfig.Clusters)) - for index := range clusters { - m.Equal(updateClusters[index], resp5.Namespace.ReplicationConfig.Clusters[index]) - } - m.Equal(updateConfigVersion, resp5.Namespace.ConfigVersion) - m.Equal(updateFailoverVersion, resp5.Namespace.FailoverVersion) - m.Equal(updateFailoverNotificationVersion, resp5.Namespace.FailoverNotificationVersion) - m.Equal(notificationVersion, resp5.NotificationVersion) - m.EqualTimes(failoverEndTime, *resp4.Namespace.FailoverEndTime) - - notificationVersion++ - err6 := m.UpdateNamespace( - &persistencespb.NamespaceInfo{ - Id: resp2.Namespace.Info.Id, - Name: resp2.Namespace.Info.Name, - State: updatedState, - Description: updatedDescription, - Owner: updatedOwner, - Data: updatedData, - }, - &persistencespb.NamespaceConfig{ - Retention: updatedRetention, - HistoryArchivalState: updatedHistoryArchivalState, - HistoryArchivalUri: updatedHistoryArchivalURI, - VisibilityArchivalState: updatedVisibilityArchivalState, - VisibilityArchivalUri: updatedVisibilityArchivalURI, - BadBinaries: testBinaries, - }, - &persistencespb.NamespaceReplicationConfig{ - ActiveClusterName: updateClusterActive, - Clusters: updateClusters, - }, - updateConfigVersion, - updateFailoverVersion, - updateFailoverNotificationVersion, - &time.Time{}, - notificationVersion, - isGlobalNamespace, - ) - m.NoError(err6) - - resp6, err6 := m.GetNamespace(id, "") - m.NoError(err6) - m.NotNil(resp6) - m.EqualValues(id, resp6.Namespace.Info.Id) - m.Equal(name, resp6.Namespace.Info.Name) - m.Equal(isGlobalNamespace, resp6.IsGlobalNamespace) - m.Equal(updatedState, resp6.Namespace.Info.State) - m.Equal(updatedDescription, resp6.Namespace.Info.Description) - m.Equal(updatedOwner, resp6.Namespace.Info.Owner) - m.Equal(updatedData, resp6.Namespace.Info.Data) - m.EqualValues(*updatedRetention, *resp6.Namespace.Config.Retention) - m.Equal(updatedHistoryArchivalState, resp6.Namespace.Config.HistoryArchivalState) - m.Equal(updatedHistoryArchivalURI, resp6.Namespace.Config.HistoryArchivalUri) - m.Equal(updatedVisibilityArchivalState, resp6.Namespace.Config.VisibilityArchivalState) - m.Equal(updatedVisibilityArchivalURI, resp6.Namespace.Config.VisibilityArchivalUri) - m.True(reflect.DeepEqual(testBinaries, resp6.Namespace.Config.BadBinaries)) - m.Equal(updateClusterActive, resp6.Namespace.ReplicationConfig.ActiveClusterName) - m.Equal(len(updateClusters), len(resp6.Namespace.ReplicationConfig.Clusters)) - for index := range clusters { - m.Equal(updateClusters[index], resp4.Namespace.ReplicationConfig.Clusters[index]) - } - m.Equal(updateConfigVersion, resp6.Namespace.ConfigVersion) - m.Equal(updateFailoverVersion, resp6.Namespace.FailoverVersion) - 
m.Equal(updateFailoverNotificationVersion, resp6.Namespace.FailoverNotificationVersion) - m.Equal(notificationVersion, resp6.NotificationVersion) - m.EqualTimes(time.Unix(0, 0).UTC(), *resp6.Namespace.FailoverEndTime) -} - -func (m *MetadataPersistenceSuiteV2) TestRenameNamespace() { - id := uuid.New() - name := "rename-namespace-test-name" - newName := "rename-namespace-test-new-name" - newNewName := "rename-namespace-test-new-new-name" - state := enumspb.NAMESPACE_STATE_REGISTERED - description := "rename-namespace-test-description" - owner := "rename-namespace-test-owner" - data := map[string]string{"k1": "v1"} - retention := int32(10) - historyArchivalState := enumspb.ARCHIVAL_STATE_ENABLED - historyArchivalURI := "test://history/uri" - visibilityArchivalState := enumspb.ARCHIVAL_STATE_ENABLED - visibilityArchivalURI := "test://visibility/uri" - - clusterActive := "some random active cluster name" - clusterStandby := "some random standby cluster name" - configVersion := int64(10) - failoverVersion := int64(59) - isGlobalNamespace := true - clusters := []string{clusterActive, clusterStandby} - - resp1, err1 := m.CreateNamespace( - &persistencespb.NamespaceInfo{ - Id: id, - Name: name, - State: state, - Description: description, - Owner: owner, - Data: data, - }, - &persistencespb.NamespaceConfig{ - Retention: timestamp.DurationFromDays(retention), - HistoryArchivalState: historyArchivalState, - HistoryArchivalUri: historyArchivalURI, - VisibilityArchivalState: visibilityArchivalState, - VisibilityArchivalUri: visibilityArchivalURI, - }, - &persistencespb.NamespaceReplicationConfig{ - ActiveClusterName: clusterActive, - Clusters: clusters, - }, - isGlobalNamespace, - configVersion, - failoverVersion, - ) - m.NoError(err1) - m.EqualValues(id, resp1.ID) - - _, err2 := m.GetNamespace(id, "") - m.NoError(err2) - - err3 := m.MetadataManager.RenameNamespace(m.ctx, &p.RenameNamespaceRequest{ - PreviousName: name, - NewName: newName, - }) - m.NoError(err3) - - resp4, err4 := m.GetNamespace("", newName) - m.NoError(err4) - m.NotNil(resp4) - m.EqualValues(id, resp4.Namespace.Info.Id) - m.Equal(newName, resp4.Namespace.Info.Name) - m.Equal(isGlobalNamespace, resp4.IsGlobalNamespace) - - resp5, err5 := m.GetNamespace(id, "") - m.NoError(err5) - m.NotNil(resp5) - m.EqualValues(id, resp5.Namespace.Info.Id) - m.Equal(newName, resp5.Namespace.Info.Name) - m.Equal(isGlobalNamespace, resp5.IsGlobalNamespace) - - err6 := m.MetadataManager.RenameNamespace(m.ctx, &p.RenameNamespaceRequest{ - PreviousName: newName, - NewName: newNewName, - }) - m.NoError(err6) - - resp6, err6 := m.GetNamespace(id, "") - m.NoError(err6) - m.NotNil(resp6) - m.EqualValues(id, resp6.Namespace.Info.Id) - m.Equal(newNewName, resp6.Namespace.Info.Name) - m.Equal(isGlobalNamespace, resp6.IsGlobalNamespace) -} - -// TestDeleteNamespace test -func (m *MetadataPersistenceSuiteV2) TestDeleteNamespace() { - id := uuid.New() - name := "delete-namespace-test-name" - state := enumspb.NAMESPACE_STATE_REGISTERED - description := "delete-namespace-test-description" - owner := "delete-namespace-test-owner" - data := map[string]string{"k1": "v1"} - retention := timestamp.DurationFromDays(10) - historyArchivalState := enumspb.ARCHIVAL_STATE_ENABLED - historyArchivalURI := "test://history/uri" - visibilityArchivalState := enumspb.ARCHIVAL_STATE_ENABLED - visibilityArchivalURI := "test://visibility/uri" - - clusterActive := "some random active cluster name" - clusterStandby := "some random standby cluster name" - configVersion := int64(10) - 
failoverVersion := int64(59) - isGlobalNamespace := true - clusters := []string{clusterActive, clusterStandby} - - resp1, err1 := m.CreateNamespace( - &persistencespb.NamespaceInfo{ - Id: id, - Name: name, - State: state, - Description: description, - Owner: owner, - Data: data, - }, - &persistencespb.NamespaceConfig{ - Retention: retention, - HistoryArchivalState: historyArchivalState, - HistoryArchivalUri: historyArchivalURI, - VisibilityArchivalState: visibilityArchivalState, - VisibilityArchivalUri: visibilityArchivalURI, - }, - &persistencespb.NamespaceReplicationConfig{ - ActiveClusterName: clusterActive, - Clusters: clusters, - }, - isGlobalNamespace, - configVersion, - failoverVersion, - ) - m.NoError(err1) - m.EqualValues(id, resp1.ID) - - resp2, err2 := m.GetNamespace("", name) - m.NoError(err2) - m.NotNil(resp2) - - err3 := m.DeleteNamespace("", name) - m.NoError(err3) - - // May need to loop here to avoid potential inconsistent read-after-write in cassandra - var err4 error - var resp4 *p.GetNamespaceResponse - for i := 0; i < 3; i++ { - resp4, err4 = m.GetNamespace("", name) - if err4 != nil { - break - } - time.Sleep(time.Second * time.Duration(i)) - } - m.Error(err4) - m.IsType(&serviceerror.NamespaceNotFound{}, err4) - m.Nil(resp4) - - resp5, err5 := m.GetNamespace(id, "") - m.Error(err5) - m.IsType(&serviceerror.NamespaceNotFound{}, err5) - m.Nil(resp5) - - id = uuid.New() - resp6, err6 := m.CreateNamespace( - &persistencespb.NamespaceInfo{ - Id: id, - Name: name, - State: state, - Description: description, - Owner: owner, - Data: data, - }, - &persistencespb.NamespaceConfig{ - Retention: retention, - HistoryArchivalState: historyArchivalState, - HistoryArchivalUri: historyArchivalURI, - VisibilityArchivalState: visibilityArchivalState, - VisibilityArchivalUri: visibilityArchivalURI, - }, - &persistencespb.NamespaceReplicationConfig{ - ActiveClusterName: clusterActive, - Clusters: clusters, - }, - isGlobalNamespace, - configVersion, - failoverVersion, - ) - m.NoError(err6) - m.EqualValues(id, resp6.ID) - - err7 := m.DeleteNamespace(id, "") - m.NoError(err7) - - resp8, err8 := m.GetNamespace("", name) - m.Error(err8) - m.IsType(&serviceerror.NamespaceNotFound{}, err8) - m.Nil(resp8) - - resp9, err9 := m.GetNamespace(id, "") - m.Error(err9) - m.IsType(&serviceerror.NamespaceNotFound{}, err9) - m.Nil(resp9) -} - -// TestListNamespaces test -func (m *MetadataPersistenceSuiteV2) TestListNamespaces() { - clusterActive1 := "some random active cluster name" - clusterStandby1 := "some random standby cluster name" - clusters1 := []string{clusterActive1, clusterStandby1} - - clusterActive2 := "other random active cluster name" - clusterStandby2 := "other random standby cluster name" - clusters2 := []string{clusterActive2, clusterStandby2} - - testBinaries1 := &namespacepb.BadBinaries{ - Binaries: map[string]*namespacepb.BadBinaryInfo{ - "abc": { - Reason: "test-reason1", - Operator: "test-operator1", - CreateTime: timestamp.TimePtr(time.Date(2020, 8, 22, 0, 0, 0, 0, time.UTC)), - }, - }, - } - testBinaries2 := &namespacepb.BadBinaries{ - Binaries: map[string]*namespacepb.BadBinaryInfo{ - "efg": { - Reason: "test-reason2", - Operator: "test-operator2", - CreateTime: timestamp.TimePtr(time.Date(2020, 8, 22, 0, 0, 0, 0, time.UTC)), - }, - }, - } - - inputNamespaces := []*p.GetNamespaceResponse{ - { - Namespace: &persistencespb.NamespaceDetail{ - Info: &persistencespb.NamespaceInfo{ - Id: uuid.New(), - Name: "list-namespace-test-name-1", - State: enumspb.NAMESPACE_STATE_REGISTERED, - 
Description: "list-namespace-test-description-1", - Owner: "list-namespace-test-owner-1", - Data: map[string]string{"k1": "v1"}, - }, - Config: &persistencespb.NamespaceConfig{ - Retention: timestamp.DurationFromDays(109), - HistoryArchivalState: enumspb.ARCHIVAL_STATE_ENABLED, - HistoryArchivalUri: "test://history/uri", - VisibilityArchivalState: enumspb.ARCHIVAL_STATE_ENABLED, - VisibilityArchivalUri: "test://visibility/uri", - BadBinaries: testBinaries1, - }, - ReplicationConfig: &persistencespb.NamespaceReplicationConfig{ - ActiveClusterName: clusterActive1, - Clusters: clusters1, - }, - - ConfigVersion: 133, - FailoverVersion: 266, - }, - IsGlobalNamespace: true, - }, - { - Namespace: &persistencespb.NamespaceDetail{ - Info: &persistencespb.NamespaceInfo{ - Id: uuid.New(), - Name: "list-namespace-test-name-2", - State: enumspb.NAMESPACE_STATE_REGISTERED, - Description: "list-namespace-test-description-2", - Owner: "list-namespace-test-owner-2", - Data: map[string]string{"k1": "v2"}, - }, - Config: &persistencespb.NamespaceConfig{ - Retention: timestamp.DurationFromDays(326), - HistoryArchivalState: enumspb.ARCHIVAL_STATE_DISABLED, - HistoryArchivalUri: "", - VisibilityArchivalState: enumspb.ARCHIVAL_STATE_DISABLED, - VisibilityArchivalUri: "", - BadBinaries: testBinaries2, - }, - ReplicationConfig: &persistencespb.NamespaceReplicationConfig{ - ActiveClusterName: clusterActive2, - Clusters: clusters2, - }, - ConfigVersion: 400, - FailoverVersion: 667, - }, - IsGlobalNamespace: false, - }, - } - for _, namespace := range inputNamespaces { - _, err := m.CreateNamespace( - namespace.Namespace.Info, - namespace.Namespace.Config, - namespace.Namespace.ReplicationConfig, - namespace.IsGlobalNamespace, - namespace.Namespace.ConfigVersion, - namespace.Namespace.FailoverVersion, - ) - m.NoError(err) - } - - var token []byte - const pageSize = 1 - pageCount := 0 - outputNamespaces := make(map[string]*p.GetNamespaceResponse) - for { - resp, err := m.ListNamespaces(pageSize, token) - m.NoError(err) - token = resp.NextPageToken - for _, namespace := range resp.Namespaces { - outputNamespaces[namespace.Namespace.Info.Id] = namespace - // global notification version is already tested, so here we make it 0 - // so we can test == easily - namespace.NotificationVersion = 0 - } - pageCount++ - if len(token) == 0 { - break - } - } - - // 2 pages with data and 1 empty page which is unavoidable. 
- m.Equal(pageCount, 3) - m.Equal(len(inputNamespaces), len(outputNamespaces)) - for _, namespace := range inputNamespaces { - m.Equal(namespace, outputNamespaces[namespace.Namespace.Info.Id]) - } -} - -func (m *MetadataPersistenceSuiteV2) TestListNamespaces_DeletedNamespace() { - inputNamespaces := []*p.GetNamespaceResponse{ - { - Namespace: &persistencespb.NamespaceDetail{ - Info: &persistencespb.NamespaceInfo{ - Id: uuid.New(), - Name: "list-namespace-test-name-1", - State: enumspb.NAMESPACE_STATE_REGISTERED, - }, - Config: &persistencespb.NamespaceConfig{}, - ReplicationConfig: &persistencespb.NamespaceReplicationConfig{}, - }, - }, - { - Namespace: &persistencespb.NamespaceDetail{ - Info: &persistencespb.NamespaceInfo{ - Id: uuid.New(), - Name: "list-namespace-test-name-2", - State: enumspb.NAMESPACE_STATE_DELETED, - }, - Config: &persistencespb.NamespaceConfig{}, - ReplicationConfig: &persistencespb.NamespaceReplicationConfig{}, - }, - }, - { - Namespace: &persistencespb.NamespaceDetail{ - Info: &persistencespb.NamespaceInfo{ - Id: uuid.New(), - Name: "list-namespace-test-name-3", - State: enumspb.NAMESPACE_STATE_REGISTERED, - }, - Config: &persistencespb.NamespaceConfig{}, - ReplicationConfig: &persistencespb.NamespaceReplicationConfig{}, - }, - }, - { - Namespace: &persistencespb.NamespaceDetail{ - Info: &persistencespb.NamespaceInfo{ - Id: uuid.New(), - Name: "list-namespace-test-name-4", - State: enumspb.NAMESPACE_STATE_DELETED, - }, - Config: &persistencespb.NamespaceConfig{}, - ReplicationConfig: &persistencespb.NamespaceReplicationConfig{}, - }, - }, - } - for _, namespace := range inputNamespaces { - _, err := m.CreateNamespace( - namespace.Namespace.Info, - namespace.Namespace.Config, - namespace.Namespace.ReplicationConfig, - namespace.IsGlobalNamespace, - namespace.Namespace.ConfigVersion, - namespace.Namespace.FailoverVersion, - ) - m.NoError(err) - } - - var token []byte - var listNamespacesPageSize2 []*p.GetNamespaceResponse - pageCount := 0 - for { - resp, err := m.ListNamespaces(2, token) - m.NoError(err) - token = resp.NextPageToken - listNamespacesPageSize2 = append(listNamespacesPageSize2, resp.Namespaces...) - pageCount++ - if len(token) == 0 { - break - } - } - - // 1 page with data and 1 empty page which is unavoidable. - m.Equal(2, pageCount) - m.Len(listNamespacesPageSize2, 2) - for _, namespace := range listNamespacesPageSize2 { - m.NotEqual(namespace.Namespace.Info.State, enumspb.NAMESPACE_STATE_DELETED) - } - - pageCount = 0 - var listNamespacesPageSize1 []*p.GetNamespaceResponse - for { - resp, err := m.ListNamespaces(1, token) - m.NoError(err) - token = resp.NextPageToken - listNamespacesPageSize1 = append(listNamespacesPageSize1, resp.Namespaces...) - pageCount++ - if len(token) == 0 { - break - } - } - - // 2 pages with data and 1 empty page which is unavoidable. 
- m.Equal(3, pageCount) - m.Len(listNamespacesPageSize1, 2) - for _, namespace := range listNamespacesPageSize1 { - m.NotEqual(namespace.Namespace.Info.State, enumspb.NAMESPACE_STATE_DELETED) - } -} - -// CreateNamespace helper method -func (m *MetadataPersistenceSuiteV2) CreateNamespace(info *persistencespb.NamespaceInfo, config *persistencespb.NamespaceConfig, - replicationConfig *persistencespb.NamespaceReplicationConfig, isGlobalnamespace bool, configVersion int64, failoverVersion int64) (*p.CreateNamespaceResponse, error) { - return m.MetadataManager.CreateNamespace(m.ctx, &p.CreateNamespaceRequest{ - Namespace: &persistencespb.NamespaceDetail{ - Info: info, - Config: config, - ReplicationConfig: replicationConfig, - - ConfigVersion: configVersion, - FailoverVersion: failoverVersion, - }, IsGlobalNamespace: isGlobalnamespace, - }) -} - -// GetNamespace helper method -func (m *MetadataPersistenceSuiteV2) GetNamespace(id string, name string) (*p.GetNamespaceResponse, error) { - return m.MetadataManager.GetNamespace(m.ctx, &p.GetNamespaceRequest{ - ID: id, - Name: name, - }) -} - -// UpdateNamespace helper method -func (m *MetadataPersistenceSuiteV2) UpdateNamespace( - info *persistencespb.NamespaceInfo, - config *persistencespb.NamespaceConfig, - replicationConfig *persistencespb.NamespaceReplicationConfig, - configVersion int64, - failoverVersion int64, - failoverNotificationVersion int64, - failoverEndTime *time.Time, - notificationVersion int64, - isGlobalNamespace bool, -) error { - return m.MetadataManager.UpdateNamespace(m.ctx, &p.UpdateNamespaceRequest{ - Namespace: &persistencespb.NamespaceDetail{ - Info: info, - Config: config, - ReplicationConfig: replicationConfig, - ConfigVersion: configVersion, - FailoverVersion: failoverVersion, - FailoverEndTime: failoverEndTime, - FailoverNotificationVersion: failoverNotificationVersion, - }, - NotificationVersion: notificationVersion, - IsGlobalNamespace: isGlobalNamespace, - }) -} - -// DeleteNamespace helper method -func (m *MetadataPersistenceSuiteV2) DeleteNamespace(id string, name string) error { - if len(id) > 0 { - return m.MetadataManager.DeleteNamespace(m.ctx, &p.DeleteNamespaceRequest{ID: id}) - } - return m.MetadataManager.DeleteNamespaceByName(m.ctx, &p.DeleteNamespaceByNameRequest{Name: name}) -} - -// ListNamespaces helper method -func (m *MetadataPersistenceSuiteV2) ListNamespaces(pageSize int, pageToken []byte) (*p.ListNamespacesResponse, error) { - return m.MetadataManager.ListNamespaces(m.ctx, &p.ListNamespacesRequest{ - PageSize: pageSize, - NextPageToken: pageToken, - }) -} diff -Nru temporal-1.21.5-1/src/common/persistence/persistence-tests/metadata_persistence_v2.go temporal-1.22.5/src/common/persistence/persistence-tests/metadata_persistence_v2.go --- temporal-1.21.5-1/src/common/persistence/persistence-tests/metadata_persistence_v2.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/persistence-tests/metadata_persistence_v2.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,1514 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package persistencetests + +import ( + "context" + "fmt" + "reflect" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/pborman/uuid" + "github.com/stretchr/testify/require" + enumspb "go.temporal.io/api/enums/v1" + namespacepb "go.temporal.io/api/namespace/v1" + "go.temporal.io/api/serviceerror" + + persistencespb "go.temporal.io/server/api/persistence/v1" + "go.temporal.io/server/common/cluster" + "go.temporal.io/server/common/debug" + p "go.temporal.io/server/common/persistence" + "go.temporal.io/server/common/persistence/cassandra" + "go.temporal.io/server/common/primitives/timestamp" +) + +type ( + // MetadataPersistenceSuiteV2 is test of the V2 version of metadata persistence + MetadataPersistenceSuiteV2 struct { + TestBase + // override suite.Suite.Assertions with require.Assertions; this means that s.NotNil(nil) will stop the test, + // not merely log an error + *require.Assertions + + ctx context.Context + cancel context.CancelFunc + } +) + +// SetupSuite implementation +func (m *MetadataPersistenceSuiteV2) SetupSuite() { +} + +// SetupTest implementation +func (m *MetadataPersistenceSuiteV2) SetupTest() { + // Have to define our overridden assertions in the test setup. If we did it earlier, s.T() will return nil + m.Assertions = require.New(m.T()) + m.ctx, m.cancel = context.WithTimeout(context.Background(), 30*time.Second*debug.TimeoutMultiplier) + + // cleanup the namespace created + var token []byte + pageSize := 10 +ListLoop: + for { + resp, err := m.ListNamespaces(pageSize, token) + m.NoError(err) + token = resp.NextPageToken + for _, n := range resp.Namespaces { + m.NoError(m.DeleteNamespace(n.Namespace.Info.Id, "")) + } + if len(token) == 0 { + break ListLoop + } + } +} + +// TearDownTest implementation +func (m *MetadataPersistenceSuiteV2) TearDownTest() { + m.cancel() +} + +// TearDownSuite implementation +func (m *MetadataPersistenceSuiteV2) TearDownSuite() { + m.TearDownWorkflowStore() +} + +// Partial namespace creation is only relevant for Cassandra, the following tests will only run when the underlying cluster is cassandra +func (m *MetadataPersistenceSuiteV2) createPartialNamespace(id string, name string) { + // only add the namespace to namespaces_by_id table and not namespaces table + const constNamespacePartition = 0 + const templateCreateNamespaceQuery = `INSERT INTO namespaces_by_id (` + + `id, name) ` + + `VALUES(?, ?) 
IF NOT EXISTS` + query := m.DefaultTestCluster.(*cassandra.TestCluster).GetSession().Query(templateCreateNamespaceQuery, id, name).WithContext(context.Background()) + err := query.Exec() + m.NoError(err) + +} + +func (m *MetadataPersistenceSuiteV2) truncatePartialNamespace() { + query := m.DefaultTestCluster.(*cassandra.TestCluster).GetSession().Query("TRUNCATE namespaces_by_id").WithContext(context.Background()) + err := query.Exec() + m.NoError(err) + + query = m.DefaultTestCluster.(*cassandra.TestCluster).GetSession().Query("TRUNCATE namespaces").WithContext(context.Background()) + err = query.Exec() + m.NoError(err) +} + +func (m *MetadataPersistenceSuiteV2) TestCreateWithPartialNamespaceSameNameSameID() { + // This is only relevant for cassandra + switch m.DefaultTestCluster.(type) { + case *cassandra.TestCluster: + default: + return + } + id := uuid.New() + name := "create-partial-namespace-test-name" + m.createPartialNamespace(id, name) + + state := enumspb.NAMESPACE_STATE_REGISTERED + description := "create-namespace-test-description" + owner := "create-namespace-test-owner" + data := map[string]string{"k1": "v1"} + retention := int32(10) + historyArchivalState := enumspb.ARCHIVAL_STATE_ENABLED + historyArchivalURI := "test://history/uri" + visibilityArchivalState := enumspb.ARCHIVAL_STATE_ENABLED + visibilityArchivalURI := "test://visibility/uri" + badBinaries := &namespacepb.BadBinaries{Binaries: map[string]*namespacepb.BadBinaryInfo{}} + isGlobalNamespace := false + configVersion := int64(0) + failoverVersion := int64(0) + + resp0, err0 := m.CreateNamespace( + &persistencespb.NamespaceInfo{ + Id: id, + Name: name, + State: state, + Description: description, + Owner: owner, + Data: data, + }, + &persistencespb.NamespaceConfig{ + Retention: timestamp.DurationFromDays(retention), + HistoryArchivalState: historyArchivalState, + HistoryArchivalUri: historyArchivalURI, + VisibilityArchivalState: visibilityArchivalState, + VisibilityArchivalUri: visibilityArchivalURI, + BadBinaries: badBinaries, + }, + &persistencespb.NamespaceReplicationConfig{}, + isGlobalNamespace, + configVersion, + failoverVersion, + ) + m.NoError(err0) + m.NotNil(resp0) + m.EqualValues(id, resp0.ID) + + // for namespace which do not have replication config set, will default to + // use current cluster as active, with current cluster as all clusters + resp1, err1 := m.GetNamespace(id, "") + m.NoError(err1) + m.NotNil(resp1) + m.EqualValues(id, resp1.Namespace.Info.Id) + m.Equal(name, resp1.Namespace.Info.Name) + m.Equal(state, resp1.Namespace.Info.State) + m.Equal(description, resp1.Namespace.Info.Description) + m.Equal(owner, resp1.Namespace.Info.Owner) + m.Equal(data, resp1.Namespace.Info.Data) + m.EqualValues(time.Duration(retention)*time.Hour*24, *resp1.Namespace.Config.Retention) + m.Equal(historyArchivalState, resp1.Namespace.Config.HistoryArchivalState) + m.Equal(historyArchivalURI, resp1.Namespace.Config.HistoryArchivalUri) + m.Equal(visibilityArchivalState, resp1.Namespace.Config.VisibilityArchivalState) + m.Equal(visibilityArchivalURI, resp1.Namespace.Config.VisibilityArchivalUri) + m.Equal(badBinaries, resp1.Namespace.Config.BadBinaries) + m.Equal(cluster.TestCurrentClusterName, resp1.Namespace.ReplicationConfig.ActiveClusterName) + m.Equal(1, len(resp1.Namespace.ReplicationConfig.Clusters)) + m.Equal(isGlobalNamespace, resp1.IsGlobalNamespace) + m.Equal(configVersion, resp1.Namespace.ConfigVersion) + m.Equal(failoverVersion, resp1.Namespace.FailoverVersion) + 
m.True(resp1.Namespace.ReplicationConfig.Clusters[0] == cluster.TestCurrentClusterName) + m.Equal(p.InitialFailoverNotificationVersion, resp1.Namespace.FailoverNotificationVersion) + m.truncatePartialNamespace() +} + +func (m *MetadataPersistenceSuiteV2) TestCreateWithPartialNamespaceSameNameDifferentID() { + // This is only relevant for cassandra + switch m.DefaultTestCluster.(type) { + case *cassandra.TestCluster: + default: + return + } + + id := uuid.New() + partialID := uuid.New() + name := "create-partial-namespace-test-name" + m.createPartialNamespace(partialID, name) + state := enumspb.NAMESPACE_STATE_REGISTERED + description := "create-namespace-test-description" + owner := "create-namespace-test-owner" + data := map[string]string{"k1": "v1"} + retention := int32(10) + historyArchivalState := enumspb.ARCHIVAL_STATE_ENABLED + historyArchivalURI := "test://history/uri" + visibilityArchivalState := enumspb.ARCHIVAL_STATE_ENABLED + visibilityArchivalURI := "test://visibility/uri" + badBinaries := &namespacepb.BadBinaries{Binaries: map[string]*namespacepb.BadBinaryInfo{}} + isGlobalNamespace := false + configVersion := int64(0) + failoverVersion := int64(0) + + resp0, err0 := m.CreateNamespace( + &persistencespb.NamespaceInfo{ + Id: id, + Name: name, + State: state, + Description: description, + Owner: owner, + Data: data, + }, + &persistencespb.NamespaceConfig{ + Retention: timestamp.DurationFromDays(retention), + HistoryArchivalState: historyArchivalState, + HistoryArchivalUri: historyArchivalURI, + VisibilityArchivalState: visibilityArchivalState, + VisibilityArchivalUri: visibilityArchivalURI, + BadBinaries: badBinaries, + }, + &persistencespb.NamespaceReplicationConfig{}, + isGlobalNamespace, + configVersion, + failoverVersion, + ) + m.NoError(err0) + m.NotNil(resp0) + m.EqualValues(id, resp0.ID) + + // for namespace which do not have replication config set, will default to + // use current cluster as active, with current cluster as all clusters + resp1, err1 := m.GetNamespace(id, "") + m.NoError(err1) + m.NotNil(resp1) + m.EqualValues(id, resp1.Namespace.Info.Id) + m.Equal(name, resp1.Namespace.Info.Name) + m.Equal(state, resp1.Namespace.Info.State) + m.Equal(description, resp1.Namespace.Info.Description) + m.Equal(owner, resp1.Namespace.Info.Owner) + m.Equal(data, resp1.Namespace.Info.Data) + m.EqualValues(time.Duration(retention)*time.Hour*24, *resp1.Namespace.Config.Retention) + m.Equal(historyArchivalState, resp1.Namespace.Config.HistoryArchivalState) + m.Equal(historyArchivalURI, resp1.Namespace.Config.HistoryArchivalUri) + m.Equal(visibilityArchivalState, resp1.Namespace.Config.VisibilityArchivalState) + m.Equal(visibilityArchivalURI, resp1.Namespace.Config.VisibilityArchivalUri) + m.Equal(badBinaries, resp1.Namespace.Config.BadBinaries) + m.Equal(cluster.TestCurrentClusterName, resp1.Namespace.ReplicationConfig.ActiveClusterName) + m.Equal(1, len(resp1.Namespace.ReplicationConfig.Clusters)) + m.Equal(isGlobalNamespace, resp1.IsGlobalNamespace) + m.Equal(configVersion, resp1.Namespace.ConfigVersion) + m.Equal(failoverVersion, resp1.Namespace.FailoverVersion) + m.True(resp1.Namespace.ReplicationConfig.Clusters[0] == cluster.TestCurrentClusterName) + m.Equal(p.InitialFailoverNotificationVersion, resp1.Namespace.FailoverNotificationVersion) + m.truncatePartialNamespace() +} + +func (m *MetadataPersistenceSuiteV2) TestCreateWithPartialNamespaceDifferentNameSameID() { + // This is only relevant for cassandra + switch m.DefaultTestCluster.(type) { + case *cassandra.TestCluster: + 
default: + return + } + id := uuid.New() + name := "create-namespace-test-name-for-partial-test" + partialName := "create-partial-namespace-test-name" + m.createPartialNamespace(id, partialName) + state := enumspb.NAMESPACE_STATE_REGISTERED + description := "create-namespace-test-description" + owner := "create-namespace-test-owner" + data := map[string]string{"k1": "v1"} + retention := int32(10) + historyArchivalState := enumspb.ARCHIVAL_STATE_ENABLED + historyArchivalURI := "test://history/uri" + visibilityArchivalState := enumspb.ARCHIVAL_STATE_ENABLED + visibilityArchivalURI := "test://visibility/uri" + badBinaries := &namespacepb.BadBinaries{Binaries: map[string]*namespacepb.BadBinaryInfo{}} + isGlobalNamespace := false + configVersion := int64(0) + failoverVersion := int64(0) + + resp0, err0 := m.CreateNamespace( + &persistencespb.NamespaceInfo{ + Id: id, + Name: name, + State: state, + Description: description, + Owner: owner, + Data: data, + }, + &persistencespb.NamespaceConfig{ + Retention: timestamp.DurationFromDays(retention), + HistoryArchivalState: historyArchivalState, + HistoryArchivalUri: historyArchivalURI, + VisibilityArchivalState: visibilityArchivalState, + VisibilityArchivalUri: visibilityArchivalURI, + BadBinaries: badBinaries, + }, + &persistencespb.NamespaceReplicationConfig{}, + isGlobalNamespace, + configVersion, + failoverVersion, + ) + m.Error(err0) + m.IsType(&serviceerror.NamespaceAlreadyExists{}, err0) + m.Nil(resp0) + m.truncatePartialNamespace() +} + +// TestCreateNamespace test +func (m *MetadataPersistenceSuiteV2) TestCreateNamespace() { + id := uuid.New() + name := "create-namespace-test-name-for-partial-test" + state := enumspb.NAMESPACE_STATE_REGISTERED + description := "create-namespace-test-description" + owner := "create-namespace-test-owner" + data := map[string]string{"k1": "v1"} + retention := int32(10) + historyArchivalState := enumspb.ARCHIVAL_STATE_ENABLED + historyArchivalURI := "test://history/uri" + visibilityArchivalState := enumspb.ARCHIVAL_STATE_ENABLED + visibilityArchivalURI := "test://visibility/uri" + badBinaries := &namespacepb.BadBinaries{map[string]*namespacepb.BadBinaryInfo{}} + isGlobalNamespace := false + configVersion := int64(0) + failoverVersion := int64(0) + + resp0, err0 := m.CreateNamespace( + &persistencespb.NamespaceInfo{ + Id: id, + Name: name, + State: state, + Description: description, + Owner: owner, + Data: data, + }, + &persistencespb.NamespaceConfig{ + Retention: timestamp.DurationFromDays(retention), + HistoryArchivalState: historyArchivalState, + HistoryArchivalUri: historyArchivalURI, + VisibilityArchivalState: visibilityArchivalState, + VisibilityArchivalUri: visibilityArchivalURI, + BadBinaries: badBinaries, + }, + &persistencespb.NamespaceReplicationConfig{}, + isGlobalNamespace, + configVersion, + failoverVersion, + ) + m.NoError(err0) + m.NotNil(resp0) + m.EqualValues(id, resp0.ID) + + // for namespace which do not have replication config set, will default to + // use current cluster as active, with current cluster as all clusters + resp1, err1 := m.GetNamespace(id, "") + m.NoError(err1) + m.NotNil(resp1) + m.EqualValues(id, resp1.Namespace.Info.Id) + m.Equal(name, resp1.Namespace.Info.Name) + m.Equal(state, resp1.Namespace.Info.State) + m.Equal(description, resp1.Namespace.Info.Description) + m.Equal(owner, resp1.Namespace.Info.Owner) + m.Equal(data, resp1.Namespace.Info.Data) + m.EqualValues(time.Duration(retention)*time.Hour*24, *resp1.Namespace.Config.Retention) + m.Equal(historyArchivalState, 
resp1.Namespace.Config.HistoryArchivalState) + m.Equal(historyArchivalURI, resp1.Namespace.Config.HistoryArchivalUri) + m.Equal(visibilityArchivalState, resp1.Namespace.Config.VisibilityArchivalState) + m.Equal(visibilityArchivalURI, resp1.Namespace.Config.VisibilityArchivalUri) + m.Equal(badBinaries, resp1.Namespace.Config.BadBinaries) + m.Equal(cluster.TestCurrentClusterName, resp1.Namespace.ReplicationConfig.ActiveClusterName) + m.Equal(1, len(resp1.Namespace.ReplicationConfig.Clusters)) + m.Equal(isGlobalNamespace, resp1.IsGlobalNamespace) + m.Equal(configVersion, resp1.Namespace.ConfigVersion) + m.Equal(failoverVersion, resp1.Namespace.FailoverVersion) + m.True(resp1.Namespace.ReplicationConfig.Clusters[0] == cluster.TestCurrentClusterName) + m.Equal(p.InitialFailoverNotificationVersion, resp1.Namespace.FailoverNotificationVersion) + + resp2, err2 := m.CreateNamespace( + &persistencespb.NamespaceInfo{ + Id: uuid.New(), + Name: name, + State: state, + Description: "fail", + Owner: "fail", + Data: map[string]string{}, + }, + &persistencespb.NamespaceConfig{ + Retention: timestamp.DurationFromDays(100), + HistoryArchivalState: enumspb.ARCHIVAL_STATE_DISABLED, + HistoryArchivalUri: "", + VisibilityArchivalState: enumspb.ARCHIVAL_STATE_DISABLED, + VisibilityArchivalUri: "", + }, + &persistencespb.NamespaceReplicationConfig{}, + isGlobalNamespace, + configVersion, + failoverVersion, + ) + m.Error(err2) + m.IsType(&serviceerror.NamespaceAlreadyExists{}, err2) + m.Nil(resp2) +} + +// TestGetNamespace test +func (m *MetadataPersistenceSuiteV2) TestGetNamespace() { + id := uuid.New() + name := "get-namespace-test-name" + state := enumspb.NAMESPACE_STATE_REGISTERED + description := "get-namespace-test-description" + owner := "get-namespace-test-owner" + data := map[string]string{"k1": "v1"} + retention := int32(10) + historyArchivalState := enumspb.ARCHIVAL_STATE_ENABLED + historyArchivalURI := "test://history/uri" + visibilityArchivalState := enumspb.ARCHIVAL_STATE_ENABLED + visibilityArchivalURI := "test://visibility/uri" + + clusterActive := "some random active cluster name" + clusterStandby := "some random standby cluster name" + configVersion := int64(11) + failoverVersion := int64(59) + isGlobalNamespace := true + clusters := []string{clusterActive, clusterStandby} + + resp0, err0 := m.GetNamespace("", "does-not-exist") + m.Nil(resp0) + m.Error(err0) + m.IsType(&serviceerror.NamespaceNotFound{}, err0) + testBinaries := &namespacepb.BadBinaries{ + Binaries: map[string]*namespacepb.BadBinaryInfo{ + "abc": { + Reason: "test-reason", + Operator: "test-operator", + CreateTime: timestamp.TimePtr(time.Date(2020, 8, 22, 0, 0, 0, 0, time.UTC)), + }, + }, + } + + resp1, err1 := m.CreateNamespace( + &persistencespb.NamespaceInfo{ + Id: id, + Name: name, + State: state, + Description: description, + Owner: owner, + Data: data, + }, + &persistencespb.NamespaceConfig{ + Retention: timestamp.DurationFromDays(retention), + HistoryArchivalState: historyArchivalState, + HistoryArchivalUri: historyArchivalURI, + VisibilityArchivalState: visibilityArchivalState, + VisibilityArchivalUri: visibilityArchivalURI, + BadBinaries: testBinaries, + }, + &persistencespb.NamespaceReplicationConfig{ + ActiveClusterName: clusterActive, + Clusters: clusters, + }, + isGlobalNamespace, + configVersion, + failoverVersion, + ) + m.NoError(err1) + m.NotNil(resp1) + m.EqualValues(id, resp1.ID) + + resp2, err2 := m.GetNamespace(id, "") + m.NoError(err2) + m.NotNil(resp2) + m.EqualValues(id, resp2.Namespace.Info.Id) + 
m.Equal(name, resp2.Namespace.Info.Name) + m.Equal(state, resp2.Namespace.Info.State) + m.Equal(description, resp2.Namespace.Info.Description) + m.Equal(owner, resp2.Namespace.Info.Owner) + m.Equal(data, resp2.Namespace.Info.Data) + m.EqualValues(time.Duration(retention)*time.Hour*24, *resp2.Namespace.Config.Retention) + m.Equal(historyArchivalState, resp2.Namespace.Config.HistoryArchivalState) + m.Equal(historyArchivalURI, resp2.Namespace.Config.HistoryArchivalUri) + m.Equal(visibilityArchivalState, resp2.Namespace.Config.VisibilityArchivalState) + m.Equal(visibilityArchivalURI, resp2.Namespace.Config.VisibilityArchivalUri) + m.True(reflect.DeepEqual(testBinaries, resp2.Namespace.Config.BadBinaries)) + m.Equal(clusterActive, resp2.Namespace.ReplicationConfig.ActiveClusterName) + m.Equal(len(clusters), len(resp2.Namespace.ReplicationConfig.Clusters)) + for index := range clusters { + m.Equal(clusters[index], resp2.Namespace.ReplicationConfig.Clusters[index]) + } + m.Equal(isGlobalNamespace, resp2.IsGlobalNamespace) + m.Equal(configVersion, resp2.Namespace.ConfigVersion) + m.Equal(failoverVersion, resp2.Namespace.FailoverVersion) + m.Equal(p.InitialFailoverNotificationVersion, resp2.Namespace.FailoverNotificationVersion) + + resp3, err3 := m.GetNamespace("", name) + m.NoError(err3) + m.NotNil(resp3) + m.EqualValues(id, resp3.Namespace.Info.Id) + m.Equal(name, resp3.Namespace.Info.Name) + m.Equal(state, resp3.Namespace.Info.State) + m.Equal(description, resp3.Namespace.Info.Description) + m.Equal(owner, resp3.Namespace.Info.Owner) + m.Equal(data, resp3.Namespace.Info.Data) + m.EqualValues(time.Duration(retention)*time.Hour*24, *resp3.Namespace.Config.Retention) + m.Equal(historyArchivalState, resp3.Namespace.Config.HistoryArchivalState) + m.Equal(historyArchivalURI, resp3.Namespace.Config.HistoryArchivalUri) + m.Equal(visibilityArchivalState, resp3.Namespace.Config.VisibilityArchivalState) + m.Equal(visibilityArchivalURI, resp3.Namespace.Config.VisibilityArchivalUri) + m.Equal(clusterActive, resp3.Namespace.ReplicationConfig.ActiveClusterName) + m.Equal(len(clusters), len(resp3.Namespace.ReplicationConfig.Clusters)) + for index := range clusters { + m.Equal(clusters[index], resp3.Namespace.ReplicationConfig.Clusters[index]) + } + m.Equal(isGlobalNamespace, resp3.IsGlobalNamespace) + m.Equal(configVersion, resp3.Namespace.ConfigVersion) + m.Equal(failoverVersion, resp3.Namespace.FailoverVersion) + m.Equal(p.InitialFailoverNotificationVersion, resp3.Namespace.FailoverNotificationVersion) + + resp4, err4 := m.GetNamespace(id, name) + m.Error(err4) + m.IsType(&serviceerror.InvalidArgument{}, err4) + m.Nil(resp4) + + resp5, err5 := m.GetNamespace("", "") + m.Nil(resp5) + m.IsType(&serviceerror.InvalidArgument{}, err5) +} + +// TestConcurrentCreateNamespace test +func (m *MetadataPersistenceSuiteV2) TestConcurrentCreateNamespace() { + id := uuid.New() + + name := "concurrent-create-namespace-test-name" + state := enumspb.NAMESPACE_STATE_REGISTERED + description := "concurrent-create-namespace-test-description" + owner := "create-namespace-test-owner" + retention := int32(10) + historyArchivalState := enumspb.ARCHIVAL_STATE_ENABLED + historyArchivalURI := "test://history/uri" + visibilityArchivalState := enumspb.ARCHIVAL_STATE_ENABLED + visibilityArchivalURI := "test://visibility/uri" + + clusterActive := "some random active cluster name" + clusterStandby := "some random standby cluster name" + configVersion := int64(10) + failoverVersion := int64(59) + isGlobalNamespace := true + clusters := 
[]string{clusterActive, clusterStandby} + + testBinaries := &namespacepb.BadBinaries{ + Binaries: map[string]*namespacepb.BadBinaryInfo{ + "abc": { + Reason: "test-reason", + Operator: "test-operator", + CreateTime: timestamp.TimePtr(time.Date(2020, 8, 22, 0, 0, 0, 0, time.UTC)), + }, + }, + } + concurrency := 16 + successCount := int32(0) + var wg sync.WaitGroup + for i := 1; i <= concurrency; i++ { + newValue := fmt.Sprintf("v-%v", i) + wg.Add(1) + go func(data map[string]string) { + _, err1 := m.CreateNamespace( + &persistencespb.NamespaceInfo{ + Id: id, + Name: name, + State: state, + Description: description, + Owner: owner, + Data: data, + }, + &persistencespb.NamespaceConfig{ + Retention: timestamp.DurationFromDays(retention), + HistoryArchivalState: historyArchivalState, + HistoryArchivalUri: historyArchivalURI, + VisibilityArchivalState: visibilityArchivalState, + VisibilityArchivalUri: visibilityArchivalURI, + BadBinaries: testBinaries, + }, + &persistencespb.NamespaceReplicationConfig{ + ActiveClusterName: clusterActive, + Clusters: clusters, + }, + isGlobalNamespace, + configVersion, + failoverVersion, + ) + if err1 == nil { + atomic.AddInt32(&successCount, 1) + } + wg.Done() + }(map[string]string{"k0": newValue}) + } + wg.Wait() + m.Equal(int32(1), successCount) + + resp, err3 := m.GetNamespace("", name) + m.NoError(err3) + m.NotNil(resp) + m.Equal(name, resp.Namespace.Info.Name) + m.Equal(state, resp.Namespace.Info.State) + m.Equal(description, resp.Namespace.Info.Description) + m.Equal(owner, resp.Namespace.Info.Owner) + m.EqualValues(time.Duration(retention)*time.Hour*24, *resp.Namespace.Config.Retention) + m.Equal(historyArchivalState, resp.Namespace.Config.HistoryArchivalState) + m.Equal(historyArchivalURI, resp.Namespace.Config.HistoryArchivalUri) + m.Equal(visibilityArchivalState, resp.Namespace.Config.VisibilityArchivalState) + m.Equal(visibilityArchivalURI, resp.Namespace.Config.VisibilityArchivalUri) + m.True(reflect.DeepEqual(testBinaries, resp.Namespace.Config.BadBinaries)) + m.Equal(clusterActive, resp.Namespace.ReplicationConfig.ActiveClusterName) + m.Equal(len(clusters), len(resp.Namespace.ReplicationConfig.Clusters)) + for index := range clusters { + m.Equal(clusters[index], resp.Namespace.ReplicationConfig.Clusters[index]) + } + m.Equal(isGlobalNamespace, resp.IsGlobalNamespace) + m.Equal(configVersion, resp.Namespace.ConfigVersion) + m.Equal(failoverVersion, resp.Namespace.FailoverVersion) + + // check namespace data + ss := strings.Split(resp.Namespace.Info.Data["k0"], "-") + m.Equal(2, len(ss)) + vi, err := strconv.Atoi(ss[1]) + m.NoError(err) + m.Equal(true, vi > 0 && vi <= concurrency) +} + +// TestConcurrentUpdateNamespace test +func (m *MetadataPersistenceSuiteV2) TestConcurrentUpdateNamespace() { + id := uuid.New() + name := "concurrent-update-namespace-test-name" + state := enumspb.NAMESPACE_STATE_REGISTERED + description := "update-namespace-test-description" + owner := "update-namespace-test-owner" + data := map[string]string{"k1": "v1"} + retention := int32(10) + historyArchivalState := enumspb.ARCHIVAL_STATE_ENABLED + historyArchivalURI := "test://history/uri" + visibilityArchivalState := enumspb.ARCHIVAL_STATE_ENABLED + visibilityArchivalURI := "test://visibility/uri" + badBinaries := &namespacepb.BadBinaries{map[string]*namespacepb.BadBinaryInfo{}} + + clusterActive := "some random active cluster name" + clusterStandby := "some random standby cluster name" + configVersion := int64(10) + failoverVersion := int64(59) + isGlobalNamespace := true + 
clusters := []string{clusterActive, clusterStandby} + + resp1, err1 := m.CreateNamespace( + &persistencespb.NamespaceInfo{ + Id: id, + Name: name, + State: state, + Description: description, + Owner: owner, + Data: data, + }, + &persistencespb.NamespaceConfig{ + Retention: timestamp.DurationFromDays(retention), + HistoryArchivalState: historyArchivalState, + HistoryArchivalUri: historyArchivalURI, + VisibilityArchivalState: visibilityArchivalState, + VisibilityArchivalUri: visibilityArchivalURI, + BadBinaries: badBinaries, + }, + &persistencespb.NamespaceReplicationConfig{ + ActiveClusterName: clusterActive, + Clusters: clusters, + }, + isGlobalNamespace, + configVersion, + failoverVersion, + ) + m.NoError(err1) + m.EqualValues(id, resp1.ID) + + resp2, err2 := m.GetNamespace(id, "") + m.NoError(err2) + m.Equal(badBinaries, resp2.Namespace.Config.BadBinaries) + metadata, err := m.MetadataManager.GetMetadata(m.ctx) + m.NoError(err) + notificationVersion := metadata.NotificationVersion + + testBinaries := &namespacepb.BadBinaries{ + Binaries: map[string]*namespacepb.BadBinaryInfo{ + "abc": { + Reason: "test-reason", + Operator: "test-operator", + CreateTime: timestamp.TimePtr(time.Date(2020, 8, 22, 0, 0, 0, 0, time.UTC)), + }, + }, + } + concurrency := 16 + successCount := int32(0) + var wg sync.WaitGroup + for i := 1; i <= concurrency; i++ { + newValue := fmt.Sprintf("v-%v", i) + wg.Add(1) + go func(updatedData map[string]string) { + err3 := m.UpdateNamespace( + &persistencespb.NamespaceInfo{ + Id: resp2.Namespace.Info.Id, + Name: resp2.Namespace.Info.Name, + State: resp2.Namespace.Info.State, + Description: resp2.Namespace.Info.Description, + Owner: resp2.Namespace.Info.Owner, + Data: updatedData, + }, + &persistencespb.NamespaceConfig{ + Retention: resp2.Namespace.Config.Retention, + HistoryArchivalState: resp2.Namespace.Config.HistoryArchivalState, + HistoryArchivalUri: resp2.Namespace.Config.HistoryArchivalUri, + VisibilityArchivalState: resp2.Namespace.Config.VisibilityArchivalState, + VisibilityArchivalUri: resp2.Namespace.Config.VisibilityArchivalUri, + BadBinaries: testBinaries, + }, + &persistencespb.NamespaceReplicationConfig{ + ActiveClusterName: resp2.Namespace.ReplicationConfig.ActiveClusterName, + Clusters: resp2.Namespace.ReplicationConfig.Clusters, + }, + resp2.Namespace.ConfigVersion, + resp2.Namespace.FailoverVersion, + resp2.Namespace.FailoverNotificationVersion, + &time.Time{}, + notificationVersion, + isGlobalNamespace, + ) + if err3 == nil { + atomic.AddInt32(&successCount, 1) + } + wg.Done() + }(map[string]string{"k0": newValue}) + } + wg.Wait() + m.Equal(int32(1), successCount) + + resp3, err3 := m.GetNamespace("", name) + m.NoError(err3) + m.NotNil(resp3) + m.EqualValues(id, resp3.Namespace.Info.Id) + m.Equal(name, resp3.Namespace.Info.Name) + m.Equal(state, resp3.Namespace.Info.State) + m.Equal(isGlobalNamespace, resp3.IsGlobalNamespace) + m.Equal(description, resp3.Namespace.Info.Description) + m.Equal(owner, resp3.Namespace.Info.Owner) + + m.EqualValues(time.Duration(retention)*time.Hour*24, *resp3.Namespace.Config.Retention) + m.Equal(historyArchivalState, resp3.Namespace.Config.HistoryArchivalState) + m.Equal(historyArchivalURI, resp3.Namespace.Config.HistoryArchivalUri) + m.Equal(visibilityArchivalState, resp3.Namespace.Config.VisibilityArchivalState) + m.Equal(visibilityArchivalURI, resp3.Namespace.Config.VisibilityArchivalUri) + m.True(reflect.DeepEqual(testBinaries, resp3.Namespace.Config.BadBinaries)) + m.Equal(clusterActive, 
resp3.Namespace.ReplicationConfig.ActiveClusterName) + m.Equal(len(clusters), len(resp3.Namespace.ReplicationConfig.Clusters)) + for index := range clusters { + m.Equal(clusters[index], resp3.Namespace.ReplicationConfig.Clusters[index]) + } + m.Equal(isGlobalNamespace, resp3.IsGlobalNamespace) + m.Equal(configVersion, resp3.Namespace.ConfigVersion) + m.Equal(failoverVersion, resp3.Namespace.FailoverVersion) + + // check namespace data + ss := strings.Split(resp3.Namespace.Info.Data["k0"], "-") + m.Equal(2, len(ss)) + vi, err := strconv.Atoi(ss[1]) + m.NoError(err) + m.Equal(true, vi > 0 && vi <= concurrency) +} + +// TestUpdateNamespace test +func (m *MetadataPersistenceSuiteV2) TestUpdateNamespace() { + id := uuid.New() + name := "update-namespace-test-name" + state := enumspb.NAMESPACE_STATE_REGISTERED + description := "update-namespace-test-description" + owner := "update-namespace-test-owner" + data := map[string]string{"k1": "v1"} + retention := int32(10) + historyArchivalState := enumspb.ARCHIVAL_STATE_ENABLED + historyArchivalURI := "test://history/uri" + visibilityArchivalState := enumspb.ARCHIVAL_STATE_ENABLED + visibilityArchivalURI := "test://visibility/uri" + + clusterActive := "some random active cluster name" + clusterStandby := "some random standby cluster name" + configVersion := int64(10) + failoverVersion := int64(59) + failoverEndTime := time.Now().UTC() + isGlobalNamespace := true + clusters := []string{clusterActive, clusterStandby} + + resp1, err1 := m.CreateNamespace( + &persistencespb.NamespaceInfo{ + Id: id, + Name: name, + State: state, + Description: description, + Owner: owner, + Data: data, + }, + &persistencespb.NamespaceConfig{ + Retention: timestamp.DurationFromDays(retention), + HistoryArchivalState: historyArchivalState, + HistoryArchivalUri: historyArchivalURI, + VisibilityArchivalState: visibilityArchivalState, + VisibilityArchivalUri: visibilityArchivalURI, + }, + &persistencespb.NamespaceReplicationConfig{ + ActiveClusterName: clusterActive, + Clusters: clusters, + }, + isGlobalNamespace, + configVersion, + failoverVersion, + ) + m.NoError(err1) + m.EqualValues(id, resp1.ID) + + resp2, err2 := m.GetNamespace(id, "") + m.NoError(err2) + metadata, err := m.MetadataManager.GetMetadata(m.ctx) + m.NoError(err) + notificationVersion := metadata.NotificationVersion + + updatedState := enumspb.NAMESPACE_STATE_DEPRECATED + updatedDescription := "description-updated" + updatedOwner := "owner-updated" + // This will overriding the previous key-value pair + updatedData := map[string]string{"k1": "v2"} + updatedRetention := timestamp.DurationFromDays(20) + updatedHistoryArchivalState := enumspb.ARCHIVAL_STATE_DISABLED + updatedHistoryArchivalURI := "" + updatedVisibilityArchivalState := enumspb.ARCHIVAL_STATE_DISABLED + updatedVisibilityArchivalURI := "" + + updateClusterActive := "other random active cluster name" + updateClusterStandby := "other random standby cluster name" + updateConfigVersion := int64(12) + updateFailoverVersion := int64(28) + updateFailoverNotificationVersion := int64(14) + updateClusters := []string{updateClusterActive, updateClusterStandby} + + testBinaries := &namespacepb.BadBinaries{ + Binaries: map[string]*namespacepb.BadBinaryInfo{ + "abc": { + Reason: "test-reason", + Operator: "test-operator", + CreateTime: timestamp.TimePtr(time.Date(2020, 8, 22, 0, 0, 0, 0, time.UTC)), + }, + }, + } + + err3 := m.UpdateNamespace( + &persistencespb.NamespaceInfo{ + Id: resp2.Namespace.Info.Id, + Name: resp2.Namespace.Info.Name, + State: updatedState, 
+ Description: updatedDescription, + Owner: updatedOwner, + Data: updatedData, + }, + &persistencespb.NamespaceConfig{ + Retention: updatedRetention, + HistoryArchivalState: updatedHistoryArchivalState, + HistoryArchivalUri: updatedHistoryArchivalURI, + VisibilityArchivalState: updatedVisibilityArchivalState, + VisibilityArchivalUri: updatedVisibilityArchivalURI, + BadBinaries: testBinaries, + }, + &persistencespb.NamespaceReplicationConfig{ + ActiveClusterName: updateClusterActive, + Clusters: updateClusters, + }, + updateConfigVersion, + updateFailoverVersion, + updateFailoverNotificationVersion, + &failoverEndTime, + notificationVersion, + isGlobalNamespace, + ) + m.NoError(err3) + + resp4, err4 := m.GetNamespace("", name) + m.NoError(err4) + m.NotNil(resp4) + m.EqualValues(id, resp4.Namespace.Info.Id) + m.Equal(name, resp4.Namespace.Info.Name) + m.Equal(isGlobalNamespace, resp4.IsGlobalNamespace) + m.Equal(updatedState, resp4.Namespace.Info.State) + m.Equal(updatedDescription, resp4.Namespace.Info.Description) + m.Equal(updatedOwner, resp4.Namespace.Info.Owner) + m.Equal(updatedData, resp4.Namespace.Info.Data) + m.EqualValues(*updatedRetention, *resp4.Namespace.Config.Retention) + m.Equal(updatedHistoryArchivalState, resp4.Namespace.Config.HistoryArchivalState) + m.Equal(updatedHistoryArchivalURI, resp4.Namespace.Config.HistoryArchivalUri) + m.Equal(updatedVisibilityArchivalState, resp4.Namespace.Config.VisibilityArchivalState) + m.Equal(updatedVisibilityArchivalURI, resp4.Namespace.Config.VisibilityArchivalUri) + m.True(reflect.DeepEqual(testBinaries, resp4.Namespace.Config.BadBinaries)) + m.Equal(updateClusterActive, resp4.Namespace.ReplicationConfig.ActiveClusterName) + m.Equal(len(updateClusters), len(resp4.Namespace.ReplicationConfig.Clusters)) + for index := range clusters { + m.Equal(updateClusters[index], resp4.Namespace.ReplicationConfig.Clusters[index]) + } + m.Equal(updateConfigVersion, resp4.Namespace.ConfigVersion) + m.Equal(updateFailoverVersion, resp4.Namespace.FailoverVersion) + m.Equal(updateFailoverNotificationVersion, resp4.Namespace.FailoverNotificationVersion) + m.Equal(notificationVersion, resp4.NotificationVersion) + m.EqualTimes(failoverEndTime, *resp4.Namespace.FailoverEndTime) + + resp5, err5 := m.GetNamespace(id, "") + m.NoError(err5) + m.NotNil(resp5) + m.EqualValues(id, resp5.Namespace.Info.Id) + m.Equal(name, resp5.Namespace.Info.Name) + m.Equal(isGlobalNamespace, resp5.IsGlobalNamespace) + m.Equal(updatedState, resp5.Namespace.Info.State) + m.Equal(updatedDescription, resp5.Namespace.Info.Description) + m.Equal(updatedOwner, resp5.Namespace.Info.Owner) + m.Equal(updatedData, resp5.Namespace.Info.Data) + m.EqualValues(*updatedRetention, *resp5.Namespace.Config.Retention) + m.Equal(updatedHistoryArchivalState, resp5.Namespace.Config.HistoryArchivalState) + m.Equal(updatedHistoryArchivalURI, resp5.Namespace.Config.HistoryArchivalUri) + m.Equal(updatedVisibilityArchivalState, resp5.Namespace.Config.VisibilityArchivalState) + m.Equal(updatedVisibilityArchivalURI, resp5.Namespace.Config.VisibilityArchivalUri) + m.Equal(updateClusterActive, resp5.Namespace.ReplicationConfig.ActiveClusterName) + m.Equal(len(updateClusters), len(resp5.Namespace.ReplicationConfig.Clusters)) + for index := range clusters { + m.Equal(updateClusters[index], resp5.Namespace.ReplicationConfig.Clusters[index]) + } + m.Equal(updateConfigVersion, resp5.Namespace.ConfigVersion) + m.Equal(updateFailoverVersion, resp5.Namespace.FailoverVersion) + m.Equal(updateFailoverNotificationVersion, 
resp5.Namespace.FailoverNotificationVersion) + m.Equal(notificationVersion, resp5.NotificationVersion) + m.EqualTimes(failoverEndTime, *resp4.Namespace.FailoverEndTime) + + notificationVersion++ + err6 := m.UpdateNamespace( + &persistencespb.NamespaceInfo{ + Id: resp2.Namespace.Info.Id, + Name: resp2.Namespace.Info.Name, + State: updatedState, + Description: updatedDescription, + Owner: updatedOwner, + Data: updatedData, + }, + &persistencespb.NamespaceConfig{ + Retention: updatedRetention, + HistoryArchivalState: updatedHistoryArchivalState, + HistoryArchivalUri: updatedHistoryArchivalURI, + VisibilityArchivalState: updatedVisibilityArchivalState, + VisibilityArchivalUri: updatedVisibilityArchivalURI, + BadBinaries: testBinaries, + }, + &persistencespb.NamespaceReplicationConfig{ + ActiveClusterName: updateClusterActive, + Clusters: updateClusters, + }, + updateConfigVersion, + updateFailoverVersion, + updateFailoverNotificationVersion, + &time.Time{}, + notificationVersion, + isGlobalNamespace, + ) + m.NoError(err6) + + resp6, err6 := m.GetNamespace(id, "") + m.NoError(err6) + m.NotNil(resp6) + m.EqualValues(id, resp6.Namespace.Info.Id) + m.Equal(name, resp6.Namespace.Info.Name) + m.Equal(isGlobalNamespace, resp6.IsGlobalNamespace) + m.Equal(updatedState, resp6.Namespace.Info.State) + m.Equal(updatedDescription, resp6.Namespace.Info.Description) + m.Equal(updatedOwner, resp6.Namespace.Info.Owner) + m.Equal(updatedData, resp6.Namespace.Info.Data) + m.EqualValues(*updatedRetention, *resp6.Namespace.Config.Retention) + m.Equal(updatedHistoryArchivalState, resp6.Namespace.Config.HistoryArchivalState) + m.Equal(updatedHistoryArchivalURI, resp6.Namespace.Config.HistoryArchivalUri) + m.Equal(updatedVisibilityArchivalState, resp6.Namespace.Config.VisibilityArchivalState) + m.Equal(updatedVisibilityArchivalURI, resp6.Namespace.Config.VisibilityArchivalUri) + m.True(reflect.DeepEqual(testBinaries, resp6.Namespace.Config.BadBinaries)) + m.Equal(updateClusterActive, resp6.Namespace.ReplicationConfig.ActiveClusterName) + m.Equal(len(updateClusters), len(resp6.Namespace.ReplicationConfig.Clusters)) + for index := range clusters { + m.Equal(updateClusters[index], resp4.Namespace.ReplicationConfig.Clusters[index]) + } + m.Equal(updateConfigVersion, resp6.Namespace.ConfigVersion) + m.Equal(updateFailoverVersion, resp6.Namespace.FailoverVersion) + m.Equal(updateFailoverNotificationVersion, resp6.Namespace.FailoverNotificationVersion) + m.Equal(notificationVersion, resp6.NotificationVersion) + m.EqualTimes(time.Unix(0, 0).UTC(), *resp6.Namespace.FailoverEndTime) +} + +func (m *MetadataPersistenceSuiteV2) TestRenameNamespace() { + id := uuid.New() + name := "rename-namespace-test-name" + newName := "rename-namespace-test-new-name" + newNewName := "rename-namespace-test-new-new-name" + state := enumspb.NAMESPACE_STATE_REGISTERED + description := "rename-namespace-test-description" + owner := "rename-namespace-test-owner" + data := map[string]string{"k1": "v1"} + retention := int32(10) + historyArchivalState := enumspb.ARCHIVAL_STATE_ENABLED + historyArchivalURI := "test://history/uri" + visibilityArchivalState := enumspb.ARCHIVAL_STATE_ENABLED + visibilityArchivalURI := "test://visibility/uri" + + clusterActive := "some random active cluster name" + clusterStandby := "some random standby cluster name" + configVersion := int64(10) + failoverVersion := int64(59) + isGlobalNamespace := true + clusters := []string{clusterActive, clusterStandby} + + resp1, err1 := m.CreateNamespace( + 
&persistencespb.NamespaceInfo{ + Id: id, + Name: name, + State: state, + Description: description, + Owner: owner, + Data: data, + }, + &persistencespb.NamespaceConfig{ + Retention: timestamp.DurationFromDays(retention), + HistoryArchivalState: historyArchivalState, + HistoryArchivalUri: historyArchivalURI, + VisibilityArchivalState: visibilityArchivalState, + VisibilityArchivalUri: visibilityArchivalURI, + }, + &persistencespb.NamespaceReplicationConfig{ + ActiveClusterName: clusterActive, + Clusters: clusters, + }, + isGlobalNamespace, + configVersion, + failoverVersion, + ) + m.NoError(err1) + m.EqualValues(id, resp1.ID) + + _, err2 := m.GetNamespace(id, "") + m.NoError(err2) + + err3 := m.MetadataManager.RenameNamespace(m.ctx, &p.RenameNamespaceRequest{ + PreviousName: name, + NewName: newName, + }) + m.NoError(err3) + + resp4, err4 := m.GetNamespace("", newName) + m.NoError(err4) + m.NotNil(resp4) + m.EqualValues(id, resp4.Namespace.Info.Id) + m.Equal(newName, resp4.Namespace.Info.Name) + m.Equal(isGlobalNamespace, resp4.IsGlobalNamespace) + + resp5, err5 := m.GetNamespace(id, "") + m.NoError(err5) + m.NotNil(resp5) + m.EqualValues(id, resp5.Namespace.Info.Id) + m.Equal(newName, resp5.Namespace.Info.Name) + m.Equal(isGlobalNamespace, resp5.IsGlobalNamespace) + + err6 := m.MetadataManager.RenameNamespace(m.ctx, &p.RenameNamespaceRequest{ + PreviousName: newName, + NewName: newNewName, + }) + m.NoError(err6) + + resp6, err6 := m.GetNamespace(id, "") + m.NoError(err6) + m.NotNil(resp6) + m.EqualValues(id, resp6.Namespace.Info.Id) + m.Equal(newNewName, resp6.Namespace.Info.Name) + m.Equal(isGlobalNamespace, resp6.IsGlobalNamespace) +} + +// TestDeleteNamespace test +func (m *MetadataPersistenceSuiteV2) TestDeleteNamespace() { + id := uuid.New() + name := "delete-namespace-test-name" + state := enumspb.NAMESPACE_STATE_REGISTERED + description := "delete-namespace-test-description" + owner := "delete-namespace-test-owner" + data := map[string]string{"k1": "v1"} + retention := timestamp.DurationFromDays(10) + historyArchivalState := enumspb.ARCHIVAL_STATE_ENABLED + historyArchivalURI := "test://history/uri" + visibilityArchivalState := enumspb.ARCHIVAL_STATE_ENABLED + visibilityArchivalURI := "test://visibility/uri" + + clusterActive := "some random active cluster name" + clusterStandby := "some random standby cluster name" + configVersion := int64(10) + failoverVersion := int64(59) + isGlobalNamespace := true + clusters := []string{clusterActive, clusterStandby} + + resp1, err1 := m.CreateNamespace( + &persistencespb.NamespaceInfo{ + Id: id, + Name: name, + State: state, + Description: description, + Owner: owner, + Data: data, + }, + &persistencespb.NamespaceConfig{ + Retention: retention, + HistoryArchivalState: historyArchivalState, + HistoryArchivalUri: historyArchivalURI, + VisibilityArchivalState: visibilityArchivalState, + VisibilityArchivalUri: visibilityArchivalURI, + }, + &persistencespb.NamespaceReplicationConfig{ + ActiveClusterName: clusterActive, + Clusters: clusters, + }, + isGlobalNamespace, + configVersion, + failoverVersion, + ) + m.NoError(err1) + m.EqualValues(id, resp1.ID) + + resp2, err2 := m.GetNamespace("", name) + m.NoError(err2) + m.NotNil(resp2) + + err3 := m.DeleteNamespace("", name) + m.NoError(err3) + + // May need to loop here to avoid potential inconsistent read-after-write in cassandra + var err4 error + var resp4 *p.GetNamespaceResponse + for i := 0; i < 3; i++ { + resp4, err4 = m.GetNamespace("", name) + if err4 != nil { + break + } + 
time.Sleep(time.Second * time.Duration(i)) + } + m.Error(err4) + m.IsType(&serviceerror.NamespaceNotFound{}, err4) + m.Nil(resp4) + + resp5, err5 := m.GetNamespace(id, "") + m.Error(err5) + m.IsType(&serviceerror.NamespaceNotFound{}, err5) + m.Nil(resp5) + + id = uuid.New() + resp6, err6 := m.CreateNamespace( + &persistencespb.NamespaceInfo{ + Id: id, + Name: name, + State: state, + Description: description, + Owner: owner, + Data: data, + }, + &persistencespb.NamespaceConfig{ + Retention: retention, + HistoryArchivalState: historyArchivalState, + HistoryArchivalUri: historyArchivalURI, + VisibilityArchivalState: visibilityArchivalState, + VisibilityArchivalUri: visibilityArchivalURI, + }, + &persistencespb.NamespaceReplicationConfig{ + ActiveClusterName: clusterActive, + Clusters: clusters, + }, + isGlobalNamespace, + configVersion, + failoverVersion, + ) + m.NoError(err6) + m.EqualValues(id, resp6.ID) + + err7 := m.DeleteNamespace(id, "") + m.NoError(err7) + + resp8, err8 := m.GetNamespace("", name) + m.Error(err8) + m.IsType(&serviceerror.NamespaceNotFound{}, err8) + m.Nil(resp8) + + resp9, err9 := m.GetNamespace(id, "") + m.Error(err9) + m.IsType(&serviceerror.NamespaceNotFound{}, err9) + m.Nil(resp9) +} + +// TestListNamespaces test +func (m *MetadataPersistenceSuiteV2) TestListNamespaces() { + clusterActive1 := "some random active cluster name" + clusterStandby1 := "some random standby cluster name" + clusters1 := []string{clusterActive1, clusterStandby1} + + clusterActive2 := "other random active cluster name" + clusterStandby2 := "other random standby cluster name" + clusters2 := []string{clusterActive2, clusterStandby2} + + testBinaries1 := &namespacepb.BadBinaries{ + Binaries: map[string]*namespacepb.BadBinaryInfo{ + "abc": { + Reason: "test-reason1", + Operator: "test-operator1", + CreateTime: timestamp.TimePtr(time.Date(2020, 8, 22, 0, 0, 0, 0, time.UTC)), + }, + }, + } + testBinaries2 := &namespacepb.BadBinaries{ + Binaries: map[string]*namespacepb.BadBinaryInfo{ + "efg": { + Reason: "test-reason2", + Operator: "test-operator2", + CreateTime: timestamp.TimePtr(time.Date(2020, 8, 22, 0, 0, 0, 0, time.UTC)), + }, + }, + } + + inputNamespaces := []*p.GetNamespaceResponse{ + { + Namespace: &persistencespb.NamespaceDetail{ + Info: &persistencespb.NamespaceInfo{ + Id: uuid.New(), + Name: "list-namespace-test-name-1", + State: enumspb.NAMESPACE_STATE_REGISTERED, + Description: "list-namespace-test-description-1", + Owner: "list-namespace-test-owner-1", + Data: map[string]string{"k1": "v1"}, + }, + Config: &persistencespb.NamespaceConfig{ + Retention: timestamp.DurationFromDays(109), + HistoryArchivalState: enumspb.ARCHIVAL_STATE_ENABLED, + HistoryArchivalUri: "test://history/uri", + VisibilityArchivalState: enumspb.ARCHIVAL_STATE_ENABLED, + VisibilityArchivalUri: "test://visibility/uri", + BadBinaries: testBinaries1, + }, + ReplicationConfig: &persistencespb.NamespaceReplicationConfig{ + ActiveClusterName: clusterActive1, + Clusters: clusters1, + }, + + ConfigVersion: 133, + FailoverVersion: 266, + }, + IsGlobalNamespace: true, + }, + { + Namespace: &persistencespb.NamespaceDetail{ + Info: &persistencespb.NamespaceInfo{ + Id: uuid.New(), + Name: "list-namespace-test-name-2", + State: enumspb.NAMESPACE_STATE_REGISTERED, + Description: "list-namespace-test-description-2", + Owner: "list-namespace-test-owner-2", + Data: map[string]string{"k1": "v2"}, + }, + Config: &persistencespb.NamespaceConfig{ + Retention: timestamp.DurationFromDays(326), + HistoryArchivalState: 
enumspb.ARCHIVAL_STATE_DISABLED, + HistoryArchivalUri: "", + VisibilityArchivalState: enumspb.ARCHIVAL_STATE_DISABLED, + VisibilityArchivalUri: "", + BadBinaries: testBinaries2, + }, + ReplicationConfig: &persistencespb.NamespaceReplicationConfig{ + ActiveClusterName: clusterActive2, + Clusters: clusters2, + }, + ConfigVersion: 400, + FailoverVersion: 667, + }, + IsGlobalNamespace: false, + }, + } + for _, namespace := range inputNamespaces { + _, err := m.CreateNamespace( + namespace.Namespace.Info, + namespace.Namespace.Config, + namespace.Namespace.ReplicationConfig, + namespace.IsGlobalNamespace, + namespace.Namespace.ConfigVersion, + namespace.Namespace.FailoverVersion, + ) + m.NoError(err) + } + + var token []byte + const pageSize = 1 + pageCount := 0 + outputNamespaces := make(map[string]*p.GetNamespaceResponse) + for { + resp, err := m.ListNamespaces(pageSize, token) + m.NoError(err) + token = resp.NextPageToken + for _, namespace := range resp.Namespaces { + outputNamespaces[namespace.Namespace.Info.Id] = namespace + // global notification version is already tested, so here we make it 0 + // so we can test == easily + namespace.NotificationVersion = 0 + } + pageCount++ + if len(token) == 0 { + break + } + } + + // 2 pages with data and 1 empty page which is unavoidable. + m.Equal(pageCount, 3) + m.Equal(len(inputNamespaces), len(outputNamespaces)) + for _, namespace := range inputNamespaces { + m.Equal(namespace, outputNamespaces[namespace.Namespace.Info.Id]) + } +} + +func (m *MetadataPersistenceSuiteV2) TestListNamespaces_DeletedNamespace() { + inputNamespaces := []*p.GetNamespaceResponse{ + { + Namespace: &persistencespb.NamespaceDetail{ + Info: &persistencespb.NamespaceInfo{ + Id: uuid.New(), + Name: "list-namespace-test-name-1", + State: enumspb.NAMESPACE_STATE_REGISTERED, + }, + Config: &persistencespb.NamespaceConfig{}, + ReplicationConfig: &persistencespb.NamespaceReplicationConfig{}, + }, + }, + { + Namespace: &persistencespb.NamespaceDetail{ + Info: &persistencespb.NamespaceInfo{ + Id: uuid.New(), + Name: "list-namespace-test-name-2", + State: enumspb.NAMESPACE_STATE_DELETED, + }, + Config: &persistencespb.NamespaceConfig{}, + ReplicationConfig: &persistencespb.NamespaceReplicationConfig{}, + }, + }, + { + Namespace: &persistencespb.NamespaceDetail{ + Info: &persistencespb.NamespaceInfo{ + Id: uuid.New(), + Name: "list-namespace-test-name-3", + State: enumspb.NAMESPACE_STATE_REGISTERED, + }, + Config: &persistencespb.NamespaceConfig{}, + ReplicationConfig: &persistencespb.NamespaceReplicationConfig{}, + }, + }, + { + Namespace: &persistencespb.NamespaceDetail{ + Info: &persistencespb.NamespaceInfo{ + Id: uuid.New(), + Name: "list-namespace-test-name-4", + State: enumspb.NAMESPACE_STATE_DELETED, + }, + Config: &persistencespb.NamespaceConfig{}, + ReplicationConfig: &persistencespb.NamespaceReplicationConfig{}, + }, + }, + } + for _, namespace := range inputNamespaces { + _, err := m.CreateNamespace( + namespace.Namespace.Info, + namespace.Namespace.Config, + namespace.Namespace.ReplicationConfig, + namespace.IsGlobalNamespace, + namespace.Namespace.ConfigVersion, + namespace.Namespace.FailoverVersion, + ) + m.NoError(err) + } + + var token []byte + var listNamespacesPageSize2 []*p.GetNamespaceResponse + pageCount := 0 + for { + resp, err := m.ListNamespaces(2, token) + m.NoError(err) + token = resp.NextPageToken + listNamespacesPageSize2 = append(listNamespacesPageSize2, resp.Namespaces...) 
+ pageCount++ + if len(token) == 0 { + break + } + } + + // 1 page with data and 1 empty page which is unavoidable. + m.Equal(2, pageCount) + m.Len(listNamespacesPageSize2, 2) + for _, namespace := range listNamespacesPageSize2 { + m.NotEqual(namespace.Namespace.Info.State, enumspb.NAMESPACE_STATE_DELETED) + } + + pageCount = 0 + var listNamespacesPageSize1 []*p.GetNamespaceResponse + for { + resp, err := m.ListNamespaces(1, token) + m.NoError(err) + token = resp.NextPageToken + listNamespacesPageSize1 = append(listNamespacesPageSize1, resp.Namespaces...) + pageCount++ + if len(token) == 0 { + break + } + } + + // 2 pages with data and 1 empty page which is unavoidable. + m.Equal(3, pageCount) + m.Len(listNamespacesPageSize1, 2) + for _, namespace := range listNamespacesPageSize1 { + m.NotEqual(namespace.Namespace.Info.State, enumspb.NAMESPACE_STATE_DELETED) + } +} + +// CreateNamespace helper method +func (m *MetadataPersistenceSuiteV2) CreateNamespace(info *persistencespb.NamespaceInfo, config *persistencespb.NamespaceConfig, + replicationConfig *persistencespb.NamespaceReplicationConfig, isGlobalnamespace bool, configVersion int64, failoverVersion int64) (*p.CreateNamespaceResponse, error) { + return m.MetadataManager.CreateNamespace(m.ctx, &p.CreateNamespaceRequest{ + Namespace: &persistencespb.NamespaceDetail{ + Info: info, + Config: config, + ReplicationConfig: replicationConfig, + + ConfigVersion: configVersion, + FailoverVersion: failoverVersion, + }, IsGlobalNamespace: isGlobalnamespace, + }) +} + +// GetNamespace helper method +func (m *MetadataPersistenceSuiteV2) GetNamespace(id string, name string) (*p.GetNamespaceResponse, error) { + return m.MetadataManager.GetNamespace(m.ctx, &p.GetNamespaceRequest{ + ID: id, + Name: name, + }) +} + +// UpdateNamespace helper method +func (m *MetadataPersistenceSuiteV2) UpdateNamespace( + info *persistencespb.NamespaceInfo, + config *persistencespb.NamespaceConfig, + replicationConfig *persistencespb.NamespaceReplicationConfig, + configVersion int64, + failoverVersion int64, + failoverNotificationVersion int64, + failoverEndTime *time.Time, + notificationVersion int64, + isGlobalNamespace bool, +) error { + return m.MetadataManager.UpdateNamespace(m.ctx, &p.UpdateNamespaceRequest{ + Namespace: &persistencespb.NamespaceDetail{ + Info: info, + Config: config, + ReplicationConfig: replicationConfig, + ConfigVersion: configVersion, + FailoverVersion: failoverVersion, + FailoverEndTime: failoverEndTime, + FailoverNotificationVersion: failoverNotificationVersion, + }, + NotificationVersion: notificationVersion, + IsGlobalNamespace: isGlobalNamespace, + }) +} + +// DeleteNamespace helper method +func (m *MetadataPersistenceSuiteV2) DeleteNamespace(id string, name string) error { + if len(id) > 0 { + return m.MetadataManager.DeleteNamespace(m.ctx, &p.DeleteNamespaceRequest{ID: id}) + } + return m.MetadataManager.DeleteNamespaceByName(m.ctx, &p.DeleteNamespaceByNameRequest{Name: name}) +} + +// ListNamespaces helper method +func (m *MetadataPersistenceSuiteV2) ListNamespaces(pageSize int, pageToken []byte) (*p.ListNamespacesResponse, error) { + return m.MetadataManager.ListNamespaces(m.ctx, &p.ListNamespacesRequest{ + PageSize: pageSize, + NextPageToken: pageToken, + }) +} diff -Nru temporal-1.21.5-1/src/common/persistence/persistence-tests/persistenceTestBase.go temporal-1.22.5/src/common/persistence/persistence-tests/persistenceTestBase.go --- temporal-1.21.5-1/src/common/persistence/persistence-tests/persistenceTestBase.go 2023-09-29 
14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/persistence-tests/persistenceTestBase.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,426 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package persistencetests - -import ( - "context" - "fmt" - "math/rand" - "strings" - "sync/atomic" - "time" - - "github.com/stretchr/testify/suite" - persistencespb "go.temporal.io/server/api/persistence/v1" - replicationspb "go.temporal.io/server/api/replication/v1" - "go.temporal.io/server/common" - "go.temporal.io/server/common/backoff" - "go.temporal.io/server/common/clock" - "go.temporal.io/server/common/cluster" - "go.temporal.io/server/common/config" - "go.temporal.io/server/common/dynamicconfig" - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/log/tag" - "go.temporal.io/server/common/metrics" - "go.temporal.io/server/common/persistence" - "go.temporal.io/server/common/persistence/cassandra" - "go.temporal.io/server/common/persistence/client" - "go.temporal.io/server/common/persistence/serialization" - "go.temporal.io/server/common/persistence/sql" - "go.temporal.io/server/common/persistence/sql/sqlplugin/mysql" - "go.temporal.io/server/common/persistence/sql/sqlplugin/postgresql" - "go.temporal.io/server/common/persistence/sql/sqlplugin/sqlite" - "go.temporal.io/server/common/quotas" - "go.temporal.io/server/common/resolver" - "go.temporal.io/server/common/searchattribute" - "go.temporal.io/server/environment" -) - -// TimePrecision is needed to account for database timestamp precision. -// Cassandra only provides milliseconds timestamp precision, so we need to use tolerance when doing comparison -const TimePrecision = 2 * time.Millisecond - -type ( - // TransferTaskIDGenerator generates IDs for transfer tasks written by helper methods - TransferTaskIDGenerator interface { - GenerateTransferTaskID() (int64, error) - } - - // TestBaseOptions options to configure workflow test base. - TestBaseOptions struct { - SQLDBPluginName string - DBName string - DBUsername string - DBPassword string - DBHost string - DBPort int `yaml:"-"` - ConnectAttributes map[string]string - StoreType string `yaml:"-"` - SchemaDir string `yaml:"-"` - FaultInjection *config.FaultInjection `yaml:"faultinjection"` - } - - // TestBase wraps the base setup needed to create workflows over persistence layer. 
- TestBase struct { - suite.Suite - ShardMgr persistence.ShardManager - AbstractDataStoreFactory client.AbstractDataStoreFactory - FaultInjection *client.FaultInjectionDataStoreFactory - Factory client.Factory - ExecutionManager persistence.ExecutionManager - TaskMgr persistence.TaskManager - ClusterMetadataManager persistence.ClusterMetadataManager - MetadataManager persistence.MetadataManager - NamespaceReplicationQueue persistence.NamespaceReplicationQueue - ShardInfo *persistencespb.ShardInfo - TaskIDGenerator TransferTaskIDGenerator - ClusterMetadata cluster.Metadata - SearchAttributesManager searchattribute.Manager - PersistenceRateLimiter quotas.RequestRateLimiter - PersistenceHealthSignals persistence.HealthSignalAggregator - ReadLevel int64 - ReplicationReadLevel int64 - DefaultTestCluster PersistenceTestCluster - Logger log.Logger - } - - // PersistenceTestCluster exposes management operations on a database - PersistenceTestCluster interface { - SetupTestDatabase() - TearDownTestDatabase() - Config() config.Persistence - } - - // TestTransferTaskIDGenerator helper - TestTransferTaskIDGenerator struct { - seqNum int64 - } -) - -// NewTestBaseWithCassandra returns a persistence test base backed by cassandra datastore -func NewTestBaseWithCassandra(options *TestBaseOptions) TestBase { - if options.DBName == "" { - options.DBName = "test_" + GenerateRandomDBName(3) - } - logger := log.NewTestLogger() - testCluster := cassandra.NewTestCluster(options.DBName, options.DBUsername, options.DBPassword, options.DBHost, options.DBPort, options.SchemaDir, options.FaultInjection, logger) - return NewTestBaseForCluster(testCluster, logger) -} - -// NewTestBaseWithSQL returns a new persistence test base backed by SQL -func NewTestBaseWithSQL(options *TestBaseOptions) TestBase { - if options.DBName == "" { - options.DBName = "test_" + GenerateRandomDBName(3) - } - logger := log.NewTestLogger() - - if options.DBPort == 0 { - switch options.SQLDBPluginName { - case mysql.PluginName, mysql.PluginNameV8: - options.DBPort = environment.GetMySQLPort() - case postgresql.PluginName, postgresql.PluginNameV12: - options.DBPort = environment.GetPostgreSQLPort() - case sqlite.PluginName: - options.DBPort = 0 - default: - panic(fmt.Sprintf("unknown sql store driver: %v", options.SQLDBPluginName)) - } - } - if options.DBHost == "" { - switch options.SQLDBPluginName { - case mysql.PluginName, mysql.PluginNameV8: - options.DBHost = environment.GetMySQLAddress() - case postgresql.PluginName: - options.DBHost = environment.GetPostgreSQLAddress() - case sqlite.PluginName: - options.DBHost = environment.Localhost - default: - panic(fmt.Sprintf("unknown sql store driver: %v", options.SQLDBPluginName)) - } - } - testCluster := sql.NewTestCluster(options.SQLDBPluginName, options.DBName, options.DBUsername, options.DBPassword, options.DBHost, options.DBPort, options.ConnectAttributes, options.SchemaDir, options.FaultInjection, logger) - return NewTestBaseForCluster(testCluster, logger) -} - -// NewTestBase returns a persistence test base backed by either cassandra or sql -func NewTestBase(options *TestBaseOptions) TestBase { - switch options.StoreType { - case config.StoreTypeSQL: - return NewTestBaseWithSQL(options) - case config.StoreTypeNoSQL: - return NewTestBaseWithCassandra(options) - default: - panic("invalid storeType " + options.StoreType) - } -} - -func NewTestBaseForCluster(testCluster PersistenceTestCluster, logger log.Logger) TestBase { - return TestBase{ - DefaultTestCluster: testCluster, - Logger: logger, 
- } -} - -// Setup sets up the test base, must be called as part of SetupSuite -func (s *TestBase) Setup(clusterMetadataConfig *cluster.Config) { - var err error - shardID := int32(10) - if clusterMetadataConfig == nil { - clusterMetadataConfig = cluster.NewTestClusterMetadataConfig(false, false) - } - if s.PersistenceHealthSignals == nil { - s.PersistenceHealthSignals = persistence.NoopHealthSignalAggregator - } - - clusterName := clusterMetadataConfig.CurrentClusterName - - s.DefaultTestCluster.SetupTestDatabase() - - cfg := s.DefaultTestCluster.Config() - dataStoreFactory, faultInjection := client.DataStoreFactoryProvider( - client.ClusterName(clusterName), - resolver.NewNoopResolver(), - &cfg, - s.AbstractDataStoreFactory, - s.Logger, - metrics.NoopMetricsHandler, - ) - factory := client.NewFactory(dataStoreFactory, &cfg, s.PersistenceRateLimiter, serialization.NewSerializer(), clusterName, metrics.NoopMetricsHandler, s.Logger, s.PersistenceHealthSignals) - - s.TaskMgr, err = factory.NewTaskManager() - s.fatalOnError("NewTaskManager", err) - - s.ClusterMetadataManager, err = factory.NewClusterMetadataManager() - s.fatalOnError("NewClusterMetadataManager", err) - - s.ClusterMetadata = cluster.NewMetadataFromConfig(clusterMetadataConfig, s.ClusterMetadataManager, dynamicconfig.NewNoopCollection(), s.Logger) - s.SearchAttributesManager = searchattribute.NewManager(clock.NewRealTimeSource(), s.ClusterMetadataManager, dynamicconfig.GetBoolPropertyFn(true)) - - s.MetadataManager, err = factory.NewMetadataManager() - s.fatalOnError("NewMetadataManager", err) - - s.ShardMgr, err = factory.NewShardManager() - s.fatalOnError("NewShardManager", err) - - s.ExecutionManager, err = factory.NewExecutionManager() - s.fatalOnError("NewExecutionManager", err) - - s.Factory = factory - s.FaultInjection = faultInjection - - s.ReadLevel = 0 - s.ReplicationReadLevel = 0 - s.ShardInfo = &persistencespb.ShardInfo{ - ShardId: shardID, - RangeId: 0, - } - - s.TaskIDGenerator = &TestTransferTaskIDGenerator{} - _, err = s.ShardMgr.GetOrCreateShard(context.Background(), &persistence.GetOrCreateShardRequest{ - ShardID: shardID, - InitialShardInfo: s.ShardInfo, - }) - s.fatalOnError("CreateShard", err) - - queue, err := factory.NewNamespaceReplicationQueue() - s.fatalOnError("Create NamespaceReplicationQueue", err) - s.NamespaceReplicationQueue = queue -} - -func (s *TestBase) fatalOnError(msg string, err error) { - if err != nil { - s.Logger.Fatal(msg, tag.Error(err)) - } -} - -// TearDownWorkflowStore to cleanup -func (s *TestBase) TearDownWorkflowStore() { - s.TaskMgr.Close() - s.ClusterMetadataManager.Close() - s.MetadataManager.Close() - s.ExecutionManager.Close() - s.ShardMgr.Close() - s.ExecutionManager.Close() - s.NamespaceReplicationQueue.Stop() - s.Factory.Close() - s.DefaultTestCluster.TearDownTestDatabase() -} - -// EqualTimesWithPrecision assertion that two times are equal within precision -func (s *TestBase) EqualTimesWithPrecision(t1, t2 time.Time, precision time.Duration) { - s.True(timeComparator(t1, t2, precision), - "Not equal: \n"+ - "expected: %s\n"+ - "actual : %s%s", t1, t2, - ) -} - -// EqualTimes assertion that two times are equal within two millisecond precision -func (s *TestBase) EqualTimes(t1, t2 time.Time) { - s.EqualTimesWithPrecision(t1, t2, TimePrecision) -} - -// GenerateTransferTaskID helper -func (g *TestTransferTaskIDGenerator) GenerateTransferTaskID() (int64, error) { - return atomic.AddInt64(&g.seqNum, 1), nil -} - -// Publish is a utility method to add messages to the queue 
-func (s *TestBase) Publish(ctx context.Context, task *replicationspb.ReplicationTask) error { - retryPolicy := backoff.NewExponentialRetryPolicy(100 * time.Millisecond). - WithBackoffCoefficient(1.5). - WithMaximumAttempts(5) - - return backoff.ThrottleRetry( - func() error { - return s.NamespaceReplicationQueue.Publish(ctx, task) - }, - retryPolicy, - func(e error) bool { - return common.IsPersistenceTransientError(e) || isMessageIDConflictError(e) - }) -} - -func isMessageIDConflictError(err error) bool { - _, ok := err.(*persistence.ConditionFailedError) - return ok -} - -// GetReplicationMessages is a utility method to get messages from the queue -func (s *TestBase) GetReplicationMessages( - ctx context.Context, - lastMessageID int64, - pageSize int, -) ([]*replicationspb.ReplicationTask, int64, error) { - return s.NamespaceReplicationQueue.GetReplicationMessages(ctx, lastMessageID, pageSize) -} - -// UpdateAckLevel updates replication queue ack level -func (s *TestBase) UpdateAckLevel( - ctx context.Context, - lastProcessedMessageID int64, - clusterName string, -) error { - return s.NamespaceReplicationQueue.UpdateAckLevel(ctx, lastProcessedMessageID, clusterName) -} - -// GetAckLevels returns replication queue ack levels -func (s *TestBase) GetAckLevels( - ctx context.Context, -) (map[string]int64, error) { - return s.NamespaceReplicationQueue.GetAckLevels(ctx) -} - -// PublishToNamespaceDLQ is a utility method to add messages to the namespace DLQ -func (s *TestBase) PublishToNamespaceDLQ(ctx context.Context, task *replicationspb.ReplicationTask) error { - retryPolicy := backoff.NewExponentialRetryPolicy(100 * time.Millisecond). - WithBackoffCoefficient(1.5). - WithMaximumAttempts(5) - - return backoff.ThrottleRetryContext( - ctx, - func(ctx context.Context) error { - return s.NamespaceReplicationQueue.PublishToDLQ(ctx, task) - }, - retryPolicy, - func(e error) bool { - return common.IsPersistenceTransientError(e) || isMessageIDConflictError(e) - }) -} - -// GetMessagesFromNamespaceDLQ is a utility method to get messages from the namespace DLQ -func (s *TestBase) GetMessagesFromNamespaceDLQ( - ctx context.Context, - firstMessageID int64, - lastMessageID int64, - pageSize int, - pageToken []byte, -) ([]*replicationspb.ReplicationTask, []byte, error) { - return s.NamespaceReplicationQueue.GetMessagesFromDLQ( - ctx, - firstMessageID, - lastMessageID, - pageSize, - pageToken, - ) -} - -// UpdateNamespaceDLQAckLevel updates namespace dlq ack level -func (s *TestBase) UpdateNamespaceDLQAckLevel( - ctx context.Context, - lastProcessedMessageID int64, -) error { - return s.NamespaceReplicationQueue.UpdateDLQAckLevel(ctx, lastProcessedMessageID) -} - -// GetNamespaceDLQAckLevel returns namespace dlq ack level -func (s *TestBase) GetNamespaceDLQAckLevel( - ctx context.Context, -) (int64, error) { - return s.NamespaceReplicationQueue.GetDLQAckLevel(ctx) -} - -// DeleteMessageFromNamespaceDLQ deletes one message from namespace DLQ -func (s *TestBase) DeleteMessageFromNamespaceDLQ( - ctx context.Context, - messageID int64, -) error { - return s.NamespaceReplicationQueue.DeleteMessageFromDLQ(ctx, messageID) -} - -// RangeDeleteMessagesFromNamespaceDLQ deletes messages from namespace DLQ -func (s *TestBase) RangeDeleteMessagesFromNamespaceDLQ( - ctx context.Context, - firstMessageID int64, - lastMessageID int64, -) error { - return s.NamespaceReplicationQueue.RangeDeleteMessagesFromDLQ(ctx, firstMessageID, lastMessageID) -} - -func randString(length int) string { - const lowercaseSet = 
"abcdefghijklmnopqrstuvwxyz" - b := make([]byte, length) - for i := range b { - b[i] = lowercaseSet[rand.Int63()%int64(len(lowercaseSet))] - } - return string(b) -} - -// GenerateRandomDBName helper -// Format: MMDDHHMMSS_abc -func GenerateRandomDBName(n int) string { - now := time.Now().UTC() - rand.Seed(now.UnixNano()) - var prefix strings.Builder - prefix.WriteString(now.Format("0102150405")) - prefix.WriteRune('_') - prefix.WriteString(randString(n)) - return prefix.String() -} - -func timeComparator(t1, t2 time.Time, timeTolerance time.Duration) bool { - diff := t2.Sub(t1) - return diff.Nanoseconds() <= timeTolerance.Nanoseconds() -} diff -Nru temporal-1.21.5-1/src/common/persistence/persistence-tests/persistence_test_base.go temporal-1.22.5/src/common/persistence/persistence-tests/persistence_test_base.go --- temporal-1.21.5-1/src/common/persistence/persistence-tests/persistence_test_base.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/persistence-tests/persistence_test_base.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,426 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package persistencetests + +import ( + "context" + "fmt" + "math/rand" + "strings" + "sync/atomic" + "time" + + "github.com/stretchr/testify/suite" + persistencespb "go.temporal.io/server/api/persistence/v1" + replicationspb "go.temporal.io/server/api/replication/v1" + "go.temporal.io/server/common" + "go.temporal.io/server/common/backoff" + "go.temporal.io/server/common/clock" + "go.temporal.io/server/common/cluster" + "go.temporal.io/server/common/config" + "go.temporal.io/server/common/dynamicconfig" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/log/tag" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/persistence" + "go.temporal.io/server/common/persistence/cassandra" + "go.temporal.io/server/common/persistence/client" + "go.temporal.io/server/common/persistence/serialization" + "go.temporal.io/server/common/persistence/sql" + "go.temporal.io/server/common/persistence/sql/sqlplugin/mysql" + "go.temporal.io/server/common/persistence/sql/sqlplugin/postgresql" + "go.temporal.io/server/common/persistence/sql/sqlplugin/sqlite" + "go.temporal.io/server/common/quotas" + "go.temporal.io/server/common/resolver" + "go.temporal.io/server/common/searchattribute" + "go.temporal.io/server/environment" +) + +// TimePrecision is needed to account for database timestamp precision. +// Cassandra only provides milliseconds timestamp precision, so we need to use tolerance when doing comparison +const TimePrecision = 2 * time.Millisecond + +type ( + // TransferTaskIDGenerator generates IDs for transfer tasks written by helper methods + TransferTaskIDGenerator interface { + GenerateTransferTaskID() (int64, error) + } + + // TestBaseOptions options to configure workflow test base. + TestBaseOptions struct { + SQLDBPluginName string + DBName string + DBUsername string + DBPassword string + DBHost string + DBPort int `yaml:"-"` + ConnectAttributes map[string]string + StoreType string `yaml:"-"` + SchemaDir string `yaml:"-"` + FaultInjection *config.FaultInjection `yaml:"faultinjection"` + } + + // TestBase wraps the base setup needed to create workflows over persistence layer. 
+ TestBase struct { + suite.Suite + ShardMgr persistence.ShardManager + AbstractDataStoreFactory client.AbstractDataStoreFactory + FaultInjection *client.FaultInjectionDataStoreFactory + Factory client.Factory + ExecutionManager persistence.ExecutionManager + TaskMgr persistence.TaskManager + ClusterMetadataManager persistence.ClusterMetadataManager + MetadataManager persistence.MetadataManager + NamespaceReplicationQueue persistence.NamespaceReplicationQueue + ShardInfo *persistencespb.ShardInfo + TaskIDGenerator TransferTaskIDGenerator + ClusterMetadata cluster.Metadata + SearchAttributesManager searchattribute.Manager + PersistenceRateLimiter quotas.RequestRateLimiter + PersistenceHealthSignals persistence.HealthSignalAggregator + ReadLevel int64 + ReplicationReadLevel int64 + DefaultTestCluster PersistenceTestCluster + Logger log.Logger + } + + // PersistenceTestCluster exposes management operations on a database + PersistenceTestCluster interface { + SetupTestDatabase() + TearDownTestDatabase() + Config() config.Persistence + } + + // TestTransferTaskIDGenerator helper + TestTransferTaskIDGenerator struct { + seqNum int64 + } +) + +// NewTestBaseWithCassandra returns a persistence test base backed by cassandra datastore +func NewTestBaseWithCassandra(options *TestBaseOptions) TestBase { + if options.DBName == "" { + options.DBName = "test_" + GenerateRandomDBName(3) + } + logger := log.NewTestLogger() + testCluster := cassandra.NewTestCluster(options.DBName, options.DBUsername, options.DBPassword, options.DBHost, options.DBPort, options.SchemaDir, options.FaultInjection, logger) + return NewTestBaseForCluster(testCluster, logger) +} + +// NewTestBaseWithSQL returns a new persistence test base backed by SQL +func NewTestBaseWithSQL(options *TestBaseOptions) TestBase { + if options.DBName == "" { + options.DBName = "test_" + GenerateRandomDBName(3) + } + logger := log.NewTestLogger() + + if options.DBPort == 0 { + switch options.SQLDBPluginName { + case mysql.PluginName, mysql.PluginNameV8: + options.DBPort = environment.GetMySQLPort() + case postgresql.PluginName, postgresql.PluginNameV12: + options.DBPort = environment.GetPostgreSQLPort() + case sqlite.PluginName: + options.DBPort = 0 + default: + panic(fmt.Sprintf("unknown sql store driver: %v", options.SQLDBPluginName)) + } + } + if options.DBHost == "" { + switch options.SQLDBPluginName { + case mysql.PluginName, mysql.PluginNameV8: + options.DBHost = environment.GetMySQLAddress() + case postgresql.PluginName: + options.DBHost = environment.GetPostgreSQLAddress() + case sqlite.PluginName: + options.DBHost = environment.Localhost + default: + panic(fmt.Sprintf("unknown sql store driver: %v", options.SQLDBPluginName)) + } + } + testCluster := sql.NewTestCluster(options.SQLDBPluginName, options.DBName, options.DBUsername, options.DBPassword, options.DBHost, options.DBPort, options.ConnectAttributes, options.SchemaDir, options.FaultInjection, logger) + return NewTestBaseForCluster(testCluster, logger) +} + +// NewTestBase returns a persistence test base backed by either cassandra or sql +func NewTestBase(options *TestBaseOptions) TestBase { + switch options.StoreType { + case config.StoreTypeSQL: + return NewTestBaseWithSQL(options) + case config.StoreTypeNoSQL: + return NewTestBaseWithCassandra(options) + default: + panic("invalid storeType " + options.StoreType) + } +} + +func NewTestBaseForCluster(testCluster PersistenceTestCluster, logger log.Logger) TestBase { + return TestBase{ + DefaultTestCluster: testCluster, + Logger: logger, 
+ } +} + +// Setup sets up the test base, must be called as part of SetupSuite +func (s *TestBase) Setup(clusterMetadataConfig *cluster.Config) { + var err error + shardID := int32(10) + if clusterMetadataConfig == nil { + clusterMetadataConfig = cluster.NewTestClusterMetadataConfig(false, false) + } + if s.PersistenceHealthSignals == nil { + s.PersistenceHealthSignals = persistence.NoopHealthSignalAggregator + } + + clusterName := clusterMetadataConfig.CurrentClusterName + + s.DefaultTestCluster.SetupTestDatabase() + + cfg := s.DefaultTestCluster.Config() + dataStoreFactory, faultInjection := client.DataStoreFactoryProvider( + client.ClusterName(clusterName), + resolver.NewNoopResolver(), + &cfg, + s.AbstractDataStoreFactory, + s.Logger, + metrics.NoopMetricsHandler, + ) + factory := client.NewFactory(dataStoreFactory, &cfg, s.PersistenceRateLimiter, serialization.NewSerializer(), nil, clusterName, metrics.NoopMetricsHandler, s.Logger, s.PersistenceHealthSignals) + + s.TaskMgr, err = factory.NewTaskManager() + s.fatalOnError("NewTaskManager", err) + + s.ClusterMetadataManager, err = factory.NewClusterMetadataManager() + s.fatalOnError("NewClusterMetadataManager", err) + + s.ClusterMetadata = cluster.NewMetadataFromConfig(clusterMetadataConfig, s.ClusterMetadataManager, dynamicconfig.NewNoopCollection(), s.Logger) + s.SearchAttributesManager = searchattribute.NewManager(clock.NewRealTimeSource(), s.ClusterMetadataManager, dynamicconfig.GetBoolPropertyFn(true)) + + s.MetadataManager, err = factory.NewMetadataManager() + s.fatalOnError("NewMetadataManager", err) + + s.ShardMgr, err = factory.NewShardManager() + s.fatalOnError("NewShardManager", err) + + s.ExecutionManager, err = factory.NewExecutionManager() + s.fatalOnError("NewExecutionManager", err) + + s.Factory = factory + s.FaultInjection = faultInjection + + s.ReadLevel = 0 + s.ReplicationReadLevel = 0 + s.ShardInfo = &persistencespb.ShardInfo{ + ShardId: shardID, + RangeId: 0, + } + + s.TaskIDGenerator = &TestTransferTaskIDGenerator{} + _, err = s.ShardMgr.GetOrCreateShard(context.Background(), &persistence.GetOrCreateShardRequest{ + ShardID: shardID, + InitialShardInfo: s.ShardInfo, + }) + s.fatalOnError("CreateShard", err) + + queue, err := factory.NewNamespaceReplicationQueue() + s.fatalOnError("Create NamespaceReplicationQueue", err) + s.NamespaceReplicationQueue = queue +} + +func (s *TestBase) fatalOnError(msg string, err error) { + if err != nil { + s.Logger.Fatal(msg, tag.Error(err)) + } +} + +// TearDownWorkflowStore to cleanup +func (s *TestBase) TearDownWorkflowStore() { + s.TaskMgr.Close() + s.ClusterMetadataManager.Close() + s.MetadataManager.Close() + s.ExecutionManager.Close() + s.ShardMgr.Close() + s.ExecutionManager.Close() + s.NamespaceReplicationQueue.Stop() + s.Factory.Close() + s.DefaultTestCluster.TearDownTestDatabase() +} + +// EqualTimesWithPrecision assertion that two times are equal within precision +func (s *TestBase) EqualTimesWithPrecision(t1, t2 time.Time, precision time.Duration) { + s.True(timeComparator(t1, t2, precision), + "Not equal: \n"+ + "expected: %s\n"+ + "actual : %s%s", t1, t2, + ) +} + +// EqualTimes assertion that two times are equal within two millisecond precision +func (s *TestBase) EqualTimes(t1, t2 time.Time) { + s.EqualTimesWithPrecision(t1, t2, TimePrecision) +} + +// GenerateTransferTaskID helper +func (g *TestTransferTaskIDGenerator) GenerateTransferTaskID() (int64, error) { + return atomic.AddInt64(&g.seqNum, 1), nil +} + +// Publish is a utility method to add messages to the 
queue +func (s *TestBase) Publish(ctx context.Context, task *replicationspb.ReplicationTask) error { + retryPolicy := backoff.NewExponentialRetryPolicy(100 * time.Millisecond). + WithBackoffCoefficient(1.5). + WithMaximumAttempts(5) + + return backoff.ThrottleRetry( + func() error { + return s.NamespaceReplicationQueue.Publish(ctx, task) + }, + retryPolicy, + func(e error) bool { + return common.IsPersistenceTransientError(e) || isMessageIDConflictError(e) + }) +} + +func isMessageIDConflictError(err error) bool { + _, ok := err.(*persistence.ConditionFailedError) + return ok +} + +// GetReplicationMessages is a utility method to get messages from the queue +func (s *TestBase) GetReplicationMessages( + ctx context.Context, + lastMessageID int64, + pageSize int, +) ([]*replicationspb.ReplicationTask, int64, error) { + return s.NamespaceReplicationQueue.GetReplicationMessages(ctx, lastMessageID, pageSize) +} + +// UpdateAckLevel updates replication queue ack level +func (s *TestBase) UpdateAckLevel( + ctx context.Context, + lastProcessedMessageID int64, + clusterName string, +) error { + return s.NamespaceReplicationQueue.UpdateAckLevel(ctx, lastProcessedMessageID, clusterName) +} + +// GetAckLevels returns replication queue ack levels +func (s *TestBase) GetAckLevels( + ctx context.Context, +) (map[string]int64, error) { + return s.NamespaceReplicationQueue.GetAckLevels(ctx) +} + +// PublishToNamespaceDLQ is a utility method to add messages to the namespace DLQ +func (s *TestBase) PublishToNamespaceDLQ(ctx context.Context, task *replicationspb.ReplicationTask) error { + retryPolicy := backoff.NewExponentialRetryPolicy(100 * time.Millisecond). + WithBackoffCoefficient(1.5). + WithMaximumAttempts(5) + + return backoff.ThrottleRetryContext( + ctx, + func(ctx context.Context) error { + return s.NamespaceReplicationQueue.PublishToDLQ(ctx, task) + }, + retryPolicy, + func(e error) bool { + return common.IsPersistenceTransientError(e) || isMessageIDConflictError(e) + }) +} + +// GetMessagesFromNamespaceDLQ is a utility method to get messages from the namespace DLQ +func (s *TestBase) GetMessagesFromNamespaceDLQ( + ctx context.Context, + firstMessageID int64, + lastMessageID int64, + pageSize int, + pageToken []byte, +) ([]*replicationspb.ReplicationTask, []byte, error) { + return s.NamespaceReplicationQueue.GetMessagesFromDLQ( + ctx, + firstMessageID, + lastMessageID, + pageSize, + pageToken, + ) +} + +// UpdateNamespaceDLQAckLevel updates namespace dlq ack level +func (s *TestBase) UpdateNamespaceDLQAckLevel( + ctx context.Context, + lastProcessedMessageID int64, +) error { + return s.NamespaceReplicationQueue.UpdateDLQAckLevel(ctx, lastProcessedMessageID) +} + +// GetNamespaceDLQAckLevel returns namespace dlq ack level +func (s *TestBase) GetNamespaceDLQAckLevel( + ctx context.Context, +) (int64, error) { + return s.NamespaceReplicationQueue.GetDLQAckLevel(ctx) +} + +// DeleteMessageFromNamespaceDLQ deletes one message from namespace DLQ +func (s *TestBase) DeleteMessageFromNamespaceDLQ( + ctx context.Context, + messageID int64, +) error { + return s.NamespaceReplicationQueue.DeleteMessageFromDLQ(ctx, messageID) +} + +// RangeDeleteMessagesFromNamespaceDLQ deletes messages from namespace DLQ +func (s *TestBase) RangeDeleteMessagesFromNamespaceDLQ( + ctx context.Context, + firstMessageID int64, + lastMessageID int64, +) error { + return s.NamespaceReplicationQueue.RangeDeleteMessagesFromDLQ(ctx, firstMessageID, lastMessageID) +} + +func randString(length int) string { + const lowercaseSet = 
"abcdefghijklmnopqrstuvwxyz" + b := make([]byte, length) + for i := range b { + b[i] = lowercaseSet[rand.Int63()%int64(len(lowercaseSet))] + } + return string(b) +} + +// GenerateRandomDBName helper +// Format: MMDDHHMMSS_abc +func GenerateRandomDBName(n int) string { + now := time.Now().UTC() + rand.Seed(now.UnixNano()) + var prefix strings.Builder + prefix.WriteString(now.Format("0102150405")) + prefix.WriteRune('_') + prefix.WriteString(randString(n)) + return prefix.String() +} + +func timeComparator(t1, t2 time.Time, timeTolerance time.Duration) bool { + diff := t2.Sub(t1) + return diff.Nanoseconds() <= timeTolerance.Nanoseconds() +} diff -Nru temporal-1.21.5-1/src/common/persistence/persistence-tests/queuePersistenceTest.go temporal-1.22.5/src/common/persistence/persistence-tests/queuePersistenceTest.go --- temporal-1.21.5-1/src/common/persistence/persistence-tests/queuePersistenceTest.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/persistence-tests/queuePersistenceTest.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,235 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package persistencetests - -import ( - "context" - "fmt" - "sync" - "time" - - "github.com/stretchr/testify/require" - - enumsspb "go.temporal.io/server/api/enums/v1" - replicationspb "go.temporal.io/server/api/replication/v1" - "go.temporal.io/server/common/debug" - "go.temporal.io/server/common/persistence" -) - -type ( - // QueuePersistenceSuite contains queue persistence tests - QueuePersistenceSuite struct { - TestBase - // override suite.Suite.Assertions with require.Assertions; this means that s.NotNil(nil) will stop the test, - // not merely log an error - *require.Assertions - - ctx context.Context - cancel context.CancelFunc - } -) - -// SetupSuite implementation -func (s *QueuePersistenceSuite) SetupSuite() { -} - -// SetupTest implementation -func (s *QueuePersistenceSuite) SetupTest() { - // Have to define our overridden assertions in the test setup. 
If we did it earlier, s.T() will return nil - s.Assertions = require.New(s.T()) - s.ctx, s.cancel = context.WithTimeout(context.Background(), 30*time.Second*debug.TimeoutMultiplier) -} - -func (s *QueuePersistenceSuite) TearDownTest() { - s.cancel() -} - -// TearDownSuite implementation -func (s *QueuePersistenceSuite) TearDownSuite() { - s.TearDownWorkflowStore() -} - -// TestNamespaceReplicationQueue tests namespace replication queue operations -func (s *QueuePersistenceSuite) TestNamespaceReplicationQueue() { - numMessages := 100 - concurrentSenders := 10 - - messageChan := make(chan *replicationspb.ReplicationTask) - - taskType := enumsspb.REPLICATION_TASK_TYPE_NAMESPACE_TASK - go func() { - for i := 0; i < numMessages; i++ { - messageChan <- &replicationspb.ReplicationTask{ - TaskType: taskType, - Attributes: &replicationspb.ReplicationTask_NamespaceTaskAttributes{ - NamespaceTaskAttributes: &replicationspb.NamespaceTaskAttributes{ - Id: fmt.Sprintf("message-%v", i), - }, - }, - } - } - close(messageChan) - }() - - wg := sync.WaitGroup{} - wg.Add(concurrentSenders) - - for i := 0; i < concurrentSenders; i++ { - go func(senderNum int) { - defer wg.Done() - for message := range messageChan { - err := s.Publish(s.ctx, message) - id := message.Attributes.(*replicationspb.ReplicationTask_NamespaceTaskAttributes).NamespaceTaskAttributes.Id - s.Nil(err, "Enqueue message failed when sender %d tried to send %s", senderNum, id) - } - }(i) - } - - wg.Wait() - - result, lastRetrievedMessageID, err := s.GetReplicationMessages(s.ctx, persistence.EmptyQueueMessageID, numMessages) - s.Nil(err, "GetReplicationMessages failed.") - s.Len(result, numMessages) - s.Equal(int64(numMessages-1), lastRetrievedMessageID) -} - -// TestQueueMetadataOperations tests queue metadata operations -func (s *QueuePersistenceSuite) TestQueueMetadataOperations() { - clusterAckLevels, err := s.GetAckLevels(s.ctx) - s.Require().NoError(err) - s.Assert().Len(clusterAckLevels, 0) - - err = s.UpdateAckLevel(s.ctx, 10, "test1") - s.Require().NoError(err) - - clusterAckLevels, err = s.GetAckLevels(s.ctx) - s.Require().NoError(err) - s.Assert().Len(clusterAckLevels, 1) - s.Assert().Equal(int64(10), clusterAckLevels["test1"]) - - err = s.UpdateAckLevel(s.ctx, 20, "test1") - s.Require().NoError(err) - - clusterAckLevels, err = s.GetAckLevels(s.ctx) - s.Require().NoError(err) - s.Assert().Len(clusterAckLevels, 1) - s.Assert().Equal(int64(20), clusterAckLevels["test1"]) - - err = s.UpdateAckLevel(s.ctx, 25, "test2") - s.Require().NoError(err) - - clusterAckLevels, err = s.GetAckLevels(s.ctx) - s.Require().NoError(err) - s.Assert().Len(clusterAckLevels, 2) - s.Assert().Equal(int64(20), clusterAckLevels["test1"]) - s.Assert().Equal(int64(25), clusterAckLevels["test2"]) -} - -// TestNamespaceReplicationDLQ tests namespace DLQ operations -func (s *QueuePersistenceSuite) TestNamespaceReplicationDLQ() { - maxMessageID := int64(100) - numMessages := 100 - concurrentSenders := 10 - - messageChan := make(chan *replicationspb.ReplicationTask) - - taskType := enumsspb.REPLICATION_TASK_TYPE_NAMESPACE_TASK - go func() { - for i := 0; i < numMessages; i++ { - messageChan <- &replicationspb.ReplicationTask{ - TaskType: taskType, - Attributes: &replicationspb.ReplicationTask_NamespaceTaskAttributes{ - NamespaceTaskAttributes: &replicationspb.NamespaceTaskAttributes{ - Id: fmt.Sprintf("message-%v", i), - }, - }, - } - } - close(messageChan) - }() - - wg := sync.WaitGroup{} - wg.Add(concurrentSenders) - - for i := 0; i < concurrentSenders; i++ { - go 
func(senderNum int) { - defer wg.Done() - for message := range messageChan { - err := s.PublishToNamespaceDLQ(s.ctx, message) - id := message.Attributes.(*replicationspb.ReplicationTask_NamespaceTaskAttributes).NamespaceTaskAttributes.Id - s.Nil(err, "Enqueue message failed when sender %d tried to send %s", senderNum, id) - } - }(i) - } - - wg.Wait() - - result1, token, err := s.GetMessagesFromNamespaceDLQ(s.ctx, persistence.EmptyQueueMessageID, maxMessageID, numMessages/2, nil) - s.Nil(err, "GetReplicationMessages failed.") - s.NotNil(token) - result2, token, err := s.GetMessagesFromNamespaceDLQ(s.ctx, persistence.EmptyQueueMessageID, maxMessageID, numMessages, token) - s.Nil(err, "GetReplicationMessages failed.") - s.Equal(len(token), 0) - s.Equal(len(result1)+len(result2), numMessages) - _, _, err = s.GetMessagesFromNamespaceDLQ(s.ctx, persistence.EmptyQueueMessageID, 1<<63-1, numMessages, nil) - s.NoError(err, "GetReplicationMessages failed.") - s.Equal(len(token), 0) - - lastMessageID := result2[len(result2)-1].SourceTaskId - err = s.DeleteMessageFromNamespaceDLQ(s.ctx, lastMessageID) - s.NoError(err) - result3, token, err := s.GetMessagesFromNamespaceDLQ(s.ctx, persistence.EmptyQueueMessageID, maxMessageID, numMessages, token) - s.Nil(err, "GetReplicationMessages failed.") - s.Equal(len(token), 0) - s.Equal(len(result3), numMessages-1) - - err = s.RangeDeleteMessagesFromNamespaceDLQ(s.ctx, persistence.EmptyQueueMessageID, lastMessageID) - s.NoError(err) - result4, token, err := s.GetMessagesFromNamespaceDLQ(s.ctx, persistence.EmptyQueueMessageID, maxMessageID, numMessages, token) - s.Nil(err, "GetReplicationMessages failed.") - s.Equal(len(token), 0) - s.Equal(len(result4), 0) -} - -// TestNamespaceDLQMetadataOperations tests queue metadata operations -func (s *QueuePersistenceSuite) TestNamespaceDLQMetadataOperations() { - ackLevel, err := s.GetNamespaceDLQAckLevel(s.ctx) - s.Require().NoError(err) - s.Equal(persistence.EmptyQueueMessageID, ackLevel) - - err = s.UpdateNamespaceDLQAckLevel(s.ctx, 10) - s.NoError(err) - - ackLevel, err = s.GetNamespaceDLQAckLevel(s.ctx) - s.Require().NoError(err) - s.Equal(int64(10), ackLevel) - - err = s.UpdateNamespaceDLQAckLevel(s.ctx, 1) - s.NoError(err) - - ackLevel, err = s.GetNamespaceDLQAckLevel(s.ctx) - s.Require().NoError(err) - s.Equal(int64(10), ackLevel) -} diff -Nru temporal-1.21.5-1/src/common/persistence/persistence-tests/queue_persistence.go temporal-1.22.5/src/common/persistence/persistence-tests/queue_persistence.go --- temporal-1.21.5-1/src/common/persistence/persistence-tests/queue_persistence.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/persistence-tests/queue_persistence.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,235 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package persistencetests + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/stretchr/testify/require" + + enumsspb "go.temporal.io/server/api/enums/v1" + replicationspb "go.temporal.io/server/api/replication/v1" + "go.temporal.io/server/common/debug" + "go.temporal.io/server/common/persistence" +) + +type ( + // QueuePersistenceSuite contains queue persistence tests + QueuePersistenceSuite struct { + TestBase + // override suite.Suite.Assertions with require.Assertions; this means that s.NotNil(nil) will stop the test, + // not merely log an error + *require.Assertions + + ctx context.Context + cancel context.CancelFunc + } +) + +// SetupSuite implementation +func (s *QueuePersistenceSuite) SetupSuite() { +} + +// SetupTest implementation +func (s *QueuePersistenceSuite) SetupTest() { + // Have to define our overridden assertions in the test setup. If we did it earlier, s.T() will return nil + s.Assertions = require.New(s.T()) + s.ctx, s.cancel = context.WithTimeout(context.Background(), 30*time.Second*debug.TimeoutMultiplier) +} + +func (s *QueuePersistenceSuite) TearDownTest() { + s.cancel() +} + +// TearDownSuite implementation +func (s *QueuePersistenceSuite) TearDownSuite() { + s.TearDownWorkflowStore() +} + +// TestNamespaceReplicationQueue tests namespace replication queue operations +func (s *QueuePersistenceSuite) TestNamespaceReplicationQueue() { + numMessages := 100 + concurrentSenders := 10 + + messageChan := make(chan *replicationspb.ReplicationTask) + + taskType := enumsspb.REPLICATION_TASK_TYPE_NAMESPACE_TASK + go func() { + for i := 0; i < numMessages; i++ { + messageChan <- &replicationspb.ReplicationTask{ + TaskType: taskType, + Attributes: &replicationspb.ReplicationTask_NamespaceTaskAttributes{ + NamespaceTaskAttributes: &replicationspb.NamespaceTaskAttributes{ + Id: fmt.Sprintf("message-%v", i), + }, + }, + } + } + close(messageChan) + }() + + wg := sync.WaitGroup{} + wg.Add(concurrentSenders) + + for i := 0; i < concurrentSenders; i++ { + go func(senderNum int) { + defer wg.Done() + for message := range messageChan { + err := s.Publish(s.ctx, message) + id := message.Attributes.(*replicationspb.ReplicationTask_NamespaceTaskAttributes).NamespaceTaskAttributes.Id + s.Nil(err, "Enqueue message failed when sender %d tried to send %s", senderNum, id) + } + }(i) + } + + wg.Wait() + + result, lastRetrievedMessageID, err := s.GetReplicationMessages(s.ctx, persistence.EmptyQueueMessageID, numMessages) + s.Nil(err, "GetReplicationMessages failed.") + s.Len(result, numMessages) + s.Equal(int64(numMessages-1), lastRetrievedMessageID) +} + +// TestQueueMetadataOperations tests queue metadata operations +func (s *QueuePersistenceSuite) TestQueueMetadataOperations() { + clusterAckLevels, err := s.GetAckLevels(s.ctx) + s.Require().NoError(err) + s.Assert().Len(clusterAckLevels, 0) + + err = s.UpdateAckLevel(s.ctx, 10, "test1") + s.Require().NoError(err) + + clusterAckLevels, err = s.GetAckLevels(s.ctx) + s.Require().NoError(err) + s.Assert().Len(clusterAckLevels, 1) + 
s.Assert().Equal(int64(10), clusterAckLevels["test1"]) + + err = s.UpdateAckLevel(s.ctx, 20, "test1") + s.Require().NoError(err) + + clusterAckLevels, err = s.GetAckLevels(s.ctx) + s.Require().NoError(err) + s.Assert().Len(clusterAckLevels, 1) + s.Assert().Equal(int64(20), clusterAckLevels["test1"]) + + err = s.UpdateAckLevel(s.ctx, 25, "test2") + s.Require().NoError(err) + + clusterAckLevels, err = s.GetAckLevels(s.ctx) + s.Require().NoError(err) + s.Assert().Len(clusterAckLevels, 2) + s.Assert().Equal(int64(20), clusterAckLevels["test1"]) + s.Assert().Equal(int64(25), clusterAckLevels["test2"]) +} + +// TestNamespaceReplicationDLQ tests namespace DLQ operations +func (s *QueuePersistenceSuite) TestNamespaceReplicationDLQ() { + maxMessageID := int64(100) + numMessages := 100 + concurrentSenders := 10 + + messageChan := make(chan *replicationspb.ReplicationTask) + + taskType := enumsspb.REPLICATION_TASK_TYPE_NAMESPACE_TASK + go func() { + for i := 0; i < numMessages; i++ { + messageChan <- &replicationspb.ReplicationTask{ + TaskType: taskType, + Attributes: &replicationspb.ReplicationTask_NamespaceTaskAttributes{ + NamespaceTaskAttributes: &replicationspb.NamespaceTaskAttributes{ + Id: fmt.Sprintf("message-%v", i), + }, + }, + } + } + close(messageChan) + }() + + wg := sync.WaitGroup{} + wg.Add(concurrentSenders) + + for i := 0; i < concurrentSenders; i++ { + go func(senderNum int) { + defer wg.Done() + for message := range messageChan { + err := s.PublishToNamespaceDLQ(s.ctx, message) + id := message.Attributes.(*replicationspb.ReplicationTask_NamespaceTaskAttributes).NamespaceTaskAttributes.Id + s.Nil(err, "Enqueue message failed when sender %d tried to send %s", senderNum, id) + } + }(i) + } + + wg.Wait() + + result1, token, err := s.GetMessagesFromNamespaceDLQ(s.ctx, persistence.EmptyQueueMessageID, maxMessageID, numMessages/2, nil) + s.Nil(err, "GetReplicationMessages failed.") + s.NotNil(token) + result2, token, err := s.GetMessagesFromNamespaceDLQ(s.ctx, persistence.EmptyQueueMessageID, maxMessageID, numMessages, token) + s.Nil(err, "GetReplicationMessages failed.") + s.Equal(len(token), 0) + s.Equal(len(result1)+len(result2), numMessages) + _, _, err = s.GetMessagesFromNamespaceDLQ(s.ctx, persistence.EmptyQueueMessageID, 1<<63-1, numMessages, nil) + s.NoError(err, "GetReplicationMessages failed.") + s.Equal(len(token), 0) + + lastMessageID := result2[len(result2)-1].SourceTaskId + err = s.DeleteMessageFromNamespaceDLQ(s.ctx, lastMessageID) + s.NoError(err) + result3, token, err := s.GetMessagesFromNamespaceDLQ(s.ctx, persistence.EmptyQueueMessageID, maxMessageID, numMessages, token) + s.Nil(err, "GetReplicationMessages failed.") + s.Equal(len(token), 0) + s.Equal(len(result3), numMessages-1) + + err = s.RangeDeleteMessagesFromNamespaceDLQ(s.ctx, persistence.EmptyQueueMessageID, lastMessageID) + s.NoError(err) + result4, token, err := s.GetMessagesFromNamespaceDLQ(s.ctx, persistence.EmptyQueueMessageID, maxMessageID, numMessages, token) + s.Nil(err, "GetReplicationMessages failed.") + s.Equal(len(token), 0) + s.Equal(len(result4), 0) +} + +// TestNamespaceDLQMetadataOperations tests queue metadata operations +func (s *QueuePersistenceSuite) TestNamespaceDLQMetadataOperations() { + ackLevel, err := s.GetNamespaceDLQAckLevel(s.ctx) + s.Require().NoError(err) + s.Equal(persistence.EmptyQueueMessageID, ackLevel) + + err = s.UpdateNamespaceDLQAckLevel(s.ctx, 10) + s.NoError(err) + + ackLevel, err = s.GetNamespaceDLQAckLevel(s.ctx) + s.Require().NoError(err) + s.Equal(int64(10), ackLevel) 
+ + err = s.UpdateNamespaceDLQAckLevel(s.ctx, 1) + s.NoError(err) + + ackLevel, err = s.GetNamespaceDLQAckLevel(s.ctx) + s.Require().NoError(err) + s.Equal(int64(10), ackLevel) +} diff -Nru temporal-1.21.5-1/src/common/persistence/persistence-tests/setup.go temporal-1.22.5/src/common/persistence/persistence-tests/setup.go --- temporal-1.21.5-1/src/common/persistence/persistence-tests/setup.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/persistence-tests/setup.go 2024-02-23 09:45:43.000000000 +0000 @@ -92,7 +92,7 @@ // GetPostgreSQL12TestClusterOption return test options func GetPostgreSQL12TestClusterOption() *TestBaseOptions { return &TestBaseOptions{ - SQLDBPluginName: postgresql.PluginName, + SQLDBPluginName: postgresql.PluginNameV12, DBUsername: testPostgreSQLUser, DBPassword: testPostgreSQLPassword, DBHost: environment.GetPostgreSQLAddress(), diff -Nru temporal-1.21.5-1/src/common/persistence/persistenceInterface.go temporal-1.22.5/src/common/persistence/persistenceInterface.go --- temporal-1.21.5-1/src/common/persistence/persistenceInterface.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/persistenceInterface.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,728 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -//go:generate mockgen -copyright_file ../../LICENSE -package mock -source $GOFILE -destination mock/store_mock.go -aux_files go.temporal.io/server/common/persistence=dataInterfaces.go - -package persistence - -import ( - "context" - "math" - "time" - - commonpb "go.temporal.io/api/common/v1" - enumspb "go.temporal.io/api/enums/v1" - persistencespb "go.temporal.io/server/api/persistence/v1" - "go.temporal.io/server/service/history/tasks" -) - -const ( - EmptyQueueMessageID = int64(-1) - MaxQueueMessageID = math.MaxInt64 -) - -type ( - // //////////////////////////////////////////////////////////////////// - // Persistence interface is a lower layer of dataInterface. - // The intention is to let different persistence implementation(SQL,Cassandra/etc) share some common logic - // Right now the only common part is serialization/deserialization. 
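The banner comment above is the key design statement for this (now removed) file: the *Manager types in dataInterfaces.go own serialization, while these lower-level *Store interfaces only move opaque DataBlobs. A minimal illustrative sketch of that layering, using hypothetical names (dataBlob, namespaceStore, namespaceManager, namespaceInfo) in place of the real generated types, and JSON standing in for the proto serialization the real code uses:

package example

import (
	"context"
	"encoding/json"
)

// dataBlob stands in for commonpb.DataBlob: raw bytes plus an encoding tag.
type dataBlob struct {
	Data     []byte
	Encoding string
}

// namespaceStore plays the role of a *Store: it never sees domain structs, only blobs.
type namespaceStore interface {
	CreateNamespace(ctx context.Context, id string, blob dataBlob) error
}

// namespaceInfo is a placeholder for the persistence proto a manager would serialize.
type namespaceInfo struct {
	ID   string `json:"id"`
	Name string `json:"name"`
}

// namespaceManager plays the role of a *Manager: it serializes, then delegates to the store.
type namespaceManager struct {
	store namespaceStore
}

func (m *namespaceManager) CreateNamespace(ctx context.Context, info namespaceInfo) error {
	payload, err := json.Marshal(info) // the real code serializes protos, not JSON
	if err != nil {
		return err
	}
	return m.store.CreateNamespace(ctx, info.ID, dataBlob{Data: payload, Encoding: "json"})
}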
- // //////////////////////////////////////////////////////////////////// - - // ShardStore is a lower level of ShardManager - ShardStore interface { - Closeable - GetName() string - GetClusterName() string - GetOrCreateShard(ctx context.Context, request *InternalGetOrCreateShardRequest) (*InternalGetOrCreateShardResponse, error) - UpdateShard(ctx context.Context, request *InternalUpdateShardRequest) error - AssertShardOwnership(ctx context.Context, request *AssertShardOwnershipRequest) error - } - - // TaskStore is a lower level of TaskManager - TaskStore interface { - Closeable - GetName() string - CreateTaskQueue(ctx context.Context, request *InternalCreateTaskQueueRequest) error - GetTaskQueue(ctx context.Context, request *InternalGetTaskQueueRequest) (*InternalGetTaskQueueResponse, error) - UpdateTaskQueue(ctx context.Context, request *InternalUpdateTaskQueueRequest) (*UpdateTaskQueueResponse, error) - ListTaskQueue(ctx context.Context, request *ListTaskQueueRequest) (*InternalListTaskQueueResponse, error) - DeleteTaskQueue(ctx context.Context, request *DeleteTaskQueueRequest) error - CreateTasks(ctx context.Context, request *InternalCreateTasksRequest) (*CreateTasksResponse, error) - GetTasks(ctx context.Context, request *GetTasksRequest) (*InternalGetTasksResponse, error) - CompleteTask(ctx context.Context, request *CompleteTaskRequest) error - CompleteTasksLessThan(ctx context.Context, request *CompleteTasksLessThanRequest) (int, error) - GetTaskQueueUserData(ctx context.Context, request *GetTaskQueueUserDataRequest) (*InternalGetTaskQueueUserDataResponse, error) - UpdateTaskQueueUserData(ctx context.Context, request *InternalUpdateTaskQueueUserDataRequest) error - ListTaskQueueUserDataEntries(ctx context.Context, request *ListTaskQueueUserDataEntriesRequest) (*InternalListTaskQueueUserDataEntriesResponse, error) - GetTaskQueuesByBuildId(ctx context.Context, request *GetTaskQueuesByBuildIdRequest) ([]string, error) - CountTaskQueuesByBuildId(ctx context.Context, request *CountTaskQueuesByBuildIdRequest) (int, error) - } - // MetadataStore is a lower level of MetadataManager - MetadataStore interface { - Closeable - GetName() string - CreateNamespace(ctx context.Context, request *InternalCreateNamespaceRequest) (*CreateNamespaceResponse, error) - GetNamespace(ctx context.Context, request *GetNamespaceRequest) (*InternalGetNamespaceResponse, error) - UpdateNamespace(ctx context.Context, request *InternalUpdateNamespaceRequest) error - RenameNamespace(ctx context.Context, request *InternalRenameNamespaceRequest) error - DeleteNamespace(ctx context.Context, request *DeleteNamespaceRequest) error - DeleteNamespaceByName(ctx context.Context, request *DeleteNamespaceByNameRequest) error - ListNamespaces(ctx context.Context, request *InternalListNamespacesRequest) (*InternalListNamespacesResponse, error) - GetMetadata(ctx context.Context) (*GetMetadataResponse, error) - } - - // ClusterMetadataStore is a lower level of ClusterMetadataManager. - // There is no Internal constructs needed to abstract away at the interface level currently, - // so we can reimplement the ClusterMetadataManager and leave this as a placeholder. 
- ClusterMetadataStore interface { - Closeable - GetName() string - ListClusterMetadata(ctx context.Context, request *InternalListClusterMetadataRequest) (*InternalListClusterMetadataResponse, error) - GetClusterMetadata(ctx context.Context, request *InternalGetClusterMetadataRequest) (*InternalGetClusterMetadataResponse, error) - SaveClusterMetadata(ctx context.Context, request *InternalSaveClusterMetadataRequest) (bool, error) - DeleteClusterMetadata(ctx context.Context, request *InternalDeleteClusterMetadataRequest) error - // Membership APIs - GetClusterMembers(ctx context.Context, request *GetClusterMembersRequest) (*GetClusterMembersResponse, error) - UpsertClusterMembership(ctx context.Context, request *UpsertClusterMembershipRequest) error - PruneClusterMembership(ctx context.Context, request *PruneClusterMembershipRequest) error - } - - // ExecutionStore is used to manage workflow execution including mutable states / history / tasks. - ExecutionStore interface { - Closeable - GetName() string - GetHistoryBranchUtil() HistoryBranchUtil - - // The below three APIs are related to serialization/deserialization - CreateWorkflowExecution(ctx context.Context, request *InternalCreateWorkflowExecutionRequest) (*InternalCreateWorkflowExecutionResponse, error) - UpdateWorkflowExecution(ctx context.Context, request *InternalUpdateWorkflowExecutionRequest) error - ConflictResolveWorkflowExecution(ctx context.Context, request *InternalConflictResolveWorkflowExecutionRequest) error - - DeleteWorkflowExecution(ctx context.Context, request *DeleteWorkflowExecutionRequest) error - DeleteCurrentWorkflowExecution(ctx context.Context, request *DeleteCurrentWorkflowExecutionRequest) error - GetCurrentExecution(ctx context.Context, request *GetCurrentExecutionRequest) (*InternalGetCurrentExecutionResponse, error) - GetWorkflowExecution(ctx context.Context, request *GetWorkflowExecutionRequest) (*InternalGetWorkflowExecutionResponse, error) - SetWorkflowExecution(ctx context.Context, request *InternalSetWorkflowExecutionRequest) error - - // Scan related methods - ListConcreteExecutions(ctx context.Context, request *ListConcreteExecutionsRequest) (*InternalListConcreteExecutionsResponse, error) - - // Tasks related APIs - - // Hints for persistence implementaion regarding hisotry task readers - RegisterHistoryTaskReader(ctx context.Context, request *RegisterHistoryTaskReaderRequest) error - UnregisterHistoryTaskReader(ctx context.Context, request *UnregisterHistoryTaskReaderRequest) - UpdateHistoryTaskReaderProgress(ctx context.Context, request *UpdateHistoryTaskReaderProgressRequest) - - AddHistoryTasks(ctx context.Context, request *InternalAddHistoryTasksRequest) error - GetHistoryTasks(ctx context.Context, request *GetHistoryTasksRequest) (*InternalGetHistoryTasksResponse, error) - CompleteHistoryTask(ctx context.Context, request *CompleteHistoryTaskRequest) error - RangeCompleteHistoryTasks(ctx context.Context, request *RangeCompleteHistoryTasksRequest) error - - PutReplicationTaskToDLQ(ctx context.Context, request *PutReplicationTaskToDLQRequest) error - GetReplicationTasksFromDLQ(ctx context.Context, request *GetReplicationTasksFromDLQRequest) (*InternalGetReplicationTasksFromDLQResponse, error) - DeleteReplicationTaskFromDLQ(ctx context.Context, request *DeleteReplicationTaskFromDLQRequest) error - RangeDeleteReplicationTaskFromDLQ(ctx context.Context, request *RangeDeleteReplicationTaskFromDLQRequest) error - IsReplicationDLQEmpty(ctx context.Context, request *GetReplicationTasksFromDLQRequest) 
(bool, error) - - // The below are history V2 APIs - // V2 regards history events growing as a tree, decoupled from workflow concepts - - // AppendHistoryNodes add a node to history node table - AppendHistoryNodes(ctx context.Context, request *InternalAppendHistoryNodesRequest) error - // DeleteHistoryNodes delete a node from history node table - DeleteHistoryNodes(ctx context.Context, request *InternalDeleteHistoryNodesRequest) error - // ReadHistoryBranch returns history node data for a branch - ReadHistoryBranch(ctx context.Context, request *InternalReadHistoryBranchRequest) (*InternalReadHistoryBranchResponse, error) - // ForkHistoryBranch forks a new branch from a old branch - ForkHistoryBranch(ctx context.Context, request *InternalForkHistoryBranchRequest) error - // DeleteHistoryBranch removes a branch - DeleteHistoryBranch(ctx context.Context, request *InternalDeleteHistoryBranchRequest) error - // GetHistoryTree returns all branch information of a tree - GetHistoryTree(ctx context.Context, request *GetHistoryTreeRequest) (*InternalGetHistoryTreeResponse, error) - // GetAllHistoryTreeBranches returns all branches of all trees. - // Note that branches may be skipped or duplicated across pages if there are branches created or deleted while - // paginating through results. - GetAllHistoryTreeBranches(ctx context.Context, request *GetAllHistoryTreeBranchesRequest) (*InternalGetAllHistoryTreeBranchesResponse, error) - } - - // Queue is a store to enqueue and get messages - Queue interface { - Closeable - Init(ctx context.Context, blob *commonpb.DataBlob) error - EnqueueMessage(ctx context.Context, blob commonpb.DataBlob) error - ReadMessages(ctx context.Context, lastMessageID int64, maxCount int) ([]*QueueMessage, error) - DeleteMessagesBefore(ctx context.Context, messageID int64) error - UpdateAckLevel(ctx context.Context, metadata *InternalQueueMetadata) error - GetAckLevels(ctx context.Context) (*InternalQueueMetadata, error) - - EnqueueMessageToDLQ(ctx context.Context, blob commonpb.DataBlob) (int64, error) - ReadMessagesFromDLQ(ctx context.Context, firstMessageID int64, lastMessageID int64, pageSize int, pageToken []byte) ([]*QueueMessage, []byte, error) - DeleteMessageFromDLQ(ctx context.Context, messageID int64) error - RangeDeleteMessagesFromDLQ(ctx context.Context, firstMessageID int64, lastMessageID int64) error - UpdateDLQAckLevel(ctx context.Context, metadata *InternalQueueMetadata) error - GetDLQAckLevels(ctx context.Context) (*InternalQueueMetadata, error) - } - - // QueueMessage is the message that stores in the queue - QueueMessage struct { - QueueType QueueType `json:"queue_type"` - ID int64 `json:"message_id"` - Data []byte `json:"message_payload"` - Encoding string `json:"message_encoding"` - } - - InternalQueueMetadata struct { - Blob *commonpb.DataBlob - Version int64 - } - - // InternalGetOrCreateShardRequest is used by ShardStore to retrieve or create a shard. - // GetOrCreateShard should: if shard exists, return it. If not, call CreateShardInfo and - // create the shard with the returned value. 
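For illustration only (not part of the removed file): the get-or-create contract spelled out in the comment above could be satisfied by a store along these lines. memShardStore and its shards map are hypothetical; the request/response types and the CreateShardInfo callback are the ones declared just below, and the sketch assumes the surrounding package's context and commonpb imports.

// memShardStore is a toy in-memory store used only to illustrate the contract.
type memShardStore struct {
	shards map[int32]*commonpb.DataBlob
}

func (s *memShardStore) GetOrCreateShard(
	ctx context.Context,
	request *InternalGetOrCreateShardRequest,
) (*InternalGetOrCreateShardResponse, error) {
	// If the shard already exists, return it as-is.
	if blob, ok := s.shards[request.ShardID]; ok {
		return &InternalGetOrCreateShardResponse{ShardInfo: blob}, nil
	}
	// Otherwise ask the caller to build the initial shard info, then persist it.
	rangeID, shardInfo, err := request.CreateShardInfo()
	if err != nil {
		return nil, err
	}
	_ = rangeID // a real store would persist rangeID alongside the blob
	s.shards[request.ShardID] = shardInfo
	return &InternalGetOrCreateShardResponse{ShardInfo: shardInfo}, nil
}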
- InternalGetOrCreateShardRequest struct { - ShardID int32 - CreateShardInfo func() (rangeID int64, shardInfo *commonpb.DataBlob, err error) - LifecycleContext context.Context // cancelled when shard is unloaded - } - - // InternalGetOrCreateShardResponse is the response to GetShard - InternalGetOrCreateShardResponse struct { - ShardInfo *commonpb.DataBlob - } - - // InternalUpdateShardRequest is used by ShardStore to update a shard - InternalUpdateShardRequest struct { - ShardID int32 - RangeID int64 - Owner string - ShardInfo *commonpb.DataBlob - PreviousRangeID int64 - } - - InternalCreateTaskQueueRequest struct { - NamespaceID string - TaskQueue string - TaskType enumspb.TaskQueueType - RangeID int64 - TaskQueueInfo *commonpb.DataBlob - - TaskQueueKind enumspb.TaskQueueKind - ExpiryTime *time.Time - } - - InternalGetTaskQueueRequest struct { - NamespaceID string - TaskQueue string - TaskType enumspb.TaskQueueType - } - - InternalGetTaskQueueResponse struct { - RangeID int64 - TaskQueueInfo *commonpb.DataBlob - } - - InternalGetTaskQueueUserDataResponse struct { - Version int64 - UserData *commonpb.DataBlob - } - - InternalUpdateTaskQueueRequest struct { - NamespaceID string - TaskQueue string - TaskType enumspb.TaskQueueType - RangeID int64 - TaskQueueInfo *commonpb.DataBlob - - TaskQueueKind enumspb.TaskQueueKind - ExpiryTime *time.Time - - PrevRangeID int64 - } - - InternalUpdateTaskQueueUserDataRequest struct { - NamespaceID string - TaskQueue string - Version int64 - UserData *commonpb.DataBlob - // Used to build an index of build_id to task_queues - BuildIdsAdded []string - BuildIdsRemoved []string - } - - InternalTaskQueueUserDataEntry struct { - TaskQueue string - Data *commonpb.DataBlob - Version int64 - } - - InternalListTaskQueueUserDataEntriesResponse struct { - NextPageToken []byte - Entries []InternalTaskQueueUserDataEntry - } - - InternalCreateTasksRequest struct { - NamespaceID string - TaskQueue string - TaskType enumspb.TaskQueueType - RangeID int64 - TaskQueueInfo *commonpb.DataBlob - Tasks []*InternalCreateTask - } - - InternalCreateTask struct { - TaskId int64 - ExpiryTime *time.Time - Task *commonpb.DataBlob - } - - InternalGetTasksResponse struct { - Tasks []*commonpb.DataBlob - NextPageToken []byte - } - - InternalListTaskQueueResponse struct { - Items []*InternalListTaskQueueItem - NextPageToken []byte - } - - InternalListTaskQueueItem struct { - TaskQueue *commonpb.DataBlob // serialized PersistedTaskQueueInfo - RangeID int64 - } - - // DataBlob represents a blob for any binary data. 
- // It contains raw data, and metadata(right now only encoding) in other field - // Note that it should be only used for Persistence layer, below dataInterface and application(historyEngine/etc) - - // InternalCreateWorkflowExecutionRequest is used to write a new workflow execution - InternalCreateWorkflowExecutionRequest struct { - ShardID int32 - RangeID int64 - - Mode CreateWorkflowMode - - PreviousRunID string - PreviousLastWriteVersion int64 - - NewWorkflowSnapshot InternalWorkflowSnapshot - NewWorkflowNewEvents []*InternalAppendHistoryNodesRequest - } - - // InternalCreateWorkflowExecutionResponse is the response from persistence for create new workflow execution - InternalCreateWorkflowExecutionResponse struct { - } - - // InternalUpdateWorkflowExecutionRequest is used to update a workflow execution for Persistence Interface - InternalUpdateWorkflowExecutionRequest struct { - ShardID int32 - RangeID int64 - - Mode UpdateWorkflowMode - - UpdateWorkflowMutation InternalWorkflowMutation - UpdateWorkflowNewEvents []*InternalAppendHistoryNodesRequest - NewWorkflowSnapshot *InternalWorkflowSnapshot - NewWorkflowNewEvents []*InternalAppendHistoryNodesRequest - } - - // InternalConflictResolveWorkflowExecutionRequest is used to reset workflow execution state for Persistence Interface - InternalConflictResolveWorkflowExecutionRequest struct { - ShardID int32 - RangeID int64 - - Mode ConflictResolveWorkflowMode - - // workflow to be resetted - ResetWorkflowSnapshot InternalWorkflowSnapshot - ResetWorkflowEventsNewEvents []*InternalAppendHistoryNodesRequest - // maybe new workflow - NewWorkflowSnapshot *InternalWorkflowSnapshot - NewWorkflowEventsNewEvents []*InternalAppendHistoryNodesRequest - - // current workflow - CurrentWorkflowMutation *InternalWorkflowMutation - CurrentWorkflowEventsNewEvents []*InternalAppendHistoryNodesRequest - } - InternalSetWorkflowExecutionRequest struct { - ShardID int32 - RangeID int64 - - SetWorkflowSnapshot InternalWorkflowSnapshot - } - - // InternalWorkflowMutableState indicates workflow related state for Persistence Interface - InternalWorkflowMutableState struct { - ActivityInfos map[int64]*commonpb.DataBlob // ActivityInfo - TimerInfos map[string]*commonpb.DataBlob // TimerInfo - ChildExecutionInfos map[int64]*commonpb.DataBlob // ChildExecutionInfo - RequestCancelInfos map[int64]*commonpb.DataBlob // RequestCancelInfo - SignalInfos map[int64]*commonpb.DataBlob // SignalInfo - SignalRequestedIDs []string - ExecutionInfo *commonpb.DataBlob // WorkflowExecutionInfo - ExecutionState *commonpb.DataBlob // WorkflowExecutionState - NextEventID int64 - BufferedEvents []*commonpb.DataBlob - Checksum *commonpb.DataBlob // persistencespb.Checksum - DBRecordVersion int64 - } - - InternalHistoryTask struct { - Key tasks.Key - Blob commonpb.DataBlob - } - - // InternalAddHistoryTasksRequest is used to write new tasks - InternalAddHistoryTasksRequest struct { - ShardID int32 - RangeID int64 - - NamespaceID string - WorkflowID string - RunID string - - Tasks map[tasks.Category][]InternalHistoryTask - } - - // InternalWorkflowMutation is used as generic workflow execution state mutation for Persistence Interface - InternalWorkflowMutation struct { - // TODO: properly set this on call sites - NamespaceID string - WorkflowID string - RunID string - - ExecutionInfo *persistencespb.WorkflowExecutionInfo - ExecutionInfoBlob *commonpb.DataBlob - ExecutionState *persistencespb.WorkflowExecutionState - ExecutionStateBlob *commonpb.DataBlob - NextEventID int64 - StartVersion 
int64 - LastWriteVersion int64 - DBRecordVersion int64 - - UpsertActivityInfos map[int64]*commonpb.DataBlob - DeleteActivityInfos map[int64]struct{} - UpsertTimerInfos map[string]*commonpb.DataBlob - DeleteTimerInfos map[string]struct{} - UpsertChildExecutionInfos map[int64]*commonpb.DataBlob - DeleteChildExecutionInfos map[int64]struct{} - UpsertRequestCancelInfos map[int64]*commonpb.DataBlob - DeleteRequestCancelInfos map[int64]struct{} - UpsertSignalInfos map[int64]*commonpb.DataBlob - DeleteSignalInfos map[int64]struct{} - UpsertSignalRequestedIDs map[string]struct{} - DeleteSignalRequestedIDs map[string]struct{} - NewBufferedEvents *commonpb.DataBlob - ClearBufferedEvents bool - - Tasks map[tasks.Category][]InternalHistoryTask - - Condition int64 - - Checksum *commonpb.DataBlob - } - - // InternalWorkflowSnapshot is used as generic workflow execution state snapshot for Persistence Interface - InternalWorkflowSnapshot struct { - // TODO: properly set this on call sites - NamespaceID string - WorkflowID string - RunID string - - ExecutionInfo *persistencespb.WorkflowExecutionInfo - ExecutionInfoBlob *commonpb.DataBlob - ExecutionState *persistencespb.WorkflowExecutionState - ExecutionStateBlob *commonpb.DataBlob - StartVersion int64 - LastWriteVersion int64 - NextEventID int64 - DBRecordVersion int64 - - ActivityInfos map[int64]*commonpb.DataBlob - TimerInfos map[string]*commonpb.DataBlob - ChildExecutionInfos map[int64]*commonpb.DataBlob - RequestCancelInfos map[int64]*commonpb.DataBlob - SignalInfos map[int64]*commonpb.DataBlob - SignalRequestedIDs map[string]struct{} - - Tasks map[tasks.Category][]InternalHistoryTask - - Condition int64 - - Checksum *commonpb.DataBlob - } - - InternalGetCurrentExecutionResponse struct { - RunID string - ExecutionState *persistencespb.WorkflowExecutionState - } - - // InternalHistoryNode represent a history node metadata - InternalHistoryNode struct { - // The first eventID becomes the nodeID to be appended - NodeID int64 - // requested TransactionID for this write operation. For the same eventID, the node with larger TransactionID always wins - TransactionID int64 - // TransactionID for events before these events. 
For events chaining - PrevTransactionID int64 - // The events to be appended - Events *commonpb.DataBlob - } - - // InternalAppendHistoryNodesRequest is used to append a batch of history nodes - InternalAppendHistoryNodesRequest struct { - // The raw branch token - BranchToken []byte - // True if it is the first append request to the branch - IsNewBranch bool - // The info for clean up data in background - Info string - // The branch to be appended - BranchInfo *persistencespb.HistoryBranch - // Serialized TreeInfo - TreeInfo *commonpb.DataBlob - // The history node - Node InternalHistoryNode - // Used in sharded data stores to identify which shard to use - ShardID int32 - } - - // InternalGetWorkflowExecutionResponse is the response to GetworkflowExecution for Persistence Interface - InternalGetWorkflowExecutionResponse struct { - State *InternalWorkflowMutableState - DBRecordVersion int64 - } - - // InternalListConcreteExecutionsResponse is the response to ListConcreteExecutions for Persistence Interface - InternalListConcreteExecutionsResponse struct { - States []*InternalWorkflowMutableState - NextPageToken []byte - } - - InternalGetHistoryTaskResponse struct { - InternalHistoryTask - } - - InternalGetHistoryTasksResponse struct { - Tasks []InternalHistoryTask - NextPageToken []byte - } - - InternalGetReplicationTasksFromDLQResponse = InternalGetHistoryTasksResponse - - // InternalForkHistoryBranchRequest is used to fork a history branch - InternalForkHistoryBranchRequest struct { - // The base branch to fork from - ForkBranchInfo *persistencespb.HistoryBranch - // Serialized TreeInfo - TreeInfo *commonpb.DataBlob - // The nodeID to fork from, the new branch will start from ( inclusive ), the base branch will stop at(exclusive) - ForkNodeID int64 - // branchID of the new branch - NewBranchID string - // the info for clean up data in background - Info string - // Used in sharded data stores to identify which shard to use - ShardID int32 - } - - // InternalDeleteHistoryNodesRequest is used to remove a history node - InternalDeleteHistoryNodesRequest struct { - // The raw branch token - BranchToken []byte - // Used in sharded data stores to identify which shard to use - ShardID int32 - // The branch to be appended - BranchInfo *persistencespb.HistoryBranch - // node ID of the history node - NodeID int64 - // transaction ID of the history node - TransactionID int64 - } - - // InternalDeleteHistoryBranchRequest is used to remove a history branch - InternalDeleteHistoryBranchRequest struct { - // The raw branch token - BranchToken []byte - // The branch - BranchInfo *persistencespb.HistoryBranch - // Used in sharded data stores to identify which shard to use - ShardID int32 - // branch ranges is used to delete range of history nodes from target branch and it ancestors. - BranchRanges []InternalDeleteHistoryBranchRange - } - - // InternalDeleteHistoryBranchRange is used to delete a range of history nodes of a branch - InternalDeleteHistoryBranchRange struct { - BranchId string - BeginNodeId int64 // delete nodes with ID >= BeginNodeId - } - - // InternalReadHistoryBranchRequest is used to read a history branch - InternalReadHistoryBranchRequest struct { - // The raw branch token - BranchToken []byte - // The branch range to be read - BranchID string - // Get the history nodes from MinNodeID. Inclusive. - MinNodeID int64 - // Get the history nodes upto MaxNodeID. Exclusive. 
- MaxNodeID int64 - // passing thru for pagination - PageSize int - // Pagination token - NextPageToken []byte - // Used in sharded data stores to identify which shard to use - ShardID int32 - // whether to only return metadata, excluding node content - MetadataOnly bool - // whether we iterate in reverse order - ReverseOrder bool - } - - // InternalCompleteForkBranchRequest is used to update some tree/branch meta data for forking - InternalCompleteForkBranchRequest struct { - // branch to be updated - BranchInfo persistencespb.HistoryBranch - // whether fork is successful - Success bool - // Used in sharded data stores to identify which shard to use - ShardID int32 - } - - // InternalReadHistoryBranchResponse is the response to ReadHistoryBranchRequest - InternalReadHistoryBranchResponse struct { - // History nodes - Nodes []InternalHistoryNode - // Pagination token - NextPageToken []byte - } - - // InternalGetAllHistoryTreeBranchesResponse is response to GetAllHistoryTreeBranches - // Only used by persistence layer - InternalGetAllHistoryTreeBranchesResponse struct { - // pagination token - NextPageToken []byte - // all branches of all trees - Branches []InternalHistoryBranchDetail - } - - // InternalHistoryBranchDetail used by InternalGetAllHistoryTreeBranchesResponse - InternalHistoryBranchDetail struct { - TreeID string - BranchID string - Encoding string - Data []byte // HistoryTreeInfo blob - } - - // InternalGetHistoryTreeResponse is response to GetHistoryTree - // Only used by persistence layer - InternalGetHistoryTreeResponse struct { - // TreeInfos - TreeInfos []*commonpb.DataBlob - } - - // InternalCreateNamespaceRequest is used to create the namespace - InternalCreateNamespaceRequest struct { - ID string - Name string - Namespace *commonpb.DataBlob - IsGlobal bool - } - - // InternalGetNamespaceResponse is the response for GetNamespace - InternalGetNamespaceResponse struct { - Namespace *commonpb.DataBlob - IsGlobal bool - NotificationVersion int64 - } - - // InternalUpdateNamespaceRequest is used to update namespace - InternalUpdateNamespaceRequest struct { - Id string - Name string - Namespace *commonpb.DataBlob - NotificationVersion int64 - IsGlobal bool - } - - InternalRenameNamespaceRequest struct { - *InternalUpdateNamespaceRequest - PreviousName string - } - - InternalListNamespacesRequest struct { - PageSize int - NextPageToken []byte - } - - // InternalListNamespacesResponse is the response for GetNamespace - InternalListNamespacesResponse struct { - Namespaces []*InternalGetNamespaceResponse - NextPageToken []byte - } - - // InternalListClusterMetadataRequest is the request for ListClusterMetadata - InternalListClusterMetadataRequest struct { - PageSize int - NextPageToken []byte - } - - // InternalListClusterMetadataResponse is the response for ListClusterMetadata - InternalListClusterMetadataResponse struct { - ClusterMetadata []*InternalGetClusterMetadataResponse - NextPageToken []byte - } - - // InternalGetClusterMetadataRequest is the request for GetClusterMetadata - InternalGetClusterMetadataRequest struct { - ClusterName string - } - - // InternalGetClusterMetadataResponse is the response for GetClusterMetadata - InternalGetClusterMetadataResponse struct { - // Serialized MutableCusterMetadata. - ClusterMetadata *commonpb.DataBlob - Version int64 - } - - // InternalSaveClusterMetadataRequest is the request for SaveClusterMetadata - InternalSaveClusterMetadataRequest struct { - ClusterName string - // Serialized MutableCusterMetadata. 
- ClusterMetadata *commonpb.DataBlob - Version int64 - } - - // InternalDeleteClusterMetadataRequest is the request for DeleteClusterMetadata - InternalDeleteClusterMetadataRequest struct { - ClusterName string - } - - // InternalUpsertClusterMembershipRequest is the request to UpsertClusterMembership - InternalUpsertClusterMembershipRequest struct { - ClusterMember - RecordExpiry time.Time - } -) diff -Nru temporal-1.21.5-1/src/common/persistence/persistenceMetricClients.go temporal-1.22.5/src/common/persistence/persistenceMetricClients.go --- temporal-1.21.5-1/src/common/persistence/persistenceMetricClients.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/persistenceMetricClients.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,1285 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- -package persistence - -import ( - "context" - "fmt" - "time" - - commonpb "go.temporal.io/api/common/v1" - "go.temporal.io/api/serviceerror" - "go.temporal.io/server/common/headers" - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/log/tag" - "go.temporal.io/server/common/metrics" - "go.temporal.io/server/service/history/tasks" -) - -type ( - metricEmitter struct { - metricsHandler metrics.Handler - logger log.Logger - } - - shardPersistenceClient struct { - metricEmitter - healthSignals HealthSignalAggregator - persistence ShardManager - } - - executionPersistenceClient struct { - metricEmitter - healthSignals HealthSignalAggregator - persistence ExecutionManager - } - - taskPersistenceClient struct { - metricEmitter - healthSignals HealthSignalAggregator - persistence TaskManager - } - - metadataPersistenceClient struct { - metricEmitter - healthSignals HealthSignalAggregator - persistence MetadataManager - } - - clusterMetadataPersistenceClient struct { - metricEmitter - healthSignals HealthSignalAggregator - persistence ClusterMetadataManager - } - - queuePersistenceClient struct { - metricEmitter - healthSignals HealthSignalAggregator - persistence Queue - } -) - -var _ ShardManager = (*shardPersistenceClient)(nil) -var _ ExecutionManager = (*executionPersistenceClient)(nil) -var _ TaskManager = (*taskPersistenceClient)(nil) -var _ MetadataManager = (*metadataPersistenceClient)(nil) -var _ ClusterMetadataManager = (*clusterMetadataPersistenceClient)(nil) -var _ Queue = (*queuePersistenceClient)(nil) - -// NewShardPersistenceMetricsClient creates a client to manage shards -func NewShardPersistenceMetricsClient(persistence ShardManager, metricsHandler metrics.Handler, healthSignals HealthSignalAggregator, logger log.Logger) ShardManager { - return &shardPersistenceClient{ - metricEmitter: metricEmitter{ - metricsHandler: metricsHandler, - logger: logger, - }, - healthSignals: healthSignals, - persistence: persistence, - } -} - -// NewExecutionPersistenceMetricsClient creates a client to manage executions -func NewExecutionPersistenceMetricsClient(persistence ExecutionManager, metricsHandler metrics.Handler, healthSignals HealthSignalAggregator, logger log.Logger) ExecutionManager { - return &executionPersistenceClient{ - metricEmitter: metricEmitter{ - metricsHandler: metricsHandler, - logger: logger, - }, - healthSignals: healthSignals, - persistence: persistence, - } -} - -// NewTaskPersistenceMetricsClient creates a client to manage tasks -func NewTaskPersistenceMetricsClient(persistence TaskManager, metricsHandler metrics.Handler, healthSignals HealthSignalAggregator, logger log.Logger) TaskManager { - return &taskPersistenceClient{ - metricEmitter: metricEmitter{ - metricsHandler: metricsHandler, - logger: logger, - }, - healthSignals: healthSignals, - persistence: persistence, - } -} - -// NewMetadataPersistenceMetricsClient creates a MetadataManager client to manage metadata -func NewMetadataPersistenceMetricsClient(persistence MetadataManager, metricsHandler metrics.Handler, healthSignals HealthSignalAggregator, logger log.Logger) MetadataManager { - return &metadataPersistenceClient{ - metricEmitter: metricEmitter{ - metricsHandler: metricsHandler, - logger: logger, - }, - healthSignals: healthSignals, - persistence: persistence, - } -} - -// NewClusterMetadataPersistenceMetricsClient creates a ClusterMetadataManager client to manage cluster metadata -func NewClusterMetadataPersistenceMetricsClient(persistence ClusterMetadataManager, metricsHandler 
metrics.Handler, healthSignals HealthSignalAggregator, logger log.Logger) ClusterMetadataManager { - return &clusterMetadataPersistenceClient{ - metricEmitter: metricEmitter{ - metricsHandler: metricsHandler, - logger: logger, - }, - healthSignals: healthSignals, - persistence: persistence, - } -} - -// NewQueuePersistenceMetricsClient creates a client to manage queue -func NewQueuePersistenceMetricsClient(persistence Queue, metricsHandler metrics.Handler, healthSignals HealthSignalAggregator, logger log.Logger) Queue { - return &queuePersistenceClient{ - metricEmitter: metricEmitter{ - metricsHandler: metricsHandler, - logger: logger, - }, - healthSignals: healthSignals, - persistence: persistence, - } -} - -func (p *shardPersistenceClient) GetName() string { - return p.persistence.GetName() -} - -func (p *shardPersistenceClient) GetOrCreateShard( - ctx context.Context, - request *GetOrCreateShardRequest, -) (_ *GetOrCreateShardResponse, retErr error) { - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - latency := time.Since(startTime) - p.healthSignals.Record(request.ShardID, caller, latency, retErr) - p.recordRequestMetrics(metrics.PersistenceGetOrCreateShardScope, caller, latency, retErr) - }() - return p.persistence.GetOrCreateShard(ctx, request) -} - -func (p *shardPersistenceClient) UpdateShard( - ctx context.Context, - request *UpdateShardRequest, -) (retErr error) { - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - p.healthSignals.Record(request.ShardInfo.GetShardId(), caller, time.Since(startTime), retErr) - p.recordRequestMetrics(metrics.PersistenceUpdateShardScope, caller, time.Since(startTime), retErr) - }() - return p.persistence.UpdateShard(ctx, request) -} - -func (p *shardPersistenceClient) AssertShardOwnership( - ctx context.Context, - request *AssertShardOwnershipRequest, -) (retErr error) { - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - p.healthSignals.Record(request.ShardID, caller, time.Since(startTime), retErr) - p.recordRequestMetrics(metrics.PersistenceAssertShardOwnershipScope, caller, time.Since(startTime), retErr) - }() - return p.persistence.AssertShardOwnership(ctx, request) -} - -func (p *shardPersistenceClient) Close() { - p.persistence.Close() -} - -func (p *executionPersistenceClient) GetName() string { - return p.persistence.GetName() -} - -func (p *executionPersistenceClient) GetHistoryBranchUtil() HistoryBranchUtil { - return p.persistence.GetHistoryBranchUtil() -} - -func (p *executionPersistenceClient) CreateWorkflowExecution( - ctx context.Context, - request *CreateWorkflowExecutionRequest, -) (_ *CreateWorkflowExecutionResponse, retErr error) { - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - p.healthSignals.Record(request.ShardID, caller, time.Since(startTime), retErr) - p.recordRequestMetrics(metrics.PersistenceCreateWorkflowExecutionScope, caller, time.Since(startTime), retErr) - }() - return p.persistence.CreateWorkflowExecution(ctx, request) -} - -func (p *executionPersistenceClient) GetWorkflowExecution( - ctx context.Context, - request *GetWorkflowExecutionRequest, -) (_ *GetWorkflowExecutionResponse, retErr error) { - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - p.healthSignals.Record(request.ShardID, caller, time.Since(startTime), retErr) - 
p.recordRequestMetrics(metrics.PersistenceGetWorkflowExecutionScope, caller, time.Since(startTime), retErr) - }() - return p.persistence.GetWorkflowExecution(ctx, request) -} - -func (p *executionPersistenceClient) SetWorkflowExecution( - ctx context.Context, - request *SetWorkflowExecutionRequest, -) (_ *SetWorkflowExecutionResponse, retErr error) { - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - p.healthSignals.Record(request.ShardID, caller, time.Since(startTime), retErr) - p.recordRequestMetrics(metrics.PersistenceSetWorkflowExecutionScope, caller, time.Since(startTime), retErr) - }() - return p.persistence.SetWorkflowExecution(ctx, request) -} - -func (p *executionPersistenceClient) UpdateWorkflowExecution( - ctx context.Context, - request *UpdateWorkflowExecutionRequest, -) (_ *UpdateWorkflowExecutionResponse, retErr error) { - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - p.healthSignals.Record(request.ShardID, caller, time.Since(startTime), retErr) - p.recordRequestMetrics(metrics.PersistenceUpdateWorkflowExecutionScope, caller, time.Since(startTime), retErr) - }() - return p.persistence.UpdateWorkflowExecution(ctx, request) -} - -func (p *executionPersistenceClient) ConflictResolveWorkflowExecution( - ctx context.Context, - request *ConflictResolveWorkflowExecutionRequest, -) (_ *ConflictResolveWorkflowExecutionResponse, retErr error) { - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - p.healthSignals.Record(request.ShardID, caller, time.Since(startTime), retErr) - p.recordRequestMetrics(metrics.PersistenceConflictResolveWorkflowExecutionScope, caller, time.Since(startTime), retErr) - }() - return p.persistence.ConflictResolveWorkflowExecution(ctx, request) -} - -func (p *executionPersistenceClient) DeleteWorkflowExecution( - ctx context.Context, - request *DeleteWorkflowExecutionRequest, -) (retErr error) { - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - p.healthSignals.Record(request.ShardID, caller, time.Since(startTime), retErr) - p.recordRequestMetrics(metrics.PersistenceDeleteWorkflowExecutionScope, caller, time.Since(startTime), retErr) - }() - return p.persistence.DeleteWorkflowExecution(ctx, request) -} - -func (p *executionPersistenceClient) DeleteCurrentWorkflowExecution( - ctx context.Context, - request *DeleteCurrentWorkflowExecutionRequest, -) (retErr error) { - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - p.healthSignals.Record(request.ShardID, caller, time.Since(startTime), retErr) - p.recordRequestMetrics(metrics.PersistenceDeleteCurrentWorkflowExecutionScope, caller, time.Since(startTime), retErr) - }() - return p.persistence.DeleteCurrentWorkflowExecution(ctx, request) -} - -func (p *executionPersistenceClient) GetCurrentExecution( - ctx context.Context, - request *GetCurrentExecutionRequest, -) (_ *GetCurrentExecutionResponse, retErr error) { - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - p.healthSignals.Record(request.ShardID, caller, time.Since(startTime), retErr) - p.recordRequestMetrics(metrics.PersistenceGetCurrentExecutionScope, caller, time.Since(startTime), retErr) - }() - return p.persistence.GetCurrentExecution(ctx, request) -} - -func (p *executionPersistenceClient) ListConcreteExecutions( - ctx context.Context, - request 
*ListConcreteExecutionsRequest, -) (_ *ListConcreteExecutionsResponse, retErr error) { - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - p.healthSignals.Record(request.ShardID, caller, time.Since(startTime), retErr) - p.recordRequestMetrics(metrics.PersistenceListConcreteExecutionsScope, caller, time.Since(startTime), retErr) - }() - return p.persistence.ListConcreteExecutions(ctx, request) -} - -func (p *executionPersistenceClient) RegisterHistoryTaskReader( - ctx context.Context, - request *RegisterHistoryTaskReaderRequest, -) error { - // hint methods won't go through persistence rate limiter - // so also not emitting any persistence request/error metrics - return p.persistence.RegisterHistoryTaskReader(ctx, request) -} - -func (p *executionPersistenceClient) UnregisterHistoryTaskReader( - ctx context.Context, - request *UnregisterHistoryTaskReaderRequest, -) { - // hint methods won't go through persistence rate limiter - // so also not emitting any persistence request/error metrics - p.persistence.UnregisterHistoryTaskReader(ctx, request) -} - -func (p *executionPersistenceClient) UpdateHistoryTaskReaderProgress( - ctx context.Context, - request *UpdateHistoryTaskReaderProgressRequest, -) { - // hint methods won't go through persistence rate limiter - // so also not emitting any persistence request/error metrics - p.persistence.UpdateHistoryTaskReaderProgress(ctx, request) -} - -func (p *executionPersistenceClient) AddHistoryTasks( - ctx context.Context, - request *AddHistoryTasksRequest, -) (retErr error) { - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - p.healthSignals.Record(request.ShardID, caller, time.Since(startTime), retErr) - p.recordRequestMetrics(metrics.PersistenceAddTasksScope, caller, time.Since(startTime), retErr) - }() - return p.persistence.AddHistoryTasks(ctx, request) -} - -func (p *executionPersistenceClient) GetHistoryTasks( - ctx context.Context, - request *GetHistoryTasksRequest, -) (_ *GetHistoryTasksResponse, retErr error) { - var operation string - switch request.TaskCategory.ID() { - case tasks.CategoryIDTransfer: - operation = metrics.PersistenceGetTransferTasksScope - case tasks.CategoryIDTimer: - operation = metrics.PersistenceGetTimerTasksScope - case tasks.CategoryIDVisibility: - operation = metrics.PersistenceGetVisibilityTasksScope - case tasks.CategoryIDReplication: - operation = metrics.PersistenceGetReplicationTasksScope - case tasks.CategoryIDArchival: - operation = metrics.PersistenceGetArchivalTasksScope - default: - return nil, serviceerror.NewInternal(fmt.Sprintf("unknown task category type: %v", request.TaskCategory)) - } - - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - p.healthSignals.Record(request.ShardID, caller, time.Since(startTime), retErr) - p.recordRequestMetrics(operation, caller, time.Since(startTime), retErr) - }() - return p.persistence.GetHistoryTasks(ctx, request) -} - -func (p *executionPersistenceClient) CompleteHistoryTask( - ctx context.Context, - request *CompleteHistoryTaskRequest, -) (retErr error) { - var operation string - switch request.TaskCategory.ID() { - case tasks.CategoryIDTransfer: - operation = metrics.PersistenceCompleteTransferTaskScope - case tasks.CategoryIDTimer: - operation = metrics.PersistenceCompleteTimerTaskScope - case tasks.CategoryIDVisibility: - operation = metrics.PersistenceCompleteVisibilityTaskScope - case tasks.CategoryIDReplication: - 
operation = metrics.PersistenceCompleteReplicationTaskScope - case tasks.CategoryIDArchival: - operation = metrics.PersistenceCompleteArchivalTaskScope - default: - return serviceerror.NewInternal(fmt.Sprintf("unknown task category type: %v", request.TaskCategory)) - } - - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - p.healthSignals.Record(request.ShardID, caller, time.Since(startTime), retErr) - p.recordRequestMetrics(operation, caller, time.Since(startTime), retErr) - }() - return p.persistence.CompleteHistoryTask(ctx, request) -} - -func (p *executionPersistenceClient) RangeCompleteHistoryTasks( - ctx context.Context, - request *RangeCompleteHistoryTasksRequest, -) (retErr error) { - var operation string - switch request.TaskCategory.ID() { - case tasks.CategoryIDTransfer: - operation = metrics.PersistenceRangeCompleteTransferTasksScope - case tasks.CategoryIDTimer: - operation = metrics.PersistenceRangeCompleteTimerTasksScope - case tasks.CategoryIDVisibility: - operation = metrics.PersistenceRangeCompleteVisibilityTasksScope - case tasks.CategoryIDReplication: - operation = metrics.PersistenceRangeCompleteReplicationTasksScope - case tasks.CategoryIDArchival: - operation = metrics.PersistenceRangeCompleteArchivalTasksScope - default: - return serviceerror.NewInternal(fmt.Sprintf("unknown task category type: %v", request.TaskCategory)) - } - - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - p.healthSignals.Record(request.ShardID, caller, time.Since(startTime), retErr) - p.recordRequestMetrics(operation, caller, time.Since(startTime), retErr) - }() - return p.persistence.RangeCompleteHistoryTasks(ctx, request) -} - -func (p *executionPersistenceClient) PutReplicationTaskToDLQ( - ctx context.Context, - request *PutReplicationTaskToDLQRequest, -) (retErr error) { - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - p.healthSignals.Record(request.ShardID, caller, time.Since(startTime), retErr) - p.recordRequestMetrics(metrics.PersistencePutReplicationTaskToDLQScope, caller, time.Since(startTime), retErr) - }() - return p.persistence.PutReplicationTaskToDLQ(ctx, request) -} - -func (p *executionPersistenceClient) GetReplicationTasksFromDLQ( - ctx context.Context, - request *GetReplicationTasksFromDLQRequest, -) (_ *GetHistoryTasksResponse, retErr error) { - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - p.healthSignals.Record(request.ShardID, caller, time.Since(startTime), retErr) - p.recordRequestMetrics(metrics.PersistenceGetReplicationTasksFromDLQScope, caller, time.Since(startTime), retErr) - }() - return p.persistence.GetReplicationTasksFromDLQ(ctx, request) -} - -func (p *executionPersistenceClient) DeleteReplicationTaskFromDLQ( - ctx context.Context, - request *DeleteReplicationTaskFromDLQRequest, -) (retErr error) { - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - p.healthSignals.Record(request.ShardID, caller, time.Since(startTime), retErr) - p.recordRequestMetrics(metrics.PersistenceDeleteReplicationTaskFromDLQScope, caller, time.Since(startTime), retErr) - }() - return p.persistence.DeleteReplicationTaskFromDLQ(ctx, request) -} - -func (p *executionPersistenceClient) RangeDeleteReplicationTaskFromDLQ( - ctx context.Context, - request *RangeDeleteReplicationTaskFromDLQRequest, -) (retErr error) { - caller := 
headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - p.healthSignals.Record(request.ShardID, caller, time.Since(startTime), retErr) - p.recordRequestMetrics(metrics.PersistenceRangeDeleteReplicationTaskFromDLQScope, caller, time.Since(startTime), retErr) - }() - return p.persistence.RangeDeleteReplicationTaskFromDLQ(ctx, request) -} - -func (p *executionPersistenceClient) IsReplicationDLQEmpty( - ctx context.Context, - request *GetReplicationTasksFromDLQRequest, -) (_ bool, retErr error) { - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - p.healthSignals.Record(request.ShardID, caller, time.Since(startTime), retErr) - p.recordRequestMetrics(metrics.PersistenceGetReplicationTasksFromDLQScope, caller, time.Since(startTime), retErr) - }() - return p.persistence.IsReplicationDLQEmpty(ctx, request) -} - -func (p *executionPersistenceClient) Close() { - p.persistence.Close() -} - -func (p *taskPersistenceClient) GetName() string { - return p.persistence.GetName() -} - -func (p *taskPersistenceClient) CreateTasks( - ctx context.Context, - request *CreateTasksRequest, -) (_ *CreateTasksResponse, retErr error) { - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) - p.recordRequestMetrics(metrics.PersistenceCreateTasksScope, caller, time.Since(startTime), retErr) - }() - return p.persistence.CreateTasks(ctx, request) -} - -func (p *taskPersistenceClient) GetTasks( - ctx context.Context, - request *GetTasksRequest, -) (_ *GetTasksResponse, retErr error) { - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) - p.recordRequestMetrics(metrics.PersistenceGetTasksScope, caller, time.Since(startTime), retErr) - }() - return p.persistence.GetTasks(ctx, request) -} - -func (p *taskPersistenceClient) CompleteTask( - ctx context.Context, - request *CompleteTaskRequest, -) (retErr error) { - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) - p.recordRequestMetrics(metrics.PersistenceCompleteTaskScope, caller, time.Since(startTime), retErr) - }() - return p.persistence.CompleteTask(ctx, request) -} - -func (p *taskPersistenceClient) CompleteTasksLessThan( - ctx context.Context, - request *CompleteTasksLessThanRequest, -) (_ int, retErr error) { - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) - p.recordRequestMetrics(metrics.PersistenceCompleteTasksLessThanScope, caller, time.Since(startTime), retErr) - }() - return p.persistence.CompleteTasksLessThan(ctx, request) -} - -func (p *taskPersistenceClient) CreateTaskQueue( - ctx context.Context, - request *CreateTaskQueueRequest, -) (_ *CreateTaskQueueResponse, retErr error) { - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) - p.recordRequestMetrics(metrics.PersistenceCreateTaskQueueScope, caller, time.Since(startTime), retErr) - }() - return p.persistence.CreateTaskQueue(ctx, request) -} - -func (p *taskPersistenceClient) 
UpdateTaskQueue( - ctx context.Context, - request *UpdateTaskQueueRequest, -) (_ *UpdateTaskQueueResponse, retErr error) { - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) - p.recordRequestMetrics(metrics.PersistenceUpdateTaskQueueScope, caller, time.Since(startTime), retErr) - }() - return p.persistence.UpdateTaskQueue(ctx, request) -} - -func (p *taskPersistenceClient) GetTaskQueue( - ctx context.Context, - request *GetTaskQueueRequest, -) (_ *GetTaskQueueResponse, retErr error) { - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) - p.recordRequestMetrics(metrics.PersistenceGetTaskQueueScope, caller, time.Since(startTime), retErr) - }() - return p.persistence.GetTaskQueue(ctx, request) -} - -func (p *taskPersistenceClient) ListTaskQueue( - ctx context.Context, - request *ListTaskQueueRequest, -) (_ *ListTaskQueueResponse, retErr error) { - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) - p.recordRequestMetrics(metrics.PersistenceListTaskQueueScope, caller, time.Since(startTime), retErr) - }() - return p.persistence.ListTaskQueue(ctx, request) -} - -func (p *taskPersistenceClient) DeleteTaskQueue( - ctx context.Context, - request *DeleteTaskQueueRequest, -) (retErr error) { - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) - p.recordRequestMetrics(metrics.PersistenceDeleteTaskQueueScope, caller, time.Since(startTime), retErr) - }() - return p.persistence.DeleteTaskQueue(ctx, request) -} - -func (p *taskPersistenceClient) GetTaskQueueUserData( - ctx context.Context, - request *GetTaskQueueUserDataRequest, -) (_ *GetTaskQueueUserDataResponse, retErr error) { - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) - p.recordRequestMetrics(metrics.PersistenceGetTaskQueueUserDataScope, caller, time.Since(startTime), retErr) - }() - return p.persistence.GetTaskQueueUserData(ctx, request) -} - -func (p *taskPersistenceClient) UpdateTaskQueueUserData( - ctx context.Context, - request *UpdateTaskQueueUserDataRequest, -) (retErr error) { - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) - p.recordRequestMetrics(metrics.PersistenceUpdateTaskQueueUserDataScope, caller, time.Since(startTime), retErr) - }() - return p.persistence.UpdateTaskQueueUserData(ctx, request) -} - -func (p *taskPersistenceClient) ListTaskQueueUserDataEntries( - ctx context.Context, - request *ListTaskQueueUserDataEntriesRequest, -) (_ *ListTaskQueueUserDataEntriesResponse, retErr error) { - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) - p.recordRequestMetrics(metrics.PersistenceListTaskQueueUserDataEntriesScope, caller, time.Since(startTime), retErr) - }() - return 
p.persistence.ListTaskQueueUserDataEntries(ctx, request) -} - -func (p *taskPersistenceClient) GetTaskQueuesByBuildId(ctx context.Context, request *GetTaskQueuesByBuildIdRequest) (_ []string, retErr error) { - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) - p.recordRequestMetrics(metrics.PersistenceGetTaskQueuesByBuildIdScope, caller, time.Since(startTime), retErr) - }() - return p.persistence.GetTaskQueuesByBuildId(ctx, request) -} - -func (p *taskPersistenceClient) CountTaskQueuesByBuildId(ctx context.Context, request *CountTaskQueuesByBuildIdRequest) (_ int, retErr error) { - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) - p.recordRequestMetrics(metrics.PersistenceCountTaskQueuesByBuildIdScope, caller, time.Since(startTime), retErr) - }() - return p.persistence.CountTaskQueuesByBuildId(ctx, request) -} - -func (p *taskPersistenceClient) Close() { - p.persistence.Close() -} - -func (p *metadataPersistenceClient) GetName() string { - return p.persistence.GetName() -} - -func (p *metadataPersistenceClient) CreateNamespace( - ctx context.Context, - request *CreateNamespaceRequest, -) (_ *CreateNamespaceResponse, retErr error) { - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) - p.recordRequestMetrics(metrics.PersistenceCreateNamespaceScope, caller, time.Since(startTime), retErr) - }() - return p.persistence.CreateNamespace(ctx, request) -} - -func (p *metadataPersistenceClient) GetNamespace( - ctx context.Context, - request *GetNamespaceRequest, -) (_ *GetNamespaceResponse, retErr error) { - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) - p.recordRequestMetrics(metrics.PersistenceGetNamespaceScope, caller, time.Since(startTime), retErr) - }() - return p.persistence.GetNamespace(ctx, request) -} - -func (p *metadataPersistenceClient) UpdateNamespace( - ctx context.Context, - request *UpdateNamespaceRequest, -) (retErr error) { - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) - p.recordRequestMetrics(metrics.PersistenceUpdateNamespaceScope, caller, time.Since(startTime), retErr) - }() - return p.persistence.UpdateNamespace(ctx, request) -} - -func (p *metadataPersistenceClient) RenameNamespace( - ctx context.Context, - request *RenameNamespaceRequest, -) (retErr error) { - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) - p.recordRequestMetrics(metrics.PersistenceRenameNamespaceScope, caller, time.Since(startTime), retErr) - }() - return p.persistence.RenameNamespace(ctx, request) -} - -func (p *metadataPersistenceClient) DeleteNamespace( - ctx context.Context, - request *DeleteNamespaceRequest, -) (retErr error) { - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), 
retErr) - p.recordRequestMetrics(metrics.PersistenceDeleteNamespaceScope, caller, time.Since(startTime), retErr) - }() - return p.persistence.DeleteNamespace(ctx, request) -} - -func (p *metadataPersistenceClient) DeleteNamespaceByName( - ctx context.Context, - request *DeleteNamespaceByNameRequest, -) (retErr error) { - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) - p.recordRequestMetrics(metrics.PersistenceDeleteNamespaceByNameScope, caller, time.Since(startTime), retErr) - }() - return p.persistence.DeleteNamespaceByName(ctx, request) -} - -func (p *metadataPersistenceClient) ListNamespaces( - ctx context.Context, - request *ListNamespacesRequest, -) (_ *ListNamespacesResponse, retErr error) { - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) - p.recordRequestMetrics(metrics.PersistenceListNamespacesScope, caller, time.Since(startTime), retErr) - }() - return p.persistence.ListNamespaces(ctx, request) -} - -func (p *metadataPersistenceClient) GetMetadata( - ctx context.Context, -) (_ *GetMetadataResponse, retErr error) { - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) - p.recordRequestMetrics(metrics.PersistenceGetMetadataScope, caller, time.Since(startTime), retErr) - }() - return p.persistence.GetMetadata(ctx) -} - -func (p *metadataPersistenceClient) Close() { - p.persistence.Close() -} - -// AppendHistoryNodes add a node to history node table -func (p *executionPersistenceClient) AppendHistoryNodes( - ctx context.Context, - request *AppendHistoryNodesRequest, -) (_ *AppendHistoryNodesResponse, retErr error) { - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) - p.recordRequestMetrics(metrics.PersistenceAppendHistoryNodesScope, caller, time.Since(startTime), retErr) - }() - return p.persistence.AppendHistoryNodes(ctx, request) -} - -// AppendRawHistoryNodes add a node to history node table -func (p *executionPersistenceClient) AppendRawHistoryNodes( - ctx context.Context, - request *AppendRawHistoryNodesRequest, -) (_ *AppendHistoryNodesResponse, retErr error) { - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) - p.recordRequestMetrics(metrics.PersistenceAppendRawHistoryNodesScope, caller, time.Since(startTime), retErr) - }() - return p.persistence.AppendRawHistoryNodes(ctx, request) -} - -// ReadHistoryBranch returns history node data for a branch -func (p *executionPersistenceClient) ReadHistoryBranch( - ctx context.Context, - request *ReadHistoryBranchRequest, -) (_ *ReadHistoryBranchResponse, retErr error) { - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - p.recordRequestMetrics(metrics.PersistenceReadHistoryBranchScope, caller, time.Since(startTime), retErr) - }() - return p.persistence.ReadHistoryBranch(ctx, request) -} - -func (p *executionPersistenceClient) ReadHistoryBranchReverse( - ctx context.Context, - request *ReadHistoryBranchReverseRequest, -) (_ 
*ReadHistoryBranchReverseResponse, retErr error) { - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - p.recordRequestMetrics(metrics.PersistenceReadHistoryBranchReverseScope, caller, time.Since(startTime), retErr) - }() - return p.persistence.ReadHistoryBranchReverse(ctx, request) -} - -// ReadHistoryBranchByBatch returns history node data for a branch ByBatch -func (p *executionPersistenceClient) ReadHistoryBranchByBatch( - ctx context.Context, - request *ReadHistoryBranchRequest, -) (_ *ReadHistoryBranchByBatchResponse, retErr error) { - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - p.recordRequestMetrics(metrics.PersistenceReadHistoryBranchScope, caller, time.Since(startTime), retErr) - }() - return p.persistence.ReadHistoryBranchByBatch(ctx, request) -} - -// ReadRawHistoryBranch returns history node raw data for a branch ByBatch -func (p *executionPersistenceClient) ReadRawHistoryBranch( - ctx context.Context, - request *ReadHistoryBranchRequest, -) (_ *ReadRawHistoryBranchResponse, retErr error) { - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - p.recordRequestMetrics(metrics.PersistenceReadRawHistoryBranchScope, caller, time.Since(startTime), retErr) - }() - return p.persistence.ReadRawHistoryBranch(ctx, request) -} - -// ForkHistoryBranch forks a new branch from an old branch -func (p *executionPersistenceClient) ForkHistoryBranch( - ctx context.Context, - request *ForkHistoryBranchRequest, -) (_ *ForkHistoryBranchResponse, retErr error) { - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - p.recordRequestMetrics(metrics.PersistenceForkHistoryBranchScope, caller, time.Since(startTime), retErr) - }() - return p.persistence.ForkHistoryBranch(ctx, request) -} - -// DeleteHistoryBranch removes a branch -func (p *executionPersistenceClient) DeleteHistoryBranch( - ctx context.Context, - request *DeleteHistoryBranchRequest, -) (retErr error) { - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - p.recordRequestMetrics(metrics.PersistenceDeleteHistoryBranchScope, caller, time.Since(startTime), retErr) - }() - return p.persistence.DeleteHistoryBranch(ctx, request) -} - -// TrimHistoryBranch trims a branch -func (p *executionPersistenceClient) TrimHistoryBranch( - ctx context.Context, - request *TrimHistoryBranchRequest, -) (_ *TrimHistoryBranchResponse, retErr error) { - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) - p.recordRequestMetrics(metrics.PersistenceTrimHistoryBranchScope, caller, time.Since(startTime), retErr) - }() - return p.persistence.TrimHistoryBranch(ctx, request) -} - -func (p *executionPersistenceClient) GetAllHistoryTreeBranches( - ctx context.Context, - request *GetAllHistoryTreeBranchesRequest, -) (_ *GetAllHistoryTreeBranchesResponse, retErr error) { - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) - p.recordRequestMetrics(metrics.PersistenceGetAllHistoryTreeBranchesScope, caller, time.Since(startTime), retErr) - }() - return p.persistence.GetAllHistoryTreeBranches(ctx, request) -} - -// GetHistoryTree returns all branch information of a tree -func (p 
*executionPersistenceClient) GetHistoryTree( - ctx context.Context, - request *GetHistoryTreeRequest, -) (_ *GetHistoryTreeResponse, retErr error) { - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) - p.recordRequestMetrics(metrics.PersistenceGetHistoryTreeScope, caller, time.Since(startTime), retErr) - }() - return p.persistence.GetHistoryTree(ctx, request) -} - -func (p *queuePersistenceClient) Init( - ctx context.Context, - blob *commonpb.DataBlob, -) error { - return p.persistence.Init(ctx, blob) -} - -func (p *queuePersistenceClient) EnqueueMessage( - ctx context.Context, - blob commonpb.DataBlob, -) (retErr error) { - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) - p.recordRequestMetrics(metrics.PersistenceEnqueueMessageScope, caller, time.Since(startTime), retErr) - }() - return p.persistence.EnqueueMessage(ctx, blob) -} - -func (p *queuePersistenceClient) ReadMessages( - ctx context.Context, - lastMessageID int64, - maxCount int, -) (_ []*QueueMessage, retErr error) { - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) - p.recordRequestMetrics(metrics.PersistenceReadQueueMessagesScope, caller, time.Since(startTime), retErr) - }() - return p.persistence.ReadMessages(ctx, lastMessageID, maxCount) -} - -func (p *queuePersistenceClient) UpdateAckLevel( - ctx context.Context, - metadata *InternalQueueMetadata, -) (retErr error) { - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) - p.recordRequestMetrics(metrics.PersistenceUpdateAckLevelScope, caller, time.Since(startTime), retErr) - }() - return p.persistence.UpdateAckLevel(ctx, metadata) -} - -func (p *queuePersistenceClient) GetAckLevels( - ctx context.Context, -) (_ *InternalQueueMetadata, retErr error) { - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) - p.recordRequestMetrics(metrics.PersistenceGetAckLevelScope, caller, time.Since(startTime), retErr) - }() - return p.persistence.GetAckLevels(ctx) -} - -func (p *queuePersistenceClient) DeleteMessagesBefore( - ctx context.Context, - messageID int64, -) (retErr error) { - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) - p.recordRequestMetrics(metrics.PersistenceDeleteMessagesBeforeScope, caller, time.Since(startTime), retErr) - }() - return p.persistence.DeleteMessagesBefore(ctx, messageID) -} - -func (p *queuePersistenceClient) EnqueueMessageToDLQ( - ctx context.Context, - blob commonpb.DataBlob, -) (_ int64, retErr error) { - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) - p.recordRequestMetrics(metrics.PersistenceEnqueueMessageToDLQScope, caller, time.Since(startTime), retErr) - }() - return p.persistence.EnqueueMessageToDLQ(ctx, blob) -} - -func (p 
*queuePersistenceClient) ReadMessagesFromDLQ( - ctx context.Context, - firstMessageID int64, - lastMessageID int64, - pageSize int, - pageToken []byte, -) (_ []*QueueMessage, _ []byte, retErr error) { - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) - p.recordRequestMetrics(metrics.PersistenceReadMessagesFromDLQScope, caller, time.Since(startTime), retErr) - }() - return p.persistence.ReadMessagesFromDLQ(ctx, firstMessageID, lastMessageID, pageSize, pageToken) -} - -func (p *queuePersistenceClient) DeleteMessageFromDLQ( - ctx context.Context, - messageID int64, -) (retErr error) { - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) - p.recordRequestMetrics(metrics.PersistenceDeleteMessageFromDLQScope, caller, time.Since(startTime), retErr) - }() - return p.persistence.DeleteMessageFromDLQ(ctx, messageID) -} - -func (p *queuePersistenceClient) RangeDeleteMessagesFromDLQ( - ctx context.Context, - firstMessageID int64, - lastMessageID int64, -) (retErr error) { - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) - p.recordRequestMetrics(metrics.PersistenceRangeDeleteMessagesFromDLQScope, caller, time.Since(startTime), retErr) - }() - return p.persistence.RangeDeleteMessagesFromDLQ(ctx, firstMessageID, lastMessageID) -} - -func (p *queuePersistenceClient) UpdateDLQAckLevel( - ctx context.Context, - metadata *InternalQueueMetadata, -) (retErr error) { - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) - p.recordRequestMetrics(metrics.PersistenceUpdateDLQAckLevelScope, caller, time.Since(startTime), retErr) - }() - return p.persistence.UpdateDLQAckLevel(ctx, metadata) -} - -func (p *queuePersistenceClient) GetDLQAckLevels( - ctx context.Context, -) (_ *InternalQueueMetadata, retErr error) { - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) - p.recordRequestMetrics(metrics.PersistenceGetDLQAckLevelScope, caller, time.Since(startTime), retErr) - }() - return p.persistence.GetDLQAckLevels(ctx) -} - -func (p *queuePersistenceClient) Close() { - p.persistence.Close() -} - -func (p *clusterMetadataPersistenceClient) Close() { - p.persistence.Close() -} - -func (p *clusterMetadataPersistenceClient) ListClusterMetadata( - ctx context.Context, - request *ListClusterMetadataRequest, -) (_ *ListClusterMetadataResponse, retErr error) { - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) - p.recordRequestMetrics(metrics.PersistenceListClusterMetadataScope, caller, time.Since(startTime), retErr) - }() - return p.persistence.ListClusterMetadata(ctx, request) -} - -func (p *clusterMetadataPersistenceClient) GetCurrentClusterMetadata( - ctx context.Context, -) (_ *GetClusterMetadataResponse, retErr error) { - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - 
p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) - p.recordRequestMetrics(metrics.PersistenceGetCurrentClusterMetadataScope, caller, time.Since(startTime), retErr) - }() - return p.persistence.GetCurrentClusterMetadata(ctx) -} - -func (p *clusterMetadataPersistenceClient) GetClusterMetadata( - ctx context.Context, - request *GetClusterMetadataRequest, -) (_ *GetClusterMetadataResponse, retErr error) { - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) - p.recordRequestMetrics(metrics.PersistenceGetClusterMetadataScope, caller, time.Since(startTime), retErr) - }() - return p.persistence.GetClusterMetadata(ctx, request) -} - -func (p *clusterMetadataPersistenceClient) SaveClusterMetadata( - ctx context.Context, - request *SaveClusterMetadataRequest, -) (_ bool, retErr error) { - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) - p.recordRequestMetrics(metrics.PersistenceSaveClusterMetadataScope, caller, time.Since(startTime), retErr) - }() - return p.persistence.SaveClusterMetadata(ctx, request) -} - -func (p *clusterMetadataPersistenceClient) DeleteClusterMetadata( - ctx context.Context, - request *DeleteClusterMetadataRequest, -) (retErr error) { - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) - p.recordRequestMetrics(metrics.PersistenceDeleteClusterMetadataScope, caller, time.Since(startTime), retErr) - }() - return p.persistence.DeleteClusterMetadata(ctx, request) -} - -func (p *clusterMetadataPersistenceClient) GetName() string { - return p.persistence.GetName() -} - -func (p *clusterMetadataPersistenceClient) GetClusterMembers( - ctx context.Context, - request *GetClusterMembersRequest, -) (_ *GetClusterMembersResponse, retErr error) { - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) - p.recordRequestMetrics(metrics.PersistenceGetClusterMembersScope, caller, time.Since(startTime), retErr) - }() - return p.persistence.GetClusterMembers(ctx, request) -} - -func (p *clusterMetadataPersistenceClient) UpsertClusterMembership( - ctx context.Context, - request *UpsertClusterMembershipRequest, -) (retErr error) { - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) - p.recordRequestMetrics(metrics.PersistenceUpsertClusterMembershipScope, caller, time.Since(startTime), retErr) - }() - return p.persistence.UpsertClusterMembership(ctx, request) -} - -func (p *clusterMetadataPersistenceClient) PruneClusterMembership( - ctx context.Context, - request *PruneClusterMembershipRequest, -) (retErr error) { - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) - p.recordRequestMetrics(metrics.PersistencePruneClusterMembershipScope, caller, time.Since(startTime), retErr) - }() - return p.persistence.PruneClusterMembership(ctx, request) -} - -func (p *metadataPersistenceClient) 
InitializeSystemNamespaces( - ctx context.Context, - currentClusterName string, -) (retErr error) { - caller := headers.GetCallerInfo(ctx).CallerName - startTime := time.Now().UTC() - defer func() { - p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) - p.recordRequestMetrics(metrics.PersistenceInitializeSystemNamespaceScope, caller, time.Since(startTime), retErr) - }() - return p.persistence.InitializeSystemNamespaces(ctx, currentClusterName) -} - -func (p *metricEmitter) recordRequestMetrics(operation string, caller string, latency time.Duration, err error) { - handler := p.metricsHandler.WithTags(metrics.OperationTag(operation), metrics.NamespaceTag(caller)) - handler.Counter(metrics.PersistenceRequests.GetMetricName()).Record(1) - handler.Timer(metrics.PersistenceLatency.GetMetricName()).Record(latency) - updateErrorMetric(handler, p.logger, operation, err) -} - -func updateErrorMetric(handler metrics.Handler, logger log.Logger, operation string, err error) { - if err != nil { - handler.Counter(metrics.PersistenceErrorWithType.GetMetricName()).Record(1, metrics.ServiceErrorTypeTag(err)) - switch err := err.(type) { - case *ShardAlreadyExistError, - *ShardOwnershipLostError, - *AppendHistoryTimeoutError, - *CurrentWorkflowConditionFailedError, - *WorkflowConditionFailedError, - *ConditionFailedError, - *TimeoutError, - *serviceerror.InvalidArgument, - *serviceerror.NamespaceAlreadyExists, - *serviceerror.NotFound, - *serviceerror.NamespaceNotFound: - // no-op - - case *serviceerror.ResourceExhausted: - handler.Counter(metrics.PersistenceErrResourceExhaustedCounter.GetMetricName()).Record(1, metrics.ResourceExhaustedCauseTag(err.Cause)) - default: - logger.Error("Operation failed with internal error.", tag.Error(err), tag.Operation(operation)) - handler.Counter(metrics.PersistenceFailures.GetMetricName()).Record(1) - } - } -} diff -Nru temporal-1.21.5-1/src/common/persistence/persistenceRateLimitedClients.go temporal-1.22.5/src/common/persistence/persistenceRateLimitedClients.go --- temporal-1.21.5-1/src/common/persistence/persistenceRateLimitedClients.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/persistenceRateLimitedClients.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,1079 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- -package persistence - -import ( - "context" - "time" - - commonpb "go.temporal.io/api/common/v1" - enumspb "go.temporal.io/api/enums/v1" - "go.temporal.io/api/serviceerror" - "go.temporal.io/server/common/headers" - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/quotas" - "go.temporal.io/server/service/history/tasks" -) - -const ( - RateLimitDefaultToken = 1 - CallerSegmentMissing = -1 -) - -var ( - // ErrPersistenceLimitExceeded is the error indicating QPS limit reached. - ErrPersistenceLimitExceeded = serviceerror.NewResourceExhausted(enumspb.RESOURCE_EXHAUSTED_CAUSE_PERSISTENCE_LIMIT, "Persistence Max QPS Reached.") -) - -type ( - shardRateLimitedPersistenceClient struct { - rateLimiter quotas.RequestRateLimiter - persistence ShardManager - logger log.Logger - } - - executionRateLimitedPersistenceClient struct { - rateLimiter quotas.RequestRateLimiter - persistence ExecutionManager - logger log.Logger - } - - taskRateLimitedPersistenceClient struct { - rateLimiter quotas.RequestRateLimiter - persistence TaskManager - logger log.Logger - } - - metadataRateLimitedPersistenceClient struct { - rateLimiter quotas.RequestRateLimiter - persistence MetadataManager - logger log.Logger - } - - clusterMetadataRateLimitedPersistenceClient struct { - rateLimiter quotas.RequestRateLimiter - persistence ClusterMetadataManager - logger log.Logger - } - - queueRateLimitedPersistenceClient struct { - rateLimiter quotas.RequestRateLimiter - persistence Queue - logger log.Logger - } -) - -var _ ShardManager = (*shardRateLimitedPersistenceClient)(nil) -var _ ExecutionManager = (*executionRateLimitedPersistenceClient)(nil) -var _ TaskManager = (*taskRateLimitedPersistenceClient)(nil) -var _ MetadataManager = (*metadataRateLimitedPersistenceClient)(nil) -var _ ClusterMetadataManager = (*clusterMetadataRateLimitedPersistenceClient)(nil) -var _ Queue = (*queueRateLimitedPersistenceClient)(nil) - -// NewShardPersistenceRateLimitedClient creates a client to manage shards -func NewShardPersistenceRateLimitedClient(persistence ShardManager, rateLimiter quotas.RequestRateLimiter, logger log.Logger) ShardManager { - return &shardRateLimitedPersistenceClient{ - persistence: persistence, - rateLimiter: rateLimiter, - logger: logger, - } -} - -// NewExecutionPersistenceRateLimitedClient creates a client to manage executions -func NewExecutionPersistenceRateLimitedClient(persistence ExecutionManager, rateLimiter quotas.RequestRateLimiter, logger log.Logger) ExecutionManager { - return &executionRateLimitedPersistenceClient{ - persistence: persistence, - rateLimiter: rateLimiter, - logger: logger, - } -} - -// NewTaskPersistenceRateLimitedClient creates a client to manage tasks -func NewTaskPersistenceRateLimitedClient(persistence TaskManager, rateLimiter quotas.RequestRateLimiter, logger log.Logger) TaskManager { - return &taskRateLimitedPersistenceClient{ - persistence: persistence, - rateLimiter: rateLimiter, - logger: logger, - } -} - -// NewMetadataPersistenceRateLimitedClient creates a MetadataManager client to manage metadata -func NewMetadataPersistenceRateLimitedClient(persistence MetadataManager, rateLimiter quotas.RequestRateLimiter, logger log.Logger) MetadataManager { - return &metadataRateLimitedPersistenceClient{ - persistence: persistence, - rateLimiter: rateLimiter, - logger: logger, - } -} - -// NewClusterMetadataPersistenceRateLimitedClient creates a MetadataManager client to manage metadata -func NewClusterMetadataPersistenceRateLimitedClient(persistence ClusterMetadataManager, 
rateLimiter quotas.RequestRateLimiter, logger log.Logger) ClusterMetadataManager { - return &clusterMetadataRateLimitedPersistenceClient{ - persistence: persistence, - rateLimiter: rateLimiter, - logger: logger, - } -} - -// NewQueuePersistenceRateLimitedClient creates a client to manage queue -func NewQueuePersistenceRateLimitedClient(persistence Queue, rateLimiter quotas.RequestRateLimiter, logger log.Logger) Queue { - return &queueRateLimitedPersistenceClient{ - persistence: persistence, - rateLimiter: rateLimiter, - logger: logger, - } -} - -func (p *shardRateLimitedPersistenceClient) GetName() string { - return p.persistence.GetName() -} - -func (p *shardRateLimitedPersistenceClient) GetOrCreateShard( - ctx context.Context, - request *GetOrCreateShardRequest, -) (*GetOrCreateShardResponse, error) { - if ok := allow(ctx, "GetOrCreateShard", request.ShardID, p.rateLimiter); !ok { - return nil, ErrPersistenceLimitExceeded - } - - response, err := p.persistence.GetOrCreateShard(ctx, request) - return response, err -} - -func (p *shardRateLimitedPersistenceClient) UpdateShard( - ctx context.Context, - request *UpdateShardRequest, -) error { - if ok := allow(ctx, "UpdateShard", request.ShardInfo.ShardId, p.rateLimiter); !ok { - return ErrPersistenceLimitExceeded - } - - return p.persistence.UpdateShard(ctx, request) -} - -func (p *shardRateLimitedPersistenceClient) AssertShardOwnership( - ctx context.Context, - request *AssertShardOwnershipRequest, -) error { - if ok := allow(ctx, "AssertShardOwnership", request.ShardID, p.rateLimiter); !ok { - return ErrPersistenceLimitExceeded - } - - return p.persistence.AssertShardOwnership(ctx, request) -} - -func (p *shardRateLimitedPersistenceClient) Close() { - p.persistence.Close() -} - -func (p *executionRateLimitedPersistenceClient) GetName() string { - return p.persistence.GetName() -} - -func (p *executionRateLimitedPersistenceClient) GetHistoryBranchUtil() HistoryBranchUtil { - return p.persistence.GetHistoryBranchUtil() -} - -func (p *executionRateLimitedPersistenceClient) CreateWorkflowExecution( - ctx context.Context, - request *CreateWorkflowExecutionRequest, -) (*CreateWorkflowExecutionResponse, error) { - if ok := allow(ctx, "CreateWorkflowExecution", request.ShardID, p.rateLimiter); !ok { - return nil, ErrPersistenceLimitExceeded - } - - response, err := p.persistence.CreateWorkflowExecution(ctx, request) - return response, err -} - -func (p *executionRateLimitedPersistenceClient) GetWorkflowExecution( - ctx context.Context, - request *GetWorkflowExecutionRequest, -) (*GetWorkflowExecutionResponse, error) { - if ok := allow(ctx, "GetWorkflowExecution", request.ShardID, p.rateLimiter); !ok { - return nil, ErrPersistenceLimitExceeded - } - - response, err := p.persistence.GetWorkflowExecution(ctx, request) - return response, err -} - -func (p *executionRateLimitedPersistenceClient) SetWorkflowExecution( - ctx context.Context, - request *SetWorkflowExecutionRequest, -) (*SetWorkflowExecutionResponse, error) { - if ok := allow(ctx, "SetWorkflowExecution", request.ShardID, p.rateLimiter); !ok { - return nil, ErrPersistenceLimitExceeded - } - - response, err := p.persistence.SetWorkflowExecution(ctx, request) - return response, err -} - -func (p *executionRateLimitedPersistenceClient) UpdateWorkflowExecution( - ctx context.Context, - request *UpdateWorkflowExecutionRequest, -) (*UpdateWorkflowExecutionResponse, error) { - if ok := allow(ctx, "UpdateWorkflowExecution", request.ShardID, p.rateLimiter); !ok { - return nil, 
ErrPersistenceLimitExceeded - } - - resp, err := p.persistence.UpdateWorkflowExecution(ctx, request) - return resp, err -} - -func (p *executionRateLimitedPersistenceClient) ConflictResolveWorkflowExecution( - ctx context.Context, - request *ConflictResolveWorkflowExecutionRequest, -) (*ConflictResolveWorkflowExecutionResponse, error) { - if ok := allow(ctx, "ConflictResolveWorkflowExecution", request.ShardID, p.rateLimiter); !ok { - return nil, ErrPersistenceLimitExceeded - } - - response, err := p.persistence.ConflictResolveWorkflowExecution(ctx, request) - return response, err -} - -func (p *executionRateLimitedPersistenceClient) DeleteWorkflowExecution( - ctx context.Context, - request *DeleteWorkflowExecutionRequest, -) error { - if ok := allow(ctx, "DeleteWorkflowExecution", request.ShardID, p.rateLimiter); !ok { - return ErrPersistenceLimitExceeded - } - - return p.persistence.DeleteWorkflowExecution(ctx, request) -} - -func (p *executionRateLimitedPersistenceClient) DeleteCurrentWorkflowExecution( - ctx context.Context, - request *DeleteCurrentWorkflowExecutionRequest, -) error { - if ok := allow(ctx, "DeleteCurrentWorkflowExecution", request.ShardID, p.rateLimiter); !ok { - return ErrPersistenceLimitExceeded - } - - return p.persistence.DeleteCurrentWorkflowExecution(ctx, request) -} - -func (p *executionRateLimitedPersistenceClient) GetCurrentExecution( - ctx context.Context, - request *GetCurrentExecutionRequest, -) (*GetCurrentExecutionResponse, error) { - if ok := allow(ctx, "GetCurrentExecution", request.ShardID, p.rateLimiter); !ok { - return nil, ErrPersistenceLimitExceeded - } - - response, err := p.persistence.GetCurrentExecution(ctx, request) - return response, err -} - -func (p *executionRateLimitedPersistenceClient) ListConcreteExecutions( - ctx context.Context, - request *ListConcreteExecutionsRequest, -) (*ListConcreteExecutionsResponse, error) { - if ok := allow(ctx, "ListConcreteExecutions", request.ShardID, p.rateLimiter); !ok { - return nil, ErrPersistenceLimitExceeded - } - - response, err := p.persistence.ListConcreteExecutions(ctx, request) - return response, err -} - -func (p *executionRateLimitedPersistenceClient) RegisterHistoryTaskReader( - ctx context.Context, - request *RegisterHistoryTaskReaderRequest, -) error { - // hint methods don't actually hint DB, so don't go through persistence rate limiter - return p.persistence.RegisterHistoryTaskReader(ctx, request) -} - -func (p *executionRateLimitedPersistenceClient) UnregisterHistoryTaskReader( - ctx context.Context, - request *UnregisterHistoryTaskReaderRequest, -) { - // hint methods don't actually hint DB, so don't go through persistence rate limiter - p.persistence.UnregisterHistoryTaskReader(ctx, request) -} - -func (p *executionRateLimitedPersistenceClient) UpdateHistoryTaskReaderProgress( - ctx context.Context, - request *UpdateHistoryTaskReaderProgressRequest, -) { - // hint methods don't actually hint DB, so don't go through persistence rate limiter - p.persistence.UpdateHistoryTaskReaderProgress(ctx, request) -} - -func (p *executionRateLimitedPersistenceClient) AddHistoryTasks( - ctx context.Context, - request *AddHistoryTasksRequest, -) error { - if ok := allow(ctx, "AddHistoryTasks", request.ShardID, p.rateLimiter); !ok { - return ErrPersistenceLimitExceeded - } - - return p.persistence.AddHistoryTasks(ctx, request) -} - -func (p *executionRateLimitedPersistenceClient) GetHistoryTasks( - ctx context.Context, - request *GetHistoryTasksRequest, -) (*GetHistoryTasksResponse, error) { - if ok := 
allow( - ctx, - ConstructHistoryTaskAPI("GetHistoryTasks", request.TaskCategory), - request.ShardID, - p.rateLimiter, - ); !ok { - return nil, ErrPersistenceLimitExceeded - } - - response, err := p.persistence.GetHistoryTasks(ctx, request) - return response, err -} - -func (p *executionRateLimitedPersistenceClient) CompleteHistoryTask( - ctx context.Context, - request *CompleteHistoryTaskRequest, -) error { - if ok := allow( - ctx, - ConstructHistoryTaskAPI("CompleteHistoryTask", request.TaskCategory), - request.ShardID, - p.rateLimiter, - ); !ok { - return ErrPersistenceLimitExceeded - } - - return p.persistence.CompleteHistoryTask(ctx, request) -} - -func (p *executionRateLimitedPersistenceClient) RangeCompleteHistoryTasks( - ctx context.Context, - request *RangeCompleteHistoryTasksRequest, -) error { - if ok := allow( - ctx, - ConstructHistoryTaskAPI("RangeCompleteHistoryTasks", request.TaskCategory), - request.ShardID, - p.rateLimiter, - ); !ok { - return ErrPersistenceLimitExceeded - } - - return p.persistence.RangeCompleteHistoryTasks(ctx, request) -} - -func (p *executionRateLimitedPersistenceClient) PutReplicationTaskToDLQ( - ctx context.Context, - request *PutReplicationTaskToDLQRequest, -) error { - if ok := allow(ctx, "PutReplicationTaskToDLQ", request.ShardID, p.rateLimiter); !ok { - return ErrPersistenceLimitExceeded - } - - return p.persistence.PutReplicationTaskToDLQ(ctx, request) -} - -func (p *executionRateLimitedPersistenceClient) GetReplicationTasksFromDLQ( - ctx context.Context, - request *GetReplicationTasksFromDLQRequest, -) (*GetHistoryTasksResponse, error) { - if ok := allow(ctx, "GetReplicationTasksFromDLQ", request.ShardID, p.rateLimiter); !ok { - return nil, ErrPersistenceLimitExceeded - } - - return p.persistence.GetReplicationTasksFromDLQ(ctx, request) -} - -func (p *executionRateLimitedPersistenceClient) DeleteReplicationTaskFromDLQ( - ctx context.Context, - request *DeleteReplicationTaskFromDLQRequest, -) error { - if ok := allow(ctx, "DeleteReplicationTaskFromDLQ", request.ShardID, p.rateLimiter); !ok { - return ErrPersistenceLimitExceeded - } - - return p.persistence.DeleteReplicationTaskFromDLQ(ctx, request) -} - -func (p *executionRateLimitedPersistenceClient) RangeDeleteReplicationTaskFromDLQ( - ctx context.Context, - request *RangeDeleteReplicationTaskFromDLQRequest, -) error { - if ok := allow(ctx, "RangeDeleteReplicationTaskFromDLQ", request.ShardID, p.rateLimiter); !ok { - return ErrPersistenceLimitExceeded - } - - return p.persistence.RangeDeleteReplicationTaskFromDLQ(ctx, request) -} - -func (p *executionRateLimitedPersistenceClient) IsReplicationDLQEmpty( - ctx context.Context, - request *GetReplicationTasksFromDLQRequest, -) (bool, error) { - if ok := allow(ctx, "IsReplicationDLQEmpty", request.ShardID, p.rateLimiter); !ok { - return true, ErrPersistenceLimitExceeded - } - - return p.persistence.IsReplicationDLQEmpty(ctx, request) -} - -func (p *executionRateLimitedPersistenceClient) Close() { - p.persistence.Close() -} - -func (p *taskRateLimitedPersistenceClient) GetName() string { - return p.persistence.GetName() -} - -func (p *taskRateLimitedPersistenceClient) CreateTasks( - ctx context.Context, - request *CreateTasksRequest, -) (*CreateTasksResponse, error) { - if ok := allow(ctx, "CreateTasks", CallerSegmentMissing, p.rateLimiter); !ok { - return nil, ErrPersistenceLimitExceeded - } - - response, err := p.persistence.CreateTasks(ctx, request) - return response, err -} - -func (p *taskRateLimitedPersistenceClient) GetTasks( - ctx 
context.Context, - request *GetTasksRequest, -) (*GetTasksResponse, error) { - if ok := allow(ctx, "GetTasks", CallerSegmentMissing, p.rateLimiter); !ok { - return nil, ErrPersistenceLimitExceeded - } - - response, err := p.persistence.GetTasks(ctx, request) - return response, err -} - -func (p *taskRateLimitedPersistenceClient) CompleteTask( - ctx context.Context, - request *CompleteTaskRequest, -) error { - if ok := allow(ctx, "CompleteTask", CallerSegmentMissing, p.rateLimiter); !ok { - return ErrPersistenceLimitExceeded - } - - return p.persistence.CompleteTask(ctx, request) -} - -func (p *taskRateLimitedPersistenceClient) CompleteTasksLessThan( - ctx context.Context, - request *CompleteTasksLessThanRequest, -) (int, error) { - if ok := allow(ctx, "CompleteTasksLessThan", CallerSegmentMissing, p.rateLimiter); !ok { - return 0, ErrPersistenceLimitExceeded - } - return p.persistence.CompleteTasksLessThan(ctx, request) -} - -func (p *taskRateLimitedPersistenceClient) CreateTaskQueue( - ctx context.Context, - request *CreateTaskQueueRequest, -) (*CreateTaskQueueResponse, error) { - if ok := allow(ctx, "CreateTaskQueue", CallerSegmentMissing, p.rateLimiter); !ok { - return nil, ErrPersistenceLimitExceeded - } - return p.persistence.CreateTaskQueue(ctx, request) -} - -func (p *taskRateLimitedPersistenceClient) UpdateTaskQueue( - ctx context.Context, - request *UpdateTaskQueueRequest, -) (*UpdateTaskQueueResponse, error) { - if ok := allow(ctx, "UpdateTaskQueue", CallerSegmentMissing, p.rateLimiter); !ok { - return nil, ErrPersistenceLimitExceeded - } - return p.persistence.UpdateTaskQueue(ctx, request) -} - -func (p *taskRateLimitedPersistenceClient) GetTaskQueue( - ctx context.Context, - request *GetTaskQueueRequest, -) (*GetTaskQueueResponse, error) { - if ok := allow(ctx, "GetTaskQueue", CallerSegmentMissing, p.rateLimiter); !ok { - return nil, ErrPersistenceLimitExceeded - } - return p.persistence.GetTaskQueue(ctx, request) -} - -func (p *taskRateLimitedPersistenceClient) ListTaskQueue( - ctx context.Context, - request *ListTaskQueueRequest, -) (*ListTaskQueueResponse, error) { - if ok := allow(ctx, "ListTaskQueue", CallerSegmentMissing, p.rateLimiter); !ok { - return nil, ErrPersistenceLimitExceeded - } - return p.persistence.ListTaskQueue(ctx, request) -} - -func (p *taskRateLimitedPersistenceClient) DeleteTaskQueue( - ctx context.Context, - request *DeleteTaskQueueRequest, -) error { - if ok := allow(ctx, "DeleteTaskQueue", CallerSegmentMissing, p.rateLimiter); !ok { - return ErrPersistenceLimitExceeded - } - return p.persistence.DeleteTaskQueue(ctx, request) -} - -func (p taskRateLimitedPersistenceClient) GetTaskQueueUserData( - ctx context.Context, - request *GetTaskQueueUserDataRequest, -) (*GetTaskQueueUserDataResponse, error) { - if ok := allow(ctx, "GetTaskQueueUserData", CallerSegmentMissing, p.rateLimiter); !ok { - return nil, ErrPersistenceLimitExceeded - } - return p.persistence.GetTaskQueueUserData(ctx, request) -} - -func (p taskRateLimitedPersistenceClient) UpdateTaskQueueUserData( - ctx context.Context, - request *UpdateTaskQueueUserDataRequest, -) error { - if ok := allow(ctx, "UpdateTaskQueueUserData", CallerSegmentMissing, p.rateLimiter); !ok { - return ErrPersistenceLimitExceeded - } - return p.persistence.UpdateTaskQueueUserData(ctx, request) -} - -func (p taskRateLimitedPersistenceClient) ListTaskQueueUserDataEntries( - ctx context.Context, - request *ListTaskQueueUserDataEntriesRequest, -) (*ListTaskQueueUserDataEntriesResponse, error) { - if ok := allow(ctx, 
"ListTaskQueueUserDataEntries", CallerSegmentMissing, p.rateLimiter); !ok { - return nil, ErrPersistenceLimitExceeded - } - return p.persistence.ListTaskQueueUserDataEntries(ctx, request) -} - -func (p taskRateLimitedPersistenceClient) GetTaskQueuesByBuildId(ctx context.Context, request *GetTaskQueuesByBuildIdRequest) ([]string, error) { - if ok := allow(ctx, "GetTaskQueuesByBuildId", CallerSegmentMissing, p.rateLimiter); !ok { - return nil, ErrPersistenceLimitExceeded - } - return p.persistence.GetTaskQueuesByBuildId(ctx, request) -} - -func (p taskRateLimitedPersistenceClient) CountTaskQueuesByBuildId(ctx context.Context, request *CountTaskQueuesByBuildIdRequest) (int, error) { - if ok := allow(ctx, "CountTaskQueuesByBuildId", CallerSegmentMissing, p.rateLimiter); !ok { - return 0, ErrPersistenceLimitExceeded - } - return p.persistence.CountTaskQueuesByBuildId(ctx, request) -} - -func (p *taskRateLimitedPersistenceClient) Close() { - p.persistence.Close() -} - -func (p *metadataRateLimitedPersistenceClient) GetName() string { - return p.persistence.GetName() -} - -func (p *metadataRateLimitedPersistenceClient) CreateNamespace( - ctx context.Context, - request *CreateNamespaceRequest, -) (*CreateNamespaceResponse, error) { - if ok := allow(ctx, "CreateNamespace", CallerSegmentMissing, p.rateLimiter); !ok { - return nil, ErrPersistenceLimitExceeded - } - - response, err := p.persistence.CreateNamespace(ctx, request) - return response, err -} - -func (p *metadataRateLimitedPersistenceClient) GetNamespace( - ctx context.Context, - request *GetNamespaceRequest, -) (*GetNamespaceResponse, error) { - if ok := allow(ctx, "GetNamespace", CallerSegmentMissing, p.rateLimiter); !ok { - return nil, ErrPersistenceLimitExceeded - } - - response, err := p.persistence.GetNamespace(ctx, request) - return response, err -} - -func (p *metadataRateLimitedPersistenceClient) UpdateNamespace( - ctx context.Context, - request *UpdateNamespaceRequest, -) error { - if ok := allow(ctx, "UpdateNamespace", CallerSegmentMissing, p.rateLimiter); !ok { - return ErrPersistenceLimitExceeded - } - - return p.persistence.UpdateNamespace(ctx, request) -} - -func (p *metadataRateLimitedPersistenceClient) RenameNamespace( - ctx context.Context, - request *RenameNamespaceRequest, -) error { - if ok := allow(ctx, "RenameNamespace", CallerSegmentMissing, p.rateLimiter); !ok { - return ErrPersistenceLimitExceeded - } - - return p.persistence.RenameNamespace(ctx, request) -} - -func (p *metadataRateLimitedPersistenceClient) DeleteNamespace( - ctx context.Context, - request *DeleteNamespaceRequest, -) error { - if ok := allow(ctx, "DeleteNamespace", CallerSegmentMissing, p.rateLimiter); !ok { - return ErrPersistenceLimitExceeded - } - - return p.persistence.DeleteNamespace(ctx, request) -} - -func (p *metadataRateLimitedPersistenceClient) DeleteNamespaceByName( - ctx context.Context, - request *DeleteNamespaceByNameRequest, -) error { - if ok := allow(ctx, "DeleteNamespaceByName", CallerSegmentMissing, p.rateLimiter); !ok { - return ErrPersistenceLimitExceeded - } - - return p.persistence.DeleteNamespaceByName(ctx, request) -} - -func (p *metadataRateLimitedPersistenceClient) ListNamespaces( - ctx context.Context, - request *ListNamespacesRequest, -) (*ListNamespacesResponse, error) { - if ok := allow(ctx, "ListNamespaces", CallerSegmentMissing, p.rateLimiter); !ok { - return nil, ErrPersistenceLimitExceeded - } - - response, err := p.persistence.ListNamespaces(ctx, request) - return response, err -} - -func (p 
*metadataRateLimitedPersistenceClient) GetMetadata( - ctx context.Context, -) (*GetMetadataResponse, error) { - if ok := allow(ctx, "GetMetadata", CallerSegmentMissing, p.rateLimiter); !ok { - return nil, ErrPersistenceLimitExceeded - } - - response, err := p.persistence.GetMetadata(ctx) - return response, err -} - -func (p *metadataRateLimitedPersistenceClient) InitializeSystemNamespaces( - ctx context.Context, - currentClusterName string, -) error { - if ok := allow(ctx, "InitializeSystemNamespaces", CallerSegmentMissing, p.rateLimiter); !ok { - return ErrPersistenceLimitExceeded - } - return p.persistence.InitializeSystemNamespaces(ctx, currentClusterName) -} - -func (p *metadataRateLimitedPersistenceClient) Close() { - p.persistence.Close() -} - -// AppendHistoryNodes add a node to history node table -func (p *executionRateLimitedPersistenceClient) AppendHistoryNodes( - ctx context.Context, - request *AppendHistoryNodesRequest, -) (*AppendHistoryNodesResponse, error) { - if ok := allow(ctx, "AppendHistoryNodes", request.ShardID, p.rateLimiter); !ok { - return nil, ErrPersistenceLimitExceeded - } - return p.persistence.AppendHistoryNodes(ctx, request) -} - -// AppendRawHistoryNodes add a node to history node table -func (p *executionRateLimitedPersistenceClient) AppendRawHistoryNodes( - ctx context.Context, - request *AppendRawHistoryNodesRequest, -) (*AppendHistoryNodesResponse, error) { - if ok := allow(ctx, "AppendRawHistoryNodes", request.ShardID, p.rateLimiter); !ok { - return nil, ErrPersistenceLimitExceeded - } - return p.persistence.AppendRawHistoryNodes(ctx, request) -} - -// ReadHistoryBranch returns history node data for a branch -func (p *executionRateLimitedPersistenceClient) ReadHistoryBranch( - ctx context.Context, - request *ReadHistoryBranchRequest, -) (*ReadHistoryBranchResponse, error) { - if ok := allow(ctx, "ReadHistoryBranch", request.ShardID, p.rateLimiter); !ok { - return nil, ErrPersistenceLimitExceeded - } - response, err := p.persistence.ReadHistoryBranch(ctx, request) - return response, err -} - -// ReadHistoryBranchReverse returns history node data for a branch -func (p *executionRateLimitedPersistenceClient) ReadHistoryBranchReverse( - ctx context.Context, - request *ReadHistoryBranchReverseRequest, -) (*ReadHistoryBranchReverseResponse, error) { - if ok := allow(ctx, "ReadHistoryBranchReverse", request.ShardID, p.rateLimiter); !ok { - return nil, ErrPersistenceLimitExceeded - } - response, err := p.persistence.ReadHistoryBranchReverse(ctx, request) - return response, err -} - -// ReadHistoryBranchByBatch returns history node data for a branch -func (p *executionRateLimitedPersistenceClient) ReadHistoryBranchByBatch( - ctx context.Context, - request *ReadHistoryBranchRequest, -) (*ReadHistoryBranchByBatchResponse, error) { - if ok := allow(ctx, "ReadHistoryBranchByBatch", request.ShardID, p.rateLimiter); !ok { - return nil, ErrPersistenceLimitExceeded - } - response, err := p.persistence.ReadHistoryBranchByBatch(ctx, request) - return response, err -} - -// ReadHistoryBranchByBatch returns history node data for a branch -func (p *executionRateLimitedPersistenceClient) ReadRawHistoryBranch( - ctx context.Context, - request *ReadHistoryBranchRequest, -) (*ReadRawHistoryBranchResponse, error) { - if ok := allow(ctx, "ReadRawHistoryBranch", request.ShardID, p.rateLimiter); !ok { - return nil, ErrPersistenceLimitExceeded - } - response, err := p.persistence.ReadRawHistoryBranch(ctx, request) - return response, err -} - -// ForkHistoryBranch forks a new branch 
from a old branch -func (p *executionRateLimitedPersistenceClient) ForkHistoryBranch( - ctx context.Context, - request *ForkHistoryBranchRequest, -) (*ForkHistoryBranchResponse, error) { - if ok := allow(ctx, "ForkHistoryBranch", request.ShardID, p.rateLimiter); !ok { - return nil, ErrPersistenceLimitExceeded - } - response, err := p.persistence.ForkHistoryBranch(ctx, request) - return response, err -} - -// DeleteHistoryBranch removes a branch -func (p *executionRateLimitedPersistenceClient) DeleteHistoryBranch( - ctx context.Context, - request *DeleteHistoryBranchRequest, -) error { - if ok := allow(ctx, "DeleteHistoryBranch", request.ShardID, p.rateLimiter); !ok { - return ErrPersistenceLimitExceeded - } - return p.persistence.DeleteHistoryBranch(ctx, request) -} - -// TrimHistoryBranch trims a branch -func (p *executionRateLimitedPersistenceClient) TrimHistoryBranch( - ctx context.Context, - request *TrimHistoryBranchRequest, -) (*TrimHistoryBranchResponse, error) { - if ok := allow(ctx, "TrimHistoryBranch", request.ShardID, p.rateLimiter); !ok { - return nil, ErrPersistenceLimitExceeded - } - resp, err := p.persistence.TrimHistoryBranch(ctx, request) - return resp, err -} - -// GetHistoryTree returns all branch information of a tree -func (p *executionRateLimitedPersistenceClient) GetHistoryTree( - ctx context.Context, - request *GetHistoryTreeRequest, -) (*GetHistoryTreeResponse, error) { - if ok := allow(ctx, "GetHistoryTree", request.ShardID, p.rateLimiter); !ok { - return nil, ErrPersistenceLimitExceeded - } - response, err := p.persistence.GetHistoryTree(ctx, request) - return response, err -} - -func (p *executionRateLimitedPersistenceClient) GetAllHistoryTreeBranches( - ctx context.Context, - request *GetAllHistoryTreeBranchesRequest, -) (*GetAllHistoryTreeBranchesResponse, error) { - if ok := allow(ctx, "GetAllHistoryTreeBranches", CallerSegmentMissing, p.rateLimiter); !ok { - return nil, ErrPersistenceLimitExceeded - } - response, err := p.persistence.GetAllHistoryTreeBranches(ctx, request) - return response, err -} - -func (p *queueRateLimitedPersistenceClient) EnqueueMessage( - ctx context.Context, - blob commonpb.DataBlob, -) error { - if ok := allow(ctx, "EnqueueMessage", CallerSegmentMissing, p.rateLimiter); !ok { - return ErrPersistenceLimitExceeded - } - - return p.persistence.EnqueueMessage(ctx, blob) -} - -func (p *queueRateLimitedPersistenceClient) ReadMessages( - ctx context.Context, - lastMessageID int64, - maxCount int, -) ([]*QueueMessage, error) { - if ok := allow(ctx, "ReadMessages", CallerSegmentMissing, p.rateLimiter); !ok { - return nil, ErrPersistenceLimitExceeded - } - - return p.persistence.ReadMessages(ctx, lastMessageID, maxCount) -} - -func (p *queueRateLimitedPersistenceClient) UpdateAckLevel( - ctx context.Context, - metadata *InternalQueueMetadata, -) error { - if ok := allow(ctx, "UpdateAckLevel", CallerSegmentMissing, p.rateLimiter); !ok { - return ErrPersistenceLimitExceeded - } - - return p.persistence.UpdateAckLevel(ctx, metadata) -} - -func (p *queueRateLimitedPersistenceClient) GetAckLevels( - ctx context.Context, -) (*InternalQueueMetadata, error) { - if ok := allow(ctx, "GetAckLevels", CallerSegmentMissing, p.rateLimiter); !ok { - return nil, ErrPersistenceLimitExceeded - } - - return p.persistence.GetAckLevels(ctx) -} - -func (p *queueRateLimitedPersistenceClient) DeleteMessagesBefore( - ctx context.Context, - messageID int64, -) error { - if ok := allow(ctx, "DeleteMessagesBefore", CallerSegmentMissing, p.rateLimiter); !ok { - return 
ErrPersistenceLimitExceeded - } - - return p.persistence.DeleteMessagesBefore(ctx, messageID) -} - -func (p *queueRateLimitedPersistenceClient) EnqueueMessageToDLQ( - ctx context.Context, - blob commonpb.DataBlob, -) (int64, error) { - if ok := allow(ctx, "EnqueueMessageToDLQ", CallerSegmentMissing, p.rateLimiter); !ok { - return EmptyQueueMessageID, ErrPersistenceLimitExceeded - } - - return p.persistence.EnqueueMessageToDLQ(ctx, blob) -} - -func (p *queueRateLimitedPersistenceClient) ReadMessagesFromDLQ( - ctx context.Context, - firstMessageID int64, - lastMessageID int64, - pageSize int, - pageToken []byte, -) ([]*QueueMessage, []byte, error) { - if ok := allow(ctx, "ReadMessagesFromDLQ", CallerSegmentMissing, p.rateLimiter); !ok { - return nil, nil, ErrPersistenceLimitExceeded - } - - return p.persistence.ReadMessagesFromDLQ(ctx, firstMessageID, lastMessageID, pageSize, pageToken) -} - -func (p *queueRateLimitedPersistenceClient) RangeDeleteMessagesFromDLQ( - ctx context.Context, - firstMessageID int64, - lastMessageID int64, -) error { - if ok := allow(ctx, "RangeDeleteMessagesFromDLQ", CallerSegmentMissing, p.rateLimiter); !ok { - return ErrPersistenceLimitExceeded - } - - return p.persistence.RangeDeleteMessagesFromDLQ(ctx, firstMessageID, lastMessageID) -} -func (p *queueRateLimitedPersistenceClient) UpdateDLQAckLevel( - ctx context.Context, - metadata *InternalQueueMetadata, -) error { - if ok := allow(ctx, "UpdateDLQAckLevel", CallerSegmentMissing, p.rateLimiter); !ok { - return ErrPersistenceLimitExceeded - } - - return p.persistence.UpdateDLQAckLevel(ctx, metadata) -} - -func (p *queueRateLimitedPersistenceClient) GetDLQAckLevels( - ctx context.Context, -) (*InternalQueueMetadata, error) { - if ok := allow(ctx, "GetDLQAckLevels", CallerSegmentMissing, p.rateLimiter); !ok { - return nil, ErrPersistenceLimitExceeded - } - - return p.persistence.GetDLQAckLevels(ctx) -} - -func (p *queueRateLimitedPersistenceClient) DeleteMessageFromDLQ( - ctx context.Context, - messageID int64, -) error { - if ok := allow(ctx, "DeleteMessageFromDLQ", CallerSegmentMissing, p.rateLimiter); !ok { - return ErrPersistenceLimitExceeded - } - - return p.persistence.DeleteMessageFromDLQ(ctx, messageID) -} - -func (p *queueRateLimitedPersistenceClient) Close() { - p.persistence.Close() -} - -func (p *queueRateLimitedPersistenceClient) Init( - ctx context.Context, - blob *commonpb.DataBlob, -) error { - return p.persistence.Init(ctx, blob) -} - -func (c *clusterMetadataRateLimitedPersistenceClient) Close() { - c.persistence.Close() -} - -func (c *clusterMetadataRateLimitedPersistenceClient) GetName() string { - return c.persistence.GetName() -} - -func (c *clusterMetadataRateLimitedPersistenceClient) GetClusterMembers( - ctx context.Context, - request *GetClusterMembersRequest, -) (*GetClusterMembersResponse, error) { - if ok := allow(ctx, "GetClusterMembers", CallerSegmentMissing, c.rateLimiter); !ok { - return nil, ErrPersistenceLimitExceeded - } - return c.persistence.GetClusterMembers(ctx, request) -} - -func (c *clusterMetadataRateLimitedPersistenceClient) UpsertClusterMembership( - ctx context.Context, - request *UpsertClusterMembershipRequest, -) error { - if ok := allow(ctx, "UpsertClusterMembership", CallerSegmentMissing, c.rateLimiter); !ok { - return ErrPersistenceLimitExceeded - } - return c.persistence.UpsertClusterMembership(ctx, request) -} - -func (c *clusterMetadataRateLimitedPersistenceClient) PruneClusterMembership( - ctx context.Context, - request *PruneClusterMembershipRequest, -) error 
{ - if ok := allow(ctx, "PruneClusterMembership", CallerSegmentMissing, c.rateLimiter); !ok { - return ErrPersistenceLimitExceeded - } - return c.persistence.PruneClusterMembership(ctx, request) -} - -func (c *clusterMetadataRateLimitedPersistenceClient) ListClusterMetadata( - ctx context.Context, - request *ListClusterMetadataRequest, -) (*ListClusterMetadataResponse, error) { - if ok := allow(ctx, "ListClusterMetadata", CallerSegmentMissing, c.rateLimiter); !ok { - return nil, ErrPersistenceLimitExceeded - } - return c.persistence.ListClusterMetadata(ctx, request) -} - -func (c *clusterMetadataRateLimitedPersistenceClient) GetCurrentClusterMetadata( - ctx context.Context, -) (*GetClusterMetadataResponse, error) { - if ok := allow(ctx, "GetCurrentClusterMetadata", CallerSegmentMissing, c.rateLimiter); !ok { - return nil, ErrPersistenceLimitExceeded - } - return c.persistence.GetCurrentClusterMetadata(ctx) -} - -func (c *clusterMetadataRateLimitedPersistenceClient) GetClusterMetadata( - ctx context.Context, - request *GetClusterMetadataRequest, -) (*GetClusterMetadataResponse, error) { - if ok := allow(ctx, "GetClusterMetadata", CallerSegmentMissing, c.rateLimiter); !ok { - return nil, ErrPersistenceLimitExceeded - } - return c.persistence.GetClusterMetadata(ctx, request) -} - -func (c *clusterMetadataRateLimitedPersistenceClient) SaveClusterMetadata( - ctx context.Context, - request *SaveClusterMetadataRequest, -) (bool, error) { - if ok := allow(ctx, "SaveClusterMetadata", CallerSegmentMissing, c.rateLimiter); !ok { - return false, ErrPersistenceLimitExceeded - } - return c.persistence.SaveClusterMetadata(ctx, request) -} - -func (c *clusterMetadataRateLimitedPersistenceClient) DeleteClusterMetadata( - ctx context.Context, - request *DeleteClusterMetadataRequest, -) error { - if ok := allow(ctx, "DeleteClusterMetadata", CallerSegmentMissing, c.rateLimiter); !ok { - return ErrPersistenceLimitExceeded - } - return c.persistence.DeleteClusterMetadata(ctx, request) -} - -func allow( - ctx context.Context, - api string, - shardID int32, - rateLimiter quotas.RequestRateLimiter, -) bool { - callerInfo := headers.GetCallerInfo(ctx) - return rateLimiter.Allow(time.Now().UTC(), quotas.NewRequest( - api, - RateLimitDefaultToken, - callerInfo.CallerName, - callerInfo.CallerType, - shardID, - callerInfo.CallOrigin, - )) -} - -// TODO: change the value returned so it can also be used by -// persistence metrics client. For now, it's only used by rate -// limit client, and we don't really care about the actual value -// returned, as long as they are different from each task category. -func ConstructHistoryTaskAPI( - baseAPI string, - taskCategory tasks.Category, -) string { - return baseAPI + taskCategory.Name() -} diff -Nru temporal-1.21.5-1/src/common/persistence/persistenceRetryableClients.go temporal-1.22.5/src/common/persistence/persistenceRetryableClients.go --- temporal-1.21.5-1/src/common/persistence/persistenceRetryableClients.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/persistenceRetryableClients.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,1243 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inp. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inp. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package persistence - -import ( - "context" - - commonpb "go.temporal.io/api/common/v1" - - "go.temporal.io/server/common/backoff" -) - -type ( - shardRetryablePersistenceClient struct { - persistence ShardManager - policy backoff.RetryPolicy - isRetryable backoff.IsRetryable - } - - executionRetryablePersistenceClient struct { - persistence ExecutionManager - policy backoff.RetryPolicy - isRetryable backoff.IsRetryable - } - - taskRetryablePersistenceClient struct { - persistence TaskManager - policy backoff.RetryPolicy - isRetryable backoff.IsRetryable - } - - metadataRetryablePersistenceClient struct { - persistence MetadataManager - policy backoff.RetryPolicy - isRetryable backoff.IsRetryable - } - - clusterMetadataRetryablePersistenceClient struct { - persistence ClusterMetadataManager - policy backoff.RetryPolicy - isRetryable backoff.IsRetryable - } - - queueRetryablePersistenceClient struct { - persistence Queue - policy backoff.RetryPolicy - isRetryable backoff.IsRetryable - } -) - -var _ ShardManager = (*shardRetryablePersistenceClient)(nil) -var _ ExecutionManager = (*executionRetryablePersistenceClient)(nil) -var _ TaskManager = (*taskRetryablePersistenceClient)(nil) -var _ MetadataManager = (*metadataRetryablePersistenceClient)(nil) -var _ ClusterMetadataManager = (*clusterMetadataRetryablePersistenceClient)(nil) -var _ Queue = (*queueRetryablePersistenceClient)(nil) - -// NewShardPersistenceRetryableClient creates a client to manage shards -func NewShardPersistenceRetryableClient( - persistence ShardManager, - policy backoff.RetryPolicy, - isRetryable backoff.IsRetryable, -) ShardManager { - return &shardRetryablePersistenceClient{ - persistence: persistence, - policy: policy, - isRetryable: isRetryable, - } -} - -// NewExecutionPersistenceRetryableClient creates a client to manage executions -func NewExecutionPersistenceRetryableClient( - persistence ExecutionManager, - policy backoff.RetryPolicy, - isRetryable backoff.IsRetryable, -) ExecutionManager { - return &executionRetryablePersistenceClient{ - persistence: persistence, - policy: policy, - isRetryable: isRetryable, - } -} - -// NewTaskPersistenceRetryableClient creates a client to manage tasks -func NewTaskPersistenceRetryableClient( - persistence TaskManager, - policy backoff.RetryPolicy, - isRetryable backoff.IsRetryable, -) TaskManager { - return &taskRetryablePersistenceClient{ - persistence: persistence, - policy: policy, - isRetryable: 
isRetryable, - } -} - -// NewMetadataPersistenceRetryableClient creates a MetadataManager client to manage metadata -func NewMetadataPersistenceRetryableClient( - persistence MetadataManager, - policy backoff.RetryPolicy, - isRetryable backoff.IsRetryable, -) MetadataManager { - return &metadataRetryablePersistenceClient{ - persistence: persistence, - policy: policy, - isRetryable: isRetryable, - } -} - -// NewClusterMetadataPersistenceRetryableClient creates a MetadataManager client to manage metadata -func NewClusterMetadataPersistenceRetryableClient( - persistence ClusterMetadataManager, - policy backoff.RetryPolicy, - isRetryable backoff.IsRetryable, -) ClusterMetadataManager { - return &clusterMetadataRetryablePersistenceClient{ - persistence: persistence, - policy: policy, - isRetryable: isRetryable, - } -} - -// NewQueuePersistenceRetryableClient creates a client to manage queue -func NewQueuePersistenceRetryableClient( - persistence Queue, - policy backoff.RetryPolicy, - isRetryable backoff.IsRetryable, -) Queue { - return &queueRetryablePersistenceClient{ - persistence: persistence, - policy: policy, - isRetryable: isRetryable, - } -} - -func (p *shardRetryablePersistenceClient) GetName() string { - return p.persistence.GetName() -} - -func (p *shardRetryablePersistenceClient) GetOrCreateShard( - ctx context.Context, - request *GetOrCreateShardRequest, -) (*GetOrCreateShardResponse, error) { - var response *GetOrCreateShardResponse - op := func(ctx context.Context) error { - var err error - response, err = p.persistence.GetOrCreateShard(ctx, request) - return err - } - - err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) - return response, err -} - -func (p *shardRetryablePersistenceClient) UpdateShard( - ctx context.Context, - request *UpdateShardRequest, -) error { - op := func(ctx context.Context) error { - return p.persistence.UpdateShard(ctx, request) - } - - return backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) -} - -func (p *shardRetryablePersistenceClient) AssertShardOwnership( - ctx context.Context, - request *AssertShardOwnershipRequest, -) error { - op := func(ctx context.Context) error { - return p.persistence.AssertShardOwnership(ctx, request) - } - - return backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) -} - -func (p *shardRetryablePersistenceClient) Close() { - p.persistence.Close() -} - -func (p *executionRetryablePersistenceClient) GetName() string { - return p.persistence.GetName() -} - -func (p *executionRetryablePersistenceClient) GetHistoryBranchUtil() HistoryBranchUtil { - return p.persistence.GetHistoryBranchUtil() -} - -func (p *executionRetryablePersistenceClient) CreateWorkflowExecution( - ctx context.Context, - request *CreateWorkflowExecutionRequest, -) (*CreateWorkflowExecutionResponse, error) { - var response *CreateWorkflowExecutionResponse - op := func(ctx context.Context) error { - var err error - response, err = p.persistence.CreateWorkflowExecution(ctx, request) - return err - } - - err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) - return response, err -} - -func (p *executionRetryablePersistenceClient) GetWorkflowExecution( - ctx context.Context, - request *GetWorkflowExecutionRequest, -) (*GetWorkflowExecutionResponse, error) { - var response *GetWorkflowExecutionResponse - op := func(ctx context.Context) error { - var err error - response, err = p.persistence.GetWorkflowExecution(ctx, request) - return err - } - - err := backoff.ThrottleRetryContext(ctx, op, p.policy, 
p.isRetryable) - return response, err -} - -func (p *executionRetryablePersistenceClient) SetWorkflowExecution( - ctx context.Context, - request *SetWorkflowExecutionRequest, -) (*SetWorkflowExecutionResponse, error) { - var response *SetWorkflowExecutionResponse - op := func(ctx context.Context) error { - var err error - response, err = p.persistence.SetWorkflowExecution(ctx, request) - return err - } - - err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) - return response, err -} - -func (p *executionRetryablePersistenceClient) UpdateWorkflowExecution( - ctx context.Context, - request *UpdateWorkflowExecutionRequest, -) (*UpdateWorkflowExecutionResponse, error) { - var response *UpdateWorkflowExecutionResponse - op := func(ctx context.Context) error { - var err error - response, err = p.persistence.UpdateWorkflowExecution(ctx, request) - return err - } - - err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) - return response, err -} - -func (p *executionRetryablePersistenceClient) ConflictResolveWorkflowExecution( - ctx context.Context, - request *ConflictResolveWorkflowExecutionRequest, -) (*ConflictResolveWorkflowExecutionResponse, error) { - var response *ConflictResolveWorkflowExecutionResponse - op := func(ctx context.Context) error { - var err error - response, err = p.persistence.ConflictResolveWorkflowExecution(ctx, request) - return err - } - - err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) - return response, err -} - -func (p *executionRetryablePersistenceClient) DeleteWorkflowExecution( - ctx context.Context, - request *DeleteWorkflowExecutionRequest, -) error { - op := func(ctx context.Context) error { - return p.persistence.DeleteWorkflowExecution(ctx, request) - } - - return backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) -} - -func (p *executionRetryablePersistenceClient) DeleteCurrentWorkflowExecution( - ctx context.Context, - request *DeleteCurrentWorkflowExecutionRequest, -) error { - op := func(ctx context.Context) error { - return p.persistence.DeleteCurrentWorkflowExecution(ctx, request) - } - - return backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) -} - -func (p *executionRetryablePersistenceClient) GetCurrentExecution( - ctx context.Context, - request *GetCurrentExecutionRequest, -) (*GetCurrentExecutionResponse, error) { - var response *GetCurrentExecutionResponse - op := func(ctx context.Context) error { - var err error - response, err = p.persistence.GetCurrentExecution(ctx, request) - return err - } - - err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) - return response, err -} - -func (p *executionRetryablePersistenceClient) ListConcreteExecutions( - ctx context.Context, - request *ListConcreteExecutionsRequest, -) (*ListConcreteExecutionsResponse, error) { - var response *ListConcreteExecutionsResponse - op := func(ctx context.Context) error { - var err error - response, err = p.persistence.ListConcreteExecutions(ctx, request) - return err - } - - err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) - return response, err -} - -func (p *executionRetryablePersistenceClient) RegisterHistoryTaskReader( - ctx context.Context, - request *RegisterHistoryTaskReaderRequest, -) error { - // hint methods don't actually hint DB, retry won't help - return p.persistence.RegisterHistoryTaskReader(ctx, request) -} - -func (p *executionRetryablePersistenceClient) UnregisterHistoryTaskReader( - ctx context.Context, - request 
*UnregisterHistoryTaskReaderRequest, -) { - // hint methods don't actually hint DB, retry won't help - p.persistence.UnregisterHistoryTaskReader(ctx, request) -} - -func (p *executionRetryablePersistenceClient) UpdateHistoryTaskReaderProgress( - ctx context.Context, - request *UpdateHistoryTaskReaderProgressRequest, -) { - // hint methods don't actually hint DB, retry won't help - p.persistence.UpdateHistoryTaskReaderProgress(ctx, request) -} - -func (p *executionRetryablePersistenceClient) AddHistoryTasks( - ctx context.Context, - request *AddHistoryTasksRequest, -) error { - op := func(ctx context.Context) error { - return p.persistence.AddHistoryTasks(ctx, request) - } - - return backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) -} - -func (p *executionRetryablePersistenceClient) GetHistoryTasks( - ctx context.Context, - request *GetHistoryTasksRequest, -) (*GetHistoryTasksResponse, error) { - var response *GetHistoryTasksResponse - op := func(ctx context.Context) error { - var err error - response, err = p.persistence.GetHistoryTasks(ctx, request) - return err - } - - err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) - return response, err -} - -func (p *executionRetryablePersistenceClient) CompleteHistoryTask( - ctx context.Context, - request *CompleteHistoryTaskRequest, -) error { - op := func(ctx context.Context) error { - return p.persistence.CompleteHistoryTask(ctx, request) - } - - return backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) -} - -func (p *executionRetryablePersistenceClient) RangeCompleteHistoryTasks( - ctx context.Context, - request *RangeCompleteHistoryTasksRequest, -) error { - op := func(ctx context.Context) error { - return p.persistence.RangeCompleteHistoryTasks(ctx, request) - } - - return backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) -} - -func (p *executionRetryablePersistenceClient) PutReplicationTaskToDLQ( - ctx context.Context, - request *PutReplicationTaskToDLQRequest, -) error { - op := func(ctx context.Context) error { - return p.persistence.PutReplicationTaskToDLQ(ctx, request) - } - - return backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) -} - -func (p *executionRetryablePersistenceClient) GetReplicationTasksFromDLQ( - ctx context.Context, - request *GetReplicationTasksFromDLQRequest, -) (*GetHistoryTasksResponse, error) { - var response *GetHistoryTasksResponse - op := func(ctx context.Context) error { - var err error - response, err = p.persistence.GetReplicationTasksFromDLQ(ctx, request) - return err - } - - err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) - return response, err -} - -func (p *executionRetryablePersistenceClient) DeleteReplicationTaskFromDLQ( - ctx context.Context, - request *DeleteReplicationTaskFromDLQRequest, -) error { - op := func(ctx context.Context) error { - return p.persistence.DeleteReplicationTaskFromDLQ(ctx, request) - } - - return backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) -} - -func (p *executionRetryablePersistenceClient) RangeDeleteReplicationTaskFromDLQ( - ctx context.Context, - request *RangeDeleteReplicationTaskFromDLQRequest, -) error { - op := func(ctx context.Context) error { - return p.persistence.RangeDeleteReplicationTaskFromDLQ(ctx, request) - } - - return backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) -} - -func (p *executionRetryablePersistenceClient) IsReplicationDLQEmpty( - ctx context.Context, - request *GetReplicationTasksFromDLQRequest, -) (bool, error) { - var 
isEmpty bool - op := func(ctx context.Context) error { - var err error - isEmpty, err = p.persistence.IsReplicationDLQEmpty(ctx, request) - return err - } - - err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) - return isEmpty, err -} - -// AppendHistoryNodes add a node to history node table -func (p *executionRetryablePersistenceClient) AppendHistoryNodes( - ctx context.Context, - request *AppendHistoryNodesRequest, -) (*AppendHistoryNodesResponse, error) { - var response *AppendHistoryNodesResponse - op := func(ctx context.Context) error { - var err error - response, err = p.persistence.AppendHistoryNodes(ctx, request) - return err - } - - err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) - return response, err -} - -// AppendRawHistoryNodes add a node to history node table -func (p *executionRetryablePersistenceClient) AppendRawHistoryNodes( - ctx context.Context, - request *AppendRawHistoryNodesRequest, -) (*AppendHistoryNodesResponse, error) { - var response *AppendHistoryNodesResponse - op := func(ctx context.Context) error { - var err error - response, err = p.persistence.AppendRawHistoryNodes(ctx, request) - return err - } - - err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) - return response, err -} - -// ReadHistoryBranch returns history node data for a branch -func (p *executionRetryablePersistenceClient) ReadHistoryBranch( - ctx context.Context, - request *ReadHistoryBranchRequest, -) (*ReadHistoryBranchResponse, error) { - var response *ReadHistoryBranchResponse - op := func(ctx context.Context) error { - var err error - response, err = p.persistence.ReadHistoryBranch(ctx, request) - return err - } - - err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) - return response, err -} - -// ReadHistoryBranch returns history node data for a branch -func (p *executionRetryablePersistenceClient) ReadHistoryBranchReverse( - ctx context.Context, - request *ReadHistoryBranchReverseRequest, -) (*ReadHistoryBranchReverseResponse, error) { - var response *ReadHistoryBranchReverseResponse - op := func(ctx context.Context) error { - var err error - response, err = p.persistence.ReadHistoryBranchReverse(ctx, request) - return err - } - - err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) - return response, err -} - -// ReadHistoryBranchByBatch returns history node data for a branch -func (p *executionRetryablePersistenceClient) ReadHistoryBranchByBatch( - ctx context.Context, - request *ReadHistoryBranchRequest, -) (*ReadHistoryBranchByBatchResponse, error) { - var response *ReadHistoryBranchByBatchResponse - op := func(ctx context.Context) error { - var err error - response, err = p.persistence.ReadHistoryBranchByBatch(ctx, request) - return err - } - - err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) - return response, err -} - -// ReadHistoryBranchByBatch returns history node data for a branch -func (p *executionRetryablePersistenceClient) ReadRawHistoryBranch( - ctx context.Context, - request *ReadHistoryBranchRequest, -) (*ReadRawHistoryBranchResponse, error) { - var response *ReadRawHistoryBranchResponse - op := func(ctx context.Context) error { - var err error - response, err = p.persistence.ReadRawHistoryBranch(ctx, request) - return err - } - - err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) - return response, err -} - -// ForkHistoryBranch forks a new branch from a old branch -func (p *executionRetryablePersistenceClient) ForkHistoryBranch( - ctx 
context.Context, - request *ForkHistoryBranchRequest, -) (*ForkHistoryBranchResponse, error) { - var response *ForkHistoryBranchResponse - op := func(ctx context.Context) error { - var err error - response, err = p.persistence.ForkHistoryBranch(ctx, request) - return err - } - - err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) - return response, err -} - -// DeleteHistoryBranch removes a branch -func (p *executionRetryablePersistenceClient) DeleteHistoryBranch( - ctx context.Context, - request *DeleteHistoryBranchRequest, -) error { - op := func(ctx context.Context) error { - return p.persistence.DeleteHistoryBranch(ctx, request) - } - - return backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) -} - -// TrimHistoryBranch trims a branch -func (p *executionRetryablePersistenceClient) TrimHistoryBranch( - ctx context.Context, - request *TrimHistoryBranchRequest, -) (*TrimHistoryBranchResponse, error) { - var response *TrimHistoryBranchResponse - op := func(ctx context.Context) error { - var err error - response, err = p.persistence.TrimHistoryBranch(ctx, request) - return err - } - - err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) - return response, err -} - -// GetHistoryTree returns all branch information of a tree -func (p *executionRetryablePersistenceClient) GetHistoryTree( - ctx context.Context, - request *GetHistoryTreeRequest, -) (*GetHistoryTreeResponse, error) { - var response *GetHistoryTreeResponse - op := func(ctx context.Context) error { - var err error - response, err = p.persistence.GetHistoryTree(ctx, request) - return err - } - - err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) - return response, err -} - -func (p *executionRetryablePersistenceClient) GetAllHistoryTreeBranches( - ctx context.Context, - request *GetAllHistoryTreeBranchesRequest, -) (*GetAllHistoryTreeBranchesResponse, error) { - var response *GetAllHistoryTreeBranchesResponse - op := func(ctx context.Context) error { - var err error - response, err = p.persistence.GetAllHistoryTreeBranches(ctx, request) - return err - } - - err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) - return response, err -} - -func (p *executionRetryablePersistenceClient) Close() { - p.persistence.Close() -} - -func (p *taskRetryablePersistenceClient) GetName() string { - return p.persistence.GetName() -} - -func (p *taskRetryablePersistenceClient) CreateTasks( - ctx context.Context, - request *CreateTasksRequest, -) (*CreateTasksResponse, error) { - var response *CreateTasksResponse - op := func(ctx context.Context) error { - var err error - response, err = p.persistence.CreateTasks(ctx, request) - return err - } - - err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) - return response, err -} - -func (p *taskRetryablePersistenceClient) GetTasks( - ctx context.Context, - request *GetTasksRequest, -) (*GetTasksResponse, error) { - var response *GetTasksResponse - op := func(ctx context.Context) error { - var err error - response, err = p.persistence.GetTasks(ctx, request) - return err - } - - err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) - return response, err -} - -func (p *taskRetryablePersistenceClient) CompleteTask( - ctx context.Context, - request *CompleteTaskRequest, -) error { - op := func(ctx context.Context) error { - return p.persistence.CompleteTask(ctx, request) - } - - return backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) -} - -func (p *taskRetryablePersistenceClient) 
CompleteTasksLessThan( - ctx context.Context, - request *CompleteTasksLessThanRequest, -) (int, error) { - var response int - op := func(ctx context.Context) error { - var err error - response, err = p.persistence.CompleteTasksLessThan(ctx, request) - return err - } - - err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) - return response, err -} - -func (p *taskRetryablePersistenceClient) CreateTaskQueue( - ctx context.Context, - request *CreateTaskQueueRequest, -) (*CreateTaskQueueResponse, error) { - var response *CreateTaskQueueResponse - op := func(ctx context.Context) error { - var err error - response, err = p.persistence.CreateTaskQueue(ctx, request) - return err - } - - err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) - return response, err -} - -func (p *taskRetryablePersistenceClient) UpdateTaskQueue( - ctx context.Context, - request *UpdateTaskQueueRequest, -) (*UpdateTaskQueueResponse, error) { - var response *UpdateTaskQueueResponse - op := func(ctx context.Context) error { - var err error - response, err = p.persistence.UpdateTaskQueue(ctx, request) - return err - } - - err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) - return response, err -} - -func (p *taskRetryablePersistenceClient) GetTaskQueue( - ctx context.Context, - request *GetTaskQueueRequest, -) (*GetTaskQueueResponse, error) { - var response *GetTaskQueueResponse - op := func(ctx context.Context) error { - var err error - response, err = p.persistence.GetTaskQueue(ctx, request) - return err - } - - err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) - return response, err -} - -func (p *taskRetryablePersistenceClient) ListTaskQueue( - ctx context.Context, - request *ListTaskQueueRequest, -) (*ListTaskQueueResponse, error) { - var response *ListTaskQueueResponse - op := func(ctx context.Context) error { - var err error - response, err = p.persistence.ListTaskQueue(ctx, request) - return err - } - - err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) - return response, err -} - -func (p *taskRetryablePersistenceClient) DeleteTaskQueue( - ctx context.Context, - request *DeleteTaskQueueRequest, -) error { - op := func(ctx context.Context) error { - return p.persistence.DeleteTaskQueue(ctx, request) - } - - return backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) -} - -func (p *taskRetryablePersistenceClient) GetTaskQueueUserData( - ctx context.Context, - request *GetTaskQueueUserDataRequest, -) (*GetTaskQueueUserDataResponse, error) { - var response *GetTaskQueueUserDataResponse - op := func(ctx context.Context) error { - var err error - response, err = p.persistence.GetTaskQueueUserData(ctx, request) - return err - } - - err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) - return response, err -} - -func (p *taskRetryablePersistenceClient) UpdateTaskQueueUserData( - ctx context.Context, - request *UpdateTaskQueueUserDataRequest, -) error { - op := func(ctx context.Context) error { - return p.persistence.UpdateTaskQueueUserData(ctx, request) - } - - err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) - return err -} - -func (p *taskRetryablePersistenceClient) ListTaskQueueUserDataEntries( - ctx context.Context, - request *ListTaskQueueUserDataEntriesRequest, -) (*ListTaskQueueUserDataEntriesResponse, error) { - var response *ListTaskQueueUserDataEntriesResponse - op := func(ctx context.Context) error { - var err error - response, err = 
p.persistence.ListTaskQueueUserDataEntries(ctx, request) - return err - } - - err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) - return response, err -} - -func (p *taskRetryablePersistenceClient) GetTaskQueuesByBuildId(ctx context.Context, request *GetTaskQueuesByBuildIdRequest) ([]string, error) { - var response []string - op := func(ctx context.Context) error { - var err error - response, err = p.persistence.GetTaskQueuesByBuildId(ctx, request) - return err - } - - err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) - return response, err -} - -func (p *taskRetryablePersistenceClient) CountTaskQueuesByBuildId(ctx context.Context, request *CountTaskQueuesByBuildIdRequest) (int, error) { - var response int - op := func(ctx context.Context) error { - var err error - response, err = p.persistence.CountTaskQueuesByBuildId(ctx, request) - return err - } - - err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) - return response, err -} - -func (p *taskRetryablePersistenceClient) Close() { - p.persistence.Close() -} - -func (p *metadataRetryablePersistenceClient) GetName() string { - return p.persistence.GetName() -} - -func (p *metadataRetryablePersistenceClient) CreateNamespace( - ctx context.Context, - request *CreateNamespaceRequest, -) (*CreateNamespaceResponse, error) { - var response *CreateNamespaceResponse - op := func(ctx context.Context) error { - var err error - response, err = p.persistence.CreateNamespace(ctx, request) - return err - } - - err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) - return response, err -} - -func (p *metadataRetryablePersistenceClient) GetNamespace( - ctx context.Context, - request *GetNamespaceRequest, -) (*GetNamespaceResponse, error) { - var response *GetNamespaceResponse - op := func(ctx context.Context) error { - var err error - response, err = p.persistence.GetNamespace(ctx, request) - return err - } - - err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) - return response, err -} - -func (p *metadataRetryablePersistenceClient) UpdateNamespace( - ctx context.Context, - request *UpdateNamespaceRequest, -) error { - op := func(ctx context.Context) error { - return p.persistence.UpdateNamespace(ctx, request) - } - - return backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) -} - -func (p *metadataRetryablePersistenceClient) RenameNamespace( - ctx context.Context, - request *RenameNamespaceRequest, -) error { - op := func(ctx context.Context) error { - return p.persistence.RenameNamespace(ctx, request) - } - - return backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) -} - -func (p *metadataRetryablePersistenceClient) DeleteNamespace( - ctx context.Context, - request *DeleteNamespaceRequest, -) error { - op := func(ctx context.Context) error { - return p.persistence.DeleteNamespace(ctx, request) - } - - return backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) -} - -func (p *metadataRetryablePersistenceClient) DeleteNamespaceByName( - ctx context.Context, - request *DeleteNamespaceByNameRequest, -) error { - op := func(ctx context.Context) error { - return p.persistence.DeleteNamespaceByName(ctx, request) - } - - return backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) -} - -func (p *metadataRetryablePersistenceClient) ListNamespaces( - ctx context.Context, - request *ListNamespacesRequest, -) (*ListNamespacesResponse, error) { - var response *ListNamespacesResponse - op := func(ctx context.Context) error { - var 
err error - response, err = p.persistence.ListNamespaces(ctx, request) - return err - } - - err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) - return response, err -} - -func (p *metadataRetryablePersistenceClient) GetMetadata( - ctx context.Context, -) (*GetMetadataResponse, error) { - var response *GetMetadataResponse - op := func(ctx context.Context) error { - var err error - response, err = p.persistence.GetMetadata(ctx) - return err - } - - err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) - return response, err -} - -func (p *metadataRetryablePersistenceClient) InitializeSystemNamespaces( - ctx context.Context, - currentClusterName string, -) error { - op := func(ctx context.Context) error { - return p.persistence.InitializeSystemNamespaces(ctx, currentClusterName) - } - - return backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) -} - -func (p *metadataRetryablePersistenceClient) Close() { - p.persistence.Close() -} - -func (p *clusterMetadataRetryablePersistenceClient) GetName() string { - return p.persistence.GetName() -} - -func (p *clusterMetadataRetryablePersistenceClient) GetClusterMembers( - ctx context.Context, - request *GetClusterMembersRequest, -) (*GetClusterMembersResponse, error) { - var response *GetClusterMembersResponse - op := func(ctx context.Context) error { - var err error - response, err = p.persistence.GetClusterMembers(ctx, request) - return err - } - - err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) - return response, err -} - -func (p *clusterMetadataRetryablePersistenceClient) UpsertClusterMembership( - ctx context.Context, - request *UpsertClusterMembershipRequest, -) error { - op := func(ctx context.Context) error { - return p.persistence.UpsertClusterMembership(ctx, request) - } - - return backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) -} - -func (p *clusterMetadataRetryablePersistenceClient) PruneClusterMembership( - ctx context.Context, - request *PruneClusterMembershipRequest, -) error { - op := func(ctx context.Context) error { - return p.persistence.PruneClusterMembership(ctx, request) - } - - return backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) -} - -func (p *clusterMetadataRetryablePersistenceClient) ListClusterMetadata( - ctx context.Context, - request *ListClusterMetadataRequest, -) (*ListClusterMetadataResponse, error) { - var response *ListClusterMetadataResponse - op := func(ctx context.Context) error { - var err error - response, err = p.persistence.ListClusterMetadata(ctx, request) - return err - } - - err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) - return response, err -} - -func (p *clusterMetadataRetryablePersistenceClient) GetCurrentClusterMetadata( - ctx context.Context, -) (*GetClusterMetadataResponse, error) { - var response *GetClusterMetadataResponse - op := func(ctx context.Context) error { - var err error - response, err = p.persistence.GetCurrentClusterMetadata(ctx) - return err - } - - err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) - return response, err -} - -func (p *clusterMetadataRetryablePersistenceClient) GetClusterMetadata( - ctx context.Context, - request *GetClusterMetadataRequest, -) (*GetClusterMetadataResponse, error) { - var response *GetClusterMetadataResponse - op := func(ctx context.Context) error { - var err error - response, err = p.persistence.GetClusterMetadata(ctx, request) - return err - } - - err := backoff.ThrottleRetryContext(ctx, op, p.policy, 
p.isRetryable) - return response, err -} - -func (p *clusterMetadataRetryablePersistenceClient) SaveClusterMetadata( - ctx context.Context, - request *SaveClusterMetadataRequest, -) (bool, error) { - var response bool - op := func(ctx context.Context) error { - var err error - response, err = p.persistence.SaveClusterMetadata(ctx, request) - return err - } - - err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) - return response, err -} - -func (p *clusterMetadataRetryablePersistenceClient) DeleteClusterMetadata( - ctx context.Context, - request *DeleteClusterMetadataRequest, -) error { - op := func(ctx context.Context) error { - return p.persistence.DeleteClusterMetadata(ctx, request) - } - - return backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) -} - -func (p *clusterMetadataRetryablePersistenceClient) Close() { - p.persistence.Close() -} - -func (p *queueRetryablePersistenceClient) Init( - ctx context.Context, - blob *commonpb.DataBlob, -) error { - op := func(ctx context.Context) error { - return p.persistence.Init(ctx, blob) - } - - return backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) -} - -func (p *queueRetryablePersistenceClient) EnqueueMessage( - ctx context.Context, - blob commonpb.DataBlob, -) error { - op := func(ctx context.Context) error { - return p.persistence.EnqueueMessage(ctx, blob) - } - - return backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) -} - -func (p *queueRetryablePersistenceClient) ReadMessages( - ctx context.Context, - lastMessageID int64, - maxCount int, -) ([]*QueueMessage, error) { - var response []*QueueMessage - op := func(ctx context.Context) error { - var err error - response, err = p.persistence.ReadMessages(ctx, lastMessageID, maxCount) - return err - } - - err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) - return response, err -} - -func (p *queueRetryablePersistenceClient) UpdateAckLevel( - ctx context.Context, - metadata *InternalQueueMetadata, -) error { - op := func(ctx context.Context) error { - return p.persistence.UpdateAckLevel(ctx, metadata) - } - - return backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) -} - -func (p *queueRetryablePersistenceClient) GetAckLevels( - ctx context.Context, -) (*InternalQueueMetadata, error) { - var response *InternalQueueMetadata - op := func(ctx context.Context) error { - var err error - response, err = p.persistence.GetAckLevels(ctx) - return err - } - - err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) - return response, err -} - -func (p *queueRetryablePersistenceClient) DeleteMessagesBefore( - ctx context.Context, - messageID int64, -) error { - op := func(ctx context.Context) error { - return p.persistence.DeleteMessagesBefore(ctx, messageID) - } - - return backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) -} - -func (p *queueRetryablePersistenceClient) EnqueueMessageToDLQ( - ctx context.Context, - blob commonpb.DataBlob, -) (int64, error) { - var response int64 - op := func(ctx context.Context) error { - var err error - response, err = p.persistence.EnqueueMessageToDLQ(ctx, blob) - return err - } - - err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) - return response, err -} - -func (p *queueRetryablePersistenceClient) ReadMessagesFromDLQ( - ctx context.Context, - firstMessageID int64, - lastMessageID int64, - pageSize int, - pageToken []byte, -) ([]*QueueMessage, []byte, error) { - var messages []*QueueMessage - var nextPageToken []byte - op := func(ctx 
context.Context) error { - var err error - messages, nextPageToken, err = p.persistence.ReadMessagesFromDLQ(ctx, firstMessageID, lastMessageID, pageSize, pageToken) - return err - } - - err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) - return messages, nextPageToken, err -} - -func (p *queueRetryablePersistenceClient) RangeDeleteMessagesFromDLQ( - ctx context.Context, - firstMessageID int64, - lastMessageID int64, -) error { - op := func(ctx context.Context) error { - return p.persistence.RangeDeleteMessagesFromDLQ(ctx, firstMessageID, lastMessageID) - } - - return backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) -} -func (p *queueRetryablePersistenceClient) UpdateDLQAckLevel( - ctx context.Context, - metadata *InternalQueueMetadata, -) error { - op := func(ctx context.Context) error { - return p.persistence.UpdateDLQAckLevel(ctx, metadata) - } - - return backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) -} - -func (p *queueRetryablePersistenceClient) GetDLQAckLevels( - ctx context.Context, -) (*InternalQueueMetadata, error) { - var response *InternalQueueMetadata - op := func(ctx context.Context) error { - var err error - response, err = p.persistence.GetDLQAckLevels(ctx) - return err - } - - err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) - return response, err -} - -func (p *queueRetryablePersistenceClient) DeleteMessageFromDLQ( - ctx context.Context, - messageID int64, -) error { - op := func(ctx context.Context) error { - return p.persistence.DeleteMessageFromDLQ(ctx, messageID) - } - - return backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) -} - -func (p *queueRetryablePersistenceClient) Close() { - p.persistence.Close() -} diff -Nru temporal-1.21.5-1/src/common/persistence/persistence_interface.go temporal-1.22.5/src/common/persistence/persistence_interface.go --- temporal-1.21.5-1/src/common/persistence/persistence_interface.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/persistence_interface.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,729 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
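Every method in the retryable persistence clients removed above follows the same closure-plus-ThrottleRetryContext idiom: capture the typed response in a local variable, wrap the underlying persistence call in a func(ctx context.Context) error, and hand that closure to backoff.ThrottleRetryContext together with the client's retry policy and retryability predicate. Below is a minimal sketch of that idiom, assuming it sits inside the temporal server module; the generic retryGet helper is hypothetical (the deleted file simply repeated the pattern inline in each wrapper method), while backoff.ThrottleRetryContext, backoff.RetryPolicy and backoff.IsRetryable are the types and call shape visible in the removed code.

package persistence

import (
	"context"

	"go.temporal.io/server/common/backoff"
)

// retryGet is a hypothetical helper illustrating the shared idiom; it is not
// part of the patch.
func retryGet[T any](
	ctx context.Context,
	policy backoff.RetryPolicy,
	isRetryable backoff.IsRetryable,
	call func(context.Context) (T, error),
) (T, error) {
	var response T
	// The closure captures the typed response; ThrottleRetryContext re-runs it
	// until it succeeds, the error is deemed non-retryable, or the policy gives up.
	op := func(ctx context.Context) error {
		var err error
		response, err = call(ctx)
		return err
	}
	err := backoff.ThrottleRetryContext(ctx, op, policy, isRetryable)
	return response, err
}

With such a helper, a wrapper like GetWorkflowExecution reduces to a single call: response, err := retryGet(ctx, p.policy, p.isRetryable, func(ctx context.Context) (*GetWorkflowExecutionResponse, error) { return p.persistence.GetWorkflowExecution(ctx, request) }).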
+ +//go:generate mockgen -copyright_file ../../LICENSE -package mock -source $GOFILE -destination mock/store_mock.go -aux_files go.temporal.io/server/common/persistence=data_interfaces.go + +package persistence + +import ( + "context" + "math" + "time" + + commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" + + persistencespb "go.temporal.io/server/api/persistence/v1" + "go.temporal.io/server/service/history/tasks" +) + +const ( + EmptyQueueMessageID = int64(-1) + MaxQueueMessageID = math.MaxInt64 +) + +type ( + // //////////////////////////////////////////////////////////////////// + // Persistence interface is a lower layer of dataInterface. + // The intention is to let different persistence implementation(SQL,Cassandra/etc) share some common logic + // Right now the only common part is serialization/deserialization. + // //////////////////////////////////////////////////////////////////// + + // ShardStore is a lower level of ShardManager + ShardStore interface { + Closeable + GetName() string + GetClusterName() string + GetOrCreateShard(ctx context.Context, request *InternalGetOrCreateShardRequest) (*InternalGetOrCreateShardResponse, error) + UpdateShard(ctx context.Context, request *InternalUpdateShardRequest) error + AssertShardOwnership(ctx context.Context, request *AssertShardOwnershipRequest) error + } + + // TaskStore is a lower level of TaskManager + TaskStore interface { + Closeable + GetName() string + CreateTaskQueue(ctx context.Context, request *InternalCreateTaskQueueRequest) error + GetTaskQueue(ctx context.Context, request *InternalGetTaskQueueRequest) (*InternalGetTaskQueueResponse, error) + UpdateTaskQueue(ctx context.Context, request *InternalUpdateTaskQueueRequest) (*UpdateTaskQueueResponse, error) + ListTaskQueue(ctx context.Context, request *ListTaskQueueRequest) (*InternalListTaskQueueResponse, error) + DeleteTaskQueue(ctx context.Context, request *DeleteTaskQueueRequest) error + CreateTasks(ctx context.Context, request *InternalCreateTasksRequest) (*CreateTasksResponse, error) + GetTasks(ctx context.Context, request *GetTasksRequest) (*InternalGetTasksResponse, error) + CompleteTask(ctx context.Context, request *CompleteTaskRequest) error + CompleteTasksLessThan(ctx context.Context, request *CompleteTasksLessThanRequest) (int, error) + GetTaskQueueUserData(ctx context.Context, request *GetTaskQueueUserDataRequest) (*InternalGetTaskQueueUserDataResponse, error) + UpdateTaskQueueUserData(ctx context.Context, request *InternalUpdateTaskQueueUserDataRequest) error + ListTaskQueueUserDataEntries(ctx context.Context, request *ListTaskQueueUserDataEntriesRequest) (*InternalListTaskQueueUserDataEntriesResponse, error) + GetTaskQueuesByBuildId(ctx context.Context, request *GetTaskQueuesByBuildIdRequest) ([]string, error) + CountTaskQueuesByBuildId(ctx context.Context, request *CountTaskQueuesByBuildIdRequest) (int, error) + } + // MetadataStore is a lower level of MetadataManager + MetadataStore interface { + Closeable + GetName() string + CreateNamespace(ctx context.Context, request *InternalCreateNamespaceRequest) (*CreateNamespaceResponse, error) + GetNamespace(ctx context.Context, request *GetNamespaceRequest) (*InternalGetNamespaceResponse, error) + UpdateNamespace(ctx context.Context, request *InternalUpdateNamespaceRequest) error + RenameNamespace(ctx context.Context, request *InternalRenameNamespaceRequest) error + DeleteNamespace(ctx context.Context, request *DeleteNamespaceRequest) error + DeleteNamespaceByName(ctx context.Context, 
request *DeleteNamespaceByNameRequest) error + ListNamespaces(ctx context.Context, request *InternalListNamespacesRequest) (*InternalListNamespacesResponse, error) + GetMetadata(ctx context.Context) (*GetMetadataResponse, error) + } + + // ClusterMetadataStore is a lower level of ClusterMetadataManager. + // There is no Internal constructs needed to abstract away at the interface level currently, + // so we can reimplement the ClusterMetadataManager and leave this as a placeholder. + ClusterMetadataStore interface { + Closeable + GetName() string + ListClusterMetadata(ctx context.Context, request *InternalListClusterMetadataRequest) (*InternalListClusterMetadataResponse, error) + GetClusterMetadata(ctx context.Context, request *InternalGetClusterMetadataRequest) (*InternalGetClusterMetadataResponse, error) + SaveClusterMetadata(ctx context.Context, request *InternalSaveClusterMetadataRequest) (bool, error) + DeleteClusterMetadata(ctx context.Context, request *InternalDeleteClusterMetadataRequest) error + // Membership APIs + GetClusterMembers(ctx context.Context, request *GetClusterMembersRequest) (*GetClusterMembersResponse, error) + UpsertClusterMembership(ctx context.Context, request *UpsertClusterMembershipRequest) error + PruneClusterMembership(ctx context.Context, request *PruneClusterMembershipRequest) error + } + + // ExecutionStore is used to manage workflow execution including mutable states / history / tasks. + ExecutionStore interface { + Closeable + GetName() string + GetHistoryBranchUtil() HistoryBranchUtil + + // The below three APIs are related to serialization/deserialization + CreateWorkflowExecution(ctx context.Context, request *InternalCreateWorkflowExecutionRequest) (*InternalCreateWorkflowExecutionResponse, error) + UpdateWorkflowExecution(ctx context.Context, request *InternalUpdateWorkflowExecutionRequest) error + ConflictResolveWorkflowExecution(ctx context.Context, request *InternalConflictResolveWorkflowExecutionRequest) error + + DeleteWorkflowExecution(ctx context.Context, request *DeleteWorkflowExecutionRequest) error + DeleteCurrentWorkflowExecution(ctx context.Context, request *DeleteCurrentWorkflowExecutionRequest) error + GetCurrentExecution(ctx context.Context, request *GetCurrentExecutionRequest) (*InternalGetCurrentExecutionResponse, error) + GetWorkflowExecution(ctx context.Context, request *GetWorkflowExecutionRequest) (*InternalGetWorkflowExecutionResponse, error) + SetWorkflowExecution(ctx context.Context, request *InternalSetWorkflowExecutionRequest) error + + // Scan related methods + ListConcreteExecutions(ctx context.Context, request *ListConcreteExecutionsRequest) (*InternalListConcreteExecutionsResponse, error) + + // Tasks related APIs + + // Hints for persistence implementaion regarding hisotry task readers + RegisterHistoryTaskReader(ctx context.Context, request *RegisterHistoryTaskReaderRequest) error + UnregisterHistoryTaskReader(ctx context.Context, request *UnregisterHistoryTaskReaderRequest) + UpdateHistoryTaskReaderProgress(ctx context.Context, request *UpdateHistoryTaskReaderProgressRequest) + + AddHistoryTasks(ctx context.Context, request *InternalAddHistoryTasksRequest) error + GetHistoryTasks(ctx context.Context, request *GetHistoryTasksRequest) (*InternalGetHistoryTasksResponse, error) + CompleteHistoryTask(ctx context.Context, request *CompleteHistoryTaskRequest) error + RangeCompleteHistoryTasks(ctx context.Context, request *RangeCompleteHistoryTasksRequest) error + + PutReplicationTaskToDLQ(ctx context.Context, request 
*PutReplicationTaskToDLQRequest) error + GetReplicationTasksFromDLQ(ctx context.Context, request *GetReplicationTasksFromDLQRequest) (*InternalGetReplicationTasksFromDLQResponse, error) + DeleteReplicationTaskFromDLQ(ctx context.Context, request *DeleteReplicationTaskFromDLQRequest) error + RangeDeleteReplicationTaskFromDLQ(ctx context.Context, request *RangeDeleteReplicationTaskFromDLQRequest) error + IsReplicationDLQEmpty(ctx context.Context, request *GetReplicationTasksFromDLQRequest) (bool, error) + + // The below are history V2 APIs + // V2 regards history events growing as a tree, decoupled from workflow concepts + + // AppendHistoryNodes add a node to history node table + AppendHistoryNodes(ctx context.Context, request *InternalAppendHistoryNodesRequest) error + // DeleteHistoryNodes delete a node from history node table + DeleteHistoryNodes(ctx context.Context, request *InternalDeleteHistoryNodesRequest) error + // ReadHistoryBranch returns history node data for a branch + ReadHistoryBranch(ctx context.Context, request *InternalReadHistoryBranchRequest) (*InternalReadHistoryBranchResponse, error) + // ForkHistoryBranch forks a new branch from a old branch + ForkHistoryBranch(ctx context.Context, request *InternalForkHistoryBranchRequest) error + // DeleteHistoryBranch removes a branch + DeleteHistoryBranch(ctx context.Context, request *InternalDeleteHistoryBranchRequest) error + // GetHistoryTree returns all branch information of a tree + GetHistoryTree(ctx context.Context, request *GetHistoryTreeRequest) (*InternalGetHistoryTreeResponse, error) + // GetAllHistoryTreeBranches returns all branches of all trees. + // Note that branches may be skipped or duplicated across pages if there are branches created or deleted while + // paginating through results. + GetAllHistoryTreeBranches(ctx context.Context, request *GetAllHistoryTreeBranchesRequest) (*InternalGetAllHistoryTreeBranchesResponse, error) + } + + // Queue is a store to enqueue and get messages + Queue interface { + Closeable + Init(ctx context.Context, blob *commonpb.DataBlob) error + EnqueueMessage(ctx context.Context, blob commonpb.DataBlob) error + ReadMessages(ctx context.Context, lastMessageID int64, maxCount int) ([]*QueueMessage, error) + DeleteMessagesBefore(ctx context.Context, messageID int64) error + UpdateAckLevel(ctx context.Context, metadata *InternalQueueMetadata) error + GetAckLevels(ctx context.Context) (*InternalQueueMetadata, error) + + EnqueueMessageToDLQ(ctx context.Context, blob commonpb.DataBlob) (int64, error) + ReadMessagesFromDLQ(ctx context.Context, firstMessageID int64, lastMessageID int64, pageSize int, pageToken []byte) ([]*QueueMessage, []byte, error) + DeleteMessageFromDLQ(ctx context.Context, messageID int64) error + RangeDeleteMessagesFromDLQ(ctx context.Context, firstMessageID int64, lastMessageID int64) error + UpdateDLQAckLevel(ctx context.Context, metadata *InternalQueueMetadata) error + GetDLQAckLevels(ctx context.Context) (*InternalQueueMetadata, error) + } + + // QueueMessage is the message that stores in the queue + QueueMessage struct { + QueueType QueueType `json:"queue_type"` + ID int64 `json:"message_id"` + Data []byte `json:"message_payload"` + Encoding string `json:"message_encoding"` + } + + InternalQueueMetadata struct { + Blob *commonpb.DataBlob + Version int64 + } + + // InternalGetOrCreateShardRequest is used by ShardStore to retrieve or create a shard. + // GetOrCreateShard should: if shard exists, return it. 
If not, call CreateShardInfo and + // create the shard with the returned value. + InternalGetOrCreateShardRequest struct { + ShardID int32 + CreateShardInfo func() (rangeID int64, shardInfo *commonpb.DataBlob, err error) + LifecycleContext context.Context // cancelled when shard is unloaded + } + + // InternalGetOrCreateShardResponse is the response to GetShard + InternalGetOrCreateShardResponse struct { + ShardInfo *commonpb.DataBlob + } + + // InternalUpdateShardRequest is used by ShardStore to update a shard + InternalUpdateShardRequest struct { + ShardID int32 + RangeID int64 + Owner string + ShardInfo *commonpb.DataBlob + PreviousRangeID int64 + } + + InternalCreateTaskQueueRequest struct { + NamespaceID string + TaskQueue string + TaskType enumspb.TaskQueueType + RangeID int64 + TaskQueueInfo *commonpb.DataBlob + + TaskQueueKind enumspb.TaskQueueKind + ExpiryTime *time.Time + } + + InternalGetTaskQueueRequest struct { + NamespaceID string + TaskQueue string + TaskType enumspb.TaskQueueType + } + + InternalGetTaskQueueResponse struct { + RangeID int64 + TaskQueueInfo *commonpb.DataBlob + } + + InternalGetTaskQueueUserDataResponse struct { + Version int64 + UserData *commonpb.DataBlob + } + + InternalUpdateTaskQueueRequest struct { + NamespaceID string + TaskQueue string + TaskType enumspb.TaskQueueType + RangeID int64 + TaskQueueInfo *commonpb.DataBlob + + TaskQueueKind enumspb.TaskQueueKind + ExpiryTime *time.Time + + PrevRangeID int64 + } + + InternalUpdateTaskQueueUserDataRequest struct { + NamespaceID string + TaskQueue string + Version int64 + UserData *commonpb.DataBlob + // Used to build an index of build_id to task_queues + BuildIdsAdded []string + BuildIdsRemoved []string + } + + InternalTaskQueueUserDataEntry struct { + TaskQueue string + Data *commonpb.DataBlob + Version int64 + } + + InternalListTaskQueueUserDataEntriesResponse struct { + NextPageToken []byte + Entries []InternalTaskQueueUserDataEntry + } + + InternalCreateTasksRequest struct { + NamespaceID string + TaskQueue string + TaskType enumspb.TaskQueueType + RangeID int64 + TaskQueueInfo *commonpb.DataBlob + Tasks []*InternalCreateTask + } + + InternalCreateTask struct { + TaskId int64 + ExpiryTime *time.Time + Task *commonpb.DataBlob + } + + InternalGetTasksResponse struct { + Tasks []*commonpb.DataBlob + NextPageToken []byte + } + + InternalListTaskQueueResponse struct { + Items []*InternalListTaskQueueItem + NextPageToken []byte + } + + InternalListTaskQueueItem struct { + TaskQueue *commonpb.DataBlob // serialized PersistedTaskQueueInfo + RangeID int64 + } + + // DataBlob represents a blob for any binary data. 
+ // It contains raw data, and metadata(right now only encoding) in other field + // Note that it should be only used for Persistence layer, below dataInterface and application(historyEngine/etc) + + // InternalCreateWorkflowExecutionRequest is used to write a new workflow execution + InternalCreateWorkflowExecutionRequest struct { + ShardID int32 + RangeID int64 + + Mode CreateWorkflowMode + + PreviousRunID string + PreviousLastWriteVersion int64 + + NewWorkflowSnapshot InternalWorkflowSnapshot + NewWorkflowNewEvents []*InternalAppendHistoryNodesRequest + } + + // InternalCreateWorkflowExecutionResponse is the response from persistence for create new workflow execution + InternalCreateWorkflowExecutionResponse struct { + } + + // InternalUpdateWorkflowExecutionRequest is used to update a workflow execution for Persistence Interface + InternalUpdateWorkflowExecutionRequest struct { + ShardID int32 + RangeID int64 + + Mode UpdateWorkflowMode + + UpdateWorkflowMutation InternalWorkflowMutation + UpdateWorkflowNewEvents []*InternalAppendHistoryNodesRequest + NewWorkflowSnapshot *InternalWorkflowSnapshot + NewWorkflowNewEvents []*InternalAppendHistoryNodesRequest + } + + // InternalConflictResolveWorkflowExecutionRequest is used to reset workflow execution state for Persistence Interface + InternalConflictResolveWorkflowExecutionRequest struct { + ShardID int32 + RangeID int64 + + Mode ConflictResolveWorkflowMode + + // workflow to be resetted + ResetWorkflowSnapshot InternalWorkflowSnapshot + ResetWorkflowEventsNewEvents []*InternalAppendHistoryNodesRequest + // maybe new workflow + NewWorkflowSnapshot *InternalWorkflowSnapshot + NewWorkflowEventsNewEvents []*InternalAppendHistoryNodesRequest + + // current workflow + CurrentWorkflowMutation *InternalWorkflowMutation + CurrentWorkflowEventsNewEvents []*InternalAppendHistoryNodesRequest + } + InternalSetWorkflowExecutionRequest struct { + ShardID int32 + RangeID int64 + + SetWorkflowSnapshot InternalWorkflowSnapshot + } + + // InternalWorkflowMutableState indicates workflow related state for Persistence Interface + InternalWorkflowMutableState struct { + ActivityInfos map[int64]*commonpb.DataBlob // ActivityInfo + TimerInfos map[string]*commonpb.DataBlob // TimerInfo + ChildExecutionInfos map[int64]*commonpb.DataBlob // ChildExecutionInfo + RequestCancelInfos map[int64]*commonpb.DataBlob // RequestCancelInfo + SignalInfos map[int64]*commonpb.DataBlob // SignalInfo + SignalRequestedIDs []string + ExecutionInfo *commonpb.DataBlob // WorkflowExecutionInfo + ExecutionState *commonpb.DataBlob // WorkflowExecutionState + NextEventID int64 + BufferedEvents []*commonpb.DataBlob + Checksum *commonpb.DataBlob // persistencespb.Checksum + DBRecordVersion int64 + } + + InternalHistoryTask struct { + Key tasks.Key + Blob commonpb.DataBlob + } + + // InternalAddHistoryTasksRequest is used to write new tasks + InternalAddHistoryTasksRequest struct { + ShardID int32 + RangeID int64 + + NamespaceID string + WorkflowID string + RunID string + + Tasks map[tasks.Category][]InternalHistoryTask + } + + // InternalWorkflowMutation is used as generic workflow execution state mutation for Persistence Interface + InternalWorkflowMutation struct { + // TODO: properly set this on call sites + NamespaceID string + WorkflowID string + RunID string + + ExecutionInfo *persistencespb.WorkflowExecutionInfo + ExecutionInfoBlob *commonpb.DataBlob + ExecutionState *persistencespb.WorkflowExecutionState + ExecutionStateBlob *commonpb.DataBlob + NextEventID int64 + StartVersion 
int64 + LastWriteVersion int64 + DBRecordVersion int64 + + UpsertActivityInfos map[int64]*commonpb.DataBlob + DeleteActivityInfos map[int64]struct{} + UpsertTimerInfos map[string]*commonpb.DataBlob + DeleteTimerInfos map[string]struct{} + UpsertChildExecutionInfos map[int64]*commonpb.DataBlob + DeleteChildExecutionInfos map[int64]struct{} + UpsertRequestCancelInfos map[int64]*commonpb.DataBlob + DeleteRequestCancelInfos map[int64]struct{} + UpsertSignalInfos map[int64]*commonpb.DataBlob + DeleteSignalInfos map[int64]struct{} + UpsertSignalRequestedIDs map[string]struct{} + DeleteSignalRequestedIDs map[string]struct{} + NewBufferedEvents *commonpb.DataBlob + ClearBufferedEvents bool + + Tasks map[tasks.Category][]InternalHistoryTask + + Condition int64 + + Checksum *commonpb.DataBlob + } + + // InternalWorkflowSnapshot is used as generic workflow execution state snapshot for Persistence Interface + InternalWorkflowSnapshot struct { + // TODO: properly set this on call sites + NamespaceID string + WorkflowID string + RunID string + + ExecutionInfo *persistencespb.WorkflowExecutionInfo + ExecutionInfoBlob *commonpb.DataBlob + ExecutionState *persistencespb.WorkflowExecutionState + ExecutionStateBlob *commonpb.DataBlob + StartVersion int64 + LastWriteVersion int64 + NextEventID int64 + DBRecordVersion int64 + + ActivityInfos map[int64]*commonpb.DataBlob + TimerInfos map[string]*commonpb.DataBlob + ChildExecutionInfos map[int64]*commonpb.DataBlob + RequestCancelInfos map[int64]*commonpb.DataBlob + SignalInfos map[int64]*commonpb.DataBlob + SignalRequestedIDs map[string]struct{} + + Tasks map[tasks.Category][]InternalHistoryTask + + Condition int64 + + Checksum *commonpb.DataBlob + } + + InternalGetCurrentExecutionResponse struct { + RunID string + ExecutionState *persistencespb.WorkflowExecutionState + } + + // InternalHistoryNode represent a history node metadata + InternalHistoryNode struct { + // The first eventID becomes the nodeID to be appended + NodeID int64 + // requested TransactionID for this write operation. For the same eventID, the node with larger TransactionID always wins + TransactionID int64 + // TransactionID for events before these events. 
For events chaining + PrevTransactionID int64 + // The events to be appended + Events *commonpb.DataBlob + } + + // InternalAppendHistoryNodesRequest is used to append a batch of history nodes + InternalAppendHistoryNodesRequest struct { + // The raw branch token + BranchToken []byte + // True if it is the first append request to the branch + IsNewBranch bool + // The info for clean up data in background + Info string + // The branch to be appended + BranchInfo *persistencespb.HistoryBranch + // Serialized TreeInfo + TreeInfo *commonpb.DataBlob + // The history node + Node InternalHistoryNode + // Used in sharded data stores to identify which shard to use + ShardID int32 + } + + // InternalGetWorkflowExecutionResponse is the response to GetworkflowExecution for Persistence Interface + InternalGetWorkflowExecutionResponse struct { + State *InternalWorkflowMutableState + DBRecordVersion int64 + } + + // InternalListConcreteExecutionsResponse is the response to ListConcreteExecutions for Persistence Interface + InternalListConcreteExecutionsResponse struct { + States []*InternalWorkflowMutableState + NextPageToken []byte + } + + InternalGetHistoryTaskResponse struct { + InternalHistoryTask + } + + InternalGetHistoryTasksResponse struct { + Tasks []InternalHistoryTask + NextPageToken []byte + } + + InternalGetReplicationTasksFromDLQResponse = InternalGetHistoryTasksResponse + + // InternalForkHistoryBranchRequest is used to fork a history branch + InternalForkHistoryBranchRequest struct { + // The base branch to fork from + ForkBranchInfo *persistencespb.HistoryBranch + // Serialized TreeInfo + TreeInfo *commonpb.DataBlob + // The nodeID to fork from, the new branch will start from ( inclusive ), the base branch will stop at(exclusive) + ForkNodeID int64 + // branchID of the new branch + NewBranchID string + // the info for clean up data in background + Info string + // Used in sharded data stores to identify which shard to use + ShardID int32 + } + + // InternalDeleteHistoryNodesRequest is used to remove a history node + InternalDeleteHistoryNodesRequest struct { + // The raw branch token + BranchToken []byte + // Used in sharded data stores to identify which shard to use + ShardID int32 + // The branch to be appended + BranchInfo *persistencespb.HistoryBranch + // node ID of the history node + NodeID int64 + // transaction ID of the history node + TransactionID int64 + } + + // InternalDeleteHistoryBranchRequest is used to remove a history branch + InternalDeleteHistoryBranchRequest struct { + // The raw branch token + BranchToken []byte + // The branch + BranchInfo *persistencespb.HistoryBranch + // Used in sharded data stores to identify which shard to use + ShardID int32 + // branch ranges is used to delete range of history nodes from target branch and it ancestors. + BranchRanges []InternalDeleteHistoryBranchRange + } + + // InternalDeleteHistoryBranchRange is used to delete a range of history nodes of a branch + InternalDeleteHistoryBranchRange struct { + BranchId string + BeginNodeId int64 // delete nodes with ID >= BeginNodeId + } + + // InternalReadHistoryBranchRequest is used to read a history branch + InternalReadHistoryBranchRequest struct { + // The raw branch token + BranchToken []byte + // The branch range to be read + BranchID string + // Get the history nodes from MinNodeID. Inclusive. + MinNodeID int64 + // Get the history nodes upto MaxNodeID. Exclusive. 
+ MaxNodeID int64 + // passing thru for pagination + PageSize int + // Pagination token + NextPageToken []byte + // Used in sharded data stores to identify which shard to use + ShardID int32 + // whether to only return metadata, excluding node content + MetadataOnly bool + // whether we iterate in reverse order + ReverseOrder bool + } + + // InternalCompleteForkBranchRequest is used to update some tree/branch meta data for forking + InternalCompleteForkBranchRequest struct { + // branch to be updated + BranchInfo persistencespb.HistoryBranch + // whether fork is successful + Success bool + // Used in sharded data stores to identify which shard to use + ShardID int32 + } + + // InternalReadHistoryBranchResponse is the response to ReadHistoryBranchRequest + InternalReadHistoryBranchResponse struct { + // History nodes + Nodes []InternalHistoryNode + // Pagination token + NextPageToken []byte + } + + // InternalGetAllHistoryTreeBranchesResponse is response to GetAllHistoryTreeBranches + // Only used by persistence layer + InternalGetAllHistoryTreeBranchesResponse struct { + // pagination token + NextPageToken []byte + // all branches of all trees + Branches []InternalHistoryBranchDetail + } + + // InternalHistoryBranchDetail used by InternalGetAllHistoryTreeBranchesResponse + InternalHistoryBranchDetail struct { + TreeID string + BranchID string + Encoding string + Data []byte // HistoryTreeInfo blob + } + + // InternalGetHistoryTreeResponse is response to GetHistoryTree + // Only used by persistence layer + InternalGetHistoryTreeResponse struct { + // TreeInfos + TreeInfos []*commonpb.DataBlob + } + + // InternalCreateNamespaceRequest is used to create the namespace + InternalCreateNamespaceRequest struct { + ID string + Name string + Namespace *commonpb.DataBlob + IsGlobal bool + } + + // InternalGetNamespaceResponse is the response for GetNamespace + InternalGetNamespaceResponse struct { + Namespace *commonpb.DataBlob + IsGlobal bool + NotificationVersion int64 + } + + // InternalUpdateNamespaceRequest is used to update namespace + InternalUpdateNamespaceRequest struct { + Id string + Name string + Namespace *commonpb.DataBlob + NotificationVersion int64 + IsGlobal bool + } + + InternalRenameNamespaceRequest struct { + *InternalUpdateNamespaceRequest + PreviousName string + } + + InternalListNamespacesRequest struct { + PageSize int + NextPageToken []byte + } + + // InternalListNamespacesResponse is the response for GetNamespace + InternalListNamespacesResponse struct { + Namespaces []*InternalGetNamespaceResponse + NextPageToken []byte + } + + // InternalListClusterMetadataRequest is the request for ListClusterMetadata + InternalListClusterMetadataRequest struct { + PageSize int + NextPageToken []byte + } + + // InternalListClusterMetadataResponse is the response for ListClusterMetadata + InternalListClusterMetadataResponse struct { + ClusterMetadata []*InternalGetClusterMetadataResponse + NextPageToken []byte + } + + // InternalGetClusterMetadataRequest is the request for GetClusterMetadata + InternalGetClusterMetadataRequest struct { + ClusterName string + } + + // InternalGetClusterMetadataResponse is the response for GetClusterMetadata + InternalGetClusterMetadataResponse struct { + // Serialized MutableCusterMetadata. + ClusterMetadata *commonpb.DataBlob + Version int64 + } + + // InternalSaveClusterMetadataRequest is the request for SaveClusterMetadata + InternalSaveClusterMetadataRequest struct { + ClusterName string + // Serialized MutableCusterMetadata. 
+ ClusterMetadata *commonpb.DataBlob + Version int64 + } + + // InternalDeleteClusterMetadataRequest is the request for DeleteClusterMetadata + InternalDeleteClusterMetadataRequest struct { + ClusterName string + } + + // InternalUpsertClusterMembershipRequest is the request to UpsertClusterMembership + InternalUpsertClusterMembershipRequest struct { + ClusterMember + RecordExpiry time.Time + } +) diff -Nru temporal-1.21.5-1/src/common/persistence/persistence_metric_clients.go temporal-1.22.5/src/common/persistence/persistence_metric_clients.go --- temporal-1.21.5-1/src/common/persistence/persistence_metric_clients.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/persistence_metric_clients.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,1285 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
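The file added below (persistence_metric_clients.go) wraps each persistence manager — shard, execution, task, metadata, cluster metadata and queue — in a decorator that times every call, feeds the latency and outcome into a HealthSignalAggregator, and records request/latency/error metrics before delegating to the wrapped implementation. A minimal sketch of that wrapping pattern, using hypothetical names (Store, recorder, memStore, logRecorder) and a deliberately simplified interface rather than the real manager APIs, could look like this:

package main

import (
	"context"
	"fmt"
	"time"
)

// Store is a stand-in for one of the persistence manager interfaces
// (ShardManager, ExecutionManager, ...) that the metric clients wrap.
type Store interface {
	GetShard(ctx context.Context, shardID int32) (string, error)
}

// recorder is a stand-in for the metrics.Handler / HealthSignalAggregator pair.
type recorder interface {
	Record(operation string, latency time.Duration, err error)
}

// metricStore decorates a Store: every call is timed in a deferred
// function and reported, then delegated to the wrapped implementation,
// mirroring the defer-based pattern used throughout the new file.
type metricStore struct {
	next Store
	rec  recorder
}

func (m *metricStore) GetShard(ctx context.Context, shardID int32) (_ string, retErr error) {
	start := time.Now().UTC()
	defer func() {
		m.rec.Record("GetShard", time.Since(start), retErr)
	}()
	return m.next.GetShard(ctx, shardID)
}

// Trivial concrete implementations so the sketch runs end to end.
type memStore struct{}

func (memStore) GetShard(ctx context.Context, shardID int32) (string, error) {
	return fmt.Sprintf("shard-%d", shardID), nil
}

type logRecorder struct{}

func (logRecorder) Record(op string, latency time.Duration, err error) {
	fmt.Printf("op=%s latency=%s err=%v\n", op, latency, err)
}

func main() {
	var s Store = &metricStore{next: memStore{}, rec: logRecorder{}}
	out, err := s.GetShard(context.Background(), 7)
	fmt.Println(out, err)
}

Because the decorator satisfies the same interface it wraps, callers are unaware of the instrumentation; the real file repeats this shape for every method of every manager.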
+ +package persistence + +import ( + "context" + "fmt" + "time" + + commonpb "go.temporal.io/api/common/v1" + "go.temporal.io/api/serviceerror" + "go.temporal.io/server/common/headers" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/log/tag" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/service/history/tasks" +) + +type ( + metricEmitter struct { + metricsHandler metrics.Handler + logger log.Logger + } + + shardPersistenceClient struct { + metricEmitter + healthSignals HealthSignalAggregator + persistence ShardManager + } + + executionPersistenceClient struct { + metricEmitter + healthSignals HealthSignalAggregator + persistence ExecutionManager + } + + taskPersistenceClient struct { + metricEmitter + healthSignals HealthSignalAggregator + persistence TaskManager + } + + metadataPersistenceClient struct { + metricEmitter + healthSignals HealthSignalAggregator + persistence MetadataManager + } + + clusterMetadataPersistenceClient struct { + metricEmitter + healthSignals HealthSignalAggregator + persistence ClusterMetadataManager + } + + queuePersistenceClient struct { + metricEmitter + healthSignals HealthSignalAggregator + persistence Queue + } +) + +var _ ShardManager = (*shardPersistenceClient)(nil) +var _ ExecutionManager = (*executionPersistenceClient)(nil) +var _ TaskManager = (*taskPersistenceClient)(nil) +var _ MetadataManager = (*metadataPersistenceClient)(nil) +var _ ClusterMetadataManager = (*clusterMetadataPersistenceClient)(nil) +var _ Queue = (*queuePersistenceClient)(nil) + +// NewShardPersistenceMetricsClient creates a client to manage shards +func NewShardPersistenceMetricsClient(persistence ShardManager, metricsHandler metrics.Handler, healthSignals HealthSignalAggregator, logger log.Logger) ShardManager { + return &shardPersistenceClient{ + metricEmitter: metricEmitter{ + metricsHandler: metricsHandler, + logger: logger, + }, + healthSignals: healthSignals, + persistence: persistence, + } +} + +// NewExecutionPersistenceMetricsClient creates a client to manage executions +func NewExecutionPersistenceMetricsClient(persistence ExecutionManager, metricsHandler metrics.Handler, healthSignals HealthSignalAggregator, logger log.Logger) ExecutionManager { + return &executionPersistenceClient{ + metricEmitter: metricEmitter{ + metricsHandler: metricsHandler, + logger: logger, + }, + healthSignals: healthSignals, + persistence: persistence, + } +} + +// NewTaskPersistenceMetricsClient creates a client to manage tasks +func NewTaskPersistenceMetricsClient(persistence TaskManager, metricsHandler metrics.Handler, healthSignals HealthSignalAggregator, logger log.Logger) TaskManager { + return &taskPersistenceClient{ + metricEmitter: metricEmitter{ + metricsHandler: metricsHandler, + logger: logger, + }, + healthSignals: healthSignals, + persistence: persistence, + } +} + +// NewMetadataPersistenceMetricsClient creates a MetadataManager client to manage metadata +func NewMetadataPersistenceMetricsClient(persistence MetadataManager, metricsHandler metrics.Handler, healthSignals HealthSignalAggregator, logger log.Logger) MetadataManager { + return &metadataPersistenceClient{ + metricEmitter: metricEmitter{ + metricsHandler: metricsHandler, + logger: logger, + }, + healthSignals: healthSignals, + persistence: persistence, + } +} + +// NewClusterMetadataPersistenceMetricsClient creates a ClusterMetadataManager client to manage cluster metadata +func NewClusterMetadataPersistenceMetricsClient(persistence ClusterMetadataManager, metricsHandler 
metrics.Handler, healthSignals HealthSignalAggregator, logger log.Logger) ClusterMetadataManager { + return &clusterMetadataPersistenceClient{ + metricEmitter: metricEmitter{ + metricsHandler: metricsHandler, + logger: logger, + }, + healthSignals: healthSignals, + persistence: persistence, + } +} + +// NewQueuePersistenceMetricsClient creates a client to manage queue +func NewQueuePersistenceMetricsClient(persistence Queue, metricsHandler metrics.Handler, healthSignals HealthSignalAggregator, logger log.Logger) Queue { + return &queuePersistenceClient{ + metricEmitter: metricEmitter{ + metricsHandler: metricsHandler, + logger: logger, + }, + healthSignals: healthSignals, + persistence: persistence, + } +} + +func (p *shardPersistenceClient) GetName() string { + return p.persistence.GetName() +} + +func (p *shardPersistenceClient) GetOrCreateShard( + ctx context.Context, + request *GetOrCreateShardRequest, +) (_ *GetOrCreateShardResponse, retErr error) { + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + latency := time.Since(startTime) + p.healthSignals.Record(request.ShardID, caller, latency, retErr) + p.recordRequestMetrics(metrics.PersistenceGetOrCreateShardScope, caller, latency, retErr) + }() + return p.persistence.GetOrCreateShard(ctx, request) +} + +func (p *shardPersistenceClient) UpdateShard( + ctx context.Context, + request *UpdateShardRequest, +) (retErr error) { + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + p.healthSignals.Record(request.ShardInfo.GetShardId(), caller, time.Since(startTime), retErr) + p.recordRequestMetrics(metrics.PersistenceUpdateShardScope, caller, time.Since(startTime), retErr) + }() + return p.persistence.UpdateShard(ctx, request) +} + +func (p *shardPersistenceClient) AssertShardOwnership( + ctx context.Context, + request *AssertShardOwnershipRequest, +) (retErr error) { + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + p.healthSignals.Record(request.ShardID, caller, time.Since(startTime), retErr) + p.recordRequestMetrics(metrics.PersistenceAssertShardOwnershipScope, caller, time.Since(startTime), retErr) + }() + return p.persistence.AssertShardOwnership(ctx, request) +} + +func (p *shardPersistenceClient) Close() { + p.persistence.Close() +} + +func (p *executionPersistenceClient) GetName() string { + return p.persistence.GetName() +} + +func (p *executionPersistenceClient) GetHistoryBranchUtil() HistoryBranchUtil { + return p.persistence.GetHistoryBranchUtil() +} + +func (p *executionPersistenceClient) CreateWorkflowExecution( + ctx context.Context, + request *CreateWorkflowExecutionRequest, +) (_ *CreateWorkflowExecutionResponse, retErr error) { + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + p.healthSignals.Record(request.ShardID, caller, time.Since(startTime), retErr) + p.recordRequestMetrics(metrics.PersistenceCreateWorkflowExecutionScope, caller, time.Since(startTime), retErr) + }() + return p.persistence.CreateWorkflowExecution(ctx, request) +} + +func (p *executionPersistenceClient) GetWorkflowExecution( + ctx context.Context, + request *GetWorkflowExecutionRequest, +) (_ *GetWorkflowExecutionResponse, retErr error) { + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + p.healthSignals.Record(request.ShardID, caller, time.Since(startTime), retErr) + 
p.recordRequestMetrics(metrics.PersistenceGetWorkflowExecutionScope, caller, time.Since(startTime), retErr) + }() + return p.persistence.GetWorkflowExecution(ctx, request) +} + +func (p *executionPersistenceClient) SetWorkflowExecution( + ctx context.Context, + request *SetWorkflowExecutionRequest, +) (_ *SetWorkflowExecutionResponse, retErr error) { + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + p.healthSignals.Record(request.ShardID, caller, time.Since(startTime), retErr) + p.recordRequestMetrics(metrics.PersistenceSetWorkflowExecutionScope, caller, time.Since(startTime), retErr) + }() + return p.persistence.SetWorkflowExecution(ctx, request) +} + +func (p *executionPersistenceClient) UpdateWorkflowExecution( + ctx context.Context, + request *UpdateWorkflowExecutionRequest, +) (_ *UpdateWorkflowExecutionResponse, retErr error) { + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + p.healthSignals.Record(request.ShardID, caller, time.Since(startTime), retErr) + p.recordRequestMetrics(metrics.PersistenceUpdateWorkflowExecutionScope, caller, time.Since(startTime), retErr) + }() + return p.persistence.UpdateWorkflowExecution(ctx, request) +} + +func (p *executionPersistenceClient) ConflictResolveWorkflowExecution( + ctx context.Context, + request *ConflictResolveWorkflowExecutionRequest, +) (_ *ConflictResolveWorkflowExecutionResponse, retErr error) { + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + p.healthSignals.Record(request.ShardID, caller, time.Since(startTime), retErr) + p.recordRequestMetrics(metrics.PersistenceConflictResolveWorkflowExecutionScope, caller, time.Since(startTime), retErr) + }() + return p.persistence.ConflictResolveWorkflowExecution(ctx, request) +} + +func (p *executionPersistenceClient) DeleteWorkflowExecution( + ctx context.Context, + request *DeleteWorkflowExecutionRequest, +) (retErr error) { + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + p.healthSignals.Record(request.ShardID, caller, time.Since(startTime), retErr) + p.recordRequestMetrics(metrics.PersistenceDeleteWorkflowExecutionScope, caller, time.Since(startTime), retErr) + }() + return p.persistence.DeleteWorkflowExecution(ctx, request) +} + +func (p *executionPersistenceClient) DeleteCurrentWorkflowExecution( + ctx context.Context, + request *DeleteCurrentWorkflowExecutionRequest, +) (retErr error) { + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + p.healthSignals.Record(request.ShardID, caller, time.Since(startTime), retErr) + p.recordRequestMetrics(metrics.PersistenceDeleteCurrentWorkflowExecutionScope, caller, time.Since(startTime), retErr) + }() + return p.persistence.DeleteCurrentWorkflowExecution(ctx, request) +} + +func (p *executionPersistenceClient) GetCurrentExecution( + ctx context.Context, + request *GetCurrentExecutionRequest, +) (_ *GetCurrentExecutionResponse, retErr error) { + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + p.healthSignals.Record(request.ShardID, caller, time.Since(startTime), retErr) + p.recordRequestMetrics(metrics.PersistenceGetCurrentExecutionScope, caller, time.Since(startTime), retErr) + }() + return p.persistence.GetCurrentExecution(ctx, request) +} + +func (p *executionPersistenceClient) ListConcreteExecutions( + ctx context.Context, + request 
*ListConcreteExecutionsRequest, +) (_ *ListConcreteExecutionsResponse, retErr error) { + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + p.healthSignals.Record(request.ShardID, caller, time.Since(startTime), retErr) + p.recordRequestMetrics(metrics.PersistenceListConcreteExecutionsScope, caller, time.Since(startTime), retErr) + }() + return p.persistence.ListConcreteExecutions(ctx, request) +} + +func (p *executionPersistenceClient) RegisterHistoryTaskReader( + ctx context.Context, + request *RegisterHistoryTaskReaderRequest, +) error { + // hint methods won't go through persistence rate limiter + // so also not emitting any persistence request/error metrics + return p.persistence.RegisterHistoryTaskReader(ctx, request) +} + +func (p *executionPersistenceClient) UnregisterHistoryTaskReader( + ctx context.Context, + request *UnregisterHistoryTaskReaderRequest, +) { + // hint methods won't go through persistence rate limiter + // so also not emitting any persistence request/error metrics + p.persistence.UnregisterHistoryTaskReader(ctx, request) +} + +func (p *executionPersistenceClient) UpdateHistoryTaskReaderProgress( + ctx context.Context, + request *UpdateHistoryTaskReaderProgressRequest, +) { + // hint methods won't go through persistence rate limiter + // so also not emitting any persistence request/error metrics + p.persistence.UpdateHistoryTaskReaderProgress(ctx, request) +} + +func (p *executionPersistenceClient) AddHistoryTasks( + ctx context.Context, + request *AddHistoryTasksRequest, +) (retErr error) { + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + p.healthSignals.Record(request.ShardID, caller, time.Since(startTime), retErr) + p.recordRequestMetrics(metrics.PersistenceAddTasksScope, caller, time.Since(startTime), retErr) + }() + return p.persistence.AddHistoryTasks(ctx, request) +} + +func (p *executionPersistenceClient) GetHistoryTasks( + ctx context.Context, + request *GetHistoryTasksRequest, +) (_ *GetHistoryTasksResponse, retErr error) { + var operation string + switch request.TaskCategory.ID() { + case tasks.CategoryIDTransfer: + operation = metrics.PersistenceGetTransferTasksScope + case tasks.CategoryIDTimer: + operation = metrics.PersistenceGetTimerTasksScope + case tasks.CategoryIDVisibility: + operation = metrics.PersistenceGetVisibilityTasksScope + case tasks.CategoryIDReplication: + operation = metrics.PersistenceGetReplicationTasksScope + case tasks.CategoryIDArchival: + operation = metrics.PersistenceGetArchivalTasksScope + default: + return nil, serviceerror.NewInternal(fmt.Sprintf("unknown task category type: %v", request.TaskCategory)) + } + + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + p.healthSignals.Record(request.ShardID, caller, time.Since(startTime), retErr) + p.recordRequestMetrics(operation, caller, time.Since(startTime), retErr) + }() + return p.persistence.GetHistoryTasks(ctx, request) +} + +func (p *executionPersistenceClient) CompleteHistoryTask( + ctx context.Context, + request *CompleteHistoryTaskRequest, +) (retErr error) { + var operation string + switch request.TaskCategory.ID() { + case tasks.CategoryIDTransfer: + operation = metrics.PersistenceCompleteTransferTaskScope + case tasks.CategoryIDTimer: + operation = metrics.PersistenceCompleteTimerTaskScope + case tasks.CategoryIDVisibility: + operation = metrics.PersistenceCompleteVisibilityTaskScope + case tasks.CategoryIDReplication: + 
operation = metrics.PersistenceCompleteReplicationTaskScope + case tasks.CategoryIDArchival: + operation = metrics.PersistenceCompleteArchivalTaskScope + default: + return serviceerror.NewInternal(fmt.Sprintf("unknown task category type: %v", request.TaskCategory)) + } + + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + p.healthSignals.Record(request.ShardID, caller, time.Since(startTime), retErr) + p.recordRequestMetrics(operation, caller, time.Since(startTime), retErr) + }() + return p.persistence.CompleteHistoryTask(ctx, request) +} + +func (p *executionPersistenceClient) RangeCompleteHistoryTasks( + ctx context.Context, + request *RangeCompleteHistoryTasksRequest, +) (retErr error) { + var operation string + switch request.TaskCategory.ID() { + case tasks.CategoryIDTransfer: + operation = metrics.PersistenceRangeCompleteTransferTasksScope + case tasks.CategoryIDTimer: + operation = metrics.PersistenceRangeCompleteTimerTasksScope + case tasks.CategoryIDVisibility: + operation = metrics.PersistenceRangeCompleteVisibilityTasksScope + case tasks.CategoryIDReplication: + operation = metrics.PersistenceRangeCompleteReplicationTasksScope + case tasks.CategoryIDArchival: + operation = metrics.PersistenceRangeCompleteArchivalTasksScope + default: + return serviceerror.NewInternal(fmt.Sprintf("unknown task category type: %v", request.TaskCategory)) + } + + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + p.healthSignals.Record(request.ShardID, caller, time.Since(startTime), retErr) + p.recordRequestMetrics(operation, caller, time.Since(startTime), retErr) + }() + return p.persistence.RangeCompleteHistoryTasks(ctx, request) +} + +func (p *executionPersistenceClient) PutReplicationTaskToDLQ( + ctx context.Context, + request *PutReplicationTaskToDLQRequest, +) (retErr error) { + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + p.healthSignals.Record(request.ShardID, caller, time.Since(startTime), retErr) + p.recordRequestMetrics(metrics.PersistencePutReplicationTaskToDLQScope, caller, time.Since(startTime), retErr) + }() + return p.persistence.PutReplicationTaskToDLQ(ctx, request) +} + +func (p *executionPersistenceClient) GetReplicationTasksFromDLQ( + ctx context.Context, + request *GetReplicationTasksFromDLQRequest, +) (_ *GetHistoryTasksResponse, retErr error) { + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + p.healthSignals.Record(request.ShardID, caller, time.Since(startTime), retErr) + p.recordRequestMetrics(metrics.PersistenceGetReplicationTasksFromDLQScope, caller, time.Since(startTime), retErr) + }() + return p.persistence.GetReplicationTasksFromDLQ(ctx, request) +} + +func (p *executionPersistenceClient) DeleteReplicationTaskFromDLQ( + ctx context.Context, + request *DeleteReplicationTaskFromDLQRequest, +) (retErr error) { + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + p.healthSignals.Record(request.ShardID, caller, time.Since(startTime), retErr) + p.recordRequestMetrics(metrics.PersistenceDeleteReplicationTaskFromDLQScope, caller, time.Since(startTime), retErr) + }() + return p.persistence.DeleteReplicationTaskFromDLQ(ctx, request) +} + +func (p *executionPersistenceClient) RangeDeleteReplicationTaskFromDLQ( + ctx context.Context, + request *RangeDeleteReplicationTaskFromDLQRequest, +) (retErr error) { + caller := 
headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + p.healthSignals.Record(request.ShardID, caller, time.Since(startTime), retErr) + p.recordRequestMetrics(metrics.PersistenceRangeDeleteReplicationTaskFromDLQScope, caller, time.Since(startTime), retErr) + }() + return p.persistence.RangeDeleteReplicationTaskFromDLQ(ctx, request) +} + +func (p *executionPersistenceClient) IsReplicationDLQEmpty( + ctx context.Context, + request *GetReplicationTasksFromDLQRequest, +) (_ bool, retErr error) { + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + p.healthSignals.Record(request.ShardID, caller, time.Since(startTime), retErr) + p.recordRequestMetrics(metrics.PersistenceGetReplicationTasksFromDLQScope, caller, time.Since(startTime), retErr) + }() + return p.persistence.IsReplicationDLQEmpty(ctx, request) +} + +func (p *executionPersistenceClient) Close() { + p.persistence.Close() +} + +func (p *taskPersistenceClient) GetName() string { + return p.persistence.GetName() +} + +func (p *taskPersistenceClient) CreateTasks( + ctx context.Context, + request *CreateTasksRequest, +) (_ *CreateTasksResponse, retErr error) { + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) + p.recordRequestMetrics(metrics.PersistenceCreateTasksScope, caller, time.Since(startTime), retErr) + }() + return p.persistence.CreateTasks(ctx, request) +} + +func (p *taskPersistenceClient) GetTasks( + ctx context.Context, + request *GetTasksRequest, +) (_ *GetTasksResponse, retErr error) { + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) + p.recordRequestMetrics(metrics.PersistenceGetTasksScope, caller, time.Since(startTime), retErr) + }() + return p.persistence.GetTasks(ctx, request) +} + +func (p *taskPersistenceClient) CompleteTask( + ctx context.Context, + request *CompleteTaskRequest, +) (retErr error) { + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) + p.recordRequestMetrics(metrics.PersistenceCompleteTaskScope, caller, time.Since(startTime), retErr) + }() + return p.persistence.CompleteTask(ctx, request) +} + +func (p *taskPersistenceClient) CompleteTasksLessThan( + ctx context.Context, + request *CompleteTasksLessThanRequest, +) (_ int, retErr error) { + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) + p.recordRequestMetrics(metrics.PersistenceCompleteTasksLessThanScope, caller, time.Since(startTime), retErr) + }() + return p.persistence.CompleteTasksLessThan(ctx, request) +} + +func (p *taskPersistenceClient) CreateTaskQueue( + ctx context.Context, + request *CreateTaskQueueRequest, +) (_ *CreateTaskQueueResponse, retErr error) { + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) + p.recordRequestMetrics(metrics.PersistenceCreateTaskQueueScope, caller, time.Since(startTime), retErr) + }() + return p.persistence.CreateTaskQueue(ctx, request) +} + +func (p *taskPersistenceClient) 
UpdateTaskQueue( + ctx context.Context, + request *UpdateTaskQueueRequest, +) (_ *UpdateTaskQueueResponse, retErr error) { + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) + p.recordRequestMetrics(metrics.PersistenceUpdateTaskQueueScope, caller, time.Since(startTime), retErr) + }() + return p.persistence.UpdateTaskQueue(ctx, request) +} + +func (p *taskPersistenceClient) GetTaskQueue( + ctx context.Context, + request *GetTaskQueueRequest, +) (_ *GetTaskQueueResponse, retErr error) { + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) + p.recordRequestMetrics(metrics.PersistenceGetTaskQueueScope, caller, time.Since(startTime), retErr) + }() + return p.persistence.GetTaskQueue(ctx, request) +} + +func (p *taskPersistenceClient) ListTaskQueue( + ctx context.Context, + request *ListTaskQueueRequest, +) (_ *ListTaskQueueResponse, retErr error) { + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) + p.recordRequestMetrics(metrics.PersistenceListTaskQueueScope, caller, time.Since(startTime), retErr) + }() + return p.persistence.ListTaskQueue(ctx, request) +} + +func (p *taskPersistenceClient) DeleteTaskQueue( + ctx context.Context, + request *DeleteTaskQueueRequest, +) (retErr error) { + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) + p.recordRequestMetrics(metrics.PersistenceDeleteTaskQueueScope, caller, time.Since(startTime), retErr) + }() + return p.persistence.DeleteTaskQueue(ctx, request) +} + +func (p *taskPersistenceClient) GetTaskQueueUserData( + ctx context.Context, + request *GetTaskQueueUserDataRequest, +) (_ *GetTaskQueueUserDataResponse, retErr error) { + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) + p.recordRequestMetrics(metrics.PersistenceGetTaskQueueUserDataScope, caller, time.Since(startTime), retErr) + }() + return p.persistence.GetTaskQueueUserData(ctx, request) +} + +func (p *taskPersistenceClient) UpdateTaskQueueUserData( + ctx context.Context, + request *UpdateTaskQueueUserDataRequest, +) (retErr error) { + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) + p.recordRequestMetrics(metrics.PersistenceUpdateTaskQueueUserDataScope, caller, time.Since(startTime), retErr) + }() + return p.persistence.UpdateTaskQueueUserData(ctx, request) +} + +func (p *taskPersistenceClient) ListTaskQueueUserDataEntries( + ctx context.Context, + request *ListTaskQueueUserDataEntriesRequest, +) (_ *ListTaskQueueUserDataEntriesResponse, retErr error) { + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) + p.recordRequestMetrics(metrics.PersistenceListTaskQueueUserDataEntriesScope, caller, time.Since(startTime), retErr) + }() + return 
p.persistence.ListTaskQueueUserDataEntries(ctx, request) +} + +func (p *taskPersistenceClient) GetTaskQueuesByBuildId(ctx context.Context, request *GetTaskQueuesByBuildIdRequest) (_ []string, retErr error) { + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) + p.recordRequestMetrics(metrics.PersistenceGetTaskQueuesByBuildIdScope, caller, time.Since(startTime), retErr) + }() + return p.persistence.GetTaskQueuesByBuildId(ctx, request) +} + +func (p *taskPersistenceClient) CountTaskQueuesByBuildId(ctx context.Context, request *CountTaskQueuesByBuildIdRequest) (_ int, retErr error) { + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) + p.recordRequestMetrics(metrics.PersistenceCountTaskQueuesByBuildIdScope, caller, time.Since(startTime), retErr) + }() + return p.persistence.CountTaskQueuesByBuildId(ctx, request) +} + +func (p *taskPersistenceClient) Close() { + p.persistence.Close() +} + +func (p *metadataPersistenceClient) GetName() string { + return p.persistence.GetName() +} + +func (p *metadataPersistenceClient) CreateNamespace( + ctx context.Context, + request *CreateNamespaceRequest, +) (_ *CreateNamespaceResponse, retErr error) { + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) + p.recordRequestMetrics(metrics.PersistenceCreateNamespaceScope, caller, time.Since(startTime), retErr) + }() + return p.persistence.CreateNamespace(ctx, request) +} + +func (p *metadataPersistenceClient) GetNamespace( + ctx context.Context, + request *GetNamespaceRequest, +) (_ *GetNamespaceResponse, retErr error) { + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) + p.recordRequestMetrics(metrics.PersistenceGetNamespaceScope, caller, time.Since(startTime), retErr) + }() + return p.persistence.GetNamespace(ctx, request) +} + +func (p *metadataPersistenceClient) UpdateNamespace( + ctx context.Context, + request *UpdateNamespaceRequest, +) (retErr error) { + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) + p.recordRequestMetrics(metrics.PersistenceUpdateNamespaceScope, caller, time.Since(startTime), retErr) + }() + return p.persistence.UpdateNamespace(ctx, request) +} + +func (p *metadataPersistenceClient) RenameNamespace( + ctx context.Context, + request *RenameNamespaceRequest, +) (retErr error) { + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) + p.recordRequestMetrics(metrics.PersistenceRenameNamespaceScope, caller, time.Since(startTime), retErr) + }() + return p.persistence.RenameNamespace(ctx, request) +} + +func (p *metadataPersistenceClient) DeleteNamespace( + ctx context.Context, + request *DeleteNamespaceRequest, +) (retErr error) { + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), 
retErr) + p.recordRequestMetrics(metrics.PersistenceDeleteNamespaceScope, caller, time.Since(startTime), retErr) + }() + return p.persistence.DeleteNamespace(ctx, request) +} + +func (p *metadataPersistenceClient) DeleteNamespaceByName( + ctx context.Context, + request *DeleteNamespaceByNameRequest, +) (retErr error) { + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) + p.recordRequestMetrics(metrics.PersistenceDeleteNamespaceByNameScope, caller, time.Since(startTime), retErr) + }() + return p.persistence.DeleteNamespaceByName(ctx, request) +} + +func (p *metadataPersistenceClient) ListNamespaces( + ctx context.Context, + request *ListNamespacesRequest, +) (_ *ListNamespacesResponse, retErr error) { + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) + p.recordRequestMetrics(metrics.PersistenceListNamespacesScope, caller, time.Since(startTime), retErr) + }() + return p.persistence.ListNamespaces(ctx, request) +} + +func (p *metadataPersistenceClient) GetMetadata( + ctx context.Context, +) (_ *GetMetadataResponse, retErr error) { + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) + p.recordRequestMetrics(metrics.PersistenceGetMetadataScope, caller, time.Since(startTime), retErr) + }() + return p.persistence.GetMetadata(ctx) +} + +func (p *metadataPersistenceClient) Close() { + p.persistence.Close() +} + +// AppendHistoryNodes add a node to history node table +func (p *executionPersistenceClient) AppendHistoryNodes( + ctx context.Context, + request *AppendHistoryNodesRequest, +) (_ *AppendHistoryNodesResponse, retErr error) { + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) + p.recordRequestMetrics(metrics.PersistenceAppendHistoryNodesScope, caller, time.Since(startTime), retErr) + }() + return p.persistence.AppendHistoryNodes(ctx, request) +} + +// AppendRawHistoryNodes add a node to history node table +func (p *executionPersistenceClient) AppendRawHistoryNodes( + ctx context.Context, + request *AppendRawHistoryNodesRequest, +) (_ *AppendHistoryNodesResponse, retErr error) { + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) + p.recordRequestMetrics(metrics.PersistenceAppendRawHistoryNodesScope, caller, time.Since(startTime), retErr) + }() + return p.persistence.AppendRawHistoryNodes(ctx, request) +} + +// ReadHistoryBranch returns history node data for a branch +func (p *executionPersistenceClient) ReadHistoryBranch( + ctx context.Context, + request *ReadHistoryBranchRequest, +) (_ *ReadHistoryBranchResponse, retErr error) { + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + p.recordRequestMetrics(metrics.PersistenceReadHistoryBranchScope, caller, time.Since(startTime), retErr) + }() + return p.persistence.ReadHistoryBranch(ctx, request) +} + +func (p *executionPersistenceClient) ReadHistoryBranchReverse( + ctx context.Context, + request *ReadHistoryBranchReverseRequest, +) (_ 
*ReadHistoryBranchReverseResponse, retErr error) { + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + p.recordRequestMetrics(metrics.PersistenceReadHistoryBranchReverseScope, caller, time.Since(startTime), retErr) + }() + return p.persistence.ReadHistoryBranchReverse(ctx, request) +} + +// ReadHistoryBranchByBatch returns history node data for a branch ByBatch +func (p *executionPersistenceClient) ReadHistoryBranchByBatch( + ctx context.Context, + request *ReadHistoryBranchRequest, +) (_ *ReadHistoryBranchByBatchResponse, retErr error) { + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + p.recordRequestMetrics(metrics.PersistenceReadHistoryBranchScope, caller, time.Since(startTime), retErr) + }() + return p.persistence.ReadHistoryBranchByBatch(ctx, request) +} + +// ReadRawHistoryBranch returns history node raw data for a branch ByBatch +func (p *executionPersistenceClient) ReadRawHistoryBranch( + ctx context.Context, + request *ReadHistoryBranchRequest, +) (_ *ReadRawHistoryBranchResponse, retErr error) { + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + p.recordRequestMetrics(metrics.PersistenceReadRawHistoryBranchScope, caller, time.Since(startTime), retErr) + }() + return p.persistence.ReadRawHistoryBranch(ctx, request) +} + +// ForkHistoryBranch forks a new branch from an old branch +func (p *executionPersistenceClient) ForkHistoryBranch( + ctx context.Context, + request *ForkHistoryBranchRequest, +) (_ *ForkHistoryBranchResponse, retErr error) { + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + p.recordRequestMetrics(metrics.PersistenceForkHistoryBranchScope, caller, time.Since(startTime), retErr) + }() + return p.persistence.ForkHistoryBranch(ctx, request) +} + +// DeleteHistoryBranch removes a branch +func (p *executionPersistenceClient) DeleteHistoryBranch( + ctx context.Context, + request *DeleteHistoryBranchRequest, +) (retErr error) { + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + p.recordRequestMetrics(metrics.PersistenceDeleteHistoryBranchScope, caller, time.Since(startTime), retErr) + }() + return p.persistence.DeleteHistoryBranch(ctx, request) +} + +// TrimHistoryBranch trims a branch +func (p *executionPersistenceClient) TrimHistoryBranch( + ctx context.Context, + request *TrimHistoryBranchRequest, +) (_ *TrimHistoryBranchResponse, retErr error) { + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) + p.recordRequestMetrics(metrics.PersistenceTrimHistoryBranchScope, caller, time.Since(startTime), retErr) + }() + return p.persistence.TrimHistoryBranch(ctx, request) +} + +func (p *executionPersistenceClient) GetAllHistoryTreeBranches( + ctx context.Context, + request *GetAllHistoryTreeBranchesRequest, +) (_ *GetAllHistoryTreeBranchesResponse, retErr error) { + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) + p.recordRequestMetrics(metrics.PersistenceGetAllHistoryTreeBranchesScope, caller, time.Since(startTime), retErr) + }() + return p.persistence.GetAllHistoryTreeBranches(ctx, request) +} + +// GetHistoryTree returns all branch information of a tree +func (p 
*executionPersistenceClient) GetHistoryTree( + ctx context.Context, + request *GetHistoryTreeRequest, +) (_ *GetHistoryTreeResponse, retErr error) { + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) + p.recordRequestMetrics(metrics.PersistenceGetHistoryTreeScope, caller, time.Since(startTime), retErr) + }() + return p.persistence.GetHistoryTree(ctx, request) +} + +func (p *queuePersistenceClient) Init( + ctx context.Context, + blob *commonpb.DataBlob, +) error { + return p.persistence.Init(ctx, blob) +} + +func (p *queuePersistenceClient) EnqueueMessage( + ctx context.Context, + blob commonpb.DataBlob, +) (retErr error) { + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) + p.recordRequestMetrics(metrics.PersistenceEnqueueMessageScope, caller, time.Since(startTime), retErr) + }() + return p.persistence.EnqueueMessage(ctx, blob) +} + +func (p *queuePersistenceClient) ReadMessages( + ctx context.Context, + lastMessageID int64, + maxCount int, +) (_ []*QueueMessage, retErr error) { + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) + p.recordRequestMetrics(metrics.PersistenceReadQueueMessagesScope, caller, time.Since(startTime), retErr) + }() + return p.persistence.ReadMessages(ctx, lastMessageID, maxCount) +} + +func (p *queuePersistenceClient) UpdateAckLevel( + ctx context.Context, + metadata *InternalQueueMetadata, +) (retErr error) { + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) + p.recordRequestMetrics(metrics.PersistenceUpdateAckLevelScope, caller, time.Since(startTime), retErr) + }() + return p.persistence.UpdateAckLevel(ctx, metadata) +} + +func (p *queuePersistenceClient) GetAckLevels( + ctx context.Context, +) (_ *InternalQueueMetadata, retErr error) { + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) + p.recordRequestMetrics(metrics.PersistenceGetAckLevelScope, caller, time.Since(startTime), retErr) + }() + return p.persistence.GetAckLevels(ctx) +} + +func (p *queuePersistenceClient) DeleteMessagesBefore( + ctx context.Context, + messageID int64, +) (retErr error) { + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) + p.recordRequestMetrics(metrics.PersistenceDeleteMessagesBeforeScope, caller, time.Since(startTime), retErr) + }() + return p.persistence.DeleteMessagesBefore(ctx, messageID) +} + +func (p *queuePersistenceClient) EnqueueMessageToDLQ( + ctx context.Context, + blob commonpb.DataBlob, +) (_ int64, retErr error) { + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) + p.recordRequestMetrics(metrics.PersistenceEnqueueMessageToDLQScope, caller, time.Since(startTime), retErr) + }() + return p.persistence.EnqueueMessageToDLQ(ctx, blob) +} + +func (p 
*queuePersistenceClient) ReadMessagesFromDLQ( + ctx context.Context, + firstMessageID int64, + lastMessageID int64, + pageSize int, + pageToken []byte, +) (_ []*QueueMessage, _ []byte, retErr error) { + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) + p.recordRequestMetrics(metrics.PersistenceReadMessagesFromDLQScope, caller, time.Since(startTime), retErr) + }() + return p.persistence.ReadMessagesFromDLQ(ctx, firstMessageID, lastMessageID, pageSize, pageToken) +} + +func (p *queuePersistenceClient) DeleteMessageFromDLQ( + ctx context.Context, + messageID int64, +) (retErr error) { + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) + p.recordRequestMetrics(metrics.PersistenceDeleteMessageFromDLQScope, caller, time.Since(startTime), retErr) + }() + return p.persistence.DeleteMessageFromDLQ(ctx, messageID) +} + +func (p *queuePersistenceClient) RangeDeleteMessagesFromDLQ( + ctx context.Context, + firstMessageID int64, + lastMessageID int64, +) (retErr error) { + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) + p.recordRequestMetrics(metrics.PersistenceRangeDeleteMessagesFromDLQScope, caller, time.Since(startTime), retErr) + }() + return p.persistence.RangeDeleteMessagesFromDLQ(ctx, firstMessageID, lastMessageID) +} + +func (p *queuePersistenceClient) UpdateDLQAckLevel( + ctx context.Context, + metadata *InternalQueueMetadata, +) (retErr error) { + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) + p.recordRequestMetrics(metrics.PersistenceUpdateDLQAckLevelScope, caller, time.Since(startTime), retErr) + }() + return p.persistence.UpdateDLQAckLevel(ctx, metadata) +} + +func (p *queuePersistenceClient) GetDLQAckLevels( + ctx context.Context, +) (_ *InternalQueueMetadata, retErr error) { + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) + p.recordRequestMetrics(metrics.PersistenceGetDLQAckLevelScope, caller, time.Since(startTime), retErr) + }() + return p.persistence.GetDLQAckLevels(ctx) +} + +func (p *queuePersistenceClient) Close() { + p.persistence.Close() +} + +func (p *clusterMetadataPersistenceClient) Close() { + p.persistence.Close() +} + +func (p *clusterMetadataPersistenceClient) ListClusterMetadata( + ctx context.Context, + request *ListClusterMetadataRequest, +) (_ *ListClusterMetadataResponse, retErr error) { + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) + p.recordRequestMetrics(metrics.PersistenceListClusterMetadataScope, caller, time.Since(startTime), retErr) + }() + return p.persistence.ListClusterMetadata(ctx, request) +} + +func (p *clusterMetadataPersistenceClient) GetCurrentClusterMetadata( + ctx context.Context, +) (_ *GetClusterMetadataResponse, retErr error) { + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + 
p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) + p.recordRequestMetrics(metrics.PersistenceGetCurrentClusterMetadataScope, caller, time.Since(startTime), retErr) + }() + return p.persistence.GetCurrentClusterMetadata(ctx) +} + +func (p *clusterMetadataPersistenceClient) GetClusterMetadata( + ctx context.Context, + request *GetClusterMetadataRequest, +) (_ *GetClusterMetadataResponse, retErr error) { + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) + p.recordRequestMetrics(metrics.PersistenceGetClusterMetadataScope, caller, time.Since(startTime), retErr) + }() + return p.persistence.GetClusterMetadata(ctx, request) +} + +func (p *clusterMetadataPersistenceClient) SaveClusterMetadata( + ctx context.Context, + request *SaveClusterMetadataRequest, +) (_ bool, retErr error) { + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) + p.recordRequestMetrics(metrics.PersistenceSaveClusterMetadataScope, caller, time.Since(startTime), retErr) + }() + return p.persistence.SaveClusterMetadata(ctx, request) +} + +func (p *clusterMetadataPersistenceClient) DeleteClusterMetadata( + ctx context.Context, + request *DeleteClusterMetadataRequest, +) (retErr error) { + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) + p.recordRequestMetrics(metrics.PersistenceDeleteClusterMetadataScope, caller, time.Since(startTime), retErr) + }() + return p.persistence.DeleteClusterMetadata(ctx, request) +} + +func (p *clusterMetadataPersistenceClient) GetName() string { + return p.persistence.GetName() +} + +func (p *clusterMetadataPersistenceClient) GetClusterMembers( + ctx context.Context, + request *GetClusterMembersRequest, +) (_ *GetClusterMembersResponse, retErr error) { + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) + p.recordRequestMetrics(metrics.PersistenceGetClusterMembersScope, caller, time.Since(startTime), retErr) + }() + return p.persistence.GetClusterMembers(ctx, request) +} + +func (p *clusterMetadataPersistenceClient) UpsertClusterMembership( + ctx context.Context, + request *UpsertClusterMembershipRequest, +) (retErr error) { + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) + p.recordRequestMetrics(metrics.PersistenceUpsertClusterMembershipScope, caller, time.Since(startTime), retErr) + }() + return p.persistence.UpsertClusterMembership(ctx, request) +} + +func (p *clusterMetadataPersistenceClient) PruneClusterMembership( + ctx context.Context, + request *PruneClusterMembershipRequest, +) (retErr error) { + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) + p.recordRequestMetrics(metrics.PersistencePruneClusterMembershipScope, caller, time.Since(startTime), retErr) + }() + return p.persistence.PruneClusterMembership(ctx, request) +} + +func (p *metadataPersistenceClient) 
InitializeSystemNamespaces( + ctx context.Context, + currentClusterName string, +) (retErr error) { + caller := headers.GetCallerInfo(ctx).CallerName + startTime := time.Now().UTC() + defer func() { + p.healthSignals.Record(CallerSegmentMissing, caller, time.Since(startTime), retErr) + p.recordRequestMetrics(metrics.PersistenceInitializeSystemNamespaceScope, caller, time.Since(startTime), retErr) + }() + return p.persistence.InitializeSystemNamespaces(ctx, currentClusterName) +} + +func (p *metricEmitter) recordRequestMetrics(operation string, caller string, latency time.Duration, err error) { + handler := p.metricsHandler.WithTags(metrics.OperationTag(operation), metrics.NamespaceTag(caller)) + handler.Counter(metrics.PersistenceRequests.GetMetricName()).Record(1) + handler.Timer(metrics.PersistenceLatency.GetMetricName()).Record(latency) + updateErrorMetric(handler, p.logger, operation, err) +} + +func updateErrorMetric(handler metrics.Handler, logger log.Logger, operation string, err error) { + if err != nil { + handler.Counter(metrics.PersistenceErrorWithType.GetMetricName()).Record(1, metrics.ServiceErrorTypeTag(err)) + switch err := err.(type) { + case *ShardAlreadyExistError, + *ShardOwnershipLostError, + *AppendHistoryTimeoutError, + *CurrentWorkflowConditionFailedError, + *WorkflowConditionFailedError, + *ConditionFailedError, + *TimeoutError, + *serviceerror.InvalidArgument, + *serviceerror.NamespaceAlreadyExists, + *serviceerror.NotFound, + *serviceerror.NamespaceNotFound: + // no-op + + case *serviceerror.ResourceExhausted: + handler.Counter(metrics.PersistenceErrResourceExhaustedCounter.GetMetricName()).Record(1, metrics.ResourceExhaustedCauseTag(err.Cause)) + default: + logger.Error("Operation failed with internal error.", tag.Error(err), tag.Operation(operation)) + handler.Counter(metrics.PersistenceFailures.GetMetricName()).Record(1) + } + } +} diff -Nru temporal-1.21.5-1/src/common/persistence/persistence_rate_limited_clients.go temporal-1.22.5/src/common/persistence/persistence_rate_limited_clients.go --- temporal-1.21.5-1/src/common/persistence/persistence_rate_limited_clients.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/persistence_rate_limited_clients.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,1079 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
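The second new file (persistence_rate_limited_clients.go) applies the same decorator idea for admission control: each method first asks a quotas.RequestRateLimiter for a token and short-circuits with ErrPersistenceLimitExceeded (a ResourceExhausted service error) when the budget is spent, otherwise it delegates to the wrapped manager. A minimal sketch under those assumptions, with hypothetical names and golang.org/x/time/rate standing in for the server's quotas package:

package main

import (
	"context"
	"errors"
	"fmt"

	"golang.org/x/time/rate"
)

// ErrLimitExceeded plays the role of ErrPersistenceLimitExceeded: the
// request is rejected before touching the backing store once the QPS
// budget for the window is spent.
var ErrLimitExceeded = errors.New("persistence max QPS reached")

// Store is the same simplified stand-in interface as in the previous sketch.
type Store interface {
	GetShard(ctx context.Context, shardID int32) (string, error)
}

// rateLimitedStore consults the limiter first and only then delegates,
// matching the "if !allow(...) { return ErrPersistenceLimitExceeded }"
// shape used by every method in the new file.
type rateLimitedStore struct {
	next    Store
	limiter *rate.Limiter
}

func (r *rateLimitedStore) GetShard(ctx context.Context, shardID int32) (string, error) {
	if !r.limiter.Allow() {
		return "", ErrLimitExceeded
	}
	return r.next.GetShard(ctx, shardID)
}

type memStore struct{}

func (memStore) GetShard(ctx context.Context, shardID int32) (string, error) {
	return fmt.Sprintf("shard-%d", shardID), nil
}

func main() {
	// Burst of 1 at 1 request/second: the first call passes, immediate
	// follow-ups are rejected with ErrLimitExceeded.
	s := &rateLimitedStore{next: memStore{}, limiter: rate.NewLimiter(1, 1)}
	for i := 0; i < 3; i++ {
		out, err := s.GetShard(context.Background(), int32(i))
		fmt.Println(out, err)
	}
}

In the patch itself the limiter key also carries the caller and shard (CallerSegmentMissing when no shard applies), so throttling can be applied per caller rather than globally as in this simplified example.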
+ +package persistence + +import ( + "context" + "time" + + commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" + "go.temporal.io/api/serviceerror" + "go.temporal.io/server/common/headers" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/quotas" + "go.temporal.io/server/service/history/tasks" +) + +const ( + RateLimitDefaultToken = 1 + CallerSegmentMissing = -1 +) + +var ( + // ErrPersistenceLimitExceeded is the error indicating QPS limit reached. + ErrPersistenceLimitExceeded = serviceerror.NewResourceExhausted(enumspb.RESOURCE_EXHAUSTED_CAUSE_PERSISTENCE_LIMIT, "Persistence Max QPS Reached.") +) + +type ( + shardRateLimitedPersistenceClient struct { + rateLimiter quotas.RequestRateLimiter + persistence ShardManager + logger log.Logger + } + + executionRateLimitedPersistenceClient struct { + rateLimiter quotas.RequestRateLimiter + persistence ExecutionManager + logger log.Logger + } + + taskRateLimitedPersistenceClient struct { + rateLimiter quotas.RequestRateLimiter + persistence TaskManager + logger log.Logger + } + + metadataRateLimitedPersistenceClient struct { + rateLimiter quotas.RequestRateLimiter + persistence MetadataManager + logger log.Logger + } + + clusterMetadataRateLimitedPersistenceClient struct { + rateLimiter quotas.RequestRateLimiter + persistence ClusterMetadataManager + logger log.Logger + } + + queueRateLimitedPersistenceClient struct { + rateLimiter quotas.RequestRateLimiter + persistence Queue + logger log.Logger + } +) + +var _ ShardManager = (*shardRateLimitedPersistenceClient)(nil) +var _ ExecutionManager = (*executionRateLimitedPersistenceClient)(nil) +var _ TaskManager = (*taskRateLimitedPersistenceClient)(nil) +var _ MetadataManager = (*metadataRateLimitedPersistenceClient)(nil) +var _ ClusterMetadataManager = (*clusterMetadataRateLimitedPersistenceClient)(nil) +var _ Queue = (*queueRateLimitedPersistenceClient)(nil) + +// NewShardPersistenceRateLimitedClient creates a client to manage shards +func NewShardPersistenceRateLimitedClient(persistence ShardManager, rateLimiter quotas.RequestRateLimiter, logger log.Logger) ShardManager { + return &shardRateLimitedPersistenceClient{ + persistence: persistence, + rateLimiter: rateLimiter, + logger: logger, + } +} + +// NewExecutionPersistenceRateLimitedClient creates a client to manage executions +func NewExecutionPersistenceRateLimitedClient(persistence ExecutionManager, rateLimiter quotas.RequestRateLimiter, logger log.Logger) ExecutionManager { + return &executionRateLimitedPersistenceClient{ + persistence: persistence, + rateLimiter: rateLimiter, + logger: logger, + } +} + +// NewTaskPersistenceRateLimitedClient creates a client to manage tasks +func NewTaskPersistenceRateLimitedClient(persistence TaskManager, rateLimiter quotas.RequestRateLimiter, logger log.Logger) TaskManager { + return &taskRateLimitedPersistenceClient{ + persistence: persistence, + rateLimiter: rateLimiter, + logger: logger, + } +} + +// NewMetadataPersistenceRateLimitedClient creates a MetadataManager client to manage metadata +func NewMetadataPersistenceRateLimitedClient(persistence MetadataManager, rateLimiter quotas.RequestRateLimiter, logger log.Logger) MetadataManager { + return &metadataRateLimitedPersistenceClient{ + persistence: persistence, + rateLimiter: rateLimiter, + logger: logger, + } +} + +// NewClusterMetadataPersistenceRateLimitedClient creates a MetadataManager client to manage metadata +func NewClusterMetadataPersistenceRateLimitedClient(persistence ClusterMetadataManager, 
rateLimiter quotas.RequestRateLimiter, logger log.Logger) ClusterMetadataManager { + return &clusterMetadataRateLimitedPersistenceClient{ + persistence: persistence, + rateLimiter: rateLimiter, + logger: logger, + } +} + +// NewQueuePersistenceRateLimitedClient creates a client to manage queue +func NewQueuePersistenceRateLimitedClient(persistence Queue, rateLimiter quotas.RequestRateLimiter, logger log.Logger) Queue { + return &queueRateLimitedPersistenceClient{ + persistence: persistence, + rateLimiter: rateLimiter, + logger: logger, + } +} + +func (p *shardRateLimitedPersistenceClient) GetName() string { + return p.persistence.GetName() +} + +func (p *shardRateLimitedPersistenceClient) GetOrCreateShard( + ctx context.Context, + request *GetOrCreateShardRequest, +) (*GetOrCreateShardResponse, error) { + if ok := allow(ctx, "GetOrCreateShard", request.ShardID, p.rateLimiter); !ok { + return nil, ErrPersistenceLimitExceeded + } + + response, err := p.persistence.GetOrCreateShard(ctx, request) + return response, err +} + +func (p *shardRateLimitedPersistenceClient) UpdateShard( + ctx context.Context, + request *UpdateShardRequest, +) error { + if ok := allow(ctx, "UpdateShard", request.ShardInfo.ShardId, p.rateLimiter); !ok { + return ErrPersistenceLimitExceeded + } + + return p.persistence.UpdateShard(ctx, request) +} + +func (p *shardRateLimitedPersistenceClient) AssertShardOwnership( + ctx context.Context, + request *AssertShardOwnershipRequest, +) error { + if ok := allow(ctx, "AssertShardOwnership", request.ShardID, p.rateLimiter); !ok { + return ErrPersistenceLimitExceeded + } + + return p.persistence.AssertShardOwnership(ctx, request) +} + +func (p *shardRateLimitedPersistenceClient) Close() { + p.persistence.Close() +} + +func (p *executionRateLimitedPersistenceClient) GetName() string { + return p.persistence.GetName() +} + +func (p *executionRateLimitedPersistenceClient) GetHistoryBranchUtil() HistoryBranchUtil { + return p.persistence.GetHistoryBranchUtil() +} + +func (p *executionRateLimitedPersistenceClient) CreateWorkflowExecution( + ctx context.Context, + request *CreateWorkflowExecutionRequest, +) (*CreateWorkflowExecutionResponse, error) { + if ok := allow(ctx, "CreateWorkflowExecution", request.ShardID, p.rateLimiter); !ok { + return nil, ErrPersistenceLimitExceeded + } + + response, err := p.persistence.CreateWorkflowExecution(ctx, request) + return response, err +} + +func (p *executionRateLimitedPersistenceClient) GetWorkflowExecution( + ctx context.Context, + request *GetWorkflowExecutionRequest, +) (*GetWorkflowExecutionResponse, error) { + if ok := allow(ctx, "GetWorkflowExecution", request.ShardID, p.rateLimiter); !ok { + return nil, ErrPersistenceLimitExceeded + } + + response, err := p.persistence.GetWorkflowExecution(ctx, request) + return response, err +} + +func (p *executionRateLimitedPersistenceClient) SetWorkflowExecution( + ctx context.Context, + request *SetWorkflowExecutionRequest, +) (*SetWorkflowExecutionResponse, error) { + if ok := allow(ctx, "SetWorkflowExecution", request.ShardID, p.rateLimiter); !ok { + return nil, ErrPersistenceLimitExceeded + } + + response, err := p.persistence.SetWorkflowExecution(ctx, request) + return response, err +} + +func (p *executionRateLimitedPersistenceClient) UpdateWorkflowExecution( + ctx context.Context, + request *UpdateWorkflowExecutionRequest, +) (*UpdateWorkflowExecutionResponse, error) { + if ok := allow(ctx, "UpdateWorkflowExecution", request.ShardID, p.rateLimiter); !ok { + return nil, 
ErrPersistenceLimitExceeded + } + + resp, err := p.persistence.UpdateWorkflowExecution(ctx, request) + return resp, err +} + +func (p *executionRateLimitedPersistenceClient) ConflictResolveWorkflowExecution( + ctx context.Context, + request *ConflictResolveWorkflowExecutionRequest, +) (*ConflictResolveWorkflowExecutionResponse, error) { + if ok := allow(ctx, "ConflictResolveWorkflowExecution", request.ShardID, p.rateLimiter); !ok { + return nil, ErrPersistenceLimitExceeded + } + + response, err := p.persistence.ConflictResolveWorkflowExecution(ctx, request) + return response, err +} + +func (p *executionRateLimitedPersistenceClient) DeleteWorkflowExecution( + ctx context.Context, + request *DeleteWorkflowExecutionRequest, +) error { + if ok := allow(ctx, "DeleteWorkflowExecution", request.ShardID, p.rateLimiter); !ok { + return ErrPersistenceLimitExceeded + } + + return p.persistence.DeleteWorkflowExecution(ctx, request) +} + +func (p *executionRateLimitedPersistenceClient) DeleteCurrentWorkflowExecution( + ctx context.Context, + request *DeleteCurrentWorkflowExecutionRequest, +) error { + if ok := allow(ctx, "DeleteCurrentWorkflowExecution", request.ShardID, p.rateLimiter); !ok { + return ErrPersistenceLimitExceeded + } + + return p.persistence.DeleteCurrentWorkflowExecution(ctx, request) +} + +func (p *executionRateLimitedPersistenceClient) GetCurrentExecution( + ctx context.Context, + request *GetCurrentExecutionRequest, +) (*GetCurrentExecutionResponse, error) { + if ok := allow(ctx, "GetCurrentExecution", request.ShardID, p.rateLimiter); !ok { + return nil, ErrPersistenceLimitExceeded + } + + response, err := p.persistence.GetCurrentExecution(ctx, request) + return response, err +} + +func (p *executionRateLimitedPersistenceClient) ListConcreteExecutions( + ctx context.Context, + request *ListConcreteExecutionsRequest, +) (*ListConcreteExecutionsResponse, error) { + if ok := allow(ctx, "ListConcreteExecutions", request.ShardID, p.rateLimiter); !ok { + return nil, ErrPersistenceLimitExceeded + } + + response, err := p.persistence.ListConcreteExecutions(ctx, request) + return response, err +} + +func (p *executionRateLimitedPersistenceClient) RegisterHistoryTaskReader( + ctx context.Context, + request *RegisterHistoryTaskReaderRequest, +) error { + // hint methods don't actually hint DB, so don't go through persistence rate limiter + return p.persistence.RegisterHistoryTaskReader(ctx, request) +} + +func (p *executionRateLimitedPersistenceClient) UnregisterHistoryTaskReader( + ctx context.Context, + request *UnregisterHistoryTaskReaderRequest, +) { + // hint methods don't actually hint DB, so don't go through persistence rate limiter + p.persistence.UnregisterHistoryTaskReader(ctx, request) +} + +func (p *executionRateLimitedPersistenceClient) UpdateHistoryTaskReaderProgress( + ctx context.Context, + request *UpdateHistoryTaskReaderProgressRequest, +) { + // hint methods don't actually hint DB, so don't go through persistence rate limiter + p.persistence.UpdateHistoryTaskReaderProgress(ctx, request) +} + +func (p *executionRateLimitedPersistenceClient) AddHistoryTasks( + ctx context.Context, + request *AddHistoryTasksRequest, +) error { + if ok := allow(ctx, "AddHistoryTasks", request.ShardID, p.rateLimiter); !ok { + return ErrPersistenceLimitExceeded + } + + return p.persistence.AddHistoryTasks(ctx, request) +} + +func (p *executionRateLimitedPersistenceClient) GetHistoryTasks( + ctx context.Context, + request *GetHistoryTasksRequest, +) (*GetHistoryTasksResponse, error) { + if ok := 
allow( + ctx, + ConstructHistoryTaskAPI("GetHistoryTasks", request.TaskCategory), + request.ShardID, + p.rateLimiter, + ); !ok { + return nil, ErrPersistenceLimitExceeded + } + + response, err := p.persistence.GetHistoryTasks(ctx, request) + return response, err +} + +func (p *executionRateLimitedPersistenceClient) CompleteHistoryTask( + ctx context.Context, + request *CompleteHistoryTaskRequest, +) error { + if ok := allow( + ctx, + ConstructHistoryTaskAPI("CompleteHistoryTask", request.TaskCategory), + request.ShardID, + p.rateLimiter, + ); !ok { + return ErrPersistenceLimitExceeded + } + + return p.persistence.CompleteHistoryTask(ctx, request) +} + +func (p *executionRateLimitedPersistenceClient) RangeCompleteHistoryTasks( + ctx context.Context, + request *RangeCompleteHistoryTasksRequest, +) error { + if ok := allow( + ctx, + ConstructHistoryTaskAPI("RangeCompleteHistoryTasks", request.TaskCategory), + request.ShardID, + p.rateLimiter, + ); !ok { + return ErrPersistenceLimitExceeded + } + + return p.persistence.RangeCompleteHistoryTasks(ctx, request) +} + +func (p *executionRateLimitedPersistenceClient) PutReplicationTaskToDLQ( + ctx context.Context, + request *PutReplicationTaskToDLQRequest, +) error { + if ok := allow(ctx, "PutReplicationTaskToDLQ", request.ShardID, p.rateLimiter); !ok { + return ErrPersistenceLimitExceeded + } + + return p.persistence.PutReplicationTaskToDLQ(ctx, request) +} + +func (p *executionRateLimitedPersistenceClient) GetReplicationTasksFromDLQ( + ctx context.Context, + request *GetReplicationTasksFromDLQRequest, +) (*GetHistoryTasksResponse, error) { + if ok := allow(ctx, "GetReplicationTasksFromDLQ", request.ShardID, p.rateLimiter); !ok { + return nil, ErrPersistenceLimitExceeded + } + + return p.persistence.GetReplicationTasksFromDLQ(ctx, request) +} + +func (p *executionRateLimitedPersistenceClient) DeleteReplicationTaskFromDLQ( + ctx context.Context, + request *DeleteReplicationTaskFromDLQRequest, +) error { + if ok := allow(ctx, "DeleteReplicationTaskFromDLQ", request.ShardID, p.rateLimiter); !ok { + return ErrPersistenceLimitExceeded + } + + return p.persistence.DeleteReplicationTaskFromDLQ(ctx, request) +} + +func (p *executionRateLimitedPersistenceClient) RangeDeleteReplicationTaskFromDLQ( + ctx context.Context, + request *RangeDeleteReplicationTaskFromDLQRequest, +) error { + if ok := allow(ctx, "RangeDeleteReplicationTaskFromDLQ", request.ShardID, p.rateLimiter); !ok { + return ErrPersistenceLimitExceeded + } + + return p.persistence.RangeDeleteReplicationTaskFromDLQ(ctx, request) +} + +func (p *executionRateLimitedPersistenceClient) IsReplicationDLQEmpty( + ctx context.Context, + request *GetReplicationTasksFromDLQRequest, +) (bool, error) { + if ok := allow(ctx, "IsReplicationDLQEmpty", request.ShardID, p.rateLimiter); !ok { + return true, ErrPersistenceLimitExceeded + } + + return p.persistence.IsReplicationDLQEmpty(ctx, request) +} + +func (p *executionRateLimitedPersistenceClient) Close() { + p.persistence.Close() +} + +func (p *taskRateLimitedPersistenceClient) GetName() string { + return p.persistence.GetName() +} + +func (p *taskRateLimitedPersistenceClient) CreateTasks( + ctx context.Context, + request *CreateTasksRequest, +) (*CreateTasksResponse, error) { + if ok := allow(ctx, "CreateTasks", CallerSegmentMissing, p.rateLimiter); !ok { + return nil, ErrPersistenceLimitExceeded + } + + response, err := p.persistence.CreateTasks(ctx, request) + return response, err +} + +func (p *taskRateLimitedPersistenceClient) GetTasks( + ctx 
context.Context, + request *GetTasksRequest, +) (*GetTasksResponse, error) { + if ok := allow(ctx, "GetTasks", CallerSegmentMissing, p.rateLimiter); !ok { + return nil, ErrPersistenceLimitExceeded + } + + response, err := p.persistence.GetTasks(ctx, request) + return response, err +} + +func (p *taskRateLimitedPersistenceClient) CompleteTask( + ctx context.Context, + request *CompleteTaskRequest, +) error { + if ok := allow(ctx, "CompleteTask", CallerSegmentMissing, p.rateLimiter); !ok { + return ErrPersistenceLimitExceeded + } + + return p.persistence.CompleteTask(ctx, request) +} + +func (p *taskRateLimitedPersistenceClient) CompleteTasksLessThan( + ctx context.Context, + request *CompleteTasksLessThanRequest, +) (int, error) { + if ok := allow(ctx, "CompleteTasksLessThan", CallerSegmentMissing, p.rateLimiter); !ok { + return 0, ErrPersistenceLimitExceeded + } + return p.persistence.CompleteTasksLessThan(ctx, request) +} + +func (p *taskRateLimitedPersistenceClient) CreateTaskQueue( + ctx context.Context, + request *CreateTaskQueueRequest, +) (*CreateTaskQueueResponse, error) { + if ok := allow(ctx, "CreateTaskQueue", CallerSegmentMissing, p.rateLimiter); !ok { + return nil, ErrPersistenceLimitExceeded + } + return p.persistence.CreateTaskQueue(ctx, request) +} + +func (p *taskRateLimitedPersistenceClient) UpdateTaskQueue( + ctx context.Context, + request *UpdateTaskQueueRequest, +) (*UpdateTaskQueueResponse, error) { + if ok := allow(ctx, "UpdateTaskQueue", CallerSegmentMissing, p.rateLimiter); !ok { + return nil, ErrPersistenceLimitExceeded + } + return p.persistence.UpdateTaskQueue(ctx, request) +} + +func (p *taskRateLimitedPersistenceClient) GetTaskQueue( + ctx context.Context, + request *GetTaskQueueRequest, +) (*GetTaskQueueResponse, error) { + if ok := allow(ctx, "GetTaskQueue", CallerSegmentMissing, p.rateLimiter); !ok { + return nil, ErrPersistenceLimitExceeded + } + return p.persistence.GetTaskQueue(ctx, request) +} + +func (p *taskRateLimitedPersistenceClient) ListTaskQueue( + ctx context.Context, + request *ListTaskQueueRequest, +) (*ListTaskQueueResponse, error) { + if ok := allow(ctx, "ListTaskQueue", CallerSegmentMissing, p.rateLimiter); !ok { + return nil, ErrPersistenceLimitExceeded + } + return p.persistence.ListTaskQueue(ctx, request) +} + +func (p *taskRateLimitedPersistenceClient) DeleteTaskQueue( + ctx context.Context, + request *DeleteTaskQueueRequest, +) error { + if ok := allow(ctx, "DeleteTaskQueue", CallerSegmentMissing, p.rateLimiter); !ok { + return ErrPersistenceLimitExceeded + } + return p.persistence.DeleteTaskQueue(ctx, request) +} + +func (p taskRateLimitedPersistenceClient) GetTaskQueueUserData( + ctx context.Context, + request *GetTaskQueueUserDataRequest, +) (*GetTaskQueueUserDataResponse, error) { + if ok := allow(ctx, "GetTaskQueueUserData", CallerSegmentMissing, p.rateLimiter); !ok { + return nil, ErrPersistenceLimitExceeded + } + return p.persistence.GetTaskQueueUserData(ctx, request) +} + +func (p taskRateLimitedPersistenceClient) UpdateTaskQueueUserData( + ctx context.Context, + request *UpdateTaskQueueUserDataRequest, +) error { + if ok := allow(ctx, "UpdateTaskQueueUserData", CallerSegmentMissing, p.rateLimiter); !ok { + return ErrPersistenceLimitExceeded + } + return p.persistence.UpdateTaskQueueUserData(ctx, request) +} + +func (p taskRateLimitedPersistenceClient) ListTaskQueueUserDataEntries( + ctx context.Context, + request *ListTaskQueueUserDataEntriesRequest, +) (*ListTaskQueueUserDataEntriesResponse, error) { + if ok := allow(ctx, 
"ListTaskQueueUserDataEntries", CallerSegmentMissing, p.rateLimiter); !ok { + return nil, ErrPersistenceLimitExceeded + } + return p.persistence.ListTaskQueueUserDataEntries(ctx, request) +} + +func (p taskRateLimitedPersistenceClient) GetTaskQueuesByBuildId(ctx context.Context, request *GetTaskQueuesByBuildIdRequest) ([]string, error) { + if ok := allow(ctx, "GetTaskQueuesByBuildId", CallerSegmentMissing, p.rateLimiter); !ok { + return nil, ErrPersistenceLimitExceeded + } + return p.persistence.GetTaskQueuesByBuildId(ctx, request) +} + +func (p taskRateLimitedPersistenceClient) CountTaskQueuesByBuildId(ctx context.Context, request *CountTaskQueuesByBuildIdRequest) (int, error) { + if ok := allow(ctx, "CountTaskQueuesByBuildId", CallerSegmentMissing, p.rateLimiter); !ok { + return 0, ErrPersistenceLimitExceeded + } + return p.persistence.CountTaskQueuesByBuildId(ctx, request) +} + +func (p *taskRateLimitedPersistenceClient) Close() { + p.persistence.Close() +} + +func (p *metadataRateLimitedPersistenceClient) GetName() string { + return p.persistence.GetName() +} + +func (p *metadataRateLimitedPersistenceClient) CreateNamespace( + ctx context.Context, + request *CreateNamespaceRequest, +) (*CreateNamespaceResponse, error) { + if ok := allow(ctx, "CreateNamespace", CallerSegmentMissing, p.rateLimiter); !ok { + return nil, ErrPersistenceLimitExceeded + } + + response, err := p.persistence.CreateNamespace(ctx, request) + return response, err +} + +func (p *metadataRateLimitedPersistenceClient) GetNamespace( + ctx context.Context, + request *GetNamespaceRequest, +) (*GetNamespaceResponse, error) { + if ok := allow(ctx, "GetNamespace", CallerSegmentMissing, p.rateLimiter); !ok { + return nil, ErrPersistenceLimitExceeded + } + + response, err := p.persistence.GetNamespace(ctx, request) + return response, err +} + +func (p *metadataRateLimitedPersistenceClient) UpdateNamespace( + ctx context.Context, + request *UpdateNamespaceRequest, +) error { + if ok := allow(ctx, "UpdateNamespace", CallerSegmentMissing, p.rateLimiter); !ok { + return ErrPersistenceLimitExceeded + } + + return p.persistence.UpdateNamespace(ctx, request) +} + +func (p *metadataRateLimitedPersistenceClient) RenameNamespace( + ctx context.Context, + request *RenameNamespaceRequest, +) error { + if ok := allow(ctx, "RenameNamespace", CallerSegmentMissing, p.rateLimiter); !ok { + return ErrPersistenceLimitExceeded + } + + return p.persistence.RenameNamespace(ctx, request) +} + +func (p *metadataRateLimitedPersistenceClient) DeleteNamespace( + ctx context.Context, + request *DeleteNamespaceRequest, +) error { + if ok := allow(ctx, "DeleteNamespace", CallerSegmentMissing, p.rateLimiter); !ok { + return ErrPersistenceLimitExceeded + } + + return p.persistence.DeleteNamespace(ctx, request) +} + +func (p *metadataRateLimitedPersistenceClient) DeleteNamespaceByName( + ctx context.Context, + request *DeleteNamespaceByNameRequest, +) error { + if ok := allow(ctx, "DeleteNamespaceByName", CallerSegmentMissing, p.rateLimiter); !ok { + return ErrPersistenceLimitExceeded + } + + return p.persistence.DeleteNamespaceByName(ctx, request) +} + +func (p *metadataRateLimitedPersistenceClient) ListNamespaces( + ctx context.Context, + request *ListNamespacesRequest, +) (*ListNamespacesResponse, error) { + if ok := allow(ctx, "ListNamespaces", CallerSegmentMissing, p.rateLimiter); !ok { + return nil, ErrPersistenceLimitExceeded + } + + response, err := p.persistence.ListNamespaces(ctx, request) + return response, err +} + +func (p 
*metadataRateLimitedPersistenceClient) GetMetadata( + ctx context.Context, +) (*GetMetadataResponse, error) { + if ok := allow(ctx, "GetMetadata", CallerSegmentMissing, p.rateLimiter); !ok { + return nil, ErrPersistenceLimitExceeded + } + + response, err := p.persistence.GetMetadata(ctx) + return response, err +} + +func (p *metadataRateLimitedPersistenceClient) InitializeSystemNamespaces( + ctx context.Context, + currentClusterName string, +) error { + if ok := allow(ctx, "InitializeSystemNamespaces", CallerSegmentMissing, p.rateLimiter); !ok { + return ErrPersistenceLimitExceeded + } + return p.persistence.InitializeSystemNamespaces(ctx, currentClusterName) +} + +func (p *metadataRateLimitedPersistenceClient) Close() { + p.persistence.Close() +} + +// AppendHistoryNodes add a node to history node table +func (p *executionRateLimitedPersistenceClient) AppendHistoryNodes( + ctx context.Context, + request *AppendHistoryNodesRequest, +) (*AppendHistoryNodesResponse, error) { + if ok := allow(ctx, "AppendHistoryNodes", request.ShardID, p.rateLimiter); !ok { + return nil, ErrPersistenceLimitExceeded + } + return p.persistence.AppendHistoryNodes(ctx, request) +} + +// AppendRawHistoryNodes add a node to history node table +func (p *executionRateLimitedPersistenceClient) AppendRawHistoryNodes( + ctx context.Context, + request *AppendRawHistoryNodesRequest, +) (*AppendHistoryNodesResponse, error) { + if ok := allow(ctx, "AppendRawHistoryNodes", request.ShardID, p.rateLimiter); !ok { + return nil, ErrPersistenceLimitExceeded + } + return p.persistence.AppendRawHistoryNodes(ctx, request) +} + +// ReadHistoryBranch returns history node data for a branch +func (p *executionRateLimitedPersistenceClient) ReadHistoryBranch( + ctx context.Context, + request *ReadHistoryBranchRequest, +) (*ReadHistoryBranchResponse, error) { + if ok := allow(ctx, "ReadHistoryBranch", request.ShardID, p.rateLimiter); !ok { + return nil, ErrPersistenceLimitExceeded + } + response, err := p.persistence.ReadHistoryBranch(ctx, request) + return response, err +} + +// ReadHistoryBranchReverse returns history node data for a branch +func (p *executionRateLimitedPersistenceClient) ReadHistoryBranchReverse( + ctx context.Context, + request *ReadHistoryBranchReverseRequest, +) (*ReadHistoryBranchReverseResponse, error) { + if ok := allow(ctx, "ReadHistoryBranchReverse", request.ShardID, p.rateLimiter); !ok { + return nil, ErrPersistenceLimitExceeded + } + response, err := p.persistence.ReadHistoryBranchReverse(ctx, request) + return response, err +} + +// ReadHistoryBranchByBatch returns history node data for a branch +func (p *executionRateLimitedPersistenceClient) ReadHistoryBranchByBatch( + ctx context.Context, + request *ReadHistoryBranchRequest, +) (*ReadHistoryBranchByBatchResponse, error) { + if ok := allow(ctx, "ReadHistoryBranchByBatch", request.ShardID, p.rateLimiter); !ok { + return nil, ErrPersistenceLimitExceeded + } + response, err := p.persistence.ReadHistoryBranchByBatch(ctx, request) + return response, err +} + +// ReadHistoryBranchByBatch returns history node data for a branch +func (p *executionRateLimitedPersistenceClient) ReadRawHistoryBranch( + ctx context.Context, + request *ReadHistoryBranchRequest, +) (*ReadRawHistoryBranchResponse, error) { + if ok := allow(ctx, "ReadRawHistoryBranch", request.ShardID, p.rateLimiter); !ok { + return nil, ErrPersistenceLimitExceeded + } + response, err := p.persistence.ReadRawHistoryBranch(ctx, request) + return response, err +} + +// ForkHistoryBranch forks a new branch 
from a old branch +func (p *executionRateLimitedPersistenceClient) ForkHistoryBranch( + ctx context.Context, + request *ForkHistoryBranchRequest, +) (*ForkHistoryBranchResponse, error) { + if ok := allow(ctx, "ForkHistoryBranch", request.ShardID, p.rateLimiter); !ok { + return nil, ErrPersistenceLimitExceeded + } + response, err := p.persistence.ForkHistoryBranch(ctx, request) + return response, err +} + +// DeleteHistoryBranch removes a branch +func (p *executionRateLimitedPersistenceClient) DeleteHistoryBranch( + ctx context.Context, + request *DeleteHistoryBranchRequest, +) error { + if ok := allow(ctx, "DeleteHistoryBranch", request.ShardID, p.rateLimiter); !ok { + return ErrPersistenceLimitExceeded + } + return p.persistence.DeleteHistoryBranch(ctx, request) +} + +// TrimHistoryBranch trims a branch +func (p *executionRateLimitedPersistenceClient) TrimHistoryBranch( + ctx context.Context, + request *TrimHistoryBranchRequest, +) (*TrimHistoryBranchResponse, error) { + if ok := allow(ctx, "TrimHistoryBranch", request.ShardID, p.rateLimiter); !ok { + return nil, ErrPersistenceLimitExceeded + } + resp, err := p.persistence.TrimHistoryBranch(ctx, request) + return resp, err +} + +// GetHistoryTree returns all branch information of a tree +func (p *executionRateLimitedPersistenceClient) GetHistoryTree( + ctx context.Context, + request *GetHistoryTreeRequest, +) (*GetHistoryTreeResponse, error) { + if ok := allow(ctx, "GetHistoryTree", request.ShardID, p.rateLimiter); !ok { + return nil, ErrPersistenceLimitExceeded + } + response, err := p.persistence.GetHistoryTree(ctx, request) + return response, err +} + +func (p *executionRateLimitedPersistenceClient) GetAllHistoryTreeBranches( + ctx context.Context, + request *GetAllHistoryTreeBranchesRequest, +) (*GetAllHistoryTreeBranchesResponse, error) { + if ok := allow(ctx, "GetAllHistoryTreeBranches", CallerSegmentMissing, p.rateLimiter); !ok { + return nil, ErrPersistenceLimitExceeded + } + response, err := p.persistence.GetAllHistoryTreeBranches(ctx, request) + return response, err +} + +func (p *queueRateLimitedPersistenceClient) EnqueueMessage( + ctx context.Context, + blob commonpb.DataBlob, +) error { + if ok := allow(ctx, "EnqueueMessage", CallerSegmentMissing, p.rateLimiter); !ok { + return ErrPersistenceLimitExceeded + } + + return p.persistence.EnqueueMessage(ctx, blob) +} + +func (p *queueRateLimitedPersistenceClient) ReadMessages( + ctx context.Context, + lastMessageID int64, + maxCount int, +) ([]*QueueMessage, error) { + if ok := allow(ctx, "ReadMessages", CallerSegmentMissing, p.rateLimiter); !ok { + return nil, ErrPersistenceLimitExceeded + } + + return p.persistence.ReadMessages(ctx, lastMessageID, maxCount) +} + +func (p *queueRateLimitedPersistenceClient) UpdateAckLevel( + ctx context.Context, + metadata *InternalQueueMetadata, +) error { + if ok := allow(ctx, "UpdateAckLevel", CallerSegmentMissing, p.rateLimiter); !ok { + return ErrPersistenceLimitExceeded + } + + return p.persistence.UpdateAckLevel(ctx, metadata) +} + +func (p *queueRateLimitedPersistenceClient) GetAckLevels( + ctx context.Context, +) (*InternalQueueMetadata, error) { + if ok := allow(ctx, "GetAckLevels", CallerSegmentMissing, p.rateLimiter); !ok { + return nil, ErrPersistenceLimitExceeded + } + + return p.persistence.GetAckLevels(ctx) +} + +func (p *queueRateLimitedPersistenceClient) DeleteMessagesBefore( + ctx context.Context, + messageID int64, +) error { + if ok := allow(ctx, "DeleteMessagesBefore", CallerSegmentMissing, p.rateLimiter); !ok { + return 
ErrPersistenceLimitExceeded + } + + return p.persistence.DeleteMessagesBefore(ctx, messageID) +} + +func (p *queueRateLimitedPersistenceClient) EnqueueMessageToDLQ( + ctx context.Context, + blob commonpb.DataBlob, +) (int64, error) { + if ok := allow(ctx, "EnqueueMessageToDLQ", CallerSegmentMissing, p.rateLimiter); !ok { + return EmptyQueueMessageID, ErrPersistenceLimitExceeded + } + + return p.persistence.EnqueueMessageToDLQ(ctx, blob) +} + +func (p *queueRateLimitedPersistenceClient) ReadMessagesFromDLQ( + ctx context.Context, + firstMessageID int64, + lastMessageID int64, + pageSize int, + pageToken []byte, +) ([]*QueueMessage, []byte, error) { + if ok := allow(ctx, "ReadMessagesFromDLQ", CallerSegmentMissing, p.rateLimiter); !ok { + return nil, nil, ErrPersistenceLimitExceeded + } + + return p.persistence.ReadMessagesFromDLQ(ctx, firstMessageID, lastMessageID, pageSize, pageToken) +} + +func (p *queueRateLimitedPersistenceClient) RangeDeleteMessagesFromDLQ( + ctx context.Context, + firstMessageID int64, + lastMessageID int64, +) error { + if ok := allow(ctx, "RangeDeleteMessagesFromDLQ", CallerSegmentMissing, p.rateLimiter); !ok { + return ErrPersistenceLimitExceeded + } + + return p.persistence.RangeDeleteMessagesFromDLQ(ctx, firstMessageID, lastMessageID) +} +func (p *queueRateLimitedPersistenceClient) UpdateDLQAckLevel( + ctx context.Context, + metadata *InternalQueueMetadata, +) error { + if ok := allow(ctx, "UpdateDLQAckLevel", CallerSegmentMissing, p.rateLimiter); !ok { + return ErrPersistenceLimitExceeded + } + + return p.persistence.UpdateDLQAckLevel(ctx, metadata) +} + +func (p *queueRateLimitedPersistenceClient) GetDLQAckLevels( + ctx context.Context, +) (*InternalQueueMetadata, error) { + if ok := allow(ctx, "GetDLQAckLevels", CallerSegmentMissing, p.rateLimiter); !ok { + return nil, ErrPersistenceLimitExceeded + } + + return p.persistence.GetDLQAckLevels(ctx) +} + +func (p *queueRateLimitedPersistenceClient) DeleteMessageFromDLQ( + ctx context.Context, + messageID int64, +) error { + if ok := allow(ctx, "DeleteMessageFromDLQ", CallerSegmentMissing, p.rateLimiter); !ok { + return ErrPersistenceLimitExceeded + } + + return p.persistence.DeleteMessageFromDLQ(ctx, messageID) +} + +func (p *queueRateLimitedPersistenceClient) Close() { + p.persistence.Close() +} + +func (p *queueRateLimitedPersistenceClient) Init( + ctx context.Context, + blob *commonpb.DataBlob, +) error { + return p.persistence.Init(ctx, blob) +} + +func (c *clusterMetadataRateLimitedPersistenceClient) Close() { + c.persistence.Close() +} + +func (c *clusterMetadataRateLimitedPersistenceClient) GetName() string { + return c.persistence.GetName() +} + +func (c *clusterMetadataRateLimitedPersistenceClient) GetClusterMembers( + ctx context.Context, + request *GetClusterMembersRequest, +) (*GetClusterMembersResponse, error) { + if ok := allow(ctx, "GetClusterMembers", CallerSegmentMissing, c.rateLimiter); !ok { + return nil, ErrPersistenceLimitExceeded + } + return c.persistence.GetClusterMembers(ctx, request) +} + +func (c *clusterMetadataRateLimitedPersistenceClient) UpsertClusterMembership( + ctx context.Context, + request *UpsertClusterMembershipRequest, +) error { + if ok := allow(ctx, "UpsertClusterMembership", CallerSegmentMissing, c.rateLimiter); !ok { + return ErrPersistenceLimitExceeded + } + return c.persistence.UpsertClusterMembership(ctx, request) +} + +func (c *clusterMetadataRateLimitedPersistenceClient) PruneClusterMembership( + ctx context.Context, + request *PruneClusterMembershipRequest, +) error 
{ + if ok := allow(ctx, "PruneClusterMembership", CallerSegmentMissing, c.rateLimiter); !ok { + return ErrPersistenceLimitExceeded + } + return c.persistence.PruneClusterMembership(ctx, request) +} + +func (c *clusterMetadataRateLimitedPersistenceClient) ListClusterMetadata( + ctx context.Context, + request *ListClusterMetadataRequest, +) (*ListClusterMetadataResponse, error) { + if ok := allow(ctx, "ListClusterMetadata", CallerSegmentMissing, c.rateLimiter); !ok { + return nil, ErrPersistenceLimitExceeded + } + return c.persistence.ListClusterMetadata(ctx, request) +} + +func (c *clusterMetadataRateLimitedPersistenceClient) GetCurrentClusterMetadata( + ctx context.Context, +) (*GetClusterMetadataResponse, error) { + if ok := allow(ctx, "GetCurrentClusterMetadata", CallerSegmentMissing, c.rateLimiter); !ok { + return nil, ErrPersistenceLimitExceeded + } + return c.persistence.GetCurrentClusterMetadata(ctx) +} + +func (c *clusterMetadataRateLimitedPersistenceClient) GetClusterMetadata( + ctx context.Context, + request *GetClusterMetadataRequest, +) (*GetClusterMetadataResponse, error) { + if ok := allow(ctx, "GetClusterMetadata", CallerSegmentMissing, c.rateLimiter); !ok { + return nil, ErrPersistenceLimitExceeded + } + return c.persistence.GetClusterMetadata(ctx, request) +} + +func (c *clusterMetadataRateLimitedPersistenceClient) SaveClusterMetadata( + ctx context.Context, + request *SaveClusterMetadataRequest, +) (bool, error) { + if ok := allow(ctx, "SaveClusterMetadata", CallerSegmentMissing, c.rateLimiter); !ok { + return false, ErrPersistenceLimitExceeded + } + return c.persistence.SaveClusterMetadata(ctx, request) +} + +func (c *clusterMetadataRateLimitedPersistenceClient) DeleteClusterMetadata( + ctx context.Context, + request *DeleteClusterMetadataRequest, +) error { + if ok := allow(ctx, "DeleteClusterMetadata", CallerSegmentMissing, c.rateLimiter); !ok { + return ErrPersistenceLimitExceeded + } + return c.persistence.DeleteClusterMetadata(ctx, request) +} + +func allow( + ctx context.Context, + api string, + shardID int32, + rateLimiter quotas.RequestRateLimiter, +) bool { + callerInfo := headers.GetCallerInfo(ctx) + return rateLimiter.Allow(time.Now().UTC(), quotas.NewRequest( + api, + RateLimitDefaultToken, + callerInfo.CallerName, + callerInfo.CallerType, + shardID, + callerInfo.CallOrigin, + )) +} + +// TODO: change the value returned so it can also be used by +// persistence metrics client. For now, it's only used by rate +// limit client, and we don't really care about the actual value +// returned, as long as they are different from each task category. +func ConstructHistoryTaskAPI( + baseAPI string, + taskCategory tasks.Category, +) string { + return baseAPI + taskCategory.Name() +} diff -Nru temporal-1.21.5-1/src/common/persistence/persistence_retryable_clients.go temporal-1.22.5/src/common/persistence/persistence_retryable_clients.go --- temporal-1.21.5-1/src/common/persistence/persistence_retryable_clients.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/persistence_retryable_clients.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,1243 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inp. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inp. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package persistence + +import ( + "context" + + commonpb "go.temporal.io/api/common/v1" + + "go.temporal.io/server/common/backoff" +) + +type ( + shardRetryablePersistenceClient struct { + persistence ShardManager + policy backoff.RetryPolicy + isRetryable backoff.IsRetryable + } + + executionRetryablePersistenceClient struct { + persistence ExecutionManager + policy backoff.RetryPolicy + isRetryable backoff.IsRetryable + } + + taskRetryablePersistenceClient struct { + persistence TaskManager + policy backoff.RetryPolicy + isRetryable backoff.IsRetryable + } + + metadataRetryablePersistenceClient struct { + persistence MetadataManager + policy backoff.RetryPolicy + isRetryable backoff.IsRetryable + } + + clusterMetadataRetryablePersistenceClient struct { + persistence ClusterMetadataManager + policy backoff.RetryPolicy + isRetryable backoff.IsRetryable + } + + queueRetryablePersistenceClient struct { + persistence Queue + policy backoff.RetryPolicy + isRetryable backoff.IsRetryable + } +) + +var _ ShardManager = (*shardRetryablePersistenceClient)(nil) +var _ ExecutionManager = (*executionRetryablePersistenceClient)(nil) +var _ TaskManager = (*taskRetryablePersistenceClient)(nil) +var _ MetadataManager = (*metadataRetryablePersistenceClient)(nil) +var _ ClusterMetadataManager = (*clusterMetadataRetryablePersistenceClient)(nil) +var _ Queue = (*queueRetryablePersistenceClient)(nil) + +// NewShardPersistenceRetryableClient creates a client to manage shards +func NewShardPersistenceRetryableClient( + persistence ShardManager, + policy backoff.RetryPolicy, + isRetryable backoff.IsRetryable, +) ShardManager { + return &shardRetryablePersistenceClient{ + persistence: persistence, + policy: policy, + isRetryable: isRetryable, + } +} + +// NewExecutionPersistenceRetryableClient creates a client to manage executions +func NewExecutionPersistenceRetryableClient( + persistence ExecutionManager, + policy backoff.RetryPolicy, + isRetryable backoff.IsRetryable, +) ExecutionManager { + return &executionRetryablePersistenceClient{ + persistence: persistence, + policy: policy, + isRetryable: isRetryable, + } +} + +// NewTaskPersistenceRetryableClient creates a client to manage tasks +func NewTaskPersistenceRetryableClient( + persistence TaskManager, + policy backoff.RetryPolicy, + isRetryable backoff.IsRetryable, +) TaskManager { + return &taskRetryablePersistenceClient{ + persistence: persistence, + policy: policy, + isRetryable: 
isRetryable, + } +} + +// NewMetadataPersistenceRetryableClient creates a MetadataManager client to manage metadata +func NewMetadataPersistenceRetryableClient( + persistence MetadataManager, + policy backoff.RetryPolicy, + isRetryable backoff.IsRetryable, +) MetadataManager { + return &metadataRetryablePersistenceClient{ + persistence: persistence, + policy: policy, + isRetryable: isRetryable, + } +} + +// NewClusterMetadataPersistenceRetryableClient creates a MetadataManager client to manage metadata +func NewClusterMetadataPersistenceRetryableClient( + persistence ClusterMetadataManager, + policy backoff.RetryPolicy, + isRetryable backoff.IsRetryable, +) ClusterMetadataManager { + return &clusterMetadataRetryablePersistenceClient{ + persistence: persistence, + policy: policy, + isRetryable: isRetryable, + } +} + +// NewQueuePersistenceRetryableClient creates a client to manage queue +func NewQueuePersistenceRetryableClient( + persistence Queue, + policy backoff.RetryPolicy, + isRetryable backoff.IsRetryable, +) Queue { + return &queueRetryablePersistenceClient{ + persistence: persistence, + policy: policy, + isRetryable: isRetryable, + } +} + +func (p *shardRetryablePersistenceClient) GetName() string { + return p.persistence.GetName() +} + +func (p *shardRetryablePersistenceClient) GetOrCreateShard( + ctx context.Context, + request *GetOrCreateShardRequest, +) (*GetOrCreateShardResponse, error) { + var response *GetOrCreateShardResponse + op := func(ctx context.Context) error { + var err error + response, err = p.persistence.GetOrCreateShard(ctx, request) + return err + } + + err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) + return response, err +} + +func (p *shardRetryablePersistenceClient) UpdateShard( + ctx context.Context, + request *UpdateShardRequest, +) error { + op := func(ctx context.Context) error { + return p.persistence.UpdateShard(ctx, request) + } + + return backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) +} + +func (p *shardRetryablePersistenceClient) AssertShardOwnership( + ctx context.Context, + request *AssertShardOwnershipRequest, +) error { + op := func(ctx context.Context) error { + return p.persistence.AssertShardOwnership(ctx, request) + } + + return backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) +} + +func (p *shardRetryablePersistenceClient) Close() { + p.persistence.Close() +} + +func (p *executionRetryablePersistenceClient) GetName() string { + return p.persistence.GetName() +} + +func (p *executionRetryablePersistenceClient) GetHistoryBranchUtil() HistoryBranchUtil { + return p.persistence.GetHistoryBranchUtil() +} + +func (p *executionRetryablePersistenceClient) CreateWorkflowExecution( + ctx context.Context, + request *CreateWorkflowExecutionRequest, +) (*CreateWorkflowExecutionResponse, error) { + var response *CreateWorkflowExecutionResponse + op := func(ctx context.Context) error { + var err error + response, err = p.persistence.CreateWorkflowExecution(ctx, request) + return err + } + + err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) + return response, err +} + +func (p *executionRetryablePersistenceClient) GetWorkflowExecution( + ctx context.Context, + request *GetWorkflowExecutionRequest, +) (*GetWorkflowExecutionResponse, error) { + var response *GetWorkflowExecutionResponse + op := func(ctx context.Context) error { + var err error + response, err = p.persistence.GetWorkflowExecution(ctx, request) + return err + } + + err := backoff.ThrottleRetryContext(ctx, op, p.policy, 
p.isRetryable) + return response, err +} + +func (p *executionRetryablePersistenceClient) SetWorkflowExecution( + ctx context.Context, + request *SetWorkflowExecutionRequest, +) (*SetWorkflowExecutionResponse, error) { + var response *SetWorkflowExecutionResponse + op := func(ctx context.Context) error { + var err error + response, err = p.persistence.SetWorkflowExecution(ctx, request) + return err + } + + err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) + return response, err +} + +func (p *executionRetryablePersistenceClient) UpdateWorkflowExecution( + ctx context.Context, + request *UpdateWorkflowExecutionRequest, +) (*UpdateWorkflowExecutionResponse, error) { + var response *UpdateWorkflowExecutionResponse + op := func(ctx context.Context) error { + var err error + response, err = p.persistence.UpdateWorkflowExecution(ctx, request) + return err + } + + err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) + return response, err +} + +func (p *executionRetryablePersistenceClient) ConflictResolveWorkflowExecution( + ctx context.Context, + request *ConflictResolveWorkflowExecutionRequest, +) (*ConflictResolveWorkflowExecutionResponse, error) { + var response *ConflictResolveWorkflowExecutionResponse + op := func(ctx context.Context) error { + var err error + response, err = p.persistence.ConflictResolveWorkflowExecution(ctx, request) + return err + } + + err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) + return response, err +} + +func (p *executionRetryablePersistenceClient) DeleteWorkflowExecution( + ctx context.Context, + request *DeleteWorkflowExecutionRequest, +) error { + op := func(ctx context.Context) error { + return p.persistence.DeleteWorkflowExecution(ctx, request) + } + + return backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) +} + +func (p *executionRetryablePersistenceClient) DeleteCurrentWorkflowExecution( + ctx context.Context, + request *DeleteCurrentWorkflowExecutionRequest, +) error { + op := func(ctx context.Context) error { + return p.persistence.DeleteCurrentWorkflowExecution(ctx, request) + } + + return backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) +} + +func (p *executionRetryablePersistenceClient) GetCurrentExecution( + ctx context.Context, + request *GetCurrentExecutionRequest, +) (*GetCurrentExecutionResponse, error) { + var response *GetCurrentExecutionResponse + op := func(ctx context.Context) error { + var err error + response, err = p.persistence.GetCurrentExecution(ctx, request) + return err + } + + err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) + return response, err +} + +func (p *executionRetryablePersistenceClient) ListConcreteExecutions( + ctx context.Context, + request *ListConcreteExecutionsRequest, +) (*ListConcreteExecutionsResponse, error) { + var response *ListConcreteExecutionsResponse + op := func(ctx context.Context) error { + var err error + response, err = p.persistence.ListConcreteExecutions(ctx, request) + return err + } + + err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) + return response, err +} + +func (p *executionRetryablePersistenceClient) RegisterHistoryTaskReader( + ctx context.Context, + request *RegisterHistoryTaskReaderRequest, +) error { + // hint methods don't actually hint DB, retry won't help + return p.persistence.RegisterHistoryTaskReader(ctx, request) +} + +func (p *executionRetryablePersistenceClient) UnregisterHistoryTaskReader( + ctx context.Context, + request 
*UnregisterHistoryTaskReaderRequest, +) { + // hint methods don't actually hint DB, retry won't help + p.persistence.UnregisterHistoryTaskReader(ctx, request) +} + +func (p *executionRetryablePersistenceClient) UpdateHistoryTaskReaderProgress( + ctx context.Context, + request *UpdateHistoryTaskReaderProgressRequest, +) { + // hint methods don't actually hint DB, retry won't help + p.persistence.UpdateHistoryTaskReaderProgress(ctx, request) +} + +func (p *executionRetryablePersistenceClient) AddHistoryTasks( + ctx context.Context, + request *AddHistoryTasksRequest, +) error { + op := func(ctx context.Context) error { + return p.persistence.AddHistoryTasks(ctx, request) + } + + return backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) +} + +func (p *executionRetryablePersistenceClient) GetHistoryTasks( + ctx context.Context, + request *GetHistoryTasksRequest, +) (*GetHistoryTasksResponse, error) { + var response *GetHistoryTasksResponse + op := func(ctx context.Context) error { + var err error + response, err = p.persistence.GetHistoryTasks(ctx, request) + return err + } + + err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) + return response, err +} + +func (p *executionRetryablePersistenceClient) CompleteHistoryTask( + ctx context.Context, + request *CompleteHistoryTaskRequest, +) error { + op := func(ctx context.Context) error { + return p.persistence.CompleteHistoryTask(ctx, request) + } + + return backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) +} + +func (p *executionRetryablePersistenceClient) RangeCompleteHistoryTasks( + ctx context.Context, + request *RangeCompleteHistoryTasksRequest, +) error { + op := func(ctx context.Context) error { + return p.persistence.RangeCompleteHistoryTasks(ctx, request) + } + + return backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) +} + +func (p *executionRetryablePersistenceClient) PutReplicationTaskToDLQ( + ctx context.Context, + request *PutReplicationTaskToDLQRequest, +) error { + op := func(ctx context.Context) error { + return p.persistence.PutReplicationTaskToDLQ(ctx, request) + } + + return backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) +} + +func (p *executionRetryablePersistenceClient) GetReplicationTasksFromDLQ( + ctx context.Context, + request *GetReplicationTasksFromDLQRequest, +) (*GetHistoryTasksResponse, error) { + var response *GetHistoryTasksResponse + op := func(ctx context.Context) error { + var err error + response, err = p.persistence.GetReplicationTasksFromDLQ(ctx, request) + return err + } + + err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) + return response, err +} + +func (p *executionRetryablePersistenceClient) DeleteReplicationTaskFromDLQ( + ctx context.Context, + request *DeleteReplicationTaskFromDLQRequest, +) error { + op := func(ctx context.Context) error { + return p.persistence.DeleteReplicationTaskFromDLQ(ctx, request) + } + + return backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) +} + +func (p *executionRetryablePersistenceClient) RangeDeleteReplicationTaskFromDLQ( + ctx context.Context, + request *RangeDeleteReplicationTaskFromDLQRequest, +) error { + op := func(ctx context.Context) error { + return p.persistence.RangeDeleteReplicationTaskFromDLQ(ctx, request) + } + + return backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) +} + +func (p *executionRetryablePersistenceClient) IsReplicationDLQEmpty( + ctx context.Context, + request *GetReplicationTasksFromDLQRequest, +) (bool, error) { + var 
isEmpty bool + op := func(ctx context.Context) error { + var err error + isEmpty, err = p.persistence.IsReplicationDLQEmpty(ctx, request) + return err + } + + err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) + return isEmpty, err +} + +// AppendHistoryNodes add a node to history node table +func (p *executionRetryablePersistenceClient) AppendHistoryNodes( + ctx context.Context, + request *AppendHistoryNodesRequest, +) (*AppendHistoryNodesResponse, error) { + var response *AppendHistoryNodesResponse + op := func(ctx context.Context) error { + var err error + response, err = p.persistence.AppendHistoryNodes(ctx, request) + return err + } + + err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) + return response, err +} + +// AppendRawHistoryNodes add a node to history node table +func (p *executionRetryablePersistenceClient) AppendRawHistoryNodes( + ctx context.Context, + request *AppendRawHistoryNodesRequest, +) (*AppendHistoryNodesResponse, error) { + var response *AppendHistoryNodesResponse + op := func(ctx context.Context) error { + var err error + response, err = p.persistence.AppendRawHistoryNodes(ctx, request) + return err + } + + err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) + return response, err +} + +// ReadHistoryBranch returns history node data for a branch +func (p *executionRetryablePersistenceClient) ReadHistoryBranch( + ctx context.Context, + request *ReadHistoryBranchRequest, +) (*ReadHistoryBranchResponse, error) { + var response *ReadHistoryBranchResponse + op := func(ctx context.Context) error { + var err error + response, err = p.persistence.ReadHistoryBranch(ctx, request) + return err + } + + err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) + return response, err +} + +// ReadHistoryBranch returns history node data for a branch +func (p *executionRetryablePersistenceClient) ReadHistoryBranchReverse( + ctx context.Context, + request *ReadHistoryBranchReverseRequest, +) (*ReadHistoryBranchReverseResponse, error) { + var response *ReadHistoryBranchReverseResponse + op := func(ctx context.Context) error { + var err error + response, err = p.persistence.ReadHistoryBranchReverse(ctx, request) + return err + } + + err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) + return response, err +} + +// ReadHistoryBranchByBatch returns history node data for a branch +func (p *executionRetryablePersistenceClient) ReadHistoryBranchByBatch( + ctx context.Context, + request *ReadHistoryBranchRequest, +) (*ReadHistoryBranchByBatchResponse, error) { + var response *ReadHistoryBranchByBatchResponse + op := func(ctx context.Context) error { + var err error + response, err = p.persistence.ReadHistoryBranchByBatch(ctx, request) + return err + } + + err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) + return response, err +} + +// ReadHistoryBranchByBatch returns history node data for a branch +func (p *executionRetryablePersistenceClient) ReadRawHistoryBranch( + ctx context.Context, + request *ReadHistoryBranchRequest, +) (*ReadRawHistoryBranchResponse, error) { + var response *ReadRawHistoryBranchResponse + op := func(ctx context.Context) error { + var err error + response, err = p.persistence.ReadRawHistoryBranch(ctx, request) + return err + } + + err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) + return response, err +} + +// ForkHistoryBranch forks a new branch from a old branch +func (p *executionRetryablePersistenceClient) ForkHistoryBranch( + ctx 
context.Context, + request *ForkHistoryBranchRequest, +) (*ForkHistoryBranchResponse, error) { + var response *ForkHistoryBranchResponse + op := func(ctx context.Context) error { + var err error + response, err = p.persistence.ForkHistoryBranch(ctx, request) + return err + } + + err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) + return response, err +} + +// DeleteHistoryBranch removes a branch +func (p *executionRetryablePersistenceClient) DeleteHistoryBranch( + ctx context.Context, + request *DeleteHistoryBranchRequest, +) error { + op := func(ctx context.Context) error { + return p.persistence.DeleteHistoryBranch(ctx, request) + } + + return backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) +} + +// TrimHistoryBranch trims a branch +func (p *executionRetryablePersistenceClient) TrimHistoryBranch( + ctx context.Context, + request *TrimHistoryBranchRequest, +) (*TrimHistoryBranchResponse, error) { + var response *TrimHistoryBranchResponse + op := func(ctx context.Context) error { + var err error + response, err = p.persistence.TrimHistoryBranch(ctx, request) + return err + } + + err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) + return response, err +} + +// GetHistoryTree returns all branch information of a tree +func (p *executionRetryablePersistenceClient) GetHistoryTree( + ctx context.Context, + request *GetHistoryTreeRequest, +) (*GetHistoryTreeResponse, error) { + var response *GetHistoryTreeResponse + op := func(ctx context.Context) error { + var err error + response, err = p.persistence.GetHistoryTree(ctx, request) + return err + } + + err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) + return response, err +} + +func (p *executionRetryablePersistenceClient) GetAllHistoryTreeBranches( + ctx context.Context, + request *GetAllHistoryTreeBranchesRequest, +) (*GetAllHistoryTreeBranchesResponse, error) { + var response *GetAllHistoryTreeBranchesResponse + op := func(ctx context.Context) error { + var err error + response, err = p.persistence.GetAllHistoryTreeBranches(ctx, request) + return err + } + + err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) + return response, err +} + +func (p *executionRetryablePersistenceClient) Close() { + p.persistence.Close() +} + +func (p *taskRetryablePersistenceClient) GetName() string { + return p.persistence.GetName() +} + +func (p *taskRetryablePersistenceClient) CreateTasks( + ctx context.Context, + request *CreateTasksRequest, +) (*CreateTasksResponse, error) { + var response *CreateTasksResponse + op := func(ctx context.Context) error { + var err error + response, err = p.persistence.CreateTasks(ctx, request) + return err + } + + err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) + return response, err +} + +func (p *taskRetryablePersistenceClient) GetTasks( + ctx context.Context, + request *GetTasksRequest, +) (*GetTasksResponse, error) { + var response *GetTasksResponse + op := func(ctx context.Context) error { + var err error + response, err = p.persistence.GetTasks(ctx, request) + return err + } + + err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) + return response, err +} + +func (p *taskRetryablePersistenceClient) CompleteTask( + ctx context.Context, + request *CompleteTaskRequest, +) error { + op := func(ctx context.Context) error { + return p.persistence.CompleteTask(ctx, request) + } + + return backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) +} + +func (p *taskRetryablePersistenceClient) 
CompleteTasksLessThan( + ctx context.Context, + request *CompleteTasksLessThanRequest, +) (int, error) { + var response int + op := func(ctx context.Context) error { + var err error + response, err = p.persistence.CompleteTasksLessThan(ctx, request) + return err + } + + err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) + return response, err +} + +func (p *taskRetryablePersistenceClient) CreateTaskQueue( + ctx context.Context, + request *CreateTaskQueueRequest, +) (*CreateTaskQueueResponse, error) { + var response *CreateTaskQueueResponse + op := func(ctx context.Context) error { + var err error + response, err = p.persistence.CreateTaskQueue(ctx, request) + return err + } + + err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) + return response, err +} + +func (p *taskRetryablePersistenceClient) UpdateTaskQueue( + ctx context.Context, + request *UpdateTaskQueueRequest, +) (*UpdateTaskQueueResponse, error) { + var response *UpdateTaskQueueResponse + op := func(ctx context.Context) error { + var err error + response, err = p.persistence.UpdateTaskQueue(ctx, request) + return err + } + + err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) + return response, err +} + +func (p *taskRetryablePersistenceClient) GetTaskQueue( + ctx context.Context, + request *GetTaskQueueRequest, +) (*GetTaskQueueResponse, error) { + var response *GetTaskQueueResponse + op := func(ctx context.Context) error { + var err error + response, err = p.persistence.GetTaskQueue(ctx, request) + return err + } + + err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) + return response, err +} + +func (p *taskRetryablePersistenceClient) ListTaskQueue( + ctx context.Context, + request *ListTaskQueueRequest, +) (*ListTaskQueueResponse, error) { + var response *ListTaskQueueResponse + op := func(ctx context.Context) error { + var err error + response, err = p.persistence.ListTaskQueue(ctx, request) + return err + } + + err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) + return response, err +} + +func (p *taskRetryablePersistenceClient) DeleteTaskQueue( + ctx context.Context, + request *DeleteTaskQueueRequest, +) error { + op := func(ctx context.Context) error { + return p.persistence.DeleteTaskQueue(ctx, request) + } + + return backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) +} + +func (p *taskRetryablePersistenceClient) GetTaskQueueUserData( + ctx context.Context, + request *GetTaskQueueUserDataRequest, +) (*GetTaskQueueUserDataResponse, error) { + var response *GetTaskQueueUserDataResponse + op := func(ctx context.Context) error { + var err error + response, err = p.persistence.GetTaskQueueUserData(ctx, request) + return err + } + + err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) + return response, err +} + +func (p *taskRetryablePersistenceClient) UpdateTaskQueueUserData( + ctx context.Context, + request *UpdateTaskQueueUserDataRequest, +) error { + op := func(ctx context.Context) error { + return p.persistence.UpdateTaskQueueUserData(ctx, request) + } + + err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) + return err +} + +func (p *taskRetryablePersistenceClient) ListTaskQueueUserDataEntries( + ctx context.Context, + request *ListTaskQueueUserDataEntriesRequest, +) (*ListTaskQueueUserDataEntriesResponse, error) { + var response *ListTaskQueueUserDataEntriesResponse + op := func(ctx context.Context) error { + var err error + response, err = 
p.persistence.ListTaskQueueUserDataEntries(ctx, request) + return err + } + + err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) + return response, err +} + +func (p *taskRetryablePersistenceClient) GetTaskQueuesByBuildId(ctx context.Context, request *GetTaskQueuesByBuildIdRequest) ([]string, error) { + var response []string + op := func(ctx context.Context) error { + var err error + response, err = p.persistence.GetTaskQueuesByBuildId(ctx, request) + return err + } + + err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) + return response, err +} + +func (p *taskRetryablePersistenceClient) CountTaskQueuesByBuildId(ctx context.Context, request *CountTaskQueuesByBuildIdRequest) (int, error) { + var response int + op := func(ctx context.Context) error { + var err error + response, err = p.persistence.CountTaskQueuesByBuildId(ctx, request) + return err + } + + err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) + return response, err +} + +func (p *taskRetryablePersistenceClient) Close() { + p.persistence.Close() +} + +func (p *metadataRetryablePersistenceClient) GetName() string { + return p.persistence.GetName() +} + +func (p *metadataRetryablePersistenceClient) CreateNamespace( + ctx context.Context, + request *CreateNamespaceRequest, +) (*CreateNamespaceResponse, error) { + var response *CreateNamespaceResponse + op := func(ctx context.Context) error { + var err error + response, err = p.persistence.CreateNamespace(ctx, request) + return err + } + + err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) + return response, err +} + +func (p *metadataRetryablePersistenceClient) GetNamespace( + ctx context.Context, + request *GetNamespaceRequest, +) (*GetNamespaceResponse, error) { + var response *GetNamespaceResponse + op := func(ctx context.Context) error { + var err error + response, err = p.persistence.GetNamespace(ctx, request) + return err + } + + err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) + return response, err +} + +func (p *metadataRetryablePersistenceClient) UpdateNamespace( + ctx context.Context, + request *UpdateNamespaceRequest, +) error { + op := func(ctx context.Context) error { + return p.persistence.UpdateNamespace(ctx, request) + } + + return backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) +} + +func (p *metadataRetryablePersistenceClient) RenameNamespace( + ctx context.Context, + request *RenameNamespaceRequest, +) error { + op := func(ctx context.Context) error { + return p.persistence.RenameNamespace(ctx, request) + } + + return backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) +} + +func (p *metadataRetryablePersistenceClient) DeleteNamespace( + ctx context.Context, + request *DeleteNamespaceRequest, +) error { + op := func(ctx context.Context) error { + return p.persistence.DeleteNamespace(ctx, request) + } + + return backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) +} + +func (p *metadataRetryablePersistenceClient) DeleteNamespaceByName( + ctx context.Context, + request *DeleteNamespaceByNameRequest, +) error { + op := func(ctx context.Context) error { + return p.persistence.DeleteNamespaceByName(ctx, request) + } + + return backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) +} + +func (p *metadataRetryablePersistenceClient) ListNamespaces( + ctx context.Context, + request *ListNamespacesRequest, +) (*ListNamespacesResponse, error) { + var response *ListNamespacesResponse + op := func(ctx context.Context) error { + var 
err error + response, err = p.persistence.ListNamespaces(ctx, request) + return err + } + + err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) + return response, err +} + +func (p *metadataRetryablePersistenceClient) GetMetadata( + ctx context.Context, +) (*GetMetadataResponse, error) { + var response *GetMetadataResponse + op := func(ctx context.Context) error { + var err error + response, err = p.persistence.GetMetadata(ctx) + return err + } + + err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) + return response, err +} + +func (p *metadataRetryablePersistenceClient) InitializeSystemNamespaces( + ctx context.Context, + currentClusterName string, +) error { + op := func(ctx context.Context) error { + return p.persistence.InitializeSystemNamespaces(ctx, currentClusterName) + } + + return backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) +} + +func (p *metadataRetryablePersistenceClient) Close() { + p.persistence.Close() +} + +func (p *clusterMetadataRetryablePersistenceClient) GetName() string { + return p.persistence.GetName() +} + +func (p *clusterMetadataRetryablePersistenceClient) GetClusterMembers( + ctx context.Context, + request *GetClusterMembersRequest, +) (*GetClusterMembersResponse, error) { + var response *GetClusterMembersResponse + op := func(ctx context.Context) error { + var err error + response, err = p.persistence.GetClusterMembers(ctx, request) + return err + } + + err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) + return response, err +} + +func (p *clusterMetadataRetryablePersistenceClient) UpsertClusterMembership( + ctx context.Context, + request *UpsertClusterMembershipRequest, +) error { + op := func(ctx context.Context) error { + return p.persistence.UpsertClusterMembership(ctx, request) + } + + return backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) +} + +func (p *clusterMetadataRetryablePersistenceClient) PruneClusterMembership( + ctx context.Context, + request *PruneClusterMembershipRequest, +) error { + op := func(ctx context.Context) error { + return p.persistence.PruneClusterMembership(ctx, request) + } + + return backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) +} + +func (p *clusterMetadataRetryablePersistenceClient) ListClusterMetadata( + ctx context.Context, + request *ListClusterMetadataRequest, +) (*ListClusterMetadataResponse, error) { + var response *ListClusterMetadataResponse + op := func(ctx context.Context) error { + var err error + response, err = p.persistence.ListClusterMetadata(ctx, request) + return err + } + + err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) + return response, err +} + +func (p *clusterMetadataRetryablePersistenceClient) GetCurrentClusterMetadata( + ctx context.Context, +) (*GetClusterMetadataResponse, error) { + var response *GetClusterMetadataResponse + op := func(ctx context.Context) error { + var err error + response, err = p.persistence.GetCurrentClusterMetadata(ctx) + return err + } + + err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) + return response, err +} + +func (p *clusterMetadataRetryablePersistenceClient) GetClusterMetadata( + ctx context.Context, + request *GetClusterMetadataRequest, +) (*GetClusterMetadataResponse, error) { + var response *GetClusterMetadataResponse + op := func(ctx context.Context) error { + var err error + response, err = p.persistence.GetClusterMetadata(ctx, request) + return err + } + + err := backoff.ThrottleRetryContext(ctx, op, p.policy, 
p.isRetryable) + return response, err +} + +func (p *clusterMetadataRetryablePersistenceClient) SaveClusterMetadata( + ctx context.Context, + request *SaveClusterMetadataRequest, +) (bool, error) { + var response bool + op := func(ctx context.Context) error { + var err error + response, err = p.persistence.SaveClusterMetadata(ctx, request) + return err + } + + err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) + return response, err +} + +func (p *clusterMetadataRetryablePersistenceClient) DeleteClusterMetadata( + ctx context.Context, + request *DeleteClusterMetadataRequest, +) error { + op := func(ctx context.Context) error { + return p.persistence.DeleteClusterMetadata(ctx, request) + } + + return backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) +} + +func (p *clusterMetadataRetryablePersistenceClient) Close() { + p.persistence.Close() +} + +func (p *queueRetryablePersistenceClient) Init( + ctx context.Context, + blob *commonpb.DataBlob, +) error { + op := func(ctx context.Context) error { + return p.persistence.Init(ctx, blob) + } + + return backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) +} + +func (p *queueRetryablePersistenceClient) EnqueueMessage( + ctx context.Context, + blob commonpb.DataBlob, +) error { + op := func(ctx context.Context) error { + return p.persistence.EnqueueMessage(ctx, blob) + } + + return backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) +} + +func (p *queueRetryablePersistenceClient) ReadMessages( + ctx context.Context, + lastMessageID int64, + maxCount int, +) ([]*QueueMessage, error) { + var response []*QueueMessage + op := func(ctx context.Context) error { + var err error + response, err = p.persistence.ReadMessages(ctx, lastMessageID, maxCount) + return err + } + + err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) + return response, err +} + +func (p *queueRetryablePersistenceClient) UpdateAckLevel( + ctx context.Context, + metadata *InternalQueueMetadata, +) error { + op := func(ctx context.Context) error { + return p.persistence.UpdateAckLevel(ctx, metadata) + } + + return backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) +} + +func (p *queueRetryablePersistenceClient) GetAckLevels( + ctx context.Context, +) (*InternalQueueMetadata, error) { + var response *InternalQueueMetadata + op := func(ctx context.Context) error { + var err error + response, err = p.persistence.GetAckLevels(ctx) + return err + } + + err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) + return response, err +} + +func (p *queueRetryablePersistenceClient) DeleteMessagesBefore( + ctx context.Context, + messageID int64, +) error { + op := func(ctx context.Context) error { + return p.persistence.DeleteMessagesBefore(ctx, messageID) + } + + return backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) +} + +func (p *queueRetryablePersistenceClient) EnqueueMessageToDLQ( + ctx context.Context, + blob commonpb.DataBlob, +) (int64, error) { + var response int64 + op := func(ctx context.Context) error { + var err error + response, err = p.persistence.EnqueueMessageToDLQ(ctx, blob) + return err + } + + err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) + return response, err +} + +func (p *queueRetryablePersistenceClient) ReadMessagesFromDLQ( + ctx context.Context, + firstMessageID int64, + lastMessageID int64, + pageSize int, + pageToken []byte, +) ([]*QueueMessage, []byte, error) { + var messages []*QueueMessage + var nextPageToken []byte + op := func(ctx 
context.Context) error { + var err error + messages, nextPageToken, err = p.persistence.ReadMessagesFromDLQ(ctx, firstMessageID, lastMessageID, pageSize, pageToken) + return err + } + + err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) + return messages, nextPageToken, err +} + +func (p *queueRetryablePersistenceClient) RangeDeleteMessagesFromDLQ( + ctx context.Context, + firstMessageID int64, + lastMessageID int64, +) error { + op := func(ctx context.Context) error { + return p.persistence.RangeDeleteMessagesFromDLQ(ctx, firstMessageID, lastMessageID) + } + + return backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) +} +func (p *queueRetryablePersistenceClient) UpdateDLQAckLevel( + ctx context.Context, + metadata *InternalQueueMetadata, +) error { + op := func(ctx context.Context) error { + return p.persistence.UpdateDLQAckLevel(ctx, metadata) + } + + return backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) +} + +func (p *queueRetryablePersistenceClient) GetDLQAckLevels( + ctx context.Context, +) (*InternalQueueMetadata, error) { + var response *InternalQueueMetadata + op := func(ctx context.Context) error { + var err error + response, err = p.persistence.GetDLQAckLevels(ctx) + return err + } + + err := backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) + return response, err +} + +func (p *queueRetryablePersistenceClient) DeleteMessageFromDLQ( + ctx context.Context, + messageID int64, +) error { + op := func(ctx context.Context) error { + return p.persistence.DeleteMessageFromDLQ(ctx, messageID) + } + + return backoff.ThrottleRetryContext(ctx, op, p.policy, p.isRetryable) +} + +func (p *queueRetryablePersistenceClient) Close() { + p.persistence.Close() +} diff -Nru temporal-1.21.5-1/src/common/persistence/serialization/serializer.go temporal-1.22.5/src/common/persistence/serialization/serializer.go --- temporal-1.21.5-1/src/common/persistence/serialization/serializer.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/serialization/serializer.go 2024-02-23 09:45:43.000000000 +0000 @@ -342,15 +342,6 @@ shardInfo.ReplicationDlqAckLevel = make(map[string]int64) } - if shardInfo.GetQueueAckLevels() == nil { - shardInfo.QueueAckLevels = make(map[int32]*persistencespb.QueueAckLevel) - } - for _, queueAckLevel := range shardInfo.QueueAckLevels { - if queueAckLevel.ClusterAckLevel == nil { - queueAckLevel.ClusterAckLevel = make(map[string]int64) - } - } - if shardInfo.GetQueueStates() == nil { shardInfo.QueueStates = make(map[int32]*persistencespb.QueueState) } diff -Nru temporal-1.21.5-1/src/common/persistence/serialization/serializer_test.go temporal-1.22.5/src/common/persistence/serialization/serializer_test.go --- temporal-1.21.5-1/src/common/persistence/serialization/serializer_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/serialization/serializer_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -167,10 +167,6 @@ shardInfo.RangeId = rand.Int63() categoryID := rand.Int31() - shardInfo.QueueAckLevels = make(map[int32]*persistencespb.QueueAckLevel) - shardInfo.QueueAckLevels[categoryID] = &persistencespb.QueueAckLevel{ - ClusterAckLevel: make(map[string]int64), - } shardInfo.QueueStates = make(map[int32]*persistencespb.QueueState) shardInfo.QueueStates[categoryID] = &persistencespb.QueueState{ ReaderStates: make(map[int64]*persistencespb.QueueReaderState), diff -Nru temporal-1.21.5-1/src/common/persistence/sql/sqlPersistenceTest.go 
temporal-1.22.5/src/common/persistence/sql/sqlPersistenceTest.go --- temporal-1.21.5-1/src/common/persistence/sql/sqlPersistenceTest.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/sql/sqlPersistenceTest.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,206 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package sql - -import ( - "fmt" - "os" - "path" - "strings" - - "go.temporal.io/server/common" - "go.temporal.io/server/common/config" - "go.temporal.io/server/common/dynamicconfig" - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/log/tag" - p "go.temporal.io/server/common/persistence" - "go.temporal.io/server/common/persistence/sql/sqlplugin" - "go.temporal.io/server/common/resolver" - "go.temporal.io/server/tests/testutils" -) - -// TestCluster allows executing cassandra operations in testing. 
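
The retryable persistence client methods added above all follow one pattern: capture the result in a closure, pass the closure to backoff.ThrottleRetryContext along with a retry policy and an isRetryable predicate, and return the captured value together with the final error. A minimal standalone sketch of that pattern follows; retryWithThrottle is an illustrative stand-in for Temporal's backoff helper, whose policy types are not part of this diff.

package main

import (
    "context"
    "errors"
    "fmt"
    "time"
)

// retryWithThrottle is an illustrative stand-in for backoff.ThrottleRetryContext:
// it re-runs op until it succeeds, the error is not retryable, or ctx is done,
// sleeping a fixed interval between attempts.
func retryWithThrottle(ctx context.Context, op func(context.Context) error, interval time.Duration, maxAttempts int, isRetryable func(error) bool) error {
    var err error
    for attempt := 0; attempt < maxAttempts; attempt++ {
        if err = op(ctx); err == nil || !isRetryable(err) {
            return err
        }
        select {
        case <-ctx.Done():
            return ctx.Err()
        case <-time.After(interval):
        }
    }
    return err
}

var errUnavailable = errors.New("store temporarily unavailable")

// getTaskQueue shows the wrapper shape used by the retryable clients above:
// the closure assigns to a captured response variable so the value survives
// across retries and can be returned alongside the final error.
func getTaskQueue(ctx context.Context, name string) (string, error) {
    var response string
    calls := 0
    op := func(ctx context.Context) error {
        calls++
        if calls < 3 { // simulate two transient failures before success
            return errUnavailable
        }
        response = "task-queue:" + name
        return nil
    }
    err := retryWithThrottle(ctx, op, 10*time.Millisecond, 5, func(err error) bool {
        return errors.Is(err, errUnavailable)
    })
    return response, err
}

func main() {
    resp, err := getTaskQueue(context.Background(), "default")
    fmt.Println(resp, err) // task-queue:default <nil>
}

The closure capture is what lets a single retry helper serve every method regardless of its return type.
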
-type TestCluster struct { - dbName string - schemaDir string - cfg config.SQL - faultInjection *config.FaultInjection - logger log.Logger -} - -// NewTestCluster returns a new SQL test cluster -func NewTestCluster( - pluginName string, - dbName string, - username string, - password string, - host string, - port int, - connectAttributes map[string]string, - schemaDir string, - faultInjection *config.FaultInjection, - logger log.Logger, -) *TestCluster { - var result TestCluster - result.logger = logger - result.dbName = dbName - - result.schemaDir = schemaDir - result.cfg = config.SQL{ - User: username, - Password: password, - ConnectAddr: fmt.Sprintf("%v:%v", host, port), - ConnectProtocol: "tcp", - PluginName: pluginName, - DatabaseName: dbName, - TaskScanPartitions: 4, - ConnectAttributes: connectAttributes, - } - - result.faultInjection = faultInjection - return &result -} - -// DatabaseName from PersistenceTestCluster interface -func (s *TestCluster) DatabaseName() string { - return s.dbName -} - -// SetupTestDatabase from PersistenceTestCluster interface -func (s *TestCluster) SetupTestDatabase() { - s.CreateDatabase() - - if s.schemaDir == "" { - s.logger.Info("No schema directory provided, skipping schema setup") - return - } - - schemaDir := s.schemaDir + "/" - if !strings.HasPrefix(schemaDir, "/") && !strings.HasPrefix(schemaDir, "../") { - temporalPackageDir := testutils.GetRepoRootDirectory() - schemaDir = path.Join(temporalPackageDir, schemaDir) - } - s.LoadSchema(path.Join(schemaDir, "temporal", "schema.sql")) - s.LoadSchema(path.Join(schemaDir, "visibility", "schema.sql")) -} - -// Config returns the persistence config for connecting to this test cluster -func (s *TestCluster) Config() config.Persistence { - cfg := s.cfg - return config.Persistence{ - DefaultStore: "test", - VisibilityStore: "test", - DataStores: map[string]config.DataStore{ - "test": {SQL: &cfg, FaultInjection: s.faultInjection}, - }, - TransactionSizeLimit: dynamicconfig.GetIntPropertyFn(common.DefaultTransactionSizeLimit), - } -} - -// TearDownTestDatabase from PersistenceTestCluster interface -func (s *TestCluster) TearDownTestDatabase() { - s.DropDatabase() -} - -// CreateDatabase from PersistenceTestCluster interface -func (s *TestCluster) CreateDatabase() { - cfg2 := s.cfg - // NOTE need to connect with empty name to create new database - if cfg2.PluginName != "sqlite" { - cfg2.DatabaseName = "" - } - - db, err := NewSQLAdminDB(sqlplugin.DbKindUnknown, &cfg2, resolver.NewNoopResolver()) - if err != nil { - panic(err) - } - defer func() { - err := db.Close() - if err != nil { - panic(err) - } - }() - err = db.CreateDatabase(s.cfg.DatabaseName) - if err != nil { - panic(err) - } -} - -// DropDatabase from PersistenceTestCluster interface -func (s *TestCluster) DropDatabase() { - cfg2 := s.cfg - - if cfg2.PluginName == "sqlite" && cfg2.DatabaseName != ":memory:" && cfg2.ConnectAttributes["mode"] != "memory" { - if len(cfg2.DatabaseName) > 3 { // 3 should mean not ., .., empty, or / - err := os.Remove(cfg2.DatabaseName) - if err != nil { - panic(err) - } - } - return - } - - // NOTE need to connect with empty name to drop the database - cfg2.DatabaseName = "" - db, err := NewSQLAdminDB(sqlplugin.DbKindUnknown, &cfg2, resolver.NewNoopResolver()) - if err != nil { - panic(err) - } - defer func() { - err := db.Close() - if err != nil { - panic(err) - } - }() - err = db.DropDatabase(s.cfg.DatabaseName) - if err != nil { - panic(err) - } -} - -// LoadSchema from PersistenceTestCluster interface -func (s 
*TestCluster) LoadSchema(schemaFile string) { - statements, err := p.LoadAndSplitQuery([]string{schemaFile}) - if err != nil { - s.logger.Fatal("LoadSchema", tag.Error(err)) - } - - db, err := NewSQLAdminDB(sqlplugin.DbKindUnknown, &s.cfg, resolver.NewNoopResolver()) - if err != nil { - panic(err) - } - defer func() { - err := db.Close() - if err != nil { - panic(err) - } - }() - - for _, stmt := range statements { - if err = db.Exec(stmt); err != nil { - s.logger.Fatal("LoadSchema", tag.Error(err)) - } - } -} diff -Nru temporal-1.21.5-1/src/common/persistence/sql/sqlplugin/mysql/visibility.go temporal-1.22.5/src/common/persistence/sql/sqlplugin/mysql/visibility.go --- temporal-1.21.5-1/src/common/persistence/sql/sqlplugin/mysql/visibility.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/sql/sqlplugin/mysql/visibility.go 2024-02-23 09:45:43.000000000 +0000 @@ -305,6 +305,13 @@ return 0, store.OperationNotSupportedErr } +func (mdb *db) CountGroupByFromVisibility( + ctx context.Context, + filter sqlplugin.VisibilitySelectFilter, +) ([]sqlplugin.VisibilityCountRow, error) { + return nil, store.OperationNotSupportedErr +} + func (mdb *db) processRowFromDB(row *sqlplugin.VisibilityRow) { row.StartTime = mdb.converter.FromMySQLDateTime(row.StartTime) row.ExecutionTime = mdb.converter.FromMySQLDateTime(row.ExecutionTime) diff -Nru temporal-1.21.5-1/src/common/persistence/sql/sqlplugin/mysql/visibility_v8.go temporal-1.22.5/src/common/persistence/sql/sqlplugin/mysql/visibility_v8.go --- temporal-1.21.5-1/src/common/persistence/sql/sqlplugin/mysql/visibility_v8.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/sql/sqlplugin/mysql/visibility_v8.go 2024-02-23 09:45:43.000000000 +0000 @@ -246,6 +246,18 @@ return count, nil } +func (mdb *dbV8) CountGroupByFromVisibility( + ctx context.Context, + filter sqlplugin.VisibilitySelectFilter, +) ([]sqlplugin.VisibilityCountRow, error) { + rows, err := mdb.db.db.QueryContext(ctx, filter.Query, filter.QueryArgs...) 
+ if err != nil { + return nil, err + } + defer rows.Close() + return sqlplugin.ParseCountGroupByRows(rows, filter.GroupBy) +} + func (mdb *dbV8) prepareRowForDB(row *sqlplugin.VisibilityRow) *sqlplugin.VisibilityRow { if row == nil { return nil diff -Nru temporal-1.21.5-1/src/common/persistence/sql/sqlplugin/postgresql/visibility.go temporal-1.22.5/src/common/persistence/sql/sqlplugin/postgresql/visibility.go --- temporal-1.21.5-1/src/common/persistence/sql/sqlplugin/postgresql/visibility.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/sql/sqlplugin/postgresql/visibility.go 2024-02-23 09:45:43.000000000 +0000 @@ -324,6 +324,13 @@ return 0, store.OperationNotSupportedErr } +func (pdb *db) CountGroupByFromVisibility( + ctx context.Context, + filter sqlplugin.VisibilitySelectFilter, +) ([]sqlplugin.VisibilityCountRow, error) { + return nil, store.OperationNotSupportedErr +} + func (pdb *db) processRowFromDB(row *sqlplugin.VisibilityRow) { row.StartTime = pdb.converter.FromPostgreSQLDateTime(row.StartTime) row.ExecutionTime = pdb.converter.FromPostgreSQLDateTime(row.ExecutionTime) diff -Nru temporal-1.21.5-1/src/common/persistence/sql/sqlplugin/postgresql/visibility_v12.go temporal-1.22.5/src/common/persistence/sql/sqlplugin/postgresql/visibility_v12.go --- temporal-1.21.5-1/src/common/persistence/sql/sqlplugin/postgresql/visibility_v12.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/sql/sqlplugin/postgresql/visibility_v12.go 2024-02-23 09:45:43.000000000 +0000 @@ -163,6 +163,19 @@ return count, nil } +func (pdb *dbV12) CountGroupByFromVisibility( + ctx context.Context, + filter sqlplugin.VisibilitySelectFilter, +) ([]sqlplugin.VisibilityCountRow, error) { + filter.Query = pdb.db.db.Rebind(filter.Query) + rows, err := pdb.db.db.QueryContext(ctx, filter.Query, filter.QueryArgs...) + if err != nil { + return nil, err + } + defer rows.Close() + return sqlplugin.ParseCountGroupByRows(rows, filter.GroupBy) +} + func (pdb *dbV12) prepareRowForDB(row *sqlplugin.VisibilityRow) *sqlplugin.VisibilityRow { if row == nil { return nil diff -Nru temporal-1.21.5-1/src/common/persistence/sql/sqlplugin/sqlite/visibility.go temporal-1.22.5/src/common/persistence/sql/sqlplugin/sqlite/visibility.go --- temporal-1.21.5-1/src/common/persistence/sql/sqlplugin/sqlite/visibility.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/sql/sqlplugin/sqlite/visibility.go 2024-02-23 09:45:43.000000000 +0000 @@ -164,6 +164,18 @@ return count, nil } +func (mdb *db) CountGroupByFromVisibility( + ctx context.Context, + filter sqlplugin.VisibilitySelectFilter, +) ([]sqlplugin.VisibilityCountRow, error) { + rows, err := mdb.db.QueryContext(ctx, filter.Query, filter.QueryArgs...) 
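
The PostgreSQL v12 implementation above rebinds the query before executing it because the shared SQL builder emits driver-agnostic '?' placeholders, while PostgreSQL drivers expect numbered ones ($1, $2, ...). A tiny sketch of what that rebinding does, using sqlx's package-level helper; the table and column names here are illustrative, not Temporal's actual visibility schema.

package main

import (
    "fmt"

    "github.com/jmoiron/sqlx"
)

func main() {
    // Query built with driver-agnostic '?' placeholders (names are illustrative).
    q := "SELECT status, COUNT(*) FROM executions WHERE namespace_id = ? GROUP BY status"

    // Rebind rewrites the placeholders into PostgreSQL's numbered style.
    fmt.Println(sqlx.Rebind(sqlx.DOLLAR, q))
    // Prints: SELECT status, COUNT(*) FROM executions WHERE namespace_id = $1 GROUP BY status
}
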
+ if err != nil { + return nil, err + } + defer rows.Close() + return sqlplugin.ParseCountGroupByRows(rows, filter.GroupBy) +} + func (mdb *db) prepareRowForDB(row *sqlplugin.VisibilityRow) *sqlplugin.VisibilityRow { if row == nil { return nil diff -Nru temporal-1.21.5-1/src/common/persistence/sql/sqlplugin/visibility.go temporal-1.22.5/src/common/persistence/sql/sqlplugin/visibility.go --- temporal-1.21.5-1/src/common/persistence/sql/sqlplugin/visibility.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/sql/sqlplugin/visibility.go 2024-02-23 09:45:43.000000000 +0000 @@ -37,6 +37,7 @@ "github.com/iancoleman/strcase" enumspb "go.temporal.io/api/enums/v1" + "go.temporal.io/api/serviceerror" "go.temporal.io/server/common/searchattribute" ) @@ -77,6 +78,7 @@ Query string QueryArgs []interface{} + GroupBy []string } VisibilityGetFilter struct { @@ -89,6 +91,11 @@ RunID string } + VisibilityCountRow struct { + GroupValues []any + Count int64 + } + Visibility interface { // InsertIntoVisibility inserts a row into visibility table. If a row already exist, // no changes will be made by this API @@ -107,6 +114,7 @@ GetFromVisibility(ctx context.Context, filter VisibilityGetFilter) (*VisibilityRow, error) DeleteFromVisibility(ctx context.Context, filter VisibilityDeleteFilter) (sql.Result, error) CountFromVisibility(ctx context.Context, filter VisibilitySelectFilter) (int64, error) + CountGroupByFromVisibility(ctx context.Context, filter VisibilitySelectFilter) ([]VisibilityCountRow, error) } ) @@ -136,6 +144,61 @@ return json.Marshal(vsa) } +func ParseCountGroupByRows(rows *sql.Rows, groupBy []string) ([]VisibilityCountRow, error) { + // Number of columns is number of group by fields plus the count column. + rowValues := make([]any, len(groupBy)+1) + for i := range rowValues { + rowValues[i] = new(any) + } + + var res []VisibilityCountRow + for rows.Next() { + err := rows.Scan(rowValues...) + if err != nil { + return nil, err + } + groupValues := make([]any, len(groupBy)) + for i := range groupBy { + groupValues[i], err = parseCountGroupByGroupValue(groupBy[i], *(rowValues[i].(*any))) + if err != nil { + return nil, err + } + } + count := *(rowValues[len(rowValues)-1].(*any)) + res = append(res, VisibilityCountRow{ + GroupValues: groupValues, + Count: count.(int64), + }) + } + return res, nil +} + +func parseCountGroupByGroupValue(fieldName string, value any) (any, error) { + switch fieldName { + case searchattribute.ExecutionStatus: + switch typedValue := value.(type) { + case int: + return enumspb.WorkflowExecutionStatus(typedValue).String(), nil + case int32: + return enumspb.WorkflowExecutionStatus(typedValue).String(), nil + case int64: + return enumspb.WorkflowExecutionStatus(typedValue).String(), nil + default: + // This should never happen. 
+ return nil, serviceerror.NewInternal( + fmt.Sprintf( + "Unable to parse %s value from DB (got: %v of type: %T, expected type: integer)", + searchattribute.ExecutionStatus, + value, + value, + ), + ) + } + default: + return value, nil + } +} + func getDbFields() []string { t := reflect.TypeOf(VisibilityRow{}) dbFields := make([]string, t.NumField()) diff -Nru temporal-1.21.5-1/src/common/persistence/sql/test_sql_persistence.go temporal-1.22.5/src/common/persistence/sql/test_sql_persistence.go --- temporal-1.21.5-1/src/common/persistence/sql/test_sql_persistence.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/sql/test_sql_persistence.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,206 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package sql + +import ( + "fmt" + "os" + "path" + "strings" + + "go.temporal.io/server/common" + "go.temporal.io/server/common/config" + "go.temporal.io/server/common/dynamicconfig" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/log/tag" + p "go.temporal.io/server/common/persistence" + "go.temporal.io/server/common/persistence/sql/sqlplugin" + "go.temporal.io/server/common/resolver" + "go.temporal.io/server/tests/testutils" +) + +// TestCluster allows executing cassandra operations in testing. 
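
The new ParseCountGroupByRows above scans a variable number of GROUP BY columns by allocating one *any destination per column and letting database/sql pick the concrete type. A self-contained sketch of that scanning trick against an in-memory SQLite database; it assumes the pure-Go modernc.org/sqlite driver, and the table and data are made up for illustration.

package main

import (
    "database/sql"
    "fmt"

    _ "modernc.org/sqlite" // registers the "sqlite" driver
)

func main() {
    db, err := sql.Open("sqlite", ":memory:")
    if err != nil {
        panic(err)
    }
    defer db.Close()

    if _, err := db.Exec(`CREATE TABLE executions (status INTEGER)`); err != nil {
        panic(err)
    }
    if _, err := db.Exec(`INSERT INTO executions (status) VALUES (1), (1), (2)`); err != nil {
        panic(err)
    }

    rows, err := db.Query(`SELECT status, COUNT(*) FROM executions GROUP BY status`)
    if err != nil {
        panic(err)
    }
    defer rows.Close()

    groupBy := []string{"status"}
    // One destination per group-by column plus one for the count, each a *any
    // so the driver can choose the concrete Go type it stores.
    rowValues := make([]any, len(groupBy)+1)
    for i := range rowValues {
        rowValues[i] = new(any)
    }

    for rows.Next() {
        if err := rows.Scan(rowValues...); err != nil {
            panic(err)
        }
        group := *(rowValues[0].(*any))
        count := *(rowValues[len(rowValues)-1].(*any))
        fmt.Printf("status=%v count=%v\n", group, count) // e.g. status=1 count=2
    }
    if err := rows.Err(); err != nil {
        panic(err)
    }
}

The last destination always holds the COUNT(*) value, which the real function asserts to int64 before building a VisibilityCountRow.
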
+type TestCluster struct { + dbName string + schemaDir string + cfg config.SQL + faultInjection *config.FaultInjection + logger log.Logger +} + +// NewTestCluster returns a new SQL test cluster +func NewTestCluster( + pluginName string, + dbName string, + username string, + password string, + host string, + port int, + connectAttributes map[string]string, + schemaDir string, + faultInjection *config.FaultInjection, + logger log.Logger, +) *TestCluster { + var result TestCluster + result.logger = logger + result.dbName = dbName + + result.schemaDir = schemaDir + result.cfg = config.SQL{ + User: username, + Password: password, + ConnectAddr: fmt.Sprintf("%v:%v", host, port), + ConnectProtocol: "tcp", + PluginName: pluginName, + DatabaseName: dbName, + TaskScanPartitions: 4, + ConnectAttributes: connectAttributes, + } + + result.faultInjection = faultInjection + return &result +} + +// DatabaseName from PersistenceTestCluster interface +func (s *TestCluster) DatabaseName() string { + return s.dbName +} + +// SetupTestDatabase from PersistenceTestCluster interface +func (s *TestCluster) SetupTestDatabase() { + s.CreateDatabase() + + if s.schemaDir == "" { + s.logger.Info("No schema directory provided, skipping schema setup") + return + } + + schemaDir := s.schemaDir + "/" + if !strings.HasPrefix(schemaDir, "/") && !strings.HasPrefix(schemaDir, "../") { + temporalPackageDir := testutils.GetRepoRootDirectory() + schemaDir = path.Join(temporalPackageDir, schemaDir) + } + s.LoadSchema(path.Join(schemaDir, "temporal", "schema.sql")) + s.LoadSchema(path.Join(schemaDir, "visibility", "schema.sql")) +} + +// Config returns the persistence config for connecting to this test cluster +func (s *TestCluster) Config() config.Persistence { + cfg := s.cfg + return config.Persistence{ + DefaultStore: "test", + VisibilityStore: "test", + DataStores: map[string]config.DataStore{ + "test": {SQL: &cfg, FaultInjection: s.faultInjection}, + }, + TransactionSizeLimit: dynamicconfig.GetIntPropertyFn(common.DefaultTransactionSizeLimit), + } +} + +// TearDownTestDatabase from PersistenceTestCluster interface +func (s *TestCluster) TearDownTestDatabase() { + s.DropDatabase() +} + +// CreateDatabase from PersistenceTestCluster interface +func (s *TestCluster) CreateDatabase() { + cfg2 := s.cfg + // NOTE need to connect with empty name to create new database + if cfg2.PluginName != "sqlite" { + cfg2.DatabaseName = "" + } + + db, err := NewSQLAdminDB(sqlplugin.DbKindUnknown, &cfg2, resolver.NewNoopResolver()) + if err != nil { + panic(err) + } + defer func() { + err := db.Close() + if err != nil { + panic(err) + } + }() + err = db.CreateDatabase(s.cfg.DatabaseName) + if err != nil { + panic(err) + } +} + +// DropDatabase from PersistenceTestCluster interface +func (s *TestCluster) DropDatabase() { + cfg2 := s.cfg + + if cfg2.PluginName == "sqlite" && cfg2.DatabaseName != ":memory:" && cfg2.ConnectAttributes["mode"] != "memory" { + if len(cfg2.DatabaseName) > 3 { // 3 should mean not ., .., empty, or / + err := os.Remove(cfg2.DatabaseName) + if err != nil { + panic(err) + } + } + return + } + + // NOTE need to connect with empty name to drop the database + cfg2.DatabaseName = "" + db, err := NewSQLAdminDB(sqlplugin.DbKindUnknown, &cfg2, resolver.NewNoopResolver()) + if err != nil { + panic(err) + } + defer func() { + err := db.Close() + if err != nil { + panic(err) + } + }() + err = db.DropDatabase(s.cfg.DatabaseName) + if err != nil { + panic(err) + } +} + +// LoadSchema from PersistenceTestCluster interface +func (s 
*TestCluster) LoadSchema(schemaFile string) { + statements, err := p.LoadAndSplitQuery([]string{schemaFile}) + if err != nil { + s.logger.Fatal("LoadSchema", tag.Error(err)) + } + + db, err := NewSQLAdminDB(sqlplugin.DbKindUnknown, &s.cfg, resolver.NewNoopResolver()) + if err != nil { + panic(err) + } + defer func() { + err := db.Close() + if err != nil { + panic(err) + } + }() + + for _, stmt := range statements { + if err = db.Exec(stmt); err != nil { + s.logger.Fatal("LoadSchema", tag.Error(err)) + } + } +} diff -Nru temporal-1.21.5-1/src/common/persistence/tests/execution_mutable_state.go temporal-1.22.5/src/common/persistence/tests/execution_mutable_state.go --- temporal-1.21.5-1/src/common/persistence/tests/execution_mutable_state.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/tests/execution_mutable_state.go 2024-02-23 09:45:43.000000000 +0000 @@ -84,6 +84,7 @@ ExecutionManager: p.NewExecutionManager( executionStore, serializer, + nil, logger, dynamicconfig.GetIntPropertyFn(4*1024*1024), ), diff -Nru temporal-1.21.5-1/src/common/persistence/tests/execution_mutable_state_task.go temporal-1.22.5/src/common/persistence/tests/execution_mutable_state_task.go --- temporal-1.21.5-1/src/common/persistence/tests/execution_mutable_state_task.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/tests/execution_mutable_state_task.go 2024-02-23 09:45:43.000000000 +0000 @@ -105,6 +105,7 @@ ExecutionManager: p.NewExecutionManager( executionStore, serializer, + nil, logger, dynamicconfig.GetIntPropertyFn(4*1024*1024), ), diff -Nru temporal-1.21.5-1/src/common/persistence/tests/history_store.go temporal-1.22.5/src/common/persistence/tests/history_store.go --- temporal-1.21.5-1/src/common/persistence/tests/history_store.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/tests/history_store.go 2024-02-23 09:45:43.000000000 +0000 @@ -84,6 +84,7 @@ store: p.NewExecutionManager( store, eventSerializer, + nil, logger, dynamicconfig.GetIntPropertyFn(4*1024*1024), ), diff -Nru temporal-1.21.5-1/src/common/persistence/tests/visibility_persistence_suite_test.go temporal-1.22.5/src/common/persistence/tests/visibility_persistence_suite_test.go --- temporal-1.21.5-1/src/common/persistence/tests/visibility_persistence_suite_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/tests/visibility_persistence_suite_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -35,6 +35,7 @@ commonpb "go.temporal.io/api/common/v1" enumspb "go.temporal.io/api/enums/v1" workflowpb "go.temporal.io/api/workflow/v1" + "go.temporal.io/api/workflowservice/v1" "go.temporal.io/server/common/debug" "go.temporal.io/server/common/dynamicconfig" @@ -44,8 +45,11 @@ "go.temporal.io/server/common/payload" "go.temporal.io/server/common/persistence" persistencetests "go.temporal.io/server/common/persistence/persistence-tests" + "go.temporal.io/server/common/persistence/sql/sqlplugin/mysql" + "go.temporal.io/server/common/persistence/sql/sqlplugin/postgresql" "go.temporal.io/server/common/persistence/visibility" "go.temporal.io/server/common/persistence/visibility/manager" + "go.temporal.io/server/common/persistence/visibility/store/standard/cassandra" "go.temporal.io/server/common/primitives/timestamp" "go.temporal.io/server/common/resolver" "go.temporal.io/server/common/searchattribute" @@ -87,6 +91,7 @@ s.SearchAttributesMapperProvider, dynamicconfig.GetIntPropertyFn(1000), dynamicconfig.GetIntPropertyFn(1000), + 
dynamicconfig.GetFloatPropertyFn(0.2), dynamicconfig.GetBoolPropertyFnFilteredByNamespace(false), dynamicconfig.GetStringPropertyFn(visibility.SecondaryVisibilityWritingModeOff), dynamicconfig.GetBoolPropertyFnFilteredByNamespace(false), @@ -774,6 +779,100 @@ } } +func (s *VisibilityPersistenceSuite) TestCountWorkflowExecutions() { + switch s.VisibilityMgr.GetStoreNames()[0] { + case mysql.PluginName, postgresql.PluginName, cassandra.CassandraPersistenceName: + s.T().Skip("Not supported by standard visibility") + } + + testNamespaceUUID := namespace.ID(uuid.New()) + closeTime := time.Now().UTC() + startTime := closeTime.Add(-5 * time.Second) + + for i := 0; i < 5; i++ { + s.createOpenWorkflowRecord( + testNamespaceUUID, + "visibility-workflow-test", + "visibility-workflow", + startTime, + "test-queue", + ) + } + + resp, err := s.VisibilityMgr.CountWorkflowExecutions( + s.ctx, + &manager.CountWorkflowExecutionsRequest{ + NamespaceID: testNamespaceUUID, + Query: "", + }, + ) + s.NoError(err) + s.Equal(int64(5), resp.Count) + s.Nil(resp.Groups) +} + +func (s *VisibilityPersistenceSuite) TestCountGroupByWorkflowExecutions() { + switch s.VisibilityMgr.GetStoreNames()[0] { + case mysql.PluginName, postgresql.PluginName, cassandra.CassandraPersistenceName: + s.T().Skip("Not supported by standard visibility") + } + + testNamespaceUUID := namespace.ID(uuid.New()) + closeTime := time.Now().UTC() + startTime := closeTime.Add(-5 * time.Second) + + var startRequests []*manager.RecordWorkflowExecutionStartedRequest + for i := 0; i < 5; i++ { + startRequests = append( + startRequests, + s.createOpenWorkflowRecord( + testNamespaceUUID, + "visibility-workflow-test", + "visibility-workflow", + startTime, + "test-queue", + ), + ) + } + + runningStatusPayload, _ := searchattribute.EncodeValue( + enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING.String(), + enumspb.INDEXED_VALUE_TYPE_KEYWORD, + ) + resp, err := s.VisibilityMgr.CountWorkflowExecutions( + s.ctx, + &manager.CountWorkflowExecutionsRequest{ + NamespaceID: testNamespaceUUID, + Query: "GROUP BY ExecutionStatus", + }, + ) + s.NoError(err) + s.Equal(int64(5), resp.Count) + s.Equal( + []*workflowservice.CountWorkflowExecutionsResponse_AggregationGroup{ + { + GroupValues: []*commonpb.Payload{runningStatusPayload}, + Count: int64(5), + }, + }, + resp.Groups, + ) + + for i := 0; i < 2; i++ { + s.createClosedWorkflowRecord(startRequests[i], closeTime) + } + + resp, err = s.VisibilityMgr.CountWorkflowExecutions( + s.ctx, + &manager.CountWorkflowExecutionsRequest{ + NamespaceID: testNamespaceUUID, + Query: "GROUP BY ExecutionStatus", + }, + ) + s.NoError(err) + s.Equal(int64(5), resp.Count) +} + func (s *VisibilityPersistenceSuite) listWithPagination(namespaceID namespace.ID, pageSize int) []*workflowpb.WorkflowExecutionInfo { var executions []*workflowpb.WorkflowExecutionInfo resp, err := s.VisibilityMgr.ListWorkflowExecutions(s.ctx, &manager.ListWorkflowExecutionsRequestV2{ diff -Nru temporal-1.21.5-1/src/common/persistence/versionhistory/version_histories.go temporal-1.22.5/src/common/persistence/versionhistory/version_histories.go --- temporal-1.21.5-1/src/common/persistence/versionhistory/version_histories.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/versionhistory/version_histories.go 2024-02-23 09:45:43.000000000 +0000 @@ -58,7 +58,7 @@ // GetVersionHistory gets the VersionHistory according to index provided. 
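
The TestCountGroupByWorkflowExecutions test above shows the caller-side contract for the new aggregation support: a plain count query still returns only Count, while a "GROUP BY ExecutionStatus" query also fills Groups, whose per-group counts sum to Count. A sketch of a caller against the manager interface shown in these hunks; countByStatus is a hypothetical helper and the snippet is not runnable on its own, since it needs a configured visibility manager.

package visibilityexample

import (
    "context"
    "fmt"

    "go.temporal.io/server/common/namespace"
    "go.temporal.io/server/common/persistence/visibility/manager"
)

// countByStatus issues a grouped count and prints one line per execution
// status, mirroring the CountWorkflowExecutions usage in the test suite above.
func countByStatus(ctx context.Context, visMgr manager.VisibilityManager, nsID namespace.ID) error {
    resp, err := visMgr.CountWorkflowExecutions(ctx, &manager.CountWorkflowExecutionsRequest{
        NamespaceID: nsID,
        Query:       "GROUP BY ExecutionStatus",
    })
    if err != nil {
        return err
    }
    fmt.Println("total:", resp.Count) // sum of the per-group counts
    for _, g := range resp.Groups {
        fmt.Println(g.GroupValues, g.Count)
    }
    return nil
}
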
func GetVersionHistory(h *historyspb.VersionHistories, index int32) (*historyspb.VersionHistory, error) { if index < 0 || index >= int32(len(h.Histories)) { - return nil, serviceerror.NewInvalidArgument("version histories index is out of range.") + return nil, serviceerror.NewInternal("version histories index is out of range.") } return h.Histories[index], nil @@ -67,7 +67,7 @@ // AddVersionHistory adds a VersionHistory and return the whether current branch is changed. func AddVersionHistory(h *historyspb.VersionHistories, v *historyspb.VersionHistory) (bool, int32, error) { if v == nil { - return false, 0, serviceerror.NewInvalidArgument("version histories is null.") + return false, 0, serviceerror.NewInternal("version histories is null.") } // assuming existing version histories inside are valid @@ -86,7 +86,7 @@ } if incomingFirstItem.Version != currentFirstItem.Version { - return false, 0, serviceerror.NewInvalidArgument("version history first item does not match.") + return false, 0, serviceerror.NewInternal("version history first item does not match.") } // TODO maybe we need more strict validation @@ -147,7 +147,7 @@ return int32(versionHistoryIndex), nil } } - return 0, serviceerror.NewInvalidArgument("version histories does not contains given item.") + return 0, serviceerror.NewInternal("version histories does not contains given item.") } // IsVersionHistoriesRebuilt returns true if the current branch index's last write version is not the largest among all branches' last write version. @@ -178,7 +178,7 @@ // SetCurrentVersionHistoryIndex set the current VersionHistory index. func SetCurrentVersionHistoryIndex(h *historyspb.VersionHistories, currentVersionHistoryIndex int32) error { if currentVersionHistoryIndex < 0 || currentVersionHistoryIndex >= int32(len(h.Histories)) { - return serviceerror.NewInvalidArgument("invalid current version history index.") + return serviceerror.NewInternal("invalid current version history index.") } h.CurrentVersionHistoryIndex = currentVersionHistoryIndex diff -Nru temporal-1.21.5-1/src/common/persistence/versionhistory/version_history.go temporal-1.22.5/src/common/persistence/versionhistory/version_history.go --- temporal-1.21.5-1/src/common/persistence/versionhistory/version_history.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/versionhistory/version_history.go 2024-02-23 09:45:43.000000000 +0000 @@ -57,7 +57,7 @@ // CopyVersionHistoryUntilLCAVersionHistoryItem returns copy of VersionHistory up until LCA item. func CopyVersionHistoryUntilLCAVersionHistoryItem(v *historyspb.VersionHistory, lcaItem *historyspb.VersionHistoryItem) (*historyspb.VersionHistory, error) { versionHistory := &historyspb.VersionHistory{} - notFoundErr := serviceerror.NewInvalidArgument("version history does not contains the LCA item.") + notFoundErr := serviceerror.NewInternal("version history does not contains the LCA item.") for _, item := range v.Items { if item.Version < lcaItem.Version { if err := AddOrUpdateVersionHistoryItem(versionHistory, item); err != nil { @@ -93,11 +93,11 @@ lastItem := v.Items[len(v.Items)-1] if item.Version < lastItem.Version { - return serviceerror.NewInvalidArgument(fmt.Sprintf("cannot update version history with a lower version %v. Last version: %v", item.Version, lastItem.Version)) + return serviceerror.NewInternal(fmt.Sprintf("cannot update version history with a lower version %v. 
Last version: %v", item.Version, lastItem.Version)) } if item.GetEventId() <= lastItem.GetEventId() { - return serviceerror.NewInvalidArgument(fmt.Sprintf("cannot add version history with a lower event id %v. Last event id: %v", item.GetEventId(), lastItem.GetEventId())) + return serviceerror.NewInternal(fmt.Sprintf("cannot add version history with a lower event id %v. Last event id: %v", item.GetEventId(), lastItem.GetEventId())) } if item.Version > lastItem.Version { @@ -149,7 +149,7 @@ } } - return nil, serviceerror.NewInvalidArgument("version history is malformed. No joint point found.") + return nil, serviceerror.NewInternal("version history is malformed. No joint point found.") } // IsLCAVersionHistoryItemAppendable checks if a LCA VersionHistoryItem is appendable. @@ -167,7 +167,7 @@ // GetFirstVersionHistoryItem return the first VersionHistoryItem. func GetFirstVersionHistoryItem(v *historyspb.VersionHistory) (*historyspb.VersionHistoryItem, error) { if len(v.Items) == 0 { - return nil, serviceerror.NewInvalidArgument("version history is empty.") + return nil, serviceerror.NewInternal("version history is empty.") } return CopyVersionHistoryItem(v.Items[0]), nil } @@ -175,7 +175,7 @@ // GetLastVersionHistoryItem return the last VersionHistoryItem. func GetLastVersionHistoryItem(v *historyspb.VersionHistory) (*historyspb.VersionHistoryItem, error) { if len(v.Items) == 0 { - return nil, serviceerror.NewInvalidArgument("version history is empty.") + return nil, serviceerror.NewInternal("version history is empty.") } return CopyVersionHistoryItem(v.Items[len(v.Items)-1]), nil } @@ -187,7 +187,7 @@ return 0, err } if eventID < common.FirstEventID || eventID > lastItem.GetEventId() { - return 0, serviceerror.NewInvalidArgument("input event ID is not in range.") + return 0, serviceerror.NewInternal("input event ID is not in range.") } // items are sorted by eventID & version @@ -198,7 +198,7 @@ return currentItem.GetVersion(), nil } } - return 0, serviceerror.NewInvalidArgument("input event ID is not in range.") + return 0, serviceerror.NewInternal("input event ID is not in range.") } // IsEmptyVersionHistory indicate whether version history is empty diff -Nru temporal-1.21.5-1/src/common/persistence/versionhistory/version_history_test.go temporal-1.22.5/src/common/persistence/versionhistory/version_history_test.go --- temporal-1.21.5-1/src/common/persistence/versionhistory/version_history_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/versionhistory/version_history_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -110,22 +110,22 @@ history := NewVersionHistory(BranchToken, Items) _, err := CopyVersionHistoryUntilLCAVersionHistoryItem(history, NewVersionHistoryItem(4, 0)) - s.IsType(&serviceerror.InvalidArgument{}, err) + s.IsType(&serviceerror.Internal{}, err) _, err = CopyVersionHistoryUntilLCAVersionHistoryItem(history, NewVersionHistoryItem(2, 1)) - s.IsType(&serviceerror.InvalidArgument{}, err) + s.IsType(&serviceerror.Internal{}, err) _, err = CopyVersionHistoryUntilLCAVersionHistoryItem(history, NewVersionHistoryItem(5, 3)) - s.IsType(&serviceerror.InvalidArgument{}, err) + s.IsType(&serviceerror.Internal{}, err) _, err = CopyVersionHistoryUntilLCAVersionHistoryItem(history, NewVersionHistoryItem(7, 5)) - s.IsType(&serviceerror.InvalidArgument{}, err) + s.IsType(&serviceerror.Internal{}, err) _, err = CopyVersionHistoryUntilLCAVersionHistoryItem(history, NewVersionHistoryItem(4, 0)) - s.IsType(&serviceerror.InvalidArgument{}, err) + 
s.IsType(&serviceerror.Internal{}, err) _, err = CopyVersionHistoryUntilLCAVersionHistoryItem(history, NewVersionHistoryItem(7, 4)) - s.IsType(&serviceerror.InvalidArgument{}, err) + s.IsType(&serviceerror.Internal{}, err) } func (s *versionHistorySuite) TestSetBranchToken() { @@ -398,7 +398,7 @@ history := NewVersionHistory(BranchToken, []*historyspb.VersionHistoryItem{}) _, err := GetFirstVersionHistoryItem(history) - s.IsType(&serviceerror.InvalidArgument{}, err) + s.IsType(&serviceerror.Internal{}, err) } func (s *versionHistorySuite) TestGetLastItem_Success() { @@ -432,7 +432,7 @@ history := NewVersionHistory(BranchToken, []*historyspb.VersionHistoryItem{}) _, err := GetLastVersionHistoryItem(history) - s.IsType(&serviceerror.InvalidArgument{}, err) + s.IsType(&serviceerror.Internal{}, err) } func (s *versionHistoriesSuite) TestGetVersion_Success() { diff -Nru temporal-1.21.5-1/src/common/persistence/visibility/defs.go temporal-1.22.5/src/common/persistence/visibility/defs.go --- temporal-1.21.5-1/src/common/persistence/visibility/defs.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/visibility/defs.go 2024-02-23 09:45:43.000000000 +0000 @@ -138,12 +138,6 @@ return false } - if len(storeNames) > 1 { - // If more than one store is configured then it means that dual visibility is enabled. - // Dual visibility is used for migration to advanced, don't allow list of values because it will be removed soon. - return false - } - switch storeNames[0] { case mysql.PluginNameV8, postgresql.PluginNameV12, sqlite.PluginName: // Advanced visibility with SQL DB don't support list of values diff -Nru temporal-1.21.5-1/src/common/persistence/visibility/factory.go temporal-1.22.5/src/common/persistence/visibility/factory.go --- temporal-1.21.5-1/src/common/persistence/visibility/factory.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/visibility/factory.go 2024-02-23 09:45:43.000000000 +0000 @@ -55,6 +55,7 @@ maxReadQPS dynamicconfig.IntPropertyFn, maxWriteQPS dynamicconfig.IntPropertyFn, + operatorRPSRatio dynamicconfig.FloatPropertyFn, enableReadFromSecondaryVisibility dynamicconfig.BoolPropertyFnWithNamespaceFilter, secondaryVisibilityWritingMode dynamicconfig.StringPropertyFn, visibilityDisableOrderByClause dynamicconfig.BoolPropertyFnWithNamespaceFilter, @@ -72,6 +73,7 @@ searchAttributesMapperProvider, maxReadQPS, maxWriteQPS, + operatorRPSRatio, visibilityDisableOrderByClause, visibilityEnableManualPagination, metricsHandler, @@ -94,6 +96,7 @@ searchAttributesMapperProvider, maxReadQPS, maxWriteQPS, + operatorRPSRatio, visibilityDisableOrderByClause, visibilityEnableManualPagination, metricsHandler, @@ -139,6 +142,7 @@ visStore store.VisibilityStore, maxReadQPS dynamicconfig.IntPropertyFn, maxWriteQPS dynamicconfig.IntPropertyFn, + operatorRPSRatio dynamicconfig.FloatPropertyFn, metricsHandler metrics.Handler, tag metrics.Tag, logger log.Logger, @@ -152,7 +156,8 @@ visManager = NewVisibilityManagerRateLimited( visManager, maxReadQPS, - maxWriteQPS) + maxWriteQPS, + operatorRPSRatio) // wrap with metrics client visManager = NewVisibilityManagerMetrics( visManager, @@ -175,6 +180,7 @@ maxReadQPS dynamicconfig.IntPropertyFn, maxWriteQPS dynamicconfig.IntPropertyFn, + operatorRPSRatio dynamicconfig.FloatPropertyFn, visibilityDisableOrderByClause dynamicconfig.BoolPropertyFnWithNamespaceFilter, visibilityEnableManualPagination dynamicconfig.BoolPropertyFnWithNamespaceFilter, @@ -203,6 +209,7 @@ visStore, maxReadQPS, maxWriteQPS, + 
operatorRPSRatio, metricsHandler, metrics.AdvancedVisibilityTypeTag(), logger, @@ -238,10 +245,22 @@ logger, ) default: - visStore, err = newStandardVisibilityStore(dsConfig, persistenceResolver, logger) + visStore, err = newStandardVisibilityStore( + dsConfig, + persistenceResolver, + searchAttributesProvider, + searchAttributesMapperProvider, + logger, + ) } } else if dsConfig.Cassandra != nil { - visStore, err = newStandardVisibilityStore(dsConfig, persistenceResolver, logger) + visStore, err = newStandardVisibilityStore( + dsConfig, + persistenceResolver, + searchAttributesProvider, + searchAttributesMapperProvider, + logger, + ) } else if dsConfig.Elasticsearch != nil { visStore = newElasticsearchVisibilityStore( dsConfig.Elasticsearch.GetVisibilityIndex(), @@ -261,6 +280,8 @@ func newStandardVisibilityStore( dsConfig config.DataStore, persistenceResolver resolver.ServiceResolver, + searchAttributesProvider searchattribute.Provider, + searchAttributesMapperProvider searchattribute.MapperProvider, logger log.Logger, ) (store.VisibilityStore, error) { var ( @@ -287,7 +308,11 @@ logger.Fatal("invalid config: one of cassandra or sql params must be specified for visibility store") return nil, nil } - return standard.NewVisibilityStore(visStore), nil + return standard.NewVisibilityStore( + visStore, + searchAttributesProvider, + searchAttributesMapperProvider, + ), nil } func newElasticsearchVisibilityStore( diff -Nru temporal-1.21.5-1/src/common/persistence/visibility/manager/visibility_manager.go temporal-1.22.5/src/common/persistence/visibility/manager/visibility_manager.go --- temporal-1.21.5-1/src/common/persistence/visibility/manager/visibility_manager.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/visibility/manager/visibility_manager.go 2024-02-23 09:45:43.000000000 +0000 @@ -25,7 +25,7 @@ package manager // -aux_files is required here due to Closeable interface being in another file. -//go:generate mockgen -copyright_file ../../../../LICENSE -package $GOPACKAGE -source $GOFILE -destination visibility_manager_mock.go -aux_files go.temporal.io/server/common/persistence=../../dataInterfaces.go +//go:generate mockgen -copyright_file ../../../../LICENSE -package $GOPACKAGE -source $GOFILE -destination visibility_manager_mock.go -aux_files go.temporal.io/server/common/persistence=../../data_interfaces.go import ( "context" @@ -34,6 +34,7 @@ commonpb "go.temporal.io/api/common/v1" enumspb "go.temporal.io/api/enums/v1" workflowpb "go.temporal.io/api/workflow/v1" + "go.temporal.io/api/workflowservice/v1" "go.temporal.io/server/common/namespace" "go.temporal.io/server/common/persistence" @@ -145,7 +146,8 @@ // CountWorkflowExecutionsResponse is response to CountWorkflowExecutions CountWorkflowExecutionsResponse struct { - Count int64 + Count int64 // sum of counts in Groups + Groups []*workflowservice.CountWorkflowExecutionsResponse_AggregationGroup } // ListWorkflowExecutionsByTypeRequest is used to list executions of diff -Nru temporal-1.21.5-1/src/common/persistence/visibility/quotas.go temporal-1.22.5/src/common/persistence/visibility/quotas.go --- temporal-1.21.5-1/src/common/persistence/visibility/quotas.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/visibility/quotas.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,73 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package visibility + +import ( + "go.temporal.io/server/common/dynamicconfig" + "go.temporal.io/server/common/headers" + "go.temporal.io/server/common/quotas" +) + +const ( + // OperatorPriority is used to give precedence to calls coming from web UI or tctl + OperatorPriority = 0 +) + +var ( + PrioritiesOrdered = []int{OperatorPriority, 1} +) + +func newPriorityRateLimiter( + maxQPS dynamicconfig.IntPropertyFn, + operatorRPSRatio dynamicconfig.FloatPropertyFn, +) quotas.RequestRateLimiter { + rateLimiters := make(map[int]quotas.RequestRateLimiter) + for priority := range PrioritiesOrdered { + if priority == OperatorPriority { + rateLimiters[priority] = quotas.NewRequestRateLimiterAdapter(quotas.NewDefaultOutgoingRateLimiter(operatorRateFn(maxQPS, operatorRPSRatio))) + } else { + rateLimiters[priority] = quotas.NewRequestRateLimiterAdapter(quotas.NewDefaultOutgoingRateLimiter(rateFn(maxQPS))) + } + } + return quotas.NewPriorityRateLimiter(func(req quotas.Request) int { + if req.CallerType == headers.CallerTypeOperator { + return OperatorPriority + } + // default to lowest priority + return PrioritiesOrdered[len(PrioritiesOrdered)-1] + }, rateLimiters) +} + +func rateFn(maxQPS dynamicconfig.IntPropertyFn) quotas.RateFn { + return func() float64 { + return float64(maxQPS()) + } +} + +func operatorRateFn(maxQPS dynamicconfig.IntPropertyFn, operatorRPSRatio dynamicconfig.FloatPropertyFn) quotas.RateFn { + return func() float64 { + return float64(maxQPS()) * operatorRPSRatio() + } +} diff -Nru temporal-1.21.5-1/src/common/persistence/visibility/store/elasticsearch/client/bulk_processor_v7.go temporal-1.22.5/src/common/persistence/visibility/store/elasticsearch/client/bulk_processor_v7.go --- temporal-1.21.5-1/src/common/persistence/visibility/store/elasticsearch/client/bulk_processor_v7.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/visibility/store/elasticsearch/client/bulk_processor_v7.go 2024-02-23 09:45:43.000000000 +0000 @@ -59,10 +59,12 @@ errC <- errS } }() + timer := time.NewTimer(5 * time.Second) + defer timer.Stop() select { case err := <-errC: return err - case <-time.After(5 * time.Second): + case <-timer.C: return errors.New("esBulkProcessor Flush/Stop timed out") } } diff -Nru temporal-1.21.5-1/src/common/persistence/visibility/store/elasticsearch/client/client.go temporal-1.22.5/src/common/persistence/visibility/store/elasticsearch/client/client.go --- 
temporal-1.21.5-1/src/common/persistence/visibility/store/elasticsearch/client/client.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/visibility/store/elasticsearch/client/client.go 2024-02-23 09:45:43.000000000 +0000 @@ -45,6 +45,7 @@ Get(ctx context.Context, index string, docID string) (*elastic.GetResult, error) Search(ctx context.Context, p *SearchParameters) (*elastic.SearchResult, error) Count(ctx context.Context, index string, query elastic.Query) (int64, error) + CountGroupBy(ctx context.Context, index string, query elastic.Query, aggName string, agg elastic.Aggregation) (*elastic.SearchResult, error) RunBulkProcessor(ctx context.Context, p *BulkProcessorParameters) (BulkProcessor, error) // TODO (alex): move this to some admin client (and join with IntegrationTestsClient) diff -Nru temporal-1.21.5-1/src/common/persistence/visibility/store/elasticsearch/client/client_mock.go temporal-1.22.5/src/common/persistence/visibility/store/elasticsearch/client/client_mock.go --- temporal-1.21.5-1/src/common/persistence/visibility/store/elasticsearch/client/client_mock.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/visibility/store/elasticsearch/client/client_mock.go 2024-02-23 09:45:43.000000000 +0000 @@ -104,6 +104,21 @@ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Count", reflect.TypeOf((*MockClient)(nil).Count), ctx, index, query) } +// CountGroupBy mocks base method. +func (m *MockClient) CountGroupBy(ctx context.Context, index string, query v7.Query, aggName string, agg v7.Aggregation) (*v7.SearchResult, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CountGroupBy", ctx, index, query, aggName, agg) + ret0, _ := ret[0].(*v7.SearchResult) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CountGroupBy indicates an expected call of CountGroupBy. +func (mr *MockClientMockRecorder) CountGroupBy(ctx, index, query, aggName, agg interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CountGroupBy", reflect.TypeOf((*MockClient)(nil).CountGroupBy), ctx, index, query, aggName, agg) +} + // Get mocks base method. func (m *MockClient) Get(ctx context.Context, index, docID string) (*v7.GetResult, error) { m.ctrl.T.Helper() @@ -320,6 +335,21 @@ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Count", reflect.TypeOf((*MockCLIClient)(nil).Count), ctx, index, query) } +// CountGroupBy mocks base method. +func (m *MockCLIClient) CountGroupBy(ctx context.Context, index string, query v7.Query, aggName string, agg v7.Aggregation) (*v7.SearchResult, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CountGroupBy", ctx, index, query, aggName, agg) + ret0, _ := ret[0].(*v7.SearchResult) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CountGroupBy indicates an expected call of CountGroupBy. +func (mr *MockCLIClientMockRecorder) CountGroupBy(ctx, index, query, aggName, agg interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CountGroupBy", reflect.TypeOf((*MockCLIClient)(nil).CountGroupBy), ctx, index, query, aggName, agg) +} + // Delete mocks base method. 
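
The CountGroupBy method added to the Elasticsearch client interface above runs a size-0 search whose only payload is an aggregation, so grouped counts come back as terms buckets rather than hits. A rough sketch of a caller using the olivere/elastic v7 types the interface is declared against; the search-attribute field names, the esclient alias, and the countByStatus helper are assumptions made for illustration.

package esexample

import (
    "context"
    "fmt"

    "github.com/olivere/elastic/v7"

    esclient "go.temporal.io/server/common/persistence/visibility/store/elasticsearch/client"
)

// countByStatus asks Elasticsearch for a per-ExecutionStatus document count in
// a single request: a bool query narrowed to one namespace plus a terms
// aggregation, executed through the new CountGroupBy client method.
func countByStatus(ctx context.Context, c esclient.Client, index, namespaceID string) error {
    query := elastic.NewBoolQuery().Filter(elastic.NewTermQuery("NamespaceId", namespaceID))
    agg := elastic.NewTermsAggregation().Field("ExecutionStatus")

    result, err := c.CountGroupBy(ctx, index, query, "count_group_by", agg)
    if err != nil {
        return err
    }

    buckets, ok := result.Aggregations.Terms("count_group_by")
    if !ok {
        return fmt.Errorf("aggregation %q missing from response", "count_group_by")
    }
    for _, b := range buckets.Buckets {
        fmt.Println(b.Key, b.DocCount) // e.g. Running 5
    }
    return nil
}
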
func (m *MockCLIClient) Delete(ctx context.Context, indexName, docID string, version int64) error { m.ctrl.T.Helper() diff -Nru temporal-1.21.5-1/src/common/persistence/visibility/store/elasticsearch/client/client_v7.go temporal-1.22.5/src/common/persistence/visibility/store/elasticsearch/client/client_v7.go --- temporal-1.21.5-1/src/common/persistence/visibility/store/elasticsearch/client/client_v7.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/visibility/store/elasticsearch/client/client_v7.go 2024-02-23 09:45:43.000000000 +0000 @@ -221,6 +221,20 @@ return c.esClient.Count(index).Query(query).Do(ctx) } +func (c *clientImpl) CountGroupBy( + ctx context.Context, + index string, + query elastic.Query, + aggName string, + agg elastic.Aggregation, +) (*elastic.SearchResult, error) { + searchSource := elastic.NewSearchSource(). + Query(query). + Size(0). + Aggregation(aggName, agg) + return c.esClient.Search(index).SearchSource(searchSource).Do(ctx) +} + func (c *clientImpl) RunBulkProcessor(ctx context.Context, p *BulkProcessorParameters) (BulkProcessor, error) { esBulkProcessor, err := c.esClient.BulkProcessor(). Name(p.Name). diff -Nru temporal-1.21.5-1/src/common/persistence/visibility/store/elasticsearch/converter_test.go temporal-1.22.5/src/common/persistence/visibility/store/elasticsearch/converter_test.go --- temporal-1.21.5-1/src/common/persistence/visibility/store/elasticsearch/converter_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/visibility/store/elasticsearch/converter_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -37,18 +37,19 @@ ) var errorCases = map[string]string{ - "delete": query.MalformedSqlQueryErrMessage, - "update x": query.MalformedSqlQueryErrMessage, - "insert ": query.MalformedSqlQueryErrMessage, - "insert into a values(1,2)": query.NotSupportedErrMessage, - "update a set id = 1": query.NotSupportedErrMessage, - "delete from a where id=1": query.NotSupportedErrMessage, - "select * from a where NOT(id=1)": query.NotSupportedErrMessage, - "select * from a where 1 = 1": query.InvalidExpressionErrMessage, - "select * from a where 1=a": query.InvalidExpressionErrMessage, - "select * from a where zz(k=2)": query.NotSupportedErrMessage, - "select * from a group by k": query.NotSupportedErrMessage, - "invalid query": query.MalformedSqlQueryErrMessage, + "delete": query.MalformedSqlQueryErrMessage, + "update x": query.MalformedSqlQueryErrMessage, + "insert ": query.MalformedSqlQueryErrMessage, + "insert into a values(1,2)": query.NotSupportedErrMessage, + "update a set id = 1": query.NotSupportedErrMessage, + "delete from a where id=1": query.NotSupportedErrMessage, + "select * from a where NOT(id=1)": query.NotSupportedErrMessage, + "select * from a where 1 = 1": query.InvalidExpressionErrMessage, + "select * from a where 1=a": query.InvalidExpressionErrMessage, + "select * from a where zz(k=2)": query.NotSupportedErrMessage, + "select * from a group by k, m": query.NotSupportedErrMessage, + "select * from a group by k order by id": query.NotSupportedErrMessage, + "invalid query": query.MalformedSqlQueryErrMessage, "select * from a where a= 1 and multi_match(zz=1, query='this is a test', fields=(title,title.origin), type=phrase)": query.NotSupportedErrMessage, } @@ -112,6 +113,20 @@ }, } +var supportedWhereGroupByCases = map[string]struct { + query string + groupBy []string +}{ + "group by status": { + query: ``, + groupBy: []string{"status"}, + }, + "id = 1 group by status": { + query: 
`{"bool":{"filter":{"match":{"id":{"query":1}}}}}`, + groupBy: []string{"status"}, + }, +} + func TestSupportedSelectWhere(t *testing.T) { c := newQueryConverter(nil, nil) @@ -164,6 +179,24 @@ } } +func TestSupportedSelectWhereGroupBy(t *testing.T) { + c := newQueryConverter(nil, nil) + + for sql, expectedJson := range supportedWhereGroupByCases { + queryParams, err := c.ConvertWhereOrderBy(sql) + assert.NoError(t, err) + + if expectedJson.query != "" { + actualQueryMap, _ := queryParams.Query.Source() + actualQueryJson, _ := json.Marshal(actualQueryMap) + assert.Equal(t, expectedJson.query, string(actualQueryJson), fmt.Sprintf("sql: %s", sql)) + } else { + assert.Nil(t, queryParams.Query) + } + assert.Equal(t, expectedJson.groupBy, queryParams.GroupBy) + } +} + func TestErrors(t *testing.T) { c := newQueryConverter(nil, nil) for sql, expectedErrMessage := range errorCases { diff -Nru temporal-1.21.5-1/src/common/persistence/visibility/store/elasticsearch/processor.go temporal-1.22.5/src/common/persistence/visibility/store/elasticsearch/processor.go --- temporal-1.21.5-1/src/common/persistence/visibility/store/elasticsearch/processor.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/visibility/store/elasticsearch/processor.go 2024-02-23 09:45:43.000000000 +0000 @@ -54,10 +54,10 @@ type ( // Processor is interface for Elasticsearch bulk processor Processor interface { - common.Daemon - // Add request to bulk processor. Add(request *client.BulkableRequest, visibilityTaskKey string) *future.FutureImpl[bool] + Start() + Stop() } // processorImpl implements Processor, it's an agent of elastic.BulkProcessor diff -Nru temporal-1.21.5-1/src/common/persistence/visibility/store/elasticsearch/query_interceptors.go temporal-1.22.5/src/common/persistence/visibility/store/elasticsearch/query_interceptors.go --- temporal-1.21.5-1/src/common/persistence/visibility/store/elasticsearch/query_interceptors.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/visibility/store/elasticsearch/query_interceptors.go 2024-02-23 09:45:43.000000000 +0000 @@ -27,6 +27,7 @@ import ( "errors" "fmt" + "strconv" "time" enumspb "go.temporal.io/api/enums/v1" @@ -45,25 +46,38 @@ searchAttributesMapperProvider searchattribute.MapperProvider seenNamespaceDivision bool } - valuesInterceptor struct{} + + valuesInterceptor struct { + namespace namespace.Name + searchAttributesTypeMap searchattribute.NameTypeMap + searchAttributesMapperProvider searchattribute.MapperProvider + } ) func newNameInterceptor( - namespace namespace.Name, + namespaceName namespace.Name, index string, saTypeMap searchattribute.NameTypeMap, searchAttributesMapperProvider searchattribute.MapperProvider, ) *nameInterceptor { return &nameInterceptor{ - namespace: namespace, + namespace: namespaceName, index: index, searchAttributesTypeMap: saTypeMap, searchAttributesMapperProvider: searchAttributesMapperProvider, } } -func NewValuesInterceptor() *valuesInterceptor { - return &valuesInterceptor{} +func NewValuesInterceptor( + namespaceName namespace.Name, + saTypeMap searchattribute.NameTypeMap, + searchAttributesMapperProvider searchattribute.MapperProvider, +) *valuesInterceptor { + return &valuesInterceptor{ + namespace: namespaceName, + searchAttributesTypeMap: saTypeMap, + searchAttributesMapperProvider: searchAttributesMapperProvider, + } } func (ni *nameInterceptor) Name(name string, usage query.FieldNameUsage) (string, error) { @@ -86,54 +100,61 @@ return "", query.NewConverterError("invalid 
search attribute: %s", name) } - if usage == query.FieldNameSorter { + switch usage { + case query.FieldNameFilter: + if fieldName == searchattribute.TemporalNamespaceDivision { + ni.seenNamespaceDivision = true + } + case query.FieldNameSorter: if fieldType == enumspb.INDEXED_VALUE_TYPE_TEXT { - return "", query.NewConverterError("unable to sort by field of %s type, use field of type %s", enumspb.INDEXED_VALUE_TYPE_TEXT.String(), enumspb.INDEXED_VALUE_TYPE_KEYWORD.String()) + return "", query.NewConverterError( + "unable to sort by field of %s type, use field of type %s", + enumspb.INDEXED_VALUE_TYPE_TEXT.String(), + enumspb.INDEXED_VALUE_TYPE_KEYWORD.String(), + ) + } + case query.FieldNameGroupBy: + if fieldName != searchattribute.ExecutionStatus { + return "", query.NewConverterError( + "'group by' clause is only supported for %s search attribute", + searchattribute.ExecutionStatus, + ) } - } - - if fieldName == searchattribute.TemporalNamespaceDivision && usage == query.FieldNameFilter { - ni.seenNamespaceDivision = true } return fieldName, nil } -func (vi *valuesInterceptor) Values(name string, values ...interface{}) ([]interface{}, error) { - var result []interface{} - for _, value := range values { +func (vi *valuesInterceptor) Values(fieldName string, values ...interface{}) ([]interface{}, error) { + fieldType, err := vi.searchAttributesTypeMap.GetType(fieldName) + if err != nil { + return nil, query.NewConverterError("invalid search attribute: %s", fieldName) + } - switch name { - case searchattribute.StartTime, searchattribute.CloseTime, searchattribute.ExecutionTime: - if nanos, isNumber := value.(int64); isNumber { - value = time.Unix(0, nanos).UTC().Format(time.RFC3339Nano) - } - case searchattribute.ExecutionStatus: - if status, isNumber := value.(int64); isNumber { - value = enumspb.WorkflowExecutionStatus_name[int32(status)] - } - case searchattribute.ExecutionDuration: - if durationStr, isString := value.(string); isString { - // To support durations passed as golang durations such as "300ms", "-1.5h" or "2h45m". - // Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". - // Custom timestamp.ParseDuration also supports "d" as additional unit for days. - if duration, err := timestamp.ParseDuration(durationStr); err == nil { - value = duration.Nanoseconds() - } else { - // To support "hh:mm:ss" durations. 
- durationNanos, err := vi.parseHHMMSSDuration(durationStr) - var converterErr *query.ConverterError - if errors.As(err, &converterErr) { - return nil, converterErr - } - if err == nil { - value = durationNanos - } - } + name := fieldName + if searchattribute.IsMappable(fieldName) { + mapper, err := vi.searchAttributesMapperProvider.GetMapper(vi.namespace) + if err != nil { + return nil, err + } + if mapper != nil { + name, err = mapper.GetAlias(fieldName, vi.namespace.String()) + if err != nil { + return nil, err } - default: } + } + var result []interface{} + for _, value := range values { + value, err = vi.parseSystemSearchAttributeValues(fieldName, value) + if err != nil { + return nil, err + } + value, err = validateValueType(name, value, fieldType) + if err != nil { + return nil, err + } result = append(result, value) } return result, nil @@ -157,3 +178,78 @@ return hours*int64(time.Hour) + minutes*int64(time.Minute) + seconds*int64(time.Second) + nanos, nil } + +func (vi *valuesInterceptor) parseSystemSearchAttributeValues(name string, value any) (any, error) { + switch name { + case searchattribute.StartTime, searchattribute.CloseTime, searchattribute.ExecutionTime: + if nanos, isNumber := value.(int64); isNumber { + value = time.Unix(0, nanos).UTC().Format(time.RFC3339Nano) + } + case searchattribute.ExecutionStatus: + if status, isNumber := value.(int64); isNumber { + value = enumspb.WorkflowExecutionStatus_name[int32(status)] + } + case searchattribute.ExecutionDuration: + if durationStr, isString := value.(string); isString { + // To support durations passed as golang durations such as "300ms", "-1.5h" or "2h45m". + // Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". + // Custom timestamp.ParseDuration also supports "d" as additional unit for days. + if duration, err := timestamp.ParseDuration(durationStr); err == nil { + value = duration.Nanoseconds() + } else { + // To support "hh:mm:ss" durations. 
+ durationNanos, err := vi.parseHHMMSSDuration(durationStr) + var converterErr *query.ConverterError + if errors.As(err, &converterErr) { + return nil, converterErr + } + if err == nil { + value = durationNanos + } + } + } + default: + } + return value, nil +} + +func validateValueType(name string, value any, fieldType enumspb.IndexedValueType) (any, error) { + switch fieldType { + case enumspb.INDEXED_VALUE_TYPE_INT, enumspb.INDEXED_VALUE_TYPE_DOUBLE: + switch v := value.(type) { + case int64, float64: + // nothing to do + case string: + // ES can do implicit casting if the value is numeric + if _, err := strconv.ParseFloat(v, 64); err != nil { + return nil, query.NewConverterError( + "invalid value for search attribute %s of type %s: %#v", name, fieldType.String(), value) + } + default: + return nil, query.NewConverterError( + "invalid value for search attribute %s of type %s: %#v", name, fieldType.String(), value) + } + case enumspb.INDEXED_VALUE_TYPE_BOOL: + switch value.(type) { + case bool: + // nothing to do + default: + return nil, query.NewConverterError( + "invalid value for search attribute %s of type %s: %#v", name, fieldType.String(), value) + } + case enumspb.INDEXED_VALUE_TYPE_DATETIME: + switch v := value.(type) { + case int64: + value = time.Unix(0, v).UTC().Format(time.RFC3339Nano) + case string: + if _, err := time.Parse(time.RFC3339Nano, v); err != nil { + return nil, query.NewConverterError( + "invalid value for search attribute %s of type %s: %#v", name, fieldType.String(), value) + } + default: + return nil, query.NewConverterError( + "invalid value for search attribute %s of type %s: %#v", name, fieldType.String(), value) + } + } + return value, nil +} diff -Nru temporal-1.21.5-1/src/common/persistence/visibility/store/elasticsearch/query_interceptors_test.go temporal-1.22.5/src/common/persistence/visibility/store/elasticsearch/query_interceptors_test.go --- temporal-1.21.5-1/src/common/persistence/visibility/store/elasticsearch/query_interceptors_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/visibility/store/elasticsearch/query_interceptors_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -56,7 +56,11 @@ } func (s *QueryInterceptorSuite) TestTimeProcessFunc() { - vi := NewValuesInterceptor() + vi := NewValuesInterceptor( + "test-namespace", + searchattribute.TestNameTypeMap, + searchattribute.NewTestMapperProvider(nil), + ) cases := []struct { key string @@ -73,7 +77,7 @@ }{ {value: "2018-06-07T08:04:05.123456789Z", returnErr: false}, {value: "2018-06-07T15:04:05+07:00", returnErr: false}, - {value: "some invalid time string", returnErr: false}, + {value: "", returnErr: true}, {value: "should not be modified", returnErr: false}, } @@ -90,7 +94,11 @@ } func (s *QueryInterceptorSuite) TestStatusProcessFunc() { - vi := NewValuesInterceptor() + vi := NewValuesInterceptor( + "test-namespace", + searchattribute.TestNameTypeMap, + searchattribute.NewTestMapperProvider(nil), + ) cases := []struct { key string @@ -130,14 +138,18 @@ } func (s *QueryInterceptorSuite) TestDurationProcessFunc() { - vi := NewValuesInterceptor() + vi := NewValuesInterceptor( + "test-namespace", + searchattribute.TestNameTypeMap, + searchattribute.NewTestMapperProvider(nil), + ) cases := []struct { key string value interface{} }{ {key: searchattribute.ExecutionDuration, value: "1"}, - {key: searchattribute.ExecutionDuration, value: 1}, + {key: searchattribute.ExecutionDuration, value: int64(1)}, {key: searchattribute.ExecutionDuration, value: "5h3m"}, {key: 
searchattribute.ExecutionDuration, value: "00:00:01"}, {key: searchattribute.ExecutionDuration, value: "00:00:61"}, @@ -149,11 +161,11 @@ returnErr bool }{ {value: "1", returnErr: false}, - {value: 1, returnErr: false}, + {value: int64(1), returnErr: false}, {value: int64(18180000000000), returnErr: false}, {value: int64(1000000000), returnErr: false}, {value: nil, returnErr: true}, - {value: "bad value", returnErr: false}, + {value: nil, returnErr: true}, {value: "should not be modified", returnErr: false}, } diff -Nru temporal-1.21.5-1/src/common/persistence/visibility/store/elasticsearch/visibility_store.go temporal-1.22.5/src/common/persistence/visibility/store/elasticsearch/visibility_store.go --- temporal-1.21.5-1/src/common/persistence/visibility/store/elasticsearch/visibility_store.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/visibility/store/elasticsearch/visibility_store.go 2024-02-23 09:45:43.000000000 +0000 @@ -38,8 +38,10 @@ "time" "github.com/olivere/elastic/v7" + commonpb "go.temporal.io/api/common/v1" enumspb "go.temporal.io/api/enums/v1" "go.temporal.io/api/serviceerror" + "go.temporal.io/api/workflowservice/v1" "go.temporal.io/server/common/dynamicconfig" "go.temporal.io/server/common/metrics" @@ -618,6 +620,10 @@ return nil, err } + if len(queryParams.GroupBy) > 0 { + return s.countGroupByWorkflowExecutions(ctx, queryParams) + } + count, err := s.esClient.Count(ctx, s.index, queryParams.Query) if err != nil { return nil, convertElasticsearchClientError("CountWorkflowExecutions failed", err) @@ -627,6 +633,49 @@ return response, nil } +func (s *visibilityStore) countGroupByWorkflowExecutions( + ctx context.Context, + queryParams *query.QueryParams, +) (*manager.CountWorkflowExecutionsResponse, error) { + groupByFields := queryParams.GroupBy + + // Elasticsearch aggregation is nested. so need to loop backwards to build it. + // Example: when grouping by (field1, field2), the object looks like + // { + // "aggs": { + // "field1": { + // "terms": { + // "field": "field1" + // }, + // "aggs": { + // "field2": { + // "terms": { + // "field": "field2" + // } + // } + // } + // } + // } + // } + termsAgg := elastic.NewTermsAggregation().Field(groupByFields[len(groupByFields)-1]) + for i := len(groupByFields) - 2; i >= 0; i-- { + termsAgg = elastic.NewTermsAggregation(). + Field(groupByFields[i]). + SubAggregation(groupByFields[i+1], termsAgg) + } + esResponse, err := s.esClient.CountGroupBy( + ctx, + s.index, + queryParams.Query, + groupByFields[0], + termsAgg, + ) + if err != nil { + return nil, err + } + return s.parseCountGroupByResponse(esResponse, groupByFields) +} + func (s *visibilityStore) GetWorkflowExecution( ctx context.Context, request *manager.GetWorkflowExecutionRequest, @@ -730,6 +779,10 @@ Query: queryParams.Query, } + if len(queryParams.GroupBy) > 0 { + return nil, serviceerror.NewInvalidArgument("GROUP BY clause is not supported") + } + // TODO(rodrigozhou): investigate possible solutions to slow ORDER BY. // ORDER BY clause can be slow if there is a large number of documents and // using a field that was not indexed by ES. 
Since slow queries can block @@ -833,7 +886,10 @@ return nil, serviceerror.NewUnavailable(fmt.Sprintf("Unable to read search attribute types: %v", err)) } nameInterceptor := newNameInterceptor(namespace, s.index, saTypeMap, s.searchAttributesMapperProvider) - queryConverter := newQueryConverter(nameInterceptor, NewValuesInterceptor()) + queryConverter := newQueryConverter( + nameInterceptor, + NewValuesInterceptor(namespace, saTypeMap, s.searchAttributesMapperProvider), + ) queryParams, err := queryConverter.ConvertWhereOrderBy(requestQueryStr) if err != nil { // Convert ConverterError to InvalidArgument and pass through all other errors (which should be only mapper errors). @@ -1123,6 +1179,93 @@ return record, nil } +// Elasticsearch aggregation groups are returned as nested object. +// This function flattens the response into rows. +// +//nolint:revive // cognitive complexity 27 (> max enabled 25) +func (s *visibilityStore) parseCountGroupByResponse( + searchResult *elastic.SearchResult, + groupByFields []string, +) (*manager.CountWorkflowExecutionsResponse, error) { + response := &manager.CountWorkflowExecutionsResponse{} + typeMap, err := s.searchAttributesProvider.GetSearchAttributes(s.index, false) + if err != nil { + return nil, serviceerror.NewUnavailable( + fmt.Sprintf("Unable to read search attribute types: %v", err), + ) + } + groupByTypes := make([]enumspb.IndexedValueType, len(groupByFields)) + for i, saName := range groupByFields { + tp, err := typeMap.GetType(saName) + if err != nil { + return nil, err + } + groupByTypes[i] = tp + } + + parseJsonNumber := func(val any) (int64, error) { + numberVal, isNumber := val.(json.Number) + if !isNumber { + return 0, fmt.Errorf("%w: expected json.Number, got %T", errUnexpectedJSONFieldType, val) + } + return numberVal.Int64() + } + + var parseInternal func(map[string]any, []*commonpb.Payload) error + parseInternal = func(aggs map[string]any, bucketValues []*commonpb.Payload) error { + if len(bucketValues) == len(groupByFields) { + cnt, err := parseJsonNumber(aggs["doc_count"]) + if err != nil { + return fmt.Errorf("Unable to parse 'doc_count' field: %w", err) + } + groupValues := make([]*commonpb.Payload, len(groupByFields)) + for i := range bucketValues { + groupValues[i] = bucketValues[i] + } + response.Groups = append( + response.Groups, + &workflowservice.CountWorkflowExecutionsResponse_AggregationGroup{ + GroupValues: groupValues, + Count: cnt, + }, + ) + response.Count += cnt + return nil + } + + index := len(bucketValues) + fieldName := groupByFields[index] + buckets := aggs[fieldName].(map[string]any)["buckets"].([]any) + for i := range buckets { + bucket := buckets[i].(map[string]any) + value, err := finishParseJSONValue(bucket["key"], groupByTypes[index]) + if err != nil { + return fmt.Errorf("Failed to parse value %v: %w", bucket["key"], err) + } + payload, err := searchattribute.EncodeValue(value, groupByTypes[index]) + if err != nil { + return fmt.Errorf("Failed to encode value %v: %w", value, err) + } + err = parseInternal(bucket, append(bucketValues, payload)) + if err != nil { + return err + } + } + return nil + } + + var bucketsJson map[string]any + dec := json.NewDecoder(bytes.NewReader(searchResult.Aggregations[groupByFields[0]])) + dec.UseNumber() + if err := dec.Decode(&bucketsJson); err != nil { + return nil, serviceerror.NewInternal(fmt.Sprintf("unable to unmarshal json response: %v", err)) + } + if err := parseInternal(map[string]any{groupByFields[0]: bucketsJson}, nil); err != nil { + return nil, err + } + 
return response, nil +} + // finishParseJSONValue finishes JSON parsing after json.Decode. // json.Decode returns: // @@ -1182,8 +1325,8 @@ case *elastic.Error: switch e.Status { case 400: // BadRequest - // Returning Internal error will prevent retry on a caller side. - return serviceerror.NewInternal(errMessage) + // Returning InvalidArgument error will prevent retry on a caller side. + return serviceerror.NewInvalidArgument(errMessage) } } return serviceerror.NewUnavailable(errMessage) diff -Nru temporal-1.21.5-1/src/common/persistence/visibility/store/elasticsearch/visibility_store_read_test.go temporal-1.22.5/src/common/persistence/visibility/store/elasticsearch/visibility_store_read_test.go --- temporal-1.21.5-1/src/common/persistence/visibility/store/elasticsearch/visibility_store_read_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/visibility/store/elasticsearch/visibility_store_read_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -38,8 +38,10 @@ "github.com/olivere/elastic/v7" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + commonpb "go.temporal.io/api/common/v1" enumspb "go.temporal.io/api/enums/v1" "go.temporal.io/api/serviceerror" + "go.temporal.io/api/workflowservice/v1" "go.temporal.io/server/common/debug" "go.temporal.io/server/common/dynamicconfig" @@ -47,6 +49,7 @@ "go.temporal.io/server/common/namespace" "go.temporal.io/server/common/persistence/visibility/manager" "go.temporal.io/server/common/persistence/visibility/store/elasticsearch/client" + "go.temporal.io/server/common/persistence/visibility/store/query" "go.temporal.io/server/common/primitives/timestamp" "go.temporal.io/server/common/searchattribute" ) @@ -697,10 +700,9 @@ query = `ExecutionTime < "unable to parse"` queryParams, err = s.visibilityStore.convertQuery(testNamespace, testNamespaceID, query) - // Wrong dates goes directly to Elasticsearch, and it returns an error. 
- s.NoError(err) - s.Equal(`{"bool":{"filter":[{"term":{"NamespaceId":"bfd5c907-f899-4baf-a7b2-2ab85e623ebd"}},{"bool":{"filter":{"range":{"ExecutionTime":{"from":null,"include_lower":true,"include_upper":false,"to":"unable to parse"}}}}}],"must_not":{"exists":{"field":"TemporalNamespaceDivision"}}}}`, s.queryToJSON(queryParams.Query)) - s.Nil(queryParams.Sorter) + s.Error(err) + s.IsType(&serviceerror.InvalidArgument{}, err) + s.Equal(err.Error(), "invalid query: unable to convert filter expression: unable to convert values of comparison expression: invalid value for search attribute ExecutionTime of type Datetime: \"unable to parse\"") // invalid union injection query = `WorkflowId = 'wid' union select * from dummy` @@ -1100,9 +1102,9 @@ } _, err := s.visibilityStore.ListWorkflowExecutions(context.Background(), request) s.Error(err) - var internalErr *serviceerror.Internal - s.ErrorAs(err, &internalErr) - s.Equal("ListWorkflowExecutions failed: elastic: Error 400 (Bad Request): error reason [type=]", internalErr.Message) + var invalidArgErr *serviceerror.InvalidArgument + s.ErrorAs(err, &invalidArgErr) + s.Equal("ListWorkflowExecutions failed: elastic: Error 400 (Bad Request): error reason [type=]", invalidArgErr.Message) s.mockESClient.EXPECT().Search(gomock.Any(), gomock.Any()).DoAndReturn( func(ctx context.Context, p *client.SearchParameters) (*elastic.SearchResult, error) { @@ -1383,6 +1385,357 @@ s.True(strings.HasPrefix(err.Error(), "invalid query"), err.Error()) } +func (s *ESVisibilitySuite) TestCountWorkflowExecutions_GroupBy() { + request := &manager.CountWorkflowExecutionsRequest{ + NamespaceID: testNamespaceID, + Namespace: testNamespace, + Query: "GROUP BY ExecutionStatus", + } + s.mockESClient.EXPECT(). + CountGroupBy( + gomock.Any(), + testIndex, + elastic.NewBoolQuery(). + Filter(elastic.NewTermQuery(searchattribute.NamespaceID, testNamespaceID.String())). + MustNot(namespaceDivisionExists), + searchattribute.ExecutionStatus, + elastic.NewTermsAggregation().Field(searchattribute.ExecutionStatus), + ). 
+ Return( + &elastic.SearchResult{ + Aggregations: map[string]json.RawMessage{ + searchattribute.ExecutionStatus: json.RawMessage( + `{"buckets":[{"key":"Completed","doc_count":100},{"key":"Running","doc_count":10}]}`, + ), + }, + }, + nil, + ) + resp, err := s.visibilityStore.CountWorkflowExecutions(context.Background(), request) + s.NoError(err) + payload1, _ := searchattribute.EncodeValue( + enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED.String(), + enumspb.INDEXED_VALUE_TYPE_KEYWORD, + ) + payload2, _ := searchattribute.EncodeValue( + enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING.String(), + enumspb.INDEXED_VALUE_TYPE_KEYWORD, + ) + s.Equal( + &manager.CountWorkflowExecutionsResponse{ + Count: 110, + Groups: []*workflowservice.CountWorkflowExecutionsResponse_AggregationGroup{ + { + GroupValues: []*commonpb.Payload{payload1}, + Count: 100, + }, + { + GroupValues: []*commonpb.Payload{payload2}, + Count: 10, + }, + }, + }, + resp, + ) + + // test only allowed to group by a single field + request.Query = "GROUP BY ExecutionStatus, WorkflowType" + resp, err = s.visibilityStore.CountWorkflowExecutions(context.Background(), request) + s.Error(err) + s.Contains(err.Error(), "'group by' clause supports only a single field") + s.Nil(resp) + + // test only allowed to group by ExecutionStatus + request.Query = "GROUP BY WorkflowType" + resp, err = s.visibilityStore.CountWorkflowExecutions(context.Background(), request) + s.Error(err) + s.Contains(err.Error(), "'group by' clause is only supported for ExecutionStatus search attribute") + s.Nil(resp) +} + +func (s *ESVisibilitySuite) TestCountGroupByWorkflowExecutions() { + statusCompletedPayload, _ := searchattribute.EncodeValue( + enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED.String(), + enumspb.INDEXED_VALUE_TYPE_KEYWORD, + ) + statusRunningPayload, _ := searchattribute.EncodeValue( + enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING.String(), + enumspb.INDEXED_VALUE_TYPE_KEYWORD, + ) + wfType1Payload, _ := searchattribute.EncodeValue("wf-type-1", enumspb.INDEXED_VALUE_TYPE_KEYWORD) + wfType2Payload, _ := searchattribute.EncodeValue("wf-type-2", enumspb.INDEXED_VALUE_TYPE_KEYWORD) + wfId1Payload, _ := searchattribute.EncodeValue("wf-id-1", enumspb.INDEXED_VALUE_TYPE_KEYWORD) + wfId2Payload, _ := searchattribute.EncodeValue("wf-id-2", enumspb.INDEXED_VALUE_TYPE_KEYWORD) + wfId3Payload, _ := searchattribute.EncodeValue("wf-id-3", enumspb.INDEXED_VALUE_TYPE_KEYWORD) + wfId4Payload, _ := searchattribute.EncodeValue("wf-id-4", enumspb.INDEXED_VALUE_TYPE_KEYWORD) + wfId5Payload, _ := searchattribute.EncodeValue("wf-id-5", enumspb.INDEXED_VALUE_TYPE_KEYWORD) + + testCases := []struct { + name string + groupBy []string + aggName string + agg elastic.Aggregation + mockResponse *elastic.SearchResult + response *manager.CountWorkflowExecutionsResponse + }{ + { + name: "group by one field", + groupBy: []string{searchattribute.ExecutionStatus}, + aggName: searchattribute.ExecutionStatus, + agg: elastic.NewTermsAggregation().Field(searchattribute.ExecutionStatus), + mockResponse: &elastic.SearchResult{ + Aggregations: map[string]json.RawMessage{ + searchattribute.ExecutionStatus: json.RawMessage( + `{ + "buckets":[ + { + "key": "Completed", + "doc_count": 100 + }, + { + "key": "Running", + "doc_count": 10 + } + ] + }`, + ), + }, + }, + response: &manager.CountWorkflowExecutionsResponse{ + Count: 110, + Groups: []*workflowservice.CountWorkflowExecutionsResponse_AggregationGroup{ + { + GroupValues: []*commonpb.Payload{statusCompletedPayload}, + Count: 100, + }, + { + GroupValues: 
[]*commonpb.Payload{statusRunningPayload}, + Count: 10, + }, + }, + }, + }, + + { + name: "group by two fields", + groupBy: []string{searchattribute.ExecutionStatus, searchattribute.WorkflowType}, + aggName: searchattribute.ExecutionStatus, + agg: elastic.NewTermsAggregation().Field(searchattribute.ExecutionStatus).SubAggregation( + searchattribute.WorkflowType, + elastic.NewTermsAggregation().Field(searchattribute.WorkflowType), + ), + mockResponse: &elastic.SearchResult{ + Aggregations: map[string]json.RawMessage{ + searchattribute.ExecutionStatus: json.RawMessage( + `{ + "buckets":[ + { + "key": "Completed", + "doc_count": 100, + "WorkflowType": { + "buckets": [ + { + "key": "wf-type-1", + "doc_count": 75 + }, + { + "key": "wf-type-2", + "doc_count": 25 + } + ] + } + }, + { + "key": "Running", + "doc_count": 10, + "WorkflowType": { + "buckets": [ + { + "key": "wf-type-1", + "doc_count": 7 + }, + { + "key": "wf-type-2", + "doc_count": 3 + } + ] + } + } + ] + }`, + ), + }, + }, + response: &manager.CountWorkflowExecutionsResponse{ + Count: 110, + Groups: []*workflowservice.CountWorkflowExecutionsResponse_AggregationGroup{ + { + GroupValues: []*commonpb.Payload{statusCompletedPayload, wfType1Payload}, + Count: 75, + }, + { + GroupValues: []*commonpb.Payload{statusCompletedPayload, wfType2Payload}, + Count: 25, + }, + { + GroupValues: []*commonpb.Payload{statusRunningPayload, wfType1Payload}, + Count: 7, + }, + { + GroupValues: []*commonpb.Payload{statusRunningPayload, wfType2Payload}, + Count: 3, + }, + }, + }, + }, + + { + name: "group by three fields", + groupBy: []string{ + searchattribute.ExecutionStatus, + searchattribute.WorkflowType, + searchattribute.WorkflowID, + }, + aggName: searchattribute.ExecutionStatus, + agg: elastic.NewTermsAggregation().Field(searchattribute.ExecutionStatus).SubAggregation( + searchattribute.WorkflowType, + elastic.NewTermsAggregation().Field(searchattribute.WorkflowType).SubAggregation( + searchattribute.WorkflowID, + elastic.NewTermsAggregation().Field(searchattribute.WorkflowID), + ), + ), + mockResponse: &elastic.SearchResult{ + Aggregations: map[string]json.RawMessage{ + searchattribute.ExecutionStatus: json.RawMessage( + `{ + "buckets":[ + { + "key": "Completed", + "doc_count": 100, + "WorkflowType": { + "buckets": [ + { + "key": "wf-type-1", + "doc_count": 75, + "WorkflowId": { + "buckets": [ + { + "key": "wf-id-1", + "doc_count": 75 + } + ] + } + }, + { + "key": "wf-type-2", + "doc_count": 25, + "WorkflowId": { + "buckets": [ + { + "key": "wf-id-2", + "doc_count": 20 + }, + { + "key": "wf-id-3", + "doc_count": 5 + } + ] + } + } + ] + } + }, + { + "key": "Running", + "doc_count": 10, + "WorkflowType": { + "buckets": [ + { + "key": "wf-type-1", + "doc_count": 7, + "WorkflowId": { + "buckets": [ + { + "key": "wf-id-4", + "doc_count": 7 + } + ] + } + }, + { + "key": "wf-type-2", + "doc_count": 3, + "WorkflowId": { + "buckets": [ + { + "key": "wf-id-5", + "doc_count": 3 + } + ] + } + } + ] + } + } + ] + }`, + ), + }, + }, + response: &manager.CountWorkflowExecutionsResponse{ + Count: 110, + Groups: []*workflowservice.CountWorkflowExecutionsResponse_AggregationGroup{ + { + GroupValues: []*commonpb.Payload{statusCompletedPayload, wfType1Payload, wfId1Payload}, + Count: 75, + }, + { + GroupValues: []*commonpb.Payload{statusCompletedPayload, wfType2Payload, wfId2Payload}, + Count: 20, + }, + { + GroupValues: []*commonpb.Payload{statusCompletedPayload, wfType2Payload, wfId3Payload}, + Count: 5, + }, + { + GroupValues: 
[]*commonpb.Payload{statusRunningPayload, wfType1Payload, wfId4Payload}, + Count: 7, + }, + { + GroupValues: []*commonpb.Payload{statusRunningPayload, wfType2Payload, wfId5Payload}, + Count: 3, + }, + }, + }, + }, + } + + for _, tc := range testCases { + s.T().Run(tc.name, func(t *testing.T) { + searchParams := &query.QueryParams{ + Query: elastic.NewBoolQuery(). + Filter(elastic.NewTermQuery(searchattribute.NamespaceID, testNamespaceID.String())). + MustNot(namespaceDivisionExists), + GroupBy: tc.groupBy, + } + s.mockESClient.EXPECT(). + CountGroupBy( + gomock.Any(), + testIndex, + elastic.NewBoolQuery(). + Filter(elastic.NewTermQuery(searchattribute.NamespaceID, testNamespaceID.String())). + MustNot(namespaceDivisionExists), + tc.aggName, + tc.agg, + ). + Return(tc.mockResponse, nil) + resp, err := s.visibilityStore.countGroupByWorkflowExecutions(context.Background(), searchParams) + s.NoError(err) + s.Equal(tc.response, resp) + }) + } +} + func (s *ESVisibilitySuite) TestGetWorkflowExecution() { now := timestamp.TimePtr(time.Now()) s.mockESClient.EXPECT().Get(gomock.Any(), testIndex, gomock.Any()).DoAndReturn( diff -Nru temporal-1.21.5-1/src/common/persistence/visibility/store/query/converter.go temporal-1.22.5/src/common/persistence/visibility/store/query/converter.go --- temporal-1.21.5-1/src/common/persistence/visibility/store/query/converter.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/visibility/store/query/converter.go 2024-02-23 09:45:43.000000000 +0000 @@ -82,8 +82,9 @@ notSupportedExprConverter struct{} QueryParams struct { - Query elastic.Query - Sorter []elastic.Sorter + Query elastic.Query + Sorter []elastic.Sorter + GroupBy []string } ) @@ -195,7 +196,9 @@ func (c *Converter) ConvertWhereOrderBy(whereOrderBy string) (*QueryParams, error) { whereOrderBy = strings.TrimSpace(whereOrderBy) - if whereOrderBy != "" && !strings.HasPrefix(strings.ToLower(whereOrderBy), "order by ") { + if whereOrderBy != "" && + !strings.HasPrefix(strings.ToLower(whereOrderBy), "order by ") && + !strings.HasPrefix(strings.ToLower(whereOrderBy), "group by ") { whereOrderBy = "where " + whereOrderBy } // sqlparser can't parse just WHERE clause but instead accepts only valid SQL statement. 
@@ -219,10 +222,6 @@ } func (c *Converter) convertSelect(sel *sqlparser.Select) (*QueryParams, error) { - if sel.GroupBy != nil { - return nil, NewConverterError("%s: 'group by' clause", NotSupportedErrMessage) - } - if sel.Limit != nil { return nil, NewConverterError("%s: 'limit' clause", NotSupportedErrMessage) } @@ -240,6 +239,17 @@ queryParams.Query = query } + if len(sel.GroupBy) > 1 { + return nil, NewConverterError("%s: 'group by' clause supports only a single field", NotSupportedErrMessage) + } + for _, groupByExpr := range sel.GroupBy { + colName, err := convertColName(c.fnInterceptor, groupByExpr, FieldNameGroupBy) + if err != nil { + return nil, wrapConverterError("unable to convert 'group by' column name", err) + } + queryParams.GroupBy = append(queryParams.GroupBy, colName) + } + for _, orderByExpr := range sel.OrderBy { colName, err := convertColName(c.fnInterceptor, orderByExpr.Expr, FieldNameSorter) if err != nil { @@ -252,6 +262,13 @@ queryParams.Sorter = append(queryParams.Sorter, fieldSort) } + if len(queryParams.GroupBy) > 0 && len(queryParams.Sorter) > 0 { + return nil, NewConverterError( + "%s: 'order by' clause is not supported with 'group by' clause", + NotSupportedErrMessage, + ) + } + return queryParams, nil } @@ -485,6 +502,8 @@ return query, nil } +// convertComparisonExprValue returns a string, int64, float64, bool or +// a slice with each value of one of those types. func convertComparisonExprValue(expr sqlparser.Expr) (interface{}, error) { switch e := expr.(type) { case *sqlparser.SQLVal: @@ -534,6 +553,7 @@ return nil, NewConverterError("%s: expression of type %T", NotSupportedErrMessage, expr) } +// ParseSqlValue returns a string, int64 or float64 if the parsing succeeds. func ParseSqlValue(sqlValue string) (interface{}, error) { if sqlValue == "" { return "", nil diff -Nru temporal-1.21.5-1/src/common/persistence/visibility/store/query/interceptors.go temporal-1.22.5/src/common/persistence/visibility/store/query/interceptors.go --- temporal-1.21.5-1/src/common/persistence/visibility/store/query/interceptors.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/visibility/store/query/interceptors.go 2024-02-23 09:45:43.000000000 +0000 @@ -42,6 +42,7 @@ const ( FieldNameFilter FieldNameUsage = iota FieldNameSorter + FieldNameGroupBy ) func (n *NopFieldNameInterceptor) Name(name string, _ FieldNameUsage) (string, error) { diff -Nru temporal-1.21.5-1/src/common/persistence/visibility/store/sql/query_converter.go temporal-1.22.5/src/common/persistence/visibility/store/sql/query_converter.go --- temporal-1.21.5-1/src/common/persistence/visibility/store/sql/query_converter.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/visibility/store/sql/query_converter.go 2024-02-23 09:45:43.000000000 +0000 @@ -53,7 +53,7 @@ token *pageToken, ) (string, []any) - buildCountStmt(namespaceID namespace.ID, queryString string) (string, []any) + buildCountStmt(namespaceID namespace.ID, queryString string, groupBy []string) (string, []any) getDatetimeFormat() string @@ -70,6 +70,12 @@ seenNamespaceDivision bool } + + queryParams struct { + queryString string + // List of search attributes to group by (field name, not db name). 
+ groupBy []string + } ) var ( @@ -143,13 +149,16 @@ if err != nil { return nil, err } - queryString, err := c.convertWhereString(c.queryString) + qp, err := c.convertWhereString(c.queryString) if err != nil { return nil, err } + if len(qp.groupBy) > 0 { + return nil, query.NewConverterError("%s: 'group by' clause", query.NotSupportedErrMessage) + } queryString, queryArgs := c.buildSelectStmt( c.namespaceID, - queryString, + qp.queryString, pageSize, token, ) @@ -157,47 +166,55 @@ } func (c *QueryConverter) BuildCountStmt() (*sqlplugin.VisibilitySelectFilter, error) { - queryString, err := c.convertWhereString(c.queryString) + qp, err := c.convertWhereString(c.queryString) if err != nil { return nil, err } - queryString, queryArgs := c.buildCountStmt( - c.namespaceID, - queryString, - ) - return &sqlplugin.VisibilitySelectFilter{Query: queryString, QueryArgs: queryArgs}, nil + groupByDbNames := make([]string, len(qp.groupBy)) + for i, fieldName := range qp.groupBy { + groupByDbNames[i] = searchattribute.GetSqlDbColName(fieldName) + } + queryString, queryArgs := c.buildCountStmt(c.namespaceID, qp.queryString, groupByDbNames) + return &sqlplugin.VisibilitySelectFilter{ + Query: queryString, + QueryArgs: queryArgs, + GroupBy: qp.groupBy, + }, nil } -func (c *QueryConverter) convertWhereString(queryString string) (string, error) { +func (c *QueryConverter) convertWhereString(queryString string) (*queryParams, error) { where := strings.TrimSpace(queryString) - if where != "" && !strings.HasPrefix(strings.ToLower(where), "order by") { + if where != "" && + !strings.HasPrefix(strings.ToLower(where), "order by") && + !strings.HasPrefix(strings.ToLower(where), "group by") { where = "where " + where } // sqlparser can't parse just WHERE clause but instead accepts only valid SQL statement. sql := "select * from table1 " + where stmt, err := sqlparser.Parse(sql) if err != nil { - return "", err + return nil, err } selectStmt, _ := stmt.(*sqlparser.Select) err = c.convertSelectStmt(selectStmt) if err != nil { - return "", err + return nil, err } - result := "" + res := &queryParams{} if selectStmt.Where != nil { - result = sqlparser.String(selectStmt.Where.Expr) + res.queryString = sqlparser.String(selectStmt.Where.Expr) + } + for _, groupByExpr := range selectStmt.GroupBy { + // The parser already ensures the type is saColName. 
+ colName := groupByExpr.(*saColName) + res.groupBy = append(res.groupBy, colName.fieldName) } - return result, nil + return res, nil } func (c *QueryConverter) convertSelectStmt(sel *sqlparser.Select) error { - if sel.GroupBy != nil { - return query.NewConverterError("%s: 'group by' clause", query.NotSupportedErrMessage) - } - if sel.OrderBy != nil { return query.NewConverterError("%s: 'order by' clause", query.NotSupportedErrMessage) } @@ -251,6 +268,26 @@ } } + if len(sel.GroupBy) > 1 { + return query.NewConverterError( + "%s: 'group by' clause supports only a single field", + query.NotSupportedErrMessage, + ) + } + for k := range sel.GroupBy { + colName, err := c.convertColName(&sel.GroupBy[k]) + if err != nil { + return err + } + if colName.fieldName != searchattribute.ExecutionStatus { + return query.NewConverterError( + "%s: 'group by' clause is only supported for %s search attribute", + query.NotSupportedErrMessage, + searchattribute.ExecutionStatus, + ) + } + } + return nil } diff -Nru temporal-1.21.5-1/src/common/persistence/visibility/store/sql/query_converter_mysql.go temporal-1.22.5/src/common/persistence/visibility/store/sql/query_converter_mysql.go --- temporal-1.21.5-1/src/common/persistence/visibility/store/sql/query_converter_mysql.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/visibility/store/sql/query_converter_mysql.go 2024-02-23 09:45:43.000000000 +0000 @@ -275,6 +275,7 @@ func (c *mysqlQueryConverter) buildCountStmt( namespaceID namespace.ID, queryString string, + groupBy []string, ) (string, []any) { var whereClauses []string var queryArgs []any @@ -289,14 +290,22 @@ whereClauses = append(whereClauses, queryString) } + groupByClause := "" + if len(groupBy) > 0 { + groupByClause = fmt.Sprintf("GROUP BY %s", strings.Join(groupBy, ", ")) + } + return fmt.Sprintf( - `SELECT COUNT(1) + `SELECT %s FROM executions_visibility ev LEFT JOIN custom_search_attributes USING (%s, %s) - WHERE %s`, + WHERE %s + %s`, + strings.Join(append(groupBy, "COUNT(*)"), ", "), searchattribute.GetSqlDbColName(searchattribute.NamespaceID), searchattribute.GetSqlDbColName(searchattribute.RunID), strings.Join(whereClauses, " AND "), + groupByClause, ), queryArgs } diff -Nru temporal-1.21.5-1/src/common/persistence/visibility/store/sql/query_converter_postgresql.go temporal-1.22.5/src/common/persistence/visibility/store/sql/query_converter_postgresql.go --- temporal-1.21.5-1/src/common/persistence/visibility/store/sql/query_converter_postgresql.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/visibility/store/sql/query_converter_postgresql.go 2024-02-23 09:45:43.000000000 +0000 @@ -278,6 +278,7 @@ func (c *pgQueryConverter) buildCountStmt( namespaceID namespace.ID, queryString string, + groupBy []string, ) (string, []any) { var whereClauses []string var queryArgs []any @@ -292,8 +293,15 @@ whereClauses = append(whereClauses, queryString) } + groupByClause := "" + if len(groupBy) > 0 { + groupByClause = fmt.Sprintf("GROUP BY %s", strings.Join(groupBy, ", ")) + } + return fmt.Sprintf( - "SELECT COUNT(1) FROM executions_visibility WHERE %s", + "SELECT %s FROM executions_visibility WHERE %s %s", + strings.Join(append(groupBy, "COUNT(*)"), ", "), strings.Join(whereClauses, " AND "), + groupByClause, ), queryArgs } diff -Nru temporal-1.21.5-1/src/common/persistence/visibility/store/sql/query_converter_sqlite.go temporal-1.22.5/src/common/persistence/visibility/store/sql/query_converter_sqlite.go --- 
temporal-1.21.5-1/src/common/persistence/visibility/store/sql/query_converter_sqlite.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/visibility/store/sql/query_converter_sqlite.go 2024-02-23 09:45:43.000000000 +0000 @@ -108,7 +108,7 @@ sqlparser.String(expr.Right), ) } - ftsQuery = fmt.Sprintf(`%s:"%s"`, saColNameExpr.dbColName.Name, valueExpr.Val) + ftsQuery = buildFtsQueryString(saColNameExpr.dbColName.Name, valueExpr.Val) case sqlparser.InStr, sqlparser.NotInStr: valTupleExpr, isValTuple := expr.Right.(sqlparser.ValTuple) @@ -123,10 +123,7 @@ if err != nil { return nil, err } - for i := range values { - values[i] = fmt.Sprintf(`"%s"`, values[i]) - } - ftsQuery = fmt.Sprintf("%s:(%s)", saColNameExpr.dbColName.Name, strings.Join(values, " OR ")) + ftsQuery = buildFtsQueryString(saColNameExpr.dbColName.Name, values...) default: // this should never happen since isSupportedKeywordListOperator should already fail @@ -201,7 +198,6 @@ sqlparser.String(expr.Right), ) } - ftsQuery := fmt.Sprintf("%s:(%s)", saColNameExpr.dbColName.Name, strings.Join(tokens, " OR ")) var oper string switch expr.Operator { @@ -219,6 +215,7 @@ ) } + ftsQuery := buildFtsQueryString(saColNameExpr.dbColName.Name, tokens...) newExpr := sqlparser.ComparisonExpr{ Operator: oper, Left: newColName("rowid"), @@ -322,6 +319,7 @@ func (c *sqliteQueryConverter) buildCountStmt( namespaceID namespace.ID, queryString string, + groupBy []string, ) (string, []any) { var whereClauses []string var queryArgs []any @@ -336,8 +334,20 @@ whereClauses = append(whereClauses, queryString) } + groupByClause := "" + if len(groupBy) > 0 { + groupByClause = fmt.Sprintf("GROUP BY %s", strings.Join(groupBy, ", ")) + } + return fmt.Sprintf( - "SELECT COUNT(1) FROM executions_visibility WHERE %s", + "SELECT %s FROM executions_visibility WHERE %s %s", + strings.Join(append(groupBy, "COUNT(*)"), ", "), strings.Join(whereClauses, " AND "), + groupByClause, ), queryArgs } + +func buildFtsQueryString(colname string, values ...string) string { + // FTS query format: 'colname : ("token1" OR "token2" OR ...)' + return fmt.Sprintf(`%s : ("%s")`, colname, strings.Join(values, `" OR "`)) +} diff -Nru temporal-1.21.5-1/src/common/persistence/visibility/store/sql/query_converter_sqlite_test.go temporal-1.22.5/src/common/persistence/visibility/store/sql/query_converter_sqlite_test.go --- temporal-1.21.5-1/src/common/persistence/visibility/store/sql/query_converter_sqlite_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/visibility/store/sql/query_converter_sqlite_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -73,25 +73,25 @@ { name: "valid equal expression", input: "AliasForKeywordList01 = 'foo'", - output: "rowid in (select rowid from executions_visibility_fts_keyword_list where executions_visibility_fts_keyword_list = 'KeywordList01:\"foo\"')", + output: `rowid in (select rowid from executions_visibility_fts_keyword_list where executions_visibility_fts_keyword_list = 'KeywordList01 : ("foo")')`, err: nil, }, { name: "valid not equal expression", input: "AliasForKeywordList01 != 'foo'", - output: "rowid not in (select rowid from executions_visibility_fts_keyword_list where executions_visibility_fts_keyword_list = 'KeywordList01:\"foo\"')", + output: `rowid not in (select rowid from executions_visibility_fts_keyword_list where executions_visibility_fts_keyword_list = 'KeywordList01 : ("foo")')`, err: nil, }, { name: "valid in expression", input: "AliasForKeywordList01 in ('foo', 'bar')", - 
output: "rowid in (select rowid from executions_visibility_fts_keyword_list where executions_visibility_fts_keyword_list = 'KeywordList01:(\"foo\" OR \"bar\")')", + output: `rowid in (select rowid from executions_visibility_fts_keyword_list where executions_visibility_fts_keyword_list = 'KeywordList01 : ("foo" OR "bar")')`, err: nil, }, { name: "valid not in expression", input: "AliasForKeywordList01 not in ('foo', 'bar')", - output: "rowid not in (select rowid from executions_visibility_fts_keyword_list where executions_visibility_fts_keyword_list = 'KeywordList01:(\"foo\" OR \"bar\")')", + output: `rowid not in (select rowid from executions_visibility_fts_keyword_list where executions_visibility_fts_keyword_list = 'KeywordList01 : ("foo" OR "bar")')`, err: nil, }, } @@ -130,13 +130,13 @@ { name: "valid equal expression", input: "AliasForText01 = 'foo bar'", - output: "rowid in (select rowid from executions_visibility_fts_text where executions_visibility_fts_text = 'Text01:(foo OR bar)')", + output: `rowid in (select rowid from executions_visibility_fts_text where executions_visibility_fts_text = 'Text01 : ("foo" OR "bar")')`, err: nil, }, { name: "valid not equal expression", input: "AliasForText01 != 'foo bar'", - output: "rowid not in (select rowid from executions_visibility_fts_text where executions_visibility_fts_text = 'Text01:(foo OR bar)')", + output: `rowid not in (select rowid from executions_visibility_fts_text where executions_visibility_fts_text = 'Text01 : ("foo" OR "bar")')`, err: nil, }, } diff -Nru temporal-1.21.5-1/src/common/persistence/visibility/store/sql/query_converter_test.go temporal-1.22.5/src/common/persistence/visibility/store/sql/query_converter_test.go --- temporal-1.21.5-1/src/common/persistence/visibility/store/sql/query_converter_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/visibility/store/sql/query_converter_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -53,7 +53,7 @@ name string input string args map[string]any - output string + output any retValue any err error } @@ -83,53 +83,95 @@ { name: "empty string", input: "", - output: "TemporalNamespaceDivision is null", + output: &queryParams{queryString: "TemporalNamespaceDivision is null"}, err: nil, }, { name: "single condition int", input: "AliasForInt01 = 1", - output: "(Int01 = 1) and TemporalNamespaceDivision is null", + output: &queryParams{queryString: "(Int01 = 1) and TemporalNamespaceDivision is null"}, err: nil, }, { name: "single condition keyword", input: "AliasForKeyword01 = 1", - output: "(Keyword01 = 1) and TemporalNamespaceDivision is null", + output: &queryParams{queryString: "(Keyword01 = 1) and TemporalNamespaceDivision is null"}, err: nil, }, { name: "or condition keyword", input: "AliasForInt01 = 1 OR AliasForKeyword01 = 1", - output: "(Int01 = 1 or Keyword01 = 1) and TemporalNamespaceDivision is null", + output: &queryParams{queryString: "(Int01 = 1 or Keyword01 = 1) and TemporalNamespaceDivision is null"}, err: nil, }, { name: "no double parenthesis", input: "(AliasForInt01 = 1 OR AliasForKeyword01 = 1)", - output: "(Int01 = 1 or Keyword01 = 1) and TemporalNamespaceDivision is null", + output: &queryParams{queryString: "(Int01 = 1 or Keyword01 = 1) and TemporalNamespaceDivision is null"}, err: nil, }, { name: "has namespace division", input: "(AliasForInt01 = 1 OR AliasForKeyword01 = 1) AND TemporalNamespaceDivision = 'foo'", - output: "((Int01 = 1 or Keyword01 = 1) and TemporalNamespaceDivision = 'foo')", + output: &queryParams{queryString: 
"((Int01 = 1 or Keyword01 = 1) and TemporalNamespaceDivision = 'foo')"}, err: nil, }, { + name: "group by one field", + input: "GROUP BY ExecutionStatus", + output: &queryParams{ + queryString: "TemporalNamespaceDivision is null", + groupBy: []string{searchattribute.ExecutionStatus}, + }, + err: nil, + }, + { + name: "group by two fields not supported", + input: "GROUP BY ExecutionStatus, WorkflowType", + output: nil, + err: query.NewConverterError( + "%s: 'group by' clause supports only a single field", + query.NotSupportedErrMessage, + ), + }, + { + name: "group by non ExecutionStatus", + input: "GROUP BY WorkflowType", + output: nil, + err: query.NewConverterError( + "%s: 'group by' clause is only supported for %s search attribute", + query.NotSupportedErrMessage, + searchattribute.ExecutionStatus, + ), + }, + { name: "order by not supported", input: "ORDER BY StartTime", - output: "", + output: nil, + err: query.NewConverterError("%s: 'order by' clause", query.NotSupportedErrMessage), + }, + { + name: "group by with order by not supported", + input: "GROUP BY ExecutionStatus ORDER BY StartTime", + output: nil, err: query.NewConverterError("%s: 'order by' clause", query.NotSupportedErrMessage), }, } for _, tc := range tests { s.Run(tc.name, func() { - queryString, err := s.queryConverter.convertWhereString(tc.input) + qc := newQueryConverterInternal( + s.pqc, + testNamespaceName, + testNamespaceID, + searchattribute.TestNameTypeMap, + &searchattribute.TestMapper{}, + "", + ) + qp, err := qc.convertWhereString(tc.input) if tc.err == nil { s.NoError(err) - s.Equal(tc.output, queryString) + s.Equal(tc.output, qp) } else { s.Error(err) s.Equal(err, tc.err) diff -Nru temporal-1.21.5-1/src/common/persistence/visibility/store/sql/visibility_store.go temporal-1.22.5/src/common/persistence/visibility/store/sql/visibility_store.go --- temporal-1.21.5-1/src/common/persistence/visibility/store/sql/visibility_store.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/visibility/store/sql/visibility_store.go 2024-02-23 09:45:43.000000000 +0000 @@ -34,6 +34,7 @@ "go.temporal.io/api/common/v1" enumspb "go.temporal.io/api/enums/v1" "go.temporal.io/api/serviceerror" + "go.temporal.io/api/workflowservice/v1" "go.temporal.io/server/common/config" "go.temporal.io/server/common/log" @@ -469,6 +470,10 @@ return nil, err } + if len(selectFilter.GroupBy) > 0 { + return s.countGroupByWorkflowExecutions(ctx, selectFilter, saTypeMap) + } + count, err := s.sqlStore.Db.CountFromVisibility(ctx, *selectFilter) if err != nil { return nil, serviceerror.NewUnavailable( @@ -478,6 +483,49 @@ return &manager.CountWorkflowExecutionsResponse{Count: count}, nil } +func (s *VisibilityStore) countGroupByWorkflowExecutions( + ctx context.Context, + selectFilter *sqlplugin.VisibilitySelectFilter, + saTypeMap searchattribute.NameTypeMap, +) (*manager.CountWorkflowExecutionsResponse, error) { + var err error + groupByTypes := make([]enumspb.IndexedValueType, len(selectFilter.GroupBy)) + for i, fieldName := range selectFilter.GroupBy { + groupByTypes[i], err = saTypeMap.GetType(fieldName) + if err != nil { + return nil, err + } + } + + rows, err := s.sqlStore.Db.CountGroupByFromVisibility(ctx, *selectFilter) + if err != nil { + return nil, serviceerror.NewUnavailable( + fmt.Sprintf("CountWorkflowExecutions operation failed. 
Query failed: %v", err)) + } + resp := &manager.CountWorkflowExecutionsResponse{ + Count: 0, + Groups: make([]*workflowservice.CountWorkflowExecutionsResponse_AggregationGroup, 0, len(rows)), + } + for _, row := range rows { + groupValues := make([]*common.Payload, len(row.GroupValues)) + for i, val := range row.GroupValues { + groupValues[i], err = searchattribute.EncodeValue(val, groupByTypes[i]) + if err != nil { + return nil, err + } + } + resp.Groups = append( + resp.Groups, + &workflowservice.CountWorkflowExecutionsResponse_AggregationGroup{ + GroupValues: groupValues, + Count: row.Count, + }, + ) + resp.Count += row.Count + } + return resp, nil +} + func (s *VisibilityStore) GetWorkflowExecution( ctx context.Context, request *manager.GetWorkflowExecutionRequest, diff -Nru temporal-1.21.5-1/src/common/persistence/visibility/store/standard/converter.go temporal-1.22.5/src/common/persistence/visibility/store/standard/converter.go --- temporal-1.21.5-1/src/common/persistence/visibility/store/standard/converter.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/visibility/store/standard/converter.go 2024-02-23 09:45:43.000000000 +0000 @@ -31,8 +31,10 @@ "github.com/xwb1989/sqlparser" enumspb "go.temporal.io/api/enums/v1" + "go.temporal.io/server/common/namespace" "go.temporal.io/server/common/persistence/sql/sqlplugin" "go.temporal.io/server/common/persistence/visibility/store/query" + "go.temporal.io/server/common/searchattribute" ) var allowedComparisonOperators = map[string]struct{}{ @@ -46,9 +48,17 @@ } ) -func newQueryConverter() *converter { +func newQueryConverter( + namespaceName namespace.Name, + searchAttributesTypeMap searchattribute.NameTypeMap, + searchAttributesMapperProvider searchattribute.MapperProvider, +) *converter { fnInterceptor := newNameInterceptor() - fvInterceptor := newValuesInterceptor() + fvInterceptor := newValuesInterceptor( + namespaceName, + searchAttributesTypeMap, + searchAttributesMapperProvider, + ) rangeCond := query.NewRangeCondConverter(fnInterceptor, fvInterceptor, false) comparisonExpr := query.NewComparisonExprConverter(fnInterceptor, fvInterceptor, allowedComparisonOperators) diff -Nru temporal-1.21.5-1/src/common/persistence/visibility/store/standard/converter_test.go temporal-1.22.5/src/common/persistence/visibility/store/standard/converter_test.go --- temporal-1.21.5-1/src/common/persistence/visibility/store/standard/converter_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/visibility/store/standard/converter_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -34,6 +34,7 @@ "go.temporal.io/server/common/convert" "go.temporal.io/server/common/persistence/sql/sqlplugin" + "go.temporal.io/server/common/searchattribute" ) var startTimeFrom = time.Now().Add(-time.Hour) @@ -67,7 +68,7 @@ func TestSupportedQueryFilters(t *testing.T) { for query, expectedFilter := range supportedQuery { - converter := newQueryConverter() + converter := newQueryConverter("test-namespace", searchattribute.TestNameTypeMap, nil) filter, err := converter.GetFilter(query) assert.NoError(t, err) @@ -93,7 +94,7 @@ func TestUnsupportedQueryFilters(t *testing.T) { for _, query := range unsupportedQuery { - converter := newQueryConverter() + converter := newQueryConverter("test-namespace", searchattribute.TestNameTypeMap, nil) _, err := converter.GetFilter(query) assert.Error(t, err) } diff -Nru temporal-1.21.5-1/src/common/persistence/visibility/store/standard/query_interceptors.go 
temporal-1.22.5/src/common/persistence/visibility/store/standard/query_interceptors.go --- temporal-1.21.5-1/src/common/persistence/visibility/store/standard/query_interceptors.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/visibility/store/standard/query_interceptors.go 2024-02-23 09:45:43.000000000 +0000 @@ -27,6 +27,7 @@ import ( "time" + "go.temporal.io/server/common/namespace" "go.temporal.io/server/common/searchattribute" "go.temporal.io/api/enums/v1" @@ -56,10 +57,18 @@ return &nameInterceptor{} } -func newValuesInterceptor() *valuesInterceptor { +func newValuesInterceptor( + namespaceName namespace.Name, + searchAttributesTypeMap searchattribute.NameTypeMap, + searchAttributesMapperProvider searchattribute.MapperProvider, +) *valuesInterceptor { return &valuesInterceptor{ - filter: &sqlplugin.VisibilitySelectFilter{}, - nextInterceptor: elasticsearch.NewValuesInterceptor(), + filter: &sqlplugin.VisibilitySelectFilter{}, + nextInterceptor: elasticsearch.NewValuesInterceptor( + namespaceName, + searchAttributesTypeMap, + searchAttributesMapperProvider, + ), } } diff -Nru temporal-1.21.5-1/src/common/persistence/visibility/store/standard/visibility_store.go temporal-1.22.5/src/common/persistence/visibility/store/standard/visibility_store.go --- temporal-1.21.5-1/src/common/persistence/visibility/store/standard/visibility_store.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/visibility/store/standard/visibility_store.go 2024-02-23 09:45:43.000000000 +0000 @@ -34,11 +34,14 @@ "go.temporal.io/server/common/persistence/visibility/manager" "go.temporal.io/server/common/persistence/visibility/store" + "go.temporal.io/server/common/searchattribute" ) type ( standardStore struct { - store store.VisibilityStore + store store.VisibilityStore + searchAttributesProvider searchattribute.Provider + searchAttributesMapperProvider searchattribute.MapperProvider } // We wrap the token with a boolean to indicate if it is from list open workflows or list closed workflows, @@ -59,9 +62,15 @@ var _ store.VisibilityStore = (*standardStore)(nil) var _ listRequest = (*manager.ListWorkflowExecutionsRequest)(nil) -func NewVisibilityStore(store store.VisibilityStore) store.VisibilityStore { +func NewVisibilityStore( + visibilityStore store.VisibilityStore, + searchAttributesProvider searchattribute.Provider, + searchAttributesMapperProvider searchattribute.MapperProvider, +) store.VisibilityStore { return &standardStore{ - store: store, + store: visibilityStore, + searchAttributesProvider: searchAttributesProvider, + searchAttributesMapperProvider: searchAttributesMapperProvider, } } @@ -187,7 +196,12 @@ ctx context.Context, request *manager.ListWorkflowExecutionsRequestV2, ) (*store.InternalListWorkflowExecutionsResponse, error) { - converter := newQueryConverter() + typeMap, err := s.searchAttributesProvider.GetSearchAttributes(s.GetIndexName(), false) + if err != nil { + return nil, err + } + + converter := newQueryConverter(request.Namespace, typeMap, s.searchAttributesMapperProvider) filter, err := converter.GetFilter(request.Query) if err != nil { return nil, err diff -Nru temporal-1.21.5-1/src/common/persistence/visibility/store/visibility_store.go temporal-1.22.5/src/common/persistence/visibility/store/visibility_store.go --- temporal-1.21.5-1/src/common/persistence/visibility/store/visibility_store.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/visibility/store/visibility_store.go 2024-02-23 
09:45:43.000000000 +0000 @@ -25,7 +25,7 @@ package store // -aux_files is required here due to Closeable interface being in another file. -//go:generate mockgen -copyright_file ../../../../LICENSE -package $GOPACKAGE -source $GOFILE -destination visibility_store_mock.go -aux_files go.temporal.io/server/common/persistence=../../dataInterfaces.go +//go:generate mockgen -copyright_file ../../../../LICENSE -package $GOPACKAGE -source $GOFILE -destination visibility_store_mock.go -aux_files go.temporal.io/server/common/persistence=../../data_interfaces.go import ( "context" diff -Nru temporal-1.21.5-1/src/common/persistence/visibility/visibility_manager_rate_limited.go temporal-1.22.5/src/common/persistence/visibility/visibility_manager_rate_limited.go --- temporal-1.21.5-1/src/common/persistence/visibility/visibility_manager_rate_limited.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/visibility/visibility_manager_rate_limited.go 2024-02-23 09:45:43.000000000 +0000 @@ -26,37 +26,38 @@ import ( "context" + "time" "go.temporal.io/server/common/dynamicconfig" + "go.temporal.io/server/common/headers" "go.temporal.io/server/common/namespace" "go.temporal.io/server/common/persistence" "go.temporal.io/server/common/persistence/visibility/manager" "go.temporal.io/server/common/quotas" ) +const ( + RateLimitDefaultToken = 1 +) + var _ manager.VisibilityManager = (*visibilityManagerRateLimited)(nil) type visibilityManagerRateLimited struct { delegate manager.VisibilityManager - readRateLimiter quotas.RateLimiter - writeRateLimiter quotas.RateLimiter + readRateLimiter quotas.RequestRateLimiter + writeRateLimiter quotas.RequestRateLimiter } func NewVisibilityManagerRateLimited( delegate manager.VisibilityManager, readMaxQPS dynamicconfig.IntPropertyFn, writeMaxQPS dynamicconfig.IntPropertyFn, + operatorRPSRatio dynamicconfig.FloatPropertyFn, ) *visibilityManagerRateLimited { - readRateLimiter := quotas.NewDefaultOutgoingRateLimiter( - func() float64 { return float64(readMaxQPS()) }, - ) - writeRateLimiter := quotas.NewDefaultOutgoingRateLimiter( - func() float64 { return float64(writeMaxQPS()) }, - ) return &visibilityManagerRateLimited{ delegate: delegate, - readRateLimiter: readRateLimiter, - writeRateLimiter: writeRateLimiter, + readRateLimiter: newPriorityRateLimiter(readMaxQPS, operatorRPSRatio), + writeRateLimiter: newPriorityRateLimiter(writeMaxQPS, operatorRPSRatio), } } @@ -92,7 +93,7 @@ ctx context.Context, request *manager.RecordWorkflowExecutionStartedRequest, ) error { - if ok := m.writeRateLimiter.Allow(); !ok { + if ok := allow(ctx, "RecordWorkflowExecutionStarted", m.writeRateLimiter); !ok { return persistence.ErrPersistenceLimitExceeded } return m.delegate.RecordWorkflowExecutionStarted(ctx, request) @@ -102,7 +103,7 @@ ctx context.Context, request *manager.RecordWorkflowExecutionClosedRequest, ) error { - if ok := m.writeRateLimiter.Allow(); !ok { + if ok := allow(ctx, "RecordWorkflowExecutionClosed", m.writeRateLimiter); !ok { return persistence.ErrPersistenceLimitExceeded } return m.delegate.RecordWorkflowExecutionClosed(ctx, request) @@ -112,7 +113,7 @@ ctx context.Context, request *manager.UpsertWorkflowExecutionRequest, ) error { - if ok := m.writeRateLimiter.Allow(); !ok { + if ok := allow(ctx, "UpsertWorkflowExecution", m.writeRateLimiter); !ok { return persistence.ErrPersistenceLimitExceeded } return m.delegate.UpsertWorkflowExecution(ctx, request) @@ -122,7 +123,7 @@ ctx context.Context, request *manager.VisibilityDeleteWorkflowExecutionRequest, 
) error { - if ok := m.writeRateLimiter.Allow(); !ok { + if ok := allow(ctx, "DeleteWorkflowExecution", m.writeRateLimiter); !ok { return persistence.ErrPersistenceLimitExceeded } return m.delegate.DeleteWorkflowExecution(ctx, request) @@ -134,7 +135,7 @@ ctx context.Context, request *manager.ListWorkflowExecutionsRequest, ) (*manager.ListWorkflowExecutionsResponse, error) { - if ok := m.readRateLimiter.Allow(); !ok { + if ok := allow(ctx, "ListOpenWorkflowExecutions", m.readRateLimiter); !ok { return nil, persistence.ErrPersistenceLimitExceeded } return m.delegate.ListOpenWorkflowExecutions(ctx, request) @@ -144,7 +145,7 @@ ctx context.Context, request *manager.ListWorkflowExecutionsRequest, ) (*manager.ListWorkflowExecutionsResponse, error) { - if ok := m.readRateLimiter.Allow(); !ok { + if ok := allow(ctx, "ListClosedWorkflowExecutions", m.readRateLimiter); !ok { return nil, persistence.ErrPersistenceLimitExceeded } return m.delegate.ListClosedWorkflowExecutions(ctx, request) @@ -154,7 +155,7 @@ ctx context.Context, request *manager.ListWorkflowExecutionsByTypeRequest, ) (*manager.ListWorkflowExecutionsResponse, error) { - if ok := m.readRateLimiter.Allow(); !ok { + if ok := allow(ctx, "ListOpenWorkflowExecutionsByType", m.readRateLimiter); !ok { return nil, persistence.ErrPersistenceLimitExceeded } return m.delegate.ListOpenWorkflowExecutionsByType(ctx, request) @@ -164,7 +165,7 @@ ctx context.Context, request *manager.ListWorkflowExecutionsByTypeRequest, ) (*manager.ListWorkflowExecutionsResponse, error) { - if ok := m.readRateLimiter.Allow(); !ok { + if ok := allow(ctx, "ListClosedWorkflowExecutionsByType", m.readRateLimiter); !ok { return nil, persistence.ErrPersistenceLimitExceeded } return m.delegate.ListClosedWorkflowExecutionsByType(ctx, request) @@ -174,7 +175,7 @@ ctx context.Context, request *manager.ListWorkflowExecutionsByWorkflowIDRequest, ) (*manager.ListWorkflowExecutionsResponse, error) { - if ok := m.readRateLimiter.Allow(); !ok { + if ok := allow(ctx, "ListOpenWorkflowExecutionsByWorkflowID", m.readRateLimiter); !ok { return nil, persistence.ErrPersistenceLimitExceeded } return m.delegate.ListOpenWorkflowExecutionsByWorkflowID(ctx, request) @@ -184,7 +185,7 @@ ctx context.Context, request *manager.ListWorkflowExecutionsByWorkflowIDRequest, ) (*manager.ListWorkflowExecutionsResponse, error) { - if ok := m.readRateLimiter.Allow(); !ok { + if ok := allow(ctx, "ListClosedWorkflowExecutionsByWorkflowID", m.readRateLimiter); !ok { return nil, persistence.ErrPersistenceLimitExceeded } return m.delegate.ListClosedWorkflowExecutionsByWorkflowID(ctx, request) @@ -194,7 +195,7 @@ ctx context.Context, request *manager.ListClosedWorkflowExecutionsByStatusRequest, ) (*manager.ListWorkflowExecutionsResponse, error) { - if ok := m.readRateLimiter.Allow(); !ok { + if ok := allow(ctx, "ListClosedWorkflowExecutionsByStatus", m.readRateLimiter); !ok { return nil, persistence.ErrPersistenceLimitExceeded } return m.delegate.ListClosedWorkflowExecutionsByStatus(ctx, request) @@ -204,7 +205,7 @@ ctx context.Context, request *manager.ListWorkflowExecutionsRequestV2, ) (*manager.ListWorkflowExecutionsResponse, error) { - if ok := m.readRateLimiter.Allow(); !ok { + if ok := allow(ctx, "ListWorkflowExecutions", m.readRateLimiter); !ok { return nil, persistence.ErrPersistenceLimitExceeded } return m.delegate.ListWorkflowExecutions(ctx, request) @@ -214,7 +215,7 @@ ctx context.Context, request *manager.ListWorkflowExecutionsRequestV2, ) (*manager.ListWorkflowExecutionsResponse, error) { - if ok := 
m.readRateLimiter.Allow(); !ok { + if ok := allow(ctx, "ScanWorkflowExecutions", m.readRateLimiter); !ok { return nil, persistence.ErrPersistenceLimitExceeded } return m.delegate.ScanWorkflowExecutions(ctx, request) @@ -224,7 +225,7 @@ ctx context.Context, request *manager.CountWorkflowExecutionsRequest, ) (*manager.CountWorkflowExecutionsResponse, error) { - if ok := m.readRateLimiter.Allow(); !ok { + if ok := allow(ctx, "CountWorkflowExecutions", m.readRateLimiter); !ok { return nil, persistence.ErrPersistenceLimitExceeded } return m.delegate.CountWorkflowExecutions(ctx, request) @@ -234,8 +235,25 @@ ctx context.Context, request *manager.GetWorkflowExecutionRequest, ) (*manager.GetWorkflowExecutionResponse, error) { - if ok := m.readRateLimiter.Allow(); !ok { + if ok := allow(ctx, "GetWorkflowExecution", m.readRateLimiter); !ok { return nil, persistence.ErrPersistenceLimitExceeded } return m.delegate.GetWorkflowExecution(ctx, request) } + +func allow( + ctx context.Context, + api string, + rateLimiter quotas.RequestRateLimiter, +) bool { + callerInfo := headers.GetCallerInfo(ctx) + // Currently only CallerType is used. See common/persistence/visibility/quotas.go for rate limiter details. + return rateLimiter.Allow(time.Now().UTC(), quotas.NewRequest( + api, + RateLimitDefaultToken, + callerInfo.CallerName, + callerInfo.CallerType, + -1, + callerInfo.CallOrigin, + )) +} diff -Nru temporal-1.21.5-1/src/common/persistence/visibility/visibility_manager_test.go temporal-1.22.5/src/common/persistence/visibility/visibility_manager_test.go --- temporal-1.21.5-1/src/common/persistence/visibility/visibility_manager_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/visibility/visibility_manager_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -80,6 +80,7 @@ s.visibilityStore, dynamicconfig.GetIntPropertyFn(1), dynamicconfig.GetIntPropertyFn(1), + dynamicconfig.GetFloatPropertyFn(0.2), s.metricsHandler, metrics.StandardVisibilityTypeTag(), log.NewNoopLogger()) diff -Nru temporal-1.21.5-1/src/common/persistence/workflowStateStatusValidator.go temporal-1.22.5/src/common/persistence/workflowStateStatusValidator.go --- temporal-1.21.5-1/src/common/persistence/workflowStateStatusValidator.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/workflowStateStatusValidator.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,120 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package persistence - -import ( - "fmt" - - enumspb "go.temporal.io/api/enums/v1" - "go.temporal.io/api/serviceerror" - - enumsspb "go.temporal.io/server/api/enums/v1" -) - -var ( - validWorkflowStates = map[enumsspb.WorkflowExecutionState]struct{}{ - enumsspb.WORKFLOW_EXECUTION_STATE_CREATED: {}, - enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING: {}, - enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED: {}, - enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE: {}, - enumsspb.WORKFLOW_EXECUTION_STATE_CORRUPTED: {}, - } - - validWorkflowStatuses = map[enumspb.WorkflowExecutionStatus]struct{}{ - enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING: {}, - enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED: {}, - enumspb.WORKFLOW_EXECUTION_STATUS_FAILED: {}, - enumspb.WORKFLOW_EXECUTION_STATUS_CANCELED: {}, - enumspb.WORKFLOW_EXECUTION_STATUS_TERMINATED: {}, - enumspb.WORKFLOW_EXECUTION_STATUS_CONTINUED_AS_NEW: {}, - enumspb.WORKFLOW_EXECUTION_STATUS_TIMED_OUT: {}, - } -) - -// ValidateCreateWorkflowStateStatus validate workflow state and close status -func ValidateCreateWorkflowStateStatus( - state enumsspb.WorkflowExecutionState, - status enumspb.WorkflowExecutionStatus, -) error { - - if err := validateWorkflowState(state); err != nil { - return err - } - if err := validateWorkflowStatus(status); err != nil { - return err - } - - // validate workflow state & status - if (state == enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED && status == enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING) || - (state != enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED && status != enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING) { - return serviceerror.NewInternal(fmt.Sprintf("Create workflow with invalid state: %v or status: %v", state, status)) - } - return nil -} - -// ValidateUpdateWorkflowStateStatus validate workflow state and status -func ValidateUpdateWorkflowStateStatus( - state enumsspb.WorkflowExecutionState, - status enumspb.WorkflowExecutionStatus, -) error { - - if err := validateWorkflowState(state); err != nil { - return err - } - if err := validateWorkflowStatus(status); err != nil { - return err - } - - // validate workflow state & status - if (state == enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED && status == enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING) || - (state != enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED && status != enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING) { - return serviceerror.NewInternal(fmt.Sprintf("Update workflow with invalid state: %v or status: %v", state, status)) - } - return nil -} - -// validateWorkflowState validate workflow state -func validateWorkflowState( - state enumsspb.WorkflowExecutionState, -) error { - - if _, ok := validWorkflowStates[state]; !ok { - return serviceerror.NewInternal(fmt.Sprintf("Invalid workflow state: %v", state)) - } - - return nil -} - -// validateWorkflowStatus validate workflow status -func validateWorkflowStatus( - status enumspb.WorkflowExecutionStatus, -) error { - - if _, ok := validWorkflowStatuses[status]; !ok { - return serviceerror.NewInternal(fmt.Sprintf("Invalid workflow status: %v", status)) - } - - return nil -} diff -Nru temporal-1.21.5-1/src/common/persistence/workflowStateStatusValidator_test.go temporal-1.22.5/src/common/persistence/workflowStateStatusValidator_test.go --- 
temporal-1.21.5-1/src/common/persistence/workflowStateStatusValidator_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/workflowStateStatusValidator_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,198 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package persistence - -import ( - "testing" - - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - enumspb "go.temporal.io/api/enums/v1" - - enumsspb "go.temporal.io/server/api/enums/v1" -) - -type ( - workflowStateStatusSuite struct { - suite.Suite - *require.Assertions - } -) - -func TestWorkflowStateStatusSuite(t *testing.T) { - s := new(workflowStateStatusSuite) - suite.Run(t, s) -} - -func (s *workflowStateStatusSuite) SetupSuite() { -} - -func (s *workflowStateStatusSuite) TearDownSuite() { - -} - -func (s *workflowStateStatusSuite) SetupTest() { - s.Assertions = require.New(s.T()) -} - -func (s *workflowStateStatusSuite) TearDownTest() { - -} - -func (s *workflowStateStatusSuite) TestCreateWorkflowStateStatus_WorkflowStateCreated() { - statuses := []enumspb.WorkflowExecutionStatus{ - enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED, - enumspb.WORKFLOW_EXECUTION_STATUS_FAILED, - enumspb.WORKFLOW_EXECUTION_STATUS_CANCELED, - enumspb.WORKFLOW_EXECUTION_STATUS_TERMINATED, - enumspb.WORKFLOW_EXECUTION_STATUS_CONTINUED_AS_NEW, - enumspb.WORKFLOW_EXECUTION_STATUS_TIMED_OUT, - } - - s.NoError(ValidateCreateWorkflowStateStatus(enumsspb.WORKFLOW_EXECUTION_STATE_CREATED, enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING)) - - for _, status := range statuses { - s.NotNil(ValidateCreateWorkflowStateStatus(enumsspb.WORKFLOW_EXECUTION_STATE_CREATED, status)) - } -} - -func (s *workflowStateStatusSuite) TestCreateWorkflowStateStatus_WorkflowStateRunning() { - statuses := []enumspb.WorkflowExecutionStatus{ - enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED, - enumspb.WORKFLOW_EXECUTION_STATUS_FAILED, - enumspb.WORKFLOW_EXECUTION_STATUS_CANCELED, - enumspb.WORKFLOW_EXECUTION_STATUS_TERMINATED, - enumspb.WORKFLOW_EXECUTION_STATUS_CONTINUED_AS_NEW, - enumspb.WORKFLOW_EXECUTION_STATUS_TIMED_OUT, - } - - s.NoError(ValidateCreateWorkflowStateStatus(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING)) - - for _, status := range statuses { - 
s.NotNil(ValidateCreateWorkflowStateStatus(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, status)) - } -} - -func (s *workflowStateStatusSuite) TestCreateWorkflowStateStatus_WorkflowStateCompleted() { - statuses := []enumspb.WorkflowExecutionStatus{ - enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED, - enumspb.WORKFLOW_EXECUTION_STATUS_FAILED, - enumspb.WORKFLOW_EXECUTION_STATUS_CANCELED, - enumspb.WORKFLOW_EXECUTION_STATUS_TERMINATED, - enumspb.WORKFLOW_EXECUTION_STATUS_CONTINUED_AS_NEW, - enumspb.WORKFLOW_EXECUTION_STATUS_TIMED_OUT, - } - - s.Error(ValidateCreateWorkflowStateStatus(enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED, enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING)) - - for _, status := range statuses { - s.NoError(ValidateCreateWorkflowStateStatus(enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED, status)) - } -} - -func (s *workflowStateStatusSuite) TestCreateWorkflowStateStatus_WorkflowStateZombie() { - statuses := []enumspb.WorkflowExecutionStatus{ - enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED, - enumspb.WORKFLOW_EXECUTION_STATUS_FAILED, - enumspb.WORKFLOW_EXECUTION_STATUS_CANCELED, - enumspb.WORKFLOW_EXECUTION_STATUS_TERMINATED, - enumspb.WORKFLOW_EXECUTION_STATUS_CONTINUED_AS_NEW, - enumspb.WORKFLOW_EXECUTION_STATUS_TIMED_OUT, - } - - s.NoError(ValidateCreateWorkflowStateStatus(enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE, enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING)) - - for _, status := range statuses { - s.Error(ValidateCreateWorkflowStateStatus(enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE, status)) - } -} - -func (s *workflowStateStatusSuite) TestUpdateWorkflowStateStatus_WorkflowStateCreated() { - statuses := []enumspb.WorkflowExecutionStatus{ - enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED, - enumspb.WORKFLOW_EXECUTION_STATUS_FAILED, - enumspb.WORKFLOW_EXECUTION_STATUS_CANCELED, - enumspb.WORKFLOW_EXECUTION_STATUS_TERMINATED, - enumspb.WORKFLOW_EXECUTION_STATUS_CONTINUED_AS_NEW, - enumspb.WORKFLOW_EXECUTION_STATUS_TIMED_OUT, - } - - s.NoError(ValidateUpdateWorkflowStateStatus(enumsspb.WORKFLOW_EXECUTION_STATE_CREATED, enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING)) - - for _, status := range statuses { - s.Error(ValidateUpdateWorkflowStateStatus(enumsspb.WORKFLOW_EXECUTION_STATE_CREATED, status)) - } -} - -func (s *workflowStateStatusSuite) TestUpdateWorkflowStateStatus_WorkflowStateRunning() { - statuses := []enumspb.WorkflowExecutionStatus{ - enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED, - enumspb.WORKFLOW_EXECUTION_STATUS_FAILED, - enumspb.WORKFLOW_EXECUTION_STATUS_CANCELED, - enumspb.WORKFLOW_EXECUTION_STATUS_TERMINATED, - enumspb.WORKFLOW_EXECUTION_STATUS_CONTINUED_AS_NEW, - enumspb.WORKFLOW_EXECUTION_STATUS_TIMED_OUT, - } - - s.NoError(ValidateUpdateWorkflowStateStatus(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING)) - - for _, status := range statuses { - s.Error(ValidateUpdateWorkflowStateStatus(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, status)) - } -} - -func (s *workflowStateStatusSuite) TestUpdateWorkflowStateStatus_WorkflowStateCompleted() { - statuses := []enumspb.WorkflowExecutionStatus{ - enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED, - enumspb.WORKFLOW_EXECUTION_STATUS_FAILED, - enumspb.WORKFLOW_EXECUTION_STATUS_CANCELED, - enumspb.WORKFLOW_EXECUTION_STATUS_TERMINATED, - enumspb.WORKFLOW_EXECUTION_STATUS_CONTINUED_AS_NEW, - enumspb.WORKFLOW_EXECUTION_STATUS_TIMED_OUT, - } - - s.Error(ValidateUpdateWorkflowStateStatus(enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED, enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING)) - - for _, status := range statuses { - 
s.NoError(ValidateUpdateWorkflowStateStatus(enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED, status)) - } -} - -func (s *workflowStateStatusSuite) TestUpdateWorkflowStateStatus_WorkflowStateZombie() { - statuses := []enumspb.WorkflowExecutionStatus{ - enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED, - enumspb.WORKFLOW_EXECUTION_STATUS_FAILED, - enumspb.WORKFLOW_EXECUTION_STATUS_CANCELED, - enumspb.WORKFLOW_EXECUTION_STATUS_TERMINATED, - enumspb.WORKFLOW_EXECUTION_STATUS_CONTINUED_AS_NEW, - enumspb.WORKFLOW_EXECUTION_STATUS_TIMED_OUT, - } - - s.NoError(ValidateUpdateWorkflowStateStatus(enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE, enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING)) - - for _, status := range statuses { - s.Error(ValidateUpdateWorkflowStateStatus(enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE, status)) - } -} diff -Nru temporal-1.21.5-1/src/common/persistence/workflow_state_status_validator.go temporal-1.22.5/src/common/persistence/workflow_state_status_validator.go --- temporal-1.21.5-1/src/common/persistence/workflow_state_status_validator.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/workflow_state_status_validator.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,120 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package persistence + +import ( + "fmt" + + enumspb "go.temporal.io/api/enums/v1" + "go.temporal.io/api/serviceerror" + + enumsspb "go.temporal.io/server/api/enums/v1" +) + +var ( + validWorkflowStates = map[enumsspb.WorkflowExecutionState]struct{}{ + enumsspb.WORKFLOW_EXECUTION_STATE_CREATED: {}, + enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING: {}, + enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED: {}, + enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE: {}, + enumsspb.WORKFLOW_EXECUTION_STATE_CORRUPTED: {}, + } + + validWorkflowStatuses = map[enumspb.WorkflowExecutionStatus]struct{}{ + enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING: {}, + enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED: {}, + enumspb.WORKFLOW_EXECUTION_STATUS_FAILED: {}, + enumspb.WORKFLOW_EXECUTION_STATUS_CANCELED: {}, + enumspb.WORKFLOW_EXECUTION_STATUS_TERMINATED: {}, + enumspb.WORKFLOW_EXECUTION_STATUS_CONTINUED_AS_NEW: {}, + enumspb.WORKFLOW_EXECUTION_STATUS_TIMED_OUT: {}, + } +) + +// ValidateCreateWorkflowStateStatus validate workflow state and close status +func ValidateCreateWorkflowStateStatus( + state enumsspb.WorkflowExecutionState, + status enumspb.WorkflowExecutionStatus, +) error { + + if err := validateWorkflowState(state); err != nil { + return err + } + if err := validateWorkflowStatus(status); err != nil { + return err + } + + // validate workflow state & status + if (state == enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED && status == enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING) || + (state != enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED && status != enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING) { + return serviceerror.NewInternal(fmt.Sprintf("Create workflow with invalid state: %v or status: %v", state, status)) + } + return nil +} + +// ValidateUpdateWorkflowStateStatus validate workflow state and status +func ValidateUpdateWorkflowStateStatus( + state enumsspb.WorkflowExecutionState, + status enumspb.WorkflowExecutionStatus, +) error { + + if err := validateWorkflowState(state); err != nil { + return err + } + if err := validateWorkflowStatus(status); err != nil { + return err + } + + // validate workflow state & status + if (state == enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED && status == enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING) || + (state != enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED && status != enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING) { + return serviceerror.NewInternal(fmt.Sprintf("Update workflow with invalid state: %v or status: %v", state, status)) + } + return nil +} + +// validateWorkflowState validate workflow state +func validateWorkflowState( + state enumsspb.WorkflowExecutionState, +) error { + + if _, ok := validWorkflowStates[state]; !ok { + return serviceerror.NewInternal(fmt.Sprintf("Invalid workflow state: %v", state)) + } + + return nil +} + +// validateWorkflowStatus validate workflow status +func validateWorkflowStatus( + status enumspb.WorkflowExecutionStatus, +) error { + + if _, ok := validWorkflowStatuses[status]; !ok { + return serviceerror.NewInternal(fmt.Sprintf("Invalid workflow status: %v", status)) + } + + return nil +} diff -Nru temporal-1.21.5-1/src/common/persistence/workflow_state_status_validator_test.go temporal-1.22.5/src/common/persistence/workflow_state_status_validator_test.go --- temporal-1.21.5-1/src/common/persistence/workflow_state_status_validator_test.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/workflow_state_status_validator_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,198 @@ +// The MIT License +// +// Copyright (c) 2020 
Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package persistence + +import ( + "testing" + + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + enumspb "go.temporal.io/api/enums/v1" + + enumsspb "go.temporal.io/server/api/enums/v1" +) + +type ( + workflowStateStatusSuite struct { + suite.Suite + *require.Assertions + } +) + +func TestWorkflowStateStatusSuite(t *testing.T) { + s := new(workflowStateStatusSuite) + suite.Run(t, s) +} + +func (s *workflowStateStatusSuite) SetupSuite() { +} + +func (s *workflowStateStatusSuite) TearDownSuite() { + +} + +func (s *workflowStateStatusSuite) SetupTest() { + s.Assertions = require.New(s.T()) +} + +func (s *workflowStateStatusSuite) TearDownTest() { + +} + +func (s *workflowStateStatusSuite) TestCreateWorkflowStateStatus_WorkflowStateCreated() { + statuses := []enumspb.WorkflowExecutionStatus{ + enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED, + enumspb.WORKFLOW_EXECUTION_STATUS_FAILED, + enumspb.WORKFLOW_EXECUTION_STATUS_CANCELED, + enumspb.WORKFLOW_EXECUTION_STATUS_TERMINATED, + enumspb.WORKFLOW_EXECUTION_STATUS_CONTINUED_AS_NEW, + enumspb.WORKFLOW_EXECUTION_STATUS_TIMED_OUT, + } + + s.NoError(ValidateCreateWorkflowStateStatus(enumsspb.WORKFLOW_EXECUTION_STATE_CREATED, enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING)) + + for _, status := range statuses { + s.NotNil(ValidateCreateWorkflowStateStatus(enumsspb.WORKFLOW_EXECUTION_STATE_CREATED, status)) + } +} + +func (s *workflowStateStatusSuite) TestCreateWorkflowStateStatus_WorkflowStateRunning() { + statuses := []enumspb.WorkflowExecutionStatus{ + enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED, + enumspb.WORKFLOW_EXECUTION_STATUS_FAILED, + enumspb.WORKFLOW_EXECUTION_STATUS_CANCELED, + enumspb.WORKFLOW_EXECUTION_STATUS_TERMINATED, + enumspb.WORKFLOW_EXECUTION_STATUS_CONTINUED_AS_NEW, + enumspb.WORKFLOW_EXECUTION_STATUS_TIMED_OUT, + } + + s.NoError(ValidateCreateWorkflowStateStatus(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING)) + + for _, status := range statuses { + s.NotNil(ValidateCreateWorkflowStateStatus(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, status)) + } +} + +func (s *workflowStateStatusSuite) TestCreateWorkflowStateStatus_WorkflowStateCompleted() { + statuses := []enumspb.WorkflowExecutionStatus{ + enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED, + enumspb.WORKFLOW_EXECUTION_STATUS_FAILED, + 
enumspb.WORKFLOW_EXECUTION_STATUS_CANCELED, + enumspb.WORKFLOW_EXECUTION_STATUS_TERMINATED, + enumspb.WORKFLOW_EXECUTION_STATUS_CONTINUED_AS_NEW, + enumspb.WORKFLOW_EXECUTION_STATUS_TIMED_OUT, + } + + s.Error(ValidateCreateWorkflowStateStatus(enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED, enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING)) + + for _, status := range statuses { + s.NoError(ValidateCreateWorkflowStateStatus(enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED, status)) + } +} + +func (s *workflowStateStatusSuite) TestCreateWorkflowStateStatus_WorkflowStateZombie() { + statuses := []enumspb.WorkflowExecutionStatus{ + enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED, + enumspb.WORKFLOW_EXECUTION_STATUS_FAILED, + enumspb.WORKFLOW_EXECUTION_STATUS_CANCELED, + enumspb.WORKFLOW_EXECUTION_STATUS_TERMINATED, + enumspb.WORKFLOW_EXECUTION_STATUS_CONTINUED_AS_NEW, + enumspb.WORKFLOW_EXECUTION_STATUS_TIMED_OUT, + } + + s.NoError(ValidateCreateWorkflowStateStatus(enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE, enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING)) + + for _, status := range statuses { + s.Error(ValidateCreateWorkflowStateStatus(enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE, status)) + } +} + +func (s *workflowStateStatusSuite) TestUpdateWorkflowStateStatus_WorkflowStateCreated() { + statuses := []enumspb.WorkflowExecutionStatus{ + enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED, + enumspb.WORKFLOW_EXECUTION_STATUS_FAILED, + enumspb.WORKFLOW_EXECUTION_STATUS_CANCELED, + enumspb.WORKFLOW_EXECUTION_STATUS_TERMINATED, + enumspb.WORKFLOW_EXECUTION_STATUS_CONTINUED_AS_NEW, + enumspb.WORKFLOW_EXECUTION_STATUS_TIMED_OUT, + } + + s.NoError(ValidateUpdateWorkflowStateStatus(enumsspb.WORKFLOW_EXECUTION_STATE_CREATED, enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING)) + + for _, status := range statuses { + s.Error(ValidateUpdateWorkflowStateStatus(enumsspb.WORKFLOW_EXECUTION_STATE_CREATED, status)) + } +} + +func (s *workflowStateStatusSuite) TestUpdateWorkflowStateStatus_WorkflowStateRunning() { + statuses := []enumspb.WorkflowExecutionStatus{ + enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED, + enumspb.WORKFLOW_EXECUTION_STATUS_FAILED, + enumspb.WORKFLOW_EXECUTION_STATUS_CANCELED, + enumspb.WORKFLOW_EXECUTION_STATUS_TERMINATED, + enumspb.WORKFLOW_EXECUTION_STATUS_CONTINUED_AS_NEW, + enumspb.WORKFLOW_EXECUTION_STATUS_TIMED_OUT, + } + + s.NoError(ValidateUpdateWorkflowStateStatus(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING)) + + for _, status := range statuses { + s.Error(ValidateUpdateWorkflowStateStatus(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, status)) + } +} + +func (s *workflowStateStatusSuite) TestUpdateWorkflowStateStatus_WorkflowStateCompleted() { + statuses := []enumspb.WorkflowExecutionStatus{ + enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED, + enumspb.WORKFLOW_EXECUTION_STATUS_FAILED, + enumspb.WORKFLOW_EXECUTION_STATUS_CANCELED, + enumspb.WORKFLOW_EXECUTION_STATUS_TERMINATED, + enumspb.WORKFLOW_EXECUTION_STATUS_CONTINUED_AS_NEW, + enumspb.WORKFLOW_EXECUTION_STATUS_TIMED_OUT, + } + + s.Error(ValidateUpdateWorkflowStateStatus(enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED, enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING)) + + for _, status := range statuses { + s.NoError(ValidateUpdateWorkflowStateStatus(enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED, status)) + } +} + +func (s *workflowStateStatusSuite) TestUpdateWorkflowStateStatus_WorkflowStateZombie() { + statuses := []enumspb.WorkflowExecutionStatus{ + enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED, + enumspb.WORKFLOW_EXECUTION_STATUS_FAILED, + 
enumspb.WORKFLOW_EXECUTION_STATUS_CANCELED, + enumspb.WORKFLOW_EXECUTION_STATUS_TERMINATED, + enumspb.WORKFLOW_EXECUTION_STATUS_CONTINUED_AS_NEW, + enumspb.WORKFLOW_EXECUTION_STATUS_TIMED_OUT, + } + + s.NoError(ValidateUpdateWorkflowStateStatus(enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE, enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING)) + + for _, status := range statuses { + s.Error(ValidateUpdateWorkflowStateStatus(enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE, status)) + } +} diff -Nru temporal-1.21.5-1/src/common/persistence/xdc_cache.go temporal-1.22.5/src/common/persistence/xdc_cache.go --- temporal-1.21.5-1/src/common/persistence/xdc_cache.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/persistence/xdc_cache.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,168 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package persistence + +import ( + "time" + + commonpb "go.temporal.io/api/common/v1" + + historyspb "go.temporal.io/server/api/history/v1" + persistencepb "go.temporal.io/server/api/persistence/v1" + workflowspb "go.temporal.io/server/api/workflow/v1" + "go.temporal.io/server/common/cache" + "go.temporal.io/server/common/definition" + "go.temporal.io/server/common/persistence/versionhistory" + "go.temporal.io/server/common/util" +) + +type ( + XDCCacheKey struct { + WorkflowKey definition.WorkflowKey + MinEventID int64 // inclusive + MaxEventID int64 // exclusive + Version int64 + } + XDCCacheValue struct { + BaseWorkflowInfo *workflowspb.BaseExecutionInfo + VersionHistoryItems []*historyspb.VersionHistoryItem + EventBlob *commonpb.DataBlob + } + + XDCCache interface { + Put(key XDCCacheKey, value XDCCacheValue) + Get(key XDCCacheKey) (XDCCacheValue, bool) + } + + XDCCacheImpl struct { + cache cache.Cache + } +) + +const ( + xdcMinCacheSize = 64 * 1024 // 64KB +) + +var _ XDCCache = (*XDCCacheImpl)(nil) +var _ cache.SizeGetter = XDCCacheValue{} + +func NewXDCCacheKey( + workflowKey definition.WorkflowKey, + minEventID int64, + maxEventID int64, + version int64, +) XDCCacheKey { + return XDCCacheKey{ + WorkflowKey: workflowKey, + MinEventID: minEventID, + MaxEventID: maxEventID, + Version: version, + } +} + +func NewXDCCacheValue( + baseWorkflowInfo *workflowspb.BaseExecutionInfo, + versionHistoryItems []*historyspb.VersionHistoryItem, + eventBlob *commonpb.DataBlob, +) XDCCacheValue { + return XDCCacheValue{ + BaseWorkflowInfo: baseWorkflowInfo, + VersionHistoryItems: versionHistoryItems, + EventBlob: eventBlob, + } +} + +func (v XDCCacheValue) CacheSize() int { + size := 0 + for _, item := range v.VersionHistoryItems { + size += item.Size() + } + return v.BaseWorkflowInfo.Size() + size + v.EventBlob.Size() +} + +func NewEventsBlobCache( + maxBytes int, + ttl time.Duration, +) *XDCCacheImpl { + return &XDCCacheImpl{ + cache: cache.New(util.Max(xdcMinCacheSize, maxBytes), &cache.Options{ + TTL: ttl, + Pin: false, + }), + } +} + +func (e *XDCCacheImpl) Put( + key XDCCacheKey, + value XDCCacheValue, +) { + e.cache.Put(key, value) +} + +func (e *XDCCacheImpl) Get(key XDCCacheKey) (XDCCacheValue, bool) { + value := e.cache.Get(key) + if value == nil { + return XDCCacheValue{}, false + } + return value.(XDCCacheValue), true +} + +func GetXDCCacheValue( + executionInfo *persistencepb.WorkflowExecutionInfo, + eventID int64, + version int64, +) ([]*historyspb.VersionHistoryItem, []byte, *workflowspb.BaseExecutionInfo, error) { + baseWorkflowInfo := CopyBaseWorkflowInfo(executionInfo.BaseExecutionInfo) + versionHistories := executionInfo.VersionHistories + versionHistoryIndex, err := versionhistory.FindFirstVersionHistoryIndexByVersionHistoryItem( + versionHistories, + versionhistory.NewVersionHistoryItem( + eventID, + version, + ), + ) + if err != nil { + return nil, nil, nil, err + } + + versionHistoryBranch, err := versionhistory.GetVersionHistory(versionHistories, versionHistoryIndex) + if err != nil { + return nil, nil, nil, err + } + return versionhistory.CopyVersionHistory(versionHistoryBranch).GetItems(), versionHistoryBranch.GetBranchToken(), baseWorkflowInfo, nil +} + +func CopyBaseWorkflowInfo( + baseWorkflowInfo *workflowspb.BaseExecutionInfo, +) *workflowspb.BaseExecutionInfo { + if baseWorkflowInfo == nil { + return nil + } + return &workflowspb.BaseExecutionInfo{ + RunId: baseWorkflowInfo.RunId, + LowestCommonAncestorEventId: baseWorkflowInfo.LowestCommonAncestorEventId, + 
LowestCommonAncestorEventVersion: baseWorkflowInfo.LowestCommonAncestorEventVersion, + } +} diff -Nru temporal-1.21.5-1/src/common/primitives/timestamp/parseDuration.go temporal-1.22.5/src/common/primitives/timestamp/parseDuration.go --- temporal-1.21.5-1/src/common/primitives/timestamp/parseDuration.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/primitives/timestamp/parseDuration.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,69 +0,0 @@ -// The MIT License -// -// Copyright (c) 2021 Temporal Technologies Inc. All rights reserved. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package timestamp - -import ( - "fmt" - "regexp" - "strconv" - "strings" - "time" -) - -var ( - reUnitless = regexp.MustCompile(`^(\d+(\.\d*)?|(\.\d+))$`) - reDays = regexp.MustCompile(`(\d+(\.\d*)?|(\.\d+))d`) -) - -// ParseDuration is like time.ParseDuration, but supports unit "d" for days -// (always interpreted as exactly 24 hours). -func ParseDuration(s string) (time.Duration, error) { - s = reDays.ReplaceAllStringFunc(s, func(v string) string { - fv, err := strconv.ParseFloat(strings.TrimSuffix(v, "d"), 64) - if err != nil { - return v // will cause time.ParseDuration to return an error - } - return fmt.Sprintf("%fh", 24*fv) - }) - return time.ParseDuration(s) -} - -// ParseDurationDefaultDays is like time.ParseDuration, but supports unit "d" -// for days (always interpreted as exactly 24 hours), and also supports -// unit-less numbers, which are interpreted as days. -func ParseDurationDefaultDays(s string) (time.Duration, error) { - if reUnitless.MatchString(s) { - s += "d" - } - return ParseDuration(s) -} - -// ParseDurationDefaultSeconds is like time.ParseDuration, but supports unit "d" -// for days (always interpreted as exactly 24 hours), and also supports -// unit-less numbers, which are interpreted as seconds. -func ParseDurationDefaultSeconds(s string) (time.Duration, error) { - if reUnitless.MatchString(s) { - s += "s" - } - return ParseDuration(s) -} diff -Nru temporal-1.21.5-1/src/common/primitives/timestamp/parseDuration_test.go temporal-1.22.5/src/common/primitives/timestamp/parseDuration_test.go --- temporal-1.21.5-1/src/common/primitives/timestamp/parseDuration_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/primitives/timestamp/parseDuration_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,104 +0,0 @@ -// The MIT License -// -// Copyright (c) 2021 Temporal Technologies Inc. All rights reserved. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package timestamp - -import ( - "testing" - "time" - - "github.com/stretchr/testify/suite" -) - -type ParseDurationSuite struct { - suite.Suite -} - -func TestParseDurationSuite(t *testing.T) { - suite.Run(t, new(ParseDurationSuite)) -} - -func (s *ParseDurationSuite) TestParseDuration() { - for _, c := range []struct { - input string - expected time.Duration // -1 means error - }{ - {"1h", time.Hour}, - {"3m30s", 3*time.Minute + 30*time.Second}, - {"1d", 24 * time.Hour}, - {"3d", 3 * 24 * time.Hour}, - {"5d6h15m", 5*24*time.Hour + 6*time.Hour + 15*time.Minute}, - {"5.25d15m", 5*24*time.Hour + 6*time.Hour + 15*time.Minute}, - {".5d", 12 * time.Hour}, - {"-10d12.25h", -(10*24*time.Hour + 12*time.Hour + 15*time.Minute)}, - {"3m2h1d", 3*time.Minute + 2*time.Hour + 1*24*time.Hour}, - {"8m7h6d5d4h3m", 8*time.Minute + 7*time.Hour + 6*24*time.Hour + 5*24*time.Hour + 4*time.Hour + 3*time.Minute}, - {"7", -1}, // error - {"", -1}, // error - } { - got, err := ParseDuration(c.input) - if c.expected == -1 { - s.Error(err) - } else { - s.Equal(c.expected, got) - } - } -} - -func (s *ParseDurationSuite) TestParseDurationDefaultDays() { - for _, c := range []struct { - input string - expected time.Duration // -1 means error - }{ - {"3m30s", 3*time.Minute + 30*time.Second}, - {"7", 7 * 24 * time.Hour}, - {"7.5", 7*24*time.Hour + 12*time.Hour}, - {".75", 18 * time.Hour}, - {"2.75", 2*24*time.Hour + 18*time.Hour}, - {"", -1}, // error - } { - got, err := ParseDurationDefaultDays(c.input) - if c.expected == -1 { - s.Error(err) - } else { - s.Equal(c.expected, got) - } - } -} - -func (s *ParseDurationSuite) TestParseDurationDefaultSeconds() { - for _, c := range []struct { - input string - expected time.Duration // -1 means error - }{ - {"3m30s", 3*time.Minute + 30*time.Second}, - {"7", 7 * time.Second}, - {"", -1}, // error - } { - got, err := ParseDurationDefaultSeconds(c.input) - if c.expected == -1 { - s.Error(err) - } else { - s.Equal(c.expected, got) - } - } -} diff -Nru temporal-1.21.5-1/src/common/primitives/timestamp/parse_duration.go temporal-1.22.5/src/common/primitives/timestamp/parse_duration.go --- temporal-1.21.5-1/src/common/primitives/timestamp/parse_duration.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/primitives/timestamp/parse_duration.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,69 @@ +// The MIT License +// +// Copyright (c) 2021 Temporal Technologies Inc. All rights reserved. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package timestamp + +import ( + "fmt" + "regexp" + "strconv" + "strings" + "time" +) + +var ( + reUnitless = regexp.MustCompile(`^(\d+(\.\d*)?|(\.\d+))$`) + reDays = regexp.MustCompile(`(\d+(\.\d*)?|(\.\d+))d`) +) + +// ParseDuration is like time.ParseDuration, but supports unit "d" for days +// (always interpreted as exactly 24 hours). +func ParseDuration(s string) (time.Duration, error) { + s = reDays.ReplaceAllStringFunc(s, func(v string) string { + fv, err := strconv.ParseFloat(strings.TrimSuffix(v, "d"), 64) + if err != nil { + return v // will cause time.ParseDuration to return an error + } + return fmt.Sprintf("%fh", 24*fv) + }) + return time.ParseDuration(s) +} + +// ParseDurationDefaultDays is like time.ParseDuration, but supports unit "d" +// for days (always interpreted as exactly 24 hours), and also supports +// unit-less numbers, which are interpreted as days. +func ParseDurationDefaultDays(s string) (time.Duration, error) { + if reUnitless.MatchString(s) { + s += "d" + } + return ParseDuration(s) +} + +// ParseDurationDefaultSeconds is like time.ParseDuration, but supports unit "d" +// for days (always interpreted as exactly 24 hours), and also supports +// unit-less numbers, which are interpreted as seconds. +func ParseDurationDefaultSeconds(s string) (time.Duration, error) { + if reUnitless.MatchString(s) { + s += "s" + } + return ParseDuration(s) +} diff -Nru temporal-1.21.5-1/src/common/primitives/timestamp/parse_duration_test.go temporal-1.22.5/src/common/primitives/timestamp/parse_duration_test.go --- temporal-1.21.5-1/src/common/primitives/timestamp/parse_duration_test.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/primitives/timestamp/parse_duration_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,104 @@ +// The MIT License +// +// Copyright (c) 2021 Temporal Technologies Inc. All rights reserved. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package timestamp + +import ( + "testing" + "time" + + "github.com/stretchr/testify/suite" +) + +type ParseDurationSuite struct { + suite.Suite +} + +func TestParseDurationSuite(t *testing.T) { + suite.Run(t, new(ParseDurationSuite)) +} + +func (s *ParseDurationSuite) TestParseDuration() { + for _, c := range []struct { + input string + expected time.Duration // -1 means error + }{ + {"1h", time.Hour}, + {"3m30s", 3*time.Minute + 30*time.Second}, + {"1d", 24 * time.Hour}, + {"3d", 3 * 24 * time.Hour}, + {"5d6h15m", 5*24*time.Hour + 6*time.Hour + 15*time.Minute}, + {"5.25d15m", 5*24*time.Hour + 6*time.Hour + 15*time.Minute}, + {".5d", 12 * time.Hour}, + {"-10d12.25h", -(10*24*time.Hour + 12*time.Hour + 15*time.Minute)}, + {"3m2h1d", 3*time.Minute + 2*time.Hour + 1*24*time.Hour}, + {"8m7h6d5d4h3m", 8*time.Minute + 7*time.Hour + 6*24*time.Hour + 5*24*time.Hour + 4*time.Hour + 3*time.Minute}, + {"7", -1}, // error + {"", -1}, // error + } { + got, err := ParseDuration(c.input) + if c.expected == -1 { + s.Error(err) + } else { + s.Equal(c.expected, got) + } + } +} + +func (s *ParseDurationSuite) TestParseDurationDefaultDays() { + for _, c := range []struct { + input string + expected time.Duration // -1 means error + }{ + {"3m30s", 3*time.Minute + 30*time.Second}, + {"7", 7 * 24 * time.Hour}, + {"7.5", 7*24*time.Hour + 12*time.Hour}, + {".75", 18 * time.Hour}, + {"2.75", 2*24*time.Hour + 18*time.Hour}, + {"", -1}, // error + } { + got, err := ParseDurationDefaultDays(c.input) + if c.expected == -1 { + s.Error(err) + } else { + s.Equal(c.expected, got) + } + } +} + +func (s *ParseDurationSuite) TestParseDurationDefaultSeconds() { + for _, c := range []struct { + input string + expected time.Duration // -1 means error + }{ + {"3m30s", 3*time.Minute + 30*time.Second}, + {"7", 7 * time.Second}, + {"", -1}, // error + } { + got, err := ParseDurationDefaultSeconds(c.input) + if c.expected == -1 { + s.Error(err) + } else { + s.Equal(c.expected, got) + } + } +} diff -Nru temporal-1.21.5-1/src/common/protoTaskTokenSerializer.go temporal-1.22.5/src/common/protoTaskTokenSerializer.go --- temporal-1.21.5-1/src/common/protoTaskTokenSerializer.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/protoTaskTokenSerializer.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,64 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. 
-// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package common - -import ( - tokenspb "go.temporal.io/server/api/token/v1" -) - -type ( - protoTaskTokenSerializer struct{} -) - -// NewProtoTaskTokenSerializer creates a new instance of TaskTokenSerializer -func NewProtoTaskTokenSerializer() TaskTokenSerializer { - return &protoTaskTokenSerializer{} -} - -func (s *protoTaskTokenSerializer) Serialize(taskToken *tokenspb.Task) ([]byte, error) { - if taskToken == nil { - return nil, nil - } - return taskToken.Marshal() -} - -func (s *protoTaskTokenSerializer) Deserialize(data []byte) (*tokenspb.Task, error) { - taskToken := &tokenspb.Task{} - err := taskToken.Unmarshal(data) - return taskToken, err -} - -func (s *protoTaskTokenSerializer) SerializeQueryTaskToken(taskToken *tokenspb.QueryTask) ([]byte, error) { - if taskToken == nil { - return nil, nil - } - return taskToken.Marshal() -} - -func (s *protoTaskTokenSerializer) DeserializeQueryTaskToken(data []byte) (*tokenspb.QueryTask, error) { - taskToken := tokenspb.QueryTask{} - err := taskToken.Unmarshal(data) - return &taskToken, err -} diff -Nru temporal-1.21.5-1/src/common/proto_task_token_serializer.go temporal-1.22.5/src/common/proto_task_token_serializer.go --- temporal-1.21.5-1/src/common/proto_task_token_serializer.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/proto_task_token_serializer.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,64 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package common + +import ( + tokenspb "go.temporal.io/server/api/token/v1" +) + +type ( + protoTaskTokenSerializer struct{} +) + +// NewProtoTaskTokenSerializer creates a new instance of TaskTokenSerializer +func NewProtoTaskTokenSerializer() TaskTokenSerializer { + return &protoTaskTokenSerializer{} +} + +func (s *protoTaskTokenSerializer) Serialize(taskToken *tokenspb.Task) ([]byte, error) { + if taskToken == nil { + return nil, nil + } + return taskToken.Marshal() +} + +func (s *protoTaskTokenSerializer) Deserialize(data []byte) (*tokenspb.Task, error) { + taskToken := &tokenspb.Task{} + err := taskToken.Unmarshal(data) + return taskToken, err +} + +func (s *protoTaskTokenSerializer) SerializeQueryTaskToken(taskToken *tokenspb.QueryTask) ([]byte, error) { + if taskToken == nil { + return nil, nil + } + return taskToken.Marshal() +} + +func (s *protoTaskTokenSerializer) DeserializeQueryTaskToken(data []byte) (*tokenspb.QueryTask, error) { + taskToken := tokenspb.QueryTask{} + err := taskToken.Unmarshal(data) + return &taskToken, err +} diff -Nru temporal-1.21.5-1/src/common/quotas/cluster_aware_quota_calculator.go temporal-1.22.5/src/common/quotas/cluster_aware_quota_calculator.go --- temporal-1.21.5-1/src/common/quotas/cluster_aware_quota_calculator.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/quotas/cluster_aware_quota_calculator.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,69 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package quotas + +type ( + // MemberCounter returns the total number of instances there are for a given service. + MemberCounter interface { + MemberCount() int + } + // ClusterAwareQuotaCalculator calculates the available quota for the current host based on the per instance and per + // cluster quota. The quota could represent requests per second, total number of active requests, etc. It works by + // dividing the per cluster quota by the total number of instances running the same service. 
+ ClusterAwareQuotaCalculator quotaCalculator[func() int] + // ClusterAwareNamespaceSpecificQuotaCalculator is similar to ClusterAwareQuotaCalculator, but it uses quotas that + // are specific to a namespace. + ClusterAwareNamespaceSpecificQuotaCalculator quotaCalculator[func(namespace string) int] + // quotaCalculator is a generic type that we use because the quota functions could be namespace specific or not. + quotaCalculator[T any] struct { + MemberCounter MemberCounter + // PerInstanceQuota is a function that returns the per instance limit. + PerInstanceQuota T + // GlobalQuota is a function that returns the per cluster limit. + GlobalQuota T + } +) + +// getQuota returns the effective resource limit for a host given the per instance and per cluster +// limits. The "resource" here could be requests per second, total number of active requests, etc. The cluster-wide +// limit is used if and only if it is configured to a value greater than zero and the number of instances that +// the memberCounter reports is greater than zero. Otherwise, the per-instance limit is used. +func getQuota(memberCounter MemberCounter, instanceLimit, clusterLimit int) float64 { + if clusterLimit > 0 && memberCounter != nil { + if clusterSize := memberCounter.MemberCount(); clusterSize > 0 { + return float64(clusterLimit) / float64(clusterSize) + } + } + + return float64(instanceLimit) +} + +func (l ClusterAwareQuotaCalculator) GetQuota() float64 { + return getQuota(l.MemberCounter, l.PerInstanceQuota(), l.GlobalQuota()) +} + +func (l ClusterAwareNamespaceSpecificQuotaCalculator) GetQuota(namespace string) float64 { + return getQuota(l.MemberCounter, l.PerInstanceQuota(namespace), l.GlobalQuota(namespace)) +} diff -Nru temporal-1.21.5-1/src/common/quotas/cluster_aware_quota_calculator_test.go temporal-1.22.5/src/common/quotas/cluster_aware_quota_calculator_test.go --- temporal-1.21.5-1/src/common/quotas/cluster_aware_quota_calculator_test.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/quotas/cluster_aware_quota_calculator_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,124 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
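A minimal illustrative sketch of the arithmetic above, not taken from the upstream sources: fixedCounter is a hypothetical MemberCounter, and dynamicconfig.GetIntPropertyFn is the same helper used to wrap constant limits in the test file that follows.

package main

import (
	"fmt"

	"go.temporal.io/server/common/dynamicconfig"
	"go.temporal.io/server/common/quotas"
)

// fixedCounter is a hypothetical MemberCounter that always reports the same cluster size.
type fixedCounter int

func (c fixedCounter) MemberCount() int { return int(c) }

func main() {
	calc := quotas.ClusterAwareQuotaCalculator{
		MemberCounter:    fixedCounter(4),                    // four hosts run this service
		PerInstanceQuota: dynamicconfig.GetIntPropertyFn(10), // fallback limit per host
		GlobalQuota:      dynamicconfig.GetIntPropertyFn(20), // cluster-wide limit
	}
	// Cluster limit 20 split across 4 members: 20 / 4 = 5 per host.
	// With GlobalQuota 0, a nil counter, or zero members, it falls back to 10.
	fmt.Println(calc.GetQuota())
}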
+ +package quotas_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "go.temporal.io/server/common/dynamicconfig" + "go.temporal.io/server/common/quotas" + "go.temporal.io/server/common/quotas/quotastest" +) + +type quotaCalculatorTestCase struct { + name string + memberCounter quotas.MemberCounter + instanceLimit int + clusterLimit int + expected float64 +} + +var quotaCalculatorTestCases = []quotaCalculatorTestCase{ + { + name: "both limits set", + memberCounter: quotastest.NewFakeMemberCounter(4), + instanceLimit: 10, + clusterLimit: 20, + expected: 5.0, + }, + { + name: "no per cluster limit", + memberCounter: quotastest.NewFakeMemberCounter(4), + instanceLimit: 10, + clusterLimit: 0, + expected: 10.0, + }, + { + name: "no hosts", + memberCounter: quotastest.NewFakeMemberCounter(0), + instanceLimit: 10, + clusterLimit: 20, + expected: 10.0, + }, + { + name: "nil member counter", + memberCounter: nil, + instanceLimit: 10, + clusterLimit: 20, + expected: 10.0, + }, +} + +func TestClusterAwareQuotaCalculator_GetQuota(t *testing.T) { + t.Parallel() + + for _, tc := range quotaCalculatorTestCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + assert.Equal(t, tc.expected, quotas.ClusterAwareQuotaCalculator{ + MemberCounter: tc.memberCounter, + PerInstanceQuota: dynamicconfig.GetIntPropertyFn(tc.instanceLimit), + GlobalQuota: dynamicconfig.GetIntPropertyFn(tc.clusterLimit), + }.GetQuota()) + + }) + } +} + +type perNamespaceQuota struct { + t *testing.T + quota int +} + +func (l perNamespaceQuota) getQuota(ns string) int { + if ns != "test-namespace" { + l.t.Errorf("unexpected namespace: %s", ns) + } + return l.quota +} + +func TestClusterAwareNamespaceSpecificQuotaCalculator_GetQuota(t *testing.T) { + t.Parallel() + + for _, tc := range quotaCalculatorTestCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + instanceLimit := perNamespaceQuota{t: t, quota: tc.instanceLimit} + + clusterLimit := perNamespaceQuota{t: t, quota: tc.clusterLimit} + + assert.Equal(t, tc.expected, quotas.ClusterAwareNamespaceSpecificQuotaCalculator{ + MemberCounter: tc.memberCounter, + PerInstanceQuota: instanceLimit.getQuota, + GlobalQuota: clusterLimit.getQuota, + }.GetQuota("test-namespace")) + }) + } +} diff -Nru temporal-1.21.5-1/src/common/quotas/delayed_request_rate_limiter.go temporal-1.22.5/src/common/quotas/delayed_request_rate_limiter.go --- temporal-1.21.5-1/src/common/quotas/delayed_request_rate_limiter.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/quotas/delayed_request_rate_limiter.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,78 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package quotas + +import ( + "errors" + "fmt" + "time" + + "go.temporal.io/server/common/clock" +) + +// DelayedRequestRateLimiter is a rate limiter that allows all requests without any delay for a given duration. After +// the delay expires, it delegates to another rate limiter. This rate limiter is useful for cases where you want to +// allow all requests for a given duration, e.g. during something volatile like a deployment, and then switch to another +// rate limiter after the duration expires. +type DelayedRequestRateLimiter struct { + // RequestRateLimiter is the delegate that we switch to after the delay expires. + RequestRateLimiter + // timer triggers the rate limiter to delegate to the underlying rate limiter. We hold a reference to it in order to + // cancel it prematurely if needed. + timer clock.Timer +} + +var ErrNegativeDelay = errors.New("delay cannot be negative") + +// NewDelayedRequestRateLimiter returns a DelayedRequestRateLimiter that delegates to the given rate limiter after a +// delay. The timeSource is used to create the timer that triggers the switch. It returns an error if the given delay +// is negative. +func NewDelayedRequestRateLimiter( + rl RequestRateLimiter, + delay time.Duration, + timeSource clock.TimeSource, +) (*DelayedRequestRateLimiter, error) { + if delay < 0 { + return nil, fmt.Errorf("%w: %v", ErrNegativeDelay, delay) + } + + delegator := RequestRateLimiterDelegator{} + delegator.SetRateLimiter(NoopRequestRateLimiter) + + timer := timeSource.AfterFunc(delay, func() { + delegator.SetRateLimiter(rl) + }) + + return &DelayedRequestRateLimiter{ + RequestRateLimiter: &delegator, + timer: timer, + }, nil +} + +// Cancel stops the timer that triggers the rate limiter to delegate to the underlying rate limiter. It returns true if +// the timer was stopped before it expired. +func (rl *DelayedRequestRateLimiter) Cancel() bool { + return rl.timer.Stop() +} diff -Nru temporal-1.21.5-1/src/common/quotas/delayed_request_rate_limiter_test.go temporal-1.22.5/src/common/quotas/delayed_request_rate_limiter_test.go --- temporal-1.21.5-1/src/common/quotas/delayed_request_rate_limiter_test.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/quotas/delayed_request_rate_limiter_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,96 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. 
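A brief illustrative sketch of how the limiter above might be wired outside of tests, not taken from the upstream sources: newStartupGraceLimiter is a hypothetical helper, while NewDelayedRequestRateLimiter, clock.NewRealTimeSource and quotas.RequestRateLimiter are the identifiers defined in this package.

package example

import (
	"time"

	"go.temporal.io/server/common/clock"
	"go.temporal.io/server/common/quotas"
)

// newStartupGraceLimiter lets every request through for the first 30 seconds
// (e.g. while a deployment settles) and then delegates to the strict limiter.
// The only possible error is ErrNegativeDelay, which a positive delay avoids.
func newStartupGraceLimiter(strict quotas.RequestRateLimiter) (*quotas.DelayedRequestRateLimiter, error) {
	return quotas.NewDelayedRequestRateLimiter(strict, 30*time.Second, clock.NewRealTimeSource())
}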
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package quotas_test + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.temporal.io/server/common/clock" + "go.temporal.io/server/common/quotas" +) + +// disallowingRateLimiter is a rate limiter whose Allow method always returns false. +type disallowingRateLimiter struct { + // RequestRateLimiter is an embedded field so that disallowingRateLimiter implements that interface. It doesn't + // actually delegate to this rate limiter, and this field should be left nil. + quotas.RequestRateLimiter +} + +func (rl disallowingRateLimiter) Allow(time.Time, quotas.Request) bool { + return false +} + +func TestNewDelayedRequestRateLimiter_NegativeDelay(t *testing.T) { + t.Parallel() + + _, err := quotas.NewDelayedRequestRateLimiter( + quotas.NoopRequestRateLimiter, + -time.Nanosecond, + clock.NewRealTimeSource(), + ) + assert.ErrorIs(t, err, quotas.ErrNegativeDelay) +} + +func TestNewDelayedRequestRateLimiter_ZeroDelay(t *testing.T) { + t.Parallel() + + timeSource := clock.NewEventTimeSource() + drl, err := quotas.NewDelayedRequestRateLimiter(disallowingRateLimiter{}, 0, timeSource) + require.NoError(t, err) + assert.False(t, drl.Allow(time.Time{}, quotas.Request{}), "expected Allow to return false because we "+ + "immediately switched to the disallowing rate limiter due to the zero delay") +} + +func TestDelayedRequestRateLimiter_Allow(t *testing.T) { + t.Parallel() + + timeSource := clock.NewEventTimeSource() + drl, err := quotas.NewDelayedRequestRateLimiter(disallowingRateLimiter{}, time.Second, timeSource) + require.NoError(t, err) + timeSource.Advance(time.Second - time.Nanosecond) + assert.True(t, drl.Allow(time.Time{}, quotas.Request{}), "expected Allow to return true because the "+ + "timer hasn't expired yet") + timeSource.Advance(time.Nanosecond) + assert.False(t, drl.Allow(time.Time{}, quotas.Request{}), "expected Allow to return false because the "+ + "timer expired, and we switched to the disallowing rate limiter") +} + +func TestDelayedRequestRateLimiter_Cancel(t *testing.T) { + t.Parallel() + + timeSource := clock.NewEventTimeSource() + drl, err := quotas.NewDelayedRequestRateLimiter(disallowingRateLimiter{}, time.Second, timeSource) + require.NoError(t, err) + timeSource.Advance(time.Second - time.Nanosecond) + assert.True(t, drl.Cancel(), "expected Cancel to return true because the timer was stopped before it "+ + "expired") + 
timeSource.Advance(time.Nanosecond) + assert.True(t, drl.Allow(time.Time{}, quotas.Request{}), "expected Allow to return true because the "+ + "timer was stopped before it could expire") + assert.False(t, drl.Cancel(), "expected Cancel to return false because the timer was already stopped") +} diff -Nru temporal-1.21.5-1/src/common/quotas/noop_request_rate_limiter_impl_test.go temporal-1.22.5/src/common/quotas/noop_request_rate_limiter_impl_test.go --- temporal-1.21.5-1/src/common/quotas/noop_request_rate_limiter_impl_test.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/quotas/noop_request_rate_limiter_impl_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,46 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package quotas_test + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "go.temporal.io/server/common/quotas" +) + +func TestNoopRequestRateLimiterImpl(t *testing.T) { + t.Parallel() + + testNoopRequestRateLimiterImpl(t, quotas.NoopRequestRateLimiter) +} + +func testNoopRequestRateLimiterImpl(t *testing.T, rl quotas.RequestRateLimiter) { + assert.True(t, rl.Allow(time.Now(), quotas.Request{})) + assert.Equal(t, quotas.NoopReservation, rl.Reserve(time.Now(), quotas.Request{})) + assert.NoError(t, rl.Wait(context.Background(), quotas.Request{})) +} diff -Nru temporal-1.21.5-1/src/common/quotas/quotastest/fake_instance_counter.go temporal-1.22.5/src/common/quotas/quotastest/fake_instance_counter.go --- temporal-1.21.5-1/src/common/quotas/quotastest/fake_instance_counter.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/quotas/quotastest/fake_instance_counter.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,38 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package quotastest + +// NewFakeMemberCounter returns a new fake quotas.MemberCounter that always returns numInstances. +func NewFakeMemberCounter(numInstances int) memberCounter { + return memberCounter{numInstances: numInstances} +} + +type memberCounter struct { + numInstances int +} + +func (c memberCounter) MemberCount() int { + return c.numInstances +} diff -Nru temporal-1.21.5-1/src/common/quotas/request_rate_limiter_delegator.go temporal-1.22.5/src/common/quotas/request_rate_limiter_delegator.go --- temporal-1.21.5-1/src/common/quotas/request_rate_limiter_delegator.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/quotas/request_rate_limiter_delegator.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,71 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package quotas + +import ( + "context" + "sync/atomic" + "time" +) + +// RequestRateLimiterDelegator is a request rate limiter that delegates to another rate limiter. The delegate can be +// changed at runtime by calling SetRateLimiter. This rate limiter is useful for cases where you want to substitute one +// rate limiter implementation for another at runtime. All methods of this type are thread-safe. 
+type RequestRateLimiterDelegator struct { + // delegate is an atomic.Value so that it can be safely read from and written to concurrently. It stores the rate + // limiter that this rate limiter delegates to as a monomorphicRequestRateLimiter. + delegate atomic.Value +} + +// monomorphicRequestRateLimiter is a workaround for the fact that the value stored in an atomic.Value must always be +// the same type, but we want to allow the rate limiter delegate to be any type that implements the RequestRateLimiter +// interface. +type monomorphicRequestRateLimiter struct { + RequestRateLimiter +} + +// SetRateLimiter sets the rate limiter to delegate to. +func (d *RequestRateLimiterDelegator) SetRateLimiter(rl RequestRateLimiter) { + d.delegate.Store(monomorphicRequestRateLimiter{rl}) +} + +// loadDelegate returns the rate limiter that this rate limiter delegates to. +func (d *RequestRateLimiterDelegator) loadDelegate() RequestRateLimiter { + return d.delegate.Load().(RequestRateLimiter) +} + +// The following methods just delegate to the underlying rate limiter. + +func (d *RequestRateLimiterDelegator) Allow(now time.Time, request Request) bool { + return d.loadDelegate().Allow(now, request) +} + +func (d *RequestRateLimiterDelegator) Reserve(now time.Time, request Request) Reservation { + return d.loadDelegate().Reserve(now, request) +} + +func (d *RequestRateLimiterDelegator) Wait(ctx context.Context, request Request) error { + return d.loadDelegate().Wait(ctx, request) +} diff -Nru temporal-1.21.5-1/src/common/quotas/request_rate_limiter_delegator_test.go temporal-1.22.5/src/common/quotas/request_rate_limiter_delegator_test.go --- temporal-1.21.5-1/src/common/quotas/request_rate_limiter_delegator_test.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/quotas/request_rate_limiter_delegator_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,86 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package quotas_test + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "go.temporal.io/server/common/quotas" +) + +// blockingRequestRateLimiter is a rate limiter that blocks in its Wait method until the context is canceled. +type blockingRequestRateLimiter struct { + // RateLimiter is an embedded field so that blockingRequestRateLimiter implements the RateLimiter interface. 
It doesn't + actually delegate to this rate limiter, and this field should be left nil. + quotas.RequestRateLimiter + // waitStarted is a channel which is sent to as soon as Wait is called. + waitStarted chan struct{} +} + +func (b *blockingRequestRateLimiter) Wait(ctx context.Context, _ quotas.Request) error { + b.waitStarted <- struct{}{} + <-ctx.Done() + return ctx.Err() +} + +// TestRateLimiterDelegator_Wait verifies that the RequestRateLimiterDelegator.Wait method can be called concurrently even if +// the rate limiter it delegates to is switched while the method is being called. The same condition should hold for +// all methods on RequestRateLimiterDelegator, but we only test Wait here for simplicity. +func TestRateLimiterDelegator_Wait(t *testing.T) { + t.Parallel() + + blockingRateLimiter := &blockingRequestRateLimiter{ + waitStarted: make(chan struct{}), + } + delegator := quotas.RequestRateLimiterDelegator{} + delegator.SetRateLimiter(blockingRateLimiter) + + ctx, cancel := context.WithCancel(context.Background()) + waitErrs := make(chan error) + + go func() { + waitErrs <- delegator.Wait(ctx, quotas.Request{}) + }() + <-blockingRateLimiter.waitStarted + delegator.SetRateLimiter(quotas.NoopRequestRateLimiter) + assert.NoError(t, delegator.Wait(ctx, quotas.Request{})) + select { + case err := <-waitErrs: + t.Fatal("Wait returned before context was canceled:", err) + default: + } + cancel() + assert.ErrorIs(t, <-waitErrs, context.Canceled) +} + +func TestRateLimiterDelegator_SetRateLimiter(t *testing.T) { + t.Parallel() + + delegator := quotas.RequestRateLimiterDelegator{} + delegator.SetRateLimiter(quotas.NoopRequestRateLimiter) + testNoopRequestRateLimiterImpl(t, delegator) +} diff -Nru temporal-1.21.5-1/src/common/resource/fx.go temporal-1.22.5/src/common/resource/fx.go --- temporal-1.21.5-1/src/common/resource/fx.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/resource/fx.go 2024-02-23 09:45:43.000000000 +0000 @@ -76,6 +76,9 @@ InstanceID string ServiceNames map[primitives.ServiceName]struct{} + HistoryRawClient historyservice.HistoryServiceClient + HistoryClient historyservice.HistoryServiceClient + MatchingRawClient matchingservice.MatchingServiceClient MatchingClient matchingservice.MatchingServiceClient @@ -114,6 +117,7 @@ fx.Provide(GrpcListenerProvider), fx.Provide(RuntimeMetricsReporterProvider), metrics.RuntimeMetricsReporterLifetimeHooksModule, + fx.Provide(HistoryRawClientProvider), fx.Provide(HistoryClientProvider), fx.Provide(MatchingRawClientProvider), fx.Provide(MatchingClientProvider), @@ -305,20 +309,22 @@ ) } -func HistoryClientProvider(clientBean client.Bean) historyservice.HistoryServiceClient { - historyRawClient := clientBean.GetHistoryClient() - historyClient := history.NewRetryableClient( +func HistoryRawClientProvider(clientBean client.Bean) HistoryRawClient { + return clientBean.GetHistoryClient() +} + +func HistoryClientProvider(historyRawClient HistoryRawClient) HistoryClient { + return history.NewRetryableClient( historyRawClient, common.CreateHistoryClientRetryPolicy(), common.IsServiceClientTransientError, ) - return historyClient } -func MatchingRawClientProvider(clientBean client.Bean, namespaceRegistry namespace.Registry) ( - MatchingRawClient, - error, -) { +func MatchingRawClientProvider( + clientBean client.Bean, + namespaceRegistry namespace.Registry, +) (MatchingRawClient, error) { return clientBean.GetMatchingClient(namespaceRegistry.GetNamespaceName) } diff -Nru temporal-1.21.5-1/src/common/resourcetest/resourceTest.go
temporal-1.22.5/src/common/resourcetest/resourceTest.go --- temporal-1.21.5-1/src/common/resourcetest/resourceTest.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/resourcetest/resourceTest.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,449 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package resourcetest - -import ( - "net" - - "github.com/golang/mock/gomock" - "github.com/uber-go/tally/v4" - - "go.temporal.io/api/workflowservice/v1" - "go.temporal.io/api/workflowservicemock/v1" - - "go.temporal.io/server/api/adminservice/v1" - "go.temporal.io/server/api/adminservicemock/v1" - "go.temporal.io/server/api/historyservice/v1" - "go.temporal.io/server/api/historyservicemock/v1" - "go.temporal.io/server/api/matchingservice/v1" - "go.temporal.io/server/api/matchingservicemock/v1" - "go.temporal.io/server/client" - "go.temporal.io/server/common/archiver" - "go.temporal.io/server/common/archiver/provider" - "go.temporal.io/server/common/clock" - "go.temporal.io/server/common/cluster" - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/membership" - "go.temporal.io/server/common/metrics" - "go.temporal.io/server/common/namespace" - "go.temporal.io/server/common/persistence" - persistenceClient "go.temporal.io/server/common/persistence/client" - "go.temporal.io/server/common/persistence/serialization" - "go.temporal.io/server/common/persistence/visibility/manager" - esclient "go.temporal.io/server/common/persistence/visibility/store/elasticsearch/client" - "go.temporal.io/server/common/primitives" - "go.temporal.io/server/common/sdk" - "go.temporal.io/server/common/searchattribute" -) - -// TODO: replace with test specific Fx - -type ( - // Test is the test implementation used for testing - Test struct { - MetricsScope tally.Scope - ClusterMetadata *cluster.MockMetadata - SearchAttributesProvider *searchattribute.MockProvider - SearchAttributesManager *searchattribute.MockManager - SearchAttributesMapperProvider *searchattribute.MockMapperProvider - - // other common resources - - NamespaceCache *namespace.MockRegistry - TimeSource clock.TimeSource - PayloadSerializer serialization.Serializer - MetricsHandler metrics.Handler - ArchivalMetadata archiver.MetadataMock - ArchiverProvider *provider.MockArchiverProvider - - // membership infos - - MembershipMonitor *membership.MockMonitor 
- HostInfoProvider *membership.MockHostInfoProvider - FrontendServiceResolver *membership.MockServiceResolver - MatchingServiceResolver *membership.MockServiceResolver - HistoryServiceResolver *membership.MockServiceResolver - WorkerServiceResolver *membership.MockServiceResolver - - // internal services clients - - SDKClientFactory *sdk.MockClientFactory - FrontendClient *workflowservicemock.MockWorkflowServiceClient - MatchingClient *matchingservicemock.MockMatchingServiceClient - HistoryClient *historyservicemock.MockHistoryServiceClient - RemoteAdminClient *adminservicemock.MockAdminServiceClient - RemoteFrontendClient *workflowservicemock.MockWorkflowServiceClient - ClientBean *client.MockBean - ClientFactory *client.MockFactory - ESClient *esclient.MockClient - VisibilityManager *manager.MockVisibilityManager - - // persistence clients - - MetadataMgr *persistence.MockMetadataManager - ClusterMetadataMgr *persistence.MockClusterMetadataManager - TaskMgr *persistence.MockTaskManager - NamespaceReplicationQueue persistence.NamespaceReplicationQueue - ShardMgr *persistence.MockShardManager - ExecutionMgr *persistence.MockExecutionManager - PersistenceBean *persistenceClient.MockBean - - Logger log.Logger - } -) - -const ( - testHostName = "test_host" -) - -var testHostInfo = membership.NewHostInfoFromAddress(testHostName) - -// NewTest returns a new test resource instance -func NewTest(controller *gomock.Controller, serviceName primitives.ServiceName) *Test { - logger := log.NewTestLogger() - - frontendClient := workflowservicemock.NewMockWorkflowServiceClient(controller) - matchingClient := matchingservicemock.NewMockMatchingServiceClient(controller) - historyClient := historyservicemock.NewMockHistoryServiceClient(controller) - remoteFrontendClient := workflowservicemock.NewMockWorkflowServiceClient(controller) - remoteAdminClient := adminservicemock.NewMockAdminServiceClient(controller) - clusterMetadataManager := persistence.NewMockClusterMetadataManager(controller) - clientBean := client.NewMockBean(controller) - clientBean.EXPECT().GetFrontendClient().Return(frontendClient).AnyTimes() - clientBean.EXPECT().GetMatchingClient(gomock.Any()).Return(matchingClient, nil).AnyTimes() - clientBean.EXPECT().GetHistoryClient().Return(historyClient).AnyTimes() - clientBean.EXPECT().GetRemoteAdminClient(gomock.Any()).Return(remoteAdminClient, nil).AnyTimes() - clientBean.EXPECT().GetRemoteFrontendClient(gomock.Any()).Return(nil, remoteFrontendClient, nil).AnyTimes() - clientFactory := client.NewMockFactory(controller) - - metadataMgr := persistence.NewMockMetadataManager(controller) - taskMgr := persistence.NewMockTaskManager(controller) - shardMgr := persistence.NewMockShardManager(controller) - executionMgr := persistence.NewMockExecutionManager(controller) - executionMgr.EXPECT().GetHistoryBranchUtil().Return(&persistence.HistoryBranchUtilImpl{}).AnyTimes() - namespaceReplicationQueue := persistence.NewMockNamespaceReplicationQueue(controller) - namespaceReplicationQueue.EXPECT().Start().AnyTimes() - namespaceReplicationQueue.EXPECT().Stop().AnyTimes() - persistenceBean := persistenceClient.NewMockBean(controller) - persistenceBean.EXPECT().GetMetadataManager().Return(metadataMgr).AnyTimes() - persistenceBean.EXPECT().GetTaskManager().Return(taskMgr).AnyTimes() - persistenceBean.EXPECT().GetShardManager().Return(shardMgr).AnyTimes() - persistenceBean.EXPECT().GetExecutionManager().Return(executionMgr).AnyTimes() - 
persistenceBean.EXPECT().GetNamespaceReplicationQueue().Return(namespaceReplicationQueue).AnyTimes() - persistenceBean.EXPECT().GetClusterMetadataManager().Return(clusterMetadataManager).AnyTimes() - - membershipMonitor := membership.NewMockMonitor(controller) - hostInfoProvider := membership.NewMockHostInfoProvider(controller) - frontendServiceResolver := membership.NewMockServiceResolver(controller) - matchingServiceResolver := membership.NewMockServiceResolver(controller) - historyServiceResolver := membership.NewMockServiceResolver(controller) - workerServiceResolver := membership.NewMockServiceResolver(controller) - membershipMonitor.EXPECT().GetResolver(primitives.FrontendService).Return(frontendServiceResolver, nil).AnyTimes() - membershipMonitor.EXPECT().GetResolver(primitives.InternalFrontendService).Return(nil, membership.ErrUnknownService).AnyTimes() - membershipMonitor.EXPECT().GetResolver(primitives.MatchingService).Return(matchingServiceResolver, nil).AnyTimes() - membershipMonitor.EXPECT().GetResolver(primitives.HistoryService).Return(historyServiceResolver, nil).AnyTimes() - membershipMonitor.EXPECT().GetResolver(primitives.WorkerService).Return(workerServiceResolver, nil).AnyTimes() - membershipMonitor.EXPECT().WaitUntilInitialized(gomock.Any()).Return(nil).AnyTimes() - - scope := tally.NewTestScope("test", nil) - metricsHandler := metrics.NewTallyMetricsHandler(metrics.ClientConfig{}, scope).WithTags( - metrics.ServiceNameTag(serviceName), - ) - - return &Test{ - MetricsScope: scope, - ClusterMetadata: cluster.NewMockMetadata(controller), - SearchAttributesProvider: searchattribute.NewMockProvider(controller), - SearchAttributesManager: searchattribute.NewMockManager(controller), - SearchAttributesMapperProvider: searchattribute.NewMockMapperProvider(controller), - - // other common resources - - NamespaceCache: namespace.NewMockRegistry(controller), - TimeSource: clock.NewRealTimeSource(), - PayloadSerializer: serialization.NewSerializer(), - MetricsHandler: metricsHandler, - ArchivalMetadata: archiver.NewMetadataMock(controller), - ArchiverProvider: provider.NewMockArchiverProvider(controller), - - // membership infos - - MembershipMonitor: membershipMonitor, - HostInfoProvider: hostInfoProvider, - FrontendServiceResolver: frontendServiceResolver, - MatchingServiceResolver: matchingServiceResolver, - HistoryServiceResolver: historyServiceResolver, - WorkerServiceResolver: workerServiceResolver, - - // internal services clients - - SDKClientFactory: sdk.NewMockClientFactory(controller), - FrontendClient: frontendClient, - MatchingClient: matchingClient, - HistoryClient: historyClient, - RemoteAdminClient: remoteAdminClient, - RemoteFrontendClient: remoteFrontendClient, - ClientBean: clientBean, - ClientFactory: clientFactory, - ESClient: esclient.NewMockClient(controller), - VisibilityManager: manager.NewMockVisibilityManager(controller), - - // persistence clients - - MetadataMgr: metadataMgr, - ClusterMetadataMgr: clusterMetadataManager, - TaskMgr: taskMgr, - NamespaceReplicationQueue: namespaceReplicationQueue, - ShardMgr: shardMgr, - ExecutionMgr: executionMgr, - PersistenceBean: persistenceBean, - - // logger - - Logger: logger, - } -} - -// Start for testing -func (t *Test) Start() { -} - -// Stop for testing -func (t *Test) Stop() { -} - -// static infos - -// GetServiceName for testing -func (t *Test) GetServiceName() string { - panic("user should implement this method for test") -} - -// GetHostName for testing -func (t *Test) GetHostName() string { - return 
testHostInfo.Identity() -} - -// GetHostInfo for testing -func (t *Test) GetHostInfo() membership.HostInfo { - return testHostInfo -} - -// GetClusterMetadata for testing -func (t *Test) GetClusterMetadata() cluster.Metadata { - return t.ClusterMetadata -} - -// GetClusterMetadata for testing -func (t *Test) GetClusterMetadataManager() persistence.ClusterMetadataManager { - return t.ClusterMetadataMgr -} - -// other common resources - -// GetNamespaceRegistry for testing -func (t *Test) GetNamespaceRegistry() namespace.Registry { - return t.NamespaceCache -} - -// GetTimeSource for testing -func (t *Test) GetTimeSource() clock.TimeSource { - return t.TimeSource -} - -// GetPayloadSerializer for testing -func (t *Test) GetPayloadSerializer() serialization.Serializer { - return t.PayloadSerializer -} - -// GetMetricsHandler for testing -func (t *Test) GetMetricsHandler() metrics.Handler { - return t.MetricsHandler -} - -// GetArchivalMetadata for testing -func (t *Test) GetArchivalMetadata() archiver.ArchivalMetadata { - return t.ArchivalMetadata -} - -// GetArchiverProvider for testing -func (t *Test) GetArchiverProvider() provider.ArchiverProvider { - return t.ArchiverProvider -} - -// membership infos - -// GetMembershipMonitor for testing -func (t *Test) GetMembershipMonitor() membership.Monitor { - return t.MembershipMonitor -} - -// GetHostInfoProvider for testing -func (t *Test) GetHostInfoProvider() membership.HostInfoProvider { - return t.HostInfoProvider -} - -// GetFrontendServiceResolver for testing -func (t *Test) GetFrontendServiceResolver() membership.ServiceResolver { - return t.FrontendServiceResolver -} - -// GetMatchingServiceResolver for testing -func (t *Test) GetMatchingServiceResolver() membership.ServiceResolver { - return t.MatchingServiceResolver -} - -// GetHistoryServiceResolver for testing -func (t *Test) GetHistoryServiceResolver() membership.ServiceResolver { - return t.HistoryServiceResolver -} - -// GetWorkerServiceResolver for testing -func (t *Test) GetWorkerServiceResolver() membership.ServiceResolver { - return t.WorkerServiceResolver -} - -// internal services clients - -// GetSDKClientFactory for testing -func (t *Test) GetSDKClientFactory() sdk.ClientFactory { - return t.SDKClientFactory -} - -// GetFrontendClient for testing -func (t *Test) GetFrontendClient() workflowservice.WorkflowServiceClient { - return t.FrontendClient -} - -// GetMatchingRawClient for testing -func (t *Test) GetMatchingRawClient() matchingservice.MatchingServiceClient { - return t.MatchingClient -} - -// GetMatchingClient for testing -func (t *Test) GetMatchingClient() matchingservice.MatchingServiceClient { - return t.MatchingClient -} - -// GetHistoryRawClient for testing -func (t *Test) GetHistoryRawClient() historyservice.HistoryServiceClient { - return t.HistoryClient -} - -// GetHistoryClient for testing -func (t *Test) GetHistoryClient() historyservice.HistoryServiceClient { - return t.HistoryClient -} - -// GetRemoteAdminClient for testing -func (t *Test) GetRemoteAdminClient( - cluster string, -) adminservice.AdminServiceClient { - return t.RemoteAdminClient -} - -// GetRemoteFrontendClient for testing -func (t *Test) GetRemoteFrontendClient( - cluster string, -) workflowservice.WorkflowServiceClient { - return t.RemoteFrontendClient -} - -// GetClientBean for testing -func (t *Test) GetClientBean() client.Bean { - return t.ClientBean -} - -// GetClientFactory for testing -func (t *Test) GetClientFactory() client.Factory { - return t.ClientFactory -} - -// 
GetVisibilityManager for testing -func (t *Test) GetVisibilityManager() manager.VisibilityManager { - return t.VisibilityManager -} - -// persistence clients - -// GetMetadataManager for testing -func (t *Test) GetMetadataManager() persistence.MetadataManager { - return t.MetadataMgr -} - -// GetTaskManager for testing -func (t *Test) GetTaskManager() persistence.TaskManager { - return t.TaskMgr -} - -// GetNamespaceReplicationQueue for testing -func (t *Test) GetNamespaceReplicationQueue() persistence.NamespaceReplicationQueue { - // user should implement this method for test - return t.NamespaceReplicationQueue -} - -// GetShardManager for testing -func (t *Test) GetShardManager() persistence.ShardManager { - return t.ShardMgr -} - -// GetExecutionManager for testing -func (t *Test) GetExecutionManager() persistence.ExecutionManager { - return t.ExecutionMgr -} - -// GetPersistenceBean for testing -func (t *Test) GetPersistenceBean() persistenceClient.Bean { - return t.PersistenceBean -} - -// loggers - -// GetLogger for testing -func (t *Test) GetLogger() log.Logger { - return t.Logger -} - -// GetThrottledLogger for testing -func (t *Test) GetThrottledLogger() log.Logger { - return t.Logger -} - -// GetGRPCListener for testing -func (t *Test) GetGRPCListener() net.Listener { - panic("user should implement this method for test") -} - -func (t *Test) GetSearchAttributesProvider() searchattribute.Provider { - return t.SearchAttributesProvider -} - -func (t *Test) GetSearchAttributesManager() searchattribute.Manager { - return t.SearchAttributesManager -} - -func (t *Test) GetSearchAttributesMapperProvider() searchattribute.MapperProvider { - return t.SearchAttributesMapperProvider -} diff -Nru temporal-1.21.5-1/src/common/resourcetest/test_resource.go temporal-1.22.5/src/common/resourcetest/test_resource.go --- temporal-1.21.5-1/src/common/resourcetest/test_resource.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/resourcetest/test_resource.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,449 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package resourcetest + +import ( + "net" + + "github.com/golang/mock/gomock" + "github.com/uber-go/tally/v4" + + "go.temporal.io/api/workflowservice/v1" + "go.temporal.io/api/workflowservicemock/v1" + + "go.temporal.io/server/api/adminservice/v1" + "go.temporal.io/server/api/adminservicemock/v1" + "go.temporal.io/server/api/historyservice/v1" + "go.temporal.io/server/api/historyservicemock/v1" + "go.temporal.io/server/api/matchingservice/v1" + "go.temporal.io/server/api/matchingservicemock/v1" + "go.temporal.io/server/client" + "go.temporal.io/server/common/archiver" + "go.temporal.io/server/common/archiver/provider" + "go.temporal.io/server/common/clock" + "go.temporal.io/server/common/cluster" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/membership" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/namespace" + "go.temporal.io/server/common/persistence" + persistenceClient "go.temporal.io/server/common/persistence/client" + "go.temporal.io/server/common/persistence/serialization" + "go.temporal.io/server/common/persistence/visibility/manager" + esclient "go.temporal.io/server/common/persistence/visibility/store/elasticsearch/client" + "go.temporal.io/server/common/primitives" + "go.temporal.io/server/common/sdk" + "go.temporal.io/server/common/searchattribute" +) + +// TODO: replace with test specific Fx + +type ( + // Test is the test implementation used for testing + Test struct { + MetricsScope tally.Scope + ClusterMetadata *cluster.MockMetadata + SearchAttributesProvider *searchattribute.MockProvider + SearchAttributesManager *searchattribute.MockManager + SearchAttributesMapperProvider *searchattribute.MockMapperProvider + + // other common resources + + NamespaceCache *namespace.MockRegistry + TimeSource clock.TimeSource + PayloadSerializer serialization.Serializer + MetricsHandler metrics.Handler + ArchivalMetadata archiver.MetadataMock + ArchiverProvider *provider.MockArchiverProvider + + // membership infos + + MembershipMonitor *membership.MockMonitor + HostInfoProvider *membership.MockHostInfoProvider + FrontendServiceResolver *membership.MockServiceResolver + MatchingServiceResolver *membership.MockServiceResolver + HistoryServiceResolver *membership.MockServiceResolver + WorkerServiceResolver *membership.MockServiceResolver + + // internal services clients + + SDKClientFactory *sdk.MockClientFactory + FrontendClient *workflowservicemock.MockWorkflowServiceClient + MatchingClient *matchingservicemock.MockMatchingServiceClient + HistoryClient *historyservicemock.MockHistoryServiceClient + RemoteAdminClient *adminservicemock.MockAdminServiceClient + RemoteFrontendClient *workflowservicemock.MockWorkflowServiceClient + ClientBean *client.MockBean + ClientFactory *client.MockFactory + ESClient *esclient.MockClient + VisibilityManager *manager.MockVisibilityManager + + // persistence clients + + MetadataMgr *persistence.MockMetadataManager + ClusterMetadataMgr *persistence.MockClusterMetadataManager + TaskMgr *persistence.MockTaskManager + NamespaceReplicationQueue persistence.NamespaceReplicationQueue + ShardMgr *persistence.MockShardManager + ExecutionMgr *persistence.MockExecutionManager + PersistenceBean *persistenceClient.MockBean + + Logger log.Logger + } +) + +const ( + testHostName = "test_host" +) + +var testHostInfo = membership.NewHostInfoFromAddress(testHostName) + +// NewTest returns a new test resource instance +func NewTest(controller *gomock.Controller, serviceName primitives.ServiceName) *Test { + logger := 
log.NewTestLogger() + + frontendClient := workflowservicemock.NewMockWorkflowServiceClient(controller) + matchingClient := matchingservicemock.NewMockMatchingServiceClient(controller) + historyClient := historyservicemock.NewMockHistoryServiceClient(controller) + remoteFrontendClient := workflowservicemock.NewMockWorkflowServiceClient(controller) + remoteAdminClient := adminservicemock.NewMockAdminServiceClient(controller) + clusterMetadataManager := persistence.NewMockClusterMetadataManager(controller) + clientBean := client.NewMockBean(controller) + clientBean.EXPECT().GetFrontendClient().Return(frontendClient).AnyTimes() + clientBean.EXPECT().GetMatchingClient(gomock.Any()).Return(matchingClient, nil).AnyTimes() + clientBean.EXPECT().GetHistoryClient().Return(historyClient).AnyTimes() + clientBean.EXPECT().GetRemoteAdminClient(gomock.Any()).Return(remoteAdminClient, nil).AnyTimes() + clientBean.EXPECT().GetRemoteFrontendClient(gomock.Any()).Return(nil, remoteFrontendClient, nil).AnyTimes() + clientFactory := client.NewMockFactory(controller) + + metadataMgr := persistence.NewMockMetadataManager(controller) + taskMgr := persistence.NewMockTaskManager(controller) + shardMgr := persistence.NewMockShardManager(controller) + executionMgr := persistence.NewMockExecutionManager(controller) + executionMgr.EXPECT().GetHistoryBranchUtil().Return(&persistence.HistoryBranchUtilImpl{}).AnyTimes() + namespaceReplicationQueue := persistence.NewMockNamespaceReplicationQueue(controller) + namespaceReplicationQueue.EXPECT().Start().AnyTimes() + namespaceReplicationQueue.EXPECT().Stop().AnyTimes() + persistenceBean := persistenceClient.NewMockBean(controller) + persistenceBean.EXPECT().GetMetadataManager().Return(metadataMgr).AnyTimes() + persistenceBean.EXPECT().GetTaskManager().Return(taskMgr).AnyTimes() + persistenceBean.EXPECT().GetShardManager().Return(shardMgr).AnyTimes() + persistenceBean.EXPECT().GetExecutionManager().Return(executionMgr).AnyTimes() + persistenceBean.EXPECT().GetNamespaceReplicationQueue().Return(namespaceReplicationQueue).AnyTimes() + persistenceBean.EXPECT().GetClusterMetadataManager().Return(clusterMetadataManager).AnyTimes() + + membershipMonitor := membership.NewMockMonitor(controller) + hostInfoProvider := membership.NewMockHostInfoProvider(controller) + frontendServiceResolver := membership.NewMockServiceResolver(controller) + matchingServiceResolver := membership.NewMockServiceResolver(controller) + historyServiceResolver := membership.NewMockServiceResolver(controller) + workerServiceResolver := membership.NewMockServiceResolver(controller) + membershipMonitor.EXPECT().GetResolver(primitives.FrontendService).Return(frontendServiceResolver, nil).AnyTimes() + membershipMonitor.EXPECT().GetResolver(primitives.InternalFrontendService).Return(nil, membership.ErrUnknownService).AnyTimes() + membershipMonitor.EXPECT().GetResolver(primitives.MatchingService).Return(matchingServiceResolver, nil).AnyTimes() + membershipMonitor.EXPECT().GetResolver(primitives.HistoryService).Return(historyServiceResolver, nil).AnyTimes() + membershipMonitor.EXPECT().GetResolver(primitives.WorkerService).Return(workerServiceResolver, nil).AnyTimes() + membershipMonitor.EXPECT().WaitUntilInitialized(gomock.Any()).Return(nil).AnyTimes() + + scope := tally.NewTestScope("test", nil) + metricsHandler := metrics.NewTallyMetricsHandler(metrics.ClientConfig{}, scope).WithTags( + metrics.ServiceNameTag(serviceName), + ) + + return &Test{ + MetricsScope: scope, + ClusterMetadata: 
cluster.NewMockMetadata(controller), + SearchAttributesProvider: searchattribute.NewMockProvider(controller), + SearchAttributesManager: searchattribute.NewMockManager(controller), + SearchAttributesMapperProvider: searchattribute.NewMockMapperProvider(controller), + + // other common resources + + NamespaceCache: namespace.NewMockRegistry(controller), + TimeSource: clock.NewRealTimeSource(), + PayloadSerializer: serialization.NewSerializer(), + MetricsHandler: metricsHandler, + ArchivalMetadata: archiver.NewMetadataMock(controller), + ArchiverProvider: provider.NewMockArchiverProvider(controller), + + // membership infos + + MembershipMonitor: membershipMonitor, + HostInfoProvider: hostInfoProvider, + FrontendServiceResolver: frontendServiceResolver, + MatchingServiceResolver: matchingServiceResolver, + HistoryServiceResolver: historyServiceResolver, + WorkerServiceResolver: workerServiceResolver, + + // internal services clients + + SDKClientFactory: sdk.NewMockClientFactory(controller), + FrontendClient: frontendClient, + MatchingClient: matchingClient, + HistoryClient: historyClient, + RemoteAdminClient: remoteAdminClient, + RemoteFrontendClient: remoteFrontendClient, + ClientBean: clientBean, + ClientFactory: clientFactory, + ESClient: esclient.NewMockClient(controller), + VisibilityManager: manager.NewMockVisibilityManager(controller), + + // persistence clients + + MetadataMgr: metadataMgr, + ClusterMetadataMgr: clusterMetadataManager, + TaskMgr: taskMgr, + NamespaceReplicationQueue: namespaceReplicationQueue, + ShardMgr: shardMgr, + ExecutionMgr: executionMgr, + PersistenceBean: persistenceBean, + + // logger + + Logger: logger, + } +} + +// Start for testing +func (t *Test) Start() { +} + +// Stop for testing +func (t *Test) Stop() { +} + +// static infos + +// GetServiceName for testing +func (t *Test) GetServiceName() string { + panic("user should implement this method for test") +} + +// GetHostName for testing +func (t *Test) GetHostName() string { + return testHostInfo.Identity() +} + +// GetHostInfo for testing +func (t *Test) GetHostInfo() membership.HostInfo { + return testHostInfo +} + +// GetClusterMetadata for testing +func (t *Test) GetClusterMetadata() cluster.Metadata { + return t.ClusterMetadata +} + +// GetClusterMetadata for testing +func (t *Test) GetClusterMetadataManager() persistence.ClusterMetadataManager { + return t.ClusterMetadataMgr +} + +// other common resources + +// GetNamespaceRegistry for testing +func (t *Test) GetNamespaceRegistry() namespace.Registry { + return t.NamespaceCache +} + +// GetTimeSource for testing +func (t *Test) GetTimeSource() clock.TimeSource { + return t.TimeSource +} + +// GetPayloadSerializer for testing +func (t *Test) GetPayloadSerializer() serialization.Serializer { + return t.PayloadSerializer +} + +// GetMetricsHandler for testing +func (t *Test) GetMetricsHandler() metrics.Handler { + return t.MetricsHandler +} + +// GetArchivalMetadata for testing +func (t *Test) GetArchivalMetadata() archiver.ArchivalMetadata { + return t.ArchivalMetadata +} + +// GetArchiverProvider for testing +func (t *Test) GetArchiverProvider() provider.ArchiverProvider { + return t.ArchiverProvider +} + +// membership infos + +// GetMembershipMonitor for testing +func (t *Test) GetMembershipMonitor() membership.Monitor { + return t.MembershipMonitor +} + +// GetHostInfoProvider for testing +func (t *Test) GetHostInfoProvider() membership.HostInfoProvider { + return t.HostInfoProvider +} + +// GetFrontendServiceResolver for testing +func (t 
*Test) GetFrontendServiceResolver() membership.ServiceResolver { + return t.FrontendServiceResolver +} + +// GetMatchingServiceResolver for testing +func (t *Test) GetMatchingServiceResolver() membership.ServiceResolver { + return t.MatchingServiceResolver +} + +// GetHistoryServiceResolver for testing +func (t *Test) GetHistoryServiceResolver() membership.ServiceResolver { + return t.HistoryServiceResolver +} + +// GetWorkerServiceResolver for testing +func (t *Test) GetWorkerServiceResolver() membership.ServiceResolver { + return t.WorkerServiceResolver +} + +// internal services clients + +// GetSDKClientFactory for testing +func (t *Test) GetSDKClientFactory() sdk.ClientFactory { + return t.SDKClientFactory +} + +// GetFrontendClient for testing +func (t *Test) GetFrontendClient() workflowservice.WorkflowServiceClient { + return t.FrontendClient +} + +// GetMatchingRawClient for testing +func (t *Test) GetMatchingRawClient() matchingservice.MatchingServiceClient { + return t.MatchingClient +} + +// GetMatchingClient for testing +func (t *Test) GetMatchingClient() matchingservice.MatchingServiceClient { + return t.MatchingClient +} + +// GetHistoryRawClient for testing +func (t *Test) GetHistoryRawClient() historyservice.HistoryServiceClient { + return t.HistoryClient +} + +// GetHistoryClient for testing +func (t *Test) GetHistoryClient() historyservice.HistoryServiceClient { + return t.HistoryClient +} + +// GetRemoteAdminClient for testing +func (t *Test) GetRemoteAdminClient( + cluster string, +) adminservice.AdminServiceClient { + return t.RemoteAdminClient +} + +// GetRemoteFrontendClient for testing +func (t *Test) GetRemoteFrontendClient( + cluster string, +) workflowservice.WorkflowServiceClient { + return t.RemoteFrontendClient +} + +// GetClientBean for testing +func (t *Test) GetClientBean() client.Bean { + return t.ClientBean +} + +// GetClientFactory for testing +func (t *Test) GetClientFactory() client.Factory { + return t.ClientFactory +} + +// GetVisibilityManager for testing +func (t *Test) GetVisibilityManager() manager.VisibilityManager { + return t.VisibilityManager +} + +// persistence clients + +// GetMetadataManager for testing +func (t *Test) GetMetadataManager() persistence.MetadataManager { + return t.MetadataMgr +} + +// GetTaskManager for testing +func (t *Test) GetTaskManager() persistence.TaskManager { + return t.TaskMgr +} + +// GetNamespaceReplicationQueue for testing +func (t *Test) GetNamespaceReplicationQueue() persistence.NamespaceReplicationQueue { + // user should implement this method for test + return t.NamespaceReplicationQueue +} + +// GetShardManager for testing +func (t *Test) GetShardManager() persistence.ShardManager { + return t.ShardMgr +} + +// GetExecutionManager for testing +func (t *Test) GetExecutionManager() persistence.ExecutionManager { + return t.ExecutionMgr +} + +// GetPersistenceBean for testing +func (t *Test) GetPersistenceBean() persistenceClient.Bean { + return t.PersistenceBean +} + +// loggers + +// GetLogger for testing +func (t *Test) GetLogger() log.Logger { + return t.Logger +} + +// GetThrottledLogger for testing +func (t *Test) GetThrottledLogger() log.Logger { + return t.Logger +} + +// GetGRPCListener for testing +func (t *Test) GetGRPCListener() net.Listener { + panic("user should implement this method for test") +} + +func (t *Test) GetSearchAttributesProvider() searchattribute.Provider { + return t.SearchAttributesProvider +} + +func (t *Test) GetSearchAttributesManager() searchattribute.Manager { + return 
t.SearchAttributesManager +} + +func (t *Test) GetSearchAttributesMapperProvider() searchattribute.MapperProvider { + return t.SearchAttributesMapperProvider +} diff -Nru temporal-1.21.5-1/src/common/rpc/encryption/fixedTLSConfigProvider.go temporal-1.22.5/src/common/rpc/encryption/fixedTLSConfigProvider.go --- temporal-1.21.5-1/src/common/rpc/encryption/fixedTLSConfigProvider.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/rpc/encryption/fixedTLSConfigProvider.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,79 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package encryption + +import ( + "crypto/tls" + "time" +) + +// FixedTLSConfigProvider is a [TLSConfigProvider] that is for fixed sets of TLS +// configs. This is usually only used for testing. + +type FixedTLSConfigProvider struct { + InternodeServerConfig *tls.Config + InternodeClientConfig *tls.Config + FrontendServerConfig *tls.Config + FrontendClientConfig *tls.Config + RemoteClusterClientConfigs map[string]*tls.Config + CertExpirationChecker CertExpirationChecker +} + +var _ TLSConfigProvider = (*FixedTLSConfigProvider)(nil) + +// GetInternodeServerConfig implements [TLSConfigProvider.GetInternodeServerConfig]. +func (f *FixedTLSConfigProvider) GetInternodeServerConfig() (*tls.Config, error) { + return f.InternodeServerConfig, nil +} + +// GetInternodeClientConfig implements [TLSConfigProvider.GetInternodeClientConfig]. +func (f *FixedTLSConfigProvider) GetInternodeClientConfig() (*tls.Config, error) { + return f.InternodeClientConfig, nil +} + +// GetFrontendServerConfig implements [TLSConfigProvider.GetFrontendServerConfig]. +func (f *FixedTLSConfigProvider) GetFrontendServerConfig() (*tls.Config, error) { + return f.FrontendServerConfig, nil +} + +// GetFrontendClientConfig implements [TLSConfigProvider.GetFrontendClientConfig]. +func (f *FixedTLSConfigProvider) GetFrontendClientConfig() (*tls.Config, error) { + return f.FrontendClientConfig, nil +} + +// GetRemoteClusterClientConfig implements [TLSConfigProvider.GetRemoteClusterClientConfig]. +func (f *FixedTLSConfigProvider) GetRemoteClusterClientConfig(hostname string) (*tls.Config, error) { + return f.RemoteClusterClientConfigs[hostname], nil +} + +// GetExpiringCerts implements [TLSConfigProvider.GetExpiringCerts]. 
+func (f *FixedTLSConfigProvider) GetExpiringCerts( + timeWindow time.Duration, +) (expiring CertExpirationMap, expired CertExpirationMap, err error) { + if f.CertExpirationChecker != nil { + return f.CertExpirationChecker.GetExpiringCerts(timeWindow) + } + return nil, nil, nil +} diff -Nru temporal-1.21.5-1/src/common/rpc/encryption/localStoreCertProvider.go temporal-1.22.5/src/common/rpc/encryption/localStoreCertProvider.go --- temporal-1.21.5-1/src/common/rpc/encryption/localStoreCertProvider.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/rpc/encryption/localStoreCertProvider.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,610 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
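For the FixedTLSConfigProvider added above: it simply hands back whichever *tls.Config values it was constructed with, so GetRemoteClusterClientConfig returns nil for unknown hosts and GetExpiringCerts reports nothing unless a CertExpirationChecker is set. A minimal usage sketch, assuming the import path go.temporal.io/server/common/rpc/encryption (inferred from the file location); the helper name newFixedProvider is hypothetical:

package example

import (
	"crypto/tls"

	"go.temporal.io/server/common/rpc/encryption"
)

// newFixedProvider (hypothetical helper) plugs one fixed *tls.Config into the
// frontend and internode slots. RemoteClusterClientConfigs and
// CertExpirationChecker are left nil, so remote-cluster lookups return nil
// and expiration checks report nothing.
func newFixedProvider(cfg *tls.Config) encryption.TLSConfigProvider {
	return &encryption.FixedTLSConfigProvider{
		InternodeServerConfig: cfg,
		InternodeClientConfig: cfg,
		FrontendServerConfig:  cfg,
		FrontendClientConfig:  cfg,
	}
}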
- -package encryption - -import ( - "bytes" - "crypto/md5" - "crypto/tls" - "crypto/x509" - "encoding/base64" - "encoding/pem" - "errors" - "fmt" - "os" - "sync" - "time" - - "go.temporal.io/server/common/config" - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/log/tag" -) - -var _ CertProvider = (*localStoreCertProvider)(nil) -var _ CertExpirationChecker = (*localStoreCertProvider)(nil) - -type certCache struct { - serverCert *tls.Certificate - workerCert *tls.Certificate - clientCAPool *x509.CertPool - serverCAPool *x509.CertPool - serverCAsWorkerPool *x509.CertPool - clientCACerts []*x509.Certificate // copies of certs in the clientCAPool CertPool for expiration checks - serverCACerts []*x509.Certificate // copies of certs in the serverCAPool CertPool for expiration checks - serverCACertsWorker []*x509.Certificate // copies of certs in the serverCAsWorkerPool CertPool for expiration checks -} - -type localStoreCertProvider struct { - sync.RWMutex - - tlsSettings *config.GroupTLS - workerTLSSettings *config.WorkerTLS - isLegacyWorkerConfig bool - legacyWorkerSettings *config.ClientTLS - - certs *certCache - refreshInterval time.Duration - - ticker *time.Ticker - stop chan bool - logger log.Logger -} - -type loadOrDecodeDataFunc func(item string) ([]byte, error) - -type tlsCertFetcher func() (*tls.Certificate, error) - -func (s *localStoreCertProvider) initialize() { - - if s.refreshInterval != 0 { - s.stop = make(chan bool) - s.ticker = time.NewTicker(s.refreshInterval) - go s.refreshCerts() - } -} - -func NewLocalStoreCertProvider( - tlsSettings *config.GroupTLS, - workerTlsSettings *config.WorkerTLS, - legacyWorkerSettings *config.ClientTLS, - refreshInterval time.Duration, - logger log.Logger) CertProvider { - - provider := &localStoreCertProvider{ - tlsSettings: tlsSettings, - workerTLSSettings: workerTlsSettings, - legacyWorkerSettings: legacyWorkerSettings, - isLegacyWorkerConfig: legacyWorkerSettings != nil, - logger: logger, - refreshInterval: refreshInterval, - } - provider.initialize() - return provider -} - -func (s *localStoreCertProvider) Close() { - - if s.ticker != nil { - s.ticker.Stop() - } - if s.stop != nil { - s.stop <- true - close(s.stop) - } -} - -func (s *localStoreCertProvider) FetchServerCertificate() (*tls.Certificate, error) { - - if s.tlsSettings == nil { - return nil, nil - } - certs, err := s.getCerts() - if err != nil { - return nil, err - } - return certs.serverCert, nil -} - -func (s *localStoreCertProvider) FetchClientCAs() (*x509.CertPool, error) { - - if s.tlsSettings == nil { - return nil, nil - } - certs, err := s.getCerts() - if err != nil { - return nil, err - } - return certs.clientCAPool, nil -} - -func (s *localStoreCertProvider) FetchServerRootCAsForClient(isWorker bool) (*x509.CertPool, error) { - - clientSettings := s.getClientTLSSettings(isWorker) - if clientSettings == nil { - return nil, nil - } - certs, err := s.getCerts() - if err != nil { - return nil, err - } - - if isWorker { - return certs.serverCAsWorkerPool, nil - } - - return certs.serverCAPool, nil -} - -func (s *localStoreCertProvider) FetchClientCertificate(isWorker bool) (*tls.Certificate, error) { - - if !s.isTLSEnabled() { - return nil, nil - } - certs, err := s.getCerts() - if err != nil { - return nil, err - } - if isWorker { - return certs.workerCert, nil - } - return certs.serverCert, nil -} - -func (s *localStoreCertProvider) GetExpiringCerts(timeWindow time.Duration, -) (CertExpirationMap, CertExpirationMap, error) { - - expiring := 
make(CertExpirationMap) - expired := make(CertExpirationMap) - when := time.Now().UTC().Add(timeWindow) - - certs, err := s.getCerts() - if err != nil { - return nil, nil, err - } - - checkError := checkTLSCertForExpiration(certs.serverCert, when, expiring, expired) - err = appendError(err, checkError) - checkError = checkTLSCertForExpiration(certs.workerCert, when, expiring, expired) - err = appendError(err, checkError) - - checkCertsForExpiration(certs.clientCACerts, when, expiring, expired) - checkCertsForExpiration(certs.serverCACerts, when, expiring, expired) - checkCertsForExpiration(certs.serverCACertsWorker, when, expiring, expired) - - return expiring, expired, err -} - -func (s *localStoreCertProvider) getCerts() (*certCache, error) { - - s.RLock() - if s.certs != nil { - defer s.RUnlock() - return s.certs, nil - } - s.RUnlock() - s.Lock() - defer s.Unlock() - - if s.certs != nil { - return s.certs, nil - } - - newCerts, err := s.loadCerts() - if err != nil { - return nil, err - } - - if newCerts == nil { - s.certs = &certCache{} - } else { - s.certs = newCerts - } - return s.certs, nil -} - -func (s *localStoreCertProvider) loadCerts() (*certCache, error) { - - if !s.isTLSEnabled() { - return nil, nil - } - - newCerts := certCache{} - var err error - - if s.tlsSettings != nil { - newCerts.serverCert, err = s.fetchCertificate(s.tlsSettings.Server.CertFile, s.tlsSettings.Server.CertData, - s.tlsSettings.Server.KeyFile, s.tlsSettings.Server.KeyData) - if err != nil { - return nil, err - } - - certPool, certs, err := s.fetchCAs(s.tlsSettings.Server.ClientCAFiles, s.tlsSettings.Server.ClientCAData, - "cannot specify both clientCAFiles and clientCAData properties") - if err != nil { - return nil, err - } - newCerts.clientCAPool = certPool - newCerts.clientCACerts = certs - } - - if s.isLegacyWorkerConfig { - newCerts.workerCert = newCerts.serverCert - } else { - if s.workerTLSSettings != nil { - newCerts.workerCert, err = s.fetchCertificate(s.workerTLSSettings.CertFile, s.workerTLSSettings.CertData, - s.workerTLSSettings.KeyFile, s.workerTLSSettings.KeyData) - if err != nil { - return nil, err - } - } - } - - nonWorkerPool, nonWorkerCerts, err := s.loadServerCACerts(false) - if err != nil { - return nil, err - } - newCerts.serverCAPool = nonWorkerPool - newCerts.serverCACerts = nonWorkerCerts - - workerPool, workerCerts, err := s.loadServerCACerts(true) - if err != nil { - return nil, err - } - newCerts.serverCAsWorkerPool = workerPool - newCerts.serverCACertsWorker = workerCerts - - return &newCerts, nil -} - -func (s *localStoreCertProvider) fetchCertificate( - certFile string, certData string, - keyFile string, keyData string) (*tls.Certificate, error) { - if certFile == "" && certData == "" { - return nil, nil - } - - if certFile != "" && certData != "" { - return nil, errors.New("only one of certFile or certData properties should be spcified") - } - - var certBytes []byte - var keyBytes []byte - var err error - - if certFile != "" { - s.logger.Info("loading certificate from file", tag.TLSCertFile(certFile)) - certBytes, err = os.ReadFile(certFile) - if err != nil { - return nil, err - } - } else if certData != "" { - certBytes, err = base64.StdEncoding.DecodeString(certData) - if err != nil { - return nil, fmt.Errorf("TLS public certificate could not be decoded: %w", err) - } - } - - if keyFile != "" { - s.logger.Info("loading private key from file", tag.TLSKeyFile(keyFile)) - keyBytes, err = os.ReadFile(keyFile) - if err != nil { - return nil, err - } - } else if keyData != "" { 
- keyBytes, err = base64.StdEncoding.DecodeString(keyData) - if err != nil { - return nil, fmt.Errorf("TLS private key could not be decoded: %w", err) - } - } - - cert, err := tls.X509KeyPair(certBytes, keyBytes) - if err != nil { - return nil, fmt.Errorf("loading tls certificate failed: %v", err) - } - - return &cert, nil -} - -func (s *localStoreCertProvider) getClientTLSSettings(isWorker bool) *config.ClientTLS { - if isWorker && s.workerTLSSettings != nil { - return &s.workerTLSSettings.Client // explicit system worker case - } else if isWorker { - return s.legacyWorkerSettings // legacy config case when we use Frontend.Client settings - } else { - if s.tlsSettings == nil { - return nil - } - return &s.tlsSettings.Client // internode client case - } -} - -func (s *localStoreCertProvider) loadServerCACerts(isWorker bool) (*x509.CertPool, []*x509.Certificate, error) { - - clientSettings := s.getClientTLSSettings(isWorker) - if clientSettings == nil { - return nil, nil, nil - } - - return s.fetchCAs(clientSettings.RootCAFiles, clientSettings.RootCAData, - "cannot specify both rootCAFiles and rootCAData properties") -} - -func (s *localStoreCertProvider) fetchCAs( - files []string, - data []string, - duplicateErrorMessage string) (*x509.CertPool, []*x509.Certificate, error) { - if len(files) == 0 && len(data) == 0 { - return nil, nil, nil - } - - caPoolFromFiles, caCertsFromFiles, err := s.buildCAPoolFromFiles(files) - if err != nil { - return nil, nil, err - } - - caPoolFromData, caCertsFromData, err := buildCAPoolFromData(data) - if err != nil { - return nil, nil, err - } - - if caPoolFromFiles != nil && caPoolFromData != nil { - return nil, nil, errors.New(duplicateErrorMessage) - } - - var certPool *x509.CertPool - var certs []*x509.Certificate - - if caPoolFromData != nil { - certPool = caPoolFromData - certs = caCertsFromData - } else { - certPool = caPoolFromFiles - certs = caCertsFromFiles - } - - return certPool, certs, nil -} - -func checkTLSCertForExpiration( - cert *tls.Certificate, - when time.Time, - expiring CertExpirationMap, - expired CertExpirationMap, -) error { - - if cert == nil { - return nil - } - - x509cert, err := x509.ParseCertificate(cert.Certificate[0]) - if err != nil { - return err - } - checkCertForExpiration(x509cert, when, expiring, expired) - return nil -} - -func checkCertsForExpiration( - certs []*x509.Certificate, - time time.Time, - expiring CertExpirationMap, - expired CertExpirationMap, -) { - - for _, cert := range certs { - checkCertForExpiration(cert, time, expiring, expired) - } -} - -func checkCertForExpiration( - cert *x509.Certificate, - pointInTime time.Time, - expiring CertExpirationMap, - expired CertExpirationMap, -) { - - if cert != nil && expiresBefore(cert, pointInTime) { - record := CertExpirationData{ - Thumbprint: md5.Sum(cert.Raw), - IsCA: cert.IsCA, - DNSNames: cert.DNSNames, - Expiration: cert.NotAfter, - } - if record.Expiration.Before(time.Now().UTC()) { - expired[record.Thumbprint] = record - } else { - expiring[record.Thumbprint] = record - } - } -} - -func expiresBefore(cert *x509.Certificate, pointInTime time.Time) bool { - return cert.NotAfter.Before(pointInTime) -} - -func buildCAPoolFromData(caData []string) (*x509.CertPool, []*x509.Certificate, error) { - - return buildCAPool(caData, base64.StdEncoding.DecodeString) -} - -func (s *localStoreCertProvider) buildCAPoolFromFiles(caFiles []string) (*x509.CertPool, []*x509.Certificate, error) { - if len(caFiles) == 0 { - return nil, nil, nil - } - - s.logger.Info("loading CA 
certs from", tag.TLSCertFiles(caFiles)) - return buildCAPool(caFiles, os.ReadFile) -} - -func buildCAPool(cas []string, getBytes loadOrDecodeDataFunc) (*x509.CertPool, []*x509.Certificate, error) { - - var caPool *x509.CertPool - var certs []*x509.Certificate - - for _, ca := range cas { - if ca == "" { - continue - } - - caBytes, err := getBytes(ca) - if err != nil { - return nil, nil, fmt.Errorf("failed to decode ca cert: %w", err) - } - - if caPool == nil { - caPool = x509.NewCertPool() - } - if !caPool.AppendCertsFromPEM(caBytes) { - return nil, nil, errors.New("unknown failure constructing cert pool for ca") - } - - cert, err := parseCert(caBytes) - if err != nil { - return nil, nil, fmt.Errorf("failed to parse x509 certificate: %w", err) - } - certs = append(certs, cert) - } - return caPool, certs, nil -} - -// logic borrowed from tls.X509KeyPair() -func parseCert(bytes []byte) (*x509.Certificate, error) { - - var certBytes [][]byte - for { - var certDERBlock *pem.Block - certDERBlock, bytes = pem.Decode(bytes) - if certDERBlock == nil { - break - } - if certDERBlock.Type == "CERTIFICATE" { - certBytes = append(certBytes, certDERBlock.Bytes) - } - } - - if len(certBytes) == 0 || len(certBytes[0]) == 0 { - return nil, fmt.Errorf("failed to decode PEM certificate data") - } - return x509.ParseCertificate(certBytes[0]) -} - -func appendError(aggregatedErr error, err error) error { - if aggregatedErr == nil { - return err - } - if err == nil { - return aggregatedErr - } - return fmt.Errorf("%v, %w", aggregatedErr, err) -} - -func (s *localStoreCertProvider) refreshCerts() { - - for { - select { - case <-s.stop: - return - case <-s.ticker.C: - } - - newCerts, err := s.loadCerts() - if err != nil { - s.logger.Error("failed to load certificates", tag.Error(err)) - continue - } - - s.RLock() - currentCerts := s.certs - s.RUnlock() - if currentCerts.isEqual(newCerts) { - continue - } - - s.logger.Info("loaded new TLS certificates") - s.Lock() - s.certs = newCerts - s.Unlock() - } -} - -func (s *localStoreCertProvider) isTLSEnabled() bool { - return s.tlsSettings != nil || s.workerTLSSettings != nil -} - -func (c *certCache) isEqual(other *certCache) bool { - - if c == other { - return true - } - if c == nil || other == nil { - return false - } - - if !equalTLSCerts(c.serverCert, other.serverCert) || - !equalTLSCerts(c.workerCert, other.workerCert) || - !equalX509(c.clientCACerts, other.clientCACerts) || - !equalX509(c.serverCACerts, other.serverCACerts) || - !equalX509(c.serverCACertsWorker, other.serverCACertsWorker) { - return false - } - return true -} - -func equal(a, b [][]byte) bool { - if len(a) != len(b) { - return false - } - for i := range a { - if !bytes.Equal(a[i], b[i]) { - return false - } - } - return true -} - -func equalX509(a, b []*x509.Certificate) bool { - if len(a) != len(b) { - return false - } - for i := range a { - if !a[i].Equal(b[i]) { - return false - } - } - return true -} - -func equalTLSCerts(a, b *tls.Certificate) bool { - if a != nil { - if b == nil || !equal(a.Certificate, b.Certificate) { - return false - } - } else { - if b != nil { - return false - } - } - return true -} diff -Nru temporal-1.21.5-1/src/common/rpc/encryption/localStoreCertProvider_test.go temporal-1.22.5/src/common/rpc/encryption/localStoreCertProvider_test.go --- temporal-1.21.5-1/src/common/rpc/encryption/localStoreCertProvider_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/rpc/encryption/localStoreCertProvider_test.go 1970-01-01 00:00:00.000000000 +0000 @@ 
-1,49 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package encryption - -import ( - "errors" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestAppendError(t *testing.T) { - assert := assert.New(t) - err1 := errors.New("error1") - err2 := errors.New("error2") - - err := appendError(nil, err1) - assert.Equal(err1, err) - assert.Equal("error1", err.Error()) - err = appendError(err1, nil) - assert.Equal(err1, err) - assert.Equal("error1", err.Error()) - - err = appendError(err1, err2) - assert.Equal(err2, errors.Unwrap(err)) - assert.Equal("error1, error2", err.Error()) -} diff -Nru temporal-1.21.5-1/src/common/rpc/encryption/localStorePerHostCertProviderMap.go temporal-1.22.5/src/common/rpc/encryption/localStorePerHostCertProviderMap.go --- temporal-1.21.5-1/src/common/rpc/encryption/localStorePerHostCertProviderMap.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/rpc/encryption/localStorePerHostCertProviderMap.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,109 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
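The TestAppendError test removed here (it reappears verbatim later in this patch as local_store_cert_provider_test.go) pins down how appendError aggregates errors: messages are comma-joined, but only the most recently appended error remains unwrappable. A short in-package sketch of that behaviour; it has to live in package encryption because appendError is unexported, and the error names are placeholders:

package encryption

import (
	"errors"
	"fmt"
)

// exampleAppendError (hypothetical) aggregates several check errors the way
// GetExpiringCerts does.
func exampleAppendError() {
	errA := errors.New("errA")
	errB := errors.New("errB")
	errC := errors.New("errC")

	var agg error
	for _, e := range []error{errA, errB, errC} {
		agg = appendError(agg, e)
	}

	fmt.Println(agg.Error())                // "errA, errB, errC": messages are comma-joined via %v
	fmt.Println(errors.Unwrap(agg) == errC) // true: only the last error stays wrapped via %w
}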
- -package encryption - -import ( - "strings" - "time" - - "go.temporal.io/server/common/config" - "go.temporal.io/server/common/log" -) - -var _ PerHostCertProviderMap = (*localStorePerHostCertProviderMap)(nil) -var _ CertExpirationChecker = (*localStorePerHostCertProviderMap)(nil) - -type localStorePerHostCertProviderMap struct { - certProviderCache map[string]CertProvider - clientAuthCache map[string]bool -} - -func newLocalStorePerHostCertProviderMap( - overrides map[string]config.ServerTLS, - certProviderFactory CertProviderFactory, - refreshInterval time.Duration, - logger log.Logger, -) *localStorePerHostCertProviderMap { - - providerMap := &localStorePerHostCertProviderMap{} - if overrides == nil { - return providerMap - } - - providerMap.certProviderCache = make(map[string]CertProvider, len(overrides)) - providerMap.clientAuthCache = make(map[string]bool, len(overrides)) - - for host, settings := range overrides { - lcHost := strings.ToLower(host) - - provider := certProviderFactory(&config.GroupTLS{Server: settings}, nil, nil, refreshInterval, logger) - providerMap.certProviderCache[lcHost] = provider - providerMap.clientAuthCache[lcHost] = settings.RequireClientAuth - } - - return providerMap -} - -// GetCertProvider for a given host name returns a cert provider (nil if not found) and if client authentication is required -func (f *localStorePerHostCertProviderMap) GetCertProvider(hostName string) (CertProvider, bool, error) { - - lcHostName := strings.ToLower(hostName) - - if f.certProviderCache == nil { - return nil, true, nil - } - cachedCertProvider, ok := f.certProviderCache[lcHostName] - if !ok { - return nil, true, nil - } - clientAuthRequired := f.clientAuthCache[lcHostName] - return cachedCertProvider, clientAuthRequired, nil -} - -func (f *localStorePerHostCertProviderMap) GetExpiringCerts(timeWindow time.Duration, -) (expiring CertExpirationMap, expired CertExpirationMap, err error) { - - expiring = make(CertExpirationMap) - expired = make(CertExpirationMap) - - for _, provider := range f.certProviderCache { - - providerExpiring, providerExpired, providerError := provider.GetExpiringCerts(timeWindow) - mergeMaps(expiring, providerExpiring) - mergeMaps(expired, providerExpired) - if providerError != nil { - err = appendError(err, providerError) - } - } - return expiring, expired, err -} - -func (f *localStorePerHostCertProviderMap) NumberOfHosts() int { - - if f.certProviderCache != nil { - return len(f.certProviderCache) - } - return 0 -} diff -Nru temporal-1.21.5-1/src/common/rpc/encryption/localStoreTlsProvider.go temporal-1.22.5/src/common/rpc/encryption/localStoreTlsProvider.go --- temporal-1.21.5-1/src/common/rpc/encryption/localStoreTlsProvider.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/rpc/encryption/localStoreTlsProvider.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,502 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package encryption - -import ( - "crypto/tls" - "crypto/x509" - "fmt" - "sync" - "time" - - "go.temporal.io/server/common/log/tag" - "go.temporal.io/server/common/metrics" - - "go.temporal.io/server/common/auth" - "go.temporal.io/server/common/config" - "go.temporal.io/server/common/log" -) - -type CertProviderFactory func( - tlsSettings *config.GroupTLS, - workerTlsSettings *config.WorkerTLS, - legacyWorkerSettings *config.ClientTLS, - refreshInterval time.Duration, - logger log.Logger) CertProvider - -type localStoreTlsProvider struct { - sync.RWMutex - - settings *config.RootTLS - - internodeCertProvider CertProvider - internodeClientCertProvider CertProvider - frontendCertProvider CertProvider - workerCertProvider CertProvider - remoteClusterClientCertProvider map[string]CertProvider - frontendPerHostCertProviderMap *localStorePerHostCertProviderMap - - cachedInternodeServerConfig *tls.Config - cachedInternodeClientConfig *tls.Config - cachedFrontendServerConfig *tls.Config - cachedFrontendClientConfig *tls.Config - cachedRemoteClusterClientConfig map[string]*tls.Config - - ticker *time.Ticker - logger log.Logger - stop chan bool - metricsHandler metrics.Handler -} - -var _ TLSConfigProvider = (*localStoreTlsProvider)(nil) -var _ CertExpirationChecker = (*localStoreTlsProvider)(nil) - -func NewLocalStoreTlsProvider(tlsConfig *config.RootTLS, metricsHandler metrics.Handler, logger log.Logger, certProviderFactory CertProviderFactory, -) (TLSConfigProvider, error) { - - internodeProvider := certProviderFactory(&tlsConfig.Internode, nil, nil, tlsConfig.RefreshInterval, logger) - var workerProvider CertProvider - if isSystemWorker(tlsConfig) { // explicit system worker config - workerProvider = certProviderFactory(nil, &tlsConfig.SystemWorker, nil, tlsConfig.RefreshInterval, logger) - } else { // legacy implicit system worker config case - internodeWorkerProvider := certProviderFactory(&tlsConfig.Internode, nil, &tlsConfig.Frontend.Client, tlsConfig.RefreshInterval, logger) - workerProvider = internodeWorkerProvider - } - - remoteClusterClientCertProvider := make(map[string]CertProvider) - for hostname, groupTLS := range tlsConfig.RemoteClusters { - remoteClusterClientCertProvider[hostname] = certProviderFactory(&groupTLS, nil, nil, tlsConfig.RefreshInterval, logger) - } - - provider := &localStoreTlsProvider{ - internodeCertProvider: internodeProvider, - internodeClientCertProvider: internodeProvider, - 
frontendCertProvider: certProviderFactory(&tlsConfig.Frontend, nil, nil, tlsConfig.RefreshInterval, logger), - workerCertProvider: workerProvider, - frontendPerHostCertProviderMap: newLocalStorePerHostCertProviderMap( - tlsConfig.Frontend.PerHostOverrides, certProviderFactory, tlsConfig.RefreshInterval, logger), - remoteClusterClientCertProvider: remoteClusterClientCertProvider, - RWMutex: sync.RWMutex{}, - settings: tlsConfig, - metricsHandler: metricsHandler, - logger: logger, - cachedRemoteClusterClientConfig: make(map[string]*tls.Config), - } - provider.initialize() - return provider, nil -} - -func (s *localStoreTlsProvider) initialize() { - period := s.settings.ExpirationChecks.CheckInterval - if period != 0 { - s.stop = make(chan bool) - s.ticker = time.NewTicker(period) - s.checkCertExpiration() // perform initial check to emit metrics and logs right away - go s.timerCallback() - } -} - -func (s *localStoreTlsProvider) Close() { - - if s.ticker != nil { - s.ticker.Stop() - } - if s.stop != nil { - s.stop <- true - close(s.stop) - } -} - -func (s *localStoreTlsProvider) GetInternodeClientConfig() (*tls.Config, error) { - - client := &s.settings.Internode.Client - return s.getOrCreateConfig( - &s.cachedInternodeClientConfig, - func() (*tls.Config, error) { - return newClientTLSConfig(s.internodeClientCertProvider, client.ServerName, - s.settings.Internode.Server.RequireClientAuth, false, !client.DisableHostVerification) - }, - s.settings.Internode.IsClientEnabled(), - ) -} - -func (s *localStoreTlsProvider) GetFrontendClientConfig() (*tls.Config, error) { - - var client *config.ClientTLS - var useTLS bool - if isSystemWorker(s.settings) { - client = &s.settings.SystemWorker.Client - useTLS = true - } else { - client = &s.settings.Frontend.Client - useTLS = s.settings.Frontend.IsClientEnabled() - } - return s.getOrCreateConfig( - &s.cachedFrontendClientConfig, - func() (*tls.Config, error) { - return newClientTLSConfig(s.workerCertProvider, client.ServerName, - useTLS, true, !client.DisableHostVerification) - }, - useTLS, - ) -} - -func (s *localStoreTlsProvider) GetRemoteClusterClientConfig(hostname string) (*tls.Config, error) { - groupTLS, ok := s.settings.RemoteClusters[hostname] - if !ok { - return nil, nil - } - - return s.getOrCreateRemoteClusterClientConfig( - hostname, - func() (*tls.Config, error) { - return newClientTLSConfig( - s.remoteClusterClientCertProvider[hostname], - groupTLS.Client.ServerName, - groupTLS.Server.RequireClientAuth, - false, - !groupTLS.Client.DisableHostVerification) - }, - groupTLS.IsClientEnabled(), - ) -} - -func (s *localStoreTlsProvider) GetFrontendServerConfig() (*tls.Config, error) { - return s.getOrCreateConfig( - &s.cachedFrontendServerConfig, - func() (*tls.Config, error) { - return newServerTLSConfig(s.frontendCertProvider, s.frontendPerHostCertProviderMap, &s.settings.Frontend, s.logger) - }, - s.settings.Frontend.IsServerEnabled()) -} - -func (s *localStoreTlsProvider) GetInternodeServerConfig() (*tls.Config, error) { - return s.getOrCreateConfig( - &s.cachedInternodeServerConfig, - func() (*tls.Config, error) { - return newServerTLSConfig(s.internodeCertProvider, nil, &s.settings.Internode, s.logger) - }, - s.settings.Internode.IsServerEnabled()) -} - -func (s *localStoreTlsProvider) GetExpiringCerts(timeWindow time.Duration, -) (expiring CertExpirationMap, expired CertExpirationMap, err error) { - - expiring = make(CertExpirationMap, 0) - expired = make(CertExpirationMap, 0) - - checkError := checkExpiration(s.internodeCertProvider, 
timeWindow, expiring, expired) - err = appendError(err, checkError) - checkError = checkExpiration(s.frontendCertProvider, timeWindow, expiring, expired) - err = appendError(err, checkError) - checkError = checkExpiration(s.workerCertProvider, timeWindow, expiring, expired) - err = appendError(err, checkError) - checkError = checkExpiration(s.frontendPerHostCertProviderMap, timeWindow, expiring, expired) - err = appendError(err, checkError) - - return expiring, expired, err -} - -func checkExpiration( - provider CertExpirationChecker, - timeWindow time.Duration, - expiring CertExpirationMap, - expired CertExpirationMap, -) error { - - providerExpiring, providerExpired, err := provider.GetExpiringCerts(timeWindow) - mergeMaps(expiring, providerExpiring) - mergeMaps(expired, providerExpired) - return err -} - -func (s *localStoreTlsProvider) getOrCreateConfig( - cachedConfig **tls.Config, - configConstructor tlsConfigConstructor, - isEnabled bool, -) (*tls.Config, error) { - if !isEnabled { - return nil, nil - } - - // Check if exists under a read lock first - s.RLock() - if *cachedConfig != nil { - defer s.RUnlock() - return *cachedConfig, nil - } - // Not found, promote to write lock to initialize - s.RUnlock() - s.Lock() - defer s.Unlock() - // Check if someone got here first while waiting for write lock - if *cachedConfig != nil { - return *cachedConfig, nil - } - - // Load configuration - localConfig, err := configConstructor() - - if err != nil { - return nil, err - } - - *cachedConfig = localConfig - return *cachedConfig, nil -} - -func (s *localStoreTlsProvider) getOrCreateRemoteClusterClientConfig( - hostname string, - configConstructor tlsConfigConstructor, - isEnabled bool, -) (*tls.Config, error) { - if !isEnabled { - return nil, nil - } - - // Check if exists under a read lock first - s.RLock() - if clientConfig, ok := s.cachedRemoteClusterClientConfig[hostname]; ok { - defer s.RUnlock() - return clientConfig, nil - } - // Not found, promote to write lock to initialize - s.RUnlock() - s.Lock() - defer s.Unlock() - // Check if someone got here first while waiting for write lock - if clientConfig, ok := s.cachedRemoteClusterClientConfig[hostname]; ok { - return clientConfig, nil - } - - // Load configuration - localConfig, err := configConstructor() - - if err != nil { - return nil, err - } - - s.cachedRemoteClusterClientConfig[hostname] = localConfig - return localConfig, nil -} - -func newServerTLSConfig( - certProvider CertProvider, - perHostCertProviderMap PerHostCertProviderMap, - config *config.GroupTLS, - logger log.Logger, -) (*tls.Config, error) { - - clientAuthRequired := config.Server.RequireClientAuth - tlsConfig, err := getServerTLSConfigFromCertProvider(certProvider, clientAuthRequired, "", "", logger) - if err != nil { - return nil, err - } - - tlsConfig.GetConfigForClient = func(c *tls.ClientHelloInfo) (*tls.Config, error) { - - remoteAddress := c.Conn.RemoteAddr().String() - logger.Debug("attempted incoming TLS connection", tag.Address(remoteAddress), tag.ServerName(c.ServerName)) - - if perHostCertProviderMap != nil && perHostCertProviderMap.NumberOfHosts() > 0 { - perHostCertProvider, hostClientAuthRequired, err := perHostCertProviderMap.GetCertProvider(c.ServerName) - if err != nil { - logger.Error("error while looking up per-host provider for attempted incoming TLS connection", - tag.ServerName(c.ServerName), tag.Address(remoteAddress), tag.Error(err)) - return nil, err - } - - if perHostCertProvider != nil { - return 
getServerTLSConfigFromCertProvider(perHostCertProvider, hostClientAuthRequired, remoteAddress, c.ServerName, logger) - } - logger.Warn("cannot find a per-host provider for attempted incoming TLS connection. returning default TLS configuration", - tag.ServerName(c.ServerName), tag.Address(remoteAddress)) - return getServerTLSConfigFromCertProvider(certProvider, clientAuthRequired, remoteAddress, c.ServerName, logger) - } - return getServerTLSConfigFromCertProvider(certProvider, clientAuthRequired, remoteAddress, c.ServerName, logger) - } - - return tlsConfig, nil -} - -func getServerTLSConfigFromCertProvider( - certProvider CertProvider, - requireClientAuth bool, - remoteAddress string, - serverName string, - logger log.Logger) (*tls.Config, error) { - - // Get serverCert from disk - serverCert, err := certProvider.FetchServerCertificate() - if err != nil { - return nil, fmt.Errorf("loading server tls certificate failed: %v", err) - } - - // tls disabled, responsibility of cert provider above to error otherwise - if serverCert == nil { - return nil, nil - } - - // Default to NoClientAuth - clientAuthType := tls.NoClientCert - var clientCaPool *x509.CertPool - - // If mTLS enabled - if requireClientAuth { - clientAuthType = tls.RequireAndVerifyClientCert - - ca, err := certProvider.FetchClientCAs() - if err != nil { - return nil, fmt.Errorf("failed to fetch client CAs: %v", err) - } - - clientCaPool = ca - } - if remoteAddress != "" { // remoteAddress=="" when we return initial tls.Config object when configuring server - logger.Debug("returning TLS config for connection", tag.Address(remoteAddress), tag.ServerName(serverName)) - } - return auth.NewTLSConfigWithCertsAndCAs( - clientAuthType, - []tls.Certificate{*serverCert}, - clientCaPool, - logger), nil -} - -func newClientTLSConfig( - clientProvider CertProvider, - serverName string, - isAuthRequired bool, - isWorker bool, - enableHostVerification bool, -) (*tls.Config, error) { - // Optional ServerCA for client if not already trusted by host - serverCa, err := clientProvider.FetchServerRootCAsForClient(isWorker) - if err != nil { - return nil, fmt.Errorf("failed to load client ca: %v", err) - } - - var getCert tlsCertFetcher - - // mTLS enabled, present certificate - if isAuthRequired { - getCert = func() (*tls.Certificate, error) { - cert, err := clientProvider.FetchClientCertificate(isWorker) - if err != nil { - return nil, err - } - - if cert == nil { - return nil, fmt.Errorf("client auth required, but no certificate provided") - } - return cert, nil - } - } - - return auth.NewDynamicTLSClientConfig( - getCert, - serverCa, - serverName, - enableHostVerification, - ), nil -} - -func (s *localStoreTlsProvider) timerCallback() { - for { - select { - case <-s.stop: - return - case <-s.ticker.C: - } - - s.checkCertExpiration() - } -} - -func (s *localStoreTlsProvider) checkCertExpiration() { - var retError error - defer log.CapturePanic(s.logger, &retError) - - var errorTime time.Time - if s.settings.ExpirationChecks.ErrorWindow != 0 { - errorTime = time.Now().UTC().Add(s.settings.ExpirationChecks.ErrorWindow) - } else { - errorTime = time.Now().UTC().AddDate(10, 0, 0) - } - - window := s.settings.ExpirationChecks.WarningWindow - // if only ErrorWindow is set, we set WarningWindow to the same value, so that the checks do happen - if window == 0 && s.settings.ExpirationChecks.ErrorWindow != 0 { - window = s.settings.ExpirationChecks.ErrorWindow - } - if window != 0 { - expiring, expired, err := s.GetExpiringCerts(window) - if err != nil { - 
s.logger.Error(fmt.Sprintf("error while checking for certificate expiration: %v", err)) - return - } - if s.metricsHandler != nil { - s.metricsHandler.Gauge(metrics.TlsCertsExpired.GetMetricName()).Record(float64(len(expired))) - s.metricsHandler.Gauge(metrics.TlsCertsExpiring.GetMetricName()).Record(float64(len(expiring))) - } - s.logCerts(expired, true, errorTime) - s.logCerts(expiring, false, errorTime) - } -} - -func (s *localStoreTlsProvider) logCerts(certs CertExpirationMap, expired bool, errorTime time.Time) { - - for _, cert := range certs { - str := createExpirationLogMessage(cert, expired) - if expired || cert.Expiration.Before(errorTime) { - s.logger.Error(str) - } else { - s.logger.Warn(str) - } - } -} - -func createExpirationLogMessage(cert CertExpirationData, expired bool) string { - - var verb string - if expired { - verb = "has expired" - } else { - verb = "will expire" - } - return fmt.Sprintf("certificate with thumbprint=%x %s on %v, IsCA=%t, DNS=%v", - cert.Thumbprint, verb, cert.Expiration, cert.IsCA, cert.DNSNames) -} - -func mergeMaps(to CertExpirationMap, from CertExpirationMap) { - for k, v := range from { - to[k] = v - } -} - -func isSystemWorker(tls *config.RootTLS) bool { - return tls.SystemWorker.CertData != "" || tls.SystemWorker.CertFile != "" || - len(tls.SystemWorker.Client.RootCAData) > 0 || len(tls.SystemWorker.Client.RootCAFiles) > 0 || - tls.SystemWorker.Client.ForceTLS -} diff -Nru temporal-1.21.5-1/src/common/rpc/encryption/local_store_cert_provider.go temporal-1.22.5/src/common/rpc/encryption/local_store_cert_provider.go --- temporal-1.21.5-1/src/common/rpc/encryption/local_store_cert_provider.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/rpc/encryption/local_store_cert_provider.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,610 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
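The localStoreTlsProvider code removed above drives its certificate-expiration monitoring from the RootTLS ExpirationChecks settings: CheckInterval starts the background ticker, WarningWindow selects certificates to report, and ErrorWindow escalates near-expiry certificates to error-level logs (and stands in for WarningWindow when only ErrorWindow is set). A rough construction sketch based on the field accesses visible in initialize and checkCertExpiration, assuming NewLocalStoreTlsProvider stays exported from the encryption package after the file renames in this patch; the concrete durations are arbitrary, and the nil metrics handler relies on the nil check in checkCertExpiration:

package example

import (
	"time"

	"go.temporal.io/server/common/config"
	"go.temporal.io/server/common/log"
	"go.temporal.io/server/common/rpc/encryption"
)

// buildProvider (hypothetical helper) enables hourly expiration checks with a
// 30-day warning window and a 7-day error window, then constructs the local
// store TLS provider with the stock cert-provider factory.
func buildProvider(root *config.RootTLS, logger log.Logger) (encryption.TLSConfigProvider, error) {
	root.ExpirationChecks.CheckInterval = time.Hour
	root.ExpirationChecks.WarningWindow = 30 * 24 * time.Hour
	root.ExpirationChecks.ErrorWindow = 7 * 24 * time.Hour

	return encryption.NewLocalStoreTlsProvider(root, nil, logger, encryption.NewLocalStoreCertProvider)
}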
+ +package encryption + +import ( + "bytes" + "crypto/md5" + "crypto/tls" + "crypto/x509" + "encoding/base64" + "encoding/pem" + "errors" + "fmt" + "os" + "sync" + "time" + + "go.temporal.io/server/common/config" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/log/tag" +) + +var _ CertProvider = (*localStoreCertProvider)(nil) +var _ CertExpirationChecker = (*localStoreCertProvider)(nil) + +type certCache struct { + serverCert *tls.Certificate + workerCert *tls.Certificate + clientCAPool *x509.CertPool + serverCAPool *x509.CertPool + serverCAsWorkerPool *x509.CertPool + clientCACerts []*x509.Certificate // copies of certs in the clientCAPool CertPool for expiration checks + serverCACerts []*x509.Certificate // copies of certs in the serverCAPool CertPool for expiration checks + serverCACertsWorker []*x509.Certificate // copies of certs in the serverCAsWorkerPool CertPool for expiration checks +} + +type localStoreCertProvider struct { + sync.RWMutex + + tlsSettings *config.GroupTLS + workerTLSSettings *config.WorkerTLS + isLegacyWorkerConfig bool + legacyWorkerSettings *config.ClientTLS + + certs *certCache + refreshInterval time.Duration + + ticker *time.Ticker + stop chan bool + logger log.Logger +} + +type loadOrDecodeDataFunc func(item string) ([]byte, error) + +type tlsCertFetcher func() (*tls.Certificate, error) + +func (s *localStoreCertProvider) initialize() { + + if s.refreshInterval != 0 { + s.stop = make(chan bool) + s.ticker = time.NewTicker(s.refreshInterval) + go s.refreshCerts() + } +} + +func NewLocalStoreCertProvider( + tlsSettings *config.GroupTLS, + workerTlsSettings *config.WorkerTLS, + legacyWorkerSettings *config.ClientTLS, + refreshInterval time.Duration, + logger log.Logger) CertProvider { + + provider := &localStoreCertProvider{ + tlsSettings: tlsSettings, + workerTLSSettings: workerTlsSettings, + legacyWorkerSettings: legacyWorkerSettings, + isLegacyWorkerConfig: legacyWorkerSettings != nil, + logger: logger, + refreshInterval: refreshInterval, + } + provider.initialize() + return provider +} + +func (s *localStoreCertProvider) Close() { + + if s.ticker != nil { + s.ticker.Stop() + } + if s.stop != nil { + s.stop <- true + close(s.stop) + } +} + +func (s *localStoreCertProvider) FetchServerCertificate() (*tls.Certificate, error) { + + if s.tlsSettings == nil { + return nil, nil + } + certs, err := s.getCerts() + if err != nil { + return nil, err + } + return certs.serverCert, nil +} + +func (s *localStoreCertProvider) FetchClientCAs() (*x509.CertPool, error) { + + if s.tlsSettings == nil { + return nil, nil + } + certs, err := s.getCerts() + if err != nil { + return nil, err + } + return certs.clientCAPool, nil +} + +func (s *localStoreCertProvider) FetchServerRootCAsForClient(isWorker bool) (*x509.CertPool, error) { + + clientSettings := s.getClientTLSSettings(isWorker) + if clientSettings == nil { + return nil, nil + } + certs, err := s.getCerts() + if err != nil { + return nil, err + } + + if isWorker { + return certs.serverCAsWorkerPool, nil + } + + return certs.serverCAPool, nil +} + +func (s *localStoreCertProvider) FetchClientCertificate(isWorker bool) (*tls.Certificate, error) { + + if !s.isTLSEnabled() { + return nil, nil + } + certs, err := s.getCerts() + if err != nil { + return nil, err + } + if isWorker { + return certs.workerCert, nil + } + return certs.serverCert, nil +} + +func (s *localStoreCertProvider) GetExpiringCerts(timeWindow time.Duration, +) (CertExpirationMap, CertExpirationMap, error) { + + expiring := 
make(CertExpirationMap) + expired := make(CertExpirationMap) + when := time.Now().UTC().Add(timeWindow) + + certs, err := s.getCerts() + if err != nil { + return nil, nil, err + } + + checkError := checkTLSCertForExpiration(certs.serverCert, when, expiring, expired) + err = appendError(err, checkError) + checkError = checkTLSCertForExpiration(certs.workerCert, when, expiring, expired) + err = appendError(err, checkError) + + checkCertsForExpiration(certs.clientCACerts, when, expiring, expired) + checkCertsForExpiration(certs.serverCACerts, when, expiring, expired) + checkCertsForExpiration(certs.serverCACertsWorker, when, expiring, expired) + + return expiring, expired, err +} + +func (s *localStoreCertProvider) getCerts() (*certCache, error) { + + s.RLock() + if s.certs != nil { + defer s.RUnlock() + return s.certs, nil + } + s.RUnlock() + s.Lock() + defer s.Unlock() + + if s.certs != nil { + return s.certs, nil + } + + newCerts, err := s.loadCerts() + if err != nil { + return nil, err + } + + if newCerts == nil { + s.certs = &certCache{} + } else { + s.certs = newCerts + } + return s.certs, nil +} + +func (s *localStoreCertProvider) loadCerts() (*certCache, error) { + + if !s.isTLSEnabled() { + return nil, nil + } + + newCerts := certCache{} + var err error + + if s.tlsSettings != nil { + newCerts.serverCert, err = s.fetchCertificate(s.tlsSettings.Server.CertFile, s.tlsSettings.Server.CertData, + s.tlsSettings.Server.KeyFile, s.tlsSettings.Server.KeyData) + if err != nil { + return nil, err + } + + certPool, certs, err := s.fetchCAs(s.tlsSettings.Server.ClientCAFiles, s.tlsSettings.Server.ClientCAData, + "cannot specify both clientCAFiles and clientCAData properties") + if err != nil { + return nil, err + } + newCerts.clientCAPool = certPool + newCerts.clientCACerts = certs + } + + if s.isLegacyWorkerConfig { + newCerts.workerCert = newCerts.serverCert + } else { + if s.workerTLSSettings != nil { + newCerts.workerCert, err = s.fetchCertificate(s.workerTLSSettings.CertFile, s.workerTLSSettings.CertData, + s.workerTLSSettings.KeyFile, s.workerTLSSettings.KeyData) + if err != nil { + return nil, err + } + } + } + + nonWorkerPool, nonWorkerCerts, err := s.loadServerCACerts(false) + if err != nil { + return nil, err + } + newCerts.serverCAPool = nonWorkerPool + newCerts.serverCACerts = nonWorkerCerts + + workerPool, workerCerts, err := s.loadServerCACerts(true) + if err != nil { + return nil, err + } + newCerts.serverCAsWorkerPool = workerPool + newCerts.serverCACertsWorker = workerCerts + + return &newCerts, nil +} + +func (s *localStoreCertProvider) fetchCertificate( + certFile string, certData string, + keyFile string, keyData string) (*tls.Certificate, error) { + if certFile == "" && certData == "" { + return nil, nil + } + + if certFile != "" && certData != "" { + return nil, errors.New("only one of certFile or certData properties should be spcified") + } + + var certBytes []byte + var keyBytes []byte + var err error + + if certFile != "" { + s.logger.Info("loading certificate from file", tag.TLSCertFile(certFile)) + certBytes, err = os.ReadFile(certFile) + if err != nil { + return nil, err + } + } else if certData != "" { + certBytes, err = base64.StdEncoding.DecodeString(certData) + if err != nil { + return nil, fmt.Errorf("TLS public certificate could not be decoded: %w", err) + } + } + + if keyFile != "" { + s.logger.Info("loading private key from file", tag.TLSKeyFile(keyFile)) + keyBytes, err = os.ReadFile(keyFile) + if err != nil { + return nil, err + } + } else if keyData != "" { 
+ keyBytes, err = base64.StdEncoding.DecodeString(keyData) + if err != nil { + return nil, fmt.Errorf("TLS private key could not be decoded: %w", err) + } + } + + cert, err := tls.X509KeyPair(certBytes, keyBytes) + if err != nil { + return nil, fmt.Errorf("loading tls certificate failed: %v", err) + } + + return &cert, nil +} + +func (s *localStoreCertProvider) getClientTLSSettings(isWorker bool) *config.ClientTLS { + if isWorker && s.workerTLSSettings != nil { + return &s.workerTLSSettings.Client // explicit system worker case + } else if isWorker { + return s.legacyWorkerSettings // legacy config case when we use Frontend.Client settings + } else { + if s.tlsSettings == nil { + return nil + } + return &s.tlsSettings.Client // internode client case + } +} + +func (s *localStoreCertProvider) loadServerCACerts(isWorker bool) (*x509.CertPool, []*x509.Certificate, error) { + + clientSettings := s.getClientTLSSettings(isWorker) + if clientSettings == nil { + return nil, nil, nil + } + + return s.fetchCAs(clientSettings.RootCAFiles, clientSettings.RootCAData, + "cannot specify both rootCAFiles and rootCAData properties") +} + +func (s *localStoreCertProvider) fetchCAs( + files []string, + data []string, + duplicateErrorMessage string) (*x509.CertPool, []*x509.Certificate, error) { + if len(files) == 0 && len(data) == 0 { + return nil, nil, nil + } + + caPoolFromFiles, caCertsFromFiles, err := s.buildCAPoolFromFiles(files) + if err != nil { + return nil, nil, err + } + + caPoolFromData, caCertsFromData, err := buildCAPoolFromData(data) + if err != nil { + return nil, nil, err + } + + if caPoolFromFiles != nil && caPoolFromData != nil { + return nil, nil, errors.New(duplicateErrorMessage) + } + + var certPool *x509.CertPool + var certs []*x509.Certificate + + if caPoolFromData != nil { + certPool = caPoolFromData + certs = caCertsFromData + } else { + certPool = caPoolFromFiles + certs = caCertsFromFiles + } + + return certPool, certs, nil +} + +func checkTLSCertForExpiration( + cert *tls.Certificate, + when time.Time, + expiring CertExpirationMap, + expired CertExpirationMap, +) error { + + if cert == nil { + return nil + } + + x509cert, err := x509.ParseCertificate(cert.Certificate[0]) + if err != nil { + return err + } + checkCertForExpiration(x509cert, when, expiring, expired) + return nil +} + +func checkCertsForExpiration( + certs []*x509.Certificate, + time time.Time, + expiring CertExpirationMap, + expired CertExpirationMap, +) { + + for _, cert := range certs { + checkCertForExpiration(cert, time, expiring, expired) + } +} + +func checkCertForExpiration( + cert *x509.Certificate, + pointInTime time.Time, + expiring CertExpirationMap, + expired CertExpirationMap, +) { + + if cert != nil && expiresBefore(cert, pointInTime) { + record := CertExpirationData{ + Thumbprint: md5.Sum(cert.Raw), + IsCA: cert.IsCA, + DNSNames: cert.DNSNames, + Expiration: cert.NotAfter, + } + if record.Expiration.Before(time.Now().UTC()) { + expired[record.Thumbprint] = record + } else { + expiring[record.Thumbprint] = record + } + } +} + +func expiresBefore(cert *x509.Certificate, pointInTime time.Time) bool { + return cert.NotAfter.Before(pointInTime) +} + +func buildCAPoolFromData(caData []string) (*x509.CertPool, []*x509.Certificate, error) { + + return buildCAPool(caData, base64.StdEncoding.DecodeString) +} + +func (s *localStoreCertProvider) buildCAPoolFromFiles(caFiles []string) (*x509.CertPool, []*x509.Certificate, error) { + if len(caFiles) == 0 { + return nil, nil, nil + } + + s.logger.Info("loading CA 
certs from", tag.TLSCertFiles(caFiles)) + return buildCAPool(caFiles, os.ReadFile) +} + +func buildCAPool(cas []string, getBytes loadOrDecodeDataFunc) (*x509.CertPool, []*x509.Certificate, error) { + + var caPool *x509.CertPool + var certs []*x509.Certificate + + for _, ca := range cas { + if ca == "" { + continue + } + + caBytes, err := getBytes(ca) + if err != nil { + return nil, nil, fmt.Errorf("failed to decode ca cert: %w", err) + } + + if caPool == nil { + caPool = x509.NewCertPool() + } + if !caPool.AppendCertsFromPEM(caBytes) { + return nil, nil, errors.New("unknown failure constructing cert pool for ca") + } + + cert, err := parseCert(caBytes) + if err != nil { + return nil, nil, fmt.Errorf("failed to parse x509 certificate: %w", err) + } + certs = append(certs, cert) + } + return caPool, certs, nil +} + +// logic borrowed from tls.X509KeyPair() +func parseCert(bytes []byte) (*x509.Certificate, error) { + + var certBytes [][]byte + for { + var certDERBlock *pem.Block + certDERBlock, bytes = pem.Decode(bytes) + if certDERBlock == nil { + break + } + if certDERBlock.Type == "CERTIFICATE" { + certBytes = append(certBytes, certDERBlock.Bytes) + } + } + + if len(certBytes) == 0 || len(certBytes[0]) == 0 { + return nil, fmt.Errorf("failed to decode PEM certificate data") + } + return x509.ParseCertificate(certBytes[0]) +} + +func appendError(aggregatedErr error, err error) error { + if aggregatedErr == nil { + return err + } + if err == nil { + return aggregatedErr + } + return fmt.Errorf("%v, %w", aggregatedErr, err) +} + +func (s *localStoreCertProvider) refreshCerts() { + + for { + select { + case <-s.stop: + return + case <-s.ticker.C: + } + + newCerts, err := s.loadCerts() + if err != nil { + s.logger.Error("failed to load certificates", tag.Error(err)) + continue + } + + s.RLock() + currentCerts := s.certs + s.RUnlock() + if currentCerts.isEqual(newCerts) { + continue + } + + s.logger.Info("loaded new TLS certificates") + s.Lock() + s.certs = newCerts + s.Unlock() + } +} + +func (s *localStoreCertProvider) isTLSEnabled() bool { + return s.tlsSettings != nil || s.workerTLSSettings != nil +} + +func (c *certCache) isEqual(other *certCache) bool { + + if c == other { + return true + } + if c == nil || other == nil { + return false + } + + if !equalTLSCerts(c.serverCert, other.serverCert) || + !equalTLSCerts(c.workerCert, other.workerCert) || + !equalX509(c.clientCACerts, other.clientCACerts) || + !equalX509(c.serverCACerts, other.serverCACerts) || + !equalX509(c.serverCACertsWorker, other.serverCACertsWorker) { + return false + } + return true +} + +func equal(a, b [][]byte) bool { + if len(a) != len(b) { + return false + } + for i := range a { + if !bytes.Equal(a[i], b[i]) { + return false + } + } + return true +} + +func equalX509(a, b []*x509.Certificate) bool { + if len(a) != len(b) { + return false + } + for i := range a { + if !a[i].Equal(b[i]) { + return false + } + } + return true +} + +func equalTLSCerts(a, b *tls.Certificate) bool { + if a != nil { + if b == nil || !equal(a.Certificate, b.Certificate) { + return false + } + } else { + if b != nil { + return false + } + } + return true +} diff -Nru temporal-1.21.5-1/src/common/rpc/encryption/local_store_cert_provider_test.go temporal-1.22.5/src/common/rpc/encryption/local_store_cert_provider_test.go --- temporal-1.21.5-1/src/common/rpc/encryption/local_store_cert_provider_test.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/rpc/encryption/local_store_cert_provider_test.go 2024-02-23 09:45:43.000000000 
+0000 @@ -0,0 +1,49 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package encryption + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestAppendError(t *testing.T) { + assert := assert.New(t) + err1 := errors.New("error1") + err2 := errors.New("error2") + + err := appendError(nil, err1) + assert.Equal(err1, err) + assert.Equal("error1", err.Error()) + err = appendError(err1, nil) + assert.Equal(err1, err) + assert.Equal("error1", err.Error()) + + err = appendError(err1, err2) + assert.Equal(err2, errors.Unwrap(err)) + assert.Equal("error1, error2", err.Error()) +} diff -Nru temporal-1.21.5-1/src/common/rpc/encryption/local_store_per_host_cert_provider_map.go temporal-1.22.5/src/common/rpc/encryption/local_store_per_host_cert_provider_map.go --- temporal-1.21.5-1/src/common/rpc/encryption/local_store_per_host_cert_provider_map.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/rpc/encryption/local_store_per_host_cert_provider_map.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,109 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
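[Editor's note] The TestAppendError case above pins down the contract of appendError: earlier failures are flattened into the message with %v, while only the most recent failure stays wrapped via %w, so errors.Is and errors.Unwrap report just the latest cause. A minimal, self-contained sketch of that behaviour (not part of the upstream diff; the helper name is illustrative):

package main

import (
	"errors"
	"fmt"
)

// appendErr mirrors appendError from the provider code: nil-safe aggregation that
// keeps only the last error unwrappable.
func appendErr(aggregated, err error) error {
	if aggregated == nil {
		return err
	}
	if err == nil {
		return aggregated
	}
	return fmt.Errorf("%v, %w", aggregated, err)
}

func main() {
	err1 := errors.New("error1")
	err2 := errors.New("error2")

	combined := appendErr(appendErr(nil, err1), err2)
	fmt.Println(combined)                        // error1, error2
	fmt.Println(errors.Is(combined, err2))       // true: err2 is wrapped with %w
	fmt.Println(errors.Is(combined, err1))       // false: err1 was flattened with %v
	fmt.Println(errors.Unwrap(combined) == err2) // true
}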
+ +package encryption + +import ( + "strings" + "time" + + "go.temporal.io/server/common/config" + "go.temporal.io/server/common/log" +) + +var _ PerHostCertProviderMap = (*localStorePerHostCertProviderMap)(nil) +var _ CertExpirationChecker = (*localStorePerHostCertProviderMap)(nil) + +type localStorePerHostCertProviderMap struct { + certProviderCache map[string]CertProvider + clientAuthCache map[string]bool +} + +func newLocalStorePerHostCertProviderMap( + overrides map[string]config.ServerTLS, + certProviderFactory CertProviderFactory, + refreshInterval time.Duration, + logger log.Logger, +) *localStorePerHostCertProviderMap { + + providerMap := &localStorePerHostCertProviderMap{} + if overrides == nil { + return providerMap + } + + providerMap.certProviderCache = make(map[string]CertProvider, len(overrides)) + providerMap.clientAuthCache = make(map[string]bool, len(overrides)) + + for host, settings := range overrides { + lcHost := strings.ToLower(host) + + provider := certProviderFactory(&config.GroupTLS{Server: settings}, nil, nil, refreshInterval, logger) + providerMap.certProviderCache[lcHost] = provider + providerMap.clientAuthCache[lcHost] = settings.RequireClientAuth + } + + return providerMap +} + +// GetCertProvider for a given host name returns a cert provider (nil if not found) and if client authentication is required +func (f *localStorePerHostCertProviderMap) GetCertProvider(hostName string) (CertProvider, bool, error) { + + lcHostName := strings.ToLower(hostName) + + if f.certProviderCache == nil { + return nil, true, nil + } + cachedCertProvider, ok := f.certProviderCache[lcHostName] + if !ok { + return nil, true, nil + } + clientAuthRequired := f.clientAuthCache[lcHostName] + return cachedCertProvider, clientAuthRequired, nil +} + +func (f *localStorePerHostCertProviderMap) GetExpiringCerts(timeWindow time.Duration, +) (expiring CertExpirationMap, expired CertExpirationMap, err error) { + + expiring = make(CertExpirationMap) + expired = make(CertExpirationMap) + + for _, provider := range f.certProviderCache { + + providerExpiring, providerExpired, providerError := provider.GetExpiringCerts(timeWindow) + mergeMaps(expiring, providerExpiring) + mergeMaps(expired, providerExpired) + if providerError != nil { + err = appendError(err, providerError) + } + } + return expiring, expired, err +} + +func (f *localStorePerHostCertProviderMap) NumberOfHosts() int { + + if f.certProviderCache != nil { + return len(f.certProviderCache) + } + return 0 +} diff -Nru temporal-1.21.5-1/src/common/rpc/encryption/local_store_tls_provider.go temporal-1.22.5/src/common/rpc/encryption/local_store_tls_provider.go --- temporal-1.21.5-1/src/common/rpc/encryption/local_store_tls_provider.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/rpc/encryption/local_store_tls_provider.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,502 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package encryption + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "sync" + "time" + + "go.temporal.io/server/common/log/tag" + "go.temporal.io/server/common/metrics" + + "go.temporal.io/server/common/auth" + "go.temporal.io/server/common/config" + "go.temporal.io/server/common/log" +) + +type CertProviderFactory func( + tlsSettings *config.GroupTLS, + workerTlsSettings *config.WorkerTLS, + legacyWorkerSettings *config.ClientTLS, + refreshInterval time.Duration, + logger log.Logger) CertProvider + +type localStoreTlsProvider struct { + sync.RWMutex + + settings *config.RootTLS + + internodeCertProvider CertProvider + internodeClientCertProvider CertProvider + frontendCertProvider CertProvider + workerCertProvider CertProvider + remoteClusterClientCertProvider map[string]CertProvider + frontendPerHostCertProviderMap *localStorePerHostCertProviderMap + + cachedInternodeServerConfig *tls.Config + cachedInternodeClientConfig *tls.Config + cachedFrontendServerConfig *tls.Config + cachedFrontendClientConfig *tls.Config + cachedRemoteClusterClientConfig map[string]*tls.Config + + ticker *time.Ticker + logger log.Logger + stop chan bool + metricsHandler metrics.Handler +} + +var _ TLSConfigProvider = (*localStoreTlsProvider)(nil) +var _ CertExpirationChecker = (*localStoreTlsProvider)(nil) + +func NewLocalStoreTlsProvider(tlsConfig *config.RootTLS, metricsHandler metrics.Handler, logger log.Logger, certProviderFactory CertProviderFactory, +) (TLSConfigProvider, error) { + + internodeProvider := certProviderFactory(&tlsConfig.Internode, nil, nil, tlsConfig.RefreshInterval, logger) + var workerProvider CertProvider + if isSystemWorker(tlsConfig) { // explicit system worker config + workerProvider = certProviderFactory(nil, &tlsConfig.SystemWorker, nil, tlsConfig.RefreshInterval, logger) + } else { // legacy implicit system worker config case + internodeWorkerProvider := certProviderFactory(&tlsConfig.Internode, nil, &tlsConfig.Frontend.Client, tlsConfig.RefreshInterval, logger) + workerProvider = internodeWorkerProvider + } + + remoteClusterClientCertProvider := make(map[string]CertProvider) + for hostname, groupTLS := range tlsConfig.RemoteClusters { + remoteClusterClientCertProvider[hostname] = certProviderFactory(&groupTLS, nil, nil, tlsConfig.RefreshInterval, logger) + } + + provider := &localStoreTlsProvider{ + internodeCertProvider: internodeProvider, + internodeClientCertProvider: internodeProvider, + 
frontendCertProvider: certProviderFactory(&tlsConfig.Frontend, nil, nil, tlsConfig.RefreshInterval, logger), + workerCertProvider: workerProvider, + frontendPerHostCertProviderMap: newLocalStorePerHostCertProviderMap( + tlsConfig.Frontend.PerHostOverrides, certProviderFactory, tlsConfig.RefreshInterval, logger), + remoteClusterClientCertProvider: remoteClusterClientCertProvider, + RWMutex: sync.RWMutex{}, + settings: tlsConfig, + metricsHandler: metricsHandler, + logger: logger, + cachedRemoteClusterClientConfig: make(map[string]*tls.Config), + } + provider.initialize() + return provider, nil +} + +func (s *localStoreTlsProvider) initialize() { + period := s.settings.ExpirationChecks.CheckInterval + if period != 0 { + s.stop = make(chan bool) + s.ticker = time.NewTicker(period) + s.checkCertExpiration() // perform initial check to emit metrics and logs right away + go s.timerCallback() + } +} + +func (s *localStoreTlsProvider) Close() { + + if s.ticker != nil { + s.ticker.Stop() + } + if s.stop != nil { + s.stop <- true + close(s.stop) + } +} + +func (s *localStoreTlsProvider) GetInternodeClientConfig() (*tls.Config, error) { + + client := &s.settings.Internode.Client + return s.getOrCreateConfig( + &s.cachedInternodeClientConfig, + func() (*tls.Config, error) { + return newClientTLSConfig(s.internodeClientCertProvider, client.ServerName, + s.settings.Internode.Server.RequireClientAuth, false, !client.DisableHostVerification) + }, + s.settings.Internode.IsClientEnabled(), + ) +} + +func (s *localStoreTlsProvider) GetFrontendClientConfig() (*tls.Config, error) { + + var client *config.ClientTLS + var useTLS bool + if isSystemWorker(s.settings) { + client = &s.settings.SystemWorker.Client + useTLS = true + } else { + client = &s.settings.Frontend.Client + useTLS = s.settings.Frontend.IsClientEnabled() + } + return s.getOrCreateConfig( + &s.cachedFrontendClientConfig, + func() (*tls.Config, error) { + return newClientTLSConfig(s.workerCertProvider, client.ServerName, + useTLS, true, !client.DisableHostVerification) + }, + useTLS, + ) +} + +func (s *localStoreTlsProvider) GetRemoteClusterClientConfig(hostname string) (*tls.Config, error) { + groupTLS, ok := s.settings.RemoteClusters[hostname] + if !ok { + return nil, nil + } + + return s.getOrCreateRemoteClusterClientConfig( + hostname, + func() (*tls.Config, error) { + return newClientTLSConfig( + s.remoteClusterClientCertProvider[hostname], + groupTLS.Client.ServerName, + groupTLS.Server.RequireClientAuth, + false, + !groupTLS.Client.DisableHostVerification) + }, + groupTLS.IsClientEnabled(), + ) +} + +func (s *localStoreTlsProvider) GetFrontendServerConfig() (*tls.Config, error) { + return s.getOrCreateConfig( + &s.cachedFrontendServerConfig, + func() (*tls.Config, error) { + return newServerTLSConfig(s.frontendCertProvider, s.frontendPerHostCertProviderMap, &s.settings.Frontend, s.logger) + }, + s.settings.Frontend.IsServerEnabled()) +} + +func (s *localStoreTlsProvider) GetInternodeServerConfig() (*tls.Config, error) { + return s.getOrCreateConfig( + &s.cachedInternodeServerConfig, + func() (*tls.Config, error) { + return newServerTLSConfig(s.internodeCertProvider, nil, &s.settings.Internode, s.logger) + }, + s.settings.Internode.IsServerEnabled()) +} + +func (s *localStoreTlsProvider) GetExpiringCerts(timeWindow time.Duration, +) (expiring CertExpirationMap, expired CertExpirationMap, err error) { + + expiring = make(CertExpirationMap, 0) + expired = make(CertExpirationMap, 0) + + checkError := checkExpiration(s.internodeCertProvider, 
timeWindow, expiring, expired) + err = appendError(err, checkError) + checkError = checkExpiration(s.frontendCertProvider, timeWindow, expiring, expired) + err = appendError(err, checkError) + checkError = checkExpiration(s.workerCertProvider, timeWindow, expiring, expired) + err = appendError(err, checkError) + checkError = checkExpiration(s.frontendPerHostCertProviderMap, timeWindow, expiring, expired) + err = appendError(err, checkError) + + return expiring, expired, err +} + +func checkExpiration( + provider CertExpirationChecker, + timeWindow time.Duration, + expiring CertExpirationMap, + expired CertExpirationMap, +) error { + + providerExpiring, providerExpired, err := provider.GetExpiringCerts(timeWindow) + mergeMaps(expiring, providerExpiring) + mergeMaps(expired, providerExpired) + return err +} + +func (s *localStoreTlsProvider) getOrCreateConfig( + cachedConfig **tls.Config, + configConstructor tlsConfigConstructor, + isEnabled bool, +) (*tls.Config, error) { + if !isEnabled { + return nil, nil + } + + // Check if exists under a read lock first + s.RLock() + if *cachedConfig != nil { + defer s.RUnlock() + return *cachedConfig, nil + } + // Not found, promote to write lock to initialize + s.RUnlock() + s.Lock() + defer s.Unlock() + // Check if someone got here first while waiting for write lock + if *cachedConfig != nil { + return *cachedConfig, nil + } + + // Load configuration + localConfig, err := configConstructor() + + if err != nil { + return nil, err + } + + *cachedConfig = localConfig + return *cachedConfig, nil +} + +func (s *localStoreTlsProvider) getOrCreateRemoteClusterClientConfig( + hostname string, + configConstructor tlsConfigConstructor, + isEnabled bool, +) (*tls.Config, error) { + if !isEnabled { + return nil, nil + } + + // Check if exists under a read lock first + s.RLock() + if clientConfig, ok := s.cachedRemoteClusterClientConfig[hostname]; ok { + defer s.RUnlock() + return clientConfig, nil + } + // Not found, promote to write lock to initialize + s.RUnlock() + s.Lock() + defer s.Unlock() + // Check if someone got here first while waiting for write lock + if clientConfig, ok := s.cachedRemoteClusterClientConfig[hostname]; ok { + return clientConfig, nil + } + + // Load configuration + localConfig, err := configConstructor() + + if err != nil { + return nil, err + } + + s.cachedRemoteClusterClientConfig[hostname] = localConfig + return localConfig, nil +} + +func newServerTLSConfig( + certProvider CertProvider, + perHostCertProviderMap PerHostCertProviderMap, + config *config.GroupTLS, + logger log.Logger, +) (*tls.Config, error) { + + clientAuthRequired := config.Server.RequireClientAuth + tlsConfig, err := getServerTLSConfigFromCertProvider(certProvider, clientAuthRequired, "", "", logger) + if err != nil { + return nil, err + } + + tlsConfig.GetConfigForClient = func(c *tls.ClientHelloInfo) (*tls.Config, error) { + + remoteAddress := c.Conn.RemoteAddr().String() + logger.Debug("attempted incoming TLS connection", tag.Address(remoteAddress), tag.ServerName(c.ServerName)) + + if perHostCertProviderMap != nil && perHostCertProviderMap.NumberOfHosts() > 0 { + perHostCertProvider, hostClientAuthRequired, err := perHostCertProviderMap.GetCertProvider(c.ServerName) + if err != nil { + logger.Error("error while looking up per-host provider for attempted incoming TLS connection", + tag.ServerName(c.ServerName), tag.Address(remoteAddress), tag.Error(err)) + return nil, err + } + + if perHostCertProvider != nil { + return 
getServerTLSConfigFromCertProvider(perHostCertProvider, hostClientAuthRequired, remoteAddress, c.ServerName, logger) + } + logger.Warn("cannot find a per-host provider for attempted incoming TLS connection. returning default TLS configuration", + tag.ServerName(c.ServerName), tag.Address(remoteAddress)) + return getServerTLSConfigFromCertProvider(certProvider, clientAuthRequired, remoteAddress, c.ServerName, logger) + } + return getServerTLSConfigFromCertProvider(certProvider, clientAuthRequired, remoteAddress, c.ServerName, logger) + } + + return tlsConfig, nil +} + +func getServerTLSConfigFromCertProvider( + certProvider CertProvider, + requireClientAuth bool, + remoteAddress string, + serverName string, + logger log.Logger) (*tls.Config, error) { + + // Get serverCert from disk + serverCert, err := certProvider.FetchServerCertificate() + if err != nil { + return nil, fmt.Errorf("loading server tls certificate failed: %v", err) + } + + // tls disabled, responsibility of cert provider above to error otherwise + if serverCert == nil { + return nil, nil + } + + // Default to NoClientAuth + clientAuthType := tls.NoClientCert + var clientCaPool *x509.CertPool + + // If mTLS enabled + if requireClientAuth { + clientAuthType = tls.RequireAndVerifyClientCert + + ca, err := certProvider.FetchClientCAs() + if err != nil { + return nil, fmt.Errorf("failed to fetch client CAs: %v", err) + } + + clientCaPool = ca + } + if remoteAddress != "" { // remoteAddress=="" when we return initial tls.Config object when configuring server + logger.Debug("returning TLS config for connection", tag.Address(remoteAddress), tag.ServerName(serverName)) + } + return auth.NewTLSConfigWithCertsAndCAs( + clientAuthType, + []tls.Certificate{*serverCert}, + clientCaPool, + logger), nil +} + +func newClientTLSConfig( + clientProvider CertProvider, + serverName string, + isAuthRequired bool, + isWorker bool, + enableHostVerification bool, +) (*tls.Config, error) { + // Optional ServerCA for client if not already trusted by host + serverCa, err := clientProvider.FetchServerRootCAsForClient(isWorker) + if err != nil { + return nil, fmt.Errorf("failed to load client ca: %v", err) + } + + var getCert tlsCertFetcher + + // mTLS enabled, present certificate + if isAuthRequired { + getCert = func() (*tls.Certificate, error) { + cert, err := clientProvider.FetchClientCertificate(isWorker) + if err != nil { + return nil, err + } + + if cert == nil { + return nil, fmt.Errorf("client auth required, but no certificate provided") + } + return cert, nil + } + } + + return auth.NewDynamicTLSClientConfig( + getCert, + serverCa, + serverName, + enableHostVerification, + ), nil +} + +func (s *localStoreTlsProvider) timerCallback() { + for { + select { + case <-s.stop: + return + case <-s.ticker.C: + } + + s.checkCertExpiration() + } +} + +func (s *localStoreTlsProvider) checkCertExpiration() { + var retError error + defer log.CapturePanic(s.logger, &retError) + + var errorTime time.Time + if s.settings.ExpirationChecks.ErrorWindow != 0 { + errorTime = time.Now().UTC().Add(s.settings.ExpirationChecks.ErrorWindow) + } else { + errorTime = time.Now().UTC().AddDate(10, 0, 0) + } + + window := s.settings.ExpirationChecks.WarningWindow + // if only ErrorWindow is set, we set WarningWindow to the same value, so that the checks do happen + if window == 0 && s.settings.ExpirationChecks.ErrorWindow != 0 { + window = s.settings.ExpirationChecks.ErrorWindow + } + if window != 0 { + expiring, expired, err := s.GetExpiringCerts(window) + if err != nil { + 
s.logger.Error(fmt.Sprintf("error while checking for certificate expiration: %v", err)) + return + } + if s.metricsHandler != nil { + s.metricsHandler.Gauge(metrics.TlsCertsExpired.GetMetricName()).Record(float64(len(expired))) + s.metricsHandler.Gauge(metrics.TlsCertsExpiring.GetMetricName()).Record(float64(len(expiring))) + } + s.logCerts(expired, true, errorTime) + s.logCerts(expiring, false, errorTime) + } +} + +func (s *localStoreTlsProvider) logCerts(certs CertExpirationMap, expired bool, errorTime time.Time) { + + for _, cert := range certs { + str := createExpirationLogMessage(cert, expired) + if expired || cert.Expiration.Before(errorTime) { + s.logger.Error(str) + } else { + s.logger.Warn(str) + } + } +} + +func createExpirationLogMessage(cert CertExpirationData, expired bool) string { + + var verb string + if expired { + verb = "has expired" + } else { + verb = "will expire" + } + return fmt.Sprintf("certificate with thumbprint=%x %s on %v, IsCA=%t, DNS=%v", + cert.Thumbprint, verb, cert.Expiration, cert.IsCA, cert.DNSNames) +} + +func mergeMaps(to CertExpirationMap, from CertExpirationMap) { + for k, v := range from { + to[k] = v + } +} + +func isSystemWorker(tls *config.RootTLS) bool { + return tls.SystemWorker.CertData != "" || tls.SystemWorker.CertFile != "" || + len(tls.SystemWorker.Client.RootCAData) > 0 || len(tls.SystemWorker.Client.RootCAFiles) > 0 || + tls.SystemWorker.Client.ForceTLS +} diff -Nru temporal-1.21.5-1/src/common/rpc/encryption/testDynamicCertProvider.go temporal-1.22.5/src/common/rpc/encryption/testDynamicCertProvider.go --- temporal-1.21.5-1/src/common/rpc/encryption/testDynamicCertProvider.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/rpc/encryption/testDynamicCertProvider.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,110 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- -package encryption - -import ( - "crypto/tls" - "crypto/x509" - "time" - - "go.temporal.io/server/common/config" -) - -type TestDynamicCertProvider struct { - serverCerts []*tls.Certificate - caCerts *x509.CertPool - wrongCACerts *x509.CertPool - serverCertIndex int - config *config.GroupTLS - serverName string -} - -var _ CertProvider = (*TestDynamicCertProvider)(nil) -var _ PerHostCertProviderMap = (*TestDynamicCertProvider)(nil) - -func NewTestDynamicCertProvider( - serverCerts []*tls.Certificate, - caCerts *x509.CertPool, - wrongCACerts *x509.CertPool, - config config.GroupTLS) *TestDynamicCertProvider { - - return &TestDynamicCertProvider{ - serverCerts: serverCerts, - caCerts: caCerts, - wrongCACerts: wrongCACerts, - config: &config, - serverName: "127.0.0.1", - } -} - -func (t *TestDynamicCertProvider) FetchServerCertificate() (*tls.Certificate, error) { - i := t.serverCertIndex % len(t.serverCerts) - t.serverCertIndex++ - return t.serverCerts[i], nil -} - -func (t *TestDynamicCertProvider) FetchClientCAs() (*x509.CertPool, error) { - panic("not implemented") -} - -func (t *TestDynamicCertProvider) GetSettings() *config.GroupTLS { - return t.config -} - -func (t *TestDynamicCertProvider) FetchClientCertificate(_ bool) (*tls.Certificate, error) { - panic("not implemented") -} - -func (t *TestDynamicCertProvider) FetchServerRootCAsForClient(_ bool) (*x509.CertPool, error) { - return t.caCerts, nil -} - -func (t *TestDynamicCertProvider) GetCertProvider(hostName string) (CertProvider, bool, error) { - if hostName == "localhost" { - return t, false, nil - } - return nil, false, nil -} - -func (t *TestDynamicCertProvider) SwitchToWrongServerRootCACerts() { - t.caCerts = t.wrongCACerts -} - -func (t *TestDynamicCertProvider) SetServerName(serverName string) { - t.serverName = serverName -} - -func (t *TestDynamicCertProvider) GetExpiringCerts(_ time.Duration, -) (expiring CertExpirationMap, expired CertExpirationMap, err error) { - panic("not implemented") -} - -func (t *TestDynamicCertProvider) Initialize(refreshInterval time.Duration) { - panic("implement me") -} - -func (t *TestDynamicCertProvider) NumberOfHosts() int { - return 1 -} diff -Nru temporal-1.21.5-1/src/common/rpc/encryption/testDynamicTLSConfigProvider.go temporal-1.22.5/src/common/rpc/encryption/testDynamicTLSConfigProvider.go --- temporal-1.21.5-1/src/common/rpc/encryption/testDynamicTLSConfigProvider.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/rpc/encryption/testDynamicTLSConfigProvider.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,98 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package encryption - -import ( - "crypto/tls" - "crypto/x509" - "time" - - "go.temporal.io/server/common/config" - "go.temporal.io/server/common/log" -) - -type TestDynamicTLSConfigProvider struct { - settings *config.RootTLS - - InternodeCertProvider *TestDynamicCertProvider - InternodeClientCertProvider *TestDynamicCertProvider - FrontendCertProvider *TestDynamicCertProvider - FrontendClientCertProvider *TestDynamicCertProvider - WorkerCertProvider *TestDynamicCertProvider - - FrontendPerHostCertProviderMap PerHostCertProviderMap - - logger log.Logger -} - -func (t *TestDynamicTLSConfigProvider) GetInternodeServerConfig() (*tls.Config, error) { - return newServerTLSConfig(t.InternodeCertProvider, nil, &t.settings.Internode, t.logger) -} - -func (t *TestDynamicTLSConfigProvider) GetInternodeClientConfig() (*tls.Config, error) { - return newClientTLSConfig(t.InternodeClientCertProvider, t.settings.Internode.Client.ServerName, true, false, true) -} - -func (t *TestDynamicTLSConfigProvider) GetFrontendServerConfig() (*tls.Config, error) { - return newServerTLSConfig(t.FrontendCertProvider, t.FrontendPerHostCertProviderMap, &t.settings.Frontend, t.logger) -} - -func (t *TestDynamicTLSConfigProvider) GetFrontendClientConfig() (*tls.Config, error) { - return newClientTLSConfig(t.WorkerCertProvider, t.settings.Frontend.Client.ServerName, true, false, true) -} - -func (t *TestDynamicTLSConfigProvider) GetExpiringCerts(timeWindow time.Duration) (expiring CertExpirationMap, expired CertExpirationMap, err error) { - panic("implement me") -} - -func (t *TestDynamicTLSConfigProvider) GetRemoteClusterClientConfig(hostName string) (*tls.Config, error) { - panic("implement me") -} - -var _ TLSConfigProvider = (*TestDynamicTLSConfigProvider)(nil) - -func NewTestDynamicTLSConfigProvider( - tlsConfig *config.RootTLS, - internodeCerts []*tls.Certificate, - internodeCACerts *x509.CertPool, - frontendCerts []*tls.Certificate, - frontendCACerts *x509.CertPool, - wrongCACerts *x509.CertPool, -) (*TestDynamicTLSConfigProvider, error) { - - internodeProvider := NewTestDynamicCertProvider(internodeCerts, internodeCACerts, wrongCACerts, tlsConfig.Internode) - frontendProvider := NewTestDynamicCertProvider(frontendCerts, frontendCACerts, wrongCACerts, tlsConfig.Frontend) - - return &TestDynamicTLSConfigProvider{ - InternodeCertProvider: internodeProvider, - InternodeClientCertProvider: internodeProvider, - FrontendCertProvider: frontendProvider, - FrontendClientCertProvider: frontendProvider, - WorkerCertProvider: frontendProvider, - FrontendPerHostCertProviderMap: frontendProvider, - settings: tlsConfig, - logger: log.NewTestLogger(), - }, nil -} diff -Nru temporal-1.21.5-1/src/common/rpc/encryption/test_dynamic_cert_provider.go temporal-1.22.5/src/common/rpc/encryption/test_dynamic_cert_provider.go --- temporal-1.21.5-1/src/common/rpc/encryption/test_dynamic_cert_provider.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/rpc/encryption/test_dynamic_cert_provider.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,110 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package encryption + +import ( + "crypto/tls" + "crypto/x509" + "time" + + "go.temporal.io/server/common/config" +) + +type TestDynamicCertProvider struct { + serverCerts []*tls.Certificate + caCerts *x509.CertPool + wrongCACerts *x509.CertPool + serverCertIndex int + config *config.GroupTLS + serverName string +} + +var _ CertProvider = (*TestDynamicCertProvider)(nil) +var _ PerHostCertProviderMap = (*TestDynamicCertProvider)(nil) + +func NewTestDynamicCertProvider( + serverCerts []*tls.Certificate, + caCerts *x509.CertPool, + wrongCACerts *x509.CertPool, + config config.GroupTLS) *TestDynamicCertProvider { + + return &TestDynamicCertProvider{ + serverCerts: serverCerts, + caCerts: caCerts, + wrongCACerts: wrongCACerts, + config: &config, + serverName: "127.0.0.1", + } +} + +func (t *TestDynamicCertProvider) FetchServerCertificate() (*tls.Certificate, error) { + i := t.serverCertIndex % len(t.serverCerts) + t.serverCertIndex++ + return t.serverCerts[i], nil +} + +func (t *TestDynamicCertProvider) FetchClientCAs() (*x509.CertPool, error) { + panic("not implemented") +} + +func (t *TestDynamicCertProvider) GetSettings() *config.GroupTLS { + return t.config +} + +func (t *TestDynamicCertProvider) FetchClientCertificate(_ bool) (*tls.Certificate, error) { + panic("not implemented") +} + +func (t *TestDynamicCertProvider) FetchServerRootCAsForClient(_ bool) (*x509.CertPool, error) { + return t.caCerts, nil +} + +func (t *TestDynamicCertProvider) GetCertProvider(hostName string) (CertProvider, bool, error) { + if hostName == "localhost" { + return t, false, nil + } + return nil, false, nil +} + +func (t *TestDynamicCertProvider) SwitchToWrongServerRootCACerts() { + t.caCerts = t.wrongCACerts +} + +func (t *TestDynamicCertProvider) SetServerName(serverName string) { + t.serverName = serverName +} + +func (t *TestDynamicCertProvider) GetExpiringCerts(_ time.Duration, +) (expiring CertExpirationMap, expired CertExpirationMap, err error) { + panic("not implemented") +} + +func (t *TestDynamicCertProvider) Initialize(refreshInterval time.Duration) { + panic("implement me") +} + +func (t *TestDynamicCertProvider) NumberOfHosts() int { + return 1 +} diff -Nru temporal-1.21.5-1/src/common/rpc/encryption/test_dynamic_tlsconfig_provider.go temporal-1.22.5/src/common/rpc/encryption/test_dynamic_tlsconfig_provider.go --- temporal-1.21.5-1/src/common/rpc/encryption/test_dynamic_tlsconfig_provider.go 1970-01-01 00:00:00.000000000 
+0000 +++ temporal-1.22.5/src/common/rpc/encryption/test_dynamic_tlsconfig_provider.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,98 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package encryption + +import ( + "crypto/tls" + "crypto/x509" + "time" + + "go.temporal.io/server/common/config" + "go.temporal.io/server/common/log" +) + +type TestDynamicTLSConfigProvider struct { + settings *config.RootTLS + + InternodeCertProvider *TestDynamicCertProvider + InternodeClientCertProvider *TestDynamicCertProvider + FrontendCertProvider *TestDynamicCertProvider + FrontendClientCertProvider *TestDynamicCertProvider + WorkerCertProvider *TestDynamicCertProvider + + FrontendPerHostCertProviderMap PerHostCertProviderMap + + logger log.Logger +} + +func (t *TestDynamicTLSConfigProvider) GetInternodeServerConfig() (*tls.Config, error) { + return newServerTLSConfig(t.InternodeCertProvider, nil, &t.settings.Internode, t.logger) +} + +func (t *TestDynamicTLSConfigProvider) GetInternodeClientConfig() (*tls.Config, error) { + return newClientTLSConfig(t.InternodeClientCertProvider, t.settings.Internode.Client.ServerName, true, false, true) +} + +func (t *TestDynamicTLSConfigProvider) GetFrontendServerConfig() (*tls.Config, error) { + return newServerTLSConfig(t.FrontendCertProvider, t.FrontendPerHostCertProviderMap, &t.settings.Frontend, t.logger) +} + +func (t *TestDynamicTLSConfigProvider) GetFrontendClientConfig() (*tls.Config, error) { + return newClientTLSConfig(t.WorkerCertProvider, t.settings.Frontend.Client.ServerName, true, false, true) +} + +func (t *TestDynamicTLSConfigProvider) GetExpiringCerts(timeWindow time.Duration) (expiring CertExpirationMap, expired CertExpirationMap, err error) { + panic("implement me") +} + +func (t *TestDynamicTLSConfigProvider) GetRemoteClusterClientConfig(hostName string) (*tls.Config, error) { + panic("implement me") +} + +var _ TLSConfigProvider = (*TestDynamicTLSConfigProvider)(nil) + +func NewTestDynamicTLSConfigProvider( + tlsConfig *config.RootTLS, + internodeCerts []*tls.Certificate, + internodeCACerts *x509.CertPool, + frontendCerts []*tls.Certificate, + frontendCACerts *x509.CertPool, + wrongCACerts *x509.CertPool, +) (*TestDynamicTLSConfigProvider, error) { + + internodeProvider := NewTestDynamicCertProvider(internodeCerts, internodeCACerts, wrongCACerts, tlsConfig.Internode) + 
frontendProvider := NewTestDynamicCertProvider(frontendCerts, frontendCACerts, wrongCACerts, tlsConfig.Frontend) + + return &TestDynamicTLSConfigProvider{ + InternodeCertProvider: internodeProvider, + InternodeClientCertProvider: internodeProvider, + FrontendCertProvider: frontendProvider, + FrontendClientCertProvider: frontendProvider, + WorkerCertProvider: frontendProvider, + FrontendPerHostCertProviderMap: frontendProvider, + settings: tlsConfig, + logger: log.NewTestLogger(), + }, nil +} diff -Nru temporal-1.21.5-1/src/common/rpc/encryption/tlsFactory.go temporal-1.22.5/src/common/rpc/encryption/tlsFactory.go --- temporal-1.21.5-1/src/common/rpc/encryption/tlsFactory.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/rpc/encryption/tlsFactory.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,181 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package encryption - -import ( - "crypto/tls" - "crypto/x509" - "fmt" - "strings" - "time" - - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/metrics" - - "go.temporal.io/server/common/config" -) - -type ( - // TLSConfigProvider serves as a common interface to read server and client configuration for TLS. - TLSConfigProvider interface { - GetInternodeServerConfig() (*tls.Config, error) - GetInternodeClientConfig() (*tls.Config, error) - GetFrontendServerConfig() (*tls.Config, error) - GetFrontendClientConfig() (*tls.Config, error) - GetRemoteClusterClientConfig(hostname string) (*tls.Config, error) - GetExpiringCerts(timeWindow time.Duration) (expiring CertExpirationMap, expired CertExpirationMap, err error) - } - - // CertProvider is a common interface to load raw TLS/X509 primitives. - CertProvider interface { - FetchServerCertificate() (*tls.Certificate, error) - FetchClientCAs() (*x509.CertPool, error) - FetchClientCertificate(isWorker bool) (*tls.Certificate, error) - FetchServerRootCAsForClient(isWorker bool) (*x509.CertPool, error) - GetExpiringCerts(timeWindow time.Duration) (expiring CertExpirationMap, expired CertExpirationMap, err error) - } - - // PerHostCertProviderMap returns a CertProvider for a given host name. 
- PerHostCertProviderMap interface { - GetCertProvider(hostName string) (provider CertProvider, clientAuthRequired bool, err error) - GetExpiringCerts(timeWindow time.Duration) (expiring CertExpirationMap, expired CertExpirationMap, err error) - NumberOfHosts() int - } - - CertThumbprint [16]byte - - CertExpirationData struct { - Thumbprint CertThumbprint - IsCA bool - DNSNames []string - Expiration time.Time - } - - CertExpirationMap map[CertThumbprint]CertExpirationData - - CertExpirationChecker interface { - GetExpiringCerts(timeWindow time.Duration) (expiring CertExpirationMap, expired CertExpirationMap, err error) - } - - tlsConfigConstructor func() (*tls.Config, error) -) - -// NewTLSConfigProviderFromConfig creates a new TLS Config provider from RootTLS config. -// A custom cert provider factory can be optionally injected via certProviderFactory argument. -// Otherwise, it defaults to using localStoreCertProvider -func NewTLSConfigProviderFromConfig( - encryptionSettings config.RootTLS, - metricsHandler metrics.Handler, - logger log.Logger, - certProviderFactory CertProviderFactory, -) (TLSConfigProvider, error) { - if err := validateRootTLS(&encryptionSettings); err != nil { - return nil, err - } - if certProviderFactory == nil { - certProviderFactory = NewLocalStoreCertProvider - } - return NewLocalStoreTlsProvider(&encryptionSettings, metricsHandler.WithTags(metrics.OperationTag(metrics.ServerTlsScope)), logger, certProviderFactory) -} - -func validateRootTLS(cfg *config.RootTLS) error { - if err := validateGroupTLS(&cfg.Internode); err != nil { - return err - } - if err := validateGroupTLS(&cfg.Frontend); err != nil { - return err - } - return validateWorkerTLS(&cfg.SystemWorker) -} - -func validateGroupTLS(cfg *config.GroupTLS) error { - if err := validateServerTLS(&cfg.Server); err != nil { - return err - } - if err := validateClientTLS(&cfg.Client); err != nil { - return err - } - for host, hostConfig := range cfg.PerHostOverrides { - - if strings.TrimSpace(host) == "" { - return fmt.Errorf("host name cannot be empty string") - } - if err := validateServerTLS(&hostConfig); err != nil { - return err - } - } - return nil -} - -func validateWorkerTLS(cfg *config.WorkerTLS) error { - if cfg.CertFile != "" && cfg.CertData != "" { - return fmt.Errorf("cannot specify CertFile and CertData at the same time") - } - if cfg.KeyFile != "" && cfg.KeyData != "" { - return fmt.Errorf("cannot specify KeyFile and KeyData at the same time") - } - return validateClientTLS(&cfg.Client) -} - -func validateServerTLS(cfg *config.ServerTLS) error { - if cfg.CertFile != "" && cfg.CertData != "" { - return fmt.Errorf("cannot specify CertFile and CertData at the same time") - } - if cfg.KeyFile != "" && cfg.KeyData != "" { - return fmt.Errorf("cannot specify KeyFile and KeyData at the same time") - } - if err := validateCAs(cfg.ClientCAData); err != nil { - return fmt.Errorf("invalid ServerTLS.ClientCAData: %w", err) - } - if err := validateCAs(cfg.ClientCAFiles); err != nil { - return fmt.Errorf("invalid ServerTLS.ClientCAFiles: %w", err) - } - if len(cfg.ClientCAFiles) > 0 && len(cfg.ClientCAData) > 0 { - return fmt.Errorf("cannot specify ClientCAFiles and ClientCAData at the same time") - } - return nil -} - -func validateClientTLS(cfg *config.ClientTLS) error { - if err := validateCAs(cfg.RootCAData); err != nil { - return fmt.Errorf("invalid ClientTLS.RootCAData: %w", err) - } - if err := validateCAs(cfg.RootCAFiles); err != nil { - return fmt.Errorf("invalid ClientTLS.RootCAFiles: %w", err) - } - 
if len(cfg.RootCAData) > 0 && len(cfg.RootCAFiles) > 0 { - return fmt.Errorf("cannot specify RootCAFiles and RootCAData at the same time") - } - return nil -} - -func validateCAs(cas []string) error { - for _, ca := range cas { - if strings.TrimSpace(ca) == "" { - return fmt.Errorf("CA cannot be empty string") - } - } - return nil -} diff -Nru temporal-1.21.5-1/src/common/rpc/encryption/tls_factory.go temporal-1.22.5/src/common/rpc/encryption/tls_factory.go --- temporal-1.21.5-1/src/common/rpc/encryption/tls_factory.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/rpc/encryption/tls_factory.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,181 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package encryption + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "strings" + "time" + + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/metrics" + + "go.temporal.io/server/common/config" +) + +type ( + // TLSConfigProvider serves as a common interface to read server and client configuration for TLS. + TLSConfigProvider interface { + GetInternodeServerConfig() (*tls.Config, error) + GetInternodeClientConfig() (*tls.Config, error) + GetFrontendServerConfig() (*tls.Config, error) + GetFrontendClientConfig() (*tls.Config, error) + GetRemoteClusterClientConfig(hostname string) (*tls.Config, error) + GetExpiringCerts(timeWindow time.Duration) (expiring CertExpirationMap, expired CertExpirationMap, err error) + } + + // CertProvider is a common interface to load raw TLS/X509 primitives. + CertProvider interface { + FetchServerCertificate() (*tls.Certificate, error) + FetchClientCAs() (*x509.CertPool, error) + FetchClientCertificate(isWorker bool) (*tls.Certificate, error) + FetchServerRootCAsForClient(isWorker bool) (*x509.CertPool, error) + GetExpiringCerts(timeWindow time.Duration) (expiring CertExpirationMap, expired CertExpirationMap, err error) + } + + // PerHostCertProviderMap returns a CertProvider for a given host name. 
+ PerHostCertProviderMap interface { + GetCertProvider(hostName string) (provider CertProvider, clientAuthRequired bool, err error) + GetExpiringCerts(timeWindow time.Duration) (expiring CertExpirationMap, expired CertExpirationMap, err error) + NumberOfHosts() int + } + + CertThumbprint [16]byte + + CertExpirationData struct { + Thumbprint CertThumbprint + IsCA bool + DNSNames []string + Expiration time.Time + } + + CertExpirationMap map[CertThumbprint]CertExpirationData + + CertExpirationChecker interface { + GetExpiringCerts(timeWindow time.Duration) (expiring CertExpirationMap, expired CertExpirationMap, err error) + } + + tlsConfigConstructor func() (*tls.Config, error) +) + +// NewTLSConfigProviderFromConfig creates a new TLS Config provider from RootTLS config. +// A custom cert provider factory can be optionally injected via certProviderFactory argument. +// Otherwise, it defaults to using localStoreCertProvider +func NewTLSConfigProviderFromConfig( + encryptionSettings config.RootTLS, + metricsHandler metrics.Handler, + logger log.Logger, + certProviderFactory CertProviderFactory, +) (TLSConfigProvider, error) { + if err := validateRootTLS(&encryptionSettings); err != nil { + return nil, err + } + if certProviderFactory == nil { + certProviderFactory = NewLocalStoreCertProvider + } + return NewLocalStoreTlsProvider(&encryptionSettings, metricsHandler.WithTags(metrics.OperationTag(metrics.ServerTlsScope)), logger, certProviderFactory) +} + +func validateRootTLS(cfg *config.RootTLS) error { + if err := validateGroupTLS(&cfg.Internode); err != nil { + return err + } + if err := validateGroupTLS(&cfg.Frontend); err != nil { + return err + } + return validateWorkerTLS(&cfg.SystemWorker) +} + +func validateGroupTLS(cfg *config.GroupTLS) error { + if err := validateServerTLS(&cfg.Server); err != nil { + return err + } + if err := validateClientTLS(&cfg.Client); err != nil { + return err + } + for host, hostConfig := range cfg.PerHostOverrides { + + if strings.TrimSpace(host) == "" { + return fmt.Errorf("host name cannot be empty string") + } + if err := validateServerTLS(&hostConfig); err != nil { + return err + } + } + return nil +} + +func validateWorkerTLS(cfg *config.WorkerTLS) error { + if cfg.CertFile != "" && cfg.CertData != "" { + return fmt.Errorf("cannot specify CertFile and CertData at the same time") + } + if cfg.KeyFile != "" && cfg.KeyData != "" { + return fmt.Errorf("cannot specify KeyFile and KeyData at the same time") + } + return validateClientTLS(&cfg.Client) +} + +func validateServerTLS(cfg *config.ServerTLS) error { + if cfg.CertFile != "" && cfg.CertData != "" { + return fmt.Errorf("cannot specify CertFile and CertData at the same time") + } + if cfg.KeyFile != "" && cfg.KeyData != "" { + return fmt.Errorf("cannot specify KeyFile and KeyData at the same time") + } + if err := validateCAs(cfg.ClientCAData); err != nil { + return fmt.Errorf("invalid ServerTLS.ClientCAData: %w", err) + } + if err := validateCAs(cfg.ClientCAFiles); err != nil { + return fmt.Errorf("invalid ServerTLS.ClientCAFiles: %w", err) + } + if len(cfg.ClientCAFiles) > 0 && len(cfg.ClientCAData) > 0 { + return fmt.Errorf("cannot specify ClientCAFiles and ClientCAData at the same time") + } + return nil +} + +func validateClientTLS(cfg *config.ClientTLS) error { + if err := validateCAs(cfg.RootCAData); err != nil { + return fmt.Errorf("invalid ClientTLS.RootCAData: %w", err) + } + if err := validateCAs(cfg.RootCAFiles); err != nil { + return fmt.Errorf("invalid ClientTLS.RootCAFiles: %w", err) + } + 
if len(cfg.RootCAData) > 0 && len(cfg.RootCAFiles) > 0 { + return fmt.Errorf("cannot specify RootCAFiles and RootCAData at the same time") + } + return nil +} + +func validateCAs(cas []string) error { + for _, ca := range cas { + if strings.TrimSpace(ca) == "" { + return fmt.Errorf("CA cannot be empty string") + } + } + return nil +} diff -Nru temporal-1.21.5-1/src/common/rpc/grpc.go temporal-1.22.5/src/common/rpc/grpc.go --- temporal-1.21.5-1/src/common/rpc/grpc.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/rpc/grpc.go 2024-02-23 09:45:43.000000000 +0000 @@ -52,6 +52,10 @@ // MaxBackoffDelay is a maximum interval between reconnect attempts. MaxBackoffDelay = 10 * time.Second + // MaxHTTPAPIRequestBytes is the maximum number of bytes an HTTP API request + // can have. This is currently set to the max gRPC request size. + MaxHTTPAPIRequestBytes = 4 * 1024 * 1024 + // minConnectTimeout is the minimum amount of time we are willing to give a connection to complete. minConnectTimeout = 20 * time.Second diff -Nru temporal-1.21.5-1/src/common/rpc/interceptor/caller_info.go temporal-1.22.5/src/common/rpc/interceptor/caller_info.go --- temporal-1.21.5-1/src/common/rpc/interceptor/caller_info.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/rpc/interceptor/caller_info.go 2024-02-23 09:45:43.000000000 +0000 @@ -66,7 +66,7 @@ callerInfo.CallerType = headers.CallerTypeAPI updateInfo = true } - if callerInfo.CallerType == headers.CallerTypeAPI && + if (callerInfo.CallerType == headers.CallerTypeAPI || callerInfo.CallerType == headers.CallerTypeOperator) && callerInfo.CallOrigin == "" { _, method := SplitMethodName(info.FullMethod) callerInfo.CallOrigin = method diff -Nru temporal-1.21.5-1/src/common/rpc/interceptor/concurrent_request_limit.go temporal-1.22.5/src/common/rpc/interceptor/concurrent_request_limit.go --- temporal-1.21.5-1/src/common/rpc/interceptor/concurrent_request_limit.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/rpc/interceptor/concurrent_request_limit.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,146 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
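[Editor's note] The ConcurrentRequestLimitInterceptor added below bounds in-flight long-poll requests by atomically adding a per-method token weight to a namespace/method counter, deferring the matching decrement, and rejecting the call when the running count exceeds the computed quota. A minimal, self-contained sketch of that counting pattern (not part of the upstream diff; names are illustrative):

package main

import (
	"errors"
	"fmt"
	"sync/atomic"
)

var errBusy = errors.New("concurrent request limit exceeded")

type limiter struct {
	inFlight int32 // running token count for requests currently being handled
	quota    int32 // maximum allowed in-flight tokens
}

// do adds the request's token weight, guarantees the decrement via defer, and
// rejects the request when the new count exceeds the quota.
func (l *limiter) do(tokens int32, handler func() error) error {
	count := atomic.AddInt32(&l.inFlight, tokens)
	defer atomic.AddInt32(&l.inFlight, -tokens)

	if count > l.quota {
		return errBusy
	}
	return handler()
}

func main() {
	l := &limiter{quota: 1}
	fmt.Println(l.do(1, func() error { return nil })) // <nil>: within quota

	// A second request arriving while the first is still in flight is rejected.
	err := l.do(1, func() error {
		return l.do(1, func() error { return nil })
	})
	fmt.Println(err) // concurrent request limit exceeded
}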
+ +package interceptor + +import ( + "context" + "sync" + "sync/atomic" + + enumspb "go.temporal.io/api/enums/v1" + "go.temporal.io/api/serviceerror" + "go.temporal.io/api/workflowservice/v1" + "go.temporal.io/server/common/quotas" + "google.golang.org/grpc" + + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/namespace" +) + +type ( + // ConcurrentRequestLimitInterceptor intercepts requests to the server and enforces a limit on the number of + // requests that can be in-flight at any given time, according to the configured quotas. + ConcurrentRequestLimitInterceptor struct { + namespaceRegistry namespace.Registry + logger log.Logger + quotaCalculator quotas.ClusterAwareNamespaceSpecificQuotaCalculator + // tokens is a map of method name to the number of tokens that should be consumed for that method. If there is + // no entry for a method, then no tokens will be consumed, so the method will not be limited. + tokens map[string]int + + sync.Mutex + activeTokensCount map[string]*int32 + } +) + +var ( + _ grpc.UnaryServerInterceptor = (*ConcurrentRequestLimitInterceptor)(nil).Intercept + + ErrNamespaceCountLimitServerBusy = serviceerror.NewResourceExhausted(enumspb.RESOURCE_EXHAUSTED_CAUSE_CONCURRENT_LIMIT, "namespace concurrent poller limit exceeded") +) + +func NewConcurrentRequestLimitInterceptor( + namespaceRegistry namespace.Registry, + memberCounter quotas.MemberCounter, + logger log.Logger, + perInstanceQuota func(ns string) int, + globalQuota func(ns string) int, + tokens map[string]int, +) *ConcurrentRequestLimitInterceptor { + return &ConcurrentRequestLimitInterceptor{ + namespaceRegistry: namespaceRegistry, + logger: logger, + quotaCalculator: quotas.ClusterAwareNamespaceSpecificQuotaCalculator{ + MemberCounter: memberCounter, + PerInstanceQuota: perInstanceQuota, + GlobalQuota: globalQuota, + }, + tokens: tokens, + activeTokensCount: make(map[string]*int32), + } +} + +func (ni *ConcurrentRequestLimitInterceptor) Intercept( + ctx context.Context, + req interface{}, + info *grpc.UnaryServerInfo, + handler grpc.UnaryHandler, +) (interface{}, error) { + _, methodName := SplitMethodName(info.FullMethod) + // token will default to 0 + token := ni.tokens[methodName] + + if token != 0 { + // for GetWorkflowExecutionHistoryRequest, we only care about long poll requests + longPollReq, ok := req.(*workflowservice.GetWorkflowExecutionHistoryRequest) + if ok && !longPollReq.WaitNewEvent { + // ignore non-long-poll GetHistory calls. + token = 0 + } + } + + if token != 0 { + nsName := MustGetNamespaceName(ni.namespaceRegistry, req) + counter := ni.counter(nsName, methodName) + count := atomic.AddInt32(counter, int32(token)) + defer atomic.AddInt32(counter, -int32(token)) + + handler := GetMetricsHandlerFromContext(ctx, ni.logger) + handler.Gauge(metrics.ServicePendingRequests.GetMetricName()).Record(float64(count)) + + // frontend.namespaceCount is applied per poller type temporarily to prevent + // one poller type to take all token waiting in the long poll. 
+ if float64(count) > ni.quotaCalculator.GetQuota(nsName.String()) { + return nil, ErrNamespaceCountLimitServerBusy + } + } + + return handler(ctx, req) +} + +func (ni *ConcurrentRequestLimitInterceptor) counter( + namespace namespace.Name, + methodName string, +) *int32 { + key := ni.getTokenKey(namespace, methodName) + + ni.Lock() + defer ni.Unlock() + + counter, ok := ni.activeTokensCount[key] + if !ok { + counter = new(int32) + ni.activeTokensCount[key] = counter + } + return counter +} + +func (ni *ConcurrentRequestLimitInterceptor) getTokenKey( + namespace namespace.Name, + methodName string, +) string { + return namespace.String() + "/" + methodName +} diff -Nru temporal-1.21.5-1/src/common/rpc/interceptor/concurrent_request_limit_test.go temporal-1.22.5/src/common/rpc/interceptor/concurrent_request_limit_test.go --- temporal-1.21.5-1/src/common/rpc/interceptor/concurrent_request_limit_test.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/rpc/interceptor/concurrent_request_limit_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,261 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package interceptor + +import ( + "context" + "testing" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "go.temporal.io/api/workflowservice/v1" + "go.temporal.io/server/common/dynamicconfig" + "go.temporal.io/server/common/quotas" + "go.temporal.io/server/common/quotas/quotastest" + "google.golang.org/grpc" + + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/namespace" +) + +type nsCountLimitTestCase struct { + // name of the test case + name string + // request to be intercepted by the ConcurrentRequestLimitInterceptor + request any + // numBlockedRequests is the number of pending requests that will be blocked including the final request. + numBlockedRequests int + // memberCounter returns the number of members in the namespace. + memberCounter quotas.MemberCounter + // perInstanceLimit is the limit on the number of pending requests per-instance. + perInstanceLimit int + // globalLimit is the limit on the number of pending requests across all instances. + globalLimit int + // methodName is the fully-qualified name of the gRPC method being intercepted. + methodName string + // tokens is a map of method slugs (e.g. 
just the part of the method name after the final slash) to the number of + // tokens that will be consumed by that method. + tokens map[string]int + // expectRateLimit is true if the interceptor should respond with a rate limit error. + expectRateLimit bool +} + +// TestNamespaceCountLimitInterceptor_Intercept verifies that the ConcurrentRequestLimitInterceptor responds with a rate +// limit error when requests would exceed the concurrent poller limit for a namespace. +func TestNamespaceCountLimitInterceptor_Intercept(t *testing.T) { + t.Parallel() + for _, tc := range []nsCountLimitTestCase{ + { + name: "no limit exceeded", + request: nil, + numBlockedRequests: 2, + perInstanceLimit: 2, + globalLimit: 4, + memberCounter: quotastest.NewFakeMemberCounter(2), + methodName: "/temporal.api.workflowservice.v1.WorkflowService/DescribeNamespace", + tokens: map[string]int{ + "DescribeNamespace": 1, + }, + expectRateLimit: false, + }, + { + name: "per-instance limit exceeded", + request: nil, + numBlockedRequests: 3, + perInstanceLimit: 2, + globalLimit: 4, + memberCounter: quotastest.NewFakeMemberCounter(2), + methodName: "/temporal.api.workflowservice.v1.WorkflowService/DescribeNamespace", + tokens: map[string]int{ + "DescribeNamespace": 1, + }, + expectRateLimit: true, + }, + { + name: "global limit exceeded", + request: nil, + numBlockedRequests: 3, + perInstanceLimit: 3, + globalLimit: 4, + memberCounter: quotastest.NewFakeMemberCounter(2), + methodName: "/temporal.api.workflowservice.v1.WorkflowService/DescribeNamespace", + tokens: map[string]int{ + "DescribeNamespace": 1, + }, + expectRateLimit: true, + }, + { + name: "global limit zero", + request: nil, + numBlockedRequests: 3, + perInstanceLimit: 3, + globalLimit: 0, + memberCounter: quotastest.NewFakeMemberCounter(2), + methodName: "/temporal.api.workflowservice.v1.WorkflowService/DescribeNamespace", + tokens: map[string]int{ + "DescribeNamespace": 1, + }, + expectRateLimit: false, + }, + { + name: "method name does not consume token", + request: nil, + numBlockedRequests: 3, + perInstanceLimit: 2, + globalLimit: 4, + memberCounter: quotastest.NewFakeMemberCounter(2), + methodName: "/temporal.api.workflowservice.v1.WorkflowService/DescribeNamespace", + tokens: map[string]int{}, + expectRateLimit: false, + }, + { + name: "long poll request", + request: &workflowservice.GetWorkflowExecutionHistoryRequest{WaitNewEvent: true}, + numBlockedRequests: 3, + perInstanceLimit: 2, + globalLimit: 4, + memberCounter: quotastest.NewFakeMemberCounter(2), + methodName: "/temporal.api.workflowservice.v1.WorkflowService/GetWorkflowExecutionHistory", + tokens: map[string]int{ + "GetWorkflowExecutionHistory": 1, + }, + expectRateLimit: true, + }, + { + name: "non-long poll request", + request: &workflowservice.GetWorkflowExecutionHistoryRequest{WaitNewEvent: false}, + numBlockedRequests: 3, + perInstanceLimit: 2, + globalLimit: 4, + memberCounter: quotastest.NewFakeMemberCounter(2), + methodName: "/temporal.api.workflowservice.v1.WorkflowService/GetWorkflowExecutionHistory", + tokens: map[string]int{ + "GetWorkflowExecutionHistory": 1, + }, + expectRateLimit: false, + }, + } { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + tc.run(t) + }) + } +} + +// run the test case by simulating a bunch of blocked pollers, sending a final request, and verifying that it is either +// rate limited or not. 
+func (tc *nsCountLimitTestCase) run(t *testing.T) { + ctrl := gomock.NewController(t) + handler := tc.createRequestHandler() + interceptor := tc.createInterceptor(ctrl) + // Spawn a bunch of blocked requests in the background. + tc.spawnBlockedRequests(handler, interceptor) + + // With all the blocked requests in flight, send the final request and verify whether it is rate limited or not. + _, err := interceptor.Intercept(context.Background(), tc.request, &grpc.UnaryServerInfo{ + FullMethod: tc.methodName, + }, noopHandler) + + if tc.expectRateLimit { + assert.ErrorContains(t, err, "namespace concurrent poller limit exceeded") + } else { + assert.NoError(t, err) + } + + // Clean up by unblocking all the requests. + handler.Unblock() + + for i := 0; i < tc.numBlockedRequests-1; i++ { + assert.NoError(t, <-handler.errs) + } +} + +func (tc *nsCountLimitTestCase) createRequestHandler() *testRequestHandler { + return &testRequestHandler{ + started: make(chan struct{}), + respond: make(chan struct{}), + errs: make(chan error, tc.numBlockedRequests-1), + } +} + +// spawnBlockedRequests sends a bunch of requests to the interceptor which will block until signaled. +func (tc *nsCountLimitTestCase) spawnBlockedRequests( + handler *testRequestHandler, + interceptor *ConcurrentRequestLimitInterceptor, +) { + for i := 0; i < tc.numBlockedRequests-1; i++ { + go func() { + _, err := interceptor.Intercept(context.Background(), tc.request, &grpc.UnaryServerInfo{ + FullMethod: tc.methodName, + }, handler.Handle) + handler.errs <- err + }() + } + + for i := 0; i < tc.numBlockedRequests-1; i++ { + <-handler.started + } +} + +func (tc *nsCountLimitTestCase) createInterceptor(ctrl *gomock.Controller) *ConcurrentRequestLimitInterceptor { + registry := namespace.NewMockRegistry(ctrl) + registry.EXPECT().GetNamespace(gomock.Any()).Return(&namespace.Namespace{}, nil).AnyTimes() + + interceptor := NewConcurrentRequestLimitInterceptor( + registry, + tc.memberCounter, + log.NewNoopLogger(), + dynamicconfig.GetIntPropertyFilteredByNamespace(tc.perInstanceLimit), + dynamicconfig.GetIntPropertyFilteredByNamespace(tc.globalLimit), + tc.tokens, + ) + + return interceptor +} + +// noopHandler is a grpc.UnaryHandler which does nothing. +func noopHandler(context.Context, interface{}) (interface{}, error) { + return nil, nil +} + +// testRequestHandler provides a grpc.UnaryHandler which signals when it starts and does not respond until signaled. +type testRequestHandler struct { + started chan struct{} + respond chan struct{} + errs chan error +} + +func (h testRequestHandler) Unblock() { + close(h.respond) +} + +// Handle signals that the request has started and then blocks until signaled to respond. +func (h testRequestHandler) Handle(context.Context, interface{}) (interface{}, error) { + h.started <- struct{}{} + <-h.respond + + return nil, nil +} diff -Nru temporal-1.21.5-1/src/common/rpc/interceptor/namespace_count_limit.go temporal-1.22.5/src/common/rpc/interceptor/namespace_count_limit.go --- temporal-1.21.5-1/src/common/rpc/interceptor/namespace_count_limit.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/rpc/interceptor/namespace_count_limit.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,136 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package interceptor - -import ( - "context" - "sync" - "sync/atomic" - - enumspb "go.temporal.io/api/enums/v1" - "go.temporal.io/api/serviceerror" - "go.temporal.io/api/workflowservice/v1" - "google.golang.org/grpc" - - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/metrics" - "go.temporal.io/server/common/namespace" -) - -var ( - ErrNamespaceCountLimitServerBusy = serviceerror.NewResourceExhausted(enumspb.RESOURCE_EXHAUSTED_CAUSE_CONCURRENT_LIMIT, "namespace concurrent poller limit exceeded") -) - -type ( - NamespaceCountLimitInterceptor struct { - namespaceRegistry namespace.Registry - logger log.Logger - - countFn func(namespace string) int - tokens map[string]int - - sync.Mutex - activeTokensCount map[string]*int32 - } -) - -var _ grpc.UnaryServerInterceptor = (*NamespaceCountLimitInterceptor)(nil).Intercept - -func NewNamespaceCountLimitInterceptor( - namespaceRegistry namespace.Registry, - logger log.Logger, - countFn func(namespace string) int, - tokens map[string]int, -) *NamespaceCountLimitInterceptor { - return &NamespaceCountLimitInterceptor{ - namespaceRegistry: namespaceRegistry, - logger: logger, - countFn: countFn, - tokens: tokens, - activeTokensCount: make(map[string]*int32), - } -} - -func (ni *NamespaceCountLimitInterceptor) Intercept( - ctx context.Context, - req interface{}, - info *grpc.UnaryServerInfo, - handler grpc.UnaryHandler, -) (interface{}, error) { - _, methodName := SplitMethodName(info.FullMethod) - // token will default to 0 - token := ni.tokens[methodName] - - if token != 0 { - // for GetWorkflowExecutionHistoryRequest, we only care about long poll requests - longPollReq, ok := req.(*workflowservice.GetWorkflowExecutionHistoryRequest) - if ok && !longPollReq.WaitNewEvent { - // ignore non-long-poll GetHistory calls. - token = 0 - } - } - - if token != 0 { - nsName := MustGetNamespaceName(ni.namespaceRegistry, req) - counter := ni.counter(nsName, methodName) - count := atomic.AddInt32(counter, int32(token)) - defer atomic.AddInt32(counter, -int32(token)) - - handler := GetMetricsHandlerFromContext(ctx, ni.logger) - handler.Gauge(metrics.ServicePendingRequests.GetMetricName()).Record(float64(count)) - - // frontend.namespaceCount is applied per poller type temporarily to prevent - // one poller type to take all token waiting in the long poll. 
- if int(count) > ni.countFn(nsName.String()) { - return nil, ErrNamespaceCountLimitServerBusy - } - } - - return handler(ctx, req) -} - -func (ni *NamespaceCountLimitInterceptor) counter( - namespace namespace.Name, - methodName string, -) *int32 { - key := ni.getTokenKey(namespace, methodName) - - ni.Lock() - defer ni.Unlock() - - counter, ok := ni.activeTokensCount[key] - if !ok { - counter = new(int32) - ni.activeTokensCount[key] = counter - } - return counter -} - -func (ni *NamespaceCountLimitInterceptor) getTokenKey( - namespace namespace.Name, - methodName string, -) string { - return namespace.String() + "/" + methodName -} diff -Nru temporal-1.21.5-1/src/common/rpc/interceptor/namespace_rate_limit.go temporal-1.22.5/src/common/rpc/interceptor/namespace_rate_limit.go --- temporal-1.21.5-1/src/common/rpc/interceptor/namespace_rate_limit.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/rpc/interceptor/namespace_rate_limit.go 2024-02-23 09:45:43.000000000 +0000 @@ -30,6 +30,7 @@ enumspb "go.temporal.io/api/enums/v1" "go.temporal.io/api/serviceerror" + "go.temporal.io/server/common/headers" "google.golang.org/grpc" "go.temporal.io/server/common/namespace" @@ -83,7 +84,7 @@ methodName, token, namespace.String(), - "", // this interceptor layer does not throttle based on caller type + headers.GetValues(ctx, headers.CallerTypeHeaderName)[0], 0, // this interceptor layer does not throttle based on caller segment "", // this interceptor layer does not throttle based on call initiation )) { diff -Nru temporal-1.21.5-1/src/common/rpc/interceptor/rate_limit.go temporal-1.22.5/src/common/rpc/interceptor/rate_limit.go --- temporal-1.21.5-1/src/common/rpc/interceptor/rate_limit.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/rpc/interceptor/rate_limit.go 2024-02-23 09:45:43.000000000 +0000 @@ -30,6 +30,7 @@ enumspb "go.temporal.io/api/enums/v1" "go.temporal.io/api/serviceerror" + "go.temporal.io/server/common/headers" "google.golang.org/grpc" "go.temporal.io/server/common/quotas" @@ -78,7 +79,7 @@ methodName, token, "", // this interceptor layer does not throttle based on caller name - "", // this interceptor layer does not throttle based on caller type + headers.GetValues(ctx, headers.CallerTypeHeaderName)[0], 0, // this interceptor layer does not throttle based on caller segment "", // this interceptor layer does not throttle based on call initiation )) { diff -Nru temporal-1.21.5-1/src/common/rpc/interceptor/telemetry.go temporal-1.22.5/src/common/rpc/interceptor/telemetry.go --- temporal-1.21.5-1/src/common/rpc/interceptor/telemetry.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/rpc/interceptor/telemetry.go 2024-02-23 09:45:43.000000000 +0000 @@ -35,6 +35,7 @@ "google.golang.org/grpc" "go.temporal.io/server/common" + "go.temporal.io/server/common/api" "go.temporal.io/server/common/log" "go.temporal.io/server/common/log/tag" "go.temporal.io/server/common/metrics" @@ -63,20 +64,22 @@ var ( respondWorkflowTaskCompleted = "RespondWorkflowTaskCompleted" pollActivityTaskQueue = "PollActivityTaskQueue" - frontendPackagePrefix = "/temporal.api.workflowservice.v1.WorkflowService/" - operatorServicePrefix = "/temporal.api.operatorservice.v1.OperatorService/" - adminServicePrefix = "/temporal.server.api.adminservice.v1.AdminService/" grpcActions = map[string]struct{}{ - metrics.FrontendQueryWorkflowScope: {}, - metrics.FrontendRecordActivityTaskHeartbeatScope: {}, - metrics.FrontendRecordActivityTaskHeartbeatByIdScope: {}, - 
metrics.FrontendResetWorkflowExecutionScope: {}, - metrics.FrontendStartWorkflowExecutionScope: {}, - metrics.FrontendSignalWorkflowExecutionScope: {}, - metrics.FrontendSignalWithStartWorkflowExecutionScope: {}, - metrics.FrontendRespondWorkflowTaskCompletedScope: {}, - metrics.FrontendPollActivityTaskQueueScope: {}, + "QueryWorkflow": {}, + "RecordActivityTaskHeartbeat": {}, + "RecordActivityTaskHeartbeatById": {}, + "ResetWorkflowExecution": {}, + "StartWorkflowExecution": {}, + "SignalWorkflowExecution": {}, + "SignalWithStartWorkflowExecution": {}, + "RespondWorkflowTaskCompleted": {}, + "PollActivityTaskQueue": {}, + "CreateSchedule": {}, + "UpdateSchedule": {}, + "DeleteSchedule": {}, + "PatchSchedule": {}, + "UpdateWorkflowExecution": {}, } commandActions = map[enums.CommandType]struct{}{ @@ -106,7 +109,7 @@ // Use this method to override scope used for reporting a metric. // Ideally this method should never be used. func (ti *TelemetryInterceptor) unaryOverrideOperationTag(fullName, operation string, req interface{}) string { - if strings.HasPrefix(fullName, frontendPackagePrefix) { + if strings.HasPrefix(fullName, api.WorkflowServicePrefix) { // GetWorkflowExecutionHistory method handles both long poll and regular calls. // Current plan is to eventually split GetWorkflowExecutionHistory into two APIs, // remove this "if" case when that is done. @@ -125,11 +128,11 @@ // Ideally this method should never be used. func (ti *TelemetryInterceptor) overrideOperationTag(fullName, operation string) string { // prepend Operator prefix to Operator APIs - if strings.HasPrefix(fullName, operatorServicePrefix) { + if strings.HasPrefix(fullName, api.OperatorServicePrefix) { return "Operator" + operation } // prepend Admin prefix to Admin APIs - if strings.HasPrefix(fullName, adminServicePrefix) { + if strings.HasPrefix(fullName, api.AdminServicePrefix) { return "Admin" + operation } return operation @@ -203,11 +206,8 @@ metricsHandler metrics.Handler, result interface{}, ) { - if _, ok := grpcActions[methodName]; !ok || !strings.HasPrefix(fullName, frontendPackagePrefix) { - // grpcActions checks that methodName is the one that we care about. - // ti.scopes verifies that the scope is the one we intended to emit action metrics. - // This is necessary because TelemetryInterceptor is used for all services. Different service could have same - // method name. But we only want to emit action metrics from frontend. + if _, ok := grpcActions[methodName]; !ok || !strings.HasPrefix(fullName, api.WorkflowServicePrefix) { + // grpcActions checks that methodName is the one that we care about, and we only care about WorkflowService. return } @@ -219,19 +219,30 @@ return } + hasMarker := false for _, command := range completedRequest.Commands { if _, ok := commandActions[command.CommandType]; ok { switch command.CommandType { case enums.COMMAND_TYPE_RECORD_MARKER: // handle RecordMarker command, they are used for localActivity, sideEffect, versioning etc. - markerName := command.GetRecordMarkerCommandAttributes().GetMarkerName() - metricsHandler.Counter(metrics.ActionCounter.GetMetricName()).Record(1, metrics.ActionType("command_RecordMarker_"+markerName)) + hasMarker = true + case enums.COMMAND_TYPE_START_CHILD_WORKFLOW_EXECUTION: + // Each child workflow counts as 2 actions. We use separate tags to track them separately. 
+ metricsHandler.Counter(metrics.ActionCounter.GetMetricName()).Record(1, metrics.ActionType("command_"+command.CommandType.String())) + metricsHandler.Counter(metrics.ActionCounter.GetMetricName()).Record(1, metrics.ActionType("command_"+command.CommandType.String()+"_Extra")) default: // handle all other command action metricsHandler.Counter(metrics.ActionCounter.GetMetricName()).Record(1, metrics.ActionType("command_"+command.CommandType.String())) } } } + if hasMarker { + // Emit separate action metric for batch of markers. + // One workflow task response may contain multiple marker commands. Each marker will emit one + // command_RecordMarker_Xxx action metric. Depending on pricing model, you may want to ignore all individual + // command_RecordMarker_Xxx and use command_BatchMarkers instead. + metricsHandler.Counter(metrics.ActionCounter.GetMetricName()).Record(1, metrics.ActionType("command_BatchMarkers")) + } case pollActivityTaskQueue: // handle activity retries diff -Nru temporal-1.21.5-1/src/common/rpc/interceptor/telemetry_test.go temporal-1.22.5/src/common/rpc/interceptor/telemetry_test.go --- temporal-1.21.5-1/src/common/rpc/interceptor/telemetry_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/rpc/interceptor/telemetry_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -30,11 +30,17 @@ "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" + "go.temporal.io/server/common/api" "go.temporal.io/server/common/log" "go.temporal.io/server/common/metrics" "go.temporal.io/server/common/namespace" ) +const ( + startWorkflow = "StartWorkflowExecution" + queryWorkflow = "QueryWorkflow" +) + func TestEmitActionMetric(t *testing.T) { controller := gomock.NewController(t) register := namespace.NewMockRegistry(controller) @@ -47,18 +53,18 @@ expectEmitMetrics bool }{ { - metrics.FrontendQueryWorkflowScope, - frontendPackagePrefix + metrics.FrontendQueryWorkflowScope, + queryWorkflow, + api.WorkflowServicePrefix + queryWorkflow, true, }, { - metrics.FrontendQueryWorkflowScope, - metrics.FrontendQueryWorkflowScope, + queryWorkflow, + api.AdminServicePrefix + queryWorkflow, false, }, { metrics.MatchingClientAddWorkflowTaskScope, - frontendPackagePrefix + metrics.FrontendQueryWorkflowScope, + api.WorkflowServicePrefix + queryWorkflow, false, }, } @@ -88,18 +94,18 @@ }{ { "DeleteWorkflowExecution", - adminServicePrefix + "DeleteWorkflowExecution", + api.AdminServicePrefix + "DeleteWorkflowExecution", "AdminDeleteWorkflowExecution", }, { "DeleteNamespace", - operatorServicePrefix + "DeleteNamespace", + api.OperatorServicePrefix + "DeleteNamespace", "OperatorDeleteNamespace", }, { - metrics.FrontendStartWorkflowExecutionScope, - frontendPackagePrefix + metrics.FrontendStartWorkflowExecutionScope, - metrics.FrontendStartWorkflowExecutionScope, + startWorkflow, + api.WorkflowServicePrefix + startWorkflow, + startWorkflow, }, } diff -Nru temporal-1.21.5-1/src/common/serviceerror/currentBranchChanged.go temporal-1.22.5/src/common/serviceerror/currentBranchChanged.go --- temporal-1.21.5-1/src/common/serviceerror/currentBranchChanged.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/serviceerror/currentBranchChanged.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,80 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package serviceerror - -import ( - "github.com/gogo/status" - "google.golang.org/grpc/codes" - - "go.temporal.io/server/api/errordetails/v1" -) - -type ( - // CurrentBranchChanged represents current branch changed error. - CurrentBranchChanged struct { - Message string - CurrentBranchToken []byte - RequestBranchToken []byte - st *status.Status - } -) - -// NewCurrentBranchChanged returns new CurrentBranchChanged error. -func NewCurrentBranchChanged(currentBranchToken, requestBranchToken []byte) error { - return &CurrentBranchChanged{ - Message: "Current branch token and request branch token doesn't match.", - CurrentBranchToken: currentBranchToken, - RequestBranchToken: requestBranchToken, - } -} - -// Error returns string message. -func (e *CurrentBranchChanged) Error() string { - return e.Message -} - -func (e *CurrentBranchChanged) Status() *status.Status { - if e.st != nil { - return e.st - } - - st := status.New(codes.InvalidArgument, e.Message) - st, _ = st.WithDetails( - &errordetails.CurrentBranchChangedFailure{ - CurrentBranchToken: e.CurrentBranchToken, - RequestBranchToken: e.RequestBranchToken, - }, - ) - return st -} - -func newCurrentBranchChanged(st *status.Status, errDetails *errordetails.CurrentBranchChangedFailure) error { - return &CurrentBranchChanged{ - Message: st.Message(), - CurrentBranchToken: errDetails.GetCurrentBranchToken(), - RequestBranchToken: errDetails.GetRequestBranchToken(), - st: st, - } -} diff -Nru temporal-1.21.5-1/src/common/serviceerror/current_branch_changed.go temporal-1.22.5/src/common/serviceerror/current_branch_changed.go --- temporal-1.21.5-1/src/common/serviceerror/current_branch_changed.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/serviceerror/current_branch_changed.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,81 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package serviceerror + +import ( + "github.com/gogo/status" + "google.golang.org/grpc/codes" + + "go.temporal.io/server/api/errordetails/v1" +) + +type ( + // CurrentBranchChanged represents current branch changed error. + CurrentBranchChanged struct { + Message string + CurrentBranchToken []byte + RequestBranchToken []byte + st *status.Status + } +) + +// NewCurrentBranchChanged returns new CurrentBranchChanged error. +// TODO: Update CurrentBranchChanged with event id and event version. Do not use branch token bytes as branch identity. +func NewCurrentBranchChanged(currentBranchToken, requestBranchToken []byte) error { + return &CurrentBranchChanged{ + Message: "Current branch token and request branch token doesn't match.", + CurrentBranchToken: currentBranchToken, + RequestBranchToken: requestBranchToken, + } +} + +// Error returns string message. +func (e *CurrentBranchChanged) Error() string { + return e.Message +} + +func (e *CurrentBranchChanged) Status() *status.Status { + if e.st != nil { + return e.st + } + + st := status.New(codes.InvalidArgument, e.Message) + st, _ = st.WithDetails( + &errordetails.CurrentBranchChangedFailure{ + CurrentBranchToken: e.CurrentBranchToken, + RequestBranchToken: e.RequestBranchToken, + }, + ) + return st +} + +func newCurrentBranchChanged(st *status.Status, errDetails *errordetails.CurrentBranchChangedFailure) error { + return &CurrentBranchChanged{ + Message: st.Message(), + CurrentBranchToken: errDetails.GetCurrentBranchToken(), + RequestBranchToken: errDetails.GetRequestBranchToken(), + st: st, + } +} diff -Nru temporal-1.21.5-1/src/common/serviceerror/shardOwnershipLost.go temporal-1.22.5/src/common/serviceerror/shardOwnershipLost.go --- temporal-1.21.5-1/src/common/serviceerror/shardOwnershipLost.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/serviceerror/shardOwnershipLost.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,82 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package serviceerror - -import ( - "fmt" - - "github.com/gogo/status" - "google.golang.org/grpc/codes" - - "go.temporal.io/server/api/errordetails/v1" -) - -type ( - // ShardOwnershipLost represents shard ownership lost error. - ShardOwnershipLost struct { - Message string - OwnerHost string - CurrentHost string - st *status.Status - } -) - -// NewShardOwnershipLost returns new ShardOwnershipLost error. -func NewShardOwnershipLost(ownerHost string, currentHost string) error { - return &ShardOwnershipLost{ - Message: fmt.Sprintf("Shard is owned by:%v but not by %v", ownerHost, currentHost), - OwnerHost: ownerHost, - CurrentHost: currentHost, - } -} - -// Error returns string message. -func (e *ShardOwnershipLost) Error() string { - return e.Message -} - -func (e *ShardOwnershipLost) Status() *status.Status { - if e.st != nil { - return e.st - } - - st := status.New(codes.Aborted, e.Message) - st, _ = st.WithDetails( - &errordetails.ShardOwnershipLostFailure{ - OwnerHost: e.OwnerHost, - CurrentHost: e.CurrentHost, - }, - ) - return st -} - -func newShardOwnershipLost(st *status.Status, errDetails *errordetails.ShardOwnershipLostFailure) error { - return &ShardOwnershipLost{ - Message: st.Message(), - OwnerHost: errDetails.GetOwnerHost(), - CurrentHost: errDetails.GetCurrentHost(), - st: st, - } -} diff -Nru temporal-1.21.5-1/src/common/serviceerror/shard_ownership_lost.go temporal-1.22.5/src/common/serviceerror/shard_ownership_lost.go --- temporal-1.21.5-1/src/common/serviceerror/shard_ownership_lost.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/serviceerror/shard_ownership_lost.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,82 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package serviceerror + +import ( + "fmt" + + "github.com/gogo/status" + "google.golang.org/grpc/codes" + + "go.temporal.io/server/api/errordetails/v1" +) + +type ( + // ShardOwnershipLost represents shard ownership lost error. + ShardOwnershipLost struct { + Message string + OwnerHost string + CurrentHost string + st *status.Status + } +) + +// NewShardOwnershipLost returns new ShardOwnershipLost error. +func NewShardOwnershipLost(ownerHost string, currentHost string) error { + return &ShardOwnershipLost{ + Message: fmt.Sprintf("Shard is owned by:%v but not by %v", ownerHost, currentHost), + OwnerHost: ownerHost, + CurrentHost: currentHost, + } +} + +// Error returns string message. +func (e *ShardOwnershipLost) Error() string { + return e.Message +} + +func (e *ShardOwnershipLost) Status() *status.Status { + if e.st != nil { + return e.st + } + + st := status.New(codes.Aborted, e.Message) + st, _ = st.WithDetails( + &errordetails.ShardOwnershipLostFailure{ + OwnerHost: e.OwnerHost, + CurrentHost: e.CurrentHost, + }, + ) + return st +} + +func newShardOwnershipLost(st *status.Status, errDetails *errordetails.ShardOwnershipLostFailure) error { + return &ShardOwnershipLost{ + Message: st.Message(), + OwnerHost: errDetails.GetOwnerHost(), + CurrentHost: errDetails.GetCurrentHost(), + st: st, + } +} diff -Nru temporal-1.21.5-1/src/common/serviceerror/taskAlreadyStarted.go temporal-1.22.5/src/common/serviceerror/taskAlreadyStarted.go --- temporal-1.21.5-1/src/common/serviceerror/taskAlreadyStarted.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/serviceerror/taskAlreadyStarted.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,73 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- -package serviceerror - -import ( - "fmt" - - "github.com/gogo/status" - "google.golang.org/grpc/codes" - - "go.temporal.io/server/api/errordetails/v1" -) - -type ( - // TaskAlreadyStarted represents task already started error. - TaskAlreadyStarted struct { - Message string - st *status.Status - } -) - -// NewTaskAlreadyStarted returns new TaskAlreadyStarted error. -func NewTaskAlreadyStarted(taskType string) error { - return &TaskAlreadyStarted{ - Message: fmt.Sprintf("%s task already started.", taskType), - } -} - -// Error returns string message. -func (e *TaskAlreadyStarted) Error() string { - return e.Message -} - -func (e *TaskAlreadyStarted) Status() *status.Status { - if e.st != nil { - return e.st - } - - st := status.New(codes.AlreadyExists, e.Message) - st, _ = st.WithDetails( - &errordetails.TaskAlreadyStartedFailure{}, - ) - return st -} - -func newTaskAlreadyStarted(st *status.Status) error { - return &TaskAlreadyStarted{ - Message: st.Message(), - st: st, - } -} diff -Nru temporal-1.21.5-1/src/common/serviceerror/task_already_started.go temporal-1.22.5/src/common/serviceerror/task_already_started.go --- temporal-1.21.5-1/src/common/serviceerror/task_already_started.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/serviceerror/task_already_started.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,73 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package serviceerror + +import ( + "fmt" + + "github.com/gogo/status" + "google.golang.org/grpc/codes" + + "go.temporal.io/server/api/errordetails/v1" +) + +type ( + // TaskAlreadyStarted represents task already started error. + TaskAlreadyStarted struct { + Message string + st *status.Status + } +) + +// NewTaskAlreadyStarted returns new TaskAlreadyStarted error. +func NewTaskAlreadyStarted(taskType string) error { + return &TaskAlreadyStarted{ + Message: fmt.Sprintf("%s task already started.", taskType), + } +} + +// Error returns string message. 
+func (e *TaskAlreadyStarted) Error() string { + return e.Message +} + +func (e *TaskAlreadyStarted) Status() *status.Status { + if e.st != nil { + return e.st + } + + st := status.New(codes.AlreadyExists, e.Message) + st, _ = st.WithDetails( + &errordetails.TaskAlreadyStartedFailure{}, + ) + return st +} + +func newTaskAlreadyStarted(st *status.Status) error { + return &TaskAlreadyStarted{ + Message: st.Message(), + st: st, + } +} diff -Nru temporal-1.21.5-1/src/common/taskTokenSerializerInterfaces.go temporal-1.22.5/src/common/taskTokenSerializerInterfaces.go --- temporal-1.21.5-1/src/common/taskTokenSerializerInterfaces.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/taskTokenSerializerInterfaces.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,39 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package common - -import ( - tokenspb "go.temporal.io/server/api/token/v1" -) - -type ( - // TaskTokenSerializer serializes task tokens - TaskTokenSerializer interface { - Serialize(token *tokenspb.Task) ([]byte, error) - Deserialize(data []byte) (*tokenspb.Task, error) - SerializeQueryTaskToken(token *tokenspb.QueryTask) ([]byte, error) - DeserializeQueryTaskToken(data []byte) (*tokenspb.QueryTask, error) - } -) diff -Nru temporal-1.21.5-1/src/common/task_token_serializer_interfaces.go temporal-1.22.5/src/common/task_token_serializer_interfaces.go --- temporal-1.21.5-1/src/common/task_token_serializer_interfaces.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/task_token_serializer_interfaces.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,39 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package common + +import ( + tokenspb "go.temporal.io/server/api/token/v1" +) + +type ( + // TaskTokenSerializer serializes task tokens + TaskTokenSerializer interface { + Serialize(token *tokenspb.Task) ([]byte, error) + Deserialize(data []byte) (*tokenspb.Task, error) + SerializeQueryTaskToken(token *tokenspb.QueryTask) ([]byte, error) + DeserializeQueryTaskToken(data []byte) (*tokenspb.QueryTask, error) + } +) diff -Nru temporal-1.21.5-1/src/common/tasks/scheduler.go temporal-1.22.5/src/common/tasks/scheduler.go --- temporal-1.21.5-1/src/common/tasks/scheduler.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/tasks/scheduler.go 2024-02-23 09:45:43.000000000 +0000 @@ -24,16 +24,12 @@ package tasks -import ( - "go.temporal.io/server/common" -) - type ( // Scheduler is the generic interface for scheduling & processing tasks Scheduler[T Task] interface { - common.Daemon - Submit(task T) TrySubmit(task T) bool + Start() + Stop() } ) diff -Nru temporal-1.21.5-1/src/common/tasks/sequential_scheduler.go temporal-1.22.5/src/common/tasks/sequential_scheduler.go --- temporal-1.21.5-1/src/common/tasks/sequential_scheduler.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/tasks/sequential_scheduler.go 2024-02-23 09:45:43.000000000 +0000 @@ -190,11 +190,13 @@ defer s.shutdownWG.Done() for { + timer := time.NewTimer(backoff.Jitter(defaultMonitorTickerDuration, defaultMonitorTickerJitter)) select { case <-s.shutdownChan: + timer.Stop() s.stopWorkers(len(s.workerShutdownCh)) return - case <-time.After(backoff.Jitter(defaultMonitorTickerDuration, defaultMonitorTickerJitter)): + case <-timer.C: targetWorkerNum := s.options.WorkerCount() currentWorkerNum := len(s.workerShutdownCh) diff -Nru temporal-1.21.5-1/src/common/tasktoken/token.go temporal-1.22.5/src/common/tasktoken/token.go --- temporal-1.21.5-1/src/common/tasktoken/token.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/tasktoken/token.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,80 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package tasktoken + +import ( + "time" + + v11 "go.temporal.io/server/api/clock/v1" + tokenspb "go.temporal.io/server/api/token/v1" +) + +func NewWorkflowTaskToken( + namespaceID string, + workflowID string, + runID string, + scheduledEventID int64, + startedEventId int64, + startedTime *time.Time, + attempt int32, + clock *v11.VectorClock, + version int64, +) *tokenspb.Task { + return &tokenspb.Task{ + NamespaceId: namespaceID, + WorkflowId: workflowID, + RunId: runID, + ScheduledEventId: scheduledEventID, + StartedEventId: startedEventId, + StartedTime: startedTime, + Attempt: attempt, + Clock: clock, + Version: version, + } +} + +func NewActivityTaskToken( + namespaceID string, + workflowID string, + runID string, + scheduledEventID int64, + activityId string, + activityType string, + attempt int32, + clock *v11.VectorClock, + version int64, +) *tokenspb.Task { + return &tokenspb.Task{ + NamespaceId: namespaceID, + WorkflowId: workflowID, + RunId: runID, + ScheduledEventId: scheduledEventID, + ActivityType: activityType, + Attempt: attempt, + ActivityId: activityId, + Clock: clock, + Version: version, + } +} diff -Nru temporal-1.21.5-1/src/common/testing/mocksdk/client_mock.go temporal-1.22.5/src/common/testing/mocksdk/client_mock.go --- temporal-1.21.5-1/src/common/testing/mocksdk/client_mock.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/testing/mocksdk/client_mock.go 2024-02-23 09:45:43.000000000 +0000 @@ -227,6 +227,21 @@ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetWorkerBuildIdCompatibility", reflect.TypeOf((*MockClient)(nil).GetWorkerBuildIdCompatibility), arg0, arg1) } +// GetWorkerTaskReachability mocks base method. +func (m *MockClient) GetWorkerTaskReachability(arg0 context.Context, arg1 *client.GetWorkerTaskReachabilityOptions) (*client.WorkerTaskReachability, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetWorkerTaskReachability", arg0, arg1) + ret0, _ := ret[0].(*client.WorkerTaskReachability) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetWorkerTaskReachability indicates an expected call of GetWorkerTaskReachability. +func (mr *MockClientMockRecorder) GetWorkerTaskReachability(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetWorkerTaskReachability", reflect.TypeOf((*MockClient)(nil).GetWorkerTaskReachability), arg0, arg1) +} + // GetWorkflow mocks base method. 
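Note on the new tasktoken package added above: NewWorkflowTaskToken and NewActivityTaskToken provide constructors for the tokenspb.Task values used as workflow and activity task tokens. A minimal usage sketch, not part of the patch; the wrapper function and its package name are hypothetical.

    package sample // hypothetical caller, shown only to demonstrate the constructor

    import (
        "time"

        tokenspb "go.temporal.io/server/api/token/v1"
        "go.temporal.io/server/common/tasktoken"
    )

    // buildWorkflowTaskToken assembles a workflow task token through the new
    // helper instead of filling a tokenspb.Task literal field by field.
    func buildWorkflowTaskToken(
        namespaceID, workflowID, runID string,
        scheduledEventID, startedEventID int64,
        attempt int32,
        version int64,
    ) *tokenspb.Task {
        startedTime := time.Now()
        return tasktoken.NewWorkflowTaskToken(
            namespaceID, workflowID, runID,
            scheduledEventID, startedEventID, &startedTime,
            attempt,
            nil, // vector clock omitted in this sketch
            version,
        )
    }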
func (m *MockClient) GetWorkflow(arg0 context.Context, arg1, arg2 string) client.WorkflowRun { m.ctrl.T.Helper() diff -Nru temporal-1.21.5-1/src/common/util.go temporal-1.22.5/src/common/util.go --- temporal-1.21.5-1/src/common/util.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/util.go 2024-02-23 09:45:43.000000000 +0000 @@ -35,7 +35,6 @@ "github.com/dgryski/go-farm" "github.com/gogo/protobuf/proto" - "github.com/gogo/protobuf/types" commonpb "go.temporal.io/api/common/v1" enumspb "go.temporal.io/api/enums/v1" "go.temporal.io/api/serviceerror" @@ -43,7 +42,6 @@ "go.temporal.io/server/api/historyservice/v1" "go.temporal.io/server/api/matchingservice/v1" - taskqueuespb "go.temporal.io/server/api/taskqueue/v1" workflowspb "go.temporal.io/server/api/workflow/v1" "go.temporal.io/server/common/backoff" "go.temporal.io/server/common/dynamicconfig" @@ -71,7 +69,7 @@ frontendHandlerRetryInitialInterval = 200 * time.Millisecond frontendHandlerRetryMaxInterval = time.Second - frontendHandlerRetryMaxAttempts = 5 + frontendHandlerRetryMaxAttempts = 2 historyHandlerRetryInitialInterval = 50 * time.Millisecond historyHandlerRetryMaxAttempts = 2 @@ -166,18 +164,22 @@ close(doneC) }() + timer := time.NewTimer(timeout) + defer timer.Stop() select { case <-doneC: return true - case <-time.After(timeout): + case <-timer.C: return false } } // InterruptibleSleep is like time.Sleep but can be interrupted by a context. func InterruptibleSleep(ctx context.Context, timeout time.Duration) { + timer := time.NewTimer(timeout) + defer timer.Stop() select { - case <-time.After(timeout): + case <-timer.C: case <-ctx.Done(): } } @@ -793,44 +795,6 @@ return util.Min(taskStartToCloseTimeout, workflowRunTimeout) } -// StampIfUsingVersioning returns the given WorkerVersionStamp if it is using versioning, -// otherwise returns nil. -func StampIfUsingVersioning(stamp *commonpb.WorkerVersionStamp) *commonpb.WorkerVersionStamp { - if stamp.GetUseVersioning() { - return stamp - } - return nil -} - -func MakeVersionDirectiveForWorkflowTask( - stamp *commonpb.WorkerVersionStamp, - lastWorkflowTaskStartedEventID int64, -) *taskqueuespb.TaskVersionDirective { - var directive taskqueuespb.TaskVersionDirective - if id := StampIfUsingVersioning(stamp).GetBuildId(); id != "" { - directive.Value = &taskqueuespb.TaskVersionDirective_BuildId{BuildId: id} - } else if lastWorkflowTaskStartedEventID == EmptyEventID { - // first workflow task - directive.Value = &taskqueuespb.TaskVersionDirective_UseDefault{UseDefault: &types.Empty{}} - } - // else: unversioned queue - return &directive -} - -func MakeVersionDirectiveForActivityTask( - stamp *commonpb.WorkerVersionStamp, - useCompatibleVersion bool, -) *taskqueuespb.TaskVersionDirective { - var directive taskqueuespb.TaskVersionDirective - if !useCompatibleVersion { - directive.Value = &taskqueuespb.TaskVersionDirective_UseDefault{UseDefault: &types.Empty{}} - } else if id := StampIfUsingVersioning(stamp).GetBuildId(); id != "" { - directive.Value = &taskqueuespb.TaskVersionDirective_BuildId{BuildId: id} - } - // else: unversioned queue - return &directive -} - // CloneProto is a generic typed version of proto.Clone from gogoproto. 
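Note on the util.go hunks above: AwaitWaitGroup and InterruptibleSleep now use an explicit time.NewTimer with a deferred Stop instead of time.After. time.After keeps its underlying timer alive until the timeout fires even when the other select case wins first, so the explicit form releases the timer promptly. The general pattern, as an illustrative sketch mirroring that change and not part of the patch:

    package sample

    import "time"

    // waitWithTimeout waits for done or a timeout; the timer is stopped promptly
    // whichever case wins, which is the pattern the util.go change adopts.
    func waitWithTimeout(done <-chan struct{}, timeout time.Duration) bool {
        timer := time.NewTimer(timeout)
        defer timer.Stop()
        select {
        case <-done:
            return true
        case <-timer.C:
            return false
        }
    }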
func CloneProto[T proto.Message](v T) T { return proto.Clone(v).(T) diff -Nru temporal-1.21.5-1/src/common/util_test.go temporal-1.22.5/src/common/util_test.go --- temporal-1.21.5-1/src/common/util_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/util_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -270,9 +270,8 @@ } func TestIsContextDeadlineExceededErr(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) + ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(-time.Millisecond)) defer cancel() - time.Sleep(10 * time.Millisecond) require.True(t, IsContextDeadlineExceededErr(ctx.Err())) require.True(t, IsContextDeadlineExceededErr(serviceerror.NewDeadlineExceeded("something"))) diff -Nru temporal-1.21.5-1/src/common/worker_versioning/worker_versioning.go temporal-1.22.5/src/common/worker_versioning/worker_versioning.go --- temporal-1.21.5-1/src/common/worker_versioning/worker_versioning.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/worker_versioning/worker_versioning.go 2024-02-23 09:45:43.000000000 +0000 @@ -28,10 +28,13 @@ "context" "fmt" + "github.com/gogo/protobuf/types" "github.com/xwb1989/sqlparser" commonpb "go.temporal.io/api/common/v1" persistencespb "go.temporal.io/server/api/persistence/v1" + taskqueuespb "go.temporal.io/server/api/taskqueue/v1" + "go.temporal.io/server/common" "go.temporal.io/server/common/namespace" "go.temporal.io/server/common/persistence/visibility/manager" "go.temporal.io/server/common/searchattribute" @@ -70,18 +73,14 @@ // Returns -1, -1 if not found. func FindBuildId(versioningData *persistencespb.VersioningData, buildId string) (setIndex, indexInSet int) { versionSets := versioningData.GetVersionSets() - setIndex = -1 - indexInSet = -1 for sidx, set := range versionSets { for bidx, id := range set.GetBuildIds() { if buildId == id.Id { - setIndex = sidx - indexInSet = bidx - break + return sidx, bidx } } } - return setIndex, indexInSet + return -1, -1 } func WorkflowsExistForBuildId(ctx context.Context, visibilityManager manager.VisibilityManager, ns *namespace.Namespace, taskQueue, buildId string) (bool, error) { @@ -99,3 +98,41 @@ } return response.Count > 0, nil } + +// StampIfUsingVersioning returns the given WorkerVersionStamp if it is using versioning, +// otherwise returns nil. 
+func StampIfUsingVersioning(stamp *commonpb.WorkerVersionStamp) *commonpb.WorkerVersionStamp { + if stamp.GetUseVersioning() { + return stamp + } + return nil +} + +func MakeDirectiveForWorkflowTask( + stamp *commonpb.WorkerVersionStamp, + lastWorkflowTaskStartedEventID int64, +) *taskqueuespb.TaskVersionDirective { + var directive taskqueuespb.TaskVersionDirective + if id := StampIfUsingVersioning(stamp).GetBuildId(); id != "" { + directive.Value = &taskqueuespb.TaskVersionDirective_BuildId{BuildId: id} + } else if lastWorkflowTaskStartedEventID == common.EmptyEventID { + // first workflow task + directive.Value = &taskqueuespb.TaskVersionDirective_UseDefault{UseDefault: &types.Empty{}} + } + // else: unversioned queue + return &directive +} + +func MakeDirectiveForActivityTask( + stamp *commonpb.WorkerVersionStamp, + useCompatibleVersion bool, +) *taskqueuespb.TaskVersionDirective { + var directive taskqueuespb.TaskVersionDirective + if !useCompatibleVersion { + directive.Value = &taskqueuespb.TaskVersionDirective_UseDefault{UseDefault: &types.Empty{}} + } else if id := StampIfUsingVersioning(stamp).GetBuildId(); id != "" { + directive.Value = &taskqueuespb.TaskVersionDirective_BuildId{BuildId: id} + } + // else: unversioned queue + return &directive +} diff -Nru temporal-1.21.5-1/src/common/xdc/nDCHistoryResender.go temporal-1.22.5/src/common/xdc/nDCHistoryResender.go --- temporal-1.21.5-1/src/common/xdc/nDCHistoryResender.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/xdc/nDCHistoryResender.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,297 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
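Hedged usage sketch (not part of the packaged patch) for the helpers this hunk relocates from common/util.go into common/worker_versioning and renames to MakeDirectiveForWorkflowTask / MakeDirectiveForActivityTask. The import path, package identifier, and the WorkerVersionStamp field names below are assumptions inferred from the file path and from the GetUseVersioning()/GetBuildId() calls shown above; they are not confirmed by the diff itself:

package main

import (
	"fmt"

	commonpb "go.temporal.io/api/common/v1"

	// Assumed import path, derived from the
	// src/common/worker_versioning/worker_versioning.go path in the diff.
	"go.temporal.io/server/common/worker_versioning"
)

func main() {
	// Assumed field names; the hunk only shows the generated getters.
	stamp := &commonpb.WorkerVersionStamp{UseVersioning: true, BuildId: "build-abc"}

	// A versioned stamp pins the workflow task to its build id.
	wfDirective := worker_versioning.MakeDirectiveForWorkflowTask(stamp, 42)

	// useCompatibleVersion=false routes the activity task to the default version set.
	actDirective := worker_versioning.MakeDirectiveForActivityTask(stamp, false)

	fmt.Printf("%+v\n%+v\n", wfDirective, actDirective)
}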
- -//go:generate mockgen -copyright_file ../../LICENSE -package $GOPACKAGE -source $GOFILE -destination nDCHistoryResender_mock.go - -package xdc - -import ( - "context" - "time" - - commonpb "go.temporal.io/api/common/v1" - - "go.temporal.io/server/api/adminservice/v1" - historyspb "go.temporal.io/server/api/history/v1" - "go.temporal.io/server/api/historyservice/v1" - "go.temporal.io/server/client" - "go.temporal.io/server/common/collection" - "go.temporal.io/server/common/dynamicconfig" - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/log/tag" - "go.temporal.io/server/common/namespace" - "go.temporal.io/server/common/persistence/serialization" - "go.temporal.io/server/common/rpc" -) - -const ( - resendContextTimeout = 30 * time.Second -) - -type ( - // nDCHistoryReplicationFn provides the functionality to deliver replication raw history request to history - // the provided func should be thread safe - nDCHistoryReplicationFn func(ctx context.Context, request *historyservice.ReplicateEventsV2Request) error - - // NDCHistoryResender is the interface for resending history events to remote - NDCHistoryResender interface { - // SendSingleWorkflowHistory sends multiple run IDs's history events to remote - SendSingleWorkflowHistory( - ctx context.Context, - remoteClusterName string, - namespaceID namespace.ID, - workflowID string, - runID string, - startEventID int64, - startEventVersion int64, - endEventID int64, - endEventVersion int64, - ) error - } - - // NDCHistoryResenderImpl is the implementation of NDCHistoryResender - NDCHistoryResenderImpl struct { - namespaceRegistry namespace.Registry - clientBean client.Bean - historyReplicationFn nDCHistoryReplicationFn - serializer serialization.Serializer - rereplicationTimeout dynamicconfig.DurationPropertyFnWithNamespaceIDFilter - logger log.Logger - } - - historyBatch struct { - versionHistory *historyspb.VersionHistory - rawEventBatch *commonpb.DataBlob - } -) - -const ( - defaultPageSize = int32(100) -) - -// NewNDCHistoryResender create a new NDCHistoryResenderImpl -func NewNDCHistoryResender( - namespaceRegistry namespace.Registry, - clientBean client.Bean, - historyReplicationFn nDCHistoryReplicationFn, - serializer serialization.Serializer, - rereplicationTimeout dynamicconfig.DurationPropertyFnWithNamespaceIDFilter, - logger log.Logger, -) *NDCHistoryResenderImpl { - - return &NDCHistoryResenderImpl{ - namespaceRegistry: namespaceRegistry, - clientBean: clientBean, - historyReplicationFn: historyReplicationFn, - serializer: serializer, - rereplicationTimeout: rereplicationTimeout, - logger: logger, - } -} - -// SendSingleWorkflowHistory sends one run IDs's history events to remote -func (n *NDCHistoryResenderImpl) SendSingleWorkflowHistory( - ctx context.Context, - remoteClusterName string, - namespaceID namespace.ID, - workflowID string, - runID string, - startEventID int64, - startEventVersion int64, - endEventID int64, - endEventVersion int64, -) error { - - resendCtx := context.Background() - var cancel context.CancelFunc - if n.rereplicationTimeout != nil { - resendContextTimeout := n.rereplicationTimeout(namespaceID.String()) - if resendContextTimeout > 0 { - resendCtx, cancel = context.WithTimeout(resendCtx, resendContextTimeout) - defer cancel() - } - } - resendCtx = rpc.CopyContextValues(resendCtx, ctx) - - historyIterator := collection.NewPagingIterator(n.getPaginationFn( - resendCtx, - remoteClusterName, - namespaceID, - workflowID, - runID, - startEventID, - startEventVersion, - endEventID, - 
endEventVersion, - )) - - for historyIterator.HasNext() { - batch, err := historyIterator.Next() - if err != nil { - n.logger.Error("failed to get history events", - tag.WorkflowNamespaceID(namespaceID.String()), - tag.WorkflowID(workflowID), - tag.WorkflowRunID(runID), - tag.Error(err)) - return err - } - - replicationRequest := n.createReplicationRawRequest( - namespaceID, - workflowID, - runID, - batch.rawEventBatch, - batch.versionHistory.GetItems()) - - err = n.sendReplicationRawRequest(resendCtx, replicationRequest) - if err != nil { - n.logger.Error("failed to replicate events", - tag.WorkflowNamespaceID(namespaceID.String()), - tag.WorkflowID(workflowID), - tag.WorkflowRunID(runID), - tag.Error(err)) - return err - } - } - return nil -} - -func (n *NDCHistoryResenderImpl) getPaginationFn( - ctx context.Context, - remoteClusterName string, - namespaceID namespace.ID, - workflowID string, - runID string, - startEventID int64, - startEventVersion int64, - endEventID int64, - endEventVersion int64, -) collection.PaginationFn[historyBatch] { - - return func(paginationToken []byte) ([]historyBatch, []byte, error) { - - response, err := n.getHistory( - ctx, - remoteClusterName, - namespaceID, - workflowID, - runID, - startEventID, - startEventVersion, - endEventID, - endEventVersion, - paginationToken, - defaultPageSize, - ) - if err != nil { - return nil, nil, err - } - - batches := make([]historyBatch, 0, len(response.GetHistoryBatches())) - versionHistory := response.GetVersionHistory() - for _, history := range response.GetHistoryBatches() { - batch := historyBatch{ - versionHistory: versionHistory, - rawEventBatch: history, - } - batches = append(batches, batch) - } - return batches, response.NextPageToken, nil - } -} - -func (n *NDCHistoryResenderImpl) createReplicationRawRequest( - namespaceID namespace.ID, - workflowID string, - runID string, - historyBlob *commonpb.DataBlob, - versionHistoryItems []*historyspb.VersionHistoryItem, -) *historyservice.ReplicateEventsV2Request { - - request := &historyservice.ReplicateEventsV2Request{ - NamespaceId: namespaceID.String(), - WorkflowExecution: &commonpb.WorkflowExecution{ - WorkflowId: workflowID, - RunId: runID, - }, - Events: historyBlob, - VersionHistoryItems: versionHistoryItems, - } - return request -} - -func (n *NDCHistoryResenderImpl) sendReplicationRawRequest( - ctx context.Context, - request *historyservice.ReplicateEventsV2Request, -) error { - - ctx, cancel := context.WithTimeout(ctx, resendContextTimeout) - defer cancel() - return n.historyReplicationFn(ctx, request) -} - -func (n *NDCHistoryResenderImpl) getHistory( - ctx context.Context, - remoteClusterName string, - namespaceID namespace.ID, - workflowID string, - runID string, - startEventID int64, - startEventVersion int64, - endEventID int64, - endEventVersion int64, - token []byte, - pageSize int32, -) (*adminservice.GetWorkflowExecutionRawHistoryV2Response, error) { - - logger := log.With(n.logger, tag.WorkflowRunID(runID)) - - ctx, cancel := rpc.NewContextFromParentWithTimeoutAndVersionHeaders(ctx, resendContextTimeout) - defer cancel() - - adminClient, err := n.clientBean.GetRemoteAdminClient(remoteClusterName) - if err != nil { - return nil, err - } - - response, err := adminClient.GetWorkflowExecutionRawHistoryV2(ctx, &adminservice.GetWorkflowExecutionRawHistoryV2Request{ - NamespaceId: namespaceID.String(), - Execution: &commonpb.WorkflowExecution{ - WorkflowId: workflowID, - RunId: runID, - }, - StartEventId: startEventID, - StartEventVersion: 
startEventVersion, - EndEventId: endEventID, - EndEventVersion: endEventVersion, - MaximumPageSize: pageSize, - NextPageToken: token, - }) - if err != nil { - logger.Error("error getting history", tag.Error(err)) - return nil, err - } - - return response, nil -} diff -Nru temporal-1.21.5-1/src/common/xdc/nDCHistoryResender_mock.go temporal-1.22.5/src/common/xdc/nDCHistoryResender_mock.go --- temporal-1.21.5-1/src/common/xdc/nDCHistoryResender_mock.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/xdc/nDCHistoryResender_mock.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,74 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Code generated by MockGen. DO NOT EDIT. -// Source: nDCHistoryResender.go - -// Package xdc is a generated GoMock package. -package xdc - -import ( - context "context" - reflect "reflect" - - gomock "github.com/golang/mock/gomock" - namespace "go.temporal.io/server/common/namespace" -) - -// MockNDCHistoryResender is a mock of NDCHistoryResender interface. -type MockNDCHistoryResender struct { - ctrl *gomock.Controller - recorder *MockNDCHistoryResenderMockRecorder -} - -// MockNDCHistoryResenderMockRecorder is the mock recorder for MockNDCHistoryResender. -type MockNDCHistoryResenderMockRecorder struct { - mock *MockNDCHistoryResender -} - -// NewMockNDCHistoryResender creates a new mock instance. -func NewMockNDCHistoryResender(ctrl *gomock.Controller) *MockNDCHistoryResender { - mock := &MockNDCHistoryResender{ctrl: ctrl} - mock.recorder = &MockNDCHistoryResenderMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockNDCHistoryResender) EXPECT() *MockNDCHistoryResenderMockRecorder { - return m.recorder -} - -// SendSingleWorkflowHistory mocks base method. 
-func (m *MockNDCHistoryResender) SendSingleWorkflowHistory(ctx context.Context, remoteClusterName string, namespaceID namespace.ID, workflowID, runID string, startEventID, startEventVersion, endEventID, endEventVersion int64) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SendSingleWorkflowHistory", ctx, remoteClusterName, namespaceID, workflowID, runID, startEventID, startEventVersion, endEventID, endEventVersion) - ret0, _ := ret[0].(error) - return ret0 -} - -// SendSingleWorkflowHistory indicates an expected call of SendSingleWorkflowHistory. -func (mr *MockNDCHistoryResenderMockRecorder) SendSingleWorkflowHistory(ctx, remoteClusterName, namespaceID, workflowID, runID, startEventID, startEventVersion, endEventID, endEventVersion interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendSingleWorkflowHistory", reflect.TypeOf((*MockNDCHistoryResender)(nil).SendSingleWorkflowHistory), ctx, remoteClusterName, namespaceID, workflowID, runID, startEventID, startEventVersion, endEventID, endEventVersion) -} diff -Nru temporal-1.21.5-1/src/common/xdc/nDCHistoryResender_test.go temporal-1.22.5/src/common/xdc/nDCHistoryResender_test.go --- temporal-1.21.5-1/src/common/xdc/nDCHistoryResender_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/common/xdc/nDCHistoryResender_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,385 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- -package xdc - -import ( - "context" - "testing" - "time" - - "github.com/golang/mock/gomock" - "github.com/pborman/uuid" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - commonpb "go.temporal.io/api/common/v1" - enumspb "go.temporal.io/api/enums/v1" - historypb "go.temporal.io/api/history/v1" - - "go.temporal.io/server/api/adminservice/v1" - "go.temporal.io/server/api/adminservicemock/v1" - historyspb "go.temporal.io/server/api/history/v1" - "go.temporal.io/server/api/historyservice/v1" - "go.temporal.io/server/api/historyservicemock/v1" - persistencespb "go.temporal.io/server/api/persistence/v1" - "go.temporal.io/server/client" - "go.temporal.io/server/common" - "go.temporal.io/server/common/cluster" - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/namespace" - "go.temporal.io/server/common/persistence/serialization" - "go.temporal.io/server/common/primitives/timestamp" - serviceerrors "go.temporal.io/server/common/serviceerror" -) - -type ( - nDCHistoryResenderSuite struct { - suite.Suite - *require.Assertions - - controller *gomock.Controller - mockClusterMetadata *cluster.MockMetadata - mockNamespaceCache *namespace.MockRegistry - mockClientBean *client.MockBean - mockAdminClient *adminservicemock.MockAdminServiceClient - mockHistoryClient *historyservicemock.MockHistoryServiceClient - - namespaceID namespace.ID - namespace namespace.Name - - serializer serialization.Serializer - logger log.Logger - - rereplicator *NDCHistoryResenderImpl - } -) - -func TestNDCHistoryResenderSuite(t *testing.T) { - s := new(nDCHistoryResenderSuite) - suite.Run(t, s) -} - -func (s *nDCHistoryResenderSuite) SetupSuite() { -} - -func (s *nDCHistoryResenderSuite) TearDownSuite() { - -} - -func (s *nDCHistoryResenderSuite) SetupTest() { - s.Assertions = require.New(s.T()) - - s.controller = gomock.NewController(s.T()) - s.mockClusterMetadata = cluster.NewMockMetadata(s.controller) - s.mockClientBean = client.NewMockBean(s.controller) - s.mockAdminClient = adminservicemock.NewMockAdminServiceClient(s.controller) - s.mockHistoryClient = historyservicemock.NewMockHistoryServiceClient(s.controller) - s.mockNamespaceCache = namespace.NewMockRegistry(s.controller) - - s.mockClientBean.EXPECT().GetRemoteAdminClient(gomock.Any()).Return(s.mockAdminClient, nil).AnyTimes() - - s.logger = log.NewTestLogger() - s.mockClusterMetadata.EXPECT().IsGlobalNamespaceEnabled().Return(true).AnyTimes() - - s.namespaceID = namespace.ID(uuid.New()) - s.namespace = "some random namespace name" - namespaceEntry := namespace.NewGlobalNamespaceForTest( - &persistencespb.NamespaceInfo{Id: s.namespaceID.String(), Name: s.namespace.String()}, - &persistencespb.NamespaceConfig{Retention: timestamp.DurationFromDays(1)}, - &persistencespb.NamespaceReplicationConfig{ - ActiveClusterName: cluster.TestCurrentClusterName, - Clusters: []string{ - cluster.TestCurrentClusterName, - cluster.TestAlternativeClusterName, - }, - }, - 1234, - ) - s.mockNamespaceCache.EXPECT().GetNamespaceByID(s.namespaceID).Return(namespaceEntry, nil).AnyTimes() - s.mockNamespaceCache.EXPECT().GetNamespace(s.namespace).Return(namespaceEntry, nil).AnyTimes() - s.serializer = serialization.NewSerializer() - - s.rereplicator = NewNDCHistoryResender( - s.mockNamespaceCache, - s.mockClientBean, - func(ctx context.Context, request *historyservice.ReplicateEventsV2Request) error { - _, err := s.mockHistoryClient.ReplicateEventsV2(ctx, request) - return err - }, - serialization.NewSerializer(), - nil, - s.logger, - ) -} - 
-func (s *nDCHistoryResenderSuite) TearDownTest() { - s.controller.Finish() -} - -func (s *nDCHistoryResenderSuite) TestSendSingleWorkflowHistory() { - workflowID := "some random workflow ID" - runID := uuid.New() - startEventID := int64(123) - startEventVersion := int64(100) - token := []byte{1} - pageSize := defaultPageSize - eventBatch := []*historypb.HistoryEvent{ - { - EventId: 2, - Version: 123, - EventTime: timestamp.TimePtr(time.Now().UTC()), - EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_SCHEDULED, - }, - { - EventId: 3, - Version: 123, - EventTime: timestamp.TimePtr(time.Now().UTC()), - EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_STARTED, - }, - } - blob := s.serializeEvents(eventBatch) - versionHistoryItems := []*historyspb.VersionHistoryItem{ - { - EventId: 1, - Version: 1, - }, - } - - s.mockAdminClient.EXPECT().GetWorkflowExecutionRawHistoryV2( - gomock.Any(), - &adminservice.GetWorkflowExecutionRawHistoryV2Request{ - NamespaceId: s.namespaceID.String(), - Execution: &commonpb.WorkflowExecution{ - WorkflowId: workflowID, - RunId: runID, - }, - StartEventId: startEventID, - StartEventVersion: startEventVersion, - EndEventId: common.EmptyEventID, - EndEventVersion: common.EmptyVersion, - MaximumPageSize: pageSize, - NextPageToken: nil, - }).Return(&adminservice.GetWorkflowExecutionRawHistoryV2Response{ - HistoryBatches: []*commonpb.DataBlob{blob}, - NextPageToken: token, - VersionHistory: &historyspb.VersionHistory{ - Items: versionHistoryItems, - }, - }, nil) - - s.mockAdminClient.EXPECT().GetWorkflowExecutionRawHistoryV2( - gomock.Any(), - &adminservice.GetWorkflowExecutionRawHistoryV2Request{ - NamespaceId: s.namespaceID.String(), - Execution: &commonpb.WorkflowExecution{ - WorkflowId: workflowID, - RunId: runID, - }, - StartEventId: startEventID, - StartEventVersion: startEventVersion, - EndEventId: common.EmptyEventID, - EndEventVersion: common.EmptyVersion, - MaximumPageSize: pageSize, - NextPageToken: token, - }).Return(&adminservice.GetWorkflowExecutionRawHistoryV2Response{ - HistoryBatches: []*commonpb.DataBlob{blob}, - NextPageToken: nil, - VersionHistory: &historyspb.VersionHistory{ - Items: versionHistoryItems, - }, - }, nil) - - s.mockHistoryClient.EXPECT().ReplicateEventsV2( - gomock.Any(), - &historyservice.ReplicateEventsV2Request{ - NamespaceId: s.namespaceID.String(), - WorkflowExecution: &commonpb.WorkflowExecution{ - WorkflowId: workflowID, - RunId: runID, - }, - VersionHistoryItems: versionHistoryItems, - Events: blob, - }).Return(nil, nil).Times(2) - - err := s.rereplicator.SendSingleWorkflowHistory( - context.Background(), - cluster.TestCurrentClusterName, - s.namespaceID, - workflowID, - runID, - startEventID, - startEventVersion, - common.EmptyEventID, - common.EmptyVersion, - ) - - s.Nil(err) -} - -func (s *nDCHistoryResenderSuite) TestCreateReplicateRawEventsRequest() { - workflowID := "some random workflow ID" - runID := uuid.New() - blob := &commonpb.DataBlob{ - EncodingType: enumspb.ENCODING_TYPE_PROTO3, - Data: []byte("some random history blob"), - } - versionHistoryItems := []*historyspb.VersionHistoryItem{ - { - EventId: 1, - Version: 1, - }, - } - - s.Equal(&historyservice.ReplicateEventsV2Request{ - NamespaceId: s.namespaceID.String(), - WorkflowExecution: &commonpb.WorkflowExecution{ - WorkflowId: workflowID, - RunId: runID, - }, - VersionHistoryItems: versionHistoryItems, - Events: blob, - }, s.rereplicator.createReplicationRawRequest( - s.namespaceID, - workflowID, - runID, - blob, - versionHistoryItems)) -} - -func (s 
*nDCHistoryResenderSuite) TestSendReplicationRawRequest() { - workflowID := "some random workflow ID" - runID := uuid.New() - item := &historyspb.VersionHistoryItem{ - EventId: 1, - Version: 1, - } - request := &historyservice.ReplicateEventsV2Request{ - NamespaceId: s.namespaceID.String(), - WorkflowExecution: &commonpb.WorkflowExecution{ - WorkflowId: workflowID, - RunId: runID, - }, - Events: &commonpb.DataBlob{ - EncodingType: enumspb.ENCODING_TYPE_PROTO3, - Data: []byte("some random history blob"), - }, - VersionHistoryItems: []*historyspb.VersionHistoryItem{item}, - } - - s.mockHistoryClient.EXPECT().ReplicateEventsV2(gomock.Any(), request).Return(nil, nil) - err := s.rereplicator.sendReplicationRawRequest(context.Background(), request) - s.Nil(err) -} - -func (s *nDCHistoryResenderSuite) TestSendReplicationRawRequest_Err() { - workflowID := "some random workflow ID" - runID := uuid.New() - item := &historyspb.VersionHistoryItem{ - EventId: 1, - Version: 1, - } - request := &historyservice.ReplicateEventsV2Request{ - NamespaceId: s.namespaceID.String(), - WorkflowExecution: &commonpb.WorkflowExecution{ - WorkflowId: workflowID, - RunId: runID, - }, - Events: &commonpb.DataBlob{ - EncodingType: enumspb.ENCODING_TYPE_PROTO3, - Data: []byte("some random history blob"), - }, - VersionHistoryItems: []*historyspb.VersionHistoryItem{item}, - } - retryErr := serviceerrors.NewRetryReplication( - "", - s.namespaceID.String(), - workflowID, - runID, - common.EmptyEventID, - common.EmptyVersion, - common.EmptyEventID, - common.EmptyVersion, - ) - - s.mockHistoryClient.EXPECT().ReplicateEventsV2(gomock.Any(), request).Return(nil, retryErr) - err := s.rereplicator.sendReplicationRawRequest(context.Background(), request) - s.Equal(retryErr, err) -} - -func (s *nDCHistoryResenderSuite) TestGetHistory() { - workflowID := "some random workflow ID" - runID := uuid.New() - startEventID := int64(123) - endEventID := int64(345) - version := int64(20) - nextTokenIn := []byte("some random next token in") - nextTokenOut := []byte("some random next token out") - pageSize := int32(59) - blob := []byte("some random events blob") - - response := &adminservice.GetWorkflowExecutionRawHistoryV2Response{ - HistoryBatches: []*commonpb.DataBlob{{ - EncodingType: enumspb.ENCODING_TYPE_PROTO3, - Data: blob, - }}, - NextPageToken: nextTokenOut, - } - s.mockAdminClient.EXPECT().GetWorkflowExecutionRawHistoryV2(gomock.Any(), &adminservice.GetWorkflowExecutionRawHistoryV2Request{ - NamespaceId: s.namespaceID.String(), - Execution: &commonpb.WorkflowExecution{ - WorkflowId: workflowID, - RunId: runID, - }, - StartEventId: startEventID, - StartEventVersion: version, - EndEventId: endEventID, - EndEventVersion: version, - MaximumPageSize: pageSize, - NextPageToken: nextTokenIn, - }).Return(response, nil) - - out, err := s.rereplicator.getHistory( - context.Background(), - cluster.TestCurrentClusterName, - s.namespaceID, - workflowID, - runID, - startEventID, - version, - endEventID, - version, - nextTokenIn, - pageSize) - s.Nil(err) - s.Equal(response, out) -} - -func (s *nDCHistoryResenderSuite) serializeEvents(events []*historypb.HistoryEvent) *commonpb.DataBlob { - blob, err := s.serializer.SerializeEvents(events, enumspb.ENCODING_TYPE_PROTO3) - s.Nil(err) - return &commonpb.DataBlob{ - EncodingType: enumspb.ENCODING_TYPE_PROTO3, - Data: blob.Data, - } -} diff -Nru temporal-1.21.5-1/src/common/xdc/ndc_history_resender.go temporal-1.22.5/src/common/xdc/ndc_history_resender.go --- 
temporal-1.21.5-1/src/common/xdc/ndc_history_resender.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/xdc/ndc_history_resender.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,297 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +//go:generate mockgen -copyright_file ../../LICENSE -package $GOPACKAGE -source $GOFILE -destination ndc_history_resender_mock.go + +package xdc + +import ( + "context" + "time" + + commonpb "go.temporal.io/api/common/v1" + + "go.temporal.io/server/api/adminservice/v1" + historyspb "go.temporal.io/server/api/history/v1" + "go.temporal.io/server/api/historyservice/v1" + "go.temporal.io/server/client" + "go.temporal.io/server/common/collection" + "go.temporal.io/server/common/dynamicconfig" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/log/tag" + "go.temporal.io/server/common/namespace" + "go.temporal.io/server/common/persistence/serialization" + "go.temporal.io/server/common/rpc" +) + +const ( + resendContextTimeout = 30 * time.Second +) + +type ( + // nDCHistoryReplicationFn provides the functionality to deliver replication raw history request to history + // the provided func should be thread safe + nDCHistoryReplicationFn func(ctx context.Context, request *historyservice.ReplicateEventsV2Request) error + + // NDCHistoryResender is the interface for resending history events to remote + NDCHistoryResender interface { + // SendSingleWorkflowHistory sends multiple run IDs's history events to remote + SendSingleWorkflowHistory( + ctx context.Context, + remoteClusterName string, + namespaceID namespace.ID, + workflowID string, + runID string, + startEventID int64, + startEventVersion int64, + endEventID int64, + endEventVersion int64, + ) error + } + + // NDCHistoryResenderImpl is the implementation of NDCHistoryResender + NDCHistoryResenderImpl struct { + namespaceRegistry namespace.Registry + clientBean client.Bean + historyReplicationFn nDCHistoryReplicationFn + serializer serialization.Serializer + rereplicationTimeout dynamicconfig.DurationPropertyFnWithNamespaceIDFilter + logger log.Logger + } + + historyBatch struct { + versionHistory *historyspb.VersionHistory + rawEventBatch *commonpb.DataBlob + } +) + +const ( + defaultPageSize = int32(100) +) + +// NewNDCHistoryResender create a new NDCHistoryResenderImpl +func NewNDCHistoryResender( + 
namespaceRegistry namespace.Registry, + clientBean client.Bean, + historyReplicationFn nDCHistoryReplicationFn, + serializer serialization.Serializer, + rereplicationTimeout dynamicconfig.DurationPropertyFnWithNamespaceIDFilter, + logger log.Logger, +) *NDCHistoryResenderImpl { + + return &NDCHistoryResenderImpl{ + namespaceRegistry: namespaceRegistry, + clientBean: clientBean, + historyReplicationFn: historyReplicationFn, + serializer: serializer, + rereplicationTimeout: rereplicationTimeout, + logger: logger, + } +} + +// SendSingleWorkflowHistory sends one run IDs's history events to remote +func (n *NDCHistoryResenderImpl) SendSingleWorkflowHistory( + ctx context.Context, + remoteClusterName string, + namespaceID namespace.ID, + workflowID string, + runID string, + startEventID int64, + startEventVersion int64, + endEventID int64, + endEventVersion int64, +) error { + + resendCtx := context.Background() + var cancel context.CancelFunc + if n.rereplicationTimeout != nil { + resendContextTimeout := n.rereplicationTimeout(namespaceID.String()) + if resendContextTimeout > 0 { + resendCtx, cancel = context.WithTimeout(resendCtx, resendContextTimeout) + defer cancel() + } + } + resendCtx = rpc.CopyContextValues(resendCtx, ctx) + + historyIterator := collection.NewPagingIterator(n.getPaginationFn( + resendCtx, + remoteClusterName, + namespaceID, + workflowID, + runID, + startEventID, + startEventVersion, + endEventID, + endEventVersion, + )) + + for historyIterator.HasNext() { + batch, err := historyIterator.Next() + if err != nil { + n.logger.Error("failed to get history events", + tag.WorkflowNamespaceID(namespaceID.String()), + tag.WorkflowID(workflowID), + tag.WorkflowRunID(runID), + tag.Error(err)) + return err + } + + replicationRequest := n.createReplicationRawRequest( + namespaceID, + workflowID, + runID, + batch.rawEventBatch, + batch.versionHistory.GetItems()) + + err = n.sendReplicationRawRequest(resendCtx, replicationRequest) + if err != nil { + n.logger.Error("failed to replicate events", + tag.WorkflowNamespaceID(namespaceID.String()), + tag.WorkflowID(workflowID), + tag.WorkflowRunID(runID), + tag.Error(err)) + return err + } + } + return nil +} + +func (n *NDCHistoryResenderImpl) getPaginationFn( + ctx context.Context, + remoteClusterName string, + namespaceID namespace.ID, + workflowID string, + runID string, + startEventID int64, + startEventVersion int64, + endEventID int64, + endEventVersion int64, +) collection.PaginationFn[historyBatch] { + + return func(paginationToken []byte) ([]historyBatch, []byte, error) { + + response, err := n.getHistory( + ctx, + remoteClusterName, + namespaceID, + workflowID, + runID, + startEventID, + startEventVersion, + endEventID, + endEventVersion, + paginationToken, + defaultPageSize, + ) + if err != nil { + return nil, nil, err + } + + batches := make([]historyBatch, 0, len(response.GetHistoryBatches())) + versionHistory := response.GetVersionHistory() + for _, history := range response.GetHistoryBatches() { + batch := historyBatch{ + versionHistory: versionHistory, + rawEventBatch: history, + } + batches = append(batches, batch) + } + return batches, response.NextPageToken, nil + } +} + +func (n *NDCHistoryResenderImpl) createReplicationRawRequest( + namespaceID namespace.ID, + workflowID string, + runID string, + historyBlob *commonpb.DataBlob, + versionHistoryItems []*historyspb.VersionHistoryItem, +) *historyservice.ReplicateEventsV2Request { + + request := &historyservice.ReplicateEventsV2Request{ + NamespaceId: namespaceID.String(), 
+ WorkflowExecution: &commonpb.WorkflowExecution{ + WorkflowId: workflowID, + RunId: runID, + }, + Events: historyBlob, + VersionHistoryItems: versionHistoryItems, + } + return request +} + +func (n *NDCHistoryResenderImpl) sendReplicationRawRequest( + ctx context.Context, + request *historyservice.ReplicateEventsV2Request, +) error { + + ctx, cancel := context.WithTimeout(ctx, resendContextTimeout) + defer cancel() + return n.historyReplicationFn(ctx, request) +} + +func (n *NDCHistoryResenderImpl) getHistory( + ctx context.Context, + remoteClusterName string, + namespaceID namespace.ID, + workflowID string, + runID string, + startEventID int64, + startEventVersion int64, + endEventID int64, + endEventVersion int64, + token []byte, + pageSize int32, +) (*adminservice.GetWorkflowExecutionRawHistoryV2Response, error) { + + logger := log.With(n.logger, tag.WorkflowRunID(runID)) + + ctx, cancel := rpc.NewContextFromParentWithTimeoutAndVersionHeaders(ctx, resendContextTimeout) + defer cancel() + + adminClient, err := n.clientBean.GetRemoteAdminClient(remoteClusterName) + if err != nil { + return nil, err + } + + response, err := adminClient.GetWorkflowExecutionRawHistoryV2(ctx, &adminservice.GetWorkflowExecutionRawHistoryV2Request{ + NamespaceId: namespaceID.String(), + Execution: &commonpb.WorkflowExecution{ + WorkflowId: workflowID, + RunId: runID, + }, + StartEventId: startEventID, + StartEventVersion: startEventVersion, + EndEventId: endEventID, + EndEventVersion: endEventVersion, + MaximumPageSize: pageSize, + NextPageToken: token, + }) + if err != nil { + logger.Error("error getting history", tag.Error(err)) + return nil, err + } + + return response, nil +} diff -Nru temporal-1.21.5-1/src/common/xdc/ndc_history_resender_mock.go temporal-1.22.5/src/common/xdc/ndc_history_resender_mock.go --- temporal-1.21.5-1/src/common/xdc/ndc_history_resender_mock.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/xdc/ndc_history_resender_mock.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,74 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Code generated by MockGen. DO NOT EDIT. +// Source: ndc_history_resender.go + +// Package xdc is a generated GoMock package. 
+package xdc + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + namespace "go.temporal.io/server/common/namespace" +) + +// MockNDCHistoryResender is a mock of NDCHistoryResender interface. +type MockNDCHistoryResender struct { + ctrl *gomock.Controller + recorder *MockNDCHistoryResenderMockRecorder +} + +// MockNDCHistoryResenderMockRecorder is the mock recorder for MockNDCHistoryResender. +type MockNDCHistoryResenderMockRecorder struct { + mock *MockNDCHistoryResender +} + +// NewMockNDCHistoryResender creates a new mock instance. +func NewMockNDCHistoryResender(ctrl *gomock.Controller) *MockNDCHistoryResender { + mock := &MockNDCHistoryResender{ctrl: ctrl} + mock.recorder = &MockNDCHistoryResenderMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockNDCHistoryResender) EXPECT() *MockNDCHistoryResenderMockRecorder { + return m.recorder +} + +// SendSingleWorkflowHistory mocks base method. +func (m *MockNDCHistoryResender) SendSingleWorkflowHistory(ctx context.Context, remoteClusterName string, namespaceID namespace.ID, workflowID, runID string, startEventID, startEventVersion, endEventID, endEventVersion int64) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SendSingleWorkflowHistory", ctx, remoteClusterName, namespaceID, workflowID, runID, startEventID, startEventVersion, endEventID, endEventVersion) + ret0, _ := ret[0].(error) + return ret0 +} + +// SendSingleWorkflowHistory indicates an expected call of SendSingleWorkflowHistory. +func (mr *MockNDCHistoryResenderMockRecorder) SendSingleWorkflowHistory(ctx, remoteClusterName, namespaceID, workflowID, runID, startEventID, startEventVersion, endEventID, endEventVersion interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendSingleWorkflowHistory", reflect.TypeOf((*MockNDCHistoryResender)(nil).SendSingleWorkflowHistory), ctx, remoteClusterName, namespaceID, workflowID, runID, startEventID, startEventVersion, endEventID, endEventVersion) +} diff -Nru temporal-1.21.5-1/src/common/xdc/ndc_history_resender_test.go temporal-1.22.5/src/common/xdc/ndc_history_resender_test.go --- temporal-1.21.5-1/src/common/xdc/ndc_history_resender_test.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/common/xdc/ndc_history_resender_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,385 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package xdc + +import ( + "context" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/pborman/uuid" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" + historypb "go.temporal.io/api/history/v1" + + "go.temporal.io/server/api/adminservice/v1" + "go.temporal.io/server/api/adminservicemock/v1" + historyspb "go.temporal.io/server/api/history/v1" + "go.temporal.io/server/api/historyservice/v1" + "go.temporal.io/server/api/historyservicemock/v1" + persistencespb "go.temporal.io/server/api/persistence/v1" + "go.temporal.io/server/client" + "go.temporal.io/server/common" + "go.temporal.io/server/common/cluster" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/namespace" + "go.temporal.io/server/common/persistence/serialization" + "go.temporal.io/server/common/primitives/timestamp" + serviceerrors "go.temporal.io/server/common/serviceerror" +) + +type ( + nDCHistoryResenderSuite struct { + suite.Suite + *require.Assertions + + controller *gomock.Controller + mockClusterMetadata *cluster.MockMetadata + mockNamespaceCache *namespace.MockRegistry + mockClientBean *client.MockBean + mockAdminClient *adminservicemock.MockAdminServiceClient + mockHistoryClient *historyservicemock.MockHistoryServiceClient + + namespaceID namespace.ID + namespace namespace.Name + + serializer serialization.Serializer + logger log.Logger + + rereplicator *NDCHistoryResenderImpl + } +) + +func TestNDCHistoryResenderSuite(t *testing.T) { + s := new(nDCHistoryResenderSuite) + suite.Run(t, s) +} + +func (s *nDCHistoryResenderSuite) SetupSuite() { +} + +func (s *nDCHistoryResenderSuite) TearDownSuite() { + +} + +func (s *nDCHistoryResenderSuite) SetupTest() { + s.Assertions = require.New(s.T()) + + s.controller = gomock.NewController(s.T()) + s.mockClusterMetadata = cluster.NewMockMetadata(s.controller) + s.mockClientBean = client.NewMockBean(s.controller) + s.mockAdminClient = adminservicemock.NewMockAdminServiceClient(s.controller) + s.mockHistoryClient = historyservicemock.NewMockHistoryServiceClient(s.controller) + s.mockNamespaceCache = namespace.NewMockRegistry(s.controller) + + s.mockClientBean.EXPECT().GetRemoteAdminClient(gomock.Any()).Return(s.mockAdminClient, nil).AnyTimes() + + s.logger = log.NewTestLogger() + s.mockClusterMetadata.EXPECT().IsGlobalNamespaceEnabled().Return(true).AnyTimes() + + s.namespaceID = namespace.ID(uuid.New()) + s.namespace = "some random namespace name" + namespaceEntry := namespace.NewGlobalNamespaceForTest( + &persistencespb.NamespaceInfo{Id: s.namespaceID.String(), Name: s.namespace.String()}, + &persistencespb.NamespaceConfig{Retention: timestamp.DurationFromDays(1)}, + &persistencespb.NamespaceReplicationConfig{ + ActiveClusterName: cluster.TestCurrentClusterName, + Clusters: []string{ + cluster.TestCurrentClusterName, + cluster.TestAlternativeClusterName, + }, + }, + 1234, + ) + s.mockNamespaceCache.EXPECT().GetNamespaceByID(s.namespaceID).Return(namespaceEntry, nil).AnyTimes() + s.mockNamespaceCache.EXPECT().GetNamespace(s.namespace).Return(namespaceEntry, nil).AnyTimes() + s.serializer = serialization.NewSerializer() + + s.rereplicator = NewNDCHistoryResender( + 
s.mockNamespaceCache, + s.mockClientBean, + func(ctx context.Context, request *historyservice.ReplicateEventsV2Request) error { + _, err := s.mockHistoryClient.ReplicateEventsV2(ctx, request) + return err + }, + serialization.NewSerializer(), + nil, + s.logger, + ) +} + +func (s *nDCHistoryResenderSuite) TearDownTest() { + s.controller.Finish() +} + +func (s *nDCHistoryResenderSuite) TestSendSingleWorkflowHistory() { + workflowID := "some random workflow ID" + runID := uuid.New() + startEventID := int64(123) + startEventVersion := int64(100) + token := []byte{1} + pageSize := defaultPageSize + eventBatch := []*historypb.HistoryEvent{ + { + EventId: 2, + Version: 123, + EventTime: timestamp.TimePtr(time.Now().UTC()), + EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_SCHEDULED, + }, + { + EventId: 3, + Version: 123, + EventTime: timestamp.TimePtr(time.Now().UTC()), + EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_STARTED, + }, + } + blob := s.serializeEvents(eventBatch) + versionHistoryItems := []*historyspb.VersionHistoryItem{ + { + EventId: 1, + Version: 1, + }, + } + + s.mockAdminClient.EXPECT().GetWorkflowExecutionRawHistoryV2( + gomock.Any(), + &adminservice.GetWorkflowExecutionRawHistoryV2Request{ + NamespaceId: s.namespaceID.String(), + Execution: &commonpb.WorkflowExecution{ + WorkflowId: workflowID, + RunId: runID, + }, + StartEventId: startEventID, + StartEventVersion: startEventVersion, + EndEventId: common.EmptyEventID, + EndEventVersion: common.EmptyVersion, + MaximumPageSize: pageSize, + NextPageToken: nil, + }).Return(&adminservice.GetWorkflowExecutionRawHistoryV2Response{ + HistoryBatches: []*commonpb.DataBlob{blob}, + NextPageToken: token, + VersionHistory: &historyspb.VersionHistory{ + Items: versionHistoryItems, + }, + }, nil) + + s.mockAdminClient.EXPECT().GetWorkflowExecutionRawHistoryV2( + gomock.Any(), + &adminservice.GetWorkflowExecutionRawHistoryV2Request{ + NamespaceId: s.namespaceID.String(), + Execution: &commonpb.WorkflowExecution{ + WorkflowId: workflowID, + RunId: runID, + }, + StartEventId: startEventID, + StartEventVersion: startEventVersion, + EndEventId: common.EmptyEventID, + EndEventVersion: common.EmptyVersion, + MaximumPageSize: pageSize, + NextPageToken: token, + }).Return(&adminservice.GetWorkflowExecutionRawHistoryV2Response{ + HistoryBatches: []*commonpb.DataBlob{blob}, + NextPageToken: nil, + VersionHistory: &historyspb.VersionHistory{ + Items: versionHistoryItems, + }, + }, nil) + + s.mockHistoryClient.EXPECT().ReplicateEventsV2( + gomock.Any(), + &historyservice.ReplicateEventsV2Request{ + NamespaceId: s.namespaceID.String(), + WorkflowExecution: &commonpb.WorkflowExecution{ + WorkflowId: workflowID, + RunId: runID, + }, + VersionHistoryItems: versionHistoryItems, + Events: blob, + }).Return(nil, nil).Times(2) + + err := s.rereplicator.SendSingleWorkflowHistory( + context.Background(), + cluster.TestCurrentClusterName, + s.namespaceID, + workflowID, + runID, + startEventID, + startEventVersion, + common.EmptyEventID, + common.EmptyVersion, + ) + + s.Nil(err) +} + +func (s *nDCHistoryResenderSuite) TestCreateReplicateRawEventsRequest() { + workflowID := "some random workflow ID" + runID := uuid.New() + blob := &commonpb.DataBlob{ + EncodingType: enumspb.ENCODING_TYPE_PROTO3, + Data: []byte("some random history blob"), + } + versionHistoryItems := []*historyspb.VersionHistoryItem{ + { + EventId: 1, + Version: 1, + }, + } + + s.Equal(&historyservice.ReplicateEventsV2Request{ + NamespaceId: s.namespaceID.String(), + WorkflowExecution: 
&commonpb.WorkflowExecution{ + WorkflowId: workflowID, + RunId: runID, + }, + VersionHistoryItems: versionHistoryItems, + Events: blob, + }, s.rereplicator.createReplicationRawRequest( + s.namespaceID, + workflowID, + runID, + blob, + versionHistoryItems)) +} + +func (s *nDCHistoryResenderSuite) TestSendReplicationRawRequest() { + workflowID := "some random workflow ID" + runID := uuid.New() + item := &historyspb.VersionHistoryItem{ + EventId: 1, + Version: 1, + } + request := &historyservice.ReplicateEventsV2Request{ + NamespaceId: s.namespaceID.String(), + WorkflowExecution: &commonpb.WorkflowExecution{ + WorkflowId: workflowID, + RunId: runID, + }, + Events: &commonpb.DataBlob{ + EncodingType: enumspb.ENCODING_TYPE_PROTO3, + Data: []byte("some random history blob"), + }, + VersionHistoryItems: []*historyspb.VersionHistoryItem{item}, + } + + s.mockHistoryClient.EXPECT().ReplicateEventsV2(gomock.Any(), request).Return(nil, nil) + err := s.rereplicator.sendReplicationRawRequest(context.Background(), request) + s.Nil(err) +} + +func (s *nDCHistoryResenderSuite) TestSendReplicationRawRequest_Err() { + workflowID := "some random workflow ID" + runID := uuid.New() + item := &historyspb.VersionHistoryItem{ + EventId: 1, + Version: 1, + } + request := &historyservice.ReplicateEventsV2Request{ + NamespaceId: s.namespaceID.String(), + WorkflowExecution: &commonpb.WorkflowExecution{ + WorkflowId: workflowID, + RunId: runID, + }, + Events: &commonpb.DataBlob{ + EncodingType: enumspb.ENCODING_TYPE_PROTO3, + Data: []byte("some random history blob"), + }, + VersionHistoryItems: []*historyspb.VersionHistoryItem{item}, + } + retryErr := serviceerrors.NewRetryReplication( + "", + s.namespaceID.String(), + workflowID, + runID, + common.EmptyEventID, + common.EmptyVersion, + common.EmptyEventID, + common.EmptyVersion, + ) + + s.mockHistoryClient.EXPECT().ReplicateEventsV2(gomock.Any(), request).Return(nil, retryErr) + err := s.rereplicator.sendReplicationRawRequest(context.Background(), request) + s.Equal(retryErr, err) +} + +func (s *nDCHistoryResenderSuite) TestGetHistory() { + workflowID := "some random workflow ID" + runID := uuid.New() + startEventID := int64(123) + endEventID := int64(345) + version := int64(20) + nextTokenIn := []byte("some random next token in") + nextTokenOut := []byte("some random next token out") + pageSize := int32(59) + blob := []byte("some random events blob") + + response := &adminservice.GetWorkflowExecutionRawHistoryV2Response{ + HistoryBatches: []*commonpb.DataBlob{{ + EncodingType: enumspb.ENCODING_TYPE_PROTO3, + Data: blob, + }}, + NextPageToken: nextTokenOut, + } + s.mockAdminClient.EXPECT().GetWorkflowExecutionRawHistoryV2(gomock.Any(), &adminservice.GetWorkflowExecutionRawHistoryV2Request{ + NamespaceId: s.namespaceID.String(), + Execution: &commonpb.WorkflowExecution{ + WorkflowId: workflowID, + RunId: runID, + }, + StartEventId: startEventID, + StartEventVersion: version, + EndEventId: endEventID, + EndEventVersion: version, + MaximumPageSize: pageSize, + NextPageToken: nextTokenIn, + }).Return(response, nil) + + out, err := s.rereplicator.getHistory( + context.Background(), + cluster.TestCurrentClusterName, + s.namespaceID, + workflowID, + runID, + startEventID, + version, + endEventID, + version, + nextTokenIn, + pageSize) + s.Nil(err) + s.Equal(response, out) +} + +func (s *nDCHistoryResenderSuite) serializeEvents(events []*historypb.HistoryEvent) *commonpb.DataBlob { + blob, err := s.serializer.SerializeEvents(events, enumspb.ENCODING_TYPE_PROTO3) + s.Nil(err) + 
return &commonpb.DataBlob{ + EncodingType: enumspb.ENCODING_TYPE_PROTO3, + Data: blob.Data, + } +} diff -Nru temporal-1.21.5-1/src/config/development-cass-archival.yaml temporal-1.22.5/src/config/development-cass-archival.yaml --- temporal-1.21.5-1/src/config/development-cass-archival.yaml 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/config/development-cass-archival.yaml 2024-02-23 09:45:43.000000000 +0000 @@ -35,6 +35,7 @@ grpcPort: 7233 membershipPort: 6933 bindOnLocalHost: true + httpPort: 7243 matching: rpc: diff -Nru temporal-1.21.5-1/src/config/development-cass-es.yaml temporal-1.22.5/src/config/development-cass-es.yaml --- temporal-1.21.5-1/src/config/development-cass-es.yaml 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/config/development-cass-es.yaml 2024-02-23 09:45:43.000000000 +0000 @@ -42,6 +42,7 @@ grpcPort: 7233 membershipPort: 6933 bindOnLocalHost: true + httpPort: 7243 matching: rpc: diff -Nru temporal-1.21.5-1/src/config/development-cass-s3.yaml temporal-1.22.5/src/config/development-cass-s3.yaml --- temporal-1.21.5-1/src/config/development-cass-s3.yaml 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/config/development-cass-s3.yaml 2024-02-23 09:45:43.000000000 +0000 @@ -33,6 +33,7 @@ grpcPort: 7233 membershipPort: 6933 bindOnLocalHost: true + httpPort: 7243 matching: rpc: diff -Nru temporal-1.21.5-1/src/config/development-cass.yaml temporal-1.22.5/src/config/development-cass.yaml --- temporal-1.21.5-1/src/config/development-cass.yaml 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/config/development-cass.yaml 2024-02-23 09:45:43.000000000 +0000 @@ -59,6 +59,7 @@ grpcPort: 7233 membershipPort: 6933 bindOnLocalHost: true + httpPort: 7243 matching: rpc: diff -Nru temporal-1.21.5-1/src/config/development-cluster-a.yaml temporal-1.22.5/src/config/development-cluster-a.yaml --- temporal-1.21.5-1/src/config/development-cluster-a.yaml 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/config/development-cluster-a.yaml 2024-02-23 09:45:43.000000000 +0000 @@ -42,6 +42,7 @@ grpcPort: 7233 membershipPort: 6933 bindOnLocalHost: true + httpPort: 7243 matching: rpc: diff -Nru temporal-1.21.5-1/src/config/development-mysql-es.yaml temporal-1.22.5/src/config/development-mysql-es.yaml --- temporal-1.21.5-1/src/config/development-mysql-es.yaml 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/config/development-mysql-es.yaml 2024-02-23 09:45:43.000000000 +0000 @@ -48,6 +48,7 @@ grpcPort: 7233 membershipPort: 6933 bindOnLocalHost: true + httpPort: 7243 matching: rpc: diff -Nru temporal-1.21.5-1/src/config/development-mysql.yaml temporal-1.22.5/src/config/development-mysql.yaml --- temporal-1.21.5-1/src/config/development-mysql.yaml 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/config/development-mysql.yaml 2024-02-23 09:45:43.000000000 +0000 @@ -50,6 +50,7 @@ grpcPort: 7233 membershipPort: 6933 bindOnLocalHost: true + httpPort: 7243 matching: rpc: diff -Nru temporal-1.21.5-1/src/config/development-mysql8.yaml temporal-1.22.5/src/config/development-mysql8.yaml --- temporal-1.21.5-1/src/config/development-mysql8.yaml 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/config/development-mysql8.yaml 2024-02-23 09:45:43.000000000 +0000 @@ -50,6 +50,7 @@ grpcPort: 7233 membershipPort: 6933 bindOnLocalHost: true + httpPort: 7243 matching: rpc: diff -Nru temporal-1.21.5-1/src/config/development-postgres-es.yaml temporal-1.22.5/src/config/development-postgres-es.yaml --- 
temporal-1.21.5-1/src/config/development-postgres-es.yaml 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/config/development-postgres-es.yaml 2024-02-23 09:45:43.000000000 +0000 @@ -50,6 +50,7 @@ grpcPort: 7233 membershipPort: 6933 bindOnLocalHost: true + httpPort: 7243 matching: rpc: diff -Nru temporal-1.21.5-1/src/config/development-postgres.yaml temporal-1.22.5/src/config/development-postgres.yaml --- temporal-1.21.5-1/src/config/development-postgres.yaml 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/config/development-postgres.yaml 2024-02-23 09:45:43.000000000 +0000 @@ -50,6 +50,7 @@ grpcPort: 7233 membershipPort: 6933 bindOnLocalHost: true + httpPort: 7243 matching: rpc: diff -Nru temporal-1.21.5-1/src/config/development-postgres12.yaml temporal-1.22.5/src/config/development-postgres12.yaml --- temporal-1.21.5-1/src/config/development-postgres12.yaml 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/config/development-postgres12.yaml 2024-02-23 09:45:43.000000000 +0000 @@ -50,6 +50,7 @@ grpcPort: 7233 membershipPort: 6933 bindOnLocalHost: true + httpPort: 7243 matching: rpc: diff -Nru temporal-1.21.5-1/src/config/development-sqlite-file.yaml temporal-1.22.5/src/config/development-sqlite-file.yaml --- temporal-1.21.5-1/src/config/development-sqlite-file.yaml 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/config/development-sqlite-file.yaml 2024-02-23 09:45:43.000000000 +0000 @@ -74,6 +74,7 @@ grpcPort: 7233 membershipPort: 6933 bindOnLocalHost: true + httpPort: 7243 matching: rpc: diff -Nru temporal-1.21.5-1/src/config/development-sqlite.yaml temporal-1.22.5/src/config/development-sqlite.yaml --- temporal-1.21.5-1/src/config/development-sqlite.yaml 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/config/development-sqlite.yaml 2024-02-23 09:45:43.000000000 +0000 @@ -70,6 +70,7 @@ grpcPort: 7233 membershipPort: 6933 bindOnLocalHost: true + httpPort: 7243 matching: rpc: diff -Nru temporal-1.21.5-1/src/develop/buildkite/Dockerfile temporal-1.22.5/src/develop/buildkite/Dockerfile --- temporal-1.21.5-1/src/develop/buildkite/Dockerfile 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/develop/buildkite/Dockerfile 2024-02-23 09:45:43.000000000 +0000 @@ -1,2 +1,2 @@ -FROM temporalio/base-ci-builder:1.9.2 +FROM temporalio/base-ci-builder:1.10.5 WORKDIR /temporal diff -Nru temporal-1.21.5-1/src/develop/buildkite/README.md temporal-1.22.5/src/develop/buildkite/README.md --- temporal-1.21.5-1/src/develop/buildkite/README.md 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/develop/buildkite/README.md 2024-02-23 09:45:43.000000000 +0000 @@ -50,7 +50,7 @@ ## Testing the build in Buildkite -Creating a PR against the master branch will trigger the Buildkite +Creating a PR against the `main` branch will trigger the Buildkite build. Members of the Temporal team can view the build pipeline here: [https://buildkite.com/temporal/temporal-server](https://buildkite.com/temporal/temporal-server). 
diff -Nru temporal-1.21.5-1/src/develop/buildkite/docker-compose.yml temporal-1.22.5/src/develop/buildkite/docker-compose.yml --- temporal-1.21.5-1/src/develop/buildkite/docker-compose.yml 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/develop/buildkite/docker-compose.yml 2024-02-23 09:45:43.000000000 +0000 @@ -2,7 +2,7 @@ services: cassandra: - image: cassandra:3.11 + image: cassandra:3.11.15 networks: services-network: aliases: diff -Nru temporal-1.21.5-1/src/develop/docker-compose/docker-compose.cdc.yml temporal-1.22.5/src/develop/docker-compose/docker-compose.cdc.yml --- temporal-1.21.5-1/src/develop/docker-compose/docker-compose.cdc.yml 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/develop/docker-compose/docker-compose.cdc.yml 2024-02-23 09:45:43.000000000 +0000 @@ -5,12 +5,12 @@ services: temporal-ui-standby: - image: temporalio/ui:2.11.2 + image: temporalio/ui:2.17.1 container_name: temporal-dev-ui-standby environment: - TEMPORAL_UI_PORT=8081 temporal-ui-other: - image: temporalio/ui:2.11.2 + image: temporalio/ui:2.17.1 container_name: temporal-dev-ui-other environment: - TEMPORAL_UI_PORT=8082 diff -Nru temporal-1.21.5-1/src/develop/docker-compose/docker-compose.yml temporal-1.22.5/src/develop/docker-compose/docker-compose.yml --- temporal-1.21.5-1/src/develop/docker-compose/docker-compose.yml 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/develop/docker-compose/docker-compose.yml 2024-02-23 09:45:43.000000000 +0000 @@ -78,7 +78,7 @@ volumes: - ./grafana/provisioning/:/etc/grafana/provisioning/ temporal-ui: - image: temporalio/ui:2.11.2 + image: temporalio/ui:2.17.1 container_name: temporal-dev-ui networks: diff -Nru temporal-1.21.5-1/src/docker/config_template.yaml temporal-1.22.5/src/docker/config_template.yaml --- temporal-1.21.5-1/src/docker/config_template.yaml 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/docker/config_template.yaml 2024-02-23 09:45:43.000000000 +0000 @@ -304,6 +304,7 @@ grpcPort: {{ $temporalGrpcPort }} membershipPort: {{ default .Env.FRONTEND_MEMBERSHIP_PORT "6933" }} bindOnIP: {{ default .Env.BIND_ON_IP "127.0.0.1" }} + httpPort: {{ default .Env.FRONTEND_HTTP_PORT "7243" }} {{- if .Env.USE_INTERNAL_FRONTEND }} internal-frontend: diff -Nru temporal-1.21.5-1/src/go.mod temporal-1.22.5/src/go.mod --- temporal-1.21.5-1/src/go.mod 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/go.mod 2024-02-23 09:45:43.000000000 +0000 @@ -1,39 +1,38 @@ module go.temporal.io/server -go 1.19 +go 1.20 require ( - cloud.google.com/go/storage v1.29.0 - github.com/aws/aws-sdk-go v1.44.203 + cloud.google.com/go/storage v1.30.1 + github.com/aws/aws-sdk-go v1.44.289 github.com/blang/semver/v4 v4.0.0 - github.com/brianvoe/gofakeit/v6 v6.20.1 - github.com/cactus/go-statsd-client/statsd v0.0.0-20200423205355-cb0885a1018c + github.com/brianvoe/gofakeit/v6 v6.22.0 + github.com/cactus/go-statsd-client/v5 v5.0.0 github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 github.com/emirpasic/gods v1.18.1 - github.com/fatih/color v1.14.1 + github.com/fatih/color v1.15.0 github.com/go-sql-driver/mysql v1.5.0 - github.com/gocql/gocql v1.4.0 + github.com/gocql/gocql v1.5.2 github.com/gogo/protobuf v1.3.2 github.com/gogo/status v1.1.1 - github.com/golang-jwt/jwt/v4 v4.4.3 + github.com/golang-jwt/jwt/v4 v4.5.0 github.com/golang/mock v1.7.0-rc.1 github.com/google/go-cmp v0.5.9 github.com/google/uuid v1.3.0 github.com/iancoleman/strcase v0.2.0 github.com/jmoiron/sqlx v1.3.4 - github.com/jonboulle/clockwork v0.4.0 - github.com/lib/pq 
v1.10.7 + github.com/lib/pq v1.10.9 github.com/olekukonko/tablewriter v0.0.5 github.com/olivere/elastic/v7 v7.0.32 github.com/pborman/uuid v1.2.1 - github.com/prometheus/client_golang v1.15.1 + github.com/prometheus/client_golang v1.16.0 github.com/robfig/cron/v3 v3.0.1 github.com/stretchr/testify v1.8.4 - github.com/temporalio/ringpop-go v0.0.0-20220818230611-30bf23b490b2 + github.com/temporalio/ringpop-go v0.0.0-20230606200434-b5c079f412d3 github.com/temporalio/tchannel-go v1.22.1-0.20220818200552-1be8d8cffa5b - github.com/temporalio/tctl-kit v0.0.0-20230213052353-2342ea1e7d14 - github.com/uber-go/tally/v4 v4.1.6 - github.com/urfave/cli v1.22.12 + github.com/temporalio/tctl-kit v0.0.0-20230328153839-577f95d16fa0 + github.com/uber-go/tally/v4 v4.1.7 + github.com/urfave/cli v1.22.14 github.com/urfave/cli/v2 v2.4.0 github.com/xwb1989/sqlparser v0.0.0-20180606152119-120387863bf2 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.42.0 @@ -44,41 +43,37 @@ go.opentelemetry.io/otel/metric v1.16.0 go.opentelemetry.io/otel/sdk v1.16.0 go.opentelemetry.io/otel/sdk/metric v0.39.0 - go.temporal.io/api v1.23.0 - go.temporal.io/sdk v1.23.0 + go.temporal.io/api v1.24.1-0.20231003165936-bb03061759c8 + go.temporal.io/sdk v1.25.1 go.temporal.io/version v0.3.0 - go.uber.org/atomic v1.10.0 + go.uber.org/atomic v1.11.0 go.uber.org/automaxprocs v1.5.2 - go.uber.org/fx v1.19.1 - go.uber.org/multierr v1.9.0 + go.uber.org/fx v1.20.0 + go.uber.org/multierr v1.11.0 go.uber.org/zap v1.24.0 - golang.org/x/exp v0.0.0-20230213192124-5e25df0256eb - golang.org/x/oauth2 v0.8.0 - golang.org/x/sync v0.2.0 + golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 + golang.org/x/oauth2 v0.10.0 + golang.org/x/sync v0.3.0 golang.org/x/time v0.3.0 - google.golang.org/api v0.114.0 - google.golang.org/grpc v1.55.0 - google.golang.org/grpc/examples v0.0.0-20230216223317-abff344ead8f + google.golang.org/api v0.128.0 + google.golang.org/grpc v1.58.2 + google.golang.org/grpc/examples v0.0.0-20230623203957-0b3a81eabc28 gopkg.in/square/go-jose.v2 v2.6.0 gopkg.in/validator.v2 v2.0.1 gopkg.in/yaml.v3 v3.0.1 - modernc.org/sqlite v1.21.0 + modernc.org/sqlite v1.23.1 ) require ( - google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc // indirect -) - -require ( - cloud.google.com/go v0.110.0 // indirect - cloud.google.com/go/compute v1.19.0 // indirect + cloud.google.com/go v0.110.8 // indirect + cloud.google.com/go/compute v1.23.0 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect - cloud.google.com/go/iam v0.13.0 // indirect - github.com/apache/thrift v0.18.0 // indirect - github.com/benbjohnson/clock v1.3.0 // indirect + cloud.google.com/go/iam v1.1.2 // indirect + github.com/apache/thrift v0.18.1 // indirect + github.com/benbjohnson/clock v1.3.5 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bitly/go-hostpool v0.1.0 // indirect + github.com/cactus/go-statsd-client/statsd v0.0.0-20200423205355-cb0885a1018c // indirect github.com/cenkalti/backoff/v4 v4.2.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect @@ -91,17 +86,19 @@ github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/golang/snappy v0.0.4 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect - github.com/googleapis/gax-go/v2 v2.7.1 // 
indirect - github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.2 // indirect + github.com/google/s2a-go v0.1.4 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.2.5 // indirect + github.com/googleapis/gax-go/v2 v2.12.0 // indirect + github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 // indirect + github.com/grpc-ecosystem/grpc-gateway v1.16.0 + github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect - github.com/mattn/go-isatty v0.0.17 // indirect + github.com/mattn/go-isatty v0.0.19 // indirect github.com/mattn/go-runewidth v0.0.14 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect @@ -109,15 +106,15 @@ github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_model v0.4.0 github.com/prometheus/common v0.44.0 - github.com/prometheus/procfs v0.10.1 // indirect + github.com/prometheus/procfs v0.11.0 // indirect github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect - github.com/rivo/uniseg v0.4.3 // indirect + github.com/rivo/uniseg v0.4.4 // indirect github.com/robfig/cron v1.2.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect - github.com/sirupsen/logrus v1.9.0 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect github.com/stretchr/objx v0.5.0 // indirect - github.com/twmb/murmur3 v1.1.6 // indirect + github.com/twmb/murmur3 v1.1.8 // indirect github.com/uber-common/bark v1.3.0 // indirect github.com/uber/jaeger-client-go v2.30.0+incompatible // indirect go.opencensus.io v0.24.0 // indirect @@ -125,25 +122,27 @@ go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.39.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.16.0 // indirect go.opentelemetry.io/otel/trace v1.16.0 - go.opentelemetry.io/proto/otlp v0.19.0 // indirect - go.uber.org/dig v1.16.1 // indirect - golang.org/x/crypto v0.6.0 // indirect - golang.org/x/mod v0.9.0 // indirect - golang.org/x/net v0.10.0 // indirect - golang.org/x/sys v0.8.0 // indirect - golang.org/x/text v0.9.0 // indirect - golang.org/x/tools v0.7.0 // indirect + go.opentelemetry.io/proto/otlp v0.20.0 // indirect + go.uber.org/dig v1.17.0 // indirect + golang.org/x/crypto v0.14.0 // indirect + golang.org/x/mod v0.11.0 // indirect + golang.org/x/net v0.17.0 // indirect + golang.org/x/sys v0.13.0 // indirect + golang.org/x/text v0.13.0 // indirect + golang.org/x/tools v0.10.0 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc // indirect - google.golang.org/protobuf v1.30.0 // indirect + google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97 // indirect + google.golang.org/protobuf v1.31.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect - 
lukechampine.com/uint128 v1.2.0 // indirect - modernc.org/cc/v3 v3.40.0 // indirect - modernc.org/ccgo/v3 v3.16.13 // indirect - modernc.org/libc v1.22.3 // indirect + lukechampine.com/uint128 v1.3.0 // indirect + modernc.org/cc/v3 v3.41.0 // indirect + modernc.org/ccgo/v3 v3.16.14 // indirect + modernc.org/libc v1.24.1 // indirect modernc.org/mathutil v1.5.0 // indirect - modernc.org/memory v1.5.0 // indirect + modernc.org/memory v1.6.0 // indirect modernc.org/opt v0.1.3 // indirect modernc.org/strutil v1.1.3 // indirect modernc.org/token v1.1.0 // indirect diff -Nru temporal-1.21.5-1/src/go.sum temporal-1.22.5/src/go.sum --- temporal-1.21.5-1/src/go.sum 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/go.sum 2024-02-23 09:45:43.000000000 +0000 @@ -35,35 +35,51 @@ cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM= cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I= -cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys= cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= +cloud.google.com/go v0.110.2/go.mod h1:k04UEeEtb6ZBRTv3dZz4CeJC3jKGxyhl0sAiVVquxiw= +cloud.google.com/go v0.110.4/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= +cloud.google.com/go v0.110.6/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= +cloud.google.com/go v0.110.7/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= +cloud.google.com/go v0.110.8 h1:tyNdfIxjzaWctIiLYOTalaLKZ17SI44SKFW26QbOhME= +cloud.google.com/go v0.110.8/go.mod h1:Iz8AkXJf1qmxC3Oxoep8R1T36w8B92yU29PcBhHO5fk= cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= +cloud.google.com/go/accessapproval v1.7.1/go.mod h1:JYczztsHRMK7NTXb6Xw+dwbs/WnOJxbo/2mTI+Kgg68= cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE= cloud.google.com/go/accesscontextmanager v1.6.0/go.mod h1:8XCvZWfYw3K/ji0iVnp+6pu7huxoQTLmxAbVjbloTtM= cloud.google.com/go/accesscontextmanager v1.7.0/go.mod h1:CEGLewx8dwa33aDAZQujl7Dx+uYhS0eay198wB/VumQ= +cloud.google.com/go/accesscontextmanager v1.8.0/go.mod h1:uI+AI/r1oyWK99NN8cQ3UK76AMelMzgZCvJfsi2c+ps= +cloud.google.com/go/accesscontextmanager v1.8.1/go.mod h1:JFJHfvuaTC+++1iL1coPiG1eu5D24db2wXCDWDjIrxo= cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= cloud.google.com/go/aiplatform v1.27.0/go.mod h1:Bvxqtl40l0WImSb04d0hXFU7gDOiq9jQmorivIiWcKg= cloud.google.com/go/aiplatform v1.35.0/go.mod h1:7MFT/vCaOyZT/4IIFfxH4ErVg/4ku6lKv3w0+tFTgXQ= cloud.google.com/go/aiplatform v1.36.1/go.mod h1:WTm12vJRPARNvJ+v6P52RDHCNe4AhvjcIZ/9/RRHy/k= cloud.google.com/go/aiplatform v1.37.0/go.mod h1:IU2Cv29Lv9oCn/9LkFiiuKfwrRTq+QQMbW+hPCxJGZw= +cloud.google.com/go/aiplatform v1.45.0/go.mod h1:Iu2Q7sC7QGhXUeOhAj/oCK9a+ULz1O4AotZiqjQ8MYA= +cloud.google.com/go/aiplatform v1.48.0/go.mod h1:Iu2Q7sC7QGhXUeOhAj/oCK9a+ULz1O4AotZiqjQ8MYA= +cloud.google.com/go/aiplatform v1.50.0/go.mod 
h1:IRc2b8XAMTa9ZmfJV1BCCQbieWWvDnP1A8znyz5N7y4= cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= cloud.google.com/go/analytics v0.17.0/go.mod h1:WXFa3WSym4IZ+JiKmavYdJwGG/CvpqiqczmL59bTD9M= cloud.google.com/go/analytics v0.18.0/go.mod h1:ZkeHGQlcIPkw0R/GW+boWHhCOR43xz9RN/jn7WcqfIE= cloud.google.com/go/analytics v0.19.0/go.mod h1:k8liqf5/HCnOUkbawNtrWWc+UAzyDlW89doe8TtoDsE= +cloud.google.com/go/analytics v0.21.2/go.mod h1:U8dcUtmDmjrmUTnnnRnI4m6zKn/yaA5N9RlEkYFHpQo= +cloud.google.com/go/analytics v0.21.3/go.mod h1:U8dcUtmDmjrmUTnnnRnI4m6zKn/yaA5N9RlEkYFHpQo= cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk= cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc= cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8= +cloud.google.com/go/apigateway v1.6.1/go.mod h1:ufAS3wpbRjqfZrzpvLC2oh0MFlpRJm2E/ts25yyqmXA= cloud.google.com/go/apigeeconnect v1.3.0/go.mod h1:G/AwXFAKo0gIXkPTVfZDd2qA1TxBXJ3MgMRBQkIi9jc= cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn/pkS3QlHp0Ni04= cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8= +cloud.google.com/go/apigeeconnect v1.6.1/go.mod h1:C4awq7x0JpLtrlQCr8AzVIzAaYgngRqWf9S5Uhg+wWs= cloud.google.com/go/apigeeregistry v0.4.0/go.mod h1:EUG4PGcsZvxOXAdyEghIdXwAEi/4MEaoqLMLDMIwKXY= cloud.google.com/go/apigeeregistry v0.5.0/go.mod h1:YR5+s0BVNZfVOUkMa5pAR2xGd0A473vA5M7j247o1wM= cloud.google.com/go/apigeeregistry v0.6.0/go.mod h1:BFNzW7yQVLZ3yj0TKcwzb8n25CFBri51GVGOEUcgQsc= +cloud.google.com/go/apigeeregistry v0.7.1/go.mod h1:1XgyjZye4Mqtw7T9TsY4NW10U7BojBvG4RMD+vRDrIw= cloud.google.com/go/apikeys v0.4.0/go.mod h1:XATS/yqZbaBK0HOssf+ALHp8jAlNHUgyfprvNcBIszU= cloud.google.com/go/apikeys v0.5.0/go.mod h1:5aQfwY4D+ewMMWScd3hm2en3hCj+BROlyrt3ytS7KLI= cloud.google.com/go/apikeys v0.6.0/go.mod h1:kbpXu5upyiAlGkKrJgQl8A0rKNNJ7dQ377pdroRSSi8= @@ -72,10 +88,12 @@ cloud.google.com/go/appengine v1.6.0/go.mod h1:hg6i0J/BD2cKmDJbaFSYHFyZkgBEfQrDg/X0V5fJn84= cloud.google.com/go/appengine v1.7.0/go.mod h1:eZqpbHFCqRGa2aCdope7eC0SWLV1j0neb/QnMJVWx6A= cloud.google.com/go/appengine v1.7.1/go.mod h1:IHLToyb/3fKutRysUlFO0BPt5j7RiQ45nrzEJmKTo6E= +cloud.google.com/go/appengine v1.8.1/go.mod h1:6NJXGLVhZCN9aQ/AEDvmfzKEfoYBlfB80/BHiKVputY= cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4= cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0= cloud.google.com/go/area120 v0.7.0/go.mod h1:a3+8EUD1SX5RUcCs3MY5YasiO1z6yLiNLRiFrykbynY= cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k= +cloud.google.com/go/area120 v0.8.1/go.mod h1:BVfZpGpB7KFVNxPiQBuHkX6Ed0rS51xIgmGyjrAfzsg= cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ= cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk= cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0= @@ -84,6 +102,7 @@ cloud.google.com/go/artifactregistry v1.11.2/go.mod h1:nLZns771ZGAwVLzTX/7Al6R9ehma4WUEhZGWV6CeQNQ= cloud.google.com/go/artifactregistry v1.12.0/go.mod h1:o6P3MIvtzTOnmvGagO9v/rOjjA0HmhJ+/6KAXrmYDCI= cloud.google.com/go/artifactregistry v1.13.0/go.mod 
h1:uy/LNfoOIivepGhooAUpL1i30Hgee3Cu0l4VTWHUC08= +cloud.google.com/go/artifactregistry v1.14.1/go.mod h1:nxVdG19jTaSTu7yA7+VbWL346r3rIdkZ142BSQqhn5E= cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o= cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s= cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0= @@ -92,27 +111,36 @@ cloud.google.com/go/asset v1.11.1/go.mod h1:fSwLhbRvC9p9CXQHJ3BgFeQNM4c9x10lqlrdEUYXlJo= cloud.google.com/go/asset v1.12.0/go.mod h1:h9/sFOa4eDIyKmH6QMpm4eUK3pDojWnUhTgJlk762Hg= cloud.google.com/go/asset v1.13.0/go.mod h1:WQAMyYek/b7NBpYq/K4KJWcRqzoalEsxz/t/dTk4THw= +cloud.google.com/go/asset v1.14.1/go.mod h1:4bEJ3dnHCqWCDbWJ/6Vn7GVI9LerSi7Rfdi03hd+WTQ= cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY= cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw= cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI= cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo= cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0= cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= +cloud.google.com/go/assuredworkloads v1.11.1/go.mod h1:+F04I52Pgn5nmPG36CWFtxmav6+7Q+c5QyJoL18Lry0= cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8= cloud.google.com/go/automl v1.8.0/go.mod h1:xWx7G/aPEe/NP+qzYXktoBSDfjO+vnKMGgsApGJJquM= cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU= +cloud.google.com/go/automl v1.13.1/go.mod h1:1aowgAHWYZU27MybSCFiukPO7xnyawv7pt3zK4bheQE= cloud.google.com/go/baremetalsolution v0.3.0/go.mod h1:XOrocE+pvK1xFfleEnShBlNAXf+j5blPPxrhjKgnIFc= cloud.google.com/go/baremetalsolution v0.4.0/go.mod h1:BymplhAadOO/eBa7KewQ0Ppg4A4Wplbn+PsFKRLo0uI= cloud.google.com/go/baremetalsolution v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss= +cloud.google.com/go/baremetalsolution v1.1.1/go.mod h1:D1AV6xwOksJMV4OSlWHtWuFNZZYujJknMAP4Qa27QIA= +cloud.google.com/go/baremetalsolution v1.2.0/go.mod h1:68wi9AwPYkEWIUT4SvSGS9UJwKzNpshjHsH4lzk8iOw= cloud.google.com/go/batch v0.3.0/go.mod h1:TR18ZoAekj1GuirsUsR1ZTKN3FC/4UDnScjT8NXImFE= cloud.google.com/go/batch v0.4.0/go.mod h1:WZkHnP43R/QCGQsZ+0JyG4i79ranE2u8xvjq/9+STPE= cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g= +cloud.google.com/go/batch v1.3.1/go.mod h1:VguXeQKXIYaeeIYbuozUmBR13AfL4SJP7IltNPS+A4A= +cloud.google.com/go/batch v1.4.1/go.mod h1:KdBmDD61K0ovcxoRHGrN6GmOBWeAOyCgKD0Mugx4Fkk= cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4= cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8= cloud.google.com/go/beyondcorp v0.4.0/go.mod h1:3ApA0mbhHx6YImmuubf5pyW8srKnCEPON32/5hj+RmM= cloud.google.com/go/beyondcorp v0.5.0/go.mod h1:uFqj9X+dSfrheVp7ssLTaRHd2EHqSL4QZmH4e8WXGGU= +cloud.google.com/go/beyondcorp v0.6.1/go.mod h1:YhxDWw946SCbmcWo3fAhw3V4XZMSpQ/VYfcKGAEU8/4= +cloud.google.com/go/beyondcorp v1.0.0/go.mod h1:YhxDWw946SCbmcWo3fAhw3V4XZMSpQ/VYfcKGAEU8/4= 
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -126,38 +154,55 @@ cloud.google.com/go/bigquery v1.48.0/go.mod h1:QAwSz+ipNgfL5jxiaK7weyOhzdoAy1zFm0Nf1fysJac= cloud.google.com/go/bigquery v1.49.0/go.mod h1:Sv8hMmTFFYBlt/ftw2uN6dFdQPzBlREY9yBh7Oy7/4Q= cloud.google.com/go/bigquery v1.50.0/go.mod h1:YrleYEh2pSEbgTBZYMJ5SuSr0ML3ypjRB1zgf7pvQLU= +cloud.google.com/go/bigquery v1.52.0/go.mod h1:3b/iXjRQGU4nKa87cXeg6/gogLjO8C6PmuM8i5Bi/u4= +cloud.google.com/go/bigquery v1.53.0/go.mod h1:3b/iXjRQGU4nKa87cXeg6/gogLjO8C6PmuM8i5Bi/u4= +cloud.google.com/go/bigquery v1.55.0/go.mod h1:9Y5I3PN9kQWuid6183JFhOGOW3GcirA5LpsKCUn+2ec= cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y= cloud.google.com/go/billing v1.12.0/go.mod h1:yKrZio/eu+okO/2McZEbch17O5CB5NpZhhXG6Z766ss= cloud.google.com/go/billing v1.13.0/go.mod h1:7kB2W9Xf98hP9Sr12KfECgfGclsH3CQR0R08tnRlRbc= +cloud.google.com/go/billing v1.16.0/go.mod h1:y8vx09JSSJG02k5QxbycNRrN7FGZB6F3CAcgum7jvGA= +cloud.google.com/go/billing v1.17.0/go.mod h1:Z9+vZXEq+HwH7bhJkyI4OQcR6TSbeMrjlpEjO2vzY64= cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM= cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI= cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0= cloud.google.com/go/binaryauthorization v1.4.0/go.mod h1:tsSPQrBd77VLplV70GUhBf/Zm3FsKmgSqgm4UmiDItk= cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q= +cloud.google.com/go/binaryauthorization v1.6.1/go.mod h1:TKt4pa8xhowwffiBmbrbcxijJRZED4zrqnwZ1lKH51U= +cloud.google.com/go/binaryauthorization v1.7.0/go.mod h1:Zn+S6QqTMn6odcMU1zDZCJxPjU2tZPV1oDl45lWY154= cloud.google.com/go/certificatemanager v1.3.0/go.mod h1:n6twGDvcUBFu9uBgt4eYvvf3sQ6My8jADcOVwHmzadg= cloud.google.com/go/certificatemanager v1.4.0/go.mod h1:vowpercVFyqs8ABSmrdV+GiFf2H/ch3KyudYQEMM590= cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8= +cloud.google.com/go/certificatemanager v1.7.1/go.mod h1:iW8J3nG6SaRYImIa+wXQ0g8IgoofDFRp5UMzaNk1UqI= cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk= cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk= cloud.google.com/go/channel v1.11.0/go.mod h1:IdtI0uWGqhEeatSB62VOoJ8FSUhJ9/+iGkJVqp74CGE= cloud.google.com/go/channel v1.12.0/go.mod h1:VkxCGKASi4Cq7TbXxlaBezonAYpp1GCnKMY6tnMQnLU= +cloud.google.com/go/channel v1.16.0/go.mod h1:eN/q1PFSl5gyu0dYdmxNXscY/4Fi7ABmeHCJNf/oHmc= +cloud.google.com/go/channel v1.17.0/go.mod h1:RpbhJsGi/lXWAUM1eF4IbQGbsfVlg2o8Iiy2/YLfVT0= cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U= cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA= cloud.google.com/go/cloudbuild v1.6.0/go.mod h1:UIbc/w9QCbH12xX+ezUsgblrWv+Cv4Tw83GiSMHOn9M= cloud.google.com/go/cloudbuild 
v1.7.0/go.mod h1:zb5tWh2XI6lR9zQmsm1VRA+7OCuve5d8S+zJUul8KTg= cloud.google.com/go/cloudbuild v1.9.0/go.mod h1:qK1d7s4QlO0VwfYn5YuClDGg2hfmLZEb4wQGAbIgL1s= +cloud.google.com/go/cloudbuild v1.10.1/go.mod h1:lyJg7v97SUIPq4RC2sGsz/9tNczhyv2AjML/ci4ulzU= +cloud.google.com/go/cloudbuild v1.13.0/go.mod h1:lyJg7v97SUIPq4RC2sGsz/9tNczhyv2AjML/ci4ulzU= +cloud.google.com/go/cloudbuild v1.14.0/go.mod h1:lyJg7v97SUIPq4RC2sGsz/9tNczhyv2AjML/ci4ulzU= cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM= cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk= cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA= +cloud.google.com/go/clouddms v1.6.1/go.mod h1:Ygo1vL52Ov4TBZQquhz5fiw2CQ58gvu+PlS6PVXCpZI= +cloud.google.com/go/clouddms v1.7.0/go.mod h1:MW1dC6SOtI/tPNCciTsXtsGNEM0i0OccykPvv3hiYeM= cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY= cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI= cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4= cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI= cloud.google.com/go/cloudtasks v1.9.0/go.mod h1:w+EyLsVkLWHcOaqNEyvcKAsWp9p29dL6uL9Nst1cI7Y= cloud.google.com/go/cloudtasks v1.10.0/go.mod h1:NDSoTLkZ3+vExFEWu2UJV1arUyzVDAiZtdWcsUyNwBs= +cloud.google.com/go/cloudtasks v1.11.1/go.mod h1:a9udmnou9KO2iulGscKR0qBYjreuX8oHwpmFsKspEvM= +cloud.google.com/go/cloudtasks v1.12.1/go.mod h1:a9udmnou9KO2iulGscKR0qBYjreuX8oHwpmFsKspEvM= cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= @@ -171,8 +216,13 @@ cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo= cloud.google.com/go/compute v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA= cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs= -cloud.google.com/go/compute v1.19.0 h1:+9zda3WGgW1ZSTlVppLCYFIr48Pa35q1uG2N1itbCEQ= cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU= +cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE= +cloud.google.com/go/compute v1.19.3/go.mod h1:qxvISKp/gYnXkSAD1ppcSOveRAmzxicEv/JlizULFrI= +cloud.google.com/go/compute v1.20.1/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= +cloud.google.com/go/compute v1.21.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= +cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY= +cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= @@ -181,15 +231,22 @@ cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= cloud.google.com/go/contactcenterinsights v1.6.0/go.mod 
h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= +cloud.google.com/go/contactcenterinsights v1.9.1/go.mod h1:bsg/R7zGLYMVxFFzfh9ooLTruLRCG9fnzhH9KznHhbM= +cloud.google.com/go/contactcenterinsights v1.10.0/go.mod h1:bsg/R7zGLYMVxFFzfh9ooLTruLRCG9fnzhH9KznHhbM= cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg= cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo= cloud.google.com/go/container v1.13.1/go.mod h1:6wgbMPeQRw9rSnKBCAJXnds3Pzj03C4JHamr8asWKy4= cloud.google.com/go/container v1.14.0/go.mod h1:3AoJMPhHfLDxLvrlVWaK57IXzaPnLaZq63WX59aQBfM= cloud.google.com/go/container v1.15.0/go.mod h1:ft+9S0WGjAyjDggg5S06DXj+fHJICWg8L7isCQe9pQA= +cloud.google.com/go/container v1.22.1/go.mod h1:lTNExE2R7f+DLbAN+rJiKTisauFCaoDq6NURZ83eVH4= +cloud.google.com/go/container v1.24.0/go.mod h1:lTNExE2R7f+DLbAN+rJiKTisauFCaoDq6NURZ83eVH4= +cloud.google.com/go/container v1.26.0/go.mod h1:YJCmRet6+6jnYYRS000T6k0D0xUXQgBSaJ7VwI8FBj4= cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= cloud.google.com/go/containeranalysis v0.7.0/go.mod h1:9aUL+/vZ55P2CXfuZjS4UjQ9AgXoSw8Ts6lemfmxBxI= cloud.google.com/go/containeranalysis v0.9.0/go.mod h1:orbOANbwk5Ejoom+s+DUCTTJ7IBdBQJDcSylAx/on9s= +cloud.google.com/go/containeranalysis v0.10.1/go.mod h1:Ya2jiILITMY68ZLPaogjmOMNkwsDrWBSTyBubGXO7j0= +cloud.google.com/go/containeranalysis v0.11.0/go.mod h1:4n2e99ZwpGxpNcz+YsFT1dfOHPQFGcAC8FN2M2/ne/U= cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs= cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc= @@ -198,44 +255,66 @@ cloud.google.com/go/datacatalog v1.8.1/go.mod h1:RJ58z4rMp3gvETA465Vg+ag8BGgBdnRPEMMSTr5Uv+M= cloud.google.com/go/datacatalog v1.12.0/go.mod h1:CWae8rFkfp6LzLumKOnmVh4+Zle4A3NXLzVJ1d1mRm0= cloud.google.com/go/datacatalog v1.13.0/go.mod h1:E4Rj9a5ZtAxcQJlEBTLgMTphfP11/lNaAshpoBgemX8= +cloud.google.com/go/datacatalog v1.14.0/go.mod h1:h0PrGtlihoutNMp/uvwhawLQ9+c63Kz65UFqh49Yo+E= +cloud.google.com/go/datacatalog v1.14.1/go.mod h1:d2CevwTG4yedZilwe+v3E3ZBDRMobQfSG/a6cCCN5R4= +cloud.google.com/go/datacatalog v1.16.0/go.mod h1:d2CevwTG4yedZilwe+v3E3ZBDRMobQfSG/a6cCCN5R4= +cloud.google.com/go/datacatalog v1.17.1/go.mod h1:nCSYFHgtxh2MiEktWIz71s/X+7ds/UT9kp0PC7waCzE= cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM= cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE= +cloud.google.com/go/dataflow v0.9.1/go.mod h1:Wp7s32QjYuQDWqJPFFlnBKhkAtiFpMTdg00qGbnIHVw= cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo= cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE= cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0= cloud.google.com/go/dataform v0.6.0/go.mod h1:QPflImQy33e29VuapFdf19oPbE4aYTJxr31OAPV+ulA= cloud.google.com/go/dataform v0.7.0/go.mod h1:7NulqnVozfHvWUBpMDfKMUESr+85aJsC/2O0o3jWPDE= +cloud.google.com/go/dataform v0.8.1/go.mod h1:3BhPSiw8xmppbgzeBbmDvmSWlwouuJkXsXsb8UBih9M= cloud.google.com/go/datafusion v1.4.0/go.mod 
h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38= cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w= cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8= +cloud.google.com/go/datafusion v1.7.1/go.mod h1:KpoTBbFmoToDExJUso/fcCiguGDk7MEzOWXUsJo0wsI= cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I= cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ= cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM= +cloud.google.com/go/datalabeling v0.8.1/go.mod h1:XS62LBSVPbYR54GfYQsPXZjTW8UxCK2fkDciSrpRFdY= cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA= cloud.google.com/go/dataplex v1.4.0/go.mod h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A= cloud.google.com/go/dataplex v1.5.2/go.mod h1:cVMgQHsmfRoI5KFYq4JtIBEUbYwc3c7tXmIDhRmNNVQ= cloud.google.com/go/dataplex v1.6.0/go.mod h1:bMsomC/aEJOSpHXdFKFGQ1b0TDPIeL28nJObeO1ppRs= +cloud.google.com/go/dataplex v1.8.1/go.mod h1:7TyrDT6BCdI8/38Uvp0/ZxBslOslP2X2MPDucliyvSE= +cloud.google.com/go/dataplex v1.9.0/go.mod h1:7TyrDT6BCdI8/38Uvp0/ZxBslOslP2X2MPDucliyvSE= +cloud.google.com/go/dataplex v1.9.1/go.mod h1:7TyrDT6BCdI8/38Uvp0/ZxBslOslP2X2MPDucliyvSE= cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s= cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI= cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4= +cloud.google.com/go/dataproc/v2 v2.0.1/go.mod h1:7Ez3KRHdFGcfY7GcevBbvozX+zyWGcwLJvvAMwCaoZ4= +cloud.google.com/go/dataproc/v2 v2.2.0/go.mod h1:lZR7AQtwZPvmINx5J87DSOOpTfof9LVZju6/Qo4lmcY= cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo= cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA= cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c= +cloud.google.com/go/dataqna v0.8.1/go.mod h1:zxZM0Bl6liMePWsHA8RMGAfmTG34vJMapbHAxQ5+WA8= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/datastore v1.10.0/go.mod h1:PC5UzAmDEkAmkfaknstTYbNpgE49HAgW2J1gcgUfmdM= cloud.google.com/go/datastore v1.11.0/go.mod h1:TvGxBIHCS50u8jzG+AW/ppf87v1of8nwzFNgEZU1D3c= +cloud.google.com/go/datastore v1.12.0/go.mod h1:KjdB88W897MRITkvWWJrg2OUtrR5XVj1EoLgSp6/N70= +cloud.google.com/go/datastore v1.12.1/go.mod h1:KjdB88W897MRITkvWWJrg2OUtrR5XVj1EoLgSp6/N70= +cloud.google.com/go/datastore v1.13.0/go.mod h1:KjdB88W897MRITkvWWJrg2OUtrR5XVj1EoLgSp6/N70= +cloud.google.com/go/datastore v1.14.0/go.mod h1:GAeStMBIt9bPS7jMJA85kgkpsMkvseWWXiaHya9Jes8= cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo= cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g= cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4= cloud.google.com/go/datastream v1.6.0/go.mod h1:6LQSuswqLa7S4rPAOZFVjHIG3wJIjZcZrw8JDEDJuIs= cloud.google.com/go/datastream v1.7.0/go.mod h1:uxVRMm2elUSPuh65IbZpzJNMbuzkcvu5CjMqVIUHrww= +cloud.google.com/go/datastream v1.9.1/go.mod 
h1:hqnmr8kdUBmrnk65k5wNRoHSCYksvpdZIcZIEl8h43Q= +cloud.google.com/go/datastream v1.10.0/go.mod h1:hqnmr8kdUBmrnk65k5wNRoHSCYksvpdZIcZIEl8h43Q= cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c= cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s= cloud.google.com/go/deploy v1.6.0/go.mod h1:f9PTHehG/DjCom3QH0cntOVRm93uGBDt2vKzAPwpXQI= cloud.google.com/go/deploy v1.8.0/go.mod h1:z3myEJnA/2wnB4sgjqdMfgxCA0EqC3RBTNcVPs93mtQ= +cloud.google.com/go/deploy v1.11.0/go.mod h1:tKuSUV5pXbn67KiubiUNUejqLs4f5cxxiCNCeyl0F2g= +cloud.google.com/go/deploy v1.13.0/go.mod h1:tKuSUV5pXbn67KiubiUNUejqLs4f5cxxiCNCeyl0F2g= cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4= cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0= cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8= @@ -244,35 +323,51 @@ cloud.google.com/go/dialogflow v1.29.0/go.mod h1:b+2bzMe+k1s9V+F2jbJwpHPzrnIyHihAdRFMtn2WXuM= cloud.google.com/go/dialogflow v1.31.0/go.mod h1:cuoUccuL1Z+HADhyIA7dci3N5zUssgpBJmCzI6fNRB4= cloud.google.com/go/dialogflow v1.32.0/go.mod h1:jG9TRJl8CKrDhMEcvfcfFkkpp8ZhgPz3sBGmAUYJ2qE= +cloud.google.com/go/dialogflow v1.38.0/go.mod h1:L7jnH+JL2mtmdChzAIcXQHXMvQkE3U4hTaNltEuxXn4= +cloud.google.com/go/dialogflow v1.40.0/go.mod h1:L7jnH+JL2mtmdChzAIcXQHXMvQkE3U4hTaNltEuxXn4= +cloud.google.com/go/dialogflow v1.43.0/go.mod h1:pDUJdi4elL0MFmt1REMvFkdsUTYSHq+rTCS8wg0S3+M= cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM= cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q= cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4= +cloud.google.com/go/dlp v1.10.1/go.mod h1:IM8BWz1iJd8njcNcG0+Kyd9OPnqnRNkDV8j42VT5KOI= cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU= cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU= cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k= cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4= cloud.google.com/go/documentai v1.16.0/go.mod h1:o0o0DLTEZ+YnJZ+J4wNfTxmDVyrkzFvttBXXtYRMHkM= cloud.google.com/go/documentai v1.18.0/go.mod h1:F6CK6iUH8J81FehpskRmhLq/3VlwQvb7TvwOceQ2tbs= +cloud.google.com/go/documentai v1.20.0/go.mod h1:yJkInoMcK0qNAEdRnqY/D5asy73tnPe88I1YTZT+a8E= +cloud.google.com/go/documentai v1.22.0/go.mod h1:yJkInoMcK0qNAEdRnqY/D5asy73tnPe88I1YTZT+a8E= +cloud.google.com/go/documentai v1.22.1/go.mod h1:LKs22aDHbJv7ufXuPypzRO7rG3ALLJxzdCXDPutw4Qc= cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y= cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE= +cloud.google.com/go/domains v0.9.1/go.mod h1:aOp1c0MbejQQ2Pjf1iJvnVyT+z6R6s8pX66KaCSDYfE= cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= cloud.google.com/go/edgecontainer v0.3.0/go.mod h1:FLDpP4nykgwwIfcLt6zInhprzw0lEi2P1fjO6Ie0qbc= cloud.google.com/go/edgecontainer v1.0.0/go.mod h1:cttArqZpBB2q58W/upSG++ooo6EsblxDIolxa3jSjbY= +cloud.google.com/go/edgecontainer v1.1.1/go.mod 
h1:O5bYcS//7MELQZs3+7mabRqoWQhXCzenBu0R8bz2rwk= cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI= cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8= cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M= +cloud.google.com/go/essentialcontacts v1.6.2/go.mod h1:T2tB6tX+TRak7i88Fb2N9Ok3PvY3UNbUsMag9/BARh4= cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc= cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw= cloud.google.com/go/eventarc v1.10.0/go.mod h1:u3R35tmZ9HvswGRBnF48IlYgYeBcPUCjkr4BTdem2Kw= cloud.google.com/go/eventarc v1.11.0/go.mod h1:PyUjsUKPWoRBCHeOxZd/lbOOjahV41icXyUY5kSTvVY= +cloud.google.com/go/eventarc v1.12.1/go.mod h1:mAFCW6lukH5+IZjkvrEss+jmt2kOdYlN8aMx3sRJiAI= +cloud.google.com/go/eventarc v1.13.0/go.mod h1:mAFCW6lukH5+IZjkvrEss+jmt2kOdYlN8aMx3sRJiAI= cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w= cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI= cloud.google.com/go/filestore v1.5.0/go.mod h1:FqBXDWBp4YLHqRnVGveOkHDf8svj9r5+mUDLupOWEDs= cloud.google.com/go/filestore v1.6.0/go.mod h1:di5unNuss/qfZTw2U9nhFqo8/ZDSc466dre85Kydllg= +cloud.google.com/go/filestore v1.7.1/go.mod h1:y10jsorq40JJnjR/lQ8AfFbbcGlw3g+Dp8oN7i7FjV4= cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= +cloud.google.com/go/firestore v1.11.0/go.mod h1:b38dKhgzlmNNGTNZZwe7ZRFEuRab1Hay3/DBsIGKKy4= +cloud.google.com/go/firestore v1.12.0/go.mod h1:b38dKhgzlmNNGTNZZwe7ZRFEuRab1Hay3/DBsIGKKy4= +cloud.google.com/go/firestore v1.13.0/go.mod h1:QojqqOh8IntInDUSTAh0c8ZsPYAr68Ma8c5DWOy8xb8= cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk= cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY= @@ -280,28 +375,38 @@ cloud.google.com/go/functions v1.10.0/go.mod h1:0D3hEOe3DbEvCXtYOZHQZmD+SzYsi1YbI7dGvHfldXw= cloud.google.com/go/functions v1.12.0/go.mod h1:AXWGrF3e2C/5ehvwYo/GH6O5s09tOPksiKhz+hH8WkA= cloud.google.com/go/functions v1.13.0/go.mod h1:EU4O007sQm6Ef/PwRsI8N2umygGqPBS/IZQKBQBcJ3c= +cloud.google.com/go/functions v1.15.1/go.mod h1:P5yNWUTkyU+LvW/S9O6V+V423VZooALQlqoXdoPz5AE= cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM= cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA= cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w= cloud.google.com/go/gaming v1.8.0/go.mod h1:xAqjS8b7jAVW0KFYeRUxngo9My3f33kFmua++Pi+ggM= cloud.google.com/go/gaming v1.9.0/go.mod h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0= +cloud.google.com/go/gaming v1.10.1/go.mod h1:XQQvtfP8Rb9Rxnxm5wFVpAp9zCQkJi2bLIb7iHGwB3s= cloud.google.com/go/gkebackup v0.2.0/go.mod h1:XKvv/4LfG829/B8B7xRkk8zRrOEbKtEam6yNfuQNH60= cloud.google.com/go/gkebackup v0.3.0/go.mod h1:n/E671i1aOQvUxT541aTkCwExO/bTer2HDlj4TsBRAo= cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg= +cloud.google.com/go/gkebackup v1.3.0/go.mod h1:vUDOu++N0U5qs4IhG1pcOnD1Mac79xWy6GoBFlWCWBU= +cloud.google.com/go/gkebackup v1.3.1/go.mod 
h1:vUDOu++N0U5qs4IhG1pcOnD1Mac79xWy6GoBFlWCWBU= cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o= cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A= cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw= +cloud.google.com/go/gkeconnect v0.8.1/go.mod h1:KWiK1g9sDLZqhxB2xEuPV8V9NYzrqTUmQR9shJHpOZw= cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0= cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0= cloud.google.com/go/gkehub v0.11.0/go.mod h1:JOWHlmN+GHyIbuWQPl47/C2RFhnFKH38jH9Ascu3n0E= cloud.google.com/go/gkehub v0.12.0/go.mod h1:djiIwwzTTBrF5NaXCGv3mf7klpEMcST17VBTVVDcuaw= +cloud.google.com/go/gkehub v0.14.1/go.mod h1:VEXKIJZ2avzrbd7u+zeMtW00Y8ddk/4V9511C9CQGTY= cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA= cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI= cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y= +cloud.google.com/go/gkemulticloud v0.6.1/go.mod h1:kbZ3HKyTsiwqKX7Yw56+wUGwwNZViRnxWK2DVknXWfw= +cloud.google.com/go/gkemulticloud v1.0.0/go.mod h1:kbZ3HKyTsiwqKX7Yw56+wUGwwNZViRnxWK2DVknXWfw= cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc= +cloud.google.com/go/grafeas v0.3.0/go.mod h1:P7hgN24EyONOTMyeJH6DxG4zD7fwiYa5Q6GUgyFSOU8= cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM= cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o= cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo= +cloud.google.com/go/gsuiteaddons v1.6.1/go.mod h1:CodrdOqRZcLp5WOwejHWYBjZvfY0kOphkAKpF/3qdZY= cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c= cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc= @@ -310,20 +415,28 @@ cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE= cloud.google.com/go/iam v0.11.0/go.mod h1:9PiLDanza5D+oWFZiH1uG+RnRCfEGKoyl6yo4cgWZGY= cloud.google.com/go/iam v0.12.0/go.mod h1:knyHGviacl11zrtZUoDuYpDgLjvr28sLQaG0YB2GYAY= -cloud.google.com/go/iam v0.13.0 h1:+CmB+K0J/33d0zSQ9SlFWUeCCEn5XJA0ZMZ3pHE9u8k= cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= +cloud.google.com/go/iam v1.0.1/go.mod h1:yR3tmSL8BcZB4bxByRv2jkSIahVmCtfKZwLYGBalRE8= +cloud.google.com/go/iam v1.1.0/go.mod h1:nxdHjaKfCr7fNYx/HJMM8LgiMugmveWlkatear5gVyk= +cloud.google.com/go/iam v1.1.1/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= +cloud.google.com/go/iam v1.1.2 h1:gacbrBdWcoVmGLozRuStX45YKvJtzIjJdAolzUs1sm4= +cloud.google.com/go/iam v1.1.2/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= cloud.google.com/go/iap v1.6.0/go.mod h1:NSuvI9C/j7UdjGjIde7t7HBz+QTwBcapPE07+sSRcLk= cloud.google.com/go/iap v1.7.0/go.mod h1:beqQx56T9O1G1yNPph+spKpNibDlYIiIixiqsQXxLIo= cloud.google.com/go/iap v1.7.1/go.mod h1:WapEwPc7ZxGt2jFGB/C/bm+hP0Y6NXzOYGjpPnmMS74= +cloud.google.com/go/iap v1.8.1/go.mod 
h1:sJCbeqg3mvWLqjZNsI6dfAtbbV1DL2Rl7e1mTyXYREQ= +cloud.google.com/go/iap v1.9.0/go.mod h1:01OFxd1R+NFrg78S+hoPV5PxEzv22HXaNqUUlmNHFuY= cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM= cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY= cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4= +cloud.google.com/go/ids v1.4.1/go.mod h1:np41ed8YMU8zOgv53MMMoCntLTn2lF+SUzlM+O3u/jw= cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs= cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g= cloud.google.com/go/iot v1.5.0/go.mod h1:mpz5259PDl3XJthEmh9+ap0affn/MqNSP4My77Qql9o= cloud.google.com/go/iot v1.6.0/go.mod h1:IqdAsmE2cTYYNO1Fvjfzo9po179rAtJeVGUvkLN3rLE= +cloud.google.com/go/iot v1.7.1/go.mod h1:46Mgw7ev1k9KqK1ao0ayW9h0lI+3hxeanz+L1zmbbbk= cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA= cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg= cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0= @@ -331,92 +444,131 @@ cloud.google.com/go/kms v1.9.0/go.mod h1:qb1tPTgfF9RQP8e1wq4cLFErVuTJv7UsSC915J8dh3w= cloud.google.com/go/kms v1.10.0/go.mod h1:ng3KTUtQQU9bPX3+QGLsflZIHlkbn8amFAMY63m8d24= cloud.google.com/go/kms v1.10.1/go.mod h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyySAyYI= +cloud.google.com/go/kms v1.11.0/go.mod h1:hwdiYC0xjnWsKQQCQQmIQnS9asjYVSK6jtXm+zFqXLM= +cloud.google.com/go/kms v1.12.1/go.mod h1:c9J991h5DTl+kg7gi3MYomh12YEENGrf48ee/N/2CDM= +cloud.google.com/go/kms v1.15.0/go.mod h1:c9J991h5DTl+kg7gi3MYomh12YEENGrf48ee/N/2CDM= +cloud.google.com/go/kms v1.15.2/go.mod h1:3hopT4+7ooWRCjc2DxgnpESFxhIraaI2IpAVUEhbT/w= cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE= cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8= cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY= +cloud.google.com/go/language v1.10.1/go.mod h1:CPp94nsdVNiQEt1CNjF5WkTcisLiHPyIbMhvR8H2AW0= +cloud.google.com/go/language v1.11.0/go.mod h1:uDx+pFDdAKTY8ehpWbiXyQdz8tDSYLJbQcXsCkjYyvQ= cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08= cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo= +cloud.google.com/go/lifesciences v0.9.1/go.mod h1:hACAOd1fFbCGLr/+weUKRAJas82Y4vrL3O5326N//Wc= cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw= cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= +cloud.google.com/go/logging v1.8.1/go.mod h1:TJjR+SimHwuC8MZ9cjByQulAMgni+RkXeI3wwctHJEI= cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= -cloud.google.com/go/longrunning v0.4.1 h1:v+yFJOfKC3yZdY6ZUI933pIYdhyhV8S3NpWrXWmg7jM= cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= +cloud.google.com/go/longrunning v0.4.2/go.mod 
h1:OHrnaYyLUV6oqwh0xiS7e5sLQhP1m0QU9R+WhGDMgIQ= +cloud.google.com/go/longrunning v0.5.0/go.mod h1:0JNuqRShmscVAhIACGtskSAWtqtOoPkwP0YF1oVEchc= +cloud.google.com/go/longrunning v0.5.1/go.mod h1:spvimkwdz6SPWKEt/XBij79E9fiTkHSQl/fRUUQJYJc= cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= +cloud.google.com/go/managedidentities v1.6.1/go.mod h1:h/irGhTN2SkZ64F43tfGPMbHnypMbu4RB3yl8YcuEak= cloud.google.com/go/maps v0.1.0/go.mod h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI= cloud.google.com/go/maps v0.6.0/go.mod h1:o6DAMMfb+aINHz/p/jbcY+mYeXBoZoxTfdSQ8VAJaCw= cloud.google.com/go/maps v0.7.0/go.mod h1:3GnvVl3cqeSvgMcpRlQidXsPYuDGQ8naBis7MVzpXsY= +cloud.google.com/go/maps v1.3.0/go.mod h1:6mWTUv+WhnOwAgjVsSW2QPPECmW+s3PcRyOa9vgG/5s= +cloud.google.com/go/maps v1.4.0/go.mod h1:6mWTUv+WhnOwAgjVsSW2QPPECmW+s3PcRyOa9vgG/5s= cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4= cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I= +cloud.google.com/go/mediatranslation v0.8.1/go.mod h1:L/7hBdEYbYHQJhX2sldtTO5SZZ1C1vkapubj0T2aGig= cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE= cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM= cloud.google.com/go/memcache v1.6.0/go.mod h1:XS5xB0eQZdHtTuTF9Hf8eJkKtR3pVRCcvJwtm68T3rA= cloud.google.com/go/memcache v1.7.0/go.mod h1:ywMKfjWhNtkQTxrWxCkCFkoPjLHPW6A7WOTVI8xy3LY= cloud.google.com/go/memcache v1.9.0/go.mod h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM= +cloud.google.com/go/memcache v1.10.1/go.mod h1:47YRQIarv4I3QS5+hoETgKO40InqzLP6kpNLvyXuyaA= cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY= cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s= cloud.google.com/go/metastore v1.7.0/go.mod h1:s45D0B4IlsINu87/AsWiEVYbLaIMeUSoxlKKDqBGFS8= cloud.google.com/go/metastore v1.8.0/go.mod h1:zHiMc4ZUpBiM7twCIFQmJ9JMEkDSyZS9U12uf7wHqSI= cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo= +cloud.google.com/go/metastore v1.11.1/go.mod h1:uZuSo80U3Wd4zi6C22ZZliOUJ3XeM/MlYi/z5OAOWRA= +cloud.google.com/go/metastore v1.12.0/go.mod h1:uZuSo80U3Wd4zi6C22ZZliOUJ3XeM/MlYi/z5OAOWRA= cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk= cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4= cloud.google.com/go/monitoring v1.12.0/go.mod h1:yx8Jj2fZNEkL/GYZyTLS4ZtZEZN8WtDEiEqG4kLK50w= cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= +cloud.google.com/go/monitoring v1.15.1/go.mod h1:lADlSAlFdbqQuwwpaImhsJXu1QSdd3ojypXrFSMr2rM= +cloud.google.com/go/monitoring v1.16.0/go.mod h1:Ptp15HgAyM1fNICAojDMoNc/wUmn67mLHQfyqbw+poY= cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= 
cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8= cloud.google.com/go/networkconnectivity v1.10.0/go.mod h1:UP4O4sWXJG13AqrTdQCD9TnLGEbtNRqjuaaA7bNjF5E= cloud.google.com/go/networkconnectivity v1.11.0/go.mod h1:iWmDD4QF16VCDLXUqvyspJjIEtBR/4zq5hwnY2X3scM= +cloud.google.com/go/networkconnectivity v1.12.1/go.mod h1:PelxSWYM7Sh9/guf8CFhi6vIqf19Ir/sbfZRUwXh92E= +cloud.google.com/go/networkconnectivity v1.13.0/go.mod h1:SAnGPes88pl7QRLUen2HmcBSE9AowVAcdug8c0RSBFk= cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8= cloud.google.com/go/networkmanagement v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4= cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY= +cloud.google.com/go/networkmanagement v1.8.0/go.mod h1:Ho/BUGmtyEqrttTgWEe7m+8vDdK74ibQc+Be0q7Fof0= +cloud.google.com/go/networkmanagement v1.9.0/go.mod h1:UTUaEU9YwbCAhhz3jEOHr+2/K/MrBk2XxOLS89LQzFw= cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ= cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU= cloud.google.com/go/networksecurity v0.7.0/go.mod h1:mAnzoxx/8TBSyXEeESMy9OOYwo1v+gZ5eMRnsT5bC8k= cloud.google.com/go/networksecurity v0.8.0/go.mod h1:B78DkqsxFG5zRSVuwYFRZ9Xz8IcQ5iECsNrPn74hKHU= +cloud.google.com/go/networksecurity v0.9.1/go.mod h1:MCMdxOKQ30wsBI1eI659f9kEp4wuuAueoC9AJKSPWZQ= cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY= cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34= cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA= cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0= cloud.google.com/go/notebooks v1.7.0/go.mod h1:PVlaDGfJgj1fl1S3dUwhFMXFgfYGhYQt2164xOMONmE= cloud.google.com/go/notebooks v1.8.0/go.mod h1:Lq6dYKOYOWUCTvw5t2q1gp1lAp0zxAxRycayS0iJcqQ= +cloud.google.com/go/notebooks v1.9.1/go.mod h1:zqG9/gk05JrzgBt4ghLzEepPHNwE5jgPcHZRKhlC1A8= +cloud.google.com/go/notebooks v1.10.0/go.mod h1:SOPYMZnttHxqot0SGSFSkRrwE29eqnKPBJFqgWmiK2k= cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4= cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs= cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI= +cloud.google.com/go/optimization v1.4.1/go.mod h1:j64vZQP7h9bO49m2rVaTVoNM0vEBEN5eKPUPbZyXOrk= +cloud.google.com/go/optimization v1.5.0/go.mod h1:evo1OvTxeBRBu6ydPlrIRizKY/LJKo/drDMMRKqGEUU= cloud.google.com/go/orchestration v1.3.0/go.mod h1:Sj5tq/JpWiB//X/q3Ngwdl5K7B7Y0KZ7bfv0wL6fqVA= cloud.google.com/go/orchestration v1.4.0/go.mod h1:6W5NLFWs2TlniBphAViZEVhrXRSMgUGDfW7vrWKvsBk= cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ= +cloud.google.com/go/orchestration v1.8.1/go.mod h1:4sluRF3wgbYVRqz7zJ1/EUNc90TTprliq9477fGobD8= cloud.google.com/go/orgpolicy v1.4.0/go.mod h1:xrSLIV4RePWmP9P3tBl8S93lTmlAxjm06NSm2UTmKvE= cloud.google.com/go/orgpolicy v1.5.0/go.mod h1:hZEc5q3wzwXJaKrsx5+Ewg0u1LxJ51nNFlext7Tanwc= cloud.google.com/go/orgpolicy v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc= +cloud.google.com/go/orgpolicy v1.11.0/go.mod h1:2RK748+FtVvnfuynxBzdnyu7sygtoZa1za/0ZfpOs1M= +cloud.google.com/go/orgpolicy v1.11.1/go.mod 
h1:8+E3jQcpZJQliP+zaFfayC2Pg5bmhuLK755wKhIIUCE= cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs= cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg= cloud.google.com/go/osconfig v1.9.0/go.mod h1:Yx+IeIZJ3bdWmzbQU4fxNl8xsZ4amB+dygAwFPlvnNo= cloud.google.com/go/osconfig v1.10.0/go.mod h1:uMhCzqC5I8zfD9zDEAfvgVhDS8oIjySWh+l4WK6GnWw= cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw= +cloud.google.com/go/osconfig v1.12.0/go.mod h1:8f/PaYzoS3JMVfdfTubkowZYGmAhUCjjwnjqWI7NVBc= +cloud.google.com/go/osconfig v1.12.1/go.mod h1:4CjBxND0gswz2gfYRCUoUzCm9zCABp91EeTtWXyz0tE= cloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E= cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU= cloud.google.com/go/oslogin v1.6.0/go.mod h1:zOJ1O3+dTU8WPlGEkFSh7qeHPPSoxrcMbbK1Nm2iX70= cloud.google.com/go/oslogin v1.7.0/go.mod h1:e04SN0xO1UNJ1M5GP0vzVBFicIe4O53FOfcixIqTyXo= cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs= +cloud.google.com/go/oslogin v1.10.1/go.mod h1:x692z7yAue5nE7CsSnoG0aaMbNoRJRXO4sn73R+ZqAs= cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0= cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA= cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk= +cloud.google.com/go/phishingprotection v0.8.1/go.mod h1:AxonW7GovcA8qdEk13NfHq9hNx5KPtfxXNeUxTDxB6I= cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg= cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE= cloud.google.com/go/policytroubleshooter v1.5.0/go.mod h1:Rz1WfV+1oIpPdN2VvvuboLVRsB1Hclg3CKQ53j9l8vw= cloud.google.com/go/policytroubleshooter v1.6.0/go.mod h1:zYqaPTsmfvpjm5ULxAyD/lINQxJ0DDsnWOP/GZ7xzBc= +cloud.google.com/go/policytroubleshooter v1.7.1/go.mod h1:0NaT5v3Ag1M7U5r0GfDCpUFkWd9YqpubBWsQlhanRv0= +cloud.google.com/go/policytroubleshooter v1.8.0/go.mod h1:tmn5Ir5EToWe384EuboTcVQT7nTag2+DuH3uHmKd1HU= +cloud.google.com/go/policytroubleshooter v1.9.0/go.mod h1:+E2Lga7TycpeSTj2FsH4oXxTnrbHJGRlKhVZBLGgU64= cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0= cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI= cloud.google.com/go/privatecatalog v0.7.0/go.mod h1:2s5ssIFO69F5csTXcwBP7NPFTZvps26xGzvQ2PQaBYg= cloud.google.com/go/privatecatalog v0.8.0/go.mod h1:nQ6pfaegeDAq/Q5lrfCQzQLhubPiZhSaNhIgfJlnIXs= +cloud.google.com/go/privatecatalog v0.9.1/go.mod h1:0XlDXW2unJXdf9zFz968Hp35gl/bhF4twwpXZAW50JA= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= @@ -425,9 +577,12 @@ cloud.google.com/go/pubsub v1.27.1/go.mod h1:hQN39ymbV9geqBnfQq6Xf63yNhUAhv9CZhzp5O6qsW0= cloud.google.com/go/pubsub v1.28.0/go.mod h1:vuXFpwaVoIPQMGXqRyUQigu/AX1S3IWugR9xznmcXX8= cloud.google.com/go/pubsub v1.30.0/go.mod h1:qWi1OPS0B+b5L+Sg6Gmc9zD1Y+HaM0MdUr7LsupY1P4= +cloud.google.com/go/pubsub v1.32.0/go.mod h1:f+w71I33OMyxf9VpMVcZbnG5KSUkCOUHYpFd5U1GdRc= +cloud.google.com/go/pubsub v1.33.0/go.mod 
h1:f+w71I33OMyxf9VpMVcZbnG5KSUkCOUHYpFd5U1GdRc= cloud.google.com/go/pubsublite v1.5.0/go.mod h1:xapqNQ1CuLfGi23Yda/9l4bBCKz/wC3KIJ5gKcxveZg= cloud.google.com/go/pubsublite v1.6.0/go.mod h1:1eFCS0U11xlOuMFV/0iBqw3zP12kddMeCbj/F3FSj9k= cloud.google.com/go/pubsublite v1.7.0/go.mod h1:8hVMwRXfDfvGm3fahVbtDbiLePT3gpoiJYJY+vxWxVM= +cloud.google.com/go/pubsublite v1.8.1/go.mod h1:fOLdU4f5xldK4RGJrBMm+J7zMWNj/k4PxwEZXy39QS0= cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4= cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o= cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk= @@ -436,46 +591,57 @@ cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U= cloud.google.com/go/recaptchaenterprise/v2 v2.6.0/go.mod h1:RPauz9jeLtB3JVzg6nCbe12qNoaa8pXc4d/YukAmcnA= cloud.google.com/go/recaptchaenterprise/v2 v2.7.0/go.mod h1:19wVj/fs5RtYtynAPJdDTb69oW0vNHYDBTbB4NvMD9c= +cloud.google.com/go/recaptchaenterprise/v2 v2.7.2/go.mod h1:kR0KjsJS7Jt1YSyWFkseQ756D45kaYNTlDPPaRAvDBU= cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg= cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4= cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac= +cloud.google.com/go/recommendationengine v0.8.1/go.mod h1:MrZihWwtFYWDzE6Hz5nKcNz3gLizXVIDI/o3G1DLcrE= cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg= cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c= cloud.google.com/go/recommender v1.7.0/go.mod h1:XLHs/W+T8olwlGOgfQenXBTbIseGclClff6lhFVe9Bs= cloud.google.com/go/recommender v1.8.0/go.mod h1:PkjXrTT05BFKwxaUxQmtIlrtj0kph108r02ZZQ5FE70= cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ= +cloud.google.com/go/recommender v1.10.1/go.mod h1:XFvrE4Suqn5Cq0Lf+mCP6oBHD/yRMA8XxP5sb7Q7gpA= +cloud.google.com/go/recommender v1.11.0/go.mod h1:kPiRQhPyTJ9kyXPCG6u/dlPLbYfFlkwHNRwdzPVAoII= cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y= cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A= cloud.google.com/go/redis v1.9.0/go.mod h1:HMYQuajvb2D0LvMgZmLDZW8V5aOC/WxstZHiy4g8OiA= cloud.google.com/go/redis v1.10.0/go.mod h1:ThJf3mMBQtW18JzGgh41/Wld6vnDDc/F/F35UolRZPM= cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ= +cloud.google.com/go/redis v1.13.1/go.mod h1:VP7DGLpE91M6bcsDdMuyCm2hIpB6Vp2hI090Mfd1tcg= cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA= cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0= cloud.google.com/go/resourcemanager v1.5.0/go.mod h1:eQoXNAiAvCf5PXxWxXjhKQoTMaUSNrEfg+6qdf/wots= cloud.google.com/go/resourcemanager v1.6.0/go.mod h1:YcpXGRs8fDzcUl1Xw8uOVmI8JEadvhRIkoXXUNVYcVo= cloud.google.com/go/resourcemanager v1.7.0/go.mod h1:HlD3m6+bwhzj9XCouqmeiGuni95NTrExfhoSrkC/3EI= +cloud.google.com/go/resourcemanager v1.9.1/go.mod h1:dVCuosgrh1tINZ/RwBufr8lULmWGOkPS8gL5gqyjdT8= cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU= cloud.google.com/go/resourcesettings v1.4.0/go.mod 
h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg= cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA= +cloud.google.com/go/resourcesettings v1.6.1/go.mod h1:M7mk9PIZrC5Fgsu1kZJci6mpgN8o0IUzVx3eJU3y4Jw= cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4= cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY= cloud.google.com/go/retail v1.10.0/go.mod h1:2gDk9HsL4HMS4oZwz6daui2/jmKvqShXKQuB2RZ+cCc= cloud.google.com/go/retail v1.11.0/go.mod h1:MBLk1NaWPmh6iVFSz9MeKG/Psyd7TAgm6y/9L2B4x9Y= cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14= +cloud.google.com/go/retail v1.14.1/go.mod h1:y3Wv3Vr2k54dLNIrCzenyKG8g8dhvhncT2NcNjb/6gE= cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do= cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo= cloud.google.com/go/run v0.8.0/go.mod h1:VniEnuBwqjigv0A7ONfQUaEItaiCRVujlMqerPPiktM= cloud.google.com/go/run v0.9.0/go.mod h1:Wwu+/vvg8Y+JUApMwEDfVfhetv30hCG4ZwDR/IXl2Qg= +cloud.google.com/go/run v1.2.0/go.mod h1:36V1IlDzQ0XxbQjUx6IYbw8H3TJnWvhii963WW3B/bo= cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s= cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI= cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk= cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44= cloud.google.com/go/scheduler v1.8.0/go.mod h1:TCET+Y5Gp1YgHT8py4nlg2Sew8nUHMqcpousDgXJVQc= cloud.google.com/go/scheduler v1.9.0/go.mod h1:yexg5t+KSmqu+njTIh3b7oYPheFtBWGcbVUYF1GGMIc= +cloud.google.com/go/scheduler v1.10.1/go.mod h1:R63Ldltd47Bs4gnhQkmNDse5w8gBRrhObZ54PxgR2Oo= cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA= cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4= cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4= cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU= +cloud.google.com/go/secretmanager v1.11.1/go.mod h1:znq9JlXgTNdBeQk9TBW/FnR/W4uChEKGeqQWAJ8SXFw= cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4= cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0= cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU= @@ -483,12 +649,14 @@ cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA= cloud.google.com/go/security v1.12.0/go.mod h1:rV6EhrpbNHrrxqlvW0BWAIawFWq3X90SduMJdFwtLB8= cloud.google.com/go/security v1.13.0/go.mod h1:Q1Nvxl1PAgmeW0y3HTt54JYIvUdtcpYKVfIB8AOMZ+0= +cloud.google.com/go/security v1.15.1/go.mod h1:MvTnnbsWnehoizHi09zoiZob0iCHVcL4AUBj76h9fXA= cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU= cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc= cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk= cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk= cloud.google.com/go/securitycenter v1.18.1/go.mod h1:0/25gAzCM/9OL9vVx4ChPeM/+DlfGQJDwBy/UC8AKK0= cloud.google.com/go/securitycenter v1.19.0/go.mod 
h1:LVLmSg8ZkkyaNy4u7HCIshAngSQ8EcIRREP3xBnyfag= +cloud.google.com/go/securitycenter v1.23.0/go.mod h1:8pwQ4n+Y9WCWM278R8W3nF65QtY172h4S8aXyI9/hsQ= cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU= cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s= cloud.google.com/go/servicecontrol v1.10.0/go.mod h1:pQvyvSRh7YzUF2efw7H87V92mxU8FnFDawMClGCNuAA= @@ -500,6 +668,8 @@ cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U= cloud.google.com/go/servicedirectory v1.8.0/go.mod h1:srXodfhY1GFIPvltunswqXpVxFPpZjf8nkKQT7XcXaY= cloud.google.com/go/servicedirectory v1.9.0/go.mod h1:29je5JjiygNYlmsGz8k6o+OZ8vd4f//bQLtvzkPPT/s= +cloud.google.com/go/servicedirectory v1.10.1/go.mod h1:Xv0YVH8s4pVOwfM/1eMTl0XJ6bzIOSLDt8f8eLaGOxQ= +cloud.google.com/go/servicedirectory v1.11.0/go.mod h1:Xv0YVH8s4pVOwfM/1eMTl0XJ6bzIOSLDt8f8eLaGOxQ= cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco= cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo= cloud.google.com/go/servicemanagement v1.6.0/go.mod h1:aWns7EeeCOtGEX4OvZUWCCJONRZeFKiptqKf1D0l/Jc= @@ -511,15 +681,20 @@ cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4= cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw= cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A= +cloud.google.com/go/shell v1.7.1/go.mod h1:u1RaM+huXFaTojTbW4g9P5emOrrmLE69KrxqQahKn4g= cloud.google.com/go/spanner v1.41.0/go.mod h1:MLYDBJR/dY4Wt7ZaMIQ7rXOTLjYrmxLE/5ve9vFfWos= cloud.google.com/go/spanner v1.44.0/go.mod h1:G8XIgYdOK+Fbcpbs7p2fiprDw4CaZX63whnSMLVBxjk= cloud.google.com/go/spanner v1.45.0/go.mod h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M= +cloud.google.com/go/spanner v1.47.0/go.mod h1:IXsJwVW2j4UKs0eYDqodab6HgGuA1bViSqW4uH9lfUI= +cloud.google.com/go/spanner v1.49.0/go.mod h1:eGj9mQGK8+hkgSVbHNQ06pQ4oS+cyc4tXXd6Dif1KoM= cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM= cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ= cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0= cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco= cloud.google.com/go/speech v1.14.1/go.mod h1:gEosVRPJ9waG7zqqnsHpYTOoAS4KouMRLDFMekpJ0J0= cloud.google.com/go/speech v1.15.0/go.mod h1:y6oH7GhqCaZANH7+Oe0BhgIogsNInLlz542tg3VqeYI= +cloud.google.com/go/speech v1.17.1/go.mod h1:8rVNzU43tQvxDaGvqOhpDqgkJTFowBpDvCJ14kGlJYo= +cloud.google.com/go/speech v1.19.0/go.mod h1:8rVNzU43tQvxDaGvqOhpDqgkJTFowBpDvCJ14kGlJYo= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= @@ -530,43 +705,56 @@ cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y= -cloud.google.com/go/storage v1.29.0 h1:6weCgzRvMg7lzuUurI4697AqIRPU1SvzHhynwpW31jI= cloud.google.com/go/storage v1.29.0/go.mod h1:4puEjyTKnku6gfKoTfNOU/W+a9JyuVNxjpS5GBrB8h4= 
+cloud.google.com/go/storage v1.30.1 h1:uOdMxAs8HExqBlnLtnQyP0YkvbiDpdGShGKtx6U/oNM= +cloud.google.com/go/storage v1.30.1/go.mod h1:NfxhC0UJE1aXSx7CIIbCf7y9HKT7BiccwkR7+P7gN8E= cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w= cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I= cloud.google.com/go/storagetransfer v1.7.0/go.mod h1:8Giuj1QNb1kfLAiWM1bN6dHzfdlDAVC9rv9abHot2W4= cloud.google.com/go/storagetransfer v1.8.0/go.mod h1:JpegsHHU1eXg7lMHkvf+KE5XDJ7EQu0GwNJbbVGanEw= +cloud.google.com/go/storagetransfer v1.10.0/go.mod h1:DM4sTlSmGiNczmV6iZyceIh2dbs+7z2Ayg6YAiQlYfA= cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw= cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g= cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM= cloud.google.com/go/talent v1.4.0/go.mod h1:ezFtAgVuRf8jRsvyE6EwmbTK5LKciD4KVnHuDEFmOOA= cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c= +cloud.google.com/go/talent v1.6.2/go.mod h1:CbGvmKCG61mkdjcqTcLOkb2ZN1SrQI8MDyma2l7VD24= cloud.google.com/go/texttospeech v1.4.0/go.mod h1:FX8HQHA6sEpJ7rCMSfXuzBcysDAuWusNNNvN9FELDd8= cloud.google.com/go/texttospeech v1.5.0/go.mod h1:oKPLhR4n4ZdQqWKURdwxMy0uiTS1xU161C8W57Wkea4= cloud.google.com/go/texttospeech v1.6.0/go.mod h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc= +cloud.google.com/go/texttospeech v1.7.1/go.mod h1:m7QfG5IXxeneGqTapXNxv2ItxP/FS0hCZBwXYqucgSk= cloud.google.com/go/tpu v1.3.0/go.mod h1:aJIManG0o20tfDQlRIej44FcwGGl/cD0oiRyMKG19IQ= cloud.google.com/go/tpu v1.4.0/go.mod h1:mjZaX8p0VBgllCzF6wcU2ovUXN9TONFLd7iz227X2Xg= cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM= +cloud.google.com/go/tpu v1.6.1/go.mod h1:sOdcHVIgDEEOKuqUoi6Fq53MKHJAtOwtz0GuKsWSH3E= cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28= cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y= cloud.google.com/go/trace v1.8.0/go.mod h1:zH7vcsbAhklH8hWFig58HvxcxyQbaIqMarMg9hn5ECA= cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk= +cloud.google.com/go/trace v1.10.1/go.mod h1:gbtL94KE5AJLH3y+WVpfWILmqgc6dXcqgNXdOPAQTYk= cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs= cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg= cloud.google.com/go/translate v1.5.0/go.mod h1:29YDSYveqqpA1CQFD7NQuP49xymq17RXNaUDdc0mNu0= cloud.google.com/go/translate v1.6.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= cloud.google.com/go/translate v1.7.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= +cloud.google.com/go/translate v1.8.1/go.mod h1:d1ZH5aaOA0CNhWeXeC8ujd4tdCFw8XoNWRljklu5RHs= +cloud.google.com/go/translate v1.8.2/go.mod h1:d1ZH5aaOA0CNhWeXeC8ujd4tdCFw8XoNWRljklu5RHs= +cloud.google.com/go/translate v1.9.0/go.mod h1:d1ZH5aaOA0CNhWeXeC8ujd4tdCFw8XoNWRljklu5RHs= cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk= cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw= cloud.google.com/go/video v1.12.0/go.mod h1:MLQew95eTuaNDEGriQdcYn0dTwf9oWiA4uYebxM5kdg= cloud.google.com/go/video v1.13.0/go.mod h1:ulzkYlYgCp15N2AokzKjy7MQ9ejuynOJdf1tR5lGthk= cloud.google.com/go/video v1.14.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= 
cloud.google.com/go/video v1.15.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= +cloud.google.com/go/video v1.17.1/go.mod h1:9qmqPqw/Ib2tLqaeHgtakU+l5TcJxCJbhFXM7UJjVzU= +cloud.google.com/go/video v1.19.0/go.mod h1:9qmqPqw/Ib2tLqaeHgtakU+l5TcJxCJbhFXM7UJjVzU= +cloud.google.com/go/video v1.20.0/go.mod h1:U3G3FTnsvAGqglq9LxgqzOiBc/Nt8zis8S+850N2DUM= cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU= cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4= cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M= cloud.google.com/go/videointelligence v1.9.0/go.mod h1:29lVRMPDYHikk3v8EdPSaL8Ku+eMzDljjuvRs105XoU= cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU= +cloud.google.com/go/videointelligence v1.11.1/go.mod h1:76xn/8InyQHarjTWsBR058SmlPCwQjgcvoW0aZykOvo= cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0= cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo= cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo= @@ -574,34 +762,43 @@ cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E= cloud.google.com/go/vision/v2 v2.6.0/go.mod h1:158Hes0MvOS9Z/bDMSFpjwsUrZ5fPrdwuyyvKSGAGMY= cloud.google.com/go/vision/v2 v2.7.0/go.mod h1:H89VysHy21avemp6xcf9b9JvZHVehWbET0uT/bcuY/0= +cloud.google.com/go/vision/v2 v2.7.2/go.mod h1:jKa8oSYBWhYiXarHPvP4USxYANYUEdEsQrloLjrSwJU= cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE= cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g= cloud.google.com/go/vmmigration v1.5.0/go.mod h1:E4YQ8q7/4W9gobHjQg4JJSgXXSgY21nA5r8swQV+Xxc= cloud.google.com/go/vmmigration v1.6.0/go.mod h1:bopQ/g4z+8qXzichC7GW1w2MjbErL54rk3/C843CjfY= +cloud.google.com/go/vmmigration v1.7.1/go.mod h1:WD+5z7a/IpZ5bKK//YmT9E047AD+rjycCAvyMxGJbro= cloud.google.com/go/vmwareengine v0.1.0/go.mod h1:RsdNEf/8UDvKllXhMz5J40XxDrNJNN4sagiox+OI208= cloud.google.com/go/vmwareengine v0.2.2/go.mod h1:sKdctNJxb3KLZkE/6Oui94iw/xs9PRNC2wnNLXsHvH8= cloud.google.com/go/vmwareengine v0.3.0/go.mod h1:wvoyMvNWdIzxMYSpH/R7y2h5h3WFkx6d+1TIsP39WGY= +cloud.google.com/go/vmwareengine v0.4.1/go.mod h1:Px64x+BvjPZwWuc4HdmVhoygcXqEkGHXoa7uyfTgSI0= +cloud.google.com/go/vmwareengine v1.0.0/go.mod h1:Px64x+BvjPZwWuc4HdmVhoygcXqEkGHXoa7uyfTgSI0= cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w= cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8= cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes= +cloud.google.com/go/vpcaccess v1.7.1/go.mod h1:FogoD46/ZU+JUBX9D606X21EnxiszYi2tArQwLY4SXs= cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE= cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg= cloud.google.com/go/webrisk v1.6.0/go.mod h1:65sW9V9rOosnc9ZY7A7jsy1zoHS5W9IAXv6dGqhMQMc= cloud.google.com/go/webrisk v1.7.0/go.mod h1:mVMHgEYH0r337nmt1JyLthzMr6YxwN1aAIEc2fTcq7A= cloud.google.com/go/webrisk v1.8.0/go.mod h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg= +cloud.google.com/go/webrisk v1.9.1/go.mod h1:4GCmXKcOa2BZcZPn6DCEvE7HypmEJcJkr4mtM+sqYPc= cloud.google.com/go/websecurityscanner v1.3.0/go.mod 
h1:uImdKm2wyeXQevQJXeh8Uun/Ym1VqworNDlBXQevGMo= cloud.google.com/go/websecurityscanner v1.4.0/go.mod h1:ebit/Fp0a+FWu5j4JOmJEV8S8CzdTkAS77oDsiSqYWQ= cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng= +cloud.google.com/go/websecurityscanner v1.6.1/go.mod h1:Njgaw3rttgRHXzwCB8kgCYqv5/rGpFCsBOvPbYgszpg= cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0= cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M= cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M= cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA= cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw= +cloud.google.com/go/workflows v1.11.1/go.mod h1:Z+t10G1wF7h8LgdY/EmRcQY8ptBD/nvofaL6FqlET6g= +cloud.google.com/go/workflows v1.12.0/go.mod h1:PYhSk2b6DhZ508tj8HXKaBh+OFe+xdl0dHF/tJdzPQM= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8= git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk= @@ -619,14 +816,16 @@ github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apache/arrow/go/v10 v10.0.1/go.mod h1:YvhnlEePVnBS4+0z3fhPfUy7W1Ikj0Ih0vcRo/gZ1M0= github.com/apache/arrow/go/v11 v11.0.0/go.mod h1:Eg5OsL5H+e299f7u5ssuXsuHQVEGC4xei5aX110hRiI= +github.com/apache/arrow/go/v12 v12.0.0/go.mod h1:d+tV/eHZZ7Dz7RPrFKtPK02tpr+c9/PEd/zm8mDS9Vg= github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU= -github.com/apache/thrift v0.18.0 h1:YXuoqgVIHYiAp1WhRw59wXe86HQflof8fh3llIjRzMY= -github.com/apache/thrift v0.18.0/go.mod h1:rdQn/dCcDKEWjjylUeueum4vQEjG2v8v2PqriUnbr+I= -github.com/aws/aws-sdk-go v1.44.203 h1:pcsP805b9acL3wUqa4JR2vg1k2wnItkDYNvfmcy6F+U= -github.com/aws/aws-sdk-go v1.44.203/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/apache/thrift v0.18.1 h1:lNhK/1nqjbwbiOPDBPFJVKxgDEGSepKuTh6OLiXW8kg= +github.com/apache/thrift v0.18.1/go.mod h1:rdQn/dCcDKEWjjylUeueum4vQEjG2v8v2PqriUnbr+I= +github.com/aws/aws-sdk-go v1.44.289 h1:5CVEjiHFvdiVlKPBzv0rjG4zH/21W/onT18R5AH/qx0= +github.com/aws/aws-sdk-go v1.44.289/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/benbjohnson/clock v0.0.0-20160125162948-a620c1cc9866/go.mod h1:UMqtWQTnOe4byzwe7Zhwh8f8s+36uszN51sJrSIZlTE= -github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= -github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/benbjohnson/clock v1.3.5 
h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= +github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -642,12 +841,14 @@ github.com/bmizerany/perks v0.0.0-20141205001514-d9a9656a3a4b/go.mod h1:ac9efd0D1fsDb3EJvhqgXRbFx7bs2wqZ10HQPeU8U/Q= github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= -github.com/brianvoe/gofakeit/v6 v6.20.1 h1:8ihJ60OvPnPJ2W6wZR7M+TTeaZ9bml0z6oy4gvyJ/ek= -github.com/brianvoe/gofakeit/v6 v6.20.1/go.mod h1:Ow6qC71xtwm79anlwKRlWZW6zVq9D2XHE4QSSMP/rU8= +github.com/brianvoe/gofakeit/v6 v6.22.0 h1:BzOsDot1o3cufTfOk+fWKE9nFYojyDV+XHdCWL2+uyE= +github.com/brianvoe/gofakeit/v6 v6.22.0/go.mod h1:Ow6qC71xtwm79anlwKRlWZW6zVq9D2XHE4QSSMP/rU8= github.com/cactus/go-statsd-client/statsd v0.0.0-20191106001114-12b4e2b38748/go.mod h1:l/bIBLeOl9eX+wxJAzxS4TveKRtAqlyDpHjhkfO0MEI= github.com/cactus/go-statsd-client/statsd v0.0.0-20200423205355-cb0885a1018c h1:HIGF0r/56+7fuIZw2V4isE22MK6xpxWx7BbV8dJ290w= github.com/cactus/go-statsd-client/statsd v0.0.0-20200423205355-cb0885a1018c/go.mod h1:l/bIBLeOl9eX+wxJAzxS4TveKRtAqlyDpHjhkfO0MEI= github.com/cactus/go-statsd-client/v4 v4.0.0/go.mod h1:m73kwJp6TN0Ja9P6ycdZhWM1MlfxY/95WZ//IptPQ+Y= +github.com/cactus/go-statsd-client/v5 v5.0.0 h1:KqvIQtc9qt34uq+nu4nd1PwingWfBt/IISgtUQ2nSJk= +github.com/cactus/go-statsd-client/v5 v5.0.0/go.mod h1:COEvJ1E+/E2L4q6QE5CkjWPi4eeDw9maJBMIuMPBZbY= github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -674,6 +875,9 @@ github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230428030218-4003588d1b74/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= @@ -702,14 +906,20 @@ github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= github.com/envoyproxy/go-control-plane v0.11.0/go.mod h1:VnHyVMpzcLvCFt9yUz1UnCwHLhwx1WguiVDV7pTG/tI= +github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q= 
+github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= github.com/envoyproxy/protoc-gen-validate v0.10.0/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= +github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= +github.com/envoyproxy/protoc-gen-validate v1.0.1/go.mod h1:0vj8bNkYbSTNS2PIyH87KZaeN4x9zpL9Qt8fQC7d+vs= +github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= +github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a h1:yDWHCSQ40h88yih2JAcL6Ls/kVkSE8GFACTGVnMPruw= github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a/go.mod h1:7Ga40egUymuWXxAe151lTNnCv97MddSOVsjpPPkityA= -github.com/fatih/color v1.14.1 h1:qfhVLaG5s+nCROl1zJsZRxFeYrHLqWroPOQ8BWiNb4w= -github.com/fatih/color v1.14.1/go.mod h1:2oHN61fhTpgcxD3TSWCgKDiH1+x4OiDVVGH8WlgGZGg= +github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= +github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= @@ -741,8 +951,8 @@ github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= -github.com/gocql/gocql v1.4.0 h1:NIlXAJXsjzjGvVn36njh9OLYWzS3D7FdvsifLj4eDEY= -github.com/gocql/gocql v1.4.0/go.mod h1:3gM2c4D3AnkISwBxGnMMsS8Oy4y2lhbPRsH4xnJrHG8= +github.com/gocql/gocql v1.5.2 h1:WnKf8xRQImcT/KLaEWG2pjEeryDB7K0qQN9mPs1C58Q= +github.com/gocql/gocql v1.5.2/go.mod h1:3gM2c4D3AnkISwBxGnMMsS8Oy4y2lhbPRsH4xnJrHG8= github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/googleapis v1.4.1 h1:1Yx4Myt7BxzvUr5ldGSbwYiZG6t9wGBZ+8/fX3Wvtq0= github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4= @@ -751,8 +961,8 @@ github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/gogo/status v1.1.1 h1:DuHXlSFHNKqTQ+/ACf5Vs6r4X/dH2EgIzR9Vr+H65kg= github.com/gogo/status v1.1.1/go.mod h1:jpG3dM5QPcqu19Hg8lkUhBFBa3TcLs1DG7+2Jqci7oU= -github.com/golang-jwt/jwt/v4 v4.4.3 h1:Hxl6lhQFj4AnOX6MLrsCb/+7tCj7DxP7VA+2rDIq5AU= -github.com/golang-jwt/jwt/v4 v4.4.3/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= +github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= 
github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= @@ -840,6 +1050,10 @@ github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26 h1:Xim43kblpZXfIBQsbuBVKCudVG457BR2GZFIz3uw3hQ= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/s2a-go v0.1.0/go.mod h1:OJpEgntRZo8ugHpF9hkoLJbS5dSI20XZeXJ9JVywLlM= +github.com/google/s2a-go v0.1.3/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= +github.com/google/s2a-go v0.1.4 h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc= +github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= @@ -848,8 +1062,10 @@ github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= -github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k= github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= +github.com/googleapis/enterprise-certificate-proxy v0.2.4/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= +github.com/googleapis/enterprise-certificate-proxy v0.2.5 h1:UR4rDjcgpgEnqpIEvkiqTYKBCKLNmlge2eVjoZfySzM= +github.com/googleapis/enterprise-certificate-proxy v0.2.5/go.mod h1:RxW0N9901Cko1VOCW3SXCpWP+mlIEkk2tP7jnHy9a3w= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= @@ -860,17 +1076,22 @@ github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= -github.com/googleapis/gax-go/v2 v2.7.1 h1:gF4c0zjUP2H/s/hEGyLA3I0fA2ZWjzYiONAD6cvPr8A= github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= +github.com/googleapis/gax-go/v2 v2.8.0/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= +github.com/googleapis/gax-go/v2 v2.10.0/go.mod h1:4UOEnMCrxsSqQ940WnTiD6qJ63le2ev3xfyagutxiPw= +github.com/googleapis/gax-go/v2 v2.11.0/go.mod h1:DxmR61SGKkGLa2xigwuZIQpkCI2S5iydzRfb3peWZJI= +github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= +github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= -github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= -github.com/grpc-ecosystem/go-grpc-middleware 
v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= +github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= +github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= +github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.2 h1:gDLXvp5S9izjldquuoAhDzccbskOL6tDC5jMSyx3zxE= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.2/go.mod h1:7pdNwVWBBHGiCxa9lAszqCJMbfTISJ7oMftp8+UGV08= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= @@ -887,8 +1108,6 @@ github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/jmoiron/sqlx v1.3.4 h1:wv+0IJZfL5z0uZoUjlpKgHkgaFSYD+r9CfrXjEXsO7w= github.com/jmoiron/sqlx v1.3.4/go.mod h1:2BljVx/86SuTyjE+aPYlHCTNvZrnJXghYGpNiXLBMCQ= -github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4= -github.com/jonboulle/clockwork v0.4.0/go.mod h1:xgRqUGwRcjKCO1vbZUEtSLrqKoPSsUpK7fnezOII0kc= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= @@ -922,24 +1141,27 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw= -github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= github.com/lyft/protoc-gen-star/v2 v2.0.1/go.mod h1:RcCdONR2ScXaYnQC5tUzxzlpA3WVYF7/opLeUgcQs/o= +github.com/lyft/protoc-gen-star/v2 v2.0.3/go.mod h1:amey7yeodaJhXSbf/TlLvWiqQfLOSpEk//mLlc+axEk= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= 
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= +github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU= github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= +github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= @@ -982,8 +1204,8 @@ github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.15.1 h1:8tXpTmJbyH5lydzFPoxSIJ0J46jdh3tylbvM1xCv0LI= -github.com/prometheus/client_golang v1.15.1/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk= +github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= +github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -1000,8 +1222,8 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= -github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= +github.com/prometheus/procfs v0.11.0 h1:5EAgkfkMl659uZPbe9AS2N68a7Cc1TJbPEuGzFuRbyk= +github.com/prometheus/procfs v0.11.0/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= github.com/rcrowley/go-metrics v0.0.0-20141108142129-dee209f2455f/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= @@ -1009,8 +1231,8 @@ github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= github.com/remyoudompheng/bigfft 
v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= -github.com/rivo/uniseg v0.4.3 h1:utMvzDsuh3suAEnhH0RdHmoPbU648o6CvXxTx4SBMOw= -github.com/rivo/uniseg v0.4.3/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= +github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= +github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/robfig/cron v1.2.0 h1:ZjScXvvxeQ63Dbyxy76Fj3AT3Ut0aKsyd2/tl3DTMuQ= github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= @@ -1029,8 +1251,8 @@ github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= -github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= @@ -1049,31 +1271,32 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/temporalio/ringpop-go v0.0.0-20220818230611-30bf23b490b2 h1:QIwUh2HCtmtB4rDM4CYnmX8ep9X7n9WZh+0rv18FySc= -github.com/temporalio/ringpop-go v0.0.0-20220818230611-30bf23b490b2/go.mod h1:ZEYrWwPO7607ZEaPzK7nWRv55cIrTtH4TeBBu3V532U= +github.com/temporalio/ringpop-go v0.0.0-20230606200434-b5c079f412d3 h1:V1U9fvhusDJ1pyAvQWg0+u6mQ+o5WtRfMbnnTIZe0Fo= +github.com/temporalio/ringpop-go v0.0.0-20230606200434-b5c079f412d3/go.mod h1:LA2yFb94r5XoEnuMVHkCC/P5174whMy2Dd+cu+AEcQA= github.com/temporalio/tchannel-go v1.22.1-0.20220818200552-1be8d8cffa5b h1:Fs3LdlF7xbnOWHymbFmvIEuxIEt1dNRCfaDkoajSaZk= github.com/temporalio/tchannel-go v1.22.1-0.20220818200552-1be8d8cffa5b/go.mod h1:c+V9Z/ZgkzAdyGvHrvC5AsXgN+M9Qwey04cBdKYzV7U= -github.com/temporalio/tctl-kit v0.0.0-20230213052353-2342ea1e7d14 h1:N+lz4Nav7634VmiCmQlbudy2ja6s5NAqc/5dUd4sOw8= -github.com/temporalio/tctl-kit v0.0.0-20230213052353-2342ea1e7d14/go.mod h1:hk/LJCKZNNmtVSWRKepbdUJme+k/4fb/hPkekXk40sk= +github.com/temporalio/tctl-kit v0.0.0-20230328153839-577f95d16fa0 h1:E1iAre7/4VvSJri8uOnItKVsMKnP+WEQourm+zVO0cc= +github.com/temporalio/tctl-kit v0.0.0-20230328153839-577f95d16fa0/go.mod h1:hk/LJCKZNNmtVSWRKepbdUJme+k/4fb/hPkekXk40sk= github.com/twmb/murmur3 
v1.1.5/go.mod h1:Qq/R7NUyOfr65zD+6Q5IHKsJLwP7exErjN6lyyq3OSQ= -github.com/twmb/murmur3 v1.1.6 h1:mqrRot1BRxm+Yct+vavLMou2/iJt0tNVTTC0QoIjaZg= -github.com/twmb/murmur3 v1.1.6/go.mod h1:Qq/R7NUyOfr65zD+6Q5IHKsJLwP7exErjN6lyyq3OSQ= +github.com/twmb/murmur3 v1.1.8 h1:8Yt9taO/WN3l08xErzjeschgZU2QSrwm1kclYq+0aRg= +github.com/twmb/murmur3 v1.1.8/go.mod h1:Qq/R7NUyOfr65zD+6Q5IHKsJLwP7exErjN6lyyq3OSQ= github.com/uber-common/bark v1.0.0/go.mod h1:g0ZuPcD7XiExKHynr93Q742G/sbrdVQkghrqLGOoFuY= github.com/uber-common/bark v1.3.0 h1:DkuZCBaQS9LWuNAPrCO6yQVANckIX3QI0QwLemUnzCo= github.com/uber-common/bark v1.3.0/go.mod h1:5fDe/YcIVP55XhFF9hUihX2lDsDcpFrTZEAwAVwtPDw= github.com/uber-go/tally v3.3.15+incompatible/go.mod h1:YDTIBxdXyOU/sCWilKB4bgyufu1cEi0jdVnRdxvjnmU= -github.com/uber-go/tally/v4 v4.1.6 h1:k8NwZvdfj/w398Q0H1nqi6yq2W5bgxsqn76kNwZ/hfM= -github.com/uber-go/tally/v4 v4.1.6/go.mod h1:aXeSTDMl4tNosyf6rdU8jlgScHyjEGGtfJ/uwCIf/vM= +github.com/uber-go/tally/v4 v4.1.7 h1:YiKvvMKCCXlCKXI0i1hVk+xda8YxdIpjeFXohpvn8Zo= +github.com/uber-go/tally/v4 v4.1.7/go.mod h1:pPR56rjthjtLB8xQlEx2I1VwAwRGCh/i4xMUcmG+6z4= github.com/uber/jaeger-client-go v2.22.1+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-client-go v2.30.0+incompatible h1:D6wyKGCecFaSRUpo8lCVbaOOb6ThwMmTEbhRwtKR97o= github.com/uber/jaeger-client-go v2.30.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-lib v2.4.1+incompatible h1:td4jdvLcExb4cBISKIpHuGoVXh+dVKhn2Um6rjCsSsg= github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= -github.com/urfave/cli v1.22.12 h1:igJgVw1JdKH+trcLWLeLwZjU9fEfPesQ+9/e4MQ44S8= -github.com/urfave/cli v1.22.12/go.mod h1:sSBEIC79qR6OvcmsD4U3KABeOTxDqQtdDnaFuUN30b8= +github.com/urfave/cli v1.22.14 h1:ebbhrRiGK2i4naQJr+1Xj92HXZCrK7MsyTS/ob3HnAk= +github.com/urfave/cli v1.22.14/go.mod h1:X0eDS6pD6Exaclxm99NJ3FiCDRED7vIHpx2mDOHLvkA= github.com/urfave/cli/v2 v2.4.0 h1:m2pxjjDFgDxSPtO8WSdbndj17Wu2y8vOT86wE/tjr+I= github.com/urfave/cli/v2 v2.4.0/go.mod h1:NX9W0zmTvedE5oDoOMs2RTC8RvdK98NTYZE5LbaEYPg= github.com/xwb1989/sqlparser v0.0.0-20180606152119-120387863bf2 h1:zzrxE1FKn5ryBNl9eKOeqQ58Y/Qpo3Q9QNxKHX5uzzQ= @@ -1122,36 +1345,36 @@ go.opentelemetry.io/otel/trace v1.16.0/go.mod h1:Yt9vYq1SdNz3xdjZZK7wcXv1qv2pwLkqr2QVwea0ef0= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= -go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw= go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= -go.temporal.io/api v1.21.0/go.mod h1:xlsUEakkN2vU2/WV7e5NqMG4N93nfuNfvbXdaXUpU8w= -go.temporal.io/api v1.23.0 h1:4y9mTQjEHsE0Du0WJ2ExJUcP/1/a+B/UefzIDm4ALTE= -go.temporal.io/api v1.23.0/go.mod h1:AcJd1+rc1j0zte+ZBIkOHGHjntR/17LnZWFz+gMFHQ0= -go.temporal.io/sdk v1.23.0 h1:oa9/1f3bbcBLiNGbYf9woIx7uWFJ153q0JOkPeZqJtQ= -go.temporal.io/sdk v1.23.0/go.mod h1:S7vWxU01lGcCny0sWx03bkkYw4VtVrpzeqBTn2A6y+E= +go.opentelemetry.io/proto/otlp v0.20.0 h1:BLOA1cZBAGSbRiNuGCCKiFrCdYB7deeHDeD1SueyOfA= +go.opentelemetry.io/proto/otlp v0.20.0/go.mod h1:3QgjzPALBIv9pcknj2EXGPXjYPFdUh/RQfF8Lz3+Vnw= +go.temporal.io/api v1.24.1-0.20231003165936-bb03061759c8 h1:nYlATgXyviKLMySF/eUCnVbUg1AeYwkHcJdhusFYsIs= +go.temporal.io/api v1.24.1-0.20231003165936-bb03061759c8/go.mod h1:GyVOkCMSlZSqS7MrEaEV8kL3Oy8N0tpEsyVNywG+q5o= +go.temporal.io/sdk 
v1.25.1 h1:jC9l9vHHz5OJ7PR6OjrpYSN4+uEG0bLe5rdF9nlMSGk= +go.temporal.io/sdk v1.25.1/go.mod h1:X7iFKZpsj90BfszfpFCzLX8lwEJXbnRrl351/HyEgmU= go.temporal.io/version v0.3.0 h1:dMrei9l9NyHt8nG6EB8vAwDLLTwx2SvRyucCSumAiig= go.temporal.io/version v0.3.0/go.mod h1:UA9S8/1LaKYae6TyD9NaPMJTZb911JcbqghI2CBSP78= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= -go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/automaxprocs v1.5.2 h1:2LxUOGiR3O6tw8ui5sZa2LAaHnsviZdVOUZw4fvbnME= go.uber.org/automaxprocs v1.5.2/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0= -go.uber.org/dig v1.16.1 h1:+alNIBsl0qfY0j6epRubp/9obgtrObRAc5aD+6jbWY8= -go.uber.org/dig v1.16.1/go.mod h1:557JTAUZT5bUK0SvCwikmLPPtdQhfvLYtO5tJgQSbnk= -go.uber.org/fx v1.19.1 h1:JwYIYAQzXBuBBwSZ1/tn/95pnQO/Sp3yE8lWj9eSAzI= -go.uber.org/fx v1.19.1/go.mod h1:bGK+AEy7XUwTBkqCsK/vDyFF0JJOA6X5KWpNC0e6qTA= +go.uber.org/dig v1.17.0 h1:5Chju+tUvcC+N7N6EV08BJz41UZuO3BmHcN4A287ZLI= +go.uber.org/dig v1.17.0/go.mod h1:rTxpf7l5I0eBTlE6/9RL+lDybC7WFwY2QH55ZSjy1mU= +go.uber.org/fx v1.20.0 h1:ZMC/pnRvhsthOZh9MZjMq5U8Or3mA9zBSPaLnzs3ihQ= +go.uber.org/fx v1.20.0/go.mod h1:qCUj0btiR3/JnanEr1TYEePfSw6o/4qYJscgvzQ5Ub0= +go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.7.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= -go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI= -go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.14.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -1164,8 +1387,15 @@ golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod 
h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc= -golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= +golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= +golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= +golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0= +golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I= +golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= +golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= +golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1181,8 +1411,8 @@ golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/exp v0.0.0-20220827204233-334a2380cb91/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= -golang.org/x/exp v0.0.0-20230213192124-5e25df0256eb h1:PaBZQdo+iSDyHT053FjUCgZQ/9uqVwPOcl7KSWhKn6w= -golang.org/x/exp v0.0.0-20230213192124-5e25df0256eb/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 h1:k/i9J1pBpvlfR+9QsetwPyERsqu1GIbi967PQMq3Ivc= +golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= @@ -1224,8 +1454,10 @@ golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.9.0 h1:KENHtAZL2y3NLMYZeHY9DW8HW8V+kQyJsY/V9JlKvCs= golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.11.0 h1:bUO06HqtnRcc/7l71XBe4WcqTZ+3AH1J59zWDDwLKgU= +golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1266,6 +1498,7 @@ golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net 
v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= @@ -1285,8 +1518,13 @@ golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= +golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.11.0/go.mod h1:2L/ixqYpgIVXmeoSA/4Lu7BzTG4KIyPIryS4IsOd1oQ= +golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= +golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1315,8 +1553,10 @@ golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec= golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= -golang.org/x/oauth2 v0.8.0 h1:6dkIjl3j3LtZ/O3sTgZTMsLKSftL/B8Zgq4huOIIUu8= +golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= +golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8= +golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1333,8 +1573,9 @@ golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1396,6 +1637,7 @@ golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1421,8 +1663,13 @@ golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -1431,7 +1678,11 @@ golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= +golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.9.0/go.mod h1:M6DEAAIenWoTxdKrOltXcmDY3rSplQUkrvaDU5FcQyo= +golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= +golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1447,8 +1698,11 @@ golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= golang.org/x/text v0.9.0/go.mod 
h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1476,6 +1730,7 @@ golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1521,8 +1776,11 @@ golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4= golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= +golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= +golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= +golang.org/x/tools v0.10.0 h1:tvDr/iQoUqNdohiYm0LmmKcBk+q86lb9EprIUFhHHGg= +golang.org/x/tools v0.10.0/go.mod h1:UJwyiVBsOA2uwvK/e5OY3GTpDUJriEd+/YlqAwLPmyM= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1596,8 +1854,14 @@ google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI= google.golang.org/api v0.111.0/go.mod h1:qtFHvU9mhgTJegR31csQ+rwxyUTHOKFqCKWp1J0fdw0= -google.golang.org/api v0.114.0 h1:1xQPji6cO2E2vLiI+C/XiFAnsn1WV3mjaEwGLhi3grE= google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg= +google.golang.org/api v0.118.0/go.mod h1:76TtD3vkgmZ66zZzp72bUUklpmQmKlhh6sYtIjYK+5E= +google.golang.org/api v0.122.0/go.mod h1:gcitW0lvnyWjSp9nKxAbdHKIZ6vF4aajGueeslZOyms= +google.golang.org/api v0.124.0/go.mod h1:xu2HQurE5gi/3t1aFCvhPD781p0a3p11sdunTJ2BlP4= +google.golang.org/api v0.125.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw= +google.golang.org/api v0.126.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw= +google.golang.org/api v0.128.0 h1:RjPESny5CnQRn9V6siglged+DZCgfu9l6mO9dkX9VOg= +google.golang.org/api 
v0.128.0/go.mod h1:Y611qgqaE92On/7g65MQgxYul3c0rEB894kniWLY750= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1739,16 +2003,49 @@ google.golang.org/genproto v0.0.0-20230323212658-478b75c54725/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= google.golang.org/genproto v0.0.0-20230330154414-c0448cd141ea/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= google.golang.org/genproto v0.0.0-20230331144136-dcfb400f0633/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= -google.golang.org/genproto v0.0.0-20230525154841-bd750badd5c6/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= +google.golang.org/genproto v0.0.0-20230403163135-c38d8f061ccd/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY= -google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc h1:8DyZCyvI8mE1IdLy/60bS+52xfymkE72wv1asokgtao= +google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54/go.mod h1:zqTuNwFlFRsw5zIts5VnzLQxSRqh+CGOTVMlYbY0Eyk= +google.golang.org/genproto v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:zqTuNwFlFRsw5zIts5VnzLQxSRqh+CGOTVMlYbY0Eyk= google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:xZnkP7mREFX5MORlOPEzLMr+90PPZQ2QWzrVTWfAq64= +google.golang.org/genproto v0.0.0-20230629202037-9506855d4529/go.mod h1:xZnkP7mREFX5MORlOPEzLMr+90PPZQ2QWzrVTWfAq64= +google.golang.org/genproto v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:O9kGHb51iE/nOGvQaDUuadVYqovW56s5emA88lQnj6Y= +google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98/go.mod h1:S7mY02OqCJTD0E1OiQy1F72PWFB4bZJ87cAtLPYgDR0= +google.golang.org/genproto v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:0ggbjUrZYpy1q+ANUS30SEoGZ53cdfwtbuG7Ptgy108= +google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5/go.mod h1:oH/ZOT02u4kWEp7oYBGYFFkCdKS/uYR9Z7+0/xuuFp8= +google.golang.org/genproto v0.0.0-20230821184602-ccc8af3d0e93/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4= +google.golang.org/genproto v0.0.0-20230913181813-007df8e322eb/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4= +google.golang.org/genproto v0.0.0-20230920204549-e6e6cdab5c13/go.mod h1:CCviP9RmpZ1mxVr8MUjCnSiY09IbAXZxhLE6EhHIdPU= +google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97 h1:SeZZZx0cP0fqUyA+oRzP9k7cSwJlvDFiROO72uwD6i0= +google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97/go.mod h1:t1VqOqqvce95G3hIDCT5FeO3YUc6Q4Oe24L/+rNMxRk= google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8= -google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc h1:kVKPf/IiYSBWEWtkIn6wZXwWGCnLKcC8oWfZvXjsGnM= +google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= +google.golang.org/genproto/googleapis/api v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= +google.golang.org/genproto/googleapis/api 
v0.0.0-20230629202037-9506855d4529/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= +google.golang.org/genproto/googleapis/api v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:mPBs5jNgx2GuQGvFwUvVKqtn6HsUw9nP64BedgvqEsQ= +google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ= +google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ= +google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5/go.mod h1:5DZzOUPCLYL3mNkQ0ms0F3EuUNZ7py1Bqeq6sxzI7/Q= +google.golang.org/genproto/googleapis/api v0.0.0-20230913181813-007df8e322eb/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= +google.golang.org/genproto/googleapis/api v0.0.0-20230920204549-e6e6cdab5c13/go.mod h1:RdyHbowztCGQySiCvQPgWQWgWhGnouTdCflKoDBt32U= +google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97 h1:W18sezcAYs+3tDZX4F80yctqa12jcP1PUS2gQu1zTPU= +google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97/go.mod h1:iargEX0SFPm3xcfMI0d1domjg0ZF4Aa0p2awqyxhvF0= +google.golang.org/genproto/googleapis/bytestream v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:ylj+BE99M198VPbBh6A8d9n3w8fChvyLK3wwBOjXBFA= google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234015-3fc162c6f38a/go.mod h1:xURIpW9ES5+/GZhnV6beoEtxQrnkRGIfP5VQG2tCBLc= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc h1:XSJ8Vk1SWuNr8S18z1NZSziL0CPIXLCCMDOEFtHBOFc= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230629202037-9506855d4529/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:8mL13HKkDa+IuJ8yruA3ci0q+0vsUz4m//+ottjwS5o= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230731190214-cbb8c96f2d6d/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230803162519-f966b187b2e5/go.mod h1:zBEcrKX2ZOcEkHWxBPAIvYUWOKKMIhYcmNiUIu2ji3I= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230920183334-c177e329c48b/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13/go.mod h1:KSqppvjFjtoCI+KGd4PELB0qLNxdJHRGqRI09mB6pQA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97 h1:6GQBEOdGkX6MMTLT9V+TjtIRZCw9VPD5Z+yHY9wMgS0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97/go.mod h1:v7nGkzlmW8P3n/bKmWBn2WpBjpOEx8Q6gMueudAmKfY= google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= @@ -1788,13 +2085,18 @@ google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= google.golang.org/grpc v1.51.0/go.mod 
h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= google.golang.org/grpc v1.52.0/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY= +google.golang.org/grpc v1.52.3/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY= google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= -google.golang.org/grpc v1.55.0 h1:3Oj82/tFSCeUrRTg/5E/7d/W5A1tj6Ky1ABAuZuv5ag= google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8= +google.golang.org/grpc v1.56.1/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= +google.golang.org/grpc v1.56.2/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= +google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo= +google.golang.org/grpc v1.58.2 h1:SXUpjxeVF3FKrTYQI4f4KvbGD5u2xccdYdurwowix5I= +google.golang.org/grpc v1.58.2/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= -google.golang.org/grpc/examples v0.0.0-20230216223317-abff344ead8f h1:0/wXWRnloOk74ilxLn9ZTrNSa0pMzJ0CnYyOe4gaf8U= -google.golang.org/grpc/examples v0.0.0-20230216223317-abff344ead8f/go.mod h1:Nr5H8+MlGWr5+xX/STzdoEqJrO+YteqFbMyCsrb6mH0= +google.golang.org/grpc/examples v0.0.0-20230623203957-0b3a81eabc28 h1:soFSbKG2H9FiKip4adXyJL8FEsA+wcFvu1YYMDcKKWY= +google.golang.org/grpc/examples v0.0.0-20230623203957-0b3a81eabc28/go.mod h1:CjEn68pJy4gCZyxDUQ9HNT0mQwZtbmMMHh5UrDl3Ozc= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1811,8 +2113,9 @@ google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1850,21 +2153,27 @@ honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= -lukechampine.com/uint128 v1.2.0 h1:mBi/5l91vocEN8otkC5bDLhi2KdCticRiwbdB0O+rjI= lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= +lukechampine.com/uint128 v1.3.0 h1:cDdUVfRwDUDovz610ABgFD17nXD4/uDgVHl2sC3+sbo= +lukechampine.com/uint128 v1.3.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= modernc.org/cc/v3 v3.36.0/go.mod 
h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= modernc.org/cc/v3 v3.36.2/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= modernc.org/cc/v3 v3.36.3/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= -modernc.org/cc/v3 v3.40.0 h1:P3g79IUS/93SYhtoeaHW+kRCIrYaxJ27MFPv+7kaTOw= +modernc.org/cc/v3 v3.37.0/go.mod h1:vtL+3mdHx/wcj3iEGz84rQa8vEqR6XM84v5Lcvfph20= modernc.org/cc/v3 v3.40.0/go.mod h1:/bTg4dnWkSXowUO6ssQKnOV0yMVxDYNIsIrzqTFDGH0= +modernc.org/cc/v3 v3.41.0 h1:QoR1Sn3YWlmA1T4vLaKZfawdVtSiGx8H+cEojbC7v1Q= +modernc.org/cc/v3 v3.41.0/go.mod h1:Ni4zjJYJ04CDOhG7dn640WGfwBzfE0ecX8TyMB0Fv0Y= modernc.org/ccgo/v3 v3.0.0-20220428102840-41399a37e894/go.mod h1:eI31LL8EwEBKPpNpA4bU1/i+sKOwOrQy8D87zWUcRZc= modernc.org/ccgo/v3 v3.0.0-20220430103911-bc99d88307be/go.mod h1:bwdAnOoaIt8Ax9YdWGjxWsdkPcZyRPHqrOvJxaKAKGw= +modernc.org/ccgo/v3 v3.0.0-20220904174949-82d86e1b6d56/go.mod h1:YSXjPL62P2AMSxBphRHPn7IkzhVHqkvOnRKAKh+W6ZI= modernc.org/ccgo/v3 v3.16.4/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= modernc.org/ccgo/v3 v3.16.6/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= modernc.org/ccgo/v3 v3.16.8/go.mod h1:zNjwkizS+fIFDrDjIAgBSCLkWbJuHF+ar3QRn+Z9aws= modernc.org/ccgo/v3 v3.16.9/go.mod h1:zNMzC9A9xeNUepy6KuZBbugn3c0Mc9TeiJO4lgvkJDo= -modernc.org/ccgo/v3 v3.16.13 h1:Mkgdzl46i5F/CNR/Kj80Ri59hC8TKAhZrYSaqvkwzUw= +modernc.org/ccgo/v3 v3.16.13-0.20221017192402-261537637ce8/go.mod h1:fUB3Vn0nVPReA+7IG7yZDfjv1TMWjhQP8gCxrFAtL5g= modernc.org/ccgo/v3 v3.16.13/go.mod h1:2Quk+5YgpImhPjv2Qsob1DnZ/4som1lJTodubIcoUkY= +modernc.org/ccgo/v3 v3.16.14 h1:af6KNtFgsVmnDYrWk3PQCS9XT6BXe7o3ZFJKkIKvXNQ= +modernc.org/ccgo/v3 v3.16.14/go.mod h1:mPDSujUIaTNWQSG4eqKw+atqLOEbma6Ncsa94WbC9zo= modernc.org/ccorpus v1.11.6 h1:J16RXiiqiCgua6+ZvQot4yUuUy8zxgqbqEEUuGPlISk= modernc.org/ccorpus v1.11.6/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ= modernc.org/httpfs v1.0.6 h1:AAgIpFZRXuYnkjftxTAZwMIiwEqAfk8aVB2/oA6nAeM= @@ -1876,8 +2185,13 @@ modernc.org/libc v1.16.19/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= modernc.org/libc v1.17.0/go.mod h1:XsgLldpP4aWlPlsjqKRdHPqCxCjISdHfM/yeWC5GyW0= modernc.org/libc v1.17.1/go.mod h1:FZ23b+8LjxZs7XtFMbSzL/EhPxNbfZbErxEHc7cbD9s= -modernc.org/libc v1.22.3 h1:D/g6O5ftAfavceqlLOFwaZuA5KYafKwmr30A6iSqoyY= -modernc.org/libc v1.22.3/go.mod h1:MQrloYP209xa2zHome2a8HLiLm6k0UT8CoHpV74tOFw= +modernc.org/libc v1.17.4/go.mod h1:WNg2ZH56rDEwdropAJeZPQkXmDwh+JCA1s/htl6r2fA= +modernc.org/libc v1.18.0/go.mod h1:vj6zehR5bfc98ipowQOM2nIDUZnVew/wNC/2tOGS+q0= +modernc.org/libc v1.20.3/go.mod h1:ZRfIaEkgrYgZDl6pa4W39HgN5G/yDW+NRmNKZBDFrk0= +modernc.org/libc v1.21.4/go.mod h1:przBsL5RDOZajTVslkugzLBj1evTue36jEomFQOoYuI= +modernc.org/libc v1.22.2/go.mod h1:uvQavJ1pZ0hIoC/jfqNoMLURIMhKzINIWypNM17puug= +modernc.org/libc v1.24.1 h1:uvJSeCKL/AgzBo2yYIPPTy82v21KgGnizcGYfBHaNuM= +modernc.org/libc v1.24.1/go.mod h1:FmfO1RLrU3MHJfyi9eYYmZBfi/R+tqZ6+hQ3yQQUkak= modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= modernc.org/mathutil v1.5.0 h1:rV0Ko/6SfM+8G+yKiyI830l3Wuz1zRutdslNoQ0kfiQ= @@ -1885,24 +2199,30 @@ modernc.org/memory v1.1.1/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= modernc.org/memory v1.2.0/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= modernc.org/memory v1.2.1/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= -modernc.org/memory v1.5.0 h1:N+/8c5rE6EqugZwHii4IFsaJ7MUhoWX07J5tC/iI5Ds= +modernc.org/memory v1.3.0/go.mod 
h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= +modernc.org/memory v1.4.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= modernc.org/memory v1.5.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= +modernc.org/memory v1.6.0 h1:i6mzavxrE9a30whzMfwf7XWVODx2r5OYXvU46cirX7o= +modernc.org/memory v1.6.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4= modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= modernc.org/sqlite v1.18.1/go.mod h1:6ho+Gow7oX5V+OiOQ6Tr4xeqbx13UZ6t+Fw9IRUG4d4= -modernc.org/sqlite v1.21.0 h1:4aP4MdUf15i3R3M2mx6Q90WHKz3nZLoz96zlB6tNdow= -modernc.org/sqlite v1.21.0/go.mod h1:XwQ0wZPIh1iKb5mkvCJ3szzbhk+tykC8ZWqTRTgYRwI= +modernc.org/sqlite v1.18.2/go.mod h1:kvrTLEWgxUcHa2GfHBQtanR1H9ht3hTJNtKpzH9k1u0= +modernc.org/sqlite v1.23.1 h1:nrSBg4aRQQwq59JpvGEQ15tNxoO5pX/kUjcRNwSAGQM= +modernc.org/sqlite v1.23.1/go.mod h1:OrDj17Mggn6MhE+iPbBNf7RGKODDE9NFT0f3EwDzJqk= modernc.org/strutil v1.1.1/go.mod h1:DE+MQQ/hjKBZS2zNInV5hhcipt5rLPWkmpbGeW5mmdw= modernc.org/strutil v1.1.3 h1:fNMm+oJklMGYfU9Ylcywl0CO5O6nTfaowNsh2wpPjzY= modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw= modernc.org/tcl v1.13.1/go.mod h1:XOLfOwzhkljL4itZkK6T72ckMgvj0BDsnKNdZVUOecw= -modernc.org/tcl v1.15.1 h1:mOQwiEK4p7HruMZcwKTZPw/aqtGM4aY00uzWhlKKYws= +modernc.org/tcl v1.13.2/go.mod h1:7CLiGIPo1M8Rv1Mitpv5akc2+8fxUd2y2UzC/MfMzy0= +modernc.org/tcl v1.15.2 h1:C4ybAYCGJw968e+Me18oW55kD/FexcHbqH2xak1ROSY= modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= +modernc.org/token v1.0.1/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y= modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= modernc.org/z v1.5.1/go.mod h1:eWFB510QWW5Th9YGZT81s+LwvaAs3Q2yr4sP0rmLkv8= -modernc.org/z v1.7.0 h1:xkDw/KepgEjeizO2sNco+hqYkU12taxQFqPEmgm1GWE= +modernc.org/z v1.7.3 h1:zDJf6iHjrnB+WRD88stbXokugjyc0/pB91ri1gO6LZY= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= diff -Nru temporal-1.21.5-1/src/internal/goro/example_group_test.go temporal-1.22.5/src/internal/goro/example_group_test.go --- temporal-1.21.5-1/src/internal/goro/example_group_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/internal/goro/example_group_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -52,10 +52,12 @@ fmt.Println("starting backgroundLoop1") defer fmt.Println("stopping backgroundLoop1") for { + timer := time.NewTimer(1 * time.Minute) select { - case <-time.After(1 * time.Minute): + case <-timer.C: // do something every minute case <-ctx.Done(): + timer.Stop() return nil } } @@ -65,10 +67,12 @@ fmt.Println("starting backgroundLoop2") defer fmt.Println("stopping backgroundLoop2") for { + timer := time.NewTimer(10 * time.Second) select { - case <-time.After(10 * time.Second): + case <-timer.C: // do something every 10 seconds case <-ctx.Done(): + timer.Stop() return nil } } diff -Nru temporal-1.21.5-1/src/internal/goro/group.go temporal-1.22.5/src/internal/goro/group.go --- temporal-1.21.5-1/src/internal/goro/group.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/internal/goro/group.go 2024-02-23 
09:45:43.000000000 +0000 @@ -32,10 +32,10 @@ // Group manages a set of long-running goroutines. Goroutines are spawned // individually with Group.Go and after that interrupted and waited-on as a // single unit. The expected use-case for this type is as a member field of a -// `common.Daemon` (or similar) type that spawns one or more goroutines in -// its Start() function and then stops those same goroutines in its Stop() -// function. The zero-value of this type is valid. A Group must not be copied -// after first use. +// background process type that spawns one or more goroutines in its Start() +// function and then stops those same goroutines in its Stop() function. +// The zero-value of this type is valid. A Group must not be copied after +// first use. type Group struct { initOnce sync.Once ctx context.Context diff -Nru temporal-1.21.5-1/src/internal/nettest/listener.go temporal-1.22.5/src/internal/nettest/listener.go --- temporal-1.21.5-1/src/internal/nettest/listener.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/internal/nettest/listener.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,57 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package nettest + +import "net" + +// NewListener returns a net.Listener which uses the given Pipe to simulate a network connection. +func NewListener(pipe *Pipe) *PipeListener { + return &PipeListener{ + Pipe: pipe, + done: make(chan struct{}), + } +} + +// PipeListener is a net.Listener which uses a Pipe to simulate a network connection. +type PipeListener struct { + *Pipe + // We cancel calls to Accept using the done channel so that tests don't hang if they're broken. + done chan struct{} +} + +var _ net.Listener = (*PipeListener)(nil) + +func (t *PipeListener) Accept() (net.Conn, error) { + return t.Pipe.Accept(t.done) +} + +func (t *PipeListener) Close() error { + close(t.done) + return nil +} + +func (t *PipeListener) Addr() net.Addr { + return &net.TCPAddr{} +} diff -Nru temporal-1.21.5-1/src/internal/nettest/listener_test.go temporal-1.22.5/src/internal/nettest/listener_test.go --- temporal-1.21.5-1/src/internal/nettest/listener_test.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/internal/nettest/listener_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,61 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. 
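The reworded Group doc comment above describes the intended lifecycle: a long-lived component spawns its goroutines with Group.Go in Start() and interrupts and waits on them as a single unit in Stop(), while the updated goro examples swap time.After for an explicit time.NewTimer so a cancelled loop stops its timer instead of leaving it to fire later. A minimal sketch of that pattern, assuming goro.Group exposes Go, Cancel and Wait as the example tests imply; the Service type and its field names are illustrative, not code from this patch:

package sketch // illustrative; real users of goro.Group live inside the go.temporal.io/server module

import (
    "context"
    "time"

    "go.temporal.io/server/internal/goro"
)

// Service is a hypothetical background component following the lifecycle the
// Group doc comment describes: goroutines are spawned in Start and stopped as
// a single unit in Stop.
type Service struct {
    goros goro.Group // zero value is valid; must not be copied after first use
}

func (s *Service) Start() {
    s.goros.Go(s.pollLoop)
}

func (s *Service) Stop() {
    s.goros.Cancel() // interrupt every goroutine in the group...
    s.goros.Wait()   // ...then wait for all of them to return
}

func (s *Service) pollLoop(ctx context.Context) error {
    for {
        // NewTimer + Stop instead of time.After, mirroring the change in
        // example_group_test.go: the timer is released when ctx is cancelled
        // rather than lingering until it fires.
        timer := time.NewTimer(30 * time.Second)
        select {
        case <-timer.C:
            // periodic work goes here
        case <-ctx.Done():
            timer.Stop()
            return nil
        }
    }
}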
All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package nettest + +import ( + "context" + "fmt" + "io" + "net" + "net/http" +) + +func ExampleListener() { + pipe := NewPipe() + listener := NewListener(pipe) + server := http.Server{ + Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write([]byte("hello")) + }), + } + go func() { + _ = server.Serve(listener) + }() + client := http.Client{ + Transport: &http.Transport{ + DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) { + return pipe.Connect(ctx.Done()) + }, + }, + } + resp, _ := client.Get("http://fake") + defer func() { + _ = resp.Body.Close() + }() + buf, _ := io.ReadAll(resp.Body) + _ = server.Close() + fmt.Println(string(buf[:])) + // Output: hello +} diff -Nru temporal-1.21.5-1/src/internal/nettest/pipe_test.go temporal-1.22.5/src/internal/nettest/pipe_test.go --- temporal-1.21.5-1/src/internal/nettest/pipe_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/internal/nettest/pipe_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -34,7 +34,7 @@ func TestPipe_Accept(t *testing.T) { t.Parallel() - listener := NewPipe() + pipe := NewPipe() var wg sync.WaitGroup defer wg.Wait() @@ -43,7 +43,7 @@ go func() { defer wg.Done() - c, err := listener.Accept(nil) + c, err := pipe.Accept(nil) assert.NoError(t, err) defer func() { @@ -51,7 +51,7 @@ }() }() - c, err := listener.Connect(nil) + c, err := pipe.Connect(nil) assert.NoError(t, err) defer func() { @@ -62,19 +62,19 @@ func TestPipe_ClientCanceled(t *testing.T) { t.Parallel() - listener := NewPipe() + pipe := NewPipe() done := make(chan struct{}) close(done) // hi efe - _, err := listener.Connect(done) + _, err := pipe.Connect(done) assert.ErrorIs(t, err, ErrCanceled) } func TestPipe_ServerCanceled(t *testing.T) { t.Parallel() - listener := NewPipe() + pipe := NewPipe() done := make(chan struct{}) close(done) - _, err := listener.Accept(done) + _, err := pipe.Accept(done) assert.ErrorIs(t, err, ErrCanceled) } diff -Nru temporal-1.21.5-1/src/proto/internal/temporal/server/api/adminservice/v1/request_response.proto temporal-1.22.5/src/proto/internal/temporal/server/api/adminservice/v1/request_response.proto --- temporal-1.21.5-1/src/proto/internal/temporal/server/api/adminservice/v1/request_response.proto 2023-09-29 14:03:07.000000000 +0000 +++ 
temporal-1.22.5/src/proto/internal/temporal/server/api/adminservice/v1/request_response.proto 2024-02-23 09:45:43.000000000 +0000 @@ -33,6 +33,8 @@ import "temporal/api/common/v1/message.proto"; import "temporal/api/version/v1/message.proto"; import "temporal/api/workflow/v1/message.proto"; +import "temporal/api/namespace/v1/message.proto"; +import "temporal/api/replication/v1/message.proto"; import "temporal/server/api/cluster/v1/message.proto"; import "temporal/server/api/enums/v1/common.proto"; @@ -245,6 +247,7 @@ int64 failover_version_increment = 10; int64 initial_failover_version = 11; bool is_global_namespace_enabled = 12; + map tags = 13; } message ListClustersRequest { @@ -387,4 +390,20 @@ oneof attributes { temporal.server.api.replication.v1.WorkflowReplicationMessages messages = 1; } +} + +message GetNamespaceRequest { + oneof attributes { + string namespace = 1; + string id = 2; + } +} + +message GetNamespaceResponse { + temporal.api.namespace.v1.NamespaceInfo info = 3; + temporal.api.namespace.v1.NamespaceConfig config = 4; + temporal.api.replication.v1.NamespaceReplicationConfig replication_config = 5; + int64 config_version = 6; + int64 failover_version = 7; + repeated temporal.api.replication.v1.FailoverStatus failover_history = 8; } \ No newline at end of file diff -Nru temporal-1.21.5-1/src/proto/internal/temporal/server/api/adminservice/v1/service.proto temporal-1.22.5/src/proto/internal/temporal/server/api/adminservice/v1/service.proto --- temporal-1.21.5-1/src/proto/internal/temporal/server/api/adminservice/v1/service.proto 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/proto/internal/temporal/server/api/adminservice/v1/service.proto 2024-02-23 09:45:43.000000000 +0000 @@ -142,5 +142,8 @@ rpc StreamWorkflowReplicationMessages(stream StreamWorkflowReplicationMessagesRequest) returns (stream StreamWorkflowReplicationMessagesResponse) { } + + rpc GetNamespace(GetNamespaceRequest) returns (GetNamespaceResponse) { + } } diff -Nru temporal-1.21.5-1/src/proto/internal/temporal/server/api/historyservice/v1/request_response.proto temporal-1.22.5/src/proto/internal/temporal/server/api/historyservice/v1/request_response.proto --- temporal-1.21.5-1/src/proto/internal/temporal/server/api/historyservice/v1/request_response.proto 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/proto/internal/temporal/server/api/historyservice/v1/request_response.proto 2024-02-23 09:45:43.000000000 +0000 @@ -80,6 +80,7 @@ temporal.api.common.v1.WorkflowExecution execution = 2; int64 expected_next_event_id = 3; bytes current_branch_token = 4; + temporal.server.api.history.v1.VersionHistoryItem version_history_item = 5; } message GetMutableStateResponse { @@ -115,6 +116,7 @@ temporal.api.common.v1.WorkflowExecution execution = 2; int64 expected_next_event_id = 3; bytes current_branch_token = 4; + temporal.server.api.history.v1.VersionHistoryItem version_history_item = 5; } message PollMutableStateResponse { @@ -176,6 +178,7 @@ map queries = 14; temporal.server.api.clock.v1.VectorClock clock = 15; repeated temporal.api.protocol.v1.Message messages = 16; + int64 version = 17; } message RecordActivityTaskStartedRequest { @@ -198,6 +201,7 @@ temporal.api.common.v1.WorkflowType workflow_type = 6; string workflow_namespace = 7; temporal.server.api.clock.v1.VectorClock clock = 8; + int64 version = 9; } message RespondWorkflowTaskCompletedRequest { @@ -219,6 +223,18 @@ message RespondWorkflowTaskFailedResponse { } +message IsWorkflowTaskValidRequest { + string namespace_id = 1; + 
temporal.api.common.v1.WorkflowExecution execution = 2; + temporal.server.api.clock.v1.VectorClock clock = 3; + int64 scheduled_event_id = 4; +} + +message IsWorkflowTaskValidResponse { + // whether matching service can call history service to start the workflow task + bool is_valid = 1; +} + message RecordActivityTaskHeartbeatRequest { string namespace_id = 1; temporal.api.workflowservice.v1.RecordActivityTaskHeartbeatRequest heartbeat_request = 2; @@ -252,6 +268,18 @@ message RespondActivityTaskCanceledResponse { } +message IsActivityTaskValidRequest { + string namespace_id = 1; + temporal.api.common.v1.WorkflowExecution execution = 2; + temporal.server.api.clock.v1.VectorClock clock = 3; + int64 scheduled_event_id = 4; +} + +message IsActivityTaskValidResponse { + // whether matching service can call history service to start the activity task + bool is_valid = 1; +} + message SignalWorkflowExecutionRequest { string namespace_id = 1; temporal.api.workflowservice.v1.SignalWorkflowExecutionRequest signal_request = 2; @@ -351,9 +379,9 @@ **/ message RecordChildExecutionCompletedRequest { string namespace_id = 1; - temporal.api.common.v1.WorkflowExecution workflow_execution = 2; + temporal.api.common.v1.WorkflowExecution parent_execution = 2; int64 parent_initiated_id = 3; - temporal.api.common.v1.WorkflowExecution completed_execution = 4; + temporal.api.common.v1.WorkflowExecution child_execution = 4; temporal.api.history.v1.HistoryEvent completion_event = 5; temporal.server.api.clock.v1.VectorClock clock = 6; int64 parent_initiated_version = 7; @@ -581,6 +609,7 @@ message GenerateLastHistoryReplicationTasksResponse { int64 state_transition_count = 1; + int64 history_length = 2; } message GetReplicationStatusRequest { diff -Nru temporal-1.21.5-1/src/proto/internal/temporal/server/api/historyservice/v1/service.proto temporal-1.22.5/src/proto/internal/temporal/server/api/historyservice/v1/service.proto --- temporal-1.21.5-1/src/proto/internal/temporal/server/api/historyservice/v1/service.proto 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/proto/internal/temporal/server/api/historyservice/v1/service.proto 2024-02-23 09:45:43.000000000 +0000 @@ -81,6 +81,10 @@ rpc RespondWorkflowTaskFailed (RespondWorkflowTaskFailedRequest) returns (RespondWorkflowTaskFailedResponse) { } + // IsWorkflowTaskValid is called by matching service checking whether the workflow task is valid. + rpc IsWorkflowTaskValid (IsWorkflowTaskValidRequest) returns (IsWorkflowTaskValidResponse) { + } + // RecordActivityTaskHeartbeat is called by application worker while it is processing an ActivityTask. If worker fails // to heartbeat within 'heartbeatTimeoutSeconds' interval for the ActivityTask, then it will be marked as timedout and // 'ActivityTaskTimedOut' event will be written to the workflow history. Calling 'RecordActivityTaskHeartbeat' will @@ -113,6 +117,10 @@ rpc RespondActivityTaskCanceled (RespondActivityTaskCanceledRequest) returns (RespondActivityTaskCanceledResponse) { } + // IsActivityTaskValid is called by matching service checking whether the workflow task is valid. + rpc IsActivityTaskValid (IsActivityTaskValidRequest) returns (IsActivityTaskValidResponse) { + } + // SignalWorkflowExecution is used to send a signal event to running workflow execution. This results in // WorkflowExecutionSignaled event recorded in the history and a workflow task being created for the execution. 
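The new IsWorkflowTaskValid and IsActivityTaskValid RPCs give the matching service a lightweight way to ask the history service whether a polled task still corresponds to live workflow state before dispatching it to a worker. A minimal sketch of a caller, assuming the usual generated gRPC client for these messages; the helper name, import aliases and surrounding wiring are illustrative only, and only the request/response shapes come from the proto change above:

package sketch // illustrative placement; a real caller would live in the matching service

import (
    "context"

    commonpb "go.temporal.io/api/common/v1"

    clockspb "go.temporal.io/server/api/clock/v1"
    "go.temporal.io/server/api/historyservice/v1"
)

// activityTaskStillValid asks history whether an activity task that matching
// is about to dispatch is still valid for the given execution.
func activityTaskStillValid(
    ctx context.Context,
    client historyservice.HistoryServiceClient,
    namespaceID string,
    execution *commonpb.WorkflowExecution,
    clock *clockspb.VectorClock,
    scheduledEventID int64,
) (bool, error) {
    resp, err := client.IsActivityTaskValid(ctx, &historyservice.IsActivityTaskValidRequest{
        NamespaceId:      namespaceID,
        Execution:        execution,
        Clock:            clock,
        ScheduledEventId: scheduledEventID,
    })
    if err != nil {
        return false, err
    }
    // A false result means the workflow has already moved past this task, so
    // matching can drop it instead of handing it to a worker.
    return resp.GetIsValid(), nil
}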
rpc SignalWorkflowExecution (SignalWorkflowExecutionRequest) returns (SignalWorkflowExecutionResponse) { diff -Nru temporal-1.21.5-1/src/proto/internal/temporal/server/api/matchingservice/v1/request_response.proto temporal-1.22.5/src/proto/internal/temporal/server/api/matchingservice/v1/request_response.proto --- temporal-1.21.5-1/src/proto/internal/temporal/server/api/matchingservice/v1/request_response.proto 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/proto/internal/temporal/server/api/matchingservice/v1/request_response.proto 2024-02-23 09:45:43.000000000 +0000 @@ -219,6 +219,7 @@ oneof operation { ApplyPublicRequest apply_public_request = 3; RemoveBuildIds remove_build_ids = 4; + string persist_unknown_build_id = 5; } } message UpdateWorkerBuildIdCompatibilityResponse {} diff -Nru temporal-1.21.5-1/src/proto/internal/temporal/server/api/matchingservice/v1/service.proto temporal-1.22.5/src/proto/internal/temporal/server/api/matchingservice/v1/service.proto --- temporal-1.21.5-1/src/proto/internal/temporal/server/api/matchingservice/v1/service.proto 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/proto/internal/temporal/server/api/matchingservice/v1/service.proto 2024-02-23 09:45:43.000000000 +0000 @@ -79,9 +79,9 @@ } // (-- api-linter: core::0134::response-message-name=disabled - // aip.dev/not-precedent: UpdateWorkerBuildIdOrdering RPC doesn't follow Google API format. --) + // aip.dev/not-precedent: UpdateWorkerBuildIdCompatibility RPC doesn't follow Google API format. --) // (-- api-linter: core::0134::method-signature=disabled - // aip.dev/not-precedent: UpdateWorkerBuildIdOrdering RPC doesn't follow Google API format. --) + // aip.dev/not-precedent: UpdateWorkerBuildIdCompatibility RPC doesn't follow Google API format. --) rpc UpdateWorkerBuildIdCompatibility (UpdateWorkerBuildIdCompatibilityRequest) returns (UpdateWorkerBuildIdCompatibilityResponse) {} rpc GetWorkerBuildIdCompatibility (GetWorkerBuildIdCompatibilityRequest) returns (GetWorkerBuildIdCompatibilityResponse) {} // Fetch user data for a task queue, this request should always be routed to the node holding the root partition of the workflow task queue. diff -Nru temporal-1.21.5-1/src/proto/internal/temporal/server/api/persistence/v1/cluster_metadata.proto temporal-1.22.5/src/proto/internal/temporal/server/api/persistence/v1/cluster_metadata.proto --- temporal-1.21.5-1/src/proto/internal/temporal/server/api/persistence/v1/cluster_metadata.proto 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/proto/internal/temporal/server/api/persistence/v1/cluster_metadata.proto 2024-02-23 09:45:43.000000000 +0000 @@ -39,6 +39,7 @@ bool is_global_namespace_enabled = 9; bool is_connection_enabled = 10; bool use_cluster_id_membership = 11; + map tags = 12; } message IndexSearchAttributes{ diff -Nru temporal-1.21.5-1/src/proto/internal/temporal/server/api/persistence/v1/executions.proto temporal-1.22.5/src/proto/internal/temporal/server/api/persistence/v1/executions.proto --- temporal-1.21.5-1/src/proto/internal/temporal/server/api/persistence/v1/executions.proto 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/proto/internal/temporal/server/api/persistence/v1/executions.proto 2024-02-23 09:45:43.000000000 +0000 @@ -62,9 +62,7 @@ map replication_dlq_ack_level = 13; reserved 14; reserved 15; - // Map from task category to ack levels of the corresponding queue processor - // Deprecated. Use queue_states instead. 
- map queue_ack_levels = 16; + reserved 16; map queue_states = 17; } @@ -311,6 +309,7 @@ temporal.api.common.v1.Payloads last_heartbeat_details = 31; google.protobuf.Timestamp last_heartbeat_update_time = 32 [(gogoproto.stdtime) = true]; bool use_compatible_version = 33; + temporal.api.common.v1.ActivityType activity_type = 34; } // timer_map column diff -Nru temporal-1.21.5-1/src/proto/internal/temporal/server/api/persistence/v1/queues.proto temporal-1.22.5/src/proto/internal/temporal/server/api/persistence/v1/queues.proto --- temporal-1.21.5-1/src/proto/internal/temporal/server/api/persistence/v1/queues.proto 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/proto/internal/temporal/server/api/persistence/v1/queues.proto 2024-02-23 09:45:43.000000000 +0000 @@ -26,11 +26,6 @@ import "temporal/server/api/persistence/v1/predicates.proto"; import "temporal/server/api/persistence/v1/tasks.proto"; -message QueueAckLevel { - int64 ack_level = 1; - map cluster_ack_level = 2; -} - message QueueState { map reader_states = 1; TaskKey exclusive_reader_high_watermark = 2; diff -Nru temporal-1.21.5-1/src/proto/internal/temporal/server/api/persistence/v1/task_queues.proto temporal-1.22.5/src/proto/internal/temporal/server/api/persistence/v1/task_queues.proto --- temporal-1.21.5-1/src/proto/internal/temporal/server/api/persistence/v1/task_queues.proto 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/proto/internal/temporal/server/api/persistence/v1/task_queues.proto 2024-02-23 09:45:43.000000000 +0000 @@ -49,8 +49,17 @@ message CompatibleVersionSet { // Set IDs are used internally by matching. // A set typically has one set ID and extra care is taken to enforce this. - // In split brain scenarios, there may be conflicting concurrent writes to the task queue versioning data, in which - // case a set might end up with more than one ID. + // In some situations, including: + // - Replication race between task queue user data and history events + // - Replication split-brain + later merge + // - Delayed user data propagation between partitions + // - Cross-task-queue activities/child workflows/CAN where the user has not set up parallel + // versioning data + // we have to guess the set id for a build id. If that happens, and then the build id is + // discovered to be in a different set, then the sets will be merged and both (or more) + // build ids will be preserved, so that we don't lose tasks. + // The first set id is considered the "primary", and the others are "demoted". Once a build + // id is demoted, it cannot be made the primary again. repeated string set_ids = 1; // All the compatible versions, unordered except for the last element, which is considered the set "default". 
repeated BuildId build_ids = 2; diff -Nru temporal-1.21.5-1/src/proto/internal/temporal/server/api/token/v1/message.proto temporal-1.22.5/src/proto/internal/temporal/server/api/token/v1/message.proto --- temporal-1.21.5-1/src/proto/internal/temporal/server/api/token/v1/message.proto 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/proto/internal/temporal/server/api/token/v1/message.proto 2024-02-23 09:45:43.000000000 +0000 @@ -38,6 +38,7 @@ temporal.server.api.history.v1.TransientWorkflowTaskInfo transient_workflow_task = 7; bytes branch_token = 8; reserved 9; + temporal.server.api.history.v1.VersionHistoryItem version_history_item = 10; } message RawHistoryContinuation{ diff -Nru temporal-1.21.5-1/src/service/frontend/adminHandler.go temporal-1.22.5/src/service/frontend/adminHandler.go --- temporal-1.21.5-1/src/service/frontend/adminHandler.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/frontend/adminHandler.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,2047 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- -package frontend - -import ( - "context" - "errors" - "fmt" - "net" - "strings" - "sync/atomic" - "time" - - "google.golang.org/grpc/metadata" - - "go.temporal.io/server/client/history" - "go.temporal.io/server/common/channel" - "go.temporal.io/server/common/clock" - "go.temporal.io/server/common/primitives" - "go.temporal.io/server/common/util" - - "github.com/pborman/uuid" - commonpb "go.temporal.io/api/common/v1" - enumspb "go.temporal.io/api/enums/v1" - historypb "go.temporal.io/api/history/v1" - namespacepb "go.temporal.io/api/namespace/v1" - "go.temporal.io/api/serviceerror" - workflowpb "go.temporal.io/api/workflow/v1" - "go.temporal.io/api/workflowservice/v1" - sdkclient "go.temporal.io/sdk/client" - "golang.org/x/exp/maps" - "google.golang.org/grpc/health" - healthpb "google.golang.org/grpc/health/grpc_health_v1" - - "go.temporal.io/server/api/adminservice/v1" - clusterspb "go.temporal.io/server/api/cluster/v1" - enumsspb "go.temporal.io/server/api/enums/v1" - historyspb "go.temporal.io/server/api/history/v1" - "go.temporal.io/server/api/historyservice/v1" - persistencespb "go.temporal.io/server/api/persistence/v1" - replicationspb "go.temporal.io/server/api/replication/v1" - tokenspb "go.temporal.io/server/api/token/v1" - serverClient "go.temporal.io/server/client" - "go.temporal.io/server/client/admin" - "go.temporal.io/server/client/frontend" - "go.temporal.io/server/common" - "go.temporal.io/server/common/archiver" - "go.temporal.io/server/common/archiver/provider" - "go.temporal.io/server/common/cluster" - "go.temporal.io/server/common/config" - "go.temporal.io/server/common/convert" - "go.temporal.io/server/common/headers" - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/log/tag" - "go.temporal.io/server/common/membership" - "go.temporal.io/server/common/metrics" - "go.temporal.io/server/common/namespace" - "go.temporal.io/server/common/persistence" - "go.temporal.io/server/common/persistence/serialization" - "go.temporal.io/server/common/persistence/versionhistory" - "go.temporal.io/server/common/persistence/visibility/manager" - "go.temporal.io/server/common/persistence/visibility/store/elasticsearch" - esclient "go.temporal.io/server/common/persistence/visibility/store/elasticsearch/client" - "go.temporal.io/server/common/persistence/visibility/store/standard/cassandra" - "go.temporal.io/server/common/primitives/timestamp" - "go.temporal.io/server/common/sdk" - "go.temporal.io/server/common/searchattribute" - "go.temporal.io/server/common/xdc" - "go.temporal.io/server/service/history/tasks" - "go.temporal.io/server/service/worker" - "go.temporal.io/server/service/worker/addsearchattributes" -) - -const ( - getNamespaceReplicationMessageBatchSize = 100 - defaultLastMessageID = -1 - listClustersPageSize = 100 -) - -type ( - // AdminHandler - gRPC handler interface for adminservice - AdminHandler struct { - status int32 - - logger log.Logger - numberOfHistoryShards int32 - ESClient esclient.Client - config *Config - namespaceDLQHandler namespace.DLQMessageHandler - eventSerializer serialization.Serializer - visibilityMgr manager.VisibilityManager - persistenceExecutionManager persistence.ExecutionManager - namespaceReplicationQueue persistence.NamespaceReplicationQueue - taskManager persistence.TaskManager - clusterMetadataManager persistence.ClusterMetadataManager - persistenceMetadataManager persistence.MetadataManager - clientFactory serverClient.Factory - clientBean serverClient.Bean - historyClient historyservice.HistoryServiceClient - 
sdkClientFactory sdk.ClientFactory - membershipMonitor membership.Monitor - hostInfoProvider membership.HostInfoProvider - metricsHandler metrics.Handler - namespaceRegistry namespace.Registry - saProvider searchattribute.Provider - saManager searchattribute.Manager - clusterMetadata cluster.Metadata - healthServer *health.Server - } - - NewAdminHandlerArgs struct { - PersistenceConfig *config.Persistence - Config *Config - NamespaceReplicationQueue persistence.NamespaceReplicationQueue - ReplicatorNamespaceReplicationQueue persistence.NamespaceReplicationQueue - EsClient esclient.Client - VisibilityMrg manager.VisibilityManager - Logger log.Logger - PersistenceExecutionManager persistence.ExecutionManager - TaskManager persistence.TaskManager - ClusterMetadataManager persistence.ClusterMetadataManager - PersistenceMetadataManager persistence.MetadataManager - ClientFactory serverClient.Factory - ClientBean serverClient.Bean - HistoryClient historyservice.HistoryServiceClient - sdkClientFactory sdk.ClientFactory - MembershipMonitor membership.Monitor - HostInfoProvider membership.HostInfoProvider - ArchiverProvider provider.ArchiverProvider - MetricsHandler metrics.Handler - NamespaceRegistry namespace.Registry - SaProvider searchattribute.Provider - SaManager searchattribute.Manager - ClusterMetadata cluster.Metadata - ArchivalMetadata archiver.ArchivalMetadata - HealthServer *health.Server - EventSerializer serialization.Serializer - TimeSource clock.TimeSource - } -) - -var ( - _ adminservice.AdminServiceServer = (*AdminHandler)(nil) - - resendStartEventID = int64(0) -) - -// NewAdminHandler creates a gRPC handler for the adminservice -func NewAdminHandler( - args NewAdminHandlerArgs, -) *AdminHandler { - namespaceReplicationTaskExecutor := namespace.NewReplicationTaskExecutor( - args.ClusterMetadata.GetCurrentClusterName(), - args.PersistenceMetadataManager, - args.Logger, - ) - - return &AdminHandler{ - logger: args.Logger, - status: common.DaemonStatusInitialized, - numberOfHistoryShards: args.PersistenceConfig.NumHistoryShards, - config: args.Config, - namespaceDLQHandler: namespace.NewDLQMessageHandler( - namespaceReplicationTaskExecutor, - args.NamespaceReplicationQueue, - args.Logger, - ), - eventSerializer: args.EventSerializer, - visibilityMgr: args.VisibilityMrg, - ESClient: args.EsClient, - persistenceExecutionManager: args.PersistenceExecutionManager, - namespaceReplicationQueue: args.NamespaceReplicationQueue, - taskManager: args.TaskManager, - clusterMetadataManager: args.ClusterMetadataManager, - persistenceMetadataManager: args.PersistenceMetadataManager, - clientFactory: args.ClientFactory, - clientBean: args.ClientBean, - historyClient: args.HistoryClient, - sdkClientFactory: args.sdkClientFactory, - membershipMonitor: args.MembershipMonitor, - hostInfoProvider: args.HostInfoProvider, - metricsHandler: args.MetricsHandler, - namespaceRegistry: args.NamespaceRegistry, - saProvider: args.SaProvider, - saManager: args.SaManager, - clusterMetadata: args.ClusterMetadata, - healthServer: args.HealthServer, - } -} - -// Start starts the handler -func (adh *AdminHandler) Start() { - if atomic.CompareAndSwapInt32( - &adh.status, - common.DaemonStatusInitialized, - common.DaemonStatusStarted, - ) { - adh.healthServer.SetServingStatus(AdminServiceName, healthpb.HealthCheckResponse_SERVING) - } - - // Start namespace replication queue cleanup - // If the queue does not start, we can still call stop() - adh.namespaceReplicationQueue.Start() -} - -// Stop stops the handler -func 
(adh *AdminHandler) Stop() { - if atomic.CompareAndSwapInt32( - &adh.status, - common.DaemonStatusStarted, - common.DaemonStatusStopped, - ) { - adh.healthServer.SetServingStatus(AdminServiceName, healthpb.HealthCheckResponse_NOT_SERVING) - } - - // Calling stop if the queue does not start is ok - adh.namespaceReplicationQueue.Stop() -} - -// AddSearchAttributes add search attribute to the cluster. -func (adh *AdminHandler) AddSearchAttributes( - ctx context.Context, - request *adminservice.AddSearchAttributesRequest, -) (_ *adminservice.AddSearchAttributesResponse, retError error) { - defer log.CapturePanic(adh.logger, &retError) - - // validate request - if request == nil { - return nil, errRequestNotSet - } - - if len(request.GetSearchAttributes()) == 0 { - return nil, errSearchAttributesNotSet - } - - indexName := request.GetIndexName() - if indexName == "" { - indexName = adh.visibilityMgr.GetIndexName() - } - - currentSearchAttributes, err := adh.saProvider.GetSearchAttributes(indexName, true) - if err != nil { - return nil, serviceerror.NewUnavailable(fmt.Sprintf(errUnableToGetSearchAttributesMessage, err)) - } - - for saName, saType := range request.GetSearchAttributes() { - if searchattribute.IsReserved(saName) { - return nil, serviceerror.NewInvalidArgument(fmt.Sprintf(errSearchAttributeIsReservedMessage, saName)) - } - if currentSearchAttributes.IsDefined(saName) { - return nil, serviceerror.NewInvalidArgument(fmt.Sprintf(errSearchAttributeAlreadyExistsMessage, saName)) - } - if _, ok := enumspb.IndexedValueType_name[int32(saType)]; !ok { - return nil, serviceerror.NewInvalidArgument(fmt.Sprintf(errUnknownSearchAttributeTypeMessage, saType)) - } - } - - // TODO (rodrigozhou): Remove condition `indexName == ""`. - // If indexName == "", then calling addSearchAttributesElasticsearch will - // register the search attributes in the cluster metadata if ES is up or if - // `skip-schema-update` is set. This is for backward compatibility using - // standard visibility. - if adh.visibilityMgr.HasStoreName(elasticsearch.PersistenceName) || indexName == "" { - err = adh.addSearchAttributesElasticsearch(ctx, request, indexName) - } else { - err = adh.addSearchAttributesSQL(ctx, request, currentSearchAttributes) - } - - if err != nil { - return nil, err - } - return &adminservice.AddSearchAttributesResponse{}, nil -} - -func (adh *AdminHandler) addSearchAttributesElasticsearch( - ctx context.Context, - request *adminservice.AddSearchAttributesRequest, - indexName string, -) error { - // Execute workflow. - wfParams := addsearchattributes.WorkflowParams{ - CustomAttributesToAdd: request.GetSearchAttributes(), - IndexName: indexName, - SkipSchemaUpdate: request.GetSkipSchemaUpdate(), - } - - sdkClient := adh.sdkClientFactory.GetSystemClient() - run, err := sdkClient.ExecuteWorkflow( - ctx, - sdkclient.StartWorkflowOptions{ - TaskQueue: worker.DefaultWorkerTaskQueue, - ID: addsearchattributes.WorkflowName, - }, - addsearchattributes.WorkflowName, - wfParams, - ) - if err != nil { - return serviceerror.NewUnavailable( - fmt.Sprintf(errUnableToStartWorkflowMessage, addsearchattributes.WorkflowName, err), - ) - } - - // Wait for workflow to complete. 
- err = run.Get(ctx, nil) - if err != nil { - return serviceerror.NewUnavailable( - fmt.Sprintf(errWorkflowReturnedErrorMessage, addsearchattributes.WorkflowName, err), - ) - } - return nil -} - -func (adh *AdminHandler) addSearchAttributesSQL( - ctx context.Context, - request *adminservice.AddSearchAttributesRequest, - currentSearchAttributes searchattribute.NameTypeMap, -) error { - _, client, err := adh.clientFactory.NewLocalFrontendClientWithTimeout( - frontend.DefaultTimeout, - frontend.DefaultLongPollTimeout, - ) - if err != nil { - return serviceerror.NewUnavailable(fmt.Sprintf(errUnableToCreateFrontendClientMessage, err)) - } - - nsName := request.GetNamespace() - if nsName == "" { - return errNamespaceNotSet - } - resp, err := client.DescribeNamespace( - ctx, - &workflowservice.DescribeNamespaceRequest{Namespace: nsName}, - ) - if err != nil { - return serviceerror.NewUnavailable(fmt.Sprintf(errUnableToGetNamespaceInfoMessage, nsName)) - } - - dbCustomSearchAttributes := searchattribute.GetSqlDbIndexSearchAttributes().CustomSearchAttributes - cmCustomSearchAttributes := currentSearchAttributes.Custom() - upsertFieldToAliasMap := make(map[string]string) - fieldToAliasMap := resp.Config.CustomSearchAttributeAliases - aliasToFieldMap := util.InverseMap(fieldToAliasMap) - for saName, saType := range request.GetSearchAttributes() { - // check if alias is already in use - if _, ok := aliasToFieldMap[saName]; ok { - return serviceerror.NewAlreadyExist( - fmt.Sprintf(errSearchAttributeAlreadyExistsMessage, saName), - ) - } - // find the first available field for the given type - targetFieldName := "" - cntUsed := 0 - for fieldName, fieldType := range dbCustomSearchAttributes { - if fieldType != saType { - continue - } - // make sure the pre-allocated custom search attributes are created in cluster metadata - if _, ok := cmCustomSearchAttributes[fieldName]; !ok { - continue - } - if _, ok := fieldToAliasMap[fieldName]; ok { - cntUsed++ - } else if _, ok := upsertFieldToAliasMap[fieldName]; ok { - cntUsed++ - } else { - targetFieldName = fieldName - break - } - } - if targetFieldName == "" { - return serviceerror.NewInvalidArgument( - fmt.Sprintf(errTooManySearchAttributesMessage, cntUsed, saType.String()), - ) - } - upsertFieldToAliasMap[targetFieldName] = saName - } - - _, err = client.UpdateNamespace(ctx, &workflowservice.UpdateNamespaceRequest{ - Namespace: nsName, - Config: &namespacepb.NamespaceConfig{ - CustomSearchAttributeAliases: upsertFieldToAliasMap, - }, - }) - if err != nil && err.Error() == errCustomSearchAttributeFieldAlreadyAllocated.Error() { - return errRaceConditionAddingSearchAttributes - } - return err -} - -// RemoveSearchAttributes remove search attribute from the cluster. -func (adh *AdminHandler) RemoveSearchAttributes( - ctx context.Context, - request *adminservice.RemoveSearchAttributesRequest, -) (_ *adminservice.RemoveSearchAttributesResponse, retError error) { - defer log.CapturePanic(adh.logger, &retError) - - // validate request - if request == nil { - return nil, errRequestNotSet - } - - if len(request.GetSearchAttributes()) == 0 { - return nil, errSearchAttributesNotSet - } - - indexName := request.GetIndexName() - if indexName == "" { - indexName = adh.visibilityMgr.GetIndexName() - } - - currentSearchAttributes, err := adh.saProvider.GetSearchAttributes(indexName, true) - if err != nil { - return nil, serviceerror.NewUnavailable(fmt.Sprintf(errUnableToGetSearchAttributesMessage, err)) - } - - // TODO (rodrigozhou): Remove condition `indexName == ""`. 
- // If indexName == "", then calling addSearchAttributesElasticsearch will - // register the search attributes in the cluster metadata if ES is up or if - // `skip-schema-update` is set. This is for backward compatibility using - // standard visibility. - if adh.visibilityMgr.HasStoreName(elasticsearch.PersistenceName) || indexName == "" { - err = adh.removeSearchAttributesElasticsearch(ctx, request, indexName, currentSearchAttributes) - } else { - err = adh.removeSearchAttributesSQL(ctx, request, currentSearchAttributes) - } - - if err != nil { - return nil, err - } - return &adminservice.RemoveSearchAttributesResponse{}, nil -} - -func (adh *AdminHandler) removeSearchAttributesElasticsearch( - ctx context.Context, - request *adminservice.RemoveSearchAttributesRequest, - indexName string, - currentSearchAttributes searchattribute.NameTypeMap, -) error { - newCustomSearchAttributes := maps.Clone(currentSearchAttributes.Custom()) - for _, saName := range request.GetSearchAttributes() { - if !currentSearchAttributes.IsDefined(saName) { - return serviceerror.NewInvalidArgument(fmt.Sprintf(errSearchAttributeDoesntExistMessage, saName)) - } - if _, ok := newCustomSearchAttributes[saName]; !ok { - return serviceerror.NewInvalidArgument(fmt.Sprintf(errUnableToRemoveNonCustomSearchAttributesMessage, saName)) - } - delete(newCustomSearchAttributes, saName) - } - - err := adh.saManager.SaveSearchAttributes(ctx, indexName, newCustomSearchAttributes) - if err != nil { - return serviceerror.NewUnavailable(fmt.Sprintf(errUnableToSaveSearchAttributesMessage, err)) - } - return nil -} - -func (adh *AdminHandler) removeSearchAttributesSQL( - ctx context.Context, - request *adminservice.RemoveSearchAttributesRequest, - currentSearchAttributes searchattribute.NameTypeMap, -) error { - _, client, err := adh.clientFactory.NewLocalFrontendClientWithTimeout( - frontend.DefaultTimeout, - frontend.DefaultLongPollTimeout, - ) - if err != nil { - return serviceerror.NewUnavailable(fmt.Sprintf(errUnableToCreateFrontendClientMessage, err)) - } - - nsName := request.GetNamespace() - if nsName == "" { - return errNamespaceNotSet - } - resp, err := client.DescribeNamespace( - ctx, - &workflowservice.DescribeNamespaceRequest{Namespace: nsName}, - ) - if err != nil { - return serviceerror.NewUnavailable(fmt.Sprintf(errUnableToGetNamespaceInfoMessage, nsName)) - } - - upsertFieldToAliasMap := make(map[string]string) - aliasToFieldMap := util.InverseMap(resp.Config.CustomSearchAttributeAliases) - for _, saName := range request.GetSearchAttributes() { - if fieldName, ok := aliasToFieldMap[saName]; ok { - upsertFieldToAliasMap[fieldName] = "" - continue - } - if currentSearchAttributes.IsDefined(saName) { - return serviceerror.NewInvalidArgument( - fmt.Sprintf(errUnableToRemoveNonCustomSearchAttributesMessage, saName), - ) - } - return serviceerror.NewNotFound(fmt.Sprintf(errSearchAttributeDoesntExistMessage, saName)) - } - - _, err = client.UpdateNamespace(ctx, &workflowservice.UpdateNamespaceRequest{ - Namespace: nsName, - Config: &namespacepb.NamespaceConfig{ - CustomSearchAttributeAliases: upsertFieldToAliasMap, - }, - }) - return err -} - -func (adh *AdminHandler) GetSearchAttributes( - ctx context.Context, - request *adminservice.GetSearchAttributesRequest, -) (_ *adminservice.GetSearchAttributesResponse, retError error) { - defer log.CapturePanic(adh.logger, &retError) - - if request == nil { - return nil, errRequestNotSet - } - - indexName := request.GetIndexName() - if indexName == "" { - indexName = 
adh.visibilityMgr.GetIndexName() - } - - searchAttributes, err := adh.saProvider.GetSearchAttributes(indexName, true) - if err != nil { - adh.logger.Error("getSearchAttributes error", tag.Error(err)) - return nil, serviceerror.NewUnavailable(fmt.Sprintf(errUnableToGetSearchAttributesMessage, err)) - } - - // TODO (rodrigozhou): Remove condition `indexName == ""`. - // If indexName == "", then calling addSearchAttributesElasticsearch will - // register the search attributes in the cluster metadata if ES is up or if - // `skip-schema-update` is set. This is for backward compatibility using - // standard visibility. - if adh.visibilityMgr.HasStoreName(elasticsearch.PersistenceName) || indexName == "" { - return adh.getSearchAttributesElasticsearch(ctx, indexName, searchAttributes) - } - return adh.getSearchAttributesSQL(ctx, request, searchAttributes) -} - -func (adh *AdminHandler) getSearchAttributesElasticsearch( - ctx context.Context, - indexName string, - searchAttributes searchattribute.NameTypeMap, -) (*adminservice.GetSearchAttributesResponse, error) { - var lastErr error - - sdkClient := adh.sdkClientFactory.GetSystemClient() - descResp, err := sdkClient.DescribeWorkflowExecution(ctx, addsearchattributes.WorkflowName, "") - var wfInfo *workflowpb.WorkflowExecutionInfo - if err != nil { - // NotFound can happen when no search attributes were added and the workflow has never been executed. - if _, isNotFound := err.(*serviceerror.NotFound); !isNotFound { - lastErr = serviceerror.NewUnavailable(fmt.Sprintf("unable to get %s workflow state: %v", addsearchattributes.WorkflowName, err)) - adh.logger.Error("getSearchAttributes error", tag.Error(lastErr)) - } - } else { - wfInfo = descResp.GetWorkflowExecutionInfo() - } - - var esMapping map[string]string - if adh.ESClient != nil { - esMapping, err = adh.ESClient.GetMapping(ctx, indexName) - if err != nil { - lastErr = serviceerror.NewUnavailable(fmt.Sprintf("unable to get mapping from Elasticsearch: %v", err)) - adh.logger.Error("getSearchAttributes error", tag.Error(lastErr)) - } - } - - if lastErr != nil { - return nil, lastErr - } - return &adminservice.GetSearchAttributesResponse{ - CustomAttributes: searchAttributes.Custom(), - SystemAttributes: searchAttributes.System(), - Mapping: esMapping, - AddWorkflowExecutionInfo: wfInfo, - }, nil -} - -func (adh *AdminHandler) getSearchAttributesSQL( - ctx context.Context, - request *adminservice.GetSearchAttributesRequest, - searchAttributes searchattribute.NameTypeMap, -) (*adminservice.GetSearchAttributesResponse, error) { - _, client, err := adh.clientFactory.NewLocalFrontendClientWithTimeout( - frontend.DefaultTimeout, - frontend.DefaultLongPollTimeout, - ) - if err != nil { - return nil, serviceerror.NewUnavailable(fmt.Sprintf(errUnableToCreateFrontendClientMessage, err)) - } - - nsName := request.GetNamespace() - if nsName == "" { - return nil, errNamespaceNotSet - } - resp, err := client.DescribeNamespace( - ctx, - &workflowservice.DescribeNamespaceRequest{Namespace: nsName}, - ) - if err != nil { - return nil, serviceerror.NewUnavailable( - fmt.Sprintf(errUnableToGetNamespaceInfoMessage, nsName), - ) - } - - fieldToAliasMap := resp.Config.CustomSearchAttributeAliases - customSearchAttributes := make(map[string]enumspb.IndexedValueType) - for field, tp := range searchAttributes.Custom() { - if alias, ok := fieldToAliasMap[field]; ok { - customSearchAttributes[alias] = tp - } - } - return &adminservice.GetSearchAttributesResponse{ - CustomAttributes: customSearchAttributes, - 
SystemAttributes: searchAttributes.System(), - }, nil -} - -func (adh *AdminHandler) RebuildMutableState(ctx context.Context, request *adminservice.RebuildMutableStateRequest) (_ *adminservice.RebuildMutableStateResponse, retError error) { - defer log.CapturePanic(adh.logger, &retError) - - if request == nil { - return nil, errRequestNotSet - } - - if err := validateExecution(request.Execution); err != nil { - return nil, err - } - - namespaceID, err := adh.namespaceRegistry.GetNamespaceID(namespace.Name(request.GetNamespace())) - if err != nil { - return nil, err - } - - if _, err := adh.historyClient.RebuildMutableState(ctx, &historyservice.RebuildMutableStateRequest{ - NamespaceId: namespaceID.String(), - Execution: request.Execution, - }); err != nil { - return nil, err - } - return &adminservice.RebuildMutableStateResponse{}, nil -} - -// DescribeMutableState returns information about the specified workflow execution. -func (adh *AdminHandler) DescribeMutableState(ctx context.Context, request *adminservice.DescribeMutableStateRequest) (_ *adminservice.DescribeMutableStateResponse, retError error) { - defer log.CapturePanic(adh.logger, &retError) - - if request == nil { - return nil, errRequestNotSet - } - - if err := validateExecution(request.Execution); err != nil { - return nil, err - } - - namespaceID, err := adh.namespaceRegistry.GetNamespaceID(namespace.Name(request.GetNamespace())) - if err != nil { - return nil, err - } - - shardID := common.WorkflowIDToHistoryShard(namespaceID.String(), request.Execution.WorkflowId, adh.numberOfHistoryShards) - shardIDStr := convert.Int32ToString(shardID) - - resolver, err := adh.membershipMonitor.GetResolver(primitives.HistoryService) - if err != nil { - return nil, err - } - historyHost, err := resolver.Lookup(shardIDStr) - if err != nil { - return nil, err - } - - historyAddr := historyHost.GetAddress() - historyResponse, err := adh.historyClient.DescribeMutableState(ctx, &historyservice.DescribeMutableStateRequest{ - NamespaceId: namespaceID.String(), - Execution: request.Execution, - }) - - if err != nil { - return nil, err - } - return &adminservice.DescribeMutableStateResponse{ - ShardId: shardIDStr, - HistoryAddr: historyAddr, - DatabaseMutableState: historyResponse.GetDatabaseMutableState(), - CacheMutableState: historyResponse.GetCacheMutableState(), - }, nil -} - -// RemoveTask removes the specified task from a history shard's task queue -func (adh *AdminHandler) RemoveTask(ctx context.Context, request *adminservice.RemoveTaskRequest) (_ *adminservice.RemoveTaskResponse, retError error) { - defer log.CapturePanic(adh.logger, &retError) - - if request == nil { - return nil, errRequestNotSet - } - _, err := adh.historyClient.RemoveTask(ctx, &historyservice.RemoveTaskRequest{ - ShardId: request.GetShardId(), - Category: request.GetCategory(), - TaskId: request.GetTaskId(), - VisibilityTime: request.GetVisibilityTime(), - }) - return &adminservice.RemoveTaskResponse{}, err -} - -// GetShard returns information about the internal states of a shard -func (adh *AdminHandler) GetShard(ctx context.Context, request *adminservice.GetShardRequest) (_ *adminservice.GetShardResponse, retError error) { - defer log.CapturePanic(adh.logger, &retError) - if request == nil { - return nil, errRequestNotSet - } - resp, err := adh.historyClient.GetShard(ctx, &historyservice.GetShardRequest{ShardId: request.GetShardId()}) - if err != nil { - return nil, err - } - return &adminservice.GetShardResponse{ShardInfo: resp.ShardInfo}, nil -} - -// CloseShard 
closes the specified history shard -func (adh *AdminHandler) CloseShard(ctx context.Context, request *adminservice.CloseShardRequest) (_ *adminservice.CloseShardResponse, retError error) { - defer log.CapturePanic(adh.logger, &retError) - - if request == nil { - return nil, errRequestNotSet - } - _, err := adh.historyClient.CloseShard(ctx, &historyservice.CloseShardRequest{ShardId: request.GetShardId()}) - return &adminservice.CloseShardResponse{}, err -} - -func (adh *AdminHandler) ListHistoryTasks( - ctx context.Context, - request *adminservice.ListHistoryTasksRequest, -) (_ *adminservice.ListHistoryTasksResponse, retError error) { - defer log.CapturePanic(adh.logger, &retError) - - if request == nil { - return nil, errRequestNotSet - } - taskRange := request.GetTaskRange() - if taskRange == nil { - return nil, errTaskRangeNotSet - } - - taskCategory, ok := tasks.GetCategoryByID(int32(request.Category)) - if !ok { - return nil, &serviceerror.InvalidArgument{ - Message: fmt.Sprintf("unknown task category: %v", request.Category), - } - } - - var minTaskKey, maxTaskKey tasks.Key - if taskRange.InclusiveMinTaskKey != nil { - minTaskKey = tasks.NewKey( - timestamp.TimeValue(taskRange.InclusiveMinTaskKey.FireTime), - taskRange.InclusiveMinTaskKey.TaskId, - ) - if err := tasks.ValidateKey(minTaskKey); err != nil { - return nil, &serviceerror.InvalidArgument{ - Message: fmt.Sprintf("invalid minTaskKey: %v", err.Error()), - } - } - } - if taskRange.ExclusiveMaxTaskKey != nil { - maxTaskKey = tasks.NewKey( - timestamp.TimeValue(taskRange.ExclusiveMaxTaskKey.FireTime), - taskRange.ExclusiveMaxTaskKey.TaskId, - ) - if err := tasks.ValidateKey(maxTaskKey); err != nil { - return nil, &serviceerror.InvalidArgument{ - Message: fmt.Sprintf("invalid maxTaskKey: %v", err.Error()), - } - } - } - - // Queue reader registration is only meaningful for the history service; - // we are on the frontend service, so no registration is needed here - // TODO: move the logic to history service - - resp, err := adh.persistenceExecutionManager.GetHistoryTasks(ctx, &persistence.GetHistoryTasksRequest{ - ShardID: request.ShardId, - TaskCategory: taskCategory, - ReaderID: common.DefaultQueueReaderID, - InclusiveMinTaskKey: minTaskKey, - ExclusiveMaxTaskKey: maxTaskKey, - BatchSize: int(request.BatchSize), - NextPageToken: request.NextPageToken, - }) - if err != nil { - return nil, err - } - - return &adminservice.ListHistoryTasksResponse{ - Tasks: toAdminTask(resp.Tasks), - NextPageToken: resp.NextPageToken, - }, nil -} - -func toAdminTask(tasks []tasks.Task) []*adminservice.Task { - var adminTasks []*adminservice.Task - for _, task := range tasks { - adminTasks = append(adminTasks, &adminservice.Task{ - NamespaceId: task.GetNamespaceID(), - WorkflowId: task.GetWorkflowID(), - RunId: task.GetRunID(), - TaskId: task.GetTaskID(), - TaskType: task.GetType(), - FireTime: timestamp.TimePtr(task.GetKey().FireTime), - Version: task.GetVersion(), - }) - } - return adminTasks -} - -// DescribeHistoryHost returns information about the internal states of a history host -func (adh *AdminHandler) DescribeHistoryHost(ctx context.Context, request *adminservice.DescribeHistoryHostRequest) (_ *adminservice.DescribeHistoryHostResponse, retError error) { - defer log.CapturePanic(adh.logger, &retError) - - if request == nil { - return nil, errRequestNotSet - } - - flagsCount := 0 - if request.ShardId != 0 { - flagsCount++ - } - if len(request.Namespace) != 0 && request.WorkflowExecution != nil { - flagsCount++ - } - if 
len(request.GetHostAddress()) > 0 { - flagsCount++ - } - if flagsCount != 1 { - return nil, serviceerror.NewInvalidArgument("must provide one and only one: shard id or namespace & workflow id or host address") - } - - var err error - var namespaceID namespace.ID - if request.WorkflowExecution != nil { - namespaceID, err = adh.namespaceRegistry.GetNamespaceID(namespace.Name(request.Namespace)) - if err != nil { - return nil, err - } - - if err := validateExecution(request.WorkflowExecution); err != nil { - return nil, err - } - } - - resp, err := adh.historyClient.DescribeHistoryHost(ctx, &historyservice.DescribeHistoryHostRequest{ - HostAddress: request.GetHostAddress(), - ShardId: request.GetShardId(), - NamespaceId: namespaceID.String(), - WorkflowExecution: request.GetWorkflowExecution(), - }) - - if resp == nil { - return nil, err - } - - return &adminservice.DescribeHistoryHostResponse{ - ShardsNumber: resp.GetShardsNumber(), - ShardIds: resp.GetShardIds(), - NamespaceCache: resp.GetNamespaceCache(), - Address: resp.GetAddress(), - }, err -} - -// GetWorkflowExecutionRawHistoryV2 - retrieves the history of workflow execution -func (adh *AdminHandler) GetWorkflowExecutionRawHistoryV2(ctx context.Context, request *adminservice.GetWorkflowExecutionRawHistoryV2Request) (_ *adminservice.GetWorkflowExecutionRawHistoryV2Response, retError error) { - defer log.CapturePanic(adh.logger, &retError) - - taggedMetricsHandler, startTime := adh.startRequestProfile(metrics.AdminGetWorkflowExecutionRawHistoryV2Scope) - defer func() { - taggedMetricsHandler.Timer(metrics.ServiceLatency.GetMetricName()).Record(time.Since(startTime)) - }() - - if err := adh.validateGetWorkflowExecutionRawHistoryV2Request( - request, - ); err != nil { - return nil, err - } - - ns, err := adh.namespaceRegistry.GetNamespaceByID(namespace.ID(request.GetNamespaceId())) - if err != nil { - return nil, err - } - taggedMetricsHandler = taggedMetricsHandler.WithTags(metrics.NamespaceTag(ns.Name().String())) - - execution := request.Execution - var pageToken *tokenspb.RawHistoryContinuation - var targetVersionHistory *historyspb.VersionHistory - if request.NextPageToken == nil { - response, err := adh.historyClient.GetMutableState(ctx, &historyservice.GetMutableStateRequest{ - NamespaceId: ns.ID().String(), - Execution: execution, - }) - if err != nil { - return nil, err - } - - targetVersionHistory, err = adh.setRequestDefaultValueAndGetTargetVersionHistory( - request, - response.GetVersionHistories(), - ) - if err != nil { - return nil, err - } - - pageToken = generatePaginationToken(request, response.GetVersionHistories()) - } else { - pageToken, err = deserializeRawHistoryToken(request.NextPageToken) - if err != nil { - return nil, err - } - versionHistories := pageToken.GetVersionHistories() - if versionHistories == nil { - return nil, errInvalidVersionHistories - } - targetVersionHistory, err = adh.setRequestDefaultValueAndGetTargetVersionHistory( - request, - versionHistories, - ) - if err != nil { - return nil, err - } - } - - if err := validatePaginationToken( - request, - pageToken, - ); err != nil { - return nil, err - } - - if pageToken.GetStartEventId()+1 == pageToken.GetEndEventId() { - // API is exclusive-exclusive. Return empty response here. 
- return &adminservice.GetWorkflowExecutionRawHistoryV2Response{ - HistoryBatches: []*commonpb.DataBlob{}, - NextPageToken: nil, // no further pagination - VersionHistory: targetVersionHistory, - }, nil - } - pageSize := int(request.GetMaximumPageSize()) - shardID := common.WorkflowIDToHistoryShard( - ns.ID().String(), - execution.GetWorkflowId(), - adh.numberOfHistoryShards, - ) - rawHistoryResponse, err := adh.persistenceExecutionManager.ReadRawHistoryBranch(ctx, &persistence.ReadHistoryBranchRequest{ - BranchToken: targetVersionHistory.GetBranchToken(), - // GetWorkflowExecutionRawHistoryV2 is exclusive exclusive. - // ReadRawHistoryBranch is inclusive exclusive. - MinEventID: pageToken.GetStartEventId() + 1, - MaxEventID: pageToken.GetEndEventId(), - PageSize: pageSize, - NextPageToken: pageToken.PersistenceToken, - ShardID: shardID, - }) - if err != nil { - if _, isNotFound := err.(*serviceerror.NotFound); isNotFound { - // when no events can be returned from DB, DB layer will return - // EntityNotExistsError, this API shall return empty response - return &adminservice.GetWorkflowExecutionRawHistoryV2Response{ - HistoryBatches: []*commonpb.DataBlob{}, - NextPageToken: nil, // no further pagination - VersionHistory: targetVersionHistory, - }, nil - } - return nil, err - } - - pageToken.PersistenceToken = rawHistoryResponse.NextPageToken - size := rawHistoryResponse.Size - // N.B. - Dual emit is required here so that we can see aggregate timer stats across all - // namespaces along with the individual namespaces stats - adh.metricsHandler.Histogram(metrics.HistorySize.GetMetricName(), metrics.HistorySize.GetMetricUnit()).Record( - int64(size), - metrics.OperationTag(metrics.AdminGetWorkflowExecutionRawHistoryV2Scope)) - taggedMetricsHandler.Histogram(metrics.HistorySize.GetMetricName(), metrics.HistorySize.GetMetricUnit()).Record(int64(size)) - - result := &adminservice.GetWorkflowExecutionRawHistoryV2Response{ - HistoryBatches: rawHistoryResponse.HistoryEventBlobs, - VersionHistory: targetVersionHistory, - HistoryNodeIds: rawHistoryResponse.NodeIDs, - } - if len(pageToken.PersistenceToken) == 0 { - result.NextPageToken = nil - } else { - result.NextPageToken, err = serializeRawHistoryToken(pageToken) - if err != nil { - return nil, err - } - } - - return result, nil -} - -// DescribeCluster return information about a temporal cluster -func (adh *AdminHandler) DescribeCluster( - ctx context.Context, - request *adminservice.DescribeClusterRequest, -) (_ *adminservice.DescribeClusterResponse, retError error) { - defer log.CapturePanic(adh.logger, &retError) - - membershipInfo := &clusterspb.MembershipInfo{} - if monitor := adh.membershipMonitor; monitor != nil { - membershipInfo.CurrentHost = &clusterspb.HostInfo{ - Identity: adh.hostInfoProvider.HostInfo().Identity(), - } - - members, err := monitor.GetReachableMembers() - if err != nil { - return nil, err - } - - membershipInfo.ReachableMembers = members - - var rings []*clusterspb.RingInfo - for _, role := range []primitives.ServiceName{ - primitives.FrontendService, - primitives.InternalFrontendService, - primitives.HistoryService, - primitives.MatchingService, - primitives.WorkerService, - } { - resolver, err := monitor.GetResolver(role) - if err != nil { - if role == primitives.InternalFrontendService { - continue // this one is optional - } - return nil, err - } - - var servers []*clusterspb.HostInfo - for _, server := range resolver.Members() { - servers = append(servers, &clusterspb.HostInfo{ - Identity: server.Identity(), - }) - 
} - - rings = append(rings, &clusterspb.RingInfo{ - Role: string(role), - MemberCount: int32(resolver.MemberCount()), - Members: servers, - }) - } - membershipInfo.Rings = rings - } - - if len(request.ClusterName) == 0 { - request.ClusterName = adh.clusterMetadata.GetCurrentClusterName() - } - metadata, err := adh.clusterMetadataManager.GetClusterMetadata( - ctx, - &persistence.GetClusterMetadataRequest{ClusterName: request.GetClusterName()}, - ) - if err != nil { - return nil, err - } - - return &adminservice.DescribeClusterResponse{ - SupportedClients: headers.SupportedClients, - ServerVersion: headers.ServerVersion, - MembershipInfo: membershipInfo, - ClusterId: metadata.ClusterId, - ClusterName: metadata.ClusterName, - HistoryShardCount: metadata.HistoryShardCount, - PersistenceStore: adh.persistenceExecutionManager.GetName(), - VisibilityStore: strings.Join(adh.visibilityMgr.GetStoreNames(), ","), - VersionInfo: metadata.VersionInfo, - FailoverVersionIncrement: metadata.FailoverVersionIncrement, - InitialFailoverVersion: metadata.InitialFailoverVersion, - IsGlobalNamespaceEnabled: metadata.IsGlobalNamespaceEnabled, - }, nil -} - -// ListClusters return information about temporal clusters -// TODO: Remove this API after migrate tctl to use operator handler -func (adh *AdminHandler) ListClusters( - ctx context.Context, - request *adminservice.ListClustersRequest, -) (_ *adminservice.ListClustersResponse, retError error) { - defer log.CapturePanic(adh.logger, &retError) - - if request == nil { - return nil, errRequestNotSet - } - if request.GetPageSize() <= 0 { - request.PageSize = listClustersPageSize - } - - resp, err := adh.clusterMetadataManager.ListClusterMetadata(ctx, &persistence.ListClusterMetadataRequest{ - PageSize: int(request.GetPageSize()), - NextPageToken: request.GetNextPageToken(), - }) - if err != nil { - return nil, err - } - - var clusterMetadataList []*persistencespb.ClusterMetadata - for _, clusterResp := range resp.ClusterMetadata { - clusterMetadataList = append(clusterMetadataList, &clusterResp.ClusterMetadata) - } - return &adminservice.ListClustersResponse{ - Clusters: clusterMetadataList, - NextPageToken: resp.NextPageToken, - }, nil -} - -// ListClusterMembers -// TODO: Remove this API after migrate tctl to use operator handler -func (adh *AdminHandler) ListClusterMembers( - ctx context.Context, - request *adminservice.ListClusterMembersRequest, -) (_ *adminservice.ListClusterMembersResponse, retError error) { - defer log.CapturePanic(adh.logger, &retError) - - if request == nil { - return nil, errRequestNotSet - } - - metadataMgr := adh.clusterMetadataManager - - heartbitRef := request.GetLastHeartbeatWithin() - var heartbit time.Duration - if heartbitRef != nil { - heartbit = *heartbitRef - } - startedTimeRef := request.GetSessionStartedAfterTime() - var startedTime time.Time - if startedTimeRef != nil { - startedTime = *startedTimeRef - } - - resp, err := metadataMgr.GetClusterMembers(ctx, &persistence.GetClusterMembersRequest{ - LastHeartbeatWithin: heartbit, - RPCAddressEquals: net.ParseIP(request.GetRpcAddress()), - HostIDEquals: uuid.Parse(request.GetHostId()), - RoleEquals: persistence.ServiceType(request.GetRole()), - SessionStartedAfter: startedTime, - PageSize: int(request.GetPageSize()), - NextPageToken: request.GetNextPageToken(), - }) - if err != nil { - return nil, err - } - - var activeMembers []*clusterspb.ClusterMember - for _, member := range resp.ActiveMembers { - activeMembers = append(activeMembers, &clusterspb.ClusterMember{ - Role: 
enumsspb.ClusterMemberRole(member.Role), - HostId: member.HostID.String(), - RpcAddress: member.RPCAddress.String(), - RpcPort: int32(member.RPCPort), - SessionStartTime: &member.SessionStart, - LastHeartbitTime: &member.LastHeartbeat, - RecordExpiryTime: &member.RecordExpiry, - }) - } - - return &adminservice.ListClusterMembersResponse{ - ActiveMembers: activeMembers, - NextPageToken: resp.NextPageToken, - }, nil -} - -// AddOrUpdateRemoteCluster -// TODO: Remove this API after migrate tctl to use operator handler -func (adh *AdminHandler) AddOrUpdateRemoteCluster( - ctx context.Context, - request *adminservice.AddOrUpdateRemoteClusterRequest, -) (_ *adminservice.AddOrUpdateRemoteClusterResponse, retError error) { - defer log.CapturePanic(adh.logger, &retError) - - adminClient := adh.clientFactory.NewRemoteAdminClientWithTimeout( - request.GetFrontendAddress(), - admin.DefaultTimeout, - admin.DefaultLargeTimeout, - ) - - // Fetch cluster metadata from remote cluster - resp, err := adminClient.DescribeCluster(ctx, &adminservice.DescribeClusterRequest{}) - if err != nil { - return nil, err - } - - err = adh.validateRemoteClusterMetadata(resp) - if err != nil { - return nil, err - } - - var updateRequestVersion int64 = 0 - clusterMetadataMrg := adh.clusterMetadataManager - clusterData, err := clusterMetadataMrg.GetClusterMetadata( - ctx, - &persistence.GetClusterMetadataRequest{ClusterName: resp.GetClusterName()}, - ) - switch err.(type) { - case nil: - updateRequestVersion = clusterData.Version - case *serviceerror.NotFound: - updateRequestVersion = 0 - default: - return nil, err - } - - applied, err := clusterMetadataMrg.SaveClusterMetadata(ctx, &persistence.SaveClusterMetadataRequest{ - ClusterMetadata: persistencespb.ClusterMetadata{ - ClusterName: resp.GetClusterName(), - HistoryShardCount: resp.GetHistoryShardCount(), - ClusterId: resp.GetClusterId(), - ClusterAddress: request.GetFrontendAddress(), - FailoverVersionIncrement: resp.GetFailoverVersionIncrement(), - InitialFailoverVersion: resp.GetInitialFailoverVersion(), - IsGlobalNamespaceEnabled: resp.GetIsGlobalNamespaceEnabled(), - IsConnectionEnabled: request.GetEnableRemoteClusterConnection(), - }, - Version: updateRequestVersion, - }) - if err != nil { - return nil, err - } - if !applied { - return nil, serviceerror.NewInvalidArgument( - "Cannot update remote cluster due to update immutable fields") - } - return &adminservice.AddOrUpdateRemoteClusterResponse{}, nil -} - -// RemoveRemoteCluster -// TODO: Remove this API after migrate tctl to use operator handler -func (adh *AdminHandler) RemoveRemoteCluster( - ctx context.Context, - request *adminservice.RemoveRemoteClusterRequest, -) (_ *adminservice.RemoveRemoteClusterResponse, retError error) { - defer log.CapturePanic(adh.logger, &retError) - - if err := adh.clusterMetadataManager.DeleteClusterMetadata( - ctx, - &persistence.DeleteClusterMetadataRequest{ClusterName: request.GetClusterName()}, - ); err != nil { - return nil, err - } - return &adminservice.RemoveRemoteClusterResponse{}, nil -} - -// GetReplicationMessages returns new replication tasks since the read level provided in the token. 
-func (adh *AdminHandler) GetReplicationMessages(ctx context.Context, request *adminservice.GetReplicationMessagesRequest) (_ *adminservice.GetReplicationMessagesResponse, retError error) { - defer log.CapturePanic(adh.logger, &retError) - - if request == nil { - return nil, errRequestNotSet - } - if request.GetClusterName() == "" { - return nil, errClusterNameNotSet - } - - resp, err := adh.historyClient.GetReplicationMessages(ctx, &historyservice.GetReplicationMessagesRequest{ - Tokens: request.GetTokens(), - ClusterName: request.GetClusterName(), - }) - if err != nil { - return nil, err - } - return &adminservice.GetReplicationMessagesResponse{ShardMessages: resp.GetShardMessages()}, nil -} - -// GetNamespaceReplicationMessages returns new namespace replication tasks since last retrieved task ID. -func (adh *AdminHandler) GetNamespaceReplicationMessages(ctx context.Context, request *adminservice.GetNamespaceReplicationMessagesRequest) (_ *adminservice.GetNamespaceReplicationMessagesResponse, retError error) { - defer log.CapturePanic(adh.logger, &retError) - - if request == nil { - return nil, errRequestNotSet - } - - if adh.namespaceReplicationQueue == nil { - return nil, errors.New("namespace replication queue not enabled for cluster") - } - - lastMessageID := request.GetLastRetrievedMessageId() - if request.GetLastRetrievedMessageId() == defaultLastMessageID { - if clusterAckLevels, err := adh.namespaceReplicationQueue.GetAckLevels(ctx); err == nil { - if ackLevel, ok := clusterAckLevels[request.GetClusterName()]; ok { - lastMessageID = ackLevel - } - } - } - - replicationTasks, lastMessageID, err := adh.namespaceReplicationQueue.GetReplicationMessages( - ctx, - lastMessageID, - getNamespaceReplicationMessageBatchSize, - ) - if err != nil { - return nil, err - } - - if request.GetLastProcessedMessageId() != defaultLastMessageID { - if err := adh.namespaceReplicationQueue.UpdateAckLevel( - ctx, - request.GetLastProcessedMessageId(), - request.GetClusterName(), - ); err != nil { - adh.logger.Warn("Failed to update namespace replication queue ack level", - tag.TaskID(request.GetLastProcessedMessageId()), - tag.ClusterName(request.GetClusterName())) - } - } - - return &adminservice.GetNamespaceReplicationMessagesResponse{ - Messages: &replicationspb.ReplicationMessages{ - ReplicationTasks: replicationTasks, - LastRetrievedMessageId: lastMessageID, - }, - }, nil -} - -// GetDLQReplicationMessages returns new replication tasks based on the dlq info. 
-func (adh *AdminHandler) GetDLQReplicationMessages(ctx context.Context, request *adminservice.GetDLQReplicationMessagesRequest) (_ *adminservice.GetDLQReplicationMessagesResponse, retError error) { - defer log.CapturePanic(adh.logger, &retError) - - if request == nil { - return nil, errRequestNotSet - } - if len(request.GetTaskInfos()) == 0 { - return nil, errEmptyReplicationInfo - } - - resp, err := adh.historyClient.GetDLQReplicationMessages(ctx, &historyservice.GetDLQReplicationMessagesRequest{TaskInfos: request.GetTaskInfos()}) - if err != nil { - return nil, err - } - return &adminservice.GetDLQReplicationMessagesResponse{ReplicationTasks: resp.GetReplicationTasks()}, nil -} - -// ReapplyEvents applies stale events to the current workflow and the current run -func (adh *AdminHandler) ReapplyEvents(ctx context.Context, request *adminservice.ReapplyEventsRequest) (_ *adminservice.ReapplyEventsResponse, retError error) { - defer log.CapturePanic(adh.logger, &retError) - if request == nil { - return nil, errRequestNotSet - } - if request.WorkflowExecution == nil { - return nil, errExecutionNotSet - } - if request.GetWorkflowExecution().GetWorkflowId() == "" { - return nil, errWorkflowIDNotSet - } - if request.GetEvents() == nil { - return nil, errWorkflowIDNotSet - } - namespaceEntry, err := adh.namespaceRegistry.GetNamespaceByID(namespace.ID(request.GetNamespaceId())) - if err != nil { - return nil, err - } - - _, err = adh.historyClient.ReapplyEvents(ctx, &historyservice.ReapplyEventsRequest{ - NamespaceId: namespaceEntry.ID().String(), - Request: request, - }) - if err != nil { - return nil, err - } - return &adminservice.ReapplyEventsResponse{}, nil -} - -// GetDLQMessages reads messages from DLQ -func (adh *AdminHandler) GetDLQMessages( - ctx context.Context, - request *adminservice.GetDLQMessagesRequest, -) (resp *adminservice.GetDLQMessagesResponse, retErr error) { - defer log.CapturePanic(adh.logger, &retErr) - if request == nil { - return nil, errRequestNotSet - } - - if request.GetMaximumPageSize() <= 0 { - request.MaximumPageSize = common.ReadDLQMessagesPageSize - } - - if request.GetInclusiveEndMessageId() <= 0 { - request.InclusiveEndMessageId = common.EndMessageID - } - - switch request.GetType() { - case enumsspb.DEAD_LETTER_QUEUE_TYPE_REPLICATION: - resp, err := adh.historyClient.GetDLQMessages(ctx, &historyservice.GetDLQMessagesRequest{ - Type: request.GetType(), - ShardId: request.GetShardId(), - SourceCluster: request.GetSourceCluster(), - InclusiveEndMessageId: request.GetInclusiveEndMessageId(), - MaximumPageSize: request.GetMaximumPageSize(), - NextPageToken: request.GetNextPageToken(), - }) - - if resp == nil { - return nil, err - } - - return &adminservice.GetDLQMessagesResponse{ - Type: resp.GetType(), - ReplicationTasks: resp.GetReplicationTasks(), - ReplicationTasksInfo: resp.GetReplicationTasksInfo(), - NextPageToken: resp.GetNextPageToken(), - }, err - case enumsspb.DEAD_LETTER_QUEUE_TYPE_NAMESPACE: - tasks, token, err := adh.namespaceDLQHandler.Read( - ctx, - request.GetInclusiveEndMessageId(), - int(request.GetMaximumPageSize()), - request.GetNextPageToken()) - if err != nil { - return nil, err - } - - return &adminservice.GetDLQMessagesResponse{ - ReplicationTasks: tasks, - NextPageToken: token, - }, nil - default: - return nil, errDLQTypeIsNotSupported - } -} - -// PurgeDLQMessages purge messages from DLQ -func (adh *AdminHandler) PurgeDLQMessages( - ctx context.Context, - request *adminservice.PurgeDLQMessagesRequest, -) (_ 
*adminservice.PurgeDLQMessagesResponse, err error) { - defer log.CapturePanic(adh.logger, &err) - if request == nil { - return nil, errRequestNotSet - } - - if request.GetInclusiveEndMessageId() <= 0 { - request.InclusiveEndMessageId = common.EndMessageID - } - - switch request.GetType() { - case enumsspb.DEAD_LETTER_QUEUE_TYPE_REPLICATION: - resp, err := adh.historyClient.PurgeDLQMessages(ctx, &historyservice.PurgeDLQMessagesRequest{ - Type: request.GetType(), - ShardId: request.GetShardId(), - SourceCluster: request.GetSourceCluster(), - InclusiveEndMessageId: request.GetInclusiveEndMessageId(), - }) - - if resp == nil { - return nil, err - } - - return &adminservice.PurgeDLQMessagesResponse{}, err - case enumsspb.DEAD_LETTER_QUEUE_TYPE_NAMESPACE: - err := adh.namespaceDLQHandler.Purge(ctx, request.GetInclusiveEndMessageId()) - if err != nil { - return nil, err - } - - return &adminservice.PurgeDLQMessagesResponse{}, err - default: - return nil, errDLQTypeIsNotSupported - } -} - -// MergeDLQMessages merges DLQ messages -func (adh *AdminHandler) MergeDLQMessages( - ctx context.Context, - request *adminservice.MergeDLQMessagesRequest, -) (resp *adminservice.MergeDLQMessagesResponse, err error) { - defer log.CapturePanic(adh.logger, &err) - if request == nil { - return nil, errRequestNotSet - } - - if request.GetInclusiveEndMessageId() <= 0 { - request.InclusiveEndMessageId = common.EndMessageID - } - - switch request.GetType() { - case enumsspb.DEAD_LETTER_QUEUE_TYPE_REPLICATION: - resp, err := adh.historyClient.MergeDLQMessages(ctx, &historyservice.MergeDLQMessagesRequest{ - Type: request.GetType(), - ShardId: request.GetShardId(), - SourceCluster: request.GetSourceCluster(), - InclusiveEndMessageId: request.GetInclusiveEndMessageId(), - MaximumPageSize: request.GetMaximumPageSize(), - NextPageToken: request.GetNextPageToken(), - }) - if resp == nil { - return nil, err - } - - return &adminservice.MergeDLQMessagesResponse{ - NextPageToken: request.GetNextPageToken(), - }, nil - case enumsspb.DEAD_LETTER_QUEUE_TYPE_NAMESPACE: - token, err := adh.namespaceDLQHandler.Merge( - ctx, - request.GetInclusiveEndMessageId(), - int(request.GetMaximumPageSize()), - request.GetNextPageToken(), - ) - if err != nil { - return nil, err - } - - return &adminservice.MergeDLQMessagesResponse{ - NextPageToken: token, - }, nil - default: - return nil, errDLQTypeIsNotSupported - } -} - -// RefreshWorkflowTasks re-generates the workflow tasks -func (adh *AdminHandler) RefreshWorkflowTasks( - ctx context.Context, - request *adminservice.RefreshWorkflowTasksRequest, -) (_ *adminservice.RefreshWorkflowTasksResponse, err error) { - defer log.CapturePanic(adh.logger, &err) - - if request == nil { - return nil, errRequestNotSet - } - if err := validateExecution(request.Execution); err != nil { - return nil, err - } - namespaceEntry, err := adh.namespaceRegistry.GetNamespaceByID(namespace.ID(request.GetNamespaceId())) - if err != nil { - return nil, err - } - - _, err = adh.historyClient.RefreshWorkflowTasks(ctx, &historyservice.RefreshWorkflowTasksRequest{ - NamespaceId: namespaceEntry.ID().String(), - Request: request, - }) - if err != nil { - return nil, err - } - return &adminservice.RefreshWorkflowTasksResponse{}, nil -} - -// ResendReplicationTasks requests replication task from remote cluster -func (adh *AdminHandler) ResendReplicationTasks( - ctx context.Context, - request *adminservice.ResendReplicationTasksRequest, -) (_ *adminservice.ResendReplicationTasksResponse, err error) { - defer 
log.CapturePanic(adh.logger, &err) - - if request == nil { - return nil, errRequestNotSet - } - resender := xdc.NewNDCHistoryResender( - adh.namespaceRegistry, - adh.clientBean, - func(ctx context.Context, request *historyservice.ReplicateEventsV2Request) error { - _, err1 := adh.historyClient.ReplicateEventsV2(ctx, request) - return err1 - }, - adh.eventSerializer, - nil, - adh.logger, - ) - if err := resender.SendSingleWorkflowHistory( - ctx, - request.GetRemoteCluster(), - namespace.ID(request.GetNamespaceId()), - request.GetWorkflowId(), - request.GetRunId(), - resendStartEventID, - request.StartVersion, - common.EmptyEventID, - common.EmptyVersion, - ); err != nil { - return nil, err - } - return &adminservice.ResendReplicationTasksResponse{}, nil -} - -// GetTaskQueueTasks returns tasks from task queue -func (adh *AdminHandler) GetTaskQueueTasks( - ctx context.Context, - request *adminservice.GetTaskQueueTasksRequest, -) (_ *adminservice.GetTaskQueueTasksResponse, err error) { - defer log.CapturePanic(adh.logger, &err) - - if request == nil { - return nil, errRequestNotSet - } - - namespaceID, err := adh.namespaceRegistry.GetNamespaceID(namespace.Name(request.GetNamespace())) - if err != nil { - return nil, err - } - - resp, err := adh.taskManager.GetTasks(ctx, &persistence.GetTasksRequest{ - NamespaceID: namespaceID.String(), - TaskQueue: request.GetTaskQueue(), - TaskType: request.GetTaskQueueType(), - InclusiveMinTaskID: request.GetMinTaskId(), - ExclusiveMaxTaskID: request.GetMaxTaskId(), - PageSize: int(request.GetBatchSize()), - NextPageToken: request.NextPageToken, - }) - if err != nil { - return nil, err - } - - return &adminservice.GetTaskQueueTasksResponse{ - Tasks: resp.Tasks, - NextPageToken: resp.NextPageToken, - }, nil -} - -func (adh *AdminHandler) DeleteWorkflowExecution( - ctx context.Context, - request *adminservice.DeleteWorkflowExecutionRequest, -) (_ *adminservice.DeleteWorkflowExecutionResponse, err error) { - defer log.CapturePanic(adh.logger, &err) - - if request == nil { - return nil, errRequestNotSet - } - - if err := validateExecution(request.Execution); err != nil { - return nil, err - } - - namespaceID, err := adh.namespaceRegistry.GetNamespaceID(namespace.Name(request.GetNamespace())) - if err != nil { - return nil, err - } - execution := request.Execution - - shardID := common.WorkflowIDToHistoryShard( - namespaceID.String(), - execution.GetWorkflowId(), - adh.numberOfHistoryShards, - ) - logger := log.With(adh.logger, - tag.WorkflowNamespace(request.Namespace), - tag.WorkflowID(execution.WorkflowId), - tag.WorkflowRunID(execution.RunId), - ) - - if execution.RunId == "" { - resp, err := adh.persistenceExecutionManager.GetCurrentExecution(ctx, &persistence.GetCurrentExecutionRequest{ - ShardID: shardID, - NamespaceID: namespaceID.String(), - WorkflowID: execution.WorkflowId, - }) - if err != nil { - return nil, err - } - execution.RunId = resp.RunID - } - - var warnings []string - var branchTokens [][]byte - var startTime, closeTime *time.Time - cassVisBackend := adh.visibilityMgr.HasStoreName(cassandra.CassandraPersistenceName) - - resp, err := adh.persistenceExecutionManager.GetWorkflowExecution(ctx, &persistence.GetWorkflowExecutionRequest{ - ShardID: shardID, - NamespaceID: namespaceID.String(), - WorkflowID: execution.WorkflowId, - RunID: execution.RunId, - }) - if err != nil { - if common.IsContextCanceledErr(err) || common.IsContextDeadlineExceededErr(err) { - return nil, err - } - // continue to deletion - warnMsg := "Unable to load mutable 
state when deleting workflow execution, " + - "will skip deleting workflow history and cassandra visibility record" - logger.Warn(warnMsg, tag.Error(err)) - warnings = append(warnings, fmt.Sprintf("%s. Error: %v", warnMsg, err.Error())) - } else { - // load necessary information from mutable state - executionInfo := resp.State.GetExecutionInfo() - histories := executionInfo.GetVersionHistories().GetHistories() - branchTokens = make([][]byte, 0, len(histories)) - for _, historyItem := range histories { - branchTokens = append(branchTokens, historyItem.GetBranchToken()) - } - - if cassVisBackend { - if resp.State.ExecutionState.State != enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED { - startTime = executionInfo.GetStartTime() - } else if executionInfo.GetCloseTime() != nil { - closeTime = executionInfo.GetCloseTime() - } else { - completionEvent, err := adh.getWorkflowCompletionEvent(ctx, shardID, resp.State) - if err != nil { - warnMsg := "Unable to load workflow completion event, will skip deleting visibility record" - adh.logger.Warn(warnMsg, tag.Error(err)) - warnings = append(warnings, fmt.Sprintf("%s. Error: %v", warnMsg, err.Error())) - } else { - closeTime = completionEvent.GetEventTime() - } - } - } - } - - if !cassVisBackend || (startTime != nil || closeTime != nil) { - // if using cass visibility, then either start or close time should be non-nil - // NOTE: the deletion is best effort, for sql and cassandra visibility implementation, - // we can't guarantee there's no update or record close request for this workflow since - // visibility queue processing is async. Operator can call this api again to delete visibility - // record again if this happens. - if _, err := adh.historyClient.DeleteWorkflowVisibilityRecord(ctx, &historyservice.DeleteWorkflowVisibilityRecordRequest{ - NamespaceId: namespaceID.String(), - Execution: execution, - WorkflowStartTime: startTime, - WorkflowCloseTime: closeTime, - }); err != nil { - return nil, err - } - } - - if err := adh.persistenceExecutionManager.DeleteCurrentWorkflowExecution(ctx, &persistence.DeleteCurrentWorkflowExecutionRequest{ - ShardID: shardID, - NamespaceID: namespaceID.String(), - WorkflowID: execution.WorkflowId, - RunID: execution.RunId, - }); err != nil { - return nil, err - } - - if err := adh.persistenceExecutionManager.DeleteWorkflowExecution(ctx, &persistence.DeleteWorkflowExecutionRequest{ - ShardID: shardID, - NamespaceID: namespaceID.String(), - WorkflowID: execution.WorkflowId, - RunID: execution.RunId, - }); err != nil { - return nil, err - } - - for _, branchToken := range branchTokens { - if err := adh.persistenceExecutionManager.DeleteHistoryBranch(ctx, &persistence.DeleteHistoryBranchRequest{ - ShardID: shardID, - BranchToken: branchToken, - }); err != nil { - warnMsg := "Failed to delete history branch, skip" - adh.logger.Warn(warnMsg, tag.WorkflowBranchID(string(branchToken)), tag.Error(err)) - warnings = append(warnings, fmt.Sprintf("%s. 
BranchToken: %v, Error: %v", warnMsg, branchToken, err.Error())) - } - } - - return &adminservice.DeleteWorkflowExecutionResponse{ - Warnings: warnings, - }, nil -} - -func (adh *AdminHandler) validateGetWorkflowExecutionRawHistoryV2Request( - request *adminservice.GetWorkflowExecutionRawHistoryV2Request, -) error { - - execution := request.Execution - if execution.GetWorkflowId() == "" { - return errWorkflowIDNotSet - } - // TODO currently, this API is only going to be used by re-send history events - // to remote cluster if kafka is lossy again, in the future, this API can be used - // by CLI and client, then empty runID (meaning the current workflow) should be allowed - if execution.GetRunId() == "" || uuid.Parse(execution.GetRunId()) == nil { - return errInvalidRunID - } - - pageSize := int(request.GetMaximumPageSize()) - if pageSize <= 0 { - return errInvalidPageSize - } - - if request.GetStartEventId() == common.EmptyEventID && - request.GetStartEventVersion() == common.EmptyVersion && - request.GetEndEventId() == common.EmptyEventID && - request.GetEndEventVersion() == common.EmptyVersion { - return errInvalidEventQueryRange - } - - return nil -} - -func (adh *AdminHandler) validateRemoteClusterMetadata(metadata *adminservice.DescribeClusterResponse) error { - // Verify remote cluster config - currentClusterInfo := adh.clusterMetadata - if metadata.GetClusterName() == currentClusterInfo.GetCurrentClusterName() { - // cluster name conflict - return serviceerror.NewInvalidArgument("Cannot update current cluster metadata from rpc calls") - } - if metadata.GetFailoverVersionIncrement() != currentClusterInfo.GetFailoverVersionIncrement() { - // failover version increment is mismatch with current cluster config - return serviceerror.NewInvalidArgument("Cannot add remote cluster due to failover version increment mismatch") - } - if metadata.GetHistoryShardCount() != adh.config.NumHistoryShards { - remoteShardCount := metadata.GetHistoryShardCount() - large := remoteShardCount - small := adh.config.NumHistoryShards - if large < small { - small, large = large, small - } - if large%small != 0 { - return serviceerror.NewInvalidArgument("Remote cluster shard number and local cluster shard number are not multiples.") - } - } - if !metadata.IsGlobalNamespaceEnabled { - // remote cluster doesn't support global namespace - return serviceerror.NewInvalidArgument("Cannot add remote cluster as global namespace is not supported") - } - for clusterName, cluster := range currentClusterInfo.GetAllClusterInfo() { - if clusterName != metadata.ClusterName && cluster.InitialFailoverVersion == metadata.GetInitialFailoverVersion() { - // initial failover version conflict - // best effort: race condition if a concurrent write to db with the same version. 
- return serviceerror.NewInvalidArgument("Cannot add remote cluster due to initial failover version conflict") - } - } - return nil -} - -func (adh *AdminHandler) setRequestDefaultValueAndGetTargetVersionHistory( - request *adminservice.GetWorkflowExecutionRawHistoryV2Request, - versionHistories *historyspb.VersionHistories, -) (*historyspb.VersionHistory, error) { - - targetBranch, err := versionhistory.GetCurrentVersionHistory(versionHistories) - if err != nil { - return nil, err - } - firstItem, err := versionhistory.GetFirstVersionHistoryItem(targetBranch) - if err != nil { - return nil, err - } - lastItem, err := versionhistory.GetLastVersionHistoryItem(targetBranch) - if err != nil { - return nil, err - } - - if request.GetStartEventId() == common.EmptyVersion || request.GetStartEventVersion() == common.EmptyVersion { - // If start event is not set, get the events from the first event - // As the API is exclusive-exclusive, use first event id - 1 here - request.StartEventId = common.FirstEventID - 1 - request.StartEventVersion = firstItem.GetVersion() - } - if request.GetEndEventId() == common.EmptyEventID || request.GetEndEventVersion() == common.EmptyVersion { - // If end event is not set, get the events until the end event - // As the API is exclusive-exclusive, use end event id + 1 here - request.EndEventId = lastItem.GetEventId() + 1 - request.EndEventVersion = lastItem.GetVersion() - } - - if request.GetStartEventId() < 0 { - return nil, errInvalidFirstNextEventCombination - } - - // get branch based on the end event if end event is defined in the request - if request.GetEndEventId() == lastItem.GetEventId()+1 && - request.GetEndEventVersion() == lastItem.GetVersion() { - // this is a special case, target branch remains the same - } else { - endItem := versionhistory.NewVersionHistoryItem(request.GetEndEventId(), request.GetEndEventVersion()) - idx, err := versionhistory.FindFirstVersionHistoryIndexByVersionHistoryItem(versionHistories, endItem) - if err != nil { - return nil, err - } - - targetBranch, err = versionhistory.GetVersionHistory(versionHistories, idx) - if err != nil { - return nil, err - } - } - - startItem := versionhistory.NewVersionHistoryItem(request.GetStartEventId(), request.GetStartEventVersion()) - // If the request start event is defined. The start event may be on a different branch as current branch. - // We need to find the LCA of the start event and the current branch. 
- if request.GetStartEventId() == common.FirstEventID-1 && - request.GetStartEventVersion() == firstItem.GetVersion() { - // this is a special case, start event is on the same branch as target branch - } else { - if !versionhistory.ContainsVersionHistoryItem(targetBranch, startItem) { - idx, err := versionhistory.FindFirstVersionHistoryIndexByVersionHistoryItem(versionHistories, startItem) - if err != nil { - return nil, err - } - startBranch, err := versionhistory.GetVersionHistory(versionHistories, idx) - if err != nil { - return nil, err - } - startItem, err = versionhistory.FindLCAVersionHistoryItem(targetBranch, startBranch) - if err != nil { - return nil, err - } - request.StartEventId = startItem.GetEventId() - request.StartEventVersion = startItem.GetVersion() - } - } - - return targetBranch, nil -} - -// startRequestProfile initiates recording of request metrics -func (adh *AdminHandler) startRequestProfile(operation string) (metrics.Handler, time.Time) { - metricsScope := adh.metricsHandler.WithTags(metrics.OperationTag(operation)) - metricsScope.Counter(metrics.ServiceRequests.GetMetricName()).Record(1) - return metricsScope, time.Now().UTC() -} - -func (adh *AdminHandler) getWorkflowCompletionEvent( - ctx context.Context, - shardID int32, - mutableState *persistencespb.WorkflowMutableState, -) (*historypb.HistoryEvent, error) { - executionInfo := mutableState.GetExecutionInfo() - completionEventID := mutableState.GetNextEventId() - 1 - - currentVersionHistory, err := versionhistory.GetCurrentVersionHistory(executionInfo.VersionHistories) - if err != nil { - return nil, err - } - version, err := versionhistory.GetVersionHistoryEventVersion(currentVersionHistory, completionEventID) - if err != nil { - return nil, err - } - - resp, err := adh.persistenceExecutionManager.ReadHistoryBranch(ctx, &persistence.ReadHistoryBranchRequest{ - ShardID: shardID, - BranchToken: currentVersionHistory.GetBranchToken(), - MinEventID: executionInfo.CompletionEventBatchId, - MaxEventID: completionEventID + 1, - PageSize: 1, - }) - if err != nil { - return nil, err - } - - // find history event from batch and return back single event to caller - for _, e := range resp.HistoryEvents { - if e.EventId == completionEventID && e.Version == version { - return e, nil - } - } - - return nil, serviceerror.NewInternal("Unable to find closed event for workflow") -} - -func (adh *AdminHandler) StreamWorkflowReplicationMessages( - clientCluster adminservice.AdminService_StreamWorkflowReplicationMessagesServer, -) (retError error) { - defer log.CapturePanic(adh.logger, &retError) - - ctxMetadata, ok := metadata.FromIncomingContext(clientCluster.Context()) - if !ok { - return serviceerror.NewInvalidArgument("missing cluster & shard ID metadata") - } - _, serverClusterShardID, err := history.DecodeClusterShardMD(ctxMetadata) - if err != nil { - return err - } - - logger := log.With(adh.logger, tag.ShardID(serverClusterShardID.ShardID)) - logger.Info("AdminStreamReplicationMessages started.") - defer logger.Info("AdminStreamReplicationMessages stopped.") - - ctx := clientCluster.Context() - serverCluster, err := adh.historyClient.StreamWorkflowReplicationMessages(ctx) - if err != nil { - return err - } - - shutdownChan := channel.NewShutdownOnce() - go func() { - defer shutdownChan.Shutdown() - - for !shutdownChan.IsShutdown() { - req, err := clientCluster.Recv() - if err != nil { - logger.Info("AdminStreamReplicationMessages client -> server encountered error", tag.Error(err)) - return - } - switch attr := 
req.GetAttributes().(type) { - case *adminservice.StreamWorkflowReplicationMessagesRequest_SyncReplicationState: - if err = serverCluster.Send(&historyservice.StreamWorkflowReplicationMessagesRequest{ - Attributes: &historyservice.StreamWorkflowReplicationMessagesRequest_SyncReplicationState{ - SyncReplicationState: attr.SyncReplicationState, - }, - }); err != nil { - logger.Info("AdminStreamReplicationMessages client -> server encountered error", tag.Error(err)) - return - } - default: - logger.Info("AdminStreamReplicationMessages client -> server encountered error", tag.Error(serviceerror.NewInternal(fmt.Sprintf( - "StreamWorkflowReplicationMessages encountered unknown type: %T %v", attr, attr, - )))) - return - } - } - }() - go func() { - defer shutdownChan.Shutdown() - - for !shutdownChan.IsShutdown() { - resp, err := serverCluster.Recv() - if err != nil { - logger.Info("AdminStreamReplicationMessages server -> client encountered error", tag.Error(err)) - return - } - switch attr := resp.GetAttributes().(type) { - case *historyservice.StreamWorkflowReplicationMessagesResponse_Messages: - if err = clientCluster.Send(&adminservice.StreamWorkflowReplicationMessagesResponse{ - Attributes: &adminservice.StreamWorkflowReplicationMessagesResponse_Messages{ - Messages: attr.Messages, - }, - }); err != nil { - logger.Info("AdminStreamReplicationMessages server -> client encountered error", tag.Error(err)) - return - } - default: - logger.Info("AdminStreamReplicationMessages server -> client encountered error", tag.Error(serviceerror.NewInternal(fmt.Sprintf( - "StreamWorkflowReplicationMessages encountered unknown type: %T %v", attr, attr, - )))) - return - } - } - }() - <-shutdownChan.Channel() - return nil -} diff -Nru temporal-1.21.5-1/src/service/frontend/adminHandler_test.go temporal-1.22.5/src/service/frontend/adminHandler_test.go --- temporal-1.21.5-1/src/service/frontend/adminHandler_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/frontend/adminHandler_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,1542 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- -package frontend - -import ( - "context" - "errors" - "fmt" - "math/rand" - "sync" - "testing" - "time" - - "google.golang.org/grpc/metadata" - - historyclient "go.temporal.io/server/client/history" - "go.temporal.io/server/common/clock" - "go.temporal.io/server/common/persistence/visibility/store/standard/cassandra" - "go.temporal.io/server/common/primitives" - "go.temporal.io/server/common/resourcetest" - - "google.golang.org/grpc/health" - - "go.temporal.io/server/api/adminservicemock/v1" - "go.temporal.io/server/api/enums/v1" - persistencespb "go.temporal.io/server/api/persistence/v1" - "go.temporal.io/server/common/cluster" - "go.temporal.io/server/common/membership" - "go.temporal.io/server/common/primitives/timestamp" - "go.temporal.io/server/common/testing/mocksdk" - - "github.com/golang/mock/gomock" - "github.com/pborman/uuid" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - commonpb "go.temporal.io/api/common/v1" - enumspb "go.temporal.io/api/enums/v1" - historypb "go.temporal.io/api/history/v1" - "go.temporal.io/api/serviceerror" - "go.temporal.io/api/workflowservice/v1" - - "go.temporal.io/server/api/adminservice/v1" - historyspb "go.temporal.io/server/api/history/v1" - "go.temporal.io/server/api/historyservice/v1" - "go.temporal.io/server/api/historyservicemock/v1" - clientmocks "go.temporal.io/server/client" - "go.temporal.io/server/common" - "go.temporal.io/server/common/config" - "go.temporal.io/server/common/namespace" - "go.temporal.io/server/common/persistence" - "go.temporal.io/server/common/persistence/serialization" - "go.temporal.io/server/common/persistence/versionhistory" - "go.temporal.io/server/common/persistence/visibility/manager" - "go.temporal.io/server/common/persistence/visibility/store/elasticsearch" - "go.temporal.io/server/common/searchattribute" -) - -type ( - adminHandlerSuite struct { - suite.Suite - *require.Assertions - - controller *gomock.Controller - mockResource *resourcetest.Test - mockHistoryClient *historyservicemock.MockHistoryServiceClient - mockNamespaceCache *namespace.MockRegistry - - mockExecutionMgr *persistence.MockExecutionManager - mockVisibilityMgr *manager.MockVisibilityManager - mockClusterMetadataManager *persistence.MockClusterMetadataManager - mockClientFactory *clientmocks.MockFactory - mockAdminClient *adminservicemock.MockAdminServiceClient - mockMetadata *cluster.MockMetadata - mockProducer *persistence.MockNamespaceReplicationQueue - - namespace namespace.Name - namespaceID namespace.ID - namespaceEntry *namespace.Namespace - - handler *AdminHandler - } -) - -func TestAdminHandlerSuite(t *testing.T) { - s := new(adminHandlerSuite) - suite.Run(t, s) -} - -func (s *adminHandlerSuite) SetupTest() { - s.Assertions = require.New(s.T()) - - s.namespace = "some random namespace name" - s.namespaceID = "deadd0d0-c001-face-d00d-000000000000" - s.namespaceEntry = namespace.NewNamespaceForTest( - &persistencespb.NamespaceInfo{ - Name: s.namespace.String(), - Id: s.namespaceID.String(), - }, - nil, - false, - nil, - int64(100), - ) - - s.controller = gomock.NewController(s.T()) - s.mockResource = resourcetest.NewTest(s.controller, primitives.FrontendService) - s.mockNamespaceCache = s.mockResource.NamespaceCache - s.mockHistoryClient = s.mockResource.HistoryClient - s.mockExecutionMgr = s.mockResource.ExecutionMgr - s.mockClusterMetadataManager = s.mockResource.ClusterMetadataMgr - s.mockClientFactory = s.mockResource.ClientFactory - s.mockAdminClient = 
adminservicemock.NewMockAdminServiceClient(s.controller) - s.mockMetadata = s.mockResource.ClusterMetadata - s.mockVisibilityMgr = s.mockResource.VisibilityManager - s.mockProducer = persistence.NewMockNamespaceReplicationQueue(s.controller) - - persistenceConfig := &config.Persistence{ - NumHistoryShards: 1, - } - - cfg := &Config{ - NumHistoryShards: 4, - } - args := NewAdminHandlerArgs{ - persistenceConfig, - cfg, - s.mockResource.GetNamespaceReplicationQueue(), - s.mockProducer, - s.mockResource.ESClient, - s.mockResource.GetVisibilityManager(), - s.mockResource.GetLogger(), - s.mockResource.GetExecutionManager(), - s.mockResource.GetTaskManager(), - s.mockResource.GetClusterMetadataManager(), - s.mockResource.GetMetadataManager(), - s.mockResource.GetClientFactory(), - s.mockResource.GetClientBean(), - s.mockResource.GetHistoryClient(), - s.mockResource.GetSDKClientFactory(), - s.mockResource.GetMembershipMonitor(), - s.mockResource.GetHostInfoProvider(), - s.mockResource.GetArchiverProvider(), - s.mockResource.GetMetricsHandler(), - s.mockResource.GetNamespaceRegistry(), - s.mockResource.GetSearchAttributesProvider(), - s.mockResource.GetSearchAttributesManager(), - s.mockMetadata, - s.mockResource.GetArchivalMetadata(), - health.NewServer(), - serialization.NewSerializer(), - clock.NewRealTimeSource(), - } - s.mockMetadata.EXPECT().GetCurrentClusterName().Return(uuid.New()).AnyTimes() - s.handler = NewAdminHandler(args) - s.handler.Start() -} - -func (s *adminHandlerSuite) TearDownTest() { - s.controller.Finish() - s.handler.Stop() -} - -func (s *adminHandlerSuite) Test_GetWorkflowExecutionRawHistoryV2_FailedOnInvalidWorkflowID() { - ctx := context.Background() - _, err := s.handler.GetWorkflowExecutionRawHistoryV2(ctx, - &adminservice.GetWorkflowExecutionRawHistoryV2Request{ - NamespaceId: s.namespaceID.String(), - Execution: &commonpb.WorkflowExecution{ - WorkflowId: "", - RunId: uuid.New(), - }, - StartEventId: 1, - StartEventVersion: 100, - EndEventId: 10, - EndEventVersion: 100, - MaximumPageSize: 1, - NextPageToken: nil, - }) - s.Error(err) -} - -func (s *adminHandlerSuite) Test_GetWorkflowExecutionRawHistoryV2_FailedOnInvalidRunID() { - ctx := context.Background() - _, err := s.handler.GetWorkflowExecutionRawHistoryV2(ctx, - &adminservice.GetWorkflowExecutionRawHistoryV2Request{ - NamespaceId: s.namespaceID.String(), - Execution: &commonpb.WorkflowExecution{ - WorkflowId: "workflowID", - RunId: "runID", - }, - StartEventId: 1, - StartEventVersion: 100, - EndEventId: 10, - EndEventVersion: 100, - MaximumPageSize: 1, - NextPageToken: nil, - }) - s.Error(err) -} - -func (s *adminHandlerSuite) Test_GetWorkflowExecutionRawHistoryV2_FailedOnInvalidSize() { - ctx := context.Background() - _, err := s.handler.GetWorkflowExecutionRawHistoryV2(ctx, - &adminservice.GetWorkflowExecutionRawHistoryV2Request{ - NamespaceId: s.namespaceID.String(), - Execution: &commonpb.WorkflowExecution{ - WorkflowId: "workflowID", - RunId: uuid.New(), - }, - StartEventId: 1, - StartEventVersion: 100, - EndEventId: 10, - EndEventVersion: 100, - MaximumPageSize: -1, - NextPageToken: nil, - }) - s.Error(err) -} - -func (s *adminHandlerSuite) Test_GetWorkflowExecutionRawHistoryV2_FailedOnNamespaceCache() { - ctx := context.Background() - s.mockNamespaceCache.EXPECT().GetNamespaceByID(s.namespaceID).Return(nil, fmt.Errorf("test")) - _, err := s.handler.GetWorkflowExecutionRawHistoryV2(ctx, - &adminservice.GetWorkflowExecutionRawHistoryV2Request{ - NamespaceId: s.namespaceID.String(), - Execution: 
&commonpb.WorkflowExecution{ - WorkflowId: "workflowID", - RunId: uuid.New(), - }, - StartEventId: 1, - StartEventVersion: 100, - EndEventId: 10, - EndEventVersion: 100, - MaximumPageSize: 1, - NextPageToken: nil, - }) - s.Error(err) -} - -func (s *adminHandlerSuite) Test_GetWorkflowExecutionRawHistoryV2() { - ctx := context.Background() - s.mockNamespaceCache.EXPECT().GetNamespaceByID(s.namespaceID).Return(s.namespaceEntry, nil).AnyTimes() - branchToken := []byte{1} - versionHistory := versionhistory.NewVersionHistory(branchToken, []*historyspb.VersionHistoryItem{ - versionhistory.NewVersionHistoryItem(int64(10), int64(100)), - }) - versionHistories := versionhistory.NewVersionHistories(versionHistory) - mState := &historyservice.GetMutableStateResponse{ - NextEventId: 11, - CurrentBranchToken: branchToken, - VersionHistories: versionHistories, - } - s.mockHistoryClient.EXPECT().GetMutableState(gomock.Any(), gomock.Any()).Return(mState, nil).AnyTimes() - - s.mockExecutionMgr.EXPECT().ReadRawHistoryBranch(gomock.Any(), gomock.Any()).Return(&persistence.ReadRawHistoryBranchResponse{ - HistoryEventBlobs: []*commonpb.DataBlob{}, - NextPageToken: []byte{}, - Size: 0, - }, nil) - _, err := s.handler.GetWorkflowExecutionRawHistoryV2(ctx, - &adminservice.GetWorkflowExecutionRawHistoryV2Request{ - NamespaceId: s.namespaceID.String(), - Execution: &commonpb.WorkflowExecution{ - WorkflowId: "workflowID", - RunId: uuid.New(), - }, - StartEventId: 1, - StartEventVersion: 100, - EndEventId: 10, - EndEventVersion: 100, - MaximumPageSize: 10, - NextPageToken: nil, - }) - s.NoError(err) -} - -func (s *adminHandlerSuite) Test_GetWorkflowExecutionRawHistoryV2_SameStartIDAndEndID() { - ctx := context.Background() - s.mockNamespaceCache.EXPECT().GetNamespaceByID(s.namespaceID).Return(s.namespaceEntry, nil).AnyTimes() - branchToken := []byte{1} - versionHistory := versionhistory.NewVersionHistory(branchToken, []*historyspb.VersionHistoryItem{ - versionhistory.NewVersionHistoryItem(int64(10), int64(100)), - }) - versionHistories := versionhistory.NewVersionHistories(versionHistory) - mState := &historyservice.GetMutableStateResponse{ - NextEventId: 11, - CurrentBranchToken: branchToken, - VersionHistories: versionHistories, - } - s.mockHistoryClient.EXPECT().GetMutableState(gomock.Any(), gomock.Any()).Return(mState, nil).AnyTimes() - - resp, err := s.handler.GetWorkflowExecutionRawHistoryV2(ctx, - &adminservice.GetWorkflowExecutionRawHistoryV2Request{ - NamespaceId: s.namespaceID.String(), - Execution: &commonpb.WorkflowExecution{ - WorkflowId: "workflowID", - RunId: uuid.New(), - }, - StartEventId: 10, - StartEventVersion: 100, - EndEventId: common.EmptyEventID, - EndEventVersion: common.EmptyVersion, - MaximumPageSize: 1, - NextPageToken: nil, - }) - s.Nil(resp.NextPageToken) - s.NoError(err) -} - -func (s *adminHandlerSuite) Test_SetRequestDefaultValueAndGetTargetVersionHistory_DefinedStartAndEnd() { - inputStartEventID := int64(1) - inputStartVersion := int64(10) - inputEndEventID := int64(100) - inputEndVersion := int64(11) - firstItem := versionhistory.NewVersionHistoryItem(inputStartEventID, inputStartVersion) - endItem := versionhistory.NewVersionHistoryItem(inputEndEventID, inputEndVersion) - versionHistory := versionhistory.NewVersionHistory([]byte{}, []*historyspb.VersionHistoryItem{firstItem, endItem}) - versionHistories := versionhistory.NewVersionHistories(versionHistory) - request := &adminservice.GetWorkflowExecutionRawHistoryV2Request{ - NamespaceId: s.namespaceID.String(), - Execution: 
&commonpb.WorkflowExecution{ - WorkflowId: "workflowID", - RunId: uuid.New(), - }, - StartEventId: inputStartEventID, - StartEventVersion: inputStartVersion, - EndEventId: inputEndEventID, - EndEventVersion: inputEndVersion, - MaximumPageSize: 10, - NextPageToken: nil, - } - - targetVersionHistory, err := s.handler.setRequestDefaultValueAndGetTargetVersionHistory( - request, - versionHistories, - ) - s.Equal(request.GetStartEventId(), inputStartEventID) - s.Equal(request.GetEndEventId(), inputEndEventID) - s.Equal(targetVersionHistory, versionHistory) - s.NoError(err) -} - -func (s *adminHandlerSuite) Test_SetRequestDefaultValueAndGetTargetVersionHistory_DefinedEndEvent() { - inputStartEventID := int64(1) - inputEndEventID := int64(100) - inputStartVersion := int64(10) - inputEndVersion := int64(11) - firstItem := versionhistory.NewVersionHistoryItem(inputStartEventID, inputStartVersion) - targetItem := versionhistory.NewVersionHistoryItem(inputEndEventID, inputEndVersion) - versionHistory := versionhistory.NewVersionHistory([]byte{}, []*historyspb.VersionHistoryItem{firstItem, targetItem}) - versionHistories := versionhistory.NewVersionHistories(versionHistory) - request := &adminservice.GetWorkflowExecutionRawHistoryV2Request{ - NamespaceId: s.namespaceID.String(), - Execution: &commonpb.WorkflowExecution{ - WorkflowId: "workflowID", - RunId: uuid.New(), - }, - StartEventId: common.EmptyEventID, - StartEventVersion: common.EmptyVersion, - EndEventId: inputEndEventID, - EndEventVersion: inputEndVersion, - MaximumPageSize: 10, - NextPageToken: nil, - } - - targetVersionHistory, err := s.handler.setRequestDefaultValueAndGetTargetVersionHistory( - request, - versionHistories, - ) - s.Equal(request.GetStartEventId(), inputStartEventID-1) - s.Equal(request.GetEndEventId(), inputEndEventID) - s.Equal(targetVersionHistory, versionHistory) - s.NoError(err) -} - -func (s *adminHandlerSuite) Test_SetRequestDefaultValueAndGetTargetVersionHistory_DefinedStartEvent() { - inputStartEventID := int64(1) - inputEndEventID := int64(100) - inputStartVersion := int64(10) - inputEndVersion := int64(11) - firstItem := versionhistory.NewVersionHistoryItem(inputStartEventID, inputStartVersion) - targetItem := versionhistory.NewVersionHistoryItem(inputEndEventID, inputEndVersion) - versionHistory := versionhistory.NewVersionHistory([]byte{}, []*historyspb.VersionHistoryItem{firstItem, targetItem}) - versionHistories := versionhistory.NewVersionHistories(versionHistory) - request := &adminservice.GetWorkflowExecutionRawHistoryV2Request{ - NamespaceId: s.namespaceID.String(), - Execution: &commonpb.WorkflowExecution{ - WorkflowId: "workflowID", - RunId: uuid.New(), - }, - StartEventId: inputStartEventID, - StartEventVersion: inputStartVersion, - EndEventId: common.EmptyEventID, - EndEventVersion: common.EmptyVersion, - MaximumPageSize: 10, - NextPageToken: nil, - } - - targetVersionHistory, err := s.handler.setRequestDefaultValueAndGetTargetVersionHistory( - request, - versionHistories, - ) - s.Equal(request.GetStartEventId(), inputStartEventID) - s.Equal(request.GetEndEventId(), inputEndEventID+1) - s.Equal(targetVersionHistory, versionHistory) - s.NoError(err) -} - -func (s *adminHandlerSuite) Test_SetRequestDefaultValueAndGetTargetVersionHistory_NonCurrentBranch() { - inputStartEventID := int64(1) - inputEndEventID := int64(100) - inputStartVersion := int64(10) - inputEndVersion := int64(101) - item1 := versionhistory.NewVersionHistoryItem(inputStartEventID, inputStartVersion) - item2 := 
versionhistory.NewVersionHistoryItem(inputEndEventID, inputEndVersion) - versionHistory1 := versionhistory.NewVersionHistory([]byte{}, []*historyspb.VersionHistoryItem{item1, item2}) - item3 := versionhistory.NewVersionHistoryItem(int64(10), int64(20)) - item4 := versionhistory.NewVersionHistoryItem(int64(20), int64(51)) - versionHistory2 := versionhistory.NewVersionHistory([]byte{}, []*historyspb.VersionHistoryItem{item1, item3, item4}) - versionHistories := versionhistory.NewVersionHistories(versionHistory1) - _, _, err := versionhistory.AddVersionHistory(versionHistories, versionHistory2) - s.NoError(err) - request := &adminservice.GetWorkflowExecutionRawHistoryV2Request{ - NamespaceId: s.namespaceID.String(), - Execution: &commonpb.WorkflowExecution{ - WorkflowId: "workflowID", - RunId: uuid.New(), - }, - StartEventId: 9, - StartEventVersion: 20, - EndEventId: inputEndEventID, - EndEventVersion: inputEndVersion, - MaximumPageSize: 10, - NextPageToken: nil, - } - - targetVersionHistory, err := s.handler.setRequestDefaultValueAndGetTargetVersionHistory( - request, - versionHistories, - ) - s.Equal(request.GetStartEventId(), inputStartEventID) - s.Equal(request.GetEndEventId(), inputEndEventID) - s.Equal(targetVersionHistory, versionHistory1) - s.NoError(err) -} - -func (s *adminHandlerSuite) Test_AddSearchAttributes() { - handler := s.handler - ctx := context.Background() - - type test struct { - Name string - Request *adminservice.AddSearchAttributesRequest - Expected error - } - // request validation tests - testCases1 := []test{ - { - Name: "nil request", - Request: nil, - Expected: &serviceerror.InvalidArgument{Message: "Request is nil."}, - }, - { - Name: "empty request", - Request: &adminservice.AddSearchAttributesRequest{}, - Expected: &serviceerror.InvalidArgument{Message: "SearchAttributes are not set on request."}, - }, - } - for _, testCase := range testCases1 { - s.T().Run(testCase.Name, func(t *testing.T) { - resp, err := handler.AddSearchAttributes(ctx, testCase.Request) - s.Equal(testCase.Expected, err) - s.Nil(resp) - }) - } - - // Elasticsearch is not configured - s.mockVisibilityMgr.EXPECT().GetIndexName().Return("").AnyTimes() - s.mockResource.SearchAttributesProvider.EXPECT().GetSearchAttributes("", true).Return(searchattribute.TestNameTypeMap, nil).AnyTimes() - testCases3 := []test{ - { - Name: "reserved key (empty index)", - Request: &adminservice.AddSearchAttributesRequest{ - SearchAttributes: map[string]enumspb.IndexedValueType{ - "WorkflowId": enumspb.INDEXED_VALUE_TYPE_TEXT, - }, - }, - Expected: &serviceerror.InvalidArgument{Message: "Search attribute WorkflowId is reserved by system."}, - }, - { - Name: "key already whitelisted (empty index)", - Request: &adminservice.AddSearchAttributesRequest{ - SearchAttributes: map[string]enumspb.IndexedValueType{ - "CustomTextField": enumspb.INDEXED_VALUE_TYPE_TEXT, - }, - }, - Expected: &serviceerror.InvalidArgument{Message: "Search attribute CustomTextField already exists."}, - }, - } - for _, testCase := range testCases3 { - s.T().Run(testCase.Name, func(t *testing.T) { - resp, err := handler.AddSearchAttributes(ctx, testCase.Request) - s.Equal(testCase.Expected, err) - s.Nil(resp) - }) - } - - // Configure Elasticsearch: add advanced visibility store config with index name. 
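// [Illustrative sketch — not part of the upstream diff.] The table-driven cases above
// exercise the request validation behind AddSearchAttributes: reject nil/empty requests,
// reject names reserved by the system, and reject attributes that are already registered.
// A simplified, string-keyed sketch of those checks (plain map[string]string in place of
// map[string]enumspb.IndexedValueType); the error strings match what the tests assert.
package main

import (
	"errors"
	"fmt"
)

// reservedNames is an illustrative subset of the system search attributes.
var reservedNames = map[string]struct{}{
	"WorkflowId":   {},
	"WorkflowType": {},
	"RunId":        {},
}

// validateAddSearchAttributes mirrors the validation the tests above pin down.
func validateAddSearchAttributes(requested, existing map[string]string) error {
	if len(requested) == 0 {
		return errors.New("SearchAttributes are not set on request.")
	}
	for name := range requested {
		if _, ok := reservedNames[name]; ok {
			return fmt.Errorf("Search attribute %s is reserved by system.", name)
		}
		if _, ok := existing[name]; ok {
			return fmt.Errorf("Search attribute %s already exists.", name)
		}
	}
	return nil
}

func main() {
	existing := map[string]string{"CustomTextField": "Text"}
	fmt.Println(validateAddSearchAttributes(map[string]string{"WorkflowId": "Text"}, existing))      // reserved
	fmt.Println(validateAddSearchAttributes(map[string]string{"CustomTextField": "Text"}, existing)) // already exists
	fmt.Println(validateAddSearchAttributes(map[string]string{"CustomAttr": "Keyword"}, existing))   // ok -> <nil>
}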
- s.mockVisibilityMgr.EXPECT().GetIndexName().Return("random-index-name").AnyTimes() - s.mockResource.SearchAttributesProvider.EXPECT().GetSearchAttributes("random-index-name", true).Return(searchattribute.TestNameTypeMap, nil).AnyTimes() - testCases2 := []test{ - { - Name: "reserved key (ES configured)", - Request: &adminservice.AddSearchAttributesRequest{ - SearchAttributes: map[string]enumspb.IndexedValueType{ - "WorkflowId": enumspb.INDEXED_VALUE_TYPE_TEXT, - }, - }, - Expected: &serviceerror.InvalidArgument{Message: "Search attribute WorkflowId is reserved by system."}, - }, - { - Name: "key already whitelisted (ES configured)", - Request: &adminservice.AddSearchAttributesRequest{ - SearchAttributes: map[string]enumspb.IndexedValueType{ - "CustomTextField": enumspb.INDEXED_VALUE_TYPE_TEXT, - }, - }, - Expected: &serviceerror.InvalidArgument{Message: "Search attribute CustomTextField already exists."}, - }, - } - for _, testCase := range testCases2 { - s.T().Run(testCase.Name, func(t *testing.T) { - resp, err := handler.AddSearchAttributes(ctx, testCase.Request) - s.Equal(testCase.Expected, err) - s.Nil(resp) - }) - } - - mockSdkClient := mocksdk.NewMockClient(s.controller) - s.mockResource.SDKClientFactory.EXPECT().GetSystemClient().Return(mockSdkClient).AnyTimes() - s.mockVisibilityMgr.EXPECT().HasStoreName(elasticsearch.PersistenceName).Return(true).AnyTimes() - - // Start workflow failed. - mockSdkClient.EXPECT().ExecuteWorkflow(gomock.Any(), gomock.Any(), "temporal-sys-add-search-attributes-workflow", gomock.Any()).Return(nil, errors.New("start failed")) - resp, err := handler.AddSearchAttributes(ctx, &adminservice.AddSearchAttributesRequest{ - SearchAttributes: map[string]enumspb.IndexedValueType{ - "CustomAttr": enumspb.INDEXED_VALUE_TYPE_KEYWORD, - }, - }) - s.Error(err) - s.Equal("Unable to start temporal-sys-add-search-attributes-workflow workflow: start failed.", err.Error()) - s.Nil(resp) - - // Workflow failed. - mockRun := mocksdk.NewMockWorkflowRun(s.controller) - mockRun.EXPECT().Get(gomock.Any(), nil).Return(errors.New("workflow failed")) - mockSdkClient.EXPECT().ExecuteWorkflow(gomock.Any(), gomock.Any(), "temporal-sys-add-search-attributes-workflow", gomock.Any()).Return(mockRun, nil) - resp, err = handler.AddSearchAttributes(ctx, &adminservice.AddSearchAttributesRequest{ - SearchAttributes: map[string]enumspb.IndexedValueType{ - "CustomAttr": enumspb.INDEXED_VALUE_TYPE_KEYWORD, - }, - }) - s.Error(err) - s.Equal("Workflow temporal-sys-add-search-attributes-workflow returned an error: workflow failed.", err.Error()) - s.Nil(resp) - - // Success case. 
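// [Illustrative sketch — not part of the upstream diff.] The failure cases above mock the
// two steps taken when an attribute has to be added to Elasticsearch: start the
// temporal-sys-add-search-attributes-workflow system workflow through an SDK client, then
// block on WorkflowRun.Get until it finishes. This hedged sketch uses the public Go SDK;
// the host/port, workflow ID, and task queue are placeholders, and the real handler uses
// an internal "system" SDK client rather than dialing the frontend like this.
package main

import (
	"context"
	"fmt"
	"log"

	"go.temporal.io/sdk/client"
)

func main() {
	// Placeholder connection options for illustration only.
	c, err := client.Dial(client.Options{HostPort: "localhost:7233"})
	if err != nil {
		log.Fatalln("unable to dial Temporal frontend:", err)
	}
	defer c.Close()

	ctx := context.Background()
	run, err := c.ExecuteWorkflow(ctx, client.StartWorkflowOptions{
		ID:        "add-search-attributes-example", // placeholder workflow ID
		TaskQueue: "example-task-queue",            // placeholder task queue
	}, "temporal-sys-add-search-attributes-workflow")
	if err != nil {
		// Corresponds to the "Unable to start ... workflow" error asserted above.
		log.Fatalln("unable to start workflow:", err)
	}
	// Corresponds to the "Workflow ... returned an error" case asserted above.
	if err := run.Get(ctx, nil); err != nil {
		log.Fatalln("workflow returned an error:", err)
	}
	fmt.Println("search attributes added")
}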
- mockRun.EXPECT().Get(gomock.Any(), nil).Return(nil) - mockSdkClient.EXPECT().ExecuteWorkflow(gomock.Any(), gomock.Any(), "temporal-sys-add-search-attributes-workflow", gomock.Any()).Return(mockRun, nil) - - resp, err = handler.AddSearchAttributes(ctx, &adminservice.AddSearchAttributesRequest{ - SearchAttributes: map[string]enumspb.IndexedValueType{ - "CustomAttr": enumspb.INDEXED_VALUE_TYPE_KEYWORD, - }, - }) - s.NoError(err) - s.NotNil(resp) -} - -func (s *adminHandlerSuite) Test_GetSearchAttributes_EmptyIndexName() { - handler := s.handler - ctx := context.Background() - - resp, err := handler.GetSearchAttributes(ctx, nil) - s.Error(err) - s.Equal(&serviceerror.InvalidArgument{Message: "Request is nil."}, err) - s.Nil(resp) - - mockSdkClient := mocksdk.NewMockClient(s.controller) - s.mockResource.SDKClientFactory.EXPECT().GetSystemClient().Return(mockSdkClient).AnyTimes() - s.mockNamespaceCache.EXPECT().GetNamespace(s.namespace).Return(s.namespaceEntry, nil).AnyTimes() - - // Elasticsearch is not configured - s.mockVisibilityMgr.EXPECT().HasStoreName(elasticsearch.PersistenceName).Return(true).AnyTimes() - s.mockVisibilityMgr.EXPECT().GetIndexName().Return("").AnyTimes() - mockSdkClient.EXPECT().DescribeWorkflowExecution(gomock.Any(), "temporal-sys-add-search-attributes-workflow", "").Return( - &workflowservice.DescribeWorkflowExecutionResponse{}, nil) - s.mockResource.ESClient.EXPECT().GetMapping(gomock.Any(), "").Return(map[string]string{"col": "type"}, nil) - s.mockResource.SearchAttributesProvider.EXPECT().GetSearchAttributes("", true).Return(searchattribute.TestNameTypeMap, nil).AnyTimes() - - resp, err = handler.GetSearchAttributes(ctx, &adminservice.GetSearchAttributesRequest{Namespace: s.namespace.String()}) - s.NoError(err) - s.NotNil(resp) -} - -func (s *adminHandlerSuite) Test_GetSearchAttributes_NonEmptyIndexName() { - handler := s.handler - ctx := context.Background() - - mockSdkClient := mocksdk.NewMockClient(s.controller) - s.mockResource.SDKClientFactory.EXPECT().GetSystemClient().Return(mockSdkClient).AnyTimes() - - // Configure Elasticsearch: add advanced visibility store config with index name. 
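// [Illustrative sketch — not part of the upstream diff.] Test_GetSearchAttributes_EmptyIndexName
// above and the non-empty-index cases that follow pin down how the handler picks the
// Elasticsearch index to describe: an explicit IndexName on the request wins, otherwise it
// falls back to the visibility manager's configured index. A minimal sketch of that
// selection, with a stand-in interface for the single method this decision needs.
package main

import "fmt"

// visibilityIndexer stands in for the relevant slice of manager.VisibilityManager.
type visibilityIndexer interface {
	GetIndexName() string
}

type fakeVisibility struct{ index string }

func (f fakeVisibility) GetIndexName() string { return f.index }

// resolveIndexName prefers the caller-supplied index, falling back to the
// visibility store's default, mirroring the behaviour the tests assert.
func resolveIndexName(requested string, v visibilityIndexer) string {
	if requested != "" {
		return requested
	}
	return v.GetIndexName()
}

func main() {
	v := fakeVisibility{index: "random-index-name"}
	fmt.Println(resolveIndexName("", v))                   // random-index-name
	fmt.Println(resolveIndexName("another-index-name", v)) // another-index-name
}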
- s.mockVisibilityMgr.EXPECT().HasStoreName(elasticsearch.PersistenceName).Return(true).AnyTimes() - s.mockVisibilityMgr.EXPECT().GetIndexName().Return("random-index-name").AnyTimes() - - mockSdkClient.EXPECT().DescribeWorkflowExecution(gomock.Any(), "temporal-sys-add-search-attributes-workflow", "").Return( - &workflowservice.DescribeWorkflowExecutionResponse{}, nil) - s.mockResource.ESClient.EXPECT().GetMapping(gomock.Any(), "random-index-name").Return(map[string]string{"col": "type"}, nil) - s.mockResource.SearchAttributesProvider.EXPECT().GetSearchAttributes("random-index-name", true).Return(searchattribute.TestNameTypeMap, nil).AnyTimes() - resp, err := handler.GetSearchAttributes(ctx, &adminservice.GetSearchAttributesRequest{}) - s.NoError(err) - s.NotNil(resp) - - mockSdkClient.EXPECT().DescribeWorkflowExecution(gomock.Any(), "temporal-sys-add-search-attributes-workflow", "").Return( - &workflowservice.DescribeWorkflowExecutionResponse{}, nil) - s.mockResource.ESClient.EXPECT().GetMapping(gomock.Any(), "another-index-name").Return(map[string]string{"col": "type"}, nil) - s.mockResource.SearchAttributesProvider.EXPECT().GetSearchAttributes("another-index-name", true).Return(searchattribute.TestNameTypeMap, nil).AnyTimes() - resp, err = handler.GetSearchAttributes(ctx, &adminservice.GetSearchAttributesRequest{IndexName: "another-index-name"}) - s.NoError(err) - s.NotNil(resp) - - mockSdkClient.EXPECT().DescribeWorkflowExecution(gomock.Any(), "temporal-sys-add-search-attributes-workflow", "").Return( - nil, errors.New("random error")) - s.mockResource.ESClient.EXPECT().GetMapping(gomock.Any(), "random-index-name").Return(map[string]string{"col": "type"}, nil) - s.mockResource.SearchAttributesProvider.EXPECT().GetSearchAttributes("random-index-name", true).Return(searchattribute.TestNameTypeMap, nil).AnyTimes() - resp, err = handler.GetSearchAttributes(ctx, &adminservice.GetSearchAttributesRequest{Namespace: s.namespace.String()}) - s.Error(err) - s.Nil(resp) -} - -func (s *adminHandlerSuite) Test_RemoveSearchAttributes_EmptyIndexName() { - handler := s.handler - ctx := context.Background() - - type test struct { - Name string - Request *adminservice.RemoveSearchAttributesRequest - Expected error - } - // request validation tests - testCases1 := []test{ - { - Name: "nil request", - Request: nil, - Expected: &serviceerror.InvalidArgument{Message: "Request is nil."}, - }, - { - Name: "empty request", - Request: &adminservice.RemoveSearchAttributesRequest{}, - Expected: &serviceerror.InvalidArgument{Message: "SearchAttributes are not set on request."}, - }, - } - for _, testCase := range testCases1 { - s.T().Run(testCase.Name, func(t *testing.T) { - resp, err := handler.RemoveSearchAttributes(ctx, testCase.Request) - s.Equal(testCase.Expected, err) - s.Nil(resp) - }) - } - - // Elasticsearch is not configured - s.mockVisibilityMgr.EXPECT().HasStoreName(elasticsearch.PersistenceName).Return(true).AnyTimes() - s.mockVisibilityMgr.EXPECT().GetIndexName().Return("").AnyTimes() - s.mockResource.SearchAttributesProvider.EXPECT().GetSearchAttributes("", true).Return(searchattribute.TestNameTypeMap, nil).AnyTimes() - testCases2 := []test{ - { - Name: "reserved search attribute (empty index)", - Request: &adminservice.RemoveSearchAttributesRequest{ - SearchAttributes: []string{ - "WorkflowId", - }, - }, - Expected: &serviceerror.InvalidArgument{Message: "Unable to remove non-custom search attributes: WorkflowId."}, - }, - { - Name: "search attribute doesn't exist (empty index)", - Request: 
&adminservice.RemoveSearchAttributesRequest{ - SearchAttributes: []string{ - "ProductId", - }, - }, - Expected: &serviceerror.InvalidArgument{Message: "Search attribute ProductId doesn't exist."}, - }, - } - for _, testCase := range testCases2 { - s.T().Run(testCase.Name, func(t *testing.T) { - resp, err := handler.RemoveSearchAttributes(ctx, testCase.Request) - s.Equal(testCase.Expected, err) - s.Nil(resp) - }) - } -} - -func (s *adminHandlerSuite) Test_RemoveSearchAttributes_NonEmptyIndexName() { - handler := s.handler - ctx := context.Background() - - type test struct { - Name string - Request *adminservice.RemoveSearchAttributesRequest - Expected error - } - testCases := []test{ - { - Name: "reserved search attribute (ES configured)", - Request: &adminservice.RemoveSearchAttributesRequest{ - SearchAttributes: []string{ - "WorkflowId", - }, - }, - Expected: &serviceerror.InvalidArgument{Message: "Unable to remove non-custom search attributes: WorkflowId."}, - }, - { - Name: "search attribute doesn't exist (ES configured)", - Request: &adminservice.RemoveSearchAttributesRequest{ - SearchAttributes: []string{ - "ProductId", - }, - }, - Expected: &serviceerror.InvalidArgument{Message: "Search attribute ProductId doesn't exist."}, - }, - } - - // Configure Elasticsearch: add advanced visibility store config with index name. - s.mockVisibilityMgr.EXPECT().HasStoreName(elasticsearch.PersistenceName).Return(true).AnyTimes() - s.mockVisibilityMgr.EXPECT().GetIndexName().Return("random-index-name").AnyTimes() - s.mockResource.SearchAttributesProvider.EXPECT().GetSearchAttributes("random-index-name", true).Return(searchattribute.TestNameTypeMap, nil).AnyTimes() - for _, testCase := range testCases { - s.T().Run(testCase.Name, func(t *testing.T) { - resp, err := handler.RemoveSearchAttributes(ctx, testCase.Request) - s.Equal(testCase.Expected, err) - s.Nil(resp) - }) - } - - // Success case. 
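// [Illustrative sketch — not part of the upstream diff.] The RemoveSearchAttributes cases
// above check the inverse validation: only existing custom attributes may be removed, and a
// successful request ends in a single SaveSearchAttributes write of the pruned map. A
// simplified sketch of that copy-prune-save flow, again with string-typed attributes and a
// stand-in saver interface in place of searchattribute.Manager.
package main

import (
	"errors"
	"fmt"
)

var systemNames = map[string]struct{}{"WorkflowId": {}, "WorkflowType": {}, "RunId": {}}

// attributeSaver stands in for the save path the success case mocks above.
type attributeSaver interface {
	SaveSearchAttributes(indexName string, custom map[string]string) error
}

type printSaver struct{}

func (printSaver) SaveSearchAttributes(indexName string, custom map[string]string) error {
	fmt.Printf("saving %v to index %q\n", custom, indexName)
	return nil
}

func removeSearchAttributes(indexName string, toRemove []string, custom map[string]string, saver attributeSaver) error {
	if len(toRemove) == 0 {
		return errors.New("SearchAttributes are not set on request.")
	}
	// Work on a copy so the caller's view is untouched if validation fails.
	pruned := make(map[string]string, len(custom))
	for k, v := range custom {
		pruned[k] = v
	}
	for _, name := range toRemove {
		if _, ok := systemNames[name]; ok {
			return fmt.Errorf("Unable to remove non-custom search attributes: %s.", name)
		}
		if _, ok := pruned[name]; !ok {
			return fmt.Errorf("Search attribute %s doesn't exist.", name)
		}
		delete(pruned, name)
	}
	return saver.SaveSearchAttributes(indexName, pruned)
}

func main() {
	custom := map[string]string{"CustomKeywordField": "Keyword"}
	fmt.Println(removeSearchAttributes("random-index-name", []string{"ProductId"}, custom, printSaver{}))
	fmt.Println(removeSearchAttributes("random-index-name", []string{"CustomKeywordField"}, custom, printSaver{}))
}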
- s.mockResource.SearchAttributesManager.EXPECT().SaveSearchAttributes(gomock.Any(), "random-index-name", gomock.Any()).Return(nil) - - resp, err := handler.RemoveSearchAttributes(ctx, &adminservice.RemoveSearchAttributesRequest{ - SearchAttributes: []string{ - "CustomKeywordField", - }, - }) - s.NoError(err) - s.NotNil(resp) -} - -func (s *adminHandlerSuite) Test_RemoveRemoteCluster_Success() { - var clusterName = "cluster" - s.mockClusterMetadataManager.EXPECT().DeleteClusterMetadata( - gomock.Any(), - &persistence.DeleteClusterMetadataRequest{ClusterName: clusterName}, - ).Return(nil) - - _, err := s.handler.RemoveRemoteCluster(context.Background(), &adminservice.RemoveRemoteClusterRequest{ClusterName: clusterName}) - s.NoError(err) -} - -func (s *adminHandlerSuite) Test_RemoveRemoteCluster_Error() { - var clusterName = "cluster" - s.mockClusterMetadataManager.EXPECT().DeleteClusterMetadata( - gomock.Any(), - &persistence.DeleteClusterMetadataRequest{ClusterName: clusterName}, - ).Return(fmt.Errorf("test error")) - - _, err := s.handler.RemoveRemoteCluster(context.Background(), &adminservice.RemoveRemoteClusterRequest{ClusterName: clusterName}) - s.Error(err) -} - -func (s *adminHandlerSuite) Test_AddOrUpdateRemoteCluster_RecordFound_Success() { - var rpcAddress = uuid.New() - var clusterName = uuid.New() - var clusterId = uuid.New() - var recordVersion int64 = 5 - - s.mockMetadata.EXPECT().GetFailoverVersionIncrement().Return(int64(0)) - s.mockMetadata.EXPECT().GetAllClusterInfo().Return(make(map[string]cluster.ClusterInformation)) - s.mockClientFactory.EXPECT().NewRemoteAdminClientWithTimeout(rpcAddress, gomock.Any(), gomock.Any()).Return( - s.mockAdminClient, - ) - s.mockAdminClient.EXPECT().DescribeCluster(gomock.Any(), &adminservice.DescribeClusterRequest{}).Return( - &adminservice.DescribeClusterResponse{ - ClusterId: clusterId, - ClusterName: clusterName, - HistoryShardCount: 4, - FailoverVersionIncrement: 0, - InitialFailoverVersion: 0, - IsGlobalNamespaceEnabled: true, - }, nil) - s.mockClusterMetadataManager.EXPECT().GetClusterMetadata(gomock.Any(), &persistence.GetClusterMetadataRequest{ClusterName: clusterName}).Return( - &persistence.GetClusterMetadataResponse{ - Version: recordVersion, - }, nil) - s.mockClusterMetadataManager.EXPECT().SaveClusterMetadata(gomock.Any(), &persistence.SaveClusterMetadataRequest{ - ClusterMetadata: persistencespb.ClusterMetadata{ - ClusterName: clusterName, - HistoryShardCount: 4, - ClusterId: clusterId, - ClusterAddress: rpcAddress, - FailoverVersionIncrement: 0, - InitialFailoverVersion: 0, - IsGlobalNamespaceEnabled: true, - }, - Version: recordVersion, - }).Return(true, nil) - _, err := s.handler.AddOrUpdateRemoteCluster(context.Background(), &adminservice.AddOrUpdateRemoteClusterRequest{FrontendAddress: rpcAddress}) - s.NoError(err) -} - -func (s *adminHandlerSuite) Test_AddOrUpdateRemoteCluster_RecordNotFound_Success() { - var rpcAddress = uuid.New() - var clusterName = uuid.New() - var clusterId = uuid.New() - - s.mockMetadata.EXPECT().GetFailoverVersionIncrement().Return(int64(0)) - s.mockMetadata.EXPECT().GetAllClusterInfo().Return(make(map[string]cluster.ClusterInformation)) - s.mockClientFactory.EXPECT().NewRemoteAdminClientWithTimeout(rpcAddress, gomock.Any(), gomock.Any()).Return( - s.mockAdminClient, - ) - s.mockAdminClient.EXPECT().DescribeCluster(gomock.Any(), &adminservice.DescribeClusterRequest{}).Return( - &adminservice.DescribeClusterResponse{ - ClusterId: clusterId, - ClusterName: clusterName, - HistoryShardCount: 4, - 
FailoverVersionIncrement: 0, - InitialFailoverVersion: 0, - IsGlobalNamespaceEnabled: true, - }, nil) - s.mockClusterMetadataManager.EXPECT().GetClusterMetadata(gomock.Any(), &persistence.GetClusterMetadataRequest{ClusterName: clusterName}).Return( - nil, - serviceerror.NewNotFound("expected empty result"), - ) - s.mockClusterMetadataManager.EXPECT().SaveClusterMetadata(gomock.Any(), &persistence.SaveClusterMetadataRequest{ - ClusterMetadata: persistencespb.ClusterMetadata{ - ClusterName: clusterName, - HistoryShardCount: 4, - ClusterId: clusterId, - ClusterAddress: rpcAddress, - FailoverVersionIncrement: 0, - InitialFailoverVersion: 0, - IsGlobalNamespaceEnabled: true, - }, - Version: 0, - }).Return(true, nil) - _, err := s.handler.AddOrUpdateRemoteCluster(context.Background(), &adminservice.AddOrUpdateRemoteClusterRequest{FrontendAddress: rpcAddress}) - s.NoError(err) -} - -func (s *adminHandlerSuite) Test_AddOrUpdateRemoteCluster_ValidationError_ClusterNameConflict() { - var rpcAddress = uuid.New() - var clusterId = uuid.New() - - s.mockClientFactory.EXPECT().NewRemoteAdminClientWithTimeout(rpcAddress, gomock.Any(), gomock.Any()).Return( - s.mockAdminClient, - ) - s.mockAdminClient.EXPECT().DescribeCluster(gomock.Any(), &adminservice.DescribeClusterRequest{}).Return( - &adminservice.DescribeClusterResponse{ - ClusterId: clusterId, - ClusterName: s.mockMetadata.GetCurrentClusterName(), - HistoryShardCount: 0, - FailoverVersionIncrement: 0, - InitialFailoverVersion: 0, - IsGlobalNamespaceEnabled: true, - }, nil) - _, err := s.handler.AddOrUpdateRemoteCluster(context.Background(), &adminservice.AddOrUpdateRemoteClusterRequest{FrontendAddress: rpcAddress}) - s.Error(err) - s.IsType(&serviceerror.InvalidArgument{}, err) -} - -func (s *adminHandlerSuite) Test_AddOrUpdateRemoteCluster_ValidationError_FailoverVersionIncrementMismatch() { - var rpcAddress = uuid.New() - var clusterName = uuid.New() - var clusterId = uuid.New() - - s.mockMetadata.EXPECT().GetFailoverVersionIncrement().Return(int64(1)) - s.mockClientFactory.EXPECT().NewRemoteAdminClientWithTimeout(rpcAddress, gomock.Any(), gomock.Any()).Return( - s.mockAdminClient, - ) - s.mockAdminClient.EXPECT().DescribeCluster(gomock.Any(), &adminservice.DescribeClusterRequest{}).Return( - &adminservice.DescribeClusterResponse{ - ClusterId: clusterId, - ClusterName: clusterName, - HistoryShardCount: 0, - FailoverVersionIncrement: 0, - InitialFailoverVersion: 0, - IsGlobalNamespaceEnabled: true, - }, nil) - _, err := s.handler.AddOrUpdateRemoteCluster(context.Background(), &adminservice.AddOrUpdateRemoteClusterRequest{FrontendAddress: rpcAddress}) - s.Error(err) - s.IsType(&serviceerror.InvalidArgument{}, err) -} - -func (s *adminHandlerSuite) Test_AddOrUpdateRemoteCluster_ValidationError_ShardCount_Invalid() { - var rpcAddress = uuid.New() - var clusterName = uuid.New() - var clusterId = uuid.New() - - s.mockMetadata.EXPECT().GetFailoverVersionIncrement().Return(int64(0)) - s.mockClientFactory.EXPECT().NewRemoteAdminClientWithTimeout(rpcAddress, gomock.Any(), gomock.Any()).Return( - s.mockAdminClient, - ) - s.mockAdminClient.EXPECT().DescribeCluster(gomock.Any(), &adminservice.DescribeClusterRequest{}).Return( - &adminservice.DescribeClusterResponse{ - ClusterId: clusterId, - ClusterName: clusterName, - HistoryShardCount: 5, - FailoverVersionIncrement: 0, - InitialFailoverVersion: 0, - IsGlobalNamespaceEnabled: true, - }, nil) - _, err := s.handler.AddOrUpdateRemoteCluster(context.Background(), 
&adminservice.AddOrUpdateRemoteClusterRequest{FrontendAddress: rpcAddress}) - s.Error(err) - s.IsType(&serviceerror.InvalidArgument{}, err) -} - -func (s *adminHandlerSuite) Test_AddOrUpdateRemoteCluster_ShardCount_Multiple() { - var rpcAddress = uuid.New() - var clusterName = uuid.New() - var clusterId = uuid.New() - var recordVersion int64 = 5 - - s.mockMetadata.EXPECT().GetFailoverVersionIncrement().Return(int64(0)) - s.mockMetadata.EXPECT().GetAllClusterInfo().Return(make(map[string]cluster.ClusterInformation)) - s.mockClientFactory.EXPECT().NewRemoteAdminClientWithTimeout(rpcAddress, gomock.Any(), gomock.Any()).Return( - s.mockAdminClient, - ) - s.mockAdminClient.EXPECT().DescribeCluster(gomock.Any(), &adminservice.DescribeClusterRequest{}).Return( - &adminservice.DescribeClusterResponse{ - ClusterId: clusterId, - ClusterName: clusterName, - HistoryShardCount: 16, - FailoverVersionIncrement: 0, - InitialFailoverVersion: 0, - IsGlobalNamespaceEnabled: true, - }, nil) - s.mockClusterMetadataManager.EXPECT().GetClusterMetadata(gomock.Any(), &persistence.GetClusterMetadataRequest{ClusterName: clusterName}).Return( - &persistence.GetClusterMetadataResponse{ - Version: recordVersion, - }, nil) - s.mockClusterMetadataManager.EXPECT().SaveClusterMetadata(gomock.Any(), &persistence.SaveClusterMetadataRequest{ - ClusterMetadata: persistencespb.ClusterMetadata{ - ClusterName: clusterName, - HistoryShardCount: 16, - ClusterId: clusterId, - ClusterAddress: rpcAddress, - FailoverVersionIncrement: 0, - InitialFailoverVersion: 0, - IsGlobalNamespaceEnabled: true, - }, - Version: recordVersion, - }).Return(true, nil) - _, err := s.handler.AddOrUpdateRemoteCluster(context.Background(), &adminservice.AddOrUpdateRemoteClusterRequest{FrontendAddress: rpcAddress}) - s.NoError(err) -} - -func (s *adminHandlerSuite) Test_AddOrUpdateRemoteCluster_ValidationError_GlobalNamespaceDisabled() { - var rpcAddress = uuid.New() - var clusterName = uuid.New() - var clusterId = uuid.New() - - s.mockMetadata.EXPECT().GetFailoverVersionIncrement().Return(int64(0)) - s.mockClientFactory.EXPECT().NewRemoteAdminClientWithTimeout(rpcAddress, gomock.Any(), gomock.Any()).Return( - s.mockAdminClient, - ) - s.mockAdminClient.EXPECT().DescribeCluster(gomock.Any(), &adminservice.DescribeClusterRequest{}).Return( - &adminservice.DescribeClusterResponse{ - ClusterId: clusterId, - ClusterName: clusterName, - HistoryShardCount: 4, - FailoverVersionIncrement: 0, - InitialFailoverVersion: 0, - IsGlobalNamespaceEnabled: false, - }, nil) - _, err := s.handler.AddOrUpdateRemoteCluster(context.Background(), &adminservice.AddOrUpdateRemoteClusterRequest{FrontendAddress: rpcAddress}) - s.Error(err) - s.IsType(&serviceerror.InvalidArgument{}, err) -} - -func (s *adminHandlerSuite) Test_AddOrUpdateRemoteCluster_ValidationError_InitialFailoverVersionConflict() { - var rpcAddress = uuid.New() - var clusterName = uuid.New() - var clusterId = uuid.New() - - s.mockMetadata.EXPECT().GetFailoverVersionIncrement().Return(int64(0)) - s.mockMetadata.EXPECT().GetAllClusterInfo().Return(map[string]cluster.ClusterInformation{ - uuid.New(): {InitialFailoverVersion: 0}, - }) - s.mockClientFactory.EXPECT().NewRemoteAdminClientWithTimeout(rpcAddress, gomock.Any(), gomock.Any()).Return( - s.mockAdminClient, - ) - s.mockAdminClient.EXPECT().DescribeCluster(gomock.Any(), &adminservice.DescribeClusterRequest{}).Return( - &adminservice.DescribeClusterResponse{ - ClusterId: clusterId, - ClusterName: clusterName, - HistoryShardCount: 4, - FailoverVersionIncrement: 0, - 
InitialFailoverVersion: 0, - IsGlobalNamespaceEnabled: true, - }, nil) - _, err := s.handler.AddOrUpdateRemoteCluster(context.Background(), &adminservice.AddOrUpdateRemoteClusterRequest{FrontendAddress: rpcAddress}) - s.Error(err) - s.IsType(&serviceerror.InvalidArgument{}, err) -} - -func (s *adminHandlerSuite) Test_AddOrUpdateRemoteCluster_DescribeCluster_Error() { - var rpcAddress = uuid.New() - - s.mockClientFactory.EXPECT().NewRemoteAdminClientWithTimeout(rpcAddress, gomock.Any(), gomock.Any()).Return( - s.mockAdminClient, - ) - s.mockAdminClient.EXPECT().DescribeCluster(gomock.Any(), &adminservice.DescribeClusterRequest{}).Return( - nil, - fmt.Errorf("test error"), - ) - _, err := s.handler.AddOrUpdateRemoteCluster(context.Background(), &adminservice.AddOrUpdateRemoteClusterRequest{FrontendAddress: rpcAddress}) - s.Error(err) -} - -func (s *adminHandlerSuite) Test_AddOrUpdateRemoteCluster_GetClusterMetadata_Error() { - var rpcAddress = uuid.New() - var clusterName = uuid.New() - var clusterId = uuid.New() - - s.mockMetadata.EXPECT().GetFailoverVersionIncrement().Return(int64(0)) - s.mockMetadata.EXPECT().GetAllClusterInfo().Return(make(map[string]cluster.ClusterInformation)) - s.mockClientFactory.EXPECT().NewRemoteAdminClientWithTimeout(rpcAddress, gomock.Any(), gomock.Any()).Return( - s.mockAdminClient, - ) - s.mockAdminClient.EXPECT().DescribeCluster(gomock.Any(), &adminservice.DescribeClusterRequest{}).Return( - &adminservice.DescribeClusterResponse{ - ClusterId: clusterId, - ClusterName: clusterName, - HistoryShardCount: 4, - FailoverVersionIncrement: 0, - InitialFailoverVersion: 0, - IsGlobalNamespaceEnabled: true, - }, nil) - s.mockClusterMetadataManager.EXPECT().GetClusterMetadata(gomock.Any(), &persistence.GetClusterMetadataRequest{ClusterName: clusterName}).Return( - nil, - fmt.Errorf("test error"), - ) - _, err := s.handler.AddOrUpdateRemoteCluster(context.Background(), &adminservice.AddOrUpdateRemoteClusterRequest{FrontendAddress: rpcAddress}) - s.Error(err) -} - -func (s *adminHandlerSuite) Test_AddOrUpdateRemoteCluster_SaveClusterMetadata_Error() { - var rpcAddress = uuid.New() - var clusterName = uuid.New() - var clusterId = uuid.New() - - s.mockMetadata.EXPECT().GetFailoverVersionIncrement().Return(int64(0)) - s.mockMetadata.EXPECT().GetAllClusterInfo().Return(make(map[string]cluster.ClusterInformation)) - s.mockClientFactory.EXPECT().NewRemoteAdminClientWithTimeout(rpcAddress, gomock.Any(), gomock.Any()).Return( - s.mockAdminClient, - ) - s.mockAdminClient.EXPECT().DescribeCluster(gomock.Any(), &adminservice.DescribeClusterRequest{}).Return( - &adminservice.DescribeClusterResponse{ - ClusterId: clusterId, - ClusterName: clusterName, - HistoryShardCount: 4, - FailoverVersionIncrement: 0, - InitialFailoverVersion: 0, - IsGlobalNamespaceEnabled: true, - }, nil) - s.mockClusterMetadataManager.EXPECT().GetClusterMetadata(gomock.Any(), &persistence.GetClusterMetadataRequest{ClusterName: clusterName}).Return( - nil, - serviceerror.NewNotFound("expected empty result"), - ) - s.mockClusterMetadataManager.EXPECT().SaveClusterMetadata(gomock.Any(), &persistence.SaveClusterMetadataRequest{ - ClusterMetadata: persistencespb.ClusterMetadata{ - ClusterName: clusterName, - HistoryShardCount: 4, - ClusterId: clusterId, - ClusterAddress: rpcAddress, - FailoverVersionIncrement: 0, - InitialFailoverVersion: 0, - IsGlobalNamespaceEnabled: true, - }, - Version: 0, - }).Return(false, fmt.Errorf("test error")) - _, err := s.handler.AddOrUpdateRemoteCluster(context.Background(), 
&adminservice.AddOrUpdateRemoteClusterRequest{FrontendAddress: rpcAddress}) - s.Error(err) -} - -func (s *adminHandlerSuite) Test_AddOrUpdateRemoteCluster_SaveClusterMetadata_NotApplied_Error() { - var rpcAddress = uuid.New() - var clusterName = uuid.New() - var clusterId = uuid.New() - - s.mockMetadata.EXPECT().GetFailoverVersionIncrement().Return(int64(0)) - s.mockMetadata.EXPECT().GetAllClusterInfo().Return(make(map[string]cluster.ClusterInformation)) - s.mockClientFactory.EXPECT().NewRemoteAdminClientWithTimeout(rpcAddress, gomock.Any(), gomock.Any()).Return( - s.mockAdminClient, - ) - s.mockAdminClient.EXPECT().DescribeCluster(gomock.Any(), &adminservice.DescribeClusterRequest{}).Return( - &adminservice.DescribeClusterResponse{ - ClusterId: clusterId, - ClusterName: clusterName, - HistoryShardCount: 4, - FailoverVersionIncrement: 0, - InitialFailoverVersion: 0, - IsGlobalNamespaceEnabled: true, - }, nil) - s.mockClusterMetadataManager.EXPECT().GetClusterMetadata(gomock.Any(), &persistence.GetClusterMetadataRequest{ClusterName: clusterName}).Return( - nil, - serviceerror.NewNotFound("expected empty result"), - ) - s.mockClusterMetadataManager.EXPECT().SaveClusterMetadata(gomock.Any(), &persistence.SaveClusterMetadataRequest{ - ClusterMetadata: persistencespb.ClusterMetadata{ - ClusterName: clusterName, - HistoryShardCount: 4, - ClusterId: clusterId, - ClusterAddress: rpcAddress, - FailoverVersionIncrement: 0, - InitialFailoverVersion: 0, - IsGlobalNamespaceEnabled: true, - }, - Version: 0, - }).Return(false, nil) - _, err := s.handler.AddOrUpdateRemoteCluster(context.Background(), &adminservice.AddOrUpdateRemoteClusterRequest{FrontendAddress: rpcAddress}) - s.Error(err) - s.IsType(&serviceerror.InvalidArgument{}, err) -} - -func (s *adminHandlerSuite) Test_DescribeCluster_CurrentCluster_Success() { - var clusterId = uuid.New() - clusterName := s.mockMetadata.GetCurrentClusterName() - s.mockResource.HostInfoProvider.EXPECT().HostInfo().Return(membership.NewHostInfoFromAddress("test")) - s.mockResource.MembershipMonitor.EXPECT().GetReachableMembers().Return(nil, nil) - s.mockResource.HistoryServiceResolver.EXPECT().Members().Return([]membership.HostInfo{}) - s.mockResource.HistoryServiceResolver.EXPECT().MemberCount().Return(0) - s.mockResource.FrontendServiceResolver.EXPECT().Members().Return([]membership.HostInfo{}) - s.mockResource.FrontendServiceResolver.EXPECT().MemberCount().Return(0) - s.mockResource.MatchingServiceResolver.EXPECT().Members().Return([]membership.HostInfo{}) - s.mockResource.MatchingServiceResolver.EXPECT().MemberCount().Return(0) - s.mockResource.WorkerServiceResolver.EXPECT().Members().Return([]membership.HostInfo{}) - s.mockResource.WorkerServiceResolver.EXPECT().MemberCount().Return(0) - s.mockResource.ExecutionMgr.EXPECT().GetName().Return("") - s.mockVisibilityMgr.EXPECT().GetStoreNames().Return([]string{elasticsearch.PersistenceName}) - s.mockClusterMetadataManager.EXPECT().GetClusterMetadata(gomock.Any(), &persistence.GetClusterMetadataRequest{ClusterName: clusterName}).Return( - &persistence.GetClusterMetadataResponse{ - ClusterMetadata: persistencespb.ClusterMetadata{ - ClusterName: clusterName, - HistoryShardCount: 0, - ClusterId: clusterId, - FailoverVersionIncrement: 0, - InitialFailoverVersion: 0, - IsGlobalNamespaceEnabled: true, - }, - Version: 1, - }, nil) - - resp, err := s.handler.DescribeCluster(context.Background(), &adminservice.DescribeClusterRequest{}) - s.NoError(err) - s.Equal(resp.GetClusterName(), clusterName) - 
s.Equal(resp.GetClusterId(), clusterId) - s.Equal(resp.GetHistoryShardCount(), int32(0)) - s.Equal(resp.GetFailoverVersionIncrement(), int64(0)) - s.Equal(resp.GetInitialFailoverVersion(), int64(0)) - s.True(resp.GetIsGlobalNamespaceEnabled()) -} - -func (s *adminHandlerSuite) Test_DescribeCluster_NonCurrentCluster_Success() { - var clusterName = uuid.New() - var clusterId = uuid.New() - - s.mockResource.HostInfoProvider.EXPECT().HostInfo().Return(membership.NewHostInfoFromAddress("test")) - s.mockResource.MembershipMonitor.EXPECT().GetReachableMembers().Return(nil, nil) - s.mockResource.HistoryServiceResolver.EXPECT().Members().Return([]membership.HostInfo{}) - s.mockResource.HistoryServiceResolver.EXPECT().MemberCount().Return(0) - s.mockResource.FrontendServiceResolver.EXPECT().Members().Return([]membership.HostInfo{}) - s.mockResource.FrontendServiceResolver.EXPECT().MemberCount().Return(0) - s.mockResource.MatchingServiceResolver.EXPECT().Members().Return([]membership.HostInfo{}) - s.mockResource.MatchingServiceResolver.EXPECT().MemberCount().Return(0) - s.mockResource.WorkerServiceResolver.EXPECT().Members().Return([]membership.HostInfo{}) - s.mockResource.WorkerServiceResolver.EXPECT().MemberCount().Return(0) - s.mockResource.ExecutionMgr.EXPECT().GetName().Return("") - s.mockVisibilityMgr.EXPECT().GetStoreNames().Return([]string{elasticsearch.PersistenceName}) - s.mockClusterMetadataManager.EXPECT().GetClusterMetadata(gomock.Any(), &persistence.GetClusterMetadataRequest{ClusterName: clusterName}).Return( - &persistence.GetClusterMetadataResponse{ - ClusterMetadata: persistencespb.ClusterMetadata{ - ClusterName: clusterName, - HistoryShardCount: 0, - ClusterId: clusterId, - FailoverVersionIncrement: 0, - InitialFailoverVersion: 0, - IsGlobalNamespaceEnabled: true, - }, - Version: 1, - }, nil) - - resp, err := s.handler.DescribeCluster(context.Background(), &adminservice.DescribeClusterRequest{ClusterName: clusterName}) - s.NoError(err) - s.Equal(resp.GetClusterName(), clusterName) - s.Equal(resp.GetClusterId(), clusterId) - s.Equal(resp.GetHistoryShardCount(), int32(0)) - s.Equal(resp.GetFailoverVersionIncrement(), int64(0)) - s.Equal(resp.GetInitialFailoverVersion(), int64(0)) - s.True(resp.GetIsGlobalNamespaceEnabled()) -} - -func (s *adminHandlerSuite) Test_ListClusters_Success() { - var pageSize int32 = 1 - - s.mockClusterMetadataManager.EXPECT().ListClusterMetadata(gomock.Any(), &persistence.ListClusterMetadataRequest{ - PageSize: int(pageSize), - }).Return( - &persistence.ListClusterMetadataResponse{ - ClusterMetadata: []*persistence.GetClusterMetadataResponse{ - { - ClusterMetadata: persistencespb.ClusterMetadata{ClusterName: "test"}, - }, - }}, nil) - - resp, err := s.handler.ListClusters(context.Background(), &adminservice.ListClustersRequest{ - PageSize: pageSize, - }) - s.NoError(err) - s.Equal(1, len(resp.Clusters)) - s.Equal(0, len(resp.GetNextPageToken())) -} - -func (s *adminHandlerSuite) TestDeleteWorkflowExecution_DeleteCurrentExecution() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "workflowID", - } - - request := &adminservice.DeleteWorkflowExecutionRequest{ - Namespace: s.namespace.String(), - Execution: &execution, - } - - s.mockNamespaceCache.EXPECT().GetNamespaceID(s.namespace).Return(s.namespaceID, nil).AnyTimes() - s.mockVisibilityMgr.EXPECT().HasStoreName(cassandra.CassandraPersistenceName).Return(false) - - s.mockExecutionMgr.EXPECT().GetCurrentExecution(gomock.Any(), gomock.Any()).Return(nil, errors.New("some random error")) - resp, err := 
s.handler.DeleteWorkflowExecution(context.Background(), request) - s.Nil(resp) - s.Error(err) - - mutableState := &persistencespb.WorkflowMutableState{ - ExecutionInfo: &persistencespb.WorkflowExecutionInfo{ - VersionHistories: &historyspb.VersionHistories{ - CurrentVersionHistoryIndex: 0, - Histories: []*historyspb.VersionHistory{ - {BranchToken: []byte("branch1")}, - {BranchToken: []byte("branch2")}, - {BranchToken: []byte("branch3")}, - }, - }, - }, - } - - shardID := common.WorkflowIDToHistoryShard( - s.namespaceID.String(), - execution.GetWorkflowId(), - s.handler.numberOfHistoryShards, - ) - runID := uuid.New() - s.mockExecutionMgr.EXPECT().GetCurrentExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetCurrentExecutionResponse{ - StartRequestID: uuid.New(), - RunID: runID, - State: enums.WORKFLOW_EXECUTION_STATE_COMPLETED, - Status: enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED, - }, nil) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), &persistence.GetWorkflowExecutionRequest{ - ShardID: shardID, - NamespaceID: s.namespaceID.String(), - WorkflowID: execution.WorkflowId, - RunID: runID, - }).Return(&persistence.GetWorkflowExecutionResponse{State: mutableState}, nil) - s.mockHistoryClient.EXPECT().DeleteWorkflowVisibilityRecord(gomock.Any(), &historyservice.DeleteWorkflowVisibilityRecordRequest{ - NamespaceId: s.namespaceID.String(), - Execution: &commonpb.WorkflowExecution{ - WorkflowId: execution.WorkflowId, - RunId: runID, - }, - }).Return(&historyservice.DeleteWorkflowVisibilityRecordResponse{}, nil) - s.mockExecutionMgr.EXPECT().DeleteCurrentWorkflowExecution(gomock.Any(), &persistence.DeleteCurrentWorkflowExecutionRequest{ - ShardID: shardID, - NamespaceID: s.namespaceID.String(), - WorkflowID: execution.WorkflowId, - RunID: runID, - }).Return(nil) - s.mockExecutionMgr.EXPECT().DeleteWorkflowExecution(gomock.Any(), &persistence.DeleteWorkflowExecutionRequest{ - ShardID: shardID, - NamespaceID: s.namespaceID.String(), - WorkflowID: execution.WorkflowId, - RunID: runID, - }).Return(nil) - s.mockExecutionMgr.EXPECT().DeleteHistoryBranch(gomock.Any(), gomock.Any()).Times(len(mutableState.ExecutionInfo.VersionHistories.Histories)) - - _, err = s.handler.DeleteWorkflowExecution(context.Background(), request) - s.NoError(err) -} - -func (s *adminHandlerSuite) TestDeleteWorkflowExecution_LoadMutableStateFailed() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "workflowID", - RunId: uuid.New(), - } - - request := &adminservice.DeleteWorkflowExecutionRequest{ - Namespace: s.namespace.String(), - Execution: &execution, - } - - s.mockNamespaceCache.EXPECT().GetNamespaceID(s.namespace).Return(s.namespaceID, nil).AnyTimes() - s.mockVisibilityMgr.EXPECT().HasStoreName(cassandra.CassandraPersistenceName).Return(false) - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(nil, errors.New("some random error")) - s.mockHistoryClient.EXPECT().DeleteWorkflowVisibilityRecord(gomock.Any(), gomock.Any()).Return(&historyservice.DeleteWorkflowVisibilityRecordResponse{}, nil) - s.mockExecutionMgr.EXPECT().DeleteCurrentWorkflowExecution(gomock.Any(), gomock.Any()).Return(nil) - s.mockExecutionMgr.EXPECT().DeleteWorkflowExecution(gomock.Any(), gomock.Any()).Return(nil) - - _, err := s.handler.DeleteWorkflowExecution(context.Background(), request) - s.NoError(err) -} - -func (s *adminHandlerSuite) TestDeleteWorkflowExecution_CassandraVisibilityBackend() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "workflowID", - RunId: 
uuid.New(), - } - - request := &adminservice.DeleteWorkflowExecutionRequest{ - Namespace: s.namespace.String(), - Execution: &execution, - } - - s.mockNamespaceCache.EXPECT().GetNamespaceID(s.namespace).Return(s.namespaceID, nil).AnyTimes() - s.mockVisibilityMgr.EXPECT().HasStoreName(cassandra.CassandraPersistenceName).Return(true).AnyTimes() - - // test delete open records - branchToken := []byte("branchToken") - version := int64(100) - mutableState := &persistencespb.WorkflowMutableState{ - ExecutionState: &persistencespb.WorkflowExecutionState{ - CreateRequestId: uuid.New(), - RunId: execution.RunId, - State: enums.WORKFLOW_EXECUTION_STATE_RUNNING, - Status: enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING, - }, - NextEventId: 12, - ExecutionInfo: &persistencespb.WorkflowExecutionInfo{ - CompletionEventBatchId: 10, - StartTime: timestamp.TimePtr(time.Now()), - VersionHistories: &historyspb.VersionHistories{ - CurrentVersionHistoryIndex: 0, - Histories: []*historyspb.VersionHistory{ - { - BranchToken: branchToken, - Items: []*historyspb.VersionHistoryItem{ - {EventId: 11, Version: version}, - }, - }, - }, - }, - }, - } - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: mutableState}, nil) - s.mockHistoryClient.EXPECT().DeleteWorkflowVisibilityRecord(gomock.Any(), &historyservice.DeleteWorkflowVisibilityRecordRequest{ - NamespaceId: s.namespaceID.String(), - Execution: &execution, - WorkflowStartTime: mutableState.ExecutionInfo.StartTime, - }).Return(&historyservice.DeleteWorkflowVisibilityRecordResponse{}, nil) - s.mockExecutionMgr.EXPECT().DeleteCurrentWorkflowExecution(gomock.Any(), gomock.Any()).Return(nil) - s.mockExecutionMgr.EXPECT().DeleteWorkflowExecution(gomock.Any(), gomock.Any()).Return(nil) - s.mockExecutionMgr.EXPECT().DeleteHistoryBranch(gomock.Any(), gomock.Any()).Times(len(mutableState.ExecutionInfo.VersionHistories.Histories)) - - _, err := s.handler.DeleteWorkflowExecution(context.Background(), request) - s.NoError(err) - - // test delete close records - mutableState.ExecutionState.State = enums.WORKFLOW_EXECUTION_STATE_COMPLETED - mutableState.ExecutionState.Status = enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED - - shardID := common.WorkflowIDToHistoryShard( - s.namespaceID.String(), - execution.GetWorkflowId(), - s.handler.numberOfHistoryShards, - ) - closeTime := time.Now() - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: mutableState}, nil) - s.mockExecutionMgr.EXPECT().ReadHistoryBranch(gomock.Any(), &persistence.ReadHistoryBranchRequest{ - ShardID: shardID, - BranchToken: branchToken, - MinEventID: mutableState.ExecutionInfo.CompletionEventBatchId, - MaxEventID: mutableState.NextEventId, - PageSize: 1, - }).Return(&persistence.ReadHistoryBranchResponse{ - HistoryEvents: []*historypb.HistoryEvent{ - { - EventId: 10, - EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_COMPLETED, - Version: version, - EventTime: timestamp.TimePtr(closeTime.Add(-time.Millisecond)), - }, - { - EventId: 11, - EventType: enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_COMPLETED, - Version: version, - EventTime: timestamp.TimePtr(closeTime), - }, - }, - }, nil) - s.mockHistoryClient.EXPECT().DeleteWorkflowVisibilityRecord(gomock.Any(), &historyservice.DeleteWorkflowVisibilityRecordRequest{ - NamespaceId: s.namespaceID.String(), - Execution: &execution, - WorkflowCloseTime: timestamp.TimePtr(closeTime), - 
}).Return(&historyservice.DeleteWorkflowVisibilityRecordResponse{}, nil) - s.mockExecutionMgr.EXPECT().DeleteCurrentWorkflowExecution(gomock.Any(), gomock.Any()).Return(nil) - s.mockExecutionMgr.EXPECT().DeleteWorkflowExecution(gomock.Any(), gomock.Any()).Return(nil) - s.mockExecutionMgr.EXPECT().DeleteHistoryBranch(gomock.Any(), gomock.Any()).Times(len(mutableState.ExecutionInfo.VersionHistories.Histories)) - - _, err = s.handler.DeleteWorkflowExecution(context.Background(), request) - s.NoError(err) -} - -func (s *adminHandlerSuite) TestStreamWorkflowReplicationMessages_ClientToServerBroken() { - clientClusterShardID := historyclient.ClusterShardID{ - ClusterID: rand.Int31(), - ShardID: rand.Int31(), - } - serverClusterShardID := historyclient.ClusterShardID{ - ClusterID: rand.Int31(), - ShardID: rand.Int31(), - } - clusterShardMD := historyclient.EncodeClusterShardMD( - clientClusterShardID, - serverClusterShardID, - ) - ctx := metadata.NewIncomingContext(context.Background(), clusterShardMD) - clientCluster := adminservicemock.NewMockAdminService_StreamWorkflowReplicationMessagesServer(s.controller) - clientCluster.EXPECT().Context().Return(ctx).AnyTimes() - serverCluster := historyservicemock.NewMockHistoryService_StreamWorkflowReplicationMessagesClient(s.controller) - s.mockHistoryClient.EXPECT().StreamWorkflowReplicationMessages(ctx).Return(serverCluster, nil) - - waitGroupStart := sync.WaitGroup{} - waitGroupStart.Add(2) - waitGroupEnd := sync.WaitGroup{} - waitGroupEnd.Add(2) - channel := make(chan struct{}) - - clientCluster.EXPECT().Recv().DoAndReturn(func() (*adminservice.StreamWorkflowReplicationMessagesRequest, error) { - waitGroupStart.Done() - waitGroupStart.Wait() - - defer waitGroupEnd.Done() - return nil, serviceerror.NewUnavailable("random error") - }) - serverCluster.EXPECT().Recv().DoAndReturn(func() (*historyservice.StreamWorkflowReplicationMessagesResponse, error) { - waitGroupStart.Done() - waitGroupStart.Wait() - - defer waitGroupEnd.Done() - <-channel - return nil, serviceerror.NewUnavailable("random error") - }) - _ = s.handler.StreamWorkflowReplicationMessages(clientCluster) - close(channel) - waitGroupEnd.Wait() -} - -func (s *adminHandlerSuite) TestStreamWorkflowReplicationMessages_ServerToClientBroken() { - clientClusterShardID := historyclient.ClusterShardID{ - ClusterID: rand.Int31(), - ShardID: rand.Int31(), - } - serverClusterShardID := historyclient.ClusterShardID{ - ClusterID: rand.Int31(), - ShardID: rand.Int31(), - } - clusterShardMD := historyclient.EncodeClusterShardMD( - clientClusterShardID, - serverClusterShardID, - ) - ctx := metadata.NewIncomingContext(context.Background(), clusterShardMD) - clientCluster := adminservicemock.NewMockAdminService_StreamWorkflowReplicationMessagesServer(s.controller) - clientCluster.EXPECT().Context().Return(ctx).AnyTimes() - serverCluster := historyservicemock.NewMockHistoryService_StreamWorkflowReplicationMessagesClient(s.controller) - s.mockHistoryClient.EXPECT().StreamWorkflowReplicationMessages(ctx).Return(serverCluster, nil) - - waitGroupStart := sync.WaitGroup{} - waitGroupStart.Add(2) - waitGroupEnd := sync.WaitGroup{} - waitGroupEnd.Add(2) - channel := make(chan struct{}) - - clientCluster.EXPECT().Recv().DoAndReturn(func() (*adminservice.StreamWorkflowReplicationMessagesRequest, error) { - waitGroupStart.Done() - waitGroupStart.Wait() - - defer waitGroupEnd.Done() - <-channel - return nil, serviceerror.NewUnavailable("random error") - }) - serverCluster.EXPECT().Recv().DoAndReturn(func() 
(*historyservice.StreamWorkflowReplicationMessagesResponse, error) { - waitGroupStart.Done() - waitGroupStart.Wait() - - defer waitGroupEnd.Done() - return nil, serviceerror.NewUnavailable("random error") - }) - _ = s.handler.StreamWorkflowReplicationMessages(clientCluster) - close(channel) - waitGroupEnd.Wait() -} diff -Nru temporal-1.21.5-1/src/service/frontend/admin_handler.go temporal-1.22.5/src/service/frontend/admin_handler.go --- temporal-1.21.5-1/src/service/frontend/admin_handler.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/service/frontend/admin_handler.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,2108 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
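// [Illustrative sketch — not part of the upstream diff.] The two Stream... tests removed
// above make an inherently racy scenario deterministic: both mocked Recv calls first
// rendezvous on a WaitGroup so the two proxy goroutines are definitely running, then a gate
// channel holds one side back so the other side is guaranteed to fail first. A standalone
// sketch of that synchronization idiom (plain goroutines, no gomock).
package main

import (
	"errors"
	"fmt"
	"sync"
)

func main() {
	var start sync.WaitGroup // both sides are running before either may fail
	start.Add(2)
	var end sync.WaitGroup // lets the test wait for both sides to finish
	end.Add(2)
	gate := make(chan struct{}) // holds the "slow" side back

	results := make(chan string, 2)

	// Side A: allowed to fail first.
	go func() {
		defer end.Done()
		start.Done()
		start.Wait()
		results <- fmt.Sprint("A failed first: ", errors.New("random error"))
	}()

	// Side B: blocked on the gate, so it always observes A's failure.
	go func() {
		defer end.Done()
		start.Done()
		start.Wait()
		<-gate
		results <- "B released after A"
	}()

	fmt.Println(<-results) // deterministic: always side A
	close(gate)            // mirrors close(channel) after the handler returns
	end.Wait()
	fmt.Println(<-results)
}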
+ +package frontend + +import ( + "context" + "errors" + "fmt" + "net" + "strings" + "sync/atomic" + "time" + + replicationpb "go.temporal.io/api/replication/v1" + "google.golang.org/grpc/metadata" + + "go.temporal.io/server/client/history" + "go.temporal.io/server/common/channel" + "go.temporal.io/server/common/clock" + "go.temporal.io/server/common/primitives" + "go.temporal.io/server/common/util" + + "github.com/pborman/uuid" + commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" + historypb "go.temporal.io/api/history/v1" + namespacepb "go.temporal.io/api/namespace/v1" + "go.temporal.io/api/serviceerror" + workflowpb "go.temporal.io/api/workflow/v1" + "go.temporal.io/api/workflowservice/v1" + sdkclient "go.temporal.io/sdk/client" + "golang.org/x/exp/maps" + "google.golang.org/grpc/health" + healthpb "google.golang.org/grpc/health/grpc_health_v1" + + "go.temporal.io/server/api/adminservice/v1" + clusterspb "go.temporal.io/server/api/cluster/v1" + enumsspb "go.temporal.io/server/api/enums/v1" + historyspb "go.temporal.io/server/api/history/v1" + "go.temporal.io/server/api/historyservice/v1" + persistencespb "go.temporal.io/server/api/persistence/v1" + replicationspb "go.temporal.io/server/api/replication/v1" + tokenspb "go.temporal.io/server/api/token/v1" + serverClient "go.temporal.io/server/client" + "go.temporal.io/server/client/admin" + "go.temporal.io/server/client/frontend" + "go.temporal.io/server/common" + "go.temporal.io/server/common/archiver" + "go.temporal.io/server/common/archiver/provider" + "go.temporal.io/server/common/cluster" + "go.temporal.io/server/common/config" + "go.temporal.io/server/common/convert" + "go.temporal.io/server/common/headers" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/log/tag" + "go.temporal.io/server/common/membership" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/namespace" + "go.temporal.io/server/common/persistence" + "go.temporal.io/server/common/persistence/serialization" + "go.temporal.io/server/common/persistence/versionhistory" + "go.temporal.io/server/common/persistence/visibility/manager" + "go.temporal.io/server/common/persistence/visibility/store/elasticsearch" + esclient "go.temporal.io/server/common/persistence/visibility/store/elasticsearch/client" + "go.temporal.io/server/common/persistence/visibility/store/standard/cassandra" + "go.temporal.io/server/common/primitives/timestamp" + "go.temporal.io/server/common/sdk" + "go.temporal.io/server/common/searchattribute" + "go.temporal.io/server/common/xdc" + "go.temporal.io/server/service/history/tasks" + "go.temporal.io/server/service/worker" + "go.temporal.io/server/service/worker/addsearchattributes" +) + +const ( + getNamespaceReplicationMessageBatchSize = 100 + defaultLastMessageID = -1 + listClustersPageSize = 100 +) + +type ( + // AdminHandler - gRPC handler interface for adminservice + AdminHandler struct { + status int32 + + logger log.Logger + numberOfHistoryShards int32 + ESClient esclient.Client + config *Config + namespaceDLQHandler namespace.DLQMessageHandler + eventSerializer serialization.Serializer + visibilityMgr manager.VisibilityManager + persistenceExecutionManager persistence.ExecutionManager + namespaceReplicationQueue persistence.NamespaceReplicationQueue + taskManager persistence.TaskManager + clusterMetadataManager persistence.ClusterMetadataManager + persistenceMetadataManager persistence.MetadataManager + clientFactory serverClient.Factory + clientBean serverClient.Bean + 
historyClient historyservice.HistoryServiceClient + sdkClientFactory sdk.ClientFactory + membershipMonitor membership.Monitor + hostInfoProvider membership.HostInfoProvider + metricsHandler metrics.Handler + namespaceRegistry namespace.Registry + saProvider searchattribute.Provider + saManager searchattribute.Manager + clusterMetadata cluster.Metadata + healthServer *health.Server + } + + NewAdminHandlerArgs struct { + PersistenceConfig *config.Persistence + Config *Config + NamespaceReplicationQueue persistence.NamespaceReplicationQueue + ReplicatorNamespaceReplicationQueue persistence.NamespaceReplicationQueue + EsClient esclient.Client + VisibilityMrg manager.VisibilityManager + Logger log.Logger + PersistenceExecutionManager persistence.ExecutionManager + TaskManager persistence.TaskManager + ClusterMetadataManager persistence.ClusterMetadataManager + PersistenceMetadataManager persistence.MetadataManager + ClientFactory serverClient.Factory + ClientBean serverClient.Bean + HistoryClient historyservice.HistoryServiceClient + sdkClientFactory sdk.ClientFactory + MembershipMonitor membership.Monitor + HostInfoProvider membership.HostInfoProvider + ArchiverProvider provider.ArchiverProvider + MetricsHandler metrics.Handler + NamespaceRegistry namespace.Registry + SaProvider searchattribute.Provider + SaManager searchattribute.Manager + ClusterMetadata cluster.Metadata + ArchivalMetadata archiver.ArchivalMetadata + HealthServer *health.Server + EventSerializer serialization.Serializer + TimeSource clock.TimeSource + } +) + +var ( + _ adminservice.AdminServiceServer = (*AdminHandler)(nil) + + resendStartEventID = int64(0) +) + +// NewAdminHandler creates a gRPC handler for the adminservice +func NewAdminHandler( + args NewAdminHandlerArgs, +) *AdminHandler { + namespaceReplicationTaskExecutor := namespace.NewReplicationTaskExecutor( + args.ClusterMetadata.GetCurrentClusterName(), + args.PersistenceMetadataManager, + args.Logger, + ) + + return &AdminHandler{ + logger: args.Logger, + status: common.DaemonStatusInitialized, + numberOfHistoryShards: args.PersistenceConfig.NumHistoryShards, + config: args.Config, + namespaceDLQHandler: namespace.NewDLQMessageHandler( + namespaceReplicationTaskExecutor, + args.NamespaceReplicationQueue, + args.Logger, + ), + eventSerializer: args.EventSerializer, + visibilityMgr: args.VisibilityMrg, + ESClient: args.EsClient, + persistenceExecutionManager: args.PersistenceExecutionManager, + namespaceReplicationQueue: args.NamespaceReplicationQueue, + taskManager: args.TaskManager, + clusterMetadataManager: args.ClusterMetadataManager, + persistenceMetadataManager: args.PersistenceMetadataManager, + clientFactory: args.ClientFactory, + clientBean: args.ClientBean, + historyClient: args.HistoryClient, + sdkClientFactory: args.sdkClientFactory, + membershipMonitor: args.MembershipMonitor, + hostInfoProvider: args.HostInfoProvider, + metricsHandler: args.MetricsHandler, + namespaceRegistry: args.NamespaceRegistry, + saProvider: args.SaProvider, + saManager: args.SaManager, + clusterMetadata: args.ClusterMetadata, + healthServer: args.HealthServer, + } +} + +// Start starts the handler +func (adh *AdminHandler) Start() { + if atomic.CompareAndSwapInt32( + &adh.status, + common.DaemonStatusInitialized, + common.DaemonStatusStarted, + ) { + adh.healthServer.SetServingStatus(AdminServiceName, healthpb.HealthCheckResponse_SERVING) + } + + // Start namespace replication queue cleanup + // If the queue does not start, we can still call stop() + 
adh.namespaceReplicationQueue.Start() +} + +// Stop stops the handler +func (adh *AdminHandler) Stop() { + if atomic.CompareAndSwapInt32( + &adh.status, + common.DaemonStatusStarted, + common.DaemonStatusStopped, + ) { + adh.healthServer.SetServingStatus(AdminServiceName, healthpb.HealthCheckResponse_NOT_SERVING) + } + + // Calling stop if the queue does not start is ok + adh.namespaceReplicationQueue.Stop() +} + +// AddSearchAttributes add search attribute to the cluster. +func (adh *AdminHandler) AddSearchAttributes( + ctx context.Context, + request *adminservice.AddSearchAttributesRequest, +) (_ *adminservice.AddSearchAttributesResponse, retError error) { + defer log.CapturePanic(adh.logger, &retError) + + // validate request + if request == nil { + return nil, errRequestNotSet + } + + if len(request.GetSearchAttributes()) == 0 { + return nil, errSearchAttributesNotSet + } + + indexName := request.GetIndexName() + if indexName == "" { + indexName = adh.visibilityMgr.GetIndexName() + } + + currentSearchAttributes, err := adh.saProvider.GetSearchAttributes(indexName, true) + if err != nil { + return nil, serviceerror.NewUnavailable(fmt.Sprintf(errUnableToGetSearchAttributesMessage, err)) + } + + for saName, saType := range request.GetSearchAttributes() { + if searchattribute.IsReserved(saName) { + return nil, serviceerror.NewInvalidArgument(fmt.Sprintf(errSearchAttributeIsReservedMessage, saName)) + } + if currentSearchAttributes.IsDefined(saName) { + return nil, serviceerror.NewInvalidArgument(fmt.Sprintf(errSearchAttributeAlreadyExistsMessage, saName)) + } + if _, ok := enumspb.IndexedValueType_name[int32(saType)]; !ok { + return nil, serviceerror.NewInvalidArgument(fmt.Sprintf(errUnknownSearchAttributeTypeMessage, saType)) + } + } + + // TODO (rodrigozhou): Remove condition `indexName == ""`. + // If indexName == "", then calling addSearchAttributesElasticsearch will + // register the search attributes in the cluster metadata if ES is up or if + // `skip-schema-update` is set. This is for backward compatibility using + // standard visibility. + if adh.visibilityMgr.HasStoreName(elasticsearch.PersistenceName) || indexName == "" { + err = adh.addSearchAttributesElasticsearch(ctx, request, indexName) + } else { + err = adh.addSearchAttributesSQL(ctx, request, currentSearchAttributes) + } + + if err != nil { + return nil, err + } + return &adminservice.AddSearchAttributesResponse{}, nil +} + +func (adh *AdminHandler) addSearchAttributesElasticsearch( + ctx context.Context, + request *adminservice.AddSearchAttributesRequest, + indexName string, +) error { + // Execute workflow. + wfParams := addsearchattributes.WorkflowParams{ + CustomAttributesToAdd: request.GetSearchAttributes(), + IndexName: indexName, + SkipSchemaUpdate: request.GetSkipSchemaUpdate(), + } + + sdkClient := adh.sdkClientFactory.GetSystemClient() + run, err := sdkClient.ExecuteWorkflow( + ctx, + sdkclient.StartWorkflowOptions{ + TaskQueue: worker.DefaultWorkerTaskQueue, + ID: addsearchattributes.WorkflowName, + }, + addsearchattributes.WorkflowName, + wfParams, + ) + if err != nil { + return serviceerror.NewUnavailable( + fmt.Sprintf(errUnableToStartWorkflowMessage, addsearchattributes.WorkflowName, err), + ) + } + + // Wait for workflow to complete. 
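+	// Note: ExecuteWorkflow above only starts the add-search-attributes system
+	// workflow; run.Get below blocks until that workflow finishes and surfaces
+	// its error, so this RPC reports success only once the attributes are
+	// actually registered.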
+ err = run.Get(ctx, nil) + if err != nil { + return serviceerror.NewUnavailable( + fmt.Sprintf(errWorkflowReturnedErrorMessage, addsearchattributes.WorkflowName, err), + ) + } + return nil +} + +func (adh *AdminHandler) addSearchAttributesSQL( + ctx context.Context, + request *adminservice.AddSearchAttributesRequest, + currentSearchAttributes searchattribute.NameTypeMap, +) error { + _, client, err := adh.clientFactory.NewLocalFrontendClientWithTimeout( + frontend.DefaultTimeout, + frontend.DefaultLongPollTimeout, + ) + if err != nil { + return serviceerror.NewUnavailable(fmt.Sprintf(errUnableToCreateFrontendClientMessage, err)) + } + + nsName := request.GetNamespace() + if nsName == "" { + return errNamespaceNotSet + } + resp, err := client.DescribeNamespace( + ctx, + &workflowservice.DescribeNamespaceRequest{Namespace: nsName}, + ) + if err != nil { + return serviceerror.NewUnavailable(fmt.Sprintf(errUnableToGetNamespaceInfoMessage, nsName)) + } + + dbCustomSearchAttributes := searchattribute.GetSqlDbIndexSearchAttributes().CustomSearchAttributes + cmCustomSearchAttributes := currentSearchAttributes.Custom() + upsertFieldToAliasMap := make(map[string]string) + fieldToAliasMap := resp.Config.CustomSearchAttributeAliases + aliasToFieldMap := util.InverseMap(fieldToAliasMap) + for saName, saType := range request.GetSearchAttributes() { + // check if alias is already in use + if _, ok := aliasToFieldMap[saName]; ok { + return serviceerror.NewAlreadyExist( + fmt.Sprintf(errSearchAttributeAlreadyExistsMessage, saName), + ) + } + // find the first available field for the given type + targetFieldName := "" + cntUsed := 0 + for fieldName, fieldType := range dbCustomSearchAttributes { + if fieldType != saType { + continue + } + // make sure the pre-allocated custom search attributes are created in cluster metadata + if _, ok := cmCustomSearchAttributes[fieldName]; !ok { + continue + } + if _, ok := fieldToAliasMap[fieldName]; ok { + cntUsed++ + } else if _, ok := upsertFieldToAliasMap[fieldName]; ok { + cntUsed++ + } else { + targetFieldName = fieldName + break + } + } + if targetFieldName == "" { + return serviceerror.NewInvalidArgument( + fmt.Sprintf(errTooManySearchAttributesMessage, cntUsed, saType.String()), + ) + } + upsertFieldToAliasMap[targetFieldName] = saName + } + + _, err = client.UpdateNamespace(ctx, &workflowservice.UpdateNamespaceRequest{ + Namespace: nsName, + Config: &namespacepb.NamespaceConfig{ + CustomSearchAttributeAliases: upsertFieldToAliasMap, + }, + }) + if err != nil && err.Error() == errCustomSearchAttributeFieldAlreadyAllocated.Error() { + return errRaceConditionAddingSearchAttributes + } + return err +} + +// RemoveSearchAttributes remove search attribute from the cluster. +func (adh *AdminHandler) RemoveSearchAttributes( + ctx context.Context, + request *adminservice.RemoveSearchAttributesRequest, +) (_ *adminservice.RemoveSearchAttributesResponse, retError error) { + defer log.CapturePanic(adh.logger, &retError) + + // validate request + if request == nil { + return nil, errRequestNotSet + } + + if len(request.GetSearchAttributes()) == 0 { + return nil, errSearchAttributesNotSet + } + + indexName := request.GetIndexName() + if indexName == "" { + indexName = adh.visibilityMgr.GetIndexName() + } + + currentSearchAttributes, err := adh.saProvider.GetSearchAttributes(indexName, true) + if err != nil { + return nil, serviceerror.NewUnavailable(fmt.Sprintf(errUnableToGetSearchAttributesMessage, err)) + } + + // TODO (rodrigozhou): Remove condition `indexName == ""`. 
+ // If indexName == "", then calling addSearchAttributesElasticsearch will + // register the search attributes in the cluster metadata if ES is up or if + // `skip-schema-update` is set. This is for backward compatibility using + // standard visibility. + if adh.visibilityMgr.HasStoreName(elasticsearch.PersistenceName) || indexName == "" { + err = adh.removeSearchAttributesElasticsearch(ctx, request, indexName, currentSearchAttributes) + } else { + err = adh.removeSearchAttributesSQL(ctx, request, currentSearchAttributes) + } + + if err != nil { + return nil, err + } + return &adminservice.RemoveSearchAttributesResponse{}, nil +} + +func (adh *AdminHandler) removeSearchAttributesElasticsearch( + ctx context.Context, + request *adminservice.RemoveSearchAttributesRequest, + indexName string, + currentSearchAttributes searchattribute.NameTypeMap, +) error { + newCustomSearchAttributes := maps.Clone(currentSearchAttributes.Custom()) + for _, saName := range request.GetSearchAttributes() { + if !currentSearchAttributes.IsDefined(saName) { + return serviceerror.NewInvalidArgument(fmt.Sprintf(errSearchAttributeDoesntExistMessage, saName)) + } + if _, ok := newCustomSearchAttributes[saName]; !ok { + return serviceerror.NewInvalidArgument(fmt.Sprintf(errUnableToRemoveNonCustomSearchAttributesMessage, saName)) + } + delete(newCustomSearchAttributes, saName) + } + + err := adh.saManager.SaveSearchAttributes(ctx, indexName, newCustomSearchAttributes) + if err != nil { + return serviceerror.NewUnavailable(fmt.Sprintf(errUnableToSaveSearchAttributesMessage, err)) + } + return nil +} + +func (adh *AdminHandler) removeSearchAttributesSQL( + ctx context.Context, + request *adminservice.RemoveSearchAttributesRequest, + currentSearchAttributes searchattribute.NameTypeMap, +) error { + _, client, err := adh.clientFactory.NewLocalFrontendClientWithTimeout( + frontend.DefaultTimeout, + frontend.DefaultLongPollTimeout, + ) + if err != nil { + return serviceerror.NewUnavailable(fmt.Sprintf(errUnableToCreateFrontendClientMessage, err)) + } + + nsName := request.GetNamespace() + if nsName == "" { + return errNamespaceNotSet + } + resp, err := client.DescribeNamespace( + ctx, + &workflowservice.DescribeNamespaceRequest{Namespace: nsName}, + ) + if err != nil { + return serviceerror.NewUnavailable(fmt.Sprintf(errUnableToGetNamespaceInfoMessage, nsName)) + } + + upsertFieldToAliasMap := make(map[string]string) + aliasToFieldMap := util.InverseMap(resp.Config.CustomSearchAttributeAliases) + for _, saName := range request.GetSearchAttributes() { + if fieldName, ok := aliasToFieldMap[saName]; ok { + upsertFieldToAliasMap[fieldName] = "" + continue + } + if currentSearchAttributes.IsDefined(saName) { + return serviceerror.NewInvalidArgument( + fmt.Sprintf(errUnableToRemoveNonCustomSearchAttributesMessage, saName), + ) + } + return serviceerror.NewNotFound(fmt.Sprintf(errSearchAttributeDoesntExistMessage, saName)) + } + + _, err = client.UpdateNamespace(ctx, &workflowservice.UpdateNamespaceRequest{ + Namespace: nsName, + Config: &namespacepb.NamespaceConfig{ + CustomSearchAttributeAliases: upsertFieldToAliasMap, + }, + }) + return err +} + +func (adh *AdminHandler) GetSearchAttributes( + ctx context.Context, + request *adminservice.GetSearchAttributesRequest, +) (_ *adminservice.GetSearchAttributesResponse, retError error) { + defer log.CapturePanic(adh.logger, &retError) + + if request == nil { + return nil, errRequestNotSet + } + + indexName := request.GetIndexName() + if indexName == "" { + indexName = 
adh.visibilityMgr.GetIndexName() + } + + searchAttributes, err := adh.saProvider.GetSearchAttributes(indexName, true) + if err != nil { + adh.logger.Error("getSearchAttributes error", tag.Error(err)) + return nil, serviceerror.NewUnavailable(fmt.Sprintf(errUnableToGetSearchAttributesMessage, err)) + } + + // TODO (rodrigozhou): Remove condition `indexName == ""`. + // If indexName == "", then calling addSearchAttributesElasticsearch will + // register the search attributes in the cluster metadata if ES is up or if + // `skip-schema-update` is set. This is for backward compatibility using + // standard visibility. + if adh.visibilityMgr.HasStoreName(elasticsearch.PersistenceName) || indexName == "" { + return adh.getSearchAttributesElasticsearch(ctx, indexName, searchAttributes) + } + return adh.getSearchAttributesSQL(ctx, request, searchAttributes) +} + +func (adh *AdminHandler) getSearchAttributesElasticsearch( + ctx context.Context, + indexName string, + searchAttributes searchattribute.NameTypeMap, +) (*adminservice.GetSearchAttributesResponse, error) { + var lastErr error + + sdkClient := adh.sdkClientFactory.GetSystemClient() + descResp, err := sdkClient.DescribeWorkflowExecution(ctx, addsearchattributes.WorkflowName, "") + var wfInfo *workflowpb.WorkflowExecutionInfo + if err != nil { + // NotFound can happen when no search attributes were added and the workflow has never been executed. + if _, isNotFound := err.(*serviceerror.NotFound); !isNotFound { + lastErr = serviceerror.NewUnavailable(fmt.Sprintf("unable to get %s workflow state: %v", addsearchattributes.WorkflowName, err)) + adh.logger.Error("getSearchAttributes error", tag.Error(lastErr)) + } + } else { + wfInfo = descResp.GetWorkflowExecutionInfo() + } + + var esMapping map[string]string + if adh.ESClient != nil { + esMapping, err = adh.ESClient.GetMapping(ctx, indexName) + if err != nil { + lastErr = serviceerror.NewUnavailable(fmt.Sprintf("unable to get mapping from Elasticsearch: %v", err)) + adh.logger.Error("getSearchAttributes error", tag.Error(lastErr)) + } + } + + if lastErr != nil { + return nil, lastErr + } + return &adminservice.GetSearchAttributesResponse{ + CustomAttributes: searchAttributes.Custom(), + SystemAttributes: searchAttributes.System(), + Mapping: esMapping, + AddWorkflowExecutionInfo: wfInfo, + }, nil +} + +func (adh *AdminHandler) getSearchAttributesSQL( + ctx context.Context, + request *adminservice.GetSearchAttributesRequest, + searchAttributes searchattribute.NameTypeMap, +) (*adminservice.GetSearchAttributesResponse, error) { + _, client, err := adh.clientFactory.NewLocalFrontendClientWithTimeout( + frontend.DefaultTimeout, + frontend.DefaultLongPollTimeout, + ) + if err != nil { + return nil, serviceerror.NewUnavailable(fmt.Sprintf(errUnableToCreateFrontendClientMessage, err)) + } + + nsName := request.GetNamespace() + if nsName == "" { + return nil, errNamespaceNotSet + } + resp, err := client.DescribeNamespace( + ctx, + &workflowservice.DescribeNamespaceRequest{Namespace: nsName}, + ) + if err != nil { + return nil, serviceerror.NewUnavailable( + fmt.Sprintf(errUnableToGetNamespaceInfoMessage, nsName), + ) + } + + fieldToAliasMap := resp.Config.CustomSearchAttributeAliases + customSearchAttributes := make(map[string]enumspb.IndexedValueType) + for field, tp := range searchAttributes.Custom() { + if alias, ok := fieldToAliasMap[field]; ok { + customSearchAttributes[alias] = tp + } + } + return &adminservice.GetSearchAttributesResponse{ + CustomAttributes: customSearchAttributes, + 
SystemAttributes: searchAttributes.System(), + }, nil +} + +func (adh *AdminHandler) RebuildMutableState(ctx context.Context, request *adminservice.RebuildMutableStateRequest) (_ *adminservice.RebuildMutableStateResponse, retError error) { + defer log.CapturePanic(adh.logger, &retError) + + if request == nil { + return nil, errRequestNotSet + } + + if err := validateExecution(request.Execution); err != nil { + return nil, err + } + + namespaceID, err := adh.namespaceRegistry.GetNamespaceID(namespace.Name(request.GetNamespace())) + if err != nil { + return nil, err + } + + if _, err := adh.historyClient.RebuildMutableState(ctx, &historyservice.RebuildMutableStateRequest{ + NamespaceId: namespaceID.String(), + Execution: request.Execution, + }); err != nil { + return nil, err + } + return &adminservice.RebuildMutableStateResponse{}, nil +} + +// DescribeMutableState returns information about the specified workflow execution. +func (adh *AdminHandler) DescribeMutableState(ctx context.Context, request *adminservice.DescribeMutableStateRequest) (_ *adminservice.DescribeMutableStateResponse, retError error) { + defer log.CapturePanic(adh.logger, &retError) + + if request == nil { + return nil, errRequestNotSet + } + + if err := validateExecution(request.Execution); err != nil { + return nil, err + } + + namespaceID, err := adh.namespaceRegistry.GetNamespaceID(namespace.Name(request.GetNamespace())) + if err != nil { + return nil, err + } + + shardID := common.WorkflowIDToHistoryShard(namespaceID.String(), request.Execution.WorkflowId, adh.numberOfHistoryShards) + shardIDStr := convert.Int32ToString(shardID) + + resolver, err := adh.membershipMonitor.GetResolver(primitives.HistoryService) + if err != nil { + return nil, err + } + historyHost, err := resolver.Lookup(shardIDStr) + if err != nil { + return nil, err + } + + historyAddr := historyHost.GetAddress() + historyResponse, err := adh.historyClient.DescribeMutableState(ctx, &historyservice.DescribeMutableStateRequest{ + NamespaceId: namespaceID.String(), + Execution: request.Execution, + }) + + if err != nil { + return nil, err + } + return &adminservice.DescribeMutableStateResponse{ + ShardId: shardIDStr, + HistoryAddr: historyAddr, + DatabaseMutableState: historyResponse.GetDatabaseMutableState(), + CacheMutableState: historyResponse.GetCacheMutableState(), + }, nil +} + +// RemoveTask returns information about the internal states of a history host +func (adh *AdminHandler) RemoveTask(ctx context.Context, request *adminservice.RemoveTaskRequest) (_ *adminservice.RemoveTaskResponse, retError error) { + defer log.CapturePanic(adh.logger, &retError) + + if request == nil { + return nil, errRequestNotSet + } + _, err := adh.historyClient.RemoveTask(ctx, &historyservice.RemoveTaskRequest{ + ShardId: request.GetShardId(), + Category: request.GetCategory(), + TaskId: request.GetTaskId(), + VisibilityTime: request.GetVisibilityTime(), + }) + return &adminservice.RemoveTaskResponse{}, err +} + +// GetShard returns information about the internal states of a shard +func (adh *AdminHandler) GetShard(ctx context.Context, request *adminservice.GetShardRequest) (_ *adminservice.GetShardResponse, retError error) { + defer log.CapturePanic(adh.logger, &retError) + if request == nil { + return nil, errRequestNotSet + } + resp, err := adh.historyClient.GetShard(ctx, &historyservice.GetShardRequest{ShardId: request.GetShardId()}) + if err != nil { + return nil, err + } + return &adminservice.GetShardResponse{ShardInfo: resp.ShardInfo}, nil +} + +// CloseShard 
returns information about the internal states of a history host +func (adh *AdminHandler) CloseShard(ctx context.Context, request *adminservice.CloseShardRequest) (_ *adminservice.CloseShardResponse, retError error) { + defer log.CapturePanic(adh.logger, &retError) + + if request == nil { + return nil, errRequestNotSet + } + _, err := adh.historyClient.CloseShard(ctx, &historyservice.CloseShardRequest{ShardId: request.GetShardId()}) + return &adminservice.CloseShardResponse{}, err +} + +func (adh *AdminHandler) ListHistoryTasks( + ctx context.Context, + request *adminservice.ListHistoryTasksRequest, +) (_ *adminservice.ListHistoryTasksResponse, retError error) { + defer log.CapturePanic(adh.logger, &retError) + + if request == nil { + return nil, errRequestNotSet + } + taskRange := request.GetTaskRange() + if taskRange == nil { + return nil, errTaskRangeNotSet + } + + taskCategory, ok := tasks.GetCategoryByID(int32(request.Category)) + if !ok { + return nil, &serviceerror.InvalidArgument{ + Message: fmt.Sprintf("unknown task category: %v", request.Category), + } + } + + var minTaskKey, maxTaskKey tasks.Key + if taskRange.InclusiveMinTaskKey != nil { + minTaskKey = tasks.NewKey( + timestamp.TimeValue(taskRange.InclusiveMinTaskKey.FireTime), + taskRange.InclusiveMinTaskKey.TaskId, + ) + if err := tasks.ValidateKey(minTaskKey); err != nil { + return nil, &serviceerror.InvalidArgument{ + Message: fmt.Sprintf("invalid minTaskKey: %v", err.Error()), + } + } + } + if taskRange.ExclusiveMaxTaskKey != nil { + maxTaskKey = tasks.NewKey( + timestamp.TimeValue(taskRange.ExclusiveMaxTaskKey.FireTime), + taskRange.ExclusiveMaxTaskKey.TaskId, + ) + if err := tasks.ValidateKey(maxTaskKey); err != nil { + return nil, &serviceerror.InvalidArgument{ + Message: fmt.Sprintf("invalid maxTaskKey: %v", err.Error()), + } + } + } + + // Queue reader registration is only meaning for history service + // we are on frontend service, so no need to do registration + // TODO: move the logic to history service + + resp, err := adh.persistenceExecutionManager.GetHistoryTasks(ctx, &persistence.GetHistoryTasksRequest{ + ShardID: request.ShardId, + TaskCategory: taskCategory, + ReaderID: common.DefaultQueueReaderID, + InclusiveMinTaskKey: minTaskKey, + ExclusiveMaxTaskKey: maxTaskKey, + BatchSize: int(request.BatchSize), + NextPageToken: request.NextPageToken, + }) + if err != nil { + return nil, err + } + + return &adminservice.ListHistoryTasksResponse{ + Tasks: toAdminTask(resp.Tasks), + NextPageToken: resp.NextPageToken, + }, nil +} + +func toAdminTask(tasks []tasks.Task) []*adminservice.Task { + var adminTasks []*adminservice.Task + for _, task := range tasks { + adminTasks = append(adminTasks, &adminservice.Task{ + NamespaceId: task.GetNamespaceID(), + WorkflowId: task.GetWorkflowID(), + RunId: task.GetRunID(), + TaskId: task.GetTaskID(), + TaskType: task.GetType(), + FireTime: timestamp.TimePtr(task.GetKey().FireTime), + Version: task.GetVersion(), + }) + } + return adminTasks +} + +// DescribeHistoryHost returns information about the internal states of a history host +func (adh *AdminHandler) DescribeHistoryHost(ctx context.Context, request *adminservice.DescribeHistoryHostRequest) (_ *adminservice.DescribeHistoryHostResponse, retError error) { + defer log.CapturePanic(adh.logger, &retError) + + if request == nil { + return nil, errRequestNotSet + } + + flagsCount := 0 + if request.ShardId != 0 { + flagsCount++ + } + if len(request.Namespace) != 0 && request.WorkflowExecution != nil { + flagsCount++ + } + if 
len(request.GetHostAddress()) > 0 { + flagsCount++ + } + if flagsCount != 1 { + return nil, serviceerror.NewInvalidArgument("must provide one and only one: shard id or namespace & workflow id or host address") + } + + var err error + var namespaceID namespace.ID + if request.WorkflowExecution != nil { + namespaceID, err = adh.namespaceRegistry.GetNamespaceID(namespace.Name(request.Namespace)) + if err != nil { + return nil, err + } + + if err := validateExecution(request.WorkflowExecution); err != nil { + return nil, err + } + } + + resp, err := adh.historyClient.DescribeHistoryHost(ctx, &historyservice.DescribeHistoryHostRequest{ + HostAddress: request.GetHostAddress(), + ShardId: request.GetShardId(), + NamespaceId: namespaceID.String(), + WorkflowExecution: request.GetWorkflowExecution(), + }) + + if resp == nil { + return nil, err + } + + return &adminservice.DescribeHistoryHostResponse{ + ShardsNumber: resp.GetShardsNumber(), + ShardIds: resp.GetShardIds(), + NamespaceCache: resp.GetNamespaceCache(), + Address: resp.GetAddress(), + }, err +} + +// GetWorkflowExecutionRawHistoryV2 - retrieves the history of workflow execution +func (adh *AdminHandler) GetWorkflowExecutionRawHistoryV2(ctx context.Context, request *adminservice.GetWorkflowExecutionRawHistoryV2Request) (_ *adminservice.GetWorkflowExecutionRawHistoryV2Response, retError error) { + defer log.CapturePanic(adh.logger, &retError) + + if err := adh.validateGetWorkflowExecutionRawHistoryV2Request( + request, + ); err != nil { + return nil, err + } + + ns, err := adh.namespaceRegistry.GetNamespaceByID(namespace.ID(request.GetNamespaceId())) + if err != nil { + return nil, err + } + + execution := request.Execution + var pageToken *tokenspb.RawHistoryContinuation + var targetVersionHistory *historyspb.VersionHistory + if request.NextPageToken == nil { + response, err := adh.historyClient.GetMutableState(ctx, &historyservice.GetMutableStateRequest{ + NamespaceId: ns.ID().String(), + Execution: execution, + }) + if err != nil { + return nil, err + } + + targetVersionHistory, err = adh.setRequestDefaultValueAndGetTargetVersionHistory( + request, + response.GetVersionHistories(), + ) + if err != nil { + return nil, err + } + + pageToken = generatePaginationToken(request, response.GetVersionHistories()) + } else { + pageToken, err = deserializeRawHistoryToken(request.NextPageToken) + if err != nil { + return nil, err + } + versionHistories := pageToken.GetVersionHistories() + if versionHistories == nil { + return nil, errInvalidVersionHistories + } + targetVersionHistory, err = adh.setRequestDefaultValueAndGetTargetVersionHistory( + request, + versionHistories, + ) + if err != nil { + return nil, err + } + } + + if err := validatePaginationToken( + request, + pageToken, + ); err != nil { + return nil, err + } + + if pageToken.GetStartEventId()+1 == pageToken.GetEndEventId() { + // API is exclusive-exclusive. Return empty response here. 
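+	// For example, startEventId=9 with endEventId=10 describes the open range
+	// (9, 10), which contains no event IDs, so there is nothing to read from
+	// the history store.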
+ return &adminservice.GetWorkflowExecutionRawHistoryV2Response{ + HistoryBatches: []*commonpb.DataBlob{}, + NextPageToken: nil, // no further pagination + VersionHistory: targetVersionHistory, + }, nil + } + pageSize := int(request.GetMaximumPageSize()) + shardID := common.WorkflowIDToHistoryShard( + ns.ID().String(), + execution.GetWorkflowId(), + adh.numberOfHistoryShards, + ) + rawHistoryResponse, err := adh.persistenceExecutionManager.ReadRawHistoryBranch(ctx, &persistence.ReadHistoryBranchRequest{ + BranchToken: targetVersionHistory.GetBranchToken(), + // GetWorkflowExecutionRawHistoryV2 is exclusive exclusive. + // ReadRawHistoryBranch is inclusive exclusive. + MinEventID: pageToken.GetStartEventId() + 1, + MaxEventID: pageToken.GetEndEventId(), + PageSize: pageSize, + NextPageToken: pageToken.PersistenceToken, + ShardID: shardID, + }) + if err != nil { + if _, isNotFound := err.(*serviceerror.NotFound); isNotFound { + // when no events can be returned from DB, DB layer will return + // EntityNotExistsError, this API shall return empty response + return &adminservice.GetWorkflowExecutionRawHistoryV2Response{ + HistoryBatches: []*commonpb.DataBlob{}, + NextPageToken: nil, // no further pagination + VersionHistory: targetVersionHistory, + }, nil + } + return nil, err + } + + pageToken.PersistenceToken = rawHistoryResponse.NextPageToken + size := rawHistoryResponse.Size + adh.metricsHandler.Histogram(metrics.HistorySize.GetMetricName(), metrics.HistorySize.GetMetricUnit()).Record( + int64(size), + metrics.NamespaceTag(ns.Name().String()), + metrics.OperationTag(metrics.AdminGetWorkflowExecutionRawHistoryV2Scope), + ) + + result := &adminservice.GetWorkflowExecutionRawHistoryV2Response{ + HistoryBatches: rawHistoryResponse.HistoryEventBlobs, + VersionHistory: targetVersionHistory, + HistoryNodeIds: rawHistoryResponse.NodeIDs, + } + if len(pageToken.PersistenceToken) == 0 { + result.NextPageToken = nil + } else { + result.NextPageToken, err = serializeRawHistoryToken(pageToken) + if err != nil { + return nil, err + } + } + + return result, nil +} + +// DescribeCluster return information about a temporal cluster +func (adh *AdminHandler) DescribeCluster( + ctx context.Context, + request *adminservice.DescribeClusterRequest, +) (_ *adminservice.DescribeClusterResponse, retError error) { + defer log.CapturePanic(adh.logger, &retError) + + membershipInfo := &clusterspb.MembershipInfo{} + if monitor := adh.membershipMonitor; monitor != nil { + membershipInfo.CurrentHost = &clusterspb.HostInfo{ + Identity: adh.hostInfoProvider.HostInfo().Identity(), + } + + members, err := monitor.GetReachableMembers() + if err != nil { + return nil, err + } + + membershipInfo.ReachableMembers = members + + var rings []*clusterspb.RingInfo + for _, role := range []primitives.ServiceName{ + primitives.FrontendService, + primitives.InternalFrontendService, + primitives.HistoryService, + primitives.MatchingService, + primitives.WorkerService, + } { + resolver, err := monitor.GetResolver(role) + if err != nil { + if role == primitives.InternalFrontendService { + continue // this one is optional + } + return nil, err + } + + var servers []*clusterspb.HostInfo + for _, server := range resolver.Members() { + servers = append(servers, &clusterspb.HostInfo{ + Identity: server.Identity(), + }) + } + + rings = append(rings, &clusterspb.RingInfo{ + Role: string(role), + MemberCount: int32(resolver.MemberCount()), + Members: servers, + }) + } + membershipInfo.Rings = rings + } + + if len(request.ClusterName) == 0 { + 
request.ClusterName = adh.clusterMetadata.GetCurrentClusterName() + } + metadata, err := adh.clusterMetadataManager.GetClusterMetadata( + ctx, + &persistence.GetClusterMetadataRequest{ClusterName: request.GetClusterName()}, + ) + if err != nil { + return nil, err + } + + return &adminservice.DescribeClusterResponse{ + SupportedClients: headers.SupportedClients, + ServerVersion: headers.ServerVersion, + MembershipInfo: membershipInfo, + ClusterId: metadata.GetClusterId(), + ClusterName: metadata.GetClusterName(), + HistoryShardCount: metadata.GetHistoryShardCount(), + PersistenceStore: adh.persistenceExecutionManager.GetName(), + VisibilityStore: strings.Join(adh.visibilityMgr.GetStoreNames(), ","), + VersionInfo: metadata.GetVersionInfo(), + FailoverVersionIncrement: metadata.GetFailoverVersionIncrement(), + InitialFailoverVersion: metadata.GetInitialFailoverVersion(), + IsGlobalNamespaceEnabled: metadata.GetIsGlobalNamespaceEnabled(), + Tags: metadata.GetTags(), + }, nil +} + +// ListClusters return information about temporal clusters +// TODO: Remove this API after migrate tctl to use operator handler +func (adh *AdminHandler) ListClusters( + ctx context.Context, + request *adminservice.ListClustersRequest, +) (_ *adminservice.ListClustersResponse, retError error) { + defer log.CapturePanic(adh.logger, &retError) + + if request == nil { + return nil, errRequestNotSet + } + if request.GetPageSize() <= 0 { + request.PageSize = listClustersPageSize + } + + resp, err := adh.clusterMetadataManager.ListClusterMetadata(ctx, &persistence.ListClusterMetadataRequest{ + PageSize: int(request.GetPageSize()), + NextPageToken: request.GetNextPageToken(), + }) + if err != nil { + return nil, err + } + + var clusterMetadataList []*persistencespb.ClusterMetadata + for _, clusterResp := range resp.ClusterMetadata { + clusterMetadataList = append(clusterMetadataList, &clusterResp.ClusterMetadata) + } + return &adminservice.ListClustersResponse{ + Clusters: clusterMetadataList, + NextPageToken: resp.NextPageToken, + }, nil +} + +// ListClusterMembers +// TODO: Remove this API after migrate tctl to use operator handler +func (adh *AdminHandler) ListClusterMembers( + ctx context.Context, + request *adminservice.ListClusterMembersRequest, +) (_ *adminservice.ListClusterMembersResponse, retError error) { + defer log.CapturePanic(adh.logger, &retError) + + if request == nil { + return nil, errRequestNotSet + } + + metadataMgr := adh.clusterMetadataManager + + heartbitRef := request.GetLastHeartbeatWithin() + var heartbit time.Duration + if heartbitRef != nil { + heartbit = *heartbitRef + } + startedTimeRef := request.GetSessionStartedAfterTime() + var startedTime time.Time + if startedTimeRef != nil { + startedTime = *startedTimeRef + } + + resp, err := metadataMgr.GetClusterMembers(ctx, &persistence.GetClusterMembersRequest{ + LastHeartbeatWithin: heartbit, + RPCAddressEquals: net.ParseIP(request.GetRpcAddress()), + HostIDEquals: uuid.Parse(request.GetHostId()), + RoleEquals: persistence.ServiceType(request.GetRole()), + SessionStartedAfter: startedTime, + PageSize: int(request.GetPageSize()), + NextPageToken: request.GetNextPageToken(), + }) + if err != nil { + return nil, err + } + + var activeMembers []*clusterspb.ClusterMember + for _, member := range resp.ActiveMembers { + activeMembers = append(activeMembers, &clusterspb.ClusterMember{ + Role: enumsspb.ClusterMemberRole(member.Role), + HostId: member.HostID.String(), + RpcAddress: member.RPCAddress.String(), + RpcPort: int32(member.RPCPort), + 
SessionStartTime: &member.SessionStart, + LastHeartbitTime: &member.LastHeartbeat, + RecordExpiryTime: &member.RecordExpiry, + }) + } + + return &adminservice.ListClusterMembersResponse{ + ActiveMembers: activeMembers, + NextPageToken: resp.NextPageToken, + }, nil +} + +// AddOrUpdateRemoteCluster +// TODO: Remove this API after migrate tctl to use operator handler +func (adh *AdminHandler) AddOrUpdateRemoteCluster( + ctx context.Context, + request *adminservice.AddOrUpdateRemoteClusterRequest, +) (_ *adminservice.AddOrUpdateRemoteClusterResponse, retError error) { + defer log.CapturePanic(adh.logger, &retError) + + adminClient := adh.clientFactory.NewRemoteAdminClientWithTimeout( + request.GetFrontendAddress(), + admin.DefaultTimeout, + admin.DefaultLargeTimeout, + ) + + // Fetch cluster metadata from remote cluster + resp, err := adminClient.DescribeCluster(ctx, &adminservice.DescribeClusterRequest{}) + if err != nil { + return nil, err + } + + err = adh.validateRemoteClusterMetadata(resp) + if err != nil { + return nil, err + } + + var updateRequestVersion int64 = 0 + clusterMetadataMrg := adh.clusterMetadataManager + clusterData, err := clusterMetadataMrg.GetClusterMetadata( + ctx, + &persistence.GetClusterMetadataRequest{ClusterName: resp.GetClusterName()}, + ) + switch err.(type) { + case nil: + updateRequestVersion = clusterData.Version + case *serviceerror.NotFound: + updateRequestVersion = 0 + default: + return nil, err + } + + applied, err := clusterMetadataMrg.SaveClusterMetadata(ctx, &persistence.SaveClusterMetadataRequest{ + ClusterMetadata: persistencespb.ClusterMetadata{ + ClusterName: resp.GetClusterName(), + HistoryShardCount: resp.GetHistoryShardCount(), + ClusterId: resp.GetClusterId(), + ClusterAddress: request.GetFrontendAddress(), + FailoverVersionIncrement: resp.GetFailoverVersionIncrement(), + InitialFailoverVersion: resp.GetInitialFailoverVersion(), + IsGlobalNamespaceEnabled: resp.GetIsGlobalNamespaceEnabled(), + IsConnectionEnabled: request.GetEnableRemoteClusterConnection(), + Tags: resp.GetTags(), + }, + Version: updateRequestVersion, + }) + if err != nil { + return nil, err + } + if !applied { + return nil, serviceerror.NewInvalidArgument( + "Cannot update remote cluster due to update immutable fields") + } + return &adminservice.AddOrUpdateRemoteClusterResponse{}, nil +} + +// RemoveRemoteCluster +// TODO: Remove this API after migrate tctl to use operator handler +func (adh *AdminHandler) RemoveRemoteCluster( + ctx context.Context, + request *adminservice.RemoveRemoteClusterRequest, +) (_ *adminservice.RemoveRemoteClusterResponse, retError error) { + defer log.CapturePanic(adh.logger, &retError) + + if err := adh.clusterMetadataManager.DeleteClusterMetadata( + ctx, + &persistence.DeleteClusterMetadataRequest{ClusterName: request.GetClusterName()}, + ); err != nil { + return nil, err + } + return &adminservice.RemoveRemoteClusterResponse{}, nil +} + +// GetReplicationMessages returns new replication tasks since the read level provided in the token. 
+func (adh *AdminHandler) GetReplicationMessages(ctx context.Context, request *adminservice.GetReplicationMessagesRequest) (_ *adminservice.GetReplicationMessagesResponse, retError error) { + defer log.CapturePanic(adh.logger, &retError) + + if request == nil { + return nil, errRequestNotSet + } + if request.GetClusterName() == "" { + return nil, errClusterNameNotSet + } + + resp, err := adh.historyClient.GetReplicationMessages(ctx, &historyservice.GetReplicationMessagesRequest{ + Tokens: request.GetTokens(), + ClusterName: request.GetClusterName(), + }) + if err != nil { + return nil, err + } + return &adminservice.GetReplicationMessagesResponse{ShardMessages: resp.GetShardMessages()}, nil +} + +// GetNamespaceReplicationMessages returns new namespace replication tasks since last retrieved task ID. +func (adh *AdminHandler) GetNamespaceReplicationMessages(ctx context.Context, request *adminservice.GetNamespaceReplicationMessagesRequest) (_ *adminservice.GetNamespaceReplicationMessagesResponse, retError error) { + defer log.CapturePanic(adh.logger, &retError) + + if request == nil { + return nil, errRequestNotSet + } + + if adh.namespaceReplicationQueue == nil { + return nil, errors.New("namespace replication queue not enabled for cluster") + } + + lastMessageID := request.GetLastRetrievedMessageId() + if request.GetLastRetrievedMessageId() == defaultLastMessageID { + if clusterAckLevels, err := adh.namespaceReplicationQueue.GetAckLevels(ctx); err == nil { + if ackLevel, ok := clusterAckLevels[request.GetClusterName()]; ok { + lastMessageID = ackLevel + } + } + } + + replicationTasks, lastMessageID, err := adh.namespaceReplicationQueue.GetReplicationMessages( + ctx, + lastMessageID, + getNamespaceReplicationMessageBatchSize, + ) + if err != nil { + return nil, err + } + + if request.GetLastProcessedMessageId() != defaultLastMessageID { + if err := adh.namespaceReplicationQueue.UpdateAckLevel( + ctx, + request.GetLastProcessedMessageId(), + request.GetClusterName(), + ); err != nil { + adh.logger.Warn("Failed to update namespace replication queue ack level", + tag.TaskID(request.GetLastProcessedMessageId()), + tag.ClusterName(request.GetClusterName())) + } + } + + return &adminservice.GetNamespaceReplicationMessagesResponse{ + Messages: &replicationspb.ReplicationMessages{ + ReplicationTasks: replicationTasks, + LastRetrievedMessageId: lastMessageID, + }, + }, nil +} + +// GetDLQReplicationMessages returns new replication tasks based on the dlq info. 
+func (adh *AdminHandler) GetDLQReplicationMessages(ctx context.Context, request *adminservice.GetDLQReplicationMessagesRequest) (_ *adminservice.GetDLQReplicationMessagesResponse, retError error) { + defer log.CapturePanic(adh.logger, &retError) + + if request == nil { + return nil, errRequestNotSet + } + if len(request.GetTaskInfos()) == 0 { + return nil, errEmptyReplicationInfo + } + + resp, err := adh.historyClient.GetDLQReplicationMessages(ctx, &historyservice.GetDLQReplicationMessagesRequest{TaskInfos: request.GetTaskInfos()}) + if err != nil { + return nil, err + } + return &adminservice.GetDLQReplicationMessagesResponse{ReplicationTasks: resp.GetReplicationTasks()}, nil +} + +// ReapplyEvents applies stale events to the current workflow and the current run +func (adh *AdminHandler) ReapplyEvents(ctx context.Context, request *adminservice.ReapplyEventsRequest) (_ *adminservice.ReapplyEventsResponse, retError error) { + defer log.CapturePanic(adh.logger, &retError) + if request == nil { + return nil, errRequestNotSet + } + if request.WorkflowExecution == nil { + return nil, errExecutionNotSet + } + if request.GetWorkflowExecution().GetWorkflowId() == "" { + return nil, errWorkflowIDNotSet + } + if request.GetEvents() == nil { + return nil, errWorkflowIDNotSet + } + namespaceEntry, err := adh.namespaceRegistry.GetNamespaceByID(namespace.ID(request.GetNamespaceId())) + if err != nil { + return nil, err + } + + _, err = adh.historyClient.ReapplyEvents(ctx, &historyservice.ReapplyEventsRequest{ + NamespaceId: namespaceEntry.ID().String(), + Request: request, + }) + if err != nil { + return nil, err + } + return &adminservice.ReapplyEventsResponse{}, nil +} + +// GetDLQMessages reads messages from DLQ +func (adh *AdminHandler) GetDLQMessages( + ctx context.Context, + request *adminservice.GetDLQMessagesRequest, +) (resp *adminservice.GetDLQMessagesResponse, retErr error) { + defer log.CapturePanic(adh.logger, &retErr) + if request == nil { + return nil, errRequestNotSet + } + + if request.GetMaximumPageSize() <= 0 { + request.MaximumPageSize = common.ReadDLQMessagesPageSize + } + + if request.GetInclusiveEndMessageId() <= 0 { + request.InclusiveEndMessageId = common.EndMessageID + } + + switch request.GetType() { + case enumsspb.DEAD_LETTER_QUEUE_TYPE_REPLICATION: + resp, err := adh.historyClient.GetDLQMessages(ctx, &historyservice.GetDLQMessagesRequest{ + Type: request.GetType(), + ShardId: request.GetShardId(), + SourceCluster: request.GetSourceCluster(), + InclusiveEndMessageId: request.GetInclusiveEndMessageId(), + MaximumPageSize: request.GetMaximumPageSize(), + NextPageToken: request.GetNextPageToken(), + }) + + if resp == nil { + return nil, err + } + + return &adminservice.GetDLQMessagesResponse{ + Type: resp.GetType(), + ReplicationTasks: resp.GetReplicationTasks(), + ReplicationTasksInfo: resp.GetReplicationTasksInfo(), + NextPageToken: resp.GetNextPageToken(), + }, err + case enumsspb.DEAD_LETTER_QUEUE_TYPE_NAMESPACE: + tasks, token, err := adh.namespaceDLQHandler.Read( + ctx, + request.GetInclusiveEndMessageId(), + int(request.GetMaximumPageSize()), + request.GetNextPageToken()) + if err != nil { + return nil, err + } + + return &adminservice.GetDLQMessagesResponse{ + ReplicationTasks: tasks, + NextPageToken: token, + }, nil + default: + return nil, errDLQTypeIsNotSupported + } +} + +// PurgeDLQMessages purge messages from DLQ +func (adh *AdminHandler) PurgeDLQMessages( + ctx context.Context, + request *adminservice.PurgeDLQMessagesRequest, +) (_ 
*adminservice.PurgeDLQMessagesResponse, err error) { + defer log.CapturePanic(adh.logger, &err) + if request == nil { + return nil, errRequestNotSet + } + + if request.GetInclusiveEndMessageId() <= 0 { + request.InclusiveEndMessageId = common.EndMessageID + } + + switch request.GetType() { + case enumsspb.DEAD_LETTER_QUEUE_TYPE_REPLICATION: + resp, err := adh.historyClient.PurgeDLQMessages(ctx, &historyservice.PurgeDLQMessagesRequest{ + Type: request.GetType(), + ShardId: request.GetShardId(), + SourceCluster: request.GetSourceCluster(), + InclusiveEndMessageId: request.GetInclusiveEndMessageId(), + }) + + if resp == nil { + return nil, err + } + + return &adminservice.PurgeDLQMessagesResponse{}, err + case enumsspb.DEAD_LETTER_QUEUE_TYPE_NAMESPACE: + err := adh.namespaceDLQHandler.Purge(ctx, request.GetInclusiveEndMessageId()) + if err != nil { + return nil, err + } + + return &adminservice.PurgeDLQMessagesResponse{}, err + default: + return nil, errDLQTypeIsNotSupported + } +} + +// MergeDLQMessages merges DLQ messages +func (adh *AdminHandler) MergeDLQMessages( + ctx context.Context, + request *adminservice.MergeDLQMessagesRequest, +) (resp *adminservice.MergeDLQMessagesResponse, err error) { + defer log.CapturePanic(adh.logger, &err) + if request == nil { + return nil, errRequestNotSet + } + + if request.GetInclusiveEndMessageId() <= 0 { + request.InclusiveEndMessageId = common.EndMessageID + } + + switch request.GetType() { + case enumsspb.DEAD_LETTER_QUEUE_TYPE_REPLICATION: + resp, err := adh.historyClient.MergeDLQMessages(ctx, &historyservice.MergeDLQMessagesRequest{ + Type: request.GetType(), + ShardId: request.GetShardId(), + SourceCluster: request.GetSourceCluster(), + InclusiveEndMessageId: request.GetInclusiveEndMessageId(), + MaximumPageSize: request.GetMaximumPageSize(), + NextPageToken: request.GetNextPageToken(), + }) + if resp == nil { + return nil, err + } + + return &adminservice.MergeDLQMessagesResponse{ + NextPageToken: request.GetNextPageToken(), + }, nil + case enumsspb.DEAD_LETTER_QUEUE_TYPE_NAMESPACE: + token, err := adh.namespaceDLQHandler.Merge( + ctx, + request.GetInclusiveEndMessageId(), + int(request.GetMaximumPageSize()), + request.GetNextPageToken(), + ) + if err != nil { + return nil, err + } + + return &adminservice.MergeDLQMessagesResponse{ + NextPageToken: token, + }, nil + default: + return nil, errDLQTypeIsNotSupported + } +} + +// RefreshWorkflowTasks re-generates the workflow tasks +func (adh *AdminHandler) RefreshWorkflowTasks( + ctx context.Context, + request *adminservice.RefreshWorkflowTasksRequest, +) (_ *adminservice.RefreshWorkflowTasksResponse, err error) { + defer log.CapturePanic(adh.logger, &err) + + if request == nil { + return nil, errRequestNotSet + } + if err := validateExecution(request.Execution); err != nil { + return nil, err + } + namespaceEntry, err := adh.namespaceRegistry.GetNamespaceByID(namespace.ID(request.GetNamespaceId())) + if err != nil { + return nil, err + } + + _, err = adh.historyClient.RefreshWorkflowTasks(ctx, &historyservice.RefreshWorkflowTasksRequest{ + NamespaceId: namespaceEntry.ID().String(), + Request: request, + }) + if err != nil { + return nil, err + } + return &adminservice.RefreshWorkflowTasksResponse{}, nil +} + +// ResendReplicationTasks requests replication task from remote cluster +func (adh *AdminHandler) ResendReplicationTasks( + ctx context.Context, + request *adminservice.ResendReplicationTasksRequest, +) (_ *adminservice.ResendReplicationTasksResponse, err error) { + defer 
log.CapturePanic(adh.logger, &err) + + if request == nil { + return nil, errRequestNotSet + } + resender := xdc.NewNDCHistoryResender( + adh.namespaceRegistry, + adh.clientBean, + func(ctx context.Context, request *historyservice.ReplicateEventsV2Request) error { + _, err1 := adh.historyClient.ReplicateEventsV2(ctx, request) + return err1 + }, + adh.eventSerializer, + nil, + adh.logger, + ) + if err := resender.SendSingleWorkflowHistory( + ctx, + request.GetRemoteCluster(), + namespace.ID(request.GetNamespaceId()), + request.GetWorkflowId(), + request.GetRunId(), + resendStartEventID, + request.StartVersion, + common.EmptyEventID, + common.EmptyVersion, + ); err != nil { + return nil, err + } + return &adminservice.ResendReplicationTasksResponse{}, nil +} + +// GetTaskQueueTasks returns tasks from task queue +func (adh *AdminHandler) GetTaskQueueTasks( + ctx context.Context, + request *adminservice.GetTaskQueueTasksRequest, +) (_ *adminservice.GetTaskQueueTasksResponse, err error) { + defer log.CapturePanic(adh.logger, &err) + + if request == nil { + return nil, errRequestNotSet + } + + namespaceID, err := adh.namespaceRegistry.GetNamespaceID(namespace.Name(request.GetNamespace())) + if err != nil { + return nil, err + } + + resp, err := adh.taskManager.GetTasks(ctx, &persistence.GetTasksRequest{ + NamespaceID: namespaceID.String(), + TaskQueue: request.GetTaskQueue(), + TaskType: request.GetTaskQueueType(), + InclusiveMinTaskID: request.GetMinTaskId(), + ExclusiveMaxTaskID: request.GetMaxTaskId(), + PageSize: int(request.GetBatchSize()), + NextPageToken: request.NextPageToken, + }) + if err != nil { + return nil, err + } + + return &adminservice.GetTaskQueueTasksResponse{ + Tasks: resp.Tasks, + NextPageToken: resp.NextPageToken, + }, nil +} + +func (adh *AdminHandler) DeleteWorkflowExecution( + ctx context.Context, + request *adminservice.DeleteWorkflowExecutionRequest, +) (_ *adminservice.DeleteWorkflowExecutionResponse, err error) { + defer log.CapturePanic(adh.logger, &err) + + if request == nil { + return nil, errRequestNotSet + } + + if err := validateExecution(request.Execution); err != nil { + return nil, err + } + + namespaceID, err := adh.namespaceRegistry.GetNamespaceID(namespace.Name(request.GetNamespace())) + if err != nil { + return nil, err + } + execution := request.Execution + + shardID := common.WorkflowIDToHistoryShard( + namespaceID.String(), + execution.GetWorkflowId(), + adh.numberOfHistoryShards, + ) + logger := log.With(adh.logger, + tag.WorkflowNamespace(request.Namespace), + tag.WorkflowID(execution.WorkflowId), + tag.WorkflowRunID(execution.RunId), + ) + + if execution.RunId == "" { + resp, err := adh.persistenceExecutionManager.GetCurrentExecution(ctx, &persistence.GetCurrentExecutionRequest{ + ShardID: shardID, + NamespaceID: namespaceID.String(), + WorkflowID: execution.WorkflowId, + }) + if err != nil { + return nil, err + } + execution.RunId = resp.RunID + } + + var warnings []string + var branchTokens [][]byte + var startTime, closeTime *time.Time + cassVisBackend := adh.visibilityMgr.HasStoreName(cassandra.CassandraPersistenceName) + + resp, err := adh.persistenceExecutionManager.GetWorkflowExecution(ctx, &persistence.GetWorkflowExecutionRequest{ + ShardID: shardID, + NamespaceID: namespaceID.String(), + WorkflowID: execution.WorkflowId, + RunID: execution.RunId, + }) + if err != nil { + if common.IsContextCanceledErr(err) || common.IsContextDeadlineExceededErr(err) { + return nil, err + } + // continue to deletion + warnMsg := "Unable to load mutable 
state when deleting workflow execution, " + + "will skip deleting workflow history and cassandra visibility record" + logger.Warn(warnMsg, tag.Error(err)) + warnings = append(warnings, fmt.Sprintf("%s. Error: %v", warnMsg, err.Error())) + } else { + // load necessary information from mutable state + executionInfo := resp.State.GetExecutionInfo() + histories := executionInfo.GetVersionHistories().GetHistories() + branchTokens = make([][]byte, 0, len(histories)) + for _, historyItem := range histories { + branchTokens = append(branchTokens, historyItem.GetBranchToken()) + } + + if cassVisBackend { + if resp.State.ExecutionState.State != enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED { + startTime = executionInfo.GetStartTime() + } else if executionInfo.GetCloseTime() != nil { + closeTime = executionInfo.GetCloseTime() + } else { + completionEvent, err := adh.getWorkflowCompletionEvent(ctx, shardID, resp.State) + if err != nil { + warnMsg := "Unable to load workflow completion event, will skip deleting visibility record" + adh.logger.Warn(warnMsg, tag.Error(err)) + warnings = append(warnings, fmt.Sprintf("%s. Error: %v", warnMsg, err.Error())) + } else { + closeTime = completionEvent.GetEventTime() + } + } + } + } + + if !cassVisBackend || (startTime != nil || closeTime != nil) { + // if using cass visibility, then either start or close time should be non-nil + // NOTE: the deletion is best effort, for sql and cassandra visibility implementation, + // we can't guarantee there's no update or record close request for this workflow since + // visibility queue processing is async. Operator can call this api again to delete visibility + // record again if this happens. + if _, err := adh.historyClient.DeleteWorkflowVisibilityRecord(ctx, &historyservice.DeleteWorkflowVisibilityRecordRequest{ + NamespaceId: namespaceID.String(), + Execution: execution, + WorkflowStartTime: startTime, + WorkflowCloseTime: closeTime, + }); err != nil { + return nil, err + } + } + + if err := adh.persistenceExecutionManager.DeleteCurrentWorkflowExecution(ctx, &persistence.DeleteCurrentWorkflowExecutionRequest{ + ShardID: shardID, + NamespaceID: namespaceID.String(), + WorkflowID: execution.WorkflowId, + RunID: execution.RunId, + }); err != nil { + return nil, err + } + + if err := adh.persistenceExecutionManager.DeleteWorkflowExecution(ctx, &persistence.DeleteWorkflowExecutionRequest{ + ShardID: shardID, + NamespaceID: namespaceID.String(), + WorkflowID: execution.WorkflowId, + RunID: execution.RunId, + }); err != nil { + return nil, err + } + + for _, branchToken := range branchTokens { + if err := adh.persistenceExecutionManager.DeleteHistoryBranch(ctx, &persistence.DeleteHistoryBranchRequest{ + ShardID: shardID, + BranchToken: branchToken, + }); err != nil { + warnMsg := "Failed to delete history branch, skip" + adh.logger.Warn(warnMsg, tag.WorkflowBranchID(string(branchToken)), tag.Error(err)) + warnings = append(warnings, fmt.Sprintf("%s. 
BranchToken: %v, Error: %v", warnMsg, branchToken, err.Error())) + } + } + + return &adminservice.DeleteWorkflowExecutionResponse{ + Warnings: warnings, + }, nil +} + +func (adh *AdminHandler) validateGetWorkflowExecutionRawHistoryV2Request( + request *adminservice.GetWorkflowExecutionRawHistoryV2Request, +) error { + + execution := request.Execution + if execution.GetWorkflowId() == "" { + return errWorkflowIDNotSet + } + // TODO currently, this API is only going to be used by re-send history events + // to remote cluster if kafka is lossy again, in the future, this API can be used + // by CLI and client, then empty runID (meaning the current workflow) should be allowed + if execution.GetRunId() == "" || uuid.Parse(execution.GetRunId()) == nil { + return errInvalidRunID + } + + pageSize := int(request.GetMaximumPageSize()) + if pageSize <= 0 { + return errInvalidPageSize + } + + if request.GetStartEventId() == common.EmptyEventID && + request.GetStartEventVersion() == common.EmptyVersion && + request.GetEndEventId() == common.EmptyEventID && + request.GetEndEventVersion() == common.EmptyVersion { + return errInvalidEventQueryRange + } + + return nil +} + +func (adh *AdminHandler) validateRemoteClusterMetadata(metadata *adminservice.DescribeClusterResponse) error { + // Verify remote cluster config + currentClusterInfo := adh.clusterMetadata + if metadata.GetClusterName() == currentClusterInfo.GetCurrentClusterName() { + // cluster name conflict + return serviceerror.NewInvalidArgument("Cannot update current cluster metadata from rpc calls") + } + if metadata.GetFailoverVersionIncrement() != currentClusterInfo.GetFailoverVersionIncrement() { + // failover version increment is mismatch with current cluster config + return serviceerror.NewInvalidArgument("Cannot add remote cluster due to failover version increment mismatch") + } + if metadata.GetHistoryShardCount() != adh.config.NumHistoryShards { + remoteShardCount := metadata.GetHistoryShardCount() + large := remoteShardCount + small := adh.config.NumHistoryShards + if large < small { + small, large = large, small + } + if large%small != 0 { + return serviceerror.NewInvalidArgument("Remote cluster shard number and local cluster shard number are not multiples.") + } + } + if !metadata.IsGlobalNamespaceEnabled { + // remote cluster doesn't support global namespace + return serviceerror.NewInvalidArgument("Cannot add remote cluster as global namespace is not supported") + } + for clusterName, cluster := range currentClusterInfo.GetAllClusterInfo() { + if clusterName != metadata.ClusterName && cluster.InitialFailoverVersion == metadata.GetInitialFailoverVersion() { + // initial failover version conflict + // best effort: race condition if a concurrent write to db with the same version. 
+ return serviceerror.NewInvalidArgument("Cannot add remote cluster due to initial failover version conflict") + } + } + return nil +} + +func (adh *AdminHandler) setRequestDefaultValueAndGetTargetVersionHistory( + request *adminservice.GetWorkflowExecutionRawHistoryV2Request, + versionHistories *historyspb.VersionHistories, +) (*historyspb.VersionHistory, error) { + + targetBranch, err := versionhistory.GetCurrentVersionHistory(versionHistories) + if err != nil { + return nil, err + } + firstItem, err := versionhistory.GetFirstVersionHistoryItem(targetBranch) + if err != nil { + return nil, err + } + lastItem, err := versionhistory.GetLastVersionHistoryItem(targetBranch) + if err != nil { + return nil, err + } + + if request.GetStartEventId() == common.EmptyVersion || request.GetStartEventVersion() == common.EmptyVersion { + // If start event is not set, get the events from the first event + // As the API is exclusive-exclusive, use first event id - 1 here + request.StartEventId = common.FirstEventID - 1 + request.StartEventVersion = firstItem.GetVersion() + } + if request.GetEndEventId() == common.EmptyEventID || request.GetEndEventVersion() == common.EmptyVersion { + // If end event is not set, get the events until the end event + // As the API is exclusive-exclusive, use end event id + 1 here + request.EndEventId = lastItem.GetEventId() + 1 + request.EndEventVersion = lastItem.GetVersion() + } + + if request.GetStartEventId() < 0 { + return nil, errInvalidFirstNextEventCombination + } + + // get branch based on the end event if end event is defined in the request + if request.GetEndEventId() == lastItem.GetEventId()+1 && + request.GetEndEventVersion() == lastItem.GetVersion() { + // this is a special case, target branch remains the same + } else { + endItem := versionhistory.NewVersionHistoryItem(request.GetEndEventId(), request.GetEndEventVersion()) + idx, err := versionhistory.FindFirstVersionHistoryIndexByVersionHistoryItem(versionHistories, endItem) + if err != nil { + return nil, err + } + + targetBranch, err = versionhistory.GetVersionHistory(versionHistories, idx) + if err != nil { + return nil, err + } + } + + startItem := versionhistory.NewVersionHistoryItem(request.GetStartEventId(), request.GetStartEventVersion()) + // If the request start event is defined. The start event may be on a different branch as current branch. + // We need to find the LCA of the start event and the current branch. 
+ if request.GetStartEventId() == common.FirstEventID-1 && + request.GetStartEventVersion() == firstItem.GetVersion() { + // this is a special case, start event is on the same branch as target branch + } else { + if !versionhistory.ContainsVersionHistoryItem(targetBranch, startItem) { + idx, err := versionhistory.FindFirstVersionHistoryIndexByVersionHistoryItem(versionHistories, startItem) + if err != nil { + return nil, err + } + startBranch, err := versionhistory.GetVersionHistory(versionHistories, idx) + if err != nil { + return nil, err + } + startItem, err = versionhistory.FindLCAVersionHistoryItem(targetBranch, startBranch) + if err != nil { + return nil, err + } + request.StartEventId = startItem.GetEventId() + request.StartEventVersion = startItem.GetVersion() + } + } + + return targetBranch, nil +} + +func (adh *AdminHandler) getWorkflowCompletionEvent( + ctx context.Context, + shardID int32, + mutableState *persistencespb.WorkflowMutableState, +) (*historypb.HistoryEvent, error) { + executionInfo := mutableState.GetExecutionInfo() + completionEventID := mutableState.GetNextEventId() - 1 + + currentVersionHistory, err := versionhistory.GetCurrentVersionHistory(executionInfo.VersionHistories) + if err != nil { + return nil, err + } + version, err := versionhistory.GetVersionHistoryEventVersion(currentVersionHistory, completionEventID) + if err != nil { + return nil, err + } + + resp, err := adh.persistenceExecutionManager.ReadHistoryBranch(ctx, &persistence.ReadHistoryBranchRequest{ + ShardID: shardID, + BranchToken: currentVersionHistory.GetBranchToken(), + MinEventID: executionInfo.CompletionEventBatchId, + MaxEventID: completionEventID + 1, + PageSize: 1, + }) + if err != nil { + return nil, err + } + + // find history event from batch and return back single event to caller + for _, e := range resp.HistoryEvents { + if e.EventId == completionEventID && e.Version == version { + return e, nil + } + } + + return nil, serviceerror.NewInternal("Unable to find closed event for workflow") +} + +func (adh *AdminHandler) StreamWorkflowReplicationMessages( + clientCluster adminservice.AdminService_StreamWorkflowReplicationMessagesServer, +) (retError error) { + defer log.CapturePanic(adh.logger, &retError) + + ctxMetadata, ok := metadata.FromIncomingContext(clientCluster.Context()) + if !ok { + return serviceerror.NewInvalidArgument("missing cluster & shard ID metadata") + } + _, serverClusterShardID, err := history.DecodeClusterShardMD(ctxMetadata) + if err != nil { + return err + } + + logger := log.With(adh.logger, tag.ShardID(serverClusterShardID.ShardID)) + logger.Info("AdminStreamReplicationMessages started.") + defer logger.Info("AdminStreamReplicationMessages stopped.") + + ctx := clientCluster.Context() + serverCluster, err := adh.historyClient.StreamWorkflowReplicationMessages(ctx) + if err != nil { + return err + } + + shutdownChan := channel.NewShutdownOnce() + go func() { + defer shutdownChan.Shutdown() + + for !shutdownChan.IsShutdown() { + req, err := clientCluster.Recv() + if err != nil { + logger.Info("AdminStreamReplicationMessages client -> server encountered error", tag.Error(err)) + return + } + switch attr := req.GetAttributes().(type) { + case *adminservice.StreamWorkflowReplicationMessagesRequest_SyncReplicationState: + if err = serverCluster.Send(&historyservice.StreamWorkflowReplicationMessagesRequest{ + Attributes: &historyservice.StreamWorkflowReplicationMessagesRequest_SyncReplicationState{ + SyncReplicationState: attr.SyncReplicationState, + }, + }); err != 
nil { + logger.Info("AdminStreamReplicationMessages client -> server encountered error", tag.Error(err)) + return + } + default: + logger.Info("AdminStreamReplicationMessages client -> server encountered error", tag.Error(serviceerror.NewInternal(fmt.Sprintf( + "StreamWorkflowReplicationMessages encountered unknown type: %T %v", attr, attr, + )))) + return + } + } + }() + go func() { + defer shutdownChan.Shutdown() + + for !shutdownChan.IsShutdown() { + resp, err := serverCluster.Recv() + if err != nil { + logger.Info("AdminStreamReplicationMessages server -> client encountered error", tag.Error(err)) + return + } + switch attr := resp.GetAttributes().(type) { + case *historyservice.StreamWorkflowReplicationMessagesResponse_Messages: + if err = clientCluster.Send(&adminservice.StreamWorkflowReplicationMessagesResponse{ + Attributes: &adminservice.StreamWorkflowReplicationMessagesResponse_Messages{ + Messages: attr.Messages, + }, + }); err != nil { + logger.Info("AdminStreamReplicationMessages server -> client encountered error", tag.Error(err)) + return + } + default: + logger.Info("AdminStreamReplicationMessages server -> client encountered error", tag.Error(serviceerror.NewInternal(fmt.Sprintf( + "StreamWorkflowReplicationMessages encountered unknown type: %T %v", attr, attr, + )))) + return + } + } + }() + <-shutdownChan.Channel() + return nil +} + +func (adh *AdminHandler) GetNamespace(ctx context.Context, request *adminservice.GetNamespaceRequest) (_ *adminservice.GetNamespaceResponse, err error) { + defer log.CapturePanic(adh.logger, &err) + + if request == nil || (len(request.GetId()) == 0 && len(request.GetNamespace()) == 0) { + return nil, errRequestNotSet + } + req := &persistence.GetNamespaceRequest{ + Name: request.GetNamespace(), + ID: request.GetId(), + } + resp, err := adh.persistenceMetadataManager.GetNamespace(ctx, req) + if err != nil { + return nil, err + } + info := resp.Namespace.GetInfo() + nsConfig := resp.Namespace.GetConfig() + replicationConfig := resp.Namespace.GetReplicationConfig() + + nsResponse := &adminservice.GetNamespaceResponse{ + Info: &namespacepb.NamespaceInfo{ + Name: info.Name, + State: info.State, + Description: info.Description, + OwnerEmail: info.Owner, + Data: info.Data, + Id: info.Id, + }, + Config: &namespacepb.NamespaceConfig{ + WorkflowExecutionRetentionTtl: nsConfig.Retention, + HistoryArchivalState: nsConfig.HistoryArchivalState, + HistoryArchivalUri: nsConfig.HistoryArchivalUri, + VisibilityArchivalState: nsConfig.VisibilityArchivalState, + VisibilityArchivalUri: nsConfig.VisibilityArchivalUri, + BadBinaries: nsConfig.BadBinaries, + CustomSearchAttributeAliases: nsConfig.CustomSearchAttributeAliases, + }, + ReplicationConfig: &replicationpb.NamespaceReplicationConfig{ + ActiveClusterName: replicationConfig.ActiveClusterName, + Clusters: convertClusterReplicationConfigToProto(replicationConfig.Clusters), + State: replicationConfig.GetState(), + }, + ConfigVersion: resp.Namespace.GetConfigVersion(), + FailoverVersion: resp.Namespace.GetFailoverVersion(), + FailoverHistory: convertFailoverHistoryToReplicationProto(resp.Namespace.GetReplicationConfig().GetFailoverHistory()), + } + return nsResponse, nil +} + +func convertClusterReplicationConfigToProto( + input []string, +) []*replicationpb.ClusterReplicationConfig { + output := make([]*replicationpb.ClusterReplicationConfig, 0, len(input)) + for _, clusterName := range input { + output = append(output, &replicationpb.ClusterReplicationConfig{ClusterName: clusterName}) + } + return output +} + 
+func convertFailoverHistoryToReplicationProto( + failoverHistoy []*persistencespb.FailoverStatus, +) []*replicationpb.FailoverStatus { + var replicationProto []*replicationpb.FailoverStatus + for _, failoverStatus := range failoverHistoy { + replicationProto = append(replicationProto, &replicationpb.FailoverStatus{ + FailoverTime: failoverStatus.GetFailoverTime(), + FailoverVersion: failoverStatus.GetFailoverVersion(), + }) + } + + return replicationProto +} diff -Nru temporal-1.21.5-1/src/service/frontend/admin_handler_test.go temporal-1.22.5/src/service/frontend/admin_handler_test.go --- temporal-1.21.5-1/src/service/frontend/admin_handler_test.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/service/frontend/admin_handler_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,1627 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package frontend + +import ( + "context" + "errors" + "fmt" + "math/rand" + "sync" + "testing" + "time" + + namespacepb "go.temporal.io/api/namespace/v1" + "google.golang.org/grpc/metadata" + + historyclient "go.temporal.io/server/client/history" + "go.temporal.io/server/common/clock" + "go.temporal.io/server/common/persistence/visibility/store/standard/cassandra" + "go.temporal.io/server/common/primitives" + "go.temporal.io/server/common/resourcetest" + + "google.golang.org/grpc/health" + + "go.temporal.io/server/api/adminservicemock/v1" + "go.temporal.io/server/api/enums/v1" + persistencespb "go.temporal.io/server/api/persistence/v1" + "go.temporal.io/server/common/cluster" + "go.temporal.io/server/common/membership" + "go.temporal.io/server/common/primitives/timestamp" + "go.temporal.io/server/common/testing/mocksdk" + + "github.com/golang/mock/gomock" + "github.com/pborman/uuid" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" + historypb "go.temporal.io/api/history/v1" + "go.temporal.io/api/serviceerror" + "go.temporal.io/api/workflowservice/v1" + + "go.temporal.io/server/api/adminservice/v1" + historyspb "go.temporal.io/server/api/history/v1" + "go.temporal.io/server/api/historyservice/v1" + "go.temporal.io/server/api/historyservicemock/v1" + clientmocks "go.temporal.io/server/client" + "go.temporal.io/server/common" + "go.temporal.io/server/common/config" + "go.temporal.io/server/common/namespace" + "go.temporal.io/server/common/persistence" + "go.temporal.io/server/common/persistence/serialization" + "go.temporal.io/server/common/persistence/versionhistory" + "go.temporal.io/server/common/persistence/visibility/manager" + "go.temporal.io/server/common/persistence/visibility/store/elasticsearch" + "go.temporal.io/server/common/searchattribute" +) + +type ( + adminHandlerSuite struct { + suite.Suite + *require.Assertions + + controller *gomock.Controller + mockResource *resourcetest.Test + mockHistoryClient *historyservicemock.MockHistoryServiceClient + mockNamespaceCache *namespace.MockRegistry + + mockExecutionMgr *persistence.MockExecutionManager + mockVisibilityMgr *manager.MockVisibilityManager + mockClusterMetadataManager *persistence.MockClusterMetadataManager + mockClientFactory *clientmocks.MockFactory + mockAdminClient *adminservicemock.MockAdminServiceClient + mockMetadata *cluster.MockMetadata + mockProducer *persistence.MockNamespaceReplicationQueue + + namespace namespace.Name + namespaceID namespace.ID + namespaceEntry *namespace.Namespace + + handler *AdminHandler + } +) + +func TestAdminHandlerSuite(t *testing.T) { + s := new(adminHandlerSuite) + suite.Run(t, s) +} + +func (s *adminHandlerSuite) SetupTest() { + s.Assertions = require.New(s.T()) + + s.namespace = "some random namespace name" + s.namespaceID = "deadd0d0-c001-face-d00d-000000000000" + s.namespaceEntry = namespace.NewNamespaceForTest( + &persistencespb.NamespaceInfo{ + Name: s.namespace.String(), + Id: s.namespaceID.String(), + }, + nil, + false, + nil, + int64(100), + ) + + s.controller = gomock.NewController(s.T()) + s.mockResource = resourcetest.NewTest(s.controller, primitives.FrontendService) + s.mockNamespaceCache = s.mockResource.NamespaceCache + s.mockHistoryClient = s.mockResource.HistoryClient + s.mockExecutionMgr = s.mockResource.ExecutionMgr + s.mockClusterMetadataManager = s.mockResource.ClusterMetadataMgr + s.mockClientFactory = s.mockResource.ClientFactory + s.mockAdminClient = 
adminservicemock.NewMockAdminServiceClient(s.controller) + s.mockMetadata = s.mockResource.ClusterMetadata + s.mockVisibilityMgr = s.mockResource.VisibilityManager + s.mockProducer = persistence.NewMockNamespaceReplicationQueue(s.controller) + + persistenceConfig := &config.Persistence{ + NumHistoryShards: 1, + } + + cfg := &Config{ + NumHistoryShards: 4, + } + args := NewAdminHandlerArgs{ + persistenceConfig, + cfg, + s.mockResource.GetNamespaceReplicationQueue(), + s.mockProducer, + s.mockResource.ESClient, + s.mockResource.GetVisibilityManager(), + s.mockResource.GetLogger(), + s.mockResource.GetExecutionManager(), + s.mockResource.GetTaskManager(), + s.mockResource.GetClusterMetadataManager(), + s.mockResource.GetMetadataManager(), + s.mockResource.GetClientFactory(), + s.mockResource.GetClientBean(), + s.mockResource.GetHistoryClient(), + s.mockResource.GetSDKClientFactory(), + s.mockResource.GetMembershipMonitor(), + s.mockResource.GetHostInfoProvider(), + s.mockResource.GetArchiverProvider(), + s.mockResource.GetMetricsHandler(), + s.mockResource.GetNamespaceRegistry(), + s.mockResource.GetSearchAttributesProvider(), + s.mockResource.GetSearchAttributesManager(), + s.mockMetadata, + s.mockResource.GetArchivalMetadata(), + health.NewServer(), + serialization.NewSerializer(), + clock.NewRealTimeSource(), + } + s.mockMetadata.EXPECT().GetCurrentClusterName().Return(uuid.New()).AnyTimes() + s.handler = NewAdminHandler(args) + s.handler.Start() +} + +func (s *adminHandlerSuite) TearDownTest() { + s.controller.Finish() + s.handler.Stop() +} + +func (s *adminHandlerSuite) Test_GetWorkflowExecutionRawHistoryV2_FailedOnInvalidWorkflowID() { + ctx := context.Background() + _, err := s.handler.GetWorkflowExecutionRawHistoryV2(ctx, + &adminservice.GetWorkflowExecutionRawHistoryV2Request{ + NamespaceId: s.namespaceID.String(), + Execution: &commonpb.WorkflowExecution{ + WorkflowId: "", + RunId: uuid.New(), + }, + StartEventId: 1, + StartEventVersion: 100, + EndEventId: 10, + EndEventVersion: 100, + MaximumPageSize: 1, + NextPageToken: nil, + }) + s.Error(err) +} + +func (s *adminHandlerSuite) Test_GetWorkflowExecutionRawHistoryV2_FailedOnInvalidRunID() { + ctx := context.Background() + _, err := s.handler.GetWorkflowExecutionRawHistoryV2(ctx, + &adminservice.GetWorkflowExecutionRawHistoryV2Request{ + NamespaceId: s.namespaceID.String(), + Execution: &commonpb.WorkflowExecution{ + WorkflowId: "workflowID", + RunId: "runID", + }, + StartEventId: 1, + StartEventVersion: 100, + EndEventId: 10, + EndEventVersion: 100, + MaximumPageSize: 1, + NextPageToken: nil, + }) + s.Error(err) +} + +func (s *adminHandlerSuite) Test_GetWorkflowExecutionRawHistoryV2_FailedOnInvalidSize() { + ctx := context.Background() + _, err := s.handler.GetWorkflowExecutionRawHistoryV2(ctx, + &adminservice.GetWorkflowExecutionRawHistoryV2Request{ + NamespaceId: s.namespaceID.String(), + Execution: &commonpb.WorkflowExecution{ + WorkflowId: "workflowID", + RunId: uuid.New(), + }, + StartEventId: 1, + StartEventVersion: 100, + EndEventId: 10, + EndEventVersion: 100, + MaximumPageSize: -1, + NextPageToken: nil, + }) + s.Error(err) +} + +func (s *adminHandlerSuite) Test_GetWorkflowExecutionRawHistoryV2_FailedOnNamespaceCache() { + ctx := context.Background() + s.mockNamespaceCache.EXPECT().GetNamespaceByID(s.namespaceID).Return(nil, fmt.Errorf("test")) + _, err := s.handler.GetWorkflowExecutionRawHistoryV2(ctx, + &adminservice.GetWorkflowExecutionRawHistoryV2Request{ + NamespaceId: s.namespaceID.String(), + Execution: 
&commonpb.WorkflowExecution{ + WorkflowId: "workflowID", + RunId: uuid.New(), + }, + StartEventId: 1, + StartEventVersion: 100, + EndEventId: 10, + EndEventVersion: 100, + MaximumPageSize: 1, + NextPageToken: nil, + }) + s.Error(err) +} + +func (s *adminHandlerSuite) Test_GetWorkflowExecutionRawHistoryV2() { + ctx := context.Background() + s.mockNamespaceCache.EXPECT().GetNamespaceByID(s.namespaceID).Return(s.namespaceEntry, nil).AnyTimes() + branchToken := []byte{1} + versionHistory := versionhistory.NewVersionHistory(branchToken, []*historyspb.VersionHistoryItem{ + versionhistory.NewVersionHistoryItem(int64(10), int64(100)), + }) + versionHistories := versionhistory.NewVersionHistories(versionHistory) + mState := &historyservice.GetMutableStateResponse{ + NextEventId: 11, + CurrentBranchToken: branchToken, + VersionHistories: versionHistories, + } + s.mockHistoryClient.EXPECT().GetMutableState(gomock.Any(), gomock.Any()).Return(mState, nil).AnyTimes() + + s.mockExecutionMgr.EXPECT().ReadRawHistoryBranch(gomock.Any(), gomock.Any()).Return(&persistence.ReadRawHistoryBranchResponse{ + HistoryEventBlobs: []*commonpb.DataBlob{}, + NextPageToken: []byte{}, + Size: 0, + }, nil) + _, err := s.handler.GetWorkflowExecutionRawHistoryV2(ctx, + &adminservice.GetWorkflowExecutionRawHistoryV2Request{ + NamespaceId: s.namespaceID.String(), + Execution: &commonpb.WorkflowExecution{ + WorkflowId: "workflowID", + RunId: uuid.New(), + }, + StartEventId: 1, + StartEventVersion: 100, + EndEventId: 10, + EndEventVersion: 100, + MaximumPageSize: 10, + NextPageToken: nil, + }) + s.NoError(err) +} + +func (s *adminHandlerSuite) Test_GetWorkflowExecutionRawHistoryV2_SameStartIDAndEndID() { + ctx := context.Background() + s.mockNamespaceCache.EXPECT().GetNamespaceByID(s.namespaceID).Return(s.namespaceEntry, nil).AnyTimes() + branchToken := []byte{1} + versionHistory := versionhistory.NewVersionHistory(branchToken, []*historyspb.VersionHistoryItem{ + versionhistory.NewVersionHistoryItem(int64(10), int64(100)), + }) + versionHistories := versionhistory.NewVersionHistories(versionHistory) + mState := &historyservice.GetMutableStateResponse{ + NextEventId: 11, + CurrentBranchToken: branchToken, + VersionHistories: versionHistories, + } + s.mockHistoryClient.EXPECT().GetMutableState(gomock.Any(), gomock.Any()).Return(mState, nil).AnyTimes() + + resp, err := s.handler.GetWorkflowExecutionRawHistoryV2(ctx, + &adminservice.GetWorkflowExecutionRawHistoryV2Request{ + NamespaceId: s.namespaceID.String(), + Execution: &commonpb.WorkflowExecution{ + WorkflowId: "workflowID", + RunId: uuid.New(), + }, + StartEventId: 10, + StartEventVersion: 100, + EndEventId: common.EmptyEventID, + EndEventVersion: common.EmptyVersion, + MaximumPageSize: 1, + NextPageToken: nil, + }) + s.Nil(resp.NextPageToken) + s.NoError(err) +} + +func (s *adminHandlerSuite) Test_SetRequestDefaultValueAndGetTargetVersionHistory_DefinedStartAndEnd() { + inputStartEventID := int64(1) + inputStartVersion := int64(10) + inputEndEventID := int64(100) + inputEndVersion := int64(11) + firstItem := versionhistory.NewVersionHistoryItem(inputStartEventID, inputStartVersion) + endItem := versionhistory.NewVersionHistoryItem(inputEndEventID, inputEndVersion) + versionHistory := versionhistory.NewVersionHistory([]byte{}, []*historyspb.VersionHistoryItem{firstItem, endItem}) + versionHistories := versionhistory.NewVersionHistories(versionHistory) + request := &adminservice.GetWorkflowExecutionRawHistoryV2Request{ + NamespaceId: s.namespaceID.String(), + Execution: 
&commonpb.WorkflowExecution{ + WorkflowId: "workflowID", + RunId: uuid.New(), + }, + StartEventId: inputStartEventID, + StartEventVersion: inputStartVersion, + EndEventId: inputEndEventID, + EndEventVersion: inputEndVersion, + MaximumPageSize: 10, + NextPageToken: nil, + } + + targetVersionHistory, err := s.handler.setRequestDefaultValueAndGetTargetVersionHistory( + request, + versionHistories, + ) + s.Equal(request.GetStartEventId(), inputStartEventID) + s.Equal(request.GetEndEventId(), inputEndEventID) + s.Equal(targetVersionHistory, versionHistory) + s.NoError(err) +} + +func (s *adminHandlerSuite) Test_SetRequestDefaultValueAndGetTargetVersionHistory_DefinedEndEvent() { + inputStartEventID := int64(1) + inputEndEventID := int64(100) + inputStartVersion := int64(10) + inputEndVersion := int64(11) + firstItem := versionhistory.NewVersionHistoryItem(inputStartEventID, inputStartVersion) + targetItem := versionhistory.NewVersionHistoryItem(inputEndEventID, inputEndVersion) + versionHistory := versionhistory.NewVersionHistory([]byte{}, []*historyspb.VersionHistoryItem{firstItem, targetItem}) + versionHistories := versionhistory.NewVersionHistories(versionHistory) + request := &adminservice.GetWorkflowExecutionRawHistoryV2Request{ + NamespaceId: s.namespaceID.String(), + Execution: &commonpb.WorkflowExecution{ + WorkflowId: "workflowID", + RunId: uuid.New(), + }, + StartEventId: common.EmptyEventID, + StartEventVersion: common.EmptyVersion, + EndEventId: inputEndEventID, + EndEventVersion: inputEndVersion, + MaximumPageSize: 10, + NextPageToken: nil, + } + + targetVersionHistory, err := s.handler.setRequestDefaultValueAndGetTargetVersionHistory( + request, + versionHistories, + ) + s.Equal(request.GetStartEventId(), inputStartEventID-1) + s.Equal(request.GetEndEventId(), inputEndEventID) + s.Equal(targetVersionHistory, versionHistory) + s.NoError(err) +} + +func (s *adminHandlerSuite) Test_SetRequestDefaultValueAndGetTargetVersionHistory_DefinedStartEvent() { + inputStartEventID := int64(1) + inputEndEventID := int64(100) + inputStartVersion := int64(10) + inputEndVersion := int64(11) + firstItem := versionhistory.NewVersionHistoryItem(inputStartEventID, inputStartVersion) + targetItem := versionhistory.NewVersionHistoryItem(inputEndEventID, inputEndVersion) + versionHistory := versionhistory.NewVersionHistory([]byte{}, []*historyspb.VersionHistoryItem{firstItem, targetItem}) + versionHistories := versionhistory.NewVersionHistories(versionHistory) + request := &adminservice.GetWorkflowExecutionRawHistoryV2Request{ + NamespaceId: s.namespaceID.String(), + Execution: &commonpb.WorkflowExecution{ + WorkflowId: "workflowID", + RunId: uuid.New(), + }, + StartEventId: inputStartEventID, + StartEventVersion: inputStartVersion, + EndEventId: common.EmptyEventID, + EndEventVersion: common.EmptyVersion, + MaximumPageSize: 10, + NextPageToken: nil, + } + + targetVersionHistory, err := s.handler.setRequestDefaultValueAndGetTargetVersionHistory( + request, + versionHistories, + ) + s.Equal(request.GetStartEventId(), inputStartEventID) + s.Equal(request.GetEndEventId(), inputEndEventID+1) + s.Equal(targetVersionHistory, versionHistory) + s.NoError(err) +} + +func (s *adminHandlerSuite) Test_SetRequestDefaultValueAndGetTargetVersionHistory_NonCurrentBranch() { + inputStartEventID := int64(1) + inputEndEventID := int64(100) + inputStartVersion := int64(10) + inputEndVersion := int64(101) + item1 := versionhistory.NewVersionHistoryItem(inputStartEventID, inputStartVersion) + item2 := 
versionhistory.NewVersionHistoryItem(inputEndEventID, inputEndVersion) + versionHistory1 := versionhistory.NewVersionHistory([]byte{}, []*historyspb.VersionHistoryItem{item1, item2}) + item3 := versionhistory.NewVersionHistoryItem(int64(10), int64(20)) + item4 := versionhistory.NewVersionHistoryItem(int64(20), int64(51)) + versionHistory2 := versionhistory.NewVersionHistory([]byte{}, []*historyspb.VersionHistoryItem{item1, item3, item4}) + versionHistories := versionhistory.NewVersionHistories(versionHistory1) + _, _, err := versionhistory.AddVersionHistory(versionHistories, versionHistory2) + s.NoError(err) + request := &adminservice.GetWorkflowExecutionRawHistoryV2Request{ + NamespaceId: s.namespaceID.String(), + Execution: &commonpb.WorkflowExecution{ + WorkflowId: "workflowID", + RunId: uuid.New(), + }, + StartEventId: 9, + StartEventVersion: 20, + EndEventId: inputEndEventID, + EndEventVersion: inputEndVersion, + MaximumPageSize: 10, + NextPageToken: nil, + } + + targetVersionHistory, err := s.handler.setRequestDefaultValueAndGetTargetVersionHistory( + request, + versionHistories, + ) + s.Equal(request.GetStartEventId(), inputStartEventID) + s.Equal(request.GetEndEventId(), inputEndEventID) + s.Equal(targetVersionHistory, versionHistory1) + s.NoError(err) +} + +func (s *adminHandlerSuite) Test_AddSearchAttributes() { + handler := s.handler + ctx := context.Background() + + type test struct { + Name string + Request *adminservice.AddSearchAttributesRequest + Expected error + } + // request validation tests + testCases1 := []test{ + { + Name: "nil request", + Request: nil, + Expected: &serviceerror.InvalidArgument{Message: "Request is nil."}, + }, + { + Name: "empty request", + Request: &adminservice.AddSearchAttributesRequest{}, + Expected: &serviceerror.InvalidArgument{Message: "SearchAttributes are not set on request."}, + }, + } + for _, testCase := range testCases1 { + s.T().Run(testCase.Name, func(t *testing.T) { + resp, err := handler.AddSearchAttributes(ctx, testCase.Request) + s.Equal(testCase.Expected, err) + s.Nil(resp) + }) + } + + // Elasticsearch is not configured + s.mockVisibilityMgr.EXPECT().GetIndexName().Return("").AnyTimes() + s.mockResource.SearchAttributesProvider.EXPECT().GetSearchAttributes("", true).Return(searchattribute.TestNameTypeMap, nil).AnyTimes() + testCases3 := []test{ + { + Name: "reserved key (empty index)", + Request: &adminservice.AddSearchAttributesRequest{ + SearchAttributes: map[string]enumspb.IndexedValueType{ + "WorkflowId": enumspb.INDEXED_VALUE_TYPE_TEXT, + }, + }, + Expected: &serviceerror.InvalidArgument{Message: "Search attribute WorkflowId is reserved by system."}, + }, + { + Name: "key already whitelisted (empty index)", + Request: &adminservice.AddSearchAttributesRequest{ + SearchAttributes: map[string]enumspb.IndexedValueType{ + "CustomTextField": enumspb.INDEXED_VALUE_TYPE_TEXT, + }, + }, + Expected: &serviceerror.InvalidArgument{Message: "Search attribute CustomTextField already exists."}, + }, + } + for _, testCase := range testCases3 { + s.T().Run(testCase.Name, func(t *testing.T) { + resp, err := handler.AddSearchAttributes(ctx, testCase.Request) + s.Equal(testCase.Expected, err) + s.Nil(resp) + }) + } + + // Configure Elasticsearch: add advanced visibility store config with index name. 
+ s.mockVisibilityMgr.EXPECT().GetIndexName().Return("random-index-name").AnyTimes() + s.mockResource.SearchAttributesProvider.EXPECT().GetSearchAttributes("random-index-name", true).Return(searchattribute.TestNameTypeMap, nil).AnyTimes() + testCases2 := []test{ + { + Name: "reserved key (ES configured)", + Request: &adminservice.AddSearchAttributesRequest{ + SearchAttributes: map[string]enumspb.IndexedValueType{ + "WorkflowId": enumspb.INDEXED_VALUE_TYPE_TEXT, + }, + }, + Expected: &serviceerror.InvalidArgument{Message: "Search attribute WorkflowId is reserved by system."}, + }, + { + Name: "key already whitelisted (ES configured)", + Request: &adminservice.AddSearchAttributesRequest{ + SearchAttributes: map[string]enumspb.IndexedValueType{ + "CustomTextField": enumspb.INDEXED_VALUE_TYPE_TEXT, + }, + }, + Expected: &serviceerror.InvalidArgument{Message: "Search attribute CustomTextField already exists."}, + }, + } + for _, testCase := range testCases2 { + s.T().Run(testCase.Name, func(t *testing.T) { + resp, err := handler.AddSearchAttributes(ctx, testCase.Request) + s.Equal(testCase.Expected, err) + s.Nil(resp) + }) + } + + mockSdkClient := mocksdk.NewMockClient(s.controller) + s.mockResource.SDKClientFactory.EXPECT().GetSystemClient().Return(mockSdkClient).AnyTimes() + s.mockVisibilityMgr.EXPECT().HasStoreName(elasticsearch.PersistenceName).Return(true).AnyTimes() + + // Start workflow failed. + mockSdkClient.EXPECT().ExecuteWorkflow(gomock.Any(), gomock.Any(), "temporal-sys-add-search-attributes-workflow", gomock.Any()).Return(nil, errors.New("start failed")) + resp, err := handler.AddSearchAttributes(ctx, &adminservice.AddSearchAttributesRequest{ + SearchAttributes: map[string]enumspb.IndexedValueType{ + "CustomAttr": enumspb.INDEXED_VALUE_TYPE_KEYWORD, + }, + }) + s.Error(err) + s.Equal("Unable to start temporal-sys-add-search-attributes-workflow workflow: start failed.", err.Error()) + s.Nil(resp) + + // Workflow failed. + mockRun := mocksdk.NewMockWorkflowRun(s.controller) + mockRun.EXPECT().Get(gomock.Any(), nil).Return(errors.New("workflow failed")) + mockSdkClient.EXPECT().ExecuteWorkflow(gomock.Any(), gomock.Any(), "temporal-sys-add-search-attributes-workflow", gomock.Any()).Return(mockRun, nil) + resp, err = handler.AddSearchAttributes(ctx, &adminservice.AddSearchAttributesRequest{ + SearchAttributes: map[string]enumspb.IndexedValueType{ + "CustomAttr": enumspb.INDEXED_VALUE_TYPE_KEYWORD, + }, + }) + s.Error(err) + s.Equal("Workflow temporal-sys-add-search-attributes-workflow returned an error: workflow failed.", err.Error()) + s.Nil(resp) + + // Success case. 
+ mockRun.EXPECT().Get(gomock.Any(), nil).Return(nil) + mockSdkClient.EXPECT().ExecuteWorkflow(gomock.Any(), gomock.Any(), "temporal-sys-add-search-attributes-workflow", gomock.Any()).Return(mockRun, nil) + + resp, err = handler.AddSearchAttributes(ctx, &adminservice.AddSearchAttributesRequest{ + SearchAttributes: map[string]enumspb.IndexedValueType{ + "CustomAttr": enumspb.INDEXED_VALUE_TYPE_KEYWORD, + }, + }) + s.NoError(err) + s.NotNil(resp) +} + +func (s *adminHandlerSuite) Test_GetSearchAttributes_EmptyIndexName() { + handler := s.handler + ctx := context.Background() + + resp, err := handler.GetSearchAttributes(ctx, nil) + s.Error(err) + s.Equal(&serviceerror.InvalidArgument{Message: "Request is nil."}, err) + s.Nil(resp) + + mockSdkClient := mocksdk.NewMockClient(s.controller) + s.mockResource.SDKClientFactory.EXPECT().GetSystemClient().Return(mockSdkClient).AnyTimes() + s.mockNamespaceCache.EXPECT().GetNamespace(s.namespace).Return(s.namespaceEntry, nil).AnyTimes() + + // Elasticsearch is not configured + s.mockVisibilityMgr.EXPECT().HasStoreName(elasticsearch.PersistenceName).Return(true).AnyTimes() + s.mockVisibilityMgr.EXPECT().GetIndexName().Return("").AnyTimes() + mockSdkClient.EXPECT().DescribeWorkflowExecution(gomock.Any(), "temporal-sys-add-search-attributes-workflow", "").Return( + &workflowservice.DescribeWorkflowExecutionResponse{}, nil) + s.mockResource.ESClient.EXPECT().GetMapping(gomock.Any(), "").Return(map[string]string{"col": "type"}, nil) + s.mockResource.SearchAttributesProvider.EXPECT().GetSearchAttributes("", true).Return(searchattribute.TestNameTypeMap, nil).AnyTimes() + + resp, err = handler.GetSearchAttributes(ctx, &adminservice.GetSearchAttributesRequest{Namespace: s.namespace.String()}) + s.NoError(err) + s.NotNil(resp) +} + +func (s *adminHandlerSuite) Test_GetSearchAttributes_NonEmptyIndexName() { + handler := s.handler + ctx := context.Background() + + mockSdkClient := mocksdk.NewMockClient(s.controller) + s.mockResource.SDKClientFactory.EXPECT().GetSystemClient().Return(mockSdkClient).AnyTimes() + + // Configure Elasticsearch: add advanced visibility store config with index name. 
+ s.mockVisibilityMgr.EXPECT().HasStoreName(elasticsearch.PersistenceName).Return(true).AnyTimes() + s.mockVisibilityMgr.EXPECT().GetIndexName().Return("random-index-name").AnyTimes() + + mockSdkClient.EXPECT().DescribeWorkflowExecution(gomock.Any(), "temporal-sys-add-search-attributes-workflow", "").Return( + &workflowservice.DescribeWorkflowExecutionResponse{}, nil) + s.mockResource.ESClient.EXPECT().GetMapping(gomock.Any(), "random-index-name").Return(map[string]string{"col": "type"}, nil) + s.mockResource.SearchAttributesProvider.EXPECT().GetSearchAttributes("random-index-name", true).Return(searchattribute.TestNameTypeMap, nil).AnyTimes() + resp, err := handler.GetSearchAttributes(ctx, &adminservice.GetSearchAttributesRequest{}) + s.NoError(err) + s.NotNil(resp) + + mockSdkClient.EXPECT().DescribeWorkflowExecution(gomock.Any(), "temporal-sys-add-search-attributes-workflow", "").Return( + &workflowservice.DescribeWorkflowExecutionResponse{}, nil) + s.mockResource.ESClient.EXPECT().GetMapping(gomock.Any(), "another-index-name").Return(map[string]string{"col": "type"}, nil) + s.mockResource.SearchAttributesProvider.EXPECT().GetSearchAttributes("another-index-name", true).Return(searchattribute.TestNameTypeMap, nil).AnyTimes() + resp, err = handler.GetSearchAttributes(ctx, &adminservice.GetSearchAttributesRequest{IndexName: "another-index-name"}) + s.NoError(err) + s.NotNil(resp) + + mockSdkClient.EXPECT().DescribeWorkflowExecution(gomock.Any(), "temporal-sys-add-search-attributes-workflow", "").Return( + nil, errors.New("random error")) + s.mockResource.ESClient.EXPECT().GetMapping(gomock.Any(), "random-index-name").Return(map[string]string{"col": "type"}, nil) + s.mockResource.SearchAttributesProvider.EXPECT().GetSearchAttributes("random-index-name", true).Return(searchattribute.TestNameTypeMap, nil).AnyTimes() + resp, err = handler.GetSearchAttributes(ctx, &adminservice.GetSearchAttributesRequest{Namespace: s.namespace.String()}) + s.Error(err) + s.Nil(resp) +} + +func (s *adminHandlerSuite) Test_RemoveSearchAttributes_EmptyIndexName() { + handler := s.handler + ctx := context.Background() + + type test struct { + Name string + Request *adminservice.RemoveSearchAttributesRequest + Expected error + } + // request validation tests + testCases1 := []test{ + { + Name: "nil request", + Request: nil, + Expected: &serviceerror.InvalidArgument{Message: "Request is nil."}, + }, + { + Name: "empty request", + Request: &adminservice.RemoveSearchAttributesRequest{}, + Expected: &serviceerror.InvalidArgument{Message: "SearchAttributes are not set on request."}, + }, + } + for _, testCase := range testCases1 { + s.T().Run(testCase.Name, func(t *testing.T) { + resp, err := handler.RemoveSearchAttributes(ctx, testCase.Request) + s.Equal(testCase.Expected, err) + s.Nil(resp) + }) + } + + // Elasticsearch is not configured + s.mockVisibilityMgr.EXPECT().HasStoreName(elasticsearch.PersistenceName).Return(true).AnyTimes() + s.mockVisibilityMgr.EXPECT().GetIndexName().Return("").AnyTimes() + s.mockResource.SearchAttributesProvider.EXPECT().GetSearchAttributes("", true).Return(searchattribute.TestNameTypeMap, nil).AnyTimes() + testCases2 := []test{ + { + Name: "reserved search attribute (empty index)", + Request: &adminservice.RemoveSearchAttributesRequest{ + SearchAttributes: []string{ + "WorkflowId", + }, + }, + Expected: &serviceerror.InvalidArgument{Message: "Unable to remove non-custom search attributes: WorkflowId."}, + }, + { + Name: "search attribute doesn't exist (empty index)", + Request: 
&adminservice.RemoveSearchAttributesRequest{ + SearchAttributes: []string{ + "ProductId", + }, + }, + Expected: &serviceerror.InvalidArgument{Message: "Search attribute ProductId doesn't exist."}, + }, + } + for _, testCase := range testCases2 { + s.T().Run(testCase.Name, func(t *testing.T) { + resp, err := handler.RemoveSearchAttributes(ctx, testCase.Request) + s.Equal(testCase.Expected, err) + s.Nil(resp) + }) + } +} + +func (s *adminHandlerSuite) Test_RemoveSearchAttributes_NonEmptyIndexName() { + handler := s.handler + ctx := context.Background() + + type test struct { + Name string + Request *adminservice.RemoveSearchAttributesRequest + Expected error + } + testCases := []test{ + { + Name: "reserved search attribute (ES configured)", + Request: &adminservice.RemoveSearchAttributesRequest{ + SearchAttributes: []string{ + "WorkflowId", + }, + }, + Expected: &serviceerror.InvalidArgument{Message: "Unable to remove non-custom search attributes: WorkflowId."}, + }, + { + Name: "search attribute doesn't exist (ES configured)", + Request: &adminservice.RemoveSearchAttributesRequest{ + SearchAttributes: []string{ + "ProductId", + }, + }, + Expected: &serviceerror.InvalidArgument{Message: "Search attribute ProductId doesn't exist."}, + }, + } + + // Configure Elasticsearch: add advanced visibility store config with index name. + s.mockVisibilityMgr.EXPECT().HasStoreName(elasticsearch.PersistenceName).Return(true).AnyTimes() + s.mockVisibilityMgr.EXPECT().GetIndexName().Return("random-index-name").AnyTimes() + s.mockResource.SearchAttributesProvider.EXPECT().GetSearchAttributes("random-index-name", true).Return(searchattribute.TestNameTypeMap, nil).AnyTimes() + for _, testCase := range testCases { + s.T().Run(testCase.Name, func(t *testing.T) { + resp, err := handler.RemoveSearchAttributes(ctx, testCase.Request) + s.Equal(testCase.Expected, err) + s.Nil(resp) + }) + } + + // Success case. 
+ s.mockResource.SearchAttributesManager.EXPECT().SaveSearchAttributes(gomock.Any(), "random-index-name", gomock.Any()).Return(nil) + + resp, err := handler.RemoveSearchAttributes(ctx, &adminservice.RemoveSearchAttributesRequest{ + SearchAttributes: []string{ + "CustomKeywordField", + }, + }) + s.NoError(err) + s.NotNil(resp) +} + +func (s *adminHandlerSuite) Test_RemoveRemoteCluster_Success() { + var clusterName = "cluster" + s.mockClusterMetadataManager.EXPECT().DeleteClusterMetadata( + gomock.Any(), + &persistence.DeleteClusterMetadataRequest{ClusterName: clusterName}, + ).Return(nil) + + _, err := s.handler.RemoveRemoteCluster(context.Background(), &adminservice.RemoveRemoteClusterRequest{ClusterName: clusterName}) + s.NoError(err) +} + +func (s *adminHandlerSuite) Test_RemoveRemoteCluster_Error() { + var clusterName = "cluster" + s.mockClusterMetadataManager.EXPECT().DeleteClusterMetadata( + gomock.Any(), + &persistence.DeleteClusterMetadataRequest{ClusterName: clusterName}, + ).Return(fmt.Errorf("test error")) + + _, err := s.handler.RemoveRemoteCluster(context.Background(), &adminservice.RemoveRemoteClusterRequest{ClusterName: clusterName}) + s.Error(err) +} + +func (s *adminHandlerSuite) Test_AddOrUpdateRemoteCluster_RecordFound_Success() { + var rpcAddress = uuid.New() + var clusterName = uuid.New() + var clusterId = uuid.New() + var recordVersion int64 = 5 + + s.mockMetadata.EXPECT().GetFailoverVersionIncrement().Return(int64(0)) + s.mockMetadata.EXPECT().GetAllClusterInfo().Return(make(map[string]cluster.ClusterInformation)) + s.mockClientFactory.EXPECT().NewRemoteAdminClientWithTimeout(rpcAddress, gomock.Any(), gomock.Any()).Return( + s.mockAdminClient, + ) + s.mockAdminClient.EXPECT().DescribeCluster(gomock.Any(), &adminservice.DescribeClusterRequest{}).Return( + &adminservice.DescribeClusterResponse{ + ClusterId: clusterId, + ClusterName: clusterName, + HistoryShardCount: 4, + FailoverVersionIncrement: 0, + InitialFailoverVersion: 0, + IsGlobalNamespaceEnabled: true, + }, nil) + s.mockClusterMetadataManager.EXPECT().GetClusterMetadata(gomock.Any(), &persistence.GetClusterMetadataRequest{ClusterName: clusterName}).Return( + &persistence.GetClusterMetadataResponse{ + Version: recordVersion, + }, nil) + s.mockClusterMetadataManager.EXPECT().SaveClusterMetadata(gomock.Any(), &persistence.SaveClusterMetadataRequest{ + ClusterMetadata: persistencespb.ClusterMetadata{ + ClusterName: clusterName, + HistoryShardCount: 4, + ClusterId: clusterId, + ClusterAddress: rpcAddress, + FailoverVersionIncrement: 0, + InitialFailoverVersion: 0, + IsGlobalNamespaceEnabled: true, + }, + Version: recordVersion, + }).Return(true, nil) + _, err := s.handler.AddOrUpdateRemoteCluster(context.Background(), &adminservice.AddOrUpdateRemoteClusterRequest{FrontendAddress: rpcAddress}) + s.NoError(err) +} + +func (s *adminHandlerSuite) Test_AddOrUpdateRemoteCluster_RecordNotFound_Success() { + var rpcAddress = uuid.New() + var clusterName = uuid.New() + var clusterId = uuid.New() + + s.mockMetadata.EXPECT().GetFailoverVersionIncrement().Return(int64(0)) + s.mockMetadata.EXPECT().GetAllClusterInfo().Return(make(map[string]cluster.ClusterInformation)) + s.mockClientFactory.EXPECT().NewRemoteAdminClientWithTimeout(rpcAddress, gomock.Any(), gomock.Any()).Return( + s.mockAdminClient, + ) + s.mockAdminClient.EXPECT().DescribeCluster(gomock.Any(), &adminservice.DescribeClusterRequest{}).Return( + &adminservice.DescribeClusterResponse{ + ClusterId: clusterId, + ClusterName: clusterName, + HistoryShardCount: 4, + 
FailoverVersionIncrement: 0, + InitialFailoverVersion: 0, + IsGlobalNamespaceEnabled: true, + }, nil) + s.mockClusterMetadataManager.EXPECT().GetClusterMetadata(gomock.Any(), &persistence.GetClusterMetadataRequest{ClusterName: clusterName}).Return( + nil, + serviceerror.NewNotFound("expected empty result"), + ) + s.mockClusterMetadataManager.EXPECT().SaveClusterMetadata(gomock.Any(), &persistence.SaveClusterMetadataRequest{ + ClusterMetadata: persistencespb.ClusterMetadata{ + ClusterName: clusterName, + HistoryShardCount: 4, + ClusterId: clusterId, + ClusterAddress: rpcAddress, + FailoverVersionIncrement: 0, + InitialFailoverVersion: 0, + IsGlobalNamespaceEnabled: true, + }, + Version: 0, + }).Return(true, nil) + _, err := s.handler.AddOrUpdateRemoteCluster(context.Background(), &adminservice.AddOrUpdateRemoteClusterRequest{FrontendAddress: rpcAddress}) + s.NoError(err) +} + +func (s *adminHandlerSuite) Test_AddOrUpdateRemoteCluster_ValidationError_ClusterNameConflict() { + var rpcAddress = uuid.New() + var clusterId = uuid.New() + + s.mockClientFactory.EXPECT().NewRemoteAdminClientWithTimeout(rpcAddress, gomock.Any(), gomock.Any()).Return( + s.mockAdminClient, + ) + s.mockAdminClient.EXPECT().DescribeCluster(gomock.Any(), &adminservice.DescribeClusterRequest{}).Return( + &adminservice.DescribeClusterResponse{ + ClusterId: clusterId, + ClusterName: s.mockMetadata.GetCurrentClusterName(), + HistoryShardCount: 0, + FailoverVersionIncrement: 0, + InitialFailoverVersion: 0, + IsGlobalNamespaceEnabled: true, + }, nil) + _, err := s.handler.AddOrUpdateRemoteCluster(context.Background(), &adminservice.AddOrUpdateRemoteClusterRequest{FrontendAddress: rpcAddress}) + s.Error(err) + s.IsType(&serviceerror.InvalidArgument{}, err) +} + +func (s *adminHandlerSuite) Test_AddOrUpdateRemoteCluster_ValidationError_FailoverVersionIncrementMismatch() { + var rpcAddress = uuid.New() + var clusterName = uuid.New() + var clusterId = uuid.New() + + s.mockMetadata.EXPECT().GetFailoverVersionIncrement().Return(int64(1)) + s.mockClientFactory.EXPECT().NewRemoteAdminClientWithTimeout(rpcAddress, gomock.Any(), gomock.Any()).Return( + s.mockAdminClient, + ) + s.mockAdminClient.EXPECT().DescribeCluster(gomock.Any(), &adminservice.DescribeClusterRequest{}).Return( + &adminservice.DescribeClusterResponse{ + ClusterId: clusterId, + ClusterName: clusterName, + HistoryShardCount: 0, + FailoverVersionIncrement: 0, + InitialFailoverVersion: 0, + IsGlobalNamespaceEnabled: true, + }, nil) + _, err := s.handler.AddOrUpdateRemoteCluster(context.Background(), &adminservice.AddOrUpdateRemoteClusterRequest{FrontendAddress: rpcAddress}) + s.Error(err) + s.IsType(&serviceerror.InvalidArgument{}, err) +} + +func (s *adminHandlerSuite) Test_AddOrUpdateRemoteCluster_ValidationError_ShardCount_Invalid() { + var rpcAddress = uuid.New() + var clusterName = uuid.New() + var clusterId = uuid.New() + + s.mockMetadata.EXPECT().GetFailoverVersionIncrement().Return(int64(0)) + s.mockClientFactory.EXPECT().NewRemoteAdminClientWithTimeout(rpcAddress, gomock.Any(), gomock.Any()).Return( + s.mockAdminClient, + ) + s.mockAdminClient.EXPECT().DescribeCluster(gomock.Any(), &adminservice.DescribeClusterRequest{}).Return( + &adminservice.DescribeClusterResponse{ + ClusterId: clusterId, + ClusterName: clusterName, + HistoryShardCount: 5, + FailoverVersionIncrement: 0, + InitialFailoverVersion: 0, + IsGlobalNamespaceEnabled: true, + }, nil) + _, err := s.handler.AddOrUpdateRemoteCluster(context.Background(), 
&adminservice.AddOrUpdateRemoteClusterRequest{FrontendAddress: rpcAddress}) + s.Error(err) + s.IsType(&serviceerror.InvalidArgument{}, err) +} + +func (s *adminHandlerSuite) Test_AddOrUpdateRemoteCluster_ShardCount_Multiple() { + var rpcAddress = uuid.New() + var clusterName = uuid.New() + var clusterId = uuid.New() + var recordVersion int64 = 5 + + s.mockMetadata.EXPECT().GetFailoverVersionIncrement().Return(int64(0)) + s.mockMetadata.EXPECT().GetAllClusterInfo().Return(make(map[string]cluster.ClusterInformation)) + s.mockClientFactory.EXPECT().NewRemoteAdminClientWithTimeout(rpcAddress, gomock.Any(), gomock.Any()).Return( + s.mockAdminClient, + ) + s.mockAdminClient.EXPECT().DescribeCluster(gomock.Any(), &adminservice.DescribeClusterRequest{}).Return( + &adminservice.DescribeClusterResponse{ + ClusterId: clusterId, + ClusterName: clusterName, + HistoryShardCount: 16, + FailoverVersionIncrement: 0, + InitialFailoverVersion: 0, + IsGlobalNamespaceEnabled: true, + }, nil) + s.mockClusterMetadataManager.EXPECT().GetClusterMetadata(gomock.Any(), &persistence.GetClusterMetadataRequest{ClusterName: clusterName}).Return( + &persistence.GetClusterMetadataResponse{ + Version: recordVersion, + }, nil) + s.mockClusterMetadataManager.EXPECT().SaveClusterMetadata(gomock.Any(), &persistence.SaveClusterMetadataRequest{ + ClusterMetadata: persistencespb.ClusterMetadata{ + ClusterName: clusterName, + HistoryShardCount: 16, + ClusterId: clusterId, + ClusterAddress: rpcAddress, + FailoverVersionIncrement: 0, + InitialFailoverVersion: 0, + IsGlobalNamespaceEnabled: true, + }, + Version: recordVersion, + }).Return(true, nil) + _, err := s.handler.AddOrUpdateRemoteCluster(context.Background(), &adminservice.AddOrUpdateRemoteClusterRequest{FrontendAddress: rpcAddress}) + s.NoError(err) +} + +func (s *adminHandlerSuite) Test_AddOrUpdateRemoteCluster_ValidationError_GlobalNamespaceDisabled() { + var rpcAddress = uuid.New() + var clusterName = uuid.New() + var clusterId = uuid.New() + + s.mockMetadata.EXPECT().GetFailoverVersionIncrement().Return(int64(0)) + s.mockClientFactory.EXPECT().NewRemoteAdminClientWithTimeout(rpcAddress, gomock.Any(), gomock.Any()).Return( + s.mockAdminClient, + ) + s.mockAdminClient.EXPECT().DescribeCluster(gomock.Any(), &adminservice.DescribeClusterRequest{}).Return( + &adminservice.DescribeClusterResponse{ + ClusterId: clusterId, + ClusterName: clusterName, + HistoryShardCount: 4, + FailoverVersionIncrement: 0, + InitialFailoverVersion: 0, + IsGlobalNamespaceEnabled: false, + }, nil) + _, err := s.handler.AddOrUpdateRemoteCluster(context.Background(), &adminservice.AddOrUpdateRemoteClusterRequest{FrontendAddress: rpcAddress}) + s.Error(err) + s.IsType(&serviceerror.InvalidArgument{}, err) +} + +func (s *adminHandlerSuite) Test_AddOrUpdateRemoteCluster_ValidationError_InitialFailoverVersionConflict() { + var rpcAddress = uuid.New() + var clusterName = uuid.New() + var clusterId = uuid.New() + + s.mockMetadata.EXPECT().GetFailoverVersionIncrement().Return(int64(0)) + s.mockMetadata.EXPECT().GetAllClusterInfo().Return(map[string]cluster.ClusterInformation{ + uuid.New(): {InitialFailoverVersion: 0}, + }) + s.mockClientFactory.EXPECT().NewRemoteAdminClientWithTimeout(rpcAddress, gomock.Any(), gomock.Any()).Return( + s.mockAdminClient, + ) + s.mockAdminClient.EXPECT().DescribeCluster(gomock.Any(), &adminservice.DescribeClusterRequest{}).Return( + &adminservice.DescribeClusterResponse{ + ClusterId: clusterId, + ClusterName: clusterName, + HistoryShardCount: 4, + FailoverVersionIncrement: 0, + 
InitialFailoverVersion: 0, + IsGlobalNamespaceEnabled: true, + }, nil) + _, err := s.handler.AddOrUpdateRemoteCluster(context.Background(), &adminservice.AddOrUpdateRemoteClusterRequest{FrontendAddress: rpcAddress}) + s.Error(err) + s.IsType(&serviceerror.InvalidArgument{}, err) +} + +func (s *adminHandlerSuite) Test_AddOrUpdateRemoteCluster_DescribeCluster_Error() { + var rpcAddress = uuid.New() + + s.mockClientFactory.EXPECT().NewRemoteAdminClientWithTimeout(rpcAddress, gomock.Any(), gomock.Any()).Return( + s.mockAdminClient, + ) + s.mockAdminClient.EXPECT().DescribeCluster(gomock.Any(), &adminservice.DescribeClusterRequest{}).Return( + nil, + fmt.Errorf("test error"), + ) + _, err := s.handler.AddOrUpdateRemoteCluster(context.Background(), &adminservice.AddOrUpdateRemoteClusterRequest{FrontendAddress: rpcAddress}) + s.Error(err) +} + +func (s *adminHandlerSuite) Test_AddOrUpdateRemoteCluster_GetClusterMetadata_Error() { + var rpcAddress = uuid.New() + var clusterName = uuid.New() + var clusterId = uuid.New() + + s.mockMetadata.EXPECT().GetFailoverVersionIncrement().Return(int64(0)) + s.mockMetadata.EXPECT().GetAllClusterInfo().Return(make(map[string]cluster.ClusterInformation)) + s.mockClientFactory.EXPECT().NewRemoteAdminClientWithTimeout(rpcAddress, gomock.Any(), gomock.Any()).Return( + s.mockAdminClient, + ) + s.mockAdminClient.EXPECT().DescribeCluster(gomock.Any(), &adminservice.DescribeClusterRequest{}).Return( + &adminservice.DescribeClusterResponse{ + ClusterId: clusterId, + ClusterName: clusterName, + HistoryShardCount: 4, + FailoverVersionIncrement: 0, + InitialFailoverVersion: 0, + IsGlobalNamespaceEnabled: true, + }, nil) + s.mockClusterMetadataManager.EXPECT().GetClusterMetadata(gomock.Any(), &persistence.GetClusterMetadataRequest{ClusterName: clusterName}).Return( + nil, + fmt.Errorf("test error"), + ) + _, err := s.handler.AddOrUpdateRemoteCluster(context.Background(), &adminservice.AddOrUpdateRemoteClusterRequest{FrontendAddress: rpcAddress}) + s.Error(err) +} + +func (s *adminHandlerSuite) Test_AddOrUpdateRemoteCluster_SaveClusterMetadata_Error() { + var rpcAddress = uuid.New() + var clusterName = uuid.New() + var clusterId = uuid.New() + + s.mockMetadata.EXPECT().GetFailoverVersionIncrement().Return(int64(0)) + s.mockMetadata.EXPECT().GetAllClusterInfo().Return(make(map[string]cluster.ClusterInformation)) + s.mockClientFactory.EXPECT().NewRemoteAdminClientWithTimeout(rpcAddress, gomock.Any(), gomock.Any()).Return( + s.mockAdminClient, + ) + s.mockAdminClient.EXPECT().DescribeCluster(gomock.Any(), &adminservice.DescribeClusterRequest{}).Return( + &adminservice.DescribeClusterResponse{ + ClusterId: clusterId, + ClusterName: clusterName, + HistoryShardCount: 4, + FailoverVersionIncrement: 0, + InitialFailoverVersion: 0, + IsGlobalNamespaceEnabled: true, + }, nil) + s.mockClusterMetadataManager.EXPECT().GetClusterMetadata(gomock.Any(), &persistence.GetClusterMetadataRequest{ClusterName: clusterName}).Return( + nil, + serviceerror.NewNotFound("expected empty result"), + ) + s.mockClusterMetadataManager.EXPECT().SaveClusterMetadata(gomock.Any(), &persistence.SaveClusterMetadataRequest{ + ClusterMetadata: persistencespb.ClusterMetadata{ + ClusterName: clusterName, + HistoryShardCount: 4, + ClusterId: clusterId, + ClusterAddress: rpcAddress, + FailoverVersionIncrement: 0, + InitialFailoverVersion: 0, + IsGlobalNamespaceEnabled: true, + }, + Version: 0, + }).Return(false, fmt.Errorf("test error")) + _, err := s.handler.AddOrUpdateRemoteCluster(context.Background(), 
&adminservice.AddOrUpdateRemoteClusterRequest{FrontendAddress: rpcAddress}) + s.Error(err) +} + +func (s *adminHandlerSuite) Test_AddOrUpdateRemoteCluster_SaveClusterMetadata_NotApplied_Error() { + var rpcAddress = uuid.New() + var clusterName = uuid.New() + var clusterId = uuid.New() + + s.mockMetadata.EXPECT().GetFailoverVersionIncrement().Return(int64(0)) + s.mockMetadata.EXPECT().GetAllClusterInfo().Return(make(map[string]cluster.ClusterInformation)) + s.mockClientFactory.EXPECT().NewRemoteAdminClientWithTimeout(rpcAddress, gomock.Any(), gomock.Any()).Return( + s.mockAdminClient, + ) + s.mockAdminClient.EXPECT().DescribeCluster(gomock.Any(), &adminservice.DescribeClusterRequest{}).Return( + &adminservice.DescribeClusterResponse{ + ClusterId: clusterId, + ClusterName: clusterName, + HistoryShardCount: 4, + FailoverVersionIncrement: 0, + InitialFailoverVersion: 0, + IsGlobalNamespaceEnabled: true, + }, nil) + s.mockClusterMetadataManager.EXPECT().GetClusterMetadata(gomock.Any(), &persistence.GetClusterMetadataRequest{ClusterName: clusterName}).Return( + nil, + serviceerror.NewNotFound("expected empty result"), + ) + s.mockClusterMetadataManager.EXPECT().SaveClusterMetadata(gomock.Any(), &persistence.SaveClusterMetadataRequest{ + ClusterMetadata: persistencespb.ClusterMetadata{ + ClusterName: clusterName, + HistoryShardCount: 4, + ClusterId: clusterId, + ClusterAddress: rpcAddress, + FailoverVersionIncrement: 0, + InitialFailoverVersion: 0, + IsGlobalNamespaceEnabled: true, + }, + Version: 0, + }).Return(false, nil) + _, err := s.handler.AddOrUpdateRemoteCluster(context.Background(), &adminservice.AddOrUpdateRemoteClusterRequest{FrontendAddress: rpcAddress}) + s.Error(err) + s.IsType(&serviceerror.InvalidArgument{}, err) +} + +func (s *adminHandlerSuite) Test_DescribeCluster_CurrentCluster_Success() { + var clusterId = uuid.New() + clusterName := s.mockMetadata.GetCurrentClusterName() + s.mockResource.HostInfoProvider.EXPECT().HostInfo().Return(membership.NewHostInfoFromAddress("test")) + s.mockResource.MembershipMonitor.EXPECT().GetReachableMembers().Return(nil, nil) + s.mockResource.HistoryServiceResolver.EXPECT().Members().Return([]membership.HostInfo{}) + s.mockResource.HistoryServiceResolver.EXPECT().MemberCount().Return(0) + s.mockResource.FrontendServiceResolver.EXPECT().Members().Return([]membership.HostInfo{}) + s.mockResource.FrontendServiceResolver.EXPECT().MemberCount().Return(0) + s.mockResource.MatchingServiceResolver.EXPECT().Members().Return([]membership.HostInfo{}) + s.mockResource.MatchingServiceResolver.EXPECT().MemberCount().Return(0) + s.mockResource.WorkerServiceResolver.EXPECT().Members().Return([]membership.HostInfo{}) + s.mockResource.WorkerServiceResolver.EXPECT().MemberCount().Return(0) + s.mockResource.ExecutionMgr.EXPECT().GetName().Return("") + s.mockVisibilityMgr.EXPECT().GetStoreNames().Return([]string{elasticsearch.PersistenceName}) + s.mockClusterMetadataManager.EXPECT().GetClusterMetadata(gomock.Any(), &persistence.GetClusterMetadataRequest{ClusterName: clusterName}).Return( + &persistence.GetClusterMetadataResponse{ + ClusterMetadata: persistencespb.ClusterMetadata{ + ClusterName: clusterName, + HistoryShardCount: 0, + ClusterId: clusterId, + FailoverVersionIncrement: 0, + InitialFailoverVersion: 0, + IsGlobalNamespaceEnabled: true, + }, + Version: 1, + }, nil) + + resp, err := s.handler.DescribeCluster(context.Background(), &adminservice.DescribeClusterRequest{}) + s.NoError(err) + s.Equal(resp.GetClusterName(), clusterName) + 
s.Equal(resp.GetClusterId(), clusterId) + s.Equal(resp.GetHistoryShardCount(), int32(0)) + s.Equal(resp.GetFailoverVersionIncrement(), int64(0)) + s.Equal(resp.GetInitialFailoverVersion(), int64(0)) + s.True(resp.GetIsGlobalNamespaceEnabled()) +} + +func (s *adminHandlerSuite) Test_DescribeCluster_NonCurrentCluster_Success() { + var clusterName = uuid.New() + var clusterId = uuid.New() + + s.mockResource.HostInfoProvider.EXPECT().HostInfo().Return(membership.NewHostInfoFromAddress("test")) + s.mockResource.MembershipMonitor.EXPECT().GetReachableMembers().Return(nil, nil) + s.mockResource.HistoryServiceResolver.EXPECT().Members().Return([]membership.HostInfo{}) + s.mockResource.HistoryServiceResolver.EXPECT().MemberCount().Return(0) + s.mockResource.FrontendServiceResolver.EXPECT().Members().Return([]membership.HostInfo{}) + s.mockResource.FrontendServiceResolver.EXPECT().MemberCount().Return(0) + s.mockResource.MatchingServiceResolver.EXPECT().Members().Return([]membership.HostInfo{}) + s.mockResource.MatchingServiceResolver.EXPECT().MemberCount().Return(0) + s.mockResource.WorkerServiceResolver.EXPECT().Members().Return([]membership.HostInfo{}) + s.mockResource.WorkerServiceResolver.EXPECT().MemberCount().Return(0) + s.mockResource.ExecutionMgr.EXPECT().GetName().Return("") + s.mockVisibilityMgr.EXPECT().GetStoreNames().Return([]string{elasticsearch.PersistenceName}) + s.mockClusterMetadataManager.EXPECT().GetClusterMetadata(gomock.Any(), &persistence.GetClusterMetadataRequest{ClusterName: clusterName}).Return( + &persistence.GetClusterMetadataResponse{ + ClusterMetadata: persistencespb.ClusterMetadata{ + ClusterName: clusterName, + HistoryShardCount: 0, + ClusterId: clusterId, + FailoverVersionIncrement: 0, + InitialFailoverVersion: 0, + IsGlobalNamespaceEnabled: true, + }, + Version: 1, + }, nil) + + resp, err := s.handler.DescribeCluster(context.Background(), &adminservice.DescribeClusterRequest{ClusterName: clusterName}) + s.NoError(err) + s.Equal(resp.GetClusterName(), clusterName) + s.Equal(resp.GetClusterId(), clusterId) + s.Equal(resp.GetHistoryShardCount(), int32(0)) + s.Equal(resp.GetFailoverVersionIncrement(), int64(0)) + s.Equal(resp.GetInitialFailoverVersion(), int64(0)) + s.True(resp.GetIsGlobalNamespaceEnabled()) +} + +func (s *adminHandlerSuite) Test_ListClusters_Success() { + var pageSize int32 = 1 + + s.mockClusterMetadataManager.EXPECT().ListClusterMetadata(gomock.Any(), &persistence.ListClusterMetadataRequest{ + PageSize: int(pageSize), + }).Return( + &persistence.ListClusterMetadataResponse{ + ClusterMetadata: []*persistence.GetClusterMetadataResponse{ + { + ClusterMetadata: persistencespb.ClusterMetadata{ClusterName: "test"}, + }, + }}, nil) + + resp, err := s.handler.ListClusters(context.Background(), &adminservice.ListClustersRequest{ + PageSize: pageSize, + }) + s.NoError(err) + s.Equal(1, len(resp.Clusters)) + s.Equal(0, len(resp.GetNextPageToken())) +} + +func (s *adminHandlerSuite) TestDeleteWorkflowExecution_DeleteCurrentExecution() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "workflowID", + } + + request := &adminservice.DeleteWorkflowExecutionRequest{ + Namespace: s.namespace.String(), + Execution: &execution, + } + + s.mockNamespaceCache.EXPECT().GetNamespaceID(s.namespace).Return(s.namespaceID, nil).AnyTimes() + s.mockVisibilityMgr.EXPECT().HasStoreName(cassandra.CassandraPersistenceName).Return(false) + + s.mockExecutionMgr.EXPECT().GetCurrentExecution(gomock.Any(), gomock.Any()).Return(nil, errors.New("some random error")) + resp, err := 
s.handler.DeleteWorkflowExecution(context.Background(), request) + s.Nil(resp) + s.Error(err) + + mutableState := &persistencespb.WorkflowMutableState{ + ExecutionInfo: &persistencespb.WorkflowExecutionInfo{ + VersionHistories: &historyspb.VersionHistories{ + CurrentVersionHistoryIndex: 0, + Histories: []*historyspb.VersionHistory{ + {BranchToken: []byte("branch1")}, + {BranchToken: []byte("branch2")}, + {BranchToken: []byte("branch3")}, + }, + }, + }, + } + + shardID := common.WorkflowIDToHistoryShard( + s.namespaceID.String(), + execution.GetWorkflowId(), + s.handler.numberOfHistoryShards, + ) + runID := uuid.New() + s.mockExecutionMgr.EXPECT().GetCurrentExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetCurrentExecutionResponse{ + StartRequestID: uuid.New(), + RunID: runID, + State: enums.WORKFLOW_EXECUTION_STATE_COMPLETED, + Status: enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED, + }, nil) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), &persistence.GetWorkflowExecutionRequest{ + ShardID: shardID, + NamespaceID: s.namespaceID.String(), + WorkflowID: execution.WorkflowId, + RunID: runID, + }).Return(&persistence.GetWorkflowExecutionResponse{State: mutableState}, nil) + s.mockHistoryClient.EXPECT().DeleteWorkflowVisibilityRecord(gomock.Any(), &historyservice.DeleteWorkflowVisibilityRecordRequest{ + NamespaceId: s.namespaceID.String(), + Execution: &commonpb.WorkflowExecution{ + WorkflowId: execution.WorkflowId, + RunId: runID, + }, + }).Return(&historyservice.DeleteWorkflowVisibilityRecordResponse{}, nil) + s.mockExecutionMgr.EXPECT().DeleteCurrentWorkflowExecution(gomock.Any(), &persistence.DeleteCurrentWorkflowExecutionRequest{ + ShardID: shardID, + NamespaceID: s.namespaceID.String(), + WorkflowID: execution.WorkflowId, + RunID: runID, + }).Return(nil) + s.mockExecutionMgr.EXPECT().DeleteWorkflowExecution(gomock.Any(), &persistence.DeleteWorkflowExecutionRequest{ + ShardID: shardID, + NamespaceID: s.namespaceID.String(), + WorkflowID: execution.WorkflowId, + RunID: runID, + }).Return(nil) + s.mockExecutionMgr.EXPECT().DeleteHistoryBranch(gomock.Any(), gomock.Any()).Times(len(mutableState.ExecutionInfo.VersionHistories.Histories)) + + _, err = s.handler.DeleteWorkflowExecution(context.Background(), request) + s.NoError(err) +} + +func (s *adminHandlerSuite) TestDeleteWorkflowExecution_LoadMutableStateFailed() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "workflowID", + RunId: uuid.New(), + } + + request := &adminservice.DeleteWorkflowExecutionRequest{ + Namespace: s.namespace.String(), + Execution: &execution, + } + + s.mockNamespaceCache.EXPECT().GetNamespaceID(s.namespace).Return(s.namespaceID, nil).AnyTimes() + s.mockVisibilityMgr.EXPECT().HasStoreName(cassandra.CassandraPersistenceName).Return(false) + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(nil, errors.New("some random error")) + s.mockHistoryClient.EXPECT().DeleteWorkflowVisibilityRecord(gomock.Any(), gomock.Any()).Return(&historyservice.DeleteWorkflowVisibilityRecordResponse{}, nil) + s.mockExecutionMgr.EXPECT().DeleteCurrentWorkflowExecution(gomock.Any(), gomock.Any()).Return(nil) + s.mockExecutionMgr.EXPECT().DeleteWorkflowExecution(gomock.Any(), gomock.Any()).Return(nil) + + _, err := s.handler.DeleteWorkflowExecution(context.Background(), request) + s.NoError(err) +} + +func (s *adminHandlerSuite) TestDeleteWorkflowExecution_CassandraVisibilityBackend() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "workflowID", + RunId: 
uuid.New(), + } + + request := &adminservice.DeleteWorkflowExecutionRequest{ + Namespace: s.namespace.String(), + Execution: &execution, + } + + s.mockNamespaceCache.EXPECT().GetNamespaceID(s.namespace).Return(s.namespaceID, nil).AnyTimes() + s.mockVisibilityMgr.EXPECT().HasStoreName(cassandra.CassandraPersistenceName).Return(true).AnyTimes() + + // test delete open records + branchToken := []byte("branchToken") + version := int64(100) + mutableState := &persistencespb.WorkflowMutableState{ + ExecutionState: &persistencespb.WorkflowExecutionState{ + CreateRequestId: uuid.New(), + RunId: execution.RunId, + State: enums.WORKFLOW_EXECUTION_STATE_RUNNING, + Status: enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING, + }, + NextEventId: 12, + ExecutionInfo: &persistencespb.WorkflowExecutionInfo{ + CompletionEventBatchId: 10, + StartTime: timestamp.TimePtr(time.Now()), + VersionHistories: &historyspb.VersionHistories{ + CurrentVersionHistoryIndex: 0, + Histories: []*historyspb.VersionHistory{ + { + BranchToken: branchToken, + Items: []*historyspb.VersionHistoryItem{ + {EventId: 11, Version: version}, + }, + }, + }, + }, + }, + } + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: mutableState}, nil) + s.mockHistoryClient.EXPECT().DeleteWorkflowVisibilityRecord(gomock.Any(), &historyservice.DeleteWorkflowVisibilityRecordRequest{ + NamespaceId: s.namespaceID.String(), + Execution: &execution, + WorkflowStartTime: mutableState.ExecutionInfo.StartTime, + }).Return(&historyservice.DeleteWorkflowVisibilityRecordResponse{}, nil) + s.mockExecutionMgr.EXPECT().DeleteCurrentWorkflowExecution(gomock.Any(), gomock.Any()).Return(nil) + s.mockExecutionMgr.EXPECT().DeleteWorkflowExecution(gomock.Any(), gomock.Any()).Return(nil) + s.mockExecutionMgr.EXPECT().DeleteHistoryBranch(gomock.Any(), gomock.Any()).Times(len(mutableState.ExecutionInfo.VersionHistories.Histories)) + + _, err := s.handler.DeleteWorkflowExecution(context.Background(), request) + s.NoError(err) + + // test delete close records + mutableState.ExecutionState.State = enums.WORKFLOW_EXECUTION_STATE_COMPLETED + mutableState.ExecutionState.Status = enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED + + shardID := common.WorkflowIDToHistoryShard( + s.namespaceID.String(), + execution.GetWorkflowId(), + s.handler.numberOfHistoryShards, + ) + closeTime := time.Now() + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: mutableState}, nil) + s.mockExecutionMgr.EXPECT().ReadHistoryBranch(gomock.Any(), &persistence.ReadHistoryBranchRequest{ + ShardID: shardID, + BranchToken: branchToken, + MinEventID: mutableState.ExecutionInfo.CompletionEventBatchId, + MaxEventID: mutableState.NextEventId, + PageSize: 1, + }).Return(&persistence.ReadHistoryBranchResponse{ + HistoryEvents: []*historypb.HistoryEvent{ + { + EventId: 10, + EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_COMPLETED, + Version: version, + EventTime: timestamp.TimePtr(closeTime.Add(-time.Millisecond)), + }, + { + EventId: 11, + EventType: enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_COMPLETED, + Version: version, + EventTime: timestamp.TimePtr(closeTime), + }, + }, + }, nil) + s.mockHistoryClient.EXPECT().DeleteWorkflowVisibilityRecord(gomock.Any(), &historyservice.DeleteWorkflowVisibilityRecordRequest{ + NamespaceId: s.namespaceID.String(), + Execution: &execution, + WorkflowCloseTime: timestamp.TimePtr(closeTime), + 
}).Return(&historyservice.DeleteWorkflowVisibilityRecordResponse{}, nil) + s.mockExecutionMgr.EXPECT().DeleteCurrentWorkflowExecution(gomock.Any(), gomock.Any()).Return(nil) + s.mockExecutionMgr.EXPECT().DeleteWorkflowExecution(gomock.Any(), gomock.Any()).Return(nil) + s.mockExecutionMgr.EXPECT().DeleteHistoryBranch(gomock.Any(), gomock.Any()).Times(len(mutableState.ExecutionInfo.VersionHistories.Histories)) + + _, err = s.handler.DeleteWorkflowExecution(context.Background(), request) + s.NoError(err) +} + +func (s *adminHandlerSuite) TestStreamWorkflowReplicationMessages_ClientToServerBroken() { + clientClusterShardID := historyclient.ClusterShardID{ + ClusterID: rand.Int31(), + ShardID: rand.Int31(), + } + serverClusterShardID := historyclient.ClusterShardID{ + ClusterID: rand.Int31(), + ShardID: rand.Int31(), + } + clusterShardMD := historyclient.EncodeClusterShardMD( + clientClusterShardID, + serverClusterShardID, + ) + ctx := metadata.NewIncomingContext(context.Background(), clusterShardMD) + clientCluster := adminservicemock.NewMockAdminService_StreamWorkflowReplicationMessagesServer(s.controller) + clientCluster.EXPECT().Context().Return(ctx).AnyTimes() + serverCluster := historyservicemock.NewMockHistoryService_StreamWorkflowReplicationMessagesClient(s.controller) + s.mockHistoryClient.EXPECT().StreamWorkflowReplicationMessages(ctx).Return(serverCluster, nil) + + waitGroupStart := sync.WaitGroup{} + waitGroupStart.Add(2) + waitGroupEnd := sync.WaitGroup{} + waitGroupEnd.Add(2) + channel := make(chan struct{}) + + clientCluster.EXPECT().Recv().DoAndReturn(func() (*adminservice.StreamWorkflowReplicationMessagesRequest, error) { + waitGroupStart.Done() + waitGroupStart.Wait() + + defer waitGroupEnd.Done() + return nil, serviceerror.NewUnavailable("random error") + }) + serverCluster.EXPECT().Recv().DoAndReturn(func() (*historyservice.StreamWorkflowReplicationMessagesResponse, error) { + waitGroupStart.Done() + waitGroupStart.Wait() + + defer waitGroupEnd.Done() + <-channel + return nil, serviceerror.NewUnavailable("random error") + }) + _ = s.handler.StreamWorkflowReplicationMessages(clientCluster) + close(channel) + waitGroupEnd.Wait() +} + +func (s *adminHandlerSuite) TestStreamWorkflowReplicationMessages_ServerToClientBroken() { + clientClusterShardID := historyclient.ClusterShardID{ + ClusterID: rand.Int31(), + ShardID: rand.Int31(), + } + serverClusterShardID := historyclient.ClusterShardID{ + ClusterID: rand.Int31(), + ShardID: rand.Int31(), + } + clusterShardMD := historyclient.EncodeClusterShardMD( + clientClusterShardID, + serverClusterShardID, + ) + ctx := metadata.NewIncomingContext(context.Background(), clusterShardMD) + clientCluster := adminservicemock.NewMockAdminService_StreamWorkflowReplicationMessagesServer(s.controller) + clientCluster.EXPECT().Context().Return(ctx).AnyTimes() + serverCluster := historyservicemock.NewMockHistoryService_StreamWorkflowReplicationMessagesClient(s.controller) + s.mockHistoryClient.EXPECT().StreamWorkflowReplicationMessages(ctx).Return(serverCluster, nil) + + waitGroupStart := sync.WaitGroup{} + waitGroupStart.Add(2) + waitGroupEnd := sync.WaitGroup{} + waitGroupEnd.Add(2) + channel := make(chan struct{}) + + clientCluster.EXPECT().Recv().DoAndReturn(func() (*adminservice.StreamWorkflowReplicationMessagesRequest, error) { + waitGroupStart.Done() + waitGroupStart.Wait() + + defer waitGroupEnd.Done() + <-channel + return nil, serviceerror.NewUnavailable("random error") + }) + serverCluster.EXPECT().Recv().DoAndReturn(func() 
(*historyservice.StreamWorkflowReplicationMessagesResponse, error) { + waitGroupStart.Done() + waitGroupStart.Wait() + + defer waitGroupEnd.Done() + return nil, serviceerror.NewUnavailable("random error") + }) + _ = s.handler.StreamWorkflowReplicationMessages(clientCluster) + close(channel) + waitGroupEnd.Wait() +} + +func (s *adminHandlerSuite) TestGetNamespace_WithIDSuccess() { + namespaceID := "someId" + nsResponse := &persistence.GetNamespaceResponse{ + Namespace: &persistencespb.NamespaceDetail{ + FailoverVersion: 1, + Info: &persistencespb.NamespaceInfo{ + Id: namespaceID, + Name: "another random namespace name", + State: enumspb.NAMESPACE_STATE_DELETED, + Data: make(map[string]string)}, + Config: &persistencespb.NamespaceConfig{ + Retention: timestamp.DurationFromDays(2), + BadBinaries: &namespacepb.BadBinaries{ + Binaries: map[string]*namespacepb.BadBinaryInfo{}, + }}, + ReplicationConfig: &persistencespb.NamespaceReplicationConfig{ + ActiveClusterName: cluster.TestAlternativeClusterName, + Clusters: []string{ + cluster.TestCurrentClusterName, + cluster.TestAlternativeClusterName, + }, + }, + FailoverNotificationVersion: 0, + }, + } + s.mockResource.MetadataMgr.EXPECT().GetNamespace(gomock.Any(), &persistence.GetNamespaceRequest{ + ID: namespaceID, + }).Return(nsResponse, nil) + resp, err := s.handler.GetNamespace(context.Background(), &adminservice.GetNamespaceRequest{ + Attributes: &adminservice.GetNamespaceRequest_Id{ + Id: namespaceID, + }, + }) + s.NoError(err) + s.Equal(namespaceID, resp.GetInfo().GetId()) + s.Equal(cluster.TestAlternativeClusterName, resp.GetReplicationConfig().GetActiveClusterName()) +} + +func (s *adminHandlerSuite) TestGetNamespace_WithNameSuccess() { + namespaceName := "some name" + namespaceId := "some id" + nsResponse := &persistence.GetNamespaceResponse{ + Namespace: &persistencespb.NamespaceDetail{ + FailoverVersion: 1, + Info: &persistencespb.NamespaceInfo{ + Id: namespaceId, + Name: namespaceName, + State: enumspb.NAMESPACE_STATE_DELETED, + Data: make(map[string]string)}, + Config: &persistencespb.NamespaceConfig{ + Retention: timestamp.DurationFromDays(2), + BadBinaries: &namespacepb.BadBinaries{ + Binaries: map[string]*namespacepb.BadBinaryInfo{}, + }}, + ReplicationConfig: &persistencespb.NamespaceReplicationConfig{ + ActiveClusterName: cluster.TestAlternativeClusterName, + Clusters: []string{ + cluster.TestCurrentClusterName, + cluster.TestAlternativeClusterName, + }, + }, + FailoverNotificationVersion: 0, + }, + } + s.mockResource.MetadataMgr.EXPECT().GetNamespace(gomock.Any(), &persistence.GetNamespaceRequest{ + Name: namespaceName, + }).Return(nsResponse, nil) + resp, err := s.handler.GetNamespace(context.Background(), &adminservice.GetNamespaceRequest{ + Attributes: &adminservice.GetNamespaceRequest_Namespace{ + Namespace: namespaceName, + }, + }) + s.NoError(err) + s.Equal(namespaceId, resp.GetInfo().GetId()) + s.Equal(namespaceName, resp.GetInfo().GetName()) + s.Equal(cluster.TestAlternativeClusterName, resp.GetReplicationConfig().GetActiveClusterName()) +} + +func (s *adminHandlerSuite) TestGetNamespace_EmptyRequest() { + v := &adminservice.GetNamespaceRequest{} + _, err := s.handler.GetNamespace(context.Background(), v) + s.Equal(errRequestNotSet, err) +} diff -Nru temporal-1.21.5-1/src/service/frontend/configs/quotas.go temporal-1.22.5/src/service/frontend/configs/quotas.go --- temporal-1.21.5-1/src/service/frontend/configs/quotas.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/frontend/configs/quotas.go 
2024-02-23 09:45:43.000000000 +0000 @@ -28,10 +28,23 @@ "time" "go.temporal.io/server/common/dynamicconfig" + "go.temporal.io/server/common/headers" "go.temporal.io/server/common/quotas" ) +const ( + // OperatorPriority is used to give precedence to calls coming from web UI or tctl + OperatorPriority = 0 +) + var ( + // ExecutionAPICountLimitOverride determines how many tokens each of these API calls consumes from their + // corresponding quota, which is determined by dynamicconfig.FrontendMaxConcurrentLongRunningRequestsPerInstance. If + // the value is not set, then the method is not considered a long-running request and the number of concurrent + // requests will not be throttled. The Poll* methods here are long-running because they block until there is a task + // available. The GetWorkflowExecutionHistory method is blocking only if WaitNewEvent is true, otherwise it is not + // long-running. The QueryWorkflow and UpdateWorkflowExecution methods are long-running because they both block + // until a background WFT is complete. ExecutionAPICountLimitOverride = map[string]int{ "PollActivityTaskQueue": 1, "PollWorkflowTaskQueue": 1, @@ -41,95 +54,95 @@ } ExecutionAPIToPriority = map[string]int{ - // priority 0 - "StartWorkflowExecution": 0, - "SignalWithStartWorkflowExecution": 0, - "SignalWorkflowExecution": 0, - "RequestCancelWorkflowExecution": 0, - "TerminateWorkflowExecution": 0, - "GetWorkflowExecutionHistory": 0, - "UpdateWorkflowExecution": 0, - "PollWorkflowExecutionUpdate": 0, - // priority 1 - "RecordActivityTaskHeartbeat": 1, - "RecordActivityTaskHeartbeatById": 1, - "RespondActivityTaskCanceled": 1, - "RespondActivityTaskCanceledById": 1, - "RespondActivityTaskFailed": 1, - "RespondActivityTaskFailedById": 1, - "RespondActivityTaskCompleted": 1, - "RespondActivityTaskCompletedById": 1, - "RespondWorkflowTaskCompleted": 1, + "StartWorkflowExecution": 1, + "SignalWithStartWorkflowExecution": 1, + "SignalWorkflowExecution": 1, + "RequestCancelWorkflowExecution": 1, + "TerminateWorkflowExecution": 1, + "GetWorkflowExecutionHistory": 1, + "UpdateWorkflowExecution": 1, // priority 2 - "ResetWorkflowExecution": 2, - "DescribeWorkflowExecution": 2, - "RespondWorkflowTaskFailed": 2, - "QueryWorkflow": 2, - "RespondQueryTaskCompleted": 2, - "PollWorkflowTaskQueue": 2, - "PollActivityTaskQueue": 2, - "GetWorkflowExecutionHistoryReverse": 2, - "GetWorkerBuildIdCompatibility": 2, - "GetWorkerTaskReachability": 2, - "DeleteWorkflowExecution": 2, + "RecordActivityTaskHeartbeat": 2, + "RecordActivityTaskHeartbeatById": 2, + "RespondActivityTaskCanceled": 2, + "RespondActivityTaskCanceledById": 2, + "RespondActivityTaskFailed": 2, + "RespondActivityTaskFailedById": 2, + "RespondActivityTaskCompleted": 2, + "RespondActivityTaskCompletedById": 2, + "RespondWorkflowTaskCompleted": 2, + "RespondWorkflowTaskFailed": 2, + "RespondQueryTaskCompleted": 2, // priority 3 - "ResetStickyTaskQueue": 3, - "DescribeTaskQueue": 3, - "ListTaskQueuePartitions": 3, + "ResetWorkflowExecution": 3, + "DescribeWorkflowExecution": 3, + "QueryWorkflow": 3, + "PollWorkflowTaskQueue": 3, + "PollActivityTaskQueue": 3, + "PollWorkflowExecutionUpdate": 3, + "GetWorkflowExecutionHistoryReverse": 3, + "GetWorkerBuildIdCompatibility": 3, + "GetWorkerTaskReachability": 3, + "DeleteWorkflowExecution": 3, + + // priority 4 + "ResetStickyTaskQueue": 4, + "DescribeTaskQueue": 4, + "ListTaskQueuePartitions": 4, } - ExecutionAPIPrioritiesOrdered = []int{0, 1, 2, 3} + ExecutionAPIPrioritiesOrdered = []int{0, 1, 2, 3, 4} 
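// --- Editor's sketch (not part of the upstream patch) -------------------------
// The quotas.go hunk above reserves priority 0 (OperatorPriority) for operator
// traffic (web UI / tctl, identified by the operator caller-type header) and
// shifts every execution API down one level to make room; the operator bucket's
// rate is later derived as operatorRateRatio() * base rate via
// operatorRateBurstImpl. The snippet below is a minimal, self-contained
// illustration of how a request's priority is resolved under that scheme. It
// uses simplified stand-in types (request, callerTypeOperator) instead of the
// real quotas.Request and headers constants, so treat it as an assumption-laden
// sketch rather than upstream code.
package main

import "fmt"

const operatorPriority = 0 // highest priority, reserved for operator callers

const callerTypeOperator = "operator" // stand-in for headers.CallerTypeOperator

// request is a simplified stand-in for quotas.Request.
type request struct {
	API        string
	CallerType string
}

// executionAPIToPriority mirrors the 1.22.5 layout above: user-facing calls now
// start at 1 because 0 is taken by operator traffic (only a few entries shown).
var executionAPIToPriority = map[string]int{
	"StartWorkflowExecution": 1, // was 0 in 1.21.5
	"PollWorkflowTaskQueue":  3, // was 2 in 1.21.5
	"DescribeTaskQueue":      4, // was 3 in 1.21.5
}

// priorityFor mirrors the callback handed to quotas.NewPriorityRateLimiter:
// operator calls always map to 0, known APIs fall back to the map, and unknown
// APIs land on the lowest priority (4 in this sketch).
func priorityFor(req request) int {
	if req.CallerType == callerTypeOperator {
		return operatorPriority
	}
	if p, ok := executionAPIToPriority[req.API]; ok {
		return p
	}
	return 4
}

func main() {
	fmt.Println(priorityFor(request{API: "StartWorkflowExecution", CallerType: callerTypeOperator})) // 0
	fmt.Println(priorityFor(request{API: "StartWorkflowExecution"}))                                 // 1
	fmt.Println(priorityFor(request{API: "PollWorkflowTaskQueue"}))                                  // 3
}

// --- end editor's sketch; the upstream diff resumes below ---------------------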
VisibilityAPIToPriority = map[string]int{ - "CountWorkflowExecutions": 0, - "ScanWorkflowExecutions": 0, - "ListOpenWorkflowExecutions": 0, - "ListClosedWorkflowExecutions": 0, - "ListWorkflowExecutions": 0, - "ListArchivedWorkflowExecutions": 0, + "CountWorkflowExecutions": 1, + "ScanWorkflowExecutions": 1, + "ListOpenWorkflowExecutions": 1, + "ListClosedWorkflowExecutions": 1, + "ListWorkflowExecutions": 1, + "ListArchivedWorkflowExecutions": 1, } - VisibilityAPIPrioritiesOrdered = []int{0} + VisibilityAPIPrioritiesOrdered = []int{0, 1} // Special rate limiting for APIs that may insert replication tasks into a namespace replication queue. // The replication queue is used to propagate critical failover messages and this mapping prevents flooding the // queue and delaying failover. NamespaceReplicationInducingAPIToPriority = map[string]int{ - "RegisterNamespace": 0, - "UpdateNamespace": 0, - "UpdateWorkerBuildIdCompatibility": 1, + "RegisterNamespace": 1, + "UpdateNamespace": 1, + "UpdateWorkerBuildIdCompatibility": 2, } - NamespaceReplicationInducingAPIPrioritiesOrdered = []int{0, 1} + NamespaceReplicationInducingAPIPrioritiesOrdered = []int{0, 1, 2} OtherAPIToPriority = map[string]int{ - "GetClusterInfo": 0, - "GetSystemInfo": 0, - "GetSearchAttributes": 0, - - "DescribeNamespace": 0, - "ListNamespaces": 0, - "DeprecateNamespace": 0, - - "CreateSchedule": 0, - "DescribeSchedule": 0, - "UpdateSchedule": 0, - "PatchSchedule": 0, - "ListScheduleMatchingTimes": 0, - "DeleteSchedule": 0, - "ListSchedules": 0, + "GetClusterInfo": 1, + "GetSystemInfo": 1, + "GetSearchAttributes": 1, + + "DescribeNamespace": 1, + "ListNamespaces": 1, + "DeprecateNamespace": 1, + + "CreateSchedule": 1, + "DescribeSchedule": 1, + "UpdateSchedule": 1, + "PatchSchedule": 1, + "ListScheduleMatchingTimes": 1, + "DeleteSchedule": 1, + "ListSchedules": 1, // TODO(yx): added temporarily here; need to check if it's the right place and priority - "DescribeBatchOperation": 0, - "ListBatchOperations": 0, - "StartBatchOperation": 0, - "StopBatchOperation": 0, + "DescribeBatchOperation": 1, + "ListBatchOperations": 1, + "StartBatchOperation": 1, + "StopBatchOperation": 1, } - OtherAPIPrioritiesOrdered = []int{0} + OtherAPIPrioritiesOrdered = []int{0, 1} ) type ( @@ -138,9 +151,15 @@ rateFn dynamicconfig.FloatPropertyFnWithNamespaceFilter burstFn dynamicconfig.IntPropertyFnWithNamespaceFilter } + + operatorRateBurstImpl struct { + operatorRateRatio dynamicconfig.FloatPropertyFn + baseRateBurstFn quotas.RateBurst + } ) var _ quotas.RateBurst = (*NamespaceRateBurstImpl)(nil) +var _ quotas.RateBurst = (*operatorRateBurstImpl)(nil) func NewNamespaceRateBurst( namespaceName string, @@ -162,18 +181,37 @@ return c.burstFn(c.namespaceName) } +func newOperatorRateBurst( + baseRateBurstFn quotas.RateBurst, + operatorRateRatio dynamicconfig.FloatPropertyFn, +) *operatorRateBurstImpl { + return &operatorRateBurstImpl{ + operatorRateRatio: operatorRateRatio, + baseRateBurstFn: baseRateBurstFn, + } +} + +func (c *operatorRateBurstImpl) Rate() float64 { + return c.operatorRateRatio() * c.baseRateBurstFn.Rate() +} + +func (c *operatorRateBurstImpl) Burst() int { + return c.baseRateBurstFn.Burst() +} + func NewRequestToRateLimiter( executionRateBurstFn quotas.RateBurst, visibilityRateBurstFn quotas.RateBurst, namespaceReplicationInducingRateBurstFn quotas.RateBurst, otherRateBurstFn quotas.RateBurst, + operatorRPSRatio dynamicconfig.FloatPropertyFn, ) quotas.RequestRateLimiter { mapping := make(map[string]quotas.RequestRateLimiter) - 
executionRateLimiter := NewExecutionPriorityRateLimiter(executionRateBurstFn) - visibilityRateLimiter := NewVisibilityPriorityRateLimiter(visibilityRateBurstFn) - namespaceReplicationInducingRateLimiter := NewNamespaceReplicationInducingAPIPriorityRateLimiter(namespaceReplicationInducingRateBurstFn) - otherRateLimiter := NewOtherAPIPriorityRateLimiter(otherRateBurstFn) + executionRateLimiter := NewExecutionPriorityRateLimiter(executionRateBurstFn, operatorRPSRatio) + visibilityRateLimiter := NewVisibilityPriorityRateLimiter(visibilityRateBurstFn, operatorRPSRatio) + namespaceReplicationInducingRateLimiter := NewNamespaceReplicationInducingAPIPriorityRateLimiter(namespaceReplicationInducingRateBurstFn, operatorRPSRatio) + otherRateLimiter := NewOtherAPIPriorityRateLimiter(otherRateBurstFn, operatorRPSRatio) for api := range ExecutionAPIToPriority { mapping[api] = executionRateLimiter @@ -193,12 +231,20 @@ func NewExecutionPriorityRateLimiter( rateBurstFn quotas.RateBurst, + operatorRPSRatio dynamicconfig.FloatPropertyFn, ) quotas.RequestRateLimiter { rateLimiters := make(map[int]quotas.RequestRateLimiter) for priority := range ExecutionAPIPrioritiesOrdered { - rateLimiters[priority] = quotas.NewRequestRateLimiterAdapter(quotas.NewDynamicRateLimiter(rateBurstFn, time.Minute)) + if priority == OperatorPriority { + rateLimiters[priority] = quotas.NewRequestRateLimiterAdapter(quotas.NewDynamicRateLimiter(newOperatorRateBurst(rateBurstFn, operatorRPSRatio), time.Minute)) + } else { + rateLimiters[priority] = quotas.NewRequestRateLimiterAdapter(quotas.NewDynamicRateLimiter(rateBurstFn, time.Minute)) + } } return quotas.NewPriorityRateLimiter(func(req quotas.Request) int { + if req.CallerType == headers.CallerTypeOperator { + return OperatorPriority + } if priority, ok := ExecutionAPIToPriority[req.API]; ok { return priority } @@ -208,12 +254,20 @@ func NewVisibilityPriorityRateLimiter( rateBurstFn quotas.RateBurst, + operatorRPSRatio dynamicconfig.FloatPropertyFn, ) quotas.RequestRateLimiter { rateLimiters := make(map[int]quotas.RequestRateLimiter) for priority := range VisibilityAPIPrioritiesOrdered { - rateLimiters[priority] = quotas.NewRequestRateLimiterAdapter(quotas.NewDynamicRateLimiter(rateBurstFn, time.Minute)) + if priority == OperatorPriority { + rateLimiters[priority] = quotas.NewRequestRateLimiterAdapter(quotas.NewDynamicRateLimiter(newOperatorRateBurst(rateBurstFn, operatorRPSRatio), time.Minute)) + } else { + rateLimiters[priority] = quotas.NewRequestRateLimiterAdapter(quotas.NewDynamicRateLimiter(rateBurstFn, time.Minute)) + } } return quotas.NewPriorityRateLimiter(func(req quotas.Request) int { + if req.CallerType == headers.CallerTypeOperator { + return OperatorPriority + } if priority, ok := VisibilityAPIToPriority[req.API]; ok { return priority } @@ -223,12 +277,20 @@ func NewNamespaceReplicationInducingAPIPriorityRateLimiter( rateBurstFn quotas.RateBurst, + operatorRPSRatio dynamicconfig.FloatPropertyFn, ) quotas.RequestRateLimiter { rateLimiters := make(map[int]quotas.RequestRateLimiter) for priority := range NamespaceReplicationInducingAPIPrioritiesOrdered { - rateLimiters[priority] = quotas.NewRequestRateLimiterAdapter(quotas.NewDynamicRateLimiter(rateBurstFn, time.Minute)) + if priority == OperatorPriority { + rateLimiters[priority] = quotas.NewRequestRateLimiterAdapter(quotas.NewDynamicRateLimiter(newOperatorRateBurst(rateBurstFn, operatorRPSRatio), time.Minute)) + } else { + rateLimiters[priority] = 
quotas.NewRequestRateLimiterAdapter(quotas.NewDynamicRateLimiter(rateBurstFn, time.Minute)) + } } return quotas.NewPriorityRateLimiter(func(req quotas.Request) int { + if req.CallerType == headers.CallerTypeOperator { + return OperatorPriority + } if priority, ok := NamespaceReplicationInducingAPIToPriority[req.API]; ok { return priority } @@ -238,12 +300,20 @@ func NewOtherAPIPriorityRateLimiter( rateBurstFn quotas.RateBurst, + operatorRPSRatio dynamicconfig.FloatPropertyFn, ) quotas.RequestRateLimiter { rateLimiters := make(map[int]quotas.RequestRateLimiter) for priority := range OtherAPIPrioritiesOrdered { - rateLimiters[priority] = quotas.NewRequestRateLimiterAdapter(quotas.NewDynamicRateLimiter(rateBurstFn, time.Minute)) + if priority == OperatorPriority { + rateLimiters[priority] = quotas.NewRequestRateLimiterAdapter(quotas.NewDynamicRateLimiter(newOperatorRateBurst(rateBurstFn, operatorRPSRatio), time.Minute)) + } else { + rateLimiters[priority] = quotas.NewRequestRateLimiterAdapter(quotas.NewDynamicRateLimiter(rateBurstFn, time.Minute)) + } } return quotas.NewPriorityRateLimiter(func(req quotas.Request) int { + if req.CallerType == headers.CallerTypeOperator { + return OperatorPriority + } if priority, ok := OtherAPIToPriority[req.API]; ok { return priority } diff -Nru temporal-1.21.5-1/src/service/frontend/configs/quotas_test.go temporal-1.22.5/src/service/frontend/configs/quotas_test.go --- temporal-1.21.5-1/src/service/frontend/configs/quotas_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/frontend/configs/quotas_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -27,13 +27,21 @@ import ( "reflect" "testing" + "time" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" "go.temporal.io/api/workflowservice/v1" + "go.temporal.io/server/common/headers" + "go.temporal.io/server/common/quotas" "golang.org/x/exp/slices" ) +var ( + testRateBurstFn = quotas.NewDefaultIncomingRateBurst(func() float64 { return 5 }) + testOperatorRPSRatioFn = func() float64 { return 0.2 } +) + type ( quotasSuite struct { suite.Suite @@ -262,3 +270,52 @@ } s.Equal(expectedAPIs, actualAPIs) } + +func (s *quotasSuite) TestOperatorPriority_Execution() { + limiter := NewExecutionPriorityRateLimiter(testRateBurstFn, testOperatorRPSRatioFn) + s.testOperatorPrioritized(limiter, "DescribeWorkflowExecution") +} + +func (s *quotasSuite) TestOperatorPriority_Visibility() { + limiter := NewVisibilityPriorityRateLimiter(testRateBurstFn, testOperatorRPSRatioFn) + s.testOperatorPrioritized(limiter, "ListOpenWorkflowExecutions") +} + +func (s *quotasSuite) TestOperatorPriority_NamespaceReplicationInducing() { + limiter := NewNamespaceReplicationInducingAPIPriorityRateLimiter(testRateBurstFn, testOperatorRPSRatioFn) + s.testOperatorPrioritized(limiter, "RegisterNamespace") +} + +func (s *quotasSuite) TestOperatorPriority_Other() { + limiter := NewOtherAPIPriorityRateLimiter(testRateBurstFn, testOperatorRPSRatioFn) + s.testOperatorPrioritized(limiter, "DescribeNamespace") +} + +func (s *quotasSuite) testOperatorPrioritized(limiter quotas.RequestRateLimiter, api string) { + operatorRequest := quotas.NewRequest( + api, + 1, + "test-namespace", + headers.CallerTypeOperator, + -1, + "") + + apiRequest := quotas.NewRequest( + api, + 1, + "test-namespace", + headers.CallerTypeAPI, + -1, + "") + + requestTime := time.Now() + limitCount := 0 + + for i := 0; i < 12; i++ { + if !limiter.Allow(requestTime, apiRequest) { + limitCount++ + s.True(limiter.Allow(requestTime, operatorRequest)) + } 
+ } + s.Equal(2, limitCount) +} diff -Nru temporal-1.21.5-1/src/service/frontend/dcRedirectionPolicy.go temporal-1.22.5/src/service/frontend/dcRedirectionPolicy.go --- temporal-1.21.5-1/src/service/frontend/dcRedirectionPolicy.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/frontend/dcRedirectionPolicy.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,211 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -//go:generate mockgen -copyright_file ../../LICENSE -package $GOPACKAGE -source $GOFILE -destination dcRedirectionPolicy_mock.go - -package frontend - -import ( - "context" - "fmt" - - "go.temporal.io/api/serviceerror" - - "go.temporal.io/server/common/cluster" - "go.temporal.io/server/common/config" - "go.temporal.io/server/common/namespace" -) - -const ( - // DCRedirectionPolicyDefault means no redirection - DCRedirectionPolicyDefault = "" - // DCRedirectionPolicyNoop means no redirection - DCRedirectionPolicyNoop = "noop" - // DCRedirectionPolicySelectedAPIsForwarding means forwarding the following APIs based namespace - // 1. StartWorkflowExecution - // 2. SignalWithStartWorkflowExecution - // 3. SignalWorkflowExecution - // 4. RequestCancelWorkflowExecution - // 5. TerminateWorkflowExecution - // 6. 
QueryWorkflow - // please also reference selectedAPIsForwardingRedirectionPolicyWhitelistedAPIs - DCRedirectionPolicySelectedAPIsForwarding = "selected-apis-forwarding" - - // DCRedirectionPolicyAllAPIsForwarding means forwarding all APIs based on namespace active cluster - DCRedirectionPolicyAllAPIsForwarding = "all-apis-forwarding" -) - -type ( - // DCRedirectionPolicy is a DC redirection policy interface - DCRedirectionPolicy interface { - WithNamespaceIDRedirect(ctx context.Context, namespaceID namespace.ID, apiName string, call func(string) error) error - WithNamespaceRedirect(ctx context.Context, namespace namespace.Name, apiName string, call func(string) error) error - } - - // NoopRedirectionPolicy is DC redirection policy which does nothing - NoopRedirectionPolicy struct { - currentClusterName string - } - - // SelectedAPIsForwardingRedirectionPolicy is a DC redirection policy - // which (based on namespace) forwards selected APIs calls to active cluster - SelectedAPIsForwardingRedirectionPolicy struct { - currentClusterName string - config *Config - namespaceRegistry namespace.Registry - enableForAllAPIs bool - } -) - -// selectedAPIsForwardingRedirectionPolicyWhitelistedAPIs contains a list of APIs which can be redirected -var selectedAPIsForwardingRedirectionPolicyWhitelistedAPIs = map[string]struct{}{ - "StartWorkflowExecution": {}, - "SignalWithStartWorkflowExecution": {}, - "SignalWorkflowExecution": {}, - "RequestCancelWorkflowExecution": {}, - "TerminateWorkflowExecution": {}, - "QueryWorkflow": {}, -} - -// RedirectionPolicyGenerator generate corresponding redirection policy -func RedirectionPolicyGenerator(clusterMetadata cluster.Metadata, config *Config, - namespaceRegistry namespace.Registry, policy config.DCRedirectionPolicy) DCRedirectionPolicy { - switch policy.Policy { - case DCRedirectionPolicyDefault: - // default policy, noop - return NewNoopRedirectionPolicy(clusterMetadata.GetCurrentClusterName()) - case DCRedirectionPolicyNoop: - return NewNoopRedirectionPolicy(clusterMetadata.GetCurrentClusterName()) - case DCRedirectionPolicySelectedAPIsForwarding: - currentClusterName := clusterMetadata.GetCurrentClusterName() - return NewSelectedAPIsForwardingPolicy(currentClusterName, config, namespaceRegistry) - case DCRedirectionPolicyAllAPIsForwarding: - currentClusterName := clusterMetadata.GetCurrentClusterName() - return NewAllAPIsForwardingPolicy(currentClusterName, config, namespaceRegistry) - default: - panic(fmt.Sprintf("Unknown DC redirection policy %v", policy.Policy)) - } -} - -// NewNoopRedirectionPolicy is DC redirection policy which does nothing -func NewNoopRedirectionPolicy(currentClusterName string) *NoopRedirectionPolicy { - return &NoopRedirectionPolicy{ - currentClusterName: currentClusterName, - } -} - -// WithNamespaceIDRedirect redirect the API call based on namespace ID -func (policy *NoopRedirectionPolicy) WithNamespaceIDRedirect(_ context.Context, _ namespace.ID, _ string, call func(string) error) error { - return call(policy.currentClusterName) -} - -// WithNamespaceRedirect redirect the API call based on namespace name -func (policy *NoopRedirectionPolicy) WithNamespaceRedirect(_ context.Context, _ namespace.Name, _ string, call func(string) error) error { - return call(policy.currentClusterName) -} - -// NewSelectedAPIsForwardingPolicy creates a forwarding policy for selected APIs based on namespace -func NewSelectedAPIsForwardingPolicy(currentClusterName string, config *Config, namespaceRegistry namespace.Registry) 
*SelectedAPIsForwardingRedirectionPolicy { - return &SelectedAPIsForwardingRedirectionPolicy{ - currentClusterName: currentClusterName, - config: config, - namespaceRegistry: namespaceRegistry, - } -} - -// NewAllAPIsForwardingPolicy creates a forwarding policy for all APIs based on namespace -func NewAllAPIsForwardingPolicy(currentClusterName string, config *Config, namespaceRegistry namespace.Registry) *SelectedAPIsForwardingRedirectionPolicy { - return &SelectedAPIsForwardingRedirectionPolicy{ - currentClusterName: currentClusterName, - config: config, - namespaceRegistry: namespaceRegistry, - enableForAllAPIs: true, - } -} - -// WithNamespaceIDRedirect redirect the API call based on namespace ID -func (policy *SelectedAPIsForwardingRedirectionPolicy) WithNamespaceIDRedirect(ctx context.Context, namespaceID namespace.ID, apiName string, call func(string) error) error { - namespaceEntry, err := policy.namespaceRegistry.GetNamespaceByID(namespaceID) - if err != nil { - return err - } - return policy.withRedirect(ctx, namespaceEntry, apiName, call) -} - -// WithNamespaceRedirect redirect the API call based on namespace name -func (policy *SelectedAPIsForwardingRedirectionPolicy) WithNamespaceRedirect(ctx context.Context, namespace namespace.Name, apiName string, call func(string) error) error { - namespaceEntry, err := policy.namespaceRegistry.GetNamespace(namespace) - if err != nil { - return err - } - return policy.withRedirect(ctx, namespaceEntry, apiName, call) -} - -func (policy *SelectedAPIsForwardingRedirectionPolicy) withRedirect(ctx context.Context, namespaceEntry *namespace.Namespace, apiName string, call func(string) error) error { - targetDC, enableNamespaceNotActiveForwarding := policy.getTargetClusterAndIsNamespaceNotActiveAutoForwarding(ctx, namespaceEntry, apiName) - - err := call(targetDC) - - targetDC, ok := policy.isNamespaceNotActiveError(err) - if !ok || !enableNamespaceNotActiveForwarding { - return err - } - return call(targetDC) -} - -func (policy *SelectedAPIsForwardingRedirectionPolicy) isNamespaceNotActiveError(err error) (string, bool) { - namespaceNotActiveErr, ok := err.(*serviceerror.NamespaceNotActive) - if !ok { - return "", false - } - return namespaceNotActiveErr.ActiveCluster, true -} - -func (policy *SelectedAPIsForwardingRedirectionPolicy) getTargetClusterAndIsNamespaceNotActiveAutoForwarding(ctx context.Context, namespaceEntry *namespace.Namespace, apiName string) (string, bool) { - if !namespaceEntry.IsGlobalNamespace() { - return policy.currentClusterName, false - } - - if len(namespaceEntry.ClusterNames()) == 1 { - // do not do dc redirection if namespace is only targeting at 1 dc (effectively local namespace) - return policy.currentClusterName, false - } - - if !policy.config.EnableNamespaceNotActiveAutoForwarding(namespaceEntry.Name().String()) { - // do not do dc redirection if auto-forwarding dynamic config flag is not enabled - return policy.currentClusterName, false - } - - if policy.enableForAllAPIs { - return namespaceEntry.ActiveClusterName(), true - } - - _, ok := selectedAPIsForwardingRedirectionPolicyWhitelistedAPIs[apiName] - if !ok { - // do not do dc redirection if API is not whitelisted - return policy.currentClusterName, false - } - - return namespaceEntry.ActiveClusterName(), true -} diff -Nru temporal-1.21.5-1/src/service/frontend/dcRedirectionPolicy_mock.go temporal-1.22.5/src/service/frontend/dcRedirectionPolicy_mock.go --- temporal-1.21.5-1/src/service/frontend/dcRedirectionPolicy_mock.go 2023-09-29 14:03:07.000000000 +0000 
+++ temporal-1.22.5/src/service/frontend/dcRedirectionPolicy_mock.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,88 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Code generated by MockGen. DO NOT EDIT. -// Source: dcRedirectionPolicy.go - -// Package frontend is a generated GoMock package. -package frontend - -import ( - context "context" - reflect "reflect" - - gomock "github.com/golang/mock/gomock" - namespace "go.temporal.io/server/common/namespace" -) - -// MockDCRedirectionPolicy is a mock of DCRedirectionPolicy interface. -type MockDCRedirectionPolicy struct { - ctrl *gomock.Controller - recorder *MockDCRedirectionPolicyMockRecorder -} - -// MockDCRedirectionPolicyMockRecorder is the mock recorder for MockDCRedirectionPolicy. -type MockDCRedirectionPolicyMockRecorder struct { - mock *MockDCRedirectionPolicy -} - -// NewMockDCRedirectionPolicy creates a new mock instance. -func NewMockDCRedirectionPolicy(ctrl *gomock.Controller) *MockDCRedirectionPolicy { - mock := &MockDCRedirectionPolicy{ctrl: ctrl} - mock.recorder = &MockDCRedirectionPolicyMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockDCRedirectionPolicy) EXPECT() *MockDCRedirectionPolicyMockRecorder { - return m.recorder -} - -// WithNamespaceIDRedirect mocks base method. -func (m *MockDCRedirectionPolicy) WithNamespaceIDRedirect(ctx context.Context, namespaceID namespace.ID, apiName string, call func(string) error) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "WithNamespaceIDRedirect", ctx, namespaceID, apiName, call) - ret0, _ := ret[0].(error) - return ret0 -} - -// WithNamespaceIDRedirect indicates an expected call of WithNamespaceIDRedirect. -func (mr *MockDCRedirectionPolicyMockRecorder) WithNamespaceIDRedirect(ctx, namespaceID, apiName, call interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithNamespaceIDRedirect", reflect.TypeOf((*MockDCRedirectionPolicy)(nil).WithNamespaceIDRedirect), ctx, namespaceID, apiName, call) -} - -// WithNamespaceRedirect mocks base method. 
-func (m *MockDCRedirectionPolicy) WithNamespaceRedirect(ctx context.Context, namespace namespace.Name, apiName string, call func(string) error) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "WithNamespaceRedirect", ctx, namespace, apiName, call) - ret0, _ := ret[0].(error) - return ret0 -} - -// WithNamespaceRedirect indicates an expected call of WithNamespaceRedirect. -func (mr *MockDCRedirectionPolicyMockRecorder) WithNamespaceRedirect(ctx, namespace, apiName, call interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithNamespaceRedirect", reflect.TypeOf((*MockDCRedirectionPolicy)(nil).WithNamespaceRedirect), ctx, namespace, apiName, call) -} diff -Nru temporal-1.21.5-1/src/service/frontend/dcRedirectionPolicy_test.go temporal-1.22.5/src/service/frontend/dcRedirectionPolicy_test.go --- temporal-1.21.5-1/src/service/frontend/dcRedirectionPolicy_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/frontend/dcRedirectionPolicy_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,417 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- -package frontend - -import ( - "context" - "fmt" - "testing" - - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - "go.temporal.io/api/serviceerror" - - persistencespb "go.temporal.io/server/api/persistence/v1" - "go.temporal.io/server/common/cluster" - "go.temporal.io/server/common/dynamicconfig" - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/namespace" - "go.temporal.io/server/common/primitives/timestamp" -) - -type ( - noopDCRedirectionPolicySuite struct { - suite.Suite - *require.Assertions - - currentClusterName string - policy *NoopRedirectionPolicy - } - - selectedAPIsForwardingRedirectionPolicySuite struct { - suite.Suite - *require.Assertions - - controller *gomock.Controller - mockClusterMetadata *cluster.MockMetadata - mockNamespaceCache *namespace.MockRegistry - - namespace namespace.Name - namespaceID namespace.ID - currentClusterName string - alternativeClusterName string - mockConfig *Config - - policy *SelectedAPIsForwardingRedirectionPolicy - } -) - -func TestNoopDCRedirectionPolicySuite(t *testing.T) { - s := new(noopDCRedirectionPolicySuite) - suite.Run(t, s) -} - -func (s *noopDCRedirectionPolicySuite) SetupSuite() { -} - -func (s *noopDCRedirectionPolicySuite) TearDownSuite() { - -} - -func (s *noopDCRedirectionPolicySuite) SetupTest() { - s.Assertions = require.New(s.T()) - - s.currentClusterName = cluster.TestCurrentClusterName - s.policy = NewNoopRedirectionPolicy(s.currentClusterName) -} - -func (s *noopDCRedirectionPolicySuite) TearDownTest() { - -} - -func (s *noopDCRedirectionPolicySuite) TestWithNamespaceRedirect() { - namespaceName := namespace.Name("some random namespace name") - namespaceID := namespace.ID("some random namespace ID") - apiName := "any random API name" - callCount := 0 - callFn := func(targetCluster string) error { - callCount++ - s.Equal(s.currentClusterName, targetCluster) - return nil - } - - err := s.policy.WithNamespaceIDRedirect(context.Background(), namespaceID, apiName, callFn) - s.Nil(err) - - err = s.policy.WithNamespaceRedirect(context.Background(), namespaceName, apiName, callFn) - s.Nil(err) - - s.Equal(2, callCount) -} - -func TestSelectedAPIsForwardingRedirectionPolicySuite(t *testing.T) { - s := new(selectedAPIsForwardingRedirectionPolicySuite) - suite.Run(t, s) -} - -func (s *selectedAPIsForwardingRedirectionPolicySuite) SetupSuite() { -} - -func (s *selectedAPIsForwardingRedirectionPolicySuite) TearDownSuite() { - -} - -func (s *selectedAPIsForwardingRedirectionPolicySuite) SetupTest() { - s.Assertions = require.New(s.T()) - - s.controller = gomock.NewController(s.T()) - s.mockClusterMetadata = cluster.NewMockMetadata(s.controller) - s.mockNamespaceCache = namespace.NewMockRegistry(s.controller) - - s.namespace = "some random namespace name" - s.namespaceID = "deadd0d0-c001-face-d00d-000000000000" - s.currentClusterName = cluster.TestCurrentClusterName - s.alternativeClusterName = cluster.TestAlternativeClusterName - - logger := log.NewTestLogger() - - s.mockConfig = NewConfig(dynamicconfig.NewCollection(dynamicconfig.NewNoopClient(), logger), 0, true, false) - s.mockClusterMetadata.EXPECT().IsGlobalNamespaceEnabled().Return(true).AnyTimes() - s.policy = NewSelectedAPIsForwardingPolicy( - s.currentClusterName, - s.mockConfig, - s.mockNamespaceCache, - ) -} - -func (s *selectedAPIsForwardingRedirectionPolicySuite) TearDownTest() { - s.controller.Finish() -} - -func (s *selectedAPIsForwardingRedirectionPolicySuite) 
TestWithNamespaceRedirect_LocalNamespace() { - s.setupLocalNamespace() - - apiName := "any random API name" - callCount := 0 - callFn := func(targetCluster string) error { - callCount++ - s.Equal(s.currentClusterName, targetCluster) - return nil - } - - err := s.policy.WithNamespaceIDRedirect(context.Background(), s.namespaceID, apiName, callFn) - s.Nil(err) - - err = s.policy.WithNamespaceRedirect(context.Background(), s.namespace, apiName, callFn) - s.Nil(err) - - s.Equal(2, callCount) -} - -func (s *selectedAPIsForwardingRedirectionPolicySuite) TestWithNamespaceRedirect_GlobalNamespace_OneReplicationCluster() { - s.setupGlobalNamespaceWithOneReplicationCluster() - - apiName := "any random API name" - callCount := 0 - callFn := func(targetCluster string) error { - callCount++ - s.Equal(s.currentClusterName, targetCluster) - return nil - } - - err := s.policy.WithNamespaceIDRedirect(context.Background(), s.namespaceID, apiName, callFn) - s.Nil(err) - - err = s.policy.WithNamespaceRedirect(context.Background(), s.namespace, apiName, callFn) - s.Nil(err) - - s.Equal(2, callCount) -} - -func (s *selectedAPIsForwardingRedirectionPolicySuite) TestWithNamespaceRedirect_GlobalNamespace_NoForwarding_NamespaceNotWhiltelisted() { - s.setupGlobalNamespaceWithTwoReplicationCluster(false, true) - - apiName := "any random API name" - callCount := 0 - callFn := func(targetCluster string) error { - callCount++ - s.Equal(s.currentClusterName, targetCluster) - return nil - } - - err := s.policy.WithNamespaceIDRedirect(context.Background(), s.namespaceID, apiName, callFn) - s.Nil(err) - - err = s.policy.WithNamespaceRedirect(context.Background(), s.namespace, apiName, callFn) - s.Nil(err) - - s.Equal(2, callCount) -} - -func (s *selectedAPIsForwardingRedirectionPolicySuite) TestWithNamespaceRedirect_GlobalNamespace_NoForwarding_APINotWhiltelisted() { - s.setupGlobalNamespaceWithTwoReplicationCluster(true, true) - - callCount := 0 - callFn := func(targetCluster string) error { - callCount++ - s.Equal(s.currentClusterName, targetCluster) - return nil - } - - for apiName := range selectedAPIsForwardingRedirectionPolicyWhitelistedAPIs { - err := s.policy.WithNamespaceIDRedirect(context.Background(), s.namespaceID, apiName, callFn) - s.Nil(err) - - err = s.policy.WithNamespaceRedirect(context.Background(), s.namespace, apiName, callFn) - s.Nil(err) - } - - s.Equal(2*len(selectedAPIsForwardingRedirectionPolicyWhitelistedAPIs), callCount) -} - -func (s *selectedAPIsForwardingRedirectionPolicySuite) TestGetTargetDataCenter_GlobalNamespace_Forwarding_CurrentCluster() { - s.setupGlobalNamespaceWithTwoReplicationCluster(true, true) - - callCount := 0 - callFn := func(targetCluster string) error { - callCount++ - s.Equal(s.currentClusterName, targetCluster) - return nil - } - - for apiName := range selectedAPIsForwardingRedirectionPolicyWhitelistedAPIs { - err := s.policy.WithNamespaceIDRedirect(context.Background(), s.namespaceID, apiName, callFn) - s.Nil(err) - - err = s.policy.WithNamespaceRedirect(context.Background(), s.namespace, apiName, callFn) - s.Nil(err) - } - - s.Equal(2*len(selectedAPIsForwardingRedirectionPolicyWhitelistedAPIs), callCount) -} - -func (s *selectedAPIsForwardingRedirectionPolicySuite) TestGetTargetDataCenter_GlobalNamespace_Forwarding_AlternativeCluster() { - s.setupGlobalNamespaceWithTwoReplicationCluster(true, false) - - callCount := 0 - callFn := func(targetCluster string) error { - callCount++ - s.Equal(s.alternativeClusterName, targetCluster) - return nil - } - - for apiName := range 
selectedAPIsForwardingRedirectionPolicyWhitelistedAPIs { - err := s.policy.WithNamespaceIDRedirect(context.Background(), s.namespaceID, apiName, callFn) - s.Nil(err) - - err = s.policy.WithNamespaceRedirect(context.Background(), s.namespace, apiName, callFn) - s.Nil(err) - } - - s.Equal(2*len(selectedAPIsForwardingRedirectionPolicyWhitelistedAPIs), callCount) -} - -func (s *selectedAPIsForwardingRedirectionPolicySuite) TestGetTargetDataCenter_GlobalNamespace_Forwarding_CurrentClusterToAlternativeCluster() { - s.setupGlobalNamespaceWithTwoReplicationCluster(true, true) - - currentClustercallCount := 0 - alternativeClustercallCount := 0 - callFn := func(targetCluster string) error { - switch targetCluster { - case s.currentClusterName: - currentClustercallCount++ - return serviceerror.NewNamespaceNotActive("", s.currentClusterName, s.alternativeClusterName) - case s.alternativeClusterName: - alternativeClustercallCount++ - return nil - default: - panic(fmt.Sprintf("unknown cluster name %v", targetCluster)) - } - } - - for apiName := range selectedAPIsForwardingRedirectionPolicyWhitelistedAPIs { - err := s.policy.WithNamespaceIDRedirect(context.Background(), s.namespaceID, apiName, callFn) - s.Nil(err) - - err = s.policy.WithNamespaceRedirect(context.Background(), s.namespace, apiName, callFn) - s.Nil(err) - } - - s.Equal(2*len(selectedAPIsForwardingRedirectionPolicyWhitelistedAPIs), currentClustercallCount) - s.Equal(2*len(selectedAPIsForwardingRedirectionPolicyWhitelistedAPIs), alternativeClustercallCount) -} - -func (s *selectedAPIsForwardingRedirectionPolicySuite) TestGetTargetDataCenter_GlobalNamespace_Forwarding_AlternativeClusterToCurrentCluster() { - s.setupGlobalNamespaceWithTwoReplicationCluster(true, false) - - currentClustercallCount := 0 - alternativeClustercallCount := 0 - callFn := func(targetCluster string) error { - switch targetCluster { - case s.currentClusterName: - currentClustercallCount++ - return nil - case s.alternativeClusterName: - alternativeClustercallCount++ - return serviceerror.NewNamespaceNotActive("", s.alternativeClusterName, s.currentClusterName) - default: - panic(fmt.Sprintf("unknown cluster name %v", targetCluster)) - } - } - - for apiName := range selectedAPIsForwardingRedirectionPolicyWhitelistedAPIs { - err := s.policy.WithNamespaceIDRedirect(context.Background(), s.namespaceID, apiName, callFn) - s.Nil(err) - - err = s.policy.WithNamespaceRedirect(context.Background(), s.namespace, apiName, callFn) - s.Nil(err) - } - - s.Equal(2*len(selectedAPIsForwardingRedirectionPolicyWhitelistedAPIs), currentClustercallCount) - s.Equal(2*len(selectedAPIsForwardingRedirectionPolicyWhitelistedAPIs), alternativeClustercallCount) -} - -func (s *selectedAPIsForwardingRedirectionPolicySuite) TestGetTargetDataCenter_GlobalNamespace_Forwarding_AlternativeClusterToCurrentCluster_AllAPIs() { - s.setupGlobalNamespaceWithTwoReplicationCluster(true, false) - s.policy.enableForAllAPIs = true - - currentClustercallCount := 0 - alternativeClustercallCount := 0 - callFn := func(targetCluster string) error { - switch targetCluster { - case s.currentClusterName: - currentClustercallCount++ - return nil - case s.alternativeClusterName: - alternativeClustercallCount++ - return serviceerror.NewNamespaceNotActive("", s.alternativeClusterName, s.currentClusterName) - default: - panic(fmt.Sprintf("unknown cluster name %v", targetCluster)) - } - } - - apiName := "NotExistRandomAPI" - err := s.policy.WithNamespaceIDRedirect(context.Background(), s.namespaceID, apiName, callFn) - s.Nil(err) 
- - err = s.policy.WithNamespaceRedirect(context.Background(), s.namespace, apiName, callFn) - s.Nil(err) - - s.Equal(2, currentClustercallCount) - s.Equal(2, alternativeClustercallCount) -} - -func (s *selectedAPIsForwardingRedirectionPolicySuite) setupLocalNamespace() { - namespaceEntry := namespace.NewLocalNamespaceForTest( - &persistencespb.NamespaceInfo{Id: s.namespaceID.String(), Name: s.namespace.String()}, - &persistencespb.NamespaceConfig{Retention: timestamp.DurationFromDays(1)}, - cluster.TestCurrentClusterName, - ) - - s.mockNamespaceCache.EXPECT().GetNamespaceByID(s.namespaceID).Return(namespaceEntry, nil).AnyTimes() - s.mockNamespaceCache.EXPECT().GetNamespace(s.namespace).Return(namespaceEntry, nil).AnyTimes() -} - -func (s *selectedAPIsForwardingRedirectionPolicySuite) setupGlobalNamespaceWithOneReplicationCluster() { - namespaceEntry := namespace.NewGlobalNamespaceForTest( - &persistencespb.NamespaceInfo{Id: s.namespaceID.String(), Name: s.namespace.String()}, - &persistencespb.NamespaceConfig{Retention: timestamp.DurationFromDays(1)}, - &persistencespb.NamespaceReplicationConfig{ - ActiveClusterName: cluster.TestAlternativeClusterName, - Clusters: []string{ - cluster.TestCurrentClusterName, - cluster.TestAlternativeClusterName, - }, - }, - 1234, // not used - ) - - s.mockNamespaceCache.EXPECT().GetNamespaceByID(s.namespaceID).Return(namespaceEntry, nil).AnyTimes() - s.mockNamespaceCache.EXPECT().GetNamespace(s.namespace).Return(namespaceEntry, nil).AnyTimes() -} - -func (s *selectedAPIsForwardingRedirectionPolicySuite) setupGlobalNamespaceWithTwoReplicationCluster(forwardingEnabled bool, isRecordActive bool) { - activeCluster := s.alternativeClusterName - if isRecordActive { - activeCluster = s.currentClusterName - } - namespaceEntry := namespace.NewGlobalNamespaceForTest( - &persistencespb.NamespaceInfo{Id: s.namespaceID.String(), Name: s.namespace.String()}, - &persistencespb.NamespaceConfig{Retention: timestamp.DurationFromDays(1)}, - &persistencespb.NamespaceReplicationConfig{ - ActiveClusterName: activeCluster, - Clusters: []string{ - cluster.TestCurrentClusterName, - cluster.TestAlternativeClusterName, - }, - }, - 1234, // not used - ) - - s.mockNamespaceCache.EXPECT().GetNamespaceByID(s.namespaceID).Return(namespaceEntry, nil).AnyTimes() - s.mockNamespaceCache.EXPECT().GetNamespace(s.namespace).Return(namespaceEntry, nil).AnyTimes() - s.mockConfig.EnableNamespaceNotActiveAutoForwarding = dynamicconfig.GetBoolPropertyFnFilteredByNamespace(forwardingEnabled) -} diff -Nru temporal-1.21.5-1/src/service/frontend/dc_redirection_policy.go temporal-1.22.5/src/service/frontend/dc_redirection_policy.go --- temporal-1.21.5-1/src/service/frontend/dc_redirection_policy.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/service/frontend/dc_redirection_policy.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,211 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +//go:generate mockgen -copyright_file ../../LICENSE -package $GOPACKAGE -source $GOFILE -destination dc_redirection_policy_mock.go + +package frontend + +import ( + "context" + "fmt" + + "go.temporal.io/api/serviceerror" + + "go.temporal.io/server/common/cluster" + "go.temporal.io/server/common/config" + "go.temporal.io/server/common/namespace" +) + +const ( + // DCRedirectionPolicyDefault means no redirection + DCRedirectionPolicyDefault = "" + // DCRedirectionPolicyNoop means no redirection + DCRedirectionPolicyNoop = "noop" + // DCRedirectionPolicySelectedAPIsForwarding means forwarding the following APIs based namespace + // 1. StartWorkflowExecution + // 2. SignalWithStartWorkflowExecution + // 3. SignalWorkflowExecution + // 4. RequestCancelWorkflowExecution + // 5. TerminateWorkflowExecution + // 6. 
QueryWorkflow + // please also reference selectedAPIsForwardingRedirectionPolicyWhitelistedAPIs + DCRedirectionPolicySelectedAPIsForwarding = "selected-apis-forwarding" + + // DCRedirectionPolicyAllAPIsForwarding means forwarding all APIs based on namespace active cluster + DCRedirectionPolicyAllAPIsForwarding = "all-apis-forwarding" +) + +type ( + // DCRedirectionPolicy is a DC redirection policy interface + DCRedirectionPolicy interface { + WithNamespaceIDRedirect(ctx context.Context, namespaceID namespace.ID, apiName string, call func(string) error) error + WithNamespaceRedirect(ctx context.Context, namespace namespace.Name, apiName string, call func(string) error) error + } + + // NoopRedirectionPolicy is DC redirection policy which does nothing + NoopRedirectionPolicy struct { + currentClusterName string + } + + // SelectedAPIsForwardingRedirectionPolicy is a DC redirection policy + // which (based on namespace) forwards selected APIs calls to active cluster + SelectedAPIsForwardingRedirectionPolicy struct { + currentClusterName string + config *Config + namespaceRegistry namespace.Registry + enableForAllAPIs bool + } +) + +// selectedAPIsForwardingRedirectionPolicyWhitelistedAPIs contains a list of APIs which can be redirected +var selectedAPIsForwardingRedirectionPolicyWhitelistedAPIs = map[string]struct{}{ + "StartWorkflowExecution": {}, + "SignalWithStartWorkflowExecution": {}, + "SignalWorkflowExecution": {}, + "RequestCancelWorkflowExecution": {}, + "TerminateWorkflowExecution": {}, + "QueryWorkflow": {}, +} + +// RedirectionPolicyGenerator generate corresponding redirection policy +func RedirectionPolicyGenerator(clusterMetadata cluster.Metadata, config *Config, + namespaceRegistry namespace.Registry, policy config.DCRedirectionPolicy) DCRedirectionPolicy { + switch policy.Policy { + case DCRedirectionPolicyDefault: + // default policy, noop + return NewNoopRedirectionPolicy(clusterMetadata.GetCurrentClusterName()) + case DCRedirectionPolicyNoop: + return NewNoopRedirectionPolicy(clusterMetadata.GetCurrentClusterName()) + case DCRedirectionPolicySelectedAPIsForwarding: + currentClusterName := clusterMetadata.GetCurrentClusterName() + return NewSelectedAPIsForwardingPolicy(currentClusterName, config, namespaceRegistry) + case DCRedirectionPolicyAllAPIsForwarding: + currentClusterName := clusterMetadata.GetCurrentClusterName() + return NewAllAPIsForwardingPolicy(currentClusterName, config, namespaceRegistry) + default: + panic(fmt.Sprintf("Unknown DC redirection policy %v", policy.Policy)) + } +} + +// NewNoopRedirectionPolicy is DC redirection policy which does nothing +func NewNoopRedirectionPolicy(currentClusterName string) *NoopRedirectionPolicy { + return &NoopRedirectionPolicy{ + currentClusterName: currentClusterName, + } +} + +// WithNamespaceIDRedirect redirect the API call based on namespace ID +func (policy *NoopRedirectionPolicy) WithNamespaceIDRedirect(_ context.Context, _ namespace.ID, _ string, call func(string) error) error { + return call(policy.currentClusterName) +} + +// WithNamespaceRedirect redirect the API call based on namespace name +func (policy *NoopRedirectionPolicy) WithNamespaceRedirect(_ context.Context, _ namespace.Name, _ string, call func(string) error) error { + return call(policy.currentClusterName) +} + +// NewSelectedAPIsForwardingPolicy creates a forwarding policy for selected APIs based on namespace +func NewSelectedAPIsForwardingPolicy(currentClusterName string, config *Config, namespaceRegistry namespace.Registry) 
*SelectedAPIsForwardingRedirectionPolicy { + return &SelectedAPIsForwardingRedirectionPolicy{ + currentClusterName: currentClusterName, + config: config, + namespaceRegistry: namespaceRegistry, + } +} + +// NewAllAPIsForwardingPolicy creates a forwarding policy for all APIs based on namespace +func NewAllAPIsForwardingPolicy(currentClusterName string, config *Config, namespaceRegistry namespace.Registry) *SelectedAPIsForwardingRedirectionPolicy { + return &SelectedAPIsForwardingRedirectionPolicy{ + currentClusterName: currentClusterName, + config: config, + namespaceRegistry: namespaceRegistry, + enableForAllAPIs: true, + } +} + +// WithNamespaceIDRedirect redirect the API call based on namespace ID +func (policy *SelectedAPIsForwardingRedirectionPolicy) WithNamespaceIDRedirect(ctx context.Context, namespaceID namespace.ID, apiName string, call func(string) error) error { + namespaceEntry, err := policy.namespaceRegistry.GetNamespaceByID(namespaceID) + if err != nil { + return err + } + return policy.withRedirect(ctx, namespaceEntry, apiName, call) +} + +// WithNamespaceRedirect redirect the API call based on namespace name +func (policy *SelectedAPIsForwardingRedirectionPolicy) WithNamespaceRedirect(ctx context.Context, namespace namespace.Name, apiName string, call func(string) error) error { + namespaceEntry, err := policy.namespaceRegistry.GetNamespace(namespace) + if err != nil { + return err + } + return policy.withRedirect(ctx, namespaceEntry, apiName, call) +} + +func (policy *SelectedAPIsForwardingRedirectionPolicy) withRedirect(ctx context.Context, namespaceEntry *namespace.Namespace, apiName string, call func(string) error) error { + targetDC, enableNamespaceNotActiveForwarding := policy.getTargetClusterAndIsNamespaceNotActiveAutoForwarding(ctx, namespaceEntry, apiName) + + err := call(targetDC) + + targetDC, ok := policy.isNamespaceNotActiveError(err) + if !ok || !enableNamespaceNotActiveForwarding { + return err + } + return call(targetDC) +} + +func (policy *SelectedAPIsForwardingRedirectionPolicy) isNamespaceNotActiveError(err error) (string, bool) { + namespaceNotActiveErr, ok := err.(*serviceerror.NamespaceNotActive) + if !ok { + return "", false + } + return namespaceNotActiveErr.ActiveCluster, true +} + +func (policy *SelectedAPIsForwardingRedirectionPolicy) getTargetClusterAndIsNamespaceNotActiveAutoForwarding(ctx context.Context, namespaceEntry *namespace.Namespace, apiName string) (string, bool) { + if !namespaceEntry.IsGlobalNamespace() { + return policy.currentClusterName, false + } + + if len(namespaceEntry.ClusterNames()) == 1 { + // do not do dc redirection if namespace is only targeting at 1 dc (effectively local namespace) + return policy.currentClusterName, false + } + + if !policy.config.EnableNamespaceNotActiveAutoForwarding(namespaceEntry.Name().String()) { + // do not do dc redirection if auto-forwarding dynamic config flag is not enabled + return policy.currentClusterName, false + } + + if policy.enableForAllAPIs { + return namespaceEntry.ActiveClusterName(), true + } + + _, ok := selectedAPIsForwardingRedirectionPolicyWhitelistedAPIs[apiName] + if !ok { + // do not do dc redirection if API is not whitelisted + return policy.currentClusterName, false + } + + return namespaceEntry.ActiveClusterName(), true +} diff -Nru temporal-1.21.5-1/src/service/frontend/dc_redirection_policy_mock.go temporal-1.22.5/src/service/frontend/dc_redirection_policy_mock.go --- temporal-1.21.5-1/src/service/frontend/dc_redirection_policy_mock.go 1970-01-01 00:00:00.000000000 
+0000 +++ temporal-1.22.5/src/service/frontend/dc_redirection_policy_mock.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,88 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Code generated by MockGen. DO NOT EDIT. +// Source: dc_redirection_policy.go + +// Package frontend is a generated GoMock package. +package frontend + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + namespace "go.temporal.io/server/common/namespace" +) + +// MockDCRedirectionPolicy is a mock of DCRedirectionPolicy interface. +type MockDCRedirectionPolicy struct { + ctrl *gomock.Controller + recorder *MockDCRedirectionPolicyMockRecorder +} + +// MockDCRedirectionPolicyMockRecorder is the mock recorder for MockDCRedirectionPolicy. +type MockDCRedirectionPolicyMockRecorder struct { + mock *MockDCRedirectionPolicy +} + +// NewMockDCRedirectionPolicy creates a new mock instance. +func NewMockDCRedirectionPolicy(ctrl *gomock.Controller) *MockDCRedirectionPolicy { + mock := &MockDCRedirectionPolicy{ctrl: ctrl} + mock.recorder = &MockDCRedirectionPolicyMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockDCRedirectionPolicy) EXPECT() *MockDCRedirectionPolicyMockRecorder { + return m.recorder +} + +// WithNamespaceIDRedirect mocks base method. +func (m *MockDCRedirectionPolicy) WithNamespaceIDRedirect(ctx context.Context, namespaceID namespace.ID, apiName string, call func(string) error) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WithNamespaceIDRedirect", ctx, namespaceID, apiName, call) + ret0, _ := ret[0].(error) + return ret0 +} + +// WithNamespaceIDRedirect indicates an expected call of WithNamespaceIDRedirect. +func (mr *MockDCRedirectionPolicyMockRecorder) WithNamespaceIDRedirect(ctx, namespaceID, apiName, call interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithNamespaceIDRedirect", reflect.TypeOf((*MockDCRedirectionPolicy)(nil).WithNamespaceIDRedirect), ctx, namespaceID, apiName, call) +} + +// WithNamespaceRedirect mocks base method. 
+func (m *MockDCRedirectionPolicy) WithNamespaceRedirect(ctx context.Context, namespace namespace.Name, apiName string, call func(string) error) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WithNamespaceRedirect", ctx, namespace, apiName, call) + ret0, _ := ret[0].(error) + return ret0 +} + +// WithNamespaceRedirect indicates an expected call of WithNamespaceRedirect. +func (mr *MockDCRedirectionPolicyMockRecorder) WithNamespaceRedirect(ctx, namespace, apiName, call interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithNamespaceRedirect", reflect.TypeOf((*MockDCRedirectionPolicy)(nil).WithNamespaceRedirect), ctx, namespace, apiName, call) +} diff -Nru temporal-1.21.5-1/src/service/frontend/dc_redirection_policy_test.go temporal-1.22.5/src/service/frontend/dc_redirection_policy_test.go --- temporal-1.21.5-1/src/service/frontend/dc_redirection_policy_test.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/service/frontend/dc_redirection_policy_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,417 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
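The forwarding behaviour added in dc_redirection_policy.go above boils down to: call the chosen target cluster first, and if that call fails with serviceerror.NamespaceNotActive while auto-forwarding is enabled, retry once against the active cluster named in the error. A self-contained sketch of that flow follows; it is an illustration only, assuming go.temporal.io/api is available, and the forwardOnNotActive helper and the cluster-a/cluster-b names are hypothetical rather than Temporal's actual frontend types.

package main

import (
	"errors"
	"fmt"

	"go.temporal.io/api/serviceerror"
)

// forwardOnNotActive calls fn against targetCluster first; if fn reports that the
// namespace is active in another cluster and forwarding is allowed, it retries once
// against the active cluster carried in the error, mirroring withRedirect above.
// (Sketch: forwardOnNotActive is a hypothetical name, not part of the diff.)
func forwardOnNotActive(targetCluster string, forwardingEnabled bool, fn func(cluster string) error) error {
	err := fn(targetCluster)
	var notActive *serviceerror.NamespaceNotActive
	if !errors.As(err, &notActive) || !forwardingEnabled {
		return err
	}
	return fn(notActive.ActiveCluster)
}

func main() {
	call := func(cluster string) error {
		if cluster == "cluster-a" { // pretend cluster-a is the standby cluster
			return serviceerror.NewNamespaceNotActive("ns", "cluster-a", "cluster-b")
		}
		fmt.Println("handled by", cluster)
		return nil
	}
	_ = forwardOnNotActive("cluster-a", true, call) // prints: handled by cluster-b
}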
+ +package frontend + +import ( + "context" + "fmt" + "testing" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "go.temporal.io/api/serviceerror" + + persistencespb "go.temporal.io/server/api/persistence/v1" + "go.temporal.io/server/common/cluster" + "go.temporal.io/server/common/dynamicconfig" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/namespace" + "go.temporal.io/server/common/primitives/timestamp" +) + +type ( + noopDCRedirectionPolicySuite struct { + suite.Suite + *require.Assertions + + currentClusterName string + policy *NoopRedirectionPolicy + } + + selectedAPIsForwardingRedirectionPolicySuite struct { + suite.Suite + *require.Assertions + + controller *gomock.Controller + mockClusterMetadata *cluster.MockMetadata + mockNamespaceCache *namespace.MockRegistry + + namespace namespace.Name + namespaceID namespace.ID + currentClusterName string + alternativeClusterName string + mockConfig *Config + + policy *SelectedAPIsForwardingRedirectionPolicy + } +) + +func TestNoopDCRedirectionPolicySuite(t *testing.T) { + s := new(noopDCRedirectionPolicySuite) + suite.Run(t, s) +} + +func (s *noopDCRedirectionPolicySuite) SetupSuite() { +} + +func (s *noopDCRedirectionPolicySuite) TearDownSuite() { + +} + +func (s *noopDCRedirectionPolicySuite) SetupTest() { + s.Assertions = require.New(s.T()) + + s.currentClusterName = cluster.TestCurrentClusterName + s.policy = NewNoopRedirectionPolicy(s.currentClusterName) +} + +func (s *noopDCRedirectionPolicySuite) TearDownTest() { + +} + +func (s *noopDCRedirectionPolicySuite) TestWithNamespaceRedirect() { + namespaceName := namespace.Name("some random namespace name") + namespaceID := namespace.ID("some random namespace ID") + apiName := "any random API name" + callCount := 0 + callFn := func(targetCluster string) error { + callCount++ + s.Equal(s.currentClusterName, targetCluster) + return nil + } + + err := s.policy.WithNamespaceIDRedirect(context.Background(), namespaceID, apiName, callFn) + s.Nil(err) + + err = s.policy.WithNamespaceRedirect(context.Background(), namespaceName, apiName, callFn) + s.Nil(err) + + s.Equal(2, callCount) +} + +func TestSelectedAPIsForwardingRedirectionPolicySuite(t *testing.T) { + s := new(selectedAPIsForwardingRedirectionPolicySuite) + suite.Run(t, s) +} + +func (s *selectedAPIsForwardingRedirectionPolicySuite) SetupSuite() { +} + +func (s *selectedAPIsForwardingRedirectionPolicySuite) TearDownSuite() { + +} + +func (s *selectedAPIsForwardingRedirectionPolicySuite) SetupTest() { + s.Assertions = require.New(s.T()) + + s.controller = gomock.NewController(s.T()) + s.mockClusterMetadata = cluster.NewMockMetadata(s.controller) + s.mockNamespaceCache = namespace.NewMockRegistry(s.controller) + + s.namespace = "some random namespace name" + s.namespaceID = "deadd0d0-c001-face-d00d-000000000000" + s.currentClusterName = cluster.TestCurrentClusterName + s.alternativeClusterName = cluster.TestAlternativeClusterName + + logger := log.NewTestLogger() + + s.mockConfig = NewConfig(dynamicconfig.NewCollection(dynamicconfig.NewNoopClient(), logger), 0, true, false) + s.mockClusterMetadata.EXPECT().IsGlobalNamespaceEnabled().Return(true).AnyTimes() + s.policy = NewSelectedAPIsForwardingPolicy( + s.currentClusterName, + s.mockConfig, + s.mockNamespaceCache, + ) +} + +func (s *selectedAPIsForwardingRedirectionPolicySuite) TearDownTest() { + s.controller.Finish() +} + +func (s *selectedAPIsForwardingRedirectionPolicySuite) 
TestWithNamespaceRedirect_LocalNamespace() { + s.setupLocalNamespace() + + apiName := "any random API name" + callCount := 0 + callFn := func(targetCluster string) error { + callCount++ + s.Equal(s.currentClusterName, targetCluster) + return nil + } + + err := s.policy.WithNamespaceIDRedirect(context.Background(), s.namespaceID, apiName, callFn) + s.Nil(err) + + err = s.policy.WithNamespaceRedirect(context.Background(), s.namespace, apiName, callFn) + s.Nil(err) + + s.Equal(2, callCount) +} + +func (s *selectedAPIsForwardingRedirectionPolicySuite) TestWithNamespaceRedirect_GlobalNamespace_OneReplicationCluster() { + s.setupGlobalNamespaceWithOneReplicationCluster() + + apiName := "any random API name" + callCount := 0 + callFn := func(targetCluster string) error { + callCount++ + s.Equal(s.currentClusterName, targetCluster) + return nil + } + + err := s.policy.WithNamespaceIDRedirect(context.Background(), s.namespaceID, apiName, callFn) + s.Nil(err) + + err = s.policy.WithNamespaceRedirect(context.Background(), s.namespace, apiName, callFn) + s.Nil(err) + + s.Equal(2, callCount) +} + +func (s *selectedAPIsForwardingRedirectionPolicySuite) TestWithNamespaceRedirect_GlobalNamespace_NoForwarding_NamespaceNotWhiltelisted() { + s.setupGlobalNamespaceWithTwoReplicationCluster(false, true) + + apiName := "any random API name" + callCount := 0 + callFn := func(targetCluster string) error { + callCount++ + s.Equal(s.currentClusterName, targetCluster) + return nil + } + + err := s.policy.WithNamespaceIDRedirect(context.Background(), s.namespaceID, apiName, callFn) + s.Nil(err) + + err = s.policy.WithNamespaceRedirect(context.Background(), s.namespace, apiName, callFn) + s.Nil(err) + + s.Equal(2, callCount) +} + +func (s *selectedAPIsForwardingRedirectionPolicySuite) TestWithNamespaceRedirect_GlobalNamespace_NoForwarding_APINotWhiltelisted() { + s.setupGlobalNamespaceWithTwoReplicationCluster(true, true) + + callCount := 0 + callFn := func(targetCluster string) error { + callCount++ + s.Equal(s.currentClusterName, targetCluster) + return nil + } + + for apiName := range selectedAPIsForwardingRedirectionPolicyWhitelistedAPIs { + err := s.policy.WithNamespaceIDRedirect(context.Background(), s.namespaceID, apiName, callFn) + s.Nil(err) + + err = s.policy.WithNamespaceRedirect(context.Background(), s.namespace, apiName, callFn) + s.Nil(err) + } + + s.Equal(2*len(selectedAPIsForwardingRedirectionPolicyWhitelistedAPIs), callCount) +} + +func (s *selectedAPIsForwardingRedirectionPolicySuite) TestGetTargetDataCenter_GlobalNamespace_Forwarding_CurrentCluster() { + s.setupGlobalNamespaceWithTwoReplicationCluster(true, true) + + callCount := 0 + callFn := func(targetCluster string) error { + callCount++ + s.Equal(s.currentClusterName, targetCluster) + return nil + } + + for apiName := range selectedAPIsForwardingRedirectionPolicyWhitelistedAPIs { + err := s.policy.WithNamespaceIDRedirect(context.Background(), s.namespaceID, apiName, callFn) + s.Nil(err) + + err = s.policy.WithNamespaceRedirect(context.Background(), s.namespace, apiName, callFn) + s.Nil(err) + } + + s.Equal(2*len(selectedAPIsForwardingRedirectionPolicyWhitelistedAPIs), callCount) +} + +func (s *selectedAPIsForwardingRedirectionPolicySuite) TestGetTargetDataCenter_GlobalNamespace_Forwarding_AlternativeCluster() { + s.setupGlobalNamespaceWithTwoReplicationCluster(true, false) + + callCount := 0 + callFn := func(targetCluster string) error { + callCount++ + s.Equal(s.alternativeClusterName, targetCluster) + return nil + } + + for apiName := range 
selectedAPIsForwardingRedirectionPolicyWhitelistedAPIs { + err := s.policy.WithNamespaceIDRedirect(context.Background(), s.namespaceID, apiName, callFn) + s.Nil(err) + + err = s.policy.WithNamespaceRedirect(context.Background(), s.namespace, apiName, callFn) + s.Nil(err) + } + + s.Equal(2*len(selectedAPIsForwardingRedirectionPolicyWhitelistedAPIs), callCount) +} + +func (s *selectedAPIsForwardingRedirectionPolicySuite) TestGetTargetDataCenter_GlobalNamespace_Forwarding_CurrentClusterToAlternativeCluster() { + s.setupGlobalNamespaceWithTwoReplicationCluster(true, true) + + currentClustercallCount := 0 + alternativeClustercallCount := 0 + callFn := func(targetCluster string) error { + switch targetCluster { + case s.currentClusterName: + currentClustercallCount++ + return serviceerror.NewNamespaceNotActive("", s.currentClusterName, s.alternativeClusterName) + case s.alternativeClusterName: + alternativeClustercallCount++ + return nil + default: + panic(fmt.Sprintf("unknown cluster name %v", targetCluster)) + } + } + + for apiName := range selectedAPIsForwardingRedirectionPolicyWhitelistedAPIs { + err := s.policy.WithNamespaceIDRedirect(context.Background(), s.namespaceID, apiName, callFn) + s.Nil(err) + + err = s.policy.WithNamespaceRedirect(context.Background(), s.namespace, apiName, callFn) + s.Nil(err) + } + + s.Equal(2*len(selectedAPIsForwardingRedirectionPolicyWhitelistedAPIs), currentClustercallCount) + s.Equal(2*len(selectedAPIsForwardingRedirectionPolicyWhitelistedAPIs), alternativeClustercallCount) +} + +func (s *selectedAPIsForwardingRedirectionPolicySuite) TestGetTargetDataCenter_GlobalNamespace_Forwarding_AlternativeClusterToCurrentCluster() { + s.setupGlobalNamespaceWithTwoReplicationCluster(true, false) + + currentClustercallCount := 0 + alternativeClustercallCount := 0 + callFn := func(targetCluster string) error { + switch targetCluster { + case s.currentClusterName: + currentClustercallCount++ + return nil + case s.alternativeClusterName: + alternativeClustercallCount++ + return serviceerror.NewNamespaceNotActive("", s.alternativeClusterName, s.currentClusterName) + default: + panic(fmt.Sprintf("unknown cluster name %v", targetCluster)) + } + } + + for apiName := range selectedAPIsForwardingRedirectionPolicyWhitelistedAPIs { + err := s.policy.WithNamespaceIDRedirect(context.Background(), s.namespaceID, apiName, callFn) + s.Nil(err) + + err = s.policy.WithNamespaceRedirect(context.Background(), s.namespace, apiName, callFn) + s.Nil(err) + } + + s.Equal(2*len(selectedAPIsForwardingRedirectionPolicyWhitelistedAPIs), currentClustercallCount) + s.Equal(2*len(selectedAPIsForwardingRedirectionPolicyWhitelistedAPIs), alternativeClustercallCount) +} + +func (s *selectedAPIsForwardingRedirectionPolicySuite) TestGetTargetDataCenter_GlobalNamespace_Forwarding_AlternativeClusterToCurrentCluster_AllAPIs() { + s.setupGlobalNamespaceWithTwoReplicationCluster(true, false) + s.policy.enableForAllAPIs = true + + currentClustercallCount := 0 + alternativeClustercallCount := 0 + callFn := func(targetCluster string) error { + switch targetCluster { + case s.currentClusterName: + currentClustercallCount++ + return nil + case s.alternativeClusterName: + alternativeClustercallCount++ + return serviceerror.NewNamespaceNotActive("", s.alternativeClusterName, s.currentClusterName) + default: + panic(fmt.Sprintf("unknown cluster name %v", targetCluster)) + } + } + + apiName := "NotExistRandomAPI" + err := s.policy.WithNamespaceIDRedirect(context.Background(), s.namespaceID, apiName, callFn) + s.Nil(err) 
+ + err = s.policy.WithNamespaceRedirect(context.Background(), s.namespace, apiName, callFn) + s.Nil(err) + + s.Equal(2, currentClustercallCount) + s.Equal(2, alternativeClustercallCount) +} + +func (s *selectedAPIsForwardingRedirectionPolicySuite) setupLocalNamespace() { + namespaceEntry := namespace.NewLocalNamespaceForTest( + &persistencespb.NamespaceInfo{Id: s.namespaceID.String(), Name: s.namespace.String()}, + &persistencespb.NamespaceConfig{Retention: timestamp.DurationFromDays(1)}, + cluster.TestCurrentClusterName, + ) + + s.mockNamespaceCache.EXPECT().GetNamespaceByID(s.namespaceID).Return(namespaceEntry, nil).AnyTimes() + s.mockNamespaceCache.EXPECT().GetNamespace(s.namespace).Return(namespaceEntry, nil).AnyTimes() +} + +func (s *selectedAPIsForwardingRedirectionPolicySuite) setupGlobalNamespaceWithOneReplicationCluster() { + namespaceEntry := namespace.NewGlobalNamespaceForTest( + &persistencespb.NamespaceInfo{Id: s.namespaceID.String(), Name: s.namespace.String()}, + &persistencespb.NamespaceConfig{Retention: timestamp.DurationFromDays(1)}, + &persistencespb.NamespaceReplicationConfig{ + ActiveClusterName: cluster.TestAlternativeClusterName, + Clusters: []string{ + cluster.TestCurrentClusterName, + cluster.TestAlternativeClusterName, + }, + }, + 1234, // not used + ) + + s.mockNamespaceCache.EXPECT().GetNamespaceByID(s.namespaceID).Return(namespaceEntry, nil).AnyTimes() + s.mockNamespaceCache.EXPECT().GetNamespace(s.namespace).Return(namespaceEntry, nil).AnyTimes() +} + +func (s *selectedAPIsForwardingRedirectionPolicySuite) setupGlobalNamespaceWithTwoReplicationCluster(forwardingEnabled bool, isRecordActive bool) { + activeCluster := s.alternativeClusterName + if isRecordActive { + activeCluster = s.currentClusterName + } + namespaceEntry := namespace.NewGlobalNamespaceForTest( + &persistencespb.NamespaceInfo{Id: s.namespaceID.String(), Name: s.namespace.String()}, + &persistencespb.NamespaceConfig{Retention: timestamp.DurationFromDays(1)}, + &persistencespb.NamespaceReplicationConfig{ + ActiveClusterName: activeCluster, + Clusters: []string{ + cluster.TestCurrentClusterName, + cluster.TestAlternativeClusterName, + }, + }, + 1234, // not used + ) + + s.mockNamespaceCache.EXPECT().GetNamespaceByID(s.namespaceID).Return(namespaceEntry, nil).AnyTimes() + s.mockNamespaceCache.EXPECT().GetNamespace(s.namespace).Return(namespaceEntry, nil).AnyTimes() + s.mockConfig.EnableNamespaceNotActiveAutoForwarding = dynamicconfig.GetBoolPropertyFnFilteredByNamespace(forwardingEnabled) +} diff -Nru temporal-1.21.5-1/src/service/frontend/fx.go temporal-1.22.5/src/service/frontend/fx.go --- temporal-1.21.5-1/src/service/frontend/fx.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/frontend/fx.go 2024-02-23 09:45:43.000000000 +0000 @@ -25,7 +25,6 @@ package frontend import ( - "context" "fmt" "net" @@ -34,7 +33,6 @@ "google.golang.org/grpc/health" "google.golang.org/grpc/keepalive" - "go.temporal.io/server/api/historyservice/v1" "go.temporal.io/server/client" "go.temporal.io/server/common" "go.temporal.io/server/common/archiver" @@ -60,6 +58,7 @@ "go.temporal.io/server/common/resolver" "go.temporal.io/server/common/resource" "go.temporal.io/server/common/rpc" + "go.temporal.io/server/common/rpc/encryption" "go.temporal.io/server/common/rpc/interceptor" "go.temporal.io/server/common/sdk" "go.temporal.io/server/common/searchattribute" @@ -89,12 +88,13 @@ fx.Provide(ThrottledLoggerRpsFnProvider), fx.Provide(PersistenceRateLimitingParamsProvider), 
fx.Provide(FEReplicatorNamespaceReplicationQueueProvider), - fx.Provide(func(so []grpc.ServerOption) *grpc.Server { return grpc.NewServer(so...) }), + fx.Provide(func(so GrpcServerOptions) *grpc.Server { return grpc.NewServer(so.Options...) }), fx.Provide(HandlerProvider), fx.Provide(AdminHandlerProvider), fx.Provide(OperatorHandlerProvider), fx.Provide(NewVersionChecker), fx.Provide(ServiceResolverProvider), + fx.Provide(HTTPAPIServerProvider), fx.Provide(NewServiceProvider), fx.Invoke(ServiceLifetimeHooks), ) @@ -103,6 +103,7 @@ serviceConfig *Config, server *grpc.Server, healthServer *health.Server, + httpAPIServer *HTTPAPIServer, handler Handler, adminHandler *AdminHandler, operatorHandler *OperatorHandlerImpl, @@ -118,6 +119,7 @@ serviceConfig, server, healthServer, + httpAPIServer, handler, adminHandler, operatorHandler, @@ -131,6 +133,13 @@ ) } +// GrpcServerOptions are the options to build the frontend gRPC server along +// with the interceptors that are already set in the options. +type GrpcServerOptions struct { + Options []grpc.ServerOption + UnaryInterceptors []grpc.UnaryServerInterceptor +} + func GrpcServerOptionsProvider( logger log.Logger, serviceConfig *Config, @@ -138,7 +147,7 @@ rpcFactory common.RPCFactory, namespaceLogInterceptor *interceptor.NamespaceLogInterceptor, namespaceRateLimiterInterceptor *interceptor.NamespaceRateLimitInterceptor, - namespaceCountLimiterInterceptor *interceptor.NamespaceCountLimitInterceptor, + namespaceCountLimiterInterceptor *interceptor.ConcurrentRequestLimitInterceptor, namespaceValidatorInterceptor *interceptor.NamespaceValidatorInterceptor, redirectionInterceptor *RedirectionInterceptor, telemetryInterceptor *interceptor.TelemetryInterceptor, @@ -152,7 +161,7 @@ audienceGetter authorization.JWTAudienceMapper, customInterceptors []grpc.UnaryServerInterceptor, metricsHandler metrics.Handler, -) []grpc.ServerOption { +) GrpcServerOptions { kep := keepalive.EnforcementPolicy{ MinTime: serviceConfig.KeepAliveMinTime(), PermitWithoutStream: serviceConfig.KeepAlivePermitWithoutStream(), @@ -184,8 +193,6 @@ namespaceLogInterceptor.Intercept, // TODO: Deprecate this with a outer custom interceptor grpc.UnaryServerInterceptor(traceInterceptor), metrics.NewServerMetricsContextInjectorInterceptor(), - redirectionInterceptor.Intercept, - telemetryInterceptor.UnaryIntercept, authorization.NewAuthorizationInterceptor( claimMapper, authorizer, @@ -193,6 +200,8 @@ logger, audienceGetter, ), + redirectionInterceptor.Intercept, + telemetryInterceptor.UnaryIntercept, namespaceValidatorInterceptor.StateValidationIntercept, namespaceCountLimiterInterceptor.Intercept, namespaceRateLimiterInterceptor.Intercept, @@ -211,13 +220,14 @@ telemetryInterceptor.StreamIntercept, } - return append( + grpcServerOptions = append( grpcServerOptions, grpc.KeepaliveParams(kp), grpc.KeepaliveEnforcementPolicy(kep), grpc.ChainUnaryInterceptor(unaryInterceptors...), grpc.ChainStreamInterceptor(streamInterceptor...), ) + return GrpcServerOptions{Options: grpcServerOptions, UnaryInterceptors: unaryInterceptors} } func ConfigProvider( @@ -288,16 +298,24 @@ func RateLimitInterceptorProvider( serviceConfig *Config, + frontendServiceResolver membership.ServiceResolver, ) *interceptor.RateLimitInterceptor { - rateFn := func() float64 { return float64(serviceConfig.RPS()) } - namespaceReplicationInducingRateFn := func() float64 { return float64(serviceConfig.NamespaceReplicationInducingAPIsRPS()) } + rateFn := quotas.ClusterAwareQuotaCalculator{ + MemberCounter: 
frontendServiceResolver, + PerInstanceQuota: serviceConfig.RPS, + GlobalQuota: serviceConfig.GlobalRPS, + }.GetQuota + namespaceReplicationInducingRateFn := func() float64 { + return float64(serviceConfig.NamespaceReplicationInducingAPIsRPS()) + } return interceptor.NewRateLimitInterceptor( configs.NewRequestToRateLimiter( - quotas.NewDefaultIncomingRateLimiter(rateFn), - quotas.NewDefaultIncomingRateLimiter(rateFn), - quotas.NewDefaultIncomingRateLimiter(namespaceReplicationInducingRateFn), - quotas.NewDefaultIncomingRateLimiter(rateFn), + quotas.NewDefaultIncomingRateBurst(rateFn), + quotas.NewDefaultIncomingRateBurst(rateFn), + quotas.NewDefaultIncomingRateBurst(namespaceReplicationInducingRateFn), + quotas.NewDefaultIncomingRateBurst(rateFn), + serviceConfig.OperatorRPSRatio, ), map[string]int{}, ) @@ -325,31 +343,21 @@ panic("invalid service name") } - rateFn := func(namespace string) float64 { - return namespaceRPS( - serviceConfig.MaxNamespaceRPSPerInstance, - globalNamespaceRPS, - frontendServiceResolver, - namespace, - ) - } - - visibilityRateFn := func(namespace string) float64 { - return namespaceRPS( - serviceConfig.MaxNamespaceVisibilityRPSPerInstance, - globalNamespaceVisibilityRPS, - frontendServiceResolver, - namespace, - ) - } - namespaceReplicationInducingRateFn := func(ns string) float64 { - return namespaceRPS( - serviceConfig.MaxNamespaceNamespaceReplicationInducingAPIsRPSPerInstance, - globalNamespaceNamespaceReplicationInducingAPIsRPS, - frontendServiceResolver, - ns, - ) - } + rateFn := quotas.ClusterAwareNamespaceSpecificQuotaCalculator{ + MemberCounter: frontendServiceResolver, + PerInstanceQuota: serviceConfig.MaxNamespaceRPSPerInstance, + GlobalQuota: globalNamespaceRPS, + }.GetQuota + visibilityRateFn := quotas.ClusterAwareNamespaceSpecificQuotaCalculator{ + MemberCounter: frontendServiceResolver, + PerInstanceQuota: serviceConfig.MaxNamespaceVisibilityRPSPerInstance, + GlobalQuota: globalNamespaceVisibilityRPS, + }.GetQuota + namespaceReplicationInducingRateFn := quotas.ClusterAwareNamespaceSpecificQuotaCalculator{ + MemberCounter: frontendServiceResolver, + PerInstanceQuota: serviceConfig.MaxNamespaceNamespaceReplicationInducingAPIsRPSPerInstance, + GlobalQuota: globalNamespaceNamespaceReplicationInducingAPIsRPS, + }.GetQuota namespaceRateLimiter := quotas.NewNamespaceRequestRateLimiter( func(req quotas.Request) quotas.RequestRateLimiter { return configs.NewRequestToRateLimiter( @@ -357,6 +365,7 @@ configs.NewNamespaceRateBurst(req.Caller, visibilityRateFn, serviceConfig.MaxNamespaceVisibilityBurstPerInstance), configs.NewNamespaceRateBurst(req.Caller, namespaceReplicationInducingRateFn, serviceConfig.MaxNamespaceNamespaceReplicationInducingAPIsBurstPerInstance), configs.NewNamespaceRateBurst(req.Caller, rateFn, serviceConfig.MaxNamespaceBurstPerInstance), + serviceConfig.OperatorRPSRatio, ) }, ) @@ -366,12 +375,15 @@ func NamespaceCountLimitInterceptorProvider( serviceConfig *Config, namespaceRegistry namespace.Registry, + serviceResolver membership.ServiceResolver, logger log.SnTaggedLogger, -) *interceptor.NamespaceCountLimitInterceptor { - return interceptor.NewNamespaceCountLimitInterceptor( +) *interceptor.ConcurrentRequestLimitInterceptor { + return interceptor.NewConcurrentRequestLimitInterceptor( namespaceRegistry, + serviceResolver, logger, - serviceConfig.MaxNamespaceCountPerInstance, + serviceConfig.MaxConcurrentLongRunningRequestsPerInstance, + serviceConfig.MaxGlobalConcurrentLongRunningRequests, configs.ExecutionAPICountLimitOverride, ) } @@ 
-406,6 +418,7 @@ serviceConfig.PersistenceNamespaceMaxQPS, serviceConfig.PersistencePerShardNamespaceMaxQPS, serviceConfig.EnablePersistencePriorityRateLimiting, + serviceConfig.OperatorRPSRatio, serviceConfig.PersistenceDynamicRateLimitingParams, ) } @@ -429,6 +442,7 @@ searchAttributesMapperProvider, serviceConfig.VisibilityPersistenceMaxReadQPS, serviceConfig.VisibilityPersistenceMaxWriteQPS, + serviceConfig.OperatorRPSRatio, serviceConfig.EnableReadFromSecondaryVisibility, dynamicconfig.GetStringPropertyFn(visibility.SecondaryVisibilityWritingModeOff), // frontend visibility never write serviceConfig.VisibilityDisableOrderByClause, @@ -470,7 +484,7 @@ persistenceMetadataManager persistence.MetadataManager, clientFactory client.Factory, clientBean client.Bean, - historyClient historyservice.HistoryServiceClient, + historyClient resource.HistoryClient, sdkClientFactory sdk.ClientFactory, membershipMonitor membership.Monitor, hostInfoProvider membership.HostInfoProvider, @@ -527,7 +541,7 @@ saProvider searchattribute.Provider, saManager searchattribute.Manager, healthServer *health.Server, - historyClient historyservice.HistoryServiceClient, + historyClient resource.HistoryClient, clusterMetadataManager persistence.ClusterMetadataManager, clusterMetadata cluster.Metadata, clientFactory client.Factory, @@ -562,7 +576,7 @@ clusterMetadataManager persistence.ClusterMetadataManager, persistenceMetadataManager persistence.MetadataManager, clientBean client.Bean, - historyClient historyservice.HistoryServiceClient, + historyClient resource.HistoryClient, matchingClient resource.MatchingClient, archiverProvider provider.ArchiverProvider, metricsHandler metrics.Handler, @@ -601,26 +615,42 @@ return wfHandler } -func ServiceLifetimeHooks( - lc fx.Lifecycle, - svcStoppedCh chan struct{}, - svc *Service, -) { - lc.Append( - fx.Hook{ - OnStart: func(context.Context) error { - go func(svc common.Daemon, svcStoppedCh chan<- struct{}) { - // Start is blocked until Stop() is called. - svc.Start() - close(svcStoppedCh) - }(svc, svcStoppedCh) - - return nil - }, - OnStop: func(ctx context.Context) error { - svc.Stop() - return nil - }, - }, +// HTTPAPIServerProvider provides an HTTP API server if enabled or nil +// otherwise. 
+func HTTPAPIServerProvider( + cfg *config.Config, + serviceName primitives.ServiceName, + serviceConfig *Config, + grpcListener net.Listener, + tlsConfigProvider encryption.TLSConfigProvider, + handler Handler, + grpcServerOptions GrpcServerOptions, + metricsHandler metrics.Handler, + namespaceRegistry namespace.Registry, + logger log.Logger, +) (*HTTPAPIServer, error) { + // If the service is not the frontend service, HTTP API is disabled + if serviceName != primitives.FrontendService { + return nil, nil + } + // If HTTP API port is 0, it is disabled + rpcConfig := cfg.Services[string(serviceName)].RPC + if rpcConfig.HTTPPort == 0 { + return nil, nil + } + return NewHTTPAPIServer( + serviceConfig, + rpcConfig, + grpcListener, + tlsConfigProvider, + handler, + grpcServerOptions.UnaryInterceptors, + metricsHandler, + namespaceRegistry, + logger, ) } + +func ServiceLifetimeHooks(lc fx.Lifecycle, svc *Service) { + lc.Append(fx.StartStopHook(svc.Start, svc.Stop)) +} diff -Nru temporal-1.21.5-1/src/service/frontend/fx_test.go temporal-1.22.5/src/service/frontend/fx_test.go --- temporal-1.21.5-1/src/service/frontend/fx_test.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/service/frontend/fx_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,265 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
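HTTPAPIServerProvider above gates the new HTTP API on two conditions: the service being wired up must be the frontend, and the frontend RPC config must declare a non-zero HTTP port; in every other case it returns a nil server with a nil error. A minimal sketch of that decision follows, where httpAPIEnabled and the example port 7243 are hypothetical illustrations, not anything from the Temporal codebase.

package main

import "fmt"

// httpAPIEnabled reports whether the HTTP API should be started: only on the
// frontend service and only when an HTTP port has been configured (non-zero).
// (Sketch: this helper restates the gating in HTTPAPIServerProvider above.)
func httpAPIEnabled(serviceName string, httpPort int) bool {
	return serviceName == "frontend" && httpPort != 0
}

func main() {
	fmt.Println(httpAPIEnabled("frontend", 7243)) // true: frontend with a port configured
	fmt.Println(httpAPIEnabled("frontend", 0))    // false: an unset port disables the API
	fmt.Println(httpAPIEnabled("history", 7243))  // false: only the frontend serves HTTP
}

Returning a nil *HTTPAPIServer together with a nil error keeps the HTTP API opt-in without failing dependency injection when it is disabled.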
+ +package frontend + +import ( + "context" + "net" + "sync" + "testing" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.temporal.io/api/workflowservice/v1" + "go.temporal.io/server/common/membership" + "go.temporal.io/server/internal/nettest" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" +) + +type testCase struct { + // name of the test case + name string + // t is the test object + t *testing.T + // globalRPSLimit is the global RPS limit for all frontend hosts + globalRPSLimit int + // perInstanceRPSLimit is the RPS limit for each frontend host + perInstanceRPSLimit int + // operatorRPSRatio is the ratio of the global RPS limit that is reserved for operator requests + operatorRPSRatio float64 + // expectRateLimit is true if the interceptor should return a rate limit error + expectRateLimit bool + // numRequests is the number of requests to send to the interceptor + numRequests int + // serviceResolver is used to determine the number of frontend hosts for the global rate limiter + serviceResolver membership.ServiceResolver + // configure is a function that can be used to override the default test case values + configure func(tc *testCase) +} + +func TestRateLimitInterceptorProvider(t *testing.T) { + t.Parallel() + + // The burst limit is 2 * the rps limit, so this is 8, which is < the number of requests. The interceptor should + // rate limit one of the last two requests because we exceeded the burst limit, and there is no delay between the + // last two requests. + lowPerInstanceRPSLimit := 4 + // The burst limit is 2 * the rps limit, so this is 10, which is >= the number of requests. The interceptor should + // not rate limit any of the requests because we never exceed the burst limit. + highPerInstanceRPSLimit := 5 + // The number of hosts is 10, so if 4 is too low of an RPS limit per-instance, then 40 is too low of a global RPS + // limit. 
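The comments above, and the test cases that follow, lean on two pieces of arithmetic: when a global RPS limit is configured and the membership size is known, the effective per-host rate is the global limit split across the hosts (matching how quotas.ClusterAwareQuotaCalculator is wired up in fx.go earlier in this diff), and the burst allowance is twice the RPS. The sketch below works through those numbers; the even split and the 2x burst factor are assumptions read off the surrounding comments and test expectations, and effectiveRPS is a hypothetical helper, not the quotas package API.

package main

import "fmt"

// effectiveRPS returns the per-host rate: the global limit split evenly across
// hosts when one is configured and the membership size is known, otherwise the
// per-instance limit. (Assumption inferred from the test cases below.)
func effectiveRPS(globalRPS, perInstanceRPS, hosts int) int {
	if globalRPS > 0 && hosts > 0 {
		return globalRPS / hosts
	}
	return perInstanceRPS
}

func main() {
	// Low global limit: 40 RPS over 10 hosts gives 4 RPS per host, burst 2*4 = 8,
	// so 10 back-to-back requests exceed the burst and at least one is limited.
	rps := effectiveRPS(40, 5, 10)
	fmt.Println(rps, 2*rps) // 4 8

	// High global limit with a low per-instance limit: 50 over 10 hosts gives 5 RPS,
	// burst 10 >= 10 requests, so nothing is limited and the per-instance 4 is ignored.
	rps = effectiveRPS(50, 4, 10)
	fmt.Println(rps, 2*rps) // 5 10

	// No global limit and an unknown membership size: fall back to the per-instance limit.
	fmt.Println(effectiveRPS(0, 4, 0)) // 4
}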
+ numHosts := 10 + lowGlobalRPSLimit := lowPerInstanceRPSLimit * numHosts + highGlobalRPSLimit := highPerInstanceRPSLimit * numHosts + operatorRPSRatio := 0.2 + + testCases := []testCase{ + { + name: "both rate limits hit", + configure: func(tc *testCase) { + tc.globalRPSLimit = lowGlobalRPSLimit + tc.perInstanceRPSLimit = lowPerInstanceRPSLimit + tc.operatorRPSRatio = operatorRPSRatio + tc.expectRateLimit = true + }, + }, + { + name: "global rate limit hit", + configure: func(tc *testCase) { + tc.globalRPSLimit = lowGlobalRPSLimit + tc.perInstanceRPSLimit = highPerInstanceRPSLimit + tc.operatorRPSRatio = operatorRPSRatio + tc.expectRateLimit = true + }, + }, + { + name: "per instance rate limit hit but ignored because global rate limit is not hit", + configure: func(tc *testCase) { + tc.globalRPSLimit = highGlobalRPSLimit + tc.perInstanceRPSLimit = lowPerInstanceRPSLimit + tc.operatorRPSRatio = operatorRPSRatio + tc.expectRateLimit = false + }, + }, + { + name: "neither rate limit hit", + configure: func(tc *testCase) { + tc.globalRPSLimit = highGlobalRPSLimit + tc.perInstanceRPSLimit = highPerInstanceRPSLimit + tc.operatorRPSRatio = operatorRPSRatio + tc.expectRateLimit = false + }, + }, + { + name: "global rate limit not configured and per instance rate limit not hit", + configure: func(tc *testCase) { + tc.globalRPSLimit = 0 + tc.perInstanceRPSLimit = highPerInstanceRPSLimit + tc.operatorRPSRatio = operatorRPSRatio + tc.expectRateLimit = false + }, + }, + { + name: "global rate limit not configured and per instance rate limit is hit", + configure: func(tc *testCase) { + tc.globalRPSLimit = 0 + tc.perInstanceRPSLimit = lowPerInstanceRPSLimit + tc.operatorRPSRatio = operatorRPSRatio + tc.expectRateLimit = true + }, + }, + { + name: "global rate limit not configured and zero per-instance rate limit", + configure: func(tc *testCase) { + tc.globalRPSLimit = 0 + tc.perInstanceRPSLimit = 0 + tc.operatorRPSRatio = operatorRPSRatio + tc.expectRateLimit = true + }, + }, + { + name: "nil service resolver causes global RPS limit to be ignored", + configure: func(tc *testCase) { + tc.globalRPSLimit = lowPerInstanceRPSLimit + tc.perInstanceRPSLimit = highPerInstanceRPSLimit + tc.operatorRPSRatio = operatorRPSRatio + tc.expectRateLimit = false + tc.serviceResolver = nil + }, + }, + { + name: "no hosts causes global RPS limit to be ignored", + configure: func(tc *testCase) { + tc.globalRPSLimit = lowPerInstanceRPSLimit + tc.perInstanceRPSLimit = highPerInstanceRPSLimit + tc.operatorRPSRatio = operatorRPSRatio + tc.expectRateLimit = false + serviceResolver := membership.NewMockServiceResolver(gomock.NewController(tc.t)) + serviceResolver.EXPECT().MemberCount().Return(0).AnyTimes() + tc.serviceResolver = serviceResolver + }, + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + tc.numRequests = 10 + tc.t = t + { + // Create a mock service resolver which returns the number of frontend hosts. + // This may be overridden by the test case. + ctrl := gomock.NewController(t) + serviceResolver := membership.NewMockServiceResolver(ctrl) + serviceResolver.EXPECT().MemberCount().Return(numHosts).AnyTimes() + tc.serviceResolver = serviceResolver + } + tc.configure(&tc) + + // Create a rate limit interceptor which uses the per-instance and global RPS limits from the test case. 
+ rateLimitInterceptor := RateLimitInterceptorProvider(&Config{ + RPS: func() int { + return tc.perInstanceRPSLimit + }, + GlobalRPS: func() int { + return tc.globalRPSLimit + }, + NamespaceReplicationInducingAPIsRPS: func() int { + // this is not used in this test + return 0 + }, + OperatorRPSRatio: func() float64 { + return tc.operatorRPSRatio + }, + }, tc.serviceResolver) + + // Create a gRPC server for the fake workflow service. + svc := &testSvc{} + server := grpc.NewServer(grpc.UnaryInterceptor(rateLimitInterceptor.Intercept)) + workflowservice.RegisterWorkflowServiceServer(server, svc) + + pipe := nettest.NewPipe() + + var wg sync.WaitGroup + defer wg.Wait() + wg.Add(1) + + listener := nettest.NewListener(pipe) + go func() { + defer wg.Done() + + _ = server.Serve(listener) + }() + + // Create a gRPC client to the fake workflow service. + dialer := grpc.WithContextDialer(func(ctx context.Context, _ string) (net.Conn, error) { + return pipe.Connect(ctx.Done()) + }) + transportCredentials := grpc.WithTransportCredentials(insecure.NewCredentials()) + conn, err := grpc.DialContext(context.Background(), "fake", dialer, transportCredentials) + require.NoError(t, err) + + defer server.Stop() + + client := workflowservice.NewWorkflowServiceClient(conn) + + // Generate load by sending a number of requests to the server. + for i := 0; i < tc.numRequests; i++ { + _, err = client.StartWorkflowExecution( + context.Background(), + &workflowservice.StartWorkflowExecutionRequest{}, + ) + if err != nil { + break + } + } + + // Check if the rate limit is hit. + if tc.expectRateLimit { + assert.ErrorContains(t, err, "rate limit exceeded") + } else { + assert.NoError(t, err) + } + }) + } +} + +// testSvc is a fake workflow service. +type testSvc struct { + workflowservice.UnimplementedWorkflowServiceServer +} + +// StartWorkflowExecution is a fake implementation of the StartWorkflowExecution gRPC method which does nothing. +func (t *testSvc) StartWorkflowExecution( + context.Context, + *workflowservice.StartWorkflowExecutionRequest, +) (*workflowservice.StartWorkflowExecutionResponse, error) { + return &workflowservice.StartWorkflowExecutionResponse{}, nil +} diff -Nru temporal-1.21.5-1/src/service/frontend/http_api_server.go temporal-1.22.5/src/service/frontend/http_api_server.go --- temporal-1.21.5-1/src/service/frontend/http_api_server.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/service/frontend/http_api_server.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,494 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package frontend + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + "net" + "net/http" + "reflect" + "time" + + "github.com/gogo/protobuf/proto" + "github.com/gogo/status" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "go.temporal.io/api/proxy" + "go.temporal.io/api/serviceerror" + "go.temporal.io/api/workflowservice/v1" + "go.temporal.io/server/common/config" + "go.temporal.io/server/common/headers" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/log/tag" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/namespace" + "go.temporal.io/server/common/rpc" + "go.temporal.io/server/common/rpc/encryption" + "go.temporal.io/server/common/rpc/interceptor" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" +) + +// HTTPAPIServer is an HTTP API server that forwards requests to gRPC via the +// gRPC interceptors. +type HTTPAPIServer struct { + server http.Server + listener net.Listener + logger log.Logger + serveMux *runtime.ServeMux + stopped chan struct{} + matchAdditionalHeaders map[string]bool +} + +var defaultForwardedHeaders = []string{ + "Authorization-Extras", + "X-Forwarded-For", + http.CanonicalHeaderKey(headers.ClientNameHeaderName), + http.CanonicalHeaderKey(headers.ClientVersionHeaderName), +} + +type httpRemoteAddrContextKey struct{} + +var ( + errHTTPGRPCListenerNotTCP = errors.New("must use TCP for gRPC listener to support HTTP API") + errHTTPGRPCStreamNotSupported = errors.New("stream not supported") +) + +// NewHTTPAPIServer creates an [HTTPAPIServer]. 
+func NewHTTPAPIServer( + serviceConfig *Config, + rpcConfig config.RPC, + grpcListener net.Listener, + tlsConfigProvider encryption.TLSConfigProvider, + handler Handler, + interceptors []grpc.UnaryServerInterceptor, + metricsHandler metrics.Handler, + namespaceRegistry namespace.Registry, + logger log.Logger, +) (*HTTPAPIServer, error) { + // Create a TCP listener the same as the frontend one but with different port + tcpAddrRef, _ := grpcListener.Addr().(*net.TCPAddr) + if tcpAddrRef == nil { + return nil, errHTTPGRPCListenerNotTCP + } + tcpAddr := *tcpAddrRef + tcpAddr.Port = rpcConfig.HTTPPort + var listener net.Listener + var err error + if listener, err = net.ListenTCP("tcp", &tcpAddr); err != nil { + return nil, fmt.Errorf("failed listening for HTTP API on %v: %w", &tcpAddr, err) + } + // Close the listener if anything else in this function fails + success := false + defer func() { + if !success { + _ = listener.Close() + } + }() + + // Wrap the listener in a TLS listener if there is any TLS config + if tlsConfigProvider != nil { + if tlsConfig, err := tlsConfigProvider.GetFrontendServerConfig(); err != nil { + return nil, fmt.Errorf("failed getting TLS config for HTTP API: %w", err) + } else if tlsConfig != nil { + listener = tls.NewListener(listener, tlsConfig) + } + } + + h := &HTTPAPIServer{ + listener: listener, + logger: logger, + stopped: make(chan struct{}), + } + + // Build 4 possible marshalers in order based on content type + opts := []runtime.ServeMuxOption{ + runtime.WithMarshalerOption("application/json+pretty+no-payload-shorthand", h.newMarshaler(" ", true)), + runtime.WithMarshalerOption("application/json+no-payload-shorthand", h.newMarshaler("", true)), + runtime.WithMarshalerOption("application/json+pretty", h.newMarshaler(" ", false)), + runtime.WithMarshalerOption(runtime.MIMEWildcard, h.newMarshaler("", false)), + } + + // Set Temporal service error handler + opts = append(opts, runtime.WithProtoErrorHandler(h.errorHandler)) + + // Match headers w/ default + h.matchAdditionalHeaders = map[string]bool{} + for _, v := range defaultForwardedHeaders { + h.matchAdditionalHeaders[v] = true + } + for _, v := range rpcConfig.HTTPAdditionalForwardedHeaders { + h.matchAdditionalHeaders[http.CanonicalHeaderKey(v)] = true + } + opts = append(opts, runtime.WithIncomingHeaderMatcher(h.incomingHeaderMatcher)) + + // Create inline client connection + clientConn := newInlineClientConn( + map[string]any{"temporal.api.workflowservice.v1.WorkflowService": handler}, + interceptors, + metricsHandler, + namespaceRegistry, + ) + + // Create serve mux + h.serveMux = runtime.NewServeMux(opts...) + err = workflowservice.RegisterWorkflowServiceHandlerClient( + context.Background(), + h.serveMux, + workflowservice.NewWorkflowServiceClient(clientConn), + ) + if err != nil { + return nil, fmt.Errorf("failed registering HTTP API handler: %w", err) + } + // Set the handler as our function that wraps serve mux + h.server.Handler = http.HandlerFunc(h.serveHTTP) + + // Put the remote address on the context + h.server.ConnContext = func(ctx context.Context, c net.Conn) context.Context { + return context.WithValue(ctx, httpRemoteAddrContextKey{}, c) + } + + // We want to set ReadTimeout and WriteTimeout as max idle (and IdleTimeout + // defaults to ReadTimeout) to ensure that a connection cannot hang over that + // amount of time. 
+ h.server.ReadTimeout = serviceConfig.KeepAliveMaxConnectionIdle() + h.server.WriteTimeout = serviceConfig.KeepAliveMaxConnectionIdle() + + success = true + return h, nil +} + +// Serve serves the HTTP API and does not return until there is a serve error or +// GracefulStop completes. Upon graceful stop, this will return nil. If an error +// is returned, the message is clear that it came from the HTTP API server. +func (h *HTTPAPIServer) Serve() error { + err := h.server.Serve(h.listener) + // If the error is for close, we have to wait for the shutdown to complete and + // we don't consider it an error + if errors.Is(err, http.ErrServerClosed) { + <-h.stopped + err = nil + } + // Wrap the error to be clearer it's from the HTTP API + if err != nil { + return fmt.Errorf("HTTP API serve failed: %w", err) + } + return nil +} + +// GracefulStop stops the HTTP server. This will first attempt a graceful stop +// with a drain time, then will hard-stop. This will not return until stopped. +func (h *HTTPAPIServer) GracefulStop(gracefulDrainTime time.Duration) { + // We try a graceful stop for the amount of time we can drain, then we do a + // hard stop + shutdownCtx, cancel := context.WithTimeout(context.Background(), gracefulDrainTime) + defer cancel() + // We intentionally ignore this error, we're gonna stop at this point no + // matter what. This closes the listener too. + _ = h.server.Shutdown(shutdownCtx) + _ = h.server.Close() + close(h.stopped) +} + +func (h *HTTPAPIServer) serveHTTP(w http.ResponseWriter, r *http.Request) { + // Limit the request body to max gRPC size. This is hardcoded to 4MB at the + // moment using gRPC's default at + // https://github.com/grpc/grpc-go/blob/0673105ebcb956e8bf50b96e28209ab7845a65ad/server.go#L58 + // which is what the constant is set as at the time of this comment. + r.Body = http.MaxBytesReader(w, r.Body, rpc.MaxHTTPAPIRequestBytes) + + h.logger.Debug( + "HTTP API call", + tag.NewStringTag("http-method", r.Method), + tag.NewAnyTag("http-url", r.URL), + ) + + // Need to change the accept header based on whether pretty and/or + // noPayloadShorthand are present + var acceptHeaderSuffix string + if _, ok := r.URL.Query()["pretty"]; ok { + acceptHeaderSuffix += "+pretty" + } + if _, ok := r.URL.Query()["noPayloadShorthand"]; ok { + acceptHeaderSuffix += "+no-payload-shorthand" + } + if acceptHeaderSuffix != "" { + r.Header.Set("Accept", "application/json"+acceptHeaderSuffix) + } + + // Put the TLS info on the peer context + if r.TLS != nil { + var addr net.Addr + if conn, _ := r.Context().Value(httpRemoteAddrContextKey{}).(net.Conn); conn != nil { + addr = conn.RemoteAddr() + } + r = r.WithContext(peer.NewContext(r.Context(), &peer.Peer{ + Addr: addr, + AuthInfo: credentials.TLSInfo{ + State: *r.TLS, + CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}, + }, + })) + } + + // Call gRPC gateway mux + h.serveMux.ServeHTTP(w, r) +} + +func (h *HTTPAPIServer) errorHandler( + ctx context.Context, + mux *runtime.ServeMux, + marshaler runtime.Marshaler, + w http.ResponseWriter, + r *http.Request, + err error, +) { + // Convert the error using serviceerror. The result does not conform to Google + // gRPC status directly (it conforms to gogo gRPC status), but Err() does + // based on internal code reading. However, Err() uses Google proto Any + // which our marshaler is not expecting. So instead we are embedding similar + // logic to runtime.DefaultHTTPProtoErrorHandler in here but with gogo + // support. 
We don't implement custom content type marshaler or trailers at + // this time. + + s := serviceerror.ToStatus(err) + w.Header().Set("Content-Type", marshaler.ContentType()) + + buf, merr := marshaler.Marshal(s.Proto()) + if merr != nil { + h.logger.Warn("Failed to marshal error message", tag.Error(merr)) + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusInternalServerError) + _, _ = w.Write([]byte(`{"code": 13, "message": "failed to marshal error message"}`)) + return + } + + w.WriteHeader(runtime.HTTPStatusFromCode(s.Code())) + _, _ = w.Write(buf) +} + +func (h *HTTPAPIServer) newMarshaler(indent string, disablePayloadShorthand bool) runtime.Marshaler { + marshalOpts := proxy.JSONPBMarshalerOptions{ + Indent: indent, + DisablePayloadShorthand: disablePayloadShorthand, + } + unmarshalOpts := proxy.JSONPBUnmarshalerOptions{DisablePayloadShorthand: disablePayloadShorthand} + if m, err := proxy.NewJSONPBMarshaler(marshalOpts); err != nil { + panic(err) + } else if u, err := proxy.NewJSONPBUnmarshaler(unmarshalOpts); err != nil { + panic(err) + } else { + return proxy.NewGRPCGatewayJSONPBMarshaler(m, u) + } +} + +func (h *HTTPAPIServer) incomingHeaderMatcher(headerName string) (string, bool) { + // Try ours before falling back to default + if h.matchAdditionalHeaders[headerName] { + return headerName, true + } + return runtime.DefaultHeaderMatcher(headerName) +} + +// inlineClientConn is a [grpc.ClientConnInterface] implementation that forwards +// requests directly to gRPC via interceptors. This implementation moves all +// outgoing metadata to incoming and takes resulting outgoing metadata and sets +// as header. But which headers to use and TLS peer context and such are +// expected to be handled by the caller. +type inlineClientConn struct { + methods map[string]*serviceMethod + interceptor grpc.UnaryServerInterceptor + requestsCounter metrics.CounterIface + namespaceRegistry namespace.Registry +} + +var _ grpc.ClientConnInterface = (*inlineClientConn)(nil) + +type serviceMethod struct { + info grpc.UnaryServerInfo + handler grpc.UnaryHandler +} + +var contextType = reflect.TypeOf((*context.Context)(nil)).Elem() +var protoMessageType = reflect.TypeOf((*proto.Message)(nil)).Elem() +var errorType = reflect.TypeOf((*error)(nil)).Elem() + +func newInlineClientConn( + servers map[string]any, + interceptors []grpc.UnaryServerInterceptor, + metricsHandler metrics.Handler, + namespaceRegistry namespace.Registry, +) *inlineClientConn { + // Create the set of methods via reflection. We currently accept the overhead + // of reflection compared to having to custom generate gateway code. + methods := map[string]*serviceMethod{} + for qualifiedServerName, server := range servers { + serverVal := reflect.ValueOf(server) + for i := 0; i < serverVal.Type().NumMethod(); i++ { + reflectMethod := serverVal.Type().Method(i) + // We intentionally look this up by name to not assume method indexes line + // up from type to value + methodVal := serverVal.MethodByName(reflectMethod.Name) + // We assume the methods we want only accept a context + request and only + // return a response + error. We also assume the method name matches the + // RPC name. 
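A self-contained sketch of that reflection shape test, with the proto.Message constraints from the real check left out and demoService invented for the demo:

package main

import (
	"context"
	"fmt"
	"reflect"
)

var (
	ctxType = reflect.TypeOf((*context.Context)(nil)).Elem()
	errType = reflect.TypeOf((*error)(nil)).Elem()
)

// isUnaryRPCShaped reports whether m looks like func(ctx, req) (resp, error).
// The real check additionally requires req and resp to implement proto.Message.
func isUnaryRPCShaped(m reflect.Value) bool {
	t := m.Type()
	return t.Kind() == reflect.Func &&
		t.NumIn() == 2 && t.NumOut() == 2 &&
		t.In(0) == ctxType &&
		t.Out(1) == errType
}

// demoService is a stand-in for a generated service implementation.
type demoService struct{}

func (demoService) Ping(ctx context.Context, req string) (string, error) { return "pong: " + req, nil }
func (demoService) Close()                                               {}

func main() {
	v := reflect.ValueOf(demoService{})
	for i := 0; i < v.Type().NumMethod(); i++ {
		name := v.Type().Method(i).Name
		fmt.Println(name, isUnaryRPCShaped(v.MethodByName(name)))
	}
	// Prints: Close false, Ping true.
}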
+ methodType := methodVal.Type() + validRPCMethod := methodType.Kind() == reflect.Func && + methodType.NumIn() == 2 && + methodType.NumOut() == 2 && + methodType.In(0) == contextType && + methodType.In(1).Implements(protoMessageType) && + methodType.Out(0).Implements(protoMessageType) && + methodType.Out(1) == errorType + if !validRPCMethod { + continue + } + fullMethod := "/" + qualifiedServerName + "/" + reflectMethod.Name + methods[fullMethod] = &serviceMethod{ + info: grpc.UnaryServerInfo{Server: server, FullMethod: fullMethod}, + handler: func(ctx context.Context, req interface{}) (interface{}, error) { + ret := methodVal.Call([]reflect.Value{reflect.ValueOf(ctx), reflect.ValueOf(req)}) + err, _ := ret[1].Interface().(error) + return ret[0].Interface(), err + }, + } + } + } + + return &inlineClientConn{ + methods: methods, + interceptor: chainUnaryServerInterceptors(interceptors), + requestsCounter: metricsHandler.Counter(metrics.HTTPServiceRequests.GetMetricName()), + namespaceRegistry: namespaceRegistry, + } +} + +func (i *inlineClientConn) Invoke( + ctx context.Context, + method string, + args any, + reply any, + opts ...grpc.CallOption, +) error { + // Move outgoing metadata to incoming and set new outgoing metadata + md, _ := metadata.FromOutgoingContext(ctx) + // Set the client and version headers if not already set + if len(md[headers.ClientNameHeaderName]) == 0 { + md.Set(headers.ClientNameHeaderName, headers.ClientNameServerHTTP) + } + if len(md[headers.ClientVersionHeaderName]) == 0 { + md.Set(headers.ClientVersionHeaderName, headers.ServerVersion) + } + ctx = metadata.NewIncomingContext(ctx, md) + outgoingMD := metadata.MD{} + ctx = metadata.NewOutgoingContext(ctx, outgoingMD) + + // Get the method. Should never fail, but we check anyways + serviceMethod := i.methods[method] + if serviceMethod == nil { + return status.Error(codes.NotFound, "call not found") + } + + // Add metric + var namespaceTag metrics.Tag + if namespaceName := interceptor.MustGetNamespaceName(i.namespaceRegistry, args); namespaceName != "" { + namespaceTag = metrics.NamespaceTag(namespaceName.String()) + } else { + namespaceTag = metrics.NamespaceUnknownTag() + } + i.requestsCounter.Record(1, metrics.OperationTag(method), namespaceTag) + + // Invoke + var resp any + var err error + if i.interceptor == nil { + resp, err = serviceMethod.handler(ctx, args) + } else { + resp, err = i.interceptor(ctx, args, &serviceMethod.info, serviceMethod.handler) + } + + // Find the header call option and set response headers. We accept that if + // somewhere internally the metadata was replaced instead of appended to, this + // does not work. + for _, opt := range opts { + if callOpt, ok := opt.(grpc.HeaderCallOption); ok { + *callOpt.HeaderAddr = outgoingMD + } + } + + // Merge the response proto onto the wanted reply if non-nil + if respProto, _ := resp.(proto.Message); respProto != nil { + proto.Merge(reply.(proto.Message), respProto) + } + + return err +} + +func (*inlineClientConn) NewStream( + context.Context, + *grpc.StreamDesc, + string, + ...grpc.CallOption, +) (grpc.ClientStream, error) { + return nil, errHTTPGRPCStreamNotSupported +} + +// Mostly taken from https://github.com/grpc/grpc-go/blob/v1.56.1/server.go#L1124-L1158 +// with slight modifications. 
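The Invoke method above moves caller-side (outgoing) metadata into the incoming position that server interceptors read, defaulting the client name and version headers when they are absent; a minimal standalone sketch of that move with the grpc metadata package (the header keys and values here are illustrative, not the real header constants):

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/metadata"
)

func main() {
	// A caller-side context carries outgoing metadata...
	ctx := metadata.AppendToOutgoingContext(context.Background(), "client-name", "demo-cli")

	// ...while server-side interceptors read incoming metadata, so the values
	// are copied across and defaults are filled in when a key is missing.
	md, _ := metadata.FromOutgoingContext(ctx)
	if len(md.Get("client-version")) == 0 {
		md.Set("client-version", "0.0.0-example")
	}
	ctx = metadata.NewIncomingContext(ctx, md)

	incoming, _ := metadata.FromIncomingContext(ctx)
	fmt.Println(incoming.Get("client-name"), incoming.Get("client-version"))
}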
+func chainUnaryServerInterceptors(interceptors []grpc.UnaryServerInterceptor) grpc.UnaryServerInterceptor { + switch len(interceptors) { + case 0: + return nil + case 1: + return interceptors[0] + default: + return chainUnaryInterceptors(interceptors) + } +} + +func chainUnaryInterceptors(interceptors []grpc.UnaryServerInterceptor) grpc.UnaryServerInterceptor { + return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + return interceptors[0](ctx, req, info, getChainUnaryHandler(interceptors, 0, info, handler)) + } +} + +func getChainUnaryHandler( + interceptors []grpc.UnaryServerInterceptor, + curr int, + info *grpc.UnaryServerInfo, + finalHandler grpc.UnaryHandler, +) grpc.UnaryHandler { + if curr == len(interceptors)-1 { + return finalHandler + } + return func(ctx context.Context, req interface{}) (interface{}, error) { + return interceptors[curr+1](ctx, req, info, getChainUnaryHandler(interceptors, curr+1, info, finalHandler)) + } +} diff -Nru temporal-1.21.5-1/src/service/frontend/interface.go temporal-1.22.5/src/service/frontend/interface.go --- temporal-1.21.5-1/src/service/frontend/interface.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/frontend/interface.go 2024-02-23 09:45:43.000000000 +0000 @@ -29,8 +29,6 @@ import ( "go.temporal.io/api/operatorservice/v1" "go.temporal.io/api/workflowservice/v1" - - "go.temporal.io/server/common" ) const ( @@ -43,14 +41,13 @@ // Handler is interface wrapping frontend workflow handler Handler interface { workflowservice.WorkflowServiceServer - common.Daemon - GetConfig() *Config + Start() + Stop() } // OperatorHandler is interface wrapping frontend workflow handler OperatorHandler interface { operatorservice.OperatorServiceServer - common.Daemon } ) diff -Nru temporal-1.21.5-1/src/service/frontend/interface_mock.go temporal-1.22.5/src/service/frontend/interface_mock.go --- temporal-1.21.5-1/src/service/frontend/interface_mock.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/frontend/interface_mock.go 2024-02-23 09:45:43.000000000 +0000 @@ -1080,27 +1080,3 @@ mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveSearchAttributes", reflect.TypeOf((*MockOperatorHandler)(nil).RemoveSearchAttributes), arg0, arg1) } - -// Start mocks base method. -func (m *MockOperatorHandler) Start() { - m.ctrl.T.Helper() - m.ctrl.Call(m, "Start") -} - -// Start indicates an expected call of Start. -func (mr *MockOperatorHandlerMockRecorder) Start() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Start", reflect.TypeOf((*MockOperatorHandler)(nil).Start)) -} - -// Stop mocks base method. -func (m *MockOperatorHandler) Stop() { - m.ctrl.T.Helper() - m.ctrl.Call(m, "Stop") -} - -// Stop indicates an expected call of Stop. 
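The chainUnaryServerInterceptors and getChainUnaryHandler helpers earlier in this file compose interceptors so that the first entry runs outermost; the sketch below shows the same ordering with an iterative equivalent (the logging labels and demo handler are invented):

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc"
)

// logging returns an interceptor that prints on entry and exit so the
// nesting produced by chaining is visible.
func logging(label string) grpc.UnaryServerInterceptor {
	return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
		fmt.Println("enter", label)
		resp, err := handler(ctx, req)
		fmt.Println("exit", label)
		return resp, err
	}
}

// chain composes interceptors so the first entry is outermost; it is an
// iterative equivalent of the recursive helpers above.
func chain(interceptors []grpc.UnaryServerInterceptor) grpc.UnaryServerInterceptor {
	return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
		wrapped := handler
		for i := len(interceptors) - 1; i >= 0; i-- {
			ic, next := interceptors[i], wrapped
			wrapped = func(c context.Context, r interface{}) (interface{}, error) {
				return ic(c, r, info, next)
			}
		}
		return wrapped(ctx, req)
	}
}

func main() {
	all := chain([]grpc.UnaryServerInterceptor{logging("auth"), logging("metrics")})
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		fmt.Println("handler")
		return "ok", nil
	}
	_, _ = all(context.Background(), "req", &grpc.UnaryServerInfo{FullMethod: "/demo.Service/Method"}, handler)
	// Prints: enter auth, enter metrics, handler, exit metrics, exit auth.
}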
-func (mr *MockOperatorHandlerMockRecorder) Stop() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stop", reflect.TypeOf((*MockOperatorHandler)(nil).Stop)) -} diff -Nru temporal-1.21.5-1/src/service/frontend/operator_handler.go temporal-1.22.5/src/service/frontend/operator_handler.go --- temporal-1.21.5-1/src/service/frontend/operator_handler.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/frontend/operator_handler.go 2024-02-23 09:45:43.000000000 +0000 @@ -28,7 +28,6 @@ "context" "fmt" "sync/atomic" - "time" "golang.org/x/exp/maps" "google.golang.org/grpc/health" @@ -43,7 +42,6 @@ sdkclient "go.temporal.io/sdk/client" "go.temporal.io/server/api/adminservice/v1" - "go.temporal.io/server/api/historyservice/v1" persistencespb "go.temporal.io/server/api/persistence/v1" svc "go.temporal.io/server/client" "go.temporal.io/server/client/admin" @@ -58,6 +56,7 @@ "go.temporal.io/server/common/persistence/visibility/store/elasticsearch" esclient "go.temporal.io/server/common/persistence/visibility/store/elasticsearch/client" "go.temporal.io/server/common/primitives" + "go.temporal.io/server/common/resource" "go.temporal.io/server/common/sdk" "go.temporal.io/server/common/searchattribute" "go.temporal.io/server/common/util" @@ -83,7 +82,7 @@ saProvider searchattribute.Provider saManager searchattribute.Manager healthServer *health.Server - historyClient historyservice.HistoryServiceClient + historyClient resource.HistoryClient clusterMetadataManager persistence.ClusterMetadataManager clusterMetadata clustermetadata.Metadata clientFactory svc.Factory @@ -99,7 +98,7 @@ SaProvider searchattribute.Provider SaManager searchattribute.Manager healthServer *health.Server - historyClient historyservice.HistoryServiceClient + historyClient resource.HistoryClient clusterMetadataManager persistence.ClusterMetadataManager clusterMetadata clustermetadata.Metadata clientFactory svc.Factory @@ -159,9 +158,6 @@ ) (_ *operatorservice.AddSearchAttributesResponse, retError error) { defer log.CapturePanic(h.logger, &retError) - scope, startTime := h.startRequestProfile(metrics.OperatorAddSearchAttributesScope) - defer func() { scope.Timer(metrics.ServiceLatency.GetMetricName()).Record(time.Since(startTime)) }() - // validate request if request == nil { return nil, errRequestNotSet @@ -192,6 +188,7 @@ // `skip-schema-update` is set. This is for backward compatibility using // standard visibility. if h.visibilityMgr.HasStoreName(elasticsearch.PersistenceName) || indexName == "" { + scope := h.metricsHandler.WithTags(metrics.OperationTag(metrics.OperatorAddSearchAttributesScope)) err = h.addSearchAttributesElasticsearch(ctx, request, indexName, currentSearchAttributes) if err != nil { if _, isWorkflowErr := err.(*serviceerror.SystemWorkflow); isWorkflowErr { @@ -548,9 +545,6 @@ ) (_ *operatorservice.DeleteNamespaceResponse, retError error) { defer log.CapturePanic(h.logger, &retError) - scope, startTime := h.startRequestProfile(metrics.OperatorDeleteNamespaceScope) - defer func() { scope.Timer(metrics.ServiceLatency.GetMetricName()).Record(time.Since(startTime)) }() - // validate request if request == nil { return nil, errRequestNotSet @@ -586,6 +580,8 @@ return nil, serviceerror.NewUnavailable(fmt.Sprintf(errUnableToStartWorkflowMessage, deletenamespace.WorkflowName, err)) } + scope := h.metricsHandler.WithTags(metrics.OperationTag(metrics.OperatorDeleteNamespaceScope)) + // Wait for workflow to complete. 
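Here the handler appears to start the deletenamespace system workflow and then wait for it via run.Get; the same start-and-wait pattern through the public Go SDK looks roughly like this (server address defaults, workflow name, task queue and result type are all illustrative):

package main

import (
	"context"
	"log"

	"go.temporal.io/sdk/client"
)

func main() {
	c, err := client.Dial(client.Options{}) // connects to localhost:7233 by default
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	run, err := c.ExecuteWorkflow(context.Background(),
		client.StartWorkflowOptions{ID: "demo-workflow", TaskQueue: "demo-task-queue"},
		"SomeWorkflow", "some input")
	if err != nil {
		log.Fatal(err)
	}

	// Get blocks until the workflow completes and decodes its result.
	var result string
	if err := run.Get(context.Background(), &result); err != nil {
		log.Fatal(err)
	}
	log.Println("workflow result:", result)
}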
var wfResult deletenamespace.DeleteNamespaceWorkflowResult err = run.Get(ctx, &wfResult) @@ -607,8 +603,6 @@ request *operatorservice.AddOrUpdateRemoteClusterRequest, ) (_ *operatorservice.AddOrUpdateRemoteClusterResponse, retError error) { defer log.CapturePanic(h.logger, &retError) - scope, startTime := h.startRequestProfile(metrics.OperatorAddOrUpdateRemoteClusterScope) - defer func() { scope.Timer(metrics.ServiceLatency.GetMetricName()).Record(time.Since(startTime)) }() adminClient := h.clientFactory.NewRemoteAdminClientWithTimeout( request.GetFrontendAddress(), @@ -619,7 +613,6 @@ // Fetch cluster metadata from remote cluster resp, err := adminClient.DescribeCluster(ctx, &adminservice.DescribeClusterRequest{}) if err != nil { - scope.Counter(metrics.ServiceFailures.GetMetricName()).Record(1) return nil, serviceerror.NewUnavailable(fmt.Sprintf( errUnableConnectRemoteClusterMessage, request.GetFrontendAddress(), @@ -629,7 +622,6 @@ err = h.validateRemoteClusterMetadata(resp) if err != nil { - scope.Counter(metrics.ServiceFailures.GetMetricName()).Record(1) return nil, serviceerror.NewInvalidArgument(fmt.Sprintf(errInvalidRemoteClusterInfo, err)) } @@ -644,7 +636,6 @@ case *serviceerror.NotFound: updateRequestVersion = 0 default: - scope.Counter(metrics.ServiceFailures.GetMetricName()).Record(1) return nil, serviceerror.NewInternal(fmt.Sprintf(errUnableToStoreClusterInfo, err)) } @@ -658,15 +649,14 @@ InitialFailoverVersion: resp.GetInitialFailoverVersion(), IsGlobalNamespaceEnabled: resp.GetIsGlobalNamespaceEnabled(), IsConnectionEnabled: request.GetEnableRemoteClusterConnection(), + Tags: resp.GetTags(), }, Version: updateRequestVersion, }) if err != nil { - scope.Counter(metrics.ServiceFailures.GetMetricName()).Record(1) return nil, serviceerror.NewInternal(fmt.Sprintf(errUnableToStoreClusterInfo, err)) } if !applied { - scope.Counter(metrics.ServiceFailures.GetMetricName()).Record(1) return nil, serviceerror.NewInvalidArgument(fmt.Sprintf(errUnableToStoreClusterInfo, err)) } return &operatorservice.AddOrUpdateRemoteClusterResponse{}, nil @@ -677,8 +667,6 @@ request *operatorservice.RemoveRemoteClusterRequest, ) (_ *operatorservice.RemoveRemoteClusterResponse, retError error) { defer log.CapturePanic(h.logger, &retError) - scope, startTime := h.startRequestProfile(metrics.OperatorRemoveRemoteClusterScope) - defer func() { scope.Timer(metrics.ServiceLatency.GetMetricName()).Record(time.Since(startTime)) }() var isClusterNameExist bool for clusterName := range h.clusterMetadata.GetAllClusterInfo() { @@ -695,7 +683,6 @@ ctx, &persistence.DeleteClusterMetadataRequest{ClusterName: request.GetClusterName()}, ); err != nil { - scope.Counter(metrics.ServiceFailures.GetMetricName()).Record(1) return nil, serviceerror.NewInternal(fmt.Sprintf(errUnableToDeleteClusterInfo, err)) } return &operatorservice.RemoveRemoteClusterResponse{}, nil @@ -706,8 +693,6 @@ request *operatorservice.ListClustersRequest, ) (_ *operatorservice.ListClustersResponse, retError error) { defer log.CapturePanic(h.logger, &retError) - scope, startTime := h.startRequestProfile(metrics.OperatorListClustersScope) - defer func() { scope.Timer(metrics.ServiceLatency.GetMetricName()).Record(time.Since(startTime)) }() if request == nil { return nil, errRequestNotSet @@ -721,7 +706,6 @@ NextPageToken: request.GetNextPageToken(), }) if err != nil { - scope.Counter(metrics.ServiceFailures.GetMetricName()).Record(1) return nil, err } @@ -777,10 +761,3 @@ } return nil } - -// startRequestProfile initiates recording of request metrics 
-func (h *OperatorHandlerImpl) startRequestProfile(operation string) (metrics.Handler, time.Time) { - metricsScope := h.metricsHandler.WithTags(metrics.OperationTag(operation)) - metricsScope.Counter(metrics.ServiceRequests.GetMetricName()).Record(1) - return metricsScope, time.Now().UTC() -} diff -Nru temporal-1.21.5-1/src/service/frontend/service.go temporal-1.22.5/src/service/frontend/service.go --- temporal-1.21.5-1/src/service/frontend/service.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/frontend/service.go 2024-02-23 09:45:43.000000000 +0000 @@ -28,16 +28,11 @@ "math/rand" "net" "os" - "sync/atomic" + "sync" "time" "go.temporal.io/api/operatorservice/v1" "go.temporal.io/api/workflowservice/v1" - "google.golang.org/grpc" - "google.golang.org/grpc/health" - healthpb "google.golang.org/grpc/health/grpc_health_v1" - "google.golang.org/grpc/reflection" - "go.temporal.io/server/api/adminservice/v1" "go.temporal.io/server/common" "go.temporal.io/server/common/dynamicconfig" @@ -50,6 +45,10 @@ "go.temporal.io/server/common/persistence/visibility" "go.temporal.io/server/common/persistence/visibility/manager" "go.temporal.io/server/common/util" + "google.golang.org/grpc" + "google.golang.org/grpc/health" + healthpb "google.golang.org/grpc/health/grpc_health_v1" + "google.golang.org/grpc/reflection" ) // Config represents configuration for frontend service @@ -71,10 +70,13 @@ HistoryMaxPageSize dynamicconfig.IntPropertyFnWithNamespaceFilter RPS dynamicconfig.IntPropertyFn + GlobalRPS dynamicconfig.IntPropertyFn + OperatorRPSRatio dynamicconfig.FloatPropertyFn NamespaceReplicationInducingAPIsRPS dynamicconfig.IntPropertyFn MaxNamespaceRPSPerInstance dynamicconfig.IntPropertyFnWithNamespaceFilter MaxNamespaceBurstPerInstance dynamicconfig.IntPropertyFnWithNamespaceFilter - MaxNamespaceCountPerInstance dynamicconfig.IntPropertyFnWithNamespaceFilter + MaxConcurrentLongRunningRequestsPerInstance dynamicconfig.IntPropertyFnWithNamespaceFilter + MaxGlobalConcurrentLongRunningRequests dynamicconfig.IntPropertyFnWithNamespaceFilter MaxNamespaceVisibilityRPSPerInstance dynamicconfig.IntPropertyFnWithNamespaceFilter MaxNamespaceVisibilityBurstPerInstance dynamicconfig.IntPropertyFnWithNamespaceFilter MaxNamespaceNamespaceReplicationInducingAPIsRPSPerInstance dynamicconfig.IntPropertyFnWithNamespaceFilter @@ -205,11 +207,14 @@ HistoryMaxPageSize: dc.GetIntPropertyFilteredByNamespace(dynamicconfig.FrontendHistoryMaxPageSize, common.GetHistoryMaxPageSize), RPS: dc.GetIntProperty(dynamicconfig.FrontendRPS, 2400), + GlobalRPS: dc.GetIntProperty(dynamicconfig.FrontendGlobalRPS, 0), + OperatorRPSRatio: dc.GetFloat64Property(dynamicconfig.OperatorRPSRatio, common.DefaultOperatorRPSRatio), NamespaceReplicationInducingAPIsRPS: dc.GetIntProperty(dynamicconfig.FrontendNamespaceReplicationInducingAPIsRPS, 20), MaxNamespaceRPSPerInstance: dc.GetIntPropertyFilteredByNamespace(dynamicconfig.FrontendMaxNamespaceRPSPerInstance, 2400), MaxNamespaceBurstPerInstance: dc.GetIntPropertyFilteredByNamespace(dynamicconfig.FrontendMaxNamespaceBurstPerInstance, 4800), - MaxNamespaceCountPerInstance: dc.GetIntPropertyFilteredByNamespace(dynamicconfig.FrontendMaxNamespaceCountPerInstance, 1200), + MaxConcurrentLongRunningRequestsPerInstance: dc.GetIntPropertyFilteredByNamespace(dynamicconfig.FrontendMaxConcurrentLongRunningRequestsPerInstance, 1200), + MaxGlobalConcurrentLongRunningRequests: dc.GetIntPropertyFilteredByNamespace(dynamicconfig.FrontendGlobalMaxConcurrentLongRunningRequests, 0), 
MaxNamespaceVisibilityRPSPerInstance: dc.GetIntPropertyFilteredByNamespace(dynamicconfig.FrontendMaxNamespaceVisibilityRPSPerInstance, 10), MaxNamespaceVisibilityBurstPerInstance: dc.GetIntPropertyFilteredByNamespace(dynamicconfig.FrontendMaxNamespaceVisibilityBurstPerInstance, 10), MaxNamespaceNamespaceReplicationInducingAPIsRPSPerInstance: dc.GetIntPropertyFilteredByNamespace(dynamicconfig.FrontendMaxNamespaceNamespaceReplicationInducingAPIsRPSPerInstance, 1), @@ -274,7 +279,6 @@ // Service represents the frontend service type Service struct { - status int32 config *Config healthServer *health.Server @@ -284,6 +288,7 @@ versionChecker *VersionChecker visibilityManager manager.VisibilityManager server *grpc.Server + httpAPIServer *HTTPAPIServer logger log.Logger grpcListener net.Listener @@ -296,6 +301,7 @@ serviceConfig *Config, server *grpc.Server, healthServer *health.Server, + httpAPIServer *HTTPAPIServer, handler Handler, adminHandler *AdminHandler, operatorHandler *OperatorHandlerImpl, @@ -308,10 +314,10 @@ membershipMonitor membership.Monitor, ) *Service { return &Service{ - status: common.DaemonStatusInitialized, config: serviceConfig, server: server, healthServer: healthServer, + httpAPIServer: httpAPIServer, handler: handler, adminHandler: adminHandler, operatorHandler: operatorHandler, @@ -327,12 +333,7 @@ // Start starts the service func (s *Service) Start() { - if !atomic.CompareAndSwapInt32(&s.status, common.DaemonStatusInitialized, common.DaemonStatusStarted) { - return - } - - logger := s.logger - logger.Info("frontend starting") + s.logger.Info("frontend starting") healthpb.RegisterHealthServer(s.server, s.healthServer) workflowservice.RegisterWorkflowServiceServer(s.server, s.handler) @@ -350,22 +351,26 @@ s.operatorHandler.Start() s.handler.Start() - go s.membershipMonitor.Start() - - logger.Info("Starting to serve on frontend listener") - if err := s.server.Serve(s.grpcListener); err != nil { - logger.Fatal("Failed to serve on frontend listener", tag.Error(err)) + go func() { + s.logger.Info("Starting to serve on frontend listener") + if err := s.server.Serve(s.grpcListener); err != nil { + s.logger.Fatal("Failed to serve on frontend listener", tag.Error(err)) + } + }() + + if s.httpAPIServer != nil { + go func() { + if err := s.httpAPIServer.Serve(); err != nil { + s.logger.Fatal("Failed to serve HTTP API server", tag.Error(err)) + } + }() } + + go s.membershipMonitor.Start() } // Stop stops the service func (s *Service) Stop() { - logger := s.logger - - if !atomic.CompareAndSwapInt32(&s.status, common.DaemonStatusStarted, common.DaemonStatusStopped) { - return - } - // initiate graceful shutdown: // 1. Fail rpc health check, this will cause client side load balancer to stop forwarding requests to this node // 2. 
wait for failure detection time @@ -376,10 +381,10 @@ requestDrainTime := util.Max(time.Second, s.config.ShutdownDrainDuration()) failureDetectionTime := util.Max(0, s.config.ShutdownFailHealthCheckDuration()) - logger.Info("ShutdownHandler: Updating gRPC health status to ShuttingDown") + s.logger.Info("ShutdownHandler: Updating gRPC health status to ShuttingDown") s.healthServer.Shutdown() - logger.Info("ShutdownHandler: Waiting for others to discover I am unhealthy") + s.logger.Info("ShutdownHandler: Waiting for others to discover I am unhealthy") time.Sleep(failureDetectionTime) s.handler.Stop() @@ -388,50 +393,33 @@ s.versionChecker.Stop() s.visibilityManager.Close() - logger.Info("ShutdownHandler: Draining traffic") - t := time.AfterFunc(requestDrainTime, func() { - logger.Info("ShutdownHandler: Drain time expired, stopping all traffic") - s.server.Stop() - }) - s.server.GracefulStop() - t.Stop() - - if s.metricsHandler != nil { - s.metricsHandler.Stop(logger) - } - - logger.Info("frontend stopped") -} - -func namespaceRPS( - perInstanceRPSFn dynamicconfig.IntPropertyFnWithNamespaceFilter, - globalRPSFn dynamicconfig.IntPropertyFnWithNamespaceFilter, - frontendResolver membership.ServiceResolver, - namespace string, -) float64 { - globalRPS := float64(globalRPSFn(namespace)) - if globalRPS > 0 && frontendResolver != nil { - hosts := float64(numFrontendHosts(frontendResolver)) - return globalRPS / hosts + s.logger.Info("ShutdownHandler: Draining traffic") + // Gracefully stop gRPC server and HTTP API server concurrently + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + t := time.AfterFunc(requestDrainTime, func() { + s.logger.Info("ShutdownHandler: Drain time expired, stopping all traffic") + s.server.Stop() + }) + s.server.GracefulStop() + t.Stop() + }() + if s.httpAPIServer != nil { + wg.Add(1) + go func() { + defer wg.Done() + s.httpAPIServer.GracefulStop(requestDrainTime) + }() } + wg.Wait() - hostRPS := float64(perInstanceRPSFn(namespace)) - return hostRPS -} - -func numFrontendHosts( - frontendResolver membership.ServiceResolver, -) int { - defaultHosts := 1 - if frontendResolver == nil { - return defaultHosts + if s.metricsHandler != nil { + s.metricsHandler.Stop(s.logger) } - ringSize := frontendResolver.MemberCount() - if ringSize < defaultHosts { - return defaultHosts - } - return ringSize + s.logger.Info("frontend stopped") } func (s *Service) GetFaultInjection() *client.FaultInjectionDataStoreFactory { diff -Nru temporal-1.21.5-1/src/service/frontend/versionChecker.go temporal-1.22.5/src/service/frontend/versionChecker.go --- temporal-1.21.5-1/src/service/frontend/versionChecker.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/frontend/versionChecker.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,222 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package frontend - -import ( - "context" - "runtime" - "sync" - "time" - - enumspb "go.temporal.io/api/enums/v1" - "go.temporal.io/api/serviceerror" - versionpb "go.temporal.io/api/version/v1" - "go.temporal.io/version/check" - - "go.temporal.io/server/common/headers" - "go.temporal.io/server/common/metrics" - "go.temporal.io/server/common/persistence" - "go.temporal.io/server/common/primitives/timestamp" - "go.temporal.io/server/common/rpc/interceptor" -) - -const VersionCheckInterval = 24 * time.Hour - -type VersionChecker struct { - config *Config - shutdownChan chan struct{} - metricsHandler metrics.Handler - clusterMetadataManager persistence.ClusterMetadataManager - startOnce sync.Once - stopOnce sync.Once - sdkVersionRecorder *interceptor.SDKVersionInterceptor -} - -func NewVersionChecker( - config *Config, - metricsHandler metrics.Handler, - clusterMetadataManager persistence.ClusterMetadataManager, - sdkVersionRecorder *interceptor.SDKVersionInterceptor, -) *VersionChecker { - return &VersionChecker{ - config: config, - shutdownChan: make(chan struct{}), - metricsHandler: metricsHandler.WithTags(metrics.OperationTag(metrics.VersionCheckScope)), - clusterMetadataManager: clusterMetadataManager, - sdkVersionRecorder: sdkVersionRecorder, - } -} - -func (vc *VersionChecker) Start() { - if vc.config.EnableServerVersionCheck() { - vc.startOnce.Do(func() { - // TODO: specify a timeout for the context - ctx := headers.SetCallerInfo( - context.TODO(), - headers.SystemBackgroundCallerInfo, - ) - - go vc.versionCheckLoop(ctx) - }) - } -} - -func (vc *VersionChecker) Stop() { - if vc.config.EnableServerVersionCheck() { - vc.stopOnce.Do(func() { - close(vc.shutdownChan) - }) - } -} - -func (vc *VersionChecker) versionCheckLoop( - ctx context.Context, -) { - timer := time.NewTicker(VersionCheckInterval) - defer timer.Stop() - vc.performVersionCheck(ctx) - for { - select { - case <-vc.shutdownChan: - return - case <-timer.C: - vc.performVersionCheck(ctx) - } - } -} - -func (vc *VersionChecker) performVersionCheck( - ctx context.Context, -) { - startTime := time.Now().UTC() - defer func() { - vc.metricsHandler.Timer(metrics.VersionCheckLatency.GetMetricName()).Record(time.Since(startTime)) - }() - metadata, err := vc.clusterMetadataManager.GetCurrentClusterMetadata(ctx) - if err != nil { - vc.metricsHandler.Counter(metrics.VersionCheckFailedCount.GetMetricName()).Record(1) - return - } - - if !isUpdateNeeded(metadata) { - 
return - } - - req, err := vc.createVersionCheckRequest(metadata) - if err != nil { - vc.metricsHandler.Counter(metrics.VersionCheckFailedCount.GetMetricName()).Record(1) - return - } - resp, err := vc.getVersionInfo(req) - if err != nil { - vc.metricsHandler.Counter(metrics.VersionCheckRequestFailedCount.GetMetricName()).Record(1) - vc.metricsHandler.Counter(metrics.VersionCheckFailedCount.GetMetricName()).Record(1) - return - } - err = vc.saveVersionInfo(ctx, resp) - if err != nil { - vc.metricsHandler.Counter(metrics.VersionCheckFailedCount.GetMetricName()).Record(1) - return - } - vc.metricsHandler.Counter(metrics.VersionCheckSuccessCount.GetMetricName()).Record(1) -} - -func isUpdateNeeded(metadata *persistence.GetClusterMetadataResponse) bool { - return metadata.VersionInfo == nil || (metadata.VersionInfo.LastUpdateTime != nil && - metadata.VersionInfo.LastUpdateTime.Before(time.Now().Add(-time.Hour))) -} - -func (vc *VersionChecker) createVersionCheckRequest(metadata *persistence.GetClusterMetadataResponse) (*check.VersionCheckRequest, error) { - return &check.VersionCheckRequest{ - Product: headers.ClientNameServer, - Version: headers.ServerVersion, - Arch: runtime.GOARCH, - OS: runtime.GOOS, - DB: vc.clusterMetadataManager.GetName(), - ClusterID: metadata.ClusterId, - Timestamp: time.Now().UnixNano(), - SDKInfo: vc.sdkVersionRecorder.GetAndResetSDKInfo(), - }, nil -} - -func (vc *VersionChecker) getVersionInfo(req *check.VersionCheckRequest) (*check.VersionCheckResponse, error) { - return check.NewCaller().Call(req) -} - -func (vc *VersionChecker) saveVersionInfo(ctx context.Context, resp *check.VersionCheckResponse) error { - metadata, err := vc.clusterMetadataManager.GetCurrentClusterMetadata(ctx) - if err != nil { - return err - } - // TODO(bergundy): Extract and save version info per SDK - versionInfo, err := toVersionInfo(resp) - if err != nil { - return err - } - metadata.VersionInfo = versionInfo - saved, err := vc.clusterMetadataManager.SaveClusterMetadata(ctx, &persistence.SaveClusterMetadataRequest{ - ClusterMetadata: metadata.ClusterMetadata, Version: metadata.Version}) - if err != nil { - return err - } - if !saved { - return serviceerror.NewUnavailable("version info update hasn't been applied") - } - return nil -} - -func toVersionInfo(resp *check.VersionCheckResponse) (*versionpb.VersionInfo, error) { - for _, product := range resp.Products { - if product.Product == headers.ClientNameServer { - return &versionpb.VersionInfo{ - Current: convertReleaseInfo(product.Current), - Recommended: convertReleaseInfo(product.Recommended), - Instructions: product.Instructions, - Alerts: convertAlerts(product.Alerts), - LastUpdateTime: timestamp.TimePtr(time.Now().UTC()), - }, nil - } - } - return nil, serviceerror.NewNotFound("version info update was not found in response") -} - -func convertAlerts(alerts []check.Alert) []*versionpb.Alert { - var result []*versionpb.Alert - for _, alert := range alerts { - result = append(result, &versionpb.Alert{ - Message: alert.Message, - Severity: enumspb.Severity(alert.Severity), - }) - } - return result -} - -func convertReleaseInfo(releaseInfo check.ReleaseInfo) *versionpb.ReleaseInfo { - return &versionpb.ReleaseInfo{ - Version: releaseInfo.Version, - ReleaseTime: timestamp.UnixOrZeroTimePtr(releaseInfo.ReleaseTime), - Notes: releaseInfo.Notes, - } -} diff -Nru temporal-1.21.5-1/src/service/frontend/version_checker.go temporal-1.22.5/src/service/frontend/version_checker.go --- temporal-1.21.5-1/src/service/frontend/version_checker.go 
1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/service/frontend/version_checker.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,222 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package frontend + +import ( + "context" + "runtime" + "sync" + "time" + + enumspb "go.temporal.io/api/enums/v1" + "go.temporal.io/api/serviceerror" + versionpb "go.temporal.io/api/version/v1" + "go.temporal.io/version/check" + + "go.temporal.io/server/common/headers" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/persistence" + "go.temporal.io/server/common/primitives/timestamp" + "go.temporal.io/server/common/rpc/interceptor" +) + +const VersionCheckInterval = 24 * time.Hour + +type VersionChecker struct { + config *Config + shutdownChan chan struct{} + metricsHandler metrics.Handler + clusterMetadataManager persistence.ClusterMetadataManager + startOnce sync.Once + stopOnce sync.Once + sdkVersionRecorder *interceptor.SDKVersionInterceptor +} + +func NewVersionChecker( + config *Config, + metricsHandler metrics.Handler, + clusterMetadataManager persistence.ClusterMetadataManager, + sdkVersionRecorder *interceptor.SDKVersionInterceptor, +) *VersionChecker { + return &VersionChecker{ + config: config, + shutdownChan: make(chan struct{}), + metricsHandler: metricsHandler.WithTags(metrics.OperationTag(metrics.VersionCheckScope)), + clusterMetadataManager: clusterMetadataManager, + sdkVersionRecorder: sdkVersionRecorder, + } +} + +func (vc *VersionChecker) Start() { + if vc.config.EnableServerVersionCheck() { + vc.startOnce.Do(func() { + // TODO: specify a timeout for the context + ctx := headers.SetCallerInfo( + context.TODO(), + headers.SystemBackgroundCallerInfo, + ) + + go vc.versionCheckLoop(ctx) + }) + } +} + +func (vc *VersionChecker) Stop() { + if vc.config.EnableServerVersionCheck() { + vc.stopOnce.Do(func() { + close(vc.shutdownChan) + }) + } +} + +func (vc *VersionChecker) versionCheckLoop( + ctx context.Context, +) { + timer := time.NewTicker(VersionCheckInterval) + defer timer.Stop() + vc.performVersionCheck(ctx) + for { + select { + case <-vc.shutdownChan: + return + case <-timer.C: + vc.performVersionCheck(ctx) + } + } +} + +func (vc *VersionChecker) performVersionCheck( + ctx context.Context, +) { + startTime := time.Now().UTC() + defer func() { + 
vc.metricsHandler.Timer(metrics.VersionCheckLatency.GetMetricName()).Record(time.Since(startTime)) + }() + metadata, err := vc.clusterMetadataManager.GetCurrentClusterMetadata(ctx) + if err != nil { + vc.metricsHandler.Counter(metrics.VersionCheckFailedCount.GetMetricName()).Record(1) + return + } + + if !isUpdateNeeded(metadata) { + return + } + + req, err := vc.createVersionCheckRequest(metadata) + if err != nil { + vc.metricsHandler.Counter(metrics.VersionCheckFailedCount.GetMetricName()).Record(1) + return + } + resp, err := vc.getVersionInfo(req) + if err != nil { + vc.metricsHandler.Counter(metrics.VersionCheckRequestFailedCount.GetMetricName()).Record(1) + vc.metricsHandler.Counter(metrics.VersionCheckFailedCount.GetMetricName()).Record(1) + return + } + err = vc.saveVersionInfo(ctx, resp) + if err != nil { + vc.metricsHandler.Counter(metrics.VersionCheckFailedCount.GetMetricName()).Record(1) + return + } + vc.metricsHandler.Counter(metrics.VersionCheckSuccessCount.GetMetricName()).Record(1) +} + +func isUpdateNeeded(metadata *persistence.GetClusterMetadataResponse) bool { + return metadata.VersionInfo == nil || (metadata.VersionInfo.LastUpdateTime != nil && + metadata.VersionInfo.LastUpdateTime.Before(time.Now().Add(-time.Hour))) +} + +func (vc *VersionChecker) createVersionCheckRequest(metadata *persistence.GetClusterMetadataResponse) (*check.VersionCheckRequest, error) { + return &check.VersionCheckRequest{ + Product: headers.ClientNameServer, + Version: headers.ServerVersion, + Arch: runtime.GOARCH, + OS: runtime.GOOS, + DB: vc.clusterMetadataManager.GetName(), + ClusterID: metadata.ClusterId, + Timestamp: time.Now().UnixNano(), + SDKInfo: vc.sdkVersionRecorder.GetAndResetSDKInfo(), + }, nil +} + +func (vc *VersionChecker) getVersionInfo(req *check.VersionCheckRequest) (*check.VersionCheckResponse, error) { + return check.NewCaller().Call(req) +} + +func (vc *VersionChecker) saveVersionInfo(ctx context.Context, resp *check.VersionCheckResponse) error { + metadata, err := vc.clusterMetadataManager.GetCurrentClusterMetadata(ctx) + if err != nil { + return err + } + // TODO(bergundy): Extract and save version info per SDK + versionInfo, err := toVersionInfo(resp) + if err != nil { + return err + } + metadata.VersionInfo = versionInfo + saved, err := vc.clusterMetadataManager.SaveClusterMetadata(ctx, &persistence.SaveClusterMetadataRequest{ + ClusterMetadata: metadata.ClusterMetadata, Version: metadata.Version}) + if err != nil { + return err + } + if !saved { + return serviceerror.NewUnavailable("version info update hasn't been applied") + } + return nil +} + +func toVersionInfo(resp *check.VersionCheckResponse) (*versionpb.VersionInfo, error) { + for _, product := range resp.Products { + if product.Product == headers.ClientNameServer { + return &versionpb.VersionInfo{ + Current: convertReleaseInfo(product.Current), + Recommended: convertReleaseInfo(product.Recommended), + Instructions: product.Instructions, + Alerts: convertAlerts(product.Alerts), + LastUpdateTime: timestamp.TimePtr(time.Now().UTC()), + }, nil + } + } + return nil, serviceerror.NewNotFound("version info update was not found in response") +} + +func convertAlerts(alerts []check.Alert) []*versionpb.Alert { + var result []*versionpb.Alert + for _, alert := range alerts { + result = append(result, &versionpb.Alert{ + Message: alert.Message, + Severity: enumspb.Severity(alert.Severity), + }) + } + return result +} + +func convertReleaseInfo(releaseInfo check.ReleaseInfo) *versionpb.ReleaseInfo { + return 
&versionpb.ReleaseInfo{ + Version: releaseInfo.Version, + ReleaseTime: timestamp.UnixOrZeroTimePtr(releaseInfo.ReleaseTime), + Notes: releaseInfo.Notes, + } +} diff -Nru temporal-1.21.5-1/src/service/frontend/workflow_handler.go temporal-1.22.5/src/service/frontend/workflow_handler.go --- temporal-1.21.5-1/src/service/frontend/workflow_handler.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/frontend/workflow_handler.go 2024-02-23 09:45:43.000000000 +0000 @@ -75,6 +75,7 @@ "go.temporal.io/server/common/payloads" "go.temporal.io/server/common/persistence" "go.temporal.io/server/common/persistence/serialization" + "go.temporal.io/server/common/persistence/versionhistory" "go.temporal.io/server/common/persistence/visibility" "go.temporal.io/server/common/persistence/visibility/manager" "go.temporal.io/server/common/persistence/visibility/store" @@ -84,6 +85,7 @@ "go.temporal.io/server/common/rpc/interceptor" "go.temporal.io/server/common/sdk" "go.temporal.io/server/common/searchattribute" + "go.temporal.io/server/common/tasktoken" "go.temporal.io/server/common/util" "go.temporal.io/server/service/worker/batcher" "go.temporal.io/server/service/worker/scheduler" @@ -368,7 +370,9 @@ } if request.GetRequestId() == "" { - return nil, errRequestIDNotSet + // For easy direct API use, we default the request ID here but expect all + // SDKs and other auto-retrying clients to set it + request.RequestId = uuid.New() } if len(request.GetRequestId()) > wh.config.MaxIDLengthLimit() { @@ -455,24 +459,34 @@ execution *commonpb.WorkflowExecution, expectedNextEventID int64, currentBranchToken []byte, - ) ([]byte, string, int64, int64, bool, error) { + versionHistoryItem *historyspb.VersionHistoryItem, + ) ([]byte, string, int64, int64, bool, *historyspb.VersionHistoryItem, error) { response, err := wh.historyClient.PollMutableState(ctx, &historyservice.PollMutableStateRequest{ NamespaceId: namespaceUUID.String(), Execution: execution, ExpectedNextEventId: expectedNextEventID, CurrentBranchToken: currentBranchToken, + VersionHistoryItem: versionHistoryItem, }) if err != nil { - return nil, "", 0, 0, false, err + return nil, "", 0, 0, false, nil, err } isWorkflowRunning := response.GetWorkflowStatus() == enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING - + currentVersionHistory, err := versionhistory.GetCurrentVersionHistory(response.GetVersionHistories()) + if err != nil { + return nil, "", 0, 0, false, nil, err + } + lastVersionHistoryItem, err := versionhistory.GetLastVersionHistoryItem(currentVersionHistory) + if err != nil { + return nil, "", 0, 0, false, nil, err + } return response.CurrentBranchToken, response.Execution.GetRunId(), response.GetLastFirstEventId(), response.GetNextEventId(), isWorkflowRunning, + lastVersionHistoryItem, nil } @@ -504,8 +518,8 @@ if !isCloseEventOnly { queryNextEventID = continuationToken.GetNextEventId() } - continuationToken.BranchToken, _, lastFirstEventID, nextEventID, isWorkflowRunning, err = - queryHistory(namespaceID, execution, queryNextEventID, continuationToken.BranchToken) + continuationToken.BranchToken, _, lastFirstEventID, nextEventID, isWorkflowRunning, continuationToken.VersionHistoryItem, err = + queryHistory(namespaceID, execution, queryNextEventID, continuationToken.BranchToken, continuationToken.VersionHistoryItem) if err != nil { return nil, err } @@ -518,8 +532,8 @@ if !isCloseEventOnly { queryNextEventID = common.FirstEventID } - continuationToken.BranchToken, runID, lastFirstEventID, nextEventID, isWorkflowRunning, err = - 
queryHistory(namespaceID, execution, queryNextEventID, nil) + continuationToken.BranchToken, runID, lastFirstEventID, nextEventID, isWorkflowRunning, continuationToken.VersionHistoryItem, err = + queryHistory(namespaceID, execution, queryNextEventID, nil, nil) if err != nil { return nil, err } @@ -721,21 +735,33 @@ execution *commonpb.WorkflowExecution, expectedNextEventID int64, currentBranchToken []byte, - ) ([]byte, string, int64, error) { + versionHistoryItem *historyspb.VersionHistoryItem, + ) ([]byte, string, int64, *historyspb.VersionHistoryItem, error) { response, err := wh.historyClient.PollMutableState(ctx, &historyservice.PollMutableStateRequest{ NamespaceId: namespaceUUID.String(), Execution: execution, ExpectedNextEventId: expectedNextEventID, CurrentBranchToken: currentBranchToken, + VersionHistoryItem: versionHistoryItem, }) if err != nil { - return nil, "", 0, err + return nil, "", 0, nil, err + } + + currentVersionHistory, err := versionhistory.GetCurrentVersionHistory(response.GetVersionHistories()) + if err != nil { + return nil, "", 0, nil, err + } + lastVersionHistoryItem, err := versionhistory.GetLastVersionHistoryItem(currentVersionHistory) + if err != nil { + return nil, "", 0, nil, err } return response.CurrentBranchToken, response.Execution.GetRunId(), response.GetLastFirstEventTxnId(), + lastVersionHistoryItem, nil } @@ -747,8 +773,8 @@ if request.NextPageToken == nil { continuationToken = &tokenspb.HistoryContinuation{} - continuationToken.BranchToken, runID, lastFirstTxnID, err = - queryMutableState(namespaceID, execution, common.FirstEventID, nil) + continuationToken.BranchToken, runID, lastFirstTxnID, continuationToken.VersionHistoryItem, err = + queryMutableState(namespaceID, execution, common.FirstEventID, nil, nil) if err != nil { return nil, err } @@ -961,15 +987,17 @@ ResetHistoryEventId: histResp.ResetHistoryEventId, } if request.GetReturnNewWorkflowTask() && histResp != nil && histResp.StartedResponse != nil { - taskToken := &tokenspb.Task{ - NamespaceId: taskToken.GetNamespaceId(), - WorkflowId: taskToken.GetWorkflowId(), - RunId: taskToken.GetRunId(), - ScheduledEventId: histResp.StartedResponse.GetScheduledEventId(), - StartedEventId: histResp.StartedResponse.GetStartedEventId(), - StartedTime: histResp.StartedResponse.GetStartedTime(), - Attempt: histResp.StartedResponse.GetAttempt(), - } + taskToken := tasktoken.NewWorkflowTaskToken( + taskToken.GetNamespaceId(), + taskToken.GetWorkflowId(), + taskToken.GetRunId(), + histResp.StartedResponse.GetScheduledEventId(), + histResp.StartedResponse.GetStartedEventId(), + histResp.StartedResponse.GetStartedTime(), + histResp.StartedResponse.GetAttempt(), + histResp.StartedResponse.GetClock(), + histResp.StartedResponse.GetVersion(), + ) token, err := wh.tokenSerializer.Serialize(taskToken) if err != nil { return nil, err @@ -1250,14 +1278,17 @@ return nil, errActivityIDNotSet } - taskToken := &tokenspb.Task{ - NamespaceId: namespaceID.String(), - RunId: runID, - WorkflowId: workflowID, - ScheduledEventId: common.EmptyEventID, - ActivityId: activityID, - Attempt: 1, - } + taskToken := tasktoken.NewActivityTaskToken( + namespaceID.String(), + workflowID, + runID, + common.EmptyEventID, + activityID, + "", + 1, + nil, + common.EmptyVersion, + ) token, err := wh.tokenSerializer.Serialize(taskToken) if err != nil { return nil, err @@ -1414,14 +1445,17 @@ return nil, errIdentityTooLong } - taskToken := &tokenspb.Task{ - NamespaceId: namespaceID.String(), - RunId: runID, - WorkflowId: workflowID, - 
ScheduledEventId: common.EmptyEventID, - ActivityId: activityID, - Attempt: 1, - } + taskToken := tasktoken.NewActivityTaskToken( + namespaceID.String(), + workflowID, + runID, + common.EmptyEventID, + activityID, + "", + 1, + nil, + common.EmptyVersion, + ) token, err := wh.tokenSerializer.Serialize(taskToken) if err != nil { return nil, err @@ -1595,14 +1629,17 @@ return nil, errIdentityTooLong } - taskToken := &tokenspb.Task{ - NamespaceId: namespaceID.String(), - RunId: runID, - WorkflowId: workflowID, - ScheduledEventId: common.EmptyEventID, - ActivityId: activityID, - Attempt: 1, - } + taskToken := tasktoken.NewActivityTaskToken( + namespaceID.String(), + workflowID, + runID, + common.EmptyEventID, + activityID, + "", + 1, + nil, + common.EmptyVersion, + ) token, err := wh.tokenSerializer.Serialize(taskToken) if err != nil { return nil, err @@ -1768,14 +1805,17 @@ return nil, errIdentityTooLong } - taskToken := &tokenspb.Task{ - NamespaceId: namespaceID.String(), - RunId: runID, - WorkflowId: workflowID, - ScheduledEventId: common.EmptyEventID, - ActivityId: activityID, - Attempt: 1, - } + taskToken := tasktoken.NewActivityTaskToken( + namespaceID.String(), + workflowID, + runID, + common.EmptyEventID, + activityID, + "", + 1, + nil, + common.EmptyVersion, + ) token, err := wh.tokenSerializer.Serialize(taskToken) if err != nil { return nil, err @@ -2462,7 +2502,8 @@ } resp := &workflowservice.CountWorkflowExecutionsResponse{ - Count: persistenceResp.Count, + Count: persistenceResp.Count, + Groups: persistenceResp.Groups, } return resp, nil } @@ -2761,6 +2802,7 @@ EagerWorkflowStart: true, SdkMetadata: true, BuildIdBasedVersioning: true, + CountGroupByExecutionStatus: true, }, }, nil } @@ -3011,108 +3053,62 @@ } // then query to get current state from the workflow itself - // TODO: turn the refresh path into a synchronous update so we don't have to retry in a loop - sentRefresh := make(map[commonpb.WorkflowExecution]struct{}) - // limit how many signals we send, separate from the retry policy (which is used to retry - // the query if the signal was not received or processed yet) - signalsLeft := 1 - var queryResponse schedspb.DescribeResponse - - op := func(ctx context.Context) error { - req := &historyservice.QueryWorkflowRequest{ - NamespaceId: namespaceID.String(), - Request: &workflowservice.QueryWorkflowRequest{ - Namespace: request.Namespace, - Execution: execution, - Query: &querypb.WorkflowQuery{QueryType: scheduler.QueryNameDescribe}, - }, - } - res, err := wh.historyClient.QueryWorkflow(ctx, req) - if err != nil { - return err - } - - queryResponse.Reset() - err = payloads.Decode(res.GetResponse().GetQueryResult(), &queryResponse) - if err != nil { - return err - } - - // Search attributes in the Action are already in external ("aliased") form. Do not alias them here. 
- - // for all running workflows started by the schedule, we should check that they're - // still running, and if not, poke the schedule to refresh - needRefresh := false - for _, ex := range queryResponse.GetInfo().GetRunningWorkflows() { - if _, ok := sentRefresh[*ex]; ok { - // we asked the schedule to refresh this one because it wasn't running, but - // it's still reporting it as running - return errWaitForRefresh - } + req := &historyservice.QueryWorkflowRequest{ + NamespaceId: namespaceID.String(), + Request: &workflowservice.QueryWorkflowRequest{ + Namespace: request.Namespace, + Execution: execution, + Query: &querypb.WorkflowQuery{QueryType: scheduler.QueryNameDescribe}, + }, + } + res, err := wh.historyClient.QueryWorkflow(ctx, req) + if err != nil { + return nil, err + } - // we'll usually have just zero or one of these so we can just do them sequentially - if msResponse, err := wh.historyClient.GetMutableState(ctx, &historyservice.GetMutableStateRequest{ - NamespaceId: namespaceID.String(), - // Note: do not send runid here so that we always get the latest one - Execution: &commonpb.WorkflowExecution{WorkflowId: ex.WorkflowId}, - }); err != nil { - switch err.(type) { - case *serviceerror.NotFound: - // if it doesn't exist (past retention period?) it's certainly not running - needRefresh = true - sentRefresh[*ex] = struct{}{} - default: - return err - } - } else if msResponse.WorkflowStatus != enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING || - msResponse.FirstExecutionRunId != ex.RunId { - // there is no running execution of this workflow id, or there is a running - // execution, but it's not part of the chain that we started. - // either way, the workflow that we started is not running. - needRefresh = true - sentRefresh[*ex] = struct{}{} - } - } + var queryResponse schedspb.DescribeResponse + err = payloads.Decode(res.GetResponse().GetQueryResult(), &queryResponse) + if err != nil { + return nil, err + } - if !needRefresh || signalsLeft == 0 { - return nil - } - signalsLeft-- + // Search attributes in the Action are already in external ("aliased") form. Do not alias them here. 
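The rewritten code below trims the reported running workflows with util.FilterSlice, keeping only those the predicate confirms are still running; a generic stand-in with the same keep-if-true behaviour (this version always allocates a new slice, which may differ from the real helper):

package main

import "fmt"

// filterSlice returns the elements of in for which keep returns true.
func filterSlice[T any](in []T, keep func(T) bool) []T {
	out := make([]T, 0, len(in))
	for _, v := range in {
		if keep(v) {
			out = append(out, v)
		}
	}
	return out
}

func main() {
	running := []string{"wf-1", "wf-2", "wf-3"} // stand-ins for workflow IDs
	stillRunning := func(id string) bool { return id != "wf-2" }
	fmt.Println(filterSlice(running, stillRunning)) // [wf-1 wf-3]
}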
- // poke to refresh - _, err = wh.historyClient.SignalWorkflowExecution(ctx, &historyservice.SignalWorkflowExecutionRequest{ + // for all running workflows started by the schedule, we should check that they're still running + origLen := len(queryResponse.Info.RunningWorkflows) + queryResponse.Info.RunningWorkflows = util.FilterSlice(queryResponse.Info.RunningWorkflows, func(ex *commonpb.WorkflowExecution) bool { + // we'll usually have just zero or one of these so we can just do them sequentially + msResponse, err := wh.historyClient.GetMutableState(ctx, &historyservice.GetMutableStateRequest{ NamespaceId: namespaceID.String(), - SignalRequest: &workflowservice.SignalWorkflowExecutionRequest{ - Namespace: request.Namespace, - WorkflowExecution: execution, - SignalName: scheduler.SignalNameRefresh, - Identity: "internal refresh from describe request", - RequestId: uuid.New(), - }, + // Note: do not send runid here so that we always get the latest one + Execution: &commonpb.WorkflowExecution{WorkflowId: ex.WorkflowId}, }) if err != nil { - return err - } - - return errWaitForRefresh - } - - // wait up to 4 seconds or rpc deadline minus 1 second, but at least 1 second - expiration := 4 * time.Second - if deadline, ok := ctx.Deadline(); ok { - remaining := time.Until(deadline) - 1*time.Second - expiration = util.Min(expiration, remaining) - } - expiration = util.Max(expiration, 1*time.Second) - policy := backoff.NewExponentialRetryPolicy(200 * time.Millisecond). - WithExpirationInterval(expiration) - isWaitErr := func(e error) bool { return e == errWaitForRefresh } + // if it's not found, it's certainly not running, so return false. if we got + // another error, we don't know the state so assume it's still running. + return !common.IsNotFoundError(err) + } + // return true if it is still running and is part of the chain the schedule started + return msResponse.WorkflowStatus == enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING && + msResponse.FirstExecutionRunId == ex.RunId + }) - err = backoff.ThrottleRetryContext(ctx, op, policy, isWaitErr) - // if we still got errWaitForRefresh that means we used up our retries, just return - // whatever we have - if err != nil && err != errWaitForRefresh { - return nil, err + if len(queryResponse.Info.RunningWorkflows) < origLen { + // we noticed some "running workflows" aren't running anymore. poke the workflow to + // refresh, but don't wait for the state to change. ignore errors. 
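Because that refresh is best-effort, it is sent on a goroutine whose context is detached from the inbound request; a minimal sketch of the detach-and-forget pattern (the notify function and timings are invented, and the real code also stamps caller info onto the background context):

package main

import (
	"context"
	"fmt"
	"time"
)

// notifyRefresh stands in for the SignalWorkflowExecution call; all it needs
// is a context that is not tied to the inbound request.
func notifyRefresh(ctx context.Context) {
	select {
	case <-ctx.Done():
		fmt.Println("refresh dropped:", ctx.Err())
	case <-time.After(10 * time.Millisecond):
		fmt.Println("refresh sent")
	}
}

// handleDescribe returns to the caller immediately and lets the best-effort
// refresh run on a background context.
func handleDescribe(reqCtx context.Context) {
	_ = reqCtx // intentionally not used for the best-effort signal
	go notifyRefresh(context.Background())
}

func main() {
	reqCtx, cancel := context.WithCancel(context.Background())
	handleDescribe(reqCtx)
	cancel()                          // the RPC finishes right away
	time.Sleep(50 * time.Millisecond) // give the goroutine time to complete
	// Prints "refresh sent" even though the request context was cancelled.
}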
+ go func() { + disconnectedCtx := headers.SetCallerInfo(context.Background(), headers.NewBackgroundCallerInfo(request.Namespace)) + _, _ = wh.historyClient.SignalWorkflowExecution(disconnectedCtx, &historyservice.SignalWorkflowExecutionRequest{ + NamespaceId: namespaceID.String(), + SignalRequest: &workflowservice.SignalWorkflowExecutionRequest{ + Namespace: request.Namespace, + WorkflowExecution: execution, + SignalName: scheduler.SignalNameRefresh, + Identity: "internal refresh from describe request", + RequestId: uuid.New(), + }, + }) + }() } token := make([]byte, 8) @@ -4139,7 +4135,12 @@ // noop case *serviceerror.DataLoss: // log event - wh.logger.Error("encountered data loss event", tag.WorkflowNamespaceID(namespaceID.String()), tag.WorkflowID(execution.GetWorkflowId()), tag.WorkflowRunID(execution.GetRunId())) + wh.logger.Error("encountered data loss event", + tag.WorkflowNamespaceID(namespaceID.String()), + tag.WorkflowID(execution.GetWorkflowId()), + tag.WorkflowRunID(execution.GetRunId()), + tag.Error(err), + ) return nil, nil, err default: return nil, nil, err @@ -4318,8 +4319,8 @@ GetUseVersioning() bool } -func (wh *WorkflowHandler) validateVersioningInfo(namespace string, id buildIdAndFlag, tq *taskqueuepb.TaskQueue) error { - if id.GetUseVersioning() && !wh.config.EnableWorkerVersioningWorkflow(namespace) { +func (wh *WorkflowHandler) validateVersioningInfo(nsName string, id buildIdAndFlag, tq *taskqueuepb.TaskQueue) error { + if id.GetUseVersioning() && !wh.config.EnableWorkerVersioningWorkflow(nsName) { return errWorkerVersioningNotAllowed } if id.GetUseVersioning() && tq.GetKind() == enumspb.TASK_QUEUE_KIND_STICKY && len(tq.GetNormalName()) == 0 { diff -Nru temporal-1.21.5-1/src/service/frontend/workflow_handler_test.go temporal-1.22.5/src/service/frontend/workflow_handler_test.go --- temporal-1.21.5-1/src/service/frontend/workflow_handler_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/frontend/workflow_handler_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -71,6 +71,7 @@ "go.temporal.io/server/common/payload" "go.temporal.io/server/common/payloads" "go.temporal.io/server/common/persistence" + "go.temporal.io/server/common/persistence/versionhistory" "go.temporal.io/server/common/persistence/visibility/manager" "go.temporal.io/server/common/persistence/visibility/store" "go.temporal.io/server/common/persistence/visibility/store/elasticsearch" @@ -361,33 +362,6 @@ s.Equal(common.ErrContextTimeoutTooShort, err) } -func (s *workflowHandlerSuite) TestStartWorkflowExecution_Failed_RequestIdNotSet() { - config := s.newConfig() - config.RPS = dc.GetIntPropertyFn(10) - wh := s.getWorkflowHandler(config) - - startWorkflowExecutionRequest := &workflowservice.StartWorkflowExecutionRequest{ - Namespace: "test-namespace", - WorkflowId: "workflow-id", - WorkflowType: &commonpb.WorkflowType{ - Name: "workflow-type", - }, - TaskQueue: &taskqueuepb.TaskQueue{ - Name: "task-queue", - }, - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - RetryPolicy: &commonpb.RetryPolicy{ - InitialInterval: timestamp.DurationPtr(1 * time.Second), - BackoffCoefficient: 2, - MaximumInterval: timestamp.DurationPtr(2 * time.Second), - MaximumAttempts: 1, - }, - } - _, err := wh.StartWorkflowExecution(context.Background(), startWorkflowExecutionRequest) - s.Error(err) - s.Equal(errRequestIDNotSet, err) -} - func (s *workflowHandlerSuite) TestStartWorkflowExecution_Failed_StartRequestNotSet() { config := s.newConfig() config.RPS = dc.GetIntPropertyFn(10) @@ -1516,6 
+1490,9 @@ // set up mocks to simulate a failed workflow with a retry policy. the failure event is id 5. branchToken := []byte{1, 2, 3} shardID := common.WorkflowIDToHistoryShard(namespaceID.String(), we.WorkflowId, numHistoryShards) + versionHistoryItem := versionhistory.NewVersionHistoryItem(1, 1) + currentVersionHistory := versionhistory.NewVersionHistory(branchToken, []*historyspb.VersionHistoryItem{versionHistoryItem}) + versionHistories := versionhistory.NewVersionHistories(currentVersionHistory) s.mockNamespaceCache.EXPECT().GetNamespaceID(namespace).Return(namespaceID, nil).AnyTimes() s.mockHistoryClient.EXPECT().PollMutableState(gomock.Any(), &historyservice.PollMutableStateRequest{ @@ -1523,13 +1500,14 @@ Execution: &we, ExpectedNextEventId: common.EndEventID, CurrentBranchToken: nil, + VersionHistoryItem: nil, }).Return(&historyservice.PollMutableStateResponse{ Execution: &we, WorkflowType: &commonpb.WorkflowType{Name: "mytype"}, NextEventId: 6, LastFirstEventId: 5, CurrentBranchToken: branchToken, - VersionHistories: nil, + VersionHistories: versionHistories, WorkflowState: enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED, WorkflowStatus: enumspb.WORKFLOW_EXECUTION_STATUS_FAILED, LastFirstEventTxnId: 100, diff -Nru temporal-1.21.5-1/src/service/fx.go temporal-1.22.5/src/service/fx.go --- temporal-1.21.5-1/src/service/fx.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/fx.go 2024-02-23 09:45:43.000000000 +0000 @@ -47,8 +47,21 @@ PersistenceNamespaceMaxQps persistenceClient.PersistenceNamespaceMaxQps PersistencePerShardNamespaceMaxQPS persistenceClient.PersistencePerShardNamespaceMaxQPS EnablePriorityRateLimiting persistenceClient.EnablePriorityRateLimiting + OperatorRPSRatio persistenceClient.OperatorRPSRatio DynamicRateLimitingParams persistenceClient.DynamicRateLimitingParams } + + GrpcServerOptionsParams struct { + fx.In + + Logger log.Logger + RpcFactory common.RPCFactory + RetryableInterceptor *interceptor.RetryableInterceptor + TelemetryInterceptor *interceptor.TelemetryInterceptor + RateLimitInterceptor *interceptor.RateLimitInterceptor + TracingInterceptor telemetry.ServerTraceInterceptor + AdditionalInterceptors []grpc.UnaryServerInterceptor `optional:"true"` + } ) func NewPersistenceRateLimitingParams( @@ -57,6 +70,7 @@ namespaceMaxQps dynamicconfig.IntPropertyFnWithNamespaceFilter, perShardNamespaceMaxQps dynamicconfig.IntPropertyFnWithNamespaceFilter, enablePriorityRateLimiting dynamicconfig.BoolPropertyFn, + operatorRPSRatio dynamicconfig.FloatPropertyFn, dynamicRateLimitingParams dynamicconfig.MapPropertyFn, ) PersistenceRateLimitingParams { return PersistenceRateLimitingParams{ @@ -64,6 +78,7 @@ PersistenceNamespaceMaxQps: persistenceClient.PersistenceNamespaceMaxQps(namespaceMaxQps), PersistencePerShardNamespaceMaxQPS: persistenceClient.PersistencePerShardNamespaceMaxQPS(perShardNamespaceMaxQps), EnablePriorityRateLimiting: persistenceClient.EnablePriorityRateLimiting(enablePriorityRateLimiting), + OperatorRPSRatio: persistenceClient.OperatorRPSRatio(operatorRPSRatio), DynamicRateLimitingParams: persistenceClient.DynamicRateLimitingParams(dynamicRateLimitingParams), } } @@ -88,32 +103,34 @@ } func GrpcServerOptionsProvider( - logger log.Logger, - rpcFactory common.RPCFactory, - retryableInterceptor *interceptor.RetryableInterceptor, - telemetryInterceptor *interceptor.TelemetryInterceptor, - rateLimitInterceptor *interceptor.RateLimitInterceptor, - tracingInterceptor telemetry.ServerTraceInterceptor, + params GrpcServerOptionsParams, ) 
[]grpc.ServerOption { - grpcServerOptions, err := rpcFactory.GetInternodeGRPCServerOptions() + grpcServerOptions, err := params.RpcFactory.GetInternodeGRPCServerOptions() if err != nil { - logger.Fatal("creating gRPC server options failed", tag.Error(err)) + params.Logger.Fatal("creating gRPC server options failed", tag.Error(err)) } return append( grpcServerOptions, - grpc.ChainUnaryInterceptor( - rpc.ServiceErrorInterceptor, - grpc.UnaryServerInterceptor(tracingInterceptor), - metrics.NewServerMetricsContextInjectorInterceptor(), - metrics.NewServerMetricsTrailerPropagatorInterceptor(logger), - telemetryInterceptor.UnaryIntercept, - rateLimitInterceptor.Intercept, - retryableInterceptor.Intercept, - ), - grpc.ChainStreamInterceptor( - telemetryInterceptor.StreamIntercept, - ), + grpc.ChainUnaryInterceptor(getUnaryInterceptors(params)...), + grpc.ChainStreamInterceptor(params.TelemetryInterceptor.StreamIntercept), ) } + +func getUnaryInterceptors(params GrpcServerOptionsParams) []grpc.UnaryServerInterceptor { + interceptors := []grpc.UnaryServerInterceptor{ + rpc.ServiceErrorInterceptor, + grpc.UnaryServerInterceptor(params.TracingInterceptor), + metrics.NewServerMetricsContextInjectorInterceptor(), + metrics.NewServerMetricsTrailerPropagatorInterceptor(params.Logger), + params.TelemetryInterceptor.UnaryIntercept, + } + + interceptors = append(interceptors, params.AdditionalInterceptors...) + + return append( + interceptors, + params.RateLimitInterceptor.Intercept, + params.RetryableInterceptor.Intercept) +} diff -Nru temporal-1.21.5-1/src/service/history/api/activity_util.go temporal-1.22.5/src/service/history/api/activity_util.go --- temporal-1.21.5-1/src/service/history/api/activity_util.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/api/activity_util.go 2024-02-23 09:45:43.000000000 +0000 @@ -54,6 +54,7 @@ ctx, token.NamespaceId, token.WorkflowId, + workflow.LockPriorityHigh, ) if err != nil { return err diff -Nru temporal-1.21.5-1/src/service/history/api/consistency_checker.go temporal-1.22.5/src/service/history/api/consistency_checker.go --- temporal-1.21.5-1/src/service/history/api/consistency_checker.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/api/consistency_checker.go 2024-02-23 09:45:43.000000000 +0000 @@ -53,6 +53,7 @@ ctx context.Context, namespaceID string, workflowID string, + lockPriority workflow.LockPriority, ) (string, error) GetWorkflowContext( ctx context.Context, @@ -87,6 +88,7 @@ ctx context.Context, namespaceID string, workflowID string, + lockPriority workflow.LockPriority, ) (string, error) { // to achieve read after write consistency, // logic need to assert shard ownership *at most once* per read API call @@ -97,6 +99,7 @@ &shardOwnershipAsserted, namespaceID, workflowID, + lockPriority, ) if err != nil { return "", err @@ -259,6 +262,7 @@ shardOwnershipAsserted, namespaceID, workflowID, + lockPriority, ) if err != nil { return nil, err @@ -282,6 +286,7 @@ shardOwnershipAsserted, namespaceID, workflowID, + lockPriority, ) if err != nil { wfContext.GetReleaseFn()(err) @@ -300,7 +305,21 @@ shardOwnershipAsserted *bool, namespaceID string, workflowID string, -) (string, error) { + lockPriority workflow.LockPriority, +) (runID string, retErr error) { + if c.shardContext.GetConfig().EnableAPIGetCurrentRunIDLock() { + _, release, err := c.workflowCache.GetOrCreateCurrentWorkflowExecution( + ctx, + namespace.ID(namespaceID), + workflowID, + lockPriority, + ) + if err != nil { + return "", err + } + 
defer release(retErr) + } + resp, err := c.shardContext.GetCurrentExecution( ctx, &persistence.GetCurrentExecutionRequest{ diff -Nru temporal-1.21.5-1/src/service/history/api/consistency_checker_test.go temporal-1.22.5/src/service/history/api/consistency_checker_test.go --- temporal-1.21.5-1/src/service/history/api/consistency_checker_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/api/consistency_checker_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -43,7 +43,9 @@ "go.temporal.io/server/common/namespace" "go.temporal.io/server/common/persistence" "go.temporal.io/server/common/persistence/versionhistory" + "go.temporal.io/server/service/history/configs" "go.temporal.io/server/service/history/shard" + "go.temporal.io/server/service/history/tests" "go.temporal.io/server/service/history/workflow" wcache "go.temporal.io/server/service/history/workflow/cache" ) @@ -56,6 +58,7 @@ controller *gomock.Controller shardContext *shard.MockContext workflowCache *wcache.MockCache + config *configs.Config shardID int32 namespaceID string @@ -84,6 +87,7 @@ s.controller = gomock.NewController(s.T()) s.shardContext = shard.NewMockContext(s.controller) s.workflowCache = wcache.NewMockCache(s.controller) + s.config = tests.NewDynamicConfig() s.shardID = rand.Int31() s.namespaceID = uuid.New().String() @@ -91,6 +95,7 @@ s.currentRunID = uuid.New().String() s.shardContext.EXPECT().GetShardID().Return(s.shardID).AnyTimes() + s.shardContext.EXPECT().GetConfig().Return(s.config).AnyTimes() s.checker = NewWorkflowConsistencyChecker(s.shardContext, s.workflowCache) } @@ -269,6 +274,16 @@ ctx := context.Background() shardOwnershipAsserted := false + wfContext := workflow.NewMockContext(s.controller) + released := false + releaseFn := func(err error) { released = true } + + s.workflowCache.EXPECT().GetOrCreateCurrentWorkflowExecution( + ctx, + namespace.ID(s.namespaceID), + s.workflowID, + workflow.LockPriorityHigh, + ).Return(wfContext, releaseFn, nil) s.shardContext.EXPECT().GetCurrentExecution( ctx, &persistence.GetCurrentExecutionRequest{ @@ -278,15 +293,26 @@ }, ).Return(&persistence.GetCurrentExecutionResponse{RunID: s.currentRunID}, nil) - runID, err := s.checker.getCurrentRunID(ctx, &shardOwnershipAsserted, s.namespaceID, s.workflowID) + runID, err := s.checker.getCurrentRunID(ctx, &shardOwnershipAsserted, s.namespaceID, s.workflowID, workflow.LockPriorityHigh) s.NoError(err) s.Equal(s.currentRunID, runID) + s.True(released) } func (s *workflowConsistencyCheckerSuite) TestGetCurrentRunID_NotFound_OwnershipAsserted() { ctx := context.Background() shardOwnershipAsserted := false + wfContext := workflow.NewMockContext(s.controller) + released := false + releaseFn := func(err error) { released = true } + + s.workflowCache.EXPECT().GetOrCreateCurrentWorkflowExecution( + ctx, + namespace.ID(s.namespaceID), + s.workflowID, + workflow.LockPriorityHigh, + ).Return(wfContext, releaseFn, nil) s.shardContext.EXPECT().GetCurrentExecution( ctx, &persistence.GetCurrentExecutionRequest{ @@ -297,15 +323,26 @@ ).Return(nil, serviceerror.NewNotFound("")) s.shardContext.EXPECT().AssertOwnership(ctx).Return(nil) - runID, err := s.checker.getCurrentRunID(ctx, &shardOwnershipAsserted, s.namespaceID, s.workflowID) + runID, err := s.checker.getCurrentRunID(ctx, &shardOwnershipAsserted, s.namespaceID, s.workflowID, workflow.LockPriorityHigh) s.IsType(&serviceerror.NotFound{}, err) s.Empty(runID) + s.True(released) } func (s *workflowConsistencyCheckerSuite) 
TestGetCurrentRunID_NotFound_OwnershipLost() { ctx := context.Background() shardOwnershipAsserted := false + wfContext := workflow.NewMockContext(s.controller) + released := false + releaseFn := func(err error) { released = true } + + s.workflowCache.EXPECT().GetOrCreateCurrentWorkflowExecution( + ctx, + namespace.ID(s.namespaceID), + s.workflowID, + workflow.LockPriorityHigh, + ).Return(wfContext, releaseFn, nil) s.shardContext.EXPECT().GetCurrentExecution( ctx, &persistence.GetCurrentExecutionRequest{ @@ -316,15 +353,26 @@ ).Return(nil, serviceerror.NewNotFound("")) s.shardContext.EXPECT().AssertOwnership(ctx).Return(&persistence.ShardOwnershipLostError{}) - runID, err := s.checker.getCurrentRunID(ctx, &shardOwnershipAsserted, s.namespaceID, s.workflowID) + runID, err := s.checker.getCurrentRunID(ctx, &shardOwnershipAsserted, s.namespaceID, s.workflowID, workflow.LockPriorityHigh) s.IsType(&persistence.ShardOwnershipLostError{}, err) s.Empty(runID) + s.True(released) } func (s *workflowConsistencyCheckerSuite) TestGetCurrentRunID_Error() { ctx := context.Background() shardOwnershipAsserted := false + wfContext := workflow.NewMockContext(s.controller) + released := false + releaseFn := func(err error) { released = true } + + s.workflowCache.EXPECT().GetOrCreateCurrentWorkflowExecution( + ctx, + namespace.ID(s.namespaceID), + s.workflowID, + workflow.LockPriorityHigh, + ).Return(wfContext, releaseFn, nil) s.shardContext.EXPECT().GetCurrentExecution( ctx, &persistence.GetCurrentExecutionRequest{ @@ -334,9 +382,10 @@ }, ).Return(nil, serviceerror.NewUnavailable("")) - runID, err := s.checker.getCurrentRunID(ctx, &shardOwnershipAsserted, s.namespaceID, s.workflowID) + runID, err := s.checker.getCurrentRunID(ctx, &shardOwnershipAsserted, s.namespaceID, s.workflowID, workflow.LockPriorityHigh) s.IsType(&serviceerror.Unavailable{}, err) s.Empty(runID) + s.True(released) } func (s *workflowConsistencyCheckerSuite) TestAssertShardOwnership_FirstTime() { diff -Nru temporal-1.21.5-1/src/service/history/api/describeworkflow/api.go temporal-1.22.5/src/service/history/api/describeworkflow/api.go --- temporal-1.21.5-1/src/service/history/api/describeworkflow/api.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/api/describeworkflow/api.go 2024-02-23 09:45:43.000000000 +0000 @@ -46,6 +46,21 @@ "go.temporal.io/server/service/history/workflow" ) +func clonePayloadMap(source map[string]*commonpb.Payload) map[string]*commonpb.Payload { + target := make(map[string]*commonpb.Payload, len(source)) + for k, v := range source { + metadata := make(map[string][]byte, len(v.GetMetadata())) + for mk, mv := range v.GetMetadata() { + metadata[mk] = mv + } + target[k] = &commonpb.Payload{ + Metadata: metadata, + Data: v.GetData(), + } + } + return target +} + func Invoke( ctx context.Context, req *historyservice.DescribeWorkflowExecutionRequest, @@ -73,11 +88,29 @@ if err != nil { return nil, err } + // We release the lock on this workflow just before we return from this method, at which point mutable state might + // be mutated. Take extra care to clone all response methods as marshalling happens after we return and it is unsafe + // to mutate proto fields during marshalling. 
defer func() { weCtx.GetReleaseFn()(retError) }() mutableState := weCtx.GetMutableState() executionInfo := mutableState.GetExecutionInfo() executionState := mutableState.GetExecutionState() + + resetPoints := &workflowpb.ResetPoints{ + Points: make([]*workflowpb.ResetPointInfo, len(executionInfo.AutoResetPoints.GetPoints())), + } + for i, p := range executionInfo.AutoResetPoints.GetPoints() { + resetPoints.Points[i] = &workflowpb.ResetPointInfo{ + BinaryChecksum: p.BinaryChecksum, + RunId: p.RunId, + FirstWorkflowTaskCompletedId: p.FirstWorkflowTaskCompletedId, + CreateTime: p.CreateTime, + ExpireTime: p.ExpireTime, + Resettable: p.Resettable, + } + } + result := &historyservice.DescribeWorkflowExecutionResponse{ ExecutionConfig: &workflowpb.WorkflowExecutionConfig{ TaskQueue: &taskqueuepb.TaskQueue{ @@ -93,14 +126,13 @@ WorkflowId: executionInfo.WorkflowId, RunId: executionState.RunId, }, - Type: &commonpb.WorkflowType{Name: executionInfo.WorkflowTypeName}, - StartTime: executionInfo.StartTime, - Status: executionState.Status, - HistoryLength: mutableState.GetNextEventID() - common.FirstEventID, - ExecutionTime: executionInfo.ExecutionTime, - Memo: &commonpb.Memo{Fields: executionInfo.Memo}, - SearchAttributes: &commonpb.SearchAttributes{IndexedFields: executionInfo.SearchAttributes}, - AutoResetPoints: executionInfo.AutoResetPoints, + Type: &commonpb.WorkflowType{Name: executionInfo.WorkflowTypeName}, + StartTime: executionInfo.StartTime, + Status: executionState.Status, + HistoryLength: mutableState.GetNextEventID() - common.FirstEventID, + ExecutionTime: executionInfo.ExecutionTime, + // Memo and SearchAttributes are set below + AutoResetPoints: resetPoints, TaskQueue: executionInfo.TaskQueue, StateTransitionCount: executionInfo.StateTransitionCount, HistorySizeBytes: executionInfo.GetExecutionStats().GetHistorySize(), @@ -142,12 +174,10 @@ p.LastHeartbeatTime = ai.LastHeartbeatUpdateTime p.HeartbeatDetails = ai.LastHeartbeatDetails } - // TODO: move to mutable state instead of loading it from event - scheduledEvent, err := mutableState.GetActivityScheduledEvent(ctx, ai.ScheduledEventId) + p.ActivityType, err = mutableState.GetActivityType(ctx, ai) if err != nil { return nil, err } - p.ActivityType = scheduledEvent.GetActivityTaskScheduledEventAttributes().ActivityType if p.State == enumspb.PENDING_ACTIVITY_STATE_SCHEDULED { p.ScheduledTime = ai.ScheduledTime } else { @@ -210,8 +240,12 @@ ) return nil, serviceerror.NewInternal("Failed to fetch memo and search attributes") } - result.WorkflowExecutionInfo.Memo = relocatableAttributes.Memo - result.WorkflowExecutionInfo.SearchAttributes = relocatableAttributes.SearchAttributes + result.WorkflowExecutionInfo.Memo = &commonpb.Memo{ + Fields: clonePayloadMap(relocatableAttributes.Memo.GetFields()), + } + result.WorkflowExecutionInfo.SearchAttributes = &commonpb.SearchAttributes{ + IndexedFields: clonePayloadMap(relocatableAttributes.SearchAttributes.GetIndexedFields()), + } return result, nil } diff -Nru temporal-1.21.5-1/src/service/history/api/get_workflow_util.go temporal-1.22.5/src/service/history/api/get_workflow_util.go --- temporal-1.21.5-1/src/service/history/api/get_workflow_util.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/api/get_workflow_util.go 2024-02-23 09:45:43.000000000 +0000 @@ -25,7 +25,6 @@ package api import ( - "bytes" "context" "fmt" "time" @@ -38,6 +37,7 @@ "go.temporal.io/server/api/historyservice/v1" "go.temporal.io/server/common" "go.temporal.io/server/common/definition" + 
"go.temporal.io/server/common/log/tag" "go.temporal.io/server/common/namespace" "go.temporal.io/server/common/persistence/versionhistory" serviceerrors "go.temporal.io/server/common/serviceerror" @@ -54,6 +54,7 @@ eventNotifier events.Notifier, ) (*historyservice.GetMutableStateResponse, error) { + logger := shard.GetLogger() namespaceID := namespace.ID(request.GetNamespaceId()) err := ValidateNamespaceUUID(namespaceID) if err != nil { @@ -65,6 +66,7 @@ ctx, request.NamespaceId, request.Execution.WorkflowId, + workflow.LockPriorityHigh, ) if err != nil { return nil, err @@ -79,14 +81,33 @@ if err != nil { return nil, err } - if request.CurrentBranchToken == nil { - request.CurrentBranchToken = response.CurrentBranchToken + currentVersionHistory, err := versionhistory.GetCurrentVersionHistory(response.GetVersionHistories()) + if err != nil { + return nil, err } - if !bytes.Equal(request.CurrentBranchToken, response.CurrentBranchToken) { + if request.GetVersionHistoryItem() == nil { + lastVersionHistoryItem, err := versionhistory.GetLastVersionHistoryItem(currentVersionHistory) + if err != nil { + return nil, err + } + request.VersionHistoryItem = lastVersionHistoryItem + } + // Use the latest event id + event version as the branch identifier. This pair is unique across clusters. + // We return the full version histories. Callers need to fetch the last version history item from current branch + // and use the last version history item in following calls. + if !versionhistory.ContainsVersionHistoryItem(currentVersionHistory, request.VersionHistoryItem) { + logItem, err := versionhistory.GetLastVersionHistoryItem(currentVersionHistory) + if err != nil { + return nil, err + } + logger.Warn("Request history branch and current history branch don't match", + tag.Value(logItem), + tag.TokenLastEventVersion(request.VersionHistoryItem.GetVersion()), + tag.TokenLastEventID(request.VersionHistoryItem.GetEventId())) return nil, serviceerrors.NewCurrentBranchChanged(response.CurrentBranchToken, request.CurrentBranchToken) } - // expectedNextEventID is 0 when caller want to get the current next event ID without blocking + // expectedNextEventID is 0 when caller want to get the current next event ID without blocking. 
expectedNextEventID := common.FirstEventID if request.ExpectedNextEventId != common.EmptyEventID { expectedNextEventID = request.GetExpectedNextEventId() @@ -105,8 +126,19 @@ if err != nil { return nil, err } - // check again if the current branch token changed - if !bytes.Equal(request.CurrentBranchToken, response.CurrentBranchToken) { + currentVersionHistory, err = versionhistory.GetCurrentVersionHistory(response.GetVersionHistories()) + if err != nil { + return nil, err + } + if !versionhistory.ContainsVersionHistoryItem(currentVersionHistory, request.VersionHistoryItem) { + logItem, err := versionhistory.GetLastVersionHistoryItem(currentVersionHistory) + if err != nil { + return nil, err + } + logger.Warn("Request history branch and current history branch don't match prior to polling the mutable state", + tag.Value(logItem), + tag.TokenLastEventVersion(request.VersionHistoryItem.GetVersion()), + tag.TokenLastEventID(request.VersionHistoryItem.GetEventId())) return nil, serviceerrors.NewCurrentBranchChanged(response.CurrentBranchToken, request.CurrentBranchToken) } if expectedNextEventID < response.GetNextEventId() || response.GetWorkflowStatus() != enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING { @@ -131,8 +163,22 @@ // Note: Later events could modify response.WorkerVersionStamp and we won't // update it here. That's okay since this return value is only informative and isn't used for task dispatch. // For correctness we could pass it in the Notification event. - if !bytes.Equal(request.CurrentBranchToken, event.CurrentBranchToken) { - return nil, serviceerrors.NewCurrentBranchChanged(event.CurrentBranchToken, request.CurrentBranchToken) + latestVersionHistory, err := versionhistory.GetCurrentVersionHistory(event.VersionHistories) + if err != nil { + return nil, err + } + response.CurrentBranchToken = latestVersionHistory.GetBranchToken() + response.VersionHistories = event.VersionHistories + if !versionhistory.ContainsVersionHistoryItem(latestVersionHistory, request.VersionHistoryItem) { + logItem, err := versionhistory.GetLastVersionHistoryItem(currentVersionHistory) + if err != nil { + return nil, err + } + logger.Warn("Request history branch and current history branch don't match after polling the mutable state", + tag.Value(logItem), + tag.TokenLastEventVersion(request.VersionHistoryItem.GetVersion()), + tag.TokenLastEventID(request.VersionHistoryItem.GetEventId())) + return nil, serviceerrors.NewCurrentBranchChanged(response.CurrentBranchToken, request.CurrentBranchToken) } if expectedNextEventID < response.GetNextEventId() || response.GetWorkflowStatus() != enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING { return response, nil diff -Nru temporal-1.21.5-1/src/service/history/api/isactivitytaskvalid/api.go temporal-1.22.5/src/service/history/api/isactivitytaskvalid/api.go --- temporal-1.21.5-1/src/service/history/api/isactivitytaskvalid/api.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/service/history/api/isactivitytaskvalid/api.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,88 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package isactivitytaskvalid + +import ( + "context" + + "go.temporal.io/server/api/historyservice/v1" + "go.temporal.io/server/common" + "go.temporal.io/server/common/definition" + "go.temporal.io/server/service/history/api" + "go.temporal.io/server/service/history/consts" + "go.temporal.io/server/service/history/shard" +) + +func Invoke( + ctx context.Context, + req *historyservice.IsActivityTaskValidRequest, + shardContext shard.Context, + workflowConsistencyChecker api.WorkflowConsistencyChecker, +) (resp *historyservice.IsActivityTaskValidResponse, retError error) { + isValid := false + err := api.GetAndUpdateWorkflowWithNew( + ctx, + req.Clock, + api.BypassMutableStateConsistencyPredicate, + definition.NewWorkflowKey( + req.NamespaceId, + req.Execution.WorkflowId, + req.Execution.RunId, + ), + func(workflowContext api.WorkflowContext) (*api.UpdateWorkflowAction, error) { + isTaskValid, err := isActivityTaskValid(workflowContext, req.ScheduledEventId) + if err != nil { + return nil, err + } + isValid = isTaskValid + return &api.UpdateWorkflowAction{ + Noop: true, + CreateWorkflowTask: false, + }, nil + }, + nil, + shardContext, + workflowConsistencyChecker, + ) + return &historyservice.IsActivityTaskValidResponse{ + IsValid: isValid, + }, err +} + +func isActivityTaskValid( + workflowContext api.WorkflowContext, + scheduledEventID int64, +) (bool, error) { + mutableState := workflowContext.GetMutableState() + if !mutableState.IsWorkflowExecutionRunning() { + return false, consts.ErrWorkflowCompleted + } + + ai, ok := mutableState.GetActivityInfo(scheduledEventID) + if ok && ai.StartedEventId == common.EmptyEventID { + return true, nil + } + return false, nil +} diff -Nru temporal-1.21.5-1/src/service/history/api/isactivitytaskvalid/api_test.go temporal-1.22.5/src/service/history/api/isactivitytaskvalid/api_test.go --- temporal-1.21.5-1/src/service/history/api/isactivitytaskvalid/api_test.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/service/history/api/isactivitytaskvalid/api_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,118 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package isactivitytaskvalid + +import ( + "math/rand" + "testing" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "go.temporal.io/api/serviceerror" + + persistencespb "go.temporal.io/server/api/persistence/v1" + "go.temporal.io/server/common" + "go.temporal.io/server/service/history/api" + "go.temporal.io/server/service/history/workflow" +) + +type ( + apiSuite struct { + suite.Suite + *require.Assertions + + controller *gomock.Controller + apiContext api.WorkflowContext + workflowContext *workflow.MockContext + mutableState *workflow.MockMutableState + } +) + +func TestAPISuite(t *testing.T) { + s := new(apiSuite) + suite.Run(t, s) +} + +func (s *apiSuite) SetupTest() { + s.Assertions = require.New(s.T()) + + s.controller = gomock.NewController(s.T()) + s.workflowContext = workflow.NewMockContext(s.controller) + s.mutableState = workflow.NewMockMutableState(s.controller) + s.apiContext = api.NewWorkflowContext( + s.workflowContext, + func(err error) {}, + s.mutableState, + ) +} + +func (s *apiSuite) TeardownTest() { + s.controller.Finish() +} + +func (s *apiSuite) TestWorkflowCompleted() { + s.mutableState.EXPECT().IsWorkflowExecutionRunning().Return(false) + + _, err := isActivityTaskValid(s.apiContext, rand.Int63()) + s.Error(err) + s.IsType(&serviceerror.NotFound{}, err) +} + +func (s *apiSuite) TestWorkflowRunning_ActivityTaskNotStarted() { + s.mutableState.EXPECT().IsWorkflowExecutionRunning().Return(true) + activityScheduleEventID := rand.Int63() + s.mutableState.EXPECT().GetActivityInfo(activityScheduleEventID).Return(&persistencespb.ActivityInfo{ + ScheduledEventId: activityScheduleEventID, + StartedEventId: common.EmptyEventID, + }, true) + + valid, err := isActivityTaskValid(s.apiContext, activityScheduleEventID) + s.NoError(err) + s.True(valid) +} + +func (s *apiSuite) TestWorkflowRunning_ActivityTaskStarted() { + s.mutableState.EXPECT().IsWorkflowExecutionRunning().Return(true) + activityScheduleEventID := rand.Int63() + s.mutableState.EXPECT().GetActivityInfo(activityScheduleEventID).Return(&persistencespb.ActivityInfo{ + ScheduledEventId: activityScheduleEventID, + StartedEventId: activityScheduleEventID + 1, + }, true) + + valid, err := isActivityTaskValid(s.apiContext, activityScheduleEventID) + s.NoError(err) + s.False(valid) +} + +func (s *apiSuite) TestWorkflowRunning_ActivityTaskMissing() { + 
s.mutableState.EXPECT().IsWorkflowExecutionRunning().Return(true) + activityScheduleEventID := rand.Int63() + s.mutableState.EXPECT().GetActivityInfo(activityScheduleEventID).Return(nil, false) + + valid, err := isActivityTaskValid(s.apiContext, activityScheduleEventID) + s.NoError(err) + s.False(valid) +} diff -Nru temporal-1.21.5-1/src/service/history/api/isworkflowtaskvalid/api.go temporal-1.22.5/src/service/history/api/isworkflowtaskvalid/api.go --- temporal-1.21.5-1/src/service/history/api/isworkflowtaskvalid/api.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/service/history/api/isworkflowtaskvalid/api.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,88 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package isworkflowtaskvalid + +import ( + "context" + + "go.temporal.io/server/api/historyservice/v1" + "go.temporal.io/server/common" + "go.temporal.io/server/common/definition" + "go.temporal.io/server/service/history/api" + "go.temporal.io/server/service/history/consts" + "go.temporal.io/server/service/history/shard" +) + +func Invoke( + ctx context.Context, + req *historyservice.IsWorkflowTaskValidRequest, + shardContext shard.Context, + workflowConsistencyChecker api.WorkflowConsistencyChecker, +) (resp *historyservice.IsWorkflowTaskValidResponse, retError error) { + isValid := false + err := api.GetAndUpdateWorkflowWithNew( + ctx, + req.Clock, + api.BypassMutableStateConsistencyPredicate, + definition.NewWorkflowKey( + req.NamespaceId, + req.Execution.WorkflowId, + req.Execution.RunId, + ), + func(workflowContext api.WorkflowContext) (*api.UpdateWorkflowAction, error) { + isTaskValid, err := isWorkflowTaskValid(workflowContext, req.ScheduledEventId) + if err != nil { + return nil, err + } + isValid = isTaskValid + return &api.UpdateWorkflowAction{ + Noop: true, + CreateWorkflowTask: false, + }, nil + }, + nil, + shardContext, + workflowConsistencyChecker, + ) + return &historyservice.IsWorkflowTaskValidResponse{ + IsValid: isValid, + }, err +} + +func isWorkflowTaskValid( + workflowContext api.WorkflowContext, + scheduledEventID int64, +) (bool, error) { + mutableState := workflowContext.GetMutableState() + if !mutableState.IsWorkflowExecutionRunning() { + return false, consts.ErrWorkflowCompleted + } + + workflowTask := mutableState.GetWorkflowTaskByID(scheduledEventID) + if workflowTask == nil { + return false, nil + } + return workflowTask.StartedEventID == common.EmptyEventID, nil +} diff -Nru temporal-1.21.5-1/src/service/history/api/isworkflowtaskvalid/api_test.go temporal-1.22.5/src/service/history/api/isworkflowtaskvalid/api_test.go --- temporal-1.21.5-1/src/service/history/api/isworkflowtaskvalid/api_test.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/service/history/api/isworkflowtaskvalid/api_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,117 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package isworkflowtaskvalid + +import ( + "math/rand" + "testing" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "go.temporal.io/api/serviceerror" + + "go.temporal.io/server/common" + "go.temporal.io/server/service/history/api" + "go.temporal.io/server/service/history/workflow" +) + +type ( + apiSuite struct { + suite.Suite + *require.Assertions + + controller *gomock.Controller + apiContext api.WorkflowContext + workflowContext *workflow.MockContext + mutableState *workflow.MockMutableState + } +) + +func TestAPISuite(t *testing.T) { + s := new(apiSuite) + suite.Run(t, s) +} + +func (s *apiSuite) SetupTest() { + s.Assertions = require.New(s.T()) + + s.controller = gomock.NewController(s.T()) + s.workflowContext = workflow.NewMockContext(s.controller) + s.mutableState = workflow.NewMockMutableState(s.controller) + s.apiContext = api.NewWorkflowContext( + s.workflowContext, + func(err error) {}, + s.mutableState, + ) +} + +func (s *apiSuite) TeardownTest() { + s.controller.Finish() +} + +func (s *apiSuite) TestWorkflowCompleted() { + s.mutableState.EXPECT().IsWorkflowExecutionRunning().Return(false) + + _, err := isWorkflowTaskValid(s.apiContext, rand.Int63()) + s.Error(err) + s.IsType(&serviceerror.NotFound{}, err) +} + +func (s *apiSuite) TestWorkflowRunning_WorkflowTaskNotStarted() { + s.mutableState.EXPECT().IsWorkflowExecutionRunning().Return(true) + workflowTaskScheduleEventID := rand.Int63() + s.mutableState.EXPECT().GetWorkflowTaskByID(workflowTaskScheduleEventID).Return(&workflow.WorkflowTaskInfo{ + ScheduledEventID: workflowTaskScheduleEventID, + StartedEventID: common.EmptyEventID, + }) + + valid, err := isWorkflowTaskValid(s.apiContext, workflowTaskScheduleEventID) + s.NoError(err) + s.True(valid) +} + +func (s *apiSuite) TestWorkflowRunning_WorkflowTaskStarted() { + s.mutableState.EXPECT().IsWorkflowExecutionRunning().Return(true) + workflowTaskScheduleEventID := rand.Int63() + s.mutableState.EXPECT().GetWorkflowTaskByID(workflowTaskScheduleEventID).Return(&workflow.WorkflowTaskInfo{ + ScheduledEventID: workflowTaskScheduleEventID, + StartedEventID: workflowTaskScheduleEventID + 10, + }) + + valid, err := isWorkflowTaskValid(s.apiContext, workflowTaskScheduleEventID) + s.NoError(err) + s.False(valid) +} + +func (s *apiSuite) TestWorkflowRunning_WorkflowTaskMissing() { + s.mutableState.EXPECT().IsWorkflowExecutionRunning().Return(true) + workflowTaskScheduleEventID := rand.Int63() + s.mutableState.EXPECT().GetWorkflowTaskByID(workflowTaskScheduleEventID).Return(nil) + + valid, err := isWorkflowTaskValid(s.apiContext, workflowTaskScheduleEventID) + s.NoError(err) + s.False(valid) +} diff -Nru temporal-1.21.5-1/src/service/history/api/queryworkflow/api.go temporal-1.22.5/src/service/history/api/queryworkflow/api.go --- temporal-1.21.5-1/src/service/history/api/queryworkflow/api.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/api/queryworkflow/api.go 2024-02-23 09:45:43.000000000 +0000 @@ -34,6 +34,7 @@ "go.temporal.io/api/workflowservice/v1" "go.temporal.io/server/common/log/tag" + "go.temporal.io/server/common/worker_versioning" "go.temporal.io/server/api/historyservice/v1" "go.temporal.io/server/api/matchingservice/v1" @@ -77,6 +78,7 @@ ctx, request.NamespaceId, request.Request.Execution.WorkflowId, + workflow.LockPriorityHigh, ) if err != nil { return nil, err @@ -251,7 +253,7 @@ 
metricsHandler.Timer(metrics.DirectQueryDispatchLatency.GetMetricName()).Record(time.Since(startTime)) }() - directive := common.MakeVersionDirectiveForWorkflowTask( + directive := worker_versioning.MakeDirectiveForWorkflowTask( msResp.GetWorkerVersionStamp(), msResp.GetPreviousStartedEventId(), ) diff -Nru temporal-1.21.5-1/src/service/history/api/reapplyevents/api.go temporal-1.22.5/src/service/history/api/reapplyevents/api.go --- temporal-1.21.5-1/src/service/history/api/reapplyevents/api.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/api/reapplyevents/api.go 2024-02-23 09:45:43.000000000 +0000 @@ -177,7 +177,7 @@ }, nil } - _, err = eventsReapplier.ReapplyEvents( + reappliedEvents, err := eventsReapplier.ReapplyEvents( ctx, mutableState, toReapplyEvents, @@ -187,8 +187,14 @@ shard.GetLogger().Error("failed to re-apply stale events", tag.Error(err)) return nil, err } + if len(reappliedEvents) == 0 { + return &api.UpdateWorkflowAction{ + Noop: true, + CreateWorkflowTask: false, + }, nil + } return &api.UpdateWorkflowAction{ - Noop: true, + Noop: false, CreateWorkflowTask: false, }, nil }, diff -Nru temporal-1.21.5-1/src/service/history/api/recordactivitytaskheartbeat/api.go temporal-1.22.5/src/service/history/api/recordactivitytaskheartbeat/api.go --- temporal-1.21.5-1/src/service/history/api/recordactivitytaskheartbeat/api.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/api/recordactivitytaskheartbeat/api.go 2024-02-23 09:45:43.000000000 +0000 @@ -92,8 +92,10 @@ return nil, consts.ErrStaleState } - if !isRunning || ai.StartedEventId == common.EmptyEventID || - (token.GetScheduledEventId() != common.EmptyEventID && token.Attempt != ai.Attempt) { + if !isRunning || + ai.StartedEventId == common.EmptyEventID || + (token.GetScheduledEventId() != common.EmptyEventID && token.Attempt != ai.Attempt) || + (token.GetVersion() != common.EmptyVersion && token.Version != ai.Version) { return nil, consts.ErrActivityTaskNotFound } diff -Nru temporal-1.21.5-1/src/service/history/api/recordactivitytaskstarted/api.go temporal-1.22.5/src/service/history/api/recordactivitytaskstarted/api.go --- temporal-1.21.5-1/src/service/history/api/recordactivitytaskstarted/api.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/api/recordactivitytaskstarted/api.go 2024-02-23 09:45:43.000000000 +0000 @@ -135,6 +135,7 @@ response.StartedTime = ai.StartedTime response.Attempt = ai.Attempt response.HeartbeatDetails = ai.LastHeartbeatDetails + response.Version = ai.Version response.WorkflowType = mutableState.GetWorkflowType() response.WorkflowNamespace = namespace.String() diff -Nru temporal-1.21.5-1/src/service/history/api/recordchildworkflowcompleted/api.go temporal-1.22.5/src/service/history/api/recordchildworkflowcompleted/api.go --- temporal-1.21.5-1/src/service/history/api/recordchildworkflowcompleted/api.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/api/recordchildworkflowcompleted/api.go 2024-02-23 09:45:43.000000000 +0000 @@ -77,8 +77,8 @@ }, definition.NewWorkflowKey( request.NamespaceId, - request.WorkflowExecution.WorkflowId, - request.WorkflowExecution.RunId, + request.GetParentExecution().WorkflowId, + request.GetParentExecution().RunId, ), func(workflowContext api.WorkflowContext) (*api.UpdateWorkflowAction, error) { mutableState := workflowContext.GetMutableState() @@ -99,8 +99,8 @@ return nil, consts.ErrChildExecutionNotFound } - completedExecution := request.CompletedExecution - if 
ci.GetStartedWorkflowId() != completedExecution.GetWorkflowId() { + childExecution := request.GetChildExecution() + if ci.GetStartedWorkflowId() != childExecution.GetWorkflowId() { // this can only happen when we don't have the initiated version return nil, consts.ErrChildExecutionNotFound } @@ -109,21 +109,20 @@ switch completionEvent.GetEventType() { case enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_COMPLETED: attributes := completionEvent.GetWorkflowExecutionCompletedEventAttributes() - _, err = mutableState.AddChildWorkflowExecutionCompletedEvent(parentInitiatedID, completedExecution, attributes) + _, err = mutableState.AddChildWorkflowExecutionCompletedEvent(parentInitiatedID, childExecution, attributes) case enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_FAILED: attributes := completionEvent.GetWorkflowExecutionFailedEventAttributes() - _, err = mutableState.AddChildWorkflowExecutionFailedEvent(parentInitiatedID, completedExecution, attributes) + _, err = mutableState.AddChildWorkflowExecutionFailedEvent(parentInitiatedID, childExecution, attributes) case enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_CANCELED: attributes := completionEvent.GetWorkflowExecutionCanceledEventAttributes() - _, err = mutableState.AddChildWorkflowExecutionCanceledEvent(parentInitiatedID, completedExecution, attributes) + _, err = mutableState.AddChildWorkflowExecutionCanceledEvent(parentInitiatedID, childExecution, attributes) case enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_TERMINATED: attributes := completionEvent.GetWorkflowExecutionTerminatedEventAttributes() - _, err = mutableState.AddChildWorkflowExecutionTerminatedEvent(parentInitiatedID, completedExecution, attributes) + _, err = mutableState.AddChildWorkflowExecutionTerminatedEvent(parentInitiatedID, childExecution, attributes) case enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_TIMED_OUT: attributes := completionEvent.GetWorkflowExecutionTimedOutEventAttributes() - _, err = mutableState.AddChildWorkflowExecutionTimedOutEvent(parentInitiatedID, completedExecution, attributes) + _, err = mutableState.AddChildWorkflowExecutionTimedOutEvent(parentInitiatedID, childExecution, attributes) } - if err != nil { return nil, err } diff -Nru temporal-1.21.5-1/src/service/history/api/replication/generate_task.go temporal-1.22.5/src/service/history/api/replication/generate_task.go --- temporal-1.21.5-1/src/service/history/api/replication/generate_task.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/api/replication/generate_task.go 2024-02-23 09:45:43.000000000 +0000 @@ -31,6 +31,7 @@ "go.temporal.io/server/common/definition" "go.temporal.io/server/common/namespace" "go.temporal.io/server/common/persistence" + "go.temporal.io/server/common/util" "go.temporal.io/server/service/history/api" "go.temporal.io/server/service/history/shard" "go.temporal.io/server/service/history/tasks" @@ -66,7 +67,7 @@ defer func() { wfContext.GetReleaseFn()(retError) }() mutableState := wfContext.GetMutableState() - task, stateTransitionCount, err := mutableState.GenerateMigrationTasks() + replicationTasks, stateTransitionCount, err := mutableState.GenerateMigrationTasks() if err != nil { return nil, err } @@ -78,13 +79,16 @@ WorkflowID: request.Execution.WorkflowId, RunID: request.Execution.RunId, Tasks: map[tasks.Category][]tasks.Task{ - tasks.CategoryReplication: {task}, + tasks.CategoryReplication: replicationTasks, }, }) if err != nil { return nil, err } + + historyLength := util.Max(mutableState.GetNextEventID()-1, 0) return &historyservice.GenerateLastHistoryReplicationTasksResponse{ 
StateTransitionCount: stateTransitionCount, + HistoryLength: historyLength, }, nil } diff -Nru temporal-1.21.5-1/src/service/history/api/resetworkflow/api.go temporal-1.22.5/src/service/history/api/resetworkflow/api.go --- temporal-1.21.5-1/src/service/history/api/resetworkflow/api.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/api/resetworkflow/api.go 2024-02-23 09:45:43.000000000 +0000 @@ -85,6 +85,7 @@ ctx, namespaceID.String(), request.WorkflowExecution.GetWorkflowId(), + workflow.LockPriorityHigh, ) if err != nil { return nil, err diff -Nru temporal-1.21.5-1/src/service/history/api/respondactivitytaskcanceled/api.go temporal-1.22.5/src/service/history/api/respondactivitytaskcanceled/api.go --- temporal-1.21.5-1/src/service/history/api/respondactivitytaskcanceled/api.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/api/respondactivitytaskcanceled/api.go 2024-02-23 09:45:43.000000000 +0000 @@ -97,8 +97,10 @@ return nil, consts.ErrStaleState } - if !isRunning || ai.StartedEventId == common.EmptyEventID || - (token.GetScheduledEventId() != common.EmptyEventID && token.Attempt != ai.Attempt) { + if !isRunning || + ai.StartedEventId == common.EmptyEventID || + (token.GetScheduledEventId() != common.EmptyEventID && token.Attempt != ai.Attempt) || + (token.GetVersion() != common.EmptyVersion && token.Version != ai.Version) { return nil, consts.ErrActivityTaskNotFound } diff -Nru temporal-1.21.5-1/src/service/history/api/respondactivitytaskcompleted/api.go temporal-1.22.5/src/service/history/api/respondactivitytaskcompleted/api.go --- temporal-1.21.5-1/src/service/history/api/respondactivitytaskcompleted/api.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/api/respondactivitytaskcompleted/api.go 2024-02-23 09:45:43.000000000 +0000 @@ -96,8 +96,10 @@ return nil, consts.ErrStaleState } - if !isRunning || ai.StartedEventId == common.EmptyEventID || - (token.GetScheduledEventId() != common.EmptyEventID && token.Attempt != ai.Attempt) { + if !isRunning || + ai.StartedEventId == common.EmptyEventID || + (token.GetScheduledEventId() != common.EmptyEventID && token.Attempt != ai.Attempt) || + (token.GetVersion() != common.EmptyVersion && token.Version != ai.Version) { return nil, consts.ErrActivityTaskNotFound } diff -Nru temporal-1.21.5-1/src/service/history/api/respondactivitytaskfailed/api.go temporal-1.22.5/src/service/history/api/respondactivitytaskfailed/api.go --- temporal-1.21.5-1/src/service/history/api/respondactivitytaskfailed/api.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/api/respondactivitytaskfailed/api.go 2024-02-23 09:45:43.000000000 +0000 @@ -100,8 +100,10 @@ return nil, consts.ErrStaleState } - if !isRunning || ai.StartedEventId == common.EmptyEventID || - (token.GetScheduledEventId() != common.EmptyEventID && token.Attempt != ai.Attempt) { + if !isRunning || + ai.StartedEventId == common.EmptyEventID || + (token.GetScheduledEventId() != common.EmptyEventID && token.Attempt != ai.Attempt) || + (token.GetVersion() != common.EmptyVersion && token.Version != ai.Version) { return nil, consts.ErrActivityTaskNotFound } diff -Nru temporal-1.21.5-1/src/service/history/api/startworkflow/api.go temporal-1.22.5/src/service/history/api/startworkflow/api.go --- temporal-1.21.5-1/src/service/history/api/startworkflow/api.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/api/startworkflow/api.go 2024-02-23 09:45:43.000000000 +0000 @@ -35,7 
+35,7 @@ "go.temporal.io/api/workflowservice/v1" "go.temporal.io/server/api/historyservice/v1" - tokenspb "go.temporal.io/server/api/token/v1" + "go.temporal.io/server/common/tasktoken" "go.temporal.io/server/common" "go.temporal.io/server/common/definition" @@ -180,9 +180,11 @@ return s.generateResponse(creationParams.runID, creationParams.workflowTaskInfo, extractHistoryEvents(creationParams.workflowEventBatches)) } var currentWorkflowConditionFailedError *persistence.CurrentWorkflowConditionFailedError - if !errors.As(err, ¤tWorkflowConditionFailedError) { + if !errors.As(err, ¤tWorkflowConditionFailedError) || + len(currentWorkflowConditionFailedError.RunID) == 0 { return nil, err } + // The history and mutable state we generated above should be deleted by a background process. return s.handleConflict(ctx, creationParams, currentWorkflowConditionFailedError) } @@ -342,6 +344,8 @@ } var mutableStateInfo *mutableStateInfo // update prev execution and create new execution in one transaction + // we already validated that currentWorkflowConditionFailed.RunID is not empty, + // so the following update won't try to lock current execution again. err = api.GetAndUpdateWorkflowWithNew( ctx, nil, @@ -536,16 +540,18 @@ if err != nil { return nil, err } - taskToken := &tokenspb.Task{ - NamespaceId: s.namespace.ID().String(), - WorkflowId: workflowID, - RunId: runID, - ScheduledEventId: workflowTaskInfo.ScheduledEventID, - StartedEventId: workflowTaskInfo.StartedEventID, - StartedTime: workflowTaskInfo.StartedTime, - Attempt: workflowTaskInfo.Attempt, - Clock: clock, - } + + taskToken := tasktoken.NewWorkflowTaskToken( + s.namespace.ID().String(), + workflowID, + runID, + workflowTaskInfo.ScheduledEventID, + workflowTaskInfo.StartedEventID, + workflowTaskInfo.StartedTime, + workflowTaskInfo.Attempt, + clock, + workflowTaskInfo.Version, + ) serializedToken, err := tokenSerializer.Serialize(taskToken) if err != nil { return nil, err diff -Nru temporal-1.21.5-1/src/service/history/api/updateworkflow/api.go temporal-1.22.5/src/service/history/api/updateworkflow/api.go --- temporal-1.21.5-1/src/service/history/api/updateworkflow/api.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/api/updateworkflow/api.go 2024-02-23 09:45:43.000000000 +0000 @@ -46,6 +46,7 @@ "go.temporal.io/server/common/log/tag" "go.temporal.io/server/common/namespace" serviceerrors "go.temporal.io/server/common/serviceerror" + "go.temporal.io/server/common/worker_versioning" "go.temporal.io/server/internal/effect" "go.temporal.io/server/service/history/api" "go.temporal.io/server/service/history/consts" @@ -193,7 +194,7 @@ } taskQueue = *newWorkflowTask.TaskQueue normalTaskQueueName = ms.GetExecutionInfo().TaskQueue - directive = common.MakeVersionDirectiveForWorkflowTask( + directive = worker_versioning.MakeDirectiveForWorkflowTask( ms.GetWorkerVersionStamp(), ms.GetLastWorkflowTaskStartedEventID(), ) diff -Nru temporal-1.21.5-1/src/service/history/archival_queue_factory.go temporal-1.22.5/src/service/history/archival_queue_factory.go --- temporal-1.21.5-1/src/service/history/archival_queue_factory.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/archival_queue_factory.go 2024-02-23 09:45:43.000000000 +0000 @@ -130,6 +130,9 @@ workflowCache wcache.Cache, ) queues.Queue { executor := f.newArchivalTaskExecutor(shard, workflowCache) + if f.ExecutorWrapper != nil { + executor = f.ExecutorWrapper.Wrap(executor) + } return f.newScheduledQueue(shard, executor) } diff -Nru 
temporal-1.21.5-1/src/service/history/archival_queue_factory_test.go temporal-1.22.5/src/service/history/archival_queue_factory_test.go --- temporal-1.21.5-1/src/service/history/archival_queue_factory_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/archival_queue_factory_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -30,11 +30,11 @@ "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "go.temporal.io/server/common/clock" persistencespb "go.temporal.io/server/api/persistence/v1" "go.temporal.io/server/common/log" "go.temporal.io/server/common/metrics" - "go.temporal.io/server/common/namespace" "go.temporal.io/server/common/primitives/timestamp" "go.temporal.io/server/service/history/shard" "go.temporal.io/server/service/history/tasks" @@ -73,7 +73,7 @@ queueFactory := NewArchivalQueueFactory(ArchivalQueueFactoryParams{ QueueFactoryBaseParams: QueueFactoryBaseParams{ Config: tests.NewDynamicConfig(), - TimeSource: namespace.NewMockClock(ctrl), + TimeSource: clock.NewEventTimeSource(), MetricsHandler: metricsHandler, Logger: log.NewNoopLogger(), }, diff -Nru temporal-1.21.5-1/src/service/history/archival_queue_task_executor_test.go temporal-1.22.5/src/service/history/archival_queue_task_executor_test.go --- temporal-1.21.5-1/src/service/history/archival_queue_task_executor_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/archival_queue_task_executor_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -520,6 +520,7 @@ mockMetadata, nil, metrics.NoopMetricsHandler, + func() bool { return false }, ) err := executable.Execute() if len(p.ExpectedErrorSubstrings) > 0 { diff -Nru temporal-1.21.5-1/src/service/history/commandChecker.go temporal-1.22.5/src/service/history/commandChecker.go --- temporal-1.21.5-1/src/service/history/commandChecker.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/commandChecker.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,935 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- -package history - -import ( - "fmt" - "strings" - "time" - - "github.com/pborman/uuid" - commandpb "go.temporal.io/api/command/v1" - commonpb "go.temporal.io/api/common/v1" - enumspb "go.temporal.io/api/enums/v1" - "go.temporal.io/api/serviceerror" - taskqueuepb "go.temporal.io/api/taskqueue/v1" - - persistencespb "go.temporal.io/server/api/persistence/v1" - "go.temporal.io/server/common" - "go.temporal.io/server/common/backoff" - "go.temporal.io/server/common/dynamicconfig" - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/log/tag" - "go.temporal.io/server/common/metrics" - "go.temporal.io/server/common/namespace" - "go.temporal.io/server/common/primitives/timestamp" - "go.temporal.io/server/common/searchattribute" - "go.temporal.io/server/service/history/configs" - "go.temporal.io/server/service/history/workflow" -) - -type ( - commandAttrValidator struct { - namespaceRegistry namespace.Registry - config *configs.Config - maxIDLengthLimit int - searchAttributesValidator *searchattribute.Validator - getDefaultActivityRetrySettings dynamicconfig.MapPropertyFnWithNamespaceFilter - getDefaultWorkflowRetrySettings dynamicconfig.MapPropertyFnWithNamespaceFilter - enableCrossNamespaceCommands dynamicconfig.BoolPropertyFn - } - - workflowSizeLimits struct { - blobSizeLimitWarn int - blobSizeLimitError int - memoSizeLimitWarn int - memoSizeLimitError int - numPendingChildExecutionsLimit int - numPendingActivitiesLimit int - numPendingSignalsLimit int - numPendingCancelsRequestLimit int - } - - workflowSizeChecker struct { - workflowSizeLimits - - mutableState workflow.MutableState - searchAttributesValidator *searchattribute.Validator - metricsHandler metrics.Handler - logger log.Logger - } -) - -const ( - reservedTaskQueuePrefix = "/_sys/" -) - -func newCommandAttrValidator( - namespaceRegistry namespace.Registry, - config *configs.Config, - searchAttributesValidator *searchattribute.Validator, -) *commandAttrValidator { - return &commandAttrValidator{ - namespaceRegistry: namespaceRegistry, - config: config, - maxIDLengthLimit: config.MaxIDLengthLimit(), - searchAttributesValidator: searchAttributesValidator, - getDefaultActivityRetrySettings: config.DefaultActivityRetryPolicy, - getDefaultWorkflowRetrySettings: config.DefaultWorkflowRetryPolicy, - enableCrossNamespaceCommands: config.EnableCrossNamespaceCommands, - } -} - -func newWorkflowSizeChecker( - limits workflowSizeLimits, - mutableState workflow.MutableState, - searchAttributesValidator *searchattribute.Validator, - metricsHandler metrics.Handler, - logger log.Logger, -) *workflowSizeChecker { - return &workflowSizeChecker{ - workflowSizeLimits: limits, - mutableState: mutableState, - searchAttributesValidator: searchAttributesValidator, - metricsHandler: metricsHandler, - logger: logger, - } -} - -func (c *workflowSizeChecker) checkIfPayloadSizeExceedsLimit( - commandTypeTag metrics.Tag, - payloadSize int, - message string, -) error { - - executionInfo := c.mutableState.GetExecutionInfo() - executionState := c.mutableState.GetExecutionState() - err := common.CheckEventBlobSizeLimit( - payloadSize, - c.blobSizeLimitWarn, - c.blobSizeLimitError, - executionInfo.NamespaceId, - executionInfo.WorkflowId, - executionState.RunId, - c.metricsHandler.WithTags(commandTypeTag), - c.logger, - tag.BlobSizeViolationOperation(commandTypeTag.Value()), - ) - if err != nil { - return fmt.Errorf(message) - } - return nil -} - -func (c *workflowSizeChecker) checkIfMemoSizeExceedsLimit( - memo *commonpb.Memo, - commandTypeTag 
metrics.Tag, - message string, -) error { - c.metricsHandler.Histogram(metrics.MemoSize.GetMetricName(), metrics.MemoSize.GetMetricUnit()).Record( - int64(memo.Size()), - commandTypeTag) - - executionInfo := c.mutableState.GetExecutionInfo() - executionState := c.mutableState.GetExecutionState() - err := common.CheckEventBlobSizeLimit( - memo.Size(), - c.memoSizeLimitWarn, - c.memoSizeLimitError, - executionInfo.NamespaceId, - executionInfo.WorkflowId, - executionState.RunId, - c.metricsHandler.WithTags(commandTypeTag), - c.logger, - tag.BlobSizeViolationOperation(commandTypeTag.Value()), - ) - if err != nil { - return fmt.Errorf(message) - } - return nil -} - -func withinLimit(value int, limit int) bool { - if limit <= 0 { - // limit not defined - return true - } - return value < limit -} - -func (c *workflowSizeChecker) checkCountConstraint( - numPending int, - errLimit int, - metricName string, - resourceName string, -) error { - key := c.mutableState.GetWorkflowKey() - logger := log.With( - c.logger, - tag.WorkflowNamespaceID(key.NamespaceID), - tag.WorkflowID(key.WorkflowID), - tag.WorkflowRunID(key.RunID), - ) - - if withinLimit(numPending, errLimit) { - return nil - } - c.metricsHandler.Counter(metricName).Record(1) - err := fmt.Errorf( - "the number of %s, %d, has reached the per-workflow limit of %d", - resourceName, - numPending, - errLimit, - ) - logger.Error(err.Error(), tag.Error(err)) - return err -} - -const ( - PendingChildWorkflowExecutionsDescription = "pending child workflow executions" - PendingActivitiesDescription = "pending activities" - PendingCancelRequestsDescription = "pending requests to cancel external workflows" - PendingSignalsDescription = "pending signals to external workflows" -) - -func (c *workflowSizeChecker) checkIfNumChildWorkflowsExceedsLimit() error { - return c.checkCountConstraint( - len(c.mutableState.GetPendingChildExecutionInfos()), - c.numPendingChildExecutionsLimit, - metrics.TooManyPendingChildWorkflows.GetMetricName(), - PendingChildWorkflowExecutionsDescription, - ) -} - -func (c *workflowSizeChecker) checkIfNumPendingActivitiesExceedsLimit() error { - return c.checkCountConstraint( - len(c.mutableState.GetPendingActivityInfos()), - c.numPendingActivitiesLimit, - metrics.TooManyPendingActivities.GetMetricName(), - PendingActivitiesDescription, - ) -} - -func (c *workflowSizeChecker) checkIfNumPendingCancelRequestsExceedsLimit() error { - return c.checkCountConstraint( - len(c.mutableState.GetPendingRequestCancelExternalInfos()), - c.numPendingCancelsRequestLimit, - metrics.TooManyPendingCancelRequests.GetMetricName(), - PendingCancelRequestsDescription, - ) -} - -func (c *workflowSizeChecker) checkIfNumPendingSignalsExceedsLimit() error { - return c.checkCountConstraint( - len(c.mutableState.GetPendingSignalExternalInfos()), - c.numPendingSignalsLimit, - metrics.TooManyPendingSignalsToExternalWorkflows.GetMetricName(), - PendingSignalsDescription, - ) -} - -func (c *workflowSizeChecker) checkIfSearchAttributesSizeExceedsLimit( - searchAttributes *commonpb.SearchAttributes, - namespace namespace.Name, - commandTypeTag metrics.Tag, -) error { - c.metricsHandler.Histogram(metrics.SearchAttributesSize.GetMetricName(), metrics.SearchAttributesSize.GetMetricUnit()).Record( - int64(searchAttributes.Size()), - commandTypeTag) - err := c.searchAttributesValidator.ValidateSize(searchAttributes, namespace.String()) - if err != nil { - c.logger.Warn( - "Search attributes size exceeds limits. 
Fail workflow.", - tag.Error(err), - tag.WorkflowNamespace(namespace.String()), - ) - } - return err -} - -func (v *commandAttrValidator) validateProtocolMessageAttributes( - namespaceID namespace.ID, - attributes *commandpb.ProtocolMessageCommandAttributes, - runTimeout time.Duration, -) (enumspb.WorkflowTaskFailedCause, error) { - const failedCause = enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_UPDATE_WORKFLOW_EXECUTION_MESSAGE - - if attributes == nil { - return failedCause, serviceerror.NewInvalidArgument("ProtocolMessageCommandAttributes is not set on command.") - } - - if attributes.MessageId == "" { - return failedCause, serviceerror.NewInvalidArgument("MessageID is not set on command.") - } - - return enumspb.WORKFLOW_TASK_FAILED_CAUSE_UNSPECIFIED, nil -} - -func (v *commandAttrValidator) validateActivityScheduleAttributes( - namespaceID namespace.ID, - attributes *commandpb.ScheduleActivityTaskCommandAttributes, - runTimeout time.Duration, -) (enumspb.WorkflowTaskFailedCause, error) { - - const failedCause = enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SCHEDULE_ACTIVITY_ATTRIBUTES - - if attributes == nil { - return failedCause, serviceerror.NewInvalidArgument("ScheduleActivityTaskCommandAttributes is not set on command.") - } - - defaultTaskQueueName := "" - if _, err := v.validateTaskQueue(attributes.TaskQueue, defaultTaskQueueName); err != nil { - return failedCause, err - } - - if attributes.GetActivityId() == "" { - return failedCause, serviceerror.NewInvalidArgument("ActivityId is not set on command.") - } - - if attributes.ActivityType == nil || attributes.ActivityType.GetName() == "" { - return failedCause, serviceerror.NewInvalidArgument("ActivityType is not set on command.") - } - - if err := v.validateActivityRetryPolicy(namespaceID, attributes); err != nil { - return failedCause, err - } - - if len(attributes.GetActivityId()) > v.maxIDLengthLimit { - return failedCause, serviceerror.NewInvalidArgument("ActivityID exceeds length limit.") - } - - if len(attributes.GetActivityType().GetName()) > v.maxIDLengthLimit { - return failedCause, serviceerror.NewInvalidArgument("ActivityType exceeds length limit.") - } - - // Only attempt to deduce and fill in unspecified timeouts only when all timeouts are non-negative. 
- if timestamp.DurationValue(attributes.GetScheduleToCloseTimeout()) < 0 || timestamp.DurationValue(attributes.GetScheduleToStartTimeout()) < 0 || - timestamp.DurationValue(attributes.GetStartToCloseTimeout()) < 0 || timestamp.DurationValue(attributes.GetHeartbeatTimeout()) < 0 { - return failedCause, serviceerror.NewInvalidArgument("A valid timeout may not be negative.") - } - - validScheduleToClose := timestamp.DurationValue(attributes.GetScheduleToCloseTimeout()) > 0 - validScheduleToStart := timestamp.DurationValue(attributes.GetScheduleToStartTimeout()) > 0 - validStartToClose := timestamp.DurationValue(attributes.GetStartToCloseTimeout()) > 0 - - if validScheduleToClose { - if validScheduleToStart { - attributes.ScheduleToStartTimeout = timestamp.MinDurationPtr(attributes.GetScheduleToStartTimeout(), - attributes.GetScheduleToCloseTimeout()) - } else { - attributes.ScheduleToStartTimeout = attributes.GetScheduleToCloseTimeout() - } - if validStartToClose { - attributes.StartToCloseTimeout = timestamp.MinDurationPtr(attributes.GetStartToCloseTimeout(), - attributes.GetScheduleToCloseTimeout()) - } else { - attributes.StartToCloseTimeout = attributes.GetScheduleToCloseTimeout() - } - } else if validStartToClose { - // We are in !validScheduleToClose due to the first if above - attributes.ScheduleToCloseTimeout = &runTimeout - if !validScheduleToStart { - attributes.ScheduleToStartTimeout = &runTimeout - } - } else { - // Deduction failed as there's not enough information to fill in missing timeouts. - return failedCause, serviceerror.NewInvalidArgument("A valid StartToClose or ScheduleToCloseTimeout is not set on command.") - } - // ensure activity timeout never larger than workflow timeout - if runTimeout > 0 { - if timestamp.DurationValue(attributes.GetScheduleToCloseTimeout()) > runTimeout { - attributes.ScheduleToCloseTimeout = &runTimeout - } - if timestamp.DurationValue(attributes.GetScheduleToStartTimeout()) > runTimeout { - attributes.ScheduleToStartTimeout = &runTimeout - } - if timestamp.DurationValue(attributes.GetStartToCloseTimeout()) > runTimeout { - attributes.StartToCloseTimeout = &runTimeout - } - if timestamp.DurationValue(attributes.GetHeartbeatTimeout()) > runTimeout { - attributes.HeartbeatTimeout = &runTimeout - } - } - attributes.HeartbeatTimeout = timestamp.MinDurationPtr(attributes.GetHeartbeatTimeout(), attributes.GetStartToCloseTimeout()) - - return enumspb.WORKFLOW_TASK_FAILED_CAUSE_UNSPECIFIED, nil -} - -func (v *commandAttrValidator) validateTimerScheduleAttributes( - attributes *commandpb.StartTimerCommandAttributes, -) (enumspb.WorkflowTaskFailedCause, error) { - - const failedCause = enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_START_TIMER_ATTRIBUTES - if attributes == nil { - return failedCause, serviceerror.NewInvalidArgument("StartTimerCommandAttributes is not set on command.") - } - if attributes.GetTimerId() == "" { - return failedCause, serviceerror.NewInvalidArgument("TimerId is not set on command.") - } - if len(attributes.GetTimerId()) > v.maxIDLengthLimit { - return failedCause, serviceerror.NewInvalidArgument("TimerId exceeds length limit.") - } - if timestamp.DurationValue(attributes.GetStartToFireTimeout()) <= 0 { - return failedCause, serviceerror.NewInvalidArgument("A valid StartToFireTimeout is not set on command.") - } - return enumspb.WORKFLOW_TASK_FAILED_CAUSE_UNSPECIFIED, nil -} - -func (v *commandAttrValidator) validateActivityCancelAttributes( - attributes *commandpb.RequestCancelActivityTaskCommandAttributes, -) 
(enumspb.WorkflowTaskFailedCause, error) { - - const failedCause = enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_REQUEST_CANCEL_ACTIVITY_ATTRIBUTES - if attributes == nil { - return failedCause, serviceerror.NewInvalidArgument("RequestCancelActivityTaskCommandAttributes is not set on command.") - } - if attributes.GetScheduledEventId() <= 0 { - return failedCause, serviceerror.NewInvalidArgument("ScheduledEventId is not set on command.") - } - return enumspb.WORKFLOW_TASK_FAILED_CAUSE_UNSPECIFIED, nil -} - -func (v *commandAttrValidator) validateTimerCancelAttributes( - attributes *commandpb.CancelTimerCommandAttributes, -) (enumspb.WorkflowTaskFailedCause, error) { - - const failedCause = enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_CANCEL_TIMER_ATTRIBUTES - if attributes == nil { - return failedCause, serviceerror.NewInvalidArgument("CancelTimerCommandAttributes is not set on command.") - } - if attributes.GetTimerId() == "" { - return failedCause, serviceerror.NewInvalidArgument("TimerId is not set on command.") - } - if len(attributes.GetTimerId()) > v.maxIDLengthLimit { - return failedCause, serviceerror.NewInvalidArgument("TimerId exceeds length limit.") - } - return enumspb.WORKFLOW_TASK_FAILED_CAUSE_UNSPECIFIED, nil -} - -func (v *commandAttrValidator) validateRecordMarkerAttributes( - attributes *commandpb.RecordMarkerCommandAttributes, -) (enumspb.WorkflowTaskFailedCause, error) { - - const failedCause = enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_RECORD_MARKER_ATTRIBUTES - if attributes == nil { - return failedCause, serviceerror.NewInvalidArgument("RecordMarkerCommandAttributes is not set on command.") - } - if attributes.GetMarkerName() == "" { - return failedCause, serviceerror.NewInvalidArgument("MarkerName is not set on command.") - } - if len(attributes.GetMarkerName()) > v.maxIDLengthLimit { - return failedCause, serviceerror.NewInvalidArgument("MarkerName exceeds length limit.") - } - - return enumspb.WORKFLOW_TASK_FAILED_CAUSE_UNSPECIFIED, nil -} - -func (v *commandAttrValidator) validateCompleteWorkflowExecutionAttributes( - attributes *commandpb.CompleteWorkflowExecutionCommandAttributes, -) (enumspb.WorkflowTaskFailedCause, error) { - - const failedCause = enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_COMPLETE_WORKFLOW_EXECUTION_ATTRIBUTES - if attributes == nil { - return failedCause, serviceerror.NewInvalidArgument("CompleteWorkflowExecutionCommandAttributes is not set on command.") - } - return enumspb.WORKFLOW_TASK_FAILED_CAUSE_UNSPECIFIED, nil -} - -func (v *commandAttrValidator) validateFailWorkflowExecutionAttributes( - attributes *commandpb.FailWorkflowExecutionCommandAttributes, -) (enumspb.WorkflowTaskFailedCause, error) { - - const failedCause = enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_FAIL_WORKFLOW_EXECUTION_ATTRIBUTES - if attributes == nil { - return failedCause, serviceerror.NewInvalidArgument("FailWorkflowExecutionCommandAttributes is not set on command.") - } - if attributes.GetFailure() == nil { - return failedCause, serviceerror.NewInvalidArgument("Failure is not set on command.") - } - return enumspb.WORKFLOW_TASK_FAILED_CAUSE_UNSPECIFIED, nil -} - -func (v *commandAttrValidator) validateCancelWorkflowExecutionAttributes( - attributes *commandpb.CancelWorkflowExecutionCommandAttributes, -) (enumspb.WorkflowTaskFailedCause, error) { - - const failedCause = enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_CANCEL_WORKFLOW_EXECUTION_ATTRIBUTES - if attributes == nil { - return failedCause, serviceerror.NewInvalidArgument("CancelWorkflowExecutionCommandAttributes is not set on command.") 
- } - return enumspb.WORKFLOW_TASK_FAILED_CAUSE_UNSPECIFIED, nil -} - -func (v *commandAttrValidator) validateCancelExternalWorkflowExecutionAttributes( - namespaceID namespace.ID, - targetNamespaceID namespace.ID, - initiatedChildExecutionsInSession map[string]struct{}, - attributes *commandpb.RequestCancelExternalWorkflowExecutionCommandAttributes, -) (enumspb.WorkflowTaskFailedCause, error) { - - const failedCause = enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_REQUEST_CANCEL_EXTERNAL_WORKFLOW_EXECUTION_ATTRIBUTES - if err := v.validateCrossNamespaceCall( - namespaceID, - targetNamespaceID, - ); err != nil { - return failedCause, err - } - - if attributes == nil { - return failedCause, serviceerror.NewInvalidArgument("RequestCancelExternalWorkflowExecutionCommandAttributes is not set on command.") - } - if attributes.GetWorkflowId() == "" { - return failedCause, serviceerror.NewInvalidArgument("WorkflowId is not set on command.") - } - if len(attributes.GetNamespace()) > v.maxIDLengthLimit { - return failedCause, serviceerror.NewInvalidArgument("Namespace exceeds length limit.") - } - if len(attributes.GetWorkflowId()) > v.maxIDLengthLimit { - return failedCause, serviceerror.NewInvalidArgument("WorkflowId exceeds length limit.") - } - runID := attributes.GetRunId() - if runID != "" && uuid.Parse(runID) == nil { - return failedCause, serviceerror.NewInvalidArgument("Invalid RunId set on command.") - } - if _, ok := initiatedChildExecutionsInSession[attributes.GetWorkflowId()]; ok { - return failedCause, serviceerror.NewInvalidArgument("Start and RequestCancel for child workflow is not allowed in same workflow task.") - } - - return enumspb.WORKFLOW_TASK_FAILED_CAUSE_UNSPECIFIED, nil -} - -func (v *commandAttrValidator) validateSignalExternalWorkflowExecutionAttributes( - namespaceID namespace.ID, - targetNamespaceID namespace.ID, - attributes *commandpb.SignalExternalWorkflowExecutionCommandAttributes, -) (enumspb.WorkflowTaskFailedCause, error) { - - const failedCause = enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SIGNAL_WORKFLOW_EXECUTION_ATTRIBUTES - if err := v.validateCrossNamespaceCall( - namespaceID, - targetNamespaceID, - ); err != nil { - return failedCause, err - } - - if attributes == nil { - return failedCause, serviceerror.NewInvalidArgument("SignalExternalWorkflowExecutionCommandAttributes is not set on command.") - } - if attributes.Execution == nil { - return failedCause, serviceerror.NewInvalidArgument("Execution is nil on command.") - } - if attributes.Execution.GetWorkflowId() == "" { - return failedCause, serviceerror.NewInvalidArgument("WorkflowId is not set on command.") - } - if len(attributes.GetNamespace()) > v.maxIDLengthLimit { - return failedCause, serviceerror.NewInvalidArgument("Namespace exceeds length limit.") - } - if len(attributes.Execution.GetWorkflowId()) > v.maxIDLengthLimit { - return failedCause, serviceerror.NewInvalidArgument("WorkflowId exceeds length limit.") - } - - targetRunID := attributes.Execution.GetRunId() - if targetRunID != "" && uuid.Parse(targetRunID) == nil { - return failedCause, serviceerror.NewInvalidArgument("Invalid RunId set on command.") - } - if attributes.GetSignalName() == "" { - return failedCause, serviceerror.NewInvalidArgument("SignalName is not set on command.") - } - - return enumspb.WORKFLOW_TASK_FAILED_CAUSE_UNSPECIFIED, nil -} - -func (v *commandAttrValidator) validateUpsertWorkflowSearchAttributes( - namespace namespace.Name, - attributes *commandpb.UpsertWorkflowSearchAttributesCommandAttributes, -) 
(enumspb.WorkflowTaskFailedCause, error) { - - const failedCause = enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SEARCH_ATTRIBUTES - if attributes == nil { - return failedCause, serviceerror.NewInvalidArgument("UpsertWorkflowSearchAttributesCommandAttributes is not set on command.") - } - if attributes.SearchAttributes == nil { - return failedCause, serviceerror.NewInvalidArgument("SearchAttributes is not set on command.") - } - if len(attributes.GetSearchAttributes().GetIndexedFields()) == 0 { - return failedCause, serviceerror.NewInvalidArgument("IndexedFields is empty on command.") - } - if err := v.searchAttributesValidator.Validate(attributes.GetSearchAttributes(), namespace.String()); err != nil { - return failedCause, err - } - - return enumspb.WORKFLOW_TASK_FAILED_CAUSE_UNSPECIFIED, nil -} - -func (v *commandAttrValidator) validateModifyWorkflowProperties( - namespace namespace.Name, - attributes *commandpb.ModifyWorkflowPropertiesCommandAttributes, -) (enumspb.WorkflowTaskFailedCause, error) { - const failedCause = enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_MODIFY_WORKFLOW_PROPERTIES_ATTRIBUTES - if attributes == nil { - return failedCause, serviceerror.NewInvalidArgument( - "ModifyWorkflowPropertiesCommandAttributes is not set on command.", - ) - } - - // check at least one attribute is not nil - if attributes.UpsertedMemo == nil { - return failedCause, serviceerror.NewInvalidArgument( - "ModifyWorkflowPropertiesCommandAttributes attributes are all nil.", - ) - } - - // check if UpsertedMemo is not nil, then it's not an empty map - if attributes.UpsertedMemo != nil && len(attributes.GetUpsertedMemo().GetFields()) == 0 { - return failedCause, serviceerror.NewInvalidArgument("UpsertedMemo.Fields is empty on command.") - } - - return enumspb.WORKFLOW_TASK_FAILED_CAUSE_UNSPECIFIED, nil -} - -func (v *commandAttrValidator) validateContinueAsNewWorkflowExecutionAttributes( - namespace namespace.Name, - attributes *commandpb.ContinueAsNewWorkflowExecutionCommandAttributes, - executionInfo *persistencespb.WorkflowExecutionInfo, -) (enumspb.WorkflowTaskFailedCause, error) { - - const failedCause = enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_CONTINUE_AS_NEW_ATTRIBUTES - if attributes == nil { - return failedCause, serviceerror.NewInvalidArgument("ContinueAsNewWorkflowExecutionCommandAttributes is not set on command.") - } - - // Inherit workflow type from previous execution if not provided on command - if attributes.WorkflowType == nil || attributes.WorkflowType.GetName() == "" { - attributes.WorkflowType = &commonpb.WorkflowType{Name: executionInfo.WorkflowTypeName} - } - - if len(attributes.WorkflowType.GetName()) > v.maxIDLengthLimit { - return failedCause, serviceerror.NewInvalidArgument("WorkflowType exceeds length limit.") - } - - // Inherit task queue from previous execution if not provided on command - taskQueue, err := v.validateTaskQueue(attributes.TaskQueue, executionInfo.TaskQueue) - if err != nil { - return failedCause, err - } - attributes.TaskQueue = taskQueue - - if timestamp.DurationValue(attributes.GetWorkflowRunTimeout()) < 0 { - return failedCause, serviceerror.NewInvalidArgument("Invalid WorkflowRunTimeout.") - } - - if timestamp.DurationValue(attributes.GetWorkflowTaskTimeout()) < 0 { - return failedCause, serviceerror.NewInvalidArgument("Invalid WorkflowTaskTimeout.") - } - - if timestamp.DurationValue(attributes.GetBackoffStartInterval()) < 0 { - return failedCause, serviceerror.NewInvalidArgument("Invalid BackoffStartInterval.") - } - - if 
timestamp.DurationValue(attributes.GetWorkflowRunTimeout()) == 0 { - attributes.WorkflowRunTimeout = timestamp.DurationPtr(timestamp.DurationValue(executionInfo.WorkflowRunTimeout)) - } - - if timestamp.DurationValue(attributes.GetWorkflowTaskTimeout()) == 0 { - attributes.WorkflowTaskTimeout = timestamp.DurationPtr(timestamp.DurationValue(executionInfo.DefaultWorkflowTaskTimeout)) - } - - attributes.WorkflowRunTimeout = timestamp.DurationPtr( - common.OverrideWorkflowRunTimeout( - timestamp.DurationValue(attributes.GetWorkflowRunTimeout()), - timestamp.DurationValue(executionInfo.GetWorkflowExecutionTimeout()), - ), - ) - - attributes.WorkflowTaskTimeout = timestamp.DurationPtr( - common.OverrideWorkflowTaskTimeout( - namespace.String(), - timestamp.DurationValue(attributes.GetWorkflowTaskTimeout()), - timestamp.DurationValue(attributes.GetWorkflowRunTimeout()), - v.config.DefaultWorkflowTaskTimeout, - ), - ) - - if err := v.validateWorkflowRetryPolicy(namespace, attributes.RetryPolicy); err != nil { - return failedCause, err - } - - if err = v.searchAttributesValidator.Validate(attributes.GetSearchAttributes(), namespace.String()); err != nil { - return enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SEARCH_ATTRIBUTES, err - } - - return enumspb.WORKFLOW_TASK_FAILED_CAUSE_UNSPECIFIED, nil -} - -func (v *commandAttrValidator) validateStartChildExecutionAttributes( - namespaceID namespace.ID, - targetNamespaceID namespace.ID, - targetNamespace namespace.Name, - attributes *commandpb.StartChildWorkflowExecutionCommandAttributes, - parentInfo *persistencespb.WorkflowExecutionInfo, - defaultWorkflowTaskTimeoutFn dynamicconfig.DurationPropertyFnWithNamespaceFilter, -) (enumspb.WorkflowTaskFailedCause, error) { - - const failedCause = enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_START_CHILD_EXECUTION_ATTRIBUTES - if err := v.validateCrossNamespaceCall( - namespaceID, - targetNamespaceID, - ); err != nil { - return failedCause, err - } - - if attributes == nil { - return failedCause, serviceerror.NewInvalidArgument("StartChildWorkflowExecutionCommandAttributes is not set on command.") - } - - if attributes.GetWorkflowId() == "" { - return failedCause, serviceerror.NewInvalidArgument("Required field WorkflowId is not set on command.") - } - - if attributes.WorkflowType == nil || attributes.WorkflowType.GetName() == "" { - return failedCause, serviceerror.NewInvalidArgument("Required field WorkflowType is not set on command.") - } - - if len(attributes.GetNamespace()) > v.maxIDLengthLimit { - return failedCause, serviceerror.NewInvalidArgument("Namespace exceeds length limit.") - } - - if len(attributes.GetWorkflowId()) > v.maxIDLengthLimit { - return failedCause, serviceerror.NewInvalidArgument("WorkflowId exceeds length limit.") - } - - if len(attributes.WorkflowType.GetName()) > v.maxIDLengthLimit { - return failedCause, serviceerror.NewInvalidArgument("WorkflowType exceeds length limit.") - } - - if timestamp.DurationValue(attributes.GetWorkflowExecutionTimeout()) < 0 { - return failedCause, serviceerror.NewInvalidArgument("Invalid WorkflowExecutionTimeout.") - } - - if timestamp.DurationValue(attributes.GetWorkflowRunTimeout()) < 0 { - return failedCause, serviceerror.NewInvalidArgument("Invalid WorkflowRunTimeout.") - } - - if timestamp.DurationValue(attributes.GetWorkflowTaskTimeout()) < 0 { - return failedCause, serviceerror.NewInvalidArgument("Invalid WorkflowTaskTimeout.") - } - - if err := v.validateWorkflowRetryPolicy(namespace.Name(attributes.GetNamespace()), attributes.RetryPolicy); err != nil { - 
return failedCause, err - } - - if err := backoff.ValidateSchedule(attributes.GetCronSchedule()); err != nil { - return failedCause, err - } - - if err := v.searchAttributesValidator.Validate(attributes.GetSearchAttributes(), targetNamespace.String()); err != nil { - return enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SEARCH_ATTRIBUTES, err - } - - // Inherit taskqueue from parent workflow execution if not provided on command - taskQueue, err := v.validateTaskQueue(attributes.TaskQueue, parentInfo.TaskQueue) - if err != nil { - return failedCause, err - } - attributes.TaskQueue = taskQueue - - // workflow execution timeout is left as is - // if workflow execution timeout == 0 -> infinity - - attributes.WorkflowRunTimeout = timestamp.DurationPtr( - common.OverrideWorkflowRunTimeout( - timestamp.DurationValue(attributes.GetWorkflowRunTimeout()), - timestamp.DurationValue(attributes.GetWorkflowExecutionTimeout()), - ), - ) - - attributes.WorkflowTaskTimeout = timestamp.DurationPtr( - common.OverrideWorkflowTaskTimeout( - targetNamespace.String(), - timestamp.DurationValue(attributes.GetWorkflowTaskTimeout()), - timestamp.DurationValue(attributes.GetWorkflowRunTimeout()), - defaultWorkflowTaskTimeoutFn, - ), - ) - - return enumspb.WORKFLOW_TASK_FAILED_CAUSE_UNSPECIFIED, nil -} - -func (v *commandAttrValidator) validateTaskQueue( - taskQueue *taskqueuepb.TaskQueue, - defaultVal string, -) (*taskqueuepb.TaskQueue, error) { - - if taskQueue == nil { - taskQueue = &taskqueuepb.TaskQueue{ - Kind: enumspb.TASK_QUEUE_KIND_NORMAL, - } - } - - if taskQueue.GetName() == "" { - if defaultVal == "" { - return taskQueue, serviceerror.NewInvalidArgument("missing task queue name") - } - taskQueue.Name = defaultVal - return taskQueue, nil - } - - name := taskQueue.GetName() - if len(name) > v.maxIDLengthLimit { - return taskQueue, serviceerror.NewInvalidArgument(fmt.Sprintf("task queue name exceeds length limit of %v", v.maxIDLengthLimit)) - } - - if strings.HasPrefix(name, reservedTaskQueuePrefix) { - return taskQueue, serviceerror.NewInvalidArgument(fmt.Sprintf("task queue name cannot start with reserved prefix %v", reservedTaskQueuePrefix)) - } - - return taskQueue, nil -} - -func (v *commandAttrValidator) validateActivityRetryPolicy( - namespaceID namespace.ID, - attributes *commandpb.ScheduleActivityTaskCommandAttributes, -) error { - if attributes.RetryPolicy == nil { - attributes.RetryPolicy = &commonpb.RetryPolicy{} - } - - defaultActivityRetrySettings := common.FromConfigToDefaultRetrySettings(v.getDefaultActivityRetrySettings(namespaceID.String())) - common.EnsureRetryPolicyDefaults(attributes.RetryPolicy, defaultActivityRetrySettings) - return common.ValidateRetryPolicy(attributes.RetryPolicy) -} - -func (v *commandAttrValidator) validateWorkflowRetryPolicy( - namespaceName namespace.Name, - retryPolicy *commonpb.RetryPolicy, -) error { - if retryPolicy == nil { - // By default, if the user does not explicitly set a retry policy for a Child Workflow, do not perform any retries. 
- return nil - } - - // Otherwise, for any unset fields on the retry policy, set with defaults - defaultWorkflowRetrySettings := common.FromConfigToDefaultRetrySettings(v.getDefaultWorkflowRetrySettings(namespaceName.String())) - common.EnsureRetryPolicyDefaults(retryPolicy, defaultWorkflowRetrySettings) - return common.ValidateRetryPolicy(retryPolicy) -} - -func (v *commandAttrValidator) validateCrossNamespaceCall( - namespaceID namespace.ID, - targetNamespaceID namespace.ID, -) error { - - // same name, no check needed - if namespaceID == targetNamespaceID { - return nil - } - - if !v.enableCrossNamespaceCommands() { - return serviceerror.NewInvalidArgument("cross namespace commands are not allowed") - } - - namespaceEntry, err := v.namespaceRegistry.GetNamespaceByID(namespaceID) - if err != nil { - return err - } - - targetNamespaceEntry, err := v.namespaceRegistry.GetNamespaceByID(targetNamespaceID) - if err != nil { - return err - } - - // both local namespace - if !namespaceEntry.IsGlobalNamespace() && !targetNamespaceEntry.IsGlobalNamespace() { - return nil - } - - namespaceClusters := namespaceEntry.ClusterNames() - targetNamespaceClusters := targetNamespaceEntry.ClusterNames() - - // one is local namespace, another one is global namespace or both global namespace - // treat global namespace with one replication cluster as local namespace - if len(namespaceClusters) == 1 && len(targetNamespaceClusters) == 1 { - if namespaceClusters[0] == targetNamespaceClusters[0] { - return nil - } - return v.createCrossNamespaceCallError(namespaceEntry, targetNamespaceEntry) - } - return v.createCrossNamespaceCallError(namespaceEntry, targetNamespaceEntry) -} - -func (v *commandAttrValidator) createCrossNamespaceCallError( - namespaceEntry *namespace.Namespace, - targetNamespaceEntry *namespace.Namespace, -) error { - return serviceerror.NewInvalidArgument(fmt.Sprintf("unable to process cross namespace command between %v and %v", namespaceEntry.Name(), targetNamespaceEntry.Name())) -} - -func (v *commandAttrValidator) validateCommandSequence( - commands []*commandpb.Command, -) error { - closeCommand := enumspb.COMMAND_TYPE_UNSPECIFIED - - for _, command := range commands { - if closeCommand != enumspb.COMMAND_TYPE_UNSPECIFIED { - return serviceerror.NewInvalidArgument(fmt.Sprintf( - "invalid command sequence: [%v], command %s must be the last command.", - strings.Join(v.commandTypes(commands), ", "), closeCommand.String(), - )) - } - - switch command.GetCommandType() { - case enumspb.COMMAND_TYPE_SCHEDULE_ACTIVITY_TASK, - enumspb.COMMAND_TYPE_REQUEST_CANCEL_ACTIVITY_TASK, - enumspb.COMMAND_TYPE_START_TIMER, - enumspb.COMMAND_TYPE_CANCEL_TIMER, - enumspb.COMMAND_TYPE_RECORD_MARKER, - enumspb.COMMAND_TYPE_REQUEST_CANCEL_EXTERNAL_WORKFLOW_EXECUTION, - enumspb.COMMAND_TYPE_SIGNAL_EXTERNAL_WORKFLOW_EXECUTION, - enumspb.COMMAND_TYPE_START_CHILD_WORKFLOW_EXECUTION, - enumspb.COMMAND_TYPE_UPSERT_WORKFLOW_SEARCH_ATTRIBUTES, - enumspb.COMMAND_TYPE_MODIFY_WORKFLOW_PROPERTIES, - enumspb.COMMAND_TYPE_PROTOCOL_MESSAGE: - // noop - case enumspb.COMMAND_TYPE_CONTINUE_AS_NEW_WORKFLOW_EXECUTION, - enumspb.COMMAND_TYPE_COMPLETE_WORKFLOW_EXECUTION, - enumspb.COMMAND_TYPE_FAIL_WORKFLOW_EXECUTION, - enumspb.COMMAND_TYPE_CANCEL_WORKFLOW_EXECUTION: - closeCommand = command.GetCommandType() - default: - return serviceerror.NewInvalidArgument(fmt.Sprintf("unknown command type: %v", command.GetCommandType())) - } - } - return nil -} - -func (v *commandAttrValidator) commandTypes( - commands []*commandpb.Command, -) 
[]string { - result := make([]string, len(commands)) - for index, command := range commands { - result[index] = command.GetCommandType().String() - } - return result -} diff -Nru temporal-1.21.5-1/src/service/history/commandChecker_test.go temporal-1.22.5/src/service/history/commandChecker_test.go --- temporal-1.21.5-1/src/service/history/commandChecker_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/commandChecker_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,947 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package history - -import ( - "math/rand" - "testing" - "time" - - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - commandpb "go.temporal.io/api/command/v1" - commonpb "go.temporal.io/api/common/v1" - enumspb "go.temporal.io/api/enums/v1" - "go.temporal.io/api/serviceerror" - taskqueuepb "go.temporal.io/api/taskqueue/v1" - - persistencespb "go.temporal.io/server/api/persistence/v1" - "go.temporal.io/server/common" - "go.temporal.io/server/common/cluster" - "go.temporal.io/server/common/definition" - "go.temporal.io/server/common/dynamicconfig" - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/log/tag" - "go.temporal.io/server/common/metrics" - "go.temporal.io/server/common/namespace" - "go.temporal.io/server/common/payloads" - "go.temporal.io/server/common/persistence/visibility/manager" - "go.temporal.io/server/common/primitives/timestamp" - "go.temporal.io/server/common/searchattribute" - "go.temporal.io/server/service/history/configs" - "go.temporal.io/server/service/history/tests" - "go.temporal.io/server/service/history/workflow" -) - -var ( - nonTerminalCommands = []*commandpb.Command{ - {CommandType: enumspb.COMMAND_TYPE_SCHEDULE_ACTIVITY_TASK}, - {CommandType: enumspb.COMMAND_TYPE_REQUEST_CANCEL_ACTIVITY_TASK}, - {CommandType: enumspb.COMMAND_TYPE_CANCEL_TIMER}, - {CommandType: enumspb.COMMAND_TYPE_CANCEL_TIMER}, - {CommandType: enumspb.COMMAND_TYPE_RECORD_MARKER}, - {CommandType: enumspb.COMMAND_TYPE_REQUEST_CANCEL_EXTERNAL_WORKFLOW_EXECUTION}, - {CommandType: enumspb.COMMAND_TYPE_SIGNAL_EXTERNAL_WORKFLOW_EXECUTION}, - {CommandType: enumspb.COMMAND_TYPE_START_CHILD_WORKFLOW_EXECUTION}, - {CommandType: 
enumspb.COMMAND_TYPE_UPSERT_WORKFLOW_SEARCH_ATTRIBUTES}, - {CommandType: enumspb.COMMAND_TYPE_MODIFY_WORKFLOW_PROPERTIES}, - } - - terminalCommands = []*commandpb.Command{ - {CommandType: enumspb.COMMAND_TYPE_CONTINUE_AS_NEW_WORKFLOW_EXECUTION}, - {CommandType: enumspb.COMMAND_TYPE_COMPLETE_WORKFLOW_EXECUTION}, - {CommandType: enumspb.COMMAND_TYPE_FAIL_WORKFLOW_EXECUTION}, - {CommandType: enumspb.COMMAND_TYPE_CANCEL_WORKFLOW_EXECUTION}, - } -) - -type ( - commandAttrValidatorSuite struct { - suite.Suite - *require.Assertions - - controller *gomock.Controller - mockNamespaceCache *namespace.MockRegistry - mockVisibilityManager *manager.MockVisibilityManager - - validator *commandAttrValidator - - testNamespaceID namespace.ID - testTargetNamespaceID namespace.ID - } -) - -func TestCommandAttrValidatorSuite(t *testing.T) { - s := new(commandAttrValidatorSuite) - suite.Run(t, s) -} - -func (s *commandAttrValidatorSuite) SetupSuite() { - s.testNamespaceID = "test namespace ID" - s.testTargetNamespaceID = "test target namespace ID" -} - -func (s *commandAttrValidatorSuite) TearDownSuite() { -} - -func (s *commandAttrValidatorSuite) SetupTest() { - s.Assertions = require.New(s.T()) - - s.controller = gomock.NewController(s.T()) - s.mockNamespaceCache = namespace.NewMockRegistry(s.controller) - - s.mockVisibilityManager = manager.NewMockVisibilityManager(s.controller) - s.mockVisibilityManager.EXPECT().GetIndexName().Return("index-name").AnyTimes() - s.mockVisibilityManager.EXPECT(). - ValidateCustomSearchAttributes(gomock.Any()). - DoAndReturn( - func(searchAttributes map[string]any) (map[string]any, error) { - return searchAttributes, nil - }, - ). - AnyTimes() - - config := &configs.Config{ - MaxIDLengthLimit: dynamicconfig.GetIntPropertyFn(1000), - SearchAttributesNumberOfKeysLimit: dynamicconfig.GetIntPropertyFilteredByNamespace(100), - SearchAttributesSizeOfValueLimit: dynamicconfig.GetIntPropertyFilteredByNamespace(2 * 1024), - SearchAttributesTotalSizeLimit: dynamicconfig.GetIntPropertyFilteredByNamespace(40 * 1024), - DefaultActivityRetryPolicy: dynamicconfig.GetMapPropertyFnWithNamespaceFilter(common.GetDefaultRetryPolicyConfigOptions()), - DefaultWorkflowRetryPolicy: dynamicconfig.GetMapPropertyFnWithNamespaceFilter(common.GetDefaultRetryPolicyConfigOptions()), - EnableCrossNamespaceCommands: dynamicconfig.GetBoolPropertyFn(true), - DefaultWorkflowTaskTimeout: dynamicconfig.GetDurationPropertyFnFilteredByNamespace(common.DefaultWorkflowTaskTimeout), - } - s.validator = newCommandAttrValidator( - s.mockNamespaceCache, - config, - searchattribute.NewValidator( - searchattribute.NewTestProvider(), - searchattribute.NewTestMapperProvider(nil), - config.SearchAttributesNumberOfKeysLimit, - config.SearchAttributesSizeOfValueLimit, - config.SearchAttributesTotalSizeLimit, - s.mockVisibilityManager, - false, - )) -} - -func (s *commandAttrValidatorSuite) TearDownTest() { - s.controller.Finish() -} - -func (s *commandAttrValidatorSuite) TestValidateSignalExternalWorkflowExecutionAttributes() { - namespaceEntry := namespace.NewLocalNamespaceForTest( - &persistencespb.NamespaceInfo{Name: s.testNamespaceID.String()}, - nil, - cluster.TestCurrentClusterName, - ) - targetNamespaceEntry := namespace.NewLocalNamespaceForTest( - &persistencespb.NamespaceInfo{Name: s.testTargetNamespaceID.String()}, - nil, - cluster.TestCurrentClusterName, - ) - - s.mockNamespaceCache.EXPECT().GetNamespaceByID(s.testNamespaceID).Return(namespaceEntry, nil).AnyTimes() - 
s.mockNamespaceCache.EXPECT().GetNamespaceByID(s.testTargetNamespaceID).Return(targetNamespaceEntry, nil).AnyTimes() - - var attributes *commandpb.SignalExternalWorkflowExecutionCommandAttributes - - fc, err := s.validator.validateSignalExternalWorkflowExecutionAttributes(s.testNamespaceID, s.testTargetNamespaceID, attributes) - s.EqualError(err, "SignalExternalWorkflowExecutionCommandAttributes is not set on command.") - s.Equal(enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SIGNAL_WORKFLOW_EXECUTION_ATTRIBUTES, fc) - - attributes = &commandpb.SignalExternalWorkflowExecutionCommandAttributes{} - fc, err = s.validator.validateSignalExternalWorkflowExecutionAttributes(s.testNamespaceID, s.testTargetNamespaceID, attributes) - s.EqualError(err, "Execution is nil on command.") - s.Equal(enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SIGNAL_WORKFLOW_EXECUTION_ATTRIBUTES, fc) - - attributes.Execution = &commonpb.WorkflowExecution{} - attributes.Execution.WorkflowId = "workflow-id" - fc, err = s.validator.validateSignalExternalWorkflowExecutionAttributes(s.testNamespaceID, s.testTargetNamespaceID, attributes) - s.EqualError(err, "SignalName is not set on command.") - s.Equal(enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SIGNAL_WORKFLOW_EXECUTION_ATTRIBUTES, fc) - - attributes.Execution.RunId = "run-id" - fc, err = s.validator.validateSignalExternalWorkflowExecutionAttributes(s.testNamespaceID, s.testTargetNamespaceID, attributes) - s.EqualError(err, "Invalid RunId set on command.") - attributes.Execution.RunId = tests.RunID - s.Equal(enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SIGNAL_WORKFLOW_EXECUTION_ATTRIBUTES, fc) - - attributes.SignalName = "my signal name" - fc, err = s.validator.validateSignalExternalWorkflowExecutionAttributes(s.testNamespaceID, s.testTargetNamespaceID, attributes) - s.NoError(err) - s.Equal(enumspb.WORKFLOW_TASK_FAILED_CAUSE_UNSPECIFIED, fc) - - attributes.Input = payloads.EncodeString("test input") - fc, err = s.validator.validateSignalExternalWorkflowExecutionAttributes(s.testNamespaceID, s.testTargetNamespaceID, attributes) - s.NoError(err) - s.Equal(enumspb.WORKFLOW_TASK_FAILED_CAUSE_UNSPECIFIED, fc) -} - -func (s *commandAttrValidatorSuite) TestValidateUpsertWorkflowSearchAttributes() { - namespace := namespace.Name("tests.Namespace") - var attributes *commandpb.UpsertWorkflowSearchAttributesCommandAttributes - - fc, err := s.validator.validateUpsertWorkflowSearchAttributes(namespace, attributes) - s.EqualError(err, "UpsertWorkflowSearchAttributesCommandAttributes is not set on command.") - s.Equal(enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SEARCH_ATTRIBUTES, fc) - - attributes = &commandpb.UpsertWorkflowSearchAttributesCommandAttributes{} - fc, err = s.validator.validateUpsertWorkflowSearchAttributes(namespace, attributes) - s.EqualError(err, "SearchAttributes is not set on command.") - s.Equal(enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SEARCH_ATTRIBUTES, fc) - - attributes.SearchAttributes = &commonpb.SearchAttributes{} - fc, err = s.validator.validateUpsertWorkflowSearchAttributes(namespace, attributes) - s.EqualError(err, "IndexedFields is empty on command.") - s.Equal(enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SEARCH_ATTRIBUTES, fc) - - saPayload, err := searchattribute.EncodeValue("bytes", enumspb.INDEXED_VALUE_TYPE_KEYWORD) - s.NoError(err) - attributes.SearchAttributes.IndexedFields = map[string]*commonpb.Payload{ - "CustomKeywordField": saPayload, - } - fc, err = s.validator.validateUpsertWorkflowSearchAttributes(namespace, attributes) - s.NoError(err) - 
s.Equal(enumspb.WORKFLOW_TASK_FAILED_CAUSE_UNSPECIFIED, fc) -} - -func (s *commandAttrValidatorSuite) TestValidateContinueAsNewWorkflowExecutionAttributes() { - executionTimeout := time.Hour - workflowTypeName := "workflowType" - taskQueue := "taskQueue" - - attributes := &commandpb.ContinueAsNewWorkflowExecutionCommandAttributes{ - // workflow type name and task queue name should be retrieved from existing workflow info - - // WorkflowRunTimeout should be shorten to execution timeout - WorkflowRunTimeout: timestamp.DurationPtr(executionTimeout * 2), - // WorkflowTaskTimeout should be shorten to max workflow task timeout - WorkflowTaskTimeout: timestamp.DurationPtr(common.MaxWorkflowTaskStartToCloseTimeout * 2), - } - - executionInfo := &persistencespb.WorkflowExecutionInfo{ - WorkflowTypeName: workflowTypeName, - TaskQueue: taskQueue, - WorkflowExecutionTimeout: timestamp.DurationPtr(executionTimeout), - } - - fc, err := s.validator.validateContinueAsNewWorkflowExecutionAttributes( - tests.Namespace, - attributes, - executionInfo, - ) - s.NoError(err) - s.Equal(enumspb.WORKFLOW_TASK_FAILED_CAUSE_UNSPECIFIED, fc) - - s.Equal(workflowTypeName, attributes.GetWorkflowType().GetName()) - s.Equal(taskQueue, attributes.GetTaskQueue().GetName()) - s.Equal(executionTimeout, *attributes.GetWorkflowRunTimeout()) - s.Equal(common.MaxWorkflowTaskStartToCloseTimeout, *attributes.GetWorkflowTaskTimeout()) -} - -func (s *commandAttrValidatorSuite) TestValidateModifyWorkflowProperties() { - namespace := namespace.Name("tests.Namespace") - var attributes *commandpb.ModifyWorkflowPropertiesCommandAttributes - - fc, err := s.validator.validateModifyWorkflowProperties(namespace, attributes) - s.EqualError(err, "ModifyWorkflowPropertiesCommandAttributes is not set on command.") - s.Equal(enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_MODIFY_WORKFLOW_PROPERTIES_ATTRIBUTES, fc) - - // test attributes has at least one non-nil attribute - attributes = &commandpb.ModifyWorkflowPropertiesCommandAttributes{} - fc, err = s.validator.validateModifyWorkflowProperties(namespace, attributes) - s.EqualError(err, "ModifyWorkflowPropertiesCommandAttributes attributes are all nil.") - s.Equal(enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_MODIFY_WORKFLOW_PROPERTIES_ATTRIBUTES, fc) - - // test UpsertedMemo cannot be an empty map - attributes = &commandpb.ModifyWorkflowPropertiesCommandAttributes{ - UpsertedMemo: &commonpb.Memo{}, - } - fc, err = s.validator.validateModifyWorkflowProperties(namespace, attributes) - s.EqualError(err, "UpsertedMemo.Fields is empty on command.") - s.Equal(enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_MODIFY_WORKFLOW_PROPERTIES_ATTRIBUTES, fc) -} - -func (s *commandAttrValidatorSuite) TestValidateCrossNamespaceCall_LocalToLocal() { - namespaceEntry := namespace.NewLocalNamespaceForTest( - &persistencespb.NamespaceInfo{Name: s.testNamespaceID.String()}, - nil, - cluster.TestCurrentClusterName, - ) - targetNamespaceEntry := namespace.NewLocalNamespaceForTest( - &persistencespb.NamespaceInfo{Name: s.testTargetNamespaceID.String()}, - nil, - cluster.TestCurrentClusterName, - ) - - s.mockNamespaceCache.EXPECT().GetNamespaceByID(s.testNamespaceID).Return(namespaceEntry, nil) - s.mockNamespaceCache.EXPECT().GetNamespaceByID(s.testTargetNamespaceID).Return(targetNamespaceEntry, nil) - - err := s.validator.validateCrossNamespaceCall(s.testNamespaceID, s.testTargetNamespaceID) - s.Nil(err) -} - -func (s *commandAttrValidatorSuite) TestValidateCrossNamespaceCall_LocalToEffectiveLocal_SameCluster() { - namespaceEntry := 
namespace.NewLocalNamespaceForTest( - &persistencespb.NamespaceInfo{Name: s.testNamespaceID.String()}, - nil, - cluster.TestCurrentClusterName, - ) - targetNamespaceEntry := namespace.NewGlobalNamespaceForTest( - &persistencespb.NamespaceInfo{Name: s.testTargetNamespaceID.String()}, - nil, - &persistencespb.NamespaceReplicationConfig{ - ActiveClusterName: cluster.TestCurrentClusterName, - Clusters: []string{cluster.TestCurrentClusterName}, - }, - 1234, - ) - - s.mockNamespaceCache.EXPECT().GetNamespaceByID(s.testNamespaceID).Return(namespaceEntry, nil) - s.mockNamespaceCache.EXPECT().GetNamespaceByID(s.testTargetNamespaceID).Return(targetNamespaceEntry, nil) - - err := s.validator.validateCrossNamespaceCall(s.testNamespaceID, s.testTargetNamespaceID) - s.Nil(err) -} - -func (s *commandAttrValidatorSuite) TestValidateCrossNamespaceCall_LocalToEffectiveLocal_DiffCluster() { - namespaceEntry := namespace.NewLocalNamespaceForTest( - &persistencespb.NamespaceInfo{Name: s.testNamespaceID.String()}, - nil, - cluster.TestCurrentClusterName, - ) - targetNamespaceEntry := namespace.NewGlobalNamespaceForTest( - &persistencespb.NamespaceInfo{Name: s.testTargetNamespaceID.String()}, - nil, - &persistencespb.NamespaceReplicationConfig{ - ActiveClusterName: cluster.TestAlternativeClusterName, - Clusters: []string{cluster.TestAlternativeClusterName}, - }, - 1234, - ) - - s.mockNamespaceCache.EXPECT().GetNamespaceByID(s.testNamespaceID).Return(namespaceEntry, nil) - s.mockNamespaceCache.EXPECT().GetNamespaceByID(s.testTargetNamespaceID).Return(targetNamespaceEntry, nil) - - err := s.validator.validateCrossNamespaceCall(s.testNamespaceID, s.testTargetNamespaceID) - s.IsType(&serviceerror.InvalidArgument{}, err) -} - -func (s *commandAttrValidatorSuite) TestValidateCrossNamespaceCall_LocalToGlobal() { - namespaceEntry := namespace.NewLocalNamespaceForTest( - &persistencespb.NamespaceInfo{Name: s.testNamespaceID.String()}, - nil, - cluster.TestCurrentClusterName, - ) - targetNamespaceEntry := namespace.NewGlobalNamespaceForTest( - &persistencespb.NamespaceInfo{Name: s.testTargetNamespaceID.String()}, - nil, - &persistencespb.NamespaceReplicationConfig{ - ActiveClusterName: cluster.TestCurrentClusterName, - Clusters: []string{ - cluster.TestCurrentClusterName, - cluster.TestAlternativeClusterName, - }, - }, - 1234, - ) - - s.mockNamespaceCache.EXPECT().GetNamespaceByID(s.testNamespaceID).Return(namespaceEntry, nil) - s.mockNamespaceCache.EXPECT().GetNamespaceByID(s.testTargetNamespaceID).Return(targetNamespaceEntry, nil) - - err := s.validator.validateCrossNamespaceCall(s.testNamespaceID, s.testTargetNamespaceID) - s.IsType(&serviceerror.InvalidArgument{}, err) -} - -func (s *commandAttrValidatorSuite) TestValidateCrossNamespaceCall_EffectiveLocalToLocal_SameCluster() { - namespaceEntry := namespace.NewGlobalNamespaceForTest( - &persistencespb.NamespaceInfo{Name: s.testNamespaceID.String()}, - nil, - &persistencespb.NamespaceReplicationConfig{ - ActiveClusterName: cluster.TestCurrentClusterName, - Clusters: []string{cluster.TestCurrentClusterName}, - }, - 1234, - ) - targetNamespaceEntry := namespace.NewLocalNamespaceForTest( - &persistencespb.NamespaceInfo{Name: s.testTargetNamespaceID.String()}, - nil, - cluster.TestCurrentClusterName, - ) - - s.mockNamespaceCache.EXPECT().GetNamespaceByID(s.testNamespaceID).Return(namespaceEntry, nil) - s.mockNamespaceCache.EXPECT().GetNamespaceByID(s.testTargetNamespaceID).Return(targetNamespaceEntry, nil) - - err := 
s.validator.validateCrossNamespaceCall(s.testNamespaceID, s.testTargetNamespaceID) - s.Nil(err) -} - -func (s *commandAttrValidatorSuite) TestValidateCrossNamespaceCall_EffectiveLocalToLocal_DiffCluster() { - namespaceEntry := namespace.NewGlobalNamespaceForTest( - &persistencespb.NamespaceInfo{Name: s.testNamespaceID.String()}, - nil, - &persistencespb.NamespaceReplicationConfig{ - ActiveClusterName: cluster.TestAlternativeClusterName, - Clusters: []string{cluster.TestAlternativeClusterName}, - }, - 1234, - ) - targetNamespaceEntry := namespace.NewLocalNamespaceForTest( - &persistencespb.NamespaceInfo{Name: s.testTargetNamespaceID.String()}, - nil, - cluster.TestCurrentClusterName, - ) - - s.mockNamespaceCache.EXPECT().GetNamespaceByID(s.testNamespaceID).Return(namespaceEntry, nil) - s.mockNamespaceCache.EXPECT().GetNamespaceByID(s.testTargetNamespaceID).Return(targetNamespaceEntry, nil) - - err := s.validator.validateCrossNamespaceCall(s.testNamespaceID, s.testTargetNamespaceID) - s.IsType(&serviceerror.InvalidArgument{}, err) -} - -func (s *commandAttrValidatorSuite) TestValidateCrossNamespaceCall_EffectiveLocalToEffectiveLocal_SameCluster() { - namespaceEntry := namespace.NewGlobalNamespaceForTest( - &persistencespb.NamespaceInfo{Name: s.testNamespaceID.String()}, - nil, - &persistencespb.NamespaceReplicationConfig{ - ActiveClusterName: cluster.TestCurrentClusterName, - Clusters: []string{cluster.TestCurrentClusterName}, - }, - 1234, - ) - targetNamespaceEntry := namespace.NewGlobalNamespaceForTest( - &persistencespb.NamespaceInfo{Name: s.testTargetNamespaceID.String()}, - nil, - &persistencespb.NamespaceReplicationConfig{ - ActiveClusterName: cluster.TestCurrentClusterName, - Clusters: []string{cluster.TestCurrentClusterName}, - }, - 5678, - ) - - s.mockNamespaceCache.EXPECT().GetNamespaceByID(s.testNamespaceID).Return(namespaceEntry, nil) - s.mockNamespaceCache.EXPECT().GetNamespaceByID(s.testTargetNamespaceID).Return(targetNamespaceEntry, nil) - - err := s.validator.validateCrossNamespaceCall(s.testNamespaceID, s.testTargetNamespaceID) - s.Nil(err) -} - -func (s *commandAttrValidatorSuite) TestValidateCrossNamespaceCall_EffectiveLocalToEffectiveLocal_DiffCluster() { - namespaceEntry := namespace.NewGlobalNamespaceForTest( - &persistencespb.NamespaceInfo{Name: s.testNamespaceID.String()}, - nil, - &persistencespb.NamespaceReplicationConfig{ - ActiveClusterName: cluster.TestCurrentClusterName, - Clusters: []string{cluster.TestCurrentClusterName}, - }, - 1234, - ) - targetNamespaceEntry := namespace.NewGlobalNamespaceForTest( - &persistencespb.NamespaceInfo{Name: s.testTargetNamespaceID.String()}, - nil, - &persistencespb.NamespaceReplicationConfig{ - ActiveClusterName: cluster.TestAlternativeClusterName, - Clusters: []string{cluster.TestAlternativeClusterName}, - }, - 5678, - ) - - s.mockNamespaceCache.EXPECT().GetNamespaceByID(s.testNamespaceID).Return(namespaceEntry, nil) - s.mockNamespaceCache.EXPECT().GetNamespaceByID(s.testTargetNamespaceID).Return(targetNamespaceEntry, nil) - - err := s.validator.validateCrossNamespaceCall(s.testNamespaceID, s.testTargetNamespaceID) - s.IsType(&serviceerror.InvalidArgument{}, err) -} - -func (s *commandAttrValidatorSuite) TestValidateCrossNamespaceCall_EffectiveLocalToGlobal() { - namespaceEntry := namespace.NewGlobalNamespaceForTest( - &persistencespb.NamespaceInfo{Name: s.testNamespaceID.String()}, - nil, - &persistencespb.NamespaceReplicationConfig{ - ActiveClusterName: cluster.TestCurrentClusterName, - Clusters: []string{ - 
cluster.TestCurrentClusterName, - }, - }, - 5678, - ) - targetNamespaceEntry := namespace.NewGlobalNamespaceForTest( - &persistencespb.NamespaceInfo{Name: s.testTargetNamespaceID.String()}, - nil, - &persistencespb.NamespaceReplicationConfig{ - ActiveClusterName: cluster.TestCurrentClusterName, - Clusters: []string{ - cluster.TestCurrentClusterName, - cluster.TestAlternativeClusterName, - }, - }, - 1234, - ) - - s.mockNamespaceCache.EXPECT().GetNamespaceByID(s.testNamespaceID).Return(namespaceEntry, nil) - s.mockNamespaceCache.EXPECT().GetNamespaceByID(s.testTargetNamespaceID).Return(targetNamespaceEntry, nil) - - err := s.validator.validateCrossNamespaceCall(s.testNamespaceID, s.testTargetNamespaceID) - s.IsType(&serviceerror.InvalidArgument{}, err) -} - -func (s *commandAttrValidatorSuite) TestValidateCrossNamespaceCall_GlobalToLocal() { - namespaceEntry := namespace.NewGlobalNamespaceForTest( - &persistencespb.NamespaceInfo{Name: s.testNamespaceID.String()}, - nil, - &persistencespb.NamespaceReplicationConfig{ - ActiveClusterName: cluster.TestCurrentClusterName, - Clusters: []string{ - cluster.TestCurrentClusterName, - cluster.TestAlternativeClusterName, - }, - }, - 1234, - ) - targetNamespaceEntry := namespace.NewLocalNamespaceForTest( - &persistencespb.NamespaceInfo{Name: s.testTargetNamespaceID.String()}, - nil, - cluster.TestCurrentClusterName, - ) - - s.mockNamespaceCache.EXPECT().GetNamespaceByID(s.testNamespaceID).Return(namespaceEntry, nil) - s.mockNamespaceCache.EXPECT().GetNamespaceByID(s.testTargetNamespaceID).Return(targetNamespaceEntry, nil) - - err := s.validator.validateCrossNamespaceCall(s.testNamespaceID, s.testTargetNamespaceID) - s.IsType(&serviceerror.InvalidArgument{}, err) -} - -func (s *commandAttrValidatorSuite) TestValidateCrossNamespaceCall_GlobalToEffectiveLocal() { - namespaceEntry := namespace.NewGlobalNamespaceForTest( - &persistencespb.NamespaceInfo{Name: s.testNamespaceID.String()}, - nil, - &persistencespb.NamespaceReplicationConfig{ - ActiveClusterName: cluster.TestCurrentClusterName, - Clusters: []string{ - cluster.TestCurrentClusterName, - cluster.TestAlternativeClusterName, - }, - }, - 5678, - ) - targetNamespaceEntry := namespace.NewGlobalNamespaceForTest( - &persistencespb.NamespaceInfo{Name: s.testTargetNamespaceID.String()}, - nil, - &persistencespb.NamespaceReplicationConfig{ - ActiveClusterName: cluster.TestCurrentClusterName, - Clusters: []string{ - cluster.TestCurrentClusterName, - }, - }, - 1234, - ) - - s.mockNamespaceCache.EXPECT().GetNamespaceByID(s.testNamespaceID).Return(namespaceEntry, nil) - s.mockNamespaceCache.EXPECT().GetNamespaceByID(s.testTargetNamespaceID).Return(targetNamespaceEntry, nil) - - err := s.validator.validateCrossNamespaceCall(s.testNamespaceID, s.testTargetNamespaceID) - s.IsType(&serviceerror.InvalidArgument{}, err) -} - -func (s *commandAttrValidatorSuite) TestValidateCrossNamespaceCall_GlobalToGlobal_DiffNamespace() { - namespaceEntry := namespace.NewGlobalNamespaceForTest( - &persistencespb.NamespaceInfo{Name: s.testNamespaceID.String()}, - nil, - &persistencespb.NamespaceReplicationConfig{ - ActiveClusterName: cluster.TestCurrentClusterName, - Clusters: []string{ - cluster.TestAlternativeClusterName, - cluster.TestCurrentClusterName, - }, - }, - 1234, - ) - targetNamespaceEntry := namespace.NewGlobalNamespaceForTest( - &persistencespb.NamespaceInfo{Name: s.testTargetNamespaceID.String()}, - nil, - &persistencespb.NamespaceReplicationConfig{ - ActiveClusterName: cluster.TestCurrentClusterName, - Clusters: 
[]string{ - cluster.TestCurrentClusterName, - cluster.TestAlternativeClusterName, - }, - }, - 1234, - ) - - s.mockNamespaceCache.EXPECT().GetNamespaceByID(s.testNamespaceID).Return(namespaceEntry, nil) - s.mockNamespaceCache.EXPECT().GetNamespaceByID(s.testTargetNamespaceID).Return(targetNamespaceEntry, nil) - - err := s.validator.validateCrossNamespaceCall(s.testNamespaceID, s.testTargetNamespaceID) - s.IsType(&serviceerror.InvalidArgument{}, err) -} - -func (s *commandAttrValidatorSuite) TestValidateCrossNamespaceCall_GlobalToGlobal_SameNamespace() { - targetNamespaceID := s.testNamespaceID - - err := s.validator.validateCrossNamespaceCall(s.testNamespaceID, targetNamespaceID) - s.Nil(err) -} - -func (s *commandAttrValidatorSuite) TestValidateTaskQueueName() { - newTaskQueue := func(name string) *taskqueuepb.TaskQueue { - return &taskqueuepb.TaskQueue{ - Name: name, - Kind: enumspb.TASK_QUEUE_KIND_NORMAL, - } - } - - testCases := []struct { - defaultVal string - input *taskqueuepb.TaskQueue - output *taskqueuepb.TaskQueue - isOutputErr bool - }{ - {"tq-1", nil, newTaskQueue("tq-1"), false}, - {"", newTaskQueue("tq-1"), newTaskQueue("tq-1"), false}, - {"tq-1", newTaskQueue("tq-1"), newTaskQueue("tq-1"), false}, - {"", newTaskQueue("/tl-1"), newTaskQueue("/tl-1"), false}, - {"", newTaskQueue("/__temporal_sys"), newTaskQueue("/__temporal_sys"), false}, - {"", nil, newTaskQueue(""), true}, - {"", newTaskQueue(""), newTaskQueue(""), true}, - {"", newTaskQueue(reservedTaskQueuePrefix), newTaskQueue(reservedTaskQueuePrefix), true}, - {"tq-1", newTaskQueue(reservedTaskQueuePrefix), newTaskQueue(reservedTaskQueuePrefix), true}, - {"", newTaskQueue(reservedTaskQueuePrefix + "tq-1"), newTaskQueue(reservedTaskQueuePrefix + "tq-1"), true}, - {"tq-1", newTaskQueue(reservedTaskQueuePrefix + "tq-1"), newTaskQueue(reservedTaskQueuePrefix + "tq-1"), true}, - } - - for _, tc := range testCases { - key := tc.defaultVal + "#" - if tc.input != nil { - key += tc.input.GetName() - } else { - key += "nil" - } - s.Run(key, func() { - output, err := s.validator.validateTaskQueue(tc.input, tc.defaultVal) - if tc.isOutputErr { - s.Error(err) - } else { - s.NoError(err) - } - s.EqualValues(tc.output, output) - }) - } -} - -func (s *commandAttrValidatorSuite) TestValidateActivityRetryPolicy() { - testCases := []struct { - name string - input *commonpb.RetryPolicy - want *commonpb.RetryPolicy - }{ - { - name: "override non-set policy", - input: nil, - want: &commonpb.RetryPolicy{ - InitialInterval: timestamp.DurationPtr(1 * time.Second), - BackoffCoefficient: 2, - MaximumInterval: timestamp.DurationPtr(100 * time.Second), - MaximumAttempts: 0, - }, - }, - { - name: "do not override fully set policy", - input: &commonpb.RetryPolicy{ - InitialInterval: timestamp.DurationPtr(5 * time.Second), - BackoffCoefficient: 10, - MaximumInterval: timestamp.DurationPtr(20 * time.Second), - MaximumAttempts: 8, - }, - want: &commonpb.RetryPolicy{ - InitialInterval: timestamp.DurationPtr(5 * time.Second), - BackoffCoefficient: 10, - MaximumInterval: timestamp.DurationPtr(20 * time.Second), - MaximumAttempts: 8, - }, - }, - { - name: "partial override of fields", - input: &commonpb.RetryPolicy{ - InitialInterval: timestamp.DurationPtr(0 * time.Second), - BackoffCoefficient: 1.2, - MaximumInterval: timestamp.DurationPtr(0 * time.Second), - MaximumAttempts: 7, - }, - want: &commonpb.RetryPolicy{ - InitialInterval: timestamp.DurationPtr(1 * time.Second), - BackoffCoefficient: 1.2, - MaximumInterval: timestamp.DurationPtr(100 * 
time.Second), - MaximumAttempts: 7, - }, - }, - { - name: "set expected max interval if only init interval set", - input: &commonpb.RetryPolicy{ - InitialInterval: timestamp.DurationPtr(3 * time.Second), - MaximumInterval: timestamp.DurationPtr(0 * time.Second), - }, - want: &commonpb.RetryPolicy{ - InitialInterval: timestamp.DurationPtr(3 * time.Second), - BackoffCoefficient: 2, - MaximumInterval: timestamp.DurationPtr(300 * time.Second), - MaximumAttempts: 0, - }, - }, - { - name: "override all defaults", - input: &commonpb.RetryPolicy{ - InitialInterval: timestamp.DurationPtr(0 * time.Second), - BackoffCoefficient: 0, - MaximumInterval: timestamp.DurationPtr(0 * time.Second), - MaximumAttempts: 0, - }, - want: &commonpb.RetryPolicy{ - InitialInterval: timestamp.DurationPtr(1 * time.Second), - BackoffCoefficient: 2, - MaximumInterval: timestamp.DurationPtr(100 * time.Second), - MaximumAttempts: 0, - }, - }, - } - - for _, tt := range testCases { - s.Run(tt.name, func() { - attr := &commandpb.ScheduleActivityTaskCommandAttributes{ - RetryPolicy: tt.input, - } - - err := s.validator.validateActivityRetryPolicy(s.testNamespaceID, attr) - assert.Nil(s.T(), err, "expected no error") - assert.Equal(s.T(), tt.want, attr.RetryPolicy, "unexpected retry policy") - }) - } -} - -func (s *commandAttrValidatorSuite) TestValidateCommandSequence_NoTerminalCommand() { - err := s.validator.validateCommandSequence(nonTerminalCommands) - s.NoError(err) -} - -func (s *commandAttrValidatorSuite) TestValidateCommandSequence_ValidTerminalCommand() { - for _, terminalCommand := range terminalCommands { - err := s.validator.validateCommandSequence(append(nonTerminalCommands, terminalCommand)) - s.NoError(err) - } -} - -func (s *commandAttrValidatorSuite) TestValidateCommandSequence_InvalidTerminalCommand() { - for _, terminalCommand := range terminalCommands { - err := s.validator.validateCommandSequence(append( - []*commandpb.Command{terminalCommand}, - nonTerminalCommands[int(rand.Int31n(int32(len(nonTerminalCommands))))], - )) - s.Error(err) - s.IsType(&serviceerror.InvalidArgument{}, err) - } -} - -func TestWorkflowSizeChecker_NumChildWorkflows(t *testing.T) { - - for _, c := range []struct { - Name string - NumPendingChildExecutions int - NumPendingActivities int - NumPendingCancelRequests int - NumPendingSignals int - - PendingChildExecutionsLimit int - PendingActivitiesLimit int - PendingCancelRequestsLimit int - PendingSignalsLimit int - - ExpectedMetric string - ExpectedChildExecutionsErrorMsg string - ExpectedActivitiesErrorMsg string - ExpectedCancelRequestsErrorMsg string - ExpectedSignalsErrorMsg string - }{ - { - Name: "No limits and no data", - }, - { - Name: "Limits but no workflow data", - PendingChildExecutionsLimit: 1, - PendingActivitiesLimit: 1, - PendingCancelRequestsLimit: 1, - PendingSignalsLimit: 1, - }, - { - Name: "Limits not exceeded", - NumPendingChildExecutions: 1, - NumPendingActivities: 1, - NumPendingCancelRequests: 1, - NumPendingSignals: 1, - PendingChildExecutionsLimit: 2, - PendingActivitiesLimit: 2, - PendingCancelRequestsLimit: 2, - PendingSignalsLimit: 2, - }, - { - Name: "Pending child executions limit exceeded", - NumPendingChildExecutions: 1, - PendingChildExecutionsLimit: 1, - ExpectedMetric: "wf_too_many_pending_child_workflows", - ExpectedChildExecutionsErrorMsg: "the number of pending child workflow executions, 1, has reached the " + - "per-workflow limit of 1", - }, - { - Name: "Pending activities limit exceeded", - NumPendingActivities: 1, - PendingActivitiesLimit: 
1, - ExpectedMetric: "wf_too_many_pending_activities", - ExpectedActivitiesErrorMsg: "the number of pending activities, 1, has reached the per-workflow limit of 1", - }, - { - Name: "Pending cancel requests limit exceeded", - NumPendingCancelRequests: 1, - PendingCancelRequestsLimit: 1, - ExpectedMetric: "wf_too_many_pending_cancel_requests", - ExpectedCancelRequestsErrorMsg: "the number of pending requests to cancel external workflows, 1, has " + - "reached the per-workflow limit of 1", - }, - { - Name: "Pending signals limit exceeded", - NumPendingSignals: 1, - PendingSignalsLimit: 1, - ExpectedMetric: "wf_too_many_pending_external_workflow_signals", - ExpectedSignalsErrorMsg: "the number of pending signals to external workflows, 1, has reached the " + - "per-workflow limit of 1", - }, - } { - t.Run(c.Name, func(t *testing.T) { - ctrl := gomock.NewController(t) - mutableState := workflow.NewMockMutableState(ctrl) - logger := log.NewMockLogger(ctrl) - metricsHandler := metrics.NewMockHandler(ctrl) - - workflowKey := definition.NewWorkflowKey( - "test-namespace-id", - "test-workflow-id", - "test-run-id", - ) - mutableState.EXPECT().GetWorkflowKey().Return(workflowKey).AnyTimes() - - executionInfos := make(map[int64]*persistencespb.ChildExecutionInfo) - activityInfos := make(map[int64]*persistencespb.ActivityInfo) - requestCancelInfos := make(map[int64]*persistencespb.RequestCancelInfo) - signalInfos := make(map[int64]*persistencespb.SignalInfo) - for i := 0; i < c.NumPendingChildExecutions; i++ { - executionInfos[int64(i)] = new(persistencespb.ChildExecutionInfo) - } - for i := 0; i < c.NumPendingActivities; i++ { - activityInfos[int64(i)] = new(persistencespb.ActivityInfo) - } - for i := 0; i < c.NumPendingCancelRequests; i++ { - requestCancelInfos[int64(i)] = new(persistencespb.RequestCancelInfo) - } - for i := 0; i < c.NumPendingSignals; i++ { - signalInfos[int64(i)] = new(persistencespb.SignalInfo) - } - mutableState.EXPECT().GetPendingChildExecutionInfos().Return(executionInfos) - mutableState.EXPECT().GetPendingActivityInfos().Return(activityInfos) - mutableState.EXPECT().GetPendingRequestCancelExternalInfos().Return(requestCancelInfos) - mutableState.EXPECT().GetPendingSignalExternalInfos().Return(signalInfos) - - if len(c.ExpectedMetric) > 0 { - counterMetric := metrics.NewMockCounterIface(ctrl) - metricsHandler.EXPECT().Counter(c.ExpectedMetric).Return(counterMetric) - counterMetric.EXPECT().Record(int64(1)) - } - - for _, msg := range []string{ - c.ExpectedChildExecutionsErrorMsg, - c.ExpectedActivitiesErrorMsg, - c.ExpectedCancelRequestsErrorMsg, - c.ExpectedSignalsErrorMsg, - } { - if len(msg) > 0 { - logger.EXPECT().Error(msg, gomock.Any()).Do(func(msg string, tags ...tag.Tag) { - var namespaceID, workflowID, runID interface{} - for _, t := range tags { - if t.Key() == "wf-namespace-id" { - namespaceID = t.Value() - } else if t.Key() == "wf-id" { - workflowID = t.Value() - } else if t.Key() == "wf-run-id" { - runID = t.Value() - } - } - assert.Equal(t, "test-namespace-id", namespaceID) - assert.Equal(t, "test-workflow-id", workflowID) - assert.Equal(t, "test-run-id", runID) - }) - } - } - - checker := newWorkflowSizeChecker(workflowSizeLimits{ - numPendingChildExecutionsLimit: c.PendingChildExecutionsLimit, - numPendingActivitiesLimit: c.PendingActivitiesLimit, - numPendingCancelsRequestLimit: c.PendingCancelRequestsLimit, - numPendingSignalsLimit: c.PendingSignalsLimit, - }, mutableState, nil, metricsHandler, logger) - - err := checker.checkIfNumChildWorkflowsExceedsLimit() - 
if len(c.ExpectedChildExecutionsErrorMsg) > 0 { - require.Error(t, err) - assert.Equal(t, c.ExpectedChildExecutionsErrorMsg, err.Error()) - } else { - assert.NoError(t, err) - } - - err = checker.checkIfNumPendingActivitiesExceedsLimit() - if len(c.ExpectedActivitiesErrorMsg) > 0 { - require.Error(t, err) - assert.Equal(t, c.ExpectedActivitiesErrorMsg, err.Error()) - } else { - assert.NoError(t, err) - } - - err = checker.checkIfNumPendingCancelRequestsExceedsLimit() - if len(c.ExpectedCancelRequestsErrorMsg) > 0 { - require.Error(t, err) - assert.Equal(t, c.ExpectedCancelRequestsErrorMsg, err.Error()) - } else { - assert.NoError(t, err) - } - - err = checker.checkIfNumPendingSignalsExceedsLimit() - if len(c.ExpectedSignalsErrorMsg) > 0 { - require.Error(t, err) - assert.Equal(t, c.ExpectedSignalsErrorMsg, err.Error()) - } else { - assert.NoError(t, err) - } - }) - } -} diff -Nru temporal-1.21.5-1/src/service/history/command_checker.go temporal-1.22.5/src/service/history/command_checker.go --- temporal-1.21.5-1/src/service/history/command_checker.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/service/history/command_checker.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,935 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
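The command_checker.go file introduced below bundles two pieces: a commandAttrValidator for command attributes and a workflowSizeChecker whose per-workflow pending-count limits are exactly what the removed TestWorkflowSizeChecker_NumChildWorkflows above exercised. A minimal standalone sketch of that count check, with only withinLimit, checkCountConstraint and the error wording taken from the diff and everything else assumed for illustration:

package main

import "fmt"

// withinLimit mirrors the helper added in command_checker.go: a
// non-positive limit means the constraint is not defined.
func withinLimit(value, limit int) bool {
	if limit <= 0 {
		return true
	}
	return value < limit
}

// checkCountConstraint is a simplified stand-in for the method on
// workflowSizeChecker: it errors once the pending count reaches the limit.
func checkCountConstraint(numPending, errLimit int, resourceName string) error {
	if withinLimit(numPending, errLimit) {
		return nil
	}
	return fmt.Errorf(
		"the number of %s, %d, has reached the per-workflow limit of %d",
		resourceName, numPending, errLimit,
	)
}

func main() {
	fmt.Println(checkCountConstraint(1, 2, "pending activities")) // <nil>
	fmt.Println(checkCountConstraint(1, 1, "pending activities")) // error: limit reached
	fmt.Println(checkCountConstraint(9, 0, "pending activities")) // <nil>, limit disabled
}

Treating a non-positive limit as "not defined" lets the dynamic-config limits stay unset without capping existing workflows.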
+ +package history + +import ( + "fmt" + "strings" + "time" + + "github.com/pborman/uuid" + commandpb "go.temporal.io/api/command/v1" + commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" + "go.temporal.io/api/serviceerror" + taskqueuepb "go.temporal.io/api/taskqueue/v1" + + persistencespb "go.temporal.io/server/api/persistence/v1" + "go.temporal.io/server/common" + "go.temporal.io/server/common/backoff" + "go.temporal.io/server/common/dynamicconfig" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/log/tag" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/namespace" + "go.temporal.io/server/common/primitives/timestamp" + "go.temporal.io/server/common/searchattribute" + "go.temporal.io/server/service/history/configs" + "go.temporal.io/server/service/history/workflow" +) + +type ( + commandAttrValidator struct { + namespaceRegistry namespace.Registry + config *configs.Config + maxIDLengthLimit int + searchAttributesValidator *searchattribute.Validator + getDefaultActivityRetrySettings dynamicconfig.MapPropertyFnWithNamespaceFilter + getDefaultWorkflowRetrySettings dynamicconfig.MapPropertyFnWithNamespaceFilter + enableCrossNamespaceCommands dynamicconfig.BoolPropertyFn + } + + workflowSizeLimits struct { + blobSizeLimitWarn int + blobSizeLimitError int + memoSizeLimitWarn int + memoSizeLimitError int + numPendingChildExecutionsLimit int + numPendingActivitiesLimit int + numPendingSignalsLimit int + numPendingCancelsRequestLimit int + } + + workflowSizeChecker struct { + workflowSizeLimits + + mutableState workflow.MutableState + searchAttributesValidator *searchattribute.Validator + metricsHandler metrics.Handler + logger log.Logger + } +) + +const ( + reservedTaskQueuePrefix = "/_sys/" +) + +func newCommandAttrValidator( + namespaceRegistry namespace.Registry, + config *configs.Config, + searchAttributesValidator *searchattribute.Validator, +) *commandAttrValidator { + return &commandAttrValidator{ + namespaceRegistry: namespaceRegistry, + config: config, + maxIDLengthLimit: config.MaxIDLengthLimit(), + searchAttributesValidator: searchAttributesValidator, + getDefaultActivityRetrySettings: config.DefaultActivityRetryPolicy, + getDefaultWorkflowRetrySettings: config.DefaultWorkflowRetryPolicy, + enableCrossNamespaceCommands: config.EnableCrossNamespaceCommands, + } +} + +func newWorkflowSizeChecker( + limits workflowSizeLimits, + mutableState workflow.MutableState, + searchAttributesValidator *searchattribute.Validator, + metricsHandler metrics.Handler, + logger log.Logger, +) *workflowSizeChecker { + return &workflowSizeChecker{ + workflowSizeLimits: limits, + mutableState: mutableState, + searchAttributesValidator: searchAttributesValidator, + metricsHandler: metricsHandler, + logger: logger, + } +} + +func (c *workflowSizeChecker) checkIfPayloadSizeExceedsLimit( + commandTypeTag metrics.Tag, + payloadSize int, + message string, +) error { + + executionInfo := c.mutableState.GetExecutionInfo() + executionState := c.mutableState.GetExecutionState() + err := common.CheckEventBlobSizeLimit( + payloadSize, + c.blobSizeLimitWarn, + c.blobSizeLimitError, + executionInfo.NamespaceId, + executionInfo.WorkflowId, + executionState.RunId, + c.metricsHandler.WithTags(commandTypeTag), + c.logger, + tag.BlobSizeViolationOperation(commandTypeTag.Value()), + ) + if err != nil { + return fmt.Errorf(message) + } + return nil +} + +func (c *workflowSizeChecker) checkIfMemoSizeExceedsLimit( + memo *commonpb.Memo, + commandTypeTag 
metrics.Tag, + message string, +) error { + c.metricsHandler.Histogram(metrics.MemoSize.GetMetricName(), metrics.MemoSize.GetMetricUnit()).Record( + int64(memo.Size()), + commandTypeTag) + + executionInfo := c.mutableState.GetExecutionInfo() + executionState := c.mutableState.GetExecutionState() + err := common.CheckEventBlobSizeLimit( + memo.Size(), + c.memoSizeLimitWarn, + c.memoSizeLimitError, + executionInfo.NamespaceId, + executionInfo.WorkflowId, + executionState.RunId, + c.metricsHandler.WithTags(commandTypeTag), + c.logger, + tag.BlobSizeViolationOperation(commandTypeTag.Value()), + ) + if err != nil { + return fmt.Errorf(message) + } + return nil +} + +func withinLimit(value int, limit int) bool { + if limit <= 0 { + // limit not defined + return true + } + return value < limit +} + +func (c *workflowSizeChecker) checkCountConstraint( + numPending int, + errLimit int, + metricName string, + resourceName string, +) error { + key := c.mutableState.GetWorkflowKey() + logger := log.With( + c.logger, + tag.WorkflowNamespaceID(key.NamespaceID), + tag.WorkflowID(key.WorkflowID), + tag.WorkflowRunID(key.RunID), + ) + + if withinLimit(numPending, errLimit) { + return nil + } + c.metricsHandler.Counter(metricName).Record(1) + err := fmt.Errorf( + "the number of %s, %d, has reached the per-workflow limit of %d", + resourceName, + numPending, + errLimit, + ) + logger.Error(err.Error(), tag.Error(err)) + return err +} + +const ( + PendingChildWorkflowExecutionsDescription = "pending child workflow executions" + PendingActivitiesDescription = "pending activities" + PendingCancelRequestsDescription = "pending requests to cancel external workflows" + PendingSignalsDescription = "pending signals to external workflows" +) + +func (c *workflowSizeChecker) checkIfNumChildWorkflowsExceedsLimit() error { + return c.checkCountConstraint( + len(c.mutableState.GetPendingChildExecutionInfos()), + c.numPendingChildExecutionsLimit, + metrics.TooManyPendingChildWorkflows.GetMetricName(), + PendingChildWorkflowExecutionsDescription, + ) +} + +func (c *workflowSizeChecker) checkIfNumPendingActivitiesExceedsLimit() error { + return c.checkCountConstraint( + len(c.mutableState.GetPendingActivityInfos()), + c.numPendingActivitiesLimit, + metrics.TooManyPendingActivities.GetMetricName(), + PendingActivitiesDescription, + ) +} + +func (c *workflowSizeChecker) checkIfNumPendingCancelRequestsExceedsLimit() error { + return c.checkCountConstraint( + len(c.mutableState.GetPendingRequestCancelExternalInfos()), + c.numPendingCancelsRequestLimit, + metrics.TooManyPendingCancelRequests.GetMetricName(), + PendingCancelRequestsDescription, + ) +} + +func (c *workflowSizeChecker) checkIfNumPendingSignalsExceedsLimit() error { + return c.checkCountConstraint( + len(c.mutableState.GetPendingSignalExternalInfos()), + c.numPendingSignalsLimit, + metrics.TooManyPendingSignalsToExternalWorkflows.GetMetricName(), + PendingSignalsDescription, + ) +} + +func (c *workflowSizeChecker) checkIfSearchAttributesSizeExceedsLimit( + searchAttributes *commonpb.SearchAttributes, + namespace namespace.Name, + commandTypeTag metrics.Tag, +) error { + c.metricsHandler.Histogram(metrics.SearchAttributesSize.GetMetricName(), metrics.SearchAttributesSize.GetMetricUnit()).Record( + int64(searchAttributes.Size()), + commandTypeTag) + err := c.searchAttributesValidator.ValidateSize(searchAttributes, namespace.String()) + if err != nil { + c.logger.Warn( + "Search attributes size exceeds limits. 
Fail workflow.", + tag.Error(err), + tag.WorkflowNamespace(namespace.String()), + ) + } + return err +} + +func (v *commandAttrValidator) validateProtocolMessageAttributes( + namespaceID namespace.ID, + attributes *commandpb.ProtocolMessageCommandAttributes, + runTimeout time.Duration, +) (enumspb.WorkflowTaskFailedCause, error) { + const failedCause = enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_UPDATE_WORKFLOW_EXECUTION_MESSAGE + + if attributes == nil { + return failedCause, serviceerror.NewInvalidArgument("ProtocolMessageCommandAttributes is not set on command.") + } + + if attributes.MessageId == "" { + return failedCause, serviceerror.NewInvalidArgument("MessageID is not set on command.") + } + + return enumspb.WORKFLOW_TASK_FAILED_CAUSE_UNSPECIFIED, nil +} + +func (v *commandAttrValidator) validateActivityScheduleAttributes( + namespaceID namespace.ID, + attributes *commandpb.ScheduleActivityTaskCommandAttributes, + runTimeout time.Duration, +) (enumspb.WorkflowTaskFailedCause, error) { + + const failedCause = enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SCHEDULE_ACTIVITY_ATTRIBUTES + + if attributes == nil { + return failedCause, serviceerror.NewInvalidArgument("ScheduleActivityTaskCommandAttributes is not set on command.") + } + + defaultTaskQueueName := "" + if _, err := v.validateTaskQueue(attributes.TaskQueue, defaultTaskQueueName); err != nil { + return failedCause, err + } + + if attributes.GetActivityId() == "" { + return failedCause, serviceerror.NewInvalidArgument("ActivityId is not set on command.") + } + + if attributes.ActivityType == nil || attributes.ActivityType.GetName() == "" { + return failedCause, serviceerror.NewInvalidArgument("ActivityType is not set on command.") + } + + if err := v.validateActivityRetryPolicy(namespaceID, attributes); err != nil { + return failedCause, err + } + + if len(attributes.GetActivityId()) > v.maxIDLengthLimit { + return failedCause, serviceerror.NewInvalidArgument("ActivityID exceeds length limit.") + } + + if len(attributes.GetActivityType().GetName()) > v.maxIDLengthLimit { + return failedCause, serviceerror.NewInvalidArgument("ActivityType exceeds length limit.") + } + + // Only attempt to deduce and fill in unspecified timeouts only when all timeouts are non-negative. 
+ if timestamp.DurationValue(attributes.GetScheduleToCloseTimeout()) < 0 || timestamp.DurationValue(attributes.GetScheduleToStartTimeout()) < 0 || + timestamp.DurationValue(attributes.GetStartToCloseTimeout()) < 0 || timestamp.DurationValue(attributes.GetHeartbeatTimeout()) < 0 { + return failedCause, serviceerror.NewInvalidArgument("A valid timeout may not be negative.") + } + + validScheduleToClose := timestamp.DurationValue(attributes.GetScheduleToCloseTimeout()) > 0 + validScheduleToStart := timestamp.DurationValue(attributes.GetScheduleToStartTimeout()) > 0 + validStartToClose := timestamp.DurationValue(attributes.GetStartToCloseTimeout()) > 0 + + if validScheduleToClose { + if validScheduleToStart { + attributes.ScheduleToStartTimeout = timestamp.MinDurationPtr(attributes.GetScheduleToStartTimeout(), + attributes.GetScheduleToCloseTimeout()) + } else { + attributes.ScheduleToStartTimeout = attributes.GetScheduleToCloseTimeout() + } + if validStartToClose { + attributes.StartToCloseTimeout = timestamp.MinDurationPtr(attributes.GetStartToCloseTimeout(), + attributes.GetScheduleToCloseTimeout()) + } else { + attributes.StartToCloseTimeout = attributes.GetScheduleToCloseTimeout() + } + } else if validStartToClose { + // We are in !validScheduleToClose due to the first if above + attributes.ScheduleToCloseTimeout = &runTimeout + if !validScheduleToStart { + attributes.ScheduleToStartTimeout = &runTimeout + } + } else { + // Deduction failed as there's not enough information to fill in missing timeouts. + return failedCause, serviceerror.NewInvalidArgument("A valid StartToClose or ScheduleToCloseTimeout is not set on command.") + } + // ensure activity timeout never larger than workflow timeout + if runTimeout > 0 { + if timestamp.DurationValue(attributes.GetScheduleToCloseTimeout()) > runTimeout { + attributes.ScheduleToCloseTimeout = &runTimeout + } + if timestamp.DurationValue(attributes.GetScheduleToStartTimeout()) > runTimeout { + attributes.ScheduleToStartTimeout = &runTimeout + } + if timestamp.DurationValue(attributes.GetStartToCloseTimeout()) > runTimeout { + attributes.StartToCloseTimeout = &runTimeout + } + if timestamp.DurationValue(attributes.GetHeartbeatTimeout()) > runTimeout { + attributes.HeartbeatTimeout = &runTimeout + } + } + attributes.HeartbeatTimeout = timestamp.MinDurationPtr(attributes.GetHeartbeatTimeout(), attributes.GetStartToCloseTimeout()) + + return enumspb.WORKFLOW_TASK_FAILED_CAUSE_UNSPECIFIED, nil +} + +func (v *commandAttrValidator) validateTimerScheduleAttributes( + attributes *commandpb.StartTimerCommandAttributes, +) (enumspb.WorkflowTaskFailedCause, error) { + + const failedCause = enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_START_TIMER_ATTRIBUTES + if attributes == nil { + return failedCause, serviceerror.NewInvalidArgument("StartTimerCommandAttributes is not set on command.") + } + if attributes.GetTimerId() == "" { + return failedCause, serviceerror.NewInvalidArgument("TimerId is not set on command.") + } + if len(attributes.GetTimerId()) > v.maxIDLengthLimit { + return failedCause, serviceerror.NewInvalidArgument("TimerId exceeds length limit.") + } + if timestamp.DurationValue(attributes.GetStartToFireTimeout()) <= 0 { + return failedCause, serviceerror.NewInvalidArgument("A valid StartToFireTimeout is not set on command.") + } + return enumspb.WORKFLOW_TASK_FAILED_CAUSE_UNSPECIFIED, nil +} + +func (v *commandAttrValidator) validateActivityCancelAttributes( + attributes *commandpb.RequestCancelActivityTaskCommandAttributes, +) 
(enumspb.WorkflowTaskFailedCause, error) { + + const failedCause = enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_REQUEST_CANCEL_ACTIVITY_ATTRIBUTES + if attributes == nil { + return failedCause, serviceerror.NewInvalidArgument("RequestCancelActivityTaskCommandAttributes is not set on command.") + } + if attributes.GetScheduledEventId() <= 0 { + return failedCause, serviceerror.NewInvalidArgument("ScheduledEventId is not set on command.") + } + return enumspb.WORKFLOW_TASK_FAILED_CAUSE_UNSPECIFIED, nil +} + +func (v *commandAttrValidator) validateTimerCancelAttributes( + attributes *commandpb.CancelTimerCommandAttributes, +) (enumspb.WorkflowTaskFailedCause, error) { + + const failedCause = enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_CANCEL_TIMER_ATTRIBUTES + if attributes == nil { + return failedCause, serviceerror.NewInvalidArgument("CancelTimerCommandAttributes is not set on command.") + } + if attributes.GetTimerId() == "" { + return failedCause, serviceerror.NewInvalidArgument("TimerId is not set on command.") + } + if len(attributes.GetTimerId()) > v.maxIDLengthLimit { + return failedCause, serviceerror.NewInvalidArgument("TimerId exceeds length limit.") + } + return enumspb.WORKFLOW_TASK_FAILED_CAUSE_UNSPECIFIED, nil +} + +func (v *commandAttrValidator) validateRecordMarkerAttributes( + attributes *commandpb.RecordMarkerCommandAttributes, +) (enumspb.WorkflowTaskFailedCause, error) { + + const failedCause = enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_RECORD_MARKER_ATTRIBUTES + if attributes == nil { + return failedCause, serviceerror.NewInvalidArgument("RecordMarkerCommandAttributes is not set on command.") + } + if attributes.GetMarkerName() == "" { + return failedCause, serviceerror.NewInvalidArgument("MarkerName is not set on command.") + } + if len(attributes.GetMarkerName()) > v.maxIDLengthLimit { + return failedCause, serviceerror.NewInvalidArgument("MarkerName exceeds length limit.") + } + + return enumspb.WORKFLOW_TASK_FAILED_CAUSE_UNSPECIFIED, nil +} + +func (v *commandAttrValidator) validateCompleteWorkflowExecutionAttributes( + attributes *commandpb.CompleteWorkflowExecutionCommandAttributes, +) (enumspb.WorkflowTaskFailedCause, error) { + + const failedCause = enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_COMPLETE_WORKFLOW_EXECUTION_ATTRIBUTES + if attributes == nil { + return failedCause, serviceerror.NewInvalidArgument("CompleteWorkflowExecutionCommandAttributes is not set on command.") + } + return enumspb.WORKFLOW_TASK_FAILED_CAUSE_UNSPECIFIED, nil +} + +func (v *commandAttrValidator) validateFailWorkflowExecutionAttributes( + attributes *commandpb.FailWorkflowExecutionCommandAttributes, +) (enumspb.WorkflowTaskFailedCause, error) { + + const failedCause = enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_FAIL_WORKFLOW_EXECUTION_ATTRIBUTES + if attributes == nil { + return failedCause, serviceerror.NewInvalidArgument("FailWorkflowExecutionCommandAttributes is not set on command.") + } + if attributes.GetFailure() == nil { + return failedCause, serviceerror.NewInvalidArgument("Failure is not set on command.") + } + return enumspb.WORKFLOW_TASK_FAILED_CAUSE_UNSPECIFIED, nil +} + +func (v *commandAttrValidator) validateCancelWorkflowExecutionAttributes( + attributes *commandpb.CancelWorkflowExecutionCommandAttributes, +) (enumspb.WorkflowTaskFailedCause, error) { + + const failedCause = enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_CANCEL_WORKFLOW_EXECUTION_ATTRIBUTES + if attributes == nil { + return failedCause, serviceerror.NewInvalidArgument("CancelWorkflowExecutionCommandAttributes is not set on command.") 
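The activity timeout handling in validateActivityScheduleAttributes above reduces to a few defaulting rules: negative timeouts are rejected; a set ScheduleToClose bounds (or supplies) ScheduleToStart and StartToClose; otherwise StartToClose must be set and the workflow run timeout fills the gaps; finally every timeout is capped at the run timeout and the heartbeat at StartToClose. A rough standalone sketch of those rules, using plain time.Duration values in place of the proto duration pointers the real code works with:

package main

import (
	"errors"
	"fmt"
	"time"
)

type activityTimeouts struct {
	ScheduleToClose, ScheduleToStart, StartToClose, Heartbeat time.Duration
}

func minDur(a, b time.Duration) time.Duration {
	if a < b {
		return a
	}
	return b
}

func deduceTimeouts(t activityTimeouts, runTimeout time.Duration) (activityTimeouts, error) {
	if t.ScheduleToClose < 0 || t.ScheduleToStart < 0 || t.StartToClose < 0 || t.Heartbeat < 0 {
		return t, errors.New("a valid timeout may not be negative")
	}
	switch {
	case t.ScheduleToClose > 0:
		// ScheduleToClose bounds the other two when they are unset or larger.
		if t.ScheduleToStart > 0 {
			t.ScheduleToStart = minDur(t.ScheduleToStart, t.ScheduleToClose)
		} else {
			t.ScheduleToStart = t.ScheduleToClose
		}
		if t.StartToClose > 0 {
			t.StartToClose = minDur(t.StartToClose, t.ScheduleToClose)
		} else {
			t.StartToClose = t.ScheduleToClose
		}
	case t.StartToClose > 0:
		// Without ScheduleToClose, fall back to the workflow run timeout.
		t.ScheduleToClose = runTimeout
		if t.ScheduleToStart <= 0 {
			t.ScheduleToStart = runTimeout
		}
	default:
		return t, errors.New("a valid StartToClose or ScheduleToClose timeout is required")
	}
	// No activity timeout may exceed the workflow run timeout.
	if runTimeout > 0 {
		t.ScheduleToClose = minDur(t.ScheduleToClose, runTimeout)
		t.ScheduleToStart = minDur(t.ScheduleToStart, runTimeout)
		t.StartToClose = minDur(t.StartToClose, runTimeout)
		t.Heartbeat = minDur(t.Heartbeat, runTimeout)
	}
	t.Heartbeat = minDur(t.Heartbeat, t.StartToClose)
	return t, nil
}

func main() {
	got, _ := deduceTimeouts(activityTimeouts{StartToClose: 10 * time.Second}, time.Minute)
	fmt.Printf("%+v\n", got)
}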
+ } + return enumspb.WORKFLOW_TASK_FAILED_CAUSE_UNSPECIFIED, nil +} + +func (v *commandAttrValidator) validateCancelExternalWorkflowExecutionAttributes( + namespaceID namespace.ID, + targetNamespaceID namespace.ID, + initiatedChildExecutionsInSession map[string]struct{}, + attributes *commandpb.RequestCancelExternalWorkflowExecutionCommandAttributes, +) (enumspb.WorkflowTaskFailedCause, error) { + + const failedCause = enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_REQUEST_CANCEL_EXTERNAL_WORKFLOW_EXECUTION_ATTRIBUTES + if err := v.validateCrossNamespaceCall( + namespaceID, + targetNamespaceID, + ); err != nil { + return failedCause, err + } + + if attributes == nil { + return failedCause, serviceerror.NewInvalidArgument("RequestCancelExternalWorkflowExecutionCommandAttributes is not set on command.") + } + if attributes.GetWorkflowId() == "" { + return failedCause, serviceerror.NewInvalidArgument("WorkflowId is not set on command.") + } + if len(attributes.GetNamespace()) > v.maxIDLengthLimit { + return failedCause, serviceerror.NewInvalidArgument("Namespace exceeds length limit.") + } + if len(attributes.GetWorkflowId()) > v.maxIDLengthLimit { + return failedCause, serviceerror.NewInvalidArgument("WorkflowId exceeds length limit.") + } + runID := attributes.GetRunId() + if runID != "" && uuid.Parse(runID) == nil { + return failedCause, serviceerror.NewInvalidArgument("Invalid RunId set on command.") + } + if _, ok := initiatedChildExecutionsInSession[attributes.GetWorkflowId()]; ok { + return failedCause, serviceerror.NewInvalidArgument("Start and RequestCancel for child workflow is not allowed in same workflow task.") + } + + return enumspb.WORKFLOW_TASK_FAILED_CAUSE_UNSPECIFIED, nil +} + +func (v *commandAttrValidator) validateSignalExternalWorkflowExecutionAttributes( + namespaceID namespace.ID, + targetNamespaceID namespace.ID, + attributes *commandpb.SignalExternalWorkflowExecutionCommandAttributes, +) (enumspb.WorkflowTaskFailedCause, error) { + + const failedCause = enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SIGNAL_WORKFLOW_EXECUTION_ATTRIBUTES + if err := v.validateCrossNamespaceCall( + namespaceID, + targetNamespaceID, + ); err != nil { + return failedCause, err + } + + if attributes == nil { + return failedCause, serviceerror.NewInvalidArgument("SignalExternalWorkflowExecutionCommandAttributes is not set on command.") + } + if attributes.Execution == nil { + return failedCause, serviceerror.NewInvalidArgument("Execution is nil on command.") + } + if attributes.Execution.GetWorkflowId() == "" { + return failedCause, serviceerror.NewInvalidArgument("WorkflowId is not set on command.") + } + if len(attributes.GetNamespace()) > v.maxIDLengthLimit { + return failedCause, serviceerror.NewInvalidArgument("Namespace exceeds length limit.") + } + if len(attributes.Execution.GetWorkflowId()) > v.maxIDLengthLimit { + return failedCause, serviceerror.NewInvalidArgument("WorkflowId exceeds length limit.") + } + + targetRunID := attributes.Execution.GetRunId() + if targetRunID != "" && uuid.Parse(targetRunID) == nil { + return failedCause, serviceerror.NewInvalidArgument("Invalid RunId set on command.") + } + if attributes.GetSignalName() == "" { + return failedCause, serviceerror.NewInvalidArgument("SignalName is not set on command.") + } + + return enumspb.WORKFLOW_TASK_FAILED_CAUSE_UNSPECIFIED, nil +} + +func (v *commandAttrValidator) validateUpsertWorkflowSearchAttributes( + namespace namespace.Name, + attributes *commandpb.UpsertWorkflowSearchAttributesCommandAttributes, +) 
(enumspb.WorkflowTaskFailedCause, error) { + + const failedCause = enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SEARCH_ATTRIBUTES + if attributes == nil { + return failedCause, serviceerror.NewInvalidArgument("UpsertWorkflowSearchAttributesCommandAttributes is not set on command.") + } + if attributes.SearchAttributes == nil { + return failedCause, serviceerror.NewInvalidArgument("SearchAttributes is not set on command.") + } + if len(attributes.GetSearchAttributes().GetIndexedFields()) == 0 { + return failedCause, serviceerror.NewInvalidArgument("IndexedFields is empty on command.") + } + if err := v.searchAttributesValidator.Validate(attributes.GetSearchAttributes(), namespace.String()); err != nil { + return failedCause, err + } + + return enumspb.WORKFLOW_TASK_FAILED_CAUSE_UNSPECIFIED, nil +} + +func (v *commandAttrValidator) validateModifyWorkflowProperties( + namespace namespace.Name, + attributes *commandpb.ModifyWorkflowPropertiesCommandAttributes, +) (enumspb.WorkflowTaskFailedCause, error) { + const failedCause = enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_MODIFY_WORKFLOW_PROPERTIES_ATTRIBUTES + if attributes == nil { + return failedCause, serviceerror.NewInvalidArgument( + "ModifyWorkflowPropertiesCommandAttributes is not set on command.", + ) + } + + // check at least one attribute is not nil + if attributes.UpsertedMemo == nil { + return failedCause, serviceerror.NewInvalidArgument( + "ModifyWorkflowPropertiesCommandAttributes attributes are all nil.", + ) + } + + // check if UpsertedMemo is not nil, then it's not an empty map + if attributes.UpsertedMemo != nil && len(attributes.GetUpsertedMemo().GetFields()) == 0 { + return failedCause, serviceerror.NewInvalidArgument("UpsertedMemo.Fields is empty on command.") + } + + return enumspb.WORKFLOW_TASK_FAILED_CAUSE_UNSPECIFIED, nil +} + +func (v *commandAttrValidator) validateContinueAsNewWorkflowExecutionAttributes( + namespace namespace.Name, + attributes *commandpb.ContinueAsNewWorkflowExecutionCommandAttributes, + executionInfo *persistencespb.WorkflowExecutionInfo, +) (enumspb.WorkflowTaskFailedCause, error) { + + const failedCause = enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_CONTINUE_AS_NEW_ATTRIBUTES + if attributes == nil { + return failedCause, serviceerror.NewInvalidArgument("ContinueAsNewWorkflowExecutionCommandAttributes is not set on command.") + } + + // Inherit workflow type from previous execution if not provided on command + if attributes.WorkflowType == nil || attributes.WorkflowType.GetName() == "" { + attributes.WorkflowType = &commonpb.WorkflowType{Name: executionInfo.WorkflowTypeName} + } + + if len(attributes.WorkflowType.GetName()) > v.maxIDLengthLimit { + return failedCause, serviceerror.NewInvalidArgument("WorkflowType exceeds length limit.") + } + + // Inherit task queue from previous execution if not provided on command + taskQueue, err := v.validateTaskQueue(attributes.TaskQueue, executionInfo.TaskQueue) + if err != nil { + return failedCause, err + } + attributes.TaskQueue = taskQueue + + if timestamp.DurationValue(attributes.GetWorkflowRunTimeout()) < 0 { + return failedCause, serviceerror.NewInvalidArgument("Invalid WorkflowRunTimeout.") + } + + if timestamp.DurationValue(attributes.GetWorkflowTaskTimeout()) < 0 { + return failedCause, serviceerror.NewInvalidArgument("Invalid WorkflowTaskTimeout.") + } + + if timestamp.DurationValue(attributes.GetBackoffStartInterval()) < 0 { + return failedCause, serviceerror.NewInvalidArgument("Invalid BackoffStartInterval.") + } + + if 
timestamp.DurationValue(attributes.GetWorkflowRunTimeout()) == 0 { + attributes.WorkflowRunTimeout = timestamp.DurationPtr(timestamp.DurationValue(executionInfo.WorkflowRunTimeout)) + } + + if timestamp.DurationValue(attributes.GetWorkflowTaskTimeout()) == 0 { + attributes.WorkflowTaskTimeout = timestamp.DurationPtr(timestamp.DurationValue(executionInfo.DefaultWorkflowTaskTimeout)) + } + + attributes.WorkflowRunTimeout = timestamp.DurationPtr( + common.OverrideWorkflowRunTimeout( + timestamp.DurationValue(attributes.GetWorkflowRunTimeout()), + timestamp.DurationValue(executionInfo.GetWorkflowExecutionTimeout()), + ), + ) + + attributes.WorkflowTaskTimeout = timestamp.DurationPtr( + common.OverrideWorkflowTaskTimeout( + namespace.String(), + timestamp.DurationValue(attributes.GetWorkflowTaskTimeout()), + timestamp.DurationValue(attributes.GetWorkflowRunTimeout()), + v.config.DefaultWorkflowTaskTimeout, + ), + ) + + if err := v.validateWorkflowRetryPolicy(namespace, attributes.RetryPolicy); err != nil { + return failedCause, err + } + + if err = v.searchAttributesValidator.Validate(attributes.GetSearchAttributes(), namespace.String()); err != nil { + return enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SEARCH_ATTRIBUTES, err + } + + return enumspb.WORKFLOW_TASK_FAILED_CAUSE_UNSPECIFIED, nil +} + +func (v *commandAttrValidator) validateStartChildExecutionAttributes( + namespaceID namespace.ID, + targetNamespaceID namespace.ID, + targetNamespace namespace.Name, + attributes *commandpb.StartChildWorkflowExecutionCommandAttributes, + parentInfo *persistencespb.WorkflowExecutionInfo, + defaultWorkflowTaskTimeoutFn dynamicconfig.DurationPropertyFnWithNamespaceFilter, +) (enumspb.WorkflowTaskFailedCause, error) { + + const failedCause = enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_START_CHILD_EXECUTION_ATTRIBUTES + if err := v.validateCrossNamespaceCall( + namespaceID, + targetNamespaceID, + ); err != nil { + return failedCause, err + } + + if attributes == nil { + return failedCause, serviceerror.NewInvalidArgument("StartChildWorkflowExecutionCommandAttributes is not set on command.") + } + + if attributes.GetWorkflowId() == "" { + return failedCause, serviceerror.NewInvalidArgument("Required field WorkflowId is not set on command.") + } + + if attributes.WorkflowType == nil || attributes.WorkflowType.GetName() == "" { + return failedCause, serviceerror.NewInvalidArgument("Required field WorkflowType is not set on command.") + } + + if len(attributes.GetNamespace()) > v.maxIDLengthLimit { + return failedCause, serviceerror.NewInvalidArgument("Namespace exceeds length limit.") + } + + if len(attributes.GetWorkflowId()) > v.maxIDLengthLimit { + return failedCause, serviceerror.NewInvalidArgument("WorkflowId exceeds length limit.") + } + + if len(attributes.WorkflowType.GetName()) > v.maxIDLengthLimit { + return failedCause, serviceerror.NewInvalidArgument("WorkflowType exceeds length limit.") + } + + if timestamp.DurationValue(attributes.GetWorkflowExecutionTimeout()) < 0 { + return failedCause, serviceerror.NewInvalidArgument("Invalid WorkflowExecutionTimeout.") + } + + if timestamp.DurationValue(attributes.GetWorkflowRunTimeout()) < 0 { + return failedCause, serviceerror.NewInvalidArgument("Invalid WorkflowRunTimeout.") + } + + if timestamp.DurationValue(attributes.GetWorkflowTaskTimeout()) < 0 { + return failedCause, serviceerror.NewInvalidArgument("Invalid WorkflowTaskTimeout.") + } + + if err := v.validateWorkflowRetryPolicy(namespace.Name(attributes.GetNamespace()), attributes.RetryPolicy); err != nil { + 
return failedCause, err + } + + if err := backoff.ValidateSchedule(attributes.GetCronSchedule()); err != nil { + return failedCause, err + } + + if err := v.searchAttributesValidator.Validate(attributes.GetSearchAttributes(), targetNamespace.String()); err != nil { + return enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SEARCH_ATTRIBUTES, err + } + + // Inherit taskqueue from parent workflow execution if not provided on command + taskQueue, err := v.validateTaskQueue(attributes.TaskQueue, parentInfo.TaskQueue) + if err != nil { + return failedCause, err + } + attributes.TaskQueue = taskQueue + + // workflow execution timeout is left as is + // if workflow execution timeout == 0 -> infinity + + attributes.WorkflowRunTimeout = timestamp.DurationPtr( + common.OverrideWorkflowRunTimeout( + timestamp.DurationValue(attributes.GetWorkflowRunTimeout()), + timestamp.DurationValue(attributes.GetWorkflowExecutionTimeout()), + ), + ) + + attributes.WorkflowTaskTimeout = timestamp.DurationPtr( + common.OverrideWorkflowTaskTimeout( + targetNamespace.String(), + timestamp.DurationValue(attributes.GetWorkflowTaskTimeout()), + timestamp.DurationValue(attributes.GetWorkflowRunTimeout()), + defaultWorkflowTaskTimeoutFn, + ), + ) + + return enumspb.WORKFLOW_TASK_FAILED_CAUSE_UNSPECIFIED, nil +} + +func (v *commandAttrValidator) validateTaskQueue( + taskQueue *taskqueuepb.TaskQueue, + defaultVal string, +) (*taskqueuepb.TaskQueue, error) { + + if taskQueue == nil { + taskQueue = &taskqueuepb.TaskQueue{ + Kind: enumspb.TASK_QUEUE_KIND_NORMAL, + } + } + + if taskQueue.GetName() == "" { + if defaultVal == "" { + return taskQueue, serviceerror.NewInvalidArgument("missing task queue name") + } + taskQueue.Name = defaultVal + return taskQueue, nil + } + + name := taskQueue.GetName() + if len(name) > v.maxIDLengthLimit { + return taskQueue, serviceerror.NewInvalidArgument(fmt.Sprintf("task queue name exceeds length limit of %v", v.maxIDLengthLimit)) + } + + if strings.HasPrefix(name, reservedTaskQueuePrefix) { + return taskQueue, serviceerror.NewInvalidArgument(fmt.Sprintf("task queue name cannot start with reserved prefix %v", reservedTaskQueuePrefix)) + } + + return taskQueue, nil +} + +func (v *commandAttrValidator) validateActivityRetryPolicy( + namespaceID namespace.ID, + attributes *commandpb.ScheduleActivityTaskCommandAttributes, +) error { + if attributes.RetryPolicy == nil { + attributes.RetryPolicy = &commonpb.RetryPolicy{} + } + + defaultActivityRetrySettings := common.FromConfigToDefaultRetrySettings(v.getDefaultActivityRetrySettings(namespaceID.String())) + common.EnsureRetryPolicyDefaults(attributes.RetryPolicy, defaultActivityRetrySettings) + return common.ValidateRetryPolicy(attributes.RetryPolicy) +} + +func (v *commandAttrValidator) validateWorkflowRetryPolicy( + namespaceName namespace.Name, + retryPolicy *commonpb.RetryPolicy, +) error { + if retryPolicy == nil { + // By default, if the user does not explicitly set a retry policy for a Child Workflow, do not perform any retries. 
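validateTaskQueue above, whose behaviour the removed TestValidateTaskQueueName covered, applies three rules: fall back to a caller-supplied default when no name is given, enforce the maximum ID length, and reject names under the reserved "/_sys/" prefix. A condensed sketch of just that name handling, assuming the 1000-character limit used by the test config:

package main

import (
	"errors"
	"fmt"
	"strings"
)

const (
	reservedTaskQueuePrefix = "/_sys/"
	maxIDLengthLimit        = 1000
)

// validateTaskQueueName is an illustrative reduction of validateTaskQueue
// to plain strings; the real method also carries the TaskQueue kind.
func validateTaskQueueName(name, defaultName string) (string, error) {
	if name == "" {
		if defaultName == "" {
			return "", errors.New("missing task queue name")
		}
		return defaultName, nil
	}
	if len(name) > maxIDLengthLimit {
		return name, fmt.Errorf("task queue name exceeds length limit of %v", maxIDLengthLimit)
	}
	if strings.HasPrefix(name, reservedTaskQueuePrefix) {
		return name, fmt.Errorf("task queue name cannot start with reserved prefix %v", reservedTaskQueuePrefix)
	}
	return name, nil
}

func main() {
	fmt.Println(validateTaskQueueName("", "tq-1"))            // tq-1 <nil>
	fmt.Println(validateTaskQueueName("/__temporal_sys", "")) // allowed: only the literal "/_sys/" prefix is reserved
	fmt.Println(validateTaskQueueName("/_sys/tq-1", ""))      // rejected: reserved prefix
}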
+ return nil + } + + // Otherwise, for any unset fields on the retry policy, set with defaults + defaultWorkflowRetrySettings := common.FromConfigToDefaultRetrySettings(v.getDefaultWorkflowRetrySettings(namespaceName.String())) + common.EnsureRetryPolicyDefaults(retryPolicy, defaultWorkflowRetrySettings) + return common.ValidateRetryPolicy(retryPolicy) +} + +func (v *commandAttrValidator) validateCrossNamespaceCall( + namespaceID namespace.ID, + targetNamespaceID namespace.ID, +) error { + + // same name, no check needed + if namespaceID == targetNamespaceID { + return nil + } + + if !v.enableCrossNamespaceCommands() { + return serviceerror.NewInvalidArgument("cross namespace commands are not allowed") + } + + namespaceEntry, err := v.namespaceRegistry.GetNamespaceByID(namespaceID) + if err != nil { + return err + } + + targetNamespaceEntry, err := v.namespaceRegistry.GetNamespaceByID(targetNamespaceID) + if err != nil { + return err + } + + // both local namespace + if !namespaceEntry.IsGlobalNamespace() && !targetNamespaceEntry.IsGlobalNamespace() { + return nil + } + + namespaceClusters := namespaceEntry.ClusterNames() + targetNamespaceClusters := targetNamespaceEntry.ClusterNames() + + // one is local namespace, another one is global namespace or both global namespace + // treat global namespace with one replication cluster as local namespace + if len(namespaceClusters) == 1 && len(targetNamespaceClusters) == 1 { + if namespaceClusters[0] == targetNamespaceClusters[0] { + return nil + } + return v.createCrossNamespaceCallError(namespaceEntry, targetNamespaceEntry) + } + return v.createCrossNamespaceCallError(namespaceEntry, targetNamespaceEntry) +} + +func (v *commandAttrValidator) createCrossNamespaceCallError( + namespaceEntry *namespace.Namespace, + targetNamespaceEntry *namespace.Namespace, +) error { + return serviceerror.NewInvalidArgument(fmt.Sprintf("unable to process cross namespace command between %v and %v", namespaceEntry.Name(), targetNamespaceEntry.Name())) +} + +func (v *commandAttrValidator) validateCommandSequence( + commands []*commandpb.Command, +) error { + closeCommand := enumspb.COMMAND_TYPE_UNSPECIFIED + + for _, command := range commands { + if closeCommand != enumspb.COMMAND_TYPE_UNSPECIFIED { + return serviceerror.NewInvalidArgument(fmt.Sprintf( + "invalid command sequence: [%v], command %s must be the last command.", + strings.Join(v.commandTypes(commands), ", "), closeCommand.String(), + )) + } + + switch command.GetCommandType() { + case enumspb.COMMAND_TYPE_SCHEDULE_ACTIVITY_TASK, + enumspb.COMMAND_TYPE_REQUEST_CANCEL_ACTIVITY_TASK, + enumspb.COMMAND_TYPE_START_TIMER, + enumspb.COMMAND_TYPE_CANCEL_TIMER, + enumspb.COMMAND_TYPE_RECORD_MARKER, + enumspb.COMMAND_TYPE_REQUEST_CANCEL_EXTERNAL_WORKFLOW_EXECUTION, + enumspb.COMMAND_TYPE_SIGNAL_EXTERNAL_WORKFLOW_EXECUTION, + enumspb.COMMAND_TYPE_START_CHILD_WORKFLOW_EXECUTION, + enumspb.COMMAND_TYPE_UPSERT_WORKFLOW_SEARCH_ATTRIBUTES, + enumspb.COMMAND_TYPE_MODIFY_WORKFLOW_PROPERTIES, + enumspb.COMMAND_TYPE_PROTOCOL_MESSAGE: + // noop + case enumspb.COMMAND_TYPE_CONTINUE_AS_NEW_WORKFLOW_EXECUTION, + enumspb.COMMAND_TYPE_COMPLETE_WORKFLOW_EXECUTION, + enumspb.COMMAND_TYPE_FAIL_WORKFLOW_EXECUTION, + enumspb.COMMAND_TYPE_CANCEL_WORKFLOW_EXECUTION: + closeCommand = command.GetCommandType() + default: + return serviceerror.NewInvalidArgument(fmt.Sprintf("unknown command type: %v", command.GetCommandType())) + } + } + return nil +} + +func (v *commandAttrValidator) commandTypes( + commands []*commandpb.Command, +) 
[]string { + result := make([]string, len(commands)) + for index, command := range commands { + result[index] = command.GetCommandType().String() + } + return result +} diff -Nru temporal-1.21.5-1/src/service/history/command_checker_test.go temporal-1.22.5/src/service/history/command_checker_test.go --- temporal-1.21.5-1/src/service/history/command_checker_test.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/service/history/command_checker_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,947 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package history + +import ( + "math/rand" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + commandpb "go.temporal.io/api/command/v1" + commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" + "go.temporal.io/api/serviceerror" + taskqueuepb "go.temporal.io/api/taskqueue/v1" + + persistencespb "go.temporal.io/server/api/persistence/v1" + "go.temporal.io/server/common" + "go.temporal.io/server/common/cluster" + "go.temporal.io/server/common/definition" + "go.temporal.io/server/common/dynamicconfig" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/log/tag" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/namespace" + "go.temporal.io/server/common/payloads" + "go.temporal.io/server/common/persistence/visibility/manager" + "go.temporal.io/server/common/primitives/timestamp" + "go.temporal.io/server/common/searchattribute" + "go.temporal.io/server/service/history/configs" + "go.temporal.io/server/service/history/tests" + "go.temporal.io/server/service/history/workflow" +) + +var ( + nonTerminalCommands = []*commandpb.Command{ + {CommandType: enumspb.COMMAND_TYPE_SCHEDULE_ACTIVITY_TASK}, + {CommandType: enumspb.COMMAND_TYPE_REQUEST_CANCEL_ACTIVITY_TASK}, + {CommandType: enumspb.COMMAND_TYPE_CANCEL_TIMER}, + {CommandType: enumspb.COMMAND_TYPE_CANCEL_TIMER}, + {CommandType: enumspb.COMMAND_TYPE_RECORD_MARKER}, + {CommandType: enumspb.COMMAND_TYPE_REQUEST_CANCEL_EXTERNAL_WORKFLOW_EXECUTION}, + {CommandType: enumspb.COMMAND_TYPE_SIGNAL_EXTERNAL_WORKFLOW_EXECUTION}, + {CommandType: enumspb.COMMAND_TYPE_START_CHILD_WORKFLOW_EXECUTION}, + {CommandType: 
enumspb.COMMAND_TYPE_UPSERT_WORKFLOW_SEARCH_ATTRIBUTES}, + {CommandType: enumspb.COMMAND_TYPE_MODIFY_WORKFLOW_PROPERTIES}, + } + + terminalCommands = []*commandpb.Command{ + {CommandType: enumspb.COMMAND_TYPE_CONTINUE_AS_NEW_WORKFLOW_EXECUTION}, + {CommandType: enumspb.COMMAND_TYPE_COMPLETE_WORKFLOW_EXECUTION}, + {CommandType: enumspb.COMMAND_TYPE_FAIL_WORKFLOW_EXECUTION}, + {CommandType: enumspb.COMMAND_TYPE_CANCEL_WORKFLOW_EXECUTION}, + } +) + +type ( + commandAttrValidatorSuite struct { + suite.Suite + *require.Assertions + + controller *gomock.Controller + mockNamespaceCache *namespace.MockRegistry + mockVisibilityManager *manager.MockVisibilityManager + + validator *commandAttrValidator + + testNamespaceID namespace.ID + testTargetNamespaceID namespace.ID + } +) + +func TestCommandAttrValidatorSuite(t *testing.T) { + s := new(commandAttrValidatorSuite) + suite.Run(t, s) +} + +func (s *commandAttrValidatorSuite) SetupSuite() { + s.testNamespaceID = "test namespace ID" + s.testTargetNamespaceID = "test target namespace ID" +} + +func (s *commandAttrValidatorSuite) TearDownSuite() { +} + +func (s *commandAttrValidatorSuite) SetupTest() { + s.Assertions = require.New(s.T()) + + s.controller = gomock.NewController(s.T()) + s.mockNamespaceCache = namespace.NewMockRegistry(s.controller) + + s.mockVisibilityManager = manager.NewMockVisibilityManager(s.controller) + s.mockVisibilityManager.EXPECT().GetIndexName().Return("index-name").AnyTimes() + s.mockVisibilityManager.EXPECT(). + ValidateCustomSearchAttributes(gomock.Any()). + DoAndReturn( + func(searchAttributes map[string]any) (map[string]any, error) { + return searchAttributes, nil + }, + ). + AnyTimes() + + config := &configs.Config{ + MaxIDLengthLimit: dynamicconfig.GetIntPropertyFn(1000), + SearchAttributesNumberOfKeysLimit: dynamicconfig.GetIntPropertyFilteredByNamespace(100), + SearchAttributesSizeOfValueLimit: dynamicconfig.GetIntPropertyFilteredByNamespace(2 * 1024), + SearchAttributesTotalSizeLimit: dynamicconfig.GetIntPropertyFilteredByNamespace(40 * 1024), + DefaultActivityRetryPolicy: dynamicconfig.GetMapPropertyFnWithNamespaceFilter(common.GetDefaultRetryPolicyConfigOptions()), + DefaultWorkflowRetryPolicy: dynamicconfig.GetMapPropertyFnWithNamespaceFilter(common.GetDefaultRetryPolicyConfigOptions()), + EnableCrossNamespaceCommands: dynamicconfig.GetBoolPropertyFn(true), + DefaultWorkflowTaskTimeout: dynamicconfig.GetDurationPropertyFnFilteredByNamespace(common.DefaultWorkflowTaskTimeout), + } + s.validator = newCommandAttrValidator( + s.mockNamespaceCache, + config, + searchattribute.NewValidator( + searchattribute.NewTestProvider(), + searchattribute.NewTestMapperProvider(nil), + config.SearchAttributesNumberOfKeysLimit, + config.SearchAttributesSizeOfValueLimit, + config.SearchAttributesTotalSizeLimit, + s.mockVisibilityManager, + false, + )) +} + +func (s *commandAttrValidatorSuite) TearDownTest() { + s.controller.Finish() +} + +func (s *commandAttrValidatorSuite) TestValidateSignalExternalWorkflowExecutionAttributes() { + namespaceEntry := namespace.NewLocalNamespaceForTest( + &persistencespb.NamespaceInfo{Name: s.testNamespaceID.String()}, + nil, + cluster.TestCurrentClusterName, + ) + targetNamespaceEntry := namespace.NewLocalNamespaceForTest( + &persistencespb.NamespaceInfo{Name: s.testTargetNamespaceID.String()}, + nil, + cluster.TestCurrentClusterName, + ) + + s.mockNamespaceCache.EXPECT().GetNamespaceByID(s.testNamespaceID).Return(namespaceEntry, nil).AnyTimes() + 
s.mockNamespaceCache.EXPECT().GetNamespaceByID(s.testTargetNamespaceID).Return(targetNamespaceEntry, nil).AnyTimes() + + var attributes *commandpb.SignalExternalWorkflowExecutionCommandAttributes + + fc, err := s.validator.validateSignalExternalWorkflowExecutionAttributes(s.testNamespaceID, s.testTargetNamespaceID, attributes) + s.EqualError(err, "SignalExternalWorkflowExecutionCommandAttributes is not set on command.") + s.Equal(enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SIGNAL_WORKFLOW_EXECUTION_ATTRIBUTES, fc) + + attributes = &commandpb.SignalExternalWorkflowExecutionCommandAttributes{} + fc, err = s.validator.validateSignalExternalWorkflowExecutionAttributes(s.testNamespaceID, s.testTargetNamespaceID, attributes) + s.EqualError(err, "Execution is nil on command.") + s.Equal(enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SIGNAL_WORKFLOW_EXECUTION_ATTRIBUTES, fc) + + attributes.Execution = &commonpb.WorkflowExecution{} + attributes.Execution.WorkflowId = "workflow-id" + fc, err = s.validator.validateSignalExternalWorkflowExecutionAttributes(s.testNamespaceID, s.testTargetNamespaceID, attributes) + s.EqualError(err, "SignalName is not set on command.") + s.Equal(enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SIGNAL_WORKFLOW_EXECUTION_ATTRIBUTES, fc) + + attributes.Execution.RunId = "run-id" + fc, err = s.validator.validateSignalExternalWorkflowExecutionAttributes(s.testNamespaceID, s.testTargetNamespaceID, attributes) + s.EqualError(err, "Invalid RunId set on command.") + attributes.Execution.RunId = tests.RunID + s.Equal(enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SIGNAL_WORKFLOW_EXECUTION_ATTRIBUTES, fc) + + attributes.SignalName = "my signal name" + fc, err = s.validator.validateSignalExternalWorkflowExecutionAttributes(s.testNamespaceID, s.testTargetNamespaceID, attributes) + s.NoError(err) + s.Equal(enumspb.WORKFLOW_TASK_FAILED_CAUSE_UNSPECIFIED, fc) + + attributes.Input = payloads.EncodeString("test input") + fc, err = s.validator.validateSignalExternalWorkflowExecutionAttributes(s.testNamespaceID, s.testTargetNamespaceID, attributes) + s.NoError(err) + s.Equal(enumspb.WORKFLOW_TASK_FAILED_CAUSE_UNSPECIFIED, fc) +} + +func (s *commandAttrValidatorSuite) TestValidateUpsertWorkflowSearchAttributes() { + namespace := namespace.Name("tests.Namespace") + var attributes *commandpb.UpsertWorkflowSearchAttributesCommandAttributes + + fc, err := s.validator.validateUpsertWorkflowSearchAttributes(namespace, attributes) + s.EqualError(err, "UpsertWorkflowSearchAttributesCommandAttributes is not set on command.") + s.Equal(enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SEARCH_ATTRIBUTES, fc) + + attributes = &commandpb.UpsertWorkflowSearchAttributesCommandAttributes{} + fc, err = s.validator.validateUpsertWorkflowSearchAttributes(namespace, attributes) + s.EqualError(err, "SearchAttributes is not set on command.") + s.Equal(enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SEARCH_ATTRIBUTES, fc) + + attributes.SearchAttributes = &commonpb.SearchAttributes{} + fc, err = s.validator.validateUpsertWorkflowSearchAttributes(namespace, attributes) + s.EqualError(err, "IndexedFields is empty on command.") + s.Equal(enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SEARCH_ATTRIBUTES, fc) + + saPayload, err := searchattribute.EncodeValue("bytes", enumspb.INDEXED_VALUE_TYPE_KEYWORD) + s.NoError(err) + attributes.SearchAttributes.IndexedFields = map[string]*commonpb.Payload{ + "CustomKeywordField": saPayload, + } + fc, err = s.validator.validateUpsertWorkflowSearchAttributes(namespace, attributes) + s.NoError(err) + 
s.Equal(enumspb.WORKFLOW_TASK_FAILED_CAUSE_UNSPECIFIED, fc) +} + +func (s *commandAttrValidatorSuite) TestValidateContinueAsNewWorkflowExecutionAttributes() { + executionTimeout := time.Hour + workflowTypeName := "workflowType" + taskQueue := "taskQueue" + + attributes := &commandpb.ContinueAsNewWorkflowExecutionCommandAttributes{ + // workflow type name and task queue name should be retrieved from existing workflow info + + // WorkflowRunTimeout should be shorten to execution timeout + WorkflowRunTimeout: timestamp.DurationPtr(executionTimeout * 2), + // WorkflowTaskTimeout should be shorten to max workflow task timeout + WorkflowTaskTimeout: timestamp.DurationPtr(common.MaxWorkflowTaskStartToCloseTimeout * 2), + } + + executionInfo := &persistencespb.WorkflowExecutionInfo{ + WorkflowTypeName: workflowTypeName, + TaskQueue: taskQueue, + WorkflowExecutionTimeout: timestamp.DurationPtr(executionTimeout), + } + + fc, err := s.validator.validateContinueAsNewWorkflowExecutionAttributes( + tests.Namespace, + attributes, + executionInfo, + ) + s.NoError(err) + s.Equal(enumspb.WORKFLOW_TASK_FAILED_CAUSE_UNSPECIFIED, fc) + + s.Equal(workflowTypeName, attributes.GetWorkflowType().GetName()) + s.Equal(taskQueue, attributes.GetTaskQueue().GetName()) + s.Equal(executionTimeout, *attributes.GetWorkflowRunTimeout()) + s.Equal(common.MaxWorkflowTaskStartToCloseTimeout, *attributes.GetWorkflowTaskTimeout()) +} + +func (s *commandAttrValidatorSuite) TestValidateModifyWorkflowProperties() { + namespace := namespace.Name("tests.Namespace") + var attributes *commandpb.ModifyWorkflowPropertiesCommandAttributes + + fc, err := s.validator.validateModifyWorkflowProperties(namespace, attributes) + s.EqualError(err, "ModifyWorkflowPropertiesCommandAttributes is not set on command.") + s.Equal(enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_MODIFY_WORKFLOW_PROPERTIES_ATTRIBUTES, fc) + + // test attributes has at least one non-nil attribute + attributes = &commandpb.ModifyWorkflowPropertiesCommandAttributes{} + fc, err = s.validator.validateModifyWorkflowProperties(namespace, attributes) + s.EqualError(err, "ModifyWorkflowPropertiesCommandAttributes attributes are all nil.") + s.Equal(enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_MODIFY_WORKFLOW_PROPERTIES_ATTRIBUTES, fc) + + // test UpsertedMemo cannot be an empty map + attributes = &commandpb.ModifyWorkflowPropertiesCommandAttributes{ + UpsertedMemo: &commonpb.Memo{}, + } + fc, err = s.validator.validateModifyWorkflowProperties(namespace, attributes) + s.EqualError(err, "UpsertedMemo.Fields is empty on command.") + s.Equal(enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_MODIFY_WORKFLOW_PROPERTIES_ATTRIBUTES, fc) +} + +func (s *commandAttrValidatorSuite) TestValidateCrossNamespaceCall_LocalToLocal() { + namespaceEntry := namespace.NewLocalNamespaceForTest( + &persistencespb.NamespaceInfo{Name: s.testNamespaceID.String()}, + nil, + cluster.TestCurrentClusterName, + ) + targetNamespaceEntry := namespace.NewLocalNamespaceForTest( + &persistencespb.NamespaceInfo{Name: s.testTargetNamespaceID.String()}, + nil, + cluster.TestCurrentClusterName, + ) + + s.mockNamespaceCache.EXPECT().GetNamespaceByID(s.testNamespaceID).Return(namespaceEntry, nil) + s.mockNamespaceCache.EXPECT().GetNamespaceByID(s.testTargetNamespaceID).Return(targetNamespaceEntry, nil) + + err := s.validator.validateCrossNamespaceCall(s.testNamespaceID, s.testTargetNamespaceID) + s.Nil(err) +} + +func (s *commandAttrValidatorSuite) TestValidateCrossNamespaceCall_LocalToEffectiveLocal_SameCluster() { + namespaceEntry := 
namespace.NewLocalNamespaceForTest( + &persistencespb.NamespaceInfo{Name: s.testNamespaceID.String()}, + nil, + cluster.TestCurrentClusterName, + ) + targetNamespaceEntry := namespace.NewGlobalNamespaceForTest( + &persistencespb.NamespaceInfo{Name: s.testTargetNamespaceID.String()}, + nil, + &persistencespb.NamespaceReplicationConfig{ + ActiveClusterName: cluster.TestCurrentClusterName, + Clusters: []string{cluster.TestCurrentClusterName}, + }, + 1234, + ) + + s.mockNamespaceCache.EXPECT().GetNamespaceByID(s.testNamespaceID).Return(namespaceEntry, nil) + s.mockNamespaceCache.EXPECT().GetNamespaceByID(s.testTargetNamespaceID).Return(targetNamespaceEntry, nil) + + err := s.validator.validateCrossNamespaceCall(s.testNamespaceID, s.testTargetNamespaceID) + s.Nil(err) +} + +func (s *commandAttrValidatorSuite) TestValidateCrossNamespaceCall_LocalToEffectiveLocal_DiffCluster() { + namespaceEntry := namespace.NewLocalNamespaceForTest( + &persistencespb.NamespaceInfo{Name: s.testNamespaceID.String()}, + nil, + cluster.TestCurrentClusterName, + ) + targetNamespaceEntry := namespace.NewGlobalNamespaceForTest( + &persistencespb.NamespaceInfo{Name: s.testTargetNamespaceID.String()}, + nil, + &persistencespb.NamespaceReplicationConfig{ + ActiveClusterName: cluster.TestAlternativeClusterName, + Clusters: []string{cluster.TestAlternativeClusterName}, + }, + 1234, + ) + + s.mockNamespaceCache.EXPECT().GetNamespaceByID(s.testNamespaceID).Return(namespaceEntry, nil) + s.mockNamespaceCache.EXPECT().GetNamespaceByID(s.testTargetNamespaceID).Return(targetNamespaceEntry, nil) + + err := s.validator.validateCrossNamespaceCall(s.testNamespaceID, s.testTargetNamespaceID) + s.IsType(&serviceerror.InvalidArgument{}, err) +} + +func (s *commandAttrValidatorSuite) TestValidateCrossNamespaceCall_LocalToGlobal() { + namespaceEntry := namespace.NewLocalNamespaceForTest( + &persistencespb.NamespaceInfo{Name: s.testNamespaceID.String()}, + nil, + cluster.TestCurrentClusterName, + ) + targetNamespaceEntry := namespace.NewGlobalNamespaceForTest( + &persistencespb.NamespaceInfo{Name: s.testTargetNamespaceID.String()}, + nil, + &persistencespb.NamespaceReplicationConfig{ + ActiveClusterName: cluster.TestCurrentClusterName, + Clusters: []string{ + cluster.TestCurrentClusterName, + cluster.TestAlternativeClusterName, + }, + }, + 1234, + ) + + s.mockNamespaceCache.EXPECT().GetNamespaceByID(s.testNamespaceID).Return(namespaceEntry, nil) + s.mockNamespaceCache.EXPECT().GetNamespaceByID(s.testTargetNamespaceID).Return(targetNamespaceEntry, nil) + + err := s.validator.validateCrossNamespaceCall(s.testNamespaceID, s.testTargetNamespaceID) + s.IsType(&serviceerror.InvalidArgument{}, err) +} + +func (s *commandAttrValidatorSuite) TestValidateCrossNamespaceCall_EffectiveLocalToLocal_SameCluster() { + namespaceEntry := namespace.NewGlobalNamespaceForTest( + &persistencespb.NamespaceInfo{Name: s.testNamespaceID.String()}, + nil, + &persistencespb.NamespaceReplicationConfig{ + ActiveClusterName: cluster.TestCurrentClusterName, + Clusters: []string{cluster.TestCurrentClusterName}, + }, + 1234, + ) + targetNamespaceEntry := namespace.NewLocalNamespaceForTest( + &persistencespb.NamespaceInfo{Name: s.testTargetNamespaceID.String()}, + nil, + cluster.TestCurrentClusterName, + ) + + s.mockNamespaceCache.EXPECT().GetNamespaceByID(s.testNamespaceID).Return(namespaceEntry, nil) + s.mockNamespaceCache.EXPECT().GetNamespaceByID(s.testTargetNamespaceID).Return(targetNamespaceEntry, nil) + + err := 
s.validator.validateCrossNamespaceCall(s.testNamespaceID, s.testTargetNamespaceID) + s.Nil(err) +} + +func (s *commandAttrValidatorSuite) TestValidateCrossNamespaceCall_EffectiveLocalToLocal_DiffCluster() { + namespaceEntry := namespace.NewGlobalNamespaceForTest( + &persistencespb.NamespaceInfo{Name: s.testNamespaceID.String()}, + nil, + &persistencespb.NamespaceReplicationConfig{ + ActiveClusterName: cluster.TestAlternativeClusterName, + Clusters: []string{cluster.TestAlternativeClusterName}, + }, + 1234, + ) + targetNamespaceEntry := namespace.NewLocalNamespaceForTest( + &persistencespb.NamespaceInfo{Name: s.testTargetNamespaceID.String()}, + nil, + cluster.TestCurrentClusterName, + ) + + s.mockNamespaceCache.EXPECT().GetNamespaceByID(s.testNamespaceID).Return(namespaceEntry, nil) + s.mockNamespaceCache.EXPECT().GetNamespaceByID(s.testTargetNamespaceID).Return(targetNamespaceEntry, nil) + + err := s.validator.validateCrossNamespaceCall(s.testNamespaceID, s.testTargetNamespaceID) + s.IsType(&serviceerror.InvalidArgument{}, err) +} + +func (s *commandAttrValidatorSuite) TestValidateCrossNamespaceCall_EffectiveLocalToEffectiveLocal_SameCluster() { + namespaceEntry := namespace.NewGlobalNamespaceForTest( + &persistencespb.NamespaceInfo{Name: s.testNamespaceID.String()}, + nil, + &persistencespb.NamespaceReplicationConfig{ + ActiveClusterName: cluster.TestCurrentClusterName, + Clusters: []string{cluster.TestCurrentClusterName}, + }, + 1234, + ) + targetNamespaceEntry := namespace.NewGlobalNamespaceForTest( + &persistencespb.NamespaceInfo{Name: s.testTargetNamespaceID.String()}, + nil, + &persistencespb.NamespaceReplicationConfig{ + ActiveClusterName: cluster.TestCurrentClusterName, + Clusters: []string{cluster.TestCurrentClusterName}, + }, + 5678, + ) + + s.mockNamespaceCache.EXPECT().GetNamespaceByID(s.testNamespaceID).Return(namespaceEntry, nil) + s.mockNamespaceCache.EXPECT().GetNamespaceByID(s.testTargetNamespaceID).Return(targetNamespaceEntry, nil) + + err := s.validator.validateCrossNamespaceCall(s.testNamespaceID, s.testTargetNamespaceID) + s.Nil(err) +} + +func (s *commandAttrValidatorSuite) TestValidateCrossNamespaceCall_EffectiveLocalToEffectiveLocal_DiffCluster() { + namespaceEntry := namespace.NewGlobalNamespaceForTest( + &persistencespb.NamespaceInfo{Name: s.testNamespaceID.String()}, + nil, + &persistencespb.NamespaceReplicationConfig{ + ActiveClusterName: cluster.TestCurrentClusterName, + Clusters: []string{cluster.TestCurrentClusterName}, + }, + 1234, + ) + targetNamespaceEntry := namespace.NewGlobalNamespaceForTest( + &persistencespb.NamespaceInfo{Name: s.testTargetNamespaceID.String()}, + nil, + &persistencespb.NamespaceReplicationConfig{ + ActiveClusterName: cluster.TestAlternativeClusterName, + Clusters: []string{cluster.TestAlternativeClusterName}, + }, + 5678, + ) + + s.mockNamespaceCache.EXPECT().GetNamespaceByID(s.testNamespaceID).Return(namespaceEntry, nil) + s.mockNamespaceCache.EXPECT().GetNamespaceByID(s.testTargetNamespaceID).Return(targetNamespaceEntry, nil) + + err := s.validator.validateCrossNamespaceCall(s.testNamespaceID, s.testTargetNamespaceID) + s.IsType(&serviceerror.InvalidArgument{}, err) +} + +func (s *commandAttrValidatorSuite) TestValidateCrossNamespaceCall_EffectiveLocalToGlobal() { + namespaceEntry := namespace.NewGlobalNamespaceForTest( + &persistencespb.NamespaceInfo{Name: s.testNamespaceID.String()}, + nil, + &persistencespb.NamespaceReplicationConfig{ + ActiveClusterName: cluster.TestCurrentClusterName, + Clusters: []string{ + 
cluster.TestCurrentClusterName, + }, + }, + 5678, + ) + targetNamespaceEntry := namespace.NewGlobalNamespaceForTest( + &persistencespb.NamespaceInfo{Name: s.testTargetNamespaceID.String()}, + nil, + &persistencespb.NamespaceReplicationConfig{ + ActiveClusterName: cluster.TestCurrentClusterName, + Clusters: []string{ + cluster.TestCurrentClusterName, + cluster.TestAlternativeClusterName, + }, + }, + 1234, + ) + + s.mockNamespaceCache.EXPECT().GetNamespaceByID(s.testNamespaceID).Return(namespaceEntry, nil) + s.mockNamespaceCache.EXPECT().GetNamespaceByID(s.testTargetNamespaceID).Return(targetNamespaceEntry, nil) + + err := s.validator.validateCrossNamespaceCall(s.testNamespaceID, s.testTargetNamespaceID) + s.IsType(&serviceerror.InvalidArgument{}, err) +} + +func (s *commandAttrValidatorSuite) TestValidateCrossNamespaceCall_GlobalToLocal() { + namespaceEntry := namespace.NewGlobalNamespaceForTest( + &persistencespb.NamespaceInfo{Name: s.testNamespaceID.String()}, + nil, + &persistencespb.NamespaceReplicationConfig{ + ActiveClusterName: cluster.TestCurrentClusterName, + Clusters: []string{ + cluster.TestCurrentClusterName, + cluster.TestAlternativeClusterName, + }, + }, + 1234, + ) + targetNamespaceEntry := namespace.NewLocalNamespaceForTest( + &persistencespb.NamespaceInfo{Name: s.testTargetNamespaceID.String()}, + nil, + cluster.TestCurrentClusterName, + ) + + s.mockNamespaceCache.EXPECT().GetNamespaceByID(s.testNamespaceID).Return(namespaceEntry, nil) + s.mockNamespaceCache.EXPECT().GetNamespaceByID(s.testTargetNamespaceID).Return(targetNamespaceEntry, nil) + + err := s.validator.validateCrossNamespaceCall(s.testNamespaceID, s.testTargetNamespaceID) + s.IsType(&serviceerror.InvalidArgument{}, err) +} + +func (s *commandAttrValidatorSuite) TestValidateCrossNamespaceCall_GlobalToEffectiveLocal() { + namespaceEntry := namespace.NewGlobalNamespaceForTest( + &persistencespb.NamespaceInfo{Name: s.testNamespaceID.String()}, + nil, + &persistencespb.NamespaceReplicationConfig{ + ActiveClusterName: cluster.TestCurrentClusterName, + Clusters: []string{ + cluster.TestCurrentClusterName, + cluster.TestAlternativeClusterName, + }, + }, + 5678, + ) + targetNamespaceEntry := namespace.NewGlobalNamespaceForTest( + &persistencespb.NamespaceInfo{Name: s.testTargetNamespaceID.String()}, + nil, + &persistencespb.NamespaceReplicationConfig{ + ActiveClusterName: cluster.TestCurrentClusterName, + Clusters: []string{ + cluster.TestCurrentClusterName, + }, + }, + 1234, + ) + + s.mockNamespaceCache.EXPECT().GetNamespaceByID(s.testNamespaceID).Return(namespaceEntry, nil) + s.mockNamespaceCache.EXPECT().GetNamespaceByID(s.testTargetNamespaceID).Return(targetNamespaceEntry, nil) + + err := s.validator.validateCrossNamespaceCall(s.testNamespaceID, s.testTargetNamespaceID) + s.IsType(&serviceerror.InvalidArgument{}, err) +} + +func (s *commandAttrValidatorSuite) TestValidateCrossNamespaceCall_GlobalToGlobal_DiffNamespace() { + namespaceEntry := namespace.NewGlobalNamespaceForTest( + &persistencespb.NamespaceInfo{Name: s.testNamespaceID.String()}, + nil, + &persistencespb.NamespaceReplicationConfig{ + ActiveClusterName: cluster.TestCurrentClusterName, + Clusters: []string{ + cluster.TestAlternativeClusterName, + cluster.TestCurrentClusterName, + }, + }, + 1234, + ) + targetNamespaceEntry := namespace.NewGlobalNamespaceForTest( + &persistencespb.NamespaceInfo{Name: s.testTargetNamespaceID.String()}, + nil, + &persistencespb.NamespaceReplicationConfig{ + ActiveClusterName: cluster.TestCurrentClusterName, + Clusters: 
[]string{ + cluster.TestCurrentClusterName, + cluster.TestAlternativeClusterName, + }, + }, + 1234, + ) + + s.mockNamespaceCache.EXPECT().GetNamespaceByID(s.testNamespaceID).Return(namespaceEntry, nil) + s.mockNamespaceCache.EXPECT().GetNamespaceByID(s.testTargetNamespaceID).Return(targetNamespaceEntry, nil) + + err := s.validator.validateCrossNamespaceCall(s.testNamespaceID, s.testTargetNamespaceID) + s.IsType(&serviceerror.InvalidArgument{}, err) +} + +func (s *commandAttrValidatorSuite) TestValidateCrossNamespaceCall_GlobalToGlobal_SameNamespace() { + targetNamespaceID := s.testNamespaceID + + err := s.validator.validateCrossNamespaceCall(s.testNamespaceID, targetNamespaceID) + s.Nil(err) +} + +func (s *commandAttrValidatorSuite) TestValidateTaskQueueName() { + newTaskQueue := func(name string) *taskqueuepb.TaskQueue { + return &taskqueuepb.TaskQueue{ + Name: name, + Kind: enumspb.TASK_QUEUE_KIND_NORMAL, + } + } + + testCases := []struct { + defaultVal string + input *taskqueuepb.TaskQueue + output *taskqueuepb.TaskQueue + isOutputErr bool + }{ + {"tq-1", nil, newTaskQueue("tq-1"), false}, + {"", newTaskQueue("tq-1"), newTaskQueue("tq-1"), false}, + {"tq-1", newTaskQueue("tq-1"), newTaskQueue("tq-1"), false}, + {"", newTaskQueue("/tl-1"), newTaskQueue("/tl-1"), false}, + {"", newTaskQueue("/__temporal_sys"), newTaskQueue("/__temporal_sys"), false}, + {"", nil, newTaskQueue(""), true}, + {"", newTaskQueue(""), newTaskQueue(""), true}, + {"", newTaskQueue(reservedTaskQueuePrefix), newTaskQueue(reservedTaskQueuePrefix), true}, + {"tq-1", newTaskQueue(reservedTaskQueuePrefix), newTaskQueue(reservedTaskQueuePrefix), true}, + {"", newTaskQueue(reservedTaskQueuePrefix + "tq-1"), newTaskQueue(reservedTaskQueuePrefix + "tq-1"), true}, + {"tq-1", newTaskQueue(reservedTaskQueuePrefix + "tq-1"), newTaskQueue(reservedTaskQueuePrefix + "tq-1"), true}, + } + + for _, tc := range testCases { + key := tc.defaultVal + "#" + if tc.input != nil { + key += tc.input.GetName() + } else { + key += "nil" + } + s.Run(key, func() { + output, err := s.validator.validateTaskQueue(tc.input, tc.defaultVal) + if tc.isOutputErr { + s.Error(err) + } else { + s.NoError(err) + } + s.EqualValues(tc.output, output) + }) + } +} + +func (s *commandAttrValidatorSuite) TestValidateActivityRetryPolicy() { + testCases := []struct { + name string + input *commonpb.RetryPolicy + want *commonpb.RetryPolicy + }{ + { + name: "override non-set policy", + input: nil, + want: &commonpb.RetryPolicy{ + InitialInterval: timestamp.DurationPtr(1 * time.Second), + BackoffCoefficient: 2, + MaximumInterval: timestamp.DurationPtr(100 * time.Second), + MaximumAttempts: 0, + }, + }, + { + name: "do not override fully set policy", + input: &commonpb.RetryPolicy{ + InitialInterval: timestamp.DurationPtr(5 * time.Second), + BackoffCoefficient: 10, + MaximumInterval: timestamp.DurationPtr(20 * time.Second), + MaximumAttempts: 8, + }, + want: &commonpb.RetryPolicy{ + InitialInterval: timestamp.DurationPtr(5 * time.Second), + BackoffCoefficient: 10, + MaximumInterval: timestamp.DurationPtr(20 * time.Second), + MaximumAttempts: 8, + }, + }, + { + name: "partial override of fields", + input: &commonpb.RetryPolicy{ + InitialInterval: timestamp.DurationPtr(0 * time.Second), + BackoffCoefficient: 1.2, + MaximumInterval: timestamp.DurationPtr(0 * time.Second), + MaximumAttempts: 7, + }, + want: &commonpb.RetryPolicy{ + InitialInterval: timestamp.DurationPtr(1 * time.Second), + BackoffCoefficient: 1.2, + MaximumInterval: timestamp.DurationPtr(100 * 
time.Second), + MaximumAttempts: 7, + }, + }, + { + name: "set expected max interval if only init interval set", + input: &commonpb.RetryPolicy{ + InitialInterval: timestamp.DurationPtr(3 * time.Second), + MaximumInterval: timestamp.DurationPtr(0 * time.Second), + }, + want: &commonpb.RetryPolicy{ + InitialInterval: timestamp.DurationPtr(3 * time.Second), + BackoffCoefficient: 2, + MaximumInterval: timestamp.DurationPtr(300 * time.Second), + MaximumAttempts: 0, + }, + }, + { + name: "override all defaults", + input: &commonpb.RetryPolicy{ + InitialInterval: timestamp.DurationPtr(0 * time.Second), + BackoffCoefficient: 0, + MaximumInterval: timestamp.DurationPtr(0 * time.Second), + MaximumAttempts: 0, + }, + want: &commonpb.RetryPolicy{ + InitialInterval: timestamp.DurationPtr(1 * time.Second), + BackoffCoefficient: 2, + MaximumInterval: timestamp.DurationPtr(100 * time.Second), + MaximumAttempts: 0, + }, + }, + } + + for _, tt := range testCases { + s.Run(tt.name, func() { + attr := &commandpb.ScheduleActivityTaskCommandAttributes{ + RetryPolicy: tt.input, + } + + err := s.validator.validateActivityRetryPolicy(s.testNamespaceID, attr) + assert.Nil(s.T(), err, "expected no error") + assert.Equal(s.T(), tt.want, attr.RetryPolicy, "unexpected retry policy") + }) + } +} + +func (s *commandAttrValidatorSuite) TestValidateCommandSequence_NoTerminalCommand() { + err := s.validator.validateCommandSequence(nonTerminalCommands) + s.NoError(err) +} + +func (s *commandAttrValidatorSuite) TestValidateCommandSequence_ValidTerminalCommand() { + for _, terminalCommand := range terminalCommands { + err := s.validator.validateCommandSequence(append(nonTerminalCommands, terminalCommand)) + s.NoError(err) + } +} + +func (s *commandAttrValidatorSuite) TestValidateCommandSequence_InvalidTerminalCommand() { + for _, terminalCommand := range terminalCommands { + err := s.validator.validateCommandSequence(append( + []*commandpb.Command{terminalCommand}, + nonTerminalCommands[int(rand.Int31n(int32(len(nonTerminalCommands))))], + )) + s.Error(err) + s.IsType(&serviceerror.InvalidArgument{}, err) + } +} + +func TestWorkflowSizeChecker_NumChildWorkflows(t *testing.T) { + + for _, c := range []struct { + Name string + NumPendingChildExecutions int + NumPendingActivities int + NumPendingCancelRequests int + NumPendingSignals int + + PendingChildExecutionsLimit int + PendingActivitiesLimit int + PendingCancelRequestsLimit int + PendingSignalsLimit int + + ExpectedMetric string + ExpectedChildExecutionsErrorMsg string + ExpectedActivitiesErrorMsg string + ExpectedCancelRequestsErrorMsg string + ExpectedSignalsErrorMsg string + }{ + { + Name: "No limits and no data", + }, + { + Name: "Limits but no workflow data", + PendingChildExecutionsLimit: 1, + PendingActivitiesLimit: 1, + PendingCancelRequestsLimit: 1, + PendingSignalsLimit: 1, + }, + { + Name: "Limits not exceeded", + NumPendingChildExecutions: 1, + NumPendingActivities: 1, + NumPendingCancelRequests: 1, + NumPendingSignals: 1, + PendingChildExecutionsLimit: 2, + PendingActivitiesLimit: 2, + PendingCancelRequestsLimit: 2, + PendingSignalsLimit: 2, + }, + { + Name: "Pending child executions limit exceeded", + NumPendingChildExecutions: 1, + PendingChildExecutionsLimit: 1, + ExpectedMetric: "wf_too_many_pending_child_workflows", + ExpectedChildExecutionsErrorMsg: "the number of pending child workflow executions, 1, has reached the " + + "per-workflow limit of 1", + }, + { + Name: "Pending activities limit exceeded", + NumPendingActivities: 1, + PendingActivitiesLimit: 
1, + ExpectedMetric: "wf_too_many_pending_activities", + ExpectedActivitiesErrorMsg: "the number of pending activities, 1, has reached the per-workflow limit of 1", + }, + { + Name: "Pending cancel requests limit exceeded", + NumPendingCancelRequests: 1, + PendingCancelRequestsLimit: 1, + ExpectedMetric: "wf_too_many_pending_cancel_requests", + ExpectedCancelRequestsErrorMsg: "the number of pending requests to cancel external workflows, 1, has " + + "reached the per-workflow limit of 1", + }, + { + Name: "Pending signals limit exceeded", + NumPendingSignals: 1, + PendingSignalsLimit: 1, + ExpectedMetric: "wf_too_many_pending_external_workflow_signals", + ExpectedSignalsErrorMsg: "the number of pending signals to external workflows, 1, has reached the " + + "per-workflow limit of 1", + }, + } { + t.Run(c.Name, func(t *testing.T) { + ctrl := gomock.NewController(t) + mutableState := workflow.NewMockMutableState(ctrl) + logger := log.NewMockLogger(ctrl) + metricsHandler := metrics.NewMockHandler(ctrl) + + workflowKey := definition.NewWorkflowKey( + "test-namespace-id", + "test-workflow-id", + "test-run-id", + ) + mutableState.EXPECT().GetWorkflowKey().Return(workflowKey).AnyTimes() + + executionInfos := make(map[int64]*persistencespb.ChildExecutionInfo) + activityInfos := make(map[int64]*persistencespb.ActivityInfo) + requestCancelInfos := make(map[int64]*persistencespb.RequestCancelInfo) + signalInfos := make(map[int64]*persistencespb.SignalInfo) + for i := 0; i < c.NumPendingChildExecutions; i++ { + executionInfos[int64(i)] = new(persistencespb.ChildExecutionInfo) + } + for i := 0; i < c.NumPendingActivities; i++ { + activityInfos[int64(i)] = new(persistencespb.ActivityInfo) + } + for i := 0; i < c.NumPendingCancelRequests; i++ { + requestCancelInfos[int64(i)] = new(persistencespb.RequestCancelInfo) + } + for i := 0; i < c.NumPendingSignals; i++ { + signalInfos[int64(i)] = new(persistencespb.SignalInfo) + } + mutableState.EXPECT().GetPendingChildExecutionInfos().Return(executionInfos) + mutableState.EXPECT().GetPendingActivityInfos().Return(activityInfos) + mutableState.EXPECT().GetPendingRequestCancelExternalInfos().Return(requestCancelInfos) + mutableState.EXPECT().GetPendingSignalExternalInfos().Return(signalInfos) + + if len(c.ExpectedMetric) > 0 { + counterMetric := metrics.NewMockCounterIface(ctrl) + metricsHandler.EXPECT().Counter(c.ExpectedMetric).Return(counterMetric) + counterMetric.EXPECT().Record(int64(1)) + } + + for _, msg := range []string{ + c.ExpectedChildExecutionsErrorMsg, + c.ExpectedActivitiesErrorMsg, + c.ExpectedCancelRequestsErrorMsg, + c.ExpectedSignalsErrorMsg, + } { + if len(msg) > 0 { + logger.EXPECT().Error(msg, gomock.Any()).Do(func(msg string, tags ...tag.Tag) { + var namespaceID, workflowID, runID interface{} + for _, t := range tags { + if t.Key() == "wf-namespace-id" { + namespaceID = t.Value() + } else if t.Key() == "wf-id" { + workflowID = t.Value() + } else if t.Key() == "wf-run-id" { + runID = t.Value() + } + } + assert.Equal(t, "test-namespace-id", namespaceID) + assert.Equal(t, "test-workflow-id", workflowID) + assert.Equal(t, "test-run-id", runID) + }) + } + } + + checker := newWorkflowSizeChecker(workflowSizeLimits{ + numPendingChildExecutionsLimit: c.PendingChildExecutionsLimit, + numPendingActivitiesLimit: c.PendingActivitiesLimit, + numPendingCancelsRequestLimit: c.PendingCancelRequestsLimit, + numPendingSignalsLimit: c.PendingSignalsLimit, + }, mutableState, nil, metricsHandler, logger) + + err := checker.checkIfNumChildWorkflowsExceedsLimit() + 
if len(c.ExpectedChildExecutionsErrorMsg) > 0 { + require.Error(t, err) + assert.Equal(t, c.ExpectedChildExecutionsErrorMsg, err.Error()) + } else { + assert.NoError(t, err) + } + + err = checker.checkIfNumPendingActivitiesExceedsLimit() + if len(c.ExpectedActivitiesErrorMsg) > 0 { + require.Error(t, err) + assert.Equal(t, c.ExpectedActivitiesErrorMsg, err.Error()) + } else { + assert.NoError(t, err) + } + + err = checker.checkIfNumPendingCancelRequestsExceedsLimit() + if len(c.ExpectedCancelRequestsErrorMsg) > 0 { + require.Error(t, err) + assert.Equal(t, c.ExpectedCancelRequestsErrorMsg, err.Error()) + } else { + assert.NoError(t, err) + } + + err = checker.checkIfNumPendingSignalsExceedsLimit() + if len(c.ExpectedSignalsErrorMsg) > 0 { + require.Error(t, err) + assert.Equal(t, c.ExpectedSignalsErrorMsg, err.Error()) + } else { + assert.NoError(t, err) + } + }) + } +} diff -Nru temporal-1.21.5-1/src/service/history/configs/config.go temporal-1.22.5/src/service/history/configs/config.go --- temporal-1.21.5-1/src/service/history/configs/config.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/configs/config.go 2024-02-23 09:45:43.000000000 +0000 @@ -42,6 +42,7 @@ EnableReplicationStream dynamicconfig.BoolPropertyFn RPS dynamicconfig.IntPropertyFn + OperatorRPSRatio dynamicconfig.FloatPropertyFn MaxIDLengthLimit dynamicconfig.IntPropertyFn PersistenceMaxQPS dynamicconfig.IntPropertyFn PersistenceGlobalMaxQPS dynamicconfig.IntPropertyFn @@ -70,12 +71,12 @@ HistoryCacheMaxSize dynamicconfig.IntPropertyFn HistoryCacheTTL dynamicconfig.DurationPropertyFn HistoryCacheNonUserContextLockTimeout dynamicconfig.DurationPropertyFn + EnableAPIGetCurrentRunIDLock dynamicconfig.BoolPropertyFn // EventsCache settings // Change of these configs require shard restart - EventsCacheInitialSize dynamicconfig.IntPropertyFn - EventsCacheMaxSize dynamicconfig.IntPropertyFn - EventsCacheTTL dynamicconfig.DurationPropertyFn + EventsCacheMaxSizeBytes dynamicconfig.IntPropertyFn + EventsCacheTTL dynamicconfig.DurationPropertyFn // ShardController settings RangeSizeBits uint @@ -96,6 +97,7 @@ QueueCriticalSlicesCount dynamicconfig.IntPropertyFn QueuePendingTaskMaxCount dynamicconfig.IntPropertyFn QueueMaxReaderCount dynamicconfig.IntPropertyFn + TaskDropInternalErrors dynamicconfig.BoolPropertyFn TaskSchedulerEnableRateLimiter dynamicconfig.BoolPropertyFn TaskSchedulerEnableRateLimiterShadowMode dynamicconfig.BoolPropertyFn @@ -187,7 +189,6 @@ NumArchiveSystemWorkflows dynamicconfig.IntPropertyFn ArchiveRequestRPS dynamicconfig.IntPropertyFn ArchiveSignalTimeout dynamicconfig.DurationPropertyFn - DurableArchivalEnabled dynamicconfig.BoolPropertyFn // Size limit related settings BlobSizeLimitError dynamicconfig.IntPropertyFnWithNamespaceFilter @@ -332,6 +333,7 @@ EnableReplicationStream: dc.GetBoolProperty(dynamicconfig.EnableReplicationStream, false), RPS: dc.GetIntProperty(dynamicconfig.HistoryRPS, 3000), + OperatorRPSRatio: dc.GetFloat64Property(dynamicconfig.OperatorRPSRatio, common.DefaultOperatorRPSRatio), MaxIDLengthLimit: dc.GetIntProperty(dynamicconfig.MaxIDLengthLimit, 1000), PersistenceMaxQPS: dc.GetIntProperty(dynamicconfig.HistoryPersistenceMaxQPS, 9000), PersistenceGlobalMaxQPS: dc.GetIntProperty(dynamicconfig.HistoryPersistenceGlobalMaxQPS, 0), @@ -357,10 +359,10 @@ HistoryCacheMaxSize: dc.GetIntProperty(dynamicconfig.HistoryCacheMaxSize, 512), HistoryCacheTTL: dc.GetDurationProperty(dynamicconfig.HistoryCacheTTL, time.Hour), HistoryCacheNonUserContextLockTimeout: 
dc.GetDurationProperty(dynamicconfig.HistoryCacheNonUserContextLockTimeout, 500*time.Millisecond), + EnableAPIGetCurrentRunIDLock: dc.GetBoolProperty(dynamicconfig.EnableAPIGetCurrentRunIDLock, false), - EventsCacheInitialSize: dc.GetIntProperty(dynamicconfig.EventsCacheInitialSize, 128), - EventsCacheMaxSize: dc.GetIntProperty(dynamicconfig.EventsCacheMaxSize, 512), - EventsCacheTTL: dc.GetDurationProperty(dynamicconfig.EventsCacheTTL, time.Hour), + EventsCacheMaxSizeBytes: dc.GetIntProperty(dynamicconfig.EventsCacheMaxSizeBytes, 512*1024), // 512KB + EventsCacheTTL: dc.GetDurationProperty(dynamicconfig.EventsCacheTTL, time.Hour), RangeSizeBits: 20, // 20 bits for sequencer, 2^20 sequence number for any range AcquireShardInterval: dc.GetDurationProperty(dynamicconfig.AcquireShardInterval, time.Minute), @@ -379,6 +381,7 @@ QueueCriticalSlicesCount: dc.GetIntProperty(dynamicconfig.QueueCriticalSlicesCount, 50), QueuePendingTaskMaxCount: dc.GetIntProperty(dynamicconfig.QueuePendingTaskMaxCount, 10000), QueueMaxReaderCount: dc.GetIntProperty(dynamicconfig.QueueMaxReaderCount, 2), + TaskDropInternalErrors: dc.GetBoolProperty(dynamicconfig.HistoryTaskDropInternalErrors, false), TaskSchedulerEnableRateLimiter: dc.GetBoolProperty(dynamicconfig.TaskSchedulerEnableRateLimiter, false), TaskSchedulerEnableRateLimiterShadowMode: dc.GetBoolProperty(dynamicconfig.TaskSchedulerEnableRateLimiterShadowMode, true), @@ -460,7 +463,6 @@ NumArchiveSystemWorkflows: dc.GetIntProperty(dynamicconfig.NumArchiveSystemWorkflows, 1000), ArchiveRequestRPS: dc.GetIntProperty(dynamicconfig.ArchiveRequestRPS, 300), // should be much smaller than frontend RPS ArchiveSignalTimeout: dc.GetDurationProperty(dynamicconfig.ArchiveSignalTimeout, 300*time.Millisecond), - DurableArchivalEnabled: dc.GetBoolProperty(dynamicconfig.DurableArchivalEnabled, true), BlobSizeLimitError: dc.GetIntPropertyFilteredByNamespace(dynamicconfig.BlobSizeLimitError, 2*1024*1024), BlobSizeLimitWarn: dc.GetIntPropertyFilteredByNamespace(dynamicconfig.BlobSizeLimitWarn, 512*1024), diff -Nru temporal-1.21.5-1/src/service/history/configs/quotas.go temporal-1.22.5/src/service/history/configs/quotas.go --- temporal-1.21.5-1/src/service/history/configs/quotas.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/configs/quotas.go 2024-02-23 09:45:43.000000000 +0000 @@ -25,75 +25,101 @@ package configs import ( + "go.temporal.io/server/common/dynamicconfig" + "go.temporal.io/server/common/headers" "go.temporal.io/server/common/quotas" ) +const ( + // OperatorPriority is used to give precedence to calls coming from web UI or tctl + OperatorPriority = 0 +) + var ( APIToPriority = map[string]int{ - "CloseShard": 0, - "GetShard": 0, - "DeleteWorkflowExecution": 0, - "DescribeHistoryHost": 0, - "DescribeMutableState": 0, - "DescribeWorkflowExecution": 0, - "GetDLQMessages": 0, - "GetDLQReplicationMessages": 0, - "GetMutableState": 0, - "GetReplicationMessages": 0, - "MergeDLQMessages": 0, - "PollMutableState": 0, - "PurgeDLQMessages": 0, - "QueryWorkflow": 0, - "ReapplyEvents": 0, - "RebuildMutableState": 0, - "RecordActivityTaskHeartbeat": 0, - "RecordActivityTaskStarted": 0, - "RecordChildExecutionCompleted": 0, - "VerifyChildExecutionCompletionRecorded": 0, - "RecordWorkflowTaskStarted": 0, - "RefreshWorkflowTasks": 0, - "RemoveSignalMutableState": 0, - "RemoveTask": 0, - "ReplicateEventsV2": 0, - "ReplicateWorkflowState": 0, - "RequestCancelWorkflowExecution": 0, - "ResetStickyTaskQueue": 0, - "ResetWorkflowExecution": 0, - 
"RespondActivityTaskCanceled": 0, - "RespondActivityTaskCompleted": 0, - "RespondActivityTaskFailed": 0, - "RespondWorkflowTaskCompleted": 0, - "RespondWorkflowTaskFailed": 0, - "ScheduleWorkflowTask": 0, - "VerifyFirstWorkflowTaskScheduled": 0, - "SignalWithStartWorkflowExecution": 0, - "SignalWorkflowExecution": 0, - "StartWorkflowExecution": 0, - "SyncActivity": 0, - "SyncShardStatus": 0, - "TerminateWorkflowExecution": 0, - "GenerateLastHistoryReplicationTasks": 0, - "GetReplicationStatus": 0, - "DeleteWorkflowVisibilityRecord": 0, - "UpdateWorkflowExecution": 0, - "PollWorkflowExecutionUpdate": 0, - "StreamWorkflowReplicationMessages": 0, + "CloseShard": 1, + "GetShard": 1, + "DeleteWorkflowExecution": 1, + "DescribeHistoryHost": 1, + "DescribeMutableState": 1, + "DescribeWorkflowExecution": 1, + "GetDLQMessages": 1, + "GetDLQReplicationMessages": 1, + "GetMutableState": 1, + "GetReplicationMessages": 1, + "IsActivityTaskValid": 1, + "IsWorkflowTaskValid": 1, + "MergeDLQMessages": 1, + "PollMutableState": 1, + "PurgeDLQMessages": 1, + "QueryWorkflow": 1, + "ReapplyEvents": 1, + "RebuildMutableState": 1, + "RecordActivityTaskHeartbeat": 1, + "RecordActivityTaskStarted": 1, + "RecordChildExecutionCompleted": 1, + "VerifyChildExecutionCompletionRecorded": 1, + "RecordWorkflowTaskStarted": 1, + "RefreshWorkflowTasks": 1, + "RemoveSignalMutableState": 1, + "RemoveTask": 1, + "ReplicateEventsV2": 1, + "ReplicateWorkflowState": 1, + "RequestCancelWorkflowExecution": 1, + "ResetStickyTaskQueue": 1, + "ResetWorkflowExecution": 1, + "RespondActivityTaskCanceled": 1, + "RespondActivityTaskCompleted": 1, + "RespondActivityTaskFailed": 1, + "RespondWorkflowTaskCompleted": 1, + "RespondWorkflowTaskFailed": 1, + "ScheduleWorkflowTask": 1, + "VerifyFirstWorkflowTaskScheduled": 1, + "SignalWithStartWorkflowExecution": 1, + "SignalWorkflowExecution": 1, + "StartWorkflowExecution": 1, + "SyncActivity": 1, + "SyncShardStatus": 1, + "TerminateWorkflowExecution": 1, + "GenerateLastHistoryReplicationTasks": 1, + "GetReplicationStatus": 1, + "DeleteWorkflowVisibilityRecord": 1, + "UpdateWorkflowExecution": 1, + "PollWorkflowExecutionUpdate": 1, + "StreamWorkflowReplicationMessages": 1, } - APIPrioritiesOrdered = []int{0} + APIPrioritiesOrdered = []int{OperatorPriority, 1} ) func NewPriorityRateLimiter( rateFn quotas.RateFn, + operatorRPSRatio dynamicconfig.FloatPropertyFn, ) quotas.RequestRateLimiter { rateLimiters := make(map[int]quotas.RequestRateLimiter) for priority := range APIPrioritiesOrdered { - rateLimiters[priority] = quotas.NewRequestRateLimiterAdapter(quotas.NewDefaultIncomingRateLimiter(rateFn)) + if priority == OperatorPriority { + rateLimiters[priority] = quotas.NewRequestRateLimiterAdapter(quotas.NewDefaultIncomingRateLimiter(operatorRateFn(rateFn, operatorRPSRatio))) + } else { + rateLimiters[priority] = quotas.NewRequestRateLimiterAdapter(quotas.NewDefaultIncomingRateLimiter(rateFn)) + } } return quotas.NewPriorityRateLimiter(func(req quotas.Request) int { + if req.CallerType == headers.CallerTypeOperator { + return OperatorPriority + } if priority, ok := APIToPriority[req.API]; ok { return priority } return APIPrioritiesOrdered[len(APIPrioritiesOrdered)-1] }, rateLimiters) } + +func operatorRateFn( + rateFn quotas.RateFn, + operatorRPSRatio dynamicconfig.FloatPropertyFn, +) quotas.RateFn { + return func() float64 { + return operatorRPSRatio() * rateFn() + } +} diff -Nru temporal-1.21.5-1/src/service/history/configs/quotas_test.go temporal-1.22.5/src/service/history/configs/quotas_test.go 
--- temporal-1.21.5-1/src/service/history/configs/quotas_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/configs/quotas_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -27,12 +27,15 @@ import ( "reflect" "testing" + "time" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" "golang.org/x/exp/slices" "go.temporal.io/server/api/historyservice/v1" + "go.temporal.io/server/common/headers" + "go.temporal.io/server/common/quotas" ) type ( @@ -83,3 +86,36 @@ } s.Equal(apiToPriority, APIToPriority) } + +func (s *quotasSuite) TestOperatorPrioritized() { + rateFn := func() float64 { return 5 } + operatorRPSRatioFn := func() float64 { return 0.2 } + limiter := NewPriorityRateLimiter(rateFn, operatorRPSRatioFn) + + operatorRequest := quotas.NewRequest( + "StartWorkflowExecution", + 1, + "", + headers.CallerTypeOperator, + -1, + "") + + apiRequest := quotas.NewRequest( + "StartWorkflowExecution", + 1, + "", + headers.CallerTypeAPI, + -1, + "") + + requestTime := time.Now() + limitCount := 0 + + for i := 0; i < 12; i++ { + if !limiter.Allow(requestTime, apiRequest) { + limitCount++ + s.True(limiter.Allow(requestTime, operatorRequest)) + } + } + s.Equal(2, limitCount) +} diff -Nru temporal-1.21.5-1/src/service/history/consts/const.go temporal-1.22.5/src/service/history/consts/const.go --- temporal-1.21.5-1/src/service/history/consts/const.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/consts/const.go 2024-02-23 09:45:43.000000000 +0000 @@ -99,6 +99,8 @@ ErrWorkflowTaskStateInconsistent = serviceerror.NewUnavailable("Workflow task state is inconsistent.") // ErrResourceExhaustedBusyWorkflow is an error indicating workflow resource is exhausted and should not be retried by service handler and client ErrResourceExhaustedBusyWorkflow = serviceerror.NewResourceExhausted(enums.RESOURCE_EXHAUSTED_CAUSE_BUSY_WORKFLOW, "Workflow is busy.") + // ErrResourceExhaustedAPSLimit is an error indicating user has reached their action per second limit + ErrResourceExhaustedAPSLimit = serviceerror.NewResourceExhausted(enums.RESOURCE_EXHAUSTED_CAUSE_APS_LIMIT, "Action per second limit exceeded.") // FailedWorkflowStatuses is a set of failed workflow close states, used for start workflow policy // for start workflow execution API diff -Nru temporal-1.21.5-1/src/service/history/events/cache.go temporal-1.22.5/src/service/history/events/cache.go --- temporal-1.21.5-1/src/service/history/events/cache.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/events/cache.go 2024-02-23 09:45:43.000000000 +0000 @@ -65,6 +65,10 @@ metricsHandler metrics.Handler shardID int32 } + + historyEventCacheItemImpl struct { + event *historypb.HistoryEvent + } ) var ( @@ -75,7 +79,6 @@ func NewEventsCache( shardID int32, - initialCount int, maxCount int, ttl time.Duration, eventsMgr persistence.ExecutionManager, @@ -84,7 +87,6 @@ metricsHandler metrics.Handler, ) *CacheImpl { opts := &cache.Options{} - opts.InitialCapacity = initialCount opts.TTL = ttl return &CacheImpl{ @@ -120,9 +122,9 @@ // Test hook for disabling cache if !e.disabled { - event, cacheHit := e.Cache.Get(key).(*historypb.HistoryEvent) + eventItem, cacheHit := e.Cache.Get(key).(*historyEventCacheItemImpl) if cacheHit { - return event, nil + return eventItem.event, nil } } @@ -141,7 +143,7 @@ // If invalid, return event anyway, but don't store in cache if validKey { - e.Put(key, event) + e.put(key, event) } return event, nil } @@ -155,7 +157,7 @@ if !e.validateKey(key) 
{ return } - e.Put(key, event) + e.put(key, event) } func (e *CacheImpl) DeleteEvent(key EventKey) { @@ -210,3 +212,21 @@ return nil, errEventNotFoundInBatch } + +func (e *CacheImpl) put(key EventKey, event *historypb.HistoryEvent) interface{} { + return e.Put(key, newHistoryEventCacheItem(event)) +} + +var _ cache.SizeGetter = (*historyEventCacheItemImpl)(nil) + +func newHistoryEventCacheItem( + event *historypb.HistoryEvent, +) *historyEventCacheItemImpl { + return &historyEventCacheItemImpl{ + event: event, + } +} + +func (h *historyEventCacheItemImpl) CacheSize() int { + return h.event.Size() +} diff -Nru temporal-1.21.5-1/src/service/history/events/cache_test.go temporal-1.22.5/src/service/history/events/cache_test.go --- temporal-1.21.5-1/src/service/history/events/cache_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/events/cache_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -89,7 +89,6 @@ shardId := int32(10) return NewEventsCache( shardId, - 16, 32, time.Minute, s.mockExecutionManager, diff -Nru temporal-1.21.5-1/src/service/history/events/notifier.go temporal-1.22.5/src/service/history/events/notifier.go --- temporal-1.21.5-1/src/service/history/events/notifier.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/events/notifier.go 2024-02-23 09:45:43.000000000 +0000 @@ -34,12 +34,14 @@ "go.temporal.io/api/serviceerror" enumsspb "go.temporal.io/server/api/enums/v1" + historyspb "go.temporal.io/server/api/history/v1" "go.temporal.io/server/common" "go.temporal.io/server/common/clock" "go.temporal.io/server/common/collection" "go.temporal.io/server/common/definition" "go.temporal.io/server/common/metrics" "go.temporal.io/server/common/namespace" + "go.temporal.io/server/common/persistence/versionhistory" ) const ( @@ -48,10 +50,11 @@ type ( Notifier interface { - common.Daemon NotifyNewHistoryEvent(event *Notification) WatchHistoryEvent(identifier definition.WorkflowKey) (string, chan *Notification, error) UnwatchHistoryEvent(identifier definition.WorkflowKey, subscriberID string) error + Start() + Stop() } Notification struct { @@ -61,9 +64,9 @@ NextEventID int64 PreviousStartedEventID int64 Timestamp time.Time - CurrentBranchToken []byte WorkflowState enumsspb.WorkflowExecutionState WorkflowStatus enumspb.WorkflowExecutionStatus + VersionHistories *historyspb.VersionHistories } NotifierImpl struct { @@ -95,9 +98,9 @@ lastFirstEventTxnID int64, nextEventID int64, previousStartedEventID int64, - currentBranchToken []byte, workflowState enumsspb.WorkflowExecutionState, workflowStatus enumspb.WorkflowExecutionStatus, + versionHistories *historyspb.VersionHistories, ) *Notification { return &Notification{ @@ -110,9 +113,9 @@ LastFirstEventTxnID: lastFirstEventTxnID, NextEventID: nextEventID, PreviousStartedEventID: previousStartedEventID, - CurrentBranchToken: currentBranchToken, WorkflowState: workflowState, WorkflowStatus: workflowStatus, + VersionHistories: versionhistory.CopyVersionHistories(versionHistories), } } diff -Nru temporal-1.21.5-1/src/service/history/events/notifier_test.go temporal-1.22.5/src/service/history/events/notifier_test.go --- temporal-1.21.5-1/src/service/history/events/notifier_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/events/notifier_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -35,10 +35,12 @@ enumspb "go.temporal.io/api/enums/v1" enumsspb "go.temporal.io/server/api/enums/v1" + historyspb "go.temporal.io/server/api/history/v1" 
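// A minimal, self-contained sketch (hypothetical types, not Temporal's
// common/cache package) of the events-cache change above: the entry-count
// limits (EventsCacheInitialSize/MaxSize) are replaced by a byte budget
// (EventsCacheMaxSizeBytes), and cached items report their own size through a
// SizeGetter-style method, as historyEventCacheItemImpl now does, so the
// cache can evict by total bytes instead of by number of entries.
package main

import (
	"container/list"
	"fmt"
)

// SizeGetter mirrors the interface the new cache item satisfies: every cached
// value knows its own size in bytes.
type SizeGetter interface {
	CacheSize() int
}

type entry struct {
	key  string
	item SizeGetter
}

// sizeBoundedCache evicts in insertion order once the summed CacheSize() of
// the stored items would exceed maxBytes.
type sizeBoundedCache struct {
	maxBytes  int
	currBytes int
	order     *list.List               // oldest entry at the front
	items     map[string]*list.Element // key -> element holding *entry
}

func newSizeBoundedCache(maxBytes int) *sizeBoundedCache {
	return &sizeBoundedCache{
		maxBytes: maxBytes,
		order:    list.New(),
		items:    make(map[string]*list.Element),
	}
}

func (c *sizeBoundedCache) Put(key string, item SizeGetter) {
	if el, ok := c.items[key]; ok {
		c.currBytes -= el.Value.(*entry).item.CacheSize()
		c.order.Remove(el)
		delete(c.items, key)
	}
	// Evict oldest entries until the new item fits within the byte budget.
	for c.currBytes+item.CacheSize() > c.maxBytes && c.order.Len() > 0 {
		oldest := c.order.Front()
		c.currBytes -= oldest.Value.(*entry).item.CacheSize()
		delete(c.items, oldest.Value.(*entry).key)
		c.order.Remove(oldest)
	}
	c.items[key] = c.order.PushBack(&entry{key: key, item: item})
	c.currBytes += item.CacheSize()
}

func (c *sizeBoundedCache) Get(key string) (SizeGetter, bool) {
	if el, ok := c.items[key]; ok {
		return el.Value.(*entry).item, true
	}
	return nil, false
}

// fakeEvent stands in for a history event whose serialized size is known.
type fakeEvent struct{ bytes int }

func (f fakeEvent) CacheSize() int { return f.bytes }

func main() {
	c := newSizeBoundedCache(512 * 1024) // 512KB, like EventsCacheMaxSizeBytes
	c.Put("evt-1", fakeEvent{bytes: 300 * 1024})
	c.Put("evt-2", fakeEvent{bytes: 300 * 1024}) // evicts evt-1 to stay under budget
	_, ok := c.Get("evt-1")
	fmt.Println("evt-1 still cached:", ok) // false
}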
"go.temporal.io/server/common/clock" "go.temporal.io/server/common/definition" "go.temporal.io/server/common/metrics" "go.temporal.io/server/common/namespace" + "go.temporal.io/server/common/persistence/versionhistory" ) type ( @@ -94,7 +96,10 @@ workflowState := enumsspb.WORKFLOW_EXECUTION_STATE_CREATED workflowStatus := enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING branchToken := make([]byte, 0) - historyEvent := NewNotification(namespaceID, execution, lastFirstEventID, lastFirstEventTxnID, nextEventID, previousStartedEventID, branchToken, workflowState, workflowStatus) + versionHistoryItem := versionhistory.NewVersionHistoryItem(nextEventID-1, 1) + currentVersionHistory := versionhistory.NewVersionHistory(branchToken, []*historyspb.VersionHistoryItem{versionHistoryItem}) + versionHistories := versionhistory.NewVersionHistories(currentVersionHistory) + historyEvent := NewNotification(namespaceID, execution, lastFirstEventID, lastFirstEventTxnID, nextEventID, previousStartedEventID, workflowState, workflowStatus, versionHistories) timerChan := time.NewTimer(time.Second * 2).C subscriberID, channel, err := s.notifier.WatchHistoryEvent(definition.NewWorkflowKey(namespaceID, execution.GetWorkflowId(), execution.GetRunId())) @@ -126,7 +131,10 @@ workflowState := enumsspb.WORKFLOW_EXECUTION_STATE_CREATED workflowStatus := enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING branchToken := make([]byte, 0) - historyEvent := NewNotification(namespaceID, execution, lastFirstEventID, lastFirstEventTxnID, nextEventID, previousStartedEventID, branchToken, workflowState, workflowStatus) + versionHistoryItem := versionhistory.NewVersionHistoryItem(nextEventID-1, 1) + currentVersionHistory := versionhistory.NewVersionHistory(branchToken, []*historyspb.VersionHistoryItem{versionHistoryItem}) + versionHistories := versionhistory.NewVersionHistories(currentVersionHistory) + historyEvent := NewNotification(namespaceID, execution, lastFirstEventID, lastFirstEventTxnID, nextEventID, previousStartedEventID, workflowState, workflowStatus, versionHistories) timerChan := time.NewTimer(time.Second * 5).C subscriberCount := 100 diff -Nru temporal-1.21.5-1/src/service/history/fx.go temporal-1.22.5/src/service/history/fx.go --- temporal-1.21.5-1/src/service/history/fx.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/fx.go 2024-02-23 09:45:43.000000000 +0000 @@ -25,7 +25,6 @@ package history import ( - "context" "net" "go.uber.org/fx" @@ -142,8 +141,9 @@ eventNotifier: args.EventNotifier, tracer: args.TracerProvider.Tracer(consts.LibraryName), - replicationTaskFetcherFactory: args.ReplicationTaskFetcherFactory, - streamReceiverMonitor: args.StreamReceiverMonitor, + replicationTaskFetcherFactory: args.ReplicationTaskFetcherFactory, + replicationTaskConverterProvider: args.ReplicationTaskConverterFactory, + streamReceiverMonitor: args.StreamReceiverMonitor, } // prevent us from trying to serve requests before shard controller is started and ready @@ -199,7 +199,7 @@ serviceConfig *configs.Config, ) *interceptor.RateLimitInterceptor { return interceptor.NewRateLimitInterceptor( - configs.NewPriorityRateLimiter(func() float64 { return float64(serviceConfig.RPS()) }), + configs.NewPriorityRateLimiter(func() float64 { return float64(serviceConfig.RPS()) }, serviceConfig.OperatorRPSRatio), map[string]int{}, ) } @@ -226,6 +226,7 @@ serviceConfig.PersistenceNamespaceMaxQPS, serviceConfig.PersistencePerShardNamespaceMaxQPS, serviceConfig.EnablePersistencePriorityRateLimiting, + serviceConfig.OperatorRPSRatio, 
serviceConfig.PersistenceDynamicRateLimitingParams, ) } @@ -250,6 +251,7 @@ searchAttributesMapperProvider, serviceConfig.VisibilityPersistenceMaxReadQPS, serviceConfig.VisibilityPersistenceMaxWriteQPS, + serviceConfig.OperatorRPSRatio, serviceConfig.EnableReadFromSecondaryVisibility, serviceConfig.SecondaryVisibilityWritingMode, serviceConfig.VisibilityDisableOrderByClause, @@ -289,26 +291,6 @@ ) } -func ServiceLifetimeHooks( - lc fx.Lifecycle, - svcStoppedCh chan struct{}, - svc *Service, -) { - lc.Append( - fx.Hook{ - OnStart: func(context.Context) error { - go func(svc common.Daemon, svcStoppedCh chan<- struct{}) { - // Start is blocked until Stop() is called. - svc.Start() - close(svcStoppedCh) - }(svc, svcStoppedCh) - - return nil - }, - OnStop: func(ctx context.Context) error { - svc.Stop() - return nil - }, - }, - ) +func ServiceLifetimeHooks(lc fx.Lifecycle, svc *Service) { + lc.Append(fx.StartStopHook(svc.Start, svc.Stop)) } diff -Nru temporal-1.21.5-1/src/service/history/handler.go temporal-1.22.5/src/service/history/handler.go --- temporal-1.21.5-1/src/service/history/handler.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/handler.go 2024-02-23 09:45:43.000000000 +0000 @@ -97,8 +97,9 @@ controller shard.Controller tracer trace.Tracer - replicationTaskFetcherFactory replication.TaskFetcherFactory - streamReceiverMonitor replication.StreamReceiverMonitor + replicationTaskFetcherFactory replication.TaskFetcherFactory + replicationTaskConverterProvider replication.SourceTaskConverterProvider + streamReceiverMonitor replication.StreamReceiverMonitor } NewHandlerArgs struct { @@ -123,8 +124,9 @@ EventNotifier events.Notifier TracerProvider trace.TracerProvider - ReplicationTaskFetcherFactory replication.TaskFetcherFactory - StreamReceiverMonitor replication.StreamReceiverMonitor + ReplicationTaskFetcherFactory replication.TaskFetcherFactory + ReplicationTaskConverterFactory replication.SourceTaskConverterProvider + StreamReceiverMonitor replication.StreamReceiverMonitor } ) @@ -188,8 +190,35 @@ return atomic.LoadInt32(&h.status) == common.DaemonStatusStopped } -// RecordActivityTaskHeartbeat - Record Activity Task Heart beat. 
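// A minimal, self-contained sketch of the deferred panic-capture pattern that
// the handler methods below switch to (metrics.CapturePanic in place of
// log.CapturePanic). capturePanic here is a hypothetical stand-in, not
// Temporal's helper: it recovers the panic, bumps a counter, logs it, and
// surfaces it through the named return error.
package main

import (
	"fmt"
	"log"
)

// capturePanic must itself be the deferred function so that recover() works.
func capturePanic(panicCount *int, retError *error) {
	if r := recover(); r != nil {
		*panicCount++ // in the real handler this would be a metrics counter
		log.Printf("captured panic: %v", r)
		*retError = fmt.Errorf("panic: %v", r)
	}
}

func doWork() (retError error) {
	var panics int
	defer capturePanic(&panics, &retError)
	panic("boom")
}

func main() {
	fmt.Println(doWork()) // prints "panic: boom" instead of crashing the process
}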
-func (h *Handler) RecordActivityTaskHeartbeat(ctx context.Context, request *historyservice.RecordActivityTaskHeartbeatRequest) (_ *historyservice.RecordActivityTaskHeartbeatResponse, retError error) { +// IsWorkflowTaskValid - whether workflow task is still valid +func (h *Handler) IsWorkflowTaskValid(ctx context.Context, request *historyservice.IsWorkflowTaskValidRequest) (_ *historyservice.IsWorkflowTaskValidResponse, retError error) { + defer log.CapturePanic(h.logger, &retError) + h.startWG.Wait() + + namespaceID := namespace.ID(request.GetNamespaceId()) + if namespaceID == "" { + return nil, h.convertError(errNamespaceNotSet) + } + workflowID := request.Execution.WorkflowId + + shardContext, err := h.controller.GetShardByNamespaceWorkflow(namespaceID, workflowID) + if err != nil { + return nil, h.convertError(err) + } + engine, err := shardContext.GetEngine(ctx) + if err != nil { + return nil, h.convertError(err) + } + + response, err := engine.IsWorkflowTaskValid(ctx, request) + if err != nil { + return nil, h.convertError(err) + } + return response, nil +} + +// IsActivityTaskValid - whether activity task is still valid +func (h *Handler) IsActivityTaskValid(ctx context.Context, request *historyservice.IsActivityTaskValidRequest) (_ *historyservice.IsActivityTaskValidResponse, retError error) { defer log.CapturePanic(h.logger, &retError) h.startWG.Wait() @@ -197,6 +226,32 @@ if namespaceID == "" { return nil, h.convertError(errNamespaceNotSet) } + workflowID := request.Execution.WorkflowId + + shardContext, err := h.controller.GetShardByNamespaceWorkflow(namespaceID, workflowID) + if err != nil { + return nil, h.convertError(err) + } + engine, err := shardContext.GetEngine(ctx) + if err != nil { + return nil, h.convertError(err) + } + + response, err := engine.IsActivityTaskValid(ctx, request) + if err != nil { + return nil, h.convertError(err) + } + return response, nil +} + +func (h *Handler) RecordActivityTaskHeartbeat(ctx context.Context, request *historyservice.RecordActivityTaskHeartbeatRequest) (_ *historyservice.RecordActivityTaskHeartbeatResponse, retError error) { + defer metrics.CapturePanic(h.logger, h.metricsHandler, &retError) + h.startWG.Wait() + + namespaceID := namespace.ID(request.GetNamespaceId()) + if namespaceID == "" { + return nil, h.convertError(errNamespaceNotSet) + } heartbeatRequest := request.HeartbeatRequest taskToken, err0 := h.tokenSerializer.Deserialize(heartbeatRequest.TaskToken) @@ -229,7 +284,7 @@ // RecordActivityTaskStarted - Record Activity Task started. func (h *Handler) RecordActivityTaskStarted(ctx context.Context, request *historyservice.RecordActivityTaskStartedRequest) (_ *historyservice.RecordActivityTaskStartedResponse, retError error) { - defer log.CapturePanic(h.logger, &retError) + defer metrics.CapturePanic(h.logger, h.metricsHandler, &retError) h.startWG.Wait() namespaceID := namespace.ID(request.GetNamespaceId()) @@ -261,7 +316,7 @@ // RecordWorkflowTaskStarted - Record Workflow Task started. 
func (h *Handler) RecordWorkflowTaskStarted(ctx context.Context, request *historyservice.RecordWorkflowTaskStartedRequest) (_ *historyservice.RecordWorkflowTaskStartedResponse, retError error) { - defer log.CapturePanic(h.logger, &retError) + defer metrics.CapturePanic(h.logger, h.metricsHandler, &retError) h.startWG.Wait() namespaceID := namespace.ID(request.GetNamespaceId()) @@ -303,7 +358,7 @@ // RespondActivityTaskCompleted - records completion of an activity task func (h *Handler) RespondActivityTaskCompleted(ctx context.Context, request *historyservice.RespondActivityTaskCompletedRequest) (_ *historyservice.RespondActivityTaskCompletedResponse, retError error) { - defer log.CapturePanic(h.logger, &retError) + defer metrics.CapturePanic(h.logger, h.metricsHandler, &retError) h.startWG.Wait() namespaceID := namespace.ID(request.GetNamespaceId()) @@ -342,7 +397,7 @@ // RespondActivityTaskFailed - records failure of an activity task func (h *Handler) RespondActivityTaskFailed(ctx context.Context, request *historyservice.RespondActivityTaskFailedRequest) (_ *historyservice.RespondActivityTaskFailedResponse, retError error) { - defer log.CapturePanic(h.logger, &retError) + defer metrics.CapturePanic(h.logger, h.metricsHandler, &retError) h.startWG.Wait() namespaceID := namespace.ID(request.GetNamespaceId()) @@ -381,7 +436,7 @@ // RespondActivityTaskCanceled - records failure of an activity task func (h *Handler) RespondActivityTaskCanceled(ctx context.Context, request *historyservice.RespondActivityTaskCanceledRequest) (_ *historyservice.RespondActivityTaskCanceledResponse, retError error) { - defer log.CapturePanic(h.logger, &retError) + defer metrics.CapturePanic(h.logger, h.metricsHandler, &retError) h.startWG.Wait() namespaceID := namespace.ID(request.GetNamespaceId()) @@ -420,7 +475,7 @@ // RespondWorkflowTaskCompleted - records completion of a workflow task func (h *Handler) RespondWorkflowTaskCompleted(ctx context.Context, request *historyservice.RespondWorkflowTaskCompletedRequest) (_ *historyservice.RespondWorkflowTaskCompletedResponse, retError error) { - defer log.CapturePanic(h.logger, &retError) + defer metrics.CapturePanic(h.logger, h.metricsHandler, &retError) h.startWG.Wait() namespaceID := namespace.ID(request.GetNamespaceId()) @@ -465,7 +520,7 @@ // RespondWorkflowTaskFailed - failed response to workflow task func (h *Handler) RespondWorkflowTaskFailed(ctx context.Context, request *historyservice.RespondWorkflowTaskFailedRequest) (_ *historyservice.RespondWorkflowTaskFailedResponse, retError error) { - defer log.CapturePanic(h.logger, &retError) + defer metrics.CapturePanic(h.logger, h.metricsHandler, &retError) h.startWG.Wait() namespaceID := namespace.ID(request.GetNamespaceId()) @@ -510,7 +565,7 @@ // StartWorkflowExecution - creates a new workflow execution func (h *Handler) StartWorkflowExecution(ctx context.Context, request *historyservice.StartWorkflowExecutionRequest) (_ *historyservice.StartWorkflowExecutionResponse, retError error) { - defer log.CapturePanic(h.logger, &retError) + defer metrics.CapturePanic(h.logger, h.metricsHandler, &retError) h.startWG.Wait() namespaceID := namespace.ID(request.GetNamespaceId()) @@ -545,7 +600,7 @@ // DescribeHistoryHost returns information about the internal states of a history host func (h *Handler) DescribeHistoryHost(_ context.Context, _ *historyservice.DescribeHistoryHostRequest) (_ *historyservice.DescribeHistoryHostResponse, retError error) { - defer log.CapturePanic(h.logger, &retError) + defer 
metrics.CapturePanic(h.logger, h.metricsHandler, &retError) h.startWG.Wait() itemsInCacheByIDCount, itemsInCacheByNameCount := h.namespaceRegistry.GetCacheSize() @@ -603,14 +658,14 @@ // CloseShard closes a shard hosted by this instance func (h *Handler) CloseShard(_ context.Context, request *historyservice.CloseShardRequest) (_ *historyservice.CloseShardResponse, retError error) { - defer log.CapturePanic(h.logger, &retError) + defer metrics.CapturePanic(h.logger, h.metricsHandler, &retError) h.controller.CloseShardByID(request.GetShardId()) return &historyservice.CloseShardResponse{}, nil } // GetShard gets a shard hosted by this instance func (h *Handler) GetShard(ctx context.Context, request *historyservice.GetShardRequest) (_ *historyservice.GetShardResponse, retError error) { - defer log.CapturePanic(h.logger, &retError) + defer metrics.CapturePanic(h.logger, h.metricsHandler, &retError) resp, err := h.persistenceShardManager.GetOrCreateShard(ctx, &persistence.GetOrCreateShardRequest{ ShardID: request.ShardId, }) @@ -622,7 +677,7 @@ // RebuildMutableState attempts to rebuild mutable state according to persisted history events func (h *Handler) RebuildMutableState(ctx context.Context, request *historyservice.RebuildMutableStateRequest) (_ *historyservice.RebuildMutableStateResponse, retError error) { - defer log.CapturePanic(h.logger, &retError) + defer metrics.CapturePanic(h.logger, h.metricsHandler, &retError) h.startWG.Wait() if h.isStopped() { @@ -656,7 +711,7 @@ // DescribeMutableState - returns the internal analysis of workflow execution state func (h *Handler) DescribeMutableState(ctx context.Context, request *historyservice.DescribeMutableStateRequest) (_ *historyservice.DescribeMutableStateResponse, retError error) { - defer log.CapturePanic(h.logger, &retError) + defer metrics.CapturePanic(h.logger, h.metricsHandler, &retError) h.startWG.Wait() if h.isStopped() { @@ -688,7 +743,7 @@ // GetMutableState - returns the id of the next event in the execution's history func (h *Handler) GetMutableState(ctx context.Context, request *historyservice.GetMutableStateRequest) (_ *historyservice.GetMutableStateResponse, retError error) { - defer log.CapturePanic(h.logger, &retError) + defer metrics.CapturePanic(h.logger, h.metricsHandler, &retError) h.startWG.Wait() if h.isStopped() { @@ -720,7 +775,7 @@ // PollMutableState - returns the id of the next event in the execution's history func (h *Handler) PollMutableState(ctx context.Context, request *historyservice.PollMutableStateRequest) (_ *historyservice.PollMutableStateResponse, retError error) { - defer log.CapturePanic(h.logger, &retError) + defer metrics.CapturePanic(h.logger, h.metricsHandler, &retError) h.startWG.Wait() if h.isStopped() { @@ -752,7 +807,7 @@ // DescribeWorkflowExecution returns information about the specified workflow execution. 
func (h *Handler) DescribeWorkflowExecution(ctx context.Context, request *historyservice.DescribeWorkflowExecutionRequest) (_ *historyservice.DescribeWorkflowExecutionResponse, retError error) { - defer log.CapturePanic(h.logger, &retError) + defer metrics.CapturePanic(h.logger, h.metricsHandler, &retError) h.startWG.Wait() if h.isStopped() { @@ -784,7 +839,7 @@ // RequestCancelWorkflowExecution - requests cancellation of a workflow func (h *Handler) RequestCancelWorkflowExecution(ctx context.Context, request *historyservice.RequestCancelWorkflowExecutionRequest) (_ *historyservice.RequestCancelWorkflowExecutionResponse, retError error) { - defer log.CapturePanic(h.logger, &retError) + defer metrics.CapturePanic(h.logger, h.metricsHandler, &retError) h.startWG.Wait() if h.isStopped() { @@ -824,7 +879,7 @@ // SignalWorkflowExecution is used to send a signal event to running workflow execution. This results in // WorkflowExecutionSignaled event recorded in the history and a workflow task being created for the execution. func (h *Handler) SignalWorkflowExecution(ctx context.Context, request *historyservice.SignalWorkflowExecutionRequest) (_ *historyservice.SignalWorkflowExecutionResponse, retError error) { - defer log.CapturePanic(h.logger, &retError) + defer metrics.CapturePanic(h.logger, h.metricsHandler, &retError) h.startWG.Wait() if h.isStopped() { @@ -861,7 +916,7 @@ // If workflow is not running or not found, this results in WorkflowExecutionStarted and WorkflowExecutionSignaled // event recorded in history, and a workflow task being created for the execution func (h *Handler) SignalWithStartWorkflowExecution(ctx context.Context, request *historyservice.SignalWithStartWorkflowExecutionRequest) (_ *historyservice.SignalWithStartWorkflowExecutionResponse, retError error) { - defer log.CapturePanic(h.logger, &retError) + defer metrics.CapturePanic(h.logger, h.metricsHandler, &retError) h.startWG.Wait() if h.isStopped() { @@ -918,7 +973,7 @@ // RemoveSignalMutableState is used to remove a signal request ID that was previously recorded. This is currently // used to clean execution info when signal workflow task finished. func (h *Handler) RemoveSignalMutableState(ctx context.Context, request *historyservice.RemoveSignalMutableStateRequest) (_ *historyservice.RemoveSignalMutableStateResponse, retError error) { - defer log.CapturePanic(h.logger, &retError) + defer metrics.CapturePanic(h.logger, h.metricsHandler, &retError) h.startWG.Wait() if h.isStopped() { @@ -952,7 +1007,7 @@ // TerminateWorkflowExecution terminates an existing workflow execution by recording WorkflowExecutionTerminated event // in the history and immediately terminating the execution instance. 
func (h *Handler) TerminateWorkflowExecution(ctx context.Context, request *historyservice.TerminateWorkflowExecutionRequest) (_ *historyservice.TerminateWorkflowExecutionResponse, retError error) { - defer log.CapturePanic(h.logger, &retError) + defer metrics.CapturePanic(h.logger, h.metricsHandler, &retError) h.startWG.Wait() if h.isStopped() { @@ -984,7 +1039,7 @@ } func (h *Handler) DeleteWorkflowExecution(ctx context.Context, request *historyservice.DeleteWorkflowExecutionRequest) (_ *historyservice.DeleteWorkflowExecutionResponse, retError error) { - defer log.CapturePanic(h.logger, &retError) + defer metrics.CapturePanic(h.logger, h.metricsHandler, &retError) h.startWG.Wait() if h.isStopped() { @@ -1017,7 +1072,7 @@ // ResetWorkflowExecution reset an existing workflow execution // in the history and immediately terminating the execution instance. func (h *Handler) ResetWorkflowExecution(ctx context.Context, request *historyservice.ResetWorkflowExecutionRequest) (_ *historyservice.ResetWorkflowExecutionResponse, retError error) { - defer log.CapturePanic(h.logger, &retError) + defer metrics.CapturePanic(h.logger, h.metricsHandler, &retError) h.startWG.Wait() if h.isStopped() { @@ -1050,7 +1105,7 @@ // QueryWorkflow queries a workflow. func (h *Handler) QueryWorkflow(ctx context.Context, request *historyservice.QueryWorkflowRequest) (_ *historyservice.QueryWorkflowResponse, retError error) { - defer log.CapturePanic(h.logger, &retError) + defer metrics.CapturePanic(h.logger, h.metricsHandler, &retError) h.startWG.Wait() if h.isStopped() { @@ -1085,7 +1140,7 @@ // child execution without creating the workflow task and then calls this API after updating the mutable state of // parent execution. func (h *Handler) ScheduleWorkflowTask(ctx context.Context, request *historyservice.ScheduleWorkflowTaskRequest) (_ *historyservice.ScheduleWorkflowTaskResponse, retError error) { - defer log.CapturePanic(h.logger, &retError) + defer metrics.CapturePanic(h.logger, h.metricsHandler, &retError) h.startWG.Wait() if h.isStopped() { @@ -1124,7 +1179,7 @@ ctx context.Context, request *historyservice.VerifyFirstWorkflowTaskScheduledRequest, ) (_ *historyservice.VerifyFirstWorkflowTaskScheduledResponse, retError error) { - defer log.CapturePanic(h.logger, &retError) + defer metrics.CapturePanic(h.logger, h.metricsHandler, &retError) h.startWG.Wait() if h.isStopped() { @@ -1162,7 +1217,7 @@ // RecordChildExecutionCompleted is used for reporting the completion of child workflow execution to parent. // This is mainly called by transfer queue processor during the processing of DeleteExecution task. 
func (h *Handler) RecordChildExecutionCompleted(ctx context.Context, request *historyservice.RecordChildExecutionCompletedRequest) (_ *historyservice.RecordChildExecutionCompletedResponse, retError error) { - defer log.CapturePanic(h.logger, &retError) + defer metrics.CapturePanic(h.logger, h.metricsHandler, &retError) h.startWG.Wait() if h.isStopped() { @@ -1174,13 +1229,11 @@ return nil, h.convertError(errNamespaceNotSet) } - if request.WorkflowExecution == nil { + if request.GetParentExecution() == nil { return nil, h.convertError(errWorkflowExecutionNotSet) } - workflowExecution := request.WorkflowExecution - workflowID := workflowExecution.GetWorkflowId() - shardContext, err := h.controller.GetShardByNamespaceWorkflow(namespaceID, workflowID) + shardContext, err := h.controller.GetShardByNamespaceWorkflow(namespaceID, request.GetParentExecution().WorkflowId) if err != nil { return nil, h.convertError(err) } @@ -1201,7 +1254,7 @@ ctx context.Context, request *historyservice.VerifyChildExecutionCompletionRecordedRequest, ) (_ *historyservice.VerifyChildExecutionCompletionRecordedResponse, retError error) { - defer log.CapturePanic(h.logger, &retError) + defer metrics.CapturePanic(h.logger, h.metricsHandler, &retError) h.startWG.Wait() if h.isStopped() { @@ -1240,7 +1293,7 @@ // 2. StickyScheduleToStartTimeout func (h *Handler) ResetStickyTaskQueue(ctx context.Context, request *historyservice.ResetStickyTaskQueueRequest) (_ *historyservice.ResetStickyTaskQueueResponse, retError error) { - defer log.CapturePanic(h.logger, &retError) + defer metrics.CapturePanic(h.logger, h.metricsHandler, &retError) h.startWG.Wait() if h.isStopped() { @@ -1272,7 +1325,7 @@ // ReplicateEventsV2 is called by processor to replicate history events for passive namespaces func (h *Handler) ReplicateEventsV2(ctx context.Context, request *historyservice.ReplicateEventsV2Request) (_ *historyservice.ReplicateEventsV2Response, retError error) { - defer log.CapturePanic(h.logger, &retError) + defer metrics.CapturePanic(h.logger, h.metricsHandler, &retError) h.startWG.Wait() if h.isStopped() { @@ -1312,7 +1365,7 @@ ctx context.Context, request *historyservice.ReplicateWorkflowStateRequest, ) (_ *historyservice.ReplicateWorkflowStateResponse, retError error) { - defer log.CapturePanic(h.logger, &retError) + defer metrics.CapturePanic(h.logger, h.metricsHandler, &retError) h.startWG.Wait() if h.isStopped() { @@ -1341,7 +1394,7 @@ // SyncShardStatus is called by processor to sync history shard information from another cluster func (h *Handler) SyncShardStatus(ctx context.Context, request *historyservice.SyncShardStatusRequest) (_ *historyservice.SyncShardStatusResponse, retError error) { - defer log.CapturePanic(h.logger, &retError) + defer metrics.CapturePanic(h.logger, h.metricsHandler, &retError) h.startWG.Wait() if h.isStopped() { @@ -1379,7 +1432,7 @@ // SyncActivity is called by processor to sync activity func (h *Handler) SyncActivity(ctx context.Context, request *historyservice.SyncActivityRequest) (_ *historyservice.SyncActivityResponse, retError error) { - defer log.CapturePanic(h.logger, &retError) + defer metrics.CapturePanic(h.logger, h.metricsHandler, &retError) h.startWG.Wait() if h.isStopped() { @@ -1423,7 +1476,7 @@ // GetReplicationMessages is called by remote peers to get replicated messages for cross DC replication func (h *Handler) GetReplicationMessages(ctx context.Context, request *historyservice.GetReplicationMessagesRequest) (_ *historyservice.GetReplicationMessagesResponse, retError error) { - 
defer log.CapturePanic(h.logger, &retError) + defer metrics.CapturePanic(h.logger, h.metricsHandler, &retError) h.startWG.Wait() if h.isStopped() { @@ -1484,7 +1537,7 @@ // GetDLQReplicationMessages is called by remote peers to get replicated messages for DLQ merging func (h *Handler) GetDLQReplicationMessages(ctx context.Context, request *historyservice.GetDLQReplicationMessagesRequest) (_ *historyservice.GetDLQReplicationMessagesResponse, retError error) { - defer log.CapturePanic(h.logger, &retError) + defer metrics.CapturePanic(h.logger, h.metricsHandler, &retError) h.startWG.Wait() if h.isStopped() { @@ -1561,7 +1614,7 @@ // ReapplyEvents applies stale events to the current workflow and the current run func (h *Handler) ReapplyEvents(ctx context.Context, request *historyservice.ReapplyEventsRequest) (_ *historyservice.ReapplyEventsResponse, retError error) { - defer log.CapturePanic(h.logger, &retError) + defer metrics.CapturePanic(h.logger, h.metricsHandler, &retError) h.startWG.Wait() if h.isStopped() { @@ -1602,7 +1655,7 @@ } func (h *Handler) GetDLQMessages(ctx context.Context, request *historyservice.GetDLQMessagesRequest) (_ *historyservice.GetDLQMessagesResponse, retError error) { - defer log.CapturePanic(h.logger, &retError) + defer metrics.CapturePanic(h.logger, h.metricsHandler, &retError) h.startWG.Wait() if h.isStopped() { @@ -1628,7 +1681,7 @@ } func (h *Handler) PurgeDLQMessages(ctx context.Context, request *historyservice.PurgeDLQMessagesRequest) (_ *historyservice.PurgeDLQMessagesResponse, retError error) { - defer log.CapturePanic(h.logger, &retError) + defer metrics.CapturePanic(h.logger, h.metricsHandler, &retError) h.startWG.Wait() if h.isStopped() { @@ -1652,7 +1705,7 @@ } func (h *Handler) MergeDLQMessages(ctx context.Context, request *historyservice.MergeDLQMessagesRequest) (_ *historyservice.MergeDLQMessagesResponse, retError error) { - defer log.CapturePanic(h.logger, &retError) + defer metrics.CapturePanic(h.logger, h.metricsHandler, &retError) h.startWG.Wait() if h.isStopped() { @@ -1678,7 +1731,7 @@ } func (h *Handler) RefreshWorkflowTasks(ctx context.Context, request *historyservice.RefreshWorkflowTasksRequest) (_ *historyservice.RefreshWorkflowTasksResponse, retError error) { - defer log.CapturePanic(h.logger, &retError) + defer metrics.CapturePanic(h.logger, h.metricsHandler, &retError) h.startWG.Wait() if h.isStopped() { @@ -1718,7 +1771,7 @@ ctx context.Context, request *historyservice.GenerateLastHistoryReplicationTasksRequest, ) (_ *historyservice.GenerateLastHistoryReplicationTasksResponse, retError error) { - defer log.CapturePanic(h.logger, &retError) + defer metrics.CapturePanic(h.logger, h.metricsHandler, &retError) h.startWG.Wait() if h.isStopped() { @@ -1750,7 +1803,7 @@ ctx context.Context, request *historyservice.GetReplicationStatusRequest, ) (_ *historyservice.GetReplicationStatusResponse, retError error) { - defer log.CapturePanic(h.logger, &retError) + defer metrics.CapturePanic(h.logger, h.metricsHandler, &retError) h.startWG.Wait() if h.isStopped() { @@ -1784,7 +1837,7 @@ ctx context.Context, request *historyservice.DeleteWorkflowVisibilityRecordRequest, ) (_ *historyservice.DeleteWorkflowVisibilityRecordResponse, retError error) { - defer log.CapturePanic(h.logger, &retError) + defer metrics.CapturePanic(h.logger, h.metricsHandler, &retError) h.startWG.Wait() if h.isStopped() { @@ -1830,7 +1883,7 @@ ctx context.Context, request *historyservice.UpdateWorkflowExecutionRequest, ) (_ *historyservice.UpdateWorkflowExecutionResponse, 
retError error) { - defer log.CapturePanic(h.logger, &retError) + defer metrics.CapturePanic(h.logger, h.metricsHandler, &retError) h.startWG.Wait() if h.isStopped() { @@ -1883,7 +1936,7 @@ func (h *Handler) StreamWorkflowReplicationMessages( server historyservice.HistoryService_StreamWorkflowReplicationMessagesServer, ) (retError error) { - defer log.CapturePanic(h.logger, &retError) + defer metrics.CapturePanic(h.logger, h.metricsHandler, &retError) h.startWG.Wait() if h.isStopped() { @@ -1930,9 +1983,9 @@ server, shardContext, engine, - replication.NewSourceTaskConvertor( + h.replicationTaskConverterProvider( engine, - shardContext.GetNamespaceRegistry(), + shardContext, clientShardCount, clientClusterName, replication.NewClusterShardKey(clientClusterShardID.ClusterID, clientClusterShardID.ShardID), diff -Nru temporal-1.21.5-1/src/service/history/historyEngine.go temporal-1.22.5/src/service/history/historyEngine.go --- temporal-1.21.5-1/src/service/history/historyEngine.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/historyEngine.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,786 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- -package history - -import ( - "context" - "sync/atomic" - "time" - - "go.opentelemetry.io/otel/trace" - commonpb "go.temporal.io/api/common/v1" - historypb "go.temporal.io/api/history/v1" - - "go.temporal.io/server/api/historyservice/v1" - "go.temporal.io/server/api/matchingservice/v1" - replicationspb "go.temporal.io/server/api/replication/v1" - "go.temporal.io/server/client" - "go.temporal.io/server/common" - "go.temporal.io/server/common/clock" - "go.temporal.io/server/common/cluster" - "go.temporal.io/server/common/collection" - "go.temporal.io/server/common/definition" - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/log/tag" - "go.temporal.io/server/common/metrics" - "go.temporal.io/server/common/namespace" - "go.temporal.io/server/common/persistence" - "go.temporal.io/server/common/persistence/serialization" - "go.temporal.io/server/common/persistence/visibility" - "go.temporal.io/server/common/persistence/visibility/manager" - "go.temporal.io/server/common/primitives/timestamp" - "go.temporal.io/server/common/sdk" - "go.temporal.io/server/common/searchattribute" - "go.temporal.io/server/service/history/api" - "go.temporal.io/server/service/history/api/deleteworkflow" - "go.temporal.io/server/service/history/api/describemutablestate" - "go.temporal.io/server/service/history/api/describeworkflow" - "go.temporal.io/server/service/history/api/pollupdate" - "go.temporal.io/server/service/history/api/queryworkflow" - "go.temporal.io/server/service/history/api/reapplyevents" - "go.temporal.io/server/service/history/api/recordactivitytaskheartbeat" - "go.temporal.io/server/service/history/api/recordactivitytaskstarted" - "go.temporal.io/server/service/history/api/recordchildworkflowcompleted" - "go.temporal.io/server/service/history/api/refreshworkflow" - "go.temporal.io/server/service/history/api/removesignalmutablestate" - replicationapi "go.temporal.io/server/service/history/api/replication" - "go.temporal.io/server/service/history/api/replicationadmin" - "go.temporal.io/server/service/history/api/requestcancelworkflow" - "go.temporal.io/server/service/history/api/resetstickytaskqueue" - "go.temporal.io/server/service/history/api/resetworkflow" - "go.temporal.io/server/service/history/api/respondactivitytaskcanceled" - "go.temporal.io/server/service/history/api/respondactivitytaskcompleted" - "go.temporal.io/server/service/history/api/respondactivitytaskfailed" - "go.temporal.io/server/service/history/api/signalwithstartworkflow" - "go.temporal.io/server/service/history/api/signalworkflow" - "go.temporal.io/server/service/history/api/startworkflow" - "go.temporal.io/server/service/history/api/terminateworkflow" - "go.temporal.io/server/service/history/api/updateworkflow" - "go.temporal.io/server/service/history/api/verifychildworkflowcompletionrecorded" - "go.temporal.io/server/service/history/configs" - "go.temporal.io/server/service/history/consts" - "go.temporal.io/server/service/history/deletemanager" - "go.temporal.io/server/service/history/events" - "go.temporal.io/server/service/history/ndc" - "go.temporal.io/server/service/history/queues" - "go.temporal.io/server/service/history/replication" - "go.temporal.io/server/service/history/shard" - "go.temporal.io/server/service/history/tasks" - wcache "go.temporal.io/server/service/history/workflow/cache" - "go.temporal.io/server/service/worker/archiver" -) - -const ( - activityCancellationMsgActivityNotStarted = "ACTIVITY_ID_NOT_STARTED" -) - -type ( - historyEngineImpl struct { - status int32 - 
currentClusterName string - shard shard.Context - timeSource clock.TimeSource - workflowTaskHandler workflowTaskHandlerCallbacks - clusterMetadata cluster.Metadata - executionManager persistence.ExecutionManager - queueProcessors map[tasks.Category]queues.Queue - replicationAckMgr replication.AckManager - nDCReplicator ndc.HistoryReplicator - nDCActivityReplicator ndc.ActivityReplicator - replicationProcessorMgr common.Daemon - eventNotifier events.Notifier - tokenSerializer common.TaskTokenSerializer - metricsHandler metrics.Handler - logger log.Logger - throttledLogger log.Logger - config *configs.Config - workflowRebuilder workflowRebuilder - workflowResetter ndc.WorkflowResetter - sdkClientFactory sdk.ClientFactory - eventsReapplier ndc.EventsReapplier - matchingClient matchingservice.MatchingServiceClient - rawMatchingClient matchingservice.MatchingServiceClient - replicationDLQHandler replication.DLQHandler - persistenceVisibilityMgr manager.VisibilityManager - searchAttributesValidator *searchattribute.Validator - workflowDeleteManager deletemanager.DeleteManager - eventSerializer serialization.Serializer - workflowConsistencyChecker api.WorkflowConsistencyChecker - tracer trace.Tracer - } -) - -// NewEngineWithShardContext creates an instance of history engine -func NewEngineWithShardContext( - shard shard.Context, - clientBean client.Bean, - matchingClient matchingservice.MatchingServiceClient, - sdkClientFactory sdk.ClientFactory, - eventNotifier events.Notifier, - config *configs.Config, - rawMatchingClient matchingservice.MatchingServiceClient, - workflowCache wcache.Cache, - archivalClient archiver.Client, - eventSerializer serialization.Serializer, - queueProcessorFactories []QueueFactory, - replicationTaskFetcherFactory replication.TaskFetcherFactory, - replicationTaskExecutorProvider replication.TaskExecutorProvider, - workflowConsistencyChecker api.WorkflowConsistencyChecker, - tracerProvider trace.TracerProvider, - persistenceVisibilityMgr manager.VisibilityManager, -) shard.Engine { - currentClusterName := shard.GetClusterMetadata().GetCurrentClusterName() - - logger := shard.GetLogger() - executionManager := shard.GetExecutionManager() - - workflowDeleteManager := deletemanager.NewDeleteManager( - shard, - workflowCache, - config, - archivalClient, - shard.GetTimeSource(), - persistenceVisibilityMgr, - ) - - historyEngImpl := &historyEngineImpl{ - status: common.DaemonStatusInitialized, - currentClusterName: currentClusterName, - shard: shard, - clusterMetadata: shard.GetClusterMetadata(), - timeSource: shard.GetTimeSource(), - executionManager: executionManager, - tokenSerializer: common.NewProtoTaskTokenSerializer(), - logger: log.With(logger, tag.ComponentHistoryEngine), - throttledLogger: log.With(shard.GetThrottledLogger(), tag.ComponentHistoryEngine), - metricsHandler: shard.GetMetricsHandler(), - eventNotifier: eventNotifier, - config: config, - sdkClientFactory: sdkClientFactory, - matchingClient: matchingClient, - rawMatchingClient: rawMatchingClient, - persistenceVisibilityMgr: persistenceVisibilityMgr, - workflowDeleteManager: workflowDeleteManager, - eventSerializer: eventSerializer, - workflowConsistencyChecker: workflowConsistencyChecker, - tracer: tracerProvider.Tracer(consts.LibraryName), - } - - historyEngImpl.queueProcessors = make(map[tasks.Category]queues.Queue) - for _, factory := range queueProcessorFactories { - processor := factory.CreateQueue(shard, workflowCache) - historyEngImpl.queueProcessors[processor.Category()] = processor - } - - 
historyEngImpl.eventsReapplier = ndc.NewEventsReapplier(shard.GetMetricsHandler(), logger) - - if shard.GetClusterMetadata().IsGlobalNamespaceEnabled() { - historyEngImpl.replicationAckMgr = replication.NewAckManager( - shard, - workflowCache, - executionManager, - logger, - ) - historyEngImpl.nDCReplicator = ndc.NewHistoryReplicator( - shard, - workflowCache, - historyEngImpl.eventsReapplier, - logger, - eventSerializer, - ) - historyEngImpl.nDCActivityReplicator = ndc.NewActivityReplicator( - shard, - workflowCache, - logger, - ) - } - historyEngImpl.workflowRebuilder = NewWorkflowRebuilder( - shard, - workflowCache, - logger, - ) - historyEngImpl.workflowResetter = ndc.NewWorkflowResetter( - shard, - workflowCache, - logger, - ) - - historyEngImpl.searchAttributesValidator = searchattribute.NewValidator( - shard.GetSearchAttributesProvider(), - shard.GetSearchAttributesMapperProvider(), - config.SearchAttributesNumberOfKeysLimit, - config.SearchAttributesSizeOfValueLimit, - config.SearchAttributesTotalSizeLimit, - persistenceVisibilityMgr, - visibility.AllowListForValidation(persistenceVisibilityMgr.GetStoreNames()), - ) - - historyEngImpl.workflowTaskHandler = newWorkflowTaskHandlerCallback(historyEngImpl) - historyEngImpl.replicationDLQHandler = replication.NewLazyDLQHandler( - shard, - workflowDeleteManager, - workflowCache, - clientBean, - replicationTaskExecutorProvider, - ) - historyEngImpl.replicationProcessorMgr = replication.NewTaskProcessorManager( - config, - shard, - historyEngImpl, - workflowCache, - workflowDeleteManager, - clientBean, - eventSerializer, - replicationTaskFetcherFactory, - replicationTaskExecutorProvider, - ) - return historyEngImpl -} - -// Start will spin up all the components needed to start serving this shard. -// Make sure all the components are loaded lazily so start can return immediately. This is important because -// ShardController calls start sequentially for all the shards for a given host during startup. -func (e *historyEngineImpl) Start() { - if !atomic.CompareAndSwapInt32( - &e.status, - common.DaemonStatusInitialized, - common.DaemonStatusStarted, - ) { - return - } - - e.logger.Info("", tag.LifeCycleStarting) - defer e.logger.Info("", tag.LifeCycleStarted) - - e.registerNamespaceStateChangeCallback() - - for _, queueProcessor := range e.queueProcessors { - queueProcessor.Start() - } - e.replicationProcessorMgr.Start() -} - -// Stop the service. 
-func (e *historyEngineImpl) Stop() { - if !atomic.CompareAndSwapInt32( - &e.status, - common.DaemonStatusStarted, - common.DaemonStatusStopped, - ) { - return - } - - e.logger.Info("", tag.LifeCycleStopping) - defer e.logger.Info("", tag.LifeCycleStopped) - - for _, queueProcessor := range e.queueProcessors { - queueProcessor.Stop() - } - e.replicationProcessorMgr.Stop() - if e.replicationAckMgr != nil { - e.replicationAckMgr.Close() - } - // unset the failover callback - e.shard.GetNamespaceRegistry().UnregisterStateChangeCallback(e) -} - -func (e *historyEngineImpl) registerNamespaceStateChangeCallback() { - - e.shard.GetNamespaceRegistry().RegisterStateChangeCallback(e, func(ns *namespace.Namespace, deletedFromDb bool) { - if e.shard.GetClusterMetadata().IsGlobalNamespaceEnabled() { - e.shard.UpdateHandoverNamespace(ns, deletedFromDb) - } - - if deletedFromDb { - return - } - - if ns.IsGlobalNamespace() && - ns.ReplicationPolicy() == namespace.ReplicationPolicyMultiCluster && - ns.ActiveClusterName() == e.currentClusterName { - - for _, queueProcessor := range e.queueProcessors { - queueProcessor.FailoverNamespace(ns.ID().String()) - } - } - }) -} - -// StartWorkflowExecution starts a workflow execution -// Consistency guarantee: always write -func (e *historyEngineImpl) StartWorkflowExecution( - ctx context.Context, - startRequest *historyservice.StartWorkflowExecutionRequest, -) (resp *historyservice.StartWorkflowExecutionResponse, retError error) { - starter, err := startworkflow.NewStarter( - e.shard, - e.workflowConsistencyChecker, - e.tokenSerializer, - startRequest, - ) - if err != nil { - return nil, err - } - return starter.Invoke(ctx) -} - -// GetMutableState retrieves the mutable state of the workflow execution -func (e *historyEngineImpl) GetMutableState( - ctx context.Context, - request *historyservice.GetMutableStateRequest, -) (*historyservice.GetMutableStateResponse, error) { - return api.GetOrPollMutableState(ctx, request, e.shard, e.workflowConsistencyChecker, e.eventNotifier) -} - -// PollMutableState retrieves the mutable state of the workflow execution with long polling -func (e *historyEngineImpl) PollMutableState( - ctx context.Context, - request *historyservice.PollMutableStateRequest, -) (*historyservice.PollMutableStateResponse, error) { - - response, err := api.GetOrPollMutableState( - ctx, - &historyservice.GetMutableStateRequest{ - NamespaceId: request.GetNamespaceId(), - Execution: request.Execution, - ExpectedNextEventId: request.ExpectedNextEventId, - CurrentBranchToken: request.CurrentBranchToken, - }, - e.shard, - e.workflowConsistencyChecker, - e.eventNotifier, - ) - if err != nil { - return nil, err - } - - return &historyservice.PollMutableStateResponse{ - Execution: response.Execution, - WorkflowType: response.WorkflowType, - NextEventId: response.NextEventId, - PreviousStartedEventId: response.PreviousStartedEventId, - LastFirstEventId: response.LastFirstEventId, - LastFirstEventTxnId: response.LastFirstEventTxnId, - TaskQueue: response.TaskQueue, - StickyTaskQueue: response.StickyTaskQueue, - StickyTaskQueueScheduleToStartTimeout: response.StickyTaskQueueScheduleToStartTimeout, - CurrentBranchToken: response.CurrentBranchToken, - VersionHistories: response.VersionHistories, - WorkflowState: response.WorkflowState, - WorkflowStatus: response.WorkflowStatus, - FirstExecutionRunId: response.FirstExecutionRunId, - }, nil -} - -func (e *historyEngineImpl) QueryWorkflow( - ctx context.Context, - request *historyservice.QueryWorkflowRequest, -) (_ 
*historyservice.QueryWorkflowResponse, retErr error) { - return queryworkflow.Invoke(ctx, request, e.shard, e.workflowConsistencyChecker, e.rawMatchingClient, e.matchingClient) -} - -func (e *historyEngineImpl) DescribeMutableState( - ctx context.Context, - request *historyservice.DescribeMutableStateRequest, -) (response *historyservice.DescribeMutableStateResponse, retError error) { - return describemutablestate.Invoke(ctx, request, e.shard, e.workflowConsistencyChecker) -} - -// ResetStickyTaskQueue reset the volatile information in mutable state of a given workflow. -// Volatile information are the information related to client, such as: -// 1. StickyTaskQueue -// 2. StickyScheduleToStartTimeout -func (e *historyEngineImpl) ResetStickyTaskQueue( - ctx context.Context, - resetRequest *historyservice.ResetStickyTaskQueueRequest, -) (*historyservice.ResetStickyTaskQueueResponse, error) { - return resetstickytaskqueue.Invoke(ctx, resetRequest, e.shard, e.workflowConsistencyChecker) -} - -// DescribeWorkflowExecution returns information about the specified workflow execution. -func (e *historyEngineImpl) DescribeWorkflowExecution( - ctx context.Context, - request *historyservice.DescribeWorkflowExecutionRequest, -) (_ *historyservice.DescribeWorkflowExecutionResponse, retError error) { - return describeworkflow.Invoke( - ctx, - request, - e.shard, - e.workflowConsistencyChecker, - e.persistenceVisibilityMgr, - ) -} - -func (e *historyEngineImpl) RecordActivityTaskStarted( - ctx context.Context, - request *historyservice.RecordActivityTaskStartedRequest, -) (*historyservice.RecordActivityTaskStartedResponse, error) { - return recordactivitytaskstarted.Invoke(ctx, request, e.shard, e.workflowConsistencyChecker) -} - -// ScheduleWorkflowTask schedules a workflow task if no outstanding workflow task found -func (e *historyEngineImpl) ScheduleWorkflowTask( - ctx context.Context, - req *historyservice.ScheduleWorkflowTaskRequest, -) error { - return e.workflowTaskHandler.handleWorkflowTaskScheduled(ctx, req) -} - -func (e *historyEngineImpl) VerifyFirstWorkflowTaskScheduled( - ctx context.Context, - request *historyservice.VerifyFirstWorkflowTaskScheduledRequest, -) (retError error) { - return e.workflowTaskHandler.verifyFirstWorkflowTaskScheduled(ctx, request) -} - -// RecordWorkflowTaskStarted starts a workflow task -func (e *historyEngineImpl) RecordWorkflowTaskStarted( - ctx context.Context, - request *historyservice.RecordWorkflowTaskStartedRequest, -) (*historyservice.RecordWorkflowTaskStartedResponse, error) { - return e.workflowTaskHandler.handleWorkflowTaskStarted(ctx, request) -} - -// RespondWorkflowTaskCompleted completes a workflow task -func (e *historyEngineImpl) RespondWorkflowTaskCompleted( - ctx context.Context, - req *historyservice.RespondWorkflowTaskCompletedRequest, -) (*historyservice.RespondWorkflowTaskCompletedResponse, error) { - return e.workflowTaskHandler.handleWorkflowTaskCompleted(ctx, req) -} - -// RespondWorkflowTaskFailed fails a workflow task -func (e *historyEngineImpl) RespondWorkflowTaskFailed( - ctx context.Context, - req *historyservice.RespondWorkflowTaskFailedRequest, -) error { - return e.workflowTaskHandler.handleWorkflowTaskFailed(ctx, req) -} - -// RespondActivityTaskCompleted completes an activity task. 
-func (e *historyEngineImpl) RespondActivityTaskCompleted( - ctx context.Context, - req *historyservice.RespondActivityTaskCompletedRequest, -) (*historyservice.RespondActivityTaskCompletedResponse, error) { - return respondactivitytaskcompleted.Invoke(ctx, req, e.shard, e.workflowConsistencyChecker) -} - -// RespondActivityTaskFailed completes an activity task failure. -func (e *historyEngineImpl) RespondActivityTaskFailed( - ctx context.Context, - req *historyservice.RespondActivityTaskFailedRequest, -) (*historyservice.RespondActivityTaskFailedResponse, error) { - return respondactivitytaskfailed.Invoke(ctx, req, e.shard, e.workflowConsistencyChecker) -} - -// RespondActivityTaskCanceled completes an activity task failure. -func (e *historyEngineImpl) RespondActivityTaskCanceled( - ctx context.Context, - req *historyservice.RespondActivityTaskCanceledRequest, -) (*historyservice.RespondActivityTaskCanceledResponse, error) { - return respondactivitytaskcanceled.Invoke(ctx, req, e.shard, e.workflowConsistencyChecker) -} - -// RecordActivityTaskHeartbeat records an hearbeat for a task. -// This method can be used for two purposes. -// - For reporting liveness of the activity. -// - For reporting progress of the activity, this can be done even if the liveness is not configured. -func (e *historyEngineImpl) RecordActivityTaskHeartbeat( - ctx context.Context, - req *historyservice.RecordActivityTaskHeartbeatRequest, -) (*historyservice.RecordActivityTaskHeartbeatResponse, error) { - return recordactivitytaskheartbeat.Invoke(ctx, req, e.shard, e.workflowConsistencyChecker) -} - -// RequestCancelWorkflowExecution records request cancellation event for workflow execution -func (e *historyEngineImpl) RequestCancelWorkflowExecution( - ctx context.Context, - req *historyservice.RequestCancelWorkflowExecutionRequest, -) (resp *historyservice.RequestCancelWorkflowExecutionResponse, retError error) { - return requestcancelworkflow.Invoke(ctx, req, e.shard, e.workflowConsistencyChecker) -} - -func (e *historyEngineImpl) SignalWorkflowExecution( - ctx context.Context, - req *historyservice.SignalWorkflowExecutionRequest, -) (resp *historyservice.SignalWorkflowExecutionResponse, retError error) { - return signalworkflow.Invoke(ctx, req, e.shard, e.workflowConsistencyChecker) -} - -// SignalWithStartWorkflowExecution signals current workflow (if running) or creates & signals a new workflow -// Consistency guarantee: always write -func (e *historyEngineImpl) SignalWithStartWorkflowExecution( - ctx context.Context, - req *historyservice.SignalWithStartWorkflowExecutionRequest, -) (_ *historyservice.SignalWithStartWorkflowExecutionResponse, retError error) { - return signalwithstartworkflow.Invoke(ctx, req, e.shard, e.workflowConsistencyChecker) -} - -func (e *historyEngineImpl) UpdateWorkflowExecution( - ctx context.Context, - req *historyservice.UpdateWorkflowExecutionRequest, -) (*historyservice.UpdateWorkflowExecutionResponse, error) { - return updateworkflow.Invoke(ctx, req, e.shard, e.workflowConsistencyChecker, e.matchingClient) -} - -func (e *historyEngineImpl) PollWorkflowExecutionUpdate( - ctx context.Context, - req *historyservice.PollWorkflowExecutionUpdateRequest, -) (*historyservice.PollWorkflowExecutionUpdateResponse, error) { - return pollupdate.Invoke(ctx, req, e.workflowConsistencyChecker) -} - -// RemoveSignalMutableState remove the signal request id in signal_requested for deduplicate -func (e *historyEngineImpl) RemoveSignalMutableState( - ctx context.Context, - req 
*historyservice.RemoveSignalMutableStateRequest, -) (*historyservice.RemoveSignalMutableStateResponse, error) { - return removesignalmutablestate.Invoke(ctx, req, e.shard, e.workflowConsistencyChecker) -} - -func (e *historyEngineImpl) TerminateWorkflowExecution( - ctx context.Context, - req *historyservice.TerminateWorkflowExecutionRequest, -) (*historyservice.TerminateWorkflowExecutionResponse, error) { - return terminateworkflow.Invoke(ctx, req, e.shard, e.workflowConsistencyChecker) -} - -func (e *historyEngineImpl) DeleteWorkflowExecution( - ctx context.Context, - request *historyservice.DeleteWorkflowExecutionRequest, -) (*historyservice.DeleteWorkflowExecutionResponse, error) { - return deleteworkflow.Invoke(ctx, request, e.shard, e.workflowConsistencyChecker, e.workflowDeleteManager) -} - -// RecordChildExecutionCompleted records the completion of child execution into parent execution history -func (e *historyEngineImpl) RecordChildExecutionCompleted( - ctx context.Context, - req *historyservice.RecordChildExecutionCompletedRequest, -) (*historyservice.RecordChildExecutionCompletedResponse, error) { - return recordchildworkflowcompleted.Invoke(ctx, req, e.shard, e.workflowConsistencyChecker) -} - -func (e *historyEngineImpl) VerifyChildExecutionCompletionRecorded( - ctx context.Context, - req *historyservice.VerifyChildExecutionCompletionRecordedRequest, -) (*historyservice.VerifyChildExecutionCompletionRecordedResponse, error) { - return verifychildworkflowcompletionrecorded.Invoke(ctx, req, e.shard, e.workflowConsistencyChecker) -} - -func (e *historyEngineImpl) ReplicateEventsV2( - ctx context.Context, - replicateRequest *historyservice.ReplicateEventsV2Request, -) error { - - return e.nDCReplicator.ApplyEvents(ctx, replicateRequest) -} - -// ReplicateWorkflowState is an experimental method to replicate workflow state. This should not expose outside of history service role. -func (e *historyEngineImpl) ReplicateWorkflowState( - ctx context.Context, - request *historyservice.ReplicateWorkflowStateRequest, -) error { - - return e.nDCReplicator.ApplyWorkflowState(ctx, request) -} - -func (e *historyEngineImpl) SyncShardStatus( - ctx context.Context, - request *historyservice.SyncShardStatusRequest, -) error { - - clusterName := request.GetSourceCluster() - now := timestamp.TimeValue(request.GetStatusTime()) - - // here there are 3 main things - // 1. update the view of remote cluster's shard time - // 2. 
notify the timer gate in the timer queue standby processor - // 3, notify the transfer (essentially a no op, just put it here so it looks symmetric) - e.shard.SetCurrentTime(clusterName, now) - for _, processor := range e.queueProcessors { - processor.NotifyNewTasks([]tasks.Task{}) - } - return nil -} - -func (e *historyEngineImpl) SyncActivity( - ctx context.Context, - request *historyservice.SyncActivityRequest, -) (retError error) { - - return e.nDCActivityReplicator.SyncActivity(ctx, request) -} - -// ResetWorkflowExecution terminates current workflow (if running) and replay & create new workflow -// Consistency guarantee: always write -func (e *historyEngineImpl) ResetWorkflowExecution( - ctx context.Context, - req *historyservice.ResetWorkflowExecutionRequest, -) (*historyservice.ResetWorkflowExecutionResponse, error) { - return resetworkflow.Invoke(ctx, req, e.shard, e.workflowConsistencyChecker) -} - -func (e *historyEngineImpl) NotifyNewHistoryEvent( - notification *events.Notification, -) { - - e.eventNotifier.NotifyNewHistoryEvent(notification) -} - -func (e *historyEngineImpl) NotifyNewTasks( - newTasks map[tasks.Category][]tasks.Task, -) { - for category, tasksByCategory := range newTasks { - // TODO: make replicatorProcessor part of queueProcessors list - // and get rid of the special case here. - if category == tasks.CategoryReplication { - if e.replicationAckMgr != nil { - e.replicationAckMgr.NotifyNewTasks(tasksByCategory) - } - continue - } - - if len(tasksByCategory) > 0 { - e.queueProcessors[category].NotifyNewTasks(tasksByCategory) - } - } -} - -func (e *historyEngineImpl) AddSpeculativeWorkflowTaskTimeoutTask(task *tasks.WorkflowTaskTimeoutTask) { - e.queueProcessors[tasks.CategoryMemoryTimer].NotifyNewTasks([]tasks.Task{task}) -} - -func (e *historyEngineImpl) GetReplicationMessages( - ctx context.Context, - pollingCluster string, - ackMessageID int64, - ackTimestamp time.Time, - queryMessageID int64, -) (*replicationspb.ReplicationMessages, error) { - return replicationapi.GetTasks(ctx, e.shard, e.replicationAckMgr, pollingCluster, ackMessageID, ackTimestamp, queryMessageID) -} - -func (e *historyEngineImpl) SubscribeReplicationNotification() (<-chan struct{}, string) { - return e.replicationAckMgr.SubscribeNotification() -} - -func (e *historyEngineImpl) UnsubscribeReplicationNotification(subscriberID string) { - e.replicationAckMgr.UnsubscribeNotification(subscriberID) -} - -func (e *historyEngineImpl) ConvertReplicationTask( - ctx context.Context, - task tasks.Task, -) (*replicationspb.ReplicationTask, error) { - return e.replicationAckMgr.ConvertTask(ctx, task) -} -func (e *historyEngineImpl) GetReplicationTasksIter( - ctx context.Context, - pollingCluster string, - minInclusiveTaskID int64, - maxExclusiveTaskID int64, -) (collection.Iterator[tasks.Task], error) { - return e.replicationAckMgr.GetReplicationTasksIter(ctx, pollingCluster, minInclusiveTaskID, maxExclusiveTaskID) -} - -func (e *historyEngineImpl) GetDLQReplicationMessages( - ctx context.Context, - taskInfos []*replicationspb.ReplicationTaskInfo, -) ([]*replicationspb.ReplicationTask, error) { - return replicationapi.GetDLQTasks(ctx, e.shard, e.replicationAckMgr, taskInfos) -} - -func (e *historyEngineImpl) ReapplyEvents( - ctx context.Context, - namespaceUUID namespace.ID, - workflowID string, - runID string, - reapplyEvents []*historypb.HistoryEvent, -) error { - return reapplyevents.Invoke(ctx, namespaceUUID, workflowID, runID, reapplyEvents, e.shard, e.workflowConsistencyChecker, 
e.workflowResetter, e.eventsReapplier) -} - -func (e *historyEngineImpl) GetDLQMessages( - ctx context.Context, - request *historyservice.GetDLQMessagesRequest, -) (*historyservice.GetDLQMessagesResponse, error) { - return replicationadmin.GetDLQ(ctx, request, e.shard, e.replicationDLQHandler) -} - -func (e *historyEngineImpl) PurgeDLQMessages( - ctx context.Context, - request *historyservice.PurgeDLQMessagesRequest, -) (*historyservice.PurgeDLQMessagesResponse, error) { - return replicationadmin.PurgeDLQ(ctx, request, e.shard, e.replicationDLQHandler) -} - -func (e *historyEngineImpl) MergeDLQMessages( - ctx context.Context, - request *historyservice.MergeDLQMessagesRequest, -) (*historyservice.MergeDLQMessagesResponse, error) { - return replicationadmin.MergeDLQ(ctx, request, e.shard, e.replicationDLQHandler) -} - -func (e *historyEngineImpl) RebuildMutableState( - ctx context.Context, - namespaceUUID namespace.ID, - execution commonpb.WorkflowExecution, -) error { - return e.workflowRebuilder.rebuild( - ctx, - definition.NewWorkflowKey( - namespaceUUID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - ) -} - -func (e *historyEngineImpl) RefreshWorkflowTasks( - ctx context.Context, - namespaceUUID namespace.ID, - execution commonpb.WorkflowExecution, -) (retError error) { - return refreshworkflow.Invoke( - ctx, - definition.NewWorkflowKey(namespaceUUID.String(), execution.WorkflowId, execution.RunId), - e.shard, - e.workflowConsistencyChecker, - ) -} - -func (e *historyEngineImpl) GenerateLastHistoryReplicationTasks( - ctx context.Context, - request *historyservice.GenerateLastHistoryReplicationTasksRequest, -) (_ *historyservice.GenerateLastHistoryReplicationTasksResponse, retError error) { - return replicationapi.GenerateTask(ctx, request, e.shard, e.workflowConsistencyChecker) -} - -func (e *historyEngineImpl) GetReplicationStatus( - ctx context.Context, - request *historyservice.GetReplicationStatusRequest, -) (_ *historyservice.ShardReplicationStatus, retError error) { - return replicationapi.GetStatus(ctx, request, e.shard, e.replicationAckMgr) -} diff -Nru temporal-1.21.5-1/src/service/history/historyEngine2_test.go temporal-1.22.5/src/service/history/historyEngine2_test.go --- temporal-1.21.5-1/src/service/history/historyEngine2_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/historyEngine2_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,2039 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package history - -import ( - "context" - "errors" - "testing" - "time" - - "github.com/golang/mock/gomock" - "github.com/pborman/uuid" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - commandpb "go.temporal.io/api/command/v1" - commonpb "go.temporal.io/api/common/v1" - enumspb "go.temporal.io/api/enums/v1" - historypb "go.temporal.io/api/history/v1" - querypb "go.temporal.io/api/query/v1" - "go.temporal.io/api/serviceerror" - taskqueuepb "go.temporal.io/api/taskqueue/v1" - "go.temporal.io/api/workflowservice/v1" - - "go.temporal.io/server/common/payload" - "go.temporal.io/server/common/searchattribute" - - enumsspb "go.temporal.io/server/api/enums/v1" - historyspb "go.temporal.io/server/api/history/v1" - "go.temporal.io/server/api/historyservice/v1" - persistencespb "go.temporal.io/server/api/persistence/v1" - workflowspb "go.temporal.io/server/api/workflow/v1" - "go.temporal.io/server/common/persistence/versionhistory" - "go.temporal.io/server/common/persistence/visibility/manager" - "go.temporal.io/server/common/primitives/timestamp" - serviceerrors "go.temporal.io/server/common/serviceerror" - "go.temporal.io/server/service/history/api" - "go.temporal.io/server/service/history/configs" - "go.temporal.io/server/service/history/consts" - "go.temporal.io/server/service/history/events" - "go.temporal.io/server/service/history/queues" - "go.temporal.io/server/service/history/shard" - "go.temporal.io/server/service/history/tasks" - "go.temporal.io/server/service/history/tests" - "go.temporal.io/server/service/history/workflow" - wcache "go.temporal.io/server/service/history/workflow/cache" - - tokenspb "go.temporal.io/server/api/token/v1" - "go.temporal.io/server/common" - "go.temporal.io/server/common/clock" - "go.temporal.io/server/common/cluster" - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/log/tag" - "go.temporal.io/server/common/metrics" - "go.temporal.io/server/common/namespace" - "go.temporal.io/server/common/payloads" - "go.temporal.io/server/common/persistence" -) - -type ( - engine2Suite struct { - suite.Suite - *require.Assertions - - controller *gomock.Controller - mockShard *shard.ContextTest - mockTxProcessor *queues.MockQueue - mockTimerProcessor *queues.MockQueue - mockVisibilityProcessor *queues.MockQueue - mockArchivalProcessor *queues.MockQueue - mockMemoryScheduledQueue *queues.MockQueue - mockEventsCache *events.MockCache - mockNamespaceCache *namespace.MockRegistry - mockClusterMetadata *cluster.MockMetadata - mockVisibilityManager *manager.MockVisibilityManager - - workflowCache wcache.Cache - historyEngine *historyEngineImpl - mockExecutionMgr *persistence.MockExecutionManager - - config *configs.Config - logger *log.MockLogger - errorMessages []string - } -) - -func TestEngine2Suite(t *testing.T) { - s := new(engine2Suite) - suite.Run(t, s) -} - -func (s *engine2Suite) SetupSuite() { - -} - -func (s *engine2Suite) TearDownSuite() { -} - -func (s *engine2Suite) SetupTest() { - s.Assertions = require.New(s.T()) - - s.controller = gomock.NewController(s.T()) - - s.mockTxProcessor = queues.NewMockQueue(s.controller) - s.mockTimerProcessor = queues.NewMockQueue(s.controller) - s.mockVisibilityProcessor = queues.NewMockQueue(s.controller) - 
s.mockArchivalProcessor = queues.NewMockQueue(s.controller) - s.mockMemoryScheduledQueue = queues.NewMockQueue(s.controller) - s.mockTxProcessor.EXPECT().Category().Return(tasks.CategoryTransfer).AnyTimes() - s.mockTimerProcessor.EXPECT().Category().Return(tasks.CategoryTimer).AnyTimes() - s.mockVisibilityProcessor.EXPECT().Category().Return(tasks.CategoryVisibility).AnyTimes() - s.mockArchivalProcessor.EXPECT().Category().Return(tasks.CategoryArchival).AnyTimes() - s.mockMemoryScheduledQueue.EXPECT().Category().Return(tasks.CategoryMemoryTimer).AnyTimes() - s.mockTxProcessor.EXPECT().NotifyNewTasks(gomock.Any()).AnyTimes() - s.mockTimerProcessor.EXPECT().NotifyNewTasks(gomock.Any()).AnyTimes() - s.mockVisibilityProcessor.EXPECT().NotifyNewTasks(gomock.Any()).AnyTimes() - s.mockArchivalProcessor.EXPECT().NotifyNewTasks(gomock.Any()).AnyTimes() - s.mockMemoryScheduledQueue.EXPECT().NotifyNewTasks(gomock.Any()).AnyTimes() - - s.config = tests.NewDynamicConfig() - mockShard := shard.NewTestContext( - s.controller, - &persistencespb.ShardInfo{ - ShardId: 1, - RangeId: 1, - }, - s.config, - ) - s.mockShard = mockShard - s.mockShard.Resource.ShardMgr.EXPECT().AssertShardOwnership(gomock.Any(), gomock.Any()).AnyTimes() - - s.mockNamespaceCache = s.mockShard.Resource.NamespaceCache - s.mockExecutionMgr = s.mockShard.Resource.ExecutionMgr - s.mockClusterMetadata = s.mockShard.Resource.ClusterMetadata - s.mockVisibilityManager = s.mockShard.Resource.VisibilityManager - - s.mockEventsCache = s.mockShard.MockEventsCache - s.mockNamespaceCache.EXPECT().GetNamespaceByID(tests.NamespaceID).Return(tests.GlobalNamespaceEntry, nil).AnyTimes() - s.mockNamespaceCache.EXPECT().GetNamespaceByID(tests.ParentNamespaceID).Return(tests.GlobalParentNamespaceEntry, nil).AnyTimes() - s.mockNamespaceCache.EXPECT().GetNamespace(tests.ChildNamespace).Return(tests.GlobalChildNamespaceEntry, nil).AnyTimes() - s.mockEventsCache.EXPECT().PutEvent(gomock.Any(), gomock.Any()).AnyTimes() - s.mockClusterMetadata.EXPECT().GetClusterID().Return(tests.Version).AnyTimes() - s.mockClusterMetadata.EXPECT().IsVersionFromSameCluster(tests.Version, tests.Version).Return(true).AnyTimes() - s.mockClusterMetadata.EXPECT().IsGlobalNamespaceEnabled().Return(false).AnyTimes() - s.mockClusterMetadata.EXPECT().GetCurrentClusterName().Return(cluster.TestCurrentClusterName).AnyTimes() - s.mockClusterMetadata.EXPECT().ClusterNameForFailoverVersion(false, common.EmptyVersion).Return(cluster.TestCurrentClusterName).AnyTimes() - s.mockClusterMetadata.EXPECT().ClusterNameForFailoverVersion(true, tests.Version).Return(cluster.TestCurrentClusterName).AnyTimes() - s.mockVisibilityManager.EXPECT().GetIndexName().Return("").AnyTimes() - s.mockVisibilityManager.EXPECT(). - ValidateCustomSearchAttributes(gomock.Any()). - DoAndReturn( - func(searchAttributes map[string]any) (map[string]any, error) { - return searchAttributes, nil - }, - ). 
- AnyTimes() - s.workflowCache = wcache.NewCache(s.mockShard) - s.logger = log.NewMockLogger(s.controller) - s.logger.EXPECT().Debug(gomock.Any(), gomock.Any()).AnyTimes() - s.logger.EXPECT().Info(gomock.Any(), gomock.Any()).AnyTimes() - s.logger.EXPECT().Warn(gomock.Any(), gomock.Any()).AnyTimes() - s.errorMessages = make([]string, 0) - s.logger.EXPECT().Error(gomock.Any(), gomock.Any()).AnyTimes().Do(func(msg string, tags ...tag.Tag) { - s.errorMessages = append(s.errorMessages, msg) - }) - - h := &historyEngineImpl{ - currentClusterName: s.mockShard.GetClusterMetadata().GetCurrentClusterName(), - shard: s.mockShard, - clusterMetadata: s.mockClusterMetadata, - executionManager: s.mockExecutionMgr, - logger: s.logger, - throttledLogger: s.logger, - metricsHandler: metrics.NoopMetricsHandler, - tokenSerializer: common.NewProtoTaskTokenSerializer(), - config: s.config, - timeSource: s.mockShard.GetTimeSource(), - eventNotifier: events.NewNotifier(clock.NewRealTimeSource(), metrics.NoopMetricsHandler, func(namespace.ID, string) int32 { return 1 }), - queueProcessors: map[tasks.Category]queues.Queue{ - s.mockArchivalProcessor.Category(): s.mockArchivalProcessor, - s.mockTxProcessor.Category(): s.mockTxProcessor, - s.mockTimerProcessor.Category(): s.mockTimerProcessor, - s.mockVisibilityProcessor.Category(): s.mockVisibilityProcessor, - s.mockMemoryScheduledQueue.Category(): s.mockMemoryScheduledQueue, - }, - searchAttributesValidator: searchattribute.NewValidator( - searchattribute.NewTestProvider(), - s.mockShard.Resource.SearchAttributesMapperProvider, - s.config.SearchAttributesNumberOfKeysLimit, - s.config.SearchAttributesSizeOfValueLimit, - s.config.SearchAttributesTotalSizeLimit, - s.mockVisibilityManager, - false, - ), - workflowConsistencyChecker: api.NewWorkflowConsistencyChecker(mockShard, s.workflowCache), - } - s.mockShard.SetEngineForTesting(h) - h.workflowTaskHandler = newWorkflowTaskHandlerCallback(h) - - s.historyEngine = h -} - -func (s *engine2Suite) TearDownTest() { - s.controller.Finish() - s.mockShard.StopForTest() -} - -func (s *engine2Suite) TestRecordWorkflowTaskStartedSuccessStickyEnabled() { - namespaceID := tests.NamespaceID - we := commonpb.WorkflowExecution{ - WorkflowId: "wId", - RunId: tests.RunID, - } - tl := "testTaskQueue" - stickyTl := "stickyTaskQueue" - identity := "testIdentity" - - ms := workflow.TestLocalMutableState(s.historyEngine.shard, s.mockEventsCache, tests.LocalNamespaceEntry, - log.NewTestLogger(), we.GetRunId()) - executionInfo := ms.GetExecutionInfo() - executionInfo.LastUpdateTime = timestamp.TimeNowPtrUtc() - executionInfo.StickyTaskQueue = stickyTl - - addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) - wt := addWorkflowTaskScheduledEvent(ms) - - wfMs := workflow.TestCloneToProto(ms) - - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) - - request := historyservice.RecordWorkflowTaskStartedRequest{ - NamespaceId: namespaceID.String(), - WorkflowExecution: &we, - ScheduledEventId: 2, - TaskId: 100, - RequestId: "reqId", - PollRequest: &workflowservice.PollWorkflowTaskQueueRequest{ - TaskQueue: &taskqueuepb.TaskQueue{ - Name: stickyTl, - }, - Identity: identity, - }, - } - - 
expectedResponse := historyservice.RecordWorkflowTaskStartedResponse{} - expectedResponse.WorkflowType = ms.GetWorkflowType() - executionInfo = ms.GetExecutionInfo() - if executionInfo.LastWorkflowTaskStartedEventId != common.EmptyEventID { - expectedResponse.PreviousStartedEventId = executionInfo.LastWorkflowTaskStartedEventId - } - expectedResponse.ScheduledEventId = wt.ScheduledEventID - expectedResponse.ScheduledTime = wt.ScheduledTime - expectedResponse.StartedEventId = wt.ScheduledEventID + 1 - expectedResponse.StickyExecutionEnabled = true - expectedResponse.NextEventId = ms.GetNextEventID() + 1 - expectedResponse.Attempt = wt.Attempt - expectedResponse.WorkflowExecutionTaskQueue = &taskqueuepb.TaskQueue{ - Name: executionInfo.TaskQueue, - Kind: enumspb.TASK_QUEUE_KIND_NORMAL, - } - currentBranchTokken, err := ms.GetCurrentBranchToken() - s.NoError(err) - expectedResponse.BranchToken = currentBranchTokken - - response, err := s.historyEngine.RecordWorkflowTaskStarted(metrics.AddMetricsContext(context.Background()), &request) - s.Nil(err) - s.NotNil(response) - s.True(response.StartedTime.After(*expectedResponse.ScheduledTime)) - expectedResponse.StartedTime = response.StartedTime - s.Equal(&expectedResponse, response) -} - -func (s *engine2Suite) TestRecordWorkflowTaskStartedIfNoExecution() { - namespaceID := tests.NamespaceID - workflowExecution := &commonpb.WorkflowExecution{ - WorkflowId: "wId", - RunId: tests.RunID, - } - - identity := "testIdentity" - tl := "testTaskQueue" - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(nil, serviceerror.NewNotFound("")) - - response, err := s.historyEngine.RecordWorkflowTaskStarted(metrics.AddMetricsContext(context.Background()), &historyservice.RecordWorkflowTaskStartedRequest{ - NamespaceId: namespaceID.String(), - WorkflowExecution: workflowExecution, - ScheduledEventId: 2, - TaskId: 100, - RequestId: "reqId", - PollRequest: &workflowservice.PollWorkflowTaskQueueRequest{ - TaskQueue: &taskqueuepb.TaskQueue{ - Name: tl, - }, - Identity: identity, - }, - }) - s.Nil(response) - s.NotNil(err) - s.IsType(&serviceerror.NotFound{}, err) -} - -func (s *engine2Suite) TestRecordWorkflowTaskStarted_NoMessages() { - namespaceID := tests.NamespaceID - workflowExecution := commonpb.WorkflowExecution{ - WorkflowId: "wId", - RunId: tests.RunID, - } - - tl := "testTaskQueue" - identity := "testIdentity" - - ms := s.createExecutionStartedState(workflowExecution, tl, identity, false, false) - // Use UpdateCurrentVersion explicitly here, - // because there is no call to CloseTransactionAsSnapshot, - // because it converts speculative WT to normal, but WT needs to be speculative for this test. 
- err := ms.UpdateCurrentVersion(tests.GlobalNamespaceEntry.FailoverVersion(), true) - s.NoError(err) - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).DoAndReturn( - func(ctx context.Context, request *persistence.GetWorkflowExecutionRequest) (*persistence.GetWorkflowExecutionResponse, error) { - wfMs := ms.CloneToProto() - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - return gwmsResponse, nil - }, - ) - - wt, err := ms.AddWorkflowTaskScheduledEvent(false, enumsspb.WORKFLOW_TASK_TYPE_SPECULATIVE) - s.NoError(err) - s.NotNil(wt) - - response, err := s.historyEngine.RecordWorkflowTaskStarted(metrics.AddMetricsContext(context.Background()), &historyservice.RecordWorkflowTaskStartedRequest{ - NamespaceId: namespaceID.String(), - WorkflowExecution: &workflowExecution, - ScheduledEventId: wt.ScheduledEventID, - TaskId: 100, - RequestId: "reqId", - PollRequest: &workflowservice.PollWorkflowTaskQueueRequest{ - TaskQueue: &taskqueuepb.TaskQueue{ - Name: tl, - }, - Identity: identity, - }, - }) - - s.Nil(response) - s.Error(err) - s.IsType(&serviceerror.NotFound{}, err, err.Error()) - s.EqualError(err, "No messages for speculative workflow task.") -} - -func (s *engine2Suite) TestRecordWorkflowTaskStartedIfGetExecutionFailed() { - namespaceID := tests.NamespaceID - workflowExecution := &commonpb.WorkflowExecution{ - WorkflowId: "wId", - RunId: tests.RunID, - } - - identity := "testIdentity" - tl := "testTaskQueue" - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(nil, errors.New("FAILED")) - - response, err := s.historyEngine.RecordWorkflowTaskStarted(metrics.AddMetricsContext(context.Background()), &historyservice.RecordWorkflowTaskStartedRequest{ - NamespaceId: namespaceID.String(), - WorkflowExecution: workflowExecution, - ScheduledEventId: 2, - TaskId: 100, - RequestId: "reqId", - PollRequest: &workflowservice.PollWorkflowTaskQueueRequest{ - TaskQueue: &taskqueuepb.TaskQueue{ - Name: tl, - }, - Identity: identity, - }, - }) - s.Nil(response) - s.NotNil(err) - s.EqualError(err, "FAILED") -} - -func (s *engine2Suite) TestRecordWorkflowTaskStartedIfTaskAlreadyStarted() { - namespaceID := tests.NamespaceID - workflowExecution := commonpb.WorkflowExecution{ - WorkflowId: "wId", - RunId: tests.RunID, - } - - identity := "testIdentity" - tl := "testTaskQueue" - - ms := s.createExecutionStartedState(workflowExecution, tl, identity, true, true) - wfMs := workflow.TestCloneToProto(ms) - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - - response, err := s.historyEngine.RecordWorkflowTaskStarted(metrics.AddMetricsContext(context.Background()), &historyservice.RecordWorkflowTaskStartedRequest{ - NamespaceId: namespaceID.String(), - WorkflowExecution: &workflowExecution, - ScheduledEventId: 2, - TaskId: 100, - RequestId: "reqId", - PollRequest: &workflowservice.PollWorkflowTaskQueueRequest{ - TaskQueue: &taskqueuepb.TaskQueue{ - Name: tl, - }, - Identity: identity, - }, - }) - s.Nil(response) - s.NotNil(err) - s.IsType(&serviceerrors.TaskAlreadyStarted{}, err) - s.logger.Error("RecordWorkflowTaskStarted failed with", tag.Error(err)) -} - -func (s *engine2Suite) TestRecordWorkflowTaskStartedIfTaskAlreadyCompleted() { - namespaceID := tests.NamespaceID - workflowExecution := commonpb.WorkflowExecution{ - WorkflowId: "wId", - RunId: tests.RunID, - } - - identity := "testIdentity" - tl := 
"testTaskQueue" - - ms := s.createExecutionStartedState(workflowExecution, tl, identity, true, true) - addWorkflowTaskCompletedEvent(&s.Suite, ms, int64(2), int64(3), identity) - - wfMs := workflow.TestCloneToProto(ms) - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - - response, err := s.historyEngine.RecordWorkflowTaskStarted(metrics.AddMetricsContext(context.Background()), &historyservice.RecordWorkflowTaskStartedRequest{ - NamespaceId: namespaceID.String(), - WorkflowExecution: &workflowExecution, - ScheduledEventId: 2, - TaskId: 100, - RequestId: "reqId", - PollRequest: &workflowservice.PollWorkflowTaskQueueRequest{ - TaskQueue: &taskqueuepb.TaskQueue{ - Name: tl, - }, - Identity: identity, - }, - }) - s.Nil(response) - s.NotNil(err) - s.IsType(&serviceerror.NotFound{}, err) - s.logger.Error("RecordWorkflowTaskStarted failed with", tag.Error(err)) -} - -func (s *engine2Suite) TestRecordWorkflowTaskStartedConflictOnUpdate() { - namespaceID := tests.NamespaceID - workflowExecution := commonpb.WorkflowExecution{ - WorkflowId: "wId", - RunId: tests.RunID, - } - - tl := "testTaskQueue" - identity := "testIdentity" - - ms := s.createExecutionStartedState(workflowExecution, tl, identity, true, false) - wfMs := workflow.TestCloneToProto(ms) - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(nil, &persistence.ConditionFailedError{}) - - response, err := s.historyEngine.RecordWorkflowTaskStarted(metrics.AddMetricsContext(context.Background()), &historyservice.RecordWorkflowTaskStartedRequest{ - NamespaceId: namespaceID.String(), - WorkflowExecution: &workflowExecution, - ScheduledEventId: 2, - TaskId: 100, - RequestId: "reqId", - PollRequest: &workflowservice.PollWorkflowTaskQueueRequest{ - TaskQueue: &taskqueuepb.TaskQueue{ - Name: tl, - }, - Identity: identity, - }, - }) - - s.NotNil(err) - s.Nil(response) - s.Equal(&persistence.ConditionFailedError{}, err) -} - -func (s *engine2Suite) TestRecordWorkflowTaskStartedSuccess() { - namespaceID := tests.NamespaceID - workflowExecution := commonpb.WorkflowExecution{ - WorkflowId: "wId", - RunId: tests.RunID, - } - - tl := "testTaskQueue" - identity := "testIdentity" - - ms := s.createExecutionStartedState(workflowExecution, tl, identity, true, false) - wfMs := workflow.TestCloneToProto(ms) - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) - - // load mutable state such that it already exists in memory when respond workflow task is called - // this enables us to set query registry on it - ctx, release, err := s.workflowCache.GetOrCreateWorkflowExecution( - metrics.AddMetricsContext(context.Background()), - tests.NamespaceID, - workflowExecution, - workflow.LockPriorityHigh, - ) - s.NoError(err) - loadedMS, err := ctx.LoadMutableState(context.Background()) - s.NoError(err) - qr := workflow.NewQueryRegistry() - id1, _ := qr.BufferQuery(&querypb.WorkflowQuery{}) - id2, _ := qr.BufferQuery(&querypb.WorkflowQuery{}) - id3, _ := 
qr.BufferQuery(&querypb.WorkflowQuery{}) - loadedMS.(*workflow.MutableStateImpl).QueryRegistry = qr - release(nil) - - response, err := s.historyEngine.RecordWorkflowTaskStarted(metrics.AddMetricsContext(context.Background()), &historyservice.RecordWorkflowTaskStartedRequest{ - NamespaceId: namespaceID.String(), - WorkflowExecution: &workflowExecution, - ScheduledEventId: 2, - TaskId: 100, - RequestId: "reqId", - PollRequest: &workflowservice.PollWorkflowTaskQueueRequest{ - TaskQueue: &taskqueuepb.TaskQueue{ - Name: tl, - }, - Identity: identity, - }, - }) - - s.Nil(err) - s.NotNil(response) - s.Equal("wType", response.WorkflowType.Name) - s.True(response.PreviousStartedEventId == 0) - s.Equal(int64(3), response.StartedEventId) - expectedQueryMap := map[string]*querypb.WorkflowQuery{ - id1: {}, - id2: {}, - id3: {}, - } - s.Equal(expectedQueryMap, response.Queries) -} - -func (s *engine2Suite) TestRecordActivityTaskStartedIfNoExecution() { - namespaceID := tests.NamespaceID - workflowExecution := &commonpb.WorkflowExecution{ - WorkflowId: "wId", - RunId: tests.RunID, - } - - identity := "testIdentity" - tl := "testTaskQueue" - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(nil, serviceerror.NewNotFound("")) - - response, err := s.historyEngine.RecordActivityTaskStarted( - metrics.AddMetricsContext(context.Background()), - &historyservice.RecordActivityTaskStartedRequest{ - NamespaceId: namespaceID.String(), - WorkflowExecution: workflowExecution, - ScheduledEventId: 5, - TaskId: 100, - RequestId: "reqId", - PollRequest: &workflowservice.PollActivityTaskQueueRequest{ - TaskQueue: &taskqueuepb.TaskQueue{ - Name: tl, - }, - Identity: identity, - }, - }, - ) - if err != nil { - s.logger.Error("Unexpected Error", tag.Error(err)) - } - s.Nil(response) - s.NotNil(err) - s.IsType(&serviceerror.NotFound{}, err) -} - -func (s *engine2Suite) TestRecordActivityTaskStartedSuccess() { - namespaceID := tests.NamespaceID - workflowExecution := commonpb.WorkflowExecution{ - WorkflowId: "wId", - RunId: tests.RunID, - } - - identity := "testIdentity" - tl := "testTaskQueue" - - activityID := "activity1_id" - activityType := "activity_type1" - activityInput := payloads.EncodeString("input1") - - ms := s.createExecutionStartedState(workflowExecution, tl, identity, true, true) - workflowTaskCompletedEvent := addWorkflowTaskCompletedEvent(&s.Suite, ms, int64(2), int64(3), identity) - scheduledEvent, _ := addActivityTaskScheduledEvent(ms, workflowTaskCompletedEvent.EventId, activityID, activityType, tl, activityInput, 100*time.Second, 10*time.Second, 1*time.Second, 5*time.Second) - - ms1 := workflow.TestCloneToProto(ms) - gwmsResponse1 := &persistence.GetWorkflowExecutionResponse{State: ms1} - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse1, nil) - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) - - s.mockEventsCache.EXPECT().GetEvent( - gomock.Any(), - events.EventKey{ - NamespaceID: namespaceID, - WorkflowID: workflowExecution.GetWorkflowId(), - RunID: workflowExecution.GetRunId(), - EventID: scheduledEvent.GetEventId(), - Version: 0, - }, - workflowTaskCompletedEvent.GetEventId(), - gomock.Any(), - ).Return(scheduledEvent, nil) - response, err := s.historyEngine.RecordActivityTaskStarted(metrics.AddMetricsContext(context.Background()), &historyservice.RecordActivityTaskStartedRequest{ - NamespaceId: namespaceID.String(), - 
WorkflowExecution: &workflowExecution, - ScheduledEventId: 5, - TaskId: 100, - RequestId: "reqId", - PollRequest: &workflowservice.PollActivityTaskQueueRequest{ - TaskQueue: &taskqueuepb.TaskQueue{ - Name: tl, - }, - Identity: identity, - }, - }) - s.Nil(err) - s.NotNil(response) - s.Equal(scheduledEvent, response.ScheduledEvent) -} - -func (s *engine2Suite) TestRequestCancelWorkflowExecution_Running() { - namespaceID := tests.NamespaceID - workflowExecution := commonpb.WorkflowExecution{ - WorkflowId: "wId", - RunId: tests.RunID, - } - - identity := "testIdentity" - tl := "testTaskQueue" - - ms := s.createExecutionStartedState(workflowExecution, tl, identity, true, false) - ms1 := workflow.TestCloneToProto(ms) - gwmsResponse1 := &persistence.GetWorkflowExecutionResponse{State: ms1} - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse1, nil) - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) - - _, err := s.historyEngine.RequestCancelWorkflowExecution(metrics.AddMetricsContext(context.Background()), &historyservice.RequestCancelWorkflowExecutionRequest{ - NamespaceId: namespaceID.String(), - CancelRequest: &workflowservice.RequestCancelWorkflowExecutionRequest{ - WorkflowExecution: &commonpb.WorkflowExecution{ - WorkflowId: workflowExecution.WorkflowId, - RunId: workflowExecution.RunId, - }, - Identity: "identity", - }, - }) - s.Nil(err) - - ms2 := s.getMutableState(namespaceID, workflowExecution) - s.Equal(int64(4), ms2.GetNextEventID()) -} - -func (s *engine2Suite) TestRequestCancelWorkflowExecution_Finished() { - namespaceID := tests.NamespaceID - workflowExecution := commonpb.WorkflowExecution{ - WorkflowId: "wId", - RunId: tests.RunID, - } - - identity := "testIdentity" - tl := "testTaskQueue" - - ms := s.createExecutionStartedState(workflowExecution, tl, identity, true, false) - ms.GetExecutionState().State = enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED - ms1 := workflow.TestCloneToProto(ms) - gwmsResponse1 := &persistence.GetWorkflowExecutionResponse{State: ms1} - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse1, nil) - - _, err := s.historyEngine.RequestCancelWorkflowExecution(metrics.AddMetricsContext(context.Background()), &historyservice.RequestCancelWorkflowExecutionRequest{ - NamespaceId: namespaceID.String(), - CancelRequest: &workflowservice.RequestCancelWorkflowExecutionRequest{ - WorkflowExecution: &commonpb.WorkflowExecution{ - WorkflowId: workflowExecution.WorkflowId, - RunId: workflowExecution.RunId, - }, - Identity: "identity", - }, - }) - s.Nil(err) -} - -func (s *engine2Suite) TestRequestCancelWorkflowExecution_NotFound() { - namespaceID := tests.NamespaceID - workflowExecution := commonpb.WorkflowExecution{ - WorkflowId: "wId", - RunId: tests.RunID, - } - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(nil, serviceerror.NewNotFound("")) - - _, err := s.historyEngine.RequestCancelWorkflowExecution(metrics.AddMetricsContext(context.Background()), &historyservice.RequestCancelWorkflowExecutionRequest{ - NamespaceId: namespaceID.String(), - CancelRequest: &workflowservice.RequestCancelWorkflowExecutionRequest{ - WorkflowExecution: &commonpb.WorkflowExecution{ - WorkflowId: workflowExecution.WorkflowId, - RunId: workflowExecution.RunId, - }, - Identity: "identity", - }, - }) - s.NotNil(err) - s.IsType(&serviceerror.NotFound{}, err) -} - -func (s 
*engine2Suite) TestRequestCancelWorkflowExecution_ParentMismatch() { - namespaceID := tests.NamespaceID - workflowExecution := commonpb.WorkflowExecution{ - WorkflowId: "wId", - RunId: tests.RunID, - } - parentInfo := &workflowspb.ParentExecutionInfo{ - NamespaceId: tests.ParentNamespaceID.String(), - Namespace: tests.ParentNamespace.String(), - Execution: &commonpb.WorkflowExecution{ - WorkflowId: "parent wId", - RunId: "parent rId", - }, - InitiatedId: 123, - InitiatedVersion: 456, - } - - identity := "testIdentity" - tl := "testTaskQueue" - - ms := s.createExecutionStartedStateWithParent(workflowExecution, tl, parentInfo, identity, true, false) - ms1 := workflow.TestCloneToProto(ms) - gwmsResponse1 := &persistence.GetWorkflowExecutionResponse{State: ms1} - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse1, nil) - - _, err := s.historyEngine.RequestCancelWorkflowExecution(metrics.AddMetricsContext(context.Background()), &historyservice.RequestCancelWorkflowExecutionRequest{ - NamespaceId: namespaceID.String(), - CancelRequest: &workflowservice.RequestCancelWorkflowExecutionRequest{ - WorkflowExecution: &commonpb.WorkflowExecution{ - WorkflowId: workflowExecution.WorkflowId, - RunId: workflowExecution.RunId, - }, - Identity: "identity", - }, - ExternalWorkflowExecution: &commonpb.WorkflowExecution{ - WorkflowId: "unknown wId", - RunId: "unknown rId", - }, - ChildWorkflowOnly: true, - }) - s.Equal(consts.ErrWorkflowParent, err) -} - -func (s *engine2Suite) TestTerminateWorkflowExecution_ParentMismatch() { - namespaceID := tests.NamespaceID - workflowExecution := commonpb.WorkflowExecution{ - WorkflowId: "wId", - RunId: tests.RunID, - } - parentInfo := &workflowspb.ParentExecutionInfo{ - NamespaceId: tests.ParentNamespaceID.String(), - Namespace: tests.ParentNamespace.String(), - Execution: &commonpb.WorkflowExecution{ - WorkflowId: "parent wId", - RunId: "parent rId", - }, - InitiatedId: 123, - InitiatedVersion: 456, - } - - identity := "testIdentity" - tl := "testTaskQueue" - - ms := s.createExecutionStartedStateWithParent(workflowExecution, tl, parentInfo, identity, true, false) - ms1 := workflow.TestCloneToProto(ms) - currentExecutionResp := &persistence.GetCurrentExecutionResponse{ - RunID: tests.RunID, - } - gwmsResponse1 := &persistence.GetWorkflowExecutionResponse{State: ms1} - - s.mockExecutionMgr.EXPECT().GetCurrentExecution(gomock.Any(), gomock.Any()).Return(currentExecutionResp, nil) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse1, nil) - - _, err := s.historyEngine.TerminateWorkflowExecution(metrics.AddMetricsContext(context.Background()), &historyservice.TerminateWorkflowExecutionRequest{ - NamespaceId: namespaceID.String(), - TerminateRequest: &workflowservice.TerminateWorkflowExecutionRequest{ - WorkflowExecution: &commonpb.WorkflowExecution{ - WorkflowId: workflowExecution.WorkflowId, - }, - Identity: "identity", - FirstExecutionRunId: workflowExecution.RunId, - }, - ExternalWorkflowExecution: &commonpb.WorkflowExecution{ - WorkflowId: "unknown wId", - RunId: "unknown rId", - }, - ChildWorkflowOnly: true, - }) - s.Equal(consts.ErrWorkflowParent, err) -} - -func (s *engine2Suite) createExecutionStartedState(we commonpb.WorkflowExecution, tl string, identity string, scheduleWorkflowTask bool, startWorkflowTask bool) workflow.MutableState { - return s.createExecutionStartedStateWithParent(we, tl, nil, identity, scheduleWorkflowTask, startWorkflowTask) -} - -func (s 
*engine2Suite) createExecutionStartedStateWithParent(we commonpb.WorkflowExecution, tl string, parentInfo *workflowspb.ParentExecutionInfo, identity string, scheduleWorkflowTask bool, startWorkflowTask bool) workflow.MutableState { - ms := workflow.TestLocalMutableState(s.historyEngine.shard, s.mockEventsCache, tests.LocalNamespaceEntry, - s.logger, we.GetRunId()) - addWorkflowExecutionStartedEventWithParent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, parentInfo, identity) - var wt *workflow.WorkflowTaskInfo - if scheduleWorkflowTask { - wt = addWorkflowTaskScheduledEvent(ms) - } - if wt != nil && startWorkflowTask { - addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) - } - _ = ms.SetHistoryTree(context.Background(), nil, nil, we.GetRunId()) - versionHistory, _ := versionhistory.GetCurrentVersionHistory( - ms.GetExecutionInfo().VersionHistories, - ) - _ = versionhistory.AddOrUpdateVersionHistoryItem( - versionHistory, - versionhistory.NewVersionHistoryItem(0, 0), - ) - - return ms -} - -func (s *engine2Suite) TestRespondWorkflowTaskCompletedRecordMarkerCommand() { - namespaceID := tests.NamespaceID - we := commonpb.WorkflowExecution{ - WorkflowId: "wId", - RunId: tests.RunID, - } - tl := "testTaskQueue" - taskToken := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: "wId", - RunId: we.GetRunId(), - ScheduledEventId: 2, - } - serializedTaskToken, _ := taskToken.Marshal() - identity := "testIdentity" - markerDetails := payloads.EncodeString("marker details") - markerName := "marker name" - - ms := workflow.TestLocalMutableState(s.historyEngine.shard, s.mockEventsCache, tests.LocalNamespaceEntry, - log.NewTestLogger(), we.GetRunId()) - addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) - wt := addWorkflowTaskScheduledEvent(ms) - addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) - - commands := []*commandpb.Command{{ - CommandType: enumspb.COMMAND_TYPE_RECORD_MARKER, - Attributes: &commandpb.Command_RecordMarkerCommandAttributes{RecordMarkerCommandAttributes: &commandpb.RecordMarkerCommandAttributes{ - MarkerName: markerName, - Details: map[string]*commonpb.Payloads{ - "data": markerDetails, - }, - }}, - }} - - wfMs := workflow.TestCloneToProto(ms) - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) - - _, err := s.historyEngine.RespondWorkflowTaskCompleted(metrics.AddMetricsContext(context.Background()), &historyservice.RespondWorkflowTaskCompletedRequest{ - NamespaceId: namespaceID.String(), - CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ - TaskToken: serializedTaskToken, - Commands: commands, - Identity: identity, - }, - }) - s.Nil(err) - ms2 := s.getMutableState(namespaceID, we) - s.Equal(int64(6), ms2.GetNextEventID()) - s.Equal(int64(3), ms2.GetExecutionInfo().LastWorkflowTaskStartedEventId) - s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, ms2.GetExecutionState().State) - s.False(ms2.HasPendingWorkflowTask()) -} - -func (s *engine2Suite) TestRespondWorkflowTaskCompleted_StartChildWithSearchAttributes() { - namespaceID := tests.NamespaceID - we := commonpb.WorkflowExecution{ - WorkflowId: 
"wId", - RunId: tests.RunID, - } - tl := "testTaskQueue" - taskToken := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: "wId", - RunId: we.GetRunId(), - ScheduledEventId: 2, - } - serializedTaskToken, _ := taskToken.Marshal() - identity := "testIdentity" - - ms := workflow.TestLocalMutableState(s.historyEngine.shard, s.mockEventsCache, tests.LocalNamespaceEntry, - log.NewTestLogger(), we.GetRunId()) - addWorkflowExecutionStartedEvent(ms, we, "wType", tl, nil, 100*time.Second, 50*time.Second, 200*time.Second, identity) - wt := addWorkflowTaskScheduledEvent(ms) - addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) - - commands := []*commandpb.Command{{ - CommandType: enumspb.COMMAND_TYPE_START_CHILD_WORKFLOW_EXECUTION, - Attributes: &commandpb.Command_StartChildWorkflowExecutionCommandAttributes{StartChildWorkflowExecutionCommandAttributes: &commandpb.StartChildWorkflowExecutionCommandAttributes{ - Namespace: tests.Namespace.String(), - WorkflowId: tests.WorkflowID, - WorkflowType: &commonpb.WorkflowType{Name: "wType"}, - TaskQueue: &taskqueuepb.TaskQueue{Name: tl}, - SearchAttributes: &commonpb.SearchAttributes{IndexedFields: map[string]*commonpb.Payload{ - "AliasForCustomTextField": payload.EncodeString("search attribute value")}, - }, - }}, - }} - - wfMs := workflow.TestCloneToProto(ms) - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - s.mockNamespaceCache.EXPECT().GetNamespace(tests.Namespace).Return(tests.LocalNamespaceEntry, nil).AnyTimes() - - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).DoAndReturn(func(_ context.Context, request *persistence.UpdateWorkflowExecutionRequest) (*persistence.UpdateWorkflowExecutionResponse, error) { - eventsToSave := request.UpdateWorkflowEvents[0].Events - s.Len(eventsToSave, 2) - s.Equal(enumspb.EVENT_TYPE_WORKFLOW_TASK_COMPLETED, eventsToSave[0].GetEventType()) - s.Equal(enumspb.EVENT_TYPE_START_CHILD_WORKFLOW_EXECUTION_INITIATED, eventsToSave[1].GetEventType()) - startChildEventAttributes := eventsToSave[1].GetStartChildWorkflowExecutionInitiatedEventAttributes() - // Search attribute name was mapped and saved under field name. - s.Equal( - payload.EncodeString("search attribute value"), - startChildEventAttributes.GetSearchAttributes().GetIndexedFields()["CustomTextField"]) - return tests.UpdateWorkflowExecutionResponse, nil - }) - - s.mockShard.Resource.SearchAttributesMapperProvider.EXPECT(). - GetMapper(tests.Namespace). 
- Return(&searchattribute.TestMapper{Namespace: tests.Namespace.String()}, nil) - - _, err := s.historyEngine.RespondWorkflowTaskCompleted(metrics.AddMetricsContext(context.Background()), &historyservice.RespondWorkflowTaskCompletedRequest{ - NamespaceId: tests.NamespaceID.String(), - CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ - TaskToken: serializedTaskToken, - Commands: commands, - Identity: identity, - }, - }) - s.Nil(err) -} - -func (s *engine2Suite) TestRespondWorkflowTaskCompleted_StartChildWorkflow_ExceedsLimit() { - namespaceID := tests.NamespaceID - taskQueue := "testTaskQueue" - identity := "testIdentity" - workflowType := "testWorkflowType" - - we := commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - } - ms := workflow.TestLocalMutableState( - s.historyEngine.shard, - s.mockEventsCache, - tests.LocalNamespaceEntry, - log.NewTestLogger(), - we.GetRunId(), - ) - - addWorkflowExecutionStartedEvent( - ms, - we, - workflowType, - taskQueue, - nil, - time.Minute, - time.Minute, - time.Minute, - identity, - ) - - s.mockNamespaceCache.EXPECT().GetNamespace(tests.Namespace).Return(tests.LocalNamespaceEntry, nil).AnyTimes() - - var commands []*commandpb.Command - for i := 0; i < 6; i++ { - commands = append( - commands, - &commandpb.Command{ - CommandType: enumspb.COMMAND_TYPE_START_CHILD_WORKFLOW_EXECUTION, - Attributes: &commandpb.Command_StartChildWorkflowExecutionCommandAttributes{ - StartChildWorkflowExecutionCommandAttributes: &commandpb.StartChildWorkflowExecutionCommandAttributes{ - Namespace: tests.Namespace.String(), - WorkflowId: tests.WorkflowID, - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueue}, - }}, - }, - ) - } - - wt := addWorkflowTaskScheduledEvent(ms) - addWorkflowTaskStartedEvent( - ms, - wt.ScheduledEventID, - taskQueue, - identity, - ) - taskToken := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: tests.WorkflowID, - RunId: we.GetRunId(), - ScheduledEventId: 2, - } - taskTokenBytes, _ := taskToken.Marshal() - response := &persistence.GetWorkflowExecutionResponse{State: workflow.TestCloneToProto(ms)} - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(response, nil).AnyTimes() - s.mockShard.Resource.SearchAttributesMapperProvider.EXPECT(). - GetMapper(tests.Namespace). - Return(&searchattribute.TestMapper{Namespace: tests.Namespace.String()}, nil). 
- AnyTimes() - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) - - s.historyEngine.shard.GetConfig().NumPendingChildExecutionsLimit = func(namespace string) int { - return 5 - } - _, err := s.historyEngine.RespondWorkflowTaskCompleted(metrics.AddMetricsContext(context.Background()), &historyservice.RespondWorkflowTaskCompletedRequest{ - NamespaceId: tests.NamespaceID.String(), - CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ - TaskToken: taskTokenBytes, - Commands: commands, - Identity: identity, - }, - }) - - s.Error(err) - s.IsType(&serviceerror.InvalidArgument{}, err) - s.Len(s.errorMessages, 1) - s.Equal("the number of pending child workflow executions, 5, has reached the per-workflow limit of 5", s.errorMessages[0]) -} - -func (s *engine2Suite) TestStartWorkflowExecution_BrandNew() { - namespaceID := tests.NamespaceID - workflowID := "workflowID" - workflowType := "workflowType" - taskQueue := "testTaskQueue" - identity := "testIdentity" - - s.mockExecutionMgr.EXPECT().CreateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.CreateWorkflowExecutionResponse, nil) - - requestID := uuid.New() - resp, err := s.historyEngine.StartWorkflowExecution(metrics.AddMetricsContext(context.Background()), &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - Namespace: namespaceID.String(), - WorkflowId: workflowID, - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueue}, - WorkflowExecutionTimeout: timestamp.DurationPtr(20 * time.Second), - WorkflowRunTimeout: timestamp.DurationPtr(1 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(2 * time.Second), - Identity: identity, - RequestId: requestID, - }, - }) - s.Nil(err) - s.NotNil(resp.RunId) -} - -func (s *engine2Suite) TestStartWorkflowExecution_BrandNew_SearchAttributes() { - namespaceID := tests.NamespaceID - workflowID := "workflowID" - workflowType := "workflowType" - taskQueue := "testTaskQueue" - identity := "testIdentity" - - s.mockExecutionMgr.EXPECT().CreateWorkflowExecution(gomock.Any(), gomock.Any()).DoAndReturn(func(_ context.Context, request *persistence.CreateWorkflowExecutionRequest) (*persistence.CreateWorkflowExecutionResponse, error) { - eventsToSave := request.NewWorkflowEvents[0].Events - s.Len(eventsToSave, 2) - s.Equal(enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_STARTED, eventsToSave[0].GetEventType()) - startEventAttributes := eventsToSave[0].GetWorkflowExecutionStartedEventAttributes() - // Search attribute name was mapped and saved under field name. 
- s.Equal( - payload.EncodeString("test"), - startEventAttributes.GetSearchAttributes().GetIndexedFields()["CustomKeywordField"]) - return tests.CreateWorkflowExecutionResponse, nil - }) - - requestID := uuid.New() - resp, err := s.historyEngine.StartWorkflowExecution(metrics.AddMetricsContext(context.Background()), &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - Namespace: namespaceID.String(), - WorkflowId: workflowID, - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueue}, - WorkflowExecutionTimeout: timestamp.DurationPtr(20 * time.Second), - WorkflowRunTimeout: timestamp.DurationPtr(1 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(2 * time.Second), - Identity: identity, - RequestId: requestID, - SearchAttributes: &commonpb.SearchAttributes{IndexedFields: map[string]*commonpb.Payload{ - "CustomKeywordField": payload.EncodeString("test"), - }}}, - }) - s.Nil(err) - s.NotNil(resp.RunId) -} - -func (s *engine2Suite) TestStartWorkflowExecution_StillRunning_Dedup() { - namespaceID := tests.NamespaceID - workflowID := "workflowID" - runID := "runID" - workflowType := "workflowType" - taskQueue := "testTaskQueue" - identity := "testIdentity" - requestID := "requestID" - lastWriteVersion := common.EmptyVersion - - s.mockExecutionMgr.EXPECT().CreateWorkflowExecution(gomock.Any(), gomock.Any()).Return(nil, &persistence.CurrentWorkflowConditionFailedError{ - Msg: "random message", - RequestID: requestID, - RunID: runID, - State: enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, - Status: enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING, - LastWriteVersion: lastWriteVersion, - }) - - resp, err := s.historyEngine.StartWorkflowExecution(metrics.AddMetricsContext(context.Background()), &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - Namespace: namespaceID.String(), - WorkflowId: workflowID, - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueue}, - WorkflowExecutionTimeout: timestamp.DurationPtr(1 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(2 * time.Second), - Identity: identity, - RequestId: requestID, - }, - }) - s.Nil(err) - s.Equal(runID, resp.GetRunId()) -} - -func (s *engine2Suite) TestStartWorkflowExecution_StillRunning_NonDeDup() { - namespaceID := tests.NamespaceID - workflowID := "workflowID" - runID := "runID" - workflowType := "workflowType" - taskQueue := "testTaskQueue" - identity := "testIdentity" - lastWriteVersion := common.EmptyVersion - - s.mockExecutionMgr.EXPECT().CreateWorkflowExecution(gomock.Any(), gomock.Any()).Return(nil, &persistence.CurrentWorkflowConditionFailedError{ - Msg: "random message", - RequestID: "oldRequestID", - RunID: runID, - State: enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, - Status: enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING, - LastWriteVersion: lastWriteVersion, - }) - - resp, err := s.historyEngine.StartWorkflowExecution(metrics.AddMetricsContext(context.Background()), &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - Namespace: namespaceID.String(), - WorkflowId: workflowID, - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueue}, 
- WorkflowExecutionTimeout: timestamp.DurationPtr(1 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(2 * time.Second), - Identity: identity, - RequestId: "newRequestID", - }, - }) - if _, ok := err.(*serviceerror.WorkflowExecutionAlreadyStarted); !ok { - s.Fail("return err is not *serviceerror.WorkflowExecutionAlreadyStarted") - } - s.Nil(resp) -} - -func (s *engine2Suite) TestStartWorkflowExecution_NotRunning_PrevSuccess() { - namespaceID := tests.NamespaceID - workflowID := "workflowID" - runID := "runID" - workflowType := "workflowType" - taskQueue := "testTaskQueue" - identity := "testIdentity" - lastWriteVersion := common.EmptyVersion - - options := []enumspb.WorkflowIdReusePolicy{ - enumspb.WORKFLOW_ID_REUSE_POLICY_ALLOW_DUPLICATE_FAILED_ONLY, - enumspb.WORKFLOW_ID_REUSE_POLICY_ALLOW_DUPLICATE, - enumspb.WORKFLOW_ID_REUSE_POLICY_REJECT_DUPLICATE, - } - - expecedErrs := []bool{true, false, true} - - s.mockExecutionMgr.EXPECT().CreateWorkflowExecution( - gomock.Any(), - newCreateWorkflowExecutionRequestMatcher(func(request *persistence.CreateWorkflowExecutionRequest) bool { - return request.Mode == persistence.CreateWorkflowModeBrandNew - }), - ).Return(nil, &persistence.CurrentWorkflowConditionFailedError{ - Msg: "random message", - RequestID: "oldRequestID", - RunID: runID, - State: enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED, - Status: enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED, - LastWriteVersion: lastWriteVersion, - }).Times(len(expecedErrs)) - - for index, option := range options { - if !expecedErrs[index] { - s.mockExecutionMgr.EXPECT().CreateWorkflowExecution( - gomock.Any(), - newCreateWorkflowExecutionRequestMatcher(func(request *persistence.CreateWorkflowExecutionRequest) bool { - return request.Mode == persistence.CreateWorkflowModeUpdateCurrent && - request.PreviousRunID == runID && - request.PreviousLastWriteVersion == lastWriteVersion - }), - ).Return(tests.CreateWorkflowExecutionResponse, nil) - } - - resp, err := s.historyEngine.StartWorkflowExecution(metrics.AddMetricsContext(context.Background()), &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - Namespace: namespaceID.String(), - WorkflowId: workflowID, - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueue}, - WorkflowExecutionTimeout: timestamp.DurationPtr(1 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(2 * time.Second), - Identity: identity, - RequestId: "newRequestID", - WorkflowIdReusePolicy: option, - }, - }) - - if expecedErrs[index] { - if _, ok := err.(*serviceerror.WorkflowExecutionAlreadyStarted); !ok { - s.Fail("return err is not *serviceerror.WorkflowExecutionAlreadyStarted") - } - s.Nil(resp) - } else { - s.Nil(err) - s.NotNil(resp) - } - } -} - -func (s *engine2Suite) TestStartWorkflowExecution_NotRunning_PrevFail() { - namespaceID := tests.NamespaceID - workflowID := "workflowID" - workflowType := "workflowType" - taskQueue := "testTaskQueue" - identity := "testIdentity" - lastWriteVersion := common.EmptyVersion - - options := []enumspb.WorkflowIdReusePolicy{ - enumspb.WORKFLOW_ID_REUSE_POLICY_ALLOW_DUPLICATE_FAILED_ONLY, - enumspb.WORKFLOW_ID_REUSE_POLICY_ALLOW_DUPLICATE, - enumspb.WORKFLOW_ID_REUSE_POLICY_REJECT_DUPLICATE, - } - - expecedErrs := []bool{false, false, true} - - statuses := []enumspb.WorkflowExecutionStatus{ - enumspb.WORKFLOW_EXECUTION_STATUS_FAILED, - 
enumspb.WORKFLOW_EXECUTION_STATUS_CANCELED, - enumspb.WORKFLOW_EXECUTION_STATUS_TERMINATED, - enumspb.WORKFLOW_EXECUTION_STATUS_TIMED_OUT, - } - runIDs := []string{"1", "2", "3", "4"} - - for i, status := range statuses { - - s.mockExecutionMgr.EXPECT().CreateWorkflowExecution( - gomock.Any(), - newCreateWorkflowExecutionRequestMatcher(func(request *persistence.CreateWorkflowExecutionRequest) bool { - return request.Mode == persistence.CreateWorkflowModeBrandNew - }), - ).Return(nil, &persistence.CurrentWorkflowConditionFailedError{ - Msg: "random message", - RequestID: "oldRequestID", - RunID: runIDs[i], - State: enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED, - Status: status, - LastWriteVersion: lastWriteVersion, - }).Times(len(expecedErrs)) - - for j, option := range options { - - if !expecedErrs[j] { - s.mockExecutionMgr.EXPECT().CreateWorkflowExecution( - gomock.Any(), - newCreateWorkflowExecutionRequestMatcher(func(request *persistence.CreateWorkflowExecutionRequest) bool { - return request.Mode == persistence.CreateWorkflowModeUpdateCurrent && - request.PreviousRunID == runIDs[i] && - request.PreviousLastWriteVersion == lastWriteVersion - }), - ).Return(tests.CreateWorkflowExecutionResponse, nil) - } - - resp, err := s.historyEngine.StartWorkflowExecution(metrics.AddMetricsContext(context.Background()), &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - Namespace: namespaceID.String(), - WorkflowId: workflowID, - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueue}, - WorkflowExecutionTimeout: timestamp.DurationPtr(1 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(2 * time.Second), - Identity: identity, - RequestId: "newRequestID", - WorkflowIdReusePolicy: option, - }, - }) - - if expecedErrs[j] { - if _, ok := err.(*serviceerror.WorkflowExecutionAlreadyStarted); !ok { - s.Fail("return err is not *serviceerror.WorkflowExecutionAlreadyStarted") - } - s.Nil(resp) - } else { - s.Nil(err) - s.NotNil(resp) - } - } - } -} - -func (s *engine2Suite) TestSignalWithStartWorkflowExecution_JustSignal() { - sRequest := &historyservice.SignalWithStartWorkflowExecutionRequest{} - _, err := s.historyEngine.SignalWithStartWorkflowExecution(metrics.AddMetricsContext(context.Background()), sRequest) - s.EqualError(err, "Missing namespace UUID.") - - namespaceID := tests.NamespaceID - workflowID := "wId" - workflowType := "workflowType" - runID := tests.RunID - taskQueue := "testTaskQueue" - identity := "testIdentity" - signalName := "my signal name" - input := payloads.EncodeString("test input") - requestID := uuid.New() - sRequest = &historyservice.SignalWithStartWorkflowExecutionRequest{ - NamespaceId: namespaceID.String(), - SignalWithStartRequest: &workflowservice.SignalWithStartWorkflowExecutionRequest{ - Namespace: namespaceID.String(), - WorkflowId: workflowID, - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueue}, - Identity: identity, - SignalName: signalName, - Input: input, - RequestId: requestID, - }, - } - - ms := workflow.TestLocalMutableState(s.historyEngine.shard, s.mockEventsCache, tests.LocalNamespaceEntry, - log.NewTestLogger(), runID) - addWorkflowExecutionStartedEvent(ms, commonpb.WorkflowExecution{ - WorkflowId: workflowID, - RunId: runID, - }, "wType", "testTaskQueue", payloads.EncodeString("input"), 25*time.Second, 20*time.Second, 
200*time.Second, identity) - _ = addWorkflowTaskScheduledEvent(ms) - wfMs := workflow.TestCloneToProto(ms) - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - gceResponse := &persistence.GetCurrentExecutionResponse{RunID: runID} - - s.mockExecutionMgr.EXPECT().GetCurrentExecution(gomock.Any(), gomock.Any()).Return(gceResponse, nil) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) - - resp, err := s.historyEngine.SignalWithStartWorkflowExecution(metrics.AddMetricsContext(context.Background()), sRequest) - s.Nil(err) - s.Equal(runID, resp.GetRunId()) -} - -func (s *engine2Suite) TestSignalWithStartWorkflowExecution_WorkflowNotExist() { - sRequest := &historyservice.SignalWithStartWorkflowExecutionRequest{} - _, err := s.historyEngine.SignalWithStartWorkflowExecution(metrics.AddMetricsContext(context.Background()), sRequest) - s.EqualError(err, "Missing namespace UUID.") - - namespaceID := tests.NamespaceID - workflowID := "wId" - workflowType := "workflowType" - taskQueue := "testTaskQueue" - identity := "testIdentity" - signalName := "my signal name" - input := payloads.EncodeString("test input") - requestID := uuid.New() - - sRequest = &historyservice.SignalWithStartWorkflowExecutionRequest{ - NamespaceId: namespaceID.String(), - SignalWithStartRequest: &workflowservice.SignalWithStartWorkflowExecutionRequest{ - Namespace: namespaceID.String(), - WorkflowId: workflowID, - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueue}, - WorkflowExecutionTimeout: timestamp.DurationPtr(1 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(2 * time.Second), - Identity: identity, - SignalName: signalName, - Input: input, - RequestId: requestID, - }, - } - - notExistErr := serviceerror.NewNotFound("Workflow not exist") - - s.mockExecutionMgr.EXPECT().GetCurrentExecution(gomock.Any(), gomock.Any()).Return(nil, notExistErr) - s.mockExecutionMgr.EXPECT().CreateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.CreateWorkflowExecutionResponse, nil) - - resp, err := s.historyEngine.SignalWithStartWorkflowExecution(metrics.AddMetricsContext(context.Background()), sRequest) - s.Nil(err) - s.NotNil(resp.GetRunId()) -} - -func (s *engine2Suite) TestSignalWithStartWorkflowExecution_WorkflowNotRunning() { - we := commonpb.WorkflowExecution{ - WorkflowId: "wId", - RunId: tests.RunID, - } - tl := "testTaskQueue" - - sRequest := &historyservice.SignalWithStartWorkflowExecutionRequest{} - _, err := s.historyEngine.SignalWithStartWorkflowExecution(metrics.AddMetricsContext(context.Background()), sRequest) - s.EqualError(err, "Missing namespace UUID.") - - namespaceID := tests.NamespaceID - workflowID := "wId" - runID := tests.RunID - workflowType := "workflowType" - taskQueue := "testTaskQueue" - identity := "testIdentity" - signalName := "my signal name" - input := payloads.EncodeString("test input") - requestID := uuid.New() - sRequest = &historyservice.SignalWithStartWorkflowExecutionRequest{ - NamespaceId: namespaceID.String(), - SignalWithStartRequest: &workflowservice.SignalWithStartWorkflowExecutionRequest{ - Namespace: namespaceID.String(), - WorkflowId: workflowID, - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueue}, - Input: input, - WorkflowExecutionTimeout: 
timestamp.DurationPtr(1 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(2 * time.Second), - Identity: identity, - RequestId: requestID, - WorkflowIdReusePolicy: enumspb.WORKFLOW_ID_REUSE_POLICY_ALLOW_DUPLICATE, - SignalName: signalName, - SignalInput: nil, - Control: "", - RetryPolicy: nil, - CronSchedule: "", - Memo: nil, - SearchAttributes: nil, - Header: nil, - }, - } - - ms := workflow.TestLocalMutableState(s.historyEngine.shard, s.mockEventsCache, tests.LocalNamespaceEntry, - log.NewTestLogger(), runID) - addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) - wfMs := workflow.TestCloneToProto(ms) - wfMs.ExecutionState.State = enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - gceResponse := &persistence.GetCurrentExecutionResponse{RunID: runID} - - s.mockExecutionMgr.EXPECT().GetCurrentExecution(gomock.Any(), gomock.Any()).Return(gceResponse, nil).AnyTimes() - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - s.mockExecutionMgr.EXPECT().CreateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.CreateWorkflowExecutionResponse, nil) - - resp, err := s.historyEngine.SignalWithStartWorkflowExecution(metrics.AddMetricsContext(context.Background()), sRequest) - s.Nil(err) - s.NotNil(resp.GetRunId()) - s.NotEqual(runID, resp.GetRunId()) -} - -func (s *engine2Suite) TestSignalWithStartWorkflowExecution_Start_DuplicateRequests() { - namespaceID := tests.NamespaceID - workflowID := "wId" - runID := tests.RunID - workflowType := "workflowType" - taskQueue := "testTaskQueue" - identity := "testIdentity" - signalName := "my signal name" - input := payloads.EncodeString("test input") - requestID := "testRequestID" - we := commonpb.WorkflowExecution{ - WorkflowId: "wId", - RunId: tests.RunID, - } - tl := "testTaskQueue" - sRequest := &historyservice.SignalWithStartWorkflowExecutionRequest{ - NamespaceId: namespaceID.String(), - SignalWithStartRequest: &workflowservice.SignalWithStartWorkflowExecutionRequest{ - Namespace: namespaceID.String(), - WorkflowId: workflowID, - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueue}, - Input: input, - WorkflowExecutionTimeout: timestamp.DurationPtr(1 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(2 * time.Second), - Identity: identity, - RequestId: requestID, - WorkflowIdReusePolicy: enumspb.WORKFLOW_ID_REUSE_POLICY_ALLOW_DUPLICATE, - SignalName: signalName, - SignalInput: nil, - Control: "", - RetryPolicy: nil, - CronSchedule: "", - Memo: nil, - SearchAttributes: nil, - Header: nil, - }, - } - - ms := workflow.TestLocalMutableState(s.historyEngine.shard, s.mockEventsCache, tests.LocalNamespaceEntry, - log.NewTestLogger(), runID) - addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) - wfMs := workflow.TestCloneToProto(ms) - wfMs.ExecutionState.State = enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - gceResponse := &persistence.GetCurrentExecutionResponse{RunID: runID} - workflowAlreadyStartedErr := &persistence.CurrentWorkflowConditionFailedError{ - Msg: "random message", - RequestID: requestID, // use same requestID - RunID: runID, - State: enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, - Status: 
enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING, - LastWriteVersion: common.EmptyVersion, - } - - s.mockExecutionMgr.EXPECT().GetCurrentExecution(gomock.Any(), gomock.Any()).Return(gceResponse, nil).AnyTimes() - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - s.mockExecutionMgr.EXPECT().CreateWorkflowExecution(gomock.Any(), gomock.Any()).Return(nil, workflowAlreadyStartedErr) - - ctx := metrics.AddMetricsContext(context.Background()) - resp, err := s.historyEngine.SignalWithStartWorkflowExecution(ctx, sRequest) - if err != nil { - println("================================================================================================") - println("================================================================================================") - println("================================================================================================") - println(err) - println("================================================================================================") - println("================================================================================================") - println("================================================================================================") - } - s.Nil(err) - s.NotNil(resp.GetRunId()) - s.Equal(runID, resp.GetRunId()) -} - -func (s *engine2Suite) TestSignalWithStartWorkflowExecution_Start_WorkflowAlreadyStarted() { - namespaceID := tests.NamespaceID - workflowID := "wId" - runID := tests.RunID - workflowType := "workflowType" - taskQueue := "testTaskQueue" - identity := "testIdentity" - signalName := "my signal name" - input := payloads.EncodeString("test input") - requestID := "testRequestID" - we := commonpb.WorkflowExecution{ - WorkflowId: "wId", - RunId: tests.RunID, - } - tl := "testTaskQueue" - sRequest := &historyservice.SignalWithStartWorkflowExecutionRequest{ - NamespaceId: namespaceID.String(), - SignalWithStartRequest: &workflowservice.SignalWithStartWorkflowExecutionRequest{ - Namespace: namespaceID.String(), - WorkflowId: workflowID, - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueue}, - Input: input, - WorkflowExecutionTimeout: timestamp.DurationPtr(1 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(2 * time.Second), - Identity: identity, - RequestId: requestID, - WorkflowIdReusePolicy: enumspb.WORKFLOW_ID_REUSE_POLICY_ALLOW_DUPLICATE, - SignalName: signalName, - SignalInput: nil, - Control: "", - RetryPolicy: nil, - CronSchedule: "", - Memo: nil, - SearchAttributes: nil, - Header: nil, - }, - } - - ms := workflow.TestLocalMutableState(s.historyEngine.shard, s.mockEventsCache, tests.LocalNamespaceEntry, - log.NewTestLogger(), runID) - addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) - wfMs := workflow.TestCloneToProto(ms) - wfMs.ExecutionState.State = enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - gceResponse := &persistence.GetCurrentExecutionResponse{RunID: runID} - workflowAlreadyStartedErr := &persistence.CurrentWorkflowConditionFailedError{ - Msg: "random message", - RequestID: "new request ID", - RunID: runID, - State: enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, - Status: enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING, - LastWriteVersion: common.EmptyVersion, - } - - s.mockExecutionMgr.EXPECT().GetCurrentExecution(gomock.Any(), 
gomock.Any()).Return(gceResponse, nil).AnyTimes() - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - s.mockExecutionMgr.EXPECT().CreateWorkflowExecution(gomock.Any(), gomock.Any()).Return(nil, workflowAlreadyStartedErr) - - resp, err := s.historyEngine.SignalWithStartWorkflowExecution(metrics.AddMetricsContext(context.Background()), sRequest) - s.Nil(resp) - s.NotNil(err) -} - -func (s *engine2Suite) TestRecordChildExecutionCompleted() { - childWorkflowID := "some random child workflow ID" - childRunID := uuid.New() - childWorkflowType := "some random child workflow type" - childTaskQueueName := "some random child task queue" - - request := &historyservice.RecordChildExecutionCompletedRequest{ - NamespaceId: tests.NamespaceID.String(), - WorkflowExecution: &commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - }, - CompletedExecution: &commonpb.WorkflowExecution{ - WorkflowId: childWorkflowID, - RunId: childRunID, - }, - CompletionEvent: &historypb.HistoryEvent{ - EventId: 456, - EventType: enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_COMPLETED, - Attributes: &historypb.HistoryEvent_WorkflowExecutionCompletedEventAttributes{ - WorkflowExecutionCompletedEventAttributes: &historypb.WorkflowExecutionCompletedEventAttributes{}, - }, - }, - ParentInitiatedId: 123, - ParentInitiatedVersion: 100, - } - - ms := workflow.TestGlobalMutableState(s.historyEngine.shard, s.mockEventsCache, log.NewTestLogger(), tests.Version, tests.RunID) - addWorkflowExecutionStartedEvent(ms, commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - }, "wType", "testTaskQueue", payloads.EncodeString("input"), 25*time.Second, 20*time.Second, 200*time.Second, "identity") - wfMs := workflow.TestCloneToProto(ms) - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - - // reload mutable state due to potential stale mutable state (initiated event not found) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil).Times(2) - _, err := s.historyEngine.RecordChildExecutionCompleted(metrics.AddMetricsContext(context.Background()), request) - s.IsType(&serviceerror.NotFound{}, err) - - // add child init event - wt := addWorkflowTaskScheduledEvent(ms) - workflowTasksStartEvent := addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, "testTaskQueue", uuid.New()) - wt.StartedEventID = workflowTasksStartEvent.GetEventId() - workflowTaskCompletedEvent := addWorkflowTaskCompletedEvent(&s.Suite, ms, wt.ScheduledEventID, wt.StartedEventID, "some random identity") - - initiatedEvent, _ := addStartChildWorkflowExecutionInitiatedEvent(ms, workflowTaskCompletedEvent.GetEventId(), uuid.New(), - tests.ChildNamespace, tests.ChildNamespaceID, childWorkflowID, childWorkflowType, childTaskQueueName, nil, 1*time.Second, 1*time.Second, 1*time.Second, enumspb.PARENT_CLOSE_POLICY_TERMINATE) - request.ParentInitiatedId = initiatedEvent.GetEventId() - request.ParentInitiatedVersion = initiatedEvent.GetVersion() - - // reload mutable state due to potential stale mutable state (started event not found) - wfMs = workflow.TestCloneToProto(ms) - gwmsResponse = &persistence.GetWorkflowExecutionResponse{State: wfMs} - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil).Times(2) - _, err = s.historyEngine.RecordChildExecutionCompleted(metrics.AddMetricsContext(context.Background()), request) - s.IsType(&serviceerror.NotFound{}, err) - - // add 
child started event - addChildWorkflowExecutionStartedEvent(ms, initiatedEvent.GetEventId(), childWorkflowID, childRunID, childWorkflowType, nil) - - wfMs = workflow.TestCloneToProto(ms) - gwmsResponse = &persistence.GetWorkflowExecutionResponse{State: wfMs} - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) - _, err = s.historyEngine.RecordChildExecutionCompleted(metrics.AddMetricsContext(context.Background()), request) - s.NoError(err) -} - -func (s *engine2Suite) TestVerifyChildExecutionCompletionRecorded_WorkflowNotExist() { - - request := &historyservice.VerifyChildExecutionCompletionRecordedRequest{ - NamespaceId: tests.ParentNamespaceID.String(), - ParentExecution: &commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - }, - ChildExecution: &commonpb.WorkflowExecution{ - WorkflowId: "child workflowId", - RunId: "child runId", - }, - ParentInitiatedId: 123, - ParentInitiatedVersion: 100, - } - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(nil, &serviceerror.NotFound{}) - - _, err := s.historyEngine.VerifyChildExecutionCompletionRecorded(metrics.AddMetricsContext(context.Background()), request) - s.IsType(&serviceerror.NotFound{}, err) -} - -func (s *engine2Suite) TestVerifyChildExecutionCompletionRecorded_WorkflowClosed() { - - request := &historyservice.VerifyChildExecutionCompletionRecordedRequest{ - NamespaceId: tests.ParentNamespaceID.String(), - ParentExecution: &commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - }, - ChildExecution: &commonpb.WorkflowExecution{ - WorkflowId: "child workflowId", - RunId: "child runId", - }, - ParentInitiatedId: 123, - ParentInitiatedVersion: 100, - } - - ms := workflow.TestGlobalMutableState(s.historyEngine.shard, s.mockEventsCache, log.NewTestLogger(), tests.Version, tests.RunID) - addWorkflowExecutionStartedEvent(ms, commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - }, "wType", "testTaskQueue", payloads.EncodeString("input"), 25*time.Second, 20*time.Second, 200*time.Second, "identity") - _, err := ms.AddTimeoutWorkflowEvent( - ms.GetNextEventID(), - enumspb.RETRY_STATE_RETRY_POLICY_NOT_SET, - uuid.New(), - ) - s.NoError(err) - - wfMs := workflow.TestCloneToProto(ms) - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - - _, err = s.historyEngine.VerifyChildExecutionCompletionRecorded(metrics.AddMetricsContext(context.Background()), request) - s.NoError(err) -} - -func (s *engine2Suite) TestVerifyChildExecutionCompletionRecorded_InitiatedEventNotFound() { - - request := &historyservice.VerifyChildExecutionCompletionRecordedRequest{ - NamespaceId: tests.NamespaceID.String(), - ParentExecution: &commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - }, - ChildExecution: &commonpb.WorkflowExecution{ - WorkflowId: "child workflowId", - RunId: "child runId", - }, - ParentInitiatedId: 123, - ParentInitiatedVersion: 100, - } - - ms := workflow.TestGlobalMutableState(s.historyEngine.shard, s.mockEventsCache, log.NewTestLogger(), tests.Version, tests.RunID) - addWorkflowExecutionStartedEvent(ms, commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - }, 
"wType", "testTaskQueue", payloads.EncodeString("input"), 25*time.Second, 20*time.Second, 200*time.Second, "identity") - wfMs := workflow.TestCloneToProto(ms) - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - - _, err := s.historyEngine.VerifyChildExecutionCompletionRecorded(metrics.AddMetricsContext(context.Background()), request) - s.IsType(&serviceerror.WorkflowNotReady{}, err) -} - -func (s *engine2Suite) TestVerifyChildExecutionCompletionRecorded_InitiatedEventFoundOnNonCurrentBranch() { - - inititatedVersion := tests.Version - 100 - request := &historyservice.VerifyChildExecutionCompletionRecordedRequest{ - NamespaceId: tests.NamespaceID.String(), - ParentExecution: &commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - }, - ChildExecution: &commonpb.WorkflowExecution{ - WorkflowId: "child workflowId", - RunId: "child runId", - }, - ParentInitiatedId: 123, - ParentInitiatedVersion: inititatedVersion, - } - - ms := workflow.TestGlobalMutableState(s.historyEngine.shard, s.mockEventsCache, log.NewTestLogger(), tests.Version, tests.RunID) - addWorkflowExecutionStartedEvent(ms, commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - }, "wType", "testTaskQueue", payloads.EncodeString("input"), 25*time.Second, 20*time.Second, 200*time.Second, "identity") - ms.GetExecutionInfo().VersionHistories = &historyspb.VersionHistories{ - CurrentVersionHistoryIndex: 0, - Histories: []*historyspb.VersionHistory{ - { - BranchToken: []byte{1, 2, 3}, - Items: []*historyspb.VersionHistoryItem{ - {EventId: 100, Version: inititatedVersion}, - {EventId: 456, Version: tests.Version}, - }, - }, - { - BranchToken: []byte{4, 5, 6}, - Items: []*historyspb.VersionHistoryItem{ - {EventId: 456, Version: inititatedVersion}, - }, - }, - }, - } - - wfMs := workflow.TestCloneToProto(ms) - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - - _, err := s.historyEngine.VerifyChildExecutionCompletionRecorded(metrics.AddMetricsContext(context.Background()), request) - s.IsType(&serviceerror.NotFound{}, err) -} - -func (s *engine2Suite) TestVerifyChildExecutionCompletionRecorded_InitiatedEventFoundOnCurrentBranch() { - - taskQueueName := "testTaskQueue" - - childWorkflowID := "some random child workflow ID" - childRunID := uuid.New() - childWorkflowType := "some random child workflow type" - childTaskQueueName := "some random child task queue" - - ms := workflow.TestGlobalMutableState(s.historyEngine.shard, s.mockEventsCache, log.NewTestLogger(), tests.Version, tests.RunID) - addWorkflowExecutionStartedEvent(ms, commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - }, "wType", taskQueueName, payloads.EncodeString("input"), 25*time.Second, 20*time.Second, 200*time.Second, "identity") - wt := addWorkflowTaskScheduledEvent(ms) - workflowTasksStartEvent := addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, taskQueueName, uuid.New()) - wt.StartedEventID = workflowTasksStartEvent.GetEventId() - workflowTaskCompletedEvent := addWorkflowTaskCompletedEvent(&s.Suite, ms, wt.ScheduledEventID, wt.StartedEventID, "some random identity") - initiatedEvent, ci := addStartChildWorkflowExecutionInitiatedEvent(ms, workflowTaskCompletedEvent.GetEventId(), uuid.New(), - tests.ChildNamespace, 
tests.ChildNamespaceID, childWorkflowID, childWorkflowType, childTaskQueueName, nil, 1*time.Second, 1*time.Second, 1*time.Second, enumspb.PARENT_CLOSE_POLICY_TERMINATE) - - request := &historyservice.VerifyChildExecutionCompletionRecordedRequest{ - NamespaceId: tests.NamespaceID.String(), - ParentExecution: &commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - }, - ChildExecution: &commonpb.WorkflowExecution{ - WorkflowId: childWorkflowID, - RunId: childRunID, - }, - ParentInitiatedId: initiatedEvent.GetEventId(), - ParentInitiatedVersion: initiatedEvent.GetVersion(), - } - - // child workflow not started in mutable state - wfMs := workflow.TestCloneToProto(ms) - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - - _, err := s.historyEngine.VerifyChildExecutionCompletionRecorded(metrics.AddMetricsContext(context.Background()), request) - s.IsType(&serviceerror.WorkflowNotReady{}, err) - - // child workflow started but not completed - addChildWorkflowExecutionStartedEvent(ms, initiatedEvent.GetEventId(), childWorkflowID, childRunID, childWorkflowType, nil) - - wfMs = workflow.TestCloneToProto(ms) - gwmsResponse = &persistence.GetWorkflowExecutionResponse{State: wfMs} - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - - _, err = s.historyEngine.VerifyChildExecutionCompletionRecorded(metrics.AddMetricsContext(context.Background()), request) - s.IsType(&serviceerror.WorkflowNotReady{}, err) - - // child completion recorded - addChildWorkflowExecutionCompletedEvent( - ms, - ci.InitiatedEventId, - &commonpb.WorkflowExecution{ - WorkflowId: childWorkflowID, - RunId: childRunID, - }, - &historypb.WorkflowExecutionCompletedEventAttributes{ - Result: payloads.EncodeString("some random child workflow execution result"), - WorkflowTaskCompletedEventId: workflowTaskCompletedEvent.GetEventId(), - }, - ) - - wfMs = workflow.TestCloneToProto(ms) - gwmsResponse = &persistence.GetWorkflowExecutionResponse{State: wfMs} - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - - _, err = s.historyEngine.VerifyChildExecutionCompletionRecorded(metrics.AddMetricsContext(context.Background()), request) - s.NoError(err) -} - -func (s *engine2Suite) TestRefreshWorkflowTasks() { - execution := commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - } - - ms := workflow.TestGlobalMutableState(s.historyEngine.shard, s.mockEventsCache, log.NewTestLogger(), tests.Version, tests.RunID) - startEvent := addWorkflowExecutionStartedEvent(ms, execution, "wType", "testTaskQueue", payloads.EncodeString("input"), 25*time.Second, 20*time.Second, 200*time.Second, "identity") - startVersion := startEvent.GetVersion() - timeoutEvent, err := ms.AddTimeoutWorkflowEvent( - ms.GetNextEventID(), - enumspb.RETRY_STATE_RETRY_POLICY_NOT_SET, - uuid.New(), - ) - s.NoError(err) - - wfMs := workflow.TestCloneToProto(ms) - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - s.mockExecutionMgr.EXPECT().AddHistoryTasks(gomock.Any(), gomock.Any()).Return(nil) - s.mockEventsCache.EXPECT().GetEvent( - gomock.Any(), - events.EventKey{ - NamespaceID: tests.NamespaceID, - WorkflowID: execution.GetWorkflowId(), - RunID: execution.GetRunId(), - 
EventID: common.FirstEventID, - Version: startVersion, - }, - common.FirstEventID, - gomock.Any(), - ).Return(startEvent, nil).AnyTimes() - s.mockEventsCache.EXPECT().GetEvent( - gomock.Any(), - events.EventKey{ - NamespaceID: tests.NamespaceID, - WorkflowID: execution.GetWorkflowId(), - RunID: execution.GetRunId(), - EventID: timeoutEvent.GetEventId(), - Version: startVersion, - }, - timeoutEvent.GetEventId(), - gomock.Any(), - ).Return(startEvent, nil).AnyTimes() - - err = s.historyEngine.RefreshWorkflowTasks(metrics.AddMetricsContext(context.Background()), tests.NamespaceID, execution) - s.NoError(err) -} - -func (s *engine2Suite) getMutableState(namespaceID namespace.ID, we commonpb.WorkflowExecution) workflow.MutableState { - weContext, release, err := s.workflowCache.GetOrCreateWorkflowExecution( - metrics.AddMetricsContext(context.Background()), - namespaceID, - we, - workflow.LockPriorityHigh, - ) - if err != nil { - return nil - } - defer release(nil) - - return weContext.(*workflow.ContextImpl).MutableState -} - -type createWorkflowExecutionRequestMatcher struct { - f func(request *persistence.CreateWorkflowExecutionRequest) bool -} - -func newCreateWorkflowExecutionRequestMatcher(f func(request *persistence.CreateWorkflowExecutionRequest) bool) gomock.Matcher { - return &createWorkflowExecutionRequestMatcher{ - f: f, - } -} - -func (m *createWorkflowExecutionRequestMatcher) Matches(x interface{}) bool { - request, ok := x.(*persistence.CreateWorkflowExecutionRequest) - if !ok { - return false - } - return m.f(request) -} - -func (m *createWorkflowExecutionRequestMatcher) String() string { - return "CreateWorkflowExecutionRequest match condition" -} diff -Nru temporal-1.21.5-1/src/service/history/historyEngine3_eventsv2_test.go temporal-1.22.5/src/service/history/historyEngine3_eventsv2_test.go --- temporal-1.21.5-1/src/service/history/historyEngine3_eventsv2_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/historyEngine3_eventsv2_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,368 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- -package history - -import ( - "context" - "testing" - "time" - - "github.com/golang/mock/gomock" - "github.com/pborman/uuid" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - commonpb "go.temporal.io/api/common/v1" - enumspb "go.temporal.io/api/enums/v1" - "go.temporal.io/api/serviceerror" - taskqueuepb "go.temporal.io/api/taskqueue/v1" - "go.temporal.io/api/workflowservice/v1" - - "go.temporal.io/server/api/historyservice/v1" - persistencespb "go.temporal.io/server/api/persistence/v1" - "go.temporal.io/server/common" - "go.temporal.io/server/common/clock" - "go.temporal.io/server/common/cluster" - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/metrics" - "go.temporal.io/server/common/namespace" - "go.temporal.io/server/common/payloads" - "go.temporal.io/server/common/persistence" - "go.temporal.io/server/common/primitives/timestamp" - "go.temporal.io/server/service/history/api" - "go.temporal.io/server/service/history/configs" - "go.temporal.io/server/service/history/events" - "go.temporal.io/server/service/history/queues" - "go.temporal.io/server/service/history/shard" - "go.temporal.io/server/service/history/tasks" - "go.temporal.io/server/service/history/tests" - "go.temporal.io/server/service/history/workflow" - wcache "go.temporal.io/server/service/history/workflow/cache" -) - -type ( - engine3Suite struct { - suite.Suite - *require.Assertions - - controller *gomock.Controller - mockShard *shard.ContextTest - mockTxProcessor *queues.MockQueue - mockTimerProcessor *queues.MockQueue - mockVisibilityProcessor *queues.MockQueue - mockEventsCache *events.MockCache - mockNamespaceCache *namespace.MockRegistry - mockClusterMetadata *cluster.MockMetadata - workflowCache wcache.Cache - historyEngine *historyEngineImpl - mockExecutionMgr *persistence.MockExecutionManager - - config *configs.Config - logger log.Logger - } -) - -func TestEngine3Suite(t *testing.T) { - s := new(engine3Suite) - suite.Run(t, s) -} - -func (s *engine3Suite) SetupSuite() { - s.config = tests.NewDynamicConfig() -} - -func (s *engine3Suite) TearDownSuite() { -} - -func (s *engine3Suite) SetupTest() { - s.Assertions = require.New(s.T()) - - s.controller = gomock.NewController(s.T()) - - s.mockTxProcessor = queues.NewMockQueue(s.controller) - s.mockTimerProcessor = queues.NewMockQueue(s.controller) - s.mockVisibilityProcessor = queues.NewMockQueue(s.controller) - s.mockTxProcessor.EXPECT().Category().Return(tasks.CategoryTransfer).AnyTimes() - s.mockTimerProcessor.EXPECT().Category().Return(tasks.CategoryTimer).AnyTimes() - s.mockVisibilityProcessor.EXPECT().Category().Return(tasks.CategoryVisibility).AnyTimes() - s.mockTxProcessor.EXPECT().NotifyNewTasks(gomock.Any()).AnyTimes() - s.mockTimerProcessor.EXPECT().NotifyNewTasks(gomock.Any()).AnyTimes() - s.mockVisibilityProcessor.EXPECT().NotifyNewTasks(gomock.Any()).AnyTimes() - - s.mockShard = shard.NewTestContext( - s.controller, - &persistencespb.ShardInfo{ - ShardId: 1, - RangeId: 1, - }, - s.config, - ) - s.mockShard.Resource.ShardMgr.EXPECT().AssertShardOwnership(gomock.Any(), gomock.Any()).AnyTimes() - - s.mockExecutionMgr = s.mockShard.Resource.ExecutionMgr - s.mockClusterMetadata = s.mockShard.Resource.ClusterMetadata - s.mockNamespaceCache = s.mockShard.Resource.NamespaceCache - s.mockEventsCache = s.mockShard.MockEventsCache - - s.mockClusterMetadata.EXPECT().IsGlobalNamespaceEnabled().Return(false).AnyTimes() - 
s.mockClusterMetadata.EXPECT().GetCurrentClusterName().Return(cluster.TestCurrentClusterName).AnyTimes() - s.mockClusterMetadata.EXPECT().ClusterNameForFailoverVersion(false, common.EmptyVersion).Return(cluster.TestCurrentClusterName).AnyTimes() - s.mockEventsCache.EXPECT().PutEvent(gomock.Any(), gomock.Any()).AnyTimes() - s.workflowCache = wcache.NewCache(s.mockShard) - s.logger = s.mockShard.GetLogger() - - h := &historyEngineImpl{ - currentClusterName: s.mockShard.GetClusterMetadata().GetCurrentClusterName(), - shard: s.mockShard, - clusterMetadata: s.mockClusterMetadata, - executionManager: s.mockExecutionMgr, - logger: s.logger, - throttledLogger: s.logger, - metricsHandler: metrics.NoopMetricsHandler, - tokenSerializer: common.NewProtoTaskTokenSerializer(), - config: s.config, - timeSource: s.mockShard.GetTimeSource(), - eventNotifier: events.NewNotifier(clock.NewRealTimeSource(), metrics.NoopMetricsHandler, func(namespace.ID, string) int32 { return 1 }), - queueProcessors: map[tasks.Category]queues.Queue{ - s.mockTxProcessor.Category(): s.mockTxProcessor, - s.mockTimerProcessor.Category(): s.mockTimerProcessor, - s.mockVisibilityProcessor.Category(): s.mockVisibilityProcessor, - }, - workflowConsistencyChecker: api.NewWorkflowConsistencyChecker(s.mockShard, s.workflowCache), - } - s.mockShard.SetEngineForTesting(h) - h.workflowTaskHandler = newWorkflowTaskHandlerCallback(h) - - s.historyEngine = h -} - -func (s *engine3Suite) TearDownTest() { - s.controller.Finish() - s.mockShard.StopForTest() -} - -func (s *engine3Suite) TestRecordWorkflowTaskStartedSuccessStickyEnabled() { - testNamespaceEntry := namespace.NewLocalNamespaceForTest( - &persistencespb.NamespaceInfo{Id: tests.NamespaceID.String()}, &persistencespb.NamespaceConfig{Retention: timestamp.DurationFromDays(1)}, "", - ) - s.mockNamespaceCache.EXPECT().GetNamespaceByID(gomock.Any()).Return(testNamespaceEntry, nil).AnyTimes() - s.mockNamespaceCache.EXPECT().GetNamespace(gomock.Any()).Return(testNamespaceEntry, nil).AnyTimes() - - namespaceID := tests.NamespaceID - we := commonpb.WorkflowExecution{ - WorkflowId: "wId", - RunId: tests.RunID, - } - tl := "testTaskQueue" - stickyTl := "stickyTaskQueue" - identity := "testIdentity" - - ms := workflow.TestLocalMutableState(s.historyEngine.shard, s.mockEventsCache, tests.LocalNamespaceEntry, - log.NewTestLogger(), we.GetRunId()) - executionInfo := ms.GetExecutionInfo() - executionInfo.LastUpdateTime = timestamp.TimeNowPtrUtc() - executionInfo.StickyTaskQueue = stickyTl - - addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) - wt := addWorkflowTaskScheduledEvent(ms) - - wfMs := workflow.TestCloneToProto(ms) - - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) - - request := historyservice.RecordWorkflowTaskStartedRequest{ - NamespaceId: namespaceID.String(), - WorkflowExecution: &we, - ScheduledEventId: 2, - TaskId: 100, - RequestId: "reqId", - PollRequest: &workflowservice.PollWorkflowTaskQueueRequest{ - TaskQueue: &taskqueuepb.TaskQueue{ - Name: stickyTl, - }, - Identity: identity, - }, - } - - expectedResponse := historyservice.RecordWorkflowTaskStartedResponse{} - expectedResponse.WorkflowType = ms.GetWorkflowType() - 
executionInfo = ms.GetExecutionInfo() - if executionInfo.LastWorkflowTaskStartedEventId != common.EmptyEventID { - expectedResponse.PreviousStartedEventId = executionInfo.LastWorkflowTaskStartedEventId - } - expectedResponse.ScheduledEventId = wt.ScheduledEventID - expectedResponse.ScheduledTime = wt.ScheduledTime - expectedResponse.StartedEventId = wt.ScheduledEventID + 1 - expectedResponse.StickyExecutionEnabled = true - expectedResponse.NextEventId = ms.GetNextEventID() + 1 - expectedResponse.Attempt = wt.Attempt - expectedResponse.WorkflowExecutionTaskQueue = &taskqueuepb.TaskQueue{ - Name: executionInfo.TaskQueue, - Kind: enumspb.TASK_QUEUE_KIND_NORMAL, - } - expectedResponse.BranchToken, _ = ms.GetCurrentBranchToken() - - response, err := s.historyEngine.RecordWorkflowTaskStarted(context.Background(), &request) - s.Nil(err) - s.NotNil(response) - s.True(response.StartedTime.After(*expectedResponse.ScheduledTime)) - expectedResponse.StartedTime = response.StartedTime - s.Equal(&expectedResponse, response) -} - -func (s *engine3Suite) TestStartWorkflowExecution_BrandNew() { - testNamespaceEntry := namespace.NewLocalNamespaceForTest( - &persistencespb.NamespaceInfo{Id: tests.NamespaceID.String()}, &persistencespb.NamespaceConfig{Retention: timestamp.DurationFromDays(1)}, "", - ) - s.mockNamespaceCache.EXPECT().GetNamespaceByID(gomock.Any()).Return(testNamespaceEntry, nil).AnyTimes() - s.mockNamespaceCache.EXPECT().GetNamespace(gomock.Any()).Return(testNamespaceEntry, nil).AnyTimes() - - namespaceID := tests.NamespaceID - workflowID := "workflowID" - workflowType := "workflowType" - taskQueue := "testTaskQueue" - identity := "testIdentity" - - s.mockExecutionMgr.EXPECT().CreateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.CreateWorkflowExecutionResponse, nil) - - requestID := uuid.New() - resp, err := s.historyEngine.StartWorkflowExecution(context.Background(), &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - Namespace: namespaceID.String(), - WorkflowId: workflowID, - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueue}, - WorkflowExecutionTimeout: timestamp.DurationPtr(1 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(2 * time.Second), - Identity: identity, - RequestId: requestID, - }, - }) - s.Nil(err) - s.NotNil(resp.RunId) -} - -func (s *engine3Suite) TestSignalWithStartWorkflowExecution_JustSignal() { - testNamespaceEntry := namespace.NewLocalNamespaceForTest( - &persistencespb.NamespaceInfo{Id: tests.NamespaceID.String()}, &persistencespb.NamespaceConfig{Retention: timestamp.DurationFromDays(1)}, "", - ) - s.mockNamespaceCache.EXPECT().GetNamespaceByID(gomock.Any()).Return(testNamespaceEntry, nil).AnyTimes() - s.mockNamespaceCache.EXPECT().GetNamespace(gomock.Any()).Return(testNamespaceEntry, nil).AnyTimes() - - sRequest := &historyservice.SignalWithStartWorkflowExecutionRequest{} - _, err := s.historyEngine.SignalWithStartWorkflowExecution(context.Background(), sRequest) - s.EqualError(err, "Missing namespace UUID.") - - namespaceID := tests.NamespaceID - workflowID := "wId" - workflowType := "workflowType" - runID := tests.RunID - taskQueue := "testTaskQueue" - identity := "testIdentity" - signalName := "my signal name" - input := payloads.EncodeString("test input") - requestID := uuid.New() - sRequest = &historyservice.SignalWithStartWorkflowExecutionRequest{ - NamespaceId: 
namespaceID.String(), - SignalWithStartRequest: &workflowservice.SignalWithStartWorkflowExecutionRequest{ - Namespace: namespaceID.String(), - WorkflowId: workflowID, - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueue}, - Identity: identity, - SignalName: signalName, - Input: input, - RequestId: requestID, - }, - } - - ms := workflow.TestLocalMutableState(s.historyEngine.shard, s.mockEventsCache, tests.LocalNamespaceEntry, - log.NewTestLogger(), runID) - addWorkflowExecutionStartedEvent(ms, commonpb.WorkflowExecution{ - WorkflowId: workflowID, - RunId: runID, - }, "wType", "testTaskQueue", payloads.EncodeString("input"), 25*time.Second, 20*time.Second, 200*time.Second, identity) - _ = addWorkflowTaskScheduledEvent(ms) - wfMs := workflow.TestCloneToProto(ms) - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - gceResponse := &persistence.GetCurrentExecutionResponse{RunID: runID} - - s.mockExecutionMgr.EXPECT().GetCurrentExecution(gomock.Any(), gomock.Any()).Return(gceResponse, nil) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) - - resp, err := s.historyEngine.SignalWithStartWorkflowExecution(context.Background(), sRequest) - s.Nil(err) - s.Equal(runID, resp.GetRunId()) -} - -func (s *engine3Suite) TestSignalWithStartWorkflowExecution_WorkflowNotExist() { - testNamespaceEntry := namespace.NewLocalNamespaceForTest( - &persistencespb.NamespaceInfo{Id: tests.NamespaceID.String()}, &persistencespb.NamespaceConfig{Retention: timestamp.DurationFromDays(1)}, "", - ) - s.mockNamespaceCache.EXPECT().GetNamespaceByID(gomock.Any()).Return(testNamespaceEntry, nil).AnyTimes() - s.mockNamespaceCache.EXPECT().GetNamespace(gomock.Any()).Return(testNamespaceEntry, nil).AnyTimes() - - sRequest := &historyservice.SignalWithStartWorkflowExecutionRequest{} - _, err := s.historyEngine.SignalWithStartWorkflowExecution(context.Background(), sRequest) - s.EqualError(err, "Missing namespace UUID.") - - namespaceID := tests.NamespaceID - workflowID := "wId" - workflowType := "workflowType" - taskQueue := "testTaskQueue" - identity := "testIdentity" - signalName := "my signal name" - input := payloads.EncodeString("test input") - requestID := uuid.New() - sRequest = &historyservice.SignalWithStartWorkflowExecutionRequest{ - NamespaceId: namespaceID.String(), - SignalWithStartRequest: &workflowservice.SignalWithStartWorkflowExecutionRequest{ - Namespace: namespaceID.String(), - WorkflowId: workflowID, - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueue}, - WorkflowExecutionTimeout: timestamp.DurationPtr(1 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(2 * time.Second), - Identity: identity, - SignalName: signalName, - Input: input, - RequestId: requestID, - }, - } - - notExistErr := serviceerror.NewNotFound("Workflow not exist") - - s.mockExecutionMgr.EXPECT().GetCurrentExecution(gomock.Any(), gomock.Any()).Return(nil, notExistErr) - s.mockExecutionMgr.EXPECT().CreateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.CreateWorkflowExecutionResponse, nil) - - resp, err := s.historyEngine.SignalWithStartWorkflowExecution(context.Background(), sRequest) - s.Nil(err) - s.NotNil(resp.GetRunId()) -} diff -Nru temporal-1.21.5-1/src/service/history/historyEngineFactory.go 
temporal-1.22.5/src/service/history/historyEngineFactory.go --- temporal-1.21.5-1/src/service/history/historyEngineFactory.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/historyEngineFactory.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,93 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package history - -import ( - "go.opentelemetry.io/otel/trace" - "go.uber.org/fx" - - "go.temporal.io/server/client" - "go.temporal.io/server/common/persistence/serialization" - "go.temporal.io/server/common/persistence/visibility/manager" - "go.temporal.io/server/common/resource" - "go.temporal.io/server/common/sdk" - "go.temporal.io/server/service/history/api" - "go.temporal.io/server/service/history/configs" - "go.temporal.io/server/service/history/events" - "go.temporal.io/server/service/history/replication" - "go.temporal.io/server/service/history/shard" - wcache "go.temporal.io/server/service/history/workflow/cache" - "go.temporal.io/server/service/worker/archiver" -) - -type ( - HistoryEngineFactoryParams struct { - fx.In - - ClientBean client.Bean - MatchingClient resource.MatchingClient - SdkClientFactory sdk.ClientFactory - EventNotifier events.Notifier - Config *configs.Config - RawMatchingClient resource.MatchingRawClient - NewCacheFn wcache.NewCacheFn - ArchivalClient archiver.Client - EventSerializer serialization.Serializer - QueueFactories []QueueFactory `group:"queueFactory"` - ReplicationTaskFetcherFactory replication.TaskFetcherFactory - ReplicationTaskExecutorProvider replication.TaskExecutorProvider - TracerProvider trace.TracerProvider - PersistenceVisibilityMgr manager.VisibilityManager - } - - historyEngineFactory struct { - HistoryEngineFactoryParams - } -) - -func (f *historyEngineFactory) CreateEngine( - shard shard.Context, -) shard.Engine { - workflowCache := f.NewCacheFn(shard) - workflowConsistencyChecker := api.NewWorkflowConsistencyChecker(shard, workflowCache) - return NewEngineWithShardContext( - shard, - f.ClientBean, - f.MatchingClient, - f.SdkClientFactory, - f.EventNotifier, - f.Config, - f.RawMatchingClient, - workflowCache, - f.ArchivalClient, - f.EventSerializer, - f.QueueFactories, - f.ReplicationTaskFetcherFactory, - f.ReplicationTaskExecutorProvider, - workflowConsistencyChecker, - f.TracerProvider, - f.PersistenceVisibilityMgr, - ) -} diff -Nru 
temporal-1.21.5-1/src/service/history/historyEngine_test.go temporal-1.22.5/src/service/history/historyEngine_test.go --- temporal-1.21.5-1/src/service/history/historyEngine_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/historyEngine_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,5609 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package history - -import ( - "context" - "encoding/json" - "errors" - "strings" - "sync" - "testing" - "time" - - "github.com/golang/mock/gomock" - "github.com/pborman/uuid" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - commandpb "go.temporal.io/api/command/v1" - commonpb "go.temporal.io/api/common/v1" - enumspb "go.temporal.io/api/enums/v1" - failurepb "go.temporal.io/api/failure/v1" - historypb "go.temporal.io/api/history/v1" - querypb "go.temporal.io/api/query/v1" - "go.temporal.io/api/serviceerror" - taskqueuepb "go.temporal.io/api/taskqueue/v1" - "go.temporal.io/api/workflowservice/v1" - "google.golang.org/grpc" - - clockspb "go.temporal.io/server/api/clock/v1" - enumsspb "go.temporal.io/server/api/enums/v1" - historyspb "go.temporal.io/server/api/history/v1" - "go.temporal.io/server/api/historyservice/v1" - "go.temporal.io/server/api/historyservicemock/v1" - "go.temporal.io/server/api/matchingservice/v1" - "go.temporal.io/server/api/matchingservicemock/v1" - persistencespb "go.temporal.io/server/api/persistence/v1" - tokenspb "go.temporal.io/server/api/token/v1" - workflowspb "go.temporal.io/server/api/workflow/v1" - "go.temporal.io/server/common" - "go.temporal.io/server/common/clock" - "go.temporal.io/server/common/cluster" - "go.temporal.io/server/common/failure" - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/namespace" - "go.temporal.io/server/common/payload" - "go.temporal.io/server/common/payloads" - "go.temporal.io/server/common/persistence" - "go.temporal.io/server/common/persistence/versionhistory" - "go.temporal.io/server/common/primitives/timestamp" - "go.temporal.io/server/common/rpc/interceptor" - "go.temporal.io/server/common/searchattribute" - "go.temporal.io/server/service/history/api" - "go.temporal.io/server/service/history/configs" - "go.temporal.io/server/service/history/consts" - "go.temporal.io/server/service/history/events" - 
"go.temporal.io/server/service/history/ndc" - "go.temporal.io/server/service/history/queues" - "go.temporal.io/server/service/history/shard" - "go.temporal.io/server/service/history/tasks" - "go.temporal.io/server/service/history/tests" - "go.temporal.io/server/service/history/workflow" - wcache "go.temporal.io/server/service/history/workflow/cache" -) - -type ( - engineSuite struct { - suite.Suite - *require.Assertions - - controller *gomock.Controller - mockShard *shard.ContextTest - mockTxProcessor *queues.MockQueue - mockTimerProcessor *queues.MockQueue - mockVisibilityProcessor *queues.MockQueue - mockArchivalProcessor *queues.MockQueue - mockMemoryScheduledQueue *queues.MockQueue - mockNamespaceCache *namespace.MockRegistry - mockMatchingClient *matchingservicemock.MockMatchingServiceClient - mockHistoryClient *historyservicemock.MockHistoryServiceClient - mockClusterMetadata *cluster.MockMetadata - mockEventsReapplier *ndc.MockEventsReapplier - mockWorkflowResetter *ndc.MockWorkflowResetter - - workflowCache wcache.Cache - mockHistoryEngine *historyEngineImpl - mockExecutionMgr *persistence.MockExecutionManager - mockShardManager *persistence.MockShardManager - - eventsCache events.Cache - config *configs.Config - } -) - -func TestEngineSuite(t *testing.T) { - s := new(engineSuite) - suite.Run(t, s) -} - -func (s *engineSuite) SetupSuite() { - -} - -func (s *engineSuite) TearDownSuite() { -} - -func (s *engineSuite) SetupTest() { - s.Assertions = require.New(s.T()) - - s.controller = gomock.NewController(s.T()) - s.mockEventsReapplier = ndc.NewMockEventsReapplier(s.controller) - s.mockWorkflowResetter = ndc.NewMockWorkflowResetter(s.controller) - s.mockTxProcessor = queues.NewMockQueue(s.controller) - s.mockTimerProcessor = queues.NewMockQueue(s.controller) - s.mockVisibilityProcessor = queues.NewMockQueue(s.controller) - s.mockArchivalProcessor = queues.NewMockQueue(s.controller) - s.mockMemoryScheduledQueue = queues.NewMockQueue(s.controller) - s.mockTxProcessor.EXPECT().Category().Return(tasks.CategoryTransfer).AnyTimes() - s.mockTimerProcessor.EXPECT().Category().Return(tasks.CategoryTimer).AnyTimes() - s.mockVisibilityProcessor.EXPECT().Category().Return(tasks.CategoryVisibility).AnyTimes() - s.mockArchivalProcessor.EXPECT().Category().Return(tasks.CategoryArchival).AnyTimes() - s.mockMemoryScheduledQueue.EXPECT().Category().Return(tasks.CategoryMemoryTimer).AnyTimes() - s.mockTxProcessor.EXPECT().NotifyNewTasks(gomock.Any()).AnyTimes() - s.mockTimerProcessor.EXPECT().NotifyNewTasks(gomock.Any()).AnyTimes() - s.mockVisibilityProcessor.EXPECT().NotifyNewTasks(gomock.Any()).AnyTimes() - s.mockArchivalProcessor.EXPECT().NotifyNewTasks(gomock.Any()).AnyTimes() - s.mockMemoryScheduledQueue.EXPECT().NotifyNewTasks(gomock.Any()).AnyTimes() - - s.config = tests.NewDynamicConfig() - s.mockShard = shard.NewTestContext( - s.controller, - &persistencespb.ShardInfo{ - ShardId: 1, - RangeId: 1, - }, - s.config, - ) - s.workflowCache = wcache.NewCache(s.mockShard) - s.mockShard.Resource.ShardMgr.EXPECT().AssertShardOwnership(gomock.Any(), gomock.Any()).AnyTimes() - - s.eventsCache = events.NewEventsCache( - s.mockShard.GetShardID(), - s.mockShard.GetConfig().EventsCacheInitialSize(), - s.mockShard.GetConfig().EventsCacheMaxSize(), - s.mockShard.GetConfig().EventsCacheTTL(), - s.mockShard.GetExecutionManager(), - false, - s.mockShard.GetLogger(), - s.mockShard.GetMetricsHandler(), - ) - s.mockShard.SetEventsCacheForTesting(s.eventsCache) - - s.mockMatchingClient = 
s.mockShard.Resource.MatchingClient - s.mockHistoryClient = s.mockShard.Resource.HistoryClient - s.mockExecutionMgr = s.mockShard.Resource.ExecutionMgr - s.mockShardManager = s.mockShard.Resource.ShardMgr - s.mockClusterMetadata = s.mockShard.Resource.ClusterMetadata - s.mockNamespaceCache = s.mockShard.Resource.NamespaceCache - s.mockClusterMetadata.EXPECT().IsGlobalNamespaceEnabled().Return(false).AnyTimes() - s.mockClusterMetadata.EXPECT().GetClusterID().Return(cluster.TestCurrentClusterInitialFailoverVersion).AnyTimes() - s.mockClusterMetadata.EXPECT().GetCurrentClusterName().Return(cluster.TestCurrentClusterName).AnyTimes() - s.mockClusterMetadata.EXPECT().GetAllClusterInfo().Return(cluster.TestSingleDCClusterInfo).AnyTimes() - s.mockClusterMetadata.EXPECT().ClusterNameForFailoverVersion(false, common.EmptyVersion).Return(cluster.TestCurrentClusterName).AnyTimes() - s.mockNamespaceCache.EXPECT().GetNamespaceByID(tests.NamespaceID).Return(tests.LocalNamespaceEntry, nil).AnyTimes() - s.mockNamespaceCache.EXPECT().GetNamespace(tests.Namespace).Return(tests.LocalNamespaceEntry, nil).AnyTimes() - - eventNotifier := events.NewNotifier( - clock.NewRealTimeSource(), - s.mockShard.Resource.MetricsHandler, - func(namespaceID namespace.ID, workflowID string) int32 { - key := namespaceID.String() + "_" + workflowID - return int32(len(key)) - }, - ) - - h := &historyEngineImpl{ - currentClusterName: s.mockShard.GetClusterMetadata().GetCurrentClusterName(), - shard: s.mockShard, - clusterMetadata: s.mockClusterMetadata, - executionManager: s.mockExecutionMgr, - logger: s.mockShard.GetLogger(), - metricsHandler: s.mockShard.GetMetricsHandler(), - tokenSerializer: common.NewProtoTaskTokenSerializer(), - eventNotifier: eventNotifier, - config: s.config, - queueProcessors: map[tasks.Category]queues.Queue{ - s.mockTxProcessor.Category(): s.mockTxProcessor, - s.mockTimerProcessor.Category(): s.mockTimerProcessor, - s.mockVisibilityProcessor.Category(): s.mockVisibilityProcessor, - s.mockArchivalProcessor.Category(): s.mockArchivalProcessor, - s.mockMemoryScheduledQueue.Category(): s.mockMemoryScheduledQueue, - }, - eventsReapplier: s.mockEventsReapplier, - workflowResetter: s.mockWorkflowResetter, - workflowConsistencyChecker: api.NewWorkflowConsistencyChecker(s.mockShard, s.workflowCache), - throttledLogger: log.NewNoopLogger(), - } - s.mockShard.SetEngineForTesting(h) - h.workflowTaskHandler = newWorkflowTaskHandlerCallback(h) - - h.eventNotifier.Start() - - s.mockHistoryEngine = h -} - -func (s *engineSuite) TearDownTest() { - s.controller.Finish() - s.mockShard.StopForTest() - s.mockHistoryEngine.eventNotifier.Stop() -} - -func (s *engineSuite) TestGetMutableStateSync() { - ctx := context.Background() - - execution := commonpb.WorkflowExecution{ - WorkflowId: "test-get-workflow-execution-event-id", - RunId: tests.RunID, - } - taskqueue := "testTaskQueue" - identity := "testIdentity" - - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, tests.LocalNamespaceEntry, - log.NewTestLogger(), execution.GetRunId()) - addWorkflowExecutionStartedEvent(ms, execution, "wType", taskqueue, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) - wt := addWorkflowTaskScheduledEvent(ms) - addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, taskqueue, identity) - wfMs := workflow.TestCloneToProto(ms) - gweResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - // right now the next event ID is 4 - 
s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gweResponse, nil) - - // test get the next event ID instantly - response, err := s.mockHistoryEngine.GetMutableState(ctx, &historyservice.GetMutableStateRequest{ - NamespaceId: tests.NamespaceID.String(), - Execution: &execution, - }) - s.Nil(err) - s.Equal(int64(4), response.GetNextEventId()) - s.Equal(tests.RunID, response.GetFirstExecutionRunId()) -} - -func (s *engineSuite) TestGetMutableState_IntestRunID() { - ctx := context.Background() - - execution := commonpb.WorkflowExecution{ - WorkflowId: "test-get-workflow-execution-event-id", - RunId: "run-id-not-valid-uuid", - } - - _, err := s.mockHistoryEngine.GetMutableState(ctx, &historyservice.GetMutableStateRequest{ - NamespaceId: tests.NamespaceID.String(), - Execution: &execution, - }) - s.Equal(errRunIDNotValid, err) -} - -func (s *engineSuite) TestGetMutableState_EmptyRunID() { - ctx := context.Background() - - execution := commonpb.WorkflowExecution{ - WorkflowId: "test-get-workflow-execution-event-id", - } - - s.mockExecutionMgr.EXPECT().GetCurrentExecution(gomock.Any(), gomock.Any()).Return(nil, serviceerror.NewNotFound("")) - - _, err := s.mockHistoryEngine.GetMutableState(ctx, &historyservice.GetMutableStateRequest{ - NamespaceId: tests.NamespaceID.String(), - Execution: &execution, - }) - s.IsType(&serviceerror.NotFound{}, err) -} - -func (s *engineSuite) TestGetMutableStateLongPoll() { - ctx := context.Background() - - namespaceID := tests.NamespaceID - execution := commonpb.WorkflowExecution{ - WorkflowId: "test-get-workflow-execution-event-id", - RunId: tests.RunID, - } - taskqueue := "testTaskQueue" - identity := "testIdentity" - - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, tests.LocalNamespaceEntry, - log.NewTestLogger(), execution.GetRunId()) - addWorkflowExecutionStartedEvent(ms, execution, "wType", taskqueue, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) - wt := addWorkflowTaskScheduledEvent(ms) - addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, taskqueue, identity) - wfMs := workflow.TestCloneToProto(ms) - gweResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - // right now the next event ID is 4 - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gweResponse, nil) - - // test long poll on next event ID change - waitGroup := &sync.WaitGroup{} - waitGroup.Add(1) - asycWorkflowUpdate := func(delay time.Duration) { - tt := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: execution.WorkflowId, - RunId: execution.RunId, - ScheduledEventId: 2, - } - taskToken, _ := tt.Marshal() - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) - - timer := time.NewTimer(delay) - - <-timer.C - _, err := s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ - NamespaceId: tests.NamespaceID.String(), - CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ - TaskToken: taskToken, - Identity: identity, - }, - }) - s.Nil(err) - waitGroup.Done() - // right now the next event ID is 5 - } - - // return immediately, since the expected next event ID appears - response, err := s.mockHistoryEngine.GetMutableState(ctx, &historyservice.GetMutableStateRequest{ - NamespaceId: tests.NamespaceID.String(), - Execution: &execution, - 
ExpectedNextEventId: 3, - }) - s.Nil(err) - s.Equal(int64(4), response.NextEventId) - - // long poll, new event happen before long poll timeout - go asycWorkflowUpdate(time.Second * 2) - start := time.Now().UTC() - pollResponse, err := s.mockHistoryEngine.PollMutableState(ctx, &historyservice.PollMutableStateRequest{ - NamespaceId: tests.NamespaceID.String(), - Execution: &execution, - ExpectedNextEventId: 4, - }) - s.True(time.Now().UTC().After(start.Add(time.Second * 1))) - s.Nil(err) - s.Equal(int64(5), pollResponse.GetNextEventId()) - waitGroup.Wait() -} - -func (s *engineSuite) TestGetMutableStateLongPoll_CurrentBranchChanged() { - ctx := context.Background() - - execution := commonpb.WorkflowExecution{ - WorkflowId: "test-get-workflow-execution-event-id", - RunId: tests.RunID, - } - taskqueue := "testTaskQueue" - identity := "testIdentity" - - ms := workflow.TestLocalMutableState( - s.mockHistoryEngine.shard, - s.eventsCache, - tests.LocalNamespaceEntry, - log.NewTestLogger(), - execution.GetRunId()) - addWorkflowExecutionStartedEvent(ms, execution, "wType", taskqueue, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) - wt := addWorkflowTaskScheduledEvent(ms) - addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, taskqueue, identity) - wfMs := workflow.TestCloneToProto(ms) - gweResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - // right now the next event ID is 4 - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gweResponse, nil) - - // test long poll on next event ID change - asyncBranchTokenUpdate := func(delay time.Duration) { - timer := time.NewTimer(delay) - <-timer.C - newExecution := &commonpb.WorkflowExecution{ - WorkflowId: execution.WorkflowId, - RunId: execution.RunId, - } - s.mockHistoryEngine.eventNotifier.NotifyNewHistoryEvent(events.NewNotification( - "tests.NamespaceID", - newExecution, - int64(1), - int64(0), - int64(4), - int64(1), - []byte{1}, - enumsspb.WORKFLOW_EXECUTION_STATE_CREATED, - enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING)) - } - - // return immediately, since the expected next event ID appears - response0, err := s.mockHistoryEngine.GetMutableState(ctx, &historyservice.GetMutableStateRequest{ - NamespaceId: tests.NamespaceID.String(), - Execution: &execution, - ExpectedNextEventId: 3, - }) - s.Nil(err) - s.Equal(int64(4), response0.GetNextEventId()) - - // long poll, new event happen before long poll timeout - go asyncBranchTokenUpdate(time.Second * 2) - start := time.Now().UTC() - response1, err := s.mockHistoryEngine.GetMutableState(ctx, &historyservice.GetMutableStateRequest{ - NamespaceId: tests.NamespaceID.String(), - Execution: &execution, - ExpectedNextEventId: 10, - }) - s.True(time.Now().UTC().After(start.Add(time.Second * 1))) - s.Nil(err) - s.Equal(response0.GetCurrentBranchToken(), response1.GetCurrentBranchToken()) -} - -func (s *engineSuite) TestGetMutableStateLongPollTimeout() { - ctx := context.Background() - - execution := commonpb.WorkflowExecution{ - WorkflowId: "test-get-workflow-execution-event-id", - RunId: tests.RunID, - } - taskqueue := "testTaskQueue" - identity := "testIdentity" - - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, - tests.LocalNamespaceEntry, log.NewTestLogger(), execution.GetRunId()) - addWorkflowExecutionStartedEvent(ms, execution, "wType", taskqueue, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) - wt := addWorkflowTaskScheduledEvent(ms) - 
addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, taskqueue, identity) - wfMs := workflow.TestCloneToProto(ms) - gweResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - // right now the next event ID is 4 - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gweResponse, nil) - - // long poll, no event happen after long poll timeout - response, err := s.mockHistoryEngine.GetMutableState(ctx, &historyservice.GetMutableStateRequest{ - NamespaceId: tests.NamespaceID.String(), - Execution: &execution, - ExpectedNextEventId: 4, - }) - s.Nil(err) - s.Equal(int64(4), response.GetNextEventId()) -} - -func (s *engineSuite) TestQueryWorkflow_RejectBasedOnCompleted() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "TestQueryWorkflow_RejectBasedOnCompleted", - RunId: tests.RunID, - } - taskqueue := "testTaskQueue" - identity := "testIdentity" - - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, tests.LocalNamespaceEntry, log.NewTestLogger(), execution.GetRunId()) - addWorkflowExecutionStartedEvent(ms, execution, "wType", taskqueue, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) - wt := addWorkflowTaskScheduledEvent(ms) - event := addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, taskqueue, identity) - wt.StartedEventID = event.GetEventId() - event = addWorkflowTaskCompletedEvent(&s.Suite, ms, wt.ScheduledEventID, wt.StartedEventID, "some random identity") - addCompleteWorkflowEvent(ms, event.GetEventId(), nil) - wfMs := workflow.TestCloneToProto(ms) - gweResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gweResponse, nil) - - request := &historyservice.QueryWorkflowRequest{ - NamespaceId: tests.NamespaceID.String(), - Request: &workflowservice.QueryWorkflowRequest{ - Execution: &execution, - Query: &querypb.WorkflowQuery{}, - QueryRejectCondition: enumspb.QUERY_REJECT_CONDITION_NOT_OPEN, - }, - } - resp, err := s.mockHistoryEngine.QueryWorkflow(context.Background(), request) - s.NoError(err) - s.Nil(resp.GetResponse().QueryResult) - s.NotNil(resp.GetResponse().QueryRejected) - s.Equal(enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED, resp.GetResponse().GetQueryRejected().GetStatus()) -} - -func (s *engineSuite) TestQueryWorkflow_RejectBasedOnFailed() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "TestQueryWorkflow_RejectBasedOnFailed", - RunId: tests.RunID, - } - taskqueue := "testTaskQueue" - identity := "testIdentity" - - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, tests.LocalNamespaceEntry, log.NewTestLogger(), execution.GetRunId()) - addWorkflowExecutionStartedEvent(ms, execution, "wType", taskqueue, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) - wt := addWorkflowTaskScheduledEvent(ms) - event := addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, taskqueue, identity) - wt.StartedEventID = event.GetEventId() - event = addWorkflowTaskCompletedEvent(&s.Suite, ms, wt.ScheduledEventID, wt.StartedEventID, "some random identity") - addFailWorkflowEvent(ms, event.GetEventId(), failure.NewServerFailure("failure reason", true), enumspb.RETRY_STATE_NON_RETRYABLE_FAILURE) - wfMs := workflow.TestCloneToProto(ms) - gweResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gweResponse, 
nil) - - request := &historyservice.QueryWorkflowRequest{ - NamespaceId: tests.NamespaceID.String(), - Request: &workflowservice.QueryWorkflowRequest{ - Execution: &execution, - Query: &querypb.WorkflowQuery{}, - QueryRejectCondition: enumspb.QUERY_REJECT_CONDITION_NOT_OPEN, - }, - } - resp, err := s.mockHistoryEngine.QueryWorkflow(context.Background(), request) - s.NoError(err) - s.Nil(resp.GetResponse().QueryResult) - s.NotNil(resp.GetResponse().QueryRejected) - s.Equal(enumspb.WORKFLOW_EXECUTION_STATUS_FAILED, resp.GetResponse().GetQueryRejected().GetStatus()) - - request = &historyservice.QueryWorkflowRequest{ - NamespaceId: tests.NamespaceID.String(), - Request: &workflowservice.QueryWorkflowRequest{ - Execution: &execution, - Query: &querypb.WorkflowQuery{}, - QueryRejectCondition: enumspb.QUERY_REJECT_CONDITION_NOT_COMPLETED_CLEANLY, - }, - } - resp, err = s.mockHistoryEngine.QueryWorkflow(context.Background(), request) - s.NoError(err) - s.Nil(resp.GetResponse().QueryResult) - s.NotNil(resp.GetResponse().QueryRejected) - s.Equal(enumspb.WORKFLOW_EXECUTION_STATUS_FAILED, resp.GetResponse().GetQueryRejected().GetStatus()) -} - -func (s *engineSuite) TestQueryWorkflow_DirectlyThroughMatching() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "TestQueryWorkflow_DirectlyThroughMatching", - RunId: tests.RunID, - } - taskqueue := "testTaskQueue" - identity := "testIdentity" - - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, tests.LocalNamespaceEntry, log.NewTestLogger(), execution.GetRunId()) - addWorkflowExecutionStartedEvent(ms, execution, "wType", taskqueue, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) - wt := addWorkflowTaskScheduledEvent(ms) - startedEvent := addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, taskqueue, identity) - addWorkflowTaskCompletedEvent(&s.Suite, ms, wt.ScheduledEventID, startedEvent.EventId, identity) - - wfMs := workflow.TestCloneToProto(ms) - gweResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gweResponse, nil) - s.mockMatchingClient.EXPECT().QueryWorkflow(gomock.Any(), gomock.Any()).Return(&matchingservice.QueryWorkflowResponse{QueryResult: payloads.EncodeBytes([]byte{1, 2, 3})}, nil) - s.mockHistoryEngine.matchingClient = s.mockMatchingClient - request := &historyservice.QueryWorkflowRequest{ - NamespaceId: tests.NamespaceID.String(), - Request: &workflowservice.QueryWorkflowRequest{ - Execution: &execution, - Query: &querypb.WorkflowQuery{}, - // since workflow is open this filter does not reject query - QueryRejectCondition: enumspb.QUERY_REJECT_CONDITION_NOT_OPEN, - }, - } - resp, err := s.mockHistoryEngine.QueryWorkflow(context.Background(), request) - s.NoError(err) - s.NotNil(resp.GetResponse().QueryResult) - s.Nil(resp.GetResponse().QueryRejected) - - var queryResult []byte - err = payloads.Decode(resp.GetResponse().GetQueryResult(), &queryResult) - s.NoError(err) - s.Equal([]byte{1, 2, 3}, queryResult) -} - -func (s *engineSuite) TestQueryWorkflow_WorkflowTaskDispatch_Timeout() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "TestQueryWorkflow_WorkflowTaskDispatch_Timeout", - RunId: tests.RunID, - } - taskqueue := "testTaskQueue" - identity := "testIdentity" - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, tests.LocalNamespaceEntry, log.NewTestLogger(), execution.GetRunId()) - addWorkflowExecutionStartedEvent(ms, 
execution, "wType", taskqueue, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) - wt := addWorkflowTaskScheduledEvent(ms) - startedEvent := addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, taskqueue, identity) - addWorkflowTaskCompletedEvent(&s.Suite, ms, wt.ScheduledEventID, startedEvent.EventId, identity) - wt = addWorkflowTaskScheduledEvent(ms) - addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, taskqueue, identity) - - wfMs := workflow.TestCloneToProto(ms) - gweResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gweResponse, nil) - request := &historyservice.QueryWorkflowRequest{ - NamespaceId: tests.NamespaceID.String(), - Request: &workflowservice.QueryWorkflowRequest{ - Execution: &execution, - Query: &querypb.WorkflowQuery{}, - // since workflow is open this filter does not reject query - QueryRejectCondition: enumspb.QUERY_REJECT_CONDITION_NOT_OPEN, - }, - } - - wg := &sync.WaitGroup{} - wg.Add(1) - go func() { - ctx, cancel := context.WithTimeout(context.Background(), time.Second*2) - defer cancel() - resp, err := s.mockHistoryEngine.QueryWorkflow(ctx, request) - s.Error(err) - s.Nil(resp) - wg.Done() - }() - - <-time.After(time.Second) - ms1 := s.getMutableState(tests.NamespaceID, execution) - s.NotNil(ms1) - qr := ms1.GetQueryRegistry() - s.True(qr.HasBufferedQuery()) - s.False(qr.HasCompletedQuery()) - s.False(qr.HasUnblockedQuery()) - s.False(qr.HasFailedQuery()) - wg.Wait() - s.False(qr.HasBufferedQuery()) - s.False(qr.HasCompletedQuery()) - s.False(qr.HasUnblockedQuery()) - s.False(qr.HasFailedQuery()) -} - -func (s *engineSuite) TestQueryWorkflow_ConsistentQueryBufferFull() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "TestQueryWorkflow_ConsistentQueryBufferFull", - RunId: tests.RunID, - } - taskqueue := "testTaskQueue" - identity := "testIdentity" - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, tests.LocalNamespaceEntry, log.NewTestLogger(), execution.GetRunId()) - addWorkflowExecutionStartedEvent(ms, execution, "wType", taskqueue, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) - wt := addWorkflowTaskScheduledEvent(ms) - startedEvent := addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, taskqueue, identity) - addWorkflowTaskCompletedEvent(&s.Suite, ms, wt.ScheduledEventID, startedEvent.EventId, identity) - wt = addWorkflowTaskScheduledEvent(ms) - addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, taskqueue, identity) - - wfMs := workflow.TestCloneToProto(ms) - gweResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gweResponse, nil) - - // buffer query so that when history.QueryWorkflow is called buffer is already full - ctx, release, err := s.workflowCache.GetOrCreateWorkflowExecution( - context.Background(), - tests.NamespaceID, - execution, - workflow.LockPriorityHigh, - ) - s.NoError(err) - loadedMS, err := ctx.LoadMutableState(context.Background()) - s.NoError(err) - qr := workflow.NewQueryRegistry() - qr.BufferQuery(&querypb.WorkflowQuery{}) - loadedMS.(*workflow.MutableStateImpl).QueryRegistry = qr - release(nil) - - request := &historyservice.QueryWorkflowRequest{ - NamespaceId: tests.NamespaceID.String(), - Request: &workflowservice.QueryWorkflowRequest{ - Execution: &execution, - Query: &querypb.WorkflowQuery{}, - }, - } 
- resp, err := s.mockHistoryEngine.QueryWorkflow(context.Background(), request) - s.Nil(resp) - s.Equal(consts.ErrConsistentQueryBufferExceeded, err) -} - -func (s *engineSuite) TestQueryWorkflow_WorkflowTaskDispatch_Complete() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "TestQueryWorkflow_WorkflowTaskDispatch_Complete", - RunId: tests.RunID, - } - taskqueue := "testTaskQueue" - identity := "testIdentity" - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, tests.LocalNamespaceEntry, log.NewTestLogger(), execution.GetRunId()) - addWorkflowExecutionStartedEvent(ms, execution, "wType", taskqueue, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) - wt := addWorkflowTaskScheduledEvent(ms) - startedEvent := addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, taskqueue, identity) - addWorkflowTaskCompletedEvent(&s.Suite, ms, wt.ScheduledEventID, startedEvent.EventId, identity) - wt = addWorkflowTaskScheduledEvent(ms) - addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, taskqueue, identity) - - wfMs := workflow.TestCloneToProto(ms) - gweResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gweResponse, nil) - - waitGroup := &sync.WaitGroup{} - waitGroup.Add(1) - asyncQueryUpdate := func(delay time.Duration, answer []byte) { - defer waitGroup.Done() - <-time.After(delay) - ms1 := s.getMutableState(tests.NamespaceID, execution) - s.NotNil(ms1) - qr := ms1.GetQueryRegistry() - buffered := qr.GetBufferedIDs() - for _, id := range buffered { - resultType := enumspb.QUERY_RESULT_TYPE_ANSWERED - succeededCompletionState := &workflow.QueryCompletionState{ - Type: workflow.QueryCompletionTypeSucceeded, - Result: &querypb.WorkflowQueryResult{ - ResultType: resultType, - Answer: payloads.EncodeBytes(answer), - }, - } - err := qr.SetCompletionState(id, succeededCompletionState) - s.NoError(err) - state, err := qr.GetCompletionState(id) - s.NoError(err) - s.Equal(workflow.QueryCompletionTypeSucceeded, state.Type) - } - } - - request := &historyservice.QueryWorkflowRequest{ - NamespaceId: tests.NamespaceID.String(), - Request: &workflowservice.QueryWorkflowRequest{ - Execution: &execution, - Query: &querypb.WorkflowQuery{}, - }, - } - go asyncQueryUpdate(time.Second*2, []byte{1, 2, 3}) - start := time.Now().UTC() - resp, err := s.mockHistoryEngine.QueryWorkflow(context.Background(), request) - s.True(time.Now().UTC().After(start.Add(time.Second))) - s.NoError(err) - - var queryResult []byte - err = payloads.Decode(resp.GetResponse().GetQueryResult(), &queryResult) - s.NoError(err) - s.Equal([]byte{1, 2, 3}, queryResult) - - ms1 := s.getMutableState(tests.NamespaceID, execution) - s.NotNil(ms1) - qr := ms1.GetQueryRegistry() - s.False(qr.HasBufferedQuery()) - s.False(qr.HasCompletedQuery()) - waitGroup.Wait() -} - -func (s *engineSuite) TestQueryWorkflow_WorkflowTaskDispatch_Unblocked() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "TestQueryWorkflow_WorkflowTaskDispatch_Unblocked", - RunId: tests.RunID, - } - taskqueue := "testTaskQueue" - identity := "testIdentity" - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, tests.LocalNamespaceEntry, log.NewTestLogger(), execution.GetRunId()) - addWorkflowExecutionStartedEvent(ms, execution, "wType", taskqueue, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) - wt := addWorkflowTaskScheduledEvent(ms) 
- startedEvent := addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, taskqueue, identity) - addWorkflowTaskCompletedEvent(&s.Suite, ms, wt.ScheduledEventID, startedEvent.EventId, identity) - wt = addWorkflowTaskScheduledEvent(ms) - addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, taskqueue, identity) - - wfMs := workflow.TestCloneToProto(ms) - gweResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gweResponse, nil) - s.mockMatchingClient.EXPECT().QueryWorkflow(gomock.Any(), gomock.Any()).Return(&matchingservice.QueryWorkflowResponse{QueryResult: payloads.EncodeBytes([]byte{1, 2, 3})}, nil) - s.mockHistoryEngine.matchingClient = s.mockMatchingClient - waitGroup := &sync.WaitGroup{} - waitGroup.Add(1) - asyncQueryUpdate := func(delay time.Duration, answer []byte) { - defer waitGroup.Done() - <-time.After(delay) - ms1 := s.getMutableState(tests.NamespaceID, execution) - s.NotNil(ms1) - qr := ms1.GetQueryRegistry() - buffered := qr.GetBufferedIDs() - for _, id := range buffered { - s.NoError(qr.SetCompletionState(id, &workflow.QueryCompletionState{Type: workflow.QueryCompletionTypeUnblocked})) - state, err := qr.GetCompletionState(id) - s.NoError(err) - s.Equal(workflow.QueryCompletionTypeUnblocked, state.Type) - } - } - - request := &historyservice.QueryWorkflowRequest{ - NamespaceId: tests.NamespaceID.String(), - Request: &workflowservice.QueryWorkflowRequest{ - Execution: &execution, - Query: &querypb.WorkflowQuery{}, - }, - } - go asyncQueryUpdate(time.Second*2, []byte{1, 2, 3}) - start := time.Now().UTC() - resp, err := s.mockHistoryEngine.QueryWorkflow(context.Background(), request) - s.True(time.Now().UTC().After(start.Add(time.Second))) - s.NoError(err) - - var queryResult []byte - err = payloads.Decode(resp.GetResponse().GetQueryResult(), &queryResult) - s.NoError(err) - s.Equal([]byte{1, 2, 3}, queryResult) - - ms1 := s.getMutableState(tests.NamespaceID, execution) - s.NotNil(ms1) - qr := ms1.GetQueryRegistry() - s.False(qr.HasBufferedQuery()) - s.False(qr.HasCompletedQuery()) - s.False(qr.HasUnblockedQuery()) - waitGroup.Wait() -} - -func (s *engineSuite) TestRespondWorkflowTaskCompletedInvalidToken() { - - invalidToken, _ := json.Marshal("bad token") - identity := "testIdentity" - - _, err := s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ - NamespaceId: tests.NamespaceID.String(), - CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ - TaskToken: invalidToken, - Commands: nil, - Identity: identity, - }, - }) - - s.NotNil(err) - s.IsType(&serviceerror.InvalidArgument{}, err) -} - -func (s *engineSuite) TestRespondWorkflowTaskCompletedIfNoExecution() { - namespaceID := tests.NamespaceID - tt := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - ScheduledEventId: 2, - } - taskToken, _ := tt.Marshal() - identity := "testIdentity" - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(nil, serviceerror.NewNotFound("")) - - _, err := s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ - NamespaceId: tests.NamespaceID.String(), - CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ - TaskToken: taskToken, - Identity: identity, - }, - }) - s.NotNil(err) - s.IsType(&serviceerror.NotFound{}, err) -} - 
-func (s *engineSuite) TestRespondWorkflowTaskCompletedIfGetExecutionFailed() { - namespaceID := tests.NamespaceID - tt := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - ScheduledEventId: 2, - } - taskToken, _ := tt.Marshal() - identity := "testIdentity" - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(nil, errors.New("FAILED")) - - _, err := s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ - NamespaceId: tests.NamespaceID.String(), - CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ - TaskToken: taskToken, - Identity: identity, - }, - }) - s.EqualError(err, "FAILED") -} - -func (s *engineSuite) TestRespondWorkflowTaskCompletedUpdateExecutionFailed() { - namespaceID := tests.NamespaceID - we := commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - } - tq := "testTaskQueue" - - tt := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: we.WorkflowId, - RunId: we.RunId, - ScheduledEventId: 2, - } - taskToken, _ := tt.Marshal() - identity := "testIdentity" - - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, - tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) - addWorkflowExecutionStartedEvent(ms, we, "wType", tq, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) - wt := addWorkflowTaskScheduledEvent(ms) - addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tq, identity) - - wfMs := workflow.TestCloneToProto(ms) - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, errors.New("FAILED")) - s.mockShardManager.EXPECT().UpdateShard(gomock.Any(), gomock.Any()).Return(nil).AnyTimes() // might be called in background goroutine - - _, err := s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ - NamespaceId: tests.NamespaceID.String(), - CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ - TaskToken: taskToken, - Identity: identity, - }, - }) - s.NotNil(err) - s.EqualError(err, "FAILED") -} - -func (s *engineSuite) TestRespondWorkflowTaskCompletedIfTaskCompleted() { - namespaceID := tests.NamespaceID - we := commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - } - tq := "testTaskQueue" - tt := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: we.WorkflowId, - RunId: we.RunId, - ScheduledEventId: 2, - } - taskToken, _ := tt.Marshal() - identity := "testIdentity" - - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, - tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) - addWorkflowExecutionStartedEvent(ms, we, "wType", tq, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) - wt := addWorkflowTaskScheduledEvent(ms) - startedEvent := addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tq, identity) - addWorkflowTaskCompletedEvent(&s.Suite, ms, wt.ScheduledEventID, startedEvent.EventId, identity) - - wfMs := workflow.TestCloneToProto(ms) - gwmsResponse := 
&persistence.GetWorkflowExecutionResponse{State: wfMs} - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - - _, err := s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ - NamespaceId: tests.NamespaceID.String(), - CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ - TaskToken: taskToken, - Identity: identity, - }, - }) - s.NotNil(err) - s.IsType(&serviceerror.NotFound{}, err) -} - -func (s *engineSuite) TestRespondWorkflowTaskCompletedIfTaskNotStarted() { - namespaceID := tests.NamespaceID - we := commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - } - tq := "testTaskQueue" - tt := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: we.WorkflowId, - RunId: we.RunId, - ScheduledEventId: 2, - } - taskToken, _ := tt.Marshal() - identity := "testIdentity" - - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, - tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) - addWorkflowExecutionStartedEvent(ms, we, "wType", tq, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) - addWorkflowTaskScheduledEvent(ms) - - wfMs := workflow.TestCloneToProto(ms) - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - - _, err := s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ - NamespaceId: tests.NamespaceID.String(), - CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ - TaskToken: taskToken, - }, - }) - s.NotNil(err) - s.IsType(&serviceerror.NotFound{}, err) -} - -func (s *engineSuite) TestRespondWorkflowTaskCompletedConflictOnUpdate() { - namespaceID := tests.NamespaceID - we := commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - } - tq := "testTaskQueue" - identity := "testIdentity" - activity1ID := "activity1" - activity1Type := "activity_type1" - activity1Input := payloads.EncodeString("input1") - activity1Result := payloads.EncodeString("activity1_result") - activity2ID := "activity2" - activity2Type := "activity_type2" - activity2Input := payloads.EncodeString("input2") - activity2Result := payloads.EncodeString("activity2_result") - activity3ID := "activity3" - activity3Type := "activity_type3" - activity3Input := payloads.EncodeString("input3") - - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, - tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) - addWorkflowExecutionStartedEvent(ms, we, "wType", tq, payloads.EncodeString("input"), 100*time.Second, 100*time.Second, 100*time.Second, identity) - wt1 := addWorkflowTaskScheduledEvent(ms) - workflowTaskStartedEvent1 := addWorkflowTaskStartedEvent(ms, wt1.ScheduledEventID, tq, identity) - workflowTaskCompletedEvent1 := addWorkflowTaskCompletedEvent(&s.Suite, ms, wt1.ScheduledEventID, workflowTaskStartedEvent1.EventId, identity) - activity1ScheduledEvent, _ := addActivityTaskScheduledEvent(ms, workflowTaskCompletedEvent1.EventId, activity1ID, activity1Type, tq, activity1Input, 100*time.Second, 10*time.Second, 1*time.Second, 5*time.Second) - activity2ScheduledEvent, _ := addActivityTaskScheduledEvent(ms, workflowTaskCompletedEvent1.EventId, activity2ID, activity2Type, tq, 
activity2Input, 100*time.Second, 10*time.Second, 1*time.Second, 5*time.Second) - activity1StartedEvent := addActivityTaskStartedEvent(ms, activity1ScheduledEvent.EventId, identity) - activity2StartedEvent := addActivityTaskStartedEvent(ms, activity2ScheduledEvent.EventId, identity) - addActivityTaskCompletedEvent(ms, activity1ScheduledEvent.EventId, - activity1StartedEvent.EventId, activity1Result, identity) - wt2 := addWorkflowTaskScheduledEvent(ms) - addWorkflowTaskStartedEvent(ms, wt2.ScheduledEventID, tq, identity) - - tt := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: tests.WorkflowID, - RunId: we.GetRunId(), - ScheduledEventId: wt2.ScheduledEventID, - } - taskToken, _ := tt.Marshal() - - commands := []*commandpb.Command{{ - CommandType: enumspb.COMMAND_TYPE_SCHEDULE_ACTIVITY_TASK, - Attributes: &commandpb.Command_ScheduleActivityTaskCommandAttributes{ScheduleActivityTaskCommandAttributes: &commandpb.ScheduleActivityTaskCommandAttributes{ - ActivityId: activity3ID, - ActivityType: &commonpb.ActivityType{Name: activity3Type}, - TaskQueue: &taskqueuepb.TaskQueue{Name: tq}, - Input: activity3Input, - ScheduleToCloseTimeout: timestamp.DurationPtr(100 * time.Second), - ScheduleToStartTimeout: timestamp.DurationPtr(10 * time.Second), - StartToCloseTimeout: timestamp.DurationPtr(50 * time.Second), - HeartbeatTimeout: timestamp.DurationPtr(5 * time.Second), - }}, - }} - - wfMs := workflow.TestCloneToProto(ms) - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - - addActivityTaskCompletedEvent(ms, activity2ScheduledEvent.EventId, - activity2StartedEvent.EventId, activity2Result, identity) - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, &persistence.ConditionFailedError{}) - - _, err := s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ - NamespaceId: tests.NamespaceID.String(), - CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ - TaskToken: taskToken, - Commands: commands, - Identity: identity, - }, - }) - s.Error(err) - s.Equal(&persistence.ConditionFailedError{}, err) -} - -func (s *engineSuite) TestValidateSignalRequest() { - workflowType := "testType" - input := payloads.EncodeString("input") - startRequest := &workflowservice.StartWorkflowExecutionRequest{ - WorkflowId: "ID", - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: "taskptr"}, - Input: input, - WorkflowExecutionTimeout: timestamp.DurationPtr(20 * time.Second), - WorkflowRunTimeout: timestamp.DurationPtr(10 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(10 * time.Second), - Identity: "identity", - } - err := api.ValidateStartWorkflowExecutionRequest( - context.Background(), startRequest, s.mockHistoryEngine.shard, tests.LocalNamespaceEntry, "SignalWithStartWorkflowExecution") - s.Error(err, "startRequest doesn't have request id, it should error out") - - startRequest.RequestId = "request-id" - startRequest.Memo = &commonpb.Memo{Fields: map[string]*commonpb.Payload{ - "data": payload.EncodeBytes(make([]byte, 4*1024*1024)), - }} - err = api.ValidateStartWorkflowExecutionRequest( - context.Background(), startRequest, s.mockHistoryEngine.shard, tests.LocalNamespaceEntry, "SignalWithStartWorkflowExecution") - s.Error(err, "memo should 
be too big") -} - -func (s *engineSuite) TestRespondWorkflowTaskCompleted_StaleCache() { - namespaceID := tests.NamespaceID - we := commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - } - tl := "testTaskQueue" - tt := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: we.WorkflowId, - RunId: we.RunId, - ScheduledEventId: 2, - } - tt.ScheduledEventId = 4 // Set it to 4 to emulate stale cache. - - taskToken, _ := tt.Marshal() - identity := "testIdentity" - input := payloads.EncodeString("input") - - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, - tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) - addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) - wt := addWorkflowTaskScheduledEvent(ms) - addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) - - commands := []*commandpb.Command{{ - CommandType: enumspb.COMMAND_TYPE_SCHEDULE_ACTIVITY_TASK, - Attributes: &commandpb.Command_ScheduleActivityTaskCommandAttributes{ScheduleActivityTaskCommandAttributes: &commandpb.ScheduleActivityTaskCommandAttributes{ - ActivityId: "activity1", - ActivityType: &commonpb.ActivityType{Name: "activity_type1"}, - TaskQueue: &taskqueuepb.TaskQueue{Name: tl}, - Input: input, - ScheduleToCloseTimeout: timestamp.DurationPtr(100 * time.Second), - ScheduleToStartTimeout: timestamp.DurationPtr(10 * time.Second), - StartToCloseTimeout: timestamp.DurationPtr(50 * time.Second), - HeartbeatTimeout: timestamp.DurationPtr(5 * time.Second), - }}, - }} - - wfMs := workflow.TestCloneToProto(ms) - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil).Times(2) - - _, err := s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ - NamespaceId: tests.NamespaceID.String(), - CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ - TaskToken: taskToken, - Commands: commands, - Identity: identity, - }, - }) - s.IsType(&serviceerror.NotFound{}, err) -} - -func (s *engineSuite) TestRespondWorkflowTaskCompletedCompleteWorkflowFailed() { - namespaceID := tests.NamespaceID - we := commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - } - tl := "testTaskQueue" - identity := "testIdentity" - activity1ID := "activity1" - activity1Type := "activity_type1" - activity1Input := payloads.EncodeString("input1") - activity1Result := payloads.EncodeString("activity1_result") - activity2ID := "activity2" - activity2Type := "activity_type2" - activity2Input := payloads.EncodeString("input2") - activity2Result := payloads.EncodeString("activity2_result") - workflowResult := payloads.EncodeString("workflow result") - - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, - tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) - addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 25*time.Second, 20*time.Second, 200*time.Second, identity) - wt1 := addWorkflowTaskScheduledEvent(ms) - workflowTaskStartedEvent1 := addWorkflowTaskStartedEvent(ms, wt1.ScheduledEventID, tl, identity) - workflowTaskCompletedEvent1 := addWorkflowTaskCompletedEvent(&s.Suite, ms, wt1.ScheduledEventID, workflowTaskStartedEvent1.EventId, identity) - activity1ScheduledEvent, _ 
:= addActivityTaskScheduledEvent(ms, workflowTaskCompletedEvent1.EventId, activity1ID, activity1Type, tl, activity1Input, 100*time.Second, 10*time.Second, 1*time.Second, 5*time.Second) - activity2ScheduledEvent, _ := addActivityTaskScheduledEvent(ms, workflowTaskCompletedEvent1.EventId, activity2ID, activity2Type, tl, activity2Input, 100*time.Second, 10*time.Second, 1*time.Second, 5*time.Second) - activity1StartedEvent := addActivityTaskStartedEvent(ms, activity1ScheduledEvent.EventId, identity) - activity2StartedEvent := addActivityTaskStartedEvent(ms, activity2ScheduledEvent.EventId, identity) - addActivityTaskCompletedEvent(ms, activity1ScheduledEvent.EventId, - activity1StartedEvent.EventId, activity1Result, identity) - wt2 := addWorkflowTaskScheduledEvent(ms) - addWorkflowTaskStartedEvent(ms, wt2.ScheduledEventID, tl, identity) - addActivityTaskCompletedEvent(ms, activity2ScheduledEvent.EventId, - activity2StartedEvent.EventId, activity2Result, identity) - - tt := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: we.WorkflowId, - RunId: we.RunId, - ScheduledEventId: wt2.ScheduledEventID, - } - taskToken, _ := tt.Marshal() - - commands := []*commandpb.Command{{ - CommandType: enumspb.COMMAND_TYPE_COMPLETE_WORKFLOW_EXECUTION, - Attributes: &commandpb.Command_CompleteWorkflowExecutionCommandAttributes{CompleteWorkflowExecutionCommandAttributes: &commandpb.CompleteWorkflowExecutionCommandAttributes{ - Result: workflowResult, - }}, - }} - - ms1 := workflow.TestCloneToProto(ms) - gwmsResponse1 := &persistence.GetWorkflowExecutionResponse{State: ms1} - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse1, nil) - - ms2 := common.CloneProto(ms1) - gwmsResponse2 := &persistence.GetWorkflowExecutionResponse{State: ms2} - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse2, nil) - - var updatedWorkflowMutation persistence.WorkflowMutation - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).DoAndReturn(func(_ context.Context, request *persistence.UpdateWorkflowExecutionRequest) (*persistence.UpdateWorkflowExecutionResponse, error) { - updatedWorkflowMutation = request.UpdateWorkflowMutation - return tests.UpdateWorkflowExecutionResponse, nil - }) - - _, err := s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ - NamespaceId: tests.NamespaceID.String(), - CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ - TaskToken: taskToken, - Commands: commands, - Identity: identity, - }, - }) - s.Error(err) - s.IsType(&serviceerror.InvalidArgument{}, err) - s.Equal("UnhandledCommand", err.Error()) - - s.NotNil(updatedWorkflowMutation) - s.Equal(int64(15), updatedWorkflowMutation.NextEventID) - s.Equal(workflowTaskStartedEvent1.EventId, updatedWorkflowMutation.ExecutionInfo.LastWorkflowTaskStartedEventId) - s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, updatedWorkflowMutation.ExecutionState.State) - s.Equal(updatedWorkflowMutation.NextEventID-1, updatedWorkflowMutation.ExecutionInfo.WorkflowTaskScheduledEventId) - s.Equal(int32(1), updatedWorkflowMutation.ExecutionInfo.Attempt) -} - -func (s *engineSuite) TestRespondWorkflowTaskCompletedFailWorkflowFailed() { - namespaceID := tests.NamespaceID - we := commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - } - tl := "testTaskQueue" - identity := "testIdentity" - activity1ID := 
"activity1" - activity1Type := "activity_type1" - activity1Input := payloads.EncodeString("input1") - activity1Result := payloads.EncodeString("activity1_result") - activity2ID := "activity2" - activity2Type := "activity_type2" - activity2Input := payloads.EncodeString("input2") - activity2Result := payloads.EncodeString("activity2_result") - reason := "workflow fail reason" - - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, - tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) - addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 25*time.Second, 20*time.Second, 200*time.Second, identity) - wt1 := addWorkflowTaskScheduledEvent(ms) - workflowTaskStartedEvent1 := addWorkflowTaskStartedEvent(ms, wt1.ScheduledEventID, tl, identity) - workflowTaskCompletedEvent1 := addWorkflowTaskCompletedEvent(&s.Suite, ms, wt1.ScheduledEventID, workflowTaskStartedEvent1.EventId, identity) - activity1ScheduledEvent, _ := addActivityTaskScheduledEvent(ms, workflowTaskCompletedEvent1.EventId, activity1ID, activity1Type, tl, activity1Input, 100*time.Second, 10*time.Second, 1*time.Second, 5*time.Second) - activity2ScheduledEvent, _ := addActivityTaskScheduledEvent(ms, workflowTaskCompletedEvent1.EventId, activity2ID, activity2Type, tl, activity2Input, 100*time.Second, 10*time.Second, 1*time.Second, 5*time.Second) - activity1StartedEvent := addActivityTaskStartedEvent(ms, activity1ScheduledEvent.EventId, identity) - activity2StartedEvent := addActivityTaskStartedEvent(ms, activity2ScheduledEvent.EventId, identity) - addActivityTaskCompletedEvent(ms, activity1ScheduledEvent.EventId, - activity1StartedEvent.EventId, activity1Result, identity) - wt2 := addWorkflowTaskScheduledEvent(ms) - addWorkflowTaskStartedEvent(ms, wt2.ScheduledEventID, tl, identity) - addActivityTaskCompletedEvent(ms, activity2ScheduledEvent.EventId, - activity2StartedEvent.EventId, activity2Result, identity) - - tt := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: we.WorkflowId, - RunId: we.RunId, - ScheduledEventId: wt2.ScheduledEventID, - } - taskToken, _ := tt.Marshal() - - commands := []*commandpb.Command{{ - CommandType: enumspb.COMMAND_TYPE_FAIL_WORKFLOW_EXECUTION, - Attributes: &commandpb.Command_FailWorkflowExecutionCommandAttributes{FailWorkflowExecutionCommandAttributes: &commandpb.FailWorkflowExecutionCommandAttributes{ - Failure: failure.NewServerFailure(reason, false), - }}, - }} - - ms1 := workflow.TestCloneToProto(ms) - gwmsResponse1 := &persistence.GetWorkflowExecutionResponse{State: ms1} - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse1, nil) - - ms2 := common.CloneProto(ms1) - gwmsResponse2 := &persistence.GetWorkflowExecutionResponse{State: ms2} - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse2, nil) - - var updatedWorkflowMutation persistence.WorkflowMutation - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).DoAndReturn(func(_ context.Context, request *persistence.UpdateWorkflowExecutionRequest) (*persistence.UpdateWorkflowExecutionResponse, error) { - updatedWorkflowMutation = request.UpdateWorkflowMutation - return tests.UpdateWorkflowExecutionResponse, nil - }) - - _, err := s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ - NamespaceId: tests.NamespaceID.String(), - CompleteRequest: 
&workflowservice.RespondWorkflowTaskCompletedRequest{ - TaskToken: taskToken, - Commands: commands, - Identity: identity, - }, - }) - s.Error(err) - s.IsType(&serviceerror.InvalidArgument{}, err) - s.Equal("UnhandledCommand", err.Error()) - - s.NotNil(updatedWorkflowMutation) - s.Equal(int64(15), updatedWorkflowMutation.NextEventID) - s.Equal(workflowTaskStartedEvent1.EventId, updatedWorkflowMutation.ExecutionInfo.LastWorkflowTaskStartedEventId) - s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, updatedWorkflowMutation.ExecutionState.State) - s.Equal(updatedWorkflowMutation.NextEventID-1, updatedWorkflowMutation.ExecutionInfo.WorkflowTaskScheduledEventId) - s.Equal(int32(1), updatedWorkflowMutation.ExecutionInfo.Attempt) -} - -func (s *engineSuite) TestRespondWorkflowTaskCompletedBadCommandAttributes() { - namespaceID := tests.NamespaceID - we := commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - } - tl := "testTaskQueue" - identity := "testIdentity" - activity1ID := "activity1" - activity1Type := "activity_type1" - activity1Input := payloads.EncodeString("input1") - activity1Result := payloads.EncodeString("activity1_result") - - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, - tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) - addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 25*time.Second, 20*time.Second, 200*time.Second, identity) - wt1 := addWorkflowTaskScheduledEvent(ms) - workflowTaskStartedEvent1 := addWorkflowTaskStartedEvent(ms, wt1.ScheduledEventID, tl, identity) - workflowTaskCompletedEvent1 := addWorkflowTaskCompletedEvent(&s.Suite, ms, wt1.ScheduledEventID, workflowTaskStartedEvent1.EventId, identity) - activity1ScheduledEvent, _ := addActivityTaskScheduledEvent(ms, workflowTaskCompletedEvent1.EventId, activity1ID, activity1Type, tl, activity1Input, 100*time.Second, 10*time.Second, 1*time.Second, 5*time.Second) - activity1StartedEvent := addActivityTaskStartedEvent(ms, activity1ScheduledEvent.EventId, identity) - addActivityTaskCompletedEvent(ms, activity1ScheduledEvent.EventId, - activity1StartedEvent.EventId, activity1Result, identity) - wt2 := addWorkflowTaskScheduledEvent(ms) - addWorkflowTaskStartedEvent(ms, wt2.ScheduledEventID, tl, identity) - - tt := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: we.WorkflowId, - RunId: we.RunId, - ScheduledEventId: wt2.ScheduledEventID, - } - taskToken, _ := tt.Marshal() - - // commands with nil attributes - commands := []*commandpb.Command{{ - CommandType: enumspb.COMMAND_TYPE_COMPLETE_WORKFLOW_EXECUTION, - }} - - gwmsResponse1 := &persistence.GetWorkflowExecutionResponse{State: workflow.TestCloneToProto(ms)} - gwmsResponse2 := &persistence.GetWorkflowExecutionResponse{State: workflow.TestCloneToProto(ms)} - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse1, nil) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse2, nil) - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) - - _, err := s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ - NamespaceId: tests.NamespaceID.String(), - CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ - TaskToken: taskToken, - Commands: commands, - Identity: identity, - }, - }) - 
s.Error(err) - s.IsType(&serviceerror.InvalidArgument{}, err) - s.Equal("BadCompleteWorkflowExecutionAttributes: CompleteWorkflowExecutionCommandAttributes is not set on command.", err.Error()) -} - -// This test unit tests the activity schedule timeout validation logic of HistoryEngine's RespondWorkflowTaskComplete function. -// A ScheduleActivityTask command and the corresponding ActivityTaskScheduledEvent have 3 timeouts: ScheduleToClose, ScheduleToStart and StartToClose. -// This test verifies that when either ScheduleToClose or ScheduleToStart and StartToClose are specified, -// HistoryEngine's validateActivityScheduleAttribute will deduce the missing timeout and fill it in -// instead of returning a BadRequest error and only when all three are missing should a BadRequest be returned. -func (s *engineSuite) TestRespondWorkflowTaskCompletedSingleActivityScheduledAttribute() { - runTimeout := int32(100) - testIterationVariables := []struct { - scheduleToClose int32 - scheduleToStart int32 - startToClose int32 - heartbeat int32 - expectedScheduleToClose int32 - expectedScheduleToStart int32 - expectedStartToClose int32 - expectWorkflowTaskFail bool - }{ - // No ScheduleToClose timeout, will use runTimeout - {0, 3, 7, 0, - runTimeout, 3, 7, false}, - // Has ScheduleToClose timeout but not ScheduleToStart or StartToClose, - // will use ScheduleToClose for ScheduleToStart and StartToClose - {7, 0, 0, 0, - 7, 7, 7, false}, - // Only StartToClose timeout - {0, 0, 7, 0, - runTimeout, runTimeout, 7, false}, - // No ScheduleToClose timeout, ScheduleToStart or StartToClose, expect error return - {0, 0, 0, 0, - 0, 0, 0, true}, - // Negative ScheduleToClose, expect error return - {-1, 0, 0, 0, - 0, 0, 0, true}, - // Negative ScheduleToStart, expect error return - {0, -1, 0, 0, - 0, 0, 0, true}, - // Negative StartToClose, expect error return - {0, 0, -1, 0, - 0, 0, 0, true}, - // Negative HeartBeat, expect error return - {0, 0, 0, -1, - 0, 0, 0, true}, - // Use workflow timeout - {runTimeout, 0, 0, 0, - runTimeout, runTimeout, runTimeout, false}, - // Timeout larger than workflow timeout - {runTimeout + 1, 0, 0, 0, - runTimeout, runTimeout, runTimeout, false}, - {0, runTimeout + 1, 0, 0, - 0, 0, 0, true}, - {0, 0, runTimeout + 1, 0, - runTimeout, runTimeout, runTimeout, false}, - {0, 0, 0, runTimeout + 1, - 0, 0, 0, true}, - // No ScheduleToClose timeout, will use ScheduleToStart + StartToClose, but exceed limit - {0, runTimeout, 10, 0, - runTimeout, runTimeout, 10, false}, - } - - for _, iVar := range testIterationVariables { - namespaceID := tests.NamespaceID - we := commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - } - tl := "testTaskQueue" - tt := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: tests.WorkflowID, - RunId: we.GetRunId(), - ScheduledEventId: 2, - } - taskToken, _ := tt.Marshal() - identity := "testIdentity" - input := payloads.EncodeString("input") - - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, - tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) - addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), time.Duration(runTimeout*10)*time.Second, time.Duration(runTimeout)*time.Second, 200*time.Second, identity) - wt := addWorkflowTaskScheduledEvent(ms) - addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) - - commands := []*commandpb.Command{{ - CommandType: enumspb.COMMAND_TYPE_SCHEDULE_ACTIVITY_TASK, - Attributes: 
&commandpb.Command_ScheduleActivityTaskCommandAttributes{ScheduleActivityTaskCommandAttributes: &commandpb.ScheduleActivityTaskCommandAttributes{ - ActivityId: "activity1", - ActivityType: &commonpb.ActivityType{Name: "activity_type1"}, - TaskQueue: &taskqueuepb.TaskQueue{Name: tl}, - Input: input, - ScheduleToCloseTimeout: timestamp.DurationPtr(time.Duration(iVar.scheduleToClose) * time.Second), - ScheduleToStartTimeout: timestamp.DurationPtr(time.Duration(iVar.scheduleToStart) * time.Second), - StartToCloseTimeout: timestamp.DurationPtr(time.Duration(iVar.startToClose) * time.Second), - HeartbeatTimeout: timestamp.DurationPtr(time.Duration(iVar.heartbeat) * time.Second), - }}, - }} - - gwmsResponse1 := &persistence.GetWorkflowExecutionResponse{State: workflow.TestCloneToProto(ms)} - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse1, nil) - ms2 := workflow.TestCloneToProto(ms) - if iVar.expectWorkflowTaskFail { - gwmsResponse2 := &persistence.GetWorkflowExecutionResponse{State: ms2} - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse2, nil) - } - - var updatedWorkflowMutation persistence.WorkflowMutation - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).DoAndReturn(func(_ context.Context, request *persistence.UpdateWorkflowExecutionRequest) (*persistence.UpdateWorkflowExecutionResponse, error) { - updatedWorkflowMutation = request.UpdateWorkflowMutation - return tests.UpdateWorkflowExecutionResponse, nil - }) - - _, err := s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ - NamespaceId: tests.NamespaceID.String(), - CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ - TaskToken: taskToken, - Commands: commands, - Identity: identity, - }, - }) - - if !iVar.expectWorkflowTaskFail { - s.NoError(err) - ms := s.getMutableState(tests.NamespaceID, we) - s.Equal(int64(6), ms.GetNextEventID()) - s.Equal(int64(3), ms.GetExecutionInfo().LastWorkflowTaskStartedEventId) - s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, ms.GetExecutionState().State) - s.False(ms.HasPendingWorkflowTask()) - - activity1Attributes := s.getActivityScheduledEvent(ms, int64(5)).GetActivityTaskScheduledEventAttributes() - s.Equal(time.Duration(iVar.expectedScheduleToClose)*time.Second, timestamp.DurationValue(activity1Attributes.GetScheduleToCloseTimeout()), iVar) - s.Equal(time.Duration(iVar.expectedScheduleToStart)*time.Second, timestamp.DurationValue(activity1Attributes.GetScheduleToStartTimeout()), iVar) - s.Equal(time.Duration(iVar.expectedStartToClose)*time.Second, timestamp.DurationValue(activity1Attributes.GetStartToCloseTimeout()), iVar) - } else { - s.Error(err) - s.IsType(&serviceerror.InvalidArgument{}, err) - s.True(strings.HasPrefix(err.Error(), "BadScheduleActivityAttributes"), err.Error()) - s.NotNil(updatedWorkflowMutation) - s.Equal(int64(5), updatedWorkflowMutation.NextEventID, iVar) - s.Equal(common.EmptyEventID, updatedWorkflowMutation.ExecutionInfo.LastWorkflowTaskStartedEventId, iVar) - s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, updatedWorkflowMutation.ExecutionState.State, iVar) - s.True(updatedWorkflowMutation.ExecutionInfo.WorkflowTaskScheduledEventId != common.EmptyEventID, iVar) - } - s.TearDownTest() - s.SetupTest() - } -} - -func (s *engineSuite) TestRespondWorkflowTaskCompletedBadBinary() { - namespaceID := tests.NamespaceID - we := commonpb.WorkflowExecution{ - 
WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - } - tl := "testTaskQueue" - tt := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - ScheduledEventId: 2, - } - taskToken, _ := tt.Marshal() - identity := "testIdentity" - - ns := tests.LocalNamespaceEntry.Clone( - namespace.WithID(uuid.New()), - namespace.WithBadBinary("test-bad-binary"), - ) - s.mockNamespaceCache.EXPECT().GetNamespaceByID(ns.ID()).Return(ns, nil).AnyTimes() - s.mockNamespaceCache.EXPECT().GetNamespace(ns.ID()).Return(ns, nil).AnyTimes() - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, - ns, log.NewTestLogger(), we.GetRunId()) - addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) - wt := addWorkflowTaskScheduledEvent(ms) - addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) - - var commands []*commandpb.Command - - gwmsResponse1 := &persistence.GetWorkflowExecutionResponse{State: workflow.TestCloneToProto(ms)} - ms2 := workflow.TestCloneToProto(ms) - gwmsResponse2 := &persistence.GetWorkflowExecutionResponse{State: ms2} - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse1, nil) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse2, nil) - var updatedWorkflowMutation persistence.WorkflowMutation - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).DoAndReturn(func(_ context.Context, request *persistence.UpdateWorkflowExecutionRequest) (*persistence.UpdateWorkflowExecutionResponse, error) { - updatedWorkflowMutation = request.UpdateWorkflowMutation - return tests.UpdateWorkflowExecutionResponse, nil - }) - - _, err := s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ - NamespaceId: ns.ID().String(), - CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ - TaskToken: taskToken, - Commands: commands, - Identity: identity, - BinaryChecksum: "test-bad-binary", - }, - }) - s.Error(err) - s.IsType(&serviceerror.InvalidArgument{}, err) - s.Equal("BadBinary: binary test-bad-binary is marked as bad deployment", err.Error()) - - s.NotNil(updatedWorkflowMutation) - s.Equal(int64(5), updatedWorkflowMutation.NextEventID) - s.Equal(common.EmptyEventID, updatedWorkflowMutation.ExecutionInfo.LastWorkflowTaskStartedEventId) - s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, updatedWorkflowMutation.ExecutionState.State) - s.True(updatedWorkflowMutation.ExecutionInfo.WorkflowTaskScheduledEventId != common.EmptyEventID) -} - -func (s *engineSuite) TestRespondWorkflowTaskCompletedSingleActivityScheduledWorkflowTask() { - namespaceID := tests.NamespaceID - we := commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - } - tl := "testTaskQueue" - tt := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: tests.WorkflowID, - RunId: we.GetRunId(), - ScheduledEventId: 2, - } - taskToken, _ := tt.Marshal() - identity := "testIdentity" - input := payloads.EncodeString("input") - - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, - tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) - addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 90*time.Second, 200*time.Second, identity) - 
wt := addWorkflowTaskScheduledEvent(ms) - addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) - - commands := []*commandpb.Command{{ - CommandType: enumspb.COMMAND_TYPE_SCHEDULE_ACTIVITY_TASK, - Attributes: &commandpb.Command_ScheduleActivityTaskCommandAttributes{ScheduleActivityTaskCommandAttributes: &commandpb.ScheduleActivityTaskCommandAttributes{ - ActivityId: "activity1", - ActivityType: &commonpb.ActivityType{Name: "activity_type1"}, - TaskQueue: &taskqueuepb.TaskQueue{Name: tl}, - Input: input, - ScheduleToCloseTimeout: timestamp.DurationPtr(100 * time.Second), - ScheduleToStartTimeout: timestamp.DurationPtr(10 * time.Second), - StartToCloseTimeout: timestamp.DurationPtr(50 * time.Second), - HeartbeatTimeout: timestamp.DurationPtr(5 * time.Second), - }}, - }} - - wfMs := workflow.TestCloneToProto(ms) - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) - - _, err := s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ - NamespaceId: tests.NamespaceID.String(), - CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ - TaskToken: taskToken, - Commands: commands, - Identity: identity, - }, - }) - s.NoError(err) - ms2 := s.getMutableState(tests.NamespaceID, we) - s.Equal(int64(6), ms2.GetNextEventID()) - s.Equal(int64(3), ms2.GetExecutionInfo().LastWorkflowTaskStartedEventId) - s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, ms2.GetExecutionState().State) - s.False(ms2.HasPendingWorkflowTask()) - - activity1Attributes := s.getActivityScheduledEvent(ms2, int64(5)).GetActivityTaskScheduledEventAttributes() - s.Equal("activity1", activity1Attributes.ActivityId) - s.Equal("activity_type1", activity1Attributes.ActivityType.Name) - s.Equal(int64(4), activity1Attributes.WorkflowTaskCompletedEventId) - s.Equal(tl, activity1Attributes.TaskQueue.Name) - s.Equal(input, activity1Attributes.Input) - s.Equal(90*time.Second, timestamp.DurationValue(activity1Attributes.ScheduleToCloseTimeout)) // runTimeout - s.Equal(10*time.Second, timestamp.DurationValue(activity1Attributes.ScheduleToStartTimeout)) - s.Equal(50*time.Second, timestamp.DurationValue(activity1Attributes.StartToCloseTimeout)) - s.Equal(5*time.Second, timestamp.DurationValue(activity1Attributes.HeartbeatTimeout)) -} - -func (s *engineSuite) TestRespondWorkflowTaskCompleted_SignalTaskGeneration() { - resp := s.testRespondWorkflowTaskCompletedSignalGeneration(false) - s.NotNil(resp.GetStartedResponse()) -} - -func (s *engineSuite) TestRespondWorkflowTaskCompleted_SkipSignalTaskGeneration() { - resp := s.testRespondWorkflowTaskCompletedSignalGeneration(true) - s.Nil(resp.GetStartedResponse()) -} - -func (s *engineSuite) testRespondWorkflowTaskCompletedSignalGeneration(skipGenerateTask bool) *historyservice.RespondWorkflowTaskCompletedResponse { - we := commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - } - tl := "testTaskQueue" - tt := &tokenspb.Task{ - Attempt: 1, - NamespaceId: tests.NamespaceID.String(), - WorkflowId: tests.WorkflowID, - RunId: we.GetRunId(), - ScheduledEventId: 2, - } - taskToken, _ := tt.Marshal() - identity := "testIdentity" - - signal := workflowservice.SignalWorkflowExecutionRequest{ - Namespace: tests.NamespaceID.String(), - 
WorkflowExecution: &we, - Identity: identity, - SignalName: "test signal name", - Input: payloads.EncodeString("test input"), - SkipGenerateWorkflowTask: skipGenerateTask, - RequestId: uuid.New(), - } - signalRequest := &historyservice.SignalWorkflowExecutionRequest{ - NamespaceId: tests.NamespaceID.String(), - SignalRequest: &signal, - } - - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, - tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) - addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 90*time.Second, 200*time.Second, identity) - wt := addWorkflowTaskScheduledEvent(ms) - addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) - - wfMs := workflow.TestCloneToProto(ms) - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil).AnyTimes() - - _, err := s.mockHistoryEngine.SignalWorkflowExecution(context.Background(), signalRequest) - s.NoError(err) - - var commands []*commandpb.Command - resp, err := s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ - NamespaceId: tests.NamespaceID.String(), - CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ - TaskToken: taskToken, - Commands: commands, - Identity: identity, - ReturnNewWorkflowTask: true, - }, - }) - s.NoError(err) - s.NotNil(resp) - - s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, ms.GetExecutionState().State) - - return resp -} - -func (s *engineSuite) TestRespondWorkflowTaskCompleted_ActivityEagerExecution_NotCancelled() { - namespaceID := tests.NamespaceID - we := commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - } - tl := "testTaskQueue" - tt := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: tests.WorkflowID, - RunId: we.GetRunId(), - ScheduledEventId: 2, - } - taskToken, _ := tt.Marshal() - identity := "testIdentity" - input := payloads.EncodeString("input") - - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, - tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) - addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 90*time.Second, 200*time.Second, identity) - wt := addWorkflowTaskScheduledEvent(ms) - addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) - - scheduleToCloseTimeout := timestamp.DurationPtr(90 * time.Second) - scheduleToStartTimeout := timestamp.DurationPtr(10 * time.Second) - startToCloseTimeout := timestamp.DurationPtr(50 * time.Second) - heartbeatTimeout := timestamp.DurationPtr(5 * time.Second) - commands := []*commandpb.Command{ - { - CommandType: enumspb.COMMAND_TYPE_SCHEDULE_ACTIVITY_TASK, - Attributes: &commandpb.Command_ScheduleActivityTaskCommandAttributes{ScheduleActivityTaskCommandAttributes: &commandpb.ScheduleActivityTaskCommandAttributes{ - ActivityId: "activity1", - ActivityType: &commonpb.ActivityType{Name: "activity_type1"}, - TaskQueue: &taskqueuepb.TaskQueue{Name: tl}, - Input: input, - ScheduleToCloseTimeout: scheduleToCloseTimeout, - ScheduleToStartTimeout: scheduleToStartTimeout, - StartToCloseTimeout: startToCloseTimeout, - HeartbeatTimeout: heartbeatTimeout, - 
RequestEagerExecution: false, - }}, - }, - { - CommandType: enumspb.COMMAND_TYPE_SCHEDULE_ACTIVITY_TASK, - Attributes: &commandpb.Command_ScheduleActivityTaskCommandAttributes{ScheduleActivityTaskCommandAttributes: &commandpb.ScheduleActivityTaskCommandAttributes{ - ActivityId: "activity2", - ActivityType: &commonpb.ActivityType{Name: "activity_type2"}, - TaskQueue: &taskqueuepb.TaskQueue{Name: tl}, - Input: input, - ScheduleToCloseTimeout: scheduleToCloseTimeout, - ScheduleToStartTimeout: scheduleToStartTimeout, - StartToCloseTimeout: startToCloseTimeout, - HeartbeatTimeout: heartbeatTimeout, - RequestEagerExecution: true, - }}, - }, - } - - wfMs := workflow.TestCloneToProto(ms) - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) - - resp, err := s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ - NamespaceId: tests.NamespaceID.String(), - CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ - TaskToken: taskToken, - Commands: commands, - Identity: identity, - }, - }) - s.NoError(err) - ms2 := s.getMutableState(tests.NamespaceID, we) - s.Equal(int64(7), ms2.GetNextEventID()) - s.Equal(int64(3), ms2.GetExecutionInfo().LastWorkflowTaskStartedEventId) - s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, ms2.GetExecutionState().State) - s.False(ms2.HasPendingWorkflowTask()) - - ai1, ok := ms2.GetActivityByActivityID("activity1") - s.True(ok) - s.Equal(common.EmptyEventID, ai1.StartedEventId) - - ai2, ok := ms2.GetActivityByActivityID("activity2") - s.True(ok) - s.Equal(common.TransientEventID, ai2.StartedEventId) - s.NotZero(ai2.StartedTime) - - scheduledEvent := s.getActivityScheduledEvent(ms2, ai2.ScheduledEventId) - - s.Len(resp.ActivityTasks, 1) - activityTask := resp.ActivityTasks[0] - s.Equal("activity2", activityTask.ActivityId) - s.Equal("activity_type2", activityTask.ActivityType.GetName()) - s.Equal(input, activityTask.Input) - s.Equal(we, *activityTask.WorkflowExecution) - s.Equal(scheduledEvent.EventTime, activityTask.CurrentAttemptScheduledTime) - s.Equal(scheduledEvent.EventTime, activityTask.ScheduledTime) - s.Equal(*scheduleToCloseTimeout, *activityTask.ScheduleToCloseTimeout) - s.Equal(startToCloseTimeout, activityTask.StartToCloseTimeout) - s.Equal(heartbeatTimeout, activityTask.HeartbeatTimeout) - s.Equal(int32(1), activityTask.Attempt) - s.Nil(activityTask.HeartbeatDetails) - s.Equal(tests.LocalNamespaceEntry.Name().String(), activityTask.WorkflowNamespace) -} - -func (s *engineSuite) TestRespondWorkflowTaskCompleted_ActivityEagerExecution_Cancelled() { - namespaceID := tests.NamespaceID - we := commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - } - tl := "testTaskQueue" - tt := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: tests.WorkflowID, - RunId: we.GetRunId(), - ScheduledEventId: 2, - } - taskToken, _ := tt.Marshal() - identity := "testIdentity" - input := payloads.EncodeString("input") - - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, - tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) - addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 90*time.Second, 
200*time.Second, identity) - wt := addWorkflowTaskScheduledEvent(ms) - addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) - - scheduleToCloseTimeout := timestamp.DurationPtr(90 * time.Second) - scheduleToStartTimeout := timestamp.DurationPtr(10 * time.Second) - startToCloseTimeout := timestamp.DurationPtr(50 * time.Second) - heartbeatTimeout := timestamp.DurationPtr(5 * time.Second) - commands := []*commandpb.Command{ - { - CommandType: enumspb.COMMAND_TYPE_SCHEDULE_ACTIVITY_TASK, - Attributes: &commandpb.Command_ScheduleActivityTaskCommandAttributes{ScheduleActivityTaskCommandAttributes: &commandpb.ScheduleActivityTaskCommandAttributes{ - ActivityId: "activity1", - ActivityType: &commonpb.ActivityType{Name: "activity_type1"}, - TaskQueue: &taskqueuepb.TaskQueue{Name: tl}, - Input: input, - ScheduleToCloseTimeout: scheduleToCloseTimeout, - ScheduleToStartTimeout: scheduleToStartTimeout, - StartToCloseTimeout: startToCloseTimeout, - HeartbeatTimeout: heartbeatTimeout, - RequestEagerExecution: true, - }}, - }, - { - CommandType: enumspb.COMMAND_TYPE_REQUEST_CANCEL_ACTIVITY_TASK, - Attributes: &commandpb.Command_RequestCancelActivityTaskCommandAttributes{RequestCancelActivityTaskCommandAttributes: &commandpb.RequestCancelActivityTaskCommandAttributes{ - ScheduledEventId: 5, - }}, - }, - } - - wfMs := workflow.TestCloneToProto(ms) - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) - - resp, err := s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ - NamespaceId: tests.NamespaceID.String(), - CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ - TaskToken: taskToken, - Commands: commands, - Identity: identity, - ReturnNewWorkflowTask: true, - }, - }) - s.NoError(err) - ms2 := s.getMutableState(tests.NamespaceID, we) - s.Equal(int64(10), ms2.GetNextEventID()) // activity scheduled, request cancel, cancelled, workflow task scheduled, started - s.Equal(int64(3), ms2.GetExecutionInfo().LastWorkflowTaskStartedEventId) - s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, ms2.GetExecutionState().State) - s.True(ms2.HasPendingWorkflowTask()) - - _, ok := ms2.GetActivityByActivityID("activity1") - s.False(ok) - - s.Len(resp.ActivityTasks, 0) - s.NotNil(resp.StartedResponse) - s.Equal(int64(10), resp.StartedResponse.NextEventId) - s.Equal(int64(3), resp.StartedResponse.PreviousStartedEventId) -} - -func (s *engineSuite) TestRespondWorkflowTaskCompleted_ActivityEagerExecution_WorkflowClosed() { - namespaceID := tests.NamespaceID - we := commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - } - tl := "testTaskQueue" - tt := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: tests.WorkflowID, - RunId: we.GetRunId(), - ScheduledEventId: 2, - } - taskToken, _ := tt.Marshal() - identity := "testIdentity" - input := payloads.EncodeString("input") - - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, - tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) - addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 90*time.Second, 200*time.Second, identity) - wt := addWorkflowTaskScheduledEvent(ms) - 
addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) - - scheduleToCloseTimeout := timestamp.DurationPtr(90 * time.Second) - scheduleToStartTimeout := timestamp.DurationPtr(10 * time.Second) - startToCloseTimeout := timestamp.DurationPtr(50 * time.Second) - heartbeatTimeout := timestamp.DurationPtr(5 * time.Second) - commands := []*commandpb.Command{ - { - CommandType: enumspb.COMMAND_TYPE_SCHEDULE_ACTIVITY_TASK, - Attributes: &commandpb.Command_ScheduleActivityTaskCommandAttributes{ScheduleActivityTaskCommandAttributes: &commandpb.ScheduleActivityTaskCommandAttributes{ - ActivityId: "activity1", - ActivityType: &commonpb.ActivityType{Name: "activity_type1"}, - TaskQueue: &taskqueuepb.TaskQueue{Name: tl}, - Input: input, - ScheduleToCloseTimeout: scheduleToCloseTimeout, - ScheduleToStartTimeout: scheduleToStartTimeout, - StartToCloseTimeout: startToCloseTimeout, - HeartbeatTimeout: heartbeatTimeout, - RequestEagerExecution: true, - }}, - }, - { - CommandType: enumspb.COMMAND_TYPE_COMPLETE_WORKFLOW_EXECUTION, - Attributes: &commandpb.Command_CompleteWorkflowExecutionCommandAttributes{CompleteWorkflowExecutionCommandAttributes: &commandpb.CompleteWorkflowExecutionCommandAttributes{ - Result: payloads.EncodeString("complete"), - }}, - }, - } - - wfMs := workflow.TestCloneToProto(ms) - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) - - resp, err := s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ - NamespaceId: tests.NamespaceID.String(), - CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ - TaskToken: taskToken, - Commands: commands, - Identity: identity, - ReturnNewWorkflowTask: true, - }, - }) - s.NoError(err) - ms2 := s.getMutableState(tests.NamespaceID, we) - s.Equal(int64(7), ms2.GetNextEventID()) // activity scheduled, workflow completed - s.Equal(int64(3), ms2.GetExecutionInfo().LastWorkflowTaskStartedEventId) - s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED, ms2.GetExecutionState().State) - s.False(ms2.HasPendingWorkflowTask()) - - activityInfo, ok := ms2.GetActivityByActivityID("activity1") - s.True(ok) - s.Equal(int64(5), activityInfo.ScheduledEventId) // activity scheduled - s.Equal(common.EmptyEventID, activityInfo.StartedEventId) // activity not started - - s.Len(resp.ActivityTasks, 0) - s.Nil(resp.StartedResponse) -} - -func (s *engineSuite) TestRespondWorkflowTaskCompleted_WorkflowTaskHeartbeatTimeout() { - namespaceID := tests.NamespaceID - we := commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - } - tl := "testTaskQueue" - tt := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: we.WorkflowId, - RunId: we.RunId, - ScheduledEventId: 2, - } - taskToken, _ := tt.Marshal() - identity := "testIdentity" - - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, - tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) - addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) - wt := addWorkflowTaskScheduledEvent(ms) - addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) - 
ms.GetExecutionInfo().WorkflowTaskOriginalScheduledTime = timestamp.TimePtr(time.Now().UTC().Add(-time.Hour)) - - var commands []*commandpb.Command - - wfMs := workflow.TestCloneToProto(ms) - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) - - _, err := s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ - NamespaceId: tests.NamespaceID.String(), - CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ - ForceCreateNewWorkflowTask: true, - TaskToken: taskToken, - Commands: commands, - Identity: identity, - }, - }) - s.Error(err, "workflow task heartbeat timeout") -} - -func (s *engineSuite) TestRespondWorkflowTaskCompleted_WorkflowTaskHeartbeatNotTimeout() { - namespaceID := tests.NamespaceID - we := commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - } - tl := "testTaskQueue" - tt := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: we.WorkflowId, - RunId: we.RunId, - ScheduledEventId: 2, - } - taskToken, _ := tt.Marshal() - identity := "testIdentity" - - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, - tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) - addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) - wt := addWorkflowTaskScheduledEvent(ms) - addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) - ms.GetExecutionInfo().WorkflowTaskOriginalScheduledTime = timestamp.TimePtr(time.Now().UTC().Add(-time.Minute)) - - var commands []*commandpb.Command - - wfMs := workflow.TestCloneToProto(ms) - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) - - _, err := s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ - NamespaceId: tests.NamespaceID.String(), - CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ - ForceCreateNewWorkflowTask: true, - TaskToken: taskToken, - Commands: commands, - Identity: identity, - }, - }) - s.Nil(err) -} - -func (s *engineSuite) TestRespondWorkflowTaskCompleted_WorkflowTaskHeartbeatNotTimeout_ZeroOrignalScheduledTime() { - namespaceID := tests.NamespaceID - we := commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - } - tl := "testTaskQueue" - tt := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: we.WorkflowId, - RunId: we.RunId, - ScheduledEventId: 2, - } - taskToken, _ := tt.Marshal() - identity := "testIdentity" - - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, - tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) - addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) - wt := addWorkflowTaskScheduledEvent(ms) - addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, 
tl, identity) - ms.GetExecutionInfo().WorkflowTaskOriginalScheduledTime = nil - - var commands []*commandpb.Command - - wfMs := workflow.TestCloneToProto(ms) - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) - - _, err := s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ - NamespaceId: tests.NamespaceID.String(), - CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ - ForceCreateNewWorkflowTask: true, - TaskToken: taskToken, - Commands: commands, - Identity: identity, - }, - }) - s.Nil(err) -} - -func (s *engineSuite) TestRespondWorkflowTaskCompletedCompleteWorkflowSuccess() { - namespaceID := tests.NamespaceID - we := commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - } - tl := "testTaskQueue" - tt := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: we.WorkflowId, - RunId: we.RunId, - ScheduledEventId: 2, - } - taskToken, _ := tt.Marshal() - identity := "testIdentity" - workflowResult := payloads.EncodeString("success") - - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, - tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) - addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) - wt := addWorkflowTaskScheduledEvent(ms) - addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) - - commands := []*commandpb.Command{{ - CommandType: enumspb.COMMAND_TYPE_COMPLETE_WORKFLOW_EXECUTION, - Attributes: &commandpb.Command_CompleteWorkflowExecutionCommandAttributes{CompleteWorkflowExecutionCommandAttributes: &commandpb.CompleteWorkflowExecutionCommandAttributes{ - Result: workflowResult, - }}, - }} - - wfMs := workflow.TestCloneToProto(ms) - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) - - _, err := s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ - NamespaceId: tests.NamespaceID.String(), - CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ - TaskToken: taskToken, - Commands: commands, - Identity: identity, - }, - }) - s.NoError(err) - ms2 := s.getMutableState(tests.NamespaceID, we) - s.Equal(int64(6), ms2.GetNextEventID()) - s.Equal(int64(3), ms2.GetExecutionInfo().LastWorkflowTaskStartedEventId) - s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED, ms2.GetExecutionState().State) - s.False(ms2.HasPendingWorkflowTask()) -} - -func (s *engineSuite) TestRespondWorkflowTaskCompletedFailWorkflowSuccess() { - namespaceID := tests.NamespaceID - we := commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - } - tl := "testTaskQueue" - tt := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: we.WorkflowId, - RunId: we.RunId, - ScheduledEventId: 2, - } - taskToken, _ := tt.Marshal() - identity := "testIdentity" - reason := "fail workflow 
reason" - - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, - tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) - addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) - wt := addWorkflowTaskScheduledEvent(ms) - addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) - - commands := []*commandpb.Command{{ - CommandType: enumspb.COMMAND_TYPE_FAIL_WORKFLOW_EXECUTION, - Attributes: &commandpb.Command_FailWorkflowExecutionCommandAttributes{FailWorkflowExecutionCommandAttributes: &commandpb.FailWorkflowExecutionCommandAttributes{ - Failure: failure.NewServerFailure(reason, false), - }}, - }} - - wfMs := workflow.TestCloneToProto(ms) - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) - - _, err := s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ - NamespaceId: tests.NamespaceID.String(), - CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ - TaskToken: taskToken, - Commands: commands, - Identity: identity, - }, - }) - s.NoError(err) - ms2 := s.getMutableState(tests.NamespaceID, we) - s.Equal(int64(6), ms2.GetNextEventID()) - s.Equal(int64(3), ms2.GetExecutionInfo().LastWorkflowTaskStartedEventId) - s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED, ms2.GetExecutionState().State) - s.False(ms2.HasPendingWorkflowTask()) -} - -func (s *engineSuite) TestRespondWorkflowTaskCompletedSignalExternalWorkflowSuccess() { - namespaceID := tests.NamespaceID - we := commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - } - tl := "testTaskQueue" - tt := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: we.WorkflowId, - RunId: we.RunId, - ScheduledEventId: 2, - } - taskToken, _ := tt.Marshal() - identity := "testIdentity" - - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, - tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) - addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) - wt := addWorkflowTaskScheduledEvent(ms) - addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) - - commands := []*commandpb.Command{{ - CommandType: enumspb.COMMAND_TYPE_SIGNAL_EXTERNAL_WORKFLOW_EXECUTION, - Attributes: &commandpb.Command_SignalExternalWorkflowExecutionCommandAttributes{SignalExternalWorkflowExecutionCommandAttributes: &commandpb.SignalExternalWorkflowExecutionCommandAttributes{ - Namespace: tests.Namespace.String(), - Execution: &commonpb.WorkflowExecution{ - WorkflowId: we.WorkflowId, - RunId: we.RunId, - }, - SignalName: "signal", - Input: payloads.EncodeString("test input"), - }}, - }} - - wfMs := workflow.TestCloneToProto(ms) - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) - - _, err := 
s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ - NamespaceId: tests.NamespaceID.String(), - CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ - TaskToken: taskToken, - Commands: commands, - Identity: identity, - }, - }) - s.NoError(err) - ms2 := s.getMutableState(tests.NamespaceID, we) - s.Equal(int64(6), ms2.GetNextEventID()) - s.Equal(int64(3), ms2.GetExecutionInfo().LastWorkflowTaskStartedEventId) -} - -func (s *engineSuite) TestRespondWorkflowTaskCompletedStartChildWorkflowWithAbandonPolicy() { - namespaceID := tests.NamespaceID - we := commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - } - tl := "testTaskQueue" - tt := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: we.WorkflowId, - RunId: we.RunId, - ScheduledEventId: 2, - } - taskToken, _ := tt.Marshal() - identity := "testIdentity" - - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, - tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) - addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) - wt := addWorkflowTaskScheduledEvent(ms) - addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) - - abandon := enumspb.PARENT_CLOSE_POLICY_ABANDON - commands := []*commandpb.Command{{ - CommandType: enumspb.COMMAND_TYPE_START_CHILD_WORKFLOW_EXECUTION, - Attributes: &commandpb.Command_StartChildWorkflowExecutionCommandAttributes{StartChildWorkflowExecutionCommandAttributes: &commandpb.StartChildWorkflowExecutionCommandAttributes{ - Namespace: tests.Namespace.String(), - WorkflowId: "child-workflow-id", - WorkflowType: &commonpb.WorkflowType{ - Name: "child-workflow-type", - }, - ParentClosePolicy: abandon, - }}, - }} - - wfMs := workflow.TestCloneToProto(ms) - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) - s.mockShard.Resource.SearchAttributesMapperProvider.EXPECT(). - GetMapper(tests.Namespace). 
- Return(&searchattribute.TestMapper{Namespace: tests.Namespace.String()}, nil) - - _, err := s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ - NamespaceId: tests.NamespaceID.String(), - CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ - TaskToken: taskToken, - Commands: commands, - Identity: identity, - }, - }) - s.NoError(err) - ms2 := s.getMutableState(tests.NamespaceID, we) - s.Equal(int64(6), ms2.GetNextEventID()) - s.Equal(int64(3), ms2.GetExecutionInfo().LastWorkflowTaskStartedEventId) - s.Equal(1, len(ms2.GetPendingChildExecutionInfos())) - var childID int64 - for c := range ms2.GetPendingChildExecutionInfos() { - childID = c - break - } - s.Equal("child-workflow-id", ms2.GetPendingChildExecutionInfos()[childID].StartedWorkflowId) - s.Equal(enumspb.PARENT_CLOSE_POLICY_ABANDON, ms2.GetPendingChildExecutionInfos()[childID].ParentClosePolicy) -} - -func (s *engineSuite) TestRespondWorkflowTaskCompletedStartChildWorkflowWithTerminatePolicy() { - namespaceID := tests.NamespaceID - we := commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - } - tl := "testTaskQueue" - tt := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: we.WorkflowId, - RunId: we.RunId, - ScheduledEventId: 2, - } - taskToken, _ := tt.Marshal() - identity := "testIdentity" - - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, - tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) - addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) - wt := addWorkflowTaskScheduledEvent(ms) - addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) - - terminate := enumspb.PARENT_CLOSE_POLICY_TERMINATE - commands := []*commandpb.Command{{ - CommandType: enumspb.COMMAND_TYPE_START_CHILD_WORKFLOW_EXECUTION, - Attributes: &commandpb.Command_StartChildWorkflowExecutionCommandAttributes{StartChildWorkflowExecutionCommandAttributes: &commandpb.StartChildWorkflowExecutionCommandAttributes{ - Namespace: tests.Namespace.String(), - WorkflowId: "child-workflow-id", - WorkflowType: &commonpb.WorkflowType{ - Name: "child-workflow-type", - }, - ParentClosePolicy: terminate, - }}, - }} - - wfMs := workflow.TestCloneToProto(ms) - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) - s.mockShard.Resource.SearchAttributesMapperProvider.EXPECT(). - GetMapper(tests.Namespace). 
- Return(&searchattribute.TestMapper{Namespace: tests.Namespace.String()}, nil) - - _, err := s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ - NamespaceId: tests.NamespaceID.String(), - CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ - TaskToken: taskToken, - Commands: commands, - Identity: identity, - }, - }) - s.NoError(err) - ms2 := s.getMutableState(tests.NamespaceID, we) - s.Equal(int64(6), ms2.GetNextEventID()) - s.Equal(int64(3), ms2.GetExecutionInfo().LastWorkflowTaskStartedEventId) - s.Equal(1, len(ms2.GetPendingChildExecutionInfos())) - var childID int64 - for c := range ms2.GetPendingChildExecutionInfos() { - childID = c - break - } - s.Equal("child-workflow-id", ms2.GetPendingChildExecutionInfos()[childID].StartedWorkflowId) - s.Equal(enumspb.PARENT_CLOSE_POLICY_TERMINATE, ms2.GetPendingChildExecutionInfos()[childID].ParentClosePolicy) -} - -func (s *engineSuite) TestRespondWorkflowTaskCompletedSignalExternalWorkflowFailed_UnKnownNamespace() { - namespaceID := tests.NamespaceID - we := commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - } - tl := "testTaskQueue" - tt := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: we.WorkflowId, - RunId: we.RunId, - ScheduledEventId: 2, - } - taskToken, _ := tt.Marshal() - identity := "testIdentity" - foreignNamespace := namespace.Name("unknown namespace") - - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, - tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) - addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) - wt := addWorkflowTaskScheduledEvent(ms) - addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) - - commands := []*commandpb.Command{{ - CommandType: enumspb.COMMAND_TYPE_SIGNAL_EXTERNAL_WORKFLOW_EXECUTION, - Attributes: &commandpb.Command_SignalExternalWorkflowExecutionCommandAttributes{SignalExternalWorkflowExecutionCommandAttributes: &commandpb.SignalExternalWorkflowExecutionCommandAttributes{ - Namespace: foreignNamespace.String(), - Execution: &commonpb.WorkflowExecution{ - WorkflowId: we.WorkflowId, - RunId: we.RunId, - }, - SignalName: "signal", - Input: payloads.EncodeString("test input"), - }}, - }} - - wfMs := workflow.TestCloneToProto(ms) - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - s.mockNamespaceCache.EXPECT().GetNamespace(foreignNamespace).Return( - nil, errors.New("get foreign namespace error"), - ) - - _, err := s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ - NamespaceId: tests.NamespaceID.String(), - CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ - TaskToken: taskToken, - Commands: commands, - Identity: identity, - }, - }) - - s.NotNil(err) -} - -func (s *engineSuite) TestRespondActivityTaskCompletedInvalidToken() { - - invalidToken, _ := json.Marshal("bad token") - identity := "testIdentity" - - _, err := s.mockHistoryEngine.RespondActivityTaskCompleted(context.Background(), &historyservice.RespondActivityTaskCompletedRequest{ - NamespaceId: tests.NamespaceID.String(), - CompleteRequest: &workflowservice.RespondActivityTaskCompletedRequest{ - TaskToken: invalidToken, - 
Result: nil, - Identity: identity, - }, - }) - - s.NotNil(err) - s.IsType(&serviceerror.InvalidArgument{}, err) -} - -func (s *engineSuite) TestRespondActivityTaskCompletedIfNoExecution() { - namespaceID := tests.NamespaceID - tt := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - ScheduledEventId: 2, - } - taskToken, _ := tt.Marshal() - identity := "testIdentity" - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(nil, serviceerror.NewNotFound("")) - - _, err := s.mockHistoryEngine.RespondActivityTaskCompleted(context.Background(), &historyservice.RespondActivityTaskCompletedRequest{ - NamespaceId: tests.NamespaceID.String(), - CompleteRequest: &workflowservice.RespondActivityTaskCompletedRequest{ - TaskToken: taskToken, - Identity: identity, - }, - }) - s.NotNil(err) - s.IsType(&serviceerror.NotFound{}, err) -} - -func (s *engineSuite) TestRespondActivityTaskCompletedIfNoRunID() { - namespaceID := tests.NamespaceID - tt := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: tests.WorkflowID, - ScheduledEventId: 2, - } - taskToken, _ := tt.Marshal() - identity := "testIdentity" - - s.mockExecutionMgr.EXPECT().GetCurrentExecution(gomock.Any(), gomock.Any()).Return(nil, serviceerror.NewNotFound("")) - - _, err := s.mockHistoryEngine.RespondActivityTaskCompleted(context.Background(), &historyservice.RespondActivityTaskCompletedRequest{ - NamespaceId: tests.NamespaceID.String(), - CompleteRequest: &workflowservice.RespondActivityTaskCompletedRequest{ - TaskToken: taskToken, - Identity: identity, - }, - }) - s.NotNil(err) - s.IsType(&serviceerror.NotFound{}, err) -} - -func (s *engineSuite) TestRespondActivityTaskCompletedIfGetExecutionFailed() { - namespaceID := tests.NamespaceID - tt := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - ScheduledEventId: 2, - } - taskToken, _ := tt.Marshal() - identity := "testIdentity" - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(nil, errors.New("FAILED")) - - _, err := s.mockHistoryEngine.RespondActivityTaskCompleted(context.Background(), &historyservice.RespondActivityTaskCompletedRequest{ - NamespaceId: tests.NamespaceID.String(), - CompleteRequest: &workflowservice.RespondActivityTaskCompletedRequest{ - TaskToken: taskToken, - Identity: identity, - }, - }) - s.EqualError(err, "FAILED") -} - -func (s *engineSuite) TestRespondActivityTaskCompletedIfNoAIdProvided() { - namespaceID := tests.NamespaceID - execution := commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - } - taskqueue := "testTaskQueue" - identity := "testIdentity" - tt := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: tests.WorkflowID, - ScheduledEventId: common.EmptyEventID, - } - taskToken, _ := tt.Marshal() - - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, - tests.LocalNamespaceEntry, log.NewTestLogger(), tests.RunID) - addWorkflowExecutionStartedEvent(ms, execution, "wType", taskqueue, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) - addWorkflowTaskScheduledEvent(ms) - wfMs := workflow.TestCloneToProto(ms) - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - gceResponse := &persistence.GetCurrentExecutionResponse{RunID: tests.RunID} - - 
s.mockExecutionMgr.EXPECT().GetCurrentExecution(gomock.Any(), gomock.Any()).Return(gceResponse, nil) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - - _, err := s.mockHistoryEngine.RespondActivityTaskCompleted(context.Background(), &historyservice.RespondActivityTaskCompletedRequest{ - NamespaceId: tests.NamespaceID.String(), - CompleteRequest: &workflowservice.RespondActivityTaskCompletedRequest{ - TaskToken: taskToken, - Identity: identity, - }, - }) - s.EqualError(err, "activityID cannot be empty") -} - -func (s *engineSuite) TestRespondActivityTaskCompletedIfNotFound() { - namespaceID := tests.NamespaceID - tt := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: tests.WorkflowID, - ScheduledEventId: common.EmptyEventID, - ActivityId: "aid", - } - taskToken, _ := tt.Marshal() - execution := commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - } - taskqueue := "testTaskQueue" - identity := "testIdentity" - - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, - tests.LocalNamespaceEntry, log.NewTestLogger(), tests.RunID) - addWorkflowExecutionStartedEvent(ms, execution, "wType", taskqueue, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) - addWorkflowTaskScheduledEvent(ms) - wfMs := workflow.TestCloneToProto(ms) - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - gceResponse := &persistence.GetCurrentExecutionResponse{RunID: tests.RunID} - - s.mockExecutionMgr.EXPECT().GetCurrentExecution(gomock.Any(), gomock.Any()).Return(gceResponse, nil) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - - _, err := s.mockHistoryEngine.RespondActivityTaskCompleted(context.Background(), &historyservice.RespondActivityTaskCompletedRequest{ - NamespaceId: tests.NamespaceID.String(), - CompleteRequest: &workflowservice.RespondActivityTaskCompletedRequest{ - TaskToken: taskToken, - Identity: identity, - }, - }) - s.Error(err) -} - -func (s *engineSuite) TestRespondActivityTaskCompletedUpdateExecutionFailed() { - namespaceID := tests.NamespaceID - we := commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - } - tl := "testTaskQueue" - tt := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: we.WorkflowId, - RunId: we.RunId, - ScheduledEventId: 5, - } - taskToken, _ := tt.Marshal() - identity := "testIdentity" - activityID := "activity1_id" - activityType := "activity_type1" - activityInput := payloads.EncodeString("input1") - activityResult := payloads.EncodeString("activity result") - - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, - tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) - addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) - wt := addWorkflowTaskScheduledEvent(ms) - workflowTaskStartedEvent := addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) - workflowTaskCompletedEvent := addWorkflowTaskCompletedEvent(&s.Suite, ms, wt.ScheduledEventID, workflowTaskStartedEvent.EventId, identity) - activityScheduledEvent, _ := addActivityTaskScheduledEvent(ms, workflowTaskCompletedEvent.EventId, activityID, activityType, tl, activityInput, 100*time.Second, 10*time.Second, 1*time.Second, 5*time.Second) - 
addActivityTaskStartedEvent(ms, activityScheduledEvent.EventId, identity) - - wfMs := workflow.TestCloneToProto(ms) - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, errors.New("FAILED")) - s.mockShardManager.EXPECT().UpdateShard(gomock.Any(), gomock.Any()).Return(nil).AnyTimes() // might be called in background goroutine - - _, err := s.mockHistoryEngine.RespondActivityTaskCompleted(context.Background(), &historyservice.RespondActivityTaskCompletedRequest{ - NamespaceId: tests.NamespaceID.String(), - CompleteRequest: &workflowservice.RespondActivityTaskCompletedRequest{ - TaskToken: taskToken, - Result: activityResult, - Identity: identity, - }, - }) - s.EqualError(err, "FAILED") -} - -func (s *engineSuite) TestRespondActivityTaskCompletedIfTaskCompleted() { - namespaceID := tests.NamespaceID - we := commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - } - tl := "testTaskQueue" - tt := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: we.WorkflowId, - RunId: we.RunId, - ScheduledEventId: 5, - } - taskToken, _ := tt.Marshal() - identity := "testIdentity" - activityID := "activity1_id" - activityType := "activity_type1" - activityInput := payloads.EncodeString("input1") - activityResult := payloads.EncodeString("activity result") - - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, - tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) - addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) - wt := addWorkflowTaskScheduledEvent(ms) - workflowTaskStartedEvent := addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) - workflowTaskCompletedEvent := addWorkflowTaskCompletedEvent(&s.Suite, ms, wt.ScheduledEventID, workflowTaskStartedEvent.EventId, identity) - activityScheduledEvent, _ := addActivityTaskScheduledEvent(ms, workflowTaskCompletedEvent.EventId, activityID, activityType, tl, activityInput, 100*time.Second, 10*time.Second, 1*time.Second, 5*time.Second) - activityStartedEvent := addActivityTaskStartedEvent(ms, activityScheduledEvent.EventId, identity) - addActivityTaskCompletedEvent(ms, activityScheduledEvent.EventId, activityStartedEvent.EventId, - activityResult, identity) - addWorkflowTaskScheduledEvent(ms) - - wfMs := workflow.TestCloneToProto(ms) - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - - _, err := s.mockHistoryEngine.RespondActivityTaskCompleted(context.Background(), &historyservice.RespondActivityTaskCompletedRequest{ - NamespaceId: tests.NamespaceID.String(), - CompleteRequest: &workflowservice.RespondActivityTaskCompletedRequest{ - TaskToken: taskToken, - Result: activityResult, - Identity: identity, - }, - }) - s.NotNil(err) - s.IsType(&serviceerror.NotFound{}, err) -} - -func (s *engineSuite) TestRespondActivityTaskCompletedIfTaskNotStarted() { - namespaceID := tests.NamespaceID - we := commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - } - tl := "testTaskQueue" - tt := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: 
we.WorkflowId, - RunId: we.RunId, - ScheduledEventId: 5, - } - taskToken, _ := tt.Marshal() - identity := "testIdentity" - activityID := "activity1_id" - activityType := "activity_type1" - activityInput := payloads.EncodeString("input1") - activityResult := payloads.EncodeString("activity result") - - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, - tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) - addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) - wt := addWorkflowTaskScheduledEvent(ms) - workflowTaskStartedEvent := addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) - workflowTaskCompletedEvent := addWorkflowTaskCompletedEvent(&s.Suite, ms, wt.ScheduledEventID, workflowTaskStartedEvent.EventId, identity) - addActivityTaskScheduledEvent(ms, workflowTaskCompletedEvent.EventId, activityID, activityType, tl, activityInput, 100*time.Second, 10*time.Second, 1*time.Second, 5*time.Second) - - wfMs := workflow.TestCloneToProto(ms) - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - - _, err := s.mockHistoryEngine.RespondActivityTaskCompleted(context.Background(), &historyservice.RespondActivityTaskCompletedRequest{ - NamespaceId: tests.NamespaceID.String(), - CompleteRequest: &workflowservice.RespondActivityTaskCompletedRequest{ - TaskToken: taskToken, - Result: activityResult, - Identity: identity, - }, - }) - s.NotNil(err) - s.IsType(&serviceerror.NotFound{}, err) -} - -func (s *engineSuite) TestRespondActivityTaskCompletedConflictOnUpdate() { - namespaceID := tests.NamespaceID - we := commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - } - tl := "testTaskQueue" - tt := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: we.WorkflowId, - RunId: we.RunId, - ScheduledEventId: 5, - } - taskToken, _ := tt.Marshal() - identity := "testIdentity" - activityID := "activity1_id" - activityType := "activity_type1" - activityInput := payloads.EncodeString("input1") - activityResult := payloads.EncodeString("activity result") - - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, - tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) - addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 100*time.Second, 100*time.Second, identity) - wt := addWorkflowTaskScheduledEvent(ms) - workflowTaskStartedEvent := addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) - workflowTaskCompletedEvent := addWorkflowTaskCompletedEvent(&s.Suite, ms, wt.ScheduledEventID, workflowTaskStartedEvent.EventId, identity) - activityScheduledEvent, _ := addActivityTaskScheduledEvent(ms, workflowTaskCompletedEvent.EventId, activityID, activityType, tl, activityInput, 100*time.Second, 10*time.Second, 1*time.Second, 5*time.Second) - addActivityTaskStartedEvent(ms, activityScheduledEvent.EventId, identity) - - wfMs := workflow.TestCloneToProto(ms) - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, &persistence.ConditionFailedError{}) - - _, err 
:= s.mockHistoryEngine.RespondActivityTaskCompleted(context.Background(), &historyservice.RespondActivityTaskCompletedRequest{ - NamespaceId: tests.NamespaceID.String(), - CompleteRequest: &workflowservice.RespondActivityTaskCompletedRequest{ - TaskToken: taskToken, - Result: activityResult, - Identity: identity, - }, - }) - s.Equal(&persistence.ConditionFailedError{}, err) -} - -func (s *engineSuite) TestRespondActivityTaskCompletedSuccess() { - namespaceID := tests.NamespaceID - we := commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - } - tl := "testTaskQueue" - tt := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: we.WorkflowId, - RunId: we.RunId, - ScheduledEventId: 5, - } - taskToken, _ := tt.Marshal() - identity := "testIdentity" - activityID := "activity1_id" - activityType := "activity_type1" - activityInput := payloads.EncodeString("input1") - activityResult := payloads.EncodeString("activity result") - - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, - tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) - addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 100*time.Second, 100*time.Second, identity) - wt := addWorkflowTaskScheduledEvent(ms) - workflowTaskStartedEvent := addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) - workflowTaskCompletedEvent := addWorkflowTaskCompletedEvent(&s.Suite, ms, wt.ScheduledEventID, workflowTaskStartedEvent.EventId, identity) - activityScheduledEvent, _ := addActivityTaskScheduledEvent(ms, workflowTaskCompletedEvent.EventId, activityID, activityType, tl, activityInput, 100*time.Second, 10*time.Second, 1*time.Second, 5*time.Second) - addActivityTaskStartedEvent(ms, activityScheduledEvent.EventId, identity) - - wfMs := workflow.TestCloneToProto(ms) - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) - - _, err := s.mockHistoryEngine.RespondActivityTaskCompleted(context.Background(), &historyservice.RespondActivityTaskCompletedRequest{ - NamespaceId: tests.NamespaceID.String(), - CompleteRequest: &workflowservice.RespondActivityTaskCompletedRequest{ - TaskToken: taskToken, - Result: activityResult, - Identity: identity, - }, - }) - s.NoError(err) - ms2 := s.getMutableState(tests.NamespaceID, we) - s.Equal(int64(9), ms2.GetNextEventID()) - s.Equal(int64(3), ms2.GetExecutionInfo().LastWorkflowTaskStartedEventId) - s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, ms2.GetExecutionState().State) - - s.True(ms2.HasPendingWorkflowTask()) - wt = ms2.GetWorkflowTaskByID(int64(8)) - s.NotNil(wt) - s.EqualValues(int64(100), wt.WorkflowTaskTimeout.Seconds()) - s.Equal(int64(8), wt.ScheduledEventID) - s.Equal(common.EmptyEventID, wt.StartedEventID) -} - -func (s *engineSuite) TestRespondActivityTaskCompletedByIdSuccess() { - namespaceID := tests.NamespaceID - we := commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - } - tl := "testTaskQueue" - - identity := "testIdentity" - activityID := "activity1_id" - activityType := "activity_type1" - activityInput := payloads.EncodeString("input1") - activityResult := payloads.EncodeString("activity result") - tt := &tokenspb.Task{ - Attempt: 1, - NamespaceId: 
namespaceID.String(), - WorkflowId: we.WorkflowId, - ScheduledEventId: common.EmptyEventID, - ActivityId: activityID, - } - taskToken, _ := tt.Marshal() - - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, - tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) - addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 100*time.Second, 100*time.Second, identity) - workflowTaskScheduledEvent := addWorkflowTaskScheduledEvent(ms) - workflowTaskStartedEvent := addWorkflowTaskStartedEvent(ms, workflowTaskScheduledEvent.ScheduledEventID, tl, identity) - workflowTaskCompletedEvent := addWorkflowTaskCompletedEvent(&s.Suite, ms, workflowTaskScheduledEvent.ScheduledEventID, workflowTaskStartedEvent.EventId, identity) - activityScheduledEvent, _ := addActivityTaskScheduledEvent(ms, workflowTaskCompletedEvent.EventId, activityID, activityType, tl, activityInput, 100*time.Second, 10*time.Second, 1*time.Second, 5*time.Second) - addActivityTaskStartedEvent(ms, activityScheduledEvent.EventId, identity) - - wfMs := workflow.TestCloneToProto(ms) - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - gceResponse := &persistence.GetCurrentExecutionResponse{RunID: we.RunId} - - s.mockExecutionMgr.EXPECT().GetCurrentExecution(gomock.Any(), gomock.Any()).Return(gceResponse, nil) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) - - _, err := s.mockHistoryEngine.RespondActivityTaskCompleted(context.Background(), &historyservice.RespondActivityTaskCompletedRequest{ - NamespaceId: tests.NamespaceID.String(), - CompleteRequest: &workflowservice.RespondActivityTaskCompletedRequest{ - TaskToken: taskToken, - Result: activityResult, - Identity: identity, - }, - }) - s.NoError(err) - ms2 := s.getMutableState(tests.NamespaceID, we) - s.Equal(int64(9), ms2.GetNextEventID()) - s.Equal(int64(3), ms2.GetExecutionInfo().LastWorkflowTaskStartedEventId) - s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, ms2.GetExecutionState().State) - - s.True(ms2.HasPendingWorkflowTask()) - wt := ms2.GetWorkflowTaskByID(int64(8)) - s.NotNil(wt) - s.EqualValues(int64(100), wt.WorkflowTaskTimeout.Seconds()) - s.Equal(int64(8), wt.ScheduledEventID) - s.Equal(common.EmptyEventID, wt.StartedEventID) -} - -func (s *engineSuite) TestRespondActivityTaskFailedInvalidToken() { - - invalidToken, _ := json.Marshal("bad token") - identity := "testIdentity" - - _, err := s.mockHistoryEngine.RespondActivityTaskFailed(context.Background(), &historyservice.RespondActivityTaskFailedRequest{ - NamespaceId: tests.NamespaceID.String(), - FailedRequest: &workflowservice.RespondActivityTaskFailedRequest{ - TaskToken: invalidToken, - Identity: identity, - }, - }) - - s.NotNil(err) - s.IsType(&serviceerror.InvalidArgument{}, err) -} - -func (s *engineSuite) TestRespondActivityTaskFailedIfNoExecution() { - namespaceID := tests.NamespaceID - tt := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - ScheduledEventId: 2, - } - taskToken, _ := tt.Marshal() - identity := "testIdentity" - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(nil, - serviceerror.NewNotFound("")) - - _, err := s.mockHistoryEngine.RespondActivityTaskFailed(context.Background(), 
&historyservice.RespondActivityTaskFailedRequest{ - NamespaceId: tests.NamespaceID.String(), - FailedRequest: &workflowservice.RespondActivityTaskFailedRequest{ - TaskToken: taskToken, - Identity: identity, - }, - }) - s.NotNil(err) - s.IsType(&serviceerror.NotFound{}, err) -} - -func (s *engineSuite) TestRespondActivityTaskFailedIfNoRunID() { - namespaceID := tests.NamespaceID - tt := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: tests.WorkflowID, - ScheduledEventId: 2, - } - taskToken, _ := tt.Marshal() - identity := "testIdentity" - - s.mockExecutionMgr.EXPECT().GetCurrentExecution(gomock.Any(), gomock.Any()).Return(nil, - serviceerror.NewNotFound("")) - - _, err := s.mockHistoryEngine.RespondActivityTaskFailed(context.Background(), &historyservice.RespondActivityTaskFailedRequest{ - NamespaceId: tests.NamespaceID.String(), - FailedRequest: &workflowservice.RespondActivityTaskFailedRequest{ - TaskToken: taskToken, - Identity: identity, - }, - }) - s.NotNil(err) - s.IsType(&serviceerror.NotFound{}, err) -} - -func (s *engineSuite) TestRespondActivityTaskFailedIfGetExecutionFailed() { - namespaceID := tests.NamespaceID - tt := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - ScheduledEventId: 2, - } - taskToken, _ := tt.Marshal() - identity := "testIdentity" - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(nil, - errors.New("FAILED")) - - _, err := s.mockHistoryEngine.RespondActivityTaskFailed(context.Background(), &historyservice.RespondActivityTaskFailedRequest{ - NamespaceId: tests.NamespaceID.String(), - FailedRequest: &workflowservice.RespondActivityTaskFailedRequest{ - TaskToken: taskToken, - Identity: identity, - }, - }) - s.EqualError(err, "FAILED") -} - -func (s *engineSuite) TestRespondActivityTaskFailededIfNoAIdProvided() { - namespaceID := tests.NamespaceID - tt := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: tests.WorkflowID, - ScheduledEventId: common.EmptyEventID, - } - taskToken, _ := tt.Marshal() - execution := commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - } - taskqueue := "testTaskQueue" - identity := "testIdentity" - - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, - tests.LocalNamespaceEntry, log.NewTestLogger(), tests.RunID) - addWorkflowExecutionStartedEvent(ms, execution, "wType", taskqueue, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) - addWorkflowTaskScheduledEvent(ms) - wfMs := workflow.TestCloneToProto(ms) - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - gceResponse := &persistence.GetCurrentExecutionResponse{RunID: tests.RunID} - - s.mockExecutionMgr.EXPECT().GetCurrentExecution(gomock.Any(), gomock.Any()).Return(gceResponse, nil) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - - _, err := s.mockHistoryEngine.RespondActivityTaskFailed(context.Background(), &historyservice.RespondActivityTaskFailedRequest{ - NamespaceId: tests.NamespaceID.String(), - FailedRequest: &workflowservice.RespondActivityTaskFailedRequest{ - TaskToken: taskToken, - Identity: identity, - }, - }) - s.EqualError(err, "activityID cannot be empty") -} - -func (s *engineSuite) TestRespondActivityTaskFailededIfNotFound() { - namespaceID := tests.NamespaceID - tt := &tokenspb.Task{ - Attempt: 1, - NamespaceId: 
namespaceID.String(), - WorkflowId: tests.WorkflowID, - ScheduledEventId: common.EmptyEventID, - ActivityId: "aid", - } - taskToken, _ := tt.Marshal() - execution := commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - } - taskqueue := "testTaskQueue" - identity := "testIdentity" - - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, - tests.LocalNamespaceEntry, log.NewTestLogger(), tests.RunID) - addWorkflowExecutionStartedEvent(ms, execution, "wType", taskqueue, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) - addWorkflowTaskScheduledEvent(ms) - wfMs := workflow.TestCloneToProto(ms) - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - gceResponse := &persistence.GetCurrentExecutionResponse{RunID: tests.RunID} - - s.mockExecutionMgr.EXPECT().GetCurrentExecution(gomock.Any(), gomock.Any()).Return(gceResponse, nil) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - - _, err := s.mockHistoryEngine.RespondActivityTaskFailed(context.Background(), &historyservice.RespondActivityTaskFailedRequest{ - NamespaceId: tests.NamespaceID.String(), - FailedRequest: &workflowservice.RespondActivityTaskFailedRequest{ - TaskToken: taskToken, - Identity: identity, - }, - }) - s.Error(err) -} - -func (s *engineSuite) TestRespondActivityTaskFailedUpdateExecutionFailed() { - namespaceID := tests.NamespaceID - we := commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - } - tl := "testTaskQueue" - tt := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: we.WorkflowId, - RunId: we.RunId, - ScheduledEventId: 5, - } - taskToken, _ := tt.Marshal() - identity := "testIdentity" - activityID := "activity1_id" - activityType := "activity_type1" - activityInput := payloads.EncodeString("input1") - - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, - tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) - addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 100*time.Second, 100*time.Second, identity) - wt := addWorkflowTaskScheduledEvent(ms) - workflowTaskStartedEvent := addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) - workflowTaskCompletedEvent := addWorkflowTaskCompletedEvent(&s.Suite, ms, wt.ScheduledEventID, workflowTaskStartedEvent.EventId, identity) - activityScheduledEvent, _ := addActivityTaskScheduledEvent(ms, workflowTaskCompletedEvent.EventId, activityID, activityType, tl, activityInput, 100*time.Second, 10*time.Second, 1*time.Second, 5*time.Second) - addActivityTaskStartedEvent(ms, activityScheduledEvent.EventId, identity) - - wfMs := workflow.TestCloneToProto(ms) - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, errors.New("FAILED")) - s.mockShardManager.EXPECT().UpdateShard(gomock.Any(), gomock.Any()).Return(nil).AnyTimes() // might be called in background goroutine - - _, err := s.mockHistoryEngine.RespondActivityTaskFailed(context.Background(), &historyservice.RespondActivityTaskFailedRequest{ - NamespaceId: tests.NamespaceID.String(), - FailedRequest: &workflowservice.RespondActivityTaskFailedRequest{ - 
TaskToken: taskToken, - Identity: identity, - }, - }) - s.EqualError(err, "FAILED") -} - -func (s *engineSuite) TestRespondActivityTaskFailedIfTaskCompleted() { - namespaceID := tests.NamespaceID - we := commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - } - tl := "testTaskQueue" - tt := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: we.WorkflowId, - RunId: we.RunId, - ScheduledEventId: 5, - } - taskToken, _ := tt.Marshal() - identity := "testIdentity" - activityID := "activity1_id" - activityType := "activity_type1" - activityInput := payloads.EncodeString("input1") - failure := failure.NewServerFailure("fail reason", true) - - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, - tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) - addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 100*time.Second, 100*time.Second, identity) - wt := addWorkflowTaskScheduledEvent(ms) - workflowTaskStartedEvent := addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) - workflowTaskCompletedEvent := addWorkflowTaskCompletedEvent(&s.Suite, ms, wt.ScheduledEventID, workflowTaskStartedEvent.EventId, identity) - activityScheduledEvent, _ := addActivityTaskScheduledEvent(ms, workflowTaskCompletedEvent.EventId, activityID, activityType, tl, activityInput, 100*time.Second, 10*time.Second, 1*time.Second, 5*time.Second) - activityStartedEvent := addActivityTaskStartedEvent(ms, activityScheduledEvent.EventId, identity) - addActivityTaskFailedEvent(ms, activityScheduledEvent.EventId, activityStartedEvent.EventId, failure, enumspb.RETRY_STATE_NON_RETRYABLE_FAILURE, identity) - addWorkflowTaskScheduledEvent(ms) - - wfMs := workflow.TestCloneToProto(ms) - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - - _, err := s.mockHistoryEngine.RespondActivityTaskFailed(context.Background(), &historyservice.RespondActivityTaskFailedRequest{ - NamespaceId: tests.NamespaceID.String(), - FailedRequest: &workflowservice.RespondActivityTaskFailedRequest{ - TaskToken: taskToken, - Failure: failure, - Identity: identity, - }, - }) - s.NotNil(err) - s.IsType(&serviceerror.NotFound{}, err) -} - -func (s *engineSuite) TestRespondActivityTaskFailedIfTaskNotStarted() { - namespaceID := tests.NamespaceID - we := commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - } - tl := "testTaskQueue" - tt := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: we.WorkflowId, - RunId: we.RunId, - ScheduledEventId: 5, - } - taskToken, _ := tt.Marshal() - identity := "testIdentity" - activityID := "activity1_id" - activityType := "activity_type1" - activityInput := payloads.EncodeString("input1") - - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, - tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) - addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 100*time.Second, 100*time.Second, identity) - wt := addWorkflowTaskScheduledEvent(ms) - workflowTaskStartedEvent := addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) - workflowTaskCompletedEvent := addWorkflowTaskCompletedEvent(&s.Suite, ms, wt.ScheduledEventID, workflowTaskStartedEvent.EventId, identity) - addActivityTaskScheduledEvent(ms, 
workflowTaskCompletedEvent.EventId, activityID, activityType, tl, activityInput, 100*time.Second, 10*time.Second, 1*time.Second, 5*time.Second) - - wfMs := workflow.TestCloneToProto(ms) - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - - _, err := s.mockHistoryEngine.RespondActivityTaskFailed(context.Background(), &historyservice.RespondActivityTaskFailedRequest{ - NamespaceId: tests.NamespaceID.String(), - FailedRequest: &workflowservice.RespondActivityTaskFailedRequest{ - TaskToken: taskToken, - Identity: identity, - }, - }) - s.NotNil(err) - s.IsType(&serviceerror.NotFound{}, err) -} - -func (s *engineSuite) TestRespondActivityTaskFailedConflictOnUpdate() { - namespaceID := tests.NamespaceID - we := commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - } - tl := "testTaskQueue" - tt := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: we.WorkflowId, - RunId: we.RunId, - ScheduledEventId: 5, - } - taskToken, _ := tt.Marshal() - identity := "testIdentity" - activityID := "activity1_id" - activityType := "activity_type1" - activityInput := payloads.EncodeString("input1") - - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, - tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) - addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 100*time.Second, 100*time.Second, identity) - wt := addWorkflowTaskScheduledEvent(ms) - workflowTaskStartedEvent := addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) - workflowTaskCompletedEvent := addWorkflowTaskCompletedEvent(&s.Suite, ms, wt.ScheduledEventID, workflowTaskStartedEvent.EventId, identity) - activityScheduledEvent, _ := addActivityTaskScheduledEvent(ms, workflowTaskCompletedEvent.EventId, activityID, activityType, tl, activityInput, 100*time.Second, 10*time.Second, 1*time.Second, 5*time.Second) - addActivityTaskStartedEvent(ms, activityScheduledEvent.EventId, identity) - - wfMs := workflow.TestCloneToProto(ms) - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, &persistence.ConditionFailedError{}) - - _, err := s.mockHistoryEngine.RespondActivityTaskFailed(context.Background(), &historyservice.RespondActivityTaskFailedRequest{ - NamespaceId: tests.NamespaceID.String(), - FailedRequest: &workflowservice.RespondActivityTaskFailedRequest{ - TaskToken: taskToken, - Identity: identity, - }, - }) - s.Equal(&persistence.ConditionFailedError{}, err) -} - -func (s *engineSuite) TestRespondActivityTaskFailedSuccess() { - namespaceID := tests.NamespaceID - we := commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - } - tl := "testTaskQueue" - tt := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: we.WorkflowId, - RunId: we.RunId, - ScheduledEventId: 5, - } - taskToken, _ := tt.Marshal() - identity := "testIdentity" - activityID := "activity1_id" - activityType := "activity_type1" - activityInput := payloads.EncodeString("input1") - failure := failure.NewServerFailure("failed", false) - - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, 
s.eventsCache, - tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) - addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 100*time.Second, 100*time.Second, identity) - wt := addWorkflowTaskScheduledEvent(ms) - workflowTaskStartedEvent := addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) - workflowTaskCompletedEvent := addWorkflowTaskCompletedEvent(&s.Suite, ms, wt.ScheduledEventID, workflowTaskStartedEvent.EventId, identity) - activityScheduledEvent, _ := addActivityTaskScheduledEvent(ms, workflowTaskCompletedEvent.EventId, activityID, activityType, tl, activityInput, 100*time.Second, 10*time.Second, 1*time.Second, 5*time.Second) - addActivityTaskStartedEvent(ms, activityScheduledEvent.EventId, identity) - - wfMs := workflow.TestCloneToProto(ms) - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) - - _, err := s.mockHistoryEngine.RespondActivityTaskFailed(context.Background(), &historyservice.RespondActivityTaskFailedRequest{ - NamespaceId: tests.NamespaceID.String(), - FailedRequest: &workflowservice.RespondActivityTaskFailedRequest{ - TaskToken: taskToken, - Failure: failure, - Identity: identity, - }, - }) - s.Nil(err) - ms2 := s.getMutableState(tests.NamespaceID, we) - s.Equal(int64(9), ms2.GetNextEventID()) - s.Equal(int64(3), ms2.GetExecutionInfo().LastWorkflowTaskStartedEventId) - s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, ms2.GetExecutionState().State) - - s.True(ms2.HasPendingWorkflowTask()) - wt = ms2.GetWorkflowTaskByID(int64(8)) - s.NotNil(wt) - s.EqualValues(int64(100), wt.WorkflowTaskTimeout.Seconds()) - s.Equal(int64(8), wt.ScheduledEventID) - s.Equal(common.EmptyEventID, wt.StartedEventID) -} - -func (s *engineSuite) TestRespondActivityTaskFailedWithHeartbeatSuccess() { - namespaceID := tests.NamespaceID - we := commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - } - tl := "testTaskQueue" - tt := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: we.WorkflowId, - RunId: we.RunId, - ScheduledEventId: 5, - } - taskToken, _ := tt.Marshal() - identity := "testIdentity" - activityID := "activity1_id" - activityType := "activity_type1" - activityInput := payloads.EncodeString("input1") - failure := failure.NewServerFailure("failed", false) - - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, - tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) - addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 100*time.Second, 100*time.Second, identity) - wt := addWorkflowTaskScheduledEvent(ms) - workflowTaskStartedEvent := addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) - workflowTaskCompletedEvent := addWorkflowTaskCompletedEvent(&s.Suite, ms, wt.ScheduledEventID, workflowTaskStartedEvent.EventId, identity) - activityScheduledEvent, activityInfo := addActivityTaskScheduledEvent(ms, workflowTaskCompletedEvent.EventId, activityID, activityType, tl, activityInput, 100*time.Second, 10*time.Second, 1*time.Second, 5*time.Second) - addActivityTaskStartedEvent(ms, activityScheduledEvent.EventId, identity) - - wfMs := workflow.TestCloneToProto(ms) - 
wfMs.ActivityInfos[activityInfo.ScheduledEventId] = activityInfo - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) - - details := payloads.EncodeString("details") - - s.Nil(activityInfo.GetLastHeartbeatDetails()) - - _, err := s.mockHistoryEngine.RespondActivityTaskFailed(context.Background(), &historyservice.RespondActivityTaskFailedRequest{ - NamespaceId: tests.NamespaceID.String(), - FailedRequest: &workflowservice.RespondActivityTaskFailedRequest{ - TaskToken: taskToken, - Failure: failure, - Identity: identity, - LastHeartbeatDetails: details, - }, - }) - s.Nil(err) - ms2 := s.getMutableState(tests.NamespaceID, we) - s.Equal(int64(9), ms2.GetNextEventID()) - s.Equal(int64(3), ms2.GetExecutionInfo().LastWorkflowTaskStartedEventId) - s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, ms2.GetExecutionState().State) - - s.True(ms2.HasPendingWorkflowTask()) - wt = ms2.GetWorkflowTaskByID(int64(8)) - s.NotNil(wt) - s.EqualValues(int64(100), wt.WorkflowTaskTimeout.Seconds()) - s.Equal(int64(8), wt.ScheduledEventID) - s.Equal(common.EmptyEventID, wt.StartedEventID) - - s.NotNil(activityInfo.GetLastHeartbeatDetails()) -} - -func (s *engineSuite) TestRespondActivityTaskFailedByIdSuccess() { - namespaceID := tests.NamespaceID - we := commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - } - tl := "testTaskQueue" - - identity := "testIdentity" - activityID := "activity1_id" - activityType := "activity_type1" - activityInput := payloads.EncodeString("input1") - failure := failure.NewServerFailure("failed", false) - tt := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: we.WorkflowId, - ScheduledEventId: common.EmptyEventID, - ActivityId: activityID, - } - taskToken, _ := tt.Marshal() - - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, - tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) - addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 100*time.Second, 100*time.Second, identity) - workflowTaskScheduledEvent := addWorkflowTaskScheduledEvent(ms) - workflowTaskStartedEvent := addWorkflowTaskStartedEvent(ms, workflowTaskScheduledEvent.ScheduledEventID, tl, identity) - workflowTaskCompletedEvent := addWorkflowTaskCompletedEvent(&s.Suite, ms, workflowTaskScheduledEvent.ScheduledEventID, workflowTaskStartedEvent.EventId, identity) - activityScheduledEvent, _ := addActivityTaskScheduledEvent(ms, workflowTaskCompletedEvent.EventId, activityID, activityType, tl, activityInput, 100*time.Second, 10*time.Second, 1*time.Second, 5*time.Second) - addActivityTaskStartedEvent(ms, activityScheduledEvent.EventId, identity) - - wfMs := workflow.TestCloneToProto(ms) - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - gceResponse := &persistence.GetCurrentExecutionResponse{RunID: we.RunId} - - s.mockExecutionMgr.EXPECT().GetCurrentExecution(gomock.Any(), gomock.Any()).Return(gceResponse, nil) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) - - _, err := 
s.mockHistoryEngine.RespondActivityTaskFailed(context.Background(), &historyservice.RespondActivityTaskFailedRequest{ - NamespaceId: tests.NamespaceID.String(), - FailedRequest: &workflowservice.RespondActivityTaskFailedRequest{ - TaskToken: taskToken, - Failure: failure, - Identity: identity, - }, - }) - s.Nil(err) - ms2 := s.getMutableState(tests.NamespaceID, we) - s.Equal(int64(9), ms2.GetNextEventID()) - s.Equal(int64(3), ms2.GetExecutionInfo().LastWorkflowTaskStartedEventId) - s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, ms2.GetExecutionState().State) - - s.True(ms2.HasPendingWorkflowTask()) - wt := ms2.GetWorkflowTaskByID(int64(8)) - s.NotNil(wt) - s.EqualValues(int64(100), wt.WorkflowTaskTimeout.Seconds()) - s.Equal(int64(8), wt.ScheduledEventID) - s.Equal(common.EmptyEventID, wt.StartedEventID) -} - -func (s *engineSuite) TestRecordActivityTaskHeartBeatSuccess_NoTimer() { - namespaceID := tests.NamespaceID - we := commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - } - tl := "testTaskQueue" - tt := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: we.WorkflowId, - RunId: we.RunId, - ScheduledEventId: 5, - } - taskToken, _ := tt.Marshal() - identity := "testIdentity" - activityID := "activity1_id" - activityType := "activity_type1" - activityInput := payloads.EncodeString("input1") - - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, - tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) - addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 100*time.Second, 100*time.Second, identity) - wt := addWorkflowTaskScheduledEvent(ms) - workflowTaskStartedEvent := addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) - workflowTaskCompletedEvent := addWorkflowTaskCompletedEvent(&s.Suite, ms, wt.ScheduledEventID, workflowTaskStartedEvent.EventId, identity) - activityScheduledEvent, _ := addActivityTaskScheduledEvent(ms, workflowTaskCompletedEvent.EventId, activityID, activityType, tl, activityInput, 100*time.Second, 10*time.Second, 1*time.Second, 0*time.Second) - addActivityTaskStartedEvent(ms, activityScheduledEvent.EventId, identity) - - // No HeartBeat timer running. 
-    wfMs := workflow.TestCloneToProto(ms)
-    gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs}
-    s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil)
-    s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil)
-
-    detais := payloads.EncodeString("details")
-
-    _, err := s.mockHistoryEngine.RecordActivityTaskHeartbeat(context.Background(), &historyservice.RecordActivityTaskHeartbeatRequest{
-        NamespaceId: tests.NamespaceID.String(),
-        HeartbeatRequest: &workflowservice.RecordActivityTaskHeartbeatRequest{
-            TaskToken: taskToken,
-            Identity: identity,
-            Details: detais,
-        },
-    })
-    s.Nil(err)
-}
-
-func (s *engineSuite) TestRecordActivityTaskHeartBeatSuccess_TimerRunning() {
-    namespaceID := tests.NamespaceID
-    we := commonpb.WorkflowExecution{
-        WorkflowId: tests.WorkflowID,
-        RunId: tests.RunID,
-    }
-    tl := "testTaskQueue"
-    tt := &tokenspb.Task{
-        Attempt: 1,
-        NamespaceId: namespaceID.String(),
-        WorkflowId: we.WorkflowId,
-        RunId: we.RunId,
-        ScheduledEventId: 5,
-    }
-    taskToken, _ := tt.Marshal()
-    identity := "testIdentity"
-    activityID := "activity1_id"
-    activityType := "activity_type1"
-    activityInput := payloads.EncodeString("input1")
-
-    ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache,
-        tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId())
-    addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 100*time.Second, 100*time.Second, identity)
-    wt := addWorkflowTaskScheduledEvent(ms)
-    workflowTaskStartedEvent := addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity)
-    workflowTaskCompletedEvent := addWorkflowTaskCompletedEvent(&s.Suite, ms, wt.ScheduledEventID, workflowTaskStartedEvent.EventId, identity)
-    activityScheduledEvent, _ := addActivityTaskScheduledEvent(ms, workflowTaskCompletedEvent.EventId, activityID, activityType, tl, activityInput, 100*time.Second, 10*time.Second, 1*time.Second, 1*time.Second)
-    addActivityTaskStartedEvent(ms, activityScheduledEvent.EventId, identity)
-
-    wfMs := workflow.TestCloneToProto(ms)
-    gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs}
-
-    // HeartBeat timer running.
-    s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil)
-    s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil)
-
-    detais := payloads.EncodeString("details")
-
-    _, err := s.mockHistoryEngine.RecordActivityTaskHeartbeat(context.Background(), &historyservice.RecordActivityTaskHeartbeatRequest{
-        NamespaceId: tests.NamespaceID.String(),
-        HeartbeatRequest: &workflowservice.RecordActivityTaskHeartbeatRequest{
-            TaskToken: taskToken,
-            Identity: identity,
-            Details: detais,
-        },
-    })
-    s.Nil(err)
-    ms2 := s.getMutableState(tests.NamespaceID, we)
-    s.Equal(int64(7), ms2.GetNextEventID())
-    s.Equal(int64(3), ms2.GetExecutionInfo().LastWorkflowTaskStartedEventId)
-    s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, ms2.GetExecutionState().State)
-    s.False(ms2.HasPendingWorkflowTask())
-}
-
-func (s *engineSuite) TestRecordActivityTaskHeartBeatByIDSuccess() {
-    namespaceID := tests.NamespaceID
-    we := commonpb.WorkflowExecution{
-        WorkflowId: tests.WorkflowID,
-        RunId: tests.RunID,
-    }
-    tl := "testTaskQueue"
-    identity := "testIdentity"
-    activityID := "activity1_id"
-    activityType := "activity_type1"
-    activityInput := payloads.EncodeString("input1")
-    tt := &tokenspb.Task{
-        Attempt: 1,
-        NamespaceId: namespaceID.String(),
-        WorkflowId: we.WorkflowId,
-        RunId: we.RunId,
-        ScheduledEventId: common.EmptyEventID,
-        ActivityId: activityID,
-    }
-    taskToken, _ := tt.Marshal()
-
-    ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache,
-        tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId())
-    addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 100*time.Second, 100*time.Second, identity)
-    wt := addWorkflowTaskScheduledEvent(ms)
-    workflowTaskStartedEvent := addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity)
-    workflowTaskCompletedEvent := addWorkflowTaskCompletedEvent(&s.Suite, ms, wt.ScheduledEventID, workflowTaskStartedEvent.EventId, identity)
-    activityScheduledEvent, _ := addActivityTaskScheduledEvent(ms, workflowTaskCompletedEvent.EventId, activityID, activityType, tl, activityInput, 100*time.Second, 10*time.Second, 1*time.Second, 0*time.Second)
-    addActivityTaskStartedEvent(ms, activityScheduledEvent.EventId, identity)
-
-    // No HeartBeat timer running.
- wfMs := workflow.TestCloneToProto(ms) - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) - - detais := payloads.EncodeString("details") - - _, err := s.mockHistoryEngine.RecordActivityTaskHeartbeat(context.Background(), &historyservice.RecordActivityTaskHeartbeatRequest{ - NamespaceId: tests.NamespaceID.String(), - HeartbeatRequest: &workflowservice.RecordActivityTaskHeartbeatRequest{ - TaskToken: taskToken, - Identity: identity, - Details: detais, - }, - }) - s.Nil(err) -} - -func (s *engineSuite) TestRespondActivityTaskCanceled_Scheduled() { - namespaceID := tests.NamespaceID - we := commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - } - tl := "testTaskQueue" - tt := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: we.WorkflowId, - RunId: we.RunId, - ScheduledEventId: 5, - } - taskToken, _ := tt.Marshal() - identity := "testIdentity" - activityID := "activity1_id" - activityType := "activity_type1" - activityInput := payloads.EncodeString("input1") - - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, - tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) - addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 100*time.Second, 100*time.Second, identity) - wt := addWorkflowTaskScheduledEvent(ms) - workflowTaskStartedEvent := addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) - workflowTaskCompletedEvent := addWorkflowTaskCompletedEvent(&s.Suite, ms, wt.ScheduledEventID, workflowTaskStartedEvent.EventId, identity) - addActivityTaskScheduledEvent(ms, workflowTaskCompletedEvent.EventId, activityID, activityType, tl, activityInput, 100*time.Second, 10*time.Second, 1*time.Second, 1*time.Second) - - wfMs := workflow.TestCloneToProto(ms) - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - - _, err := s.mockHistoryEngine.RespondActivityTaskCanceled(context.Background(), &historyservice.RespondActivityTaskCanceledRequest{ - NamespaceId: tests.NamespaceID.String(), - CancelRequest: &workflowservice.RespondActivityTaskCanceledRequest{ - TaskToken: taskToken, - Identity: identity, - Details: payloads.EncodeString("details"), - }, - }) - s.NotNil(err) - s.IsType(&serviceerror.NotFound{}, err) -} - -func (s *engineSuite) TestRespondActivityTaskCanceled_Started() { - namespaceID := tests.NamespaceID - we := commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - } - tl := "testTaskQueue" - tt := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: we.WorkflowId, - RunId: we.RunId, - ScheduledEventId: 5, - } - taskToken, _ := tt.Marshal() - identity := "testIdentity" - activityID := "activity1_id" - activityType := "activity_type1" - activityInput := payloads.EncodeString("input1") - - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, - tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) - addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 100*time.Second, 100*time.Second, identity) - wt := 
addWorkflowTaskScheduledEvent(ms) - workflowTaskStartedEvent := addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) - workflowTaskCompletedEvent := addWorkflowTaskCompletedEvent(&s.Suite, ms, wt.ScheduledEventID, workflowTaskStartedEvent.EventId, identity) - activityScheduledEvent, _ := addActivityTaskScheduledEvent(ms, workflowTaskCompletedEvent.EventId, activityID, activityType, tl, activityInput, 100*time.Second, 10*time.Second, 1*time.Second, 1*time.Second) - addActivityTaskStartedEvent(ms, activityScheduledEvent.EventId, identity) - _, _, err := ms.AddActivityTaskCancelRequestedEvent(workflowTaskCompletedEvent.EventId, activityScheduledEvent.EventId, identity) - s.Nil(err) - - wfMs := workflow.TestCloneToProto(ms) - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) - - _, err = s.mockHistoryEngine.RespondActivityTaskCanceled(context.Background(), &historyservice.RespondActivityTaskCanceledRequest{ - NamespaceId: tests.NamespaceID.String(), - CancelRequest: &workflowservice.RespondActivityTaskCanceledRequest{ - TaskToken: taskToken, - Identity: identity, - Details: payloads.EncodeString("details"), - }, - }) - s.Nil(err) - ms2 := s.getMutableState(tests.NamespaceID, we) - s.Equal(int64(10), ms2.GetNextEventID()) - s.Equal(int64(3), ms2.GetExecutionInfo().LastWorkflowTaskStartedEventId) - s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, ms2.GetExecutionState().State) - - s.True(ms2.HasPendingWorkflowTask()) - wt = ms2.GetWorkflowTaskByID(int64(9)) - s.NotNil(wt) - s.EqualValues(int64(100), wt.WorkflowTaskTimeout.Seconds()) - s.Equal(int64(9), wt.ScheduledEventID) - s.Equal(common.EmptyEventID, wt.StartedEventID) -} - -func (s *engineSuite) TestRespondActivityTaskCanceledById_Started() { - namespaceID := tests.NamespaceID - we := commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - } - tl := "testTaskQueue" - identity := "testIdentity" - activityID := "activity1_id" - activityType := "activity_type1" - activityInput := payloads.EncodeString("input1") - tt := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: we.WorkflowId, - ScheduledEventId: common.EmptyEventID, - ActivityId: activityID, - } - taskToken, _ := tt.Marshal() - - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, - tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) - addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 100*time.Second, 100*time.Second, identity) - workflowTaskScheduledEvent := addWorkflowTaskScheduledEvent(ms) - workflowTaskStartedEvent := addWorkflowTaskStartedEvent(ms, workflowTaskScheduledEvent.ScheduledEventID, tl, identity) - workflowTaskCompletedEvent := addWorkflowTaskCompletedEvent(&s.Suite, ms, workflowTaskScheduledEvent.ScheduledEventID, workflowTaskStartedEvent.EventId, identity) - activityScheduledEvent, _ := addActivityTaskScheduledEvent(ms, workflowTaskCompletedEvent.EventId, activityID, activityType, tl, activityInput, 100*time.Second, 10*time.Second, 1*time.Second, 1*time.Second) - addActivityTaskStartedEvent(ms, activityScheduledEvent.EventId, identity) - _, _, err := ms.AddActivityTaskCancelRequestedEvent(workflowTaskCompletedEvent.EventId, 
activityScheduledEvent.EventId, identity) - s.Nil(err) - - wfMs := workflow.TestCloneToProto(ms) - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - gceResponse := &persistence.GetCurrentExecutionResponse{RunID: we.RunId} - - s.mockExecutionMgr.EXPECT().GetCurrentExecution(gomock.Any(), gomock.Any()).Return(gceResponse, nil) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) - - _, err = s.mockHistoryEngine.RespondActivityTaskCanceled(context.Background(), &historyservice.RespondActivityTaskCanceledRequest{ - NamespaceId: tests.NamespaceID.String(), - CancelRequest: &workflowservice.RespondActivityTaskCanceledRequest{ - TaskToken: taskToken, - Identity: identity, - Details: payloads.EncodeString("details"), - }, - }) - s.Nil(err) - ms2 := s.getMutableState(tests.NamespaceID, we) - s.Equal(int64(10), ms2.GetNextEventID()) - s.Equal(int64(3), ms2.GetExecutionInfo().LastWorkflowTaskStartedEventId) - s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, ms2.GetExecutionState().State) - - s.True(ms2.HasPendingWorkflowTask()) - wt := ms2.GetWorkflowTaskByID(int64(9)) - s.NotNil(wt) - s.EqualValues(int64(100), wt.WorkflowTaskTimeout.Seconds()) - s.Equal(int64(9), wt.ScheduledEventID) - s.Equal(common.EmptyEventID, wt.StartedEventID) -} - -func (s *engineSuite) TestRespondActivityTaskCanceledIfNoRunID() { - namespaceID := tests.NamespaceID - tt := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: tests.WorkflowID, - ScheduledEventId: 2, - } - taskToken, _ := tt.Marshal() - identity := "testIdentity" - - s.mockExecutionMgr.EXPECT().GetCurrentExecution(gomock.Any(), gomock.Any()).Return(nil, serviceerror.NewNotFound("")) - - _, err := s.mockHistoryEngine.RespondActivityTaskCanceled(context.Background(), &historyservice.RespondActivityTaskCanceledRequest{ - NamespaceId: tests.NamespaceID.String(), - CancelRequest: &workflowservice.RespondActivityTaskCanceledRequest{ - TaskToken: taskToken, - Identity: identity, - }, - }) - s.NotNil(err) - s.IsType(&serviceerror.NotFound{}, err) -} - -func (s *engineSuite) TestRespondActivityTaskCanceledIfNoAIdProvided() { - namespaceID := tests.NamespaceID - workflowExecution := commonpb.WorkflowExecution{ - WorkflowId: "test-respond-activity-task-canceled-if-no-activity-id-provided", - RunId: tests.RunID, - } - taskqueue := "testTaskQueue" - - tt := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: tests.WorkflowID, - ScheduledEventId: common.EmptyEventID, - } - taskToken, _ := tt.Marshal() - identity := "testIdentity" - - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, - tests.LocalNamespaceEntry, log.NewTestLogger(), tests.RunID) - // Add dummy event - addWorkflowExecutionStartedEvent(ms, workflowExecution, "wType", taskqueue, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) - wfMs := workflow.TestCloneToProto(ms) - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - gceResponse := &persistence.GetCurrentExecutionResponse{RunID: tests.RunID} - - s.mockExecutionMgr.EXPECT().GetCurrentExecution(gomock.Any(), gomock.Any()).Return(gceResponse, nil) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - - _, err := 
s.mockHistoryEngine.RespondActivityTaskCanceled(context.Background(), &historyservice.RespondActivityTaskCanceledRequest{ - NamespaceId: tests.NamespaceID.String(), - CancelRequest: &workflowservice.RespondActivityTaskCanceledRequest{ - TaskToken: taskToken, - Identity: identity, - }, - }) - s.EqualError(err, "activityID cannot be empty") -} - -func (s *engineSuite) TestRespondActivityTaskCanceledIfNotFound() { - namespaceID := tests.NamespaceID - workflowExecution := commonpb.WorkflowExecution{ - WorkflowId: "test-respond-activity-task-canceled-if-not-found", - RunId: tests.RunID, - } - taskqueue := "testTaskQueue" - - tt := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: tests.WorkflowID, - ScheduledEventId: common.EmptyEventID, - ActivityId: "aid", - } - taskToken, _ := tt.Marshal() - identity := "testIdentity" - - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, - tests.LocalNamespaceEntry, log.NewTestLogger(), tests.RunID) - // Add dummy event - addWorkflowExecutionStartedEvent(ms, workflowExecution, "wType", taskqueue, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) - wfMs := workflow.TestCloneToProto(ms) - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - gceResponse := &persistence.GetCurrentExecutionResponse{RunID: tests.RunID} - - s.mockExecutionMgr.EXPECT().GetCurrentExecution(gomock.Any(), gomock.Any()).Return(gceResponse, nil) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - - _, err := s.mockHistoryEngine.RespondActivityTaskCanceled(context.Background(), &historyservice.RespondActivityTaskCanceledRequest{ - NamespaceId: tests.NamespaceID.String(), - CancelRequest: &workflowservice.RespondActivityTaskCanceledRequest{ - TaskToken: taskToken, - Identity: identity, - }, - }) - s.Error(err) -} - -func (s *engineSuite) TestRequestCancel_RespondWorkflowTaskCompleted_NotScheduled() { - namespaceID := tests.NamespaceID - we := commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - } - tl := "testTaskQueue" - tt := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: we.WorkflowId, - RunId: we.RunId, - ScheduledEventId: 2, - } - taskToken, _ := tt.Marshal() - identity := "testIdentity" - activityScheduledEventID := int64(99) - - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, - tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) - addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 100*time.Second, 100*time.Second, identity) - wt := addWorkflowTaskScheduledEvent(ms) - addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) - - commands := []*commandpb.Command{{ - CommandType: enumspb.COMMAND_TYPE_REQUEST_CANCEL_ACTIVITY_TASK, - Attributes: &commandpb.Command_RequestCancelActivityTaskCommandAttributes{RequestCancelActivityTaskCommandAttributes: &commandpb.RequestCancelActivityTaskCommandAttributes{ - ScheduledEventId: activityScheduledEventID, - }}, - }} - - ms1 := workflow.TestCloneToProto(ms) - gwmsResponse1 := &persistence.GetWorkflowExecutionResponse{State: ms1} - ms2 := workflow.TestCloneToProto(ms) - gwmsResponse2 := &persistence.GetWorkflowExecutionResponse{State: ms2} - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse1, nil) - 
s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse2, nil) - var updatedWorkflowMutation persistence.WorkflowMutation - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).DoAndReturn(func(_ context.Context, request *persistence.UpdateWorkflowExecutionRequest) (*persistence.UpdateWorkflowExecutionResponse, error) { - updatedWorkflowMutation = request.UpdateWorkflowMutation - return tests.UpdateWorkflowExecutionResponse, nil - }) - - _, err := s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ - NamespaceId: tests.NamespaceID.String(), - CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ - TaskToken: taskToken, - Commands: commands, - Identity: identity, - }, - }) - s.Error(err) - s.IsType(&serviceerror.InvalidArgument{}, err) - s.Equal("BadRequestCancelActivityAttributes: invalid history builder state for action: add-activitytask-cancel-requested-event, ScheduledEventID: 99", err.Error()) - s.NotNil(updatedWorkflowMutation) - s.Equal(int64(5), updatedWorkflowMutation.NextEventID) - s.Equal(common.EmptyEventID, updatedWorkflowMutation.ExecutionInfo.LastWorkflowTaskStartedEventId) - s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, updatedWorkflowMutation.ExecutionState.State) - s.True(updatedWorkflowMutation.ExecutionInfo.WorkflowTaskScheduledEventId != common.EmptyEventID) -} - -func (s *engineSuite) TestRequestCancel_RespondWorkflowTaskCompleted_Scheduled() { - namespaceID := tests.NamespaceID - we := commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - } - tl := "testTaskQueue" - tt := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: we.WorkflowId, - RunId: we.RunId, - ScheduledEventId: 6, - } - taskToken, _ := tt.Marshal() - identity := "testIdentity" - activityID := "activity1_id" - activityType := "activity_type1" - activityInput := payloads.EncodeString("input1") - - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, - tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) - addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 100*time.Second, 100*time.Second, identity) - wt := addWorkflowTaskScheduledEvent(ms) - workflowTaskStartedEvent := addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) - workflowTaskCompletedEvent := addWorkflowTaskCompletedEvent(&s.Suite, ms, wt.ScheduledEventID, workflowTaskStartedEvent.EventId, identity) - _, aInfo := addActivityTaskScheduledEvent(ms, workflowTaskCompletedEvent.EventId, activityID, activityType, tl, activityInput, 100*time.Second, 10*time.Second, 1*time.Second, 1*time.Second) - wt2 := addWorkflowTaskScheduledEvent(ms) - addWorkflowTaskStartedEvent(ms, wt2.ScheduledEventID, tl, identity) - - wfMs := workflow.TestCloneToProto(ms) - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - - commands := []*commandpb.Command{{ - CommandType: enumspb.COMMAND_TYPE_REQUEST_CANCEL_ACTIVITY_TASK, - Attributes: &commandpb.Command_RequestCancelActivityTaskCommandAttributes{RequestCancelActivityTaskCommandAttributes: &commandpb.RequestCancelActivityTaskCommandAttributes{ - ScheduledEventId: aInfo.ScheduledEventId, - }}, - }} - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), 
gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) - - _, err := s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ - NamespaceId: tests.NamespaceID.String(), - CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ - TaskToken: taskToken, - Commands: commands, - Identity: identity, - }, - }) - s.Nil(err) - - ms2 := s.getMutableState(tests.NamespaceID, we) - s.Equal(int64(12), ms2.GetNextEventID()) - s.Equal(int64(7), ms2.GetExecutionInfo().LastWorkflowTaskStartedEventId) - s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, ms2.GetExecutionState().State) - s.True(ms2.HasPendingWorkflowTask()) - wt2 = ms2.GetWorkflowTaskByID(ms2.GetNextEventID() - 1) - s.NotNil(wt2) - s.Equal(ms2.GetNextEventID()-1, wt2.ScheduledEventID) - s.Equal(int32(1), wt2.Attempt) -} - -func (s *engineSuite) TestRequestCancel_RespondWorkflowTaskCompleted_Started() { - namespaceID := tests.NamespaceID - we := commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - } - tl := "testTaskQueue" - tt := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: we.WorkflowId, - RunId: we.RunId, - ScheduledEventId: 7, - } - taskToken, _ := tt.Marshal() - identity := "testIdentity" - activityID := "activity1_id" - activityType := "activity_type1" - activityInput := payloads.EncodeString("input1") - - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, - tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) - addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 100*time.Second, 100*time.Second, identity) - wt := addWorkflowTaskScheduledEvent(ms) - workflowTaskStartedEvent := addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) - workflowTaskCompletedEvent := addWorkflowTaskCompletedEvent(&s.Suite, ms, wt.ScheduledEventID, workflowTaskStartedEvent.EventId, identity) - activityScheduledEvent, _ := addActivityTaskScheduledEvent(ms, workflowTaskCompletedEvent.EventId, activityID, activityType, tl, activityInput, 100*time.Second, 10*time.Second, 1*time.Second, 0*time.Second) - addActivityTaskStartedEvent(ms, activityScheduledEvent.EventId, identity) - wt2 := addWorkflowTaskScheduledEvent(ms) - addWorkflowTaskStartedEvent(ms, wt2.ScheduledEventID, tl, identity) - - wfMs := workflow.TestCloneToProto(ms) - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - - commands := []*commandpb.Command{{ - CommandType: enumspb.COMMAND_TYPE_REQUEST_CANCEL_ACTIVITY_TASK, - Attributes: &commandpb.Command_RequestCancelActivityTaskCommandAttributes{RequestCancelActivityTaskCommandAttributes: &commandpb.RequestCancelActivityTaskCommandAttributes{ - ScheduledEventId: activityScheduledEvent.GetEventId(), - }}, - }} - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) - - _, err := s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ - NamespaceId: tests.NamespaceID.String(), - CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ - TaskToken: taskToken, - Commands: commands, - Identity: identity, - }, - }) - s.Nil(err) - - ms2 := s.getMutableState(tests.NamespaceID, we) - s.Equal(int64(11), ms2.GetNextEventID()) - 
s.Equal(int64(8), ms2.GetExecutionInfo().LastWorkflowTaskStartedEventId) - s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, ms2.GetExecutionState().State) - s.False(ms2.HasPendingWorkflowTask()) -} - -func (s *engineSuite) TestRequestCancel_RespondWorkflowTaskCompleted_Completed() { - namespaceID := tests.NamespaceID - we := commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - } - tl := "testTaskQueue" - tt := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: we.WorkflowId, - RunId: we.RunId, - ScheduledEventId: 6, - } - taskToken, _ := tt.Marshal() - identity := "testIdentity" - activityID := "activity1_id" - activityType := "activity_type1" - activityInput := payloads.EncodeString("input1") - workflowResult := payloads.EncodeString("workflow result") - - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, - tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) - addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 100*time.Second, 100*time.Second, identity) - wt := addWorkflowTaskScheduledEvent(ms) - workflowTaskStartedEvent := addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) - workflowTaskCompletedEvent := addWorkflowTaskCompletedEvent(&s.Suite, ms, wt.ScheduledEventID, workflowTaskStartedEvent.EventId, identity) - _, aInfo := addActivityTaskScheduledEvent(ms, workflowTaskCompletedEvent.EventId, activityID, activityType, tl, activityInput, 100*time.Second, 10*time.Second, 1*time.Second, 0*time.Second) - wt2 := addWorkflowTaskScheduledEvent(ms) - addWorkflowTaskStartedEvent(ms, wt2.ScheduledEventID, tl, identity) - - commands := []*commandpb.Command{ - { - CommandType: enumspb.COMMAND_TYPE_REQUEST_CANCEL_ACTIVITY_TASK, - Attributes: &commandpb.Command_RequestCancelActivityTaskCommandAttributes{RequestCancelActivityTaskCommandAttributes: &commandpb.RequestCancelActivityTaskCommandAttributes{ - ScheduledEventId: aInfo.ScheduledEventId, - }}, - }, - { - CommandType: enumspb.COMMAND_TYPE_COMPLETE_WORKFLOW_EXECUTION, - Attributes: &commandpb.Command_CompleteWorkflowExecutionCommandAttributes{CompleteWorkflowExecutionCommandAttributes: &commandpb.CompleteWorkflowExecutionCommandAttributes{ - Result: workflowResult, - }}, - }, - } - - wfMs := workflow.TestCloneToProto(ms) - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) - - _, err := s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ - NamespaceId: tests.NamespaceID.String(), - CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ - TaskToken: taskToken, - Commands: commands, - Identity: identity, - }, - }) - s.Nil(err) - - ms2 := s.getMutableState(tests.NamespaceID, we) - s.Equal(int64(11), ms2.GetNextEventID()) - s.Equal(int64(7), ms2.GetExecutionInfo().LastWorkflowTaskStartedEventId) - s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED, ms2.GetExecutionState().State) - s.False(ms2.HasPendingWorkflowTask()) -} - -func (s *engineSuite) TestRequestCancel_RespondWorkflowTaskCompleted_NoHeartBeat() { - namespaceID := tests.NamespaceID - we := commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - } - tl := 
"testTaskQueue" - tt := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: we.WorkflowId, - RunId: we.RunId, - ScheduledEventId: 7, - } - taskToken, _ := tt.Marshal() - identity := "testIdentity" - activityID := "activity1_id" - activityType := "activity_type1" - activityInput := payloads.EncodeString("input1") - - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, - tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) - addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 100*time.Second, 100*time.Second, identity) - wt := addWorkflowTaskScheduledEvent(ms) - workflowTaskStartedEvent := addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) - workflowTaskCompletedEvent := addWorkflowTaskCompletedEvent(&s.Suite, ms, wt.ScheduledEventID, workflowTaskStartedEvent.EventId, identity) - activityScheduledEvent, _ := addActivityTaskScheduledEvent(ms, workflowTaskCompletedEvent.EventId, activityID, activityType, tl, activityInput, 100*time.Second, 10*time.Second, 1*time.Second, 0*time.Second) - addActivityTaskStartedEvent(ms, activityScheduledEvent.EventId, identity) - wt2 := addWorkflowTaskScheduledEvent(ms) - addWorkflowTaskStartedEvent(ms, wt2.ScheduledEventID, tl, identity) - - wfMs := workflow.TestCloneToProto(ms) - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - - commands := []*commandpb.Command{{ - CommandType: enumspb.COMMAND_TYPE_REQUEST_CANCEL_ACTIVITY_TASK, - Attributes: &commandpb.Command_RequestCancelActivityTaskCommandAttributes{RequestCancelActivityTaskCommandAttributes: &commandpb.RequestCancelActivityTaskCommandAttributes{ - ScheduledEventId: activityScheduledEvent.GetEventId(), - }}, - }} - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) - - _, err := s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ - NamespaceId: tests.NamespaceID.String(), - CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ - TaskToken: taskToken, - Commands: commands, - Identity: identity, - }, - }) - s.Nil(err) - - ms2 := s.getMutableState(tests.NamespaceID, we) - s.Equal(int64(11), ms2.GetNextEventID()) - s.Equal(int64(8), ms2.GetExecutionInfo().LastWorkflowTaskStartedEventId) - s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, ms2.GetExecutionState().State) - s.False(ms2.HasPendingWorkflowTask()) - - // Try recording activity heartbeat - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) - - att := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: tests.WorkflowID, - RunId: we.GetRunId(), - ScheduledEventId: 5, - } - activityTaskToken, _ := att.Marshal() - - hbResponse, err := s.mockHistoryEngine.RecordActivityTaskHeartbeat(context.Background(), &historyservice.RecordActivityTaskHeartbeatRequest{ - NamespaceId: tests.NamespaceID.String(), - HeartbeatRequest: &workflowservice.RecordActivityTaskHeartbeatRequest{ - TaskToken: activityTaskToken, - Identity: identity, - Details: payloads.EncodeString("details"), - }, - }) - s.Nil(err) - s.NotNil(hbResponse) - s.True(hbResponse.CancelRequested) - - // Try cancelling the request. 
- s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) - - _, err = s.mockHistoryEngine.RespondActivityTaskCanceled(context.Background(), &historyservice.RespondActivityTaskCanceledRequest{ - NamespaceId: tests.NamespaceID.String(), - CancelRequest: &workflowservice.RespondActivityTaskCanceledRequest{ - TaskToken: activityTaskToken, - Identity: identity, - Details: payloads.EncodeString("details"), - }, - }) - s.Nil(err) - - ms2 = s.getMutableState(tests.NamespaceID, we) - s.Equal(int64(13), ms2.GetNextEventID()) - s.Equal(int64(8), ms2.GetExecutionInfo().LastWorkflowTaskStartedEventId) - s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, ms2.GetExecutionState().State) - s.True(ms2.HasPendingWorkflowTask()) -} - -func (s *engineSuite) TestRequestCancel_RespondWorkflowTaskCompleted_Success() { - namespaceID := tests.NamespaceID - we := commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - } - tl := "testTaskQueue" - tt := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: we.WorkflowId, - RunId: we.RunId, - ScheduledEventId: 7, - } - taskToken, _ := tt.Marshal() - identity := "testIdentity" - activityID := "activity1_id" - activityType := "activity_type1" - activityInput := payloads.EncodeString("input1") - - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, - tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) - addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 100*time.Second, 100*time.Second, identity) - wt := addWorkflowTaskScheduledEvent(ms) - workflowTaskStartedEvent := addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) - workflowTaskCompletedEvent := addWorkflowTaskCompletedEvent(&s.Suite, ms, wt.ScheduledEventID, workflowTaskStartedEvent.EventId, identity) - activityScheduledEvent, _ := addActivityTaskScheduledEvent(ms, workflowTaskCompletedEvent.EventId, activityID, activityType, tl, activityInput, 100*time.Second, 10*time.Second, 1*time.Second, 1*time.Second) - addActivityTaskStartedEvent(ms, activityScheduledEvent.EventId, identity) - wt2 := addWorkflowTaskScheduledEvent(ms) - addWorkflowTaskStartedEvent(ms, wt2.ScheduledEventID, tl, identity) - - wfMs := workflow.TestCloneToProto(ms) - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - - commands := []*commandpb.Command{{ - CommandType: enumspb.COMMAND_TYPE_REQUEST_CANCEL_ACTIVITY_TASK, - Attributes: &commandpb.Command_RequestCancelActivityTaskCommandAttributes{RequestCancelActivityTaskCommandAttributes: &commandpb.RequestCancelActivityTaskCommandAttributes{ - ScheduledEventId: activityScheduledEvent.GetEventId(), - }}, - }} - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) - - _, err := s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ - NamespaceId: tests.NamespaceID.String(), - CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ - TaskToken: taskToken, - Commands: commands, - Identity: identity, - }, - }) - s.Nil(err) - - ms2 := s.getMutableState(tests.NamespaceID, we) - s.Equal(int64(11), ms2.GetNextEventID()) - s.Equal(int64(8), ms2.GetExecutionInfo().LastWorkflowTaskStartedEventId) 
- s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, ms2.GetExecutionState().State) - s.False(ms2.HasPendingWorkflowTask()) - - // Try recording activity heartbeat - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) - - att := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: tests.WorkflowID, - RunId: we.GetRunId(), - ScheduledEventId: 5, - } - activityTaskToken, _ := att.Marshal() - - hbResponse, err := s.mockHistoryEngine.RecordActivityTaskHeartbeat(context.Background(), &historyservice.RecordActivityTaskHeartbeatRequest{ - NamespaceId: tests.NamespaceID.String(), - HeartbeatRequest: &workflowservice.RecordActivityTaskHeartbeatRequest{ - TaskToken: activityTaskToken, - Identity: identity, - Details: payloads.EncodeString("details"), - }, - }) - s.Nil(err) - s.NotNil(hbResponse) - s.True(hbResponse.CancelRequested) - - // Try cancelling the request. - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) - - _, err = s.mockHistoryEngine.RespondActivityTaskCanceled(context.Background(), &historyservice.RespondActivityTaskCanceledRequest{ - NamespaceId: tests.NamespaceID.String(), - CancelRequest: &workflowservice.RespondActivityTaskCanceledRequest{ - TaskToken: activityTaskToken, - Identity: identity, - Details: payloads.EncodeString("details"), - }, - }) - s.Nil(err) - - ms2 = s.getMutableState(tests.NamespaceID, we) - s.Equal(int64(13), ms2.GetNextEventID()) - s.Equal(int64(8), ms2.GetExecutionInfo().LastWorkflowTaskStartedEventId) - s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, ms2.GetExecutionState().State) - s.True(ms2.HasPendingWorkflowTask()) -} - -func (s *engineSuite) TestRequestCancel_RespondWorkflowTaskCompleted_SuccessWithQueries() { - namespaceID := tests.NamespaceID - we := commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - } - tl := "testTaskQueue" - tt := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: we.WorkflowId, - RunId: we.RunId, - ScheduledEventId: 7, - } - taskToken, _ := tt.Marshal() - identity := "testIdentity" - activityID := "activity1_id" - activityType := "activity_type1" - activityInput := payloads.EncodeString("input1") - - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, - tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) - addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 100*time.Second, 100*time.Second, identity) - wt := addWorkflowTaskScheduledEvent(ms) - workflowTaskStartedEvent := addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) - workflowTaskCompletedEvent := addWorkflowTaskCompletedEvent(&s.Suite, ms, wt.ScheduledEventID, workflowTaskStartedEvent.EventId, identity) - activityScheduledEvent, _ := addActivityTaskScheduledEvent(ms, workflowTaskCompletedEvent.EventId, activityID, activityType, tl, activityInput, 100*time.Second, 10*time.Second, 1*time.Second, 1*time.Second) - addActivityTaskStartedEvent(ms, activityScheduledEvent.EventId, identity) - wt2 := addWorkflowTaskScheduledEvent(ms) - addWorkflowTaskStartedEvent(ms, wt2.ScheduledEventID, tl, identity) - - wfMs := workflow.TestCloneToProto(ms) - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - - commands := []*commandpb.Command{{ - CommandType: enumspb.COMMAND_TYPE_REQUEST_CANCEL_ACTIVITY_TASK, - 
Attributes: &commandpb.Command_RequestCancelActivityTaskCommandAttributes{RequestCancelActivityTaskCommandAttributes: &commandpb.RequestCancelActivityTaskCommandAttributes{ - ScheduledEventId: activityScheduledEvent.GetEventId(), - }}, - }} - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) - - // load mutable state such that it already exists in memory when respond workflow task is called - // this enables us to set query registry on it - ctx, release, err := s.workflowCache.GetOrCreateWorkflowExecution( - context.Background(), - tests.NamespaceID, - we, - workflow.LockPriorityHigh, - ) - s.NoError(err) - loadedMS, err := ctx.LoadMutableState(context.Background()) - s.NoError(err) - qr := workflow.NewQueryRegistry() - id1, _ := qr.BufferQuery(&querypb.WorkflowQuery{}) - id2, _ := qr.BufferQuery(&querypb.WorkflowQuery{}) - id3, _ := qr.BufferQuery(&querypb.WorkflowQuery{}) - loadedMS.(*workflow.MutableStateImpl).QueryRegistry = qr - release(nil) - result1 := &querypb.WorkflowQueryResult{ - ResultType: enumspb.QUERY_RESULT_TYPE_ANSWERED, - Answer: payloads.EncodeBytes([]byte{1, 2, 3}), - } - result2 := &querypb.WorkflowQueryResult{ - ResultType: enumspb.QUERY_RESULT_TYPE_FAILED, - ErrorMessage: "error reason", - } - queryResults := map[string]*querypb.WorkflowQueryResult{ - id1: result1, - id2: result2, - } - _, err = s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ - NamespaceId: tests.NamespaceID.String(), - CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ - TaskToken: taskToken, - Commands: commands, - Identity: identity, - QueryResults: queryResults, - }, - }) - s.Nil(err) - - ms2 := s.getMutableState(tests.NamespaceID, we) - s.Equal(int64(11), ms2.GetNextEventID()) - s.Equal(int64(8), ms2.GetExecutionInfo().LastWorkflowTaskStartedEventId) - s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, ms2.GetExecutionState().State) - s.False(ms2.HasPendingWorkflowTask()) - s.Len(qr.GetCompletedIDs(), 2) - succeeded1, err := qr.GetCompletionState(id1) - s.NoError(err) - s.EqualValues(succeeded1.Result, result1) - s.Equal(workflow.QueryCompletionTypeSucceeded, succeeded1.Type) - succeeded2, err := qr.GetCompletionState(id2) - s.NoError(err) - s.EqualValues(succeeded2.Result, result2) - s.Equal(workflow.QueryCompletionTypeSucceeded, succeeded2.Type) - s.Len(qr.GetBufferedIDs(), 0) - s.Len(qr.GetFailedIDs(), 0) - s.Len(qr.GetUnblockedIDs(), 1) - unblocked1, err := qr.GetCompletionState(id3) - s.NoError(err) - s.Nil(unblocked1.Result) - s.Equal(workflow.QueryCompletionTypeUnblocked, unblocked1.Type) - - // Try recording activity heartbeat - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) - - att := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: tests.WorkflowID, - RunId: we.GetRunId(), - ScheduledEventId: 5, - } - activityTaskToken, _ := att.Marshal() - - hbResponse, err := s.mockHistoryEngine.RecordActivityTaskHeartbeat(context.Background(), &historyservice.RecordActivityTaskHeartbeatRequest{ - NamespaceId: tests.NamespaceID.String(), - HeartbeatRequest: &workflowservice.RecordActivityTaskHeartbeatRequest{ - TaskToken: activityTaskToken, - Identity: identity, - Details: payloads.EncodeString("details"), - 
}, - }) - s.Nil(err) - s.NotNil(hbResponse) - s.True(hbResponse.CancelRequested) - - // Try cancelling the request. - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) - - _, err = s.mockHistoryEngine.RespondActivityTaskCanceled(context.Background(), &historyservice.RespondActivityTaskCanceledRequest{ - NamespaceId: tests.NamespaceID.String(), - CancelRequest: &workflowservice.RespondActivityTaskCanceledRequest{ - TaskToken: activityTaskToken, - Identity: identity, - Details: payloads.EncodeString("details"), - }, - }) - s.Nil(err) - - ms2 = s.getMutableState(tests.NamespaceID, we) - s.Equal(int64(13), ms2.GetNextEventID()) - s.Equal(int64(8), ms2.GetExecutionInfo().LastWorkflowTaskStartedEventId) - s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, ms2.GetExecutionState().State) - s.True(ms2.HasPendingWorkflowTask()) -} - -func (s *engineSuite) TestStarTimer_DuplicateTimerID() { - namespaceID := tests.NamespaceID - we := commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - } - tl := "testTaskQueue" - tt := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: we.WorkflowId, - RunId: we.RunId, - ScheduledEventId: 2, - } - taskToken, _ := tt.Marshal() - identity := "testIdentity" - timerID := "t1" - - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, - tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) - - addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 100*time.Second, 100*time.Second, identity) - wt := addWorkflowTaskScheduledEvent(ms) - addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) - - wfMs := workflow.TestCloneToProto(ms) - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - - commands := []*commandpb.Command{{ - CommandType: enumspb.COMMAND_TYPE_START_TIMER, - Attributes: &commandpb.Command_StartTimerCommandAttributes{StartTimerCommandAttributes: &commandpb.StartTimerCommandAttributes{ - TimerId: timerID, - StartToFireTimeout: timestamp.DurationPtr(1 * time.Second), - }}, - }} - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) - - _, err := s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ - NamespaceId: tests.NamespaceID.String(), - CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ - TaskToken: taskToken, - Commands: commands, - Identity: identity, - }, - }) - s.Nil(err) - - ms2 := s.getMutableState(tests.NamespaceID, we) - s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, ms2.GetExecutionState().State) - - // Try to add the same timer ID again. 
- wt2 := addWorkflowTaskScheduledEvent(ms2) - addWorkflowTaskStartedEvent(ms2, wt2.ScheduledEventID, tl, identity) - tt2 := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: we.WorkflowId, - RunId: we.RunId, - ScheduledEventId: wt2.ScheduledEventID, - } - taskToken2, _ := tt2.Marshal() - - wfMs2 := workflow.TestCloneToProto(ms2) - gwmsResponse2 := &persistence.GetWorkflowExecutionResponse{State: wfMs2} - - workflowTaskFailedEvent := false - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse2, nil) - var updatedWorkflowMutation persistence.WorkflowMutation - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).DoAndReturn(func(_ context.Context, request *persistence.UpdateWorkflowExecutionRequest) (*persistence.UpdateWorkflowExecutionResponse, error) { - for _, newEvents := range request.UpdateWorkflowEvents { - decTaskIndex := len(newEvents.Events) - 1 - if decTaskIndex >= 0 && newEvents.Events[decTaskIndex].EventType == enumspb.EVENT_TYPE_WORKFLOW_TASK_FAILED { - workflowTaskFailedEvent = true - } - } - updatedWorkflowMutation = request.UpdateWorkflowMutation - return tests.UpdateWorkflowExecutionResponse, nil - }) - - _, err = s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ - NamespaceId: tests.NamespaceID.String(), - CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ - TaskToken: taskToken2, - Commands: commands, - Identity: identity, - }, - }) - s.Error(err) - s.IsType(&serviceerror.InvalidArgument{}, err) - s.Equal("StartTimerDuplicateId: invalid history builder state for action: add-timer-started-event, TimerID: t1", err.Error()) - - s.True(workflowTaskFailedEvent) - - s.NotNil(updatedWorkflowMutation) - s.Equal(int64(9), updatedWorkflowMutation.NextEventID) - s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, updatedWorkflowMutation.ExecutionState.State) - s.Equal(updatedWorkflowMutation.NextEventID, updatedWorkflowMutation.ExecutionInfo.WorkflowTaskScheduledEventId) - s.Equal(int32(2), updatedWorkflowMutation.ExecutionInfo.WorkflowTaskAttempt) -} - -func (s *engineSuite) TestUserTimer_RespondWorkflowTaskCompleted() { - namespaceID := tests.NamespaceID - we := commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - } - tl := "testTaskQueue" - tt := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: we.WorkflowId, - RunId: we.RunId, - ScheduledEventId: 6, - } - taskToken, _ := tt.Marshal() - identity := "testIdentity" - timerID := "t1" - - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, - tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) - // Verify cancel timer with a start event. 
-    addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 100*time.Second, 100*time.Second, identity)
-    wt := addWorkflowTaskScheduledEvent(ms)
-    workflowTaskStartedEvent := addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity)
-    workflowTaskCompletedEvent := addWorkflowTaskCompletedEvent(&s.Suite, ms, wt.ScheduledEventID, workflowTaskStartedEvent.EventId, identity)
-    addTimerStartedEvent(ms, workflowTaskCompletedEvent.EventId, timerID, 10*time.Second)
-    wt2 := addWorkflowTaskScheduledEvent(ms)
-    addWorkflowTaskStartedEvent(ms, wt2.ScheduledEventID, tl, identity)
-
-    wfMs := workflow.TestCloneToProto(ms)
-    gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs}
-
-    commands := []*commandpb.Command{{
-        CommandType: enumspb.COMMAND_TYPE_CANCEL_TIMER,
-        Attributes: &commandpb.Command_CancelTimerCommandAttributes{CancelTimerCommandAttributes: &commandpb.CancelTimerCommandAttributes{
-            TimerId: timerID,
-        }},
-    }}
-
-    s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil)
-    s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil)
-
-    _, err := s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{
-        NamespaceId: tests.NamespaceID.String(),
-        CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{
-            TaskToken: taskToken,
-            Commands: commands,
-            Identity: identity,
-        },
-    })
-    s.Nil(err)
-
-    ms2 := s.getMutableState(tests.NamespaceID, we)
-    s.Equal(int64(10), ms2.GetNextEventID())
-    s.Equal(int64(7), ms2.GetExecutionInfo().LastWorkflowTaskStartedEventId)
-    s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, ms2.GetExecutionState().State)
-    s.False(ms2.HasPendingWorkflowTask())
-}
-
-func (s *engineSuite) TestCancelTimer_RespondWorkflowTaskCompleted_NoStartTimer() {
-    namespaceID := tests.NamespaceID
-    we := commonpb.WorkflowExecution{
-        WorkflowId: tests.WorkflowID,
-        RunId: tests.RunID,
-    }
-    tl := "testTaskQueue"
-    tt := &tokenspb.Task{
-        Attempt: 1,
-        NamespaceId: namespaceID.String(),
-        WorkflowId: we.WorkflowId,
-        RunId: we.RunId,
-        ScheduledEventId: 2,
-    }
-    taskToken, _ := tt.Marshal()
-    identity := "testIdentity"
-    timerID := "t1"
-
-    ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache,
-        tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId())
-    // Verify cancel timer with a start event.
- addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 100*time.Second, 100*time.Second, identity) - wt := addWorkflowTaskScheduledEvent(ms) - addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) - - wfMs := workflow.TestCloneToProto(ms) - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - - ms2 := workflow.TestCloneToProto(ms) - gwmsResponse2 := &persistence.GetWorkflowExecutionResponse{State: ms2} - - commands := []*commandpb.Command{{ - CommandType: enumspb.COMMAND_TYPE_CANCEL_TIMER, - Attributes: &commandpb.Command_CancelTimerCommandAttributes{CancelTimerCommandAttributes: &commandpb.CancelTimerCommandAttributes{ - TimerId: timerID, - }}, - }} - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse2, nil) - var updatedWorkflowMutation persistence.WorkflowMutation - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).DoAndReturn(func(_ context.Context, request *persistence.UpdateWorkflowExecutionRequest) (*persistence.UpdateWorkflowExecutionResponse, error) { - updatedWorkflowMutation = request.UpdateWorkflowMutation - return tests.UpdateWorkflowExecutionResponse, nil - }) - - _, err := s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ - NamespaceId: tests.NamespaceID.String(), - CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ - TaskToken: taskToken, - Commands: commands, - Identity: identity, - }, - }) - s.Error(err) - s.IsType(&serviceerror.InvalidArgument{}, err) - s.Equal("BadCancelTimerAttributes: invalid history builder state for action: add-timer-canceled-event, TimerID: t1", err.Error()) - - s.NotNil(updatedWorkflowMutation) - s.Equal(int64(5), updatedWorkflowMutation.NextEventID) - s.Equal(common.EmptyEventID, updatedWorkflowMutation.ExecutionInfo.LastWorkflowTaskStartedEventId) - s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, updatedWorkflowMutation.ExecutionState.State) - s.True(updatedWorkflowMutation.ExecutionInfo.WorkflowTaskScheduledEventId != common.EmptyEventID) -} - -func (s *engineSuite) TestCancelTimer_RespondWorkflowTaskCompleted_TimerFired() { - namespaceID := tests.NamespaceID - we := commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - } - tl := "testTaskQueue" - tt := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: we.WorkflowId, - RunId: we.RunId, - ScheduledEventId: 6, - } - taskToken, _ := tt.Marshal() - identity := "testIdentity" - timerID := "t1" - - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, - tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) - // Verify cancel timer with a start event. 
- addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 100*time.Second, 100*time.Second, identity) - wt := addWorkflowTaskScheduledEvent(ms) - workflowTaskStartedEvent := addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) - workflowTaskCompletedEvent := addWorkflowTaskCompletedEvent(&s.Suite, ms, wt.ScheduledEventID, workflowTaskStartedEvent.EventId, identity) - addTimerStartedEvent(ms, workflowTaskCompletedEvent.EventId, timerID, 10*time.Second) - wt2 := addWorkflowTaskScheduledEvent(ms) - addWorkflowTaskStartedEvent(ms, wt2.ScheduledEventID, tl, identity) - addTimerFiredEvent(ms, timerID) - _, _, err := ms.CloseTransactionAsMutation(workflow.TransactionPolicyActive) - s.Nil(err) - - wfMs := workflow.TestCloneToProto(ms) - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - s.True(len(gwmsResponse.State.BufferedEvents) > 0) - - commands := []*commandpb.Command{{ - CommandType: enumspb.COMMAND_TYPE_CANCEL_TIMER, - Attributes: &commandpb.Command_CancelTimerCommandAttributes{CancelTimerCommandAttributes: &commandpb.CancelTimerCommandAttributes{ - TimerId: timerID, - }}, - }} - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).DoAndReturn( - func(_ context.Context, request *persistence.UpdateWorkflowExecutionRequest) (*persistence.UpdateWorkflowExecutionResponse, error) { - s.True(request.UpdateWorkflowMutation.ClearBufferedEvents) - return tests.UpdateWorkflowExecutionResponse, nil - }) - - _, err = s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ - NamespaceId: tests.NamespaceID.String(), - CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ - TaskToken: taskToken, - Commands: commands, - Identity: identity, - }, - }) - s.Nil(err) - - ms2 := s.getMutableState(tests.NamespaceID, we) - s.Equal(int64(10), ms2.GetNextEventID()) - s.Equal(int64(7), ms2.GetExecutionInfo().LastWorkflowTaskStartedEventId) - s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, ms2.GetExecutionState().State) - s.False(ms2.HasPendingWorkflowTask()) - s.False(ms2.HasBufferedEvents()) -} - -func (s *engineSuite) TestSignalWorkflowExecution() { - signalRequest := &historyservice.SignalWorkflowExecutionRequest{} - _, err := s.mockHistoryEngine.SignalWorkflowExecution(context.Background(), signalRequest) - s.EqualError(err, "Missing namespace UUID.") - - we := commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - } - taskqueue := "testTaskQueue" - identity := "testIdentity" - signalName := "my signal name" - input := payloads.EncodeString("test input") - signalRequest = &historyservice.SignalWorkflowExecutionRequest{ - NamespaceId: tests.NamespaceID.String(), - SignalRequest: &workflowservice.SignalWorkflowExecutionRequest{ - Namespace: tests.NamespaceID.String(), - WorkflowExecution: &we, - Identity: identity, - SignalName: signalName, - Input: input, - }, - } - - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, - tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) - addWorkflowExecutionStartedEvent(ms, we, "wType", taskqueue, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) - addWorkflowTaskScheduledEvent(ms) - wfMs := workflow.TestCloneToProto(ms) - wfMs.ExecutionInfo.NamespaceId = 
tests.NamespaceID.String() - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) - - _, err = s.mockHistoryEngine.SignalWorkflowExecution(context.Background(), signalRequest) - s.Nil(err) -} - -// Test signal workflow task by adding request ID -func (s *engineSuite) TestSignalWorkflowExecution_DuplicateRequest() { - signalRequest := &historyservice.SignalWorkflowExecutionRequest{} - _, err := s.mockHistoryEngine.SignalWorkflowExecution(context.Background(), signalRequest) - s.EqualError(err, "Missing namespace UUID.") - - we := commonpb.WorkflowExecution{ - WorkflowId: "wId2", - RunId: tests.RunID, - } - taskqueue := "testTaskQueue" - identity := "testIdentity" - signalName := "my signal name 2" - input := payloads.EncodeString("test input 2") - requestID := uuid.New() - signalRequest = &historyservice.SignalWorkflowExecutionRequest{ - NamespaceId: tests.NamespaceID.String(), - SignalRequest: &workflowservice.SignalWorkflowExecutionRequest{ - Namespace: tests.NamespaceID.String(), - WorkflowExecution: &we, - Identity: identity, - SignalName: signalName, - Input: input, - RequestId: requestID, - }, - } - - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, - tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) - addWorkflowExecutionStartedEvent(ms, we, "wType", taskqueue, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) - addWorkflowTaskScheduledEvent(ms) - wfMs := workflow.TestCloneToProto(ms) - // assume duplicate request id - wfMs.SignalRequestedIds = []string{requestID} - wfMs.ExecutionInfo.NamespaceId = tests.NamespaceID.String() - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - - _, err = s.mockHistoryEngine.SignalWorkflowExecution(context.Background(), signalRequest) - s.Nil(err) -} - -// Test signal workflow task by dedup request ID & workflow finished -func (s *engineSuite) TestSignalWorkflowExecution_DuplicateRequest_Completed() { - signalRequest := &historyservice.SignalWorkflowExecutionRequest{} - _, err := s.mockHistoryEngine.SignalWorkflowExecution(context.Background(), signalRequest) - s.EqualError(err, "Missing namespace UUID.") - - we := commonpb.WorkflowExecution{ - WorkflowId: "wId2", - RunId: tests.RunID, - } - taskqueue := "testTaskQueue" - identity := "testIdentity" - signalName := "my signal name 2" - input := payloads.EncodeString("test input 2") - requestID := uuid.New() - signalRequest = &historyservice.SignalWorkflowExecutionRequest{ - NamespaceId: tests.NamespaceID.String(), - SignalRequest: &workflowservice.SignalWorkflowExecutionRequest{ - Namespace: tests.NamespaceID.String(), - WorkflowExecution: &we, - Identity: identity, - SignalName: signalName, - Input: input, - RequestId: requestID, - }, - } - - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, - tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) - addWorkflowExecutionStartedEvent(ms, we, "wType", taskqueue, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) - addWorkflowTaskScheduledEvent(ms) - wfMs := workflow.TestCloneToProto(ms) - // assume duplicate 
request id - wfMs.SignalRequestedIds = []string{requestID} - wfMs.ExecutionInfo.NamespaceId = tests.NamespaceID.String() - wfMs.ExecutionState.State = enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - - _, err = s.mockHistoryEngine.SignalWorkflowExecution(context.Background(), signalRequest) - s.Nil(err) -} - -func (s *engineSuite) TestSignalWorkflowExecution_Failed() { - signalRequest := &historyservice.SignalWorkflowExecutionRequest{} - _, err := s.mockHistoryEngine.SignalWorkflowExecution(context.Background(), signalRequest) - s.EqualError(err, "Missing namespace UUID.") - - we := &commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - } - taskqueue := "testTaskQueue" - identity := "testIdentity" - signalName := "my signal name" - input := payloads.EncodeString("test input") - signalRequest = &historyservice.SignalWorkflowExecutionRequest{ - NamespaceId: tests.NamespaceID.String(), - SignalRequest: &workflowservice.SignalWorkflowExecutionRequest{ - Namespace: tests.NamespaceID.String(), - WorkflowExecution: we, - Identity: identity, - SignalName: signalName, - Input: input, - }, - } - - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, - tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) - addWorkflowExecutionStartedEvent(ms, *we, "wType", taskqueue, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) - addWorkflowTaskScheduledEvent(ms) - wfMs := workflow.TestCloneToProto(ms) - wfMs.ExecutionState.State = enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - - _, err = s.mockHistoryEngine.SignalWorkflowExecution(context.Background(), signalRequest) - s.EqualError(err, "workflow execution already completed") -} - -func (s *engineSuite) TestSignalWorkflowExecution_WorkflowTaskBackoff() { - signalRequest := &historyservice.SignalWorkflowExecutionRequest{} - _, err := s.mockHistoryEngine.SignalWorkflowExecution(context.Background(), signalRequest) - s.EqualError(err, "Missing namespace UUID.") - - we := commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - } - taskqueue := "testTaskQueue" - identity := "testIdentity" - signalName := "my signal name" - signalInput := payloads.EncodeString("test input") - signalRequest = &historyservice.SignalWorkflowExecutionRequest{ - NamespaceId: tests.NamespaceID.String(), - SignalRequest: &workflowservice.SignalWorkflowExecutionRequest{ - Namespace: tests.NamespaceID.String(), - WorkflowExecution: &we, - Identity: identity, - SignalName: signalName, - Input: signalInput, - }, - } - - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, - tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) - startRequest := &workflowservice.StartWorkflowExecutionRequest{ - WorkflowId: we.WorkflowId, - WorkflowType: &commonpb.WorkflowType{Name: "wType"}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskqueue}, - Input: payloads.EncodeString("input"), - WorkflowExecutionTimeout: timestamp.DurationPtr(100 * time.Second), - WorkflowRunTimeout: timestamp.DurationPtr(50 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(200 * time.Second), - Identity: 
identity, - } - - _, err = ms.AddWorkflowExecutionStartedEvent( - we, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: tests.NamespaceID.String(), - StartRequest: startRequest, - ContinueAsNewInitiator: enumspb.CONTINUE_AS_NEW_INITIATOR_RETRY, - FirstWorkflowTaskBackoff: timestamp.DurationPtr(time.Second * 10), - }, - ) - s.NoError(err) - - wfMs := workflow.TestCloneToProto(ms) - wfMs.ExecutionInfo.NamespaceId = tests.NamespaceID.String() - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).DoAndReturn(func(_ context.Context, request *persistence.UpdateWorkflowExecutionRequest) (*persistence.UpdateWorkflowExecutionResponse, error) { - s.Len(request.UpdateWorkflowEvents[0].Events, 1) // no workflow task scheduled event - // s.Empty(request.UpdateWorkflowMutation.Tasks[tasks.CategoryTransfer]) // no workflow transfer task - return tests.UpdateWorkflowExecutionResponse, nil - }) - - _, err = s.mockHistoryEngine.SignalWorkflowExecution(context.Background(), signalRequest) - s.Nil(err) -} - -func (s *engineSuite) TestRemoveSignalMutableState() { - removeRequest := &historyservice.RemoveSignalMutableStateRequest{} - _, err := s.mockHistoryEngine.RemoveSignalMutableState(context.Background(), removeRequest) - s.EqualError(err, "Missing namespace UUID.") - - execution := commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - } - taskqueue := "testTaskQueue" - identity := "testIdentity" - requestID := uuid.New() - removeRequest = &historyservice.RemoveSignalMutableStateRequest{ - NamespaceId: tests.NamespaceID.String(), - WorkflowExecution: &execution, - RequestId: requestID, - } - - ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, - tests.LocalNamespaceEntry, log.NewTestLogger(), tests.RunID) - addWorkflowExecutionStartedEvent(ms, execution, "wType", taskqueue, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) - addWorkflowTaskScheduledEvent(ms) - wfMs := workflow.TestCloneToProto(ms) - wfMs.ExecutionInfo.NamespaceId = tests.NamespaceID.String() - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) - - _, err = s.mockHistoryEngine.RemoveSignalMutableState(context.Background(), removeRequest) - s.Nil(err) -} - -func (s *engineSuite) TestReapplyEvents_ReturnSuccess() { - workflowExecution := commonpb.WorkflowExecution{ - WorkflowId: "test-reapply", - RunId: tests.RunID, - } - taskqueue := "testTaskQueue" - identity := "testIdentity" - - history := []*historypb.HistoryEvent{ - { - EventId: 1, - EventType: enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_SIGNALED, - Version: 1, - }, - } - ms := workflow.TestLocalMutableState( - s.mockHistoryEngine.shard, - s.eventsCache, - tests.LocalNamespaceEntry, - log.NewTestLogger(), - workflowExecution.GetRunId(), - ) - // Add dummy event - addWorkflowExecutionStartedEvent(ms, workflowExecution, "wType", taskqueue, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) - wfMs := workflow.TestCloneToProto(ms) - gwmsResponse := 
&persistence.GetWorkflowExecutionResponse{State: wfMs} - gceResponse := &persistence.GetCurrentExecutionResponse{RunID: tests.RunID} - s.mockExecutionMgr.EXPECT().GetCurrentExecution(gomock.Any(), gomock.Any()).Return(gceResponse, nil) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - s.mockEventsReapplier.EXPECT().ReapplyEvents(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()) - - err := s.mockHistoryEngine.ReapplyEvents( - context.Background(), - tests.NamespaceID, - workflowExecution.GetWorkflowId(), - workflowExecution.GetRunId(), - history, - ) - s.NoError(err) -} - -func (s *engineSuite) TestReapplyEvents_IgnoreSameVersionEvents() { - workflowExecution := commonpb.WorkflowExecution{ - WorkflowId: "test-reapply-same-version", - RunId: tests.RunID, - } - taskqueue := "testTaskQueue" - identity := "testIdentity" - - // TODO: Figure out why version is empty? - history := []*historypb.HistoryEvent{ - { - EventId: 1, - EventType: enumspb.EVENT_TYPE_TIMER_STARTED, - Version: common.EmptyVersion, - }, - } - ms := workflow.TestLocalMutableState( - s.mockHistoryEngine.shard, - s.eventsCache, - tests.LocalNamespaceEntry, - log.NewTestLogger(), - workflowExecution.GetRunId(), - ) - // Add dummy event - addWorkflowExecutionStartedEvent(ms, workflowExecution, "wType", taskqueue, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) - - wfMs := workflow.TestCloneToProto(ms) - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - gceResponse := &persistence.GetCurrentExecutionResponse{RunID: tests.RunID} - s.mockExecutionMgr.EXPECT().GetCurrentExecution(gomock.Any(), gomock.Any()).Return(gceResponse, nil) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - s.mockEventsReapplier.EXPECT().ReapplyEvents(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(0) - - err := s.mockHistoryEngine.ReapplyEvents( - context.Background(), - tests.NamespaceID, - workflowExecution.GetWorkflowId(), - workflowExecution.GetRunId(), - history, - ) - s.NoError(err) -} - -func (s *engineSuite) TestReapplyEvents_ResetWorkflow() { - workflowExecution := commonpb.WorkflowExecution{ - WorkflowId: "test-reapply-reset-workflow", - RunId: tests.RunID, - } - taskqueue := "testTaskQueue" - identity := "testIdentity" - history := []*historypb.HistoryEvent{ - { - EventId: 1, - EventType: enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_SIGNALED, - Version: 100, - }, - } - ms := workflow.TestLocalMutableState( - s.mockHistoryEngine.shard, - s.eventsCache, - tests.LocalNamespaceEntry, - log.NewTestLogger(), - workflowExecution.GetRunId(), - ) - // Add dummy event - addWorkflowExecutionStartedEvent(ms, workflowExecution, "wType", taskqueue, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) - - wfMs := workflow.TestCloneToProto(ms) - wfMs.ExecutionState.State = enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED - wfMs.ExecutionInfo.LastWorkflowTaskStartedEventId = 1 - token, err := ms.GetCurrentBranchToken() - s.NoError(err) - item := versionhistory.NewVersionHistoryItem(1, 1) - versionHistory := versionhistory.NewVersionHistory(token, []*historyspb.VersionHistoryItem{item}) - wfMs.ExecutionInfo.VersionHistories = versionhistory.NewVersionHistories(versionHistory) - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - gceResponse := &persistence.GetCurrentExecutionResponse{RunID: tests.RunID} - 
s.mockExecutionMgr.EXPECT().GetCurrentExecution(gomock.Any(), gomock.Any()).Return(gceResponse, nil).AnyTimes() - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - s.mockEventsReapplier.EXPECT().ReapplyEvents(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(0) - s.mockWorkflowResetter.EXPECT().ResetWorkflow( - gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), - gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), - gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), - ).Return(nil) - err = s.mockHistoryEngine.ReapplyEvents( - context.Background(), - tests.NamespaceID, - workflowExecution.GetWorkflowId(), - workflowExecution.GetRunId(), - history, - ) - s.NoError(err) -} - -func (s *engineSuite) TestEagerWorkflowStart_DoesNotCreateTransferTask() { - var recordedTasks []tasks.Task - - s.mockExecutionMgr.EXPECT().CreateWorkflowExecution(gomock.Any(), gomock.Any()).DoAndReturn(func(ctx context.Context, request *persistence.CreateWorkflowExecutionRequest) (*persistence.CreateWorkflowExecutionResponse, error) { - recordedTasks = request.NewWorkflowSnapshot.Tasks[tasks.CategoryTransfer] - persistenceResponse := persistence.CreateWorkflowExecutionResponse{NewMutableStateStats: tests.CreateWorkflowExecutionResponse.NewMutableStateStats} - return &persistenceResponse, nil - }) - - i := interceptor.NewTelemetryInterceptor(s.mockShard.GetNamespaceRegistry(), s.mockShard.GetMetricsHandler(), s.mockShard.Resource.Logger) - response, err := i.UnaryIntercept(context.Background(), nil, &grpc.UnaryServerInfo{FullMethod: "StartWorkflowExecution"}, func(ctx context.Context, req interface{}) (interface{}, error) { - response, err := s.mockHistoryEngine.StartWorkflowExecution(ctx, &historyservice.StartWorkflowExecutionRequest{ - NamespaceId: tests.NamespaceID.String(), - Attempt: 1, - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowId: "test", - Namespace: tests.Namespace.String(), - WorkflowType: &commonpb.WorkflowType{Name: "test"}, - TaskQueue: &taskqueuepb.TaskQueue{Kind: enumspb.TASK_QUEUE_KIND_NORMAL, Name: "test"}, - Identity: "test", - RequestId: "test", - RequestEagerExecution: true, - }, - }) - return response, err - }) - s.NoError(err) - s.Equal(len(response.(*historyservice.StartWorkflowExecutionResponse).EagerWorkflowTask.History.Events), 3) - s.Equal(response.(*historyservice.StartWorkflowExecutionResponse).EagerWorkflowTask.History.Events[0].EventType, enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_STARTED) - s.Equal(response.(*historyservice.StartWorkflowExecutionResponse).EagerWorkflowTask.History.Events[1].EventType, enumspb.EVENT_TYPE_WORKFLOW_TASK_SCHEDULED) - s.Equal(response.(*historyservice.StartWorkflowExecutionResponse).EagerWorkflowTask.History.Events[2].EventType, enumspb.EVENT_TYPE_WORKFLOW_TASK_STARTED) - s.Equal(len(recordedTasks), 0) -} - -func (s *engineSuite) TestEagerWorkflowStart_FromCron_SkipsEager() { - var recordedTasks []tasks.Task - - s.mockExecutionMgr.EXPECT().CreateWorkflowExecution(gomock.Any(), gomock.Any()).DoAndReturn(func(ctx context.Context, request *persistence.CreateWorkflowExecutionRequest) (*persistence.CreateWorkflowExecutionResponse, error) { - recordedTasks = request.NewWorkflowSnapshot.Tasks[tasks.CategoryTransfer] - persistenceResponse := persistence.CreateWorkflowExecutionResponse{NewMutableStateStats: tests.CreateWorkflowExecutionResponse.NewMutableStateStats} - return &persistenceResponse, nil - }) - - i := 
interceptor.NewTelemetryInterceptor(s.mockShard.GetNamespaceRegistry(), s.mockShard.GetMetricsHandler(), s.mockShard.Resource.Logger) - response, err := i.UnaryIntercept(context.Background(), nil, &grpc.UnaryServerInfo{FullMethod: "StartWorkflowExecution"}, func(ctx context.Context, req interface{}) (interface{}, error) { - firstWorkflowTaskBackoff := time.Second - response, err := s.mockHistoryEngine.StartWorkflowExecution(ctx, &historyservice.StartWorkflowExecutionRequest{ - NamespaceId: tests.NamespaceID.String(), - Attempt: 1, - ContinueAsNewInitiator: enumspb.CONTINUE_AS_NEW_INITIATOR_CRON_SCHEDULE, - FirstWorkflowTaskBackoff: &firstWorkflowTaskBackoff, - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowId: "test", - Namespace: tests.Namespace.String(), - WorkflowType: &commonpb.WorkflowType{Name: "test"}, - TaskQueue: &taskqueuepb.TaskQueue{Kind: enumspb.TASK_QUEUE_KIND_NORMAL, Name: "test"}, - Identity: "test", - RequestId: "test", - CronSchedule: "* * * * *", - RequestEagerExecution: true, - }, - }) - return response, err - }) - s.NoError(err) - s.Nil(response.(*historyservice.StartWorkflowExecutionResponse).EagerWorkflowTask) - s.Equal(len(recordedTasks), 0) -} - -func (s *engineSuite) getMutableState(testNamespaceID namespace.ID, we commonpb.WorkflowExecution) workflow.MutableState { - context, release, err := s.workflowCache.GetOrCreateWorkflowExecution( - context.Background(), - tests.NamespaceID, - we, - workflow.LockPriorityHigh, - ) - if err != nil { - return nil - } - defer release(nil) - - return context.(*workflow.ContextImpl).MutableState -} - -func (s *engineSuite) getActivityScheduledEvent( - ms workflow.MutableState, - scheduledEventID int64, -) *historypb.HistoryEvent { - event, _ := ms.GetActivityScheduledEvent(context.Background(), scheduledEventID) - return event -} - -func addWorkflowExecutionStartedEventWithParent(ms workflow.MutableState, workflowExecution commonpb.WorkflowExecution, - workflowType, taskQueue string, input *commonpb.Payloads, executionTimeout, runTimeout, taskTimeout time.Duration, - parentInfo *workflowspb.ParentExecutionInfo, identity string) *historypb.HistoryEvent { - - startRequest := &workflowservice.StartWorkflowExecutionRequest{ - WorkflowId: workflowExecution.WorkflowId, - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueue}, - Input: input, - WorkflowExecutionTimeout: &executionTimeout, - WorkflowRunTimeout: &runTimeout, - WorkflowTaskTimeout: &taskTimeout, - Identity: identity, - } - - event, _ := ms.AddWorkflowExecutionStartedEvent( - workflowExecution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: tests.NamespaceID.String(), - StartRequest: startRequest, - ParentExecutionInfo: parentInfo, - }, - ) - - return event -} - -func addWorkflowExecutionStartedEvent(ms workflow.MutableState, workflowExecution commonpb.WorkflowExecution, - workflowType, taskQueue string, input *commonpb.Payloads, executionTimeout, runTimeout, taskTimeout time.Duration, - identity string) *historypb.HistoryEvent { - return addWorkflowExecutionStartedEventWithParent(ms, workflowExecution, workflowType, taskQueue, input, - executionTimeout, runTimeout, taskTimeout, nil, identity) -} - -func addWorkflowTaskScheduledEvent(ms workflow.MutableState) *workflow.WorkflowTaskInfo { - workflowTask, _ := ms.AddWorkflowTaskScheduledEvent(false, enumsspb.WORKFLOW_TASK_TYPE_NORMAL) - return workflowTask -} - -func addWorkflowTaskStartedEvent(ms 
workflow.MutableState, scheduledEventID int64, taskQueue, - identity string) *historypb.HistoryEvent { - return addWorkflowTaskStartedEventWithRequestID(ms, scheduledEventID, tests.RunID, taskQueue, identity) -} - -func addWorkflowTaskStartedEventWithRequestID(ms workflow.MutableState, scheduledEventID int64, requestID string, - taskQueue, identity string) *historypb.HistoryEvent { - event, _, _ := ms.AddWorkflowTaskStartedEvent( - scheduledEventID, - requestID, - &taskqueuepb.TaskQueue{Name: taskQueue}, - identity, - ) - - return event -} - -func addWorkflowTaskCompletedEvent(s *suite.Suite, ms workflow.MutableState, scheduledEventID, startedEventID int64, identity string) *historypb.HistoryEvent { - workflowTask := ms.GetWorkflowTaskByID(scheduledEventID) - s.NotNil(workflowTask) - s.Equal(startedEventID, workflowTask.StartedEventID) - - event, _ := ms.AddWorkflowTaskCompletedEvent(workflowTask, &workflowservice.RespondWorkflowTaskCompletedRequest{ - Identity: identity, - }, defaultWorkflowTaskCompletionLimits) - - ms.FlushBufferedEvents() - - return event -} - -func addActivityTaskScheduledEvent( - ms workflow.MutableState, - workflowTaskCompletedID int64, - activityID, activityType, - taskQueue string, - input *commonpb.Payloads, - scheduleToCloseTimeout time.Duration, - scheduleToStartTimeout time.Duration, - startToCloseTimeout time.Duration, - heartbeatTimeout time.Duration, -) (*historypb.HistoryEvent, - *persistencespb.ActivityInfo) { - - event, ai, _ := ms.AddActivityTaskScheduledEvent(workflowTaskCompletedID, &commandpb.ScheduleActivityTaskCommandAttributes{ - ActivityId: activityID, - ActivityType: &commonpb.ActivityType{Name: activityType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueue}, - Input: input, - ScheduleToCloseTimeout: &scheduleToCloseTimeout, - ScheduleToStartTimeout: &scheduleToStartTimeout, - StartToCloseTimeout: &startToCloseTimeout, - HeartbeatTimeout: &heartbeatTimeout, - }, false) - - return event, ai -} - -func addActivityTaskScheduledEventWithRetry( - ms workflow.MutableState, - workflowTaskCompletedID int64, - activityID, activityType, - taskQueue string, - input *commonpb.Payloads, - scheduleToCloseTimeout time.Duration, - scheduleToStartTimeout time.Duration, - startToCloseTimeout time.Duration, - heartbeatTimeout time.Duration, - retryPolicy *commonpb.RetryPolicy, -) (*historypb.HistoryEvent, *persistencespb.ActivityInfo) { - - event, ai, _ := ms.AddActivityTaskScheduledEvent(workflowTaskCompletedID, &commandpb.ScheduleActivityTaskCommandAttributes{ - ActivityId: activityID, - ActivityType: &commonpb.ActivityType{Name: activityType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueue}, - Input: input, - ScheduleToCloseTimeout: &scheduleToCloseTimeout, - ScheduleToStartTimeout: &scheduleToStartTimeout, - StartToCloseTimeout: &startToCloseTimeout, - HeartbeatTimeout: &heartbeatTimeout, - RetryPolicy: retryPolicy, - }, false) - - return event, ai -} - -func addActivityTaskStartedEvent(ms workflow.MutableState, scheduledEventID int64, identity string) *historypb.HistoryEvent { - ai, _ := ms.GetActivityInfo(scheduledEventID) - event, _ := ms.AddActivityTaskStartedEvent(ai, scheduledEventID, tests.RunID, identity) - return event -} - -func addActivityTaskCompletedEvent(ms workflow.MutableState, scheduledEventID, startedEventID int64, result *commonpb.Payloads, - identity string) *historypb.HistoryEvent { - event, _ := ms.AddActivityTaskCompletedEvent(scheduledEventID, startedEventID, &workflowservice.RespondActivityTaskCompletedRequest{ - Result: 
result, - Identity: identity, - }) - - return event -} - -func addActivityTaskFailedEvent(ms workflow.MutableState, scheduledEventID, startedEventID int64, failure *failurepb.Failure, retryState enumspb.RetryState, identity string) *historypb.HistoryEvent { - event, _ := ms.AddActivityTaskFailedEvent(scheduledEventID, startedEventID, failure, retryState, identity) - return event -} - -func addTimerStartedEvent(ms workflow.MutableState, workflowTaskCompletedEventID int64, timerID string, - timeout time.Duration) (*historypb.HistoryEvent, *persistencespb.TimerInfo) { - event, ti, _ := ms.AddTimerStartedEvent(workflowTaskCompletedEventID, - &commandpb.StartTimerCommandAttributes{ - TimerId: timerID, - StartToFireTimeout: &timeout, - }) - return event, ti -} - -func addTimerFiredEvent(ms workflow.MutableState, timerID string) *historypb.HistoryEvent { - event, _ := ms.AddTimerFiredEvent(timerID) - return event -} - -func addRequestCancelInitiatedEvent(ms workflow.MutableState, workflowTaskCompletedEventID int64, - cancelRequestID string, namespace namespace.Name, namespaceID namespace.ID, workflowID, runID string) (*historypb.HistoryEvent, *persistencespb.RequestCancelInfo) { - event, rci, _ := ms.AddRequestCancelExternalWorkflowExecutionInitiatedEvent(workflowTaskCompletedEventID, - cancelRequestID, &commandpb.RequestCancelExternalWorkflowExecutionCommandAttributes{ - Namespace: namespace.String(), - WorkflowId: workflowID, - RunId: runID, - Reason: "cancellation reason", - }, - namespaceID) - - return event, rci -} - -func addCancelRequestedEvent(ms workflow.MutableState, initiatedID int64, namespace namespace.Name, namespaceID namespace.ID, workflowID, runID string) *historypb.HistoryEvent { - event, _ := ms.AddExternalWorkflowExecutionCancelRequested(initiatedID, namespace, namespaceID, workflowID, runID) - return event -} - -func addRequestSignalInitiatedEvent(ms workflow.MutableState, workflowTaskCompletedEventID int64, - signalRequestID string, namespace namespace.Name, namespaceID namespace.ID, workflowID, runID, signalName string, input *commonpb.Payloads, - control string, header *commonpb.Header) (*historypb.HistoryEvent, *persistencespb.SignalInfo) { - event, si, _ := ms.AddSignalExternalWorkflowExecutionInitiatedEvent(workflowTaskCompletedEventID, signalRequestID, - &commandpb.SignalExternalWorkflowExecutionCommandAttributes{ - Namespace: namespace.String(), - Execution: &commonpb.WorkflowExecution{ - WorkflowId: workflowID, - RunId: runID, - }, - SignalName: signalName, - Input: input, - Control: control, - Header: header, - }, namespaceID) - - return event, si -} - -func addSignaledEvent(ms workflow.MutableState, initiatedID int64, namespace namespace.Name, namespaceID namespace.ID, workflowID, runID string, control string) *historypb.HistoryEvent { - event, _ := ms.AddExternalWorkflowExecutionSignaled(initiatedID, namespace, namespaceID, workflowID, runID, control) - return event -} - -func addStartChildWorkflowExecutionInitiatedEvent( - ms workflow.MutableState, - workflowTaskCompletedID int64, - createRequestID string, - namespace namespace.Name, - namespaceID namespace.ID, - workflowID, workflowType, taskQueue string, - input *commonpb.Payloads, - executionTimeout, runTimeout, taskTimeout time.Duration, - parentClosePolicy enumspb.ParentClosePolicy, -) (*historypb.HistoryEvent, *persistencespb.ChildExecutionInfo) { - - event, cei, _ := ms.AddStartChildWorkflowExecutionInitiatedEvent(workflowTaskCompletedID, createRequestID, - 
&commandpb.StartChildWorkflowExecutionCommandAttributes{ - Namespace: namespace.String(), - WorkflowId: workflowID, - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueue}, - Input: input, - WorkflowExecutionTimeout: &executionTimeout, - WorkflowRunTimeout: &runTimeout, - WorkflowTaskTimeout: &taskTimeout, - Control: "", - ParentClosePolicy: parentClosePolicy, - }, namespaceID) - return event, cei -} - -func addChildWorkflowExecutionStartedEvent(ms workflow.MutableState, initiatedID int64, workflowID, runID string, - workflowType string, clock *clockspb.VectorClock) *historypb.HistoryEvent { - event, _ := ms.AddChildWorkflowExecutionStartedEvent( - &commonpb.WorkflowExecution{ - WorkflowId: workflowID, - RunId: runID, - }, - &commonpb.WorkflowType{Name: workflowType}, - initiatedID, - &commonpb.Header{}, - clock, - ) - return event -} - -func addChildWorkflowExecutionCompletedEvent(ms workflow.MutableState, initiatedID int64, childExecution *commonpb.WorkflowExecution, - attributes *historypb.WorkflowExecutionCompletedEventAttributes) *historypb.HistoryEvent { - event, _ := ms.AddChildWorkflowExecutionCompletedEvent(initiatedID, childExecution, attributes) - return event -} - -func addCompleteWorkflowEvent(ms workflow.MutableState, workflowTaskCompletedEventID int64, - result *commonpb.Payloads) *historypb.HistoryEvent { - event, _ := ms.AddCompletedWorkflowEvent( - workflowTaskCompletedEventID, - &commandpb.CompleteWorkflowExecutionCommandAttributes{ - Result: result, - }, - "") - return event -} - -func addFailWorkflowEvent( - ms workflow.MutableState, - workflowTaskCompletedEventID int64, - failure *failurepb.Failure, - retryState enumspb.RetryState, -) *historypb.HistoryEvent { - event, _ := ms.AddFailWorkflowEvent( - workflowTaskCompletedEventID, - retryState, - &commandpb.FailWorkflowExecutionCommandAttributes{ - Failure: failure, - }, - "", - ) - return event -} diff -Nru temporal-1.21.5-1/src/service/history/history_engine.go temporal-1.22.5/src/service/history/history_engine.go --- temporal-1.21.5-1/src/service/history/history_engine.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/service/history/history_engine.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,812 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
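The new history_engine.go added below restructures the engine so that each history RPC becomes a thin method delegating to a dedicated handler package under service/history/api (for example StartWorkflowExecution calls startworkflow.NewStarter(...).Invoke, and SignalWorkflowExecution calls signalworkflow.Invoke). A minimal sketch of that dispatch shape, under the assumption that the per-operation package owns the business logic; the names engine, startRequest, and invokeStart are hypothetical stand-ins, not identifiers from the Temporal codebase:

package main

import (
	"context"
	"fmt"
)

// startRequest and startResponse stand in for the real request/response protos.
type startRequest struct{ WorkflowID string }
type startResponse struct{ RunID string }

// invokeStart plays the role of a per-operation package such as api/startworkflow:
// the business logic lives here rather than on the engine.
func invokeStart(_ context.Context, req *startRequest) (*startResponse, error) {
	return &startResponse{RunID: "run-for-" + req.WorkflowID}, nil
}

// engine mirrors the thin-dispatch shape of historyEngineImpl: it only holds
// shared dependencies and forwards each RPC to the matching Invoke function.
type engine struct{}

func (e *engine) StartWorkflowExecution(ctx context.Context, req *startRequest) (*startResponse, error) {
	return invokeStart(ctx, req)
}

func main() {
	resp, err := (&engine{}).StartWorkflowExecution(context.Background(), &startRequest{WorkflowID: "wf-1"})
	fmt.Println(resp, err)
}

Keeping the engine methods one-liners leaves the per-operation packages independently testable, which is consistent with the long list of api/* imports at the top of the real file.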
+ +package history + +import ( + "context" + "sync/atomic" + "time" + + "go.opentelemetry.io/otel/trace" + commonpb "go.temporal.io/api/common/v1" + historypb "go.temporal.io/api/history/v1" + + "go.temporal.io/server/api/historyservice/v1" + "go.temporal.io/server/api/matchingservice/v1" + replicationspb "go.temporal.io/server/api/replication/v1" + "go.temporal.io/server/client" + "go.temporal.io/server/common" + "go.temporal.io/server/common/clock" + "go.temporal.io/server/common/cluster" + "go.temporal.io/server/common/collection" + "go.temporal.io/server/common/definition" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/log/tag" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/namespace" + "go.temporal.io/server/common/persistence" + "go.temporal.io/server/common/persistence/serialization" + "go.temporal.io/server/common/persistence/visibility" + "go.temporal.io/server/common/persistence/visibility/manager" + "go.temporal.io/server/common/primitives/timestamp" + "go.temporal.io/server/common/sdk" + "go.temporal.io/server/common/searchattribute" + "go.temporal.io/server/service/history/api" + "go.temporal.io/server/service/history/api/deleteworkflow" + "go.temporal.io/server/service/history/api/describemutablestate" + "go.temporal.io/server/service/history/api/describeworkflow" + "go.temporal.io/server/service/history/api/isactivitytaskvalid" + "go.temporal.io/server/service/history/api/isworkflowtaskvalid" + "go.temporal.io/server/service/history/api/pollupdate" + "go.temporal.io/server/service/history/api/queryworkflow" + "go.temporal.io/server/service/history/api/reapplyevents" + "go.temporal.io/server/service/history/api/recordactivitytaskheartbeat" + "go.temporal.io/server/service/history/api/recordactivitytaskstarted" + "go.temporal.io/server/service/history/api/recordchildworkflowcompleted" + "go.temporal.io/server/service/history/api/refreshworkflow" + "go.temporal.io/server/service/history/api/removesignalmutablestate" + replicationapi "go.temporal.io/server/service/history/api/replication" + "go.temporal.io/server/service/history/api/replicationadmin" + "go.temporal.io/server/service/history/api/requestcancelworkflow" + "go.temporal.io/server/service/history/api/resetstickytaskqueue" + "go.temporal.io/server/service/history/api/resetworkflow" + "go.temporal.io/server/service/history/api/respondactivitytaskcanceled" + "go.temporal.io/server/service/history/api/respondactivitytaskcompleted" + "go.temporal.io/server/service/history/api/respondactivitytaskfailed" + "go.temporal.io/server/service/history/api/signalwithstartworkflow" + "go.temporal.io/server/service/history/api/signalworkflow" + "go.temporal.io/server/service/history/api/startworkflow" + "go.temporal.io/server/service/history/api/terminateworkflow" + "go.temporal.io/server/service/history/api/updateworkflow" + "go.temporal.io/server/service/history/api/verifychildworkflowcompletionrecorded" + "go.temporal.io/server/service/history/configs" + "go.temporal.io/server/service/history/consts" + "go.temporal.io/server/service/history/deletemanager" + "go.temporal.io/server/service/history/events" + "go.temporal.io/server/service/history/ndc" + "go.temporal.io/server/service/history/queues" + "go.temporal.io/server/service/history/replication" + "go.temporal.io/server/service/history/shard" + "go.temporal.io/server/service/history/tasks" + wcache "go.temporal.io/server/service/history/workflow/cache" + "go.temporal.io/server/service/worker/archiver" +) + +const ( + 
activityCancellationMsgActivityNotStarted = "ACTIVITY_ID_NOT_STARTED" +) + +type ( + historyEngineImpl struct { + status int32 + currentClusterName string + shard shard.Context + timeSource clock.TimeSource + workflowTaskHandler workflowTaskHandlerCallbacks + clusterMetadata cluster.Metadata + executionManager persistence.ExecutionManager + queueProcessors map[tasks.Category]queues.Queue + replicationAckMgr replication.AckManager + nDCHistoryReplicator ndc.HistoryReplicator + nDCActivityStateReplicator ndc.ActivityStateReplicator + nDCWorkflowStateReplicator ndc.WorkflowStateReplicator + replicationProcessorMgr replication.TaskProcessor + eventNotifier events.Notifier + tokenSerializer common.TaskTokenSerializer + metricsHandler metrics.Handler + logger log.Logger + throttledLogger log.Logger + config *configs.Config + workflowRebuilder workflowRebuilder + workflowResetter ndc.WorkflowResetter + sdkClientFactory sdk.ClientFactory + eventsReapplier ndc.EventsReapplier + matchingClient matchingservice.MatchingServiceClient + rawMatchingClient matchingservice.MatchingServiceClient + replicationDLQHandler replication.DLQHandler + persistenceVisibilityMgr manager.VisibilityManager + searchAttributesValidator *searchattribute.Validator + workflowDeleteManager deletemanager.DeleteManager + eventSerializer serialization.Serializer + workflowConsistencyChecker api.WorkflowConsistencyChecker + tracer trace.Tracer + } +) + +// NewEngineWithShardContext creates an instance of history engine +func NewEngineWithShardContext( + shard shard.Context, + clientBean client.Bean, + matchingClient matchingservice.MatchingServiceClient, + sdkClientFactory sdk.ClientFactory, + eventNotifier events.Notifier, + config *configs.Config, + rawMatchingClient matchingservice.MatchingServiceClient, + workflowCache wcache.Cache, + archivalClient archiver.Client, + eventSerializer serialization.Serializer, + queueProcessorFactories []QueueFactory, + replicationTaskFetcherFactory replication.TaskFetcherFactory, + replicationTaskExecutorProvider replication.TaskExecutorProvider, + workflowConsistencyChecker api.WorkflowConsistencyChecker, + tracerProvider trace.TracerProvider, + persistenceVisibilityMgr manager.VisibilityManager, + eventBlobCache persistence.XDCCache, +) shard.Engine { + currentClusterName := shard.GetClusterMetadata().GetCurrentClusterName() + + logger := shard.GetLogger() + executionManager := shard.GetExecutionManager() + + workflowDeleteManager := deletemanager.NewDeleteManager( + shard, + workflowCache, + config, + archivalClient, + shard.GetTimeSource(), + persistenceVisibilityMgr, + ) + + historyEngImpl := &historyEngineImpl{ + status: common.DaemonStatusInitialized, + currentClusterName: currentClusterName, + shard: shard, + clusterMetadata: shard.GetClusterMetadata(), + timeSource: shard.GetTimeSource(), + executionManager: executionManager, + tokenSerializer: common.NewProtoTaskTokenSerializer(), + logger: log.With(logger, tag.ComponentHistoryEngine), + throttledLogger: log.With(shard.GetThrottledLogger(), tag.ComponentHistoryEngine), + metricsHandler: shard.GetMetricsHandler(), + eventNotifier: eventNotifier, + config: config, + sdkClientFactory: sdkClientFactory, + matchingClient: matchingClient, + rawMatchingClient: rawMatchingClient, + persistenceVisibilityMgr: persistenceVisibilityMgr, + workflowDeleteManager: workflowDeleteManager, + eventSerializer: eventSerializer, + workflowConsistencyChecker: workflowConsistencyChecker, + tracer: tracerProvider.Tracer(consts.LibraryName), + } + + 
historyEngImpl.queueProcessors = make(map[tasks.Category]queues.Queue) + for _, factory := range queueProcessorFactories { + processor := factory.CreateQueue(shard, workflowCache) + historyEngImpl.queueProcessors[processor.Category()] = processor + } + + historyEngImpl.eventsReapplier = ndc.NewEventsReapplier(shard.GetMetricsHandler(), logger) + + if shard.GetClusterMetadata().IsGlobalNamespaceEnabled() { + historyEngImpl.replicationAckMgr = replication.NewAckManager( + shard, + workflowCache, + eventBlobCache, + executionManager, + logger, + ) + historyEngImpl.nDCHistoryReplicator = ndc.NewHistoryReplicator( + shard, + workflowCache, + historyEngImpl.eventsReapplier, + eventSerializer, + logger, + ) + historyEngImpl.nDCActivityStateReplicator = ndc.NewActivityStateReplicator( + shard, + workflowCache, + logger, + ) + historyEngImpl.nDCWorkflowStateReplicator = ndc.NewWorkflowStateReplicator( + shard, + workflowCache, + historyEngImpl.eventsReapplier, + eventSerializer, + logger, + ) + } + historyEngImpl.workflowRebuilder = NewWorkflowRebuilder( + shard, + workflowCache, + logger, + ) + historyEngImpl.workflowResetter = ndc.NewWorkflowResetter( + shard, + workflowCache, + logger, + ) + + historyEngImpl.searchAttributesValidator = searchattribute.NewValidator( + shard.GetSearchAttributesProvider(), + shard.GetSearchAttributesMapperProvider(), + config.SearchAttributesNumberOfKeysLimit, + config.SearchAttributesSizeOfValueLimit, + config.SearchAttributesTotalSizeLimit, + persistenceVisibilityMgr, + visibility.AllowListForValidation(persistenceVisibilityMgr.GetStoreNames()), + ) + + historyEngImpl.workflowTaskHandler = newWorkflowTaskHandlerCallback(historyEngImpl) + historyEngImpl.replicationDLQHandler = replication.NewLazyDLQHandler( + shard, + workflowDeleteManager, + workflowCache, + clientBean, + replicationTaskExecutorProvider, + ) + historyEngImpl.replicationProcessorMgr = replication.NewTaskProcessorManager( + config, + shard, + historyEngImpl, + workflowCache, + workflowDeleteManager, + clientBean, + eventSerializer, + replicationTaskFetcherFactory, + replicationTaskExecutorProvider, + ) + return historyEngImpl +} + +// Start will spin up all the components needed to start serving this shard. +// Make sure all the components are loaded lazily so start can return immediately. This is important because +// ShardController calls start sequentially for all the shards for a given host during startup. +func (e *historyEngineImpl) Start() { + if !atomic.CompareAndSwapInt32( + &e.status, + common.DaemonStatusInitialized, + common.DaemonStatusStarted, + ) { + return + } + + e.logger.Info("", tag.LifeCycleStarting) + defer e.logger.Info("", tag.LifeCycleStarted) + + e.registerNamespaceStateChangeCallback() + + for _, queueProcessor := range e.queueProcessors { + queueProcessor.Start() + } + e.replicationProcessorMgr.Start() +} + +// Stop the service. 
+func (e *historyEngineImpl) Stop() { + if !atomic.CompareAndSwapInt32( + &e.status, + common.DaemonStatusStarted, + common.DaemonStatusStopped, + ) { + return + } + + e.logger.Info("", tag.LifeCycleStopping) + defer e.logger.Info("", tag.LifeCycleStopped) + + for _, queueProcessor := range e.queueProcessors { + queueProcessor.Stop() + } + e.replicationProcessorMgr.Stop() + if e.replicationAckMgr != nil { + e.replicationAckMgr.Close() + } + // unset the failover callback + e.shard.GetNamespaceRegistry().UnregisterStateChangeCallback(e) +} + +func (e *historyEngineImpl) registerNamespaceStateChangeCallback() { + + e.shard.GetNamespaceRegistry().RegisterStateChangeCallback(e, func(ns *namespace.Namespace, deletedFromDb bool) { + if e.shard.GetClusterMetadata().IsGlobalNamespaceEnabled() { + e.shard.UpdateHandoverNamespace(ns, deletedFromDb) + } + + if deletedFromDb { + return + } + + if ns.IsGlobalNamespace() && + ns.ReplicationPolicy() == namespace.ReplicationPolicyMultiCluster && + ns.ActiveClusterName() == e.currentClusterName { + + for _, queueProcessor := range e.queueProcessors { + queueProcessor.FailoverNamespace(ns.ID().String()) + } + } + }) +} + +// StartWorkflowExecution starts a workflow execution +// Consistency guarantee: always write +func (e *historyEngineImpl) StartWorkflowExecution( + ctx context.Context, + startRequest *historyservice.StartWorkflowExecutionRequest, +) (resp *historyservice.StartWorkflowExecutionResponse, retError error) { + starter, err := startworkflow.NewStarter( + e.shard, + e.workflowConsistencyChecker, + e.tokenSerializer, + startRequest, + ) + if err != nil { + return nil, err + } + return starter.Invoke(ctx) +} + +// GetMutableState retrieves the mutable state of the workflow execution +func (e *historyEngineImpl) GetMutableState( + ctx context.Context, + request *historyservice.GetMutableStateRequest, +) (*historyservice.GetMutableStateResponse, error) { + return api.GetOrPollMutableState(ctx, request, e.shard, e.workflowConsistencyChecker, e.eventNotifier) +} + +// PollMutableState retrieves the mutable state of the workflow execution with long polling +func (e *historyEngineImpl) PollMutableState( + ctx context.Context, + request *historyservice.PollMutableStateRequest, +) (*historyservice.PollMutableStateResponse, error) { + + response, err := api.GetOrPollMutableState( + ctx, + &historyservice.GetMutableStateRequest{ + NamespaceId: request.GetNamespaceId(), + Execution: request.Execution, + ExpectedNextEventId: request.ExpectedNextEventId, + CurrentBranchToken: request.CurrentBranchToken, + VersionHistoryItem: request.GetVersionHistoryItem(), + }, + e.shard, + e.workflowConsistencyChecker, + e.eventNotifier, + ) + if err != nil { + return nil, err + } + + return &historyservice.PollMutableStateResponse{ + Execution: response.Execution, + WorkflowType: response.WorkflowType, + NextEventId: response.NextEventId, + PreviousStartedEventId: response.PreviousStartedEventId, + LastFirstEventId: response.LastFirstEventId, + LastFirstEventTxnId: response.LastFirstEventTxnId, + TaskQueue: response.TaskQueue, + StickyTaskQueue: response.StickyTaskQueue, + StickyTaskQueueScheduleToStartTimeout: response.StickyTaskQueueScheduleToStartTimeout, + CurrentBranchToken: response.CurrentBranchToken, + VersionHistories: response.VersionHistories, + WorkflowState: response.WorkflowState, + WorkflowStatus: response.WorkflowStatus, + FirstExecutionRunId: response.FirstExecutionRunId, + }, nil +} + +func (e *historyEngineImpl) QueryWorkflow( + ctx context.Context, + 
request *historyservice.QueryWorkflowRequest, +) (_ *historyservice.QueryWorkflowResponse, retErr error) { + return queryworkflow.Invoke(ctx, request, e.shard, e.workflowConsistencyChecker, e.rawMatchingClient, e.matchingClient) +} + +func (e *historyEngineImpl) DescribeMutableState( + ctx context.Context, + request *historyservice.DescribeMutableStateRequest, +) (response *historyservice.DescribeMutableStateResponse, retError error) { + return describemutablestate.Invoke(ctx, request, e.shard, e.workflowConsistencyChecker) +} + +// ResetStickyTaskQueue reset the volatile information in mutable state of a given workflow. +// Volatile information are the information related to client, such as: +// 1. StickyTaskQueue +// 2. StickyScheduleToStartTimeout +func (e *historyEngineImpl) ResetStickyTaskQueue( + ctx context.Context, + resetRequest *historyservice.ResetStickyTaskQueueRequest, +) (*historyservice.ResetStickyTaskQueueResponse, error) { + return resetstickytaskqueue.Invoke(ctx, resetRequest, e.shard, e.workflowConsistencyChecker) +} + +// DescribeWorkflowExecution returns information about the specified workflow execution. +func (e *historyEngineImpl) DescribeWorkflowExecution( + ctx context.Context, + request *historyservice.DescribeWorkflowExecutionRequest, +) (_ *historyservice.DescribeWorkflowExecutionResponse, retError error) { + return describeworkflow.Invoke( + ctx, + request, + e.shard, + e.workflowConsistencyChecker, + e.persistenceVisibilityMgr, + ) +} + +func (e *historyEngineImpl) RecordActivityTaskStarted( + ctx context.Context, + request *historyservice.RecordActivityTaskStartedRequest, +) (*historyservice.RecordActivityTaskStartedResponse, error) { + return recordactivitytaskstarted.Invoke(ctx, request, e.shard, e.workflowConsistencyChecker) +} + +// ScheduleWorkflowTask schedules a workflow task if no outstanding workflow task found +func (e *historyEngineImpl) ScheduleWorkflowTask( + ctx context.Context, + req *historyservice.ScheduleWorkflowTaskRequest, +) error { + return e.workflowTaskHandler.handleWorkflowTaskScheduled(ctx, req) +} + +func (e *historyEngineImpl) VerifyFirstWorkflowTaskScheduled( + ctx context.Context, + request *historyservice.VerifyFirstWorkflowTaskScheduledRequest, +) (retError error) { + return e.workflowTaskHandler.verifyFirstWorkflowTaskScheduled(ctx, request) +} + +// RecordWorkflowTaskStarted starts a workflow task +func (e *historyEngineImpl) RecordWorkflowTaskStarted( + ctx context.Context, + request *historyservice.RecordWorkflowTaskStartedRequest, +) (*historyservice.RecordWorkflowTaskStartedResponse, error) { + return e.workflowTaskHandler.handleWorkflowTaskStarted(ctx, request) +} + +// RespondWorkflowTaskCompleted completes a workflow task +func (e *historyEngineImpl) RespondWorkflowTaskCompleted( + ctx context.Context, + req *historyservice.RespondWorkflowTaskCompletedRequest, +) (*historyservice.RespondWorkflowTaskCompletedResponse, error) { + return e.workflowTaskHandler.handleWorkflowTaskCompleted(ctx, req) +} + +// RespondWorkflowTaskFailed fails a workflow task +func (e *historyEngineImpl) RespondWorkflowTaskFailed( + ctx context.Context, + req *historyservice.RespondWorkflowTaskFailedRequest, +) error { + return e.workflowTaskHandler.handleWorkflowTaskFailed(ctx, req) +} + +// RespondActivityTaskCompleted completes an activity task. 
+func (e *historyEngineImpl) RespondActivityTaskCompleted( + ctx context.Context, + req *historyservice.RespondActivityTaskCompletedRequest, +) (*historyservice.RespondActivityTaskCompletedResponse, error) { + return respondactivitytaskcompleted.Invoke(ctx, req, e.shard, e.workflowConsistencyChecker) +} + +// RespondActivityTaskFailed completes an activity task failure. +func (e *historyEngineImpl) RespondActivityTaskFailed( + ctx context.Context, + req *historyservice.RespondActivityTaskFailedRequest, +) (*historyservice.RespondActivityTaskFailedResponse, error) { + return respondactivitytaskfailed.Invoke(ctx, req, e.shard, e.workflowConsistencyChecker) +} + +// RespondActivityTaskCanceled completes an activity task failure. +func (e *historyEngineImpl) RespondActivityTaskCanceled( + ctx context.Context, + req *historyservice.RespondActivityTaskCanceledRequest, +) (*historyservice.RespondActivityTaskCanceledResponse, error) { + return respondactivitytaskcanceled.Invoke(ctx, req, e.shard, e.workflowConsistencyChecker) +} + +// RecordActivityTaskHeartbeat records an hearbeat for a task. +// This method can be used for two purposes. +// - For reporting liveness of the activity. +// - For reporting progress of the activity, this can be done even if the liveness is not configured. +func (e *historyEngineImpl) RecordActivityTaskHeartbeat( + ctx context.Context, + req *historyservice.RecordActivityTaskHeartbeatRequest, +) (*historyservice.RecordActivityTaskHeartbeatResponse, error) { + return recordactivitytaskheartbeat.Invoke(ctx, req, e.shard, e.workflowConsistencyChecker) +} + +// RequestCancelWorkflowExecution records request cancellation event for workflow execution +func (e *historyEngineImpl) RequestCancelWorkflowExecution( + ctx context.Context, + req *historyservice.RequestCancelWorkflowExecutionRequest, +) (resp *historyservice.RequestCancelWorkflowExecutionResponse, retError error) { + return requestcancelworkflow.Invoke(ctx, req, e.shard, e.workflowConsistencyChecker) +} + +func (e *historyEngineImpl) SignalWorkflowExecution( + ctx context.Context, + req *historyservice.SignalWorkflowExecutionRequest, +) (resp *historyservice.SignalWorkflowExecutionResponse, retError error) { + return signalworkflow.Invoke(ctx, req, e.shard, e.workflowConsistencyChecker) +} + +// SignalWithStartWorkflowExecution signals current workflow (if running) or creates & signals a new workflow +// Consistency guarantee: always write +func (e *historyEngineImpl) SignalWithStartWorkflowExecution( + ctx context.Context, + req *historyservice.SignalWithStartWorkflowExecutionRequest, +) (_ *historyservice.SignalWithStartWorkflowExecutionResponse, retError error) { + return signalwithstartworkflow.Invoke(ctx, req, e.shard, e.workflowConsistencyChecker) +} + +func (e *historyEngineImpl) UpdateWorkflowExecution( + ctx context.Context, + req *historyservice.UpdateWorkflowExecutionRequest, +) (*historyservice.UpdateWorkflowExecutionResponse, error) { + return updateworkflow.Invoke(ctx, req, e.shard, e.workflowConsistencyChecker, e.matchingClient) +} + +func (e *historyEngineImpl) PollWorkflowExecutionUpdate( + ctx context.Context, + req *historyservice.PollWorkflowExecutionUpdateRequest, +) (*historyservice.PollWorkflowExecutionUpdateResponse, error) { + return pollupdate.Invoke(ctx, req, e.workflowConsistencyChecker) +} + +// RemoveSignalMutableState remove the signal request id in signal_requested for deduplicate +func (e *historyEngineImpl) RemoveSignalMutableState( + ctx context.Context, + req 
*historyservice.RemoveSignalMutableStateRequest, +) (*historyservice.RemoveSignalMutableStateResponse, error) { + return removesignalmutablestate.Invoke(ctx, req, e.shard, e.workflowConsistencyChecker) +} + +func (e *historyEngineImpl) TerminateWorkflowExecution( + ctx context.Context, + req *historyservice.TerminateWorkflowExecutionRequest, +) (*historyservice.TerminateWorkflowExecutionResponse, error) { + return terminateworkflow.Invoke(ctx, req, e.shard, e.workflowConsistencyChecker) +} + +func (e *historyEngineImpl) DeleteWorkflowExecution( + ctx context.Context, + request *historyservice.DeleteWorkflowExecutionRequest, +) (*historyservice.DeleteWorkflowExecutionResponse, error) { + return deleteworkflow.Invoke(ctx, request, e.shard, e.workflowConsistencyChecker, e.workflowDeleteManager) +} + +// RecordChildExecutionCompleted records the completion of child execution into parent execution history +func (e *historyEngineImpl) RecordChildExecutionCompleted( + ctx context.Context, + req *historyservice.RecordChildExecutionCompletedRequest, +) (*historyservice.RecordChildExecutionCompletedResponse, error) { + return recordchildworkflowcompleted.Invoke(ctx, req, e.shard, e.workflowConsistencyChecker) +} + +// IsActivityTaskValid - whether activity task is still valid +func (e *historyEngineImpl) IsActivityTaskValid( + ctx context.Context, + req *historyservice.IsActivityTaskValidRequest, +) (*historyservice.IsActivityTaskValidResponse, error) { + return isactivitytaskvalid.Invoke(ctx, req, e.shard, e.workflowConsistencyChecker) +} + +// IsWorkflowTaskValid - whether workflow task is still valid +func (e *historyEngineImpl) IsWorkflowTaskValid( + ctx context.Context, + req *historyservice.IsWorkflowTaskValidRequest, +) (*historyservice.IsWorkflowTaskValidResponse, error) { + return isworkflowtaskvalid.Invoke(ctx, req, e.shard, e.workflowConsistencyChecker) +} + +func (e *historyEngineImpl) VerifyChildExecutionCompletionRecorded( + ctx context.Context, + req *historyservice.VerifyChildExecutionCompletionRecordedRequest, +) (*historyservice.VerifyChildExecutionCompletionRecordedResponse, error) { + return verifychildworkflowcompletionrecorded.Invoke(ctx, req, e.shard, e.workflowConsistencyChecker) +} + +func (e *historyEngineImpl) ReplicateEventsV2( + ctx context.Context, + replicateRequest *historyservice.ReplicateEventsV2Request, +) error { + return e.nDCHistoryReplicator.ApplyEvents(ctx, replicateRequest) +} + +func (e *historyEngineImpl) SyncActivity( + ctx context.Context, + request *historyservice.SyncActivityRequest, +) (retError error) { + return e.nDCActivityStateReplicator.SyncActivityState(ctx, request) +} + +// ReplicateWorkflowState is an experimental method to replicate workflow state. This should not expose outside of history service role. +func (e *historyEngineImpl) ReplicateWorkflowState( + ctx context.Context, + request *historyservice.ReplicateWorkflowStateRequest, +) error { + return e.nDCWorkflowStateReplicator.SyncWorkflowState(ctx, request) +} + +func (e *historyEngineImpl) SyncShardStatus( + ctx context.Context, + request *historyservice.SyncShardStatusRequest, +) error { + + clusterName := request.GetSourceCluster() + now := timestamp.TimeValue(request.GetStatusTime()) + + // here there are 3 main things + // 1. update the view of remote cluster's shard time + // 2. 
notify the timer gate in the timer queue standby processor + // 3, notify the transfer (essentially a no op, just put it here so it looks symmetric) + e.shard.SetCurrentTime(clusterName, now) + for _, processor := range e.queueProcessors { + processor.NotifyNewTasks([]tasks.Task{}) + } + return nil +} + +// ResetWorkflowExecution terminates current workflow (if running) and replay & create new workflow +// Consistency guarantee: always write +func (e *historyEngineImpl) ResetWorkflowExecution( + ctx context.Context, + req *historyservice.ResetWorkflowExecutionRequest, +) (*historyservice.ResetWorkflowExecutionResponse, error) { + return resetworkflow.Invoke(ctx, req, e.shard, e.workflowConsistencyChecker) +} + +func (e *historyEngineImpl) NotifyNewHistoryEvent( + notification *events.Notification, +) { + + e.eventNotifier.NotifyNewHistoryEvent(notification) +} + +func (e *historyEngineImpl) NotifyNewTasks( + newTasks map[tasks.Category][]tasks.Task, +) { + for category, tasksByCategory := range newTasks { + // TODO: make replicatorProcessor part of queueProcessors list + // and get rid of the special case here. + if category == tasks.CategoryReplication { + if e.replicationAckMgr != nil { + e.replicationAckMgr.NotifyNewTasks(tasksByCategory) + } + continue + } + + if len(tasksByCategory) > 0 { + e.queueProcessors[category].NotifyNewTasks(tasksByCategory) + } + } +} + +func (e *historyEngineImpl) AddSpeculativeWorkflowTaskTimeoutTask(task *tasks.WorkflowTaskTimeoutTask) { + e.queueProcessors[tasks.CategoryMemoryTimer].NotifyNewTasks([]tasks.Task{task}) +} + +func (e *historyEngineImpl) GetReplicationMessages( + ctx context.Context, + pollingCluster string, + ackMessageID int64, + ackTimestamp time.Time, + queryMessageID int64, +) (*replicationspb.ReplicationMessages, error) { + return replicationapi.GetTasks(ctx, e.shard, e.replicationAckMgr, pollingCluster, ackMessageID, ackTimestamp, queryMessageID) +} + +func (e *historyEngineImpl) SubscribeReplicationNotification() (<-chan struct{}, string) { + return e.replicationAckMgr.SubscribeNotification() +} + +func (e *historyEngineImpl) UnsubscribeReplicationNotification(subscriberID string) { + e.replicationAckMgr.UnsubscribeNotification(subscriberID) +} + +func (e *historyEngineImpl) ConvertReplicationTask( + ctx context.Context, + task tasks.Task, +) (*replicationspb.ReplicationTask, error) { + return e.replicationAckMgr.ConvertTask(ctx, task) +} +func (e *historyEngineImpl) GetReplicationTasksIter( + ctx context.Context, + pollingCluster string, + minInclusiveTaskID int64, + maxExclusiveTaskID int64, +) (collection.Iterator[tasks.Task], error) { + return e.replicationAckMgr.GetReplicationTasksIter(ctx, pollingCluster, minInclusiveTaskID, maxExclusiveTaskID) +} + +func (e *historyEngineImpl) GetDLQReplicationMessages( + ctx context.Context, + taskInfos []*replicationspb.ReplicationTaskInfo, +) ([]*replicationspb.ReplicationTask, error) { + return replicationapi.GetDLQTasks(ctx, e.shard, e.replicationAckMgr, taskInfos) +} + +func (e *historyEngineImpl) ReapplyEvents( + ctx context.Context, + namespaceUUID namespace.ID, + workflowID string, + runID string, + reapplyEvents []*historypb.HistoryEvent, +) error { + return reapplyevents.Invoke(ctx, namespaceUUID, workflowID, runID, reapplyEvents, e.shard, e.workflowConsistencyChecker, e.workflowResetter, e.eventsReapplier) +} + +func (e *historyEngineImpl) GetDLQMessages( + ctx context.Context, + request *historyservice.GetDLQMessagesRequest, +) (*historyservice.GetDLQMessagesResponse, error) { + 
return replicationadmin.GetDLQ(ctx, request, e.shard, e.replicationDLQHandler) +} + +func (e *historyEngineImpl) PurgeDLQMessages( + ctx context.Context, + request *historyservice.PurgeDLQMessagesRequest, +) (*historyservice.PurgeDLQMessagesResponse, error) { + return replicationadmin.PurgeDLQ(ctx, request, e.shard, e.replicationDLQHandler) +} + +func (e *historyEngineImpl) MergeDLQMessages( + ctx context.Context, + request *historyservice.MergeDLQMessagesRequest, +) (*historyservice.MergeDLQMessagesResponse, error) { + return replicationadmin.MergeDLQ(ctx, request, e.shard, e.replicationDLQHandler) +} + +func (e *historyEngineImpl) RebuildMutableState( + ctx context.Context, + namespaceUUID namespace.ID, + execution commonpb.WorkflowExecution, +) error { + return e.workflowRebuilder.rebuild( + ctx, + definition.NewWorkflowKey( + namespaceUUID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + ) +} + +func (e *historyEngineImpl) RefreshWorkflowTasks( + ctx context.Context, + namespaceUUID namespace.ID, + execution commonpb.WorkflowExecution, +) (retError error) { + return refreshworkflow.Invoke( + ctx, + definition.NewWorkflowKey(namespaceUUID.String(), execution.WorkflowId, execution.RunId), + e.shard, + e.workflowConsistencyChecker, + ) +} + +func (e *historyEngineImpl) GenerateLastHistoryReplicationTasks( + ctx context.Context, + request *historyservice.GenerateLastHistoryReplicationTasksRequest, +) (_ *historyservice.GenerateLastHistoryReplicationTasksResponse, retError error) { + return replicationapi.GenerateTask(ctx, request, e.shard, e.workflowConsistencyChecker) +} + +func (e *historyEngineImpl) GetReplicationStatus( + ctx context.Context, + request *historyservice.GetReplicationStatusRequest, +) (_ *historyservice.ShardReplicationStatus, retError error) { + return replicationapi.GetStatus(ctx, request, e.shard, e.replicationAckMgr) +} diff -Nru temporal-1.21.5-1/src/service/history/history_engine2_test.go temporal-1.22.5/src/service/history/history_engine2_test.go --- temporal-1.21.5-1/src/service/history/history_engine2_test.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/service/history/history_engine2_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,2040 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package history + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/pborman/uuid" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + commandpb "go.temporal.io/api/command/v1" + commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" + historypb "go.temporal.io/api/history/v1" + querypb "go.temporal.io/api/query/v1" + "go.temporal.io/api/serviceerror" + taskqueuepb "go.temporal.io/api/taskqueue/v1" + "go.temporal.io/api/workflowservice/v1" + + "go.temporal.io/server/common/payload" + "go.temporal.io/server/common/searchattribute" + + enumsspb "go.temporal.io/server/api/enums/v1" + historyspb "go.temporal.io/server/api/history/v1" + "go.temporal.io/server/api/historyservice/v1" + persistencespb "go.temporal.io/server/api/persistence/v1" + workflowspb "go.temporal.io/server/api/workflow/v1" + "go.temporal.io/server/common/persistence/versionhistory" + "go.temporal.io/server/common/persistence/visibility/manager" + "go.temporal.io/server/common/primitives/timestamp" + serviceerrors "go.temporal.io/server/common/serviceerror" + "go.temporal.io/server/service/history/api" + "go.temporal.io/server/service/history/configs" + "go.temporal.io/server/service/history/consts" + "go.temporal.io/server/service/history/events" + "go.temporal.io/server/service/history/queues" + "go.temporal.io/server/service/history/shard" + "go.temporal.io/server/service/history/tasks" + "go.temporal.io/server/service/history/tests" + "go.temporal.io/server/service/history/workflow" + wcache "go.temporal.io/server/service/history/workflow/cache" + + tokenspb "go.temporal.io/server/api/token/v1" + "go.temporal.io/server/common" + "go.temporal.io/server/common/clock" + "go.temporal.io/server/common/cluster" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/log/tag" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/namespace" + "go.temporal.io/server/common/payloads" + "go.temporal.io/server/common/persistence" +) + +type ( + engine2Suite struct { + suite.Suite + *require.Assertions + + controller *gomock.Controller + mockShard *shard.ContextTest + mockTxProcessor *queues.MockQueue + mockTimerProcessor *queues.MockQueue + mockVisibilityProcessor *queues.MockQueue + mockArchivalProcessor *queues.MockQueue + mockMemoryScheduledQueue *queues.MockQueue + mockEventsCache *events.MockCache + mockNamespaceCache *namespace.MockRegistry + mockClusterMetadata *cluster.MockMetadata + mockVisibilityManager *manager.MockVisibilityManager + + workflowCache wcache.Cache + historyEngine *historyEngineImpl + mockExecutionMgr *persistence.MockExecutionManager + + config *configs.Config + logger *log.MockLogger + errorMessages []string + } +) + +func TestEngine2Suite(t *testing.T) { + s := new(engine2Suite) + suite.Run(t, s) +} + +func (s *engine2Suite) SetupSuite() { + +} + +func (s *engine2Suite) TearDownSuite() { +} + +func (s *engine2Suite) SetupTest() { + s.Assertions = require.New(s.T()) + + s.controller = gomock.NewController(s.T()) + + s.mockTxProcessor = queues.NewMockQueue(s.controller) + s.mockTimerProcessor = queues.NewMockQueue(s.controller) + s.mockVisibilityProcessor = queues.NewMockQueue(s.controller) + s.mockArchivalProcessor = queues.NewMockQueue(s.controller) + s.mockMemoryScheduledQueue = queues.NewMockQueue(s.controller) + s.mockTxProcessor.EXPECT().Category().Return(tasks.CategoryTransfer).AnyTimes() + 
s.mockTimerProcessor.EXPECT().Category().Return(tasks.CategoryTimer).AnyTimes() + s.mockVisibilityProcessor.EXPECT().Category().Return(tasks.CategoryVisibility).AnyTimes() + s.mockArchivalProcessor.EXPECT().Category().Return(tasks.CategoryArchival).AnyTimes() + s.mockMemoryScheduledQueue.EXPECT().Category().Return(tasks.CategoryMemoryTimer).AnyTimes() + s.mockTxProcessor.EXPECT().NotifyNewTasks(gomock.Any()).AnyTimes() + s.mockTimerProcessor.EXPECT().NotifyNewTasks(gomock.Any()).AnyTimes() + s.mockVisibilityProcessor.EXPECT().NotifyNewTasks(gomock.Any()).AnyTimes() + s.mockArchivalProcessor.EXPECT().NotifyNewTasks(gomock.Any()).AnyTimes() + s.mockMemoryScheduledQueue.EXPECT().NotifyNewTasks(gomock.Any()).AnyTimes() + + s.config = tests.NewDynamicConfig() + mockShard := shard.NewTestContext( + s.controller, + &persistencespb.ShardInfo{ + ShardId: 1, + RangeId: 1, + }, + s.config, + ) + s.mockShard = mockShard + s.mockShard.Resource.ShardMgr.EXPECT().AssertShardOwnership(gomock.Any(), gomock.Any()).AnyTimes() + + s.mockNamespaceCache = s.mockShard.Resource.NamespaceCache + s.mockExecutionMgr = s.mockShard.Resource.ExecutionMgr + s.mockClusterMetadata = s.mockShard.Resource.ClusterMetadata + s.mockVisibilityManager = s.mockShard.Resource.VisibilityManager + + s.mockEventsCache = s.mockShard.MockEventsCache + s.mockNamespaceCache.EXPECT().GetNamespaceByID(tests.NamespaceID).Return(tests.GlobalNamespaceEntry, nil).AnyTimes() + s.mockNamespaceCache.EXPECT().GetNamespaceByID(tests.ParentNamespaceID).Return(tests.GlobalParentNamespaceEntry, nil).AnyTimes() + s.mockNamespaceCache.EXPECT().GetNamespace(tests.ChildNamespace).Return(tests.GlobalChildNamespaceEntry, nil).AnyTimes() + s.mockEventsCache.EXPECT().PutEvent(gomock.Any(), gomock.Any()).AnyTimes() + s.mockClusterMetadata.EXPECT().GetClusterID().Return(tests.Version).AnyTimes() + s.mockClusterMetadata.EXPECT().IsVersionFromSameCluster(tests.Version, tests.Version).Return(true).AnyTimes() + s.mockClusterMetadata.EXPECT().IsGlobalNamespaceEnabled().Return(false).AnyTimes() + s.mockClusterMetadata.EXPECT().GetCurrentClusterName().Return(cluster.TestCurrentClusterName).AnyTimes() + s.mockClusterMetadata.EXPECT().ClusterNameForFailoverVersion(false, common.EmptyVersion).Return(cluster.TestCurrentClusterName).AnyTimes() + s.mockClusterMetadata.EXPECT().ClusterNameForFailoverVersion(true, tests.Version).Return(cluster.TestCurrentClusterName).AnyTimes() + s.mockVisibilityManager.EXPECT().GetIndexName().Return("").AnyTimes() + s.mockVisibilityManager.EXPECT(). + ValidateCustomSearchAttributes(gomock.Any()). + DoAndReturn( + func(searchAttributes map[string]any) (map[string]any, error) { + return searchAttributes, nil + }, + ). 
+ AnyTimes() + s.workflowCache = wcache.NewCache(s.mockShard) + s.logger = log.NewMockLogger(s.controller) + s.logger.EXPECT().Debug(gomock.Any(), gomock.Any()).AnyTimes() + s.logger.EXPECT().Info(gomock.Any(), gomock.Any()).AnyTimes() + s.logger.EXPECT().Warn(gomock.Any(), gomock.Any()).AnyTimes() + s.errorMessages = make([]string, 0) + s.logger.EXPECT().Error(gomock.Any(), gomock.Any()).AnyTimes().Do(func(msg string, tags ...tag.Tag) { + s.errorMessages = append(s.errorMessages, msg) + }) + + h := &historyEngineImpl{ + currentClusterName: s.mockShard.GetClusterMetadata().GetCurrentClusterName(), + shard: s.mockShard, + clusterMetadata: s.mockClusterMetadata, + executionManager: s.mockExecutionMgr, + logger: s.logger, + throttledLogger: s.logger, + metricsHandler: metrics.NoopMetricsHandler, + tokenSerializer: common.NewProtoTaskTokenSerializer(), + config: s.config, + timeSource: s.mockShard.GetTimeSource(), + eventNotifier: events.NewNotifier(clock.NewRealTimeSource(), metrics.NoopMetricsHandler, func(namespace.ID, string) int32 { return 1 }), + queueProcessors: map[tasks.Category]queues.Queue{ + s.mockArchivalProcessor.Category(): s.mockArchivalProcessor, + s.mockTxProcessor.Category(): s.mockTxProcessor, + s.mockTimerProcessor.Category(): s.mockTimerProcessor, + s.mockVisibilityProcessor.Category(): s.mockVisibilityProcessor, + s.mockMemoryScheduledQueue.Category(): s.mockMemoryScheduledQueue, + }, + searchAttributesValidator: searchattribute.NewValidator( + searchattribute.NewTestProvider(), + s.mockShard.Resource.SearchAttributesMapperProvider, + s.config.SearchAttributesNumberOfKeysLimit, + s.config.SearchAttributesSizeOfValueLimit, + s.config.SearchAttributesTotalSizeLimit, + s.mockVisibilityManager, + false, + ), + workflowConsistencyChecker: api.NewWorkflowConsistencyChecker(mockShard, s.workflowCache), + } + s.mockShard.SetEngineForTesting(h) + h.workflowTaskHandler = newWorkflowTaskHandlerCallback(h) + + s.historyEngine = h +} + +func (s *engine2Suite) TearDownTest() { + s.controller.Finish() + s.mockShard.StopForTest() +} + +func (s *engine2Suite) TestRecordWorkflowTaskStartedSuccessStickyEnabled() { + namespaceID := tests.NamespaceID + we := commonpb.WorkflowExecution{ + WorkflowId: "wId", + RunId: tests.RunID, + } + tl := "testTaskQueue" + stickyTl := "stickyTaskQueue" + identity := "testIdentity" + + ms := workflow.TestLocalMutableState(s.historyEngine.shard, s.mockEventsCache, tests.LocalNamespaceEntry, + log.NewTestLogger(), we.GetRunId()) + executionInfo := ms.GetExecutionInfo() + executionInfo.LastUpdateTime = timestamp.TimeNowPtrUtc() + executionInfo.StickyTaskQueue = stickyTl + + addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) + wt := addWorkflowTaskScheduledEvent(ms) + + wfMs := workflow.TestCloneToProto(ms) + + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) + + request := historyservice.RecordWorkflowTaskStartedRequest{ + NamespaceId: namespaceID.String(), + WorkflowExecution: &we, + ScheduledEventId: 2, + TaskId: 100, + RequestId: "reqId", + PollRequest: &workflowservice.PollWorkflowTaskQueueRequest{ + TaskQueue: &taskqueuepb.TaskQueue{ + Name: stickyTl, + }, + Identity: identity, + }, + } + + 
expectedResponse := historyservice.RecordWorkflowTaskStartedResponse{} + expectedResponse.WorkflowType = ms.GetWorkflowType() + executionInfo = ms.GetExecutionInfo() + if executionInfo.LastWorkflowTaskStartedEventId != common.EmptyEventID { + expectedResponse.PreviousStartedEventId = executionInfo.LastWorkflowTaskStartedEventId + } + expectedResponse.Version = tests.GlobalNamespaceEntry.FailoverVersion() + expectedResponse.ScheduledEventId = wt.ScheduledEventID + expectedResponse.ScheduledTime = wt.ScheduledTime + expectedResponse.StartedEventId = wt.ScheduledEventID + 1 + expectedResponse.StickyExecutionEnabled = true + expectedResponse.NextEventId = ms.GetNextEventID() + 1 + expectedResponse.Attempt = wt.Attempt + expectedResponse.WorkflowExecutionTaskQueue = &taskqueuepb.TaskQueue{ + Name: executionInfo.TaskQueue, + Kind: enumspb.TASK_QUEUE_KIND_NORMAL, + } + currentBranchTokken, err := ms.GetCurrentBranchToken() + s.NoError(err) + expectedResponse.BranchToken = currentBranchTokken + + response, err := s.historyEngine.RecordWorkflowTaskStarted(metrics.AddMetricsContext(context.Background()), &request) + s.Nil(err) + s.NotNil(response) + s.True(response.StartedTime.After(*expectedResponse.ScheduledTime)) + expectedResponse.StartedTime = response.StartedTime + s.Equal(&expectedResponse, response) +} + +func (s *engine2Suite) TestRecordWorkflowTaskStartedIfNoExecution() { + namespaceID := tests.NamespaceID + workflowExecution := &commonpb.WorkflowExecution{ + WorkflowId: "wId", + RunId: tests.RunID, + } + + identity := "testIdentity" + tl := "testTaskQueue" + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(nil, serviceerror.NewNotFound("")) + + response, err := s.historyEngine.RecordWorkflowTaskStarted(metrics.AddMetricsContext(context.Background()), &historyservice.RecordWorkflowTaskStartedRequest{ + NamespaceId: namespaceID.String(), + WorkflowExecution: workflowExecution, + ScheduledEventId: 2, + TaskId: 100, + RequestId: "reqId", + PollRequest: &workflowservice.PollWorkflowTaskQueueRequest{ + TaskQueue: &taskqueuepb.TaskQueue{ + Name: tl, + }, + Identity: identity, + }, + }) + s.Nil(response) + s.NotNil(err) + s.IsType(&serviceerror.NotFound{}, err) +} + +func (s *engine2Suite) TestRecordWorkflowTaskStarted_NoMessages() { + namespaceID := tests.NamespaceID + workflowExecution := commonpb.WorkflowExecution{ + WorkflowId: "wId", + RunId: tests.RunID, + } + + tl := "testTaskQueue" + identity := "testIdentity" + + ms := s.createExecutionStartedState(workflowExecution, tl, identity, false, false) + // Use UpdateCurrentVersion explicitly here, + // because there is no call to CloseTransactionAsSnapshot, + // because it converts speculative WT to normal, but WT needs to be speculative for this test. 
+ err := ms.UpdateCurrentVersion(tests.GlobalNamespaceEntry.FailoverVersion(), true) + s.NoError(err) + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).DoAndReturn( + func(ctx context.Context, request *persistence.GetWorkflowExecutionRequest) (*persistence.GetWorkflowExecutionResponse, error) { + wfMs := ms.CloneToProto() + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + return gwmsResponse, nil + }, + ) + + wt, err := ms.AddWorkflowTaskScheduledEvent(false, enumsspb.WORKFLOW_TASK_TYPE_SPECULATIVE) + s.NoError(err) + s.NotNil(wt) + + response, err := s.historyEngine.RecordWorkflowTaskStarted(metrics.AddMetricsContext(context.Background()), &historyservice.RecordWorkflowTaskStartedRequest{ + NamespaceId: namespaceID.String(), + WorkflowExecution: &workflowExecution, + ScheduledEventId: wt.ScheduledEventID, + TaskId: 100, + RequestId: "reqId", + PollRequest: &workflowservice.PollWorkflowTaskQueueRequest{ + TaskQueue: &taskqueuepb.TaskQueue{ + Name: tl, + }, + Identity: identity, + }, + }) + + s.Nil(response) + s.Error(err) + s.IsType(&serviceerror.NotFound{}, err, err.Error()) + s.EqualError(err, "No messages for speculative workflow task.") +} + +func (s *engine2Suite) TestRecordWorkflowTaskStartedIfGetExecutionFailed() { + namespaceID := tests.NamespaceID + workflowExecution := &commonpb.WorkflowExecution{ + WorkflowId: "wId", + RunId: tests.RunID, + } + + identity := "testIdentity" + tl := "testTaskQueue" + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(nil, errors.New("FAILED")) + + response, err := s.historyEngine.RecordWorkflowTaskStarted(metrics.AddMetricsContext(context.Background()), &historyservice.RecordWorkflowTaskStartedRequest{ + NamespaceId: namespaceID.String(), + WorkflowExecution: workflowExecution, + ScheduledEventId: 2, + TaskId: 100, + RequestId: "reqId", + PollRequest: &workflowservice.PollWorkflowTaskQueueRequest{ + TaskQueue: &taskqueuepb.TaskQueue{ + Name: tl, + }, + Identity: identity, + }, + }) + s.Nil(response) + s.NotNil(err) + s.EqualError(err, "FAILED") +} + +func (s *engine2Suite) TestRecordWorkflowTaskStartedIfTaskAlreadyStarted() { + namespaceID := tests.NamespaceID + workflowExecution := commonpb.WorkflowExecution{ + WorkflowId: "wId", + RunId: tests.RunID, + } + + identity := "testIdentity" + tl := "testTaskQueue" + + ms := s.createExecutionStartedState(workflowExecution, tl, identity, true, true) + wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + + response, err := s.historyEngine.RecordWorkflowTaskStarted(metrics.AddMetricsContext(context.Background()), &historyservice.RecordWorkflowTaskStartedRequest{ + NamespaceId: namespaceID.String(), + WorkflowExecution: &workflowExecution, + ScheduledEventId: 2, + TaskId: 100, + RequestId: "reqId", + PollRequest: &workflowservice.PollWorkflowTaskQueueRequest{ + TaskQueue: &taskqueuepb.TaskQueue{ + Name: tl, + }, + Identity: identity, + }, + }) + s.Nil(response) + s.NotNil(err) + s.IsType(&serviceerrors.TaskAlreadyStarted{}, err) + s.logger.Error("RecordWorkflowTaskStarted failed with", tag.Error(err)) +} + +func (s *engine2Suite) TestRecordWorkflowTaskStartedIfTaskAlreadyCompleted() { + namespaceID := tests.NamespaceID + workflowExecution := commonpb.WorkflowExecution{ + WorkflowId: "wId", + RunId: tests.RunID, + } + + identity := "testIdentity" + tl := 
"testTaskQueue" + + ms := s.createExecutionStartedState(workflowExecution, tl, identity, true, true) + addWorkflowTaskCompletedEvent(&s.Suite, ms, int64(2), int64(3), identity) + + wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + + response, err := s.historyEngine.RecordWorkflowTaskStarted(metrics.AddMetricsContext(context.Background()), &historyservice.RecordWorkflowTaskStartedRequest{ + NamespaceId: namespaceID.String(), + WorkflowExecution: &workflowExecution, + ScheduledEventId: 2, + TaskId: 100, + RequestId: "reqId", + PollRequest: &workflowservice.PollWorkflowTaskQueueRequest{ + TaskQueue: &taskqueuepb.TaskQueue{ + Name: tl, + }, + Identity: identity, + }, + }) + s.Nil(response) + s.NotNil(err) + s.IsType(&serviceerror.NotFound{}, err) + s.logger.Error("RecordWorkflowTaskStarted failed with", tag.Error(err)) +} + +func (s *engine2Suite) TestRecordWorkflowTaskStartedConflictOnUpdate() { + namespaceID := tests.NamespaceID + workflowExecution := commonpb.WorkflowExecution{ + WorkflowId: "wId", + RunId: tests.RunID, + } + + tl := "testTaskQueue" + identity := "testIdentity" + + ms := s.createExecutionStartedState(workflowExecution, tl, identity, true, false) + wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(nil, &persistence.ConditionFailedError{}) + + response, err := s.historyEngine.RecordWorkflowTaskStarted(metrics.AddMetricsContext(context.Background()), &historyservice.RecordWorkflowTaskStartedRequest{ + NamespaceId: namespaceID.String(), + WorkflowExecution: &workflowExecution, + ScheduledEventId: 2, + TaskId: 100, + RequestId: "reqId", + PollRequest: &workflowservice.PollWorkflowTaskQueueRequest{ + TaskQueue: &taskqueuepb.TaskQueue{ + Name: tl, + }, + Identity: identity, + }, + }) + + s.NotNil(err) + s.Nil(response) + s.Equal(&persistence.ConditionFailedError{}, err) +} + +func (s *engine2Suite) TestRecordWorkflowTaskStartedSuccess() { + namespaceID := tests.NamespaceID + workflowExecution := commonpb.WorkflowExecution{ + WorkflowId: "wId", + RunId: tests.RunID, + } + + tl := "testTaskQueue" + identity := "testIdentity" + + ms := s.createExecutionStartedState(workflowExecution, tl, identity, true, false) + wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) + + // load mutable state such that it already exists in memory when respond workflow task is called + // this enables us to set query registry on it + ctx, release, err := s.workflowCache.GetOrCreateWorkflowExecution( + metrics.AddMetricsContext(context.Background()), + tests.NamespaceID, + workflowExecution, + workflow.LockPriorityHigh, + ) + s.NoError(err) + loadedMS, err := ctx.LoadMutableState(context.Background()) + s.NoError(err) + qr := workflow.NewQueryRegistry() + id1, _ := qr.BufferQuery(&querypb.WorkflowQuery{}) + id2, _ := qr.BufferQuery(&querypb.WorkflowQuery{}) + id3, _ := 
qr.BufferQuery(&querypb.WorkflowQuery{}) + loadedMS.(*workflow.MutableStateImpl).QueryRegistry = qr + release(nil) + + response, err := s.historyEngine.RecordWorkflowTaskStarted(metrics.AddMetricsContext(context.Background()), &historyservice.RecordWorkflowTaskStartedRequest{ + NamespaceId: namespaceID.String(), + WorkflowExecution: &workflowExecution, + ScheduledEventId: 2, + TaskId: 100, + RequestId: "reqId", + PollRequest: &workflowservice.PollWorkflowTaskQueueRequest{ + TaskQueue: &taskqueuepb.TaskQueue{ + Name: tl, + }, + Identity: identity, + }, + }) + + s.Nil(err) + s.NotNil(response) + s.Equal("wType", response.WorkflowType.Name) + s.True(response.PreviousStartedEventId == 0) + s.Equal(int64(3), response.StartedEventId) + expectedQueryMap := map[string]*querypb.WorkflowQuery{ + id1: {}, + id2: {}, + id3: {}, + } + s.Equal(expectedQueryMap, response.Queries) +} + +func (s *engine2Suite) TestRecordActivityTaskStartedIfNoExecution() { + namespaceID := tests.NamespaceID + workflowExecution := &commonpb.WorkflowExecution{ + WorkflowId: "wId", + RunId: tests.RunID, + } + + identity := "testIdentity" + tl := "testTaskQueue" + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(nil, serviceerror.NewNotFound("")) + + response, err := s.historyEngine.RecordActivityTaskStarted( + metrics.AddMetricsContext(context.Background()), + &historyservice.RecordActivityTaskStartedRequest{ + NamespaceId: namespaceID.String(), + WorkflowExecution: workflowExecution, + ScheduledEventId: 5, + TaskId: 100, + RequestId: "reqId", + PollRequest: &workflowservice.PollActivityTaskQueueRequest{ + TaskQueue: &taskqueuepb.TaskQueue{ + Name: tl, + }, + Identity: identity, + }, + }, + ) + if err != nil { + s.logger.Error("Unexpected Error", tag.Error(err)) + } + s.Nil(response) + s.NotNil(err) + s.IsType(&serviceerror.NotFound{}, err) +} + +func (s *engine2Suite) TestRecordActivityTaskStartedSuccess() { + namespaceID := tests.NamespaceID + workflowExecution := commonpb.WorkflowExecution{ + WorkflowId: "wId", + RunId: tests.RunID, + } + + identity := "testIdentity" + tl := "testTaskQueue" + + activityID := "activity1_id" + activityType := "activity_type1" + activityInput := payloads.EncodeString("input1") + + ms := s.createExecutionStartedState(workflowExecution, tl, identity, true, true) + workflowTaskCompletedEvent := addWorkflowTaskCompletedEvent(&s.Suite, ms, int64(2), int64(3), identity) + scheduledEvent, _ := addActivityTaskScheduledEvent(ms, workflowTaskCompletedEvent.EventId, activityID, activityType, tl, activityInput, 100*time.Second, 10*time.Second, 1*time.Second, 5*time.Second) + + ms1 := workflow.TestCloneToProto(ms) + gwmsResponse1 := &persistence.GetWorkflowExecutionResponse{State: ms1} + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse1, nil) + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) + + s.mockEventsCache.EXPECT().GetEvent( + gomock.Any(), + events.EventKey{ + NamespaceID: namespaceID, + WorkflowID: workflowExecution.GetWorkflowId(), + RunID: workflowExecution.GetRunId(), + EventID: scheduledEvent.GetEventId(), + Version: 0, + }, + workflowTaskCompletedEvent.GetEventId(), + gomock.Any(), + ).Return(scheduledEvent, nil) + response, err := s.historyEngine.RecordActivityTaskStarted(metrics.AddMetricsContext(context.Background()), &historyservice.RecordActivityTaskStartedRequest{ + NamespaceId: namespaceID.String(), + 
WorkflowExecution: &workflowExecution, + ScheduledEventId: 5, + TaskId: 100, + RequestId: "reqId", + PollRequest: &workflowservice.PollActivityTaskQueueRequest{ + TaskQueue: &taskqueuepb.TaskQueue{ + Name: tl, + }, + Identity: identity, + }, + }) + s.Nil(err) + s.NotNil(response) + s.Equal(scheduledEvent, response.ScheduledEvent) +} + +func (s *engine2Suite) TestRequestCancelWorkflowExecution_Running() { + namespaceID := tests.NamespaceID + workflowExecution := commonpb.WorkflowExecution{ + WorkflowId: "wId", + RunId: tests.RunID, + } + + identity := "testIdentity" + tl := "testTaskQueue" + + ms := s.createExecutionStartedState(workflowExecution, tl, identity, true, false) + ms1 := workflow.TestCloneToProto(ms) + gwmsResponse1 := &persistence.GetWorkflowExecutionResponse{State: ms1} + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse1, nil) + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) + + _, err := s.historyEngine.RequestCancelWorkflowExecution(metrics.AddMetricsContext(context.Background()), &historyservice.RequestCancelWorkflowExecutionRequest{ + NamespaceId: namespaceID.String(), + CancelRequest: &workflowservice.RequestCancelWorkflowExecutionRequest{ + WorkflowExecution: &commonpb.WorkflowExecution{ + WorkflowId: workflowExecution.WorkflowId, + RunId: workflowExecution.RunId, + }, + Identity: "identity", + }, + }) + s.Nil(err) + + ms2 := s.getMutableState(namespaceID, workflowExecution) + s.Equal(int64(4), ms2.GetNextEventID()) +} + +func (s *engine2Suite) TestRequestCancelWorkflowExecution_Finished() { + namespaceID := tests.NamespaceID + workflowExecution := commonpb.WorkflowExecution{ + WorkflowId: "wId", + RunId: tests.RunID, + } + + identity := "testIdentity" + tl := "testTaskQueue" + + ms := s.createExecutionStartedState(workflowExecution, tl, identity, true, false) + ms.GetExecutionState().State = enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED + ms1 := workflow.TestCloneToProto(ms) + gwmsResponse1 := &persistence.GetWorkflowExecutionResponse{State: ms1} + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse1, nil) + + _, err := s.historyEngine.RequestCancelWorkflowExecution(metrics.AddMetricsContext(context.Background()), &historyservice.RequestCancelWorkflowExecutionRequest{ + NamespaceId: namespaceID.String(), + CancelRequest: &workflowservice.RequestCancelWorkflowExecutionRequest{ + WorkflowExecution: &commonpb.WorkflowExecution{ + WorkflowId: workflowExecution.WorkflowId, + RunId: workflowExecution.RunId, + }, + Identity: "identity", + }, + }) + s.Nil(err) +} + +func (s *engine2Suite) TestRequestCancelWorkflowExecution_NotFound() { + namespaceID := tests.NamespaceID + workflowExecution := commonpb.WorkflowExecution{ + WorkflowId: "wId", + RunId: tests.RunID, + } + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(nil, serviceerror.NewNotFound("")) + + _, err := s.historyEngine.RequestCancelWorkflowExecution(metrics.AddMetricsContext(context.Background()), &historyservice.RequestCancelWorkflowExecutionRequest{ + NamespaceId: namespaceID.String(), + CancelRequest: &workflowservice.RequestCancelWorkflowExecutionRequest{ + WorkflowExecution: &commonpb.WorkflowExecution{ + WorkflowId: workflowExecution.WorkflowId, + RunId: workflowExecution.RunId, + }, + Identity: "identity", + }, + }) + s.NotNil(err) + s.IsType(&serviceerror.NotFound{}, err) +} + +func (s 
*engine2Suite) TestRequestCancelWorkflowExecution_ParentMismatch() { + namespaceID := tests.NamespaceID + workflowExecution := commonpb.WorkflowExecution{ + WorkflowId: "wId", + RunId: tests.RunID, + } + parentInfo := &workflowspb.ParentExecutionInfo{ + NamespaceId: tests.ParentNamespaceID.String(), + Namespace: tests.ParentNamespace.String(), + Execution: &commonpb.WorkflowExecution{ + WorkflowId: "parent wId", + RunId: "parent rId", + }, + InitiatedId: 123, + InitiatedVersion: 456, + } + + identity := "testIdentity" + tl := "testTaskQueue" + + ms := s.createExecutionStartedStateWithParent(workflowExecution, tl, parentInfo, identity, true, false) + ms1 := workflow.TestCloneToProto(ms) + gwmsResponse1 := &persistence.GetWorkflowExecutionResponse{State: ms1} + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse1, nil) + + _, err := s.historyEngine.RequestCancelWorkflowExecution(metrics.AddMetricsContext(context.Background()), &historyservice.RequestCancelWorkflowExecutionRequest{ + NamespaceId: namespaceID.String(), + CancelRequest: &workflowservice.RequestCancelWorkflowExecutionRequest{ + WorkflowExecution: &commonpb.WorkflowExecution{ + WorkflowId: workflowExecution.WorkflowId, + RunId: workflowExecution.RunId, + }, + Identity: "identity", + }, + ExternalWorkflowExecution: &commonpb.WorkflowExecution{ + WorkflowId: "unknown wId", + RunId: "unknown rId", + }, + ChildWorkflowOnly: true, + }) + s.Equal(consts.ErrWorkflowParent, err) +} + +func (s *engine2Suite) TestTerminateWorkflowExecution_ParentMismatch() { + namespaceID := tests.NamespaceID + workflowExecution := commonpb.WorkflowExecution{ + WorkflowId: "wId", + RunId: tests.RunID, + } + parentInfo := &workflowspb.ParentExecutionInfo{ + NamespaceId: tests.ParentNamespaceID.String(), + Namespace: tests.ParentNamespace.String(), + Execution: &commonpb.WorkflowExecution{ + WorkflowId: "parent wId", + RunId: "parent rId", + }, + InitiatedId: 123, + InitiatedVersion: 456, + } + + identity := "testIdentity" + tl := "testTaskQueue" + + ms := s.createExecutionStartedStateWithParent(workflowExecution, tl, parentInfo, identity, true, false) + ms1 := workflow.TestCloneToProto(ms) + currentExecutionResp := &persistence.GetCurrentExecutionResponse{ + RunID: tests.RunID, + } + gwmsResponse1 := &persistence.GetWorkflowExecutionResponse{State: ms1} + + s.mockExecutionMgr.EXPECT().GetCurrentExecution(gomock.Any(), gomock.Any()).Return(currentExecutionResp, nil) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse1, nil) + + _, err := s.historyEngine.TerminateWorkflowExecution(metrics.AddMetricsContext(context.Background()), &historyservice.TerminateWorkflowExecutionRequest{ + NamespaceId: namespaceID.String(), + TerminateRequest: &workflowservice.TerminateWorkflowExecutionRequest{ + WorkflowExecution: &commonpb.WorkflowExecution{ + WorkflowId: workflowExecution.WorkflowId, + }, + Identity: "identity", + FirstExecutionRunId: workflowExecution.RunId, + }, + ExternalWorkflowExecution: &commonpb.WorkflowExecution{ + WorkflowId: "unknown wId", + RunId: "unknown rId", + }, + ChildWorkflowOnly: true, + }) + s.Equal(consts.ErrWorkflowParent, err) +} + +func (s *engine2Suite) createExecutionStartedState(we commonpb.WorkflowExecution, tl string, identity string, scheduleWorkflowTask bool, startWorkflowTask bool) workflow.MutableState { + return s.createExecutionStartedStateWithParent(we, tl, nil, identity, scheduleWorkflowTask, startWorkflowTask) +} + +func (s 
*engine2Suite) createExecutionStartedStateWithParent(we commonpb.WorkflowExecution, tl string, parentInfo *workflowspb.ParentExecutionInfo, identity string, scheduleWorkflowTask bool, startWorkflowTask bool) workflow.MutableState { + ms := workflow.TestLocalMutableState(s.historyEngine.shard, s.mockEventsCache, tests.LocalNamespaceEntry, + s.logger, we.GetRunId()) + addWorkflowExecutionStartedEventWithParent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, parentInfo, identity) + var wt *workflow.WorkflowTaskInfo + if scheduleWorkflowTask { + wt = addWorkflowTaskScheduledEvent(ms) + } + if wt != nil && startWorkflowTask { + addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) + } + _ = ms.SetHistoryTree(context.Background(), nil, nil, we.GetRunId()) + versionHistory, _ := versionhistory.GetCurrentVersionHistory( + ms.GetExecutionInfo().VersionHistories, + ) + _ = versionhistory.AddOrUpdateVersionHistoryItem( + versionHistory, + versionhistory.NewVersionHistoryItem(0, 0), + ) + + return ms +} + +func (s *engine2Suite) TestRespondWorkflowTaskCompletedRecordMarkerCommand() { + namespaceID := tests.NamespaceID + we := commonpb.WorkflowExecution{ + WorkflowId: "wId", + RunId: tests.RunID, + } + tl := "testTaskQueue" + taskToken := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: "wId", + RunId: we.GetRunId(), + ScheduledEventId: 2, + } + serializedTaskToken, _ := taskToken.Marshal() + identity := "testIdentity" + markerDetails := payloads.EncodeString("marker details") + markerName := "marker name" + + ms := workflow.TestLocalMutableState(s.historyEngine.shard, s.mockEventsCache, tests.LocalNamespaceEntry, + log.NewTestLogger(), we.GetRunId()) + addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) + wt := addWorkflowTaskScheduledEvent(ms) + addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) + + commands := []*commandpb.Command{{ + CommandType: enumspb.COMMAND_TYPE_RECORD_MARKER, + Attributes: &commandpb.Command_RecordMarkerCommandAttributes{RecordMarkerCommandAttributes: &commandpb.RecordMarkerCommandAttributes{ + MarkerName: markerName, + Details: map[string]*commonpb.Payloads{ + "data": markerDetails, + }, + }}, + }} + + wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) + + _, err := s.historyEngine.RespondWorkflowTaskCompleted(metrics.AddMetricsContext(context.Background()), &historyservice.RespondWorkflowTaskCompletedRequest{ + NamespaceId: namespaceID.String(), + CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ + TaskToken: serializedTaskToken, + Commands: commands, + Identity: identity, + }, + }) + s.Nil(err) + ms2 := s.getMutableState(namespaceID, we) + s.Equal(int64(6), ms2.GetNextEventID()) + s.Equal(int64(3), ms2.GetExecutionInfo().LastWorkflowTaskStartedEventId) + s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, ms2.GetExecutionState().State) + s.False(ms2.HasPendingWorkflowTask()) +} + +func (s *engine2Suite) TestRespondWorkflowTaskCompleted_StartChildWithSearchAttributes() { + namespaceID := tests.NamespaceID + we := commonpb.WorkflowExecution{ + WorkflowId: 
"wId", + RunId: tests.RunID, + } + tl := "testTaskQueue" + taskToken := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: "wId", + RunId: we.GetRunId(), + ScheduledEventId: 2, + } + serializedTaskToken, _ := taskToken.Marshal() + identity := "testIdentity" + + ms := workflow.TestLocalMutableState(s.historyEngine.shard, s.mockEventsCache, tests.LocalNamespaceEntry, + log.NewTestLogger(), we.GetRunId()) + addWorkflowExecutionStartedEvent(ms, we, "wType", tl, nil, 100*time.Second, 50*time.Second, 200*time.Second, identity) + wt := addWorkflowTaskScheduledEvent(ms) + addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) + + commands := []*commandpb.Command{{ + CommandType: enumspb.COMMAND_TYPE_START_CHILD_WORKFLOW_EXECUTION, + Attributes: &commandpb.Command_StartChildWorkflowExecutionCommandAttributes{StartChildWorkflowExecutionCommandAttributes: &commandpb.StartChildWorkflowExecutionCommandAttributes{ + Namespace: tests.Namespace.String(), + WorkflowId: tests.WorkflowID, + WorkflowType: &commonpb.WorkflowType{Name: "wType"}, + TaskQueue: &taskqueuepb.TaskQueue{Name: tl}, + SearchAttributes: &commonpb.SearchAttributes{IndexedFields: map[string]*commonpb.Payload{ + "AliasForCustomTextField": payload.EncodeString("search attribute value")}, + }, + }}, + }} + + wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + s.mockNamespaceCache.EXPECT().GetNamespace(tests.Namespace).Return(tests.LocalNamespaceEntry, nil).AnyTimes() + + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).DoAndReturn(func(_ context.Context, request *persistence.UpdateWorkflowExecutionRequest) (*persistence.UpdateWorkflowExecutionResponse, error) { + eventsToSave := request.UpdateWorkflowEvents[0].Events + s.Len(eventsToSave, 2) + s.Equal(enumspb.EVENT_TYPE_WORKFLOW_TASK_COMPLETED, eventsToSave[0].GetEventType()) + s.Equal(enumspb.EVENT_TYPE_START_CHILD_WORKFLOW_EXECUTION_INITIATED, eventsToSave[1].GetEventType()) + startChildEventAttributes := eventsToSave[1].GetStartChildWorkflowExecutionInitiatedEventAttributes() + // Search attribute name was mapped and saved under field name. + s.Equal( + payload.EncodeString("search attribute value"), + startChildEventAttributes.GetSearchAttributes().GetIndexedFields()["CustomTextField"]) + return tests.UpdateWorkflowExecutionResponse, nil + }) + + s.mockShard.Resource.SearchAttributesMapperProvider.EXPECT(). + GetMapper(tests.Namespace). 
+ Return(&searchattribute.TestMapper{Namespace: tests.Namespace.String()}, nil) + + _, err := s.historyEngine.RespondWorkflowTaskCompleted(metrics.AddMetricsContext(context.Background()), &historyservice.RespondWorkflowTaskCompletedRequest{ + NamespaceId: tests.NamespaceID.String(), + CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ + TaskToken: serializedTaskToken, + Commands: commands, + Identity: identity, + }, + }) + s.Nil(err) +} + +func (s *engine2Suite) TestRespondWorkflowTaskCompleted_StartChildWorkflow_ExceedsLimit() { + namespaceID := tests.NamespaceID + taskQueue := "testTaskQueue" + identity := "testIdentity" + workflowType := "testWorkflowType" + + we := commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + } + ms := workflow.TestLocalMutableState( + s.historyEngine.shard, + s.mockEventsCache, + tests.LocalNamespaceEntry, + log.NewTestLogger(), + we.GetRunId(), + ) + + addWorkflowExecutionStartedEvent( + ms, + we, + workflowType, + taskQueue, + nil, + time.Minute, + time.Minute, + time.Minute, + identity, + ) + + s.mockNamespaceCache.EXPECT().GetNamespace(tests.Namespace).Return(tests.LocalNamespaceEntry, nil).AnyTimes() + + var commands []*commandpb.Command + for i := 0; i < 6; i++ { + commands = append( + commands, + &commandpb.Command{ + CommandType: enumspb.COMMAND_TYPE_START_CHILD_WORKFLOW_EXECUTION, + Attributes: &commandpb.Command_StartChildWorkflowExecutionCommandAttributes{ + StartChildWorkflowExecutionCommandAttributes: &commandpb.StartChildWorkflowExecutionCommandAttributes{ + Namespace: tests.Namespace.String(), + WorkflowId: tests.WorkflowID, + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueue}, + }}, + }, + ) + } + + wt := addWorkflowTaskScheduledEvent(ms) + addWorkflowTaskStartedEvent( + ms, + wt.ScheduledEventID, + taskQueue, + identity, + ) + taskToken := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: tests.WorkflowID, + RunId: we.GetRunId(), + ScheduledEventId: 2, + } + taskTokenBytes, _ := taskToken.Marshal() + response := &persistence.GetWorkflowExecutionResponse{State: workflow.TestCloneToProto(ms)} + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(response, nil).AnyTimes() + s.mockShard.Resource.SearchAttributesMapperProvider.EXPECT(). + GetMapper(tests.Namespace). + Return(&searchattribute.TestMapper{Namespace: tests.Namespace.String()}, nil). 
+ AnyTimes() + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) + + s.historyEngine.shard.GetConfig().NumPendingChildExecutionsLimit = func(namespace string) int { + return 5 + } + _, err := s.historyEngine.RespondWorkflowTaskCompleted(metrics.AddMetricsContext(context.Background()), &historyservice.RespondWorkflowTaskCompletedRequest{ + NamespaceId: tests.NamespaceID.String(), + CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ + TaskToken: taskTokenBytes, + Commands: commands, + Identity: identity, + }, + }) + + s.Error(err) + s.IsType(&serviceerror.InvalidArgument{}, err) + s.Len(s.errorMessages, 1) + s.Equal("the number of pending child workflow executions, 5, has reached the per-workflow limit of 5", s.errorMessages[0]) +} + +func (s *engine2Suite) TestStartWorkflowExecution_BrandNew() { + namespaceID := tests.NamespaceID + workflowID := "workflowID" + workflowType := "workflowType" + taskQueue := "testTaskQueue" + identity := "testIdentity" + + s.mockExecutionMgr.EXPECT().CreateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.CreateWorkflowExecutionResponse, nil) + + requestID := uuid.New() + resp, err := s.historyEngine.StartWorkflowExecution(metrics.AddMetricsContext(context.Background()), &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + Namespace: namespaceID.String(), + WorkflowId: workflowID, + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueue}, + WorkflowExecutionTimeout: timestamp.DurationPtr(20 * time.Second), + WorkflowRunTimeout: timestamp.DurationPtr(1 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(2 * time.Second), + Identity: identity, + RequestId: requestID, + }, + }) + s.Nil(err) + s.NotNil(resp.RunId) +} + +func (s *engine2Suite) TestStartWorkflowExecution_BrandNew_SearchAttributes() { + namespaceID := tests.NamespaceID + workflowID := "workflowID" + workflowType := "workflowType" + taskQueue := "testTaskQueue" + identity := "testIdentity" + + s.mockExecutionMgr.EXPECT().CreateWorkflowExecution(gomock.Any(), gomock.Any()).DoAndReturn(func(_ context.Context, request *persistence.CreateWorkflowExecutionRequest) (*persistence.CreateWorkflowExecutionResponse, error) { + eventsToSave := request.NewWorkflowEvents[0].Events + s.Len(eventsToSave, 2) + s.Equal(enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_STARTED, eventsToSave[0].GetEventType()) + startEventAttributes := eventsToSave[0].GetWorkflowExecutionStartedEventAttributes() + // Search attribute name was mapped and saved under field name. 
+ s.Equal( + payload.EncodeString("test"), + startEventAttributes.GetSearchAttributes().GetIndexedFields()["CustomKeywordField"]) + return tests.CreateWorkflowExecutionResponse, nil + }) + + requestID := uuid.New() + resp, err := s.historyEngine.StartWorkflowExecution(metrics.AddMetricsContext(context.Background()), &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + Namespace: namespaceID.String(), + WorkflowId: workflowID, + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueue}, + WorkflowExecutionTimeout: timestamp.DurationPtr(20 * time.Second), + WorkflowRunTimeout: timestamp.DurationPtr(1 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(2 * time.Second), + Identity: identity, + RequestId: requestID, + SearchAttributes: &commonpb.SearchAttributes{IndexedFields: map[string]*commonpb.Payload{ + "CustomKeywordField": payload.EncodeString("test"), + }}}, + }) + s.Nil(err) + s.NotNil(resp.RunId) +} + +func (s *engine2Suite) TestStartWorkflowExecution_StillRunning_Dedup() { + namespaceID := tests.NamespaceID + workflowID := "workflowID" + runID := "runID" + workflowType := "workflowType" + taskQueue := "testTaskQueue" + identity := "testIdentity" + requestID := "requestID" + lastWriteVersion := common.EmptyVersion + + s.mockExecutionMgr.EXPECT().CreateWorkflowExecution(gomock.Any(), gomock.Any()).Return(nil, &persistence.CurrentWorkflowConditionFailedError{ + Msg: "random message", + RequestID: requestID, + RunID: runID, + State: enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, + Status: enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING, + LastWriteVersion: lastWriteVersion, + }) + + resp, err := s.historyEngine.StartWorkflowExecution(metrics.AddMetricsContext(context.Background()), &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + Namespace: namespaceID.String(), + WorkflowId: workflowID, + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueue}, + WorkflowExecutionTimeout: timestamp.DurationPtr(1 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(2 * time.Second), + Identity: identity, + RequestId: requestID, + }, + }) + s.Nil(err) + s.Equal(runID, resp.GetRunId()) +} + +func (s *engine2Suite) TestStartWorkflowExecution_StillRunning_NonDeDup() { + namespaceID := tests.NamespaceID + workflowID := "workflowID" + runID := "runID" + workflowType := "workflowType" + taskQueue := "testTaskQueue" + identity := "testIdentity" + lastWriteVersion := common.EmptyVersion + + s.mockExecutionMgr.EXPECT().CreateWorkflowExecution(gomock.Any(), gomock.Any()).Return(nil, &persistence.CurrentWorkflowConditionFailedError{ + Msg: "random message", + RequestID: "oldRequestID", + RunID: runID, + State: enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, + Status: enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING, + LastWriteVersion: lastWriteVersion, + }) + + resp, err := s.historyEngine.StartWorkflowExecution(metrics.AddMetricsContext(context.Background()), &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + Namespace: namespaceID.String(), + WorkflowId: workflowID, + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueue}, 
+ WorkflowExecutionTimeout: timestamp.DurationPtr(1 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(2 * time.Second), + Identity: identity, + RequestId: "newRequestID", + }, + }) + if _, ok := err.(*serviceerror.WorkflowExecutionAlreadyStarted); !ok { + s.Fail("return err is not *serviceerror.WorkflowExecutionAlreadyStarted") + } + s.Nil(resp) +} + +func (s *engine2Suite) TestStartWorkflowExecution_NotRunning_PrevSuccess() { + namespaceID := tests.NamespaceID + workflowID := "workflowID" + runID := "runID" + workflowType := "workflowType" + taskQueue := "testTaskQueue" + identity := "testIdentity" + lastWriteVersion := common.EmptyVersion + + options := []enumspb.WorkflowIdReusePolicy{ + enumspb.WORKFLOW_ID_REUSE_POLICY_ALLOW_DUPLICATE_FAILED_ONLY, + enumspb.WORKFLOW_ID_REUSE_POLICY_ALLOW_DUPLICATE, + enumspb.WORKFLOW_ID_REUSE_POLICY_REJECT_DUPLICATE, + } + + expecedErrs := []bool{true, false, true} + + s.mockExecutionMgr.EXPECT().CreateWorkflowExecution( + gomock.Any(), + newCreateWorkflowExecutionRequestMatcher(func(request *persistence.CreateWorkflowExecutionRequest) bool { + return request.Mode == persistence.CreateWorkflowModeBrandNew + }), + ).Return(nil, &persistence.CurrentWorkflowConditionFailedError{ + Msg: "random message", + RequestID: "oldRequestID", + RunID: runID, + State: enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED, + Status: enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED, + LastWriteVersion: lastWriteVersion, + }).Times(len(expecedErrs)) + + for index, option := range options { + if !expecedErrs[index] { + s.mockExecutionMgr.EXPECT().CreateWorkflowExecution( + gomock.Any(), + newCreateWorkflowExecutionRequestMatcher(func(request *persistence.CreateWorkflowExecutionRequest) bool { + return request.Mode == persistence.CreateWorkflowModeUpdateCurrent && + request.PreviousRunID == runID && + request.PreviousLastWriteVersion == lastWriteVersion + }), + ).Return(tests.CreateWorkflowExecutionResponse, nil) + } + + resp, err := s.historyEngine.StartWorkflowExecution(metrics.AddMetricsContext(context.Background()), &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + Namespace: namespaceID.String(), + WorkflowId: workflowID, + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueue}, + WorkflowExecutionTimeout: timestamp.DurationPtr(1 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(2 * time.Second), + Identity: identity, + RequestId: "newRequestID", + WorkflowIdReusePolicy: option, + }, + }) + + if expecedErrs[index] { + if _, ok := err.(*serviceerror.WorkflowExecutionAlreadyStarted); !ok { + s.Fail("return err is not *serviceerror.WorkflowExecutionAlreadyStarted") + } + s.Nil(resp) + } else { + s.Nil(err) + s.NotNil(resp) + } + } +} + +func (s *engine2Suite) TestStartWorkflowExecution_NotRunning_PrevFail() { + namespaceID := tests.NamespaceID + workflowID := "workflowID" + workflowType := "workflowType" + taskQueue := "testTaskQueue" + identity := "testIdentity" + lastWriteVersion := common.EmptyVersion + + options := []enumspb.WorkflowIdReusePolicy{ + enumspb.WORKFLOW_ID_REUSE_POLICY_ALLOW_DUPLICATE_FAILED_ONLY, + enumspb.WORKFLOW_ID_REUSE_POLICY_ALLOW_DUPLICATE, + enumspb.WORKFLOW_ID_REUSE_POLICY_REJECT_DUPLICATE, + } + + expecedErrs := []bool{false, false, true} + + statuses := []enumspb.WorkflowExecutionStatus{ + enumspb.WORKFLOW_EXECUTION_STATUS_FAILED, + 
enumspb.WORKFLOW_EXECUTION_STATUS_CANCELED, + enumspb.WORKFLOW_EXECUTION_STATUS_TERMINATED, + enumspb.WORKFLOW_EXECUTION_STATUS_TIMED_OUT, + } + runIDs := []string{"1", "2", "3", "4"} + + for i, status := range statuses { + + s.mockExecutionMgr.EXPECT().CreateWorkflowExecution( + gomock.Any(), + newCreateWorkflowExecutionRequestMatcher(func(request *persistence.CreateWorkflowExecutionRequest) bool { + return request.Mode == persistence.CreateWorkflowModeBrandNew + }), + ).Return(nil, &persistence.CurrentWorkflowConditionFailedError{ + Msg: "random message", + RequestID: "oldRequestID", + RunID: runIDs[i], + State: enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED, + Status: status, + LastWriteVersion: lastWriteVersion, + }).Times(len(expecedErrs)) + + for j, option := range options { + + if !expecedErrs[j] { + s.mockExecutionMgr.EXPECT().CreateWorkflowExecution( + gomock.Any(), + newCreateWorkflowExecutionRequestMatcher(func(request *persistence.CreateWorkflowExecutionRequest) bool { + return request.Mode == persistence.CreateWorkflowModeUpdateCurrent && + request.PreviousRunID == runIDs[i] && + request.PreviousLastWriteVersion == lastWriteVersion + }), + ).Return(tests.CreateWorkflowExecutionResponse, nil) + } + + resp, err := s.historyEngine.StartWorkflowExecution(metrics.AddMetricsContext(context.Background()), &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + Namespace: namespaceID.String(), + WorkflowId: workflowID, + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueue}, + WorkflowExecutionTimeout: timestamp.DurationPtr(1 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(2 * time.Second), + Identity: identity, + RequestId: "newRequestID", + WorkflowIdReusePolicy: option, + }, + }) + + if expecedErrs[j] { + if _, ok := err.(*serviceerror.WorkflowExecutionAlreadyStarted); !ok { + s.Fail("return err is not *serviceerror.WorkflowExecutionAlreadyStarted") + } + s.Nil(resp) + } else { + s.Nil(err) + s.NotNil(resp) + } + } + } +} + +func (s *engine2Suite) TestSignalWithStartWorkflowExecution_JustSignal() { + sRequest := &historyservice.SignalWithStartWorkflowExecutionRequest{} + _, err := s.historyEngine.SignalWithStartWorkflowExecution(metrics.AddMetricsContext(context.Background()), sRequest) + s.EqualError(err, "Missing namespace UUID.") + + namespaceID := tests.NamespaceID + workflowID := "wId" + workflowType := "workflowType" + runID := tests.RunID + taskQueue := "testTaskQueue" + identity := "testIdentity" + signalName := "my signal name" + input := payloads.EncodeString("test input") + requestID := uuid.New() + sRequest = &historyservice.SignalWithStartWorkflowExecutionRequest{ + NamespaceId: namespaceID.String(), + SignalWithStartRequest: &workflowservice.SignalWithStartWorkflowExecutionRequest{ + Namespace: namespaceID.String(), + WorkflowId: workflowID, + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueue}, + Identity: identity, + SignalName: signalName, + Input: input, + RequestId: requestID, + }, + } + + ms := workflow.TestLocalMutableState(s.historyEngine.shard, s.mockEventsCache, tests.LocalNamespaceEntry, + log.NewTestLogger(), runID) + addWorkflowExecutionStartedEvent(ms, commonpb.WorkflowExecution{ + WorkflowId: workflowID, + RunId: runID, + }, "wType", "testTaskQueue", payloads.EncodeString("input"), 25*time.Second, 20*time.Second, 
200*time.Second, identity) + _ = addWorkflowTaskScheduledEvent(ms) + wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + gceResponse := &persistence.GetCurrentExecutionResponse{RunID: runID} + + s.mockExecutionMgr.EXPECT().GetCurrentExecution(gomock.Any(), gomock.Any()).Return(gceResponse, nil) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) + + resp, err := s.historyEngine.SignalWithStartWorkflowExecution(metrics.AddMetricsContext(context.Background()), sRequest) + s.Nil(err) + s.Equal(runID, resp.GetRunId()) +} + +func (s *engine2Suite) TestSignalWithStartWorkflowExecution_WorkflowNotExist() { + sRequest := &historyservice.SignalWithStartWorkflowExecutionRequest{} + _, err := s.historyEngine.SignalWithStartWorkflowExecution(metrics.AddMetricsContext(context.Background()), sRequest) + s.EqualError(err, "Missing namespace UUID.") + + namespaceID := tests.NamespaceID + workflowID := "wId" + workflowType := "workflowType" + taskQueue := "testTaskQueue" + identity := "testIdentity" + signalName := "my signal name" + input := payloads.EncodeString("test input") + requestID := uuid.New() + + sRequest = &historyservice.SignalWithStartWorkflowExecutionRequest{ + NamespaceId: namespaceID.String(), + SignalWithStartRequest: &workflowservice.SignalWithStartWorkflowExecutionRequest{ + Namespace: namespaceID.String(), + WorkflowId: workflowID, + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueue}, + WorkflowExecutionTimeout: timestamp.DurationPtr(1 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(2 * time.Second), + Identity: identity, + SignalName: signalName, + Input: input, + RequestId: requestID, + }, + } + + notExistErr := serviceerror.NewNotFound("Workflow not exist") + + s.mockExecutionMgr.EXPECT().GetCurrentExecution(gomock.Any(), gomock.Any()).Return(nil, notExistErr) + s.mockExecutionMgr.EXPECT().CreateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.CreateWorkflowExecutionResponse, nil) + + resp, err := s.historyEngine.SignalWithStartWorkflowExecution(metrics.AddMetricsContext(context.Background()), sRequest) + s.Nil(err) + s.NotNil(resp.GetRunId()) +} + +func (s *engine2Suite) TestSignalWithStartWorkflowExecution_WorkflowNotRunning() { + we := commonpb.WorkflowExecution{ + WorkflowId: "wId", + RunId: tests.RunID, + } + tl := "testTaskQueue" + + sRequest := &historyservice.SignalWithStartWorkflowExecutionRequest{} + _, err := s.historyEngine.SignalWithStartWorkflowExecution(metrics.AddMetricsContext(context.Background()), sRequest) + s.EqualError(err, "Missing namespace UUID.") + + namespaceID := tests.NamespaceID + workflowID := "wId" + runID := tests.RunID + workflowType := "workflowType" + taskQueue := "testTaskQueue" + identity := "testIdentity" + signalName := "my signal name" + input := payloads.EncodeString("test input") + requestID := uuid.New() + sRequest = &historyservice.SignalWithStartWorkflowExecutionRequest{ + NamespaceId: namespaceID.String(), + SignalWithStartRequest: &workflowservice.SignalWithStartWorkflowExecutionRequest{ + Namespace: namespaceID.String(), + WorkflowId: workflowID, + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueue}, + Input: input, + WorkflowExecutionTimeout: 
timestamp.DurationPtr(1 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(2 * time.Second), + Identity: identity, + RequestId: requestID, + WorkflowIdReusePolicy: enumspb.WORKFLOW_ID_REUSE_POLICY_ALLOW_DUPLICATE, + SignalName: signalName, + SignalInput: nil, + Control: "", + RetryPolicy: nil, + CronSchedule: "", + Memo: nil, + SearchAttributes: nil, + Header: nil, + }, + } + + ms := workflow.TestLocalMutableState(s.historyEngine.shard, s.mockEventsCache, tests.LocalNamespaceEntry, + log.NewTestLogger(), runID) + addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) + wfMs := workflow.TestCloneToProto(ms) + wfMs.ExecutionState.State = enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + gceResponse := &persistence.GetCurrentExecutionResponse{RunID: runID} + + s.mockExecutionMgr.EXPECT().GetCurrentExecution(gomock.Any(), gomock.Any()).Return(gceResponse, nil).AnyTimes() + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + s.mockExecutionMgr.EXPECT().CreateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.CreateWorkflowExecutionResponse, nil) + + resp, err := s.historyEngine.SignalWithStartWorkflowExecution(metrics.AddMetricsContext(context.Background()), sRequest) + s.Nil(err) + s.NotNil(resp.GetRunId()) + s.NotEqual(runID, resp.GetRunId()) +} + +func (s *engine2Suite) TestSignalWithStartWorkflowExecution_Start_DuplicateRequests() { + namespaceID := tests.NamespaceID + workflowID := "wId" + runID := tests.RunID + workflowType := "workflowType" + taskQueue := "testTaskQueue" + identity := "testIdentity" + signalName := "my signal name" + input := payloads.EncodeString("test input") + requestID := "testRequestID" + we := commonpb.WorkflowExecution{ + WorkflowId: "wId", + RunId: tests.RunID, + } + tl := "testTaskQueue" + sRequest := &historyservice.SignalWithStartWorkflowExecutionRequest{ + NamespaceId: namespaceID.String(), + SignalWithStartRequest: &workflowservice.SignalWithStartWorkflowExecutionRequest{ + Namespace: namespaceID.String(), + WorkflowId: workflowID, + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueue}, + Input: input, + WorkflowExecutionTimeout: timestamp.DurationPtr(1 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(2 * time.Second), + Identity: identity, + RequestId: requestID, + WorkflowIdReusePolicy: enumspb.WORKFLOW_ID_REUSE_POLICY_ALLOW_DUPLICATE, + SignalName: signalName, + SignalInput: nil, + Control: "", + RetryPolicy: nil, + CronSchedule: "", + Memo: nil, + SearchAttributes: nil, + Header: nil, + }, + } + + ms := workflow.TestLocalMutableState(s.historyEngine.shard, s.mockEventsCache, tests.LocalNamespaceEntry, + log.NewTestLogger(), runID) + addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) + wfMs := workflow.TestCloneToProto(ms) + wfMs.ExecutionState.State = enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + gceResponse := &persistence.GetCurrentExecutionResponse{RunID: runID} + workflowAlreadyStartedErr := &persistence.CurrentWorkflowConditionFailedError{ + Msg: "random message", + RequestID: requestID, // use same requestID + RunID: runID, + State: enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, + Status: 
enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING, + LastWriteVersion: common.EmptyVersion, + } + + s.mockExecutionMgr.EXPECT().GetCurrentExecution(gomock.Any(), gomock.Any()).Return(gceResponse, nil).AnyTimes() + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + s.mockExecutionMgr.EXPECT().CreateWorkflowExecution(gomock.Any(), gomock.Any()).Return(nil, workflowAlreadyStartedErr) + + ctx := metrics.AddMetricsContext(context.Background()) + resp, err := s.historyEngine.SignalWithStartWorkflowExecution(ctx, sRequest) + if err != nil { + s.T().Logf("SignalWithStartWorkflowExecution returned unexpected error: %v", err) + } + s.Nil(err) + s.NotNil(resp.GetRunId()) + s.Equal(runID, resp.GetRunId()) + } + + func (s *engine2Suite) TestSignalWithStartWorkflowExecution_Start_WorkflowAlreadyStarted() { + namespaceID := tests.NamespaceID + workflowID := "wId" + runID := tests.RunID + workflowType := "workflowType" + taskQueue := "testTaskQueue" + identity := "testIdentity" + signalName := "my signal name" + input := payloads.EncodeString("test input") + requestID := "testRequestID" + we := commonpb.WorkflowExecution{ + WorkflowId: "wId", + RunId: tests.RunID, + } + tl := "testTaskQueue" + sRequest := &historyservice.SignalWithStartWorkflowExecutionRequest{ + NamespaceId: namespaceID.String(), + SignalWithStartRequest: &workflowservice.SignalWithStartWorkflowExecutionRequest{ + Namespace: namespaceID.String(), + WorkflowId: workflowID, + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueue}, + Input: input, + WorkflowExecutionTimeout: timestamp.DurationPtr(1 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(2 * time.Second), + Identity: identity, + RequestId: requestID, + WorkflowIdReusePolicy: enumspb.WORKFLOW_ID_REUSE_POLICY_ALLOW_DUPLICATE, + SignalName: signalName, + SignalInput: nil, + Control: "", + RetryPolicy: nil, + CronSchedule: "", + Memo: nil, + SearchAttributes: nil, + Header: nil, + }, + } + + ms := workflow.TestLocalMutableState(s.historyEngine.shard, s.mockEventsCache, tests.LocalNamespaceEntry, + log.NewTestLogger(), runID) + addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) + wfMs := workflow.TestCloneToProto(ms) + wfMs.ExecutionState.State = enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + gceResponse := &persistence.GetCurrentExecutionResponse{RunID: runID} + workflowAlreadyStartedErr := &persistence.CurrentWorkflowConditionFailedError{ + Msg: "random message", + RequestID: "new request ID", + RunID: runID, + State: enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, + Status: enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING, + LastWriteVersion: common.EmptyVersion, + } + + s.mockExecutionMgr.EXPECT().GetCurrentExecution(gomock.Any(),
gomock.Any()).Return(gceResponse, nil).AnyTimes() + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + s.mockExecutionMgr.EXPECT().CreateWorkflowExecution(gomock.Any(), gomock.Any()).Return(nil, workflowAlreadyStartedErr) + + resp, err := s.historyEngine.SignalWithStartWorkflowExecution(metrics.AddMetricsContext(context.Background()), sRequest) + s.Nil(resp) + s.NotNil(err) +} + +func (s *engine2Suite) TestRecordChildExecutionCompleted() { + childWorkflowID := "some random child workflow ID" + childRunID := uuid.New() + childWorkflowType := "some random child workflow type" + childTaskQueueName := "some random child task queue" + + request := &historyservice.RecordChildExecutionCompletedRequest{ + NamespaceId: tests.NamespaceID.String(), + ParentExecution: &commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + }, + ChildExecution: &commonpb.WorkflowExecution{ + WorkflowId: childWorkflowID, + RunId: childRunID, + }, + CompletionEvent: &historypb.HistoryEvent{ + EventId: 456, + EventType: enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_COMPLETED, + Attributes: &historypb.HistoryEvent_WorkflowExecutionCompletedEventAttributes{ + WorkflowExecutionCompletedEventAttributes: &historypb.WorkflowExecutionCompletedEventAttributes{}, + }, + }, + ParentInitiatedId: 123, + ParentInitiatedVersion: 100, + } + + ms := workflow.TestGlobalMutableState(s.historyEngine.shard, s.mockEventsCache, log.NewTestLogger(), tests.Version, tests.RunID) + addWorkflowExecutionStartedEvent(ms, commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + }, "wType", "testTaskQueue", payloads.EncodeString("input"), 25*time.Second, 20*time.Second, 200*time.Second, "identity") + wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + + // reload mutable state due to potential stale mutable state (initiated event not found) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil).Times(2) + _, err := s.historyEngine.RecordChildExecutionCompleted(metrics.AddMetricsContext(context.Background()), request) + s.IsType(&serviceerror.NotFound{}, err) + + // add child init event + wt := addWorkflowTaskScheduledEvent(ms) + workflowTasksStartEvent := addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, "testTaskQueue", uuid.New()) + wt.StartedEventID = workflowTasksStartEvent.GetEventId() + workflowTaskCompletedEvent := addWorkflowTaskCompletedEvent(&s.Suite, ms, wt.ScheduledEventID, wt.StartedEventID, "some random identity") + + initiatedEvent, _ := addStartChildWorkflowExecutionInitiatedEvent(ms, workflowTaskCompletedEvent.GetEventId(), uuid.New(), + tests.ChildNamespace, tests.ChildNamespaceID, childWorkflowID, childWorkflowType, childTaskQueueName, nil, 1*time.Second, 1*time.Second, 1*time.Second, enumspb.PARENT_CLOSE_POLICY_TERMINATE) + request.ParentInitiatedId = initiatedEvent.GetEventId() + request.ParentInitiatedVersion = initiatedEvent.GetVersion() + + // reload mutable state due to potential stale mutable state (started event not found) + wfMs = workflow.TestCloneToProto(ms) + gwmsResponse = &persistence.GetWorkflowExecutionResponse{State: wfMs} + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil).Times(2) + _, err = s.historyEngine.RecordChildExecutionCompleted(metrics.AddMetricsContext(context.Background()), request) + s.IsType(&serviceerror.NotFound{}, err) + + // add child 
started event + addChildWorkflowExecutionStartedEvent(ms, initiatedEvent.GetEventId(), childWorkflowID, childRunID, childWorkflowType, nil) + + wfMs = workflow.TestCloneToProto(ms) + gwmsResponse = &persistence.GetWorkflowExecutionResponse{State: wfMs} + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) + _, err = s.historyEngine.RecordChildExecutionCompleted(metrics.AddMetricsContext(context.Background()), request) + s.NoError(err) +} + +func (s *engine2Suite) TestVerifyChildExecutionCompletionRecorded_WorkflowNotExist() { + + request := &historyservice.VerifyChildExecutionCompletionRecordedRequest{ + NamespaceId: tests.ParentNamespaceID.String(), + ParentExecution: &commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + }, + ChildExecution: &commonpb.WorkflowExecution{ + WorkflowId: "child workflowId", + RunId: "child runId", + }, + ParentInitiatedId: 123, + ParentInitiatedVersion: 100, + } + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(nil, &serviceerror.NotFound{}) + + _, err := s.historyEngine.VerifyChildExecutionCompletionRecorded(metrics.AddMetricsContext(context.Background()), request) + s.IsType(&serviceerror.NotFound{}, err) +} + +func (s *engine2Suite) TestVerifyChildExecutionCompletionRecorded_WorkflowClosed() { + + request := &historyservice.VerifyChildExecutionCompletionRecordedRequest{ + NamespaceId: tests.ParentNamespaceID.String(), + ParentExecution: &commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + }, + ChildExecution: &commonpb.WorkflowExecution{ + WorkflowId: "child workflowId", + RunId: "child runId", + }, + ParentInitiatedId: 123, + ParentInitiatedVersion: 100, + } + + ms := workflow.TestGlobalMutableState(s.historyEngine.shard, s.mockEventsCache, log.NewTestLogger(), tests.Version, tests.RunID) + addWorkflowExecutionStartedEvent(ms, commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + }, "wType", "testTaskQueue", payloads.EncodeString("input"), 25*time.Second, 20*time.Second, 200*time.Second, "identity") + _, err := ms.AddTimeoutWorkflowEvent( + ms.GetNextEventID(), + enumspb.RETRY_STATE_RETRY_POLICY_NOT_SET, + uuid.New(), + ) + s.NoError(err) + + wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + + _, err = s.historyEngine.VerifyChildExecutionCompletionRecorded(metrics.AddMetricsContext(context.Background()), request) + s.NoError(err) +} + +func (s *engine2Suite) TestVerifyChildExecutionCompletionRecorded_InitiatedEventNotFound() { + + request := &historyservice.VerifyChildExecutionCompletionRecordedRequest{ + NamespaceId: tests.NamespaceID.String(), + ParentExecution: &commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + }, + ChildExecution: &commonpb.WorkflowExecution{ + WorkflowId: "child workflowId", + RunId: "child runId", + }, + ParentInitiatedId: 123, + ParentInitiatedVersion: 100, + } + + ms := workflow.TestGlobalMutableState(s.historyEngine.shard, s.mockEventsCache, log.NewTestLogger(), tests.Version, tests.RunID) + addWorkflowExecutionStartedEvent(ms, commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + }, "wType", 
"testTaskQueue", payloads.EncodeString("input"), 25*time.Second, 20*time.Second, 200*time.Second, "identity") + wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + + _, err := s.historyEngine.VerifyChildExecutionCompletionRecorded(metrics.AddMetricsContext(context.Background()), request) + s.IsType(&serviceerror.WorkflowNotReady{}, err) +} + +func (s *engine2Suite) TestVerifyChildExecutionCompletionRecorded_InitiatedEventFoundOnNonCurrentBranch() { + + inititatedVersion := tests.Version - 100 + request := &historyservice.VerifyChildExecutionCompletionRecordedRequest{ + NamespaceId: tests.NamespaceID.String(), + ParentExecution: &commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + }, + ChildExecution: &commonpb.WorkflowExecution{ + WorkflowId: "child workflowId", + RunId: "child runId", + }, + ParentInitiatedId: 123, + ParentInitiatedVersion: inititatedVersion, + } + + ms := workflow.TestGlobalMutableState(s.historyEngine.shard, s.mockEventsCache, log.NewTestLogger(), tests.Version, tests.RunID) + addWorkflowExecutionStartedEvent(ms, commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + }, "wType", "testTaskQueue", payloads.EncodeString("input"), 25*time.Second, 20*time.Second, 200*time.Second, "identity") + ms.GetExecutionInfo().VersionHistories = &historyspb.VersionHistories{ + CurrentVersionHistoryIndex: 0, + Histories: []*historyspb.VersionHistory{ + { + BranchToken: []byte{1, 2, 3}, + Items: []*historyspb.VersionHistoryItem{ + {EventId: 100, Version: inititatedVersion}, + {EventId: 456, Version: tests.Version}, + }, + }, + { + BranchToken: []byte{4, 5, 6}, + Items: []*historyspb.VersionHistoryItem{ + {EventId: 456, Version: inititatedVersion}, + }, + }, + }, + } + + wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + + _, err := s.historyEngine.VerifyChildExecutionCompletionRecorded(metrics.AddMetricsContext(context.Background()), request) + s.IsType(&serviceerror.NotFound{}, err) +} + +func (s *engine2Suite) TestVerifyChildExecutionCompletionRecorded_InitiatedEventFoundOnCurrentBranch() { + + taskQueueName := "testTaskQueue" + + childWorkflowID := "some random child workflow ID" + childRunID := uuid.New() + childWorkflowType := "some random child workflow type" + childTaskQueueName := "some random child task queue" + + ms := workflow.TestGlobalMutableState(s.historyEngine.shard, s.mockEventsCache, log.NewTestLogger(), tests.Version, tests.RunID) + addWorkflowExecutionStartedEvent(ms, commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + }, "wType", taskQueueName, payloads.EncodeString("input"), 25*time.Second, 20*time.Second, 200*time.Second, "identity") + wt := addWorkflowTaskScheduledEvent(ms) + workflowTasksStartEvent := addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, taskQueueName, uuid.New()) + wt.StartedEventID = workflowTasksStartEvent.GetEventId() + workflowTaskCompletedEvent := addWorkflowTaskCompletedEvent(&s.Suite, ms, wt.ScheduledEventID, wt.StartedEventID, "some random identity") + initiatedEvent, ci := addStartChildWorkflowExecutionInitiatedEvent(ms, workflowTaskCompletedEvent.GetEventId(), uuid.New(), + tests.ChildNamespace, 
tests.ChildNamespaceID, childWorkflowID, childWorkflowType, childTaskQueueName, nil, 1*time.Second, 1*time.Second, 1*time.Second, enumspb.PARENT_CLOSE_POLICY_TERMINATE) + + request := &historyservice.VerifyChildExecutionCompletionRecordedRequest{ + NamespaceId: tests.NamespaceID.String(), + ParentExecution: &commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + }, + ChildExecution: &commonpb.WorkflowExecution{ + WorkflowId: childWorkflowID, + RunId: childRunID, + }, + ParentInitiatedId: initiatedEvent.GetEventId(), + ParentInitiatedVersion: initiatedEvent.GetVersion(), + } + + // child workflow not started in mutable state + wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + + _, err := s.historyEngine.VerifyChildExecutionCompletionRecorded(metrics.AddMetricsContext(context.Background()), request) + s.IsType(&serviceerror.WorkflowNotReady{}, err) + + // child workflow started but not completed + addChildWorkflowExecutionStartedEvent(ms, initiatedEvent.GetEventId(), childWorkflowID, childRunID, childWorkflowType, nil) + + wfMs = workflow.TestCloneToProto(ms) + gwmsResponse = &persistence.GetWorkflowExecutionResponse{State: wfMs} + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + + _, err = s.historyEngine.VerifyChildExecutionCompletionRecorded(metrics.AddMetricsContext(context.Background()), request) + s.IsType(&serviceerror.WorkflowNotReady{}, err) + + // child completion recorded + addChildWorkflowExecutionCompletedEvent( + ms, + ci.InitiatedEventId, + &commonpb.WorkflowExecution{ + WorkflowId: childWorkflowID, + RunId: childRunID, + }, + &historypb.WorkflowExecutionCompletedEventAttributes{ + Result: payloads.EncodeString("some random child workflow execution result"), + WorkflowTaskCompletedEventId: workflowTaskCompletedEvent.GetEventId(), + }, + ) + + wfMs = workflow.TestCloneToProto(ms) + gwmsResponse = &persistence.GetWorkflowExecutionResponse{State: wfMs} + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + + _, err = s.historyEngine.VerifyChildExecutionCompletionRecorded(metrics.AddMetricsContext(context.Background()), request) + s.NoError(err) +} + +func (s *engine2Suite) TestRefreshWorkflowTasks() { + execution := commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + } + + ms := workflow.TestGlobalMutableState(s.historyEngine.shard, s.mockEventsCache, log.NewTestLogger(), tests.Version, tests.RunID) + startEvent := addWorkflowExecutionStartedEvent(ms, execution, "wType", "testTaskQueue", payloads.EncodeString("input"), 25*time.Second, 20*time.Second, 200*time.Second, "identity") + startVersion := startEvent.GetVersion() + timeoutEvent, err := ms.AddTimeoutWorkflowEvent( + ms.GetNextEventID(), + enumspb.RETRY_STATE_RETRY_POLICY_NOT_SET, + uuid.New(), + ) + s.NoError(err) + + wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + s.mockExecutionMgr.EXPECT().AddHistoryTasks(gomock.Any(), gomock.Any()).Return(nil) + s.mockEventsCache.EXPECT().GetEvent( + gomock.Any(), + events.EventKey{ + NamespaceID: tests.NamespaceID, + WorkflowID: execution.GetWorkflowId(), + RunID: execution.GetRunId(), + 
EventID: common.FirstEventID, + Version: startVersion, + }, + common.FirstEventID, + gomock.Any(), + ).Return(startEvent, nil).AnyTimes() + s.mockEventsCache.EXPECT().GetEvent( + gomock.Any(), + events.EventKey{ + NamespaceID: tests.NamespaceID, + WorkflowID: execution.GetWorkflowId(), + RunID: execution.GetRunId(), + EventID: timeoutEvent.GetEventId(), + Version: startVersion, + }, + timeoutEvent.GetEventId(), + gomock.Any(), + ).Return(startEvent, nil).AnyTimes() + + err = s.historyEngine.RefreshWorkflowTasks(metrics.AddMetricsContext(context.Background()), tests.NamespaceID, execution) + s.NoError(err) +} + +func (s *engine2Suite) getMutableState(namespaceID namespace.ID, we commonpb.WorkflowExecution) workflow.MutableState { + weContext, release, err := s.workflowCache.GetOrCreateWorkflowExecution( + metrics.AddMetricsContext(context.Background()), + namespaceID, + we, + workflow.LockPriorityHigh, + ) + if err != nil { + return nil + } + defer release(nil) + + return weContext.(*workflow.ContextImpl).MutableState +} + +type createWorkflowExecutionRequestMatcher struct { + f func(request *persistence.CreateWorkflowExecutionRequest) bool +} + +func newCreateWorkflowExecutionRequestMatcher(f func(request *persistence.CreateWorkflowExecutionRequest) bool) gomock.Matcher { + return &createWorkflowExecutionRequestMatcher{ + f: f, + } +} + +func (m *createWorkflowExecutionRequestMatcher) Matches(x interface{}) bool { + request, ok := x.(*persistence.CreateWorkflowExecutionRequest) + if !ok { + return false + } + return m.f(request) +} + +func (m *createWorkflowExecutionRequestMatcher) String() string { + return "CreateWorkflowExecutionRequest match condition" +} diff -Nru temporal-1.21.5-1/src/service/history/history_engine3_eventsv2_test.go temporal-1.22.5/src/service/history/history_engine3_eventsv2_test.go --- temporal-1.21.5-1/src/service/history/history_engine3_eventsv2_test.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/service/history/history_engine3_eventsv2_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,368 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package history + +import ( + "context" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/pborman/uuid" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" + "go.temporal.io/api/serviceerror" + taskqueuepb "go.temporal.io/api/taskqueue/v1" + "go.temporal.io/api/workflowservice/v1" + + "go.temporal.io/server/api/historyservice/v1" + persistencespb "go.temporal.io/server/api/persistence/v1" + "go.temporal.io/server/common" + "go.temporal.io/server/common/clock" + "go.temporal.io/server/common/cluster" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/namespace" + "go.temporal.io/server/common/payloads" + "go.temporal.io/server/common/persistence" + "go.temporal.io/server/common/primitives/timestamp" + "go.temporal.io/server/service/history/api" + "go.temporal.io/server/service/history/configs" + "go.temporal.io/server/service/history/events" + "go.temporal.io/server/service/history/queues" + "go.temporal.io/server/service/history/shard" + "go.temporal.io/server/service/history/tasks" + "go.temporal.io/server/service/history/tests" + "go.temporal.io/server/service/history/workflow" + wcache "go.temporal.io/server/service/history/workflow/cache" +) + +type ( + engine3Suite struct { + suite.Suite + *require.Assertions + + controller *gomock.Controller + mockShard *shard.ContextTest + mockTxProcessor *queues.MockQueue + mockTimerProcessor *queues.MockQueue + mockVisibilityProcessor *queues.MockQueue + mockEventsCache *events.MockCache + mockNamespaceCache *namespace.MockRegistry + mockClusterMetadata *cluster.MockMetadata + workflowCache wcache.Cache + historyEngine *historyEngineImpl + mockExecutionMgr *persistence.MockExecutionManager + + config *configs.Config + logger log.Logger + } +) + +func TestEngine3Suite(t *testing.T) { + s := new(engine3Suite) + suite.Run(t, s) +} + +func (s *engine3Suite) SetupSuite() { + s.config = tests.NewDynamicConfig() +} + +func (s *engine3Suite) TearDownSuite() { +} + +func (s *engine3Suite) SetupTest() { + s.Assertions = require.New(s.T()) + + s.controller = gomock.NewController(s.T()) + + s.mockTxProcessor = queues.NewMockQueue(s.controller) + s.mockTimerProcessor = queues.NewMockQueue(s.controller) + s.mockVisibilityProcessor = queues.NewMockQueue(s.controller) + s.mockTxProcessor.EXPECT().Category().Return(tasks.CategoryTransfer).AnyTimes() + s.mockTimerProcessor.EXPECT().Category().Return(tasks.CategoryTimer).AnyTimes() + s.mockVisibilityProcessor.EXPECT().Category().Return(tasks.CategoryVisibility).AnyTimes() + s.mockTxProcessor.EXPECT().NotifyNewTasks(gomock.Any()).AnyTimes() + s.mockTimerProcessor.EXPECT().NotifyNewTasks(gomock.Any()).AnyTimes() + s.mockVisibilityProcessor.EXPECT().NotifyNewTasks(gomock.Any()).AnyTimes() + + s.mockShard = shard.NewTestContext( + s.controller, + &persistencespb.ShardInfo{ + ShardId: 1, + RangeId: 1, + }, + s.config, + ) + s.mockShard.Resource.ShardMgr.EXPECT().AssertShardOwnership(gomock.Any(), gomock.Any()).AnyTimes() + + s.mockExecutionMgr = s.mockShard.Resource.ExecutionMgr + s.mockClusterMetadata = s.mockShard.Resource.ClusterMetadata + s.mockNamespaceCache = s.mockShard.Resource.NamespaceCache + s.mockEventsCache = s.mockShard.MockEventsCache + + s.mockClusterMetadata.EXPECT().IsGlobalNamespaceEnabled().Return(false).AnyTimes() + 
s.mockClusterMetadata.EXPECT().GetCurrentClusterName().Return(cluster.TestCurrentClusterName).AnyTimes() + s.mockClusterMetadata.EXPECT().ClusterNameForFailoverVersion(false, common.EmptyVersion).Return(cluster.TestCurrentClusterName).AnyTimes() + s.mockEventsCache.EXPECT().PutEvent(gomock.Any(), gomock.Any()).AnyTimes() + s.workflowCache = wcache.NewCache(s.mockShard) + s.logger = s.mockShard.GetLogger() + + h := &historyEngineImpl{ + currentClusterName: s.mockShard.GetClusterMetadata().GetCurrentClusterName(), + shard: s.mockShard, + clusterMetadata: s.mockClusterMetadata, + executionManager: s.mockExecutionMgr, + logger: s.logger, + throttledLogger: s.logger, + metricsHandler: metrics.NoopMetricsHandler, + tokenSerializer: common.NewProtoTaskTokenSerializer(), + config: s.config, + timeSource: s.mockShard.GetTimeSource(), + eventNotifier: events.NewNotifier(clock.NewRealTimeSource(), metrics.NoopMetricsHandler, func(namespace.ID, string) int32 { return 1 }), + queueProcessors: map[tasks.Category]queues.Queue{ + s.mockTxProcessor.Category(): s.mockTxProcessor, + s.mockTimerProcessor.Category(): s.mockTimerProcessor, + s.mockVisibilityProcessor.Category(): s.mockVisibilityProcessor, + }, + workflowConsistencyChecker: api.NewWorkflowConsistencyChecker(s.mockShard, s.workflowCache), + } + s.mockShard.SetEngineForTesting(h) + h.workflowTaskHandler = newWorkflowTaskHandlerCallback(h) + + s.historyEngine = h +} + +func (s *engine3Suite) TearDownTest() { + s.controller.Finish() + s.mockShard.StopForTest() +} + +func (s *engine3Suite) TestRecordWorkflowTaskStartedSuccessStickyEnabled() { + testNamespaceEntry := namespace.NewLocalNamespaceForTest( + &persistencespb.NamespaceInfo{Id: tests.NamespaceID.String()}, &persistencespb.NamespaceConfig{Retention: timestamp.DurationFromDays(1)}, "", + ) + s.mockNamespaceCache.EXPECT().GetNamespaceByID(gomock.Any()).Return(testNamespaceEntry, nil).AnyTimes() + s.mockNamespaceCache.EXPECT().GetNamespace(gomock.Any()).Return(testNamespaceEntry, nil).AnyTimes() + + namespaceID := tests.NamespaceID + we := commonpb.WorkflowExecution{ + WorkflowId: "wId", + RunId: tests.RunID, + } + tl := "testTaskQueue" + stickyTl := "stickyTaskQueue" + identity := "testIdentity" + + ms := workflow.TestLocalMutableState(s.historyEngine.shard, s.mockEventsCache, tests.LocalNamespaceEntry, + log.NewTestLogger(), we.GetRunId()) + executionInfo := ms.GetExecutionInfo() + executionInfo.LastUpdateTime = timestamp.TimeNowPtrUtc() + executionInfo.StickyTaskQueue = stickyTl + + addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) + wt := addWorkflowTaskScheduledEvent(ms) + + wfMs := workflow.TestCloneToProto(ms) + + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) + + request := historyservice.RecordWorkflowTaskStartedRequest{ + NamespaceId: namespaceID.String(), + WorkflowExecution: &we, + ScheduledEventId: 2, + TaskId: 100, + RequestId: "reqId", + PollRequest: &workflowservice.PollWorkflowTaskQueueRequest{ + TaskQueue: &taskqueuepb.TaskQueue{ + Name: stickyTl, + }, + Identity: identity, + }, + } + + expectedResponse := historyservice.RecordWorkflowTaskStartedResponse{} + expectedResponse.WorkflowType = ms.GetWorkflowType() + 
executionInfo = ms.GetExecutionInfo() + if executionInfo.LastWorkflowTaskStartedEventId != common.EmptyEventID { + expectedResponse.PreviousStartedEventId = executionInfo.LastWorkflowTaskStartedEventId + } + expectedResponse.ScheduledEventId = wt.ScheduledEventID + expectedResponse.ScheduledTime = wt.ScheduledTime + expectedResponse.StartedEventId = wt.ScheduledEventID + 1 + expectedResponse.StickyExecutionEnabled = true + expectedResponse.NextEventId = ms.GetNextEventID() + 1 + expectedResponse.Attempt = wt.Attempt + expectedResponse.WorkflowExecutionTaskQueue = &taskqueuepb.TaskQueue{ + Name: executionInfo.TaskQueue, + Kind: enumspb.TASK_QUEUE_KIND_NORMAL, + } + expectedResponse.BranchToken, _ = ms.GetCurrentBranchToken() + + response, err := s.historyEngine.RecordWorkflowTaskStarted(context.Background(), &request) + s.Nil(err) + s.NotNil(response) + s.True(response.StartedTime.After(*expectedResponse.ScheduledTime)) + expectedResponse.StartedTime = response.StartedTime + s.Equal(&expectedResponse, response) +} + +func (s *engine3Suite) TestStartWorkflowExecution_BrandNew() { + testNamespaceEntry := namespace.NewLocalNamespaceForTest( + &persistencespb.NamespaceInfo{Id: tests.NamespaceID.String()}, &persistencespb.NamespaceConfig{Retention: timestamp.DurationFromDays(1)}, "", + ) + s.mockNamespaceCache.EXPECT().GetNamespaceByID(gomock.Any()).Return(testNamespaceEntry, nil).AnyTimes() + s.mockNamespaceCache.EXPECT().GetNamespace(gomock.Any()).Return(testNamespaceEntry, nil).AnyTimes() + + namespaceID := tests.NamespaceID + workflowID := "workflowID" + workflowType := "workflowType" + taskQueue := "testTaskQueue" + identity := "testIdentity" + + s.mockExecutionMgr.EXPECT().CreateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.CreateWorkflowExecutionResponse, nil) + + requestID := uuid.New() + resp, err := s.historyEngine.StartWorkflowExecution(context.Background(), &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + Namespace: namespaceID.String(), + WorkflowId: workflowID, + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueue}, + WorkflowExecutionTimeout: timestamp.DurationPtr(1 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(2 * time.Second), + Identity: identity, + RequestId: requestID, + }, + }) + s.Nil(err) + s.NotNil(resp.RunId) +} + +func (s *engine3Suite) TestSignalWithStartWorkflowExecution_JustSignal() { + testNamespaceEntry := namespace.NewLocalNamespaceForTest( + &persistencespb.NamespaceInfo{Id: tests.NamespaceID.String()}, &persistencespb.NamespaceConfig{Retention: timestamp.DurationFromDays(1)}, "", + ) + s.mockNamespaceCache.EXPECT().GetNamespaceByID(gomock.Any()).Return(testNamespaceEntry, nil).AnyTimes() + s.mockNamespaceCache.EXPECT().GetNamespace(gomock.Any()).Return(testNamespaceEntry, nil).AnyTimes() + + sRequest := &historyservice.SignalWithStartWorkflowExecutionRequest{} + _, err := s.historyEngine.SignalWithStartWorkflowExecution(context.Background(), sRequest) + s.EqualError(err, "Missing namespace UUID.") + + namespaceID := tests.NamespaceID + workflowID := "wId" + workflowType := "workflowType" + runID := tests.RunID + taskQueue := "testTaskQueue" + identity := "testIdentity" + signalName := "my signal name" + input := payloads.EncodeString("test input") + requestID := uuid.New() + sRequest = &historyservice.SignalWithStartWorkflowExecutionRequest{ + NamespaceId: 
namespaceID.String(), + SignalWithStartRequest: &workflowservice.SignalWithStartWorkflowExecutionRequest{ + Namespace: namespaceID.String(), + WorkflowId: workflowID, + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueue}, + Identity: identity, + SignalName: signalName, + Input: input, + RequestId: requestID, + }, + } + + ms := workflow.TestLocalMutableState(s.historyEngine.shard, s.mockEventsCache, tests.LocalNamespaceEntry, + log.NewTestLogger(), runID) + addWorkflowExecutionStartedEvent(ms, commonpb.WorkflowExecution{ + WorkflowId: workflowID, + RunId: runID, + }, "wType", "testTaskQueue", payloads.EncodeString("input"), 25*time.Second, 20*time.Second, 200*time.Second, identity) + _ = addWorkflowTaskScheduledEvent(ms) + wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + gceResponse := &persistence.GetCurrentExecutionResponse{RunID: runID} + + s.mockExecutionMgr.EXPECT().GetCurrentExecution(gomock.Any(), gomock.Any()).Return(gceResponse, nil) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) + + resp, err := s.historyEngine.SignalWithStartWorkflowExecution(context.Background(), sRequest) + s.Nil(err) + s.Equal(runID, resp.GetRunId()) +} + +func (s *engine3Suite) TestSignalWithStartWorkflowExecution_WorkflowNotExist() { + testNamespaceEntry := namespace.NewLocalNamespaceForTest( + &persistencespb.NamespaceInfo{Id: tests.NamespaceID.String()}, &persistencespb.NamespaceConfig{Retention: timestamp.DurationFromDays(1)}, "", + ) + s.mockNamespaceCache.EXPECT().GetNamespaceByID(gomock.Any()).Return(testNamespaceEntry, nil).AnyTimes() + s.mockNamespaceCache.EXPECT().GetNamespace(gomock.Any()).Return(testNamespaceEntry, nil).AnyTimes() + + sRequest := &historyservice.SignalWithStartWorkflowExecutionRequest{} + _, err := s.historyEngine.SignalWithStartWorkflowExecution(context.Background(), sRequest) + s.EqualError(err, "Missing namespace UUID.") + + namespaceID := tests.NamespaceID + workflowID := "wId" + workflowType := "workflowType" + taskQueue := "testTaskQueue" + identity := "testIdentity" + signalName := "my signal name" + input := payloads.EncodeString("test input") + requestID := uuid.New() + sRequest = &historyservice.SignalWithStartWorkflowExecutionRequest{ + NamespaceId: namespaceID.String(), + SignalWithStartRequest: &workflowservice.SignalWithStartWorkflowExecutionRequest{ + Namespace: namespaceID.String(), + WorkflowId: workflowID, + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueue}, + WorkflowExecutionTimeout: timestamp.DurationPtr(1 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(2 * time.Second), + Identity: identity, + SignalName: signalName, + Input: input, + RequestId: requestID, + }, + } + + notExistErr := serviceerror.NewNotFound("Workflow not exist") + + s.mockExecutionMgr.EXPECT().GetCurrentExecution(gomock.Any(), gomock.Any()).Return(nil, notExistErr) + s.mockExecutionMgr.EXPECT().CreateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.CreateWorkflowExecutionResponse, nil) + + resp, err := s.historyEngine.SignalWithStartWorkflowExecution(context.Background(), sRequest) + s.Nil(err) + s.NotNil(resp.GetRunId()) +} diff -Nru temporal-1.21.5-1/src/service/history/history_engine_factory.go 
temporal-1.22.5/src/service/history/history_engine_factory.go --- temporal-1.21.5-1/src/service/history/history_engine_factory.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/service/history/history_engine_factory.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,96 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package history + +import ( + "go.opentelemetry.io/otel/trace" + "go.uber.org/fx" + + "go.temporal.io/server/client" + "go.temporal.io/server/common/persistence" + "go.temporal.io/server/common/persistence/serialization" + "go.temporal.io/server/common/persistence/visibility/manager" + "go.temporal.io/server/common/resource" + "go.temporal.io/server/common/sdk" + "go.temporal.io/server/service/history/api" + "go.temporal.io/server/service/history/configs" + "go.temporal.io/server/service/history/events" + "go.temporal.io/server/service/history/replication" + "go.temporal.io/server/service/history/shard" + wcache "go.temporal.io/server/service/history/workflow/cache" + "go.temporal.io/server/service/worker/archiver" +) + +type ( + HistoryEngineFactoryParams struct { + fx.In + + ClientBean client.Bean + MatchingClient resource.MatchingClient + SdkClientFactory sdk.ClientFactory + EventNotifier events.Notifier + Config *configs.Config + RawMatchingClient resource.MatchingRawClient + NewCacheFn wcache.NewCacheFn + ArchivalClient archiver.Client + EventSerializer serialization.Serializer + QueueFactories []QueueFactory `group:"queueFactory"` + ReplicationTaskFetcherFactory replication.TaskFetcherFactory + ReplicationTaskExecutorProvider replication.TaskExecutorProvider + TracerProvider trace.TracerProvider + PersistenceVisibilityMgr manager.VisibilityManager + EventBlobCache persistence.XDCCache + } + + historyEngineFactory struct { + HistoryEngineFactoryParams + } +) + +func (f *historyEngineFactory) CreateEngine( + shard shard.Context, +) shard.Engine { + workflowCache := f.NewCacheFn(shard) + workflowConsistencyChecker := api.NewWorkflowConsistencyChecker(shard, workflowCache) + return NewEngineWithShardContext( + shard, + f.ClientBean, + f.MatchingClient, + f.SdkClientFactory, + f.EventNotifier, + f.Config, + f.RawMatchingClient, + workflowCache, + f.ArchivalClient, + f.EventSerializer, + f.QueueFactories, + f.ReplicationTaskFetcherFactory, + f.ReplicationTaskExecutorProvider, + 
workflowConsistencyChecker, + f.TracerProvider, + f.PersistenceVisibilityMgr, + f.EventBlobCache, + ) +} diff -Nru temporal-1.21.5-1/src/service/history/history_engine_test.go temporal-1.22.5/src/service/history/history_engine_test.go --- temporal-1.21.5-1/src/service/history/history_engine_test.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/service/history/history_engine_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,5609 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package history + +import ( + "context" + "encoding/json" + "errors" + "strings" + "sync" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/pborman/uuid" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + commandpb "go.temporal.io/api/command/v1" + commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" + failurepb "go.temporal.io/api/failure/v1" + historypb "go.temporal.io/api/history/v1" + querypb "go.temporal.io/api/query/v1" + "go.temporal.io/api/serviceerror" + taskqueuepb "go.temporal.io/api/taskqueue/v1" + "go.temporal.io/api/workflowservice/v1" + "google.golang.org/grpc" + + clockspb "go.temporal.io/server/api/clock/v1" + enumsspb "go.temporal.io/server/api/enums/v1" + historyspb "go.temporal.io/server/api/history/v1" + "go.temporal.io/server/api/historyservice/v1" + "go.temporal.io/server/api/historyservicemock/v1" + "go.temporal.io/server/api/matchingservice/v1" + "go.temporal.io/server/api/matchingservicemock/v1" + persistencespb "go.temporal.io/server/api/persistence/v1" + tokenspb "go.temporal.io/server/api/token/v1" + workflowspb "go.temporal.io/server/api/workflow/v1" + "go.temporal.io/server/common" + "go.temporal.io/server/common/clock" + "go.temporal.io/server/common/cluster" + "go.temporal.io/server/common/failure" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/namespace" + "go.temporal.io/server/common/payload" + "go.temporal.io/server/common/payloads" + "go.temporal.io/server/common/persistence" + "go.temporal.io/server/common/persistence/versionhistory" + "go.temporal.io/server/common/primitives/timestamp" + "go.temporal.io/server/common/rpc/interceptor" + "go.temporal.io/server/common/searchattribute" + "go.temporal.io/server/service/history/api" + "go.temporal.io/server/service/history/configs" + 
"go.temporal.io/server/service/history/consts" + "go.temporal.io/server/service/history/events" + "go.temporal.io/server/service/history/ndc" + "go.temporal.io/server/service/history/queues" + "go.temporal.io/server/service/history/shard" + "go.temporal.io/server/service/history/tasks" + "go.temporal.io/server/service/history/tests" + "go.temporal.io/server/service/history/workflow" + wcache "go.temporal.io/server/service/history/workflow/cache" +) + +type ( + engineSuite struct { + suite.Suite + *require.Assertions + + controller *gomock.Controller + mockShard *shard.ContextTest + mockTxProcessor *queues.MockQueue + mockTimerProcessor *queues.MockQueue + mockVisibilityProcessor *queues.MockQueue + mockArchivalProcessor *queues.MockQueue + mockMemoryScheduledQueue *queues.MockQueue + mockNamespaceCache *namespace.MockRegistry + mockMatchingClient *matchingservicemock.MockMatchingServiceClient + mockHistoryClient *historyservicemock.MockHistoryServiceClient + mockClusterMetadata *cluster.MockMetadata + mockEventsReapplier *ndc.MockEventsReapplier + mockWorkflowResetter *ndc.MockWorkflowResetter + + workflowCache wcache.Cache + mockHistoryEngine *historyEngineImpl + mockExecutionMgr *persistence.MockExecutionManager + mockShardManager *persistence.MockShardManager + + eventsCache events.Cache + config *configs.Config + } +) + +func TestEngineSuite(t *testing.T) { + s := new(engineSuite) + suite.Run(t, s) +} + +func (s *engineSuite) SetupSuite() { + +} + +func (s *engineSuite) TearDownSuite() { +} + +func (s *engineSuite) SetupTest() { + s.Assertions = require.New(s.T()) + + s.controller = gomock.NewController(s.T()) + s.mockEventsReapplier = ndc.NewMockEventsReapplier(s.controller) + s.mockWorkflowResetter = ndc.NewMockWorkflowResetter(s.controller) + s.mockTxProcessor = queues.NewMockQueue(s.controller) + s.mockTimerProcessor = queues.NewMockQueue(s.controller) + s.mockVisibilityProcessor = queues.NewMockQueue(s.controller) + s.mockArchivalProcessor = queues.NewMockQueue(s.controller) + s.mockMemoryScheduledQueue = queues.NewMockQueue(s.controller) + s.mockTxProcessor.EXPECT().Category().Return(tasks.CategoryTransfer).AnyTimes() + s.mockTimerProcessor.EXPECT().Category().Return(tasks.CategoryTimer).AnyTimes() + s.mockVisibilityProcessor.EXPECT().Category().Return(tasks.CategoryVisibility).AnyTimes() + s.mockArchivalProcessor.EXPECT().Category().Return(tasks.CategoryArchival).AnyTimes() + s.mockMemoryScheduledQueue.EXPECT().Category().Return(tasks.CategoryMemoryTimer).AnyTimes() + s.mockTxProcessor.EXPECT().NotifyNewTasks(gomock.Any()).AnyTimes() + s.mockTimerProcessor.EXPECT().NotifyNewTasks(gomock.Any()).AnyTimes() + s.mockVisibilityProcessor.EXPECT().NotifyNewTasks(gomock.Any()).AnyTimes() + s.mockArchivalProcessor.EXPECT().NotifyNewTasks(gomock.Any()).AnyTimes() + s.mockMemoryScheduledQueue.EXPECT().NotifyNewTasks(gomock.Any()).AnyTimes() + + s.config = tests.NewDynamicConfig() + s.mockShard = shard.NewTestContext( + s.controller, + &persistencespb.ShardInfo{ + ShardId: 1, + RangeId: 1, + }, + s.config, + ) + s.workflowCache = wcache.NewCache(s.mockShard) + s.mockShard.Resource.ShardMgr.EXPECT().AssertShardOwnership(gomock.Any(), gomock.Any()).AnyTimes() + + s.eventsCache = events.NewEventsCache( + s.mockShard.GetShardID(), + s.mockShard.GetConfig().EventsCacheMaxSizeBytes(), + s.mockShard.GetConfig().EventsCacheTTL(), + s.mockShard.GetExecutionManager(), + false, + s.mockShard.GetLogger(), + s.mockShard.GetMetricsHandler(), + ) + s.mockShard.SetEventsCacheForTesting(s.eventsCache) + + 
s.mockMatchingClient = s.mockShard.Resource.MatchingClient + s.mockHistoryClient = s.mockShard.Resource.HistoryClient + s.mockExecutionMgr = s.mockShard.Resource.ExecutionMgr + s.mockShardManager = s.mockShard.Resource.ShardMgr + s.mockClusterMetadata = s.mockShard.Resource.ClusterMetadata + s.mockNamespaceCache = s.mockShard.Resource.NamespaceCache + s.mockClusterMetadata.EXPECT().IsGlobalNamespaceEnabled().Return(false).AnyTimes() + s.mockClusterMetadata.EXPECT().GetClusterID().Return(cluster.TestCurrentClusterInitialFailoverVersion).AnyTimes() + s.mockClusterMetadata.EXPECT().GetCurrentClusterName().Return(cluster.TestCurrentClusterName).AnyTimes() + s.mockClusterMetadata.EXPECT().GetAllClusterInfo().Return(cluster.TestSingleDCClusterInfo).AnyTimes() + s.mockClusterMetadata.EXPECT().ClusterNameForFailoverVersion(false, common.EmptyVersion).Return(cluster.TestCurrentClusterName).AnyTimes() + s.mockNamespaceCache.EXPECT().GetNamespaceByID(tests.NamespaceID).Return(tests.LocalNamespaceEntry, nil).AnyTimes() + s.mockNamespaceCache.EXPECT().GetNamespace(tests.Namespace).Return(tests.LocalNamespaceEntry, nil).AnyTimes() + + eventNotifier := events.NewNotifier( + clock.NewRealTimeSource(), + s.mockShard.Resource.MetricsHandler, + func(namespaceID namespace.ID, workflowID string) int32 { + key := namespaceID.String() + "_" + workflowID + return int32(len(key)) + }, + ) + + h := &historyEngineImpl{ + currentClusterName: s.mockShard.GetClusterMetadata().GetCurrentClusterName(), + shard: s.mockShard, + clusterMetadata: s.mockClusterMetadata, + executionManager: s.mockExecutionMgr, + logger: s.mockShard.GetLogger(), + metricsHandler: s.mockShard.GetMetricsHandler(), + tokenSerializer: common.NewProtoTaskTokenSerializer(), + eventNotifier: eventNotifier, + config: s.config, + queueProcessors: map[tasks.Category]queues.Queue{ + s.mockTxProcessor.Category(): s.mockTxProcessor, + s.mockTimerProcessor.Category(): s.mockTimerProcessor, + s.mockVisibilityProcessor.Category(): s.mockVisibilityProcessor, + s.mockArchivalProcessor.Category(): s.mockArchivalProcessor, + s.mockMemoryScheduledQueue.Category(): s.mockMemoryScheduledQueue, + }, + eventsReapplier: s.mockEventsReapplier, + workflowResetter: s.mockWorkflowResetter, + workflowConsistencyChecker: api.NewWorkflowConsistencyChecker(s.mockShard, s.workflowCache), + throttledLogger: log.NewNoopLogger(), + } + s.mockShard.SetEngineForTesting(h) + h.workflowTaskHandler = newWorkflowTaskHandlerCallback(h) + + h.eventNotifier.Start() + + s.mockHistoryEngine = h +} + +func (s *engineSuite) TearDownTest() { + s.controller.Finish() + s.mockShard.StopForTest() + s.mockHistoryEngine.eventNotifier.Stop() +} + +func (s *engineSuite) TestGetMutableStateSync() { + ctx := context.Background() + + execution := commonpb.WorkflowExecution{ + WorkflowId: "test-get-workflow-execution-event-id", + RunId: tests.RunID, + } + taskqueue := "testTaskQueue" + identity := "testIdentity" + + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, tests.LocalNamespaceEntry, + log.NewTestLogger(), execution.GetRunId()) + addWorkflowExecutionStartedEvent(ms, execution, "wType", taskqueue, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) + wt := addWorkflowTaskScheduledEvent(ms) + addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, taskqueue, identity) + wfMs := workflow.TestCloneToProto(ms) + gweResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + // right now the next event ID is 4 + 
s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gweResponse, nil) + + // test get the next event ID instantly + response, err := s.mockHistoryEngine.GetMutableState(ctx, &historyservice.GetMutableStateRequest{ + NamespaceId: tests.NamespaceID.String(), + Execution: &execution, + }) + s.Nil(err) + s.Equal(int64(4), response.GetNextEventId()) + s.Equal(tests.RunID, response.GetFirstExecutionRunId()) +} + +func (s *engineSuite) TestGetMutableState_IntestRunID() { + ctx := context.Background() + + execution := commonpb.WorkflowExecution{ + WorkflowId: "test-get-workflow-execution-event-id", + RunId: "run-id-not-valid-uuid", + } + + _, err := s.mockHistoryEngine.GetMutableState(ctx, &historyservice.GetMutableStateRequest{ + NamespaceId: tests.NamespaceID.String(), + Execution: &execution, + }) + s.Equal(errRunIDNotValid, err) +} + +func (s *engineSuite) TestGetMutableState_EmptyRunID() { + ctx := context.Background() + + execution := commonpb.WorkflowExecution{ + WorkflowId: "test-get-workflow-execution-event-id", + } + + s.mockExecutionMgr.EXPECT().GetCurrentExecution(gomock.Any(), gomock.Any()).Return(nil, serviceerror.NewNotFound("")) + + _, err := s.mockHistoryEngine.GetMutableState(ctx, &historyservice.GetMutableStateRequest{ + NamespaceId: tests.NamespaceID.String(), + Execution: &execution, + }) + s.IsType(&serviceerror.NotFound{}, err) +} + +func (s *engineSuite) TestGetMutableStateLongPoll() { + ctx := context.Background() + + namespaceID := tests.NamespaceID + execution := commonpb.WorkflowExecution{ + WorkflowId: "test-get-workflow-execution-event-id", + RunId: tests.RunID, + } + taskqueue := "testTaskQueue" + identity := "testIdentity" + + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, tests.LocalNamespaceEntry, + log.NewTestLogger(), execution.GetRunId()) + addWorkflowExecutionStartedEvent(ms, execution, "wType", taskqueue, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) + wt := addWorkflowTaskScheduledEvent(ms) + addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, taskqueue, identity) + wfMs := workflow.TestCloneToProto(ms) + gweResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + // right now the next event ID is 4 + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gweResponse, nil) + + // test long poll on next event ID change + waitGroup := &sync.WaitGroup{} + waitGroup.Add(1) + asycWorkflowUpdate := func(delay time.Duration) { + tt := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: execution.WorkflowId, + RunId: execution.RunId, + ScheduledEventId: 2, + } + taskToken, _ := tt.Marshal() + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) + + timer := time.NewTimer(delay) + + <-timer.C + _, err := s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ + NamespaceId: tests.NamespaceID.String(), + CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ + TaskToken: taskToken, + Identity: identity, + }, + }) + s.Nil(err) + waitGroup.Done() + // right now the next event ID is 5 + } + + // return immediately, since the expected next event ID appears + response, err := s.mockHistoryEngine.GetMutableState(ctx, &historyservice.GetMutableStateRequest{ + NamespaceId: tests.NamespaceID.String(), + Execution: &execution, + 
ExpectedNextEventId: 3, + }) + s.Nil(err) + s.Equal(int64(4), response.NextEventId) + + // long poll, new event happen before long poll timeout + go asycWorkflowUpdate(time.Second * 2) + start := time.Now().UTC() + pollResponse, err := s.mockHistoryEngine.PollMutableState(ctx, &historyservice.PollMutableStateRequest{ + NamespaceId: tests.NamespaceID.String(), + Execution: &execution, + ExpectedNextEventId: 4, + }) + s.True(time.Now().UTC().After(start.Add(time.Second * 1))) + s.Nil(err) + s.Equal(int64(5), pollResponse.GetNextEventId()) + waitGroup.Wait() +} + +func (s *engineSuite) TestGetMutableStateLongPoll_CurrentBranchChanged() { + ctx := context.Background() + + execution := commonpb.WorkflowExecution{ + WorkflowId: "test-get-workflow-execution-event-id", + RunId: tests.RunID, + } + taskqueue := "testTaskQueue" + identity := "testIdentity" + + ms := workflow.TestLocalMutableState( + s.mockHistoryEngine.shard, + s.eventsCache, + tests.LocalNamespaceEntry, + log.NewTestLogger(), + execution.GetRunId()) + addWorkflowExecutionStartedEvent(ms, execution, "wType", taskqueue, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) + wt := addWorkflowTaskScheduledEvent(ms) + addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, taskqueue, identity) + wfMs := workflow.TestCloneToProto(ms) + gweResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + // right now the next event ID is 4 + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gweResponse, nil) + + // test long poll on next event ID change + asyncBranchTokenUpdate := func(delay time.Duration) { + timer := time.NewTimer(delay) + <-timer.C + newExecution := &commonpb.WorkflowExecution{ + WorkflowId: execution.WorkflowId, + RunId: execution.RunId, + } + ms.GetExecutionInfo().GetVersionHistories() + s.mockHistoryEngine.eventNotifier.NotifyNewHistoryEvent(events.NewNotification( + "tests.NamespaceID", + newExecution, + int64(1), + int64(0), + int64(4), + int64(1), + enumsspb.WORKFLOW_EXECUTION_STATE_CREATED, + enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING, + ms.GetExecutionInfo().GetVersionHistories())) + } + + // return immediately, since the expected next event ID appears + response0, err := s.mockHistoryEngine.GetMutableState(ctx, &historyservice.GetMutableStateRequest{ + NamespaceId: tests.NamespaceID.String(), + Execution: &execution, + ExpectedNextEventId: 3, + }) + s.Nil(err) + s.Equal(int64(4), response0.GetNextEventId()) + + // long poll, new event happen before long poll timeout + go asyncBranchTokenUpdate(time.Second * 2) + start := time.Now().UTC() + response1, err := s.mockHistoryEngine.GetMutableState(ctx, &historyservice.GetMutableStateRequest{ + NamespaceId: tests.NamespaceID.String(), + Execution: &execution, + ExpectedNextEventId: 10, + }) + s.True(time.Now().UTC().After(start.Add(time.Second * 1))) + s.Nil(err) + s.Equal(response0.GetCurrentBranchToken(), response1.GetCurrentBranchToken()) +} + +func (s *engineSuite) TestGetMutableStateLongPollTimeout() { + ctx := context.Background() + + execution := commonpb.WorkflowExecution{ + WorkflowId: "test-get-workflow-execution-event-id", + RunId: tests.RunID, + } + taskqueue := "testTaskQueue" + identity := "testIdentity" + + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, + tests.LocalNamespaceEntry, log.NewTestLogger(), execution.GetRunId()) + addWorkflowExecutionStartedEvent(ms, execution, "wType", taskqueue, payloads.EncodeString("input"), 100*time.Second, 
50*time.Second, 200*time.Second, identity) + wt := addWorkflowTaskScheduledEvent(ms) + addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, taskqueue, identity) + wfMs := workflow.TestCloneToProto(ms) + gweResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + // right now the next event ID is 4 + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gweResponse, nil) + + // long poll, no event happen after long poll timeout + response, err := s.mockHistoryEngine.GetMutableState(ctx, &historyservice.GetMutableStateRequest{ + NamespaceId: tests.NamespaceID.String(), + Execution: &execution, + ExpectedNextEventId: 4, + }) + s.Nil(err) + s.Equal(int64(4), response.GetNextEventId()) +} + +func (s *engineSuite) TestQueryWorkflow_RejectBasedOnCompleted() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "TestQueryWorkflow_RejectBasedOnCompleted", + RunId: tests.RunID, + } + taskqueue := "testTaskQueue" + identity := "testIdentity" + + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, tests.LocalNamespaceEntry, log.NewTestLogger(), execution.GetRunId()) + addWorkflowExecutionStartedEvent(ms, execution, "wType", taskqueue, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) + wt := addWorkflowTaskScheduledEvent(ms) + event := addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, taskqueue, identity) + wt.StartedEventID = event.GetEventId() + event = addWorkflowTaskCompletedEvent(&s.Suite, ms, wt.ScheduledEventID, wt.StartedEventID, "some random identity") + addCompleteWorkflowEvent(ms, event.GetEventId(), nil) + wfMs := workflow.TestCloneToProto(ms) + gweResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gweResponse, nil) + + request := &historyservice.QueryWorkflowRequest{ + NamespaceId: tests.NamespaceID.String(), + Request: &workflowservice.QueryWorkflowRequest{ + Execution: &execution, + Query: &querypb.WorkflowQuery{}, + QueryRejectCondition: enumspb.QUERY_REJECT_CONDITION_NOT_OPEN, + }, + } + resp, err := s.mockHistoryEngine.QueryWorkflow(context.Background(), request) + s.NoError(err) + s.Nil(resp.GetResponse().QueryResult) + s.NotNil(resp.GetResponse().QueryRejected) + s.Equal(enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED, resp.GetResponse().GetQueryRejected().GetStatus()) +} + +func (s *engineSuite) TestQueryWorkflow_RejectBasedOnFailed() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "TestQueryWorkflow_RejectBasedOnFailed", + RunId: tests.RunID, + } + taskqueue := "testTaskQueue" + identity := "testIdentity" + + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, tests.LocalNamespaceEntry, log.NewTestLogger(), execution.GetRunId()) + addWorkflowExecutionStartedEvent(ms, execution, "wType", taskqueue, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) + wt := addWorkflowTaskScheduledEvent(ms) + event := addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, taskqueue, identity) + wt.StartedEventID = event.GetEventId() + event = addWorkflowTaskCompletedEvent(&s.Suite, ms, wt.ScheduledEventID, wt.StartedEventID, "some random identity") + addFailWorkflowEvent(ms, event.GetEventId(), failure.NewServerFailure("failure reason", true), enumspb.RETRY_STATE_NON_RETRYABLE_FAILURE) + wfMs := workflow.TestCloneToProto(ms) + gweResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + 
s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gweResponse, nil) + + request := &historyservice.QueryWorkflowRequest{ + NamespaceId: tests.NamespaceID.String(), + Request: &workflowservice.QueryWorkflowRequest{ + Execution: &execution, + Query: &querypb.WorkflowQuery{}, + QueryRejectCondition: enumspb.QUERY_REJECT_CONDITION_NOT_OPEN, + }, + } + resp, err := s.mockHistoryEngine.QueryWorkflow(context.Background(), request) + s.NoError(err) + s.Nil(resp.GetResponse().QueryResult) + s.NotNil(resp.GetResponse().QueryRejected) + s.Equal(enumspb.WORKFLOW_EXECUTION_STATUS_FAILED, resp.GetResponse().GetQueryRejected().GetStatus()) + + request = &historyservice.QueryWorkflowRequest{ + NamespaceId: tests.NamespaceID.String(), + Request: &workflowservice.QueryWorkflowRequest{ + Execution: &execution, + Query: &querypb.WorkflowQuery{}, + QueryRejectCondition: enumspb.QUERY_REJECT_CONDITION_NOT_COMPLETED_CLEANLY, + }, + } + resp, err = s.mockHistoryEngine.QueryWorkflow(context.Background(), request) + s.NoError(err) + s.Nil(resp.GetResponse().QueryResult) + s.NotNil(resp.GetResponse().QueryRejected) + s.Equal(enumspb.WORKFLOW_EXECUTION_STATUS_FAILED, resp.GetResponse().GetQueryRejected().GetStatus()) +} + +func (s *engineSuite) TestQueryWorkflow_DirectlyThroughMatching() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "TestQueryWorkflow_DirectlyThroughMatching", + RunId: tests.RunID, + } + taskqueue := "testTaskQueue" + identity := "testIdentity" + + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, tests.LocalNamespaceEntry, log.NewTestLogger(), execution.GetRunId()) + addWorkflowExecutionStartedEvent(ms, execution, "wType", taskqueue, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) + wt := addWorkflowTaskScheduledEvent(ms) + startedEvent := addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, taskqueue, identity) + addWorkflowTaskCompletedEvent(&s.Suite, ms, wt.ScheduledEventID, startedEvent.EventId, identity) + + wfMs := workflow.TestCloneToProto(ms) + gweResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gweResponse, nil) + s.mockMatchingClient.EXPECT().QueryWorkflow(gomock.Any(), gomock.Any()).Return(&matchingservice.QueryWorkflowResponse{QueryResult: payloads.EncodeBytes([]byte{1, 2, 3})}, nil) + s.mockHistoryEngine.matchingClient = s.mockMatchingClient + request := &historyservice.QueryWorkflowRequest{ + NamespaceId: tests.NamespaceID.String(), + Request: &workflowservice.QueryWorkflowRequest{ + Execution: &execution, + Query: &querypb.WorkflowQuery{}, + // since workflow is open this filter does not reject query + QueryRejectCondition: enumspb.QUERY_REJECT_CONDITION_NOT_OPEN, + }, + } + resp, err := s.mockHistoryEngine.QueryWorkflow(context.Background(), request) + s.NoError(err) + s.NotNil(resp.GetResponse().QueryResult) + s.Nil(resp.GetResponse().QueryRejected) + + var queryResult []byte + err = payloads.Decode(resp.GetResponse().GetQueryResult(), &queryResult) + s.NoError(err) + s.Equal([]byte{1, 2, 3}, queryResult) +} + +func (s *engineSuite) TestQueryWorkflow_WorkflowTaskDispatch_Timeout() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "TestQueryWorkflow_WorkflowTaskDispatch_Timeout", + RunId: tests.RunID, + } + taskqueue := "testTaskQueue" + identity := "testIdentity" + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, 
tests.LocalNamespaceEntry, log.NewTestLogger(), execution.GetRunId()) + addWorkflowExecutionStartedEvent(ms, execution, "wType", taskqueue, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) + wt := addWorkflowTaskScheduledEvent(ms) + startedEvent := addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, taskqueue, identity) + addWorkflowTaskCompletedEvent(&s.Suite, ms, wt.ScheduledEventID, startedEvent.EventId, identity) + wt = addWorkflowTaskScheduledEvent(ms) + addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, taskqueue, identity) + + wfMs := workflow.TestCloneToProto(ms) + gweResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gweResponse, nil) + request := &historyservice.QueryWorkflowRequest{ + NamespaceId: tests.NamespaceID.String(), + Request: &workflowservice.QueryWorkflowRequest{ + Execution: &execution, + Query: &querypb.WorkflowQuery{}, + // since workflow is open this filter does not reject query + QueryRejectCondition: enumspb.QUERY_REJECT_CONDITION_NOT_OPEN, + }, + } + + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*2) + defer cancel() + resp, err := s.mockHistoryEngine.QueryWorkflow(ctx, request) + s.Error(err) + s.Nil(resp) + wg.Done() + }() + + time.Sleep(time.Second) + ms1 := s.getMutableState(tests.NamespaceID, execution) + s.NotNil(ms1) + qr := ms1.GetQueryRegistry() + s.True(qr.HasBufferedQuery()) + s.False(qr.HasCompletedQuery()) + s.False(qr.HasUnblockedQuery()) + s.False(qr.HasFailedQuery()) + wg.Wait() + s.False(qr.HasBufferedQuery()) + s.False(qr.HasCompletedQuery()) + s.False(qr.HasUnblockedQuery()) + s.False(qr.HasFailedQuery()) +} + +func (s *engineSuite) TestQueryWorkflow_ConsistentQueryBufferFull() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "TestQueryWorkflow_ConsistentQueryBufferFull", + RunId: tests.RunID, + } + taskqueue := "testTaskQueue" + identity := "testIdentity" + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, tests.LocalNamespaceEntry, log.NewTestLogger(), execution.GetRunId()) + addWorkflowExecutionStartedEvent(ms, execution, "wType", taskqueue, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) + wt := addWorkflowTaskScheduledEvent(ms) + startedEvent := addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, taskqueue, identity) + addWorkflowTaskCompletedEvent(&s.Suite, ms, wt.ScheduledEventID, startedEvent.EventId, identity) + wt = addWorkflowTaskScheduledEvent(ms) + addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, taskqueue, identity) + + wfMs := workflow.TestCloneToProto(ms) + gweResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gweResponse, nil) + + // buffer query so that when history.QueryWorkflow is called buffer is already full + ctx, release, err := s.workflowCache.GetOrCreateWorkflowExecution( + context.Background(), + tests.NamespaceID, + execution, + workflow.LockPriorityHigh, + ) + s.NoError(err) + loadedMS, err := ctx.LoadMutableState(context.Background()) + s.NoError(err) + qr := workflow.NewQueryRegistry() + qr.BufferQuery(&querypb.WorkflowQuery{}) + loadedMS.(*workflow.MutableStateImpl).QueryRegistry = qr + release(nil) + + request := &historyservice.QueryWorkflowRequest{ + NamespaceId: tests.NamespaceID.String(), + Request: 
&workflowservice.QueryWorkflowRequest{ + Execution: &execution, + Query: &querypb.WorkflowQuery{}, + }, + } + resp, err := s.mockHistoryEngine.QueryWorkflow(context.Background(), request) + s.Nil(resp) + s.Equal(consts.ErrConsistentQueryBufferExceeded, err) +} + +func (s *engineSuite) TestQueryWorkflow_WorkflowTaskDispatch_Complete() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "TestQueryWorkflow_WorkflowTaskDispatch_Complete", + RunId: tests.RunID, + } + taskqueue := "testTaskQueue" + identity := "testIdentity" + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, tests.LocalNamespaceEntry, log.NewTestLogger(), execution.GetRunId()) + addWorkflowExecutionStartedEvent(ms, execution, "wType", taskqueue, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) + wt := addWorkflowTaskScheduledEvent(ms) + startedEvent := addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, taskqueue, identity) + addWorkflowTaskCompletedEvent(&s.Suite, ms, wt.ScheduledEventID, startedEvent.EventId, identity) + wt = addWorkflowTaskScheduledEvent(ms) + addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, taskqueue, identity) + + wfMs := workflow.TestCloneToProto(ms) + gweResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gweResponse, nil) + + waitGroup := &sync.WaitGroup{} + waitGroup.Add(1) + asyncQueryUpdate := func(delay time.Duration, answer []byte) { + defer waitGroup.Done() + time.Sleep(delay) + ms1 := s.getMutableState(tests.NamespaceID, execution) + s.NotNil(ms1) + qr := ms1.GetQueryRegistry() + buffered := qr.GetBufferedIDs() + for _, id := range buffered { + resultType := enumspb.QUERY_RESULT_TYPE_ANSWERED + succeededCompletionState := &workflow.QueryCompletionState{ + Type: workflow.QueryCompletionTypeSucceeded, + Result: &querypb.WorkflowQueryResult{ + ResultType: resultType, + Answer: payloads.EncodeBytes(answer), + }, + } + err := qr.SetCompletionState(id, succeededCompletionState) + s.NoError(err) + state, err := qr.GetCompletionState(id) + s.NoError(err) + s.Equal(workflow.QueryCompletionTypeSucceeded, state.Type) + } + } + + request := &historyservice.QueryWorkflowRequest{ + NamespaceId: tests.NamespaceID.String(), + Request: &workflowservice.QueryWorkflowRequest{ + Execution: &execution, + Query: &querypb.WorkflowQuery{}, + }, + } + go asyncQueryUpdate(time.Second*2, []byte{1, 2, 3}) + start := time.Now().UTC() + resp, err := s.mockHistoryEngine.QueryWorkflow(context.Background(), request) + s.True(time.Now().UTC().After(start.Add(time.Second))) + s.NoError(err) + + var queryResult []byte + err = payloads.Decode(resp.GetResponse().GetQueryResult(), &queryResult) + s.NoError(err) + s.Equal([]byte{1, 2, 3}, queryResult) + + ms1 := s.getMutableState(tests.NamespaceID, execution) + s.NotNil(ms1) + qr := ms1.GetQueryRegistry() + s.False(qr.HasBufferedQuery()) + s.False(qr.HasCompletedQuery()) + waitGroup.Wait() +} + +func (s *engineSuite) TestQueryWorkflow_WorkflowTaskDispatch_Unblocked() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "TestQueryWorkflow_WorkflowTaskDispatch_Unblocked", + RunId: tests.RunID, + } + taskqueue := "testTaskQueue" + identity := "testIdentity" + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, tests.LocalNamespaceEntry, log.NewTestLogger(), execution.GetRunId()) + addWorkflowExecutionStartedEvent(ms, execution, "wType", taskqueue, 
payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) + wt := addWorkflowTaskScheduledEvent(ms) + startedEvent := addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, taskqueue, identity) + addWorkflowTaskCompletedEvent(&s.Suite, ms, wt.ScheduledEventID, startedEvent.EventId, identity) + wt = addWorkflowTaskScheduledEvent(ms) + addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, taskqueue, identity) + + wfMs := workflow.TestCloneToProto(ms) + gweResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gweResponse, nil) + s.mockMatchingClient.EXPECT().QueryWorkflow(gomock.Any(), gomock.Any()).Return(&matchingservice.QueryWorkflowResponse{QueryResult: payloads.EncodeBytes([]byte{1, 2, 3})}, nil) + s.mockHistoryEngine.matchingClient = s.mockMatchingClient + waitGroup := &sync.WaitGroup{} + waitGroup.Add(1) + asyncQueryUpdate := func(delay time.Duration, answer []byte) { + defer waitGroup.Done() + time.Sleep(delay) + ms1 := s.getMutableState(tests.NamespaceID, execution) + s.NotNil(ms1) + qr := ms1.GetQueryRegistry() + buffered := qr.GetBufferedIDs() + for _, id := range buffered { + s.NoError(qr.SetCompletionState(id, &workflow.QueryCompletionState{Type: workflow.QueryCompletionTypeUnblocked})) + state, err := qr.GetCompletionState(id) + s.NoError(err) + s.Equal(workflow.QueryCompletionTypeUnblocked, state.Type) + } + } + + request := &historyservice.QueryWorkflowRequest{ + NamespaceId: tests.NamespaceID.String(), + Request: &workflowservice.QueryWorkflowRequest{ + Execution: &execution, + Query: &querypb.WorkflowQuery{}, + }, + } + go asyncQueryUpdate(time.Second*2, []byte{1, 2, 3}) + start := time.Now().UTC() + resp, err := s.mockHistoryEngine.QueryWorkflow(context.Background(), request) + s.True(time.Now().UTC().After(start.Add(time.Second))) + s.NoError(err) + + var queryResult []byte + err = payloads.Decode(resp.GetResponse().GetQueryResult(), &queryResult) + s.NoError(err) + s.Equal([]byte{1, 2, 3}, queryResult) + + ms1 := s.getMutableState(tests.NamespaceID, execution) + s.NotNil(ms1) + qr := ms1.GetQueryRegistry() + s.False(qr.HasBufferedQuery()) + s.False(qr.HasCompletedQuery()) + s.False(qr.HasUnblockedQuery()) + waitGroup.Wait() +} + +func (s *engineSuite) TestRespondWorkflowTaskCompletedInvalidToken() { + + invalidToken, _ := json.Marshal("bad token") + identity := "testIdentity" + + _, err := s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ + NamespaceId: tests.NamespaceID.String(), + CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ + TaskToken: invalidToken, + Commands: nil, + Identity: identity, + }, + }) + + s.NotNil(err) + s.IsType(&serviceerror.InvalidArgument{}, err) +} + +func (s *engineSuite) TestRespondWorkflowTaskCompletedIfNoExecution() { + namespaceID := tests.NamespaceID + tt := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + ScheduledEventId: 2, + } + taskToken, _ := tt.Marshal() + identity := "testIdentity" + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(nil, serviceerror.NewNotFound("")) + + _, err := s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ + NamespaceId: tests.NamespaceID.String(), + CompleteRequest: 
&workflowservice.RespondWorkflowTaskCompletedRequest{ + TaskToken: taskToken, + Identity: identity, + }, + }) + s.NotNil(err) + s.IsType(&serviceerror.NotFound{}, err) +} + +func (s *engineSuite) TestRespondWorkflowTaskCompletedIfGetExecutionFailed() { + namespaceID := tests.NamespaceID + tt := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + ScheduledEventId: 2, + } + taskToken, _ := tt.Marshal() + identity := "testIdentity" + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(nil, errors.New("FAILED")) + + _, err := s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ + NamespaceId: tests.NamespaceID.String(), + CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ + TaskToken: taskToken, + Identity: identity, + }, + }) + s.EqualError(err, "FAILED") +} + +func (s *engineSuite) TestRespondWorkflowTaskCompletedUpdateExecutionFailed() { + namespaceID := tests.NamespaceID + we := commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + } + tq := "testTaskQueue" + + tt := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: we.WorkflowId, + RunId: we.RunId, + ScheduledEventId: 2, + } + taskToken, _ := tt.Marshal() + identity := "testIdentity" + + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, + tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) + addWorkflowExecutionStartedEvent(ms, we, "wType", tq, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) + wt := addWorkflowTaskScheduledEvent(ms) + addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tq, identity) + + wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, errors.New("FAILED")) + s.mockShardManager.EXPECT().UpdateShard(gomock.Any(), gomock.Any()).Return(nil).AnyTimes() // might be called in background goroutine + + _, err := s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ + NamespaceId: tests.NamespaceID.String(), + CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ + TaskToken: taskToken, + Identity: identity, + }, + }) + s.NotNil(err) + s.EqualError(err, "FAILED") +} + +func (s *engineSuite) TestRespondWorkflowTaskCompletedIfTaskCompleted() { + namespaceID := tests.NamespaceID + we := commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + } + tq := "testTaskQueue" + tt := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: we.WorkflowId, + RunId: we.RunId, + ScheduledEventId: 2, + } + taskToken, _ := tt.Marshal() + identity := "testIdentity" + + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, + tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) + addWorkflowExecutionStartedEvent(ms, we, "wType", tq, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) + wt := addWorkflowTaskScheduledEvent(ms) + startedEvent := addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tq, identity) + 
addWorkflowTaskCompletedEvent(&s.Suite, ms, wt.ScheduledEventID, startedEvent.EventId, identity) + + wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + + _, err := s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ + NamespaceId: tests.NamespaceID.String(), + CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ + TaskToken: taskToken, + Identity: identity, + }, + }) + s.NotNil(err) + s.IsType(&serviceerror.NotFound{}, err) +} + +func (s *engineSuite) TestRespondWorkflowTaskCompletedIfTaskNotStarted() { + namespaceID := tests.NamespaceID + we := commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + } + tq := "testTaskQueue" + tt := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: we.WorkflowId, + RunId: we.RunId, + ScheduledEventId: 2, + } + taskToken, _ := tt.Marshal() + identity := "testIdentity" + + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, + tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) + addWorkflowExecutionStartedEvent(ms, we, "wType", tq, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) + addWorkflowTaskScheduledEvent(ms) + + wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + + _, err := s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ + NamespaceId: tests.NamespaceID.String(), + CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ + TaskToken: taskToken, + }, + }) + s.NotNil(err) + s.IsType(&serviceerror.NotFound{}, err) +} + +func (s *engineSuite) TestRespondWorkflowTaskCompletedConflictOnUpdate() { + namespaceID := tests.NamespaceID + we := commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + } + tq := "testTaskQueue" + identity := "testIdentity" + activity1ID := "activity1" + activity1Type := "activity_type1" + activity1Input := payloads.EncodeString("input1") + activity1Result := payloads.EncodeString("activity1_result") + activity2ID := "activity2" + activity2Type := "activity_type2" + activity2Input := payloads.EncodeString("input2") + activity2Result := payloads.EncodeString("activity2_result") + activity3ID := "activity3" + activity3Type := "activity_type3" + activity3Input := payloads.EncodeString("input3") + + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, + tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) + addWorkflowExecutionStartedEvent(ms, we, "wType", tq, payloads.EncodeString("input"), 100*time.Second, 100*time.Second, 100*time.Second, identity) + wt1 := addWorkflowTaskScheduledEvent(ms) + workflowTaskStartedEvent1 := addWorkflowTaskStartedEvent(ms, wt1.ScheduledEventID, tq, identity) + workflowTaskCompletedEvent1 := addWorkflowTaskCompletedEvent(&s.Suite, ms, wt1.ScheduledEventID, workflowTaskStartedEvent1.EventId, identity) + activity1ScheduledEvent, _ := addActivityTaskScheduledEvent(ms, workflowTaskCompletedEvent1.EventId, activity1ID, activity1Type, tq, activity1Input, 100*time.Second, 10*time.Second, 1*time.Second, 
5*time.Second) + activity2ScheduledEvent, _ := addActivityTaskScheduledEvent(ms, workflowTaskCompletedEvent1.EventId, activity2ID, activity2Type, tq, activity2Input, 100*time.Second, 10*time.Second, 1*time.Second, 5*time.Second) + activity1StartedEvent := addActivityTaskStartedEvent(ms, activity1ScheduledEvent.EventId, identity) + activity2StartedEvent := addActivityTaskStartedEvent(ms, activity2ScheduledEvent.EventId, identity) + addActivityTaskCompletedEvent(ms, activity1ScheduledEvent.EventId, + activity1StartedEvent.EventId, activity1Result, identity) + wt2 := addWorkflowTaskScheduledEvent(ms) + addWorkflowTaskStartedEvent(ms, wt2.ScheduledEventID, tq, identity) + + tt := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: tests.WorkflowID, + RunId: we.GetRunId(), + ScheduledEventId: wt2.ScheduledEventID, + } + taskToken, _ := tt.Marshal() + + commands := []*commandpb.Command{{ + CommandType: enumspb.COMMAND_TYPE_SCHEDULE_ACTIVITY_TASK, + Attributes: &commandpb.Command_ScheduleActivityTaskCommandAttributes{ScheduleActivityTaskCommandAttributes: &commandpb.ScheduleActivityTaskCommandAttributes{ + ActivityId: activity3ID, + ActivityType: &commonpb.ActivityType{Name: activity3Type}, + TaskQueue: &taskqueuepb.TaskQueue{Name: tq}, + Input: activity3Input, + ScheduleToCloseTimeout: timestamp.DurationPtr(100 * time.Second), + ScheduleToStartTimeout: timestamp.DurationPtr(10 * time.Second), + StartToCloseTimeout: timestamp.DurationPtr(50 * time.Second), + HeartbeatTimeout: timestamp.DurationPtr(5 * time.Second), + }}, + }} + + wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + + addActivityTaskCompletedEvent(ms, activity2ScheduledEvent.EventId, + activity2StartedEvent.EventId, activity2Result, identity) + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, &persistence.ConditionFailedError{}) + + _, err := s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ + NamespaceId: tests.NamespaceID.String(), + CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ + TaskToken: taskToken, + Commands: commands, + Identity: identity, + }, + }) + s.Error(err) + s.Equal(&persistence.ConditionFailedError{}, err) +} + +func (s *engineSuite) TestValidateSignalRequest() { + workflowType := "testType" + input := payloads.EncodeString("input") + startRequest := &workflowservice.StartWorkflowExecutionRequest{ + WorkflowId: "ID", + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: "taskptr"}, + Input: input, + WorkflowExecutionTimeout: timestamp.DurationPtr(20 * time.Second), + WorkflowRunTimeout: timestamp.DurationPtr(10 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(10 * time.Second), + Identity: "identity", + } + err := api.ValidateStartWorkflowExecutionRequest( + context.Background(), startRequest, s.mockHistoryEngine.shard, tests.LocalNamespaceEntry, "SignalWithStartWorkflowExecution") + s.Error(err, "startRequest doesn't have request id, it should error out") + + startRequest.RequestId = "request-id" + startRequest.Memo = &commonpb.Memo{Fields: map[string]*commonpb.Payload{ + "data": payload.EncodeBytes(make([]byte, 4*1024*1024)), + }} + err = api.ValidateStartWorkflowExecutionRequest( + 
context.Background(), startRequest, s.mockHistoryEngine.shard, tests.LocalNamespaceEntry, "SignalWithStartWorkflowExecution") + s.Error(err, "memo should be too big") +} + +func (s *engineSuite) TestRespondWorkflowTaskCompleted_StaleCache() { + namespaceID := tests.NamespaceID + we := commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + } + tl := "testTaskQueue" + tt := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: we.WorkflowId, + RunId: we.RunId, + ScheduledEventId: 2, + } + tt.ScheduledEventId = 4 // Set it to 4 to emulate stale cache. + + taskToken, _ := tt.Marshal() + identity := "testIdentity" + input := payloads.EncodeString("input") + + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, + tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) + addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) + wt := addWorkflowTaskScheduledEvent(ms) + addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) + + commands := []*commandpb.Command{{ + CommandType: enumspb.COMMAND_TYPE_SCHEDULE_ACTIVITY_TASK, + Attributes: &commandpb.Command_ScheduleActivityTaskCommandAttributes{ScheduleActivityTaskCommandAttributes: &commandpb.ScheduleActivityTaskCommandAttributes{ + ActivityId: "activity1", + ActivityType: &commonpb.ActivityType{Name: "activity_type1"}, + TaskQueue: &taskqueuepb.TaskQueue{Name: tl}, + Input: input, + ScheduleToCloseTimeout: timestamp.DurationPtr(100 * time.Second), + ScheduleToStartTimeout: timestamp.DurationPtr(10 * time.Second), + StartToCloseTimeout: timestamp.DurationPtr(50 * time.Second), + HeartbeatTimeout: timestamp.DurationPtr(5 * time.Second), + }}, + }} + + wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil).Times(2) + + _, err := s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ + NamespaceId: tests.NamespaceID.String(), + CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ + TaskToken: taskToken, + Commands: commands, + Identity: identity, + }, + }) + s.IsType(&serviceerror.NotFound{}, err) +} + +func (s *engineSuite) TestRespondWorkflowTaskCompletedCompleteWorkflowFailed() { + namespaceID := tests.NamespaceID + we := commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + } + tl := "testTaskQueue" + identity := "testIdentity" + activity1ID := "activity1" + activity1Type := "activity_type1" + activity1Input := payloads.EncodeString("input1") + activity1Result := payloads.EncodeString("activity1_result") + activity2ID := "activity2" + activity2Type := "activity_type2" + activity2Input := payloads.EncodeString("input2") + activity2Result := payloads.EncodeString("activity2_result") + workflowResult := payloads.EncodeString("workflow result") + + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, + tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) + addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 25*time.Second, 20*time.Second, 200*time.Second, identity) + wt1 := addWorkflowTaskScheduledEvent(ms) + workflowTaskStartedEvent1 := addWorkflowTaskStartedEvent(ms, wt1.ScheduledEventID, tl, identity) + 
workflowTaskCompletedEvent1 := addWorkflowTaskCompletedEvent(&s.Suite, ms, wt1.ScheduledEventID, workflowTaskStartedEvent1.EventId, identity) + activity1ScheduledEvent, _ := addActivityTaskScheduledEvent(ms, workflowTaskCompletedEvent1.EventId, activity1ID, activity1Type, tl, activity1Input, 100*time.Second, 10*time.Second, 1*time.Second, 5*time.Second) + activity2ScheduledEvent, _ := addActivityTaskScheduledEvent(ms, workflowTaskCompletedEvent1.EventId, activity2ID, activity2Type, tl, activity2Input, 100*time.Second, 10*time.Second, 1*time.Second, 5*time.Second) + activity1StartedEvent := addActivityTaskStartedEvent(ms, activity1ScheduledEvent.EventId, identity) + activity2StartedEvent := addActivityTaskStartedEvent(ms, activity2ScheduledEvent.EventId, identity) + addActivityTaskCompletedEvent(ms, activity1ScheduledEvent.EventId, + activity1StartedEvent.EventId, activity1Result, identity) + wt2 := addWorkflowTaskScheduledEvent(ms) + addWorkflowTaskStartedEvent(ms, wt2.ScheduledEventID, tl, identity) + addActivityTaskCompletedEvent(ms, activity2ScheduledEvent.EventId, + activity2StartedEvent.EventId, activity2Result, identity) + + tt := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: we.WorkflowId, + RunId: we.RunId, + ScheduledEventId: wt2.ScheduledEventID, + } + taskToken, _ := tt.Marshal() + + commands := []*commandpb.Command{{ + CommandType: enumspb.COMMAND_TYPE_COMPLETE_WORKFLOW_EXECUTION, + Attributes: &commandpb.Command_CompleteWorkflowExecutionCommandAttributes{CompleteWorkflowExecutionCommandAttributes: &commandpb.CompleteWorkflowExecutionCommandAttributes{ + Result: workflowResult, + }}, + }} + + ms1 := workflow.TestCloneToProto(ms) + gwmsResponse1 := &persistence.GetWorkflowExecutionResponse{State: ms1} + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse1, nil) + + ms2 := common.CloneProto(ms1) + gwmsResponse2 := &persistence.GetWorkflowExecutionResponse{State: ms2} + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse2, nil) + + var updatedWorkflowMutation persistence.WorkflowMutation + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).DoAndReturn(func(_ context.Context, request *persistence.UpdateWorkflowExecutionRequest) (*persistence.UpdateWorkflowExecutionResponse, error) { + updatedWorkflowMutation = request.UpdateWorkflowMutation + return tests.UpdateWorkflowExecutionResponse, nil + }) + + _, err := s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ + NamespaceId: tests.NamespaceID.String(), + CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ + TaskToken: taskToken, + Commands: commands, + Identity: identity, + }, + }) + s.Error(err) + s.IsType(&serviceerror.InvalidArgument{}, err) + s.Equal("UnhandledCommand", err.Error()) + + s.NotNil(updatedWorkflowMutation) + s.Equal(int64(15), updatedWorkflowMutation.NextEventID) + s.Equal(workflowTaskStartedEvent1.EventId, updatedWorkflowMutation.ExecutionInfo.LastWorkflowTaskStartedEventId) + s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, updatedWorkflowMutation.ExecutionState.State) + s.Equal(updatedWorkflowMutation.NextEventID-1, updatedWorkflowMutation.ExecutionInfo.WorkflowTaskScheduledEventId) + s.Equal(int32(1), updatedWorkflowMutation.ExecutionInfo.Attempt) +} + +func (s *engineSuite) TestRespondWorkflowTaskCompletedFailWorkflowFailed() { + namespaceID := tests.NamespaceID + 
we := commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + } + tl := "testTaskQueue" + identity := "testIdentity" + activity1ID := "activity1" + activity1Type := "activity_type1" + activity1Input := payloads.EncodeString("input1") + activity1Result := payloads.EncodeString("activity1_result") + activity2ID := "activity2" + activity2Type := "activity_type2" + activity2Input := payloads.EncodeString("input2") + activity2Result := payloads.EncodeString("activity2_result") + reason := "workflow fail reason" + + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, + tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) + addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 25*time.Second, 20*time.Second, 200*time.Second, identity) + wt1 := addWorkflowTaskScheduledEvent(ms) + workflowTaskStartedEvent1 := addWorkflowTaskStartedEvent(ms, wt1.ScheduledEventID, tl, identity) + workflowTaskCompletedEvent1 := addWorkflowTaskCompletedEvent(&s.Suite, ms, wt1.ScheduledEventID, workflowTaskStartedEvent1.EventId, identity) + activity1ScheduledEvent, _ := addActivityTaskScheduledEvent(ms, workflowTaskCompletedEvent1.EventId, activity1ID, activity1Type, tl, activity1Input, 100*time.Second, 10*time.Second, 1*time.Second, 5*time.Second) + activity2ScheduledEvent, _ := addActivityTaskScheduledEvent(ms, workflowTaskCompletedEvent1.EventId, activity2ID, activity2Type, tl, activity2Input, 100*time.Second, 10*time.Second, 1*time.Second, 5*time.Second) + activity1StartedEvent := addActivityTaskStartedEvent(ms, activity1ScheduledEvent.EventId, identity) + activity2StartedEvent := addActivityTaskStartedEvent(ms, activity2ScheduledEvent.EventId, identity) + addActivityTaskCompletedEvent(ms, activity1ScheduledEvent.EventId, + activity1StartedEvent.EventId, activity1Result, identity) + wt2 := addWorkflowTaskScheduledEvent(ms) + addWorkflowTaskStartedEvent(ms, wt2.ScheduledEventID, tl, identity) + addActivityTaskCompletedEvent(ms, activity2ScheduledEvent.EventId, + activity2StartedEvent.EventId, activity2Result, identity) + + tt := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: we.WorkflowId, + RunId: we.RunId, + ScheduledEventId: wt2.ScheduledEventID, + } + taskToken, _ := tt.Marshal() + + commands := []*commandpb.Command{{ + CommandType: enumspb.COMMAND_TYPE_FAIL_WORKFLOW_EXECUTION, + Attributes: &commandpb.Command_FailWorkflowExecutionCommandAttributes{FailWorkflowExecutionCommandAttributes: &commandpb.FailWorkflowExecutionCommandAttributes{ + Failure: failure.NewServerFailure(reason, false), + }}, + }} + + ms1 := workflow.TestCloneToProto(ms) + gwmsResponse1 := &persistence.GetWorkflowExecutionResponse{State: ms1} + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse1, nil) + + ms2 := common.CloneProto(ms1) + gwmsResponse2 := &persistence.GetWorkflowExecutionResponse{State: ms2} + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse2, nil) + + var updatedWorkflowMutation persistence.WorkflowMutation + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).DoAndReturn(func(_ context.Context, request *persistence.UpdateWorkflowExecutionRequest) (*persistence.UpdateWorkflowExecutionResponse, error) { + updatedWorkflowMutation = request.UpdateWorkflowMutation + return tests.UpdateWorkflowExecutionResponse, nil + }) + + _, err := 
s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ + NamespaceId: tests.NamespaceID.String(), + CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ + TaskToken: taskToken, + Commands: commands, + Identity: identity, + }, + }) + s.Error(err) + s.IsType(&serviceerror.InvalidArgument{}, err) + s.Equal("UnhandledCommand", err.Error()) + + s.NotNil(updatedWorkflowMutation) + s.Equal(int64(15), updatedWorkflowMutation.NextEventID) + s.Equal(workflowTaskStartedEvent1.EventId, updatedWorkflowMutation.ExecutionInfo.LastWorkflowTaskStartedEventId) + s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, updatedWorkflowMutation.ExecutionState.State) + s.Equal(updatedWorkflowMutation.NextEventID-1, updatedWorkflowMutation.ExecutionInfo.WorkflowTaskScheduledEventId) + s.Equal(int32(1), updatedWorkflowMutation.ExecutionInfo.Attempt) +} + +func (s *engineSuite) TestRespondWorkflowTaskCompletedBadCommandAttributes() { + namespaceID := tests.NamespaceID + we := commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + } + tl := "testTaskQueue" + identity := "testIdentity" + activity1ID := "activity1" + activity1Type := "activity_type1" + activity1Input := payloads.EncodeString("input1") + activity1Result := payloads.EncodeString("activity1_result") + + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, + tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) + addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 25*time.Second, 20*time.Second, 200*time.Second, identity) + wt1 := addWorkflowTaskScheduledEvent(ms) + workflowTaskStartedEvent1 := addWorkflowTaskStartedEvent(ms, wt1.ScheduledEventID, tl, identity) + workflowTaskCompletedEvent1 := addWorkflowTaskCompletedEvent(&s.Suite, ms, wt1.ScheduledEventID, workflowTaskStartedEvent1.EventId, identity) + activity1ScheduledEvent, _ := addActivityTaskScheduledEvent(ms, workflowTaskCompletedEvent1.EventId, activity1ID, activity1Type, tl, activity1Input, 100*time.Second, 10*time.Second, 1*time.Second, 5*time.Second) + activity1StartedEvent := addActivityTaskStartedEvent(ms, activity1ScheduledEvent.EventId, identity) + addActivityTaskCompletedEvent(ms, activity1ScheduledEvent.EventId, + activity1StartedEvent.EventId, activity1Result, identity) + wt2 := addWorkflowTaskScheduledEvent(ms) + addWorkflowTaskStartedEvent(ms, wt2.ScheduledEventID, tl, identity) + + tt := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: we.WorkflowId, + RunId: we.RunId, + ScheduledEventId: wt2.ScheduledEventID, + } + taskToken, _ := tt.Marshal() + + // commands with nil attributes + commands := []*commandpb.Command{{ + CommandType: enumspb.COMMAND_TYPE_COMPLETE_WORKFLOW_EXECUTION, + }} + + gwmsResponse1 := &persistence.GetWorkflowExecutionResponse{State: workflow.TestCloneToProto(ms)} + gwmsResponse2 := &persistence.GetWorkflowExecutionResponse{State: workflow.TestCloneToProto(ms)} + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse1, nil) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse2, nil) + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) + + _, err := s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ + NamespaceId: 
tests.NamespaceID.String(), + CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ + TaskToken: taskToken, + Commands: commands, + Identity: identity, + }, + }) + s.Error(err) + s.IsType(&serviceerror.InvalidArgument{}, err) + s.Equal("BadCompleteWorkflowExecutionAttributes: CompleteWorkflowExecutionCommandAttributes is not set on command.", err.Error()) +} + +// This test unit tests the activity schedule timeout validation logic of HistoryEngine's RespondWorkflowTaskComplete function. +// A ScheduleActivityTask command and the corresponding ActivityTaskScheduledEvent have 3 timeouts: ScheduleToClose, ScheduleToStart and StartToClose. +// This test verifies that when either ScheduleToClose or ScheduleToStart and StartToClose are specified, +// HistoryEngine's validateActivityScheduleAttribute will deduce the missing timeout and fill it in +// instead of returning a BadRequest error and only when all three are missing should a BadRequest be returned. +func (s *engineSuite) TestRespondWorkflowTaskCompletedSingleActivityScheduledAttribute() { + runTimeout := int32(100) + testIterationVariables := []struct { + scheduleToClose int32 + scheduleToStart int32 + startToClose int32 + heartbeat int32 + expectedScheduleToClose int32 + expectedScheduleToStart int32 + expectedStartToClose int32 + expectWorkflowTaskFail bool + }{ + // No ScheduleToClose timeout, will use runTimeout + {0, 3, 7, 0, + runTimeout, 3, 7, false}, + // Has ScheduleToClose timeout but not ScheduleToStart or StartToClose, + // will use ScheduleToClose for ScheduleToStart and StartToClose + {7, 0, 0, 0, + 7, 7, 7, false}, + // Only StartToClose timeout + {0, 0, 7, 0, + runTimeout, runTimeout, 7, false}, + // No ScheduleToClose timeout, ScheduleToStart or StartToClose, expect error return + {0, 0, 0, 0, + 0, 0, 0, true}, + // Negative ScheduleToClose, expect error return + {-1, 0, 0, 0, + 0, 0, 0, true}, + // Negative ScheduleToStart, expect error return + {0, -1, 0, 0, + 0, 0, 0, true}, + // Negative StartToClose, expect error return + {0, 0, -1, 0, + 0, 0, 0, true}, + // Negative HeartBeat, expect error return + {0, 0, 0, -1, + 0, 0, 0, true}, + // Use workflow timeout + {runTimeout, 0, 0, 0, + runTimeout, runTimeout, runTimeout, false}, + // Timeout larger than workflow timeout + {runTimeout + 1, 0, 0, 0, + runTimeout, runTimeout, runTimeout, false}, + {0, runTimeout + 1, 0, 0, + 0, 0, 0, true}, + {0, 0, runTimeout + 1, 0, + runTimeout, runTimeout, runTimeout, false}, + {0, 0, 0, runTimeout + 1, + 0, 0, 0, true}, + // No ScheduleToClose timeout, will use ScheduleToStart + StartToClose, but exceed limit + {0, runTimeout, 10, 0, + runTimeout, runTimeout, 10, false}, + } + + for _, iVar := range testIterationVariables { + namespaceID := tests.NamespaceID + we := commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + } + tl := "testTaskQueue" + tt := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: tests.WorkflowID, + RunId: we.GetRunId(), + ScheduledEventId: 2, + } + taskToken, _ := tt.Marshal() + identity := "testIdentity" + input := payloads.EncodeString("input") + + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, + tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) + addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), time.Duration(runTimeout*10)*time.Second, time.Duration(runTimeout)*time.Second, 200*time.Second, identity) + wt := addWorkflowTaskScheduledEvent(ms) + 
addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) + + commands := []*commandpb.Command{{ + CommandType: enumspb.COMMAND_TYPE_SCHEDULE_ACTIVITY_TASK, + Attributes: &commandpb.Command_ScheduleActivityTaskCommandAttributes{ScheduleActivityTaskCommandAttributes: &commandpb.ScheduleActivityTaskCommandAttributes{ + ActivityId: "activity1", + ActivityType: &commonpb.ActivityType{Name: "activity_type1"}, + TaskQueue: &taskqueuepb.TaskQueue{Name: tl}, + Input: input, + ScheduleToCloseTimeout: timestamp.DurationPtr(time.Duration(iVar.scheduleToClose) * time.Second), + ScheduleToStartTimeout: timestamp.DurationPtr(time.Duration(iVar.scheduleToStart) * time.Second), + StartToCloseTimeout: timestamp.DurationPtr(time.Duration(iVar.startToClose) * time.Second), + HeartbeatTimeout: timestamp.DurationPtr(time.Duration(iVar.heartbeat) * time.Second), + }}, + }} + + gwmsResponse1 := &persistence.GetWorkflowExecutionResponse{State: workflow.TestCloneToProto(ms)} + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse1, nil) + ms2 := workflow.TestCloneToProto(ms) + if iVar.expectWorkflowTaskFail { + gwmsResponse2 := &persistence.GetWorkflowExecutionResponse{State: ms2} + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse2, nil) + } + + var updatedWorkflowMutation persistence.WorkflowMutation + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).DoAndReturn(func(_ context.Context, request *persistence.UpdateWorkflowExecutionRequest) (*persistence.UpdateWorkflowExecutionResponse, error) { + updatedWorkflowMutation = request.UpdateWorkflowMutation + return tests.UpdateWorkflowExecutionResponse, nil + }) + + _, err := s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ + NamespaceId: tests.NamespaceID.String(), + CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ + TaskToken: taskToken, + Commands: commands, + Identity: identity, + }, + }) + + if !iVar.expectWorkflowTaskFail { + s.NoError(err) + ms := s.getMutableState(tests.NamespaceID, we) + s.Equal(int64(6), ms.GetNextEventID()) + s.Equal(int64(3), ms.GetExecutionInfo().LastWorkflowTaskStartedEventId) + s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, ms.GetExecutionState().State) + s.False(ms.HasPendingWorkflowTask()) + + activity1Attributes := s.getActivityScheduledEvent(ms, int64(5)).GetActivityTaskScheduledEventAttributes() + s.Equal(time.Duration(iVar.expectedScheduleToClose)*time.Second, timestamp.DurationValue(activity1Attributes.GetScheduleToCloseTimeout()), iVar) + s.Equal(time.Duration(iVar.expectedScheduleToStart)*time.Second, timestamp.DurationValue(activity1Attributes.GetScheduleToStartTimeout()), iVar) + s.Equal(time.Duration(iVar.expectedStartToClose)*time.Second, timestamp.DurationValue(activity1Attributes.GetStartToCloseTimeout()), iVar) + } else { + s.Error(err) + s.IsType(&serviceerror.InvalidArgument{}, err) + s.True(strings.HasPrefix(err.Error(), "BadScheduleActivityAttributes"), err.Error()) + s.NotNil(updatedWorkflowMutation) + s.Equal(int64(5), updatedWorkflowMutation.NextEventID, iVar) + s.Equal(common.EmptyEventID, updatedWorkflowMutation.ExecutionInfo.LastWorkflowTaskStartedEventId, iVar) + s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, updatedWorkflowMutation.ExecutionState.State, iVar) + s.True(updatedWorkflowMutation.ExecutionInfo.WorkflowTaskScheduledEventId != common.EmptyEventID, iVar) + } + 
s.TearDownTest() + s.SetupTest() + } +} + +func (s *engineSuite) TestRespondWorkflowTaskCompletedBadBinary() { + namespaceID := tests.NamespaceID + we := commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + } + tl := "testTaskQueue" + tt := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + ScheduledEventId: 2, + } + taskToken, _ := tt.Marshal() + identity := "testIdentity" + + ns := tests.LocalNamespaceEntry.Clone( + namespace.WithID(uuid.New()), + namespace.WithBadBinary("test-bad-binary"), + ) + s.mockNamespaceCache.EXPECT().GetNamespaceByID(ns.ID()).Return(ns, nil).AnyTimes() + s.mockNamespaceCache.EXPECT().GetNamespace(ns.ID()).Return(ns, nil).AnyTimes() + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, + ns, log.NewTestLogger(), we.GetRunId()) + addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) + wt := addWorkflowTaskScheduledEvent(ms) + addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) + + var commands []*commandpb.Command + + gwmsResponse1 := &persistence.GetWorkflowExecutionResponse{State: workflow.TestCloneToProto(ms)} + ms2 := workflow.TestCloneToProto(ms) + gwmsResponse2 := &persistence.GetWorkflowExecutionResponse{State: ms2} + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse1, nil) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse2, nil) + var updatedWorkflowMutation persistence.WorkflowMutation + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).DoAndReturn(func(_ context.Context, request *persistence.UpdateWorkflowExecutionRequest) (*persistence.UpdateWorkflowExecutionResponse, error) { + updatedWorkflowMutation = request.UpdateWorkflowMutation + return tests.UpdateWorkflowExecutionResponse, nil + }) + + _, err := s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ + NamespaceId: ns.ID().String(), + CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ + TaskToken: taskToken, + Commands: commands, + Identity: identity, + BinaryChecksum: "test-bad-binary", + }, + }) + s.Error(err) + s.IsType(&serviceerror.InvalidArgument{}, err) + s.Equal("BadBinary: binary test-bad-binary is marked as bad deployment", err.Error()) + + s.NotNil(updatedWorkflowMutation) + s.Equal(int64(5), updatedWorkflowMutation.NextEventID) + s.Equal(common.EmptyEventID, updatedWorkflowMutation.ExecutionInfo.LastWorkflowTaskStartedEventId) + s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, updatedWorkflowMutation.ExecutionState.State) + s.True(updatedWorkflowMutation.ExecutionInfo.WorkflowTaskScheduledEventId != common.EmptyEventID) +} + +func (s *engineSuite) TestRespondWorkflowTaskCompletedSingleActivityScheduledWorkflowTask() { + namespaceID := tests.NamespaceID + we := commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + } + tl := "testTaskQueue" + tt := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: tests.WorkflowID, + RunId: we.GetRunId(), + ScheduledEventId: 2, + } + taskToken, _ := tt.Marshal() + identity := "testIdentity" + input := payloads.EncodeString("input") + + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, + tests.LocalNamespaceEntry, 
log.NewTestLogger(), we.GetRunId()) + addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 90*time.Second, 200*time.Second, identity) + wt := addWorkflowTaskScheduledEvent(ms) + addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) + + commands := []*commandpb.Command{{ + CommandType: enumspb.COMMAND_TYPE_SCHEDULE_ACTIVITY_TASK, + Attributes: &commandpb.Command_ScheduleActivityTaskCommandAttributes{ScheduleActivityTaskCommandAttributes: &commandpb.ScheduleActivityTaskCommandAttributes{ + ActivityId: "activity1", + ActivityType: &commonpb.ActivityType{Name: "activity_type1"}, + TaskQueue: &taskqueuepb.TaskQueue{Name: tl}, + Input: input, + ScheduleToCloseTimeout: timestamp.DurationPtr(100 * time.Second), + ScheduleToStartTimeout: timestamp.DurationPtr(10 * time.Second), + StartToCloseTimeout: timestamp.DurationPtr(50 * time.Second), + HeartbeatTimeout: timestamp.DurationPtr(5 * time.Second), + }}, + }} + + wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) + + _, err := s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ + NamespaceId: tests.NamespaceID.String(), + CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ + TaskToken: taskToken, + Commands: commands, + Identity: identity, + }, + }) + s.NoError(err) + ms2 := s.getMutableState(tests.NamespaceID, we) + s.Equal(int64(6), ms2.GetNextEventID()) + s.Equal(int64(3), ms2.GetExecutionInfo().LastWorkflowTaskStartedEventId) + s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, ms2.GetExecutionState().State) + s.False(ms2.HasPendingWorkflowTask()) + + activity1Attributes := s.getActivityScheduledEvent(ms2, int64(5)).GetActivityTaskScheduledEventAttributes() + s.Equal("activity1", activity1Attributes.ActivityId) + s.Equal("activity_type1", activity1Attributes.ActivityType.Name) + s.Equal(int64(4), activity1Attributes.WorkflowTaskCompletedEventId) + s.Equal(tl, activity1Attributes.TaskQueue.Name) + s.Equal(input, activity1Attributes.Input) + s.Equal(90*time.Second, timestamp.DurationValue(activity1Attributes.ScheduleToCloseTimeout)) // runTimeout + s.Equal(10*time.Second, timestamp.DurationValue(activity1Attributes.ScheduleToStartTimeout)) + s.Equal(50*time.Second, timestamp.DurationValue(activity1Attributes.StartToCloseTimeout)) + s.Equal(5*time.Second, timestamp.DurationValue(activity1Attributes.HeartbeatTimeout)) +} + +func (s *engineSuite) TestRespondWorkflowTaskCompleted_SignalTaskGeneration() { + resp := s.testRespondWorkflowTaskCompletedSignalGeneration(false) + s.NotNil(resp.GetStartedResponse()) +} + +func (s *engineSuite) TestRespondWorkflowTaskCompleted_SkipSignalTaskGeneration() { + resp := s.testRespondWorkflowTaskCompletedSignalGeneration(true) + s.Nil(resp.GetStartedResponse()) +} + +func (s *engineSuite) testRespondWorkflowTaskCompletedSignalGeneration(skipGenerateTask bool) *historyservice.RespondWorkflowTaskCompletedResponse { + we := commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + } + tl := "testTaskQueue" + tt := &tokenspb.Task{ + Attempt: 1, + NamespaceId: tests.NamespaceID.String(), + WorkflowId: tests.WorkflowID, + RunId: we.GetRunId(), + 
ScheduledEventId: 2, + } + taskToken, _ := tt.Marshal() + identity := "testIdentity" + + signal := workflowservice.SignalWorkflowExecutionRequest{ + Namespace: tests.NamespaceID.String(), + WorkflowExecution: &we, + Identity: identity, + SignalName: "test signal name", + Input: payloads.EncodeString("test input"), + SkipGenerateWorkflowTask: skipGenerateTask, + RequestId: uuid.New(), + } + signalRequest := &historyservice.SignalWorkflowExecutionRequest{ + NamespaceId: tests.NamespaceID.String(), + SignalRequest: &signal, + } + + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, + tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) + addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 90*time.Second, 200*time.Second, identity) + wt := addWorkflowTaskScheduledEvent(ms) + addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) + + wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil).AnyTimes() + + _, err := s.mockHistoryEngine.SignalWorkflowExecution(context.Background(), signalRequest) + s.NoError(err) + + var commands []*commandpb.Command + resp, err := s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ + NamespaceId: tests.NamespaceID.String(), + CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ + TaskToken: taskToken, + Commands: commands, + Identity: identity, + ReturnNewWorkflowTask: true, + }, + }) + s.NoError(err) + s.NotNil(resp) + + s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, ms.GetExecutionState().State) + + return resp +} + +func (s *engineSuite) TestRespondWorkflowTaskCompleted_ActivityEagerExecution_NotCancelled() { + namespaceID := tests.NamespaceID + we := commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + } + tl := "testTaskQueue" + tt := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: tests.WorkflowID, + RunId: we.GetRunId(), + ScheduledEventId: 2, + } + taskToken, _ := tt.Marshal() + identity := "testIdentity" + input := payloads.EncodeString("input") + + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, + tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) + addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 90*time.Second, 200*time.Second, identity) + wt := addWorkflowTaskScheduledEvent(ms) + addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) + + scheduleToCloseTimeout := timestamp.DurationPtr(90 * time.Second) + scheduleToStartTimeout := timestamp.DurationPtr(10 * time.Second) + startToCloseTimeout := timestamp.DurationPtr(50 * time.Second) + heartbeatTimeout := timestamp.DurationPtr(5 * time.Second) + commands := []*commandpb.Command{ + { + CommandType: enumspb.COMMAND_TYPE_SCHEDULE_ACTIVITY_TASK, + Attributes: &commandpb.Command_ScheduleActivityTaskCommandAttributes{ScheduleActivityTaskCommandAttributes: &commandpb.ScheduleActivityTaskCommandAttributes{ + ActivityId: "activity1", + ActivityType: &commonpb.ActivityType{Name: "activity_type1"}, + TaskQueue: &taskqueuepb.TaskQueue{Name: tl}, + Input: input, + 
ScheduleToCloseTimeout: scheduleToCloseTimeout, + ScheduleToStartTimeout: scheduleToStartTimeout, + StartToCloseTimeout: startToCloseTimeout, + HeartbeatTimeout: heartbeatTimeout, + RequestEagerExecution: false, + }}, + }, + { + CommandType: enumspb.COMMAND_TYPE_SCHEDULE_ACTIVITY_TASK, + Attributes: &commandpb.Command_ScheduleActivityTaskCommandAttributes{ScheduleActivityTaskCommandAttributes: &commandpb.ScheduleActivityTaskCommandAttributes{ + ActivityId: "activity2", + ActivityType: &commonpb.ActivityType{Name: "activity_type2"}, + TaskQueue: &taskqueuepb.TaskQueue{Name: tl}, + Input: input, + ScheduleToCloseTimeout: scheduleToCloseTimeout, + ScheduleToStartTimeout: scheduleToStartTimeout, + StartToCloseTimeout: startToCloseTimeout, + HeartbeatTimeout: heartbeatTimeout, + RequestEagerExecution: true, + }}, + }, + } + + wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) + + resp, err := s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ + NamespaceId: tests.NamespaceID.String(), + CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ + TaskToken: taskToken, + Commands: commands, + Identity: identity, + }, + }) + s.NoError(err) + ms2 := s.getMutableState(tests.NamespaceID, we) + s.Equal(int64(7), ms2.GetNextEventID()) + s.Equal(int64(3), ms2.GetExecutionInfo().LastWorkflowTaskStartedEventId) + s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, ms2.GetExecutionState().State) + s.False(ms2.HasPendingWorkflowTask()) + + ai1, ok := ms2.GetActivityByActivityID("activity1") + s.True(ok) + s.Equal(common.EmptyEventID, ai1.StartedEventId) + + ai2, ok := ms2.GetActivityByActivityID("activity2") + s.True(ok) + s.Equal(common.TransientEventID, ai2.StartedEventId) + s.NotZero(ai2.StartedTime) + + scheduledEvent := s.getActivityScheduledEvent(ms2, ai2.ScheduledEventId) + + s.Len(resp.ActivityTasks, 1) + activityTask := resp.ActivityTasks[0] + s.Equal("activity2", activityTask.ActivityId) + s.Equal("activity_type2", activityTask.ActivityType.GetName()) + s.Equal(input, activityTask.Input) + s.Equal(we, *activityTask.WorkflowExecution) + s.Equal(scheduledEvent.EventTime, activityTask.CurrentAttemptScheduledTime) + s.Equal(scheduledEvent.EventTime, activityTask.ScheduledTime) + s.Equal(*scheduleToCloseTimeout, *activityTask.ScheduleToCloseTimeout) + s.Equal(startToCloseTimeout, activityTask.StartToCloseTimeout) + s.Equal(heartbeatTimeout, activityTask.HeartbeatTimeout) + s.Equal(int32(1), activityTask.Attempt) + s.Nil(activityTask.HeartbeatDetails) + s.Equal(tests.LocalNamespaceEntry.Name().String(), activityTask.WorkflowNamespace) +} + +func (s *engineSuite) TestRespondWorkflowTaskCompleted_ActivityEagerExecution_Cancelled() { + namespaceID := tests.NamespaceID + we := commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + } + tl := "testTaskQueue" + tt := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: tests.WorkflowID, + RunId: we.GetRunId(), + ScheduledEventId: 2, + } + taskToken, _ := tt.Marshal() + identity := "testIdentity" + input := payloads.EncodeString("input") + + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, + 
tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) + addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 90*time.Second, 200*time.Second, identity) + wt := addWorkflowTaskScheduledEvent(ms) + addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) + + scheduleToCloseTimeout := timestamp.DurationPtr(90 * time.Second) + scheduleToStartTimeout := timestamp.DurationPtr(10 * time.Second) + startToCloseTimeout := timestamp.DurationPtr(50 * time.Second) + heartbeatTimeout := timestamp.DurationPtr(5 * time.Second) + commands := []*commandpb.Command{ + { + CommandType: enumspb.COMMAND_TYPE_SCHEDULE_ACTIVITY_TASK, + Attributes: &commandpb.Command_ScheduleActivityTaskCommandAttributes{ScheduleActivityTaskCommandAttributes: &commandpb.ScheduleActivityTaskCommandAttributes{ + ActivityId: "activity1", + ActivityType: &commonpb.ActivityType{Name: "activity_type1"}, + TaskQueue: &taskqueuepb.TaskQueue{Name: tl}, + Input: input, + ScheduleToCloseTimeout: scheduleToCloseTimeout, + ScheduleToStartTimeout: scheduleToStartTimeout, + StartToCloseTimeout: startToCloseTimeout, + HeartbeatTimeout: heartbeatTimeout, + RequestEagerExecution: true, + }}, + }, + { + CommandType: enumspb.COMMAND_TYPE_REQUEST_CANCEL_ACTIVITY_TASK, + Attributes: &commandpb.Command_RequestCancelActivityTaskCommandAttributes{RequestCancelActivityTaskCommandAttributes: &commandpb.RequestCancelActivityTaskCommandAttributes{ + ScheduledEventId: 5, + }}, + }, + } + + wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) + + resp, err := s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ + NamespaceId: tests.NamespaceID.String(), + CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ + TaskToken: taskToken, + Commands: commands, + Identity: identity, + ReturnNewWorkflowTask: true, + }, + }) + s.NoError(err) + ms2 := s.getMutableState(tests.NamespaceID, we) + s.Equal(int64(10), ms2.GetNextEventID()) // activity scheduled, request cancel, cancelled, workflow task scheduled, started + s.Equal(int64(3), ms2.GetExecutionInfo().LastWorkflowTaskStartedEventId) + s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, ms2.GetExecutionState().State) + s.True(ms2.HasPendingWorkflowTask()) + + _, ok := ms2.GetActivityByActivityID("activity1") + s.False(ok) + + s.Len(resp.ActivityTasks, 0) + s.NotNil(resp.StartedResponse) + s.Equal(int64(10), resp.StartedResponse.NextEventId) + s.Equal(int64(3), resp.StartedResponse.PreviousStartedEventId) +} + +func (s *engineSuite) TestRespondWorkflowTaskCompleted_ActivityEagerExecution_WorkflowClosed() { + namespaceID := tests.NamespaceID + we := commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + } + tl := "testTaskQueue" + tt := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: tests.WorkflowID, + RunId: we.GetRunId(), + ScheduledEventId: 2, + } + taskToken, _ := tt.Marshal() + identity := "testIdentity" + input := payloads.EncodeString("input") + + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, + tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) + 
addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 90*time.Second, 200*time.Second, identity) + wt := addWorkflowTaskScheduledEvent(ms) + addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) + + scheduleToCloseTimeout := timestamp.DurationPtr(90 * time.Second) + scheduleToStartTimeout := timestamp.DurationPtr(10 * time.Second) + startToCloseTimeout := timestamp.DurationPtr(50 * time.Second) + heartbeatTimeout := timestamp.DurationPtr(5 * time.Second) + commands := []*commandpb.Command{ + { + CommandType: enumspb.COMMAND_TYPE_SCHEDULE_ACTIVITY_TASK, + Attributes: &commandpb.Command_ScheduleActivityTaskCommandAttributes{ScheduleActivityTaskCommandAttributes: &commandpb.ScheduleActivityTaskCommandAttributes{ + ActivityId: "activity1", + ActivityType: &commonpb.ActivityType{Name: "activity_type1"}, + TaskQueue: &taskqueuepb.TaskQueue{Name: tl}, + Input: input, + ScheduleToCloseTimeout: scheduleToCloseTimeout, + ScheduleToStartTimeout: scheduleToStartTimeout, + StartToCloseTimeout: startToCloseTimeout, + HeartbeatTimeout: heartbeatTimeout, + RequestEagerExecution: true, + }}, + }, + { + CommandType: enumspb.COMMAND_TYPE_COMPLETE_WORKFLOW_EXECUTION, + Attributes: &commandpb.Command_CompleteWorkflowExecutionCommandAttributes{CompleteWorkflowExecutionCommandAttributes: &commandpb.CompleteWorkflowExecutionCommandAttributes{ + Result: payloads.EncodeString("complete"), + }}, + }, + } + + wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) + + resp, err := s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ + NamespaceId: tests.NamespaceID.String(), + CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ + TaskToken: taskToken, + Commands: commands, + Identity: identity, + ReturnNewWorkflowTask: true, + }, + }) + s.NoError(err) + ms2 := s.getMutableState(tests.NamespaceID, we) + s.Equal(int64(7), ms2.GetNextEventID()) // activity scheduled, workflow completed + s.Equal(int64(3), ms2.GetExecutionInfo().LastWorkflowTaskStartedEventId) + s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED, ms2.GetExecutionState().State) + s.False(ms2.HasPendingWorkflowTask()) + + activityInfo, ok := ms2.GetActivityByActivityID("activity1") + s.True(ok) + s.Equal(int64(5), activityInfo.ScheduledEventId) // activity scheduled + s.Equal(common.EmptyEventID, activityInfo.StartedEventId) // activity not started + + s.Len(resp.ActivityTasks, 0) + s.Nil(resp.StartedResponse) +} + +func (s *engineSuite) TestRespondWorkflowTaskCompleted_WorkflowTaskHeartbeatTimeout() { + namespaceID := tests.NamespaceID + we := commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + } + tl := "testTaskQueue" + tt := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: we.WorkflowId, + RunId: we.RunId, + ScheduledEventId: 2, + } + taskToken, _ := tt.Marshal() + identity := "testIdentity" + + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, + tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) + addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 
50*time.Second, 200*time.Second, identity) + wt := addWorkflowTaskScheduledEvent(ms) + addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) + ms.GetExecutionInfo().WorkflowTaskOriginalScheduledTime = timestamp.TimePtr(time.Now().UTC().Add(-time.Hour)) + + var commands []*commandpb.Command + + wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) + + _, err := s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ + NamespaceId: tests.NamespaceID.String(), + CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ + ForceCreateNewWorkflowTask: true, + TaskToken: taskToken, + Commands: commands, + Identity: identity, + }, + }) + s.Error(err, "workflow task heartbeat timeout") +} + +func (s *engineSuite) TestRespondWorkflowTaskCompleted_WorkflowTaskHeartbeatNotTimeout() { + namespaceID := tests.NamespaceID + we := commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + } + tl := "testTaskQueue" + tt := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: we.WorkflowId, + RunId: we.RunId, + ScheduledEventId: 2, + } + taskToken, _ := tt.Marshal() + identity := "testIdentity" + + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, + tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) + addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) + wt := addWorkflowTaskScheduledEvent(ms) + addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) + ms.GetExecutionInfo().WorkflowTaskOriginalScheduledTime = timestamp.TimePtr(time.Now().UTC().Add(-time.Minute)) + + var commands []*commandpb.Command + + wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) + + _, err := s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ + NamespaceId: tests.NamespaceID.String(), + CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ + ForceCreateNewWorkflowTask: true, + TaskToken: taskToken, + Commands: commands, + Identity: identity, + }, + }) + s.Nil(err) +} + +func (s *engineSuite) TestRespondWorkflowTaskCompleted_WorkflowTaskHeartbeatNotTimeout_ZeroOrignalScheduledTime() { + namespaceID := tests.NamespaceID + we := commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + } + tl := "testTaskQueue" + tt := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: we.WorkflowId, + RunId: we.RunId, + ScheduledEventId: 2, + } + taskToken, _ := tt.Marshal() + identity := "testIdentity" + + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, + tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) + addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 
100*time.Second, 50*time.Second, 200*time.Second, identity) + wt := addWorkflowTaskScheduledEvent(ms) + addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) + ms.GetExecutionInfo().WorkflowTaskOriginalScheduledTime = nil + + var commands []*commandpb.Command + + wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) + + _, err := s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ + NamespaceId: tests.NamespaceID.String(), + CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ + ForceCreateNewWorkflowTask: true, + TaskToken: taskToken, + Commands: commands, + Identity: identity, + }, + }) + s.Nil(err) +} + +func (s *engineSuite) TestRespondWorkflowTaskCompletedCompleteWorkflowSuccess() { + namespaceID := tests.NamespaceID + we := commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + } + tl := "testTaskQueue" + tt := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: we.WorkflowId, + RunId: we.RunId, + ScheduledEventId: 2, + } + taskToken, _ := tt.Marshal() + identity := "testIdentity" + workflowResult := payloads.EncodeString("success") + + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, + tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) + addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) + wt := addWorkflowTaskScheduledEvent(ms) + addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) + + commands := []*commandpb.Command{{ + CommandType: enumspb.COMMAND_TYPE_COMPLETE_WORKFLOW_EXECUTION, + Attributes: &commandpb.Command_CompleteWorkflowExecutionCommandAttributes{CompleteWorkflowExecutionCommandAttributes: &commandpb.CompleteWorkflowExecutionCommandAttributes{ + Result: workflowResult, + }}, + }} + + wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) + + _, err := s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ + NamespaceId: tests.NamespaceID.String(), + CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ + TaskToken: taskToken, + Commands: commands, + Identity: identity, + }, + }) + s.NoError(err) + ms2 := s.getMutableState(tests.NamespaceID, we) + s.Equal(int64(6), ms2.GetNextEventID()) + s.Equal(int64(3), ms2.GetExecutionInfo().LastWorkflowTaskStartedEventId) + s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED, ms2.GetExecutionState().State) + s.False(ms2.HasPendingWorkflowTask()) +} + +func (s *engineSuite) TestRespondWorkflowTaskCompletedFailWorkflowSuccess() { + namespaceID := tests.NamespaceID + we := commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + } + tl := "testTaskQueue" + tt := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + 
WorkflowId: we.WorkflowId, + RunId: we.RunId, + ScheduledEventId: 2, + } + taskToken, _ := tt.Marshal() + identity := "testIdentity" + reason := "fail workflow reason" + + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, + tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) + addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) + wt := addWorkflowTaskScheduledEvent(ms) + addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) + + commands := []*commandpb.Command{{ + CommandType: enumspb.COMMAND_TYPE_FAIL_WORKFLOW_EXECUTION, + Attributes: &commandpb.Command_FailWorkflowExecutionCommandAttributes{FailWorkflowExecutionCommandAttributes: &commandpb.FailWorkflowExecutionCommandAttributes{ + Failure: failure.NewServerFailure(reason, false), + }}, + }} + + wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) + + _, err := s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ + NamespaceId: tests.NamespaceID.String(), + CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ + TaskToken: taskToken, + Commands: commands, + Identity: identity, + }, + }) + s.NoError(err) + ms2 := s.getMutableState(tests.NamespaceID, we) + s.Equal(int64(6), ms2.GetNextEventID()) + s.Equal(int64(3), ms2.GetExecutionInfo().LastWorkflowTaskStartedEventId) + s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED, ms2.GetExecutionState().State) + s.False(ms2.HasPendingWorkflowTask()) +} + +func (s *engineSuite) TestRespondWorkflowTaskCompletedSignalExternalWorkflowSuccess() { + namespaceID := tests.NamespaceID + we := commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + } + tl := "testTaskQueue" + tt := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: we.WorkflowId, + RunId: we.RunId, + ScheduledEventId: 2, + } + taskToken, _ := tt.Marshal() + identity := "testIdentity" + + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, + tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) + addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) + wt := addWorkflowTaskScheduledEvent(ms) + addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) + + commands := []*commandpb.Command{{ + CommandType: enumspb.COMMAND_TYPE_SIGNAL_EXTERNAL_WORKFLOW_EXECUTION, + Attributes: &commandpb.Command_SignalExternalWorkflowExecutionCommandAttributes{SignalExternalWorkflowExecutionCommandAttributes: &commandpb.SignalExternalWorkflowExecutionCommandAttributes{ + Namespace: tests.Namespace.String(), + Execution: &commonpb.WorkflowExecution{ + WorkflowId: we.WorkflowId, + RunId: we.RunId, + }, + SignalName: "signal", + Input: payloads.EncodeString("test input"), + }}, + }} + + wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + 
s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) + + _, err := s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ + NamespaceId: tests.NamespaceID.String(), + CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ + TaskToken: taskToken, + Commands: commands, + Identity: identity, + }, + }) + s.NoError(err) + ms2 := s.getMutableState(tests.NamespaceID, we) + s.Equal(int64(6), ms2.GetNextEventID()) + s.Equal(int64(3), ms2.GetExecutionInfo().LastWorkflowTaskStartedEventId) +} + +func (s *engineSuite) TestRespondWorkflowTaskCompletedStartChildWorkflowWithAbandonPolicy() { + namespaceID := tests.NamespaceID + we := commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + } + tl := "testTaskQueue" + tt := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: we.WorkflowId, + RunId: we.RunId, + ScheduledEventId: 2, + } + taskToken, _ := tt.Marshal() + identity := "testIdentity" + + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, + tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) + addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) + wt := addWorkflowTaskScheduledEvent(ms) + addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) + + abandon := enumspb.PARENT_CLOSE_POLICY_ABANDON + commands := []*commandpb.Command{{ + CommandType: enumspb.COMMAND_TYPE_START_CHILD_WORKFLOW_EXECUTION, + Attributes: &commandpb.Command_StartChildWorkflowExecutionCommandAttributes{StartChildWorkflowExecutionCommandAttributes: &commandpb.StartChildWorkflowExecutionCommandAttributes{ + Namespace: tests.Namespace.String(), + WorkflowId: "child-workflow-id", + WorkflowType: &commonpb.WorkflowType{ + Name: "child-workflow-type", + }, + ParentClosePolicy: abandon, + }}, + }} + + wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) + s.mockShard.Resource.SearchAttributesMapperProvider.EXPECT(). + GetMapper(tests.Namespace). 
+ Return(&searchattribute.TestMapper{Namespace: tests.Namespace.String()}, nil) + + _, err := s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ + NamespaceId: tests.NamespaceID.String(), + CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ + TaskToken: taskToken, + Commands: commands, + Identity: identity, + }, + }) + s.NoError(err) + ms2 := s.getMutableState(tests.NamespaceID, we) + s.Equal(int64(6), ms2.GetNextEventID()) + s.Equal(int64(3), ms2.GetExecutionInfo().LastWorkflowTaskStartedEventId) + s.Equal(1, len(ms2.GetPendingChildExecutionInfos())) + var childID int64 + for c := range ms2.GetPendingChildExecutionInfos() { + childID = c + break + } + s.Equal("child-workflow-id", ms2.GetPendingChildExecutionInfos()[childID].StartedWorkflowId) + s.Equal(enumspb.PARENT_CLOSE_POLICY_ABANDON, ms2.GetPendingChildExecutionInfos()[childID].ParentClosePolicy) +} + +func (s *engineSuite) TestRespondWorkflowTaskCompletedStartChildWorkflowWithTerminatePolicy() { + namespaceID := tests.NamespaceID + we := commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + } + tl := "testTaskQueue" + tt := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: we.WorkflowId, + RunId: we.RunId, + ScheduledEventId: 2, + } + taskToken, _ := tt.Marshal() + identity := "testIdentity" + + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, + tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) + addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) + wt := addWorkflowTaskScheduledEvent(ms) + addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) + + terminate := enumspb.PARENT_CLOSE_POLICY_TERMINATE + commands := []*commandpb.Command{{ + CommandType: enumspb.COMMAND_TYPE_START_CHILD_WORKFLOW_EXECUTION, + Attributes: &commandpb.Command_StartChildWorkflowExecutionCommandAttributes{StartChildWorkflowExecutionCommandAttributes: &commandpb.StartChildWorkflowExecutionCommandAttributes{ + Namespace: tests.Namespace.String(), + WorkflowId: "child-workflow-id", + WorkflowType: &commonpb.WorkflowType{ + Name: "child-workflow-type", + }, + ParentClosePolicy: terminate, + }}, + }} + + wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) + s.mockShard.Resource.SearchAttributesMapperProvider.EXPECT(). + GetMapper(tests.Namespace). 
+ Return(&searchattribute.TestMapper{Namespace: tests.Namespace.String()}, nil) + + _, err := s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ + NamespaceId: tests.NamespaceID.String(), + CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ + TaskToken: taskToken, + Commands: commands, + Identity: identity, + }, + }) + s.NoError(err) + ms2 := s.getMutableState(tests.NamespaceID, we) + s.Equal(int64(6), ms2.GetNextEventID()) + s.Equal(int64(3), ms2.GetExecutionInfo().LastWorkflowTaskStartedEventId) + s.Equal(1, len(ms2.GetPendingChildExecutionInfos())) + var childID int64 + for c := range ms2.GetPendingChildExecutionInfos() { + childID = c + break + } + s.Equal("child-workflow-id", ms2.GetPendingChildExecutionInfos()[childID].StartedWorkflowId) + s.Equal(enumspb.PARENT_CLOSE_POLICY_TERMINATE, ms2.GetPendingChildExecutionInfos()[childID].ParentClosePolicy) +} + +func (s *engineSuite) TestRespondWorkflowTaskCompletedSignalExternalWorkflowFailed_UnKnownNamespace() { + namespaceID := tests.NamespaceID + we := commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + } + tl := "testTaskQueue" + tt := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: we.WorkflowId, + RunId: we.RunId, + ScheduledEventId: 2, + } + taskToken, _ := tt.Marshal() + identity := "testIdentity" + foreignNamespace := namespace.Name("unknown namespace") + + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, + tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) + addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) + wt := addWorkflowTaskScheduledEvent(ms) + addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) + + commands := []*commandpb.Command{{ + CommandType: enumspb.COMMAND_TYPE_SIGNAL_EXTERNAL_WORKFLOW_EXECUTION, + Attributes: &commandpb.Command_SignalExternalWorkflowExecutionCommandAttributes{SignalExternalWorkflowExecutionCommandAttributes: &commandpb.SignalExternalWorkflowExecutionCommandAttributes{ + Namespace: foreignNamespace.String(), + Execution: &commonpb.WorkflowExecution{ + WorkflowId: we.WorkflowId, + RunId: we.RunId, + }, + SignalName: "signal", + Input: payloads.EncodeString("test input"), + }}, + }} + + wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + s.mockNamespaceCache.EXPECT().GetNamespace(foreignNamespace).Return( + nil, errors.New("get foreign namespace error"), + ) + + _, err := s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ + NamespaceId: tests.NamespaceID.String(), + CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ + TaskToken: taskToken, + Commands: commands, + Identity: identity, + }, + }) + + s.NotNil(err) +} + +func (s *engineSuite) TestRespondActivityTaskCompletedInvalidToken() { + + invalidToken, _ := json.Marshal("bad token") + identity := "testIdentity" + + _, err := s.mockHistoryEngine.RespondActivityTaskCompleted(context.Background(), &historyservice.RespondActivityTaskCompletedRequest{ + NamespaceId: tests.NamespaceID.String(), + CompleteRequest: &workflowservice.RespondActivityTaskCompletedRequest{ + TaskToken: invalidToken, + 
Result: nil, + Identity: identity, + }, + }) + + s.NotNil(err) + s.IsType(&serviceerror.InvalidArgument{}, err) +} + +func (s *engineSuite) TestRespondActivityTaskCompletedIfNoExecution() { + namespaceID := tests.NamespaceID + tt := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + ScheduledEventId: 2, + } + taskToken, _ := tt.Marshal() + identity := "testIdentity" + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(nil, serviceerror.NewNotFound("")) + + _, err := s.mockHistoryEngine.RespondActivityTaskCompleted(context.Background(), &historyservice.RespondActivityTaskCompletedRequest{ + NamespaceId: tests.NamespaceID.String(), + CompleteRequest: &workflowservice.RespondActivityTaskCompletedRequest{ + TaskToken: taskToken, + Identity: identity, + }, + }) + s.NotNil(err) + s.IsType(&serviceerror.NotFound{}, err) +} + +func (s *engineSuite) TestRespondActivityTaskCompletedIfNoRunID() { + namespaceID := tests.NamespaceID + tt := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: tests.WorkflowID, + ScheduledEventId: 2, + } + taskToken, _ := tt.Marshal() + identity := "testIdentity" + + s.mockExecutionMgr.EXPECT().GetCurrentExecution(gomock.Any(), gomock.Any()).Return(nil, serviceerror.NewNotFound("")) + + _, err := s.mockHistoryEngine.RespondActivityTaskCompleted(context.Background(), &historyservice.RespondActivityTaskCompletedRequest{ + NamespaceId: tests.NamespaceID.String(), + CompleteRequest: &workflowservice.RespondActivityTaskCompletedRequest{ + TaskToken: taskToken, + Identity: identity, + }, + }) + s.NotNil(err) + s.IsType(&serviceerror.NotFound{}, err) +} + +func (s *engineSuite) TestRespondActivityTaskCompletedIfGetExecutionFailed() { + namespaceID := tests.NamespaceID + tt := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + ScheduledEventId: 2, + } + taskToken, _ := tt.Marshal() + identity := "testIdentity" + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(nil, errors.New("FAILED")) + + _, err := s.mockHistoryEngine.RespondActivityTaskCompleted(context.Background(), &historyservice.RespondActivityTaskCompletedRequest{ + NamespaceId: tests.NamespaceID.String(), + CompleteRequest: &workflowservice.RespondActivityTaskCompletedRequest{ + TaskToken: taskToken, + Identity: identity, + }, + }) + s.EqualError(err, "FAILED") +} + +func (s *engineSuite) TestRespondActivityTaskCompletedIfNoAIdProvided() { + namespaceID := tests.NamespaceID + execution := commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + } + taskqueue := "testTaskQueue" + identity := "testIdentity" + tt := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: tests.WorkflowID, + ScheduledEventId: common.EmptyEventID, + } + taskToken, _ := tt.Marshal() + + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, + tests.LocalNamespaceEntry, log.NewTestLogger(), tests.RunID) + addWorkflowExecutionStartedEvent(ms, execution, "wType", taskqueue, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) + addWorkflowTaskScheduledEvent(ms) + wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + gceResponse := &persistence.GetCurrentExecutionResponse{RunID: tests.RunID} + + 
s.mockExecutionMgr.EXPECT().GetCurrentExecution(gomock.Any(), gomock.Any()).Return(gceResponse, nil) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + + _, err := s.mockHistoryEngine.RespondActivityTaskCompleted(context.Background(), &historyservice.RespondActivityTaskCompletedRequest{ + NamespaceId: tests.NamespaceID.String(), + CompleteRequest: &workflowservice.RespondActivityTaskCompletedRequest{ + TaskToken: taskToken, + Identity: identity, + }, + }) + s.EqualError(err, "activityID cannot be empty") +} + +func (s *engineSuite) TestRespondActivityTaskCompletedIfNotFound() { + namespaceID := tests.NamespaceID + tt := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: tests.WorkflowID, + ScheduledEventId: common.EmptyEventID, + ActivityId: "aid", + } + taskToken, _ := tt.Marshal() + execution := commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + } + taskqueue := "testTaskQueue" + identity := "testIdentity" + + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, + tests.LocalNamespaceEntry, log.NewTestLogger(), tests.RunID) + addWorkflowExecutionStartedEvent(ms, execution, "wType", taskqueue, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) + addWorkflowTaskScheduledEvent(ms) + wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + gceResponse := &persistence.GetCurrentExecutionResponse{RunID: tests.RunID} + + s.mockExecutionMgr.EXPECT().GetCurrentExecution(gomock.Any(), gomock.Any()).Return(gceResponse, nil) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + + _, err := s.mockHistoryEngine.RespondActivityTaskCompleted(context.Background(), &historyservice.RespondActivityTaskCompletedRequest{ + NamespaceId: tests.NamespaceID.String(), + CompleteRequest: &workflowservice.RespondActivityTaskCompletedRequest{ + TaskToken: taskToken, + Identity: identity, + }, + }) + s.Error(err) +} + +func (s *engineSuite) TestRespondActivityTaskCompletedUpdateExecutionFailed() { + namespaceID := tests.NamespaceID + we := commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + } + tl := "testTaskQueue" + tt := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: we.WorkflowId, + RunId: we.RunId, + ScheduledEventId: 5, + } + taskToken, _ := tt.Marshal() + identity := "testIdentity" + activityID := "activity1_id" + activityType := "activity_type1" + activityInput := payloads.EncodeString("input1") + activityResult := payloads.EncodeString("activity result") + + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, + tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) + addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) + wt := addWorkflowTaskScheduledEvent(ms) + workflowTaskStartedEvent := addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) + workflowTaskCompletedEvent := addWorkflowTaskCompletedEvent(&s.Suite, ms, wt.ScheduledEventID, workflowTaskStartedEvent.EventId, identity) + activityScheduledEvent, _ := addActivityTaskScheduledEvent(ms, workflowTaskCompletedEvent.EventId, activityID, activityType, tl, activityInput, 100*time.Second, 10*time.Second, 1*time.Second, 5*time.Second) + 
addActivityTaskStartedEvent(ms, activityScheduledEvent.EventId, identity) + + wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, errors.New("FAILED")) + s.mockShardManager.EXPECT().UpdateShard(gomock.Any(), gomock.Any()).Return(nil).AnyTimes() // might be called in background goroutine + + _, err := s.mockHistoryEngine.RespondActivityTaskCompleted(context.Background(), &historyservice.RespondActivityTaskCompletedRequest{ + NamespaceId: tests.NamespaceID.String(), + CompleteRequest: &workflowservice.RespondActivityTaskCompletedRequest{ + TaskToken: taskToken, + Result: activityResult, + Identity: identity, + }, + }) + s.EqualError(err, "FAILED") +} + +func (s *engineSuite) TestRespondActivityTaskCompletedIfTaskCompleted() { + namespaceID := tests.NamespaceID + we := commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + } + tl := "testTaskQueue" + tt := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: we.WorkflowId, + RunId: we.RunId, + ScheduledEventId: 5, + } + taskToken, _ := tt.Marshal() + identity := "testIdentity" + activityID := "activity1_id" + activityType := "activity_type1" + activityInput := payloads.EncodeString("input1") + activityResult := payloads.EncodeString("activity result") + + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, + tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) + addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) + wt := addWorkflowTaskScheduledEvent(ms) + workflowTaskStartedEvent := addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) + workflowTaskCompletedEvent := addWorkflowTaskCompletedEvent(&s.Suite, ms, wt.ScheduledEventID, workflowTaskStartedEvent.EventId, identity) + activityScheduledEvent, _ := addActivityTaskScheduledEvent(ms, workflowTaskCompletedEvent.EventId, activityID, activityType, tl, activityInput, 100*time.Second, 10*time.Second, 1*time.Second, 5*time.Second) + activityStartedEvent := addActivityTaskStartedEvent(ms, activityScheduledEvent.EventId, identity) + addActivityTaskCompletedEvent(ms, activityScheduledEvent.EventId, activityStartedEvent.EventId, + activityResult, identity) + addWorkflowTaskScheduledEvent(ms) + + wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + + _, err := s.mockHistoryEngine.RespondActivityTaskCompleted(context.Background(), &historyservice.RespondActivityTaskCompletedRequest{ + NamespaceId: tests.NamespaceID.String(), + CompleteRequest: &workflowservice.RespondActivityTaskCompletedRequest{ + TaskToken: taskToken, + Result: activityResult, + Identity: identity, + }, + }) + s.NotNil(err) + s.IsType(&serviceerror.NotFound{}, err) +} + +func (s *engineSuite) TestRespondActivityTaskCompletedIfTaskNotStarted() { + namespaceID := tests.NamespaceID + we := commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + } + tl := "testTaskQueue" + tt := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: 
we.WorkflowId, + RunId: we.RunId, + ScheduledEventId: 5, + } + taskToken, _ := tt.Marshal() + identity := "testIdentity" + activityID := "activity1_id" + activityType := "activity_type1" + activityInput := payloads.EncodeString("input1") + activityResult := payloads.EncodeString("activity result") + + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, + tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) + addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) + wt := addWorkflowTaskScheduledEvent(ms) + workflowTaskStartedEvent := addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) + workflowTaskCompletedEvent := addWorkflowTaskCompletedEvent(&s.Suite, ms, wt.ScheduledEventID, workflowTaskStartedEvent.EventId, identity) + addActivityTaskScheduledEvent(ms, workflowTaskCompletedEvent.EventId, activityID, activityType, tl, activityInput, 100*time.Second, 10*time.Second, 1*time.Second, 5*time.Second) + + wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + + _, err := s.mockHistoryEngine.RespondActivityTaskCompleted(context.Background(), &historyservice.RespondActivityTaskCompletedRequest{ + NamespaceId: tests.NamespaceID.String(), + CompleteRequest: &workflowservice.RespondActivityTaskCompletedRequest{ + TaskToken: taskToken, + Result: activityResult, + Identity: identity, + }, + }) + s.NotNil(err) + s.IsType(&serviceerror.NotFound{}, err) +} + +func (s *engineSuite) TestRespondActivityTaskCompletedConflictOnUpdate() { + namespaceID := tests.NamespaceID + we := commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + } + tl := "testTaskQueue" + tt := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: we.WorkflowId, + RunId: we.RunId, + ScheduledEventId: 5, + } + taskToken, _ := tt.Marshal() + identity := "testIdentity" + activityID := "activity1_id" + activityType := "activity_type1" + activityInput := payloads.EncodeString("input1") + activityResult := payloads.EncodeString("activity result") + + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, + tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) + addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 100*time.Second, 100*time.Second, identity) + wt := addWorkflowTaskScheduledEvent(ms) + workflowTaskStartedEvent := addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) + workflowTaskCompletedEvent := addWorkflowTaskCompletedEvent(&s.Suite, ms, wt.ScheduledEventID, workflowTaskStartedEvent.EventId, identity) + activityScheduledEvent, _ := addActivityTaskScheduledEvent(ms, workflowTaskCompletedEvent.EventId, activityID, activityType, tl, activityInput, 100*time.Second, 10*time.Second, 1*time.Second, 5*time.Second) + addActivityTaskStartedEvent(ms, activityScheduledEvent.EventId, identity) + + wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, &persistence.ConditionFailedError{}) + + _, err 
:= s.mockHistoryEngine.RespondActivityTaskCompleted(context.Background(), &historyservice.RespondActivityTaskCompletedRequest{ + NamespaceId: tests.NamespaceID.String(), + CompleteRequest: &workflowservice.RespondActivityTaskCompletedRequest{ + TaskToken: taskToken, + Result: activityResult, + Identity: identity, + }, + }) + s.Equal(&persistence.ConditionFailedError{}, err) +} + +func (s *engineSuite) TestRespondActivityTaskCompletedSuccess() { + namespaceID := tests.NamespaceID + we := commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + } + tl := "testTaskQueue" + tt := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: we.WorkflowId, + RunId: we.RunId, + ScheduledEventId: 5, + } + taskToken, _ := tt.Marshal() + identity := "testIdentity" + activityID := "activity1_id" + activityType := "activity_type1" + activityInput := payloads.EncodeString("input1") + activityResult := payloads.EncodeString("activity result") + + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, + tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) + addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 100*time.Second, 100*time.Second, identity) + wt := addWorkflowTaskScheduledEvent(ms) + workflowTaskStartedEvent := addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) + workflowTaskCompletedEvent := addWorkflowTaskCompletedEvent(&s.Suite, ms, wt.ScheduledEventID, workflowTaskStartedEvent.EventId, identity) + activityScheduledEvent, _ := addActivityTaskScheduledEvent(ms, workflowTaskCompletedEvent.EventId, activityID, activityType, tl, activityInput, 100*time.Second, 10*time.Second, 1*time.Second, 5*time.Second) + addActivityTaskStartedEvent(ms, activityScheduledEvent.EventId, identity) + + wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) + + _, err := s.mockHistoryEngine.RespondActivityTaskCompleted(context.Background(), &historyservice.RespondActivityTaskCompletedRequest{ + NamespaceId: tests.NamespaceID.String(), + CompleteRequest: &workflowservice.RespondActivityTaskCompletedRequest{ + TaskToken: taskToken, + Result: activityResult, + Identity: identity, + }, + }) + s.NoError(err) + ms2 := s.getMutableState(tests.NamespaceID, we) + s.Equal(int64(9), ms2.GetNextEventID()) + s.Equal(int64(3), ms2.GetExecutionInfo().LastWorkflowTaskStartedEventId) + s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, ms2.GetExecutionState().State) + + s.True(ms2.HasPendingWorkflowTask()) + wt = ms2.GetWorkflowTaskByID(int64(8)) + s.NotNil(wt) + s.EqualValues(int64(100), wt.WorkflowTaskTimeout.Seconds()) + s.Equal(int64(8), wt.ScheduledEventID) + s.Equal(common.EmptyEventID, wt.StartedEventID) +} + +func (s *engineSuite) TestRespondActivityTaskCompletedByIdSuccess() { + namespaceID := tests.NamespaceID + we := commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + } + tl := "testTaskQueue" + + identity := "testIdentity" + activityID := "activity1_id" + activityType := "activity_type1" + activityInput := payloads.EncodeString("input1") + activityResult := payloads.EncodeString("activity result") + tt := &tokenspb.Task{ + Attempt: 1, + NamespaceId: 
namespaceID.String(), + WorkflowId: we.WorkflowId, + ScheduledEventId: common.EmptyEventID, + ActivityId: activityID, + } + taskToken, _ := tt.Marshal() + + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, + tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) + addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 100*time.Second, 100*time.Second, identity) + workflowTaskScheduledEvent := addWorkflowTaskScheduledEvent(ms) + workflowTaskStartedEvent := addWorkflowTaskStartedEvent(ms, workflowTaskScheduledEvent.ScheduledEventID, tl, identity) + workflowTaskCompletedEvent := addWorkflowTaskCompletedEvent(&s.Suite, ms, workflowTaskScheduledEvent.ScheduledEventID, workflowTaskStartedEvent.EventId, identity) + activityScheduledEvent, _ := addActivityTaskScheduledEvent(ms, workflowTaskCompletedEvent.EventId, activityID, activityType, tl, activityInput, 100*time.Second, 10*time.Second, 1*time.Second, 5*time.Second) + addActivityTaskStartedEvent(ms, activityScheduledEvent.EventId, identity) + + wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + gceResponse := &persistence.GetCurrentExecutionResponse{RunID: we.RunId} + + s.mockExecutionMgr.EXPECT().GetCurrentExecution(gomock.Any(), gomock.Any()).Return(gceResponse, nil) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) + + _, err := s.mockHistoryEngine.RespondActivityTaskCompleted(context.Background(), &historyservice.RespondActivityTaskCompletedRequest{ + NamespaceId: tests.NamespaceID.String(), + CompleteRequest: &workflowservice.RespondActivityTaskCompletedRequest{ + TaskToken: taskToken, + Result: activityResult, + Identity: identity, + }, + }) + s.NoError(err) + ms2 := s.getMutableState(tests.NamespaceID, we) + s.Equal(int64(9), ms2.GetNextEventID()) + s.Equal(int64(3), ms2.GetExecutionInfo().LastWorkflowTaskStartedEventId) + s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, ms2.GetExecutionState().State) + + s.True(ms2.HasPendingWorkflowTask()) + wt := ms2.GetWorkflowTaskByID(int64(8)) + s.NotNil(wt) + s.EqualValues(int64(100), wt.WorkflowTaskTimeout.Seconds()) + s.Equal(int64(8), wt.ScheduledEventID) + s.Equal(common.EmptyEventID, wt.StartedEventID) +} + +func (s *engineSuite) TestRespondActivityTaskFailedInvalidToken() { + + invalidToken, _ := json.Marshal("bad token") + identity := "testIdentity" + + _, err := s.mockHistoryEngine.RespondActivityTaskFailed(context.Background(), &historyservice.RespondActivityTaskFailedRequest{ + NamespaceId: tests.NamespaceID.String(), + FailedRequest: &workflowservice.RespondActivityTaskFailedRequest{ + TaskToken: invalidToken, + Identity: identity, + }, + }) + + s.NotNil(err) + s.IsType(&serviceerror.InvalidArgument{}, err) +} + +func (s *engineSuite) TestRespondActivityTaskFailedIfNoExecution() { + namespaceID := tests.NamespaceID + tt := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + ScheduledEventId: 2, + } + taskToken, _ := tt.Marshal() + identity := "testIdentity" + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(nil, + serviceerror.NewNotFound("")) + + _, err := s.mockHistoryEngine.RespondActivityTaskFailed(context.Background(), 
&historyservice.RespondActivityTaskFailedRequest{ + NamespaceId: tests.NamespaceID.String(), + FailedRequest: &workflowservice.RespondActivityTaskFailedRequest{ + TaskToken: taskToken, + Identity: identity, + }, + }) + s.NotNil(err) + s.IsType(&serviceerror.NotFound{}, err) +} + +func (s *engineSuite) TestRespondActivityTaskFailedIfNoRunID() { + namespaceID := tests.NamespaceID + tt := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: tests.WorkflowID, + ScheduledEventId: 2, + } + taskToken, _ := tt.Marshal() + identity := "testIdentity" + + s.mockExecutionMgr.EXPECT().GetCurrentExecution(gomock.Any(), gomock.Any()).Return(nil, + serviceerror.NewNotFound("")) + + _, err := s.mockHistoryEngine.RespondActivityTaskFailed(context.Background(), &historyservice.RespondActivityTaskFailedRequest{ + NamespaceId: tests.NamespaceID.String(), + FailedRequest: &workflowservice.RespondActivityTaskFailedRequest{ + TaskToken: taskToken, + Identity: identity, + }, + }) + s.NotNil(err) + s.IsType(&serviceerror.NotFound{}, err) +} + +func (s *engineSuite) TestRespondActivityTaskFailedIfGetExecutionFailed() { + namespaceID := tests.NamespaceID + tt := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + ScheduledEventId: 2, + } + taskToken, _ := tt.Marshal() + identity := "testIdentity" + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(nil, + errors.New("FAILED")) + + _, err := s.mockHistoryEngine.RespondActivityTaskFailed(context.Background(), &historyservice.RespondActivityTaskFailedRequest{ + NamespaceId: tests.NamespaceID.String(), + FailedRequest: &workflowservice.RespondActivityTaskFailedRequest{ + TaskToken: taskToken, + Identity: identity, + }, + }) + s.EqualError(err, "FAILED") +} + +func (s *engineSuite) TestRespondActivityTaskFailededIfNoAIdProvided() { + namespaceID := tests.NamespaceID + tt := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: tests.WorkflowID, + ScheduledEventId: common.EmptyEventID, + } + taskToken, _ := tt.Marshal() + execution := commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + } + taskqueue := "testTaskQueue" + identity := "testIdentity" + + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, + tests.LocalNamespaceEntry, log.NewTestLogger(), tests.RunID) + addWorkflowExecutionStartedEvent(ms, execution, "wType", taskqueue, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) + addWorkflowTaskScheduledEvent(ms) + wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + gceResponse := &persistence.GetCurrentExecutionResponse{RunID: tests.RunID} + + s.mockExecutionMgr.EXPECT().GetCurrentExecution(gomock.Any(), gomock.Any()).Return(gceResponse, nil) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + + _, err := s.mockHistoryEngine.RespondActivityTaskFailed(context.Background(), &historyservice.RespondActivityTaskFailedRequest{ + NamespaceId: tests.NamespaceID.String(), + FailedRequest: &workflowservice.RespondActivityTaskFailedRequest{ + TaskToken: taskToken, + Identity: identity, + }, + }) + s.EqualError(err, "activityID cannot be empty") +} + +func (s *engineSuite) TestRespondActivityTaskFailededIfNotFound() { + namespaceID := tests.NamespaceID + tt := &tokenspb.Task{ + Attempt: 1, + NamespaceId: 
namespaceID.String(), + WorkflowId: tests.WorkflowID, + ScheduledEventId: common.EmptyEventID, + ActivityId: "aid", + } + taskToken, _ := tt.Marshal() + execution := commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + } + taskqueue := "testTaskQueue" + identity := "testIdentity" + + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, + tests.LocalNamespaceEntry, log.NewTestLogger(), tests.RunID) + addWorkflowExecutionStartedEvent(ms, execution, "wType", taskqueue, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) + addWorkflowTaskScheduledEvent(ms) + wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + gceResponse := &persistence.GetCurrentExecutionResponse{RunID: tests.RunID} + + s.mockExecutionMgr.EXPECT().GetCurrentExecution(gomock.Any(), gomock.Any()).Return(gceResponse, nil) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + + _, err := s.mockHistoryEngine.RespondActivityTaskFailed(context.Background(), &historyservice.RespondActivityTaskFailedRequest{ + NamespaceId: tests.NamespaceID.String(), + FailedRequest: &workflowservice.RespondActivityTaskFailedRequest{ + TaskToken: taskToken, + Identity: identity, + }, + }) + s.Error(err) +} + +func (s *engineSuite) TestRespondActivityTaskFailedUpdateExecutionFailed() { + namespaceID := tests.NamespaceID + we := commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + } + tl := "testTaskQueue" + tt := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: we.WorkflowId, + RunId: we.RunId, + ScheduledEventId: 5, + } + taskToken, _ := tt.Marshal() + identity := "testIdentity" + activityID := "activity1_id" + activityType := "activity_type1" + activityInput := payloads.EncodeString("input1") + + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, + tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) + addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 100*time.Second, 100*time.Second, identity) + wt := addWorkflowTaskScheduledEvent(ms) + workflowTaskStartedEvent := addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) + workflowTaskCompletedEvent := addWorkflowTaskCompletedEvent(&s.Suite, ms, wt.ScheduledEventID, workflowTaskStartedEvent.EventId, identity) + activityScheduledEvent, _ := addActivityTaskScheduledEvent(ms, workflowTaskCompletedEvent.EventId, activityID, activityType, tl, activityInput, 100*time.Second, 10*time.Second, 1*time.Second, 5*time.Second) + addActivityTaskStartedEvent(ms, activityScheduledEvent.EventId, identity) + + wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, errors.New("FAILED")) + s.mockShardManager.EXPECT().UpdateShard(gomock.Any(), gomock.Any()).Return(nil).AnyTimes() // might be called in background goroutine + + _, err := s.mockHistoryEngine.RespondActivityTaskFailed(context.Background(), &historyservice.RespondActivityTaskFailedRequest{ + NamespaceId: tests.NamespaceID.String(), + FailedRequest: &workflowservice.RespondActivityTaskFailedRequest{ + 
TaskToken: taskToken, + Identity: identity, + }, + }) + s.EqualError(err, "FAILED") +} + +func (s *engineSuite) TestRespondActivityTaskFailedIfTaskCompleted() { + namespaceID := tests.NamespaceID + we := commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + } + tl := "testTaskQueue" + tt := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: we.WorkflowId, + RunId: we.RunId, + ScheduledEventId: 5, + } + taskToken, _ := tt.Marshal() + identity := "testIdentity" + activityID := "activity1_id" + activityType := "activity_type1" + activityInput := payloads.EncodeString("input1") + failure := failure.NewServerFailure("fail reason", true) + + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, + tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) + addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 100*time.Second, 100*time.Second, identity) + wt := addWorkflowTaskScheduledEvent(ms) + workflowTaskStartedEvent := addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) + workflowTaskCompletedEvent := addWorkflowTaskCompletedEvent(&s.Suite, ms, wt.ScheduledEventID, workflowTaskStartedEvent.EventId, identity) + activityScheduledEvent, _ := addActivityTaskScheduledEvent(ms, workflowTaskCompletedEvent.EventId, activityID, activityType, tl, activityInput, 100*time.Second, 10*time.Second, 1*time.Second, 5*time.Second) + activityStartedEvent := addActivityTaskStartedEvent(ms, activityScheduledEvent.EventId, identity) + addActivityTaskFailedEvent(ms, activityScheduledEvent.EventId, activityStartedEvent.EventId, failure, enumspb.RETRY_STATE_NON_RETRYABLE_FAILURE, identity) + addWorkflowTaskScheduledEvent(ms) + + wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + + _, err := s.mockHistoryEngine.RespondActivityTaskFailed(context.Background(), &historyservice.RespondActivityTaskFailedRequest{ + NamespaceId: tests.NamespaceID.String(), + FailedRequest: &workflowservice.RespondActivityTaskFailedRequest{ + TaskToken: taskToken, + Failure: failure, + Identity: identity, + }, + }) + s.NotNil(err) + s.IsType(&serviceerror.NotFound{}, err) +} + +func (s *engineSuite) TestRespondActivityTaskFailedIfTaskNotStarted() { + namespaceID := tests.NamespaceID + we := commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + } + tl := "testTaskQueue" + tt := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: we.WorkflowId, + RunId: we.RunId, + ScheduledEventId: 5, + } + taskToken, _ := tt.Marshal() + identity := "testIdentity" + activityID := "activity1_id" + activityType := "activity_type1" + activityInput := payloads.EncodeString("input1") + + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, + tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) + addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 100*time.Second, 100*time.Second, identity) + wt := addWorkflowTaskScheduledEvent(ms) + workflowTaskStartedEvent := addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) + workflowTaskCompletedEvent := addWorkflowTaskCompletedEvent(&s.Suite, ms, wt.ScheduledEventID, workflowTaskStartedEvent.EventId, identity) + addActivityTaskScheduledEvent(ms, 
workflowTaskCompletedEvent.EventId, activityID, activityType, tl, activityInput, 100*time.Second, 10*time.Second, 1*time.Second, 5*time.Second) + + wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + + _, err := s.mockHistoryEngine.RespondActivityTaskFailed(context.Background(), &historyservice.RespondActivityTaskFailedRequest{ + NamespaceId: tests.NamespaceID.String(), + FailedRequest: &workflowservice.RespondActivityTaskFailedRequest{ + TaskToken: taskToken, + Identity: identity, + }, + }) + s.NotNil(err) + s.IsType(&serviceerror.NotFound{}, err) +} + +func (s *engineSuite) TestRespondActivityTaskFailedConflictOnUpdate() { + namespaceID := tests.NamespaceID + we := commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + } + tl := "testTaskQueue" + tt := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: we.WorkflowId, + RunId: we.RunId, + ScheduledEventId: 5, + } + taskToken, _ := tt.Marshal() + identity := "testIdentity" + activityID := "activity1_id" + activityType := "activity_type1" + activityInput := payloads.EncodeString("input1") + + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, + tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) + addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 100*time.Second, 100*time.Second, identity) + wt := addWorkflowTaskScheduledEvent(ms) + workflowTaskStartedEvent := addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) + workflowTaskCompletedEvent := addWorkflowTaskCompletedEvent(&s.Suite, ms, wt.ScheduledEventID, workflowTaskStartedEvent.EventId, identity) + activityScheduledEvent, _ := addActivityTaskScheduledEvent(ms, workflowTaskCompletedEvent.EventId, activityID, activityType, tl, activityInput, 100*time.Second, 10*time.Second, 1*time.Second, 5*time.Second) + addActivityTaskStartedEvent(ms, activityScheduledEvent.EventId, identity) + + wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, &persistence.ConditionFailedError{}) + + _, err := s.mockHistoryEngine.RespondActivityTaskFailed(context.Background(), &historyservice.RespondActivityTaskFailedRequest{ + NamespaceId: tests.NamespaceID.String(), + FailedRequest: &workflowservice.RespondActivityTaskFailedRequest{ + TaskToken: taskToken, + Identity: identity, + }, + }) + s.Equal(&persistence.ConditionFailedError{}, err) +} + +func (s *engineSuite) TestRespondActivityTaskFailedSuccess() { + namespaceID := tests.NamespaceID + we := commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + } + tl := "testTaskQueue" + tt := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: we.WorkflowId, + RunId: we.RunId, + ScheduledEventId: 5, + } + taskToken, _ := tt.Marshal() + identity := "testIdentity" + activityID := "activity1_id" + activityType := "activity_type1" + activityInput := payloads.EncodeString("input1") + failure := failure.NewServerFailure("failed", false) + + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, 
s.eventsCache, + tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) + addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 100*time.Second, 100*time.Second, identity) + wt := addWorkflowTaskScheduledEvent(ms) + workflowTaskStartedEvent := addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) + workflowTaskCompletedEvent := addWorkflowTaskCompletedEvent(&s.Suite, ms, wt.ScheduledEventID, workflowTaskStartedEvent.EventId, identity) + activityScheduledEvent, _ := addActivityTaskScheduledEvent(ms, workflowTaskCompletedEvent.EventId, activityID, activityType, tl, activityInput, 100*time.Second, 10*time.Second, 1*time.Second, 5*time.Second) + addActivityTaskStartedEvent(ms, activityScheduledEvent.EventId, identity) + + wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) + + _, err := s.mockHistoryEngine.RespondActivityTaskFailed(context.Background(), &historyservice.RespondActivityTaskFailedRequest{ + NamespaceId: tests.NamespaceID.String(), + FailedRequest: &workflowservice.RespondActivityTaskFailedRequest{ + TaskToken: taskToken, + Failure: failure, + Identity: identity, + }, + }) + s.Nil(err) + ms2 := s.getMutableState(tests.NamespaceID, we) + s.Equal(int64(9), ms2.GetNextEventID()) + s.Equal(int64(3), ms2.GetExecutionInfo().LastWorkflowTaskStartedEventId) + s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, ms2.GetExecutionState().State) + + s.True(ms2.HasPendingWorkflowTask()) + wt = ms2.GetWorkflowTaskByID(int64(8)) + s.NotNil(wt) + s.EqualValues(int64(100), wt.WorkflowTaskTimeout.Seconds()) + s.Equal(int64(8), wt.ScheduledEventID) + s.Equal(common.EmptyEventID, wt.StartedEventID) +} + +func (s *engineSuite) TestRespondActivityTaskFailedWithHeartbeatSuccess() { + namespaceID := tests.NamespaceID + we := commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + } + tl := "testTaskQueue" + tt := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: we.WorkflowId, + RunId: we.RunId, + ScheduledEventId: 5, + } + taskToken, _ := tt.Marshal() + identity := "testIdentity" + activityID := "activity1_id" + activityType := "activity_type1" + activityInput := payloads.EncodeString("input1") + failure := failure.NewServerFailure("failed", false) + + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, + tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) + addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 100*time.Second, 100*time.Second, identity) + wt := addWorkflowTaskScheduledEvent(ms) + workflowTaskStartedEvent := addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) + workflowTaskCompletedEvent := addWorkflowTaskCompletedEvent(&s.Suite, ms, wt.ScheduledEventID, workflowTaskStartedEvent.EventId, identity) + activityScheduledEvent, activityInfo := addActivityTaskScheduledEvent(ms, workflowTaskCompletedEvent.EventId, activityID, activityType, tl, activityInput, 100*time.Second, 10*time.Second, 1*time.Second, 5*time.Second) + addActivityTaskStartedEvent(ms, activityScheduledEvent.EventId, identity) + + wfMs := workflow.TestCloneToProto(ms) + 
wfMs.ActivityInfos[activityInfo.ScheduledEventId] = activityInfo + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) + + details := payloads.EncodeString("details") + + s.Nil(activityInfo.GetLastHeartbeatDetails()) + + _, err := s.mockHistoryEngine.RespondActivityTaskFailed(context.Background(), &historyservice.RespondActivityTaskFailedRequest{ + NamespaceId: tests.NamespaceID.String(), + FailedRequest: &workflowservice.RespondActivityTaskFailedRequest{ + TaskToken: taskToken, + Failure: failure, + Identity: identity, + LastHeartbeatDetails: details, + }, + }) + s.Nil(err) + ms2 := s.getMutableState(tests.NamespaceID, we) + s.Equal(int64(9), ms2.GetNextEventID()) + s.Equal(int64(3), ms2.GetExecutionInfo().LastWorkflowTaskStartedEventId) + s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, ms2.GetExecutionState().State) + + s.True(ms2.HasPendingWorkflowTask()) + wt = ms2.GetWorkflowTaskByID(int64(8)) + s.NotNil(wt) + s.EqualValues(int64(100), wt.WorkflowTaskTimeout.Seconds()) + s.Equal(int64(8), wt.ScheduledEventID) + s.Equal(common.EmptyEventID, wt.StartedEventID) + + s.NotNil(activityInfo.GetLastHeartbeatDetails()) +} + +func (s *engineSuite) TestRespondActivityTaskFailedByIdSuccess() { + namespaceID := tests.NamespaceID + we := commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + } + tl := "testTaskQueue" + + identity := "testIdentity" + activityID := "activity1_id" + activityType := "activity_type1" + activityInput := payloads.EncodeString("input1") + failure := failure.NewServerFailure("failed", false) + tt := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: we.WorkflowId, + ScheduledEventId: common.EmptyEventID, + ActivityId: activityID, + } + taskToken, _ := tt.Marshal() + + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, + tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) + addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 100*time.Second, 100*time.Second, identity) + workflowTaskScheduledEvent := addWorkflowTaskScheduledEvent(ms) + workflowTaskStartedEvent := addWorkflowTaskStartedEvent(ms, workflowTaskScheduledEvent.ScheduledEventID, tl, identity) + workflowTaskCompletedEvent := addWorkflowTaskCompletedEvent(&s.Suite, ms, workflowTaskScheduledEvent.ScheduledEventID, workflowTaskStartedEvent.EventId, identity) + activityScheduledEvent, _ := addActivityTaskScheduledEvent(ms, workflowTaskCompletedEvent.EventId, activityID, activityType, tl, activityInput, 100*time.Second, 10*time.Second, 1*time.Second, 5*time.Second) + addActivityTaskStartedEvent(ms, activityScheduledEvent.EventId, identity) + + wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + gceResponse := &persistence.GetCurrentExecutionResponse{RunID: we.RunId} + + s.mockExecutionMgr.EXPECT().GetCurrentExecution(gomock.Any(), gomock.Any()).Return(gceResponse, nil) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) + + _, err := 
s.mockHistoryEngine.RespondActivityTaskFailed(context.Background(), &historyservice.RespondActivityTaskFailedRequest{ + NamespaceId: tests.NamespaceID.String(), + FailedRequest: &workflowservice.RespondActivityTaskFailedRequest{ + TaskToken: taskToken, + Failure: failure, + Identity: identity, + }, + }) + s.Nil(err) + ms2 := s.getMutableState(tests.NamespaceID, we) + s.Equal(int64(9), ms2.GetNextEventID()) + s.Equal(int64(3), ms2.GetExecutionInfo().LastWorkflowTaskStartedEventId) + s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, ms2.GetExecutionState().State) + + s.True(ms2.HasPendingWorkflowTask()) + wt := ms2.GetWorkflowTaskByID(int64(8)) + s.NotNil(wt) + s.EqualValues(int64(100), wt.WorkflowTaskTimeout.Seconds()) + s.Equal(int64(8), wt.ScheduledEventID) + s.Equal(common.EmptyEventID, wt.StartedEventID) +} + +func (s *engineSuite) TestRecordActivityTaskHeartBeatSuccess_NoTimer() { + namespaceID := tests.NamespaceID + we := commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + } + tl := "testTaskQueue" + tt := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: we.WorkflowId, + RunId: we.RunId, + ScheduledEventId: 5, + } + taskToken, _ := tt.Marshal() + identity := "testIdentity" + activityID := "activity1_id" + activityType := "activity_type1" + activityInput := payloads.EncodeString("input1") + + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, + tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) + addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 100*time.Second, 100*time.Second, identity) + wt := addWorkflowTaskScheduledEvent(ms) + workflowTaskStartedEvent := addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) + workflowTaskCompletedEvent := addWorkflowTaskCompletedEvent(&s.Suite, ms, wt.ScheduledEventID, workflowTaskStartedEvent.EventId, identity) + activityScheduledEvent, _ := addActivityTaskScheduledEvent(ms, workflowTaskCompletedEvent.EventId, activityID, activityType, tl, activityInput, 100*time.Second, 10*time.Second, 1*time.Second, 0*time.Second) + addActivityTaskStartedEvent(ms, activityScheduledEvent.EventId, identity) + + // No HeartBeat timer running. 
+ wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) + + detais := payloads.EncodeString("details") + + _, err := s.mockHistoryEngine.RecordActivityTaskHeartbeat(context.Background(), &historyservice.RecordActivityTaskHeartbeatRequest{ + NamespaceId: tests.NamespaceID.String(), + HeartbeatRequest: &workflowservice.RecordActivityTaskHeartbeatRequest{ + TaskToken: taskToken, + Identity: identity, + Details: detais, + }, + }) + s.Nil(err) +} + +func (s *engineSuite) TestRecordActivityTaskHeartBeatSuccess_TimerRunning() { + namespaceID := tests.NamespaceID + we := commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + } + tl := "testTaskQueue" + tt := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: we.WorkflowId, + RunId: we.RunId, + ScheduledEventId: 5, + } + taskToken, _ := tt.Marshal() + identity := "testIdentity" + activityID := "activity1_id" + activityType := "activity_type1" + activityInput := payloads.EncodeString("input1") + + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, + tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) + addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 100*time.Second, 100*time.Second, identity) + wt := addWorkflowTaskScheduledEvent(ms) + workflowTaskStartedEvent := addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) + workflowTaskCompletedEvent := addWorkflowTaskCompletedEvent(&s.Suite, ms, wt.ScheduledEventID, workflowTaskStartedEvent.EventId, identity) + activityScheduledEvent, _ := addActivityTaskScheduledEvent(ms, workflowTaskCompletedEvent.EventId, activityID, activityType, tl, activityInput, 100*time.Second, 10*time.Second, 1*time.Second, 1*time.Second) + addActivityTaskStartedEvent(ms, activityScheduledEvent.EventId, identity) + + wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + + // HeartBeat timer running. 
+ s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) + + detais := payloads.EncodeString("details") + + _, err := s.mockHistoryEngine.RecordActivityTaskHeartbeat(context.Background(), &historyservice.RecordActivityTaskHeartbeatRequest{ + NamespaceId: tests.NamespaceID.String(), + HeartbeatRequest: &workflowservice.RecordActivityTaskHeartbeatRequest{ + TaskToken: taskToken, + Identity: identity, + Details: detais, + }, + }) + s.Nil(err) + ms2 := s.getMutableState(tests.NamespaceID, we) + s.Equal(int64(7), ms2.GetNextEventID()) + s.Equal(int64(3), ms2.GetExecutionInfo().LastWorkflowTaskStartedEventId) + s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, ms2.GetExecutionState().State) + s.False(ms2.HasPendingWorkflowTask()) +} + +func (s *engineSuite) TestRecordActivityTaskHeartBeatByIDSuccess() { + namespaceID := tests.NamespaceID + we := commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + } + tl := "testTaskQueue" + identity := "testIdentity" + activityID := "activity1_id" + activityType := "activity_type1" + activityInput := payloads.EncodeString("input1") + tt := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: we.WorkflowId, + RunId: we.RunId, + ScheduledEventId: common.EmptyEventID, + ActivityId: activityID, + } + taskToken, _ := tt.Marshal() + + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, + tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) + addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 100*time.Second, 100*time.Second, identity) + wt := addWorkflowTaskScheduledEvent(ms) + workflowTaskStartedEvent := addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) + workflowTaskCompletedEvent := addWorkflowTaskCompletedEvent(&s.Suite, ms, wt.ScheduledEventID, workflowTaskStartedEvent.EventId, identity) + activityScheduledEvent, _ := addActivityTaskScheduledEvent(ms, workflowTaskCompletedEvent.EventId, activityID, activityType, tl, activityInput, 100*time.Second, 10*time.Second, 1*time.Second, 0*time.Second) + addActivityTaskStartedEvent(ms, activityScheduledEvent.EventId, identity) + + // No HeartBeat timer running. 
+ wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) + + detais := payloads.EncodeString("details") + + _, err := s.mockHistoryEngine.RecordActivityTaskHeartbeat(context.Background(), &historyservice.RecordActivityTaskHeartbeatRequest{ + NamespaceId: tests.NamespaceID.String(), + HeartbeatRequest: &workflowservice.RecordActivityTaskHeartbeatRequest{ + TaskToken: taskToken, + Identity: identity, + Details: detais, + }, + }) + s.Nil(err) +} + +func (s *engineSuite) TestRespondActivityTaskCanceled_Scheduled() { + namespaceID := tests.NamespaceID + we := commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + } + tl := "testTaskQueue" + tt := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: we.WorkflowId, + RunId: we.RunId, + ScheduledEventId: 5, + } + taskToken, _ := tt.Marshal() + identity := "testIdentity" + activityID := "activity1_id" + activityType := "activity_type1" + activityInput := payloads.EncodeString("input1") + + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, + tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) + addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 100*time.Second, 100*time.Second, identity) + wt := addWorkflowTaskScheduledEvent(ms) + workflowTaskStartedEvent := addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) + workflowTaskCompletedEvent := addWorkflowTaskCompletedEvent(&s.Suite, ms, wt.ScheduledEventID, workflowTaskStartedEvent.EventId, identity) + addActivityTaskScheduledEvent(ms, workflowTaskCompletedEvent.EventId, activityID, activityType, tl, activityInput, 100*time.Second, 10*time.Second, 1*time.Second, 1*time.Second) + + wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + + _, err := s.mockHistoryEngine.RespondActivityTaskCanceled(context.Background(), &historyservice.RespondActivityTaskCanceledRequest{ + NamespaceId: tests.NamespaceID.String(), + CancelRequest: &workflowservice.RespondActivityTaskCanceledRequest{ + TaskToken: taskToken, + Identity: identity, + Details: payloads.EncodeString("details"), + }, + }) + s.NotNil(err) + s.IsType(&serviceerror.NotFound{}, err) +} + +func (s *engineSuite) TestRespondActivityTaskCanceled_Started() { + namespaceID := tests.NamespaceID + we := commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + } + tl := "testTaskQueue" + tt := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: we.WorkflowId, + RunId: we.RunId, + ScheduledEventId: 5, + } + taskToken, _ := tt.Marshal() + identity := "testIdentity" + activityID := "activity1_id" + activityType := "activity_type1" + activityInput := payloads.EncodeString("input1") + + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, + tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) + addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 100*time.Second, 100*time.Second, identity) + wt := 
addWorkflowTaskScheduledEvent(ms) + workflowTaskStartedEvent := addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) + workflowTaskCompletedEvent := addWorkflowTaskCompletedEvent(&s.Suite, ms, wt.ScheduledEventID, workflowTaskStartedEvent.EventId, identity) + activityScheduledEvent, _ := addActivityTaskScheduledEvent(ms, workflowTaskCompletedEvent.EventId, activityID, activityType, tl, activityInput, 100*time.Second, 10*time.Second, 1*time.Second, 1*time.Second) + addActivityTaskStartedEvent(ms, activityScheduledEvent.EventId, identity) + _, _, err := ms.AddActivityTaskCancelRequestedEvent(workflowTaskCompletedEvent.EventId, activityScheduledEvent.EventId, identity) + s.Nil(err) + + wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) + + _, err = s.mockHistoryEngine.RespondActivityTaskCanceled(context.Background(), &historyservice.RespondActivityTaskCanceledRequest{ + NamespaceId: tests.NamespaceID.String(), + CancelRequest: &workflowservice.RespondActivityTaskCanceledRequest{ + TaskToken: taskToken, + Identity: identity, + Details: payloads.EncodeString("details"), + }, + }) + s.Nil(err) + ms2 := s.getMutableState(tests.NamespaceID, we) + s.Equal(int64(10), ms2.GetNextEventID()) + s.Equal(int64(3), ms2.GetExecutionInfo().LastWorkflowTaskStartedEventId) + s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, ms2.GetExecutionState().State) + + s.True(ms2.HasPendingWorkflowTask()) + wt = ms2.GetWorkflowTaskByID(int64(9)) + s.NotNil(wt) + s.EqualValues(int64(100), wt.WorkflowTaskTimeout.Seconds()) + s.Equal(int64(9), wt.ScheduledEventID) + s.Equal(common.EmptyEventID, wt.StartedEventID) +} + +func (s *engineSuite) TestRespondActivityTaskCanceledById_Started() { + namespaceID := tests.NamespaceID + we := commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + } + tl := "testTaskQueue" + identity := "testIdentity" + activityID := "activity1_id" + activityType := "activity_type1" + activityInput := payloads.EncodeString("input1") + tt := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: we.WorkflowId, + ScheduledEventId: common.EmptyEventID, + ActivityId: activityID, + } + taskToken, _ := tt.Marshal() + + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, + tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) + addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 100*time.Second, 100*time.Second, identity) + workflowTaskScheduledEvent := addWorkflowTaskScheduledEvent(ms) + workflowTaskStartedEvent := addWorkflowTaskStartedEvent(ms, workflowTaskScheduledEvent.ScheduledEventID, tl, identity) + workflowTaskCompletedEvent := addWorkflowTaskCompletedEvent(&s.Suite, ms, workflowTaskScheduledEvent.ScheduledEventID, workflowTaskStartedEvent.EventId, identity) + activityScheduledEvent, _ := addActivityTaskScheduledEvent(ms, workflowTaskCompletedEvent.EventId, activityID, activityType, tl, activityInput, 100*time.Second, 10*time.Second, 1*time.Second, 1*time.Second) + addActivityTaskStartedEvent(ms, activityScheduledEvent.EventId, identity) + _, _, err := ms.AddActivityTaskCancelRequestedEvent(workflowTaskCompletedEvent.EventId, 
activityScheduledEvent.EventId, identity) + s.Nil(err) + + wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + gceResponse := &persistence.GetCurrentExecutionResponse{RunID: we.RunId} + + s.mockExecutionMgr.EXPECT().GetCurrentExecution(gomock.Any(), gomock.Any()).Return(gceResponse, nil) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) + + _, err = s.mockHistoryEngine.RespondActivityTaskCanceled(context.Background(), &historyservice.RespondActivityTaskCanceledRequest{ + NamespaceId: tests.NamespaceID.String(), + CancelRequest: &workflowservice.RespondActivityTaskCanceledRequest{ + TaskToken: taskToken, + Identity: identity, + Details: payloads.EncodeString("details"), + }, + }) + s.Nil(err) + ms2 := s.getMutableState(tests.NamespaceID, we) + s.Equal(int64(10), ms2.GetNextEventID()) + s.Equal(int64(3), ms2.GetExecutionInfo().LastWorkflowTaskStartedEventId) + s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, ms2.GetExecutionState().State) + + s.True(ms2.HasPendingWorkflowTask()) + wt := ms2.GetWorkflowTaskByID(int64(9)) + s.NotNil(wt) + s.EqualValues(int64(100), wt.WorkflowTaskTimeout.Seconds()) + s.Equal(int64(9), wt.ScheduledEventID) + s.Equal(common.EmptyEventID, wt.StartedEventID) +} + +func (s *engineSuite) TestRespondActivityTaskCanceledIfNoRunID() { + namespaceID := tests.NamespaceID + tt := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: tests.WorkflowID, + ScheduledEventId: 2, + } + taskToken, _ := tt.Marshal() + identity := "testIdentity" + + s.mockExecutionMgr.EXPECT().GetCurrentExecution(gomock.Any(), gomock.Any()).Return(nil, serviceerror.NewNotFound("")) + + _, err := s.mockHistoryEngine.RespondActivityTaskCanceled(context.Background(), &historyservice.RespondActivityTaskCanceledRequest{ + NamespaceId: tests.NamespaceID.String(), + CancelRequest: &workflowservice.RespondActivityTaskCanceledRequest{ + TaskToken: taskToken, + Identity: identity, + }, + }) + s.NotNil(err) + s.IsType(&serviceerror.NotFound{}, err) +} + +func (s *engineSuite) TestRespondActivityTaskCanceledIfNoAIdProvided() { + namespaceID := tests.NamespaceID + workflowExecution := commonpb.WorkflowExecution{ + WorkflowId: "test-respond-activity-task-canceled-if-no-activity-id-provided", + RunId: tests.RunID, + } + taskqueue := "testTaskQueue" + + tt := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: tests.WorkflowID, + ScheduledEventId: common.EmptyEventID, + } + taskToken, _ := tt.Marshal() + identity := "testIdentity" + + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, + tests.LocalNamespaceEntry, log.NewTestLogger(), tests.RunID) + // Add dummy event + addWorkflowExecutionStartedEvent(ms, workflowExecution, "wType", taskqueue, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) + wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + gceResponse := &persistence.GetCurrentExecutionResponse{RunID: tests.RunID} + + s.mockExecutionMgr.EXPECT().GetCurrentExecution(gomock.Any(), gomock.Any()).Return(gceResponse, nil) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + + _, err := 
s.mockHistoryEngine.RespondActivityTaskCanceled(context.Background(), &historyservice.RespondActivityTaskCanceledRequest{ + NamespaceId: tests.NamespaceID.String(), + CancelRequest: &workflowservice.RespondActivityTaskCanceledRequest{ + TaskToken: taskToken, + Identity: identity, + }, + }) + s.EqualError(err, "activityID cannot be empty") +} + +func (s *engineSuite) TestRespondActivityTaskCanceledIfNotFound() { + namespaceID := tests.NamespaceID + workflowExecution := commonpb.WorkflowExecution{ + WorkflowId: "test-respond-activity-task-canceled-if-not-found", + RunId: tests.RunID, + } + taskqueue := "testTaskQueue" + + tt := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: tests.WorkflowID, + ScheduledEventId: common.EmptyEventID, + ActivityId: "aid", + } + taskToken, _ := tt.Marshal() + identity := "testIdentity" + + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, + tests.LocalNamespaceEntry, log.NewTestLogger(), tests.RunID) + // Add dummy event + addWorkflowExecutionStartedEvent(ms, workflowExecution, "wType", taskqueue, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) + wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + gceResponse := &persistence.GetCurrentExecutionResponse{RunID: tests.RunID} + + s.mockExecutionMgr.EXPECT().GetCurrentExecution(gomock.Any(), gomock.Any()).Return(gceResponse, nil) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + + _, err := s.mockHistoryEngine.RespondActivityTaskCanceled(context.Background(), &historyservice.RespondActivityTaskCanceledRequest{ + NamespaceId: tests.NamespaceID.String(), + CancelRequest: &workflowservice.RespondActivityTaskCanceledRequest{ + TaskToken: taskToken, + Identity: identity, + }, + }) + s.Error(err) +} + +func (s *engineSuite) TestRequestCancel_RespondWorkflowTaskCompleted_NotScheduled() { + namespaceID := tests.NamespaceID + we := commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + } + tl := "testTaskQueue" + tt := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: we.WorkflowId, + RunId: we.RunId, + ScheduledEventId: 2, + } + taskToken, _ := tt.Marshal() + identity := "testIdentity" + activityScheduledEventID := int64(99) + + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, + tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) + addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 100*time.Second, 100*time.Second, identity) + wt := addWorkflowTaskScheduledEvent(ms) + addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) + + commands := []*commandpb.Command{{ + CommandType: enumspb.COMMAND_TYPE_REQUEST_CANCEL_ACTIVITY_TASK, + Attributes: &commandpb.Command_RequestCancelActivityTaskCommandAttributes{RequestCancelActivityTaskCommandAttributes: &commandpb.RequestCancelActivityTaskCommandAttributes{ + ScheduledEventId: activityScheduledEventID, + }}, + }} + + ms1 := workflow.TestCloneToProto(ms) + gwmsResponse1 := &persistence.GetWorkflowExecutionResponse{State: ms1} + ms2 := workflow.TestCloneToProto(ms) + gwmsResponse2 := &persistence.GetWorkflowExecutionResponse{State: ms2} + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse1, nil) + 
s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse2, nil) + var updatedWorkflowMutation persistence.WorkflowMutation + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).DoAndReturn(func(_ context.Context, request *persistence.UpdateWorkflowExecutionRequest) (*persistence.UpdateWorkflowExecutionResponse, error) { + updatedWorkflowMutation = request.UpdateWorkflowMutation + return tests.UpdateWorkflowExecutionResponse, nil + }) + + _, err := s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ + NamespaceId: tests.NamespaceID.String(), + CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ + TaskToken: taskToken, + Commands: commands, + Identity: identity, + }, + }) + s.Error(err) + s.IsType(&serviceerror.InvalidArgument{}, err) + s.Equal("BadRequestCancelActivityAttributes: invalid history builder state for action: add-activitytask-cancel-requested-event, ScheduledEventID: 99", err.Error()) + s.NotNil(updatedWorkflowMutation) + s.Equal(int64(5), updatedWorkflowMutation.NextEventID) + s.Equal(common.EmptyEventID, updatedWorkflowMutation.ExecutionInfo.LastWorkflowTaskStartedEventId) + s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, updatedWorkflowMutation.ExecutionState.State) + s.True(updatedWorkflowMutation.ExecutionInfo.WorkflowTaskScheduledEventId != common.EmptyEventID) +} + +func (s *engineSuite) TestRequestCancel_RespondWorkflowTaskCompleted_Scheduled() { + namespaceID := tests.NamespaceID + we := commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + } + tl := "testTaskQueue" + tt := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: we.WorkflowId, + RunId: we.RunId, + ScheduledEventId: 6, + } + taskToken, _ := tt.Marshal() + identity := "testIdentity" + activityID := "activity1_id" + activityType := "activity_type1" + activityInput := payloads.EncodeString("input1") + + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, + tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) + addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 100*time.Second, 100*time.Second, identity) + wt := addWorkflowTaskScheduledEvent(ms) + workflowTaskStartedEvent := addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) + workflowTaskCompletedEvent := addWorkflowTaskCompletedEvent(&s.Suite, ms, wt.ScheduledEventID, workflowTaskStartedEvent.EventId, identity) + _, aInfo := addActivityTaskScheduledEvent(ms, workflowTaskCompletedEvent.EventId, activityID, activityType, tl, activityInput, 100*time.Second, 10*time.Second, 1*time.Second, 1*time.Second) + wt2 := addWorkflowTaskScheduledEvent(ms) + addWorkflowTaskStartedEvent(ms, wt2.ScheduledEventID, tl, identity) + + wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + + commands := []*commandpb.Command{{ + CommandType: enumspb.COMMAND_TYPE_REQUEST_CANCEL_ACTIVITY_TASK, + Attributes: &commandpb.Command_RequestCancelActivityTaskCommandAttributes{RequestCancelActivityTaskCommandAttributes: &commandpb.RequestCancelActivityTaskCommandAttributes{ + ScheduledEventId: aInfo.ScheduledEventId, + }}, + }} + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), 
gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) + + _, err := s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ + NamespaceId: tests.NamespaceID.String(), + CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ + TaskToken: taskToken, + Commands: commands, + Identity: identity, + }, + }) + s.Nil(err) + + ms2 := s.getMutableState(tests.NamespaceID, we) + s.Equal(int64(12), ms2.GetNextEventID()) + s.Equal(int64(7), ms2.GetExecutionInfo().LastWorkflowTaskStartedEventId) + s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, ms2.GetExecutionState().State) + s.True(ms2.HasPendingWorkflowTask()) + wt2 = ms2.GetWorkflowTaskByID(ms2.GetNextEventID() - 1) + s.NotNil(wt2) + s.Equal(ms2.GetNextEventID()-1, wt2.ScheduledEventID) + s.Equal(int32(1), wt2.Attempt) +} + +func (s *engineSuite) TestRequestCancel_RespondWorkflowTaskCompleted_Started() { + namespaceID := tests.NamespaceID + we := commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + } + tl := "testTaskQueue" + tt := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: we.WorkflowId, + RunId: we.RunId, + ScheduledEventId: 7, + } + taskToken, _ := tt.Marshal() + identity := "testIdentity" + activityID := "activity1_id" + activityType := "activity_type1" + activityInput := payloads.EncodeString("input1") + + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, + tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) + addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 100*time.Second, 100*time.Second, identity) + wt := addWorkflowTaskScheduledEvent(ms) + workflowTaskStartedEvent := addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) + workflowTaskCompletedEvent := addWorkflowTaskCompletedEvent(&s.Suite, ms, wt.ScheduledEventID, workflowTaskStartedEvent.EventId, identity) + activityScheduledEvent, _ := addActivityTaskScheduledEvent(ms, workflowTaskCompletedEvent.EventId, activityID, activityType, tl, activityInput, 100*time.Second, 10*time.Second, 1*time.Second, 0*time.Second) + addActivityTaskStartedEvent(ms, activityScheduledEvent.EventId, identity) + wt2 := addWorkflowTaskScheduledEvent(ms) + addWorkflowTaskStartedEvent(ms, wt2.ScheduledEventID, tl, identity) + + wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + + commands := []*commandpb.Command{{ + CommandType: enumspb.COMMAND_TYPE_REQUEST_CANCEL_ACTIVITY_TASK, + Attributes: &commandpb.Command_RequestCancelActivityTaskCommandAttributes{RequestCancelActivityTaskCommandAttributes: &commandpb.RequestCancelActivityTaskCommandAttributes{ + ScheduledEventId: activityScheduledEvent.GetEventId(), + }}, + }} + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) + + _, err := s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ + NamespaceId: tests.NamespaceID.String(), + CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ + TaskToken: taskToken, + Commands: commands, + Identity: identity, + }, + }) + s.Nil(err) + + ms2 := s.getMutableState(tests.NamespaceID, we) + s.Equal(int64(11), ms2.GetNextEventID()) + 
s.Equal(int64(8), ms2.GetExecutionInfo().LastWorkflowTaskStartedEventId) + s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, ms2.GetExecutionState().State) + s.False(ms2.HasPendingWorkflowTask()) +} + +func (s *engineSuite) TestRequestCancel_RespondWorkflowTaskCompleted_Completed() { + namespaceID := tests.NamespaceID + we := commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + } + tl := "testTaskQueue" + tt := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: we.WorkflowId, + RunId: we.RunId, + ScheduledEventId: 6, + } + taskToken, _ := tt.Marshal() + identity := "testIdentity" + activityID := "activity1_id" + activityType := "activity_type1" + activityInput := payloads.EncodeString("input1") + workflowResult := payloads.EncodeString("workflow result") + + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, + tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) + addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 100*time.Second, 100*time.Second, identity) + wt := addWorkflowTaskScheduledEvent(ms) + workflowTaskStartedEvent := addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) + workflowTaskCompletedEvent := addWorkflowTaskCompletedEvent(&s.Suite, ms, wt.ScheduledEventID, workflowTaskStartedEvent.EventId, identity) + _, aInfo := addActivityTaskScheduledEvent(ms, workflowTaskCompletedEvent.EventId, activityID, activityType, tl, activityInput, 100*time.Second, 10*time.Second, 1*time.Second, 0*time.Second) + wt2 := addWorkflowTaskScheduledEvent(ms) + addWorkflowTaskStartedEvent(ms, wt2.ScheduledEventID, tl, identity) + + commands := []*commandpb.Command{ + { + CommandType: enumspb.COMMAND_TYPE_REQUEST_CANCEL_ACTIVITY_TASK, + Attributes: &commandpb.Command_RequestCancelActivityTaskCommandAttributes{RequestCancelActivityTaskCommandAttributes: &commandpb.RequestCancelActivityTaskCommandAttributes{ + ScheduledEventId: aInfo.ScheduledEventId, + }}, + }, + { + CommandType: enumspb.COMMAND_TYPE_COMPLETE_WORKFLOW_EXECUTION, + Attributes: &commandpb.Command_CompleteWorkflowExecutionCommandAttributes{CompleteWorkflowExecutionCommandAttributes: &commandpb.CompleteWorkflowExecutionCommandAttributes{ + Result: workflowResult, + }}, + }, + } + + wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) + + _, err := s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ + NamespaceId: tests.NamespaceID.String(), + CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ + TaskToken: taskToken, + Commands: commands, + Identity: identity, + }, + }) + s.Nil(err) + + ms2 := s.getMutableState(tests.NamespaceID, we) + s.Equal(int64(11), ms2.GetNextEventID()) + s.Equal(int64(7), ms2.GetExecutionInfo().LastWorkflowTaskStartedEventId) + s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED, ms2.GetExecutionState().State) + s.False(ms2.HasPendingWorkflowTask()) +} + +func (s *engineSuite) TestRequestCancel_RespondWorkflowTaskCompleted_NoHeartBeat() { + namespaceID := tests.NamespaceID + we := commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + } + tl := 
"testTaskQueue" + tt := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: we.WorkflowId, + RunId: we.RunId, + ScheduledEventId: 7, + } + taskToken, _ := tt.Marshal() + identity := "testIdentity" + activityID := "activity1_id" + activityType := "activity_type1" + activityInput := payloads.EncodeString("input1") + + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, + tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) + addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 100*time.Second, 100*time.Second, identity) + wt := addWorkflowTaskScheduledEvent(ms) + workflowTaskStartedEvent := addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) + workflowTaskCompletedEvent := addWorkflowTaskCompletedEvent(&s.Suite, ms, wt.ScheduledEventID, workflowTaskStartedEvent.EventId, identity) + activityScheduledEvent, _ := addActivityTaskScheduledEvent(ms, workflowTaskCompletedEvent.EventId, activityID, activityType, tl, activityInput, 100*time.Second, 10*time.Second, 1*time.Second, 0*time.Second) + addActivityTaskStartedEvent(ms, activityScheduledEvent.EventId, identity) + wt2 := addWorkflowTaskScheduledEvent(ms) + addWorkflowTaskStartedEvent(ms, wt2.ScheduledEventID, tl, identity) + + wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + + commands := []*commandpb.Command{{ + CommandType: enumspb.COMMAND_TYPE_REQUEST_CANCEL_ACTIVITY_TASK, + Attributes: &commandpb.Command_RequestCancelActivityTaskCommandAttributes{RequestCancelActivityTaskCommandAttributes: &commandpb.RequestCancelActivityTaskCommandAttributes{ + ScheduledEventId: activityScheduledEvent.GetEventId(), + }}, + }} + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) + + _, err := s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ + NamespaceId: tests.NamespaceID.String(), + CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ + TaskToken: taskToken, + Commands: commands, + Identity: identity, + }, + }) + s.Nil(err) + + ms2 := s.getMutableState(tests.NamespaceID, we) + s.Equal(int64(11), ms2.GetNextEventID()) + s.Equal(int64(8), ms2.GetExecutionInfo().LastWorkflowTaskStartedEventId) + s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, ms2.GetExecutionState().State) + s.False(ms2.HasPendingWorkflowTask()) + + // Try recording activity heartbeat + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) + + att := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: tests.WorkflowID, + RunId: we.GetRunId(), + ScheduledEventId: 5, + } + activityTaskToken, _ := att.Marshal() + + hbResponse, err := s.mockHistoryEngine.RecordActivityTaskHeartbeat(context.Background(), &historyservice.RecordActivityTaskHeartbeatRequest{ + NamespaceId: tests.NamespaceID.String(), + HeartbeatRequest: &workflowservice.RecordActivityTaskHeartbeatRequest{ + TaskToken: activityTaskToken, + Identity: identity, + Details: payloads.EncodeString("details"), + }, + }) + s.Nil(err) + s.NotNil(hbResponse) + s.True(hbResponse.CancelRequested) + + // Try cancelling the request. 
+ s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) + + _, err = s.mockHistoryEngine.RespondActivityTaskCanceled(context.Background(), &historyservice.RespondActivityTaskCanceledRequest{ + NamespaceId: tests.NamespaceID.String(), + CancelRequest: &workflowservice.RespondActivityTaskCanceledRequest{ + TaskToken: activityTaskToken, + Identity: identity, + Details: payloads.EncodeString("details"), + }, + }) + s.Nil(err) + + ms2 = s.getMutableState(tests.NamespaceID, we) + s.Equal(int64(13), ms2.GetNextEventID()) + s.Equal(int64(8), ms2.GetExecutionInfo().LastWorkflowTaskStartedEventId) + s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, ms2.GetExecutionState().State) + s.True(ms2.HasPendingWorkflowTask()) +} + +func (s *engineSuite) TestRequestCancel_RespondWorkflowTaskCompleted_Success() { + namespaceID := tests.NamespaceID + we := commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + } + tl := "testTaskQueue" + tt := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: we.WorkflowId, + RunId: we.RunId, + ScheduledEventId: 7, + } + taskToken, _ := tt.Marshal() + identity := "testIdentity" + activityID := "activity1_id" + activityType := "activity_type1" + activityInput := payloads.EncodeString("input1") + + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, + tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) + addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 100*time.Second, 100*time.Second, identity) + wt := addWorkflowTaskScheduledEvent(ms) + workflowTaskStartedEvent := addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) + workflowTaskCompletedEvent := addWorkflowTaskCompletedEvent(&s.Suite, ms, wt.ScheduledEventID, workflowTaskStartedEvent.EventId, identity) + activityScheduledEvent, _ := addActivityTaskScheduledEvent(ms, workflowTaskCompletedEvent.EventId, activityID, activityType, tl, activityInput, 100*time.Second, 10*time.Second, 1*time.Second, 1*time.Second) + addActivityTaskStartedEvent(ms, activityScheduledEvent.EventId, identity) + wt2 := addWorkflowTaskScheduledEvent(ms) + addWorkflowTaskStartedEvent(ms, wt2.ScheduledEventID, tl, identity) + + wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + + commands := []*commandpb.Command{{ + CommandType: enumspb.COMMAND_TYPE_REQUEST_CANCEL_ACTIVITY_TASK, + Attributes: &commandpb.Command_RequestCancelActivityTaskCommandAttributes{RequestCancelActivityTaskCommandAttributes: &commandpb.RequestCancelActivityTaskCommandAttributes{ + ScheduledEventId: activityScheduledEvent.GetEventId(), + }}, + }} + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) + + _, err := s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ + NamespaceId: tests.NamespaceID.String(), + CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ + TaskToken: taskToken, + Commands: commands, + Identity: identity, + }, + }) + s.Nil(err) + + ms2 := s.getMutableState(tests.NamespaceID, we) + s.Equal(int64(11), ms2.GetNextEventID()) + s.Equal(int64(8), ms2.GetExecutionInfo().LastWorkflowTaskStartedEventId) 
+ s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, ms2.GetExecutionState().State) + s.False(ms2.HasPendingWorkflowTask()) + + // Try recording activity heartbeat + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) + + att := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: tests.WorkflowID, + RunId: we.GetRunId(), + ScheduledEventId: 5, + } + activityTaskToken, _ := att.Marshal() + + hbResponse, err := s.mockHistoryEngine.RecordActivityTaskHeartbeat(context.Background(), &historyservice.RecordActivityTaskHeartbeatRequest{ + NamespaceId: tests.NamespaceID.String(), + HeartbeatRequest: &workflowservice.RecordActivityTaskHeartbeatRequest{ + TaskToken: activityTaskToken, + Identity: identity, + Details: payloads.EncodeString("details"), + }, + }) + s.Nil(err) + s.NotNil(hbResponse) + s.True(hbResponse.CancelRequested) + + // Try cancelling the request. + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) + + _, err = s.mockHistoryEngine.RespondActivityTaskCanceled(context.Background(), &historyservice.RespondActivityTaskCanceledRequest{ + NamespaceId: tests.NamespaceID.String(), + CancelRequest: &workflowservice.RespondActivityTaskCanceledRequest{ + TaskToken: activityTaskToken, + Identity: identity, + Details: payloads.EncodeString("details"), + }, + }) + s.Nil(err) + + ms2 = s.getMutableState(tests.NamespaceID, we) + s.Equal(int64(13), ms2.GetNextEventID()) + s.Equal(int64(8), ms2.GetExecutionInfo().LastWorkflowTaskStartedEventId) + s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, ms2.GetExecutionState().State) + s.True(ms2.HasPendingWorkflowTask()) +} + +func (s *engineSuite) TestRequestCancel_RespondWorkflowTaskCompleted_SuccessWithQueries() { + namespaceID := tests.NamespaceID + we := commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + } + tl := "testTaskQueue" + tt := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: we.WorkflowId, + RunId: we.RunId, + ScheduledEventId: 7, + } + taskToken, _ := tt.Marshal() + identity := "testIdentity" + activityID := "activity1_id" + activityType := "activity_type1" + activityInput := payloads.EncodeString("input1") + + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, + tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) + addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 100*time.Second, 100*time.Second, identity) + wt := addWorkflowTaskScheduledEvent(ms) + workflowTaskStartedEvent := addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) + workflowTaskCompletedEvent := addWorkflowTaskCompletedEvent(&s.Suite, ms, wt.ScheduledEventID, workflowTaskStartedEvent.EventId, identity) + activityScheduledEvent, _ := addActivityTaskScheduledEvent(ms, workflowTaskCompletedEvent.EventId, activityID, activityType, tl, activityInput, 100*time.Second, 10*time.Second, 1*time.Second, 1*time.Second) + addActivityTaskStartedEvent(ms, activityScheduledEvent.EventId, identity) + wt2 := addWorkflowTaskScheduledEvent(ms) + addWorkflowTaskStartedEvent(ms, wt2.ScheduledEventID, tl, identity) + + wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + + commands := []*commandpb.Command{{ + CommandType: enumspb.COMMAND_TYPE_REQUEST_CANCEL_ACTIVITY_TASK, + 
Attributes: &commandpb.Command_RequestCancelActivityTaskCommandAttributes{RequestCancelActivityTaskCommandAttributes: &commandpb.RequestCancelActivityTaskCommandAttributes{ + ScheduledEventId: activityScheduledEvent.GetEventId(), + }}, + }} + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) + + // load mutable state such that it already exists in memory when respond workflow task is called + // this enables us to set query registry on it + ctx, release, err := s.workflowCache.GetOrCreateWorkflowExecution( + context.Background(), + tests.NamespaceID, + we, + workflow.LockPriorityHigh, + ) + s.NoError(err) + loadedMS, err := ctx.LoadMutableState(context.Background()) + s.NoError(err) + qr := workflow.NewQueryRegistry() + id1, _ := qr.BufferQuery(&querypb.WorkflowQuery{}) + id2, _ := qr.BufferQuery(&querypb.WorkflowQuery{}) + id3, _ := qr.BufferQuery(&querypb.WorkflowQuery{}) + loadedMS.(*workflow.MutableStateImpl).QueryRegistry = qr + release(nil) + result1 := &querypb.WorkflowQueryResult{ + ResultType: enumspb.QUERY_RESULT_TYPE_ANSWERED, + Answer: payloads.EncodeBytes([]byte{1, 2, 3}), + } + result2 := &querypb.WorkflowQueryResult{ + ResultType: enumspb.QUERY_RESULT_TYPE_FAILED, + ErrorMessage: "error reason", + } + queryResults := map[string]*querypb.WorkflowQueryResult{ + id1: result1, + id2: result2, + } + _, err = s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ + NamespaceId: tests.NamespaceID.String(), + CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ + TaskToken: taskToken, + Commands: commands, + Identity: identity, + QueryResults: queryResults, + }, + }) + s.Nil(err) + + ms2 := s.getMutableState(tests.NamespaceID, we) + s.Equal(int64(11), ms2.GetNextEventID()) + s.Equal(int64(8), ms2.GetExecutionInfo().LastWorkflowTaskStartedEventId) + s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, ms2.GetExecutionState().State) + s.False(ms2.HasPendingWorkflowTask()) + s.Len(qr.GetCompletedIDs(), 2) + succeeded1, err := qr.GetCompletionState(id1) + s.NoError(err) + s.EqualValues(succeeded1.Result, result1) + s.Equal(workflow.QueryCompletionTypeSucceeded, succeeded1.Type) + succeeded2, err := qr.GetCompletionState(id2) + s.NoError(err) + s.EqualValues(succeeded2.Result, result2) + s.Equal(workflow.QueryCompletionTypeSucceeded, succeeded2.Type) + s.Len(qr.GetBufferedIDs(), 0) + s.Len(qr.GetFailedIDs(), 0) + s.Len(qr.GetUnblockedIDs(), 1) + unblocked1, err := qr.GetCompletionState(id3) + s.NoError(err) + s.Nil(unblocked1.Result) + s.Equal(workflow.QueryCompletionTypeUnblocked, unblocked1.Type) + + // Try recording activity heartbeat + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) + + att := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: tests.WorkflowID, + RunId: we.GetRunId(), + ScheduledEventId: 5, + } + activityTaskToken, _ := att.Marshal() + + hbResponse, err := s.mockHistoryEngine.RecordActivityTaskHeartbeat(context.Background(), &historyservice.RecordActivityTaskHeartbeatRequest{ + NamespaceId: tests.NamespaceID.String(), + HeartbeatRequest: &workflowservice.RecordActivityTaskHeartbeatRequest{ + TaskToken: activityTaskToken, + Identity: identity, + Details: payloads.EncodeString("details"), + 
}, + }) + s.Nil(err) + s.NotNil(hbResponse) + s.True(hbResponse.CancelRequested) + + // Try cancelling the request. + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) + + _, err = s.mockHistoryEngine.RespondActivityTaskCanceled(context.Background(), &historyservice.RespondActivityTaskCanceledRequest{ + NamespaceId: tests.NamespaceID.String(), + CancelRequest: &workflowservice.RespondActivityTaskCanceledRequest{ + TaskToken: activityTaskToken, + Identity: identity, + Details: payloads.EncodeString("details"), + }, + }) + s.Nil(err) + + ms2 = s.getMutableState(tests.NamespaceID, we) + s.Equal(int64(13), ms2.GetNextEventID()) + s.Equal(int64(8), ms2.GetExecutionInfo().LastWorkflowTaskStartedEventId) + s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, ms2.GetExecutionState().State) + s.True(ms2.HasPendingWorkflowTask()) +} + +func (s *engineSuite) TestStarTimer_DuplicateTimerID() { + namespaceID := tests.NamespaceID + we := commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + } + tl := "testTaskQueue" + tt := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: we.WorkflowId, + RunId: we.RunId, + ScheduledEventId: 2, + } + taskToken, _ := tt.Marshal() + identity := "testIdentity" + timerID := "t1" + + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, + tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) + + addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 100*time.Second, 100*time.Second, identity) + wt := addWorkflowTaskScheduledEvent(ms) + addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) + + wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + + commands := []*commandpb.Command{{ + CommandType: enumspb.COMMAND_TYPE_START_TIMER, + Attributes: &commandpb.Command_StartTimerCommandAttributes{StartTimerCommandAttributes: &commandpb.StartTimerCommandAttributes{ + TimerId: timerID, + StartToFireTimeout: timestamp.DurationPtr(1 * time.Second), + }}, + }} + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) + + _, err := s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ + NamespaceId: tests.NamespaceID.String(), + CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ + TaskToken: taskToken, + Commands: commands, + Identity: identity, + }, + }) + s.Nil(err) + + ms2 := s.getMutableState(tests.NamespaceID, we) + s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, ms2.GetExecutionState().State) + + // Try to add the same timer ID again. 
+ wt2 := addWorkflowTaskScheduledEvent(ms2) + addWorkflowTaskStartedEvent(ms2, wt2.ScheduledEventID, tl, identity) + tt2 := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: we.WorkflowId, + RunId: we.RunId, + ScheduledEventId: wt2.ScheduledEventID, + } + taskToken2, _ := tt2.Marshal() + + wfMs2 := workflow.TestCloneToProto(ms2) + gwmsResponse2 := &persistence.GetWorkflowExecutionResponse{State: wfMs2} + + workflowTaskFailedEvent := false + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse2, nil) + var updatedWorkflowMutation persistence.WorkflowMutation + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).DoAndReturn(func(_ context.Context, request *persistence.UpdateWorkflowExecutionRequest) (*persistence.UpdateWorkflowExecutionResponse, error) { + for _, newEvents := range request.UpdateWorkflowEvents { + decTaskIndex := len(newEvents.Events) - 1 + if decTaskIndex >= 0 && newEvents.Events[decTaskIndex].EventType == enumspb.EVENT_TYPE_WORKFLOW_TASK_FAILED { + workflowTaskFailedEvent = true + } + } + updatedWorkflowMutation = request.UpdateWorkflowMutation + return tests.UpdateWorkflowExecutionResponse, nil + }) + + _, err = s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ + NamespaceId: tests.NamespaceID.String(), + CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ + TaskToken: taskToken2, + Commands: commands, + Identity: identity, + }, + }) + s.Error(err) + s.IsType(&serviceerror.InvalidArgument{}, err) + s.Equal("StartTimerDuplicateId: invalid history builder state for action: add-timer-started-event, TimerID: t1", err.Error()) + + s.True(workflowTaskFailedEvent) + + s.NotNil(updatedWorkflowMutation) + s.Equal(int64(9), updatedWorkflowMutation.NextEventID) + s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, updatedWorkflowMutation.ExecutionState.State) + s.Equal(updatedWorkflowMutation.NextEventID, updatedWorkflowMutation.ExecutionInfo.WorkflowTaskScheduledEventId) + s.Equal(int32(2), updatedWorkflowMutation.ExecutionInfo.WorkflowTaskAttempt) +} + +func (s *engineSuite) TestUserTimer_RespondWorkflowTaskCompleted() { + namespaceID := tests.NamespaceID + we := commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + } + tl := "testTaskQueue" + tt := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: we.WorkflowId, + RunId: we.RunId, + ScheduledEventId: 6, + } + taskToken, _ := tt.Marshal() + identity := "testIdentity" + timerID := "t1" + + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, + tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) + // Verify cancel timer with a start event. 
+ addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 100*time.Second, 100*time.Second, identity) + wt := addWorkflowTaskScheduledEvent(ms) + workflowTaskStartedEvent := addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) + workflowTaskCompletedEvent := addWorkflowTaskCompletedEvent(&s.Suite, ms, wt.ScheduledEventID, workflowTaskStartedEvent.EventId, identity) + addTimerStartedEvent(ms, workflowTaskCompletedEvent.EventId, timerID, 10*time.Second) + wt2 := addWorkflowTaskScheduledEvent(ms) + addWorkflowTaskStartedEvent(ms, wt2.ScheduledEventID, tl, identity) + + wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + + commands := []*commandpb.Command{{ + CommandType: enumspb.COMMAND_TYPE_CANCEL_TIMER, + Attributes: &commandpb.Command_CancelTimerCommandAttributes{CancelTimerCommandAttributes: &commandpb.CancelTimerCommandAttributes{ + TimerId: timerID, + }}, + }} + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) + + _, err := s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ + NamespaceId: tests.NamespaceID.String(), + CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ + TaskToken: taskToken, + Commands: commands, + Identity: identity, + }, + }) + s.Nil(err) + + ms2 := s.getMutableState(tests.NamespaceID, we) + s.Equal(int64(10), ms2.GetNextEventID()) + s.Equal(int64(7), ms2.GetExecutionInfo().LastWorkflowTaskStartedEventId) + s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, ms2.GetExecutionState().State) + s.False(ms2.HasPendingWorkflowTask()) +} + +func (s *engineSuite) TestCancelTimer_RespondWorkflowTaskCompleted_NoStartTimer() { + namespaceID := tests.NamespaceID + we := commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + } + tl := "testTaskQueue" + tt := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: we.WorkflowId, + RunId: we.RunId, + ScheduledEventId: 2, + } + taskToken, _ := tt.Marshal() + identity := "testIdentity" + timerID := "t1" + + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, + tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) + // Verify cancel timer when no timer start event exists.
+ addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 100*time.Second, 100*time.Second, identity) + wt := addWorkflowTaskScheduledEvent(ms) + addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) + + wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + + ms2 := workflow.TestCloneToProto(ms) + gwmsResponse2 := &persistence.GetWorkflowExecutionResponse{State: ms2} + + commands := []*commandpb.Command{{ + CommandType: enumspb.COMMAND_TYPE_CANCEL_TIMER, + Attributes: &commandpb.Command_CancelTimerCommandAttributes{CancelTimerCommandAttributes: &commandpb.CancelTimerCommandAttributes{ + TimerId: timerID, + }}, + }} + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse2, nil) + var updatedWorkflowMutation persistence.WorkflowMutation + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).DoAndReturn(func(_ context.Context, request *persistence.UpdateWorkflowExecutionRequest) (*persistence.UpdateWorkflowExecutionResponse, error) { + updatedWorkflowMutation = request.UpdateWorkflowMutation + return tests.UpdateWorkflowExecutionResponse, nil + }) + + _, err := s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ + NamespaceId: tests.NamespaceID.String(), + CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ + TaskToken: taskToken, + Commands: commands, + Identity: identity, + }, + }) + s.Error(err) + s.IsType(&serviceerror.InvalidArgument{}, err) + s.Equal("BadCancelTimerAttributes: invalid history builder state for action: add-timer-canceled-event, TimerID: t1", err.Error()) + + s.NotNil(updatedWorkflowMutation) + s.Equal(int64(5), updatedWorkflowMutation.NextEventID) + s.Equal(common.EmptyEventID, updatedWorkflowMutation.ExecutionInfo.LastWorkflowTaskStartedEventId) + s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, updatedWorkflowMutation.ExecutionState.State) + s.True(updatedWorkflowMutation.ExecutionInfo.WorkflowTaskScheduledEventId != common.EmptyEventID) +} + +func (s *engineSuite) TestCancelTimer_RespondWorkflowTaskCompleted_TimerFired() { + namespaceID := tests.NamespaceID + we := commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + } + tl := "testTaskQueue" + tt := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: we.WorkflowId, + RunId: we.RunId, + ScheduledEventId: 6, + } + taskToken, _ := tt.Marshal() + identity := "testIdentity" + timerID := "t1" + + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, + tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) + // Verify cancel timer with a start event. 
+ addWorkflowExecutionStartedEvent(ms, we, "wType", tl, payloads.EncodeString("input"), 100*time.Second, 100*time.Second, 100*time.Second, identity) + wt := addWorkflowTaskScheduledEvent(ms) + workflowTaskStartedEvent := addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, tl, identity) + workflowTaskCompletedEvent := addWorkflowTaskCompletedEvent(&s.Suite, ms, wt.ScheduledEventID, workflowTaskStartedEvent.EventId, identity) + addTimerStartedEvent(ms, workflowTaskCompletedEvent.EventId, timerID, 10*time.Second) + wt2 := addWorkflowTaskScheduledEvent(ms) + addWorkflowTaskStartedEvent(ms, wt2.ScheduledEventID, tl, identity) + addTimerFiredEvent(ms, timerID) + _, _, err := ms.CloseTransactionAsMutation(workflow.TransactionPolicyActive) + s.Nil(err) + + wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + s.True(len(gwmsResponse.State.BufferedEvents) > 0) + + commands := []*commandpb.Command{{ + CommandType: enumspb.COMMAND_TYPE_CANCEL_TIMER, + Attributes: &commandpb.Command_CancelTimerCommandAttributes{CancelTimerCommandAttributes: &commandpb.CancelTimerCommandAttributes{ + TimerId: timerID, + }}, + }} + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).DoAndReturn( + func(_ context.Context, request *persistence.UpdateWorkflowExecutionRequest) (*persistence.UpdateWorkflowExecutionResponse, error) { + s.True(request.UpdateWorkflowMutation.ClearBufferedEvents) + return tests.UpdateWorkflowExecutionResponse, nil + }) + + _, err = s.mockHistoryEngine.RespondWorkflowTaskCompleted(context.Background(), &historyservice.RespondWorkflowTaskCompletedRequest{ + NamespaceId: tests.NamespaceID.String(), + CompleteRequest: &workflowservice.RespondWorkflowTaskCompletedRequest{ + TaskToken: taskToken, + Commands: commands, + Identity: identity, + }, + }) + s.Nil(err) + + ms2 := s.getMutableState(tests.NamespaceID, we) + s.Equal(int64(10), ms2.GetNextEventID()) + s.Equal(int64(7), ms2.GetExecutionInfo().LastWorkflowTaskStartedEventId) + s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, ms2.GetExecutionState().State) + s.False(ms2.HasPendingWorkflowTask()) + s.False(ms2.HasBufferedEvents()) +} + +func (s *engineSuite) TestSignalWorkflowExecution() { + signalRequest := &historyservice.SignalWorkflowExecutionRequest{} + _, err := s.mockHistoryEngine.SignalWorkflowExecution(context.Background(), signalRequest) + s.EqualError(err, "Missing namespace UUID.") + + we := commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + } + taskqueue := "testTaskQueue" + identity := "testIdentity" + signalName := "my signal name" + input := payloads.EncodeString("test input") + signalRequest = &historyservice.SignalWorkflowExecutionRequest{ + NamespaceId: tests.NamespaceID.String(), + SignalRequest: &workflowservice.SignalWorkflowExecutionRequest{ + Namespace: tests.NamespaceID.String(), + WorkflowExecution: &we, + Identity: identity, + SignalName: signalName, + Input: input, + }, + } + + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, + tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) + addWorkflowExecutionStartedEvent(ms, we, "wType", taskqueue, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) + addWorkflowTaskScheduledEvent(ms) + wfMs := workflow.TestCloneToProto(ms) + wfMs.ExecutionInfo.NamespaceId = 
tests.NamespaceID.String() + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) + + _, err = s.mockHistoryEngine.SignalWorkflowExecution(context.Background(), signalRequest) + s.Nil(err) +} + +// Test that a signal is deduplicated by request ID +func (s *engineSuite) TestSignalWorkflowExecution_DuplicateRequest() { + signalRequest := &historyservice.SignalWorkflowExecutionRequest{} + _, err := s.mockHistoryEngine.SignalWorkflowExecution(context.Background(), signalRequest) + s.EqualError(err, "Missing namespace UUID.") + + we := commonpb.WorkflowExecution{ + WorkflowId: "wId2", + RunId: tests.RunID, + } + taskqueue := "testTaskQueue" + identity := "testIdentity" + signalName := "my signal name 2" + input := payloads.EncodeString("test input 2") + requestID := uuid.New() + signalRequest = &historyservice.SignalWorkflowExecutionRequest{ + NamespaceId: tests.NamespaceID.String(), + SignalRequest: &workflowservice.SignalWorkflowExecutionRequest{ + Namespace: tests.NamespaceID.String(), + WorkflowExecution: &we, + Identity: identity, + SignalName: signalName, + Input: input, + RequestId: requestID, + }, + } + + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, + tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) + addWorkflowExecutionStartedEvent(ms, we, "wType", taskqueue, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) + addWorkflowTaskScheduledEvent(ms) + wfMs := workflow.TestCloneToProto(ms) + // assume duplicate request id + wfMs.SignalRequestedIds = []string{requestID} + wfMs.ExecutionInfo.NamespaceId = tests.NamespaceID.String() + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + + _, err = s.mockHistoryEngine.SignalWorkflowExecution(context.Background(), signalRequest) + s.Nil(err) +} + +// Test that a signal is deduplicated by request ID when the workflow has already completed +func (s *engineSuite) TestSignalWorkflowExecution_DuplicateRequest_Completed() { + signalRequest := &historyservice.SignalWorkflowExecutionRequest{} + _, err := s.mockHistoryEngine.SignalWorkflowExecution(context.Background(), signalRequest) + s.EqualError(err, "Missing namespace UUID.") + + we := commonpb.WorkflowExecution{ + WorkflowId: "wId2", + RunId: tests.RunID, + } + taskqueue := "testTaskQueue" + identity := "testIdentity" + signalName := "my signal name 2" + input := payloads.EncodeString("test input 2") + requestID := uuid.New() + signalRequest = &historyservice.SignalWorkflowExecutionRequest{ + NamespaceId: tests.NamespaceID.String(), + SignalRequest: &workflowservice.SignalWorkflowExecutionRequest{ + Namespace: tests.NamespaceID.String(), + WorkflowExecution: &we, + Identity: identity, + SignalName: signalName, + Input: input, + RequestId: requestID, + }, + } + + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, + tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) + addWorkflowExecutionStartedEvent(ms, we, "wType", taskqueue, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) + addWorkflowTaskScheduledEvent(ms) + wfMs := workflow.TestCloneToProto(ms) + // assume duplicate
request id + wfMs.SignalRequestedIds = []string{requestID} + wfMs.ExecutionInfo.NamespaceId = tests.NamespaceID.String() + wfMs.ExecutionState.State = enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + + _, err = s.mockHistoryEngine.SignalWorkflowExecution(context.Background(), signalRequest) + s.Nil(err) +} + +func (s *engineSuite) TestSignalWorkflowExecution_Failed() { + signalRequest := &historyservice.SignalWorkflowExecutionRequest{} + _, err := s.mockHistoryEngine.SignalWorkflowExecution(context.Background(), signalRequest) + s.EqualError(err, "Missing namespace UUID.") + + we := &commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + } + taskqueue := "testTaskQueue" + identity := "testIdentity" + signalName := "my signal name" + input := payloads.EncodeString("test input") + signalRequest = &historyservice.SignalWorkflowExecutionRequest{ + NamespaceId: tests.NamespaceID.String(), + SignalRequest: &workflowservice.SignalWorkflowExecutionRequest{ + Namespace: tests.NamespaceID.String(), + WorkflowExecution: we, + Identity: identity, + SignalName: signalName, + Input: input, + }, + } + + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, + tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) + addWorkflowExecutionStartedEvent(ms, *we, "wType", taskqueue, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) + addWorkflowTaskScheduledEvent(ms) + wfMs := workflow.TestCloneToProto(ms) + wfMs.ExecutionState.State = enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + + _, err = s.mockHistoryEngine.SignalWorkflowExecution(context.Background(), signalRequest) + s.EqualError(err, "workflow execution already completed") +} + +func (s *engineSuite) TestSignalWorkflowExecution_WorkflowTaskBackoff() { + signalRequest := &historyservice.SignalWorkflowExecutionRequest{} + _, err := s.mockHistoryEngine.SignalWorkflowExecution(context.Background(), signalRequest) + s.EqualError(err, "Missing namespace UUID.") + + we := commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + } + taskqueue := "testTaskQueue" + identity := "testIdentity" + signalName := "my signal name" + signalInput := payloads.EncodeString("test input") + signalRequest = &historyservice.SignalWorkflowExecutionRequest{ + NamespaceId: tests.NamespaceID.String(), + SignalRequest: &workflowservice.SignalWorkflowExecutionRequest{ + Namespace: tests.NamespaceID.String(), + WorkflowExecution: &we, + Identity: identity, + SignalName: signalName, + Input: signalInput, + }, + } + + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, + tests.LocalNamespaceEntry, log.NewTestLogger(), we.GetRunId()) + startRequest := &workflowservice.StartWorkflowExecutionRequest{ + WorkflowId: we.WorkflowId, + WorkflowType: &commonpb.WorkflowType{Name: "wType"}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskqueue}, + Input: payloads.EncodeString("input"), + WorkflowExecutionTimeout: timestamp.DurationPtr(100 * time.Second), + WorkflowRunTimeout: timestamp.DurationPtr(50 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(200 * time.Second), + Identity: 
identity, + } + + _, err = ms.AddWorkflowExecutionStartedEvent( + we, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: tests.NamespaceID.String(), + StartRequest: startRequest, + ContinueAsNewInitiator: enumspb.CONTINUE_AS_NEW_INITIATOR_RETRY, + FirstWorkflowTaskBackoff: timestamp.DurationPtr(time.Second * 10), + }, + ) + s.NoError(err) + + wfMs := workflow.TestCloneToProto(ms) + wfMs.ExecutionInfo.NamespaceId = tests.NamespaceID.String() + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).DoAndReturn(func(_ context.Context, request *persistence.UpdateWorkflowExecutionRequest) (*persistence.UpdateWorkflowExecutionResponse, error) { + s.Len(request.UpdateWorkflowEvents[0].Events, 1) // no workflow task scheduled event + // s.Empty(request.UpdateWorkflowMutation.Tasks[tasks.CategoryTransfer]) // no workflow transfer task + return tests.UpdateWorkflowExecutionResponse, nil + }) + + _, err = s.mockHistoryEngine.SignalWorkflowExecution(context.Background(), signalRequest) + s.Nil(err) +} + +func (s *engineSuite) TestRemoveSignalMutableState() { + removeRequest := &historyservice.RemoveSignalMutableStateRequest{} + _, err := s.mockHistoryEngine.RemoveSignalMutableState(context.Background(), removeRequest) + s.EqualError(err, "Missing namespace UUID.") + + execution := commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + } + taskqueue := "testTaskQueue" + identity := "testIdentity" + requestID := uuid.New() + removeRequest = &historyservice.RemoveSignalMutableStateRequest{ + NamespaceId: tests.NamespaceID.String(), + WorkflowExecution: &execution, + RequestId: requestID, + } + + ms := workflow.TestLocalMutableState(s.mockHistoryEngine.shard, s.eventsCache, + tests.LocalNamespaceEntry, log.NewTestLogger(), tests.RunID) + addWorkflowExecutionStartedEvent(ms, execution, "wType", taskqueue, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) + addWorkflowTaskScheduledEvent(ms) + wfMs := workflow.TestCloneToProto(ms) + wfMs.ExecutionInfo.NamespaceId = tests.NamespaceID.String() + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) + + _, err = s.mockHistoryEngine.RemoveSignalMutableState(context.Background(), removeRequest) + s.Nil(err) +} + +func (s *engineSuite) TestReapplyEvents_ReturnSuccess() { + workflowExecution := commonpb.WorkflowExecution{ + WorkflowId: "test-reapply", + RunId: tests.RunID, + } + taskqueue := "testTaskQueue" + identity := "testIdentity" + + history := []*historypb.HistoryEvent{ + { + EventId: 1, + EventType: enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_SIGNALED, + Version: 1, + }, + } + ms := workflow.TestLocalMutableState( + s.mockHistoryEngine.shard, + s.eventsCache, + tests.LocalNamespaceEntry, + log.NewTestLogger(), + workflowExecution.GetRunId(), + ) + // Add dummy event + addWorkflowExecutionStartedEvent(ms, workflowExecution, "wType", taskqueue, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) + wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := 
&persistence.GetWorkflowExecutionResponse{State: wfMs} + gceResponse := &persistence.GetCurrentExecutionResponse{RunID: tests.RunID} + s.mockExecutionMgr.EXPECT().GetCurrentExecution(gomock.Any(), gomock.Any()).Return(gceResponse, nil) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + s.mockEventsReapplier.EXPECT().ReapplyEvents(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()) + + err := s.mockHistoryEngine.ReapplyEvents( + context.Background(), + tests.NamespaceID, + workflowExecution.GetWorkflowId(), + workflowExecution.GetRunId(), + history, + ) + s.NoError(err) +} + +func (s *engineSuite) TestReapplyEvents_IgnoreSameVersionEvents() { + workflowExecution := commonpb.WorkflowExecution{ + WorkflowId: "test-reapply-same-version", + RunId: tests.RunID, + } + taskqueue := "testTaskQueue" + identity := "testIdentity" + + // TODO: Figure out why version is empty? + history := []*historypb.HistoryEvent{ + { + EventId: 1, + EventType: enumspb.EVENT_TYPE_TIMER_STARTED, + Version: common.EmptyVersion, + }, + } + ms := workflow.TestLocalMutableState( + s.mockHistoryEngine.shard, + s.eventsCache, + tests.LocalNamespaceEntry, + log.NewTestLogger(), + workflowExecution.GetRunId(), + ) + // Add dummy event + addWorkflowExecutionStartedEvent(ms, workflowExecution, "wType", taskqueue, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) + + wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + gceResponse := &persistence.GetCurrentExecutionResponse{RunID: tests.RunID} + s.mockExecutionMgr.EXPECT().GetCurrentExecution(gomock.Any(), gomock.Any()).Return(gceResponse, nil) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + s.mockEventsReapplier.EXPECT().ReapplyEvents(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(0) + + err := s.mockHistoryEngine.ReapplyEvents( + context.Background(), + tests.NamespaceID, + workflowExecution.GetWorkflowId(), + workflowExecution.GetRunId(), + history, + ) + s.NoError(err) +} + +func (s *engineSuite) TestReapplyEvents_ResetWorkflow() { + workflowExecution := commonpb.WorkflowExecution{ + WorkflowId: "test-reapply-reset-workflow", + RunId: tests.RunID, + } + taskqueue := "testTaskQueue" + identity := "testIdentity" + history := []*historypb.HistoryEvent{ + { + EventId: 1, + EventType: enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_SIGNALED, + Version: 100, + }, + } + ms := workflow.TestLocalMutableState( + s.mockHistoryEngine.shard, + s.eventsCache, + tests.LocalNamespaceEntry, + log.NewTestLogger(), + workflowExecution.GetRunId(), + ) + // Add dummy event + addWorkflowExecutionStartedEvent(ms, workflowExecution, "wType", taskqueue, payloads.EncodeString("input"), 100*time.Second, 50*time.Second, 200*time.Second, identity) + + wfMs := workflow.TestCloneToProto(ms) + wfMs.ExecutionState.State = enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED + wfMs.ExecutionInfo.LastWorkflowTaskStartedEventId = 1 + token, err := ms.GetCurrentBranchToken() + s.NoError(err) + item := versionhistory.NewVersionHistoryItem(1, 1) + versionHistory := versionhistory.NewVersionHistory(token, []*historyspb.VersionHistoryItem{item}) + wfMs.ExecutionInfo.VersionHistories = versionhistory.NewVersionHistories(versionHistory) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + gceResponse := &persistence.GetCurrentExecutionResponse{RunID: tests.RunID} + 
s.mockExecutionMgr.EXPECT().GetCurrentExecution(gomock.Any(), gomock.Any()).Return(gceResponse, nil).AnyTimes() + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + s.mockEventsReapplier.EXPECT().ReapplyEvents(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(0) + s.mockWorkflowResetter.EXPECT().ResetWorkflow( + gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil) + err = s.mockHistoryEngine.ReapplyEvents( + context.Background(), + tests.NamespaceID, + workflowExecution.GetWorkflowId(), + workflowExecution.GetRunId(), + history, + ) + s.NoError(err) +} + +func (s *engineSuite) TestEagerWorkflowStart_DoesNotCreateTransferTask() { + var recordedTasks []tasks.Task + + s.mockExecutionMgr.EXPECT().CreateWorkflowExecution(gomock.Any(), gomock.Any()).DoAndReturn(func(ctx context.Context, request *persistence.CreateWorkflowExecutionRequest) (*persistence.CreateWorkflowExecutionResponse, error) { + recordedTasks = request.NewWorkflowSnapshot.Tasks[tasks.CategoryTransfer] + persistenceResponse := persistence.CreateWorkflowExecutionResponse{NewMutableStateStats: tests.CreateWorkflowExecutionResponse.NewMutableStateStats} + return &persistenceResponse, nil + }) + + i := interceptor.NewTelemetryInterceptor(s.mockShard.GetNamespaceRegistry(), s.mockShard.GetMetricsHandler(), s.mockShard.Resource.Logger) + response, err := i.UnaryIntercept(context.Background(), nil, &grpc.UnaryServerInfo{FullMethod: "StartWorkflowExecution"}, func(ctx context.Context, req interface{}) (interface{}, error) { + response, err := s.mockHistoryEngine.StartWorkflowExecution(ctx, &historyservice.StartWorkflowExecutionRequest{ + NamespaceId: tests.NamespaceID.String(), + Attempt: 1, + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowId: "test", + Namespace: tests.Namespace.String(), + WorkflowType: &commonpb.WorkflowType{Name: "test"}, + TaskQueue: &taskqueuepb.TaskQueue{Kind: enumspb.TASK_QUEUE_KIND_NORMAL, Name: "test"}, + Identity: "test", + RequestId: "test", + RequestEagerExecution: true, + }, + }) + return response, err + }) + s.NoError(err) + s.Equal(len(response.(*historyservice.StartWorkflowExecutionResponse).EagerWorkflowTask.History.Events), 3) + s.Equal(response.(*historyservice.StartWorkflowExecutionResponse).EagerWorkflowTask.History.Events[0].EventType, enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_STARTED) + s.Equal(response.(*historyservice.StartWorkflowExecutionResponse).EagerWorkflowTask.History.Events[1].EventType, enumspb.EVENT_TYPE_WORKFLOW_TASK_SCHEDULED) + s.Equal(response.(*historyservice.StartWorkflowExecutionResponse).EagerWorkflowTask.History.Events[2].EventType, enumspb.EVENT_TYPE_WORKFLOW_TASK_STARTED) + s.Equal(len(recordedTasks), 0) +} + +func (s *engineSuite) TestEagerWorkflowStart_FromCron_SkipsEager() { + var recordedTasks []tasks.Task + + s.mockExecutionMgr.EXPECT().CreateWorkflowExecution(gomock.Any(), gomock.Any()).DoAndReturn(func(ctx context.Context, request *persistence.CreateWorkflowExecutionRequest) (*persistence.CreateWorkflowExecutionResponse, error) { + recordedTasks = request.NewWorkflowSnapshot.Tasks[tasks.CategoryTransfer] + persistenceResponse := persistence.CreateWorkflowExecutionResponse{NewMutableStateStats: tests.CreateWorkflowExecutionResponse.NewMutableStateStats} + return &persistenceResponse, nil + }) + + i := 
interceptor.NewTelemetryInterceptor(s.mockShard.GetNamespaceRegistry(), s.mockShard.GetMetricsHandler(), s.mockShard.Resource.Logger) + response, err := i.UnaryIntercept(context.Background(), nil, &grpc.UnaryServerInfo{FullMethod: "StartWorkflowExecution"}, func(ctx context.Context, req interface{}) (interface{}, error) { + firstWorkflowTaskBackoff := time.Second + response, err := s.mockHistoryEngine.StartWorkflowExecution(ctx, &historyservice.StartWorkflowExecutionRequest{ + NamespaceId: tests.NamespaceID.String(), + Attempt: 1, + ContinueAsNewInitiator: enumspb.CONTINUE_AS_NEW_INITIATOR_CRON_SCHEDULE, + FirstWorkflowTaskBackoff: &firstWorkflowTaskBackoff, + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowId: "test", + Namespace: tests.Namespace.String(), + WorkflowType: &commonpb.WorkflowType{Name: "test"}, + TaskQueue: &taskqueuepb.TaskQueue{Kind: enumspb.TASK_QUEUE_KIND_NORMAL, Name: "test"}, + Identity: "test", + RequestId: "test", + CronSchedule: "* * * * *", + RequestEagerExecution: true, + }, + }) + return response, err + }) + s.NoError(err) + s.Nil(response.(*historyservice.StartWorkflowExecutionResponse).EagerWorkflowTask) + s.Equal(len(recordedTasks), 0) +} + +func (s *engineSuite) getMutableState(testNamespaceID namespace.ID, we commonpb.WorkflowExecution) workflow.MutableState { + context, release, err := s.workflowCache.GetOrCreateWorkflowExecution( + context.Background(), + tests.NamespaceID, + we, + workflow.LockPriorityHigh, + ) + if err != nil { + return nil + } + defer release(nil) + + return context.(*workflow.ContextImpl).MutableState +} + +func (s *engineSuite) getActivityScheduledEvent( + ms workflow.MutableState, + scheduledEventID int64, +) *historypb.HistoryEvent { + event, _ := ms.GetActivityScheduledEvent(context.Background(), scheduledEventID) + return event +} + +func addWorkflowExecutionStartedEventWithParent(ms workflow.MutableState, workflowExecution commonpb.WorkflowExecution, + workflowType, taskQueue string, input *commonpb.Payloads, executionTimeout, runTimeout, taskTimeout time.Duration, + parentInfo *workflowspb.ParentExecutionInfo, identity string) *historypb.HistoryEvent { + + startRequest := &workflowservice.StartWorkflowExecutionRequest{ + WorkflowId: workflowExecution.WorkflowId, + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueue}, + Input: input, + WorkflowExecutionTimeout: &executionTimeout, + WorkflowRunTimeout: &runTimeout, + WorkflowTaskTimeout: &taskTimeout, + Identity: identity, + } + + event, _ := ms.AddWorkflowExecutionStartedEvent( + workflowExecution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: tests.NamespaceID.String(), + StartRequest: startRequest, + ParentExecutionInfo: parentInfo, + }, + ) + + return event +} + +func addWorkflowExecutionStartedEvent(ms workflow.MutableState, workflowExecution commonpb.WorkflowExecution, + workflowType, taskQueue string, input *commonpb.Payloads, executionTimeout, runTimeout, taskTimeout time.Duration, + identity string) *historypb.HistoryEvent { + return addWorkflowExecutionStartedEventWithParent(ms, workflowExecution, workflowType, taskQueue, input, + executionTimeout, runTimeout, taskTimeout, nil, identity) +} + +func addWorkflowTaskScheduledEvent(ms workflow.MutableState) *workflow.WorkflowTaskInfo { + workflowTask, _ := ms.AddWorkflowTaskScheduledEvent(false, enumsspb.WORKFLOW_TASK_TYPE_NORMAL) + return workflowTask +} + +func addWorkflowTaskStartedEvent(ms 
workflow.MutableState, scheduledEventID int64, taskQueue, + identity string) *historypb.HistoryEvent { + return addWorkflowTaskStartedEventWithRequestID(ms, scheduledEventID, tests.RunID, taskQueue, identity) +} + +func addWorkflowTaskStartedEventWithRequestID(ms workflow.MutableState, scheduledEventID int64, requestID string, + taskQueue, identity string) *historypb.HistoryEvent { + event, _, _ := ms.AddWorkflowTaskStartedEvent( + scheduledEventID, + requestID, + &taskqueuepb.TaskQueue{Name: taskQueue}, + identity, + ) + + return event +} + +func addWorkflowTaskCompletedEvent(s *suite.Suite, ms workflow.MutableState, scheduledEventID, startedEventID int64, identity string) *historypb.HistoryEvent { + workflowTask := ms.GetWorkflowTaskByID(scheduledEventID) + s.NotNil(workflowTask) + s.Equal(startedEventID, workflowTask.StartedEventID) + + event, _ := ms.AddWorkflowTaskCompletedEvent(workflowTask, &workflowservice.RespondWorkflowTaskCompletedRequest{ + Identity: identity, + }, defaultWorkflowTaskCompletionLimits) + + ms.FlushBufferedEvents() + + return event +} + +func addActivityTaskScheduledEvent( + ms workflow.MutableState, + workflowTaskCompletedID int64, + activityID, activityType, + taskQueue string, + input *commonpb.Payloads, + scheduleToCloseTimeout time.Duration, + scheduleToStartTimeout time.Duration, + startToCloseTimeout time.Duration, + heartbeatTimeout time.Duration, +) (*historypb.HistoryEvent, + *persistencespb.ActivityInfo) { + + event, ai, _ := ms.AddActivityTaskScheduledEvent(workflowTaskCompletedID, &commandpb.ScheduleActivityTaskCommandAttributes{ + ActivityId: activityID, + ActivityType: &commonpb.ActivityType{Name: activityType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueue}, + Input: input, + ScheduleToCloseTimeout: &scheduleToCloseTimeout, + ScheduleToStartTimeout: &scheduleToStartTimeout, + StartToCloseTimeout: &startToCloseTimeout, + HeartbeatTimeout: &heartbeatTimeout, + }, false) + + return event, ai +} + +func addActivityTaskScheduledEventWithRetry( + ms workflow.MutableState, + workflowTaskCompletedID int64, + activityID, activityType, + taskQueue string, + input *commonpb.Payloads, + scheduleToCloseTimeout time.Duration, + scheduleToStartTimeout time.Duration, + startToCloseTimeout time.Duration, + heartbeatTimeout time.Duration, + retryPolicy *commonpb.RetryPolicy, +) (*historypb.HistoryEvent, *persistencespb.ActivityInfo) { + + event, ai, _ := ms.AddActivityTaskScheduledEvent(workflowTaskCompletedID, &commandpb.ScheduleActivityTaskCommandAttributes{ + ActivityId: activityID, + ActivityType: &commonpb.ActivityType{Name: activityType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueue}, + Input: input, + ScheduleToCloseTimeout: &scheduleToCloseTimeout, + ScheduleToStartTimeout: &scheduleToStartTimeout, + StartToCloseTimeout: &startToCloseTimeout, + HeartbeatTimeout: &heartbeatTimeout, + RetryPolicy: retryPolicy, + }, false) + + return event, ai +} + +func addActivityTaskStartedEvent(ms workflow.MutableState, scheduledEventID int64, identity string) *historypb.HistoryEvent { + ai, _ := ms.GetActivityInfo(scheduledEventID) + event, _ := ms.AddActivityTaskStartedEvent(ai, scheduledEventID, tests.RunID, identity) + return event +} + +func addActivityTaskCompletedEvent(ms workflow.MutableState, scheduledEventID, startedEventID int64, result *commonpb.Payloads, + identity string) *historypb.HistoryEvent { + event, _ := ms.AddActivityTaskCompletedEvent(scheduledEventID, startedEventID, &workflowservice.RespondActivityTaskCompletedRequest{ + Result: 
result, + Identity: identity, + }) + + return event +} + +func addActivityTaskFailedEvent(ms workflow.MutableState, scheduledEventID, startedEventID int64, failure *failurepb.Failure, retryState enumspb.RetryState, identity string) *historypb.HistoryEvent { + event, _ := ms.AddActivityTaskFailedEvent(scheduledEventID, startedEventID, failure, retryState, identity) + return event +} + +func addTimerStartedEvent(ms workflow.MutableState, workflowTaskCompletedEventID int64, timerID string, + timeout time.Duration) (*historypb.HistoryEvent, *persistencespb.TimerInfo) { + event, ti, _ := ms.AddTimerStartedEvent(workflowTaskCompletedEventID, + &commandpb.StartTimerCommandAttributes{ + TimerId: timerID, + StartToFireTimeout: &timeout, + }) + return event, ti +} + +func addTimerFiredEvent(ms workflow.MutableState, timerID string) *historypb.HistoryEvent { + event, _ := ms.AddTimerFiredEvent(timerID) + return event +} + +func addRequestCancelInitiatedEvent(ms workflow.MutableState, workflowTaskCompletedEventID int64, + cancelRequestID string, namespace namespace.Name, namespaceID namespace.ID, workflowID, runID string) (*historypb.HistoryEvent, *persistencespb.RequestCancelInfo) { + event, rci, _ := ms.AddRequestCancelExternalWorkflowExecutionInitiatedEvent(workflowTaskCompletedEventID, + cancelRequestID, &commandpb.RequestCancelExternalWorkflowExecutionCommandAttributes{ + Namespace: namespace.String(), + WorkflowId: workflowID, + RunId: runID, + Reason: "cancellation reason", + }, + namespaceID) + + return event, rci +} + +func addCancelRequestedEvent(ms workflow.MutableState, initiatedID int64, namespace namespace.Name, namespaceID namespace.ID, workflowID, runID string) *historypb.HistoryEvent { + event, _ := ms.AddExternalWorkflowExecutionCancelRequested(initiatedID, namespace, namespaceID, workflowID, runID) + return event +} + +func addRequestSignalInitiatedEvent(ms workflow.MutableState, workflowTaskCompletedEventID int64, + signalRequestID string, namespace namespace.Name, namespaceID namespace.ID, workflowID, runID, signalName string, input *commonpb.Payloads, + control string, header *commonpb.Header) (*historypb.HistoryEvent, *persistencespb.SignalInfo) { + event, si, _ := ms.AddSignalExternalWorkflowExecutionInitiatedEvent(workflowTaskCompletedEventID, signalRequestID, + &commandpb.SignalExternalWorkflowExecutionCommandAttributes{ + Namespace: namespace.String(), + Execution: &commonpb.WorkflowExecution{ + WorkflowId: workflowID, + RunId: runID, + }, + SignalName: signalName, + Input: input, + Control: control, + Header: header, + }, namespaceID) + + return event, si +} + +func addSignaledEvent(ms workflow.MutableState, initiatedID int64, namespace namespace.Name, namespaceID namespace.ID, workflowID, runID string, control string) *historypb.HistoryEvent { + event, _ := ms.AddExternalWorkflowExecutionSignaled(initiatedID, namespace, namespaceID, workflowID, runID, control) + return event +} + +func addStartChildWorkflowExecutionInitiatedEvent( + ms workflow.MutableState, + workflowTaskCompletedID int64, + createRequestID string, + namespace namespace.Name, + namespaceID namespace.ID, + workflowID, workflowType, taskQueue string, + input *commonpb.Payloads, + executionTimeout, runTimeout, taskTimeout time.Duration, + parentClosePolicy enumspb.ParentClosePolicy, +) (*historypb.HistoryEvent, *persistencespb.ChildExecutionInfo) { + + event, cei, _ := ms.AddStartChildWorkflowExecutionInitiatedEvent(workflowTaskCompletedID, createRequestID, + 
&commandpb.StartChildWorkflowExecutionCommandAttributes{ + Namespace: namespace.String(), + WorkflowId: workflowID, + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueue}, + Input: input, + WorkflowExecutionTimeout: &executionTimeout, + WorkflowRunTimeout: &runTimeout, + WorkflowTaskTimeout: &taskTimeout, + Control: "", + ParentClosePolicy: parentClosePolicy, + }, namespaceID) + return event, cei +} + +func addChildWorkflowExecutionStartedEvent(ms workflow.MutableState, initiatedID int64, workflowID, runID string, + workflowType string, clock *clockspb.VectorClock) *historypb.HistoryEvent { + event, _ := ms.AddChildWorkflowExecutionStartedEvent( + &commonpb.WorkflowExecution{ + WorkflowId: workflowID, + RunId: runID, + }, + &commonpb.WorkflowType{Name: workflowType}, + initiatedID, + &commonpb.Header{}, + clock, + ) + return event +} + +func addChildWorkflowExecutionCompletedEvent(ms workflow.MutableState, initiatedID int64, childExecution *commonpb.WorkflowExecution, + attributes *historypb.WorkflowExecutionCompletedEventAttributes) *historypb.HistoryEvent { + event, _ := ms.AddChildWorkflowExecutionCompletedEvent(initiatedID, childExecution, attributes) + return event +} + +func addCompleteWorkflowEvent(ms workflow.MutableState, workflowTaskCompletedEventID int64, + result *commonpb.Payloads) *historypb.HistoryEvent { + event, _ := ms.AddCompletedWorkflowEvent( + workflowTaskCompletedEventID, + &commandpb.CompleteWorkflowExecutionCommandAttributes{ + Result: result, + }, + "") + return event +} + +func addFailWorkflowEvent( + ms workflow.MutableState, + workflowTaskCompletedEventID int64, + failure *failurepb.Failure, + retryState enumspb.RetryState, +) *historypb.HistoryEvent { + event, _ := ms.AddFailWorkflowEvent( + workflowTaskCompletedEventID, + retryState, + &commandpb.FailWorkflowExecutionCommandAttributes{ + Failure: failure, + }, + "", + ) + return event +} diff -Nru temporal-1.21.5-1/src/service/history/memory_scheduled_queue_factory.go temporal-1.22.5/src/service/history/memory_scheduled_queue_factory.go --- temporal-1.21.5-1/src/service/history/memory_scheduled_queue_factory.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/memory_scheduled_queue_factory.go 2024-02-23 09:45:43.000000000 +0000 @@ -50,6 +50,8 @@ TimeSource clock.TimeSource MetricsHandler metrics.Handler Logger log.SnTaggedLogger + + ExecutorWrapper queues.ExecutorWrapper `optional:"true"` } memoryScheduledQueueFactory struct { @@ -61,6 +63,8 @@ timeSource clock.TimeSource metricsHandler metrics.Handler logger log.SnTaggedLogger + + executorWrapper queues.ExecutorWrapper } ) @@ -86,6 +90,7 @@ timeSource: params.TimeSource, metricsHandler: metricsHandler, logger: logger, + executorWrapper: params.ExecutorWrapper, } } @@ -113,6 +118,9 @@ shardCtx.GetConfig(), nil, ) + if f.executorWrapper != nil { + speculativeWorkflowTaskTimeoutExecutor = f.executorWrapper.Wrap(speculativeWorkflowTaskTimeoutExecutor) + } return queues.NewSpeculativeWorkflowTaskTimeoutQueue( f.scheduler, diff -Nru temporal-1.21.5-1/src/service/history/nDCStandbyTaskUtil.go temporal-1.22.5/src/service/history/nDCStandbyTaskUtil.go --- temporal-1.21.5-1/src/service/history/nDCStandbyTaskUtil.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/nDCStandbyTaskUtil.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,267 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. 
-// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package history - -import ( - "context" - "errors" - "time" - - taskqueuepb "go.temporal.io/api/taskqueue/v1" - taskqueuespb "go.temporal.io/server/api/taskqueue/v1" - - "go.temporal.io/server/common" - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/log/tag" - "go.temporal.io/server/common/namespace" - "go.temporal.io/server/common/persistence/versionhistory" - "go.temporal.io/server/service/history/consts" - "go.temporal.io/server/service/history/tasks" - "go.temporal.io/server/service/history/workflow" -) - -type ( - standbyActionFn func(context.Context, workflow.Context, workflow.MutableState) (interface{}, error) - standbyPostActionFn func(context.Context, tasks.Task, interface{}, log.Logger) error - - standbyCurrentTimeFn func() time.Time -) - -func standbyTaskPostActionNoOp( - _ context.Context, - _ tasks.Task, - postActionInfo interface{}, - _ log.Logger, -) error { - - if postActionInfo == nil { - return nil - } - - // return error so task processing logic will retry - return consts.ErrTaskRetry -} - -func standbyTransferTaskPostActionTaskDiscarded( - _ context.Context, - taskInfo tasks.Task, - postActionInfo interface{}, - logger log.Logger, -) error { - - if postActionInfo == nil { - return nil - } - - logger.Warn("Discarding standby transfer task due to task being pending for too long.", tag.Task(taskInfo)) - return consts.ErrTaskDiscarded -} - -func standbyTimerTaskPostActionTaskDiscarded( - _ context.Context, - taskInfo tasks.Task, - postActionInfo interface{}, - logger log.Logger, -) error { - - if postActionInfo == nil { - return nil - } - - logger.Warn("Discarding standby timer task due to task being pending for too long.", tag.Task(taskInfo)) - return consts.ErrTaskDiscarded -} - -type ( - historyResendInfo struct { - - // used by NDC - lastEventID int64 - lastEventVersion int64 - } - - activityTaskPostActionInfo struct { - *historyResendInfo - - taskQueue string - activityTaskScheduleToStartTimeout time.Duration - versionDirective *taskqueuespb.TaskVersionDirective - } - - workflowTaskPostActionInfo struct { - *historyResendInfo - - workflowTaskScheduleToStartTimeout *time.Duration - taskqueue taskqueuepb.TaskQueue - versionDirective *taskqueuespb.TaskVersionDirective - } - - startChildExecutionPostActionInfo struct { - *historyResendInfo - } -) - -var ( - // verifyChildCompletionRecordedInfo is the post action info returned by 
- // standby close execution task action func. The actual content of the - // struct doesn't matter. We just need a non-nil pointer to to indicate - // that the verification has failed. - verifyChildCompletionRecordedInfo = &struct{}{} -) - -func newHistoryResendInfo( - lastEventID int64, - lastEventVersion int64, -) *historyResendInfo { - return &historyResendInfo{ - lastEventID: lastEventID, - lastEventVersion: lastEventVersion, - } -} - -func newActivityTaskPostActionInfo( - mutableState workflow.MutableState, - activityScheduleToStartTimeout time.Duration, - useCompatibleVersion bool, -) (*activityTaskPostActionInfo, error) { - resendInfo, err := getHistoryResendInfo(mutableState) - if err != nil { - return nil, err - } - - directive := common.MakeVersionDirectiveForActivityTask(mutableState.GetWorkerVersionStamp(), useCompatibleVersion) - - return &activityTaskPostActionInfo{ - historyResendInfo: resendInfo, - activityTaskScheduleToStartTimeout: activityScheduleToStartTimeout, - versionDirective: directive, - }, nil -} - -func newActivityRetryTimePostActionInfo( - mutableState workflow.MutableState, - taskQueue string, - activityScheduleToStartTimeout time.Duration, - useCompatibleVersion bool, -) (*activityTaskPostActionInfo, error) { - resendInfo, err := getHistoryResendInfo(mutableState) - if err != nil { - return nil, err - } - - directive := common.MakeVersionDirectiveForActivityTask(mutableState.GetWorkerVersionStamp(), useCompatibleVersion) - - return &activityTaskPostActionInfo{ - historyResendInfo: resendInfo, - taskQueue: taskQueue, - activityTaskScheduleToStartTimeout: activityScheduleToStartTimeout, - versionDirective: directive, - }, nil -} - -func newWorkflowTaskPostActionInfo( - mutableState workflow.MutableState, - workflowTaskScheduleToStartTimeout *time.Duration, - taskqueue taskqueuepb.TaskQueue, -) (*workflowTaskPostActionInfo, error) { - resendInfo, err := getHistoryResendInfo(mutableState) - if err != nil { - return nil, err - } - - directive := common.MakeVersionDirectiveForWorkflowTask( - mutableState.GetWorkerVersionStamp(), - mutableState.GetLastWorkflowTaskStartedEventID(), - ) - - return &workflowTaskPostActionInfo{ - historyResendInfo: resendInfo, - workflowTaskScheduleToStartTimeout: workflowTaskScheduleToStartTimeout, - taskqueue: taskqueue, - versionDirective: directive, - }, nil -} - -func getHistoryResendInfo( - mutableState workflow.MutableState, -) (*historyResendInfo, error) { - - currentBranch, err := versionhistory.GetCurrentVersionHistory(mutableState.GetExecutionInfo().GetVersionHistories()) - if err != nil { - return nil, err - } - lastItem, err := versionhistory.GetLastVersionHistoryItem(currentBranch) - if err != nil { - return nil, err - } - return newHistoryResendInfo(lastItem.GetEventId(), lastItem.GetVersion()), nil -} - -func getStandbyPostActionFn( - taskInfo tasks.Task, - standbyNow standbyCurrentTimeFn, - standbyTaskMissingEventsResendDelay time.Duration, - standbyTaskMissingEventsDiscardDelay time.Duration, - fetchHistoryStandbyPostActionFn standbyPostActionFn, - discardTaskStandbyPostActionFn standbyPostActionFn, -) standbyPostActionFn { - - // this is for task retry, use machine time - now := standbyNow() - taskTime := taskInfo.GetVisibilityTime() - resendTime := taskTime.Add(standbyTaskMissingEventsResendDelay) - discardTime := taskTime.Add(standbyTaskMissingEventsDiscardDelay) - - // now < task start time + StandbyTaskMissingEventsResendDelay - if now.Before(resendTime) { - return standbyTaskPostActionNoOp - } - - // task start 
time + StandbyTaskMissingEventsResendDelay <= now < task start time + StandbyTaskMissingEventsDiscardDelay - if now.Before(discardTime) { - return fetchHistoryStandbyPostActionFn - } - - // task start time + StandbyTaskMissingEventsDiscardDelay <= now - return discardTaskStandbyPostActionFn -} - -func getRemoteClusterName( - currentCluster string, - registry namespace.Registry, - namespaceID string, -) (string, error) { - namespaceEntry, err := registry.GetNamespaceByID(namespace.ID(namespaceID)) - if err != nil { - return "", err - } - - remoteClusterName := namespaceEntry.ActiveClusterName() - if remoteClusterName == currentCluster { - // namespace has turned active, retry the task - return "", errors.New("namespace becomes active when processing task as standby") - } - return remoteClusterName, nil -} diff -Nru temporal-1.21.5-1/src/service/history/nDCTaskUtil.go temporal-1.22.5/src/service/history/nDCTaskUtil.go --- temporal-1.21.5-1/src/service/history/nDCTaskUtil.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/nDCTaskUtil.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,226 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE.
- -package history - -import ( - "context" - - enumspb "go.temporal.io/api/enums/v1" - "go.temporal.io/api/serviceerror" - - enumsspb "go.temporal.io/server/api/enums/v1" - persistencespb "go.temporal.io/server/api/persistence/v1" - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/log/tag" - "go.temporal.io/server/common/metrics" - "go.temporal.io/server/common/namespace" - "go.temporal.io/server/service/history/consts" - "go.temporal.io/server/service/history/shard" - "go.temporal.io/server/service/history/tasks" - "go.temporal.io/server/service/history/workflow" -) - -// CheckTaskVersion will return an error if task version check fails -func CheckTaskVersion( - shard shard.Context, - logger log.Logger, - namespace *namespace.Namespace, - version int64, - taskVersion int64, - task interface{}, -) error { - - if !shard.GetClusterMetadata().IsGlobalNamespaceEnabled() { - return nil - } - - // the first return value is whether this task is valid for further processing - if !namespace.IsGlobalNamespace() { - logger.Debug("NamespaceID is not global, task version check pass", tag.WorkflowNamespaceID(namespace.ID().String()), tag.Task(task)) - return nil - } else if version != taskVersion { - logger.Debug("NamespaceID is global, task version != target version", tag.WorkflowNamespaceID(namespace.ID().String()), tag.Task(task), tag.TaskVersion(version)) - return consts.ErrTaskVersionMismatch - } - logger.Debug("NamespaceID is global, task version == target version", tag.WorkflowNamespaceID(namespace.ID().String()), tag.Task(task), tag.TaskVersion(version)) - return nil -} - -// load mutable state, if mutable state's next event ID <= task ID, will attempt to refresh -// if still mutable state's next event ID <= task ID, will return nil, nil -func loadMutableStateForTransferTask( - ctx context.Context, - wfContext workflow.Context, - transferTask tasks.Task, - metricsHandler metrics.Handler, - logger log.Logger, -) (workflow.MutableState, error) { - logger = tasks.InitializeLogger(transferTask, logger) - mutableState, err := LoadMutableStateForTask( - ctx, - wfContext, - transferTask, - getTransferTaskEventIDAndRetryable, - metricsHandler.WithTags(metrics.OperationTag(metrics.TransferQueueProcessorScope)), - logger, - ) - if err != nil { - // When standby task executor executes task in active cluster (and vice versa), - // mutable state might be already deleted by active task executor and NotFound is a valid case which shouldn't be logged. - // Unfortunately, this will also skip logging of actual errors that might happen due to serious bugs, - // but these errors, most likely, will happen for other task types too, and will be logged. - // TODO: remove this logic once multi-cursor is implemented and only one task processor is running in each cluster. - skipNotFoundLog := - transferTask.GetType() == enumsspb.TASK_TYPE_TRANSFER_CLOSE_EXECUTION || - transferTask.GetType() == enumsspb.TASK_TYPE_TRANSFER_DELETE_EXECUTION - - if !skipNotFoundLog { - switch err.(type) { - case *serviceerror.NotFound: - // NotFound error will be ignored by task error handling logic, so log it here - // for transfer tasks, mutable state should always be available - logger.Warn("Transfer Task Processor: workflow mutable state not found, skip.") - case *serviceerror.NamespaceNotFound: - // NamespaceNotFound error will be ignored by task error handling logic, so log it here - // for transfer tasks, namespace should always be available.
- logger.Warn("Transfer Task Processor: namespace not found, skip.") - } - } - } - return mutableState, err -} - -// load mutable state, if mutable state's next event ID <= task ID, will attempt to refresh -// if still mutable state's next event ID <= task ID, will return nil, nil -func loadMutableStateForTimerTask( - ctx context.Context, - wfContext workflow.Context, - timerTask tasks.Task, - metricsHandler metrics.Handler, - logger log.Logger, -) (workflow.MutableState, error) { - logger = tasks.InitializeLogger(timerTask, logger) - return LoadMutableStateForTask( - ctx, - wfContext, - timerTask, - getTimerTaskEventIDAndRetryable, - metricsHandler.WithTags(metrics.OperationTag(metrics.TimerQueueProcessorScope)), - logger, - ) -} - -func LoadMutableStateForTask( - ctx context.Context, - wfContext workflow.Context, - task tasks.Task, - taskEventIDAndRetryable func(task tasks.Task, executionInfo *persistencespb.WorkflowExecutionInfo) (int64, bool), - metricsHandler metrics.Handler, - logger log.Logger, -) (workflow.MutableState, error) { - - mutableState, err := wfContext.LoadMutableState(ctx) - if err != nil { - return nil, err - } - - // check to see if cache needs to be refreshed as we could potentially have stale workflow execution - // the exception is workflow task consistently fail - // there will be no event generated, thus making the workflow task schedule ID == next event ID - eventID, retryable := taskEventIDAndRetryable(task, mutableState.GetExecutionInfo()) - if eventID < mutableState.GetNextEventID() || !retryable { - return mutableState, nil - } - - metricsHandler.Counter(metrics.StaleMutableStateCounter.GetMetricName()).Record(1) - wfContext.Clear() - - mutableState, err = wfContext.LoadMutableState(ctx) - if err != nil { - return nil, err - } - // after refresh, still mutable state's next event ID <= task's event ID - if eventID >= mutableState.GetNextEventID() { - metricsHandler.Counter(metrics.TaskSkipped.GetMetricName()).Record(1) - logger.Info("Task Processor: task event ID >= MS NextEventID, skip.", - tag.WorkflowNextEventID(mutableState.GetNextEventID()), - ) - return nil, nil - } - return mutableState, nil -} - -func getTransferTaskEventIDAndRetryable( - transferTask tasks.Task, - executionInfo *persistencespb.WorkflowExecutionInfo, -) (int64, bool) { - eventID := tasks.GetTransferTaskEventID(transferTask) - retryable := true - - if task, ok := transferTask.(*tasks.WorkflowTask); ok { - retryable = !(executionInfo.WorkflowTaskScheduledEventId == task.ScheduledEventID && executionInfo.WorkflowTaskAttempt > 1) - } - - return eventID, retryable -} - -func getTimerTaskEventIDAndRetryable( - timerTask tasks.Task, - executionInfo *persistencespb.WorkflowExecutionInfo, -) (int64, bool) { - eventID := tasks.GetTimerTaskEventID(timerTask) - retryable := true - - if task, ok := timerTask.(*tasks.WorkflowTaskTimeoutTask); ok { - retryable = !(executionInfo.WorkflowTaskScheduledEventId == task.EventID && executionInfo.WorkflowTaskAttempt > 1) && - executionInfo.WorkflowTaskType != enumsspb.WORKFLOW_TASK_TYPE_SPECULATIVE - } - - return eventID, retryable -} - -func getNamespaceTagByID( - registry namespace.Registry, - namespaceID string, -) metrics.Tag { - namespaceName, err := registry.GetNamespaceName(namespace.ID(namespaceID)) - if err != nil { - return metrics.NamespaceUnknownTag() - } - - return metrics.NamespaceTag(namespaceName.String()) -} - -func getNamespaceTagAndReplicationStateByID( - registry namespace.Registry, - namespaceID string, -) (metrics.Tag, 
enumspb.ReplicationState) { - namespace, err := registry.GetNamespaceByID(namespace.ID(namespaceID)) - if err != nil { - return metrics.NamespaceUnknownTag(), enumspb.REPLICATION_STATE_UNSPECIFIED - } - - return metrics.NamespaceTag(namespace.Name().String()), namespace.ReplicationState() -} diff -Nru temporal-1.21.5-1/src/service/history/ndc/activity_replicator.go temporal-1.22.5/src/service/history/ndc/activity_replicator.go --- temporal-1.21.5-1/src/service/history/ndc/activity_replicator.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/ndc/activity_replicator.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,349 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
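LoadMutableStateForTask in the removed nDCTaskUtil.go above implements a reload-then-skip pattern: if the task references an event at or beyond the cached next event ID, the workflow context is cleared and mutable state is reloaded once; if the state is still behind the task after the reload, the task is skipped rather than retried. A rough, self-contained sketch of the same pattern, with a hypothetical store interface standing in for workflow.Context, is:

package main

import (
	"context"
	"fmt"
)

// store is a hypothetical stand-in for the workflow execution context: it
// returns a cached view of mutable state and can be cleared to force a reload.
type store interface {
	Load(ctx context.Context) (nextEventID int64, err error)
	Clear()
}

// loadForTask returns (nextEventID, ok, err); ok is false when the task
// should be skipped because the state is still behind it after one reload.
func loadForTask(ctx context.Context, s store, taskEventID int64) (int64, bool, error) {
	next, err := s.Load(ctx)
	if err != nil {
		return 0, false, err
	}
	if taskEventID < next {
		return next, true, nil
	}
	// The cached state may be stale: clear the cache and reload once.
	s.Clear()
	next, err = s.Load(ctx)
	if err != nil {
		return 0, false, err
	}
	if taskEventID >= next {
		// Still behind the task after a refresh: skip instead of retrying forever.
		return 0, false, nil
	}
	return next, true, nil
}

// fakeStore is a tiny in-memory implementation used only for the example.
type fakeStore struct {
	next, afterReload int64
}

func (f *fakeStore) Load(ctx context.Context) (int64, error) { return f.next, nil }
func (f *fakeStore) Clear()                                  { f.next = f.afterReload }

func main() {
	s := &fakeStore{next: 5, afterReload: 12}
	next, ok, err := loadForTask(context.Background(), s, 10)
	fmt.Println(next, ok, err) // 12 true <nil>
}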
- -//go:generate mockgen -copyright_file ../../../LICENSE -package $GOPACKAGE -source $GOFILE -destination activity_replicator_mock.go - -package ndc - -import ( - "context" - "time" - - commonpb "go.temporal.io/api/common/v1" - "go.temporal.io/api/serviceerror" - - enumsspb "go.temporal.io/server/api/enums/v1" - historyspb "go.temporal.io/server/api/history/v1" - "go.temporal.io/server/api/historyservice/v1" - persistencespb "go.temporal.io/server/api/persistence/v1" - "go.temporal.io/server/common" - "go.temporal.io/server/common/cluster" - "go.temporal.io/server/common/definition" - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/log/tag" - "go.temporal.io/server/common/namespace" - "go.temporal.io/server/common/persistence" - "go.temporal.io/server/common/persistence/versionhistory" - "go.temporal.io/server/common/primitives/timestamp" - serviceerrors "go.temporal.io/server/common/serviceerror" - "go.temporal.io/server/service/history/shard" - "go.temporal.io/server/service/history/tasks" - "go.temporal.io/server/service/history/workflow" - wcache "go.temporal.io/server/service/history/workflow/cache" -) - -const ( - resendMissingEventMessage = "Resend missed sync activity events" - resendHigherVersionMessage = "Resend sync activity events due to a higher version received" -) - -type ( - ActivityReplicator interface { - SyncActivity( - ctx context.Context, - request *historyservice.SyncActivityRequest, - ) error - } - - ActivityReplicatorImpl struct { - workflowCache wcache.Cache - clusterMetadata cluster.Metadata - logger log.Logger - } -) - -func NewActivityReplicator( - shard shard.Context, - workflowCache wcache.Cache, - logger log.Logger, -) *ActivityReplicatorImpl { - - return &ActivityReplicatorImpl{ - workflowCache: workflowCache, - clusterMetadata: shard.GetClusterMetadata(), - logger: log.With(logger, tag.ComponentHistoryReplicator), - } -} - -func (r *ActivityReplicatorImpl) SyncActivity( - ctx context.Context, - request *historyservice.SyncActivityRequest, -) (retError error) { - - // sync activity info will only be sent from active side, when - // 1. activity retry - // 2. activity start - // 3. 
activity heart beat - // no sync activity task will be sent when active side fail / timeout activity, - namespaceID := namespace.ID(request.GetNamespaceId()) - execution := commonpb.WorkflowExecution{ - WorkflowId: request.WorkflowId, - RunId: request.RunId, - } - - executionContext, release, err := r.workflowCache.GetOrCreateWorkflowExecution( - ctx, - namespaceID, - execution, - workflow.LockPriorityHigh, - ) - if err != nil { - // for get workflow execution context, with valid run id - // err will not be of type EntityNotExistsError - return err - } - defer func() { release(retError) }() - - mutableState, err := executionContext.LoadMutableState(ctx) - if err != nil { - if _, isNotFound := err.(*serviceerror.NotFound); isNotFound { - // this can happen if the workflow start event and this sync activity task are out of order - // or the target workflow is long gone - // the safe solution to this is to throw away the sync activity task - // or otherwise, worker attempt will exceed limit and put this message to DLQ - return nil - } - return err - } - - scheduledEventID := request.GetScheduledEventId() - shouldApply, err := r.testVersionHistory( - namespaceID, - execution.GetWorkflowId(), - execution.GetRunId(), - scheduledEventID, - mutableState, - request.GetVersionHistory(), - ) - if err != nil || !shouldApply { - return err - } - - activityInfo, ok := mutableState.GetActivityInfo(scheduledEventID) - if !ok { - // this should not retry, can be caused by out of order delivery - // since the activity is already finished - return nil - } - if shouldApply := r.testActivity( - request.GetVersion(), - request.GetAttempt(), - timestamp.TimeValue(request.GetLastHeartbeatTime()), - activityInfo, - ); !shouldApply { - return nil - } - - // sync activity with empty started ID means activity retry - eventTime := timestamp.TimeValue(request.GetScheduledTime()) - if request.StartedEventId == common.EmptyEventID && request.Attempt > activityInfo.GetAttempt() { - mutableState.AddTasks(&tasks.ActivityRetryTimerTask{ - WorkflowKey: definition.WorkflowKey{ - NamespaceID: request.GetNamespaceId(), - WorkflowID: request.GetWorkflowId(), - RunID: request.GetRunId(), - }, - VisibilityTimestamp: eventTime, - EventID: request.GetScheduledEventId(), - Version: request.GetVersion(), - Attempt: request.GetAttempt(), - }) - } - - refreshTask := r.testRefreshActivityTimerTaskMask( - request.GetVersion(), - request.GetAttempt(), - activityInfo, - ) - err = mutableState.ReplicateActivityInfo(request, refreshTask) - if err != nil { - return err - } - - // see whether we need to refresh the activity timer - startedTime := timestamp.TimeValue(request.GetStartedTime()) - lastHeartbeatTime := timestamp.TimeValue(request.GetLastHeartbeatTime()) - if eventTime.Before(startedTime) { - eventTime = startedTime - } - if eventTime.Before(lastHeartbeatTime) { - eventTime = lastHeartbeatTime - } - - // passive logic need to explicitly call create timer - if _, err := workflow.NewTimerSequence( - mutableState, - ).CreateNextActivityTimer(); err != nil { - return err - } - - updateMode := persistence.UpdateWorkflowModeUpdateCurrent - if state, _ := mutableState.GetWorkflowStateStatus(); state == enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE { - updateMode = persistence.UpdateWorkflowModeBypassCurrent - } - - return executionContext.UpdateWorkflowExecutionWithNew( - ctx, - updateMode, - nil, // no new workflow - nil, // no new workflow - workflow.TransactionPolicyPassive, - nil, - ) -} - -func (r *ActivityReplicatorImpl) 
testRefreshActivityTimerTaskMask( - version int64, - attempt int32, - activityInfo *persistencespb.ActivityInfo, -) bool { - - // calculate whether to reset the activity timer task status bits - // reset timer task status bits if - // 1. same source cluster & attempt changes - // 2. different source cluster - if !r.clusterMetadata.IsVersionFromSameCluster(version, activityInfo.Version) { - return true - } else if activityInfo.Attempt != attempt { - return true - } - return false -} - -func (r *ActivityReplicatorImpl) testActivity( - version int64, - attempt int32, - lastHeartbeatTime time.Time, - activityInfo *persistencespb.ActivityInfo, -) bool { - - if activityInfo.Version > version { - // this should not retry, can be caused by failover or reset - return false - } - - if activityInfo.Version < version { - // incoming version larger then local version, should update activity - return true - } - - // activityInfo.Version == version - if activityInfo.Attempt > attempt { - // this should not retry, can be caused by failover or reset - return false - } - - // activityInfo.Version == version - if activityInfo.Attempt < attempt { - // version equal & attempt larger then existing, should update activity - return true - } - - // activityInfo.Version == version & activityInfo.Attempt == attempt - - // last heartbeat after existing heartbeat & should update activity - if !timestamp.TimeValue(activityInfo.LastHeartbeatUpdateTime).IsZero() && - activityInfo.LastHeartbeatUpdateTime.After(lastHeartbeatTime) { - // this should not retry, can be caused by out of order delivery - return false - } - return true -} - -func (r *ActivityReplicatorImpl) testVersionHistory( - namespaceID namespace.ID, - workflowID string, - runID string, - scheduledEventID int64, - mutableState workflow.MutableState, - incomingVersionHistory *historyspb.VersionHistory, -) (bool, error) { - - currentVersionHistory, err := versionhistory.GetCurrentVersionHistory( - mutableState.GetExecutionInfo().GetVersionHistories(), - ) - if err != nil { - return false, err - } - - lastLocalItem, err := versionhistory.GetLastVersionHistoryItem(currentVersionHistory) - if err != nil { - return false, err - } - - lastIncomingItem, err := versionhistory.GetLastVersionHistoryItem(incomingVersionHistory) - if err != nil { - return false, err - } - - lcaItem, err := versionhistory.FindLCAVersionHistoryItem(currentVersionHistory, incomingVersionHistory) - if err != nil { - return false, err - } - - // case 1: local version history is superset of incoming version history - // or incoming version history is superset of local version history - // resend the missing event if local version history doesn't have the schedule event - - // case 2: local version history and incoming version history diverged - // case 2-1: local version history has the higher version and discard the incoming event - // case 2-2: incoming version history has the higher version and resend the missing incoming events - if versionhistory.IsLCAVersionHistoryItemAppendable(currentVersionHistory, lcaItem) || - versionhistory.IsLCAVersionHistoryItemAppendable(incomingVersionHistory, lcaItem) { - // case 1 - if scheduledEventID > lcaItem.GetEventId() { - return false, serviceerrors.NewRetryReplication( - resendMissingEventMessage, - namespaceID.String(), - workflowID, - runID, - lcaItem.GetEventId(), - lcaItem.GetVersion(), - common.EmptyEventID, - common.EmptyVersion, - ) - } - } else { - // case 2 - if lastIncomingItem.GetVersion() < lastLocalItem.GetVersion() { - // case 2-1 - 
return false, nil - } else if lastIncomingItem.GetVersion() > lastLocalItem.GetVersion() { - // case 2-2 - return false, serviceerrors.NewRetryReplication( - resendHigherVersionMessage, - namespaceID.String(), - workflowID, - runID, - lcaItem.GetEventId(), - lcaItem.GetVersion(), - common.EmptyEventID, - common.EmptyVersion, - ) - } - } - - state, _ := mutableState.GetWorkflowStateStatus() - return state != enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED, nil -} diff -Nru temporal-1.21.5-1/src/service/history/ndc/activity_replicator_mock.go temporal-1.22.5/src/service/history/ndc/activity_replicator_mock.go --- temporal-1.21.5-1/src/service/history/ndc/activity_replicator_mock.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/ndc/activity_replicator_mock.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,74 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Code generated by MockGen. DO NOT EDIT. -// Source: activity_replicator.go - -// Package ndc is a generated GoMock package. -package ndc - -import ( - context "context" - reflect "reflect" - - gomock "github.com/golang/mock/gomock" - historyservice "go.temporal.io/server/api/historyservice/v1" -) - -// MockActivityReplicator is a mock of ActivityReplicator interface. -type MockActivityReplicator struct { - ctrl *gomock.Controller - recorder *MockActivityReplicatorMockRecorder -} - -// MockActivityReplicatorMockRecorder is the mock recorder for MockActivityReplicator. -type MockActivityReplicatorMockRecorder struct { - mock *MockActivityReplicator -} - -// NewMockActivityReplicator creates a new mock instance. -func NewMockActivityReplicator(ctrl *gomock.Controller) *MockActivityReplicator { - mock := &MockActivityReplicator{ctrl: ctrl} - mock.recorder = &MockActivityReplicatorMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockActivityReplicator) EXPECT() *MockActivityReplicatorMockRecorder { - return m.recorder -} - -// SyncActivity mocks base method. -func (m *MockActivityReplicator) SyncActivity(ctx context.Context, request *historyservice.SyncActivityRequest) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SyncActivity", ctx, request) - ret0, _ := ret[0].(error) - return ret0 -} - -// SyncActivity indicates an expected call of SyncActivity. 
-func (mr *MockActivityReplicatorMockRecorder) SyncActivity(ctx, request interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncActivity", reflect.TypeOf((*MockActivityReplicator)(nil).SyncActivity), ctx, request) -} diff -Nru temporal-1.21.5-1/src/service/history/ndc/activity_replicator_test.go temporal-1.22.5/src/service/history/ndc/activity_replicator_test.go --- temporal-1.21.5-1/src/service/history/ndc/activity_replicator_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/ndc/activity_replicator_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,942 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
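testVersionHistory above reduces the replication decision to a comparison of the local and incoming version histories around their lowest common ancestor: when one history simply extends the other, missing events are resent only if the scheduled event lies beyond the local history; when the histories diverged, the side with the higher last failover version wins. A trimmed-down sketch of that case analysis, assuming the LCA item has already been computed and omitting the completed-workflow short circuit, might read:

package main

import "fmt"

// decision names the possible outcomes of the version-history comparison.
type decision string

const (
	applyState    decision = "apply"
	resendMissing decision = "resend missing events"
	discardState  decision = "discard incoming state"
	resendNewer   decision = "resend newer-version events"
)

// decide mirrors the case analysis: case 1 when one history extends the other
// (the LCA is a tail item of either history), case 2 when they diverged.
func decide(lcaEventID, localLastVersion, incomingLastVersion, scheduledEventID int64, oneExtendsOther bool) decision {
	if oneExtendsOther {
		// Case 1: resend only if the local history does not yet contain
		// the scheduled event.
		if scheduledEventID > lcaEventID {
			return resendMissing
		}
		return applyState
	}
	// Case 2: the histories diverged after the LCA; the higher last
	// failover version decides who resends.
	switch {
	case incomingLastVersion < localLastVersion:
		return discardState
	case incomingLastVersion > localLastVersion:
		return resendNewer
	default:
		return applyState
	}
}

func main() {
	fmt.Println(decide(109, 100, 100, 99, true))  // apply: event 99 already covered locally
	fmt.Println(decide(99, 100, 100, 120, true))  // resend missing events
	fmt.Println(decide(99, 102, 101, 120, false)) // discard incoming state
}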
- -package ndc - -import ( - "context" - "testing" - "time" - - "github.com/golang/mock/gomock" - "github.com/pborman/uuid" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - enumspb "go.temporal.io/api/enums/v1" - "go.temporal.io/api/serviceerror" - - enumsspb "go.temporal.io/server/api/enums/v1" - historyspb "go.temporal.io/server/api/history/v1" - "go.temporal.io/server/api/historyservice/v1" - persistencespb "go.temporal.io/server/api/persistence/v1" - "go.temporal.io/server/common" - "go.temporal.io/server/common/cluster" - "go.temporal.io/server/common/definition" - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/namespace" - "go.temporal.io/server/common/persistence" - "go.temporal.io/server/common/primitives/timestamp" - serviceerrors "go.temporal.io/server/common/serviceerror" - "go.temporal.io/server/service/history/queues" - "go.temporal.io/server/service/history/shard" - "go.temporal.io/server/service/history/tasks" - "go.temporal.io/server/service/history/tests" - "go.temporal.io/server/service/history/workflow" - wcache "go.temporal.io/server/service/history/workflow/cache" -) - -type ( - activityReplicatorSuite struct { - suite.Suite - *require.Assertions - - controller *gomock.Controller - mockShard *shard.ContextTest - mockTxProcessor *queues.MockQueue - mockTimerProcessor *queues.MockQueue - mockNamespaceCache *namespace.MockRegistry - mockClusterMetadata *cluster.MockMetadata - mockMutableState *workflow.MockMutableState - - mockExecutionMgr *persistence.MockExecutionManager - - workflowCache *wcache.CacheImpl - logger log.Logger - - nDCActivityReplicator *ActivityReplicatorImpl - } -) - -func TestActivityReplicatorSuite(t *testing.T) { - s := new(activityReplicatorSuite) - suite.Run(t, s) -} - -func (s *activityReplicatorSuite) SetupSuite() { - -} - -func (s *activityReplicatorSuite) TearDownSuite() { - -} - -func (s *activityReplicatorSuite) SetupTest() { - s.Assertions = require.New(s.T()) - - s.controller = gomock.NewController(s.T()) - s.mockMutableState = workflow.NewMockMutableState(s.controller) - s.mockTxProcessor = queues.NewMockQueue(s.controller) - s.mockTimerProcessor = queues.NewMockQueue(s.controller) - s.mockTxProcessor.EXPECT().Category().Return(tasks.CategoryTransfer).AnyTimes() - s.mockTimerProcessor.EXPECT().Category().Return(tasks.CategoryTimer).AnyTimes() - s.mockTxProcessor.EXPECT().NotifyNewTasks(gomock.Any()).AnyTimes() - s.mockTimerProcessor.EXPECT().NotifyNewTasks(gomock.Any()).AnyTimes() - s.mockShard = shard.NewTestContext( - s.controller, - &persistencespb.ShardInfo{ - ShardId: 1, - RangeId: 1, - }, - tests.NewDynamicConfig(), - ) - s.workflowCache = wcache.NewCache(s.mockShard).(*wcache.CacheImpl) - - s.mockNamespaceCache = s.mockShard.Resource.NamespaceCache - s.mockExecutionMgr = s.mockShard.Resource.ExecutionMgr - s.mockClusterMetadata = s.mockShard.Resource.ClusterMetadata - s.mockClusterMetadata.EXPECT().IsGlobalNamespaceEnabled().Return(true).AnyTimes() - s.mockClusterMetadata.EXPECT().GetCurrentClusterName().Return(cluster.TestCurrentClusterName).AnyTimes() - s.mockClusterMetadata.EXPECT().GetAllClusterInfo().Return(cluster.TestAllClusterInfo).AnyTimes() - - s.logger = s.mockShard.GetLogger() - - s.nDCActivityReplicator = NewActivityReplicator( - s.mockShard, - s.workflowCache, - s.logger, - ) -} - -func (s *activityReplicatorSuite) TearDownTest() { - s.controller.Finish() - s.mockShard.StopForTest() -} - -func (s *activityReplicatorSuite) TestRefreshTask_DiffCluster() { - version := 
int64(99) - attempt := int32(1) - localActivityInfo := &persistencespb.ActivityInfo{ - Version: int64(100), - Attempt: attempt, - } - - s.mockClusterMetadata.EXPECT().IsVersionFromSameCluster(version, localActivityInfo.Version).Return(false) - - apply := s.nDCActivityReplicator.testRefreshActivityTimerTaskMask( - version, - attempt, - localActivityInfo, - ) - s.True(apply) -} - -func (s *activityReplicatorSuite) TestRefreshTask_SameCluster_DiffAttempt() { - version := int64(99) - attempt := int32(1) - localActivityInfo := &persistencespb.ActivityInfo{ - Version: version, - Attempt: attempt + 1, - } - - s.mockClusterMetadata.EXPECT().IsVersionFromSameCluster(version, version).Return(true) - - apply := s.nDCActivityReplicator.testRefreshActivityTimerTaskMask( - version, - attempt, - localActivityInfo, - ) - s.True(apply) -} - -func (s *activityReplicatorSuite) TestRefreshTask_SameCluster_SameAttempt() { - version := int64(99) - attempt := int32(1) - localActivityInfo := &persistencespb.ActivityInfo{ - Version: version, - Attempt: attempt, - } - - s.mockClusterMetadata.EXPECT().IsVersionFromSameCluster(version, version).Return(true) - - apply := s.nDCActivityReplicator.testRefreshActivityTimerTaskMask( - version, - attempt, - localActivityInfo, - ) - s.False(apply) -} - -func (s *activityReplicatorSuite) TestActivity_LocalVersionLarger() { - version := int64(123) - attempt := int32(1) - lastHeartbeatTime := time.Now() - localActivityInfo := &persistencespb.ActivityInfo{ - Version: version + 1, - Attempt: attempt, - } - - apply := s.nDCActivityReplicator.testActivity( - version, - attempt, - lastHeartbeatTime, - localActivityInfo, - ) - s.False(apply) -} - -func (s *activityReplicatorSuite) TestActivity_IncomingVersionLarger() { - version := int64(123) - attempt := int32(1) - lastHeartbeatTime := time.Now() - localActivityInfo := &persistencespb.ActivityInfo{ - Version: version - 1, - Attempt: attempt, - } - - apply := s.nDCActivityReplicator.testActivity( - version, - attempt, - lastHeartbeatTime, - localActivityInfo, - ) - s.True(apply) -} - -func (s *activityReplicatorSuite) TestActivity_SameVersion_LocalAttemptLarger() { - version := int64(123) - attempt := int32(1) - lastHeartbeatTime := time.Now() - localActivityInfo := &persistencespb.ActivityInfo{ - Version: version, - Attempt: attempt + 1, - } - - apply := s.nDCActivityReplicator.testActivity( - version, - attempt, - lastHeartbeatTime, - localActivityInfo, - ) - s.False(apply) -} - -func (s *activityReplicatorSuite) TestActivity_SameVersion_IncomingAttemptLarger() { - version := int64(123) - attempt := int32(1) - lastHeartbeatTime := time.Now() - localActivityInfo := &persistencespb.ActivityInfo{ - Version: version, - Attempt: attempt - 1, - } - - apply := s.nDCActivityReplicator.testActivity( - version, - attempt, - lastHeartbeatTime, - localActivityInfo, - ) - s.True(apply) -} - -func (s *activityReplicatorSuite) TestActivity_SameVersion_SameAttempt_LocalHeartbeatLater() { - version := int64(123) - attempt := int32(1) - lastHeartbeatTime := time.Now() - localActivityInfo := &persistencespb.ActivityInfo{ - Version: version, - Attempt: attempt, - LastHeartbeatUpdateTime: timestamp.TimePtr(lastHeartbeatTime.Add(time.Second)), - } - - apply := s.nDCActivityReplicator.testActivity( - version, - attempt, - lastHeartbeatTime, - localActivityInfo, - ) - s.False(apply) -} - -func (s *activityReplicatorSuite) TestActivity_SameVersion_SameAttempt_IncomingHeartbeatLater() { - version := int64(123) - attempt := int32(1) - lastHeartbeatTime := 
time.Now() - localActivityInfo := &persistencespb.ActivityInfo{ - Version: version, - Attempt: attempt, - LastHeartbeatUpdateTime: timestamp.TimePtr(lastHeartbeatTime.Add(-time.Second)), - } - - apply := s.nDCActivityReplicator.testActivity( - version, - attempt, - lastHeartbeatTime, - localActivityInfo, - ) - s.True(apply) -} - -func (s *activityReplicatorSuite) TestVersionHistory_LocalIsSuperSet() { - namespaceID := tests.NamespaceID - workflowID := tests.WorkflowID - runID := uuid.New() - scheduledEventID := int64(99) - version := int64(100) - - localVersionHistories := &historyspb.VersionHistories{ - CurrentVersionHistoryIndex: 0, - Histories: []*historyspb.VersionHistory{{ - BranchToken: []byte{}, - Items: []*historyspb.VersionHistoryItem{ - { - EventId: scheduledEventID + 10, - Version: version, - }, - }, - }}, - } - incomingVersionHistory := &historyspb.VersionHistory{ - BranchToken: []byte{}, - Items: []*historyspb.VersionHistoryItem{ - { - EventId: scheduledEventID, - Version: version, - }, - }, - } - - s.mockMutableState.EXPECT().GetExecutionInfo().Return(&persistencespb.WorkflowExecutionInfo{ - VersionHistories: localVersionHistories, - }).AnyTimes() - s.mockMutableState.EXPECT().GetWorkflowStateStatus().Return( - enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING, - ).AnyTimes() - - apply, err := s.nDCActivityReplicator.testVersionHistory( - namespaceID, - workflowID, - runID, - scheduledEventID, - s.mockMutableState, - incomingVersionHistory, - ) - s.NoError(err) - s.True(apply) -} - -func (s *activityReplicatorSuite) TestVersionHistory_IncomingIsSuperSet_NoResend() { - namespaceID := tests.NamespaceID - workflowID := tests.WorkflowID - runID := uuid.New() - scheduledEventID := int64(99) - version := int64(100) - - localVersionHistories := &historyspb.VersionHistories{ - CurrentVersionHistoryIndex: 0, - Histories: []*historyspb.VersionHistory{{ - BranchToken: []byte{}, - Items: []*historyspb.VersionHistoryItem{ - { - EventId: scheduledEventID, - Version: version, - }, - }, - }}, - } - incomingVersionHistory := &historyspb.VersionHistory{ - BranchToken: []byte{}, - Items: []*historyspb.VersionHistoryItem{ - { - EventId: scheduledEventID + 10, - Version: version, - }, - }, - } - - s.mockMutableState.EXPECT().GetExecutionInfo().Return(&persistencespb.WorkflowExecutionInfo{ - VersionHistories: localVersionHistories, - }).AnyTimes() - s.mockMutableState.EXPECT().GetWorkflowStateStatus().Return( - enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING, - ).AnyTimes() - - apply, err := s.nDCActivityReplicator.testVersionHistory( - namespaceID, - workflowID, - runID, - scheduledEventID, - s.mockMutableState, - incomingVersionHistory, - ) - s.NoError(err) - s.True(apply) -} - -func (s *activityReplicatorSuite) TestVersionHistory_IncomingIsSuperSet_Resend() { - namespaceID := tests.NamespaceID - workflowID := tests.WorkflowID - runID := uuid.New() - scheduledEventID := int64(99) - version := int64(100) - - localVersionHistories := &historyspb.VersionHistories{ - CurrentVersionHistoryIndex: 0, - Histories: []*historyspb.VersionHistory{{ - BranchToken: []byte{}, - Items: []*historyspb.VersionHistoryItem{ - { - EventId: scheduledEventID - 1, - Version: version, - }, - }, - }}, - } - incomingVersionHistory := &historyspb.VersionHistory{ - BranchToken: []byte{}, - Items: []*historyspb.VersionHistoryItem{ - { - EventId: scheduledEventID + 10, - Version: version, - }, - }, - } - - 
s.mockMutableState.EXPECT().GetExecutionInfo().Return(&persistencespb.WorkflowExecutionInfo{ - VersionHistories: localVersionHistories, - }).AnyTimes() - s.mockMutableState.EXPECT().GetWorkflowStateStatus().Return( - enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING, - ).AnyTimes() - - apply, err := s.nDCActivityReplicator.testVersionHistory( - namespaceID, - workflowID, - runID, - scheduledEventID, - s.mockMutableState, - incomingVersionHistory, - ) - s.Equal(serviceerrors.NewRetryReplication( - resendMissingEventMessage, - namespaceID.String(), - workflowID, - runID, - scheduledEventID-1, - version, - common.EmptyEventID, - common.EmptyVersion, - ), err) - s.False(apply) -} - -func (s *activityReplicatorSuite) TestVersionHistory_Diverge_LocalLarger() { - namespaceID := tests.NamespaceID - workflowID := tests.WorkflowID - runID := uuid.New() - scheduledEventID := int64(99) - version := int64(100) - - localVersionHistories := &historyspb.VersionHistories{ - CurrentVersionHistoryIndex: 0, - Histories: []*historyspb.VersionHistory{{ - BranchToken: []byte{}, - Items: []*historyspb.VersionHistoryItem{ - { - EventId: scheduledEventID, - Version: version, - }, - { - EventId: scheduledEventID + 1, - Version: version + 2, - }, - }, - }}, - } - incomingVersionHistory := &historyspb.VersionHistory{ - BranchToken: []byte{}, - Items: []*historyspb.VersionHistoryItem{ - { - EventId: scheduledEventID + 10, - Version: version, - }, - { - EventId: scheduledEventID + 1, - Version: version + 1, - }, - }, - } - - s.mockMutableState.EXPECT().GetExecutionInfo().Return(&persistencespb.WorkflowExecutionInfo{ - VersionHistories: localVersionHistories, - }).AnyTimes() - s.mockMutableState.EXPECT().GetWorkflowStateStatus().Return( - enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING, - ).AnyTimes() - - apply, err := s.nDCActivityReplicator.testVersionHistory( - namespaceID, - workflowID, - runID, - scheduledEventID, - s.mockMutableState, - incomingVersionHistory, - ) - s.NoError(err) - s.False(apply) -} - -func (s *activityReplicatorSuite) TestVersionHistory_Diverge_IncomingLarger() { - namespaceID := tests.NamespaceID - workflowID := tests.WorkflowID - runID := uuid.New() - scheduledEventID := int64(99) - version := int64(100) - - localVersionHistories := &historyspb.VersionHistories{ - CurrentVersionHistoryIndex: 0, - Histories: []*historyspb.VersionHistory{{ - BranchToken: []byte{}, - Items: []*historyspb.VersionHistoryItem{ - { - EventId: scheduledEventID, - Version: version, - }, - { - EventId: scheduledEventID + 1, - Version: version + 1, - }, - }, - }}, - } - incomingVersionHistory := &historyspb.VersionHistory{ - BranchToken: []byte{}, - Items: []*historyspb.VersionHistoryItem{ - { - EventId: scheduledEventID, - Version: version, - }, - { - EventId: scheduledEventID + 1, - Version: version + 2, - }, - }, - } - - s.mockMutableState.EXPECT().GetExecutionInfo().Return(&persistencespb.WorkflowExecutionInfo{ - VersionHistories: localVersionHistories, - }).AnyTimes() - s.mockMutableState.EXPECT().GetWorkflowStateStatus().Return( - enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING, - ).AnyTimes() - - apply, err := s.nDCActivityReplicator.testVersionHistory( - namespaceID, - workflowID, - runID, - scheduledEventID, - s.mockMutableState, - incomingVersionHistory, - ) - s.Equal(serviceerrors.NewRetryReplication( - resendHigherVersionMessage, - namespaceID.String(), - workflowID, - runID, - scheduledEventID, - 
version, - common.EmptyEventID, - common.EmptyVersion, - ), err) - s.False(apply) -} - -func (s *activityReplicatorSuite) TestSyncActivity_WorkflowNotFound() { - namespaceName := namespace.Name("some random namespace name") - namespaceID := tests.NamespaceID - workflowID := "some random workflow ID" - runID := uuid.New() - version := int64(100) - - request := &historyservice.SyncActivityRequest{ - NamespaceId: namespaceID.String(), - WorkflowId: workflowID, - RunId: runID, - } - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), &persistence.GetWorkflowExecutionRequest{ - ShardID: s.mockShard.GetShardID(), - NamespaceID: namespaceID.String(), - WorkflowID: workflowID, - RunID: runID, - }).Return(nil, serviceerror.NewNotFound("")) - s.mockNamespaceCache.EXPECT().GetNamespaceByID(namespaceID).Return( - namespace.NewGlobalNamespaceForTest( - &persistencespb.NamespaceInfo{Id: namespaceID.String(), Name: namespaceName.String()}, - &persistencespb.NamespaceConfig{Retention: timestamp.DurationFromDays(1)}, - &persistencespb.NamespaceReplicationConfig{ - ActiveClusterName: cluster.TestCurrentClusterName, - Clusters: []string{ - cluster.TestCurrentClusterName, - cluster.TestAlternativeClusterName, - }, - }, - version, - ), nil, - ).AnyTimes() - - err := s.nDCActivityReplicator.SyncActivity(context.Background(), request) - s.Nil(err) -} - -func (s *activityReplicatorSuite) TestSyncActivity_WorkflowClosed() { - namespaceName := tests.Namespace - namespaceID := tests.NamespaceID - workflowID := tests.WorkflowID - runID := uuid.New() - scheduledEventID := int64(99) - version := int64(100) - lastWriteVersion := version - - localVersionHistories := &historyspb.VersionHistories{ - CurrentVersionHistoryIndex: 0, - Histories: []*historyspb.VersionHistory{{ - BranchToken: []byte{}, - Items: []*historyspb.VersionHistoryItem{ - { - EventId: scheduledEventID + 10, - Version: version, - }, - }, - }}, - } - incomingVersionHistory := &historyspb.VersionHistory{ - BranchToken: []byte{}, - Items: []*historyspb.VersionHistoryItem{ - { - EventId: scheduledEventID, - Version: version, - }, - }, - } - - key := definition.NewWorkflowKey(namespaceID.String(), workflowID, runID) - weContext := workflow.NewMockContext(s.controller) - weContext.EXPECT().LoadMutableState(gomock.Any()).Return(s.mockMutableState, nil) - weContext.EXPECT().Lock(gomock.Any(), workflow.LockPriorityHigh).Return(nil) - weContext.EXPECT().Unlock(workflow.LockPriorityHigh) - _, err := s.workflowCache.PutIfNotExist(key, weContext) - s.NoError(err) - - request := &historyservice.SyncActivityRequest{ - NamespaceId: namespaceID.String(), - WorkflowId: workflowID, - RunId: runID, - Version: version, - ScheduledEventId: scheduledEventID, - VersionHistory: incomingVersionHistory, - } - - s.mockMutableState.EXPECT().GetExecutionInfo().Return(&persistencespb.WorkflowExecutionInfo{ - VersionHistories: localVersionHistories, - }).AnyTimes() - s.mockMutableState.EXPECT().GetWorkflowStateStatus().Return( - enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED, enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED, - ).AnyTimes() - - s.mockNamespaceCache.EXPECT().GetNamespaceByID(namespaceID).Return( - namespace.NewGlobalNamespaceForTest( - &persistencespb.NamespaceInfo{Id: namespaceID.String(), Name: namespaceName.String()}, - &persistencespb.NamespaceConfig{Retention: timestamp.DurationFromDays(1)}, - &persistencespb.NamespaceReplicationConfig{ - ActiveClusterName: cluster.TestCurrentClusterName, - Clusters: []string{ - cluster.TestCurrentClusterName, - 
cluster.TestAlternativeClusterName, - }, - }, - lastWriteVersion, - ), nil, - ).AnyTimes() - - err = s.nDCActivityReplicator.SyncActivity(context.Background(), request) - s.Nil(err) -} - -func (s *activityReplicatorSuite) TestSyncActivity_ActivityNotFound() { - namespaceName := tests.Namespace - namespaceID := tests.NamespaceID - workflowID := tests.WorkflowID - runID := uuid.New() - scheduledEventID := int64(99) - version := int64(100) - lastWriteVersion := version - - localVersionHistories := &historyspb.VersionHistories{ - CurrentVersionHistoryIndex: 0, - Histories: []*historyspb.VersionHistory{{ - BranchToken: []byte{}, - Items: []*historyspb.VersionHistoryItem{ - { - EventId: scheduledEventID + 10, - Version: version, - }, - }, - }}, - } - incomingVersionHistory := &historyspb.VersionHistory{ - BranchToken: []byte{}, - Items: []*historyspb.VersionHistoryItem{ - { - EventId: scheduledEventID, - Version: version, - }, - }, - } - - key := definition.NewWorkflowKey(namespaceID.String(), workflowID, runID) - weContext := workflow.NewMockContext(s.controller) - weContext.EXPECT().LoadMutableState(gomock.Any()).Return(s.mockMutableState, nil) - weContext.EXPECT().Lock(gomock.Any(), workflow.LockPriorityHigh).Return(nil) - weContext.EXPECT().Unlock(workflow.LockPriorityHigh) - _, err := s.workflowCache.PutIfNotExist(key, weContext) - s.NoError(err) - - request := &historyservice.SyncActivityRequest{ - NamespaceId: namespaceID.String(), - WorkflowId: workflowID, - RunId: runID, - Version: version, - ScheduledEventId: scheduledEventID, - VersionHistory: incomingVersionHistory, - } - - s.mockMutableState.EXPECT().GetExecutionInfo().Return(&persistencespb.WorkflowExecutionInfo{ - VersionHistories: localVersionHistories, - }).AnyTimes() - s.mockMutableState.EXPECT().GetWorkflowStateStatus().Return( - enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING, - ).AnyTimes() - s.mockMutableState.EXPECT().GetActivityInfo(scheduledEventID).Return(nil, false) - - s.mockNamespaceCache.EXPECT().GetNamespaceByID(namespaceID).Return( - namespace.NewGlobalNamespaceForTest( - &persistencespb.NamespaceInfo{Id: namespaceID.String(), Name: namespaceName.String()}, - &persistencespb.NamespaceConfig{Retention: timestamp.DurationFromDays(1)}, - &persistencespb.NamespaceReplicationConfig{ - ActiveClusterName: cluster.TestCurrentClusterName, - Clusters: []string{ - cluster.TestCurrentClusterName, - cluster.TestAlternativeClusterName, - }, - }, - lastWriteVersion, - ), nil, - ).AnyTimes() - - err = s.nDCActivityReplicator.SyncActivity(context.Background(), request) - s.Nil(err) -} - -func (s *activityReplicatorSuite) TestSyncActivity_ActivityFound_Zombie() { - namespaceName := tests.Namespace - namespaceID := tests.NamespaceID - workflowID := tests.WorkflowID - runID := uuid.New() - scheduledEventID := int64(99) - version := int64(100) - lastWriteVersion := version - - localVersionHistories := &historyspb.VersionHistories{ - CurrentVersionHistoryIndex: 0, - Histories: []*historyspb.VersionHistory{{ - BranchToken: []byte{}, - Items: []*historyspb.VersionHistoryItem{ - { - EventId: scheduledEventID + 10, - Version: version, - }, - }, - }}, - } - incomingVersionHistory := &historyspb.VersionHistory{ - BranchToken: []byte{}, - Items: []*historyspb.VersionHistoryItem{ - { - EventId: scheduledEventID, - Version: version, - }, - }, - } - - key := definition.NewWorkflowKey(namespaceID.String(), workflowID, runID) - weContext := workflow.NewMockContext(s.controller) - 
weContext.EXPECT().LoadMutableState(gomock.Any()).Return(s.mockMutableState, nil) - weContext.EXPECT().Lock(gomock.Any(), workflow.LockPriorityHigh).Return(nil) - weContext.EXPECT().Unlock(workflow.LockPriorityHigh) - - _, err := s.workflowCache.PutIfNotExist(key, weContext) - s.NoError(err) - - now := time.Now() - request := &historyservice.SyncActivityRequest{ - NamespaceId: namespaceID.String(), - WorkflowId: workflowID, - RunId: runID, - Version: version, - ScheduledEventId: scheduledEventID, - ScheduledTime: &now, - VersionHistory: incomingVersionHistory, - } - - s.mockMutableState.EXPECT().GetExecutionInfo().Return(&persistencespb.WorkflowExecutionInfo{ - VersionHistories: localVersionHistories, - }).AnyTimes() - s.mockMutableState.EXPECT().GetWorkflowStateStatus().Return( - enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE, enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING, - ).AnyTimes() - s.mockMutableState.EXPECT().GetActivityInfo(scheduledEventID).Return(&persistencespb.ActivityInfo{ - Version: version, - }, true) - s.mockMutableState.EXPECT().ReplicateActivityInfo(request, false).Return(nil) - s.mockMutableState.EXPECT().GetPendingActivityInfos().Return(map[int64]*persistencespb.ActivityInfo{}) - s.mockClusterMetadata.EXPECT().IsVersionFromSameCluster(version, version).Return(true) - - weContext.EXPECT().UpdateWorkflowExecutionWithNew( - gomock.Any(), - persistence.UpdateWorkflowModeBypassCurrent, - workflow.Context(nil), - workflow.MutableState(nil), - workflow.TransactionPolicyPassive, - (*workflow.TransactionPolicy)(nil), - ).Return(nil) - - s.mockNamespaceCache.EXPECT().GetNamespaceByID(namespaceID).Return( - namespace.NewGlobalNamespaceForTest( - &persistencespb.NamespaceInfo{Id: namespaceID.String(), Name: namespaceName.String()}, - &persistencespb.NamespaceConfig{Retention: timestamp.DurationFromDays(1)}, - &persistencespb.NamespaceReplicationConfig{ - ActiveClusterName: cluster.TestCurrentClusterName, - Clusters: []string{ - cluster.TestCurrentClusterName, - cluster.TestAlternativeClusterName, - }, - }, - lastWriteVersion, - ), nil, - ).AnyTimes() - - err = s.nDCActivityReplicator.SyncActivity(context.Background(), request) - s.Nil(err) -} - -func (s *activityReplicatorSuite) TestSyncActivity_ActivityFound_NonZombie() { - namespaceName := tests.Namespace - namespaceID := tests.NamespaceID - workflowID := tests.WorkflowID - runID := uuid.New() - scheduledEventID := int64(99) - version := int64(100) - lastWriteVersion := version - - localVersionHistories := &historyspb.VersionHistories{ - CurrentVersionHistoryIndex: 0, - Histories: []*historyspb.VersionHistory{{ - BranchToken: []byte{}, - Items: []*historyspb.VersionHistoryItem{ - { - EventId: scheduledEventID + 10, - Version: version, - }, - }, - }}, - } - incomingVersionHistory := &historyspb.VersionHistory{ - BranchToken: []byte{}, - Items: []*historyspb.VersionHistoryItem{ - { - EventId: scheduledEventID, - Version: version, - }, - }, - } - - key := definition.NewWorkflowKey(namespaceID.String(), workflowID, runID) - weContext := workflow.NewMockContext(s.controller) - weContext.EXPECT().LoadMutableState(gomock.Any()).Return(s.mockMutableState, nil) - weContext.EXPECT().Lock(gomock.Any(), workflow.LockPriorityHigh).Return(nil) - weContext.EXPECT().Unlock(workflow.LockPriorityHigh) - _, err := s.workflowCache.PutIfNotExist(key, weContext) - s.NoError(err) - - now := time.Now() - request := &historyservice.SyncActivityRequest{ - NamespaceId: namespaceID.String(), - WorkflowId: workflowID, - RunId: runID, - Version: version, - 
ScheduledEventId: scheduledEventID, - ScheduledTime: &now, - VersionHistory: incomingVersionHistory, - } - - s.mockMutableState.EXPECT().GetExecutionInfo().Return(&persistencespb.WorkflowExecutionInfo{ - VersionHistories: localVersionHistories, - }).AnyTimes() - s.mockMutableState.EXPECT().GetWorkflowStateStatus().Return( - enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING, - ).AnyTimes() - s.mockMutableState.EXPECT().GetActivityInfo(scheduledEventID).Return(&persistencespb.ActivityInfo{ - Version: version, - }, true) - s.mockMutableState.EXPECT().ReplicateActivityInfo(request, false).Return(nil) - s.mockMutableState.EXPECT().GetPendingActivityInfos().Return(map[int64]*persistencespb.ActivityInfo{}) - - s.mockClusterMetadata.EXPECT().IsVersionFromSameCluster(version, version).Return(true) - - weContext.EXPECT().UpdateWorkflowExecutionWithNew( - gomock.Any(), - persistence.UpdateWorkflowModeUpdateCurrent, - workflow.Context(nil), - workflow.MutableState(nil), - workflow.TransactionPolicyPassive, - (*workflow.TransactionPolicy)(nil), - ).Return(nil) - - s.mockNamespaceCache.EXPECT().GetNamespaceByID(namespaceID).Return( - namespace.NewGlobalNamespaceForTest( - &persistencespb.NamespaceInfo{Id: namespaceID.String(), Name: namespaceName.String()}, - &persistencespb.NamespaceConfig{Retention: timestamp.DurationFromDays(1)}, - &persistencespb.NamespaceReplicationConfig{ - ActiveClusterName: cluster.TestCurrentClusterName, - Clusters: []string{ - cluster.TestCurrentClusterName, - cluster.TestAlternativeClusterName, - }, - }, - lastWriteVersion, - ), nil, - ).AnyTimes() - - err = s.nDCActivityReplicator.SyncActivity(context.Background(), request) - s.Nil(err) -} diff -Nru temporal-1.21.5-1/src/service/history/ndc/activity_state_replicator.go temporal-1.22.5/src/service/history/ndc/activity_state_replicator.go --- temporal-1.21.5-1/src/service/history/ndc/activity_state_replicator.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/service/history/ndc/activity_state_replicator.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,349 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
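The file added below renames ActivityReplicator to ActivityStateReplicator and SyncActivity to SyncActivityState; the replication logic itself is carried over unchanged. Its testActivity helper decides whether an incoming sync should overwrite the local activity record by comparing the failover version first, then the attempt, then the last heartbeat time. A compact stand-alone sketch of that ordering, with a trimmed stand-in for persistencespb.ActivityInfo, is:

package main

import (
	"fmt"
	"time"
)

// localActivity is a trimmed stand-in for persistencespb.ActivityInfo.
type localActivity struct {
	Version       int64
	Attempt       int32
	LastHeartbeat time.Time
}

// shouldApply mirrors the ordering used by testActivity: a higher failover
// version wins, then a higher attempt, and for an equal version and attempt
// the incoming state is applied only if its heartbeat is not older than the
// locally recorded one.
func shouldApply(local localActivity, version int64, attempt int32, heartbeat time.Time) bool {
	if local.Version != version {
		return local.Version < version
	}
	if local.Attempt != attempt {
		return local.Attempt < attempt
	}
	return !local.LastHeartbeat.After(heartbeat)
}

func main() {
	now := time.Now()
	fmt.Println(shouldApply(localActivity{Version: 9, Attempt: 1}, 10, 1, now))  // true: newer version
	fmt.Println(shouldApply(localActivity{Version: 10, Attempt: 2}, 10, 1, now)) // false: stale attempt
	fmt.Println(shouldApply(localActivity{Version: 10, Attempt: 1, LastHeartbeat: now}, 10, 1, now.Add(-time.Second))) // false: older heartbeat
}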
+ +//go:generate mockgen -copyright_file ../../../LICENSE -package $GOPACKAGE -source $GOFILE -destination activity_state_replicator_mock.go + +package ndc + +import ( + "context" + "time" + + commonpb "go.temporal.io/api/common/v1" + "go.temporal.io/api/serviceerror" + + enumsspb "go.temporal.io/server/api/enums/v1" + historyspb "go.temporal.io/server/api/history/v1" + "go.temporal.io/server/api/historyservice/v1" + persistencespb "go.temporal.io/server/api/persistence/v1" + "go.temporal.io/server/common" + "go.temporal.io/server/common/cluster" + "go.temporal.io/server/common/definition" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/log/tag" + "go.temporal.io/server/common/namespace" + "go.temporal.io/server/common/persistence" + "go.temporal.io/server/common/persistence/versionhistory" + "go.temporal.io/server/common/primitives/timestamp" + serviceerrors "go.temporal.io/server/common/serviceerror" + "go.temporal.io/server/service/history/shard" + "go.temporal.io/server/service/history/tasks" + "go.temporal.io/server/service/history/workflow" + wcache "go.temporal.io/server/service/history/workflow/cache" +) + +const ( + resendMissingEventMessage = "Resend missed sync activity events" + resendHigherVersionMessage = "Resend sync activity events due to a higher version received" +) + +type ( + ActivityStateReplicator interface { + SyncActivityState( + ctx context.Context, + request *historyservice.SyncActivityRequest, + ) error + } + + ActivityStateReplicatorImpl struct { + workflowCache wcache.Cache + clusterMetadata cluster.Metadata + logger log.Logger + } +) + +func NewActivityStateReplicator( + shard shard.Context, + workflowCache wcache.Cache, + logger log.Logger, +) *ActivityStateReplicatorImpl { + + return &ActivityStateReplicatorImpl{ + workflowCache: workflowCache, + clusterMetadata: shard.GetClusterMetadata(), + logger: log.With(logger, tag.ComponentHistoryReplicator), + } +} + +func (r *ActivityStateReplicatorImpl) SyncActivityState( + ctx context.Context, + request *historyservice.SyncActivityRequest, +) (retError error) { + + // sync activity info will only be sent from active side, when + // 1. activity retry + // 2. activity start + // 3. 
activity heart beat + // no sync activity task will be sent when active side fail / timeout activity, + namespaceID := namespace.ID(request.GetNamespaceId()) + execution := commonpb.WorkflowExecution{ + WorkflowId: request.WorkflowId, + RunId: request.RunId, + } + + executionContext, release, err := r.workflowCache.GetOrCreateWorkflowExecution( + ctx, + namespaceID, + execution, + workflow.LockPriorityHigh, + ) + if err != nil { + // for get workflow execution context, with valid run id + // err will not be of type EntityNotExistsError + return err + } + defer func() { release(retError) }() + + mutableState, err := executionContext.LoadMutableState(ctx) + if err != nil { + if _, isNotFound := err.(*serviceerror.NotFound); isNotFound { + // this can happen if the workflow start event and this sync activity task are out of order + // or the target workflow is long gone + // the safe solution to this is to throw away the sync activity task + // or otherwise, worker attempt will exceed limit and put this message to DLQ + return nil + } + return err + } + + scheduledEventID := request.GetScheduledEventId() + shouldApply, err := r.testVersionHistory( + namespaceID, + execution.GetWorkflowId(), + execution.GetRunId(), + scheduledEventID, + mutableState, + request.GetVersionHistory(), + ) + if err != nil || !shouldApply { + return err + } + + activityInfo, ok := mutableState.GetActivityInfo(scheduledEventID) + if !ok { + // this should not retry, can be caused by out of order delivery + // since the activity is already finished + return nil + } + if shouldApply := r.testActivity( + request.GetVersion(), + request.GetAttempt(), + timestamp.TimeValue(request.GetLastHeartbeatTime()), + activityInfo, + ); !shouldApply { + return nil + } + + // sync activity with empty started ID means activity retry + eventTime := timestamp.TimeValue(request.GetScheduledTime()) + if request.StartedEventId == common.EmptyEventID && request.Attempt > activityInfo.GetAttempt() { + mutableState.AddTasks(&tasks.ActivityRetryTimerTask{ + WorkflowKey: definition.WorkflowKey{ + NamespaceID: request.GetNamespaceId(), + WorkflowID: request.GetWorkflowId(), + RunID: request.GetRunId(), + }, + VisibilityTimestamp: eventTime, + EventID: request.GetScheduledEventId(), + Version: request.GetVersion(), + Attempt: request.GetAttempt(), + }) + } + + refreshTask := r.testRefreshActivityTimerTaskMask( + request.GetVersion(), + request.GetAttempt(), + activityInfo, + ) + err = mutableState.ReplicateActivityInfo(request, refreshTask) + if err != nil { + return err + } + + // see whether we need to refresh the activity timer + startedTime := timestamp.TimeValue(request.GetStartedTime()) + lastHeartbeatTime := timestamp.TimeValue(request.GetLastHeartbeatTime()) + if eventTime.Before(startedTime) { + eventTime = startedTime + } + if eventTime.Before(lastHeartbeatTime) { + eventTime = lastHeartbeatTime + } + + // passive logic need to explicitly call create timer + if _, err := workflow.NewTimerSequence( + mutableState, + ).CreateNextActivityTimer(); err != nil { + return err + } + + updateMode := persistence.UpdateWorkflowModeUpdateCurrent + if state, _ := mutableState.GetWorkflowStateStatus(); state == enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE { + updateMode = persistence.UpdateWorkflowModeBypassCurrent + } + + return executionContext.UpdateWorkflowExecutionWithNew( + ctx, + updateMode, + nil, // no new workflow + nil, // no new workflow + workflow.TransactionPolicyPassive, + nil, + ) +} + +func (r *ActivityStateReplicatorImpl) 
testRefreshActivityTimerTaskMask( + version int64, + attempt int32, + activityInfo *persistencespb.ActivityInfo, +) bool { + + // calculate whether to reset the activity timer task status bits + // reset timer task status bits if + // 1. same source cluster & attempt changes + // 2. different source cluster + if !r.clusterMetadata.IsVersionFromSameCluster(version, activityInfo.Version) { + return true + } else if activityInfo.Attempt != attempt { + return true + } + return false +} + +func (r *ActivityStateReplicatorImpl) testActivity( + version int64, + attempt int32, + lastHeartbeatTime time.Time, + activityInfo *persistencespb.ActivityInfo, +) bool { + + if activityInfo.Version > version { + // this should not retry, can be caused by failover or reset + return false + } + + if activityInfo.Version < version { + // incoming version larger then local version, should update activity + return true + } + + // activityInfo.Version == version + if activityInfo.Attempt > attempt { + // this should not retry, can be caused by failover or reset + return false + } + + // activityInfo.Version == version + if activityInfo.Attempt < attempt { + // version equal & attempt larger then existing, should update activity + return true + } + + // activityInfo.Version == version & activityInfo.Attempt == attempt + + // last heartbeat after existing heartbeat & should update activity + if !timestamp.TimeValue(activityInfo.LastHeartbeatUpdateTime).IsZero() && + activityInfo.LastHeartbeatUpdateTime.After(lastHeartbeatTime) { + // this should not retry, can be caused by out of order delivery + return false + } + return true +} + +func (r *ActivityStateReplicatorImpl) testVersionHistory( + namespaceID namespace.ID, + workflowID string, + runID string, + scheduledEventID int64, + mutableState workflow.MutableState, + incomingVersionHistory *historyspb.VersionHistory, +) (bool, error) { + + currentVersionHistory, err := versionhistory.GetCurrentVersionHistory( + mutableState.GetExecutionInfo().GetVersionHistories(), + ) + if err != nil { + return false, err + } + + lastLocalItem, err := versionhistory.GetLastVersionHistoryItem(currentVersionHistory) + if err != nil { + return false, err + } + + lastIncomingItem, err := versionhistory.GetLastVersionHistoryItem(incomingVersionHistory) + if err != nil { + return false, err + } + + lcaItem, err := versionhistory.FindLCAVersionHistoryItem(currentVersionHistory, incomingVersionHistory) + if err != nil { + return false, err + } + + // case 1: local version history is superset of incoming version history + // or incoming version history is superset of local version history + // resend the missing event if local version history doesn't have the schedule event + + // case 2: local version history and incoming version history diverged + // case 2-1: local version history has the higher version and discard the incoming event + // case 2-2: incoming version history has the higher version and resend the missing incoming events + if versionhistory.IsLCAVersionHistoryItemAppendable(currentVersionHistory, lcaItem) || + versionhistory.IsLCAVersionHistoryItemAppendable(incomingVersionHistory, lcaItem) { + // case 1 + if scheduledEventID > lcaItem.GetEventId() { + return false, serviceerrors.NewRetryReplication( + resendMissingEventMessage, + namespaceID.String(), + workflowID, + runID, + lcaItem.GetEventId(), + lcaItem.GetVersion(), + common.EmptyEventID, + common.EmptyVersion, + ) + } + } else { + // case 2 + if lastIncomingItem.GetVersion() < lastLocalItem.GetVersion() { + // case 
2-1 + return false, nil + } else if lastIncomingItem.GetVersion() > lastLocalItem.GetVersion() { + // case 2-2 + return false, serviceerrors.NewRetryReplication( + resendHigherVersionMessage, + namespaceID.String(), + workflowID, + runID, + lcaItem.GetEventId(), + lcaItem.GetVersion(), + common.EmptyEventID, + common.EmptyVersion, + ) + } + } + + state, _ := mutableState.GetWorkflowStateStatus() + return state != enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED, nil +} diff -Nru temporal-1.21.5-1/src/service/history/ndc/activity_state_replicator_mock.go temporal-1.22.5/src/service/history/ndc/activity_state_replicator_mock.go --- temporal-1.21.5-1/src/service/history/ndc/activity_state_replicator_mock.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/service/history/ndc/activity_state_replicator_mock.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,74 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Code generated by MockGen. DO NOT EDIT. +// Source: activity_state_replicator.go + +// Package ndc is a generated GoMock package. +package ndc + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + historyservice "go.temporal.io/server/api/historyservice/v1" +) + +// MockActivityStateReplicator is a mock of ActivityStateReplicator interface. +type MockActivityStateReplicator struct { + ctrl *gomock.Controller + recorder *MockActivityStateReplicatorMockRecorder +} + +// MockActivityStateReplicatorMockRecorder is the mock recorder for MockActivityStateReplicator. +type MockActivityStateReplicatorMockRecorder struct { + mock *MockActivityStateReplicator +} + +// NewMockActivityStateReplicator creates a new mock instance. +func NewMockActivityStateReplicator(ctrl *gomock.Controller) *MockActivityStateReplicator { + mock := &MockActivityStateReplicator{ctrl: ctrl} + mock.recorder = &MockActivityStateReplicatorMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockActivityStateReplicator) EXPECT() *MockActivityStateReplicatorMockRecorder { + return m.recorder +} + +// SyncActivityState mocks base method. 
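For readers skimming the new activity_state_replicator.go logic above: testActivity resolves an incoming activity sync against the local record by comparing version first, then attempt, then the last-heartbeat timestamp, while testRefreshActivityTimerTaskMask resets the activity timer-task status only when the attempt changed or the version came from a different cluster. The following standalone Go sketch (plain structs instead of the persistence protos; not part of the package diff) illustrates that precedence.

package main

import (
	"fmt"
	"time"
)

// localActivity stands in for persistencespb.ActivityInfo in this sketch.
type localActivity struct {
	Version           int64
	Attempt           int32
	LastHeartbeatTime time.Time
}

// shouldApply mirrors the precedence encoded by testActivity: version wins
// over attempt, attempt wins over heartbeat time, and an incoming record with
// an older heartbeat at the same version/attempt is treated as an
// out-of-order delivery and dropped.
func shouldApply(incomingVersion int64, incomingAttempt int32, incomingHeartbeat time.Time, local localActivity) bool {
	if local.Version > incomingVersion {
		return false // local is newer, e.g. after a failover or reset
	}
	if local.Version < incomingVersion {
		return true
	}
	if local.Attempt > incomingAttempt {
		return false
	}
	if local.Attempt < incomingAttempt {
		return true
	}
	// same version and attempt: only a strictly newer heartbeat is applied
	if !local.LastHeartbeatTime.IsZero() && local.LastHeartbeatTime.After(incomingHeartbeat) {
		return false
	}
	return true
}

// refreshTimerTask mirrors testRefreshActivityTimerTaskMask: the timer task
// status bits are reset when the attempt changed or the incoming version was
// produced by a different cluster (sameCluster stands for the result of
// clusterMetadata.IsVersionFromSameCluster).
func refreshTimerTask(sameCluster bool, incomingAttempt, localAttempt int32) bool {
	return !sameCluster || incomingAttempt != localAttempt
}

func main() {
	local := localActivity{Version: 100, Attempt: 1, LastHeartbeatTime: time.Now()}
	fmt.Println(shouldApply(99, 5, time.Now(), local))  // false: local version is higher
	fmt.Println(shouldApply(100, 2, time.Now(), local)) // true: same version, higher attempt
	fmt.Println(refreshTimerTask(true, 1, 1))           // false: same cluster, same attempt
}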
+func (m *MockActivityStateReplicator) SyncActivityState(ctx context.Context, request *historyservice.SyncActivityRequest) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SyncActivityState", ctx, request) + ret0, _ := ret[0].(error) + return ret0 +} + +// SyncActivityState indicates an expected call of SyncActivityState. +func (mr *MockActivityStateReplicatorMockRecorder) SyncActivityState(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncActivityState", reflect.TypeOf((*MockActivityStateReplicator)(nil).SyncActivityState), ctx, request) +} diff -Nru temporal-1.21.5-1/src/service/history/ndc/activity_state_replicator_test.go temporal-1.22.5/src/service/history/ndc/activity_state_replicator_test.go --- temporal-1.21.5-1/src/service/history/ndc/activity_state_replicator_test.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/service/history/ndc/activity_state_replicator_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,949 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
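The mock above is standard MockGen output, and consumers drive it through gomock's EXPECT API. As a hedged illustration only (the test name and namespace ID value are invented; the mock type, request type, and gomock calls are the ones appearing in this diff), a caller-side test might wire it up like this:

package ndc_test

import (
	"context"
	"testing"

	"github.com/golang/mock/gomock"
	"go.temporal.io/server/api/historyservice/v1"
	"go.temporal.io/server/service/history/ndc"
)

// Sketch: typical wiring of the generated MockActivityStateReplicator.
func TestSyncActivityStateExpectation(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	replicator := ndc.NewMockActivityStateReplicator(ctrl)
	req := &historyservice.SyncActivityRequest{NamespaceId: "some-namespace-id"}

	// Expect exactly one SyncActivityState call with this request and any context.
	replicator.EXPECT().
		SyncActivityState(gomock.Any(), req).
		Return(nil).
		Times(1)

	if err := replicator.SyncActivityState(context.Background(), req); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
}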
+ +package ndc + +import ( + "context" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/pborman/uuid" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + enumspb "go.temporal.io/api/enums/v1" + "go.temporal.io/api/serviceerror" + + enumsspb "go.temporal.io/server/api/enums/v1" + historyspb "go.temporal.io/server/api/history/v1" + "go.temporal.io/server/api/historyservice/v1" + persistencespb "go.temporal.io/server/api/persistence/v1" + "go.temporal.io/server/common" + "go.temporal.io/server/common/cluster" + "go.temporal.io/server/common/definition" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/namespace" + "go.temporal.io/server/common/persistence" + "go.temporal.io/server/common/primitives/timestamp" + serviceerrors "go.temporal.io/server/common/serviceerror" + "go.temporal.io/server/service/history/queues" + "go.temporal.io/server/service/history/shard" + "go.temporal.io/server/service/history/tasks" + "go.temporal.io/server/service/history/tests" + "go.temporal.io/server/service/history/workflow" + wcache "go.temporal.io/server/service/history/workflow/cache" +) + +type ( + activityReplicatorStateSuite struct { + suite.Suite + *require.Assertions + + controller *gomock.Controller + mockShard *shard.ContextTest + mockTxProcessor *queues.MockQueue + mockTimerProcessor *queues.MockQueue + mockNamespaceCache *namespace.MockRegistry + mockClusterMetadata *cluster.MockMetadata + mockMutableState *workflow.MockMutableState + + mockExecutionMgr *persistence.MockExecutionManager + + workflowCache *wcache.CacheImpl + logger log.Logger + + nDCActivityStateReplicator *ActivityStateReplicatorImpl + } +) + +func TestActivityStateReplicatorSuite(t *testing.T) { + s := new(activityReplicatorStateSuite) + suite.Run(t, s) +} + +func (s *activityReplicatorStateSuite) SetupSuite() { + +} + +func (s *activityReplicatorStateSuite) TearDownSuite() { + +} + +func (s *activityReplicatorStateSuite) SetupTest() { + s.Assertions = require.New(s.T()) + + s.controller = gomock.NewController(s.T()) + s.mockMutableState = workflow.NewMockMutableState(s.controller) + s.mockTxProcessor = queues.NewMockQueue(s.controller) + s.mockTimerProcessor = queues.NewMockQueue(s.controller) + s.mockTxProcessor.EXPECT().Category().Return(tasks.CategoryTransfer).AnyTimes() + s.mockTimerProcessor.EXPECT().Category().Return(tasks.CategoryTimer).AnyTimes() + s.mockTxProcessor.EXPECT().NotifyNewTasks(gomock.Any()).AnyTimes() + s.mockTimerProcessor.EXPECT().NotifyNewTasks(gomock.Any()).AnyTimes() + s.mockShard = shard.NewTestContext( + s.controller, + &persistencespb.ShardInfo{ + ShardId: 1, + RangeId: 1, + }, + tests.NewDynamicConfig(), + ) + s.workflowCache = wcache.NewCache(s.mockShard).(*wcache.CacheImpl) + + s.mockNamespaceCache = s.mockShard.Resource.NamespaceCache + s.mockExecutionMgr = s.mockShard.Resource.ExecutionMgr + s.mockClusterMetadata = s.mockShard.Resource.ClusterMetadata + s.mockClusterMetadata.EXPECT().IsGlobalNamespaceEnabled().Return(true).AnyTimes() + s.mockClusterMetadata.EXPECT().GetCurrentClusterName().Return(cluster.TestCurrentClusterName).AnyTimes() + s.mockClusterMetadata.EXPECT().GetAllClusterInfo().Return(cluster.TestAllClusterInfo).AnyTimes() + + s.logger = s.mockShard.GetLogger() + + s.nDCActivityStateReplicator = NewActivityStateReplicator( + s.mockShard, + s.workflowCache, + s.logger, + ) +} + +func (s *activityReplicatorStateSuite) TearDownTest() { + s.controller.Finish() + s.mockShard.StopForTest() +} + +func (s 
*activityReplicatorStateSuite) TestRefreshTask_DiffCluster() { + version := int64(99) + attempt := int32(1) + localActivityInfo := &persistencespb.ActivityInfo{ + Version: int64(100), + Attempt: attempt, + } + + s.mockClusterMetadata.EXPECT().IsVersionFromSameCluster(version, localActivityInfo.Version).Return(false) + + apply := s.nDCActivityStateReplicator.testRefreshActivityTimerTaskMask( + version, + attempt, + localActivityInfo, + ) + s.True(apply) +} + +func (s *activityReplicatorStateSuite) TestRefreshTask_SameCluster_DiffAttempt() { + version := int64(99) + attempt := int32(1) + localActivityInfo := &persistencespb.ActivityInfo{ + Version: version, + Attempt: attempt + 1, + } + + s.mockClusterMetadata.EXPECT().IsVersionFromSameCluster(version, version).Return(true) + + apply := s.nDCActivityStateReplicator.testRefreshActivityTimerTaskMask( + version, + attempt, + localActivityInfo, + ) + s.True(apply) +} + +func (s *activityReplicatorStateSuite) TestRefreshTask_SameCluster_SameAttempt() { + version := int64(99) + attempt := int32(1) + localActivityInfo := &persistencespb.ActivityInfo{ + Version: version, + Attempt: attempt, + } + + s.mockClusterMetadata.EXPECT().IsVersionFromSameCluster(version, version).Return(true) + + apply := s.nDCActivityStateReplicator.testRefreshActivityTimerTaskMask( + version, + attempt, + localActivityInfo, + ) + s.False(apply) +} + +func (s *activityReplicatorStateSuite) TestActivity_LocalVersionLarger() { + version := int64(123) + attempt := int32(1) + lastHeartbeatTime := time.Now() + localActivityInfo := &persistencespb.ActivityInfo{ + Version: version + 1, + Attempt: attempt, + } + + apply := s.nDCActivityStateReplicator.testActivity( + version, + attempt, + lastHeartbeatTime, + localActivityInfo, + ) + s.False(apply) +} + +func (s *activityReplicatorStateSuite) TestActivity_IncomingVersionLarger() { + version := int64(123) + attempt := int32(1) + lastHeartbeatTime := time.Now() + localActivityInfo := &persistencespb.ActivityInfo{ + Version: version - 1, + Attempt: attempt, + } + + apply := s.nDCActivityStateReplicator.testActivity( + version, + attempt, + lastHeartbeatTime, + localActivityInfo, + ) + s.True(apply) +} + +func (s *activityReplicatorStateSuite) TestActivity_SameVersion_LocalAttemptLarger() { + version := int64(123) + attempt := int32(1) + lastHeartbeatTime := time.Now() + localActivityInfo := &persistencespb.ActivityInfo{ + Version: version, + Attempt: attempt + 1, + } + + apply := s.nDCActivityStateReplicator.testActivity( + version, + attempt, + lastHeartbeatTime, + localActivityInfo, + ) + s.False(apply) +} + +func (s *activityReplicatorStateSuite) TestActivity_SameVersion_IncomingAttemptLarger() { + version := int64(123) + attempt := int32(1) + lastHeartbeatTime := time.Now() + localActivityInfo := &persistencespb.ActivityInfo{ + Version: version, + Attempt: attempt - 1, + } + + apply := s.nDCActivityStateReplicator.testActivity( + version, + attempt, + lastHeartbeatTime, + localActivityInfo, + ) + s.True(apply) +} + +func (s *activityReplicatorStateSuite) TestActivity_SameVersion_SameAttempt_LocalHeartbeatLater() { + version := int64(123) + attempt := int32(1) + lastHeartbeatTime := time.Now() + localActivityInfo := &persistencespb.ActivityInfo{ + Version: version, + Attempt: attempt, + LastHeartbeatUpdateTime: timestamp.TimePtr(lastHeartbeatTime.Add(time.Second)), + } + + apply := s.nDCActivityStateReplicator.testActivity( + version, + attempt, + lastHeartbeatTime, + localActivityInfo, + ) + s.False(apply) +} + +func (s 
*activityReplicatorStateSuite) TestActivity_SameVersion_SameAttempt_IncomingHeartbeatLater() { + version := int64(123) + attempt := int32(1) + lastHeartbeatTime := time.Now() + localActivityInfo := &persistencespb.ActivityInfo{ + Version: version, + Attempt: attempt, + LastHeartbeatUpdateTime: timestamp.TimePtr(lastHeartbeatTime.Add(-time.Second)), + } + + apply := s.nDCActivityStateReplicator.testActivity( + version, + attempt, + lastHeartbeatTime, + localActivityInfo, + ) + s.True(apply) +} + +func (s *activityReplicatorStateSuite) TestVersionHistory_LocalIsSuperSet() { + namespaceID := tests.NamespaceID + workflowID := tests.WorkflowID + runID := uuid.New() + scheduledEventID := int64(99) + version := int64(100) + + localVersionHistories := &historyspb.VersionHistories{ + CurrentVersionHistoryIndex: 0, + Histories: []*historyspb.VersionHistory{{ + BranchToken: []byte{}, + Items: []*historyspb.VersionHistoryItem{ + { + EventId: scheduledEventID + 10, + Version: version, + }, + }, + }}, + } + incomingVersionHistory := &historyspb.VersionHistory{ + BranchToken: []byte{}, + Items: []*historyspb.VersionHistoryItem{ + { + EventId: scheduledEventID, + Version: version, + }, + }, + } + + s.mockMutableState.EXPECT().GetExecutionInfo().Return(&persistencespb.WorkflowExecutionInfo{ + VersionHistories: localVersionHistories, + }).AnyTimes() + s.mockMutableState.EXPECT().GetWorkflowStateStatus().Return( + enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING, + ).AnyTimes() + + apply, err := s.nDCActivityStateReplicator.testVersionHistory( + namespaceID, + workflowID, + runID, + scheduledEventID, + s.mockMutableState, + incomingVersionHistory, + ) + s.NoError(err) + s.True(apply) +} + +func (s *activityReplicatorStateSuite) TestVersionHistory_IncomingIsSuperSet_NoResend() { + namespaceID := tests.NamespaceID + workflowID := tests.WorkflowID + runID := uuid.New() + scheduledEventID := int64(99) + version := int64(100) + + localVersionHistories := &historyspb.VersionHistories{ + CurrentVersionHistoryIndex: 0, + Histories: []*historyspb.VersionHistory{{ + BranchToken: []byte{}, + Items: []*historyspb.VersionHistoryItem{ + { + EventId: scheduledEventID, + Version: version, + }, + }, + }}, + } + incomingVersionHistory := &historyspb.VersionHistory{ + BranchToken: []byte{}, + Items: []*historyspb.VersionHistoryItem{ + { + EventId: scheduledEventID + 10, + Version: version, + }, + }, + } + + s.mockMutableState.EXPECT().GetExecutionInfo().Return(&persistencespb.WorkflowExecutionInfo{ + VersionHistories: localVersionHistories, + }).AnyTimes() + s.mockMutableState.EXPECT().GetWorkflowStateStatus().Return( + enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING, + ).AnyTimes() + + apply, err := s.nDCActivityStateReplicator.testVersionHistory( + namespaceID, + workflowID, + runID, + scheduledEventID, + s.mockMutableState, + incomingVersionHistory, + ) + s.NoError(err) + s.True(apply) +} + +func (s *activityReplicatorStateSuite) TestVersionHistory_IncomingIsSuperSet_Resend() { + namespaceID := tests.NamespaceID + workflowID := tests.WorkflowID + runID := uuid.New() + scheduledEventID := int64(99) + version := int64(100) + + localVersionHistories := &historyspb.VersionHistories{ + CurrentVersionHistoryIndex: 0, + Histories: []*historyspb.VersionHistory{{ + BranchToken: []byte{}, + Items: []*historyspb.VersionHistoryItem{ + { + EventId: scheduledEventID - 1, + Version: version, + }, + }, + }}, + } + incomingVersionHistory := &historyspb.VersionHistory{ 
+ BranchToken: []byte{}, + Items: []*historyspb.VersionHistoryItem{ + { + EventId: scheduledEventID + 10, + Version: version, + }, + }, + } + + s.mockMutableState.EXPECT().GetExecutionInfo().Return(&persistencespb.WorkflowExecutionInfo{ + VersionHistories: localVersionHistories, + }).AnyTimes() + s.mockMutableState.EXPECT().GetWorkflowStateStatus().Return( + enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING, + ).AnyTimes() + + apply, err := s.nDCActivityStateReplicator.testVersionHistory( + namespaceID, + workflowID, + runID, + scheduledEventID, + s.mockMutableState, + incomingVersionHistory, + ) + s.Equal(serviceerrors.NewRetryReplication( + resendMissingEventMessage, + namespaceID.String(), + workflowID, + runID, + scheduledEventID-1, + version, + common.EmptyEventID, + common.EmptyVersion, + ), err) + s.False(apply) +} + +func (s *activityReplicatorStateSuite) TestVersionHistory_Diverge_LocalLarger() { + namespaceID := tests.NamespaceID + workflowID := tests.WorkflowID + runID := uuid.New() + scheduledEventID := int64(99) + version := int64(100) + + localVersionHistories := &historyspb.VersionHistories{ + CurrentVersionHistoryIndex: 0, + Histories: []*historyspb.VersionHistory{{ + BranchToken: []byte{}, + Items: []*historyspb.VersionHistoryItem{ + { + EventId: scheduledEventID, + Version: version, + }, + { + EventId: scheduledEventID + 1, + Version: version + 2, + }, + }, + }}, + } + incomingVersionHistory := &historyspb.VersionHistory{ + BranchToken: []byte{}, + Items: []*historyspb.VersionHistoryItem{ + { + EventId: scheduledEventID + 10, + Version: version, + }, + { + EventId: scheduledEventID + 1, + Version: version + 1, + }, + }, + } + + s.mockMutableState.EXPECT().GetExecutionInfo().Return(&persistencespb.WorkflowExecutionInfo{ + VersionHistories: localVersionHistories, + }).AnyTimes() + s.mockMutableState.EXPECT().GetWorkflowStateStatus().Return( + enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING, + ).AnyTimes() + + apply, err := s.nDCActivityStateReplicator.testVersionHistory( + namespaceID, + workflowID, + runID, + scheduledEventID, + s.mockMutableState, + incomingVersionHistory, + ) + s.NoError(err) + s.False(apply) +} + +func (s *activityReplicatorStateSuite) TestVersionHistory_Diverge_IncomingLarger() { + namespaceID := tests.NamespaceID + workflowID := tests.WorkflowID + runID := uuid.New() + scheduledEventID := int64(99) + version := int64(100) + + localVersionHistories := &historyspb.VersionHistories{ + CurrentVersionHistoryIndex: 0, + Histories: []*historyspb.VersionHistory{{ + BranchToken: []byte{}, + Items: []*historyspb.VersionHistoryItem{ + { + EventId: scheduledEventID, + Version: version, + }, + { + EventId: scheduledEventID + 1, + Version: version + 1, + }, + }, + }}, + } + incomingVersionHistory := &historyspb.VersionHistory{ + BranchToken: []byte{}, + Items: []*historyspb.VersionHistoryItem{ + { + EventId: scheduledEventID, + Version: version, + }, + { + EventId: scheduledEventID + 1, + Version: version + 2, + }, + }, + } + + s.mockMutableState.EXPECT().GetExecutionInfo().Return(&persistencespb.WorkflowExecutionInfo{ + VersionHistories: localVersionHistories, + }).AnyTimes() + s.mockMutableState.EXPECT().GetWorkflowStateStatus().Return( + enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING, + ).AnyTimes() + + apply, err := s.nDCActivityStateReplicator.testVersionHistory( + namespaceID, + workflowID, + runID, + scheduledEventID, + s.mockMutableState, + 
incomingVersionHistory, + ) + s.Equal(serviceerrors.NewRetryReplication( + resendHigherVersionMessage, + namespaceID.String(), + workflowID, + runID, + scheduledEventID, + version, + common.EmptyEventID, + common.EmptyVersion, + ), err) + s.False(apply) +} + +func (s *activityReplicatorStateSuite) TestSyncActivity_WorkflowNotFound() { + namespaceName := namespace.Name("some random namespace name") + namespaceID := tests.NamespaceID + workflowID := "some random workflow ID" + runID := uuid.New() + version := int64(100) + + request := &historyservice.SyncActivityRequest{ + NamespaceId: namespaceID.String(), + WorkflowId: workflowID, + RunId: runID, + } + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), &persistence.GetWorkflowExecutionRequest{ + ShardID: s.mockShard.GetShardID(), + NamespaceID: namespaceID.String(), + WorkflowID: workflowID, + RunID: runID, + }).Return(nil, serviceerror.NewNotFound("")) + s.mockNamespaceCache.EXPECT().GetNamespaceByID(namespaceID).Return( + namespace.NewGlobalNamespaceForTest( + &persistencespb.NamespaceInfo{Id: namespaceID.String(), Name: namespaceName.String()}, + &persistencespb.NamespaceConfig{Retention: timestamp.DurationFromDays(1)}, + &persistencespb.NamespaceReplicationConfig{ + ActiveClusterName: cluster.TestCurrentClusterName, + Clusters: []string{ + cluster.TestCurrentClusterName, + cluster.TestAlternativeClusterName, + }, + }, + version, + ), nil, + ).AnyTimes() + + err := s.nDCActivityStateReplicator.SyncActivityState(context.Background(), request) + s.Nil(err) +} + +func (s *activityReplicatorStateSuite) TestSyncActivity_WorkflowClosed() { + namespaceName := tests.Namespace + namespaceID := tests.NamespaceID + workflowID := tests.WorkflowID + runID := uuid.New() + scheduledEventID := int64(99) + version := int64(100) + lastWriteVersion := version + + localVersionHistories := &historyspb.VersionHistories{ + CurrentVersionHistoryIndex: 0, + Histories: []*historyspb.VersionHistory{{ + BranchToken: []byte{}, + Items: []*historyspb.VersionHistoryItem{ + { + EventId: scheduledEventID + 10, + Version: version, + }, + }, + }}, + } + incomingVersionHistory := &historyspb.VersionHistory{ + BranchToken: []byte{}, + Items: []*historyspb.VersionHistoryItem{ + { + EventId: scheduledEventID, + Version: version, + }, + }, + } + + key := definition.NewWorkflowKey(namespaceID.String(), workflowID, runID) + weContext := workflow.NewMockContext(s.controller) + weContext.EXPECT().LoadMutableState(gomock.Any()).Return(s.mockMutableState, nil) + weContext.EXPECT().Lock(gomock.Any(), workflow.LockPriorityHigh).Return(nil) + weContext.EXPECT().Unlock(workflow.LockPriorityHigh) + weContext.EXPECT().IsDirty().Return(false).AnyTimes() + + _, err := s.workflowCache.PutIfNotExist(key, weContext) + s.NoError(err) + + request := &historyservice.SyncActivityRequest{ + NamespaceId: namespaceID.String(), + WorkflowId: workflowID, + RunId: runID, + Version: version, + ScheduledEventId: scheduledEventID, + VersionHistory: incomingVersionHistory, + } + + s.mockMutableState.EXPECT().GetExecutionInfo().Return(&persistencespb.WorkflowExecutionInfo{ + VersionHistories: localVersionHistories, + }).AnyTimes() + s.mockMutableState.EXPECT().GetWorkflowStateStatus().Return( + enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED, enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED, + ).AnyTimes() + + s.mockNamespaceCache.EXPECT().GetNamespaceByID(namespaceID).Return( + namespace.NewGlobalNamespaceForTest( + &persistencespb.NamespaceInfo{Id: namespaceID.String(), Name: namespaceName.String()}, + 
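The version-history tests above exercise testVersionHistory, which compares the local and incoming version histories around their lowest common ancestor (LCA) item. Below is a compact sketch of that decision table under simplified types; the trailing check that the workflow is not already completed and the construction of the retry-replication errors are omitted, and the boolean inputs stand for versionhistory.IsLCAVersionHistoryItemAppendable on each history.

package main

import "fmt"

// item is a simplified stand-in for historyspb.VersionHistoryItem.
type item struct {
	EventID int64
	Version int64
}

// decision enumerates the possible outcomes of testVersionHistory.
type decision string

const (
	applyActivity       decision = "apply"
	discardActivity     decision = "discard"
	resendMissingEvents decision = "resend missing events"
	resendHigherVersion decision = "resend higher version"
)

// decide mirrors the branching in testVersionHistory once the LCA item of the
// local and incoming histories is known. lastLocal / lastIncoming are the last
// items of the respective histories.
func decide(scheduledEventID int64, lca, lastLocal, lastIncoming item, localAppendable, incomingAppendable bool) decision {
	if localAppendable || incomingAppendable {
		// case 1: one history is a prefix of the other; resend only if the
		// local history does not yet contain the activity's scheduled event.
		if scheduledEventID > lca.EventID {
			return resendMissingEvents
		}
		return applyActivity
	}
	// case 2: the histories diverged after the LCA.
	if lastIncoming.Version < lastLocal.Version {
		return discardActivity // case 2-1: the local branch won the conflict
	}
	if lastIncoming.Version > lastLocal.Version {
		return resendHigherVersion // case 2-2: the incoming branch wins
	}
	return applyActivity
}

func main() {
	lca := item{EventID: 99, Version: 100}
	// local is a superset of incoming: LCA is incoming's last item, so apply.
	fmt.Println(decide(99, lca, item{EventID: 109, Version: 100}, item{EventID: 99, Version: 100}, false, true))
	// diverged with local on the higher version: discard the incoming sync.
	fmt.Println(decide(99, lca, item{EventID: 100, Version: 102}, item{EventID: 100, Version: 101}, false, false))
}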
&persistencespb.NamespaceConfig{Retention: timestamp.DurationFromDays(1)}, + &persistencespb.NamespaceReplicationConfig{ + ActiveClusterName: cluster.TestCurrentClusterName, + Clusters: []string{ + cluster.TestCurrentClusterName, + cluster.TestAlternativeClusterName, + }, + }, + lastWriteVersion, + ), nil, + ).AnyTimes() + + err = s.nDCActivityStateReplicator.SyncActivityState(context.Background(), request) + s.Nil(err) +} + +func (s *activityReplicatorStateSuite) TestSyncActivity_ActivityNotFound() { + namespaceName := tests.Namespace + namespaceID := tests.NamespaceID + workflowID := tests.WorkflowID + runID := uuid.New() + scheduledEventID := int64(99) + version := int64(100) + lastWriteVersion := version + + localVersionHistories := &historyspb.VersionHistories{ + CurrentVersionHistoryIndex: 0, + Histories: []*historyspb.VersionHistory{{ + BranchToken: []byte{}, + Items: []*historyspb.VersionHistoryItem{ + { + EventId: scheduledEventID + 10, + Version: version, + }, + }, + }}, + } + incomingVersionHistory := &historyspb.VersionHistory{ + BranchToken: []byte{}, + Items: []*historyspb.VersionHistoryItem{ + { + EventId: scheduledEventID, + Version: version, + }, + }, + } + + key := definition.NewWorkflowKey(namespaceID.String(), workflowID, runID) + weContext := workflow.NewMockContext(s.controller) + weContext.EXPECT().LoadMutableState(gomock.Any()).Return(s.mockMutableState, nil) + weContext.EXPECT().Lock(gomock.Any(), workflow.LockPriorityHigh).Return(nil) + weContext.EXPECT().Unlock(workflow.LockPriorityHigh) + weContext.EXPECT().IsDirty().Return(false).AnyTimes() + + _, err := s.workflowCache.PutIfNotExist(key, weContext) + s.NoError(err) + + request := &historyservice.SyncActivityRequest{ + NamespaceId: namespaceID.String(), + WorkflowId: workflowID, + RunId: runID, + Version: version, + ScheduledEventId: scheduledEventID, + VersionHistory: incomingVersionHistory, + } + + s.mockMutableState.EXPECT().GetExecutionInfo().Return(&persistencespb.WorkflowExecutionInfo{ + VersionHistories: localVersionHistories, + }).AnyTimes() + s.mockMutableState.EXPECT().GetWorkflowStateStatus().Return( + enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING, + ).AnyTimes() + s.mockMutableState.EXPECT().GetActivityInfo(scheduledEventID).Return(nil, false) + + s.mockNamespaceCache.EXPECT().GetNamespaceByID(namespaceID).Return( + namespace.NewGlobalNamespaceForTest( + &persistencespb.NamespaceInfo{Id: namespaceID.String(), Name: namespaceName.String()}, + &persistencespb.NamespaceConfig{Retention: timestamp.DurationFromDays(1)}, + &persistencespb.NamespaceReplicationConfig{ + ActiveClusterName: cluster.TestCurrentClusterName, + Clusters: []string{ + cluster.TestCurrentClusterName, + cluster.TestAlternativeClusterName, + }, + }, + lastWriteVersion, + ), nil, + ).AnyTimes() + + err = s.nDCActivityStateReplicator.SyncActivityState(context.Background(), request) + s.Nil(err) +} + +func (s *activityReplicatorStateSuite) TestSyncActivity_ActivityFound_Zombie() { + namespaceName := tests.Namespace + namespaceID := tests.NamespaceID + workflowID := tests.WorkflowID + runID := uuid.New() + scheduledEventID := int64(99) + version := int64(100) + lastWriteVersion := version + + localVersionHistories := &historyspb.VersionHistories{ + CurrentVersionHistoryIndex: 0, + Histories: []*historyspb.VersionHistory{{ + BranchToken: []byte{}, + Items: []*historyspb.VersionHistoryItem{ + { + EventId: scheduledEventID + 10, + Version: version, + }, + }, + }}, + } + incomingVersionHistory := 
&historyspb.VersionHistory{ + BranchToken: []byte{}, + Items: []*historyspb.VersionHistoryItem{ + { + EventId: scheduledEventID, + Version: version, + }, + }, + } + + key := definition.NewWorkflowKey(namespaceID.String(), workflowID, runID) + weContext := workflow.NewMockContext(s.controller) + weContext.EXPECT().LoadMutableState(gomock.Any()).Return(s.mockMutableState, nil) + weContext.EXPECT().Lock(gomock.Any(), workflow.LockPriorityHigh).Return(nil) + weContext.EXPECT().Unlock(workflow.LockPriorityHigh) + weContext.EXPECT().IsDirty().Return(false).AnyTimes() + + _, err := s.workflowCache.PutIfNotExist(key, weContext) + s.NoError(err) + + now := time.Now() + request := &historyservice.SyncActivityRequest{ + NamespaceId: namespaceID.String(), + WorkflowId: workflowID, + RunId: runID, + Version: version, + ScheduledEventId: scheduledEventID, + ScheduledTime: &now, + VersionHistory: incomingVersionHistory, + } + + s.mockMutableState.EXPECT().GetExecutionInfo().Return(&persistencespb.WorkflowExecutionInfo{ + VersionHistories: localVersionHistories, + }).AnyTimes() + s.mockMutableState.EXPECT().GetWorkflowStateStatus().Return( + enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE, enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING, + ).AnyTimes() + s.mockMutableState.EXPECT().GetActivityInfo(scheduledEventID).Return(&persistencespb.ActivityInfo{ + Version: version, + }, true) + s.mockMutableState.EXPECT().ReplicateActivityInfo(request, false).Return(nil) + s.mockMutableState.EXPECT().GetPendingActivityInfos().Return(map[int64]*persistencespb.ActivityInfo{}) + s.mockClusterMetadata.EXPECT().IsVersionFromSameCluster(version, version).Return(true) + + weContext.EXPECT().UpdateWorkflowExecutionWithNew( + gomock.Any(), + persistence.UpdateWorkflowModeBypassCurrent, + workflow.Context(nil), + workflow.MutableState(nil), + workflow.TransactionPolicyPassive, + (*workflow.TransactionPolicy)(nil), + ).Return(nil) + + s.mockNamespaceCache.EXPECT().GetNamespaceByID(namespaceID).Return( + namespace.NewGlobalNamespaceForTest( + &persistencespb.NamespaceInfo{Id: namespaceID.String(), Name: namespaceName.String()}, + &persistencespb.NamespaceConfig{Retention: timestamp.DurationFromDays(1)}, + &persistencespb.NamespaceReplicationConfig{ + ActiveClusterName: cluster.TestCurrentClusterName, + Clusters: []string{ + cluster.TestCurrentClusterName, + cluster.TestAlternativeClusterName, + }, + }, + lastWriteVersion, + ), nil, + ).AnyTimes() + + err = s.nDCActivityStateReplicator.SyncActivityState(context.Background(), request) + s.Nil(err) +} + +func (s *activityReplicatorStateSuite) TestSyncActivity_ActivityFound_NonZombie() { + namespaceName := tests.Namespace + namespaceID := tests.NamespaceID + workflowID := tests.WorkflowID + runID := uuid.New() + scheduledEventID := int64(99) + version := int64(100) + lastWriteVersion := version + + localVersionHistories := &historyspb.VersionHistories{ + CurrentVersionHistoryIndex: 0, + Histories: []*historyspb.VersionHistory{{ + BranchToken: []byte{}, + Items: []*historyspb.VersionHistoryItem{ + { + EventId: scheduledEventID + 10, + Version: version, + }, + }, + }}, + } + incomingVersionHistory := &historyspb.VersionHistory{ + BranchToken: []byte{}, + Items: []*historyspb.VersionHistoryItem{ + { + EventId: scheduledEventID, + Version: version, + }, + }, + } + + key := definition.NewWorkflowKey(namespaceID.String(), workflowID, runID) + weContext := workflow.NewMockContext(s.controller) + weContext.EXPECT().LoadMutableState(gomock.Any()).Return(s.mockMutableState, nil) + 
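The zombie test just above and the non-zombie test that follows pin down which persistence update mode the replicator must use: a zombie execution is persisted with UpdateWorkflowModeBypassCurrent so it never becomes the current run, while a running execution uses UpdateWorkflowModeUpdateCurrent. A minimal sketch of that choice is below; chooseUpdateMode is an invented helper, not a function in this diff, and only the enum and persistence constants come from the source.

package main

import (
	"fmt"

	enumsspb "go.temporal.io/server/api/enums/v1"
	"go.temporal.io/server/common/persistence"
)

// chooseUpdateMode is a hypothetical helper mirroring the expectations in the
// Zombie / NonZombie tests: zombie mutable state must not become the current
// execution, so its update bypasses the current-run record.
func chooseUpdateMode(state enumsspb.WorkflowExecutionState) persistence.UpdateWorkflowMode {
	if state == enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE {
		return persistence.UpdateWorkflowModeBypassCurrent
	}
	return persistence.UpdateWorkflowModeUpdateCurrent
}

func main() {
	fmt.Println(chooseUpdateMode(enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE) == persistence.UpdateWorkflowModeBypassCurrent)  // true
	fmt.Println(chooseUpdateMode(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING) == persistence.UpdateWorkflowModeUpdateCurrent) // true
}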
weContext.EXPECT().Lock(gomock.Any(), workflow.LockPriorityHigh).Return(nil) + weContext.EXPECT().Unlock(workflow.LockPriorityHigh) + weContext.EXPECT().IsDirty().Return(false).AnyTimes() + + _, err := s.workflowCache.PutIfNotExist(key, weContext) + s.NoError(err) + + now := time.Now() + request := &historyservice.SyncActivityRequest{ + NamespaceId: namespaceID.String(), + WorkflowId: workflowID, + RunId: runID, + Version: version, + ScheduledEventId: scheduledEventID, + ScheduledTime: &now, + VersionHistory: incomingVersionHistory, + } + + s.mockMutableState.EXPECT().GetExecutionInfo().Return(&persistencespb.WorkflowExecutionInfo{ + VersionHistories: localVersionHistories, + }).AnyTimes() + s.mockMutableState.EXPECT().GetWorkflowStateStatus().Return( + enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING, + ).AnyTimes() + s.mockMutableState.EXPECT().GetActivityInfo(scheduledEventID).Return(&persistencespb.ActivityInfo{ + Version: version, + }, true) + s.mockMutableState.EXPECT().ReplicateActivityInfo(request, false).Return(nil) + s.mockMutableState.EXPECT().GetPendingActivityInfos().Return(map[int64]*persistencespb.ActivityInfo{}) + + s.mockClusterMetadata.EXPECT().IsVersionFromSameCluster(version, version).Return(true) + + weContext.EXPECT().UpdateWorkflowExecutionWithNew( + gomock.Any(), + persistence.UpdateWorkflowModeUpdateCurrent, + workflow.Context(nil), + workflow.MutableState(nil), + workflow.TransactionPolicyPassive, + (*workflow.TransactionPolicy)(nil), + ).Return(nil) + + s.mockNamespaceCache.EXPECT().GetNamespaceByID(namespaceID).Return( + namespace.NewGlobalNamespaceForTest( + &persistencespb.NamespaceInfo{Id: namespaceID.String(), Name: namespaceName.String()}, + &persistencespb.NamespaceConfig{Retention: timestamp.DurationFromDays(1)}, + &persistencespb.NamespaceReplicationConfig{ + ActiveClusterName: cluster.TestCurrentClusterName, + Clusters: []string{ + cluster.TestCurrentClusterName, + cluster.TestAlternativeClusterName, + }, + }, + lastWriteVersion, + ), nil, + ).AnyTimes() + + err = s.nDCActivityStateReplicator.SyncActivityState(context.Background(), request) + s.Nil(err) +} diff -Nru temporal-1.21.5-1/src/service/history/ndc/history_replicator.go temporal-1.22.5/src/service/history/ndc/history_replicator.go --- temporal-1.21.5-1/src/service/history/ndc/history_replicator.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/ndc/history_replicator.go 2024-02-23 09:45:43.000000000 +0000 @@ -26,8 +26,6 @@ import ( "context" - "fmt" - "sort" "time" "github.com/pborman/uuid" @@ -35,17 +33,12 @@ enumspb "go.temporal.io/api/enums/v1" historypb "go.temporal.io/api/history/v1" "go.temporal.io/api/serviceerror" - "golang.org/x/exp/slices" - "go.temporal.io/server/api/adminservice/v1" - enumsspb "go.temporal.io/server/api/enums/v1" historyspb "go.temporal.io/server/api/history/v1" "go.temporal.io/server/api/historyservice/v1" - persistencespb "go.temporal.io/server/api/persistence/v1" workflowpb "go.temporal.io/server/api/workflow/v1" "go.temporal.io/server/common" "go.temporal.io/server/common/cluster" - "go.temporal.io/server/common/collection" "go.temporal.io/server/common/definition" "go.temporal.io/server/common/log" "go.temporal.io/server/common/log/tag" @@ -120,16 +113,11 @@ events [][]*historypb.HistoryEvent, newEvents []*historypb.HistoryEvent, ) error - ApplyWorkflowState( - ctx context.Context, - request *historyservice.ReplicateWorkflowStateRequest, - ) error } HistoryReplicatorImpl struct { shard 
shard.Context clusterMetadata cluster.Metadata - executionMgr persistence.ExecutionManager historySerializer serialization.Serializer metricsHandler metrics.Handler namespaceRegistry namespace.Registry @@ -157,15 +145,14 @@ shard shard.Context, workflowCache wcache.Cache, eventsReapplier EventsReapplier, - logger log.Logger, eventSerializer serialization.Serializer, + logger log.Logger, ) *HistoryReplicatorImpl { transactionMgr := newTransactionMgr(shard, workflowCache, eventsReapplier, logger) replicator := &HistoryReplicatorImpl{ shard: shard, clusterMetadata: shard.GetClusterMetadata(), - executionMgr: shard.GetExecutionManager(), historySerializer: eventSerializer, metricsHandler: shard.GetMetricsHandler(), namespaceRegistry: shard.GetNamespaceRegistry(), @@ -269,165 +256,6 @@ return r.doApplyEvents(ctx, task) } -func (r *HistoryReplicatorImpl) ApplyWorkflowState( - ctx context.Context, - request *historyservice.ReplicateWorkflowStateRequest, -) (retError error) { - executionInfo := request.GetWorkflowState().GetExecutionInfo() - executionState := request.GetWorkflowState().GetExecutionState() - namespaceID := namespace.ID(executionInfo.GetNamespaceId()) - wid := executionInfo.GetWorkflowId() - rid := executionState.GetRunId() - if executionState.State != enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED { - return serviceerror.NewInternal("Replicate non completed workflow state is not supported.") - } - - wfCtx, releaseFn, err := r.workflowCache.GetOrCreateWorkflowExecution( - ctx, - namespaceID, - commonpb.WorkflowExecution{ - WorkflowId: wid, - RunId: rid, - }, - workflow.LockPriorityLow, - ) - if err != nil { - return err - } - defer func() { - if rec := recover(); rec != nil { - releaseFn(errPanic) - panic(rec) - } else { - releaseFn(retError) - } - }() - - // Handle existing workflows - ms, err := wfCtx.LoadMutableState(ctx) - switch err.(type) { - case *serviceerror.NotFound: - // no-op, continue to replicate workflow state - case nil: - // workflow exists, do resend if version histories are not match. - localVersionHistory, err := versionhistory.GetCurrentVersionHistory(ms.GetExecutionInfo().GetVersionHistories()) - if err != nil { - return err - } - localHistoryLastItem, err := versionhistory.GetLastVersionHistoryItem(localVersionHistory) - if err != nil { - return err - } - incomingVersionHistory, err := versionhistory.GetCurrentVersionHistory(request.GetWorkflowState().GetExecutionInfo().GetVersionHistories()) - if err != nil { - return err - } - incomingHistoryLastItem, err := versionhistory.GetLastVersionHistoryItem(incomingVersionHistory) - if err != nil { - return err - } - if !versionhistory.IsEqualVersionHistoryItem(localHistoryLastItem, incomingHistoryLastItem) { - return serviceerrors.NewRetryReplication( - "Failed to sync workflow state due to version history mismatch", - namespaceID.String(), - wid, - rid, - localHistoryLastItem.GetEventId(), - localHistoryLastItem.GetVersion(), - common.EmptyEventID, - common.EmptyVersion, - ) - } - return nil - default: - return err - } - - currentVersionHistory, err := versionhistory.GetCurrentVersionHistory(executionInfo.VersionHistories) - if err != nil { - return err - } - lastEventItem, err := versionhistory.GetLastVersionHistoryItem(currentVersionHistory) - if err != nil { - return err - } - - // The following sanitizes the branch token from the source cluster to this target cluster by re-initializing it. 
- - branchInfo, err := r.shard.GetExecutionManager().GetHistoryBranchUtil().ParseHistoryBranchInfo( - currentVersionHistory.GetBranchToken(), - ) - if err != nil { - return err - } - newHistoryBranchToken, err := r.shard.GetExecutionManager().GetHistoryBranchUtil().NewHistoryBranch( - request.NamespaceId, - branchInfo.GetTreeId(), - &branchInfo.BranchId, - branchInfo.Ancestors, - nil, - nil, - nil, - ) - if err != nil { - return err - } - - _, lastFirstTxnID, err := r.backfillHistory( - ctx, - request.GetRemoteCluster(), - namespaceID, - wid, - rid, - lastEventItem.GetEventId(), - lastEventItem.GetVersion(), - newHistoryBranchToken, - ) - if err != nil { - return err - } - - ns, err := r.namespaceRegistry.GetNamespaceByID(namespaceID) - if err != nil { - return err - } - - mutableState, err := workflow.NewSanitizedMutableState( - r.shard, - r.shard.GetEventsCache(), - r.logger, - ns, - request.GetWorkflowState(), - lastFirstTxnID, - lastEventItem.GetVersion(), - ) - if err != nil { - return err - } - - err = mutableState.SetCurrentBranchToken(newHistoryBranchToken) - if err != nil { - return err - } - - taskRefresh := workflow.NewTaskRefresher(r.shard, r.shard.GetConfig(), r.namespaceRegistry, r.logger) - err = taskRefresh.RefreshTasks(ctx, mutableState) - if err != nil { - return err - } - return r.transactionMgr.createWorkflow( - ctx, - NewWorkflow( - ctx, - r.namespaceRegistry, - r.clusterMetadata, - wfCtx, - mutableState, - releaseFn, - ), - ) -} - func (r *HistoryReplicatorImpl) doApplyEvents( ctx context.Context, task replicationTask, @@ -739,7 +567,7 @@ return err } - transactionID, err := r.shard.GenerateTaskID() + transactionIDs, err := r.shard.GenerateTaskIDs(len(task.getEvents())) if err != nil { return err } @@ -752,7 +580,7 @@ RunID: task.getExecution().GetRunId(), BranchToken: versionHistory.GetBranchToken(), PrevTxnID: 0, // TODO @wxing1292 events chaining will not work for backfill case - TxnID: transactionID, + TxnID: transactionIDs[i], Events: events, } } @@ -945,209 +773,3 @@ now = now.Add(-r.shard.GetConfig().StandbyClusterDelay()) r.shard.SetCurrentTime(clusterName, now) } - -func (r *HistoryReplicatorImpl) backfillHistory( - ctx context.Context, - remoteClusterName string, - namespaceID namespace.ID, - workflowID string, - runID string, - lastEventID int64, - lastEventVersion int64, - branchToken []byte, -) (*time.Time, int64, error) { - - // Get the last batch node id to check if the history data is already in DB. 
- localHistoryIterator := collection.NewPagingIterator(r.getHistoryFromLocalPaginationFn( - ctx, - branchToken, - lastEventID, - )) - var lastBatchNodeID int64 - for localHistoryIterator.HasNext() { - localHistoryBatch, err := localHistoryIterator.Next() - switch err.(type) { - case nil: - if len(localHistoryBatch.GetEvents()) > 0 { - lastBatchNodeID = localHistoryBatch.GetEvents()[0].GetEventId() - } - case *serviceerror.NotFound: - default: - return nil, common.EmptyEventTaskID, err - } - } - - remoteHistoryIterator := collection.NewPagingIterator(r.getHistoryFromRemotePaginationFn( - ctx, - remoteClusterName, - namespaceID, - workflowID, - runID, - lastEventID, - lastEventVersion), - ) - historyBranchUtil := r.executionMgr.GetHistoryBranchUtil() - historyBranch, err := historyBranchUtil.ParseHistoryBranchInfo(branchToken) - if err != nil { - return nil, common.EmptyEventTaskID, err - } - - prevTxnID := common.EmptyEventTaskID - var lastHistoryBatch *commonpb.DataBlob - var prevBranchID string - sortedAncestors := sortAncestors(historyBranch.GetAncestors()) - sortedAncestorsIdx := 0 - var ancestors []*persistencespb.HistoryBranchRange - -BackfillLoop: - for remoteHistoryIterator.HasNext() { - historyBlob, err := remoteHistoryIterator.Next() - if err != nil { - return nil, common.EmptyEventTaskID, err - } - - if historyBlob.nodeID <= lastBatchNodeID { - // The history batch already in DB. - continue BackfillLoop - } - - branchID := historyBranch.GetBranchId() - if sortedAncestorsIdx < len(sortedAncestors) { - currentAncestor := sortedAncestors[sortedAncestorsIdx] - if historyBlob.nodeID >= currentAncestor.GetEndNodeId() { - // update ancestor - ancestors = append(ancestors, currentAncestor) - sortedAncestorsIdx++ - } - if sortedAncestorsIdx < len(sortedAncestors) { - // use ancestor branch id - currentAncestor = sortedAncestors[sortedAncestorsIdx] - branchID = currentAncestor.GetBranchId() - if historyBlob.nodeID < currentAncestor.GetBeginNodeId() || historyBlob.nodeID >= currentAncestor.GetEndNodeId() { - return nil, common.EmptyEventTaskID, serviceerror.NewInternal( - fmt.Sprintf("The backfill history blob node id %d is not in acestoer range [%d, %d]", - historyBlob.nodeID, - currentAncestor.GetBeginNodeId(), - currentAncestor.GetEndNodeId()), - ) - } - } - } - - filteredHistoryBranch, err := historyBranchUtil.UpdateHistoryBranchInfo( - branchToken, - &persistencespb.HistoryBranch{ - TreeId: historyBranch.GetTreeId(), - BranchId: branchID, - Ancestors: ancestors, - }, - ) - if err != nil { - return nil, common.EmptyEventTaskID, err - } - txnID, err := r.shard.GenerateTaskID() - if err != nil { - return nil, common.EmptyEventTaskID, err - } - _, err = r.executionMgr.AppendRawHistoryNodes(ctx, &persistence.AppendRawHistoryNodesRequest{ - ShardID: r.shard.GetShardID(), - IsNewBranch: prevBranchID != branchID, - BranchToken: filteredHistoryBranch, - History: historyBlob.rawHistory, - PrevTransactionID: prevTxnID, - TransactionID: txnID, - NodeID: historyBlob.nodeID, - Info: persistence.BuildHistoryGarbageCleanupInfo( - namespaceID.String(), - workflowID, - runID, - ), - }) - if err != nil { - return nil, common.EmptyEventTaskID, err - } - prevTxnID = txnID - prevBranchID = branchID - lastHistoryBatch = historyBlob.rawHistory - } - - var lastEventTime *time.Time - events, _ := r.historySerializer.DeserializeEvents(lastHistoryBatch) - if len(events) > 0 { - lastEventTime = events[len(events)-1].EventTime - } - return lastEventTime, prevTxnID, nil -} - -func sortAncestors(ans 
[]*persistencespb.HistoryBranchRange) []*persistencespb.HistoryBranchRange { - if len(ans) > 0 { - // sort ans based onf EndNodeID so that we can set BeginNodeID - sort.Slice(ans, func(i, j int) bool { return ans[i].GetEndNodeId() < ans[j].GetEndNodeId() }) - ans[0].BeginNodeId = int64(1) - for i := 1; i < len(ans); i++ { - ans[i].BeginNodeId = ans[i-1].GetEndNodeId() - } - } - return ans -} - -func (r *HistoryReplicatorImpl) getHistoryFromRemotePaginationFn( - ctx context.Context, - remoteClusterName string, - namespaceID namespace.ID, - workflowID string, - runID string, - endEventID int64, - endEventVersion int64, -) collection.PaginationFn[*rawHistoryData] { - - return func(paginationToken []byte) ([]*rawHistoryData, []byte, error) { - - adminClient, err := r.shard.GetRemoteAdminClient(remoteClusterName) - if err != nil { - return nil, nil, err - } - response, err := adminClient.GetWorkflowExecutionRawHistoryV2(ctx, &adminservice.GetWorkflowExecutionRawHistoryV2Request{ - NamespaceId: namespaceID.String(), - Execution: &commonpb.WorkflowExecution{WorkflowId: workflowID, RunId: runID}, - EndEventId: endEventID + 1, - EndEventVersion: endEventVersion, - MaximumPageSize: 1000, - NextPageToken: paginationToken, - }) - if err != nil { - return nil, nil, err - } - - batches := make([]*rawHistoryData, 0, len(response.GetHistoryBatches())) - for idx, blob := range response.GetHistoryBatches() { - batches = append(batches, &rawHistoryData{ - rawHistory: blob, - nodeID: response.GetHistoryNodeIds()[idx], - }) - } - return batches, response.NextPageToken, nil - } -} - -func (r *HistoryReplicatorImpl) getHistoryFromLocalPaginationFn( - ctx context.Context, - branchToken []byte, - lastEventID int64, -) collection.PaginationFn[*historypb.History] { - - return func(paginationToken []byte) ([]*historypb.History, []byte, error) { - response, err := r.executionMgr.ReadHistoryBranchByBatch(ctx, &persistence.ReadHistoryBranchRequest{ - ShardID: r.shard.GetShardID(), - BranchToken: branchToken, - MinEventID: common.FirstEventID, - MaxEventID: lastEventID + 1, - PageSize: 100, - NextPageToken: paginationToken, - }) - if err != nil { - return nil, nil, err - } - return slices.Clone(response.History), response.NextPageToken, nil - } -} diff -Nru temporal-1.21.5-1/src/service/history/ndc/history_replicator_test.go temporal-1.22.5/src/service/history/ndc/history_replicator_test.go --- temporal-1.21.5-1/src/service/history/ndc/history_replicator_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/ndc/history_replicator_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,482 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package ndc - -import ( - "context" - "testing" - "time" - - historypb "go.temporal.io/api/history/v1" - - "go.temporal.io/server/common" - serviceerrors "go.temporal.io/server/common/serviceerror" - "go.temporal.io/server/service/history/events" - - "github.com/golang/mock/gomock" - "github.com/pborman/uuid" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - commonpb "go.temporal.io/api/common/v1" - enumspb "go.temporal.io/api/enums/v1" - "go.temporal.io/api/serviceerror" - - "go.temporal.io/server/api/adminservice/v1" - "go.temporal.io/server/api/adminservicemock/v1" - enumsspb "go.temporal.io/server/api/enums/v1" - historyspb "go.temporal.io/server/api/history/v1" - "go.temporal.io/server/api/historyservice/v1" - persistencespb "go.temporal.io/server/api/persistence/v1" - - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/namespace" - "go.temporal.io/server/common/persistence" - "go.temporal.io/server/common/persistence/serialization" - "go.temporal.io/server/service/history/shard" - "go.temporal.io/server/service/history/tests" - "go.temporal.io/server/service/history/workflow" - wcache "go.temporal.io/server/service/history/workflow/cache" -) - -type ( - historyReplicatorSuite struct { - suite.Suite - *require.Assertions - - controller *gomock.Controller - mockShard *shard.ContextTest - mockEventCache *events.MockCache - mockWorkflowCache *wcache.MockCache - mockNamespaceCache *namespace.MockRegistry - mockRemoteAdminClient *adminservicemock.MockAdminServiceClient - mockExecutionManager *persistence.MockExecutionManager - logger log.Logger - - workflowID string - runID string - now time.Time - - historyReplicator *HistoryReplicatorImpl - } -) - -func TestHistoryReplicatorSuite(t *testing.T) { - s := new(historyReplicatorSuite) - suite.Run(t, s) -} - -func (s *historyReplicatorSuite) SetupTest() { - s.Assertions = require.New(s.T()) - - s.controller = gomock.NewController(s.T()) - // s.mockTaskRefresher = workflow.NewMockTaskRefresher(s.controller) - - s.mockShard = shard.NewTestContext( - s.controller, - &persistencespb.ShardInfo{ - ShardId: 10, - RangeId: 1, - }, - tests.NewDynamicConfig(), - ) - - s.mockExecutionManager = s.mockShard.Resource.ExecutionMgr - s.mockNamespaceCache = s.mockShard.Resource.NamespaceCache - s.mockWorkflowCache = wcache.NewMockCache(s.controller) - s.mockEventCache = s.mockShard.MockEventsCache - s.mockRemoteAdminClient = s.mockShard.Resource.RemoteAdminClient - eventReapplier := NewMockEventsReapplier(s.controller) - s.logger = s.mockShard.GetLogger() - - s.workflowID = "some random workflow ID" - s.runID = uuid.New() - s.now = time.Now().UTC() - s.historyReplicator = NewHistoryReplicator( - s.mockShard, - s.mockWorkflowCache, - eventReapplier, - s.logger, - serialization.NewSerializer(), - ) -} - -func (s *historyReplicatorSuite) TearDownTest() { - s.controller.Finish() - s.mockShard.StopForTest() -} - -func (s *historyReplicatorSuite) Test_ApplyWorkflowState_BrandNew() { - namespaceID := uuid.New() - namespaceName := 
"namespaceName" - branchInfo := &persistencespb.HistoryBranch{ - TreeId: uuid.New(), - BranchId: uuid.New(), - Ancestors: nil, - } - historyBranch, err := serialization.HistoryBranchToBlob(branchInfo) - s.NoError(err) - completionEventBatchId := int64(5) - nextEventID := int64(7) - request := &historyservice.ReplicateWorkflowStateRequest{ - WorkflowState: &persistencespb.WorkflowMutableState{ - ExecutionInfo: &persistencespb.WorkflowExecutionInfo{ - WorkflowId: s.workflowID, - NamespaceId: namespaceID, - VersionHistories: &historyspb.VersionHistories{ - CurrentVersionHistoryIndex: 0, - Histories: []*historyspb.VersionHistory{ - { - BranchToken: historyBranch.GetData(), - Items: []*historyspb.VersionHistoryItem{ - { - EventId: int64(100), - Version: int64(100), - }, - }, - }, - }, - }, - CompletionEventBatchId: completionEventBatchId, - }, - ExecutionState: &persistencespb.WorkflowExecutionState{ - RunId: s.runID, - State: enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED, - Status: enumspb.WORKFLOW_EXECUTION_STATUS_TERMINATED, - }, - NextEventId: nextEventID, - }, - RemoteCluster: "test", - } - we := commonpb.WorkflowExecution{ - WorkflowId: s.workflowID, - RunId: s.runID, - } - mockWeCtx := workflow.NewMockContext(s.controller) - s.mockWorkflowCache.EXPECT().GetOrCreateWorkflowExecution( - gomock.Any(), - namespace.ID(namespaceID), - we, - workflow.LockPriorityLow, - ).Return(mockWeCtx, wcache.NoopReleaseFn, nil) - mockWeCtx.EXPECT().LoadMutableState(gomock.Any()).Return(nil, serviceerror.NewNotFound("ms not found")) - mockWeCtx.EXPECT().CreateWorkflowExecution( - gomock.Any(), - persistence.CreateWorkflowModeBrandNew, - "", - int64(0), - gomock.Any(), - gomock.Any(), - []*persistence.WorkflowEvents{}, - ).Return(nil) - s.mockNamespaceCache.EXPECT().GetNamespaceByID(namespace.ID(namespaceID)).Return(namespace.NewNamespaceForTest( - &persistencespb.NamespaceInfo{Name: namespaceName}, - nil, - false, - nil, - int64(100), - ), nil).AnyTimes() - s.mockRemoteAdminClient.EXPECT().GetWorkflowExecutionRawHistoryV2(gomock.Any(), gomock.Any()).Return( - &adminservice.GetWorkflowExecutionRawHistoryV2Response{}, - nil, - ) - s.mockExecutionManager.EXPECT().ReadHistoryBranchByBatch(gomock.Any(), gomock.Any()).Return(nil, serviceerror.NewNotFound("test")) - s.mockExecutionManager.EXPECT().GetCurrentExecution(gomock.Any(), gomock.Any()).Return(nil, serviceerror.NewNotFound("")) - fakeStartHistory := &historypb.HistoryEvent{ - Attributes: &historypb.HistoryEvent_WorkflowExecutionStartedEventAttributes{ - WorkflowExecutionStartedEventAttributes: &historypb.WorkflowExecutionStartedEventAttributes{}, - }, - } - fakeCompletionEvent := &historypb.HistoryEvent{ - Attributes: &historypb.HistoryEvent_WorkflowExecutionTerminatedEventAttributes{ - WorkflowExecutionTerminatedEventAttributes: &historypb.WorkflowExecutionTerminatedEventAttributes{}, - }, - } - s.mockEventCache.EXPECT().GetEvent(gomock.Any(), gomock.Any(), common.FirstEventID, gomock.Any()).Return(fakeStartHistory, nil).AnyTimes() - s.mockEventCache.EXPECT().GetEvent(gomock.Any(), gomock.Any(), completionEventBatchId, gomock.Any()).Return(fakeCompletionEvent, nil).AnyTimes() - err = s.historyReplicator.ApplyWorkflowState(context.Background(), request) - s.NoError(err) -} - -func (s *historyReplicatorSuite) Test_ApplyWorkflowState_Ancestors() { - namespaceID := uuid.New() - namespaceName := "namespaceName" - branchInfo := &persistencespb.HistoryBranch{ - TreeId: uuid.New(), - BranchId: uuid.New(), - Ancestors: []*persistencespb.HistoryBranchRange{ - { - 
BranchId: uuid.New(), - BeginNodeId: 1, - EndNodeId: 3, - }, - { - BranchId: uuid.New(), - BeginNodeId: 3, - EndNodeId: 4, - }, - }, - } - historyBranch, err := serialization.HistoryBranchToBlob(branchInfo) - s.NoError(err) - completionEventBatchId := int64(5) - nextEventID := int64(7) - request := &historyservice.ReplicateWorkflowStateRequest{ - WorkflowState: &persistencespb.WorkflowMutableState{ - ExecutionInfo: &persistencespb.WorkflowExecutionInfo{ - WorkflowId: s.workflowID, - NamespaceId: namespaceID, - VersionHistories: &historyspb.VersionHistories{ - CurrentVersionHistoryIndex: 0, - Histories: []*historyspb.VersionHistory{ - { - BranchToken: historyBranch.GetData(), - Items: []*historyspb.VersionHistoryItem{ - { - EventId: int64(100), - Version: int64(100), - }, - }, - }, - }, - }, - CompletionEventBatchId: completionEventBatchId, - }, - ExecutionState: &persistencespb.WorkflowExecutionState{ - RunId: s.runID, - State: enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED, - Status: enumspb.WORKFLOW_EXECUTION_STATUS_TERMINATED, - }, - NextEventId: nextEventID, - }, - RemoteCluster: "test", - } - we := commonpb.WorkflowExecution{ - WorkflowId: s.workflowID, - RunId: s.runID, - } - mockWeCtx := workflow.NewMockContext(s.controller) - s.mockWorkflowCache.EXPECT().GetOrCreateWorkflowExecution( - gomock.Any(), - namespace.ID(namespaceID), - we, - workflow.LockPriorityLow, - ).Return(mockWeCtx, wcache.NoopReleaseFn, nil) - mockWeCtx.EXPECT().LoadMutableState(gomock.Any()).Return(nil, serviceerror.NewNotFound("ms not found")) - mockWeCtx.EXPECT().CreateWorkflowExecution( - gomock.Any(), - persistence.CreateWorkflowModeBrandNew, - "", - int64(0), - gomock.Any(), - gomock.Any(), - []*persistence.WorkflowEvents{}, - ).Return(nil) - s.mockNamespaceCache.EXPECT().GetNamespaceByID(namespace.ID(namespaceID)).Return(namespace.NewNamespaceForTest( - &persistencespb.NamespaceInfo{Name: namespaceName}, - nil, - false, - nil, - int64(100), - ), nil).AnyTimes() - expectedHistory := []*historypb.History{ - { - Events: []*historypb.HistoryEvent{ - { - EventId: 1, - }, - { - EventId: 2, - }, - }, - }, - { - Events: []*historypb.HistoryEvent{ - { - EventId: 3, - }, - }, - }, - { - Events: []*historypb.HistoryEvent{ - { - EventId: 4, - }, - }, - }, - { - Events: []*historypb.HistoryEvent{ - { - EventId: 5, - }, - { - EventId: 6, - }, - }, - }, - } - serializer := serialization.NewSerializer() - var historyBlobs []*commonpb.DataBlob - var nodeIds []int64 - for _, history := range expectedHistory { - blob, err := serializer.SerializeEvents(history.GetEvents(), enumspb.ENCODING_TYPE_PROTO3) - s.NoError(err) - historyBlobs = append(historyBlobs, blob) - nodeIds = append(nodeIds, history.GetEvents()[0].GetEventId()) - } - s.mockRemoteAdminClient.EXPECT().GetWorkflowExecutionRawHistoryV2(gomock.Any(), gomock.Any()).Return( - &adminservice.GetWorkflowExecutionRawHistoryV2Response{ - HistoryBatches: historyBlobs, - HistoryNodeIds: nodeIds, - }, - nil, - ) - s.mockExecutionManager.EXPECT().ReadHistoryBranchByBatch(gomock.Any(), gomock.Any()).Return(&persistence.ReadHistoryBranchByBatchResponse{ - History: []*historypb.History{ - { - Events: []*historypb.HistoryEvent{ - { - EventId: 1, - }, - { - EventId: 2, - }, - }, - }, - }, - }, nil) - s.mockExecutionManager.EXPECT().GetCurrentExecution(gomock.Any(), gomock.Any()).Return(nil, serviceerror.NewNotFound("")) - s.mockExecutionManager.EXPECT().AppendRawHistoryNodes(gomock.Any(), gomock.Any()).Return(nil, nil).Times(3) - fakeStartHistory := &historypb.HistoryEvent{ - 
Attributes: &historypb.HistoryEvent_WorkflowExecutionStartedEventAttributes{ - WorkflowExecutionStartedEventAttributes: &historypb.WorkflowExecutionStartedEventAttributes{}, - }, - } - fakeCompletionEvent := &historypb.HistoryEvent{ - Attributes: &historypb.HistoryEvent_WorkflowExecutionTerminatedEventAttributes{ - WorkflowExecutionTerminatedEventAttributes: &historypb.WorkflowExecutionTerminatedEventAttributes{}, - }, - } - s.mockEventCache.EXPECT().GetEvent(gomock.Any(), gomock.Any(), common.FirstEventID, gomock.Any()).Return(fakeStartHistory, nil).AnyTimes() - s.mockEventCache.EXPECT().GetEvent(gomock.Any(), gomock.Any(), completionEventBatchId, gomock.Any()).Return(fakeCompletionEvent, nil).AnyTimes() - err = s.historyReplicator.ApplyWorkflowState(context.Background(), request) - s.NoError(err) -} - -func (s *historyReplicatorSuite) Test_ApplyWorkflowState_NoClosedWorkflow_Error() { - err := s.historyReplicator.ApplyWorkflowState(context.Background(), &historyservice.ReplicateWorkflowStateRequest{ - WorkflowState: &persistencespb.WorkflowMutableState{ - ExecutionInfo: &persistencespb.WorkflowExecutionInfo{ - WorkflowId: s.workflowID, - }, - ExecutionState: &persistencespb.WorkflowExecutionState{ - RunId: s.runID, - State: enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, - Status: enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING, - }, - }, - RemoteCluster: "test", - }) - var internalErr *serviceerror.Internal - s.ErrorAs(err, &internalErr) -} - -func (s *historyReplicatorSuite) Test_ApplyWorkflowState_ExistWorkflow_Resend() { - namespaceID := uuid.New() - branchInfo := &persistencespb.HistoryBranch{ - TreeId: uuid.New(), - BranchId: uuid.New(), - Ancestors: nil, - } - historyBranch, err := serialization.HistoryBranchToBlob(branchInfo) - s.NoError(err) - completionEventBatchId := int64(5) - nextEventID := int64(7) - request := &historyservice.ReplicateWorkflowStateRequest{ - WorkflowState: &persistencespb.WorkflowMutableState{ - ExecutionInfo: &persistencespb.WorkflowExecutionInfo{ - WorkflowId: s.workflowID, - NamespaceId: namespaceID, - VersionHistories: &historyspb.VersionHistories{ - CurrentVersionHistoryIndex: 0, - Histories: []*historyspb.VersionHistory{ - { - BranchToken: historyBranch.GetData(), - Items: []*historyspb.VersionHistoryItem{ - { - EventId: int64(100), - Version: int64(100), - }, - }, - }, - }, - }, - CompletionEventBatchId: completionEventBatchId, - }, - ExecutionState: &persistencespb.WorkflowExecutionState{ - RunId: s.runID, - State: enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED, - Status: enumspb.WORKFLOW_EXECUTION_STATUS_TERMINATED, - }, - NextEventId: nextEventID, - }, - RemoteCluster: "test", - } - we := commonpb.WorkflowExecution{ - WorkflowId: s.workflowID, - RunId: s.runID, - } - mockWeCtx := workflow.NewMockContext(s.controller) - mockMutableState := workflow.NewMockMutableState(s.controller) - s.mockWorkflowCache.EXPECT().GetOrCreateWorkflowExecution( - gomock.Any(), - namespace.ID(namespaceID), - we, - workflow.LockPriorityLow, - ).Return(mockWeCtx, wcache.NoopReleaseFn, nil) - mockWeCtx.EXPECT().LoadMutableState(gomock.Any()).Return(mockMutableState, nil) - mockMutableState.EXPECT().GetExecutionInfo().Return(&persistencespb.WorkflowExecutionInfo{ - VersionHistories: &historyspb.VersionHistories{ - CurrentVersionHistoryIndex: 0, - Histories: []*historyspb.VersionHistory{ - { - Items: []*historyspb.VersionHistoryItem{ - { - EventId: int64(1), - Version: int64(1), - }, - }, - }, - }, - }, - }) - err = s.historyReplicator.ApplyWorkflowState(context.Background(), request) - 
var expectedErr *serviceerrors.RetryReplication - s.ErrorAs(err, &expectedErr) - s.Equal(namespaceID, expectedErr.NamespaceId) - s.Equal(s.workflowID, expectedErr.WorkflowId) - s.Equal(s.runID, expectedErr.RunId) - s.Equal(int64(1), expectedErr.StartEventId) - s.Equal(int64(1), expectedErr.StartEventVersion) -} diff -Nru temporal-1.21.5-1/src/service/history/ndc/workflow_resetter_test.go temporal-1.22.5/src/service/history/ndc/workflow_resetter_test.go --- temporal-1.21.5-1/src/service/history/ndc/workflow_resetter_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/ndc/workflow_resetter_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -699,6 +699,7 @@ resetContext := workflow.NewMockContext(s.controller) resetContext.EXPECT().Lock(gomock.Any(), workflow.LockPriorityHigh).Return(nil) resetContext.EXPECT().Unlock(workflow.LockPriorityHigh) + resetContext.EXPECT().IsDirty().Return(false).AnyTimes() resetMutableState := workflow.NewMockMutableState(s.controller) resetContext.EXPECT().LoadMutableState(gomock.Any()).Return(resetMutableState, nil) resetMutableState.EXPECT().GetNextEventID().Return(newNextEventID).AnyTimes() diff -Nru temporal-1.21.5-1/src/service/history/ndc/workflow_state_replicator.go temporal-1.22.5/src/service/history/ndc/workflow_state_replicator.go --- temporal-1.21.5-1/src/service/history/ndc/workflow_state_replicator.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/service/history/ndc/workflow_state_replicator.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,462 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
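The ApplyWorkflowState tests removed above reappear later in this diff as workflow_state_replicator_test.go, exercising the new WorkflowStateReplicator type whose source follows. As a quick orientation before the full listing, a minimal construction-and-call sketch based only on the signatures in this new file; the shard context, workflow cache, events reapplier and logger wiring shown here is assumed, not taken from this diff:

// Hedged sketch: shardCtx, wfCache, reapplier and logger stand in for dependencies that the
// history service wires up elsewhere; only the two calls below come from the file added here.
replicator := ndc.NewWorkflowStateReplicator(
	shardCtx,                      // shard.Context
	wfCache,                       // wcache.Cache
	reapplier,                     // ndc.EventsReapplier, used by the internal transactionMgr
	serialization.NewSerializer(), // history event serializer
	logger,
)
// SyncWorkflowState only replicates workflows that are already closed; anything else is
// rejected with a serviceerror.Internal, as the guard at the top of the method shows.
err := replicator.SyncWorkflowState(ctx, &historyservice.ReplicateWorkflowStateRequest{ /* ... */ })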
+ +//go:generate mockgen -copyright_file ../../../LICENSE -package $GOPACKAGE -source $GOFILE -destination workflow_state_replicator_mock.go + +package ndc + +import ( + "context" + "fmt" + "sort" + "time" + + commonpb "go.temporal.io/api/common/v1" + historypb "go.temporal.io/api/history/v1" + "go.temporal.io/api/serviceerror" + "golang.org/x/exp/slices" + + "go.temporal.io/server/api/adminservice/v1" + enumsspb "go.temporal.io/server/api/enums/v1" + "go.temporal.io/server/api/historyservice/v1" + persistencespb "go.temporal.io/server/api/persistence/v1" + "go.temporal.io/server/common" + "go.temporal.io/server/common/cluster" + "go.temporal.io/server/common/collection" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/log/tag" + "go.temporal.io/server/common/namespace" + "go.temporal.io/server/common/persistence" + "go.temporal.io/server/common/persistence/serialization" + "go.temporal.io/server/common/persistence/versionhistory" + serviceerrors "go.temporal.io/server/common/serviceerror" + "go.temporal.io/server/service/history/shard" + "go.temporal.io/server/service/history/workflow" + wcache "go.temporal.io/server/service/history/workflow/cache" +) + +type ( + WorkflowStateReplicator interface { + SyncWorkflowState( + ctx context.Context, + request *historyservice.ReplicateWorkflowStateRequest, + ) error + } + + WorkflowStateReplicatorImpl struct { + shard shard.Context + namespaceRegistry namespace.Registry + workflowCache wcache.Cache + clusterMetadata cluster.Metadata + executionMgr persistence.ExecutionManager + historySerializer serialization.Serializer + transactionMgr transactionMgr + logger log.Logger + } +) + +func NewWorkflowStateReplicator( + shard shard.Context, + workflowCache wcache.Cache, + eventsReapplier EventsReapplier, + eventSerializer serialization.Serializer, + logger log.Logger, +) *WorkflowStateReplicatorImpl { + + return &WorkflowStateReplicatorImpl{ + shard: shard, + namespaceRegistry: shard.GetNamespaceRegistry(), + workflowCache: workflowCache, + clusterMetadata: shard.GetClusterMetadata(), + executionMgr: shard.GetExecutionManager(), + historySerializer: eventSerializer, + transactionMgr: newTransactionMgr(shard, workflowCache, eventsReapplier, logger), + logger: log.With(logger, tag.ComponentHistoryReplicator), + } +} + +func (r *WorkflowStateReplicatorImpl) SyncWorkflowState( + ctx context.Context, + request *historyservice.ReplicateWorkflowStateRequest, +) (retError error) { + executionInfo := request.GetWorkflowState().GetExecutionInfo() + executionState := request.GetWorkflowState().GetExecutionState() + namespaceID := namespace.ID(executionInfo.GetNamespaceId()) + wid := executionInfo.GetWorkflowId() + rid := executionState.GetRunId() + if executionState.State != enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED { + return serviceerror.NewInternal("Replicate non completed workflow state is not supported.") + } + + wfCtx, releaseFn, err := r.workflowCache.GetOrCreateWorkflowExecution( + ctx, + namespaceID, + commonpb.WorkflowExecution{ + WorkflowId: wid, + RunId: rid, + }, + workflow.LockPriorityLow, + ) + if err != nil { + return err + } + defer func() { + if rec := recover(); rec != nil { + releaseFn(errPanic) + panic(rec) + } else { + releaseFn(retError) + } + }() + + // Handle existing workflows + ms, err := wfCtx.LoadMutableState(ctx) + switch err.(type) { + case *serviceerror.NotFound: + // no-op, continue to replicate workflow state + case nil: + // workflow exists, do resend if version histories are not match. 
+ localVersionHistory, err := versionhistory.GetCurrentVersionHistory(ms.GetExecutionInfo().GetVersionHistories()) + if err != nil { + return err + } + localHistoryLastItem, err := versionhistory.GetLastVersionHistoryItem(localVersionHistory) + if err != nil { + return err + } + incomingVersionHistory, err := versionhistory.GetCurrentVersionHistory(request.GetWorkflowState().GetExecutionInfo().GetVersionHistories()) + if err != nil { + return err + } + incomingHistoryLastItem, err := versionhistory.GetLastVersionHistoryItem(incomingVersionHistory) + if err != nil { + return err + } + if !versionhistory.IsEqualVersionHistoryItem(localHistoryLastItem, incomingHistoryLastItem) { + return serviceerrors.NewRetryReplication( + "Failed to sync workflow state due to version history mismatch", + namespaceID.String(), + wid, + rid, + localHistoryLastItem.GetEventId(), + localHistoryLastItem.GetVersion(), + common.EmptyEventID, + common.EmptyVersion, + ) + } + return nil + default: + return err + } + + currentVersionHistory, err := versionhistory.GetCurrentVersionHistory(executionInfo.VersionHistories) + if err != nil { + return err + } + lastEventItem, err := versionhistory.GetLastVersionHistoryItem(currentVersionHistory) + if err != nil { + return err + } + + // The following sanitizes the branch token from the source cluster to this target cluster by re-initializing it. + + branchInfo, err := r.shard.GetExecutionManager().GetHistoryBranchUtil().ParseHistoryBranchInfo( + currentVersionHistory.GetBranchToken(), + ) + if err != nil { + return err + } + newHistoryBranchToken, err := r.shard.GetExecutionManager().GetHistoryBranchUtil().NewHistoryBranch( + request.NamespaceId, + branchInfo.GetTreeId(), + &branchInfo.BranchId, + branchInfo.Ancestors, + nil, + nil, + nil, + ) + if err != nil { + return err + } + + _, lastFirstTxnID, err := r.backfillHistory( + ctx, + request.GetRemoteCluster(), + namespaceID, + wid, + rid, + lastEventItem.GetEventId(), + lastEventItem.GetVersion(), + newHistoryBranchToken, + ) + if err != nil { + return err + } + + ns, err := r.namespaceRegistry.GetNamespaceByID(namespaceID) + if err != nil { + return err + } + + mutableState, err := workflow.NewSanitizedMutableState( + r.shard, + r.shard.GetEventsCache(), + r.logger, + ns, + request.GetWorkflowState(), + lastFirstTxnID, + lastEventItem.GetVersion(), + ) + if err != nil { + return err + } + + err = mutableState.SetCurrentBranchToken(newHistoryBranchToken) + if err != nil { + return err + } + + taskRefresh := workflow.NewTaskRefresher(r.shard, r.shard.GetConfig(), r.namespaceRegistry, r.logger) + err = taskRefresh.RefreshTasks(ctx, mutableState) + if err != nil { + return err + } + return r.transactionMgr.createWorkflow( + ctx, + NewWorkflow( + ctx, + r.namespaceRegistry, + r.clusterMetadata, + wfCtx, + mutableState, + releaseFn, + ), + ) +} + +func (r *WorkflowStateReplicatorImpl) backfillHistory( + ctx context.Context, + remoteClusterName string, + namespaceID namespace.ID, + workflowID string, + runID string, + lastEventID int64, + lastEventVersion int64, + branchToken []byte, +) (*time.Time, int64, error) { + + // Get the last batch node id to check if the history data is already in DB. 
+ localHistoryIterator := collection.NewPagingIterator(r.getHistoryFromLocalPaginationFn( + ctx, + branchToken, + lastEventID, + )) + var lastBatchNodeID int64 + for localHistoryIterator.HasNext() { + localHistoryBatch, err := localHistoryIterator.Next() + switch err.(type) { + case nil: + if len(localHistoryBatch.GetEvents()) > 0 { + lastBatchNodeID = localHistoryBatch.GetEvents()[0].GetEventId() + } + case *serviceerror.NotFound: + default: + return nil, common.EmptyEventTaskID, err + } + } + + remoteHistoryIterator := collection.NewPagingIterator(r.getHistoryFromRemotePaginationFn( + ctx, + remoteClusterName, + namespaceID, + workflowID, + runID, + lastEventID, + lastEventVersion), + ) + historyBranchUtil := r.executionMgr.GetHistoryBranchUtil() + historyBranch, err := historyBranchUtil.ParseHistoryBranchInfo(branchToken) + if err != nil { + return nil, common.EmptyEventTaskID, err + } + + prevTxnID := common.EmptyEventTaskID + var lastHistoryBatch *commonpb.DataBlob + var prevBranchID string + sortedAncestors := sortAncestors(historyBranch.GetAncestors()) + sortedAncestorsIdx := 0 + var ancestors []*persistencespb.HistoryBranchRange + +BackfillLoop: + for remoteHistoryIterator.HasNext() { + historyBlob, err := remoteHistoryIterator.Next() + if err != nil { + return nil, common.EmptyEventTaskID, err + } + + if historyBlob.nodeID <= lastBatchNodeID { + // The history batch already in DB. + continue BackfillLoop + } + + branchID := historyBranch.GetBranchId() + if sortedAncestorsIdx < len(sortedAncestors) { + currentAncestor := sortedAncestors[sortedAncestorsIdx] + if historyBlob.nodeID >= currentAncestor.GetEndNodeId() { + // update ancestor + ancestors = append(ancestors, currentAncestor) + sortedAncestorsIdx++ + } + if sortedAncestorsIdx < len(sortedAncestors) { + // use ancestor branch id + currentAncestor = sortedAncestors[sortedAncestorsIdx] + branchID = currentAncestor.GetBranchId() + if historyBlob.nodeID < currentAncestor.GetBeginNodeId() || historyBlob.nodeID >= currentAncestor.GetEndNodeId() { + return nil, common.EmptyEventTaskID, serviceerror.NewInternal( + fmt.Sprintf("The backfill history blob node id %d is not in acestoer range [%d, %d]", + historyBlob.nodeID, + currentAncestor.GetBeginNodeId(), + currentAncestor.GetEndNodeId()), + ) + } + } + } + + filteredHistoryBranch, err := historyBranchUtil.UpdateHistoryBranchInfo( + branchToken, + &persistencespb.HistoryBranch{ + TreeId: historyBranch.GetTreeId(), + BranchId: branchID, + Ancestors: ancestors, + }, + ) + if err != nil { + return nil, common.EmptyEventTaskID, err + } + txnID, err := r.shard.GenerateTaskID() + if err != nil { + return nil, common.EmptyEventTaskID, err + } + _, err = r.executionMgr.AppendRawHistoryNodes(ctx, &persistence.AppendRawHistoryNodesRequest{ + ShardID: r.shard.GetShardID(), + IsNewBranch: prevBranchID != branchID, + BranchToken: filteredHistoryBranch, + History: historyBlob.rawHistory, + PrevTransactionID: prevTxnID, + TransactionID: txnID, + NodeID: historyBlob.nodeID, + Info: persistence.BuildHistoryGarbageCleanupInfo( + namespaceID.String(), + workflowID, + runID, + ), + }) + if err != nil { + return nil, common.EmptyEventTaskID, err + } + prevTxnID = txnID + prevBranchID = branchID + lastHistoryBatch = historyBlob.rawHistory + } + + var lastEventTime *time.Time + events, _ := r.historySerializer.DeserializeEvents(lastHistoryBatch) + if len(events) > 0 { + lastEventTime = events[len(events)-1].EventTime + } + return lastEventTime, prevTxnID, nil +} + +func (r *WorkflowStateReplicatorImpl) 
getHistoryFromLocalPaginationFn( + ctx context.Context, + branchToken []byte, + lastEventID int64, +) collection.PaginationFn[*historypb.History] { + + return func(paginationToken []byte) ([]*historypb.History, []byte, error) { + response, err := r.executionMgr.ReadHistoryBranchByBatch(ctx, &persistence.ReadHistoryBranchRequest{ + ShardID: r.shard.GetShardID(), + BranchToken: branchToken, + MinEventID: common.FirstEventID, + MaxEventID: lastEventID + 1, + PageSize: 100, + NextPageToken: paginationToken, + }) + if err != nil { + return nil, nil, err + } + return slices.Clone(response.History), response.NextPageToken, nil + } +} + +func (r *WorkflowStateReplicatorImpl) getHistoryFromRemotePaginationFn( + ctx context.Context, + remoteClusterName string, + namespaceID namespace.ID, + workflowID string, + runID string, + endEventID int64, + endEventVersion int64, +) collection.PaginationFn[*rawHistoryData] { + + return func(paginationToken []byte) ([]*rawHistoryData, []byte, error) { + + adminClient, err := r.shard.GetRemoteAdminClient(remoteClusterName) + if err != nil { + return nil, nil, err + } + response, err := adminClient.GetWorkflowExecutionRawHistoryV2(ctx, &adminservice.GetWorkflowExecutionRawHistoryV2Request{ + NamespaceId: namespaceID.String(), + Execution: &commonpb.WorkflowExecution{WorkflowId: workflowID, RunId: runID}, + EndEventId: endEventID + 1, + EndEventVersion: endEventVersion, + MaximumPageSize: 1000, + NextPageToken: paginationToken, + }) + if err != nil { + return nil, nil, err + } + + batches := make([]*rawHistoryData, 0, len(response.GetHistoryBatches())) + for idx, blob := range response.GetHistoryBatches() { + batches = append(batches, &rawHistoryData{ + rawHistory: blob, + nodeID: response.GetHistoryNodeIds()[idx], + }) + } + return batches, response.NextPageToken, nil + } +} + +func sortAncestors(ans []*persistencespb.HistoryBranchRange) []*persistencespb.HistoryBranchRange { + if len(ans) > 0 { + // sort ans based onf EndNodeID so that we can set BeginNodeID + sort.Slice(ans, func(i, j int) bool { return ans[i].GetEndNodeId() < ans[j].GetEndNodeId() }) + ans[0].BeginNodeId = int64(1) + for i := 1; i < len(ans); i++ { + ans[i].BeginNodeId = ans[i-1].GetEndNodeId() + } + } + return ans +} diff -Nru temporal-1.21.5-1/src/service/history/ndc/workflow_state_replicator_mock.go temporal-1.22.5/src/service/history/ndc/workflow_state_replicator_mock.go --- temporal-1.21.5-1/src/service/history/ndc/workflow_state_replicator_mock.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/service/history/ndc/workflow_state_replicator_mock.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,74 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Code generated by MockGen. DO NOT EDIT. +// Source: workflow_state_replicator.go + +// Package ndc is a generated GoMock package. +package ndc + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + historyservice "go.temporal.io/server/api/historyservice/v1" +) + +// MockWorkflowStateReplicator is a mock of WorkflowStateReplicator interface. +type MockWorkflowStateReplicator struct { + ctrl *gomock.Controller + recorder *MockWorkflowStateReplicatorMockRecorder +} + +// MockWorkflowStateReplicatorMockRecorder is the mock recorder for MockWorkflowStateReplicator. +type MockWorkflowStateReplicatorMockRecorder struct { + mock *MockWorkflowStateReplicator +} + +// NewMockWorkflowStateReplicator creates a new mock instance. +func NewMockWorkflowStateReplicator(ctrl *gomock.Controller) *MockWorkflowStateReplicator { + mock := &MockWorkflowStateReplicator{ctrl: ctrl} + mock.recorder = &MockWorkflowStateReplicatorMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockWorkflowStateReplicator) EXPECT() *MockWorkflowStateReplicatorMockRecorder { + return m.recorder +} + +// SyncWorkflowState mocks base method. +func (m *MockWorkflowStateReplicator) SyncWorkflowState(ctx context.Context, request *historyservice.ReplicateWorkflowStateRequest) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SyncWorkflowState", ctx, request) + ret0, _ := ret[0].(error) + return ret0 +} + +// SyncWorkflowState indicates an expected call of SyncWorkflowState. +func (mr *MockWorkflowStateReplicatorMockRecorder) SyncWorkflowState(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncWorkflowState", reflect.TypeOf((*MockWorkflowStateReplicator)(nil).SyncWorkflowState), ctx, request) +} diff -Nru temporal-1.21.5-1/src/service/history/ndc/workflow_state_replicator_test.go temporal-1.22.5/src/service/history/ndc/workflow_state_replicator_test.go --- temporal-1.21.5-1/src/service/history/ndc/workflow_state_replicator_test.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/service/history/ndc/workflow_state_replicator_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,482 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package ndc + +import ( + "context" + "testing" + "time" + + historypb "go.temporal.io/api/history/v1" + + "go.temporal.io/server/common" + serviceerrors "go.temporal.io/server/common/serviceerror" + "go.temporal.io/server/service/history/events" + + "github.com/golang/mock/gomock" + "github.com/pborman/uuid" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" + "go.temporal.io/api/serviceerror" + + "go.temporal.io/server/api/adminservice/v1" + "go.temporal.io/server/api/adminservicemock/v1" + enumsspb "go.temporal.io/server/api/enums/v1" + historyspb "go.temporal.io/server/api/history/v1" + "go.temporal.io/server/api/historyservice/v1" + persistencespb "go.temporal.io/server/api/persistence/v1" + + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/namespace" + "go.temporal.io/server/common/persistence" + "go.temporal.io/server/common/persistence/serialization" + "go.temporal.io/server/service/history/shard" + "go.temporal.io/server/service/history/tests" + "go.temporal.io/server/service/history/workflow" + wcache "go.temporal.io/server/service/history/workflow/cache" +) + +type ( + workflowReplicatorSuite struct { + suite.Suite + *require.Assertions + + controller *gomock.Controller + mockShard *shard.ContextTest + mockEventCache *events.MockCache + mockWorkflowCache *wcache.MockCache + mockNamespaceCache *namespace.MockRegistry + mockRemoteAdminClient *adminservicemock.MockAdminServiceClient + mockExecutionManager *persistence.MockExecutionManager + logger log.Logger + + workflowID string + runID string + now time.Time + + workflowStateReplicator *WorkflowStateReplicatorImpl + } +) + +func TestWorkflowReplicatorSuite(t *testing.T) { + s := new(workflowReplicatorSuite) + suite.Run(t, s) +} + +func (s *workflowReplicatorSuite) SetupTest() { + s.Assertions = require.New(s.T()) + + s.controller = gomock.NewController(s.T()) + // s.mockTaskRefresher = workflow.NewMockTaskRefresher(s.controller) + + s.mockShard = shard.NewTestContext( + s.controller, + &persistencespb.ShardInfo{ + ShardId: 10, + RangeId: 1, + }, + tests.NewDynamicConfig(), + ) + + s.mockExecutionManager = s.mockShard.Resource.ExecutionMgr + s.mockNamespaceCache = s.mockShard.Resource.NamespaceCache + s.mockWorkflowCache = wcache.NewMockCache(s.controller) + s.mockEventCache = s.mockShard.MockEventsCache + s.mockRemoteAdminClient = s.mockShard.Resource.RemoteAdminClient + eventReapplier := NewMockEventsReapplier(s.controller) + s.logger = s.mockShard.GetLogger() + + s.workflowID = "some random workflow ID" + s.runID = uuid.New() + s.now = time.Now().UTC() + s.workflowStateReplicator = NewWorkflowStateReplicator( + s.mockShard, + s.mockWorkflowCache, + eventReapplier, + serialization.NewSerializer(), + s.logger, + ) +} + +func (s *workflowReplicatorSuite) TearDownTest() { + s.controller.Finish() + s.mockShard.StopForTest() +} + +func (s *workflowReplicatorSuite) Test_ApplyWorkflowState_BrandNew() { + namespaceID := 
uuid.New() + namespaceName := "namespaceName" + branchInfo := &persistencespb.HistoryBranch{ + TreeId: uuid.New(), + BranchId: uuid.New(), + Ancestors: nil, + } + historyBranch, err := serialization.HistoryBranchToBlob(branchInfo) + s.NoError(err) + completionEventBatchId := int64(5) + nextEventID := int64(7) + request := &historyservice.ReplicateWorkflowStateRequest{ + WorkflowState: &persistencespb.WorkflowMutableState{ + ExecutionInfo: &persistencespb.WorkflowExecutionInfo{ + WorkflowId: s.workflowID, + NamespaceId: namespaceID, + VersionHistories: &historyspb.VersionHistories{ + CurrentVersionHistoryIndex: 0, + Histories: []*historyspb.VersionHistory{ + { + BranchToken: historyBranch.GetData(), + Items: []*historyspb.VersionHistoryItem{ + { + EventId: int64(100), + Version: int64(100), + }, + }, + }, + }, + }, + CompletionEventBatchId: completionEventBatchId, + }, + ExecutionState: &persistencespb.WorkflowExecutionState{ + RunId: s.runID, + State: enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED, + Status: enumspb.WORKFLOW_EXECUTION_STATUS_TERMINATED, + }, + NextEventId: nextEventID, + }, + RemoteCluster: "test", + } + we := commonpb.WorkflowExecution{ + WorkflowId: s.workflowID, + RunId: s.runID, + } + mockWeCtx := workflow.NewMockContext(s.controller) + s.mockWorkflowCache.EXPECT().GetOrCreateWorkflowExecution( + gomock.Any(), + namespace.ID(namespaceID), + we, + workflow.LockPriorityLow, + ).Return(mockWeCtx, wcache.NoopReleaseFn, nil) + mockWeCtx.EXPECT().LoadMutableState(gomock.Any()).Return(nil, serviceerror.NewNotFound("ms not found")) + mockWeCtx.EXPECT().CreateWorkflowExecution( + gomock.Any(), + persistence.CreateWorkflowModeBrandNew, + "", + int64(0), + gomock.Any(), + gomock.Any(), + []*persistence.WorkflowEvents{}, + ).Return(nil) + s.mockNamespaceCache.EXPECT().GetNamespaceByID(namespace.ID(namespaceID)).Return(namespace.NewNamespaceForTest( + &persistencespb.NamespaceInfo{Name: namespaceName}, + nil, + false, + nil, + int64(100), + ), nil).AnyTimes() + s.mockRemoteAdminClient.EXPECT().GetWorkflowExecutionRawHistoryV2(gomock.Any(), gomock.Any()).Return( + &adminservice.GetWorkflowExecutionRawHistoryV2Response{}, + nil, + ) + s.mockExecutionManager.EXPECT().ReadHistoryBranchByBatch(gomock.Any(), gomock.Any()).Return(nil, serviceerror.NewNotFound("test")) + s.mockExecutionManager.EXPECT().GetCurrentExecution(gomock.Any(), gomock.Any()).Return(nil, serviceerror.NewNotFound("")) + fakeStartHistory := &historypb.HistoryEvent{ + Attributes: &historypb.HistoryEvent_WorkflowExecutionStartedEventAttributes{ + WorkflowExecutionStartedEventAttributes: &historypb.WorkflowExecutionStartedEventAttributes{}, + }, + } + fakeCompletionEvent := &historypb.HistoryEvent{ + Attributes: &historypb.HistoryEvent_WorkflowExecutionTerminatedEventAttributes{ + WorkflowExecutionTerminatedEventAttributes: &historypb.WorkflowExecutionTerminatedEventAttributes{}, + }, + } + s.mockEventCache.EXPECT().GetEvent(gomock.Any(), gomock.Any(), common.FirstEventID, gomock.Any()).Return(fakeStartHistory, nil).AnyTimes() + s.mockEventCache.EXPECT().GetEvent(gomock.Any(), gomock.Any(), completionEventBatchId, gomock.Any()).Return(fakeCompletionEvent, nil).AnyTimes() + err = s.workflowStateReplicator.SyncWorkflowState(context.Background(), request) + s.NoError(err) +} + +func (s *workflowReplicatorSuite) Test_ApplyWorkflowState_Ancestors() { + namespaceID := uuid.New() + namespaceName := "namespaceName" + branchInfo := &persistencespb.HistoryBranch{ + TreeId: uuid.New(), + BranchId: uuid.New(), + Ancestors: 
[]*persistencespb.HistoryBranchRange{ + { + BranchId: uuid.New(), + BeginNodeId: 1, + EndNodeId: 3, + }, + { + BranchId: uuid.New(), + BeginNodeId: 3, + EndNodeId: 4, + }, + }, + } + historyBranch, err := serialization.HistoryBranchToBlob(branchInfo) + s.NoError(err) + completionEventBatchId := int64(5) + nextEventID := int64(7) + request := &historyservice.ReplicateWorkflowStateRequest{ + WorkflowState: &persistencespb.WorkflowMutableState{ + ExecutionInfo: &persistencespb.WorkflowExecutionInfo{ + WorkflowId: s.workflowID, + NamespaceId: namespaceID, + VersionHistories: &historyspb.VersionHistories{ + CurrentVersionHistoryIndex: 0, + Histories: []*historyspb.VersionHistory{ + { + BranchToken: historyBranch.GetData(), + Items: []*historyspb.VersionHistoryItem{ + { + EventId: int64(100), + Version: int64(100), + }, + }, + }, + }, + }, + CompletionEventBatchId: completionEventBatchId, + }, + ExecutionState: &persistencespb.WorkflowExecutionState{ + RunId: s.runID, + State: enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED, + Status: enumspb.WORKFLOW_EXECUTION_STATUS_TERMINATED, + }, + NextEventId: nextEventID, + }, + RemoteCluster: "test", + } + we := commonpb.WorkflowExecution{ + WorkflowId: s.workflowID, + RunId: s.runID, + } + mockWeCtx := workflow.NewMockContext(s.controller) + s.mockWorkflowCache.EXPECT().GetOrCreateWorkflowExecution( + gomock.Any(), + namespace.ID(namespaceID), + we, + workflow.LockPriorityLow, + ).Return(mockWeCtx, wcache.NoopReleaseFn, nil) + mockWeCtx.EXPECT().LoadMutableState(gomock.Any()).Return(nil, serviceerror.NewNotFound("ms not found")) + mockWeCtx.EXPECT().CreateWorkflowExecution( + gomock.Any(), + persistence.CreateWorkflowModeBrandNew, + "", + int64(0), + gomock.Any(), + gomock.Any(), + []*persistence.WorkflowEvents{}, + ).Return(nil) + s.mockNamespaceCache.EXPECT().GetNamespaceByID(namespace.ID(namespaceID)).Return(namespace.NewNamespaceForTest( + &persistencespb.NamespaceInfo{Name: namespaceName}, + nil, + false, + nil, + int64(100), + ), nil).AnyTimes() + expectedHistory := []*historypb.History{ + { + Events: []*historypb.HistoryEvent{ + { + EventId: 1, + }, + { + EventId: 2, + }, + }, + }, + { + Events: []*historypb.HistoryEvent{ + { + EventId: 3, + }, + }, + }, + { + Events: []*historypb.HistoryEvent{ + { + EventId: 4, + }, + }, + }, + { + Events: []*historypb.HistoryEvent{ + { + EventId: 5, + }, + { + EventId: 6, + }, + }, + }, + } + serializer := serialization.NewSerializer() + var historyBlobs []*commonpb.DataBlob + var nodeIds []int64 + for _, history := range expectedHistory { + blob, err := serializer.SerializeEvents(history.GetEvents(), enumspb.ENCODING_TYPE_PROTO3) + s.NoError(err) + historyBlobs = append(historyBlobs, blob) + nodeIds = append(nodeIds, history.GetEvents()[0].GetEventId()) + } + s.mockRemoteAdminClient.EXPECT().GetWorkflowExecutionRawHistoryV2(gomock.Any(), gomock.Any()).Return( + &adminservice.GetWorkflowExecutionRawHistoryV2Response{ + HistoryBatches: historyBlobs, + HistoryNodeIds: nodeIds, + }, + nil, + ) + s.mockExecutionManager.EXPECT().ReadHistoryBranchByBatch(gomock.Any(), gomock.Any()).Return(&persistence.ReadHistoryBranchByBatchResponse{ + History: []*historypb.History{ + { + Events: []*historypb.HistoryEvent{ + { + EventId: 1, + }, + { + EventId: 2, + }, + }, + }, + }, + }, nil) + s.mockExecutionManager.EXPECT().GetCurrentExecution(gomock.Any(), gomock.Any()).Return(nil, serviceerror.NewNotFound("")) + s.mockExecutionManager.EXPECT().AppendRawHistoryNodes(gomock.Any(), gomock.Any()).Return(nil, nil).Times(3) + 
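// Note on the Times(3) expectation just above: the remote response assembled earlier in this
// test contains four batches (node IDs 1, 3, 4 and 5), while the local ReadHistoryBranchByBatch
// stub already returns the batch starting at node 1, so backfillHistory skips that one and
// appends only the remaining three batches.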
fakeStartHistory := &historypb.HistoryEvent{ + Attributes: &historypb.HistoryEvent_WorkflowExecutionStartedEventAttributes{ + WorkflowExecutionStartedEventAttributes: &historypb.WorkflowExecutionStartedEventAttributes{}, + }, + } + fakeCompletionEvent := &historypb.HistoryEvent{ + Attributes: &historypb.HistoryEvent_WorkflowExecutionTerminatedEventAttributes{ + WorkflowExecutionTerminatedEventAttributes: &historypb.WorkflowExecutionTerminatedEventAttributes{}, + }, + } + s.mockEventCache.EXPECT().GetEvent(gomock.Any(), gomock.Any(), common.FirstEventID, gomock.Any()).Return(fakeStartHistory, nil).AnyTimes() + s.mockEventCache.EXPECT().GetEvent(gomock.Any(), gomock.Any(), completionEventBatchId, gomock.Any()).Return(fakeCompletionEvent, nil).AnyTimes() + err = s.workflowStateReplicator.SyncWorkflowState(context.Background(), request) + s.NoError(err) +} + +func (s *workflowReplicatorSuite) Test_ApplyWorkflowState_NoClosedWorkflow_Error() { + err := s.workflowStateReplicator.SyncWorkflowState(context.Background(), &historyservice.ReplicateWorkflowStateRequest{ + WorkflowState: &persistencespb.WorkflowMutableState{ + ExecutionInfo: &persistencespb.WorkflowExecutionInfo{ + WorkflowId: s.workflowID, + }, + ExecutionState: &persistencespb.WorkflowExecutionState{ + RunId: s.runID, + State: enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, + Status: enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING, + }, + }, + RemoteCluster: "test", + }) + var internalErr *serviceerror.Internal + s.ErrorAs(err, &internalErr) +} + +func (s *workflowReplicatorSuite) Test_ApplyWorkflowState_ExistWorkflow_Resend() { + namespaceID := uuid.New() + branchInfo := &persistencespb.HistoryBranch{ + TreeId: uuid.New(), + BranchId: uuid.New(), + Ancestors: nil, + } + historyBranch, err := serialization.HistoryBranchToBlob(branchInfo) + s.NoError(err) + completionEventBatchId := int64(5) + nextEventID := int64(7) + request := &historyservice.ReplicateWorkflowStateRequest{ + WorkflowState: &persistencespb.WorkflowMutableState{ + ExecutionInfo: &persistencespb.WorkflowExecutionInfo{ + WorkflowId: s.workflowID, + NamespaceId: namespaceID, + VersionHistories: &historyspb.VersionHistories{ + CurrentVersionHistoryIndex: 0, + Histories: []*historyspb.VersionHistory{ + { + BranchToken: historyBranch.GetData(), + Items: []*historyspb.VersionHistoryItem{ + { + EventId: int64(100), + Version: int64(100), + }, + }, + }, + }, + }, + CompletionEventBatchId: completionEventBatchId, + }, + ExecutionState: &persistencespb.WorkflowExecutionState{ + RunId: s.runID, + State: enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED, + Status: enumspb.WORKFLOW_EXECUTION_STATUS_TERMINATED, + }, + NextEventId: nextEventID, + }, + RemoteCluster: "test", + } + we := commonpb.WorkflowExecution{ + WorkflowId: s.workflowID, + RunId: s.runID, + } + mockWeCtx := workflow.NewMockContext(s.controller) + mockMutableState := workflow.NewMockMutableState(s.controller) + s.mockWorkflowCache.EXPECT().GetOrCreateWorkflowExecution( + gomock.Any(), + namespace.ID(namespaceID), + we, + workflow.LockPriorityLow, + ).Return(mockWeCtx, wcache.NoopReleaseFn, nil) + mockWeCtx.EXPECT().LoadMutableState(gomock.Any()).Return(mockMutableState, nil) + mockMutableState.EXPECT().GetExecutionInfo().Return(&persistencespb.WorkflowExecutionInfo{ + VersionHistories: &historyspb.VersionHistories{ + CurrentVersionHistoryIndex: 0, + Histories: []*historyspb.VersionHistory{ + { + Items: []*historyspb.VersionHistoryItem{ + { + EventId: int64(1), + Version: int64(1), + }, + }, + }, + }, + }, + }) + err = 
s.workflowStateReplicator.SyncWorkflowState(context.Background(), request) + var expectedErr *serviceerrors.RetryReplication + s.ErrorAs(err, &expectedErr) + s.Equal(namespaceID, expectedErr.NamespaceId) + s.Equal(s.workflowID, expectedErr.WorkflowId) + s.Equal(s.runID, expectedErr.RunId) + s.Equal(int64(1), expectedErr.StartEventId) + s.Equal(int64(1), expectedErr.StartEventVersion) +} diff -Nru temporal-1.21.5-1/src/service/history/ndc_standby_task_util.go temporal-1.22.5/src/service/history/ndc_standby_task_util.go --- temporal-1.21.5-1/src/service/history/ndc_standby_task_util.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/service/history/ndc_standby_task_util.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,267 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
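The Resend test above pins down the error contract for an existing workflow whose version histories have diverged: SyncWorkflowState returns a *serviceerrors.RetryReplication whose StartEventId and StartEventVersion are taken from the local branch's last version-history item, with the end values left empty. A caller reacting to this error would typically unwrap it along these lines (a hedged sketch; the actual resend path is outside this diff):

var retryErr *serviceerrors.RetryReplication
if errors.As(err, &retryErr) {
	// Resend history starting at retryErr.StartEventId / retryErr.StartEventVersion,
	// then retry the ReplicateWorkflowState request.
}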
+ +package history + +import ( + "context" + "errors" + "time" + + taskqueuepb "go.temporal.io/api/taskqueue/v1" + taskqueuespb "go.temporal.io/server/api/taskqueue/v1" + + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/log/tag" + "go.temporal.io/server/common/namespace" + "go.temporal.io/server/common/persistence/versionhistory" + "go.temporal.io/server/common/worker_versioning" + "go.temporal.io/server/service/history/consts" + "go.temporal.io/server/service/history/tasks" + "go.temporal.io/server/service/history/workflow" +) + +type ( + standbyActionFn func(context.Context, workflow.Context, workflow.MutableState) (interface{}, error) + standbyPostActionFn func(context.Context, tasks.Task, interface{}, log.Logger) error + + standbyCurrentTimeFn func() time.Time +) + +func standbyTaskPostActionNoOp( + _ context.Context, + _ tasks.Task, + postActionInfo interface{}, + _ log.Logger, +) error { + + if postActionInfo == nil { + return nil + } + + // return error so task processing logic will retry + return consts.ErrTaskRetry +} + +func standbyTransferTaskPostActionTaskDiscarded( + _ context.Context, + taskInfo tasks.Task, + postActionInfo interface{}, + logger log.Logger, +) error { + + if postActionInfo == nil { + return nil + } + + logger.Warn("Discarding standby transfer task due to task being pending for too long.", tag.Task(taskInfo)) + return consts.ErrTaskDiscarded +} + +func standbyTimerTaskPostActionTaskDiscarded( + _ context.Context, + taskInfo tasks.Task, + postActionInfo interface{}, + logger log.Logger, +) error { + + if postActionInfo == nil { + return nil + } + + logger.Warn("Discarding standby timer task due to task being pending for too long.", tag.Task(taskInfo)) + return consts.ErrTaskDiscarded +} + +type ( + historyResendInfo struct { + + // used by NDC + lastEventID int64 + lastEventVersion int64 + } + + activityTaskPostActionInfo struct { + *historyResendInfo + + taskQueue string + activityTaskScheduleToStartTimeout time.Duration + versionDirective *taskqueuespb.TaskVersionDirective + } + + workflowTaskPostActionInfo struct { + *historyResendInfo + + workflowTaskScheduleToStartTimeout *time.Duration + taskqueue taskqueuepb.TaskQueue + versionDirective *taskqueuespb.TaskVersionDirective + } + + startChildExecutionPostActionInfo struct { + *historyResendInfo + } +) + +var ( + // verifyChildCompletionRecordedInfo is the post action info returned by + // standby close execution task action func. The actual content of the + // struct doesn't matter. We just need a non-nil pointer to to indicate + // that the verification has failed. 
+ verifyChildCompletionRecordedInfo = &struct{}{} +) + +func newHistoryResendInfo( + lastEventID int64, + lastEventVersion int64, +) *historyResendInfo { + return &historyResendInfo{ + lastEventID: lastEventID, + lastEventVersion: lastEventVersion, + } +} + +func newActivityTaskPostActionInfo( + mutableState workflow.MutableState, + activityScheduleToStartTimeout time.Duration, + useCompatibleVersion bool, +) (*activityTaskPostActionInfo, error) { + resendInfo, err := getHistoryResendInfo(mutableState) + if err != nil { + return nil, err + } + + directive := worker_versioning.MakeDirectiveForActivityTask(mutableState.GetWorkerVersionStamp(), useCompatibleVersion) + + return &activityTaskPostActionInfo{ + historyResendInfo: resendInfo, + activityTaskScheduleToStartTimeout: activityScheduleToStartTimeout, + versionDirective: directive, + }, nil +} + +func newActivityRetryTimePostActionInfo( + mutableState workflow.MutableState, + taskQueue string, + activityScheduleToStartTimeout time.Duration, + useCompatibleVersion bool, +) (*activityTaskPostActionInfo, error) { + resendInfo, err := getHistoryResendInfo(mutableState) + if err != nil { + return nil, err + } + + directive := worker_versioning.MakeDirectiveForActivityTask(mutableState.GetWorkerVersionStamp(), useCompatibleVersion) + + return &activityTaskPostActionInfo{ + historyResendInfo: resendInfo, + taskQueue: taskQueue, + activityTaskScheduleToStartTimeout: activityScheduleToStartTimeout, + versionDirective: directive, + }, nil +} + +func newWorkflowTaskPostActionInfo( + mutableState workflow.MutableState, + workflowTaskScheduleToStartTimeout *time.Duration, + taskqueue taskqueuepb.TaskQueue, +) (*workflowTaskPostActionInfo, error) { + resendInfo, err := getHistoryResendInfo(mutableState) + if err != nil { + return nil, err + } + + directive := worker_versioning.MakeDirectiveForWorkflowTask( + mutableState.GetWorkerVersionStamp(), + mutableState.GetLastWorkflowTaskStartedEventID(), + ) + + return &workflowTaskPostActionInfo{ + historyResendInfo: resendInfo, + workflowTaskScheduleToStartTimeout: workflowTaskScheduleToStartTimeout, + taskqueue: taskqueue, + versionDirective: directive, + }, nil +} + +func getHistoryResendInfo( + mutableState workflow.MutableState, +) (*historyResendInfo, error) { + + currentBranch, err := versionhistory.GetCurrentVersionHistory(mutableState.GetExecutionInfo().GetVersionHistories()) + if err != nil { + return nil, err + } + lastItem, err := versionhistory.GetLastVersionHistoryItem(currentBranch) + if err != nil { + return nil, err + } + return newHistoryResendInfo(lastItem.GetEventId(), lastItem.GetVersion()), nil +} + +func getStandbyPostActionFn( + taskInfo tasks.Task, + standbyNow standbyCurrentTimeFn, + standbyTaskMissingEventsResendDelay time.Duration, + standbyTaskMissingEventsDiscardDelay time.Duration, + fetchHistoryStandbyPostActionFn standbyPostActionFn, + discardTaskStandbyPostActionFn standbyPostActionFn, +) standbyPostActionFn { + + // this is for task retry, use machine time + now := standbyNow() + taskTime := taskInfo.GetVisibilityTime() + resendTime := taskTime.Add(standbyTaskMissingEventsResendDelay) + discardTime := taskTime.Add(standbyTaskMissingEventsDiscardDelay) + + // now < task start time + StandbyTaskMissingEventsResendDelay + if now.Before(resendTime) { + return standbyTaskPostActionNoOp + } + + // task start time + StandbyTaskMissingEventsResendDelay <= now < task start time + StandbyTaskMissingEventsResendDelay + if now.Before(discardTime) { + return 
fetchHistoryStandbyPostActionFn + } + + // task start time + StandbyTaskMissingEventsResendDelay <= now + return discardTaskStandbyPostActionFn +} + +func getRemoteClusterName( + currentCluster string, + registry namespace.Registry, + namespaceID string, +) (string, error) { + namespaceEntry, err := registry.GetNamespaceByID(namespace.ID(namespaceID)) + if err != nil { + return "", err + } + + remoteClusterName := namespaceEntry.ActiveClusterName() + if remoteClusterName == currentCluster { + // namespace has turned active, retry the task + return "", errors.New("namespace becomes active when processing task as standby") + } + return remoteClusterName, nil +} diff -Nru temporal-1.21.5-1/src/service/history/ndc_task_util.go temporal-1.22.5/src/service/history/ndc_task_util.go --- temporal-1.21.5-1/src/service/history/ndc_task_util.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/service/history/ndc_task_util.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,226 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
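The getStandbyPostActionFn helper above chooses a post-action purely from how long the standby task has been pending relative to two configured delays: before the resend delay it returns the no-op action (the task is simply retried later), between the resend and discard delays it returns the fetch-history action, and after the discard delay it returns the discard action. A minimal, self-contained illustration of the same three-way window; the durations and variable names here are made up for the example, only the branching mirrors the helper:

// Illustrative only: mirrors the time-window selection in getStandbyPostActionFn.
now := time.Now()
taskTime := now.Add(-8 * time.Minute)    // example task visibility time
resendDelay := 5 * time.Minute           // stands in for StandbyTaskMissingEventsResendDelay
discardDelay := 15 * time.Minute         // stands in for StandbyTaskMissingEventsDiscardDelay

switch {
case now.Before(taskTime.Add(resendDelay)):
	// pending < resend delay: no-op, task processing will retry later
case now.Before(taskTime.Add(discardDelay)):
	// resend delay <= pending < discard delay: fetch missing history from the active cluster
default:
	// pending >= discard delay: give up and discard the task
}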
+ +package history + +import ( + "context" + + enumspb "go.temporal.io/api/enums/v1" + "go.temporal.io/api/serviceerror" + + enumsspb "go.temporal.io/server/api/enums/v1" + persistencespb "go.temporal.io/server/api/persistence/v1" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/log/tag" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/namespace" + "go.temporal.io/server/service/history/consts" + "go.temporal.io/server/service/history/shard" + "go.temporal.io/server/service/history/tasks" + "go.temporal.io/server/service/history/workflow" +) + +// CheckTaskVersion will return an error if task version check fails +func CheckTaskVersion( + shard shard.Context, + logger log.Logger, + namespace *namespace.Namespace, + version int64, + taskVersion int64, + task interface{}, +) error { + + if !shard.GetClusterMetadata().IsGlobalNamespaceEnabled() { + return nil + } + + // the first return value is whether this task is valid for further processing + if !namespace.IsGlobalNamespace() { + logger.Debug("NamespaceID is not global, task version check pass", tag.WorkflowNamespaceID(namespace.ID().String()), tag.Task(task)) + return nil + } else if version != taskVersion { + logger.Debug("NamespaceID is global, task version != target version", tag.WorkflowNamespaceID(namespace.ID().String()), tag.Task(task), tag.TaskVersion(version)) + return consts.ErrTaskVersionMismatch + } + logger.Debug("NamespaceID is global, task version == target version", tag.WorkflowNamespaceID(namespace.ID().String()), tag.Task(task), tag.TaskVersion(version)) + return nil +} + +// load mutable state, if mutable state's next event ID <= task ID, will attempt to refresh +// if still mutable state's next event ID <= task ID, will return nil, nil +func loadMutableStateForTransferTask( + ctx context.Context, + wfContext workflow.Context, + transferTask tasks.Task, + metricsHandler metrics.Handler, + logger log.Logger, +) (workflow.MutableState, error) { + logger = tasks.InitializeLogger(transferTask, logger) + mutableState, err := LoadMutableStateForTask( + ctx, + wfContext, + transferTask, + getTransferTaskEventIDAndRetryable, + metricsHandler.WithTags(metrics.OperationTag(metrics.TransferQueueProcessorScope)), + logger, + ) + if err != nil { + // When standby task executor executes task in active cluster (and vice versa), + // mutable state might be already deleted by active task executor and NotFound is a valid case which shouldn't be logged. + // Unfortunately, this will also skip logging of actual errors that might happen due to serious bugs, + // but these errors, most likely, will happen for other task types too, and will be logged. + // TODO: remove this logic multi-cursor is implemented and only one task processor is running in each cluster. + skipNotFoundLog := + transferTask.GetType() == enumsspb.TASK_TYPE_TRANSFER_CLOSE_EXECUTION || + transferTask.GetType() == enumsspb.TASK_TYPE_TRANSFER_DELETE_EXECUTION + + if !skipNotFoundLog { + switch err.(type) { + case *serviceerror.NotFound: + // NotFound error will be ignored by task error handling logic, so log it here + // for transfer tasks, mutable state should always be available + logger.Warn("Transfer Task Processor: workflow mutable state not found, skip.") + case *serviceerror.NamespaceNotFound: + // NamespaceNotFound error will be ignored by task error handling logic, so log it here + // for transfer tasks, namespace should always be available. 
+ logger.Warn("Transfer Task Processor: namespace not found, skip.") + } + } + } + return mutableState, err +} + +// load mutable state, if mutable state's next event ID <= task ID, will attempt to refresh +// if still mutable state's next event ID <= task ID, will return nil, nil +func loadMutableStateForTimerTask( + ctx context.Context, + wfContext workflow.Context, + timerTask tasks.Task, + metricsHandler metrics.Handler, + logger log.Logger, +) (workflow.MutableState, error) { + logger = tasks.InitializeLogger(timerTask, logger) + return LoadMutableStateForTask( + ctx, + wfContext, + timerTask, + getTimerTaskEventIDAndRetryable, + metricsHandler.WithTags(metrics.OperationTag(metrics.TimerQueueProcessorScope)), + logger, + ) +} + +func LoadMutableStateForTask( + ctx context.Context, + wfContext workflow.Context, + task tasks.Task, + taskEventIDAndRetryable func(task tasks.Task, executionInfo *persistencespb.WorkflowExecutionInfo) (int64, bool), + metricsHandler metrics.Handler, + logger log.Logger, +) (workflow.MutableState, error) { + + mutableState, err := wfContext.LoadMutableState(ctx) + if err != nil { + return nil, err + } + + // check to see if cache needs to be refreshed as we could potentially have stale workflow execution + // the exception is workflow task consistently fail + // there will be no event generated, thus making the workflow task schedule ID == next event ID + eventID, retryable := taskEventIDAndRetryable(task, mutableState.GetExecutionInfo()) + if eventID < mutableState.GetNextEventID() || !retryable { + return mutableState, nil + } + + metricsHandler.Counter(metrics.StaleMutableStateCounter.GetMetricName()).Record(1) + wfContext.Clear() + + mutableState, err = wfContext.LoadMutableState(ctx) + if err != nil { + return nil, err + } + // after refresh, still mutable state's next event ID <= task's event ID + if eventID >= mutableState.GetNextEventID() { + metricsHandler.Counter(metrics.TaskSkipped.GetMetricName()).Record(1) + logger.Info("Task Processor: task event ID >= MS NextEventID, skip.", + tag.WorkflowNextEventID(mutableState.GetNextEventID()), + ) + return nil, nil + } + return mutableState, nil +} + +func getTransferTaskEventIDAndRetryable( + transferTask tasks.Task, + executionInfo *persistencespb.WorkflowExecutionInfo, +) (int64, bool) { + eventID := tasks.GetTransferTaskEventID(transferTask) + retryable := true + + if task, ok := transferTask.(*tasks.WorkflowTask); ok { + retryable = !(executionInfo.WorkflowTaskScheduledEventId == task.ScheduledEventID && executionInfo.WorkflowTaskAttempt > 1) + } + + return eventID, retryable +} + +func getTimerTaskEventIDAndRetryable( + timerTask tasks.Task, + executionInfo *persistencespb.WorkflowExecutionInfo, +) (int64, bool) { + eventID := tasks.GetTimerTaskEventID(timerTask) + retryable := true + + if task, ok := timerTask.(*tasks.WorkflowTaskTimeoutTask); ok { + retryable = !(executionInfo.WorkflowTaskScheduledEventId == task.EventID && executionInfo.WorkflowTaskAttempt > 1) && + executionInfo.WorkflowTaskType != enumsspb.WORKFLOW_TASK_TYPE_SPECULATIVE + } + + return eventID, retryable +} + +func getNamespaceTagByID( + registry namespace.Registry, + namespaceID string, +) metrics.Tag { + namespaceName, err := registry.GetNamespaceName(namespace.ID(namespaceID)) + if err != nil { + return metrics.NamespaceUnknownTag() + } + + return metrics.NamespaceTag(namespaceName.String()) +} + +func getNamespaceTagAndReplicationStateByID( + registry namespace.Registry, + namespaceID string, +) (metrics.Tag, 
enumspb.ReplicationState) { + namespace, err := registry.GetNamespaceByID(namespace.ID(namespaceID)) + if err != nil { + return metrics.NamespaceUnknownTag(), enumspb.REPLICATION_STATE_UNSPECIFIED + } + + return metrics.NamespaceTag(namespace.Name().String()), namespace.ReplicationState() +} diff -Nru temporal-1.21.5-1/src/service/history/queueFactoryBase.go temporal-1.22.5/src/service/history/queueFactoryBase.go --- temporal-1.21.5-1/src/service/history/queueFactoryBase.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/queueFactoryBase.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,228 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package history - -import ( - "context" - - "go.uber.org/fx" - - "go.temporal.io/server/common" - "go.temporal.io/server/common/archiver" - "go.temporal.io/server/common/clock" - "go.temporal.io/server/common/cluster" - "go.temporal.io/server/common/dynamicconfig" - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/metrics" - "go.temporal.io/server/common/namespace" - "go.temporal.io/server/common/quotas" - "go.temporal.io/server/service/history/configs" - "go.temporal.io/server/service/history/queues" - "go.temporal.io/server/service/history/shard" - "go.temporal.io/server/service/history/tasks" - wcache "go.temporal.io/server/service/history/workflow/cache" -) - -const QueueFactoryFxGroup = "queueFactory" - -type ( - QueueFactory interface { - common.Daemon - - // TODO: - // 1. Remove the cache parameter after workflow cache become a host level component - // and it can be provided as a parameter when creating a QueueFactory instance. - // Currently, workflow cache is shard level, but we can't get it from shard or engine interface, - // as that will lead to a cycle dependency issue between shard and workflow package. - // 2. Move this interface to queues package after 1 is done so that there's no cycle dependency - // between workflow and queues package. 
- CreateQueue(shard shard.Context, cache wcache.Cache) queues.Queue - } - - QueueFactoryBaseParams struct { - fx.In - - NamespaceRegistry namespace.Registry - ClusterMetadata cluster.Metadata - Config *configs.Config - TimeSource clock.TimeSource - MetricsHandler metrics.Handler - Logger log.SnTaggedLogger - SchedulerRateLimiter queues.SchedulerRateLimiter - } - - QueueFactoryBase struct { - HostScheduler queues.Scheduler - HostPriorityAssigner queues.PriorityAssigner - HostReaderRateLimiter quotas.RequestRateLimiter - } - - QueueFactoriesLifetimeHookParams struct { - fx.In - - Lifecycle fx.Lifecycle - Factories []QueueFactory `group:"queueFactory"` - } -) - -var QueueModule = fx.Options( - fx.Provide(QueueSchedulerRateLimiterProvider), - fx.Provide( - fx.Annotated{ - Group: QueueFactoryFxGroup, - Target: NewTransferQueueFactory, - }, - fx.Annotated{ - Group: QueueFactoryFxGroup, - Target: NewTimerQueueFactory, - }, - fx.Annotated{ - Group: QueueFactoryFxGroup, - Target: NewVisibilityQueueFactory, - }, - fx.Annotated{ - Group: QueueFactoryFxGroup, - Target: NewMemoryScheduledQueueFactory, - }, - getOptionalQueueFactories, - ), - fx.Invoke(QueueFactoryLifetimeHooks), -) - -// additionalQueueFactories is a container for a list of queue factories that are only added to the group if -// they are enabled. This exists because there is no way to conditionally add to a group with a provider that returns -// a single object. For example, this doesn't work because it will always add the factory to the group, which can -// cause NPEs: -// -// fx.Annotated{ -// Group: "queueFactory", -// Target: func() QueueFactory { return isEnabled ? NewQueueFactory() : nil }, -// }, -type additionalQueueFactories struct { - // This is what tells fx to add the factories to the group whenever this object is provided. - fx.Out - - // Factories is a list of queue factories that will be added to the `group:"queueFactory"` group. - Factories []QueueFactory `group:"queueFactory,flatten"` -} - -// getOptionalQueueFactories returns an additionalQueueFactories which contains a list of queue factories that will be -// added to the `group:"queueFactory"` group. The factories are added to the group only if they are enabled, which -// is why we must return a list here. 
-func getOptionalQueueFactories( - archivalMetadata archiver.ArchivalMetadata, - params ArchivalQueueFactoryParams, -) additionalQueueFactories { - - c := tasks.CategoryArchival - // Removing this category will only affect tests because this method is only called once in production, - // but it may be called many times across test runs, which would leave the archival queue as a dangling category - tasks.RemoveCategory(c.ID()) - if archivalMetadata.GetHistoryConfig().StaticClusterState() != archiver.ArchivalEnabled && - archivalMetadata.GetVisibilityConfig().StaticClusterState() != archiver.ArchivalEnabled { - return additionalQueueFactories{} - } - tasks.NewCategory(c.ID(), c.Type(), c.Name()) - return additionalQueueFactories{ - Factories: []QueueFactory{ - NewArchivalQueueFactory(params), - }, - } -} - -func QueueSchedulerRateLimiterProvider( - config *configs.Config, -) queues.SchedulerRateLimiter { - return queues.NewSchedulerRateLimiter( - config.TaskSchedulerNamespaceMaxQPS, - config.TaskSchedulerMaxQPS, - config.PersistenceNamespaceMaxQPS, - config.PersistenceMaxQPS, - ) -} - -func QueueFactoryLifetimeHooks( - params QueueFactoriesLifetimeHookParams, -) { - params.Lifecycle.Append( - fx.Hook{ - OnStart: func(context.Context) error { - for _, factory := range params.Factories { - factory.Start() - } - return nil - }, - OnStop: func(context.Context) error { - for _, factory := range params.Factories { - factory.Stop() - } - return nil - }, - }, - ) -} - -func (f *QueueFactoryBase) Start() { - if f.HostScheduler != nil { - f.HostScheduler.Start() - } -} - -func (f *QueueFactoryBase) Stop() { - if f.HostScheduler != nil { - f.HostScheduler.Stop() - } -} - -func NewQueueHostRateLimiter( - hostRPS dynamicconfig.IntPropertyFn, - persistenceMaxRPS dynamicconfig.IntPropertyFn, - persistenceMaxRPSRatio float64, -) quotas.RateLimiter { - return quotas.NewDefaultOutgoingRateLimiter( - NewHostRateLimiterRateFn( - hostRPS, - persistenceMaxRPS, - persistenceMaxRPSRatio, - ), - ) -} - -func NewHostRateLimiterRateFn( - hostRPS dynamicconfig.IntPropertyFn, - persistenceMaxRPS dynamicconfig.IntPropertyFn, - persistenceMaxRPSRatio float64, -) quotas.RateFn { - return func() float64 { - if maxPollHostRps := hostRPS(); maxPollHostRps > 0 { - return float64(maxPollHostRps) - } - - // ensure queue loading won't consume all persistence tokens - // especially upon host restart when we need to perform a load - // for all shards - return float64(persistenceMaxRPS()) * persistenceMaxRPSRatio - } -} diff -Nru temporal-1.21.5-1/src/service/history/queue_factory_base.go temporal-1.22.5/src/service/history/queue_factory_base.go --- temporal-1.21.5-1/src/service/history/queue_factory_base.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/service/history/queue_factory_base.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,230 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package history + +import ( + "context" + + "go.uber.org/fx" + + "go.temporal.io/server/common/archiver" + "go.temporal.io/server/common/clock" + "go.temporal.io/server/common/cluster" + "go.temporal.io/server/common/dynamicconfig" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/namespace" + "go.temporal.io/server/common/quotas" + "go.temporal.io/server/service/history/configs" + "go.temporal.io/server/service/history/queues" + "go.temporal.io/server/service/history/shard" + "go.temporal.io/server/service/history/tasks" + wcache "go.temporal.io/server/service/history/workflow/cache" +) + +const QueueFactoryFxGroup = "queueFactory" + +type ( + QueueFactory interface { + Start() + Stop() + + // TODO: + // 1. Remove the cache parameter after workflow cache become a host level component + // and it can be provided as a parameter when creating a QueueFactory instance. + // Currently, workflow cache is shard level, but we can't get it from shard or engine interface, + // as that will lead to a cycle dependency issue between shard and workflow package. + // 2. Move this interface to queues package after 1 is done so that there's no cycle dependency + // between workflow and queues package. 
+ CreateQueue(shard shard.Context, cache wcache.Cache) queues.Queue + } + + QueueFactoryBaseParams struct { + fx.In + + NamespaceRegistry namespace.Registry + ClusterMetadata cluster.Metadata + Config *configs.Config + TimeSource clock.TimeSource + MetricsHandler metrics.Handler + Logger log.SnTaggedLogger + SchedulerRateLimiter queues.SchedulerRateLimiter + + ExecutorWrapper queues.ExecutorWrapper `optional:"true"` + } + + QueueFactoryBase struct { + HostScheduler queues.Scheduler + HostPriorityAssigner queues.PriorityAssigner + HostReaderRateLimiter quotas.RequestRateLimiter + } + + QueueFactoriesLifetimeHookParams struct { + fx.In + + Lifecycle fx.Lifecycle + Factories []QueueFactory `group:"queueFactory"` + } +) + +var QueueModule = fx.Options( + fx.Provide(QueueSchedulerRateLimiterProvider), + fx.Provide( + fx.Annotated{ + Group: QueueFactoryFxGroup, + Target: NewTransferQueueFactory, + }, + fx.Annotated{ + Group: QueueFactoryFxGroup, + Target: NewTimerQueueFactory, + }, + fx.Annotated{ + Group: QueueFactoryFxGroup, + Target: NewVisibilityQueueFactory, + }, + fx.Annotated{ + Group: QueueFactoryFxGroup, + Target: NewMemoryScheduledQueueFactory, + }, + getOptionalQueueFactories, + ), + fx.Invoke(QueueFactoryLifetimeHooks), +) + +// additionalQueueFactories is a container for a list of queue factories that are only added to the group if +// they are enabled. This exists because there is no way to conditionally add to a group with a provider that returns +// a single object. For example, this doesn't work because it will always add the factory to the group, which can +// cause NPEs: +// +// fx.Annotated{ +// Group: "queueFactory", +// Target: func() QueueFactory { return isEnabled ? NewQueueFactory() : nil }, +// }, +type additionalQueueFactories struct { + // This is what tells fx to add the factories to the group whenever this object is provided. + fx.Out + + // Factories is a list of queue factories that will be added to the `group:"queueFactory"` group. + Factories []QueueFactory `group:"queueFactory,flatten"` +} + +// getOptionalQueueFactories returns an additionalQueueFactories which contains a list of queue factories that will be +// added to the `group:"queueFactory"` group. The factories are added to the group only if they are enabled, which +// is why we must return a list here. 
+func getOptionalQueueFactories( + archivalMetadata archiver.ArchivalMetadata, + params ArchivalQueueFactoryParams, +) additionalQueueFactories { + + c := tasks.CategoryArchival + // Removing this category will only affect tests because this method is only called once in production, + // but it may be called many times across test runs, which would leave the archival queue as a dangling category + tasks.RemoveCategory(c.ID()) + if archivalMetadata.GetHistoryConfig().StaticClusterState() != archiver.ArchivalEnabled && + archivalMetadata.GetVisibilityConfig().StaticClusterState() != archiver.ArchivalEnabled { + return additionalQueueFactories{} + } + tasks.NewCategory(c.ID(), c.Type(), c.Name()) + return additionalQueueFactories{ + Factories: []QueueFactory{ + NewArchivalQueueFactory(params), + }, + } +} + +func QueueSchedulerRateLimiterProvider( + config *configs.Config, +) queues.SchedulerRateLimiter { + return queues.NewSchedulerRateLimiter( + config.TaskSchedulerNamespaceMaxQPS, + config.TaskSchedulerMaxQPS, + config.PersistenceNamespaceMaxQPS, + config.PersistenceMaxQPS, + ) +} + +func QueueFactoryLifetimeHooks( + params QueueFactoriesLifetimeHookParams, +) { + params.Lifecycle.Append( + fx.Hook{ + OnStart: func(context.Context) error { + for _, factory := range params.Factories { + factory.Start() + } + return nil + }, + OnStop: func(context.Context) error { + for _, factory := range params.Factories { + factory.Stop() + } + return nil + }, + }, + ) +} + +func (f *QueueFactoryBase) Start() { + if f.HostScheduler != nil { + f.HostScheduler.Start() + } +} + +func (f *QueueFactoryBase) Stop() { + if f.HostScheduler != nil { + f.HostScheduler.Stop() + } +} + +func NewQueueHostRateLimiter( + hostRPS dynamicconfig.IntPropertyFn, + persistenceMaxRPS dynamicconfig.IntPropertyFn, + persistenceMaxRPSRatio float64, +) quotas.RateLimiter { + return quotas.NewDefaultOutgoingRateLimiter( + NewHostRateLimiterRateFn( + hostRPS, + persistenceMaxRPS, + persistenceMaxRPSRatio, + ), + ) +} + +func NewHostRateLimiterRateFn( + hostRPS dynamicconfig.IntPropertyFn, + persistenceMaxRPS dynamicconfig.IntPropertyFn, + persistenceMaxRPSRatio float64, +) quotas.RateFn { + return func() float64 { + if maxPollHostRps := hostRPS(); maxPollHostRps > 0 { + return float64(maxPollHostRps) + } + + // ensure queue loading won't consume all persistence tokens + // especially upon host restart when we need to perform a load + // for all shards + return float64(persistenceMaxRPS()) * persistenceMaxRPSRatio + } +} diff -Nru temporal-1.21.5-1/src/service/history/queue_factory_base_test.go temporal-1.22.5/src/service/history/queue_factory_base_test.go --- temporal-1.21.5-1/src/service/history/queue_factory_base_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/queue_factory_base_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -32,7 +32,6 @@ "github.com/stretchr/testify/require" "go.uber.org/fx" - "go.temporal.io/server/api/historyservice/v1" "go.temporal.io/server/client" carchiver "go.temporal.io/server/common/archiver" "go.temporal.io/server/common/clock" @@ -175,8 +174,8 @@ client.Bean archiver.Client sdk.ClientFactory - resource.MatchingClient - historyservice.HistoryServiceClient + resource.MatchingRawClient + resource.HistoryRawClient manager.VisibilityManager archival.Archiver workflow.RelocatableAttributesFetcher diff -Nru temporal-1.21.5-1/src/service/history/queues/active_standby_executor.go temporal-1.22.5/src/service/history/queues/active_standby_executor.go --- 
temporal-1.21.5-1/src/service/history/queues/active_standby_executor.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/service/history/queues/active_standby_executor.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,97 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package queues + +import ( + "context" + + "go.temporal.io/server/common/headers" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/log/tag" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/namespace" +) + +type ( + activeStandbyExecutor struct { + currentClusterName string + registry namespace.Registry + activeExecutor Executor + standbyExecutor Executor + logger log.Logger + } +) + +func NewActiveStandbyExecutor( + currentClusterName string, + registry namespace.Registry, + activeExecutor Executor, + standbyExecutor Executor, + logger log.Logger, +) Executor { + return &activeStandbyExecutor{ + currentClusterName: currentClusterName, + registry: registry, + activeExecutor: activeExecutor, + standbyExecutor: standbyExecutor, + logger: logger, + } +} + +func (e *activeStandbyExecutor) Execute( + ctx context.Context, + executable Executable, +) ([]metrics.Tag, bool, error) { + if e.isActiveTask(executable) { + return e.activeExecutor.Execute(ctx, executable) + } + + // for standby tasks, use preemptable callerType to avoid impacting active traffic + return e.standbyExecutor.Execute( + headers.SetCallerType(ctx, headers.CallerTypePreemptable), + executable, + ) +} + +func (e *activeStandbyExecutor) isActiveTask( + executable Executable, +) bool { + // Following is the existing task allocator logic for verifying active task + + namespaceID := executable.GetNamespaceID() + entry, err := e.registry.GetNamespaceByID(namespace.ID(namespaceID)) + if err != nil { + e.logger.Warn("Unable to find namespace, process task as active.", tag.WorkflowNamespaceID(namespaceID), tag.Value(executable.GetTask())) + return true + } + + if !entry.ActiveInCluster(e.currentClusterName) { + e.logger.Debug("Process task as standby.", tag.WorkflowNamespaceID(namespaceID), tag.Value(executable.GetTask())) + return false + } + + e.logger.Debug("Process task as active.", tag.WorkflowNamespaceID(namespaceID), tag.Value(executable.GetTask())) + return true +} diff -Nru 
temporal-1.21.5-1/src/service/history/queues/active_standby_executor_test.go temporal-1.22.5/src/service/history/queues/active_standby_executor_test.go --- temporal-1.21.5-1/src/service/history/queues/active_standby_executor_test.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/service/history/queues/active_standby_executor_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,107 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package queues + +import ( + "context" + "testing" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + persistencepb "go.temporal.io/server/api/persistence/v1" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/namespace" +) + +const ( + currentCluster = "current" + nonCurrentCluster = "nonCurrent" +) + +type ( + executorSuite struct { + suite.Suite + *require.Assertions + ctrl *gomock.Controller + + registry *namespace.MockRegistry + activeExecutor *MockExecutor + standbyExecutor *MockExecutor + executor Executor + } +) + +func TestExecutorSuite(t *testing.T) { + t.Parallel() + s := new(executorSuite) + suite.Run(t, s) +} + +func (s *executorSuite) SetupTest() { + s.Assertions = require.New(s.T()) + s.ctrl = gomock.NewController(s.T()) + s.registry = namespace.NewMockRegistry(s.ctrl) + s.activeExecutor = NewMockExecutor(s.ctrl) + s.standbyExecutor = NewMockExecutor(s.ctrl) + s.executor = NewActiveStandbyExecutor( + currentCluster, + s.registry, + s.activeExecutor, + s.standbyExecutor, + log.NewNoopLogger(), + ) +} + +func (s *executorSuite) TestExecute_Active() { + executable := NewMockExecutable(s.ctrl) + executable.EXPECT().GetNamespaceID().Return("namespace_id") + executable.EXPECT().GetTask().Return(nil) + ns := namespace.NewGlobalNamespaceForTest(nil, nil, &persistencepb.NamespaceReplicationConfig{ + ActiveClusterName: currentCluster, + Clusters: []string{currentCluster}, + }, 1) + s.registry.EXPECT().GetNamespaceByID(gomock.Any()).Return(ns, nil) + s.activeExecutor.EXPECT().Execute(gomock.Any(), gomock.Any()).Return(nil, true, nil).Times(1) + _, isActive, err := s.executor.Execute(context.Background(), executable) + s.NoError(err) + s.True(isActive) +} + +func (s *executorSuite) TestExecute_Standby() { + executable := NewMockExecutable(s.ctrl) + 
executable.EXPECT().GetNamespaceID().Return("namespace_id") + executable.EXPECT().GetTask().Return(nil) + ns := namespace.NewGlobalNamespaceForTest(nil, nil, &persistencepb.NamespaceReplicationConfig{ + ActiveClusterName: nonCurrentCluster, + Clusters: []string{currentCluster, nonCurrentCluster}, + }, 1) + s.registry.EXPECT().GetNamespaceByID(gomock.Any()).Return(ns, nil) + s.standbyExecutor.EXPECT().Execute(gomock.Any(), gomock.Any()).Return(nil, false, nil).Times(1) + _, isActive, err := s.executor.Execute(context.Background(), executable) + s.NoError(err) + s.False(isActive) +} diff -Nru temporal-1.21.5-1/src/service/history/queues/executable.go temporal-1.22.5/src/service/history/queues/executable.go --- temporal-1.21.5-1/src/service/history/queues/executable.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/queues/executable.go 2024-02-23 09:45:43.000000000 +0000 @@ -41,6 +41,7 @@ "go.temporal.io/server/common/backoff" "go.temporal.io/server/common/clock" "go.temporal.io/server/common/cluster" + "go.temporal.io/server/common/dynamicconfig" "go.temporal.io/server/common/headers" "go.temporal.io/server/common/log" "go.temporal.io/server/common/log/tag" @@ -71,6 +72,10 @@ // active/standby queue processing logic Execute(context.Context, Executable) (tags []metrics.Tag, isActive bool, err error) } + + ExecutorWrapper interface { + Wrap(delegate Executor) Executor + } ) var ( @@ -116,15 +121,16 @@ logger log.Logger metricsHandler metrics.Handler - readerID int64 - loadTime time.Time - scheduledTime time.Time - scheduleLatency time.Duration - attemptNoUserLatency time.Duration - inMemoryNoUserLatency time.Duration - lastActiveness bool - systemResourceExhaustedCount int - taggedMetricsHandler metrics.Handler + readerID int64 + loadTime time.Time + scheduledTime time.Time + scheduleLatency time.Duration + attemptNoUserLatency time.Duration + inMemoryNoUserLatency time.Duration + lastActiveness bool + resourceExhaustedCount int // does NOT include consts.ErrResourceExhaustedBusyWorkflow + taggedMetricsHandler metrics.Handler + dropInternalErrors dynamicconfig.BoolPropertyFn } ) @@ -140,7 +146,11 @@ clusterMetadata cluster.Metadata, logger log.Logger, metricsHandler metrics.Handler, + dropInternalErrors dynamicconfig.BoolPropertyFn, ) Executable { + if dropInternalErrors == nil { + dropInternalErrors = func() bool { return false } + } executable := &executableImpl{ Task: task, state: ctasks.TaskStatePending, @@ -162,6 +172,7 @@ ), metricsHandler: metricsHandler, taggedMetricsHandler: metricsHandler, + dropInternalErrors: dropInternalErrors, } executable.updatePriority() return executable @@ -248,8 +259,9 @@ } defer func() { - if !errors.Is(retErr, consts.ErrResourceExhaustedBusyWorkflow) { - // if err is due to workflow busy, do not take any latency related to this attempt into account + if !errors.Is(retErr, consts.ErrResourceExhaustedBusyWorkflow) && + !errors.Is(retErr, consts.ErrResourceExhaustedAPSLimit) { + // if err is due to workflow busy or APS limit, do not take any latency related to this attempt into account e.inMemoryNoUserLatency += e.scheduleLatency + e.attemptNoUserLatency } @@ -268,14 +280,17 @@ var resourceExhaustedErr *serviceerror.ResourceExhausted if errors.As(err, &resourceExhaustedErr) { if resourceExhaustedErr.Cause != enums.RESOURCE_EXHAUSTED_CAUSE_BUSY_WORKFLOW { - e.systemResourceExhaustedCount++ + if resourceExhaustedErr.Cause == enums.RESOURCE_EXHAUSTED_CAUSE_APS_LIMIT { + err = consts.ErrResourceExhaustedAPSLimit + } + 
e.resourceExhaustedCount++ e.taggedMetricsHandler.Counter(metrics.TaskThrottledCounter.GetMetricName()).Record(1) return err } err = consts.ErrResourceExhaustedBusyWorkflow } - e.systemResourceExhaustedCount = 0 + e.resourceExhaustedCount = 0 if _, isNotFound := err.(*serviceerror.NotFound); isNotFound { return nil @@ -333,6 +348,13 @@ e.logger.Error("Drop task due to serialization error", tag.Error(err)) return nil } + if common.IsInternalError(err) { + e.logger.Error("Encountered internal error processing tasks", tag.Error(err)) + e.taggedMetricsHandler.Counter(metrics.TaskInternalErrorCounter.GetMetricName()).Record(1) + if e.dropInternalErrors() { + return nil + } + } e.taggedMetricsHandler.Counter(metrics.TaskFailures.GetMetricName()).Record(1) @@ -416,7 +438,8 @@ if !submitted { backoffDuration := e.backoffDuration(err, e.Attempt()) e.rescheduler.Add(e, e.timeSource.Now().Add(backoffDuration)) - if !errors.Is(err, consts.ErrResourceExhaustedBusyWorkflow) { + if !errors.Is(err, consts.ErrResourceExhaustedBusyWorkflow) && + !errors.Is(err, consts.ErrResourceExhaustedAPSLimit) { e.inMemoryNoUserLatency += backoffDuration } } @@ -476,7 +499,7 @@ if !errors.Is(err, consts.ErrResourceExhaustedBusyWorkflow) && common.IsResourceExhausted(err) && - e.systemResourceExhaustedCount > resourceExhaustedResubmitMaxAttempts { + e.resourceExhaustedCount > resourceExhaustedResubmitMaxAttempts { return false } @@ -518,7 +541,7 @@ // upon system resource exhausted error and pick the longer backoff duration backoffDuration = util.Max( backoffDuration, - taskResourceExhuastedReschedulePolicy.ComputeNextDelay(0, e.systemResourceExhaustedCount), + taskResourceExhuastedReschedulePolicy.ComputeNextDelay(0, e.resourceExhaustedCount), ) } diff -Nru temporal-1.21.5-1/src/service/history/queues/executable_mock.go temporal-1.22.5/src/service/history/queues/executable_mock.go --- temporal-1.21.5-1/src/service/history/queues/executable_mock.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/queues/executable_mock.go 2024-02-23 09:45:43.000000000 +0000 @@ -450,3 +450,40 @@ mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Execute", reflect.TypeOf((*MockExecutor)(nil).Execute), arg0, arg1) } + +// MockExecutorWrapper is a mock of ExecutorWrapper interface. +type MockExecutorWrapper struct { + ctrl *gomock.Controller + recorder *MockExecutorWrapperMockRecorder +} + +// MockExecutorWrapperMockRecorder is the mock recorder for MockExecutorWrapper. +type MockExecutorWrapperMockRecorder struct { + mock *MockExecutorWrapper +} + +// NewMockExecutorWrapper creates a new mock instance. +func NewMockExecutorWrapper(ctrl *gomock.Controller) *MockExecutorWrapper { + mock := &MockExecutorWrapper{ctrl: ctrl} + mock.recorder = &MockExecutorWrapperMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockExecutorWrapper) EXPECT() *MockExecutorWrapperMockRecorder { + return m.recorder +} + +// Wrap mocks base method. +func (m *MockExecutorWrapper) Wrap(delegate Executor) Executor { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Wrap", delegate) + ret0, _ := ret[0].(Executor) + return ret0 +} + +// Wrap indicates an expected call of Wrap. 
+func (mr *MockExecutorWrapperMockRecorder) Wrap(delegate interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Wrap", reflect.TypeOf((*MockExecutorWrapper)(nil).Wrap), delegate) +} diff -Nru temporal-1.21.5-1/src/service/history/queues/executable_test.go temporal-1.22.5/src/service/history/queues/executable_test.go --- temporal-1.21.5-1/src/service/history/queues/executable_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/queues/executable_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -40,6 +40,7 @@ "go.temporal.io/server/common/clock" "go.temporal.io/server/common/cluster" "go.temporal.io/server/common/definition" + "go.temporal.io/server/common/dynamicconfig" "go.temporal.io/server/common/headers" "go.temporal.io/server/common/log" "go.temporal.io/server/common/metrics" @@ -65,6 +66,11 @@ timeSource *clock.EventTimeSource } + + params struct { + dropInternalErrors dynamicconfig.BoolPropertyFn + } + option func(*params) ) func TestExecutableSuite(t *testing.T) { @@ -297,6 +303,34 @@ s.NoError(executable.HandleErr(err)) } +func (s *executableSuite) TestExecute_DropsInternalErrors_WhenEnabled() { + executable := s.newTestExecutable(func(p *params) { + p.dropInternalErrors = func() bool { return true } + }) + + s.mockExecutor.EXPECT().Execute(gomock.Any(), executable).DoAndReturn( + func(_ context.Context, _ Executable) ([]metrics.Tag, bool, error) { + panic(serviceerror.NewInternal("injected error")) + }, + ) + + s.NoError(executable.HandleErr(executable.Execute())) +} + +func (s *executableSuite) TestExecute_DoesntDropInternalErrors_WhenDisabled() { + executable := s.newTestExecutable(func(p *params) { + p.dropInternalErrors = func() bool { return false } + }) + + s.mockExecutor.EXPECT().Execute(gomock.Any(), executable).DoAndReturn( + func(_ context.Context, _ Executable) ([]metrics.Tag, bool, error) { + panic(serviceerror.NewInternal("injected error")) + }, + ) + + s.Error(executable.HandleErr(executable.Execute())) +} + func (s *executableSuite) TestHandleErr_EntityNotExists() { executable := s.newTestExecutable() @@ -408,7 +442,13 @@ s.False(executable.IsRetryableError(errors.New("some random error"))) } -func (s *executableSuite) newTestExecutable() Executable { +func (s *executableSuite) newTestExecutable(opts ...option) Executable { + p := params{ + dropInternalErrors: func() bool { return false }, + } + for _, opt := range opts { + opt(&p) + } return NewExecutable( DefaultReaderId, tasks.NewFakeTask( @@ -429,5 +469,6 @@ s.mockClusterMetadata, log.NewTestLogger(), metrics.NoopMetricsHandler, + p.dropInternalErrors, ) } diff -Nru temporal-1.21.5-1/src/service/history/queues/executor_wrapper.go temporal-1.22.5/src/service/history/queues/executor_wrapper.go --- temporal-1.21.5-1/src/service/history/queues/executor_wrapper.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/queues/executor_wrapper.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,97 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package queues - -import ( - "context" - - "go.temporal.io/server/common/headers" - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/log/tag" - "go.temporal.io/server/common/metrics" - "go.temporal.io/server/common/namespace" -) - -type ( - executorWrapper struct { - currentClusterName string - registry namespace.Registry - activeExecutor Executor - standbyExecutor Executor - logger log.Logger - } -) - -func NewExecutorWrapper( - currentClusterName string, - registry namespace.Registry, - activeExecutor Executor, - standbyExecutor Executor, - logger log.Logger, -) Executor { - return &executorWrapper{ - currentClusterName: currentClusterName, - registry: registry, - activeExecutor: activeExecutor, - standbyExecutor: standbyExecutor, - logger: logger, - } -} - -func (e *executorWrapper) Execute( - ctx context.Context, - executable Executable, -) ([]metrics.Tag, bool, error) { - if e.isActiveTask(executable) { - return e.activeExecutor.Execute(ctx, executable) - } - - // for standby tasks, use preemptable callerType to avoid impacting active traffic - return e.standbyExecutor.Execute( - headers.SetCallerType(ctx, headers.CallerTypePreemptable), - executable, - ) -} - -func (e *executorWrapper) isActiveTask( - executable Executable, -) bool { - // Following is the existing task allocator logic for verifying active task - - namespaceID := executable.GetNamespaceID() - entry, err := e.registry.GetNamespaceByID(namespace.ID(namespaceID)) - if err != nil { - e.logger.Warn("Unable to find namespace, process task as active.", tag.WorkflowNamespaceID(namespaceID), tag.Value(executable.GetTask())) - return true - } - - if !entry.ActiveInCluster(e.currentClusterName) { - e.logger.Debug("Process task as standby.", tag.WorkflowNamespaceID(namespaceID), tag.Value(executable.GetTask())) - return false - } - - e.logger.Debug("Process task as active.", tag.WorkflowNamespaceID(namespaceID), tag.Value(executable.GetTask())) - return true -} diff -Nru temporal-1.21.5-1/src/service/history/queues/executor_wrapper_test.go temporal-1.22.5/src/service/history/queues/executor_wrapper_test.go --- temporal-1.21.5-1/src/service/history/queues/executor_wrapper_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/queues/executor_wrapper_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,107 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. 
-// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package queues - -import ( - "context" - "testing" - - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - - persistencepb "go.temporal.io/server/api/persistence/v1" - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/namespace" -) - -const ( - currentCluster = "current" - nonCurrentCluster = "nonCurrent" -) - -type ( - executorSuite struct { - suite.Suite - *require.Assertions - ctrl *gomock.Controller - - registry *namespace.MockRegistry - activeExecutor *MockExecutor - standbyExecutor *MockExecutor - executor Executor - } -) - -func TestExecutorSuite(t *testing.T) { - t.Parallel() - s := new(executorSuite) - suite.Run(t, s) -} - -func (s *executorSuite) SetupTest() { - s.Assertions = require.New(s.T()) - s.ctrl = gomock.NewController(s.T()) - s.registry = namespace.NewMockRegistry(s.ctrl) - s.activeExecutor = NewMockExecutor(s.ctrl) - s.standbyExecutor = NewMockExecutor(s.ctrl) - s.executor = NewExecutorWrapper( - currentCluster, - s.registry, - s.activeExecutor, - s.standbyExecutor, - log.NewNoopLogger(), - ) -} - -func (s *executorSuite) TestExecute_Active() { - executable := NewMockExecutable(s.ctrl) - executable.EXPECT().GetNamespaceID().Return("namespace_id") - executable.EXPECT().GetTask().Return(nil) - ns := namespace.NewGlobalNamespaceForTest(nil, nil, &persistencepb.NamespaceReplicationConfig{ - ActiveClusterName: currentCluster, - Clusters: []string{currentCluster}, - }, 1) - s.registry.EXPECT().GetNamespaceByID(gomock.Any()).Return(ns, nil) - s.activeExecutor.EXPECT().Execute(gomock.Any(), gomock.Any()).Return(nil, true, nil).Times(1) - _, isActive, err := s.executor.Execute(context.Background(), executable) - s.NoError(err) - s.True(isActive) -} - -func (s *executorSuite) TestExecute_Standby() { - executable := NewMockExecutable(s.ctrl) - executable.EXPECT().GetNamespaceID().Return("namespace_id") - executable.EXPECT().GetTask().Return(nil) - ns := namespace.NewGlobalNamespaceForTest(nil, nil, &persistencepb.NamespaceReplicationConfig{ - ActiveClusterName: nonCurrentCluster, - Clusters: []string{currentCluster, nonCurrentCluster}, - }, 1) - s.registry.EXPECT().GetNamespaceByID(gomock.Any()).Return(ns, nil) - s.standbyExecutor.EXPECT().Execute(gomock.Any(), gomock.Any()).Return(nil, false, nil).Times(1) - _, isActive, err := s.executor.Execute(context.Background(), 
executable) - s.NoError(err) - s.False(isActive) -} diff -Nru temporal-1.21.5-1/src/service/history/queues/memory_scheduled_queue_test.go temporal-1.22.5/src/service/history/queues/memory_scheduled_queue_test.go --- temporal-1.21.5-1/src/service/history/queues/memory_scheduled_queue_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/queues/memory_scheduled_queue_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -184,6 +184,7 @@ nil, nil, nil, + func() bool { return false }, ), wttt, ) diff -Nru temporal-1.21.5-1/src/service/history/queues/queue.go temporal-1.22.5/src/service/history/queues/queue.go --- temporal-1.21.5-1/src/service/history/queues/queue.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/queues/queue.go 2024-02-23 09:45:43.000000000 +0000 @@ -25,7 +25,6 @@ package queues import ( - "go.temporal.io/server/common" "go.temporal.io/server/service/history/tasks" ) @@ -33,9 +32,10 @@ type ( Queue interface { - common.Daemon Category() tasks.Category NotifyNewTasks(tasks []tasks.Task) FailoverNamespace(namespaceID string) + Start() + Stop() } ) diff -Nru temporal-1.21.5-1/src/service/history/queues/queue_base.go temporal-1.22.5/src/service/history/queues/queue_base.go --- temporal-1.21.5-1/src/service/history/queues/queue_base.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/queues/queue_base.go 2024-02-23 09:45:43.000000000 +0000 @@ -168,6 +168,7 @@ shard.GetClusterMetadata(), logger, metricsHandler, + shard.GetConfig().TaskDropInternalErrors, ) } @@ -392,7 +393,7 @@ // for the queue p.metricsHandler.Counter(metrics.TaskBatchCompleteCounter.GetMetricName()).Record(1) if newExclusiveDeletionHighWatermark.CompareTo(p.exclusiveDeletionHighWatermark) > 0 || - p.updateShardRangeID() { + (p.updateShardRangeID() && newExclusiveDeletionHighWatermark.CompareTo(tasks.MinimumKey) > 0) { // when shard rangeID is updated, perform range completion again in case the underlying persistence implementation // serves traffic based on the persisted shardInfo err := p.rangeCompleteTasks(p.exclusiveDeletionHighWatermark, newExclusiveDeletionHighWatermark) diff -Nru temporal-1.21.5-1/src/service/history/queues/queue_base_test.go temporal-1.22.5/src/service/history/queues/queue_base_test.go --- temporal-1.21.5-1/src/service/history/queues/queue_base_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/queues/queue_base_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -27,6 +27,7 @@ import ( "context" "errors" + "math/rand" "testing" "time" @@ -117,19 +118,11 @@ } func (s *queueBaseSuite) TestNewProcessBase_NoPreviousState() { - ackLevel := int64(1024) - rangeID := int64(10) - mockShard := shard.NewTestContext( s.controller, &persistencespb.ShardInfo{ ShardId: 0, - RangeId: rangeID, - QueueAckLevels: map[int32]*persistencespb.QueueAckLevel{ - tasks.CategoryIDTransfer: { - AckLevel: ackLevel, - }, - }, + RangeId: int64(10), }, s.config, ) @@ -150,7 +143,7 @@ ) s.Len(base.readerGroup.Readers(), 0) - s.Equal(ackLevel+1, base.nonReadableScope.Range.InclusiveMin.TaskID) + s.Equal(int64(1), base.nonReadableScope.Range.InclusiveMin.TaskID) } func (s *queueBaseSuite) TestNewProcessBase_WithPreviousState_RestoreSucceed() { @@ -326,11 +319,6 @@ &persistencespb.ShardInfo{ ShardId: 0, RangeId: 10, - QueueAckLevels: map[int32]*persistencespb.QueueAckLevel{ - tasks.CategoryIDTransfer: { - AckLevel: 1024, - }, - }, }, s.config, ) @@ -433,7 +421,7 @@ 
s.True(base.nonReadableScope.Range.Equals(NewRange(scopes[0].Range.ExclusiveMax, tasks.MaximumKey))) } -func (s *queueBaseSuite) TestCheckPoint_WithPendingTasks() { +func (s *queueBaseSuite) TestCheckPoint_WithPendingTasks_PerformRangeCompletion() { scopeMinKey := tasks.MaximumKey readerScopes := map[int64][]Scope{} readerIDs := []int64{DefaultReaderId, 2, 3} @@ -507,6 +495,75 @@ mockShard.Resource.ShardMgr.EXPECT().UpdateShard(gomock.Any(), gomock.Any()).DoAndReturn( func(_ context.Context, request *persistence.UpdateShardRequest) error { s.QueueStateEqual(persistenceState, request.ShardInfo.QueueStates[tasks.CategoryIDTimer]) + return nil + }, + ).Times(1), + ) + + base.checkpoint() + + s.True(scopeMinKey.CompareTo(base.exclusiveDeletionHighWatermark) == 0) +} + +func (s *queueBaseSuite) TestCheckPoint_WithPendingTasks_SkipRangeCompletion() { + // task range completion should be skipped when there's no task to delete + scopeMinKey := tasks.MinimumKey + readerScopes := map[int64][]Scope{ + DefaultReaderId: { + { + Range: NewRange(scopeMinKey, tasks.NewKey(time.Now(), rand.Int63())), + Predicate: predicates.Universal[tasks.Task](), + }, + }, + } + queueState := &queueState{ + readerScopes: readerScopes, + exclusiveReaderHighWatermark: tasks.MaximumKey, + } + persistenceState := ToPersistenceQueueState(queueState) + + mockShard := shard.NewTestContext( + s.controller, + &persistencespb.ShardInfo{ + ShardId: 0, + RangeId: 10, + QueueStates: map[int32]*persistencespb.QueueState{ + tasks.CategoryIDTimer: persistenceState, + }, + }, + s.config, + ) + mockShard.Resource.ClusterMetadata.EXPECT().GetCurrentClusterName().Return(cluster.TestCurrentClusterName).AnyTimes() + mockShard.Resource.ClusterMetadata.EXPECT().GetAllClusterInfo().Return(cluster.TestAllClusterInfo).AnyTimes() + mockShard.Resource.ExecutionMgr.EXPECT().RegisterHistoryTaskReader(gomock.Any(), gomock.Any()).Return(nil).Times(len(readerScopes)) + + base := newQueueBase( + mockShard, + tasks.CategoryTimer, + nil, + s.mockScheduler, + s.mockRescheduler, + NewNoopPriorityAssigner(), + nil, + s.options, + s.rateLimiter, + NoopReaderCompletionFn, + s.logger, + s.metricsHandler, + ) + base.checkpointTimer = time.NewTimer(s.options.CheckpointInterval()) + + s.True(scopeMinKey.CompareTo(base.exclusiveDeletionHighWatermark) == 0) + + // set to a smaller value so that delete will be triggered + currentLowWatermark := tasks.MinimumKey + base.exclusiveDeletionHighWatermark = currentLowWatermark + + gomock.InOrder( + mockShard.Resource.ExecutionMgr.EXPECT().UpdateHistoryTaskReaderProgress(gomock.Any(), gomock.Any()).Times(len(readerScopes)), + mockShard.Resource.ShardMgr.EXPECT().UpdateShard(gomock.Any(), gomock.Any()).DoAndReturn( + func(_ context.Context, request *persistence.UpdateShardRequest) error { + s.QueueStateEqual(persistenceState, request.ShardInfo.QueueStates[tasks.CategoryIDTimer]) return nil }, ).Times(1), diff -Nru temporal-1.21.5-1/src/service/history/queues/reader.go temporal-1.22.5/src/service/history/queues/reader.go --- temporal-1.21.5-1/src/service/history/queues/reader.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/queues/reader.go 2024-02-23 09:45:43.000000000 +0000 @@ -49,8 +49,6 @@ type ( Reader interface { - common.Daemon - Scopes() []Scope WalkSlices(SliceIterator) @@ -63,6 +61,8 @@ Notify() Pause(time.Duration) + Start() + Stop() } ReaderOptions struct { @@ -527,9 +527,10 @@ executable Executable, ) { now := r.timeSource.Now() - // Persistence layer may lose precision when persisting 
the task, which essentially move - // task fire time forward. Need to account for that when submitting the task. - if fireTime := executable.GetKey().FireTime.Add(persistence.ScheduledTaskMinPrecision); now.Before(fireTime) { + // Persistence layer may lose precision when persisting the task, which essentially moves + // task fire time backward. Need to account for that when submitting the task. + fireTime := executable.GetKey().FireTime.Add(persistence.ScheduledTaskMinPrecision) + if now.Before(fireTime) { r.rescheduler.Add(executable, fireTime) return } diff -Nru temporal-1.21.5-1/src/service/history/queues/reader_test.go temporal-1.22.5/src/service/history/queues/reader_test.go --- temporal-1.21.5-1/src/service/history/queues/reader_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/queues/reader_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -77,7 +77,7 @@ s.metricsHandler = metrics.NoopMetricsHandler s.executableInitializer = func(readerID int64, t tasks.Task) Executable { - return NewExecutable(readerID, t, nil, nil, nil, NewNoopPriorityAssigner(), clock.NewRealTimeSource(), nil, nil, nil, metrics.NoopMetricsHandler) + return NewExecutable(readerID, t, nil, nil, nil, NewNoopPriorityAssigner(), clock.NewRealTimeSource(), nil, nil, nil, metrics.NoopMetricsHandler, func() bool { return false }) } s.monitor = newMonitor(tasks.CategoryTypeScheduled, clock.NewRealTimeSource(), &MonitorOptions{ PendingTasksCriticalCount: dynamicconfig.GetIntPropertyFn(1000), diff -Nru temporal-1.21.5-1/src/service/history/queues/rescheduler.go temporal-1.22.5/src/service/history/queues/rescheduler.go --- temporal-1.21.5-1/src/service/history/queues/rescheduler.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/queues/rescheduler.go 2024-02-23 09:45:43.000000000 +0000 @@ -38,8 +38,10 @@ "go.temporal.io/server/common/log" "go.temporal.io/server/common/log/tag" "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/persistence" ctasks "go.temporal.io/server/common/tasks" "go.temporal.io/server/common/timer" + "go.temporal.io/server/common/util" ) const ( @@ -53,8 +55,6 @@ // Rescheduler buffers task executables that are failed to process and // resubmit them to the task scheduler when the Reschedule method is called. Rescheduler interface { - common.Daemon - // Add task executable to the rescheduler. Add(task Executable, rescheduleTime time.Time) @@ -65,6 +65,8 @@ // Len returns the total number of task executables waiting to be rescheduled. 
Len() int + Start() + Stop() } rescheduledExecuable struct { @@ -176,7 +178,12 @@ items := make([]rescheduledExecuable, 0, pq.Len()) for !pq.IsEmpty() { rescheduled := pq.Remove() - rescheduled.rescheduleTime = now + // scheduled queue pre-fetches tasks, + // so we need to make sure the reschedule time is not before the task scheduled time + rescheduled.rescheduleTime = util.MaxTime( + rescheduled.executable.GetKey().FireTime.Add(persistence.ScheduledTaskMinPrecision), + now, + ) items = append(items, rescheduled) } r.pqMap[key] = r.newPriorityQueue(items) diff -Nru temporal-1.21.5-1/src/service/history/queues/rescheduler_test.go temporal-1.22.5/src/service/history/queues/rescheduler_test.go --- temporal-1.21.5-1/src/service/history/queues/rescheduler_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/queues/rescheduler_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -38,6 +38,7 @@ "go.temporal.io/server/common/log" "go.temporal.io/server/common/metrics" ctasks "go.temporal.io/server/common/tasks" + "go.temporal.io/server/service/history/tasks" ) type ( @@ -221,7 +222,7 @@ s.Equal(0, s.rescheduler.Len()) } -func (s *rescheudulerSuite) TestImmdiateReschedule() { +func (s *rescheudulerSuite) TestForceReschedule_ImmediateTask() { now := time.Now() s.timeSource.Update(now) namespaceID := s.mockScheduler.TaskChannelKeyFn()(nil).NamespaceID @@ -234,8 +235,9 @@ taskWG.Add(numTask) for i := 0; i != numTask; i++ { mockTask := NewMockExecutable(s.controller) - mockTask.EXPECT().State().Return(ctasks.TaskStatePending).Times(1) + mockTask.EXPECT().State().Return(ctasks.TaskStatePending).AnyTimes() mockTask.EXPECT().SetScheduledTime(gomock.Any()).AnyTimes() + mockTask.EXPECT().GetKey().Return(tasks.NewImmediateKey(int64(i))).AnyTimes() s.rescheduler.Add( mockTask, now.Add(time.Minute+time.Duration(rand.Int63n(time.Minute.Nanoseconds()))), @@ -251,3 +253,44 @@ taskWG.Wait() s.Equal(0, s.rescheduler.Len()) } + +func (s *rescheudulerSuite) TestForceReschedule_ScheduledTask() { + now := time.Now() + s.timeSource.Update(now) + namespaceID := s.mockScheduler.TaskChannelKeyFn()(nil).NamespaceID + + s.rescheduler.Start() + defer s.rescheduler.Stop() + + taskWG := &sync.WaitGroup{} + taskWG.Add(1) + + retryingTask := NewMockExecutable(s.controller) + retryingTask.EXPECT().State().Return(ctasks.TaskStatePending).AnyTimes() + retryingTask.EXPECT().SetScheduledTime(gomock.Any()).AnyTimes() + retryingTask.EXPECT().GetKey().Return(tasks.NewKey(now.Add(-time.Minute), int64(1))).AnyTimes() + s.rescheduler.Add( + retryingTask, + now.Add(time.Minute), + ) + + // schedule queue pre-fetches tasks + futureTaskTimestamp := now.Add(time.Second) + futureTask := NewMockExecutable(s.controller) + futureTask.EXPECT().State().Return(ctasks.TaskStatePending).AnyTimes() + futureTask.EXPECT().SetScheduledTime(gomock.Any()).AnyTimes() + futureTask.EXPECT().GetKey().Return(tasks.NewKey(futureTaskTimestamp, int64(2))).AnyTimes() + s.rescheduler.Add( + futureTask, + futureTaskTimestamp, + ) + + s.mockScheduler.EXPECT().TrySubmit(gomock.Any()).DoAndReturn(func(_ Executable) bool { + taskWG.Done() + return true + }).Times(1) + + s.rescheduler.Reschedule(namespaceID) + taskWG.Wait() + s.Equal(1, s.rescheduler.Len()) +} diff -Nru temporal-1.21.5-1/src/service/history/queues/scheduler.go temporal-1.22.5/src/service/history/queues/scheduler.go --- temporal-1.21.5-1/src/service/history/queues/scheduler.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/queues/scheduler.go 2024-02-23 
09:45:43.000000000 +0000 @@ -27,7 +27,6 @@ package queues import ( - "go.temporal.io/server/common" "go.temporal.io/server/common/clock" "go.temporal.io/server/common/dynamicconfig" "go.temporal.io/server/common/log" @@ -54,13 +53,13 @@ // be called on all executables that have been successfully submited. // Reschedule() will only be called after the Scheduler has been stopped Scheduler interface { - common.Daemon - Submit(Executable) TrySubmit(Executable) bool TaskChannelKeyFn() TaskChannelKeyFn ChannelWeightFn() ChannelWeightFn + Start() + Stop() } TaskChannelKey struct { diff -Nru temporal-1.21.5-1/src/service/history/queues/slice_test.go temporal-1.22.5/src/service/history/queues/slice_test.go --- temporal-1.21.5-1/src/service/history/queues/slice_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/queues/slice_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -69,7 +69,7 @@ s.controller = gomock.NewController(s.T()) s.executableInitializer = func(readerID int64, t tasks.Task) Executable { - return NewExecutable(readerID, t, nil, nil, nil, NewNoopPriorityAssigner(), clock.NewRealTimeSource(), nil, nil, nil, metrics.NoopMetricsHandler) + return NewExecutable(readerID, t, nil, nil, nil, NewNoopPriorityAssigner(), clock.NewRealTimeSource(), nil, nil, nil, metrics.NoopMetricsHandler, func() bool { return false }) } s.monitor = newMonitor(tasks.CategoryTypeScheduled, clock.NewRealTimeSource(), &MonitorOptions{ PendingTasksCriticalCount: dynamicconfig.GetIntPropertyFn(1000), diff -Nru temporal-1.21.5-1/src/service/history/queues/speculative_workflow_task_timeout_queue.go temporal-1.22.5/src/service/history/queues/speculative_workflow_task_timeout_queue.go --- temporal-1.21.5-1/src/service/history/queues/speculative_workflow_task_timeout_queue.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/queues/speculative_workflow_task_timeout_queue.go 2024-02-23 09:45:43.000000000 +0000 @@ -106,6 +106,7 @@ q.clusterMetadata, q.logger, q.metricsHandler, + func() bool { return false }, ), wttt) q.timeoutQueue.Add(executable) } diff -Nru temporal-1.21.5-1/src/service/history/replication/ack_manager.go temporal-1.22.5/src/service/history/replication/ack_manager.go --- temporal-1.21.5-1/src/service/history/replication/ack_manager.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/replication/ack_manager.go 2024-02-23 09:45:43.000000000 +0000 @@ -84,6 +84,7 @@ shard shard.Context config *configs.Config workflowCache wcache.Cache + eventBlobCache persistence.XDCCache executionMgr persistence.ExecutionManager metricsHandler metrics.Handler logger log.Logger @@ -115,6 +116,7 @@ func NewAckManager( shard shard.Context, workflowCache wcache.Cache, + eventBlobCache persistence.XDCCache, executionMgr persistence.ExecutionManager, logger log.Logger, ) AckManager { @@ -131,6 +133,7 @@ shard: shard, config: shard.GetConfig(), workflowCache: workflowCache, + eventBlobCache: eventBlobCache, executionMgr: executionMgr, metricsHandler: shard.GetMetricsHandler().WithTags(metrics.OperationTag(metrics.ReplicatorQueueProcessorScope)), logger: log.With(logger, tag.ComponentReplicatorQueue), @@ -272,9 +275,6 @@ p.metricsHandler.Histogram(metrics.ReplicationTasksFetched.GetMetricName(), metrics.ReplicationTasksFetched.GetMetricUnit()). Record(int64(len(replicationTasks))) - p.metricsHandler.Histogram(metrics.ReplicationTasksReturned.GetMetricName(), metrics.ReplicationTasksReturned.GetMetricUnit()). 
- Record(int64(len(replicationTasks))) - replicationEventTime := timestamp.TimePtr(p.shard.GetTimeSource().Now()) if len(replicationTasks) > 0 { replicationEventTime = replicationTasks[len(replicationTasks)-1].GetVisibilityTime() @@ -499,6 +499,7 @@ task, p.shard.GetShardID(), p.workflowCache, + p.eventBlobCache, p.executionMgr, p.logger, ) diff -Nru temporal-1.21.5-1/src/service/history/replication/ack_manager_test.go temporal-1.22.5/src/service/history/replication/ack_manager_test.go --- temporal-1.21.5-1/src/service/history/replication/ack_manager_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/replication/ack_manager_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -122,7 +122,7 @@ workflowCache := wcache.NewCache(s.mockShard) s.replicationAckManager = NewAckManager( - s.mockShard, workflowCache, s.mockExecutionMgr, s.logger, + s.mockShard, workflowCache, nil, s.mockExecutionMgr, s.logger, ).(*ackMgrImpl) } @@ -307,8 +307,7 @@ eventsCache := events.NewEventsCache( s.mockShard.GetShardID(), - s.mockShard.GetConfig().EventsCacheInitialSize(), - s.mockShard.GetConfig().EventsCacheMaxSize(), + s.mockShard.GetConfig().EventsCacheMaxSizeBytes(), s.mockShard.GetConfig().EventsCacheTTL(), s.mockShard.GetExecutionManager(), false, @@ -359,8 +358,7 @@ eventsCache := events.NewEventsCache( s.mockShard.GetShardID(), - s.mockShard.GetConfig().EventsCacheInitialSize(), - s.mockShard.GetConfig().EventsCacheMaxSize(), + s.mockShard.GetConfig().EventsCacheMaxSizeBytes(), s.mockShard.GetConfig().EventsCacheTTL(), s.mockShard.GetExecutionManager(), false, @@ -411,8 +409,7 @@ eventsCache := events.NewEventsCache( s.mockShard.GetShardID(), - s.mockShard.GetConfig().EventsCacheInitialSize(), - s.mockShard.GetConfig().EventsCacheMaxSize(), + s.mockShard.GetConfig().EventsCacheMaxSizeBytes(), s.mockShard.GetConfig().EventsCacheTTL(), s.mockShard.GetExecutionManager(), false, @@ -500,8 +497,7 @@ eventsCache := events.NewEventsCache( s.mockShard.GetShardID(), - s.mockShard.GetConfig().EventsCacheInitialSize(), - s.mockShard.GetConfig().EventsCacheMaxSize(), + s.mockShard.GetConfig().EventsCacheMaxSizeBytes(), s.mockShard.GetConfig().EventsCacheTTL(), s.mockShard.GetExecutionManager(), false, diff -Nru temporal-1.21.5-1/src/service/history/replication/bi_direction_stream.go temporal-1.22.5/src/service/history/replication/bi_direction_stream.go --- temporal-1.21.5-1/src/service/history/replication/bi_direction_stream.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/replication/bi_direction_stream.go 2024-02-23 09:45:43.000000000 +0000 @@ -186,7 +186,7 @@ return default: s.logger.Error(fmt.Sprintf( - "BiDirectionStreamImpl encountered unexpected error, closing: %T %s", + "BiDirectionStream encountered unexpected error, closing: %T %s", err, err, )) var errResp Resp diff -Nru temporal-1.21.5-1/src/service/history/replication/eager_namespace_refresher.go temporal-1.22.5/src/service/history/replication/eager_namespace_refresher.go --- temporal-1.21.5-1/src/service/history/replication/eager_namespace_refresher.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/service/history/replication/eager_namespace_refresher.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,182 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package replication + +import ( + "context" + "sync" + + "go.temporal.io/api/serviceerror" + "go.temporal.io/server/api/adminservice/v1" + enumsspb "go.temporal.io/server/api/enums/v1" + replicationspb "go.temporal.io/server/api/replication/v1" + "go.temporal.io/server/client" + "go.temporal.io/server/common/headers" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/log/tag" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/namespace" + "go.temporal.io/server/common/persistence" +) + +type ( + EagerNamespaceRefresher interface { + UpdateNamespaceFailoverVersion(namespaceId namespace.ID, targetFailoverVersion int64) error + SyncNamespaceFromSourceCluster(ctx context.Context, namespaceId namespace.ID, sourceCluster string) error + } + + eagerNamespaceRefresherImpl struct { + metadataManager persistence.MetadataManager + namespaceRegistry namespace.Registry + logger log.Logger + lock sync.Mutex + clientBean client.Bean + replicationTaskExecutor namespace.ReplicationTaskExecutor + currentCluster string + metricsHandler metrics.Handler + } +) + +func NewEagerNamespaceRefresher( + metadataManager persistence.MetadataManager, + namespaceRegistry namespace.Registry, + logger log.Logger, + clientBean client.Bean, + replicationTaskExecutor namespace.ReplicationTaskExecutor, + currentCluster string, + metricsHandler metrics.Handler) EagerNamespaceRefresher { + return &eagerNamespaceRefresherImpl{ + metadataManager: metadataManager, + namespaceRegistry: namespaceRegistry, + logger: logger, + clientBean: clientBean, + replicationTaskExecutor: replicationTaskExecutor, + currentCluster: currentCluster, + metricsHandler: metricsHandler, + } +} + +func (e *eagerNamespaceRefresherImpl) UpdateNamespaceFailoverVersion(namespaceId namespace.ID, targetFailoverVersion int64) error { + e.lock.Lock() + defer e.lock.Unlock() + ns, err := e.namespaceRegistry.GetNamespaceByID(namespaceId) + switch err.(type) { + case nil: + case *serviceerror.NamespaceNotFound: + // TODO: Handle NamespaceNotFound case, probably retrieve the namespace from the source cluster? 
+ return nil + default: + // do nothing, as this is a best-effort update of the namespace + e.logger.Debug("Failed to get namespace from registry", tag.Error(err)) + return err + } + + if ns.FailoverVersion() >= targetFailoverVersion { + return nil + } + + ctx := headers.SetCallerInfo(context.TODO(), headers.SystemPreemptableCallerInfo) + resp, err := e.metadataManager.GetNamespace(ctx, &persistence.GetNamespaceRequest{ + ID: namespaceId.String(), + }) + if err != nil { + e.logger.Debug("Failed to get namespace from persistence", tag.Error(err)) + return err + } + + currentFailoverVersion := resp.Namespace.FailoverVersion + if currentFailoverVersion >= targetFailoverVersion { + // DB may have a fresher version of the namespace, so compare again + return nil + } + + metadata, err := e.metadataManager.GetMetadata(ctx) + if err != nil { + e.logger.Debug("Failed to get metadata", tag.Error(err)) + return err + } + + request := &persistence.UpdateNamespaceRequest{ + Namespace: resp.Namespace, + NotificationVersion: metadata.NotificationVersion, + IsGlobalNamespace: resp.IsGlobalNamespace, + } + + request.Namespace.FailoverVersion = targetFailoverVersion + request.Namespace.FailoverNotificationVersion = metadata.NotificationVersion + + // Question: is it ok to only update failover version WITHOUT updating FailoverHistory? + // request.Namespace.ReplicationConfig.FailoverHistory = ?? + + if err := e.metadataManager.UpdateNamespace(ctx, request); err != nil { + e.logger.Info("Failed to update namespace", tag.Error(err)) + return err + } + return nil +} + +func (e *eagerNamespaceRefresherImpl) SyncNamespaceFromSourceCluster(ctx context.Context, namespaceId namespace.ID, sourceCluster string) error { + /* TODO: 1. The lock here is to prevent multiple creations happening at the same time. The current implementation + does not actually help in this case (i.e. after getting the lock, each thread will still fetch from remote and + try to create the namespace). Once we have a mechanism to immediately refresh the cache, we + can add logic to check the cache again before doing the remote call and creating the namespace. + 2. Based on which caller is invoking this method, we may not want to block the caller thread.
+ */ + e.lock.Lock() + defer e.lock.Unlock() + adminClient, err := e.clientBean.GetRemoteAdminClient(sourceCluster) + if err != nil { + return err + } + resp, err := adminClient.GetNamespace(ctx, &adminservice.GetNamespaceRequest{ + Attributes: &adminservice.GetNamespaceRequest_Id{ + Id: namespaceId.String(), + }, + }) + if err != nil { + return err + } + hasCurrentCluster := false + for _, c := range resp.GetReplicationConfig().GetClusters() { + if e.currentCluster == c.GetClusterName() { + hasCurrentCluster = true + } + } + if !hasCurrentCluster { + e.metricsHandler.Counter(metrics.ReplicationOutlierNamespace.GetMetricName()).Record(1) + return serviceerror.NewFailedPrecondition("Namespace does not belong to current cluster") + } + task := &replicationspb.NamespaceTaskAttributes{ + NamespaceOperation: enumsspb.NAMESPACE_OPERATION_CREATE, + Id: resp.GetInfo().Id, + Info: resp.GetInfo(), + Config: resp.GetConfig(), + ReplicationConfig: resp.GetReplicationConfig(), + ConfigVersion: resp.GetConfigVersion(), + FailoverVersion: resp.GetFailoverVersion(), + FailoverHistory: resp.GetFailoverHistory(), + } + return e.replicationTaskExecutor.Execute(ctx, task) +} diff -Nru temporal-1.21.5-1/src/service/history/replication/eager_namespace_refresher_test.go temporal-1.22.5/src/service/history/replication/eager_namespace_refresher_test.go --- temporal-1.21.5-1/src/service/history/replication/eager_namespace_refresher_test.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/service/history/replication/eager_namespace_refresher_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,439 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package replication + +import ( + "context" + "errors" + "testing" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + "github.com/uber-go/tally/v4" + enumspb "go.temporal.io/api/enums/v1" + namespacepb "go.temporal.io/api/namespace/v1" + replicationpb "go.temporal.io/api/replication/v1" + "go.temporal.io/api/serviceerror" + "go.temporal.io/server/api/adminservice/v1" + "go.temporal.io/server/api/adminservicemock/v1" + persistencespb "go.temporal.io/server/api/persistence/v1" + "go.temporal.io/server/client" + "go.temporal.io/server/common/cluster" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/namespace" + "go.temporal.io/server/common/persistence" + "go.temporal.io/server/common/primitives/timestamp" + "go.temporal.io/server/service/history/shard" + + "github.com/stretchr/testify/suite" +) + +const mockCurrentCuster = "current_cluster_1" + +type ( + EagerNamespaceRefresherSuite struct { + suite.Suite + *require.Assertions + + controller *gomock.Controller + mockShard *shard.ContextTest + mockMetadataManager *persistence.MockMetadataManager + mockNamespaceRegistry *namespace.MockRegistry + eagerNamespaceRefresher EagerNamespaceRefresher + logger log.Logger + clientBean *client.MockBean + mockReplicationTaskExecutor *namespace.MockReplicationTaskExecutor + currentCluster string + mockMetricsHandler metrics.Handler + remoteAdminClient *adminservicemock.MockAdminServiceClient + } +) + +func (s *EagerNamespaceRefresherSuite) SetupTest() { + s.Assertions = require.New(s.T()) + + s.controller = gomock.NewController(s.T()) + s.logger = log.NewTestLogger() + s.mockMetadataManager = persistence.NewMockMetadataManager(s.controller) + s.mockNamespaceRegistry = namespace.NewMockRegistry(s.controller) + s.clientBean = client.NewMockBean(s.controller) + s.remoteAdminClient = adminservicemock.NewMockAdminServiceClient(s.controller) + s.clientBean.EXPECT().GetRemoteAdminClient(gomock.Any()).Return(s.remoteAdminClient, nil).AnyTimes() + scope := tally.NewTestScope("test", nil) + s.mockReplicationTaskExecutor = namespace.NewMockReplicationTaskExecutor(s.controller) + s.mockMetricsHandler = metrics.NewTallyMetricsHandler(metrics.ClientConfig{}, scope).WithTags( + metrics.ServiceNameTag("serviceName")) + s.eagerNamespaceRefresher = NewEagerNamespaceRefresher( + s.mockMetadataManager, + s.mockNamespaceRegistry, + s.logger, + s.clientBean, + s.mockReplicationTaskExecutor, + mockCurrentCuster, + s.mockMetricsHandler, + ) +} + +func TestEagerNamespaceRefresherSuite(t *testing.T) { + suite.Run(t, new(EagerNamespaceRefresherSuite)) +} + +func (s *EagerNamespaceRefresherSuite) TestUpdateNamespaceFailoverVersion() { + namespaceID := "test-namespace-id" + targetFailoverVersion := int64(100) + currentFailoverVersion := targetFailoverVersion - 1 + + nsResponse := &persistence.GetNamespaceResponse{ + Namespace: &persistencespb.NamespaceDetail{ + FailoverVersion: currentFailoverVersion, + Info: &persistencespb.NamespaceInfo{ + Id: namespace.NewID().String(), + Name: "another random namespace name", + State: enumspb.NAMESPACE_STATE_DELETED, // Still must be included. 
+ Data: make(map[string]string)}, + Config: &persistencespb.NamespaceConfig{ + Retention: timestamp.DurationFromDays(2), + BadBinaries: &namespacepb.BadBinaries{ + Binaries: map[string]*namespacepb.BadBinaryInfo{}, + }}, + ReplicationConfig: &persistencespb.NamespaceReplicationConfig{ + ActiveClusterName: cluster.TestAlternativeClusterName, + Clusters: []string{ + cluster.TestCurrentClusterName, + cluster.TestAlternativeClusterName, + }, + }, + FailoverNotificationVersion: 0, + }, + } + ns := namespace.FromPersistentState(nsResponse) + s.mockNamespaceRegistry.EXPECT().GetNamespaceByID(namespace.ID(namespaceID)).Return(ns, nil).Times(1) + + s.mockMetadataManager.EXPECT().GetMetadata(gomock.Any()).Return(&persistence.GetMetadataResponse{NotificationVersion: 123}, nil).Times(1) + s.mockMetadataManager.EXPECT().GetNamespace(gomock.Any(), &persistence.GetNamespaceRequest{ + ID: namespaceID, + }).Return(nsResponse, nil).Times(1) + s.mockMetadataManager.EXPECT().UpdateNamespace(gomock.Any(), gomock.Any()).Return(nil).Times(1) + + err := s.eagerNamespaceRefresher.UpdateNamespaceFailoverVersion(namespace.ID(namespaceID), targetFailoverVersion) + + s.Nil(err) +} + +func (s *EagerNamespaceRefresherSuite) TestUpdateNamespaceFailoverVersion_TargetVersionSmallerThanVersionInCache() { + namespaceID := "test-namespace-id" + targetFailoverVersion := int64(100) + currentFailoverVersion := targetFailoverVersion + 1 + + nsResponse := &persistence.GetNamespaceResponse{ + Namespace: &persistencespb.NamespaceDetail{ + FailoverVersion: currentFailoverVersion, + Info: &persistencespb.NamespaceInfo{ + Id: namespace.NewID().String(), + Name: "another random namespace name", + State: enumspb.NAMESPACE_STATE_DELETED, // Still must be included. + Data: make(map[string]string)}, + Config: &persistencespb.NamespaceConfig{ + Retention: timestamp.DurationFromDays(2), + BadBinaries: &namespacepb.BadBinaries{ + Binaries: map[string]*namespacepb.BadBinaryInfo{}, + }}, + ReplicationConfig: &persistencespb.NamespaceReplicationConfig{ + ActiveClusterName: cluster.TestAlternativeClusterName, + Clusters: []string{ + cluster.TestCurrentClusterName, + cluster.TestAlternativeClusterName, + }, + }, + FailoverNotificationVersion: 0, + }, + } + ns := namespace.FromPersistentState(nsResponse) + s.mockNamespaceRegistry.EXPECT().GetNamespaceByID(namespace.ID(namespaceID)).Return(ns, nil).Times(1) + + err := s.eagerNamespaceRefresher.UpdateNamespaceFailoverVersion(namespace.ID(namespaceID), targetFailoverVersion) + + s.Nil(err) +} + +func (s *EagerNamespaceRefresherSuite) TestUpdateNamespaceFailoverVersion_TargetVersionSmallerThanVersionInPersistent() { + namespaceID := "test-namespace-id" + targetFailoverVersion := int64(100) + currentFailoverVersion := targetFailoverVersion - 1 + + nsFromCache := namespace.FromPersistentState(&persistence.GetNamespaceResponse{ + Namespace: &persistencespb.NamespaceDetail{ + FailoverVersion: currentFailoverVersion, + Info: &persistencespb.NamespaceInfo{ + Id: namespace.NewID().String(), + Name: "another random namespace name", + State: enumspb.NAMESPACE_STATE_DELETED, + Data: make(map[string]string)}, + Config: &persistencespb.NamespaceConfig{ + Retention: timestamp.DurationFromDays(2), + BadBinaries: &namespacepb.BadBinaries{ + Binaries: map[string]*namespacepb.BadBinaryInfo{}, + }}, + ReplicationConfig: &persistencespb.NamespaceReplicationConfig{ + ActiveClusterName: cluster.TestAlternativeClusterName, + Clusters: []string{ + cluster.TestCurrentClusterName, + cluster.TestAlternativeClusterName, + }, + }, 
+ FailoverNotificationVersion: 0, + }, + }) + + nsFromPersistent := &persistence.GetNamespaceResponse{ + Namespace: &persistencespb.NamespaceDetail{ + FailoverVersion: targetFailoverVersion, + Info: &persistencespb.NamespaceInfo{ + Id: namespace.NewID().String(), + Name: "another random namespace name", + State: enumspb.NAMESPACE_STATE_DELETED, + Data: make(map[string]string)}, + Config: &persistencespb.NamespaceConfig{ + Retention: timestamp.DurationFromDays(2), + BadBinaries: &namespacepb.BadBinaries{ + Binaries: map[string]*namespacepb.BadBinaryInfo{}, + }}, + ReplicationConfig: &persistencespb.NamespaceReplicationConfig{ + ActiveClusterName: cluster.TestAlternativeClusterName, + Clusters: []string{ + cluster.TestCurrentClusterName, + cluster.TestAlternativeClusterName, + }, + }, + FailoverNotificationVersion: 0, + }, + } + + s.mockNamespaceRegistry.EXPECT().GetNamespaceByID(namespace.ID(namespaceID)).Return(nsFromCache, nil).Times(1) + + s.mockMetadataManager.EXPECT().GetNamespace(gomock.Any(), &persistence.GetNamespaceRequest{ + ID: namespaceID, + }).Return(nsFromPersistent, nil).Times(1) + + s.mockMetadataManager.EXPECT().UpdateNamespace(gomock.Any(), gomock.Any()).Return(nil).Times(0) + + err := s.eagerNamespaceRefresher.UpdateNamespaceFailoverVersion(namespace.ID(namespaceID), targetFailoverVersion) + + s.Nil(err) +} + +func (s *EagerNamespaceRefresherSuite) TestUpdateNamespaceFailoverVersion_NamespaceNotFoundFromRegistry() { + namespaceID := "test-namespace-id" + targetFailoverVersion := int64(100) + + s.mockNamespaceRegistry.EXPECT().GetNamespaceByID(namespace.ID(namespaceID)).Return(nil, serviceerror.NewNamespaceNotFound("namespace not found")).Times(1) + + err := s.eagerNamespaceRefresher.UpdateNamespaceFailoverVersion(namespace.ID(namespaceID), targetFailoverVersion) + s.Nil(err) +} + +func (s *EagerNamespaceRefresherSuite) TestUpdateNamespaceFailoverVersion_GetNamespaceErrorFromRegistry() { + namespaceID := "test-namespace-id" + targetFailoverVersion := int64(100) + + s.mockNamespaceRegistry.EXPECT().GetNamespaceByID(namespace.ID(namespaceID)).Return(nil, errors.New("some error")).Times(1) + + err := s.eagerNamespaceRefresher.UpdateNamespaceFailoverVersion(namespace.ID(namespaceID), targetFailoverVersion) + s.Error(err) +} + +func (s *EagerNamespaceRefresherSuite) TestUpdateNamespaceFailoverVersion_GetNamespaceErrorFromPersistent() { + namespaceID := "test-namespace-id" + targetFailoverVersion := int64(100) + currentFailoverVersion := targetFailoverVersion - 1 + + nsResponse := &persistence.GetNamespaceResponse{ + Namespace: &persistencespb.NamespaceDetail{ + FailoverVersion: currentFailoverVersion, + Info: &persistencespb.NamespaceInfo{ + Id: namespace.NewID().String(), + Name: "another random namespace name", + State: enumspb.NAMESPACE_STATE_DELETED, // Still must be included. 
+ Data: make(map[string]string)}, + Config: &persistencespb.NamespaceConfig{ + Retention: timestamp.DurationFromDays(2), + BadBinaries: &namespacepb.BadBinaries{ + Binaries: map[string]*namespacepb.BadBinaryInfo{}, + }}, + ReplicationConfig: &persistencespb.NamespaceReplicationConfig{ + ActiveClusterName: cluster.TestAlternativeClusterName, + Clusters: []string{ + cluster.TestCurrentClusterName, + cluster.TestAlternativeClusterName, + }, + }, + FailoverNotificationVersion: 0, + }, + } + ns := namespace.FromPersistentState(nsResponse) + s.mockNamespaceRegistry.EXPECT().GetNamespaceByID(namespace.ID(namespaceID)).Return(ns, nil).Times(1) + + s.mockMetadataManager.EXPECT().GetNamespace(gomock.Any(), &persistence.GetNamespaceRequest{ + ID: namespaceID, + }).Return(nil, errors.New("some error")).Times(1) + // No more interaction with metadata manager + s.mockMetadataManager.EXPECT().GetMetadata(gomock.Any()).Return(&persistence.GetMetadataResponse{NotificationVersion: 123}, nil).Times(0) + s.mockMetadataManager.EXPECT().UpdateNamespace(gomock.Any(), gomock.Any()).Return(nil).Times(0) + + err := s.eagerNamespaceRefresher.UpdateNamespaceFailoverVersion(namespace.ID(namespaceID), targetFailoverVersion) + + s.Error(err) +} + +func (s *EagerNamespaceRefresherSuite) TestUpdateNamespaceFailoverVersion_GetMetadataErrorFrom() { + namespaceID := "test-namespace-id" + targetFailoverVersion := int64(100) + currentFailoverVersion := targetFailoverVersion - 1 + + nsResponse := &persistence.GetNamespaceResponse{ + Namespace: &persistencespb.NamespaceDetail{ + FailoverVersion: currentFailoverVersion, + Info: &persistencespb.NamespaceInfo{ + Id: namespace.NewID().String(), + Name: "another random namespace name", + State: enumspb.NAMESPACE_STATE_DELETED, // Still must be included. 
+ Data: make(map[string]string)}, + Config: &persistencespb.NamespaceConfig{ + Retention: timestamp.DurationFromDays(2), + BadBinaries: &namespacepb.BadBinaries{ + Binaries: map[string]*namespacepb.BadBinaryInfo{}, + }}, + ReplicationConfig: &persistencespb.NamespaceReplicationConfig{ + ActiveClusterName: cluster.TestAlternativeClusterName, + Clusters: []string{ + cluster.TestCurrentClusterName, + cluster.TestAlternativeClusterName, + }, + }, + FailoverNotificationVersion: 0, + }, + } + ns := namespace.FromPersistentState(nsResponse) + s.mockNamespaceRegistry.EXPECT().GetNamespaceByID(namespace.ID(namespaceID)).Return(ns, nil).Times(1) + + s.mockMetadataManager.EXPECT().GetNamespace(gomock.Any(), &persistence.GetNamespaceRequest{ + ID: namespaceID, + }).Return(nsResponse, nil).Times(1) + s.mockMetadataManager.EXPECT().GetMetadata(gomock.Any()).Return(nil, errors.New("some error")).Times(1) + + // No more interaction with metadata manager + s.mockMetadataManager.EXPECT().UpdateNamespace(gomock.Any(), gomock.Any()).Return(nil).Times(0) + + err := s.eagerNamespaceRefresher.UpdateNamespaceFailoverVersion(namespace.ID(namespaceID), targetFailoverVersion) + + s.Error(err) +} + +func (s *EagerNamespaceRefresherSuite) TestSyncNamespaceFromSourceCluster_Success() { + namespaceId := namespace.ID("abc") + + nsResponse := &adminservice.GetNamespaceResponse{ + Info: &namespacepb.NamespaceInfo{ + Id: namespace.NewID().String(), + Name: "another random namespace name", + State: enumspb.NAMESPACE_STATE_DELETED, + Data: make(map[string]string)}, + ReplicationConfig: &replicationpb.NamespaceReplicationConfig{ + ActiveClusterName: cluster.TestAlternativeClusterName, + Clusters: []*replicationpb.ClusterReplicationConfig{ + {ClusterName: mockCurrentCuster}, + {ClusterName: "not_current_cluster_1"}, + }, + }, + } + s.remoteAdminClient.EXPECT().GetNamespace(gomock.Any(), &adminservice.GetNamespaceRequest{ + Attributes: &adminservice.GetNamespaceRequest_Id{ + Id: namespaceId.String(), + }, + }).Return(nsResponse, nil) + s.mockReplicationTaskExecutor.EXPECT().Execute(gomock.Any(), gomock.Any()).Return(nil).Times(1) + + err := s.eagerNamespaceRefresher.SyncNamespaceFromSourceCluster(context.Background(), namespaceId, "currentCluster") + s.Nil(err) +} + +func (s *EagerNamespaceRefresherSuite) TestSyncNamespaceFromSourceCluster_NamespaceNotBelongsToCurrentCluster() { + namespaceId := namespace.ID("abc") + + nsResponse := &adminservice.GetNamespaceResponse{ + Info: &namespacepb.NamespaceInfo{ + Id: namespace.NewID().String(), + Name: "another random namespace name", + State: enumspb.NAMESPACE_STATE_DELETED, + Data: make(map[string]string)}, + ReplicationConfig: &replicationpb.NamespaceReplicationConfig{ + ActiveClusterName: cluster.TestAlternativeClusterName, + Clusters: []*replicationpb.ClusterReplicationConfig{ + {ClusterName: "not_current_cluster_1"}, + {ClusterName: "not_current_cluster_2"}, + }, + }, + } + s.remoteAdminClient.EXPECT().GetNamespace(gomock.Any(), &adminservice.GetNamespaceRequest{ + Attributes: &adminservice.GetNamespaceRequest_Id{ + Id: namespaceId.String(), + }, + }).Return(nsResponse, nil).Times(1) + + err := s.eagerNamespaceRefresher.SyncNamespaceFromSourceCluster(context.Background(), namespaceId, "currentCluster") + s.Error(err) + s.IsType(&serviceerror.FailedPrecondition{}, err) +} + +func (s *EagerNamespaceRefresherSuite) TestSyncNamespaceFromSourceCluster_ExecutorReturnsError() { + namespaceId := namespace.ID("abc") + + nsResponse := &adminservice.GetNamespaceResponse{ + Info: 
&namespacepb.NamespaceInfo{ + Id: namespace.NewID().String(), + Name: "another random namespace name", + State: enumspb.NAMESPACE_STATE_DELETED, + Data: make(map[string]string)}, + ReplicationConfig: &replicationpb.NamespaceReplicationConfig{ + ActiveClusterName: cluster.TestAlternativeClusterName, + Clusters: []*replicationpb.ClusterReplicationConfig{ + {ClusterName: mockCurrentCuster}, + {ClusterName: "not_current_cluster_2"}, + }, + }, + } + s.remoteAdminClient.EXPECT().GetNamespace(gomock.Any(), &adminservice.GetNamespaceRequest{ + Attributes: &adminservice.GetNamespaceRequest_Id{ + Id: namespaceId.String(), + }, + }).Return(nsResponse, nil).Times(1) + + expectedError := errors.New("some error") + s.mockReplicationTaskExecutor.EXPECT().Execute(gomock.Any(), gomock.Any()).Return(expectedError) + err := s.eagerNamespaceRefresher.SyncNamespaceFromSourceCluster(context.Background(), namespaceId, "currentCluster") + s.Error(err) + s.Equal(expectedError, err) +} diff -Nru temporal-1.21.5-1/src/service/history/replication/executable_activity_state_task.go temporal-1.22.5/src/service/history/replication/executable_activity_state_task.go --- temporal-1.21.5-1/src/service/history/replication/executable_activity_state_task.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/replication/executable_activity_state_task.go 2024-02-23 09:45:43.000000000 +0000 @@ -147,15 +147,23 @@ ctx, cancel := newTaskContext(namespaceName) defer cancel() - if resendErr := e.Resend( + if doContinue, resendErr := e.Resend( ctx, e.sourceClusterName, retryErr, - ); resendErr != nil { + ResendAttempt, + ); resendErr != nil || !doContinue { return err } return e.Execute() default: + e.Logger.Error("activity state replication task encountered error", + tag.WorkflowNamespaceID(e.NamespaceID), + tag.WorkflowID(e.WorkflowID), + tag.WorkflowRunID(e.RunID), + tag.TaskID(e.ExecutableTask.TaskID()), + tag.Error(err), + ) return err } } diff -Nru temporal-1.21.5-1/src/service/history/replication/executable_activity_state_task_test.go temporal-1.22.5/src/service/history/replication/executable_activity_state_task_test.go --- temporal-1.21.5-1/src/service/history/replication/executable_activity_state_task_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/replication/executable_activity_state_task_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -246,7 +246,7 @@ rand.Int63(), rand.Int63(), ) - s.executableTask.EXPECT().Resend(gomock.Any(), s.sourceClusterName, err).Return(nil) + s.executableTask.EXPECT().Resend(gomock.Any(), s.sourceClusterName, err, ResendAttempt).Return(true, nil) s.NoError(s.task.HandleErr(err)) } @@ -265,7 +265,7 @@ rand.Int63(), rand.Int63(), ) - s.executableTask.EXPECT().Resend(gomock.Any(), s.sourceClusterName, err).Return(errors.New("OwO")) + s.executableTask.EXPECT().Resend(gomock.Any(), s.sourceClusterName, err, ResendAttempt).Return(false, errors.New("OwO")) s.Equal(err, s.task.HandleErr(err)) } diff -Nru temporal-1.21.5-1/src/service/history/replication/executable_history_task.go temporal-1.22.5/src/service/history/replication/executable_history_task.go --- temporal-1.21.5-1/src/service/history/replication/executable_history_task.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/replication/executable_history_task.go 2024-02-23 09:45:43.000000000 +0000 @@ -144,15 +144,23 @@ ctx, cancel := newTaskContext(namespaceName) defer cancel() - if resendErr := e.Resend( + if doContinue, resendErr := e.Resend( ctx, 
e.sourceClusterName, retryErr, - ); resendErr != nil { + ResendAttempt, + ); resendErr != nil || !doContinue { return err } return e.Execute() default: + e.Logger.Error("history replication task encountered error", + tag.WorkflowNamespaceID(e.NamespaceID), + tag.WorkflowID(e.WorkflowID), + tag.WorkflowRunID(e.RunID), + tag.TaskID(e.ExecutableTask.TaskID()), + tag.Error(err), + ) return err } } diff -Nru temporal-1.21.5-1/src/service/history/replication/executable_history_task_test.go temporal-1.22.5/src/service/history/replication/executable_history_task_test.go --- temporal-1.21.5-1/src/service/history/replication/executable_history_task_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/replication/executable_history_task_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -243,7 +243,7 @@ rand.Int63(), rand.Int63(), ) - s.executableTask.EXPECT().Resend(gomock.Any(), s.sourceClusterName, err).Return(nil) + s.executableTask.EXPECT().Resend(gomock.Any(), s.sourceClusterName, err, ResendAttempt).Return(true, nil) s.NoError(s.task.HandleErr(err)) } @@ -262,7 +262,7 @@ rand.Int63(), rand.Int63(), ) - s.executableTask.EXPECT().Resend(gomock.Any(), s.sourceClusterName, err).Return(errors.New("OwO")) + s.executableTask.EXPECT().Resend(gomock.Any(), s.sourceClusterName, err, ResendAttempt).Return(false, errors.New("OwO")) s.Equal(err, s.task.HandleErr(err)) } diff -Nru temporal-1.21.5-1/src/service/history/replication/executable_task.go temporal-1.22.5/src/service/history/replication/executable_task.go --- temporal-1.21.5-1/src/service/history/replication/executable_task.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/replication/executable_task.go 2024-02-23 09:45:43.000000000 +0000 @@ -58,14 +58,17 @@ const ( applyReplicationTimeout = 20 * time.Second + + ResendAttempt = 2 ) var ( TaskRetryPolicy = backoff.NewExponentialRetryPolicy(1 * time.Second). - WithBackoffCoefficient(1.2). - WithMaximumInterval(5 * time.Second). - WithMaximumAttempts(80). - WithExpirationInterval(5 * time.Minute) + WithBackoffCoefficient(1.2). + WithMaximumInterval(5 * time.Second). + WithMaximumAttempts(80). 
+ WithExpirationInterval(5 * time.Minute) + ErrResendAttemptExceeded = serviceerror.NewInternal("resend history attempts exceeded") ) type ( @@ -86,7 +89,8 @@ ctx context.Context, remoteCluster string, retryErr *serviceerrors.RetryReplication, - ) error + remainingAttempt int, + ) (bool, error) DeleteWorkflow( ctx context.Context, workflowKey definition.WorkflowKey, @@ -271,7 +275,20 @@ ctx context.Context, remoteCluster string, retryErr *serviceerrors.RetryReplication, -) error { + remainingAttempt int, +) (bool, error) { + remainingAttempt-- + if remainingAttempt < 0 { + e.Logger.Error("resend history attempts exceeded", + tag.WorkflowNamespaceID(retryErr.NamespaceId), + tag.WorkflowID(retryErr.WorkflowId), + tag.WorkflowRunID(retryErr.RunId), + tag.Value(retryErr), + tag.Error(ErrResendAttemptExceeded), + ) + return false, ErrResendAttemptExceeded + } + e.MetricsHandler.Counter(metrics.ClientRequests.GetMetricName()).Record( 1, metrics.OperationTag(e.metricsTag+"Resend"), @@ -284,7 +301,7 @@ ) }() - resendErr := e.ProcessToolBox.NDCHistoryResender.SendSingleWorkflowHistory( + switch resendErr := e.ProcessToolBox.NDCHistoryResender.SendSingleWorkflowHistory( ctx, remoteCluster, namespace.ID(retryErr.NamespaceId), @@ -294,11 +311,10 @@ retryErr.StartEventVersion, retryErr.EndEventId, retryErr.EndEventVersion, - ) - switch resendErr.(type) { + ).(type) { case nil: // no-op - return nil + return true, nil case *serviceerror.NotFound: e.Logger.Error( "workflow not found in source cluster, proceed to cleanup", @@ -307,7 +323,7 @@ tag.WorkflowRunID(retryErr.RunId), ) // workflow is not found in source cluster, cleanup workflow in target cluster - return e.DeleteWorkflow( + return false, e.DeleteWorkflow( ctx, definition.NewWorkflowKey( retryErr.NamespaceId, @@ -315,9 +331,38 @@ retryErr.RunId, ), ) + case *serviceerrors.RetryReplication: + // it is possible that resend will trigger another resend, e.g. + // 1. replicating a workflow which is a reset workflow (call this workflow `new workflow`) + // 2. base workflow (call this workflow `old workflow`) of reset workflow is deleted on + // src cluster and never replicated to target cluster + // 3. when any of events of the new workflow arrive at target cluster + // a. using base workflow info to resend until branching point between old & new workflow + // b. attempting to use old workflow history events to replay for mutable state then apply new workflow events + // c. attempt failed due to old workflow does not exist + // d. 
return error to resend new workflow before the branching point + + // handle 2nd resend error, then 1st resend error + if _, err := e.Resend(ctx, remoteCluster, resendErr, remainingAttempt); err == nil { + return e.Resend(ctx, remoteCluster, retryErr, remainingAttempt) + } + e.Logger.Error("error resend history for history event", + tag.WorkflowNamespaceID(retryErr.NamespaceId), + tag.WorkflowID(retryErr.WorkflowId), + tag.WorkflowRunID(retryErr.RunId), + tag.Value(retryErr), + tag.Error(resendErr), + ) + return false, resendErr default: - e.Logger.Error("error resend history for history event", tag.Error(resendErr)) - return resendErr + e.Logger.Error("error resend history for history event", + tag.WorkflowNamespaceID(retryErr.NamespaceId), + tag.WorkflowID(retryErr.WorkflowId), + tag.WorkflowRunID(retryErr.RunId), + tag.Value(retryErr), + tag.Error(resendErr), + ) + return false, resendErr } } diff -Nru temporal-1.21.5-1/src/service/history/replication/executable_task_mock.go temporal-1.22.5/src/service/history/replication/executable_task_mock.go --- temporal-1.21.5-1/src/service/history/replication/executable_task_mock.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/replication/executable_task_mock.go 2024-02-23 09:45:43.000000000 +0000 @@ -182,17 +182,18 @@ } // Resend mocks base method. -func (m *MockExecutableTask) Resend(ctx context.Context, remoteCluster string, retryErr *serviceerror.RetryReplication) error { +func (m *MockExecutableTask) Resend(ctx context.Context, remoteCluster string, retryErr *serviceerror.RetryReplication, remainingAttempt int) (bool, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Resend", ctx, remoteCluster, retryErr) - ret0, _ := ret[0].(error) - return ret0 + ret := m.ctrl.Call(m, "Resend", ctx, remoteCluster, retryErr, remainingAttempt) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 } // Resend indicates an expected call of Resend. -func (mr *MockExecutableTaskMockRecorder) Resend(ctx, remoteCluster, retryErr interface{}) *gomock.Call { +func (mr *MockExecutableTaskMockRecorder) Resend(ctx, remoteCluster, retryErr, remainingAttempt interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Resend", reflect.TypeOf((*MockExecutableTask)(nil).Resend), ctx, remoteCluster, retryErr) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Resend", reflect.TypeOf((*MockExecutableTask)(nil).Resend), ctx, remoteCluster, retryErr, remainingAttempt) } // RetryPolicy mocks base method. 
diff -Nru temporal-1.21.5-1/src/service/history/replication/executable_task_test.go temporal-1.22.5/src/service/history/replication/executable_task_test.go --- temporal-1.21.5-1/src/service/history/replication/executable_task_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/replication/executable_task_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -270,8 +270,9 @@ resendErr.EndEventVersion, ).Return(nil) - err := s.task.Resend(context.Background(), remoteCluster, resendErr) + doContinue, err := s.task.Resend(context.Background(), remoteCluster, resendErr, ResendAttempt) s.NoError(err) + s.True(doContinue) } func (s *executableTaskSuite) TestResend_NotFound() { @@ -314,8 +315,124 @@ ClosedWorkflowOnly: false, }).Return(&historyservice.DeleteWorkflowExecutionResponse{}, nil) - err := s.task.Resend(context.Background(), remoteCluster, resendErr) + doContinue, err := s.task.Resend(context.Background(), remoteCluster, resendErr, ResendAttempt) s.NoError(err) + s.False(doContinue) +} + +func (s *executableTaskSuite) TestResend_ResendError_Success() { + remoteCluster := cluster.TestAlternativeClusterName + resendErr := &serviceerrors.RetryReplication{ + NamespaceId: uuid.NewString(), + WorkflowId: uuid.NewString(), + RunId: uuid.NewString(), + StartEventId: rand.Int63(), + StartEventVersion: rand.Int63(), + EndEventId: rand.Int63(), + EndEventVersion: rand.Int63(), + } + + anotherResendErr := &serviceerrors.RetryReplication{ + NamespaceId: resendErr.NamespaceId, + WorkflowId: resendErr.WorkflowId, + RunId: resendErr.RunId, + StartEventId: rand.Int63(), + StartEventVersion: rand.Int63(), + EndEventId: rand.Int63(), + EndEventVersion: rand.Int63(), + } + + gomock.InOrder( + s.ndcHistoryResender.EXPECT().SendSingleWorkflowHistory( + gomock.Any(), + remoteCluster, + namespace.ID(resendErr.NamespaceId), + resendErr.WorkflowId, + resendErr.RunId, + resendErr.StartEventId, + resendErr.StartEventVersion, + resendErr.EndEventId, + resendErr.EndEventVersion, + ).Return(anotherResendErr), + s.ndcHistoryResender.EXPECT().SendSingleWorkflowHistory( + gomock.Any(), + remoteCluster, + namespace.ID(anotherResendErr.NamespaceId), + anotherResendErr.WorkflowId, + anotherResendErr.RunId, + anotherResendErr.StartEventId, + anotherResendErr.StartEventVersion, + anotherResendErr.EndEventId, + anotherResendErr.EndEventVersion, + ).Return(nil), + s.ndcHistoryResender.EXPECT().SendSingleWorkflowHistory( + gomock.Any(), + remoteCluster, + namespace.ID(resendErr.NamespaceId), + resendErr.WorkflowId, + resendErr.RunId, + resendErr.StartEventId, + resendErr.StartEventVersion, + resendErr.EndEventId, + resendErr.EndEventVersion, + ).Return(nil), + ) + + doContinue, err := s.task.Resend(context.Background(), remoteCluster, resendErr, ResendAttempt) + s.NoError(err) + s.True(doContinue) +} + +func (s *executableTaskSuite) TestResend_ResendError_Error() { + remoteCluster := cluster.TestAlternativeClusterName + resendErr := &serviceerrors.RetryReplication{ + NamespaceId: uuid.NewString(), + WorkflowId: uuid.NewString(), + RunId: uuid.NewString(), + StartEventId: rand.Int63(), + StartEventVersion: rand.Int63(), + EndEventId: rand.Int63(), + EndEventVersion: rand.Int63(), + } + + anotherResendErr := &serviceerrors.RetryReplication{ + NamespaceId: resendErr.NamespaceId, + WorkflowId: resendErr.WorkflowId, + RunId: resendErr.RunId, + StartEventId: rand.Int63(), + StartEventVersion: rand.Int63(), + EndEventId: rand.Int63(), + EndEventVersion: rand.Int63(), + } + + gomock.InOrder( + 
s.ndcHistoryResender.EXPECT().SendSingleWorkflowHistory( + gomock.Any(), + remoteCluster, + namespace.ID(resendErr.NamespaceId), + resendErr.WorkflowId, + resendErr.RunId, + resendErr.StartEventId, + resendErr.StartEventVersion, + resendErr.EndEventId, + resendErr.EndEventVersion, + ).Return(anotherResendErr), + s.ndcHistoryResender.EXPECT().SendSingleWorkflowHistory( + gomock.Any(), + remoteCluster, + namespace.ID(anotherResendErr.NamespaceId), + anotherResendErr.WorkflowId, + anotherResendErr.RunId, + anotherResendErr.StartEventId, + anotherResendErr.StartEventVersion, + anotherResendErr.EndEventId, + anotherResendErr.EndEventVersion, + ).Return(&serviceerrors.RetryReplication{}), + ) + + doContinue, err := s.task.Resend(context.Background(), remoteCluster, resendErr, ResendAttempt) + s.Error(err) + s.False(doContinue) } func (s *executableTaskSuite) TestResend_Error() { @@ -342,8 +459,9 @@ resendErr.EndEventVersion, ).Return(serviceerror.NewUnavailable("")) - err := s.task.Resend(context.Background(), remoteCluster, resendErr) + doContinue, err := s.task.Resend(context.Background(), remoteCluster, resendErr, ResendAttempt) s.Error(err) + s.False(doContinue) } func (s *executableTaskSuite) TestGetNamespaceInfo_Process() { diff -Nru temporal-1.21.5-1/src/service/history/replication/executable_workflow_state_task.go temporal-1.22.5/src/service/history/replication/executable_workflow_state_task.go --- temporal-1.21.5-1/src/service/history/replication/executable_workflow_state_task.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/replication/executable_workflow_state_task.go 2024-02-23 09:45:43.000000000 +0000 @@ -140,15 +140,23 @@ ctx, cancel := newTaskContext(namespaceName) defer cancel() - if resendErr := e.Resend( + if doContinue, resendErr := e.Resend( ctx, e.sourceClusterName, retryErr, - ); resendErr != nil { + ResendAttempt, + ); resendErr != nil || !doContinue { return err } return e.Execute() default: + e.Logger.Error("workflow state replication task encountered error", + tag.WorkflowNamespaceID(e.NamespaceID), + tag.WorkflowID(e.WorkflowID), + tag.WorkflowRunID(e.RunID), + tag.TaskID(e.ExecutableTask.TaskID()), + tag.Error(err), + ) return err } } diff -Nru temporal-1.21.5-1/src/service/history/replication/executable_workflow_state_task_test.go temporal-1.22.5/src/service/history/replication/executable_workflow_state_task_test.go --- temporal-1.21.5-1/src/service/history/replication/executable_workflow_state_task_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/replication/executable_workflow_state_task_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -205,7 +205,7 @@ rand.Int63(), rand.Int63(), ) - s.executableTask.EXPECT().Resend(gomock.Any(), s.sourceClusterName, err).Return(nil) + s.executableTask.EXPECT().Resend(gomock.Any(), s.sourceClusterName, err, ResendAttempt).Return(true, nil) engine.EXPECT().ReplicateWorkflowState(gomock.Any(), gomock.Any()).Return(nil) s.NoError(s.task.HandleErr(err)) } @@ -224,7 +224,7 @@ rand.Int63(), rand.Int63(), ) - s.executableTask.EXPECT().Resend(gomock.Any(), s.sourceClusterName, err).Return(errors.New("OwO")) + s.executableTask.EXPECT().Resend(gomock.Any(), s.sourceClusterName, err, ResendAttempt).Return(false, errors.New("OwO")) s.Equal(err, s.task.HandleErr(err)) } diff -Nru temporal-1.21.5-1/src/service/history/replication/fx.go temporal-1.22.5/src/service/history/replication/fx.go --- temporal-1.21.5-1/src/service/history/replication/fx.go 2023-09-29 14:03:07.000000000 +0000 
+++ temporal-1.22.5/src/service/history/replication/fx.go 2024-02-23 09:45:43.000000000 +0000 @@ -38,10 +38,12 @@ ctasks "go.temporal.io/server/common/tasks" "go.temporal.io/server/common/xdc" "go.temporal.io/server/service/history/configs" + "go.temporal.io/server/service/history/shard" ) var Module = fx.Options( fx.Provide(ReplicationTaskFetcherFactoryProvider), + fx.Provide(ReplicationTaskConverterFactoryProvider), fx.Provide(ReplicationTaskExecutorProvider), fx.Provide(ReplicationStreamSchedulerProvider), fx.Provide(StreamReceiverMonitorProvider), @@ -63,6 +65,17 @@ ) } +func ReplicationTaskConverterFactoryProvider() SourceTaskConverterProvider { + return func(historyEngine shard.Engine, shardContext shard.Context, clientClusterShardCount int32, clientClusterName string, clientShardKey ClusterShardKey) SourceTaskConverter { + return NewSourceTaskConverter( + historyEngine, + shardContext.GetNamespaceRegistry(), + clientClusterShardCount, + clientClusterName, + clientShardKey) + } +} + func ReplicationTaskExecutorProvider() TaskExecutorProvider { return func(params TaskExecutorParams) TaskExecutor { return NewTaskExecutor( diff -Nru temporal-1.21.5-1/src/service/history/replication/raw_task_converter.go temporal-1.22.5/src/service/history/replication/raw_task_converter.go --- temporal-1.21.5-1/src/service/history/replication/raw_task_converter.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/replication/raw_task_converter.go 2024-02-23 09:45:43.000000000 +0000 @@ -22,6 +22,8 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. +//go:generate mockgen -copyright_file ../../../LICENSE -package $GOPACKAGE -source $GOFILE -destination raw_task_converter_mock.go + package replication import ( @@ -42,36 +44,93 @@ "go.temporal.io/server/common/namespace" "go.temporal.io/server/common/persistence" "go.temporal.io/server/common/persistence/versionhistory" + "go.temporal.io/server/service/history/shard" "go.temporal.io/server/service/history/tasks" "go.temporal.io/server/service/history/workflow" wcache "go.temporal.io/server/service/history/workflow/cache" ) -func convertTask( - ctx context.Context, +type ( + SourceTaskConverterImpl struct { + historyEngine shard.Engine + namespaceCache namespace.Registry + clientClusterShardCount int32 + clientClusterName string + clientShardKey ClusterShardKey + } + SourceTaskConverter interface { + Convert(task tasks.Task) (*replicationspb.ReplicationTask, error) + } + SourceTaskConverterProvider func( + historyEngine shard.Engine, + shardContext shard.Context, + clientClusterShardCount int32, + clientClusterName string, + clientShardKey ClusterShardKey, + ) SourceTaskConverter +) + +func NewSourceTaskConverter( + historyEngine shard.Engine, + namespaceCache namespace.Registry, + clientClusterShardCount int32, + clientClusterName string, + clientShardKey ClusterShardKey, +) *SourceTaskConverterImpl { + return &SourceTaskConverterImpl{ + historyEngine: historyEngine, + namespaceCache: namespaceCache, + clientClusterShardCount: clientClusterShardCount, + clientClusterName: clientClusterName, + clientShardKey: clientShardKey, + } +} + +func (c *SourceTaskConverterImpl) Convert( task tasks.Task, - shardID int32, - workflowCache wcache.Cache, - executionManager persistence.ExecutionManager, - logger log.Logger, ) (*replicationspb.ReplicationTask, error) { - switch task := task.(type) { - case *tasks.SyncActivityTask: - return convertActivityStateReplicationTask(ctx, task, workflowCache) - 
case *tasks.SyncWorkflowStateTask: - return convertWorkflowStateReplicationTask(ctx, task, workflowCache) - case *tasks.HistoryReplicationTask: - return convertHistoryReplicationTask( - ctx, - task, - shardID, - workflowCache, - executionManager, - logger, - ) - default: - return nil, errUnknownReplicationTask + var shouldProcessTask bool + namespaceEntry, err := c.namespaceCache.GetNamespaceByID( + namespace.ID(task.GetNamespaceID()), + ) + if err != nil { + // if there is error, then blindly send the task, better safe than sorry + shouldProcessTask = true + } + + if namespaceEntry != nil { + FilterLoop: + for _, targetCluster := range namespaceEntry.ClusterNames() { + if c.clientClusterName == targetCluster { + shouldProcessTask = true + break FilterLoop + } + } } + + if !shouldProcessTask { + return nil, nil + } + + clientShardID := common.WorkflowIDToHistoryShard(task.GetNamespaceID(), task.GetWorkflowID(), c.clientClusterShardCount) + if clientShardID != c.clientShardKey.ShardID { + return nil, nil + } + var ctx context.Context + var cancel context.CancelFunc + + if namespaceEntry != nil { + ctx, cancel = newTaskContext(namespaceEntry.Name().String()) + } else { + ctx, cancel = context.WithTimeout(context.Background(), applyReplicationTimeout) + } + + defer cancel() + replicationTask, err := c.historyEngine.ConvertReplicationTask(ctx, task) + if err != nil { + return nil, err + } + return replicationTask, nil } func convertActivityStateReplicationTask( @@ -125,7 +184,7 @@ Attempt: activityInfo.Attempt, LastFailure: activityInfo.RetryLastFailure, LastWorkerIdentity: activityInfo.RetryLastWorkerIdentity, - BaseExecutionInfo: copyBaseWorkflowInfo(mutableState.GetBaseWorkflowInfo()), + BaseExecutionInfo: persistence.CopyBaseWorkflowInfo(mutableState.GetBaseWorkflowInfo()), VersionHistory: versionhistory.CopyVersionHistory(currentVersionHistory), }, }, @@ -168,6 +227,7 @@ taskInfo *tasks.HistoryReplicationTask, shardID int32, workflowCache wcache.Cache, + eventBlobCache persistence.XDCCache, executionManager persistence.ExecutionManager, logger log.Logger, ) (*replicationspb.ReplicationTask, error) { @@ -179,6 +239,7 @@ taskInfo.FirstEventID, taskInfo.NextEventID, workflowCache, + eventBlobCache, executionManager, logger, ) @@ -198,6 +259,7 @@ common.FirstEventID, common.FirstEventID+1, workflowCache, + eventBlobCache, executionManager, logger, ) @@ -266,9 +328,20 @@ firstEventID int64, nextEventID int64, workflowCache wcache.Cache, + eventBlobCache persistence.XDCCache, executionManager persistence.ExecutionManager, logger log.Logger, ) ([]*historyspb.VersionHistoryItem, *commonpb.DataBlob, *workflowspb.BaseExecutionInfo, error) { + if eventBlobCache != nil { + if xdcCacheValue, ok := eventBlobCache.Get(persistence.NewXDCCacheKey( + workflowKey, + firstEventID, + nextEventID, + eventVersion, + )); ok { + return xdcCacheValue.VersionHistoryItems, xdcCacheValue.EventBlob, xdcCacheValue.BaseWorkflowInfo, nil + } + } versionHistory, branchToken, baseWorkflowInfo, err := getBranchToken( ctx, workflowKey, @@ -313,7 +386,7 @@ ms, err := wfContext.LoadMutableState(ctx) switch err.(type) { case nil: - return getVersionHistoryItems(ms, eventID, eventVersion) + return persistence.GetXDCCacheValue(ms.GetExecutionInfo(), eventID, eventVersion) case *serviceerror.NotFound, *serviceerror.NamespaceNotFound: return nil, nil, nil, nil default: @@ -329,7 +402,6 @@ nextEventID int64, executionManager persistence.ExecutionManager, ) (*commonpb.DataBlob, error) { - var eventBatchBlobs []*commonpb.DataBlob var 
pageToken []byte req := &persistence.ReadHistoryBranchRequest{ @@ -362,31 +434,6 @@ return eventBatchBlobs[0], nil } -func getVersionHistoryItems( - mutableState workflow.MutableState, - eventID int64, - version int64, -) ([]*historyspb.VersionHistoryItem, []byte, *workflowspb.BaseExecutionInfo, error) { - baseWorkflowInfo := copyBaseWorkflowInfo(mutableState.GetBaseWorkflowInfo()) - versionHistories := mutableState.GetExecutionInfo().GetVersionHistories() - versionHistoryIndex, err := versionhistory.FindFirstVersionHistoryIndexByVersionHistoryItem( - versionHistories, - versionhistory.NewVersionHistoryItem( - eventID, - version, - ), - ) - if err != nil { - return nil, nil, nil, err - } - - versionHistoryBranch, err := versionhistory.GetVersionHistory(versionHistories, versionHistoryIndex) - if err != nil { - return nil, nil, nil, err - } - return versionhistory.CopyVersionHistory(versionHistoryBranch).GetItems(), versionHistoryBranch.GetBranchToken(), baseWorkflowInfo, nil -} - func convertGetHistoryError( workflowKey definition.WorkflowKey, logger log.Logger, @@ -413,16 +460,3 @@ return err } } - -func copyBaseWorkflowInfo( - baseWorkflowInfo *workflowspb.BaseExecutionInfo, -) *workflowspb.BaseExecutionInfo { - if baseWorkflowInfo == nil { - return nil - } - return &workflowspb.BaseExecutionInfo{ - RunId: baseWorkflowInfo.RunId, - LowestCommonAncestorEventId: baseWorkflowInfo.LowestCommonAncestorEventId, - LowestCommonAncestorEventVersion: baseWorkflowInfo.LowestCommonAncestorEventVersion, - } -} diff -Nru temporal-1.21.5-1/src/service/history/replication/raw_task_converter_mock.go temporal-1.22.5/src/service/history/replication/raw_task_converter_mock.go --- temporal-1.21.5-1/src/service/history/replication/raw_task_converter_mock.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/service/history/replication/raw_task_converter_mock.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,75 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Code generated by MockGen. DO NOT EDIT. +// Source: raw_task_converter.go + +// Package replication is a generated GoMock package. 
+package replication + +import ( + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + repication "go.temporal.io/server/api/replication/v1" + tasks "go.temporal.io/server/service/history/tasks" +) + +// MockSourceTaskConverter is a mock of SourceTaskConverter interface. +type MockSourceTaskConverter struct { + ctrl *gomock.Controller + recorder *MockSourceTaskConverterMockRecorder +} + +// MockSourceTaskConverterMockRecorder is the mock recorder for MockSourceTaskConverter. +type MockSourceTaskConverterMockRecorder struct { + mock *MockSourceTaskConverter +} + +// NewMockSourceTaskConverter creates a new mock instance. +func NewMockSourceTaskConverter(ctrl *gomock.Controller) *MockSourceTaskConverter { + mock := &MockSourceTaskConverter{ctrl: ctrl} + mock.recorder = &MockSourceTaskConverterMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockSourceTaskConverter) EXPECT() *MockSourceTaskConverterMockRecorder { + return m.recorder +} + +// Convert mocks base method. +func (m *MockSourceTaskConverter) Convert(task tasks.Task) (*repication.ReplicationTask, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Convert", task) + ret0, _ := ret[0].(*repication.ReplicationTask) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Convert indicates an expected call of Convert. +func (mr *MockSourceTaskConverterMockRecorder) Convert(task interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Convert", reflect.TypeOf((*MockSourceTaskConverter)(nil).Convert), task) +} diff -Nru temporal-1.21.5-1/src/service/history/replication/raw_task_converter_test.go temporal-1.22.5/src/service/history/replication/raw_task_converter_test.go --- temporal-1.21.5-1/src/service/history/replication/raw_task_converter_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/replication/raw_task_converter_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -306,7 +306,8 @@ RetryLastWorkerIdentity: activityLastWorkerIdentity, }, true).AnyTimes() s.mutableState.EXPECT().GetExecutionInfo().Return(&persistencespb.WorkflowExecutionInfo{ - VersionHistories: versionHistories, + BaseExecutionInfo: baseWorkflowInfo, + VersionHistories: versionHistories, }).AnyTimes() s.mutableState.EXPECT().GetBaseWorkflowInfo().Return(baseWorkflowInfo).AnyTimes() @@ -410,7 +411,8 @@ RetryLastWorkerIdentity: activityLastWorkerIdentity, }, true).AnyTimes() s.mutableState.EXPECT().GetExecutionInfo().Return(&persistencespb.WorkflowExecutionInfo{ - VersionHistories: versionHistories, + BaseExecutionInfo: baseWorkflowInfo, + VersionHistories: versionHistories, }).AnyTimes() s.mutableState.EXPECT().GetBaseWorkflowInfo().Return(baseWorkflowInfo).AnyTimes() @@ -565,7 +567,7 @@ ).Return(s.workflowContext, s.releaseFn, nil) s.workflowContext.EXPECT().LoadMutableState(gomock.Any()).Return(nil, serviceerror.NewNotFound("")) - result, err := convertHistoryReplicationTask(ctx, task, shardID, s.workflowCache, s.executionManager, s.logger) + result, err := convertHistoryReplicationTask(ctx, task, shardID, s.workflowCache, nil, s.executionManager, s.logger) s.NoError(err) s.Nil(result) s.True(s.lockReleased) @@ -626,7 +628,8 @@ ).Return(s.workflowContext, s.releaseFn, nil) s.workflowContext.EXPECT().LoadMutableState(gomock.Any()).Return(s.mutableState, nil) s.mutableState.EXPECT().GetExecutionInfo().Return(&persistencespb.WorkflowExecutionInfo{ - VersionHistories: versionHistories, + 
BaseExecutionInfo: baseWorkflowInfo, + VersionHistories: versionHistories, }).AnyTimes() s.mutableState.EXPECT().GetBaseWorkflowInfo().Return(baseWorkflowInfo).AnyTimes() s.executionManager.EXPECT().ReadRawHistoryBranch(gomock.Any(), &persistence.ReadHistoryBranchRequest{ @@ -671,7 +674,8 @@ ).Return(s.newWorkflowContext, s.releaseFn, nil) s.newWorkflowContext.EXPECT().LoadMutableState(gomock.Any()).Return(s.newMutableState, nil) s.newMutableState.EXPECT().GetExecutionInfo().Return(&persistencespb.WorkflowExecutionInfo{ - VersionHistories: newVersionHistories, + BaseExecutionInfo: baseWorkflowInfo, + VersionHistories: newVersionHistories, }).AnyTimes() s.newMutableState.EXPECT().GetBaseWorkflowInfo().Return(nil).AnyTimes() s.executionManager.EXPECT().ReadRawHistoryBranch(gomock.Any(), &persistence.ReadHistoryBranchRequest{ @@ -686,7 +690,7 @@ NextPageToken: nil, }, nil) - result, err := convertHistoryReplicationTask(ctx, task, shardID, s.workflowCache, s.executionManager, s.logger) + result, err := convertHistoryReplicationTask(ctx, task, shardID, s.workflowCache, nil, s.executionManager, s.logger) s.NoError(err) s.Equal(&replicationspb.ReplicationTask{ TaskType: enumsspb.REPLICATION_TASK_TYPE_HISTORY_V2_TASK, @@ -762,7 +766,8 @@ ).Return(s.workflowContext, s.releaseFn, nil) s.workflowContext.EXPECT().LoadMutableState(gomock.Any()).Return(s.mutableState, nil) s.mutableState.EXPECT().GetExecutionInfo().Return(&persistencespb.WorkflowExecutionInfo{ - VersionHistories: versionHistories, + BaseExecutionInfo: baseWorkflowInfo, + VersionHistories: versionHistories, }).AnyTimes() s.mutableState.EXPECT().GetBaseWorkflowInfo().Return(baseWorkflowInfo).AnyTimes() s.executionManager.EXPECT().ReadRawHistoryBranch(gomock.Any(), &persistence.ReadHistoryBranchRequest{ @@ -777,7 +782,7 @@ NextPageToken: nil, }, nil) - result, err := convertHistoryReplicationTask(ctx, task, shardID, s.workflowCache, s.executionManager, s.logger) + result, err := convertHistoryReplicationTask(ctx, task, shardID, s.workflowCache, nil, s.executionManager, s.logger) s.NoError(err) s.Equal(&replicationspb.ReplicationTask{ TaskType: enumsspb.REPLICATION_TASK_TYPE_HISTORY_V2_TASK, diff -Nru temporal-1.21.5-1/src/service/history/replication/stream_receiver.go temporal-1.22.5/src/service/history/replication/stream_receiver.go --- temporal-1.21.5-1/src/service/history/replication/stream_receiver.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/replication/stream_receiver.go 2024-02-23 09:45:43.000000000 +0000 @@ -44,9 +44,9 @@ type ( StreamReceiver interface { - common.Daemon IsValid() bool Key() ClusterShardKeyPair + Stop() } StreamReceiverImpl struct { ProcessToolBox diff -Nru temporal-1.21.5-1/src/service/history/replication/stream_receiver_mock.go temporal-1.22.5/src/service/history/replication/stream_receiver_mock.go --- temporal-1.21.5-1/src/service/history/replication/stream_receiver_mock.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/replication/stream_receiver_mock.go 2024-02-23 09:45:43.000000000 +0000 @@ -85,18 +85,6 @@ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Key", reflect.TypeOf((*MockStreamReceiver)(nil).Key)) } -// Start mocks base method. -func (m *MockStreamReceiver) Start() { - m.ctrl.T.Helper() - m.ctrl.Call(m, "Start") -} - -// Start indicates an expected call of Start. 
-func (mr *MockStreamReceiverMockRecorder) Start() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Start", reflect.TypeOf((*MockStreamReceiver)(nil).Start)) -} - // Stop mocks base method. func (m *MockStreamReceiver) Stop() { m.ctrl.T.Helper() diff -Nru temporal-1.21.5-1/src/service/history/replication/stream_receiver_monitor.go temporal-1.22.5/src/service/history/replication/stream_receiver_monitor.go --- temporal-1.21.5-1/src/service/history/replication/stream_receiver_monitor.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/replication/stream_receiver_monitor.go 2024-02-23 09:45:43.000000000 +0000 @@ -41,8 +41,9 @@ type ( StreamReceiverMonitor interface { - common.Daemon RegisterInboundStream(streamSender StreamSender) + Start() + Stop() } StreamReceiverMonitorImpl struct { ProcessToolBox diff -Nru temporal-1.21.5-1/src/service/history/replication/stream_sender.go temporal-1.22.5/src/service/history/replication/stream_sender.go --- temporal-1.21.5-1/src/service/history/replication/stream_sender.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/replication/stream_sender.go 2024-02-23 09:45:43.000000000 +0000 @@ -40,36 +40,26 @@ replicationspb "go.temporal.io/server/api/replication/v1" "go.temporal.io/server/common" "go.temporal.io/server/common/channel" + "go.temporal.io/server/common/headers" "go.temporal.io/server/common/log" "go.temporal.io/server/common/log/tag" "go.temporal.io/server/common/metrics" - "go.temporal.io/server/common/namespace" "go.temporal.io/server/common/primitives/timestamp" "go.temporal.io/server/service/history/shard" "go.temporal.io/server/service/history/tasks" ) type ( - SourceTaskConvertorImpl struct { - historyEngine shard.Engine - namespaceCache namespace.Registry - clientClusterShardCount int32 - clientClusterName string - clientShardKey ClusterShardKey - } - SourceTaskConvertor interface { - Convert(task tasks.Task) (*replicationspb.ReplicationTask, error) - } StreamSender interface { - common.Daemon IsValid() bool Key() ClusterShardKeyPair + Stop() } StreamSenderImpl struct { server historyservice.HistoryService_StreamWorkflowReplicationMessagesServer shardContext shard.Context historyEngine shard.Engine - taskConvertor SourceTaskConvertor + taskConverter SourceTaskConverter metrics metrics.Handler logger log.Logger @@ -84,7 +74,7 @@ server historyservice.HistoryService_StreamWorkflowReplicationMessagesServer, shardContext shard.Context, historyEngine shard.Engine, - taskConvertor SourceTaskConvertor, + taskConverter SourceTaskConverter, clientShardKey ClusterShardKey, serverShardKey ClusterShardKey, ) *StreamSenderImpl { @@ -92,7 +82,7 @@ server: server, shardContext: shardContext, historyEngine: historyEngine, - taskConvertor: taskConvertor, + taskConverter: taskConverter, metrics: shardContext.GetMetricsHandler(), logger: shardContext.GetLogger(), @@ -313,7 +303,8 @@ }) } - ctx, cancel := context.WithTimeout(context.Background(), replicationTimeout) + ctx := headers.SetCallerInfo(context.Background(), headers.SystemPreemptableCallerInfo) + ctx, cancel := context.WithTimeout(ctx, replicationTimeout) defer cancel() iter, err := s.historyEngine.GetReplicationTasksIter( ctx, @@ -334,7 +325,7 @@ if err != nil { return err } - task, err := s.taskConvertor.Convert(item) + task, err := s.taskConverter.Convert(item) if err != nil { return err } @@ -369,53 +360,3 @@ }, }) } - -func NewSourceTaskConvertor( - historyEngine shard.Engine, - namespaceCache 
namespace.Registry, - clientClusterShardCount int32, - clientClusterName string, - clientShardKey ClusterShardKey, -) *SourceTaskConvertorImpl { - return &SourceTaskConvertorImpl{ - historyEngine: historyEngine, - namespaceCache: namespaceCache, - clientClusterShardCount: clientClusterShardCount, - clientClusterName: clientClusterName, - clientShardKey: clientShardKey, - } -} - -func (c *SourceTaskConvertorImpl) Convert( - task tasks.Task, -) (*replicationspb.ReplicationTask, error) { - if namespaceEntry, err := c.namespaceCache.GetNamespaceByID( - namespace.ID(task.GetNamespaceID()), - ); err == nil { - shouldProcessTask := false - FilterLoop: - for _, targetCluster := range namespaceEntry.ClusterNames() { - if c.clientClusterName == targetCluster { - shouldProcessTask = true - break FilterLoop - } - } - if !shouldProcessTask { - return nil, nil - } - } - // if there is error, then blindly send the task, better safe than sorry - - clientShardID := common.WorkflowIDToHistoryShard(task.GetNamespaceID(), task.GetWorkflowID(), c.clientClusterShardCount) - if clientShardID != c.clientShardKey.ShardID { - return nil, nil - } - - ctx, cancel := context.WithTimeout(context.Background(), replicationTimeout) - defer cancel() - replicationTask, err := c.historyEngine.ConvertReplicationTask(ctx, task) - if err != nil { - return nil, err - } - return replicationTask, nil -} diff -Nru temporal-1.21.5-1/src/service/history/replication/stream_sender_mock.go temporal-1.22.5/src/service/history/replication/stream_sender_mock.go --- temporal-1.21.5-1/src/service/history/replication/stream_sender_mock.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/replication/stream_sender_mock.go 2024-02-23 09:45:43.000000000 +0000 @@ -32,48 +32,8 @@ reflect "reflect" gomock "github.com/golang/mock/gomock" - v1 "go.temporal.io/server/api/replication/v1" - tasks "go.temporal.io/server/service/history/tasks" ) -// MockSourceTaskConvertor is a mock of SourceTaskConvertor interface. -type MockSourceTaskConvertor struct { - ctrl *gomock.Controller - recorder *MockSourceTaskConvertorMockRecorder -} - -// MockSourceTaskConvertorMockRecorder is the mock recorder for MockSourceTaskConvertor. -type MockSourceTaskConvertorMockRecorder struct { - mock *MockSourceTaskConvertor -} - -// NewMockSourceTaskConvertor creates a new mock instance. -func NewMockSourceTaskConvertor(ctrl *gomock.Controller) *MockSourceTaskConvertor { - mock := &MockSourceTaskConvertor{ctrl: ctrl} - mock.recorder = &MockSourceTaskConvertorMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockSourceTaskConvertor) EXPECT() *MockSourceTaskConvertorMockRecorder { - return m.recorder -} - -// Convert mocks base method. -func (m *MockSourceTaskConvertor) Convert(task tasks.Task) (*v1.ReplicationTask, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Convert", task) - ret0, _ := ret[0].(*v1.ReplicationTask) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Convert indicates an expected call of Convert. -func (mr *MockSourceTaskConvertorMockRecorder) Convert(task interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Convert", reflect.TypeOf((*MockSourceTaskConvertor)(nil).Convert), task) -} - // MockStreamSender is a mock of StreamSender interface. 
type MockStreamSender struct { ctrl *gomock.Controller @@ -125,18 +85,6 @@ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Key", reflect.TypeOf((*MockStreamSender)(nil).Key)) } -// Start mocks base method. -func (m *MockStreamSender) Start() { - m.ctrl.T.Helper() - m.ctrl.Call(m, "Start") -} - -// Start indicates an expected call of Start. -func (mr *MockStreamSenderMockRecorder) Start() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Start", reflect.TypeOf((*MockStreamSender)(nil).Start)) -} - // Stop mocks base method. func (m *MockStreamSender) Stop() { m.ctrl.T.Helper() diff -Nru temporal-1.21.5-1/src/service/history/replication/stream_sender_test.go temporal-1.22.5/src/service/history/replication/stream_sender_test.go --- temporal-1.21.5-1/src/service/history/replication/stream_sender_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/replication/stream_sender_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -58,7 +58,7 @@ server *historyservicemock.MockHistoryService_StreamWorkflowReplicationMessagesServer shardContext *shard.MockContext historyEngine *shard.MockEngine - taskConvertor *MockSourceTaskConvertor + taskConverter *MockSourceTaskConverter clientShardKey ClusterShardKey serverShardKey ClusterShardKey @@ -87,7 +87,7 @@ s.server = historyservicemock.NewMockHistoryService_StreamWorkflowReplicationMessagesServer(s.controller) s.shardContext = shard.NewMockContext(s.controller) s.historyEngine = shard.NewMockEngine(s.controller) - s.taskConvertor = NewMockSourceTaskConvertor(s.controller) + s.taskConverter = NewMockSourceTaskConverter(s.controller) s.clientShardKey = NewClusterShardKey(rand.Int31(), rand.Int31()) s.serverShardKey = NewClusterShardKey(rand.Int31(), rand.Int31()) @@ -99,7 +99,7 @@ s.server, s.shardContext, s.historyEngine, - s.taskConvertor, + s.taskConverter, s.clientShardKey, s.serverShardKey, ) @@ -375,9 +375,9 @@ beginInclusiveWatermark, endExclusiveWatermark, ).Return(iter, nil) - s.taskConvertor.EXPECT().Convert(item0).Return(task0, nil) - s.taskConvertor.EXPECT().Convert(item1).Return(nil, nil) - s.taskConvertor.EXPECT().Convert(item2).Return(task2, nil) + s.taskConverter.EXPECT().Convert(item0).Return(task0, nil) + s.taskConverter.EXPECT().Convert(item1).Return(nil, nil) + s.taskConverter.EXPECT().Convert(item2).Return(task2, nil) gomock.InOrder( s.server.EXPECT().Send(&historyservice.StreamWorkflowReplicationMessagesResponse{ Attributes: &historyservice.StreamWorkflowReplicationMessagesResponse_Messages{ diff -Nru temporal-1.21.5-1/src/service/history/replication/task_fetcher.go temporal-1.22.5/src/service/history/replication/task_fetcher.go --- temporal-1.21.5-1/src/service/history/replication/task_fetcher.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/replication/task_fetcher.go 2024-02-23 09:45:43.000000000 +0000 @@ -56,18 +56,17 @@ type ( // TaskFetcherFactory is a group of fetchers, one per source DC. TaskFetcherFactory interface { - common.Daemon - GetOrCreateFetcher(clusterName string) taskFetcher + Start() + Stop() } // taskFetcher is responsible for fetching replication messages from remote DC. taskFetcher interface { - common.Daemon - getSourceCluster() string getRequestChan() chan<- *replicationTaskRequest getRateLimiter() quotas.RateLimiter + Stop() } // taskFetcherFactoryImpl is a group of fetchers, one per source DC. 
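Editorial note on the replication hunks above (stream_receiver.go, stream_receiver_monitor.go, stream_sender.go, task_fetcher.go): they all apply the same refactor — the interfaces stop embedding common.Daemon and instead list only the lifecycle methods each component still exposes (usually just Stop(), or explicit Start()/Stop()), so the regenerated mocks below lose their Start methods; in the same pass SourceTaskConvertor is renamed SourceTaskConverter and moved out of stream_sender.go (its new mock opens this section). The following sketch is illustrative only and is not part of the patch; the "Old"/"New" type names and the stand-in ClusterShardKeyPair are invented here for comparison, the real declarations are the ones shown in the hunks.

    // Package-level sketch of the interface narrowing seen in these hunks.
    package replication

    // daemon mirrors the common.Daemon contract embedded before v1.22.
    type daemon interface {
        Start()
        Stop()
    }

    // streamSenderOld: pre-1.22 shape, Start and Stop arrive via the embedded daemon.
    type streamSenderOld interface {
        daemon
        IsValid() bool
        Key() ClusterShardKeyPair
    }

    // streamSenderNew: 1.22 shape from stream_sender.go above — only IsValid, Key
    // and Stop remain, which is why stream_sender_mock.go drops its Start method.
    type streamSenderNew interface {
        IsValid() bool
        Key() ClusterShardKeyPair
        Stop()
    }

    // ClusterShardKeyPair stands in for the real key type so the sketch is
    // self-contained; the actual definition lives elsewhere in this package.
    type ClusterShardKeyPair struct{ Client, Server int32 }

The same narrowing explains the mock-file hunks that follow: once Start() is no longer part of the interface, mockgen stops emitting Start/RecordCallWithMethodType stubs for it.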
diff -Nru temporal-1.21.5-1/src/service/history/replication/task_fetcher_mock.go temporal-1.22.5/src/service/history/replication/task_fetcher_mock.go --- temporal-1.21.5-1/src/service/history/replication/task_fetcher_mock.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/replication/task_fetcher_mock.go 2024-02-23 09:45:43.000000000 +0000 @@ -119,18 +119,6 @@ return m.recorder } -// Start mocks base method. -func (m *MocktaskFetcher) Start() { - m.ctrl.T.Helper() - m.ctrl.Call(m, "Start") -} - -// Start indicates an expected call of Start. -func (mr *MocktaskFetcherMockRecorder) Start() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Start", reflect.TypeOf((*MocktaskFetcher)(nil).Start)) -} - // Stop mocks base method. func (m *MocktaskFetcher) Stop() { m.ctrl.T.Helper() diff -Nru temporal-1.21.5-1/src/service/history/replication/task_processor.go temporal-1.22.5/src/service/history/replication/task_processor.go --- temporal-1.21.5-1/src/service/history/replication/task_processor.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/replication/task_processor.go 2024-02-23 09:45:43.000000000 +0000 @@ -69,7 +69,8 @@ type ( // TaskProcessor is the interface for task processor TaskProcessor interface { - common.Daemon + Start() + Stop() } // taskProcessorImpl is responsible for processing replication tasks for a shard. diff -Nru temporal-1.21.5-1/src/service/history/replication/task_processor_manager.go temporal-1.22.5/src/service/history/replication/task_processor_manager.go --- temporal-1.21.5-1/src/service/history/replication/task_processor_manager.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/replication/task_processor_manager.go 2024-02-23 09:45:43.000000000 +0000 @@ -80,8 +80,6 @@ } ) -var _ common.Daemon = (*taskProcessorManagerImpl)(nil) - func NewTaskProcessorManager( config *configs.Config, shard shard.Context, @@ -264,12 +262,14 @@ func (r *taskProcessorManagerImpl) checkReplicationDLQEmptyLoop() { for { + timer := time.NewTimer(backoff.FullJitter(dlqSizeCheckInterval)) select { - case <-time.After(backoff.FullJitter(dlqSizeCheckInterval)): + case <-timer.C: if r.config.ReplicationEnableDLQMetrics() { r.checkReplicationDLQSize() } case <-r.shutdownChan: + timer.Stop() return } } @@ -367,7 +367,6 @@ } if !isEmpty { r.metricsHandler.Counter(metrics.ReplicationNonEmptyDLQCount.GetMetricName()).Record(1, metrics.OperationTag(metrics.ReplicationDLQStatsScope)) - r.logger.Info("Replication DLQ is not empty.", tag.ShardID(r.shard.GetShardID()), tag.AckLevel(minTaskKey)) break } } diff -Nru temporal-1.21.5-1/src/service/history/replication/task_processor_test.go temporal-1.22.5/src/service/history/replication/task_processor_test.go --- temporal-1.21.5-1/src/service/history/replication/task_processor_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/replication/task_processor_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -57,7 +57,6 @@ "go.temporal.io/server/common/resourcetest" "go.temporal.io/server/service/history/configs" "go.temporal.io/server/service/history/shard" - "go.temporal.io/server/service/history/tasks" "go.temporal.io/server/service/history/tests" ) @@ -114,13 +113,6 @@ &persistencespb.ShardInfo{ ShardId: s.shardID, RangeId: 1, - QueueAckLevels: map[int32]*persistencespb.QueueAckLevel{ - tasks.CategoryReplication.ID(): { - ClusterAckLevel: map[string]int64{ - cluster.TestAlternativeClusterName: 
persistence.EmptyQueueMessageID, - }, - }, - }, }, s.config, ) diff -Nru temporal-1.21.5-1/src/service/history/service.go temporal-1.22.5/src/service/history/service.go --- temporal-1.21.5-1/src/service/history/service.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/service.go 2024-02-23 09:45:43.000000000 +0000 @@ -27,7 +27,6 @@ import ( "math/rand" "net" - "sync/atomic" "time" "google.golang.org/grpc" @@ -35,7 +34,6 @@ healthpb "google.golang.org/grpc/health/grpc_health_v1" "go.temporal.io/server/api/historyservice/v1" - "go.temporal.io/server/common" "go.temporal.io/server/common/log" "go.temporal.io/server/common/log/tag" "go.temporal.io/server/common/membership" @@ -48,7 +46,6 @@ // Service represents the history service type ( Service struct { - status int32 handler *Handler visibilityManager manager.VisibilityManager config *configs.Config @@ -76,7 +73,6 @@ healthServer *health.Server, ) *Service { return &Service{ - status: common.DaemonStatusInitialized, server: grpc.NewServer(grpcServerOptions...), handler: handler, visibilityManager: visibilityMgr, @@ -92,12 +88,7 @@ // Start starts the service func (s *Service) Start() { - if !atomic.CompareAndSwapInt32(&s.status, common.DaemonStatusInitialized, common.DaemonStatusStarted) { - return - } - - logger := s.logger - logger.Info("history starting") + s.logger.Info("history starting") s.metricsHandler.Counter(metrics.RestartCount).Record(1) rand.Seed(time.Now().UnixNano()) @@ -108,26 +99,26 @@ healthpb.RegisterHealthServer(s.server, s.healthServer) s.healthServer.SetServingStatus(serviceName, healthpb.HealthCheckResponse_SERVING) - // As soon as we join membership, other hosts will send requests for shards - // that we own. Ideally, then, we would start the GRPC server, and only then - // join membership. That's not possible with the GRPC interface, though, hence - // we start membership in a goroutine. + go func() { + s.logger.Info("Starting to serve on history listener") + if err := s.server.Serve(s.grpcListener); err != nil { + s.logger.Fatal("Failed to serve on history listener", tag.Error(err)) + } + }() + + // As soon as we join membership, other hosts will send requests for shards that we own, + // so we should try to start this after starting the gRPC server. go func() { if delay := s.config.StartupMembershipJoinDelay(); delay > 0 { // In some situations, like rolling upgrades of the history service, // pausing before joining membership can help separate the shard movement // caused by another history instance terminating with this instance starting. - logger.Info("history start: delaying before membership start", + s.logger.Info("history start: delaying before membership start", tag.NewDurationTag("startupMembershipJoinDelay", delay)) time.Sleep(delay) } s.membershipMonitor.Start() }() - - logger.Info("Starting to serve on history listener") - if err := s.server.Serve(s.grpcListener); err != nil { - logger.Fatal("Failed to serve on history listener", tag.Error(err)) - } } // Stop stops the service diff -Nru temporal-1.21.5-1/src/service/history/shard/compatibility.go temporal-1.22.5/src/service/history/shard/compatibility.go --- temporal-1.21.5-1/src/service/history/shard/compatibility.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/shard/compatibility.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,277 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package shard - -import ( - "fmt" - "math" - - enumsspb "go.temporal.io/server/api/enums/v1" - persistencespb "go.temporal.io/server/api/persistence/v1" - "go.temporal.io/server/common" - "go.temporal.io/server/common/cluster" - "go.temporal.io/server/service/history/tasks" -) - -func loadShardInfoCompatibilityCheck( - clusterMetadata cluster.Metadata, - shardInfo *persistencespb.ShardInfo, -) *persistencespb.ShardInfo { - // TODO this section maintains the forward / backward compatibility - // should be removed once the migration is done - // also see ShardInfoToBlob - - allClusterInfo := clusterMetadata.GetAllClusterInfo() - shardInfo = loadShardInfoCompatibilityCheckWithoutReplication(shardInfo) - shardInfo = loadShardInfoCompatibilityCheckWithReplication(clusterMetadata.GetCurrentClusterName(), allClusterInfo, shardInfo) - - // clear QueueAckLevels to force new logic to only use QueueStates - shardInfo.QueueAckLevels = nil - return shardInfo -} - -func loadShardInfoCompatibilityCheckWithoutReplication( - shardInfo *persistencespb.ShardInfo, -) *persistencespb.ShardInfo { - for queueCategoryID, queueAckLevel := range shardInfo.QueueAckLevels { - if queueCategoryID == tasks.CategoryIDReplication { - continue - } - - queueCategory, ok := tasks.GetCategoryByID(queueCategoryID) - if !ok { - panic(fmt.Sprintf("unable to find queue category by queye category ID: %v", queueCategoryID)) - } - minCursor := convertPersistenceAckLevelToTaskKey(queueCategory.Type(), queueAckLevel.AckLevel) - if queueCategory.Type() == tasks.CategoryTypeImmediate && minCursor.TaskID > 0 { - // for immediate task type, the ack level is inclusive - // for scheduled task type, the ack level is exclusive - minCursor = minCursor.Next() - } - - queueState, ok := shardInfo.QueueStates[queueCategoryID] - if !ok || minCursor.CompareTo(ConvertFromPersistenceTaskKey(queueState.ExclusiveReaderHighWatermark)) > 0 { - queueState = &persistencespb.QueueState{ - ExclusiveReaderHighWatermark: ConvertToPersistenceTaskKey(minCursor), - ReaderStates: make(map[int64]*persistencespb.QueueReaderState), - } - shardInfo.QueueStates[queueCategoryID] = queueState - } - } - return shardInfo -} - -func loadShardInfoCompatibilityCheckWithReplication( - currentClusterName string, - allClusterInfo map[string]cluster.ClusterInformation, - shardInfo *persistencespb.ShardInfo, -) *persistencespb.ShardInfo { - shardInfo = trimShardInfo(allClusterInfo, shardInfo) - if 
shardInfo.QueueAckLevels == nil { - return shardInfo - } - queueAckLevel, ok := shardInfo.QueueAckLevels[tasks.CategoryIDReplication] - if !ok { - return shardInfo - } - - for clusterName, ackLevel := range queueAckLevel.ClusterAckLevel { - minCursor := convertPersistenceAckLevelToTaskKey(tasks.CategoryReplication.Type(), ackLevel).Next() - queueStates, ok := shardInfo.QueueStates[tasks.CategoryIDReplication] - if !ok { - queueStates = &persistencespb.QueueState{ - ExclusiveReaderHighWatermark: nil, - ReaderStates: map[int64]*persistencespb.QueueReaderState{}, - } - shardInfo.QueueStates[tasks.CategoryIDReplication] = queueStates - } - - for _, shardID := range common.MapShardID( - allClusterInfo[currentClusterName].ShardCount, - allClusterInfo[clusterName].ShardCount, - shardInfo.ShardId, - ) { - readerID := ReplicationReaderIDFromClusterShardID( - allClusterInfo[clusterName].InitialFailoverVersion, - shardID, - ) - readerState, ok := queueStates.ReaderStates[readerID] - if !ok || minCursor.CompareTo(ConvertFromPersistenceTaskKey(readerState.Scopes[0].Range.InclusiveMin)) > 0 { - queueStates.ReaderStates[readerID] = &persistencespb.QueueReaderState{ - Scopes: []*persistencespb.QueueSliceScope{{ - Range: &persistencespb.QueueSliceRange{ - InclusiveMin: ConvertToPersistenceTaskKey(minCursor), - ExclusiveMax: ConvertToPersistenceTaskKey( - convertPersistenceAckLevelToTaskKey( - tasks.CategoryReplication.Type(), - math.MaxInt64, - ), - ), - }, - Predicate: &persistencespb.Predicate{ - PredicateType: enumsspb.PREDICATE_TYPE_UNIVERSAL, - Attributes: &persistencespb.Predicate_UniversalPredicateAttributes{}, - }, - }}, - } - } - } - } - return shardInfo -} - -func storeShardInfoCompatibilityCheck( - clusterMetadata cluster.Metadata, - shardInfo *persistencespb.ShardInfo, -) *persistencespb.ShardInfo { - // TODO this section maintains the forward / backward compatibility - // should be removed once the migration is done - // also see ShardInfoFromBlob - allClusterInfo := clusterMetadata.GetAllClusterInfo() - shardInfo = storeShardInfoCompatibilityCheckWithoutReplication(shardInfo) - shardInfo = storeShardInfoCompatibilityCheckWithReplication(allClusterInfo, shardInfo) - return shardInfo -} - -func storeShardInfoCompatibilityCheckWithoutReplication( - shardInfo *persistencespb.ShardInfo, -) *persistencespb.ShardInfo { - for queueCategoryID, queueState := range shardInfo.QueueStates { - if queueCategoryID == tasks.CategoryIDReplication { - continue - } - - queueCategory, ok := tasks.GetCategoryByID(queueCategoryID) - if !ok { - panic(fmt.Sprintf("unable to find queue category by queye category ID: %v", queueCategoryID)) - } - - // for compatability, update ack level and cluster ack level as well - // so after rollback or disabling the feature, we won't load too many tombstones - minAckLevel := ConvertFromPersistenceTaskKey(queueState.ExclusiveReaderHighWatermark) - for _, readerState := range queueState.ReaderStates { - if len(readerState.Scopes) != 0 { - minAckLevel = tasks.MinKey( - minAckLevel, - ConvertFromPersistenceTaskKey(readerState.Scopes[0].Range.InclusiveMin), - ) - } - } - - if queueCategory.Type() == tasks.CategoryTypeImmediate && minAckLevel.TaskID > 0 { - // for immediate task type, the ack level is inclusive - // for scheduled task type, the ack level is exclusive - minAckLevel = minAckLevel.Prev() - } - persistenceAckLevel := convertTaskKeyToPersistenceAckLevel(queueCategory.Type(), minAckLevel) - - shardInfo.QueueAckLevels[queueCategoryID] = &persistencespb.QueueAckLevel{ - 
AckLevel: persistenceAckLevel, - ClusterAckLevel: make(map[string]int64), - } - } - return shardInfo -} - -func storeShardInfoCompatibilityCheckWithReplication( - allClusterInfo map[string]cluster.ClusterInformation, - shardInfo *persistencespb.ShardInfo, -) *persistencespb.ShardInfo { - shardInfo = trimShardInfo(allClusterInfo, shardInfo) - if shardInfo.QueueStates == nil { - return shardInfo - } - queueStates, ok := shardInfo.QueueStates[tasks.CategoryIDReplication] - if !ok { - return shardInfo - } - - for readerID, readerState := range queueStates.ReaderStates { - clusterID, _ := ReplicationReaderIDToClusterShardID(readerID) - clusterName, _, _ := ClusterNameInfoFromClusterID(allClusterInfo, clusterID) - queueAckLevel, ok := shardInfo.QueueAckLevels[tasks.CategoryIDReplication] - if !ok { - queueAckLevel = &persistencespb.QueueAckLevel{ - AckLevel: 0, - ClusterAckLevel: make(map[string]int64), - } - shardInfo.QueueAckLevels[tasks.CategoryIDReplication] = queueAckLevel - } - readerAckLevel := convertTaskKeyToPersistenceAckLevel( - tasks.CategoryReplication.Type(), - ConvertFromPersistenceTaskKey(readerState.Scopes[0].Range.InclusiveMin).Prev(), - ) - if ackLevel, ok := queueAckLevel.ClusterAckLevel[clusterName]; !ok { - queueAckLevel.ClusterAckLevel[clusterName] = readerAckLevel - } else if ackLevel > readerAckLevel { - queueAckLevel.ClusterAckLevel[clusterName] = readerAckLevel - } - } - return shardInfo -} - -func trimShardInfo( - allClusterInfo map[string]cluster.ClusterInformation, - shardInfo *persistencespb.ShardInfo, -) *persistencespb.ShardInfo { - // clean up replication info if cluster is disabled || missing - if shardInfo.QueueAckLevels != nil && shardInfo.QueueAckLevels[tasks.CategoryIDReplication] != nil { - for clusterName := range shardInfo.QueueAckLevels[tasks.CategoryIDReplication].ClusterAckLevel { - clusterInfo, ok := allClusterInfo[clusterName] - if !ok || !clusterInfo.Enabled { - delete(shardInfo.QueueAckLevels[tasks.CategoryIDReplication].ClusterAckLevel, clusterName) - } - } - if len(shardInfo.QueueAckLevels[tasks.CategoryIDReplication].ClusterAckLevel) == 0 { - delete(shardInfo.QueueAckLevels, tasks.CategoryIDReplication) - } - } - - if shardInfo.QueueStates != nil && shardInfo.QueueStates[tasks.CategoryIDReplication] != nil { - for readerID := range shardInfo.QueueStates[tasks.CategoryIDReplication].ReaderStates { - clusterID, _ := ReplicationReaderIDToClusterShardID(readerID) - _, clusterInfo, found := ClusterNameInfoFromClusterID(allClusterInfo, clusterID) - if !found || !clusterInfo.Enabled { - delete(shardInfo.QueueStates[tasks.CategoryIDReplication].ReaderStates, readerID) - } - } - if len(shardInfo.QueueStates[tasks.CategoryIDReplication].ReaderStates) == 0 { - delete(shardInfo.QueueStates, tasks.CategoryIDReplication) - } - } - return shardInfo -} - -func ClusterNameInfoFromClusterID( - allClusterInfo map[string]cluster.ClusterInformation, - clusterID int64, -) (string, cluster.ClusterInformation, bool) { - for name, info := range allClusterInfo { - if info.InitialFailoverVersion == clusterID { - return name, info, true - } - } - return "", cluster.ClusterInformation{}, false -} diff -Nru temporal-1.21.5-1/src/service/history/shard/compatibility_test.go temporal-1.22.5/src/service/history/shard/compatibility_test.go --- temporal-1.21.5-1/src/service/history/shard/compatibility_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/shard/compatibility_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,696 +0,0 @@ -// The MIT 
License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package shard - -import ( - "math" - "math/rand" - "testing" - "time" - - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - enumspb "go.temporal.io/api/enums/v1" - - enumsspb "go.temporal.io/server/api/enums/v1" - persistencespb "go.temporal.io/server/api/persistence/v1" - "go.temporal.io/server/common" - "go.temporal.io/server/common/cluster" - "go.temporal.io/server/common/persistence/serialization" - "go.temporal.io/server/common/primitives/timestamp" - "go.temporal.io/server/common/util" - "go.temporal.io/server/service/history/tasks" -) - -type ( - compatibilitySuite struct { - suite.Suite - *require.Assertions - } -) - -func TestCompatibilitySuite(t *testing.T) { - s := &compatibilitySuite{} - suite.Run(t, s) -} - -func (s *compatibilitySuite) SetupTest() { - s.Assertions = require.New(s.T()) -} - -func (s *compatibilitySuite) TeardownTest() { - s.Assertions = require.New(s.T()) -} - -func (s *compatibilitySuite) TestLoadShardInfoCompatibilityCheckWithoutReplication_OnlyQueueAckLevel() { - transferAckTaskID := rand.Int63() - timerAckTime := rand.Int63() - persistenceShardInfo := &persistencespb.ShardInfo{ - QueueAckLevels: map[int32]*persistencespb.QueueAckLevel{ - tasks.CategoryIDTransfer: { - AckLevel: transferAckTaskID, - ClusterAckLevel: map[string]int64{}, - }, - tasks.CategoryIDTimer: { - AckLevel: timerAckTime, - ClusterAckLevel: map[string]int64{}, - }, - }, - QueueStates: make(map[int32]*persistencespb.QueueState), - } - - expectedMemShardInfo := &persistencespb.ShardInfo{ - QueueAckLevels: map[int32]*persistencespb.QueueAckLevel{}, - QueueStates: map[int32]*persistencespb.QueueState{ - tasks.CategoryIDTransfer: { - ExclusiveReaderHighWatermark: &persistencespb.TaskKey{ - FireTime: timestamp.TimePtr(time.Unix(0, 0)), - TaskId: transferAckTaskID + 1, - }, - ReaderStates: map[int64]*persistencespb.QueueReaderState{}, - }, - tasks.CategoryIDTimer: { - ExclusiveReaderHighWatermark: &persistencespb.TaskKey{ - FireTime: timestamp.TimePtr(time.Unix(0, timerAckTime)), - TaskId: 0, - }, - ReaderStates: map[int64]*persistencespb.QueueReaderState{}, - }, - }, - } - actualMemShardInfo := loadShardInfoCompatibilityCheckWithoutReplication(copyShardInfo(persistenceShardInfo)) - actualMemShardInfo.QueueAckLevels = map[int32]*persistencespb.QueueAckLevel{} - 
s.EqualShardInfo(expectedMemShardInfo, actualMemShardInfo) -} - -func (s *compatibilitySuite) TestLoadShardInfoCompatibilityCheckWithoutReplication_OnlyQueueState() { - transferAckTaskID := rand.Int63() - timerAckTime := rand.Int63() - - persistenceShardInfo := &persistencespb.ShardInfo{ - QueueAckLevels: map[int32]*persistencespb.QueueAckLevel{}, - QueueStates: map[int32]*persistencespb.QueueState{ - tasks.CategoryIDTransfer: { - ExclusiveReaderHighWatermark: &persistencespb.TaskKey{ - FireTime: timestamp.TimePtr(time.Unix(0, 0)), - TaskId: transferAckTaskID + 1, - }, - ReaderStates: map[int64]*persistencespb.QueueReaderState{}, - }, - tasks.CategoryIDTimer: { - ExclusiveReaderHighWatermark: &persistencespb.TaskKey{ - FireTime: timestamp.TimePtr(time.Unix(0, timerAckTime)), - TaskId: 0, - }, - ReaderStates: map[int64]*persistencespb.QueueReaderState{}, - }, - }, - } - - expectedMemShardInfo := copyShardInfo(persistenceShardInfo) - actualMemShardInfo := loadShardInfoCompatibilityCheckWithoutReplication(copyShardInfo(persistenceShardInfo)) - s.EqualShardInfo(expectedMemShardInfo, actualMemShardInfo) -} - -func (s *compatibilitySuite) TestLoadShardInfoCompatibilityCheckWithoutReplication_Both() { - ackLevelTransferAckTaskID := rand.Int63() - ackLevelTimerAckTime := rand.Int63() - queueStateTransferAckTaskID := rand.Int63() - queueStateTimerAckTime := rand.Int63() - - persistenceShardInfo := &persistencespb.ShardInfo{ - QueueAckLevels: map[int32]*persistencespb.QueueAckLevel{ - tasks.CategoryIDTransfer: { - AckLevel: ackLevelTransferAckTaskID, - ClusterAckLevel: map[string]int64{}, - }, - tasks.CategoryIDTimer: { - AckLevel: ackLevelTimerAckTime, - ClusterAckLevel: map[string]int64{}, - }, - }, - QueueStates: map[int32]*persistencespb.QueueState{ - tasks.CategoryIDTransfer: { - ExclusiveReaderHighWatermark: &persistencespb.TaskKey{ - FireTime: timestamp.TimePtr(time.Unix(0, 0)), - TaskId: queueStateTransferAckTaskID + 1, - }, - ReaderStates: map[int64]*persistencespb.QueueReaderState{}, - }, - tasks.CategoryIDTimer: { - ExclusiveReaderHighWatermark: &persistencespb.TaskKey{ - FireTime: timestamp.TimePtr(time.Unix(0, queueStateTimerAckTime)), - TaskId: 0, - }, - ReaderStates: map[int64]*persistencespb.QueueReaderState{}, - }, - }, - } - - expectedMemShardInfo := &persistencespb.ShardInfo{ - QueueAckLevels: map[int32]*persistencespb.QueueAckLevel{}, - QueueStates: map[int32]*persistencespb.QueueState{ - tasks.CategoryIDTransfer: { - ExclusiveReaderHighWatermark: &persistencespb.TaskKey{ - FireTime: timestamp.TimePtr(time.Unix(0, 0)), - TaskId: util.Max(ackLevelTransferAckTaskID, queueStateTransferAckTaskID) + 1, - }, - ReaderStates: map[int64]*persistencespb.QueueReaderState{}, - }, - tasks.CategoryIDTimer: { - ExclusiveReaderHighWatermark: &persistencespb.TaskKey{ - FireTime: timestamp.TimePtr(time.Unix(0, util.Max(ackLevelTimerAckTime, queueStateTimerAckTime))), - TaskId: 0, - }, - ReaderStates: map[int64]*persistencespb.QueueReaderState{}, - }, - }, - } - actualMemShardInfo := loadShardInfoCompatibilityCheckWithoutReplication(copyShardInfo(persistenceShardInfo)) - actualMemShardInfo.QueueAckLevels = map[int32]*persistencespb.QueueAckLevel{} - s.EqualShardInfo(expectedMemShardInfo, actualMemShardInfo) -} - -func (s *compatibilitySuite) TestStoreShardInfoCompatibilityCheckWithoutReplication_NoOverride() { - transferAckTaskID := rand.Int63() - timerAckTime := rand.Int63() - - memShardInfo := &persistencespb.ShardInfo{ - QueueAckLevels: map[int32]*persistencespb.QueueAckLevel{}, - QueueStates: 
map[int32]*persistencespb.QueueState{ - tasks.CategoryIDTransfer: { - ExclusiveReaderHighWatermark: &persistencespb.TaskKey{ - FireTime: timestamp.TimePtr(time.Unix(0, 0)), - TaskId: transferAckTaskID + 1, - }, - ReaderStates: map[int64]*persistencespb.QueueReaderState{}, - }, - tasks.CategoryIDTimer: { - ExclusiveReaderHighWatermark: &persistencespb.TaskKey{ - FireTime: timestamp.TimePtr(time.Unix(0, timerAckTime)), - TaskId: 0, - }, - ReaderStates: map[int64]*persistencespb.QueueReaderState{}, - }, - }, - } - - expectedMemShardInfo := copyShardInfo(memShardInfo) - expectedMemShardInfo.QueueAckLevels = map[int32]*persistencespb.QueueAckLevel{ - tasks.CategoryIDTransfer: { - AckLevel: transferAckTaskID, - ClusterAckLevel: map[string]int64{}, - }, - tasks.CategoryIDTimer: { - AckLevel: timerAckTime, - ClusterAckLevel: map[string]int64{}, - }, - } - actualMemShardInfo := storeShardInfoCompatibilityCheckWithoutReplication(copyShardInfo(memShardInfo)) - s.EqualShardInfo(expectedMemShardInfo, actualMemShardInfo) -} - -func (s *compatibilitySuite) TestStoreShardInfoCompatibilityCheckWithoutReplication_Override() { - transferAckTaskID := rand.Int63() - timerAckTime := rand.Int63() - - memShardInfo := &persistencespb.ShardInfo{ - QueueAckLevels: map[int32]*persistencespb.QueueAckLevel{}, - QueueStates: map[int32]*persistencespb.QueueState{ - tasks.CategoryIDTransfer: { - ExclusiveReaderHighWatermark: &persistencespb.TaskKey{ - FireTime: timestamp.TimePtr(time.Unix(0, 0)), - TaskId: transferAckTaskID + 1 + rand.Int63n(100), - }, - ReaderStates: map[int64]*persistencespb.QueueReaderState{ - 0: { - Scopes: []*persistencespb.QueueSliceScope{{ - Range: &persistencespb.QueueSliceRange{ - InclusiveMin: &persistencespb.TaskKey{ - FireTime: timestamp.TimePtr(time.Unix(0, 0)), - TaskId: transferAckTaskID + 1, - }, - ExclusiveMax: nil, // not used - }, - Predicate: &persistencespb.Predicate{ - PredicateType: enumsspb.PREDICATE_TYPE_UNIVERSAL, - Attributes: &persistencespb.Predicate_UniversalPredicateAttributes{}, - }, - }}, - }, - }, - }, - tasks.CategoryIDTimer: { - ExclusiveReaderHighWatermark: &persistencespb.TaskKey{ - FireTime: timestamp.TimePtr(time.Unix(0, timerAckTime+rand.Int63n(100))), - TaskId: 0, - }, - ReaderStates: map[int64]*persistencespb.QueueReaderState{ - 0: { - Scopes: []*persistencespb.QueueSliceScope{{ - Range: &persistencespb.QueueSliceRange{ - InclusiveMin: &persistencespb.TaskKey{ - FireTime: timestamp.TimePtr(time.Unix(0, timerAckTime)), - TaskId: 0, - }, - ExclusiveMax: nil, // not used - }, - Predicate: &persistencespb.Predicate{ - PredicateType: enumsspb.PREDICATE_TYPE_UNIVERSAL, - Attributes: &persistencespb.Predicate_UniversalPredicateAttributes{}, - }, - }}, - }, - }, - }, - }, - } - - expectedMemShardInfo := copyShardInfo(memShardInfo) - expectedMemShardInfo.QueueAckLevels = map[int32]*persistencespb.QueueAckLevel{ - tasks.CategoryIDTransfer: { - AckLevel: transferAckTaskID, - ClusterAckLevel: map[string]int64{}, - }, - tasks.CategoryIDTimer: { - AckLevel: timerAckTime, - ClusterAckLevel: map[string]int64{}, - }, - } - actualMemShardInfo := storeShardInfoCompatibilityCheckWithoutReplication(copyShardInfo(memShardInfo)) - s.EqualShardInfo(expectedMemShardInfo, actualMemShardInfo) -} - -func (s *compatibilitySuite) TestLoadShardInfoCompatibilityCheckWithReplication_OnlyQueueAckLevel_8_4() { - allClusterInfo := cluster.TestAllClusterInfo - shardID := rand.Int31n(allClusterInfo[cluster.TestCurrentClusterName].ShardCount) + 1 - replicationAckTaskID := rand.Int63() - 
persistenceShardInfo := &persistencespb.ShardInfo{ - ShardId: shardID, - QueueAckLevels: map[int32]*persistencespb.QueueAckLevel{ - tasks.CategoryIDReplication: { - AckLevel: 0, - ClusterAckLevel: map[string]int64{ - cluster.TestAlternativeClusterName: replicationAckTaskID, - }, - }, - }, - QueueStates: make(map[int32]*persistencespb.QueueState), - } - - expectedMemShardInfo := &persistencespb.ShardInfo{ - ShardId: shardID, - QueueAckLevels: map[int32]*persistencespb.QueueAckLevel{}, - QueueStates: map[int32]*persistencespb.QueueState{ - tasks.CategoryIDReplication: { - ExclusiveReaderHighWatermark: nil, - ReaderStates: map[int64]*persistencespb.QueueReaderState{ - ReplicationReaderIDFromClusterShardID(cluster.TestAlternativeClusterInitialFailoverVersion, common.MapShardID( - allClusterInfo[cluster.TestCurrentClusterName].ShardCount, - allClusterInfo[cluster.TestAlternativeClusterName].ShardCount, - shardID, - )[0]): { - Scopes: []*persistencespb.QueueSliceScope{{ - Range: &persistencespb.QueueSliceRange{ - InclusiveMin: &persistencespb.TaskKey{ - FireTime: timestamp.TimePtr(time.Unix(0, 0)), - TaskId: replicationAckTaskID + 1, - }, - ExclusiveMax: &persistencespb.TaskKey{ - FireTime: timestamp.TimePtr(time.Unix(0, 0)), - TaskId: math.MaxInt64, - }, - }, - Predicate: &persistencespb.Predicate{ - PredicateType: enumsspb.PREDICATE_TYPE_UNIVERSAL, - Attributes: &persistencespb.Predicate_UniversalPredicateAttributes{}, - }, - }}, - }, - }, - }, - }, - } - actualMemShardInfo := loadShardInfoCompatibilityCheckWithReplication(cluster.TestCurrentClusterName, allClusterInfo, copyShardInfo(persistenceShardInfo)) - actualMemShardInfo.QueueAckLevels = map[int32]*persistencespb.QueueAckLevel{} - s.EqualShardInfo(expectedMemShardInfo, actualMemShardInfo) -} - -func (s *compatibilitySuite) TestLoadShardInfoCompatibilityCheckWithReplication_OnlyQueueAckLevel_4_8() { - allClusterInfo := cluster.TestAllClusterInfo - shardID := rand.Int31n(allClusterInfo[cluster.TestAlternativeClusterName].ShardCount) + 1 - replicationAckTaskID := rand.Int63() - persistenceShardInfo := &persistencespb.ShardInfo{ - ShardId: shardID, - QueueAckLevels: map[int32]*persistencespb.QueueAckLevel{ - tasks.CategoryIDReplication: { - AckLevel: 0, - ClusterAckLevel: map[string]int64{ - cluster.TestCurrentClusterName: replicationAckTaskID, - }, - }, - }, - QueueStates: make(map[int32]*persistencespb.QueueState), - } - - expectedMemShardInfo := &persistencespb.ShardInfo{ - ShardId: shardID, - QueueAckLevels: map[int32]*persistencespb.QueueAckLevel{}, - QueueStates: map[int32]*persistencespb.QueueState{ - tasks.CategoryIDReplication: { - ExclusiveReaderHighWatermark: nil, - ReaderStates: map[int64]*persistencespb.QueueReaderState{ - ReplicationReaderIDFromClusterShardID(cluster.TestCurrentClusterInitialFailoverVersion, common.MapShardID( - allClusterInfo[cluster.TestAlternativeClusterName].ShardCount, - allClusterInfo[cluster.TestCurrentClusterName].ShardCount, - shardID, - )[0]): { - Scopes: []*persistencespb.QueueSliceScope{{ - Range: &persistencespb.QueueSliceRange{ - InclusiveMin: &persistencespb.TaskKey{ - FireTime: timestamp.TimePtr(time.Unix(0, 0)), - TaskId: replicationAckTaskID + 1, - }, - ExclusiveMax: &persistencespb.TaskKey{ - FireTime: timestamp.TimePtr(time.Unix(0, 0)), - TaskId: math.MaxInt64, - }, - }, - Predicate: &persistencespb.Predicate{ - PredicateType: enumsspb.PREDICATE_TYPE_UNIVERSAL, - Attributes: &persistencespb.Predicate_UniversalPredicateAttributes{}, - }, - }}, - }, - 
ReplicationReaderIDFromClusterShardID(cluster.TestCurrentClusterInitialFailoverVersion, common.MapShardID( - allClusterInfo[cluster.TestAlternativeClusterName].ShardCount, - allClusterInfo[cluster.TestCurrentClusterName].ShardCount, - shardID, - )[1]): { - Scopes: []*persistencespb.QueueSliceScope{{ - Range: &persistencespb.QueueSliceRange{ - InclusiveMin: &persistencespb.TaskKey{ - FireTime: timestamp.TimePtr(time.Unix(0, 0)), - TaskId: replicationAckTaskID + 1, - }, - ExclusiveMax: &persistencespb.TaskKey{ - FireTime: timestamp.TimePtr(time.Unix(0, 0)), - TaskId: math.MaxInt64, - }, - }, - Predicate: &persistencespb.Predicate{ - PredicateType: enumsspb.PREDICATE_TYPE_UNIVERSAL, - Attributes: &persistencespb.Predicate_UniversalPredicateAttributes{}, - }, - }}, - }, - }, - }, - }, - } - actualMemShardInfo := loadShardInfoCompatibilityCheckWithReplication(cluster.TestAlternativeClusterName, allClusterInfo, copyShardInfo(persistenceShardInfo)) - actualMemShardInfo.QueueAckLevels = map[int32]*persistencespb.QueueAckLevel{} - s.EqualShardInfo(expectedMemShardInfo, actualMemShardInfo) -} - -func (s *compatibilitySuite) TestLoadShardInfoCompatibilityCheckWithReplication_OnlyQueueState() { - allClusterInfo := cluster.TestAllClusterInfo - shardID := rand.Int31n(allClusterInfo[cluster.TestCurrentClusterName].ShardCount) + 1 - replicationAckTaskID := rand.Int63() - persistenceShardInfo := &persistencespb.ShardInfo{ - ShardId: shardID, - QueueAckLevels: map[int32]*persistencespb.QueueAckLevel{}, - QueueStates: map[int32]*persistencespb.QueueState{ - tasks.CategoryIDReplication: { - ExclusiveReaderHighWatermark: nil, - ReaderStates: map[int64]*persistencespb.QueueReaderState{ - ReplicationReaderIDFromClusterShardID(cluster.TestAlternativeClusterInitialFailoverVersion, common.MapShardID( - allClusterInfo[cluster.TestCurrentClusterName].ShardCount, - allClusterInfo[cluster.TestAlternativeClusterName].ShardCount, - shardID, - )[0]): { - Scopes: []*persistencespb.QueueSliceScope{{ - Range: &persistencespb.QueueSliceRange{ - InclusiveMin: &persistencespb.TaskKey{ - FireTime: timestamp.TimePtr(time.Unix(0, 0)), - TaskId: replicationAckTaskID + 1, - }, - ExclusiveMax: &persistencespb.TaskKey{ - FireTime: timestamp.TimePtr(time.Unix(0, 0)), - TaskId: math.MaxInt64, - }, - }, - Predicate: &persistencespb.Predicate{ - PredicateType: enumsspb.PREDICATE_TYPE_UNIVERSAL, - Attributes: &persistencespb.Predicate_UniversalPredicateAttributes{}, - }, - }}, - }, - }, - }, - }, - } - - expectedMemShardInfo := copyShardInfo(persistenceShardInfo) - actualMemShardInfo := loadShardInfoCompatibilityCheckWithReplication(cluster.TestCurrentClusterName, allClusterInfo, copyShardInfo(persistenceShardInfo)) - actualMemShardInfo.QueueAckLevels = map[int32]*persistencespb.QueueAckLevel{} - s.EqualShardInfo(expectedMemShardInfo, actualMemShardInfo) -} - -func (s *compatibilitySuite) TestLoadShardInfoCompatibilityCheckWithReplication_Both() { - allClusterInfo := cluster.TestAllClusterInfo - shardID := rand.Int31n(allClusterInfo[cluster.TestCurrentClusterName].ShardCount) + 1 - ackLevelReplicationAckTaskID := rand.Int63() - queueStateReplicationAckTaskID := rand.Int63() - persistenceShardInfo := &persistencespb.ShardInfo{ - ShardId: shardID, - QueueAckLevels: map[int32]*persistencespb.QueueAckLevel{ - tasks.CategoryIDReplication: { - AckLevel: 0, - ClusterAckLevel: map[string]int64{ - cluster.TestAlternativeClusterName: ackLevelReplicationAckTaskID, - }, - }, - }, - QueueStates: map[int32]*persistencespb.QueueState{ - 
tasks.CategoryIDReplication: { - ExclusiveReaderHighWatermark: nil, - ReaderStates: map[int64]*persistencespb.QueueReaderState{ - ReplicationReaderIDFromClusterShardID(cluster.TestAlternativeClusterInitialFailoverVersion, common.MapShardID( - allClusterInfo[cluster.TestCurrentClusterName].ShardCount, - allClusterInfo[cluster.TestAlternativeClusterName].ShardCount, - shardID, - )[0]): { - Scopes: []*persistencespb.QueueSliceScope{{ - Range: &persistencespb.QueueSliceRange{ - InclusiveMin: &persistencespb.TaskKey{ - FireTime: timestamp.TimePtr(time.Unix(0, 0)), - TaskId: queueStateReplicationAckTaskID + 1, - }, - ExclusiveMax: &persistencespb.TaskKey{ - FireTime: timestamp.TimePtr(time.Unix(0, 0)), - TaskId: math.MaxInt64, - }, - }, - Predicate: &persistencespb.Predicate{ - PredicateType: enumsspb.PREDICATE_TYPE_UNIVERSAL, - Attributes: &persistencespb.Predicate_UniversalPredicateAttributes{}, - }, - }}, - }, - }, - }, - }, - } - - expectedMemShardInfo := &persistencespb.ShardInfo{ - ShardId: shardID, - QueueAckLevels: map[int32]*persistencespb.QueueAckLevel{}, - QueueStates: map[int32]*persistencespb.QueueState{ - tasks.CategoryIDReplication: { - ExclusiveReaderHighWatermark: nil, - ReaderStates: map[int64]*persistencespb.QueueReaderState{ - ReplicationReaderIDFromClusterShardID(cluster.TestAlternativeClusterInitialFailoverVersion, common.MapShardID( - allClusterInfo[cluster.TestCurrentClusterName].ShardCount, - allClusterInfo[cluster.TestAlternativeClusterName].ShardCount, - shardID, - )[0]): { - Scopes: []*persistencespb.QueueSliceScope{{ - Range: &persistencespb.QueueSliceRange{ - InclusiveMin: &persistencespb.TaskKey{ - FireTime: timestamp.TimePtr(time.Unix(0, 0)), - TaskId: util.Max(ackLevelReplicationAckTaskID, queueStateReplicationAckTaskID) + 1, - }, - ExclusiveMax: &persistencespb.TaskKey{ - FireTime: timestamp.TimePtr(time.Unix(0, 0)), - TaskId: math.MaxInt64, - }, - }, - Predicate: &persistencespb.Predicate{ - PredicateType: enumsspb.PREDICATE_TYPE_UNIVERSAL, - Attributes: &persistencespb.Predicate_UniversalPredicateAttributes{}, - }, - }}, - }, - }, - }, - }, - } - actualMemShardInfo := loadShardInfoCompatibilityCheckWithReplication(cluster.TestCurrentClusterName, allClusterInfo, copyShardInfo(persistenceShardInfo)) - actualMemShardInfo.QueueAckLevels = map[int32]*persistencespb.QueueAckLevel{} - s.EqualShardInfo(expectedMemShardInfo, actualMemShardInfo) -} - -func (s *compatibilitySuite) TestStoreShardInfoCompatibilityCheckWithReplication_NoOverride() { - allClusterInfo := cluster.TestAllClusterInfo - shardID := rand.Int31() - replicationAckTaskID := rand.Int63() - memShardInfo := &persistencespb.ShardInfo{ - ShardId: shardID, - QueueAckLevels: map[int32]*persistencespb.QueueAckLevel{}, - QueueStates: map[int32]*persistencespb.QueueState{ - tasks.CategoryIDReplication: { - ExclusiveReaderHighWatermark: nil, - ReaderStates: map[int64]*persistencespb.QueueReaderState{ - ReplicationReaderIDFromClusterShardID(cluster.TestAlternativeClusterInitialFailoverVersion, shardID): { - Scopes: []*persistencespb.QueueSliceScope{{ - Range: &persistencespb.QueueSliceRange{ - InclusiveMin: &persistencespb.TaskKey{ - FireTime: timestamp.TimePtr(time.Unix(0, 0)), - TaskId: replicationAckTaskID + 1, - }, - ExclusiveMax: &persistencespb.TaskKey{ - FireTime: timestamp.TimePtr(time.Unix(0, 0)), - TaskId: math.MaxInt64, - }, - }, - Predicate: &persistencespb.Predicate{ - PredicateType: enumsspb.PREDICATE_TYPE_UNIVERSAL, - Attributes: &persistencespb.Predicate_UniversalPredicateAttributes{}, - }, - }}, 
- }, - }, - }, - }, - } - - expectedMemShardInfo := copyShardInfo(memShardInfo) - expectedMemShardInfo.QueueAckLevels = map[int32]*persistencespb.QueueAckLevel{ - tasks.CategoryIDReplication: { - AckLevel: 0, - ClusterAckLevel: map[string]int64{ - cluster.TestAlternativeClusterName: replicationAckTaskID, - }, - }, - } - actualMemShardInfo := storeShardInfoCompatibilityCheckWithReplication(allClusterInfo, copyShardInfo(memShardInfo)) - s.EqualShardInfo(expectedMemShardInfo, actualMemShardInfo) -} - -func (s *compatibilitySuite) TestStoreShardInfoCompatibilityCheckWithReplication_Override() { - allClusterInfo := cluster.TestAllClusterInfo - shardID := rand.Int31() - replicationAckTaskID := rand.Int63() - memShardInfo := &persistencespb.ShardInfo{ - ShardId: shardID, - QueueAckLevels: map[int32]*persistencespb.QueueAckLevel{}, - QueueStates: map[int32]*persistencespb.QueueState{ - tasks.CategoryIDReplication: { - ExclusiveReaderHighWatermark: nil, - ReaderStates: map[int64]*persistencespb.QueueReaderState{ - ReplicationReaderIDFromClusterShardID(cluster.TestAlternativeClusterInitialFailoverVersion, shardID): { - Scopes: []*persistencespb.QueueSliceScope{{ - Range: &persistencespb.QueueSliceRange{ - InclusiveMin: &persistencespb.TaskKey{ - FireTime: timestamp.TimePtr(time.Unix(0, 0)), - TaskId: replicationAckTaskID + 1, - }, - ExclusiveMax: &persistencespb.TaskKey{ - FireTime: timestamp.TimePtr(time.Unix(0, 0)), - TaskId: math.MaxInt64, - }, - }, - Predicate: &persistencespb.Predicate{ - PredicateType: enumsspb.PREDICATE_TYPE_UNIVERSAL, - Attributes: &persistencespb.Predicate_UniversalPredicateAttributes{}, - }, - }}, - }, - ReplicationReaderIDFromClusterShardID(cluster.TestAlternativeClusterInitialFailoverVersion, shardID+1): { - Scopes: []*persistencespb.QueueSliceScope{{ - Range: &persistencespb.QueueSliceRange{ - InclusiveMin: &persistencespb.TaskKey{ - FireTime: timestamp.TimePtr(time.Unix(0, 0)), - TaskId: replicationAckTaskID + 1 + rand.Int63n(100), - }, - ExclusiveMax: &persistencespb.TaskKey{ - FireTime: timestamp.TimePtr(time.Unix(0, 0)), - TaskId: math.MaxInt64, - }, - }, - Predicate: &persistencespb.Predicate{ - PredicateType: enumsspb.PREDICATE_TYPE_UNIVERSAL, - Attributes: &persistencespb.Predicate_UniversalPredicateAttributes{}, - }, - }}, - }, - }, - }, - }, - } - - expectedMemShardInfo := copyShardInfo(memShardInfo) - expectedMemShardInfo.QueueAckLevels = map[int32]*persistencespb.QueueAckLevel{ - tasks.CategoryIDReplication: { - AckLevel: 0, - ClusterAckLevel: map[string]int64{ - cluster.TestAlternativeClusterName: replicationAckTaskID, - }, - }, - } - actualMemShardInfo := storeShardInfoCompatibilityCheckWithReplication(allClusterInfo, copyShardInfo(memShardInfo)) - s.EqualShardInfo(expectedMemShardInfo, actualMemShardInfo) -} - -func (s *compatibilitySuite) EqualShardInfo( - expected *persistencespb.ShardInfo, - actual *persistencespb.ShardInfo, -) { - // this helper function exists to deal with time comparison issue - - serializer := serialization.NewSerializer() - expectedBlob, err := serializer.ShardInfoToBlob(expected, enumspb.ENCODING_TYPE_PROTO3) - s.NoError(err) - expected, err = serializer.ShardInfoFromBlob(expectedBlob) - s.NoError(err) - - actualBlob, err := serializer.ShardInfoToBlob(actual, enumspb.ENCODING_TYPE_PROTO3) - s.NoError(err) - actual, err = serializer.ShardInfoFromBlob(actualBlob) - s.NoError(err) - - s.Equal(expected, actual) -} diff -Nru temporal-1.21.5-1/src/service/history/shard/context.go temporal-1.22.5/src/service/history/shard/context.go --- 
temporal-1.21.5-1/src/service/history/shard/context.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/shard/context.go 2024-02-23 09:45:43.000000000 +0000 @@ -57,7 +57,6 @@ Context interface { GetShardID() int32 GetRangeID() int64 - IsValid() bool GetOwner() string GetExecutionManager() persistence.ExecutionManager GetNamespaceRegistry() namespace.Registry @@ -126,8 +125,9 @@ // the Controller. ControllableContext interface { Context - common.Pingable + + IsValid() bool FinishStop() } ) diff -Nru temporal-1.21.5-1/src/service/history/shard/context_factory.go temporal-1.22.5/src/service/history/shard/context_factory.go --- temporal-1.21.5-1/src/service/history/shard/context_factory.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/shard/context_factory.go 2024-02-23 09:45:43.000000000 +0000 @@ -27,7 +27,6 @@ import ( "go.uber.org/fx" - "go.temporal.io/server/api/historyservice/v1" "go.temporal.io/server/client" "go.temporal.io/server/common/archiver" "go.temporal.io/server/common/clock" @@ -38,6 +37,7 @@ "go.temporal.io/server/common/namespace" "go.temporal.io/server/common/persistence" "go.temporal.io/server/common/persistence/serialization" + "go.temporal.io/server/common/resource" "go.temporal.io/server/common/searchattribute" "go.temporal.io/server/service/history/configs" ) @@ -59,7 +59,7 @@ ClusterMetadata cluster.Metadata Config *configs.Config EngineFactory EngineFactory - HistoryClient historyservice.HistoryServiceClient + HistoryClient resource.HistoryClient HistoryServiceResolver membership.ServiceResolver HostInfoProvider membership.HostInfoProvider Logger log.Logger diff -Nru temporal-1.21.5-1/src/service/history/shard/context_impl.go temporal-1.22.5/src/service/history/shard/context_impl.go --- temporal-1.21.5-1/src/service/history/shard/context_impl.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/shard/context_impl.go 2024-02-23 09:45:43.000000000 +0000 @@ -418,7 +418,7 @@ ackTimestamp time.Time, ) error { clusterID, shardID := ReplicationReaderIDToClusterShardID(readerID) - clusterName, _, ok := ClusterNameInfoFromClusterID(s.clusterMetadata.GetAllClusterInfo(), clusterID) + clusterName, _, ok := clusterNameInfoFromClusterID(s.clusterMetadata.GetAllClusterInfo(), clusterID) if !ok { // cluster is not present in cluster metadata map return serviceerror.NewInternal(fmt.Sprintf("unknown cluster ID: %v", clusterID)) @@ -1120,7 +1120,7 @@ } func (s *ContextImpl) renewRangeLocked(isStealing bool) error { - updatedShardInfo := storeShardInfoCompatibilityCheck(s.clusterMetadata, copyShardInfo(s.shardInfo)) + updatedShardInfo := trimShardInfo(s.clusterMetadata.GetAllClusterInfo(), copyShardInfo(s.shardInfo)) updatedShardInfo.RangeId++ if isStealing { updatedShardInfo.StolenSinceRenew++ @@ -1154,7 +1154,7 @@ s.taskSequenceNumber = updatedShardInfo.GetRangeId() << s.config.RangeSizeBits s.maxTaskSequenceNumber = (updatedShardInfo.GetRangeId() + 1) << s.config.RangeSizeBits s.immediateTaskExclusiveMaxReadLevel = s.taskSequenceNumber - s.shardInfo = loadShardInfoCompatibilityCheck(s.clusterMetadata, copyShardInfo(updatedShardInfo)) + s.shardInfo = trimShardInfo(s.clusterMetadata.GetAllClusterInfo(), copyShardInfo(updatedShardInfo)) return nil } @@ -1176,7 +1176,7 @@ if s.lastUpdated.Add(s.config.ShardUpdateMinInterval()).After(now) { return nil } - updatedShardInfo := storeShardInfoCompatibilityCheck(s.clusterMetadata, copyShardInfo(s.shardInfo)) + updatedShardInfo := 
trimShardInfo(s.clusterMetadata.GetAllClusterInfo(), copyShardInfo(s.shardInfo)) // since linter is against any logging control ¯\_(ツ)_/¯, e.g. // "flag-parameter: parameter 'verboseLogging' seems to be a control flag, avoid control coupling (revive)" var logger log.Logger = log.NewNoopLogger() @@ -1667,7 +1667,7 @@ return err } *ownershipChanged = resp.ShardInfo.Owner != s.owner - shardInfo := loadShardInfoCompatibilityCheck(s.clusterMetadata, copyShardInfo(resp.ShardInfo)) + shardInfo := trimShardInfo(s.clusterMetadata.GetAllClusterInfo(), copyShardInfo(resp.ShardInfo)) shardInfo.Owner = s.owner // initialize the cluster current time to be the same as ack level @@ -1957,8 +1957,7 @@ } shardContext.eventsCache = events.NewEventsCache( shardContext.GetShardID(), - shardContext.GetConfig().EventsCacheInitialSize(), - shardContext.GetConfig().EventsCacheMaxSize(), + shardContext.GetConfig().EventsCacheMaxSizeBytes(), shardContext.GetConfig().EventsCacheTTL(), shardContext.GetExecutionManager(), false, @@ -1971,13 +1970,6 @@ // TODO: why do we need a deep copy here? func copyShardInfo(shardInfo *persistencespb.ShardInfo) *persistencespb.ShardInfo { - queueAckLevels := make(map[int32]*persistencespb.QueueAckLevel) - for category, ackLevels := range shardInfo.QueueAckLevels { - queueAckLevels[category] = &persistencespb.QueueAckLevel{ - AckLevel: ackLevels.AckLevel, - ClusterAckLevel: maps.Clone(ackLevels.ClusterAckLevel), - } - } // need to ser/de to make a deep copy of queue state queueStates := make(map[int32]*persistencespb.QueueState, len(shardInfo.QueueStates)) for k, v := range shardInfo.QueueStates { @@ -1993,7 +1985,6 @@ StolenSinceRenew: shardInfo.StolenSinceRenew, ReplicationDlqAckLevel: maps.Clone(shardInfo.ReplicationDlqAckLevel), UpdateTime: shardInfo.UpdateTime, - QueueAckLevels: queueAckLevels, QueueStates: queueStates, } } @@ -2100,3 +2091,34 @@ return true } } + +func trimShardInfo( + allClusterInfo map[string]cluster.ClusterInformation, + shardInfo *persistencespb.ShardInfo, +) *persistencespb.ShardInfo { + if shardInfo.QueueStates != nil && shardInfo.QueueStates[tasks.CategoryIDReplication] != nil { + for readerID := range shardInfo.QueueStates[tasks.CategoryIDReplication].ReaderStates { + clusterID, _ := ReplicationReaderIDToClusterShardID(readerID) + _, clusterInfo, found := clusterNameInfoFromClusterID(allClusterInfo, clusterID) + if !found || !clusterInfo.Enabled { + delete(shardInfo.QueueStates[tasks.CategoryIDReplication].ReaderStates, readerID) + } + } + if len(shardInfo.QueueStates[tasks.CategoryIDReplication].ReaderStates) == 0 { + delete(shardInfo.QueueStates, tasks.CategoryIDReplication) + } + } + return shardInfo +} + +func clusterNameInfoFromClusterID( + allClusterInfo map[string]cluster.ClusterInformation, + clusterID int64, +) (string, cluster.ClusterInformation, bool) { + for name, info := range allClusterInfo { + if info.InitialFailoverVersion == clusterID { + return name, info, true + } + } + return "", cluster.ClusterInformation{}, false +} diff -Nru temporal-1.21.5-1/src/service/history/shard/context_mock.go temporal-1.22.5/src/service/history/shard/context_mock.go --- temporal-1.21.5-1/src/service/history/shard/context_mock.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/shard/context_mock.go 2024-02-23 09:45:43.000000000 +0000 @@ -594,20 +594,6 @@ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetWorkflowExecution", reflect.TypeOf((*MockContext)(nil).GetWorkflowExecution), ctx, request) } -// IsValid mocks base 
method. -func (m *MockContext) IsValid() bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "IsValid") - ret0, _ := ret[0].(bool) - return ret0 -} - -// IsValid indicates an expected call of IsValid. -func (mr *MockContextMockRecorder) IsValid() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsValid", reflect.TypeOf((*MockContext)(nil).IsValid)) -} - // NewVectorClock mocks base method. func (m *MockContext) NewVectorClock() (*v11.VectorClock, error) { m.ctrl.T.Helper() diff -Nru temporal-1.21.5-1/src/service/history/shard/context_test.go temporal-1.22.5/src/service/history/shard/context_test.go --- temporal-1.21.5-1/src/service/history/shard/context_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/shard/context_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -415,9 +415,8 @@ s.mockShard.state = contextStateAcquiring s.mockShard.acquireShardRetryPolicy = backoff.NewExponentialRetryPolicy(time.Nanosecond). WithMaximumAttempts(5) - // TODO: make this 5 times instead of 6 when retry policy is fixed s.mockShardManager.EXPECT().UpdateShard(gomock.Any(), gomock.Any()). - Return(fmt.Errorf("temp error")).Times(6) + Return(fmt.Errorf("temp error")).Times(5) s.mockShard.acquireShard() diff -Nru temporal-1.21.5-1/src/service/history/shard/context_testutil.go temporal-1.22.5/src/service/history/shard/context_testutil.go --- temporal-1.21.5-1/src/service/history/shard/context_testutil.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/shard/context_testutil.go 2024-02-23 09:45:43.000000000 +0000 @@ -72,13 +72,9 @@ eventsCache := events.NewMockCache(ctrl) hostInfoProvider := resourceTest.GetHostInfoProvider() lifecycleCtx, lifecycleCancel := context.WithCancel(context.Background()) - if shardInfo.QueueAckLevels == nil { - shardInfo.QueueAckLevels = make(map[int32]*persistencespb.QueueAckLevel) - } if shardInfo.QueueStates == nil { shardInfo.QueueStates = make(map[int32]*persistencespb.QueueState) } - shardInfo = loadShardInfoCompatibilityCheckWithoutReplication(shardInfo) shard := &ContextImpl{ shardID: shardInfo.GetShardId(), owner: shardInfo.GetOwner(), diff -Nru temporal-1.21.5-1/src/service/history/shard/controller.go temporal-1.22.5/src/service/history/shard/controller.go --- temporal-1.21.5-1/src/service/history/shard/controller.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/shard/controller.go 2024-02-23 09:45:43.000000000 +0000 @@ -33,12 +33,13 @@ type ( Controller interface { - common.Daemon common.Pingable GetShardByID(shardID int32) (Context, error) GetShardByNamespaceWorkflow(namespaceID namespace.ID, workflowID string) (Context, error) CloseShardByID(shardID int32) ShardIDs() []int32 + Start() + Stop() } ) diff -Nru temporal-1.21.5-1/src/service/history/shard/controller_impl.go temporal-1.22.5/src/service/history/shard/controller_impl.go --- temporal-1.21.5-1/src/service/history/shard/controller_impl.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/shard/controller_impl.go 2024-02-23 09:45:43.000000000 +0000 @@ -50,7 +50,7 @@ ) const ( - shardLingerMaxTimeLimit = 5 * time.Second + shardLingerMaxTimeLimit = 1 * time.Minute ) var ( @@ -75,6 +75,14 @@ ownership *ownership status int32 taggedMetricsHandler metrics.Handler + // shardCountSubscriptions is a set of subscriptions that receive shard count updates whenever the set of + // shards that this controller owns changes. 
+ shardCountSubscriptions map[*shardCountSubscription]struct{} + } + // shardCountSubscription is a subscription to shard count updates. + shardCountSubscription struct { + controller *ControllerImpl + ch chan int } ) @@ -87,7 +95,7 @@ metricsHandler metrics.Handler, hostInfoProvider membership.HostInfoProvider, contextFactory ContextFactory, -) Controller { +) *ControllerImpl { hostIdentity := hostInfoProvider.HostInfo().Identity() contextTaggedLogger := log.With(logger, tag.ComponentShardController, tag.Address(hostIdentity)) taggedMetricsHandler := metricsHandler.WithTags(metrics.OperationTag(metrics.HistoryShardControllerScope)) @@ -101,13 +109,14 @@ ) c := &ControllerImpl{ - config: config, - contextFactory: contextFactory, - contextTaggedLogger: contextTaggedLogger, - historyShards: make(map[int32]ControllableContext), - hostInfoProvider: hostInfoProvider, - ownership: ownership, - taggedMetricsHandler: taggedMetricsHandler, + config: config, + contextFactory: contextFactory, + contextTaggedLogger: contextTaggedLogger, + historyShards: make(map[int32]ControllableContext), + hostInfoProvider: hostInfoProvider, + ownership: ownership, + taggedMetricsHandler: taggedMetricsHandler, + shardCountSubscriptions: map[*shardCountSubscription]struct{}{}, } c.lingerState.shards = make(map[ControllableContext]struct{}) return c @@ -231,7 +240,7 @@ // getOrCreateShardContext returns a shard context for the given shard ID, creating a new one // if necessary. If a shard context is created, it will initialize in the background. // This function won't block on rangeid lease acquisition. -func (c *ControllerImpl) getOrCreateShardContext(shardID int32) (Context, error) { +func (c *ControllerImpl) getOrCreateShardContext(shardID int32) (ControllableContext, error) { if err := c.validateShardId(shardID); err != nil { return nil, err } @@ -446,6 +455,20 @@ c.RUnlock() c.taggedMetricsHandler.Gauge(metrics.NumShardsGauge.GetMetricName()).Record(float64(numOfOwnedShards)) + c.publishShardCountUpdate(numOfOwnedShards) +} + +// publishShardCountUpdate publishes the current number of shards that this controller owns to all shard count +// subscribers in a non-blocking manner. +func (c *ControllerImpl) publishShardCountUpdate(shardCount int) { + c.RLock() + defer c.RUnlock() + for sub := range c.shardCountSubscriptions { + select { + case sub.ch <- shardCount: + default: + } + } } func (c *ControllerImpl) doShutdown() { @@ -468,6 +491,35 @@ return nil } +// SubscribeShardCount returns a subscription to shard count updates with a 1-buffered channel. This method is thread-safe. +func (c *ControllerImpl) SubscribeShardCount() ShardCountSubscription { + c.Lock() + defer c.Unlock() + sub := &shardCountSubscription{ + controller: c, + ch: make(chan int, 1), // buffered because we do a non-blocking send + } + c.shardCountSubscriptions[sub] = struct{}{} + return sub +} + +// ShardCount returns a channel that receives the current shard count. This channel will be closed when the subscription +// is canceled. +func (s *shardCountSubscription) ShardCount() <-chan int { + return s.ch +} + +// Unsubscribe removes the subscription from the controller's list of subscriptions. 
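(Aside: a minimal consumer sketch for the subscription API added above, using the exported ShardCounter / ShardCountSubscription interfaces this patch introduces in ownership_based_quota_scaler.go; the package name and the watchShardCount helper are hypothetical, not part of the patch.)

package shardusage

import (
	"log"

	"go.temporal.io/server/service/history/shard"
)

// watchShardCount (hypothetical helper) drains shard count updates until the
// subscription is closed. The controller publishes with a non-blocking send
// into a 1-buffered channel, so a slow reader only sees the latest update
// that fit and never blocks the publisher.
func watchShardCount(counter shard.ShardCounter) {
	sub := counter.SubscribeShardCount()
	for count := range sub.ShardCount() {
		log.Printf("this host now owns %d shards", count)
	}
	// The loop ends once Unsubscribe is called elsewhere, which closes the channel.
}
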
+func (s *shardCountSubscription) Unsubscribe() { + s.controller.Lock() + defer s.controller.Unlock() + if _, ok := s.controller.shardCountSubscriptions[s]; !ok { + return + } + delete(s.controller.shardCountSubscriptions, s) + close(s.ch) +} + func IsShardOwnershipLostError(err error) bool { switch err.(type) { case *persistence.ShardOwnershipLostError: diff -Nru temporal-1.21.5-1/src/service/history/shard/controller_test.go temporal-1.22.5/src/service/history/shard/controller_test.go --- temporal-1.21.5-1/src/service/history/shard/controller_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/shard/controller_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -119,7 +119,7 @@ metricsTestHandler, resource.GetHostInfoProvider(), contextFactory, - ).(*ControllerImpl) + ) } func TestShardControllerSuite(t *testing.T) { @@ -148,6 +148,10 @@ s.NoError(err) s.metricsTestHandler = metricsTestHandler + // when shard is initialized, it will use the 2 mock function below to initialize the "current" time of each cluster + s.mockClusterMetadata.EXPECT().GetCurrentClusterName().Return(cluster.TestCurrentClusterName).AnyTimes() + s.mockClusterMetadata.EXPECT().GetAllClusterInfo().Return(cluster.TestSingleDCClusterInfo).AnyTimes() + s.shardController = NewTestController( s.mockEngineFactory, s.config, @@ -180,9 +184,6 @@ } } - // when shard is initialized, it will use the 2 mock function below to initialize the "current" time of each cluster - s.mockClusterMetadata.EXPECT().GetCurrentClusterName().Return(cluster.TestCurrentClusterName).AnyTimes() - s.mockClusterMetadata.EXPECT().GetAllClusterInfo().Return(cluster.TestSingleDCClusterInfo).AnyTimes() s.shardController.acquireShards(context.Background()) count := 0 ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute) @@ -221,9 +222,6 @@ } } - // when shard is initialized, it will use the 2 mock function below to initialize the "current" time of each cluster - s.mockClusterMetadata.EXPECT().GetCurrentClusterName().Return(cluster.TestCurrentClusterName).AnyTimes() - s.mockClusterMetadata.EXPECT().GetAllClusterInfo().Return(cluster.TestSingleDCClusterInfo).AnyTimes() s.shardController.acquireShards(context.Background()) count := 0 ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) @@ -265,9 +263,6 @@ s.setupMocksForAcquireShard(shardID, mockEngine, 5, 6, true) } - // when shard is initialized, it will use the 2 mock function below to initialize the "current" time of each cluster - s.mockClusterMetadata.EXPECT().GetCurrentClusterName().Return(cluster.TestCurrentClusterName).AnyTimes() - s.mockClusterMetadata.EXPECT().GetAllClusterInfo().Return(cluster.TestSingleDCClusterInfo).AnyTimes() s.shardController.acquireShards(context.Background()) for shardID := int32(1); shardID <= numShards; shardID++ { @@ -298,9 +293,6 @@ s.setupMocksForAcquireShard(shardID, mockEngine, 5, 6, true) } - // when shard is initialized, it will use the 2 mock function below to initialize the "current" time of each cluster - s.mockClusterMetadata.EXPECT().GetCurrentClusterName().Return(cluster.TestCurrentClusterName).AnyTimes() - s.mockClusterMetadata.EXPECT().GetAllClusterInfo().Return(cluster.TestSingleDCClusterInfo).AnyTimes() s.shardController.acquireShards(context.Background()) for shardID := int32(1); shardID <= numShards; shardID++ { @@ -342,9 +334,6 @@ s.mockServiceResolver.EXPECT().AddListener(shardControllerMembershipUpdateListenerName, gomock.Any()).Return(nil).AnyTimes() - // when shard is initialized, it 
will use the 2 mock function below to initialize the "current" time of each cluster - s.mockClusterMetadata.EXPECT().GetCurrentClusterName().Return(cluster.TestCurrentClusterName).AnyTimes() - s.mockClusterMetadata.EXPECT().GetAllClusterInfo().Return(cluster.TestSingleDCClusterInfo).AnyTimes() s.shardController.Start() s.shardController.acquireShards(context.Background()) @@ -444,9 +433,6 @@ } s.mockServiceResolver.EXPECT().AddListener(shardControllerMembershipUpdateListenerName, gomock.Any()).Return(nil).AnyTimes() - // when shard is initialized, it will use the 2 mock function below to initialize the "current" time of each cluster - s.mockClusterMetadata.EXPECT().GetCurrentClusterName().Return(cluster.TestCurrentClusterName).AnyTimes() - s.mockClusterMetadata.EXPECT().GetAllClusterInfo().Return(cluster.TestSingleDCClusterInfo).AnyTimes() s.shardController.Start() s.shardController.acquireShards(context.Background()) @@ -484,8 +470,6 @@ func (s *controllerSuite) TestShardExplicitUnload() { s.config.NumberOfShards = 1 - s.mockClusterMetadata.EXPECT().GetCurrentClusterName().Return(cluster.TestCurrentClusterName).AnyTimes() - s.mockClusterMetadata.EXPECT().GetAllClusterInfo().Return(cluster.TestSingleDCClusterInfo).AnyTimes() mockEngine := NewMockEngine(s.controller) mockEngine.EXPECT().Stop().AnyTimes() s.setupMocksForAcquireShard(1, mockEngine, 5, 6, false) @@ -507,8 +491,6 @@ func (s *controllerSuite) TestShardExplicitUnloadCancelGetOrCreate() { s.config.NumberOfShards = 1 - s.mockClusterMetadata.EXPECT().GetCurrentClusterName().Return(cluster.TestCurrentClusterName).AnyTimes() - s.mockClusterMetadata.EXPECT().GetAllClusterInfo().Return(cluster.TestSingleDCClusterInfo).AnyTimes() mockEngine := NewMockEngine(s.controller) mockEngine.EXPECT().Stop().AnyTimes() @@ -521,8 +503,10 @@ s.mockShardManager.EXPECT().GetOrCreateShard(gomock.Any(), getOrCreateShardRequestMatcher(shardID)).DoAndReturn( func(ctx context.Context, req *persistence.GetOrCreateShardRequest) (*persistence.GetOrCreateShardResponse, error) { ready <- struct{}{} + timer := time.NewTimer(5 * time.Second) + defer timer.Stop() select { - case <-time.After(5 * time.Second): + case <-timer.C: wasCanceled <- false return nil, errors.New("timed out") case <-ctx.Done(): @@ -548,8 +532,6 @@ func (s *controllerSuite) TestShardExplicitUnloadCancelAcquire() { s.config.NumberOfShards = 1 - s.mockClusterMetadata.EXPECT().GetCurrentClusterName().Return(cluster.TestCurrentClusterName).AnyTimes() - s.mockClusterMetadata.EXPECT().GetAllClusterInfo().Return(cluster.TestSingleDCClusterInfo).AnyTimes() mockEngine := NewMockEngine(s.controller) mockEngine.EXPECT().Stop().AnyTimes() @@ -563,7 +545,6 @@ Owner: s.hostInfo.Identity(), RangeId: 5, ReplicationDlqAckLevel: map[string]int64{}, - QueueAckLevels: s.queueAckLevels(), QueueStates: s.queueStates(), }, }, nil) @@ -574,8 +555,10 @@ s.mockShardManager.EXPECT().UpdateShard(gomock.Any(), gomock.Any()).DoAndReturn( func(ctx context.Context, req *persistence.UpdateShardRequest) error { ready <- struct{}{} + timer := time.NewTimer(5 * time.Second) + defer timer.Stop() select { - case <-time.After(5 * time.Second): + case <-timer.C: wasCanceled <- false return errors.New("timed out") case <-ctx.Done(): @@ -605,8 +588,6 @@ s.mockServiceResolver.EXPECT().AddListener(shardControllerMembershipUpdateListenerName, gomock.Any()).Return(nil).AnyTimes() s.mockServiceResolver.EXPECT().RemoveListener(shardControllerMembershipUpdateListenerName).Return(nil).AnyTimes() - 
s.mockClusterMetadata.EXPECT().GetCurrentClusterName().Return(cluster.TestCurrentClusterName).AnyTimes() - s.mockClusterMetadata.EXPECT().GetAllClusterInfo().Return(cluster.TestSingleDCClusterInfo).AnyTimes() // only for MockEngines: we just need to hook Start/Stop, not verify calls disconnectedMockController := gomock.NewController(nil) @@ -616,7 +597,6 @@ var countCloseWg sync.WaitGroup for shardID := int32(1); shardID <= s.config.NumberOfShards; shardID++ { - queueAckLevels := s.queueAckLevels() queueStates := s.queueStates() s.mockServiceResolver.EXPECT().Lookup(convert.Int32ToString(shardID)).Return(s.hostInfo, nil).AnyTimes() @@ -654,7 +634,6 @@ Owner: s.hostInfo.Identity(), RangeId: 5, ReplicationDlqAckLevel: map[string]int64{}, - QueueAckLevels: queueAckLevels, QueueStates: queueStates, }, }, nil @@ -749,9 +728,6 @@ historyEngines[shardID] = mockEngine s.setupMocksForAcquireShard(shardID, mockEngine, 5, 6, true) - // when shard is initialized, it will use the 2 mock function below to initialize the "current" time of each cluster - s.mockClusterMetadata.EXPECT().GetCurrentClusterName().Return(cluster.TestCurrentClusterName).AnyTimes() - s.mockClusterMetadata.EXPECT().GetAllClusterInfo().Return(cluster.TestSingleDCClusterInfo).AnyTimes() s.shardController.acquireShards(context.Background()) s.Len(s.shardController.ShardIDs(), 1) @@ -823,8 +799,6 @@ ShardID: shardID, RangeID: 6, }).Return(nil).Times(1) - s.mockClusterMetadata.EXPECT().GetCurrentClusterName().Return(cluster.TestCurrentClusterName).AnyTimes() - s.mockClusterMetadata.EXPECT().GetAllClusterInfo().Return(cluster.TestSingleDCClusterInfo).AnyTimes() s.shardController.acquireShards(context.Background()) s.Len(s.shardController.ShardIDs(), 1) @@ -859,6 +833,46 @@ s.Len(s.shardController.ShardIDs(), 0) } +// TestShardCounter verifies that we can subscribe to shard count updates, receive them when shards are acquired, and +// unsubscribe from the updates when needed. +func (s *controllerSuite) TestShardCounter() { + // subscribe to shard count updates + sub1 := s.shardController.SubscribeShardCount() + + // validate that we get the initial shard count + s.Empty(sub1.ShardCount(), "Should not publish shard count before acquiring shards") + s.setupAndAcquireShards(2) + s.Equal(2, <-sub1.ShardCount(), "Should publish shard count after acquiring shards") + s.Empty(sub1.ShardCount(), "Shard count channel should be drained") + + // acquire shards twice to validate that this does not block even if there's no capacity left on the channel + s.setupAndAcquireShards(3) + s.setupAndAcquireShards(4) + s.Equal(3, <-sub1.ShardCount(), "Shard count is buffered, so we should only get the first value") + s.Empty(sub1.ShardCount(), "Shard count channel should be drained") + + // unsubscribe and validate that the channel is closed, but the other subscriber is still receiving updates + sub2 := s.shardController.SubscribeShardCount() + sub1.Unsubscribe() + s.setupAndAcquireShards(4) + _, ok := <-sub1.ShardCount() + s.False(ok, "Channel should be closed because sub1 is canceled") + sub1.Unsubscribe() // should not panic if called twice + s.Equal(4, <-sub2.ShardCount(), "Should receive shard count updates on sub2 even if sub1 is canceled") + sub2.Unsubscribe() +} + +// setupAndAcquireShards sets up the mocks for acquiring the given number of shards and then calls acquireShards. It is +// safe to call this multiple times throughout a test. 
+func (s *controllerSuite) setupAndAcquireShards(numShards int) { + s.config.NumberOfShards = int32(numShards) + mockEngine := NewMockEngine(s.controller) + for shardID := 1; shardID <= numShards; shardID++ { + s.setupMocksForAcquireShard(int32(shardID), mockEngine, 5, 6, false) + } + s.shardController.acquireShards(context.Background()) +} + func (s *controllerSuite) setupMocksForAcquireShard( shardID int32, mockEngine *MockEngine, @@ -866,7 +880,6 @@ required bool, ) { - queueAckLevels := s.queueAckLevels() queueStates := s.queueStates() minTimes := 0 @@ -887,7 +900,6 @@ Owner: s.hostInfo.Identity(), RangeId: currentRangeID, ReplicationDlqAckLevel: map[string]int64{}, - QueueAckLevels: queueAckLevels, QueueStates: queueStates, }, }, nil).MinTimes(minTimes) @@ -898,7 +910,6 @@ RangeId: newRangeID, StolenSinceRenew: 1, ReplicationDlqAckLevel: map[string]int64{}, - QueueAckLevels: queueAckLevels, QueueStates: queueStates, }, PreviousRangeID: currentRangeID, @@ -909,34 +920,6 @@ }).Return(nil).MinTimes(minTimes) } -func (s *controllerSuite) queueAckLevels() map[int32]*persistencespb.QueueAckLevel { - replicationAck := int64(201) - currentClusterTransferAck := int64(210) - alternativeClusterTransferAck := int64(320) - currentClusterTimerAck := timestamp.TimeNowPtrUtcAddSeconds(-100) - alternativeClusterTimerAck := timestamp.TimeNowPtrUtcAddSeconds(-200) - return map[int32]*persistencespb.QueueAckLevel{ - tasks.CategoryTransfer.ID(): { - AckLevel: currentClusterTransferAck, - ClusterAckLevel: map[string]int64{ - cluster.TestCurrentClusterName: currentClusterTransferAck, - cluster.TestAlternativeClusterName: alternativeClusterTransferAck, - }, - }, - tasks.CategoryTimer.ID(): { - AckLevel: currentClusterTimerAck.UnixNano(), - ClusterAckLevel: map[string]int64{ - cluster.TestCurrentClusterName: currentClusterTimerAck.UnixNano(), - cluster.TestAlternativeClusterName: alternativeClusterTimerAck.UnixNano(), - }, - }, - tasks.CategoryReplication.ID(): { - AckLevel: replicationAck, - ClusterAckLevel: map[string]int64{}, - }, - } -} - func (s *controllerSuite) queueStates() map[int32]*persistencespb.QueueState { return map[int32]*persistencespb.QueueState{ tasks.CategoryTransfer.ID(): { diff -Nru temporal-1.21.5-1/src/service/history/shard/engine.go temporal-1.22.5/src/service/history/shard/engine.go --- temporal-1.21.5-1/src/service/history/shard/engine.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/shard/engine.go 2024-02-23 09:45:43.000000000 +0000 @@ -35,7 +35,6 @@ "go.temporal.io/server/api/historyservice/v1" replicationspb "go.temporal.io/server/api/replication/v1" - "go.temporal.io/server/common" "go.temporal.io/server/common/collection" "go.temporal.io/server/common/namespace" "go.temporal.io/server/service/history/events" @@ -45,8 +44,6 @@ type ( // Engine represents an interface for managing workflow execution history. 
Engine interface { - common.Daemon - StartWorkflowExecution(ctx context.Context, request *historyservice.StartWorkflowExecutionRequest) (*historyservice.StartWorkflowExecutionResponse, error) GetMutableState(ctx context.Context, request *historyservice.GetMutableStateRequest) (*historyservice.GetMutableStateResponse, error) PollMutableState(ctx context.Context, request *historyservice.PollMutableStateRequest) (*historyservice.PollMutableStateResponse, error) @@ -69,6 +66,8 @@ DeleteWorkflowExecution(ctx context.Context, deleteRequest *historyservice.DeleteWorkflowExecutionRequest) (*historyservice.DeleteWorkflowExecutionResponse, error) ResetWorkflowExecution(ctx context.Context, request *historyservice.ResetWorkflowExecutionRequest) (*historyservice.ResetWorkflowExecutionResponse, error) ScheduleWorkflowTask(ctx context.Context, request *historyservice.ScheduleWorkflowTaskRequest) error + IsActivityTaskValid(ctx context.Context, request *historyservice.IsActivityTaskValidRequest) (*historyservice.IsActivityTaskValidResponse, error) + IsWorkflowTaskValid(ctx context.Context, request *historyservice.IsWorkflowTaskValidRequest) (*historyservice.IsWorkflowTaskValidResponse, error) VerifyFirstWorkflowTaskScheduled(ctx context.Context, request *historyservice.VerifyFirstWorkflowTaskScheduledRequest) error RecordChildExecutionCompleted(ctx context.Context, request *historyservice.RecordChildExecutionCompletedRequest) (*historyservice.RecordChildExecutionCompletedResponse, error) VerifyChildExecutionCompletionRecorded(ctx context.Context, request *historyservice.VerifyChildExecutionCompletionRecordedRequest) (*historyservice.VerifyChildExecutionCompletionRecordedResponse, error) @@ -95,6 +94,8 @@ AddSpeculativeWorkflowTaskTimeoutTask(task *tasks.WorkflowTaskTimeoutTask) ReplicationStream + Start() + Stop() } ReplicationStream interface { diff -Nru temporal-1.21.5-1/src/service/history/shard/engine_mock.go temporal-1.22.5/src/service/history/shard/engine_mock.go --- temporal-1.21.5-1/src/service/history/shard/engine_mock.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/shard/engine_mock.go 2024-02-23 09:45:43.000000000 +0000 @@ -244,6 +244,36 @@ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetReplicationTasksIter", reflect.TypeOf((*MockEngine)(nil).GetReplicationTasksIter), ctx, pollingCluster, minInclusiveTaskID, maxExclusiveTaskID) } +// IsActivityTaskValid mocks base method. +func (m *MockEngine) IsActivityTaskValid(ctx context.Context, request *historyservice.IsActivityTaskValidRequest) (*historyservice.IsActivityTaskValidResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IsActivityTaskValid", ctx, request) + ret0, _ := ret[0].(*historyservice.IsActivityTaskValidResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// IsActivityTaskValid indicates an expected call of IsActivityTaskValid. +func (mr *MockEngineMockRecorder) IsActivityTaskValid(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsActivityTaskValid", reflect.TypeOf((*MockEngine)(nil).IsActivityTaskValid), ctx, request) +} + +// IsWorkflowTaskValid mocks base method. 
+func (m *MockEngine) IsWorkflowTaskValid(ctx context.Context, request *historyservice.IsWorkflowTaskValidRequest) (*historyservice.IsWorkflowTaskValidResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IsWorkflowTaskValid", ctx, request) + ret0, _ := ret[0].(*historyservice.IsWorkflowTaskValidResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// IsWorkflowTaskValid indicates an expected call of IsWorkflowTaskValid. +func (mr *MockEngineMockRecorder) IsWorkflowTaskValid(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsWorkflowTaskValid", reflect.TypeOf((*MockEngine)(nil).IsWorkflowTaskValid), ctx, request) +} + // MergeDLQMessages mocks base method. func (m *MockEngine) MergeDLQMessages(ctx context.Context, messagesRequest *historyservice.MergeDLQMessagesRequest) (*historyservice.MergeDLQMessagesResponse, error) { m.ctrl.T.Helper() diff -Nru temporal-1.21.5-1/src/service/history/shard/fx.go temporal-1.22.5/src/service/history/shard/fx.go --- temporal-1.21.5-1/src/service/history/shard/fx.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/shard/fx.go 2024-02-23 09:45:43.000000000 +0000 @@ -25,16 +25,25 @@ package shard import ( + "go.temporal.io/server/service/history/configs" "go.uber.org/fx" "go.temporal.io/server/common" ) -var Module = fx.Options( - fx.Provide(ControllerProvider), - fx.Provide(ContextFactoryProvider), - fx.Provide(fx.Annotate( +var Module = fx.Provide( + ControllerProvider, + func(impl *ControllerImpl) Controller { return impl }, + func(impl *ControllerImpl, cfg *configs.Config) (*OwnershipBasedQuotaScaler, error) { + return NewOwnershipBasedQuotaScaler( + impl, + int(cfg.NumberOfShards), + nil, + ) + }, + ContextFactoryProvider, + fx.Annotate( func(p Controller) common.Pingable { return p }, fx.ResultTags(`group:"deadlockDetectorRoots"`), - )), + ), ) diff -Nru temporal-1.21.5-1/src/service/history/shard/ownership_based_quota_scaler.go temporal-1.22.5/src/service/history/shard/ownership_based_quota_scaler.go --- temporal-1.21.5-1/src/service/history/shard/ownership_based_quota_scaler.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/service/history/shard/ownership_based_quota_scaler.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,179 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package shard + +import ( + "errors" + "fmt" + "math" + "sync" + "sync/atomic" + + "go.temporal.io/server/common/quotas" +) + +type ( + // OwnershipBasedQuotaScaler scales rate-limiting quotas linearly with the fraction of the total shards in the + // cluster owned by this host. The purpose is to allocate more quota to hosts with a higher workload. This object + // can be obtained from the fx Module within this package. + OwnershipBasedQuotaScaler struct { + shardCounter ShardCounter + totalNumShards int + updateAppliedCallback chan struct{} + } + // OwnershipScaledRateBurst is a quotas.RateBurst implementation that scales the RPS and burst quotas linearly with + // the fraction of the total shards in the cluster owned by this host. The effective Rate and Burst are both + // multiplied by (shardCount / totalShards). Note that there is no scaling until the first shard count update is + // received. + OwnershipScaledRateBurst struct { + // rb is the base rate burst that we will scale. + rb quotas.RateBurst + // shardCount is the number of shards owned by this host. + shardCount atomic.Int64 + // totalShards is the total number of shards in the cluster. + totalShards int + // subscription is the subscription to the shard counter. + subscription ShardCountSubscription + // updateAppliedCallback is a callback channel that is sent to when the shard count updates are applied. This is + // useful for testing. In production, it should be nil. + updateAppliedCallback chan struct{} + // wg is a wait group that is used to wait for the shard count subscription goroutine to exit. + wg sync.WaitGroup + } + // ShardCountSubscription is a subscription to a ShardCounter. It provides a channel that receives the + // shard count updates and an Unsubscribe method that unsubscribes from the counter. + ShardCountSubscription interface { + // ShardCount returns a channel that receives shard count updates. + ShardCount() <-chan int + // Unsubscribe unsubscribes from the shard counter. This closes the ShardCount channel. + Unsubscribe() + } + // ShardCounter is an observable object that emits the current shard count. + ShardCounter interface { + // SubscribeShardCount returns a ShardCountSubscription for receiving shard count updates. + SubscribeShardCount() ShardCountSubscription + } +) + +var ( + // shardCountNotSet is a sentinel value for the shardCount which indicates that it hasn't been set yet. It's an + // int64 because that's the type of the atomic. + shardCountNotSet int64 = -1 + + ErrNonPositiveTotalNumShards = errors.New("totalNumShards must be greater than 0") +) + +// NewOwnershipBasedQuotaScaler returns an OwnershipBasedQuotaScaler. The updateAppliedCallback field is a channel which +// is sent to in a blocking fashion when the shard count updates are applied. This is useful for testing. In production, +// you should pass in nil, which will cause the callback to be ignored. If totalNumShards is non-positive, then an error +// is returned. 
+func NewOwnershipBasedQuotaScaler( + shardCounter ShardCounter, + totalNumShards int, + updateAppliedCallback chan struct{}, +) (*OwnershipBasedQuotaScaler, error) { + if totalNumShards <= 0 { + return nil, fmt.Errorf("%w: %d", ErrNonPositiveTotalNumShards, totalNumShards) + } + + return &OwnershipBasedQuotaScaler{ + shardCounter: shardCounter, + totalNumShards: totalNumShards, + updateAppliedCallback: updateAppliedCallback, + }, nil +} + +// ScaleRateBurst returns a new OwnershipScaledRateBurst instance which scales the rate/burst quotas of the base +// RateBurst by the fraction of the total shards in the cluster owned by this host. You should call +// OwnershipScaledRateBurst.StopScaling on the returned instance when you are done with it to avoid leaking resources. +func (s *OwnershipBasedQuotaScaler) ScaleRateBurst(rb quotas.RateBurst) *OwnershipScaledRateBurst { + return newOwnershipScaledRateBurst(rb, s.shardCounter, s.totalNumShards, s.updateAppliedCallback) +} + +func newOwnershipScaledRateBurst( + rb quotas.RateBurst, + shardCounter ShardCounter, + totalNumShards int, + updateAppliedCallback chan struct{}, +) *OwnershipScaledRateBurst { + subscription := shardCounter.SubscribeShardCount() + srb := &OwnershipScaledRateBurst{ + rb: rb, + totalShards: totalNumShards, + subscription: subscription, + updateAppliedCallback: updateAppliedCallback, + } + // Initialize the shard count to the shardCountNotSet sentinel value so that we don't try to apply the scale factor + // until we receive the first shard count. + srb.shardCount.Store(shardCountNotSet) + srb.wg.Add(1) + + go srb.startScaling() + + return srb +} + +// Rate returns the rate of the base rate limiter multiplied by the shard ownership share. +func (rb *OwnershipScaledRateBurst) Rate() float64 { + return rb.rb.Rate() * rb.scaleFactor() +} + +// Burst returns the burst quota of the base rate limiter multiplied by the shard ownership share, rounded up to the +// nearest integer. We round up because we don't want to let this drop to zero unless the base burst is zero. +func (rb *OwnershipScaledRateBurst) Burst() int { + return int(math.Ceil(float64(rb.rb.Burst()) * rb.scaleFactor())) +} + +// scaleFactor returns the fraction of the total shards in the cluster owned by this host. It returns 1.0 if there +// haven't been any shard count updates yet. +func (rb *OwnershipScaledRateBurst) scaleFactor() float64 { + shardCount := rb.shardCount.Load() + if shardCount == shardCountNotSet { + // If the shard count is not set, then we haven't received the first shard count update yet. In this case, we + // return 1.0 so that the base rate/burst quotas are not scaled. + return 1.0 + } + + return float64(shardCount) / float64(rb.totalShards) +} + +func (rb *OwnershipScaledRateBurst) startScaling() { + defer rb.wg.Done() + + for shardCount := range rb.subscription.ShardCount() { + rb.shardCount.Store(int64(shardCount)) + + if rb.updateAppliedCallback != nil { + rb.updateAppliedCallback <- struct{}{} + } + } +} + +// StopScaling unsubscribes from the shard counter and stops scaling the rate and burst quotas. This method blocks until +// the shard count subscription goroutine exits (which should be almost immediately). 
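(Aside: a rough usage sketch of the scaler under stated assumptions — fixedRateBurst and scaleExample are hypothetical stand-ins and the 512-shard / 100 rps figures are arbitrary; only NewOwnershipBasedQuotaScaler, ScaleRateBurst, Rate, Burst and StopScaling come from this patch.)

package shardusage

import (
	"fmt"

	"go.temporal.io/server/service/history/shard"
)

// fixedRateBurst is a hypothetical constant quotas.RateBurst implementation
// used only for illustration.
type fixedRateBurst struct {
	rate  float64
	burst int
}

func (f fixedRateBurst) Rate() float64 { return f.rate }
func (f fixedRateBurst) Burst() int    { return f.burst }

// scaleExample wires a base limit of 100 rps / burst 200 through the scaler in
// a 512-shard cluster. Until the first shard count update arrives the base
// values pass through unchanged; once this host owns e.g. 128 shards, the
// effective rate is 100*128/512 = 25 and the burst is ceil(200*0.25) = 50.
func scaleExample(counter shard.ShardCounter) error {
	scaler, err := shard.NewOwnershipBasedQuotaScaler(counter, 512, nil)
	if err != nil {
		return err
	}
	srb := scaler.ScaleRateBurst(fixedRateBurst{rate: 100, burst: 200})
	defer srb.StopScaling()
	fmt.Println(srb.Rate(), srb.Burst()) // 100 200 before any update is received
	return nil
}
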
+func (rb *OwnershipScaledRateBurst) StopScaling() { + rb.subscription.Unsubscribe() + rb.wg.Wait() +} diff -Nru temporal-1.21.5-1/src/service/history/shard/ownership_based_quota_scaler_test.go temporal-1.22.5/src/service/history/shard/ownership_based_quota_scaler_test.go --- temporal-1.21.5-1/src/service/history/shard/ownership_based_quota_scaler_test.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/service/history/shard/ownership_based_quota_scaler_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,114 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package shard_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.temporal.io/server/service/history/shard" +) + +// shardCounter adapts a channel of shard count updates to the ShardCounter interface. +type shardCounter struct { + ch chan int + closed bool +} + +func (s *shardCounter) SubscribeShardCount() shard.ShardCountSubscription { + return s +} + +func (s *shardCounter) ShardCount() <-chan int { + return s.ch +} + +func (s *shardCounter) Unsubscribe() { + close(s.ch) + s.closed = true +} + +// constantRateBurst is a quotas.RateBurst implementation that returns the same rate and burst every time. 
+type constantRateBurst struct { + rate float64 + burst int +} + +func (rb constantRateBurst) Rate() float64 { + return rb.rate +} + +func (rb constantRateBurst) Burst() int { + return rb.burst +} + +func newRateBurst(rate float64, burst int) constantRateBurst { + return constantRateBurst{rate: rate, burst: burst} +} + +func TestOwnershipBasedQuotaScaler_NonPositiveTotalNumShards(t *testing.T) { + t.Parallel() + + sco := &shardCounter{ + ch: make(chan int), + closed: false, + } + totalNumShards := 0 + _, err := shard.NewOwnershipBasedQuotaScaler(sco, totalNumShards, nil) + assert.ErrorIs(t, err, shard.ErrNonPositiveTotalNumShards) +} + +func TestOwnershipBasedQuotaScaler(t *testing.T) { + t.Parallel() + + rb := newRateBurst(2, 4) + sc := &shardCounter{ + ch: make(chan int), + closed: false, + } + totalNumShards := 10 + updateAppliedCallback := make(chan struct{}) + scaler, err := shard.NewOwnershipBasedQuotaScaler(sc, totalNumShards, updateAppliedCallback) + require.NoError(t, err) + srb := scaler.ScaleRateBurst(rb) + assert.Equal(t, 2.0, srb.Rate(), "Rate should be equal to the base rate before any shard count updates") + assert.Equal(t, 4, srb.Burst(), "Burst should be equal to the base burst before any shard count updates") + sc.ch <- 3 + + // Wait for the update to be applied. Even though the send above is blocking, we still need to wait for the + // rate/burst scaler's goroutine to use it to adjust the scale factor. + <-updateAppliedCallback + + // After the update is applied, the scale factor is calculated as 3/10 = 0.3, so the rate and burst should be + // multiplied by 0.3. Since the initial rate and burst are 2 and 4, respectively, the final rate and burst should be + // 0.6 and 1.2, respectively. However, since the burst is rounded up to the nearest integer, the final burst should + // be 2. + assert.Equal(t, 0.6, srb.Rate()) + assert.Equal(t, 2, srb.Burst()) + assert.False(t, sc.closed, "The shard counter should not be closed until the srb scaler is stopped") + srb.StopScaling() + assert.True(t, sc.closed, "The shard counter should be closed after the srb scaler is stopped") +} diff -Nru temporal-1.21.5-1/src/service/history/shard/ownership_test.go temporal-1.22.5/src/service/history/shard/ownership_test.go --- temporal-1.21.5-1/src/service/history/shard/ownership_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/shard/ownership_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -79,7 +79,7 @@ s.resource.GetMetricsHandler(), s.resource.GetHostInfoProvider(), contextFactory, - ).(*ControllerImpl) + ) } func (s *ownershipSuite) TestAcquireViaMembershipUpdate() { diff -Nru temporal-1.21.5-1/src/service/history/tasks/archive_execution_task.go temporal-1.22.5/src/service/history/tasks/archive_execution_task.go --- temporal-1.21.5-1/src/service/history/tasks/archive_execution_task.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/tasks/archive_execution_task.go 2024-02-23 09:45:43.000000000 +0000 @@ -35,7 +35,7 @@ type ( // ArchiveExecutionTask is the task which archives both the history and visibility of a workflow execution and then - // produces a retention timer task to delete the data. See "Durable Archival" for more info. + // produces a retention timer task to delete the data. 
ArchiveExecutionTask struct { definition.WorkflowKey VisibilityTimestamp time.Time diff -Nru temporal-1.21.5-1/src/service/history/tests/vars.go temporal-1.22.5/src/service/history/tests/vars.go --- temporal-1.21.5-1/src/service/history/tests/vars.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/tests/vars.go 2024-02-23 09:45:43.000000000 +0000 @@ -185,6 +185,6 @@ config.EnableActivityEagerExecution = dynamicconfig.GetBoolPropertyFnFilteredByNamespace(true) config.EnableEagerWorkflowStart = dynamicconfig.GetBoolPropertyFnFilteredByNamespace(true) config.NamespaceCacheRefreshInterval = dynamicconfig.GetDurationPropertyFn(time.Second) - config.DurableArchivalEnabled = dynamicconfig.GetBoolPropertyFn(true) + config.EnableAPIGetCurrentRunIDLock = dynamicconfig.GetBoolPropertyFn(true) return config } diff -Nru temporal-1.21.5-1/src/service/history/timerQueueActiveTaskExecutor.go temporal-1.22.5/src/service/history/timerQueueActiveTaskExecutor.go --- temporal-1.21.5-1/src/service/history/timerQueueActiveTaskExecutor.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/timerQueueActiveTaskExecutor.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,652 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- -package history - -import ( - "context" - "fmt" - - "github.com/pborman/uuid" - - commonpb "go.temporal.io/api/common/v1" - enumspb "go.temporal.io/api/enums/v1" - "go.temporal.io/api/serviceerror" - taskqueuepb "go.temporal.io/api/taskqueue/v1" - - enumsspb "go.temporal.io/server/api/enums/v1" - "go.temporal.io/server/api/matchingservice/v1" - "go.temporal.io/server/common" - "go.temporal.io/server/common/backoff" - "go.temporal.io/server/common/definition" - "go.temporal.io/server/common/failure" - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/log/tag" - "go.temporal.io/server/common/metrics" - "go.temporal.io/server/common/namespace" - "go.temporal.io/server/common/primitives/timestamp" - "go.temporal.io/server/service/history/configs" - "go.temporal.io/server/service/history/consts" - "go.temporal.io/server/service/history/deletemanager" - "go.temporal.io/server/service/history/queues" - "go.temporal.io/server/service/history/shard" - "go.temporal.io/server/service/history/tasks" - "go.temporal.io/server/service/history/vclock" - "go.temporal.io/server/service/history/workflow" - wcache "go.temporal.io/server/service/history/workflow/cache" -) - -type ( - timerQueueActiveTaskExecutor struct { - *timerQueueTaskExecutorBase - } -) - -func newTimerQueueActiveTaskExecutor( - shard shard.Context, - workflowCache wcache.Cache, - workflowDeleteManager deletemanager.DeleteManager, - logger log.Logger, - metricProvider metrics.Handler, - config *configs.Config, - matchingClient matchingservice.MatchingServiceClient, -) queues.Executor { - return &timerQueueActiveTaskExecutor{ - timerQueueTaskExecutorBase: newTimerQueueTaskExecutorBase( - shard, - workflowCache, - workflowDeleteManager, - matchingClient, - logger, - metricProvider, - config, - ), - } -} - -func (t *timerQueueActiveTaskExecutor) Execute( - ctx context.Context, - executable queues.Executable, -) ([]metrics.Tag, bool, error) { - taskTypeTagValue := queues.GetActiveTimerTaskTypeTagValue(executable) - namespaceTag, replicationState := getNamespaceTagAndReplicationStateByID( - t.shard.GetNamespaceRegistry(), - executable.GetNamespaceID(), - ) - metricsTags := []metrics.Tag{ - namespaceTag, - metrics.TaskTypeTag(taskTypeTagValue), - metrics.OperationTag(taskTypeTagValue), // for backward compatibility - } - - if replicationState == enumspb.REPLICATION_STATE_HANDOVER { - // TODO: exclude task types here if we believe it's safe & necessary to execute - // them during namespace handover. 
- // TODO: move this logic to queues.Executable when metrics tag doesn't need to - // be returned from task executor - return metricsTags, true, consts.ErrNamespaceHandover - } - - var err error - switch task := executable.GetTask().(type) { - case *tasks.UserTimerTask: - err = t.executeUserTimerTimeoutTask(ctx, task) - case *tasks.ActivityTimeoutTask: - err = t.executeActivityTimeoutTask(ctx, task) - case *tasks.WorkflowTaskTimeoutTask: - err = t.executeWorkflowTaskTimeoutTask(ctx, task) - case *tasks.WorkflowTimeoutTask: - err = t.executeWorkflowTimeoutTask(ctx, task) - case *tasks.ActivityRetryTimerTask: - err = t.executeActivityRetryTimerTask(ctx, task) - case *tasks.WorkflowBackoffTimerTask: - err = t.executeWorkflowBackoffTimerTask(ctx, task) - case *tasks.DeleteHistoryEventTask: - err = t.executeDeleteHistoryEventTask(ctx, task) - default: - err = errUnknownTimerTask - } - - return metricsTags, true, err -} - -func (t *timerQueueActiveTaskExecutor) executeUserTimerTimeoutTask( - ctx context.Context, - task *tasks.UserTimerTask, -) (retError error) { - ctx, cancel := context.WithTimeout(ctx, taskTimeout) - defer cancel() - - weContext, release, err := getWorkflowExecutionContextForTask(ctx, t.cache, task) - if err != nil { - return err - } - defer func() { release(retError) }() - - mutableState, err := loadMutableStateForTimerTask(ctx, weContext, task, t.metricHandler, t.logger) - if err != nil { - return err - } - if mutableState == nil || !mutableState.IsWorkflowExecutionRunning() { - return nil - } - - timerSequence := t.getTimerSequence(mutableState) - referenceTime := t.shard.GetTimeSource().Now() - timerFired := false - -Loop: - for _, timerSequenceID := range timerSequence.LoadAndSortUserTimers() { - timerInfo, ok := mutableState.GetUserTimerInfoByEventID(timerSequenceID.EventID) - if !ok { - errString := fmt.Sprintf("failed to find in user timer event ID: %v", timerSequenceID.EventID) - t.logger.Error(errString) - return serviceerror.NewInternal(errString) - } - - if !queues.IsTimeExpired(referenceTime, timerSequenceID.Timestamp) { - // timer sequence IDs are sorted, once there is one timer - // sequence ID not expired, all after that wil not expired - break Loop - } - - if _, err := mutableState.AddTimerFiredEvent(timerInfo.GetTimerId()); err != nil { - return err - } - timerFired = true - } - - if !timerFired { - return nil - } - - return t.updateWorkflowExecution(ctx, weContext, mutableState, timerFired) -} - -func (t *timerQueueActiveTaskExecutor) executeActivityTimeoutTask( - ctx context.Context, - task *tasks.ActivityTimeoutTask, -) (retError error) { - ctx, cancel := context.WithTimeout(ctx, taskTimeout) - defer cancel() - - weContext, release, err := getWorkflowExecutionContextForTask(ctx, t.cache, task) - if err != nil { - return err - } - defer func() { release(retError) }() - - mutableState, err := loadMutableStateForTimerTask(ctx, weContext, task, t.metricHandler, t.logger) - if err != nil { - return err - } - if mutableState == nil || !mutableState.IsWorkflowExecutionRunning() { - return nil - } - - timerSequence := t.getTimerSequence(mutableState) - referenceTime := t.shard.GetTimeSource().Now() - updateMutableState := false - scheduleWorkflowTask := false - - // need to clear activity heartbeat timer task mask for new activity timer task creation - // NOTE: LastHeartbeatTimeoutVisibilityInSeconds is for deduping heartbeat timer creation as it's possible - // one heartbeat task was persisted multiple times with different taskIDs due to the retry logic - // for 
updating workflow execution. In that case, only one new heartbeat timeout task should be - // created. - isHeartBeatTask := task.TimeoutType == enumspb.TIMEOUT_TYPE_HEARTBEAT - activityInfo, heartbeatTimeoutVis, ok := mutableState.GetActivityInfoWithTimerHeartbeat(task.EventID) - if isHeartBeatTask && ok && queues.IsTimeExpired(task.GetVisibilityTime(), heartbeatTimeoutVis) { - activityInfo.TimerTaskStatus = activityInfo.TimerTaskStatus &^ workflow.TimerTaskStatusCreatedHeartbeat - if err := mutableState.UpdateActivity(activityInfo); err != nil { - return err - } - updateMutableState = true - } - -Loop: - for _, timerSequenceID := range timerSequence.LoadAndSortActivityTimers() { - activityInfo, ok := mutableState.GetActivityInfo(timerSequenceID.EventID) - if !ok || timerSequenceID.Attempt < activityInfo.Attempt { - // handle 2 cases: - // 1. !ok - // this case can happen since each activity can have 4 timers - // and one of those 4 timers may have fired in this loop - // 2. timerSequenceID.attempt < activityInfo.Attempt - // retry could update activity attempt, should not timeouts new attempt - continue Loop - } - - if !queues.IsTimeExpired(referenceTime, timerSequenceID.Timestamp) { - // timer sequence IDs are sorted, once there is one timer - // sequence ID not expired, all after that wil not expired - break Loop - } - - failureMsg := fmt.Sprintf("activity %v timeout", timerSequenceID.TimerType.String()) - timeoutFailure := failure.NewTimeoutFailure(failureMsg, timerSequenceID.TimerType) - var retryState enumspb.RetryState - if retryState, err = mutableState.RetryActivity( - activityInfo, - timeoutFailure, - ); err != nil { - return err - } else if retryState == enumspb.RETRY_STATE_IN_PROGRESS { - updateMutableState = true - continue Loop - } - - timeoutFailure.GetTimeoutFailureInfo().LastHeartbeatDetails = activityInfo.LastHeartbeatDetails - // If retryState is Timeout then it means that expirationTime is expired. - // ExpirationTime is expired when ScheduleToClose timeout is expired. 
- if retryState == enumspb.RETRY_STATE_TIMEOUT { - timeoutFailure.GetTimeoutFailureInfo().TimeoutType = enumspb.TIMEOUT_TYPE_SCHEDULE_TO_CLOSE - } - - t.emitTimeoutMetricScopeWithNamespaceTag( - namespace.ID(mutableState.GetExecutionInfo().NamespaceId), - metrics.TimerActiveTaskActivityTimeoutScope, - timerSequenceID.TimerType, - ) - if _, err := mutableState.AddActivityTaskTimedOutEvent( - activityInfo.ScheduledEventId, - activityInfo.StartedEventId, - timeoutFailure, - retryState, - ); err != nil { - return err - } - updateMutableState = true - scheduleWorkflowTask = true - } - - if !updateMutableState { - return nil - } - return t.updateWorkflowExecution(ctx, weContext, mutableState, scheduleWorkflowTask) -} - -func (t *timerQueueActiveTaskExecutor) executeWorkflowTaskTimeoutTask( - ctx context.Context, - task *tasks.WorkflowTaskTimeoutTask, -) (retError error) { - ctx, cancel := context.WithTimeout(ctx, taskTimeout) - defer cancel() - - weContext, release, err := getWorkflowExecutionContextForTask(ctx, t.cache, task) - if err != nil { - return err - } - defer func() { release(retError) }() - - mutableState, err := loadMutableStateForTimerTask(ctx, weContext, task, t.metricHandler, t.logger) - if err != nil { - return err - } - if mutableState == nil || !mutableState.IsWorkflowExecutionRunning() { - return nil - } - - workflowTask := mutableState.GetWorkflowTaskByID(task.EventID) - if workflowTask == nil { - return nil - } - - if workflowTask.Type == enumsspb.WORKFLOW_TASK_TYPE_SPECULATIVE { - // Check if mutable state still points to this task. - // Mutable state can lost speculative WT or even has another one there if, for example, workflow was evicted from cache. - if !mutableState.CheckSpeculativeWorkflowTaskTimeoutTask(task) { - return nil - } - } else { - err = CheckTaskVersion(t.shard, t.logger, mutableState.GetNamespaceEntry(), workflowTask.Version, task.Version, task) - if err != nil { - return err - } - - if workflowTask.Attempt != task.ScheduleAttempt { - return nil - } - } - - scheduleWorkflowTask := false - switch task.TimeoutType { - case enumspb.TIMEOUT_TYPE_START_TO_CLOSE: - t.emitTimeoutMetricScopeWithNamespaceTag( - namespace.ID(mutableState.GetExecutionInfo().NamespaceId), - metrics.TimerActiveTaskWorkflowTaskTimeoutScope, - enumspb.TIMEOUT_TYPE_START_TO_CLOSE, - ) - if _, err := mutableState.AddWorkflowTaskTimedOutEvent( - workflowTask, - ); err != nil { - return err - } - scheduleWorkflowTask = true - - case enumspb.TIMEOUT_TYPE_SCHEDULE_TO_START: - if workflowTask.StartedEventID != common.EmptyEventID { - // workflowTask has already started - return nil - } - - t.emitTimeoutMetricScopeWithNamespaceTag( - namespace.ID(mutableState.GetExecutionInfo().NamespaceId), - metrics.TimerActiveTaskWorkflowTaskTimeoutScope, - enumspb.TIMEOUT_TYPE_SCHEDULE_TO_START, - ) - _, err := mutableState.AddWorkflowTaskScheduleToStartTimeoutEvent(workflowTask) - if err != nil { - return err - } - scheduleWorkflowTask = true - } - - return t.updateWorkflowExecution(ctx, weContext, mutableState, scheduleWorkflowTask) -} - -func (t *timerQueueActiveTaskExecutor) executeWorkflowBackoffTimerTask( - ctx context.Context, - task *tasks.WorkflowBackoffTimerTask, -) (retError error) { - ctx, cancel := context.WithTimeout(ctx, taskTimeout) - defer cancel() - - weContext, release, err := getWorkflowExecutionContextForTask(ctx, t.cache, task) - if err != nil { - return err - } - defer func() { release(retError) }() - - mutableState, err := loadMutableStateForTimerTask(ctx, weContext, task, 
t.metricHandler, t.logger) - if err != nil { - return err - } - if mutableState == nil || !mutableState.IsWorkflowExecutionRunning() { - return nil - } - - if task.WorkflowBackoffType == enumsspb.WORKFLOW_BACKOFF_TYPE_RETRY { - t.metricHandler.Counter(metrics.WorkflowRetryBackoffTimerCount.GetMetricName()).Record( - 1, - metrics.OperationTag(metrics.TimerActiveTaskWorkflowBackoffTimerScope), - ) - } else if task.WorkflowBackoffType == enumsspb.WORKFLOW_BACKOFF_TYPE_CRON { - t.metricHandler.Counter(metrics.WorkflowCronBackoffTimerCount.GetMetricName()).Record( - 1, - metrics.OperationTag(metrics.TimerActiveTaskWorkflowBackoffTimerScope), - ) - } else if task.WorkflowBackoffType == enumsspb.WORKFLOW_BACKOFF_TYPE_DELAY_START { - t.metricHandler.Counter(metrics.WorkflowDelayedStartBackoffTimerCount.GetMetricName()).Record( - 1, - metrics.OperationTag(metrics.TimerActiveTaskWorkflowBackoffTimerScope), - ) - } - - if mutableState.HadOrHasWorkflowTask() { - // already has workflow task - return nil - } - - // schedule first workflow task - return t.updateWorkflowExecution(ctx, weContext, mutableState, true) -} - -func (t *timerQueueActiveTaskExecutor) executeActivityRetryTimerTask( - ctx context.Context, - task *tasks.ActivityRetryTimerTask, -) (retError error) { - ctx, cancel := context.WithTimeout(ctx, taskTimeout) - defer cancel() - - weContext, release, err := getWorkflowExecutionContextForTask(ctx, t.cache, task) - if err != nil { - return err - } - defer func() { release(retError) }() - - mutableState, err := loadMutableStateForTimerTask(ctx, weContext, task, t.metricHandler, t.logger) - if err != nil { - return err - } - if mutableState == nil || !mutableState.IsWorkflowExecutionRunning() { - return nil - } - - // generate activity task - activityInfo, ok := mutableState.GetActivityInfo(task.EventID) - if !ok || task.Attempt < activityInfo.Attempt || activityInfo.StartedEventId != common.EmptyEventID { - if ok { - t.logger.Info("Duplicate activity retry timer task", - tag.WorkflowID(mutableState.GetExecutionInfo().WorkflowId), - tag.WorkflowRunID(mutableState.GetExecutionState().GetRunId()), - tag.WorkflowNamespaceID(mutableState.GetExecutionInfo().NamespaceId), - tag.WorkflowScheduledEventID(activityInfo.ScheduledEventId), - tag.Attempt(activityInfo.Attempt), - tag.FailoverVersion(activityInfo.Version), - tag.TimerTaskStatus(activityInfo.TimerTaskStatus), - tag.ScheduleAttempt(task.Attempt)) - } - return nil - } - err = CheckTaskVersion(t.shard, t.logger, mutableState.GetNamespaceEntry(), activityInfo.Version, task.Version, task) - if err != nil { - return err - } - - taskQueue := &taskqueuepb.TaskQueue{ - Name: activityInfo.TaskQueue, - Kind: enumspb.TASK_QUEUE_KIND_NORMAL, - } - scheduleToStartTimeout := timestamp.DurationValue(activityInfo.ScheduleToStartTimeout) - directive := common.MakeVersionDirectiveForActivityTask(mutableState.GetWorkerVersionStamp(), activityInfo.UseCompatibleVersion) - - // NOTE: do not access anything related mutable state after this lock release - release(nil) // release earlier as we don't need the lock anymore - - _, retError = t.matchingClient.AddActivityTask(ctx, &matchingservice.AddActivityTaskRequest{ - NamespaceId: task.GetNamespaceID(), - Execution: &commonpb.WorkflowExecution{ - WorkflowId: task.GetWorkflowID(), - RunId: task.GetRunID(), - }, - TaskQueue: taskQueue, - ScheduledEventId: task.EventID, - ScheduleToStartTimeout: timestamp.DurationPtr(scheduleToStartTimeout), - Clock: vclock.NewVectorClock(t.shard.GetClusterMetadata().GetClusterID(), 
t.shard.GetShardID(), task.TaskID), - VersionDirective: directive, - }) - - return retError -} - -func (t *timerQueueActiveTaskExecutor) executeWorkflowTimeoutTask( - ctx context.Context, - task *tasks.WorkflowTimeoutTask, -) (retError error) { - ctx, cancel := context.WithTimeout(ctx, taskTimeout) - defer cancel() - - weContext, release, err := getWorkflowExecutionContextForTask(ctx, t.cache, task) - if err != nil { - return err - } - defer func() { release(retError) }() - - mutableState, err := loadMutableStateForTimerTask(ctx, weContext, task, t.metricHandler, t.logger) - if err != nil { - return err - } - if mutableState == nil || !mutableState.IsWorkflowExecutionRunning() { - return nil - } - - startVersion, err := mutableState.GetStartVersion() - if err != nil { - return err - } - err = CheckTaskVersion(t.shard, t.logger, mutableState.GetNamespaceEntry(), startVersion, task.Version, task) - if err != nil { - return err - } - - timeoutFailure := failure.NewTimeoutFailure("workflow timeout", enumspb.TIMEOUT_TYPE_START_TO_CLOSE) - backoffInterval := backoff.NoBackoff - retryState := enumspb.RETRY_STATE_TIMEOUT - initiator := enumspb.CONTINUE_AS_NEW_INITIATOR_UNSPECIFIED - - wfExpTime := timestamp.TimeValue(mutableState.GetExecutionInfo().WorkflowExecutionExpirationTime) - if wfExpTime.IsZero() || wfExpTime.After(t.shard.GetTimeSource().Now()) { - backoffInterval, retryState = mutableState.GetRetryBackoffDuration(timeoutFailure) - if backoffInterval != backoff.NoBackoff { - // We have a retry policy and we should retry. - initiator = enumspb.CONTINUE_AS_NEW_INITIATOR_RETRY - } else if backoffInterval = mutableState.GetCronBackoffDuration(); backoffInterval != backoff.NoBackoff { - // We have a cron schedule. - initiator = enumspb.CONTINUE_AS_NEW_INITIATOR_CRON_SCHEDULE - } - } - - var newRunID string - if initiator != enumspb.CONTINUE_AS_NEW_INITIATOR_UNSPECIFIED { - newRunID = uuid.New() - } - - // First add timeout workflow event, no matter what we're doing next. - if err := workflow.TimeoutWorkflow( - mutableState, - retryState, - newRunID, - ); err != nil { - return err - } - - // No more retries, or workflow is expired. - if initiator == enumspb.CONTINUE_AS_NEW_INITIATOR_UNSPECIFIED { - // We apply the update to execution using optimistic concurrency. If it fails due to a conflict than reload - // the history and try the operation again. 
- return t.updateWorkflowExecution(ctx, weContext, mutableState, false) - } - - startEvent, err := mutableState.GetStartEvent(ctx) - if err != nil { - return err - } - startAttr := startEvent.GetWorkflowExecutionStartedEventAttributes() - - newMutableState := workflow.NewMutableState( - t.shard, - t.shard.GetEventsCache(), - t.shard.GetLogger(), - mutableState.GetNamespaceEntry(), - t.shard.GetTimeSource().Now(), - ) - err = workflow.SetupNewWorkflowForRetryOrCron( - ctx, - mutableState, - newMutableState, - newRunID, - startAttr, - startAttr.LastCompletionResult, - timeoutFailure, - backoffInterval, - initiator, - ) - if err != nil { - return err - } - - err = newMutableState.SetHistoryTree( - ctx, - newMutableState.GetExecutionInfo().WorkflowExecutionTimeout, - newMutableState.GetExecutionInfo().WorkflowRunTimeout, - newRunID) - if err != nil { - return err - } - - newExecutionInfo := newMutableState.GetExecutionInfo() - newExecutionState := newMutableState.GetExecutionState() - return weContext.UpdateWorkflowExecutionWithNewAsActive( - ctx, - workflow.NewContext( - t.shard, - definition.NewWorkflowKey( - newExecutionInfo.NamespaceId, - newExecutionInfo.WorkflowId, - newExecutionState.RunId, - ), - t.logger, - ), - newMutableState, - ) -} - -func (t *timerQueueActiveTaskExecutor) getTimerSequence( - mutableState workflow.MutableState, -) workflow.TimerSequence { - return workflow.NewTimerSequence(mutableState) -} - -func (t *timerQueueActiveTaskExecutor) updateWorkflowExecution( - ctx context.Context, - context workflow.Context, - mutableState workflow.MutableState, - scheduleNewWorkflowTask bool, -) error { - var err error - if scheduleNewWorkflowTask { - // Schedule a new workflow task. - err = workflow.ScheduleWorkflowTask(mutableState) - if err != nil { - return err - } - } - return context.UpdateWorkflowExecutionAsActive(ctx) -} - -func (t *timerQueueActiveTaskExecutor) emitTimeoutMetricScopeWithNamespaceTag( - namespaceID namespace.ID, - operation string, - timerType enumspb.TimeoutType, -) { - namespaceEntry, err := t.registry.GetNamespaceByID(namespaceID) - if err != nil { - return - } - metricsScope := t.metricHandler.WithTags( - metrics.OperationTag(operation), - metrics.NamespaceTag(namespaceEntry.Name().String()), - ) - switch timerType { - case enumspb.TIMEOUT_TYPE_SCHEDULE_TO_START: - metricsScope.Counter(metrics.ScheduleToStartTimeoutCounter.GetMetricName()).Record(1) - case enumspb.TIMEOUT_TYPE_SCHEDULE_TO_CLOSE: - metricsScope.Counter(metrics.ScheduleToCloseTimeoutCounter.GetMetricName()).Record(1) - case enumspb.TIMEOUT_TYPE_START_TO_CLOSE: - metricsScope.Counter(metrics.StartToCloseTimeoutCounter.GetMetricName()).Record(1) - case enumspb.TIMEOUT_TYPE_HEARTBEAT: - metricsScope.Counter(metrics.HeartbeatTimeoutCounter.GetMetricName()).Record(1) - } -} diff -Nru temporal-1.21.5-1/src/service/history/timerQueueActiveTaskExecutor_test.go temporal-1.22.5/src/service/history/timerQueueActiveTaskExecutor_test.go --- temporal-1.21.5-1/src/service/history/timerQueueActiveTaskExecutor_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/timerQueueActiveTaskExecutor_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,1491 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
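The file removed next, timerQueueActiveTaskExecutor_test.go, is the executor's unit-test suite. Its scaffolding follows the usual testify-plus-gomock pattern visible in its SetupTest/TearDownTest below: a suite struct embedding *require.Assertions, a per-test gomock controller, and Finish in teardown. A trimmed-down, hypothetical skeleton of that pattern:

package history_test

import (
	"testing"

	"github.com/golang/mock/gomock"
	"github.com/stretchr/testify/require"
	"github.com/stretchr/testify/suite"
)

// exampleSuite is a hypothetical, reduced analogue of the removed
// timerQueueActiveTaskExecutorSuite: testify drives the suite, require gives
// hard assertions, and a gomock controller is created per test and verified
// in teardown.
type exampleSuite struct {
	suite.Suite
	*require.Assertions

	controller *gomock.Controller
}

func TestExampleSuite(t *testing.T) {
	suite.Run(t, new(exampleSuite))
}

func (s *exampleSuite) SetupTest() {
	s.Assertions = require.New(s.T())
	s.controller = gomock.NewController(s.T())
}

func (s *exampleSuite) TearDownTest() {
	s.controller.Finish()
}

func (s *exampleSuite) TestNothingFails() {
	s.True(true)
}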
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package history - -import ( - "context" - "testing" - "time" - - "github.com/golang/mock/gomock" - "github.com/pborman/uuid" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - - commonpb "go.temporal.io/api/common/v1" - enumspb "go.temporal.io/api/enums/v1" - taskqueuepb "go.temporal.io/api/taskqueue/v1" - "go.temporal.io/api/workflowservice/v1" - - enumsspb "go.temporal.io/server/api/enums/v1" - "go.temporal.io/server/api/historyservice/v1" - "go.temporal.io/server/api/matchingservice/v1" - "go.temporal.io/server/api/matchingservicemock/v1" - persistencespb "go.temporal.io/server/api/persistence/v1" - "go.temporal.io/server/common" - "go.temporal.io/server/common/clock" - "go.temporal.io/server/common/cluster" - "go.temporal.io/server/common/definition" - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/metrics" - "go.temporal.io/server/common/namespace" - "go.temporal.io/server/common/persistence" - "go.temporal.io/server/common/persistence/versionhistory" - "go.temporal.io/server/common/primitives/timestamp" - "go.temporal.io/server/service/history/deletemanager" - "go.temporal.io/server/service/history/events" - "go.temporal.io/server/service/history/queues" - "go.temporal.io/server/service/history/shard" - "go.temporal.io/server/service/history/tasks" - "go.temporal.io/server/service/history/tests" - "go.temporal.io/server/service/history/vclock" - "go.temporal.io/server/service/history/workflow" - wcache "go.temporal.io/server/service/history/workflow/cache" -) - -type ( - timerQueueActiveTaskExecutorSuite struct { - suite.Suite - *require.Assertions - - controller *gomock.Controller - mockShard *shard.ContextTest - mockTxProcessor *queues.MockQueue - mockTimerProcessor *queues.MockQueue - mockVisibilityProcessor *queues.MockQueue - mockArchivalProcessor *queues.MockQueue - mockNamespaceCache *namespace.MockRegistry - mockMatchingClient *matchingservicemock.MockMatchingServiceClient - mockClusterMetadata *cluster.MockMetadata - - mockHistoryEngine *historyEngineImpl - mockDeleteManager *deletemanager.MockDeleteManager - mockExecutionMgr *persistence.MockExecutionManager - - workflowCache wcache.Cache - logger log.Logger - namespaceID namespace.ID - namespaceEntry *namespace.Namespace - version int64 - now time.Time - timeSource *clock.EventTimeSource - timerQueueActiveTaskExecutor *timerQueueActiveTaskExecutor - } -) - -func 
TestTimerQueueActiveTaskExecutorSuite(t *testing.T) { - s := new(timerQueueActiveTaskExecutorSuite) - suite.Run(t, s) -} - -func (s *timerQueueActiveTaskExecutorSuite) SetupSuite() { -} - -func (s *timerQueueActiveTaskExecutorSuite) SetupTest() { - s.Assertions = require.New(s.T()) - - s.namespaceID = tests.NamespaceID - s.namespaceEntry = tests.GlobalNamespaceEntry - s.version = s.namespaceEntry.FailoverVersion() - s.now = time.Now().UTC() - s.timeSource = clock.NewEventTimeSource().Update(s.now) - - s.controller = gomock.NewController(s.T()) - s.mockTxProcessor = queues.NewMockQueue(s.controller) - s.mockTimerProcessor = queues.NewMockQueue(s.controller) - s.mockVisibilityProcessor = queues.NewMockQueue(s.controller) - s.mockArchivalProcessor = queues.NewMockQueue(s.controller) - s.mockTxProcessor.EXPECT().Category().Return(tasks.CategoryTransfer).AnyTimes() - s.mockTimerProcessor.EXPECT().Category().Return(tasks.CategoryTimer).AnyTimes() - s.mockVisibilityProcessor.EXPECT().Category().Return(tasks.CategoryVisibility).AnyTimes() - s.mockArchivalProcessor.EXPECT().Category().Return(tasks.CategoryArchival).AnyTimes() - s.mockTxProcessor.EXPECT().NotifyNewTasks(gomock.Any()).AnyTimes() - s.mockTimerProcessor.EXPECT().NotifyNewTasks(gomock.Any()).AnyTimes() - s.mockVisibilityProcessor.EXPECT().NotifyNewTasks(gomock.Any()).AnyTimes() - s.mockArchivalProcessor.EXPECT().NotifyNewTasks(gomock.Any()).AnyTimes() - - config := tests.NewDynamicConfig() - s.mockShard = shard.NewTestContextWithTimeSource( - s.controller, - &persistencespb.ShardInfo{ - ShardId: 1, - RangeId: 1, - }, - config, - s.timeSource, - ) - s.mockShard.SetEventsCacheForTesting(events.NewEventsCache( - s.mockShard.GetShardID(), - s.mockShard.GetConfig().EventsCacheInitialSize(), - s.mockShard.GetConfig().EventsCacheMaxSize(), - s.mockShard.GetConfig().EventsCacheTTL(), - s.mockShard.GetExecutionManager(), - false, - s.mockShard.GetLogger(), - s.mockShard.GetMetricsHandler(), - )) - - s.mockNamespaceCache = s.mockShard.Resource.NamespaceCache - s.mockMatchingClient = s.mockShard.Resource.MatchingClient - s.mockExecutionMgr = s.mockShard.Resource.ExecutionMgr - s.mockClusterMetadata = s.mockShard.Resource.ClusterMetadata - // ack manager will use the namespace information - s.mockNamespaceCache.EXPECT().GetNamespaceByID(gomock.Any()).Return(tests.GlobalNamespaceEntry, nil).AnyTimes() - s.mockNamespaceCache.EXPECT().GetNamespaceName(gomock.Any()).Return(tests.Namespace, nil).AnyTimes() - s.mockClusterMetadata.EXPECT().GetClusterID().Return(tests.Version).AnyTimes() - s.mockClusterMetadata.EXPECT().IsVersionFromSameCluster(tests.Version, tests.Version).Return(true).AnyTimes() - s.mockClusterMetadata.EXPECT().GetCurrentClusterName().Return(cluster.TestCurrentClusterName).AnyTimes() - s.mockClusterMetadata.EXPECT().GetAllClusterInfo().Return(cluster.TestAllClusterInfo).AnyTimes() - s.mockClusterMetadata.EXPECT().IsGlobalNamespaceEnabled().Return(true).AnyTimes() - s.mockClusterMetadata.EXPECT().ClusterNameForFailoverVersion(s.namespaceEntry.IsGlobalNamespace(), s.version).Return(s.mockClusterMetadata.GetCurrentClusterName()).AnyTimes() - s.workflowCache = wcache.NewCache(s.mockShard) - s.logger = s.mockShard.GetLogger() - - s.mockDeleteManager = deletemanager.NewMockDeleteManager(s.controller) - h := &historyEngineImpl{ - currentClusterName: s.mockShard.Resource.GetClusterMetadata().GetCurrentClusterName(), - shard: s.mockShard, - clusterMetadata: s.mockClusterMetadata, - executionManager: s.mockExecutionMgr, - logger: s.logger, - 
tokenSerializer: common.NewProtoTaskTokenSerializer(), - metricsHandler: s.mockShard.GetMetricsHandler(), - eventNotifier: events.NewNotifier(clock.NewRealTimeSource(), metrics.NoopMetricsHandler, func(namespace.ID, string) int32 { return 1 }), - queueProcessors: map[tasks.Category]queues.Queue{ - s.mockTxProcessor.Category(): s.mockTxProcessor, - s.mockTimerProcessor.Category(): s.mockTimerProcessor, - s.mockVisibilityProcessor.Category(): s.mockVisibilityProcessor, - s.mockArchivalProcessor.Category(): s.mockArchivalProcessor, - }, - } - s.mockShard.SetEngineForTesting(h) - s.mockHistoryEngine = h - - s.timerQueueActiveTaskExecutor = newTimerQueueActiveTaskExecutor( - s.mockShard, - s.workflowCache, - s.mockDeleteManager, - s.logger, - metrics.NoopMetricsHandler, - config, - s.mockShard.Resource.GetMatchingClient(), - ).(*timerQueueActiveTaskExecutor) -} - -func (s *timerQueueActiveTaskExecutorSuite) TearDownTest() { - s.controller.Finish() - s.mockShard.StopForTest() -} - -func (s *timerQueueActiveTaskExecutorSuite) TestProcessUserTimerTimeout_Fire() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - mutableState := workflow.TestGlobalMutableState( - s.mockShard, - s.mockShard.GetEventsCache(), - s.logger, - s.version, - execution.GetRunId(), - ) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowRunTimeout: timestamp.DurationPtr(200 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - }, - ) - s.Nil(err) - - wt := addWorkflowTaskScheduledEvent(mutableState) - event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) - wt.StartedEventID = event.GetEventId() - event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") - - timerID := "timer" - timerTimeout := 2 * time.Second - event, _ = addTimerStartedEvent(mutableState, event.GetEventId(), timerID, timerTimeout) - - timerSequence := workflow.NewTimerSequence(mutableState) - mutableState.InsertTasks[tasks.CategoryTimer] = nil - modified, err := timerSequence.CreateNextUserTimer() - s.NoError(err) - s.True(modified) - task := mutableState.InsertTasks[tasks.CategoryTimer][0] - - timerTask := &tasks.UserTimerTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - Version: s.version, - TaskID: int64(100), - VisibilityTimestamp: task.(*tasks.UserTimerTask).VisibilityTimestamp, - EventID: event.EventId, - } - - persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) - - s.timeSource.Update(s.now.Add(2 * timerTimeout)) - _, _, err = s.timerQueueActiveTaskExecutor.Execute(context.Background(), 
s.newTaskExecutable(timerTask)) - s.NoError(err) - - _, ok := s.getMutableStateFromCache(s.namespaceID, execution.GetWorkflowId(), execution.GetRunId()).GetUserTimerInfo(timerID) - s.False(ok) -} - -func (s *timerQueueActiveTaskExecutorSuite) TestProcessUserTimerTimeout_Noop() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - mutableState := workflow.TestGlobalMutableState( - s.mockShard, - s.mockShard.GetEventsCache(), - s.logger, - s.version, - execution.GetRunId(), - ) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowRunTimeout: timestamp.DurationPtr(200 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - }, - ) - s.Nil(err) - - wt := addWorkflowTaskScheduledEvent(mutableState) - event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) - wt.StartedEventID = event.GetEventId() - event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") - - timerID := "timer" - timerTimeout := 2 * time.Second - event, _ = addTimerStartedEvent(mutableState, event.GetEventId(), timerID, timerTimeout) - - timerSequence := workflow.NewTimerSequence(mutableState) - mutableState.InsertTasks[tasks.CategoryTimer] = nil - modified, err := timerSequence.CreateNextUserTimer() - s.NoError(err) - s.True(modified) - task := mutableState.InsertTasks[tasks.CategoryTimer][0] - - timerTask := &tasks.UserTimerTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - Version: s.version, - TaskID: int64(100), - VisibilityTimestamp: task.(*tasks.UserTimerTask).VisibilityTimestamp, - EventID: event.EventId, - } - - event = addTimerFiredEvent(mutableState, timerID) - // Flush buffered events so real IDs get assigned - mutableState.FlushBufferedEvents() - - persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - - s.timeSource.Update(s.now.Add(2 * timerTimeout)) - _, _, err = s.timerQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) - s.NoError(err) -} - -func (s *timerQueueActiveTaskExecutorSuite) TestProcessActivityTimeout_NoRetryPolicy_Fire() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: 
&taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowExecutionTimeout: timestamp.DurationPtr(200 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - }, - ) - s.Nil(err) - - wt := addWorkflowTaskScheduledEvent(mutableState) - event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) - wt.StartedEventID = event.GetEventId() - event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") - - taskqueue := "taskqueue" - activityID := "activity" - activityType := "activity type" - timerTimeout := 2 * time.Second - scheduledEvent, _ := addActivityTaskScheduledEvent( - mutableState, - event.GetEventId(), - activityID, - activityType, - taskqueue, - nil, - timerTimeout, - timerTimeout, - timerTimeout, - timerTimeout, - ) - - timerSequence := workflow.NewTimerSequence(mutableState) - mutableState.InsertTasks[tasks.CategoryTimer] = nil - modified, err := timerSequence.CreateNextActivityTimer() - s.NoError(err) - s.True(modified) - task := mutableState.InsertTasks[tasks.CategoryTimer][0] - - timerTask := &tasks.ActivityTimeoutTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - Attempt: 1, - Version: s.version, - TaskID: int64(100), - TimeoutType: enumspb.TIMEOUT_TYPE_SCHEDULE_TO_CLOSE, - VisibilityTimestamp: task.(*tasks.ActivityTimeoutTask).VisibilityTimestamp, - EventID: wt.ScheduledEventID, - } - - persistenceMutableState := s.createPersistenceMutableState(mutableState, scheduledEvent.GetEventId(), scheduledEvent.GetVersion()) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) - - s.timeSource.Update(s.now.Add(2 * timerTimeout)) - _, _, err = s.timerQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) - s.NoError(err) - - _, ok := s.getMutableStateFromCache(s.namespaceID, execution.GetWorkflowId(), execution.GetRunId()).GetActivityInfo(scheduledEvent.GetEventId()) - s.False(ok) -} - -func (s *timerQueueActiveTaskExecutorSuite) TestProcessActivityTimeout_NoRetryPolicy_Noop() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowExecutionTimeout: timestamp.DurationPtr(200 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - }, - ) - s.Nil(err) - - wt := addWorkflowTaskScheduledEvent(mutableState) - event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) - wt.StartedEventID = event.GetEventId() - event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, 
wt.StartedEventID, "some random identity") - - identity := "identity" - taskqueue := "taskqueue" - activityID := "activity" - activityType := "activity type" - timerTimeout := 2 * time.Second - scheduledEvent, _ := addActivityTaskScheduledEvent( - mutableState, - event.GetEventId(), - activityID, - activityType, - taskqueue, - nil, - timerTimeout, - timerTimeout, - timerTimeout, - timerTimeout, - ) - startedEvent := addActivityTaskStartedEvent(mutableState, scheduledEvent.GetEventId(), identity) - - timerSequence := workflow.NewTimerSequence(mutableState) - mutableState.InsertTasks[tasks.CategoryTimer] = nil - modified, err := timerSequence.CreateNextActivityTimer() - s.NoError(err) - s.True(modified) - task := mutableState.InsertTasks[tasks.CategoryTimer][0] - - timerTask := &tasks.ActivityTimeoutTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - Attempt: 1, - Version: s.version, - TaskID: int64(100), - TimeoutType: enumspb.TIMEOUT_TYPE_SCHEDULE_TO_CLOSE, - VisibilityTimestamp: task.(*tasks.ActivityTimeoutTask).VisibilityTimestamp, - EventID: wt.ScheduledEventID, - } - - completeEvent := addActivityTaskCompletedEvent(mutableState, scheduledEvent.GetEventId(), startedEvent.GetEventId(), nil, identity) - // Flush buffered events so real IDs get assigned - mutableState.FlushBufferedEvents() - - persistenceMutableState := s.createPersistenceMutableState(mutableState, completeEvent.GetEventId(), completeEvent.GetVersion()) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - - s.timeSource.Update(s.now.Add(2 * timerTimeout)) - _, _, err = s.timerQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) - s.NoError(err) -} - -func (s *timerQueueActiveTaskExecutorSuite) TestProcessActivityTimeout_RetryPolicy_Retry() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowRunTimeout: timestamp.DurationPtr(200 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - }, - ) - s.Nil(err) - - wt := addWorkflowTaskScheduledEvent(mutableState) - event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) - wt.StartedEventID = event.GetEventId() - event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") - - identity := "identity" - taskqueue := "taskqueue" - activityID := "activity" - activityType := "activity type" - timerTimeout := 2 * time.Second - scheduledEvent, _ := addActivityTaskScheduledEventWithRetry( - mutableState, - event.GetEventId(), - activityID, - activityType, - taskqueue, - nil, - 999*time.Second, - timerTimeout, - timerTimeout, - timerTimeout, - &commonpb.RetryPolicy{ - InitialInterval: 
timestamp.DurationPtr(1 * time.Second), - BackoffCoefficient: 1.2, - MaximumInterval: timestamp.DurationPtr(5 * time.Second), - MaximumAttempts: 5, - NonRetryableErrorTypes: []string{"(╯' - ')╯ ┻━┻ "}, - }, - ) - startedEvent := addActivityTaskStartedEvent(mutableState, scheduledEvent.GetEventId(), identity) - s.Nil(startedEvent) - - timerSequence := workflow.NewTimerSequence(mutableState) - mutableState.InsertTasks[tasks.CategoryTimer] = nil - modified, err := timerSequence.CreateNextActivityTimer() - s.NoError(err) - s.True(modified) - task := mutableState.InsertTasks[tasks.CategoryTimer][0] - - timerTask := &tasks.ActivityTimeoutTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - Attempt: 1, - Version: s.version, - TaskID: int64(100), - TimeoutType: enumspb.TIMEOUT_TYPE_SCHEDULE_TO_CLOSE, - VisibilityTimestamp: task.(*tasks.ActivityTimeoutTask).VisibilityTimestamp, - EventID: wt.ScheduledEventID, - } - - persistenceMutableState := s.createPersistenceMutableState(mutableState, scheduledEvent.GetEventId(), scheduledEvent.GetVersion()) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) - - s.timeSource.Update(s.now.Add(2 * timerTimeout)) - _, _, err = s.timerQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) - s.NoError(err) - - activityInfo, ok := s.getMutableStateFromCache(s.namespaceID, execution.GetWorkflowId(), execution.GetRunId()).GetActivityInfo(scheduledEvent.GetEventId()) - s.True(ok) - s.Equal(scheduledEvent.GetEventId(), activityInfo.ScheduledEventId) - s.Equal(common.EmptyEventID, activityInfo.StartedEventId) - // only a schedule to start timer will be created, apart from the retry timer - s.Equal(int32(workflow.TimerTaskStatusCreatedScheduleToStart), activityInfo.TimerTaskStatus) -} - -func (s *timerQueueActiveTaskExecutorSuite) TestProcessActivityTimeout_RetryPolicy_Fire() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowRunTimeout: timestamp.DurationPtr(200 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - }, - ) - s.Nil(err) - - wt := addWorkflowTaskScheduledEvent(mutableState) - event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) - wt.StartedEventID = event.GetEventId() - event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") - - taskqueue := "taskqueue" - activityID := "activity" - activityType := "activity type" - timerTimeout := 2 * time.Second - scheduledEvent, _ := addActivityTaskScheduledEventWithRetry( - 
mutableState, - event.GetEventId(), - activityID, - activityType, - taskqueue, - nil, - timerTimeout, - timerTimeout, - timerTimeout, - timerTimeout, - &commonpb.RetryPolicy{ - InitialInterval: timestamp.DurationPtr(1 * time.Second), - BackoffCoefficient: 1.2, - MaximumInterval: timestamp.DurationPtr(5 * time.Second), - MaximumAttempts: 5, - NonRetryableErrorTypes: []string{"(╯' - ')╯ ┻━┻ "}, - }, - ) - - timerSequence := workflow.NewTimerSequence(mutableState) - mutableState.InsertTasks[tasks.CategoryTimer] = nil - modified, err := timerSequence.CreateNextActivityTimer() - s.NoError(err) - s.True(modified) - task := mutableState.InsertTasks[tasks.CategoryTimer][0] - - timerTask := &tasks.ActivityTimeoutTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - Attempt: 1, - Version: s.version, - TaskID: int64(100), - TimeoutType: enumspb.TIMEOUT_TYPE_SCHEDULE_TO_CLOSE, - VisibilityTimestamp: task.(*tasks.ActivityTimeoutTask).VisibilityTimestamp, - EventID: wt.ScheduledEventID, - } - - persistenceMutableState := s.createPersistenceMutableState(mutableState, scheduledEvent.GetEventId(), scheduledEvent.GetVersion()) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) - - s.timeSource.Update(s.now.Add(2 * timerTimeout)) - _, _, err = s.timerQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) - s.NoError(err) - - _, ok := s.getMutableStateFromCache(s.namespaceID, execution.GetWorkflowId(), execution.GetRunId()).GetActivityInfo(scheduledEvent.GetEventId()) - s.False(ok) -} - -func (s *timerQueueActiveTaskExecutorSuite) TestProcessActivityTimeout_RetryPolicy_Noop() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowRunTimeout: timestamp.DurationPtr(200 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - }, - ) - s.Nil(err) - - wt := addWorkflowTaskScheduledEvent(mutableState) - event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) - wt.StartedEventID = event.GetEventId() - event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") - - identity := "identity" - taskqueue := "taskqueue" - activityID := "activity" - activityType := "activity type" - timerTimeout := 2 * time.Second - scheduledEvent, _ := addActivityTaskScheduledEventWithRetry( - mutableState, - event.GetEventId(), - activityID, - activityType, - taskqueue, - nil, - timerTimeout, - timerTimeout, - timerTimeout, - timerTimeout, - &commonpb.RetryPolicy{ - InitialInterval: timestamp.DurationPtr(1 * 
time.Second), - BackoffCoefficient: 1.2, - MaximumInterval: timestamp.DurationPtr(5 * time.Second), - MaximumAttempts: 5, - NonRetryableErrorTypes: []string{"(╯' - ')╯ ┻━┻ "}, - }, - ) - startedEvent := addActivityTaskStartedEvent(mutableState, scheduledEvent.GetEventId(), identity) - s.Nil(startedEvent) - - timerSequence := workflow.NewTimerSequence(mutableState) - mutableState.InsertTasks[tasks.CategoryTimer] = nil - modified, err := timerSequence.CreateNextActivityTimer() - s.NoError(err) - s.True(modified) - task := mutableState.InsertTasks[tasks.CategoryTimer][0] - - timerTask := &tasks.ActivityTimeoutTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - Attempt: 1, - Version: s.version, - TaskID: int64(100), - TimeoutType: enumspb.TIMEOUT_TYPE_SCHEDULE_TO_CLOSE, - VisibilityTimestamp: task.(*tasks.ActivityTimeoutTask).VisibilityTimestamp, - EventID: wt.ScheduledEventID, - } - - completeEvent := addActivityTaskCompletedEvent(mutableState, scheduledEvent.GetEventId(), common.TransientEventID, nil, identity) - // Flush buffered events so real IDs get assigned - mutableState.FlushBufferedEvents() - - persistenceMutableState := s.createPersistenceMutableState(mutableState, completeEvent.GetEventId(), completeEvent.GetVersion()) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - - s.timeSource.Update(s.now.Add(2 * timerTimeout)) - _, _, err = s.timerQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) - s.NoError(err) -} - -func (s *timerQueueActiveTaskExecutorSuite) TestProcessActivityTimeout_Heartbeat_Noop() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowRunTimeout: timestamp.DurationPtr(200 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - }, - ) - s.Nil(err) - - wt := addWorkflowTaskScheduledEvent(mutableState) - event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) - wt.StartedEventID = event.GetEventId() - event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") - - identity := "identity" - taskqueue := "taskqueue" - activityID := "activity" - activityType := "activity type" - timerTimeout := 2 * time.Second - heartbeatTimerTimeout := time.Second - scheduledEvent, _ := addActivityTaskScheduledEventWithRetry( - mutableState, - event.GetEventId(), - activityID, - activityType, - taskqueue, - nil, - timerTimeout, - timerTimeout, - timerTimeout, - heartbeatTimerTimeout, - &commonpb.RetryPolicy{ - InitialInterval: timestamp.DurationPtr(1 * time.Second), - BackoffCoefficient: 1.2, - MaximumInterval: timestamp.DurationPtr(5 * time.Second), - MaximumAttempts: 5, - 
NonRetryableErrorTypes: []string{"(╯' - ')╯ ┻━┻ "}, - }, - ) - startedEvent := addActivityTaskStartedEvent(mutableState, scheduledEvent.GetEventId(), identity) - s.Nil(startedEvent) - - timerSequence := workflow.NewTimerSequence(mutableState) - mutableState.InsertTasks[tasks.CategoryTimer] = nil - modified, err := timerSequence.CreateNextActivityTimer() - s.NoError(err) - s.True(modified) - task := mutableState.InsertTasks[tasks.CategoryTimer][0] - s.Equal(enumspb.TIMEOUT_TYPE_HEARTBEAT, task.(*tasks.ActivityTimeoutTask).TimeoutType) - - timerTask := &tasks.ActivityTimeoutTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - Attempt: 1, - Version: s.version, - TaskID: int64(100), - TimeoutType: enumspb.TIMEOUT_TYPE_HEARTBEAT, - VisibilityTimestamp: time.Time{}, - EventID: scheduledEvent.GetEventId(), - } - - persistenceMutableState := s.createPersistenceMutableState(mutableState, scheduledEvent.GetEventId(), scheduledEvent.GetVersion()) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - - _, _, err = s.timerQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) - s.NoError(err) -} - -func (s *timerQueueActiveTaskExecutorSuite) TestWorkflowTaskTimeout_Fire() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowRunTimeout: timestamp.DurationPtr(200 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - }, - ) - s.Nil(err) - - wt := addWorkflowTaskScheduledEvent(mutableState) - startedEvent := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) - - timerTask := &tasks.WorkflowTaskTimeoutTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - ScheduleAttempt: 1, - Version: s.version, - TaskID: int64(100), - TimeoutType: enumspb.TIMEOUT_TYPE_START_TO_CLOSE, - VisibilityTimestamp: s.now, - EventID: wt.ScheduledEventID, - } - - persistenceMutableState := s.createPersistenceMutableState(mutableState, startedEvent.GetEventId(), startedEvent.GetVersion()) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) - - _, _, err = s.timerQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) - s.NoError(err) - - workflowTask := s.getMutableStateFromCache(s.namespaceID, execution.GetWorkflowId(), execution.GetRunId()).GetPendingWorkflowTask() - s.NotNil(workflowTask) - s.True(workflowTask.ScheduledEventID != common.EmptyEventID) 
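Several of the removed tests above pin a fake clock in SetupTest and then call s.timeSource.Update to move time past the timer's deadline before executing the task, so no real waiting happens. A minimal analogue of that controllable time source (a hypothetical type, mirroring only the Now/Update surface the tests rely on):

package main

import (
	"fmt"
	"sync"
	"time"
)

// fakeTimeSource is a simplified stand-in for clock.EventTimeSource: the
// suite pins "now" once, then advances it past a timer deadline on demand.
type fakeTimeSource struct {
	mu  sync.Mutex
	now time.Time
}

func (f *fakeTimeSource) Now() time.Time {
	f.mu.Lock()
	defer f.mu.Unlock()
	return f.now
}

func (f *fakeTimeSource) Update(t time.Time) *fakeTimeSource {
	f.mu.Lock()
	defer f.mu.Unlock()
	f.now = t
	return f
}

func main() {
	start := time.Now().UTC()
	ts := (&fakeTimeSource{}).Update(start)

	deadline := start.Add(2 * time.Second)
	fmt.Println(ts.Now().After(deadline)) // false: timer not yet due

	ts.Update(start.Add(4 * time.Second))
	fmt.Println(ts.Now().After(deadline)) // true: timer would fire
}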
- s.Equal(common.EmptyEventID, workflowTask.StartedEventID) - s.Equal(int32(2), workflowTask.Attempt) -} - -func (s *timerQueueActiveTaskExecutorSuite) TestWorkflowTaskTimeout_Noop() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowRunTimeout: timestamp.DurationPtr(200 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - }, - ) - s.Nil(err) - - wt := addWorkflowTaskScheduledEvent(mutableState) - startedEvent := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) - - timerTask := &tasks.WorkflowTaskTimeoutTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - ScheduleAttempt: 1, - Version: s.version, - TaskID: int64(100), - TimeoutType: enumspb.TIMEOUT_TYPE_START_TO_CLOSE, - VisibilityTimestamp: s.now, - EventID: wt.ScheduledEventID - 1, - } - - persistenceMutableState := s.createPersistenceMutableState(mutableState, startedEvent.GetEventId(), startedEvent.GetVersion()) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - - _, _, err = s.timerQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) - s.NoError(err) -} - -func (s *timerQueueActiveTaskExecutorSuite) TestWorkflowBackoffTimer_Fire() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) - event, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowRunTimeout: timestamp.DurationPtr(200 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - }, - ) - s.Nil(err) - - timerTask := &tasks.WorkflowBackoffTimerTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - Version: s.version, - TaskID: int64(100), - WorkflowBackoffType: enumsspb.WORKFLOW_BACKOFF_TYPE_RETRY, - VisibilityTimestamp: s.now, - } - - persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - 
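A convention worth noting in the removed suite: the "_Fire" tests stub both GetWorkflowExecution and UpdateWorkflowExecution on the execution manager mock, while the "_Noop" tests stub only the read, so any attempted write would trip gomock and fail the test. The same contract, expressed without gomock and with hypothetical names:

package main

import "fmt"

// recordingExecutionMgr is a stand-in for the mocked persistence execution
// manager above; counting writes expresses the Fire-vs-Noop expectation.
type recordingExecutionMgr struct {
	reads, writes int
}

func (m *recordingExecutionMgr) GetWorkflowExecution()    { m.reads++ }
func (m *recordingExecutionMgr) UpdateWorkflowExecution() { m.writes++ }

// executeTimerTask mimics the executor's shape: always load state, but only
// persist when the timer still refers to a pending event (fire == true).
func executeTimerTask(mgr *recordingExecutionMgr, fire bool) {
	mgr.GetWorkflowExecution()
	if !fire {
		return // stale or already-resolved timer: drop without writing
	}
	mgr.UpdateWorkflowExecution()
}

func main() {
	fireCase, noopCase := &recordingExecutionMgr{}, &recordingExecutionMgr{}
	executeTimerTask(fireCase, true)
	executeTimerTask(noopCase, false)
	fmt.Println(fireCase.reads, fireCase.writes) // 1 1
	fmt.Println(noopCase.reads, noopCase.writes) // 1 0
}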
s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) - - _, _, err = s.timerQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) - s.NoError(err) - - workflowTask := s.getMutableStateFromCache(s.namespaceID, execution.GetWorkflowId(), execution.GetRunId()).GetPendingWorkflowTask() - s.NotNil(workflowTask) - s.True(workflowTask.ScheduledEventID != common.EmptyEventID) - s.Equal(common.EmptyEventID, workflowTask.StartedEventID) - s.Equal(int32(1), workflowTask.Attempt) -} - -func (s *timerQueueActiveTaskExecutorSuite) TestWorkflowBackoffTimer_Noop() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowRunTimeout: timestamp.DurationPtr(200 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - }, - ) - s.Nil(err) - - wt := addWorkflowTaskScheduledEvent(mutableState) - event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) - wt.StartedEventID = event.GetEventId() - event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") - - timerTask := &tasks.WorkflowBackoffTimerTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - Version: s.version, - TaskID: int64(100), - WorkflowBackoffType: enumsspb.WORKFLOW_BACKOFF_TYPE_RETRY, - VisibilityTimestamp: s.now, - } - - persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - - _, _, err = s.timerQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) - s.NoError(err) -} - -func (s *timerQueueActiveTaskExecutorSuite) TestActivityRetryTimer_Fire() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{ - Name: taskQueueName, - Kind: enumspb.TASK_QUEUE_KIND_NORMAL, - }, - WorkflowRunTimeout: timestamp.DurationPtr(200 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - }, - ) - s.Nil(err) - - wt := 
addWorkflowTaskScheduledEvent(mutableState) - event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) - wt.StartedEventID = event.GetEventId() - event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") - - activityID := "activity" - activityType := "activity type" - timerTimeout := 2 * time.Second - scheduledEvent, activityInfo := addActivityTaskScheduledEventWithRetry( - mutableState, - event.GetEventId(), - activityID, - activityType, - taskQueueName, - nil, - timerTimeout, - timerTimeout, - timerTimeout, - timerTimeout, - &commonpb.RetryPolicy{ - InitialInterval: timestamp.DurationPtr(1 * time.Second), - BackoffCoefficient: 1.2, - MaximumInterval: timestamp.DurationPtr(5 * time.Second), - MaximumAttempts: 5, - NonRetryableErrorTypes: []string{"(╯' - ')╯ ┻━┻ "}, - }, - ) - activityInfo.Attempt = 1 - - timerTask := &tasks.ActivityRetryTimerTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - Version: s.version, - TaskID: int64(100), - VisibilityTimestamp: s.now, - EventID: activityInfo.ScheduledEventId, - Attempt: activityInfo.Attempt, - } - - persistenceMutableState := s.createPersistenceMutableState(mutableState, scheduledEvent.GetEventId(), scheduledEvent.GetVersion()) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - s.mockMatchingClient.EXPECT().AddActivityTask( - gomock.Any(), - &matchingservice.AddActivityTaskRequest{ - NamespaceId: s.namespaceID.String(), - Execution: &execution, - TaskQueue: &taskqueuepb.TaskQueue{ - Name: activityInfo.TaskQueue, - Kind: enumspb.TASK_QUEUE_KIND_NORMAL, - }, - ScheduledEventId: activityInfo.ScheduledEventId, - ScheduleToStartTimeout: activityInfo.ScheduleToStartTimeout, - Clock: vclock.NewVectorClock(s.mockClusterMetadata.GetClusterID(), s.mockShard.GetShardID(), timerTask.TaskID), - VersionDirective: common.MakeVersionDirectiveForActivityTask(nil, false), - }, - gomock.Any(), - ).Return(&matchingservice.AddActivityTaskResponse{}, nil) - - _, _, err = s.timerQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) - s.NoError(err) -} - -func (s *timerQueueActiveTaskExecutorSuite) TestActivityRetryTimer_Noop() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowRunTimeout: timestamp.DurationPtr(200 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - }, - ) - s.Nil(err) - - wt := addWorkflowTaskScheduledEvent(mutableState) - event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) - wt.StartedEventID = event.GetEventId() - event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, 
wt.ScheduledEventID, wt.StartedEventID, "some random identity") - - identity := "identity" - taskqueue := "taskqueue" - activityID := "activity" - activityType := "activity type" - timerTimeout := 2 * time.Second - scheduledEvent, activityInfo := addActivityTaskScheduledEventWithRetry( - mutableState, - event.GetEventId(), - activityID, - activityType, - taskqueue, - nil, - timerTimeout, - timerTimeout, - timerTimeout, - timerTimeout, - &commonpb.RetryPolicy{ - InitialInterval: timestamp.DurationPtr(1 * time.Second), - BackoffCoefficient: 1.2, - MaximumInterval: timestamp.DurationPtr(5 * time.Second), - MaximumAttempts: 5, - NonRetryableErrorTypes: []string{"(╯' - ')╯ ┻━┻ "}, - }, - ) - startedEvent := addActivityTaskStartedEvent(mutableState, scheduledEvent.GetEventId(), identity) - s.Nil(startedEvent) - - timerTask := &tasks.ActivityRetryTimerTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - Version: s.version, - TaskID: int64(100), - VisibilityTimestamp: s.now, - EventID: activityInfo.ScheduledEventId, - Attempt: activityInfo.Attempt, - } - - persistenceMutableState := s.createPersistenceMutableState(mutableState, scheduledEvent.GetEventId(), scheduledEvent.GetVersion()) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - - _, _, err = s.timerQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) - s.NoError(err) -} - -func (s *timerQueueActiveTaskExecutorSuite) TestWorkflowTimeout_Fire() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowRunTimeout: timestamp.DurationPtr(200 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - WorkflowExecutionExpirationTime: timestamp.TimePtr(s.now.Add(10 * time.Second)), - }, - ) - s.Nil(err) - - wt := addWorkflowTaskScheduledEvent(mutableState) - startEvent := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) - wt.StartedEventID = startEvent.GetEventId() - completionEvent := addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") - - timerTask := &tasks.WorkflowTimeoutTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - Version: s.version, - TaskID: int64(100), - VisibilityTimestamp: s.now, - } - - persistenceMutableState := s.createPersistenceMutableState(mutableState, completionEvent.GetEventId(), completionEvent.GetVersion()) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), 
gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) - - _, _, err = s.timerQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) - s.NoError(err) - - running := s.getMutableStateFromCache(s.namespaceID, execution.GetWorkflowId(), execution.GetRunId()).IsWorkflowExecutionRunning() - s.False(running) -} - -func (s *timerQueueActiveTaskExecutorSuite) TestWorkflowTimeout_Retry() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowRunTimeout: timestamp.DurationPtr(200 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - WorkflowExecutionExpirationTime: timestamp.TimePtr(s.now.Add(10 * time.Second)), - }, - ) - s.Nil(err) - // need to override the workflow retry policy - executionInfo := mutableState.GetExecutionInfo() - executionInfo.HasRetryPolicy = true - executionInfo.WorkflowExecutionExpirationTime = timestamp.TimeNowPtrUtcAddSeconds(1000) - executionInfo.RetryMaximumAttempts = 10 - executionInfo.RetryInitialInterval = timestamp.DurationFromSeconds(1) - executionInfo.RetryMaximumInterval = timestamp.DurationFromSeconds(1) - executionInfo.RetryBackoffCoefficient = 1 - - wt := addWorkflowTaskScheduledEvent(mutableState) - startEvent := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) - wt.StartedEventID = startEvent.GetEventId() - completionEvent := addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") - - timerTask := &tasks.WorkflowTimeoutTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - Version: s.version, - TaskID: int64(100), - VisibilityTimestamp: s.now, - } - - persistenceMutableState := s.createPersistenceMutableState(mutableState, completionEvent.GetEventId(), completionEvent.GetVersion()) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - // one for current workflow, one for new - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) - - _, _, err = s.timerQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) - s.NoError(err) - - state, status := s.getMutableStateFromCache(s.namespaceID, execution.GetWorkflowId(), execution.GetRunId()).GetWorkflowStateStatus() - s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED, state) - s.EqualValues(enumspb.WORKFLOW_EXECUTION_STATUS_TIMED_OUT, status) -} - -func (s *timerQueueActiveTaskExecutorSuite) TestWorkflowTimeout_Cron() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task 
queue" - - mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowRunTimeout: timestamp.DurationPtr(200 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - WorkflowExecutionExpirationTime: timestamp.TimePtr(s.now.Add(10 * time.Second)), - }, - ) - s.Nil(err) - executionInfo := mutableState.GetExecutionInfo() - executionInfo.StartTime = &s.now - executionInfo.CronSchedule = "* * * * *" - - wt := addWorkflowTaskScheduledEvent(mutableState) - startEvent := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) - wt.StartedEventID = startEvent.GetEventId() - completionEvent := addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") - - timerTask := &tasks.WorkflowTimeoutTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - Version: s.version, - TaskID: int64(100), - VisibilityTimestamp: s.now, - } - - persistenceMutableState := s.createPersistenceMutableState(mutableState, completionEvent.GetEventId(), completionEvent.GetVersion()) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - // one for current workflow, one for new - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) - - _, _, err = s.timerQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) - s.NoError(err) - - state, status := s.getMutableStateFromCache(s.namespaceID, execution.GetWorkflowId(), execution.GetRunId()).GetWorkflowStateStatus() - s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED, state) - s.EqualValues(enumspb.WORKFLOW_EXECUTION_STATUS_TIMED_OUT, status) -} - -func (s *timerQueueActiveTaskExecutorSuite) TestWorkflowTimeout_WorkflowExpired() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowRunTimeout: timestamp.DurationPtr(200 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - WorkflowExecutionExpirationTime: timestamp.TimePtr(s.now.Add(-1 * time.Second)), - }, - ) - s.Nil(err) - executionInfo := mutableState.GetExecutionInfo() - executionInfo.StartTime = &s.now - executionInfo.CronSchedule = "* * * * *" - - wt := addWorkflowTaskScheduledEvent(mutableState) - startEvent := 
addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) - wt.StartedEventID = startEvent.GetEventId() - completionEvent := addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") - - timerTask := &tasks.WorkflowTimeoutTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - Version: s.version, - TaskID: int64(100), - VisibilityTimestamp: s.now, - } - - persistenceMutableState := s.createPersistenceMutableState(mutableState, completionEvent.GetEventId(), completionEvent.GetVersion()) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) - - _, _, err = s.timerQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) - s.NoError(err) - - state, status := s.getMutableStateFromCache(s.namespaceID, execution.GetWorkflowId(), execution.GetRunId()).GetWorkflowStateStatus() - s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED, state) - s.EqualValues(enumspb.WORKFLOW_EXECUTION_STATUS_TIMED_OUT, status) -} - -func (s *timerQueueActiveTaskExecutorSuite) createPersistenceMutableState( - ms workflow.MutableState, - lastEventID int64, - lastEventVersion int64, -) *persistencespb.WorkflowMutableState { - currentVersionHistory, err := versionhistory.GetCurrentVersionHistory(ms.GetExecutionInfo().GetVersionHistories()) - s.NoError(err) - err = versionhistory.AddOrUpdateVersionHistoryItem(currentVersionHistory, versionhistory.NewVersionHistoryItem( - lastEventID, lastEventVersion, - )) - s.NoError(err) - return workflow.TestCloneToProto(ms) -} - -func (s *timerQueueActiveTaskExecutorSuite) getMutableStateFromCache( - namespaceID namespace.ID, - workflowID string, - runID string, -) workflow.MutableState { - return s.workflowCache.(*wcache.CacheImpl).Get( - definition.NewWorkflowKey(namespaceID.String(), workflowID, runID), - ).(*workflow.ContextImpl).MutableState -} - -func (s *timerQueueActiveTaskExecutorSuite) newTaskExecutable( - task tasks.Task, -) queues.Executable { - return queues.NewExecutable( - queues.DefaultReaderId, - task, - s.timerQueueActiveTaskExecutor, - nil, - nil, - queues.NewNoopPriorityAssigner(), - s.mockShard.GetTimeSource(), - s.mockNamespaceCache, - s.mockClusterMetadata, - nil, - metrics.NoopMetricsHandler, - ) -} diff -Nru temporal-1.21.5-1/src/service/history/timerQueueFactory.go temporal-1.22.5/src/service/history/timerQueueFactory.go --- temporal-1.21.5-1/src/service/history/timerQueueFactory.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/timerQueueFactory.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,205 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package history - -import ( - "context" - - "go.uber.org/fx" - - "go.temporal.io/server/api/historyservice/v1" - "go.temporal.io/server/client" - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/log/tag" - "go.temporal.io/server/common/metrics" - "go.temporal.io/server/common/persistence/visibility/manager" - "go.temporal.io/server/common/resource" - "go.temporal.io/server/common/xdc" - deletemanager "go.temporal.io/server/service/history/deletemanager" - "go.temporal.io/server/service/history/queues" - "go.temporal.io/server/service/history/shard" - "go.temporal.io/server/service/history/tasks" - wcache "go.temporal.io/server/service/history/workflow/cache" - "go.temporal.io/server/service/worker/archiver" -) - -const ( - timerQueuePersistenceMaxRPSRatio = 0.3 -) - -type ( - timerQueueFactoryParams struct { - fx.In - - QueueFactoryBaseParams - - ClientBean client.Bean - ArchivalClient archiver.Client - MatchingClient resource.MatchingClient - VisibilityManager manager.VisibilityManager - } - - timerQueueFactory struct { - timerQueueFactoryParams - QueueFactoryBase - } -) - -func NewTimerQueueFactory( - params timerQueueFactoryParams, -) QueueFactory { - return &timerQueueFactory{ - timerQueueFactoryParams: params, - QueueFactoryBase: QueueFactoryBase{ - HostScheduler: queues.NewNamespacePriorityScheduler( - params.ClusterMetadata.GetCurrentClusterName(), - queues.NamespacePrioritySchedulerOptions{ - WorkerCount: params.Config.TimerProcessorSchedulerWorkerCount, - ActiveNamespaceWeights: params.Config.TimerProcessorSchedulerActiveRoundRobinWeights, - StandbyNamespaceWeights: params.Config.TimerProcessorSchedulerStandbyRoundRobinWeights, - EnableRateLimiter: params.Config.TaskSchedulerEnableRateLimiter, - EnableRateLimiterShadowMode: params.Config.TaskSchedulerEnableRateLimiterShadowMode, - DispatchThrottleDuration: params.Config.TaskSchedulerThrottleDuration, - }, - params.NamespaceRegistry, - params.SchedulerRateLimiter, - params.TimeSource, - params.MetricsHandler.WithTags(metrics.OperationTag(metrics.OperationTimerQueueProcessorScope)), - params.Logger, - ), - HostPriorityAssigner: queues.NewPriorityAssigner(), - HostReaderRateLimiter: queues.NewReaderPriorityRateLimiter( - NewHostRateLimiterRateFn( - params.Config.TimerProcessorMaxPollHostRPS, - params.Config.PersistenceMaxQPS, - timerQueuePersistenceMaxRPSRatio, - ), - int64(params.Config.QueueMaxReaderCount()), - ), - }, - } -} - -func (f 
*timerQueueFactory) CreateQueue( - shard shard.Context, - workflowCache wcache.Cache, -) queues.Queue { - logger := log.With(shard.GetLogger(), tag.ComponentTimerQueue) - metricsHandler := f.MetricsHandler.WithTags(metrics.OperationTag(metrics.OperationTimerQueueProcessorScope)) - - currentClusterName := f.ClusterMetadata.GetCurrentClusterName() - workflowDeleteManager := deletemanager.NewDeleteManager( - shard, - workflowCache, - f.Config, - f.ArchivalClient, - shard.GetTimeSource(), - f.VisibilityManager, - ) - - rescheduler := queues.NewRescheduler( - f.HostScheduler, - shard.GetTimeSource(), - logger, - metricsHandler, - ) - - activeExecutor := newTimerQueueActiveTaskExecutor( - shard, - workflowCache, - workflowDeleteManager, - logger, - f.MetricsHandler, - f.Config, - f.MatchingClient, - ) - - standbyExecutor := newTimerQueueStandbyTaskExecutor( - shard, - workflowCache, - workflowDeleteManager, - xdc.NewNDCHistoryResender( - shard.GetNamespaceRegistry(), - f.ClientBean, - func(ctx context.Context, request *historyservice.ReplicateEventsV2Request) error { - engine, err := shard.GetEngine(ctx) - if err != nil { - return err - } - return engine.ReplicateEventsV2(ctx, request) - }, - shard.GetPayloadSerializer(), - f.Config.StandbyTaskReReplicationContextTimeout, - logger, - ), - f.MatchingClient, - logger, - f.MetricsHandler, - // note: the cluster name is for calculating time for standby tasks, - // here we are basically using current cluster time - // this field will be deprecated soon, currently exists so that - // we have the option of revert to old behavior - currentClusterName, - f.Config, - ) - - executor := queues.NewExecutorWrapper( - currentClusterName, - f.NamespaceRegistry, - activeExecutor, - standbyExecutor, - logger, - ) - - return queues.NewScheduledQueue( - shard, - tasks.CategoryTimer, - f.HostScheduler, - rescheduler, - f.HostPriorityAssigner, - executor, - &queues.Options{ - ReaderOptions: queues.ReaderOptions{ - BatchSize: f.Config.TimerTaskBatchSize, - MaxPendingTasksCount: f.Config.QueuePendingTaskMaxCount, - PollBackoffInterval: f.Config.TimerProcessorPollBackoffInterval, - }, - MonitorOptions: queues.MonitorOptions{ - PendingTasksCriticalCount: f.Config.QueuePendingTaskCriticalCount, - ReaderStuckCriticalAttempts: f.Config.QueueReaderStuckCriticalAttempts, - SliceCountCriticalThreshold: f.Config.QueueCriticalSlicesCount, - }, - MaxPollRPS: f.Config.TimerProcessorMaxPollRPS, - MaxPollInterval: f.Config.TimerProcessorMaxPollInterval, - MaxPollIntervalJitterCoefficient: f.Config.TimerProcessorMaxPollIntervalJitterCoefficient, - CheckpointInterval: f.Config.TimerProcessorUpdateAckInterval, - CheckpointIntervalJitterCoefficient: f.Config.TimerProcessorUpdateAckIntervalJitterCoefficient, - MaxReaderCount: f.Config.QueueMaxReaderCount, - }, - f.HostReaderRateLimiter, - logger, - metricsHandler, - ) -} diff -Nru temporal-1.21.5-1/src/service/history/timerQueueStandbyTaskExecutor.go temporal-1.22.5/src/service/history/timerQueueStandbyTaskExecutor.go --- temporal-1.21.5-1/src/service/history/timerQueueStandbyTaskExecutor.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/timerQueueStandbyTaskExecutor.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,589 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
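The deleted factory's CreateQueue builds one active and one standby executor and hands both to queues.NewExecutorWrapper, which routes each task according to whether the current cluster is active for the task's namespace. A minimal, stand-alone sketch of that routing idea, written against plain Go types with a hypothetical isActive callback in place of the server's namespace registry (not the server's actual API):

package main

import "fmt"

// Executor stands in for the queues.Executor interface used by the removed
// factory code: it processes one task.
type Executor interface {
        Execute(task string) error
}

type activeExecutor struct{}

func (activeExecutor) Execute(task string) error {
        fmt.Println("active executor handled", task)
        return nil
}

type standbyExecutor struct{}

func (standbyExecutor) Execute(task string) error {
        fmt.Println("standby executor verified", task)
        return nil
}

// executorWrapper mirrors the idea behind the executor wrapper: pick the
// active or the standby path per task, based on which cluster is currently
// active for the task's namespace.
type executorWrapper struct {
        isActive func(namespaceID string) bool // hypothetical registry lookup
        active   Executor
        standby  Executor
}

func (w executorWrapper) Execute(namespaceID, task string) error {
        if w.isActive(namespaceID) {
                return w.active.Execute(task)
        }
        return w.standby.Execute(task)
}

func main() {
        w := executorWrapper{
                isActive: func(ns string) bool { return ns == "ns-active" },
                active:   activeExecutor{},
                standby:  standbyExecutor{},
        }
        _ = w.Execute("ns-active", "WorkflowTimeoutTask")
        _ = w.Execute("ns-standby", "WorkflowTimeoutTask")
}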
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package history - -import ( - "context" - "fmt" - "time" - - commonpb "go.temporal.io/api/common/v1" - enumspb "go.temporal.io/api/enums/v1" - "go.temporal.io/api/serviceerror" - taskqueuepb "go.temporal.io/api/taskqueue/v1" - - "go.temporal.io/server/api/matchingservice/v1" - "go.temporal.io/server/common" - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/log/tag" - "go.temporal.io/server/common/metrics" - "go.temporal.io/server/common/namespace" - "go.temporal.io/server/common/xdc" - "go.temporal.io/server/service/history/configs" - "go.temporal.io/server/service/history/consts" - "go.temporal.io/server/service/history/deletemanager" - "go.temporal.io/server/service/history/queues" - "go.temporal.io/server/service/history/shard" - "go.temporal.io/server/service/history/tasks" - "go.temporal.io/server/service/history/vclock" - "go.temporal.io/server/service/history/workflow" - wcache "go.temporal.io/server/service/history/workflow/cache" -) - -type ( - timerQueueStandbyTaskExecutor struct { - *timerQueueTaskExecutorBase - - clusterName string - nDCHistoryResender xdc.NDCHistoryResender - } -) - -func newTimerQueueStandbyTaskExecutor( - shard shard.Context, - workflowCache wcache.Cache, - workflowDeleteManager deletemanager.DeleteManager, - nDCHistoryResender xdc.NDCHistoryResender, - matchingClient matchingservice.MatchingServiceClient, - logger log.Logger, - metricProvider metrics.Handler, - clusterName string, - config *configs.Config, -) queues.Executor { - return &timerQueueStandbyTaskExecutor{ - timerQueueTaskExecutorBase: newTimerQueueTaskExecutorBase( - shard, - workflowCache, - workflowDeleteManager, - matchingClient, - logger, - metricProvider, - config, - ), - clusterName: clusterName, - nDCHistoryResender: nDCHistoryResender, - } -} - -func (t *timerQueueStandbyTaskExecutor) Execute( - ctx context.Context, - executable queues.Executable, -) ([]metrics.Tag, bool, error) { - task := executable.GetTask() - taskType := queues.GetStandbyTimerTaskTypeTagValue(task) - metricsTags := []metrics.Tag{ - getNamespaceTagByID(t.shard.GetNamespaceRegistry(), task.GetNamespaceID()), - metrics.TaskTypeTag(taskType), - metrics.OperationTag(taskType), // for backward compatibility - } - - var err error - switch task := task.(type) { - case *tasks.UserTimerTask: - err = t.executeUserTimerTimeoutTask(ctx, task) - case *tasks.ActivityTimeoutTask: - err = t.executeActivityTimeoutTask(ctx, task) - case 
*tasks.WorkflowTaskTimeoutTask: - err = t.executeWorkflowTaskTimeoutTask(ctx, task) - case *tasks.WorkflowBackoffTimerTask: - err = t.executeWorkflowBackoffTimerTask(ctx, task) - case *tasks.ActivityRetryTimerTask: - err = t.executeActivityRetryTimerTask(ctx, task) - case *tasks.WorkflowTimeoutTask: - err = t.executeWorkflowTimeoutTask(ctx, task) - case *tasks.DeleteHistoryEventTask: - err = t.executeDeleteHistoryEventTask(ctx, task) - default: - err = errUnknownTimerTask - } - - return metricsTags, false, err -} - -func (t *timerQueueStandbyTaskExecutor) executeUserTimerTimeoutTask( - ctx context.Context, - timerTask *tasks.UserTimerTask, -) error { - actionFn := func(_ context.Context, wfContext workflow.Context, mutableState workflow.MutableState) (interface{}, error) { - timerSequence := t.getTimerSequence(mutableState) - timerSequenceIDs := timerSequence.LoadAndSortUserTimers() - if len(timerSequenceIDs) > 0 { - timerSequenceID := timerSequenceIDs[0] - _, ok := mutableState.GetUserTimerInfoByEventID(timerSequenceID.EventID) - if !ok { - errString := fmt.Sprintf("failed to find in user timer event ID: %v", timerSequenceID.EventID) - t.logger.Error(errString) - return nil, serviceerror.NewInternal(errString) - } - - if queues.IsTimeExpired( - timerTask.GetVisibilityTime(), - timerSequenceID.Timestamp, - ) { - return getHistoryResendInfo(mutableState) - } - // Since the user timers are already sorted, then if there is one timer which is not expired, - // all user timers after that timer are not expired. - } - // If there is no user timer expired, then we are good. - return nil, nil - } - - return t.processTimer( - ctx, - timerTask, - actionFn, - getStandbyPostActionFn( - timerTask, - t.getCurrentTime, - t.config.StandbyTaskMissingEventsResendDelay(timerTask.GetType()), - t.config.StandbyTaskMissingEventsDiscardDelay(timerTask.GetType()), - t.fetchHistoryFromRemote, - standbyTimerTaskPostActionTaskDiscarded, - ), - ) -} - -func (t *timerQueueStandbyTaskExecutor) executeActivityTimeoutTask( - ctx context.Context, - timerTask *tasks.ActivityTimeoutTask, -) error { - // activity heartbeat timer task is a special snowflake. - // normal activity timer task on the passive side will be generated by events related to activity in history replicator, - // and the standby timer processor will only need to verify whether the timer task can be safely throw away. - // - // activity heartbeat timer task cannot be handled in the way mentioned above. - // the reason is, there is no event driving the creation of new activity heartbeat timer. - // although there will be an task syncing activity from remote, the task is not an event, - // and cannot attempt to recreate a new activity timer task. - // - // the overall solution is to attempt to generate a new activity timer task whenever the - // task passed in is safe to be throw away. 
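Per the comment above, this is the one place in the standby path that mutates state: when a heartbeat timeout task fires and its visibility time has reached the recorded heartbeat timeout, the executor clears the "heartbeat timer created" mark so that exactly one replacement heartbeat timeout task is regenerated. A reduced sketch of that dedup check, with illustrative names standing in for the server's ActivityInfo fields and TimerTaskStatus constants:

package main

import (
        "fmt"
        "time"
)

// Illustrative stand-in for the server's TimerTaskStatus bitmask constant.
const timerTaskStatusCreatedHeartbeat = 1 << 3

type activityInfo struct {
        timerTaskStatus  int32
        heartbeatTimeout time.Time // last persisted heartbeat timeout visibility
}

// shouldRegenerateHeartbeatTimer mirrors the dedup idea: only when the timer
// task that fired is at or past the recorded heartbeat timeout is the
// "created" mark cleared, so a single new heartbeat timeout task is generated.
func shouldRegenerateHeartbeatTimer(info *activityInfo, taskVisibility time.Time) bool {
        if taskVisibility.Before(info.heartbeatTimeout) {
                return false // a newer heartbeat task already covers this activity
        }
        info.timerTaskStatus &^= timerTaskStatusCreatedHeartbeat
        return true
}

func main() {
        now := time.Now()
        info := &activityInfo{
                timerTaskStatus:  timerTaskStatusCreatedHeartbeat,
                heartbeatTimeout: now,
        }
        fmt.Println(shouldRegenerateHeartbeatTimer(info, now.Add(-time.Second))) // false: stale task
        fmt.Println(shouldRegenerateHeartbeatTimer(info, now))                   // true: clear mark, recreate timer
}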
- actionFn := func(ctx context.Context, wfContext workflow.Context, mutableState workflow.MutableState) (interface{}, error) { - timerSequence := t.getTimerSequence(mutableState) - updateMutableState := false - timerSequenceIDs := timerSequence.LoadAndSortActivityTimers() - if len(timerSequenceIDs) > 0 { - timerSequenceID := timerSequenceIDs[0] - _, ok := mutableState.GetActivityInfo(timerSequenceID.EventID) - if !ok { - errString := fmt.Sprintf("failed to find in memory activity timer: %v", timerSequenceID.EventID) - t.logger.Error(errString) - return nil, serviceerror.NewInternal(errString) - } - - if queues.IsTimeExpired( - timerTask.GetVisibilityTime(), - timerSequenceID.Timestamp, - ) { - return getHistoryResendInfo(mutableState) - } - // Since the activity timers are already sorted, then if there is one timer which is not expired, - // all activity timers after that timer are not expired. - } - - // for reason to update mutable state & generate a new activity task, - // see comments at the beginning of this function. - // NOTE: this is the only place in the standby logic where mutable state can be updated - - // need to clear the activity heartbeat timer task marks - lastWriteVersion, err := mutableState.GetLastWriteVersion() - if err != nil { - return nil, err - } - - // NOTE: LastHeartbeatTimeoutVisibilityInSeconds is for deduping heartbeat timer creation as it's possible - // one heartbeat task was persisted multiple times with different taskIDs due to the retry logic - // for updating workflow execution. In that case, only one new heartbeat timeout task should be - // created. - isHeartBeatTask := timerTask.TimeoutType == enumspb.TIMEOUT_TYPE_HEARTBEAT - activityInfo, heartbeatTimeoutVis, ok := mutableState.GetActivityInfoWithTimerHeartbeat(timerTask.EventID) - if isHeartBeatTask && ok && queues.IsTimeExpired(timerTask.GetVisibilityTime(), heartbeatTimeoutVis) { - activityInfo.TimerTaskStatus = activityInfo.TimerTaskStatus &^ workflow.TimerTaskStatusCreatedHeartbeat - if err := mutableState.UpdateActivity(activityInfo); err != nil { - return nil, err - } - updateMutableState = true - } - - // passive logic need to explicitly call create timer - modified, err := timerSequence.CreateNextActivityTimer() - if err != nil { - return nil, err - } - updateMutableState = updateMutableState || modified - - if !updateMutableState { - return nil, nil - } - - // we need to handcraft some of the variables - // since the job being done here is update the activity and possibly write a timer task to DB - // also need to reset the current version. 
- if err := mutableState.UpdateCurrentVersion(lastWriteVersion, true); err != nil { - return nil, err - } - - err = wfContext.UpdateWorkflowExecutionAsPassive(ctx) - return nil, err - } - - return t.processTimer( - ctx, - timerTask, - actionFn, - getStandbyPostActionFn( - timerTask, - t.getCurrentTime, - t.config.StandbyTaskMissingEventsResendDelay(timerTask.GetType()), - t.config.StandbyTaskMissingEventsDiscardDelay(timerTask.GetType()), - t.fetchHistoryFromRemote, - standbyTimerTaskPostActionTaskDiscarded, - ), - ) -} - -func (t *timerQueueStandbyTaskExecutor) executeActivityRetryTimerTask( - ctx context.Context, - task *tasks.ActivityRetryTimerTask, -) (retError error) { - actionFn := func(_ context.Context, wfContext workflow.Context, mutableState workflow.MutableState) (interface{}, error) { - activityInfo, ok := mutableState.GetActivityInfo(task.EventID) // activity schedule ID - if !ok { - return nil, nil - } - - err := CheckTaskVersion(t.shard, t.logger, mutableState.GetNamespaceEntry(), activityInfo.Version, task.Version, task) - if err != nil { - return nil, err - } - - if activityInfo.Attempt > task.Attempt { - return nil, nil - } - - if activityInfo.StartedEventId != common.EmptyEventID { - return nil, nil - } - - return newActivityRetryTimePostActionInfo(mutableState, activityInfo.TaskQueue, *activityInfo.ScheduleToStartTimeout, activityInfo.UseCompatibleVersion) - } - - return t.processTimer( - ctx, - task, - actionFn, - getStandbyPostActionFn( - task, - t.getCurrentTime, - t.config.StandbyTaskMissingEventsResendDelay(task.GetType()), - t.config.StandbyTaskMissingEventsDiscardDelay(task.GetType()), - t.fetchHistoryFromRemote, - t.pushActivity, - ), - ) -} - -func (t *timerQueueStandbyTaskExecutor) executeWorkflowTaskTimeoutTask( - ctx context.Context, - timerTask *tasks.WorkflowTaskTimeoutTask, -) error { - // workflow task schedule to start timer task is a special snowflake. 
- // the schedule to start timer is for sticky workflow task, which is - // not applicable on the passive cluster - if timerTask.TimeoutType == enumspb.TIMEOUT_TYPE_SCHEDULE_TO_START { - return nil - } - - actionFn := func(_ context.Context, wfContext workflow.Context, mutableState workflow.MutableState) (interface{}, error) { - workflowTask := mutableState.GetWorkflowTaskByID(timerTask.EventID) - if workflowTask == nil { - return nil, nil - } - - err := CheckTaskVersion(t.shard, t.logger, mutableState.GetNamespaceEntry(), workflowTask.Version, timerTask.Version, timerTask) - if err != nil { - return nil, err - } - - return getHistoryResendInfo(mutableState) - } - - return t.processTimer( - ctx, - timerTask, - actionFn, - getStandbyPostActionFn( - timerTask, - t.getCurrentTime, - t.config.StandbyTaskMissingEventsResendDelay(timerTask.GetType()), - t.config.StandbyTaskMissingEventsDiscardDelay(timerTask.GetType()), - t.fetchHistoryFromRemote, - standbyTimerTaskPostActionTaskDiscarded, - ), - ) -} - -func (t *timerQueueStandbyTaskExecutor) executeWorkflowBackoffTimerTask( - ctx context.Context, - timerTask *tasks.WorkflowBackoffTimerTask, -) error { - actionFn := func(_ context.Context, wfContext workflow.Context, mutableState workflow.MutableState) (interface{}, error) { - if mutableState.HadOrHasWorkflowTask() { - // if there is one workflow task already been processed - // or has pending workflow task, meaning workflow has already running - return nil, nil - } - - // Note: do not need to verify task version here - // logic can only go here if mutable state build's next event ID is 2 - // meaning history only contains workflow started event. - // we can do the checking of task version vs workflow started version - // however, workflow started version is immutable - - // active cluster will add first workflow task after backoff timeout. - // standby cluster should just call ack manager to retry this task - // since we are stilling waiting for the first WorkflowTaskScheduledEvent to be replicated from active side. 
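Each of these handlers defers to getStandbyPostActionFn, which escalates a task whose events have not been replicated yet: keep retrying at first, then trigger a history resend from the active cluster, and finally discard the task once it has waited too long. A simplified model of that policy, using the same 12-minute resend and 30-minute discard delays the test suite below configures (the real values come from dynamic config, and the names here are illustrative):

package main

import (
        "errors"
        "fmt"
        "time"
)

var (
        errTaskRetry     = errors.New("standby task retry")
        errTaskDiscarded = errors.New("standby task discarded")
)

// standbyPostAction decides what to do with a standby task whose expected
// events have not arrived, based on how long the task has been waiting.
func standbyPostAction(taskTime, now time.Time, resendDelay, discardDelay time.Duration, resend func() error) error {
        age := now.Sub(taskTime)
        switch {
        case age < resendDelay:
                return errTaskRetry // replication may simply be behind; retry later
        case age < discardDelay:
                if err := resend(); err != nil {
                        return err // surface the resend failure
                }
                return errTaskRetry // history resent; verify again on the next attempt
        default:
                return errTaskDiscarded // waited too long; give up on this task
        }
}

func main() {
        start := time.Now()
        resend := func() error { fmt.Println("re-replicating history from active cluster"); return nil }
        fmt.Println(standbyPostAction(start, start.Add(1*time.Minute), 12*time.Minute, 30*time.Minute, resend))
        fmt.Println(standbyPostAction(start, start.Add(15*time.Minute), 12*time.Minute, 30*time.Minute, resend))
        fmt.Println(standbyPostAction(start, start.Add(40*time.Minute), 12*time.Minute, 30*time.Minute, resend))
}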
- - return getHistoryResendInfo(mutableState) - } - - return t.processTimer( - ctx, - timerTask, - actionFn, - getStandbyPostActionFn( - timerTask, - t.getCurrentTime, - t.config.StandbyTaskMissingEventsResendDelay(timerTask.GetType()), - t.config.StandbyTaskMissingEventsDiscardDelay(timerTask.GetType()), - t.fetchHistoryFromRemote, - standbyTimerTaskPostActionTaskDiscarded, - ), - ) -} - -func (t *timerQueueStandbyTaskExecutor) executeWorkflowTimeoutTask( - ctx context.Context, - timerTask *tasks.WorkflowTimeoutTask, -) error { - actionFn := func(_ context.Context, wfContext workflow.Context, mutableState workflow.MutableState) (interface{}, error) { - // we do not need to notify new timer to base, since if there is no new event being replicated - // checking again if the timer can be completed is meaningless - - startVersion, err := mutableState.GetStartVersion() - if err != nil { - return nil, err - } - err = CheckTaskVersion(t.shard, t.logger, mutableState.GetNamespaceEntry(), startVersion, timerTask.Version, timerTask) - if err != nil { - return nil, err - } - - return getHistoryResendInfo(mutableState) - } - - return t.processTimer( - ctx, - timerTask, - actionFn, - getStandbyPostActionFn( - timerTask, - t.getCurrentTime, - t.config.StandbyTaskMissingEventsResendDelay(timerTask.GetType()), - t.config.StandbyTaskMissingEventsDiscardDelay(timerTask.GetType()), - t.fetchHistoryFromRemote, - standbyTimerTaskPostActionTaskDiscarded, - ), - ) -} - -func (t *timerQueueStandbyTaskExecutor) getTimerSequence( - mutableState workflow.MutableState, -) workflow.TimerSequence { - return workflow.NewTimerSequence(mutableState) -} - -func (t *timerQueueStandbyTaskExecutor) processTimer( - ctx context.Context, - timerTask tasks.Task, - actionFn standbyActionFn, - postActionFn standbyPostActionFn, -) (retError error) { - ctx, cancel := context.WithTimeout(ctx, taskTimeout) - defer cancel() - - nsRecord, err := t.shard.GetNamespaceRegistry().GetNamespaceByID(namespace.ID(timerTask.GetNamespaceID())) - if err != nil { - return err - } - if !nsRecord.IsOnCluster(t.clusterName) { - // namespace is not replicated to local cluster, ignore corresponding tasks - return nil - } - - executionContext, release, err := getWorkflowExecutionContextForTask(ctx, t.cache, timerTask) - if err != nil { - return err - } - defer func() { - if retError == consts.ErrTaskRetry { - release(nil) - } else { - release(retError) - } - }() - - mutableState, err := loadMutableStateForTimerTask(ctx, executionContext, timerTask, t.metricHandler, t.logger) - if err != nil { - return err - } - if mutableState == nil { - return nil - } - - if !mutableState.IsWorkflowExecutionRunning() { - // workflow already finished, no need to process the timer - return nil - } - - historyResendInfo, err := actionFn(ctx, executionContext, mutableState) - if err != nil { - return err - } - - // NOTE: do not access anything related mutable state after this lock release - release(nil) - return postActionFn(ctx, timerTask, historyResendInfo, t.logger) -} - -func (t *timerQueueStandbyTaskExecutor) fetchHistoryFromRemote( - ctx context.Context, - taskInfo tasks.Task, - postActionInfo interface{}, - logger log.Logger, -) error { - var resendInfo *historyResendInfo - switch postActionInfo := postActionInfo.(type) { - case nil: - return nil - case *historyResendInfo: - resendInfo = postActionInfo - case *activityTaskPostActionInfo: - resendInfo = postActionInfo.historyResendInfo - default: - logger.Fatal("unknown post action info for fetching remote history", 
tag.Value(postActionInfo)) - } - - remoteClusterName, err := getRemoteClusterName( - t.currentClusterName, - t.registry, - taskInfo.GetNamespaceID(), - ) - if err != nil { - return err - } - - scope := t.metricHandler.WithTags(metrics.OperationTag(metrics.HistoryRereplicationByTimerTaskScope)) - scope.Counter(metrics.ClientRequests.GetMetricName()).Record(1) - startTime := time.Now() - defer func() { scope.Timer(metrics.ClientLatency.GetMetricName()).Record(time.Since(startTime)) }() - - if resendInfo.lastEventID == common.EmptyEventID || resendInfo.lastEventVersion == common.EmptyVersion { - t.logger.Error("Error re-replicating history from remote: timerQueueStandbyProcessor encountered empty historyResendInfo.", - tag.ShardID(t.shard.GetShardID()), - tag.WorkflowNamespaceID(taskInfo.GetNamespaceID()), - tag.WorkflowID(taskInfo.GetWorkflowID()), - tag.WorkflowRunID(taskInfo.GetRunID()), - tag.ClusterName(remoteClusterName)) - - return consts.ErrTaskRetry - } - - // NOTE: history resend may take long time and its timeout is currently - // controlled by a separate dynamicconfig config: StandbyTaskReReplicationContextTimeout - if err = t.nDCHistoryResender.SendSingleWorkflowHistory( - ctx, - remoteClusterName, - namespace.ID(taskInfo.GetNamespaceID()), - taskInfo.GetWorkflowID(), - taskInfo.GetRunID(), - resendInfo.lastEventID, - resendInfo.lastEventVersion, - common.EmptyEventID, - common.EmptyVersion, - ); err != nil { - if _, isNotFound := err.(*serviceerror.NamespaceNotFound); isNotFound { - // Don't log NamespaceNotFound error because it is valid case, and return error to stop retrying. - return err - } - t.logger.Error("Error re-replicating history from remote.", - tag.ShardID(t.shard.GetShardID()), - tag.WorkflowNamespaceID(taskInfo.GetNamespaceID()), - tag.WorkflowID(taskInfo.GetWorkflowID()), - tag.WorkflowRunID(taskInfo.GetRunID()), - tag.ClusterName(remoteClusterName), - tag.Error(err)) - } - - // Return retryable error, so task processing will retry. 
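As the note in fetchHistoryFromRemote says, the resend itself runs under a separate timeout (StandbyTaskReReplicationContextTimeout) because re-replicating history can take far longer than one task execution, and the task is then failed with a retryable error so it is re-verified once the history has arrived. A small sketch of bounding a slow resend with its own deadline, with a stubbed resend function standing in for the NDC history resender:

package main

import (
        "context"
        "fmt"
        "time"
)

// resendWithTimeout runs the supplied resend function under its own deadline,
// independent of the caller's task timeout.
func resendWithTimeout(parent context.Context, timeout time.Duration, resend func(context.Context) error) error {
        ctx, cancel := context.WithTimeout(parent, timeout)
        defer cancel()
        return resend(ctx)
}

func main() {
        err := resendWithTimeout(context.Background(), 200*time.Millisecond, func(ctx context.Context) error {
                select {
                case <-time.After(time.Second): // pretend replication is slow
                        return nil
                case <-ctx.Done():
                        return ctx.Err()
                }
        })
        fmt.Println(err) // deadline exceeded; the caller would return a retryable error
}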
- return consts.ErrTaskRetry -} - -func (t *timerQueueStandbyTaskExecutor) pushActivity( - ctx context.Context, - task tasks.Task, - postActionInfo interface{}, - logger log.Logger, -) error { - if postActionInfo == nil { - return nil - } - - pushActivityInfo := postActionInfo.(*activityTaskPostActionInfo) - activityScheduleToStartTimeout := &pushActivityInfo.activityTaskScheduleToStartTimeout - activityTask := task.(*tasks.ActivityRetryTimerTask) - - _, err := t.matchingClient.AddActivityTask(ctx, &matchingservice.AddActivityTaskRequest{ - NamespaceId: activityTask.NamespaceID, - Execution: &commonpb.WorkflowExecution{ - WorkflowId: activityTask.WorkflowID, - RunId: activityTask.RunID, - }, - TaskQueue: &taskqueuepb.TaskQueue{ - Name: pushActivityInfo.taskQueue, - Kind: enumspb.TASK_QUEUE_KIND_NORMAL, - }, - ScheduledEventId: activityTask.EventID, - ScheduleToStartTimeout: activityScheduleToStartTimeout, - Clock: vclock.NewVectorClock(t.shard.GetClusterMetadata().GetClusterID(), t.shard.GetShardID(), activityTask.TaskID), - VersionDirective: pushActivityInfo.versionDirective, - }) - return err -} - -func (t *timerQueueStandbyTaskExecutor) getCurrentTime() time.Time { - return t.shard.GetCurrentTime(t.clusterName) -} diff -Nru temporal-1.21.5-1/src/service/history/timerQueueStandbyTaskExecutor_test.go temporal-1.22.5/src/service/history/timerQueueStandbyTaskExecutor_test.go --- temporal-1.21.5-1/src/service/history/timerQueueStandbyTaskExecutor_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/timerQueueStandbyTaskExecutor_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,1509 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- -package history - -import ( - "context" - "testing" - "time" - - "github.com/golang/mock/gomock" - "github.com/pborman/uuid" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - - commonpb "go.temporal.io/api/common/v1" - enumspb "go.temporal.io/api/enums/v1" - taskqueuepb "go.temporal.io/api/taskqueue/v1" - "go.temporal.io/api/workflowservice/v1" - - "go.temporal.io/server/api/adminservicemock/v1" - enumsspb "go.temporal.io/server/api/enums/v1" - "go.temporal.io/server/api/historyservice/v1" - "go.temporal.io/server/api/matchingservice/v1" - "go.temporal.io/server/api/matchingservicemock/v1" - persistencespb "go.temporal.io/server/api/persistence/v1" - "go.temporal.io/server/common" - "go.temporal.io/server/common/clock" - "go.temporal.io/server/common/cluster" - "go.temporal.io/server/common/definition" - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/metrics" - "go.temporal.io/server/common/namespace" - "go.temporal.io/server/common/persistence" - "go.temporal.io/server/common/persistence/versionhistory" - "go.temporal.io/server/common/primitives/timestamp" - "go.temporal.io/server/common/xdc" - "go.temporal.io/server/service/history/consts" - "go.temporal.io/server/service/history/deletemanager" - "go.temporal.io/server/service/history/events" - "go.temporal.io/server/service/history/queues" - "go.temporal.io/server/service/history/shard" - "go.temporal.io/server/service/history/tasks" - "go.temporal.io/server/service/history/tests" - "go.temporal.io/server/service/history/vclock" - "go.temporal.io/server/service/history/workflow" - wcache "go.temporal.io/server/service/history/workflow/cache" -) - -type ( - timerQueueStandbyTaskExecutorSuite struct { - suite.Suite - *require.Assertions - - controller *gomock.Controller - mockExecutionMgr *persistence.MockExecutionManager - mockShard *shard.ContextTest - mockTxProcessor *queues.MockQueue - mockTimerProcessor *queues.MockQueue - mockNamespaceCache *namespace.MockRegistry - mockClusterMetadata *cluster.MockMetadata - mockAdminClient *adminservicemock.MockAdminServiceClient - mockNDCHistoryResender *xdc.MockNDCHistoryResender - mockDeleteManager *deletemanager.MockDeleteManager - mockMatchingClient *matchingservicemock.MockMatchingServiceClient - - workflowCache wcache.Cache - logger log.Logger - namespaceID namespace.ID - namespaceEntry *namespace.Namespace - version int64 - clusterName string - now time.Time - timeSource *clock.EventTimeSource - fetchHistoryDuration time.Duration - discardDuration time.Duration - - timerQueueStandbyTaskExecutor *timerQueueStandbyTaskExecutor - } -) - -func TestTimerQueueStandbyTaskExecutorSuite(t *testing.T) { - s := new(timerQueueStandbyTaskExecutorSuite) - suite.Run(t, s) -} - -func (s *timerQueueStandbyTaskExecutorSuite) SetupSuite() { -} - -func (s *timerQueueStandbyTaskExecutorSuite) SetupTest() { - s.Assertions = require.New(s.T()) - - config := tests.NewDynamicConfig() - s.namespaceEntry = tests.GlobalStandbyNamespaceEntry - s.namespaceID = s.namespaceEntry.ID() - s.version = s.namespaceEntry.FailoverVersion() - s.clusterName = cluster.TestAlternativeClusterName - s.now = time.Now().UTC() - s.timeSource = clock.NewEventTimeSource().Update(s.now) - s.fetchHistoryDuration = time.Minute * 12 - s.discardDuration = time.Minute * 30 - - s.controller = gomock.NewController(s.T()) - s.mockNDCHistoryResender = xdc.NewMockNDCHistoryResender(s.controller) - s.mockTxProcessor = queues.NewMockQueue(s.controller) - s.mockTimerProcessor = 
queues.NewMockQueue(s.controller) - s.mockTxProcessor.EXPECT().Category().Return(tasks.CategoryTransfer).AnyTimes() - s.mockTimerProcessor.EXPECT().Category().Return(tasks.CategoryTimer).AnyTimes() - s.mockTxProcessor.EXPECT().NotifyNewTasks(gomock.Any()).AnyTimes() - s.mockTimerProcessor.EXPECT().NotifyNewTasks(gomock.Any()).AnyTimes() - - s.mockShard = shard.NewTestContextWithTimeSource( - s.controller, - &persistencespb.ShardInfo{ - ShardId: 1, - RangeId: 1, - }, - config, - s.timeSource, - ) - s.mockShard.SetEventsCacheForTesting(events.NewEventsCache( - s.mockShard.GetShardID(), - s.mockShard.GetConfig().EventsCacheInitialSize(), - s.mockShard.GetConfig().EventsCacheMaxSize(), - s.mockShard.GetConfig().EventsCacheTTL(), - s.mockShard.GetExecutionManager(), - false, - s.mockShard.GetLogger(), - s.mockShard.GetMetricsHandler(), - )) - - // ack manager will use the namespace information - s.mockNamespaceCache = s.mockShard.Resource.NamespaceCache - s.mockExecutionMgr = s.mockShard.Resource.ExecutionMgr - s.mockClusterMetadata = s.mockShard.Resource.ClusterMetadata - s.mockAdminClient = s.mockShard.Resource.RemoteAdminClient - s.mockMatchingClient = s.mockShard.Resource.MatchingClient - s.mockNamespaceCache.EXPECT().GetNamespaceByID(gomock.Any()).Return(s.namespaceEntry, nil).AnyTimes() - s.mockNamespaceCache.EXPECT().GetNamespaceName(gomock.Any()).Return(s.namespaceEntry.Name(), nil).AnyTimes() - s.mockClusterMetadata.EXPECT().GetClusterID().Return(tests.Version).AnyTimes() - s.mockClusterMetadata.EXPECT().IsVersionFromSameCluster(tests.Version, tests.Version).Return(true).AnyTimes() - s.mockClusterMetadata.EXPECT().GetCurrentClusterName().Return(cluster.TestCurrentClusterName).AnyTimes() - s.mockClusterMetadata.EXPECT().GetAllClusterInfo().Return(cluster.TestAllClusterInfo).AnyTimes() - s.mockClusterMetadata.EXPECT().IsGlobalNamespaceEnabled().Return(true).AnyTimes() - s.mockClusterMetadata.EXPECT().ClusterNameForFailoverVersion(s.namespaceEntry.IsGlobalNamespace(), s.version).Return(s.clusterName).AnyTimes() - s.workflowCache = wcache.NewCache(s.mockShard) - s.logger = s.mockShard.GetLogger() - - s.mockDeleteManager = deletemanager.NewMockDeleteManager(s.controller) - h := &historyEngineImpl{ - currentClusterName: s.mockShard.Resource.GetClusterMetadata().GetCurrentClusterName(), - shard: s.mockShard, - clusterMetadata: s.mockClusterMetadata, - executionManager: s.mockExecutionMgr, - logger: s.logger, - tokenSerializer: common.NewProtoTaskTokenSerializer(), - metricsHandler: s.mockShard.GetMetricsHandler(), - eventNotifier: events.NewNotifier(s.timeSource, metrics.NoopMetricsHandler, func(namespace.ID, string) int32 { return 1 }), - queueProcessors: map[tasks.Category]queues.Queue{ - s.mockTxProcessor.Category(): s.mockTxProcessor, - s.mockTimerProcessor.Category(): s.mockTimerProcessor, - }, - } - s.mockShard.SetEngineForTesting(h) - - s.timerQueueStandbyTaskExecutor = newTimerQueueStandbyTaskExecutor( - s.mockShard, - s.workflowCache, - s.mockDeleteManager, - s.mockNDCHistoryResender, - s.mockMatchingClient, - s.logger, - metrics.NoopMetricsHandler, - s.clusterName, - config, - ).(*timerQueueStandbyTaskExecutor) -} - -func (s *timerQueueStandbyTaskExecutorSuite) TearDownTest() { - s.controller.Finish() - s.mockShard.StopForTest() -} - -func (s *timerQueueStandbyTaskExecutorSuite) TestProcessUserTimerTimeout_Pending() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - 
taskQueueName := "some random task queue" - - mutableState := workflow.TestGlobalMutableState( - s.mockShard, - s.mockShard.GetEventsCache(), - s.logger, - s.version, - execution.GetRunId(), - ) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowRunTimeout: timestamp.DurationPtr(200 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - }, - ) - s.Nil(err) - - wt := addWorkflowTaskScheduledEvent(mutableState) - event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) - wt.StartedEventID = event.GetEventId() - event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") - - timerID := "timer" - timerTimeout := 2 * time.Second - event, _ = addTimerStartedEvent(mutableState, event.GetEventId(), timerID, timerTimeout) - nextEventID := event.GetEventId() - - timerSequence := workflow.NewTimerSequence(mutableState) - mutableState.InsertTasks[tasks.CategoryTimer] = nil - modified, err := timerSequence.CreateNextUserTimer() - s.NoError(err) - s.True(modified) - task := mutableState.InsertTasks[tasks.CategoryTimer][0] - - timerTask := &tasks.UserTimerTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - Version: s.version, - TaskID: int64(100), - VisibilityTimestamp: task.(*tasks.UserTimerTask).VisibilityTimestamp, - EventID: event.EventId, - } - - persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - - s.mockShard.SetCurrentTime(s.clusterName, s.now) - _, _, err = s.timerQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) - s.Equal(consts.ErrTaskRetry, err) - - s.mockShard.SetCurrentTime(s.clusterName, s.now.Add(s.fetchHistoryDuration)) - s.mockNDCHistoryResender.EXPECT().SendSingleWorkflowHistory( - gomock.Any(), - s.clusterName, - namespace.ID(timerTask.NamespaceID), - timerTask.WorkflowID, - timerTask.RunID, - nextEventID, - s.version, - int64(0), - int64(0), - ).Return(nil) - _, _, err = s.timerQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) - s.Equal(consts.ErrTaskRetry, err) - - s.mockShard.SetCurrentTime(s.clusterName, s.now.Add(s.discardDuration)) - _, _, err = s.timerQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) - s.Equal(consts.ErrTaskDiscarded, err) -} - -func (s *timerQueueStandbyTaskExecutorSuite) TestProcessUserTimerTimeout_Success() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - mutableState := workflow.TestGlobalMutableState(s.mockShard, - s.mockShard.GetEventsCache(), - s.logger, - s.version, - execution.GetRunId()) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: 
s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowRunTimeout: timestamp.DurationPtr(200 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - }, - ) - s.Nil(err) - - wt := addWorkflowTaskScheduledEvent(mutableState) - event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) - wt.StartedEventID = event.GetEventId() - event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") - - timerID := "timer" - timerTimeout := 2 * time.Second - event, _ = addTimerStartedEvent(mutableState, event.GetEventId(), timerID, timerTimeout) - - timerSequence := workflow.NewTimerSequence(mutableState) - mutableState.InsertTasks[tasks.CategoryTimer] = nil - modified, err := timerSequence.CreateNextUserTimer() - s.NoError(err) - s.True(modified) - task := mutableState.InsertTasks[tasks.CategoryTimer][0] - - timerTask := &tasks.UserTimerTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - Version: s.version, - TaskID: int64(100), - VisibilityTimestamp: task.(*tasks.UserTimerTask).VisibilityTimestamp, - EventID: event.EventId, - } - - event = addTimerFiredEvent(mutableState, timerID) - // Flush buffered events so real IDs get assigned - mutableState.FlushBufferedEvents() - - persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - - s.mockShard.SetCurrentTime(s.clusterName, s.now) - _, _, err = s.timerQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) - s.Nil(err) -} - -func (s *timerQueueStandbyTaskExecutorSuite) TestProcessUserTimerTimeout_Multiple() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowRunTimeout: timestamp.DurationPtr(200 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - }, - ) - s.Nil(err) - - wt := addWorkflowTaskScheduledEvent(mutableState) - event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) - wt.StartedEventID = event.GetEventId() - event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") - - timerID1 := "timer-1" - timerTimeout1 := 2 * time.Second - event, _ = addTimerStartedEvent(mutableState, event.GetEventId(), timerID1, timerTimeout1) - - timerID2 := "timer-2" - timerTimeout2 := 50 * time.Second - _, _ = addTimerStartedEvent(mutableState, event.GetEventId(), 
timerID2, timerTimeout2) - - timerSequence := workflow.NewTimerSequence(mutableState) - mutableState.InsertTasks[tasks.CategoryTimer] = nil - modified, err := timerSequence.CreateNextUserTimer() - s.NoError(err) - s.True(modified) - task := mutableState.InsertTasks[tasks.CategoryTimer][0] - - timerTask := &tasks.UserTimerTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - Version: s.version, - TaskID: int64(100), - VisibilityTimestamp: task.(*tasks.UserTimerTask).VisibilityTimestamp, - EventID: event.EventId, - } - - event = addTimerFiredEvent(mutableState, timerID1) - // Flush buffered events so real IDs get assigned - mutableState.FlushBufferedEvents() - - persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - - s.mockShard.SetCurrentTime(s.clusterName, s.now) - _, _, err = s.timerQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) - s.Nil(err) -} - -func (s *timerQueueStandbyTaskExecutorSuite) TestProcessActivityTimeout_Pending() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowRunTimeout: timestamp.DurationPtr(200 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - }, - ) - s.Nil(err) - - wt := addWorkflowTaskScheduledEvent(mutableState) - event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) - wt.StartedEventID = event.GetEventId() - event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") - - taskqueue := "taskqueue" - activityID := "activity" - activityType := "activity type" - timerTimeout := 2 * time.Second - scheduledEvent, _ := addActivityTaskScheduledEvent(mutableState, event.GetEventId(), activityID, activityType, taskqueue, nil, - timerTimeout, timerTimeout, timerTimeout, timerTimeout) - nextEventID := scheduledEvent.GetEventId() - - timerSequence := workflow.NewTimerSequence(mutableState) - mutableState.InsertTasks[tasks.CategoryTimer] = nil - modified, err := timerSequence.CreateNextActivityTimer() - s.NoError(err) - s.True(modified) - task := mutableState.InsertTasks[tasks.CategoryTimer][0] - - timerTask := &tasks.ActivityTimeoutTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - Attempt: 1, - Version: s.version, - TaskID: int64(100), - TimeoutType: enumspb.TIMEOUT_TYPE_SCHEDULE_TO_CLOSE, - VisibilityTimestamp: task.(*tasks.ActivityTimeoutTask).VisibilityTimestamp, - EventID: event.EventId, - } - - persistenceMutableState := 
s.createPersistenceMutableState(mutableState, scheduledEvent.GetEventId(), scheduledEvent.GetVersion()) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - - s.mockShard.SetCurrentTime(s.clusterName, s.now) - _, _, err = s.timerQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) - s.Equal(consts.ErrTaskRetry, err) - - s.mockShard.SetCurrentTime(s.clusterName, s.now.Add(s.fetchHistoryDuration)) - s.mockNDCHistoryResender.EXPECT().SendSingleWorkflowHistory( - gomock.Any(), - s.clusterName, - namespace.ID(timerTask.NamespaceID), - timerTask.WorkflowID, - timerTask.RunID, - nextEventID, - s.version, - int64(0), - int64(0), - ).Return(nil) - _, _, err = s.timerQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) - s.Equal(consts.ErrTaskRetry, err) - - s.mockShard.SetCurrentTime(s.clusterName, s.now.Add(s.discardDuration)) - _, _, err = s.timerQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) - s.Equal(consts.ErrTaskDiscarded, err) -} - -func (s *timerQueueStandbyTaskExecutorSuite) TestProcessActivityTimeout_Success() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowRunTimeout: timestamp.DurationPtr(200 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - }, - ) - s.Nil(err) - - wt := addWorkflowTaskScheduledEvent(mutableState) - event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) - wt.StartedEventID = event.GetEventId() - event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") - - identity := "identity" - taskqueue := "taskqueue" - activityID := "activity" - activityType := "activity type" - timerTimeout := 2 * time.Second - scheduledEvent, _ := addActivityTaskScheduledEvent(mutableState, event.GetEventId(), activityID, activityType, taskqueue, nil, - timerTimeout, timerTimeout, timerTimeout, timerTimeout) - startedEvent := addActivityTaskStartedEvent(mutableState, scheduledEvent.GetEventId(), identity) - - timerSequence := workflow.NewTimerSequence(mutableState) - mutableState.InsertTasks[tasks.CategoryTimer] = nil - modified, err := timerSequence.CreateNextActivityTimer() - s.NoError(err) - s.True(modified) - task := mutableState.InsertTasks[tasks.CategoryTimer][0] - - timerTask := &tasks.ActivityTimeoutTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - Attempt: 1, - Version: s.version, - TaskID: int64(100), - TimeoutType: enumspb.TIMEOUT_TYPE_SCHEDULE_TO_CLOSE, - VisibilityTimestamp: task.(*tasks.ActivityTimeoutTask).VisibilityTimestamp, - EventID: event.GetEventId(), - } - - completeEvent := 
addActivityTaskCompletedEvent(mutableState, scheduledEvent.GetEventId(), startedEvent.GetEventId(), nil, identity) - // Flush buffered events so real IDs get assigned - mutableState.FlushBufferedEvents() - - persistenceMutableState := s.createPersistenceMutableState(mutableState, completeEvent.GetEventId(), completeEvent.GetVersion()) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - - s.mockShard.SetCurrentTime(s.clusterName, s.now) - _, _, err = s.timerQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) - s.Nil(err) -} - -func (s *timerQueueStandbyTaskExecutorSuite) TestProcessActivityTimeout_Heartbeat_Noop() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowRunTimeout: timestamp.DurationPtr(200 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - }, - ) - s.Nil(err) - - wt := addWorkflowTaskScheduledEvent(mutableState) - event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) - wt.StartedEventID = event.GetEventId() - event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") - - identity := "identity" - taskqueue := "taskqueue" - activityID := "activity" - activityType := "activity type" - timerTimeout := 2 * time.Second - heartbeatTimerTimeout := time.Second - scheduledEvent, _ := addActivityTaskScheduledEvent(mutableState, event.GetEventId(), activityID, activityType, taskqueue, nil, - timerTimeout, timerTimeout, timerTimeout, heartbeatTimerTimeout) - startedEvent := addActivityTaskStartedEvent(mutableState, scheduledEvent.GetEventId(), identity) - // Flush buffered events so real IDs get assigned - mutableState.FlushBufferedEvents() - - timerSequence := workflow.NewTimerSequence(mutableState) - mutableState.InsertTasks[tasks.CategoryTimer] = nil - modified, err := timerSequence.CreateNextActivityTimer() - s.NoError(err) - s.True(modified) - task := mutableState.InsertTasks[tasks.CategoryTimer][0] - s.Equal(enumspb.TIMEOUT_TYPE_HEARTBEAT, task.(*tasks.ActivityTimeoutTask).TimeoutType) - - timerTask := &tasks.ActivityTimeoutTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - Attempt: 1, - Version: s.version, - TaskID: int64(100), - TimeoutType: enumspb.TIMEOUT_TYPE_HEARTBEAT, - VisibilityTimestamp: time.Unix(946684800, 0).Add(-100 * time.Second), // see pendingActivityTimerHeartbeats from mutable state - EventID: scheduledEvent.GetEventId(), - } - - persistenceMutableState := s.createPersistenceMutableState(mutableState, startedEvent.GetEventId(), startedEvent.GetVersion()) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), 
gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - - s.mockShard.SetCurrentTime(s.clusterName, s.now) - _, _, err = s.timerQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) - s.Nil(err) -} - -func (s *timerQueueStandbyTaskExecutorSuite) TestProcessActivityTimeout_Multiple_CanUpdate() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowRunTimeout: timestamp.DurationPtr(200 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - }, - ) - s.Nil(err) - - wt := addWorkflowTaskScheduledEvent(mutableState) - event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) - wt.StartedEventID = event.GetEventId() - event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") - - identity := "identity" - taskqueue := "taskqueue" - activityID1 := "activity 1" - activityType1 := "activity type 1" - timerTimeout1 := 2 * time.Second - scheduledEvent1, _ := addActivityTaskScheduledEvent(mutableState, event.GetEventId(), activityID1, activityType1, taskqueue, nil, - timerTimeout1, timerTimeout1, timerTimeout1, timerTimeout1) - startedEvent1 := addActivityTaskStartedEvent(mutableState, scheduledEvent1.GetEventId(), identity) - - activityID2 := "activity 2" - activityType2 := "activity type 2" - timerTimeout2 := 20 * time.Second - scheduledEvent2, _ := addActivityTaskScheduledEvent(mutableState, event.GetEventId(), activityID2, activityType2, taskqueue, nil, - timerTimeout2, timerTimeout2, timerTimeout2, 10*time.Second) - addActivityTaskStartedEvent(mutableState, scheduledEvent2.GetEventId(), identity) - activityInfo2 := mutableState.GetPendingActivityInfos()[scheduledEvent2.GetEventId()] - activityInfo2.TimerTaskStatus |= workflow.TimerTaskStatusCreatedHeartbeat - activityInfo2.LastHeartbeatUpdateTime = timestamp.TimePtr(time.Now().UTC()) - - timerSequence := workflow.NewTimerSequence(mutableState) - mutableState.InsertTasks[tasks.CategoryTimer] = nil - modified, err := timerSequence.CreateNextActivityTimer() - s.NoError(err) - s.True(modified) - - timerTask := &tasks.ActivityTimeoutTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - Attempt: 1, - Version: s.version, - TaskID: int64(100), - TimeoutType: enumspb.TIMEOUT_TYPE_HEARTBEAT, - VisibilityTimestamp: activityInfo2.LastHeartbeatUpdateTime.Add(-5 * time.Second), - EventID: scheduledEvent2.GetEventId(), - } - - completeEvent1 := addActivityTaskCompletedEvent(mutableState, scheduledEvent1.GetEventId(), startedEvent1.GetEventId(), nil, identity) - // Flush buffered events so real IDs get assigned - mutableState.FlushBufferedEvents() - - persistenceMutableState := s.createPersistenceMutableState(mutableState, 
completeEvent1.GetEventId(), completeEvent1.GetVersion()) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).DoAndReturn( - func(_ context.Context, input *persistence.UpdateWorkflowExecutionRequest) (*persistence.UpdateWorkflowExecutionResponse, error) { - s.Equal(1, len(input.UpdateWorkflowMutation.Tasks[tasks.CategoryTimer])) - s.Equal(1, len(input.UpdateWorkflowMutation.UpsertActivityInfos)) - mutableState.GetExecutionInfo().LastUpdateTime = input.UpdateWorkflowMutation.ExecutionInfo.LastUpdateTime - input.RangeID = 0 - input.UpdateWorkflowMutation.ExecutionInfo.LastEventTaskId = 0 - input.UpdateWorkflowMutation.ExecutionInfo.LastFirstEventTxnId = 0 - input.UpdateWorkflowMutation.ExecutionInfo.StateTransitionCount = 0 - mutableState.GetExecutionInfo().LastEventTaskId = 0 - mutableState.GetExecutionInfo().LastFirstEventTxnId = 0 - mutableState.GetExecutionInfo().StateTransitionCount = 0 - mutableState.GetExecutionInfo().WorkflowTaskOriginalScheduledTime = input.UpdateWorkflowMutation.ExecutionInfo.WorkflowTaskOriginalScheduledTime - mutableState.GetExecutionInfo().ExecutionStats = &persistencespb.ExecutionStats{} - - s.Equal(&persistence.UpdateWorkflowExecutionRequest{ - ShardID: s.mockShard.GetShardID(), - UpdateWorkflowMutation: persistence.WorkflowMutation{ - ExecutionInfo: mutableState.GetExecutionInfo(), - ExecutionState: mutableState.GetExecutionState(), - NextEventID: mutableState.GetNextEventID(), - Tasks: input.UpdateWorkflowMutation.Tasks, - Condition: mutableState.GetNextEventID(), - UpsertActivityInfos: input.UpdateWorkflowMutation.UpsertActivityInfos, - DeleteActivityInfos: map[int64]struct{}{}, - UpsertTimerInfos: map[string]*persistencespb.TimerInfo{}, - DeleteTimerInfos: map[string]struct{}{}, - UpsertChildExecutionInfos: map[int64]*persistencespb.ChildExecutionInfo{}, - DeleteChildExecutionInfos: map[int64]struct{}{}, - UpsertRequestCancelInfos: map[int64]*persistencespb.RequestCancelInfo{}, - DeleteRequestCancelInfos: map[int64]struct{}{}, - UpsertSignalInfos: map[int64]*persistencespb.SignalInfo{}, - DeleteSignalInfos: map[int64]struct{}{}, - UpsertSignalRequestedIDs: map[string]struct{}{}, - DeleteSignalRequestedIDs: map[string]struct{}{}, - NewBufferedEvents: nil, - ClearBufferedEvents: false, - }, - UpdateWorkflowEvents: []*persistence.WorkflowEvents{}, - }, input) - return tests.UpdateWorkflowExecutionResponse, nil - }) - - s.mockShard.SetCurrentTime(s.clusterName, s.now) - _, _, err = s.timerQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) - s.Nil(err) -} - -func (s *timerQueueStandbyTaskExecutorSuite) TestProcessWorkflowTaskTimeout_Pending() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) - _ = mutableState.UpdateCurrentVersion(s.version, false) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - 
TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowRunTimeout: timestamp.DurationPtr(200 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - }, - ) - s.Nil(err) - - wt := addWorkflowTaskScheduledEvent(mutableState) - startedEvent := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) - // Flush buffered events so real IDs get assigned - mutableState.FlushBufferedEvents() - nextEventID := startedEvent.GetEventId() - - timerTask := &tasks.WorkflowTaskTimeoutTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - ScheduleAttempt: 1, - Version: s.version, - TaskID: int64(100), - TimeoutType: enumspb.TIMEOUT_TYPE_START_TO_CLOSE, - VisibilityTimestamp: s.now, - EventID: wt.ScheduledEventID, - } - - persistenceMutableState := s.createPersistenceMutableState(mutableState, startedEvent.GetEventId(), startedEvent.GetVersion()) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - - s.mockShard.SetCurrentTime(s.clusterName, s.now) - _, _, err = s.timerQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) - s.Equal(consts.ErrTaskRetry, err) - - s.mockShard.SetCurrentTime(s.clusterName, s.now.Add(s.fetchHistoryDuration)) - s.mockNDCHistoryResender.EXPECT().SendSingleWorkflowHistory( - gomock.Any(), - s.clusterName, - namespace.ID(timerTask.NamespaceID), - timerTask.WorkflowID, - timerTask.RunID, - nextEventID, - s.version, - int64(0), - int64(0), - ).Return(nil) - _, _, err = s.timerQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) - s.Equal(consts.ErrTaskRetry, err) - - s.mockShard.SetCurrentTime(s.clusterName, s.now.Add(s.discardDuration)) - _, _, err = s.timerQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) - s.Equal(consts.ErrTaskDiscarded, err) -} - -func (s *timerQueueStandbyTaskExecutorSuite) TestProcessWorkflowTaskTimeout_ScheduleToStartTimer() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - - workflowTaskScheduledEventID := int64(16384) - - timerTask := &tasks.WorkflowTaskTimeoutTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - ScheduleAttempt: 1, - Version: s.version, - TaskID: int64(100), - TimeoutType: enumspb.TIMEOUT_TYPE_SCHEDULE_TO_START, - VisibilityTimestamp: s.now, - EventID: workflowTaskScheduledEventID, - } - - s.mockShard.SetCurrentTime(s.clusterName, s.now) - _, _, err := s.timerQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) - s.Equal(nil, err) -} - -func (s *timerQueueStandbyTaskExecutorSuite) TestProcessWorkflowTaskTimeout_Success() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: 
&commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowRunTimeout: timestamp.DurationPtr(200 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - }, - ) - s.Nil(err) - - wt := addWorkflowTaskScheduledEvent(mutableState) - event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) - wt.StartedEventID = event.GetEventId() - event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") - // Flush buffered events so real IDs get assigned - mutableState.FlushBufferedEvents() - - timerTask := &tasks.WorkflowTaskTimeoutTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - ScheduleAttempt: 1, - Version: s.version, - TaskID: int64(100), - TimeoutType: enumspb.TIMEOUT_TYPE_START_TO_CLOSE, - VisibilityTimestamp: s.now, - EventID: wt.ScheduledEventID, - } - - persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - - s.mockShard.SetCurrentTime(s.clusterName, s.now) - _, _, err = s.timerQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) - s.Nil(err) -} - -func (s *timerQueueStandbyTaskExecutorSuite) TestProcessWorkflowBackoffTimer_Pending() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) - event, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowRunTimeout: timestamp.DurationPtr(200 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - }, - ) - s.Nil(err) - - // Flush buffered events so real IDs get assigned - mutableState.FlushBufferedEvents() - nextEventID := event.GetEventId() - - timerTask := &tasks.WorkflowBackoffTimerTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - Version: s.version, - TaskID: int64(100), - VisibilityTimestamp: s.now, - WorkflowBackoffType: enumsspb.WORKFLOW_BACKOFF_TYPE_RETRY, - } - - persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - - s.mockShard.SetCurrentTime(s.clusterName, s.now) - _, _, err = s.timerQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) - s.Equal(consts.ErrTaskRetry, err) - - s.mockShard.SetCurrentTime(s.clusterName, time.Now().UTC().Add(s.fetchHistoryDuration)) - s.mockNDCHistoryResender.EXPECT().SendSingleWorkflowHistory( - gomock.Any(), - s.clusterName, - 
namespace.ID(timerTask.NamespaceID), - timerTask.WorkflowID, - timerTask.RunID, - nextEventID, - s.version, - int64(0), - int64(0), - ).Return(nil) - _, _, err = s.timerQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) - s.Equal(consts.ErrTaskRetry, err) - - s.mockShard.SetCurrentTime(s.clusterName, time.Now().UTC().Add(s.discardDuration)) - _, _, err = s.timerQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) - s.Equal(consts.ErrTaskDiscarded, err) -} - -func (s *timerQueueStandbyTaskExecutorSuite) TestProcessWorkflowBackoffTimer_Success() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowRunTimeout: timestamp.DurationPtr(200 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - }, - ) - s.Nil(err) - - wt := addWorkflowTaskScheduledEvent(mutableState) - // Flush buffered events so real IDs get assigned - mutableState.FlushBufferedEvents() - - timerTask := &tasks.WorkflowBackoffTimerTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - Version: s.version, - TaskID: int64(100), - VisibilityTimestamp: s.now, - WorkflowBackoffType: enumsspb.WORKFLOW_BACKOFF_TYPE_CRON, - } - - persistenceMutableState := s.createPersistenceMutableState(mutableState, wt.ScheduledEventID, wt.Version) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - - s.mockShard.SetCurrentTime(s.clusterName, s.now) - _, _, err = s.timerQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) - s.Nil(err) -} - -func (s *timerQueueStandbyTaskExecutorSuite) TestProcessWorkflowTimeout_Pending() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowRunTimeout: timestamp.DurationPtr(200 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - }, - ) - s.Nil(err) - - wt := addWorkflowTaskScheduledEvent(mutableState) - startEvent := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) - wt.StartedEventID = startEvent.GetEventId() - completionEvent := 
addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") - // Flush buffered events so real IDs get assigned - mutableState.FlushBufferedEvents() - nextEventID := completionEvent.GetEventId() - - timerTask := &tasks.WorkflowTimeoutTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - Version: s.version, - TaskID: int64(100), - VisibilityTimestamp: s.now, - } - - persistenceMutableState := s.createPersistenceMutableState(mutableState, completionEvent.GetEventId(), completionEvent.GetVersion()) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - - s.mockShard.SetCurrentTime(s.clusterName, s.now) - _, _, err = s.timerQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) - s.Equal(consts.ErrTaskRetry, err) - - s.mockShard.SetCurrentTime(s.clusterName, s.now.Add(s.fetchHistoryDuration)) - s.mockNDCHistoryResender.EXPECT().SendSingleWorkflowHistory( - gomock.Any(), - s.clusterName, - namespace.ID(timerTask.NamespaceID), - timerTask.WorkflowID, - timerTask.RunID, - nextEventID, - s.version, - int64(0), - int64(0), - ).Return(nil) - _, _, err = s.timerQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) - s.Equal(consts.ErrTaskRetry, err) - - s.mockShard.SetCurrentTime(s.clusterName, s.now.Add(s.discardDuration)) - _, _, err = s.timerQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) - s.Equal(consts.ErrTaskDiscarded, err) -} - -func (s *timerQueueStandbyTaskExecutorSuite) TestProcessWorkflowTimeout_Success() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowRunTimeout: timestamp.DurationPtr(200 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - }, - ) - s.Nil(err) - - wt := addWorkflowTaskScheduledEvent(mutableState) - event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) - wt.StartedEventID = event.GetEventId() - event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") - event = addCompleteWorkflowEvent(mutableState, event.GetEventId(), nil) - // Flush buffered events so real IDs get assigned - mutableState.FlushBufferedEvents() - - timerTask := &tasks.WorkflowTimeoutTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - Version: s.version, - TaskID: int64(100), - VisibilityTimestamp: s.now, - } - - persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), 
gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - - s.mockShard.SetCurrentTime(s.clusterName, s.now) - _, _, err = s.timerQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) - s.Nil(err) -} - -func (s *timerQueueStandbyTaskExecutorSuite) TestProcessRetryTimeout() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) - startEvent, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowRunTimeout: timestamp.DurationPtr(200 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - }, - ) - s.Nil(err) - persistenceMutableState := s.createPersistenceMutableState(mutableState, startEvent.GetEventId(), startEvent.GetVersion()) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil).AnyTimes() - timerTask := &tasks.ActivityRetryTimerTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - Attempt: 1, - Version: s.version, - TaskID: int64(100), - VisibilityTimestamp: s.now, - EventID: int64(16384), - } - - s.mockShard.SetCurrentTime(s.clusterName, s.now) - _, _, err = s.timerQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) - s.Nil(err) -} - -func (s *timerQueueStandbyTaskExecutorSuite) TestProcessActivityRetryTimer_Noop() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowRunTimeout: timestamp.DurationPtr(200 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - }, - ) - s.Nil(err) - - wt := addWorkflowTaskScheduledEvent(mutableState) - event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) - wt.StartedEventID = event.GetEventId() - event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") - - identity := "identity" - taskqueue := "taskqueue" - activityID := "activity" - activityType := "activity type" - timerTimeout := 2 * time.Second - scheduledEvent, _ := addActivityTaskScheduledEvent(mutableState, event.GetEventId(), activityID, activityType, taskqueue, nil, - timerTimeout, timerTimeout, 
timerTimeout, timerTimeout) - startedEvent := addActivityTaskStartedEvent(mutableState, scheduledEvent.GetEventId(), identity) - - timerSequence := workflow.NewTimerSequence(mutableState) - mutableState.InsertTasks[tasks.CategoryTimer] = nil - modified, err := timerSequence.CreateNextActivityTimer() - s.NoError(err) - s.True(modified) - task := mutableState.InsertTasks[tasks.CategoryTimer][0] - - // Flush buffered events so real IDs get assigned - mutableState.FlushBufferedEvents() - - persistenceMutableState := s.createPersistenceMutableState(mutableState, startedEvent.GetEventId(), startedEvent.GetVersion()) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil).AnyTimes() - s.mockShard.SetCurrentTime(s.clusterName, s.now) - - timerTask := &tasks.ActivityRetryTimerTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - Attempt: 2, - Version: s.version, - TaskID: int64(100), - VisibilityTimestamp: task.(*tasks.ActivityTimeoutTask).VisibilityTimestamp, - EventID: scheduledEvent.GetEventId(), - } - _, _, err = s.timerQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) - s.Nil(err) - - timerTask = &tasks.ActivityRetryTimerTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - Attempt: 2, - Version: s.version - 1, - TaskID: int64(100), - VisibilityTimestamp: task.(*tasks.ActivityTimeoutTask).VisibilityTimestamp, - EventID: scheduledEvent.GetEventId(), - } - _, _, err = s.timerQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) - s.Equal(consts.ErrTaskVersionMismatch, err) - - timerTask = &tasks.ActivityRetryTimerTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - Attempt: 0, - Version: s.version, - TaskID: int64(100), - VisibilityTimestamp: task.(*tasks.ActivityTimeoutTask).VisibilityTimestamp, - EventID: scheduledEvent.GetEventId(), - } - _, _, err = s.timerQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) - s.Nil(err) -} - -func (s *timerQueueStandbyTaskExecutorSuite) TestProcessActivityRetryTimer_ActivityCompleted() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowRunTimeout: timestamp.DurationPtr(200 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - }, - ) - s.Nil(err) - - wt := addWorkflowTaskScheduledEvent(mutableState) - event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) - wt.StartedEventID = event.GetEventId() - event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, 
wt.StartedEventID, "some random identity") - - identity := "identity" - taskqueue := "taskqueue" - activityID := "activity" - activityType := "activity type" - timerTimeout := 2 * time.Second - scheduledEvent, _ := addActivityTaskScheduledEvent(mutableState, event.GetEventId(), activityID, activityType, taskqueue, nil, - timerTimeout, timerTimeout, timerTimeout, timerTimeout) - startedEvent := addActivityTaskStartedEvent(mutableState, scheduledEvent.GetEventId(), identity) - - timerSequence := workflow.NewTimerSequence(mutableState) - mutableState.InsertTasks[tasks.CategoryTimer] = nil - modified, err := timerSequence.CreateNextActivityTimer() - s.NoError(err) - s.True(modified) - task := mutableState.InsertTasks[tasks.CategoryTimer][0] - - completeEvent := addActivityTaskCompletedEvent(mutableState, scheduledEvent.GetEventId(), startedEvent.GetEventId(), nil, identity) - // Flush buffered events so real IDs get assigned - mutableState.FlushBufferedEvents() - - persistenceMutableState := s.createPersistenceMutableState(mutableState, completeEvent.GetEventId(), completeEvent.GetVersion()) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - - s.mockShard.SetCurrentTime(s.clusterName, s.now) - timerTask := &tasks.ActivityRetryTimerTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - Attempt: 2, - Version: s.version, - TaskID: int64(100), - VisibilityTimestamp: task.(*tasks.ActivityTimeoutTask).VisibilityTimestamp, - EventID: scheduledEvent.GetEventId(), - } - _, _, err = s.timerQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) - s.Nil(err) -} - -func (s *timerQueueStandbyTaskExecutorSuite) TestProcessActivityRetryTimer_Pending() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowRunTimeout: timestamp.DurationPtr(200 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - }, - ) - s.Nil(err) - - wt := addWorkflowTaskScheduledEvent(mutableState) - event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) - wt.StartedEventID = event.GetEventId() - event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") - - activityID := "activity" - activityType := "activity type" - timerTimeout := 2 * time.Second - scheduledEvent, _ := addActivityTaskScheduledEvent(mutableState, event.GetEventId(), activityID, activityType, taskQueueName, nil, - timerTimeout, timerTimeout, timerTimeout, timerTimeout) - - timerSequence := workflow.NewTimerSequence(mutableState) - mutableState.InsertTasks[tasks.CategoryTimer] = nil - modified, err := timerSequence.CreateNextActivityTimer() - s.NoError(err) - 
s.True(modified) - task := mutableState.InsertTasks[tasks.CategoryTimer][0] - - // Flush buffered events so real IDs get assigned - mutableState.FlushBufferedEvents() - - timerTask := &tasks.ActivityRetryTimerTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - Attempt: 2, - Version: s.version, - TaskID: int64(100), - VisibilityTimestamp: task.(*tasks.ActivityTimeoutTask).VisibilityTimestamp, - EventID: scheduledEvent.GetEventId(), - } - - persistenceMutableState := s.createPersistenceMutableState(mutableState, scheduledEvent.GetEventId(), scheduledEvent.GetVersion()) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - - // no-op post action - s.mockShard.SetCurrentTime(s.clusterName, s.now) - _, _, err = s.timerQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) - s.Equal(consts.ErrTaskRetry, err) - - // resend history post action - s.mockShard.SetCurrentTime(s.clusterName, s.now.Add(s.fetchHistoryDuration)) - s.mockNDCHistoryResender.EXPECT().SendSingleWorkflowHistory( - gomock.Any(), - s.clusterName, - s.namespaceID, - execution.WorkflowId, - execution.RunId, - scheduledEvent.GetEventId(), - s.version, - int64(0), - int64(0), - ).Return(nil) - _, _, err = s.timerQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) - s.Equal(consts.ErrTaskRetry, err) - - // push to matching post action - s.mockShard.SetCurrentTime(s.clusterName, s.now.Add(s.discardDuration)) - s.mockMatchingClient.EXPECT().AddActivityTask( - gomock.Any(), - &matchingservice.AddActivityTaskRequest{ - NamespaceId: s.namespaceID.String(), - Execution: &execution, - TaskQueue: &taskqueuepb.TaskQueue{ - Name: taskQueueName, - Kind: enumspb.TASK_QUEUE_KIND_NORMAL, - }, - ScheduledEventId: scheduledEvent.EventId, - ScheduleToStartTimeout: &timerTimeout, - Clock: vclock.NewVectorClock(s.mockClusterMetadata.GetClusterID(), s.mockShard.GetShardID(), timerTask.TaskID), - VersionDirective: common.MakeVersionDirectiveForActivityTask(nil, false), - }, - gomock.Any(), - ).Return(&matchingservice.AddActivityTaskResponse{}, nil) - - _, _, err = s.timerQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) - s.Nil(err) -} - -func (s *timerQueueStandbyTaskExecutorSuite) createPersistenceMutableState( - ms workflow.MutableState, - lastEventID int64, - lastEventVersion int64, -) *persistencespb.WorkflowMutableState { - currentVersionHistory, err := versionhistory.GetCurrentVersionHistory(ms.GetExecutionInfo().GetVersionHistories()) - s.NoError(err) - err = versionhistory.AddOrUpdateVersionHistoryItem(currentVersionHistory, versionhistory.NewVersionHistoryItem( - lastEventID, lastEventVersion, - )) - s.NoError(err) - return workflow.TestCloneToProto(ms) -} - -func (s *timerQueueStandbyTaskExecutorSuite) newTaskExecutable( - task tasks.Task, -) queues.Executable { - return queues.NewExecutable( - queues.DefaultReaderId, - task, - s.timerQueueStandbyTaskExecutor, - nil, - nil, - queues.NewNoopPriorityAssigner(), - s.mockShard.GetTimeSource(), - s.mockNamespaceCache, - s.mockClusterMetadata, - nil, - metrics.NoopMetricsHandler, - ) -} diff -Nru temporal-1.21.5-1/src/service/history/timerQueueTaskExecutorBase.go temporal-1.22.5/src/service/history/timerQueueTaskExecutorBase.go --- temporal-1.21.5-1/src/service/history/timerQueueTaskExecutorBase.go 
2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/timerQueueTaskExecutorBase.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,207 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package history - -import ( - "context" - - commonpb "go.temporal.io/api/common/v1" - "go.temporal.io/api/serviceerror" - - enumsspb "go.temporal.io/server/api/enums/v1" - "go.temporal.io/server/api/matchingservice/v1" - "go.temporal.io/server/common" - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/metrics" - "go.temporal.io/server/common/namespace" - "go.temporal.io/server/common/persistence" - "go.temporal.io/server/service/history/configs" - "go.temporal.io/server/service/history/consts" - "go.temporal.io/server/service/history/deletemanager" - "go.temporal.io/server/service/history/shard" - "go.temporal.io/server/service/history/tasks" - "go.temporal.io/server/service/history/workflow" - wcache "go.temporal.io/server/service/history/workflow/cache" -) - -var errUnknownTimerTask = serviceerror.NewInternal("unknown timer task") - -type ( - timerQueueTaskExecutorBase struct { - currentClusterName string - shard shard.Context - registry namespace.Registry - deleteManager deletemanager.DeleteManager - cache wcache.Cache - logger log.Logger - matchingClient matchingservice.MatchingServiceClient - metricHandler metrics.Handler - config *configs.Config - } -) - -func newTimerQueueTaskExecutorBase( - shard shard.Context, - workflowCache wcache.Cache, - deleteManager deletemanager.DeleteManager, - matchingClient matchingservice.MatchingServiceClient, - logger log.Logger, - metricHandler metrics.Handler, - config *configs.Config, -) *timerQueueTaskExecutorBase { - return &timerQueueTaskExecutorBase{ - currentClusterName: shard.GetClusterMetadata().GetCurrentClusterName(), - shard: shard, - registry: shard.GetNamespaceRegistry(), - cache: workflowCache, - deleteManager: deleteManager, - logger: logger, - matchingClient: matchingClient, - metricHandler: metricHandler, - config: config, - } -} - -func (t *timerQueueTaskExecutorBase) executeDeleteHistoryEventTask( - ctx context.Context, - task *tasks.DeleteHistoryEventTask, -) (retError error) { - ctx, cancel := context.WithTimeout(ctx, taskTimeout) - defer cancel() - - workflowExecution := commonpb.WorkflowExecution{ - WorkflowId: task.GetWorkflowID(), - 
RunId: task.GetRunID(), - } - - weContext, release, err := t.cache.GetOrCreateWorkflowExecution( - ctx, - namespace.ID(task.GetNamespaceID()), - workflowExecution, - workflow.LockPriorityLow, - ) - if err != nil { - return err - } - defer func() { release(retError) }() - - mutableState, err := loadMutableStateForTimerTask(ctx, weContext, task, t.metricHandler, t.logger) - switch err.(type) { - case nil: - if mutableState == nil { - return nil - } - case *serviceerror.NotFound: - // the mutable state is deleted and delete history branch operation failed. - // use task branch token to delete the leftover history branch - return t.deleteHistoryBranch(ctx, task.BranchToken) - default: - return err - } - - if mutableState.GetExecutionState().GetState() != enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED { - // If workflow is running then just ignore DeleteHistoryEventTask timer task. - // This should almost never happen because DeleteHistoryEventTask is created only for closed workflows. - // But cross DC replication can resurrect workflow and therefore DeleteHistoryEventTask should be ignored. - return nil - } - - lastWriteVersion, err := mutableState.GetLastWriteVersion() - if err != nil { - return err - } - if err := CheckTaskVersion(t.shard, t.logger, mutableState.GetNamespaceEntry(), lastWriteVersion, task.Version, task); err != nil { - return err - } - - // We should only archive if it is enabled, and the data wasn't already archived. If WorkflowDataAlreadyArchived - // flag is set to true, then the data was already archived, so we can skip it. - archiveIfEnabled := !task.WorkflowDataAlreadyArchived - return t.deleteManager.DeleteWorkflowExecutionByRetention( - ctx, - namespace.ID(task.GetNamespaceID()), - workflowExecution, - weContext, - mutableState, - archiveIfEnabled, - &task.ProcessStage, // Pass stage by reference to update it inside delete manager. 
- ) -} - -func getWorkflowExecutionContextForTask( - ctx context.Context, - workflowCache wcache.Cache, - task tasks.Task, -) (workflow.Context, wcache.ReleaseCacheFunc, error) { - namespaceID, execution := getTaskNamespaceIDAndWorkflowExecution(task) - return getWorkflowExecutionContext( - ctx, - workflowCache, - namespaceID, - execution, - ) -} - -func getWorkflowExecutionContext( - ctx context.Context, - workflowCache wcache.Cache, - namespaceID namespace.ID, - execution commonpb.WorkflowExecution, -) (workflow.Context, wcache.ReleaseCacheFunc, error) { - // workflowCache will automatically use short context timeout when - // locking workflow for all background calls, we don't need a separate context here - weContext, release, err := workflowCache.GetOrCreateWorkflowExecution( - ctx, - namespaceID, - execution, - workflow.LockPriorityLow, - ) - if common.IsContextDeadlineExceededErr(err) { - err = consts.ErrResourceExhaustedBusyWorkflow - } - return weContext, release, err -} - -func getTaskNamespaceIDAndWorkflowExecution( - task tasks.Task, -) (namespace.ID, commonpb.WorkflowExecution) { - return namespace.ID(task.GetNamespaceID()), commonpb.WorkflowExecution{ - WorkflowId: task.GetWorkflowID(), - RunId: task.GetRunID(), - } -} - -func (t *timerQueueTaskExecutorBase) deleteHistoryBranch( - ctx context.Context, - branchToken []byte, -) error { - if len(branchToken) > 0 { - return t.shard.GetExecutionManager().DeleteHistoryBranch(ctx, &persistence.DeleteHistoryBranchRequest{ - ShardID: t.shard.GetShardID(), - BranchToken: branchToken, - }) - } - return nil -} diff -Nru temporal-1.21.5-1/src/service/history/timerQueueTaskExecutorBase_test.go temporal-1.22.5/src/service/history/timerQueueTaskExecutorBase_test.go --- temporal-1.21.5-1/src/service/history/timerQueueTaskExecutorBase_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/timerQueueTaskExecutorBase_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,213 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- -package history - -import ( - "context" - "fmt" - "testing" - "time" - - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - - commonpb "go.temporal.io/api/common/v1" - "go.temporal.io/api/serviceerror" - - enumsspb "go.temporal.io/server/api/enums/v1" - persistencespb "go.temporal.io/server/api/persistence/v1" - "go.temporal.io/server/common/cluster" - "go.temporal.io/server/common/definition" - "go.temporal.io/server/common/metrics" - "go.temporal.io/server/service/history/deletemanager" - "go.temporal.io/server/service/history/shard" - "go.temporal.io/server/service/history/tasks" - "go.temporal.io/server/service/history/tests" - "go.temporal.io/server/service/history/workflow" - wcache "go.temporal.io/server/service/history/workflow/cache" -) - -type ( - timerQueueTaskExecutorBaseSuite struct { - suite.Suite - *require.Assertions - - controller *gomock.Controller - mockDeleteManager *deletemanager.MockDeleteManager - mockCache *wcache.MockCache - - testShardContext *shard.ContextTest - timerQueueTaskExecutorBase *timerQueueTaskExecutorBase - } -) - -func TestTimerQueueTaskExecutorBaseSuite(t *testing.T) { - s := new(timerQueueTaskExecutorBaseSuite) - suite.Run(t, s) -} - -func (s *timerQueueTaskExecutorBaseSuite) SetupSuite() { -} - -func (s *timerQueueTaskExecutorBaseSuite) TearDownSuite() { -} - -func (s *timerQueueTaskExecutorBaseSuite) SetupTest() { - s.Assertions = require.New(s.T()) - - s.controller = gomock.NewController(s.T()) - s.mockDeleteManager = deletemanager.NewMockDeleteManager(s.controller) - s.mockCache = wcache.NewMockCache(s.controller) - - config := tests.NewDynamicConfig() - s.testShardContext = shard.NewTestContext( - s.controller, - &persistencespb.ShardInfo{ - ShardId: 0, - RangeId: 1, - }, - config, - ) - s.testShardContext.Resource.ClusterMetadata.EXPECT().GetCurrentClusterName().Return(cluster.TestCurrentClusterName).AnyTimes() - - s.timerQueueTaskExecutorBase = newTimerQueueTaskExecutorBase( - s.testShardContext, - s.mockCache, - s.mockDeleteManager, - nil, - s.testShardContext.GetLogger(), - metrics.NoopMetricsHandler, - config, - ) -} - -func (s *timerQueueTaskExecutorBaseSuite) TearDownTest() { - s.controller.Finish() -} - -func (s *timerQueueTaskExecutorBaseSuite) Test_executeDeleteHistoryEventTask_NoErr() { - for _, alreadyArchived := range []bool{false, true} { - s.Run(fmt.Sprintf("AlreadyArchived=%v", alreadyArchived), func() { - task := &tasks.DeleteHistoryEventTask{ - WorkflowKey: definition.NewWorkflowKey( - tests.NamespaceID.String(), - tests.WorkflowID, - tests.RunID, - ), - Version: 123, - TaskID: 12345, - VisibilityTimestamp: time.Now().UTC(), - WorkflowDataAlreadyArchived: alreadyArchived, - } - we := commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - } - - mockWeCtx := workflow.NewMockContext(s.controller) - mockMutableState := workflow.NewMockMutableState(s.controller) - - s.mockCache.EXPECT().GetOrCreateWorkflowExecution(gomock.Any(), tests.NamespaceID, we, workflow.LockPriorityLow).Return(mockWeCtx, wcache.NoopReleaseFn, nil) - - mockWeCtx.EXPECT().LoadMutableState(gomock.Any()).Return(mockMutableState, nil) - mockMutableState.EXPECT().GetLastWriteVersion().Return(int64(1), nil) - mockMutableState.EXPECT().GetExecutionInfo().Return(&persistencespb.WorkflowExecutionInfo{}) - mockMutableState.EXPECT().GetNextEventID().Return(int64(2)) - mockMutableState.EXPECT().GetNamespaceEntry().Return(tests.LocalNamespaceEntry) - 
s.testShardContext.Resource.ClusterMetadata.EXPECT().IsGlobalNamespaceEnabled().Return(false) - mockMutableState.EXPECT().GetExecutionState().Return(&persistencespb.WorkflowExecutionState{State: enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED}) - - archiveIfEnabled := !alreadyArchived - stage := tasks.DeleteWorkflowExecutionStageNone - s.mockDeleteManager.EXPECT().DeleteWorkflowExecutionByRetention( - gomock.Any(), - tests.NamespaceID, - we, - mockWeCtx, - mockMutableState, - archiveIfEnabled, - &stage, - ).Return(nil) - - err := s.timerQueueTaskExecutorBase.executeDeleteHistoryEventTask( - context.Background(), - task) - s.NoError(err) - }) - } -} - -func (s *timerQueueTaskExecutorBaseSuite) TestArchiveHistory_DeleteFailed() { - for _, alreadyArchived := range []bool{false, true} { - s.Run(fmt.Sprintf("AlreadyArchived=%v", alreadyArchived), func() { - task := &tasks.DeleteHistoryEventTask{ - WorkflowKey: definition.NewWorkflowKey( - tests.NamespaceID.String(), - tests.WorkflowID, - tests.RunID, - ), - Version: 123, - TaskID: 12345, - VisibilityTimestamp: time.Now().UTC(), - WorkflowDataAlreadyArchived: alreadyArchived, - } - we := commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - } - - mockWeCtx := workflow.NewMockContext(s.controller) - mockMutableState := workflow.NewMockMutableState(s.controller) - - s.mockCache.EXPECT().GetOrCreateWorkflowExecution(gomock.Any(), tests.NamespaceID, we, workflow.LockPriorityLow).Return(mockWeCtx, wcache.NoopReleaseFn, nil) - - mockWeCtx.EXPECT().LoadMutableState(gomock.Any()).Return(mockMutableState, nil) - mockMutableState.EXPECT().GetLastWriteVersion().Return(int64(1), nil) - mockMutableState.EXPECT().GetExecutionInfo().Return(&persistencespb.WorkflowExecutionInfo{}) - mockMutableState.EXPECT().GetNextEventID().Return(int64(2)) - mockMutableState.EXPECT().GetNamespaceEntry().Return(tests.LocalNamespaceEntry) - s.testShardContext.Resource.ClusterMetadata.EXPECT().IsGlobalNamespaceEnabled().Return(false) - mockMutableState.EXPECT().GetExecutionState().Return(&persistencespb.WorkflowExecutionState{State: enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED}) - - archiveIfEnabled := !alreadyArchived - stage := tasks.DeleteWorkflowExecutionStageNone - s.mockDeleteManager.EXPECT().DeleteWorkflowExecutionByRetention( - gomock.Any(), - tests.NamespaceID, - we, - mockWeCtx, - mockMutableState, - archiveIfEnabled, - &stage, - ).Return(serviceerror.NewInternal("test error")) - - err := s.timerQueueTaskExecutorBase.executeDeleteHistoryEventTask( - context.Background(), - task) - s.Error(err) - }) - } -} diff -Nru temporal-1.21.5-1/src/service/history/timer_queue_active_task_executor.go temporal-1.22.5/src/service/history/timer_queue_active_task_executor.go --- temporal-1.21.5-1/src/service/history/timer_queue_active_task_executor.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/service/history/timer_queue_active_task_executor.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,668 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package history + +import ( + "context" + "fmt" + + "github.com/pborman/uuid" + + commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" + "go.temporal.io/api/serviceerror" + taskqueuepb "go.temporal.io/api/taskqueue/v1" + + enumsspb "go.temporal.io/server/api/enums/v1" + "go.temporal.io/server/api/matchingservice/v1" + "go.temporal.io/server/common" + "go.temporal.io/server/common/backoff" + "go.temporal.io/server/common/definition" + "go.temporal.io/server/common/failure" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/log/tag" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/namespace" + "go.temporal.io/server/common/primitives/timestamp" + "go.temporal.io/server/common/resource" + "go.temporal.io/server/common/worker_versioning" + "go.temporal.io/server/service/history/configs" + "go.temporal.io/server/service/history/consts" + "go.temporal.io/server/service/history/deletemanager" + "go.temporal.io/server/service/history/queues" + "go.temporal.io/server/service/history/shard" + "go.temporal.io/server/service/history/tasks" + "go.temporal.io/server/service/history/vclock" + "go.temporal.io/server/service/history/workflow" + wcache "go.temporal.io/server/service/history/workflow/cache" +) + +type ( + timerQueueActiveTaskExecutor struct { + *timerQueueTaskExecutorBase + } +) + +func newTimerQueueActiveTaskExecutor( + shard shard.Context, + workflowCache wcache.Cache, + workflowDeleteManager deletemanager.DeleteManager, + logger log.Logger, + metricProvider metrics.Handler, + config *configs.Config, + matchingRawClient resource.MatchingRawClient, +) queues.Executor { + return &timerQueueActiveTaskExecutor{ + timerQueueTaskExecutorBase: newTimerQueueTaskExecutorBase( + shard, + workflowCache, + workflowDeleteManager, + matchingRawClient, + logger, + metricProvider, + config, + ), + } +} + +func (t *timerQueueActiveTaskExecutor) Execute( + ctx context.Context, + executable queues.Executable, +) ([]metrics.Tag, bool, error) { + taskTypeTagValue := queues.GetActiveTimerTaskTypeTagValue(executable) + namespaceTag, replicationState := getNamespaceTagAndReplicationStateByID( + t.shard.GetNamespaceRegistry(), + executable.GetNamespaceID(), + ) + metricsTags := []metrics.Tag{ + namespaceTag, + metrics.TaskTypeTag(taskTypeTagValue), + metrics.OperationTag(taskTypeTagValue), // for backward compatibility + } + + if replicationState == 
enumspb.REPLICATION_STATE_HANDOVER { + // TODO: exclude task types here if we believe it's safe & necessary to execute + // them during namespace handover. + // TODO: move this logic to queues.Executable when metrics tag doesn't need to + // be returned from task executor + return metricsTags, true, consts.ErrNamespaceHandover + } + + var err error + switch task := executable.GetTask().(type) { + case *tasks.UserTimerTask: + err = t.executeUserTimerTimeoutTask(ctx, task) + case *tasks.ActivityTimeoutTask: + err = t.executeActivityTimeoutTask(ctx, task) + case *tasks.WorkflowTaskTimeoutTask: + err = t.executeWorkflowTaskTimeoutTask(ctx, task) + case *tasks.WorkflowTimeoutTask: + err = t.executeWorkflowTimeoutTask(ctx, task) + case *tasks.ActivityRetryTimerTask: + err = t.executeActivityRetryTimerTask(ctx, task) + case *tasks.WorkflowBackoffTimerTask: + err = t.executeWorkflowBackoffTimerTask(ctx, task) + case *tasks.DeleteHistoryEventTask: + err = t.executeDeleteHistoryEventTask(ctx, task) + default: + err = errUnknownTimerTask + } + + return metricsTags, true, err +} + +func (t *timerQueueActiveTaskExecutor) executeUserTimerTimeoutTask( + ctx context.Context, + task *tasks.UserTimerTask, +) (retError error) { + ctx, cancel := context.WithTimeout(ctx, taskTimeout) + defer cancel() + + weContext, release, err := getWorkflowExecutionContextForTask(ctx, t.cache, task) + if err != nil { + return err + } + defer func() { release(retError) }() + + mutableState, err := loadMutableStateForTimerTask(ctx, weContext, task, t.metricHandler, t.logger) + if err != nil { + return err + } + if mutableState == nil { + release(nil) // release(nil) so mutable state is not unloaded from cache + return consts.ErrWorkflowExecutionNotFound + } + + timerSequence := t.getTimerSequence(mutableState) + referenceTime := t.shard.GetTimeSource().Now() + timerFired := false + +Loop: + for _, timerSequenceID := range timerSequence.LoadAndSortUserTimers() { + timerInfo, ok := mutableState.GetUserTimerInfoByEventID(timerSequenceID.EventID) + if !ok { + errString := fmt.Sprintf("failed to find in user timer event ID: %v", timerSequenceID.EventID) + t.logger.Error(errString) + return serviceerror.NewInternal(errString) + } + + if !queues.IsTimeExpired(referenceTime, timerSequenceID.Timestamp) { + // timer sequence IDs are sorted, once there is one timer + // sequence ID not expired, all after that wil not expired + break Loop + } + + if !mutableState.IsWorkflowExecutionRunning() { + release(nil) // release(nil) so mutable state is not unloaded from cache + return consts.ErrWorkflowCompleted + } + + if _, err := mutableState.AddTimerFiredEvent(timerInfo.GetTimerId()); err != nil { + return err + } + timerFired = true + } + + if !timerFired { + release(nil) // release(nil) so mutable state is not unloaded from cache + return errNoTimerFired + } + + return t.updateWorkflowExecution(ctx, weContext, mutableState, timerFired) +} + +func (t *timerQueueActiveTaskExecutor) executeActivityTimeoutTask( + ctx context.Context, + task *tasks.ActivityTimeoutTask, +) (retError error) { + ctx, cancel := context.WithTimeout(ctx, taskTimeout) + defer cancel() + + weContext, release, err := getWorkflowExecutionContextForTask(ctx, t.cache, task) + if err != nil { + return err + } + defer func() { release(retError) }() + + mutableState, err := loadMutableStateForTimerTask(ctx, weContext, task, t.metricHandler, t.logger) + if err != nil { + return err + } + if mutableState == nil || !mutableState.IsWorkflowExecutionRunning() { + return nil + } + + 
timerSequence := t.getTimerSequence(mutableState) + referenceTime := t.shard.GetTimeSource().Now() + updateMutableState := false + scheduleWorkflowTask := false + + // need to clear activity heartbeat timer task mask for new activity timer task creation + // NOTE: LastHeartbeatTimeoutVisibilityInSeconds is for deduping heartbeat timer creation as it's possible + // one heartbeat task was persisted multiple times with different taskIDs due to the retry logic + // for updating workflow execution. In that case, only one new heartbeat timeout task should be + // created. + isHeartBeatTask := task.TimeoutType == enumspb.TIMEOUT_TYPE_HEARTBEAT + activityInfo, heartbeatTimeoutVis, ok := mutableState.GetActivityInfoWithTimerHeartbeat(task.EventID) + if isHeartBeatTask && ok && queues.IsTimeExpired(task.GetVisibilityTime(), heartbeatTimeoutVis) { + activityInfo.TimerTaskStatus = activityInfo.TimerTaskStatus &^ workflow.TimerTaskStatusCreatedHeartbeat + if err := mutableState.UpdateActivity(activityInfo); err != nil { + return err + } + updateMutableState = true + } + +Loop: + for _, timerSequenceID := range timerSequence.LoadAndSortActivityTimers() { + activityInfo, ok := mutableState.GetActivityInfo(timerSequenceID.EventID) + if !ok || timerSequenceID.Attempt < activityInfo.Attempt { + // handle 2 cases: + // 1. !ok + // this case can happen since each activity can have 4 timers + // and one of those 4 timers may have fired in this loop + // 2. timerSequenceID.attempt < activityInfo.Attempt + // retry could update activity attempt, should not timeouts new attempt + continue Loop + } + + if !queues.IsTimeExpired(referenceTime, timerSequenceID.Timestamp) { + // timer sequence IDs are sorted, once there is one timer + // sequence ID not expired, all after that wil not expired + break Loop + } + + failureMsg := fmt.Sprintf("activity %v timeout", timerSequenceID.TimerType.String()) + timeoutFailure := failure.NewTimeoutFailure(failureMsg, timerSequenceID.TimerType) + var retryState enumspb.RetryState + if retryState, err = mutableState.RetryActivity( + activityInfo, + timeoutFailure, + ); err != nil { + return err + } else if retryState == enumspb.RETRY_STATE_IN_PROGRESS { + updateMutableState = true + continue Loop + } + + timeoutFailure.GetTimeoutFailureInfo().LastHeartbeatDetails = activityInfo.LastHeartbeatDetails + // If retryState is Timeout then it means that expirationTime is expired. + // ExpirationTime is expired when ScheduleToClose timeout is expired. 
+		if retryState == enumspb.RETRY_STATE_TIMEOUT {
+			timeoutFailure.GetTimeoutFailureInfo().TimeoutType = enumspb.TIMEOUT_TYPE_SCHEDULE_TO_CLOSE
+		}
+
+		t.emitTimeoutMetricScopeWithNamespaceTag(
+			namespace.ID(mutableState.GetExecutionInfo().NamespaceId),
+			metrics.TimerActiveTaskActivityTimeoutScope,
+			timerSequenceID.TimerType,
+		)
+		if _, err := mutableState.AddActivityTaskTimedOutEvent(
+			activityInfo.ScheduledEventId,
+			activityInfo.StartedEventId,
+			timeoutFailure,
+			retryState,
+		); err != nil {
+			return err
+		}
+		updateMutableState = true
+		scheduleWorkflowTask = true
+	}
+
+	if !updateMutableState {
+		return nil
+	}
+	return t.updateWorkflowExecution(ctx, weContext, mutableState, scheduleWorkflowTask)
+}
+
+func (t *timerQueueActiveTaskExecutor) executeWorkflowTaskTimeoutTask(
+	ctx context.Context,
+	task *tasks.WorkflowTaskTimeoutTask,
+) (retError error) {
+	ctx, cancel := context.WithTimeout(ctx, taskTimeout)
+	defer cancel()
+
+	weContext, release, err := getWorkflowExecutionContextForTask(ctx, t.cache, task)
+	if err != nil {
+		return err
+	}
+	defer func() { release(retError) }()
+
+	mutableState, err := loadMutableStateForTimerTask(ctx, weContext, task, t.metricHandler, t.logger)
+	if err != nil {
+		return err
+	}
+	if mutableState == nil || !mutableState.IsWorkflowExecutionRunning() {
+		return nil
+	}
+
+	workflowTask := mutableState.GetWorkflowTaskByID(task.EventID)
+	if workflowTask == nil {
+		return nil
+	}
+
+	if workflowTask.Type == enumsspb.WORKFLOW_TASK_TYPE_SPECULATIVE {
+		// Check if mutable state still points to this task.
+		// Mutable state can lose the speculative WT, or even have a different one there, if, for example, the workflow was evicted from cache.
+		if !mutableState.CheckSpeculativeWorkflowTaskTimeoutTask(task) {
+			return nil
+		}
+	} else {
+		err = CheckTaskVersion(t.shard, t.logger, mutableState.GetNamespaceEntry(), workflowTask.Version, task.Version, task)
+		if err != nil {
+			return err
+		}
+
+		if workflowTask.Attempt != task.ScheduleAttempt {
+			return nil
+		}
+	}
+
+	scheduleWorkflowTask := false
+	switch task.TimeoutType {
+	case enumspb.TIMEOUT_TYPE_START_TO_CLOSE:
+		t.emitTimeoutMetricScopeWithNamespaceTag(
+			namespace.ID(mutableState.GetExecutionInfo().NamespaceId),
+			metrics.TimerActiveTaskWorkflowTaskTimeoutScope,
+			enumspb.TIMEOUT_TYPE_START_TO_CLOSE,
+		)
+		if _, err := mutableState.AddWorkflowTaskTimedOutEvent(
+			workflowTask,
+		); err != nil {
+			return err
+		}
+		scheduleWorkflowTask = true
+
+	case enumspb.TIMEOUT_TYPE_SCHEDULE_TO_START:
+		if workflowTask.StartedEventID != common.EmptyEventID {
+			// workflowTask has already started
+			return nil
+		}
+
+		t.emitTimeoutMetricScopeWithNamespaceTag(
+			namespace.ID(mutableState.GetExecutionInfo().NamespaceId),
+			metrics.TimerActiveTaskWorkflowTaskTimeoutScope,
+			enumspb.TIMEOUT_TYPE_SCHEDULE_TO_START,
+		)
+		_, err := mutableState.AddWorkflowTaskScheduleToStartTimeoutEvent(workflowTask)
+		if err != nil {
+			return err
+		}
+		scheduleWorkflowTask = true
+	}
+
+	return t.updateWorkflowExecution(ctx, weContext, mutableState, scheduleWorkflowTask)
+}
+
+func (t *timerQueueActiveTaskExecutor) executeWorkflowBackoffTimerTask(
+	ctx context.Context,
+	task *tasks.WorkflowBackoffTimerTask,
+) (retError error) {
+	ctx, cancel := context.WithTimeout(ctx, taskTimeout)
+	defer cancel()
+
+	weContext, release, err := getWorkflowExecutionContextForTask(ctx, t.cache, task)
+	if err != nil {
+		return err
+	}
+	defer func() { release(retError) }()
+
+	mutableState, err := loadMutableStateForTimerTask(ctx, weContext, task, t.metricHandler, t.logger)
+	if err != nil {
+		return err
+	}
+	if mutableState == nil || !mutableState.IsWorkflowExecutionRunning() {
+		return nil
+	}
+
+	if task.WorkflowBackoffType == enumsspb.WORKFLOW_BACKOFF_TYPE_RETRY {
+		t.metricHandler.Counter(metrics.WorkflowRetryBackoffTimerCount.GetMetricName()).Record(
+			1,
+			metrics.OperationTag(metrics.TimerActiveTaskWorkflowBackoffTimerScope),
+		)
+	} else if task.WorkflowBackoffType == enumsspb.WORKFLOW_BACKOFF_TYPE_CRON {
+		t.metricHandler.Counter(metrics.WorkflowCronBackoffTimerCount.GetMetricName()).Record(
+			1,
+			metrics.OperationTag(metrics.TimerActiveTaskWorkflowBackoffTimerScope),
+		)
+	} else if task.WorkflowBackoffType == enumsspb.WORKFLOW_BACKOFF_TYPE_DELAY_START {
+		t.metricHandler.Counter(metrics.WorkflowDelayedStartBackoffTimerCount.GetMetricName()).Record(
+			1,
+			metrics.OperationTag(metrics.TimerActiveTaskWorkflowBackoffTimerScope),
+		)
+	}
+
+	if mutableState.HadOrHasWorkflowTask() {
+		// already has workflow task
+		return nil
+	}
+
+	// schedule first workflow task
+	return t.updateWorkflowExecution(ctx, weContext, mutableState, true)
+}
+
+func (t *timerQueueActiveTaskExecutor) executeActivityRetryTimerTask(
+	ctx context.Context,
+	task *tasks.ActivityRetryTimerTask,
+) (retError error) {
+	ctx, cancel := context.WithTimeout(ctx, taskTimeout)
+	defer cancel()
+
+	weContext, release, err := getWorkflowExecutionContextForTask(ctx, t.cache, task)
+	if err != nil {
+		return err
+	}
+	defer func() { release(retError) }()
+
+	mutableState, err := loadMutableStateForTimerTask(ctx, weContext, task, t.metricHandler, t.logger)
+	if err != nil {
+		return err
+	}
+	if mutableState == nil {
+		release(nil) // release(nil) so mutable state is not unloaded from cache
+		return consts.ErrWorkflowExecutionNotFound
+	}
+
+	// generate activity task
+	activityInfo, ok := mutableState.GetActivityInfo(task.EventID)
+	if !ok || task.Attempt < activityInfo.Attempt || activityInfo.StartedEventId != common.EmptyEventID {
+		if ok {
+			t.logger.Info("Duplicate activity retry timer task",
+				tag.WorkflowID(mutableState.GetExecutionInfo().WorkflowId),
+				tag.WorkflowRunID(mutableState.GetExecutionState().GetRunId()),
+				tag.WorkflowNamespaceID(mutableState.GetExecutionInfo().NamespaceId),
+				tag.WorkflowScheduledEventID(activityInfo.ScheduledEventId),
+				tag.Attempt(activityInfo.Attempt),
+				tag.FailoverVersion(activityInfo.Version),
+				tag.TimerTaskStatus(activityInfo.TimerTaskStatus),
+				tag.ScheduleAttempt(task.Attempt))
+		}
+		release(nil) // release(nil) so mutable state is not unloaded from cache
+		return consts.ErrActivityTaskNotFound
+	}
+	err = CheckTaskVersion(t.shard, t.logger, mutableState.GetNamespaceEntry(), activityInfo.Version, task.Version, task)
+	if err != nil {
+		return err
+	}
+
+	if !mutableState.IsWorkflowExecutionRunning() {
+		release(nil) // release(nil) so mutable state is not unloaded from cache
+		return consts.ErrWorkflowCompleted
+	}
+
+	taskQueue := &taskqueuepb.TaskQueue{
+		Name: activityInfo.TaskQueue,
+		Kind: enumspb.TASK_QUEUE_KIND_NORMAL,
+	}
+	scheduleToStartTimeout := timestamp.DurationValue(activityInfo.ScheduleToStartTimeout)
+	directive := worker_versioning.MakeDirectiveForActivityTask(mutableState.GetWorkerVersionStamp(), activityInfo.UseCompatibleVersion)
+
+	// NOTE: do not access anything related to mutable state after this lock release
+	release(nil) // release earlier as we don't need the lock anymore
+
+	_, retError = t.matchingRawClient.AddActivityTask(ctx, &matchingservice.AddActivityTaskRequest{
+		NamespaceId: task.GetNamespaceID(),
+		Execution: &commonpb.WorkflowExecution{
+			WorkflowId: task.GetWorkflowID(),
+			RunId:      task.GetRunID(),
+		},
+		TaskQueue:              taskQueue,
+		ScheduledEventId:       task.EventID,
+		ScheduleToStartTimeout: timestamp.DurationPtr(scheduleToStartTimeout),
+		Clock:                  vclock.NewVectorClock(t.shard.GetClusterMetadata().GetClusterID(), t.shard.GetShardID(), task.TaskID),
+		VersionDirective:       directive,
+	})
+
+	return retError
+}
+
+func (t *timerQueueActiveTaskExecutor) executeWorkflowTimeoutTask(
+	ctx context.Context,
+	task *tasks.WorkflowTimeoutTask,
+) (retError error) {
+	ctx, cancel := context.WithTimeout(ctx, taskTimeout)
+	defer cancel()
+
+	weContext, release, err := getWorkflowExecutionContextForTask(ctx, t.cache, task)
+	if err != nil {
+		return err
+	}
+	defer func() { release(retError) }()
+
+	mutableState, err := loadMutableStateForTimerTask(ctx, weContext, task, t.metricHandler, t.logger)
+	if err != nil {
+		return err
+	}
+	if mutableState == nil || !mutableState.IsWorkflowExecutionRunning() {
+		return nil
+	}
+
+	startVersion, err := mutableState.GetStartVersion()
+	if err != nil {
+		return err
+	}
+	err = CheckTaskVersion(t.shard, t.logger, mutableState.GetNamespaceEntry(), startVersion, task.Version, task)
+	if err != nil {
+		return err
+	}
+
+	timeoutFailure := failure.NewTimeoutFailure("workflow timeout", enumspb.TIMEOUT_TYPE_START_TO_CLOSE)
+	backoffInterval := backoff.NoBackoff
+	retryState := enumspb.RETRY_STATE_TIMEOUT
+	initiator := enumspb.CONTINUE_AS_NEW_INITIATOR_UNSPECIFIED
+
+	wfExpTime := timestamp.TimeValue(mutableState.GetExecutionInfo().WorkflowExecutionExpirationTime)
+	if wfExpTime.IsZero() || wfExpTime.After(t.shard.GetTimeSource().Now()) {
+		backoffInterval, retryState = mutableState.GetRetryBackoffDuration(timeoutFailure)
+		if backoffInterval != backoff.NoBackoff {
+			// We have a retry policy and we should retry.
+			initiator = enumspb.CONTINUE_AS_NEW_INITIATOR_RETRY
+		} else if backoffInterval = mutableState.GetCronBackoffDuration(); backoffInterval != backoff.NoBackoff {
+			// We have a cron schedule.
+			initiator = enumspb.CONTINUE_AS_NEW_INITIATOR_CRON_SCHEDULE
+		}
+	}
+
+	var newRunID string
+	if initiator != enumspb.CONTINUE_AS_NEW_INITIATOR_UNSPECIFIED {
+		newRunID = uuid.New()
+	}
+
+	// First add timeout workflow event, no matter what we're doing next.
+	if err := workflow.TimeoutWorkflow(
+		mutableState,
+		retryState,
+		newRunID,
+	); err != nil {
+		return err
+	}
+
+	// No more retries, or workflow is expired.
+	if initiator == enumspb.CONTINUE_AS_NEW_INITIATOR_UNSPECIFIED {
+		// We apply the update to the execution using optimistic concurrency. If it fails due to a conflict, then reload
+		// the history and try the operation again.
+ return t.updateWorkflowExecution(ctx, weContext, mutableState, false) + } + + startEvent, err := mutableState.GetStartEvent(ctx) + if err != nil { + return err + } + startAttr := startEvent.GetWorkflowExecutionStartedEventAttributes() + + newMutableState := workflow.NewMutableState( + t.shard, + t.shard.GetEventsCache(), + t.shard.GetLogger(), + mutableState.GetNamespaceEntry(), + t.shard.GetTimeSource().Now(), + ) + err = workflow.SetupNewWorkflowForRetryOrCron( + ctx, + mutableState, + newMutableState, + newRunID, + startAttr, + startAttr.LastCompletionResult, + timeoutFailure, + backoffInterval, + initiator, + ) + if err != nil { + return err + } + + err = newMutableState.SetHistoryTree( + ctx, + newMutableState.GetExecutionInfo().WorkflowExecutionTimeout, + newMutableState.GetExecutionInfo().WorkflowRunTimeout, + newRunID) + if err != nil { + return err + } + + newExecutionInfo := newMutableState.GetExecutionInfo() + newExecutionState := newMutableState.GetExecutionState() + return weContext.UpdateWorkflowExecutionWithNewAsActive( + ctx, + workflow.NewContext( + t.shard, + definition.NewWorkflowKey( + newExecutionInfo.NamespaceId, + newExecutionInfo.WorkflowId, + newExecutionState.RunId, + ), + t.logger, + ), + newMutableState, + ) +} + +func (t *timerQueueActiveTaskExecutor) getTimerSequence( + mutableState workflow.MutableState, +) workflow.TimerSequence { + return workflow.NewTimerSequence(mutableState) +} + +func (t *timerQueueActiveTaskExecutor) updateWorkflowExecution( + ctx context.Context, + context workflow.Context, + mutableState workflow.MutableState, + scheduleNewWorkflowTask bool, +) error { + var err error + if scheduleNewWorkflowTask { + // Schedule a new workflow task. + err = workflow.ScheduleWorkflowTask(mutableState) + if err != nil { + return err + } + } + return context.UpdateWorkflowExecutionAsActive(ctx) +} + +func (t *timerQueueActiveTaskExecutor) emitTimeoutMetricScopeWithNamespaceTag( + namespaceID namespace.ID, + operation string, + timerType enumspb.TimeoutType, +) { + namespaceEntry, err := t.registry.GetNamespaceByID(namespaceID) + if err != nil { + return + } + metricsScope := t.metricHandler.WithTags( + metrics.OperationTag(operation), + metrics.NamespaceTag(namespaceEntry.Name().String()), + ) + switch timerType { + case enumspb.TIMEOUT_TYPE_SCHEDULE_TO_START: + metricsScope.Counter(metrics.ScheduleToStartTimeoutCounter.GetMetricName()).Record(1) + case enumspb.TIMEOUT_TYPE_SCHEDULE_TO_CLOSE: + metricsScope.Counter(metrics.ScheduleToCloseTimeoutCounter.GetMetricName()).Record(1) + case enumspb.TIMEOUT_TYPE_START_TO_CLOSE: + metricsScope.Counter(metrics.StartToCloseTimeoutCounter.GetMetricName()).Record(1) + case enumspb.TIMEOUT_TYPE_HEARTBEAT: + metricsScope.Counter(metrics.HeartbeatTimeoutCounter.GetMetricName()).Record(1) + } +} diff -Nru temporal-1.21.5-1/src/service/history/timer_queue_active_task_executor_test.go temporal-1.22.5/src/service/history/timer_queue_active_task_executor_test.go --- temporal-1.21.5-1/src/service/history/timer_queue_active_task_executor_test.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/service/history/timer_queue_active_task_executor_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,1615 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package history + +import ( + "context" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/pborman/uuid" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "go.temporal.io/server/service/history/consts" + + commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" + taskqueuepb "go.temporal.io/api/taskqueue/v1" + "go.temporal.io/api/workflowservice/v1" + + enumsspb "go.temporal.io/server/api/enums/v1" + "go.temporal.io/server/api/historyservice/v1" + "go.temporal.io/server/api/matchingservice/v1" + "go.temporal.io/server/api/matchingservicemock/v1" + persistencespb "go.temporal.io/server/api/persistence/v1" + "go.temporal.io/server/common" + "go.temporal.io/server/common/clock" + "go.temporal.io/server/common/cluster" + "go.temporal.io/server/common/definition" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/namespace" + "go.temporal.io/server/common/persistence" + "go.temporal.io/server/common/persistence/versionhistory" + "go.temporal.io/server/common/primitives/timestamp" + "go.temporal.io/server/common/worker_versioning" + "go.temporal.io/server/service/history/deletemanager" + "go.temporal.io/server/service/history/events" + "go.temporal.io/server/service/history/queues" + "go.temporal.io/server/service/history/shard" + "go.temporal.io/server/service/history/tasks" + "go.temporal.io/server/service/history/tests" + "go.temporal.io/server/service/history/vclock" + "go.temporal.io/server/service/history/workflow" + wcache "go.temporal.io/server/service/history/workflow/cache" +) + +type ( + timerQueueActiveTaskExecutorSuite struct { + suite.Suite + *require.Assertions + + controller *gomock.Controller + mockShard *shard.ContextTest + mockTxProcessor *queues.MockQueue + mockTimerProcessor *queues.MockQueue + mockVisibilityProcessor *queues.MockQueue + mockArchivalProcessor *queues.MockQueue + mockNamespaceCache *namespace.MockRegistry + mockMatchingClient *matchingservicemock.MockMatchingServiceClient + mockClusterMetadata *cluster.MockMetadata + + mockHistoryEngine *historyEngineImpl + mockDeleteManager *deletemanager.MockDeleteManager + mockExecutionMgr *persistence.MockExecutionManager + + workflowCache wcache.Cache + logger log.Logger + namespaceID namespace.ID + namespaceEntry *namespace.Namespace + version int64 + now time.Time + timeSource *clock.EventTimeSource + 
timerQueueActiveTaskExecutor *timerQueueActiveTaskExecutor + } +) + +func TestTimerQueueActiveTaskExecutorSuite(t *testing.T) { + s := new(timerQueueActiveTaskExecutorSuite) + suite.Run(t, s) +} + +func (s *timerQueueActiveTaskExecutorSuite) SetupSuite() { +} + +func (s *timerQueueActiveTaskExecutorSuite) SetupTest() { + s.Assertions = require.New(s.T()) + + s.namespaceID = tests.NamespaceID + s.namespaceEntry = tests.GlobalNamespaceEntry + s.version = s.namespaceEntry.FailoverVersion() + s.now = time.Now().UTC() + s.timeSource = clock.NewEventTimeSource().Update(s.now) + + s.controller = gomock.NewController(s.T()) + s.mockTxProcessor = queues.NewMockQueue(s.controller) + s.mockTimerProcessor = queues.NewMockQueue(s.controller) + s.mockVisibilityProcessor = queues.NewMockQueue(s.controller) + s.mockArchivalProcessor = queues.NewMockQueue(s.controller) + s.mockTxProcessor.EXPECT().Category().Return(tasks.CategoryTransfer).AnyTimes() + s.mockTimerProcessor.EXPECT().Category().Return(tasks.CategoryTimer).AnyTimes() + s.mockVisibilityProcessor.EXPECT().Category().Return(tasks.CategoryVisibility).AnyTimes() + s.mockArchivalProcessor.EXPECT().Category().Return(tasks.CategoryArchival).AnyTimes() + s.mockTxProcessor.EXPECT().NotifyNewTasks(gomock.Any()).AnyTimes() + s.mockTimerProcessor.EXPECT().NotifyNewTasks(gomock.Any()).AnyTimes() + s.mockVisibilityProcessor.EXPECT().NotifyNewTasks(gomock.Any()).AnyTimes() + s.mockArchivalProcessor.EXPECT().NotifyNewTasks(gomock.Any()).AnyTimes() + + config := tests.NewDynamicConfig() + s.mockShard = shard.NewTestContextWithTimeSource( + s.controller, + &persistencespb.ShardInfo{ + ShardId: 1, + RangeId: 1, + }, + config, + s.timeSource, + ) + s.mockShard.SetEventsCacheForTesting(events.NewEventsCache( + s.mockShard.GetShardID(), + s.mockShard.GetConfig().EventsCacheMaxSizeBytes(), + s.mockShard.GetConfig().EventsCacheTTL(), + s.mockShard.GetExecutionManager(), + false, + s.mockShard.GetLogger(), + s.mockShard.GetMetricsHandler(), + )) + + s.mockNamespaceCache = s.mockShard.Resource.NamespaceCache + s.mockMatchingClient = s.mockShard.Resource.MatchingClient + s.mockExecutionMgr = s.mockShard.Resource.ExecutionMgr + s.mockClusterMetadata = s.mockShard.Resource.ClusterMetadata + // ack manager will use the namespace information + s.mockNamespaceCache.EXPECT().GetNamespaceByID(gomock.Any()).Return(tests.GlobalNamespaceEntry, nil).AnyTimes() + s.mockNamespaceCache.EXPECT().GetNamespaceName(gomock.Any()).Return(tests.Namespace, nil).AnyTimes() + s.mockClusterMetadata.EXPECT().GetClusterID().Return(tests.Version).AnyTimes() + s.mockClusterMetadata.EXPECT().IsVersionFromSameCluster(tests.Version, tests.Version).Return(true).AnyTimes() + s.mockClusterMetadata.EXPECT().GetCurrentClusterName().Return(cluster.TestCurrentClusterName).AnyTimes() + s.mockClusterMetadata.EXPECT().GetAllClusterInfo().Return(cluster.TestAllClusterInfo).AnyTimes() + s.mockClusterMetadata.EXPECT().IsGlobalNamespaceEnabled().Return(true).AnyTimes() + s.mockClusterMetadata.EXPECT().ClusterNameForFailoverVersion(s.namespaceEntry.IsGlobalNamespace(), s.version).Return(s.mockClusterMetadata.GetCurrentClusterName()).AnyTimes() + s.workflowCache = wcache.NewCache(s.mockShard) + s.logger = s.mockShard.GetLogger() + + s.mockDeleteManager = deletemanager.NewMockDeleteManager(s.controller) + h := &historyEngineImpl{ + currentClusterName: s.mockShard.Resource.GetClusterMetadata().GetCurrentClusterName(), + shard: s.mockShard, + clusterMetadata: s.mockClusterMetadata, + executionManager: s.mockExecutionMgr, 
+ logger: s.logger, + tokenSerializer: common.NewProtoTaskTokenSerializer(), + metricsHandler: s.mockShard.GetMetricsHandler(), + eventNotifier: events.NewNotifier(clock.NewRealTimeSource(), metrics.NoopMetricsHandler, func(namespace.ID, string) int32 { return 1 }), + queueProcessors: map[tasks.Category]queues.Queue{ + s.mockTxProcessor.Category(): s.mockTxProcessor, + s.mockTimerProcessor.Category(): s.mockTimerProcessor, + s.mockVisibilityProcessor.Category(): s.mockVisibilityProcessor, + s.mockArchivalProcessor.Category(): s.mockArchivalProcessor, + }, + } + s.mockShard.SetEngineForTesting(h) + s.mockHistoryEngine = h + + s.timerQueueActiveTaskExecutor = newTimerQueueActiveTaskExecutor( + s.mockShard, + s.workflowCache, + s.mockDeleteManager, + s.logger, + metrics.NoopMetricsHandler, + config, + s.mockShard.Resource.GetMatchingClient(), + ).(*timerQueueActiveTaskExecutor) +} + +func (s *timerQueueActiveTaskExecutorSuite) TearDownTest() { + s.controller.Finish() + s.mockShard.StopForTest() +} + +func (s *timerQueueActiveTaskExecutorSuite) TestProcessUserTimerTimeout_Fire() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + mutableState := workflow.TestGlobalMutableState( + s.mockShard, + s.mockShard.GetEventsCache(), + s.logger, + s.version, + execution.GetRunId(), + ) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowRunTimeout: timestamp.DurationPtr(200 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + }, + ) + s.Nil(err) + + wt := addWorkflowTaskScheduledEvent(mutableState) + event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) + wt.StartedEventID = event.GetEventId() + event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") + + timerID := "timer" + timerTimeout := 2 * time.Second + event, _ = addTimerStartedEvent(mutableState, event.GetEventId(), timerID, timerTimeout) + + timerSequence := workflow.NewTimerSequence(mutableState) + mutableState.InsertTasks[tasks.CategoryTimer] = nil + modified, err := timerSequence.CreateNextUserTimer() + s.NoError(err) + s.True(modified) + task := mutableState.InsertTasks[tasks.CategoryTimer][0] + + timerTask := &tasks.UserTimerTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + Version: s.version, + TaskID: int64(100), + VisibilityTimestamp: task.(*tasks.UserTimerTask).VisibilityTimestamp, + EventID: event.EventId, + } + + persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) + + s.timeSource.Update(s.now.Add(2 * timerTimeout)) + _, _, err = s.timerQueueActiveTaskExecutor.Execute(context.Background(), 
s.newTaskExecutable(timerTask)) + s.NoError(err) + + _, ok := s.getMutableStateFromCache(s.namespaceID, execution.GetWorkflowId(), execution.GetRunId()).GetUserTimerInfo(timerID) + s.False(ok) +} + +func (s *timerQueueActiveTaskExecutorSuite) TestProcessUserTimerTimeout_Noop() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + mutableState := workflow.TestGlobalMutableState( + s.mockShard, + s.mockShard.GetEventsCache(), + s.logger, + s.version, + execution.GetRunId(), + ) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowRunTimeout: timestamp.DurationPtr(200 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + }, + ) + s.Nil(err) + + wt := addWorkflowTaskScheduledEvent(mutableState) + event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) + wt.StartedEventID = event.GetEventId() + event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") + + timerID := "timer" + timerTimeout := 2 * time.Second + event, _ = addTimerStartedEvent(mutableState, event.GetEventId(), timerID, timerTimeout) + + timerSequence := workflow.NewTimerSequence(mutableState) + mutableState.InsertTasks[tasks.CategoryTimer] = nil + modified, err := timerSequence.CreateNextUserTimer() + s.NoError(err) + s.True(modified) + task := mutableState.InsertTasks[tasks.CategoryTimer][0] + + timerTask := &tasks.UserTimerTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + Version: s.version, + TaskID: int64(100), + VisibilityTimestamp: task.(*tasks.UserTimerTask).VisibilityTimestamp, + EventID: event.EventId, + } + + event = addTimerFiredEvent(mutableState, timerID) + // Flush buffered events so real IDs get assigned + mutableState.FlushBufferedEvents() + + persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + + s.timeSource.Update(s.now.Add(2 * timerTimeout)) + _, _, err = s.timerQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) + s.ErrorIs(err, errNoTimerFired) +} + +func (s *timerQueueActiveTaskExecutorSuite) TestProcessUserTimerTimeout_WfClosed() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + mutableState := workflow.TestGlobalMutableState( + s.mockShard, + s.mockShard.GetEventsCache(), + s.logger, + s.version, + execution.GetRunId(), + ) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + 
TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowRunTimeout: timestamp.DurationPtr(200 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + }, + ) + s.Nil(err) + + wt := addWorkflowTaskScheduledEvent(mutableState) + event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) + wt.StartedEventID = event.GetEventId() + event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") + + timerID := "timer" + timerTimeout := 2 * time.Second + event, _ = addTimerStartedEvent(mutableState, event.GetEventId(), timerID, timerTimeout) + + timerSequence := workflow.NewTimerSequence(mutableState) + mutableState.InsertTasks[tasks.CategoryTimer] = nil + modified, err := timerSequence.CreateNextUserTimer() + s.NoError(err) + s.True(modified) + task := mutableState.InsertTasks[tasks.CategoryTimer][0] + + timerTask := &tasks.UserTimerTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + Version: s.version, + TaskID: int64(100), + VisibilityTimestamp: task.(*tasks.UserTimerTask).VisibilityTimestamp, + EventID: event.EventId, + } + + persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) + persistenceMutableState.ExecutionState.State = enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + + s.timeSource.Update(s.now.Add(2 * timerTimeout)) + _, _, err = s.timerQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) + s.ErrorIs(err, consts.ErrWorkflowCompleted) +} + +func (s *timerQueueActiveTaskExecutorSuite) TestProcessUserTimerTimeout_NoTimerAndWfClosed() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + mutableState := workflow.TestGlobalMutableState( + s.mockShard, + s.mockShard.GetEventsCache(), + s.logger, + s.version, + execution.GetRunId(), + ) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowRunTimeout: timestamp.DurationPtr(200 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + }, + ) + s.Nil(err) + + wt := addWorkflowTaskScheduledEvent(mutableState) + event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) + wt.StartedEventID = event.GetEventId() + event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") + + timerTask := &tasks.UserTimerTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + Version: s.version, + TaskID: int64(100), + VisibilityTimestamp: time.Now(), + EventID: event.EventId, + } + + persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) + 
persistenceMutableState.ExecutionState.State = enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + + _, _, err = s.timerQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) + s.ErrorIs(err, errNoTimerFired) +} + +func (s *timerQueueActiveTaskExecutorSuite) TestProcessActivityTimeout_NoRetryPolicy_Fire() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowExecutionTimeout: timestamp.DurationPtr(200 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + }, + ) + s.Nil(err) + + wt := addWorkflowTaskScheduledEvent(mutableState) + event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) + wt.StartedEventID = event.GetEventId() + event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") + + taskqueue := "taskqueue" + activityID := "activity" + activityType := "activity type" + timerTimeout := 2 * time.Second + scheduledEvent, _ := addActivityTaskScheduledEvent( + mutableState, + event.GetEventId(), + activityID, + activityType, + taskqueue, + nil, + timerTimeout, + timerTimeout, + timerTimeout, + timerTimeout, + ) + + timerSequence := workflow.NewTimerSequence(mutableState) + mutableState.InsertTasks[tasks.CategoryTimer] = nil + modified, err := timerSequence.CreateNextActivityTimer() + s.NoError(err) + s.True(modified) + task := mutableState.InsertTasks[tasks.CategoryTimer][0] + + timerTask := &tasks.ActivityTimeoutTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + Attempt: 1, + Version: s.version, + TaskID: int64(100), + TimeoutType: enumspb.TIMEOUT_TYPE_SCHEDULE_TO_CLOSE, + VisibilityTimestamp: task.(*tasks.ActivityTimeoutTask).VisibilityTimestamp, + EventID: wt.ScheduledEventID, + } + + persistenceMutableState := s.createPersistenceMutableState(mutableState, scheduledEvent.GetEventId(), scheduledEvent.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) + + s.timeSource.Update(s.now.Add(2 * timerTimeout)) + _, _, err = s.timerQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) + s.NoError(err) + + _, ok := s.getMutableStateFromCache(s.namespaceID, execution.GetWorkflowId(), execution.GetRunId()).GetActivityInfo(scheduledEvent.GetEventId()) + s.False(ok) +} + +func (s *timerQueueActiveTaskExecutorSuite) TestProcessActivityTimeout_NoRetryPolicy_Noop() { + 
execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowExecutionTimeout: timestamp.DurationPtr(200 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + }, + ) + s.Nil(err) + + wt := addWorkflowTaskScheduledEvent(mutableState) + event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) + wt.StartedEventID = event.GetEventId() + event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") + + identity := "identity" + taskqueue := "taskqueue" + activityID := "activity" + activityType := "activity type" + timerTimeout := 2 * time.Second + scheduledEvent, _ := addActivityTaskScheduledEvent( + mutableState, + event.GetEventId(), + activityID, + activityType, + taskqueue, + nil, + timerTimeout, + timerTimeout, + timerTimeout, + timerTimeout, + ) + startedEvent := addActivityTaskStartedEvent(mutableState, scheduledEvent.GetEventId(), identity) + + timerSequence := workflow.NewTimerSequence(mutableState) + mutableState.InsertTasks[tasks.CategoryTimer] = nil + modified, err := timerSequence.CreateNextActivityTimer() + s.NoError(err) + s.True(modified) + task := mutableState.InsertTasks[tasks.CategoryTimer][0] + + timerTask := &tasks.ActivityTimeoutTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + Attempt: 1, + Version: s.version, + TaskID: int64(100), + TimeoutType: enumspb.TIMEOUT_TYPE_SCHEDULE_TO_CLOSE, + VisibilityTimestamp: task.(*tasks.ActivityTimeoutTask).VisibilityTimestamp, + EventID: wt.ScheduledEventID, + } + + completeEvent := addActivityTaskCompletedEvent(mutableState, scheduledEvent.GetEventId(), startedEvent.GetEventId(), nil, identity) + // Flush buffered events so real IDs get assigned + mutableState.FlushBufferedEvents() + + persistenceMutableState := s.createPersistenceMutableState(mutableState, completeEvent.GetEventId(), completeEvent.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + + s.timeSource.Update(s.now.Add(2 * timerTimeout)) + _, _, err = s.timerQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) + s.NoError(err) +} + +func (s *timerQueueActiveTaskExecutorSuite) TestProcessActivityTimeout_RetryPolicy_Retry() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + 
&historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowRunTimeout: timestamp.DurationPtr(200 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + }, + ) + s.Nil(err) + + wt := addWorkflowTaskScheduledEvent(mutableState) + event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) + wt.StartedEventID = event.GetEventId() + event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") + + identity := "identity" + taskqueue := "taskqueue" + activityID := "activity" + activityType := "activity type" + timerTimeout := 2 * time.Second + scheduledEvent, _ := addActivityTaskScheduledEventWithRetry( + mutableState, + event.GetEventId(), + activityID, + activityType, + taskqueue, + nil, + 999*time.Second, + timerTimeout, + timerTimeout, + timerTimeout, + &commonpb.RetryPolicy{ + InitialInterval: timestamp.DurationPtr(1 * time.Second), + BackoffCoefficient: 1.2, + MaximumInterval: timestamp.DurationPtr(5 * time.Second), + MaximumAttempts: 5, + NonRetryableErrorTypes: []string{"(╯' - ')╯ ┻━┻ "}, + }, + ) + startedEvent := addActivityTaskStartedEvent(mutableState, scheduledEvent.GetEventId(), identity) + s.Nil(startedEvent) + + timerSequence := workflow.NewTimerSequence(mutableState) + mutableState.InsertTasks[tasks.CategoryTimer] = nil + modified, err := timerSequence.CreateNextActivityTimer() + s.NoError(err) + s.True(modified) + task := mutableState.InsertTasks[tasks.CategoryTimer][0] + + timerTask := &tasks.ActivityTimeoutTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + Attempt: 1, + Version: s.version, + TaskID: int64(100), + TimeoutType: enumspb.TIMEOUT_TYPE_SCHEDULE_TO_CLOSE, + VisibilityTimestamp: task.(*tasks.ActivityTimeoutTask).VisibilityTimestamp, + EventID: wt.ScheduledEventID, + } + + persistenceMutableState := s.createPersistenceMutableState(mutableState, scheduledEvent.GetEventId(), scheduledEvent.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) + + s.timeSource.Update(s.now.Add(2 * timerTimeout)) + _, _, err = s.timerQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) + s.NoError(err) + + activityInfo, ok := s.getMutableStateFromCache(s.namespaceID, execution.GetWorkflowId(), execution.GetRunId()).GetActivityInfo(scheduledEvent.GetEventId()) + s.True(ok) + s.Equal(scheduledEvent.GetEventId(), activityInfo.ScheduledEventId) + s.Equal(common.EmptyEventID, activityInfo.StartedEventId) + // only a schedule to start timer will be created, apart from the retry timer + s.Equal(int32(workflow.TimerTaskStatusCreatedScheduleToStart), activityInfo.TimerTaskStatus) +} + +func (s *timerQueueActiveTaskExecutorSuite) TestProcessActivityTimeout_RetryPolicy_Fire() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task 
queue" + + mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowRunTimeout: timestamp.DurationPtr(200 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + }, + ) + s.Nil(err) + + wt := addWorkflowTaskScheduledEvent(mutableState) + event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) + wt.StartedEventID = event.GetEventId() + event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") + + taskqueue := "taskqueue" + activityID := "activity" + activityType := "activity type" + timerTimeout := 2 * time.Second + scheduledEvent, _ := addActivityTaskScheduledEventWithRetry( + mutableState, + event.GetEventId(), + activityID, + activityType, + taskqueue, + nil, + timerTimeout, + timerTimeout, + timerTimeout, + timerTimeout, + &commonpb.RetryPolicy{ + InitialInterval: timestamp.DurationPtr(1 * time.Second), + BackoffCoefficient: 1.2, + MaximumInterval: timestamp.DurationPtr(5 * time.Second), + MaximumAttempts: 5, + NonRetryableErrorTypes: []string{"(╯' - ')╯ ┻━┻ "}, + }, + ) + + timerSequence := workflow.NewTimerSequence(mutableState) + mutableState.InsertTasks[tasks.CategoryTimer] = nil + modified, err := timerSequence.CreateNextActivityTimer() + s.NoError(err) + s.True(modified) + task := mutableState.InsertTasks[tasks.CategoryTimer][0] + + timerTask := &tasks.ActivityTimeoutTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + Attempt: 1, + Version: s.version, + TaskID: int64(100), + TimeoutType: enumspb.TIMEOUT_TYPE_SCHEDULE_TO_CLOSE, + VisibilityTimestamp: task.(*tasks.ActivityTimeoutTask).VisibilityTimestamp, + EventID: wt.ScheduledEventID, + } + + persistenceMutableState := s.createPersistenceMutableState(mutableState, scheduledEvent.GetEventId(), scheduledEvent.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) + + s.timeSource.Update(s.now.Add(2 * timerTimeout)) + _, _, err = s.timerQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) + s.NoError(err) + + _, ok := s.getMutableStateFromCache(s.namespaceID, execution.GetWorkflowId(), execution.GetRunId()).GetActivityInfo(scheduledEvent.GetEventId()) + s.False(ok) +} + +func (s *timerQueueActiveTaskExecutorSuite) TestProcessActivityTimeout_RetryPolicy_Noop() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + 
&historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowRunTimeout: timestamp.DurationPtr(200 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + }, + ) + s.Nil(err) + + wt := addWorkflowTaskScheduledEvent(mutableState) + event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) + wt.StartedEventID = event.GetEventId() + event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") + + identity := "identity" + taskqueue := "taskqueue" + activityID := "activity" + activityType := "activity type" + timerTimeout := 2 * time.Second + scheduledEvent, _ := addActivityTaskScheduledEventWithRetry( + mutableState, + event.GetEventId(), + activityID, + activityType, + taskqueue, + nil, + timerTimeout, + timerTimeout, + timerTimeout, + timerTimeout, + &commonpb.RetryPolicy{ + InitialInterval: timestamp.DurationPtr(1 * time.Second), + BackoffCoefficient: 1.2, + MaximumInterval: timestamp.DurationPtr(5 * time.Second), + MaximumAttempts: 5, + NonRetryableErrorTypes: []string{"(╯' - ')╯ ┻━┻ "}, + }, + ) + startedEvent := addActivityTaskStartedEvent(mutableState, scheduledEvent.GetEventId(), identity) + s.Nil(startedEvent) + + timerSequence := workflow.NewTimerSequence(mutableState) + mutableState.InsertTasks[tasks.CategoryTimer] = nil + modified, err := timerSequence.CreateNextActivityTimer() + s.NoError(err) + s.True(modified) + task := mutableState.InsertTasks[tasks.CategoryTimer][0] + + timerTask := &tasks.ActivityTimeoutTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + Attempt: 1, + Version: s.version, + TaskID: int64(100), + TimeoutType: enumspb.TIMEOUT_TYPE_SCHEDULE_TO_CLOSE, + VisibilityTimestamp: task.(*tasks.ActivityTimeoutTask).VisibilityTimestamp, + EventID: wt.ScheduledEventID, + } + + completeEvent := addActivityTaskCompletedEvent(mutableState, scheduledEvent.GetEventId(), common.TransientEventID, nil, identity) + // Flush buffered events so real IDs get assigned + mutableState.FlushBufferedEvents() + + persistenceMutableState := s.createPersistenceMutableState(mutableState, completeEvent.GetEventId(), completeEvent.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + + s.timeSource.Update(s.now.Add(2 * timerTimeout)) + _, _, err = s.timerQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) + s.NoError(err) +} + +func (s *timerQueueActiveTaskExecutorSuite) TestProcessActivityTimeout_Heartbeat_Noop() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: 
&commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowRunTimeout: timestamp.DurationPtr(200 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + }, + ) + s.Nil(err) + + wt := addWorkflowTaskScheduledEvent(mutableState) + event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) + wt.StartedEventID = event.GetEventId() + event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") + + identity := "identity" + taskqueue := "taskqueue" + activityID := "activity" + activityType := "activity type" + timerTimeout := 2 * time.Second + heartbeatTimerTimeout := time.Second + scheduledEvent, _ := addActivityTaskScheduledEventWithRetry( + mutableState, + event.GetEventId(), + activityID, + activityType, + taskqueue, + nil, + timerTimeout, + timerTimeout, + timerTimeout, + heartbeatTimerTimeout, + &commonpb.RetryPolicy{ + InitialInterval: timestamp.DurationPtr(1 * time.Second), + BackoffCoefficient: 1.2, + MaximumInterval: timestamp.DurationPtr(5 * time.Second), + MaximumAttempts: 5, + NonRetryableErrorTypes: []string{"(╯' - ')╯ ┻━┻ "}, + }, + ) + startedEvent := addActivityTaskStartedEvent(mutableState, scheduledEvent.GetEventId(), identity) + s.Nil(startedEvent) + + timerSequence := workflow.NewTimerSequence(mutableState) + mutableState.InsertTasks[tasks.CategoryTimer] = nil + modified, err := timerSequence.CreateNextActivityTimer() + s.NoError(err) + s.True(modified) + task := mutableState.InsertTasks[tasks.CategoryTimer][0] + s.Equal(enumspb.TIMEOUT_TYPE_HEARTBEAT, task.(*tasks.ActivityTimeoutTask).TimeoutType) + + timerTask := &tasks.ActivityTimeoutTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + Attempt: 1, + Version: s.version, + TaskID: int64(100), + TimeoutType: enumspb.TIMEOUT_TYPE_HEARTBEAT, + VisibilityTimestamp: time.Time{}, + EventID: scheduledEvent.GetEventId(), + } + + persistenceMutableState := s.createPersistenceMutableState(mutableState, scheduledEvent.GetEventId(), scheduledEvent.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + + _, _, err = s.timerQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) + s.NoError(err) +} + +func (s *timerQueueActiveTaskExecutorSuite) TestWorkflowTaskTimeout_Fire() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowRunTimeout: timestamp.DurationPtr(200 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + }, + ) + s.Nil(err) + + wt := addWorkflowTaskScheduledEvent(mutableState) + startedEvent := addWorkflowTaskStartedEvent(mutableState, 
wt.ScheduledEventID, taskQueueName, uuid.New()) + + timerTask := &tasks.WorkflowTaskTimeoutTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + ScheduleAttempt: 1, + Version: s.version, + TaskID: int64(100), + TimeoutType: enumspb.TIMEOUT_TYPE_START_TO_CLOSE, + VisibilityTimestamp: s.now, + EventID: wt.ScheduledEventID, + } + + persistenceMutableState := s.createPersistenceMutableState(mutableState, startedEvent.GetEventId(), startedEvent.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) + + _, _, err = s.timerQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) + s.NoError(err) + + workflowTask := s.getMutableStateFromCache(s.namespaceID, execution.GetWorkflowId(), execution.GetRunId()).GetPendingWorkflowTask() + s.NotNil(workflowTask) + s.True(workflowTask.ScheduledEventID != common.EmptyEventID) + s.Equal(common.EmptyEventID, workflowTask.StartedEventID) + s.Equal(int32(2), workflowTask.Attempt) +} + +func (s *timerQueueActiveTaskExecutorSuite) TestWorkflowTaskTimeout_Noop() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowRunTimeout: timestamp.DurationPtr(200 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + }, + ) + s.Nil(err) + + wt := addWorkflowTaskScheduledEvent(mutableState) + startedEvent := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) + + timerTask := &tasks.WorkflowTaskTimeoutTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + ScheduleAttempt: 1, + Version: s.version, + TaskID: int64(100), + TimeoutType: enumspb.TIMEOUT_TYPE_START_TO_CLOSE, + VisibilityTimestamp: s.now, + EventID: wt.ScheduledEventID - 1, + } + + persistenceMutableState := s.createPersistenceMutableState(mutableState, startedEvent.GetEventId(), startedEvent.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + + _, _, err = s.timerQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) + s.NoError(err) +} + +func (s *timerQueueActiveTaskExecutorSuite) TestWorkflowBackoffTimer_Fire() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, 
s.version, execution.GetRunId()) + event, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowRunTimeout: timestamp.DurationPtr(200 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + }, + ) + s.Nil(err) + + timerTask := &tasks.WorkflowBackoffTimerTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + Version: s.version, + TaskID: int64(100), + WorkflowBackoffType: enumsspb.WORKFLOW_BACKOFF_TYPE_RETRY, + VisibilityTimestamp: s.now, + } + + persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) + + _, _, err = s.timerQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) + s.NoError(err) + + workflowTask := s.getMutableStateFromCache(s.namespaceID, execution.GetWorkflowId(), execution.GetRunId()).GetPendingWorkflowTask() + s.NotNil(workflowTask) + s.True(workflowTask.ScheduledEventID != common.EmptyEventID) + s.Equal(common.EmptyEventID, workflowTask.StartedEventID) + s.Equal(int32(1), workflowTask.Attempt) +} + +func (s *timerQueueActiveTaskExecutorSuite) TestWorkflowBackoffTimer_Noop() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowRunTimeout: timestamp.DurationPtr(200 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + }, + ) + s.Nil(err) + + wt := addWorkflowTaskScheduledEvent(mutableState) + event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) + wt.StartedEventID = event.GetEventId() + event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") + + timerTask := &tasks.WorkflowBackoffTimerTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + Version: s.version, + TaskID: int64(100), + WorkflowBackoffType: enumsspb.WORKFLOW_BACKOFF_TYPE_RETRY, + VisibilityTimestamp: s.now, + } + + persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: 
persistenceMutableState}, nil) + + _, _, err = s.timerQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) + s.NoError(err) +} + +func (s *timerQueueActiveTaskExecutorSuite) TestActivityRetryTimer_Fire() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{ + Name: taskQueueName, + Kind: enumspb.TASK_QUEUE_KIND_NORMAL, + }, + WorkflowRunTimeout: timestamp.DurationPtr(200 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + }, + ) + s.Nil(err) + + wt := addWorkflowTaskScheduledEvent(mutableState) + event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) + wt.StartedEventID = event.GetEventId() + event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") + + activityID := "activity" + activityType := "activity type" + timerTimeout := 2 * time.Second + scheduledEvent, activityInfo := addActivityTaskScheduledEventWithRetry( + mutableState, + event.GetEventId(), + activityID, + activityType, + taskQueueName, + nil, + timerTimeout, + timerTimeout, + timerTimeout, + timerTimeout, + &commonpb.RetryPolicy{ + InitialInterval: timestamp.DurationPtr(1 * time.Second), + BackoffCoefficient: 1.2, + MaximumInterval: timestamp.DurationPtr(5 * time.Second), + MaximumAttempts: 5, + NonRetryableErrorTypes: []string{"(╯' - ')╯ ┻━┻ "}, + }, + ) + activityInfo.Attempt = 1 + + timerTask := &tasks.ActivityRetryTimerTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + Version: s.version, + TaskID: int64(100), + VisibilityTimestamp: s.now, + EventID: activityInfo.ScheduledEventId, + Attempt: activityInfo.Attempt, + } + + persistenceMutableState := s.createPersistenceMutableState(mutableState, scheduledEvent.GetEventId(), scheduledEvent.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + s.mockMatchingClient.EXPECT().AddActivityTask( + gomock.Any(), + &matchingservice.AddActivityTaskRequest{ + NamespaceId: s.namespaceID.String(), + Execution: &execution, + TaskQueue: &taskqueuepb.TaskQueue{ + Name: activityInfo.TaskQueue, + Kind: enumspb.TASK_QUEUE_KIND_NORMAL, + }, + ScheduledEventId: activityInfo.ScheduledEventId, + ScheduleToStartTimeout: activityInfo.ScheduleToStartTimeout, + Clock: vclock.NewVectorClock(s.mockClusterMetadata.GetClusterID(), s.mockShard.GetShardID(), timerTask.TaskID), + VersionDirective: worker_versioning.MakeDirectiveForActivityTask(nil, false), + }, + gomock.Any(), + ).Return(&matchingservice.AddActivityTaskResponse{}, nil) + + _, _, err = s.timerQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) + s.NoError(err) +} + +func (s *timerQueueActiveTaskExecutorSuite) 
TestActivityRetryTimer_Noop() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowRunTimeout: timestamp.DurationPtr(200 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + }, + ) + s.Nil(err) + + wt := addWorkflowTaskScheduledEvent(mutableState) + event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) + wt.StartedEventID = event.GetEventId() + event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") + + identity := "identity" + taskqueue := "taskqueue" + activityID := "activity" + activityType := "activity type" + timerTimeout := 2 * time.Second + scheduledEvent, activityInfo := addActivityTaskScheduledEventWithRetry( + mutableState, + event.GetEventId(), + activityID, + activityType, + taskqueue, + nil, + timerTimeout, + timerTimeout, + timerTimeout, + timerTimeout, + &commonpb.RetryPolicy{ + InitialInterval: timestamp.DurationPtr(1 * time.Second), + BackoffCoefficient: 1.2, + MaximumInterval: timestamp.DurationPtr(5 * time.Second), + MaximumAttempts: 5, + NonRetryableErrorTypes: []string{"(╯' - ')╯ ┻━┻ "}, + }, + ) + startedEvent := addActivityTaskStartedEvent(mutableState, scheduledEvent.GetEventId(), identity) + s.Nil(startedEvent) + + timerTask := &tasks.ActivityRetryTimerTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + Version: s.version, + TaskID: int64(100), + VisibilityTimestamp: s.now, + EventID: activityInfo.ScheduledEventId, + Attempt: activityInfo.Attempt, + } + + persistenceMutableState := s.createPersistenceMutableState(mutableState, scheduledEvent.GetEventId(), scheduledEvent.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + + _, _, err = s.timerQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) + s.ErrorIs(err, consts.ErrActivityTaskNotFound) +} + +func (s *timerQueueActiveTaskExecutorSuite) TestWorkflowTimeout_Fire() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowRunTimeout: 
timestamp.DurationPtr(200 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + WorkflowExecutionExpirationTime: timestamp.TimePtr(s.now.Add(10 * time.Second)), + }, + ) + s.Nil(err) + + wt := addWorkflowTaskScheduledEvent(mutableState) + startEvent := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) + wt.StartedEventID = startEvent.GetEventId() + completionEvent := addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") + + timerTask := &tasks.WorkflowTimeoutTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + Version: s.version, + TaskID: int64(100), + VisibilityTimestamp: s.now, + } + + persistenceMutableState := s.createPersistenceMutableState(mutableState, completionEvent.GetEventId(), completionEvent.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) + + _, _, err = s.timerQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) + s.NoError(err) + + running := s.getMutableStateFromCache(s.namespaceID, execution.GetWorkflowId(), execution.GetRunId()).IsWorkflowExecutionRunning() + s.False(running) +} + +func (s *timerQueueActiveTaskExecutorSuite) TestWorkflowTimeout_Retry() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowRunTimeout: timestamp.DurationPtr(200 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + WorkflowExecutionExpirationTime: timestamp.TimePtr(s.now.Add(10 * time.Second)), + }, + ) + s.Nil(err) + // need to override the workflow retry policy + executionInfo := mutableState.GetExecutionInfo() + executionInfo.HasRetryPolicy = true + executionInfo.WorkflowExecutionExpirationTime = timestamp.TimeNowPtrUtcAddSeconds(1000) + executionInfo.RetryMaximumAttempts = 10 + executionInfo.RetryInitialInterval = timestamp.DurationFromSeconds(1) + executionInfo.RetryMaximumInterval = timestamp.DurationFromSeconds(1) + executionInfo.RetryBackoffCoefficient = 1 + + wt := addWorkflowTaskScheduledEvent(mutableState) + startEvent := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) + wt.StartedEventID = startEvent.GetEventId() + completionEvent := addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") + + timerTask := &tasks.WorkflowTimeoutTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + Version: s.version, + TaskID: int64(100), + 
VisibilityTimestamp: s.now, + } + + persistenceMutableState := s.createPersistenceMutableState(mutableState, completionEvent.GetEventId(), completionEvent.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + // one for current workflow, one for new + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) + + _, _, err = s.timerQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) + s.NoError(err) + + state, status := s.getMutableStateFromCache(s.namespaceID, execution.GetWorkflowId(), execution.GetRunId()).GetWorkflowStateStatus() + s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED, state) + s.EqualValues(enumspb.WORKFLOW_EXECUTION_STATUS_TIMED_OUT, status) +} + +func (s *timerQueueActiveTaskExecutorSuite) TestWorkflowTimeout_Cron() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowRunTimeout: timestamp.DurationPtr(200 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + WorkflowExecutionExpirationTime: timestamp.TimePtr(s.now.Add(10 * time.Second)), + }, + ) + s.Nil(err) + executionInfo := mutableState.GetExecutionInfo() + executionInfo.StartTime = &s.now + executionInfo.CronSchedule = "* * * * *" + + wt := addWorkflowTaskScheduledEvent(mutableState) + startEvent := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) + wt.StartedEventID = startEvent.GetEventId() + completionEvent := addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") + + timerTask := &tasks.WorkflowTimeoutTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + Version: s.version, + TaskID: int64(100), + VisibilityTimestamp: s.now, + } + + persistenceMutableState := s.createPersistenceMutableState(mutableState, completionEvent.GetEventId(), completionEvent.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + // one for current workflow, one for new + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) + + _, _, err = s.timerQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) + s.NoError(err) + + state, status := s.getMutableStateFromCache(s.namespaceID, execution.GetWorkflowId(), execution.GetRunId()).GetWorkflowStateStatus() + s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED, state) + s.EqualValues(enumspb.WORKFLOW_EXECUTION_STATUS_TIMED_OUT, status) +} + +func (s 
*timerQueueActiveTaskExecutorSuite) TestWorkflowTimeout_WorkflowExpired() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowRunTimeout: timestamp.DurationPtr(200 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + WorkflowExecutionExpirationTime: timestamp.TimePtr(s.now.Add(-1 * time.Second)), + }, + ) + s.Nil(err) + executionInfo := mutableState.GetExecutionInfo() + executionInfo.StartTime = &s.now + executionInfo.CronSchedule = "* * * * *" + + wt := addWorkflowTaskScheduledEvent(mutableState) + startEvent := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) + wt.StartedEventID = startEvent.GetEventId() + completionEvent := addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") + + timerTask := &tasks.WorkflowTimeoutTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + Version: s.version, + TaskID: int64(100), + VisibilityTimestamp: s.now, + } + + persistenceMutableState := s.createPersistenceMutableState(mutableState, completionEvent.GetEventId(), completionEvent.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) + + _, _, err = s.timerQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) + s.NoError(err) + + state, status := s.getMutableStateFromCache(s.namespaceID, execution.GetWorkflowId(), execution.GetRunId()).GetWorkflowStateStatus() + s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED, state) + s.EqualValues(enumspb.WORKFLOW_EXECUTION_STATUS_TIMED_OUT, status) +} + +func (s *timerQueueActiveTaskExecutorSuite) createPersistenceMutableState( + ms workflow.MutableState, + lastEventID int64, + lastEventVersion int64, +) *persistencespb.WorkflowMutableState { + currentVersionHistory, err := versionhistory.GetCurrentVersionHistory(ms.GetExecutionInfo().GetVersionHistories()) + s.NoError(err) + err = versionhistory.AddOrUpdateVersionHistoryItem(currentVersionHistory, versionhistory.NewVersionHistoryItem( + lastEventID, lastEventVersion, + )) + s.NoError(err) + return workflow.TestCloneToProto(ms) +} + +func (s *timerQueueActiveTaskExecutorSuite) getMutableStateFromCache( + namespaceID namespace.ID, + workflowID string, + runID string, +) workflow.MutableState { + return s.workflowCache.(*wcache.CacheImpl).Get( + definition.NewWorkflowKey(namespaceID.String(), workflowID, runID), + ).(*workflow.ContextImpl).MutableState +} + +func (s *timerQueueActiveTaskExecutorSuite) newTaskExecutable( + task tasks.Task, +) queues.Executable { + return 
queues.NewExecutable( + queues.DefaultReaderId, + task, + s.timerQueueActiveTaskExecutor, + nil, + nil, + queues.NewNoopPriorityAssigner(), + s.mockShard.GetTimeSource(), + s.mockNamespaceCache, + s.mockClusterMetadata, + nil, + metrics.NoopMetricsHandler, + func() bool { return false }, + ) +} diff -Nru temporal-1.21.5-1/src/service/history/timer_queue_factory.go temporal-1.22.5/src/service/history/timer_queue_factory.go --- temporal-1.21.5-1/src/service/history/timer_queue_factory.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/service/history/timer_queue_factory.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,208 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package history + +import ( + "context" + + "go.uber.org/fx" + + "go.temporal.io/server/api/historyservice/v1" + "go.temporal.io/server/client" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/log/tag" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/persistence/visibility/manager" + "go.temporal.io/server/common/resource" + "go.temporal.io/server/common/xdc" + deletemanager "go.temporal.io/server/service/history/deletemanager" + "go.temporal.io/server/service/history/queues" + "go.temporal.io/server/service/history/shard" + "go.temporal.io/server/service/history/tasks" + wcache "go.temporal.io/server/service/history/workflow/cache" + "go.temporal.io/server/service/worker/archiver" +) + +const ( + timerQueuePersistenceMaxRPSRatio = 0.3 +) + +type ( + timerQueueFactoryParams struct { + fx.In + + QueueFactoryBaseParams + + ClientBean client.Bean + ArchivalClient archiver.Client + MatchingRawClient resource.MatchingRawClient + VisibilityManager manager.VisibilityManager + } + + timerQueueFactory struct { + timerQueueFactoryParams + QueueFactoryBase + } +) + +func NewTimerQueueFactory( + params timerQueueFactoryParams, +) QueueFactory { + return &timerQueueFactory{ + timerQueueFactoryParams: params, + QueueFactoryBase: QueueFactoryBase{ + HostScheduler: queues.NewNamespacePriorityScheduler( + params.ClusterMetadata.GetCurrentClusterName(), + queues.NamespacePrioritySchedulerOptions{ + WorkerCount: params.Config.TimerProcessorSchedulerWorkerCount, + ActiveNamespaceWeights: params.Config.TimerProcessorSchedulerActiveRoundRobinWeights, + StandbyNamespaceWeights: params.Config.TimerProcessorSchedulerStandbyRoundRobinWeights, + EnableRateLimiter: params.Config.TaskSchedulerEnableRateLimiter, + EnableRateLimiterShadowMode: params.Config.TaskSchedulerEnableRateLimiterShadowMode, + DispatchThrottleDuration: params.Config.TaskSchedulerThrottleDuration, + }, + params.NamespaceRegistry, + params.SchedulerRateLimiter, + params.TimeSource, + params.MetricsHandler.WithTags(metrics.OperationTag(metrics.OperationTimerQueueProcessorScope)), + params.Logger, + ), + HostPriorityAssigner: queues.NewPriorityAssigner(), + HostReaderRateLimiter: queues.NewReaderPriorityRateLimiter( + NewHostRateLimiterRateFn( + params.Config.TimerProcessorMaxPollHostRPS, + params.Config.PersistenceMaxQPS, + timerQueuePersistenceMaxRPSRatio, + ), + int64(params.Config.QueueMaxReaderCount()), + ), + }, + } +} + +func (f *timerQueueFactory) CreateQueue( + shard shard.Context, + workflowCache wcache.Cache, +) queues.Queue { + logger := log.With(shard.GetLogger(), tag.ComponentTimerQueue) + metricsHandler := f.MetricsHandler.WithTags(metrics.OperationTag(metrics.OperationTimerQueueProcessorScope)) + + currentClusterName := f.ClusterMetadata.GetCurrentClusterName() + workflowDeleteManager := deletemanager.NewDeleteManager( + shard, + workflowCache, + f.Config, + f.ArchivalClient, + shard.GetTimeSource(), + f.VisibilityManager, + ) + + rescheduler := queues.NewRescheduler( + f.HostScheduler, + shard.GetTimeSource(), + logger, + metricsHandler, + ) + + activeExecutor := newTimerQueueActiveTaskExecutor( + shard, + workflowCache, + workflowDeleteManager, + logger, + f.MetricsHandler, + f.Config, + f.MatchingRawClient, + ) + + standbyExecutor := newTimerQueueStandbyTaskExecutor( + shard, + workflowCache, + workflowDeleteManager, + xdc.NewNDCHistoryResender( + shard.GetNamespaceRegistry(), + f.ClientBean, + func(ctx context.Context, request *historyservice.ReplicateEventsV2Request) error { + 
engine, err := shard.GetEngine(ctx) + if err != nil { + return err + } + return engine.ReplicateEventsV2(ctx, request) + }, + shard.GetPayloadSerializer(), + f.Config.StandbyTaskReReplicationContextTimeout, + logger, + ), + f.MatchingRawClient, + logger, + f.MetricsHandler, + // note: the cluster name is for calculating time for standby tasks, + // here we are basically using current cluster time + // this field will be deprecated soon, currently exists so that + // we have the option of revert to old behavior + currentClusterName, + f.Config, + ) + + executor := queues.NewActiveStandbyExecutor( + currentClusterName, + f.NamespaceRegistry, + activeExecutor, + standbyExecutor, + logger, + ) + if f.ExecutorWrapper != nil { + executor = f.ExecutorWrapper.Wrap(executor) + } + + return queues.NewScheduledQueue( + shard, + tasks.CategoryTimer, + f.HostScheduler, + rescheduler, + f.HostPriorityAssigner, + executor, + &queues.Options{ + ReaderOptions: queues.ReaderOptions{ + BatchSize: f.Config.TimerTaskBatchSize, + MaxPendingTasksCount: f.Config.QueuePendingTaskMaxCount, + PollBackoffInterval: f.Config.TimerProcessorPollBackoffInterval, + }, + MonitorOptions: queues.MonitorOptions{ + PendingTasksCriticalCount: f.Config.QueuePendingTaskCriticalCount, + ReaderStuckCriticalAttempts: f.Config.QueueReaderStuckCriticalAttempts, + SliceCountCriticalThreshold: f.Config.QueueCriticalSlicesCount, + }, + MaxPollRPS: f.Config.TimerProcessorMaxPollRPS, + MaxPollInterval: f.Config.TimerProcessorMaxPollInterval, + MaxPollIntervalJitterCoefficient: f.Config.TimerProcessorMaxPollIntervalJitterCoefficient, + CheckpointInterval: f.Config.TimerProcessorUpdateAckInterval, + CheckpointIntervalJitterCoefficient: f.Config.TimerProcessorUpdateAckIntervalJitterCoefficient, + MaxReaderCount: f.Config.QueueMaxReaderCount, + }, + f.HostReaderRateLimiter, + logger, + metricsHandler, + ) +} diff -Nru temporal-1.21.5-1/src/service/history/timer_queue_standby_task_executor.go temporal-1.22.5/src/service/history/timer_queue_standby_task_executor.go --- temporal-1.21.5-1/src/service/history/timer_queue_standby_task_executor.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/service/history/timer_queue_standby_task_executor.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,590 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package history + +import ( + "context" + "fmt" + "time" + + commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" + "go.temporal.io/api/serviceerror" + taskqueuepb "go.temporal.io/api/taskqueue/v1" + + "go.temporal.io/server/api/matchingservice/v1" + "go.temporal.io/server/common" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/log/tag" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/namespace" + "go.temporal.io/server/common/resource" + "go.temporal.io/server/common/xdc" + "go.temporal.io/server/service/history/configs" + "go.temporal.io/server/service/history/consts" + "go.temporal.io/server/service/history/deletemanager" + "go.temporal.io/server/service/history/queues" + "go.temporal.io/server/service/history/shard" + "go.temporal.io/server/service/history/tasks" + "go.temporal.io/server/service/history/vclock" + "go.temporal.io/server/service/history/workflow" + wcache "go.temporal.io/server/service/history/workflow/cache" +) + +type ( + timerQueueStandbyTaskExecutor struct { + *timerQueueTaskExecutorBase + + clusterName string + nDCHistoryResender xdc.NDCHistoryResender + } +) + +func newTimerQueueStandbyTaskExecutor( + shard shard.Context, + workflowCache wcache.Cache, + workflowDeleteManager deletemanager.DeleteManager, + nDCHistoryResender xdc.NDCHistoryResender, + matchingRawClient resource.MatchingRawClient, + logger log.Logger, + metricProvider metrics.Handler, + clusterName string, + config *configs.Config, +) queues.Executor { + return &timerQueueStandbyTaskExecutor{ + timerQueueTaskExecutorBase: newTimerQueueTaskExecutorBase( + shard, + workflowCache, + workflowDeleteManager, + matchingRawClient, + logger, + metricProvider, + config, + ), + clusterName: clusterName, + nDCHistoryResender: nDCHistoryResender, + } +} + +func (t *timerQueueStandbyTaskExecutor) Execute( + ctx context.Context, + executable queues.Executable, +) ([]metrics.Tag, bool, error) { + task := executable.GetTask() + taskType := queues.GetStandbyTimerTaskTypeTagValue(task) + metricsTags := []metrics.Tag{ + getNamespaceTagByID(t.shard.GetNamespaceRegistry(), task.GetNamespaceID()), + metrics.TaskTypeTag(taskType), + metrics.OperationTag(taskType), // for backward compatibility + } + + var err error + switch task := task.(type) { + case *tasks.UserTimerTask: + err = t.executeUserTimerTimeoutTask(ctx, task) + case *tasks.ActivityTimeoutTask: + err = t.executeActivityTimeoutTask(ctx, task) + case *tasks.WorkflowTaskTimeoutTask: + err = t.executeWorkflowTaskTimeoutTask(ctx, task) + case *tasks.WorkflowBackoffTimerTask: + err = t.executeWorkflowBackoffTimerTask(ctx, task) + case *tasks.ActivityRetryTimerTask: + err = t.executeActivityRetryTimerTask(ctx, task) + case *tasks.WorkflowTimeoutTask: + err = t.executeWorkflowTimeoutTask(ctx, task) + case *tasks.DeleteHistoryEventTask: + err = t.executeDeleteHistoryEventTask(ctx, task) + default: + err = errUnknownTimerTask + } + + return metricsTags, false, err +} + +func (t *timerQueueStandbyTaskExecutor) executeUserTimerTimeoutTask( + ctx context.Context, + timerTask *tasks.UserTimerTask, +) error { + actionFn := func(_ context.Context, wfContext workflow.Context, mutableState workflow.MutableState) (interface{}, error) { + timerSequence := t.getTimerSequence(mutableState) + timerSequenceIDs := timerSequence.LoadAndSortUserTimers() + if len(timerSequenceIDs) > 0 { + timerSequenceID := timerSequenceIDs[0] + _, ok := mutableState.GetUserTimerInfoByEventID(timerSequenceID.EventID) + if !ok { + 
errString := fmt.Sprintf("failed to find in user timer event ID: %v", timerSequenceID.EventID) + t.logger.Error(errString) + return nil, serviceerror.NewInternal(errString) + } + + if queues.IsTimeExpired( + timerTask.GetVisibilityTime(), + timerSequenceID.Timestamp, + ) { + return getHistoryResendInfo(mutableState) + } + // Since the user timers are already sorted, then if there is one timer which is not expired, + // all user timers after that timer are not expired. + } + // If there is no user timer expired, then we are good. + return nil, nil + } + + return t.processTimer( + ctx, + timerTask, + actionFn, + getStandbyPostActionFn( + timerTask, + t.getCurrentTime, + t.config.StandbyTaskMissingEventsResendDelay(timerTask.GetType()), + t.config.StandbyTaskMissingEventsDiscardDelay(timerTask.GetType()), + t.fetchHistoryFromRemote, + standbyTimerTaskPostActionTaskDiscarded, + ), + ) +} + +func (t *timerQueueStandbyTaskExecutor) executeActivityTimeoutTask( + ctx context.Context, + timerTask *tasks.ActivityTimeoutTask, +) error { + // activity heartbeat timer task is a special snowflake. + // normal activity timer task on the passive side will be generated by events related to activity in history replicator, + // and the standby timer processor will only need to verify whether the timer task can be safely throw away. + // + // activity heartbeat timer task cannot be handled in the way mentioned above. + // the reason is, there is no event driving the creation of new activity heartbeat timer. + // although there will be an task syncing activity from remote, the task is not an event, + // and cannot attempt to recreate a new activity timer task. + // + // the overall solution is to attempt to generate a new activity timer task whenever the + // task passed in is safe to be throw away. + actionFn := func(ctx context.Context, wfContext workflow.Context, mutableState workflow.MutableState) (interface{}, error) { + timerSequence := t.getTimerSequence(mutableState) + updateMutableState := false + timerSequenceIDs := timerSequence.LoadAndSortActivityTimers() + if len(timerSequenceIDs) > 0 { + timerSequenceID := timerSequenceIDs[0] + _, ok := mutableState.GetActivityInfo(timerSequenceID.EventID) + if !ok { + errString := fmt.Sprintf("failed to find in memory activity timer: %v", timerSequenceID.EventID) + t.logger.Error(errString) + return nil, serviceerror.NewInternal(errString) + } + + if queues.IsTimeExpired( + timerTask.GetVisibilityTime(), + timerSequenceID.Timestamp, + ) { + return getHistoryResendInfo(mutableState) + } + // Since the activity timers are already sorted, then if there is one timer which is not expired, + // all activity timers after that timer are not expired. + } + + // for reason to update mutable state & generate a new activity task, + // see comments at the beginning of this function. + // NOTE: this is the only place in the standby logic where mutable state can be updated + + // need to clear the activity heartbeat timer task marks + lastWriteVersion, err := mutableState.GetLastWriteVersion() + if err != nil { + return nil, err + } + + // NOTE: LastHeartbeatTimeoutVisibilityInSeconds is for deduping heartbeat timer creation as it's possible + // one heartbeat task was persisted multiple times with different taskIDs due to the retry logic + // for updating workflow execution. In that case, only one new heartbeat timeout task should be + // created. 
+ isHeartBeatTask := timerTask.TimeoutType == enumspb.TIMEOUT_TYPE_HEARTBEAT + activityInfo, heartbeatTimeoutVis, ok := mutableState.GetActivityInfoWithTimerHeartbeat(timerTask.EventID) + if isHeartBeatTask && ok && queues.IsTimeExpired(timerTask.GetVisibilityTime(), heartbeatTimeoutVis) { + activityInfo.TimerTaskStatus = activityInfo.TimerTaskStatus &^ workflow.TimerTaskStatusCreatedHeartbeat + if err := mutableState.UpdateActivity(activityInfo); err != nil { + return nil, err + } + updateMutableState = true + } + + // passive logic need to explicitly call create timer + modified, err := timerSequence.CreateNextActivityTimer() + if err != nil { + return nil, err + } + updateMutableState = updateMutableState || modified + + if !updateMutableState { + return nil, nil + } + + // we need to handcraft some of the variables + // since the job being done here is update the activity and possibly write a timer task to DB + // also need to reset the current version. + if err := mutableState.UpdateCurrentVersion(lastWriteVersion, true); err != nil { + return nil, err + } + + err = wfContext.UpdateWorkflowExecutionAsPassive(ctx) + return nil, err + } + + return t.processTimer( + ctx, + timerTask, + actionFn, + getStandbyPostActionFn( + timerTask, + t.getCurrentTime, + t.config.StandbyTaskMissingEventsResendDelay(timerTask.GetType()), + t.config.StandbyTaskMissingEventsDiscardDelay(timerTask.GetType()), + t.fetchHistoryFromRemote, + standbyTimerTaskPostActionTaskDiscarded, + ), + ) +} + +func (t *timerQueueStandbyTaskExecutor) executeActivityRetryTimerTask( + ctx context.Context, + task *tasks.ActivityRetryTimerTask, +) (retError error) { + actionFn := func(_ context.Context, wfContext workflow.Context, mutableState workflow.MutableState) (interface{}, error) { + activityInfo, ok := mutableState.GetActivityInfo(task.EventID) // activity schedule ID + if !ok { + return nil, nil + } + + err := CheckTaskVersion(t.shard, t.logger, mutableState.GetNamespaceEntry(), activityInfo.Version, task.Version, task) + if err != nil { + return nil, err + } + + if activityInfo.Attempt > task.Attempt { + return nil, nil + } + + if activityInfo.StartedEventId != common.EmptyEventID { + return nil, nil + } + + return newActivityRetryTimePostActionInfo(mutableState, activityInfo.TaskQueue, *activityInfo.ScheduleToStartTimeout, activityInfo.UseCompatibleVersion) + } + + return t.processTimer( + ctx, + task, + actionFn, + getStandbyPostActionFn( + task, + t.getCurrentTime, + t.config.StandbyTaskMissingEventsResendDelay(task.GetType()), + t.config.StandbyTaskMissingEventsDiscardDelay(task.GetType()), + t.fetchHistoryFromRemote, + t.pushActivity, + ), + ) +} + +func (t *timerQueueStandbyTaskExecutor) executeWorkflowTaskTimeoutTask( + ctx context.Context, + timerTask *tasks.WorkflowTaskTimeoutTask, +) error { + // workflow task schedule to start timer task is a special snowflake. 
+ // the schedule to start timer is for sticky workflow task, which is + // not applicable on the passive cluster + if timerTask.TimeoutType == enumspb.TIMEOUT_TYPE_SCHEDULE_TO_START { + return nil + } + + actionFn := func(_ context.Context, wfContext workflow.Context, mutableState workflow.MutableState) (interface{}, error) { + workflowTask := mutableState.GetWorkflowTaskByID(timerTask.EventID) + if workflowTask == nil { + return nil, nil + } + + err := CheckTaskVersion(t.shard, t.logger, mutableState.GetNamespaceEntry(), workflowTask.Version, timerTask.Version, timerTask) + if err != nil { + return nil, err + } + + return getHistoryResendInfo(mutableState) + } + + return t.processTimer( + ctx, + timerTask, + actionFn, + getStandbyPostActionFn( + timerTask, + t.getCurrentTime, + t.config.StandbyTaskMissingEventsResendDelay(timerTask.GetType()), + t.config.StandbyTaskMissingEventsDiscardDelay(timerTask.GetType()), + t.fetchHistoryFromRemote, + standbyTimerTaskPostActionTaskDiscarded, + ), + ) +} + +func (t *timerQueueStandbyTaskExecutor) executeWorkflowBackoffTimerTask( + ctx context.Context, + timerTask *tasks.WorkflowBackoffTimerTask, +) error { + actionFn := func(_ context.Context, wfContext workflow.Context, mutableState workflow.MutableState) (interface{}, error) { + if mutableState.HadOrHasWorkflowTask() { + // if there is one workflow task already been processed + // or has pending workflow task, meaning workflow has already running + return nil, nil + } + + // Note: do not need to verify task version here + // logic can only go here if mutable state build's next event ID is 2 + // meaning history only contains workflow started event. + // we can do the checking of task version vs workflow started version + // however, workflow started version is immutable + + // active cluster will add first workflow task after backoff timeout. + // standby cluster should just call ack manager to retry this task + // since we are stilling waiting for the first WorkflowTaskScheduledEvent to be replicated from active side. 
+ + return getHistoryResendInfo(mutableState) + } + + return t.processTimer( + ctx, + timerTask, + actionFn, + getStandbyPostActionFn( + timerTask, + t.getCurrentTime, + t.config.StandbyTaskMissingEventsResendDelay(timerTask.GetType()), + t.config.StandbyTaskMissingEventsDiscardDelay(timerTask.GetType()), + t.fetchHistoryFromRemote, + standbyTimerTaskPostActionTaskDiscarded, + ), + ) +} + +func (t *timerQueueStandbyTaskExecutor) executeWorkflowTimeoutTask( + ctx context.Context, + timerTask *tasks.WorkflowTimeoutTask, +) error { + actionFn := func(_ context.Context, wfContext workflow.Context, mutableState workflow.MutableState) (interface{}, error) { + // we do not need to notify new timer to base, since if there is no new event being replicated + // checking again if the timer can be completed is meaningless + + startVersion, err := mutableState.GetStartVersion() + if err != nil { + return nil, err + } + err = CheckTaskVersion(t.shard, t.logger, mutableState.GetNamespaceEntry(), startVersion, timerTask.Version, timerTask) + if err != nil { + return nil, err + } + + return getHistoryResendInfo(mutableState) + } + + return t.processTimer( + ctx, + timerTask, + actionFn, + getStandbyPostActionFn( + timerTask, + t.getCurrentTime, + t.config.StandbyTaskMissingEventsResendDelay(timerTask.GetType()), + t.config.StandbyTaskMissingEventsDiscardDelay(timerTask.GetType()), + t.fetchHistoryFromRemote, + standbyTimerTaskPostActionTaskDiscarded, + ), + ) +} + +func (t *timerQueueStandbyTaskExecutor) getTimerSequence( + mutableState workflow.MutableState, +) workflow.TimerSequence { + return workflow.NewTimerSequence(mutableState) +} + +func (t *timerQueueStandbyTaskExecutor) processTimer( + ctx context.Context, + timerTask tasks.Task, + actionFn standbyActionFn, + postActionFn standbyPostActionFn, +) (retError error) { + ctx, cancel := context.WithTimeout(ctx, taskTimeout) + defer cancel() + + nsRecord, err := t.shard.GetNamespaceRegistry().GetNamespaceByID(namespace.ID(timerTask.GetNamespaceID())) + if err != nil { + return err + } + if !nsRecord.IsOnCluster(t.clusterName) { + // namespace is not replicated to local cluster, ignore corresponding tasks + return nil + } + + executionContext, release, err := getWorkflowExecutionContextForTask(ctx, t.cache, timerTask) + if err != nil { + return err + } + defer func() { + if retError == consts.ErrTaskRetry { + release(nil) + } else { + release(retError) + } + }() + + mutableState, err := loadMutableStateForTimerTask(ctx, executionContext, timerTask, t.metricHandler, t.logger) + if err != nil { + return err + } + if mutableState == nil { + return nil + } + + if !mutableState.IsWorkflowExecutionRunning() { + // workflow already finished, no need to process the timer + return nil + } + + historyResendInfo, err := actionFn(ctx, executionContext, mutableState) + if err != nil { + return err + } + + // NOTE: do not access anything related mutable state after this lock release + release(nil) + return postActionFn(ctx, timerTask, historyResendInfo, t.logger) +} + +func (t *timerQueueStandbyTaskExecutor) fetchHistoryFromRemote( + ctx context.Context, + taskInfo tasks.Task, + postActionInfo interface{}, + logger log.Logger, +) error { + var resendInfo *historyResendInfo + switch postActionInfo := postActionInfo.(type) { + case nil: + return nil + case *historyResendInfo: + resendInfo = postActionInfo + case *activityTaskPostActionInfo: + resendInfo = postActionInfo.historyResendInfo + default: + logger.Fatal("unknown post action info for fetching remote history", 
tag.Value(postActionInfo)) + } + + remoteClusterName, err := getRemoteClusterName( + t.currentClusterName, + t.registry, + taskInfo.GetNamespaceID(), + ) + if err != nil { + return err + } + + scope := t.metricHandler.WithTags(metrics.OperationTag(metrics.HistoryRereplicationByTimerTaskScope)) + scope.Counter(metrics.ClientRequests.GetMetricName()).Record(1) + startTime := time.Now() + defer func() { scope.Timer(metrics.ClientLatency.GetMetricName()).Record(time.Since(startTime)) }() + + if resendInfo.lastEventID == common.EmptyEventID || resendInfo.lastEventVersion == common.EmptyVersion { + t.logger.Error("Error re-replicating history from remote: timerQueueStandbyProcessor encountered empty historyResendInfo.", + tag.ShardID(t.shard.GetShardID()), + tag.WorkflowNamespaceID(taskInfo.GetNamespaceID()), + tag.WorkflowID(taskInfo.GetWorkflowID()), + tag.WorkflowRunID(taskInfo.GetRunID()), + tag.ClusterName(remoteClusterName)) + + return consts.ErrTaskRetry + } + + // NOTE: history resend may take long time and its timeout is currently + // controlled by a separate dynamicconfig config: StandbyTaskReReplicationContextTimeout + if err = t.nDCHistoryResender.SendSingleWorkflowHistory( + ctx, + remoteClusterName, + namespace.ID(taskInfo.GetNamespaceID()), + taskInfo.GetWorkflowID(), + taskInfo.GetRunID(), + resendInfo.lastEventID, + resendInfo.lastEventVersion, + common.EmptyEventID, + common.EmptyVersion, + ); err != nil { + if _, isNotFound := err.(*serviceerror.NamespaceNotFound); isNotFound { + // Don't log NamespaceNotFound error because it is valid case, and return error to stop retrying. + return err + } + t.logger.Error("Error re-replicating history from remote.", + tag.ShardID(t.shard.GetShardID()), + tag.WorkflowNamespaceID(taskInfo.GetNamespaceID()), + tag.WorkflowID(taskInfo.GetWorkflowID()), + tag.WorkflowRunID(taskInfo.GetRunID()), + tag.ClusterName(remoteClusterName), + tag.Error(err)) + } + + // Return retryable error, so task processing will retry. 
+ return consts.ErrTaskRetry +} + +func (t *timerQueueStandbyTaskExecutor) pushActivity( + ctx context.Context, + task tasks.Task, + postActionInfo interface{}, + logger log.Logger, +) error { + if postActionInfo == nil { + return nil + } + + pushActivityInfo := postActionInfo.(*activityTaskPostActionInfo) + activityScheduleToStartTimeout := &pushActivityInfo.activityTaskScheduleToStartTimeout + activityTask := task.(*tasks.ActivityRetryTimerTask) + + _, err := t.matchingRawClient.AddActivityTask(ctx, &matchingservice.AddActivityTaskRequest{ + NamespaceId: activityTask.NamespaceID, + Execution: &commonpb.WorkflowExecution{ + WorkflowId: activityTask.WorkflowID, + RunId: activityTask.RunID, + }, + TaskQueue: &taskqueuepb.TaskQueue{ + Name: pushActivityInfo.taskQueue, + Kind: enumspb.TASK_QUEUE_KIND_NORMAL, + }, + ScheduledEventId: activityTask.EventID, + ScheduleToStartTimeout: activityScheduleToStartTimeout, + Clock: vclock.NewVectorClock(t.shard.GetClusterMetadata().GetClusterID(), t.shard.GetShardID(), activityTask.TaskID), + VersionDirective: pushActivityInfo.versionDirective, + }) + return err +} + +func (t *timerQueueStandbyTaskExecutor) getCurrentTime() time.Time { + return t.shard.GetCurrentTime(t.clusterName) +} diff -Nru temporal-1.21.5-1/src/service/history/timer_queue_standby_task_executor_test.go temporal-1.22.5/src/service/history/timer_queue_standby_task_executor_test.go --- temporal-1.21.5-1/src/service/history/timer_queue_standby_task_executor_test.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/service/history/timer_queue_standby_task_executor_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,1510 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package history + +import ( + "context" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/pborman/uuid" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" + taskqueuepb "go.temporal.io/api/taskqueue/v1" + "go.temporal.io/api/workflowservice/v1" + + "go.temporal.io/server/api/adminservicemock/v1" + enumsspb "go.temporal.io/server/api/enums/v1" + "go.temporal.io/server/api/historyservice/v1" + "go.temporal.io/server/api/matchingservice/v1" + "go.temporal.io/server/api/matchingservicemock/v1" + persistencespb "go.temporal.io/server/api/persistence/v1" + "go.temporal.io/server/common" + "go.temporal.io/server/common/clock" + "go.temporal.io/server/common/cluster" + "go.temporal.io/server/common/definition" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/namespace" + "go.temporal.io/server/common/persistence" + "go.temporal.io/server/common/persistence/versionhistory" + "go.temporal.io/server/common/primitives/timestamp" + "go.temporal.io/server/common/worker_versioning" + "go.temporal.io/server/common/xdc" + "go.temporal.io/server/service/history/consts" + "go.temporal.io/server/service/history/deletemanager" + "go.temporal.io/server/service/history/events" + "go.temporal.io/server/service/history/queues" + "go.temporal.io/server/service/history/shard" + "go.temporal.io/server/service/history/tasks" + "go.temporal.io/server/service/history/tests" + "go.temporal.io/server/service/history/vclock" + "go.temporal.io/server/service/history/workflow" + wcache "go.temporal.io/server/service/history/workflow/cache" +) + +type ( + timerQueueStandbyTaskExecutorSuite struct { + suite.Suite + *require.Assertions + + controller *gomock.Controller + mockExecutionMgr *persistence.MockExecutionManager + mockShard *shard.ContextTest + mockTxProcessor *queues.MockQueue + mockTimerProcessor *queues.MockQueue + mockNamespaceCache *namespace.MockRegistry + mockClusterMetadata *cluster.MockMetadata + mockAdminClient *adminservicemock.MockAdminServiceClient + mockNDCHistoryResender *xdc.MockNDCHistoryResender + mockDeleteManager *deletemanager.MockDeleteManager + mockMatchingClient *matchingservicemock.MockMatchingServiceClient + + workflowCache wcache.Cache + logger log.Logger + namespaceID namespace.ID + namespaceEntry *namespace.Namespace + version int64 + clusterName string + now time.Time + timeSource *clock.EventTimeSource + fetchHistoryDuration time.Duration + discardDuration time.Duration + + timerQueueStandbyTaskExecutor *timerQueueStandbyTaskExecutor + } +) + +func TestTimerQueueStandbyTaskExecutorSuite(t *testing.T) { + s := new(timerQueueStandbyTaskExecutorSuite) + suite.Run(t, s) +} + +func (s *timerQueueStandbyTaskExecutorSuite) SetupSuite() { +} + +func (s *timerQueueStandbyTaskExecutorSuite) SetupTest() { + s.Assertions = require.New(s.T()) + + config := tests.NewDynamicConfig() + s.namespaceEntry = tests.GlobalStandbyNamespaceEntry + s.namespaceID = s.namespaceEntry.ID() + s.version = s.namespaceEntry.FailoverVersion() + s.clusterName = cluster.TestAlternativeClusterName + s.now = time.Now().UTC() + s.timeSource = clock.NewEventTimeSource().Update(s.now) + s.fetchHistoryDuration = time.Minute * 12 + s.discardDuration = time.Minute * 30 + + s.controller = gomock.NewController(s.T()) + s.mockNDCHistoryResender = xdc.NewMockNDCHistoryResender(s.controller) + s.mockTxProcessor = 
queues.NewMockQueue(s.controller) + s.mockTimerProcessor = queues.NewMockQueue(s.controller) + s.mockTxProcessor.EXPECT().Category().Return(tasks.CategoryTransfer).AnyTimes() + s.mockTimerProcessor.EXPECT().Category().Return(tasks.CategoryTimer).AnyTimes() + s.mockTxProcessor.EXPECT().NotifyNewTasks(gomock.Any()).AnyTimes() + s.mockTimerProcessor.EXPECT().NotifyNewTasks(gomock.Any()).AnyTimes() + + s.mockShard = shard.NewTestContextWithTimeSource( + s.controller, + &persistencespb.ShardInfo{ + ShardId: 1, + RangeId: 1, + }, + config, + s.timeSource, + ) + s.mockShard.SetEventsCacheForTesting(events.NewEventsCache( + s.mockShard.GetShardID(), + s.mockShard.GetConfig().EventsCacheMaxSizeBytes(), + s.mockShard.GetConfig().EventsCacheTTL(), + s.mockShard.GetExecutionManager(), + false, + s.mockShard.GetLogger(), + s.mockShard.GetMetricsHandler(), + )) + + // ack manager will use the namespace information + s.mockNamespaceCache = s.mockShard.Resource.NamespaceCache + s.mockExecutionMgr = s.mockShard.Resource.ExecutionMgr + s.mockClusterMetadata = s.mockShard.Resource.ClusterMetadata + s.mockAdminClient = s.mockShard.Resource.RemoteAdminClient + s.mockMatchingClient = s.mockShard.Resource.MatchingClient + s.mockNamespaceCache.EXPECT().GetNamespaceByID(gomock.Any()).Return(s.namespaceEntry, nil).AnyTimes() + s.mockNamespaceCache.EXPECT().GetNamespaceName(gomock.Any()).Return(s.namespaceEntry.Name(), nil).AnyTimes() + s.mockClusterMetadata.EXPECT().GetClusterID().Return(tests.Version).AnyTimes() + s.mockClusterMetadata.EXPECT().IsVersionFromSameCluster(tests.Version, tests.Version).Return(true).AnyTimes() + s.mockClusterMetadata.EXPECT().GetCurrentClusterName().Return(cluster.TestCurrentClusterName).AnyTimes() + s.mockClusterMetadata.EXPECT().GetAllClusterInfo().Return(cluster.TestAllClusterInfo).AnyTimes() + s.mockClusterMetadata.EXPECT().IsGlobalNamespaceEnabled().Return(true).AnyTimes() + s.mockClusterMetadata.EXPECT().ClusterNameForFailoverVersion(s.namespaceEntry.IsGlobalNamespace(), s.version).Return(s.clusterName).AnyTimes() + s.workflowCache = wcache.NewCache(s.mockShard) + s.logger = s.mockShard.GetLogger() + + s.mockDeleteManager = deletemanager.NewMockDeleteManager(s.controller) + h := &historyEngineImpl{ + currentClusterName: s.mockShard.Resource.GetClusterMetadata().GetCurrentClusterName(), + shard: s.mockShard, + clusterMetadata: s.mockClusterMetadata, + executionManager: s.mockExecutionMgr, + logger: s.logger, + tokenSerializer: common.NewProtoTaskTokenSerializer(), + metricsHandler: s.mockShard.GetMetricsHandler(), + eventNotifier: events.NewNotifier(s.timeSource, metrics.NoopMetricsHandler, func(namespace.ID, string) int32 { return 1 }), + queueProcessors: map[tasks.Category]queues.Queue{ + s.mockTxProcessor.Category(): s.mockTxProcessor, + s.mockTimerProcessor.Category(): s.mockTimerProcessor, + }, + } + s.mockShard.SetEngineForTesting(h) + + s.timerQueueStandbyTaskExecutor = newTimerQueueStandbyTaskExecutor( + s.mockShard, + s.workflowCache, + s.mockDeleteManager, + s.mockNDCHistoryResender, + s.mockMatchingClient, + s.logger, + metrics.NoopMetricsHandler, + s.clusterName, + config, + ).(*timerQueueStandbyTaskExecutor) +} + +func (s *timerQueueStandbyTaskExecutorSuite) TearDownTest() { + s.controller.Finish() + s.mockShard.StopForTest() +} + +func (s *timerQueueStandbyTaskExecutorSuite) TestProcessUserTimerTimeout_Pending() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" 
+ taskQueueName := "some random task queue" + + mutableState := workflow.TestGlobalMutableState( + s.mockShard, + s.mockShard.GetEventsCache(), + s.logger, + s.version, + execution.GetRunId(), + ) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowRunTimeout: timestamp.DurationPtr(200 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + }, + ) + s.Nil(err) + + wt := addWorkflowTaskScheduledEvent(mutableState) + event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) + wt.StartedEventID = event.GetEventId() + event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") + + timerID := "timer" + timerTimeout := 2 * time.Second + event, _ = addTimerStartedEvent(mutableState, event.GetEventId(), timerID, timerTimeout) + nextEventID := event.GetEventId() + + timerSequence := workflow.NewTimerSequence(mutableState) + mutableState.InsertTasks[tasks.CategoryTimer] = nil + modified, err := timerSequence.CreateNextUserTimer() + s.NoError(err) + s.True(modified) + task := mutableState.InsertTasks[tasks.CategoryTimer][0] + + timerTask := &tasks.UserTimerTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + Version: s.version, + TaskID: int64(100), + VisibilityTimestamp: task.(*tasks.UserTimerTask).VisibilityTimestamp, + EventID: event.EventId, + } + + persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + + s.mockShard.SetCurrentTime(s.clusterName, s.now) + _, _, err = s.timerQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) + s.Equal(consts.ErrTaskRetry, err) + + s.mockShard.SetCurrentTime(s.clusterName, s.now.Add(s.fetchHistoryDuration)) + s.mockNDCHistoryResender.EXPECT().SendSingleWorkflowHistory( + gomock.Any(), + s.clusterName, + namespace.ID(timerTask.NamespaceID), + timerTask.WorkflowID, + timerTask.RunID, + nextEventID, + s.version, + int64(0), + int64(0), + ).Return(nil) + _, _, err = s.timerQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) + s.Equal(consts.ErrTaskRetry, err) + + s.mockShard.SetCurrentTime(s.clusterName, s.now.Add(s.discardDuration)) + _, _, err = s.timerQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) + s.Equal(consts.ErrTaskDiscarded, err) +} + +func (s *timerQueueStandbyTaskExecutorSuite) TestProcessUserTimerTimeout_Success() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + mutableState := workflow.TestGlobalMutableState(s.mockShard, + s.mockShard.GetEventsCache(), + s.logger, + s.version, + execution.GetRunId()) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: 
s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowRunTimeout: timestamp.DurationPtr(200 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + }, + ) + s.Nil(err) + + wt := addWorkflowTaskScheduledEvent(mutableState) + event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) + wt.StartedEventID = event.GetEventId() + event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") + + timerID := "timer" + timerTimeout := 2 * time.Second + event, _ = addTimerStartedEvent(mutableState, event.GetEventId(), timerID, timerTimeout) + + timerSequence := workflow.NewTimerSequence(mutableState) + mutableState.InsertTasks[tasks.CategoryTimer] = nil + modified, err := timerSequence.CreateNextUserTimer() + s.NoError(err) + s.True(modified) + task := mutableState.InsertTasks[tasks.CategoryTimer][0] + + timerTask := &tasks.UserTimerTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + Version: s.version, + TaskID: int64(100), + VisibilityTimestamp: task.(*tasks.UserTimerTask).VisibilityTimestamp, + EventID: event.EventId, + } + + event = addTimerFiredEvent(mutableState, timerID) + // Flush buffered events so real IDs get assigned + mutableState.FlushBufferedEvents() + + persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + + s.mockShard.SetCurrentTime(s.clusterName, s.now) + _, _, err = s.timerQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) + s.Nil(err) +} + +func (s *timerQueueStandbyTaskExecutorSuite) TestProcessUserTimerTimeout_Multiple() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowRunTimeout: timestamp.DurationPtr(200 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + }, + ) + s.Nil(err) + + wt := addWorkflowTaskScheduledEvent(mutableState) + event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) + wt.StartedEventID = event.GetEventId() + event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") + + timerID1 := "timer-1" + timerTimeout1 := 2 * time.Second + event, _ = addTimerStartedEvent(mutableState, event.GetEventId(), timerID1, timerTimeout1) + + timerID2 := "timer-2" + timerTimeout2 := 50 * time.Second + _, _ = addTimerStartedEvent(mutableState, event.GetEventId(), 
timerID2, timerTimeout2) + + timerSequence := workflow.NewTimerSequence(mutableState) + mutableState.InsertTasks[tasks.CategoryTimer] = nil + modified, err := timerSequence.CreateNextUserTimer() + s.NoError(err) + s.True(modified) + task := mutableState.InsertTasks[tasks.CategoryTimer][0] + + timerTask := &tasks.UserTimerTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + Version: s.version, + TaskID: int64(100), + VisibilityTimestamp: task.(*tasks.UserTimerTask).VisibilityTimestamp, + EventID: event.EventId, + } + + event = addTimerFiredEvent(mutableState, timerID1) + // Flush buffered events so real IDs get assigned + mutableState.FlushBufferedEvents() + + persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + + s.mockShard.SetCurrentTime(s.clusterName, s.now) + _, _, err = s.timerQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) + s.Nil(err) +} + +func (s *timerQueueStandbyTaskExecutorSuite) TestProcessActivityTimeout_Pending() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowRunTimeout: timestamp.DurationPtr(200 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + }, + ) + s.Nil(err) + + wt := addWorkflowTaskScheduledEvent(mutableState) + event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) + wt.StartedEventID = event.GetEventId() + event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") + + taskqueue := "taskqueue" + activityID := "activity" + activityType := "activity type" + timerTimeout := 2 * time.Second + scheduledEvent, _ := addActivityTaskScheduledEvent(mutableState, event.GetEventId(), activityID, activityType, taskqueue, nil, + timerTimeout, timerTimeout, timerTimeout, timerTimeout) + nextEventID := scheduledEvent.GetEventId() + + timerSequence := workflow.NewTimerSequence(mutableState) + mutableState.InsertTasks[tasks.CategoryTimer] = nil + modified, err := timerSequence.CreateNextActivityTimer() + s.NoError(err) + s.True(modified) + task := mutableState.InsertTasks[tasks.CategoryTimer][0] + + timerTask := &tasks.ActivityTimeoutTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + Attempt: 1, + Version: s.version, + TaskID: int64(100), + TimeoutType: enumspb.TIMEOUT_TYPE_SCHEDULE_TO_CLOSE, + VisibilityTimestamp: task.(*tasks.ActivityTimeoutTask).VisibilityTimestamp, + EventID: event.EventId, + } + + persistenceMutableState := 
s.createPersistenceMutableState(mutableState, scheduledEvent.GetEventId(), scheduledEvent.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + + s.mockShard.SetCurrentTime(s.clusterName, s.now) + _, _, err = s.timerQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) + s.Equal(consts.ErrTaskRetry, err) + + s.mockShard.SetCurrentTime(s.clusterName, s.now.Add(s.fetchHistoryDuration)) + s.mockNDCHistoryResender.EXPECT().SendSingleWorkflowHistory( + gomock.Any(), + s.clusterName, + namespace.ID(timerTask.NamespaceID), + timerTask.WorkflowID, + timerTask.RunID, + nextEventID, + s.version, + int64(0), + int64(0), + ).Return(nil) + _, _, err = s.timerQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) + s.Equal(consts.ErrTaskRetry, err) + + s.mockShard.SetCurrentTime(s.clusterName, s.now.Add(s.discardDuration)) + _, _, err = s.timerQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) + s.Equal(consts.ErrTaskDiscarded, err) +} + +func (s *timerQueueStandbyTaskExecutorSuite) TestProcessActivityTimeout_Success() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowRunTimeout: timestamp.DurationPtr(200 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + }, + ) + s.Nil(err) + + wt := addWorkflowTaskScheduledEvent(mutableState) + event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) + wt.StartedEventID = event.GetEventId() + event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") + + identity := "identity" + taskqueue := "taskqueue" + activityID := "activity" + activityType := "activity type" + timerTimeout := 2 * time.Second + scheduledEvent, _ := addActivityTaskScheduledEvent(mutableState, event.GetEventId(), activityID, activityType, taskqueue, nil, + timerTimeout, timerTimeout, timerTimeout, timerTimeout) + startedEvent := addActivityTaskStartedEvent(mutableState, scheduledEvent.GetEventId(), identity) + + timerSequence := workflow.NewTimerSequence(mutableState) + mutableState.InsertTasks[tasks.CategoryTimer] = nil + modified, err := timerSequence.CreateNextActivityTimer() + s.NoError(err) + s.True(modified) + task := mutableState.InsertTasks[tasks.CategoryTimer][0] + + timerTask := &tasks.ActivityTimeoutTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + Attempt: 1, + Version: s.version, + TaskID: int64(100), + TimeoutType: enumspb.TIMEOUT_TYPE_SCHEDULE_TO_CLOSE, + VisibilityTimestamp: task.(*tasks.ActivityTimeoutTask).VisibilityTimestamp, + EventID: event.GetEventId(), + } + + completeEvent := 
addActivityTaskCompletedEvent(mutableState, scheduledEvent.GetEventId(), startedEvent.GetEventId(), nil, identity) + // Flush buffered events so real IDs get assigned + mutableState.FlushBufferedEvents() + + persistenceMutableState := s.createPersistenceMutableState(mutableState, completeEvent.GetEventId(), completeEvent.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + + s.mockShard.SetCurrentTime(s.clusterName, s.now) + _, _, err = s.timerQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) + s.Nil(err) +} + +func (s *timerQueueStandbyTaskExecutorSuite) TestProcessActivityTimeout_Heartbeat_Noop() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowRunTimeout: timestamp.DurationPtr(200 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + }, + ) + s.Nil(err) + + wt := addWorkflowTaskScheduledEvent(mutableState) + event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) + wt.StartedEventID = event.GetEventId() + event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") + + identity := "identity" + taskqueue := "taskqueue" + activityID := "activity" + activityType := "activity type" + timerTimeout := 2 * time.Second + heartbeatTimerTimeout := time.Second + scheduledEvent, _ := addActivityTaskScheduledEvent(mutableState, event.GetEventId(), activityID, activityType, taskqueue, nil, + timerTimeout, timerTimeout, timerTimeout, heartbeatTimerTimeout) + startedEvent := addActivityTaskStartedEvent(mutableState, scheduledEvent.GetEventId(), identity) + // Flush buffered events so real IDs get assigned + mutableState.FlushBufferedEvents() + + timerSequence := workflow.NewTimerSequence(mutableState) + mutableState.InsertTasks[tasks.CategoryTimer] = nil + modified, err := timerSequence.CreateNextActivityTimer() + s.NoError(err) + s.True(modified) + task := mutableState.InsertTasks[tasks.CategoryTimer][0] + s.Equal(enumspb.TIMEOUT_TYPE_HEARTBEAT, task.(*tasks.ActivityTimeoutTask).TimeoutType) + + timerTask := &tasks.ActivityTimeoutTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + Attempt: 1, + Version: s.version, + TaskID: int64(100), + TimeoutType: enumspb.TIMEOUT_TYPE_HEARTBEAT, + VisibilityTimestamp: time.Unix(946684800, 0).Add(-100 * time.Second), // see pendingActivityTimerHeartbeats from mutable state + EventID: scheduledEvent.GetEventId(), + } + + persistenceMutableState := s.createPersistenceMutableState(mutableState, startedEvent.GetEventId(), startedEvent.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), 
gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + + s.mockShard.SetCurrentTime(s.clusterName, s.now) + _, _, err = s.timerQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) + s.Nil(err) +} + +func (s *timerQueueStandbyTaskExecutorSuite) TestProcessActivityTimeout_Multiple_CanUpdate() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowRunTimeout: timestamp.DurationPtr(200 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + }, + ) + s.Nil(err) + + wt := addWorkflowTaskScheduledEvent(mutableState) + event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) + wt.StartedEventID = event.GetEventId() + event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") + + identity := "identity" + taskqueue := "taskqueue" + activityID1 := "activity 1" + activityType1 := "activity type 1" + timerTimeout1 := 2 * time.Second + scheduledEvent1, _ := addActivityTaskScheduledEvent(mutableState, event.GetEventId(), activityID1, activityType1, taskqueue, nil, + timerTimeout1, timerTimeout1, timerTimeout1, timerTimeout1) + startedEvent1 := addActivityTaskStartedEvent(mutableState, scheduledEvent1.GetEventId(), identity) + + activityID2 := "activity 2" + activityType2 := "activity type 2" + timerTimeout2 := 20 * time.Second + scheduledEvent2, _ := addActivityTaskScheduledEvent(mutableState, event.GetEventId(), activityID2, activityType2, taskqueue, nil, + timerTimeout2, timerTimeout2, timerTimeout2, 10*time.Second) + addActivityTaskStartedEvent(mutableState, scheduledEvent2.GetEventId(), identity) + activityInfo2 := mutableState.GetPendingActivityInfos()[scheduledEvent2.GetEventId()] + activityInfo2.TimerTaskStatus |= workflow.TimerTaskStatusCreatedHeartbeat + activityInfo2.LastHeartbeatUpdateTime = timestamp.TimePtr(time.Now().UTC()) + + timerSequence := workflow.NewTimerSequence(mutableState) + mutableState.InsertTasks[tasks.CategoryTimer] = nil + modified, err := timerSequence.CreateNextActivityTimer() + s.NoError(err) + s.True(modified) + + timerTask := &tasks.ActivityTimeoutTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + Attempt: 1, + Version: s.version, + TaskID: int64(100), + TimeoutType: enumspb.TIMEOUT_TYPE_HEARTBEAT, + VisibilityTimestamp: activityInfo2.LastHeartbeatUpdateTime.Add(-5 * time.Second), + EventID: scheduledEvent2.GetEventId(), + } + + completeEvent1 := addActivityTaskCompletedEvent(mutableState, scheduledEvent1.GetEventId(), startedEvent1.GetEventId(), nil, identity) + // Flush buffered events so real IDs get assigned + mutableState.FlushBufferedEvents() + + persistenceMutableState := s.createPersistenceMutableState(mutableState, 
completeEvent1.GetEventId(), completeEvent1.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).DoAndReturn( + func(_ context.Context, input *persistence.UpdateWorkflowExecutionRequest) (*persistence.UpdateWorkflowExecutionResponse, error) { + s.Equal(1, len(input.UpdateWorkflowMutation.Tasks[tasks.CategoryTimer])) + s.Equal(1, len(input.UpdateWorkflowMutation.UpsertActivityInfos)) + mutableState.GetExecutionInfo().LastUpdateTime = input.UpdateWorkflowMutation.ExecutionInfo.LastUpdateTime + input.RangeID = 0 + input.UpdateWorkflowMutation.ExecutionInfo.LastEventTaskId = 0 + input.UpdateWorkflowMutation.ExecutionInfo.LastFirstEventTxnId = 0 + input.UpdateWorkflowMutation.ExecutionInfo.StateTransitionCount = 0 + mutableState.GetExecutionInfo().LastEventTaskId = 0 + mutableState.GetExecutionInfo().LastFirstEventTxnId = 0 + mutableState.GetExecutionInfo().StateTransitionCount = 0 + mutableState.GetExecutionInfo().WorkflowTaskOriginalScheduledTime = input.UpdateWorkflowMutation.ExecutionInfo.WorkflowTaskOriginalScheduledTime + mutableState.GetExecutionInfo().ExecutionStats = &persistencespb.ExecutionStats{} + + s.Equal(&persistence.UpdateWorkflowExecutionRequest{ + ShardID: s.mockShard.GetShardID(), + UpdateWorkflowMutation: persistence.WorkflowMutation{ + ExecutionInfo: mutableState.GetExecutionInfo(), + ExecutionState: mutableState.GetExecutionState(), + NextEventID: mutableState.GetNextEventID(), + Tasks: input.UpdateWorkflowMutation.Tasks, + Condition: mutableState.GetNextEventID(), + UpsertActivityInfos: input.UpdateWorkflowMutation.UpsertActivityInfos, + DeleteActivityInfos: map[int64]struct{}{}, + UpsertTimerInfos: map[string]*persistencespb.TimerInfo{}, + DeleteTimerInfos: map[string]struct{}{}, + UpsertChildExecutionInfos: map[int64]*persistencespb.ChildExecutionInfo{}, + DeleteChildExecutionInfos: map[int64]struct{}{}, + UpsertRequestCancelInfos: map[int64]*persistencespb.RequestCancelInfo{}, + DeleteRequestCancelInfos: map[int64]struct{}{}, + UpsertSignalInfos: map[int64]*persistencespb.SignalInfo{}, + DeleteSignalInfos: map[int64]struct{}{}, + UpsertSignalRequestedIDs: map[string]struct{}{}, + DeleteSignalRequestedIDs: map[string]struct{}{}, + NewBufferedEvents: nil, + ClearBufferedEvents: false, + }, + UpdateWorkflowEvents: []*persistence.WorkflowEvents{}, + }, input) + return tests.UpdateWorkflowExecutionResponse, nil + }) + + s.mockShard.SetCurrentTime(s.clusterName, s.now) + _, _, err = s.timerQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) + s.Nil(err) +} + +func (s *timerQueueStandbyTaskExecutorSuite) TestProcessWorkflowTaskTimeout_Pending() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + _ = mutableState.UpdateCurrentVersion(s.version, false) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + 
TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowRunTimeout: timestamp.DurationPtr(200 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + }, + ) + s.Nil(err) + + wt := addWorkflowTaskScheduledEvent(mutableState) + startedEvent := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) + // Flush buffered events so real IDs get assigned + mutableState.FlushBufferedEvents() + nextEventID := startedEvent.GetEventId() + + timerTask := &tasks.WorkflowTaskTimeoutTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + ScheduleAttempt: 1, + Version: s.version, + TaskID: int64(100), + TimeoutType: enumspb.TIMEOUT_TYPE_START_TO_CLOSE, + VisibilityTimestamp: s.now, + EventID: wt.ScheduledEventID, + } + + persistenceMutableState := s.createPersistenceMutableState(mutableState, startedEvent.GetEventId(), startedEvent.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + + s.mockShard.SetCurrentTime(s.clusterName, s.now) + _, _, err = s.timerQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) + s.Equal(consts.ErrTaskRetry, err) + + s.mockShard.SetCurrentTime(s.clusterName, s.now.Add(s.fetchHistoryDuration)) + s.mockNDCHistoryResender.EXPECT().SendSingleWorkflowHistory( + gomock.Any(), + s.clusterName, + namespace.ID(timerTask.NamespaceID), + timerTask.WorkflowID, + timerTask.RunID, + nextEventID, + s.version, + int64(0), + int64(0), + ).Return(nil) + _, _, err = s.timerQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) + s.Equal(consts.ErrTaskRetry, err) + + s.mockShard.SetCurrentTime(s.clusterName, s.now.Add(s.discardDuration)) + _, _, err = s.timerQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) + s.Equal(consts.ErrTaskDiscarded, err) +} + +func (s *timerQueueStandbyTaskExecutorSuite) TestProcessWorkflowTaskTimeout_ScheduleToStartTimer() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + + workflowTaskScheduledEventID := int64(16384) + + timerTask := &tasks.WorkflowTaskTimeoutTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + ScheduleAttempt: 1, + Version: s.version, + TaskID: int64(100), + TimeoutType: enumspb.TIMEOUT_TYPE_SCHEDULE_TO_START, + VisibilityTimestamp: s.now, + EventID: workflowTaskScheduledEventID, + } + + s.mockShard.SetCurrentTime(s.clusterName, s.now) + _, _, err := s.timerQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) + s.Equal(nil, err) +} + +func (s *timerQueueStandbyTaskExecutorSuite) TestProcessWorkflowTaskTimeout_Success() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: 
&commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowRunTimeout: timestamp.DurationPtr(200 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + }, + ) + s.Nil(err) + + wt := addWorkflowTaskScheduledEvent(mutableState) + event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) + wt.StartedEventID = event.GetEventId() + event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") + // Flush buffered events so real IDs get assigned + mutableState.FlushBufferedEvents() + + timerTask := &tasks.WorkflowTaskTimeoutTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + ScheduleAttempt: 1, + Version: s.version, + TaskID: int64(100), + TimeoutType: enumspb.TIMEOUT_TYPE_START_TO_CLOSE, + VisibilityTimestamp: s.now, + EventID: wt.ScheduledEventID, + } + + persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + + s.mockShard.SetCurrentTime(s.clusterName, s.now) + _, _, err = s.timerQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) + s.Nil(err) +} + +func (s *timerQueueStandbyTaskExecutorSuite) TestProcessWorkflowBackoffTimer_Pending() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + event, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowRunTimeout: timestamp.DurationPtr(200 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + }, + ) + s.Nil(err) + + // Flush buffered events so real IDs get assigned + mutableState.FlushBufferedEvents() + nextEventID := event.GetEventId() + + timerTask := &tasks.WorkflowBackoffTimerTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + Version: s.version, + TaskID: int64(100), + VisibilityTimestamp: s.now, + WorkflowBackoffType: enumsspb.WORKFLOW_BACKOFF_TYPE_RETRY, + } + + persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + + s.mockShard.SetCurrentTime(s.clusterName, s.now) + _, _, err = s.timerQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) + s.Equal(consts.ErrTaskRetry, err) + + s.mockShard.SetCurrentTime(s.clusterName, time.Now().UTC().Add(s.fetchHistoryDuration)) + s.mockNDCHistoryResender.EXPECT().SendSingleWorkflowHistory( + gomock.Any(), + s.clusterName, + 
namespace.ID(timerTask.NamespaceID), + timerTask.WorkflowID, + timerTask.RunID, + nextEventID, + s.version, + int64(0), + int64(0), + ).Return(nil) + _, _, err = s.timerQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) + s.Equal(consts.ErrTaskRetry, err) + + s.mockShard.SetCurrentTime(s.clusterName, time.Now().UTC().Add(s.discardDuration)) + _, _, err = s.timerQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) + s.Equal(consts.ErrTaskDiscarded, err) +} + +func (s *timerQueueStandbyTaskExecutorSuite) TestProcessWorkflowBackoffTimer_Success() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowRunTimeout: timestamp.DurationPtr(200 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + }, + ) + s.Nil(err) + + wt := addWorkflowTaskScheduledEvent(mutableState) + // Flush buffered events so real IDs get assigned + mutableState.FlushBufferedEvents() + + timerTask := &tasks.WorkflowBackoffTimerTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + Version: s.version, + TaskID: int64(100), + VisibilityTimestamp: s.now, + WorkflowBackoffType: enumsspb.WORKFLOW_BACKOFF_TYPE_CRON, + } + + persistenceMutableState := s.createPersistenceMutableState(mutableState, wt.ScheduledEventID, wt.Version) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + + s.mockShard.SetCurrentTime(s.clusterName, s.now) + _, _, err = s.timerQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) + s.Nil(err) +} + +func (s *timerQueueStandbyTaskExecutorSuite) TestProcessWorkflowTimeout_Pending() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowRunTimeout: timestamp.DurationPtr(200 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + }, + ) + s.Nil(err) + + wt := addWorkflowTaskScheduledEvent(mutableState) + startEvent := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) + wt.StartedEventID = startEvent.GetEventId() + completionEvent := 
addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") + // Flush buffered events so real IDs get assigned + mutableState.FlushBufferedEvents() + nextEventID := completionEvent.GetEventId() + + timerTask := &tasks.WorkflowTimeoutTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + Version: s.version, + TaskID: int64(100), + VisibilityTimestamp: s.now, + } + + persistenceMutableState := s.createPersistenceMutableState(mutableState, completionEvent.GetEventId(), completionEvent.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + + s.mockShard.SetCurrentTime(s.clusterName, s.now) + _, _, err = s.timerQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) + s.Equal(consts.ErrTaskRetry, err) + + s.mockShard.SetCurrentTime(s.clusterName, s.now.Add(s.fetchHistoryDuration)) + s.mockNDCHistoryResender.EXPECT().SendSingleWorkflowHistory( + gomock.Any(), + s.clusterName, + namespace.ID(timerTask.NamespaceID), + timerTask.WorkflowID, + timerTask.RunID, + nextEventID, + s.version, + int64(0), + int64(0), + ).Return(nil) + _, _, err = s.timerQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) + s.Equal(consts.ErrTaskRetry, err) + + s.mockShard.SetCurrentTime(s.clusterName, s.now.Add(s.discardDuration)) + _, _, err = s.timerQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) + s.Equal(consts.ErrTaskDiscarded, err) +} + +func (s *timerQueueStandbyTaskExecutorSuite) TestProcessWorkflowTimeout_Success() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowRunTimeout: timestamp.DurationPtr(200 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + }, + ) + s.Nil(err) + + wt := addWorkflowTaskScheduledEvent(mutableState) + event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) + wt.StartedEventID = event.GetEventId() + event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") + event = addCompleteWorkflowEvent(mutableState, event.GetEventId(), nil) + // Flush buffered events so real IDs get assigned + mutableState.FlushBufferedEvents() + + timerTask := &tasks.WorkflowTimeoutTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + Version: s.version, + TaskID: int64(100), + VisibilityTimestamp: s.now, + } + + persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), 
gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + + s.mockShard.SetCurrentTime(s.clusterName, s.now) + _, _, err = s.timerQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) + s.Nil(err) +} + +func (s *timerQueueStandbyTaskExecutorSuite) TestProcessRetryTimeout() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + startEvent, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowRunTimeout: timestamp.DurationPtr(200 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + }, + ) + s.Nil(err) + persistenceMutableState := s.createPersistenceMutableState(mutableState, startEvent.GetEventId(), startEvent.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil).AnyTimes() + timerTask := &tasks.ActivityRetryTimerTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + Attempt: 1, + Version: s.version, + TaskID: int64(100), + VisibilityTimestamp: s.now, + EventID: int64(16384), + } + + s.mockShard.SetCurrentTime(s.clusterName, s.now) + _, _, err = s.timerQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) + s.Nil(err) +} + +func (s *timerQueueStandbyTaskExecutorSuite) TestProcessActivityRetryTimer_Noop() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowRunTimeout: timestamp.DurationPtr(200 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + }, + ) + s.Nil(err) + + wt := addWorkflowTaskScheduledEvent(mutableState) + event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) + wt.StartedEventID = event.GetEventId() + event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") + + identity := "identity" + taskqueue := "taskqueue" + activityID := "activity" + activityType := "activity type" + timerTimeout := 2 * time.Second + scheduledEvent, _ := addActivityTaskScheduledEvent(mutableState, event.GetEventId(), activityID, activityType, taskqueue, nil, + timerTimeout, timerTimeout, 
timerTimeout, timerTimeout) + startedEvent := addActivityTaskStartedEvent(mutableState, scheduledEvent.GetEventId(), identity) + + timerSequence := workflow.NewTimerSequence(mutableState) + mutableState.InsertTasks[tasks.CategoryTimer] = nil + modified, err := timerSequence.CreateNextActivityTimer() + s.NoError(err) + s.True(modified) + task := mutableState.InsertTasks[tasks.CategoryTimer][0] + + // Flush buffered events so real IDs get assigned + mutableState.FlushBufferedEvents() + + persistenceMutableState := s.createPersistenceMutableState(mutableState, startedEvent.GetEventId(), startedEvent.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil).AnyTimes() + s.mockShard.SetCurrentTime(s.clusterName, s.now) + + timerTask := &tasks.ActivityRetryTimerTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + Attempt: 2, + Version: s.version, + TaskID: int64(100), + VisibilityTimestamp: task.(*tasks.ActivityTimeoutTask).VisibilityTimestamp, + EventID: scheduledEvent.GetEventId(), + } + _, _, err = s.timerQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) + s.Nil(err) + + timerTask = &tasks.ActivityRetryTimerTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + Attempt: 2, + Version: s.version - 1, + TaskID: int64(100), + VisibilityTimestamp: task.(*tasks.ActivityTimeoutTask).VisibilityTimestamp, + EventID: scheduledEvent.GetEventId(), + } + _, _, err = s.timerQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) + s.Equal(consts.ErrTaskVersionMismatch, err) + + timerTask = &tasks.ActivityRetryTimerTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + Attempt: 0, + Version: s.version, + TaskID: int64(100), + VisibilityTimestamp: task.(*tasks.ActivityTimeoutTask).VisibilityTimestamp, + EventID: scheduledEvent.GetEventId(), + } + _, _, err = s.timerQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) + s.Nil(err) +} + +func (s *timerQueueStandbyTaskExecutorSuite) TestProcessActivityRetryTimer_ActivityCompleted() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowRunTimeout: timestamp.DurationPtr(200 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + }, + ) + s.Nil(err) + + wt := addWorkflowTaskScheduledEvent(mutableState) + event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) + wt.StartedEventID = event.GetEventId() + event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, 
wt.StartedEventID, "some random identity") + + identity := "identity" + taskqueue := "taskqueue" + activityID := "activity" + activityType := "activity type" + timerTimeout := 2 * time.Second + scheduledEvent, _ := addActivityTaskScheduledEvent(mutableState, event.GetEventId(), activityID, activityType, taskqueue, nil, + timerTimeout, timerTimeout, timerTimeout, timerTimeout) + startedEvent := addActivityTaskStartedEvent(mutableState, scheduledEvent.GetEventId(), identity) + + timerSequence := workflow.NewTimerSequence(mutableState) + mutableState.InsertTasks[tasks.CategoryTimer] = nil + modified, err := timerSequence.CreateNextActivityTimer() + s.NoError(err) + s.True(modified) + task := mutableState.InsertTasks[tasks.CategoryTimer][0] + + completeEvent := addActivityTaskCompletedEvent(mutableState, scheduledEvent.GetEventId(), startedEvent.GetEventId(), nil, identity) + // Flush buffered events so real IDs get assigned + mutableState.FlushBufferedEvents() + + persistenceMutableState := s.createPersistenceMutableState(mutableState, completeEvent.GetEventId(), completeEvent.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + + s.mockShard.SetCurrentTime(s.clusterName, s.now) + timerTask := &tasks.ActivityRetryTimerTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + Attempt: 2, + Version: s.version, + TaskID: int64(100), + VisibilityTimestamp: task.(*tasks.ActivityTimeoutTask).VisibilityTimestamp, + EventID: scheduledEvent.GetEventId(), + } + _, _, err = s.timerQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) + s.Nil(err) +} + +func (s *timerQueueStandbyTaskExecutorSuite) TestProcessActivityRetryTimer_Pending() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowRunTimeout: timestamp.DurationPtr(200 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + }, + ) + s.Nil(err) + + wt := addWorkflowTaskScheduledEvent(mutableState) + event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) + wt.StartedEventID = event.GetEventId() + event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") + + activityID := "activity" + activityType := "activity type" + timerTimeout := 2 * time.Second + scheduledEvent, _ := addActivityTaskScheduledEvent(mutableState, event.GetEventId(), activityID, activityType, taskQueueName, nil, + timerTimeout, timerTimeout, timerTimeout, timerTimeout) + + timerSequence := workflow.NewTimerSequence(mutableState) + mutableState.InsertTasks[tasks.CategoryTimer] = nil + modified, err := timerSequence.CreateNextActivityTimer() + s.NoError(err) + 
s.True(modified) + task := mutableState.InsertTasks[tasks.CategoryTimer][0] + + // Flush buffered events so real IDs get assigned + mutableState.FlushBufferedEvents() + + timerTask := &tasks.ActivityRetryTimerTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + Attempt: 2, + Version: s.version, + TaskID: int64(100), + VisibilityTimestamp: task.(*tasks.ActivityTimeoutTask).VisibilityTimestamp, + EventID: scheduledEvent.GetEventId(), + } + + persistenceMutableState := s.createPersistenceMutableState(mutableState, scheduledEvent.GetEventId(), scheduledEvent.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + + // no-op post action + s.mockShard.SetCurrentTime(s.clusterName, s.now) + _, _, err = s.timerQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) + s.Equal(consts.ErrTaskRetry, err) + + // resend history post action + s.mockShard.SetCurrentTime(s.clusterName, s.now.Add(s.fetchHistoryDuration)) + s.mockNDCHistoryResender.EXPECT().SendSingleWorkflowHistory( + gomock.Any(), + s.clusterName, + s.namespaceID, + execution.WorkflowId, + execution.RunId, + scheduledEvent.GetEventId(), + s.version, + int64(0), + int64(0), + ).Return(nil) + _, _, err = s.timerQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) + s.Equal(consts.ErrTaskRetry, err) + + // push to matching post action + s.mockShard.SetCurrentTime(s.clusterName, s.now.Add(s.discardDuration)) + s.mockMatchingClient.EXPECT().AddActivityTask( + gomock.Any(), + &matchingservice.AddActivityTaskRequest{ + NamespaceId: s.namespaceID.String(), + Execution: &execution, + TaskQueue: &taskqueuepb.TaskQueue{ + Name: taskQueueName, + Kind: enumspb.TASK_QUEUE_KIND_NORMAL, + }, + ScheduledEventId: scheduledEvent.EventId, + ScheduleToStartTimeout: &timerTimeout, + Clock: vclock.NewVectorClock(s.mockClusterMetadata.GetClusterID(), s.mockShard.GetShardID(), timerTask.TaskID), + VersionDirective: worker_versioning.MakeDirectiveForActivityTask(nil, false), + }, + gomock.Any(), + ).Return(&matchingservice.AddActivityTaskResponse{}, nil) + + _, _, err = s.timerQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) + s.Nil(err) +} + +func (s *timerQueueStandbyTaskExecutorSuite) createPersistenceMutableState( + ms workflow.MutableState, + lastEventID int64, + lastEventVersion int64, +) *persistencespb.WorkflowMutableState { + currentVersionHistory, err := versionhistory.GetCurrentVersionHistory(ms.GetExecutionInfo().GetVersionHistories()) + s.NoError(err) + err = versionhistory.AddOrUpdateVersionHistoryItem(currentVersionHistory, versionhistory.NewVersionHistoryItem( + lastEventID, lastEventVersion, + )) + s.NoError(err) + return workflow.TestCloneToProto(ms) +} + +func (s *timerQueueStandbyTaskExecutorSuite) newTaskExecutable( + task tasks.Task, +) queues.Executable { + return queues.NewExecutable( + queues.DefaultReaderId, + task, + s.timerQueueStandbyTaskExecutor, + nil, + nil, + queues.NewNoopPriorityAssigner(), + s.mockShard.GetTimeSource(), + s.mockNamespaceCache, + s.mockClusterMetadata, + nil, + metrics.NoopMetricsHandler, + func() bool { return false }, + ) +} diff -Nru temporal-1.21.5-1/src/service/history/timer_queue_task_executor_base.go temporal-1.22.5/src/service/history/timer_queue_task_executor_base.go --- 
temporal-1.21.5-1/src/service/history/timer_queue_task_executor_base.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/service/history/timer_queue_task_executor_base.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,210 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package history + +import ( + "context" + + commonpb "go.temporal.io/api/common/v1" + "go.temporal.io/api/serviceerror" + + enumsspb "go.temporal.io/server/api/enums/v1" + "go.temporal.io/server/common" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/namespace" + "go.temporal.io/server/common/persistence" + "go.temporal.io/server/common/resource" + "go.temporal.io/server/service/history/configs" + "go.temporal.io/server/service/history/consts" + "go.temporal.io/server/service/history/deletemanager" + "go.temporal.io/server/service/history/shard" + "go.temporal.io/server/service/history/tasks" + "go.temporal.io/server/service/history/workflow" + wcache "go.temporal.io/server/service/history/workflow/cache" +) + +var ( + errUnknownTimerTask = serviceerror.NewInternal("unknown timer task") + errNoTimerFired = serviceerror.NewNotFound("no expired timer to fire found") +) + +type ( + timerQueueTaskExecutorBase struct { + currentClusterName string + shard shard.Context + registry namespace.Registry + deleteManager deletemanager.DeleteManager + cache wcache.Cache + logger log.Logger + matchingRawClient resource.MatchingRawClient + metricHandler metrics.Handler + config *configs.Config + } +) + +func newTimerQueueTaskExecutorBase( + shard shard.Context, + workflowCache wcache.Cache, + deleteManager deletemanager.DeleteManager, + matchingRawClient resource.MatchingRawClient, + logger log.Logger, + metricHandler metrics.Handler, + config *configs.Config, +) *timerQueueTaskExecutorBase { + return &timerQueueTaskExecutorBase{ + currentClusterName: shard.GetClusterMetadata().GetCurrentClusterName(), + shard: shard, + registry: shard.GetNamespaceRegistry(), + cache: workflowCache, + deleteManager: deleteManager, + logger: logger, + matchingRawClient: matchingRawClient, + metricHandler: metricHandler, + config: config, + } +} + +func (t *timerQueueTaskExecutorBase) executeDeleteHistoryEventTask( + ctx context.Context, + task *tasks.DeleteHistoryEventTask, +) (retError error) { + ctx, cancel := 
context.WithTimeout(ctx, taskTimeout) + defer cancel() + + workflowExecution := commonpb.WorkflowExecution{ + WorkflowId: task.GetWorkflowID(), + RunId: task.GetRunID(), + } + + weContext, release, err := t.cache.GetOrCreateWorkflowExecution( + ctx, + namespace.ID(task.GetNamespaceID()), + workflowExecution, + workflow.LockPriorityLow, + ) + if err != nil { + return err + } + defer func() { release(retError) }() + + mutableState, err := loadMutableStateForTimerTask(ctx, weContext, task, t.metricHandler, t.logger) + switch err.(type) { + case nil: + if mutableState == nil { + return nil + } + case *serviceerror.NotFound: + // the mutable state is deleted and delete history branch operation failed. + // use task branch token to delete the leftover history branch + return t.deleteHistoryBranch(ctx, task.BranchToken) + default: + return err + } + + if mutableState.GetExecutionState().GetState() != enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED { + // If workflow is running then just ignore DeleteHistoryEventTask timer task. + // This should almost never happen because DeleteHistoryEventTask is created only for closed workflows. + // But cross DC replication can resurrect workflow and therefore DeleteHistoryEventTask should be ignored. + return nil + } + + lastWriteVersion, err := mutableState.GetLastWriteVersion() + if err != nil { + return err + } + if err := CheckTaskVersion(t.shard, t.logger, mutableState.GetNamespaceEntry(), lastWriteVersion, task.Version, task); err != nil { + return err + } + + // We should only archive if it is enabled, and the data wasn't already archived. If WorkflowDataAlreadyArchived + // flag is set to true, then the data was already archived, so we can skip it. + archiveIfEnabled := !task.WorkflowDataAlreadyArchived + return t.deleteManager.DeleteWorkflowExecutionByRetention( + ctx, + namespace.ID(task.GetNamespaceID()), + workflowExecution, + weContext, + mutableState, + archiveIfEnabled, + &task.ProcessStage, // Pass stage by reference to update it inside delete manager. 
+ ) +} + +func getWorkflowExecutionContextForTask( + ctx context.Context, + workflowCache wcache.Cache, + task tasks.Task, +) (workflow.Context, wcache.ReleaseCacheFunc, error) { + namespaceID, execution := getTaskNamespaceIDAndWorkflowExecution(task) + return getWorkflowExecutionContext( + ctx, + workflowCache, + namespaceID, + execution, + ) +} + +func getWorkflowExecutionContext( + ctx context.Context, + workflowCache wcache.Cache, + namespaceID namespace.ID, + execution commonpb.WorkflowExecution, +) (workflow.Context, wcache.ReleaseCacheFunc, error) { + // workflowCache will automatically use short context timeout when + // locking workflow for all background calls, we don't need a separate context here + weContext, release, err := workflowCache.GetOrCreateWorkflowExecution( + ctx, + namespaceID, + execution, + workflow.LockPriorityLow, + ) + if common.IsContextDeadlineExceededErr(err) { + err = consts.ErrResourceExhaustedBusyWorkflow + } + return weContext, release, err +} + +func getTaskNamespaceIDAndWorkflowExecution( + task tasks.Task, +) (namespace.ID, commonpb.WorkflowExecution) { + return namespace.ID(task.GetNamespaceID()), commonpb.WorkflowExecution{ + WorkflowId: task.GetWorkflowID(), + RunId: task.GetRunID(), + } +} + +func (t *timerQueueTaskExecutorBase) deleteHistoryBranch( + ctx context.Context, + branchToken []byte, +) error { + if len(branchToken) > 0 { + return t.shard.GetExecutionManager().DeleteHistoryBranch(ctx, &persistence.DeleteHistoryBranchRequest{ + ShardID: t.shard.GetShardID(), + BranchToken: branchToken, + }) + } + return nil +} diff -Nru temporal-1.21.5-1/src/service/history/timer_queue_task_executor_base_test.go temporal-1.22.5/src/service/history/timer_queue_task_executor_base_test.go --- temporal-1.21.5-1/src/service/history/timer_queue_task_executor_base_test.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/service/history/timer_queue_task_executor_base_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,213 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package history + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + commonpb "go.temporal.io/api/common/v1" + "go.temporal.io/api/serviceerror" + + enumsspb "go.temporal.io/server/api/enums/v1" + persistencespb "go.temporal.io/server/api/persistence/v1" + "go.temporal.io/server/common/cluster" + "go.temporal.io/server/common/definition" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/service/history/deletemanager" + "go.temporal.io/server/service/history/shard" + "go.temporal.io/server/service/history/tasks" + "go.temporal.io/server/service/history/tests" + "go.temporal.io/server/service/history/workflow" + wcache "go.temporal.io/server/service/history/workflow/cache" +) + +type ( + timerQueueTaskExecutorBaseSuite struct { + suite.Suite + *require.Assertions + + controller *gomock.Controller + mockDeleteManager *deletemanager.MockDeleteManager + mockCache *wcache.MockCache + + testShardContext *shard.ContextTest + timerQueueTaskExecutorBase *timerQueueTaskExecutorBase + } +) + +func TestTimerQueueTaskExecutorBaseSuite(t *testing.T) { + s := new(timerQueueTaskExecutorBaseSuite) + suite.Run(t, s) +} + +func (s *timerQueueTaskExecutorBaseSuite) SetupSuite() { +} + +func (s *timerQueueTaskExecutorBaseSuite) TearDownSuite() { +} + +func (s *timerQueueTaskExecutorBaseSuite) SetupTest() { + s.Assertions = require.New(s.T()) + + s.controller = gomock.NewController(s.T()) + s.mockDeleteManager = deletemanager.NewMockDeleteManager(s.controller) + s.mockCache = wcache.NewMockCache(s.controller) + + config := tests.NewDynamicConfig() + s.testShardContext = shard.NewTestContext( + s.controller, + &persistencespb.ShardInfo{ + ShardId: 0, + RangeId: 1, + }, + config, + ) + s.testShardContext.Resource.ClusterMetadata.EXPECT().GetCurrentClusterName().Return(cluster.TestCurrentClusterName).AnyTimes() + + s.timerQueueTaskExecutorBase = newTimerQueueTaskExecutorBase( + s.testShardContext, + s.mockCache, + s.mockDeleteManager, + s.testShardContext.Resource.MatchingClient, + s.testShardContext.GetLogger(), + metrics.NoopMetricsHandler, + config, + ) +} + +func (s *timerQueueTaskExecutorBaseSuite) TearDownTest() { + s.controller.Finish() +} + +func (s *timerQueueTaskExecutorBaseSuite) Test_executeDeleteHistoryEventTask_NoErr() { + for _, alreadyArchived := range []bool{false, true} { + s.Run(fmt.Sprintf("AlreadyArchived=%v", alreadyArchived), func() { + task := &tasks.DeleteHistoryEventTask{ + WorkflowKey: definition.NewWorkflowKey( + tests.NamespaceID.String(), + tests.WorkflowID, + tests.RunID, + ), + Version: 123, + TaskID: 12345, + VisibilityTimestamp: time.Now().UTC(), + WorkflowDataAlreadyArchived: alreadyArchived, + } + we := commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + } + + mockWeCtx := workflow.NewMockContext(s.controller) + mockMutableState := workflow.NewMockMutableState(s.controller) + + s.mockCache.EXPECT().GetOrCreateWorkflowExecution(gomock.Any(), tests.NamespaceID, we, workflow.LockPriorityLow).Return(mockWeCtx, wcache.NoopReleaseFn, nil) + + mockWeCtx.EXPECT().LoadMutableState(gomock.Any()).Return(mockMutableState, nil) + mockMutableState.EXPECT().GetLastWriteVersion().Return(int64(1), nil) + mockMutableState.EXPECT().GetExecutionInfo().Return(&persistencespb.WorkflowExecutionInfo{}) + mockMutableState.EXPECT().GetNextEventID().Return(int64(2)) + 
mockMutableState.EXPECT().GetNamespaceEntry().Return(tests.LocalNamespaceEntry) + s.testShardContext.Resource.ClusterMetadata.EXPECT().IsGlobalNamespaceEnabled().Return(false) + mockMutableState.EXPECT().GetExecutionState().Return(&persistencespb.WorkflowExecutionState{State: enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED}) + + archiveIfEnabled := !alreadyArchived + stage := tasks.DeleteWorkflowExecutionStageNone + s.mockDeleteManager.EXPECT().DeleteWorkflowExecutionByRetention( + gomock.Any(), + tests.NamespaceID, + we, + mockWeCtx, + mockMutableState, + archiveIfEnabled, + &stage, + ).Return(nil) + + err := s.timerQueueTaskExecutorBase.executeDeleteHistoryEventTask( + context.Background(), + task) + s.NoError(err) + }) + } +} + +func (s *timerQueueTaskExecutorBaseSuite) TestArchiveHistory_DeleteFailed() { + for _, alreadyArchived := range []bool{false, true} { + s.Run(fmt.Sprintf("AlreadyArchived=%v", alreadyArchived), func() { + task := &tasks.DeleteHistoryEventTask{ + WorkflowKey: definition.NewWorkflowKey( + tests.NamespaceID.String(), + tests.WorkflowID, + tests.RunID, + ), + Version: 123, + TaskID: 12345, + VisibilityTimestamp: time.Now().UTC(), + WorkflowDataAlreadyArchived: alreadyArchived, + } + we := commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + } + + mockWeCtx := workflow.NewMockContext(s.controller) + mockMutableState := workflow.NewMockMutableState(s.controller) + + s.mockCache.EXPECT().GetOrCreateWorkflowExecution(gomock.Any(), tests.NamespaceID, we, workflow.LockPriorityLow).Return(mockWeCtx, wcache.NoopReleaseFn, nil) + + mockWeCtx.EXPECT().LoadMutableState(gomock.Any()).Return(mockMutableState, nil) + mockMutableState.EXPECT().GetLastWriteVersion().Return(int64(1), nil) + mockMutableState.EXPECT().GetExecutionInfo().Return(&persistencespb.WorkflowExecutionInfo{}) + mockMutableState.EXPECT().GetNextEventID().Return(int64(2)) + mockMutableState.EXPECT().GetNamespaceEntry().Return(tests.LocalNamespaceEntry) + s.testShardContext.Resource.ClusterMetadata.EXPECT().IsGlobalNamespaceEnabled().Return(false) + mockMutableState.EXPECT().GetExecutionState().Return(&persistencespb.WorkflowExecutionState{State: enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED}) + + archiveIfEnabled := !alreadyArchived + stage := tasks.DeleteWorkflowExecutionStageNone + s.mockDeleteManager.EXPECT().DeleteWorkflowExecutionByRetention( + gomock.Any(), + tests.NamespaceID, + we, + mockWeCtx, + mockMutableState, + archiveIfEnabled, + &stage, + ).Return(serviceerror.NewInternal("test error")) + + err := s.timerQueueTaskExecutorBase.executeDeleteHistoryEventTask( + context.Background(), + task) + s.Error(err) + }) + } +} diff -Nru temporal-1.21.5-1/src/service/history/transferQueueActiveTaskExecutor.go temporal-1.22.5/src/service/history/transferQueueActiveTaskExecutor.go --- temporal-1.21.5-1/src/service/history/transferQueueActiveTaskExecutor.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/transferQueueActiveTaskExecutor.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,1603 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package history - -import ( - "context" - "fmt" - - "github.com/pborman/uuid" - commonpb "go.temporal.io/api/common/v1" - enumspb "go.temporal.io/api/enums/v1" - historypb "go.temporal.io/api/history/v1" - "go.temporal.io/api/serviceerror" - taskqueuepb "go.temporal.io/api/taskqueue/v1" - workflowpb "go.temporal.io/api/workflow/v1" - "go.temporal.io/api/workflowservice/v1" - - clockspb "go.temporal.io/server/api/clock/v1" - "go.temporal.io/server/api/historyservice/v1" - "go.temporal.io/server/api/matchingservice/v1" - persistencespb "go.temporal.io/server/api/persistence/v1" - workflowspb "go.temporal.io/server/api/workflow/v1" - "go.temporal.io/server/common" - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/log/tag" - "go.temporal.io/server/common/metrics" - "go.temporal.io/server/common/namespace" - "go.temporal.io/server/common/persistence" - "go.temporal.io/server/common/persistence/versionhistory" - "go.temporal.io/server/common/persistence/visibility/manager" - "go.temporal.io/server/common/primitives/timestamp" - "go.temporal.io/server/common/rpc" - "go.temporal.io/server/common/sdk" - serviceerrors "go.temporal.io/server/common/serviceerror" - "go.temporal.io/server/service/history/configs" - "go.temporal.io/server/service/history/consts" - "go.temporal.io/server/service/history/ndc" - "go.temporal.io/server/service/history/queues" - "go.temporal.io/server/service/history/shard" - "go.temporal.io/server/service/history/tasks" - "go.temporal.io/server/service/history/vclock" - "go.temporal.io/server/service/history/workflow" - wcache "go.temporal.io/server/service/history/workflow/cache" - "go.temporal.io/server/service/worker/archiver" - "go.temporal.io/server/service/worker/parentclosepolicy" -) - -type ( - transferQueueActiveTaskExecutor struct { - *transferQueueTaskExecutorBase - - workflowResetter ndc.WorkflowResetter - parentClosePolicyClient parentclosepolicy.Client - } -) - -func newTransferQueueActiveTaskExecutor( - shard shard.Context, - workflowCache wcache.Cache, - archivalClient archiver.Client, - sdkClientFactory sdk.ClientFactory, - logger log.Logger, - metricProvider metrics.Handler, - config *configs.Config, - matchingClient matchingservice.MatchingServiceClient, - visibilityManager manager.VisibilityManager, -) queues.Executor { - return &transferQueueActiveTaskExecutor{ - transferQueueTaskExecutorBase: newTransferQueueTaskExecutorBase( - shard, - workflowCache, - 
archivalClient, - logger, - metricProvider, - matchingClient, - visibilityManager, - ), - workflowResetter: ndc.NewWorkflowResetter( - shard, - workflowCache, - logger, - ), - parentClosePolicyClient: parentclosepolicy.NewClient( - shard.GetMetricsHandler(), - shard.GetLogger(), - sdkClientFactory, - config.NumParentClosePolicySystemWorkflows(), - ), - } -} - -func (t *transferQueueActiveTaskExecutor) Execute( - ctx context.Context, - executable queues.Executable, -) ([]metrics.Tag, bool, error) { - task := executable.GetTask() - taskType := queues.GetActiveTransferTaskTypeTagValue(task) - namespaceTag, replicationState := getNamespaceTagAndReplicationStateByID( - t.shard.GetNamespaceRegistry(), - task.GetNamespaceID(), - ) - metricsTags := []metrics.Tag{ - namespaceTag, - metrics.TaskTypeTag(taskType), - metrics.OperationTag(taskType), // for backward compatibility - } - - if replicationState == enumspb.REPLICATION_STATE_HANDOVER { - // TODO: exclude task types here if we believe it's safe & necessary to execute - // them during namespace handover. - // TODO: move this logic to queues.Executable when metrics tag doesn't need to - // be returned from task executor - return metricsTags, true, consts.ErrNamespaceHandover - } - - var err error - switch task := task.(type) { - case *tasks.ActivityTask: - err = t.processActivityTask(ctx, task) - case *tasks.WorkflowTask: - err = t.processWorkflowTask(ctx, task) - case *tasks.CloseExecutionTask: - err = t.processCloseExecution(ctx, task) - case *tasks.CancelExecutionTask: - err = t.processCancelExecution(ctx, task) - case *tasks.SignalExecutionTask: - err = t.processSignalExecution(ctx, task) - case *tasks.StartChildExecutionTask: - err = t.processStartChildExecution(ctx, task) - case *tasks.ResetWorkflowTask: - err = t.processResetWorkflow(ctx, task) - case *tasks.DeleteExecutionTask: - err = t.processDeleteExecutionTask(ctx, task) - default: - err = errUnknownTransferTask - } - - return metricsTags, true, err -} - -func (t *transferQueueActiveTaskExecutor) processDeleteExecutionTask(ctx context.Context, - task *tasks.DeleteExecutionTask) error { - return t.transferQueueTaskExecutorBase.processDeleteExecutionTask(ctx, task, - t.config.TransferProcessorEnsureCloseBeforeDelete()) -} - -func (t *transferQueueActiveTaskExecutor) processActivityTask( - ctx context.Context, - task *tasks.ActivityTask, -) (retError error) { - ctx, cancel := context.WithTimeout(ctx, taskTimeout) - defer cancel() - - weContext, release, err := getWorkflowExecutionContextForTask(ctx, t.cache, task) - if err != nil { - return err - } - defer func() { release(retError) }() - - mutableState, err := loadMutableStateForTransferTask(ctx, weContext, task, t.metricHandler, t.logger) - if err != nil { - return err - } - if mutableState == nil || !mutableState.IsWorkflowExecutionRunning() { - return nil - } - - ai, ok := mutableState.GetActivityInfo(task.ScheduledEventID) - if !ok { - return nil - } - err = CheckTaskVersion(t.shard, t.logger, mutableState.GetNamespaceEntry(), ai.Version, task.Version, task) - if err != nil { - return err - } - - timeout := timestamp.DurationValue(ai.ScheduleToStartTimeout) - directive := common.MakeVersionDirectiveForActivityTask(mutableState.GetWorkerVersionStamp(), ai.UseCompatibleVersion) - - // NOTE: do not access anything related mutable state after this lock release - // release the context lock since we no longer need mutable state and - // the rest of logic is making RPC call, which takes time. 
- release(nil) - return t.pushActivity(ctx, task, &timeout, directive) -} - -func (t *transferQueueActiveTaskExecutor) processWorkflowTask( - ctx context.Context, - transferTask *tasks.WorkflowTask, -) (retError error) { - ctx, cancel := context.WithTimeout(ctx, taskTimeout) - defer cancel() - - weContext, release, err := getWorkflowExecutionContextForTask(ctx, t.cache, transferTask) - if err != nil { - return err - } - defer func() { release(retError) }() - - mutableState, err := loadMutableStateForTransferTask(ctx, weContext, transferTask, t.metricHandler, t.logger) - if err != nil { - return err - } - if mutableState == nil || !mutableState.IsWorkflowExecutionRunning() { - return nil - } - - workflowTask := mutableState.GetWorkflowTaskByID(transferTask.ScheduledEventID) - if workflowTask == nil { - return nil - } - err = CheckTaskVersion(t.shard, t.logger, mutableState.GetNamespaceEntry(), workflowTask.Version, transferTask.Version, transferTask) - if err != nil { - return err - } - - // Task queue from transfer task (not current one from mutable state) must be used here. - // If current task queue becomes sticky since this transfer task was created, - // it can't be used here, because timeout timer was not created for it, - // because it used to be non-sticky when this transfer task was created . - taskQueue, scheduleToStartTimeout := mutableState.TaskQueueScheduleToStartTimeout(transferTask.TaskQueue) - - normalTaskQueueName := mutableState.GetExecutionInfo().TaskQueue - - directive := common.MakeVersionDirectiveForWorkflowTask( - mutableState.GetWorkerVersionStamp(), - mutableState.GetLastWorkflowTaskStartedEventID(), - ) - - // NOTE: Do not access mutableState after this lock is released. - // It is important to release the workflow lock here, because pushWorkflowTask will call matching, - // which will call history back (with RecordWorkflowTaskStarted), and it will try to get workflow lock again. - release(nil) - - err = t.pushWorkflowTask(ctx, transferTask, taskQueue, scheduleToStartTimeout, directive) - - if _, ok := err.(*serviceerrors.StickyWorkerUnavailable); ok { - // sticky worker is unavailable, switch to original normal task queue - taskQueue = &taskqueuepb.TaskQueue{ - // do not use task.TaskQueue which is sticky, use original normal task queue from mutable state - Name: normalTaskQueueName, - Kind: enumspb.TASK_QUEUE_KIND_NORMAL, - } - - // Continue to use sticky schedule_to_start timeout as TTL for the matching task. Because the schedule_to_start - // timeout timer task is already created which will timeout this task if no worker pick it up in 5s anyway. - // There is no need to reset sticky, because if this task is picked by new worker, the new worker will reset - // the sticky queue to a new one. However, if worker is completely down, that schedule_to_start timeout task - // will re-create a new non-sticky task and reset sticky. 
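The sticky fallback described in the comment above amounts to a retry keyed on one error type: push to the task queue recorded on the transfer task first, and if matching reports the sticky worker as unavailable, push once more to the normal task queue. A hedged sketch of that shape (the helper name and the callback parameter are illustrative; the types are the ones already imported by this file):

    // Hypothetical wrapper, not part of the source: retries a push against the
    // normal queue only when the sticky worker is gone.
    func pushWithStickyFallback(
        push func(*taskqueuepb.TaskQueue) error,
        firstChoice *taskqueuepb.TaskQueue,
        normalTaskQueueName string,
    ) error {
        err := push(firstChoice)
        if _, ok := err.(*serviceerrors.StickyWorkerUnavailable); !ok {
            return err // nil, a retryable error, or some other terminal error
        }
        return push(&taskqueuepb.TaskQueue{
            Name: normalTaskQueueName,
            Kind: enumspb.TASK_QUEUE_KIND_NORMAL,
        })
    }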
- err = t.pushWorkflowTask(ctx, transferTask, taskQueue, scheduleToStartTimeout, directive) - } - return err -} - -func (t *transferQueueActiveTaskExecutor) processCloseExecution( - ctx context.Context, - task *tasks.CloseExecutionTask, -) (retError error) { - ctx, cancel := context.WithTimeout(ctx, taskTimeout) - defer cancel() - - weContext, release, err := getWorkflowExecutionContextForTask(ctx, t.cache, task) - if err != nil { - return err - } - defer func() { release(retError) }() - - mutableState, err := loadMutableStateForTransferTask(ctx, weContext, task, t.metricHandler, t.logger) - if err != nil { - return err - } - if mutableState == nil || mutableState.IsWorkflowExecutionRunning() { - return nil - } - - // DeleteAfterClose is set to true when this close execution task was generated as part of delete open workflow execution procedure. - // Delete workflow execution is started by user API call and should be done regardless of current workflow version. - if !task.DeleteAfterClose { - lastWriteVersion, err := mutableState.GetLastWriteVersion() - if err != nil { - return err - } - err = CheckTaskVersion(t.shard, t.logger, mutableState.GetNamespaceEntry(), lastWriteVersion, task.Version, task) - if err != nil { - return err - } - } - - workflowExecution := commonpb.WorkflowExecution{ - WorkflowId: task.GetWorkflowID(), - RunId: task.GetRunID(), - } - executionInfo := mutableState.GetExecutionInfo() - executionState := mutableState.GetExecutionState() - var completionEvent *historypb.HistoryEvent // needed to report close event to parent workflow - replyToParentWorkflow := mutableState.HasParentExecution() && executionInfo.NewExecutionRunId == "" - if replyToParentWorkflow { - // only load close event if needed. - completionEvent, err = mutableState.GetCompletionEvent(ctx) - if err != nil { - return err - } - replyToParentWorkflow = replyToParentWorkflow && !ndc.IsTerminatedByResetter(completionEvent) - } - parentNamespaceID := executionInfo.ParentNamespaceId - parentWorkflowID := executionInfo.ParentWorkflowId - parentRunID := executionInfo.ParentRunId - parentInitiatedID := executionInfo.ParentInitiatedId - parentInitiatedVersion := executionInfo.ParentInitiatedVersion - var parentClock *clockspb.VectorClock - if executionInfo.ParentClock != nil { - parentClock = vclock.NewVectorClock( - executionInfo.ParentClock.ClusterId, - executionInfo.ParentClock.ShardId, - executionInfo.ParentClock.Clock, - ) - } - - workflowTypeName := executionInfo.WorkflowTypeName - workflowCloseTime, err := mutableState.GetWorkflowCloseTime(ctx) - if err != nil { - return err - } - - workflowStatus := executionState.Status - workflowHistoryLength := mutableState.GetNextEventID() - 1 - - workflowStartTime := timestamp.TimeValue(mutableState.GetExecutionInfo().GetStartTime()) - workflowExecutionTime := timestamp.TimeValue(mutableState.GetExecutionInfo().GetExecutionTime()) - visibilityMemo := getWorkflowMemo(copyMemo(executionInfo.Memo)) - searchAttr := getSearchAttributes(copySearchAttributes(executionInfo.SearchAttributes)) - namespaceName := mutableState.GetNamespaceEntry().Name() - children := copyChildWorkflowInfos(mutableState.GetPendingChildExecutionInfos()) - - // NOTE: do not access anything related mutable state after this lock release. - // Release lock immediately since mutable state is not needed - // and the rest of logic is RPC calls, which can take time. 
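Further down, when the close is reported back to a parent execution, a missing parent workflow or parent namespace is swallowed rather than retried, since there is nothing left to notify. Read as a helper, that error handling looks like the following (illustrative only; the function does not exist in the source):

    // Treats "parent already gone" as success; anything else is returned so the
    // transfer task retries.
    func ignoreIfParentGone(err error) error {
        switch err.(type) {
        case nil, *serviceerror.NotFound, *serviceerror.NamespaceNotFound:
            return nil
        default:
            return err
        }
    }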
- release(nil) - - if !task.CanSkipVisibilityArchival { - err = t.archiveVisibility( - ctx, - namespace.ID(task.NamespaceID), - task.WorkflowID, - task.RunID, - workflowTypeName, - workflowStartTime, - workflowExecutionTime, - *workflowCloseTime, - workflowStatus, - workflowHistoryLength, - visibilityMemo, - searchAttr, - ) - if err != nil { - return err - } - } - - // Communicate the result to parent execution if this is Child Workflow execution - if replyToParentWorkflow { - _, err := t.historyClient.RecordChildExecutionCompleted(ctx, &historyservice.RecordChildExecutionCompletedRequest{ - NamespaceId: parentNamespaceID, - WorkflowExecution: &commonpb.WorkflowExecution{ - WorkflowId: parentWorkflowID, - RunId: parentRunID, - }, - ParentInitiatedId: parentInitiatedID, - ParentInitiatedVersion: parentInitiatedVersion, - CompletedExecution: &workflowExecution, - Clock: parentClock, - CompletionEvent: completionEvent, - }) - switch err.(type) { - case nil: - // noop - case *serviceerror.NotFound, *serviceerror.NamespaceNotFound: - // parent gone, noop - default: - return err - } - } - - err = t.processParentClosePolicy( - ctx, - namespaceName.String(), - workflowExecution, - children, - ) - - if err != nil { - // This is some retryable error, not NotFound or NamespaceNotFound. - return err - } - - if task.DeleteAfterClose { - err = t.deleteExecution( - ctx, - task, - // Visibility is not updated (to avoid race condition for visibility tasks) and workflow execution is - // still open there. - true, - false, - &task.DeleteProcessStage, - ) - } - return err -} - -func (t *transferQueueActiveTaskExecutor) processCancelExecution( - ctx context.Context, - task *tasks.CancelExecutionTask, -) (retError error) { - ctx, cancel := context.WithTimeout(ctx, taskTimeout) - defer cancel() - - weContext, release, err := getWorkflowExecutionContextForTask(ctx, t.cache, task) - if err != nil { - return err - } - defer func() { release(retError) }() - - mutableState, err := loadMutableStateForTransferTask(ctx, weContext, task, t.metricHandler, t.logger) - if err != nil { - return err - } - if mutableState == nil || !mutableState.IsWorkflowExecutionRunning() { - return nil - } - - requestCancelInfo, ok := mutableState.GetRequestCancelInfo(task.InitiatedEventID) - if !ok { - return nil - } - err = CheckTaskVersion(t.shard, t.logger, mutableState.GetNamespaceEntry(), requestCancelInfo.Version, task.Version, task) - if err != nil { - return err - } - - initiatedEvent, err := mutableState.GetRequesteCancelExternalInitiatedEvent(ctx, task.InitiatedEventID) - if err != nil { - return err - } - attributes := initiatedEvent.GetRequestCancelExternalWorkflowExecutionInitiatedEventAttributes() - - targetNamespaceEntry, err := t.registry.GetNamespaceByID(namespace.ID(task.TargetNamespaceID)) - if err != nil { - if _, isNotFound := err.(*serviceerror.NamespaceNotFound); !isNotFound { - return err - } - // It is possible that target namespace got deleted. Record failure. - t.logger.Debug("Target namespace is not found.", tag.WorkflowNamespaceID(task.TargetNamespaceID)) - err = t.requestCancelExternalExecutionFailed( - ctx, - task, - weContext, - namespace.Name(task.TargetNamespaceID), // Use ID as namespace name because namespace is already deleted and name is used only for history. 
- namespace.ID(task.TargetNamespaceID), - task.TargetWorkflowID, - task.TargetRunID, - enumspb.CANCEL_EXTERNAL_WORKFLOW_EXECUTION_FAILED_CAUSE_NAMESPACE_NOT_FOUND) - return err - } - targetNamespaceName := targetNamespaceEntry.Name() - - // handle workflow cancel itself - if task.NamespaceID == task.TargetNamespaceID && task.WorkflowID == task.TargetWorkflowID { - // it does not matter if the run ID is a mismatch - err = t.requestCancelExternalExecutionFailed( - ctx, - task, - weContext, - targetNamespaceName, - namespace.ID(task.TargetNamespaceID), - task.TargetWorkflowID, - task.TargetRunID, - enumspb.CANCEL_EXTERNAL_WORKFLOW_EXECUTION_FAILED_CAUSE_EXTERNAL_WORKFLOW_EXECUTION_NOT_FOUND) - return err - } - - if err = t.requestCancelExternalExecution( - ctx, - task, - targetNamespaceName, - requestCancelInfo, - attributes, - ); err != nil { - t.logger.Debug(fmt.Sprintf("Failed to cancel external workflow execution. Error: %v", err)) - - // Check to see if the error is non-transient, in which case add RequestCancelFailed - // event and complete transfer task by returning nil error. - if common.IsServiceTransientError(err) || common.IsContextDeadlineExceededErr(err) { - // for retryable error just return - return err - } - var failedCause enumspb.CancelExternalWorkflowExecutionFailedCause - switch err.(type) { - case *serviceerror.NotFound: - failedCause = enumspb.CANCEL_EXTERNAL_WORKFLOW_EXECUTION_FAILED_CAUSE_EXTERNAL_WORKFLOW_EXECUTION_NOT_FOUND - case *serviceerror.NamespaceNotFound: - failedCause = enumspb.CANCEL_EXTERNAL_WORKFLOW_EXECUTION_FAILED_CAUSE_NAMESPACE_NOT_FOUND - default: - t.logger.Error("Unexpected error type returned from RequestCancelWorkflowExecution API call.", tag.ErrorType(err), tag.Error(err)) - return err - } - return t.requestCancelExternalExecutionFailed( - ctx, - task, - weContext, - targetNamespaceName, - namespace.ID(task.TargetNamespaceID), - task.TargetWorkflowID, - task.TargetRunID, - failedCause, - ) - } - - // Record ExternalWorkflowExecutionCancelRequested in source execution - return t.requestCancelExternalExecutionCompleted( - ctx, - task, - weContext, - targetNamespaceName, - namespace.ID(task.TargetNamespaceID), - task.TargetWorkflowID, - task.TargetRunID, - ) -} - -func (t *transferQueueActiveTaskExecutor) processSignalExecution( - ctx context.Context, - task *tasks.SignalExecutionTask, -) (retError error) { - ctx, cancel := context.WithTimeout(ctx, taskTimeout) - defer cancel() - - weContext, release, err := getWorkflowExecutionContextForTask(ctx, t.cache, task) - if err != nil { - return err - } - defer func() { release(retError) }() - - mutableState, err := loadMutableStateForTransferTask(ctx, weContext, task, t.metricHandler, t.logger) - if err != nil { - return err - } - if mutableState == nil || !mutableState.IsWorkflowExecutionRunning() { - return nil - } - - signalInfo, ok := mutableState.GetSignalInfo(task.InitiatedEventID) - if !ok { - // TODO: here we should also RemoveSignalMutableState from target workflow - // Otherwise, target SignalRequestID still can leak if shard restart after signalExternalExecutionCompleted - // To do that, probably need to add the SignalRequestID in transfer task. 
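The cancel, signal, and start-child paths all classify RPC failures the same way: transient errors (including context deadline) are returned so the queue retries the task, while a small set of terminal errors is translated into a failed-cause value and recorded as a failure event in the source workflow's history. A sketch of that classification for the cancel case, with an assumed helper name (every other identifier appears in this file):

    // Returns the failure cause to record and true for terminal errors; false
    // means the error should be returned as-is so the task retries.
    func classifyCancelError(err error) (enumspb.CancelExternalWorkflowExecutionFailedCause, bool) {
        if common.IsServiceTransientError(err) || common.IsContextDeadlineExceededErr(err) {
            return 0, false
        }
        switch err.(type) {
        case *serviceerror.NotFound:
            return enumspb.CANCEL_EXTERNAL_WORKFLOW_EXECUTION_FAILED_CAUSE_EXTERNAL_WORKFLOW_EXECUTION_NOT_FOUND, true
        case *serviceerror.NamespaceNotFound:
            return enumspb.CANCEL_EXTERNAL_WORKFLOW_EXECUTION_FAILED_CAUSE_NAMESPACE_NOT_FOUND, true
        default:
            return 0, false
        }
    }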
- return nil - } - err = CheckTaskVersion(t.shard, t.logger, mutableState.GetNamespaceEntry(), signalInfo.Version, task.Version, task) - if err != nil { - return err - } - - initiatedEvent, err := mutableState.GetSignalExternalInitiatedEvent(ctx, task.InitiatedEventID) - if err != nil { - return err - } - attributes := initiatedEvent.GetSignalExternalWorkflowExecutionInitiatedEventAttributes() - - targetNamespaceEntry, err := t.registry.GetNamespaceByID(namespace.ID(task.TargetNamespaceID)) - if err != nil { - if _, isNotFound := err.(*serviceerror.NamespaceNotFound); !isNotFound { - return err - } - // It is possible that target namespace got deleted. Record failure. - t.logger.Debug("Target namespace is not found.", tag.WorkflowNamespaceID(task.TargetNamespaceID)) - return t.signalExternalExecutionFailed( - ctx, - task, - weContext, - namespace.Name(task.TargetNamespaceID), // Use ID as namespace name because namespace is already deleted and name is used only for history. - namespace.ID(task.TargetNamespaceID), - task.TargetWorkflowID, - task.TargetRunID, - attributes.Control, - enumspb.SIGNAL_EXTERNAL_WORKFLOW_EXECUTION_FAILED_CAUSE_NAMESPACE_NOT_FOUND, - ) - } - targetNamespaceName := targetNamespaceEntry.Name() - - // handle workflow signal itself - if task.NamespaceID == task.TargetNamespaceID && task.WorkflowID == task.TargetWorkflowID { - // it does not matter if the run ID is a mismatch - return t.signalExternalExecutionFailed( - ctx, - task, - weContext, - targetNamespaceName, - namespace.ID(task.TargetNamespaceID), - task.TargetWorkflowID, - task.TargetRunID, - attributes.Control, - enumspb.SIGNAL_EXTERNAL_WORKFLOW_EXECUTION_FAILED_CAUSE_EXTERNAL_WORKFLOW_EXECUTION_NOT_FOUND, - ) - } - - if err = t.signalExternalExecution( - ctx, - task, - targetNamespaceName, - signalInfo, - attributes, - ); err != nil { - t.logger.Debug("Failed to signal external workflow execution", tag.Error(err)) - - // Check to see if the error is non-transient, in which case add SignalFailed - // event and complete transfer task by returning nil error. - if common.IsServiceTransientError(err) || common.IsContextDeadlineExceededErr(err) { - // for retryable error just return - return err - } - var failedCause enumspb.SignalExternalWorkflowExecutionFailedCause - switch err.(type) { - case *serviceerror.NotFound: - failedCause = enumspb.SIGNAL_EXTERNAL_WORKFLOW_EXECUTION_FAILED_CAUSE_EXTERNAL_WORKFLOW_EXECUTION_NOT_FOUND - case *serviceerror.NamespaceNotFound: - failedCause = enumspb.SIGNAL_EXTERNAL_WORKFLOW_EXECUTION_FAILED_CAUSE_NAMESPACE_NOT_FOUND - case *serviceerror.InvalidArgument: - failedCause = enumspb.SIGNAL_EXTERNAL_WORKFLOW_EXECUTION_FAILED_CAUSE_SIGNAL_COUNT_LIMIT_EXCEEDED - default: - t.logger.Error("Unexpected error type returned from SignalWorkflowExecution API call.", tag.ErrorType(err), tag.Error(err)) - return err - } - return t.signalExternalExecutionFailed( - ctx, - task, - weContext, - targetNamespaceName, - namespace.ID(task.TargetNamespaceID), - task.TargetWorkflowID, - task.TargetRunID, - attributes.Control, - failedCause, - ) - } - - err = t.signalExternalExecutionCompleted( - ctx, - task, - weContext, - targetNamespaceName, - namespace.ID(task.TargetNamespaceID), - task.TargetWorkflowID, - task.TargetRunID, - attributes.Control, - ) - if err != nil { - return err - } - - signalRequestID := signalInfo.GetRequestId() - - // release the weContext lock since we no longer need mutable state and - // the rest of logic is making RPC call, which takes time. 
- release(retError) - // remove signalRequestedID from target workflow, after Signal detail is removed from source workflow - _, err = t.historyClient.RemoveSignalMutableState(ctx, &historyservice.RemoveSignalMutableStateRequest{ - NamespaceId: task.TargetNamespaceID, - WorkflowExecution: &commonpb.WorkflowExecution{ - WorkflowId: task.TargetWorkflowID, - RunId: task.TargetRunID, - }, - RequestId: signalRequestID, - }) - return err -} - -func (t *transferQueueActiveTaskExecutor) processStartChildExecution( - ctx context.Context, - task *tasks.StartChildExecutionTask, -) (retError error) { - ctx, cancel := context.WithTimeout(ctx, taskTimeout) - defer cancel() - - weContext, release, err := getWorkflowExecutionContextForTask(ctx, t.cache, task) - if err != nil { - return err - } - defer func() { release(retError) }() - - mutableState, err := loadMutableStateForTransferTask(ctx, weContext, task, t.metricHandler, t.logger) - if err != nil { - return err - } - if mutableState == nil { - return nil - } - - childInfo, ok := mutableState.GetChildExecutionInfo(task.InitiatedEventID) - if !ok { - return nil - } - err = CheckTaskVersion(t.shard, t.logger, mutableState.GetNamespaceEntry(), childInfo.Version, task.Version, task) - if err != nil { - return err - } - - // workflow running or not, child started or not, parent close policy is abandon or not - // 8 cases in total - workflowRunning := mutableState.IsWorkflowExecutionRunning() - childStarted := childInfo.StartedEventId != common.EmptyEventID - if !workflowRunning && (!childStarted || childInfo.ParentClosePolicy != enumspb.PARENT_CLOSE_POLICY_ABANDON) { - // three cases here: - // case 1: workflow not running, child started, parent close policy is not abandon - // case 2: workflow not running, child not started, parent close policy is not abandon - // case 3: workflow not running, child not started, parent close policy is abandon - // - // NOTE: ideally for case 3, we should continue to start child. However, with current start child - // and standby start child verification logic, we can't do that because: - // 1. Once workflow is closed, we can't update mutable state or record child started event. - // If the RPC call for scheduling first workflow task times out but the call actually succeeds on child workflow. - // Then the child workflow can run, complete and another unrelated workflow can reuse this workflowID. - // Now when the start child task retries, we can't rely on requestID to dedup the start child call. (We can use runID instead of requestID to dedup) - // 2. No update to mutable state and child started event means we are not able to replicate the information - // to the standby cluster, so standby start child logic won't be able to verify the child has started. - // To resolve the issue above, we need to - // 1. Start child workflow and schedule the first workflow task in one transaction. Use runID to perform deduplication - // 2. Standby start child logic need to verify if child worflow actually started instead of relying on the information - // in parent mutable state. - return nil - } - - // ChildExecution already started, just create WorkflowTask and complete transfer task - // If parent already closed, since child workflow started event already written to history, - // still schedule the workflowTask if the parent close policy is Abandon. - // If parent close policy cancel or terminate, parent close policy will be applied in another - // transfer task. 
- // case 4, 5: workflow started, child started, parent close policy is or is not abandon - // case 6: workflow closed, child started, parent close policy is abandon - if childStarted { - childExecution := &commonpb.WorkflowExecution{ - WorkflowId: childInfo.StartedWorkflowId, - RunId: childInfo.StartedRunId, - } - childClock := childInfo.Clock - // NOTE: do not access anything related mutable state after this lock release - // release the context lock since we no longer need mutable state and - // the rest of logic is making RPC call, which takes time. - release(nil) - - parentClock, err := t.shard.NewVectorClock() - if err != nil { - return err - } - return t.createFirstWorkflowTask(ctx, task.TargetNamespaceID, childExecution, parentClock, childClock) - } - - // remaining 2 cases: - // case 7, 8: workflow running, child not started, parent close policy is or is not abandon - - initiatedEvent, err := mutableState.GetChildExecutionInitiatedEvent(ctx, task.InitiatedEventID) - if err != nil { - return err - } - attributes := initiatedEvent.GetStartChildWorkflowExecutionInitiatedEventAttributes() - - var parentNamespaceName namespace.Name - if namespaceEntry, err := t.registry.GetNamespaceByID(namespace.ID(task.NamespaceID)); err != nil { - if _, isNotFound := err.(*serviceerror.NamespaceNotFound); !isNotFound { - return err - } - // It is possible that the parent namespace got deleted. Use namespaceID instead as this is only needed for the history event. - parentNamespaceName = namespace.Name(task.NamespaceID) - } else { - parentNamespaceName = namespaceEntry.Name() - } - - var targetNamespaceName namespace.Name - if namespaceEntry, err := t.registry.GetNamespaceByID(namespace.ID(task.TargetNamespaceID)); err != nil { - if _, isNotFound := err.(*serviceerror.NamespaceNotFound); !isNotFound { - return err - } - // It is possible that target namespace got deleted. Record failure. 
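The parent namespace lookup just above tolerates a deleted namespace by falling back to the ID, and the cancel and signal handlers earlier do the same when recording failure events, because the resolved name is only written into history. That pattern generalizes to a small helper (the helper name is hypothetical, and namespace.Registry is assumed to be the registry interface behind t.registry):

    // Resolves a namespace name for history events, falling back to the ID when
    // the namespace has already been deleted.
    func namespaceNameOrID(registry namespace.Registry, id namespace.ID) (namespace.Name, error) {
        entry, err := registry.GetNamespaceByID(id)
        switch err.(type) {
        case nil:
            return entry.Name(), nil
        case *serviceerror.NamespaceNotFound:
            return namespace.Name(id.String()), nil
        default:
            return "", err
        }
    }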
- t.logger.Debug("Target namespace is not found.", tag.WorkflowNamespaceID(task.TargetNamespaceID)) - err = t.recordStartChildExecutionFailed( - ctx, - task, - weContext, - attributes, - enumspb.START_CHILD_WORKFLOW_EXECUTION_FAILED_CAUSE_NAMESPACE_NOT_FOUND, - ) - return err - } else { - targetNamespaceName = namespaceEntry.Name() - } - - // copy version stamp from parent to child if: - // - command says to use compatible version - // - parent is using versioning - var sourceVersionStamp *commonpb.WorkerVersionStamp - if attributes.UseCompatibleVersion { - sourceVersionStamp = common.StampIfUsingVersioning(mutableState.GetWorkerVersionStamp()) - } - - childRunID, childClock, err := t.startWorkflow( - ctx, - task, - parentNamespaceName, - targetNamespaceName, - childInfo.CreateRequestId, - attributes, - sourceVersionStamp, - ) - if err != nil { - t.logger.Debug("Failed to start child workflow execution", tag.Error(err)) - if common.IsServiceTransientError(err) || common.IsContextDeadlineExceededErr(err) { - // for retryable error just return - return err - } - var failedCause enumspb.StartChildWorkflowExecutionFailedCause - switch err.(type) { - case *serviceerror.WorkflowExecutionAlreadyStarted: - failedCause = enumspb.START_CHILD_WORKFLOW_EXECUTION_FAILED_CAUSE_WORKFLOW_ALREADY_EXISTS - case *serviceerror.NamespaceNotFound: - failedCause = enumspb.START_CHILD_WORKFLOW_EXECUTION_FAILED_CAUSE_NAMESPACE_NOT_FOUND - default: - t.logger.Error("Unexpected error type returned from StartWorkflowExecution API call for child workflow.", tag.ErrorType(err), tag.Error(err)) - return err - } - - return t.recordStartChildExecutionFailed( - ctx, - task, - weContext, - attributes, - failedCause, - ) - } - - t.logger.Debug("Child Execution started successfully", - tag.WorkflowID(attributes.WorkflowId), tag.WorkflowRunID(childRunID)) - - // Child execution is successfully started, record ChildExecutionStartedEvent in parent execution - err = t.recordChildExecutionStarted(ctx, task, weContext, attributes, childRunID, childClock) - if err != nil { - return err - } - - // NOTE: do not access anything related mutable state after this lock is released. - // Release the context lock since we no longer need mutable state and - // the rest of logic is making RPC call, which takes time. 
- release(nil) - parentClock, err := t.shard.NewVectorClock() - if err != nil { - return err - } - return t.createFirstWorkflowTask(ctx, task.TargetNamespaceID, &commonpb.WorkflowExecution{ - WorkflowId: task.TargetWorkflowID, - RunId: childRunID, - }, parentClock, childClock) -} - -func (t *transferQueueActiveTaskExecutor) processResetWorkflow( - ctx context.Context, - task *tasks.ResetWorkflowTask, -) (retError error) { - ctx, cancel := context.WithTimeout(ctx, taskTimeout) - defer cancel() - - currentContext, currentRelease, err := getWorkflowExecutionContextForTask(ctx, t.cache, task) - if err != nil { - return err - } - defer func() { currentRelease(retError) }() - - currentMutableState, err := loadMutableStateForTransferTask(ctx, currentContext, task, t.metricHandler, t.logger) - if err != nil { - return err - } - if currentMutableState == nil { - return nil - } - - logger := log.With( - t.logger, - tag.WorkflowNamespaceID(task.NamespaceID), - tag.WorkflowID(task.WorkflowID), - tag.WorkflowRunID(task.RunID), - ) - - if !currentMutableState.IsWorkflowExecutionRunning() { - // it means this this might not be current anymore, we need to check - var resp *persistence.GetCurrentExecutionResponse - resp, err = t.shard.GetCurrentExecution(ctx, &persistence.GetCurrentExecutionRequest{ - ShardID: t.shard.GetShardID(), - NamespaceID: task.NamespaceID, - WorkflowID: task.WorkflowID, - }) - if err != nil { - return err - } - if resp.RunID != task.RunID { - logger.Warn("Auto-Reset is skipped, because current run is stale.") - return nil - } - } - // TODO: current reset doesn't allow childWFs, in the future we will release this restriction - if len(currentMutableState.GetPendingChildExecutionInfos()) > 0 { - logger.Warn("Auto-Reset is skipped, because current run has pending child executions.") - return nil - } - - currentStartVersion, err := currentMutableState.GetStartVersion() - if err != nil { - return err - } - - err = CheckTaskVersion(t.shard, t.logger, currentMutableState.GetNamespaceEntry(), currentStartVersion, task.Version, task) - if err != nil { - return err - } - - executionInfo := currentMutableState.GetExecutionInfo() - executionState := currentMutableState.GetExecutionState() - namespaceEntry, err := t.registry.GetNamespaceByID(namespace.ID(executionInfo.NamespaceId)) - if err != nil { - return err - } - logger = log.With(logger, tag.WorkflowNamespace(namespaceEntry.Name().String())) - - reason, resetPoint := workflow.FindAutoResetPoint(t.shard.GetTimeSource(), namespaceEntry.VerifyBinaryChecksum, executionInfo.AutoResetPoints) - if resetPoint == nil { - logger.Warn("Auto-Reset is skipped, because reset point is not found.") - return nil - } - logger = log.With( - logger, - tag.WorkflowResetBaseRunID(resetPoint.GetRunId()), - tag.WorkflowBinaryChecksum(resetPoint.GetBinaryChecksum()), - tag.WorkflowEventID(resetPoint.GetFirstWorkflowTaskCompletedId()), - ) - - var baseContext workflow.Context - var baseMutableState workflow.MutableState - var baseRelease wcache.ReleaseCacheFunc - if resetPoint.GetRunId() == executionState.RunId { - baseContext = currentContext - baseMutableState = currentMutableState - baseRelease = currentRelease - } else { - baseContext, baseRelease, err = getWorkflowExecutionContext( - ctx, - t.cache, - namespace.ID(task.NamespaceID), - commonpb.WorkflowExecution{ - WorkflowId: task.WorkflowID, - RunId: resetPoint.GetRunId(), - }, - ) - if err != nil { - return err - } - defer func() { baseRelease(retError) }() - baseMutableState, err = 
loadMutableStateForTransferTask(ctx, baseContext, task, t.metricHandler, t.logger) - if err != nil { - return err - } - if baseMutableState == nil { - return nil - } - } - - // NOTE: reset need to go through history which may take a longer time, - // so it's using its own timeout - return t.resetWorkflow( - ctx, - task, - reason, - resetPoint, - baseMutableState, - currentContext, - currentMutableState, - logger, - ) -} - -func (t *transferQueueActiveTaskExecutor) recordChildExecutionStarted( - ctx context.Context, - task *tasks.StartChildExecutionTask, - context workflow.Context, - initiatedAttributes *historypb.StartChildWorkflowExecutionInitiatedEventAttributes, - runID string, - clock *clockspb.VectorClock, -) error { - return t.updateWorkflowExecution(ctx, context, true, - func(mutableState workflow.MutableState) error { - if !mutableState.IsWorkflowExecutionRunning() { - return serviceerror.NewNotFound("Workflow execution already completed.") - } - - ci, ok := mutableState.GetChildExecutionInfo(task.InitiatedEventID) - if !ok || ci.StartedEventId != common.EmptyEventID { - return serviceerror.NewNotFound("Pending child execution not found.") - } - - _, err := mutableState.AddChildWorkflowExecutionStartedEvent( - &commonpb.WorkflowExecution{ - WorkflowId: task.TargetWorkflowID, - RunId: runID, - }, - initiatedAttributes.WorkflowType, - task.InitiatedEventID, - initiatedAttributes.Header, - clock, - ) - - return err - }) -} - -func (t *transferQueueActiveTaskExecutor) recordStartChildExecutionFailed( - ctx context.Context, - task *tasks.StartChildExecutionTask, - context workflow.Context, - initiatedAttributes *historypb.StartChildWorkflowExecutionInitiatedEventAttributes, - failedCause enumspb.StartChildWorkflowExecutionFailedCause, -) error { - return t.updateWorkflowExecution(ctx, context, true, - func(mutableState workflow.MutableState) error { - if !mutableState.IsWorkflowExecutionRunning() { - return serviceerror.NewNotFound("Workflow execution already completed.") - } - - ci, ok := mutableState.GetChildExecutionInfo(task.InitiatedEventID) - if !ok || ci.StartedEventId != common.EmptyEventID { - return serviceerror.NewNotFound("Pending child execution not found.") - } - - _, err := mutableState.AddStartChildWorkflowExecutionFailedEvent( - task.InitiatedEventID, - failedCause, - initiatedAttributes, - ) - return err - }) -} - -// createFirstWorkflowTask is used by StartChildExecution transfer task to create the first workflow task for -// child execution. 
-func (t *transferQueueActiveTaskExecutor) createFirstWorkflowTask( - ctx context.Context, - namespaceID string, - execution *commonpb.WorkflowExecution, - parentClock *clockspb.VectorClock, - childClock *clockspb.VectorClock, -) error { - _, err := t.historyClient.ScheduleWorkflowTask(ctx, &historyservice.ScheduleWorkflowTaskRequest{ - NamespaceId: namespaceID, - WorkflowExecution: execution, - IsFirstWorkflowTask: true, - ParentClock: parentClock, - ChildClock: childClock, - }) - return err -} - -func (t *transferQueueActiveTaskExecutor) requestCancelExternalExecutionCompleted( - ctx context.Context, - task *tasks.CancelExecutionTask, - context workflow.Context, - targetNamespace namespace.Name, - targetNamespaceID namespace.ID, - targetWorkflowID string, - targetRunID string, -) error { - return t.updateWorkflowExecution(ctx, context, true, - func(mutableState workflow.MutableState) error { - if !mutableState.IsWorkflowExecutionRunning() { - return serviceerror.NewNotFound("Workflow execution already completed.") - } - - _, ok := mutableState.GetRequestCancelInfo(task.InitiatedEventID) - if !ok { - return workflow.ErrMissingRequestCancelInfo - } - - _, err := mutableState.AddExternalWorkflowExecutionCancelRequested( - task.InitiatedEventID, - targetNamespace, - targetNamespaceID, - targetWorkflowID, - targetRunID, - ) - return err - }) -} - -func (t *transferQueueActiveTaskExecutor) signalExternalExecutionCompleted( - ctx context.Context, - task *tasks.SignalExecutionTask, - context workflow.Context, - targetNamespace namespace.Name, - targetNamespaceID namespace.ID, - targetWorkflowID string, - targetRunID string, - control string, -) error { - return t.updateWorkflowExecution(ctx, context, true, - func(mutableState workflow.MutableState) error { - if !mutableState.IsWorkflowExecutionRunning() { - return serviceerror.NewNotFound("Workflow execution already completed.") - } - - _, ok := mutableState.GetSignalInfo(task.InitiatedEventID) - if !ok { - return workflow.ErrMissingSignalInfo - } - - _, err := mutableState.AddExternalWorkflowExecutionSignaled( - task.InitiatedEventID, - targetNamespace, - targetNamespaceID, - targetWorkflowID, - targetRunID, - control, - ) - return err - }) -} - -func (t *transferQueueActiveTaskExecutor) requestCancelExternalExecutionFailed( - ctx context.Context, - task *tasks.CancelExecutionTask, - context workflow.Context, - targetNamespace namespace.Name, - targetNamespaceID namespace.ID, - targetWorkflowID string, - targetRunID string, - failedCause enumspb.CancelExternalWorkflowExecutionFailedCause, -) error { - return t.updateWorkflowExecution(ctx, context, true, - func(mutableState workflow.MutableState) error { - if !mutableState.IsWorkflowExecutionRunning() { - return serviceerror.NewNotFound("Workflow execution already completed.") - } - - _, ok := mutableState.GetRequestCancelInfo(task.InitiatedEventID) - if !ok { - return workflow.ErrMissingRequestCancelInfo - } - - _, err := mutableState.AddRequestCancelExternalWorkflowExecutionFailedEvent( - task.InitiatedEventID, - targetNamespace, - targetNamespaceID, - targetWorkflowID, - targetRunID, - failedCause, - ) - return err - }) -} - -func (t *transferQueueActiveTaskExecutor) signalExternalExecutionFailed( - ctx context.Context, - task *tasks.SignalExecutionTask, - context workflow.Context, - targetNamespace namespace.Name, - targetNamespaceID namespace.ID, - targetWorkflowID string, - targetRunID string, - control string, - failedCause enumspb.SignalExternalWorkflowExecutionFailedCause, -) error { - 
return t.updateWorkflowExecution(ctx, context, true, - func(mutableState workflow.MutableState) error { - if !mutableState.IsWorkflowExecutionRunning() { - return serviceerror.NewNotFound("Workflow is not running.") - } - - _, ok := mutableState.GetSignalInfo(task.InitiatedEventID) - if !ok { - return workflow.ErrMissingSignalInfo - } - - _, err := mutableState.AddSignalExternalWorkflowExecutionFailedEvent( - task.InitiatedEventID, - targetNamespace, - targetNamespaceID, - targetWorkflowID, - targetRunID, - control, - failedCause, - ) - return err - }) -} - -func (t *transferQueueActiveTaskExecutor) updateWorkflowExecution( - ctx context.Context, - context workflow.Context, - createWorkflowTask bool, - action func(workflow.MutableState) error, -) error { - mutableState, err := context.LoadMutableState(ctx) - if err != nil { - return err - } - - if err := action(mutableState); err != nil { - return err - } - - if createWorkflowTask { - // Create a transfer task to schedule a workflow task - err := workflow.ScheduleWorkflowTask(mutableState) - if err != nil { - return err - } - } - - return context.UpdateWorkflowExecutionAsActive(ctx) -} - -func (t *transferQueueActiveTaskExecutor) requestCancelExternalExecution( - ctx context.Context, - task *tasks.CancelExecutionTask, - targetNamespace namespace.Name, - requestCancelInfo *persistencespb.RequestCancelInfo, - attributes *historypb.RequestCancelExternalWorkflowExecutionInitiatedEventAttributes, -) error { - request := &historyservice.RequestCancelWorkflowExecutionRequest{ - NamespaceId: task.TargetNamespaceID, - CancelRequest: &workflowservice.RequestCancelWorkflowExecutionRequest{ - Namespace: targetNamespace.String(), - WorkflowExecution: &commonpb.WorkflowExecution{ - WorkflowId: task.TargetWorkflowID, - RunId: task.TargetRunID, - }, - Identity: consts.IdentityHistoryService, - // Use the same request ID to dedupe RequestCancelWorkflowExecution calls - RequestId: requestCancelInfo.GetCancelRequestId(), - Reason: attributes.Reason, - }, - ExternalInitiatedEventId: task.InitiatedEventID, - ExternalWorkflowExecution: &commonpb.WorkflowExecution{ - WorkflowId: task.WorkflowID, - RunId: task.RunID, - }, - ChildWorkflowOnly: task.TargetChildWorkflowOnly, - } - - _, err := t.historyClient.RequestCancelWorkflowExecution(ctx, request) - return err -} - -func (t *transferQueueActiveTaskExecutor) signalExternalExecution( - ctx context.Context, - task *tasks.SignalExecutionTask, - targetNamespace namespace.Name, - signalInfo *persistencespb.SignalInfo, - attributes *historypb.SignalExternalWorkflowExecutionInitiatedEventAttributes, -) error { - request := &historyservice.SignalWorkflowExecutionRequest{ - NamespaceId: task.TargetNamespaceID, - SignalRequest: &workflowservice.SignalWorkflowExecutionRequest{ - Namespace: targetNamespace.String(), - WorkflowExecution: &commonpb.WorkflowExecution{ - WorkflowId: task.TargetWorkflowID, - RunId: task.TargetRunID, - }, - Identity: consts.IdentityHistoryService, - SignalName: attributes.SignalName, - Input: attributes.Input, - // Use same request ID to deduplicate SignalWorkflowExecution calls - RequestId: signalInfo.GetRequestId(), - Control: attributes.Control, - Header: attributes.Header, - }, - ExternalWorkflowExecution: &commonpb.WorkflowExecution{ - WorkflowId: task.WorkflowID, - RunId: task.RunID, - }, - ChildWorkflowOnly: task.TargetChildWorkflowOnly, - } - - _, err := t.historyClient.SignalWorkflowExecution(ctx, request) - return err -} - -func (t *transferQueueActiveTaskExecutor) startWorkflow( - ctx 
context.Context, - task *tasks.StartChildExecutionTask, - namespace namespace.Name, - targetNamespace namespace.Name, - childRequestID string, - attributes *historypb.StartChildWorkflowExecutionInitiatedEventAttributes, - sourceVersionStamp *commonpb.WorkerVersionStamp, -) (string, *clockspb.VectorClock, error) { - request := common.CreateHistoryStartWorkflowRequest( - task.TargetNamespaceID, - &workflowservice.StartWorkflowExecutionRequest{ - Namespace: targetNamespace.String(), - WorkflowId: attributes.WorkflowId, - WorkflowType: attributes.WorkflowType, - TaskQueue: attributes.TaskQueue, - Input: attributes.Input, - Header: attributes.Header, - WorkflowExecutionTimeout: attributes.WorkflowExecutionTimeout, - WorkflowRunTimeout: attributes.WorkflowRunTimeout, - WorkflowTaskTimeout: attributes.WorkflowTaskTimeout, - - // Use the same request ID to dedupe StartWorkflowExecution calls - RequestId: childRequestID, - WorkflowIdReusePolicy: attributes.WorkflowIdReusePolicy, - RetryPolicy: attributes.RetryPolicy, - CronSchedule: attributes.CronSchedule, - Memo: attributes.Memo, - SearchAttributes: attributes.SearchAttributes, - }, - &workflowspb.ParentExecutionInfo{ - NamespaceId: task.NamespaceID, - Namespace: namespace.String(), - Execution: &commonpb.WorkflowExecution{ - WorkflowId: task.WorkflowID, - RunId: task.RunID, - }, - InitiatedId: task.InitiatedEventID, - InitiatedVersion: task.Version, - Clock: vclock.NewVectorClock(t.shard.GetClusterMetadata().GetClusterID(), t.shard.GetShardID(), task.TaskID), - }, - t.shard.GetTimeSource().Now(), - ) - - request.SourceVersionStamp = sourceVersionStamp - - response, err := t.historyClient.StartWorkflowExecution(ctx, request) - if err != nil { - return "", nil, err - } - return response.GetRunId(), response.GetClock(), nil -} - -func (t *transferQueueActiveTaskExecutor) resetWorkflow( - ctx context.Context, - task *tasks.ResetWorkflowTask, - reason string, - resetPoint *workflowpb.ResetPointInfo, - baseMutableState workflow.MutableState, - currentContext workflow.Context, - currentMutableState workflow.MutableState, - logger log.Logger, -) error { - // the actual reset operation needs to read history and may not be able to completed within - // the original context timeout. - // create a new context with a longer timeout, but retain all existing context values. 
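The reset path needs more time than the per-task context allows, so rpc.ResetContextTimeout is used to obtain a context that, per the comment above, keeps the incoming context's values while getting a longer timeout. One common way to build such a helper is sketched below; this is an illustration of the technique, not the rpc package's implementation, and unlike a plain context.WithTimeout it also detaches the parent's cancellation (it assumes the context and time imports):

    // detachedCtx keeps the parent's values but hides its deadline and cancellation.
    type detachedCtx struct{ context.Context }

    func (detachedCtx) Deadline() (time.Time, bool) { return time.Time{}, false }
    func (detachedCtx) Done() <-chan struct{}       { return nil }
    func (detachedCtx) Err() error                  { return nil }

    // withLongerTimeout returns a context carrying ctx's values under a fresh deadline.
    func withLongerTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) {
        return context.WithTimeout(detachedCtx{ctx}, d)
    }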
- resetWorkflowCtx, cancel := rpc.ResetContextTimeout(ctx, taskHistoryOpTimeout) - defer cancel() - - namespaceID := namespace.ID(task.NamespaceID) - workflowID := task.WorkflowID - baseRunID := baseMutableState.GetExecutionState().GetRunId() - - resetRunID := uuid.New() - baseRebuildLastEventID := resetPoint.GetFirstWorkflowTaskCompletedId() - 1 - baseVersionHistories := baseMutableState.GetExecutionInfo().GetVersionHistories() - baseCurrentVersionHistory, err := versionhistory.GetCurrentVersionHistory(baseVersionHistories) - if err != nil { - return err - } - baseRebuildLastEventVersion, err := versionhistory.GetVersionHistoryEventVersion(baseCurrentVersionHistory, baseRebuildLastEventID) - if err != nil { - return err - } - baseCurrentBranchToken := baseCurrentVersionHistory.GetBranchToken() - baseNextEventID := baseMutableState.GetNextEventID() - - err = t.workflowResetter.ResetWorkflow( - resetWorkflowCtx, - namespaceID, - workflowID, - baseRunID, - baseCurrentBranchToken, - baseRebuildLastEventID, - baseRebuildLastEventVersion, - baseNextEventID, - resetRunID, - uuid.New(), - ndc.NewWorkflow( - resetWorkflowCtx, - t.registry, - t.shard.GetClusterMetadata(), - currentContext, - currentMutableState, - wcache.NoopReleaseFn, // this is fine since caller will defer on release - ), - reason, - nil, - enumspb.RESET_REAPPLY_TYPE_SIGNAL, - ) - - switch err.(type) { - case nil: - return nil - - case *serviceerror.NotFound, *serviceerror.NamespaceNotFound: - // This means the reset point is corrupted and not retry able. - // There must be a bug in our system that we must fix.(for example, history is not the same in active/passive) - t.metricHandler.Counter(metrics.AutoResetPointCorruptionCounter.GetMetricName()).Record( - 1, - metrics.OperationTag(metrics.TransferQueueProcessorScope), - ) - logger.Error("Auto-Reset workflow failed and not retryable. The reset point is corrupted.", tag.Error(err)) - return nil - - default: - // log this error and retry - logger.Error("Auto-Reset workflow failed", tag.Error(err)) - return err - } -} - -func (t *transferQueueActiveTaskExecutor) processParentClosePolicy( - ctx context.Context, - parentNamespaceName string, - parentExecution commonpb.WorkflowExecution, - childInfos map[int64]*persistencespb.ChildExecutionInfo, -) error { - if len(childInfos) == 0 { - return nil - } - - scope := t.metricHandler.WithTags(metrics.OperationTag(metrics.TransferActiveTaskCloseExecutionScope)) - - if t.shard.GetConfig().EnableParentClosePolicyWorker() && - len(childInfos) >= t.shard.GetConfig().ParentClosePolicyThreshold(parentNamespaceName) { - - executions := make([]parentclosepolicy.RequestDetail, 0, len(childInfos)) - for _, childInfo := range childInfos { - if childInfo.ParentClosePolicy == enumspb.PARENT_CLOSE_POLICY_ABANDON { - continue - } - - childNamespaceID := namespace.ID(childInfo.GetNamespaceId()) - if childNamespaceID.IsEmpty() { - // TODO (alex): Remove after childInfo.NamespaceId is back filled. Backward compatibility: old childInfo doesn't have NamespaceId set. - // TODO (alex): consider reverse lookup of namespace name from ID but namespace name is not actually used. - var err error - childNamespaceID, err = t.registry.GetNamespaceID(namespace.Name(childInfo.GetNamespace())) - switch err.(type) { - case nil: - case *serviceerror.NamespaceNotFound: - // If child namespace is deleted there is nothing to close. 
- continue - default: - return err - } - } - - executions = append(executions, parentclosepolicy.RequestDetail{ - Namespace: childInfo.Namespace, - NamespaceID: childNamespaceID.String(), - WorkflowID: childInfo.StartedWorkflowId, - RunID: childInfo.StartedRunId, - Policy: childInfo.ParentClosePolicy, - }) - } - - if len(executions) == 0 { - return nil - } - - request := parentclosepolicy.Request{ - ParentExecution: parentExecution, - Executions: executions, - } - return t.parentClosePolicyClient.SendParentClosePolicyRequest(ctx, request) - } - - for _, childInfo := range childInfos { - err := t.applyParentClosePolicy(ctx, &parentExecution, childInfo) - switch err.(type) { - case nil: - scope.Counter(metrics.ParentClosePolicyProcessorSuccess.GetMetricName()).Record(1) - case *serviceerror.NotFound: - // If child execution is deleted there is nothing to close. - case *serviceerror.NamespaceNotFound: - // If child namespace is deleted there is nothing to close. - default: - scope.Counter(metrics.ParentClosePolicyProcessorFailures.GetMetricName()).Record(1) - return err - } - } - return nil -} - -func (t *transferQueueActiveTaskExecutor) applyParentClosePolicy( - ctx context.Context, - parentExecution *commonpb.WorkflowExecution, - childInfo *persistencespb.ChildExecutionInfo, -) error { - switch childInfo.ParentClosePolicy { - case enumspb.PARENT_CLOSE_POLICY_ABANDON: - // noop - return nil - - case enumspb.PARENT_CLOSE_POLICY_TERMINATE: - childNamespaceID := namespace.ID(childInfo.GetNamespaceId()) - if childNamespaceID.IsEmpty() { - // TODO (alex): Remove after childInfo.NamespaceId is back filled. Backward compatibility: old childInfo doesn't have NamespaceId set. - // TODO (alex): consider reverse lookup of namespace name from ID but namespace name is not actually used. - var err error - childNamespaceID, err = t.registry.GetNamespaceID(namespace.Name(childInfo.GetNamespace())) - if err != nil { - return err - } - } - _, err := t.historyClient.TerminateWorkflowExecution(ctx, &historyservice.TerminateWorkflowExecutionRequest{ - NamespaceId: childNamespaceID.String(), - TerminateRequest: &workflowservice.TerminateWorkflowExecutionRequest{ - Namespace: childInfo.GetNamespace(), - WorkflowExecution: &commonpb.WorkflowExecution{ - WorkflowId: childInfo.GetStartedWorkflowId(), - }, - // Include StartedRunID as FirstExecutionRunID on the request to allow child to be terminated across runs. - // If the child does continue as new it still propagates the RunID of first execution. - FirstExecutionRunId: childInfo.GetStartedRunId(), - Reason: "by parent close policy", - Identity: consts.IdentityHistoryService, - }, - ExternalWorkflowExecution: parentExecution, - ChildWorkflowOnly: true, - }) - return err - - case enumspb.PARENT_CLOSE_POLICY_REQUEST_CANCEL: - childNamespaceID := namespace.ID(childInfo.GetNamespaceId()) - if childNamespaceID.IsEmpty() { - // TODO (alex): Remove after childInfo.NamespaceId is back filled. Backward compatibility: old childInfo doesn't have NamespaceId set. - // TODO (alex): consider reverse lookup of namespace name from ID but namespace name is not actually used. 
- var err error - childNamespaceID, err = t.registry.GetNamespaceID(namespace.Name(childInfo.GetNamespace())) - if err != nil { - return err - } - } - - _, err := t.historyClient.RequestCancelWorkflowExecution(ctx, &historyservice.RequestCancelWorkflowExecutionRequest{ - NamespaceId: childNamespaceID.String(), - CancelRequest: &workflowservice.RequestCancelWorkflowExecutionRequest{ - Namespace: childInfo.GetNamespace(), - WorkflowExecution: &commonpb.WorkflowExecution{ - WorkflowId: childInfo.GetStartedWorkflowId(), - }, - // Include StartedRunID as FirstExecutionRunID on the request to allow child to be canceled across runs. - // If the child does continue as new it still propagates the RunID of first execution. - FirstExecutionRunId: childInfo.GetStartedRunId(), - Identity: consts.IdentityHistoryService, - }, - ExternalWorkflowExecution: parentExecution, - ChildWorkflowOnly: true, - }) - return err - - default: - return serviceerror.NewInternal(fmt.Sprintf("unknown parent close policy: %v", childInfo.ParentClosePolicy)) - } -} - -func copyChildWorkflowInfos( - input map[int64]*persistencespb.ChildExecutionInfo, -) map[int64]*persistencespb.ChildExecutionInfo { - result := make(map[int64]*persistencespb.ChildExecutionInfo) - if input == nil { - return result - } - - for k, v := range input { - result[k] = common.CloneProto(v) - } - return result -} diff -Nru temporal-1.21.5-1/src/service/history/transferQueueActiveTaskExecutor_test.go temporal-1.22.5/src/service/history/transferQueueActiveTaskExecutor_test.go --- temporal-1.21.5-1/src/service/history/transferQueueActiveTaskExecutor_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/transferQueueActiveTaskExecutor_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,2808 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
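applyParentClosePolicy above is, at its core, a three-way dispatch on the child's recorded policy: abandon is a no-op, while terminate and request-cancel each become a history-service RPC keyed on the child's first-run ID so the action still lands after continue-as-new. A stripped-down sketch of that dispatch, with the RPCs reduced to injected callbacks (the function and its parameters are illustrative):

    // Dispatches on the persisted parent close policy; the real code issues
    // TerminateWorkflowExecution / RequestCancelWorkflowExecution RPCs instead of
    // calling the injected callbacks.
    func dispatchParentClosePolicy(
        policy enumspb.ParentClosePolicy,
        terminate func() error,
        cancel func() error,
    ) error {
        switch policy {
        case enumspb.PARENT_CLOSE_POLICY_ABANDON:
            return nil // child keeps running
        case enumspb.PARENT_CLOSE_POLICY_TERMINATE:
            return terminate()
        case enumspb.PARENT_CLOSE_POLICY_REQUEST_CANCEL:
            return cancel()
        default:
            return serviceerror.NewInternal(fmt.Sprintf("unknown parent close policy: %v", policy))
        }
    }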
- -package history - -import ( - "context" - "fmt" - "math/rand" - "testing" - "time" - - "github.com/golang/mock/gomock" - "github.com/pborman/uuid" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - "google.golang.org/grpc" - - commandpb "go.temporal.io/api/command/v1" - commonpb "go.temporal.io/api/common/v1" - enumspb "go.temporal.io/api/enums/v1" - historypb "go.temporal.io/api/history/v1" - "go.temporal.io/api/serviceerror" - taskqueuepb "go.temporal.io/api/taskqueue/v1" - "go.temporal.io/api/workflowservice/v1" - - "go.temporal.io/server/api/historyservice/v1" - "go.temporal.io/server/api/historyservicemock/v1" - "go.temporal.io/server/api/matchingservice/v1" - "go.temporal.io/server/api/matchingservicemock/v1" - persistencespb "go.temporal.io/server/api/persistence/v1" - workflowspb "go.temporal.io/server/api/workflow/v1" - "go.temporal.io/server/common" - "go.temporal.io/server/common/archiver" - "go.temporal.io/server/common/archiver/provider" - "go.temporal.io/server/common/backoff" - "go.temporal.io/server/common/clock" - "go.temporal.io/server/common/cluster" - "go.temporal.io/server/common/convert" - "go.temporal.io/server/common/definition" - dc "go.temporal.io/server/common/dynamicconfig" - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/metrics" - "go.temporal.io/server/common/namespace" - "go.temporal.io/server/common/payload" - "go.temporal.io/server/common/payloads" - "go.temporal.io/server/common/persistence" - "go.temporal.io/server/common/persistence/versionhistory" - "go.temporal.io/server/common/persistence/visibility/manager" - "go.temporal.io/server/common/primitives/timestamp" - "go.temporal.io/server/common/searchattribute" - "go.temporal.io/server/service/history/configs" - "go.temporal.io/server/service/history/consts" - "go.temporal.io/server/service/history/deletemanager" - "go.temporal.io/server/service/history/events" - "go.temporal.io/server/service/history/queues" - "go.temporal.io/server/service/history/shard" - "go.temporal.io/server/service/history/tasks" - "go.temporal.io/server/service/history/tests" - "go.temporal.io/server/service/history/vclock" - "go.temporal.io/server/service/history/workflow" - wcache "go.temporal.io/server/service/history/workflow/cache" - warchiver "go.temporal.io/server/service/worker/archiver" - "go.temporal.io/server/service/worker/parentclosepolicy" -) - -type ( - transferQueueActiveTaskExecutorSuite struct { - suite.Suite - *require.Assertions - - controller *gomock.Controller - mockShard *shard.ContextTest - mockTxProcessor *queues.MockQueue - mockTimerProcessor *queues.MockQueue - mockNamespaceCache *namespace.MockRegistry - mockMatchingClient *matchingservicemock.MockMatchingServiceClient - mockHistoryClient *historyservicemock.MockHistoryServiceClient - mockClusterMetadata *cluster.MockMetadata - mockSearchAttributesProvider *searchattribute.MockProvider - mockVisibilityManager *manager.MockVisibilityManager - - mockExecutionMgr *persistence.MockExecutionManager - mockArchivalClient *warchiver.MockClient - mockArchivalMetadata archiver.MetadataMock - mockArchiverProvider *provider.MockArchiverProvider - mockParentClosePolicyClient *parentclosepolicy.MockClient - - workflowCache wcache.Cache - logger log.Logger - namespaceID namespace.ID - namespace namespace.Name - namespaceEntry *namespace.Namespace - targetNamespaceID namespace.ID - targetNamespace namespace.Name - targetNamespaceEntry *namespace.Namespace - childNamespaceID namespace.ID - childNamespace 
namespace.Name - childNamespaceEntry *namespace.Namespace - version int64 - now time.Time - timeSource *clock.EventTimeSource - transferQueueActiveTaskExecutor *transferQueueActiveTaskExecutor - } -) - -var defaultWorkflowTaskCompletionLimits = workflow.WorkflowTaskCompletionLimits{MaxResetPoints: configs.DefaultHistoryMaxAutoResetPoints, MaxSearchAttributeValueSize: 2048} - -func TestTransferQueueActiveTaskExecutorSuite(t *testing.T) { - s := new(transferQueueActiveTaskExecutorSuite) - suite.Run(t, s) -} - -func (s *transferQueueActiveTaskExecutorSuite) SetupSuite() { -} - -func (s *transferQueueActiveTaskExecutorSuite) TearDownSuite() { -} - -func (s *transferQueueActiveTaskExecutorSuite) SetupTest() { - s.Assertions = require.New(s.T()) - - s.namespaceID = tests.NamespaceID - s.namespace = tests.Namespace - s.namespaceEntry = tests.GlobalNamespaceEntry - s.targetNamespaceID = tests.TargetNamespaceID - s.targetNamespace = tests.TargetNamespace - s.targetNamespaceEntry = tests.GlobalTargetNamespaceEntry - s.childNamespaceID = tests.ChildNamespaceID - s.childNamespace = tests.ChildNamespace - s.childNamespaceEntry = tests.GlobalChildNamespaceEntry - s.version = s.namespaceEntry.FailoverVersion() - s.now = time.Now().UTC() - s.timeSource = clock.NewEventTimeSource().Update(s.now) - - s.controller = gomock.NewController(s.T()) - s.mockTxProcessor = queues.NewMockQueue(s.controller) - s.mockTimerProcessor = queues.NewMockQueue(s.controller) - s.mockTxProcessor.EXPECT().Category().Return(tasks.CategoryTransfer).AnyTimes() - s.mockTimerProcessor.EXPECT().Category().Return(tasks.CategoryTimer).AnyTimes() - s.mockTxProcessor.EXPECT().NotifyNewTasks(gomock.Any()).AnyTimes() - s.mockTimerProcessor.EXPECT().NotifyNewTasks(gomock.Any()).AnyTimes() - - config := tests.NewDynamicConfig() - s.mockShard = shard.NewTestContextWithTimeSource( - s.controller, - &persistencespb.ShardInfo{ - ShardId: 1, - RangeId: 1, - }, - config, - s.timeSource, - ) - s.mockShard.SetEventsCacheForTesting(events.NewEventsCache( - s.mockShard.GetShardID(), - s.mockShard.GetConfig().EventsCacheInitialSize(), - s.mockShard.GetConfig().EventsCacheMaxSize(), - s.mockShard.GetConfig().EventsCacheTTL(), - s.mockShard.GetExecutionManager(), - false, - s.mockShard.GetLogger(), - s.mockShard.GetMetricsHandler(), - )) - - s.mockParentClosePolicyClient = parentclosepolicy.NewMockClient(s.controller) - s.mockArchivalClient = warchiver.NewMockClient(s.controller) - s.mockMatchingClient = s.mockShard.Resource.MatchingClient - s.mockHistoryClient = s.mockShard.Resource.HistoryClient - s.mockExecutionMgr = s.mockShard.Resource.ExecutionMgr - s.mockClusterMetadata = s.mockShard.Resource.ClusterMetadata - s.mockSearchAttributesProvider = s.mockShard.Resource.SearchAttributesProvider - s.mockVisibilityManager = s.mockShard.Resource.VisibilityManager - s.mockArchivalMetadata = s.mockShard.Resource.ArchivalMetadata - s.mockArchiverProvider = s.mockShard.Resource.ArchiverProvider - s.mockNamespaceCache = s.mockShard.Resource.NamespaceCache - s.mockNamespaceCache.EXPECT().GetNamespaceByID(tests.NamespaceID).Return(tests.GlobalNamespaceEntry, nil).AnyTimes() - s.mockNamespaceCache.EXPECT().GetNamespace(tests.Namespace).Return(tests.GlobalNamespaceEntry, nil).AnyTimes() - s.mockNamespaceCache.EXPECT().GetNamespaceName(tests.NamespaceID).Return(tests.Namespace, nil).AnyTimes() - s.mockNamespaceCache.EXPECT().GetNamespaceByID(tests.TargetNamespaceID).Return(tests.GlobalTargetNamespaceEntry, nil).AnyTimes() - 
s.mockNamespaceCache.EXPECT().GetNamespace(tests.TargetNamespace).Return(tests.GlobalTargetNamespaceEntry, nil).AnyTimes() - s.mockNamespaceCache.EXPECT().GetNamespaceByID(tests.ParentNamespaceID).Return(tests.GlobalParentNamespaceEntry, nil).AnyTimes() - s.mockNamespaceCache.EXPECT().GetNamespace(tests.ParentNamespace).Return(tests.GlobalParentNamespaceEntry, nil).AnyTimes() - s.mockNamespaceCache.EXPECT().GetNamespaceByID(tests.ChildNamespaceID).Return(tests.GlobalChildNamespaceEntry, nil).AnyTimes() - s.mockNamespaceCache.EXPECT().GetNamespace(tests.ChildNamespace).Return(tests.GlobalChildNamespaceEntry, nil).AnyTimes() - s.mockNamespaceCache.EXPECT().GetNamespaceByID(tests.MissedNamespaceID).Return(nil, serviceerror.NewNamespaceNotFound(tests.MissedNamespaceID.String())).AnyTimes() - s.mockClusterMetadata.EXPECT().GetClusterID().Return(tests.Version).AnyTimes() - s.mockClusterMetadata.EXPECT().IsVersionFromSameCluster(tests.Version, tests.Version).Return(true).AnyTimes() - s.mockClusterMetadata.EXPECT().GetCurrentClusterName().Return(cluster.TestCurrentClusterName).AnyTimes() - s.mockClusterMetadata.EXPECT().GetAllClusterInfo().Return(cluster.TestAllClusterInfo).AnyTimes() - s.mockClusterMetadata.EXPECT().IsGlobalNamespaceEnabled().Return(true).AnyTimes() - s.mockClusterMetadata.EXPECT().ClusterNameForFailoverVersion(s.namespaceEntry.IsGlobalNamespace(), s.version).Return(s.mockClusterMetadata.GetCurrentClusterName()).AnyTimes() - s.mockArchivalMetadata.SetHistoryEnabledByDefault() - s.mockArchivalMetadata.SetVisibilityEnabledByDefault() - - s.workflowCache = wcache.NewCache(s.mockShard) - s.logger = s.mockShard.GetLogger() - - h := &historyEngineImpl{ - currentClusterName: s.mockShard.Resource.GetClusterMetadata().GetCurrentClusterName(), - shard: s.mockShard, - clusterMetadata: s.mockClusterMetadata, - executionManager: s.mockExecutionMgr, - logger: s.logger, - tokenSerializer: common.NewProtoTaskTokenSerializer(), - metricsHandler: s.mockShard.GetMetricsHandler(), - eventNotifier: events.NewNotifier(clock.NewRealTimeSource(), metrics.NoopMetricsHandler, func(namespace.ID, string) int32 { return 1 }), - queueProcessors: map[tasks.Category]queues.Queue{ - s.mockTxProcessor.Category(): s.mockTxProcessor, - s.mockTimerProcessor.Category(): s.mockTimerProcessor, - }, - } - s.mockShard.SetEngineForTesting(h) - - s.transferQueueActiveTaskExecutor = newTransferQueueActiveTaskExecutor( - s.mockShard, - s.workflowCache, - s.mockArchivalClient, - h.sdkClientFactory, - s.logger, - metrics.NoopMetricsHandler, - config, - s.mockShard.Resource.MatchingClient, - s.mockVisibilityManager, - ).(*transferQueueActiveTaskExecutor) - s.transferQueueActiveTaskExecutor.parentClosePolicyClient = s.mockParentClosePolicyClient -} - -func (s *transferQueueActiveTaskExecutorSuite) TearDownTest() { - s.controller.Finish() - s.mockShard.StopForTest() -} - -func (s *transferQueueActiveTaskExecutorSuite) TestProcessActivityTask_Success() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - 
WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{ - Name: taskQueueName, - Kind: enumspb.TASK_QUEUE_KIND_NORMAL, - }, - WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - }, - ) - s.Nil(err) - - wt := addWorkflowTaskScheduledEvent(mutableState) - event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) - wt.StartedEventID = event.GetEventId() - event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") - - taskID := int64(59) - activityID := "activity-1" - activityType := "some random activity type" - event, ai := addActivityTaskScheduledEvent(mutableState, event.GetEventId(), activityID, activityType, taskQueueName, &commonpb.Payloads{}, 1*time.Second, 1*time.Second, 1*time.Second, 1*time.Second) - - transferTask := &tasks.ActivityTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - Version: s.version, - TaskID: taskID, - TaskQueue: taskQueueName, - ScheduledEventID: event.GetEventId(), - VisibilityTimestamp: time.Now().UTC(), - } - - persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - s.mockMatchingClient.EXPECT().AddActivityTask(gomock.Any(), s.createAddActivityTaskRequest(transferTask, ai), gomock.Any()).Return(&matchingservice.AddActivityTaskResponse{}, nil) - - _, _, err = s.transferQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) - s.Nil(err) -} - -func (s *transferQueueActiveTaskExecutorSuite) TestProcessActivityTask_Duplication() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - }, - ) - s.Nil(err) - - wt := addWorkflowTaskScheduledEvent(mutableState) - event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) - wt.StartedEventID = event.GetEventId() - event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") - - taskID := int64(59) - activityID := "activity-1" - activityType := "some random activity type" - event, ai := addActivityTaskScheduledEvent(mutableState, event.GetEventId(), activityID, activityType, taskQueueName, &commonpb.Payloads{}, 1*time.Second, 1*time.Second, 1*time.Second, 1*time.Second) - - transferTask := &tasks.ActivityTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - 
execution.GetWorkflowId(), - execution.GetRunId(), - ), - Version: s.version, - TaskID: taskID, - TaskQueue: taskQueueName, - ScheduledEventID: event.GetEventId(), - VisibilityTimestamp: time.Now().UTC(), - } - - event = addActivityTaskStartedEvent(mutableState, event.GetEventId(), "") - ai.StartedEventId = event.GetEventId() - event = addActivityTaskCompletedEvent(mutableState, ai.ScheduledEventId, ai.StartedEventId, nil, "") - // Flush buffered events so real IDs get assigned - mutableState.FlushBufferedEvents() - - persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - - _, _, err = s.transferQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) - s.Nil(err) -} - -func (s *transferQueueActiveTaskExecutorSuite) TestProcessWorkflowTask_FirstWorkflowTask() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{ - Name: taskQueueName, - Kind: enumspb.TASK_QUEUE_KIND_NORMAL, - }, - WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - }, - ) - s.Nil(err) - - taskID := int64(59) - wt := addWorkflowTaskScheduledEvent(mutableState) - - transferTask := &tasks.WorkflowTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - Version: s.version, - TaskID: taskID, - TaskQueue: taskQueueName, - ScheduledEventID: wt.ScheduledEventID, - VisibilityTimestamp: time.Now().UTC(), - } - - persistenceMutableState := s.createPersistenceMutableState(mutableState, wt.ScheduledEventID, wt.Version) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - s.mockMatchingClient.EXPECT().AddWorkflowTask(gomock.Any(), s.createAddWorkflowTaskRequest(transferTask, mutableState), gomock.Any()).Return(&matchingservice.AddWorkflowTaskResponse{}, nil) - - _, _, err = s.transferQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) - s.Nil(err) -} - -func (s *transferQueueActiveTaskExecutorSuite) TestProcessWorkflowTask_NonFirstWorkflowTask() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: 
&workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{ - Name: taskQueueName, - Kind: enumspb.TASK_QUEUE_KIND_NORMAL, - }, - WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - }, - ) - s.Nil(err) - - wt := addWorkflowTaskScheduledEvent(mutableState) - event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) - wt.StartedEventID = event.GetEventId() - event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") - s.NotNil(event) - - // make another round of workflow task - taskID := int64(59) - wt = addWorkflowTaskScheduledEvent(mutableState) - - transferTask := &tasks.WorkflowTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - Version: s.version, - TaskID: taskID, - TaskQueue: taskQueueName, - ScheduledEventID: wt.ScheduledEventID, - VisibilityTimestamp: time.Now().UTC(), - } - - persistenceMutableState := s.createPersistenceMutableState(mutableState, wt.ScheduledEventID, wt.Version) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - s.mockMatchingClient.EXPECT().AddWorkflowTask(gomock.Any(), s.createAddWorkflowTaskRequest(transferTask, mutableState), gomock.Any()).Return(&matchingservice.AddWorkflowTaskResponse{}, nil) - - _, _, err = s.transferQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) - s.Nil(err) -} - -func (s *transferQueueActiveTaskExecutorSuite) TestProcessWorkflowTask_Sticky_NonFirstWorkflowTask() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - stickyTaskQueueName := "some random sticky task queue" - stickyTaskQueueTimeout := timestamp.DurationFromSeconds(233) - - mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - }, - ) - s.Nil(err) - - wt := addWorkflowTaskScheduledEvent(mutableState) - event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) - wt.StartedEventID = event.GetEventId() - event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") - s.NotNil(event) - // set the sticky taskqueue attr - executionInfo := mutableState.GetExecutionInfo() - executionInfo.StickyTaskQueue = stickyTaskQueueName - executionInfo.StickyScheduleToStartTimeout = stickyTaskQueueTimeout - - // make another round of workflow task - taskID := int64(59) - wt = addWorkflowTaskScheduledEvent(mutableState) - - transferTask := &tasks.WorkflowTask{ - WorkflowKey: 
definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - Version: s.version, - TaskID: taskID, - TaskQueue: stickyTaskQueueName, - ScheduledEventID: wt.ScheduledEventID, - VisibilityTimestamp: time.Now().UTC(), - } - - persistenceMutableState := s.createPersistenceMutableState(mutableState, wt.ScheduledEventID, wt.Version) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - s.mockMatchingClient.EXPECT().AddWorkflowTask(gomock.Any(), s.createAddWorkflowTaskRequest(transferTask, mutableState), gomock.Any()).Return(&matchingservice.AddWorkflowTaskResponse{}, nil) - - _, _, err = s.transferQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) - s.Nil(err) -} - -func (s *transferQueueActiveTaskExecutorSuite) TestProcessWorkflowTask_WorkflowTaskNotSticky_MutableStateSticky() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - stickyTaskQueueName := "some random sticky task queue" - stickyTaskQueueTimeout := timestamp.DurationFromSeconds(233) - - mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{ - Name: taskQueueName, - Kind: enumspb.TASK_QUEUE_KIND_NORMAL, - }, - WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - }, - ) - s.Nil(err) - - wt := addWorkflowTaskScheduledEvent(mutableState) - event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) - wt.StartedEventID = event.GetEventId() - event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") - s.NotNil(event) - // set the sticky taskqueue attr - executionInfo := mutableState.GetExecutionInfo() - executionInfo.StickyTaskQueue = stickyTaskQueueName - executionInfo.StickyScheduleToStartTimeout = stickyTaskQueueTimeout - - // make another round of workflow task - taskID := int64(59) - wt = addWorkflowTaskScheduledEvent(mutableState) - - transferTask := &tasks.WorkflowTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - Version: s.version, - TaskID: taskID, - TaskQueue: taskQueueName, - ScheduledEventID: wt.ScheduledEventID, - VisibilityTimestamp: time.Now().UTC(), - } - - persistenceMutableState := s.createPersistenceMutableState(mutableState, wt.ScheduledEventID, wt.Version) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - s.mockMatchingClient.EXPECT().AddWorkflowTask(gomock.Any(), s.createAddWorkflowTaskRequest(transferTask, mutableState), gomock.Any()).Return(&matchingservice.AddWorkflowTaskResponse{}, nil) - - _, _, err = s.transferQueueActiveTaskExecutor.Execute(context.Background(), 
s.newTaskExecutable(transferTask)) - s.Nil(err) -} - -func (s *transferQueueActiveTaskExecutorSuite) TestProcessWorkflowTask_Duplication() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - }, - ) - s.Nil(err) - - taskID := int64(4096) - wt := addWorkflowTaskScheduledEvent(mutableState) - event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) - wt.StartedEventID = event.GetEventId() - event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") - - transferTask := &tasks.WorkflowTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - Version: s.version, - TaskID: taskID, - TaskQueue: taskQueueName, - ScheduledEventID: wt.ScheduledEventID, - VisibilityTimestamp: time.Now().UTC(), - } - - persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - - _, _, err = s.transferQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) - s.Nil(err) -} - -func (s *transferQueueActiveTaskExecutorSuite) TestProcessCloseExecution_HasParent() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - parentNamespaceID := "some random parent namespace ID" - parentInitiatedID := int64(3222) - parentInitiatedVersion := int64(1234) - parentNamespace := "some random parent namespace Name" - parentExecution := &commonpb.WorkflowExecution{ - WorkflowId: "some random parent workflow ID", - RunId: uuid.New(), - } - parentClock := vclock.NewVectorClock(rand.Int63(), rand.Int31(), rand.Int63()) - - mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - ParentExecutionInfo: &workflowspb.ParentExecutionInfo{ - NamespaceId: parentNamespaceID, - Namespace: parentNamespace, - Execution: parentExecution, - InitiatedId: 
parentInitiatedID, - InitiatedVersion: parentInitiatedVersion, - Clock: parentClock, - }, - }, - ) - s.Nil(err) - - wt := addWorkflowTaskScheduledEvent(mutableState) - event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) - wt.StartedEventID = event.GetEventId() - event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") - - taskID := int64(59) - event = addCompleteWorkflowEvent(mutableState, event.GetEventId(), nil) - - transferTask := &tasks.CloseExecutionTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - Version: s.version, - TaskID: taskID, - VisibilityTimestamp: time.Now().UTC(), - } - - persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - s.mockHistoryClient.EXPECT().RecordChildExecutionCompleted(gomock.Any(), &historyservice.RecordChildExecutionCompletedRequest{ - NamespaceId: parentNamespaceID, - WorkflowExecution: parentExecution, - ParentInitiatedId: parentInitiatedID, - ParentInitiatedVersion: parentInitiatedVersion, - Clock: parentClock, - CompletedExecution: &execution, - CompletionEvent: event, - }).Return(nil, nil) - s.mockArchivalMetadata.EXPECT().GetVisibilityConfig().Return(archiver.NewDisabledArchvialConfig()) - - _, _, err = s.transferQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) - s.Nil(err) -} - -func (s *transferQueueActiveTaskExecutorSuite) TestProcessCloseExecution_CanSkipVisibilityArchival() { - for _, skipVisibilityArchival := range []bool{ - false, - true, - } { - s.Run(fmt.Sprintf("CanSkipVisibilityArchival=%v", skipVisibilityArchival), func() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - mutableState := workflow.TestGlobalMutableState( - s.mockShard, - s.mockShard.GetEventsCache(), - s.logger, - s.version, - execution.GetRunId(), - ) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - }, - ) - s.Nil(err) - - wt := addWorkflowTaskScheduledEvent(mutableState) - event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) - wt.StartedEventID = event.GetEventId() - event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") - - taskID := int64(59) - event = addCompleteWorkflowEvent(mutableState, event.GetEventId(), nil) - - transferTask := &tasks.CloseExecutionTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - Version: s.version, - TaskID: taskID, - VisibilityTimestamp: time.Now().UTC(), - CanSkipVisibilityArchival: 
skipVisibilityArchival, - } - - persistenceMutableState := s.createPersistenceMutableState( - mutableState, - event.GetEventId(), - event.GetVersion(), - ) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()). - Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - if !skipVisibilityArchival { - s.mockArchivalMetadata.EXPECT().GetVisibilityConfig(). - Return(archiver.NewArchivalConfig( - "enabled", - dc.GetStringPropertyFn("enabled"), - dc.GetBoolPropertyFn(true), - "disabled", - "random URI", - )).AnyTimes() - s.mockArchivalClient.EXPECT().Archive(gomock.Any(), gomock.Any()).Return(nil, nil) - s.mockSearchAttributesProvider.EXPECT().GetSearchAttributes(gomock.Any(), false) - s.mockVisibilityManager.EXPECT().GetIndexName().Return("") - } - - _, _, err = s.transferQueueActiveTaskExecutor.Execute( - context.Background(), - s.newTaskExecutable(transferTask), - ) - s.Nil(err) - - }) - } -} - -func (s *transferQueueActiveTaskExecutorSuite) TestProcessCloseExecution_NoParent() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - }, - ) - s.Nil(err) - - wt := addWorkflowTaskScheduledEvent(mutableState) - event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) - wt.StartedEventID = event.GetEventId() - event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") - - taskID := int64(59) - event = addCompleteWorkflowEvent(mutableState, event.GetEventId(), nil) - - transferTask := &tasks.CloseExecutionTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - Version: s.version, - TaskID: taskID, - VisibilityTimestamp: time.Now().UTC(), - } - - persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - s.mockArchivalMetadata.EXPECT().GetVisibilityConfig().Return(archiver.NewArchivalConfig("enabled", dc.GetStringPropertyFn("enabled"), dc.GetBoolPropertyFn(true), "disabled", "random URI")) - s.mockArchivalClient.EXPECT().Archive(gomock.Any(), gomock.Any()).Return(nil, nil) - s.mockSearchAttributesProvider.EXPECT().GetSearchAttributes(gomock.Any(), false) - s.mockVisibilityManager.EXPECT().GetIndexName().Return("") - - _, _, err = s.transferQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) - s.Nil(err) -} - -func (s *transferQueueActiveTaskExecutorSuite) TestProcessCloseExecution_NoParent_HasFewChildren() { - execution := 
commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - s.mockNamespaceCache.EXPECT().GetNamespace(namespace.Name("child namespace1")).Return(tests.GlobalNamespaceEntry, nil).AnyTimes() - s.mockNamespaceCache.EXPECT().GetNamespace(namespace.Name("child namespace2")).Return(tests.GlobalNamespaceEntry, nil).AnyTimes() - s.mockNamespaceCache.EXPECT().GetNamespace(namespace.Name("child namespace3")).Return(tests.GlobalNamespaceEntry, nil).AnyTimes() - - mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - }, - ) - s.Nil(err) - - wt := addWorkflowTaskScheduledEvent(mutableState) - event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) - wt.StartedEventID = event.GetEventId() - - commandType := enumspb.COMMAND_TYPE_START_CHILD_WORKFLOW_EXECUTION - parentClosePolicy1 := enumspb.PARENT_CLOSE_POLICY_ABANDON - parentClosePolicy2 := enumspb.PARENT_CLOSE_POLICY_TERMINATE - parentClosePolicy3 := enumspb.PARENT_CLOSE_POLICY_REQUEST_CANCEL - - event, _ = mutableState.AddWorkflowTaskCompletedEvent(wt, &workflowservice.RespondWorkflowTaskCompletedRequest{ - Identity: "some random identity", - Commands: []*commandpb.Command{ - { - CommandType: commandType, - Attributes: &commandpb.Command_StartChildWorkflowExecutionCommandAttributes{StartChildWorkflowExecutionCommandAttributes: &commandpb.StartChildWorkflowExecutionCommandAttributes{ - Namespace: "child namespace1", - WorkflowId: "child workflow1", - WorkflowType: &commonpb.WorkflowType{ - Name: "child workflow type", - }, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - Input: payloads.EncodeString("random input"), - ParentClosePolicy: parentClosePolicy1, - }}, - }, - { - CommandType: commandType, - Attributes: &commandpb.Command_StartChildWorkflowExecutionCommandAttributes{StartChildWorkflowExecutionCommandAttributes: &commandpb.StartChildWorkflowExecutionCommandAttributes{ - Namespace: "child namespace2", - WorkflowId: "child workflow2", - WorkflowType: &commonpb.WorkflowType{ - Name: "child workflow type", - }, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - Input: payloads.EncodeString("random input"), - ParentClosePolicy: parentClosePolicy2, - }}, - }, - { - CommandType: commandType, - Attributes: &commandpb.Command_StartChildWorkflowExecutionCommandAttributes{StartChildWorkflowExecutionCommandAttributes: &commandpb.StartChildWorkflowExecutionCommandAttributes{ - Namespace: "child namespace3", - WorkflowId: "child workflow3", - WorkflowType: &commonpb.WorkflowType{ - Name: "child workflow type", - }, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - Input: payloads.EncodeString("random input"), - ParentClosePolicy: parentClosePolicy3, - }}, - }, - }, - }, defaultWorkflowTaskCompletionLimits) - - _, _, err = 
mutableState.AddStartChildWorkflowExecutionInitiatedEvent(event.GetEventId(), uuid.New(), &commandpb.StartChildWorkflowExecutionCommandAttributes{ - Namespace: "child namespace1", - WorkflowId: "child workflow1", - WorkflowType: &commonpb.WorkflowType{ - Name: "child workflow type", - }, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - Input: payloads.EncodeString("random input"), - ParentClosePolicy: parentClosePolicy1, - }, "child namespace1-ID") - s.Nil(err) - _, _, err = mutableState.AddStartChildWorkflowExecutionInitiatedEvent(event.GetEventId(), uuid.New(), &commandpb.StartChildWorkflowExecutionCommandAttributes{ - Namespace: "child namespace2", - WorkflowId: "child workflow2", - WorkflowType: &commonpb.WorkflowType{ - Name: "child workflow type", - }, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - Input: payloads.EncodeString("random input"), - ParentClosePolicy: parentClosePolicy2, - }, "child namespace2-ID") - s.Nil(err) - _, _, err = mutableState.AddStartChildWorkflowExecutionInitiatedEvent(event.GetEventId(), uuid.New(), &commandpb.StartChildWorkflowExecutionCommandAttributes{ - Namespace: "child namespace3", - WorkflowId: "child workflow3", - WorkflowType: &commonpb.WorkflowType{ - Name: "child workflow type", - }, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - Input: payloads.EncodeString("random input"), - ParentClosePolicy: parentClosePolicy3, - }, "child namespace3-ID") - s.Nil(err) - - mutableState.FlushBufferedEvents() - - taskID := int64(59) - event = addCompleteWorkflowEvent(mutableState, event.GetEventId(), nil) - - transferTask := &tasks.CloseExecutionTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - Version: s.version, - TaskID: taskID, - VisibilityTimestamp: time.Now().UTC(), - } - - persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - s.mockArchivalMetadata.EXPECT().GetVisibilityConfig().Return(archiver.NewDisabledArchvialConfig()) - s.mockHistoryClient.EXPECT().RequestCancelWorkflowExecution(gomock.Any(), gomock.Any()).DoAndReturn( - func(_ context.Context, request *historyservice.RequestCancelWorkflowExecutionRequest, _ ...grpc.CallOption) (*historyservice.RequestCancelWorkflowExecutionResponse, error) { - s.True(request.GetChildWorkflowOnly()) - s.Equal(execution.GetWorkflowId(), request.GetExternalWorkflowExecution().GetWorkflowId()) - s.Equal(execution.GetRunId(), request.GetExternalWorkflowExecution().GetRunId()) - return nil, nil - }, - ) - s.mockHistoryClient.EXPECT().TerminateWorkflowExecution(gomock.Any(), gomock.Any()).DoAndReturn( - func(_ context.Context, request *historyservice.TerminateWorkflowExecutionRequest, _ ...grpc.CallOption) (*historyservice.TerminateWorkflowExecutionResponse, error) { - s.True(request.GetChildWorkflowOnly()) - s.Equal(execution.GetWorkflowId(), request.GetExternalWorkflowExecution().GetWorkflowId()) - s.Equal(execution.GetRunId(), request.GetExternalWorkflowExecution().GetRunId()) - return nil, nil - }, - ) - - _, _, err = s.transferQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) - s.Nil(err) -} - -func (s *transferQueueActiveTaskExecutorSuite) TestProcessCloseExecution_NoParent_HasManyChildren() { - execution := commonpb.WorkflowExecution{ - 
WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - }, - ) - s.Nil(err) - - wt := addWorkflowTaskScheduledEvent(mutableState) - event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) - wt.StartedEventID = event.GetEventId() - - commandType := enumspb.COMMAND_TYPE_START_CHILD_WORKFLOW_EXECUTION - parentClosePolicy := enumspb.PARENT_CLOSE_POLICY_TERMINATE - var commands []*commandpb.Command - for i := 0; i < 10; i++ { - commands = append(commands, &commandpb.Command{ - CommandType: commandType, - Attributes: &commandpb.Command_StartChildWorkflowExecutionCommandAttributes{StartChildWorkflowExecutionCommandAttributes: &commandpb.StartChildWorkflowExecutionCommandAttributes{ - WorkflowId: "child workflow" + convert.IntToString(i), - WorkflowType: &commonpb.WorkflowType{ - Name: "child workflow type", - }, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - Input: payloads.EncodeString("random input"), - ParentClosePolicy: parentClosePolicy, - }}, - }) - } - - event, _ = mutableState.AddWorkflowTaskCompletedEvent(wt, &workflowservice.RespondWorkflowTaskCompletedRequest{ - Identity: "some random identity", - Commands: commands, - }, defaultWorkflowTaskCompletionLimits) - - for i := 0; i < 10; i++ { - _, _, err = mutableState.AddStartChildWorkflowExecutionInitiatedEvent(event.GetEventId(), uuid.New(), &commandpb.StartChildWorkflowExecutionCommandAttributes{ - WorkflowId: "child workflow" + convert.IntToString(i), - WorkflowType: &commonpb.WorkflowType{ - Name: "child workflow type", - }, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - Input: payloads.EncodeString("random input"), - ParentClosePolicy: parentClosePolicy, - }, "child namespace1-ID") - s.Nil(err) - } - - mutableState.FlushBufferedEvents() - - taskID := int64(59) - event = addCompleteWorkflowEvent(mutableState, event.GetEventId(), nil) - - transferTask := &tasks.CloseExecutionTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - Version: s.version, - TaskID: taskID, - VisibilityTimestamp: time.Now().UTC(), - } - - persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - s.mockArchivalMetadata.EXPECT().GetVisibilityConfig().Return(archiver.NewDisabledArchvialConfig()) - s.mockParentClosePolicyClient.EXPECT().SendParentClosePolicyRequest(gomock.Any(), gomock.Any()).DoAndReturn( - func(_ context.Context, request parentclosepolicy.Request) error { - s.Equal(execution, request.ParentExecution) - return nil - }, - ) - - _, _, err = 
s.transferQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) - s.Nil(err) -} - -func (s *transferQueueActiveTaskExecutorSuite) TestProcessCloseExecution_NoParent_HasManyAbandonedChildren() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - }, - ) - s.Nil(err) - - wt := addWorkflowTaskScheduledEvent(mutableState) - event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) - wt.StartedEventID = event.GetEventId() - - commandType := enumspb.COMMAND_TYPE_START_CHILD_WORKFLOW_EXECUTION - parentClosePolicy := enumspb.PARENT_CLOSE_POLICY_ABANDON - var commands []*commandpb.Command - for i := 0; i < 10; i++ { - commands = append(commands, &commandpb.Command{ - CommandType: commandType, - Attributes: &commandpb.Command_StartChildWorkflowExecutionCommandAttributes{StartChildWorkflowExecutionCommandAttributes: &commandpb.StartChildWorkflowExecutionCommandAttributes{ - WorkflowId: "child workflow" + convert.IntToString(i), - WorkflowType: &commonpb.WorkflowType{ - Name: "child workflow type", - }, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - Input: payloads.EncodeString("random input"), - ParentClosePolicy: parentClosePolicy, - }}, - }) - } - - event, _ = mutableState.AddWorkflowTaskCompletedEvent(wt, &workflowservice.RespondWorkflowTaskCompletedRequest{ - Identity: "some random identity", - Commands: commands, - }, defaultWorkflowTaskCompletionLimits) - - for i := 0; i < 10; i++ { - _, _, err = mutableState.AddStartChildWorkflowExecutionInitiatedEvent(event.GetEventId(), uuid.New(), &commandpb.StartChildWorkflowExecutionCommandAttributes{ - WorkflowId: "child workflow" + convert.IntToString(i), - WorkflowType: &commonpb.WorkflowType{ - Name: "child workflow type", - }, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - Input: payloads.EncodeString("random input"), - ParentClosePolicy: parentClosePolicy, - }, "child namespace1-ID") - s.Nil(err) - } - - mutableState.FlushBufferedEvents() - - taskID := int64(59) - event = addCompleteWorkflowEvent(mutableState, event.GetEventId(), nil) - - transferTask := &tasks.CloseExecutionTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - Version: s.version, - TaskID: taskID, - VisibilityTimestamp: time.Now().UTC(), - } - - persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - s.mockArchivalMetadata.EXPECT().GetVisibilityConfig().Return(archiver.NewDisabledArchvialConfig()) - - _, _, err = 
s.transferQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) - s.Nil(err) -} - -func (s *transferQueueActiveTaskExecutorSuite) TestProcessCloseExecution_NoParent_ChildInDeletedNamespace() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - s.mockNamespaceCache.EXPECT().GetNamespace(namespace.Name("child namespace1")).Return(tests.GlobalNamespaceEntry, nil).AnyTimes() - - mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - }, - ) - s.NoError(err) - - wt := addWorkflowTaskScheduledEvent(mutableState) - event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) - wt.StartedEventID = event.GetEventId() - - event, _ = mutableState.AddWorkflowTaskCompletedEvent(wt, &workflowservice.RespondWorkflowTaskCompletedRequest{ - Identity: "some random identity", - Commands: []*commandpb.Command{ - { - CommandType: enumspb.COMMAND_TYPE_START_CHILD_WORKFLOW_EXECUTION, - Attributes: &commandpb.Command_StartChildWorkflowExecutionCommandAttributes{StartChildWorkflowExecutionCommandAttributes: &commandpb.StartChildWorkflowExecutionCommandAttributes{ - Namespace: "child namespace1", - WorkflowId: "child workflow1", - WorkflowType: &commonpb.WorkflowType{ - Name: "child workflow type", - }, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - Input: payloads.EncodeString("random input"), - ParentClosePolicy: enumspb.PARENT_CLOSE_POLICY_TERMINATE, - }}, - }, - { - CommandType: enumspb.COMMAND_TYPE_START_CHILD_WORKFLOW_EXECUTION, - Attributes: &commandpb.Command_StartChildWorkflowExecutionCommandAttributes{StartChildWorkflowExecutionCommandAttributes: &commandpb.StartChildWorkflowExecutionCommandAttributes{ - Namespace: "child namespace1", - WorkflowId: "child workflow2", - WorkflowType: &commonpb.WorkflowType{ - Name: "child workflow type", - }, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - Input: payloads.EncodeString("random input"), - ParentClosePolicy: enumspb.PARENT_CLOSE_POLICY_REQUEST_CANCEL, - }}, - }, - }, - }, defaultWorkflowTaskCompletionLimits) - - _, _, err = mutableState.AddStartChildWorkflowExecutionInitiatedEvent(event.GetEventId(), uuid.New(), &commandpb.StartChildWorkflowExecutionCommandAttributes{ - Namespace: "child namespace1", - WorkflowId: "child workflow1", - WorkflowType: &commonpb.WorkflowType{ - Name: "child workflow type", - }, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - Input: payloads.EncodeString("random input"), - ParentClosePolicy: enumspb.PARENT_CLOSE_POLICY_TERMINATE, - }, "child namespace1-ID") - s.NoError(err) - - _, _, err = mutableState.AddStartChildWorkflowExecutionInitiatedEvent(event.GetEventId(), uuid.New(), &commandpb.StartChildWorkflowExecutionCommandAttributes{ - Namespace: "child namespace1", - WorkflowId: "child workflow2", - 
WorkflowType: &commonpb.WorkflowType{ - Name: "child workflow type", - }, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - Input: payloads.EncodeString("random input"), - ParentClosePolicy: enumspb.PARENT_CLOSE_POLICY_REQUEST_CANCEL, - }, "child namespace2-ID") - s.NoError(err) - - mutableState.FlushBufferedEvents() - - taskID := int64(22) - event = addCompleteWorkflowEvent(mutableState, event.GetEventId(), nil) - - transferTask := &tasks.CloseExecutionTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - Version: s.version, - TaskID: taskID, - VisibilityTimestamp: time.Now().UTC(), - } - - persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - s.mockArchivalMetadata.EXPECT().GetVisibilityConfig().Return(archiver.NewDisabledArchvialConfig()) - - s.mockHistoryClient.EXPECT().TerminateWorkflowExecution(gomock.Any(), gomock.Any()).DoAndReturn( - func(_ context.Context, request *historyservice.TerminateWorkflowExecutionRequest, _ ...grpc.CallOption) (*historyservice.TerminateWorkflowExecutionResponse, error) { - s.True(request.GetChildWorkflowOnly()) - s.Equal(execution.GetWorkflowId(), request.GetExternalWorkflowExecution().GetWorkflowId()) - s.Equal(execution.GetRunId(), request.GetExternalWorkflowExecution().GetRunId()) - return nil, serviceerror.NewNamespaceNotFound("child namespace1") - }, - ) - - s.mockHistoryClient.EXPECT().RequestCancelWorkflowExecution(gomock.Any(), gomock.Any()).DoAndReturn( - func(_ context.Context, request *historyservice.RequestCancelWorkflowExecutionRequest, _ ...grpc.CallOption) (*historyservice.RequestCancelWorkflowExecutionResponse, error) { - s.True(request.GetChildWorkflowOnly()) - s.Equal(execution.GetWorkflowId(), request.GetExternalWorkflowExecution().GetWorkflowId()) - s.Equal(execution.GetRunId(), request.GetExternalWorkflowExecution().GetRunId()) - return nil, serviceerror.NewNamespaceNotFound("child namespace1") - }, - ) - - _, _, err = s.transferQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) - s.NoError(err) -} - -func (s *transferQueueActiveTaskExecutorSuite) TestProcessCloseExecution_DeleteAfterClose() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - }, - ) - s.Nil(err) - - wt := addWorkflowTaskScheduledEvent(mutableState) - event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) - wt.StartedEventID = event.GetEventId() - event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, 
wt.ScheduledEventID, wt.StartedEventID, "some random identity") - - taskID := int64(59) - event = addCompleteWorkflowEvent(mutableState, event.GetEventId(), nil) - - transferTask := &tasks.CloseExecutionTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - Version: s.version, - TaskID: taskID, - VisibilityTimestamp: time.Now().UTC(), - DeleteAfterClose: true, - } - - persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - s.mockArchivalMetadata.EXPECT().GetVisibilityConfig().Return(archiver.NewArchivalConfig("enabled", dc.GetStringPropertyFn("enabled"), dc.GetBoolPropertyFn(true), "disabled", "random URI")).Times(2) - s.mockArchivalClient.EXPECT().Archive(gomock.Any(), gomock.Any()).Return(nil, nil).Times(2) - s.mockSearchAttributesProvider.EXPECT().GetSearchAttributes(gomock.Any(), false).Times(2) - s.mockVisibilityManager.EXPECT().GetIndexName().Return("").Times(2) - mockDeleteMgr := deletemanager.NewMockDeleteManager(s.controller) - mockDeleteMgr.EXPECT().DeleteWorkflowExecution(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) - s.transferQueueActiveTaskExecutor.workflowDeleteManager = mockDeleteMgr - _, _, err = s.transferQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) - s.NoError(err) - - transferTask.DeleteAfterClose = false - _, _, err = s.transferQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) - s.NoError(err) -} - -func (s *transferQueueActiveTaskExecutorSuite) TestProcessCancelExecution_Success() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - targetExecution := commonpb.WorkflowExecution{ - WorkflowId: "some random target workflow ID", - RunId: uuid.New(), - } - - mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - }, - ) - s.Nil(err) - - wt := addWorkflowTaskScheduledEvent(mutableState) - event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) - wt.StartedEventID = event.GetEventId() - event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") - - taskID := int64(59) - event, rci := addRequestCancelInitiatedEvent(mutableState, event.GetEventId(), uuid.New(), tests.TargetNamespace, tests.TargetNamespaceID, targetExecution.GetWorkflowId(), targetExecution.GetRunId()) - attributes := event.GetRequestCancelExternalWorkflowExecutionInitiatedEventAttributes() - - transferTask := &tasks.CancelExecutionTask{ - 
WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - Version: s.version, - TargetNamespaceID: s.targetNamespaceID.String(), - TargetWorkflowID: targetExecution.GetWorkflowId(), - TargetRunID: targetExecution.GetRunId(), - TaskID: taskID, - TargetChildWorkflowOnly: true, - InitiatedEventID: event.GetEventId(), - } - - persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - s.mockHistoryClient.EXPECT().RequestCancelWorkflowExecution(gomock.Any(), s.createRequestCancelWorkflowExecutionRequest(s.targetNamespace, transferTask, rci, attributes)).Return(nil, nil) - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) - s.mockClusterMetadata.EXPECT().ClusterNameForFailoverVersion(s.namespaceEntry.IsGlobalNamespace(), s.version).Return(cluster.TestCurrentClusterName).AnyTimes() - - _, _, err = s.transferQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) - s.Nil(err) -} - -func (s *transferQueueActiveTaskExecutorSuite) TestProcessCancelExecution_Failure() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - targetExecution := commonpb.WorkflowExecution{ - WorkflowId: "some random target workflow ID", - RunId: uuid.New(), - } - - mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - }, - ) - s.Nil(err) - - wt := addWorkflowTaskScheduledEvent(mutableState) - event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) - wt.StartedEventID = event.GetEventId() - event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") - - taskID := int64(59) - event, rci := addRequestCancelInitiatedEvent(mutableState, event.GetEventId(), uuid.New(), tests.TargetNamespace, tests.TargetNamespaceID, targetExecution.GetWorkflowId(), targetExecution.GetRunId()) - attributes := event.GetRequestCancelExternalWorkflowExecutionInitiatedEventAttributes() - - transferTask := &tasks.CancelExecutionTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - Version: s.version, - TargetNamespaceID: s.targetNamespaceID.String(), - TargetWorkflowID: targetExecution.GetWorkflowId(), - TargetRunID: targetExecution.GetRunId(), - TaskID: taskID, - TargetChildWorkflowOnly: true, - InitiatedEventID: event.GetEventId(), - } - - persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) - 
s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - s.mockHistoryClient.EXPECT().RequestCancelWorkflowExecution(gomock.Any(), s.createRequestCancelWorkflowExecutionRequest(s.targetNamespace, transferTask, rci, attributes)).Return(nil, serviceerror.NewNotFound("")) - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) - s.mockClusterMetadata.EXPECT().ClusterNameForFailoverVersion(gomock.Any(), s.version).Return(cluster.TestCurrentClusterName).AnyTimes() - - _, _, err = s.transferQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) - s.Nil(err) -} - -func (s *transferQueueActiveTaskExecutorSuite) TestProcessCancelExecution_Failure_TargetNamespaceNotFound() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - targetExecution := commonpb.WorkflowExecution{ - WorkflowId: "some random target workflow ID", - RunId: uuid.New(), - } - - mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - }, - ) - s.Nil(err) - - wt := addWorkflowTaskScheduledEvent(mutableState) - event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) - wt.StartedEventID = event.GetEventId() - event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") - - taskID := int64(59) - event, _ = addRequestCancelInitiatedEvent(mutableState, event.GetEventId(), uuid.New(), tests.TargetNamespace, tests.TargetNamespaceID, targetExecution.GetWorkflowId(), targetExecution.GetRunId()) - - transferTask := &tasks.CancelExecutionTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - Version: s.version, - TargetNamespaceID: tests.MissedNamespaceID.String(), - TargetWorkflowID: targetExecution.GetWorkflowId(), - TargetRunID: targetExecution.GetRunId(), - TaskID: taskID, - TargetChildWorkflowOnly: true, - InitiatedEventID: event.GetEventId(), - } - - persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) - s.mockClusterMetadata.EXPECT().ClusterNameForFailoverVersion(gomock.Any(), s.version).Return(cluster.TestCurrentClusterName).AnyTimes() - - _, _, err = s.transferQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) - s.Nil(err) -} - -func (s 
*transferQueueActiveTaskExecutorSuite) TestProcessCancelExecution_Duplication() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - targetExecution := commonpb.WorkflowExecution{ - WorkflowId: "some random target workflow ID", - RunId: uuid.New(), - } - - mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - }, - ) - s.Nil(err) - - wt := addWorkflowTaskScheduledEvent(mutableState) - event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) - wt.StartedEventID = event.GetEventId() - event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") - - taskID := int64(59) - event, _ = addRequestCancelInitiatedEvent(mutableState, event.GetEventId(), uuid.New(), tests.TargetNamespace, tests.TargetNamespaceID, targetExecution.GetWorkflowId(), targetExecution.GetRunId()) - - transferTask := &tasks.CancelExecutionTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - Version: s.version, - TargetNamespaceID: s.targetNamespaceID.String(), - TargetWorkflowID: targetExecution.GetWorkflowId(), - TargetRunID: targetExecution.GetRunId(), - TaskID: taskID, - TargetChildWorkflowOnly: true, - InitiatedEventID: event.GetEventId(), - } - - event = addCancelRequestedEvent(mutableState, event.GetEventId(), tests.TargetNamespace, tests.TargetNamespaceID, targetExecution.GetWorkflowId(), targetExecution.GetRunId()) - // Flush buffered events so real IDs get assigned - mutableState.FlushBufferedEvents() - - persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - - _, _, err = s.transferQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) - s.Nil(err) -} - -func (s *transferQueueActiveTaskExecutorSuite) TestProcessSignalExecution_Success() { - mutableState, event, si := s.setupSignalExternalWorkflowInitiated() - attributes := event.GetSignalExternalWorkflowExecutionInitiatedEventAttributes() - - transferTask := &tasks.SignalExecutionTask{ - WorkflowKey: definition.NewWorkflowKey( - mutableState.GetExecutionInfo().NamespaceId, - mutableState.GetExecutionInfo().WorkflowId, - mutableState.GetExecutionState().RunId, - ), - Version: s.version, - TargetNamespaceID: attributes.GetNamespaceId(), - TargetWorkflowID: attributes.WorkflowExecution.GetWorkflowId(), - TargetRunID: attributes.WorkflowExecution.GetRunId(), - TaskID: int64(59), - TargetChildWorkflowOnly: true, - InitiatedEventID: event.GetEventId(), - } - - persistenceMutableState := 
s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - s.mockHistoryClient.EXPECT().SignalWorkflowExecution(gomock.Any(), s.createSignalWorkflowExecutionRequest(namespace.Name(attributes.Namespace), transferTask, si, attributes)).Return(nil, nil) - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) - s.mockClusterMetadata.EXPECT().ClusterNameForFailoverVersion(s.namespaceEntry.IsGlobalNamespace(), s.version).Return(cluster.TestCurrentClusterName).AnyTimes() - - s.mockHistoryClient.EXPECT().RemoveSignalMutableState(gomock.Any(), &historyservice.RemoveSignalMutableStateRequest{ - NamespaceId: transferTask.TargetNamespaceID, - WorkflowExecution: &commonpb.WorkflowExecution{ - WorkflowId: transferTask.TargetWorkflowID, - RunId: transferTask.TargetRunID, - }, - RequestId: si.GetRequestId(), - }).Return(nil, nil) - - _, _, err := s.transferQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) - s.Nil(err) -} - -func (s *transferQueueActiveTaskExecutorSuite) TestProcessSignalExecution_Failure_TargetWorkflowNotFound() { - mutableState, event, si := s.setupSignalExternalWorkflowInitiated() - attributes := event.GetSignalExternalWorkflowExecutionInitiatedEventAttributes() - - transferTask := &tasks.SignalExecutionTask{ - WorkflowKey: definition.NewWorkflowKey( - mutableState.GetExecutionInfo().NamespaceId, - mutableState.GetExecutionInfo().WorkflowId, - mutableState.GetExecutionState().RunId, - ), - Version: s.version, - TargetNamespaceID: attributes.GetNamespaceId(), - TargetWorkflowID: attributes.WorkflowExecution.GetWorkflowId(), - TargetRunID: attributes.WorkflowExecution.GetRunId(), - TaskID: int64(59), - TargetChildWorkflowOnly: true, - InitiatedEventID: event.GetEventId(), - } - - persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - s.mockHistoryClient.EXPECT().SignalWorkflowExecution(gomock.Any(), s.createSignalWorkflowExecutionRequest(namespace.Name(attributes.Namespace), transferTask, si, attributes)).Return(nil, serviceerror.NewNotFound("")) - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).DoAndReturn( - func(_ context.Context, request *persistence.UpdateWorkflowExecutionRequest) (*persistence.UpdateWorkflowExecutionResponse, error) { - s.validateUpdateExecutionRequestWithSignalExternalFailedEvent( - si.InitiatedEventId, - enumspb.SIGNAL_EXTERNAL_WORKFLOW_EXECUTION_FAILED_CAUSE_EXTERNAL_WORKFLOW_EXECUTION_NOT_FOUND, - request, - ) - return tests.UpdateWorkflowExecutionResponse, nil - }, - ) - s.mockClusterMetadata.EXPECT().ClusterNameForFailoverVersion(s.namespaceEntry.IsGlobalNamespace(), s.version).Return(cluster.TestCurrentClusterName).AnyTimes() - - _, _, err := s.transferQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) - s.Nil(err) -} - -func (s *transferQueueActiveTaskExecutorSuite) TestProcessSignalExecution_Failure_TargetNamespaceNotFound() { - mutableState, event, si := s.setupSignalExternalWorkflowInitiated() - attributes := 
event.GetSignalExternalWorkflowExecutionInitiatedEventAttributes() - - transferTask := &tasks.SignalExecutionTask{ - WorkflowKey: definition.NewWorkflowKey( - mutableState.GetExecutionInfo().NamespaceId, - mutableState.GetExecutionInfo().WorkflowId, - mutableState.GetExecutionState().RunId, - ), - Version: s.version, - TargetNamespaceID: tests.MissedNamespaceID.String(), - TargetWorkflowID: attributes.WorkflowExecution.GetWorkflowId(), - TargetRunID: attributes.WorkflowExecution.GetRunId(), - TaskID: int64(59), - TargetChildWorkflowOnly: true, - InitiatedEventID: event.GetEventId(), - } - - persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).DoAndReturn( - func(_ context.Context, request *persistence.UpdateWorkflowExecutionRequest) (*persistence.UpdateWorkflowExecutionResponse, error) { - s.validateUpdateExecutionRequestWithSignalExternalFailedEvent( - si.InitiatedEventId, - enumspb.SIGNAL_EXTERNAL_WORKFLOW_EXECUTION_FAILED_CAUSE_NAMESPACE_NOT_FOUND, - request, - ) - return tests.UpdateWorkflowExecutionResponse, nil - }, - ) - s.mockClusterMetadata.EXPECT().ClusterNameForFailoverVersion(s.namespaceEntry.IsGlobalNamespace(), s.version).Return(cluster.TestCurrentClusterName).AnyTimes() - - _, _, err := s.transferQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) - s.Nil(err) -} - -func (s *transferQueueActiveTaskExecutorSuite) TestProcessSignalExecution_Failure_SignalCountLimitExceeded() { - mutableState, event, si := s.setupSignalExternalWorkflowInitiated() - attributes := event.GetSignalExternalWorkflowExecutionInitiatedEventAttributes() - - transferTask := &tasks.SignalExecutionTask{ - WorkflowKey: definition.NewWorkflowKey( - mutableState.GetExecutionInfo().NamespaceId, - mutableState.GetExecutionInfo().WorkflowId, - mutableState.GetExecutionState().RunId, - ), - Version: s.version, - TargetNamespaceID: attributes.GetNamespaceId(), - TargetWorkflowID: attributes.WorkflowExecution.GetWorkflowId(), - TargetRunID: attributes.WorkflowExecution.GetRunId(), - TaskID: int64(59), - TargetChildWorkflowOnly: true, - InitiatedEventID: event.GetEventId(), - } - - persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - s.mockHistoryClient.EXPECT().SignalWorkflowExecution(gomock.Any(), s.createSignalWorkflowExecutionRequest(namespace.Name(attributes.Namespace), transferTask, si, attributes)).Return(nil, consts.ErrSignalsLimitExceeded) - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).DoAndReturn( - func(_ context.Context, request *persistence.UpdateWorkflowExecutionRequest) (*persistence.UpdateWorkflowExecutionResponse, error) { - s.validateUpdateExecutionRequestWithSignalExternalFailedEvent( - si.InitiatedEventId, - enumspb.SIGNAL_EXTERNAL_WORKFLOW_EXECUTION_FAILED_CAUSE_SIGNAL_COUNT_LIMIT_EXCEEDED, - request, - ) - return tests.UpdateWorkflowExecutionResponse, nil - }, - ) - s.mockClusterMetadata.EXPECT().ClusterNameForFailoverVersion(s.namespaceEntry.IsGlobalNamespace(), 
s.version).Return(cluster.TestCurrentClusterName).AnyTimes() - - _, _, err := s.transferQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) - s.Nil(err) -} - -func (s *transferQueueActiveTaskExecutorSuite) TestProcessSignalExecution_Duplication() { - mutableState, event, _ := s.setupSignalExternalWorkflowInitiated() - attributes := event.GetSignalExternalWorkflowExecutionInitiatedEventAttributes() - - transferTask := &tasks.SignalExecutionTask{ - WorkflowKey: definition.NewWorkflowKey( - mutableState.GetExecutionInfo().NamespaceId, - mutableState.GetExecutionInfo().WorkflowId, - mutableState.GetExecutionState().RunId, - ), - Version: s.version, - TargetNamespaceID: attributes.GetNamespaceId(), - TargetWorkflowID: attributes.WorkflowExecution.GetWorkflowId(), - TargetRunID: attributes.WorkflowExecution.GetRunId(), - TaskID: int64(59), - TargetChildWorkflowOnly: true, - InitiatedEventID: event.GetEventId(), - } - - event = addSignaledEvent( - mutableState, - event.GetEventId(), - tests.TargetNamespace, - namespace.ID(transferTask.TargetNamespaceID), - attributes.WorkflowExecution.GetWorkflowId(), - attributes.WorkflowExecution.GetRunId(), - "", - ) - // Flush buffered events so real IDs get assigned - mutableState.FlushBufferedEvents() - - persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - - _, _, err := s.transferQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) - s.Nil(err) -} - -func (s *transferQueueActiveTaskExecutorSuite) setupSignalExternalWorkflowInitiated() ( - *workflow.MutableStateImpl, - *historypb.HistoryEvent, - *persistencespb.SignalInfo, -) { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - targetExecution := commonpb.WorkflowExecution{ - WorkflowId: "some random target workflow ID", - RunId: uuid.New(), - } - signalName := "some random signal name" - signalInput := payloads.EncodeString("some random signal input") - signalControl := "some random signal control" - signalHeader := &commonpb.Header{ - Fields: map[string]*commonpb.Payload{"signal header key": payload.EncodeString("signal header value")}, - } - - mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - }, - ) - s.NoError(err) - - wt := addWorkflowTaskScheduledEvent(mutableState) - event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) - wt.StartedEventID = event.GetEventId() - event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") - - event, signalInfo := 
addRequestSignalInitiatedEvent(mutableState, event.GetEventId(), uuid.New(), - tests.TargetNamespace, tests.TargetNamespaceID, targetExecution.GetWorkflowId(), targetExecution.GetRunId(), signalName, signalInput, - signalControl, signalHeader) - - return mutableState, event, signalInfo -} - -func (s *transferQueueActiveTaskExecutorSuite) validateUpdateExecutionRequestWithSignalExternalFailedEvent( - signalInitiatedEventId int64, - expectedFailedCause enumspb.SignalExternalWorkflowExecutionFailedCause, - request *persistence.UpdateWorkflowExecutionRequest, -) { - s.Len(request.UpdateWorkflowMutation.DeleteSignalInfos, 1) - _, ok := request.UpdateWorkflowMutation.DeleteSignalInfos[signalInitiatedEventId] - s.True(ok) - - numFailedEvent := 0 - s.Len(request.UpdateWorkflowEvents, 1) - for _, event := range request.UpdateWorkflowEvents[0].Events { - if event.EventType != enumspb.EVENT_TYPE_SIGNAL_EXTERNAL_WORKFLOW_EXECUTION_FAILED { - continue - } - attr := event.GetSignalExternalWorkflowExecutionFailedEventAttributes() - s.Equal(expectedFailedCause, attr.GetCause()) - numFailedEvent++ - } - s.Equal(1, numFailedEvent) -} - -func (s *transferQueueActiveTaskExecutorSuite) TestProcessStartChildExecution_Success() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - childWorkflowID := "some random child workflow ID" - childRunID := uuid.New() - childWorkflowType := "some random child workflow type" - childTaskQueueName := "some random child task queue" - - mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - }, - ) - s.Nil(err) - - wt := addWorkflowTaskScheduledEvent(mutableState) - event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) - wt.StartedEventID = event.GetEventId() - event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") - - taskID := int64(59) - - event, ci := addStartChildWorkflowExecutionInitiatedEvent( - mutableState, - event.GetEventId(), - uuid.New(), - s.childNamespace, - s.childNamespaceID, - childWorkflowID, - childWorkflowType, - childTaskQueueName, - nil, - 1*time.Second, - 1*time.Second, - 1*time.Second, - enumspb.PARENT_CLOSE_POLICY_TERMINATE, - ) - - transferTask := &tasks.StartChildExecutionTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - Version: s.version, - TargetNamespaceID: tests.ChildNamespaceID.String(), - TargetWorkflowID: childWorkflowID, - TaskID: taskID, - InitiatedEventID: event.GetEventId(), - VisibilityTimestamp: time.Now().UTC(), - } - - childClock := vclock.NewVectorClock(rand.Int63(), rand.Int31(), rand.Int63()) - persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) - 
s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - s.mockHistoryClient.EXPECT().StartWorkflowExecution(gomock.Any(), s.createChildWorkflowExecutionRequest( - s.namespace, - s.childNamespace, - transferTask, - mutableState, - ci, - )).Return(&historyservice.StartWorkflowExecutionResponse{RunId: childRunID, Clock: childClock}, nil) - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) - s.mockClusterMetadata.EXPECT().ClusterNameForFailoverVersion(s.namespaceEntry.IsGlobalNamespace(), s.version).Return(cluster.TestCurrentClusterName).AnyTimes() - currentShardClock := s.mockShard.CurrentVectorClock() - s.mockHistoryClient.EXPECT().ScheduleWorkflowTask(gomock.Any(), gomock.Any()).DoAndReturn( - func(_ context.Context, request *historyservice.ScheduleWorkflowTaskRequest, _ ...grpc.CallOption) (*historyservice.ScheduleWorkflowTaskResponse, error) { - parentClock := request.ParentClock - request.ParentClock = nil - s.Equal(&historyservice.ScheduleWorkflowTaskRequest{ - NamespaceId: tests.ChildNamespaceID.String(), - WorkflowExecution: &commonpb.WorkflowExecution{ - WorkflowId: childWorkflowID, - RunId: childRunID, - }, - IsFirstWorkflowTask: true, - ParentClock: nil, - ChildClock: childClock, - }, request) - cmpResult, err := vclock.Compare(currentShardClock, parentClock) - if err != nil { - return nil, err - } - s.NoError(err) - s.True(cmpResult <= 0) - return &historyservice.ScheduleWorkflowTaskResponse{}, nil - }, - ) - - _, _, err = s.transferQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) - s.Nil(err) -} - -func (s *transferQueueActiveTaskExecutorSuite) TestProcessStartChildExecution_Failure() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - childWorkflowID := "some random child workflow ID" - childWorkflowType := "some random child workflow type" - childTaskQueueName := "some random child task queue" - - mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - ContinueAsNewInitiator: enumspb.CONTINUE_AS_NEW_INITIATOR_UNSPECIFIED, - }, - ) - s.Nil(err) - - wt := addWorkflowTaskScheduledEvent(mutableState) - event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) - wt.StartedEventID = event.GetEventId() - event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") - - taskID := int64(59) - - event, ci := addStartChildWorkflowExecutionInitiatedEvent( - mutableState, - event.GetEventId(), - uuid.New(), - s.childNamespace, - s.childNamespaceID, - childWorkflowID, - childWorkflowType, - childTaskQueueName, - nil, - 1*time.Second, - 1*time.Second, 
- 1*time.Second, - enumspb.PARENT_CLOSE_POLICY_TERMINATE, - ) - - transferTask := &tasks.StartChildExecutionTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - Version: s.version, - TargetNamespaceID: tests.ChildNamespaceID.String(), - TargetWorkflowID: childWorkflowID, - TaskID: taskID, - InitiatedEventID: event.GetEventId(), - VisibilityTimestamp: time.Now().UTC(), - } - - persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - s.mockHistoryClient.EXPECT().StartWorkflowExecution(gomock.Any(), s.createChildWorkflowExecutionRequest( - s.namespace, - s.childNamespace, - transferTask, - mutableState, - ci, - )).Return(nil, serviceerror.NewWorkflowExecutionAlreadyStarted("msg", "", "")) - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) - s.mockClusterMetadata.EXPECT().ClusterNameForFailoverVersion(s.namespaceEntry.IsGlobalNamespace(), s.version).Return(cluster.TestCurrentClusterName).AnyTimes() - - _, _, err = s.transferQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) - s.Nil(err) -} - -func (s *transferQueueActiveTaskExecutorSuite) TestProcessStartChildExecution_Failure_TargetNamespaceNotFound() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - childWorkflowID := "some random child workflow ID" - childWorkflowType := "some random child workflow type" - childTaskQueueName := "some random child task queue" - - mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - ContinueAsNewInitiator: enumspb.CONTINUE_AS_NEW_INITIATOR_UNSPECIFIED, - }, - ) - s.NoError(err) - - wt := addWorkflowTaskScheduledEvent(mutableState) - event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) - wt.StartedEventID = event.GetEventId() - event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") - - taskID := int64(59) - - event, _ = addStartChildWorkflowExecutionInitiatedEvent( - mutableState, - event.GetEventId(), - uuid.New(), - s.namespace, - s.namespaceID, - childWorkflowID, - childWorkflowType, - childTaskQueueName, - nil, - 1*time.Second, - 1*time.Second, - 1*time.Second, - enumspb.PARENT_CLOSE_POLICY_TERMINATE, - ) - - transferTask := &tasks.StartChildExecutionTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - Version: s.version, - TargetNamespaceID: tests.MissedNamespaceID.String(), - 
TargetWorkflowID: childWorkflowID, - TaskID: taskID, - InitiatedEventID: event.GetEventId(), - VisibilityTimestamp: time.Now().UTC(), - } - - persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) - s.mockClusterMetadata.EXPECT().ClusterNameForFailoverVersion(s.namespaceEntry.IsGlobalNamespace(), s.version).Return(cluster.TestCurrentClusterName).AnyTimes() - - _, _, err = s.transferQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) - s.NoError(err) -} - -func (s *transferQueueActiveTaskExecutorSuite) TestProcessStartChildExecution_Success_Dup() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - childWorkflowID := "some random child workflow ID" - childRunID := uuid.New() - childWorkflowType := "some random child workflow type" - childTaskQueueName := "some random child task queue" - - mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - }, - ) - s.Nil(err) - - wt := addWorkflowTaskScheduledEvent(mutableState) - event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) - wt.StartedEventID = event.GetEventId() - event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") - - taskID := int64(59) - - event, ci := addStartChildWorkflowExecutionInitiatedEvent( - mutableState, - event.GetEventId(), - uuid.New(), - s.childNamespace, - s.childNamespaceID, - childWorkflowID, - childWorkflowType, - childTaskQueueName, - nil, - 1*time.Second, - 1*time.Second, - 1*time.Second, - enumspb.PARENT_CLOSE_POLICY_TERMINATE, - ) - - transferTask := &tasks.StartChildExecutionTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - Version: s.version, - TargetNamespaceID: tests.ChildNamespaceID.String(), - TargetWorkflowID: childWorkflowID, - TaskID: taskID, - InitiatedEventID: event.GetEventId(), - VisibilityTimestamp: time.Now().UTC(), - } - childClock := vclock.NewVectorClock(rand.Int63(), rand.Int31(), rand.Int63()) - event = addChildWorkflowExecutionStartedEvent(mutableState, event.GetEventId(), childWorkflowID, childRunID, childWorkflowType, childClock) - // Flush buffered events so real IDs get assigned - mutableState.FlushBufferedEvents() - ci.StartedEventId = event.GetEventId() - - persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) - 
s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - currentShardClock := s.mockShard.CurrentVectorClock() - s.mockHistoryClient.EXPECT().ScheduleWorkflowTask(gomock.Any(), gomock.Any()).DoAndReturn( - func(_ context.Context, request *historyservice.ScheduleWorkflowTaskRequest, _ ...grpc.CallOption) (*historyservice.ScheduleWorkflowTaskResponse, error) { - parentClock := request.ParentClock - request.ParentClock = nil - s.Equal(&historyservice.ScheduleWorkflowTaskRequest{ - NamespaceId: tests.ChildNamespaceID.String(), - WorkflowExecution: &commonpb.WorkflowExecution{ - WorkflowId: childWorkflowID, - RunId: childRunID, - }, - IsFirstWorkflowTask: true, - ParentClock: nil, - ChildClock: childClock, - }, request) - cmpResult, err := vclock.Compare(currentShardClock, parentClock) - if err != nil { - return nil, err - } - s.NoError(err) - s.True(cmpResult <= 0) - return &historyservice.ScheduleWorkflowTaskResponse{}, nil - }, - ) - - _, _, err = s.transferQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) - s.Nil(err) -} - -func (s *transferQueueActiveTaskExecutorSuite) TestProcessStartChildExecution_Duplication() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - childExecution := commonpb.WorkflowExecution{ - WorkflowId: "some random child workflow ID", - RunId: uuid.New(), - } - childWorkflowType := "some random child workflow type" - childTaskQueueName := "some random child task queue" - - mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - }, - ) - s.Nil(err) - - wt := addWorkflowTaskScheduledEvent(mutableState) - event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) - wt.StartedEventID = event.GetEventId() - event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") - - taskID := int64(59) - - event, ci := addStartChildWorkflowExecutionInitiatedEvent( - mutableState, - event.GetEventId(), - uuid.New(), - s.childNamespace, - s.childNamespaceID, - childExecution.GetWorkflowId(), - childWorkflowType, - childTaskQueueName, - nil, - 1*time.Second, - 1*time.Second, - 1*time.Second, - enumspb.PARENT_CLOSE_POLICY_TERMINATE, - ) - - transferTask := &tasks.StartChildExecutionTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - Version: s.version, - TargetNamespaceID: tests.ChildNamespaceID.String(), - TargetWorkflowID: childExecution.GetWorkflowId(), - TaskID: taskID, - InitiatedEventID: event.GetEventId(), - VisibilityTimestamp: time.Now().UTC(), - } - childClock := vclock.NewVectorClock(rand.Int63(), rand.Int31(), rand.Int63()) - event = 
addChildWorkflowExecutionStartedEvent(mutableState, event.GetEventId(), childExecution.GetWorkflowId(), childExecution.GetRunId(), childWorkflowType, childClock) - ci.StartedEventId = event.GetEventId() - event = addChildWorkflowExecutionCompletedEvent(mutableState, ci.InitiatedEventId, &childExecution, &historypb.WorkflowExecutionCompletedEventAttributes{ - Result: payloads.EncodeString("some random child workflow execution result"), - WorkflowTaskCompletedEventId: transferTask.InitiatedEventID, - }) - // Flush buffered events so real IDs get assigned - mutableState.FlushBufferedEvents() - - persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - - _, _, err = s.transferQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) - s.Nil(err) -} - -func (s *transferQueueActiveTaskExecutorSuite) TestProcessorStartChildExecution_ChildStarted_ParentClosed() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - childExecution := commonpb.WorkflowExecution{ - WorkflowId: "some random child workflow ID", - RunId: uuid.New(), - } - childWorkflowType := "some random child workflow type" - childTaskQueueName := "some random child task queue" - - mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - }, - ) - s.Nil(err) - - wt := addWorkflowTaskScheduledEvent(mutableState) - event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) - wt.StartedEventID = event.GetEventId() - event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") - - taskID := int64(59) - - event, ci := addStartChildWorkflowExecutionInitiatedEvent( - mutableState, - event.GetEventId(), - uuid.New(), - s.childNamespace, - s.childNamespaceID, - childExecution.GetWorkflowId(), - childWorkflowType, - childTaskQueueName, - nil, - 1*time.Second, - 1*time.Second, - 1*time.Second, - enumspb.PARENT_CLOSE_POLICY_ABANDON, - ) - - transferTask := &tasks.StartChildExecutionTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - Version: s.version, - TargetNamespaceID: tests.ChildNamespaceID.String(), - TargetWorkflowID: childExecution.GetWorkflowId(), - TaskID: taskID, - InitiatedEventID: event.GetEventId(), - VisibilityTimestamp: time.Now().UTC(), - } - childClock := vclock.NewVectorClock(rand.Int63(), rand.Int31(), rand.Int63()) - event = addChildWorkflowExecutionStartedEvent(mutableState, event.GetEventId(), childExecution.GetWorkflowId(), childExecution.GetRunId(), childWorkflowType, childClock) - 
ci.StartedEventId = event.GetEventId() - wt = addWorkflowTaskScheduledEvent(mutableState) - event = addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, "some random identity") - wt.StartedEventID = event.GetEventId() - event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") - event = addCompleteWorkflowEvent(mutableState, event.EventId, nil) - // Flush buffered events so real IDs get assigned - mutableState.FlushBufferedEvents() - - persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - currentShardClock := s.mockShard.CurrentVectorClock() - s.mockHistoryClient.EXPECT().ScheduleWorkflowTask(gomock.Any(), gomock.Any()).DoAndReturn( - func(_ context.Context, request *historyservice.ScheduleWorkflowTaskRequest, _ ...grpc.CallOption) (*historyservice.ScheduleWorkflowTaskResponse, error) { - parentClock := request.ParentClock - request.ParentClock = nil - s.Equal(&historyservice.ScheduleWorkflowTaskRequest{ - NamespaceId: s.childNamespaceID.String(), - WorkflowExecution: &commonpb.WorkflowExecution{ - WorkflowId: childExecution.WorkflowId, - RunId: childExecution.RunId, - }, - IsFirstWorkflowTask: true, - ParentClock: nil, - ChildClock: childClock, - }, request) - cmpResult, err := vclock.Compare(currentShardClock, parentClock) - if err != nil { - return nil, err - } - s.NoError(err) - s.True(cmpResult <= 0) - return &historyservice.ScheduleWorkflowTaskResponse{}, nil - }, - ) - - _, _, err = s.transferQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) - s.Nil(err) -} - -func (s *transferQueueActiveTaskExecutorSuite) TestCopySearchAttributes() { - var input map[string]*commonpb.Payload - s.Nil(copySearchAttributes(input)) - - key := "key" - val := payload.EncodeBytes([]byte{'1', '2', '3'}) - input = map[string]*commonpb.Payload{ - key: val, - } - result := copySearchAttributes(input) - s.Equal(input, result) - result[key].GetData()[0] = '0' - s.Equal(byte('1'), val.GetData()[0]) -} - -func (s *transferQueueActiveTaskExecutorSuite) createAddActivityTaskRequest( - task *tasks.ActivityTask, - ai *persistencespb.ActivityInfo, -) *matchingservice.AddActivityTaskRequest { - return &matchingservice.AddActivityTaskRequest{ - NamespaceId: task.NamespaceID, - Execution: &commonpb.WorkflowExecution{ - WorkflowId: task.WorkflowID, - RunId: task.RunID, - }, - TaskQueue: &taskqueuepb.TaskQueue{ - Name: task.TaskQueue, - Kind: enumspb.TASK_QUEUE_KIND_NORMAL, - }, - ScheduledEventId: task.ScheduledEventID, - ScheduleToStartTimeout: ai.ScheduleToStartTimeout, - Clock: vclock.NewVectorClock(s.mockClusterMetadata.GetClusterID(), s.mockShard.GetShardID(), task.TaskID), - VersionDirective: common.MakeVersionDirectiveForActivityTask(nil, false), - } -} - -func (s *transferQueueActiveTaskExecutorSuite) TestPendingCloseExecutionTasks() { - testCases := []struct { - Name string - EnsureCloseBeforeDelete bool - CloseTransferTaskIdSet bool - CloseTaskIsAcked bool - ShouldDelete bool - }{ - { - Name: "skip the check", - EnsureCloseBeforeDelete: false, - ShouldDelete: true, - }, - { - Name: "no task id", - EnsureCloseBeforeDelete: true, - CloseTransferTaskIdSet: false, - ShouldDelete: true, - }, - { - Name: "multicursor queue unacked", - EnsureCloseBeforeDelete: true, - 
CloseTransferTaskIdSet: true, - CloseTaskIsAcked: false, - ShouldDelete: false, - }, - { - Name: "multicursor queue acked", - EnsureCloseBeforeDelete: true, - CloseTransferTaskIdSet: true, - CloseTaskIsAcked: true, - ShouldDelete: true, - }, - } - for _, c := range testCases { - s.Run(c.Name, func() { - ctrl := gomock.NewController(s.T()) - - mockMutableState := workflow.NewMockMutableState(ctrl) - var closeTransferTaskId int64 - if c.CloseTransferTaskIdSet { - closeTransferTaskId = 10 - } - workflowKey := definition.NewWorkflowKey(uuid.New(), uuid.New(), uuid.New()) - mockMutableState.EXPECT().GetExecutionInfo().Return(&persistencespb.WorkflowExecutionInfo{ - NamespaceId: workflowKey.NamespaceID, - WorkflowId: workflowKey.WorkflowID, - CloseTransferTaskId: closeTransferTaskId, - }).AnyTimes() - var deleteExecutionTaskId int64 = 1 - mockMutableState.EXPECT().GetNextEventID().Return(deleteExecutionTaskId + 1).AnyTimes() - namespaceEntry := tests.GlobalNamespaceEntry - mockMutableState.EXPECT().GetNamespaceEntry().Return(namespaceEntry).AnyTimes() - - mockWorkflowContext := workflow.NewMockContext(ctrl) - mockWorkflowContext.EXPECT().GetWorkflowKey().Return(workflowKey).AnyTimes() - mockWorkflowContext.EXPECT().LoadMutableState(gomock.Any()).Return(mockMutableState, nil) - - mockWorkflowCache := wcache.NewMockCache(ctrl) - mockWorkflowCache.EXPECT().GetOrCreateWorkflowExecution(gomock.Any(), gomock.Any(), gomock.Any(), - gomock.Any(), - ).Return(mockWorkflowContext, wcache.ReleaseCacheFunc(func(err error) { - }), nil) - - mockClusterMetadata := cluster.NewMockMetadata(ctrl) - mockClusterMetadata.EXPECT().IsGlobalNamespaceEnabled().Return(false).AnyTimes() - - mockShard := shard.NewMockContext(ctrl) - mockShard.EXPECT().GetConfig().Return(&configs.Config{ - TransferProcessorEnsureCloseBeforeDelete: func() bool { - return c.EnsureCloseBeforeDelete - }, - }).AnyTimes() - mockShard.EXPECT().GetClusterMetadata().Return(mockClusterMetadata).AnyTimes() - mockMutableState.EXPECT().GetLastWriteVersion().Return(tests.Version, nil).AnyTimes() - mockNamespaceRegistry := namespace.NewMockRegistry(ctrl) - mockNamespaceRegistry.EXPECT().GetNamespaceByID(gomock.Any()).Return(namespaceEntry, nil) - mockShard.EXPECT().GetNamespaceRegistry().Return(mockNamespaceRegistry) - - var highWatermarkTaskId int64 - if c.CloseTaskIsAcked { - highWatermarkTaskId = closeTransferTaskId + 1 - } else { - highWatermarkTaskId = closeTransferTaskId - } - mockShard.EXPECT().GetQueueState(tasks.CategoryTransfer).Return(&persistencespb.QueueState{ - ReaderStates: nil, - ExclusiveReaderHighWatermark: &persistencespb.TaskKey{ - FireTime: timestamp.TimePtr(tasks.DefaultFireTime), - TaskId: highWatermarkTaskId, - }, - }, true).AnyTimes() - - mockWorkflowDeleteManager := deletemanager.NewMockDeleteManager(ctrl) - if c.ShouldDelete { - mockWorkflowDeleteManager.EXPECT().DeleteWorkflowExecution(gomock.Any(), gomock.Any(), gomock.Any(), - gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()) - } - - executor := &transferQueueActiveTaskExecutor{ - transferQueueTaskExecutorBase: &transferQueueTaskExecutorBase{ - cache: mockWorkflowCache, - config: mockShard.GetConfig(), - metricHandler: metrics.NoopMetricsHandler, - shard: mockShard, - workflowDeleteManager: mockWorkflowDeleteManager, - }, - } - - task := &tasks.DeleteExecutionTask{ - WorkflowKey: workflowKey, - TaskID: deleteExecutionTaskId, - Version: tests.Version, - } - executable := queues.NewMockExecutable(ctrl) - executable.EXPECT().GetTask().Return(task) - _, _, err := 
executor.Execute(context.Background(), executable) - if c.ShouldDelete { - s.NoError(err) - } else { - s.Error(err) - s.Assert().ErrorIs(err, consts.ErrDependencyTaskNotCompleted) - } - }) - } -} - -func (s *transferQueueActiveTaskExecutorSuite) createAddWorkflowTaskRequest( - task *tasks.WorkflowTask, - mutableState workflow.MutableState, -) *matchingservice.AddWorkflowTaskRequest { - taskQueue := &taskqueuepb.TaskQueue{ - Name: task.TaskQueue, - Kind: enumspb.TASK_QUEUE_KIND_NORMAL, - } - executionInfo := mutableState.GetExecutionInfo() - timeout := executionInfo.WorkflowRunTimeout - if executionInfo.TaskQueue != task.TaskQueue { - taskQueue.Kind = enumspb.TASK_QUEUE_KIND_STICKY - taskQueue.NormalName = executionInfo.TaskQueue - timeout = executionInfo.StickyScheduleToStartTimeout - } - - directive := common.MakeVersionDirectiveForWorkflowTask( - mutableState.GetWorkerVersionStamp(), - mutableState.GetLastWorkflowTaskStartedEventID(), - ) - - return &matchingservice.AddWorkflowTaskRequest{ - NamespaceId: task.NamespaceID, - Execution: &commonpb.WorkflowExecution{ - WorkflowId: task.WorkflowID, - RunId: task.RunID, - }, - TaskQueue: taskQueue, - ScheduledEventId: task.ScheduledEventID, - ScheduleToStartTimeout: timeout, - Clock: vclock.NewVectorClock(s.mockClusterMetadata.GetClusterID(), s.mockShard.GetShardID(), task.TaskID), - VersionDirective: directive, - } -} - -func (s *transferQueueActiveTaskExecutorSuite) createRequestCancelWorkflowExecutionRequest( - targetNamespace namespace.Name, - task *tasks.CancelExecutionTask, - rci *persistencespb.RequestCancelInfo, - attributes *historypb.RequestCancelExternalWorkflowExecutionInitiatedEventAttributes, -) *historyservice.RequestCancelWorkflowExecutionRequest { - sourceExecution := commonpb.WorkflowExecution{ - WorkflowId: task.WorkflowID, - RunId: task.RunID, - } - targetExecution := commonpb.WorkflowExecution{ - WorkflowId: task.TargetWorkflowID, - RunId: task.TargetRunID, - } - - return &historyservice.RequestCancelWorkflowExecutionRequest{ - NamespaceId: task.TargetNamespaceID, - CancelRequest: &workflowservice.RequestCancelWorkflowExecutionRequest{ - Namespace: targetNamespace.String(), - WorkflowExecution: &targetExecution, - Identity: consts.IdentityHistoryService, - // Use the same request ID to dedupe RequestCancelWorkflowExecution calls - RequestId: rci.GetCancelRequestId(), - Reason: attributes.Reason, - }, - ExternalInitiatedEventId: task.InitiatedEventID, - ExternalWorkflowExecution: &sourceExecution, - ChildWorkflowOnly: task.TargetChildWorkflowOnly, - } -} - -func (s *transferQueueActiveTaskExecutorSuite) createSignalWorkflowExecutionRequest( - targetNamespace namespace.Name, - task *tasks.SignalExecutionTask, - si *persistencespb.SignalInfo, - attributes *historypb.SignalExternalWorkflowExecutionInitiatedEventAttributes, -) *historyservice.SignalWorkflowExecutionRequest { - sourceExecution := commonpb.WorkflowExecution{ - WorkflowId: task.WorkflowID, - RunId: task.RunID, - } - targetExecution := commonpb.WorkflowExecution{ - WorkflowId: task.TargetWorkflowID, - RunId: task.TargetRunID, - } - - return &historyservice.SignalWorkflowExecutionRequest{ - NamespaceId: task.TargetNamespaceID, - SignalRequest: &workflowservice.SignalWorkflowExecutionRequest{ - Namespace: targetNamespace.String(), - WorkflowExecution: &targetExecution, - Identity: consts.IdentityHistoryService, - SignalName: attributes.SignalName, - Input: attributes.Input, - RequestId: si.GetRequestId(), - Control: attributes.Control, - Header: attributes.Header, - 
}, - ExternalWorkflowExecution: &sourceExecution, - ChildWorkflowOnly: task.TargetChildWorkflowOnly, - } -} - -func (s *transferQueueActiveTaskExecutorSuite) createChildWorkflowExecutionRequest( - namespace namespace.Name, - childNamespace namespace.Name, - task *tasks.StartChildExecutionTask, - mutableState workflow.MutableState, - ci *persistencespb.ChildExecutionInfo, -) *historyservice.StartWorkflowExecutionRequest { - event, err := mutableState.GetChildExecutionInitiatedEvent(context.Background(), task.InitiatedEventID) - s.NoError(err) - attributes := event.GetStartChildWorkflowExecutionInitiatedEventAttributes() - execution := commonpb.WorkflowExecution{ - WorkflowId: task.WorkflowID, - RunId: task.RunID, - } - now := s.timeSource.Now().UTC() - return &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: task.TargetNamespaceID, - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - Namespace: childNamespace.String(), - WorkflowId: attributes.WorkflowId, - WorkflowType: attributes.WorkflowType, - TaskQueue: attributes.TaskQueue, - Input: attributes.Input, - WorkflowExecutionTimeout: attributes.WorkflowExecutionTimeout, - WorkflowRunTimeout: attributes.WorkflowRunTimeout, - WorkflowTaskTimeout: attributes.WorkflowTaskTimeout, - // Use the same request ID to dedupe StartWorkflowExecution calls - RequestId: ci.CreateRequestId, - WorkflowIdReusePolicy: attributes.WorkflowIdReusePolicy, - }, - ParentExecutionInfo: &workflowspb.ParentExecutionInfo{ - NamespaceId: task.NamespaceID, - Namespace: tests.Namespace.String(), - Execution: &execution, - InitiatedId: task.InitiatedEventID, - InitiatedVersion: task.Version, - Clock: vclock.NewVectorClock(s.mockClusterMetadata.GetClusterID(), s.mockShard.GetShardID(), task.TaskID), - }, - FirstWorkflowTaskBackoff: backoff.GetBackoffForNextScheduleNonNegative(attributes.GetCronSchedule(), now, now), - ContinueAsNewInitiator: enumspb.CONTINUE_AS_NEW_INITIATOR_UNSPECIFIED, - WorkflowExecutionExpirationTime: timestamp.TimePtr(now.Add(*attributes.WorkflowExecutionTimeout).Round(time.Millisecond)), - } -} - -func (s *transferQueueActiveTaskExecutorSuite) createPersistenceMutableState( - ms workflow.MutableState, - lastEventID int64, - lastEventVersion int64, -) *persistencespb.WorkflowMutableState { - currentVersionHistory, err := versionhistory.GetCurrentVersionHistory(ms.GetExecutionInfo().GetVersionHistories()) - s.NoError(err) - err = versionhistory.AddOrUpdateVersionHistoryItem(currentVersionHistory, versionhistory.NewVersionHistoryItem( - lastEventID, lastEventVersion, - )) - s.NoError(err) - return workflow.TestCloneToProto(ms) -} - -func (s *transferQueueActiveTaskExecutorSuite) newTaskExecutable( - task tasks.Task, -) queues.Executable { - return queues.NewExecutable( - queues.DefaultReaderId, - task, - s.transferQueueActiveTaskExecutor, - nil, - nil, - queues.NewNoopPriorityAssigner(), - s.mockShard.GetTimeSource(), - s.mockNamespaceCache, - s.mockClusterMetadata, - nil, - metrics.NoopMetricsHandler, - ) -} diff -Nru temporal-1.21.5-1/src/service/history/transferQueueFactory.go temporal-1.22.5/src/service/history/transferQueueFactory.go --- temporal-1.21.5-1/src/service/history/transferQueueFactory.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/transferQueueFactory.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,196 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package history - -import ( - "context" - - "go.uber.org/fx" - - "go.temporal.io/server/api/historyservice/v1" - "go.temporal.io/server/client" - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/log/tag" - "go.temporal.io/server/common/metrics" - "go.temporal.io/server/common/persistence/visibility/manager" - "go.temporal.io/server/common/resource" - "go.temporal.io/server/common/sdk" - "go.temporal.io/server/common/xdc" - "go.temporal.io/server/service/history/queues" - "go.temporal.io/server/service/history/shard" - "go.temporal.io/server/service/history/tasks" - wcache "go.temporal.io/server/service/history/workflow/cache" - "go.temporal.io/server/service/worker/archiver" -) - -const ( - transferQueuePersistenceMaxRPSRatio = 0.3 -) - -type ( - transferQueueFactoryParams struct { - fx.In - - QueueFactoryBaseParams - - ClientBean client.Bean - ArchivalClient archiver.Client - SdkClientFactory sdk.ClientFactory - MatchingClient resource.MatchingClient - HistoryClient historyservice.HistoryServiceClient - VisibilityManager manager.VisibilityManager - } - - transferQueueFactory struct { - transferQueueFactoryParams - QueueFactoryBase - } -) - -func NewTransferQueueFactory( - params transferQueueFactoryParams, -) QueueFactory { - return &transferQueueFactory{ - transferQueueFactoryParams: params, - QueueFactoryBase: QueueFactoryBase{ - HostScheduler: queues.NewNamespacePriorityScheduler( - params.ClusterMetadata.GetCurrentClusterName(), - queues.NamespacePrioritySchedulerOptions{ - WorkerCount: params.Config.TransferProcessorSchedulerWorkerCount, - ActiveNamespaceWeights: params.Config.TransferProcessorSchedulerActiveRoundRobinWeights, - StandbyNamespaceWeights: params.Config.TransferProcessorSchedulerStandbyRoundRobinWeights, - EnableRateLimiter: params.Config.TaskSchedulerEnableRateLimiter, - EnableRateLimiterShadowMode: params.Config.TaskSchedulerEnableRateLimiterShadowMode, - DispatchThrottleDuration: params.Config.TaskSchedulerThrottleDuration, - }, - params.NamespaceRegistry, - params.SchedulerRateLimiter, - params.TimeSource, - params.MetricsHandler.WithTags(metrics.OperationTag(metrics.OperationTransferQueueProcessorScope)), - params.Logger, - ), - HostPriorityAssigner: queues.NewPriorityAssigner(), - HostReaderRateLimiter: queues.NewReaderPriorityRateLimiter( - NewHostRateLimiterRateFn( - params.Config.TransferProcessorMaxPollHostRPS, - params.Config.PersistenceMaxQPS, - 
transferQueuePersistenceMaxRPSRatio, - ), - int64(params.Config.QueueMaxReaderCount()), - ), - }, - } -} - -func (f *transferQueueFactory) CreateQueue( - shard shard.Context, - workflowCache wcache.Cache, -) queues.Queue { - logger := log.With(shard.GetLogger(), tag.ComponentTransferQueue) - metricsHandler := f.MetricsHandler.WithTags(metrics.OperationTag(metrics.OperationTransferQueueProcessorScope)) - - rescheduler := queues.NewRescheduler( - f.HostScheduler, - shard.GetTimeSource(), - logger, - metricsHandler, - ) - - currentClusterName := f.ClusterMetadata.GetCurrentClusterName() - activeExecutor := newTransferQueueActiveTaskExecutor( - shard, - workflowCache, - f.ArchivalClient, - f.SdkClientFactory, - logger, - f.MetricsHandler, - f.Config, - f.MatchingClient, - f.VisibilityManager, - ) - - standbyExecutor := newTransferQueueStandbyTaskExecutor( - shard, - workflowCache, - f.ArchivalClient, - xdc.NewNDCHistoryResender( - f.NamespaceRegistry, - f.ClientBean, - func(ctx context.Context, request *historyservice.ReplicateEventsV2Request) error { - engine, err := shard.GetEngine(ctx) - if err != nil { - return err - } - return engine.ReplicateEventsV2(ctx, request) - }, - shard.GetPayloadSerializer(), - f.Config.StandbyTaskReReplicationContextTimeout, - logger, - ), - logger, - f.MetricsHandler, - currentClusterName, - f.MatchingClient, - f.VisibilityManager, - ) - - executor := queues.NewExecutorWrapper( - currentClusterName, - f.NamespaceRegistry, - activeExecutor, - standbyExecutor, - logger, - ) - - return queues.NewImmediateQueue( - shard, - tasks.CategoryTransfer, - f.HostScheduler, - rescheduler, - f.HostPriorityAssigner, - executor, - &queues.Options{ - ReaderOptions: queues.ReaderOptions{ - BatchSize: f.Config.TransferTaskBatchSize, - MaxPendingTasksCount: f.Config.QueuePendingTaskMaxCount, - PollBackoffInterval: f.Config.TransferProcessorPollBackoffInterval, - }, - MonitorOptions: queues.MonitorOptions{ - PendingTasksCriticalCount: f.Config.QueuePendingTaskCriticalCount, - ReaderStuckCriticalAttempts: f.Config.QueueReaderStuckCriticalAttempts, - SliceCountCriticalThreshold: f.Config.QueueCriticalSlicesCount, - }, - MaxPollRPS: f.Config.TransferProcessorMaxPollRPS, - MaxPollInterval: f.Config.TransferProcessorMaxPollInterval, - MaxPollIntervalJitterCoefficient: f.Config.TransferProcessorMaxPollIntervalJitterCoefficient, - CheckpointInterval: f.Config.TransferProcessorUpdateAckInterval, - CheckpointIntervalJitterCoefficient: f.Config.TransferProcessorUpdateAckIntervalJitterCoefficient, - MaxReaderCount: f.Config.QueueMaxReaderCount, - }, - f.HostReaderRateLimiter, - logger, - metricsHandler, - ) -} diff -Nru temporal-1.21.5-1/src/service/history/transferQueueStandbyTaskExecutor.go temporal-1.22.5/src/service/history/transferQueueStandbyTaskExecutor.go --- temporal-1.21.5-1/src/service/history/transferQueueStandbyTaskExecutor.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/transferQueueStandbyTaskExecutor.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,682 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package history - -import ( - "context" - "errors" - "time" - - commonpb "go.temporal.io/api/common/v1" - enumspb "go.temporal.io/api/enums/v1" - "go.temporal.io/api/serviceerror" - taskqueuepb "go.temporal.io/api/taskqueue/v1" - - "go.temporal.io/server/api/historyservice/v1" - "go.temporal.io/server/api/matchingservice/v1" - "go.temporal.io/server/common" - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/log/tag" - "go.temporal.io/server/common/metrics" - "go.temporal.io/server/common/namespace" - "go.temporal.io/server/common/persistence/visibility/manager" - "go.temporal.io/server/common/primitives/timestamp" - "go.temporal.io/server/common/xdc" - "go.temporal.io/server/service/history/consts" - "go.temporal.io/server/service/history/ndc" - "go.temporal.io/server/service/history/queues" - "go.temporal.io/server/service/history/shard" - "go.temporal.io/server/service/history/tasks" - "go.temporal.io/server/service/history/workflow" - wcache "go.temporal.io/server/service/history/workflow/cache" - "go.temporal.io/server/service/worker/archiver" -) - -type ( - transferQueueStandbyTaskExecutor struct { - *transferQueueTaskExecutorBase - - clusterName string - nDCHistoryResender xdc.NDCHistoryResender - } -) - -var ( - errVerificationFailed = errors.New("failed to verify target workflow state") -) - -func newTransferQueueStandbyTaskExecutor( - shard shard.Context, - workflowCache wcache.Cache, - archivalClient archiver.Client, - nDCHistoryResender xdc.NDCHistoryResender, - logger log.Logger, - metricProvider metrics.Handler, - clusterName string, - matchingClient matchingservice.MatchingServiceClient, - visibilityManager manager.VisibilityManager, -) queues.Executor { - return &transferQueueStandbyTaskExecutor{ - transferQueueTaskExecutorBase: newTransferQueueTaskExecutorBase( - shard, - workflowCache, - archivalClient, - logger, - metricProvider, - matchingClient, - visibilityManager, - ), - clusterName: clusterName, - nDCHistoryResender: nDCHistoryResender, - } -} - -func (t *transferQueueStandbyTaskExecutor) Execute( - ctx context.Context, - executable queues.Executable, -) ([]metrics.Tag, bool, error) { - task := executable.GetTask() - taskType := queues.GetStandbyTransferTaskTypeTagValue(task) - metricsTags := []metrics.Tag{ - getNamespaceTagByID(t.shard.GetNamespaceRegistry(), task.GetNamespaceID()), - metrics.TaskTypeTag(taskType), - metrics.OperationTag(taskType), // for backward 
compatibility - } - - var err error - switch task := task.(type) { - case *tasks.ActivityTask: - err = t.processActivityTask(ctx, task) - case *tasks.WorkflowTask: - err = t.processWorkflowTask(ctx, task) - case *tasks.CancelExecutionTask: - err = t.processCancelExecution(ctx, task) - case *tasks.SignalExecutionTask: - err = t.processSignalExecution(ctx, task) - case *tasks.StartChildExecutionTask: - err = t.processStartChildExecution(ctx, task) - case *tasks.ResetWorkflowTask: - // no reset needed for standby - // TODO: add error logs - err = nil - case *tasks.CloseExecutionTask: - err = t.processCloseExecution(ctx, task) - case *tasks.DeleteExecutionTask: - err = t.processDeleteExecutionTask(ctx, task, false) - default: - err = errUnknownTransferTask - } - - return metricsTags, false, err -} - -func (t *transferQueueStandbyTaskExecutor) processActivityTask( - ctx context.Context, - transferTask *tasks.ActivityTask, -) error { - processTaskIfClosed := false - actionFn := func(_ context.Context, wfContext workflow.Context, mutableState workflow.MutableState) (interface{}, error) { - activityInfo, ok := mutableState.GetActivityInfo(transferTask.ScheduledEventID) - if !ok { - return nil, nil - } - - err := CheckTaskVersion(t.shard, t.logger, mutableState.GetNamespaceEntry(), activityInfo.Version, transferTask.Version, transferTask) - if err != nil { - return nil, err - } - - if activityInfo.StartedEventId == common.EmptyEventID { - return newActivityTaskPostActionInfo(mutableState, *activityInfo.ScheduleToStartTimeout, activityInfo.UseCompatibleVersion) - } - - return nil, nil - } - - return t.processTransfer( - ctx, - processTaskIfClosed, - transferTask, - actionFn, - getStandbyPostActionFn( - transferTask, - t.getCurrentTime, - t.config.StandbyTaskMissingEventsResendDelay(transferTask.GetType()), - t.config.StandbyTaskMissingEventsDiscardDelay(transferTask.GetType()), - t.fetchHistoryFromRemote, - t.pushActivity, - ), - ) -} - -func (t *transferQueueStandbyTaskExecutor) processWorkflowTask( - ctx context.Context, - transferTask *tasks.WorkflowTask, -) error { - actionFn := func(_ context.Context, wfContext workflow.Context, mutableState workflow.MutableState) (interface{}, error) { - wtInfo := mutableState.GetWorkflowTaskByID(transferTask.ScheduledEventID) - if wtInfo == nil { - return nil, nil - } - - _, scheduleToStartTimeout := mutableState.TaskQueueScheduleToStartTimeout(transferTask.TaskQueue) - // Task queue is ignored here because at standby, always use original normal task queue, - // disregards the transferTask.TaskQueue which could be sticky. - // NOTE: scheduleToStart timeout is respected. If workflow was sticky before namespace become standby, - // transferTask.TaskQueue is sticky, and there is timer already created for this timeout. - // Use this sticky timeout as TTL. 
- taskQueue := &taskqueuepb.TaskQueue{ - Name: mutableState.GetExecutionInfo().TaskQueue, - Kind: enumspb.TASK_QUEUE_KIND_NORMAL, - } - - err := CheckTaskVersion(t.shard, t.logger, mutableState.GetNamespaceEntry(), wtInfo.Version, transferTask.Version, transferTask) - if err != nil { - return nil, err - } - - if wtInfo.StartedEventID == common.EmptyEventID { - return newWorkflowTaskPostActionInfo( - mutableState, - scheduleToStartTimeout, - *taskQueue, - ) - } - - return nil, nil - } - - return t.processTransfer( - ctx, - false, - transferTask, - actionFn, - getStandbyPostActionFn( - transferTask, - t.getCurrentTime, - t.config.StandbyTaskMissingEventsResendDelay(transferTask.GetType()), - t.config.StandbyTaskMissingEventsDiscardDelay(transferTask.GetType()), - t.fetchHistoryFromRemote, - t.pushWorkflowTask, - ), - ) -} - -func (t *transferQueueStandbyTaskExecutor) processCloseExecution( - ctx context.Context, - transferTask *tasks.CloseExecutionTask, -) error { - processTaskIfClosed := true - actionFn := func(ctx context.Context, wfContext workflow.Context, mutableState workflow.MutableState) (interface{}, error) { - if mutableState.IsWorkflowExecutionRunning() { - // this can happen if workflow is reset. - return nil, nil - } - - wfCloseTime, err := mutableState.GetWorkflowCloseTime(ctx) - if err != nil { - return nil, err - } - executionInfo := mutableState.GetExecutionInfo() - executionState := mutableState.GetExecutionState() - workflowTypeName := executionInfo.WorkflowTypeName - workflowStatus := executionState.Status - workflowHistoryLength := mutableState.GetNextEventID() - 1 - workflowStartTime := timestamp.TimeValue(mutableState.GetExecutionInfo().GetStartTime()) - workflowExecutionTime := timestamp.TimeValue(mutableState.GetExecutionInfo().GetExecutionTime()) - visibilityMemo := getWorkflowMemo(executionInfo.Memo) - searchAttr := getSearchAttributes(executionInfo.SearchAttributes) - - lastWriteVersion, err := mutableState.GetLastWriteVersion() - if err != nil { - return nil, err - } - err = CheckTaskVersion(t.shard, t.logger, mutableState.GetNamespaceEntry(), lastWriteVersion, transferTask.Version, transferTask) - if err != nil { - return nil, err - } - - if !transferTask.CanSkipVisibilityArchival { - if err := t.archiveVisibility( - ctx, - namespace.ID(transferTask.NamespaceID), - transferTask.WorkflowID, - transferTask.RunID, - workflowTypeName, - workflowStartTime, - workflowExecutionTime, - timestamp.TimeValue(wfCloseTime), - workflowStatus, - workflowHistoryLength, - visibilityMemo, - searchAttr, - ); err != nil { - return nil, err - } - } - - // verify if parent got the completion event - verifyCompletionRecorded := mutableState.HasParentExecution() && executionInfo.NewExecutionRunId == "" - if verifyCompletionRecorded { - // load close event only if needed. 
- completionEvent, err := mutableState.GetCompletionEvent(ctx) - if err != nil { - return nil, err - } - - verifyCompletionRecorded = verifyCompletionRecorded && !ndc.IsTerminatedByResetter(completionEvent) - } - - if verifyCompletionRecorded { - _, err := t.historyClient.VerifyChildExecutionCompletionRecorded(ctx, &historyservice.VerifyChildExecutionCompletionRecordedRequest{ - NamespaceId: executionInfo.ParentNamespaceId, - ParentExecution: &commonpb.WorkflowExecution{ - WorkflowId: executionInfo.ParentWorkflowId, - RunId: executionInfo.ParentRunId, - }, - ChildExecution: &commonpb.WorkflowExecution{ - WorkflowId: transferTask.WorkflowID, - RunId: transferTask.RunID, - }, - ParentInitiatedId: executionInfo.ParentInitiatedId, - ParentInitiatedVersion: executionInfo.ParentInitiatedVersion, - Clock: executionInfo.ParentClock, - }) - switch err.(type) { - case nil, *serviceerror.NamespaceNotFound, *serviceerror.Unimplemented: - return nil, nil - case *serviceerror.NotFound, *serviceerror.WorkflowNotReady: - return verifyChildCompletionRecordedInfo, nil - default: - t.logger.Error("Failed to verify child execution completion recoreded", - tag.WorkflowNamespaceID(transferTask.GetNamespaceID()), - tag.WorkflowID(transferTask.GetWorkflowID()), - tag.WorkflowRunID(transferTask.GetRunID()), - tag.Error(err), - ) - - // NOTE: we do not return the error here which will cause the mutable state to be cleared and reloaded upon retry - // it's unnecessary as the error is in the target workflow, not this workflow. - return nil, errVerificationFailed - } - } - return nil, nil - } - - return t.processTransfer( - ctx, - processTaskIfClosed, - transferTask, - actionFn, - getStandbyPostActionFn( - transferTask, - t.getCurrentTime, - t.config.StandbyTaskMissingEventsResendDelay(transferTask.GetType()), - t.config.StandbyTaskMissingEventsDiscardDelay(transferTask.GetType()), - standbyTaskPostActionNoOp, - standbyTransferTaskPostActionTaskDiscarded, - ), - ) -} - -func (t *transferQueueStandbyTaskExecutor) processCancelExecution( - ctx context.Context, - transferTask *tasks.CancelExecutionTask, -) error { - processTaskIfClosed := false - actionFn := func(_ context.Context, wfContext workflow.Context, mutableState workflow.MutableState) (interface{}, error) { - requestCancelInfo, ok := mutableState.GetRequestCancelInfo(transferTask.InitiatedEventID) - if !ok { - return nil, nil - } - - err := CheckTaskVersion(t.shard, t.logger, mutableState.GetNamespaceEntry(), requestCancelInfo.Version, transferTask.Version, transferTask) - if err != nil { - return nil, err - } - - return getHistoryResendInfo(mutableState) - } - - return t.processTransfer( - ctx, - processTaskIfClosed, - transferTask, - actionFn, - getStandbyPostActionFn( - transferTask, - t.getCurrentTime, - t.config.StandbyTaskMissingEventsResendDelay(transferTask.GetType()), - t.config.StandbyTaskMissingEventsDiscardDelay(transferTask.GetType()), - t.fetchHistoryFromRemote, - standbyTransferTaskPostActionTaskDiscarded, - ), - ) -} - -func (t *transferQueueStandbyTaskExecutor) processSignalExecution( - ctx context.Context, - transferTask *tasks.SignalExecutionTask, -) error { - processTaskIfClosed := false - actionFn := func(_ context.Context, wfContext workflow.Context, mutableState workflow.MutableState) (interface{}, error) { - signalInfo, ok := mutableState.GetSignalInfo(transferTask.InitiatedEventID) - if !ok { - return nil, nil - } - - err := CheckTaskVersion(t.shard, t.logger, mutableState.GetNamespaceEntry(), signalInfo.Version, transferTask.Version, 
transferTask) - if err != nil { - return nil, err - } - - return getHistoryResendInfo(mutableState) - } - - return t.processTransfer( - ctx, - processTaskIfClosed, - transferTask, - actionFn, - getStandbyPostActionFn( - transferTask, - t.getCurrentTime, - t.config.StandbyTaskMissingEventsResendDelay(transferTask.GetType()), - t.config.StandbyTaskMissingEventsDiscardDelay(transferTask.GetType()), - t.fetchHistoryFromRemote, - standbyTransferTaskPostActionTaskDiscarded, - ), - ) -} - -func (t *transferQueueStandbyTaskExecutor) processStartChildExecution( - ctx context.Context, - transferTask *tasks.StartChildExecutionTask, -) error { - processTaskIfClosed := true - actionFn := func(ctx context.Context, wfContext workflow.Context, mutableState workflow.MutableState) (interface{}, error) { - childWorkflowInfo, ok := mutableState.GetChildExecutionInfo(transferTask.InitiatedEventID) - if !ok { - return nil, nil - } - - err := CheckTaskVersion(t.shard, t.logger, mutableState.GetNamespaceEntry(), childWorkflowInfo.Version, transferTask.Version, transferTask) - if err != nil { - return nil, err - } - - workflowClosed := !mutableState.IsWorkflowExecutionRunning() - childStarted := childWorkflowInfo.StartedEventId != common.EmptyEventID - childAbandon := childWorkflowInfo.ParentClosePolicy == enumspb.PARENT_CLOSE_POLICY_ABANDON - - if workflowClosed && !(childStarted && childAbandon) { - // NOTE: ideally for workflowClosed, child not started, parent close policy is abandon case, - // we should continue to start the child workflow in active cluster, so standby logic also need to - // perform the verification. However, we can't do that due to some technial reasons. - // Please check the comments in processStartChildExecution in transferQueueActiveTaskExecutor.go - // for details. - return nil, nil - } - - if !childStarted { - historyResendInfo, err := getHistoryResendInfo(mutableState) - if err != nil { - return nil, err - } - return &startChildExecutionPostActionInfo{ - historyResendInfo: historyResendInfo, - }, nil - } - - _, err = t.historyClient.VerifyFirstWorkflowTaskScheduled(ctx, &historyservice.VerifyFirstWorkflowTaskScheduledRequest{ - NamespaceId: transferTask.TargetNamespaceID, - WorkflowExecution: &commonpb.WorkflowExecution{ - WorkflowId: childWorkflowInfo.StartedWorkflowId, - RunId: childWorkflowInfo.StartedRunId, - }, - Clock: childWorkflowInfo.Clock, - }) - switch err.(type) { - case nil, *serviceerror.NamespaceNotFound, *serviceerror.Unimplemented: - return nil, nil - case *serviceerror.NotFound, *serviceerror.WorkflowNotReady: - return &startChildExecutionPostActionInfo{}, nil - default: - t.logger.Error("Failed to verify first workflow task scheduled", - tag.WorkflowNamespaceID(transferTask.GetNamespaceID()), - tag.WorkflowID(transferTask.GetWorkflowID()), - tag.WorkflowRunID(transferTask.GetRunID()), - tag.Error(err), - ) - - // NOTE: we do not return the error here which will cause the mutable state to be cleared and reloaded upon retry - // it's unnecessary as the error is in the target workflow, not this workflow. 
- return nil, errVerificationFailed - } - } - - return t.processTransfer( - ctx, - processTaskIfClosed, - transferTask, - actionFn, - getStandbyPostActionFn( - transferTask, - t.getCurrentTime, - t.config.StandbyTaskMissingEventsResendDelay(transferTask.GetType()), - t.config.StandbyTaskMissingEventsDiscardDelay(transferTask.GetType()), - t.startChildExecutionResendPostAction, - standbyTransferTaskPostActionTaskDiscarded, - ), - ) -} - -func (t *transferQueueStandbyTaskExecutor) processTransfer( - ctx context.Context, - processTaskIfClosed bool, - taskInfo tasks.Task, - actionFn standbyActionFn, - postActionFn standbyPostActionFn, -) (retError error) { - ctx, cancel := context.WithTimeout(ctx, taskTimeout) - defer cancel() - - nsRecord, err := t.shard.GetNamespaceRegistry().GetNamespaceByID(namespace.ID(taskInfo.GetNamespaceID())) - if err != nil { - return err - } - if !nsRecord.IsOnCluster(t.clusterName) { - // namespace is not replicated to local cluster, ignore corresponding tasks - return nil - } - - weContext, release, err := getWorkflowExecutionContextForTask(ctx, t.cache, taskInfo) - if err != nil { - return err - } - defer func() { - if retError == consts.ErrTaskRetry || retError == errVerificationFailed { - release(nil) - } else { - release(retError) - } - }() - - mutableState, err := loadMutableStateForTransferTask(ctx, weContext, taskInfo, t.metricHandler, t.logger) - if err != nil || mutableState == nil { - return err - } - - if !mutableState.IsWorkflowExecutionRunning() && !processTaskIfClosed { - // workflow already finished, no need to process transfer task. - return nil - } - - historyResendInfo, err := actionFn(ctx, weContext, mutableState) - if err != nil { - return err - } - - // NOTE: do not access anything related mutable state after this lock release - release(nil) - return postActionFn(ctx, taskInfo, historyResendInfo, t.logger) -} - -func (t *transferQueueStandbyTaskExecutor) pushActivity( - ctx context.Context, - task tasks.Task, - postActionInfo interface{}, - logger log.Logger, -) error { - if postActionInfo == nil { - return nil - } - - pushActivityInfo := postActionInfo.(*activityTaskPostActionInfo) - timeout := pushActivityInfo.activityTaskScheduleToStartTimeout - return t.transferQueueTaskExecutorBase.pushActivity( - ctx, - task.(*tasks.ActivityTask), - &timeout, - pushActivityInfo.versionDirective, - ) -} - -func (t *transferQueueStandbyTaskExecutor) pushWorkflowTask( - ctx context.Context, - task tasks.Task, - postActionInfo interface{}, - logger log.Logger, -) error { - if postActionInfo == nil { - return nil - } - - pushwtInfo := postActionInfo.(*workflowTaskPostActionInfo) - return t.transferQueueTaskExecutorBase.pushWorkflowTask( - ctx, - task.(*tasks.WorkflowTask), - &pushwtInfo.taskqueue, - pushwtInfo.workflowTaskScheduleToStartTimeout, - pushwtInfo.versionDirective, - ) -} - -func (t *transferQueueStandbyTaskExecutor) startChildExecutionResendPostAction( - ctx context.Context, - taskInfo tasks.Task, - postActionInfo interface{}, - log log.Logger, -) error { - if postActionInfo == nil { - return nil - } - - historyResendInfo := postActionInfo.(*startChildExecutionPostActionInfo).historyResendInfo - if historyResendInfo != nil { - return t.fetchHistoryFromRemote(ctx, taskInfo, historyResendInfo, log) - } - - return standbyTaskPostActionNoOp(ctx, taskInfo, postActionInfo, log) -} - -func (t *transferQueueStandbyTaskExecutor) fetchHistoryFromRemote( - ctx context.Context, - taskInfo tasks.Task, - postActionInfo interface{}, - logger log.Logger, -) error 
{ - var resendInfo *historyResendInfo - switch postActionInfo := postActionInfo.(type) { - case nil: - return nil - case *historyResendInfo: - resendInfo = postActionInfo - case *activityTaskPostActionInfo: - resendInfo = postActionInfo.historyResendInfo - case *workflowTaskPostActionInfo: - resendInfo = postActionInfo.historyResendInfo - default: - logger.Fatal("unknown post action info for fetching remote history", tag.Value(postActionInfo)) - } - - remoteClusterName, err := getRemoteClusterName( - t.currentClusterName, - t.registry, - taskInfo.GetNamespaceID(), - ) - if err != nil { - return err - } - - scope := t.metricHandler.WithTags(metrics.OperationTag(metrics.HistoryRereplicationByTransferTaskScope)) - scope.Counter(metrics.ClientRequests.GetMetricName()).Record(1) - startTime := time.Now().UTC() - defer func() { scope.Timer(metrics.ClientLatency.GetMetricName()).Record(time.Since(startTime)) }() - - if resendInfo.lastEventID == common.EmptyEventID || resendInfo.lastEventVersion == common.EmptyVersion { - t.logger.Error("Error re-replicating history from remote: transferQueueStandbyProcessor encountered empty historyResendInfo.", - tag.ShardID(t.shard.GetShardID()), - tag.WorkflowNamespaceID(taskInfo.GetNamespaceID()), - tag.WorkflowID(taskInfo.GetWorkflowID()), - tag.WorkflowRunID(taskInfo.GetRunID()), - tag.SourceCluster(remoteClusterName)) - - return consts.ErrTaskRetry - } - - // NOTE: history resend may take long time and its timeout is currently - // controlled by a separate dynamicconfig config: StandbyTaskReReplicationContextTimeout - if err = t.nDCHistoryResender.SendSingleWorkflowHistory( - ctx, - remoteClusterName, - namespace.ID(taskInfo.GetNamespaceID()), - taskInfo.GetWorkflowID(), - taskInfo.GetRunID(), - resendInfo.lastEventID, - resendInfo.lastEventVersion, - 0, - 0, - ); err != nil { - if _, isNotFound := err.(*serviceerror.NamespaceNotFound); isNotFound { - // Don't log NamespaceNotFound error because it is valid case, and return error to stop retrying. - return err - } - t.logger.Error("Error re-replicating history from remote.", - tag.ShardID(t.shard.GetShardID()), - tag.WorkflowNamespaceID(taskInfo.GetNamespaceID()), - tag.WorkflowID(taskInfo.GetWorkflowID()), - tag.WorkflowRunID(taskInfo.GetRunID()), - tag.SourceCluster(remoteClusterName), - tag.Error(err)) - } - - // Return retryable error, so task processing will retry. - return consts.ErrTaskRetry -} - -func (t *transferQueueStandbyTaskExecutor) getCurrentTime() time.Time { - return t.shard.GetCurrentTime(t.clusterName) -} diff -Nru temporal-1.21.5-1/src/service/history/transferQueueStandbyTaskExecutor_test.go temporal-1.22.5/src/service/history/transferQueueStandbyTaskExecutor_test.go --- temporal-1.21.5-1/src/service/history/transferQueueStandbyTaskExecutor_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/transferQueueStandbyTaskExecutor_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,1273 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package history - -import ( - "context" - "errors" - "fmt" - "math/rand" - "testing" - "time" - - "github.com/golang/mock/gomock" - "github.com/pborman/uuid" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - - commonpb "go.temporal.io/api/common/v1" - enumspb "go.temporal.io/api/enums/v1" - "go.temporal.io/api/serviceerror" - taskqueuepb "go.temporal.io/api/taskqueue/v1" - "go.temporal.io/api/workflowservice/v1" - - "go.temporal.io/server/api/adminservicemock/v1" - "go.temporal.io/server/api/historyservice/v1" - "go.temporal.io/server/api/historyservicemock/v1" - "go.temporal.io/server/api/matchingservice/v1" - "go.temporal.io/server/api/matchingservicemock/v1" - persistencespb "go.temporal.io/server/api/persistence/v1" - workflowspb "go.temporal.io/server/api/workflow/v1" - dc "go.temporal.io/server/common/dynamicconfig" - "go.temporal.io/server/common/searchattribute" - - "go.temporal.io/server/common" - "go.temporal.io/server/common/archiver" - "go.temporal.io/server/common/archiver/provider" - "go.temporal.io/server/common/clock" - "go.temporal.io/server/common/cluster" - "go.temporal.io/server/common/definition" - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/metrics" - "go.temporal.io/server/common/namespace" - "go.temporal.io/server/common/persistence" - "go.temporal.io/server/common/persistence/versionhistory" - "go.temporal.io/server/common/persistence/visibility/manager" - "go.temporal.io/server/common/primitives/timestamp" - "go.temporal.io/server/common/xdc" - "go.temporal.io/server/service/history/consts" - "go.temporal.io/server/service/history/events" - "go.temporal.io/server/service/history/queues" - "go.temporal.io/server/service/history/shard" - "go.temporal.io/server/service/history/tasks" - "go.temporal.io/server/service/history/tests" - "go.temporal.io/server/service/history/vclock" - "go.temporal.io/server/service/history/workflow" - wcache "go.temporal.io/server/service/history/workflow/cache" - warchiver "go.temporal.io/server/service/worker/archiver" -) - -type ( - transferQueueStandbyTaskExecutorSuite struct { - suite.Suite - *require.Assertions - - controller *gomock.Controller - mockShard *shard.ContextTest - mockNamespaceCache *namespace.MockRegistry - mockClusterMetadata *cluster.MockMetadata - mockAdminClient *adminservicemock.MockAdminServiceClient - mockNDCHistoryResender *xdc.MockNDCHistoryResender - mockHistoryClient 
*historyservicemock.MockHistoryServiceClient - mockMatchingClient *matchingservicemock.MockMatchingServiceClient - - mockExecutionMgr *persistence.MockExecutionManager - mockArchivalClient *warchiver.MockClient - mockArchivalMetadata archiver.MetadataMock - mockArchiverProvider *provider.MockArchiverProvider - - workflowCache wcache.Cache - logger log.Logger - namespaceID namespace.ID - namespaceEntry *namespace.Namespace - version int64 - clusterName string - now time.Time - timeSource *clock.EventTimeSource - fetchHistoryDuration time.Duration - discardDuration time.Duration - - transferQueueStandbyTaskExecutor *transferQueueStandbyTaskExecutor - mockSearchAttributesProvider *searchattribute.MockProvider - mockVisibilityManager *manager.MockVisibilityManager - } -) - -func TestTransferQueueStandbyTaskExecutorSuite(t *testing.T) { - s := new(transferQueueStandbyTaskExecutorSuite) - suite.Run(t, s) -} - -func (s *transferQueueStandbyTaskExecutorSuite) SetupSuite() { -} - -func (s *transferQueueStandbyTaskExecutorSuite) SetupTest() { - s.Assertions = require.New(s.T()) - - config := tests.NewDynamicConfig() - - s.namespaceEntry = tests.GlobalStandbyNamespaceEntry - s.namespaceID = s.namespaceEntry.ID() - s.version = s.namespaceEntry.FailoverVersion() - s.now = time.Now().UTC() - s.timeSource = clock.NewEventTimeSource().Update(s.now) - s.fetchHistoryDuration = time.Minute * 12 - s.discardDuration = time.Minute * 30 - - s.controller = gomock.NewController(s.T()) - s.mockNDCHistoryResender = xdc.NewMockNDCHistoryResender(s.controller) - s.mockArchivalClient = warchiver.NewMockClient(s.controller) - s.mockShard = shard.NewTestContextWithTimeSource( - s.controller, - &persistencespb.ShardInfo{ - RangeId: 1, - }, - config, - s.timeSource, - ) - s.mockShard.SetEventsCacheForTesting(events.NewEventsCache( - s.mockShard.GetShardID(), - s.mockShard.GetConfig().EventsCacheInitialSize(), - s.mockShard.GetConfig().EventsCacheMaxSize(), - s.mockShard.GetConfig().EventsCacheTTL(), - s.mockShard.GetExecutionManager(), - false, - s.mockShard.GetLogger(), - s.mockShard.GetMetricsHandler(), - )) - - s.mockHistoryClient = s.mockShard.Resource.HistoryClient - s.mockMatchingClient = s.mockShard.Resource.MatchingClient - s.mockExecutionMgr = s.mockShard.Resource.ExecutionMgr - s.mockClusterMetadata = s.mockShard.Resource.ClusterMetadata - s.mockArchivalMetadata = s.mockShard.Resource.ArchivalMetadata - s.mockArchiverProvider = s.mockShard.Resource.ArchiverProvider - s.mockNamespaceCache = s.mockShard.Resource.NamespaceCache - s.mockAdminClient = s.mockShard.Resource.RemoteAdminClient - s.mockSearchAttributesProvider = s.mockShard.Resource.SearchAttributesProvider - s.mockVisibilityManager = s.mockShard.Resource.VisibilityManager - s.mockNamespaceCache.EXPECT().GetNamespaceByID(tests.NamespaceID).Return(tests.GlobalNamespaceEntry, nil).AnyTimes() - s.mockNamespaceCache.EXPECT().GetNamespace(tests.Namespace).Return(tests.GlobalNamespaceEntry, nil).AnyTimes() - s.mockNamespaceCache.EXPECT().GetNamespaceByID(tests.TargetNamespaceID).Return(tests.GlobalTargetNamespaceEntry, nil).AnyTimes() - s.mockNamespaceCache.EXPECT().GetNamespace(tests.TargetNamespace).Return(tests.GlobalTargetNamespaceEntry, nil).AnyTimes() - s.mockNamespaceCache.EXPECT().GetNamespaceByID(tests.ParentNamespaceID).Return(tests.GlobalParentNamespaceEntry, nil).AnyTimes() - s.mockNamespaceCache.EXPECT().GetNamespace(tests.ParentNamespace).Return(tests.GlobalParentNamespaceEntry, nil).AnyTimes() - 
s.mockNamespaceCache.EXPECT().GetNamespaceByID(tests.ChildNamespaceID).Return(tests.GlobalChildNamespaceEntry, nil).AnyTimes() - s.mockNamespaceCache.EXPECT().GetNamespace(tests.ChildNamespace).Return(tests.GlobalChildNamespaceEntry, nil).AnyTimes() - s.mockNamespaceCache.EXPECT().GetNamespaceByID(tests.StandbyNamespaceID).Return(tests.GlobalStandbyNamespaceEntry, nil).AnyTimes() - s.mockNamespaceCache.EXPECT().GetNamespace(tests.StandbyNamespace).Return(tests.GlobalStandbyNamespaceEntry, nil).AnyTimes() - s.mockNamespaceCache.EXPECT().GetNamespaceName(tests.StandbyNamespaceID).Return(tests.StandbyNamespace, nil).AnyTimes() - s.mockNamespaceCache.EXPECT().GetNamespaceByID(tests.StandbyWithVisibilityArchivalNamespaceID). - Return(tests.GlobalStandbyWithVisibilityArchivalNamespaceEntry, nil).AnyTimes() - s.mockNamespaceCache.EXPECT().GetNamespace(tests.StandbyWithVisibilityArchivalNamespace). - Return(tests.GlobalStandbyWithVisibilityArchivalNamespaceEntry, nil).AnyTimes() - s.mockNamespaceCache.EXPECT().GetNamespaceName(tests.StandbyWithVisibilityArchivalNamespaceID). - Return(tests.StandbyWithVisibilityArchivalNamespace, nil).AnyTimes() - s.mockClusterMetadata.EXPECT().GetClusterID().Return(cluster.TestCurrentClusterInitialFailoverVersion).AnyTimes() - s.mockClusterMetadata.EXPECT().GetCurrentClusterName().Return(cluster.TestCurrentClusterName).AnyTimes() - s.mockClusterMetadata.EXPECT().GetAllClusterInfo().Return(cluster.TestAllClusterInfo).AnyTimes() - s.mockClusterMetadata.EXPECT().IsGlobalNamespaceEnabled().Return(true).AnyTimes() - s.mockClusterMetadata.EXPECT().ClusterNameForFailoverVersion(s.namespaceEntry.IsGlobalNamespace(), s.version).Return(s.clusterName).AnyTimes() - - s.workflowCache = wcache.NewCache(s.mockShard) - s.logger = s.mockShard.GetLogger() - - s.mockArchivalMetadata.SetHistoryEnabledByDefault() - s.mockArchivalMetadata.SetVisibilityEnabledByDefault() - - h := &historyEngineImpl{ - currentClusterName: s.mockShard.Resource.GetClusterMetadata().GetCurrentClusterName(), - shard: s.mockShard, - clusterMetadata: s.mockClusterMetadata, - executionManager: s.mockExecutionMgr, - logger: s.logger, - tokenSerializer: common.NewProtoTaskTokenSerializer(), - metricsHandler: s.mockShard.GetMetricsHandler(), - } - s.mockShard.SetEngineForTesting(h) - s.clusterName = cluster.TestAlternativeClusterName - - s.transferQueueStandbyTaskExecutor = newTransferQueueStandbyTaskExecutor( - s.mockShard, - s.workflowCache, - s.mockArchivalClient, - s.mockNDCHistoryResender, - s.logger, - metrics.NoopMetricsHandler, - s.clusterName, - s.mockShard.Resource.GetMatchingClient(), - s.mockVisibilityManager, - ).(*transferQueueStandbyTaskExecutor) -} - -func (s *transferQueueStandbyTaskExecutorSuite) TearDownTest() { - s.controller.Finish() - s.mockShard.StopForTest() -} - -func (s *transferQueueStandbyTaskExecutorSuite) TestProcessActivityTask_Pending() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: 
&taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - }, - ) - s.Nil(err) - - wt := addWorkflowTaskScheduledEvent(mutableState) - event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) - wt.StartedEventID = event.GetEventId() - event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") - - taskID := int64(59) - activityID := "activity-1" - activityType := "some random activity type" - event, _ = addActivityTaskScheduledEvent(mutableState, event.GetEventId(), activityID, activityType, taskQueueName, &commonpb.Payloads{}, 1*time.Second, 1*time.Second, 1*time.Second, 1*time.Second) - - now := time.Now().UTC() - transferTask := &tasks.ActivityTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - Version: s.version, - VisibilityTimestamp: now, - TaskID: taskID, - TaskQueue: taskQueueName, - ScheduledEventID: event.GetEventId(), - } - - persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - - // no-op post action - s.mockShard.SetCurrentTime(s.clusterName, now) - _, _, err = s.transferQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) - s.Equal(consts.ErrTaskRetry, err) - - // resend history post action - s.mockShard.SetCurrentTime(s.clusterName, now.Add(s.fetchHistoryDuration)) - s.mockNDCHistoryResender.EXPECT().SendSingleWorkflowHistory( - gomock.Any(), - s.clusterName, - namespace.ID(transferTask.NamespaceID), - transferTask.WorkflowID, - transferTask.RunID, - event.GetEventId(), - s.version, - int64(0), - int64(0), - ).Return(nil) - _, _, err = s.transferQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) - s.Equal(consts.ErrTaskRetry, err) - - // push to matching post action - s.mockShard.SetCurrentTime(s.clusterName, now.Add(s.discardDuration)) - s.mockMatchingClient.EXPECT().AddActivityTask(gomock.Any(), gomock.Any(), gomock.Any()).Return(&matchingservice.AddActivityTaskResponse{}, nil) - _, _, err = s.transferQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) - s.Nil(err) -} - -func (s *transferQueueStandbyTaskExecutorSuite) TestProcessActivityTask_Success() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{ - Name: taskQueueName, - Kind: enumspb.TASK_QUEUE_KIND_NORMAL, - }, - WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - }, - ) - 
s.Nil(err) - - wt := addWorkflowTaskScheduledEvent(mutableState) - event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) - wt.StartedEventID = event.GetEventId() - event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") - - taskID := int64(59) - activityID := "activity-1" - activityType := "some random activity type" - event, _ = addActivityTaskScheduledEvent(mutableState, event.GetEventId(), activityID, activityType, taskQueueName, &commonpb.Payloads{}, 1*time.Second, 1*time.Second, 1*time.Second, 1*time.Second) - - now := time.Now().UTC() - transferTask := &tasks.ActivityTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - Version: s.version, - VisibilityTimestamp: now, - TaskID: taskID, - TaskQueue: taskQueueName, - ScheduledEventID: event.GetEventId(), - } - - event = addActivityTaskStartedEvent(mutableState, event.GetEventId(), "") - // Flush buffered events so real IDs get assigned - mutableState.FlushBufferedEvents() - - persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - - s.mockShard.SetCurrentTime(s.clusterName, now) - _, _, err = s.transferQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) - s.Nil(err) -} - -func (s *transferQueueStandbyTaskExecutorSuite) TestProcessWorkflowTask_Pending() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - }, - ) - s.Nil(err) - - taskID := int64(59) - wt := addWorkflowTaskScheduledEvent(mutableState) - - now := time.Now().UTC() - transferTask := &tasks.WorkflowTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - Version: s.version, - VisibilityTimestamp: now, - TaskID: taskID, - TaskQueue: taskQueueName, - ScheduledEventID: wt.ScheduledEventID, - } - - persistenceMutableState := s.createPersistenceMutableState(mutableState, wt.ScheduledEventID, wt.Version) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - - // no-op post action - s.mockShard.SetCurrentTime(s.clusterName, now) - _, _, err = s.transferQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) - s.Equal(consts.ErrTaskRetry, err) - - // resend history post action - s.mockShard.SetCurrentTime(s.clusterName, 
now.Add(s.fetchHistoryDuration)) - s.mockNDCHistoryResender.EXPECT().SendSingleWorkflowHistory( - gomock.Any(), - s.clusterName, - namespace.ID(transferTask.NamespaceID), - transferTask.WorkflowID, - transferTask.RunID, - wt.ScheduledEventID, - s.version, - int64(0), - int64(0), - ).Return(nil) - _, _, err = s.transferQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) - s.Equal(consts.ErrTaskRetry, err) - - // push to matching post action - s.mockShard.SetCurrentTime(s.clusterName, now.Add(s.discardDuration)) - s.mockMatchingClient.EXPECT().AddWorkflowTask(gomock.Any(), gomock.Any(), gomock.Any()).Return(&matchingservice.AddWorkflowTaskResponse{}, nil) - _, _, err = s.transferQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) - s.Nil(err) -} - -func (s *transferQueueStandbyTaskExecutorSuite) TestProcessWorkflowTask_Success_FirstWorkflowTask() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - }, - ) - s.Nil(err) - - taskID := int64(59) - wt := addWorkflowTaskScheduledEvent(mutableState) - - now := time.Now().UTC() - transferTask := &tasks.WorkflowTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - Version: s.version, - VisibilityTimestamp: now, - TaskID: taskID, - TaskQueue: taskQueueName, - ScheduledEventID: wt.ScheduledEventID, - } - - event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) - wt.StartedEventID = event.GetEventId() - - persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - - s.mockShard.SetCurrentTime(s.clusterName, now) - _, _, err = s.transferQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) - s.Nil(err) -} - -func (s *transferQueueStandbyTaskExecutorSuite) TestProcessWorkflowTask_Success_NonFirstWorkflowTask() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - 
TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - }, - ) - s.Nil(err) - - wt := addWorkflowTaskScheduledEvent(mutableState) - event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) - wt.StartedEventID = event.GetEventId() - addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") - - taskID := int64(59) - wt = addWorkflowTaskScheduledEvent(mutableState) - - now := time.Now().UTC() - transferTask := &tasks.WorkflowTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - Version: s.version, - VisibilityTimestamp: now, - TaskID: taskID, - TaskQueue: taskQueueName, - ScheduledEventID: wt.ScheduledEventID, - } - - event = addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) - wt.StartedEventID = event.GetEventId() - - persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - - s.mockShard.SetCurrentTime(s.clusterName, now) - _, _, err = s.transferQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) - s.Nil(err) -} - -func (s *transferQueueStandbyTaskExecutorSuite) TestProcessCloseExecution() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - parentNamespaceID := "some random parent namespace ID" - parentInitiatedID := int64(3222) - parentInitiatedVersion := int64(1234) - parentNamespace := "some random parent namespace Name" - parentExecution := &commonpb.WorkflowExecution{ - WorkflowId: "some random parent workflow ID", - RunId: uuid.New(), - } - parentClock := vclock.NewVectorClock(rand.Int63(), rand.Int31(), rand.Int63()) - - mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - ParentExecutionInfo: &workflowspb.ParentExecutionInfo{ - NamespaceId: parentNamespaceID, - Namespace: parentNamespace, - Execution: parentExecution, - InitiatedId: parentInitiatedID, - InitiatedVersion: parentInitiatedVersion, - Clock: parentClock, - }, - }, - ) - s.Nil(err) - - wt := addWorkflowTaskScheduledEvent(mutableState) - event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) - wt.StartedEventID = event.GetEventId() - event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") - - taskID := int64(59) - event = addCompleteWorkflowEvent(mutableState, event.GetEventId(), nil) - - 
now := time.Now().UTC() - transferTask := &tasks.CloseExecutionTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - Version: s.version, - VisibilityTimestamp: now, - TaskID: taskID, - } - - expectedVerificationRequest := &historyservice.VerifyChildExecutionCompletionRecordedRequest{ - NamespaceId: parentNamespaceID, - ParentExecution: parentExecution, - ChildExecution: &execution, - ParentInitiatedId: parentInitiatedID, - ParentInitiatedVersion: parentInitiatedVersion, - Clock: parentClock, - } - - persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - s.mockArchivalMetadata.EXPECT().GetVisibilityConfig().Return(archiver.NewDisabledArchvialConfig()).AnyTimes() - - s.mockShard.SetCurrentTime(s.clusterName, now) - s.mockHistoryClient.EXPECT().VerifyChildExecutionCompletionRecorded(gomock.Any(), expectedVerificationRequest).Return(nil, nil) - _, _, err = s.transferQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) - s.Nil(err) - - s.mockHistoryClient.EXPECT().VerifyChildExecutionCompletionRecorded(gomock.Any(), expectedVerificationRequest).Return(nil, consts.ErrWorkflowExecutionNotFound) - _, _, err = s.transferQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) - s.Equal(consts.ErrTaskRetry, err) - - s.mockHistoryClient.EXPECT().VerifyChildExecutionCompletionRecorded(gomock.Any(), expectedVerificationRequest).Return(nil, consts.ErrWorkflowNotReady) - _, _, err = s.transferQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) - s.Equal(consts.ErrTaskRetry, err) - - s.mockHistoryClient.EXPECT().VerifyChildExecutionCompletionRecorded(gomock.Any(), expectedVerificationRequest).Return(nil, serviceerror.NewUnimplemented("not implemented")) - _, _, err = s.transferQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) - s.Nil(err) - - s.mockHistoryClient.EXPECT().VerifyChildExecutionCompletionRecorded(gomock.Any(), expectedVerificationRequest).Return(nil, errors.New("some random error")) - _, _, err = s.transferQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) - s.Equal(errVerificationFailed, err) - - s.mockShard.SetCurrentTime(s.clusterName, now.Add(s.fetchHistoryDuration)) - s.mockHistoryClient.EXPECT().VerifyChildExecutionCompletionRecorded(gomock.Any(), expectedVerificationRequest).Return(nil, consts.ErrWorkflowNotReady) - _, _, err = s.transferQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) - s.Equal(consts.ErrTaskRetry, err) - - s.mockShard.SetCurrentTime(s.clusterName, now.Add(s.discardDuration)) - s.mockHistoryClient.EXPECT().VerifyChildExecutionCompletionRecorded(gomock.Any(), expectedVerificationRequest).Return(nil, consts.ErrWorkflowNotReady) - _, _, err = s.transferQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) - s.Equal(consts.ErrTaskDiscarded, err) -} - -func (s *transferQueueStandbyTaskExecutorSuite) TestProcessCloseExecution_CanSkipVisibilityArchival() { - for _, skipVisibilityArchival := range []bool{ - false, - true, - } { - s.Run(fmt.Sprintf("CanSkipVisibilityArchival=%v", skipVisibilityArchival), func() { - 
execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - mutableState := workflow.TestGlobalMutableState( - s.mockShard, - s.mockShard.GetEventsCache(), - s.logger, - s.version, - execution.GetRunId(), - ) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: tests.StandbyWithVisibilityArchivalNamespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - }, - ) - s.Nil(err) - - wt := addWorkflowTaskScheduledEvent(mutableState) - event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) - wt.StartedEventID = event.GetEventId() - event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") - - taskID := int64(59) - event = addCompleteWorkflowEvent(mutableState, event.GetEventId(), nil) - - transferTask := &tasks.CloseExecutionTask{ - WorkflowKey: definition.NewWorkflowKey( - tests.StandbyWithVisibilityArchivalNamespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - Version: s.version, - TaskID: taskID, - VisibilityTimestamp: time.Now().UTC(), - CanSkipVisibilityArchival: skipVisibilityArchival, - } - - persistenceMutableState := s.createPersistenceMutableState( - mutableState, - event.GetEventId(), - event.GetVersion(), - ) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution( - gomock.Any(), - gomock.Any(), - ).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - if !skipVisibilityArchival { - s.mockArchivalMetadata.EXPECT().GetVisibilityConfig().Return( - archiver.NewArchivalConfig( - "enabled", - dc.GetStringPropertyFn("enabled"), - dc.GetBoolPropertyFn(true), - "disabled", - "random URI", - ), - ).AnyTimes() - s.mockArchivalClient.EXPECT().Archive(gomock.Any(), gomock.Any()).Return(nil, nil) - s.mockSearchAttributesProvider.EXPECT().GetSearchAttributes(gomock.Any(), false) - s.mockVisibilityManager.EXPECT().GetIndexName().Return("") - } - - _, _, err = s.transferQueueStandbyTaskExecutor.Execute( - context.Background(), - s.newTaskExecutable(transferTask), - ) - s.Nil(err) - }) - } -} - -func (s *transferQueueStandbyTaskExecutorSuite) TestProcessCancelExecution_Pending() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - targetExecution := commonpb.WorkflowExecution{ - WorkflowId: "some random target workflow ID", - RunId: uuid.New(), - } - - mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowExecutionTimeout: 
timestamp.DurationPtr(2 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - }, - ) - s.Nil(err) - - wt := addWorkflowTaskScheduledEvent(mutableState) - event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) - wt.StartedEventID = event.GetEventId() - event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") - - taskID := int64(59) - event, _ = addRequestCancelInitiatedEvent(mutableState, event.GetEventId(), uuid.New(), tests.TargetNamespace, tests.TargetNamespaceID, targetExecution.GetWorkflowId(), targetExecution.GetRunId()) - nextEventID := event.GetEventId() - - now := time.Now().UTC() - transferTask := &tasks.CancelExecutionTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - Version: s.version, - VisibilityTimestamp: now, - TargetNamespaceID: tests.TargetNamespaceID.String(), - TargetWorkflowID: targetExecution.GetWorkflowId(), - TargetRunID: targetExecution.GetRunId(), - TargetChildWorkflowOnly: true, - TaskID: taskID, - InitiatedEventID: event.GetEventId(), - } - - persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - - s.mockShard.SetCurrentTime(s.clusterName, now) - _, _, err = s.transferQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) - s.Equal(consts.ErrTaskRetry, err) - - s.mockShard.SetCurrentTime(s.clusterName, now.Add(s.fetchHistoryDuration)) - s.mockNDCHistoryResender.EXPECT().SendSingleWorkflowHistory( - gomock.Any(), - s.clusterName, - namespace.ID(transferTask.NamespaceID), - transferTask.WorkflowID, - transferTask.RunID, - nextEventID, - s.version, - int64(0), - int64(0), - ).Return(nil) - _, _, err = s.transferQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) - s.Equal(consts.ErrTaskRetry, err) - - s.mockShard.SetCurrentTime(s.clusterName, now.Add(s.discardDuration)) - _, _, err = s.transferQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) - s.Equal(consts.ErrTaskDiscarded, err) -} - -func (s *transferQueueStandbyTaskExecutorSuite) TestProcessCancelExecution_Success() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - targetExecution := commonpb.WorkflowExecution{ - WorkflowId: "some random target workflow ID", - RunId: uuid.New(), - } - - mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - }, - ) - s.Nil(err) - - wt := addWorkflowTaskScheduledEvent(mutableState) - event := 
addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) - wt.StartedEventID = event.GetEventId() - event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") - - taskID := int64(59) - event, _ = addRequestCancelInitiatedEvent(mutableState, event.GetEventId(), uuid.New(), tests.TargetNamespace, tests.TargetNamespaceID, targetExecution.GetWorkflowId(), targetExecution.GetRunId()) - - now := time.Now().UTC() - transferTask := &tasks.CancelExecutionTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - Version: s.version, - VisibilityTimestamp: now, - TargetNamespaceID: tests.TargetNamespaceID.String(), - TargetWorkflowID: targetExecution.GetWorkflowId(), - TargetRunID: targetExecution.GetRunId(), - TaskID: taskID, - InitiatedEventID: event.GetEventId(), - } - - event = addCancelRequestedEvent(mutableState, event.GetEventId(), tests.TargetNamespace, tests.TargetNamespaceID, targetExecution.GetWorkflowId(), targetExecution.GetRunId()) - // Flush buffered events so real IDs get assigned - mutableState.FlushBufferedEvents() - - persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - - s.mockShard.SetCurrentTime(s.clusterName, now) - _, _, err = s.transferQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) - s.Nil(err) -} - -func (s *transferQueueStandbyTaskExecutorSuite) TestProcessSignalExecution_Pending() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - targetExecution := commonpb.WorkflowExecution{ - WorkflowId: "some random target workflow ID", - RunId: uuid.New(), - } - signalName := "some random signal name" - - mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - }, - ) - s.Nil(err) - - wt := addWorkflowTaskScheduledEvent(mutableState) - event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) - wt.StartedEventID = event.GetEventId() - event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") - - taskID := int64(59) - event, _ = addRequestSignalInitiatedEvent(mutableState, event.GetEventId(), uuid.New(), - tests.TargetNamespace, tests.TargetNamespaceID, targetExecution.GetWorkflowId(), targetExecution.GetRunId(), signalName, nil, "", nil) - nextEventID := event.GetEventId() - - now := time.Now().UTC() - transferTask := &tasks.SignalExecutionTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - 
execution.GetWorkflowId(), - execution.GetRunId(), - ), - Version: s.version, - VisibilityTimestamp: now, - TargetNamespaceID: tests.TargetNamespaceID.String(), - TargetWorkflowID: targetExecution.GetWorkflowId(), - TargetRunID: targetExecution.GetRunId(), - TaskID: taskID, - InitiatedEventID: event.GetEventId(), - } - - persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - - s.mockShard.SetCurrentTime(s.clusterName, now) - _, _, err = s.transferQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) - s.Equal(consts.ErrTaskRetry, err) - - s.mockShard.SetCurrentTime(s.clusterName, now.Add(s.fetchHistoryDuration)) - s.mockNDCHistoryResender.EXPECT().SendSingleWorkflowHistory( - gomock.Any(), - s.clusterName, - namespace.ID(transferTask.NamespaceID), - transferTask.WorkflowID, - transferTask.RunID, - nextEventID, - s.version, - int64(0), - int64(0), - ).Return(nil) - _, _, err = s.transferQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) - s.Equal(consts.ErrTaskRetry, err) - - s.mockShard.SetCurrentTime(s.clusterName, now.Add(s.discardDuration)) - _, _, err = s.transferQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) - s.Equal(consts.ErrTaskDiscarded, err) -} - -func (s *transferQueueStandbyTaskExecutorSuite) TestProcessSignalExecution_Success() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - targetExecution := commonpb.WorkflowExecution{ - WorkflowId: "some random target workflow ID", - RunId: uuid.New(), - } - signalName := "some random signal name" - - mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - }, - ) - s.Nil(err) - - wt := addWorkflowTaskScheduledEvent(mutableState) - event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) - wt.StartedEventID = event.GetEventId() - event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") - - taskID := int64(59) - event, _ = addRequestSignalInitiatedEvent(mutableState, event.GetEventId(), uuid.New(), - tests.TargetNamespace, tests.TargetNamespaceID, targetExecution.GetWorkflowId(), targetExecution.GetRunId(), signalName, nil, "", nil) - - now := time.Now().UTC() - transferTask := &tasks.SignalExecutionTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - Version: s.version, - VisibilityTimestamp: now, - TargetNamespaceID: tests.TargetNamespaceID.String(), - TargetWorkflowID: 
targetExecution.GetWorkflowId(), - TargetRunID: targetExecution.GetRunId(), - TaskID: taskID, - InitiatedEventID: event.GetEventId(), - } - - event = addSignaledEvent(mutableState, event.GetEventId(), tests.TargetNamespace, tests.TargetNamespaceID, targetExecution.GetWorkflowId(), targetExecution.GetRunId(), "") - // Flush buffered events so real IDs get assigned - mutableState.FlushBufferedEvents() - - persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - - s.mockShard.SetCurrentTime(s.clusterName, now) - _, _, err = s.transferQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) - s.Nil(err) -} - -func (s *transferQueueStandbyTaskExecutorSuite) TestProcessStartChildExecution_Pending() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - childWorkflowID := "some random child workflow ID" - childWorkflowType := "some random child workflow type" - childTaskQueueName := "some random child task queue" - - mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - }, - ) - s.Nil(err) - - wt := addWorkflowTaskScheduledEvent(mutableState) - event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) - wt.StartedEventID = event.GetEventId() - event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") - - taskID := int64(59) - event, _ = addStartChildWorkflowExecutionInitiatedEvent(mutableState, event.GetEventId(), uuid.New(), - tests.ChildNamespace, tests.ChildNamespaceID, childWorkflowID, childWorkflowType, childTaskQueueName, nil, 1*time.Second, 1*time.Second, 1*time.Second, enumspb.PARENT_CLOSE_POLICY_ABANDON) - nextEventID := event.GetEventId() - - now := time.Now().UTC() - transferTask := &tasks.StartChildExecutionTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - Version: s.version, - VisibilityTimestamp: now, - TargetNamespaceID: tests.ChildNamespaceID.String(), - TargetWorkflowID: childWorkflowID, - TaskID: taskID, - InitiatedEventID: event.GetEventId(), - } - - persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - - s.mockShard.SetCurrentTime(s.clusterName, now) - _, _, err = s.transferQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) - s.Equal(consts.ErrTaskRetry, err) - - 
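// Advancing the shard clock past fetchHistoryDuration makes the standby executor ask the
// active cluster to resend history while still returning ErrTaskRetry; once the
// child-started event is recorded below, the task instead calls
// VerifyFirstWorkflowTaskScheduled and the outcome depends on the error that call returns.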
s.mockShard.SetCurrentTime(s.clusterName, now.Add(s.fetchHistoryDuration)) - s.mockNDCHistoryResender.EXPECT().SendSingleWorkflowHistory( - gomock.Any(), - s.clusterName, - namespace.ID(transferTask.NamespaceID), - transferTask.WorkflowID, - transferTask.RunID, - nextEventID, - s.version, - int64(0), - int64(0), - ).Return(nil) - _, _, err = s.transferQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) - s.Equal(consts.ErrTaskRetry, err) - - event = addChildWorkflowExecutionStartedEvent(mutableState, event.GetEventId(), childWorkflowID, uuid.New(), childWorkflowType, nil) - mutableState.FlushBufferedEvents() - - // clear the cache - s.transferQueueStandbyTaskExecutor.cache = wcache.NewCache(s.mockShard) - persistenceMutableState = s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) - s.mockShard.SetCurrentTime(s.clusterName, now.Add(s.fetchHistoryDuration)) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - - s.mockHistoryClient.EXPECT().VerifyFirstWorkflowTaskScheduled(gomock.Any(), gomock.Any()).Return(nil, nil) - _, _, err = s.transferQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) - s.Nil(err) - - s.mockHistoryClient.EXPECT().VerifyFirstWorkflowTaskScheduled(gomock.Any(), gomock.Any()).Return(nil, consts.ErrWorkflowNotReady) - _, _, err = s.transferQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) - s.Equal(consts.ErrTaskRetry, err) - - s.mockHistoryClient.EXPECT().VerifyFirstWorkflowTaskScheduled(gomock.Any(), gomock.Any()).Return(nil, consts.ErrWorkflowExecutionNotFound) - _, _, err = s.transferQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) - s.Equal(consts.ErrTaskRetry, err) - - s.mockHistoryClient.EXPECT().VerifyFirstWorkflowTaskScheduled(gomock.Any(), gomock.Any()).Return(nil, &serviceerror.Unimplemented{}) - _, _, err = s.transferQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) - s.Nil(err) - - s.mockHistoryClient.EXPECT().VerifyFirstWorkflowTaskScheduled(gomock.Any(), gomock.Any()).Return(nil, errors.New("some random error")) - _, _, err = s.transferQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) - s.Equal(errVerificationFailed, err) - - s.mockShard.SetCurrentTime(s.clusterName, now.Add(s.discardDuration)) - s.mockHistoryClient.EXPECT().VerifyFirstWorkflowTaskScheduled(gomock.Any(), gomock.Any()).Return(nil, &serviceerror.WorkflowNotReady{}) - _, _, err = s.transferQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) - s.Equal(consts.ErrTaskDiscarded, err) - - s.mockShard.SetCurrentTime(s.clusterName, now.Add(s.discardDuration)) - s.mockHistoryClient.EXPECT().VerifyFirstWorkflowTaskScheduled(gomock.Any(), gomock.Any()).Return(nil, nil) - _, _, err = s.transferQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) - s.Nil(err) -} - -func (s *transferQueueStandbyTaskExecutorSuite) TestProcessStartChildExecution_Success() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - childWorkflowID := "some random child workflow ID" - childWorkflowType := "some random child 
workflow type" - childTaskQueueName := "some random child task queue" - - mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - ContinueAsNewInitiator: enumspb.CONTINUE_AS_NEW_INITIATOR_UNSPECIFIED, - }, - ) - s.Nil(err) - - wt := addWorkflowTaskScheduledEvent(mutableState) - event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) - wt.StartedEventID = event.GetEventId() - event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") - - taskID := int64(59) - event, childInfo := addStartChildWorkflowExecutionInitiatedEvent(mutableState, event.GetEventId(), uuid.New(), - tests.ChildNamespace, tests.ChildNamespaceID, childWorkflowID, childWorkflowType, childTaskQueueName, nil, 1*time.Second, 1*time.Second, 1*time.Second, enumspb.PARENT_CLOSE_POLICY_ABANDON) - - now := time.Now().UTC() - transferTask := &tasks.StartChildExecutionTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - Version: s.version, - VisibilityTimestamp: now, - TargetNamespaceID: tests.ChildNamespaceID.String(), - TargetWorkflowID: childWorkflowID, - TaskID: taskID, - InitiatedEventID: event.GetEventId(), - } - event = addChildWorkflowExecutionStartedEvent(mutableState, event.GetEventId(), childWorkflowID, uuid.New(), childWorkflowType, nil) - // Flush buffered events so real IDs get assigned - mutableState.FlushBufferedEvents() - childInfo.StartedEventId = event.GetEventId() - - persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - s.mockHistoryClient.EXPECT().VerifyFirstWorkflowTaskScheduled(gomock.Any(), gomock.Any()).Return(nil, nil) - - s.mockShard.SetCurrentTime(s.clusterName, now) - _, _, err = s.transferQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) - s.Nil(err) - - // workflow closed && child started && parent close policy is abandon - event, err = mutableState.AddTimeoutWorkflowEvent( - mutableState.GetNextEventID(), - enumspb.RETRY_STATE_RETRY_POLICY_NOT_SET, - uuid.New(), - ) - s.NoError(err) - - s.transferQueueStandbyTaskExecutor.cache = wcache.NewCache(s.mockShard) - persistenceMutableState = s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - s.mockHistoryClient.EXPECT().VerifyFirstWorkflowTaskScheduled(gomock.Any(), gomock.Any()).Return(nil, nil) - - s.mockShard.SetCurrentTime(s.clusterName, now) - _, _, err = s.transferQueueStandbyTaskExecutor.Execute(context.Background(), 
s.newTaskExecutable(transferTask)) - s.Nil(err) -} - -func (s *transferQueueStandbyTaskExecutorSuite) createPersistenceMutableState( - ms workflow.MutableState, - lastEventID int64, - lastEventVersion int64, -) *persistencespb.WorkflowMutableState { - currentVersionHistory, err := versionhistory.GetCurrentVersionHistory(ms.GetExecutionInfo().GetVersionHistories()) - s.NoError(err) - err = versionhistory.AddOrUpdateVersionHistoryItem(currentVersionHistory, versionhistory.NewVersionHistoryItem( - lastEventID, lastEventVersion, - )) - s.NoError(err) - return workflow.TestCloneToProto(ms) -} - -func (s *transferQueueStandbyTaskExecutorSuite) newTaskExecutable( - task tasks.Task, -) queues.Executable { - return queues.NewExecutable( - queues.DefaultReaderId, - task, - s.transferQueueStandbyTaskExecutor, - nil, - nil, - queues.NewNoopPriorityAssigner(), - s.mockShard.GetTimeSource(), - s.mockNamespaceCache, - s.mockClusterMetadata, - nil, - metrics.NoopMetricsHandler, - ) -} diff -Nru temporal-1.21.5-1/src/service/history/transferQueueTaskExecutorBase.go temporal-1.22.5/src/service/history/transferQueueTaskExecutorBase.go --- temporal-1.21.5-1/src/service/history/transferQueueTaskExecutorBase.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/transferQueueTaskExecutorBase.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,337 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
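// The standby executor tests above all drive the same clock-window policy: while a
// cross-cluster condition is still unverified the task returns ErrTaskRetry, after
// fetchHistoryDuration the active cluster is asked to resend history (still ErrTaskRetry),
// and after discardDuration the task is dropped with ErrTaskDiscarded. A minimal sketch of
// that policy, assuming the window durations are passed in explicitly (helper name and
// signature are illustrative, not part of this patch):
//
//	func standbyWindowOutcome(taskTime, now time.Time, fetchWindow, discardWindow time.Duration) error {
//		switch {
//		case now.After(taskTime.Add(discardWindow)):
//			return consts.ErrTaskDiscarded // give up; the active cluster never confirmed the state
//		case now.After(taskTime.Add(fetchWindow)):
//			// request a history resend from the active cluster, then retry
//			return consts.ErrTaskRetry
//		default:
//			return consts.ErrTaskRetry // wait for replication to catch up
//		}
//	}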
- -package history - -import ( - "context" - "time" - - commonpb "go.temporal.io/api/common/v1" - enumspb "go.temporal.io/api/enums/v1" - "go.temporal.io/api/serviceerror" - taskqueuepb "go.temporal.io/api/taskqueue/v1" - - "go.temporal.io/server/api/historyservice/v1" - "go.temporal.io/server/api/matchingservice/v1" - taskqueuespb "go.temporal.io/server/api/taskqueue/v1" - "go.temporal.io/server/common" - "go.temporal.io/server/common/debug" - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/log/tag" - "go.temporal.io/server/common/metrics" - "go.temporal.io/server/common/namespace" - "go.temporal.io/server/common/persistence/visibility/manager" - "go.temporal.io/server/common/primitives" - "go.temporal.io/server/common/searchattribute" - "go.temporal.io/server/service/history/configs" - "go.temporal.io/server/service/history/consts" - "go.temporal.io/server/service/history/deletemanager" - "go.temporal.io/server/service/history/queues" - "go.temporal.io/server/service/history/shard" - "go.temporal.io/server/service/history/tasks" - "go.temporal.io/server/service/history/vclock" - "go.temporal.io/server/service/history/workflow" - wcache "go.temporal.io/server/service/history/workflow/cache" - "go.temporal.io/server/service/worker/archiver" -) - -const ( - taskTimeout = time.Second * 3 * debug.TimeoutMultiplier - taskHistoryOpTimeout = 20 * time.Second -) - -var errUnknownTransferTask = serviceerror.NewInternal("Unknown transfer task") - -type ( - transferQueueTaskExecutorBase struct { - currentClusterName string - shard shard.Context - registry namespace.Registry - cache wcache.Cache - archivalClient archiver.Client - logger log.Logger - metricHandler metrics.Handler - historyClient historyservice.HistoryServiceClient - matchingClient matchingservice.MatchingServiceClient - config *configs.Config - searchAttributesProvider searchattribute.Provider - visibilityManager manager.VisibilityManager - workflowDeleteManager deletemanager.DeleteManager - } -) - -func newTransferQueueTaskExecutorBase( - shard shard.Context, - workflowCache wcache.Cache, - archivalClient archiver.Client, - logger log.Logger, - metricHandler metrics.Handler, - matchingClient matchingservice.MatchingServiceClient, - visibilityManager manager.VisibilityManager, -) *transferQueueTaskExecutorBase { - return &transferQueueTaskExecutorBase{ - currentClusterName: shard.GetClusterMetadata().GetCurrentClusterName(), - shard: shard, - registry: shard.GetNamespaceRegistry(), - cache: workflowCache, - archivalClient: archivalClient, - logger: logger, - metricHandler: metricHandler, - historyClient: shard.GetHistoryClient(), - matchingClient: matchingClient, - config: shard.GetConfig(), - searchAttributesProvider: shard.GetSearchAttributesProvider(), - visibilityManager: visibilityManager, - workflowDeleteManager: deletemanager.NewDeleteManager( - shard, - workflowCache, - shard.GetConfig(), - archivalClient, - shard.GetTimeSource(), - visibilityManager, - ), - } -} - -func (t *transferQueueTaskExecutorBase) pushActivity( - ctx context.Context, - task *tasks.ActivityTask, - activityScheduleToStartTimeout *time.Duration, - directive *taskqueuespb.TaskVersionDirective, -) error { - _, err := t.matchingClient.AddActivityTask(ctx, &matchingservice.AddActivityTaskRequest{ - NamespaceId: task.NamespaceID, - Execution: &commonpb.WorkflowExecution{ - WorkflowId: task.WorkflowID, - RunId: task.RunID, - }, - TaskQueue: &taskqueuepb.TaskQueue{ - Name: task.TaskQueue, - Kind: enumspb.TASK_QUEUE_KIND_NORMAL, - }, - 
ScheduledEventId: task.ScheduledEventID, - ScheduleToStartTimeout: activityScheduleToStartTimeout, - Clock: vclock.NewVectorClock(t.shard.GetClusterMetadata().GetClusterID(), t.shard.GetShardID(), task.TaskID), - VersionDirective: directive, - }) - if _, isNotFound := err.(*serviceerror.NotFound); isNotFound { - // NotFound error is not expected for AddTasks calls - // but will be ignored by task error handling logic, so log it here - tasks.InitializeLogger(task, t.logger).Error("Matching returned not found error for AddActivityTask", tag.Error(err)) - } - - return err -} - -func (t *transferQueueTaskExecutorBase) pushWorkflowTask( - ctx context.Context, - task *tasks.WorkflowTask, - taskqueue *taskqueuepb.TaskQueue, - workflowTaskScheduleToStartTimeout *time.Duration, - directive *taskqueuespb.TaskVersionDirective, -) error { - _, err := t.matchingClient.AddWorkflowTask(ctx, &matchingservice.AddWorkflowTaskRequest{ - NamespaceId: task.NamespaceID, - Execution: &commonpb.WorkflowExecution{ - WorkflowId: task.WorkflowID, - RunId: task.RunID, - }, - TaskQueue: taskqueue, - ScheduledEventId: task.ScheduledEventID, - ScheduleToStartTimeout: workflowTaskScheduleToStartTimeout, - Clock: vclock.NewVectorClock(t.shard.GetClusterMetadata().GetClusterID(), t.shard.GetShardID(), task.TaskID), - VersionDirective: directive, - }) - if _, isNotFound := err.(*serviceerror.NotFound); isNotFound { - // NotFound error is not expected for AddTasks calls - // but will be ignored by task error handling logic, so log it here - tasks.InitializeLogger(task, t.logger).Error("Matching returned not found error for AddWorkflowTask", tag.Error(err)) - } - - return err -} - -func (t *transferQueueTaskExecutorBase) archiveVisibility( - ctx context.Context, - namespaceID namespace.ID, - workflowID string, - runID string, - workflowTypeName string, - startTime time.Time, - executionTime time.Time, - endTime time.Time, - status enumspb.WorkflowExecutionStatus, - historyLength int64, - visibilityMemo *commonpb.Memo, - searchAttributes *commonpb.SearchAttributes, -) error { - namespaceEntry, err := t.registry.GetNamespaceByID(namespaceID) - if err != nil { - return err - } - - clusterConfiguredForVisibilityArchival := t.shard.GetArchivalMetadata().GetVisibilityConfig().ClusterConfiguredForArchival() - namespaceConfiguredForVisibilityArchival := namespaceEntry.VisibilityArchivalState().State == enumspb.ARCHIVAL_STATE_ENABLED - archiveVisibility := clusterConfiguredForVisibilityArchival && namespaceConfiguredForVisibilityArchival - - if !archiveVisibility { - return nil - } - - ctx, cancel := context.WithTimeout(ctx, t.config.TransferProcessorVisibilityArchivalTimeLimit()) - defer cancel() - - saTypeMap, err := t.searchAttributesProvider.GetSearchAttributes(t.visibilityManager.GetIndexName(), false) - if err != nil { - return err - } - - // Setting search attributes types here because archival client needs to stringify them - // and it might not have access to type map (i.e. type needs to be embedded). 
- searchattribute.ApplyTypeMap(searchAttributes, saTypeMap) - - _, err = t.archivalClient.Archive(ctx, &archiver.ClientRequest{ - ArchiveRequest: &archiver.ArchiveRequest{ - ShardID: t.shard.GetShardID(), - NamespaceID: namespaceID.String(), - Namespace: namespaceEntry.Name().String(), - WorkflowID: workflowID, - RunID: runID, - WorkflowTypeName: workflowTypeName, - StartTime: startTime, - ExecutionTime: executionTime, - CloseTime: endTime, - Status: status, - HistoryLength: historyLength, - Memo: visibilityMemo, - SearchAttributes: searchAttributes, - VisibilityURI: namespaceEntry.VisibilityArchivalState().URI, - HistoryURI: namespaceEntry.HistoryArchivalState().URI, - Targets: []archiver.ArchivalTarget{archiver.ArchiveTargetVisibility}, - }, - CallerService: string(primitives.HistoryService), - AttemptArchiveInline: true, // archive visibility inline by default - }) - - return err -} - -func (t *transferQueueTaskExecutorBase) processDeleteExecutionTask( - ctx context.Context, - task *tasks.DeleteExecutionTask, - ensureNoPendingCloseTask bool, -) error { - return t.deleteExecution(ctx, task, false, ensureNoPendingCloseTask, &task.ProcessStage) -} - -func (t *transferQueueTaskExecutorBase) deleteExecution( - ctx context.Context, - task tasks.Task, - forceDeleteFromOpenVisibility bool, - ensureNoPendingCloseTask bool, - stage *tasks.DeleteWorkflowExecutionStage, -) (retError error) { - ctx, cancel := context.WithTimeout(ctx, taskTimeout) - defer cancel() - - workflowExecution := commonpb.WorkflowExecution{ - WorkflowId: task.GetWorkflowID(), - RunId: task.GetRunID(), - } - - weCtx, release, err := t.cache.GetOrCreateWorkflowExecution( - ctx, - namespace.ID(task.GetNamespaceID()), - workflowExecution, - workflow.LockPriorityLow, - ) - if err != nil { - return err - } - defer func() { release(retError) }() - - mutableState, err := loadMutableStateForTransferTask(ctx, weCtx, task, t.metricHandler, t.logger) - if err != nil { - return err - } - - // Here, we ensure that the workflow is closed successfully before deleting it. Otherwise, the mutable state - // might be deleted before the close task is executed, and so the close task will be dropped. In passive cluster, - // this check can be ignored. - // - // Additionally, this function itself could be called from within the close execution task, so we need to skip - // the check in that case because the close execution task would be waiting for itself to finish forever. So, the - // ensureNoPendingCloseTask flag is set iff we're running in the active cluster, and we aren't processing the - // CloseExecutionTask from within this same goroutine. - if ensureNoPendingCloseTask { - // Unfortunately, queue states/ack levels are updated with delay (default 30s), therefore this could fail if the - // workflow was closed before the queue state/ack levels were updated, so we return a retryable error. - if t.isCloseExecutionTaskPending(mutableState, weCtx) { - return consts.ErrDependencyTaskNotCompleted - } - } - - // If task version is EmptyVersion it means "don't check task version". - // This can happen when task was created from explicit user API call. - // Or the namespace is a local namespace which will not have version conflict. 
- if task.GetVersion() != common.EmptyVersion { - lastWriteVersion, err := mutableState.GetLastWriteVersion() - if err != nil { - return err - } - err = CheckTaskVersion(t.shard, t.logger, mutableState.GetNamespaceEntry(), lastWriteVersion, task.GetVersion(), task) - if err != nil { - return err - } - } - - return t.workflowDeleteManager.DeleteWorkflowExecution( - ctx, - namespace.ID(task.GetNamespaceID()), - workflowExecution, - weCtx, - mutableState, - forceDeleteFromOpenVisibility, - stage, - ) -} - -func (t *transferQueueTaskExecutorBase) isCloseExecutionTaskPending(ms workflow.MutableState, weCtx workflow.Context) bool { - closeTransferTaskId := ms.GetExecutionInfo().CloseTransferTaskId - // taskID == 0 if workflow closed before this field was added (v1.17). - if closeTransferTaskId == 0 { - return false - } - // check if close execution transfer task is completed - transferQueueState, ok := t.shard.GetQueueState(tasks.CategoryTransfer) - if !ok { - return true - } - fakeCloseTransferTask := &tasks.CloseExecutionTask{ - WorkflowKey: weCtx.GetWorkflowKey(), - TaskID: closeTransferTaskId, - } - return !queues.IsTaskAcked(fakeCloseTransferTask, transferQueueState) -} diff -Nru temporal-1.21.5-1/src/service/history/transfer_queue_active_task_executor.go temporal-1.22.5/src/service/history/transfer_queue_active_task_executor.go --- temporal-1.21.5-1/src/service/history/transfer_queue_active_task_executor.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/service/history/transfer_queue_active_task_executor.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,1622 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
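// The transfer_queue_active_task_executor.go added below is the active-cluster counterpart
// of the standby executor tested above, and its constructor now takes raw history/matching
// clients. A hedged sketch of how it is constructed and handed a task, mirroring the
// constructor below and the newTaskExecutable test helper above (all variable names are
// illustrative, not from this patch):
//
//	executor := newTransferQueueActiveTaskExecutor(
//		shardCtx, workflowCache, archivalClient, sdkClientFactory,
//		logger, metrics.NoopMetricsHandler, shardCtx.GetConfig(),
//		historyRawClient, matchingRawClient, visibilityManager,
//	)
//	executable := queues.NewExecutable(
//		queues.DefaultReaderId, transferTask, executor, nil, nil,
//		queues.NewNoopPriorityAssigner(), shardCtx.GetTimeSource(),
//		namespaceRegistry, clusterMetadata, nil, metrics.NoopMetricsHandler,
//	)
//	tags, _, err := executor.Execute(ctx, executable)
//	_, _ = tags, err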
+ +package history + +import ( + "context" + "fmt" + + "github.com/pborman/uuid" + commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" + historypb "go.temporal.io/api/history/v1" + "go.temporal.io/api/serviceerror" + taskqueuepb "go.temporal.io/api/taskqueue/v1" + workflowpb "go.temporal.io/api/workflow/v1" + "go.temporal.io/api/workflowservice/v1" + + clockspb "go.temporal.io/server/api/clock/v1" + "go.temporal.io/server/api/historyservice/v1" + persistencespb "go.temporal.io/server/api/persistence/v1" + workflowspb "go.temporal.io/server/api/workflow/v1" + "go.temporal.io/server/common" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/log/tag" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/namespace" + "go.temporal.io/server/common/persistence" + "go.temporal.io/server/common/persistence/versionhistory" + "go.temporal.io/server/common/persistence/visibility/manager" + "go.temporal.io/server/common/primitives/timestamp" + "go.temporal.io/server/common/resource" + "go.temporal.io/server/common/rpc" + "go.temporal.io/server/common/sdk" + serviceerrors "go.temporal.io/server/common/serviceerror" + "go.temporal.io/server/common/worker_versioning" + "go.temporal.io/server/service/history/configs" + "go.temporal.io/server/service/history/consts" + "go.temporal.io/server/service/history/ndc" + "go.temporal.io/server/service/history/queues" + "go.temporal.io/server/service/history/shard" + "go.temporal.io/server/service/history/tasks" + "go.temporal.io/server/service/history/vclock" + "go.temporal.io/server/service/history/workflow" + wcache "go.temporal.io/server/service/history/workflow/cache" + "go.temporal.io/server/service/worker/archiver" + "go.temporal.io/server/service/worker/parentclosepolicy" +) + +type ( + transferQueueActiveTaskExecutor struct { + *transferQueueTaskExecutorBase + + workflowResetter ndc.WorkflowResetter + parentClosePolicyClient parentclosepolicy.Client + } +) + +func newTransferQueueActiveTaskExecutor( + shard shard.Context, + workflowCache wcache.Cache, + archivalClient archiver.Client, + sdkClientFactory sdk.ClientFactory, + logger log.Logger, + metricProvider metrics.Handler, + config *configs.Config, + historyRawClient resource.HistoryRawClient, + matchingRawClient resource.MatchingRawClient, + visibilityManager manager.VisibilityManager, +) queues.Executor { + return &transferQueueActiveTaskExecutor{ + transferQueueTaskExecutorBase: newTransferQueueTaskExecutorBase( + shard, + workflowCache, + archivalClient, + logger, + metricProvider, + historyRawClient, + matchingRawClient, + visibilityManager, + ), + workflowResetter: ndc.NewWorkflowResetter( + shard, + workflowCache, + logger, + ), + parentClosePolicyClient: parentclosepolicy.NewClient( + shard.GetMetricsHandler(), + shard.GetLogger(), + sdkClientFactory, + config.NumParentClosePolicySystemWorkflows(), + ), + } +} + +func (t *transferQueueActiveTaskExecutor) Execute( + ctx context.Context, + executable queues.Executable, +) ([]metrics.Tag, bool, error) { + task := executable.GetTask() + taskType := queues.GetActiveTransferTaskTypeTagValue(task) + namespaceTag, replicationState := getNamespaceTagAndReplicationStateByID( + t.shard.GetNamespaceRegistry(), + task.GetNamespaceID(), + ) + metricsTags := []metrics.Tag{ + namespaceTag, + metrics.TaskTypeTag(taskType), + metrics.OperationTag(taskType), // for backward compatibility + } + + if replicationState == enumspb.REPLICATION_STATE_HANDOVER { + // TODO: exclude task types here if we 
believe it's safe & necessary to execute + // them during namespace handover. + // TODO: move this logic to queues.Executable when metrics tag doesn't need to + // be returned from task executor + return metricsTags, true, consts.ErrNamespaceHandover + } + + var err error + switch task := task.(type) { + case *tasks.ActivityTask: + err = t.processActivityTask(ctx, task) + case *tasks.WorkflowTask: + err = t.processWorkflowTask(ctx, task) + case *tasks.CloseExecutionTask: + err = t.processCloseExecution(ctx, task) + case *tasks.CancelExecutionTask: + err = t.processCancelExecution(ctx, task) + case *tasks.SignalExecutionTask: + err = t.processSignalExecution(ctx, task) + case *tasks.StartChildExecutionTask: + err = t.processStartChildExecution(ctx, task) + case *tasks.ResetWorkflowTask: + err = t.processResetWorkflow(ctx, task) + case *tasks.DeleteExecutionTask: + err = t.processDeleteExecutionTask(ctx, task) + default: + err = errUnknownTransferTask + } + + return metricsTags, true, err +} + +func (t *transferQueueActiveTaskExecutor) processDeleteExecutionTask(ctx context.Context, + task *tasks.DeleteExecutionTask) error { + return t.transferQueueTaskExecutorBase.processDeleteExecutionTask(ctx, task, + t.config.TransferProcessorEnsureCloseBeforeDelete()) +} + +func (t *transferQueueActiveTaskExecutor) processActivityTask( + ctx context.Context, + task *tasks.ActivityTask, +) (retError error) { + ctx, cancel := context.WithTimeout(ctx, taskTimeout) + defer cancel() + + weContext, release, err := getWorkflowExecutionContextForTask(ctx, t.cache, task) + if err != nil { + return err + } + defer func() { release(retError) }() + + mutableState, err := loadMutableStateForTransferTask(ctx, weContext, task, t.metricHandler, t.logger) + if err != nil { + return err + } + if mutableState == nil { + release(nil) // release(nil) so that the mutable state is not unloaded from cache + return consts.ErrWorkflowExecutionNotFound + } + + ai, ok := mutableState.GetActivityInfo(task.ScheduledEventID) + if !ok { + release(nil) // release(nil) so that the mutable state is not unloaded from cache + return consts.ErrActivityTaskNotFound + } + + err = CheckTaskVersion(t.shard, t.logger, mutableState.GetNamespaceEntry(), ai.Version, task.Version, task) + if err != nil { + return err + } + + if !mutableState.IsWorkflowExecutionRunning() { + release(nil) // release(nil) so that the mutable state is not unloaded from cache + return consts.ErrWorkflowCompleted + } + + timeout := timestamp.DurationValue(ai.ScheduleToStartTimeout) + directive := worker_versioning.MakeDirectiveForActivityTask(mutableState.GetWorkerVersionStamp(), ai.UseCompatibleVersion) + + // NOTE: do not access anything related mutable state after this lock release + // release the context lock since we no longer need mutable state and + // the rest of logic is making RPC call, which takes time. 
+ release(nil) + return t.pushActivity(ctx, task, &timeout, directive) +} + +func (t *transferQueueActiveTaskExecutor) processWorkflowTask( + ctx context.Context, + transferTask *tasks.WorkflowTask, +) (retError error) { + ctx, cancel := context.WithTimeout(ctx, taskTimeout) + defer cancel() + + weContext, release, err := getWorkflowExecutionContextForTask(ctx, t.cache, transferTask) + if err != nil { + return err + } + defer func() { release(retError) }() + + mutableState, err := loadMutableStateForTransferTask(ctx, weContext, transferTask, t.metricHandler, t.logger) + if err != nil { + return err + } + if mutableState == nil || !mutableState.IsWorkflowExecutionRunning() { + return nil + } + + workflowTask := mutableState.GetWorkflowTaskByID(transferTask.ScheduledEventID) + if workflowTask == nil { + return nil + } + err = CheckTaskVersion(t.shard, t.logger, mutableState.GetNamespaceEntry(), workflowTask.Version, transferTask.Version, transferTask) + if err != nil { + return err + } + + // Task queue from transfer task (not current one from mutable state) must be used here. + // If current task queue becomes sticky since this transfer task was created, + // it can't be used here, because timeout timer was not created for it, + // because it used to be non-sticky when this transfer task was created . + taskQueue, scheduleToStartTimeout := mutableState.TaskQueueScheduleToStartTimeout(transferTask.TaskQueue) + + normalTaskQueueName := mutableState.GetExecutionInfo().TaskQueue + + directive := worker_versioning.MakeDirectiveForWorkflowTask( + mutableState.GetWorkerVersionStamp(), + mutableState.GetLastWorkflowTaskStartedEventID(), + ) + + // NOTE: Do not access mutableState after this lock is released. + // It is important to release the workflow lock here, because pushWorkflowTask will call matching, + // which will call history back (with RecordWorkflowTaskStarted), and it will try to get workflow lock again. + release(nil) + + err = t.pushWorkflowTask(ctx, transferTask, taskQueue, scheduleToStartTimeout, directive) + + if _, ok := err.(*serviceerrors.StickyWorkerUnavailable); ok { + // sticky worker is unavailable, switch to original normal task queue + taskQueue = &taskqueuepb.TaskQueue{ + // do not use task.TaskQueue which is sticky, use original normal task queue from mutable state + Name: normalTaskQueueName, + Kind: enumspb.TASK_QUEUE_KIND_NORMAL, + } + + // Continue to use sticky schedule_to_start timeout as TTL for the matching task. Because the schedule_to_start + // timeout timer task is already created which will timeout this task if no worker pick it up in 5s anyway. + // There is no need to reset sticky, because if this task is picked by new worker, the new worker will reset + // the sticky queue to a new one. However, if worker is completely down, that schedule_to_start timeout task + // will re-create a new non-sticky task and reset sticky. 
+ err = t.pushWorkflowTask(ctx, transferTask, taskQueue, scheduleToStartTimeout, directive) + } + return err +} + +func (t *transferQueueActiveTaskExecutor) processCloseExecution( + ctx context.Context, + task *tasks.CloseExecutionTask, +) (retError error) { + ctx, cancel := context.WithTimeout(ctx, taskTimeout) + defer cancel() + + weContext, release, err := getWorkflowExecutionContextForTask(ctx, t.cache, task) + if err != nil { + return err + } + defer func() { release(retError) }() + + mutableState, err := loadMutableStateForTransferTask(ctx, weContext, task, t.metricHandler, t.logger) + if err != nil { + return err + } + if mutableState == nil || mutableState.IsWorkflowExecutionRunning() { + return nil + } + + // DeleteAfterClose is set to true when this close execution task was generated as part of delete open workflow execution procedure. + // Delete workflow execution is started by user API call and should be done regardless of current workflow version. + if !task.DeleteAfterClose { + lastWriteVersion, err := mutableState.GetLastWriteVersion() + if err != nil { + return err + } + err = CheckTaskVersion(t.shard, t.logger, mutableState.GetNamespaceEntry(), lastWriteVersion, task.Version, task) + if err != nil { + return err + } + } + + workflowExecution := commonpb.WorkflowExecution{ + WorkflowId: task.GetWorkflowID(), + RunId: task.GetRunID(), + } + executionInfo := mutableState.GetExecutionInfo() + executionState := mutableState.GetExecutionState() + var completionEvent *historypb.HistoryEvent // needed to report close event to parent workflow + replyToParentWorkflow := mutableState.HasParentExecution() && executionInfo.NewExecutionRunId == "" + if replyToParentWorkflow { + // only load close event if needed. + completionEvent, err = mutableState.GetCompletionEvent(ctx) + if err != nil { + return err + } + replyToParentWorkflow = replyToParentWorkflow && !ndc.IsTerminatedByResetter(completionEvent) + } + parentNamespaceID := executionInfo.ParentNamespaceId + parentWorkflowID := executionInfo.ParentWorkflowId + parentRunID := executionInfo.ParentRunId + parentInitiatedID := executionInfo.ParentInitiatedId + parentInitiatedVersion := executionInfo.ParentInitiatedVersion + var parentClock *clockspb.VectorClock + if executionInfo.ParentClock != nil { + parentClock = vclock.NewVectorClock( + executionInfo.ParentClock.ClusterId, + executionInfo.ParentClock.ShardId, + executionInfo.ParentClock.Clock, + ) + } + + workflowTypeName := executionInfo.WorkflowTypeName + workflowCloseTime, err := mutableState.GetWorkflowCloseTime(ctx) + if err != nil { + return err + } + + workflowStatus := executionState.Status + workflowHistoryLength := mutableState.GetNextEventID() - 1 + + workflowStartTime := timestamp.TimeValue(mutableState.GetExecutionInfo().GetStartTime()) + workflowExecutionTime := timestamp.TimeValue(mutableState.GetExecutionInfo().GetExecutionTime()) + visibilityMemo := getWorkflowMemo(copyMemo(executionInfo.Memo)) + searchAttr := getSearchAttributes(copySearchAttributes(executionInfo.SearchAttributes)) + namespaceName := mutableState.GetNamespaceEntry().Name() + children := copyChildWorkflowInfos(mutableState.GetPendingChildExecutionInfos()) + + // NOTE: do not access anything related mutable state after this lock release. + // Release lock immediately since mutable state is not needed + // and the rest of logic is RPC calls, which can take time. 
+ release(nil) + + if !task.CanSkipVisibilityArchival { + err = t.archiveVisibility( + ctx, + namespace.ID(task.NamespaceID), + task.WorkflowID, + task.RunID, + workflowTypeName, + workflowStartTime, + workflowExecutionTime, + *workflowCloseTime, + workflowStatus, + workflowHistoryLength, + visibilityMemo, + searchAttr, + ) + if err != nil { + return err + } + } + + // Communicate the result to parent execution if this is Child Workflow execution + if replyToParentWorkflow { + _, err := t.historyRawClient.RecordChildExecutionCompleted(ctx, &historyservice.RecordChildExecutionCompletedRequest{ + NamespaceId: parentNamespaceID, + ParentExecution: &commonpb.WorkflowExecution{ + WorkflowId: parentWorkflowID, + RunId: parentRunID, + }, + ParentInitiatedId: parentInitiatedID, + ParentInitiatedVersion: parentInitiatedVersion, + ChildExecution: &workflowExecution, + Clock: parentClock, + CompletionEvent: completionEvent, + }) + switch err.(type) { + case nil: + // noop + case *serviceerror.NotFound, *serviceerror.NamespaceNotFound: + // parent gone, noop + default: + return err + } + } + + err = t.processParentClosePolicy( + ctx, + namespaceName.String(), + workflowExecution, + children, + ) + + if err != nil { + // This is some retryable error, not NotFound or NamespaceNotFound. + return err + } + + if task.DeleteAfterClose { + err = t.deleteExecution( + ctx, + task, + // Visibility is not updated (to avoid race condition for visibility tasks) and workflow execution is + // still open there. + true, + false, + &task.DeleteProcessStage, + ) + } + return err +} + +func (t *transferQueueActiveTaskExecutor) processCancelExecution( + ctx context.Context, + task *tasks.CancelExecutionTask, +) (retError error) { + ctx, cancel := context.WithTimeout(ctx, taskTimeout) + defer cancel() + + weContext, release, err := getWorkflowExecutionContextForTask(ctx, t.cache, task) + if err != nil { + return err + } + defer func() { release(retError) }() + + mutableState, err := loadMutableStateForTransferTask(ctx, weContext, task, t.metricHandler, t.logger) + if err != nil { + return err + } + if mutableState == nil || !mutableState.IsWorkflowExecutionRunning() { + return nil + } + + requestCancelInfo, ok := mutableState.GetRequestCancelInfo(task.InitiatedEventID) + if !ok { + return nil + } + err = CheckTaskVersion(t.shard, t.logger, mutableState.GetNamespaceEntry(), requestCancelInfo.Version, task.Version, task) + if err != nil { + return err + } + + initiatedEvent, err := mutableState.GetRequesteCancelExternalInitiatedEvent(ctx, task.InitiatedEventID) + if err != nil { + return err + } + attributes := initiatedEvent.GetRequestCancelExternalWorkflowExecutionInitiatedEventAttributes() + + targetNamespaceEntry, err := t.registry.GetNamespaceByID(namespace.ID(task.TargetNamespaceID)) + if err != nil { + if _, isNotFound := err.(*serviceerror.NamespaceNotFound); !isNotFound { + return err + } + // It is possible that target namespace got deleted. Record failure. + t.logger.Debug("Target namespace is not found.", tag.WorkflowNamespaceID(task.TargetNamespaceID)) + err = t.requestCancelExternalExecutionFailed( + ctx, + task, + weContext, + namespace.Name(task.TargetNamespaceID), // Use ID as namespace name because namespace is already deleted and name is used only for history. 
+ namespace.ID(task.TargetNamespaceID), + task.TargetWorkflowID, + task.TargetRunID, + enumspb.CANCEL_EXTERNAL_WORKFLOW_EXECUTION_FAILED_CAUSE_NAMESPACE_NOT_FOUND) + return err + } + targetNamespaceName := targetNamespaceEntry.Name() + + // handle workflow cancel itself + if task.NamespaceID == task.TargetNamespaceID && task.WorkflowID == task.TargetWorkflowID { + // it does not matter if the run ID is a mismatch + err = t.requestCancelExternalExecutionFailed( + ctx, + task, + weContext, + targetNamespaceName, + namespace.ID(task.TargetNamespaceID), + task.TargetWorkflowID, + task.TargetRunID, + enumspb.CANCEL_EXTERNAL_WORKFLOW_EXECUTION_FAILED_CAUSE_EXTERNAL_WORKFLOW_EXECUTION_NOT_FOUND) + return err + } + + if err = t.requestCancelExternalExecution( + ctx, + task, + targetNamespaceName, + requestCancelInfo, + attributes, + ); err != nil { + t.logger.Debug(fmt.Sprintf("Failed to cancel external workflow execution. Error: %v", err)) + + // Check to see if the error is non-transient, in which case add RequestCancelFailed + // event and complete transfer task by returning nil error. + if common.IsServiceTransientError(err) || common.IsContextDeadlineExceededErr(err) { + // for retryable error just return + return err + } + var failedCause enumspb.CancelExternalWorkflowExecutionFailedCause + switch err.(type) { + case *serviceerror.NotFound: + failedCause = enumspb.CANCEL_EXTERNAL_WORKFLOW_EXECUTION_FAILED_CAUSE_EXTERNAL_WORKFLOW_EXECUTION_NOT_FOUND + case *serviceerror.NamespaceNotFound: + failedCause = enumspb.CANCEL_EXTERNAL_WORKFLOW_EXECUTION_FAILED_CAUSE_NAMESPACE_NOT_FOUND + default: + t.logger.Error("Unexpected error type returned from RequestCancelWorkflowExecution API call.", tag.ErrorType(err), tag.Error(err)) + return err + } + return t.requestCancelExternalExecutionFailed( + ctx, + task, + weContext, + targetNamespaceName, + namespace.ID(task.TargetNamespaceID), + task.TargetWorkflowID, + task.TargetRunID, + failedCause, + ) + } + + // Record ExternalWorkflowExecutionCancelRequested in source execution + return t.requestCancelExternalExecutionCompleted( + ctx, + task, + weContext, + targetNamespaceName, + namespace.ID(task.TargetNamespaceID), + task.TargetWorkflowID, + task.TargetRunID, + ) +} + +func (t *transferQueueActiveTaskExecutor) processSignalExecution( + ctx context.Context, + task *tasks.SignalExecutionTask, +) (retError error) { + ctx, cancel := context.WithTimeout(ctx, taskTimeout) + defer cancel() + + weContext, release, err := getWorkflowExecutionContextForTask(ctx, t.cache, task) + if err != nil { + return err + } + defer func() { release(retError) }() + + mutableState, err := loadMutableStateForTransferTask(ctx, weContext, task, t.metricHandler, t.logger) + if err != nil { + return err + } + if mutableState == nil { + release(nil) // release(nil) so that the mutable state is not unloaded from cache + return consts.ErrWorkflowExecutionNotFound + } + + signalInfo, ok := mutableState.GetSignalInfo(task.InitiatedEventID) + if !ok { + // TODO: here we should also RemoveSignalMutableState from target workflow + // Otherwise, target SignalRequestID still can leak if shard restart after signalExternalExecutionCompleted + // To do that, probably need to add the SignalRequestID in transfer task. 
+ return nil + } + err = CheckTaskVersion(t.shard, t.logger, mutableState.GetNamespaceEntry(), signalInfo.Version, task.Version, task) + if err != nil { + return err + } + + if !mutableState.IsWorkflowExecutionRunning() { + release(nil) // release(nil) so that the mutable state is not unloaded from cache + return consts.ErrWorkflowCompleted + } + + initiatedEvent, err := mutableState.GetSignalExternalInitiatedEvent(ctx, task.InitiatedEventID) + if err != nil { + return err + } + attributes := initiatedEvent.GetSignalExternalWorkflowExecutionInitiatedEventAttributes() + + targetNamespaceEntry, err := t.registry.GetNamespaceByID(namespace.ID(task.TargetNamespaceID)) + if err != nil { + if _, isNotFound := err.(*serviceerror.NamespaceNotFound); !isNotFound { + return err + } + // It is possible that target namespace got deleted. Record failure. + t.logger.Debug("Target namespace is not found.", tag.WorkflowNamespaceID(task.TargetNamespaceID)) + return t.signalExternalExecutionFailed( + ctx, + task, + weContext, + namespace.Name(task.TargetNamespaceID), // Use ID as namespace name because namespace is already deleted and name is used only for history. + namespace.ID(task.TargetNamespaceID), + task.TargetWorkflowID, + task.TargetRunID, + attributes.Control, + enumspb.SIGNAL_EXTERNAL_WORKFLOW_EXECUTION_FAILED_CAUSE_NAMESPACE_NOT_FOUND, + ) + } + targetNamespaceName := targetNamespaceEntry.Name() + + // handle workflow signal itself + if task.NamespaceID == task.TargetNamespaceID && task.WorkflowID == task.TargetWorkflowID { + // it does not matter if the run ID is a mismatch + return t.signalExternalExecutionFailed( + ctx, + task, + weContext, + targetNamespaceName, + namespace.ID(task.TargetNamespaceID), + task.TargetWorkflowID, + task.TargetRunID, + attributes.Control, + enumspb.SIGNAL_EXTERNAL_WORKFLOW_EXECUTION_FAILED_CAUSE_EXTERNAL_WORKFLOW_EXECUTION_NOT_FOUND, + ) + } + + if err = t.signalExternalExecution( + ctx, + task, + targetNamespaceName, + signalInfo, + attributes, + ); err != nil { + t.logger.Debug("Failed to signal external workflow execution", tag.Error(err)) + + // Check to see if the error is non-transient, in which case add SignalFailed + // event and complete transfer task by returning nil error. 
+ if common.IsServiceTransientError(err) || common.IsContextDeadlineExceededErr(err) { + // for retryable error just return + return err + } + var failedCause enumspb.SignalExternalWorkflowExecutionFailedCause + switch err.(type) { + case *serviceerror.NotFound: + failedCause = enumspb.SIGNAL_EXTERNAL_WORKFLOW_EXECUTION_FAILED_CAUSE_EXTERNAL_WORKFLOW_EXECUTION_NOT_FOUND + case *serviceerror.NamespaceNotFound: + failedCause = enumspb.SIGNAL_EXTERNAL_WORKFLOW_EXECUTION_FAILED_CAUSE_NAMESPACE_NOT_FOUND + case *serviceerror.InvalidArgument: + failedCause = enumspb.SIGNAL_EXTERNAL_WORKFLOW_EXECUTION_FAILED_CAUSE_SIGNAL_COUNT_LIMIT_EXCEEDED + default: + t.logger.Error("Unexpected error type returned from SignalWorkflowExecution API call.", tag.ErrorType(err), tag.Error(err)) + return err + } + return t.signalExternalExecutionFailed( + ctx, + task, + weContext, + targetNamespaceName, + namespace.ID(task.TargetNamespaceID), + task.TargetWorkflowID, + task.TargetRunID, + attributes.Control, + failedCause, + ) + } + + err = t.signalExternalExecutionCompleted( + ctx, + task, + weContext, + targetNamespaceName, + namespace.ID(task.TargetNamespaceID), + task.TargetWorkflowID, + task.TargetRunID, + attributes.Control, + ) + if err != nil { + return err + } + + signalRequestID := signalInfo.GetRequestId() + + // release the weContext lock since we no longer need mutable state and + // the rest of logic is making RPC call, which takes time. + release(retError) + // remove signalRequestedID from target workflow, after Signal detail is removed from source workflow + _, err = t.historyRawClient.RemoveSignalMutableState(ctx, &historyservice.RemoveSignalMutableStateRequest{ + NamespaceId: task.TargetNamespaceID, + WorkflowExecution: &commonpb.WorkflowExecution{ + WorkflowId: task.TargetWorkflowID, + RunId: task.TargetRunID, + }, + RequestId: signalRequestID, + }) + return err +} + +func (t *transferQueueActiveTaskExecutor) processStartChildExecution( + ctx context.Context, + task *tasks.StartChildExecutionTask, +) (retError error) { + ctx, cancel := context.WithTimeout(ctx, taskTimeout) + defer cancel() + + weContext, release, err := getWorkflowExecutionContextForTask(ctx, t.cache, task) + if err != nil { + return err + } + defer func() { release(retError) }() + + mutableState, err := loadMutableStateForTransferTask(ctx, weContext, task, t.metricHandler, t.logger) + if err != nil { + return err + } + if mutableState == nil { + release(nil) // release(nil) so that the mutable state is not unloaded from cache + return consts.ErrWorkflowExecutionNotFound + } + + childInfo, ok := mutableState.GetChildExecutionInfo(task.InitiatedEventID) + if !ok { + release(nil) // release(nil) so that the mutable state is not unloaded from cache + return consts.ErrChildExecutionNotFound + } + err = CheckTaskVersion(t.shard, t.logger, mutableState.GetNamespaceEntry(), childInfo.Version, task.Version, task) + if err != nil { + return err + } + + // workflow running or not, child started or not, parent close policy is abandon or not + // 8 cases in total + workflowRunning := mutableState.IsWorkflowExecutionRunning() + childStarted := childInfo.StartedEventId != common.EmptyEventID + if !workflowRunning && (!childStarted || childInfo.ParentClosePolicy != enumspb.PARENT_CLOSE_POLICY_ABANDON) { + // three cases here: + // case 1: workflow not running, child started, parent close policy is not abandon + // case 2: workflow not running, child not started, parent close policy is not abandon + // case 3: workflow not running, child not 
started, parent close policy is abandon + // + // NOTE: ideally for case 3, we should continue to start child. However, with current start child + // and standby start child verification logic, we can't do that because: + // 1. Once workflow is closed, we can't update mutable state or record child started event. + // If the RPC call for scheduling first workflow task times out but the call actually succeeds on child workflow. + // Then the child workflow can run, complete and another unrelated workflow can reuse this workflowID. + // Now when the start child task retries, we can't rely on requestID to dedupe the start child call. (We can use runID instead of requestID to dedupe) + // 2. No update to mutable state and child started event means we are not able to replicate the information + // to the standby cluster, so standby start child logic won't be able to verify the child has started. + // To resolve the issue above, we need to + // 1. Start child workflow and schedule the first workflow task in one transaction. Use runID to perform deduplication + // 2. Standby start child logic need to verify if child workflow actually started instead of relying on the information + // in parent mutable state. + return nil + } + + // ChildExecution already started, just create WorkflowTask and complete transfer task + // If parent already closed, since child workflow started event already written to history, + // still schedule the workflowTask if the parent close policy is Abandon. + // If parent close policy cancel or terminate, parent close policy will be applied in another + // transfer task. + // case 4, 5: workflow started, child started, parent close policy is or is not abandon + // case 6: workflow closed, child started, parent close policy is abandon + if childStarted { + childExecution := &commonpb.WorkflowExecution{ + WorkflowId: childInfo.StartedWorkflowId, + RunId: childInfo.StartedRunId, + } + childClock := childInfo.Clock + // NOTE: do not access anything related mutable state after this lock release + // release the context lock since we no longer need mutable state and + // the rest of logic is making RPC call, which takes time. + release(nil) + + parentClock, err := t.shard.NewVectorClock() + if err != nil { + return err + } + return t.createFirstWorkflowTask(ctx, task.TargetNamespaceID, childExecution, parentClock, childClock) + } + + // remaining 2 cases: + // case 7, 8: workflow running, child not started, parent close policy is or is not abandon + + initiatedEvent, err := mutableState.GetChildExecutionInitiatedEvent(ctx, task.InitiatedEventID) + if err != nil { + return err + } + attributes := initiatedEvent.GetStartChildWorkflowExecutionInitiatedEventAttributes() + + var parentNamespaceName namespace.Name + if namespaceEntry, err := t.registry.GetNamespaceByID(namespace.ID(task.NamespaceID)); err != nil { + if _, isNotFound := err.(*serviceerror.NamespaceNotFound); !isNotFound { + return err + } + // It is possible that the parent namespace got deleted. Use namespaceID instead as this is only needed for the history event. + parentNamespaceName = namespace.Name(task.NamespaceID) + } else { + parentNamespaceName = namespaceEntry.Name() + } + + var targetNamespaceName namespace.Name + if namespaceEntry, err := t.registry.GetNamespaceByID(namespace.ID(task.TargetNamespaceID)); err != nil { + if _, isNotFound := err.(*serviceerror.NamespaceNotFound); !isNotFound { + return err + } + // It is possible that target namespace got deleted. Record failure. 
+ t.logger.Debug("Target namespace is not found.", tag.WorkflowNamespaceID(task.TargetNamespaceID)) + err = t.recordStartChildExecutionFailed( + ctx, + task, + weContext, + attributes, + enumspb.START_CHILD_WORKFLOW_EXECUTION_FAILED_CAUSE_NAMESPACE_NOT_FOUND, + ) + return err + } else { + targetNamespaceName = namespaceEntry.Name() + } + + // copy version stamp from parent to child if: + // - command says to use compatible version + // - parent is using versioning + var sourceVersionStamp *commonpb.WorkerVersionStamp + if attributes.UseCompatibleVersion { + sourceVersionStamp = worker_versioning.StampIfUsingVersioning(mutableState.GetWorkerVersionStamp()) + } + + childRunID, childClock, err := t.startWorkflow( + ctx, + task, + parentNamespaceName, + targetNamespaceName, + childInfo.CreateRequestId, + attributes, + sourceVersionStamp, + ) + if err != nil { + t.logger.Debug("Failed to start child workflow execution", tag.Error(err)) + if common.IsServiceTransientError(err) || common.IsContextDeadlineExceededErr(err) { + // for retryable error just return + return err + } + var failedCause enumspb.StartChildWorkflowExecutionFailedCause + switch err.(type) { + case *serviceerror.WorkflowExecutionAlreadyStarted: + failedCause = enumspb.START_CHILD_WORKFLOW_EXECUTION_FAILED_CAUSE_WORKFLOW_ALREADY_EXISTS + case *serviceerror.NamespaceNotFound: + failedCause = enumspb.START_CHILD_WORKFLOW_EXECUTION_FAILED_CAUSE_NAMESPACE_NOT_FOUND + default: + t.logger.Error("Unexpected error type returned from StartWorkflowExecution API call for child workflow.", tag.ErrorType(err), tag.Error(err)) + return err + } + + return t.recordStartChildExecutionFailed( + ctx, + task, + weContext, + attributes, + failedCause, + ) + } + + t.logger.Debug("Child Execution started successfully", + tag.WorkflowID(attributes.WorkflowId), tag.WorkflowRunID(childRunID)) + + // Child execution is successfully started, record ChildExecutionStartedEvent in parent execution + err = t.recordChildExecutionStarted(ctx, task, weContext, attributes, childRunID, childClock) + if err != nil { + return err + } + + // NOTE: do not access anything related mutable state after this lock is released. + // Release the context lock since we no longer need mutable state and + // the rest of logic is making RPC call, which takes time. 
+ release(nil) + parentClock, err := t.shard.NewVectorClock() + if err != nil { + return err + } + return t.createFirstWorkflowTask(ctx, task.TargetNamespaceID, &commonpb.WorkflowExecution{ + WorkflowId: task.TargetWorkflowID, + RunId: childRunID, + }, parentClock, childClock) +} + +func (t *transferQueueActiveTaskExecutor) processResetWorkflow( + ctx context.Context, + task *tasks.ResetWorkflowTask, +) (retError error) { + ctx, cancel := context.WithTimeout(ctx, taskTimeout) + defer cancel() + + currentContext, currentRelease, err := getWorkflowExecutionContextForTask(ctx, t.cache, task) + if err != nil { + return err + } + defer func() { currentRelease(retError) }() + + currentMutableState, err := loadMutableStateForTransferTask(ctx, currentContext, task, t.metricHandler, t.logger) + if err != nil { + return err + } + if currentMutableState == nil { + return nil + } + + logger := log.With( + t.logger, + tag.WorkflowNamespaceID(task.NamespaceID), + tag.WorkflowID(task.WorkflowID), + tag.WorkflowRunID(task.RunID), + ) + + if !currentMutableState.IsWorkflowExecutionRunning() { + // it means this might not be the current run anymore, we need to check + var resp *persistence.GetCurrentExecutionResponse + resp, err = t.shard.GetCurrentExecution(ctx, &persistence.GetCurrentExecutionRequest{ + ShardID: t.shard.GetShardID(), + NamespaceID: task.NamespaceID, + WorkflowID: task.WorkflowID, + }) + if err != nil { + return err + } + if resp.RunID != task.RunID { + logger.Warn("Auto-Reset is skipped, because current run is stale.") + return nil + } + } + // TODO: current reset doesn't allow childWFs, in the future we will release this restriction + if len(currentMutableState.GetPendingChildExecutionInfos()) > 0 { + logger.Warn("Auto-Reset is skipped, because current run has pending child executions.") + return nil + } + + currentStartVersion, err := currentMutableState.GetStartVersion() + if err != nil { + return err + } + + err = CheckTaskVersion(t.shard, t.logger, currentMutableState.GetNamespaceEntry(), currentStartVersion, task.Version, task) + if err != nil { + return err + } + + executionInfo := currentMutableState.GetExecutionInfo() + executionState := currentMutableState.GetExecutionState() + namespaceEntry, err := t.registry.GetNamespaceByID(namespace.ID(executionInfo.NamespaceId)) + if err != nil { + return err + } + logger = log.With(logger, tag.WorkflowNamespace(namespaceEntry.Name().String())) + + reason, resetPoint := workflow.FindAutoResetPoint(t.shard.GetTimeSource(), namespaceEntry.VerifyBinaryChecksum, executionInfo.AutoResetPoints) + if resetPoint == nil { + logger.Warn("Auto-Reset is skipped, because reset point is not found.") + return nil + } + logger = log.With( + logger, + tag.WorkflowResetBaseRunID(resetPoint.GetRunId()), + tag.WorkflowBinaryChecksum(resetPoint.GetBinaryChecksum()), + tag.WorkflowEventID(resetPoint.GetFirstWorkflowTaskCompletedId()), + ) + + var baseContext workflow.Context + var baseMutableState workflow.MutableState + var baseRelease wcache.ReleaseCacheFunc + if resetPoint.GetRunId() == executionState.RunId { + baseContext = currentContext + baseMutableState = currentMutableState + baseRelease = currentRelease + } else { + baseContext, baseRelease, err = getWorkflowExecutionContext( + ctx, + t.cache, + namespace.ID(task.NamespaceID), + commonpb.WorkflowExecution{ + WorkflowId: task.WorkflowID, + RunId: resetPoint.GetRunId(), + }, + ) + if err != nil { + return err + } + defer func() { baseRelease(retError) }() + baseMutableState, err = 
loadMutableStateForTransferTask(ctx, baseContext, task, t.metricHandler, t.logger) + if err != nil { + return err + } + if baseMutableState == nil { + return nil + } + } + + // NOTE: reset need to go through history which may take a longer time, + // so it's using its own timeout + return t.resetWorkflow( + ctx, + task, + reason, + resetPoint, + baseMutableState, + currentContext, + currentMutableState, + logger, + ) +} + +func (t *transferQueueActiveTaskExecutor) recordChildExecutionStarted( + ctx context.Context, + task *tasks.StartChildExecutionTask, + context workflow.Context, + initiatedAttributes *historypb.StartChildWorkflowExecutionInitiatedEventAttributes, + runID string, + clock *clockspb.VectorClock, +) error { + return t.updateWorkflowExecution(ctx, context, true, + func(mutableState workflow.MutableState) error { + if !mutableState.IsWorkflowExecutionRunning() { + return serviceerror.NewNotFound("Workflow execution already completed.") + } + + ci, ok := mutableState.GetChildExecutionInfo(task.InitiatedEventID) + if !ok || ci.StartedEventId != common.EmptyEventID { + return serviceerror.NewNotFound("Pending child execution not found.") + } + + _, err := mutableState.AddChildWorkflowExecutionStartedEvent( + &commonpb.WorkflowExecution{ + WorkflowId: task.TargetWorkflowID, + RunId: runID, + }, + initiatedAttributes.WorkflowType, + task.InitiatedEventID, + initiatedAttributes.Header, + clock, + ) + + return err + }) +} + +func (t *transferQueueActiveTaskExecutor) recordStartChildExecutionFailed( + ctx context.Context, + task *tasks.StartChildExecutionTask, + context workflow.Context, + initiatedAttributes *historypb.StartChildWorkflowExecutionInitiatedEventAttributes, + failedCause enumspb.StartChildWorkflowExecutionFailedCause, +) error { + return t.updateWorkflowExecution(ctx, context, true, + func(mutableState workflow.MutableState) error { + if !mutableState.IsWorkflowExecutionRunning() { + return serviceerror.NewNotFound("Workflow execution already completed.") + } + + ci, ok := mutableState.GetChildExecutionInfo(task.InitiatedEventID) + if !ok || ci.StartedEventId != common.EmptyEventID { + return serviceerror.NewNotFound("Pending child execution not found.") + } + + _, err := mutableState.AddStartChildWorkflowExecutionFailedEvent( + task.InitiatedEventID, + failedCause, + initiatedAttributes, + ) + return err + }) +} + +// createFirstWorkflowTask is used by StartChildExecution transfer task to create the first workflow task for +// child execution. 
+func (t *transferQueueActiveTaskExecutor) createFirstWorkflowTask( + ctx context.Context, + namespaceID string, + execution *commonpb.WorkflowExecution, + parentClock *clockspb.VectorClock, + childClock *clockspb.VectorClock, +) error { + _, err := t.historyRawClient.ScheduleWorkflowTask(ctx, &historyservice.ScheduleWorkflowTaskRequest{ + NamespaceId: namespaceID, + WorkflowExecution: execution, + IsFirstWorkflowTask: true, + ParentClock: parentClock, + ChildClock: childClock, + }) + return err +} + +func (t *transferQueueActiveTaskExecutor) requestCancelExternalExecutionCompleted( + ctx context.Context, + task *tasks.CancelExecutionTask, + context workflow.Context, + targetNamespace namespace.Name, + targetNamespaceID namespace.ID, + targetWorkflowID string, + targetRunID string, +) error { + return t.updateWorkflowExecution(ctx, context, true, + func(mutableState workflow.MutableState) error { + if !mutableState.IsWorkflowExecutionRunning() { + return serviceerror.NewNotFound("Workflow execution already completed.") + } + + _, ok := mutableState.GetRequestCancelInfo(task.InitiatedEventID) + if !ok { + return workflow.ErrMissingRequestCancelInfo + } + + _, err := mutableState.AddExternalWorkflowExecutionCancelRequested( + task.InitiatedEventID, + targetNamespace, + targetNamespaceID, + targetWorkflowID, + targetRunID, + ) + return err + }) +} + +func (t *transferQueueActiveTaskExecutor) signalExternalExecutionCompleted( + ctx context.Context, + task *tasks.SignalExecutionTask, + context workflow.Context, + targetNamespace namespace.Name, + targetNamespaceID namespace.ID, + targetWorkflowID string, + targetRunID string, + control string, +) error { + return t.updateWorkflowExecution(ctx, context, true, + func(mutableState workflow.MutableState) error { + if !mutableState.IsWorkflowExecutionRunning() { + return serviceerror.NewNotFound("Workflow execution already completed.") + } + + _, ok := mutableState.GetSignalInfo(task.InitiatedEventID) + if !ok { + return workflow.ErrMissingSignalInfo + } + + _, err := mutableState.AddExternalWorkflowExecutionSignaled( + task.InitiatedEventID, + targetNamespace, + targetNamespaceID, + targetWorkflowID, + targetRunID, + control, + ) + return err + }) +} + +func (t *transferQueueActiveTaskExecutor) requestCancelExternalExecutionFailed( + ctx context.Context, + task *tasks.CancelExecutionTask, + context workflow.Context, + targetNamespace namespace.Name, + targetNamespaceID namespace.ID, + targetWorkflowID string, + targetRunID string, + failedCause enumspb.CancelExternalWorkflowExecutionFailedCause, +) error { + return t.updateWorkflowExecution(ctx, context, true, + func(mutableState workflow.MutableState) error { + if !mutableState.IsWorkflowExecutionRunning() { + return serviceerror.NewNotFound("Workflow execution already completed.") + } + + _, ok := mutableState.GetRequestCancelInfo(task.InitiatedEventID) + if !ok { + return workflow.ErrMissingRequestCancelInfo + } + + _, err := mutableState.AddRequestCancelExternalWorkflowExecutionFailedEvent( + task.InitiatedEventID, + targetNamespace, + targetNamespaceID, + targetWorkflowID, + targetRunID, + failedCause, + ) + return err + }) +} + +func (t *transferQueueActiveTaskExecutor) signalExternalExecutionFailed( + ctx context.Context, + task *tasks.SignalExecutionTask, + context workflow.Context, + targetNamespace namespace.Name, + targetNamespaceID namespace.ID, + targetWorkflowID string, + targetRunID string, + control string, + failedCause enumspb.SignalExternalWorkflowExecutionFailedCause, +) error { 
+ return t.updateWorkflowExecution(ctx, context, true, + func(mutableState workflow.MutableState) error { + if !mutableState.IsWorkflowExecutionRunning() { + return serviceerror.NewNotFound("Workflow is not running.") + } + + _, ok := mutableState.GetSignalInfo(task.InitiatedEventID) + if !ok { + return workflow.ErrMissingSignalInfo + } + + _, err := mutableState.AddSignalExternalWorkflowExecutionFailedEvent( + task.InitiatedEventID, + targetNamespace, + targetNamespaceID, + targetWorkflowID, + targetRunID, + control, + failedCause, + ) + return err + }) +} + +func (t *transferQueueActiveTaskExecutor) updateWorkflowExecution( + ctx context.Context, + context workflow.Context, + createWorkflowTask bool, + action func(workflow.MutableState) error, +) error { + mutableState, err := context.LoadMutableState(ctx) + if err != nil { + return err + } + + if err := action(mutableState); err != nil { + return err + } + + if createWorkflowTask { + // Create a transfer task to schedule a workflow task + err := workflow.ScheduleWorkflowTask(mutableState) + if err != nil { + return err + } + } + + return context.UpdateWorkflowExecutionAsActive(ctx) +} + +func (t *transferQueueActiveTaskExecutor) requestCancelExternalExecution( + ctx context.Context, + task *tasks.CancelExecutionTask, + targetNamespace namespace.Name, + requestCancelInfo *persistencespb.RequestCancelInfo, + attributes *historypb.RequestCancelExternalWorkflowExecutionInitiatedEventAttributes, +) error { + request := &historyservice.RequestCancelWorkflowExecutionRequest{ + NamespaceId: task.TargetNamespaceID, + CancelRequest: &workflowservice.RequestCancelWorkflowExecutionRequest{ + Namespace: targetNamespace.String(), + WorkflowExecution: &commonpb.WorkflowExecution{ + WorkflowId: task.TargetWorkflowID, + RunId: task.TargetRunID, + }, + Identity: consts.IdentityHistoryService, + // Use the same request ID to dedupe RequestCancelWorkflowExecution calls + RequestId: requestCancelInfo.GetCancelRequestId(), + Reason: attributes.Reason, + }, + ExternalInitiatedEventId: task.InitiatedEventID, + ExternalWorkflowExecution: &commonpb.WorkflowExecution{ + WorkflowId: task.WorkflowID, + RunId: task.RunID, + }, + ChildWorkflowOnly: task.TargetChildWorkflowOnly, + } + + _, err := t.historyRawClient.RequestCancelWorkflowExecution(ctx, request) + return err +} + +func (t *transferQueueActiveTaskExecutor) signalExternalExecution( + ctx context.Context, + task *tasks.SignalExecutionTask, + targetNamespace namespace.Name, + signalInfo *persistencespb.SignalInfo, + attributes *historypb.SignalExternalWorkflowExecutionInitiatedEventAttributes, +) error { + request := &historyservice.SignalWorkflowExecutionRequest{ + NamespaceId: task.TargetNamespaceID, + SignalRequest: &workflowservice.SignalWorkflowExecutionRequest{ + Namespace: targetNamespace.String(), + WorkflowExecution: &commonpb.WorkflowExecution{ + WorkflowId: task.TargetWorkflowID, + RunId: task.TargetRunID, + }, + Identity: consts.IdentityHistoryService, + SignalName: attributes.SignalName, + Input: attributes.Input, + // Use same request ID to deduplicate SignalWorkflowExecution calls + RequestId: signalInfo.GetRequestId(), + Control: attributes.Control, + Header: attributes.Header, + }, + ExternalWorkflowExecution: &commonpb.WorkflowExecution{ + WorkflowId: task.WorkflowID, + RunId: task.RunID, + }, + ChildWorkflowOnly: task.TargetChildWorkflowOnly, + } + + _, err := t.historyRawClient.SignalWorkflowExecution(ctx, request) + return err +} + +func (t *transferQueueActiveTaskExecutor) 
startWorkflow( + ctx context.Context, + task *tasks.StartChildExecutionTask, + namespace namespace.Name, + targetNamespace namespace.Name, + childRequestID string, + attributes *historypb.StartChildWorkflowExecutionInitiatedEventAttributes, + sourceVersionStamp *commonpb.WorkerVersionStamp, +) (string, *clockspb.VectorClock, error) { + request := common.CreateHistoryStartWorkflowRequest( + task.TargetNamespaceID, + &workflowservice.StartWorkflowExecutionRequest{ + Namespace: targetNamespace.String(), + WorkflowId: attributes.WorkflowId, + WorkflowType: attributes.WorkflowType, + TaskQueue: attributes.TaskQueue, + Input: attributes.Input, + Header: attributes.Header, + WorkflowExecutionTimeout: attributes.WorkflowExecutionTimeout, + WorkflowRunTimeout: attributes.WorkflowRunTimeout, + WorkflowTaskTimeout: attributes.WorkflowTaskTimeout, + + // Use the same request ID to dedupe StartWorkflowExecution calls + RequestId: childRequestID, + WorkflowIdReusePolicy: attributes.WorkflowIdReusePolicy, + RetryPolicy: attributes.RetryPolicy, + CronSchedule: attributes.CronSchedule, + Memo: attributes.Memo, + SearchAttributes: attributes.SearchAttributes, + }, + &workflowspb.ParentExecutionInfo{ + NamespaceId: task.NamespaceID, + Namespace: namespace.String(), + Execution: &commonpb.WorkflowExecution{ + WorkflowId: task.WorkflowID, + RunId: task.RunID, + }, + InitiatedId: task.InitiatedEventID, + InitiatedVersion: task.Version, + Clock: vclock.NewVectorClock(t.shard.GetClusterMetadata().GetClusterID(), t.shard.GetShardID(), task.TaskID), + }, + t.shard.GetTimeSource().Now(), + ) + + request.SourceVersionStamp = sourceVersionStamp + + response, err := t.historyRawClient.StartWorkflowExecution(ctx, request) + if err != nil { + return "", nil, err + } + return response.GetRunId(), response.GetClock(), nil +} + +func (t *transferQueueActiveTaskExecutor) resetWorkflow( + ctx context.Context, + task *tasks.ResetWorkflowTask, + reason string, + resetPoint *workflowpb.ResetPointInfo, + baseMutableState workflow.MutableState, + currentContext workflow.Context, + currentMutableState workflow.MutableState, + logger log.Logger, +) error { + // the actual reset operation needs to read history and may not be able to complete within + // the original context timeout. + // create a new context with a longer timeout, but retain all existing context values. 
+ resetWorkflowCtx, cancel := rpc.ResetContextTimeout(ctx, taskHistoryOpTimeout) + defer cancel() + + namespaceID := namespace.ID(task.NamespaceID) + workflowID := task.WorkflowID + baseRunID := baseMutableState.GetExecutionState().GetRunId() + + resetRunID := uuid.New() + baseRebuildLastEventID := resetPoint.GetFirstWorkflowTaskCompletedId() - 1 + baseVersionHistories := baseMutableState.GetExecutionInfo().GetVersionHistories() + baseCurrentVersionHistory, err := versionhistory.GetCurrentVersionHistory(baseVersionHistories) + if err != nil { + return err + } + baseRebuildLastEventVersion, err := versionhistory.GetVersionHistoryEventVersion(baseCurrentVersionHistory, baseRebuildLastEventID) + if err != nil { + return err + } + baseCurrentBranchToken := baseCurrentVersionHistory.GetBranchToken() + baseNextEventID := baseMutableState.GetNextEventID() + + err = t.workflowResetter.ResetWorkflow( + resetWorkflowCtx, + namespaceID, + workflowID, + baseRunID, + baseCurrentBranchToken, + baseRebuildLastEventID, + baseRebuildLastEventVersion, + baseNextEventID, + resetRunID, + uuid.New(), + ndc.NewWorkflow( + resetWorkflowCtx, + t.registry, + t.shard.GetClusterMetadata(), + currentContext, + currentMutableState, + wcache.NoopReleaseFn, // this is fine since caller will defer on release + ), + reason, + nil, + enumspb.RESET_REAPPLY_TYPE_SIGNAL, + ) + + switch err.(type) { + case nil: + return nil + + case *serviceerror.NotFound, *serviceerror.NamespaceNotFound: + // This means the reset point is corrupted and not retryable. + // There must be a bug in our system that we must fix (for example, history is not the same in active/passive). + t.metricHandler.Counter(metrics.AutoResetPointCorruptionCounter.GetMetricName()).Record( + 1, + metrics.OperationTag(metrics.TransferQueueProcessorScope), + ) + logger.Error("Auto-Reset workflow failed and not retryable. The reset point is corrupted.", tag.Error(err)) + return nil + + default: + // log this error and retry + logger.Error("Auto-Reset workflow failed", tag.Error(err)) + return err + } +} + +func (t *transferQueueActiveTaskExecutor) processParentClosePolicy( + ctx context.Context, + parentNamespaceName string, + parentExecution commonpb.WorkflowExecution, + childInfos map[int64]*persistencespb.ChildExecutionInfo, +) error { + if len(childInfos) == 0 { + return nil + } + + scope := t.metricHandler.WithTags(metrics.OperationTag(metrics.TransferActiveTaskCloseExecutionScope)) + + if t.shard.GetConfig().EnableParentClosePolicyWorker() && + len(childInfos) >= t.shard.GetConfig().ParentClosePolicyThreshold(parentNamespaceName) { + + executions := make([]parentclosepolicy.RequestDetail, 0, len(childInfos)) + for _, childInfo := range childInfos { + if childInfo.ParentClosePolicy == enumspb.PARENT_CLOSE_POLICY_ABANDON { + continue + } + + childNamespaceID := namespace.ID(childInfo.GetNamespaceId()) + if childNamespaceID.IsEmpty() { + // TODO (alex): Remove after childInfo.NamespaceId is back filled. Backward compatibility: old childInfo doesn't have NamespaceId set. + // TODO (alex): consider reverse lookup of namespace name from ID but namespace name is not actually used. + var err error + childNamespaceID, err = t.registry.GetNamespaceID(namespace.Name(childInfo.GetNamespace())) + switch err.(type) { + case nil: + case *serviceerror.NamespaceNotFound: + // If child namespace is deleted there is nothing to close. 
+ continue + default: + return err + } + } + + executions = append(executions, parentclosepolicy.RequestDetail{ + Namespace: childInfo.Namespace, + NamespaceID: childNamespaceID.String(), + WorkflowID: childInfo.StartedWorkflowId, + RunID: childInfo.StartedRunId, + Policy: childInfo.ParentClosePolicy, + }) + } + + if len(executions) == 0 { + return nil + } + + request := parentclosepolicy.Request{ + ParentExecution: parentExecution, + Executions: executions, + } + return t.parentClosePolicyClient.SendParentClosePolicyRequest(ctx, request) + } + + for _, childInfo := range childInfos { + err := t.applyParentClosePolicy(ctx, &parentExecution, childInfo) + switch err.(type) { + case nil: + scope.Counter(metrics.ParentClosePolicyProcessorSuccess.GetMetricName()).Record(1) + case *serviceerror.NotFound: + // If child execution is deleted there is nothing to close. + case *serviceerror.NamespaceNotFound: + // If child namespace is deleted there is nothing to close. + default: + scope.Counter(metrics.ParentClosePolicyProcessorFailures.GetMetricName()).Record(1) + return err + } + } + return nil +} + +func (t *transferQueueActiveTaskExecutor) applyParentClosePolicy( + ctx context.Context, + parentExecution *commonpb.WorkflowExecution, + childInfo *persistencespb.ChildExecutionInfo, +) error { + switch childInfo.ParentClosePolicy { + case enumspb.PARENT_CLOSE_POLICY_ABANDON: + // noop + return nil + + case enumspb.PARENT_CLOSE_POLICY_TERMINATE: + childNamespaceID := namespace.ID(childInfo.GetNamespaceId()) + if childNamespaceID.IsEmpty() { + // TODO (alex): Remove after childInfo.NamespaceId is back filled. Backward compatibility: old childInfo doesn't have NamespaceId set. + // TODO (alex): consider reverse lookup of namespace name from ID but namespace name is not actually used. + var err error + childNamespaceID, err = t.registry.GetNamespaceID(namespace.Name(childInfo.GetNamespace())) + if err != nil { + return err + } + } + _, err := t.historyRawClient.TerminateWorkflowExecution(ctx, &historyservice.TerminateWorkflowExecutionRequest{ + NamespaceId: childNamespaceID.String(), + TerminateRequest: &workflowservice.TerminateWorkflowExecutionRequest{ + Namespace: childInfo.GetNamespace(), + WorkflowExecution: &commonpb.WorkflowExecution{ + WorkflowId: childInfo.GetStartedWorkflowId(), + }, + // Include StartedRunID as FirstExecutionRunID on the request to allow child to be terminated across runs. + // If the child does continue as new it still propagates the RunID of first execution. + FirstExecutionRunId: childInfo.GetStartedRunId(), + Reason: "by parent close policy", + Identity: consts.IdentityHistoryService, + }, + ExternalWorkflowExecution: parentExecution, + ChildWorkflowOnly: true, + }) + return err + + case enumspb.PARENT_CLOSE_POLICY_REQUEST_CANCEL: + childNamespaceID := namespace.ID(childInfo.GetNamespaceId()) + if childNamespaceID.IsEmpty() { + // TODO (alex): Remove after childInfo.NamespaceId is back filled. Backward compatibility: old childInfo doesn't have NamespaceId set. + // TODO (alex): consider reverse lookup of namespace name from ID but namespace name is not actually used. 
+ var err error + childNamespaceID, err = t.registry.GetNamespaceID(namespace.Name(childInfo.GetNamespace())) + if err != nil { + return err + } + } + + _, err := t.historyRawClient.RequestCancelWorkflowExecution(ctx, &historyservice.RequestCancelWorkflowExecutionRequest{ + NamespaceId: childNamespaceID.String(), + CancelRequest: &workflowservice.RequestCancelWorkflowExecutionRequest{ + Namespace: childInfo.GetNamespace(), + WorkflowExecution: &commonpb.WorkflowExecution{ + WorkflowId: childInfo.GetStartedWorkflowId(), + }, + // Include StartedRunID as FirstExecutionRunID on the request to allow child to be canceled across runs. + // If the child does continue as new it still propagates the RunID of first execution. + FirstExecutionRunId: childInfo.GetStartedRunId(), + Identity: consts.IdentityHistoryService, + }, + ExternalWorkflowExecution: parentExecution, + ChildWorkflowOnly: true, + }) + return err + + default: + return serviceerror.NewInternal(fmt.Sprintf("unknown parent close policy: %v", childInfo.ParentClosePolicy)) + } +} + +func copyChildWorkflowInfos( + input map[int64]*persistencespb.ChildExecutionInfo, +) map[int64]*persistencespb.ChildExecutionInfo { + result := make(map[int64]*persistencespb.ChildExecutionInfo) + if input == nil { + return result + } + + for k, v := range input { + result[k] = common.CloneProto(v) + } + return result +} diff -Nru temporal-1.21.5-1/src/service/history/transfer_queue_active_task_executor_test.go temporal-1.22.5/src/service/history/transfer_queue_active_task_executor_test.go --- temporal-1.21.5-1/src/service/history/transfer_queue_active_task_executor_test.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/service/history/transfer_queue_active_task_executor_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,2810 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package history + +import ( + "context" + "fmt" + "math/rand" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/pborman/uuid" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "google.golang.org/grpc" + + commandpb "go.temporal.io/api/command/v1" + commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" + historypb "go.temporal.io/api/history/v1" + "go.temporal.io/api/serviceerror" + taskqueuepb "go.temporal.io/api/taskqueue/v1" + "go.temporal.io/api/workflowservice/v1" + + "go.temporal.io/server/api/historyservice/v1" + "go.temporal.io/server/api/historyservicemock/v1" + "go.temporal.io/server/api/matchingservice/v1" + "go.temporal.io/server/api/matchingservicemock/v1" + persistencespb "go.temporal.io/server/api/persistence/v1" + workflowspb "go.temporal.io/server/api/workflow/v1" + "go.temporal.io/server/common" + "go.temporal.io/server/common/archiver" + "go.temporal.io/server/common/archiver/provider" + "go.temporal.io/server/common/backoff" + "go.temporal.io/server/common/clock" + "go.temporal.io/server/common/cluster" + "go.temporal.io/server/common/convert" + "go.temporal.io/server/common/definition" + dc "go.temporal.io/server/common/dynamicconfig" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/namespace" + "go.temporal.io/server/common/payload" + "go.temporal.io/server/common/payloads" + "go.temporal.io/server/common/persistence" + "go.temporal.io/server/common/persistence/versionhistory" + "go.temporal.io/server/common/persistence/visibility/manager" + "go.temporal.io/server/common/primitives/timestamp" + "go.temporal.io/server/common/searchattribute" + "go.temporal.io/server/common/worker_versioning" + "go.temporal.io/server/service/history/configs" + "go.temporal.io/server/service/history/consts" + "go.temporal.io/server/service/history/deletemanager" + "go.temporal.io/server/service/history/events" + "go.temporal.io/server/service/history/queues" + "go.temporal.io/server/service/history/shard" + "go.temporal.io/server/service/history/tasks" + "go.temporal.io/server/service/history/tests" + "go.temporal.io/server/service/history/vclock" + "go.temporal.io/server/service/history/workflow" + wcache "go.temporal.io/server/service/history/workflow/cache" + warchiver "go.temporal.io/server/service/worker/archiver" + "go.temporal.io/server/service/worker/parentclosepolicy" +) + +type ( + transferQueueActiveTaskExecutorSuite struct { + suite.Suite + *require.Assertions + + controller *gomock.Controller + mockShard *shard.ContextTest + mockTxProcessor *queues.MockQueue + mockTimerProcessor *queues.MockQueue + mockNamespaceCache *namespace.MockRegistry + mockMatchingClient *matchingservicemock.MockMatchingServiceClient + mockHistoryClient *historyservicemock.MockHistoryServiceClient + mockClusterMetadata *cluster.MockMetadata + mockSearchAttributesProvider *searchattribute.MockProvider + mockVisibilityManager *manager.MockVisibilityManager + + mockExecutionMgr *persistence.MockExecutionManager + mockArchivalClient *warchiver.MockClient + mockArchivalMetadata archiver.MetadataMock + mockArchiverProvider *provider.MockArchiverProvider + mockParentClosePolicyClient *parentclosepolicy.MockClient + + workflowCache wcache.Cache + logger log.Logger + namespaceID namespace.ID + namespace namespace.Name + namespaceEntry *namespace.Namespace + targetNamespaceID namespace.ID + targetNamespace namespace.Name + targetNamespaceEntry *namespace.Namespace + 
childNamespaceID namespace.ID + childNamespace namespace.Name + childNamespaceEntry *namespace.Namespace + version int64 + now time.Time + timeSource *clock.EventTimeSource + transferQueueActiveTaskExecutor *transferQueueActiveTaskExecutor + } +) + +var defaultWorkflowTaskCompletionLimits = workflow.WorkflowTaskCompletionLimits{MaxResetPoints: configs.DefaultHistoryMaxAutoResetPoints, MaxSearchAttributeValueSize: 2048} + +func TestTransferQueueActiveTaskExecutorSuite(t *testing.T) { + s := new(transferQueueActiveTaskExecutorSuite) + suite.Run(t, s) +} + +func (s *transferQueueActiveTaskExecutorSuite) SetupSuite() { +} + +func (s *transferQueueActiveTaskExecutorSuite) TearDownSuite() { +} + +func (s *transferQueueActiveTaskExecutorSuite) SetupTest() { + s.Assertions = require.New(s.T()) + + s.namespaceID = tests.NamespaceID + s.namespace = tests.Namespace + s.namespaceEntry = tests.GlobalNamespaceEntry + s.targetNamespaceID = tests.TargetNamespaceID + s.targetNamespace = tests.TargetNamespace + s.targetNamespaceEntry = tests.GlobalTargetNamespaceEntry + s.childNamespaceID = tests.ChildNamespaceID + s.childNamespace = tests.ChildNamespace + s.childNamespaceEntry = tests.GlobalChildNamespaceEntry + s.version = s.namespaceEntry.FailoverVersion() + s.now = time.Now().UTC() + s.timeSource = clock.NewEventTimeSource().Update(s.now) + + s.controller = gomock.NewController(s.T()) + s.mockTxProcessor = queues.NewMockQueue(s.controller) + s.mockTimerProcessor = queues.NewMockQueue(s.controller) + s.mockTxProcessor.EXPECT().Category().Return(tasks.CategoryTransfer).AnyTimes() + s.mockTimerProcessor.EXPECT().Category().Return(tasks.CategoryTimer).AnyTimes() + s.mockTxProcessor.EXPECT().NotifyNewTasks(gomock.Any()).AnyTimes() + s.mockTimerProcessor.EXPECT().NotifyNewTasks(gomock.Any()).AnyTimes() + + config := tests.NewDynamicConfig() + s.mockShard = shard.NewTestContextWithTimeSource( + s.controller, + &persistencespb.ShardInfo{ + ShardId: 1, + RangeId: 1, + }, + config, + s.timeSource, + ) + s.mockShard.SetEventsCacheForTesting(events.NewEventsCache( + s.mockShard.GetShardID(), + s.mockShard.GetConfig().EventsCacheMaxSizeBytes(), + s.mockShard.GetConfig().EventsCacheTTL(), + s.mockShard.GetExecutionManager(), + false, + s.mockShard.GetLogger(), + s.mockShard.GetMetricsHandler(), + )) + + s.mockParentClosePolicyClient = parentclosepolicy.NewMockClient(s.controller) + s.mockArchivalClient = warchiver.NewMockClient(s.controller) + s.mockMatchingClient = s.mockShard.Resource.MatchingClient + s.mockHistoryClient = s.mockShard.Resource.HistoryClient + s.mockExecutionMgr = s.mockShard.Resource.ExecutionMgr + s.mockClusterMetadata = s.mockShard.Resource.ClusterMetadata + s.mockSearchAttributesProvider = s.mockShard.Resource.SearchAttributesProvider + s.mockVisibilityManager = s.mockShard.Resource.VisibilityManager + s.mockArchivalMetadata = s.mockShard.Resource.ArchivalMetadata + s.mockArchiverProvider = s.mockShard.Resource.ArchiverProvider + s.mockNamespaceCache = s.mockShard.Resource.NamespaceCache + s.mockNamespaceCache.EXPECT().GetNamespaceByID(tests.NamespaceID).Return(tests.GlobalNamespaceEntry, nil).AnyTimes() + s.mockNamespaceCache.EXPECT().GetNamespace(tests.Namespace).Return(tests.GlobalNamespaceEntry, nil).AnyTimes() + s.mockNamespaceCache.EXPECT().GetNamespaceName(tests.NamespaceID).Return(tests.Namespace, nil).AnyTimes() + s.mockNamespaceCache.EXPECT().GetNamespaceByID(tests.TargetNamespaceID).Return(tests.GlobalTargetNamespaceEntry, nil).AnyTimes() + 
s.mockNamespaceCache.EXPECT().GetNamespace(tests.TargetNamespace).Return(tests.GlobalTargetNamespaceEntry, nil).AnyTimes() + s.mockNamespaceCache.EXPECT().GetNamespaceByID(tests.ParentNamespaceID).Return(tests.GlobalParentNamespaceEntry, nil).AnyTimes() + s.mockNamespaceCache.EXPECT().GetNamespace(tests.ParentNamespace).Return(tests.GlobalParentNamespaceEntry, nil).AnyTimes() + s.mockNamespaceCache.EXPECT().GetNamespaceByID(tests.ChildNamespaceID).Return(tests.GlobalChildNamespaceEntry, nil).AnyTimes() + s.mockNamespaceCache.EXPECT().GetNamespace(tests.ChildNamespace).Return(tests.GlobalChildNamespaceEntry, nil).AnyTimes() + s.mockNamespaceCache.EXPECT().GetNamespaceByID(tests.MissedNamespaceID).Return(nil, serviceerror.NewNamespaceNotFound(tests.MissedNamespaceID.String())).AnyTimes() + s.mockClusterMetadata.EXPECT().GetClusterID().Return(tests.Version).AnyTimes() + s.mockClusterMetadata.EXPECT().IsVersionFromSameCluster(tests.Version, tests.Version).Return(true).AnyTimes() + s.mockClusterMetadata.EXPECT().GetCurrentClusterName().Return(cluster.TestCurrentClusterName).AnyTimes() + s.mockClusterMetadata.EXPECT().GetAllClusterInfo().Return(cluster.TestAllClusterInfo).AnyTimes() + s.mockClusterMetadata.EXPECT().IsGlobalNamespaceEnabled().Return(true).AnyTimes() + s.mockClusterMetadata.EXPECT().ClusterNameForFailoverVersion(s.namespaceEntry.IsGlobalNamespace(), s.version).Return(s.mockClusterMetadata.GetCurrentClusterName()).AnyTimes() + s.mockArchivalMetadata.SetHistoryEnabledByDefault() + s.mockArchivalMetadata.SetVisibilityEnabledByDefault() + + s.workflowCache = wcache.NewCache(s.mockShard) + s.logger = s.mockShard.GetLogger() + + h := &historyEngineImpl{ + currentClusterName: s.mockShard.Resource.GetClusterMetadata().GetCurrentClusterName(), + shard: s.mockShard, + clusterMetadata: s.mockClusterMetadata, + executionManager: s.mockExecutionMgr, + logger: s.logger, + tokenSerializer: common.NewProtoTaskTokenSerializer(), + metricsHandler: s.mockShard.GetMetricsHandler(), + eventNotifier: events.NewNotifier(clock.NewRealTimeSource(), metrics.NoopMetricsHandler, func(namespace.ID, string) int32 { return 1 }), + queueProcessors: map[tasks.Category]queues.Queue{ + s.mockTxProcessor.Category(): s.mockTxProcessor, + s.mockTimerProcessor.Category(): s.mockTimerProcessor, + }, + } + s.mockShard.SetEngineForTesting(h) + + s.transferQueueActiveTaskExecutor = newTransferQueueActiveTaskExecutor( + s.mockShard, + s.workflowCache, + s.mockArchivalClient, + h.sdkClientFactory, + s.logger, + metrics.NoopMetricsHandler, + config, + s.mockShard.Resource.HistoryClient, + s.mockShard.Resource.MatchingClient, + s.mockVisibilityManager, + ).(*transferQueueActiveTaskExecutor) + s.transferQueueActiveTaskExecutor.parentClosePolicyClient = s.mockParentClosePolicyClient +} + +func (s *transferQueueActiveTaskExecutorSuite) TearDownTest() { + s.controller.Finish() + s.mockShard.StopForTest() +} + +func (s *transferQueueActiveTaskExecutorSuite) TestProcessActivityTask_Success() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: 
&workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{ + Name: taskQueueName, + Kind: enumspb.TASK_QUEUE_KIND_NORMAL, + }, + WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + }, + ) + s.Nil(err) + + wt := addWorkflowTaskScheduledEvent(mutableState) + event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) + wt.StartedEventID = event.GetEventId() + event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") + + taskID := int64(59) + activityID := "activity-1" + activityType := "some random activity type" + event, ai := addActivityTaskScheduledEvent(mutableState, event.GetEventId(), activityID, activityType, taskQueueName, &commonpb.Payloads{}, 1*time.Second, 1*time.Second, 1*time.Second, 1*time.Second) + + transferTask := &tasks.ActivityTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + Version: s.version, + TaskID: taskID, + TaskQueue: taskQueueName, + ScheduledEventID: event.GetEventId(), + VisibilityTimestamp: time.Now().UTC(), + } + + persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + s.mockMatchingClient.EXPECT().AddActivityTask(gomock.Any(), s.createAddActivityTaskRequest(transferTask, ai), gomock.Any()).Return(&matchingservice.AddActivityTaskResponse{}, nil) + + _, _, err = s.transferQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) + s.Nil(err) +} + +func (s *transferQueueActiveTaskExecutorSuite) TestProcessActivityTask_Duplication() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + }, + ) + s.Nil(err) + + wt := addWorkflowTaskScheduledEvent(mutableState) + event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) + wt.StartedEventID = event.GetEventId() + event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") + + taskID := int64(59) + activityID := "activity-1" + activityType := "some random activity type" + event, ai := addActivityTaskScheduledEvent(mutableState, event.GetEventId(), activityID, activityType, taskQueueName, &commonpb.Payloads{}, 1*time.Second, 1*time.Second, 1*time.Second, 1*time.Second) + + transferTask := &tasks.ActivityTask{ + WorkflowKey: 
definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + Version: s.version, + TaskID: taskID, + TaskQueue: taskQueueName, + ScheduledEventID: event.GetEventId(), + VisibilityTimestamp: time.Now().UTC(), + } + + event = addActivityTaskStartedEvent(mutableState, event.GetEventId(), "") + ai.StartedEventId = event.GetEventId() + event = addActivityTaskCompletedEvent(mutableState, ai.ScheduledEventId, ai.StartedEventId, nil, "") + // Flush buffered events so real IDs get assigned + mutableState.FlushBufferedEvents() + + persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + + _, _, err = s.transferQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) + s.ErrorIs(err, consts.ErrActivityTaskNotFound) +} + +func (s *transferQueueActiveTaskExecutorSuite) TestProcessWorkflowTask_FirstWorkflowTask() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{ + Name: taskQueueName, + Kind: enumspb.TASK_QUEUE_KIND_NORMAL, + }, + WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + }, + ) + s.Nil(err) + + taskID := int64(59) + wt := addWorkflowTaskScheduledEvent(mutableState) + + transferTask := &tasks.WorkflowTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + Version: s.version, + TaskID: taskID, + TaskQueue: taskQueueName, + ScheduledEventID: wt.ScheduledEventID, + VisibilityTimestamp: time.Now().UTC(), + } + + persistenceMutableState := s.createPersistenceMutableState(mutableState, wt.ScheduledEventID, wt.Version) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + s.mockMatchingClient.EXPECT().AddWorkflowTask(gomock.Any(), s.createAddWorkflowTaskRequest(transferTask, mutableState), gomock.Any()).Return(&matchingservice.AddWorkflowTaskResponse{}, nil) + + _, _, err = s.transferQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) + s.Nil(err) +} + +func (s *transferQueueActiveTaskExecutorSuite) TestProcessWorkflowTask_NonFirstWorkflowTask() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + 
&historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{ + Name: taskQueueName, + Kind: enumspb.TASK_QUEUE_KIND_NORMAL, + }, + WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + }, + ) + s.Nil(err) + + wt := addWorkflowTaskScheduledEvent(mutableState) + event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) + wt.StartedEventID = event.GetEventId() + event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") + s.NotNil(event) + + // make another round of workflow task + taskID := int64(59) + wt = addWorkflowTaskScheduledEvent(mutableState) + + transferTask := &tasks.WorkflowTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + Version: s.version, + TaskID: taskID, + TaskQueue: taskQueueName, + ScheduledEventID: wt.ScheduledEventID, + VisibilityTimestamp: time.Now().UTC(), + } + + persistenceMutableState := s.createPersistenceMutableState(mutableState, wt.ScheduledEventID, wt.Version) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + s.mockMatchingClient.EXPECT().AddWorkflowTask(gomock.Any(), s.createAddWorkflowTaskRequest(transferTask, mutableState), gomock.Any()).Return(&matchingservice.AddWorkflowTaskResponse{}, nil) + + _, _, err = s.transferQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) + s.Nil(err) +} + +func (s *transferQueueActiveTaskExecutorSuite) TestProcessWorkflowTask_Sticky_NonFirstWorkflowTask() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + stickyTaskQueueName := "some random sticky task queue" + stickyTaskQueueTimeout := timestamp.DurationFromSeconds(233) + + mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + }, + ) + s.Nil(err) + + wt := addWorkflowTaskScheduledEvent(mutableState) + event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) + wt.StartedEventID = event.GetEventId() + event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") + s.NotNil(event) + // set the sticky taskqueue attr + executionInfo := mutableState.GetExecutionInfo() + executionInfo.StickyTaskQueue = stickyTaskQueueName + executionInfo.StickyScheduleToStartTimeout = stickyTaskQueueTimeout + + // make another round of workflow task + taskID := 
int64(59) + wt = addWorkflowTaskScheduledEvent(mutableState) + + transferTask := &tasks.WorkflowTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + Version: s.version, + TaskID: taskID, + TaskQueue: stickyTaskQueueName, + ScheduledEventID: wt.ScheduledEventID, + VisibilityTimestamp: time.Now().UTC(), + } + + persistenceMutableState := s.createPersistenceMutableState(mutableState, wt.ScheduledEventID, wt.Version) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + s.mockMatchingClient.EXPECT().AddWorkflowTask(gomock.Any(), s.createAddWorkflowTaskRequest(transferTask, mutableState), gomock.Any()).Return(&matchingservice.AddWorkflowTaskResponse{}, nil) + + _, _, err = s.transferQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) + s.Nil(err) +} + +func (s *transferQueueActiveTaskExecutorSuite) TestProcessWorkflowTask_WorkflowTaskNotSticky_MutableStateSticky() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + stickyTaskQueueName := "some random sticky task queue" + stickyTaskQueueTimeout := timestamp.DurationFromSeconds(233) + + mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{ + Name: taskQueueName, + Kind: enumspb.TASK_QUEUE_KIND_NORMAL, + }, + WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + }, + ) + s.Nil(err) + + wt := addWorkflowTaskScheduledEvent(mutableState) + event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) + wt.StartedEventID = event.GetEventId() + event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") + s.NotNil(event) + // set the sticky taskqueue attr + executionInfo := mutableState.GetExecutionInfo() + executionInfo.StickyTaskQueue = stickyTaskQueueName + executionInfo.StickyScheduleToStartTimeout = stickyTaskQueueTimeout + + // make another round of workflow task + taskID := int64(59) + wt = addWorkflowTaskScheduledEvent(mutableState) + + transferTask := &tasks.WorkflowTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + Version: s.version, + TaskID: taskID, + TaskQueue: taskQueueName, + ScheduledEventID: wt.ScheduledEventID, + VisibilityTimestamp: time.Now().UTC(), + } + + persistenceMutableState := s.createPersistenceMutableState(mutableState, wt.ScheduledEventID, wt.Version) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + s.mockMatchingClient.EXPECT().AddWorkflowTask(gomock.Any(), s.createAddWorkflowTaskRequest(transferTask, mutableState), 
gomock.Any()).Return(&matchingservice.AddWorkflowTaskResponse{}, nil) + + _, _, err = s.transferQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) + s.Nil(err) +} + +func (s *transferQueueActiveTaskExecutorSuite) TestProcessWorkflowTask_Duplication() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + }, + ) + s.Nil(err) + + taskID := int64(4096) + wt := addWorkflowTaskScheduledEvent(mutableState) + event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) + wt.StartedEventID = event.GetEventId() + event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") + + transferTask := &tasks.WorkflowTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + Version: s.version, + TaskID: taskID, + TaskQueue: taskQueueName, + ScheduledEventID: wt.ScheduledEventID, + VisibilityTimestamp: time.Now().UTC(), + } + + persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + + _, _, err = s.transferQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) + s.Nil(err) +} + +func (s *transferQueueActiveTaskExecutorSuite) TestProcessCloseExecution_HasParent() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + parentNamespaceID := "some random parent namespace ID" + parentInitiatedID := int64(3222) + parentInitiatedVersion := int64(1234) + parentNamespace := "some random parent namespace Name" + parentExecution := &commonpb.WorkflowExecution{ + WorkflowId: "some random parent workflow ID", + RunId: uuid.New(), + } + parentClock := vclock.NewVectorClock(rand.Int63(), rand.Int31(), rand.Int63()) + + mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + 
ParentExecutionInfo: &workflowspb.ParentExecutionInfo{ + NamespaceId: parentNamespaceID, + Namespace: parentNamespace, + Execution: parentExecution, + InitiatedId: parentInitiatedID, + InitiatedVersion: parentInitiatedVersion, + Clock: parentClock, + }, + }, + ) + s.Nil(err) + + wt := addWorkflowTaskScheduledEvent(mutableState) + event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) + wt.StartedEventID = event.GetEventId() + event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") + + taskID := int64(59) + event = addCompleteWorkflowEvent(mutableState, event.GetEventId(), nil) + + transferTask := &tasks.CloseExecutionTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + Version: s.version, + TaskID: taskID, + VisibilityTimestamp: time.Now().UTC(), + } + + persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + s.mockHistoryClient.EXPECT().RecordChildExecutionCompleted(gomock.Any(), &historyservice.RecordChildExecutionCompletedRequest{ + NamespaceId: parentNamespaceID, + ParentExecution: parentExecution, + ParentInitiatedId: parentInitiatedID, + ParentInitiatedVersion: parentInitiatedVersion, + Clock: parentClock, + ChildExecution: &execution, + CompletionEvent: event, + }).Return(nil, nil) + s.mockArchivalMetadata.EXPECT().GetVisibilityConfig().Return(archiver.NewDisabledArchvialConfig()) + + _, _, err = s.transferQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) + s.Nil(err) +} + +func (s *transferQueueActiveTaskExecutorSuite) TestProcessCloseExecution_CanSkipVisibilityArchival() { + for _, skipVisibilityArchival := range []bool{ + false, + true, + } { + s.Run(fmt.Sprintf("CanSkipVisibilityArchival=%v", skipVisibilityArchival), func() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + mutableState := workflow.TestGlobalMutableState( + s.mockShard, + s.mockShard.GetEventsCache(), + s.logger, + s.version, + execution.GetRunId(), + ) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + }, + ) + s.Nil(err) + + wt := addWorkflowTaskScheduledEvent(mutableState) + event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) + wt.StartedEventID = event.GetEventId() + event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") + + taskID := int64(59) + event = addCompleteWorkflowEvent(mutableState, event.GetEventId(), nil) + + transferTask := &tasks.CloseExecutionTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + 
execution.GetWorkflowId(), + execution.GetRunId(), + ), + Version: s.version, + TaskID: taskID, + VisibilityTimestamp: time.Now().UTC(), + CanSkipVisibilityArchival: skipVisibilityArchival, + } + + persistenceMutableState := s.createPersistenceMutableState( + mutableState, + event.GetEventId(), + event.GetVersion(), + ) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()). + Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + if !skipVisibilityArchival { + s.mockArchivalMetadata.EXPECT().GetVisibilityConfig(). + Return(archiver.NewArchivalConfig( + "enabled", + dc.GetStringPropertyFn("enabled"), + dc.GetBoolPropertyFn(true), + "disabled", + "random URI", + )).AnyTimes() + s.mockArchivalClient.EXPECT().Archive(gomock.Any(), gomock.Any()).Return(nil, nil) + s.mockSearchAttributesProvider.EXPECT().GetSearchAttributes(gomock.Any(), false) + s.mockVisibilityManager.EXPECT().GetIndexName().Return("") + } + + _, _, err = s.transferQueueActiveTaskExecutor.Execute( + context.Background(), + s.newTaskExecutable(transferTask), + ) + s.Nil(err) + + }) + } +} + +func (s *transferQueueActiveTaskExecutorSuite) TestProcessCloseExecution_NoParent() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + }, + ) + s.Nil(err) + + wt := addWorkflowTaskScheduledEvent(mutableState) + event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) + wt.StartedEventID = event.GetEventId() + event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") + + taskID := int64(59) + event = addCompleteWorkflowEvent(mutableState, event.GetEventId(), nil) + + transferTask := &tasks.CloseExecutionTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + Version: s.version, + TaskID: taskID, + VisibilityTimestamp: time.Now().UTC(), + } + + persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + s.mockArchivalMetadata.EXPECT().GetVisibilityConfig().Return(archiver.NewArchivalConfig("enabled", dc.GetStringPropertyFn("enabled"), dc.GetBoolPropertyFn(true), "disabled", "random URI")) + s.mockArchivalClient.EXPECT().Archive(gomock.Any(), gomock.Any()).Return(nil, nil) + s.mockSearchAttributesProvider.EXPECT().GetSearchAttributes(gomock.Any(), false) + s.mockVisibilityManager.EXPECT().GetIndexName().Return("") + + _, _, err = s.transferQueueActiveTaskExecutor.Execute(context.Background(), 
s.newTaskExecutable(transferTask)) + s.Nil(err) +} + +func (s *transferQueueActiveTaskExecutorSuite) TestProcessCloseExecution_NoParent_HasFewChildren() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + s.mockNamespaceCache.EXPECT().GetNamespace(namespace.Name("child namespace1")).Return(tests.GlobalNamespaceEntry, nil).AnyTimes() + s.mockNamespaceCache.EXPECT().GetNamespace(namespace.Name("child namespace2")).Return(tests.GlobalNamespaceEntry, nil).AnyTimes() + s.mockNamespaceCache.EXPECT().GetNamespace(namespace.Name("child namespace3")).Return(tests.GlobalNamespaceEntry, nil).AnyTimes() + + mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + }, + ) + s.Nil(err) + + wt := addWorkflowTaskScheduledEvent(mutableState) + event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) + wt.StartedEventID = event.GetEventId() + + commandType := enumspb.COMMAND_TYPE_START_CHILD_WORKFLOW_EXECUTION + parentClosePolicy1 := enumspb.PARENT_CLOSE_POLICY_ABANDON + parentClosePolicy2 := enumspb.PARENT_CLOSE_POLICY_TERMINATE + parentClosePolicy3 := enumspb.PARENT_CLOSE_POLICY_REQUEST_CANCEL + + event, _ = mutableState.AddWorkflowTaskCompletedEvent(wt, &workflowservice.RespondWorkflowTaskCompletedRequest{ + Identity: "some random identity", + Commands: []*commandpb.Command{ + { + CommandType: commandType, + Attributes: &commandpb.Command_StartChildWorkflowExecutionCommandAttributes{StartChildWorkflowExecutionCommandAttributes: &commandpb.StartChildWorkflowExecutionCommandAttributes{ + Namespace: "child namespace1", + WorkflowId: "child workflow1", + WorkflowType: &commonpb.WorkflowType{ + Name: "child workflow type", + }, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + Input: payloads.EncodeString("random input"), + ParentClosePolicy: parentClosePolicy1, + }}, + }, + { + CommandType: commandType, + Attributes: &commandpb.Command_StartChildWorkflowExecutionCommandAttributes{StartChildWorkflowExecutionCommandAttributes: &commandpb.StartChildWorkflowExecutionCommandAttributes{ + Namespace: "child namespace2", + WorkflowId: "child workflow2", + WorkflowType: &commonpb.WorkflowType{ + Name: "child workflow type", + }, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + Input: payloads.EncodeString("random input"), + ParentClosePolicy: parentClosePolicy2, + }}, + }, + { + CommandType: commandType, + Attributes: &commandpb.Command_StartChildWorkflowExecutionCommandAttributes{StartChildWorkflowExecutionCommandAttributes: &commandpb.StartChildWorkflowExecutionCommandAttributes{ + Namespace: "child namespace3", + WorkflowId: "child workflow3", + WorkflowType: &commonpb.WorkflowType{ + Name: "child workflow type", + }, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + Input: payloads.EncodeString("random input"), + ParentClosePolicy: 
parentClosePolicy3, + }}, + }, + }, + }, defaultWorkflowTaskCompletionLimits) + + _, _, err = mutableState.AddStartChildWorkflowExecutionInitiatedEvent(event.GetEventId(), uuid.New(), &commandpb.StartChildWorkflowExecutionCommandAttributes{ + Namespace: "child namespace1", + WorkflowId: "child workflow1", + WorkflowType: &commonpb.WorkflowType{ + Name: "child workflow type", + }, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + Input: payloads.EncodeString("random input"), + ParentClosePolicy: parentClosePolicy1, + }, "child namespace1-ID") + s.Nil(err) + _, _, err = mutableState.AddStartChildWorkflowExecutionInitiatedEvent(event.GetEventId(), uuid.New(), &commandpb.StartChildWorkflowExecutionCommandAttributes{ + Namespace: "child namespace2", + WorkflowId: "child workflow2", + WorkflowType: &commonpb.WorkflowType{ + Name: "child workflow type", + }, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + Input: payloads.EncodeString("random input"), + ParentClosePolicy: parentClosePolicy2, + }, "child namespace2-ID") + s.Nil(err) + _, _, err = mutableState.AddStartChildWorkflowExecutionInitiatedEvent(event.GetEventId(), uuid.New(), &commandpb.StartChildWorkflowExecutionCommandAttributes{ + Namespace: "child namespace3", + WorkflowId: "child workflow3", + WorkflowType: &commonpb.WorkflowType{ + Name: "child workflow type", + }, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + Input: payloads.EncodeString("random input"), + ParentClosePolicy: parentClosePolicy3, + }, "child namespace3-ID") + s.Nil(err) + + mutableState.FlushBufferedEvents() + + taskID := int64(59) + event = addCompleteWorkflowEvent(mutableState, event.GetEventId(), nil) + + transferTask := &tasks.CloseExecutionTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + Version: s.version, + TaskID: taskID, + VisibilityTimestamp: time.Now().UTC(), + } + + persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + s.mockArchivalMetadata.EXPECT().GetVisibilityConfig().Return(archiver.NewDisabledArchvialConfig()) + s.mockHistoryClient.EXPECT().RequestCancelWorkflowExecution(gomock.Any(), gomock.Any()).DoAndReturn( + func(_ context.Context, request *historyservice.RequestCancelWorkflowExecutionRequest, _ ...grpc.CallOption) (*historyservice.RequestCancelWorkflowExecutionResponse, error) { + s.True(request.GetChildWorkflowOnly()) + s.Equal(execution.GetWorkflowId(), request.GetExternalWorkflowExecution().GetWorkflowId()) + s.Equal(execution.GetRunId(), request.GetExternalWorkflowExecution().GetRunId()) + return nil, nil + }, + ) + s.mockHistoryClient.EXPECT().TerminateWorkflowExecution(gomock.Any(), gomock.Any()).DoAndReturn( + func(_ context.Context, request *historyservice.TerminateWorkflowExecutionRequest, _ ...grpc.CallOption) (*historyservice.TerminateWorkflowExecutionResponse, error) { + s.True(request.GetChildWorkflowOnly()) + s.Equal(execution.GetWorkflowId(), request.GetExternalWorkflowExecution().GetWorkflowId()) + s.Equal(execution.GetRunId(), request.GetExternalWorkflowExecution().GetRunId()) + return nil, nil + }, + ) + + _, _, err = s.transferQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) + s.Nil(err) +} + +func (s *transferQueueActiveTaskExecutorSuite) 
TestProcessCloseExecution_NoParent_HasManyChildren() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + }, + ) + s.Nil(err) + + wt := addWorkflowTaskScheduledEvent(mutableState) + event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) + wt.StartedEventID = event.GetEventId() + + commandType := enumspb.COMMAND_TYPE_START_CHILD_WORKFLOW_EXECUTION + parentClosePolicy := enumspb.PARENT_CLOSE_POLICY_TERMINATE + var commands []*commandpb.Command + for i := 0; i < 10; i++ { + commands = append(commands, &commandpb.Command{ + CommandType: commandType, + Attributes: &commandpb.Command_StartChildWorkflowExecutionCommandAttributes{StartChildWorkflowExecutionCommandAttributes: &commandpb.StartChildWorkflowExecutionCommandAttributes{ + WorkflowId: "child workflow" + convert.IntToString(i), + WorkflowType: &commonpb.WorkflowType{ + Name: "child workflow type", + }, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + Input: payloads.EncodeString("random input"), + ParentClosePolicy: parentClosePolicy, + }}, + }) + } + + event, _ = mutableState.AddWorkflowTaskCompletedEvent(wt, &workflowservice.RespondWorkflowTaskCompletedRequest{ + Identity: "some random identity", + Commands: commands, + }, defaultWorkflowTaskCompletionLimits) + + for i := 0; i < 10; i++ { + _, _, err = mutableState.AddStartChildWorkflowExecutionInitiatedEvent(event.GetEventId(), uuid.New(), &commandpb.StartChildWorkflowExecutionCommandAttributes{ + WorkflowId: "child workflow" + convert.IntToString(i), + WorkflowType: &commonpb.WorkflowType{ + Name: "child workflow type", + }, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + Input: payloads.EncodeString("random input"), + ParentClosePolicy: parentClosePolicy, + }, "child namespace1-ID") + s.Nil(err) + } + + mutableState.FlushBufferedEvents() + + taskID := int64(59) + event = addCompleteWorkflowEvent(mutableState, event.GetEventId(), nil) + + transferTask := &tasks.CloseExecutionTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + Version: s.version, + TaskID: taskID, + VisibilityTimestamp: time.Now().UTC(), + } + + persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + s.mockArchivalMetadata.EXPECT().GetVisibilityConfig().Return(archiver.NewDisabledArchvialConfig()) + s.mockParentClosePolicyClient.EXPECT().SendParentClosePolicyRequest(gomock.Any(), gomock.Any()).DoAndReturn( + func(_ context.Context, request parentclosepolicy.Request) error { + 
s.Equal(execution, request.ParentExecution) + return nil + }, + ) + + _, _, err = s.transferQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) + s.Nil(err) +} + +func (s *transferQueueActiveTaskExecutorSuite) TestProcessCloseExecution_NoParent_HasManyAbandonedChildren() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + }, + ) + s.Nil(err) + + wt := addWorkflowTaskScheduledEvent(mutableState) + event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) + wt.StartedEventID = event.GetEventId() + + commandType := enumspb.COMMAND_TYPE_START_CHILD_WORKFLOW_EXECUTION + parentClosePolicy := enumspb.PARENT_CLOSE_POLICY_ABANDON + var commands []*commandpb.Command + for i := 0; i < 10; i++ { + commands = append(commands, &commandpb.Command{ + CommandType: commandType, + Attributes: &commandpb.Command_StartChildWorkflowExecutionCommandAttributes{StartChildWorkflowExecutionCommandAttributes: &commandpb.StartChildWorkflowExecutionCommandAttributes{ + WorkflowId: "child workflow" + convert.IntToString(i), + WorkflowType: &commonpb.WorkflowType{ + Name: "child workflow type", + }, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + Input: payloads.EncodeString("random input"), + ParentClosePolicy: parentClosePolicy, + }}, + }) + } + + event, _ = mutableState.AddWorkflowTaskCompletedEvent(wt, &workflowservice.RespondWorkflowTaskCompletedRequest{ + Identity: "some random identity", + Commands: commands, + }, defaultWorkflowTaskCompletionLimits) + + for i := 0; i < 10; i++ { + _, _, err = mutableState.AddStartChildWorkflowExecutionInitiatedEvent(event.GetEventId(), uuid.New(), &commandpb.StartChildWorkflowExecutionCommandAttributes{ + WorkflowId: "child workflow" + convert.IntToString(i), + WorkflowType: &commonpb.WorkflowType{ + Name: "child workflow type", + }, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + Input: payloads.EncodeString("random input"), + ParentClosePolicy: parentClosePolicy, + }, "child namespace1-ID") + s.Nil(err) + } + + mutableState.FlushBufferedEvents() + + taskID := int64(59) + event = addCompleteWorkflowEvent(mutableState, event.GetEventId(), nil) + + transferTask := &tasks.CloseExecutionTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + Version: s.version, + TaskID: taskID, + VisibilityTimestamp: time.Now().UTC(), + } + + persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + 
s.mockArchivalMetadata.EXPECT().GetVisibilityConfig().Return(archiver.NewDisabledArchvialConfig()) + + _, _, err = s.transferQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) + s.Nil(err) +} + +func (s *transferQueueActiveTaskExecutorSuite) TestProcessCloseExecution_NoParent_ChildInDeletedNamespace() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + s.mockNamespaceCache.EXPECT().GetNamespace(namespace.Name("child namespace1")).Return(tests.GlobalNamespaceEntry, nil).AnyTimes() + + mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + }, + ) + s.NoError(err) + + wt := addWorkflowTaskScheduledEvent(mutableState) + event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) + wt.StartedEventID = event.GetEventId() + + event, _ = mutableState.AddWorkflowTaskCompletedEvent(wt, &workflowservice.RespondWorkflowTaskCompletedRequest{ + Identity: "some random identity", + Commands: []*commandpb.Command{ + { + CommandType: enumspb.COMMAND_TYPE_START_CHILD_WORKFLOW_EXECUTION, + Attributes: &commandpb.Command_StartChildWorkflowExecutionCommandAttributes{StartChildWorkflowExecutionCommandAttributes: &commandpb.StartChildWorkflowExecutionCommandAttributes{ + Namespace: "child namespace1", + WorkflowId: "child workflow1", + WorkflowType: &commonpb.WorkflowType{ + Name: "child workflow type", + }, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + Input: payloads.EncodeString("random input"), + ParentClosePolicy: enumspb.PARENT_CLOSE_POLICY_TERMINATE, + }}, + }, + { + CommandType: enumspb.COMMAND_TYPE_START_CHILD_WORKFLOW_EXECUTION, + Attributes: &commandpb.Command_StartChildWorkflowExecutionCommandAttributes{StartChildWorkflowExecutionCommandAttributes: &commandpb.StartChildWorkflowExecutionCommandAttributes{ + Namespace: "child namespace1", + WorkflowId: "child workflow2", + WorkflowType: &commonpb.WorkflowType{ + Name: "child workflow type", + }, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + Input: payloads.EncodeString("random input"), + ParentClosePolicy: enumspb.PARENT_CLOSE_POLICY_REQUEST_CANCEL, + }}, + }, + }, + }, defaultWorkflowTaskCompletionLimits) + + _, _, err = mutableState.AddStartChildWorkflowExecutionInitiatedEvent(event.GetEventId(), uuid.New(), &commandpb.StartChildWorkflowExecutionCommandAttributes{ + Namespace: "child namespace1", + WorkflowId: "child workflow1", + WorkflowType: &commonpb.WorkflowType{ + Name: "child workflow type", + }, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + Input: payloads.EncodeString("random input"), + ParentClosePolicy: enumspb.PARENT_CLOSE_POLICY_TERMINATE, + }, "child namespace1-ID") + s.NoError(err) + + _, _, err = mutableState.AddStartChildWorkflowExecutionInitiatedEvent(event.GetEventId(), uuid.New(), 
&commandpb.StartChildWorkflowExecutionCommandAttributes{ + Namespace: "child namespace1", + WorkflowId: "child workflow2", + WorkflowType: &commonpb.WorkflowType{ + Name: "child workflow type", + }, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + Input: payloads.EncodeString("random input"), + ParentClosePolicy: enumspb.PARENT_CLOSE_POLICY_REQUEST_CANCEL, + }, "child namespace2-ID") + s.NoError(err) + + mutableState.FlushBufferedEvents() + + taskID := int64(22) + event = addCompleteWorkflowEvent(mutableState, event.GetEventId(), nil) + + transferTask := &tasks.CloseExecutionTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + Version: s.version, + TaskID: taskID, + VisibilityTimestamp: time.Now().UTC(), + } + + persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + s.mockArchivalMetadata.EXPECT().GetVisibilityConfig().Return(archiver.NewDisabledArchvialConfig()) + + s.mockHistoryClient.EXPECT().TerminateWorkflowExecution(gomock.Any(), gomock.Any()).DoAndReturn( + func(_ context.Context, request *historyservice.TerminateWorkflowExecutionRequest, _ ...grpc.CallOption) (*historyservice.TerminateWorkflowExecutionResponse, error) { + s.True(request.GetChildWorkflowOnly()) + s.Equal(execution.GetWorkflowId(), request.GetExternalWorkflowExecution().GetWorkflowId()) + s.Equal(execution.GetRunId(), request.GetExternalWorkflowExecution().GetRunId()) + return nil, serviceerror.NewNamespaceNotFound("child namespace1") + }, + ) + + s.mockHistoryClient.EXPECT().RequestCancelWorkflowExecution(gomock.Any(), gomock.Any()).DoAndReturn( + func(_ context.Context, request *historyservice.RequestCancelWorkflowExecutionRequest, _ ...grpc.CallOption) (*historyservice.RequestCancelWorkflowExecutionResponse, error) { + s.True(request.GetChildWorkflowOnly()) + s.Equal(execution.GetWorkflowId(), request.GetExternalWorkflowExecution().GetWorkflowId()) + s.Equal(execution.GetRunId(), request.GetExternalWorkflowExecution().GetRunId()) + return nil, serviceerror.NewNamespaceNotFound("child namespace1") + }, + ) + + _, _, err = s.transferQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) + s.NoError(err) +} + +func (s *transferQueueActiveTaskExecutorSuite) TestProcessCloseExecution_DeleteAfterClose() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + }, + ) + s.Nil(err) + + wt := addWorkflowTaskScheduledEvent(mutableState) + event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, 
taskQueueName, uuid.New()) + wt.StartedEventID = event.GetEventId() + event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") + + taskID := int64(59) + event = addCompleteWorkflowEvent(mutableState, event.GetEventId(), nil) + + transferTask := &tasks.CloseExecutionTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + Version: s.version, + TaskID: taskID, + VisibilityTimestamp: time.Now().UTC(), + DeleteAfterClose: true, + } + + persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + s.mockArchivalMetadata.EXPECT().GetVisibilityConfig().Return(archiver.NewArchivalConfig("enabled", dc.GetStringPropertyFn("enabled"), dc.GetBoolPropertyFn(true), "disabled", "random URI")).Times(2) + s.mockArchivalClient.EXPECT().Archive(gomock.Any(), gomock.Any()).Return(nil, nil).Times(2) + s.mockSearchAttributesProvider.EXPECT().GetSearchAttributes(gomock.Any(), false).Times(2) + s.mockVisibilityManager.EXPECT().GetIndexName().Return("").Times(2) + mockDeleteMgr := deletemanager.NewMockDeleteManager(s.controller) + mockDeleteMgr.EXPECT().DeleteWorkflowExecution(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) + s.transferQueueActiveTaskExecutor.workflowDeleteManager = mockDeleteMgr + _, _, err = s.transferQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) + s.NoError(err) + + transferTask.DeleteAfterClose = false + _, _, err = s.transferQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) + s.NoError(err) +} + +func (s *transferQueueActiveTaskExecutorSuite) TestProcessCancelExecution_Success() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + targetExecution := commonpb.WorkflowExecution{ + WorkflowId: "some random target workflow ID", + RunId: uuid.New(), + } + + mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + }, + ) + s.Nil(err) + + wt := addWorkflowTaskScheduledEvent(mutableState) + event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) + wt.StartedEventID = event.GetEventId() + event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") + + taskID := int64(59) + event, rci := addRequestCancelInitiatedEvent(mutableState, event.GetEventId(), uuid.New(), tests.TargetNamespace, tests.TargetNamespaceID, targetExecution.GetWorkflowId(), targetExecution.GetRunId()) + attributes := 
event.GetRequestCancelExternalWorkflowExecutionInitiatedEventAttributes() + + transferTask := &tasks.CancelExecutionTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + Version: s.version, + TargetNamespaceID: s.targetNamespaceID.String(), + TargetWorkflowID: targetExecution.GetWorkflowId(), + TargetRunID: targetExecution.GetRunId(), + TaskID: taskID, + TargetChildWorkflowOnly: true, + InitiatedEventID: event.GetEventId(), + } + + persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + s.mockHistoryClient.EXPECT().RequestCancelWorkflowExecution(gomock.Any(), s.createRequestCancelWorkflowExecutionRequest(s.targetNamespace, transferTask, rci, attributes)).Return(nil, nil) + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) + s.mockClusterMetadata.EXPECT().ClusterNameForFailoverVersion(s.namespaceEntry.IsGlobalNamespace(), s.version).Return(cluster.TestCurrentClusterName).AnyTimes() + + _, _, err = s.transferQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) + s.Nil(err) +} + +func (s *transferQueueActiveTaskExecutorSuite) TestProcessCancelExecution_Failure() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + targetExecution := commonpb.WorkflowExecution{ + WorkflowId: "some random target workflow ID", + RunId: uuid.New(), + } + + mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + }, + ) + s.Nil(err) + + wt := addWorkflowTaskScheduledEvent(mutableState) + event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) + wt.StartedEventID = event.GetEventId() + event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") + + taskID := int64(59) + event, rci := addRequestCancelInitiatedEvent(mutableState, event.GetEventId(), uuid.New(), tests.TargetNamespace, tests.TargetNamespaceID, targetExecution.GetWorkflowId(), targetExecution.GetRunId()) + attributes := event.GetRequestCancelExternalWorkflowExecutionInitiatedEventAttributes() + + transferTask := &tasks.CancelExecutionTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + Version: s.version, + TargetNamespaceID: s.targetNamespaceID.String(), + TargetWorkflowID: targetExecution.GetWorkflowId(), + TargetRunID: targetExecution.GetRunId(), + TaskID: taskID, + TargetChildWorkflowOnly: true, + InitiatedEventID: event.GetEventId(), + } + 
+ persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + s.mockHistoryClient.EXPECT().RequestCancelWorkflowExecution(gomock.Any(), s.createRequestCancelWorkflowExecutionRequest(s.targetNamespace, transferTask, rci, attributes)).Return(nil, serviceerror.NewNotFound("")) + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) + s.mockClusterMetadata.EXPECT().ClusterNameForFailoverVersion(gomock.Any(), s.version).Return(cluster.TestCurrentClusterName).AnyTimes() + + _, _, err = s.transferQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) + s.Nil(err) +} + +func (s *transferQueueActiveTaskExecutorSuite) TestProcessCancelExecution_Failure_TargetNamespaceNotFound() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + targetExecution := commonpb.WorkflowExecution{ + WorkflowId: "some random target workflow ID", + RunId: uuid.New(), + } + + mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + }, + ) + s.Nil(err) + + wt := addWorkflowTaskScheduledEvent(mutableState) + event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) + wt.StartedEventID = event.GetEventId() + event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") + + taskID := int64(59) + event, _ = addRequestCancelInitiatedEvent(mutableState, event.GetEventId(), uuid.New(), tests.TargetNamespace, tests.TargetNamespaceID, targetExecution.GetWorkflowId(), targetExecution.GetRunId()) + + transferTask := &tasks.CancelExecutionTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + Version: s.version, + TargetNamespaceID: tests.MissedNamespaceID.String(), + TargetWorkflowID: targetExecution.GetWorkflowId(), + TargetRunID: targetExecution.GetRunId(), + TaskID: taskID, + TargetChildWorkflowOnly: true, + InitiatedEventID: event.GetEventId(), + } + + persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) + s.mockClusterMetadata.EXPECT().ClusterNameForFailoverVersion(gomock.Any(), s.version).Return(cluster.TestCurrentClusterName).AnyTimes() + + _, _, err = 
s.transferQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) + s.Nil(err) +} + +func (s *transferQueueActiveTaskExecutorSuite) TestProcessCancelExecution_Duplication() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + targetExecution := commonpb.WorkflowExecution{ + WorkflowId: "some random target workflow ID", + RunId: uuid.New(), + } + + mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + }, + ) + s.Nil(err) + + wt := addWorkflowTaskScheduledEvent(mutableState) + event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) + wt.StartedEventID = event.GetEventId() + event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") + + taskID := int64(59) + event, _ = addRequestCancelInitiatedEvent(mutableState, event.GetEventId(), uuid.New(), tests.TargetNamespace, tests.TargetNamespaceID, targetExecution.GetWorkflowId(), targetExecution.GetRunId()) + + transferTask := &tasks.CancelExecutionTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + Version: s.version, + TargetNamespaceID: s.targetNamespaceID.String(), + TargetWorkflowID: targetExecution.GetWorkflowId(), + TargetRunID: targetExecution.GetRunId(), + TaskID: taskID, + TargetChildWorkflowOnly: true, + InitiatedEventID: event.GetEventId(), + } + + event = addCancelRequestedEvent(mutableState, event.GetEventId(), tests.TargetNamespace, tests.TargetNamespaceID, targetExecution.GetWorkflowId(), targetExecution.GetRunId()) + // Flush buffered events so real IDs get assigned + mutableState.FlushBufferedEvents() + + persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + + _, _, err = s.transferQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) + s.Nil(err) +} + +func (s *transferQueueActiveTaskExecutorSuite) TestProcessSignalExecution_Success() { + mutableState, event, si := s.setupSignalExternalWorkflowInitiated() + attributes := event.GetSignalExternalWorkflowExecutionInitiatedEventAttributes() + + transferTask := &tasks.SignalExecutionTask{ + WorkflowKey: definition.NewWorkflowKey( + mutableState.GetExecutionInfo().NamespaceId, + mutableState.GetExecutionInfo().WorkflowId, + mutableState.GetExecutionState().RunId, + ), + Version: s.version, + TargetNamespaceID: attributes.GetNamespaceId(), + TargetWorkflowID: attributes.WorkflowExecution.GetWorkflowId(), + TargetRunID: attributes.WorkflowExecution.GetRunId(), + TaskID: int64(59), + 
TargetChildWorkflowOnly: true, + InitiatedEventID: event.GetEventId(), + } + + persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + s.mockHistoryClient.EXPECT().SignalWorkflowExecution(gomock.Any(), s.createSignalWorkflowExecutionRequest(namespace.Name(attributes.Namespace), transferTask, si, attributes)).Return(nil, nil) + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) + s.mockClusterMetadata.EXPECT().ClusterNameForFailoverVersion(s.namespaceEntry.IsGlobalNamespace(), s.version).Return(cluster.TestCurrentClusterName).AnyTimes() + + s.mockHistoryClient.EXPECT().RemoveSignalMutableState(gomock.Any(), &historyservice.RemoveSignalMutableStateRequest{ + NamespaceId: transferTask.TargetNamespaceID, + WorkflowExecution: &commonpb.WorkflowExecution{ + WorkflowId: transferTask.TargetWorkflowID, + RunId: transferTask.TargetRunID, + }, + RequestId: si.GetRequestId(), + }).Return(nil, nil) + + _, _, err := s.transferQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) + s.Nil(err) +} + +func (s *transferQueueActiveTaskExecutorSuite) TestProcessSignalExecution_Failure_TargetWorkflowNotFound() { + mutableState, event, si := s.setupSignalExternalWorkflowInitiated() + attributes := event.GetSignalExternalWorkflowExecutionInitiatedEventAttributes() + + transferTask := &tasks.SignalExecutionTask{ + WorkflowKey: definition.NewWorkflowKey( + mutableState.GetExecutionInfo().NamespaceId, + mutableState.GetExecutionInfo().WorkflowId, + mutableState.GetExecutionState().RunId, + ), + Version: s.version, + TargetNamespaceID: attributes.GetNamespaceId(), + TargetWorkflowID: attributes.WorkflowExecution.GetWorkflowId(), + TargetRunID: attributes.WorkflowExecution.GetRunId(), + TaskID: int64(59), + TargetChildWorkflowOnly: true, + InitiatedEventID: event.GetEventId(), + } + + persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + s.mockHistoryClient.EXPECT().SignalWorkflowExecution(gomock.Any(), s.createSignalWorkflowExecutionRequest(namespace.Name(attributes.Namespace), transferTask, si, attributes)).Return(nil, serviceerror.NewNotFound("")) + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).DoAndReturn( + func(_ context.Context, request *persistence.UpdateWorkflowExecutionRequest) (*persistence.UpdateWorkflowExecutionResponse, error) { + s.validateUpdateExecutionRequestWithSignalExternalFailedEvent( + si.InitiatedEventId, + enumspb.SIGNAL_EXTERNAL_WORKFLOW_EXECUTION_FAILED_CAUSE_EXTERNAL_WORKFLOW_EXECUTION_NOT_FOUND, + request, + ) + return tests.UpdateWorkflowExecutionResponse, nil + }, + ) + s.mockClusterMetadata.EXPECT().ClusterNameForFailoverVersion(s.namespaceEntry.IsGlobalNamespace(), s.version).Return(cluster.TestCurrentClusterName).AnyTimes() + + _, _, err := s.transferQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) + s.Nil(err) +} + +func (s *transferQueueActiveTaskExecutorSuite) TestProcessSignalExecution_Failure_TargetNamespaceNotFound() { + mutableState, event, si := 
s.setupSignalExternalWorkflowInitiated() + attributes := event.GetSignalExternalWorkflowExecutionInitiatedEventAttributes() + + transferTask := &tasks.SignalExecutionTask{ + WorkflowKey: definition.NewWorkflowKey( + mutableState.GetExecutionInfo().NamespaceId, + mutableState.GetExecutionInfo().WorkflowId, + mutableState.GetExecutionState().RunId, + ), + Version: s.version, + TargetNamespaceID: tests.MissedNamespaceID.String(), + TargetWorkflowID: attributes.WorkflowExecution.GetWorkflowId(), + TargetRunID: attributes.WorkflowExecution.GetRunId(), + TaskID: int64(59), + TargetChildWorkflowOnly: true, + InitiatedEventID: event.GetEventId(), + } + + persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).DoAndReturn( + func(_ context.Context, request *persistence.UpdateWorkflowExecutionRequest) (*persistence.UpdateWorkflowExecutionResponse, error) { + s.validateUpdateExecutionRequestWithSignalExternalFailedEvent( + si.InitiatedEventId, + enumspb.SIGNAL_EXTERNAL_WORKFLOW_EXECUTION_FAILED_CAUSE_NAMESPACE_NOT_FOUND, + request, + ) + return tests.UpdateWorkflowExecutionResponse, nil + }, + ) + s.mockClusterMetadata.EXPECT().ClusterNameForFailoverVersion(s.namespaceEntry.IsGlobalNamespace(), s.version).Return(cluster.TestCurrentClusterName).AnyTimes() + + _, _, err := s.transferQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) + s.Nil(err) +} + +func (s *transferQueueActiveTaskExecutorSuite) TestProcessSignalExecution_Failure_SignalCountLimitExceeded() { + mutableState, event, si := s.setupSignalExternalWorkflowInitiated() + attributes := event.GetSignalExternalWorkflowExecutionInitiatedEventAttributes() + + transferTask := &tasks.SignalExecutionTask{ + WorkflowKey: definition.NewWorkflowKey( + mutableState.GetExecutionInfo().NamespaceId, + mutableState.GetExecutionInfo().WorkflowId, + mutableState.GetExecutionState().RunId, + ), + Version: s.version, + TargetNamespaceID: attributes.GetNamespaceId(), + TargetWorkflowID: attributes.WorkflowExecution.GetWorkflowId(), + TargetRunID: attributes.WorkflowExecution.GetRunId(), + TaskID: int64(59), + TargetChildWorkflowOnly: true, + InitiatedEventID: event.GetEventId(), + } + + persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + s.mockHistoryClient.EXPECT().SignalWorkflowExecution(gomock.Any(), s.createSignalWorkflowExecutionRequest(namespace.Name(attributes.Namespace), transferTask, si, attributes)).Return(nil, consts.ErrSignalsLimitExceeded) + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).DoAndReturn( + func(_ context.Context, request *persistence.UpdateWorkflowExecutionRequest) (*persistence.UpdateWorkflowExecutionResponse, error) { + s.validateUpdateExecutionRequestWithSignalExternalFailedEvent( + si.InitiatedEventId, + enumspb.SIGNAL_EXTERNAL_WORKFLOW_EXECUTION_FAILED_CAUSE_SIGNAL_COUNT_LIMIT_EXCEEDED, + request, + ) + return tests.UpdateWorkflowExecutionResponse, nil + }, + ) + 
s.mockClusterMetadata.EXPECT().ClusterNameForFailoverVersion(s.namespaceEntry.IsGlobalNamespace(), s.version).Return(cluster.TestCurrentClusterName).AnyTimes() + + _, _, err := s.transferQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) + s.Nil(err) +} + +func (s *transferQueueActiveTaskExecutorSuite) TestProcessSignalExecution_Duplication() { + mutableState, event, _ := s.setupSignalExternalWorkflowInitiated() + attributes := event.GetSignalExternalWorkflowExecutionInitiatedEventAttributes() + + transferTask := &tasks.SignalExecutionTask{ + WorkflowKey: definition.NewWorkflowKey( + mutableState.GetExecutionInfo().NamespaceId, + mutableState.GetExecutionInfo().WorkflowId, + mutableState.GetExecutionState().RunId, + ), + Version: s.version, + TargetNamespaceID: attributes.GetNamespaceId(), + TargetWorkflowID: attributes.WorkflowExecution.GetWorkflowId(), + TargetRunID: attributes.WorkflowExecution.GetRunId(), + TaskID: int64(59), + TargetChildWorkflowOnly: true, + InitiatedEventID: event.GetEventId(), + } + + event = addSignaledEvent( + mutableState, + event.GetEventId(), + tests.TargetNamespace, + namespace.ID(transferTask.TargetNamespaceID), + attributes.WorkflowExecution.GetWorkflowId(), + attributes.WorkflowExecution.GetRunId(), + "", + ) + // Flush buffered events so real IDs get assigned + mutableState.FlushBufferedEvents() + + persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + + _, _, err := s.transferQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) + s.Nil(err) +} + +func (s *transferQueueActiveTaskExecutorSuite) setupSignalExternalWorkflowInitiated() ( + *workflow.MutableStateImpl, + *historypb.HistoryEvent, + *persistencespb.SignalInfo, +) { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + targetExecution := commonpb.WorkflowExecution{ + WorkflowId: "some random target workflow ID", + RunId: uuid.New(), + } + signalName := "some random signal name" + signalInput := payloads.EncodeString("some random signal input") + signalControl := "some random signal control" + signalHeader := &commonpb.Header{ + Fields: map[string]*commonpb.Payload{"signal header key": payload.EncodeString("signal header value")}, + } + + mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + }, + ) + s.NoError(err) + + wt := addWorkflowTaskScheduledEvent(mutableState) + event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) + wt.StartedEventID = event.GetEventId() + event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, 
wt.StartedEventID, "some random identity") + + event, signalInfo := addRequestSignalInitiatedEvent(mutableState, event.GetEventId(), uuid.New(), + tests.TargetNamespace, tests.TargetNamespaceID, targetExecution.GetWorkflowId(), targetExecution.GetRunId(), signalName, signalInput, + signalControl, signalHeader) + + return mutableState, event, signalInfo +} + +func (s *transferQueueActiveTaskExecutorSuite) validateUpdateExecutionRequestWithSignalExternalFailedEvent( + signalInitiatedEventId int64, + expectedFailedCause enumspb.SignalExternalWorkflowExecutionFailedCause, + request *persistence.UpdateWorkflowExecutionRequest, +) { + s.Len(request.UpdateWorkflowMutation.DeleteSignalInfos, 1) + _, ok := request.UpdateWorkflowMutation.DeleteSignalInfos[signalInitiatedEventId] + s.True(ok) + + numFailedEvent := 0 + s.Len(request.UpdateWorkflowEvents, 1) + for _, event := range request.UpdateWorkflowEvents[0].Events { + if event.EventType != enumspb.EVENT_TYPE_SIGNAL_EXTERNAL_WORKFLOW_EXECUTION_FAILED { + continue + } + attr := event.GetSignalExternalWorkflowExecutionFailedEventAttributes() + s.Equal(expectedFailedCause, attr.GetCause()) + numFailedEvent++ + } + s.Equal(1, numFailedEvent) +} + +func (s *transferQueueActiveTaskExecutorSuite) TestProcessStartChildExecution_Success() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + childWorkflowID := "some random child workflow ID" + childRunID := uuid.New() + childWorkflowType := "some random child workflow type" + childTaskQueueName := "some random child task queue" + + mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + }, + ) + s.Nil(err) + + wt := addWorkflowTaskScheduledEvent(mutableState) + event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) + wt.StartedEventID = event.GetEventId() + event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") + + taskID := int64(59) + + event, ci := addStartChildWorkflowExecutionInitiatedEvent( + mutableState, + event.GetEventId(), + uuid.New(), + s.childNamespace, + s.childNamespaceID, + childWorkflowID, + childWorkflowType, + childTaskQueueName, + nil, + 1*time.Second, + 1*time.Second, + 1*time.Second, + enumspb.PARENT_CLOSE_POLICY_TERMINATE, + ) + + transferTask := &tasks.StartChildExecutionTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + Version: s.version, + TargetNamespaceID: tests.ChildNamespaceID.String(), + TargetWorkflowID: childWorkflowID, + TaskID: taskID, + InitiatedEventID: event.GetEventId(), + VisibilityTimestamp: time.Now().UTC(), + } + + childClock := vclock.NewVectorClock(rand.Int63(), rand.Int31(), rand.Int63()) + persistenceMutableState := 
s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + s.mockHistoryClient.EXPECT().StartWorkflowExecution(gomock.Any(), s.createChildWorkflowExecutionRequest( + s.namespace, + s.childNamespace, + transferTask, + mutableState, + ci, + )).Return(&historyservice.StartWorkflowExecutionResponse{RunId: childRunID, Clock: childClock}, nil) + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) + s.mockClusterMetadata.EXPECT().ClusterNameForFailoverVersion(s.namespaceEntry.IsGlobalNamespace(), s.version).Return(cluster.TestCurrentClusterName).AnyTimes() + currentShardClock := s.mockShard.CurrentVectorClock() + s.mockHistoryClient.EXPECT().ScheduleWorkflowTask(gomock.Any(), gomock.Any()).DoAndReturn( + func(_ context.Context, request *historyservice.ScheduleWorkflowTaskRequest, _ ...grpc.CallOption) (*historyservice.ScheduleWorkflowTaskResponse, error) { + parentClock := request.ParentClock + request.ParentClock = nil + s.Equal(&historyservice.ScheduleWorkflowTaskRequest{ + NamespaceId: tests.ChildNamespaceID.String(), + WorkflowExecution: &commonpb.WorkflowExecution{ + WorkflowId: childWorkflowID, + RunId: childRunID, + }, + IsFirstWorkflowTask: true, + ParentClock: nil, + ChildClock: childClock, + }, request) + cmpResult, err := vclock.Compare(currentShardClock, parentClock) + if err != nil { + return nil, err + } + s.NoError(err) + s.True(cmpResult <= 0) + return &historyservice.ScheduleWorkflowTaskResponse{}, nil + }, + ) + + _, _, err = s.transferQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) + s.Nil(err) +} + +func (s *transferQueueActiveTaskExecutorSuite) TestProcessStartChildExecution_Failure() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + childWorkflowID := "some random child workflow ID" + childWorkflowType := "some random child workflow type" + childTaskQueueName := "some random child task queue" + + mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + ContinueAsNewInitiator: enumspb.CONTINUE_AS_NEW_INITIATOR_UNSPECIFIED, + }, + ) + s.Nil(err) + + wt := addWorkflowTaskScheduledEvent(mutableState) + event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) + wt.StartedEventID = event.GetEventId() + event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") + + taskID := int64(59) + + event, ci := addStartChildWorkflowExecutionInitiatedEvent( + mutableState, + event.GetEventId(), + uuid.New(), + s.childNamespace, + s.childNamespaceID, + 
childWorkflowID, + childWorkflowType, + childTaskQueueName, + nil, + 1*time.Second, + 1*time.Second, + 1*time.Second, + enumspb.PARENT_CLOSE_POLICY_TERMINATE, + ) + + transferTask := &tasks.StartChildExecutionTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + Version: s.version, + TargetNamespaceID: tests.ChildNamespaceID.String(), + TargetWorkflowID: childWorkflowID, + TaskID: taskID, + InitiatedEventID: event.GetEventId(), + VisibilityTimestamp: time.Now().UTC(), + } + + persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + s.mockHistoryClient.EXPECT().StartWorkflowExecution(gomock.Any(), s.createChildWorkflowExecutionRequest( + s.namespace, + s.childNamespace, + transferTask, + mutableState, + ci, + )).Return(nil, serviceerror.NewWorkflowExecutionAlreadyStarted("msg", "", "")) + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) + s.mockClusterMetadata.EXPECT().ClusterNameForFailoverVersion(s.namespaceEntry.IsGlobalNamespace(), s.version).Return(cluster.TestCurrentClusterName).AnyTimes() + + _, _, err = s.transferQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) + s.Nil(err) +} + +func (s *transferQueueActiveTaskExecutorSuite) TestProcessStartChildExecution_Failure_TargetNamespaceNotFound() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + childWorkflowID := "some random child workflow ID" + childWorkflowType := "some random child workflow type" + childTaskQueueName := "some random child task queue" + + mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + ContinueAsNewInitiator: enumspb.CONTINUE_AS_NEW_INITIATOR_UNSPECIFIED, + }, + ) + s.NoError(err) + + wt := addWorkflowTaskScheduledEvent(mutableState) + event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) + wt.StartedEventID = event.GetEventId() + event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") + + taskID := int64(59) + + event, _ = addStartChildWorkflowExecutionInitiatedEvent( + mutableState, + event.GetEventId(), + uuid.New(), + s.namespace, + s.namespaceID, + childWorkflowID, + childWorkflowType, + childTaskQueueName, + nil, + 1*time.Second, + 1*time.Second, + 1*time.Second, + enumspb.PARENT_CLOSE_POLICY_TERMINATE, + ) + + transferTask := &tasks.StartChildExecutionTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + 
execution.GetRunId(), + ), + Version: s.version, + TargetNamespaceID: tests.MissedNamespaceID.String(), + TargetWorkflowID: childWorkflowID, + TaskID: taskID, + InitiatedEventID: event.GetEventId(), + VisibilityTimestamp: time.Now().UTC(), + } + + persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) + s.mockClusterMetadata.EXPECT().ClusterNameForFailoverVersion(s.namespaceEntry.IsGlobalNamespace(), s.version).Return(cluster.TestCurrentClusterName).AnyTimes() + + _, _, err = s.transferQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) + s.NoError(err) +} + +func (s *transferQueueActiveTaskExecutorSuite) TestProcessStartChildExecution_Success_Dup() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + childWorkflowID := "some random child workflow ID" + childRunID := uuid.New() + childWorkflowType := "some random child workflow type" + childTaskQueueName := "some random child task queue" + + mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + }, + ) + s.Nil(err) + + wt := addWorkflowTaskScheduledEvent(mutableState) + event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) + wt.StartedEventID = event.GetEventId() + event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") + + taskID := int64(59) + + event, ci := addStartChildWorkflowExecutionInitiatedEvent( + mutableState, + event.GetEventId(), + uuid.New(), + s.childNamespace, + s.childNamespaceID, + childWorkflowID, + childWorkflowType, + childTaskQueueName, + nil, + 1*time.Second, + 1*time.Second, + 1*time.Second, + enumspb.PARENT_CLOSE_POLICY_TERMINATE, + ) + + transferTask := &tasks.StartChildExecutionTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + Version: s.version, + TargetNamespaceID: tests.ChildNamespaceID.String(), + TargetWorkflowID: childWorkflowID, + TaskID: taskID, + InitiatedEventID: event.GetEventId(), + VisibilityTimestamp: time.Now().UTC(), + } + childClock := vclock.NewVectorClock(rand.Int63(), rand.Int31(), rand.Int63()) + event = addChildWorkflowExecutionStartedEvent(mutableState, event.GetEventId(), childWorkflowID, childRunID, childWorkflowType, childClock) + // Flush buffered events so real IDs get assigned + mutableState.FlushBufferedEvents() + ci.StartedEventId = event.GetEventId() + + persistenceMutableState := 
s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + currentShardClock := s.mockShard.CurrentVectorClock() + s.mockHistoryClient.EXPECT().ScheduleWorkflowTask(gomock.Any(), gomock.Any()).DoAndReturn( + func(_ context.Context, request *historyservice.ScheduleWorkflowTaskRequest, _ ...grpc.CallOption) (*historyservice.ScheduleWorkflowTaskResponse, error) { + parentClock := request.ParentClock + request.ParentClock = nil + s.Equal(&historyservice.ScheduleWorkflowTaskRequest{ + NamespaceId: tests.ChildNamespaceID.String(), + WorkflowExecution: &commonpb.WorkflowExecution{ + WorkflowId: childWorkflowID, + RunId: childRunID, + }, + IsFirstWorkflowTask: true, + ParentClock: nil, + ChildClock: childClock, + }, request) + cmpResult, err := vclock.Compare(currentShardClock, parentClock) + if err != nil { + return nil, err + } + s.NoError(err) + s.True(cmpResult <= 0) + return &historyservice.ScheduleWorkflowTaskResponse{}, nil + }, + ) + + _, _, err = s.transferQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) + s.Nil(err) +} + +func (s *transferQueueActiveTaskExecutorSuite) TestProcessStartChildExecution_Duplication() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + childExecution := commonpb.WorkflowExecution{ + WorkflowId: "some random child workflow ID", + RunId: uuid.New(), + } + childWorkflowType := "some random child workflow type" + childTaskQueueName := "some random child task queue" + + mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + }, + ) + s.Nil(err) + + wt := addWorkflowTaskScheduledEvent(mutableState) + event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) + wt.StartedEventID = event.GetEventId() + event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") + + taskID := int64(59) + + event, ci := addStartChildWorkflowExecutionInitiatedEvent( + mutableState, + event.GetEventId(), + uuid.New(), + s.childNamespace, + s.childNamespaceID, + childExecution.GetWorkflowId(), + childWorkflowType, + childTaskQueueName, + nil, + 1*time.Second, + 1*time.Second, + 1*time.Second, + enumspb.PARENT_CLOSE_POLICY_TERMINATE, + ) + + transferTask := &tasks.StartChildExecutionTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + Version: s.version, + TargetNamespaceID: tests.ChildNamespaceID.String(), + TargetWorkflowID: childExecution.GetWorkflowId(), + TaskID: taskID, + InitiatedEventID: event.GetEventId(), + VisibilityTimestamp: time.Now().UTC(), + } + 
childClock := vclock.NewVectorClock(rand.Int63(), rand.Int31(), rand.Int63()) + event = addChildWorkflowExecutionStartedEvent(mutableState, event.GetEventId(), childExecution.GetWorkflowId(), childExecution.GetRunId(), childWorkflowType, childClock) + ci.StartedEventId = event.GetEventId() + event = addChildWorkflowExecutionCompletedEvent(mutableState, ci.InitiatedEventId, &childExecution, &historypb.WorkflowExecutionCompletedEventAttributes{ + Result: payloads.EncodeString("some random child workflow execution result"), + WorkflowTaskCompletedEventId: transferTask.InitiatedEventID, + }) + // Flush buffered events so real IDs get assigned + mutableState.FlushBufferedEvents() + + persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + + _, _, err = s.transferQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) + s.Error(err, consts.ErrChildExecutionNotFound) +} + +func (s *transferQueueActiveTaskExecutorSuite) TestProcessorStartChildExecution_ChildStarted_ParentClosed() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + childExecution := commonpb.WorkflowExecution{ + WorkflowId: "some random child workflow ID", + RunId: uuid.New(), + } + childWorkflowType := "some random child workflow type" + childTaskQueueName := "some random child task queue" + + mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + }, + ) + s.Nil(err) + + wt := addWorkflowTaskScheduledEvent(mutableState) + event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) + wt.StartedEventID = event.GetEventId() + event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") + + taskID := int64(59) + + event, ci := addStartChildWorkflowExecutionInitiatedEvent( + mutableState, + event.GetEventId(), + uuid.New(), + s.childNamespace, + s.childNamespaceID, + childExecution.GetWorkflowId(), + childWorkflowType, + childTaskQueueName, + nil, + 1*time.Second, + 1*time.Second, + 1*time.Second, + enumspb.PARENT_CLOSE_POLICY_ABANDON, + ) + + transferTask := &tasks.StartChildExecutionTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + Version: s.version, + TargetNamespaceID: tests.ChildNamespaceID.String(), + TargetWorkflowID: childExecution.GetWorkflowId(), + TaskID: taskID, + InitiatedEventID: event.GetEventId(), + VisibilityTimestamp: time.Now().UTC(), + } + childClock := vclock.NewVectorClock(rand.Int63(), rand.Int31(), rand.Int63()) + event = 
addChildWorkflowExecutionStartedEvent(mutableState, event.GetEventId(), childExecution.GetWorkflowId(), childExecution.GetRunId(), childWorkflowType, childClock) + ci.StartedEventId = event.GetEventId() + wt = addWorkflowTaskScheduledEvent(mutableState) + event = addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, "some random identity") + wt.StartedEventID = event.GetEventId() + event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") + event = addCompleteWorkflowEvent(mutableState, event.EventId, nil) + // Flush buffered events so real IDs get assigned + mutableState.FlushBufferedEvents() + + persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + currentShardClock := s.mockShard.CurrentVectorClock() + s.mockHistoryClient.EXPECT().ScheduleWorkflowTask(gomock.Any(), gomock.Any()).DoAndReturn( + func(_ context.Context, request *historyservice.ScheduleWorkflowTaskRequest, _ ...grpc.CallOption) (*historyservice.ScheduleWorkflowTaskResponse, error) { + parentClock := request.ParentClock + request.ParentClock = nil + s.Equal(&historyservice.ScheduleWorkflowTaskRequest{ + NamespaceId: s.childNamespaceID.String(), + WorkflowExecution: &commonpb.WorkflowExecution{ + WorkflowId: childExecution.WorkflowId, + RunId: childExecution.RunId, + }, + IsFirstWorkflowTask: true, + ParentClock: nil, + ChildClock: childClock, + }, request) + cmpResult, err := vclock.Compare(currentShardClock, parentClock) + if err != nil { + return nil, err + } + s.NoError(err) + s.True(cmpResult <= 0) + return &historyservice.ScheduleWorkflowTaskResponse{}, nil + }, + ) + + _, _, err = s.transferQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) + s.Nil(err) +} + +func (s *transferQueueActiveTaskExecutorSuite) TestCopySearchAttributes() { + var input map[string]*commonpb.Payload + s.Nil(copySearchAttributes(input)) + + key := "key" + val := payload.EncodeBytes([]byte{'1', '2', '3'}) + input = map[string]*commonpb.Payload{ + key: val, + } + result := copySearchAttributes(input) + s.Equal(input, result) + result[key].GetData()[0] = '0' + s.Equal(byte('1'), val.GetData()[0]) +} + +func (s *transferQueueActiveTaskExecutorSuite) createAddActivityTaskRequest( + task *tasks.ActivityTask, + ai *persistencespb.ActivityInfo, +) *matchingservice.AddActivityTaskRequest { + return &matchingservice.AddActivityTaskRequest{ + NamespaceId: task.NamespaceID, + Execution: &commonpb.WorkflowExecution{ + WorkflowId: task.WorkflowID, + RunId: task.RunID, + }, + TaskQueue: &taskqueuepb.TaskQueue{ + Name: task.TaskQueue, + Kind: enumspb.TASK_QUEUE_KIND_NORMAL, + }, + ScheduledEventId: task.ScheduledEventID, + ScheduleToStartTimeout: ai.ScheduleToStartTimeout, + Clock: vclock.NewVectorClock(s.mockClusterMetadata.GetClusterID(), s.mockShard.GetShardID(), task.TaskID), + VersionDirective: worker_versioning.MakeDirectiveForActivityTask(nil, false), + } +} + +func (s *transferQueueActiveTaskExecutorSuite) TestPendingCloseExecutionTasks() { + testCases := []struct { + Name string + EnsureCloseBeforeDelete bool + CloseTransferTaskIdSet bool + CloseTaskIsAcked bool + ShouldDelete bool + }{ + { + Name: "skip the check", + EnsureCloseBeforeDelete: false, + ShouldDelete: true, + }, + { + Name: "no task id", 
+ EnsureCloseBeforeDelete: true, + CloseTransferTaskIdSet: false, + ShouldDelete: true, + }, + { + Name: "multicursor queue unacked", + EnsureCloseBeforeDelete: true, + CloseTransferTaskIdSet: true, + CloseTaskIsAcked: false, + ShouldDelete: false, + }, + { + Name: "multicursor queue acked", + EnsureCloseBeforeDelete: true, + CloseTransferTaskIdSet: true, + CloseTaskIsAcked: true, + ShouldDelete: true, + }, + } + for _, c := range testCases { + s.Run(c.Name, func() { + ctrl := gomock.NewController(s.T()) + + mockMutableState := workflow.NewMockMutableState(ctrl) + var closeTransferTaskId int64 + if c.CloseTransferTaskIdSet { + closeTransferTaskId = 10 + } + workflowKey := definition.NewWorkflowKey(uuid.New(), uuid.New(), uuid.New()) + mockMutableState.EXPECT().GetExecutionInfo().Return(&persistencespb.WorkflowExecutionInfo{ + NamespaceId: workflowKey.NamespaceID, + WorkflowId: workflowKey.WorkflowID, + CloseTransferTaskId: closeTransferTaskId, + }).AnyTimes() + var deleteExecutionTaskId int64 = 1 + mockMutableState.EXPECT().GetNextEventID().Return(deleteExecutionTaskId + 1).AnyTimes() + namespaceEntry := tests.GlobalNamespaceEntry + mockMutableState.EXPECT().GetNamespaceEntry().Return(namespaceEntry).AnyTimes() + + mockWorkflowContext := workflow.NewMockContext(ctrl) + mockWorkflowContext.EXPECT().GetWorkflowKey().Return(workflowKey).AnyTimes() + mockWorkflowContext.EXPECT().LoadMutableState(gomock.Any()).Return(mockMutableState, nil) + + mockWorkflowCache := wcache.NewMockCache(ctrl) + mockWorkflowCache.EXPECT().GetOrCreateWorkflowExecution(gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any(), + ).Return(mockWorkflowContext, wcache.ReleaseCacheFunc(func(err error) { + }), nil) + + mockClusterMetadata := cluster.NewMockMetadata(ctrl) + mockClusterMetadata.EXPECT().IsGlobalNamespaceEnabled().Return(false).AnyTimes() + + mockShard := shard.NewMockContext(ctrl) + mockShard.EXPECT().GetConfig().Return(&configs.Config{ + TransferProcessorEnsureCloseBeforeDelete: func() bool { + return c.EnsureCloseBeforeDelete + }, + }).AnyTimes() + mockShard.EXPECT().GetClusterMetadata().Return(mockClusterMetadata).AnyTimes() + mockMutableState.EXPECT().GetLastWriteVersion().Return(tests.Version, nil).AnyTimes() + mockNamespaceRegistry := namespace.NewMockRegistry(ctrl) + mockNamespaceRegistry.EXPECT().GetNamespaceByID(gomock.Any()).Return(namespaceEntry, nil) + mockShard.EXPECT().GetNamespaceRegistry().Return(mockNamespaceRegistry) + + var highWatermarkTaskId int64 + if c.CloseTaskIsAcked { + highWatermarkTaskId = closeTransferTaskId + 1 + } else { + highWatermarkTaskId = closeTransferTaskId + } + mockShard.EXPECT().GetQueueState(tasks.CategoryTransfer).Return(&persistencespb.QueueState{ + ReaderStates: nil, + ExclusiveReaderHighWatermark: &persistencespb.TaskKey{ + FireTime: timestamp.TimePtr(tasks.DefaultFireTime), + TaskId: highWatermarkTaskId, + }, + }, true).AnyTimes() + + mockWorkflowDeleteManager := deletemanager.NewMockDeleteManager(ctrl) + if c.ShouldDelete { + mockWorkflowDeleteManager.EXPECT().DeleteWorkflowExecution(gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()) + } + + executor := &transferQueueActiveTaskExecutor{ + transferQueueTaskExecutorBase: &transferQueueTaskExecutorBase{ + cache: mockWorkflowCache, + config: mockShard.GetConfig(), + metricHandler: metrics.NoopMetricsHandler, + shard: mockShard, + workflowDeleteManager: mockWorkflowDeleteManager, + }, + } + + task := &tasks.DeleteExecutionTask{ + WorkflowKey: workflowKey, + TaskID: 
deleteExecutionTaskId, + Version: tests.Version, + } + executable := queues.NewMockExecutable(ctrl) + executable.EXPECT().GetTask().Return(task) + _, _, err := executor.Execute(context.Background(), executable) + if c.ShouldDelete { + s.NoError(err) + } else { + s.Error(err) + s.Assert().ErrorIs(err, consts.ErrDependencyTaskNotCompleted) + } + }) + } +} + +func (s *transferQueueActiveTaskExecutorSuite) createAddWorkflowTaskRequest( + task *tasks.WorkflowTask, + mutableState workflow.MutableState, +) *matchingservice.AddWorkflowTaskRequest { + taskQueue := &taskqueuepb.TaskQueue{ + Name: task.TaskQueue, + Kind: enumspb.TASK_QUEUE_KIND_NORMAL, + } + executionInfo := mutableState.GetExecutionInfo() + timeout := executionInfo.WorkflowRunTimeout + if executionInfo.TaskQueue != task.TaskQueue { + taskQueue.Kind = enumspb.TASK_QUEUE_KIND_STICKY + taskQueue.NormalName = executionInfo.TaskQueue + timeout = executionInfo.StickyScheduleToStartTimeout + } + + directive := worker_versioning.MakeDirectiveForWorkflowTask( + mutableState.GetWorkerVersionStamp(), + mutableState.GetLastWorkflowTaskStartedEventID(), + ) + + return &matchingservice.AddWorkflowTaskRequest{ + NamespaceId: task.NamespaceID, + Execution: &commonpb.WorkflowExecution{ + WorkflowId: task.WorkflowID, + RunId: task.RunID, + }, + TaskQueue: taskQueue, + ScheduledEventId: task.ScheduledEventID, + ScheduleToStartTimeout: timeout, + Clock: vclock.NewVectorClock(s.mockClusterMetadata.GetClusterID(), s.mockShard.GetShardID(), task.TaskID), + VersionDirective: directive, + } +} + +func (s *transferQueueActiveTaskExecutorSuite) createRequestCancelWorkflowExecutionRequest( + targetNamespace namespace.Name, + task *tasks.CancelExecutionTask, + rci *persistencespb.RequestCancelInfo, + attributes *historypb.RequestCancelExternalWorkflowExecutionInitiatedEventAttributes, +) *historyservice.RequestCancelWorkflowExecutionRequest { + sourceExecution := commonpb.WorkflowExecution{ + WorkflowId: task.WorkflowID, + RunId: task.RunID, + } + targetExecution := commonpb.WorkflowExecution{ + WorkflowId: task.TargetWorkflowID, + RunId: task.TargetRunID, + } + + return &historyservice.RequestCancelWorkflowExecutionRequest{ + NamespaceId: task.TargetNamespaceID, + CancelRequest: &workflowservice.RequestCancelWorkflowExecutionRequest{ + Namespace: targetNamespace.String(), + WorkflowExecution: &targetExecution, + Identity: consts.IdentityHistoryService, + // Use the same request ID to dedupe RequestCancelWorkflowExecution calls + RequestId: rci.GetCancelRequestId(), + Reason: attributes.Reason, + }, + ExternalInitiatedEventId: task.InitiatedEventID, + ExternalWorkflowExecution: &sourceExecution, + ChildWorkflowOnly: task.TargetChildWorkflowOnly, + } +} + +func (s *transferQueueActiveTaskExecutorSuite) createSignalWorkflowExecutionRequest( + targetNamespace namespace.Name, + task *tasks.SignalExecutionTask, + si *persistencespb.SignalInfo, + attributes *historypb.SignalExternalWorkflowExecutionInitiatedEventAttributes, +) *historyservice.SignalWorkflowExecutionRequest { + sourceExecution := commonpb.WorkflowExecution{ + WorkflowId: task.WorkflowID, + RunId: task.RunID, + } + targetExecution := commonpb.WorkflowExecution{ + WorkflowId: task.TargetWorkflowID, + RunId: task.TargetRunID, + } + + return &historyservice.SignalWorkflowExecutionRequest{ + NamespaceId: task.TargetNamespaceID, + SignalRequest: &workflowservice.SignalWorkflowExecutionRequest{ + Namespace: targetNamespace.String(), + WorkflowExecution: &targetExecution, + Identity: 
consts.IdentityHistoryService, + SignalName: attributes.SignalName, + Input: attributes.Input, + RequestId: si.GetRequestId(), + Control: attributes.Control, + Header: attributes.Header, + }, + ExternalWorkflowExecution: &sourceExecution, + ChildWorkflowOnly: task.TargetChildWorkflowOnly, + } +} + +func (s *transferQueueActiveTaskExecutorSuite) createChildWorkflowExecutionRequest( + namespace namespace.Name, + childNamespace namespace.Name, + task *tasks.StartChildExecutionTask, + mutableState workflow.MutableState, + ci *persistencespb.ChildExecutionInfo, +) *historyservice.StartWorkflowExecutionRequest { + event, err := mutableState.GetChildExecutionInitiatedEvent(context.Background(), task.InitiatedEventID) + s.NoError(err) + attributes := event.GetStartChildWorkflowExecutionInitiatedEventAttributes() + execution := commonpb.WorkflowExecution{ + WorkflowId: task.WorkflowID, + RunId: task.RunID, + } + now := s.timeSource.Now().UTC() + return &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: task.TargetNamespaceID, + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + Namespace: childNamespace.String(), + WorkflowId: attributes.WorkflowId, + WorkflowType: attributes.WorkflowType, + TaskQueue: attributes.TaskQueue, + Input: attributes.Input, + WorkflowExecutionTimeout: attributes.WorkflowExecutionTimeout, + WorkflowRunTimeout: attributes.WorkflowRunTimeout, + WorkflowTaskTimeout: attributes.WorkflowTaskTimeout, + // Use the same request ID to dedupe StartWorkflowExecution calls + RequestId: ci.CreateRequestId, + WorkflowIdReusePolicy: attributes.WorkflowIdReusePolicy, + }, + ParentExecutionInfo: &workflowspb.ParentExecutionInfo{ + NamespaceId: task.NamespaceID, + Namespace: tests.Namespace.String(), + Execution: &execution, + InitiatedId: task.InitiatedEventID, + InitiatedVersion: task.Version, + Clock: vclock.NewVectorClock(s.mockClusterMetadata.GetClusterID(), s.mockShard.GetShardID(), task.TaskID), + }, + FirstWorkflowTaskBackoff: backoff.GetBackoffForNextScheduleNonNegative(attributes.GetCronSchedule(), now, now), + ContinueAsNewInitiator: enumspb.CONTINUE_AS_NEW_INITIATOR_UNSPECIFIED, + WorkflowExecutionExpirationTime: timestamp.TimePtr(now.Add(*attributes.WorkflowExecutionTimeout).Round(time.Millisecond)), + } +} + +func (s *transferQueueActiveTaskExecutorSuite) createPersistenceMutableState( + ms workflow.MutableState, + lastEventID int64, + lastEventVersion int64, +) *persistencespb.WorkflowMutableState { + currentVersionHistory, err := versionhistory.GetCurrentVersionHistory(ms.GetExecutionInfo().GetVersionHistories()) + s.NoError(err) + err = versionhistory.AddOrUpdateVersionHistoryItem(currentVersionHistory, versionhistory.NewVersionHistoryItem( + lastEventID, lastEventVersion, + )) + s.NoError(err) + return workflow.TestCloneToProto(ms) +} + +func (s *transferQueueActiveTaskExecutorSuite) newTaskExecutable( + task tasks.Task, +) queues.Executable { + return queues.NewExecutable( + queues.DefaultReaderId, + task, + s.transferQueueActiveTaskExecutor, + nil, + nil, + queues.NewNoopPriorityAssigner(), + s.mockShard.GetTimeSource(), + s.mockNamespaceCache, + s.mockClusterMetadata, + nil, + metrics.NoopMetricsHandler, + func() bool { return false }, + ) +} diff -Nru temporal-1.21.5-1/src/service/history/transfer_queue_factory.go temporal-1.22.5/src/service/history/transfer_queue_factory.go --- temporal-1.21.5-1/src/service/history/transfer_queue_factory.go 1970-01-01 00:00:00.000000000 +0000 +++ 
temporal-1.22.5/src/service/history/transfer_queue_factory.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,201 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package history + +import ( + "context" + + "go.uber.org/fx" + + "go.temporal.io/server/api/historyservice/v1" + "go.temporal.io/server/client" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/log/tag" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/persistence/visibility/manager" + "go.temporal.io/server/common/resource" + "go.temporal.io/server/common/sdk" + "go.temporal.io/server/common/xdc" + "go.temporal.io/server/service/history/queues" + "go.temporal.io/server/service/history/shard" + "go.temporal.io/server/service/history/tasks" + wcache "go.temporal.io/server/service/history/workflow/cache" + "go.temporal.io/server/service/worker/archiver" +) + +const ( + transferQueuePersistenceMaxRPSRatio = 0.3 +) + +type ( + transferQueueFactoryParams struct { + fx.In + + QueueFactoryBaseParams + + ClientBean client.Bean + ArchivalClient archiver.Client + SdkClientFactory sdk.ClientFactory + HistoryRawClient resource.HistoryRawClient + MatchingRawClient resource.MatchingRawClient + VisibilityManager manager.VisibilityManager + } + + transferQueueFactory struct { + transferQueueFactoryParams + QueueFactoryBase + } +) + +func NewTransferQueueFactory( + params transferQueueFactoryParams, +) QueueFactory { + return &transferQueueFactory{ + transferQueueFactoryParams: params, + QueueFactoryBase: QueueFactoryBase{ + HostScheduler: queues.NewNamespacePriorityScheduler( + params.ClusterMetadata.GetCurrentClusterName(), + queues.NamespacePrioritySchedulerOptions{ + WorkerCount: params.Config.TransferProcessorSchedulerWorkerCount, + ActiveNamespaceWeights: params.Config.TransferProcessorSchedulerActiveRoundRobinWeights, + StandbyNamespaceWeights: params.Config.TransferProcessorSchedulerStandbyRoundRobinWeights, + EnableRateLimiter: params.Config.TaskSchedulerEnableRateLimiter, + EnableRateLimiterShadowMode: params.Config.TaskSchedulerEnableRateLimiterShadowMode, + DispatchThrottleDuration: params.Config.TaskSchedulerThrottleDuration, + }, + params.NamespaceRegistry, + params.SchedulerRateLimiter, + params.TimeSource, + params.MetricsHandler.WithTags(metrics.OperationTag(metrics.OperationTransferQueueProcessorScope)), + 
params.Logger, + ), + HostPriorityAssigner: queues.NewPriorityAssigner(), + HostReaderRateLimiter: queues.NewReaderPriorityRateLimiter( + NewHostRateLimiterRateFn( + params.Config.TransferProcessorMaxPollHostRPS, + params.Config.PersistenceMaxQPS, + transferQueuePersistenceMaxRPSRatio, + ), + int64(params.Config.QueueMaxReaderCount()), + ), + }, + } +} + +func (f *transferQueueFactory) CreateQueue( + shard shard.Context, + workflowCache wcache.Cache, +) queues.Queue { + logger := log.With(shard.GetLogger(), tag.ComponentTransferQueue) + metricsHandler := f.MetricsHandler.WithTags(metrics.OperationTag(metrics.OperationTransferQueueProcessorScope)) + + rescheduler := queues.NewRescheduler( + f.HostScheduler, + shard.GetTimeSource(), + logger, + metricsHandler, + ) + + currentClusterName := f.ClusterMetadata.GetCurrentClusterName() + activeExecutor := newTransferQueueActiveTaskExecutor( + shard, + workflowCache, + f.ArchivalClient, + f.SdkClientFactory, + logger, + f.MetricsHandler, + f.Config, + f.HistoryRawClient, + f.MatchingRawClient, + f.VisibilityManager, + ) + + standbyExecutor := newTransferQueueStandbyTaskExecutor( + shard, + workflowCache, + f.ArchivalClient, + xdc.NewNDCHistoryResender( + f.NamespaceRegistry, + f.ClientBean, + func(ctx context.Context, request *historyservice.ReplicateEventsV2Request) error { + engine, err := shard.GetEngine(ctx) + if err != nil { + return err + } + return engine.ReplicateEventsV2(ctx, request) + }, + shard.GetPayloadSerializer(), + f.Config.StandbyTaskReReplicationContextTimeout, + logger, + ), + logger, + f.MetricsHandler, + currentClusterName, + f.HistoryRawClient, + f.MatchingRawClient, + f.VisibilityManager, + ) + + executor := queues.NewActiveStandbyExecutor( + currentClusterName, + f.NamespaceRegistry, + activeExecutor, + standbyExecutor, + logger, + ) + if f.ExecutorWrapper != nil { + executor = f.ExecutorWrapper.Wrap(executor) + } + + return queues.NewImmediateQueue( + shard, + tasks.CategoryTransfer, + f.HostScheduler, + rescheduler, + f.HostPriorityAssigner, + executor, + &queues.Options{ + ReaderOptions: queues.ReaderOptions{ + BatchSize: f.Config.TransferTaskBatchSize, + MaxPendingTasksCount: f.Config.QueuePendingTaskMaxCount, + PollBackoffInterval: f.Config.TransferProcessorPollBackoffInterval, + }, + MonitorOptions: queues.MonitorOptions{ + PendingTasksCriticalCount: f.Config.QueuePendingTaskCriticalCount, + ReaderStuckCriticalAttempts: f.Config.QueueReaderStuckCriticalAttempts, + SliceCountCriticalThreshold: f.Config.QueueCriticalSlicesCount, + }, + MaxPollRPS: f.Config.TransferProcessorMaxPollRPS, + MaxPollInterval: f.Config.TransferProcessorMaxPollInterval, + MaxPollIntervalJitterCoefficient: f.Config.TransferProcessorMaxPollIntervalJitterCoefficient, + CheckpointInterval: f.Config.TransferProcessorUpdateAckInterval, + CheckpointIntervalJitterCoefficient: f.Config.TransferProcessorUpdateAckIntervalJitterCoefficient, + MaxReaderCount: f.Config.QueueMaxReaderCount, + }, + f.HostReaderRateLimiter, + logger, + metricsHandler, + ) +} diff -Nru temporal-1.21.5-1/src/service/history/transfer_queue_standby_task_executor.go temporal-1.22.5/src/service/history/transfer_queue_standby_task_executor.go --- temporal-1.21.5-1/src/service/history/transfer_queue_standby_task_executor.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/service/history/transfer_queue_standby_task_executor.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,684 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. 
+// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package history + +import ( + "context" + "errors" + "time" + + commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" + "go.temporal.io/api/serviceerror" + taskqueuepb "go.temporal.io/api/taskqueue/v1" + + "go.temporal.io/server/api/historyservice/v1" + "go.temporal.io/server/common" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/log/tag" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/namespace" + "go.temporal.io/server/common/persistence/visibility/manager" + "go.temporal.io/server/common/primitives/timestamp" + "go.temporal.io/server/common/resource" + "go.temporal.io/server/common/xdc" + "go.temporal.io/server/service/history/consts" + "go.temporal.io/server/service/history/ndc" + "go.temporal.io/server/service/history/queues" + "go.temporal.io/server/service/history/shard" + "go.temporal.io/server/service/history/tasks" + "go.temporal.io/server/service/history/workflow" + wcache "go.temporal.io/server/service/history/workflow/cache" + "go.temporal.io/server/service/worker/archiver" +) + +type ( + transferQueueStandbyTaskExecutor struct { + *transferQueueTaskExecutorBase + + clusterName string + nDCHistoryResender xdc.NDCHistoryResender + } +) + +var ( + errVerificationFailed = errors.New("failed to verify target workflow state") +) + +func newTransferQueueStandbyTaskExecutor( + shard shard.Context, + workflowCache wcache.Cache, + archivalClient archiver.Client, + nDCHistoryResender xdc.NDCHistoryResender, + logger log.Logger, + metricProvider metrics.Handler, + clusterName string, + historyRawClient resource.HistoryRawClient, + matchingRawClient resource.MatchingRawClient, + visibilityManager manager.VisibilityManager, +) queues.Executor { + return &transferQueueStandbyTaskExecutor{ + transferQueueTaskExecutorBase: newTransferQueueTaskExecutorBase( + shard, + workflowCache, + archivalClient, + logger, + metricProvider, + historyRawClient, + matchingRawClient, + visibilityManager, + ), + clusterName: clusterName, + nDCHistoryResender: nDCHistoryResender, + } +} + +func (t *transferQueueStandbyTaskExecutor) Execute( + ctx context.Context, + executable queues.Executable, +) ([]metrics.Tag, bool, error) { + task := executable.GetTask() + taskType := queues.GetStandbyTransferTaskTypeTagValue(task) + metricsTags := []metrics.Tag{ + getNamespaceTagByID(t.shard.GetNamespaceRegistry(), 
task.GetNamespaceID()), + metrics.TaskTypeTag(taskType), + metrics.OperationTag(taskType), // for backward compatibility + } + + var err error + switch task := task.(type) { + case *tasks.ActivityTask: + err = t.processActivityTask(ctx, task) + case *tasks.WorkflowTask: + err = t.processWorkflowTask(ctx, task) + case *tasks.CancelExecutionTask: + err = t.processCancelExecution(ctx, task) + case *tasks.SignalExecutionTask: + err = t.processSignalExecution(ctx, task) + case *tasks.StartChildExecutionTask: + err = t.processStartChildExecution(ctx, task) + case *tasks.ResetWorkflowTask: + // no reset needed for standby + // TODO: add error logs + err = nil + case *tasks.CloseExecutionTask: + err = t.processCloseExecution(ctx, task) + case *tasks.DeleteExecutionTask: + err = t.processDeleteExecutionTask(ctx, task, false) + default: + err = errUnknownTransferTask + } + + return metricsTags, false, err +} + +func (t *transferQueueStandbyTaskExecutor) processActivityTask( + ctx context.Context, + transferTask *tasks.ActivityTask, +) error { + processTaskIfClosed := false + actionFn := func(_ context.Context, wfContext workflow.Context, mutableState workflow.MutableState) (interface{}, error) { + activityInfo, ok := mutableState.GetActivityInfo(transferTask.ScheduledEventID) + if !ok { + return nil, nil + } + + err := CheckTaskVersion(t.shard, t.logger, mutableState.GetNamespaceEntry(), activityInfo.Version, transferTask.Version, transferTask) + if err != nil { + return nil, err + } + + if activityInfo.StartedEventId == common.EmptyEventID { + return newActivityTaskPostActionInfo(mutableState, *activityInfo.ScheduleToStartTimeout, activityInfo.UseCompatibleVersion) + } + + return nil, nil + } + + return t.processTransfer( + ctx, + processTaskIfClosed, + transferTask, + actionFn, + getStandbyPostActionFn( + transferTask, + t.getCurrentTime, + t.config.StandbyTaskMissingEventsResendDelay(transferTask.GetType()), + t.config.StandbyTaskMissingEventsDiscardDelay(transferTask.GetType()), + t.fetchHistoryFromRemote, + t.pushActivity, + ), + ) +} + +func (t *transferQueueStandbyTaskExecutor) processWorkflowTask( + ctx context.Context, + transferTask *tasks.WorkflowTask, +) error { + actionFn := func(_ context.Context, wfContext workflow.Context, mutableState workflow.MutableState) (interface{}, error) { + wtInfo := mutableState.GetWorkflowTaskByID(transferTask.ScheduledEventID) + if wtInfo == nil { + return nil, nil + } + + _, scheduleToStartTimeout := mutableState.TaskQueueScheduleToStartTimeout(transferTask.TaskQueue) + // The task queue is ignored here because on standby we always use the original normal task queue, + // disregarding transferTask.TaskQueue, which could be sticky. + // NOTE: the scheduleToStart timeout is respected. If the workflow was sticky before the namespace became standby, + // transferTask.TaskQueue is sticky and a timer was already created for this timeout. + // Use this sticky timeout as the TTL.
+ taskQueue := &taskqueuepb.TaskQueue{ + Name: mutableState.GetExecutionInfo().TaskQueue, + Kind: enumspb.TASK_QUEUE_KIND_NORMAL, + } + + err := CheckTaskVersion(t.shard, t.logger, mutableState.GetNamespaceEntry(), wtInfo.Version, transferTask.Version, transferTask) + if err != nil { + return nil, err + } + + if wtInfo.StartedEventID == common.EmptyEventID { + return newWorkflowTaskPostActionInfo( + mutableState, + scheduleToStartTimeout, + *taskQueue, + ) + } + + return nil, nil + } + + return t.processTransfer( + ctx, + false, + transferTask, + actionFn, + getStandbyPostActionFn( + transferTask, + t.getCurrentTime, + t.config.StandbyTaskMissingEventsResendDelay(transferTask.GetType()), + t.config.StandbyTaskMissingEventsDiscardDelay(transferTask.GetType()), + t.fetchHistoryFromRemote, + t.pushWorkflowTask, + ), + ) +} + +func (t *transferQueueStandbyTaskExecutor) processCloseExecution( + ctx context.Context, + transferTask *tasks.CloseExecutionTask, +) error { + processTaskIfClosed := true + actionFn := func(ctx context.Context, wfContext workflow.Context, mutableState workflow.MutableState) (interface{}, error) { + if mutableState.IsWorkflowExecutionRunning() { + // this can happen if workflow is reset. + return nil, nil + } + + wfCloseTime, err := mutableState.GetWorkflowCloseTime(ctx) + if err != nil { + return nil, err + } + executionInfo := mutableState.GetExecutionInfo() + executionState := mutableState.GetExecutionState() + workflowTypeName := executionInfo.WorkflowTypeName + workflowStatus := executionState.Status + workflowHistoryLength := mutableState.GetNextEventID() - 1 + workflowStartTime := timestamp.TimeValue(mutableState.GetExecutionInfo().GetStartTime()) + workflowExecutionTime := timestamp.TimeValue(mutableState.GetExecutionInfo().GetExecutionTime()) + visibilityMemo := getWorkflowMemo(executionInfo.Memo) + searchAttr := getSearchAttributes(executionInfo.SearchAttributes) + + lastWriteVersion, err := mutableState.GetLastWriteVersion() + if err != nil { + return nil, err + } + err = CheckTaskVersion(t.shard, t.logger, mutableState.GetNamespaceEntry(), lastWriteVersion, transferTask.Version, transferTask) + if err != nil { + return nil, err + } + + if !transferTask.CanSkipVisibilityArchival { + if err := t.archiveVisibility( + ctx, + namespace.ID(transferTask.NamespaceID), + transferTask.WorkflowID, + transferTask.RunID, + workflowTypeName, + workflowStartTime, + workflowExecutionTime, + timestamp.TimeValue(wfCloseTime), + workflowStatus, + workflowHistoryLength, + visibilityMemo, + searchAttr, + ); err != nil { + return nil, err + } + } + + // verify if parent got the completion event + verifyCompletionRecorded := mutableState.HasParentExecution() && executionInfo.NewExecutionRunId == "" + if verifyCompletionRecorded { + // load close event only if needed. 
+ completionEvent, err := mutableState.GetCompletionEvent(ctx) + if err != nil { + return nil, err + } + + verifyCompletionRecorded = verifyCompletionRecorded && !ndc.IsTerminatedByResetter(completionEvent) + } + + if verifyCompletionRecorded { + _, err := t.historyRawClient.VerifyChildExecutionCompletionRecorded(ctx, &historyservice.VerifyChildExecutionCompletionRecordedRequest{ + NamespaceId: executionInfo.ParentNamespaceId, + ParentExecution: &commonpb.WorkflowExecution{ + WorkflowId: executionInfo.ParentWorkflowId, + RunId: executionInfo.ParentRunId, + }, + ChildExecution: &commonpb.WorkflowExecution{ + WorkflowId: transferTask.WorkflowID, + RunId: transferTask.RunID, + }, + ParentInitiatedId: executionInfo.ParentInitiatedId, + ParentInitiatedVersion: executionInfo.ParentInitiatedVersion, + Clock: executionInfo.ParentClock, + }) + switch err.(type) { + case nil, *serviceerror.NamespaceNotFound, *serviceerror.Unimplemented: + return nil, nil + case *serviceerror.NotFound, *serviceerror.WorkflowNotReady: + return verifyChildCompletionRecordedInfo, nil + default: + t.logger.Error("Failed to verify child execution completion recorded", + tag.WorkflowNamespaceID(transferTask.GetNamespaceID()), + tag.WorkflowID(transferTask.GetWorkflowID()), + tag.WorkflowRunID(transferTask.GetRunID()), + tag.Error(err), + ) + + // NOTE: we do not return the error here because that would cause the mutable state to be cleared and reloaded upon retry, + // which is unnecessary as the error is in the target workflow, not this workflow. + return nil, errVerificationFailed + } + } + return nil, nil + } + + return t.processTransfer( + ctx, + processTaskIfClosed, + transferTask, + actionFn, + getStandbyPostActionFn( + transferTask, + t.getCurrentTime, + t.config.StandbyTaskMissingEventsResendDelay(transferTask.GetType()), + t.config.StandbyTaskMissingEventsDiscardDelay(transferTask.GetType()), + standbyTaskPostActionNoOp, + standbyTransferTaskPostActionTaskDiscarded, + ), + ) +} + +func (t *transferQueueStandbyTaskExecutor) processCancelExecution( + ctx context.Context, + transferTask *tasks.CancelExecutionTask, +) error { + processTaskIfClosed := false + actionFn := func(_ context.Context, wfContext workflow.Context, mutableState workflow.MutableState) (interface{}, error) { + requestCancelInfo, ok := mutableState.GetRequestCancelInfo(transferTask.InitiatedEventID) + if !ok { + return nil, nil + } + + err := CheckTaskVersion(t.shard, t.logger, mutableState.GetNamespaceEntry(), requestCancelInfo.Version, transferTask.Version, transferTask) + if err != nil { + return nil, err + } + + return getHistoryResendInfo(mutableState) + } + + return t.processTransfer( + ctx, + processTaskIfClosed, + transferTask, + actionFn, + getStandbyPostActionFn( + transferTask, + t.getCurrentTime, + t.config.StandbyTaskMissingEventsResendDelay(transferTask.GetType()), + t.config.StandbyTaskMissingEventsDiscardDelay(transferTask.GetType()), + t.fetchHistoryFromRemote, + standbyTransferTaskPostActionTaskDiscarded, + ), + ) +} + +func (t *transferQueueStandbyTaskExecutor) processSignalExecution( + ctx context.Context, + transferTask *tasks.SignalExecutionTask, +) error { + processTaskIfClosed := false + actionFn := func(_ context.Context, wfContext workflow.Context, mutableState workflow.MutableState) (interface{}, error) { + signalInfo, ok := mutableState.GetSignalInfo(transferTask.InitiatedEventID) + if !ok { + return nil, nil + } + + err := CheckTaskVersion(t.shard, t.logger, mutableState.GetNamespaceEntry(), signalInfo.Version, 
transferTask.Version, transferTask) + if err != nil { + return nil, err + } + + return getHistoryResendInfo(mutableState) + } + + return t.processTransfer( + ctx, + processTaskIfClosed, + transferTask, + actionFn, + getStandbyPostActionFn( + transferTask, + t.getCurrentTime, + t.config.StandbyTaskMissingEventsResendDelay(transferTask.GetType()), + t.config.StandbyTaskMissingEventsDiscardDelay(transferTask.GetType()), + t.fetchHistoryFromRemote, + standbyTransferTaskPostActionTaskDiscarded, + ), + ) +} + +func (t *transferQueueStandbyTaskExecutor) processStartChildExecution( + ctx context.Context, + transferTask *tasks.StartChildExecutionTask, +) error { + processTaskIfClosed := true + actionFn := func(ctx context.Context, wfContext workflow.Context, mutableState workflow.MutableState) (interface{}, error) { + childWorkflowInfo, ok := mutableState.GetChildExecutionInfo(transferTask.InitiatedEventID) + if !ok { + return nil, nil + } + + err := CheckTaskVersion(t.shard, t.logger, mutableState.GetNamespaceEntry(), childWorkflowInfo.Version, transferTask.Version, transferTask) + if err != nil { + return nil, err + } + + workflowClosed := !mutableState.IsWorkflowExecutionRunning() + childStarted := childWorkflowInfo.StartedEventId != common.EmptyEventID + childAbandon := childWorkflowInfo.ParentClosePolicy == enumspb.PARENT_CLOSE_POLICY_ABANDON + + if workflowClosed && !(childStarted && childAbandon) { + // NOTE: ideally, for the workflowClosed, child not started, parent close policy is abandon case, + // we should continue to start the child workflow in the active cluster, so the standby logic also needs to + // perform the verification. However, we can't do that due to some technical reasons. + // Please check the comments in processStartChildExecution in transferQueueActiveTaskExecutor.go + // for details. + return nil, nil + } + + if !childStarted { + historyResendInfo, err := getHistoryResendInfo(mutableState) + if err != nil { + return nil, err + } + return &startChildExecutionPostActionInfo{ + historyResendInfo: historyResendInfo, + }, nil + } + + _, err = t.historyRawClient.VerifyFirstWorkflowTaskScheduled(ctx, &historyservice.VerifyFirstWorkflowTaskScheduledRequest{ + NamespaceId: transferTask.TargetNamespaceID, + WorkflowExecution: &commonpb.WorkflowExecution{ + WorkflowId: childWorkflowInfo.StartedWorkflowId, + RunId: childWorkflowInfo.StartedRunId, + }, + Clock: childWorkflowInfo.Clock, + }) + switch err.(type) { + case nil, *serviceerror.NamespaceNotFound, *serviceerror.Unimplemented: + return nil, nil + case *serviceerror.NotFound, *serviceerror.WorkflowNotReady: + return &startChildExecutionPostActionInfo{}, nil + default: + t.logger.Error("Failed to verify first workflow task scheduled", + tag.WorkflowNamespaceID(transferTask.GetNamespaceID()), + tag.WorkflowID(transferTask.GetWorkflowID()), + tag.WorkflowRunID(transferTask.GetRunID()), + tag.Error(err), + ) + + // NOTE: we do not return the error here because that would cause the mutable state to be cleared and reloaded upon retry, + // which is unnecessary as the error is in the target workflow, not this workflow.
+ return nil, errVerificationFailed + } + } + + return t.processTransfer( + ctx, + processTaskIfClosed, + transferTask, + actionFn, + getStandbyPostActionFn( + transferTask, + t.getCurrentTime, + t.config.StandbyTaskMissingEventsResendDelay(transferTask.GetType()), + t.config.StandbyTaskMissingEventsDiscardDelay(transferTask.GetType()), + t.startChildExecutionResendPostAction, + standbyTransferTaskPostActionTaskDiscarded, + ), + ) +} + +func (t *transferQueueStandbyTaskExecutor) processTransfer( + ctx context.Context, + processTaskIfClosed bool, + taskInfo tasks.Task, + actionFn standbyActionFn, + postActionFn standbyPostActionFn, +) (retError error) { + ctx, cancel := context.WithTimeout(ctx, taskTimeout) + defer cancel() + + nsRecord, err := t.shard.GetNamespaceRegistry().GetNamespaceByID(namespace.ID(taskInfo.GetNamespaceID())) + if err != nil { + return err + } + if !nsRecord.IsOnCluster(t.clusterName) { + // namespace is not replicated to local cluster, ignore corresponding tasks + return nil + } + + weContext, release, err := getWorkflowExecutionContextForTask(ctx, t.cache, taskInfo) + if err != nil { + return err + } + defer func() { + if retError == consts.ErrTaskRetry || retError == errVerificationFailed { + release(nil) + } else { + release(retError) + } + }() + + mutableState, err := loadMutableStateForTransferTask(ctx, weContext, taskInfo, t.metricHandler, t.logger) + if err != nil || mutableState == nil { + return err + } + + if !mutableState.IsWorkflowExecutionRunning() && !processTaskIfClosed { + // workflow already finished, no need to process transfer task. + return nil + } + + historyResendInfo, err := actionFn(ctx, weContext, mutableState) + if err != nil { + return err + } + + // NOTE: do not access anything related mutable state after this lock release + release(nil) + return postActionFn(ctx, taskInfo, historyResendInfo, t.logger) +} + +func (t *transferQueueStandbyTaskExecutor) pushActivity( + ctx context.Context, + task tasks.Task, + postActionInfo interface{}, + logger log.Logger, +) error { + if postActionInfo == nil { + return nil + } + + pushActivityInfo := postActionInfo.(*activityTaskPostActionInfo) + timeout := pushActivityInfo.activityTaskScheduleToStartTimeout + return t.transferQueueTaskExecutorBase.pushActivity( + ctx, + task.(*tasks.ActivityTask), + &timeout, + pushActivityInfo.versionDirective, + ) +} + +func (t *transferQueueStandbyTaskExecutor) pushWorkflowTask( + ctx context.Context, + task tasks.Task, + postActionInfo interface{}, + logger log.Logger, +) error { + if postActionInfo == nil { + return nil + } + + pushwtInfo := postActionInfo.(*workflowTaskPostActionInfo) + return t.transferQueueTaskExecutorBase.pushWorkflowTask( + ctx, + task.(*tasks.WorkflowTask), + &pushwtInfo.taskqueue, + pushwtInfo.workflowTaskScheduleToStartTimeout, + pushwtInfo.versionDirective, + ) +} + +func (t *transferQueueStandbyTaskExecutor) startChildExecutionResendPostAction( + ctx context.Context, + taskInfo tasks.Task, + postActionInfo interface{}, + log log.Logger, +) error { + if postActionInfo == nil { + return nil + } + + historyResendInfo := postActionInfo.(*startChildExecutionPostActionInfo).historyResendInfo + if historyResendInfo != nil { + return t.fetchHistoryFromRemote(ctx, taskInfo, historyResendInfo, log) + } + + return standbyTaskPostActionNoOp(ctx, taskInfo, postActionInfo, log) +} + +func (t *transferQueueStandbyTaskExecutor) fetchHistoryFromRemote( + ctx context.Context, + taskInfo tasks.Task, + postActionInfo interface{}, + logger log.Logger, +) error 
{ + var resendInfo *historyResendInfo + switch postActionInfo := postActionInfo.(type) { + case nil: + return nil + case *historyResendInfo: + resendInfo = postActionInfo + case *activityTaskPostActionInfo: + resendInfo = postActionInfo.historyResendInfo + case *workflowTaskPostActionInfo: + resendInfo = postActionInfo.historyResendInfo + default: + logger.Fatal("unknown post action info for fetching remote history", tag.Value(postActionInfo)) + } + + remoteClusterName, err := getRemoteClusterName( + t.currentClusterName, + t.registry, + taskInfo.GetNamespaceID(), + ) + if err != nil { + return err + } + + scope := t.metricHandler.WithTags(metrics.OperationTag(metrics.HistoryRereplicationByTransferTaskScope)) + scope.Counter(metrics.ClientRequests.GetMetricName()).Record(1) + startTime := time.Now().UTC() + defer func() { scope.Timer(metrics.ClientLatency.GetMetricName()).Record(time.Since(startTime)) }() + + if resendInfo.lastEventID == common.EmptyEventID || resendInfo.lastEventVersion == common.EmptyVersion { + t.logger.Error("Error re-replicating history from remote: transferQueueStandbyProcessor encountered empty historyResendInfo.", + tag.ShardID(t.shard.GetShardID()), + tag.WorkflowNamespaceID(taskInfo.GetNamespaceID()), + tag.WorkflowID(taskInfo.GetWorkflowID()), + tag.WorkflowRunID(taskInfo.GetRunID()), + tag.SourceCluster(remoteClusterName)) + + return consts.ErrTaskRetry + } + + // NOTE: history resend may take long time and its timeout is currently + // controlled by a separate dynamicconfig config: StandbyTaskReReplicationContextTimeout + if err = t.nDCHistoryResender.SendSingleWorkflowHistory( + ctx, + remoteClusterName, + namespace.ID(taskInfo.GetNamespaceID()), + taskInfo.GetWorkflowID(), + taskInfo.GetRunID(), + resendInfo.lastEventID, + resendInfo.lastEventVersion, + 0, + 0, + ); err != nil { + if _, isNotFound := err.(*serviceerror.NamespaceNotFound); isNotFound { + // Don't log NamespaceNotFound error because it is valid case, and return error to stop retrying. + return err + } + t.logger.Error("Error re-replicating history from remote.", + tag.ShardID(t.shard.GetShardID()), + tag.WorkflowNamespaceID(taskInfo.GetNamespaceID()), + tag.WorkflowID(taskInfo.GetWorkflowID()), + tag.WorkflowRunID(taskInfo.GetRunID()), + tag.SourceCluster(remoteClusterName), + tag.Error(err)) + } + + // Return retryable error, so task processing will retry. + return consts.ErrTaskRetry +} + +func (t *transferQueueStandbyTaskExecutor) getCurrentTime() time.Time { + return t.shard.GetCurrentTime(t.clusterName) +} diff -Nru temporal-1.21.5-1/src/service/history/transfer_queue_standby_task_executor_test.go temporal-1.22.5/src/service/history/transfer_queue_standby_task_executor_test.go --- temporal-1.21.5-1/src/service/history/transfer_queue_standby_task_executor_test.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/service/history/transfer_queue_standby_task_executor_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,1274 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package history + +import ( + "context" + "errors" + "fmt" + "math/rand" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/pborman/uuid" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" + "go.temporal.io/api/serviceerror" + taskqueuepb "go.temporal.io/api/taskqueue/v1" + "go.temporal.io/api/workflowservice/v1" + + "go.temporal.io/server/api/adminservicemock/v1" + "go.temporal.io/server/api/historyservice/v1" + "go.temporal.io/server/api/historyservicemock/v1" + "go.temporal.io/server/api/matchingservice/v1" + "go.temporal.io/server/api/matchingservicemock/v1" + persistencespb "go.temporal.io/server/api/persistence/v1" + workflowspb "go.temporal.io/server/api/workflow/v1" + dc "go.temporal.io/server/common/dynamicconfig" + "go.temporal.io/server/common/searchattribute" + + "go.temporal.io/server/common" + "go.temporal.io/server/common/archiver" + "go.temporal.io/server/common/archiver/provider" + "go.temporal.io/server/common/clock" + "go.temporal.io/server/common/cluster" + "go.temporal.io/server/common/definition" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/namespace" + "go.temporal.io/server/common/persistence" + "go.temporal.io/server/common/persistence/versionhistory" + "go.temporal.io/server/common/persistence/visibility/manager" + "go.temporal.io/server/common/primitives/timestamp" + "go.temporal.io/server/common/xdc" + "go.temporal.io/server/service/history/consts" + "go.temporal.io/server/service/history/events" + "go.temporal.io/server/service/history/queues" + "go.temporal.io/server/service/history/shard" + "go.temporal.io/server/service/history/tasks" + "go.temporal.io/server/service/history/tests" + "go.temporal.io/server/service/history/vclock" + "go.temporal.io/server/service/history/workflow" + wcache "go.temporal.io/server/service/history/workflow/cache" + warchiver "go.temporal.io/server/service/worker/archiver" +) + +type ( + transferQueueStandbyTaskExecutorSuite struct { + suite.Suite + *require.Assertions + + controller *gomock.Controller + mockShard *shard.ContextTest + mockNamespaceCache *namespace.MockRegistry + mockClusterMetadata *cluster.MockMetadata + mockAdminClient *adminservicemock.MockAdminServiceClient + mockNDCHistoryResender *xdc.MockNDCHistoryResender + mockHistoryClient 
*historyservicemock.MockHistoryServiceClient + mockMatchingClient *matchingservicemock.MockMatchingServiceClient + + mockExecutionMgr *persistence.MockExecutionManager + mockArchivalClient *warchiver.MockClient + mockArchivalMetadata archiver.MetadataMock + mockArchiverProvider *provider.MockArchiverProvider + + workflowCache wcache.Cache + logger log.Logger + namespaceID namespace.ID + namespaceEntry *namespace.Namespace + version int64 + clusterName string + now time.Time + timeSource *clock.EventTimeSource + fetchHistoryDuration time.Duration + discardDuration time.Duration + + transferQueueStandbyTaskExecutor *transferQueueStandbyTaskExecutor + mockSearchAttributesProvider *searchattribute.MockProvider + mockVisibilityManager *manager.MockVisibilityManager + } +) + +func TestTransferQueueStandbyTaskExecutorSuite(t *testing.T) { + s := new(transferQueueStandbyTaskExecutorSuite) + suite.Run(t, s) +} + +func (s *transferQueueStandbyTaskExecutorSuite) SetupSuite() { +} + +func (s *transferQueueStandbyTaskExecutorSuite) SetupTest() { + s.Assertions = require.New(s.T()) + + config := tests.NewDynamicConfig() + + s.namespaceEntry = tests.GlobalStandbyNamespaceEntry + s.namespaceID = s.namespaceEntry.ID() + s.version = s.namespaceEntry.FailoverVersion() + s.now = time.Now().UTC() + s.timeSource = clock.NewEventTimeSource().Update(s.now) + s.fetchHistoryDuration = time.Minute * 12 + s.discardDuration = time.Minute * 30 + + s.controller = gomock.NewController(s.T()) + s.mockNDCHistoryResender = xdc.NewMockNDCHistoryResender(s.controller) + s.mockArchivalClient = warchiver.NewMockClient(s.controller) + s.mockShard = shard.NewTestContextWithTimeSource( + s.controller, + &persistencespb.ShardInfo{ + RangeId: 1, + }, + config, + s.timeSource, + ) + s.mockShard.SetEventsCacheForTesting(events.NewEventsCache( + s.mockShard.GetShardID(), + s.mockShard.GetConfig().EventsCacheMaxSizeBytes(), + s.mockShard.GetConfig().EventsCacheTTL(), + s.mockShard.GetExecutionManager(), + false, + s.mockShard.GetLogger(), + s.mockShard.GetMetricsHandler(), + )) + + s.mockHistoryClient = s.mockShard.Resource.HistoryClient + s.mockMatchingClient = s.mockShard.Resource.MatchingClient + s.mockExecutionMgr = s.mockShard.Resource.ExecutionMgr + s.mockClusterMetadata = s.mockShard.Resource.ClusterMetadata + s.mockArchivalMetadata = s.mockShard.Resource.ArchivalMetadata + s.mockArchiverProvider = s.mockShard.Resource.ArchiverProvider + s.mockNamespaceCache = s.mockShard.Resource.NamespaceCache + s.mockAdminClient = s.mockShard.Resource.RemoteAdminClient + s.mockSearchAttributesProvider = s.mockShard.Resource.SearchAttributesProvider + s.mockVisibilityManager = s.mockShard.Resource.VisibilityManager + s.mockNamespaceCache.EXPECT().GetNamespaceByID(tests.NamespaceID).Return(tests.GlobalNamespaceEntry, nil).AnyTimes() + s.mockNamespaceCache.EXPECT().GetNamespace(tests.Namespace).Return(tests.GlobalNamespaceEntry, nil).AnyTimes() + s.mockNamespaceCache.EXPECT().GetNamespaceByID(tests.TargetNamespaceID).Return(tests.GlobalTargetNamespaceEntry, nil).AnyTimes() + s.mockNamespaceCache.EXPECT().GetNamespace(tests.TargetNamespace).Return(tests.GlobalTargetNamespaceEntry, nil).AnyTimes() + s.mockNamespaceCache.EXPECT().GetNamespaceByID(tests.ParentNamespaceID).Return(tests.GlobalParentNamespaceEntry, nil).AnyTimes() + s.mockNamespaceCache.EXPECT().GetNamespace(tests.ParentNamespace).Return(tests.GlobalParentNamespaceEntry, nil).AnyTimes() + 
s.mockNamespaceCache.EXPECT().GetNamespaceByID(tests.ChildNamespaceID).Return(tests.GlobalChildNamespaceEntry, nil).AnyTimes() + s.mockNamespaceCache.EXPECT().GetNamespace(tests.ChildNamespace).Return(tests.GlobalChildNamespaceEntry, nil).AnyTimes() + s.mockNamespaceCache.EXPECT().GetNamespaceByID(tests.StandbyNamespaceID).Return(tests.GlobalStandbyNamespaceEntry, nil).AnyTimes() + s.mockNamespaceCache.EXPECT().GetNamespace(tests.StandbyNamespace).Return(tests.GlobalStandbyNamespaceEntry, nil).AnyTimes() + s.mockNamespaceCache.EXPECT().GetNamespaceName(tests.StandbyNamespaceID).Return(tests.StandbyNamespace, nil).AnyTimes() + s.mockNamespaceCache.EXPECT().GetNamespaceByID(tests.StandbyWithVisibilityArchivalNamespaceID). + Return(tests.GlobalStandbyWithVisibilityArchivalNamespaceEntry, nil).AnyTimes() + s.mockNamespaceCache.EXPECT().GetNamespace(tests.StandbyWithVisibilityArchivalNamespace). + Return(tests.GlobalStandbyWithVisibilityArchivalNamespaceEntry, nil).AnyTimes() + s.mockNamespaceCache.EXPECT().GetNamespaceName(tests.StandbyWithVisibilityArchivalNamespaceID). + Return(tests.StandbyWithVisibilityArchivalNamespace, nil).AnyTimes() + s.mockClusterMetadata.EXPECT().GetClusterID().Return(cluster.TestCurrentClusterInitialFailoverVersion).AnyTimes() + s.mockClusterMetadata.EXPECT().GetCurrentClusterName().Return(cluster.TestCurrentClusterName).AnyTimes() + s.mockClusterMetadata.EXPECT().GetAllClusterInfo().Return(cluster.TestAllClusterInfo).AnyTimes() + s.mockClusterMetadata.EXPECT().IsGlobalNamespaceEnabled().Return(true).AnyTimes() + s.mockClusterMetadata.EXPECT().ClusterNameForFailoverVersion(s.namespaceEntry.IsGlobalNamespace(), s.version).Return(s.clusterName).AnyTimes() + + s.workflowCache = wcache.NewCache(s.mockShard) + s.logger = s.mockShard.GetLogger() + + s.mockArchivalMetadata.SetHistoryEnabledByDefault() + s.mockArchivalMetadata.SetVisibilityEnabledByDefault() + + h := &historyEngineImpl{ + currentClusterName: s.mockShard.Resource.GetClusterMetadata().GetCurrentClusterName(), + shard: s.mockShard, + clusterMetadata: s.mockClusterMetadata, + executionManager: s.mockExecutionMgr, + logger: s.logger, + tokenSerializer: common.NewProtoTaskTokenSerializer(), + metricsHandler: s.mockShard.GetMetricsHandler(), + } + s.mockShard.SetEngineForTesting(h) + s.clusterName = cluster.TestAlternativeClusterName + + s.transferQueueStandbyTaskExecutor = newTransferQueueStandbyTaskExecutor( + s.mockShard, + s.workflowCache, + s.mockArchivalClient, + s.mockNDCHistoryResender, + s.logger, + metrics.NoopMetricsHandler, + s.clusterName, + s.mockShard.Resource.HistoryClient, + s.mockShard.Resource.MatchingClient, + s.mockVisibilityManager, + ).(*transferQueueStandbyTaskExecutor) +} + +func (s *transferQueueStandbyTaskExecutorSuite) TearDownTest() { + s.controller.Finish() + s.mockShard.StopForTest() +} + +func (s *transferQueueStandbyTaskExecutorSuite) TestProcessActivityTask_Pending() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: 
workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + }, + ) + s.Nil(err) + + wt := addWorkflowTaskScheduledEvent(mutableState) + event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) + wt.StartedEventID = event.GetEventId() + event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") + + taskID := int64(59) + activityID := "activity-1" + activityType := "some random activity type" + event, _ = addActivityTaskScheduledEvent(mutableState, event.GetEventId(), activityID, activityType, taskQueueName, &commonpb.Payloads{}, 1*time.Second, 1*time.Second, 1*time.Second, 1*time.Second) + + now := time.Now().UTC() + transferTask := &tasks.ActivityTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + Version: s.version, + VisibilityTimestamp: now, + TaskID: taskID, + TaskQueue: taskQueueName, + ScheduledEventID: event.GetEventId(), + } + + persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + + // no-op post action + s.mockShard.SetCurrentTime(s.clusterName, now) + _, _, err = s.transferQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) + s.Equal(consts.ErrTaskRetry, err) + + // resend history post action + s.mockShard.SetCurrentTime(s.clusterName, now.Add(s.fetchHistoryDuration)) + s.mockNDCHistoryResender.EXPECT().SendSingleWorkflowHistory( + gomock.Any(), + s.clusterName, + namespace.ID(transferTask.NamespaceID), + transferTask.WorkflowID, + transferTask.RunID, + event.GetEventId(), + s.version, + int64(0), + int64(0), + ).Return(nil) + _, _, err = s.transferQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) + s.Equal(consts.ErrTaskRetry, err) + + // push to matching post action + s.mockShard.SetCurrentTime(s.clusterName, now.Add(s.discardDuration)) + s.mockMatchingClient.EXPECT().AddActivityTask(gomock.Any(), gomock.Any(), gomock.Any()).Return(&matchingservice.AddActivityTaskResponse{}, nil) + _, _, err = s.transferQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) + s.Nil(err) +} + +func (s *transferQueueStandbyTaskExecutorSuite) TestProcessActivityTask_Success() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{ + Name: taskQueueName, + Kind: enumspb.TASK_QUEUE_KIND_NORMAL, + }, + WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * 
time.Second), + }, + }, + ) + s.Nil(err) + + wt := addWorkflowTaskScheduledEvent(mutableState) + event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) + wt.StartedEventID = event.GetEventId() + event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") + + taskID := int64(59) + activityID := "activity-1" + activityType := "some random activity type" + event, _ = addActivityTaskScheduledEvent(mutableState, event.GetEventId(), activityID, activityType, taskQueueName, &commonpb.Payloads{}, 1*time.Second, 1*time.Second, 1*time.Second, 1*time.Second) + + now := time.Now().UTC() + transferTask := &tasks.ActivityTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + Version: s.version, + VisibilityTimestamp: now, + TaskID: taskID, + TaskQueue: taskQueueName, + ScheduledEventID: event.GetEventId(), + } + + event = addActivityTaskStartedEvent(mutableState, event.GetEventId(), "") + // Flush buffered events so real IDs get assigned + mutableState.FlushBufferedEvents() + + persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + + s.mockShard.SetCurrentTime(s.clusterName, now) + _, _, err = s.transferQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) + s.Nil(err) +} + +func (s *transferQueueStandbyTaskExecutorSuite) TestProcessWorkflowTask_Pending() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + }, + ) + s.Nil(err) + + taskID := int64(59) + wt := addWorkflowTaskScheduledEvent(mutableState) + + now := time.Now().UTC() + transferTask := &tasks.WorkflowTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + Version: s.version, + VisibilityTimestamp: now, + TaskID: taskID, + TaskQueue: taskQueueName, + ScheduledEventID: wt.ScheduledEventID, + } + + persistenceMutableState := s.createPersistenceMutableState(mutableState, wt.ScheduledEventID, wt.Version) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + + // no-op post action + s.mockShard.SetCurrentTime(s.clusterName, now) + _, _, err = s.transferQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) + s.Equal(consts.ErrTaskRetry, err) + + // resend history post action + s.mockShard.SetCurrentTime(s.clusterName, 
now.Add(s.fetchHistoryDuration)) + s.mockNDCHistoryResender.EXPECT().SendSingleWorkflowHistory( + gomock.Any(), + s.clusterName, + namespace.ID(transferTask.NamespaceID), + transferTask.WorkflowID, + transferTask.RunID, + wt.ScheduledEventID, + s.version, + int64(0), + int64(0), + ).Return(nil) + _, _, err = s.transferQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) + s.Equal(consts.ErrTaskRetry, err) + + // push to matching post action + s.mockShard.SetCurrentTime(s.clusterName, now.Add(s.discardDuration)) + s.mockMatchingClient.EXPECT().AddWorkflowTask(gomock.Any(), gomock.Any(), gomock.Any()).Return(&matchingservice.AddWorkflowTaskResponse{}, nil) + _, _, err = s.transferQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) + s.Nil(err) +} + +func (s *transferQueueStandbyTaskExecutorSuite) TestProcessWorkflowTask_Success_FirstWorkflowTask() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + }, + ) + s.Nil(err) + + taskID := int64(59) + wt := addWorkflowTaskScheduledEvent(mutableState) + + now := time.Now().UTC() + transferTask := &tasks.WorkflowTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + Version: s.version, + VisibilityTimestamp: now, + TaskID: taskID, + TaskQueue: taskQueueName, + ScheduledEventID: wt.ScheduledEventID, + } + + event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) + wt.StartedEventID = event.GetEventId() + + persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + + s.mockShard.SetCurrentTime(s.clusterName, now) + _, _, err = s.transferQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) + s.Nil(err) +} + +func (s *transferQueueStandbyTaskExecutorSuite) TestProcessWorkflowTask_Success_NonFirstWorkflowTask() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + 
TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + }, + ) + s.Nil(err) + + wt := addWorkflowTaskScheduledEvent(mutableState) + event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) + wt.StartedEventID = event.GetEventId() + addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") + + taskID := int64(59) + wt = addWorkflowTaskScheduledEvent(mutableState) + + now := time.Now().UTC() + transferTask := &tasks.WorkflowTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + Version: s.version, + VisibilityTimestamp: now, + TaskID: taskID, + TaskQueue: taskQueueName, + ScheduledEventID: wt.ScheduledEventID, + } + + event = addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) + wt.StartedEventID = event.GetEventId() + + persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + + s.mockShard.SetCurrentTime(s.clusterName, now) + _, _, err = s.transferQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) + s.Nil(err) +} + +func (s *transferQueueStandbyTaskExecutorSuite) TestProcessCloseExecution() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + parentNamespaceID := "some random parent namespace ID" + parentInitiatedID := int64(3222) + parentInitiatedVersion := int64(1234) + parentNamespace := "some random parent namespace Name" + parentExecution := &commonpb.WorkflowExecution{ + WorkflowId: "some random parent workflow ID", + RunId: uuid.New(), + } + parentClock := vclock.NewVectorClock(rand.Int63(), rand.Int31(), rand.Int63()) + + mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + ParentExecutionInfo: &workflowspb.ParentExecutionInfo{ + NamespaceId: parentNamespaceID, + Namespace: parentNamespace, + Execution: parentExecution, + InitiatedId: parentInitiatedID, + InitiatedVersion: parentInitiatedVersion, + Clock: parentClock, + }, + }, + ) + s.Nil(err) + + wt := addWorkflowTaskScheduledEvent(mutableState) + event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) + wt.StartedEventID = event.GetEventId() + event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") + + taskID := int64(59) + event = addCompleteWorkflowEvent(mutableState, event.GetEventId(), nil) + + 
now := time.Now().UTC() + transferTask := &tasks.CloseExecutionTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + Version: s.version, + VisibilityTimestamp: now, + TaskID: taskID, + } + + expectedVerificationRequest := &historyservice.VerifyChildExecutionCompletionRecordedRequest{ + NamespaceId: parentNamespaceID, + ParentExecution: parentExecution, + ChildExecution: &execution, + ParentInitiatedId: parentInitiatedID, + ParentInitiatedVersion: parentInitiatedVersion, + Clock: parentClock, + } + + persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + s.mockArchivalMetadata.EXPECT().GetVisibilityConfig().Return(archiver.NewDisabledArchvialConfig()).AnyTimes() + + s.mockShard.SetCurrentTime(s.clusterName, now) + s.mockHistoryClient.EXPECT().VerifyChildExecutionCompletionRecorded(gomock.Any(), expectedVerificationRequest).Return(nil, nil) + _, _, err = s.transferQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) + s.Nil(err) + + s.mockHistoryClient.EXPECT().VerifyChildExecutionCompletionRecorded(gomock.Any(), expectedVerificationRequest).Return(nil, consts.ErrWorkflowExecutionNotFound) + _, _, err = s.transferQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) + s.Equal(consts.ErrTaskRetry, err) + + s.mockHistoryClient.EXPECT().VerifyChildExecutionCompletionRecorded(gomock.Any(), expectedVerificationRequest).Return(nil, consts.ErrWorkflowNotReady) + _, _, err = s.transferQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) + s.Equal(consts.ErrTaskRetry, err) + + s.mockHistoryClient.EXPECT().VerifyChildExecutionCompletionRecorded(gomock.Any(), expectedVerificationRequest).Return(nil, serviceerror.NewUnimplemented("not implemented")) + _, _, err = s.transferQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) + s.Nil(err) + + s.mockHistoryClient.EXPECT().VerifyChildExecutionCompletionRecorded(gomock.Any(), expectedVerificationRequest).Return(nil, errors.New("some random error")) + _, _, err = s.transferQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) + s.Equal(errVerificationFailed, err) + + s.mockShard.SetCurrentTime(s.clusterName, now.Add(s.fetchHistoryDuration)) + s.mockHistoryClient.EXPECT().VerifyChildExecutionCompletionRecorded(gomock.Any(), expectedVerificationRequest).Return(nil, consts.ErrWorkflowNotReady) + _, _, err = s.transferQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) + s.Equal(consts.ErrTaskRetry, err) + + s.mockShard.SetCurrentTime(s.clusterName, now.Add(s.discardDuration)) + s.mockHistoryClient.EXPECT().VerifyChildExecutionCompletionRecorded(gomock.Any(), expectedVerificationRequest).Return(nil, consts.ErrWorkflowNotReady) + _, _, err = s.transferQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) + s.Equal(consts.ErrTaskDiscarded, err) +} + +func (s *transferQueueStandbyTaskExecutorSuite) TestProcessCloseExecution_CanSkipVisibilityArchival() { + for _, skipVisibilityArchival := range []bool{ + false, + true, + } { + s.Run(fmt.Sprintf("CanSkipVisibilityArchival=%v", skipVisibilityArchival), func() { + 
execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + mutableState := workflow.TestGlobalMutableState( + s.mockShard, + s.mockShard.GetEventsCache(), + s.logger, + s.version, + execution.GetRunId(), + ) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: tests.StandbyWithVisibilityArchivalNamespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + }, + ) + s.Nil(err) + + wt := addWorkflowTaskScheduledEvent(mutableState) + event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) + wt.StartedEventID = event.GetEventId() + event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") + + taskID := int64(59) + event = addCompleteWorkflowEvent(mutableState, event.GetEventId(), nil) + + transferTask := &tasks.CloseExecutionTask{ + WorkflowKey: definition.NewWorkflowKey( + tests.StandbyWithVisibilityArchivalNamespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + Version: s.version, + TaskID: taskID, + VisibilityTimestamp: time.Now().UTC(), + CanSkipVisibilityArchival: skipVisibilityArchival, + } + + persistenceMutableState := s.createPersistenceMutableState( + mutableState, + event.GetEventId(), + event.GetVersion(), + ) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution( + gomock.Any(), + gomock.Any(), + ).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + if !skipVisibilityArchival { + s.mockArchivalMetadata.EXPECT().GetVisibilityConfig().Return( + archiver.NewArchivalConfig( + "enabled", + dc.GetStringPropertyFn("enabled"), + dc.GetBoolPropertyFn(true), + "disabled", + "random URI", + ), + ).AnyTimes() + s.mockArchivalClient.EXPECT().Archive(gomock.Any(), gomock.Any()).Return(nil, nil) + s.mockSearchAttributesProvider.EXPECT().GetSearchAttributes(gomock.Any(), false) + s.mockVisibilityManager.EXPECT().GetIndexName().Return("") + } + + _, _, err = s.transferQueueStandbyTaskExecutor.Execute( + context.Background(), + s.newTaskExecutable(transferTask), + ) + s.Nil(err) + }) + } +} + +func (s *transferQueueStandbyTaskExecutorSuite) TestProcessCancelExecution_Pending() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + targetExecution := commonpb.WorkflowExecution{ + WorkflowId: "some random target workflow ID", + RunId: uuid.New(), + } + + mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowExecutionTimeout: 
timestamp.DurationPtr(2 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + }, + ) + s.Nil(err) + + wt := addWorkflowTaskScheduledEvent(mutableState) + event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) + wt.StartedEventID = event.GetEventId() + event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") + + taskID := int64(59) + event, _ = addRequestCancelInitiatedEvent(mutableState, event.GetEventId(), uuid.New(), tests.TargetNamespace, tests.TargetNamespaceID, targetExecution.GetWorkflowId(), targetExecution.GetRunId()) + nextEventID := event.GetEventId() + + now := time.Now().UTC() + transferTask := &tasks.CancelExecutionTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + Version: s.version, + VisibilityTimestamp: now, + TargetNamespaceID: tests.TargetNamespaceID.String(), + TargetWorkflowID: targetExecution.GetWorkflowId(), + TargetRunID: targetExecution.GetRunId(), + TargetChildWorkflowOnly: true, + TaskID: taskID, + InitiatedEventID: event.GetEventId(), + } + + persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + + s.mockShard.SetCurrentTime(s.clusterName, now) + _, _, err = s.transferQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) + s.Equal(consts.ErrTaskRetry, err) + + s.mockShard.SetCurrentTime(s.clusterName, now.Add(s.fetchHistoryDuration)) + s.mockNDCHistoryResender.EXPECT().SendSingleWorkflowHistory( + gomock.Any(), + s.clusterName, + namespace.ID(transferTask.NamespaceID), + transferTask.WorkflowID, + transferTask.RunID, + nextEventID, + s.version, + int64(0), + int64(0), + ).Return(nil) + _, _, err = s.transferQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) + s.Equal(consts.ErrTaskRetry, err) + + s.mockShard.SetCurrentTime(s.clusterName, now.Add(s.discardDuration)) + _, _, err = s.transferQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) + s.Equal(consts.ErrTaskDiscarded, err) +} + +func (s *transferQueueStandbyTaskExecutorSuite) TestProcessCancelExecution_Success() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + targetExecution := commonpb.WorkflowExecution{ + WorkflowId: "some random target workflow ID", + RunId: uuid.New(), + } + + mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + }, + ) + s.Nil(err) + + wt := addWorkflowTaskScheduledEvent(mutableState) + event := 
addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) + wt.StartedEventID = event.GetEventId() + event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") + + taskID := int64(59) + event, _ = addRequestCancelInitiatedEvent(mutableState, event.GetEventId(), uuid.New(), tests.TargetNamespace, tests.TargetNamespaceID, targetExecution.GetWorkflowId(), targetExecution.GetRunId()) + + now := time.Now().UTC() + transferTask := &tasks.CancelExecutionTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + Version: s.version, + VisibilityTimestamp: now, + TargetNamespaceID: tests.TargetNamespaceID.String(), + TargetWorkflowID: targetExecution.GetWorkflowId(), + TargetRunID: targetExecution.GetRunId(), + TaskID: taskID, + InitiatedEventID: event.GetEventId(), + } + + event = addCancelRequestedEvent(mutableState, event.GetEventId(), tests.TargetNamespace, tests.TargetNamespaceID, targetExecution.GetWorkflowId(), targetExecution.GetRunId()) + // Flush buffered events so real IDs get assigned + mutableState.FlushBufferedEvents() + + persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + + s.mockShard.SetCurrentTime(s.clusterName, now) + _, _, err = s.transferQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) + s.Nil(err) +} + +func (s *transferQueueStandbyTaskExecutorSuite) TestProcessSignalExecution_Pending() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + targetExecution := commonpb.WorkflowExecution{ + WorkflowId: "some random target workflow ID", + RunId: uuid.New(), + } + signalName := "some random signal name" + + mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + }, + ) + s.Nil(err) + + wt := addWorkflowTaskScheduledEvent(mutableState) + event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) + wt.StartedEventID = event.GetEventId() + event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") + + taskID := int64(59) + event, _ = addRequestSignalInitiatedEvent(mutableState, event.GetEventId(), uuid.New(), + tests.TargetNamespace, tests.TargetNamespaceID, targetExecution.GetWorkflowId(), targetExecution.GetRunId(), signalName, nil, "", nil) + nextEventID := event.GetEventId() + + now := time.Now().UTC() + transferTask := &tasks.SignalExecutionTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + 
execution.GetWorkflowId(), + execution.GetRunId(), + ), + Version: s.version, + VisibilityTimestamp: now, + TargetNamespaceID: tests.TargetNamespaceID.String(), + TargetWorkflowID: targetExecution.GetWorkflowId(), + TargetRunID: targetExecution.GetRunId(), + TaskID: taskID, + InitiatedEventID: event.GetEventId(), + } + + persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + + s.mockShard.SetCurrentTime(s.clusterName, now) + _, _, err = s.transferQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) + s.Equal(consts.ErrTaskRetry, err) + + s.mockShard.SetCurrentTime(s.clusterName, now.Add(s.fetchHistoryDuration)) + s.mockNDCHistoryResender.EXPECT().SendSingleWorkflowHistory( + gomock.Any(), + s.clusterName, + namespace.ID(transferTask.NamespaceID), + transferTask.WorkflowID, + transferTask.RunID, + nextEventID, + s.version, + int64(0), + int64(0), + ).Return(nil) + _, _, err = s.transferQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) + s.Equal(consts.ErrTaskRetry, err) + + s.mockShard.SetCurrentTime(s.clusterName, now.Add(s.discardDuration)) + _, _, err = s.transferQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) + s.Equal(consts.ErrTaskDiscarded, err) +} + +func (s *transferQueueStandbyTaskExecutorSuite) TestProcessSignalExecution_Success() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + targetExecution := commonpb.WorkflowExecution{ + WorkflowId: "some random target workflow ID", + RunId: uuid.New(), + } + signalName := "some random signal name" + + mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + }, + ) + s.Nil(err) + + wt := addWorkflowTaskScheduledEvent(mutableState) + event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) + wt.StartedEventID = event.GetEventId() + event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") + + taskID := int64(59) + event, _ = addRequestSignalInitiatedEvent(mutableState, event.GetEventId(), uuid.New(), + tests.TargetNamespace, tests.TargetNamespaceID, targetExecution.GetWorkflowId(), targetExecution.GetRunId(), signalName, nil, "", nil) + + now := time.Now().UTC() + transferTask := &tasks.SignalExecutionTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + Version: s.version, + VisibilityTimestamp: now, + TargetNamespaceID: tests.TargetNamespaceID.String(), + TargetWorkflowID: 
targetExecution.GetWorkflowId(), + TargetRunID: targetExecution.GetRunId(), + TaskID: taskID, + InitiatedEventID: event.GetEventId(), + } + + event = addSignaledEvent(mutableState, event.GetEventId(), tests.TargetNamespace, tests.TargetNamespaceID, targetExecution.GetWorkflowId(), targetExecution.GetRunId(), "") + // Flush buffered events so real IDs get assigned + mutableState.FlushBufferedEvents() + + persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + + s.mockShard.SetCurrentTime(s.clusterName, now) + _, _, err = s.transferQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) + s.Nil(err) +} + +func (s *transferQueueStandbyTaskExecutorSuite) TestProcessStartChildExecution_Pending() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + childWorkflowID := "some random child workflow ID" + childWorkflowType := "some random child workflow type" + childTaskQueueName := "some random child task queue" + + mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + }, + ) + s.Nil(err) + + wt := addWorkflowTaskScheduledEvent(mutableState) + event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) + wt.StartedEventID = event.GetEventId() + event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") + + taskID := int64(59) + event, _ = addStartChildWorkflowExecutionInitiatedEvent(mutableState, event.GetEventId(), uuid.New(), + tests.ChildNamespace, tests.ChildNamespaceID, childWorkflowID, childWorkflowType, childTaskQueueName, nil, 1*time.Second, 1*time.Second, 1*time.Second, enumspb.PARENT_CLOSE_POLICY_ABANDON) + nextEventID := event.GetEventId() + + now := time.Now().UTC() + transferTask := &tasks.StartChildExecutionTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + Version: s.version, + VisibilityTimestamp: now, + TargetNamespaceID: tests.ChildNamespaceID.String(), + TargetWorkflowID: childWorkflowID, + TaskID: taskID, + InitiatedEventID: event.GetEventId(), + } + + persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + + s.mockShard.SetCurrentTime(s.clusterName, now) + _, _, err = s.transferQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) + s.Equal(consts.ErrTaskRetry, err) + + 
s.mockShard.SetCurrentTime(s.clusterName, now.Add(s.fetchHistoryDuration)) + s.mockNDCHistoryResender.EXPECT().SendSingleWorkflowHistory( + gomock.Any(), + s.clusterName, + namespace.ID(transferTask.NamespaceID), + transferTask.WorkflowID, + transferTask.RunID, + nextEventID, + s.version, + int64(0), + int64(0), + ).Return(nil) + _, _, err = s.transferQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) + s.Equal(consts.ErrTaskRetry, err) + + event = addChildWorkflowExecutionStartedEvent(mutableState, event.GetEventId(), childWorkflowID, uuid.New(), childWorkflowType, nil) + mutableState.FlushBufferedEvents() + + // clear the cache + s.transferQueueStandbyTaskExecutor.cache = wcache.NewCache(s.mockShard) + persistenceMutableState = s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) + s.mockShard.SetCurrentTime(s.clusterName, now.Add(s.fetchHistoryDuration)) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + + s.mockHistoryClient.EXPECT().VerifyFirstWorkflowTaskScheduled(gomock.Any(), gomock.Any()).Return(nil, nil) + _, _, err = s.transferQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) + s.Nil(err) + + s.mockHistoryClient.EXPECT().VerifyFirstWorkflowTaskScheduled(gomock.Any(), gomock.Any()).Return(nil, consts.ErrWorkflowNotReady) + _, _, err = s.transferQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) + s.Equal(consts.ErrTaskRetry, err) + + s.mockHistoryClient.EXPECT().VerifyFirstWorkflowTaskScheduled(gomock.Any(), gomock.Any()).Return(nil, consts.ErrWorkflowExecutionNotFound) + _, _, err = s.transferQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) + s.Equal(consts.ErrTaskRetry, err) + + s.mockHistoryClient.EXPECT().VerifyFirstWorkflowTaskScheduled(gomock.Any(), gomock.Any()).Return(nil, &serviceerror.Unimplemented{}) + _, _, err = s.transferQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) + s.Nil(err) + + s.mockHistoryClient.EXPECT().VerifyFirstWorkflowTaskScheduled(gomock.Any(), gomock.Any()).Return(nil, errors.New("some random error")) + _, _, err = s.transferQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) + s.Equal(errVerificationFailed, err) + + s.mockShard.SetCurrentTime(s.clusterName, now.Add(s.discardDuration)) + s.mockHistoryClient.EXPECT().VerifyFirstWorkflowTaskScheduled(gomock.Any(), gomock.Any()).Return(nil, &serviceerror.WorkflowNotReady{}) + _, _, err = s.transferQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) + s.Equal(consts.ErrTaskDiscarded, err) + + s.mockShard.SetCurrentTime(s.clusterName, now.Add(s.discardDuration)) + s.mockHistoryClient.EXPECT().VerifyFirstWorkflowTaskScheduled(gomock.Any(), gomock.Any()).Return(nil, nil) + _, _, err = s.transferQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) + s.Nil(err) +} + +func (s *transferQueueStandbyTaskExecutorSuite) TestProcessStartChildExecution_Success() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + childWorkflowID := "some random child workflow ID" + childWorkflowType := "some random child 
workflow type" + childTaskQueueName := "some random child task queue" + + mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + ContinueAsNewInitiator: enumspb.CONTINUE_AS_NEW_INITIATOR_UNSPECIFIED, + }, + ) + s.Nil(err) + + wt := addWorkflowTaskScheduledEvent(mutableState) + event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) + wt.StartedEventID = event.GetEventId() + event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") + + taskID := int64(59) + event, childInfo := addStartChildWorkflowExecutionInitiatedEvent(mutableState, event.GetEventId(), uuid.New(), + tests.ChildNamespace, tests.ChildNamespaceID, childWorkflowID, childWorkflowType, childTaskQueueName, nil, 1*time.Second, 1*time.Second, 1*time.Second, enumspb.PARENT_CLOSE_POLICY_ABANDON) + + now := time.Now().UTC() + transferTask := &tasks.StartChildExecutionTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + Version: s.version, + VisibilityTimestamp: now, + TargetNamespaceID: tests.ChildNamespaceID.String(), + TargetWorkflowID: childWorkflowID, + TaskID: taskID, + InitiatedEventID: event.GetEventId(), + } + event = addChildWorkflowExecutionStartedEvent(mutableState, event.GetEventId(), childWorkflowID, uuid.New(), childWorkflowType, nil) + // Flush buffered events so real IDs get assigned + mutableState.FlushBufferedEvents() + childInfo.StartedEventId = event.GetEventId() + + persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + s.mockHistoryClient.EXPECT().VerifyFirstWorkflowTaskScheduled(gomock.Any(), gomock.Any()).Return(nil, nil) + + s.mockShard.SetCurrentTime(s.clusterName, now) + _, _, err = s.transferQueueStandbyTaskExecutor.Execute(context.Background(), s.newTaskExecutable(transferTask)) + s.Nil(err) + + // workflow closed && child started && parent close policy is abandon + event, err = mutableState.AddTimeoutWorkflowEvent( + mutableState.GetNextEventID(), + enumspb.RETRY_STATE_RETRY_POLICY_NOT_SET, + uuid.New(), + ) + s.NoError(err) + + s.transferQueueStandbyTaskExecutor.cache = wcache.NewCache(s.mockShard) + persistenceMutableState = s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + s.mockHistoryClient.EXPECT().VerifyFirstWorkflowTaskScheduled(gomock.Any(), gomock.Any()).Return(nil, nil) + + s.mockShard.SetCurrentTime(s.clusterName, now) + _, _, err = s.transferQueueStandbyTaskExecutor.Execute(context.Background(), 
s.newTaskExecutable(transferTask)) + s.Nil(err) +} + +func (s *transferQueueStandbyTaskExecutorSuite) createPersistenceMutableState( + ms workflow.MutableState, + lastEventID int64, + lastEventVersion int64, +) *persistencespb.WorkflowMutableState { + currentVersionHistory, err := versionhistory.GetCurrentVersionHistory(ms.GetExecutionInfo().GetVersionHistories()) + s.NoError(err) + err = versionhistory.AddOrUpdateVersionHistoryItem(currentVersionHistory, versionhistory.NewVersionHistoryItem( + lastEventID, lastEventVersion, + )) + s.NoError(err) + return workflow.TestCloneToProto(ms) +} + +func (s *transferQueueStandbyTaskExecutorSuite) newTaskExecutable( + task tasks.Task, +) queues.Executable { + return queues.NewExecutable( + queues.DefaultReaderId, + task, + s.transferQueueStandbyTaskExecutor, + nil, + nil, + queues.NewNoopPriorityAssigner(), + s.mockShard.GetTimeSource(), + s.mockNamespaceCache, + s.mockClusterMetadata, + nil, + metrics.NoopMetricsHandler, + func() bool { return false }, + ) +} diff -Nru temporal-1.21.5-1/src/service/history/transfer_queue_task_executor_base.go temporal-1.22.5/src/service/history/transfer_queue_task_executor_base.go --- temporal-1.21.5-1/src/service/history/transfer_queue_task_executor_base.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/service/history/transfer_queue_task_executor_base.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,340 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
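// A rough summary of the standby post-action behaviour that the tests above exercise,
// where the two thresholds come from the dynamic config values
// StandbyTaskMissingEventsResendDelay and StandbyTaskMissingEventsDiscardDelay
// (see getStandbyPostActionFn in the executor above):
//
//	elapsed < resend delay             -> no-op post action; Execute returns consts.ErrTaskRetry
//	resend delay <= elapsed < discard  -> re-replicate history from the remote cluster via
//	                                      nDCHistoryResender.SendSingleWorkflowHistory, then
//	                                      still return consts.ErrTaskRetry
//	elapsed >= discard delay           -> last-resort action: push the task to matching through
//	                                      the base executor below (activity / workflow task),
//	                                      verify against the parent or child workflow
//	                                      (close / start-child), or fail with consts.ErrTaskDiscarded
//
// TestProcessActivityTask_Pending, for example, advances the standby cluster time through
// now, now+fetchHistoryDuration and now+discardDuration and asserts exactly this sequence.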
+ +package history + +import ( + "context" + "time" + + commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" + "go.temporal.io/api/serviceerror" + taskqueuepb "go.temporal.io/api/taskqueue/v1" + + "go.temporal.io/server/api/matchingservice/v1" + taskqueuespb "go.temporal.io/server/api/taskqueue/v1" + "go.temporal.io/server/common" + "go.temporal.io/server/common/debug" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/log/tag" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/namespace" + "go.temporal.io/server/common/persistence/visibility/manager" + "go.temporal.io/server/common/primitives" + "go.temporal.io/server/common/resource" + "go.temporal.io/server/common/searchattribute" + "go.temporal.io/server/service/history/configs" + "go.temporal.io/server/service/history/consts" + "go.temporal.io/server/service/history/deletemanager" + "go.temporal.io/server/service/history/queues" + "go.temporal.io/server/service/history/shard" + "go.temporal.io/server/service/history/tasks" + "go.temporal.io/server/service/history/vclock" + "go.temporal.io/server/service/history/workflow" + wcache "go.temporal.io/server/service/history/workflow/cache" + "go.temporal.io/server/service/worker/archiver" +) + +const ( + taskTimeout = time.Second * 3 * debug.TimeoutMultiplier + taskHistoryOpTimeout = 20 * time.Second +) + +var ( + errUnknownTransferTask = serviceerror.NewInternal("Unknown transfer task") +) + +type ( + transferQueueTaskExecutorBase struct { + currentClusterName string + shard shard.Context + registry namespace.Registry + cache wcache.Cache + archivalClient archiver.Client + logger log.Logger + metricHandler metrics.Handler + historyRawClient resource.HistoryRawClient + matchingRawClient resource.MatchingRawClient + config *configs.Config + searchAttributesProvider searchattribute.Provider + visibilityManager manager.VisibilityManager + workflowDeleteManager deletemanager.DeleteManager + } +) + +func newTransferQueueTaskExecutorBase( + shard shard.Context, + workflowCache wcache.Cache, + archivalClient archiver.Client, + logger log.Logger, + metricHandler metrics.Handler, + historyRawClient resource.HistoryRawClient, + matchingRawClient resource.MatchingRawClient, + visibilityManager manager.VisibilityManager, +) *transferQueueTaskExecutorBase { + return &transferQueueTaskExecutorBase{ + currentClusterName: shard.GetClusterMetadata().GetCurrentClusterName(), + shard: shard, + registry: shard.GetNamespaceRegistry(), + cache: workflowCache, + archivalClient: archivalClient, + logger: logger, + metricHandler: metricHandler, + historyRawClient: historyRawClient, + matchingRawClient: matchingRawClient, + config: shard.GetConfig(), + searchAttributesProvider: shard.GetSearchAttributesProvider(), + visibilityManager: visibilityManager, + workflowDeleteManager: deletemanager.NewDeleteManager( + shard, + workflowCache, + shard.GetConfig(), + archivalClient, + shard.GetTimeSource(), + visibilityManager, + ), + } +} + +func (t *transferQueueTaskExecutorBase) pushActivity( + ctx context.Context, + task *tasks.ActivityTask, + activityScheduleToStartTimeout *time.Duration, + directive *taskqueuespb.TaskVersionDirective, +) error { + _, err := t.matchingRawClient.AddActivityTask(ctx, &matchingservice.AddActivityTaskRequest{ + NamespaceId: task.NamespaceID, + Execution: &commonpb.WorkflowExecution{ + WorkflowId: task.WorkflowID, + RunId: task.RunID, + }, + TaskQueue: &taskqueuepb.TaskQueue{ + Name: task.TaskQueue, + Kind: 
enumspb.TASK_QUEUE_KIND_NORMAL, + }, + ScheduledEventId: task.ScheduledEventID, + ScheduleToStartTimeout: activityScheduleToStartTimeout, + Clock: vclock.NewVectorClock(t.shard.GetClusterMetadata().GetClusterID(), t.shard.GetShardID(), task.TaskID), + VersionDirective: directive, + }) + if _, isNotFound := err.(*serviceerror.NotFound); isNotFound { + // NotFound error is not expected for AddTasks calls + // but will be ignored by task error handling logic, so log it here + tasks.InitializeLogger(task, t.logger).Error("Matching returned not found error for AddActivityTask", tag.Error(err)) + } + + return err +} + +func (t *transferQueueTaskExecutorBase) pushWorkflowTask( + ctx context.Context, + task *tasks.WorkflowTask, + taskqueue *taskqueuepb.TaskQueue, + workflowTaskScheduleToStartTimeout *time.Duration, + directive *taskqueuespb.TaskVersionDirective, +) error { + _, err := t.matchingRawClient.AddWorkflowTask(ctx, &matchingservice.AddWorkflowTaskRequest{ + NamespaceId: task.NamespaceID, + Execution: &commonpb.WorkflowExecution{ + WorkflowId: task.WorkflowID, + RunId: task.RunID, + }, + TaskQueue: taskqueue, + ScheduledEventId: task.ScheduledEventID, + ScheduleToStartTimeout: workflowTaskScheduleToStartTimeout, + Clock: vclock.NewVectorClock(t.shard.GetClusterMetadata().GetClusterID(), t.shard.GetShardID(), task.TaskID), + VersionDirective: directive, + }) + if _, isNotFound := err.(*serviceerror.NotFound); isNotFound { + // NotFound error is not expected for AddTasks calls + // but will be ignored by task error handling logic, so log it here + tasks.InitializeLogger(task, t.logger).Error("Matching returned not found error for AddWorkflowTask", tag.Error(err)) + } + + return err +} + +func (t *transferQueueTaskExecutorBase) archiveVisibility( + ctx context.Context, + namespaceID namespace.ID, + workflowID string, + runID string, + workflowTypeName string, + startTime time.Time, + executionTime time.Time, + endTime time.Time, + status enumspb.WorkflowExecutionStatus, + historyLength int64, + visibilityMemo *commonpb.Memo, + searchAttributes *commonpb.SearchAttributes, +) error { + namespaceEntry, err := t.registry.GetNamespaceByID(namespaceID) + if err != nil { + return err + } + + clusterConfiguredForVisibilityArchival := t.shard.GetArchivalMetadata().GetVisibilityConfig().ClusterConfiguredForArchival() + namespaceConfiguredForVisibilityArchival := namespaceEntry.VisibilityArchivalState().State == enumspb.ARCHIVAL_STATE_ENABLED + archiveVisibility := clusterConfiguredForVisibilityArchival && namespaceConfiguredForVisibilityArchival + + if !archiveVisibility { + return nil + } + + ctx, cancel := context.WithTimeout(ctx, t.config.TransferProcessorVisibilityArchivalTimeLimit()) + defer cancel() + + saTypeMap, err := t.searchAttributesProvider.GetSearchAttributes(t.visibilityManager.GetIndexName(), false) + if err != nil { + return err + } + + // Setting search attributes types here because archival client needs to stringify them + // and it might not have access to type map (i.e. type needs to be embedded). 
+ searchattribute.ApplyTypeMap(searchAttributes, saTypeMap) + + _, err = t.archivalClient.Archive(ctx, &archiver.ClientRequest{ + ArchiveRequest: &archiver.ArchiveRequest{ + ShardID: t.shard.GetShardID(), + NamespaceID: namespaceID.String(), + Namespace: namespaceEntry.Name().String(), + WorkflowID: workflowID, + RunID: runID, + WorkflowTypeName: workflowTypeName, + StartTime: startTime, + ExecutionTime: executionTime, + CloseTime: endTime, + Status: status, + HistoryLength: historyLength, + Memo: visibilityMemo, + SearchAttributes: searchAttributes, + VisibilityURI: namespaceEntry.VisibilityArchivalState().URI, + HistoryURI: namespaceEntry.HistoryArchivalState().URI, + Targets: []archiver.ArchivalTarget{archiver.ArchiveTargetVisibility}, + }, + CallerService: string(primitives.HistoryService), + AttemptArchiveInline: true, // archive visibility inline by default + }) + + return err +} + +func (t *transferQueueTaskExecutorBase) processDeleteExecutionTask( + ctx context.Context, + task *tasks.DeleteExecutionTask, + ensureNoPendingCloseTask bool, +) error { + return t.deleteExecution(ctx, task, false, ensureNoPendingCloseTask, &task.ProcessStage) +} + +func (t *transferQueueTaskExecutorBase) deleteExecution( + ctx context.Context, + task tasks.Task, + forceDeleteFromOpenVisibility bool, + ensureNoPendingCloseTask bool, + stage *tasks.DeleteWorkflowExecutionStage, +) (retError error) { + ctx, cancel := context.WithTimeout(ctx, taskTimeout) + defer cancel() + + workflowExecution := commonpb.WorkflowExecution{ + WorkflowId: task.GetWorkflowID(), + RunId: task.GetRunID(), + } + + weCtx, release, err := t.cache.GetOrCreateWorkflowExecution( + ctx, + namespace.ID(task.GetNamespaceID()), + workflowExecution, + workflow.LockPriorityLow, + ) + if err != nil { + return err + } + defer func() { release(retError) }() + + mutableState, err := loadMutableStateForTransferTask(ctx, weCtx, task, t.metricHandler, t.logger) + if err != nil { + return err + } + + // Here, we ensure that the workflow is closed successfully before deleting it. Otherwise, the mutable state + // might be deleted before the close task is executed, and so the close task will be dropped. In passive cluster, + // this check can be ignored. + // + // Additionally, this function itself could be called from within the close execution task, so we need to skip + // the check in that case because the close execution task would be waiting for itself to finish forever. So, the + // ensureNoPendingCloseTask flag is set iff we're running in the active cluster, and we aren't processing the + // CloseExecutionTask from within this same goroutine. + if ensureNoPendingCloseTask { + // Unfortunately, queue states/ack levels are updated with delay (default 30s), therefore this could fail if the + // workflow was closed before the queue state/ack levels were updated, so we return a retryable error. + if t.isCloseExecutionTaskPending(mutableState, weCtx) { + return consts.ErrDependencyTaskNotCompleted + } + } + + // If task version is EmptyVersion it means "don't check task version". + // This can happen when task was created from explicit user API call. + // Or the namespace is a local namespace which will not have version conflict. 
+ if task.GetVersion() != common.EmptyVersion { + lastWriteVersion, err := mutableState.GetLastWriteVersion() + if err != nil { + return err + } + err = CheckTaskVersion(t.shard, t.logger, mutableState.GetNamespaceEntry(), lastWriteVersion, task.GetVersion(), task) + if err != nil { + return err + } + } + + return t.workflowDeleteManager.DeleteWorkflowExecution( + ctx, + namespace.ID(task.GetNamespaceID()), + workflowExecution, + weCtx, + mutableState, + forceDeleteFromOpenVisibility, + stage, + ) +} + +func (t *transferQueueTaskExecutorBase) isCloseExecutionTaskPending(ms workflow.MutableState, weCtx workflow.Context) bool { + closeTransferTaskId := ms.GetExecutionInfo().CloseTransferTaskId + // taskID == 0 if workflow closed before this field was added (v1.17). + if closeTransferTaskId == 0 { + return false + } + // check if close execution transfer task is completed + transferQueueState, ok := t.shard.GetQueueState(tasks.CategoryTransfer) + if !ok { + return true + } + fakeCloseTransferTask := &tasks.CloseExecutionTask{ + WorkflowKey: weCtx.GetWorkflowKey(), + TaskID: closeTransferTaskId, + } + return !queues.IsTaskAcked(fakeCloseTransferTask, transferQueueState) +} diff -Nru temporal-1.21.5-1/src/service/history/visibilityQueueFactory.go temporal-1.22.5/src/service/history/visibilityQueueFactory.go --- temporal-1.21.5-1/src/service/history/visibilityQueueFactory.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/visibilityQueueFactory.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,147 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- -package history - -import ( - "go.uber.org/fx" - - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/log/tag" - "go.temporal.io/server/common/metrics" - "go.temporal.io/server/common/persistence/visibility/manager" - "go.temporal.io/server/service/history/queues" - "go.temporal.io/server/service/history/shard" - "go.temporal.io/server/service/history/tasks" - wcache "go.temporal.io/server/service/history/workflow/cache" -) - -const ( - visibilityQueuePersistenceMaxRPSRatio = 0.15 -) - -type ( - visibilityQueueFactoryParams struct { - fx.In - - QueueFactoryBaseParams - - VisibilityMgr manager.VisibilityManager - } - - visibilityQueueFactory struct { - visibilityQueueFactoryParams - QueueFactoryBase - } -) - -func NewVisibilityQueueFactory( - params visibilityQueueFactoryParams, -) QueueFactory { - return &visibilityQueueFactory{ - visibilityQueueFactoryParams: params, - QueueFactoryBase: QueueFactoryBase{ - HostScheduler: queues.NewNamespacePriorityScheduler( - params.ClusterMetadata.GetCurrentClusterName(), - queues.NamespacePrioritySchedulerOptions{ - WorkerCount: params.Config.VisibilityProcessorSchedulerWorkerCount, - ActiveNamespaceWeights: params.Config.VisibilityProcessorSchedulerActiveRoundRobinWeights, - StandbyNamespaceWeights: params.Config.VisibilityProcessorSchedulerStandbyRoundRobinWeights, - EnableRateLimiter: params.Config.TaskSchedulerEnableRateLimiter, - EnableRateLimiterShadowMode: params.Config.TaskSchedulerEnableRateLimiterShadowMode, - DispatchThrottleDuration: params.Config.TaskSchedulerThrottleDuration, - }, - params.NamespaceRegistry, - params.SchedulerRateLimiter, - params.TimeSource, - params.MetricsHandler.WithTags(metrics.OperationTag(metrics.OperationVisibilityQueueProcessorScope)), - params.Logger, - ), - HostPriorityAssigner: queues.NewPriorityAssigner(), - HostReaderRateLimiter: queues.NewReaderPriorityRateLimiter( - NewHostRateLimiterRateFn( - params.Config.VisibilityProcessorMaxPollHostRPS, - params.Config.PersistenceMaxQPS, - visibilityQueuePersistenceMaxRPSRatio, - ), - int64(params.Config.QueueMaxReaderCount()), - ), - }, - } -} - -func (f *visibilityQueueFactory) CreateQueue( - shard shard.Context, - workflowCache wcache.Cache, -) queues.Queue { - logger := log.With(shard.GetLogger(), tag.ComponentVisibilityQueue) - metricsHandler := f.MetricsHandler.WithTags(metrics.OperationTag(metrics.OperationVisibilityQueueProcessorScope)) - - rescheduler := queues.NewRescheduler( - f.HostScheduler, - shard.GetTimeSource(), - logger, - metricsHandler, - ) - - executor := newVisibilityQueueTaskExecutor( - shard, - workflowCache, - f.VisibilityMgr, - logger, - f.MetricsHandler, - f.Config.VisibilityProcessorEnsureCloseBeforeDelete, - f.Config.VisibilityProcessorEnableCloseWorkflowCleanup, - ) - - return queues.NewImmediateQueue( - shard, - tasks.CategoryVisibility, - f.HostScheduler, - rescheduler, - f.HostPriorityAssigner, - executor, - &queues.Options{ - ReaderOptions: queues.ReaderOptions{ - BatchSize: f.Config.VisibilityTaskBatchSize, - MaxPendingTasksCount: f.Config.QueuePendingTaskMaxCount, - PollBackoffInterval: f.Config.VisibilityProcessorPollBackoffInterval, - }, - MonitorOptions: queues.MonitorOptions{ - PendingTasksCriticalCount: f.Config.QueuePendingTaskCriticalCount, - ReaderStuckCriticalAttempts: f.Config.QueueReaderStuckCriticalAttempts, - SliceCountCriticalThreshold: f.Config.QueueCriticalSlicesCount, - }, - MaxPollRPS: f.Config.VisibilityProcessorMaxPollRPS, - MaxPollInterval: f.Config.VisibilityProcessorMaxPollInterval, - 
MaxPollIntervalJitterCoefficient: f.Config.VisibilityProcessorMaxPollIntervalJitterCoefficient, - CheckpointInterval: f.Config.VisibilityProcessorUpdateAckInterval, - CheckpointIntervalJitterCoefficient: f.Config.VisibilityProcessorUpdateAckIntervalJitterCoefficient, - MaxReaderCount: f.Config.QueueMaxReaderCount, - }, - f.HostReaderRateLimiter, - logger, - metricsHandler, - ) -} diff -Nru temporal-1.21.5-1/src/service/history/visibilityQueueTaskExecutor.go temporal-1.22.5/src/service/history/visibilityQueueTaskExecutor.go --- temporal-1.21.5-1/src/service/history/visibilityQueueTaskExecutor.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/visibilityQueueTaskExecutor.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,598 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- -package history - -import ( - "context" - "time" - - commonpb "go.temporal.io/api/common/v1" - enumspb "go.temporal.io/api/enums/v1" - "go.temporal.io/api/serviceerror" - - "go.temporal.io/server/common" - "go.temporal.io/server/common/definition" - "go.temporal.io/server/common/dynamicconfig" - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/metrics" - "go.temporal.io/server/common/namespace" - "go.temporal.io/server/common/persistence/visibility/manager" - "go.temporal.io/server/common/primitives/timestamp" - "go.temporal.io/server/service/history/consts" - "go.temporal.io/server/service/history/queues" - "go.temporal.io/server/service/history/shard" - "go.temporal.io/server/service/history/tasks" - wcache "go.temporal.io/server/service/history/workflow/cache" -) - -type ( - visibilityQueueTaskExecutor struct { - shard shard.Context - cache wcache.Cache - logger log.Logger - metricProvider metrics.Handler - visibilityMgr manager.VisibilityManager - - ensureCloseBeforeDelete dynamicconfig.BoolPropertyFn - enableCloseWorkflowCleanup dynamicconfig.BoolPropertyFnWithNamespaceFilter - } -) - -var errUnknownVisibilityTask = serviceerror.NewInternal("unknown visibility task") - -func newVisibilityQueueTaskExecutor( - shard shard.Context, - workflowCache wcache.Cache, - visibilityMgr manager.VisibilityManager, - logger log.Logger, - metricProvider metrics.Handler, - ensureCloseBeforeDelete dynamicconfig.BoolPropertyFn, - enableCloseWorkflowCleanup dynamicconfig.BoolPropertyFnWithNamespaceFilter, -) *visibilityQueueTaskExecutor { - return &visibilityQueueTaskExecutor{ - shard: shard, - cache: workflowCache, - logger: logger, - metricProvider: metricProvider, - visibilityMgr: visibilityMgr, - - ensureCloseBeforeDelete: ensureCloseBeforeDelete, - enableCloseWorkflowCleanup: enableCloseWorkflowCleanup, - } -} - -func (t *visibilityQueueTaskExecutor) Execute( - ctx context.Context, - executable queues.Executable, -) ([]metrics.Tag, bool, error) { - task := executable.GetTask() - taskType := queues.GetVisibilityTaskTypeTagValue(task) - namespaceTag, replicationState := getNamespaceTagAndReplicationStateByID( - t.shard.GetNamespaceRegistry(), - task.GetNamespaceID(), - ) - metricsTags := []metrics.Tag{ - namespaceTag, - metrics.TaskTypeTag(taskType), - metrics.OperationTag(taskType), // for backward compatibility - } - - if replicationState == enumspb.REPLICATION_STATE_HANDOVER { - // TODO: exclude task types here if we believe it's safe & necessary to execute - // them during namespace handover. 
- // Visibility tasks should all be safe, but close execution task - // might do a setWorkflowExecution to clean up memo and search attributes, which - // will be blocked by shard context during ns handover - // TODO: move this logic to queues.Executable when metrics tag doesn't need to - // be returned from task executor - return metricsTags, true, consts.ErrNamespaceHandover - } - - var err error - switch task := task.(type) { - case *tasks.StartExecutionVisibilityTask: - err = t.processStartExecution(ctx, task) - case *tasks.UpsertExecutionVisibilityTask: - err = t.processUpsertExecution(ctx, task) - case *tasks.CloseExecutionVisibilityTask: - err = t.processCloseExecution(ctx, task) - case *tasks.DeleteExecutionVisibilityTask: - err = t.processDeleteExecution(ctx, task) - default: - err = errUnknownVisibilityTask - } - - return metricsTags, true, err -} - -func (t *visibilityQueueTaskExecutor) processStartExecution( - ctx context.Context, - task *tasks.StartExecutionVisibilityTask, -) (retError error) { - ctx, cancel := context.WithTimeout(ctx, taskTimeout) - defer cancel() - - weContext, release, err := getWorkflowExecutionContextForTask(ctx, t.cache, task) - if err != nil { - return err - } - defer func() { release(retError) }() - - mutableState, err := weContext.LoadMutableState(ctx) - if err != nil { - return err - } - if mutableState == nil || !mutableState.IsWorkflowExecutionRunning() { - return nil - } - - // verify task version for RecordWorkflowStarted. - // upsert doesn't require verifyTask, because it is just a sync of mutableState. - startVersion, err := mutableState.GetStartVersion() - if err != nil { - return err - } - err = CheckTaskVersion(t.shard, t.logger, mutableState.GetNamespaceEntry(), startVersion, task.Version, task) - if err != nil { - return err - } - - executionInfo := mutableState.GetExecutionInfo() - executionState := mutableState.GetExecutionState() - wfTypeName := executionInfo.WorkflowTypeName - - workflowStartTime := timestamp.TimeValue(mutableState.GetExecutionInfo().GetStartTime()) - workflowExecutionTime := timestamp.TimeValue(mutableState.GetExecutionInfo().GetExecutionTime()) - visibilityMemo := getWorkflowMemo(copyMemo(executionInfo.Memo)) - searchAttr := getSearchAttributes(copySearchAttributes(executionInfo.SearchAttributes)) - executionStatus := executionState.GetStatus() - taskQueue := executionInfo.TaskQueue - stateTransitionCount := executionInfo.GetStateTransitionCount() - - // NOTE: do not access anything related mutable state after this lock release - // release the context lock since we no longer need mutable state and - // the rest of logic is making RPC call, which takes time. 
- release(nil) - - return t.recordStartExecution( - ctx, - namespace.ID(task.GetNamespaceID()), - task.GetWorkflowID(), - task.GetRunID(), - wfTypeName, - workflowStartTime, - workflowExecutionTime, - stateTransitionCount, - task.GetTaskID(), - executionStatus, - taskQueue, - visibilityMemo, - searchAttr, - ) -} - -func (t *visibilityQueueTaskExecutor) processUpsertExecution( - ctx context.Context, - task *tasks.UpsertExecutionVisibilityTask, -) (retError error) { - ctx, cancel := context.WithTimeout(ctx, taskTimeout) - defer cancel() - - weContext, release, err := getWorkflowExecutionContextForTask(ctx, t.cache, task) - if err != nil { - return err - } - defer func() { release(retError) }() - - mutableState, err := weContext.LoadMutableState(ctx) - if err != nil { - return err - } - if mutableState == nil || !mutableState.IsWorkflowExecutionRunning() { - return nil - } - - executionInfo := mutableState.GetExecutionInfo() - executionState := mutableState.GetExecutionState() - wfTypeName := executionInfo.WorkflowTypeName - - workflowStartTime := timestamp.TimeValue(mutableState.GetExecutionInfo().GetStartTime()) - workflowExecutionTime := timestamp.TimeValue(mutableState.GetExecutionInfo().GetExecutionTime()) - visibilityMemo := getWorkflowMemo(copyMemo(executionInfo.Memo)) - searchAttr := getSearchAttributes(copySearchAttributes(executionInfo.SearchAttributes)) - executionStatus := executionState.GetStatus() - taskQueue := executionInfo.TaskQueue - stateTransitionCount := executionInfo.GetStateTransitionCount() - - // NOTE: do not access anything related mutable state after this lock release - // release the context lock since we no longer need mutable state and - // the rest of logic is making RPC call, which takes time. - release(nil) - - return t.upsertExecution( - ctx, - namespace.ID(task.GetNamespaceID()), - task.GetWorkflowID(), - task.GetRunID(), - wfTypeName, - workflowStartTime, - workflowExecutionTime, - stateTransitionCount, - task.GetTaskID(), - executionStatus, - taskQueue, - visibilityMemo, - searchAttr, - ) -} - -func (t *visibilityQueueTaskExecutor) recordStartExecution( - ctx context.Context, - namespaceID namespace.ID, - workflowID string, - runID string, - workflowTypeName string, - startTime time.Time, - executionTime time.Time, - stateTransitionCount int64, - taskID int64, - status enumspb.WorkflowExecutionStatus, - taskQueue string, - visibilityMemo *commonpb.Memo, - searchAttributes *commonpb.SearchAttributes, -) error { - namespaceEntry, err := t.shard.GetNamespaceRegistry().GetNamespaceByID(namespaceID) - if err != nil { - return err - } - - request := &manager.RecordWorkflowExecutionStartedRequest{ - VisibilityRequestBase: &manager.VisibilityRequestBase{ - NamespaceID: namespaceID, - Namespace: namespaceEntry.Name(), - Execution: commonpb.WorkflowExecution{ - WorkflowId: workflowID, - RunId: runID, - }, - WorkflowTypeName: workflowTypeName, - StartTime: startTime, - ExecutionTime: executionTime, - StateTransitionCount: stateTransitionCount, TaskID: taskID, - Status: status, - ShardID: t.shard.GetShardID(), - Memo: visibilityMemo, - TaskQueue: taskQueue, - SearchAttributes: searchAttributes, - }, - } - return t.visibilityMgr.RecordWorkflowExecutionStarted(ctx, request) -} - -func (t *visibilityQueueTaskExecutor) upsertExecution( - ctx context.Context, - namespaceID namespace.ID, - workflowID string, - runID string, - workflowTypeName string, - startTime time.Time, - executionTime time.Time, - stateTransitionCount int64, - taskID int64, - status 
enumspb.WorkflowExecutionStatus, - taskQueue string, - visibilityMemo *commonpb.Memo, - searchAttributes *commonpb.SearchAttributes, -) error { - namespaceEntry, err := t.shard.GetNamespaceRegistry().GetNamespaceByID(namespaceID) - if err != nil { - return err - } - - request := &manager.UpsertWorkflowExecutionRequest{ - VisibilityRequestBase: &manager.VisibilityRequestBase{ - NamespaceID: namespaceID, - Namespace: namespaceEntry.Name(), - Execution: commonpb.WorkflowExecution{ - WorkflowId: workflowID, - RunId: runID, - }, - WorkflowTypeName: workflowTypeName, - StartTime: startTime, - ExecutionTime: executionTime, - StateTransitionCount: stateTransitionCount, TaskID: taskID, - ShardID: t.shard.GetShardID(), - Status: status, - Memo: visibilityMemo, - TaskQueue: taskQueue, - SearchAttributes: searchAttributes, - }, - } - - return t.visibilityMgr.UpsertWorkflowExecution(ctx, request) -} - -func (t *visibilityQueueTaskExecutor) processCloseExecution( - parentCtx context.Context, - task *tasks.CloseExecutionVisibilityTask, -) (retError error) { - ctx, cancel := context.WithTimeout(parentCtx, taskTimeout) - defer cancel() - - namespaceEntry, err := t.shard.GetNamespaceRegistry().GetNamespaceByID(namespace.ID(task.GetNamespaceID())) - if err != nil { - return err - } - - weContext, release, err := getWorkflowExecutionContextForTask(ctx, t.cache, task) - if err != nil { - return err - } - defer func() { release(retError) }() - - mutableState, err := weContext.LoadMutableState(ctx) - if err != nil { - return err - } - if mutableState == nil || mutableState.IsWorkflowExecutionRunning() { - return nil - } - - lastWriteVersion, err := mutableState.GetLastWriteVersion() - if err != nil { - return err - } - err = CheckTaskVersion(t.shard, t.logger, mutableState.GetNamespaceEntry(), lastWriteVersion, task.Version, task) - if err != nil { - return err - } - - executionInfo := mutableState.GetExecutionInfo() - executionState := mutableState.GetExecutionState() - wfCloseTime, err := mutableState.GetWorkflowCloseTime(ctx) - if err != nil { - return err - } - workflowTypeName := executionInfo.WorkflowTypeName - workflowStatus := executionState.Status - workflowHistoryLength := mutableState.GetNextEventID() - 1 - workflowStartTime := timestamp.TimeValue(mutableState.GetExecutionInfo().GetStartTime()) - workflowExecutionTime := timestamp.TimeValue(mutableState.GetExecutionInfo().GetExecutionTime()) - visibilityMemo := getWorkflowMemo(copyMemo(executionInfo.Memo)) - searchAttr := getSearchAttributes(copySearchAttributes(executionInfo.SearchAttributes)) - taskQueue := executionInfo.TaskQueue - stateTransitionCount := executionInfo.GetStateTransitionCount() - historySizeBytes := executionInfo.GetExecutionStats().GetHistorySize() - - // NOTE: do not access anything related mutable state after this lock release - // release the context lock since we no longer need mutable state and - // the rest of logic is making RPC call, which takes time. - release(nil) - err = t.recordCloseExecution( - ctx, - namespaceEntry, - task.GetWorkflowID(), - task.GetRunID(), - workflowTypeName, - workflowStartTime, - workflowExecutionTime, - timestamp.TimeValue(wfCloseTime), - workflowStatus, - stateTransitionCount, - workflowHistoryLength, - task.GetTaskID(), - visibilityMemo, - taskQueue, - searchAttr, - historySizeBytes, - ) - if err != nil { - return err - } - - // Elasticsearch bulk processor doesn't respect context timeout - // because under heavy load bulk flush might take longer than taskTimeout. 
- // Therefore, ctx timeout might be already expired - // and parentCtx (which doesn't have timeout) must be used everywhere bellow. - - if t.enableCloseWorkflowCleanup(namespaceEntry.Name().String()) { - return t.cleanupExecutionInfo(parentCtx, task) - } - return nil -} - -func (t *visibilityQueueTaskExecutor) recordCloseExecution( - ctx context.Context, - namespaceEntry *namespace.Namespace, - workflowID string, - runID string, - workflowTypeName string, - startTime time.Time, - executionTime time.Time, - endTime time.Time, - status enumspb.WorkflowExecutionStatus, - stateTransitionCount int64, - historyLength int64, - taskID int64, - visibilityMemo *commonpb.Memo, - taskQueue string, - searchAttributes *commonpb.SearchAttributes, - historySizeBytes int64, -) error { - return t.visibilityMgr.RecordWorkflowExecutionClosed(ctx, &manager.RecordWorkflowExecutionClosedRequest{ - VisibilityRequestBase: &manager.VisibilityRequestBase{ - NamespaceID: namespaceEntry.ID(), - Namespace: namespaceEntry.Name(), - Execution: commonpb.WorkflowExecution{ - WorkflowId: workflowID, - RunId: runID, - }, - WorkflowTypeName: workflowTypeName, - StartTime: startTime, - ExecutionTime: executionTime, - StateTransitionCount: stateTransitionCount, - Status: status, - TaskID: taskID, - ShardID: t.shard.GetShardID(), - Memo: visibilityMemo, - TaskQueue: taskQueue, - SearchAttributes: searchAttributes, - }, - CloseTime: endTime, - HistoryLength: historyLength, - HistorySizeBytes: historySizeBytes, - }) -} - -func (t *visibilityQueueTaskExecutor) processDeleteExecution( - ctx context.Context, - task *tasks.DeleteExecutionVisibilityTask, -) (retError error) { - ctx, cancel := context.WithTimeout(ctx, taskTimeout) - defer cancel() - - request := &manager.VisibilityDeleteWorkflowExecutionRequest{ - NamespaceID: namespace.ID(task.NamespaceID), - WorkflowID: task.WorkflowID, - RunID: task.RunID, - TaskID: task.TaskID, - StartTime: task.StartTime, - CloseTime: task.CloseTime, - } - if t.ensureCloseBeforeDelete() { - // If visibility delete task is executed before visibility close task then visibility close task - // (which change workflow execution status by uploading new visibility record) will resurrect visibility record. - // - // Queue states/ack levels are updated with delay (default 30s). Therefore, this check could return false - // if the workflow was closed and then deleted within this delay period. - if t.isCloseExecutionVisibilityTaskPending(task) { - // Return retryable error for task processor to retry the operation later. - return consts.ErrDependencyTaskNotCompleted - } - } - return t.visibilityMgr.DeleteWorkflowExecution(ctx, request) -} - -func (t *visibilityQueueTaskExecutor) isCloseExecutionVisibilityTaskPending(task *tasks.DeleteExecutionVisibilityTask) bool { - CloseExecutionVisibilityTaskID := task.CloseExecutionVisibilityTaskID - // taskID == 0 if workflow still running in passive cluster or closed before this field was added (v1.17). 
- if CloseExecutionVisibilityTaskID == 0 { - return false - } - // check if close execution visibility task is completed - visibilityQueueState, ok := t.shard.GetQueueState(tasks.CategoryVisibility) - if !ok { - return true - } - queryTask := &tasks.CloseExecutionVisibilityTask{ - WorkflowKey: definition.NewWorkflowKey(task.GetNamespaceID(), task.GetWorkflowID(), task.GetRunID()), - TaskID: CloseExecutionVisibilityTaskID, - } - return !queues.IsTaskAcked(queryTask, visibilityQueueState) -} - -// cleanupExecutionInfo cleans up workflow execution info after visibility close -// task has been processed and acked by visibility store. -func (t *visibilityQueueTaskExecutor) cleanupExecutionInfo( - ctx context.Context, - task *tasks.CloseExecutionVisibilityTask, -) (retError error) { - ctx, cancel := context.WithTimeout(ctx, taskTimeout) - defer cancel() - - weContext, release, err := getWorkflowExecutionContextForTask(ctx, t.cache, task) - if err != nil { - return err - } - defer func() { release(retError) }() - - mutableState, err := weContext.LoadMutableState(ctx) - if err != nil { - return err - } - if mutableState == nil || mutableState.IsWorkflowExecutionRunning() { - return nil - } - - lastWriteVersion, err := mutableState.GetLastWriteVersion() - if err != nil { - return err - } - err = CheckTaskVersion(t.shard, t.logger, mutableState.GetNamespaceEntry(), lastWriteVersion, task.Version, task) - if err != nil { - return err - } - - executionInfo := mutableState.GetExecutionInfo() - executionInfo.Memo = nil - executionInfo.SearchAttributes = nil - executionInfo.CloseVisibilityTaskCompleted = true - return weContext.SetWorkflowExecution(ctx) -} - -func getWorkflowMemo( - memoFields map[string]*commonpb.Payload, -) *commonpb.Memo { - if memoFields == nil { - return nil - } - return &commonpb.Memo{Fields: memoFields} -} - -func copyMemo( - memoFields map[string]*commonpb.Payload, -) map[string]*commonpb.Payload { - if memoFields == nil { - return nil - } - - result := make(map[string]*commonpb.Payload) - for k, v := range memoFields { - result[k] = common.CloneProto(v) - } - return result -} - -func getSearchAttributes( - indexedFields map[string]*commonpb.Payload, -) *commonpb.SearchAttributes { - if indexedFields == nil { - return nil - } - return &commonpb.SearchAttributes{IndexedFields: indexedFields} -} - -func copySearchAttributes( - input map[string]*commonpb.Payload, -) map[string]*commonpb.Payload { - if input == nil { - return nil - } - - result := make(map[string]*commonpb.Payload) - for k, v := range input { - result[k] = common.CloneProto(v) - } - return result -} diff -Nru temporal-1.21.5-1/src/service/history/visibilityQueueTaskExecutor_test.go temporal-1.22.5/src/service/history/visibilityQueueTaskExecutor_test.go --- temporal-1.21.5-1/src/service/history/visibilityQueueTaskExecutor_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/visibilityQueueTaskExecutor_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,622 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package history - -import ( - "context" - "testing" - "time" - - "github.com/golang/mock/gomock" - "github.com/pborman/uuid" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - - commonpb "go.temporal.io/api/common/v1" - enumspb "go.temporal.io/api/enums/v1" - historypb "go.temporal.io/api/history/v1" - taskqueuepb "go.temporal.io/api/taskqueue/v1" - "go.temporal.io/api/workflowservice/v1" - - "go.temporal.io/server/api/historyservice/v1" - persistencespb "go.temporal.io/server/api/persistence/v1" - workflowspb "go.temporal.io/server/api/workflow/v1" - "go.temporal.io/server/common" - "go.temporal.io/server/common/clock" - "go.temporal.io/server/common/cluster" - "go.temporal.io/server/common/definition" - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/metrics" - "go.temporal.io/server/common/namespace" - "go.temporal.io/server/common/persistence" - "go.temporal.io/server/common/persistence/versionhistory" - "go.temporal.io/server/common/persistence/visibility/manager" - "go.temporal.io/server/common/primitives/timestamp" - "go.temporal.io/server/service/history/consts" - "go.temporal.io/server/service/history/events" - "go.temporal.io/server/service/history/queues" - "go.temporal.io/server/service/history/shard" - "go.temporal.io/server/service/history/tasks" - "go.temporal.io/server/service/history/tests" - "go.temporal.io/server/service/history/workflow" - wcache "go.temporal.io/server/service/history/workflow/cache" -) - -type ( - visibilityQueueTaskExecutorSuite struct { - suite.Suite - *require.Assertions - - controller *gomock.Controller - mockShard *shard.ContextTest - - mockVisibilityMgr *manager.MockVisibilityManager - mockExecutionMgr *persistence.MockExecutionManager - - workflowCache wcache.Cache - logger log.Logger - namespaceID namespace.ID - namespace namespace.Name - version int64 - now time.Time - timeSource *clock.EventTimeSource - visibilityQueueTaskExecutor *visibilityQueueTaskExecutor - - enableCloseWorkflowCleanup bool - } -) - -func TestVisibilityQueueTaskExecutorSuite(t *testing.T) { - s := new(visibilityQueueTaskExecutorSuite) - suite.Run(t, s) -} - -func (s *visibilityQueueTaskExecutorSuite) SetupSuite() { -} - -func (s *visibilityQueueTaskExecutorSuite) TearDownSuite() { -} - -func (s *visibilityQueueTaskExecutorSuite) SetupTest() { - s.Assertions = require.New(s.T()) - - s.namespaceID = tests.NamespaceID - s.namespace = tests.Namespace - 
s.version = tests.GlobalNamespaceEntry.FailoverVersion() - s.now = time.Now().UTC() - s.timeSource = clock.NewEventTimeSource().Update(s.now) - - s.controller = gomock.NewController(s.T()) - - config := tests.NewDynamicConfig() - s.mockShard = shard.NewTestContext( - s.controller, - &persistencespb.ShardInfo{ - ShardId: 1, - RangeId: 1, - }, - config, - ) - s.mockShard.SetEventsCacheForTesting(events.NewEventsCache( - s.mockShard.GetShardID(), - s.mockShard.GetConfig().EventsCacheInitialSize(), - s.mockShard.GetConfig().EventsCacheMaxSize(), - s.mockShard.GetConfig().EventsCacheTTL(), - s.mockShard.GetExecutionManager(), - false, - s.mockShard.GetLogger(), - s.mockShard.GetMetricsHandler(), - )) - s.mockShard.Resource.TimeSource = s.timeSource - - s.mockExecutionMgr = s.mockShard.Resource.ExecutionMgr - s.mockVisibilityMgr = manager.NewMockVisibilityManager(s.controller) - - mockNamespaceCache := s.mockShard.Resource.NamespaceCache - mockNamespaceCache.EXPECT().GetNamespaceByID(tests.NamespaceID).Return(tests.GlobalNamespaceEntry, nil).AnyTimes() - mockNamespaceCache.EXPECT().GetNamespace(tests.Namespace).Return(tests.GlobalNamespaceEntry, nil).AnyTimes() - mockNamespaceCache.EXPECT().GetNamespaceName(tests.NamespaceID).Return(tests.Namespace, nil).AnyTimes() - - mockClusterMetadata := s.mockShard.Resource.ClusterMetadata - mockClusterMetadata.EXPECT().GetCurrentClusterName().Return(cluster.TestCurrentClusterName).AnyTimes() - mockClusterMetadata.EXPECT().GetAllClusterInfo().Return(cluster.TestAllClusterInfo).AnyTimes() - mockClusterMetadata.EXPECT().IsGlobalNamespaceEnabled().Return(true).AnyTimes() - mockClusterMetadata.EXPECT().ClusterNameForFailoverVersion(true, s.version).Return(mockClusterMetadata.GetCurrentClusterName()).AnyTimes() - - s.workflowCache = wcache.NewCache(s.mockShard) - s.logger = s.mockShard.GetLogger() - - h := &historyEngineImpl{ - currentClusterName: s.mockShard.Resource.GetClusterMetadata().GetCurrentClusterName(), - shard: s.mockShard, - clusterMetadata: mockClusterMetadata, - executionManager: s.mockExecutionMgr, - logger: s.logger, - tokenSerializer: common.NewProtoTaskTokenSerializer(), - metricsHandler: s.mockShard.GetMetricsHandler(), - eventNotifier: events.NewNotifier(clock.NewRealTimeSource(), metrics.NoopMetricsHandler, func(namespace.ID, string) int32 { return 1 }), - } - s.mockShard.SetEngineForTesting(h) - - s.enableCloseWorkflowCleanup = false - s.visibilityQueueTaskExecutor = newVisibilityQueueTaskExecutor( - s.mockShard, - s.workflowCache, - s.mockVisibilityMgr, - s.logger, - metrics.NoopMetricsHandler, - config.VisibilityProcessorEnsureCloseBeforeDelete, - func(_ string) bool { return s.enableCloseWorkflowCleanup }, - ) -} - -func (s *visibilityQueueTaskExecutorSuite) TearDownTest() { - s.controller.Finish() - s.mockShard.StopForTest() -} - -func (s *visibilityQueueTaskExecutorSuite) TestProcessCloseExecution() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - parentNamespaceID := "some random parent namespace ID" - parentInitiatedID := int64(3222) - parentInitiatedVersion := int64(1234) - parentNamespace := "some random parent namespace Name" - parentExecution := &commonpb.WorkflowExecution{ - WorkflowId: "some random parent workflow ID", - RunId: uuid.New(), - } - - mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, 
execution.GetRunId()) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - ParentExecutionInfo: &workflowspb.ParentExecutionInfo{ - NamespaceId: parentNamespaceID, - Namespace: parentNamespace, - Execution: parentExecution, - InitiatedId: parentInitiatedID, - InitiatedVersion: parentInitiatedVersion, - }, - }, - ) - s.Nil(err) - - wt := addWorkflowTaskScheduledEvent(mutableState) - event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) - wt.StartedEventID = event.GetEventId() - event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") - - taskID := int64(59) - event = addCompleteWorkflowEvent(mutableState, event.GetEventId(), nil) - - visibilityTask := &tasks.CloseExecutionVisibilityTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - VisibilityTimestamp: time.Now().UTC(), - Version: s.version, - TaskID: taskID, - } - - persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - s.mockVisibilityMgr.EXPECT().RecordWorkflowExecutionClosed(gomock.Any(), gomock.Any()).Return(nil) - - _, _, err = s.visibilityQueueTaskExecutor.Execute(context.Background(), s.newTaskExecutable(visibilityTask)) - s.Nil(err) -} - -func (s *visibilityQueueTaskExecutorSuite) TestProcessCloseExecutionWithWorkflowClosedCleanup() { - s.enableCloseWorkflowCleanup = true - - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - parentNamespaceID := "some random parent namespace ID" - parentInitiatedID := int64(3222) - parentInitiatedVersion := int64(1234) - parentNamespace := "some random parent namespace Name" - parentExecution := &commonpb.WorkflowExecution{ - WorkflowId: "some random parent workflow ID", - RunId: uuid.New(), - } - - mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - ParentExecutionInfo: &workflowspb.ParentExecutionInfo{ - NamespaceId: parentNamespaceID, - Namespace: parentNamespace, - Execution: parentExecution, - InitiatedId: parentInitiatedID, - InitiatedVersion: parentInitiatedVersion, - }, - }, - ) - s.Nil(err) - - wt := 
addWorkflowTaskScheduledEvent(mutableState) - event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) - wt.StartedEventID = event.GetEventId() - event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") - - taskID := int64(59) - event = addCompleteWorkflowEvent(mutableState, event.GetEventId(), nil) - - visibilityTask := &tasks.CloseExecutionVisibilityTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - VisibilityTimestamp: time.Now().UTC(), - Version: s.version, - TaskID: taskID, - } - - persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - s.mockExecutionMgr.EXPECT().SetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.SetWorkflowExecutionResponse{}, nil) - s.mockVisibilityMgr.EXPECT().RecordWorkflowExecutionClosed(gomock.Any(), gomock.Any()).Return(nil) - - _, _, err = s.visibilityQueueTaskExecutor.Execute(context.Background(), s.newTaskExecutable(visibilityTask)) - s.Nil(err) -} - -func (s *visibilityQueueTaskExecutorSuite) TestProcessRecordWorkflowStartedTask() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - cronSchedule := "@every 5s" - backoff := 5 * time.Second - - mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) - - event, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - CronSchedule: cronSchedule, - }, - FirstWorkflowTaskBackoff: &backoff, - }, - ) - s.Nil(err) - - taskID := int64(59) - wt := addWorkflowTaskScheduledEvent(mutableState) - - visibilityTask := &tasks.StartExecutionVisibilityTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - VisibilityTimestamp: time.Now().UTC(), - Version: s.version, - TaskID: taskID, - } - - persistenceMutableState := s.createPersistenceMutableState(mutableState, wt.ScheduledEventID, wt.Version) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - s.mockVisibilityMgr.EXPECT().RecordWorkflowExecutionStarted( - gomock.Any(), - s.createRecordWorkflowExecutionStartedRequest(s.namespace, event, visibilityTask, mutableState, backoff, taskQueueName), - ).Return(nil) - - _, _, err = s.visibilityQueueTaskExecutor.Execute(context.Background(), s.newTaskExecutable(visibilityTask)) - s.Nil(err) -} - -func (s *visibilityQueueTaskExecutorSuite) TestProcessUpsertWorkflowSearchAttributes() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - 
RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) - - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - }, - ) - s.NoError(err) - - taskID := int64(59) - wt := addWorkflowTaskScheduledEvent(mutableState) - - visibilityTask := &tasks.UpsertExecutionVisibilityTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - Version: s.version, - TaskID: taskID, - } - - persistenceMutableState := s.createPersistenceMutableState(mutableState, wt.ScheduledEventID, wt.Version) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - s.mockVisibilityMgr.EXPECT().UpsertWorkflowExecution( - gomock.Any(), - s.createUpsertWorkflowRequest(s.namespace, visibilityTask, mutableState, taskQueueName), - ).Return(nil) - - _, _, err = s.visibilityQueueTaskExecutor.Execute(context.Background(), s.newTaskExecutable(visibilityTask)) - s.NoError(err) -} - -func (s *visibilityQueueTaskExecutorSuite) TestProcessModifyWorkflowProperties() { - execution := commonpb.WorkflowExecution{ - WorkflowId: "some random workflow ID", - RunId: uuid.New(), - } - workflowType := "some random workflow type" - taskQueueName := "some random task queue" - - mutableState := workflow.TestGlobalMutableState( - s.mockShard, - s.mockShard.GetEventsCache(), - s.logger, - s.version, - execution.GetRunId(), - ) - - _, err := mutableState.AddWorkflowExecutionStartedEvent( - execution, - &historyservice.StartWorkflowExecutionRequest{ - Attempt: 1, - NamespaceId: s.namespaceID.String(), - StartRequest: &workflowservice.StartWorkflowExecutionRequest{ - WorkflowType: &commonpb.WorkflowType{Name: workflowType}, - TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, - WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), - WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), - }, - }, - ) - s.NoError(err) - - taskID := int64(59) - wt := addWorkflowTaskScheduledEvent(mutableState) - - visibilityTask := &tasks.UpsertExecutionVisibilityTask{ - WorkflowKey: definition.NewWorkflowKey( - s.namespaceID.String(), - execution.GetWorkflowId(), - execution.GetRunId(), - ), - Version: s.version, - TaskID: taskID, - } - - persistenceMutableState := s.createPersistenceMutableState( - mutableState, - wt.ScheduledEventID, - wt.Version, - ) - s.mockExecutionMgr.EXPECT().GetWorkflowExecution( - gomock.Any(), - gomock.Any(), - ).Return( - &persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, - nil, - ) - s.mockVisibilityMgr.EXPECT().UpsertWorkflowExecution( - gomock.Any(), - s.createUpsertWorkflowRequest(s.namespace, visibilityTask, mutableState, taskQueueName), - ).Return(nil) - - _, _, err = s.visibilityQueueTaskExecutor.Execute( - context.Background(), - s.newTaskExecutable(visibilityTask), - ) - s.NoError(err) -} 
- -func (s *visibilityQueueTaskExecutorSuite) TestProcessorDeleteExecution() { - s.T().SkipNow() - workflowKey := definition.WorkflowKey{ - NamespaceID: s.namespaceID.String(), - } - s.Run("TaskID=0", func() { - s.mockVisibilityMgr.EXPECT().DeleteWorkflowExecution(gomock.Any(), gomock.Any()) - err := s.execute(&tasks.DeleteExecutionVisibilityTask{ - WorkflowKey: workflowKey, - CloseExecutionVisibilityTaskID: 0, - }) - s.Assert().NoError(err) - }) - s.Run("MultiCursorQueue", func() { - const highWatermark int64 = 5 - s.NoError(s.mockShard.SetQueueState(tasks.CategoryVisibility, &persistencespb.QueueState{ - ReaderStates: nil, - ExclusiveReaderHighWatermark: &persistencespb.TaskKey{ - TaskId: highWatermark, - FireTime: timestamp.TimePtr(tasks.DefaultFireTime), - }, - })) - s.Run("NotAcked", func() { - err := s.execute(&tasks.DeleteExecutionVisibilityTask{ - WorkflowKey: workflowKey, - CloseExecutionVisibilityTaskID: highWatermark + 1, - }) - s.ErrorIs(err, consts.ErrDependencyTaskNotCompleted) - }) - s.Run("Acked", func() { - s.mockVisibilityMgr.EXPECT().DeleteWorkflowExecution(gomock.Any(), gomock.Any()) - err := s.execute(&tasks.DeleteExecutionVisibilityTask{ - WorkflowKey: workflowKey, - CloseExecutionVisibilityTaskID: highWatermark - 1, - }) - s.NoError(err) - }) - }) -} - -func (s *visibilityQueueTaskExecutorSuite) execute(task tasks.Task) error { - _, _, err := s.visibilityQueueTaskExecutor.Execute(context.Background(), s.newTaskExecutable(task)) - return err -} - -func (s *visibilityQueueTaskExecutorSuite) createRecordWorkflowExecutionStartedRequest( - namespaceName namespace.Name, - startEvent *historypb.HistoryEvent, - task *tasks.StartExecutionVisibilityTask, - mutableState workflow.MutableState, - backoff time.Duration, - taskQueueName string, -) *manager.RecordWorkflowExecutionStartedRequest { - execution := &commonpb.WorkflowExecution{ - WorkflowId: task.WorkflowID, - RunId: task.RunID, - } - executionInfo := mutableState.GetExecutionInfo() - executionTimestamp := timestamp.TimeValue(startEvent.GetEventTime()).Add(backoff) - - return &manager.RecordWorkflowExecutionStartedRequest{ - VisibilityRequestBase: &manager.VisibilityRequestBase{ - Namespace: namespaceName, - NamespaceID: namespace.ID(task.NamespaceID), - Execution: *execution, - WorkflowTypeName: executionInfo.WorkflowTypeName, - StartTime: timestamp.TimeValue(startEvent.GetEventTime()), - ExecutionTime: executionTimestamp, - TaskID: task.TaskID, - Status: enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING, - ShardID: s.mockShard.GetShardID(), - TaskQueue: taskQueueName, - }, - } -} - -func (s *visibilityQueueTaskExecutorSuite) createUpsertWorkflowRequest( - namespaceName namespace.Name, - task *tasks.UpsertExecutionVisibilityTask, - mutableState workflow.MutableState, - taskQueueName string, -) *manager.UpsertWorkflowExecutionRequest { - execution := &commonpb.WorkflowExecution{ - WorkflowId: task.WorkflowID, - RunId: task.RunID, - } - executionInfo := mutableState.GetExecutionInfo() - - return &manager.UpsertWorkflowExecutionRequest{ - VisibilityRequestBase: &manager.VisibilityRequestBase{ - Namespace: namespaceName, - NamespaceID: namespace.ID(task.NamespaceID), - Execution: *execution, - WorkflowTypeName: executionInfo.WorkflowTypeName, - StartTime: timestamp.TimeValue(executionInfo.GetStartTime()), - ExecutionTime: timestamp.TimeValue(executionInfo.GetExecutionTime()), - TaskID: task.TaskID, - Status: enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING, - TaskQueue: taskQueueName, - ShardID: s.mockShard.GetShardID(), - }, - } -} - 
-func (s *visibilityQueueTaskExecutorSuite) createPersistenceMutableState( - ms workflow.MutableState, - lastEventID int64, - lastEventVersion int64, -) *persistencespb.WorkflowMutableState { - currentVersionHistory, err := versionhistory.GetCurrentVersionHistory(ms.GetExecutionInfo().GetVersionHistories()) - s.NoError(err) - err = versionhistory.AddOrUpdateVersionHistoryItem(currentVersionHistory, versionhistory.NewVersionHistoryItem( - lastEventID, lastEventVersion, - )) - s.NoError(err) - return workflow.TestCloneToProto(ms) -} - -func (s *visibilityQueueTaskExecutorSuite) newTaskExecutable( - task tasks.Task, -) queues.Executable { - return queues.NewExecutable( - queues.DefaultReaderId, - task, - s.visibilityQueueTaskExecutor, - nil, - nil, - queues.NewNoopPriorityAssigner(), - s.mockShard.GetTimeSource(), - s.mockShard.GetNamespaceRegistry(), - s.mockShard.GetClusterMetadata(), - nil, - metrics.NoopMetricsHandler, - ) -} diff -Nru temporal-1.21.5-1/src/service/history/visibility_queue_factory.go temporal-1.22.5/src/service/history/visibility_queue_factory.go --- temporal-1.21.5-1/src/service/history/visibility_queue_factory.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/service/history/visibility_queue_factory.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,150 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
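// The new visibility_queue_factory.go below budgets the visibility queue's
// persistence traffic as a fraction of the host's persistence QPS via
// visibilityQueuePersistenceMaxRPSRatio = 0.15. A rough, hypothetical illustration
// of that arithmetic (the exact semantics of NewHostRateLimiterRateFn are not shown
// in this diff, so treat this as an assumption rather than the implementation):
//
//	const persistenceMaxQPS = 2000.0 // hypothetical host-wide persistence limit
//	visibilityRPS := visibilityQueuePersistenceMaxRPSRatio * persistenceMaxQPS // 0.15 * 2000 = 300
//
// In other words, visibility task reads are expected to be capped at roughly 15%
// of the configured persistence capacity on each history host.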
+ +package history + +import ( + "go.uber.org/fx" + + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/log/tag" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/persistence/visibility/manager" + "go.temporal.io/server/service/history/queues" + "go.temporal.io/server/service/history/shard" + "go.temporal.io/server/service/history/tasks" + wcache "go.temporal.io/server/service/history/workflow/cache" +) + +const ( + visibilityQueuePersistenceMaxRPSRatio = 0.15 +) + +type ( + visibilityQueueFactoryParams struct { + fx.In + + QueueFactoryBaseParams + + VisibilityMgr manager.VisibilityManager + } + + visibilityQueueFactory struct { + visibilityQueueFactoryParams + QueueFactoryBase + } +) + +func NewVisibilityQueueFactory( + params visibilityQueueFactoryParams, +) QueueFactory { + return &visibilityQueueFactory{ + visibilityQueueFactoryParams: params, + QueueFactoryBase: QueueFactoryBase{ + HostScheduler: queues.NewNamespacePriorityScheduler( + params.ClusterMetadata.GetCurrentClusterName(), + queues.NamespacePrioritySchedulerOptions{ + WorkerCount: params.Config.VisibilityProcessorSchedulerWorkerCount, + ActiveNamespaceWeights: params.Config.VisibilityProcessorSchedulerActiveRoundRobinWeights, + StandbyNamespaceWeights: params.Config.VisibilityProcessorSchedulerStandbyRoundRobinWeights, + EnableRateLimiter: params.Config.TaskSchedulerEnableRateLimiter, + EnableRateLimiterShadowMode: params.Config.TaskSchedulerEnableRateLimiterShadowMode, + DispatchThrottleDuration: params.Config.TaskSchedulerThrottleDuration, + }, + params.NamespaceRegistry, + params.SchedulerRateLimiter, + params.TimeSource, + params.MetricsHandler.WithTags(metrics.OperationTag(metrics.OperationVisibilityQueueProcessorScope)), + params.Logger, + ), + HostPriorityAssigner: queues.NewPriorityAssigner(), + HostReaderRateLimiter: queues.NewReaderPriorityRateLimiter( + NewHostRateLimiterRateFn( + params.Config.VisibilityProcessorMaxPollHostRPS, + params.Config.PersistenceMaxQPS, + visibilityQueuePersistenceMaxRPSRatio, + ), + int64(params.Config.QueueMaxReaderCount()), + ), + }, + } +} + +func (f *visibilityQueueFactory) CreateQueue( + shard shard.Context, + workflowCache wcache.Cache, +) queues.Queue { + logger := log.With(shard.GetLogger(), tag.ComponentVisibilityQueue) + metricsHandler := f.MetricsHandler.WithTags(metrics.OperationTag(metrics.OperationVisibilityQueueProcessorScope)) + + rescheduler := queues.NewRescheduler( + f.HostScheduler, + shard.GetTimeSource(), + logger, + metricsHandler, + ) + + executor := newVisibilityQueueTaskExecutor( + shard, + workflowCache, + f.VisibilityMgr, + logger, + f.MetricsHandler, + f.Config.VisibilityProcessorEnsureCloseBeforeDelete, + f.Config.VisibilityProcessorEnableCloseWorkflowCleanup, + ) + if f.ExecutorWrapper != nil { + executor = f.ExecutorWrapper.Wrap(executor) + } + + return queues.NewImmediateQueue( + shard, + tasks.CategoryVisibility, + f.HostScheduler, + rescheduler, + f.HostPriorityAssigner, + executor, + &queues.Options{ + ReaderOptions: queues.ReaderOptions{ + BatchSize: f.Config.VisibilityTaskBatchSize, + MaxPendingTasksCount: f.Config.QueuePendingTaskMaxCount, + PollBackoffInterval: f.Config.VisibilityProcessorPollBackoffInterval, + }, + MonitorOptions: queues.MonitorOptions{ + PendingTasksCriticalCount: f.Config.QueuePendingTaskCriticalCount, + ReaderStuckCriticalAttempts: f.Config.QueueReaderStuckCriticalAttempts, + SliceCountCriticalThreshold: f.Config.QueueCriticalSlicesCount, + }, + MaxPollRPS: 
f.Config.VisibilityProcessorMaxPollRPS, + MaxPollInterval: f.Config.VisibilityProcessorMaxPollInterval, + MaxPollIntervalJitterCoefficient: f.Config.VisibilityProcessorMaxPollIntervalJitterCoefficient, + CheckpointInterval: f.Config.VisibilityProcessorUpdateAckInterval, + CheckpointIntervalJitterCoefficient: f.Config.VisibilityProcessorUpdateAckIntervalJitterCoefficient, + MaxReaderCount: f.Config.QueueMaxReaderCount, + }, + f.HostReaderRateLimiter, + logger, + metricsHandler, + ) +} diff -Nru temporal-1.21.5-1/src/service/history/visibility_queue_task_executor.go temporal-1.22.5/src/service/history/visibility_queue_task_executor.go --- temporal-1.21.5-1/src/service/history/visibility_queue_task_executor.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/service/history/visibility_queue_task_executor.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,598 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
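// The executor defined below handles the four visibility task types (start, upsert,
// close, delete). One behaviour worth highlighting: when the task's namespace is in
// REPLICATION_STATE_HANDOVER the executor does not touch visibility at all; it
// returns consts.ErrNamespaceHandover so the task can be retried after the handover
// completes. A minimal sketch of that guard (illustrative only; the real Execute
// method also assembles metrics tags and then dispatches on the concrete task type):
//
//	if replicationState == enumspb.REPLICATION_STATE_HANDOVER {
//		return metricsTags, true, consts.ErrNamespaceHandover
//	}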
+ +package history + +import ( + "context" + "time" + + commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" + "go.temporal.io/api/serviceerror" + + "go.temporal.io/server/common" + "go.temporal.io/server/common/definition" + "go.temporal.io/server/common/dynamicconfig" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/namespace" + "go.temporal.io/server/common/persistence/visibility/manager" + "go.temporal.io/server/common/primitives/timestamp" + "go.temporal.io/server/service/history/consts" + "go.temporal.io/server/service/history/queues" + "go.temporal.io/server/service/history/shard" + "go.temporal.io/server/service/history/tasks" + wcache "go.temporal.io/server/service/history/workflow/cache" +) + +type ( + visibilityQueueTaskExecutor struct { + shard shard.Context + cache wcache.Cache + logger log.Logger + metricProvider metrics.Handler + visibilityMgr manager.VisibilityManager + + ensureCloseBeforeDelete dynamicconfig.BoolPropertyFn + enableCloseWorkflowCleanup dynamicconfig.BoolPropertyFnWithNamespaceFilter + } +) + +var errUnknownVisibilityTask = serviceerror.NewInternal("unknown visibility task") + +func newVisibilityQueueTaskExecutor( + shard shard.Context, + workflowCache wcache.Cache, + visibilityMgr manager.VisibilityManager, + logger log.Logger, + metricProvider metrics.Handler, + ensureCloseBeforeDelete dynamicconfig.BoolPropertyFn, + enableCloseWorkflowCleanup dynamicconfig.BoolPropertyFnWithNamespaceFilter, +) queues.Executor { + return &visibilityQueueTaskExecutor{ + shard: shard, + cache: workflowCache, + logger: logger, + metricProvider: metricProvider, + visibilityMgr: visibilityMgr, + + ensureCloseBeforeDelete: ensureCloseBeforeDelete, + enableCloseWorkflowCleanup: enableCloseWorkflowCleanup, + } +} + +func (t *visibilityQueueTaskExecutor) Execute( + ctx context.Context, + executable queues.Executable, +) ([]metrics.Tag, bool, error) { + task := executable.GetTask() + taskType := queues.GetVisibilityTaskTypeTagValue(task) + namespaceTag, replicationState := getNamespaceTagAndReplicationStateByID( + t.shard.GetNamespaceRegistry(), + task.GetNamespaceID(), + ) + metricsTags := []metrics.Tag{ + namespaceTag, + metrics.TaskTypeTag(taskType), + metrics.OperationTag(taskType), // for backward compatibility + } + + if replicationState == enumspb.REPLICATION_STATE_HANDOVER { + // TODO: exclude task types here if we believe it's safe & necessary to execute + // them during namespace handover. 
+ // Visibility tasks should all be safe, but close execution task + // might do a setWorkflowExecution to clean up memo and search attributes, which + // will be blocked by shard context during ns handover + // TODO: move this logic to queues.Executable when metrics tag doesn't need to + // be returned from task executor + return metricsTags, true, consts.ErrNamespaceHandover + } + + var err error + switch task := task.(type) { + case *tasks.StartExecutionVisibilityTask: + err = t.processStartExecution(ctx, task) + case *tasks.UpsertExecutionVisibilityTask: + err = t.processUpsertExecution(ctx, task) + case *tasks.CloseExecutionVisibilityTask: + err = t.processCloseExecution(ctx, task) + case *tasks.DeleteExecutionVisibilityTask: + err = t.processDeleteExecution(ctx, task) + default: + err = errUnknownVisibilityTask + } + + return metricsTags, true, err +} + +func (t *visibilityQueueTaskExecutor) processStartExecution( + ctx context.Context, + task *tasks.StartExecutionVisibilityTask, +) (retError error) { + ctx, cancel := context.WithTimeout(ctx, taskTimeout) + defer cancel() + + weContext, release, err := getWorkflowExecutionContextForTask(ctx, t.cache, task) + if err != nil { + return err + } + defer func() { release(retError) }() + + mutableState, err := weContext.LoadMutableState(ctx) + if err != nil { + return err + } + if mutableState == nil || !mutableState.IsWorkflowExecutionRunning() { + return nil + } + + // verify task version for RecordWorkflowStarted. + // upsert doesn't require verifyTask, because it is just a sync of mutableState. + startVersion, err := mutableState.GetStartVersion() + if err != nil { + return err + } + err = CheckTaskVersion(t.shard, t.logger, mutableState.GetNamespaceEntry(), startVersion, task.Version, task) + if err != nil { + return err + } + + executionInfo := mutableState.GetExecutionInfo() + executionState := mutableState.GetExecutionState() + wfTypeName := executionInfo.WorkflowTypeName + + workflowStartTime := timestamp.TimeValue(mutableState.GetExecutionInfo().GetStartTime()) + workflowExecutionTime := timestamp.TimeValue(mutableState.GetExecutionInfo().GetExecutionTime()) + visibilityMemo := getWorkflowMemo(copyMemo(executionInfo.Memo)) + searchAttr := getSearchAttributes(copySearchAttributes(executionInfo.SearchAttributes)) + executionStatus := executionState.GetStatus() + taskQueue := executionInfo.TaskQueue + stateTransitionCount := executionInfo.GetStateTransitionCount() + + // NOTE: do not access anything related mutable state after this lock release + // release the context lock since we no longer need mutable state and + // the rest of logic is making RPC call, which takes time. 
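// (Every mutable-state-derived value needed for the visibility request has been
// copied into locals above precisely so the lock can be dropped at this point; the
// RecordWorkflowExecutionStarted call that follows is a network RPC, and holding the
// per-workflow lock across it would needlessly block other task processing for the
// same run.)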
+ release(nil) + + return t.recordStartExecution( + ctx, + namespace.ID(task.GetNamespaceID()), + task.GetWorkflowID(), + task.GetRunID(), + wfTypeName, + workflowStartTime, + workflowExecutionTime, + stateTransitionCount, + task.GetTaskID(), + executionStatus, + taskQueue, + visibilityMemo, + searchAttr, + ) +} + +func (t *visibilityQueueTaskExecutor) processUpsertExecution( + ctx context.Context, + task *tasks.UpsertExecutionVisibilityTask, +) (retError error) { + ctx, cancel := context.WithTimeout(ctx, taskTimeout) + defer cancel() + + weContext, release, err := getWorkflowExecutionContextForTask(ctx, t.cache, task) + if err != nil { + return err + } + defer func() { release(retError) }() + + mutableState, err := weContext.LoadMutableState(ctx) + if err != nil { + return err + } + if mutableState == nil || !mutableState.IsWorkflowExecutionRunning() { + return nil + } + + executionInfo := mutableState.GetExecutionInfo() + executionState := mutableState.GetExecutionState() + wfTypeName := executionInfo.WorkflowTypeName + + workflowStartTime := timestamp.TimeValue(mutableState.GetExecutionInfo().GetStartTime()) + workflowExecutionTime := timestamp.TimeValue(mutableState.GetExecutionInfo().GetExecutionTime()) + visibilityMemo := getWorkflowMemo(copyMemo(executionInfo.Memo)) + searchAttr := getSearchAttributes(copySearchAttributes(executionInfo.SearchAttributes)) + executionStatus := executionState.GetStatus() + taskQueue := executionInfo.TaskQueue + stateTransitionCount := executionInfo.GetStateTransitionCount() + + // NOTE: do not access anything related mutable state after this lock release + // release the context lock since we no longer need mutable state and + // the rest of logic is making RPC call, which takes time. + release(nil) + + return t.upsertExecution( + ctx, + namespace.ID(task.GetNamespaceID()), + task.GetWorkflowID(), + task.GetRunID(), + wfTypeName, + workflowStartTime, + workflowExecutionTime, + stateTransitionCount, + task.GetTaskID(), + executionStatus, + taskQueue, + visibilityMemo, + searchAttr, + ) +} + +func (t *visibilityQueueTaskExecutor) recordStartExecution( + ctx context.Context, + namespaceID namespace.ID, + workflowID string, + runID string, + workflowTypeName string, + startTime time.Time, + executionTime time.Time, + stateTransitionCount int64, + taskID int64, + status enumspb.WorkflowExecutionStatus, + taskQueue string, + visibilityMemo *commonpb.Memo, + searchAttributes *commonpb.SearchAttributes, +) error { + namespaceEntry, err := t.shard.GetNamespaceRegistry().GetNamespaceByID(namespaceID) + if err != nil { + return err + } + + request := &manager.RecordWorkflowExecutionStartedRequest{ + VisibilityRequestBase: &manager.VisibilityRequestBase{ + NamespaceID: namespaceID, + Namespace: namespaceEntry.Name(), + Execution: commonpb.WorkflowExecution{ + WorkflowId: workflowID, + RunId: runID, + }, + WorkflowTypeName: workflowTypeName, + StartTime: startTime, + ExecutionTime: executionTime, + StateTransitionCount: stateTransitionCount, TaskID: taskID, + Status: status, + ShardID: t.shard.GetShardID(), + Memo: visibilityMemo, + TaskQueue: taskQueue, + SearchAttributes: searchAttributes, + }, + } + return t.visibilityMgr.RecordWorkflowExecutionStarted(ctx, request) +} + +func (t *visibilityQueueTaskExecutor) upsertExecution( + ctx context.Context, + namespaceID namespace.ID, + workflowID string, + runID string, + workflowTypeName string, + startTime time.Time, + executionTime time.Time, + stateTransitionCount int64, + taskID int64, + status 
enumspb.WorkflowExecutionStatus, + taskQueue string, + visibilityMemo *commonpb.Memo, + searchAttributes *commonpb.SearchAttributes, +) error { + namespaceEntry, err := t.shard.GetNamespaceRegistry().GetNamespaceByID(namespaceID) + if err != nil { + return err + } + + request := &manager.UpsertWorkflowExecutionRequest{ + VisibilityRequestBase: &manager.VisibilityRequestBase{ + NamespaceID: namespaceID, + Namespace: namespaceEntry.Name(), + Execution: commonpb.WorkflowExecution{ + WorkflowId: workflowID, + RunId: runID, + }, + WorkflowTypeName: workflowTypeName, + StartTime: startTime, + ExecutionTime: executionTime, + StateTransitionCount: stateTransitionCount, TaskID: taskID, + ShardID: t.shard.GetShardID(), + Status: status, + Memo: visibilityMemo, + TaskQueue: taskQueue, + SearchAttributes: searchAttributes, + }, + } + + return t.visibilityMgr.UpsertWorkflowExecution(ctx, request) +} + +func (t *visibilityQueueTaskExecutor) processCloseExecution( + parentCtx context.Context, + task *tasks.CloseExecutionVisibilityTask, +) (retError error) { + ctx, cancel := context.WithTimeout(parentCtx, taskTimeout) + defer cancel() + + namespaceEntry, err := t.shard.GetNamespaceRegistry().GetNamespaceByID(namespace.ID(task.GetNamespaceID())) + if err != nil { + return err + } + + weContext, release, err := getWorkflowExecutionContextForTask(ctx, t.cache, task) + if err != nil { + return err + } + defer func() { release(retError) }() + + mutableState, err := weContext.LoadMutableState(ctx) + if err != nil { + return err + } + if mutableState == nil || mutableState.IsWorkflowExecutionRunning() { + return nil + } + + lastWriteVersion, err := mutableState.GetLastWriteVersion() + if err != nil { + return err + } + err = CheckTaskVersion(t.shard, t.logger, mutableState.GetNamespaceEntry(), lastWriteVersion, task.Version, task) + if err != nil { + return err + } + + executionInfo := mutableState.GetExecutionInfo() + executionState := mutableState.GetExecutionState() + wfCloseTime, err := mutableState.GetWorkflowCloseTime(ctx) + if err != nil { + return err + } + workflowTypeName := executionInfo.WorkflowTypeName + workflowStatus := executionState.Status + workflowHistoryLength := mutableState.GetNextEventID() - 1 + workflowStartTime := timestamp.TimeValue(mutableState.GetExecutionInfo().GetStartTime()) + workflowExecutionTime := timestamp.TimeValue(mutableState.GetExecutionInfo().GetExecutionTime()) + visibilityMemo := getWorkflowMemo(copyMemo(executionInfo.Memo)) + searchAttr := getSearchAttributes(copySearchAttributes(executionInfo.SearchAttributes)) + taskQueue := executionInfo.TaskQueue + stateTransitionCount := executionInfo.GetStateTransitionCount() + historySizeBytes := executionInfo.GetExecutionStats().GetHistorySize() + + // NOTE: do not access anything related mutable state after this lock release + // release the context lock since we no longer need mutable state and + // the rest of logic is making RPC call, which takes time. + release(nil) + err = t.recordCloseExecution( + ctx, + namespaceEntry, + task.GetWorkflowID(), + task.GetRunID(), + workflowTypeName, + workflowStartTime, + workflowExecutionTime, + timestamp.TimeValue(wfCloseTime), + workflowStatus, + stateTransitionCount, + workflowHistoryLength, + task.GetTaskID(), + visibilityMemo, + taskQueue, + searchAttr, + historySizeBytes, + ) + if err != nil { + return err + } + + // Elasticsearch bulk processor doesn't respect context timeout + // because under heavy load bulk flush might take longer than taskTimeout. 
+ // Therefore, ctx timeout might be already expired + // and parentCtx (which doesn't have timeout) must be used everywhere bellow. + + if t.enableCloseWorkflowCleanup(namespaceEntry.Name().String()) { + return t.cleanupExecutionInfo(parentCtx, task) + } + return nil +} + +func (t *visibilityQueueTaskExecutor) recordCloseExecution( + ctx context.Context, + namespaceEntry *namespace.Namespace, + workflowID string, + runID string, + workflowTypeName string, + startTime time.Time, + executionTime time.Time, + endTime time.Time, + status enumspb.WorkflowExecutionStatus, + stateTransitionCount int64, + historyLength int64, + taskID int64, + visibilityMemo *commonpb.Memo, + taskQueue string, + searchAttributes *commonpb.SearchAttributes, + historySizeBytes int64, +) error { + return t.visibilityMgr.RecordWorkflowExecutionClosed(ctx, &manager.RecordWorkflowExecutionClosedRequest{ + VisibilityRequestBase: &manager.VisibilityRequestBase{ + NamespaceID: namespaceEntry.ID(), + Namespace: namespaceEntry.Name(), + Execution: commonpb.WorkflowExecution{ + WorkflowId: workflowID, + RunId: runID, + }, + WorkflowTypeName: workflowTypeName, + StartTime: startTime, + ExecutionTime: executionTime, + StateTransitionCount: stateTransitionCount, + Status: status, + TaskID: taskID, + ShardID: t.shard.GetShardID(), + Memo: visibilityMemo, + TaskQueue: taskQueue, + SearchAttributes: searchAttributes, + }, + CloseTime: endTime, + HistoryLength: historyLength, + HistorySizeBytes: historySizeBytes, + }) +} + +func (t *visibilityQueueTaskExecutor) processDeleteExecution( + ctx context.Context, + task *tasks.DeleteExecutionVisibilityTask, +) (retError error) { + ctx, cancel := context.WithTimeout(ctx, taskTimeout) + defer cancel() + + request := &manager.VisibilityDeleteWorkflowExecutionRequest{ + NamespaceID: namespace.ID(task.NamespaceID), + WorkflowID: task.WorkflowID, + RunID: task.RunID, + TaskID: task.TaskID, + StartTime: task.StartTime, + CloseTime: task.CloseTime, + } + if t.ensureCloseBeforeDelete() { + // If visibility delete task is executed before visibility close task then visibility close task + // (which change workflow execution status by uploading new visibility record) will resurrect visibility record. + // + // Queue states/ack levels are updated with delay (default 30s). Therefore, this check could return false + // if the workflow was closed and then deleted within this delay period. + if t.isCloseExecutionVisibilityTaskPending(task) { + // Return retryable error for task processor to retry the operation later. + return consts.ErrDependencyTaskNotCompleted + } + } + return t.visibilityMgr.DeleteWorkflowExecution(ctx, request) +} + +func (t *visibilityQueueTaskExecutor) isCloseExecutionVisibilityTaskPending(task *tasks.DeleteExecutionVisibilityTask) bool { + CloseExecutionVisibilityTaskID := task.CloseExecutionVisibilityTaskID + // taskID == 0 if workflow still running in passive cluster or closed before this field was added (v1.17). 
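// Concrete illustration of the pending-close check below (it matches the
// TestProcessorDeleteExecution cases later in this diff): with an exclusive reader
// high watermark of 5 and no reader states, a close visibility task with TaskID 4
// counts as acked, so the delete proceeds, while TaskID 6 does not, so the delete
// task returns consts.ErrDependencyTaskNotCompleted and is retried later.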
+ if CloseExecutionVisibilityTaskID == 0 { + return false + } + // check if close execution visibility task is completed + visibilityQueueState, ok := t.shard.GetQueueState(tasks.CategoryVisibility) + if !ok { + return true + } + queryTask := &tasks.CloseExecutionVisibilityTask{ + WorkflowKey: definition.NewWorkflowKey(task.GetNamespaceID(), task.GetWorkflowID(), task.GetRunID()), + TaskID: CloseExecutionVisibilityTaskID, + } + return !queues.IsTaskAcked(queryTask, visibilityQueueState) +} + +// cleanupExecutionInfo cleans up workflow execution info after visibility close +// task has been processed and acked by visibility store. +func (t *visibilityQueueTaskExecutor) cleanupExecutionInfo( + ctx context.Context, + task *tasks.CloseExecutionVisibilityTask, +) (retError error) { + ctx, cancel := context.WithTimeout(ctx, taskTimeout) + defer cancel() + + weContext, release, err := getWorkflowExecutionContextForTask(ctx, t.cache, task) + if err != nil { + return err + } + defer func() { release(retError) }() + + mutableState, err := weContext.LoadMutableState(ctx) + if err != nil { + return err + } + if mutableState == nil || mutableState.IsWorkflowExecutionRunning() { + return nil + } + + lastWriteVersion, err := mutableState.GetLastWriteVersion() + if err != nil { + return err + } + err = CheckTaskVersion(t.shard, t.logger, mutableState.GetNamespaceEntry(), lastWriteVersion, task.Version, task) + if err != nil { + return err + } + + executionInfo := mutableState.GetExecutionInfo() + executionInfo.Memo = nil + executionInfo.SearchAttributes = nil + executionInfo.CloseVisibilityTaskCompleted = true + return weContext.SetWorkflowExecution(ctx) +} + +func getWorkflowMemo( + memoFields map[string]*commonpb.Payload, +) *commonpb.Memo { + if memoFields == nil { + return nil + } + return &commonpb.Memo{Fields: memoFields} +} + +func copyMemo( + memoFields map[string]*commonpb.Payload, +) map[string]*commonpb.Payload { + if memoFields == nil { + return nil + } + + result := make(map[string]*commonpb.Payload) + for k, v := range memoFields { + result[k] = common.CloneProto(v) + } + return result +} + +func getSearchAttributes( + indexedFields map[string]*commonpb.Payload, +) *commonpb.SearchAttributes { + if indexedFields == nil { + return nil + } + return &commonpb.SearchAttributes{IndexedFields: indexedFields} +} + +func copySearchAttributes( + input map[string]*commonpb.Payload, +) map[string]*commonpb.Payload { + if input == nil { + return nil + } + + result := make(map[string]*commonpb.Payload) + for k, v := range input { + result[k] = common.CloneProto(v) + } + return result +} diff -Nru temporal-1.21.5-1/src/service/history/visibility_queue_task_executor_test.go temporal-1.22.5/src/service/history/visibility_queue_task_executor_test.go --- temporal-1.21.5-1/src/service/history/visibility_queue_task_executor_test.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/service/history/visibility_queue_task_executor_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,622 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package history + +import ( + "context" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/pborman/uuid" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" + historypb "go.temporal.io/api/history/v1" + taskqueuepb "go.temporal.io/api/taskqueue/v1" + "go.temporal.io/api/workflowservice/v1" + + "go.temporal.io/server/api/historyservice/v1" + persistencespb "go.temporal.io/server/api/persistence/v1" + workflowspb "go.temporal.io/server/api/workflow/v1" + "go.temporal.io/server/common" + "go.temporal.io/server/common/clock" + "go.temporal.io/server/common/cluster" + "go.temporal.io/server/common/definition" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/namespace" + "go.temporal.io/server/common/persistence" + "go.temporal.io/server/common/persistence/versionhistory" + "go.temporal.io/server/common/persistence/visibility/manager" + "go.temporal.io/server/common/primitives/timestamp" + "go.temporal.io/server/service/history/consts" + "go.temporal.io/server/service/history/events" + "go.temporal.io/server/service/history/queues" + "go.temporal.io/server/service/history/shard" + "go.temporal.io/server/service/history/tasks" + "go.temporal.io/server/service/history/tests" + "go.temporal.io/server/service/history/workflow" + wcache "go.temporal.io/server/service/history/workflow/cache" +) + +type ( + visibilityQueueTaskExecutorSuite struct { + suite.Suite + *require.Assertions + + controller *gomock.Controller + mockShard *shard.ContextTest + + mockVisibilityMgr *manager.MockVisibilityManager + mockExecutionMgr *persistence.MockExecutionManager + + workflowCache wcache.Cache + logger log.Logger + namespaceID namespace.ID + namespace namespace.Name + version int64 + now time.Time + timeSource *clock.EventTimeSource + visibilityQueueTaskExecutor queues.Executor + + enableCloseWorkflowCleanup bool + } +) + +func TestVisibilityQueueTaskExecutorSuite(t *testing.T) { + s := new(visibilityQueueTaskExecutorSuite) + suite.Run(t, s) +} + +func (s *visibilityQueueTaskExecutorSuite) SetupSuite() { +} + +func (s *visibilityQueueTaskExecutorSuite) TearDownSuite() { +} + +func (s *visibilityQueueTaskExecutorSuite) SetupTest() { + s.Assertions = require.New(s.T()) + + s.namespaceID = tests.NamespaceID + s.namespace = tests.Namespace + s.version = 
tests.GlobalNamespaceEntry.FailoverVersion() + s.now = time.Now().UTC() + s.timeSource = clock.NewEventTimeSource().Update(s.now) + + s.controller = gomock.NewController(s.T()) + + config := tests.NewDynamicConfig() + s.mockShard = shard.NewTestContext( + s.controller, + &persistencespb.ShardInfo{ + ShardId: 1, + RangeId: 1, + }, + config, + ) + s.mockShard.SetEventsCacheForTesting(events.NewEventsCache( + s.mockShard.GetShardID(), + s.mockShard.GetConfig().EventsCacheMaxSizeBytes(), + s.mockShard.GetConfig().EventsCacheTTL(), + s.mockShard.GetExecutionManager(), + false, + s.mockShard.GetLogger(), + s.mockShard.GetMetricsHandler(), + )) + s.mockShard.Resource.TimeSource = s.timeSource + + s.mockExecutionMgr = s.mockShard.Resource.ExecutionMgr + s.mockVisibilityMgr = manager.NewMockVisibilityManager(s.controller) + + mockNamespaceCache := s.mockShard.Resource.NamespaceCache + mockNamespaceCache.EXPECT().GetNamespaceByID(tests.NamespaceID).Return(tests.GlobalNamespaceEntry, nil).AnyTimes() + mockNamespaceCache.EXPECT().GetNamespace(tests.Namespace).Return(tests.GlobalNamespaceEntry, nil).AnyTimes() + mockNamespaceCache.EXPECT().GetNamespaceName(tests.NamespaceID).Return(tests.Namespace, nil).AnyTimes() + + mockClusterMetadata := s.mockShard.Resource.ClusterMetadata + mockClusterMetadata.EXPECT().GetCurrentClusterName().Return(cluster.TestCurrentClusterName).AnyTimes() + mockClusterMetadata.EXPECT().GetAllClusterInfo().Return(cluster.TestAllClusterInfo).AnyTimes() + mockClusterMetadata.EXPECT().IsGlobalNamespaceEnabled().Return(true).AnyTimes() + mockClusterMetadata.EXPECT().ClusterNameForFailoverVersion(true, s.version).Return(mockClusterMetadata.GetCurrentClusterName()).AnyTimes() + + s.workflowCache = wcache.NewCache(s.mockShard) + s.logger = s.mockShard.GetLogger() + + h := &historyEngineImpl{ + currentClusterName: s.mockShard.Resource.GetClusterMetadata().GetCurrentClusterName(), + shard: s.mockShard, + clusterMetadata: mockClusterMetadata, + executionManager: s.mockExecutionMgr, + logger: s.logger, + tokenSerializer: common.NewProtoTaskTokenSerializer(), + metricsHandler: s.mockShard.GetMetricsHandler(), + eventNotifier: events.NewNotifier(clock.NewRealTimeSource(), metrics.NoopMetricsHandler, func(namespace.ID, string) int32 { return 1 }), + } + s.mockShard.SetEngineForTesting(h) + + s.enableCloseWorkflowCleanup = false + s.visibilityQueueTaskExecutor = newVisibilityQueueTaskExecutor( + s.mockShard, + s.workflowCache, + s.mockVisibilityMgr, + s.logger, + metrics.NoopMetricsHandler, + config.VisibilityProcessorEnsureCloseBeforeDelete, + func(_ string) bool { return s.enableCloseWorkflowCleanup }, + ) +} + +func (s *visibilityQueueTaskExecutorSuite) TearDownTest() { + s.controller.Finish() + s.mockShard.StopForTest() +} + +func (s *visibilityQueueTaskExecutorSuite) TestProcessCloseExecution() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + parentNamespaceID := "some random parent namespace ID" + parentInitiatedID := int64(3222) + parentInitiatedVersion := int64(1234) + parentNamespace := "some random parent namespace Name" + parentExecution := &commonpb.WorkflowExecution{ + WorkflowId: "some random parent workflow ID", + RunId: uuid.New(), + } + + mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + _, err := 
mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + ParentExecutionInfo: &workflowspb.ParentExecutionInfo{ + NamespaceId: parentNamespaceID, + Namespace: parentNamespace, + Execution: parentExecution, + InitiatedId: parentInitiatedID, + InitiatedVersion: parentInitiatedVersion, + }, + }, + ) + s.Nil(err) + + wt := addWorkflowTaskScheduledEvent(mutableState) + event := addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) + wt.StartedEventID = event.GetEventId() + event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") + + taskID := int64(59) + event = addCompleteWorkflowEvent(mutableState, event.GetEventId(), nil) + + visibilityTask := &tasks.CloseExecutionVisibilityTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + VisibilityTimestamp: time.Now().UTC(), + Version: s.version, + TaskID: taskID, + } + + persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + s.mockVisibilityMgr.EXPECT().RecordWorkflowExecutionClosed(gomock.Any(), gomock.Any()).Return(nil) + + _, _, err = s.visibilityQueueTaskExecutor.Execute(context.Background(), s.newTaskExecutable(visibilityTask)) + s.Nil(err) +} + +func (s *visibilityQueueTaskExecutorSuite) TestProcessCloseExecutionWithWorkflowClosedCleanup() { + s.enableCloseWorkflowCleanup = true + + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + parentNamespaceID := "some random parent namespace ID" + parentInitiatedID := int64(3222) + parentInitiatedVersion := int64(1234) + parentNamespace := "some random parent namespace Name" + parentExecution := &commonpb.WorkflowExecution{ + WorkflowId: "some random parent workflow ID", + RunId: uuid.New(), + } + + mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + ParentExecutionInfo: &workflowspb.ParentExecutionInfo{ + NamespaceId: parentNamespaceID, + Namespace: parentNamespace, + Execution: parentExecution, + InitiatedId: parentInitiatedID, + InitiatedVersion: parentInitiatedVersion, + }, + }, + ) + s.Nil(err) + + wt := addWorkflowTaskScheduledEvent(mutableState) + event := 
addWorkflowTaskStartedEvent(mutableState, wt.ScheduledEventID, taskQueueName, uuid.New()) + wt.StartedEventID = event.GetEventId() + event = addWorkflowTaskCompletedEvent(&s.Suite, mutableState, wt.ScheduledEventID, wt.StartedEventID, "some random identity") + + taskID := int64(59) + event = addCompleteWorkflowEvent(mutableState, event.GetEventId(), nil) + + visibilityTask := &tasks.CloseExecutionVisibilityTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + VisibilityTimestamp: time.Now().UTC(), + Version: s.version, + TaskID: taskID, + } + + persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + s.mockExecutionMgr.EXPECT().SetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.SetWorkflowExecutionResponse{}, nil) + s.mockVisibilityMgr.EXPECT().RecordWorkflowExecutionClosed(gomock.Any(), gomock.Any()).Return(nil) + + _, _, err = s.visibilityQueueTaskExecutor.Execute(context.Background(), s.newTaskExecutable(visibilityTask)) + s.Nil(err) +} + +func (s *visibilityQueueTaskExecutorSuite) TestProcessRecordWorkflowStartedTask() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + cronSchedule := "@every 5s" + backoff := 5 * time.Second + + mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + + event, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + CronSchedule: cronSchedule, + }, + FirstWorkflowTaskBackoff: &backoff, + }, + ) + s.Nil(err) + + taskID := int64(59) + wt := addWorkflowTaskScheduledEvent(mutableState) + + visibilityTask := &tasks.StartExecutionVisibilityTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + VisibilityTimestamp: time.Now().UTC(), + Version: s.version, + TaskID: taskID, + } + + persistenceMutableState := s.createPersistenceMutableState(mutableState, wt.ScheduledEventID, wt.Version) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + s.mockVisibilityMgr.EXPECT().RecordWorkflowExecutionStarted( + gomock.Any(), + s.createRecordWorkflowExecutionStartedRequest(s.namespace, event, visibilityTask, mutableState, backoff, taskQueueName), + ).Return(nil) + + _, _, err = s.visibilityQueueTaskExecutor.Execute(context.Background(), s.newTaskExecutable(visibilityTask)) + s.Nil(err) +} + +func (s *visibilityQueueTaskExecutorSuite) TestProcessUpsertWorkflowSearchAttributes() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random 
workflow type" + taskQueueName := "some random task queue" + + mutableState := workflow.TestGlobalMutableState(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + }, + ) + s.NoError(err) + + taskID := int64(59) + wt := addWorkflowTaskScheduledEvent(mutableState) + + visibilityTask := &tasks.UpsertExecutionVisibilityTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + Version: s.version, + TaskID: taskID, + } + + persistenceMutableState := s.createPersistenceMutableState(mutableState, wt.ScheduledEventID, wt.Version) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + s.mockVisibilityMgr.EXPECT().UpsertWorkflowExecution( + gomock.Any(), + s.createUpsertWorkflowRequest(s.namespace, visibilityTask, mutableState, taskQueueName), + ).Return(nil) + + _, _, err = s.visibilityQueueTaskExecutor.Execute(context.Background(), s.newTaskExecutable(visibilityTask)) + s.NoError(err) +} + +func (s *visibilityQueueTaskExecutorSuite) TestProcessModifyWorkflowProperties() { + execution := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + workflowType := "some random workflow type" + taskQueueName := "some random task queue" + + mutableState := workflow.TestGlobalMutableState( + s.mockShard, + s.mockShard.GetEventsCache(), + s.logger, + s.version, + execution.GetRunId(), + ) + + _, err := mutableState.AddWorkflowExecutionStartedEvent( + execution, + &historyservice.StartWorkflowExecutionRequest{ + Attempt: 1, + NamespaceId: s.namespaceID.String(), + StartRequest: &workflowservice.StartWorkflowExecutionRequest{ + WorkflowType: &commonpb.WorkflowType{Name: workflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName}, + WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second), + WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second), + }, + }, + ) + s.NoError(err) + + taskID := int64(59) + wt := addWorkflowTaskScheduledEvent(mutableState) + + visibilityTask := &tasks.UpsertExecutionVisibilityTask{ + WorkflowKey: definition.NewWorkflowKey( + s.namespaceID.String(), + execution.GetWorkflowId(), + execution.GetRunId(), + ), + Version: s.version, + TaskID: taskID, + } + + persistenceMutableState := s.createPersistenceMutableState( + mutableState, + wt.ScheduledEventID, + wt.Version, + ) + s.mockExecutionMgr.EXPECT().GetWorkflowExecution( + gomock.Any(), + gomock.Any(), + ).Return( + &persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, + nil, + ) + s.mockVisibilityMgr.EXPECT().UpsertWorkflowExecution( + gomock.Any(), + s.createUpsertWorkflowRequest(s.namespace, visibilityTask, mutableState, taskQueueName), + ).Return(nil) + + _, _, err = s.visibilityQueueTaskExecutor.Execute( + context.Background(), + s.newTaskExecutable(visibilityTask), + ) + s.NoError(err) +} + +func (s *visibilityQueueTaskExecutorSuite) 
TestProcessorDeleteExecution() { + s.T().SkipNow() + workflowKey := definition.WorkflowKey{ + NamespaceID: s.namespaceID.String(), + } + s.Run("TaskID=0", func() { + s.mockVisibilityMgr.EXPECT().DeleteWorkflowExecution(gomock.Any(), gomock.Any()) + err := s.execute(&tasks.DeleteExecutionVisibilityTask{ + WorkflowKey: workflowKey, + CloseExecutionVisibilityTaskID: 0, + }) + s.Assert().NoError(err) + }) + s.Run("MultiCursorQueue", func() { + const highWatermark int64 = 5 + s.NoError(s.mockShard.SetQueueState(tasks.CategoryVisibility, &persistencespb.QueueState{ + ReaderStates: nil, + ExclusiveReaderHighWatermark: &persistencespb.TaskKey{ + TaskId: highWatermark, + FireTime: timestamp.TimePtr(tasks.DefaultFireTime), + }, + })) + s.Run("NotAcked", func() { + err := s.execute(&tasks.DeleteExecutionVisibilityTask{ + WorkflowKey: workflowKey, + CloseExecutionVisibilityTaskID: highWatermark + 1, + }) + s.ErrorIs(err, consts.ErrDependencyTaskNotCompleted) + }) + s.Run("Acked", func() { + s.mockVisibilityMgr.EXPECT().DeleteWorkflowExecution(gomock.Any(), gomock.Any()) + err := s.execute(&tasks.DeleteExecutionVisibilityTask{ + WorkflowKey: workflowKey, + CloseExecutionVisibilityTaskID: highWatermark - 1, + }) + s.NoError(err) + }) + }) +} + +func (s *visibilityQueueTaskExecutorSuite) execute(task tasks.Task) error { + _, _, err := s.visibilityQueueTaskExecutor.Execute(context.Background(), s.newTaskExecutable(task)) + return err +} + +func (s *visibilityQueueTaskExecutorSuite) createRecordWorkflowExecutionStartedRequest( + namespaceName namespace.Name, + startEvent *historypb.HistoryEvent, + task *tasks.StartExecutionVisibilityTask, + mutableState workflow.MutableState, + backoff time.Duration, + taskQueueName string, +) *manager.RecordWorkflowExecutionStartedRequest { + execution := &commonpb.WorkflowExecution{ + WorkflowId: task.WorkflowID, + RunId: task.RunID, + } + executionInfo := mutableState.GetExecutionInfo() + executionTimestamp := timestamp.TimeValue(startEvent.GetEventTime()).Add(backoff) + + return &manager.RecordWorkflowExecutionStartedRequest{ + VisibilityRequestBase: &manager.VisibilityRequestBase{ + Namespace: namespaceName, + NamespaceID: namespace.ID(task.NamespaceID), + Execution: *execution, + WorkflowTypeName: executionInfo.WorkflowTypeName, + StartTime: timestamp.TimeValue(startEvent.GetEventTime()), + ExecutionTime: executionTimestamp, + TaskID: task.TaskID, + Status: enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING, + ShardID: s.mockShard.GetShardID(), + TaskQueue: taskQueueName, + }, + } +} + +func (s *visibilityQueueTaskExecutorSuite) createUpsertWorkflowRequest( + namespaceName namespace.Name, + task *tasks.UpsertExecutionVisibilityTask, + mutableState workflow.MutableState, + taskQueueName string, +) *manager.UpsertWorkflowExecutionRequest { + execution := &commonpb.WorkflowExecution{ + WorkflowId: task.WorkflowID, + RunId: task.RunID, + } + executionInfo := mutableState.GetExecutionInfo() + + return &manager.UpsertWorkflowExecutionRequest{ + VisibilityRequestBase: &manager.VisibilityRequestBase{ + Namespace: namespaceName, + NamespaceID: namespace.ID(task.NamespaceID), + Execution: *execution, + WorkflowTypeName: executionInfo.WorkflowTypeName, + StartTime: timestamp.TimeValue(executionInfo.GetStartTime()), + ExecutionTime: timestamp.TimeValue(executionInfo.GetExecutionTime()), + TaskID: task.TaskID, + Status: enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING, + TaskQueue: taskQueueName, + ShardID: s.mockShard.GetShardID(), + }, + } +} + +func (s *visibilityQueueTaskExecutorSuite) 
createPersistenceMutableState( + ms workflow.MutableState, + lastEventID int64, + lastEventVersion int64, +) *persistencespb.WorkflowMutableState { + currentVersionHistory, err := versionhistory.GetCurrentVersionHistory(ms.GetExecutionInfo().GetVersionHistories()) + s.NoError(err) + err = versionhistory.AddOrUpdateVersionHistoryItem(currentVersionHistory, versionhistory.NewVersionHistoryItem( + lastEventID, lastEventVersion, + )) + s.NoError(err) + return workflow.TestCloneToProto(ms) +} + +func (s *visibilityQueueTaskExecutorSuite) newTaskExecutable( + task tasks.Task, +) queues.Executable { + return queues.NewExecutable( + queues.DefaultReaderId, + task, + s.visibilityQueueTaskExecutor, + nil, + nil, + queues.NewNoopPriorityAssigner(), + s.mockShard.GetTimeSource(), + s.mockShard.GetNamespaceRegistry(), + s.mockShard.GetClusterMetadata(), + nil, + metrics.NoopMetricsHandler, + func() bool { return false }, + ) +} diff -Nru temporal-1.21.5-1/src/service/history/workflow/cache/cache.go temporal-1.22.5/src/service/history/workflow/cache/cache.go --- temporal-1.21.5-1/src/service/history/workflow/cache/cache.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/workflow/cache/cache.go 2024-02-23 09:45:43.000000000 +0000 @@ -98,7 +98,6 @@ func NewCache(shard shard.Context) Cache { opts := &cache.Options{} config := shard.GetConfig() - opts.InitialCapacity = config.HistoryCacheInitialSize() opts.TTL = config.HistoryCacheTTL() opts.Pin = true @@ -263,9 +262,24 @@ if err != nil || forceClearContext { // TODO see issue #668, there are certain type or errors which can bypass the clear context.Clear() + context.Unlock(lockPriority) + c.Release(key) + } else { + isDirty := context.IsDirty() + if isDirty { + context.Clear() + c.logger.Error("Cache encountered dirty mutable state transaction", + tag.WorkflowNamespaceID(context.GetWorkflowKey().NamespaceID), + tag.WorkflowID(context.GetWorkflowKey().WorkflowID), + tag.WorkflowRunID(context.GetWorkflowKey().RunID), + ) + } + context.Unlock(lockPriority) + c.Release(key) + if isDirty { + panic("Cache encountered dirty mutable state transaction") + } } - context.Unlock(lockPriority) - c.Release(key) } } } diff -Nru temporal-1.21.5-1/src/service/history/workflow/cache/cache_test.go temporal-1.22.5/src/service/history/workflow/cache/cache_test.go --- temporal-1.21.5-1/src/service/history/workflow/cache/cache_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/workflow/cache/cache_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -38,6 +38,7 @@ "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" commonpb "go.temporal.io/api/common/v1" + persistencespb "go.temporal.io/server/api/persistence/v1" "go.temporal.io/server/common/definition" "go.temporal.io/server/common/dynamicconfig" @@ -103,13 +104,14 @@ RunId: uuid.New(), } mockMS1 := workflow.NewMockMutableState(s.controller) + mockMS1.EXPECT().IsDirty().Return(false).AnyTimes() ctx, release, err := s.cache.GetOrCreateWorkflowExecution( context.Background(), namespaceID, execution1, workflow.LockPriorityHigh, ) - s.Nil(err) + s.NoError(err) ctx.(*workflow.ContextImpl).MutableState = mockMS1 release(nil) ctx, release, err = s.cache.GetOrCreateWorkflowExecution( @@ -118,7 +120,7 @@ execution1, workflow.LockPriorityHigh, ) - s.Nil(err) + s.NoError(err) s.Equal(mockMS1, ctx.(*workflow.ContextImpl).MutableState) release(nil) @@ -132,11 +134,49 @@ execution2, workflow.LockPriorityHigh, ) - s.Nil(err) + s.NoError(err) s.NotEqual(mockMS1, 
ctx.(*workflow.ContextImpl).MutableState) release(nil) } +func (s *workflowCacheSuite) TestHistoryCachePanic() { + s.cache = NewCache(s.mockShard) + + namespaceID := namespace.ID("test_namespace_id") + execution1 := commonpb.WorkflowExecution{ + WorkflowId: "some random workflow ID", + RunId: uuid.New(), + } + mockMS1 := workflow.NewMockMutableState(s.controller) + mockMS1.EXPECT().IsDirty().Return(true).AnyTimes() + mockMS1.EXPECT().GetQueryRegistry().Return(workflow.NewQueryRegistry()).AnyTimes() + ctx, release, err := s.cache.GetOrCreateWorkflowExecution( + context.Background(), + namespaceID, + execution1, + workflow.LockPriorityHigh, + ) + s.NoError(err) + ctx.(*workflow.ContextImpl).MutableState = mockMS1 + + defer func() { + if recover() != nil { + ctx, release, err = s.cache.GetOrCreateWorkflowExecution( + context.Background(), + namespaceID, + execution1, + workflow.LockPriorityHigh, + ) + s.NoError(err) + s.Nil(ctx.(*workflow.ContextImpl).MutableState) + release(nil) + } else { + s.Fail("test should panic") + } + }() + release(nil) +} + func (s *workflowCacheSuite) TestHistoryCachePinning() { s.mockShard.GetConfig().HistoryCacheMaxSize = dynamicconfig.GetIntPropertyFn(1) namespaceID := namespace.ID("test_namespace_id") @@ -152,7 +192,7 @@ we, workflow.LockPriorityHigh, ) - s.Nil(err) + s.NoError(err) we2 := commonpb.WorkflowExecution{ WorkflowId: "wf-cache-test-pinning", @@ -166,7 +206,7 @@ we2, workflow.LockPriorityHigh, ) - s.NotNil(err2) + s.Error(err2) // Now release the context, this should unpin it. release(err2) @@ -177,7 +217,7 @@ we2, workflow.LockPriorityHigh, ) - s.Nil(err3) + s.NoError(err3) release2(err3) // Old context should be evicted. @@ -187,7 +227,7 @@ we, workflow.LockPriorityHigh, ) - s.Nil(err4) + s.NoError(err4) s.False(ctx == newContext) release(err4) } @@ -207,10 +247,11 @@ we, workflow.LockPriorityHigh, ) - s.Nil(err) + s.NoError(err) // since we are just testing whether the release function will clear the cache // all we need is a fake MutableState mock := workflow.NewMockMutableState(s.controller) + mock.EXPECT().IsDirty().Return(false).AnyTimes() ctx.(*workflow.ContextImpl).MutableState = mock release(nil) @@ -223,7 +264,7 @@ we, workflow.LockPriorityHigh, ) - s.Nil(err) + s.NoError(err) s.NotNil(ctx.(*workflow.ContextImpl).MutableState) mock.EXPECT().GetQueryRegistry().Return(workflow.NewQueryRegistry()) @@ -237,7 +278,7 @@ we, workflow.LockPriorityHigh, ) - s.Nil(err) + s.NoError(err) s.Nil(ctx.(*workflow.ContextImpl).MutableState) release(nil) } @@ -272,7 +313,7 @@ }, workflow.LockPriorityHigh, ) - s.Nil(err) + s.NoError(err) // since each time the is reset to nil s.Nil(ctx.(*workflow.ContextImpl).MutableState) // since we are just testing whether the release function will clear the cache @@ -297,7 +338,7 @@ }, workflow.LockPriorityHigh, ) - s.Nil(err) + s.NoError(err) // since we are just testing whether the release function will clear the cache // all we need is a fake MutableState s.Nil(ctx.(*workflow.ContextImpl).MutableState) @@ -391,7 +432,7 @@ }, workflow.LockPriorityHigh, ) - s.Nil(err) + s.NoError(err) defer release(nil) latency2, ok := metrics.ContextCounterGet(ctx, metrics.HistoryWorkflowExecutionCacheLatency.GetMetricName()) @@ -459,7 +500,7 @@ if tt.shouldLockBefore { // lock the workflow to allow it to time out err := workflowCtx.Lock(ctx, workflow.LockPriorityHigh) - s.Nil(err) + s.NoError(err) } if err := c.lockWorkflowExecution(ctx, workflowCtx, key, workflow.LockPriorityHigh); (err != nil) != tt.wantErr { diff -Nru 
temporal-1.21.5-1/src/service/history/workflow/context.go temporal-1.22.5/src/service/history/workflow/context.go --- temporal-1.21.5-1/src/service/history/workflow/context.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/workflow/context.go 2024-02-23 09:45:43.000000000 +0000 @@ -29,7 +29,6 @@ import ( "context" "fmt" - "time" "go.opentelemetry.io/otel/trace" commonpb "go.temporal.io/api/common/v1" @@ -58,10 +57,6 @@ ) const ( - defaultRemoteCallTimeout = 30 * time.Second -) - -const ( LockPriorityHigh LockPriority = 0 LockPriorityLow LockPriority = 1 ) @@ -79,6 +74,8 @@ Lock(ctx context.Context, lockPriority LockPriority) error Unlock(lockPriority LockPriority) + IsDirty() bool + ReapplyEvents( ctx context.Context, eventBatches []*persistence.WorkflowEvents, @@ -206,6 +203,13 @@ } } +func (c *ContextImpl) IsDirty() bool { + if c.MutableState == nil { + return false + } + return c.MutableState.IsDirty() +} + func (c *ContextImpl) Clear() { c.metricsHandler.Counter(metrics.WorkflowContextCleared.GetMetricName()).Record(1) if c.MutableState != nil { diff -Nru temporal-1.21.5-1/src/service/history/workflow/context_mock.go temporal-1.22.5/src/service/history/workflow/context_mock.go --- temporal-1.21.5-1/src/service/history/workflow/context_mock.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/workflow/context_mock.go 2024-02-23 09:45:43.000000000 +0000 @@ -116,6 +116,20 @@ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetWorkflowKey", reflect.TypeOf((*MockContext)(nil).GetWorkflowKey)) } +// IsDirty mocks base method. +func (m *MockContext) IsDirty() bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IsDirty") + ret0, _ := ret[0].(bool) + return ret0 +} + +// IsDirty indicates an expected call of IsDirty. +func (mr *MockContextMockRecorder) IsDirty() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsDirty", reflect.TypeOf((*MockContext)(nil).IsDirty)) +} + // LoadExecutionStats mocks base method. 
func (m *MockContext) LoadExecutionStats(ctx context.Context) (*v1.ExecutionStats, error) { m.ctrl.T.Helper() diff -Nru temporal-1.21.5-1/src/service/history/workflow/mutable_state.go temporal-1.22.5/src/service/history/workflow/mutable_state.go --- temporal-1.21.5-1/src/service/history/workflow/mutable_state.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/workflow/mutable_state.go 2024-02-23 09:45:43.000000000 +0000 @@ -169,6 +169,7 @@ GetActivityByActivityID(string) (*persistencespb.ActivityInfo, bool) GetActivityInfo(int64) (*persistencespb.ActivityInfo, bool) GetActivityInfoWithTimerHeartbeat(scheduledEventID int64) (*persistencespb.ActivityInfo, time.Time, bool) + GetActivityType(context.Context, *persistencespb.ActivityInfo) (*commonpb.ActivityType, error) GetActivityScheduledEvent(context.Context, int64) (*historypb.HistoryEvent, error) GetRequesteCancelExternalInitiatedEvent(context.Context, int64) (*historypb.HistoryEvent, error) GetChildExecutionInfo(int64) (*persistencespb.ChildExecutionInfo, bool) @@ -302,10 +303,11 @@ CheckSpeculativeWorkflowTaskTimeoutTask(task *tasks.WorkflowTaskTimeoutTask) bool RemoveSpeculativeWorkflowTaskTimeoutTask() + IsDirty() bool StartTransaction(entry *namespace.Namespace) (bool, error) CloseTransactionAsMutation(transactionPolicy TransactionPolicy) (*persistence.WorkflowMutation, []*persistence.WorkflowEvents, error) CloseTransactionAsSnapshot(transactionPolicy TransactionPolicy) (*persistence.WorkflowSnapshot, []*persistence.WorkflowEvents, error) - GenerateMigrationTasks() (tasks.Task, int64, error) + GenerateMigrationTasks() ([]tasks.Task, int64, error) // ContinueAsNewMinBackoff calculate minimal backoff for next ContinueAsNew run. // Input backoffDuration is current backoff for next run. diff -Nru temporal-1.21.5-1/src/service/history/workflow/mutable_state_impl.go temporal-1.22.5/src/service/history/workflow/mutable_state_impl.go --- temporal-1.21.5-1/src/service/history/workflow/mutable_state_impl.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/workflow/mutable_state_impl.go 2024-02-23 09:45:43.000000000 +0000 @@ -832,6 +832,23 @@ return ms.GetActivityInfo(eventID) } +// GetActivityType gets the ActivityType from ActivityInfo if set, +// or from the events history otherwise for backwards compatibility. +func (ms *MutableStateImpl) GetActivityType( + ctx context.Context, + ai *persistencespb.ActivityInfo, +) (*commonpb.ActivityType, error) { + if ai.GetActivityType() != nil { + return ai.GetActivityType(), nil + } + // For backwards compatibility in case ActivityType is not set in ActivityInfo. + scheduledEvent, err := ms.GetActivityScheduledEvent(ctx, ai.ScheduledEventId) + if err != nil { + return nil, err + } + return scheduledEvent.GetActivityTaskScheduledEventAttributes().ActivityType, nil +} + // GetChildExecutionInfo gives details about a child execution that is currently in progress. 
func (ms *MutableStateImpl) GetChildExecutionInfo( initiatedEventID int64, @@ -1664,7 +1681,7 @@ // - using versioning var sourceVersionStamp *commonpb.WorkerVersionStamp if command.UseCompatibleVersion { - sourceVersionStamp = common.StampIfUsingVersioning(previousExecutionInfo.WorkerVersionStamp) + sourceVersionStamp = worker_versioning.StampIfUsingVersioning(previousExecutionInfo.WorkerVersionStamp) } req := &historyservice.StartWorkflowExecutionRequest{ @@ -2123,17 +2140,16 @@ saPayload, found := searchAttributes[searchattribute.BuildIds] if !found { return []string{}, nil - } else { - decoded, err := searchattribute.DecodeValue(saPayload, enumspb.INDEXED_VALUE_TYPE_KEYWORD_LIST, true) - if err != nil { - return nil, err - } - searchAttributeValues, ok := decoded.([]string) - if !ok { - return nil, serviceerror.NewInternal("invalid search attribute value stored for BuildIds") - } - return searchAttributeValues, nil } + decoded, err := searchattribute.DecodeValue(saPayload, enumspb.INDEXED_VALUE_TYPE_KEYWORD_LIST, true) + if err != nil { + return nil, err + } + searchAttributeValues, ok := decoded.([]string) + if !ok { + return nil, serviceerror.NewInternal("invalid search attribute value stored for BuildIds") + } + return searchAttributeValues, nil } // Takes a list of loaded build IDs from a search attribute and adds new build IDs to it. Returns a potentially modified @@ -2352,6 +2368,7 @@ HasRetryPolicy: attributes.RetryPolicy != nil, Attempt: 1, UseCompatibleVersion: attributes.UseCompatibleVersion, + ActivityType: attributes.GetActivityType(), } if ai.HasRetryPolicy { ai.RetryInitialInterval = attributes.RetryPolicy.GetInitialInterval() @@ -4377,10 +4394,14 @@ return setStateStatus(ms.executionState, state, status) } +func (ms *MutableStateImpl) IsDirty() bool { + return ms.hBuilder.IsDirty() || len(ms.InsertTasks) > 0 +} + func (ms *MutableStateImpl) StartTransaction( namespaceEntry *namespace.Namespace, ) (bool, error) { - if ms.hBuilder.IsDirty() || len(ms.InsertTasks) > 0 { + if ms.IsDirty() { ms.logger.Error("MutableState encountered dirty transaction", tag.WorkflowNamespaceID(ms.executionInfo.NamespaceId), tag.WorkflowID(ms.executionInfo.WorkflowId), @@ -4585,7 +4606,7 @@ ms.appliedEvents[id] = struct{}{} } -func (ms *MutableStateImpl) GenerateMigrationTasks() (tasks.Task, int64, error) { +func (ms *MutableStateImpl) GenerateMigrationTasks() ([]tasks.Task, int64, error) { return ms.taskGenerator.GenerateMigrationTasks() } diff -Nru temporal-1.21.5-1/src/service/history/workflow/mutable_state_impl_test.go temporal-1.22.5/src/service/history/workflow/mutable_state_impl_test.go --- temporal-1.21.5-1/src/service/history/workflow/mutable_state_impl_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/workflow/mutable_state_impl_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -60,6 +60,7 @@ "go.temporal.io/server/common/persistence/versionhistory" "go.temporal.io/server/common/primitives/timestamp" "go.temporal.io/server/common/searchattribute" + "go.temporal.io/server/common/tqname" "go.temporal.io/server/common/worker_versioning" "go.temporal.io/server/service/history/configs" "go.temporal.io/server/service/history/events" @@ -461,14 +462,25 @@ err = s.mutableState.UpdateCurrentVersion(version+1, true) s.NoError(err) + name, err := tqname.FromBaseName("tq") + s.NoError(err) + _, _, err = s.mutableState.AddWorkflowTaskStartedEvent( s.mutableState.GetNextEventID(), uuid.New(), - &taskqueuepb.TaskQueue{}, + &taskqueuepb.TaskQueue{Name: 
name.WithPartition(5).FullName()}, "random identity", ) s.NoError(err) s.Equal(0, s.mutableState.hBuilder.NumBufferedEvents()) + + mutation, err := s.mutableState.hBuilder.Finish(true) + s.NoError(err) + s.Equal(1, len(mutation.DBEventsBatches)) + s.Equal(2, len(mutation.DBEventsBatches[0])) + attrs := mutation.DBEventsBatches[0][0].GetWorkflowTaskScheduledEventAttributes() + s.NotNil(attrs) + s.Equal("tq", attrs.TaskQueue.Name) } func (s *mutableStateSuite) TestSanitizedMutableState() { diff -Nru temporal-1.21.5-1/src/service/history/workflow/mutable_state_mock.go temporal-1.22.5/src/service/history/workflow/mutable_state_mock.go --- temporal-1.21.5-1/src/service/history/workflow/mutable_state_mock.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/workflow/mutable_state_mock.go 2024-02-23 09:45:43.000000000 +0000 @@ -985,10 +985,10 @@ } // GenerateMigrationTasks mocks base method. -func (m *MockMutableState) GenerateMigrationTasks() (tasks.Task, int64, error) { +func (m *MockMutableState) GenerateMigrationTasks() ([]tasks.Task, int64, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GenerateMigrationTasks") - ret0, _ := ret[0].(tasks.Task) + ret0, _ := ret[0].([]tasks.Task) ret1, _ := ret[1].(int64) ret2, _ := ret[2].(error) return ret0, ret1, ret2 @@ -1061,6 +1061,21 @@ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetActivityScheduledEvent", reflect.TypeOf((*MockMutableState)(nil).GetActivityScheduledEvent), arg0, arg1) } +// GetActivityType mocks base method. +func (m *MockMutableState) GetActivityType(arg0 context.Context, arg1 *v112.ActivityInfo) (*v10.ActivityType, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetActivityType", arg0, arg1) + ret0, _ := ret[0].(*v10.ActivityType) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetActivityType indicates an expected call of GetActivityType. +func (mr *MockMutableStateMockRecorder) GetActivityType(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetActivityType", reflect.TypeOf((*MockMutableState)(nil).GetActivityType), arg0, arg1) +} + // GetApproximatePersistedSize mocks base method. func (m *MockMutableState) GetApproximatePersistedSize() int { m.ctrl.T.Helper() @@ -1795,6 +1810,20 @@ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsCurrentWorkflowGuaranteed", reflect.TypeOf((*MockMutableState)(nil).IsCurrentWorkflowGuaranteed)) } +// IsDirty mocks base method. +func (m *MockMutableState) IsDirty() bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IsDirty") + ret0, _ := ret[0].(bool) + return ret0 +} + +// IsDirty indicates an expected call of IsDirty. +func (mr *MockMutableStateMockRecorder) IsDirty() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsDirty", reflect.TypeOf((*MockMutableState)(nil).IsDirty)) +} + // IsResourceDuplicated mocks base method. 
func (m *MockMutableState) IsResourceDuplicated(resourceDedupKey definition.DeduplicationID) bool { m.ctrl.T.Helper() diff -Nru temporal-1.21.5-1/src/service/history/workflow/mutable_state_rebuilder.go temporal-1.22.5/src/service/history/workflow/mutable_state_rebuilder.go --- temporal-1.21.5-1/src/service/history/workflow/mutable_state_rebuilder.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/workflow/mutable_state_rebuilder.go 2024-02-23 09:45:43.000000000 +0000 @@ -134,6 +134,7 @@ return nil, serviceerror.NewInternal(ErrMessageHistorySizeZero) } firstEvent := history[0] + lastEvent := history[len(history)-1] var newRunMutableState MutableState taskGenerator := taskGeneratorProvider.NewTaskGenerator(b.shard, b.mutableState) @@ -143,26 +144,24 @@ executionInfo := b.mutableState.GetExecutionInfo() executionInfo.LastFirstEventId = firstEvent.GetEventId() - for _, event := range history { - // NOTE: stateRebuilder is also being used in the active side - if executionInfo.GetVersionHistories() != nil { - if err := b.mutableState.UpdateCurrentVersion(event.GetVersion(), true); err != nil { - return nil, err - } - versionHistories := executionInfo.GetVersionHistories() - versionHistory, err := versionhistory.GetCurrentVersionHistory(versionHistories) - if err != nil { - return nil, err - } - if err := versionhistory.AddOrUpdateVersionHistoryItem(versionHistory, versionhistory.NewVersionHistoryItem( - event.GetEventId(), - event.GetVersion(), - )); err != nil { - return nil, err - } - executionInfo.LastEventTaskId = event.GetTaskId() - } + // NOTE: stateRebuilder is also being used in the active side + if err := b.mutableState.UpdateCurrentVersion(lastEvent.GetVersion(), true); err != nil { + return nil, err + } + versionHistories := executionInfo.GetVersionHistories() + versionHistory, err := versionhistory.GetCurrentVersionHistory(versionHistories) + if err != nil { + return nil, err + } + if err := versionhistory.AddOrUpdateVersionHistoryItem(versionHistory, versionhistory.NewVersionHistoryItem( + lastEvent.GetEventId(), + lastEvent.GetVersion(), + )); err != nil { + return nil, err + } + executionInfo.LastEventTaskId = lastEvent.GetTaskId() + for _, event := range history { switch event.GetEventType() { case enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_STARTED: attributes := event.GetWorkflowExecutionStartedEventAttributes() diff -Nru temporal-1.21.5-1/src/service/history/workflow/retry.go temporal-1.22.5/src/service/history/workflow/retry.go --- temporal-1.21.5-1/src/service/history/workflow/retry.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/workflow/retry.go 2024-02-23 09:45:43.000000000 +0000 @@ -45,6 +45,7 @@ "go.temporal.io/server/common" "go.temporal.io/server/common/backoff" "go.temporal.io/server/common/primitives/timestamp" + "go.temporal.io/server/common/worker_versioning" ) // TODO treat 0 as 0, not infinite @@ -245,7 +246,7 @@ // For cron: do not propagate (always start on latest version). 
var sourceVersionStamp *commonpb.WorkerVersionStamp if initiator == enumspb.CONTINUE_AS_NEW_INITIATOR_RETRY { - sourceVersionStamp = common.StampIfUsingVersioning(previousMutableState.GetWorkerVersionStamp()) + sourceVersionStamp = worker_versioning.StampIfUsingVersioning(previousMutableState.GetWorkerVersionStamp()) } req := &historyservice.StartWorkflowExecutionRequest{ diff -Nru temporal-1.21.5-1/src/service/history/workflow/task_generator.go temporal-1.22.5/src/service/history/workflow/task_generator.go --- temporal-1.21.5-1/src/service/history/workflow/task_generator.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/workflow/task_generator.go 2024-02-23 09:45:43.000000000 +0000 @@ -102,7 +102,7 @@ GenerateHistoryReplicationTasks( events []*historypb.HistoryEvent, ) error - GenerateMigrationTasks() (tasks.Task, int64, error) + GenerateMigrationTasks() ([]tasks.Task, int64, error) } TaskGeneratorImpl struct { @@ -208,8 +208,8 @@ // archiveTime is the time when the archival queue recognizes the ArchiveExecutionTask as ready-to-process archiveTime := closeEvent.GetEventTime().Add(delay) - // We can skip visibility archival in the close execution task if we are using the durable archival flow. - // The visibility archival will be handled by the archival queue. + // This flag is only untrue for old server versions which were using the archival workflow instead of the + // archival queue. closeExecutionTask.CanSkipVisibilityArchival = true task := &tasks.ArchiveExecutionTask{ // TaskID is set by the shard @@ -614,7 +614,7 @@ return nil } -func (r *TaskGeneratorImpl) GenerateMigrationTasks() (tasks.Task, int64, error) { +func (r *TaskGeneratorImpl) GenerateMigrationTasks() ([]tasks.Task, int64, error) { executionInfo := r.mutableState.GetExecutionInfo() versionHistory, err := versionhistory.GetCurrentVersionHistory(executionInfo.GetVersionHistories()) if err != nil { @@ -625,21 +625,37 @@ return nil, 0, err } + workflowKey := r.mutableState.GetWorkflowKey() + if r.mutableState.GetExecutionState().State == enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED { - return &tasks.SyncWorkflowStateTask{ + return []tasks.Task{&tasks.SyncWorkflowStateTask{ // TaskID, VisibilityTimestamp is set by shard - WorkflowKey: r.mutableState.GetWorkflowKey(), + WorkflowKey: workflowKey, Version: lastItem.GetVersion(), - }, 1, nil - } else { - return &tasks.HistoryReplicationTask{ - // TaskID, VisibilityTimestamp is set by shard - WorkflowKey: r.mutableState.GetWorkflowKey(), - FirstEventID: executionInfo.LastFirstEventId, - NextEventID: lastItem.GetEventId() + 1, - Version: lastItem.GetVersion(), - }, executionInfo.StateTransitionCount, nil + }}, 1, nil } + + now := time.Now().UTC() + replicationTasks := make([]tasks.Task, 0, len(r.mutableState.GetPendingActivityInfos())+1) + replicationTasks = append(replicationTasks, &tasks.HistoryReplicationTask{ + // TaskID, VisibilityTimestamp is set by shard + WorkflowKey: workflowKey, + FirstEventID: executionInfo.LastFirstEventId, + NextEventID: lastItem.GetEventId() + 1, + Version: lastItem.GetVersion(), + }) + activityIDs := make(map[int64]struct{}, len(r.mutableState.GetPendingActivityInfos())) + for activityID := range r.mutableState.GetPendingActivityInfos() { + activityIDs[activityID] = struct{}{} + } + replicationTasks = append(replicationTasks, convertSyncActivityInfos( + now, + workflowKey, + r.mutableState.GetPendingActivityInfos(), + activityIDs, + )...) 
+ return replicationTasks, executionInfo.StateTransitionCount, nil + } func (r *TaskGeneratorImpl) getTimerSequence() TimerSequence { @@ -670,9 +686,6 @@ // itself is also enabled. // For both history and visibility, we check that archival is enabled for both the cluster and the namespace. func (r *TaskGeneratorImpl) archivalQueueEnabled() bool { - if !r.config.DurableArchivalEnabled() { - return false - } namespaceEntry := r.mutableState.GetNamespaceEntry() return r.archivalMetadata.GetHistoryConfig().ClusterConfiguredForArchival() && namespaceEntry.HistoryArchivalState().State == enumspb.ARCHIVAL_STATE_ENABLED || diff -Nru temporal-1.21.5-1/src/service/history/workflow/task_generator_mock.go temporal-1.22.5/src/service/history/workflow/task_generator_mock.go --- temporal-1.21.5-1/src/service/history/workflow/task_generator_mock.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/workflow/task_generator_mock.go 2024-02-23 09:45:43.000000000 +0000 @@ -174,10 +174,10 @@ } // GenerateMigrationTasks mocks base method. -func (m *MockTaskGenerator) GenerateMigrationTasks() (tasks.Task, int64, error) { +func (m *MockTaskGenerator) GenerateMigrationTasks() ([]tasks.Task, int64, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GenerateMigrationTasks") - ret0, _ := ret[0].(tasks.Task) + ret0, _ := ret[0].([]tasks.Task) ret1, _ := ret[1].(int64) ret2, _ := ret[2].(error) return ret0, ret1, ret2 diff -Nru temporal-1.21.5-1/src/service/history/workflow/task_generator_test.go temporal-1.22.5/src/service/history/workflow/task_generator_test.go --- temporal-1.21.5-1/src/service/history/workflow/task_generator_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/workflow/task_generator_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -76,7 +76,6 @@ } type testParams struct { - DurableArchivalEnabled bool DeleteAfterClose bool CloseEventTime time.Time Retention time.Duration @@ -96,17 +95,8 @@ func TestTaskGeneratorImpl_GenerateWorkflowCloseTasks(t *testing.T) { for _, c := range []testConfig{ { - Name: "delete after retention", - ConfigFn: func(p *testParams) { - p.ExpectCloseExecutionVisibilityTask = true - p.ExpectDeleteHistoryEventTask = true - }, - }, - { Name: "use archival queue", ConfigFn: func(p *testParams) { - p.DurableArchivalEnabled = true - p.ExpectCloseExecutionVisibilityTask = true p.ExpectArchiveExecutionTask = true }, @@ -114,14 +104,12 @@ { Name: "delete after close ignores durable execution flag", ConfigFn: func(p *testParams) { - p.DurableArchivalEnabled = true p.DeleteAfterClose = true }, }, { Name: "delay is zero", ConfigFn: func(p *testParams) { - p.DurableArchivalEnabled = true p.CloseEventTime = time.Unix(0, 0) p.Retention = 24 * time.Hour p.ArchivalProcessorArchiveDelay = 0 @@ -134,7 +122,6 @@ { Name: "delay exceeds retention", ConfigFn: func(p *testParams) { - p.DurableArchivalEnabled = true p.CloseEventTime = time.Unix(0, 0) p.Retention = 24 * time.Hour p.ArchivalProcessorArchiveDelay = 48*time.Hour + time.Second @@ -147,7 +134,6 @@ { Name: "delay is less than retention", ConfigFn: func(p *testParams) { - p.DurableArchivalEnabled = true p.CloseEventTime = time.Unix(0, 0) p.Retention = 24 * time.Hour p.ArchivalProcessorArchiveDelay = 12 * time.Hour @@ -160,7 +146,6 @@ { Name: "history archival disabled", ConfigFn: func(p *testParams) { - p.DurableArchivalEnabled = true p.HistoryArchivalEnabledInCluster = false p.HistoryArchivalEnabledInNamespace = false @@ -171,7 +156,6 @@ { Name: "visibility archival disabled", ConfigFn: 
func(p *testParams) { - p.DurableArchivalEnabled = true p.VisibilityArchivalEnabledForCluster = false p.VisibilityArchivalEnabledInNamespace = false @@ -182,7 +166,6 @@ { Name: "archival disabled in cluster", ConfigFn: func(p *testParams) { - p.DurableArchivalEnabled = true p.HistoryArchivalEnabledInCluster = false p.VisibilityArchivalEnabledForCluster = false @@ -194,7 +177,6 @@ { Name: "archival disabled in namespace", ConfigFn: func(p *testParams) { - p.DurableArchivalEnabled = true p.HistoryArchivalEnabledInNamespace = false p.VisibilityArchivalEnabledInNamespace = false @@ -210,7 +192,6 @@ ctrl := gomock.NewController(t) mockLogger := log.NewMockLogger(ctrl) p := testParams{ - DurableArchivalEnabled: false, DeleteAfterClose: false, CloseEventTime: now, Retention: time.Hour * 24 * 7, @@ -270,9 +251,6 @@ mutableState.EXPECT().GetCurrentBranchToken().Return(nil, nil).AnyTimes() retentionTimerDelay := time.Second cfg := &configs.Config{ - DurableArchivalEnabled: func() bool { - return p.DurableArchivalEnabled - }, RetentionTimerJitterDuration: func() time.Duration { return retentionTimerDelay }, diff -Nru temporal-1.21.5-1/src/service/history/workflow/task_refresher.go temporal-1.22.5/src/service/history/workflow/task_refresher.go --- temporal-1.21.5-1/src/service/history/workflow/task_refresher.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/workflow/task_refresher.go 2024-02-23 09:45:43.000000000 +0000 @@ -148,7 +148,7 @@ return err } - return r.refreshTasksForWorkflowSearchAttr(taskGenerator) + return r.refreshTasksForWorkflowSearchAttr(mutableState, taskGenerator) } func (r *TaskRefresherImpl) refreshTasksForWorkflowStart( @@ -157,6 +157,11 @@ taskGenerator TaskGenerator, ) error { + executionState := mutableState.GetExecutionState() + if executionState.Status != enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING { + return nil + } + startEvent, err := mutableState.GetStartEvent(ctx) if err != nil { return err @@ -187,20 +192,18 @@ ) error { executionState := mutableState.GetExecutionState() - - if executionState.Status != enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING { - closeEvent, err := mutableState.GetCompletionEvent(ctx) - if err != nil { - return err - } - - return taskGenerator.GenerateWorkflowCloseTasks( - closeEvent, - false, - ) + if executionState.Status == enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING { + return nil + } + closeEvent, err := mutableState.GetCompletionEvent(ctx) + if err != nil { + return err } - return nil + return taskGenerator.GenerateWorkflowCloseTasks( + closeEvent, + false, + ) } func (r *TaskRefresherImpl) refreshTasksForRecordWorkflowStarted( @@ -209,20 +212,19 @@ taskGenerator TaskGenerator, ) error { + executionState := mutableState.GetExecutionState() + if executionState.Status != enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING { + return nil + } + startEvent, err := mutableState.GetStartEvent(ctx) if err != nil { return err } - executionState := mutableState.GetExecutionState() - - if executionState.Status == enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING { - return taskGenerator.GenerateRecordWorkflowStartedTasks( - startEvent, - ) - } - - return nil + return taskGenerator.GenerateRecordWorkflowStartedTasks( + startEvent, + ) } func (r *TaskRefresherImpl) refreshWorkflowTaskTasks( @@ -230,6 +232,11 @@ taskGenerator TaskGenerator, ) error { + executionState := mutableState.GetExecutionState() + if executionState.Status != enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING { + return nil + } + if !mutableState.HasPendingWorkflowTask() { // no workflow 
task at all return nil @@ -265,6 +272,11 @@ taskGenerator TaskGenerator, ) error { + executionState := mutableState.GetExecutionState() + if executionState.Status != enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING { + return nil + } + pendingActivityInfos := mutableState.GetPendingActivityInfos() Loop: @@ -308,8 +320,12 @@ mutableState MutableState, ) error { - pendingTimerInfos := mutableState.GetPendingTimerInfos() + executionState := mutableState.GetExecutionState() + if executionState.Status != enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING { + return nil + } + pendingTimerInfos := mutableState.GetPendingTimerInfos() for _, timerInfo := range pendingTimerInfos { // clear all timer task mask for later timer task re-generation timerInfo.TaskStatus = TimerTaskStatusNone @@ -365,6 +381,11 @@ taskGenerator TaskGenerator, ) error { + executionState := mutableState.GetExecutionState() + if executionState.Status != enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING { + return nil + } + pendingRequestCancelInfos := mutableState.GetPendingRequestCancelExternalInfos() for _, requestCancelInfo := range pendingRequestCancelInfos { @@ -389,6 +410,11 @@ taskGenerator TaskGenerator, ) error { + executionState := mutableState.GetExecutionState() + if executionState.Status != enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING { + return nil + } + pendingSignalInfos := mutableState.GetPendingSignalExternalInfos() for _, signalInfo := range pendingSignalInfos { @@ -409,8 +435,13 @@ } func (r *TaskRefresherImpl) refreshTasksForWorkflowSearchAttr( + mutableState MutableState, taskGenerator TaskGenerator, ) error { + executionState := mutableState.GetExecutionState() + if executionState.Status != enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING { + return nil + } return taskGenerator.GenerateUpsertVisibilityTask() } diff -Nru temporal-1.21.5-1/src/service/history/workflow/transaction_impl.go temporal-1.22.5/src/service/history/workflow/transaction_impl.go --- temporal-1.21.5-1/src/service/history/workflow/transaction_impl.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/workflow/transaction_impl.go 2024-02-23 09:45:43.000000000 +0000 @@ -37,7 +37,6 @@ "go.temporal.io/server/common/metrics" "go.temporal.io/server/common/namespace" "go.temporal.io/server/common/persistence" - "go.temporal.io/server/common/persistence/versionhistory" "go.temporal.io/server/service/history/events" "go.temporal.io/server/service/history/shard" ) @@ -602,11 +601,6 @@ namespaceID := executionInfo.NamespaceId workflowID := executionInfo.WorkflowId runID := executionState.RunId - currentVersionHistory, err := versionhistory.GetCurrentVersionHistory(executionInfo.VersionHistories) - if err != nil { - return err - } - currentBranchToken := currentVersionHistory.BranchToken workflowState := executionState.State workflowStatus := executionState.Status lastFirstEventID := executionInfo.LastFirstEventId @@ -624,9 +618,9 @@ lastFirstEventTxnID, nextEventID, lastWorkflowTaskStartEventID, - currentBranchToken, workflowState, workflowStatus, + executionInfo.VersionHistories, )) return nil } @@ -646,11 +640,6 @@ namespaceID := executionInfo.NamespaceId workflowID := executionInfo.WorkflowId runID := executionState.RunId - currentVersionHistory, err := versionhistory.GetCurrentVersionHistory(executionInfo.VersionHistories) - if err != nil { - return err - } - currentBranchToken := currentVersionHistory.BranchToken workflowState := executionState.State workflowStatus := executionState.Status lastFirstEventID := executionInfo.LastFirstEventId @@ -668,9 
+657,9 @@ lastFirstEventTxnID, nextEventID, lastWorkflowTaskStartEventID, - currentBranchToken, workflowState, workflowStatus, + executionInfo.VersionHistories, )) return nil } diff -Nru temporal-1.21.5-1/src/service/history/workflow/workflow_task_state_machine.go temporal-1.22.5/src/service/history/workflow/workflow_task_state_machine.go --- temporal-1.21.5-1/src/service/history/workflow/workflow_task_state_machine.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/workflow/workflow_task_state_machine.go 2024-02-23 09:45:43.000000000 +0000 @@ -47,6 +47,7 @@ "go.temporal.io/server/common/log/tag" "go.temporal.io/server/common/metrics" "go.temporal.io/server/common/primitives/timestamp" + "go.temporal.io/server/common/tqname" ) type ( @@ -458,7 +459,9 @@ workflowTask.Type = enumsspb.WORKFLOW_TASK_TYPE_NORMAL workflowTaskScheduledEventCreated = true scheduledEvent := m.ms.hBuilder.AddWorkflowTaskScheduledEvent( - taskQueue, + // taskQueue may come directly from RecordWorkflowTaskStarted from matching, which will + // contain a specific partition name. We only want to record the base name here. + cleanTaskQueue(taskQueue), workflowTask.WorkflowTaskTimeout, workflowTask.Attempt, startTime, @@ -1035,3 +1038,16 @@ return nil } + +func cleanTaskQueue(tq *taskqueuepb.TaskQueue) *taskqueuepb.TaskQueue { + if tq == nil { + return tq + } + name, err := tqname.Parse(tq.Name) + if err != nil { + return tq + } + cleanTq := *tq + cleanTq.Name = name.BaseNameString() + return &cleanTq +} diff -Nru temporal-1.21.5-1/src/service/history/workflowRebuilder.go temporal-1.22.5/src/service/history/workflowRebuilder.go --- temporal-1.21.5-1/src/service/history/workflowRebuilder.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/workflowRebuilder.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,173 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- -//go:generate mockgen -copyright_file ../../LICENSE -package $GOPACKAGE -source $GOFILE -destination workflowRebuilder_mock.go - -package history - -import ( - "context" - "math" - - "go.temporal.io/api/serviceerror" - - "go.temporal.io/server/common/definition" - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/persistence/versionhistory" - "go.temporal.io/server/service/history/api" - "go.temporal.io/server/service/history/ndc" - "go.temporal.io/server/service/history/shard" - "go.temporal.io/server/service/history/workflow" - wcache "go.temporal.io/server/service/history/workflow/cache" -) - -type ( - workflowRebuilder interface { - // rebuild rebuilds a workflow, in case of any kind of corruption - rebuild( - ctx context.Context, - workflowKey definition.WorkflowKey, - ) error - } - - workflowRebuilderImpl struct { - shard shard.Context - workflowConsistencyChecker api.WorkflowConsistencyChecker - transaction workflow.Transaction - logger log.Logger - } -) - -var _ workflowRebuilder = (*workflowRebuilderImpl)(nil) - -func NewWorkflowRebuilder( - shard shard.Context, - workflowCache wcache.Cache, - logger log.Logger, -) *workflowRebuilderImpl { - return &workflowRebuilderImpl{ - shard: shard, - workflowConsistencyChecker: api.NewWorkflowConsistencyChecker(shard, workflowCache), - transaction: workflow.NewTransaction(shard), - logger: logger, - } -} - -func (r *workflowRebuilderImpl) rebuild( - ctx context.Context, - workflowKey definition.WorkflowKey, -) (retError error) { - - wfContext, err := r.workflowConsistencyChecker.GetWorkflowContext( - ctx, - nil, - api.BypassMutableStateConsistencyPredicate, - workflowKey, - workflow.LockPriorityHigh, - ) - if err != nil { - return err - } - defer func() { - wfContext.GetReleaseFn()(retError) - wfContext.GetContext().Clear() - }() - - mutableState := wfContext.GetMutableState() - _, dbRecordVersion := mutableState.GetUpdateCondition() - - requestID := mutableState.GetExecutionState().CreateRequestId - versionHistories := mutableState.GetExecutionInfo().VersionHistories - currentVersionHistory, err := versionhistory.GetCurrentVersionHistory(versionHistories) - if err != nil { - return err - } - branchToken := currentVersionHistory.BranchToken - stateTransitionCount := mutableState.GetExecutionInfo().StateTransitionCount - - rebuildMutableState, err := r.replayResetWorkflow( - ctx, - workflowKey, - branchToken, - stateTransitionCount, - dbRecordVersion, - requestID, - ) - if err != nil { - return err - } - return r.persistToDB(ctx, rebuildMutableState) -} - -func (r *workflowRebuilderImpl) replayResetWorkflow( - ctx context.Context, - workflowKey definition.WorkflowKey, - branchToken []byte, - stateTransitionCount int64, - dbRecordVersion int64, - requestID string, -) (workflow.MutableState, error) { - - rebuildMutableState, rebuildHistorySize, err := ndc.NewStateRebuilder(r.shard, r.logger).Rebuild( - ctx, - r.shard.GetTimeSource().Now(), - workflowKey, - branchToken, - math.MaxInt64-1, // NOTE: this is last event ID, layer below will +1 to calculate the next event ID - nil, // skip event ID & version check - workflowKey, - branchToken, - requestID, - ) - if err != nil { - return nil, err - } - - // note: this is an admin API, for operator to recover a corrupted mutable state, so state transition count - // should remain the same, the -= 1 exists here since later CloseTransactionAsSnapshot will += 1 to state transition count - rebuildMutableState.GetExecutionInfo().StateTransitionCount = stateTransitionCount - 1 - 
rebuildMutableState.AddHistorySize(rebuildHistorySize) - rebuildMutableState.SetUpdateCondition(rebuildMutableState.GetNextEventID(), dbRecordVersion) - return rebuildMutableState, nil -} - -func (r *workflowRebuilderImpl) persistToDB( - ctx context.Context, - mutableState workflow.MutableState, -) error { - resetWorkflowSnapshot, resetWorkflowEventsSeq, err := mutableState.CloseTransactionAsSnapshot( - workflow.TransactionPolicyPassive, - ) - if err != nil { - return err - } - if len(resetWorkflowEventsSeq) != 0 { - return serviceerror.NewInternal("workflowRebuilder encountered new events when rebuilding mutable state") - } - - return r.transaction.SetWorkflowExecution( - ctx, - resetWorkflowSnapshot, - ) -} diff -Nru temporal-1.21.5-1/src/service/history/workflowRebuilder_mock.go temporal-1.22.5/src/service/history/workflowRebuilder_mock.go --- temporal-1.21.5-1/src/service/history/workflowRebuilder_mock.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/workflowRebuilder_mock.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,74 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Code generated by MockGen. DO NOT EDIT. -// Source: workflowRebuilder.go - -// Package history is a generated GoMock package. -package history - -import ( - context "context" - reflect "reflect" - - gomock "github.com/golang/mock/gomock" - definition "go.temporal.io/server/common/definition" -) - -// MockworkflowRebuilder is a mock of workflowRebuilder interface. -type MockworkflowRebuilder struct { - ctrl *gomock.Controller - recorder *MockworkflowRebuilderMockRecorder -} - -// MockworkflowRebuilderMockRecorder is the mock recorder for MockworkflowRebuilder. -type MockworkflowRebuilderMockRecorder struct { - mock *MockworkflowRebuilder -} - -// NewMockworkflowRebuilder creates a new mock instance. -func NewMockworkflowRebuilder(ctrl *gomock.Controller) *MockworkflowRebuilder { - mock := &MockworkflowRebuilder{ctrl: ctrl} - mock.recorder = &MockworkflowRebuilderMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockworkflowRebuilder) EXPECT() *MockworkflowRebuilderMockRecorder { - return m.recorder -} - -// rebuild mocks base method. 
-func (m *MockworkflowRebuilder) rebuild(ctx context.Context, workflowKey definition.WorkflowKey) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "rebuild", ctx, workflowKey) - ret0, _ := ret[0].(error) - return ret0 -} - -// rebuild indicates an expected call of rebuild. -func (mr *MockworkflowRebuilderMockRecorder) rebuild(ctx, workflowKey interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "rebuild", reflect.TypeOf((*MockworkflowRebuilder)(nil).rebuild), ctx, workflowKey) -} diff -Nru temporal-1.21.5-1/src/service/history/workflowTaskHandler.go temporal-1.22.5/src/service/history/workflowTaskHandler.go --- temporal-1.21.5-1/src/service/history/workflowTaskHandler.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/workflowTaskHandler.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,1410 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- -package history - -import ( - "context" - "fmt" - "time" - - "github.com/pborman/uuid" - commandpb "go.temporal.io/api/command/v1" - commonpb "go.temporal.io/api/common/v1" - enumspb "go.temporal.io/api/enums/v1" - failurepb "go.temporal.io/api/failure/v1" - protocolpb "go.temporal.io/api/protocol/v1" - "go.temporal.io/api/serviceerror" - "go.temporal.io/api/workflowservice/v1" - - "go.temporal.io/server/internal/effect" - "go.temporal.io/server/internal/protocol" - "go.temporal.io/server/service/history/workflow/update" - - "go.temporal.io/server/api/historyservice/v1" - tokenspb "go.temporal.io/server/api/token/v1" - "go.temporal.io/server/common" - "go.temporal.io/server/common/backoff" - "go.temporal.io/server/common/collection" - "go.temporal.io/server/common/enums" - "go.temporal.io/server/common/failure" - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/log/tag" - "go.temporal.io/server/common/metrics" - "go.temporal.io/server/common/namespace" - "go.temporal.io/server/common/payload" - "go.temporal.io/server/common/payloads" - "go.temporal.io/server/common/primitives/timestamp" - "go.temporal.io/server/common/searchattribute" - "go.temporal.io/server/service/history/configs" - "go.temporal.io/server/service/history/shard" - "go.temporal.io/server/service/history/workflow" -) - -type ( - commandAttrValidationFn func() (enumspb.WorkflowTaskFailedCause, error) - - workflowTaskHandlerImpl struct { - identity string - workflowTaskCompletedID int64 - - // internal state - hasBufferedEvents bool - workflowTaskFailedCause *workflowTaskFailedCause - activityNotStartedCancelled bool - newMutableState workflow.MutableState - stopProcessing bool // should stop processing any more commands - mutableState workflow.MutableState - effects effect.Controller - initiatedChildExecutionsInBatch map[string]struct{} // Set of initiated child executions in the workflow task - updateRegistry update.Registry - - // validation - attrValidator *commandAttrValidator - sizeLimitChecker *workflowSizeChecker - searchAttributesMapperProvider searchattribute.MapperProvider - - logger log.Logger - namespaceRegistry namespace.Registry - metricsHandler metrics.Handler - config *configs.Config - shard shard.Context - tokenSerializer common.TaskTokenSerializer - } - - workflowTaskFailedCause struct { - failedCause enumspb.WorkflowTaskFailedCause - causeErr error - workflowFailure *failurepb.Failure - } - - workflowTaskResponseMutation func( - resp *historyservice.RespondWorkflowTaskCompletedResponse, - ) error - - commandPostAction func( - ctx context.Context, - ) (workflowTaskResponseMutation, error) - - handleCommandResponse struct { - workflowTaskResponseMutation workflowTaskResponseMutation - commandPostAction commandPostAction - } -) - -func newWorkflowTaskHandler( - identity string, - workflowTaskCompletedID int64, - mutableState workflow.MutableState, - updateRegistry update.Registry, - effects effect.Controller, - attrValidator *commandAttrValidator, - sizeLimitChecker *workflowSizeChecker, - logger log.Logger, - namespaceRegistry namespace.Registry, - metricsHandler metrics.Handler, - config *configs.Config, - shard shard.Context, - searchAttributesMapperProvider searchattribute.MapperProvider, - hasBufferedEvents bool, -) *workflowTaskHandlerImpl { - - return &workflowTaskHandlerImpl{ - identity: identity, - workflowTaskCompletedID: workflowTaskCompletedID, - - // internal state - hasBufferedEvents: hasBufferedEvents, - workflowTaskFailedCause: nil, - activityNotStartedCancelled: false, - 
newMutableState: nil, - stopProcessing: false, - mutableState: mutableState, - effects: effects, - initiatedChildExecutionsInBatch: make(map[string]struct{}), - updateRegistry: updateRegistry, - - // validation - attrValidator: attrValidator, - sizeLimitChecker: sizeLimitChecker, - searchAttributesMapperProvider: searchAttributesMapperProvider, - - logger: logger, - namespaceRegistry: namespaceRegistry, - metricsHandler: metricsHandler.WithTags(metrics.OperationTag(metrics.HistoryRespondWorkflowTaskCompletedScope)), - config: config, - shard: shard, - tokenSerializer: common.NewProtoTaskTokenSerializer(), - } -} - -func (handler *workflowTaskHandlerImpl) handleCommands( - ctx context.Context, - commands []*commandpb.Command, - msgs *collection.IndexedTakeList[string, *protocolpb.Message], -) ([]workflowTaskResponseMutation, error) { - if err := handler.attrValidator.validateCommandSequence( - commands, - ); err != nil { - return nil, err - } - - var mutations []workflowTaskResponseMutation - var postActions []commandPostAction - for _, command := range commands { - response, err := handler.handleCommand(ctx, command, msgs) - if err != nil || handler.stopProcessing { - return nil, err - } - if response != nil { - if response.workflowTaskResponseMutation != nil { - mutations = append(mutations, response.workflowTaskResponseMutation) - } - if response.commandPostAction != nil { - postActions = append(postActions, response.commandPostAction) - } - } - } - - if handler.mutableState.IsWorkflowExecutionRunning() { - for _, msg := range msgs.TakeRemaining() { - err := handler.handleMessage(ctx, msg) - if err != nil || handler.stopProcessing { - return nil, err - } - } - } - - for _, postAction := range postActions { - mutation, err := postAction(ctx) - if err != nil || handler.stopProcessing { - return nil, err - } - if mutation != nil { - mutations = append(mutations, mutation) - } - } - - return mutations, nil -} - -//revive:disable:cyclomatic grandfathered -func (handler *workflowTaskHandlerImpl) handleCommand( - ctx context.Context, - command *commandpb.Command, - msgs *collection.IndexedTakeList[string, *protocolpb.Message], -) (*handleCommandResponse, error) { - switch command.GetCommandType() { - case enumspb.COMMAND_TYPE_SCHEDULE_ACTIVITY_TASK: - return handler.handleCommandScheduleActivity(ctx, command.GetScheduleActivityTaskCommandAttributes()) - - case enumspb.COMMAND_TYPE_COMPLETE_WORKFLOW_EXECUTION: - return nil, handler.handleCommandCompleteWorkflow(ctx, command.GetCompleteWorkflowExecutionCommandAttributes(), msgs) - - case enumspb.COMMAND_TYPE_FAIL_WORKFLOW_EXECUTION: - return nil, handler.handleCommandFailWorkflow(ctx, command.GetFailWorkflowExecutionCommandAttributes()) - - case enumspb.COMMAND_TYPE_CANCEL_WORKFLOW_EXECUTION: - return nil, handler.handleCommandCancelWorkflow(ctx, command.GetCancelWorkflowExecutionCommandAttributes()) - - case enumspb.COMMAND_TYPE_START_TIMER: - return nil, handler.handleCommandStartTimer(ctx, command.GetStartTimerCommandAttributes()) - - case enumspb.COMMAND_TYPE_REQUEST_CANCEL_ACTIVITY_TASK: - return nil, handler.handleCommandRequestCancelActivity(ctx, command.GetRequestCancelActivityTaskCommandAttributes()) - - case enumspb.COMMAND_TYPE_CANCEL_TIMER: - return nil, handler.handleCommandCancelTimer(ctx, command.GetCancelTimerCommandAttributes()) - - case enumspb.COMMAND_TYPE_RECORD_MARKER: - return nil, handler.handleCommandRecordMarker(ctx, command.GetRecordMarkerCommandAttributes()) - - case 
enumspb.COMMAND_TYPE_REQUEST_CANCEL_EXTERNAL_WORKFLOW_EXECUTION: - return nil, handler.handleCommandRequestCancelExternalWorkflow(ctx, command.GetRequestCancelExternalWorkflowExecutionCommandAttributes()) - - case enumspb.COMMAND_TYPE_SIGNAL_EXTERNAL_WORKFLOW_EXECUTION: - return nil, handler.handleCommandSignalExternalWorkflow(ctx, command.GetSignalExternalWorkflowExecutionCommandAttributes()) - - case enumspb.COMMAND_TYPE_CONTINUE_AS_NEW_WORKFLOW_EXECUTION: - return nil, handler.handleCommandContinueAsNewWorkflow(ctx, command.GetContinueAsNewWorkflowExecutionCommandAttributes()) - - case enumspb.COMMAND_TYPE_START_CHILD_WORKFLOW_EXECUTION: - return nil, handler.handleCommandStartChildWorkflow(ctx, command.GetStartChildWorkflowExecutionCommandAttributes()) - - case enumspb.COMMAND_TYPE_UPSERT_WORKFLOW_SEARCH_ATTRIBUTES: - return nil, handler.handleCommandUpsertWorkflowSearchAttributes(ctx, command.GetUpsertWorkflowSearchAttributesCommandAttributes()) - - case enumspb.COMMAND_TYPE_MODIFY_WORKFLOW_PROPERTIES: - return nil, handler.handleCommandModifyWorkflowProperties(ctx, command.GetModifyWorkflowPropertiesCommandAttributes()) - - case enumspb.COMMAND_TYPE_PROTOCOL_MESSAGE: - return nil, handler.handleCommandProtocolMessage(ctx, command.GetProtocolMessageCommandAttributes(), msgs) - - default: - return nil, serviceerror.NewInvalidArgument(fmt.Sprintf("Unknown command type: %v", command.GetCommandType())) - } -} - -func (handler *workflowTaskHandlerImpl) handleMessage( - ctx context.Context, - message *protocolpb.Message, -) error { - protocolType, msgType, err := protocol.Identify(message) - if err != nil { - return serviceerror.NewInvalidArgument(err.Error()) - } - if err := handler.sizeLimitChecker.checkIfPayloadSizeExceedsLimit( - // TODO (alex-update): Should use MessageTypeTag here but then it needs to be another metric name too. 
- metrics.CommandTypeTag(msgType.String()), - message.Body.Size(), - fmt.Sprintf("Message type %v exceeds size limit.", msgType), - ); err != nil { - return handler.failWorkflow(enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_UPDATE_WORKFLOW_EXECUTION_MESSAGE, err) - } - - switch protocolType { - case update.ProtocolV1: - upd, ok := handler.updateRegistry.Find(ctx, message.ProtocolInstanceId) - if !ok { - return handler.failWorkflowTask( - enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_UPDATE_WORKFLOW_EXECUTION_MESSAGE, - serviceerror.NewNotFound(fmt.Sprintf("update %q not found", message.ProtocolInstanceId))) - } - if err := upd.OnMessage(ctx, message, workflow.WithEffects(handler.effects, handler.mutableState)); err != nil { - return handler.failWorkflowTaskOnInvalidArgument( - enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_UPDATE_WORKFLOW_EXECUTION_MESSAGE, err) - } - default: - return handler.failWorkflowTask( - enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_UPDATE_WORKFLOW_EXECUTION_MESSAGE, - serviceerror.NewInvalidArgument(fmt.Sprintf("unsupported protocol type %q", protocolType))) - } - - return nil -} - -func (handler *workflowTaskHandlerImpl) handleCommandProtocolMessage( - ctx context.Context, - attr *commandpb.ProtocolMessageCommandAttributes, - msgs *collection.IndexedTakeList[string, *protocolpb.Message], -) error { - handler.metricsHandler.Counter(metrics.CommandTypeProtocolMessage.GetMetricName()).Record(1) - - executionInfo := handler.mutableState.GetExecutionInfo() - namespaceID := namespace.ID(executionInfo.NamespaceId) - - if err := handler.validateCommandAttr( - func() (enumspb.WorkflowTaskFailedCause, error) { - return handler.attrValidator.validateProtocolMessageAttributes( - namespaceID, - attr, - timestamp.DurationValue(executionInfo.WorkflowRunTimeout), - ) - }, - ); err != nil || handler.stopProcessing { - return err - } - - if msg, ok := msgs.Take(attr.MessageId); ok { - return handler.handleMessage(ctx, msg) - } - return handler.failWorkflowTask( - enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_UPDATE_WORKFLOW_EXECUTION_MESSAGE, - serviceerror.NewInvalidArgument(fmt.Sprintf("ProtocolMessageCommand referenced absent message ID %q", attr.MessageId)), - ) -} - -func (handler *workflowTaskHandlerImpl) handleCommandScheduleActivity( - _ context.Context, - attr *commandpb.ScheduleActivityTaskCommandAttributes, -) (*handleCommandResponse, error) { - - handler.metricsHandler.Counter(metrics.CommandTypeScheduleActivityCounter.GetMetricName()).Record(1) - - executionInfo := handler.mutableState.GetExecutionInfo() - namespaceID := namespace.ID(executionInfo.NamespaceId) - - if err := handler.validateCommandAttr( - func() (enumspb.WorkflowTaskFailedCause, error) { - return handler.attrValidator.validateActivityScheduleAttributes( - namespaceID, - attr, - timestamp.DurationValue(executionInfo.WorkflowRunTimeout), - ) - }, - ); err != nil || handler.stopProcessing { - return nil, err - } - - // TODO: relax this restriction after matching can support this - if attr.UseCompatibleVersion && attr.TaskQueue.GetName() != "" && attr.TaskQueue.Name != handler.mutableState.GetExecutionInfo().TaskQueue { - err := serviceerror.NewInvalidArgument("Activity with UseCompatibleVersion cannot run on different task queue.") - return nil, handler.failWorkflow(enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SCHEDULE_ACTIVITY_ATTRIBUTES, err) - } - - if err := handler.sizeLimitChecker.checkIfPayloadSizeExceedsLimit( - metrics.CommandTypeTag(enumspb.COMMAND_TYPE_SCHEDULE_ACTIVITY_TASK.String()), - attr.GetInput().Size(), - 
"ScheduleActivityTaskCommandAttributes.Input exceeds size limit.", - ); err != nil { - return nil, handler.failWorkflow(enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SCHEDULE_ACTIVITY_ATTRIBUTES, err) - } - if err := handler.sizeLimitChecker.checkIfNumPendingActivitiesExceedsLimit(); err != nil { - return nil, handler.failWorkflowTask(enumspb.WORKFLOW_TASK_FAILED_CAUSE_PENDING_ACTIVITIES_LIMIT_EXCEEDED, err) - } - - enums.SetDefaultTaskQueueKind(&attr.GetTaskQueue().Kind) - - eagerStartActivity := false - namespace := handler.mutableState.GetNamespaceEntry().Name().String() - if attr.RequestEagerExecution && handler.config.EnableActivityEagerExecution(namespace) { - eagerStartActivity = true - } - - _, _, err := handler.mutableState.AddActivityTaskScheduledEvent( - handler.workflowTaskCompletedID, - attr, - eagerStartActivity, - ) - if err != nil { - return nil, handler.failWorkflowTaskOnInvalidArgument(enumspb.WORKFLOW_TASK_FAILED_CAUSE_SCHEDULE_ACTIVITY_DUPLICATE_ID, err) - } - - if !eagerStartActivity { - return &handleCommandResponse{}, nil - } - - return &handleCommandResponse{ - commandPostAction: func(ctx context.Context) (workflowTaskResponseMutation, error) { - return handler.handlePostCommandEagerExecuteActivity(ctx, attr) - }, - }, nil -} - -func (handler *workflowTaskHandlerImpl) handlePostCommandEagerExecuteActivity( - _ context.Context, - attr *commandpb.ScheduleActivityTaskCommandAttributes, -) (workflowTaskResponseMutation, error) { - if !handler.mutableState.IsWorkflowExecutionRunning() { - // workflow closed in the same workflow task - // this function is executed as a callback after all workflow commands - // are handled, so need to check for workflow completion case. - return nil, nil - } - - ai, ok := handler.mutableState.GetActivityByActivityID(attr.ActivityId) - if !ok { - // activity cancelled in the same worflow task - return nil, nil - } - - if _, err := handler.mutableState.AddActivityTaskStartedEvent( - ai, - ai.GetScheduledEventId(), - uuid.New(), - handler.identity, - ); err != nil { - return nil, err - } - - executionInfo := handler.mutableState.GetExecutionInfo() - namespaceID := namespace.ID(executionInfo.NamespaceId) - runID := handler.mutableState.GetExecutionState().RunId - - shardClock, err := handler.shard.NewVectorClock() - if err != nil { - return nil, err - } - - taskToken := &tokenspb.Task{ - NamespaceId: namespaceID.String(), - WorkflowId: executionInfo.WorkflowId, - RunId: runID, - ScheduledEventId: ai.GetScheduledEventId(), - Attempt: ai.Attempt, - ActivityId: attr.ActivityId, - ActivityType: attr.ActivityType.GetName(), - Clock: shardClock, - } - serializedToken, err := handler.tokenSerializer.Serialize(taskToken) - if err != nil { - return nil, err - } - - activityTask := &workflowservice.PollActivityTaskQueueResponse{ - ActivityId: attr.ActivityId, - ActivityType: attr.ActivityType, - Header: attr.Header, - Input: attr.Input, - WorkflowExecution: &commonpb.WorkflowExecution{ - WorkflowId: executionInfo.WorkflowId, - RunId: runID, - }, - CurrentAttemptScheduledTime: ai.ScheduledTime, - ScheduledTime: ai.ScheduledTime, - ScheduleToCloseTimeout: attr.ScheduleToCloseTimeout, - StartedTime: ai.StartedTime, - StartToCloseTimeout: attr.StartToCloseTimeout, - HeartbeatTimeout: attr.HeartbeatTimeout, - TaskToken: serializedToken, - Attempt: ai.Attempt, - HeartbeatDetails: ai.LastHeartbeatDetails, - WorkflowType: handler.mutableState.GetWorkflowType(), - WorkflowNamespace: handler.mutableState.GetNamespaceEntry().Name().String(), - } - 
handler.metricsHandler.Counter( - metrics.ActivityEagerExecutionCounter.GetMetricName(), - ).Record( - 1, - metrics.NamespaceTag(string(handler.mutableState.GetNamespaceEntry().Name())), - metrics.TaskQueueTag(ai.TaskQueue), - ) - - return func(resp *historyservice.RespondWorkflowTaskCompletedResponse) error { - resp.ActivityTasks = append(resp.ActivityTasks, activityTask) - return nil - }, nil -} - -func (handler *workflowTaskHandlerImpl) handleCommandRequestCancelActivity( - _ context.Context, - attr *commandpb.RequestCancelActivityTaskCommandAttributes, -) error { - - handler.metricsHandler.Counter(metrics.CommandTypeCancelActivityCounter.GetMetricName()).Record(1) - - if err := handler.validateCommandAttr( - func() (enumspb.WorkflowTaskFailedCause, error) { - return handler.attrValidator.validateActivityCancelAttributes(attr) - }, - ); err != nil || handler.stopProcessing { - return err - } - - scheduledEventID := attr.GetScheduledEventId() - actCancelReqEvent, ai, err := handler.mutableState.AddActivityTaskCancelRequestedEvent( - handler.workflowTaskCompletedID, - scheduledEventID, - handler.identity, - ) - if err != nil { - return handler.failWorkflowTaskOnInvalidArgument(enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_REQUEST_CANCEL_ACTIVITY_ATTRIBUTES, err) - } - if ai != nil { - // If ai is nil, the activity has already been canceled/completed/timedout. The cancel request - // will be recorded in the history, but no further action will be taken. - - if ai.StartedEventId == common.EmptyEventID { - // We haven't started the activity yet, we can cancel the activity right away and - // schedule a workflow task to ensure the workflow makes progress. - _, err = handler.mutableState.AddActivityTaskCanceledEvent( - ai.ScheduledEventId, - ai.StartedEventId, - actCancelReqEvent.GetEventId(), - payloads.EncodeString(activityCancellationMsgActivityNotStarted), - handler.identity, - ) - if err != nil { - return err - } - handler.activityNotStartedCancelled = true - } - } - return nil -} - -func (handler *workflowTaskHandlerImpl) handleCommandStartTimer( - _ context.Context, - attr *commandpb.StartTimerCommandAttributes, -) error { - - handler.metricsHandler.Counter(metrics.CommandTypeStartTimerCounter.GetMetricName()).Record(1) - - if err := handler.validateCommandAttr( - func() (enumspb.WorkflowTaskFailedCause, error) { - return handler.attrValidator.validateTimerScheduleAttributes(attr) - }, - ); err != nil || handler.stopProcessing { - return err - } - - _, _, err := handler.mutableState.AddTimerStartedEvent(handler.workflowTaskCompletedID, attr) - if err != nil { - return handler.failWorkflowTaskOnInvalidArgument(enumspb.WORKFLOW_TASK_FAILED_CAUSE_START_TIMER_DUPLICATE_ID, err) - } - return nil -} - -func (handler *workflowTaskHandlerImpl) handleCommandCompleteWorkflow( - ctx context.Context, - attr *commandpb.CompleteWorkflowExecutionCommandAttributes, - msgs *collection.IndexedTakeList[string, *protocolpb.Message], -) error { - - for _, msg := range msgs.TakeRemaining() { - err := handler.handleMessage(ctx, msg) - if err != nil || handler.stopProcessing { - return err - } - } - - handler.metricsHandler.Counter(metrics.CommandTypeCompleteWorkflowCounter.GetMetricName()).Record(1) - - if handler.hasBufferedEvents { - return handler.failWorkflowTask(enumspb.WORKFLOW_TASK_FAILED_CAUSE_UNHANDLED_COMMAND, nil) - } - - handler.updateRegistry.TerminateUpdates(ctx, workflow.WithEffects(handler.effects, handler.mutableState)) - - if err := handler.validateCommandAttr( - func() 
(enumspb.WorkflowTaskFailedCause, error) { - return handler.attrValidator.validateCompleteWorkflowExecutionAttributes(attr) - }, - ); err != nil || handler.stopProcessing { - return err - } - - if err := handler.sizeLimitChecker.checkIfPayloadSizeExceedsLimit( - metrics.CommandTypeTag(enumspb.COMMAND_TYPE_COMPLETE_WORKFLOW_EXECUTION.String()), - attr.GetResult().Size(), - "CompleteWorkflowExecutionCommandAttributes.Result exceeds size limit.", - ); err != nil { - return handler.failWorkflow(enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SCHEDULE_ACTIVITY_ATTRIBUTES, err) - } - - // If the workflow task has more than one completion event than just pick the first one - if !handler.mutableState.IsWorkflowExecutionRunning() { - handler.metricsHandler.Counter(metrics.MultipleCompletionCommandsCounter.GetMetricName()).Record(1) - handler.logger.Warn( - "Multiple completion commands", - tag.WorkflowCommandType(enumspb.COMMAND_TYPE_COMPLETE_WORKFLOW_EXECUTION), - tag.ErrorTypeMultipleCompletionCommands, - ) - return nil - } - - cronBackoff := handler.mutableState.GetCronBackoffDuration() - var newExecutionRunID string - if cronBackoff != backoff.NoBackoff { - newExecutionRunID = uuid.New() - } - - // Always add workflow completed event to this one - _, err := handler.mutableState.AddCompletedWorkflowEvent(handler.workflowTaskCompletedID, attr, newExecutionRunID) - if err != nil { - return err - } - - // Check if this workflow has a cron schedule - if cronBackoff != backoff.NoBackoff { - return handler.handleCron(ctx, cronBackoff, attr.GetResult(), nil, newExecutionRunID) - } - - return nil -} - -func (handler *workflowTaskHandlerImpl) handleCommandFailWorkflow( - ctx context.Context, - attr *commandpb.FailWorkflowExecutionCommandAttributes, -) error { - - handler.metricsHandler.Counter(metrics.CommandTypeFailWorkflowCounter.GetMetricName()).Record(1) - - if handler.hasBufferedEvents { - return handler.failWorkflowTask(enumspb.WORKFLOW_TASK_FAILED_CAUSE_UNHANDLED_COMMAND, nil) - } - - handler.updateRegistry.TerminateUpdates(ctx, workflow.WithEffects(handler.effects, handler.mutableState)) - - if err := handler.validateCommandAttr( - func() (enumspb.WorkflowTaskFailedCause, error) { - return handler.attrValidator.validateFailWorkflowExecutionAttributes(attr) - }, - ); err != nil || handler.stopProcessing { - return err - } - - err := handler.sizeLimitChecker.checkIfPayloadSizeExceedsLimit( - metrics.CommandTypeTag(enumspb.COMMAND_TYPE_FAIL_WORKFLOW_EXECUTION.String()), - attr.GetFailure().Size(), - "FailWorkflowExecutionCommandAttributes.Failure exceeds size limit.", - ) - if err != nil { - return handler.failWorkflow(enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_FAIL_WORKFLOW_EXECUTION_ATTRIBUTES, err) - } - - // If the workflow task has more than one completion event than just pick the first one - if !handler.mutableState.IsWorkflowExecutionRunning() { - handler.metricsHandler.Counter(metrics.MultipleCompletionCommandsCounter.GetMetricName()).Record(1) - handler.logger.Warn( - "Multiple completion commands", - tag.WorkflowCommandType(enumspb.COMMAND_TYPE_FAIL_WORKFLOW_EXECUTION), - tag.ErrorTypeMultipleCompletionCommands, - ) - return nil - } - - // First check retry policy to do a retry. - retryBackoff, retryState := handler.mutableState.GetRetryBackoffDuration(attr.GetFailure()) - cronBackoff := backoff.NoBackoff - if retryBackoff == backoff.NoBackoff { - // If no retry, check cron. 
- cronBackoff = handler.mutableState.GetCronBackoffDuration() - } - - var newExecutionRunID string - if retryBackoff != backoff.NoBackoff || cronBackoff != backoff.NoBackoff { - newExecutionRunID = uuid.New() - } - - // Always add workflow failed event - if _, err = handler.mutableState.AddFailWorkflowEvent( - handler.workflowTaskCompletedID, - retryState, - attr, - newExecutionRunID, - ); err != nil { - return err - } - - // Handle retry or cron - if retryBackoff != backoff.NoBackoff { - return handler.handleRetry(ctx, retryBackoff, retryState, attr.GetFailure(), newExecutionRunID) - } else if cronBackoff != backoff.NoBackoff { - return handler.handleCron(ctx, cronBackoff, nil, attr.GetFailure(), newExecutionRunID) - } - - // No retry or cron - return nil -} - -func (handler *workflowTaskHandlerImpl) handleCommandCancelTimer( - _ context.Context, - attr *commandpb.CancelTimerCommandAttributes, -) error { - - handler.metricsHandler.Counter(metrics.CommandTypeCancelTimerCounter.GetMetricName()).Record(1) - - if err := handler.validateCommandAttr( - func() (enumspb.WorkflowTaskFailedCause, error) { - return handler.attrValidator.validateTimerCancelAttributes(attr) - }, - ); err != nil || handler.stopProcessing { - return err - } - - _, err := handler.mutableState.AddTimerCanceledEvent( - handler.workflowTaskCompletedID, - attr, - handler.identity) - if err != nil { - return handler.failWorkflowTaskOnInvalidArgument(enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_CANCEL_TIMER_ATTRIBUTES, err) - } - - // In case the timer was cancelled and its TimerFired event was deleted from buffered events, attempt - // to unset hasBufferedEvents to allow the workflow to complete. - handler.hasBufferedEvents = handler.hasBufferedEvents && handler.mutableState.HasBufferedEvents() - return nil -} - -func (handler *workflowTaskHandlerImpl) handleCommandCancelWorkflow( - ctx context.Context, - attr *commandpb.CancelWorkflowExecutionCommandAttributes, -) error { - - handler.metricsHandler.Counter(metrics.CommandTypeCancelWorkflowCounter.GetMetricName()).Record(1) - - if handler.hasBufferedEvents { - return handler.failWorkflowTask(enumspb.WORKFLOW_TASK_FAILED_CAUSE_UNHANDLED_COMMAND, nil) - } - - handler.updateRegistry.TerminateUpdates(ctx, workflow.WithEffects(handler.effects, handler.mutableState)) - - if err := handler.validateCommandAttr( - func() (enumspb.WorkflowTaskFailedCause, error) { - return handler.attrValidator.validateCancelWorkflowExecutionAttributes(attr) - }, - ); err != nil || handler.stopProcessing { - return err - } - - // If the workflow task has more than one completion event than just pick the first one - if !handler.mutableState.IsWorkflowExecutionRunning() { - handler.metricsHandler.Counter(metrics.MultipleCompletionCommandsCounter.GetMetricName()).Record(1) - handler.logger.Warn( - "Multiple completion commands", - tag.WorkflowCommandType(enumspb.COMMAND_TYPE_CANCEL_WORKFLOW_EXECUTION), - tag.ErrorTypeMultipleCompletionCommands, - ) - return nil - } - - _, err := handler.mutableState.AddWorkflowExecutionCanceledEvent(handler.workflowTaskCompletedID, attr) - return err -} - -func (handler *workflowTaskHandlerImpl) handleCommandRequestCancelExternalWorkflow( - _ context.Context, - attr *commandpb.RequestCancelExternalWorkflowExecutionCommandAttributes, -) error { - - handler.metricsHandler.Counter(metrics.CommandTypeCancelExternalWorkflowCounter.GetMetricName()).Record(1) - - executionInfo := handler.mutableState.GetExecutionInfo() - namespaceID := namespace.ID(executionInfo.NamespaceId) - 
targetNamespaceID := namespaceID - if attr.GetNamespace() != "" { - targetNamespaceEntry, err := handler.namespaceRegistry.GetNamespace(namespace.Name(attr.GetNamespace())) - if err != nil { - return err - } - targetNamespaceID = targetNamespaceEntry.ID() - } - - if err := handler.validateCommandAttr( - func() (enumspb.WorkflowTaskFailedCause, error) { - return handler.attrValidator.validateCancelExternalWorkflowExecutionAttributes( - namespaceID, - targetNamespaceID, - handler.initiatedChildExecutionsInBatch, - attr, - ) - }, - ); err != nil || handler.stopProcessing { - return err - } - if err := handler.sizeLimitChecker.checkIfNumPendingCancelRequestsExceedsLimit(); err != nil { - return handler.failWorkflowTask(enumspb.WORKFLOW_TASK_FAILED_CAUSE_PENDING_REQUEST_CANCEL_LIMIT_EXCEEDED, err) - } - - cancelRequestID := uuid.New() - _, _, err := handler.mutableState.AddRequestCancelExternalWorkflowExecutionInitiatedEvent( - handler.workflowTaskCompletedID, cancelRequestID, attr, targetNamespaceID, - ) - - return err -} - -func (handler *workflowTaskHandlerImpl) handleCommandRecordMarker( - _ context.Context, - attr *commandpb.RecordMarkerCommandAttributes, -) error { - - handler.metricsHandler.Counter(metrics.CommandTypeRecordMarkerCounter.GetMetricName()).Record(1) - - if err := handler.validateCommandAttr( - func() (enumspb.WorkflowTaskFailedCause, error) { - return handler.attrValidator.validateRecordMarkerAttributes(attr) - }, - ); err != nil || handler.stopProcessing { - return err - } - - if err := handler.sizeLimitChecker.checkIfPayloadSizeExceedsLimit( - metrics.CommandTypeTag(enumspb.COMMAND_TYPE_RECORD_MARKER.String()), - common.GetPayloadsMapSize(attr.GetDetails()), - "RecordMarkerCommandAttributes.Details exceeds size limit.", - ); err != nil { - return handler.failWorkflow(enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_RECORD_MARKER_ATTRIBUTES, err) - } - - _, err := handler.mutableState.AddRecordMarkerEvent(handler.workflowTaskCompletedID, attr) - return err -} - -func (handler *workflowTaskHandlerImpl) handleCommandContinueAsNewWorkflow( - ctx context.Context, - attr *commandpb.ContinueAsNewWorkflowExecutionCommandAttributes, -) error { - - handler.metricsHandler.Counter(metrics.CommandTypeContinueAsNewCounter.GetMetricName()).Record(1) - - if handler.hasBufferedEvents { - return handler.failWorkflowTask(enumspb.WORKFLOW_TASK_FAILED_CAUSE_UNHANDLED_COMMAND, nil) - } - - handler.updateRegistry.TerminateUpdates(ctx, workflow.WithEffects(handler.effects, handler.mutableState)) - - namespaceName := handler.mutableState.GetNamespaceEntry().Name() - - unaliasedSas, err := searchattribute.UnaliasFields( - handler.searchAttributesMapperProvider, - attr.GetSearchAttributes(), - namespaceName.String(), - ) - if err != nil { - return handler.failWorkflowTaskOnInvalidArgument(enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SEARCH_ATTRIBUTES, err) - } - if unaliasedSas != nil { - // Create a shallow copy of the `attr` to avoid modification of original `attr`, - // which can be needed again in case of retry. 
- newAttr := *attr - newAttr.SearchAttributes = unaliasedSas - attr = &newAttr - } - - if err := handler.validateCommandAttr( - func() (enumspb.WorkflowTaskFailedCause, error) { - return handler.attrValidator.validateContinueAsNewWorkflowExecutionAttributes( - namespaceName, - attr, - handler.mutableState.GetExecutionInfo(), - ) - }, - ); err != nil || handler.stopProcessing { - return err - } - - // TODO: relax this restriction after matching can support this - if attr.UseCompatibleVersion && attr.TaskQueue.GetName() != "" && attr.TaskQueue.Name != handler.mutableState.GetExecutionInfo().TaskQueue { - err := serviceerror.NewInvalidArgument("ContinueAsNew with UseCompatibleVersion cannot run on different task queue.") - return handler.failWorkflow(enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_CONTINUE_AS_NEW_ATTRIBUTES, err) - } - - if err := handler.sizeLimitChecker.checkIfPayloadSizeExceedsLimit( - metrics.CommandTypeTag(enumspb.COMMAND_TYPE_CONTINUE_AS_NEW_WORKFLOW_EXECUTION.String()), - attr.GetInput().Size(), - "ContinueAsNewWorkflowExecutionCommandAttributes. Input exceeds size limit.", - ); err != nil { - return handler.failWorkflow(enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_CONTINUE_AS_NEW_ATTRIBUTES, err) - } - - if err := handler.sizeLimitChecker.checkIfMemoSizeExceedsLimit( - attr.GetMemo(), - metrics.CommandTypeTag(enumspb.COMMAND_TYPE_CONTINUE_AS_NEW_WORKFLOW_EXECUTION.String()), - "ContinueAsNewWorkflowExecutionCommandAttributes. Memo exceeds size limit.", - ); err != nil { - return handler.failWorkflow(enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_CONTINUE_AS_NEW_ATTRIBUTES, err) - } - - // search attribute validation must be done after unaliasing keys - if err := handler.sizeLimitChecker.checkIfSearchAttributesSizeExceedsLimit( - attr.GetSearchAttributes(), - namespaceName, - metrics.CommandTypeTag(enumspb.COMMAND_TYPE_CONTINUE_AS_NEW_WORKFLOW_EXECUTION.String()), - ); err != nil { - return handler.failWorkflow(enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_CONTINUE_AS_NEW_ATTRIBUTES, err) - } - - // If the workflow task has more than one completion event than just pick the first one - if !handler.mutableState.IsWorkflowExecutionRunning() { - handler.metricsHandler.Counter(metrics.MultipleCompletionCommandsCounter.GetMetricName()).Record(1) - handler.logger.Warn( - "Multiple completion commands", - tag.WorkflowCommandType(enumspb.COMMAND_TYPE_CONTINUE_AS_NEW_WORKFLOW_EXECUTION), - tag.ErrorTypeMultipleCompletionCommands, - ) - return nil - } - - // Extract parentNamespace, so it can be passed down to next run of workflow execution - var parentNamespace namespace.Name - if handler.mutableState.HasParentExecution() { - parentNamespaceID := namespace.ID(handler.mutableState.GetExecutionInfo().ParentNamespaceId) - parentNamespaceEntry, err := handler.namespaceRegistry.GetNamespaceByID(parentNamespaceID) - if err == nil { - parentNamespace = parentNamespaceEntry.Name() - } - } - - _, newMutableState, err := handler.mutableState.AddContinueAsNewEvent( - ctx, - handler.workflowTaskCompletedID, - handler.workflowTaskCompletedID, - parentNamespace, - attr, - ) - if err != nil { - return err - } - - handler.newMutableState = newMutableState - return nil -} - -func (handler *workflowTaskHandlerImpl) handleCommandStartChildWorkflow( - _ context.Context, - attr *commandpb.StartChildWorkflowExecutionCommandAttributes, -) error { - - handler.metricsHandler.Counter(metrics.CommandTypeChildWorkflowCounter.GetMetricName()).Record(1) - - parentNamespaceEntry := handler.mutableState.GetNamespaceEntry() - 
parentNamespaceID := parentNamespaceEntry.ID() - parentNamespace := parentNamespaceEntry.Name() - targetNamespaceID := parentNamespaceID - targetNamespace := parentNamespace - if attr.GetNamespace() != "" { - targetNamespaceEntry, err := handler.namespaceRegistry.GetNamespace(namespace.Name(attr.GetNamespace())) - if err != nil { - return err - } - targetNamespace = targetNamespaceEntry.Name() - targetNamespaceID = targetNamespaceEntry.ID() - } else { - attr.Namespace = parentNamespace.String() - } - - unaliasedSas, err := searchattribute.UnaliasFields( - handler.searchAttributesMapperProvider, - attr.GetSearchAttributes(), - targetNamespace.String(), - ) - if err != nil { - return handler.failWorkflowTaskOnInvalidArgument(enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SEARCH_ATTRIBUTES, err) - } - if unaliasedSas != nil { - // Create a shallow copy of the `attr` to avoid modification of original `attr`, - // which can be needed again in case of retry. - newAttr := *attr - newAttr.SearchAttributes = unaliasedSas - attr = &newAttr - } - - if err := handler.validateCommandAttr( - func() (enumspb.WorkflowTaskFailedCause, error) { - return handler.attrValidator.validateStartChildExecutionAttributes( - parentNamespaceID, - targetNamespaceID, - targetNamespace, - attr, - handler.mutableState.GetExecutionInfo(), - handler.config.DefaultWorkflowTaskTimeout, - ) - }, - ); err != nil || handler.stopProcessing { - return err - } - - // TODO: relax this restriction after matching can support this - if attr.UseCompatibleVersion && attr.TaskQueue.GetName() != "" && attr.TaskQueue.Name != handler.mutableState.GetExecutionInfo().TaskQueue { - err := serviceerror.NewInvalidArgument("StartChildWorkflowExecution with UseCompatibleVersion cannot run on different task queue.") - return handler.failWorkflow(enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_START_CHILD_EXECUTION_ATTRIBUTES, err) - } - - if err := handler.sizeLimitChecker.checkIfPayloadSizeExceedsLimit( - metrics.CommandTypeTag(enumspb.COMMAND_TYPE_START_CHILD_WORKFLOW_EXECUTION.String()), - attr.GetInput().Size(), - "StartChildWorkflowExecutionCommandAttributes. 
Input exceeds size limit.", - ); err != nil { - return handler.failWorkflow(enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_START_CHILD_EXECUTION_ATTRIBUTES, err) - } - - if err := handler.sizeLimitChecker.checkIfMemoSizeExceedsLimit( - attr.GetMemo(), - metrics.CommandTypeTag(enumspb.COMMAND_TYPE_START_CHILD_WORKFLOW_EXECUTION.String()), - "StartChildWorkflowExecutionCommandAttributes.Memo exceeds size limit.", - ); err != nil { - return handler.failWorkflow(enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_START_CHILD_EXECUTION_ATTRIBUTES, err) - } - - // search attribute validation must be done after unaliasing keys - if err := handler.sizeLimitChecker.checkIfSearchAttributesSizeExceedsLimit( - attr.GetSearchAttributes(), - targetNamespace, - metrics.CommandTypeTag(enumspb.COMMAND_TYPE_START_CHILD_WORKFLOW_EXECUTION.String()), - ); err != nil { - return handler.failWorkflow(enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_START_CHILD_EXECUTION_ATTRIBUTES, err) - } - - // child workflow limit - if err := handler.sizeLimitChecker.checkIfNumChildWorkflowsExceedsLimit(); err != nil { - return handler.failWorkflowTask(enumspb.WORKFLOW_TASK_FAILED_CAUSE_PENDING_CHILD_WORKFLOWS_LIMIT_EXCEEDED, err) - } - - enabled := handler.config.EnableParentClosePolicy(parentNamespace.String()) - if enabled { - enums.SetDefaultParentClosePolicy(&attr.ParentClosePolicy) - } else { - attr.ParentClosePolicy = enumspb.PARENT_CLOSE_POLICY_ABANDON - } - - enums.SetDefaultWorkflowIdReusePolicy(&attr.WorkflowIdReusePolicy) - - requestID := uuid.New() - _, _, err = handler.mutableState.AddStartChildWorkflowExecutionInitiatedEvent( - handler.workflowTaskCompletedID, requestID, attr, targetNamespaceID, - ) - if err == nil { - // Keep track of all child initiated commands in this workflow task to validate request cancel commands - handler.initiatedChildExecutionsInBatch[attr.GetWorkflowId()] = struct{}{} - } - return err -} - -func (handler *workflowTaskHandlerImpl) handleCommandSignalExternalWorkflow( - _ context.Context, - attr *commandpb.SignalExternalWorkflowExecutionCommandAttributes, -) error { - - handler.metricsHandler.Counter(metrics.CommandTypeSignalExternalWorkflowCounter.GetMetricName()).Record(1) - - executionInfo := handler.mutableState.GetExecutionInfo() - namespaceID := namespace.ID(executionInfo.NamespaceId) - targetNamespaceID := namespaceID - if attr.GetNamespace() != "" { - targetNamespaceEntry, err := handler.namespaceRegistry.GetNamespace(namespace.Name(attr.GetNamespace())) - if err != nil { - return err - } - targetNamespaceID = targetNamespaceEntry.ID() - } - - if err := handler.validateCommandAttr( - func() (enumspb.WorkflowTaskFailedCause, error) { - return handler.attrValidator.validateSignalExternalWorkflowExecutionAttributes( - namespaceID, - targetNamespaceID, - attr, - ) - }, - ); err != nil || handler.stopProcessing { - return err - } - if err := handler.sizeLimitChecker.checkIfNumPendingSignalsExceedsLimit(); err != nil { - return handler.failWorkflowTask(enumspb.WORKFLOW_TASK_FAILED_CAUSE_PENDING_SIGNALS_LIMIT_EXCEEDED, err) - } - - if err := handler.sizeLimitChecker.checkIfPayloadSizeExceedsLimit( - metrics.CommandTypeTag(enumspb.COMMAND_TYPE_SIGNAL_EXTERNAL_WORKFLOW_EXECUTION.String()), - attr.GetInput().Size(), - "SignalExternalWorkflowExecutionCommandAttributes.Input exceeds size limit.", - ); err != nil { - return handler.failWorkflow(enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SIGNAL_WORKFLOW_EXECUTION_ATTRIBUTES, err) - } - - signalRequestID := uuid.New() // for deduplicate - _, _, err := 
handler.mutableState.AddSignalExternalWorkflowExecutionInitiatedEvent( - handler.workflowTaskCompletedID, signalRequestID, attr, targetNamespaceID, - ) - return err -} - -func (handler *workflowTaskHandlerImpl) handleCommandUpsertWorkflowSearchAttributes( - _ context.Context, - attr *commandpb.UpsertWorkflowSearchAttributesCommandAttributes, -) error { - - handler.metricsHandler.Counter(metrics.CommandTypeUpsertWorkflowSearchAttributesCounter.GetMetricName()).Record(1) - - // get namespace name - executionInfo := handler.mutableState.GetExecutionInfo() - namespaceID := namespace.ID(executionInfo.NamespaceId) - namespaceEntry, err := handler.namespaceRegistry.GetNamespaceByID(namespaceID) - if err != nil { - return serviceerror.NewUnavailable(fmt.Sprintf("Unable to get namespace for namespaceID: %v.", namespaceID)) - } - namespace := namespaceEntry.Name() - - unaliasedSas, err := searchattribute.UnaliasFields( - handler.searchAttributesMapperProvider, - attr.GetSearchAttributes(), - namespace.String(), - ) - if err != nil { - return handler.failWorkflowTaskOnInvalidArgument(enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SEARCH_ATTRIBUTES, err) - } - if unaliasedSas != nil { - // Create a shallow copy of the `attr` to avoid modification of original `attr`, - // which can be needed again in case of retry. - newAttr := *attr - newAttr.SearchAttributes = unaliasedSas - attr = &newAttr - } - - // valid search attributes for upsert - if err := handler.validateCommandAttr( - func() (enumspb.WorkflowTaskFailedCause, error) { - return handler.attrValidator.validateUpsertWorkflowSearchAttributes(namespace, attr) - }, - ); err != nil || handler.stopProcessing { - return err - } - - // blob size limit check - if err := handler.sizeLimitChecker.checkIfPayloadSizeExceedsLimit( - metrics.CommandTypeTag(enumspb.COMMAND_TYPE_UPSERT_WORKFLOW_SEARCH_ATTRIBUTES.String()), - payloadsMapSize(attr.GetSearchAttributes().GetIndexedFields()), - "UpsertWorkflowSearchAttributesCommandAttributes exceeds size limit.", - ); err != nil { - return handler.failWorkflow(enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SEARCH_ATTRIBUTES, err) - } - - // new search attributes size limit check - // search attribute validation must be done after unaliasing keys - err = handler.sizeLimitChecker.checkIfSearchAttributesSizeExceedsLimit( - &commonpb.SearchAttributes{ - IndexedFields: payload.MergeMapOfPayload( - executionInfo.SearchAttributes, - attr.GetSearchAttributes().GetIndexedFields(), - ), - }, - namespace, - metrics.CommandTypeTag(enumspb.COMMAND_TYPE_UPSERT_WORKFLOW_SEARCH_ATTRIBUTES.String()), - ) - if err != nil { - return handler.failWorkflow(enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SEARCH_ATTRIBUTES, err) - } - - _, err = handler.mutableState.AddUpsertWorkflowSearchAttributesEvent( - handler.workflowTaskCompletedID, attr, - ) - return err -} - -func (handler *workflowTaskHandlerImpl) handleCommandModifyWorkflowProperties( - _ context.Context, - attr *commandpb.ModifyWorkflowPropertiesCommandAttributes, -) error { - - handler.metricsHandler.Counter(metrics.CommandTypeModifyWorkflowPropertiesCounter.GetMetricName()).Record(1) - - // get namespace name - executionInfo := handler.mutableState.GetExecutionInfo() - namespaceID := namespace.ID(executionInfo.NamespaceId) - namespaceEntry, err := handler.namespaceRegistry.GetNamespaceByID(namespaceID) - if err != nil { - return serviceerror.NewUnavailable(fmt.Sprintf("Unable to get namespace for namespaceID: %v.", namespaceID)) - } - namespace := namespaceEntry.Name() - - // valid properties - if 
err := handler.validateCommandAttr( - func() (enumspb.WorkflowTaskFailedCause, error) { - return handler.attrValidator.validateModifyWorkflowProperties(namespace, attr) - }, - ); err != nil || handler.stopProcessing { - return err - } - - // blob size limit check - if err := handler.sizeLimitChecker.checkIfPayloadSizeExceedsLimit( - metrics.CommandTypeTag(enumspb.COMMAND_TYPE_MODIFY_WORKFLOW_PROPERTIES.String()), - payloadsMapSize(attr.GetUpsertedMemo().GetFields()), - "ModifyWorkflowPropertiesCommandAttributes exceeds size limit.", - ); err != nil { - return handler.failWorkflow(enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_MODIFY_WORKFLOW_PROPERTIES_ATTRIBUTES, err) - } - - // new memo size limit check - err = handler.sizeLimitChecker.checkIfMemoSizeExceedsLimit( - &commonpb.Memo{ - Fields: payload.MergeMapOfPayload(executionInfo.Memo, attr.GetUpsertedMemo().GetFields()), - }, - metrics.CommandTypeTag(enumspb.COMMAND_TYPE_MODIFY_WORKFLOW_PROPERTIES.String()), - "ModifyWorkflowPropertiesCommandAttributes. Memo exceeds size limit.", - ) - if err != nil { - return handler.failWorkflow(enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_MODIFY_WORKFLOW_PROPERTIES_ATTRIBUTES, err) - } - - _, err = handler.mutableState.AddWorkflowPropertiesModifiedEvent( - handler.workflowTaskCompletedID, attr, - ) - return err -} - -func payloadsMapSize(fields map[string]*commonpb.Payload) int { - result := 0 - - for k, v := range fields { - result += len(k) - result += len(v.GetData()) - } - return result -} - -func (handler *workflowTaskHandlerImpl) handleRetry( - ctx context.Context, - backoffInterval time.Duration, - retryState enumspb.RetryState, - failure *failurepb.Failure, - newRunID string, -) error { - startEvent, err := handler.mutableState.GetStartEvent(ctx) - if err != nil { - return err - } - startAttr := startEvent.GetWorkflowExecutionStartedEventAttributes() - - newMutableState := workflow.NewMutableState( - handler.shard, - handler.shard.GetEventsCache(), - handler.shard.GetLogger(), - handler.mutableState.GetNamespaceEntry(), - handler.shard.GetTimeSource().Now(), - ) - - err = workflow.SetupNewWorkflowForRetryOrCron( - ctx, - handler.mutableState, - newMutableState, - newRunID, - startAttr, - nil, - failure, - backoffInterval, - enumspb.CONTINUE_AS_NEW_INITIATOR_RETRY, - ) - if err != nil { - return err - } - - err = newMutableState.SetHistoryTree( - ctx, - newMutableState.GetExecutionInfo().WorkflowExecutionTimeout, - newMutableState.GetExecutionInfo().WorkflowRunTimeout, - newRunID, - ) - if err != nil { - return err - } - - handler.newMutableState = newMutableState - return nil -} - -func (handler *workflowTaskHandlerImpl) handleCron( - ctx context.Context, - backoffInterval time.Duration, - lastCompletionResult *commonpb.Payloads, - failure *failurepb.Failure, - newRunID string, -) error { - startEvent, err := handler.mutableState.GetStartEvent(ctx) - if err != nil { - return err - } - startAttr := startEvent.GetWorkflowExecutionStartedEventAttributes() - - if failure != nil { - lastCompletionResult = startAttr.LastCompletionResult - } - - newMutableState := workflow.NewMutableState( - handler.shard, - handler.shard.GetEventsCache(), - handler.shard.GetLogger(), - handler.mutableState.GetNamespaceEntry(), - handler.shard.GetTimeSource().Now(), - ) - - err = workflow.SetupNewWorkflowForRetryOrCron( - ctx, - handler.mutableState, - newMutableState, - newRunID, - startAttr, - lastCompletionResult, - failure, - backoffInterval, - enumspb.CONTINUE_AS_NEW_INITIATOR_CRON_SCHEDULE, - ) - if err != nil { - return 
err - } - - err = newMutableState.SetHistoryTree( - ctx, - newMutableState.GetExecutionInfo().WorkflowExecutionTimeout, - newMutableState.GetExecutionInfo().WorkflowRunTimeout, - newRunID, - ) - if err != nil { - return err - } - - handler.newMutableState = newMutableState - return nil -} - -func (handler *workflowTaskHandlerImpl) validateCommandAttr( - validationFn commandAttrValidationFn, -) error { - - return handler.failWorkflowTaskOnInvalidArgument(validationFn()) -} - -func (handler *workflowTaskHandlerImpl) failWorkflowTaskOnInvalidArgument( - wtFailedCause enumspb.WorkflowTaskFailedCause, - err error, -) error { - - switch err.(type) { - case *serviceerror.InvalidArgument: - return handler.failWorkflowTask(wtFailedCause, err) - default: - return err - } -} - -func (handler *workflowTaskHandlerImpl) failWorkflowTask( - failedCause enumspb.WorkflowTaskFailedCause, - causeErr error, -) error { - - handler.workflowTaskFailedCause = newWorkflowTaskFailedCause( - failedCause, - causeErr, - nil) - handler.stopProcessing = true - // NOTE: failWorkflowTask always return nil. - // It is important to clear returned error if WT needs to be failed to properly add WTFailed event. - // Handler will rely on stopProcessing flag and workflowTaskFailedCause field. - return nil -} - -func (handler *workflowTaskHandlerImpl) failWorkflow( - failedCause enumspb.WorkflowTaskFailedCause, - causeErr error, -) error { - - handler.workflowTaskFailedCause = newWorkflowTaskFailedCause( - failedCause, - causeErr, - failure.NewServerFailure(causeErr.Error(), true)) - handler.stopProcessing = true - // NOTE: failWorkflow always return nil. - // It is important to clear returned error if WT needs to be failed to properly add WTFailed and FailWorkflow events. - // Handler will rely on stopProcessing flag and workflowTaskFailedCause field. - return nil -} - -func newWorkflowTaskFailedCause(failedCause enumspb.WorkflowTaskFailedCause, causeErr error, workflowFailure *failurepb.Failure) *workflowTaskFailedCause { - - return &workflowTaskFailedCause{ - failedCause: failedCause, - causeErr: causeErr, - workflowFailure: workflowFailure, - } -} - -func (c *workflowTaskFailedCause) Message() string { - - if c.causeErr == nil { - return c.failedCause.String() - } - - return fmt.Sprintf("%v: %v", c.failedCause, c.causeErr.Error()) -} diff -Nru temporal-1.21.5-1/src/service/history/workflowTaskHandlerCallbacks.go temporal-1.22.5/src/service/history/workflowTaskHandlerCallbacks.go --- temporal-1.21.5-1/src/service/history/workflowTaskHandlerCallbacks.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/workflowTaskHandlerCallbacks.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,1040 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package history - -import ( - "context" - "fmt" - - commandpb "go.temporal.io/api/command/v1" - enumspb "go.temporal.io/api/enums/v1" - historypb "go.temporal.io/api/history/v1" - protocolpb "go.temporal.io/api/protocol/v1" - querypb "go.temporal.io/api/query/v1" - "go.temporal.io/api/serviceerror" - taskqueuepb "go.temporal.io/api/taskqueue/v1" - "go.temporal.io/api/workflowservice/v1" - - enumsspb "go.temporal.io/server/api/enums/v1" - "go.temporal.io/server/api/historyservice/v1" - "go.temporal.io/server/common" - "go.temporal.io/server/common/clock" - "go.temporal.io/server/common/collection" - "go.temporal.io/server/common/definition" - "go.temporal.io/server/common/failure" - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/log/tag" - "go.temporal.io/server/common/metrics" - "go.temporal.io/server/common/namespace" - "go.temporal.io/server/common/payloads" - "go.temporal.io/server/common/persistence" - "go.temporal.io/server/common/primitives/timestamp" - "go.temporal.io/server/common/searchattribute" - serviceerrors "go.temporal.io/server/common/serviceerror" - "go.temporal.io/server/internal/effect" - "go.temporal.io/server/service/history/api" - "go.temporal.io/server/service/history/configs" - "go.temporal.io/server/service/history/consts" - "go.temporal.io/server/service/history/shard" - "go.temporal.io/server/service/history/workflow" - "go.temporal.io/server/service/history/workflow/update" -) - -type ( - // workflow task business logic handler - workflowTaskHandlerCallbacks interface { - handleWorkflowTaskScheduled(context.Context, *historyservice.ScheduleWorkflowTaskRequest) error - handleWorkflowTaskStarted(context.Context, - *historyservice.RecordWorkflowTaskStartedRequest) (*historyservice.RecordWorkflowTaskStartedResponse, error) - handleWorkflowTaskFailed(context.Context, - *historyservice.RespondWorkflowTaskFailedRequest) error - handleWorkflowTaskCompleted(context.Context, - *historyservice.RespondWorkflowTaskCompletedRequest) (*historyservice.RespondWorkflowTaskCompletedResponse, error) - verifyFirstWorkflowTaskScheduled(context.Context, *historyservice.VerifyFirstWorkflowTaskScheduledRequest) error - // TODO also include the handle of workflow task timeout here - } - - workflowTaskHandlerCallbacksImpl struct { - currentClusterName string - config *configs.Config - shard shard.Context - workflowConsistencyChecker api.WorkflowConsistencyChecker - timeSource clock.TimeSource - namespaceRegistry namespace.Registry - tokenSerializer common.TaskTokenSerializer - metricsHandler metrics.Handler - logger log.Logger - throttledLogger log.Logger - commandAttrValidator *commandAttrValidator - searchAttributesMapperProvider searchattribute.MapperProvider - searchAttributesValidator *searchattribute.Validator - } -) - -func newWorkflowTaskHandlerCallback(historyEngine *historyEngineImpl) *workflowTaskHandlerCallbacksImpl { - return &workflowTaskHandlerCallbacksImpl{ - currentClusterName: historyEngine.currentClusterName, - config: historyEngine.config, - shard: 
historyEngine.shard, - workflowConsistencyChecker: historyEngine.workflowConsistencyChecker, - timeSource: historyEngine.shard.GetTimeSource(), - namespaceRegistry: historyEngine.shard.GetNamespaceRegistry(), - tokenSerializer: historyEngine.tokenSerializer, - metricsHandler: historyEngine.metricsHandler, - logger: historyEngine.logger, - throttledLogger: historyEngine.throttledLogger, - commandAttrValidator: newCommandAttrValidator( - historyEngine.shard.GetNamespaceRegistry(), - historyEngine.config, - historyEngine.searchAttributesValidator, - ), - searchAttributesMapperProvider: historyEngine.shard.GetSearchAttributesMapperProvider(), - searchAttributesValidator: historyEngine.searchAttributesValidator, - } -} - -func (handler *workflowTaskHandlerCallbacksImpl) handleWorkflowTaskScheduled( - ctx context.Context, - req *historyservice.ScheduleWorkflowTaskRequest, -) error { - - _, err := api.GetActiveNamespace(handler.shard, namespace.ID(req.GetNamespaceId())) - if err != nil { - return err - } - - return api.GetAndUpdateWorkflowWithNew( - ctx, - req.ChildClock, - api.BypassMutableStateConsistencyPredicate, - definition.NewWorkflowKey( - req.NamespaceId, - req.WorkflowExecution.WorkflowId, - req.WorkflowExecution.RunId, - ), - func(workflowContext api.WorkflowContext) (*api.UpdateWorkflowAction, error) { - mutableState := workflowContext.GetMutableState() - if !mutableState.IsWorkflowExecutionRunning() { - return nil, consts.ErrWorkflowCompleted - } - - if req.IsFirstWorkflowTask && mutableState.HadOrHasWorkflowTask() { - return &api.UpdateWorkflowAction{ - Noop: true, - }, nil - } - - startEvent, err := mutableState.GetStartEvent(ctx) - if err != nil { - return nil, err - } - if _, err := mutableState.AddFirstWorkflowTaskScheduled(req.ParentClock, startEvent, false); err != nil { - return nil, err - } - - return &api.UpdateWorkflowAction{}, nil - }, - nil, - handler.shard, - handler.workflowConsistencyChecker, - ) -} - -func (handler *workflowTaskHandlerCallbacksImpl) handleWorkflowTaskStarted( - ctx context.Context, - req *historyservice.RecordWorkflowTaskStartedRequest, -) (*historyservice.RecordWorkflowTaskStartedResponse, error) { - namespaceEntry, err := api.GetActiveNamespace(handler.shard, namespace.ID(req.GetNamespaceId())) - if err != nil { - return nil, err - } - - scheduledEventID := req.GetScheduledEventId() - requestID := req.GetRequestId() - - var resp *historyservice.RecordWorkflowTaskStartedResponse - err = api.GetAndUpdateWorkflowWithNew( - ctx, - req.Clock, - api.BypassMutableStateConsistencyPredicate, - definition.NewWorkflowKey( - req.NamespaceId, - req.WorkflowExecution.WorkflowId, - req.WorkflowExecution.RunId, - ), - func(workflowContext api.WorkflowContext) (*api.UpdateWorkflowAction, error) { - mutableState := workflowContext.GetMutableState() - if !mutableState.IsWorkflowExecutionRunning() { - return nil, consts.ErrWorkflowCompleted - } - - workflowTask := mutableState.GetWorkflowTaskByID(scheduledEventID) - metricsScope := handler.metricsHandler.WithTags(metrics.OperationTag(metrics.HistoryRecordWorkflowTaskStartedScope)) - - // First check to see if cache needs to be refreshed as we could potentially have stale workflow execution in - // some extreme cassandra failure cases. 
- if workflowTask == nil && scheduledEventID >= mutableState.GetNextEventID() { - metricsScope.Counter(metrics.StaleMutableStateCounter.GetMetricName()).Record(1) - // Reload workflow execution history - // ErrStaleState will trigger updateWorkflow function to reload the mutable state - return nil, consts.ErrStaleState - } - - // Check execution state to make sure task is in the list of outstanding tasks and it is not yet started. If - // task is not outstanding than it is most probably a duplicate and complete the task. - if workflowTask == nil { - // Looks like WorkflowTask already completed as a result of another call. - // It is OK to drop the task at this point. - return nil, serviceerror.NewNotFound("Workflow task not found.") - } - - updateAction := &api.UpdateWorkflowAction{} - - if workflowTask.StartedEventID != common.EmptyEventID { - // If workflow task is started as part of the current request scope then return a positive response - if workflowTask.RequestID == requestID { - resp, err = handler.createRecordWorkflowTaskStartedResponse(mutableState, workflowContext.GetUpdateRegistry(ctx), workflowTask, req.PollRequest.GetIdentity()) - if err != nil { - return nil, err - } - updateAction.Noop = true - return updateAction, nil - } - - // Looks like WorkflowTask already started as a result of another call. - // It is OK to drop the task at this point. - return nil, serviceerrors.NewTaskAlreadyStarted("Workflow") - } - - // Assuming a workflow is running on a sticky task queue by a workerA. - // After workerA is dead for more than 10s, matching will return StickyWorkerUnavailable error when history - // tries to push a new workflow task. When history sees that error, it will fall back to push the task to - // its original normal task queue without clear its stickiness to avoid an extra persistence write. - // We will clear the stickiness here when that task is delivered to another worker polling from normal queue. - // The stickiness info is used by frontend to decide if it should send down partial history or full history. - // Sending down partial history will cost the worker an extra fetch to server for the full history. - currentTaskQueue := mutableState.CurrentTaskQueue() - if currentTaskQueue.Kind == enumspb.TASK_QUEUE_KIND_STICKY && - currentTaskQueue.GetName() != req.PollRequest.TaskQueue.GetName() { - // req.PollRequest.TaskQueue.GetName() may include partition, but we only check when sticky is enabled, - // and sticky queue never has partition, so it does not matter. 
- mutableState.ClearStickyTaskQueue() - } - - _, workflowTask, err = mutableState.AddWorkflowTaskStartedEvent( - scheduledEventID, - requestID, - req.PollRequest.TaskQueue, - req.PollRequest.Identity, - ) - if err != nil { - // Unable to add WorkflowTaskStarted event to history - return nil, err - } - - if workflowTask.Type == enumsspb.WORKFLOW_TASK_TYPE_SPECULATIVE { - updateAction.Noop = true - } - - workflowScheduleToStartLatency := workflowTask.StartedTime.Sub(*workflowTask.ScheduledTime) - namespaceName := namespaceEntry.Name() - taskQueue := workflowTask.TaskQueue - metrics.GetPerTaskQueueScope( - metricsScope, - namespaceName.String(), - taskQueue.GetName(), - taskQueue.GetKind(), - ).Timer(metrics.TaskScheduleToStartLatency.GetMetricName()).Record( - workflowScheduleToStartLatency, - metrics.TaskQueueTypeTag(enumspb.TASK_QUEUE_TYPE_WORKFLOW), - ) - - resp, err = handler.createRecordWorkflowTaskStartedResponse(mutableState, workflowContext.GetUpdateRegistry(ctx), workflowTask, req.PollRequest.GetIdentity()) - if err != nil { - return nil, err - } - return updateAction, nil - }, - nil, - handler.shard, - handler.workflowConsistencyChecker, - ) - - if err != nil { - return nil, err - } - return resp, nil -} - -func (handler *workflowTaskHandlerCallbacksImpl) handleWorkflowTaskFailed( - ctx context.Context, - req *historyservice.RespondWorkflowTaskFailedRequest, -) (retError error) { - - _, err := api.GetActiveNamespace(handler.shard, namespace.ID(req.GetNamespaceId())) - if err != nil { - return err - } - - request := req.FailedRequest - token, err := handler.tokenSerializer.Deserialize(request.TaskToken) - if err != nil { - return consts.ErrDeserializingToken - } - - return api.GetAndUpdateWorkflowWithNew( - ctx, - token.Clock, - api.BypassMutableStateConsistencyPredicate, - definition.NewWorkflowKey( - token.NamespaceId, - token.WorkflowId, - token.RunId, - ), - func(workflowContext api.WorkflowContext) (*api.UpdateWorkflowAction, error) { - mutableState := workflowContext.GetMutableState() - if !mutableState.IsWorkflowExecutionRunning() { - return nil, consts.ErrWorkflowCompleted - } - - scheduledEventID := token.GetScheduledEventId() - workflowTask := mutableState.GetWorkflowTaskByID(scheduledEventID) - - if workflowTask == nil || - workflowTask.StartedEventID == common.EmptyEventID || - (token.StartedEventId != common.EmptyEventID && token.StartedEventId != workflowTask.StartedEventID) || - (token.StartedTime != nil && workflowTask.StartedTime != nil && !token.StartedTime.Equal(*workflowTask.StartedTime)) || - workflowTask.Attempt != token.Attempt { - // we have not alter mutable state yet, so release with it with nil to avoid clear MS. - workflowContext.GetReleaseFn()(nil) - return nil, serviceerror.NewNotFound("Workflow task not found.") - } - - if _, err := mutableState.AddWorkflowTaskFailedEvent( - workflowTask, - request.GetCause(), - request.GetFailure(), - request.GetIdentity(), - request.GetBinaryChecksum(), - "", - "", - 0); err != nil { - return nil, err - } - - // TODO (alex-update): if it was speculative WT that failed, and there is nothing but pending updates, - // new WT also should be create as speculative (or not?). Currently, it will be recreated as normal WT. 
- return &api.UpdateWorkflowAction{ - Noop: false, - CreateWorkflowTask: true, - }, nil - }, - nil, - handler.shard, - handler.workflowConsistencyChecker, - ) -} - -func (handler *workflowTaskHandlerCallbacksImpl) handleWorkflowTaskCompleted( - ctx context.Context, - req *historyservice.RespondWorkflowTaskCompletedRequest, -) (_ *historyservice.RespondWorkflowTaskCompletedResponse, retError error) { - namespaceEntry, err := api.GetActiveNamespace(handler.shard, namespace.ID(req.GetNamespaceId())) - if err != nil { - return nil, err - } - - request := req.CompleteRequest - token, err0 := handler.tokenSerializer.Deserialize(request.TaskToken) - if err0 != nil { - return nil, consts.ErrDeserializingToken - } - - workflowContext, err := handler.workflowConsistencyChecker.GetWorkflowContext( - ctx, - token.Clock, - func(mutableState workflow.MutableState) bool { - workflowTask := mutableState.GetWorkflowTaskByID(token.GetScheduledEventId()) - if workflowTask == nil && token.GetScheduledEventId() >= mutableState.GetNextEventID() { - handler.metricsHandler.Counter(metrics.StaleMutableStateCounter.GetMetricName()).Record( - 1, - metrics.OperationTag(metrics.HistoryRespondWorkflowTaskCompletedScope)) - return false - } - return true - }, - definition.NewWorkflowKey( - namespaceEntry.ID().String(), - token.WorkflowId, - token.RunId, - ), - workflow.LockPriorityHigh, - ) - if err != nil { - return nil, err - } - weContext := workflowContext.GetContext() - ms := workflowContext.GetMutableState() - - currentWorkflowTask := ms.GetWorkflowTaskByID(token.GetScheduledEventId()) - if !ms.IsWorkflowExecutionRunning() || - currentWorkflowTask == nil || - currentWorkflowTask.StartedEventID == common.EmptyEventID || - (token.StartedEventId != common.EmptyEventID && token.StartedEventId != currentWorkflowTask.StartedEventID) || - (token.StartedTime != nil && currentWorkflowTask.StartedTime != nil && !token.StartedTime.Equal(*currentWorkflowTask.StartedTime)) || - currentWorkflowTask.Attempt != token.Attempt { - // we have not alter mutable state yet, so release with it with nil to avoid clear MS. - workflowContext.GetReleaseFn()(nil) - return nil, serviceerror.NewNotFound("Workflow task not found.") - } - - defer func() { workflowContext.GetReleaseFn()(retError) }() - - var effects effect.Buffer - defer func() { - // code in this file and workflowTaskHandler is inconsistent in the way - // errors are returned - some functions which appear to return error - // actually return nil in all cases and instead set a member variable - // that should be observed by other collaborating code (e.g. - // workflowtaskHandler.workflowTaskFailedCause). That made me paranoid - // about the way this function exits so while we have this defer here - // there is _also_ code to call effects.Cancel at key points. - if retError != nil { - effects.Cancel(ctx) - } - effects.Apply(ctx) - }() - - // It's an error if the workflow has used versioning in the past but this task has no versioning info. 
- if ms.GetWorkerVersionStamp().GetUseVersioning() && !request.GetWorkerVersionStamp().GetUseVersioning() { - return nil, serviceerror.NewInvalidArgument("Workflow using versioning must continue to use versioning.") - } - - nsName := namespaceEntry.Name().String() - limits := workflow.WorkflowTaskCompletionLimits{ - MaxResetPoints: handler.config.MaxAutoResetPoints(nsName), - MaxSearchAttributeValueSize: handler.config.SearchAttributesSizeOfValueLimit(nsName), - } - // TODO: this metric is inaccurate, it should only be emitted if a new binary checksum (or build ID) is added in this completion. - if ms.GetExecutionInfo().AutoResetPoints != nil && limits.MaxResetPoints == len(ms.GetExecutionInfo().AutoResetPoints.Points) { - handler.metricsHandler.Counter(metrics.AutoResetPointsLimitExceededCounter.GetMetricName()).Record( - 1, - metrics.OperationTag(metrics.HistoryRespondWorkflowTaskCompletedScope)) - } - - workflowTaskHeartbeating := request.GetForceCreateNewWorkflowTask() && len(request.Commands) == 0 && len(request.Messages) == 0 - var workflowTaskHeartbeatTimeout bool - var completedEvent *historypb.HistoryEvent - var responseMutations []workflowTaskResponseMutation - - if workflowTaskHeartbeating { - namespace := namespaceEntry.Name() - timeout := handler.config.WorkflowTaskHeartbeatTimeout(namespace.String()) - origSchedTime := timestamp.TimeValue(currentWorkflowTask.OriginalScheduledTime) - if origSchedTime.UnixNano() > 0 && handler.timeSource.Now().After(origSchedTime.Add(timeout)) { - workflowTaskHeartbeatTimeout = true - - scope := handler.metricsHandler.WithTags( - metrics.OperationTag(metrics.HistoryRespondWorkflowTaskCompletedScope), - metrics.NamespaceTag(namespace.String()), - ) - scope.Counter(metrics.WorkflowTaskHeartbeatTimeoutCounter.GetMetricName()).Record(1) - completedEvent, err = ms.AddWorkflowTaskTimedOutEvent(currentWorkflowTask) - if err != nil { - return nil, err - } - ms.ClearStickyTaskQueue() - } else { - completedEvent, err = ms.AddWorkflowTaskCompletedEvent(currentWorkflowTask, request, limits) - if err != nil { - return nil, err - } - } - } else { - completedEvent, err = ms.AddWorkflowTaskCompletedEvent(currentWorkflowTask, request, limits) - if err != nil { - return nil, err - } - } - // NOTE: completedEvent might be nil if WT was speculative and request has only `update.Rejection` messages. - // See workflowTaskStateMachine.skipWorkflowTaskCompletedEvent for more details. 
- - if request.StickyAttributes == nil || request.StickyAttributes.WorkerTaskQueue == nil { - handler.metricsHandler.Counter(metrics.CompleteWorkflowTaskWithStickyDisabledCounter.GetMetricName()).Record( - 1, - metrics.OperationTag(metrics.HistoryRespondWorkflowTaskCompletedScope)) - ms.ClearStickyTaskQueue() - } else { - handler.metricsHandler.Counter(metrics.CompleteWorkflowTaskWithStickyEnabledCounter.GetMetricName()).Record( - 1, - metrics.OperationTag(metrics.HistoryRespondWorkflowTaskCompletedScope)) - ms.SetStickyTaskQueue(request.StickyAttributes.WorkerTaskQueue.GetName(), request.StickyAttributes.GetScheduleToStartTimeout()) - } - - var ( - wtFailedCause *workflowTaskFailedCause - activityNotStartedCancelled bool - newMutableState workflow.MutableState - ) - // hasBufferedEvents indicates if there are any buffered events which should generate a new workflow task - hasBufferedEvents := ms.HasBufferedEvents() - if err := namespaceEntry.VerifyBinaryChecksum(request.GetBinaryChecksum()); err != nil { - wtFailedCause = newWorkflowTaskFailedCause( - enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_BINARY, - serviceerror.NewInvalidArgument( - fmt.Sprintf( - "binary %v is marked as bad deployment", - request.GetBinaryChecksum())), - nil) - } else { - namespace := namespaceEntry.Name() - workflowSizeChecker := newWorkflowSizeChecker( - workflowSizeLimits{ - blobSizeLimitWarn: handler.config.BlobSizeLimitWarn(namespace.String()), - blobSizeLimitError: handler.config.BlobSizeLimitError(namespace.String()), - memoSizeLimitWarn: handler.config.MemoSizeLimitWarn(namespace.String()), - memoSizeLimitError: handler.config.MemoSizeLimitError(namespace.String()), - numPendingChildExecutionsLimit: handler.config.NumPendingChildExecutionsLimit(namespace.String()), - numPendingActivitiesLimit: handler.config.NumPendingActivitiesLimit(namespace.String()), - numPendingSignalsLimit: handler.config.NumPendingSignalsLimit(namespace.String()), - numPendingCancelsRequestLimit: handler.config.NumPendingCancelsRequestLimit(namespace.String()), - }, - ms, - handler.searchAttributesValidator, - handler.metricsHandler.WithTags( - metrics.OperationTag(metrics.HistoryRespondWorkflowTaskCompletedScope), - metrics.NamespaceTag(namespace.String()), - ), - handler.throttledLogger, - ) - - workflowTaskHandler := newWorkflowTaskHandler( - request.GetIdentity(), - completedEvent.GetEventId(), // If completedEvent is nil, then GetEventId() returns 0 and this value shouldn't be used in workflowTaskHandler. 
- ms, - weContext.UpdateRegistry(ctx), - &effects, - handler.commandAttrValidator, - workflowSizeChecker, - handler.logger, - handler.namespaceRegistry, - handler.metricsHandler, - handler.config, - handler.shard, - handler.searchAttributesMapperProvider, - hasBufferedEvents, - ) - - if responseMutations, err = workflowTaskHandler.handleCommands( - ctx, - request.Commands, - collection.NewIndexedTakeList( - request.Messages, - func(msg *protocolpb.Message) string { return msg.Id }, - ), - ); err != nil { - return nil, err - } - - // set the vars used by following logic - // further refactor should also clean up the vars used below - wtFailedCause = workflowTaskHandler.workflowTaskFailedCause - - // failMessage is not used by workflowTaskHandlerCallbacks - activityNotStartedCancelled = workflowTaskHandler.activityNotStartedCancelled - // continueAsNewTimerTasks is not used by workflowTaskHandlerCallbacks - - newMutableState = workflowTaskHandler.newMutableState - - hasBufferedEvents = workflowTaskHandler.hasBufferedEvents - } - - wtFailedShouldCreateNewTask := false - if wtFailedCause != nil { - effects.Cancel(ctx) - handler.metricsHandler.Counter(metrics.FailedWorkflowTasksCounter.GetMetricName()).Record( - 1, - metrics.OperationTag(metrics.HistoryRespondWorkflowTaskCompletedScope)) - handler.logger.Info("Failing the workflow task.", - tag.Value(wtFailedCause.Message()), - tag.WorkflowID(token.GetWorkflowId()), - tag.WorkflowRunID(token.GetRunId()), - tag.WorkflowNamespaceID(namespaceEntry.ID().String())) - if currentWorkflowTask.Attempt > 1 && wtFailedCause.failedCause != enumspb.WORKFLOW_TASK_FAILED_CAUSE_UNHANDLED_COMMAND { - // drop this workflow task if it keeps failing. This will cause the workflow task to timeout and get retried after timeout. - return nil, serviceerror.NewInvalidArgument(wtFailedCause.Message()) - } - var wtFailedEventID int64 - ms, wtFailedEventID, err = failWorkflowTask(ctx, weContext, currentWorkflowTask, wtFailedCause, request) - if err != nil { - return nil, err - } - wtFailedShouldCreateNewTask = true - newMutableState = nil - - if wtFailedCause.workflowFailure != nil { - // Flush buffer event before failing the workflow - ms.FlushBufferedEvents() - - attributes := &commandpb.FailWorkflowExecutionCommandAttributes{ - Failure: wtFailedCause.workflowFailure, - } - if _, err := ms.AddFailWorkflowEvent(wtFailedEventID, enumspb.RETRY_STATE_NON_RETRYABLE_FAILURE, attributes, ""); err != nil { - return nil, err - } - wtFailedShouldCreateNewTask = false - } - } - - bufferedEventShouldCreateNewTask := hasBufferedEvents && ms.HasAnyBufferedEvent(eventShouldGenerateNewTaskFilter) - if hasBufferedEvents && !bufferedEventShouldCreateNewTask { - // Make sure tasks that should not create a new event don't get stuck in ms forever - ms.FlushBufferedEvents() - } - newWorkflowTaskType := enumsspb.WORKFLOW_TASK_TYPE_UNSPECIFIED - if ms.IsWorkflowExecutionRunning() { - if request.GetForceCreateNewWorkflowTask() || // Heartbeat WT is always of Normal type. 
- wtFailedShouldCreateNewTask || - bufferedEventShouldCreateNewTask || - activityNotStartedCancelled { - newWorkflowTaskType = enumsspb.WORKFLOW_TASK_TYPE_NORMAL - } else if weContext.UpdateRegistry(ctx).HasOutgoing() { - if completedEvent == nil || ms.GetNextEventID() == completedEvent.GetEventId()+1 { - newWorkflowTaskType = enumsspb.WORKFLOW_TASK_TYPE_SPECULATIVE - } else { - newWorkflowTaskType = enumsspb.WORKFLOW_TASK_TYPE_NORMAL - } - } - } - - bypassTaskGeneration := request.GetReturnNewWorkflowTask() && wtFailedCause == nil - // TODO (alex-update): Need to support case when ReturnNewWorkflowTask=false and WT.Type=Speculative. - // In this case WT needs to be added directly to matching. - // Current implementation will create normal WT. - if newWorkflowTaskType == enumsspb.WORKFLOW_TASK_TYPE_SPECULATIVE && !bypassTaskGeneration { - // If task generation can't be bypassed workflow task must be of Normal type because Speculative workflow task always skip task generation. - newWorkflowTaskType = enumsspb.WORKFLOW_TASK_TYPE_NORMAL - } - - var newWorkflowTask *workflow.WorkflowTaskInfo - // Speculative workflow task will be created after mutable state is persisted. - if newWorkflowTaskType == enumsspb.WORKFLOW_TASK_TYPE_NORMAL { - var newWTErr error - if workflowTaskHeartbeating && !workflowTaskHeartbeatTimeout { - newWorkflowTask, newWTErr = ms.AddWorkflowTaskScheduledEventAsHeartbeat( - bypassTaskGeneration, - currentWorkflowTask.OriginalScheduledTime, - enumsspb.WORKFLOW_TASK_TYPE_NORMAL, // Heartbeat workflow task is always of Normal type. - ) - } else { - newWorkflowTask, newWTErr = ms.AddWorkflowTaskScheduledEvent(bypassTaskGeneration, newWorkflowTaskType) - } - if newWTErr != nil { - return nil, newWTErr - } - - // skip transfer task for workflow task if request asking to return new workflow task - if bypassTaskGeneration { - // start the new workflow task if request asked to do so - // TODO: replace the poll request - _, newWorkflowTask, err = ms.AddWorkflowTaskStartedEvent( - newWorkflowTask.ScheduledEventID, - "request-from-RespondWorkflowTaskCompleted", - newWorkflowTask.TaskQueue, - request.Identity, - ) - if err != nil { - return nil, err - } - } - } - - var updateErr error - if newMutableState != nil { - newWorkflowExecutionInfo := newMutableState.GetExecutionInfo() - newWorkflowExecutionState := newMutableState.GetExecutionState() - updateErr = weContext.UpdateWorkflowExecutionWithNewAsActive( - ctx, - workflow.NewContext( - handler.shard, - definition.NewWorkflowKey( - newWorkflowExecutionInfo.NamespaceId, - newWorkflowExecutionInfo.WorkflowId, - newWorkflowExecutionState.RunId, - ), - handler.logger, - ), - newMutableState, - ) - } else { - // If completedEvent is not nil (which it means that WT wasn't speculative) - // OR new WT is normal, then mutable state is persisted. - // Otherwise, (both old and new WT are speculative) mutable state is updated in memory only but not persisted. 
- if completedEvent != nil || newWorkflowTaskType == enumsspb.WORKFLOW_TASK_TYPE_NORMAL { - updateErr = weContext.UpdateWorkflowExecutionAsActive(ctx) - } - } - - if updateErr != nil { - effects.Cancel(ctx) - if persistence.IsConflictErr(updateErr) { - handler.metricsHandler.Counter(metrics.ConcurrencyUpdateFailureCounter.GetMetricName()).Record( - 1, - metrics.OperationTag(metrics.HistoryRespondWorkflowTaskCompletedScope)) - } - - // if updateErr resulted in TransactionSizeLimitError then fail workflow - switch updateErr.(type) { - case *persistence.TransactionSizeLimitError: - // must reload mutable state because the first call to updateWorkflowExecutionWithContext or continueAsNewWorkflowExecution - // clears mutable state if error is returned - ms, err = weContext.LoadMutableState(ctx) - if err != nil { - return nil, err - } - - if err := workflow.TerminateWorkflow( - ms, - common.FailureReasonTransactionSizeExceedsLimit, - payloads.EncodeString(updateErr.Error()), - consts.IdentityHistoryService, - false, - ); err != nil { - return nil, err - } - if err := weContext.UpdateWorkflowExecutionAsActive( - ctx, - ); err != nil { - return nil, err - } - } - - return nil, updateErr - } - - // Create speculative workflow task after mutable state is persisted. - if newWorkflowTaskType == enumsspb.WORKFLOW_TASK_TYPE_SPECULATIVE { - newWorkflowTask, err = ms.AddWorkflowTaskScheduledEvent(bypassTaskGeneration, newWorkflowTaskType) - if err != nil { - return nil, err - } - _, newWorkflowTask, err = ms.AddWorkflowTaskStartedEvent( - newWorkflowTask.ScheduledEventID, - "request-from-RespondWorkflowTaskCompleted", - newWorkflowTask.TaskQueue, - request.Identity, - ) - if err != nil { - return nil, err - } - } - - handler.handleBufferedQueries(ms, req.GetCompleteRequest().GetQueryResults(), newWorkflowTask != nil, namespaceEntry, workflowTaskHeartbeating) - - if workflowTaskHeartbeatTimeout { - // at this point, update is successful, but we still return an error to client so that the worker will give up this workflow - // release workflow lock with nil error to prevent mutable state from being cleared and reloaded - workflowContext.GetReleaseFn()(nil) - return nil, serviceerror.NewNotFound("workflow task heartbeat timeout") - } - - if wtFailedCause != nil { - // release workflow lock with nil error to prevent mutable state from being cleared and reloaded - workflowContext.GetReleaseFn()(nil) - return nil, serviceerror.NewInvalidArgument(wtFailedCause.Message()) - } - - resp := &historyservice.RespondWorkflowTaskCompletedResponse{} - if request.GetReturnNewWorkflowTask() && newWorkflowTask != nil { - resp.StartedResponse, err = handler.createRecordWorkflowTaskStartedResponse(ms, weContext.UpdateRegistry(ctx), newWorkflowTask, request.GetIdentity()) - if err != nil { - return nil, err - } - // sticky is always enabled when worker request for new workflow task from RespondWorkflowTaskCompleted - resp.StartedResponse.StickyExecutionEnabled = true - } - - // If completedEvent is nil then it means that WT was speculative and - // WT events (scheduled/started/completed) were not written to the history and were dropped. - // SDK needs to know where to roll back its history event pointer, i.e. after what event all other events needs to be dropped. - // SDK uses WorkflowTaskStartedEventID to do that. 
- if completedEvent == nil { - resp.ResetHistoryEventId = ms.GetExecutionInfo().LastWorkflowTaskStartedEventId - } - - for _, mutation := range responseMutations { - if err := mutation(resp); err != nil { - return nil, err - } - } - - return resp, nil -} - -func (handler *workflowTaskHandlerCallbacksImpl) verifyFirstWorkflowTaskScheduled( - ctx context.Context, - req *historyservice.VerifyFirstWorkflowTaskScheduledRequest, -) (retError error) { - namespaceID := namespace.ID(req.GetNamespaceId()) - if err := api.ValidateNamespaceUUID(namespaceID); err != nil { - return err - } - - workflowContext, err := handler.workflowConsistencyChecker.GetWorkflowContext( - ctx, - req.Clock, - api.BypassMutableStateConsistencyPredicate, - definition.NewWorkflowKey( - req.NamespaceId, - req.WorkflowExecution.WorkflowId, - req.WorkflowExecution.RunId, - ), - workflow.LockPriorityLow, - ) - if err != nil { - return err - } - defer func() { workflowContext.GetReleaseFn()(retError) }() - - mutableState := workflowContext.GetMutableState() - if !mutableState.IsWorkflowExecutionRunning() && - mutableState.GetExecutionState().State != enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE { - return nil - } - - if !mutableState.HadOrHasWorkflowTask() { - return consts.ErrWorkflowNotReady - } - - return nil -} - -func (handler *workflowTaskHandlerCallbacksImpl) createRecordWorkflowTaskStartedResponse( - ms workflow.MutableState, - updateRegistry update.Registry, - workflowTask *workflow.WorkflowTaskInfo, - identity string, -) (*historyservice.RecordWorkflowTaskStartedResponse, error) { - - response := &historyservice.RecordWorkflowTaskStartedResponse{} - response.WorkflowType = ms.GetWorkflowType() - executionInfo := ms.GetExecutionInfo() - if executionInfo.LastWorkflowTaskStartedEventId != common.EmptyEventID { - response.PreviousStartedEventId = executionInfo.LastWorkflowTaskStartedEventId - } - - // Starting workflowTask could result in different scheduledEventID if workflowTask was transient and new events came in - // before it was started. 
- response.ScheduledEventId = workflowTask.ScheduledEventID - response.StartedEventId = workflowTask.StartedEventID - response.StickyExecutionEnabled = ms.IsStickyTaskQueueSet() - response.NextEventId = ms.GetNextEventID() - response.Attempt = workflowTask.Attempt - response.WorkflowExecutionTaskQueue = &taskqueuepb.TaskQueue{ - Name: executionInfo.TaskQueue, - Kind: enumspb.TASK_QUEUE_KIND_NORMAL, - } - response.ScheduledTime = workflowTask.ScheduledTime - response.StartedTime = workflowTask.StartedTime - - // TODO (alex-update): Transient needs to be renamed to "TransientOrSpeculative" - response.TransientWorkflowTask = ms.GetTransientWorkflowTaskInfo(workflowTask, identity) - - currentBranchToken, err := ms.GetCurrentBranchToken() - if err != nil { - return nil, err - } - response.BranchToken = currentBranchToken - - qr := ms.GetQueryRegistry() - bufferedQueryIDs := qr.GetBufferedIDs() - if len(bufferedQueryIDs) > 0 { - response.Queries = make(map[string]*querypb.WorkflowQuery, len(bufferedQueryIDs)) - for _, bufferedQueryID := range bufferedQueryIDs { - input, err := qr.GetQueryInput(bufferedQueryID) - if err != nil { - continue - } - response.Queries[bufferedQueryID] = input - } - } - - response.Messages = updateRegistry.ReadOutgoingMessages(workflowTask.StartedEventID) - - if workflowTask.Type == enumsspb.WORKFLOW_TASK_TYPE_SPECULATIVE && len(response.GetMessages()) == 0 { - return nil, serviceerror.NewNotFound("No messages for speculative workflow task.") - } - - return response, nil -} - -func (handler *workflowTaskHandlerCallbacksImpl) handleBufferedQueries(ms workflow.MutableState, queryResults map[string]*querypb.WorkflowQueryResult, createNewWorkflowTask bool, namespaceEntry *namespace.Namespace, workflowTaskHeartbeating bool) { - queryRegistry := ms.GetQueryRegistry() - if !queryRegistry.HasBufferedQuery() { - return - } - - namespaceName := namespaceEntry.Name() - workflowID := ms.GetExecutionInfo().WorkflowId - runID := ms.GetExecutionState().GetRunId() - - scope := handler.metricsHandler.WithTags( - metrics.OperationTag(metrics.HistoryRespondWorkflowTaskCompletedScope), - metrics.NamespaceTag(namespaceEntry.Name().String()), - metrics.CommandTypeTag("ConsistentQuery")) - - // if its a heartbeat workflow task it means local activities may still be running on the worker - // which were started by an external event which happened before the query - if workflowTaskHeartbeating { - return - } - - sizeLimitError := handler.config.BlobSizeLimitError(namespaceName.String()) - sizeLimitWarn := handler.config.BlobSizeLimitWarn(namespaceName.String()) - - // Complete or fail all queries we have results for - for id, result := range queryResults { - if err := common.CheckEventBlobSizeLimit( - result.GetAnswer().Size(), - sizeLimitWarn, - sizeLimitError, - namespaceName.String(), - workflowID, - runID, - scope, - handler.throttledLogger, - tag.BlobSizeViolationOperation("ConsistentQuery"), - ); err != nil { - handler.logger.Info("failing query because query result size is too large", - tag.WorkflowNamespace(namespaceName.String()), - tag.WorkflowID(workflowID), - tag.WorkflowRunID(runID), - tag.QueryID(id), - tag.Error(err)) - failedCompletionState := &workflow.QueryCompletionState{ - Type: workflow.QueryCompletionTypeFailed, - Err: err, - } - if err := queryRegistry.SetCompletionState(id, failedCompletionState); err != nil { - handler.logger.Error( - "failed to set query completion state to failed", - tag.WorkflowNamespace(namespaceName.String()), - tag.WorkflowID(workflowID), - 
tag.WorkflowRunID(runID), - tag.QueryID(id), - tag.Error(err)) - scope.Counter(metrics.QueryRegistryInvalidStateCount.GetMetricName()).Record(1) - } - } else { - succeededCompletionState := &workflow.QueryCompletionState{ - Type: workflow.QueryCompletionTypeSucceeded, - Result: result, - } - if err := queryRegistry.SetCompletionState(id, succeededCompletionState); err != nil { - handler.logger.Error( - "failed to set query completion state to succeeded", - tag.WorkflowNamespace(namespaceName.String()), - tag.WorkflowID(workflowID), - tag.WorkflowRunID(runID), - tag.QueryID(id), - tag.Error(err)) - scope.Counter(metrics.QueryRegistryInvalidStateCount.GetMetricName()).Record(1) - } - } - } - - // If no workflow task was created then it means no buffered events came in during this workflow task's handling. - // This means all unanswered buffered queries can be dispatched directly through matching at this point. - if !createNewWorkflowTask { - buffered := queryRegistry.GetBufferedIDs() - for _, id := range buffered { - unblockCompletionState := &workflow.QueryCompletionState{ - Type: workflow.QueryCompletionTypeUnblocked, - } - if err := queryRegistry.SetCompletionState(id, unblockCompletionState); err != nil { - handler.logger.Error( - "failed to set query completion state to unblocked", - tag.WorkflowNamespace(namespaceName.String()), - tag.WorkflowID(workflowID), - tag.WorkflowRunID(runID), - tag.QueryID(id), - tag.Error(err)) - scope.Counter(metrics.QueryRegistryInvalidStateCount.GetMetricName()).Record(1) - } - } - } -} - -func failWorkflowTask( - ctx context.Context, - wfContext workflow.Context, - workflowTask *workflow.WorkflowTaskInfo, - wtFailedCause *workflowTaskFailedCause, - request *workflowservice.RespondWorkflowTaskCompletedRequest, -) (workflow.MutableState, int64, error) { - - // clear any updates we have accumulated so far - wfContext.Clear() - - // Reload workflow execution so we can apply the workflow task failure event - mutableState, err := wfContext.LoadMutableState(ctx) - if err != nil { - return nil, common.EmptyEventID, err - } - wtFailedEvent, err := mutableState.AddWorkflowTaskFailedEvent( - workflowTask, - wtFailedCause.failedCause, - failure.NewServerFailure(wtFailedCause.Message(), true), - request.GetIdentity(), - request.GetBinaryChecksum(), - "", - "", - 0) - if err != nil { - return nil, common.EmptyEventID, err - } - - var wtFailedEventID int64 - if wtFailedEvent != nil { - // If WTFailed event was added to the history then use its Id as wtFailedEventID. - wtFailedEventID = wtFailedEvent.GetEventId() - } else { - // Otherwise, if it was transient WT, last event should be WTFailed event from the 1st attempt. - wtFailedEventID = mutableState.GetNextEventID() - 1 - } - - // Return reloaded mutable state back to the caller for further updates. 
- return mutableState, wtFailedEventID, nil -} - -// Filter function to be passed to mutable_state.HasAnyBufferedEvent -// Returns true if the event should generate a new workflow task -// Currently only signal events with SkipGenerateWorkflowTask=true flag set do not generate tasks -func eventShouldGenerateNewTaskFilter(event *historypb.HistoryEvent) bool { - if event.GetEventType() != enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_SIGNALED { - return true - } - return !event.GetWorkflowExecutionSignaledEventAttributes().GetSkipGenerateWorkflowTask() -} diff -Nru temporal-1.21.5-1/src/service/history/workflowTaskHandlerCallbacks_test.go temporal-1.22.5/src/service/history/workflowTaskHandlerCallbacks_test.go --- temporal-1.21.5-1/src/service/history/workflowTaskHandlerCallbacks_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/workflowTaskHandlerCallbacks_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,347 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- -package history - -import ( - "context" - "testing" - "time" - - "github.com/golang/mock/gomock" - "github.com/pborman/uuid" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - commonpb "go.temporal.io/api/common/v1" - enumspb "go.temporal.io/api/enums/v1" - querypb "go.temporal.io/api/query/v1" - "go.temporal.io/api/serviceerror" - "golang.org/x/exp/maps" - - enumsspb "go.temporal.io/server/api/enums/v1" - "go.temporal.io/server/api/historyservice/v1" - persistencespb "go.temporal.io/server/api/persistence/v1" - "go.temporal.io/server/common" - "go.temporal.io/server/common/clock" - "go.temporal.io/server/common/cluster" - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/metrics" - "go.temporal.io/server/common/namespace" - "go.temporal.io/server/common/payloads" - "go.temporal.io/server/common/persistence" - "go.temporal.io/server/common/searchattribute" - "go.temporal.io/server/service/history/api" - "go.temporal.io/server/service/history/events" - "go.temporal.io/server/service/history/shard" - "go.temporal.io/server/service/history/tests" - "go.temporal.io/server/service/history/workflow" - wcache "go.temporal.io/server/service/history/workflow/cache" -) - -type ( - WorkflowTaskHandlerCallbackSuite struct { - *require.Assertions - suite.Suite - - controller *gomock.Controller - mockEventsCache *events.MockCache - mockExecutionMgr *persistence.MockExecutionManager - - logger log.Logger - - workflowTaskHandlerCallback *workflowTaskHandlerCallbacksImpl - } -) - -func TestWorkflowTaskHandlerCallbackSuite(t *testing.T) { - suite.Run(t, new(WorkflowTaskHandlerCallbackSuite)) -} - -func (s *WorkflowTaskHandlerCallbackSuite) SetupTest() { - s.Assertions = require.New(s.T()) - - s.controller = gomock.NewController(s.T()) - config := tests.NewDynamicConfig() - mockShard := shard.NewTestContext( - s.controller, - &persistencespb.ShardInfo{ - ShardId: 1, - RangeId: 1, - }, - config, - ) - mockShard.Resource.ShardMgr.EXPECT().AssertShardOwnership(gomock.Any(), gomock.Any()).AnyTimes() - - mockNamespaceCache := mockShard.Resource.NamespaceCache - mockNamespaceCache.EXPECT().GetNamespaceByID(tests.NamespaceID).Return(tests.LocalNamespaceEntry, nil).AnyTimes() - s.mockExecutionMgr = mockShard.Resource.ExecutionMgr - mockClusterMetadata := mockShard.Resource.ClusterMetadata - mockClusterMetadata.EXPECT().GetCurrentClusterName().Return(cluster.TestCurrentClusterName).AnyTimes() - mockClusterMetadata.EXPECT().ClusterNameForFailoverVersion(false, common.EmptyVersion).Return(cluster.TestCurrentClusterName).AnyTimes() - mockClusterMetadata.EXPECT().ClusterNameForFailoverVersion(true, tests.Version).Return(cluster.TestCurrentClusterName).AnyTimes() - - mockVisibilityManager := mockShard.Resource.VisibilityManager - mockVisibilityManager.EXPECT().GetIndexName().Return("").AnyTimes() - mockVisibilityManager.EXPECT(). - ValidateCustomSearchAttributes(gomock.Any()). - DoAndReturn( - func(searchAttributes map[string]any) (map[string]any, error) { - return searchAttributes, nil - }, - ). 
- AnyTimes() - - s.mockEventsCache = mockShard.MockEventsCache - s.mockEventsCache.EXPECT().PutEvent(gomock.Any(), gomock.Any()).AnyTimes() - s.logger = mockShard.GetLogger() - - workflowCache := wcache.NewCache(mockShard) - h := &historyEngineImpl{ - currentClusterName: mockShard.GetClusterMetadata().GetCurrentClusterName(), - shard: mockShard, - clusterMetadata: mockClusterMetadata, - executionManager: s.mockExecutionMgr, - logger: s.logger, - throttledLogger: s.logger, - metricsHandler: metrics.NoopMetricsHandler, - tokenSerializer: common.NewProtoTaskTokenSerializer(), - config: config, - timeSource: mockShard.GetTimeSource(), - eventNotifier: events.NewNotifier(clock.NewRealTimeSource(), metrics.NoopMetricsHandler, func(namespace.ID, string) int32 { return 1 }), - searchAttributesValidator: searchattribute.NewValidator( - searchattribute.NewTestProvider(), - mockShard.Resource.SearchAttributesMapperProvider, - config.SearchAttributesNumberOfKeysLimit, - config.SearchAttributesSizeOfValueLimit, - config.SearchAttributesTotalSizeLimit, - mockShard.Resource.VisibilityManager, - false, - ), - workflowConsistencyChecker: api.NewWorkflowConsistencyChecker(mockShard, workflowCache), - } - - s.workflowTaskHandlerCallback = newWorkflowTaskHandlerCallback(h) -} - -func (s *WorkflowTaskHandlerCallbackSuite) TearDownTest() { - s.controller.Finish() -} - -func (s *WorkflowTaskHandlerCallbackSuite) TestVerifyFirstWorkflowTaskScheduled_WorkflowNotFound() { - request := &historyservice.VerifyFirstWorkflowTaskScheduledRequest{ - NamespaceId: tests.NamespaceID.String(), - WorkflowExecution: &commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - }, - } - - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(nil, &serviceerror.NotFound{}) - - err := s.workflowTaskHandlerCallback.verifyFirstWorkflowTaskScheduled(context.Background(), request) - s.IsType(&serviceerror.NotFound{}, err) -} - -func (s *WorkflowTaskHandlerCallbackSuite) TestVerifyFirstWorkflowTaskScheduled_WorkflowCompleted() { - request := &historyservice.VerifyFirstWorkflowTaskScheduledRequest{ - NamespaceId: tests.NamespaceID.String(), - WorkflowExecution: &commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - }, - } - - ms := workflow.TestGlobalMutableState(s.workflowTaskHandlerCallback.shard, s.mockEventsCache, s.logger, tests.Version, tests.RunID) - addWorkflowExecutionStartedEvent(ms, commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - }, "wType", "testTaskQueue", payloads.EncodeString("input"), 25*time.Second, 20*time.Second, 200*time.Second, "identity") - - _, err := ms.AddTimeoutWorkflowEvent( - ms.GetNextEventID(), - enumspb.RETRY_STATE_RETRY_POLICY_NOT_SET, - uuid.New(), - ) - s.NoError(err) - - wfMs := workflow.TestCloneToProto(ms) - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - - err = s.workflowTaskHandlerCallback.verifyFirstWorkflowTaskScheduled(context.Background(), request) - s.NoError(err) -} - -func (s *WorkflowTaskHandlerCallbackSuite) TestVerifyFirstWorkflowTaskScheduled_WorkflowZombie() { - request := &historyservice.VerifyFirstWorkflowTaskScheduledRequest{ - NamespaceId: tests.NamespaceID.String(), - WorkflowExecution: &commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - }, - } - - ms := 
workflow.TestGlobalMutableState(s.workflowTaskHandlerCallback.shard, s.mockEventsCache, s.logger, tests.Version, tests.RunID) - addWorkflowExecutionStartedEvent(ms, commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - }, "wType", "testTaskQueue", payloads.EncodeString("input"), 25*time.Second, 20*time.Second, 200*time.Second, "identity") - - // zombie state should be treated as open - s.NoError(ms.UpdateWorkflowStateStatus( - enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE, - enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING, - )) - wfMs := workflow.TestCloneToProto(ms) - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - - err := s.workflowTaskHandlerCallback.verifyFirstWorkflowTaskScheduled(context.Background(), request) - s.IsType(&serviceerror.WorkflowNotReady{}, err) -} - -func (s *WorkflowTaskHandlerCallbackSuite) TestVerifyFirstWorkflowTaskScheduled_WorkflowRunning_TaskPending() { - request := &historyservice.VerifyFirstWorkflowTaskScheduledRequest{ - NamespaceId: tests.NamespaceID.String(), - WorkflowExecution: &commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - }, - } - - ms := workflow.TestGlobalMutableState(s.workflowTaskHandlerCallback.shard, s.mockEventsCache, s.logger, tests.Version, tests.RunID) - addWorkflowExecutionStartedEvent(ms, commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - }, "wType", "testTaskQueue", payloads.EncodeString("input"), 25*time.Second, 20*time.Second, 200*time.Second, "identity") - addWorkflowTaskScheduledEvent(ms) - - wfMs := workflow.TestCloneToProto(ms) - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - - err := s.workflowTaskHandlerCallback.verifyFirstWorkflowTaskScheduled(context.Background(), request) - s.NoError(err) -} - -func (s *WorkflowTaskHandlerCallbackSuite) TestVerifyFirstWorkflowTaskScheduled_WorkflowRunning_TaskProcessed() { - request := &historyservice.VerifyFirstWorkflowTaskScheduledRequest{ - NamespaceId: tests.NamespaceID.String(), - WorkflowExecution: &commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - }, - } - - ms := workflow.TestGlobalMutableState(s.workflowTaskHandlerCallback.shard, s.mockEventsCache, s.logger, tests.Version, tests.RunID) - addWorkflowExecutionStartedEvent(ms, commonpb.WorkflowExecution{ - WorkflowId: tests.WorkflowID, - RunId: tests.RunID, - }, "wType", "testTaskQueue", payloads.EncodeString("input"), 25*time.Second, 20*time.Second, 200*time.Second, "identity") - wt := addWorkflowTaskScheduledEvent(ms) - workflowTasksStartEvent := addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, "testTaskQueue", uuid.New()) - wt.StartedEventID = workflowTasksStartEvent.GetEventId() - addWorkflowTaskCompletedEvent(&s.Suite, ms, wt.ScheduledEventID, wt.StartedEventID, "some random identity") - - wfMs := workflow.TestCloneToProto(ms) - gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} - s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) - - err := s.workflowTaskHandlerCallback.verifyFirstWorkflowTaskScheduled(context.Background(), request) - s.NoError(err) -} - -func (s *WorkflowTaskHandlerCallbackSuite) TestHandleBufferedQueries_HeartbeatWorkflowTask() { - queryRegistry, mockMutableState 
:= s.setupBufferedQueriesMocks() - s.assertQueryCounts(queryRegistry, 10, 0, 0, 0) - queryResults := s.constructQueryResults(queryRegistry.GetBufferedIDs()[0:5], 10) - s.workflowTaskHandlerCallback.handleBufferedQueries(mockMutableState, queryResults, false, tests.GlobalNamespaceEntry, true) - s.assertQueryCounts(queryRegistry, 10, 0, 0, 0) -} - -func (s *WorkflowTaskHandlerCallbackSuite) TestHandleBufferedQueries_NewWorkflowTask() { - queryRegistry, mockMutableState := s.setupBufferedQueriesMocks() - s.assertQueryCounts(queryRegistry, 10, 0, 0, 0) - queryResults := s.constructQueryResults(queryRegistry.GetBufferedIDs()[0:5], 10) - s.workflowTaskHandlerCallback.handleBufferedQueries(mockMutableState, queryResults, true, tests.GlobalNamespaceEntry, false) - s.assertQueryCounts(queryRegistry, 5, 5, 0, 0) -} - -func (s *WorkflowTaskHandlerCallbackSuite) TestHandleBufferedQueries_NoNewWorkflowTask() { - queryRegistry, mockMutableState := s.setupBufferedQueriesMocks() - s.assertQueryCounts(queryRegistry, 10, 0, 0, 0) - queryResults := s.constructQueryResults(queryRegistry.GetBufferedIDs()[0:5], 10) - s.workflowTaskHandlerCallback.handleBufferedQueries(mockMutableState, queryResults, false, tests.GlobalNamespaceEntry, false) - s.assertQueryCounts(queryRegistry, 0, 5, 5, 0) -} - -func (s *WorkflowTaskHandlerCallbackSuite) TestHandleBufferedQueries_QueryTooLarge() { - queryRegistry, mockMutableState := s.setupBufferedQueriesMocks() - s.assertQueryCounts(queryRegistry, 10, 0, 0, 0) - bufferedIDs := queryRegistry.GetBufferedIDs() - queryResults := s.constructQueryResults(bufferedIDs[0:5], 10) - largeQueryResults := s.constructQueryResults(bufferedIDs[5:10], 10*1024*1024) - maps.Copy(queryResults, largeQueryResults) - s.workflowTaskHandlerCallback.handleBufferedQueries(mockMutableState, queryResults, false, tests.GlobalNamespaceEntry, false) - s.assertQueryCounts(queryRegistry, 0, 5, 0, 5) -} - -func (s *WorkflowTaskHandlerCallbackSuite) setupBufferedQueriesMocks() (workflow.QueryRegistry, *workflow.MockMutableState) { - queryRegistry := s.constructQueryRegistry(10) - mockMutableState := workflow.NewMockMutableState(s.controller) - mockMutableState.EXPECT().GetQueryRegistry().Return(queryRegistry) - mockMutableState.EXPECT().GetExecutionInfo().Return(&persistencespb.WorkflowExecutionInfo{ - WorkflowId: tests.WorkflowID, - }).AnyTimes() - mockMutableState.EXPECT().GetExecutionState().Return(&persistencespb.WorkflowExecutionState{ - RunId: tests.RunID, - }).AnyTimes() - return queryRegistry, mockMutableState -} - -func (s *WorkflowTaskHandlerCallbackSuite) constructQueryResults(ids []string, resultSize int) map[string]*querypb.WorkflowQueryResult { - results := make(map[string]*querypb.WorkflowQueryResult) - for _, id := range ids { - results[id] = &querypb.WorkflowQueryResult{ - ResultType: enumspb.QUERY_RESULT_TYPE_ANSWERED, - Answer: payloads.EncodeBytes(make([]byte, resultSize)), - } - } - return results -} - -func (s *WorkflowTaskHandlerCallbackSuite) constructQueryRegistry(numQueries int) workflow.QueryRegistry { - queryRegistry := workflow.NewQueryRegistry() - for i := 0; i < numQueries; i++ { - queryRegistry.BufferQuery(&querypb.WorkflowQuery{}) - } - return queryRegistry -} - -func (s *WorkflowTaskHandlerCallbackSuite) assertQueryCounts(queryRegistry workflow.QueryRegistry, buffered, completed, unblocked, failed int) { - s.Len(queryRegistry.GetBufferedIDs(), buffered) - s.Len(queryRegistry.GetCompletedIDs(), completed) - s.Len(queryRegistry.GetUnblockedIDs(), unblocked) - 
s.Len(queryRegistry.GetFailedIDs(), failed) -} diff -Nru temporal-1.21.5-1/src/service/history/workflowTaskHandler_test.go temporal-1.22.5/src/service/history/workflowTaskHandler_test.go --- temporal-1.21.5-1/src/service/history/workflowTaskHandler_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/history/workflowTaskHandler_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,315 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package history - -import ( - "context" - "testing" - "time" - - "github.com/gogo/protobuf/proto" - "github.com/gogo/protobuf/types" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" - commandpb "go.temporal.io/api/command/v1" - enumspb "go.temporal.io/api/enums/v1" - protocolpb "go.temporal.io/api/protocol/v1" - "go.temporal.io/api/serviceerror" - updatepb "go.temporal.io/api/update/v1" - - persistencespb "go.temporal.io/server/api/persistence/v1" - "go.temporal.io/server/common/collection" - "go.temporal.io/server/common/dynamicconfig" - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/metrics" - "go.temporal.io/server/common/namespace" - "go.temporal.io/server/common/persistence" - "go.temporal.io/server/internal/effect" - "go.temporal.io/server/service/history/configs" - "go.temporal.io/server/service/history/shard" - "go.temporal.io/server/service/history/workflow" - "go.temporal.io/server/service/history/workflow/update" -) - -func TestCommandProtocolMessage(t *testing.T) { - t.Parallel() - - type testconf struct { - ms *workflow.MockMutableState - updates update.Registry - handler *workflowTaskHandlerImpl - conf map[dynamicconfig.Key]any - } - - const defaultBlobSizeLimit = 1 * 1024 * 1024 - - msgCommand := func(msgID string) *commandpb.Command { - return &commandpb.Command{ - CommandType: enumspb.COMMAND_TYPE_PROTOCOL_MESSAGE, - Attributes: &commandpb.Command_ProtocolMessageCommandAttributes{ - ProtocolMessageCommandAttributes: &commandpb.ProtocolMessageCommandAttributes{ - MessageId: msgID, - }, - }, - } - } - - setup := func(t *testing.T, out *testconf, blobSizeLimit int) { - shardCtx := shard.NewMockContext(gomock.NewController(t)) - logger := log.NewNoopLogger() - metricsHandler := metrics.NoopMetricsHandler - out.conf = map[dynamicconfig.Key]any{} - out.ms = workflow.NewMockMutableState(gomock.NewController(t)) 
- out.ms.EXPECT().VisitUpdates(gomock.Any()).Times(1) - out.updates = update.NewRegistry(func() update.UpdateStore { return out.ms }) - var effects effect.Buffer - config := configs.NewConfig( - dynamicconfig.NewCollection( - dynamicconfig.StaticClient(out.conf), logger), 1, false, false) - mockMeta := persistence.NewMockMetadataManager(gomock.NewController(t)) - nsReg := namespace.NewRegistry( - mockMeta, - true, - func() time.Duration { return 1 * time.Hour }, - dynamicconfig.GetBoolPropertyFn(false), - metricsHandler, - logger, - ) - out.handler = newWorkflowTaskHandler( // 😲 - t.Name(), // identity - 123, // workflowTaskCompletedID - out.ms, - out.updates, - &effects, - newCommandAttrValidator( - nsReg, - config, - nil, // searchAttributesValidator - ), - newWorkflowSizeChecker( - workflowSizeLimits{blobSizeLimitError: blobSizeLimit}, - out.ms, - nil, // searchAttributesValidator - metricsHandler, - logger, - ), - logger, - nsReg, - metricsHandler, - config, - shardCtx, - nil, // searchattribute.MapperProvider - false, - ) - } - - t.Run("missing message ID", func(t *testing.T) { - var tc testconf - setup(t, &tc, defaultBlobSizeLimit) - var ( - command = msgCommand("") // blank is invalid - ) - - tc.ms.EXPECT().GetExecutionInfo().AnyTimes().Return(&persistencespb.WorkflowExecutionInfo{}) - - _, err := tc.handler.handleCommand(context.Background(), command, newMsgList()) - require.NoError(t, err) - require.NotNil(t, tc.handler.workflowTaskFailedCause) - require.Equal(t, - enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_UPDATE_WORKFLOW_EXECUTION_MESSAGE, - tc.handler.workflowTaskFailedCause.failedCause) - }) - - t.Run("message not found", func(t *testing.T) { - var tc testconf - setup(t, &tc, defaultBlobSizeLimit) - var ( - command = msgCommand("valid_but_not_found_msg_id") - ) - - tc.ms.EXPECT().GetExecutionInfo().AnyTimes().Return(&persistencespb.WorkflowExecutionInfo{}) - - _, err := tc.handler.handleCommand(context.Background(), command, newMsgList()) - require.NoError(t, err) - require.NotNil(t, tc.handler.workflowTaskFailedCause) - require.Equal(t, - enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_UPDATE_WORKFLOW_EXECUTION_MESSAGE, - tc.handler.workflowTaskFailedCause.failedCause) - }) - - t.Run("message too large", func(t *testing.T) { - var tc testconf - t.Log("setting max blob size to zero") - setup(t, &tc, 0) - var ( - msgID = t.Name() + "-message-id" - command = msgCommand(msgID) // blank is invalid - msg = &protocolpb.Message{ - Id: msgID, - ProtocolInstanceId: "does_not_matter", - Body: mustMarshalAny(t, &types.Empty{}), - } - ) - - tc.ms.EXPECT().GetExecutionInfo().AnyTimes().Return(&persistencespb.WorkflowExecutionInfo{}) - tc.ms.EXPECT().GetExecutionState().AnyTimes().Return(&persistencespb.WorkflowExecutionState{}) - - _, err := tc.handler.handleCommand(context.Background(), command, newMsgList(msg)) - require.NoError(t, err) - require.NotNil(t, tc.handler.workflowTaskFailedCause) - require.Equal(t, - enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_UPDATE_WORKFLOW_EXECUTION_MESSAGE, - tc.handler.workflowTaskFailedCause.failedCause) - require.ErrorContains(t, tc.handler.workflowTaskFailedCause.causeErr, "exceeds size limit") - }) - - t.Run("message for unsupported protocol", func(t *testing.T) { - var tc testconf - setup(t, &tc, defaultBlobSizeLimit) - var ( - msgID = t.Name() + "-message-id" - command = msgCommand(msgID) // blank is invalid - msg = &protocolpb.Message{ - Id: msgID, - ProtocolInstanceId: "does_not_matter", - Body: mustMarshalAny(t, &types.Empty{}), - } - ) - - 
tc.ms.EXPECT().GetExecutionInfo().AnyTimes().Return(&persistencespb.WorkflowExecutionInfo{}) - tc.ms.EXPECT().GetExecutionState().AnyTimes().Return(&persistencespb.WorkflowExecutionState{}) - - _, err := tc.handler.handleCommand(context.Background(), command, newMsgList(msg)) - require.NoError(t, err) - require.NotNil(t, tc.handler.workflowTaskFailedCause) - require.Equal(t, - enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_UPDATE_WORKFLOW_EXECUTION_MESSAGE, - tc.handler.workflowTaskFailedCause.failedCause) - var invalidArg *serviceerror.InvalidArgument - require.ErrorAs(t, tc.handler.workflowTaskFailedCause.causeErr, &invalidArg) - require.ErrorContains(t, tc.handler.workflowTaskFailedCause.causeErr, "protocol type") - }) - - t.Run("update not found", func(t *testing.T) { - var tc testconf - setup(t, &tc, defaultBlobSizeLimit) - var ( - msgID = t.Name() + "-message-id" - command = msgCommand(msgID) // blank is invalid - msg = &protocolpb.Message{ - Id: msgID, - ProtocolInstanceId: "will not be found", - Body: mustMarshalAny(t, &updatepb.Acceptance{}), - } - ) - - tc.ms.EXPECT().GetExecutionInfo().AnyTimes().Return(&persistencespb.WorkflowExecutionInfo{}) - tc.ms.EXPECT().GetExecutionState().AnyTimes().Return(&persistencespb.WorkflowExecutionState{}) - tc.ms.EXPECT().GetUpdateOutcome(gomock.Any(), "will not be found").Return(nil, serviceerror.NewNotFound("")) - - _, err := tc.handler.handleCommand(context.Background(), command, newMsgList(msg)) - require.NoError(t, err) - require.NotNil(t, tc.handler.workflowTaskFailedCause) - require.Equal(t, - enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_UPDATE_WORKFLOW_EXECUTION_MESSAGE, - tc.handler.workflowTaskFailedCause.failedCause) - var notfound *serviceerror.NotFound - require.ErrorAs(t, tc.handler.workflowTaskFailedCause.causeErr, ¬found) - }) - - t.Run("deliver message failure", func(t *testing.T) { - var tc testconf - setup(t, &tc, defaultBlobSizeLimit) - var ( - updateID = t.Name() + "-update-id" - msgID = t.Name() + "-message-id" - command = msgCommand(msgID) // blank is invalid - msg = &protocolpb.Message{ - Id: msgID, - ProtocolInstanceId: updateID, - Body: mustMarshalAny(t, &updatepb.Acceptance{}), - } - ) - tc.ms.EXPECT().GetExecutionInfo().AnyTimes().Return(&persistencespb.WorkflowExecutionInfo{}) - tc.ms.EXPECT().GetExecutionState().AnyTimes().Return(&persistencespb.WorkflowExecutionState{}) - tc.ms.EXPECT().GetUpdateOutcome(gomock.Any(), updateID).Return(nil, serviceerror.NewNotFound("")) - - t.Log("create the expected protocol instance") - _, _, err := tc.updates.FindOrCreate(context.Background(), updateID) - require.NoError(t, err) - - t.Log("delivering an acceptance message to an update in the admitted state should cause a protocol error") - _, err = tc.handler.handleCommand(context.Background(), command, newMsgList(msg)) - require.NoError(t, err) - require.NotNil(t, tc.handler.workflowTaskFailedCause) - require.Equal(t, - enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_UPDATE_WORKFLOW_EXECUTION_MESSAGE, - tc.handler.workflowTaskFailedCause.failedCause) - var gotErr *serviceerror.InvalidArgument - require.ErrorAs(t, tc.handler.workflowTaskFailedCause.causeErr, &gotErr) - }) - - t.Run("deliver message success", func(t *testing.T) { - var tc testconf - setup(t, &tc, defaultBlobSizeLimit) - var ( - updateID = t.Name() + "-update-id" - msgID = t.Name() + "-message-id" - command = msgCommand(msgID) // blank is invalid - msg = &protocolpb.Message{ - Id: msgID, - ProtocolInstanceId: updateID, - Body: mustMarshalAny(t, &updatepb.Request{ - Meta: 
&updatepb.Meta{UpdateId: updateID}, - Input: &updatepb.Input{Name: "not_empty"}, - }), - } - msgs = newMsgList(msg) - ) - tc.ms.EXPECT().GetExecutionInfo().AnyTimes().Return(&persistencespb.WorkflowExecutionInfo{}) - tc.ms.EXPECT().GetExecutionState().AnyTimes().Return(&persistencespb.WorkflowExecutionState{}) - tc.ms.EXPECT().GetUpdateOutcome(gomock.Any(), updateID).Return(nil, serviceerror.NewNotFound("")) - - t.Log("create the expected protocol instance") - _, _, err := tc.updates.FindOrCreate(context.Background(), updateID) - require.NoError(t, err) - - _, err = tc.handler.handleCommand(context.Background(), command, msgs) - require.NoError(t, err, - "delivering a request message to an update in the admitted state should succeed") - require.Nil(t, tc.handler.workflowTaskFailedCause) - }) -} - -func newMsgList(msgs ...*protocolpb.Message) *collection.IndexedTakeList[string, *protocolpb.Message] { - return collection.NewIndexedTakeList(msgs, func(msg *protocolpb.Message) string { return msg.Id }) -} - -func mustMarshalAny(t *testing.T, pb proto.Message) *types.Any { - t.Helper() - a, err := types.MarshalAny(pb) - require.NoError(t, err) - return a -} diff -Nru temporal-1.21.5-1/src/service/history/workflow_rebuilder.go temporal-1.22.5/src/service/history/workflow_rebuilder.go --- temporal-1.21.5-1/src/service/history/workflow_rebuilder.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/service/history/workflow_rebuilder.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,173 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +//go:generate mockgen -copyright_file ../../LICENSE -package $GOPACKAGE -source $GOFILE -destination workflow_rebuilder_mock.go + +package history + +import ( + "context" + "math" + + "go.temporal.io/api/serviceerror" + + "go.temporal.io/server/common/definition" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/persistence/versionhistory" + "go.temporal.io/server/service/history/api" + "go.temporal.io/server/service/history/ndc" + "go.temporal.io/server/service/history/shard" + "go.temporal.io/server/service/history/workflow" + wcache "go.temporal.io/server/service/history/workflow/cache" +) + +type ( + workflowRebuilder interface { + // rebuild rebuilds a workflow, in case of any kind of corruption + rebuild( + ctx context.Context, + workflowKey definition.WorkflowKey, + ) error + } + + workflowRebuilderImpl struct { + shard shard.Context + workflowConsistencyChecker api.WorkflowConsistencyChecker + transaction workflow.Transaction + logger log.Logger + } +) + +var _ workflowRebuilder = (*workflowRebuilderImpl)(nil) + +func NewWorkflowRebuilder( + shard shard.Context, + workflowCache wcache.Cache, + logger log.Logger, +) *workflowRebuilderImpl { + return &workflowRebuilderImpl{ + shard: shard, + workflowConsistencyChecker: api.NewWorkflowConsistencyChecker(shard, workflowCache), + transaction: workflow.NewTransaction(shard), + logger: logger, + } +} + +func (r *workflowRebuilderImpl) rebuild( + ctx context.Context, + workflowKey definition.WorkflowKey, +) (retError error) { + + wfContext, err := r.workflowConsistencyChecker.GetWorkflowContext( + ctx, + nil, + api.BypassMutableStateConsistencyPredicate, + workflowKey, + workflow.LockPriorityHigh, + ) + if err != nil { + return err + } + defer func() { + wfContext.GetReleaseFn()(retError) + wfContext.GetContext().Clear() + }() + + mutableState := wfContext.GetMutableState() + _, dbRecordVersion := mutableState.GetUpdateCondition() + + requestID := mutableState.GetExecutionState().CreateRequestId + versionHistories := mutableState.GetExecutionInfo().VersionHistories + currentVersionHistory, err := versionhistory.GetCurrentVersionHistory(versionHistories) + if err != nil { + return err + } + branchToken := currentVersionHistory.BranchToken + stateTransitionCount := mutableState.GetExecutionInfo().StateTransitionCount + + rebuildMutableState, err := r.replayResetWorkflow( + ctx, + workflowKey, + branchToken, + stateTransitionCount, + dbRecordVersion, + requestID, + ) + if err != nil { + return err + } + return r.persistToDB(ctx, rebuildMutableState) +} + +func (r *workflowRebuilderImpl) replayResetWorkflow( + ctx context.Context, + workflowKey definition.WorkflowKey, + branchToken []byte, + stateTransitionCount int64, + dbRecordVersion int64, + requestID string, +) (workflow.MutableState, error) { + + rebuildMutableState, rebuildHistorySize, err := ndc.NewStateRebuilder(r.shard, r.logger).Rebuild( + ctx, + r.shard.GetTimeSource().Now(), + workflowKey, + branchToken, + math.MaxInt64-1, // NOTE: this is last event ID, layer below will +1 to calculate the next event ID + nil, // skip event ID & version check + workflowKey, + branchToken, + requestID, + ) + if err != nil { + return nil, err + } + + // note: this is an admin API, for operator to recover a corrupted mutable state, so state transition count + // should remain the same, the -= 1 exists here since later CloseTransactionAsSnapshot will += 1 to state transition count + rebuildMutableState.GetExecutionInfo().StateTransitionCount = stateTransitionCount - 1 + 
rebuildMutableState.AddHistorySize(rebuildHistorySize) + rebuildMutableState.SetUpdateCondition(rebuildMutableState.GetNextEventID(), dbRecordVersion) + return rebuildMutableState, nil +} + +func (r *workflowRebuilderImpl) persistToDB( + ctx context.Context, + mutableState workflow.MutableState, +) error { + resetWorkflowSnapshot, resetWorkflowEventsSeq, err := mutableState.CloseTransactionAsSnapshot( + workflow.TransactionPolicyPassive, + ) + if err != nil { + return err + } + if len(resetWorkflowEventsSeq) != 0 { + return serviceerror.NewInternal("workflowRebuilder encountered new events when rebuilding mutable state") + } + + return r.transaction.SetWorkflowExecution( + ctx, + resetWorkflowSnapshot, + ) +} diff -Nru temporal-1.21.5-1/src/service/history/workflow_rebuilder_mock.go temporal-1.22.5/src/service/history/workflow_rebuilder_mock.go --- temporal-1.21.5-1/src/service/history/workflow_rebuilder_mock.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/service/history/workflow_rebuilder_mock.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,74 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Code generated by MockGen. DO NOT EDIT. +// Source: workflow_rebuilder.go + +// Package history is a generated GoMock package. +package history + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + definition "go.temporal.io/server/common/definition" +) + +// MockworkflowRebuilder is a mock of workflowRebuilder interface. +type MockworkflowRebuilder struct { + ctrl *gomock.Controller + recorder *MockworkflowRebuilderMockRecorder +} + +// MockworkflowRebuilderMockRecorder is the mock recorder for MockworkflowRebuilder. +type MockworkflowRebuilderMockRecorder struct { + mock *MockworkflowRebuilder +} + +// NewMockworkflowRebuilder creates a new mock instance. +func NewMockworkflowRebuilder(ctrl *gomock.Controller) *MockworkflowRebuilder { + mock := &MockworkflowRebuilder{ctrl: ctrl} + mock.recorder = &MockworkflowRebuilderMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockworkflowRebuilder) EXPECT() *MockworkflowRebuilderMockRecorder { + return m.recorder +} + +// rebuild mocks base method. 
+func (m *MockworkflowRebuilder) rebuild(ctx context.Context, workflowKey definition.WorkflowKey) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "rebuild", ctx, workflowKey) + ret0, _ := ret[0].(error) + return ret0 +} + +// rebuild indicates an expected call of rebuild. +func (mr *MockworkflowRebuilderMockRecorder) rebuild(ctx, workflowKey interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "rebuild", reflect.TypeOf((*MockworkflowRebuilder)(nil).rebuild), ctx, workflowKey) +} diff -Nru temporal-1.21.5-1/src/service/history/workflow_task_handler.go temporal-1.22.5/src/service/history/workflow_task_handler.go --- temporal-1.21.5-1/src/service/history/workflow_task_handler.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/service/history/workflow_task_handler.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,1416 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package history + +import ( + "context" + "fmt" + "time" + + "github.com/pborman/uuid" + commandpb "go.temporal.io/api/command/v1" + commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" + failurepb "go.temporal.io/api/failure/v1" + protocolpb "go.temporal.io/api/protocol/v1" + "go.temporal.io/api/serviceerror" + "go.temporal.io/api/workflowservice/v1" + + "go.temporal.io/server/common/tasktoken" + "go.temporal.io/server/internal/effect" + "go.temporal.io/server/internal/protocol" + "go.temporal.io/server/service/history/workflow/update" + + "go.temporal.io/server/api/historyservice/v1" + "go.temporal.io/server/common" + "go.temporal.io/server/common/backoff" + "go.temporal.io/server/common/collection" + "go.temporal.io/server/common/enums" + "go.temporal.io/server/common/failure" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/log/tag" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/namespace" + "go.temporal.io/server/common/payload" + "go.temporal.io/server/common/payloads" + "go.temporal.io/server/common/primitives/timestamp" + "go.temporal.io/server/common/searchattribute" + "go.temporal.io/server/service/history/configs" + "go.temporal.io/server/service/history/shard" + "go.temporal.io/server/service/history/workflow" +) + +type ( + commandAttrValidationFn func() (enumspb.WorkflowTaskFailedCause, error) + + workflowTaskHandlerImpl struct { + identity string + workflowTaskCompletedID int64 + + // internal state + hasBufferedEvents bool + workflowTaskFailedCause *workflowTaskFailedCause + activityNotStartedCancelled bool + newMutableState workflow.MutableState + stopProcessing bool // should stop processing any more commands + mutableState workflow.MutableState + effects effect.Controller + initiatedChildExecutionsInBatch map[string]struct{} // Set of initiated child executions in the workflow task + updateRegistry update.Registry + + // validation + attrValidator *commandAttrValidator + sizeLimitChecker *workflowSizeChecker + searchAttributesMapperProvider searchattribute.MapperProvider + + logger log.Logger + namespaceRegistry namespace.Registry + metricsHandler metrics.Handler + config *configs.Config + shard shard.Context + tokenSerializer common.TaskTokenSerializer + } + + workflowTaskFailedCause struct { + failedCause enumspb.WorkflowTaskFailedCause + causeErr error + workflowFailure *failurepb.Failure + } + + workflowTaskResponseMutation func( + resp *historyservice.RespondWorkflowTaskCompletedResponse, + ) error + + commandPostAction func( + ctx context.Context, + ) (workflowTaskResponseMutation, error) + + handleCommandResponse struct { + workflowTaskResponseMutation workflowTaskResponseMutation + commandPostAction commandPostAction + } +) + +func newWorkflowTaskHandler( + identity string, + workflowTaskCompletedID int64, + mutableState workflow.MutableState, + updateRegistry update.Registry, + effects effect.Controller, + attrValidator *commandAttrValidator, + sizeLimitChecker *workflowSizeChecker, + logger log.Logger, + namespaceRegistry namespace.Registry, + metricsHandler metrics.Handler, + config *configs.Config, + shard shard.Context, + searchAttributesMapperProvider searchattribute.MapperProvider, + hasBufferedEvents bool, +) *workflowTaskHandlerImpl { + + return &workflowTaskHandlerImpl{ + identity: identity, + workflowTaskCompletedID: workflowTaskCompletedID, + + // internal state + hasBufferedEvents: hasBufferedEvents, + workflowTaskFailedCause: nil, + activityNotStartedCancelled: false, + 
newMutableState: nil, + stopProcessing: false, + mutableState: mutableState, + effects: effects, + initiatedChildExecutionsInBatch: make(map[string]struct{}), + updateRegistry: updateRegistry, + + // validation + attrValidator: attrValidator, + sizeLimitChecker: sizeLimitChecker, + searchAttributesMapperProvider: searchAttributesMapperProvider, + + logger: logger, + namespaceRegistry: namespaceRegistry, + metricsHandler: metricsHandler.WithTags(metrics.OperationTag(metrics.HistoryRespondWorkflowTaskCompletedScope)), + config: config, + shard: shard, + tokenSerializer: common.NewProtoTaskTokenSerializer(), + } +} + +func (handler *workflowTaskHandlerImpl) handleCommands( + ctx context.Context, + commands []*commandpb.Command, + msgs *collection.IndexedTakeList[string, *protocolpb.Message], +) ([]workflowTaskResponseMutation, error) { + if err := handler.attrValidator.validateCommandSequence( + commands, + ); err != nil { + return nil, err + } + + var mutations []workflowTaskResponseMutation + var postActions []commandPostAction + for _, command := range commands { + response, err := handler.handleCommand(ctx, command, msgs) + if err != nil || handler.stopProcessing { + return nil, err + } + if response != nil { + if response.workflowTaskResponseMutation != nil { + mutations = append(mutations, response.workflowTaskResponseMutation) + } + if response.commandPostAction != nil { + postActions = append(postActions, response.commandPostAction) + } + } + } + + if handler.mutableState.IsWorkflowExecutionRunning() { + for _, msg := range msgs.TakeRemaining() { + err := handler.handleMessage(ctx, msg) + if err != nil || handler.stopProcessing { + return nil, err + } + } + } + + for _, postAction := range postActions { + mutation, err := postAction(ctx) + if err != nil || handler.stopProcessing { + return nil, err + } + if mutation != nil { + mutations = append(mutations, mutation) + } + } + + return mutations, nil +} + +//revive:disable:cyclomatic grandfathered +func (handler *workflowTaskHandlerImpl) handleCommand( + ctx context.Context, + command *commandpb.Command, + msgs *collection.IndexedTakeList[string, *protocolpb.Message], +) (*handleCommandResponse, error) { + switch command.GetCommandType() { + case enumspb.COMMAND_TYPE_SCHEDULE_ACTIVITY_TASK: + return handler.handleCommandScheduleActivity(ctx, command.GetScheduleActivityTaskCommandAttributes()) + + case enumspb.COMMAND_TYPE_COMPLETE_WORKFLOW_EXECUTION: + return nil, handler.handleCommandCompleteWorkflow(ctx, command.GetCompleteWorkflowExecutionCommandAttributes(), msgs) + + case enumspb.COMMAND_TYPE_FAIL_WORKFLOW_EXECUTION: + return nil, handler.handleCommandFailWorkflow(ctx, command.GetFailWorkflowExecutionCommandAttributes()) + + case enumspb.COMMAND_TYPE_CANCEL_WORKFLOW_EXECUTION: + return nil, handler.handleCommandCancelWorkflow(ctx, command.GetCancelWorkflowExecutionCommandAttributes()) + + case enumspb.COMMAND_TYPE_START_TIMER: + return nil, handler.handleCommandStartTimer(ctx, command.GetStartTimerCommandAttributes()) + + case enumspb.COMMAND_TYPE_REQUEST_CANCEL_ACTIVITY_TASK: + return nil, handler.handleCommandRequestCancelActivity(ctx, command.GetRequestCancelActivityTaskCommandAttributes()) + + case enumspb.COMMAND_TYPE_CANCEL_TIMER: + return nil, handler.handleCommandCancelTimer(ctx, command.GetCancelTimerCommandAttributes()) + + case enumspb.COMMAND_TYPE_RECORD_MARKER: + return nil, handler.handleCommandRecordMarker(ctx, command.GetRecordMarkerCommandAttributes()) + + case 
enumspb.COMMAND_TYPE_REQUEST_CANCEL_EXTERNAL_WORKFLOW_EXECUTION: + return nil, handler.handleCommandRequestCancelExternalWorkflow(ctx, command.GetRequestCancelExternalWorkflowExecutionCommandAttributes()) + + case enumspb.COMMAND_TYPE_SIGNAL_EXTERNAL_WORKFLOW_EXECUTION: + return nil, handler.handleCommandSignalExternalWorkflow(ctx, command.GetSignalExternalWorkflowExecutionCommandAttributes()) + + case enumspb.COMMAND_TYPE_CONTINUE_AS_NEW_WORKFLOW_EXECUTION: + return nil, handler.handleCommandContinueAsNewWorkflow(ctx, command.GetContinueAsNewWorkflowExecutionCommandAttributes()) + + case enumspb.COMMAND_TYPE_START_CHILD_WORKFLOW_EXECUTION: + return nil, handler.handleCommandStartChildWorkflow(ctx, command.GetStartChildWorkflowExecutionCommandAttributes()) + + case enumspb.COMMAND_TYPE_UPSERT_WORKFLOW_SEARCH_ATTRIBUTES: + return nil, handler.handleCommandUpsertWorkflowSearchAttributes(ctx, command.GetUpsertWorkflowSearchAttributesCommandAttributes()) + + case enumspb.COMMAND_TYPE_MODIFY_WORKFLOW_PROPERTIES: + return nil, handler.handleCommandModifyWorkflowProperties(ctx, command.GetModifyWorkflowPropertiesCommandAttributes()) + + case enumspb.COMMAND_TYPE_PROTOCOL_MESSAGE: + return nil, handler.handleCommandProtocolMessage(ctx, command.GetProtocolMessageCommandAttributes(), msgs) + + default: + return nil, serviceerror.NewInvalidArgument(fmt.Sprintf("Unknown command type: %v", command.GetCommandType())) + } +} + +func (handler *workflowTaskHandlerImpl) handleMessage( + ctx context.Context, + message *protocolpb.Message, +) error { + protocolType, msgType, err := protocol.Identify(message) + if err != nil { + return serviceerror.NewInvalidArgument(err.Error()) + } + if err := handler.sizeLimitChecker.checkIfPayloadSizeExceedsLimit( + // TODO (alex-update): Should use MessageTypeTag here but then it needs to be another metric name too. 
+ metrics.CommandTypeTag(msgType.String()), + message.Body.Size(), + fmt.Sprintf("Message type %v exceeds size limit.", msgType), + ); err != nil { + return handler.failWorkflow(enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_UPDATE_WORKFLOW_EXECUTION_MESSAGE, err) + } + + switch protocolType { + case update.ProtocolV1: + upd, ok := handler.updateRegistry.Find(ctx, message.ProtocolInstanceId) + if !ok { + return handler.failWorkflowTask( + enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_UPDATE_WORKFLOW_EXECUTION_MESSAGE, + serviceerror.NewNotFound(fmt.Sprintf("update %q not found", message.ProtocolInstanceId))) + } + if err := upd.OnMessage(ctx, message, workflow.WithEffects(handler.effects, handler.mutableState)); err != nil { + return handler.failWorkflowTaskOnInvalidArgument( + enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_UPDATE_WORKFLOW_EXECUTION_MESSAGE, err) + } + default: + return handler.failWorkflowTask( + enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_UPDATE_WORKFLOW_EXECUTION_MESSAGE, + serviceerror.NewInvalidArgument(fmt.Sprintf("unsupported protocol type %q", protocolType))) + } + + return nil +} + +func (handler *workflowTaskHandlerImpl) handleCommandProtocolMessage( + ctx context.Context, + attr *commandpb.ProtocolMessageCommandAttributes, + msgs *collection.IndexedTakeList[string, *protocolpb.Message], +) error { + handler.metricsHandler.Counter(metrics.CommandTypeProtocolMessage.GetMetricName()).Record(1) + + executionInfo := handler.mutableState.GetExecutionInfo() + namespaceID := namespace.ID(executionInfo.NamespaceId) + + if err := handler.validateCommandAttr( + func() (enumspb.WorkflowTaskFailedCause, error) { + return handler.attrValidator.validateProtocolMessageAttributes( + namespaceID, + attr, + timestamp.DurationValue(executionInfo.WorkflowRunTimeout), + ) + }, + ); err != nil || handler.stopProcessing { + return err + } + + if msg, ok := msgs.Take(attr.MessageId); ok { + return handler.handleMessage(ctx, msg) + } + return handler.failWorkflowTask( + enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_UPDATE_WORKFLOW_EXECUTION_MESSAGE, + serviceerror.NewInvalidArgument(fmt.Sprintf("ProtocolMessageCommand referenced absent message ID %q", attr.MessageId)), + ) +} + +func (handler *workflowTaskHandlerImpl) handleCommandScheduleActivity( + _ context.Context, + attr *commandpb.ScheduleActivityTaskCommandAttributes, +) (*handleCommandResponse, error) { + + handler.metricsHandler.Counter(metrics.CommandTypeScheduleActivityCounter.GetMetricName()).Record(1) + + executionInfo := handler.mutableState.GetExecutionInfo() + namespaceID := namespace.ID(executionInfo.NamespaceId) + + if err := handler.validateCommandAttr( + func() (enumspb.WorkflowTaskFailedCause, error) { + return handler.attrValidator.validateActivityScheduleAttributes( + namespaceID, + attr, + timestamp.DurationValue(executionInfo.WorkflowRunTimeout), + ) + }, + ); err != nil || handler.stopProcessing { + return nil, err + } + + // TODO: relax this restriction after matching can support this + if attr.UseCompatibleVersion && attr.TaskQueue.GetName() != "" && attr.TaskQueue.Name != handler.mutableState.GetExecutionInfo().TaskQueue { + err := serviceerror.NewInvalidArgument("Activity with UseCompatibleVersion cannot run on different task queue.") + return nil, handler.failWorkflow(enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SCHEDULE_ACTIVITY_ATTRIBUTES, err) + } + + if err := handler.sizeLimitChecker.checkIfPayloadSizeExceedsLimit( + metrics.CommandTypeTag(enumspb.COMMAND_TYPE_SCHEDULE_ACTIVITY_TASK.String()), + attr.GetInput().Size(), + 
"ScheduleActivityTaskCommandAttributes.Input exceeds size limit.", + ); err != nil { + return nil, handler.failWorkflow(enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SCHEDULE_ACTIVITY_ATTRIBUTES, err) + } + if err := handler.sizeLimitChecker.checkIfNumPendingActivitiesExceedsLimit(); err != nil { + return nil, handler.failWorkflowTask(enumspb.WORKFLOW_TASK_FAILED_CAUSE_PENDING_ACTIVITIES_LIMIT_EXCEEDED, err) + } + + enums.SetDefaultTaskQueueKind(&attr.GetTaskQueue().Kind) + + namespace := handler.mutableState.GetNamespaceEntry().Name().String() + + // Enable eager activity start if dynamic config enables it and either 1. workflow doesn't use versioning, + // or 2. workflow uses versioning and activity intends to use a compatible version (since a + // worker is obviously compatible with itself and we are okay dispatching an eager task knowing that there may be a + // newer "default" compatible version). + // Note that if `UseCompatibleVersion` is false, it implies that the activity should run on the "default" version + // for the task queue. + eagerStartActivity := attr.RequestEagerExecution && handler.config.EnableActivityEagerExecution(namespace) && + (!handler.mutableState.GetWorkerVersionStamp().GetUseVersioning() || attr.UseCompatibleVersion) + + _, _, err := handler.mutableState.AddActivityTaskScheduledEvent( + handler.workflowTaskCompletedID, + attr, + eagerStartActivity, + ) + if err != nil { + return nil, handler.failWorkflowTaskOnInvalidArgument(enumspb.WORKFLOW_TASK_FAILED_CAUSE_SCHEDULE_ACTIVITY_DUPLICATE_ID, err) + } + + if !eagerStartActivity { + return &handleCommandResponse{}, nil + } + + return &handleCommandResponse{ + commandPostAction: func(ctx context.Context) (workflowTaskResponseMutation, error) { + return handler.handlePostCommandEagerExecuteActivity(ctx, attr) + }, + }, nil +} + +func (handler *workflowTaskHandlerImpl) handlePostCommandEagerExecuteActivity( + _ context.Context, + attr *commandpb.ScheduleActivityTaskCommandAttributes, +) (workflowTaskResponseMutation, error) { + if !handler.mutableState.IsWorkflowExecutionRunning() { + // workflow closed in the same workflow task + // this function is executed as a callback after all workflow commands + // are handled, so need to check for workflow completion case. 
+ return nil, nil + } + + ai, ok := handler.mutableState.GetActivityByActivityID(attr.ActivityId) + if !ok { + // activity cancelled in the same workflow task + return nil, nil + } + + if _, err := handler.mutableState.AddActivityTaskStartedEvent( + ai, + ai.GetScheduledEventId(), + uuid.New(), + handler.identity, + ); err != nil { + return nil, err + } + + executionInfo := handler.mutableState.GetExecutionInfo() + namespaceID := namespace.ID(executionInfo.NamespaceId) + runID := handler.mutableState.GetExecutionState().RunId + + shardClock, err := handler.shard.NewVectorClock() + if err != nil { + return nil, err + } + + taskToken := tasktoken.NewActivityTaskToken( + namespaceID.String(), + executionInfo.WorkflowId, + runID, + ai.GetScheduledEventId(), + attr.ActivityId, + attr.ActivityType.GetName(), + ai.Attempt, + shardClock, + ai.Version, + ) + serializedToken, err := handler.tokenSerializer.Serialize(taskToken) + if err != nil { + return nil, err + } + + activityTask := &workflowservice.PollActivityTaskQueueResponse{ + ActivityId: attr.ActivityId, + ActivityType: attr.ActivityType, + Header: attr.Header, + Input: attr.Input, + WorkflowExecution: &commonpb.WorkflowExecution{ + WorkflowId: executionInfo.WorkflowId, + RunId: runID, + }, + CurrentAttemptScheduledTime: ai.ScheduledTime, + ScheduledTime: ai.ScheduledTime, + ScheduleToCloseTimeout: attr.ScheduleToCloseTimeout, + StartedTime: ai.StartedTime, + StartToCloseTimeout: attr.StartToCloseTimeout, + HeartbeatTimeout: attr.HeartbeatTimeout, + TaskToken: serializedToken, + Attempt: ai.Attempt, + HeartbeatDetails: ai.LastHeartbeatDetails, + WorkflowType: handler.mutableState.GetWorkflowType(), + WorkflowNamespace: handler.mutableState.GetNamespaceEntry().Name().String(), + } + handler.metricsHandler.Counter( + metrics.ActivityEagerExecutionCounter.GetMetricName(), + ).Record( + 1, + metrics.NamespaceTag(string(handler.mutableState.GetNamespaceEntry().Name())), + metrics.TaskQueueTag(ai.TaskQueue), + ) + + return func(resp *historyservice.RespondWorkflowTaskCompletedResponse) error { + resp.ActivityTasks = append(resp.ActivityTasks, activityTask) + return nil + }, nil +} + +func (handler *workflowTaskHandlerImpl) handleCommandRequestCancelActivity( + _ context.Context, + attr *commandpb.RequestCancelActivityTaskCommandAttributes, +) error { + + handler.metricsHandler.Counter(metrics.CommandTypeCancelActivityCounter.GetMetricName()).Record(1) + + if err := handler.validateCommandAttr( + func() (enumspb.WorkflowTaskFailedCause, error) { + return handler.attrValidator.validateActivityCancelAttributes(attr) + }, + ); err != nil || handler.stopProcessing { + return err + } + + scheduledEventID := attr.GetScheduledEventId() + actCancelReqEvent, ai, err := handler.mutableState.AddActivityTaskCancelRequestedEvent( + handler.workflowTaskCompletedID, + scheduledEventID, + handler.identity, + ) + if err != nil { + return handler.failWorkflowTaskOnInvalidArgument(enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_REQUEST_CANCEL_ACTIVITY_ATTRIBUTES, err) + } + if ai != nil { + // If ai is nil, the activity has already been canceled/completed/timed out. The cancel request + // will be recorded in the history, but no further action will be taken. + + if ai.StartedEventId == common.EmptyEventID { + // We haven't started the activity yet, we can cancel the activity right away and + // schedule a workflow task to ensure the workflow makes progress.
+ _, err = handler.mutableState.AddActivityTaskCanceledEvent( + ai.ScheduledEventId, + ai.StartedEventId, + actCancelReqEvent.GetEventId(), + payloads.EncodeString(activityCancellationMsgActivityNotStarted), + handler.identity, + ) + if err != nil { + return err + } + handler.activityNotStartedCancelled = true + } + } + return nil +} + +func (handler *workflowTaskHandlerImpl) handleCommandStartTimer( + _ context.Context, + attr *commandpb.StartTimerCommandAttributes, +) error { + + handler.metricsHandler.Counter(metrics.CommandTypeStartTimerCounter.GetMetricName()).Record(1) + + if err := handler.validateCommandAttr( + func() (enumspb.WorkflowTaskFailedCause, error) { + return handler.attrValidator.validateTimerScheduleAttributes(attr) + }, + ); err != nil || handler.stopProcessing { + return err + } + + _, _, err := handler.mutableState.AddTimerStartedEvent(handler.workflowTaskCompletedID, attr) + if err != nil { + return handler.failWorkflowTaskOnInvalidArgument(enumspb.WORKFLOW_TASK_FAILED_CAUSE_START_TIMER_DUPLICATE_ID, err) + } + return nil +} + +func (handler *workflowTaskHandlerImpl) handleCommandCompleteWorkflow( + ctx context.Context, + attr *commandpb.CompleteWorkflowExecutionCommandAttributes, + msgs *collection.IndexedTakeList[string, *protocolpb.Message], +) error { + + for _, msg := range msgs.TakeRemaining() { + err := handler.handleMessage(ctx, msg) + if err != nil || handler.stopProcessing { + return err + } + } + + handler.metricsHandler.Counter(metrics.CommandTypeCompleteWorkflowCounter.GetMetricName()).Record(1) + + if handler.hasBufferedEvents { + return handler.failWorkflowTask(enumspb.WORKFLOW_TASK_FAILED_CAUSE_UNHANDLED_COMMAND, nil) + } + + handler.updateRegistry.TerminateUpdates(ctx, workflow.WithEffects(handler.effects, handler.mutableState)) + + if err := handler.validateCommandAttr( + func() (enumspb.WorkflowTaskFailedCause, error) { + return handler.attrValidator.validateCompleteWorkflowExecutionAttributes(attr) + }, + ); err != nil || handler.stopProcessing { + return err + } + + if err := handler.sizeLimitChecker.checkIfPayloadSizeExceedsLimit( + metrics.CommandTypeTag(enumspb.COMMAND_TYPE_COMPLETE_WORKFLOW_EXECUTION.String()), + attr.GetResult().Size(), + "CompleteWorkflowExecutionCommandAttributes.Result exceeds size limit.", + ); err != nil { + return handler.failWorkflow(enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SCHEDULE_ACTIVITY_ATTRIBUTES, err) + } + + // If the workflow task has more than one completion event than just pick the first one + if !handler.mutableState.IsWorkflowExecutionRunning() { + handler.metricsHandler.Counter(metrics.MultipleCompletionCommandsCounter.GetMetricName()).Record(1) + handler.logger.Warn( + "Multiple completion commands", + tag.WorkflowCommandType(enumspb.COMMAND_TYPE_COMPLETE_WORKFLOW_EXECUTION), + tag.ErrorTypeMultipleCompletionCommands, + ) + return nil + } + + cronBackoff := handler.mutableState.GetCronBackoffDuration() + var newExecutionRunID string + if cronBackoff != backoff.NoBackoff { + newExecutionRunID = uuid.New() + } + + // Always add workflow completed event to this one + _, err := handler.mutableState.AddCompletedWorkflowEvent(handler.workflowTaskCompletedID, attr, newExecutionRunID) + if err != nil { + return err + } + + // Check if this workflow has a cron schedule + if cronBackoff != backoff.NoBackoff { + return handler.handleCron(ctx, cronBackoff, attr.GetResult(), nil, newExecutionRunID) + } + + return nil +} + +func (handler *workflowTaskHandlerImpl) handleCommandFailWorkflow( + ctx context.Context, + 
attr *commandpb.FailWorkflowExecutionCommandAttributes, +) error { + + handler.metricsHandler.Counter(metrics.CommandTypeFailWorkflowCounter.GetMetricName()).Record(1) + + if handler.hasBufferedEvents { + return handler.failWorkflowTask(enumspb.WORKFLOW_TASK_FAILED_CAUSE_UNHANDLED_COMMAND, nil) + } + + handler.updateRegistry.TerminateUpdates(ctx, workflow.WithEffects(handler.effects, handler.mutableState)) + + if err := handler.validateCommandAttr( + func() (enumspb.WorkflowTaskFailedCause, error) { + return handler.attrValidator.validateFailWorkflowExecutionAttributes(attr) + }, + ); err != nil || handler.stopProcessing { + return err + } + + err := handler.sizeLimitChecker.checkIfPayloadSizeExceedsLimit( + metrics.CommandTypeTag(enumspb.COMMAND_TYPE_FAIL_WORKFLOW_EXECUTION.String()), + attr.GetFailure().Size(), + "FailWorkflowExecutionCommandAttributes.Failure exceeds size limit.", + ) + if err != nil { + return handler.failWorkflow(enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_FAIL_WORKFLOW_EXECUTION_ATTRIBUTES, err) + } + + // If the workflow task has more than one completion event than just pick the first one + if !handler.mutableState.IsWorkflowExecutionRunning() { + handler.metricsHandler.Counter(metrics.MultipleCompletionCommandsCounter.GetMetricName()).Record(1) + handler.logger.Warn( + "Multiple completion commands", + tag.WorkflowCommandType(enumspb.COMMAND_TYPE_FAIL_WORKFLOW_EXECUTION), + tag.ErrorTypeMultipleCompletionCommands, + ) + return nil + } + + // First check retry policy to do a retry. + retryBackoff, retryState := handler.mutableState.GetRetryBackoffDuration(attr.GetFailure()) + cronBackoff := backoff.NoBackoff + if retryBackoff == backoff.NoBackoff { + // If no retry, check cron. + cronBackoff = handler.mutableState.GetCronBackoffDuration() + } + + var newExecutionRunID string + if retryBackoff != backoff.NoBackoff || cronBackoff != backoff.NoBackoff { + newExecutionRunID = uuid.New() + } + + // Always add workflow failed event + if _, err = handler.mutableState.AddFailWorkflowEvent( + handler.workflowTaskCompletedID, + retryState, + attr, + newExecutionRunID, + ); err != nil { + return err + } + + // Handle retry or cron + if retryBackoff != backoff.NoBackoff { + return handler.handleRetry(ctx, retryBackoff, retryState, attr.GetFailure(), newExecutionRunID) + } else if cronBackoff != backoff.NoBackoff { + return handler.handleCron(ctx, cronBackoff, nil, attr.GetFailure(), newExecutionRunID) + } + + // No retry or cron + return nil +} + +func (handler *workflowTaskHandlerImpl) handleCommandCancelTimer( + _ context.Context, + attr *commandpb.CancelTimerCommandAttributes, +) error { + + handler.metricsHandler.Counter(metrics.CommandTypeCancelTimerCounter.GetMetricName()).Record(1) + + if err := handler.validateCommandAttr( + func() (enumspb.WorkflowTaskFailedCause, error) { + return handler.attrValidator.validateTimerCancelAttributes(attr) + }, + ); err != nil || handler.stopProcessing { + return err + } + + _, err := handler.mutableState.AddTimerCanceledEvent( + handler.workflowTaskCompletedID, + attr, + handler.identity) + if err != nil { + return handler.failWorkflowTaskOnInvalidArgument(enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_CANCEL_TIMER_ATTRIBUTES, err) + } + + // In case the timer was cancelled and its TimerFired event was deleted from buffered events, attempt + // to unset hasBufferedEvents to allow the workflow to complete. 
+ handler.hasBufferedEvents = handler.hasBufferedEvents && handler.mutableState.HasBufferedEvents() + return nil +} + +func (handler *workflowTaskHandlerImpl) handleCommandCancelWorkflow( + ctx context.Context, + attr *commandpb.CancelWorkflowExecutionCommandAttributes, +) error { + + handler.metricsHandler.Counter(metrics.CommandTypeCancelWorkflowCounter.GetMetricName()).Record(1) + + if handler.hasBufferedEvents { + return handler.failWorkflowTask(enumspb.WORKFLOW_TASK_FAILED_CAUSE_UNHANDLED_COMMAND, nil) + } + + handler.updateRegistry.TerminateUpdates(ctx, workflow.WithEffects(handler.effects, handler.mutableState)) + + if err := handler.validateCommandAttr( + func() (enumspb.WorkflowTaskFailedCause, error) { + return handler.attrValidator.validateCancelWorkflowExecutionAttributes(attr) + }, + ); err != nil || handler.stopProcessing { + return err + } + + // If the workflow task has more than one completion event than just pick the first one + if !handler.mutableState.IsWorkflowExecutionRunning() { + handler.metricsHandler.Counter(metrics.MultipleCompletionCommandsCounter.GetMetricName()).Record(1) + handler.logger.Warn( + "Multiple completion commands", + tag.WorkflowCommandType(enumspb.COMMAND_TYPE_CANCEL_WORKFLOW_EXECUTION), + tag.ErrorTypeMultipleCompletionCommands, + ) + return nil + } + + _, err := handler.mutableState.AddWorkflowExecutionCanceledEvent(handler.workflowTaskCompletedID, attr) + return err +} + +func (handler *workflowTaskHandlerImpl) handleCommandRequestCancelExternalWorkflow( + _ context.Context, + attr *commandpb.RequestCancelExternalWorkflowExecutionCommandAttributes, +) error { + + handler.metricsHandler.Counter(metrics.CommandTypeCancelExternalWorkflowCounter.GetMetricName()).Record(1) + + executionInfo := handler.mutableState.GetExecutionInfo() + namespaceID := namespace.ID(executionInfo.NamespaceId) + targetNamespaceID := namespaceID + if attr.GetNamespace() != "" { + targetNamespaceEntry, err := handler.namespaceRegistry.GetNamespace(namespace.Name(attr.GetNamespace())) + if err != nil { + return err + } + targetNamespaceID = targetNamespaceEntry.ID() + } + + if err := handler.validateCommandAttr( + func() (enumspb.WorkflowTaskFailedCause, error) { + return handler.attrValidator.validateCancelExternalWorkflowExecutionAttributes( + namespaceID, + targetNamespaceID, + handler.initiatedChildExecutionsInBatch, + attr, + ) + }, + ); err != nil || handler.stopProcessing { + return err + } + if err := handler.sizeLimitChecker.checkIfNumPendingCancelRequestsExceedsLimit(); err != nil { + return handler.failWorkflowTask(enumspb.WORKFLOW_TASK_FAILED_CAUSE_PENDING_REQUEST_CANCEL_LIMIT_EXCEEDED, err) + } + + cancelRequestID := uuid.New() + _, _, err := handler.mutableState.AddRequestCancelExternalWorkflowExecutionInitiatedEvent( + handler.workflowTaskCompletedID, cancelRequestID, attr, targetNamespaceID, + ) + + return err +} + +func (handler *workflowTaskHandlerImpl) handleCommandRecordMarker( + _ context.Context, + attr *commandpb.RecordMarkerCommandAttributes, +) error { + + handler.metricsHandler.Counter(metrics.CommandTypeRecordMarkerCounter.GetMetricName()).Record(1) + + if err := handler.validateCommandAttr( + func() (enumspb.WorkflowTaskFailedCause, error) { + return handler.attrValidator.validateRecordMarkerAttributes(attr) + }, + ); err != nil || handler.stopProcessing { + return err + } + + if err := handler.sizeLimitChecker.checkIfPayloadSizeExceedsLimit( + metrics.CommandTypeTag(enumspb.COMMAND_TYPE_RECORD_MARKER.String()), + 
common.GetPayloadsMapSize(attr.GetDetails()), + "RecordMarkerCommandAttributes.Details exceeds size limit.", + ); err != nil { + return handler.failWorkflow(enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_RECORD_MARKER_ATTRIBUTES, err) + } + + _, err := handler.mutableState.AddRecordMarkerEvent(handler.workflowTaskCompletedID, attr) + return err +} + +func (handler *workflowTaskHandlerImpl) handleCommandContinueAsNewWorkflow( + ctx context.Context, + attr *commandpb.ContinueAsNewWorkflowExecutionCommandAttributes, +) error { + + handler.metricsHandler.Counter(metrics.CommandTypeContinueAsNewCounter.GetMetricName()).Record(1) + + if handler.hasBufferedEvents { + return handler.failWorkflowTask(enumspb.WORKFLOW_TASK_FAILED_CAUSE_UNHANDLED_COMMAND, nil) + } + + handler.updateRegistry.TerminateUpdates(ctx, workflow.WithEffects(handler.effects, handler.mutableState)) + + namespaceName := handler.mutableState.GetNamespaceEntry().Name() + + unaliasedSas, err := searchattribute.UnaliasFields( + handler.searchAttributesMapperProvider, + attr.GetSearchAttributes(), + namespaceName.String(), + ) + if err != nil { + return handler.failWorkflowTaskOnInvalidArgument(enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SEARCH_ATTRIBUTES, err) + } + if unaliasedSas != nil { + // Create a shallow copy of the `attr` to avoid modification of original `attr`, + // which can be needed again in case of retry. + newAttr := *attr + newAttr.SearchAttributes = unaliasedSas + attr = &newAttr + } + + if err := handler.validateCommandAttr( + func() (enumspb.WorkflowTaskFailedCause, error) { + return handler.attrValidator.validateContinueAsNewWorkflowExecutionAttributes( + namespaceName, + attr, + handler.mutableState.GetExecutionInfo(), + ) + }, + ); err != nil || handler.stopProcessing { + return err + } + + // TODO: relax this restriction after matching can support this + if attr.UseCompatibleVersion && attr.TaskQueue.GetName() != "" && attr.TaskQueue.Name != handler.mutableState.GetExecutionInfo().TaskQueue { + err := serviceerror.NewInvalidArgument("ContinueAsNew with UseCompatibleVersion cannot run on different task queue.") + return handler.failWorkflow(enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_CONTINUE_AS_NEW_ATTRIBUTES, err) + } + + if err := handler.sizeLimitChecker.checkIfPayloadSizeExceedsLimit( + metrics.CommandTypeTag(enumspb.COMMAND_TYPE_CONTINUE_AS_NEW_WORKFLOW_EXECUTION.String()), + attr.GetInput().Size(), + "ContinueAsNewWorkflowExecutionCommandAttributes. Input exceeds size limit.", + ); err != nil { + return handler.failWorkflow(enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_CONTINUE_AS_NEW_ATTRIBUTES, err) + } + + if err := handler.sizeLimitChecker.checkIfMemoSizeExceedsLimit( + attr.GetMemo(), + metrics.CommandTypeTag(enumspb.COMMAND_TYPE_CONTINUE_AS_NEW_WORKFLOW_EXECUTION.String()), + "ContinueAsNewWorkflowExecutionCommandAttributes. 
Memo exceeds size limit.", + ); err != nil { + return handler.failWorkflow(enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_CONTINUE_AS_NEW_ATTRIBUTES, err) + } + + // search attribute validation must be done after unaliasing keys + if err := handler.sizeLimitChecker.checkIfSearchAttributesSizeExceedsLimit( + attr.GetSearchAttributes(), + namespaceName, + metrics.CommandTypeTag(enumspb.COMMAND_TYPE_CONTINUE_AS_NEW_WORKFLOW_EXECUTION.String()), + ); err != nil { + return handler.failWorkflow(enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_CONTINUE_AS_NEW_ATTRIBUTES, err) + } + + // If the workflow task has more than one completion event than just pick the first one + if !handler.mutableState.IsWorkflowExecutionRunning() { + handler.metricsHandler.Counter(metrics.MultipleCompletionCommandsCounter.GetMetricName()).Record(1) + handler.logger.Warn( + "Multiple completion commands", + tag.WorkflowCommandType(enumspb.COMMAND_TYPE_CONTINUE_AS_NEW_WORKFLOW_EXECUTION), + tag.ErrorTypeMultipleCompletionCommands, + ) + return nil + } + + // Extract parentNamespace, so it can be passed down to next run of workflow execution + var parentNamespace namespace.Name + if handler.mutableState.HasParentExecution() { + parentNamespaceID := namespace.ID(handler.mutableState.GetExecutionInfo().ParentNamespaceId) + parentNamespaceEntry, err := handler.namespaceRegistry.GetNamespaceByID(parentNamespaceID) + if err == nil { + parentNamespace = parentNamespaceEntry.Name() + } + } + + _, newMutableState, err := handler.mutableState.AddContinueAsNewEvent( + ctx, + handler.workflowTaskCompletedID, + handler.workflowTaskCompletedID, + parentNamespace, + attr, + ) + if err != nil { + return err + } + + handler.newMutableState = newMutableState + return nil +} + +func (handler *workflowTaskHandlerImpl) handleCommandStartChildWorkflow( + _ context.Context, + attr *commandpb.StartChildWorkflowExecutionCommandAttributes, +) error { + + handler.metricsHandler.Counter(metrics.CommandTypeChildWorkflowCounter.GetMetricName()).Record(1) + + parentNamespaceEntry := handler.mutableState.GetNamespaceEntry() + parentNamespaceID := parentNamespaceEntry.ID() + parentNamespace := parentNamespaceEntry.Name() + targetNamespaceID := parentNamespaceID + targetNamespace := parentNamespace + if attr.GetNamespace() != "" { + targetNamespaceEntry, err := handler.namespaceRegistry.GetNamespace(namespace.Name(attr.GetNamespace())) + if err != nil { + return err + } + targetNamespace = targetNamespaceEntry.Name() + targetNamespaceID = targetNamespaceEntry.ID() + } else { + attr.Namespace = parentNamespace.String() + } + + unaliasedSas, err := searchattribute.UnaliasFields( + handler.searchAttributesMapperProvider, + attr.GetSearchAttributes(), + targetNamespace.String(), + ) + if err != nil { + return handler.failWorkflowTaskOnInvalidArgument(enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SEARCH_ATTRIBUTES, err) + } + if unaliasedSas != nil { + // Create a shallow copy of the `attr` to avoid modification of original `attr`, + // which can be needed again in case of retry. 
+ newAttr := *attr + newAttr.SearchAttributes = unaliasedSas + attr = &newAttr + } + + if err := handler.validateCommandAttr( + func() (enumspb.WorkflowTaskFailedCause, error) { + return handler.attrValidator.validateStartChildExecutionAttributes( + parentNamespaceID, + targetNamespaceID, + targetNamespace, + attr, + handler.mutableState.GetExecutionInfo(), + handler.config.DefaultWorkflowTaskTimeout, + ) + }, + ); err != nil || handler.stopProcessing { + return err + } + + // TODO: relax this restriction after matching can support this + if attr.UseCompatibleVersion && attr.TaskQueue.GetName() != "" && attr.TaskQueue.Name != handler.mutableState.GetExecutionInfo().TaskQueue { + err := serviceerror.NewInvalidArgument("StartChildWorkflowExecution with UseCompatibleVersion cannot run on different task queue.") + return handler.failWorkflow(enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_START_CHILD_EXECUTION_ATTRIBUTES, err) + } + + if err := handler.sizeLimitChecker.checkIfPayloadSizeExceedsLimit( + metrics.CommandTypeTag(enumspb.COMMAND_TYPE_START_CHILD_WORKFLOW_EXECUTION.String()), + attr.GetInput().Size(), + "StartChildWorkflowExecutionCommandAttributes. Input exceeds size limit.", + ); err != nil { + return handler.failWorkflow(enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_START_CHILD_EXECUTION_ATTRIBUTES, err) + } + + if err := handler.sizeLimitChecker.checkIfMemoSizeExceedsLimit( + attr.GetMemo(), + metrics.CommandTypeTag(enumspb.COMMAND_TYPE_START_CHILD_WORKFLOW_EXECUTION.String()), + "StartChildWorkflowExecutionCommandAttributes.Memo exceeds size limit.", + ); err != nil { + return handler.failWorkflow(enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_START_CHILD_EXECUTION_ATTRIBUTES, err) + } + + // search attribute validation must be done after unaliasing keys + if err := handler.sizeLimitChecker.checkIfSearchAttributesSizeExceedsLimit( + attr.GetSearchAttributes(), + targetNamespace, + metrics.CommandTypeTag(enumspb.COMMAND_TYPE_START_CHILD_WORKFLOW_EXECUTION.String()), + ); err != nil { + return handler.failWorkflow(enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_START_CHILD_EXECUTION_ATTRIBUTES, err) + } + + // child workflow limit + if err := handler.sizeLimitChecker.checkIfNumChildWorkflowsExceedsLimit(); err != nil { + return handler.failWorkflowTask(enumspb.WORKFLOW_TASK_FAILED_CAUSE_PENDING_CHILD_WORKFLOWS_LIMIT_EXCEEDED, err) + } + + enabled := handler.config.EnableParentClosePolicy(parentNamespace.String()) + if enabled { + enums.SetDefaultParentClosePolicy(&attr.ParentClosePolicy) + } else { + attr.ParentClosePolicy = enumspb.PARENT_CLOSE_POLICY_ABANDON + } + + enums.SetDefaultWorkflowIdReusePolicy(&attr.WorkflowIdReusePolicy) + + requestID := uuid.New() + _, _, err = handler.mutableState.AddStartChildWorkflowExecutionInitiatedEvent( + handler.workflowTaskCompletedID, requestID, attr, targetNamespaceID, + ) + if err == nil { + // Keep track of all child initiated commands in this workflow task to validate request cancel commands + handler.initiatedChildExecutionsInBatch[attr.GetWorkflowId()] = struct{}{} + } + return err +} + +func (handler *workflowTaskHandlerImpl) handleCommandSignalExternalWorkflow( + _ context.Context, + attr *commandpb.SignalExternalWorkflowExecutionCommandAttributes, +) error { + + handler.metricsHandler.Counter(metrics.CommandTypeSignalExternalWorkflowCounter.GetMetricName()).Record(1) + + executionInfo := handler.mutableState.GetExecutionInfo() + namespaceID := namespace.ID(executionInfo.NamespaceId) + targetNamespaceID := namespaceID + if attr.GetNamespace() != "" { + 
targetNamespaceEntry, err := handler.namespaceRegistry.GetNamespace(namespace.Name(attr.GetNamespace())) + if err != nil { + return err + } + targetNamespaceID = targetNamespaceEntry.ID() + } + + if err := handler.validateCommandAttr( + func() (enumspb.WorkflowTaskFailedCause, error) { + return handler.attrValidator.validateSignalExternalWorkflowExecutionAttributes( + namespaceID, + targetNamespaceID, + attr, + ) + }, + ); err != nil || handler.stopProcessing { + return err + } + if err := handler.sizeLimitChecker.checkIfNumPendingSignalsExceedsLimit(); err != nil { + return handler.failWorkflowTask(enumspb.WORKFLOW_TASK_FAILED_CAUSE_PENDING_SIGNALS_LIMIT_EXCEEDED, err) + } + + if err := handler.sizeLimitChecker.checkIfPayloadSizeExceedsLimit( + metrics.CommandTypeTag(enumspb.COMMAND_TYPE_SIGNAL_EXTERNAL_WORKFLOW_EXECUTION.String()), + attr.GetInput().Size(), + "SignalExternalWorkflowExecutionCommandAttributes.Input exceeds size limit.", + ); err != nil { + return handler.failWorkflow(enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SIGNAL_WORKFLOW_EXECUTION_ATTRIBUTES, err) + } + + signalRequestID := uuid.New() // for deduplicate + _, _, err := handler.mutableState.AddSignalExternalWorkflowExecutionInitiatedEvent( + handler.workflowTaskCompletedID, signalRequestID, attr, targetNamespaceID, + ) + return err +} + +func (handler *workflowTaskHandlerImpl) handleCommandUpsertWorkflowSearchAttributes( + _ context.Context, + attr *commandpb.UpsertWorkflowSearchAttributesCommandAttributes, +) error { + + handler.metricsHandler.Counter(metrics.CommandTypeUpsertWorkflowSearchAttributesCounter.GetMetricName()).Record(1) + + // get namespace name + executionInfo := handler.mutableState.GetExecutionInfo() + namespaceID := namespace.ID(executionInfo.NamespaceId) + namespaceEntry, err := handler.namespaceRegistry.GetNamespaceByID(namespaceID) + if err != nil { + return serviceerror.NewUnavailable(fmt.Sprintf("Unable to get namespace for namespaceID: %v.", namespaceID)) + } + namespace := namespaceEntry.Name() + + unaliasedSas, err := searchattribute.UnaliasFields( + handler.searchAttributesMapperProvider, + attr.GetSearchAttributes(), + namespace.String(), + ) + if err != nil { + return handler.failWorkflowTaskOnInvalidArgument(enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SEARCH_ATTRIBUTES, err) + } + if unaliasedSas != nil { + // Create a shallow copy of the `attr` to avoid modification of original `attr`, + // which can be needed again in case of retry. 
+ newAttr := *attr + newAttr.SearchAttributes = unaliasedSas + attr = &newAttr + } + + // valid search attributes for upsert + if err := handler.validateCommandAttr( + func() (enumspb.WorkflowTaskFailedCause, error) { + return handler.attrValidator.validateUpsertWorkflowSearchAttributes(namespace, attr) + }, + ); err != nil || handler.stopProcessing { + return err + } + + // blob size limit check + if err := handler.sizeLimitChecker.checkIfPayloadSizeExceedsLimit( + metrics.CommandTypeTag(enumspb.COMMAND_TYPE_UPSERT_WORKFLOW_SEARCH_ATTRIBUTES.String()), + payloadsMapSize(attr.GetSearchAttributes().GetIndexedFields()), + "UpsertWorkflowSearchAttributesCommandAttributes exceeds size limit.", + ); err != nil { + return handler.failWorkflow(enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SEARCH_ATTRIBUTES, err) + } + + // new search attributes size limit check + // search attribute validation must be done after unaliasing keys + err = handler.sizeLimitChecker.checkIfSearchAttributesSizeExceedsLimit( + &commonpb.SearchAttributes{ + IndexedFields: payload.MergeMapOfPayload( + executionInfo.SearchAttributes, + attr.GetSearchAttributes().GetIndexedFields(), + ), + }, + namespace, + metrics.CommandTypeTag(enumspb.COMMAND_TYPE_UPSERT_WORKFLOW_SEARCH_ATTRIBUTES.String()), + ) + if err != nil { + return handler.failWorkflow(enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SEARCH_ATTRIBUTES, err) + } + + _, err = handler.mutableState.AddUpsertWorkflowSearchAttributesEvent( + handler.workflowTaskCompletedID, attr, + ) + return err +} + +func (handler *workflowTaskHandlerImpl) handleCommandModifyWorkflowProperties( + _ context.Context, + attr *commandpb.ModifyWorkflowPropertiesCommandAttributes, +) error { + + handler.metricsHandler.Counter(metrics.CommandTypeModifyWorkflowPropertiesCounter.GetMetricName()).Record(1) + + // get namespace name + executionInfo := handler.mutableState.GetExecutionInfo() + namespaceID := namespace.ID(executionInfo.NamespaceId) + namespaceEntry, err := handler.namespaceRegistry.GetNamespaceByID(namespaceID) + if err != nil { + return serviceerror.NewUnavailable(fmt.Sprintf("Unable to get namespace for namespaceID: %v.", namespaceID)) + } + namespace := namespaceEntry.Name() + + // valid properties + if err := handler.validateCommandAttr( + func() (enumspb.WorkflowTaskFailedCause, error) { + return handler.attrValidator.validateModifyWorkflowProperties(namespace, attr) + }, + ); err != nil || handler.stopProcessing { + return err + } + + // blob size limit check + if err := handler.sizeLimitChecker.checkIfPayloadSizeExceedsLimit( + metrics.CommandTypeTag(enumspb.COMMAND_TYPE_MODIFY_WORKFLOW_PROPERTIES.String()), + payloadsMapSize(attr.GetUpsertedMemo().GetFields()), + "ModifyWorkflowPropertiesCommandAttributes exceeds size limit.", + ); err != nil { + return handler.failWorkflow(enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_MODIFY_WORKFLOW_PROPERTIES_ATTRIBUTES, err) + } + + // new memo size limit check + err = handler.sizeLimitChecker.checkIfMemoSizeExceedsLimit( + &commonpb.Memo{ + Fields: payload.MergeMapOfPayload(executionInfo.Memo, attr.GetUpsertedMemo().GetFields()), + }, + metrics.CommandTypeTag(enumspb.COMMAND_TYPE_MODIFY_WORKFLOW_PROPERTIES.String()), + "ModifyWorkflowPropertiesCommandAttributes. 
Memo exceeds size limit.", + ) + if err != nil { + return handler.failWorkflow(enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_MODIFY_WORKFLOW_PROPERTIES_ATTRIBUTES, err) + } + + _, err = handler.mutableState.AddWorkflowPropertiesModifiedEvent( + handler.workflowTaskCompletedID, attr, + ) + return err +} + +func payloadsMapSize(fields map[string]*commonpb.Payload) int { + result := 0 + + for k, v := range fields { + result += len(k) + result += len(v.GetData()) + } + return result +} + +func (handler *workflowTaskHandlerImpl) handleRetry( + ctx context.Context, + backoffInterval time.Duration, + retryState enumspb.RetryState, + failure *failurepb.Failure, + newRunID string, +) error { + startEvent, err := handler.mutableState.GetStartEvent(ctx) + if err != nil { + return err + } + startAttr := startEvent.GetWorkflowExecutionStartedEventAttributes() + + newMutableState := workflow.NewMutableState( + handler.shard, + handler.shard.GetEventsCache(), + handler.shard.GetLogger(), + handler.mutableState.GetNamespaceEntry(), + handler.shard.GetTimeSource().Now(), + ) + + err = workflow.SetupNewWorkflowForRetryOrCron( + ctx, + handler.mutableState, + newMutableState, + newRunID, + startAttr, + nil, + failure, + backoffInterval, + enumspb.CONTINUE_AS_NEW_INITIATOR_RETRY, + ) + if err != nil { + return err + } + + err = newMutableState.SetHistoryTree( + ctx, + newMutableState.GetExecutionInfo().WorkflowExecutionTimeout, + newMutableState.GetExecutionInfo().WorkflowRunTimeout, + newRunID, + ) + if err != nil { + return err + } + + handler.newMutableState = newMutableState + return nil +} + +func (handler *workflowTaskHandlerImpl) handleCron( + ctx context.Context, + backoffInterval time.Duration, + lastCompletionResult *commonpb.Payloads, + failure *failurepb.Failure, + newRunID string, +) error { + startEvent, err := handler.mutableState.GetStartEvent(ctx) + if err != nil { + return err + } + startAttr := startEvent.GetWorkflowExecutionStartedEventAttributes() + + if failure != nil { + lastCompletionResult = startAttr.LastCompletionResult + } + + newMutableState := workflow.NewMutableState( + handler.shard, + handler.shard.GetEventsCache(), + handler.shard.GetLogger(), + handler.mutableState.GetNamespaceEntry(), + handler.shard.GetTimeSource().Now(), + ) + + err = workflow.SetupNewWorkflowForRetryOrCron( + ctx, + handler.mutableState, + newMutableState, + newRunID, + startAttr, + lastCompletionResult, + failure, + backoffInterval, + enumspb.CONTINUE_AS_NEW_INITIATOR_CRON_SCHEDULE, + ) + if err != nil { + return err + } + + err = newMutableState.SetHistoryTree( + ctx, + newMutableState.GetExecutionInfo().WorkflowExecutionTimeout, + newMutableState.GetExecutionInfo().WorkflowRunTimeout, + newRunID, + ) + if err != nil { + return err + } + + handler.newMutableState = newMutableState + return nil +} + +func (handler *workflowTaskHandlerImpl) validateCommandAttr( + validationFn commandAttrValidationFn, +) error { + + return handler.failWorkflowTaskOnInvalidArgument(validationFn()) +} + +func (handler *workflowTaskHandlerImpl) failWorkflowTaskOnInvalidArgument( + wtFailedCause enumspb.WorkflowTaskFailedCause, + err error, +) error { + + switch err.(type) { + case *serviceerror.InvalidArgument: + return handler.failWorkflowTask(wtFailedCause, err) + default: + return err + } +} + +func (handler *workflowTaskHandlerImpl) failWorkflowTask( + failedCause enumspb.WorkflowTaskFailedCause, + causeErr error, +) error { + + handler.workflowTaskFailedCause = newWorkflowTaskFailedCause( + failedCause, + causeErr, + nil) + 
handler.stopProcessing = true + // NOTE: failWorkflowTask always return nil. + // It is important to clear returned error if WT needs to be failed to properly add WTFailed event. + // Handler will rely on stopProcessing flag and workflowTaskFailedCause field. + return nil +} + +func (handler *workflowTaskHandlerImpl) failWorkflow( + failedCause enumspb.WorkflowTaskFailedCause, + causeErr error, +) error { + + handler.workflowTaskFailedCause = newWorkflowTaskFailedCause( + failedCause, + causeErr, + failure.NewServerFailure(causeErr.Error(), true)) + handler.stopProcessing = true + // NOTE: failWorkflow always return nil. + // It is important to clear returned error if WT needs to be failed to properly add WTFailed and FailWorkflow events. + // Handler will rely on stopProcessing flag and workflowTaskFailedCause field. + return nil +} + +func newWorkflowTaskFailedCause(failedCause enumspb.WorkflowTaskFailedCause, causeErr error, workflowFailure *failurepb.Failure) *workflowTaskFailedCause { + + return &workflowTaskFailedCause{ + failedCause: failedCause, + causeErr: causeErr, + workflowFailure: workflowFailure, + } +} + +func (c *workflowTaskFailedCause) Message() string { + + if c.causeErr == nil { + return c.failedCause.String() + } + + return fmt.Sprintf("%v: %v", c.failedCause, c.causeErr.Error()) +} diff -Nru temporal-1.21.5-1/src/service/history/workflow_task_handler_callbacks.go temporal-1.22.5/src/service/history/workflow_task_handler_callbacks.go --- temporal-1.21.5-1/src/service/history/workflow_task_handler_callbacks.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/service/history/workflow_task_handler_callbacks.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,1043 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
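+ +// workflow_task_handler_callbacks.go implements the history service's workflow task lifecycle callbacks: scheduling, recording starts, and handling failed or completed workflow tasks.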
+ +package history + +import ( + "context" + "fmt" + + commandpb "go.temporal.io/api/command/v1" + enumspb "go.temporal.io/api/enums/v1" + historypb "go.temporal.io/api/history/v1" + protocolpb "go.temporal.io/api/protocol/v1" + querypb "go.temporal.io/api/query/v1" + "go.temporal.io/api/serviceerror" + taskqueuepb "go.temporal.io/api/taskqueue/v1" + "go.temporal.io/api/workflowservice/v1" + + enumsspb "go.temporal.io/server/api/enums/v1" + "go.temporal.io/server/api/historyservice/v1" + "go.temporal.io/server/common" + "go.temporal.io/server/common/clock" + "go.temporal.io/server/common/collection" + "go.temporal.io/server/common/definition" + "go.temporal.io/server/common/failure" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/log/tag" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/namespace" + "go.temporal.io/server/common/payloads" + "go.temporal.io/server/common/persistence" + "go.temporal.io/server/common/primitives/timestamp" + "go.temporal.io/server/common/searchattribute" + serviceerrors "go.temporal.io/server/common/serviceerror" + "go.temporal.io/server/internal/effect" + "go.temporal.io/server/service/history/api" + "go.temporal.io/server/service/history/configs" + "go.temporal.io/server/service/history/consts" + "go.temporal.io/server/service/history/shard" + "go.temporal.io/server/service/history/workflow" + "go.temporal.io/server/service/history/workflow/update" +) + +type ( + // workflow task business logic handler + workflowTaskHandlerCallbacks interface { + handleWorkflowTaskScheduled(context.Context, *historyservice.ScheduleWorkflowTaskRequest) error + handleWorkflowTaskStarted(context.Context, + *historyservice.RecordWorkflowTaskStartedRequest) (*historyservice.RecordWorkflowTaskStartedResponse, error) + handleWorkflowTaskFailed(context.Context, + *historyservice.RespondWorkflowTaskFailedRequest) error + handleWorkflowTaskCompleted(context.Context, + *historyservice.RespondWorkflowTaskCompletedRequest) (*historyservice.RespondWorkflowTaskCompletedResponse, error) + verifyFirstWorkflowTaskScheduled(context.Context, *historyservice.VerifyFirstWorkflowTaskScheduledRequest) error + // TODO also include the handle of workflow task timeout here + } + + workflowTaskHandlerCallbacksImpl struct { + currentClusterName string + config *configs.Config + shard shard.Context + workflowConsistencyChecker api.WorkflowConsistencyChecker + timeSource clock.TimeSource + namespaceRegistry namespace.Registry + tokenSerializer common.TaskTokenSerializer + metricsHandler metrics.Handler + logger log.Logger + throttledLogger log.Logger + commandAttrValidator *commandAttrValidator + searchAttributesMapperProvider searchattribute.MapperProvider + searchAttributesValidator *searchattribute.Validator + } +) + +func newWorkflowTaskHandlerCallback(historyEngine *historyEngineImpl) *workflowTaskHandlerCallbacksImpl { + return &workflowTaskHandlerCallbacksImpl{ + currentClusterName: historyEngine.currentClusterName, + config: historyEngine.config, + shard: historyEngine.shard, + workflowConsistencyChecker: historyEngine.workflowConsistencyChecker, + timeSource: historyEngine.shard.GetTimeSource(), + namespaceRegistry: historyEngine.shard.GetNamespaceRegistry(), + tokenSerializer: historyEngine.tokenSerializer, + metricsHandler: historyEngine.metricsHandler, + logger: historyEngine.logger, + throttledLogger: historyEngine.throttledLogger, + commandAttrValidator: newCommandAttrValidator( + historyEngine.shard.GetNamespaceRegistry(), + historyEngine.config, 
+ historyEngine.searchAttributesValidator, + ), + searchAttributesMapperProvider: historyEngine.shard.GetSearchAttributesMapperProvider(), + searchAttributesValidator: historyEngine.searchAttributesValidator, + } +} + +func (handler *workflowTaskHandlerCallbacksImpl) handleWorkflowTaskScheduled( + ctx context.Context, + req *historyservice.ScheduleWorkflowTaskRequest, +) error { + + _, err := api.GetActiveNamespace(handler.shard, namespace.ID(req.GetNamespaceId())) + if err != nil { + return err + } + + return api.GetAndUpdateWorkflowWithNew( + ctx, + req.ChildClock, + api.BypassMutableStateConsistencyPredicate, + definition.NewWorkflowKey( + req.NamespaceId, + req.WorkflowExecution.WorkflowId, + req.WorkflowExecution.RunId, + ), + func(workflowContext api.WorkflowContext) (*api.UpdateWorkflowAction, error) { + mutableState := workflowContext.GetMutableState() + if !mutableState.IsWorkflowExecutionRunning() { + return nil, consts.ErrWorkflowCompleted + } + + if req.IsFirstWorkflowTask && mutableState.HadOrHasWorkflowTask() { + return &api.UpdateWorkflowAction{ + Noop: true, + }, nil + } + + startEvent, err := mutableState.GetStartEvent(ctx) + if err != nil { + return nil, err + } + if _, err := mutableState.AddFirstWorkflowTaskScheduled(req.ParentClock, startEvent, false); err != nil { + return nil, err + } + + return &api.UpdateWorkflowAction{}, nil + }, + nil, + handler.shard, + handler.workflowConsistencyChecker, + ) +} + +func (handler *workflowTaskHandlerCallbacksImpl) handleWorkflowTaskStarted( + ctx context.Context, + req *historyservice.RecordWorkflowTaskStartedRequest, +) (*historyservice.RecordWorkflowTaskStartedResponse, error) { + namespaceEntry, err := api.GetActiveNamespace(handler.shard, namespace.ID(req.GetNamespaceId())) + if err != nil { + return nil, err + } + + scheduledEventID := req.GetScheduledEventId() + requestID := req.GetRequestId() + + var resp *historyservice.RecordWorkflowTaskStartedResponse + err = api.GetAndUpdateWorkflowWithNew( + ctx, + req.Clock, + api.BypassMutableStateConsistencyPredicate, + definition.NewWorkflowKey( + req.NamespaceId, + req.WorkflowExecution.WorkflowId, + req.WorkflowExecution.RunId, + ), + func(workflowContext api.WorkflowContext) (*api.UpdateWorkflowAction, error) { + mutableState := workflowContext.GetMutableState() + if !mutableState.IsWorkflowExecutionRunning() { + return nil, consts.ErrWorkflowCompleted + } + + workflowTask := mutableState.GetWorkflowTaskByID(scheduledEventID) + metricsScope := handler.metricsHandler.WithTags(metrics.OperationTag(metrics.HistoryRecordWorkflowTaskStartedScope)) + + // First check to see if the cache needs to be refreshed as we could potentially have stale workflow execution in + // some extreme Cassandra failure cases. + if workflowTask == nil && scheduledEventID >= mutableState.GetNextEventID() { + metricsScope.Counter(metrics.StaleMutableStateCounter.GetMetricName()).Record(1) + // Reload workflow execution history + // ErrStaleState will trigger the updateWorkflow function to reload the mutable state + return nil, consts.ErrStaleState + } + + // Check execution state to make sure the task is in the list of outstanding tasks and has not yet started. If + // the task is not outstanding, then it is most probably a duplicate, so complete the task. + if workflowTask == nil { + // Looks like WorkflowTask already completed as a result of another call. + // It is OK to drop the task at this point.
+ return nil, serviceerror.NewNotFound("Workflow task not found.") + } + + updateAction := &api.UpdateWorkflowAction{} + + if workflowTask.StartedEventID != common.EmptyEventID { + // If the workflow task was started as part of the current request scope then return a positive response + if workflowTask.RequestID == requestID { + resp, err = handler.createRecordWorkflowTaskStartedResponse(mutableState, workflowContext.GetUpdateRegistry(ctx), workflowTask, req.PollRequest.GetIdentity()) + if err != nil { + return nil, err + } + updateAction.Noop = true + return updateAction, nil + } + + // Looks like WorkflowTask already started as a result of another call. + // It is OK to drop the task at this point. + return nil, serviceerrors.NewTaskAlreadyStarted("Workflow") + } + + // Assume a workflow is running on the sticky task queue of workerA. + // After workerA is dead for more than 10s, matching will return a StickyWorkerUnavailable error when history + // tries to push a new workflow task. When history sees that error, it will fall back to pushing the task to + // its original normal task queue without clearing its stickiness to avoid an extra persistence write. + // We will clear the stickiness here when that task is delivered to another worker polling from the normal queue. + // The stickiness info is used by the frontend to decide if it should send down partial history or full history. + // Sending down partial history will cost the worker an extra fetch to the server for the full history. + currentTaskQueue := mutableState.CurrentTaskQueue() + if currentTaskQueue.Kind == enumspb.TASK_QUEUE_KIND_STICKY && + currentTaskQueue.GetName() != req.PollRequest.TaskQueue.GetName() { + // req.PollRequest.TaskQueue.GetName() may include a partition, but we only check when sticky is enabled, + // and a sticky queue never has partitions, so it does not matter.
+ mutableState.ClearStickyTaskQueue() + } + + _, workflowTask, err = mutableState.AddWorkflowTaskStartedEvent( + scheduledEventID, + requestID, + req.PollRequest.TaskQueue, + req.PollRequest.Identity, + ) + if err != nil { + // Unable to add WorkflowTaskStarted event to history + return nil, err + } + + if workflowTask.Type == enumsspb.WORKFLOW_TASK_TYPE_SPECULATIVE { + updateAction.Noop = true + } + + workflowScheduleToStartLatency := workflowTask.StartedTime.Sub(*workflowTask.ScheduledTime) + namespaceName := namespaceEntry.Name() + taskQueue := workflowTask.TaskQueue + metrics.GetPerTaskQueueScope( + metricsScope, + namespaceName.String(), + taskQueue.GetName(), + taskQueue.GetKind(), + ).Timer(metrics.TaskScheduleToStartLatency.GetMetricName()).Record( + workflowScheduleToStartLatency, + metrics.TaskQueueTypeTag(enumspb.TASK_QUEUE_TYPE_WORKFLOW), + ) + + resp, err = handler.createRecordWorkflowTaskStartedResponse(mutableState, workflowContext.GetUpdateRegistry(ctx), workflowTask, req.PollRequest.GetIdentity()) + if err != nil { + return nil, err + } + return updateAction, nil + }, + nil, + handler.shard, + handler.workflowConsistencyChecker, + ) + + if err != nil { + return nil, err + } + return resp, nil +} + +func (handler *workflowTaskHandlerCallbacksImpl) handleWorkflowTaskFailed( + ctx context.Context, + req *historyservice.RespondWorkflowTaskFailedRequest, +) (retError error) { + + _, err := api.GetActiveNamespace(handler.shard, namespace.ID(req.GetNamespaceId())) + if err != nil { + return err + } + + request := req.FailedRequest + token, err := handler.tokenSerializer.Deserialize(request.TaskToken) + if err != nil { + return consts.ErrDeserializingToken + } + + return api.GetAndUpdateWorkflowWithNew( + ctx, + token.Clock, + api.BypassMutableStateConsistencyPredicate, + definition.NewWorkflowKey( + token.NamespaceId, + token.WorkflowId, + token.RunId, + ), + func(workflowContext api.WorkflowContext) (*api.UpdateWorkflowAction, error) { + mutableState := workflowContext.GetMutableState() + if !mutableState.IsWorkflowExecutionRunning() { + return nil, consts.ErrWorkflowCompleted + } + + scheduledEventID := token.GetScheduledEventId() + workflowTask := mutableState.GetWorkflowTaskByID(scheduledEventID) + + if workflowTask == nil || + workflowTask.StartedEventID == common.EmptyEventID || + (token.StartedEventId != common.EmptyEventID && token.StartedEventId != workflowTask.StartedEventID) || + (token.StartedTime != nil && workflowTask.StartedTime != nil && !token.StartedTime.Equal(*workflowTask.StartedTime)) || + workflowTask.Attempt != token.Attempt || + (workflowTask.Version != common.EmptyVersion && token.Version != workflowTask.Version) { + // we have not altered mutable state yet, so release it with nil to avoid clearing MS. + workflowContext.GetReleaseFn()(nil) + return nil, serviceerror.NewNotFound("Workflow task not found.") + } + + if _, err := mutableState.AddWorkflowTaskFailedEvent( + workflowTask, + request.GetCause(), + request.GetFailure(), + request.GetIdentity(), + request.GetBinaryChecksum(), + "", + "", + 0); err != nil { + return nil, err + } + + // TODO (alex-update): if it was a speculative WT that failed, and there is nothing but pending updates, + // the new WT should also be created as speculative (or not?). Currently, it will be recreated as a normal WT.
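+ // Request creation of a new workflow task so the workflow can make progress after the failure is recorded.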
+ return &api.UpdateWorkflowAction{ + Noop: false, + CreateWorkflowTask: true, + }, nil + }, + nil, + handler.shard, + handler.workflowConsistencyChecker, + ) +} + +func (handler *workflowTaskHandlerCallbacksImpl) handleWorkflowTaskCompleted( + ctx context.Context, + req *historyservice.RespondWorkflowTaskCompletedRequest, +) (_ *historyservice.RespondWorkflowTaskCompletedResponse, retError error) { + namespaceEntry, err := api.GetActiveNamespace(handler.shard, namespace.ID(req.GetNamespaceId())) + if err != nil { + return nil, err + } + + request := req.CompleteRequest + token, err0 := handler.tokenSerializer.Deserialize(request.TaskToken) + if err0 != nil { + return nil, consts.ErrDeserializingToken + } + + workflowContext, err := handler.workflowConsistencyChecker.GetWorkflowContext( + ctx, + token.Clock, + func(mutableState workflow.MutableState) bool { + workflowTask := mutableState.GetWorkflowTaskByID(token.GetScheduledEventId()) + if workflowTask == nil && token.GetScheduledEventId() >= mutableState.GetNextEventID() { + handler.metricsHandler.Counter(metrics.StaleMutableStateCounter.GetMetricName()).Record( + 1, + metrics.OperationTag(metrics.HistoryRespondWorkflowTaskCompletedScope)) + return false + } + return true + }, + definition.NewWorkflowKey( + namespaceEntry.ID().String(), + token.WorkflowId, + token.RunId, + ), + workflow.LockPriorityHigh, + ) + if err != nil { + return nil, err + } + weContext := workflowContext.GetContext() + ms := workflowContext.GetMutableState() + + currentWorkflowTask := ms.GetWorkflowTaskByID(token.GetScheduledEventId()) + if !ms.IsWorkflowExecutionRunning() || + currentWorkflowTask == nil || + currentWorkflowTask.StartedEventID == common.EmptyEventID || + (token.StartedEventId != common.EmptyEventID && token.StartedEventId != currentWorkflowTask.StartedEventID) || + (token.StartedTime != nil && currentWorkflowTask.StartedTime != nil && !token.StartedTime.Equal(*currentWorkflowTask.StartedTime)) || + currentWorkflowTask.Attempt != token.Attempt || + (token.Version != common.EmptyVersion && token.Version != currentWorkflowTask.Version) { + // we have not altered mutable state yet, so release it with nil to avoid clearing MS. + workflowContext.GetReleaseFn()(nil) + return nil, serviceerror.NewNotFound("Workflow task not found.") + } + + defer func() { workflowContext.GetReleaseFn()(retError) }() + + var effects effect.Buffer + defer func() { + // code in this file and workflowTaskHandler is inconsistent in the way + // errors are returned - some functions which appear to return error + // actually return nil in all cases and instead set a member variable + // that should be observed by other collaborating code (e.g. + // workflowtaskHandler.workflowTaskFailedCause). That made me paranoid + // about the way this function exits so while we have this defer here + // there is _also_ code to call effects.Cancel at key points. + if retError != nil { + effects.Cancel(ctx) + } + effects.Apply(ctx) + }() + + // It's an error if the workflow has used versioning in the past but this task has no versioning info.
+ if ms.GetWorkerVersionStamp().GetUseVersioning() && !request.GetWorkerVersionStamp().GetUseVersioning() { + return nil, serviceerror.NewInvalidArgument("Workflow using versioning must continue to use versioning.") + } + + nsName := namespaceEntry.Name().String() + limits := workflow.WorkflowTaskCompletionLimits{ + MaxResetPoints: handler.config.MaxAutoResetPoints(nsName), + MaxSearchAttributeValueSize: handler.config.SearchAttributesSizeOfValueLimit(nsName), + } + // TODO: this metric is inaccurate, it should only be emitted if a new binary checksum (or build ID) is added in this completion. + if ms.GetExecutionInfo().AutoResetPoints != nil && limits.MaxResetPoints == len(ms.GetExecutionInfo().AutoResetPoints.Points) { + handler.metricsHandler.Counter(metrics.AutoResetPointsLimitExceededCounter.GetMetricName()).Record( + 1, + metrics.OperationTag(metrics.HistoryRespondWorkflowTaskCompletedScope)) + } + + workflowTaskHeartbeating := request.GetForceCreateNewWorkflowTask() && len(request.Commands) == 0 && len(request.Messages) == 0 + var workflowTaskHeartbeatTimeout bool + var completedEvent *historypb.HistoryEvent + var responseMutations []workflowTaskResponseMutation + + if workflowTaskHeartbeating { + namespace := namespaceEntry.Name() + timeout := handler.config.WorkflowTaskHeartbeatTimeout(namespace.String()) + origSchedTime := timestamp.TimeValue(currentWorkflowTask.OriginalScheduledTime) + if origSchedTime.UnixNano() > 0 && handler.timeSource.Now().After(origSchedTime.Add(timeout)) { + workflowTaskHeartbeatTimeout = true + + scope := handler.metricsHandler.WithTags( + metrics.OperationTag(metrics.HistoryRespondWorkflowTaskCompletedScope), + metrics.NamespaceTag(namespace.String()), + ) + scope.Counter(metrics.WorkflowTaskHeartbeatTimeoutCounter.GetMetricName()).Record(1) + completedEvent, err = ms.AddWorkflowTaskTimedOutEvent(currentWorkflowTask) + if err != nil { + return nil, err + } + ms.ClearStickyTaskQueue() + } else { + completedEvent, err = ms.AddWorkflowTaskCompletedEvent(currentWorkflowTask, request, limits) + if err != nil { + return nil, err + } + } + } else { + completedEvent, err = ms.AddWorkflowTaskCompletedEvent(currentWorkflowTask, request, limits) + if err != nil { + return nil, err + } + } + // NOTE: completedEvent might be nil if WT was speculative and request has only `update.Rejection` messages. + // See workflowTaskStateMachine.skipWorkflowTaskCompletedEvent for more details. 
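+ + // If the worker did not report sticky attributes, clear any sticky task queue; otherwise record the worker's sticky queue and its schedule-to-start timeout.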
+ + if request.StickyAttributes == nil || request.StickyAttributes.WorkerTaskQueue == nil { + handler.metricsHandler.Counter(metrics.CompleteWorkflowTaskWithStickyDisabledCounter.GetMetricName()).Record( + 1, + metrics.OperationTag(metrics.HistoryRespondWorkflowTaskCompletedScope)) + ms.ClearStickyTaskQueue() + } else { + handler.metricsHandler.Counter(metrics.CompleteWorkflowTaskWithStickyEnabledCounter.GetMetricName()).Record( + 1, + metrics.OperationTag(metrics.HistoryRespondWorkflowTaskCompletedScope)) + ms.SetStickyTaskQueue(request.StickyAttributes.WorkerTaskQueue.GetName(), request.StickyAttributes.GetScheduleToStartTimeout()) + } + + var ( + wtFailedCause *workflowTaskFailedCause + activityNotStartedCancelled bool + newMutableState workflow.MutableState + ) + // hasBufferedEvents indicates if there are any buffered events which should generate a new workflow task + hasBufferedEvents := ms.HasBufferedEvents() + if err := namespaceEntry.VerifyBinaryChecksum(request.GetBinaryChecksum()); err != nil { + wtFailedCause = newWorkflowTaskFailedCause( + enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_BINARY, + serviceerror.NewInvalidArgument( + fmt.Sprintf( + "binary %v is marked as bad deployment", + request.GetBinaryChecksum())), + nil) + } else { + namespace := namespaceEntry.Name() + workflowSizeChecker := newWorkflowSizeChecker( + workflowSizeLimits{ + blobSizeLimitWarn: handler.config.BlobSizeLimitWarn(namespace.String()), + blobSizeLimitError: handler.config.BlobSizeLimitError(namespace.String()), + memoSizeLimitWarn: handler.config.MemoSizeLimitWarn(namespace.String()), + memoSizeLimitError: handler.config.MemoSizeLimitError(namespace.String()), + numPendingChildExecutionsLimit: handler.config.NumPendingChildExecutionsLimit(namespace.String()), + numPendingActivitiesLimit: handler.config.NumPendingActivitiesLimit(namespace.String()), + numPendingSignalsLimit: handler.config.NumPendingSignalsLimit(namespace.String()), + numPendingCancelsRequestLimit: handler.config.NumPendingCancelsRequestLimit(namespace.String()), + }, + ms, + handler.searchAttributesValidator, + handler.metricsHandler.WithTags( + metrics.OperationTag(metrics.HistoryRespondWorkflowTaskCompletedScope), + metrics.NamespaceTag(namespace.String()), + ), + handler.throttledLogger, + ) + + workflowTaskHandler := newWorkflowTaskHandler( + request.GetIdentity(), + completedEvent.GetEventId(), // If completedEvent is nil, then GetEventId() returns 0 and this value shouldn't be used in workflowTaskHandler. 
+ ms, + weContext.UpdateRegistry(ctx), + &effects, + handler.commandAttrValidator, + workflowSizeChecker, + handler.logger, + handler.namespaceRegistry, + handler.metricsHandler, + handler.config, + handler.shard, + handler.searchAttributesMapperProvider, + hasBufferedEvents, + ) + + if responseMutations, err = workflowTaskHandler.handleCommands( + ctx, + request.Commands, + collection.NewIndexedTakeList( + request.Messages, + func(msg *protocolpb.Message) string { return msg.Id }, + ), + ); err != nil { + return nil, err + } + + // set the vars used by following logic + // further refactor should also clean up the vars used below + wtFailedCause = workflowTaskHandler.workflowTaskFailedCause + + // failMessage is not used by workflowTaskHandlerCallbacks + activityNotStartedCancelled = workflowTaskHandler.activityNotStartedCancelled + // continueAsNewTimerTasks is not used by workflowTaskHandlerCallbacks + + newMutableState = workflowTaskHandler.newMutableState + + hasBufferedEvents = workflowTaskHandler.hasBufferedEvents + } + + wtFailedShouldCreateNewTask := false + if wtFailedCause != nil { + effects.Cancel(ctx) + handler.metricsHandler.Counter(metrics.FailedWorkflowTasksCounter.GetMetricName()).Record( + 1, + metrics.OperationTag(metrics.HistoryRespondWorkflowTaskCompletedScope)) + handler.logger.Info("Failing the workflow task.", + tag.Value(wtFailedCause.Message()), + tag.WorkflowID(token.GetWorkflowId()), + tag.WorkflowRunID(token.GetRunId()), + tag.WorkflowNamespaceID(namespaceEntry.ID().String())) + if currentWorkflowTask.Attempt > 1 && wtFailedCause.failedCause != enumspb.WORKFLOW_TASK_FAILED_CAUSE_UNHANDLED_COMMAND { + // drop this workflow task if it keeps failing. This will cause the workflow task to timeout and get retried after timeout. + return nil, serviceerror.NewInvalidArgument(wtFailedCause.Message()) + } + var wtFailedEventID int64 + ms, wtFailedEventID, err = failWorkflowTask(ctx, weContext, currentWorkflowTask, wtFailedCause, request) + if err != nil { + return nil, err + } + wtFailedShouldCreateNewTask = true + newMutableState = nil + + if wtFailedCause.workflowFailure != nil { + // Flush buffer event before failing the workflow + ms.FlushBufferedEvents() + + attributes := &commandpb.FailWorkflowExecutionCommandAttributes{ + Failure: wtFailedCause.workflowFailure, + } + if _, err := ms.AddFailWorkflowEvent(wtFailedEventID, enumspb.RETRY_STATE_NON_RETRYABLE_FAILURE, attributes, ""); err != nil { + return nil, err + } + wtFailedShouldCreateNewTask = false + } + } + + bufferedEventShouldCreateNewTask := hasBufferedEvents && ms.HasAnyBufferedEvent(eventShouldGenerateNewTaskFilter) + if hasBufferedEvents && !bufferedEventShouldCreateNewTask { + // Make sure tasks that should not create a new event don't get stuck in ms forever + ms.FlushBufferedEvents() + } + newWorkflowTaskType := enumsspb.WORKFLOW_TASK_TYPE_UNSPECIFIED + if ms.IsWorkflowExecutionRunning() { + if request.GetForceCreateNewWorkflowTask() || // Heartbeat WT is always of Normal type. 
+ wtFailedShouldCreateNewTask || + bufferedEventShouldCreateNewTask || + activityNotStartedCancelled { + newWorkflowTaskType = enumsspb.WORKFLOW_TASK_TYPE_NORMAL + } else if weContext.UpdateRegistry(ctx).HasOutgoing() { + if completedEvent == nil || ms.GetNextEventID() == completedEvent.GetEventId()+1 { + newWorkflowTaskType = enumsspb.WORKFLOW_TASK_TYPE_SPECULATIVE + } else { + newWorkflowTaskType = enumsspb.WORKFLOW_TASK_TYPE_NORMAL + } + } + } + + bypassTaskGeneration := request.GetReturnNewWorkflowTask() && wtFailedCause == nil + // TODO (alex-update): Need to support case when ReturnNewWorkflowTask=false and WT.Type=Speculative. + // In this case WT needs to be added directly to matching. + // Current implementation will create normal WT. + if newWorkflowTaskType == enumsspb.WORKFLOW_TASK_TYPE_SPECULATIVE && !bypassTaskGeneration { + // If task generation can't be bypassed workflow task must be of Normal type because Speculative workflow task always skip task generation. + newWorkflowTaskType = enumsspb.WORKFLOW_TASK_TYPE_NORMAL + } + + var newWorkflowTask *workflow.WorkflowTaskInfo + // Speculative workflow task will be created after mutable state is persisted. + if newWorkflowTaskType == enumsspb.WORKFLOW_TASK_TYPE_NORMAL { + var newWTErr error + if workflowTaskHeartbeating && !workflowTaskHeartbeatTimeout { + newWorkflowTask, newWTErr = ms.AddWorkflowTaskScheduledEventAsHeartbeat( + bypassTaskGeneration, + currentWorkflowTask.OriginalScheduledTime, + enumsspb.WORKFLOW_TASK_TYPE_NORMAL, // Heartbeat workflow task is always of Normal type. + ) + } else { + newWorkflowTask, newWTErr = ms.AddWorkflowTaskScheduledEvent(bypassTaskGeneration, newWorkflowTaskType) + } + if newWTErr != nil { + return nil, newWTErr + } + + // skip transfer task for workflow task if request asking to return new workflow task + if bypassTaskGeneration { + // start the new workflow task if request asked to do so + // TODO: replace the poll request + _, newWorkflowTask, err = ms.AddWorkflowTaskStartedEvent( + newWorkflowTask.ScheduledEventID, + "request-from-RespondWorkflowTaskCompleted", + newWorkflowTask.TaskQueue, + request.Identity, + ) + if err != nil { + return nil, err + } + } + } + + var updateErr error + if newMutableState != nil { + newWorkflowExecutionInfo := newMutableState.GetExecutionInfo() + newWorkflowExecutionState := newMutableState.GetExecutionState() + updateErr = weContext.UpdateWorkflowExecutionWithNewAsActive( + ctx, + workflow.NewContext( + handler.shard, + definition.NewWorkflowKey( + newWorkflowExecutionInfo.NamespaceId, + newWorkflowExecutionInfo.WorkflowId, + newWorkflowExecutionState.RunId, + ), + handler.logger, + ), + newMutableState, + ) + } else { + // If completedEvent is not nil (which it means that WT wasn't speculative) + // OR new WT is normal, then mutable state is persisted. + // Otherwise, (both old and new WT are speculative) mutable state is updated in memory only but not persisted. 
+ if completedEvent != nil || newWorkflowTaskType == enumsspb.WORKFLOW_TASK_TYPE_NORMAL { + updateErr = weContext.UpdateWorkflowExecutionAsActive(ctx) + } + } + + if updateErr != nil { + effects.Cancel(ctx) + if persistence.IsConflictErr(updateErr) { + handler.metricsHandler.Counter(metrics.ConcurrencyUpdateFailureCounter.GetMetricName()).Record( + 1, + metrics.OperationTag(metrics.HistoryRespondWorkflowTaskCompletedScope)) + } + + // if updateErr resulted in TransactionSizeLimitError then fail workflow + switch updateErr.(type) { + case *persistence.TransactionSizeLimitError: + // must reload mutable state because the first call to updateWorkflowExecutionWithContext or continueAsNewWorkflowExecution + // clears mutable state if error is returned + ms, err = weContext.LoadMutableState(ctx) + if err != nil { + return nil, err + } + + if err := workflow.TerminateWorkflow( + ms, + common.FailureReasonTransactionSizeExceedsLimit, + payloads.EncodeString(updateErr.Error()), + consts.IdentityHistoryService, + false, + ); err != nil { + return nil, err + } + if err := weContext.UpdateWorkflowExecutionAsActive( + ctx, + ); err != nil { + return nil, err + } + } + + return nil, updateErr + } + + // Create speculative workflow task after mutable state is persisted. + if newWorkflowTaskType == enumsspb.WORKFLOW_TASK_TYPE_SPECULATIVE { + newWorkflowTask, err = ms.AddWorkflowTaskScheduledEvent(bypassTaskGeneration, newWorkflowTaskType) + if err != nil { + return nil, err + } + _, newWorkflowTask, err = ms.AddWorkflowTaskStartedEvent( + newWorkflowTask.ScheduledEventID, + "request-from-RespondWorkflowTaskCompleted", + newWorkflowTask.TaskQueue, + request.Identity, + ) + if err != nil { + return nil, err + } + } + + handler.handleBufferedQueries(ms, req.GetCompleteRequest().GetQueryResults(), newWorkflowTask != nil, namespaceEntry, workflowTaskHeartbeating) + + if workflowTaskHeartbeatTimeout { + // at this point, update is successful, but we still return an error to client so that the worker will give up this workflow + // release workflow lock with nil error to prevent mutable state from being cleared and reloaded + workflowContext.GetReleaseFn()(nil) + return nil, serviceerror.NewNotFound("workflow task heartbeat timeout") + } + + if wtFailedCause != nil { + // release workflow lock with nil error to prevent mutable state from being cleared and reloaded + workflowContext.GetReleaseFn()(nil) + return nil, serviceerror.NewInvalidArgument(wtFailedCause.Message()) + } + + resp := &historyservice.RespondWorkflowTaskCompletedResponse{} + if request.GetReturnNewWorkflowTask() && newWorkflowTask != nil { + resp.StartedResponse, err = handler.createRecordWorkflowTaskStartedResponse(ms, weContext.UpdateRegistry(ctx), newWorkflowTask, request.GetIdentity()) + if err != nil { + return nil, err + } + // sticky is always enabled when worker request for new workflow task from RespondWorkflowTaskCompleted + resp.StartedResponse.StickyExecutionEnabled = true + } + + // If completedEvent is nil then it means that WT was speculative and + // WT events (scheduled/started/completed) were not written to the history and were dropped. + // SDK needs to know where to roll back its history event pointer, i.e. after what event all other events needs to be dropped. + // SDK uses WorkflowTaskStartedEventID to do that. 
+ if completedEvent == nil { + resp.ResetHistoryEventId = ms.GetExecutionInfo().LastWorkflowTaskStartedEventId + } + + for _, mutation := range responseMutations { + if err := mutation(resp); err != nil { + return nil, err + } + } + + return resp, nil +} + +func (handler *workflowTaskHandlerCallbacksImpl) verifyFirstWorkflowTaskScheduled( + ctx context.Context, + req *historyservice.VerifyFirstWorkflowTaskScheduledRequest, +) (retError error) { + namespaceID := namespace.ID(req.GetNamespaceId()) + if err := api.ValidateNamespaceUUID(namespaceID); err != nil { + return err + } + + workflowContext, err := handler.workflowConsistencyChecker.GetWorkflowContext( + ctx, + req.Clock, + api.BypassMutableStateConsistencyPredicate, + definition.NewWorkflowKey( + req.NamespaceId, + req.WorkflowExecution.WorkflowId, + req.WorkflowExecution.RunId, + ), + workflow.LockPriorityLow, + ) + if err != nil { + return err + } + defer func() { workflowContext.GetReleaseFn()(retError) }() + + mutableState := workflowContext.GetMutableState() + if !mutableState.IsWorkflowExecutionRunning() && + mutableState.GetExecutionState().State != enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE { + return nil + } + + if !mutableState.HadOrHasWorkflowTask() { + return consts.ErrWorkflowNotReady + } + + return nil +} + +func (handler *workflowTaskHandlerCallbacksImpl) createRecordWorkflowTaskStartedResponse( + ms workflow.MutableState, + updateRegistry update.Registry, + workflowTask *workflow.WorkflowTaskInfo, + identity string, +) (*historyservice.RecordWorkflowTaskStartedResponse, error) { + + response := &historyservice.RecordWorkflowTaskStartedResponse{} + response.WorkflowType = ms.GetWorkflowType() + executionInfo := ms.GetExecutionInfo() + if executionInfo.LastWorkflowTaskStartedEventId != common.EmptyEventID { + response.PreviousStartedEventId = executionInfo.LastWorkflowTaskStartedEventId + } + + // Starting workflowTask could result in different scheduledEventID if workflowTask was transient and new events came in + // before it was started. 
+ response.ScheduledEventId = workflowTask.ScheduledEventID + response.StartedEventId = workflowTask.StartedEventID + response.StickyExecutionEnabled = ms.IsStickyTaskQueueSet() + response.NextEventId = ms.GetNextEventID() + response.Attempt = workflowTask.Attempt + response.WorkflowExecutionTaskQueue = &taskqueuepb.TaskQueue{ + Name: executionInfo.TaskQueue, + Kind: enumspb.TASK_QUEUE_KIND_NORMAL, + } + response.ScheduledTime = workflowTask.ScheduledTime + response.StartedTime = workflowTask.StartedTime + response.Version = workflowTask.Version + + // TODO (alex-update): Transient needs to be renamed to "TransientOrSpeculative" + response.TransientWorkflowTask = ms.GetTransientWorkflowTaskInfo(workflowTask, identity) + + currentBranchToken, err := ms.GetCurrentBranchToken() + if err != nil { + return nil, err + } + response.BranchToken = currentBranchToken + + qr := ms.GetQueryRegistry() + bufferedQueryIDs := qr.GetBufferedIDs() + if len(bufferedQueryIDs) > 0 { + response.Queries = make(map[string]*querypb.WorkflowQuery, len(bufferedQueryIDs)) + for _, bufferedQueryID := range bufferedQueryIDs { + input, err := qr.GetQueryInput(bufferedQueryID) + if err != nil { + continue + } + response.Queries[bufferedQueryID] = input + } + } + + response.Messages = updateRegistry.ReadOutgoingMessages(workflowTask.StartedEventID) + + if workflowTask.Type == enumsspb.WORKFLOW_TASK_TYPE_SPECULATIVE && len(response.GetMessages()) == 0 { + return nil, serviceerror.NewNotFound("No messages for speculative workflow task.") + } + + return response, nil +} + +func (handler *workflowTaskHandlerCallbacksImpl) handleBufferedQueries(ms workflow.MutableState, queryResults map[string]*querypb.WorkflowQueryResult, createNewWorkflowTask bool, namespaceEntry *namespace.Namespace, workflowTaskHeartbeating bool) { + queryRegistry := ms.GetQueryRegistry() + if !queryRegistry.HasBufferedQuery() { + return + } + + namespaceName := namespaceEntry.Name() + workflowID := ms.GetExecutionInfo().WorkflowId + runID := ms.GetExecutionState().GetRunId() + + scope := handler.metricsHandler.WithTags( + metrics.OperationTag(metrics.HistoryRespondWorkflowTaskCompletedScope), + metrics.NamespaceTag(namespaceEntry.Name().String()), + metrics.CommandTypeTag("ConsistentQuery")) + + // if its a heartbeat workflow task it means local activities may still be running on the worker + // which were started by an external event which happened before the query + if workflowTaskHeartbeating { + return + } + + sizeLimitError := handler.config.BlobSizeLimitError(namespaceName.String()) + sizeLimitWarn := handler.config.BlobSizeLimitWarn(namespaceName.String()) + + // Complete or fail all queries we have results for + for id, result := range queryResults { + if err := common.CheckEventBlobSizeLimit( + result.GetAnswer().Size(), + sizeLimitWarn, + sizeLimitError, + namespaceName.String(), + workflowID, + runID, + scope, + handler.throttledLogger, + tag.BlobSizeViolationOperation("ConsistentQuery"), + ); err != nil { + handler.logger.Info("failing query because query result size is too large", + tag.WorkflowNamespace(namespaceName.String()), + tag.WorkflowID(workflowID), + tag.WorkflowRunID(runID), + tag.QueryID(id), + tag.Error(err)) + failedCompletionState := &workflow.QueryCompletionState{ + Type: workflow.QueryCompletionTypeFailed, + Err: err, + } + if err := queryRegistry.SetCompletionState(id, failedCompletionState); err != nil { + handler.logger.Error( + "failed to set query completion state to failed", + 
tag.WorkflowNamespace(namespaceName.String()), + tag.WorkflowID(workflowID), + tag.WorkflowRunID(runID), + tag.QueryID(id), + tag.Error(err)) + scope.Counter(metrics.QueryRegistryInvalidStateCount.GetMetricName()).Record(1) + } + } else { + succeededCompletionState := &workflow.QueryCompletionState{ + Type: workflow.QueryCompletionTypeSucceeded, + Result: result, + } + if err := queryRegistry.SetCompletionState(id, succeededCompletionState); err != nil { + handler.logger.Error( + "failed to set query completion state to succeeded", + tag.WorkflowNamespace(namespaceName.String()), + tag.WorkflowID(workflowID), + tag.WorkflowRunID(runID), + tag.QueryID(id), + tag.Error(err)) + scope.Counter(metrics.QueryRegistryInvalidStateCount.GetMetricName()).Record(1) + } + } + } + + // If no workflow task was created then it means no buffered events came in during this workflow task's handling. + // This means all unanswered buffered queries can be dispatched directly through matching at this point. + if !createNewWorkflowTask { + buffered := queryRegistry.GetBufferedIDs() + for _, id := range buffered { + unblockCompletionState := &workflow.QueryCompletionState{ + Type: workflow.QueryCompletionTypeUnblocked, + } + if err := queryRegistry.SetCompletionState(id, unblockCompletionState); err != nil { + handler.logger.Error( + "failed to set query completion state to unblocked", + tag.WorkflowNamespace(namespaceName.String()), + tag.WorkflowID(workflowID), + tag.WorkflowRunID(runID), + tag.QueryID(id), + tag.Error(err)) + scope.Counter(metrics.QueryRegistryInvalidStateCount.GetMetricName()).Record(1) + } + } + } +} + +func failWorkflowTask( + ctx context.Context, + wfContext workflow.Context, + workflowTask *workflow.WorkflowTaskInfo, + wtFailedCause *workflowTaskFailedCause, + request *workflowservice.RespondWorkflowTaskCompletedRequest, +) (workflow.MutableState, int64, error) { + + // clear any updates we have accumulated so far + wfContext.Clear() + + // Reload workflow execution so we can apply the workflow task failure event + mutableState, err := wfContext.LoadMutableState(ctx) + if err != nil { + return nil, common.EmptyEventID, err + } + wtFailedEvent, err := mutableState.AddWorkflowTaskFailedEvent( + workflowTask, + wtFailedCause.failedCause, + failure.NewServerFailure(wtFailedCause.Message(), true), + request.GetIdentity(), + request.GetBinaryChecksum(), + "", + "", + 0) + if err != nil { + return nil, common.EmptyEventID, err + } + + var wtFailedEventID int64 + if wtFailedEvent != nil { + // If WTFailed event was added to the history then use its Id as wtFailedEventID. + wtFailedEventID = wtFailedEvent.GetEventId() + } else { + // Otherwise, if it was transient WT, last event should be WTFailed event from the 1st attempt. + wtFailedEventID = mutableState.GetNextEventID() - 1 + } + + // Return reloaded mutable state back to the caller for further updates. 
+ return mutableState, wtFailedEventID, nil +} + +// Filter function to be passed to mutable_state.HasAnyBufferedEvent +// Returns true if the event should generate a new workflow task +// Currently only signal events with SkipGenerateWorkflowTask=true flag set do not generate tasks +func eventShouldGenerateNewTaskFilter(event *historypb.HistoryEvent) bool { + if event.GetEventType() != enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_SIGNALED { + return true + } + return !event.GetWorkflowExecutionSignaledEventAttributes().GetSkipGenerateWorkflowTask() +} diff -Nru temporal-1.21.5-1/src/service/history/workflow_task_handler_callbacks_test.go temporal-1.22.5/src/service/history/workflow_task_handler_callbacks_test.go --- temporal-1.21.5-1/src/service/history/workflow_task_handler_callbacks_test.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/service/history/workflow_task_handler_callbacks_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,347 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
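Editor's note: eventShouldGenerateNewTaskFilter, defined just above, has a narrow contract: every buffered event forces a new workflow task except a signal event flagged with SkipGenerateWorkflowTask. The sketch below restates that predicate as a standalone program and exercises it, assuming the go.temporal.io/api generated types already imported elsewhere in this diff; it is illustrative only and not part of the patch.

package main

import (
	"fmt"

	enumspb "go.temporal.io/api/enums/v1"
	historypb "go.temporal.io/api/history/v1"
)

// shouldGenerateNewTask duplicates the filter's logic for illustration:
// only signal events carrying SkipGenerateWorkflowTask=true are excluded.
func shouldGenerateNewTask(event *historypb.HistoryEvent) bool {
	if event.GetEventType() != enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_SIGNALED {
		return true
	}
	return !event.GetWorkflowExecutionSignaledEventAttributes().GetSkipGenerateWorkflowTask()
}

func main() {
	skippedSignal := &historypb.HistoryEvent{
		EventType: enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_SIGNALED,
		Attributes: &historypb.HistoryEvent_WorkflowExecutionSignaledEventAttributes{
			WorkflowExecutionSignaledEventAttributes: &historypb.WorkflowExecutionSignaledEventAttributes{
				SkipGenerateWorkflowTask: true,
			},
		},
	}
	normalSignal := &historypb.HistoryEvent{
		EventType: enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_SIGNALED,
		Attributes: &historypb.HistoryEvent_WorkflowExecutionSignaledEventAttributes{
			WorkflowExecutionSignaledEventAttributes: &historypb.WorkflowExecutionSignaledEventAttributes{},
		},
	}
	fmt.Println(shouldGenerateNewTask(skippedSignal)) // false: signal asked to skip task generation
	fmt.Println(shouldGenerateNewTask(normalSignal))  // true: ordinary signal still generates a task
}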
+ +package history + +import ( + "context" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/pborman/uuid" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" + querypb "go.temporal.io/api/query/v1" + "go.temporal.io/api/serviceerror" + "golang.org/x/exp/maps" + + enumsspb "go.temporal.io/server/api/enums/v1" + "go.temporal.io/server/api/historyservice/v1" + persistencespb "go.temporal.io/server/api/persistence/v1" + "go.temporal.io/server/common" + "go.temporal.io/server/common/clock" + "go.temporal.io/server/common/cluster" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/namespace" + "go.temporal.io/server/common/payloads" + "go.temporal.io/server/common/persistence" + "go.temporal.io/server/common/searchattribute" + "go.temporal.io/server/service/history/api" + "go.temporal.io/server/service/history/events" + "go.temporal.io/server/service/history/shard" + "go.temporal.io/server/service/history/tests" + "go.temporal.io/server/service/history/workflow" + wcache "go.temporal.io/server/service/history/workflow/cache" +) + +type ( + WorkflowTaskHandlerCallbackSuite struct { + *require.Assertions + suite.Suite + + controller *gomock.Controller + mockEventsCache *events.MockCache + mockExecutionMgr *persistence.MockExecutionManager + + logger log.Logger + + workflowTaskHandlerCallback *workflowTaskHandlerCallbacksImpl + } +) + +func TestWorkflowTaskHandlerCallbackSuite(t *testing.T) { + suite.Run(t, new(WorkflowTaskHandlerCallbackSuite)) +} + +func (s *WorkflowTaskHandlerCallbackSuite) SetupTest() { + s.Assertions = require.New(s.T()) + + s.controller = gomock.NewController(s.T()) + config := tests.NewDynamicConfig() + mockShard := shard.NewTestContext( + s.controller, + &persistencespb.ShardInfo{ + ShardId: 1, + RangeId: 1, + }, + config, + ) + mockShard.Resource.ShardMgr.EXPECT().AssertShardOwnership(gomock.Any(), gomock.Any()).AnyTimes() + + mockNamespaceCache := mockShard.Resource.NamespaceCache + mockNamespaceCache.EXPECT().GetNamespaceByID(tests.NamespaceID).Return(tests.LocalNamespaceEntry, nil).AnyTimes() + s.mockExecutionMgr = mockShard.Resource.ExecutionMgr + mockClusterMetadata := mockShard.Resource.ClusterMetadata + mockClusterMetadata.EXPECT().GetCurrentClusterName().Return(cluster.TestCurrentClusterName).AnyTimes() + mockClusterMetadata.EXPECT().ClusterNameForFailoverVersion(false, common.EmptyVersion).Return(cluster.TestCurrentClusterName).AnyTimes() + mockClusterMetadata.EXPECT().ClusterNameForFailoverVersion(true, tests.Version).Return(cluster.TestCurrentClusterName).AnyTimes() + + mockVisibilityManager := mockShard.Resource.VisibilityManager + mockVisibilityManager.EXPECT().GetIndexName().Return("").AnyTimes() + mockVisibilityManager.EXPECT(). + ValidateCustomSearchAttributes(gomock.Any()). + DoAndReturn( + func(searchAttributes map[string]any) (map[string]any, error) { + return searchAttributes, nil + }, + ). 
+ AnyTimes() + + s.mockEventsCache = mockShard.MockEventsCache + s.mockEventsCache.EXPECT().PutEvent(gomock.Any(), gomock.Any()).AnyTimes() + s.logger = mockShard.GetLogger() + + workflowCache := wcache.NewCache(mockShard) + h := &historyEngineImpl{ + currentClusterName: mockShard.GetClusterMetadata().GetCurrentClusterName(), + shard: mockShard, + clusterMetadata: mockClusterMetadata, + executionManager: s.mockExecutionMgr, + logger: s.logger, + throttledLogger: s.logger, + metricsHandler: metrics.NoopMetricsHandler, + tokenSerializer: common.NewProtoTaskTokenSerializer(), + config: config, + timeSource: mockShard.GetTimeSource(), + eventNotifier: events.NewNotifier(clock.NewRealTimeSource(), metrics.NoopMetricsHandler, func(namespace.ID, string) int32 { return 1 }), + searchAttributesValidator: searchattribute.NewValidator( + searchattribute.NewTestProvider(), + mockShard.Resource.SearchAttributesMapperProvider, + config.SearchAttributesNumberOfKeysLimit, + config.SearchAttributesSizeOfValueLimit, + config.SearchAttributesTotalSizeLimit, + mockShard.Resource.VisibilityManager, + false, + ), + workflowConsistencyChecker: api.NewWorkflowConsistencyChecker(mockShard, workflowCache), + } + + s.workflowTaskHandlerCallback = newWorkflowTaskHandlerCallback(h) +} + +func (s *WorkflowTaskHandlerCallbackSuite) TearDownTest() { + s.controller.Finish() +} + +func (s *WorkflowTaskHandlerCallbackSuite) TestVerifyFirstWorkflowTaskScheduled_WorkflowNotFound() { + request := &historyservice.VerifyFirstWorkflowTaskScheduledRequest{ + NamespaceId: tests.NamespaceID.String(), + WorkflowExecution: &commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + }, + } + + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(nil, &serviceerror.NotFound{}) + + err := s.workflowTaskHandlerCallback.verifyFirstWorkflowTaskScheduled(context.Background(), request) + s.IsType(&serviceerror.NotFound{}, err) +} + +func (s *WorkflowTaskHandlerCallbackSuite) TestVerifyFirstWorkflowTaskScheduled_WorkflowCompleted() { + request := &historyservice.VerifyFirstWorkflowTaskScheduledRequest{ + NamespaceId: tests.NamespaceID.String(), + WorkflowExecution: &commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + }, + } + + ms := workflow.TestGlobalMutableState(s.workflowTaskHandlerCallback.shard, s.mockEventsCache, s.logger, tests.Version, tests.RunID) + addWorkflowExecutionStartedEvent(ms, commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + }, "wType", "testTaskQueue", payloads.EncodeString("input"), 25*time.Second, 20*time.Second, 200*time.Second, "identity") + + _, err := ms.AddTimeoutWorkflowEvent( + ms.GetNextEventID(), + enumspb.RETRY_STATE_RETRY_POLICY_NOT_SET, + uuid.New(), + ) + s.NoError(err) + + wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + + err = s.workflowTaskHandlerCallback.verifyFirstWorkflowTaskScheduled(context.Background(), request) + s.NoError(err) +} + +func (s *WorkflowTaskHandlerCallbackSuite) TestVerifyFirstWorkflowTaskScheduled_WorkflowZombie() { + request := &historyservice.VerifyFirstWorkflowTaskScheduledRequest{ + NamespaceId: tests.NamespaceID.String(), + WorkflowExecution: &commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + }, + } + + ms := 
workflow.TestGlobalMutableState(s.workflowTaskHandlerCallback.shard, s.mockEventsCache, s.logger, tests.Version, tests.RunID) + addWorkflowExecutionStartedEvent(ms, commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + }, "wType", "testTaskQueue", payloads.EncodeString("input"), 25*time.Second, 20*time.Second, 200*time.Second, "identity") + + // zombie state should be treated as open + s.NoError(ms.UpdateWorkflowStateStatus( + enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE, + enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING, + )) + wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + + err := s.workflowTaskHandlerCallback.verifyFirstWorkflowTaskScheduled(context.Background(), request) + s.IsType(&serviceerror.WorkflowNotReady{}, err) +} + +func (s *WorkflowTaskHandlerCallbackSuite) TestVerifyFirstWorkflowTaskScheduled_WorkflowRunning_TaskPending() { + request := &historyservice.VerifyFirstWorkflowTaskScheduledRequest{ + NamespaceId: tests.NamespaceID.String(), + WorkflowExecution: &commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + }, + } + + ms := workflow.TestGlobalMutableState(s.workflowTaskHandlerCallback.shard, s.mockEventsCache, s.logger, tests.Version, tests.RunID) + addWorkflowExecutionStartedEvent(ms, commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + }, "wType", "testTaskQueue", payloads.EncodeString("input"), 25*time.Second, 20*time.Second, 200*time.Second, "identity") + addWorkflowTaskScheduledEvent(ms) + + wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + + err := s.workflowTaskHandlerCallback.verifyFirstWorkflowTaskScheduled(context.Background(), request) + s.NoError(err) +} + +func (s *WorkflowTaskHandlerCallbackSuite) TestVerifyFirstWorkflowTaskScheduled_WorkflowRunning_TaskProcessed() { + request := &historyservice.VerifyFirstWorkflowTaskScheduledRequest{ + NamespaceId: tests.NamespaceID.String(), + WorkflowExecution: &commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + }, + } + + ms := workflow.TestGlobalMutableState(s.workflowTaskHandlerCallback.shard, s.mockEventsCache, s.logger, tests.Version, tests.RunID) + addWorkflowExecutionStartedEvent(ms, commonpb.WorkflowExecution{ + WorkflowId: tests.WorkflowID, + RunId: tests.RunID, + }, "wType", "testTaskQueue", payloads.EncodeString("input"), 25*time.Second, 20*time.Second, 200*time.Second, "identity") + wt := addWorkflowTaskScheduledEvent(ms) + workflowTasksStartEvent := addWorkflowTaskStartedEvent(ms, wt.ScheduledEventID, "testTaskQueue", uuid.New()) + wt.StartedEventID = workflowTasksStartEvent.GetEventId() + addWorkflowTaskCompletedEvent(&s.Suite, ms, wt.ScheduledEventID, wt.StartedEventID, "some random identity") + + wfMs := workflow.TestCloneToProto(ms) + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: wfMs} + s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) + + err := s.workflowTaskHandlerCallback.verifyFirstWorkflowTaskScheduled(context.Background(), request) + s.NoError(err) +} + +func (s *WorkflowTaskHandlerCallbackSuite) TestHandleBufferedQueries_HeartbeatWorkflowTask() { + queryRegistry, mockMutableState 
:= s.setupBufferedQueriesMocks() + s.assertQueryCounts(queryRegistry, 10, 0, 0, 0) + queryResults := s.constructQueryResults(queryRegistry.GetBufferedIDs()[0:5], 10) + s.workflowTaskHandlerCallback.handleBufferedQueries(mockMutableState, queryResults, false, tests.GlobalNamespaceEntry, true) + s.assertQueryCounts(queryRegistry, 10, 0, 0, 0) +} + +func (s *WorkflowTaskHandlerCallbackSuite) TestHandleBufferedQueries_NewWorkflowTask() { + queryRegistry, mockMutableState := s.setupBufferedQueriesMocks() + s.assertQueryCounts(queryRegistry, 10, 0, 0, 0) + queryResults := s.constructQueryResults(queryRegistry.GetBufferedIDs()[0:5], 10) + s.workflowTaskHandlerCallback.handleBufferedQueries(mockMutableState, queryResults, true, tests.GlobalNamespaceEntry, false) + s.assertQueryCounts(queryRegistry, 5, 5, 0, 0) +} + +func (s *WorkflowTaskHandlerCallbackSuite) TestHandleBufferedQueries_NoNewWorkflowTask() { + queryRegistry, mockMutableState := s.setupBufferedQueriesMocks() + s.assertQueryCounts(queryRegistry, 10, 0, 0, 0) + queryResults := s.constructQueryResults(queryRegistry.GetBufferedIDs()[0:5], 10) + s.workflowTaskHandlerCallback.handleBufferedQueries(mockMutableState, queryResults, false, tests.GlobalNamespaceEntry, false) + s.assertQueryCounts(queryRegistry, 0, 5, 5, 0) +} + +func (s *WorkflowTaskHandlerCallbackSuite) TestHandleBufferedQueries_QueryTooLarge() { + queryRegistry, mockMutableState := s.setupBufferedQueriesMocks() + s.assertQueryCounts(queryRegistry, 10, 0, 0, 0) + bufferedIDs := queryRegistry.GetBufferedIDs() + queryResults := s.constructQueryResults(bufferedIDs[0:5], 10) + largeQueryResults := s.constructQueryResults(bufferedIDs[5:10], 10*1024*1024) + maps.Copy(queryResults, largeQueryResults) + s.workflowTaskHandlerCallback.handleBufferedQueries(mockMutableState, queryResults, false, tests.GlobalNamespaceEntry, false) + s.assertQueryCounts(queryRegistry, 0, 5, 0, 5) +} + +func (s *WorkflowTaskHandlerCallbackSuite) setupBufferedQueriesMocks() (workflow.QueryRegistry, *workflow.MockMutableState) { + queryRegistry := s.constructQueryRegistry(10) + mockMutableState := workflow.NewMockMutableState(s.controller) + mockMutableState.EXPECT().GetQueryRegistry().Return(queryRegistry) + mockMutableState.EXPECT().GetExecutionInfo().Return(&persistencespb.WorkflowExecutionInfo{ + WorkflowId: tests.WorkflowID, + }).AnyTimes() + mockMutableState.EXPECT().GetExecutionState().Return(&persistencespb.WorkflowExecutionState{ + RunId: tests.RunID, + }).AnyTimes() + return queryRegistry, mockMutableState +} + +func (s *WorkflowTaskHandlerCallbackSuite) constructQueryResults(ids []string, resultSize int) map[string]*querypb.WorkflowQueryResult { + results := make(map[string]*querypb.WorkflowQueryResult) + for _, id := range ids { + results[id] = &querypb.WorkflowQueryResult{ + ResultType: enumspb.QUERY_RESULT_TYPE_ANSWERED, + Answer: payloads.EncodeBytes(make([]byte, resultSize)), + } + } + return results +} + +func (s *WorkflowTaskHandlerCallbackSuite) constructQueryRegistry(numQueries int) workflow.QueryRegistry { + queryRegistry := workflow.NewQueryRegistry() + for i := 0; i < numQueries; i++ { + queryRegistry.BufferQuery(&querypb.WorkflowQuery{}) + } + return queryRegistry +} + +func (s *WorkflowTaskHandlerCallbackSuite) assertQueryCounts(queryRegistry workflow.QueryRegistry, buffered, completed, unblocked, failed int) { + s.Len(queryRegistry.GetBufferedIDs(), buffered) + s.Len(queryRegistry.GetCompletedIDs(), completed) + s.Len(queryRegistry.GetUnblockedIDs(), unblocked) + 
s.Len(queryRegistry.GetFailedIDs(), failed) +} diff -Nru temporal-1.21.5-1/src/service/history/workflow_task_handler_test.go temporal-1.22.5/src/service/history/workflow_task_handler_test.go --- temporal-1.21.5-1/src/service/history/workflow_task_handler_test.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/service/history/workflow_task_handler_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,315 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package history + +import ( + "context" + "testing" + "time" + + "github.com/gogo/protobuf/proto" + "github.com/gogo/protobuf/types" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + commandpb "go.temporal.io/api/command/v1" + enumspb "go.temporal.io/api/enums/v1" + protocolpb "go.temporal.io/api/protocol/v1" + "go.temporal.io/api/serviceerror" + updatepb "go.temporal.io/api/update/v1" + + persistencespb "go.temporal.io/server/api/persistence/v1" + "go.temporal.io/server/common/collection" + "go.temporal.io/server/common/dynamicconfig" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/namespace" + "go.temporal.io/server/common/persistence" + "go.temporal.io/server/internal/effect" + "go.temporal.io/server/service/history/configs" + "go.temporal.io/server/service/history/shard" + "go.temporal.io/server/service/history/workflow" + "go.temporal.io/server/service/history/workflow/update" +) + +func TestCommandProtocolMessage(t *testing.T) { + t.Parallel() + + type testconf struct { + ms *workflow.MockMutableState + updates update.Registry + handler *workflowTaskHandlerImpl + conf map[dynamicconfig.Key]any + } + + const defaultBlobSizeLimit = 1 * 1024 * 1024 + + msgCommand := func(msgID string) *commandpb.Command { + return &commandpb.Command{ + CommandType: enumspb.COMMAND_TYPE_PROTOCOL_MESSAGE, + Attributes: &commandpb.Command_ProtocolMessageCommandAttributes{ + ProtocolMessageCommandAttributes: &commandpb.ProtocolMessageCommandAttributes{ + MessageId: msgID, + }, + }, + } + } + + setup := func(t *testing.T, out *testconf, blobSizeLimit int) { + shardCtx := shard.NewMockContext(gomock.NewController(t)) + logger := log.NewNoopLogger() + metricsHandler := metrics.NoopMetricsHandler + out.conf = map[dynamicconfig.Key]any{} + out.ms = 
workflow.NewMockMutableState(gomock.NewController(t)) + out.ms.EXPECT().VisitUpdates(gomock.Any()).Times(1) + out.updates = update.NewRegistry(func() update.UpdateStore { return out.ms }) + var effects effect.Buffer + config := configs.NewConfig( + dynamicconfig.NewCollection( + dynamicconfig.StaticClient(out.conf), logger), 1, false, false) + mockMeta := persistence.NewMockMetadataManager(gomock.NewController(t)) + nsReg := namespace.NewRegistry( + mockMeta, + true, + func() time.Duration { return 1 * time.Hour }, + dynamicconfig.GetBoolPropertyFn(false), + metricsHandler, + logger, + ) + out.handler = newWorkflowTaskHandler( // 😲 + t.Name(), // identity + 123, // workflowTaskCompletedID + out.ms, + out.updates, + &effects, + newCommandAttrValidator( + nsReg, + config, + nil, // searchAttributesValidator + ), + newWorkflowSizeChecker( + workflowSizeLimits{blobSizeLimitError: blobSizeLimit}, + out.ms, + nil, // searchAttributesValidator + metricsHandler, + logger, + ), + logger, + nsReg, + metricsHandler, + config, + shardCtx, + nil, // searchattribute.MapperProvider + false, + ) + } + + t.Run("missing message ID", func(t *testing.T) { + var tc testconf + setup(t, &tc, defaultBlobSizeLimit) + var ( + command = msgCommand("") // blank is invalid + ) + + tc.ms.EXPECT().GetExecutionInfo().AnyTimes().Return(&persistencespb.WorkflowExecutionInfo{}) + + _, err := tc.handler.handleCommand(context.Background(), command, newMsgList()) + require.NoError(t, err) + require.NotNil(t, tc.handler.workflowTaskFailedCause) + require.Equal(t, + enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_UPDATE_WORKFLOW_EXECUTION_MESSAGE, + tc.handler.workflowTaskFailedCause.failedCause) + }) + + t.Run("message not found", func(t *testing.T) { + var tc testconf + setup(t, &tc, defaultBlobSizeLimit) + var ( + command = msgCommand("valid_but_not_found_msg_id") + ) + + tc.ms.EXPECT().GetExecutionInfo().AnyTimes().Return(&persistencespb.WorkflowExecutionInfo{}) + + _, err := tc.handler.handleCommand(context.Background(), command, newMsgList()) + require.NoError(t, err) + require.NotNil(t, tc.handler.workflowTaskFailedCause) + require.Equal(t, + enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_UPDATE_WORKFLOW_EXECUTION_MESSAGE, + tc.handler.workflowTaskFailedCause.failedCause) + }) + + t.Run("message too large", func(t *testing.T) { + var tc testconf + t.Log("setting max blob size to zero") + setup(t, &tc, 0) + var ( + msgID = t.Name() + "-message-id" + command = msgCommand(msgID) // blank is invalid + msg = &protocolpb.Message{ + Id: msgID, + ProtocolInstanceId: "does_not_matter", + Body: mustMarshalAny(t, &types.Empty{}), + } + ) + + tc.ms.EXPECT().GetExecutionInfo().AnyTimes().Return(&persistencespb.WorkflowExecutionInfo{}) + tc.ms.EXPECT().GetExecutionState().AnyTimes().Return(&persistencespb.WorkflowExecutionState{}) + + _, err := tc.handler.handleCommand(context.Background(), command, newMsgList(msg)) + require.NoError(t, err) + require.NotNil(t, tc.handler.workflowTaskFailedCause) + require.Equal(t, + enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_UPDATE_WORKFLOW_EXECUTION_MESSAGE, + tc.handler.workflowTaskFailedCause.failedCause) + require.ErrorContains(t, tc.handler.workflowTaskFailedCause.causeErr, "exceeds size limit") + }) + + t.Run("message for unsupported protocol", func(t *testing.T) { + var tc testconf + setup(t, &tc, defaultBlobSizeLimit) + var ( + msgID = t.Name() + "-message-id" + command = msgCommand(msgID) // blank is invalid + msg = &protocolpb.Message{ + Id: msgID, + ProtocolInstanceId: "does_not_matter", + Body: 
mustMarshalAny(t, &types.Empty{}), + } + ) + + tc.ms.EXPECT().GetExecutionInfo().AnyTimes().Return(&persistencespb.WorkflowExecutionInfo{}) + tc.ms.EXPECT().GetExecutionState().AnyTimes().Return(&persistencespb.WorkflowExecutionState{}) + + _, err := tc.handler.handleCommand(context.Background(), command, newMsgList(msg)) + require.NoError(t, err) + require.NotNil(t, tc.handler.workflowTaskFailedCause) + require.Equal(t, + enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_UPDATE_WORKFLOW_EXECUTION_MESSAGE, + tc.handler.workflowTaskFailedCause.failedCause) + var invalidArg *serviceerror.InvalidArgument + require.ErrorAs(t, tc.handler.workflowTaskFailedCause.causeErr, &invalidArg) + require.ErrorContains(t, tc.handler.workflowTaskFailedCause.causeErr, "protocol type") + }) + + t.Run("update not found", func(t *testing.T) { + var tc testconf + setup(t, &tc, defaultBlobSizeLimit) + var ( + msgID = t.Name() + "-message-id" + command = msgCommand(msgID) // blank is invalid + msg = &protocolpb.Message{ + Id: msgID, + ProtocolInstanceId: "will not be found", + Body: mustMarshalAny(t, &updatepb.Acceptance{}), + } + ) + + tc.ms.EXPECT().GetExecutionInfo().AnyTimes().Return(&persistencespb.WorkflowExecutionInfo{}) + tc.ms.EXPECT().GetExecutionState().AnyTimes().Return(&persistencespb.WorkflowExecutionState{}) + tc.ms.EXPECT().GetUpdateOutcome(gomock.Any(), "will not be found").Return(nil, serviceerror.NewNotFound("")) + + _, err := tc.handler.handleCommand(context.Background(), command, newMsgList(msg)) + require.NoError(t, err) + require.NotNil(t, tc.handler.workflowTaskFailedCause) + require.Equal(t, + enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_UPDATE_WORKFLOW_EXECUTION_MESSAGE, + tc.handler.workflowTaskFailedCause.failedCause) + var notfound *serviceerror.NotFound + require.ErrorAs(t, tc.handler.workflowTaskFailedCause.causeErr, ¬found) + }) + + t.Run("deliver message failure", func(t *testing.T) { + var tc testconf + setup(t, &tc, defaultBlobSizeLimit) + var ( + updateID = t.Name() + "-update-id" + msgID = t.Name() + "-message-id" + command = msgCommand(msgID) // blank is invalid + msg = &protocolpb.Message{ + Id: msgID, + ProtocolInstanceId: updateID, + Body: mustMarshalAny(t, &updatepb.Acceptance{}), + } + ) + tc.ms.EXPECT().GetExecutionInfo().AnyTimes().Return(&persistencespb.WorkflowExecutionInfo{}) + tc.ms.EXPECT().GetExecutionState().AnyTimes().Return(&persistencespb.WorkflowExecutionState{}) + tc.ms.EXPECT().GetUpdateOutcome(gomock.Any(), updateID).Return(nil, serviceerror.NewNotFound("")) + + t.Log("create the expected protocol instance") + _, _, err := tc.updates.FindOrCreate(context.Background(), updateID) + require.NoError(t, err) + + t.Log("delivering an acceptance message to an update in the admitted state should cause a protocol error") + _, err = tc.handler.handleCommand(context.Background(), command, newMsgList(msg)) + require.NoError(t, err) + require.NotNil(t, tc.handler.workflowTaskFailedCause) + require.Equal(t, + enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_UPDATE_WORKFLOW_EXECUTION_MESSAGE, + tc.handler.workflowTaskFailedCause.failedCause) + var gotErr *serviceerror.InvalidArgument + require.ErrorAs(t, tc.handler.workflowTaskFailedCause.causeErr, &gotErr) + }) + + t.Run("deliver message success", func(t *testing.T) { + var tc testconf + setup(t, &tc, defaultBlobSizeLimit) + var ( + updateID = t.Name() + "-update-id" + msgID = t.Name() + "-message-id" + command = msgCommand(msgID) // blank is invalid + msg = &protocolpb.Message{ + Id: msgID, + ProtocolInstanceId: updateID, + Body: 
mustMarshalAny(t, &updatepb.Request{ + Meta: &updatepb.Meta{UpdateId: updateID}, + Input: &updatepb.Input{Name: "not_empty"}, + }), + } + msgs = newMsgList(msg) + ) + tc.ms.EXPECT().GetExecutionInfo().AnyTimes().Return(&persistencespb.WorkflowExecutionInfo{}) + tc.ms.EXPECT().GetExecutionState().AnyTimes().Return(&persistencespb.WorkflowExecutionState{}) + tc.ms.EXPECT().GetUpdateOutcome(gomock.Any(), updateID).Return(nil, serviceerror.NewNotFound("")) + + t.Log("create the expected protocol instance") + _, _, err := tc.updates.FindOrCreate(context.Background(), updateID) + require.NoError(t, err) + + _, err = tc.handler.handleCommand(context.Background(), command, msgs) + require.NoError(t, err, + "delivering a request message to an update in the admitted state should succeed") + require.Nil(t, tc.handler.workflowTaskFailedCause) + }) +} + +func newMsgList(msgs ...*protocolpb.Message) *collection.IndexedTakeList[string, *protocolpb.Message] { + return collection.NewIndexedTakeList(msgs, func(msg *protocolpb.Message) string { return msg.Id }) +} + +func mustMarshalAny(t *testing.T, pb proto.Message) *types.Any { + t.Helper() + a, err := types.MarshalAny(pb) + require.NoError(t, err) + return a +} diff -Nru temporal-1.21.5-1/src/service/matching/ackManager.go temporal-1.22.5/src/service/matching/ackManager.go --- temporal-1.21.5-1/src/service/matching/ackManager.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/matching/ackManager.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,143 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package matching - -import ( - "sync" - - "go.uber.org/atomic" - "golang.org/x/exp/maps" - - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/log/tag" - "go.temporal.io/server/common/util" -) - -// Used to convert out of order acks into ackLevel movement. 
-type ackManager struct { - sync.RWMutex - outstandingTasks map[int64]bool // key->TaskID, value->(true for acked, false->for non acked) - readLevel int64 // Maximum TaskID inserted into outstandingTasks - ackLevel int64 // Maximum TaskID below which all tasks are acked - backlogCounter atomic.Int64 - logger log.Logger -} - -func newAckManager(logger log.Logger) ackManager { - return ackManager{logger: logger, outstandingTasks: make(map[int64]bool), readLevel: -1, ackLevel: -1} -} - -// Registers task as in-flight and moves read level to it. Tasks can be added in increasing order of taskID only. -func (m *ackManager) addTask(taskID int64) { - m.Lock() - defer m.Unlock() - if m.readLevel >= taskID { - m.logger.Fatal("Next task ID is less than current read level.", - tag.TaskID(taskID), - tag.ReadLevel(m.readLevel)) - } - m.readLevel = taskID - if _, ok := m.outstandingTasks[taskID]; ok { - m.logger.Fatal("Already present in outstanding tasks", tag.TaskID(taskID)) - } - m.outstandingTasks[taskID] = false // true is for acked - m.backlogCounter.Inc() -} - -func (m *ackManager) getReadLevel() int64 { - m.RLock() - defer m.RUnlock() - return m.readLevel -} - -func (m *ackManager) setReadLevel(readLevel int64) { - m.Lock() - defer m.Unlock() - m.readLevel = readLevel -} - -func (m *ackManager) setReadLevelAfterGap(newReadLevel int64) { - m.Lock() - defer m.Unlock() - if m.ackLevel == m.readLevel { - // This is called after we read a range and find no tasks. The range we read was m.readLevel to newReadLevel. - // (We know this because nothing should change m.readLevel except the getTasksPump loop itself, after initialization. - // And getTasksPump doesn't start until it gets a signal from taskWriter that it's initialized the levels.) - // If we've acked all tasks up to m.readLevel, and there are no tasks between that and newReadLevel, then we've - // acked all tasks up to newReadLevel too. This lets us advance the ack level on a task queue with no activity - // but where the rangeid has moved higher, to prevent excessive reads on the next load. - m.ackLevel = newReadLevel - } - m.readLevel = newReadLevel -} - -func (m *ackManager) getAckLevel() int64 { - m.RLock() - defer m.RUnlock() - return m.ackLevel -} - -// Moves ack level to the new level if it is higher than the current one. -// Also updates the read level if it is lower than the ackLevel. 
-func (m *ackManager) setAckLevel(ackLevel int64) { - m.Lock() - defer m.Unlock() - if ackLevel > m.ackLevel { - m.ackLevel = ackLevel - } - if ackLevel > m.readLevel { - m.readLevel = ackLevel - } -} - -func (m *ackManager) completeTask(taskID int64) (ackLevel int64) { - m.Lock() - defer m.Unlock() - if completed, ok := m.outstandingTasks[taskID]; ok && !completed { - m.outstandingTasks[taskID] = true - m.backlogCounter.Dec() - } - - // TODO the ack level management shuld be done by a dedicated coroutine - // this is only a temporarily solution - - taskIDs := maps.Keys(m.outstandingTasks) - util.SortSlice(taskIDs) - - // Update ackLevel - for _, taskID := range taskIDs { - if acked := m.outstandingTasks[taskID]; acked { - m.ackLevel = taskID - delete(m.outstandingTasks, taskID) - } else { - return m.ackLevel - } - } - return m.ackLevel -} - -func (m *ackManager) getBacklogCountHint() int64 { - return m.backlogCounter.Load() -} diff -Nru temporal-1.21.5-1/src/service/matching/ack_manager.go temporal-1.22.5/src/service/matching/ack_manager.go --- temporal-1.21.5-1/src/service/matching/ack_manager.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/service/matching/ack_manager.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,143 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package matching + +import ( + "sync" + + "go.uber.org/atomic" + "golang.org/x/exp/maps" + + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/log/tag" + "go.temporal.io/server/common/util" +) + +// Used to convert out of order acks into ackLevel movement. +type ackManager struct { + sync.RWMutex + outstandingTasks map[int64]bool // key->TaskID, value->(true for acked, false->for non acked) + readLevel int64 // Maximum TaskID inserted into outstandingTasks + ackLevel int64 // Maximum TaskID below which all tasks are acked + backlogCounter atomic.Int64 + logger log.Logger +} + +func newAckManager(logger log.Logger) ackManager { + return ackManager{logger: logger, outstandingTasks: make(map[int64]bool), readLevel: -1, ackLevel: -1} +} + +// Registers task as in-flight and moves read level to it. Tasks can be added in increasing order of taskID only. 
+func (m *ackManager) addTask(taskID int64) { + m.Lock() + defer m.Unlock() + if m.readLevel >= taskID { + m.logger.Fatal("Next task ID is less than current read level.", + tag.TaskID(taskID), + tag.ReadLevel(m.readLevel)) + } + m.readLevel = taskID + if _, ok := m.outstandingTasks[taskID]; ok { + m.logger.Fatal("Already present in outstanding tasks", tag.TaskID(taskID)) + } + m.outstandingTasks[taskID] = false // true is for acked + m.backlogCounter.Inc() +} + +func (m *ackManager) getReadLevel() int64 { + m.RLock() + defer m.RUnlock() + return m.readLevel +} + +func (m *ackManager) setReadLevel(readLevel int64) { + m.Lock() + defer m.Unlock() + m.readLevel = readLevel +} + +func (m *ackManager) setReadLevelAfterGap(newReadLevel int64) { + m.Lock() + defer m.Unlock() + if m.ackLevel == m.readLevel { + // This is called after we read a range and find no tasks. The range we read was m.readLevel to newReadLevel. + // (We know this because nothing should change m.readLevel except the getTasksPump loop itself, after initialization. + // And getTasksPump doesn't start until it gets a signal from taskWriter that it's initialized the levels.) + // If we've acked all tasks up to m.readLevel, and there are no tasks between that and newReadLevel, then we've + // acked all tasks up to newReadLevel too. This lets us advance the ack level on a task queue with no activity + // but where the rangeid has moved higher, to prevent excessive reads on the next load. + m.ackLevel = newReadLevel + } + m.readLevel = newReadLevel +} + +func (m *ackManager) getAckLevel() int64 { + m.RLock() + defer m.RUnlock() + return m.ackLevel +} + +// Moves ack level to the new level if it is higher than the current one. +// Also updates the read level if it is lower than the ackLevel. 
+func (m *ackManager) setAckLevel(ackLevel int64) { + m.Lock() + defer m.Unlock() + if ackLevel > m.ackLevel { + m.ackLevel = ackLevel + } + if ackLevel > m.readLevel { + m.readLevel = ackLevel + } +} + +func (m *ackManager) completeTask(taskID int64) (ackLevel int64) { + m.Lock() + defer m.Unlock() + if completed, ok := m.outstandingTasks[taskID]; ok && !completed { + m.outstandingTasks[taskID] = true + m.backlogCounter.Dec() + } + + // TODO the ack level management should be done by a dedicated coroutine + // this is only a temporarily solution + + taskIDs := maps.Keys(m.outstandingTasks) + util.SortSlice(taskIDs) + + // Update ackLevel + for _, taskID := range taskIDs { + if acked := m.outstandingTasks[taskID]; acked { + m.ackLevel = taskID + delete(m.outstandingTasks, taskID) + } else { + return m.ackLevel + } + } + return m.ackLevel +} + +func (m *ackManager) getBacklogCountHint() int64 { + return m.backlogCounter.Load() +} diff -Nru temporal-1.21.5-1/src/service/matching/config.go temporal-1.22.5/src/service/matching/config.go --- temporal-1.21.5-1/src/service/matching/config.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/matching/config.go 2024-02-23 09:45:43.000000000 +0000 @@ -27,6 +27,7 @@ import ( "time" + "go.temporal.io/server/common" "go.temporal.io/server/common/backoff" "go.temporal.io/server/common/dynamicconfig" "go.temporal.io/server/common/namespace" @@ -47,6 +48,7 @@ SyncMatchWaitDuration dynamicconfig.DurationPropertyFnWithTaskQueueInfoFilters TestDisableSyncMatch dynamicconfig.BoolPropertyFn RPS dynamicconfig.IntPropertyFn + OperatorRPSRatio dynamicconfig.FloatPropertyFn ShutdownDrainDuration dynamicconfig.DurationPropertyFn // taskQueueManager configuration @@ -61,9 +63,9 @@ ForwarderMaxOutstandingTasks dynamicconfig.IntPropertyFnWithTaskQueueInfoFilters ForwarderMaxRatePerSecond dynamicconfig.IntPropertyFnWithTaskQueueInfoFilters ForwarderMaxChildrenPerNode dynamicconfig.IntPropertyFnWithTaskQueueInfoFilters - VersionCompatibleSetLimitPerQueue dynamicconfig.IntPropertyFn - VersionBuildIdLimitPerQueue dynamicconfig.IntPropertyFn - TaskQueueLimitPerBuildId dynamicconfig.IntPropertyFn + VersionCompatibleSetLimitPerQueue dynamicconfig.IntPropertyFnWithNamespaceFilter + VersionBuildIdLimitPerQueue dynamicconfig.IntPropertyFnWithNamespaceFilter + TaskQueueLimitPerBuildId dynamicconfig.IntPropertyFnWithNamespaceFilter GetUserDataLongPollTimeout dynamicconfig.DurationPropertyFn // Time to hold a poll request before returning an empty response if there are no tasks @@ -124,7 +126,7 @@ AdminNamespaceTaskQueueToPartitionDispatchRate func() float64 // If set to false, matching does not load user data from DB for root partitions or fetch it via RPC from the - // root. When disbled, features that rely on user data (e.g. worker versioning) will essentially be disabled. + // root. When disabled, features that rely on user data (e.g. worker versioning) will essentially be disabled. // See the documentation for constants.MatchingLoadUserData for the implications on versioning. 
LoadUserData func() bool @@ -163,6 +165,7 @@ TestDisableSyncMatch: dc.GetBoolProperty(dynamicconfig.TestMatchingDisableSyncMatch, false), LoadUserData: dc.GetBoolPropertyFilteredByTaskQueueInfo(dynamicconfig.MatchingLoadUserData, true), RPS: dc.GetIntProperty(dynamicconfig.MatchingRPS, 1200), + OperatorRPSRatio: dc.GetFloat64Property(dynamicconfig.OperatorRPSRatio, common.DefaultOperatorRPSRatio), RangeSize: 100000, GetTasksBatchSize: dc.GetIntPropertyFilteredByTaskQueueInfo(dynamicconfig.MatchingGetTasksBatchSize, 1000), UpdateAckInterval: dc.GetDurationPropertyFilteredByTaskQueueInfo(dynamicconfig.MatchingUpdateAckInterval, defaultUpdateAckInterval), @@ -180,9 +183,9 @@ ForwarderMaxRatePerSecond: dc.GetIntPropertyFilteredByTaskQueueInfo(dynamicconfig.MatchingForwarderMaxRatePerSecond, 10), ForwarderMaxChildrenPerNode: dc.GetIntPropertyFilteredByTaskQueueInfo(dynamicconfig.MatchingForwarderMaxChildrenPerNode, 20), ShutdownDrainDuration: dc.GetDurationProperty(dynamicconfig.MatchingShutdownDrainDuration, 0*time.Second), - VersionCompatibleSetLimitPerQueue: dc.GetIntProperty(dynamicconfig.VersionCompatibleSetLimitPerQueue, 10), - VersionBuildIdLimitPerQueue: dc.GetIntProperty(dynamicconfig.VersionBuildIdLimitPerQueue, 100), - TaskQueueLimitPerBuildId: dc.GetIntProperty(dynamicconfig.TaskQueuesPerBuildIdLimit, 20), + VersionCompatibleSetLimitPerQueue: dc.GetIntPropertyFilteredByNamespace(dynamicconfig.VersionCompatibleSetLimitPerQueue, 10), + VersionBuildIdLimitPerQueue: dc.GetIntPropertyFilteredByNamespace(dynamicconfig.VersionBuildIdLimitPerQueue, 100), + TaskQueueLimitPerBuildId: dc.GetIntPropertyFilteredByNamespace(dynamicconfig.TaskQueuesPerBuildIdLimit, 20), GetUserDataLongPollTimeout: dc.GetDurationProperty(dynamicconfig.MatchingGetUserDataLongPollTimeout, 5*time.Minute), AdminNamespaceToPartitionDispatchRate: dc.GetFloatPropertyFilteredByNamespace(dynamicconfig.AdminMatchingNamespaceToPartitionDispatchRate, 10000), diff -Nru temporal-1.21.5-1/src/service/matching/configs/quotas.go temporal-1.22.5/src/service/matching/configs/quotas.go --- temporal-1.21.5-1/src/service/matching/configs/quotas.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/matching/configs/quotas.go 2024-02-23 09:45:43.000000000 +0000 @@ -25,44 +25,68 @@ package configs import ( + "go.temporal.io/server/common/dynamicconfig" + "go.temporal.io/server/common/headers" "go.temporal.io/server/common/quotas" ) +const ( + // OperatorPriority is used to give precedence to calls coming from web UI or tctl + OperatorPriority = 0 +) + var ( APIToPriority = map[string]int{ - "AddActivityTask": 0, - "AddWorkflowTask": 0, - "CancelOutstandingPoll": 0, - "DescribeTaskQueue": 0, - "ListTaskQueuePartitions": 0, - "PollActivityTaskQueue": 0, - "PollWorkflowTaskQueue": 0, - "QueryWorkflow": 0, - "RespondQueryTaskCompleted": 0, - "GetWorkerBuildIdCompatibility": 0, - "UpdateWorkerBuildIdCompatibility": 0, - "GetTaskQueueUserData": 0, - "ApplyTaskQueueUserDataReplicationEvent": 0, - "GetBuildIdTaskQueueMapping": 0, - "ForceUnloadTaskQueue": 0, - "UpdateTaskQueueUserData": 0, - "ReplicateTaskQueueUserData": 0, + "AddActivityTask": 1, + "AddWorkflowTask": 1, + "CancelOutstandingPoll": 1, + "DescribeTaskQueue": 1, + "ListTaskQueuePartitions": 1, + "PollActivityTaskQueue": 1, + "PollWorkflowTaskQueue": 1, + "QueryWorkflow": 1, + "RespondQueryTaskCompleted": 1, + "GetWorkerBuildIdCompatibility": 1, + "UpdateWorkerBuildIdCompatibility": 1, + "GetTaskQueueUserData": 1, + "ApplyTaskQueueUserDataReplicationEvent": 1, + 
"GetBuildIdTaskQueueMapping": 1, + "ForceUnloadTaskQueue": 1, + "UpdateTaskQueueUserData": 1, + "ReplicateTaskQueueUserData": 1, } - APIPrioritiesOrdered = []int{0} + APIPrioritiesOrdered = []int{0, 1} ) func NewPriorityRateLimiter( rateFn quotas.RateFn, + operatorRPSRatio dynamicconfig.FloatPropertyFn, ) quotas.RequestRateLimiter { rateLimiters := make(map[int]quotas.RequestRateLimiter) for priority := range APIPrioritiesOrdered { - rateLimiters[priority] = quotas.NewRequestRateLimiterAdapter(quotas.NewDefaultIncomingRateLimiter(rateFn)) + if priority == OperatorPriority { + rateLimiters[priority] = quotas.NewRequestRateLimiterAdapter(quotas.NewDefaultIncomingRateLimiter(operatorRateFn(rateFn, operatorRPSRatio))) + } else { + rateLimiters[priority] = quotas.NewRequestRateLimiterAdapter(quotas.NewDefaultIncomingRateLimiter(rateFn)) + } } return quotas.NewPriorityRateLimiter(func(req quotas.Request) int { + if req.CallerType == headers.CallerTypeOperator { + return OperatorPriority + } if priority, ok := APIToPriority[req.API]; ok { return priority } return APIPrioritiesOrdered[len(APIPrioritiesOrdered)-1] }, rateLimiters) } + +func operatorRateFn( + rateFn quotas.RateFn, + operatorRPSRatio dynamicconfig.FloatPropertyFn, +) quotas.RateFn { + return func() float64 { + return operatorRPSRatio() * rateFn() + } +} diff -Nru temporal-1.21.5-1/src/service/matching/configs/quotas_test.go temporal-1.22.5/src/service/matching/configs/quotas_test.go --- temporal-1.21.5-1/src/service/matching/configs/quotas_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/matching/configs/quotas_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -27,12 +27,15 @@ import ( "reflect" "testing" + "time" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" "golang.org/x/exp/slices" "go.temporal.io/server/api/matchingservice/v1" + "go.temporal.io/server/common/headers" + "go.temporal.io/server/common/quotas" ) type ( @@ -83,3 +86,36 @@ } s.Equal(apiToPriority, APIToPriority) } + +func (s *quotasSuite) TestOperatorPrioritized() { + rateFn := func() float64 { return 5 } + operatorRPSRatioFn := func() float64 { return 0.2 } + limiter := NewPriorityRateLimiter(rateFn, operatorRPSRatioFn) + + operatorRequest := quotas.NewRequest( + "QueryWorkflow", + 1, + "", + headers.CallerTypeOperator, + -1, + "") + + apiRequest := quotas.NewRequest( + "QueryWorkflow", + 1, + "", + headers.CallerTypeAPI, + -1, + "") + + requestTime := time.Now() + limitCount := 0 + + for i := 0; i < 12; i++ { + if !limiter.Allow(requestTime, apiRequest) { + limitCount++ + s.True(limiter.Allow(requestTime, operatorRequest)) + } + } + s.Equal(2, limitCount) +} diff -Nru temporal-1.21.5-1/src/service/matching/db.go temporal-1.22.5/src/service/matching/db.go --- temporal-1.21.5-1/src/service/matching/db.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/matching/db.go 2024-02-23 09:45:43.000000000 +0000 @@ -47,6 +47,12 @@ initialRangeID = 1 // Id of the first range of a new task queue stickyTaskQueueTTL = 24 * time.Hour + // "Version set id" for the dlq for versioned tasks. This won't match any real version set + // since those are based on hashes of build ids. + dlqVersionSet = "dlq" +) + +const ( // userDataEnabled is the default state: user data is enabled. userDataEnabled userDataState = iota // userDataDisabled means user data is disabled due to the LoadUserData dynamic config @@ -57,6 +63,8 @@ // have its own user data and it should not be used. 
This should cause GetUserData to // return an Internal error (access would indicate a bug). userDataSpecificVersion + // userDataClosed means the task queue is closed. + userDataClosed ) type ( @@ -90,6 +98,8 @@ errNoUserDataOnVersionedTQM = serviceerror.NewInternal("should not get user data on versioned tqm") errUserDataDisabled = serviceerror.NewFailedPrecondition("Task queue user data operations are disabled") + + errTaskQueueClosed = serviceerror.NewUnavailable("task queue closed") ) // newTaskQueueDB returns an instance of an object that represents @@ -310,17 +320,15 @@ return n, err } -// Returns true if we are storing user data in the db. We need to be the root partition, -// workflow type, unversioned, and also a normal queue. +// DbStoresUserData returns true if we are storing user data in the db. We need to be the root partition, workflow type, +// unversioned, and also a normal queue. func (db *taskQueueDB) DbStoresUserData() bool { return db.taskQueue.OwnsUserData() && db.taskQueueKind == enumspb.TASK_QUEUE_KIND_NORMAL } // GetUserData returns the versioning data for this task queue. Do not mutate the returned pointer, as doing so // will cause cache inconsistency. -func (db *taskQueueDB) GetUserData( - ctx context.Context, -) (*persistencespb.VersionedTaskQueueUserData, chan struct{}, error) { +func (db *taskQueueDB) GetUserData() (*persistencespb.VersionedTaskQueueUserData, chan struct{}, error) { db.Lock() defer db.Unlock() return db.getUserDataLocked() @@ -331,9 +339,13 @@ case userDataEnabled: return db.userData, db.userDataChanged, nil case userDataDisabled: - return nil, nil, errUserDataDisabled + // return userDataChanged even with an error here so that a blocking wait can be + // interrupted when user data is enabled again. + return nil, db.userDataChanged, errUserDataDisabled case userDataSpecificVersion: return nil, nil, errNoUserDataOnVersionedTQM + case userDataClosed: + return nil, nil, errTaskQueueClosed default: // shouldn't happen return nil, nil, serviceerror.NewInternal("unexpected user data enabled state") @@ -371,10 +383,15 @@ return nil } -func (db *taskQueueDB) setUserDataState(setUserDataState userDataState) { +func (db *taskQueueDB) setUserDataState(userDataState userDataState) { db.Lock() defer db.Unlock() - db.userDataState = setUserDataState + + if userDataState != db.userDataState && db.userDataState != userDataClosed { + db.userDataState = userDataState + close(db.userDataChanged) + db.userDataChanged = make(chan struct{}) + } } // UpdateUserData allows callers to update user data (such as worker build IDs) for this task queue. 
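Aside: the reworked setUserDataState above relies on a common Go notification idiom: keep a `chan struct{}` next to the guarded state, and on every state change close the current channel (waking every waiter that holds it) and install a fresh one for future waiters. That is also why GetUserData now returns userDataChanged even on the "disabled" error path, so blocked long-polls can be interrupted when user data is re-enabled. A small self-contained sketch of the idiom; stateBox and its methods are hypothetical names, not Temporal's API:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// stateBox guards a value and lets callers block until it changes,
// in the style of taskQueueDB.userDataChanged.
type stateBox struct {
	mu      sync.Mutex
	state   string
	changed chan struct{}
}

func newStateBox(initial string) *stateBox {
	return &stateBox{state: initial, changed: make(chan struct{})}
}

// get returns the current state plus a channel that is closed on the next change.
func (b *stateBox) get() (string, <-chan struct{}) {
	b.mu.Lock()
	defer b.mu.Unlock()
	return b.state, b.changed
}

// set updates the state; closing the old channel wakes every goroutine that
// grabbed it via get, and a new channel is installed for future waiters.
func (b *stateBox) set(state string) {
	b.mu.Lock()
	defer b.mu.Unlock()
	if state == b.state {
		return
	}
	b.state = state
	close(b.changed)
	b.changed = make(chan struct{})
}

func main() {
	box := newStateBox("disabled")
	_, changed := box.get()

	go func() {
		time.Sleep(10 * time.Millisecond)
		box.set("enabled")
	}()

	<-changed // unblocked by set closing the channel
	state, _ := box.get()
	fmt.Println("state is now:", state)
}
```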
The pointer passed diff -Nru temporal-1.21.5-1/src/service/matching/forwarder.go temporal-1.22.5/src/service/matching/forwarder.go --- temporal-1.21.5-1/src/service/matching/forwarder.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/matching/forwarder.go 2024-02-23 09:45:43.000000000 +0000 @@ -113,7 +113,7 @@ return fwdr } -// ForwardTask forwards an activity or workflow task to the parent task queue partition if it exist +// ForwardTask forwards an activity or workflow task to the parent task queue partition if it exists func (fwdr *Forwarder) ForwardTask(ctx context.Context, task *internalTask) error { if fwdr.taskQueueKind == enumspb.TASK_QUEUE_KIND_STICKY { return errTaskQueueKind @@ -131,15 +131,12 @@ var expirationDuration time.Duration expirationTime := timestamp.TimeValue(task.event.Data.ExpiryTime) - if expirationTime.IsZero() { - // noop - } else { + if !expirationTime.IsZero() { expirationDuration = time.Until(expirationTime) if expirationDuration <= 0 { return nil } } - switch fwdr.taskQueueID.taskType { case enumspb.TASK_QUEUE_TYPE_WORKFLOW: _, err = fwdr.client.AddWorkflowTask(ctx, &matchingservice.AddWorkflowTaskRequest{ @@ -178,7 +175,7 @@ return fwdr.handleErr(err) } -// ForwardQueryTask forwards a query task to parent task queue partition, if it exist +// ForwardQueryTask forwards a query task to parent task queue partition, if it exists func (fwdr *Forwarder) ForwardQueryTask( ctx context.Context, task *internalTask, @@ -200,8 +197,9 @@ Name: target.FullName(), Kind: fwdr.taskQueueKind, }, - QueryRequest: task.query.request.QueryRequest, - ForwardedSource: fwdr.taskQueueID.FullName(), + QueryRequest: task.query.request.QueryRequest, + ForwardedSource: fwdr.taskQueueID.FullName(), + VersionDirective: task.query.request.VersionDirective, }) return resp, fwdr.handleErr(err) diff -Nru temporal-1.21.5-1/src/service/matching/forwarder_test.go temporal-1.22.5/src/service/matching/forwarder_test.go --- temporal-1.21.5-1/src/service/matching/forwarder_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/matching/forwarder_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -77,7 +77,9 @@ } func (t *ForwarderTestSuite) TestForwardTaskError() { - task := newInternalTask(&persistencespb.AllocatedTaskInfo{}, nil, enumsspb.TASK_SOURCE_HISTORY, "", false) + task := newInternalTask(&persistencespb.AllocatedTaskInfo{ + Data: &persistencespb.TaskInfo{}, + }, nil, enumsspb.TASK_SOURCE_HISTORY, "", false) t.Equal(tqname.ErrNoParent, t.fwdr.ForwardTask(context.Background(), task)) t.usingTaskqueuePartition(enumspb.TASK_QUEUE_TYPE_ACTIVITY) @@ -100,7 +102,7 @@ t.NoError(t.fwdr.ForwardTask(context.Background(), task)) t.NotNil(request) t.Equal(mustParent(t.taskQueue.Name, 20).FullName(), request.TaskQueue.GetName()) - t.Equal(enumspb.TaskQueueKind(t.fwdr.taskQueueKind), request.TaskQueue.GetKind()) + t.Equal(t.fwdr.taskQueueKind, request.TaskQueue.GetKind()) t.Equal(taskInfo.Data.GetNamespaceId(), request.GetNamespaceId()) t.Equal(taskInfo.Data.GetWorkflowId(), request.GetExecution().GetWorkflowId()) t.Equal(taskInfo.Data.GetRunId(), request.GetExecution().GetRunId()) @@ -175,7 +177,7 @@ gotResp, err := t.fwdr.ForwardQueryTask(context.Background(), task) t.NoError(err) t.Equal(mustParent(t.taskQueue.Name, 20).FullName(), request.TaskQueue.GetName()) - t.Equal(enumspb.TaskQueueKind(t.fwdr.taskQueueKind), request.TaskQueue.GetKind()) + t.Equal(t.fwdr.taskQueueKind, request.TaskQueue.GetKind()) t.Equal(task.query.request.QueryRequest, request.QueryRequest) 
t.Equal(resp, gotResp) } @@ -191,7 +193,7 @@ t.NoError(err) } _, err := t.fwdr.ForwardQueryTask(context.Background(), task) - t.NoError(err) // no rateliming should be enforced for query task + t.NoError(err) // no rate limiting should be enforced for query task } func (t *ForwarderTestSuite) TestForwardPollError() { @@ -228,7 +230,7 @@ t.Equal(t.taskQueue.namespaceID, namespace.ID(request.GetNamespaceId())) t.Equal("id1", request.GetPollRequest().GetIdentity()) t.Equal(mustParent(t.taskQueue.Name, 20).FullName(), request.GetPollRequest().GetTaskQueue().GetName()) - t.Equal(enumspb.TaskQueueKind(t.fwdr.taskQueueKind), request.GetPollRequest().GetTaskQueue().GetKind()) + t.Equal(t.fwdr.taskQueueKind, request.GetPollRequest().GetTaskQueue().GetKind()) t.Equal(resp, task.pollWorkflowTaskQueueResponse()) t.Nil(task.pollActivityTaskQueueResponse()) } @@ -256,7 +258,7 @@ t.Equal(t.taskQueue.namespaceID, namespace.ID(request.GetNamespaceId())) t.Equal("id1", request.GetPollRequest().GetIdentity()) t.Equal(mustParent(t.taskQueue.Name, 20).FullName(), request.GetPollRequest().GetTaskQueue().GetName()) - t.Equal(enumspb.TaskQueueKind(t.fwdr.taskQueueKind), request.GetPollRequest().GetTaskQueue().GetKind()) + t.Equal(t.fwdr.taskQueueKind, request.GetPollRequest().GetTaskQueue().GetKind()) t.Equal(resp, task.pollActivityTaskQueueResponse()) t.Nil(task.pollWorkflowTaskQueueResponse()) } @@ -284,24 +286,26 @@ for i := 0; i < concurrency; i++ { wg.Add(1) go func() { + timer := time.NewTimer(time.Millisecond * 200) select { case token := <-t.fwdr.AddReqTokenC(): + timer.Stop() if !tc.mustLeakToken { token.release() } atomic.AddInt32(&adds, 1) - case <-time.After(time.Millisecond * 200): - break + case <-timer.C: } + timer = time.NewTimer(time.Millisecond * 200) select { case token := <-t.fwdr.PollReqTokenC(): + timer.Stop() if !tc.mustLeakToken { token.release() } atomic.AddInt32(&polls, 1) - case <-time.After(time.Millisecond * 200): - break + case <-timer.C: } wg.Done() }() diff -Nru temporal-1.21.5-1/src/service/matching/fx.go temporal-1.22.5/src/service/matching/fx.go --- temporal-1.21.5-1/src/service/matching/fx.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/matching/fx.go 2024-02-23 09:45:43.000000000 +0000 @@ -25,11 +25,8 @@ package matching import ( - "context" - "go.uber.org/fx" - "go.temporal.io/server/api/historyservice/v1" "go.temporal.io/server/common" "go.temporal.io/server/common/cluster" "go.temporal.io/server/common/config" @@ -107,12 +104,12 @@ serviceConfig *Config, ) *interceptor.RateLimitInterceptor { return interceptor.NewRateLimitInterceptor( - configs.NewPriorityRateLimiter(func() float64 { return float64(serviceConfig.RPS()) }), + configs.NewPriorityRateLimiter(func() float64 { return float64(serviceConfig.RPS()) }, serviceConfig.OperatorRPSRatio), map[string]int{}, ) } -// This function is the same between services but uses different config sources. +// PersistenceRateLimitingParamsProvider is the same between services but uses different config sources. // if-case comes from resourceImpl.New. 
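Aside: the forwarder_test.go hunk above replaces `time.After` inside a select loop with an explicit `time.NewTimer` that is stopped as soon as another case wins. `time.After` allocates a timer that cannot be released until it fires, so in tight loops or high-concurrency tests the explicit form avoids piling up pending timers. A minimal sketch of the pattern (waitWithTimeout is an invented helper):

```go
package main

import (
	"fmt"
	"time"
)

// waitWithTimeout waits for a value on ch, giving up after timeout.
// Using an explicit timer (instead of time.After) lets us stop it as soon as
// the value arrives, so the timer does not linger until it would have fired.
func waitWithTimeout(ch <-chan int, timeout time.Duration) (int, bool) {
	timer := time.NewTimer(timeout)
	defer timer.Stop()
	select {
	case v := <-ch:
		return v, true
	case <-timer.C:
		return 0, false
	}
}

func main() {
	ch := make(chan int, 1)
	ch <- 42
	if v, ok := waitWithTimeout(ch, 200*time.Millisecond); ok {
		fmt.Println("got", v)
	}
	if _, ok := waitWithTimeout(ch, 50*time.Millisecond); !ok {
		fmt.Println("timed out")
	}
}
```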
func PersistenceRateLimitingParamsProvider( serviceConfig *Config, @@ -123,6 +120,7 @@ serviceConfig.PersistenceNamespaceMaxQPS, serviceConfig.PersistencePerShardNamespaceMaxQPS, serviceConfig.EnablePersistencePriorityRateLimiting, + serviceConfig.OperatorRPSRatio, serviceConfig.PersistenceDynamicRateLimitingParams, ) } @@ -131,8 +129,8 @@ return membershipMonitor.GetResolver(primitives.MatchingService) } -// This type is used to ensure the replicator only gets set if global namespaces are enabled on this cluster. -// See NamespaceReplicationQueueProvider below. +// TaskQueueReplicatorNamespaceReplicationQueue is used to ensure the replicator only gets set if global namespaces are +// enabled on this cluster. See NamespaceReplicationQueueProvider below. type TaskQueueReplicatorNamespaceReplicationQueue persistence.NamespaceReplicationQueue func NamespaceReplicationQueueProvider( @@ -165,6 +163,7 @@ searchAttributesMapperProvider, serviceConfig.VisibilityPersistenceMaxReadQPS, serviceConfig.VisibilityPersistenceMaxWriteQPS, + serviceConfig.OperatorRPSRatio, serviceConfig.EnableReadFromSecondaryVisibility, dynamicconfig.GetStringPropertyFn(visibility.SecondaryVisibilityWritingModeOff), // matching visibility never writes serviceConfig.VisibilityDisableOrderByClause, @@ -179,7 +178,7 @@ logger log.SnTaggedLogger, throttledLogger log.ThrottledLogger, taskManager persistence.TaskManager, - historyClient historyservice.HistoryServiceClient, + historyClient resource.HistoryClient, matchingRawClient resource.MatchingRawClient, matchingServiceResolver membership.ServiceResolver, metricsHandler metrics.Handler, @@ -204,27 +203,6 @@ ) } -func ServiceLifetimeHooks( - lc fx.Lifecycle, - svcStoppedCh chan struct{}, - svc *Service, -) { - lc.Append( - fx.Hook{ - OnStart: func(context.Context) error { - go func(svc common.Daemon, svcStoppedCh chan<- struct{}) { - // Start is blocked until Stop() is called. 
- svc.Start() - close(svcStoppedCh) - }(svc, svcStoppedCh) - - return nil - }, - OnStop: func(ctx context.Context) error { - svc.Stop() - return nil - }, - }, - ) - +func ServiceLifetimeHooks(lc fx.Lifecycle, svc *Service) { + lc.Append(fx.StartStopHook(svc.Start, svc.Stop)) } diff -Nru temporal-1.21.5-1/src/service/matching/handler.go temporal-1.22.5/src/service/matching/handler.go --- temporal-1.21.5-1/src/service/matching/handler.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/matching/handler.go 2024-02-23 09:45:43.000000000 +0000 @@ -31,7 +31,6 @@ taskqueuepb "go.temporal.io/api/taskqueue/v1" - "go.temporal.io/server/api/historyservice/v1" "go.temporal.io/server/api/matchingservice/v1" "go.temporal.io/server/common" "go.temporal.io/server/common/cluster" @@ -41,6 +40,7 @@ "go.temporal.io/server/common/namespace" "go.temporal.io/server/common/persistence" "go.temporal.io/server/common/persistence/visibility/manager" + "go.temporal.io/server/common/resource" ) type ( @@ -70,8 +70,8 @@ logger log.Logger, throttledLogger log.Logger, taskManager persistence.TaskManager, - historyClient historyservice.HistoryServiceClient, - matchingRawClient matchingservice.MatchingServiceClient, + historyClient resource.HistoryClient, + matchingRawClient resource.MatchingRawClient, matchingServiceResolver membership.ServiceResolver, metricsHandler metrics.Handler, namespaceRegistry namespace.Registry, @@ -90,6 +90,7 @@ matchingRawClient, // Use non retry client inside matching config, logger, + throttledLogger, metricsHandler, namespaceRegistry, matchingServiceResolver, diff -Nru temporal-1.21.5-1/src/service/matching/liveness.go temporal-1.22.5/src/service/matching/liveness.go --- temporal-1.21.5-1/src/service/matching/liveness.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/matching/liveness.go 2024-02-23 09:45:43.000000000 +0000 @@ -28,36 +28,36 @@ "sync/atomic" "time" - "github.com/jonboulle/clockwork" + "go.temporal.io/server/common/clock" ) type ( liveness struct { - clock clockwork.Clock - ttl func() time.Duration - onIdle func() - timer atomic.Value + timeSource clock.TimeSource + ttl func() time.Duration + onIdle func() + timer atomic.Value } timerWrapper struct { - clockwork.Timer + clock.Timer } ) func newLiveness( - clock clockwork.Clock, + timeSource clock.TimeSource, ttl func() time.Duration, onIdle func(), ) *liveness { return &liveness{ - clock: clock, - ttl: ttl, - onIdle: onIdle, + timeSource: timeSource, + ttl: ttl, + onIdle: onIdle, } } func (l *liveness) Start() { - l.timer.Store(timerWrapper{l.clock.AfterFunc(l.ttl(), l.onIdle)}) + l.timer.Store(timerWrapper{l.timeSource.AfterFunc(l.ttl(), l.onIdle)}) } func (l *liveness) Stop() { diff -Nru temporal-1.21.5-1/src/service/matching/liveness_test.go temporal-1.22.5/src/service/matching/liveness_test.go --- temporal-1.21.5-1/src/service/matching/liveness_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/matching/liveness_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -29,33 +29,28 @@ "testing" "time" - "github.com/jonboulle/clockwork" "github.com/stretchr/testify/assert" + "go.temporal.io/server/common/clock" ) func TestLiveness(t *testing.T) { t.Parallel() var idleCalled atomic.Int32 ttl := func() time.Duration { return 2500 * time.Millisecond } - clock := clockwork.NewFakeClock() - liveness := newLiveness(clock, ttl, func() { idleCalled.Store(1) }) + timeSource := clock.NewEventTimeSource() + liveness := newLiveness(timeSource, ttl, func() { idleCalled.Store(1) }) 
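Aside: the fx.go hunk above collapses the hand-rolled lifecycle hook into `fx.StartStopHook`, which wraps a start and a stop function into an fx.Hook. A toy program showing the shape of that wiring; the Service type here is a stand-in (not Temporal's), and it assumes Start returns promptly, since fx runs OnStart hooks synchronously during app start:

```go
package main

import (
	"context"
	"log"

	"go.uber.org/fx"
)

// Service is a stand-in for a long-lived component with Start/Stop methods.
type Service struct{}

func (s *Service) Start() { log.Println("service started") }
func (s *Service) Stop()  { log.Println("service stopped") }

func main() {
	app := fx.New(
		fx.Provide(func() *Service { return &Service{} }),
		fx.Invoke(func(lc fx.Lifecycle, svc *Service) {
			// fx.StartStopHook turns the two methods into OnStart/OnStop
			// callbacks, replacing a hand-written fx.Hook literal.
			lc.Append(fx.StartStopHook(svc.Start, svc.Stop))
		}),
	)
	if err := app.Start(context.Background()); err != nil {
		log.Fatal(err)
	}
	if err := app.Stop(context.Background()); err != nil {
		log.Fatal(err)
	}
}
```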
liveness.Start() - clock.Advance(1 * time.Second) - time.Sleep(50 * time.Millisecond) // need actual time to pass since onIdle still runs async + timeSource.Advance(1 * time.Second) assert.Equal(t, int32(0), idleCalled.Load()) liveness.markAlive() - clock.Advance(1 * time.Second) - time.Sleep(50 * time.Millisecond) + timeSource.Advance(1 * time.Second) assert.Equal(t, int32(0), idleCalled.Load()) liveness.markAlive() - clock.Advance(1 * time.Second) - time.Sleep(50 * time.Millisecond) + timeSource.Advance(1 * time.Second) assert.Equal(t, int32(0), idleCalled.Load()) - clock.Advance(1 * time.Second) - time.Sleep(50 * time.Millisecond) + timeSource.Advance(1 * time.Second) assert.Equal(t, int32(0), idleCalled.Load()) - clock.Advance(1 * time.Second) - time.Sleep(50 * time.Millisecond) + timeSource.Advance(1 * time.Second) assert.Equal(t, int32(1), idleCalled.Load()) liveness.Stop() } @@ -64,14 +59,12 @@ t.Parallel() var idleCalled atomic.Int32 ttl := func() time.Duration { return 1000 * time.Millisecond } - clock := clockwork.NewFakeClock() - liveness := newLiveness(clock, ttl, func() { idleCalled.Store(1) }) + timeSource := clock.NewEventTimeSource() + liveness := newLiveness(timeSource, ttl, func() { idleCalled.Store(1) }) liveness.Start() - clock.Advance(500 * time.Millisecond) - time.Sleep(50 * time.Millisecond) + timeSource.Advance(500 * time.Millisecond) liveness.Stop() - clock.Advance(1 * time.Second) - time.Sleep(50 * time.Millisecond) + timeSource.Advance(1 * time.Second) assert.Equal(t, int32(0), idleCalled.Load()) liveness.markAlive() // should not panic } diff -Nru temporal-1.21.5-1/src/service/matching/matcher.go temporal-1.22.5/src/service/matching/matcher.go --- temporal-1.21.5-1/src/service/matching/matcher.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/matching/matcher.go 2024-02-23 09:45:43.000000000 +0000 @@ -45,10 +45,10 @@ // synchronous task channel to match producer/consumer taskC chan *internalTask - // synchronous task channel to match query task - the reason to have - // separate channel for this is because there are cases when consumers - // are interested in queryTasks but not others. Example is when namespace is - // not active in a cluster + // synchronous task channel to match query task - the reason to have a + // separate channel for this is that there are cases where consumers + // are interested in queryTasks but not others. One example is when a + // namespace is not active in a cluster. queryTaskC chan *internalTask // dynamicRate is the dynamic rate & burst for rate limiter @@ -75,9 +75,8 @@ errInterrupted = errors.New("interrupted offer") ) -// newTaskMatcher returns an task matcher instance. The returned instance can be -// used by task producers and consumers to find a match. Both sync matches and non-sync -// matches should use this implementation +// newTaskMatcher returns a task matcher instance. The returned instance can be used by task producers and consumers to +// find a match. 
Both sync matches and non-sync matches should use this implementation func newTaskMatcher(config *taskQueueConfig, fwdr *Forwarder, metricsHandler metrics.Handler) *TaskMatcher { dynamicRateBurst := quotas.NewMutableRateBurst( defaultTaskDispatchRPS, @@ -298,13 +297,13 @@ // Poll blocks until a task is found or context deadline is exceeded // On success, the returned task could be a query task or a regular task -// Returns ErrNoTasks when context deadline is exceeded +// Returns errNoTasks when context deadline is exceeded func (tm *TaskMatcher) Poll(ctx context.Context, pollMetadata *pollMetadata) (*internalTask, error) { return tm.poll(ctx, pollMetadata, false) } // PollForQuery blocks until a *query* task is found or context deadline is exceeded -// Returns ErrNoTasks when context deadline is exceeded +// Returns errNoTasks when context deadline is exceeded func (tm *TaskMatcher) PollForQuery(ctx context.Context, pollMetadata *pollMetadata) (*internalTask, error) { return tm.poll(ctx, pollMetadata, true) } @@ -364,7 +363,7 @@ select { case <-ctx.Done(): tm.metricsHandler.Counter(metrics.PollTimeoutPerTaskQueueCounter.GetMetricName()).Record(1) - return nil, ErrNoTasks + return nil, errNoTasks default: } @@ -383,11 +382,11 @@ default: } - // 3. forwarding (and all other clauses repeated again) + // 3. forwarding (and all other clauses repeated) select { case <-ctx.Done(): tm.metricsHandler.Counter(metrics.PollTimeoutPerTaskQueueCounter.GetMetricName()).Record(1) - return nil, ErrNoTasks + return nil, errNoTasks case task := <-taskC: if task.responseC != nil { tm.metricsHandler.Counter(metrics.PollSuccessWithSyncPerTaskQueueCounter.GetMetricName()).Record(1) @@ -410,7 +409,7 @@ select { case <-ctx.Done(): tm.metricsHandler.Counter(metrics.PollTimeoutPerTaskQueueCounter.GetMetricName()).Record(1) - return nil, ErrNoTasks + return nil, errNoTasks case task := <-taskC: if task.responseC != nil { tm.metricsHandler.Counter(metrics.PollSuccessWithSyncPerTaskQueueCounter.GetMetricName()).Record(1) diff -Nru temporal-1.21.5-1/src/service/matching/matchingEngine.go temporal-1.22.5/src/service/matching/matchingEngine.go --- temporal-1.21.5-1/src/service/matching/matchingEngine.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/matching/matchingEngine.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,1599 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package matching - -import ( - "bytes" - "context" - "errors" - "fmt" - "math" - "sync" - "sync/atomic" - "time" - - "github.com/pborman/uuid" - commonpb "go.temporal.io/api/common/v1" - enumspb "go.temporal.io/api/enums/v1" - "go.temporal.io/api/serviceerror" - taskqueuepb "go.temporal.io/api/taskqueue/v1" - "go.temporal.io/api/workflowservice/v1" - "go.temporal.io/server/api/historyservice/v1" - "go.temporal.io/server/api/matchingservice/v1" - - enumsspb "go.temporal.io/server/api/enums/v1" - persistencespb "go.temporal.io/server/api/persistence/v1" - replicationspb "go.temporal.io/server/api/replication/v1" - taskqueuespb "go.temporal.io/server/api/taskqueue/v1" - tokenspb "go.temporal.io/server/api/token/v1" - "go.temporal.io/server/common" - "go.temporal.io/server/common/clock" - hlc "go.temporal.io/server/common/clock/hybrid_logical_clock" - "go.temporal.io/server/common/cluster" - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/log/tag" - "go.temporal.io/server/common/membership" - "go.temporal.io/server/common/metrics" - "go.temporal.io/server/common/namespace" - "go.temporal.io/server/common/persistence" - "go.temporal.io/server/common/persistence/visibility/manager" - "go.temporal.io/server/common/primitives/timestamp" - serviceerrors "go.temporal.io/server/common/serviceerror" - "go.temporal.io/server/common/worker_versioning" -) - -const ( - // If sticky poller is not seem in last 10s, we treat it as sticky worker unavailable - // This seems aggressive, but the default sticky schedule_to_start timeout is 5s, so 10s seems reasonable. - stickyPollerUnavailableWindow = 10 * time.Second - - recordTaskStartedDefaultTimeout = 10 * time.Second - recordTaskStartedSyncMatchTimeout = 1 * time.Second -) - -type ( - pollerIDCtxKey string - identityCtxKey string - - // lockableQueryTaskMap maps query TaskID (which is a UUID generated in QueryWorkflow() call) to a channel - // that QueryWorkflow() will block on. The channel is unblocked either by worker sending response through - // RespondQueryTaskCompleted() or through an internal service error causing temporal to be unable to dispatch - // query task to workflow worker. 
- lockableQueryTaskMap struct { - sync.RWMutex - queryTaskMap map[string]chan *queryResult - } - - lockablePollMap struct { - sync.Mutex - polls map[string]context.CancelFunc - } - - taskQueueCounterKey struct { - namespaceID namespace.ID - taskType enumspb.TaskQueueType - kind enumspb.TaskQueueKind - } - - pollMetadata struct { - ratePerSecond *float64 - workerVersionCapabilities *commonpb.WorkerVersionCapabilities - } - - namespaceUpdateLocks struct { - updateLock sync.Mutex - replicationLock sync.Mutex - } - - // Implements matching.Engine - matchingEngineImpl struct { - status int32 - taskManager persistence.TaskManager - historyClient historyservice.HistoryServiceClient - matchingClient matchingservice.MatchingServiceClient - tokenSerializer common.TaskTokenSerializer - logger log.Logger - namespaceRegistry namespace.Registry - keyResolver membership.ServiceResolver - clusterMeta cluster.Metadata - timeSource clock.TimeSource - visibilityManager manager.VisibilityManager - metricsHandler metrics.Handler - taskQueuesLock sync.RWMutex // locks mutation of taskQueues - taskQueues map[taskQueueID]taskQueueManager - taskQueueCount map[taskQueueCounterKey]int // per-namespace task queue counter - config *Config - lockableQueryTaskMap lockableQueryTaskMap - // pollMap is needed to keep track of all outstanding pollers for a particular - // taskqueue. PollerID generated by frontend is used as the key and CancelFunc is the - // value. This is used to cancel the context to unblock any outstanding poller when - // the frontend detects client connection is closed to prevent tasks being dispatched - // to zombie pollers. - pollMap lockablePollMap - // Only set if global namespaces are enabled on the cluster. - namespaceReplicationQueue persistence.NamespaceReplicationQueue - // Disables concurrent task queue user data updates and replication requests (due to a cassandra limitation) - namespaceUpdateLockMap map[string]*namespaceUpdateLocks - // Serializes access to the per namespace lock map - namespaceUpdateLockMapLock sync.Mutex - } -) - -var ( - // EmptyPollWorkflowTaskQueueResponse is the response when there are no workflow tasks to hand out - emptyPollWorkflowTaskQueueResponse = &matchingservice.PollWorkflowTaskQueueResponse{} - // EmptyPollActivityTaskQueueResponse is the response when there are no activity tasks to hand out - emptyPollActivityTaskQueueResponse = &matchingservice.PollActivityTaskQueueResponse{} - - // ErrNoTasks is exported temporarily for integration test - ErrNoTasks = errors.New("no tasks") - errPumpClosed = errors.New("task queue pump closed its channel") - - pollerIDKey pollerIDCtxKey = "pollerID" - identityKey identityCtxKey = "identity" -) - -var _ Engine = (*matchingEngineImpl)(nil) // Asserts that interface is indeed implemented - -// NewEngine creates an instance of matching engine -func NewEngine( - taskManager persistence.TaskManager, - historyClient historyservice.HistoryServiceClient, - matchingClient matchingservice.MatchingServiceClient, - config *Config, - logger log.Logger, - metricsHandler metrics.Handler, - namespaceRegistry namespace.Registry, - resolver membership.ServiceResolver, - clusterMeta cluster.Metadata, - namespaceReplicationQueue persistence.NamespaceReplicationQueue, - visibilityManager manager.VisibilityManager, -) Engine { - - return &matchingEngineImpl{ - status: common.DaemonStatusInitialized, - taskManager: taskManager, - historyClient: historyClient, - matchingClient: matchingClient, - tokenSerializer: 
common.NewProtoTaskTokenSerializer(), - logger: log.With(logger, tag.ComponentMatchingEngine), - namespaceRegistry: namespaceRegistry, - keyResolver: resolver, - clusterMeta: clusterMeta, - timeSource: clock.NewRealTimeSource(), // No need to mock this at the moment - visibilityManager: visibilityManager, - metricsHandler: metricsHandler.WithTags(metrics.OperationTag(metrics.MatchingEngineScope)), - taskQueues: make(map[taskQueueID]taskQueueManager), - taskQueueCount: make(map[taskQueueCounterKey]int), - config: config, - lockableQueryTaskMap: lockableQueryTaskMap{queryTaskMap: make(map[string]chan *queryResult)}, - pollMap: lockablePollMap{polls: make(map[string]context.CancelFunc)}, - namespaceReplicationQueue: namespaceReplicationQueue, - namespaceUpdateLockMap: make(map[string]*namespaceUpdateLocks), - } -} - -func (e *matchingEngineImpl) Start() { - if !atomic.CompareAndSwapInt32( - &e.status, - common.DaemonStatusInitialized, - common.DaemonStatusStarted, - ) { - return - } -} - -func (e *matchingEngineImpl) Stop() { - if !atomic.CompareAndSwapInt32( - &e.status, - common.DaemonStatusStarted, - common.DaemonStatusStopped, - ) { - return - } - - for _, l := range e.getTaskQueues(math.MaxInt32) { - l.Stop() - } -} - -func (e *matchingEngineImpl) getTaskQueues(maxCount int) (lists []taskQueueManager) { - e.taskQueuesLock.RLock() - defer e.taskQueuesLock.RUnlock() - lists = make([]taskQueueManager, 0, len(e.taskQueues)) - count := 0 - for _, tlMgr := range e.taskQueues { - lists = append(lists, tlMgr) - count++ - if count >= maxCount { - break - } - } - return -} - -func (e *matchingEngineImpl) String() string { - // Executes taskQueue.String() on each task queue outside of lock - buf := new(bytes.Buffer) - for _, l := range e.getTaskQueues(1000) { - fmt.Fprintf(buf, "\n%s", l.String()) - } - return buf.String() -} - -// Returns taskQueueManager for a task queue. If not already cached, and create is true, tries -// to get new range from DB and create one. This blocks (up to the context deadline) for the -// task queue to be initialized. -// -// Note that stickyInfo is not used as part of the task queue identity. That means that if -// getTaskQueueManager is called twice with the same taskQueue but different stickyInfo, the -// properties of the taskQueueManager will depend on which call came first. In general we can -// rely on kind being the same for all calls now, but normalName was a later addition to the -// protocol and is not always set consistently. normalName is only required when using -// versioning, and SDKs that support versioning will always set it. The current server version -// will also set it when adding tasks from history. So that particular inconsistency is okay. 
-func (e *matchingEngineImpl) getTaskQueueManager( - ctx context.Context, - taskQueue *taskQueueID, - stickyInfo stickyInfo, - create bool, -) (taskQueueManager, error) { - e.taskQueuesLock.RLock() - tqm, ok := e.taskQueues[*taskQueue] - e.taskQueuesLock.RUnlock() - - if !ok { - if !create { - return nil, nil - } - - // If it gets here, write lock and check again in case a task queue is created between the two locks - e.taskQueuesLock.Lock() - if tqm, ok = e.taskQueues[*taskQueue]; !ok { - var err error - tqm, err = newTaskQueueManager(e, taskQueue, stickyInfo, e.config, e.clusterMeta) - if err != nil { - e.taskQueuesLock.Unlock() - return nil, err - } - tqm.Start() - e.taskQueues[*taskQueue] = tqm - countKey := taskQueueCounterKey{ - namespaceID: taskQueue.namespaceID, - taskType: taskQueue.taskType, - kind: stickyInfo.kind, - } - e.taskQueueCount[countKey]++ - taskQueueCount := e.taskQueueCount[countKey] - e.updateTaskQueueGauge(countKey, taskQueueCount) - } - e.taskQueuesLock.Unlock() - } - - if err := tqm.WaitUntilInitialized(ctx); err != nil { - return nil, err - } - - return tqm, nil -} - -// For use in tests -func (e *matchingEngineImpl) updateTaskQueue(taskQueue *taskQueueID, mgr taskQueueManager) { - e.taskQueuesLock.Lock() - defer e.taskQueuesLock.Unlock() - e.taskQueues[*taskQueue] = mgr -} - -// AddWorkflowTask either delivers task directly to waiting poller or save it into task queue persistence. -func (e *matchingEngineImpl) AddWorkflowTask( - ctx context.Context, - addRequest *matchingservice.AddWorkflowTaskRequest, -) (bool, error) { - namespaceID := namespace.ID(addRequest.GetNamespaceId()) - taskQueueName := addRequest.TaskQueue.GetName() - stickyInfo := stickyInfoFromTaskQueue(addRequest.TaskQueue) - - origTaskQueue, err := newTaskQueueID(namespaceID, taskQueueName, enumspb.TASK_QUEUE_TYPE_WORKFLOW) - if err != nil { - return false, err - } - - // We don't need the userDataChanged channel here because: - // - if we sync match or sticky worker unavailable, we're done - // - if we spool to db, we'll re-resolve when it comes out of the db - taskQueue, _, err := e.redirectToVersionedQueueForAdd(ctx, origTaskQueue, addRequest.VersionDirective, stickyInfo) - if err != nil { - if errors.Is(err, errUserDataDisabled) { - // When user data loading is disabled, we intentionally drop tasks for versioned workflows - // to avoid breaking versioning semantics and dispatching tasks to the wrong workers. - err = nil - } - return false, err - } - - sticky := stickyInfo.kind == enumspb.TASK_QUEUE_KIND_STICKY - // do not load sticky task queue if it is not already loaded, which means it has no poller. 
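Aside: getTaskQueueManager above (shown here on the removed side of the engine refactor) uses the classic read-lock fast path plus write-lock-and-recheck slow path to lazily create one manager per task queue ID without double-creating it under concurrency. A small self-contained sketch of that double-checked pattern with an RWMutex; managerCache and manager are invented names:

```go
package main

import (
	"fmt"
	"sync"
)

// managerCache lazily creates one manager per key, using the same
// read-lock / write-lock-and-recheck dance as getTaskQueueManager.
type managerCache struct {
	mu       sync.RWMutex
	managers map[string]*manager
}

type manager struct{ name string }

func (c *managerCache) get(key string) *manager {
	// Fast path: most calls find an existing entry under the read lock.
	c.mu.RLock()
	m, ok := c.managers[key]
	c.mu.RUnlock()
	if ok {
		return m
	}

	// Slow path: take the write lock and check again, since another goroutine
	// may have created the entry between the two locks.
	c.mu.Lock()
	defer c.mu.Unlock()
	if m, ok := c.managers[key]; ok {
		return m
	}
	m = &manager{name: key}
	c.managers[key] = m
	return m
}

func main() {
	cache := &managerCache{managers: map[string]*manager{}}
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			_ = cache.get("my-task-queue")
		}()
	}
	wg.Wait()
	fmt.Println("entries:", len(cache.managers)) // always 1
}
```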
- tqm, err := e.getTaskQueueManager(ctx, taskQueue, stickyInfo, !sticky) - if err != nil { - return false, err - } else if sticky && (tqm == nil || !tqm.HasPollerAfter(time.Now().Add(-stickyPollerUnavailableWindow))) { - return false, serviceerrors.NewStickyWorkerUnavailable() - } - - // This needs to move to history see - https://go.temporal.io/server/issues/181 - var expirationTime *time.Time - now := timestamp.TimePtr(time.Now().UTC()) - expirationDuration := timestamp.DurationValue(addRequest.GetScheduleToStartTimeout()) - if expirationDuration == 0 { - // noop - } else { - expirationTime = timestamp.TimePtr(now.Add(expirationDuration)) - } - taskInfo := &persistencespb.TaskInfo{ - NamespaceId: namespaceID.String(), - RunId: addRequest.Execution.GetRunId(), - WorkflowId: addRequest.Execution.GetWorkflowId(), - ScheduledEventId: addRequest.GetScheduledEventId(), - Clock: addRequest.GetClock(), - ExpiryTime: expirationTime, - CreateTime: now, - VersionDirective: addRequest.VersionDirective, - } - - baseTqm, err := e.getTaskQueueManager(ctx, origTaskQueue, stickyInfo, true) - if err != nil { - return false, err - } - return tqm.AddTask(ctx, addTaskParams{ - execution: addRequest.Execution, - taskInfo: taskInfo, - source: addRequest.GetSource(), - forwardedFrom: addRequest.GetForwardedSource(), - baseTqm: baseTqm, - }) -} - -// AddActivityTask either delivers task directly to waiting poller or save it into task queue persistence. -func (e *matchingEngineImpl) AddActivityTask( - ctx context.Context, - addRequest *matchingservice.AddActivityTaskRequest, -) (bool, error) { - namespaceID := namespace.ID(addRequest.GetNamespaceId()) - runID := addRequest.Execution.GetRunId() - taskQueueName := addRequest.TaskQueue.GetName() - stickyInfo := stickyInfoFromTaskQueue(addRequest.TaskQueue) - - origTaskQueue, err := newTaskQueueID(namespaceID, taskQueueName, enumspb.TASK_QUEUE_TYPE_ACTIVITY) - if err != nil { - return false, err - } - - // We don't need the userDataChanged channel here because: - // - if we sync match, we're done - // - if we spool to db, we'll re-resolve when it comes out of the db - taskQueue, _, err := e.redirectToVersionedQueueForAdd(ctx, origTaskQueue, addRequest.VersionDirective, stickyInfo) - if err != nil { - if errors.Is(err, errUserDataDisabled) { - // When user data loading is disabled, we intentionally drop tasks for versioned workflows - // to avoid breaking versioning semantics and dispatching tasks to the wrong workers. 
- err = nil - } - return false, err - } - - tlMgr, err := e.getTaskQueueManager(ctx, taskQueue, stickyInfo, true) - if err != nil { - return false, err - } - - var expirationTime *time.Time - now := timestamp.TimePtr(time.Now().UTC()) - expirationDuration := timestamp.DurationValue(addRequest.GetScheduleToStartTimeout()) - if expirationDuration == 0 { - // noop - } else { - expirationTime = timestamp.TimePtr(now.Add(expirationDuration)) - } - taskInfo := &persistencespb.TaskInfo{ - NamespaceId: namespaceID.String(), - RunId: runID, - WorkflowId: addRequest.Execution.GetWorkflowId(), - ScheduledEventId: addRequest.GetScheduledEventId(), - Clock: addRequest.GetClock(), - CreateTime: now, - ExpiryTime: expirationTime, - VersionDirective: addRequest.VersionDirective, - } - - baseTqm, err := e.getTaskQueueManager(ctx, origTaskQueue, stickyInfo, true) - if err != nil { - return false, err - } - - return tlMgr.AddTask(ctx, addTaskParams{ - execution: addRequest.Execution, - taskInfo: taskInfo, - source: addRequest.GetSource(), - forwardedFrom: addRequest.GetForwardedSource(), - baseTqm: baseTqm, - }) -} - -func (e *matchingEngineImpl) DispatchSpooledTask( - ctx context.Context, - task *internalTask, - origTaskQueue *taskQueueID, - stickyInfo stickyInfo, -) error { - taskInfo := task.event.GetData() - // This task came from taskReader so task.event is always set here. - directive := taskInfo.GetVersionDirective() - // If this came from a versioned queue, ignore the version and re-resolve, in case we're - // going to the default and the default changed. - unversionedOrigTaskQueue := newTaskQueueIDWithVersionSet(origTaskQueue, "") - // Redirect and re-resolve if we're blocked in matcher and user data changes. - for { - taskQueue, userDataChanged, err := e.redirectToVersionedQueueForAdd( - ctx, unversionedOrigTaskQueue, directive, stickyInfo) - if err != nil { - return err - } - sticky := stickyInfo.kind == enumspb.TASK_QUEUE_KIND_STICKY - tqm, err := e.getTaskQueueManager(ctx, taskQueue, stickyInfo, !sticky) - if err != nil { - return err - } - err = tqm.DispatchSpooledTask(ctx, task, userDataChanged) - if err != errInterrupted { - return err - } - } -} - -// PollWorkflowTaskQueue tries to get the workflow task using exponential backoff. 
-func (e *matchingEngineImpl) PollWorkflowTaskQueue( - ctx context.Context, - req *matchingservice.PollWorkflowTaskQueueRequest, - opMetrics metrics.Handler, -) (*matchingservice.PollWorkflowTaskQueueResponse, error) { - namespaceID := namespace.ID(req.GetNamespaceId()) - pollerID := req.GetPollerId() - request := req.PollRequest - taskQueueName := request.TaskQueue.GetName() - stickyInfo := stickyInfoFromTaskQueue(request.TaskQueue) - e.logger.Debug("Received PollWorkflowTaskQueue for taskQueue", tag.WorkflowTaskQueueName(taskQueueName)) -pollLoop: - for { - err := common.IsValidContext(ctx) - if err != nil { - return nil, err - } - // Add frontend generated pollerID to context so taskqueueMgr can support cancellation of - // long-poll when frontend calls CancelOutstandingPoll API - pollerCtx := context.WithValue(ctx, pollerIDKey, pollerID) - pollerCtx = context.WithValue(pollerCtx, identityKey, request.GetIdentity()) - taskQueue, err := newTaskQueueID(namespaceID, taskQueueName, enumspb.TASK_QUEUE_TYPE_WORKFLOW) - if err != nil { - return nil, err - } - pollMetadata := &pollMetadata{ - workerVersionCapabilities: request.WorkerVersionCapabilities, - } - task, err := e.getTask(pollerCtx, taskQueue, stickyInfo, pollMetadata) - if err != nil { - // TODO: Is empty poll the best reply for errPumpClosed? - if err == ErrNoTasks || err == errPumpClosed { - return emptyPollWorkflowTaskQueueResponse, nil - } - return nil, err - } - - e.emitForwardedSourceStats(opMetrics, task.isForwarded(), req.GetForwardedSource()) - - if task.isStarted() { - // tasks received from remote are already started. So, simply forward the response - return task.pollWorkflowTaskQueueResponse(), nil - } - - if task.isQuery() { - task.finish(nil) // this only means query task sync match succeed. - - // for query task, we don't need to update history to record workflow task started. but we need to know - // the NextEventID so front end knows what are the history events to load for this workflow task. - mutableStateResp, err := e.historyClient.GetMutableState(ctx, &historyservice.GetMutableStateRequest{ - NamespaceId: req.GetNamespaceId(), - Execution: task.workflowExecution(), - }) - if err != nil { - // will notify query client that the query task failed - _ = e.deliverQueryResult(task.query.taskID, &queryResult{internalError: err}) - return emptyPollWorkflowTaskQueueResponse, nil - } - - // A non-sticky poll may get task for a workflow that has sticky still set in its mutable state after - // their sticky worker is dead for longer than 10s. In such case, we should set this to false so that - // frontend returns full history. 
- isStickyEnabled := taskQueueName == mutableStateResp.StickyTaskQueue.GetName() - resp := &historyservice.RecordWorkflowTaskStartedResponse{ - PreviousStartedEventId: mutableStateResp.PreviousStartedEventId, - NextEventId: mutableStateResp.NextEventId, - WorkflowType: mutableStateResp.WorkflowType, - StickyExecutionEnabled: isStickyEnabled, - WorkflowExecutionTaskQueue: mutableStateResp.TaskQueue, - BranchToken: mutableStateResp.CurrentBranchToken, - StartedEventId: common.EmptyEventID, - Attempt: 1, - } - return e.createPollWorkflowTaskQueueResponse(task, resp, opMetrics), nil - } - - resp, err := e.recordWorkflowTaskStarted(ctx, request, task) - if err != nil { - switch err.(type) { - case *serviceerror.NotFound: // mutable state not found, workflow not running or workflow task not found - e.logger.Info("Workflow task not found", - tag.WorkflowTaskQueueName(taskQueueName), - tag.WorkflowNamespaceID(task.event.Data.GetNamespaceId()), - tag.WorkflowID(task.event.Data.GetWorkflowId()), - tag.WorkflowRunID(task.event.Data.GetRunId()), - tag.WorkflowTaskQueueName(taskQueueName), - tag.TaskID(task.event.GetTaskId()), - tag.TaskVisibilityTimestamp(timestamp.TimeValue(task.event.Data.GetCreateTime())), - tag.WorkflowEventID(task.event.Data.GetScheduledEventId()), - tag.Error(err), - ) - task.finish(nil) - case *serviceerrors.TaskAlreadyStarted: - e.logger.Debug("Duplicated workflow task", tag.WorkflowTaskQueueName(taskQueueName), tag.TaskID(task.event.GetTaskId())) - task.finish(nil) - default: - task.finish(err) - if err.Error() == common.ErrNamespaceHandover.Error() { - // do not keep polling new tasks when namespace is in handover state - // as record start request will be rejected by history service - return nil, err - } - } - - continue pollLoop - } - task.finish(nil) - return e.createPollWorkflowTaskQueueResponse(task, resp, opMetrics), nil - } -} - -// PollActivityTaskQueue takes one task from the task manager, update workflow execution history, mark task as -// completed and return it to user. If a task from task manager is already started, return an empty response, without -// error. Timeouts handled by the timer queue. 
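Aside: the poll loops above thread a frontend-generated pollerID through the request context so that the engine's pollMap (a map of pollerID to context.CancelFunc) can later unblock a long-poll when CancelOutstandingPoll is called, e.g. because the client connection went away. A rough sketch of that cancellation-registry pattern; pollRegistry and longPoll are invented names, not the engine's API:

```go
package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// pollRegistry tracks the cancel function of every outstanding long-poll by
// poller ID, so that a separate RPC can unblock a poller whose client is gone.
type pollRegistry struct {
	mu    sync.Mutex
	polls map[string]context.CancelFunc
}

func newPollRegistry() *pollRegistry {
	return &pollRegistry{polls: map[string]context.CancelFunc{}}
}

func (p *pollRegistry) add(pollerID string, cancel context.CancelFunc) {
	p.mu.Lock()
	defer p.mu.Unlock()
	p.polls[pollerID] = cancel
}

func (p *pollRegistry) remove(pollerID string) {
	p.mu.Lock()
	defer p.mu.Unlock()
	delete(p.polls, pollerID)
}

func (p *pollRegistry) cancel(pollerID string) {
	p.mu.Lock()
	defer p.mu.Unlock()
	if cancel, ok := p.polls[pollerID]; ok {
		cancel()
		delete(p.polls, pollerID)
	}
}

// longPoll blocks until a task shows up or the context is cancelled.
func longPoll(ctx context.Context, reg *pollRegistry, pollerID string, tasks <-chan string) (string, error) {
	ctx, cancel := context.WithCancel(ctx)
	reg.add(pollerID, cancel)
	defer reg.remove(pollerID)
	defer cancel()

	select {
	case task := <-tasks:
		return task, nil
	case <-ctx.Done():
		return "", ctx.Err()
	}
}

func main() {
	reg := newPollRegistry()
	tasks := make(chan string) // never delivers anything in this demo

	go func() {
		time.Sleep(50 * time.Millisecond)
		reg.cancel("poller-1") // e.g. the frontend noticed the client disconnected
	}()

	_, err := longPoll(context.Background(), reg, "poller-1", tasks)
	fmt.Println("poll ended:", err) // poll ended: context canceled
}
```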
-func (e *matchingEngineImpl) PollActivityTaskQueue( - ctx context.Context, - req *matchingservice.PollActivityTaskQueueRequest, - opMetrics metrics.Handler, -) (*matchingservice.PollActivityTaskQueueResponse, error) { - namespaceID := namespace.ID(req.GetNamespaceId()) - pollerID := req.GetPollerId() - request := req.PollRequest - taskQueueName := request.TaskQueue.GetName() - stickyInfo := stickyInfoFromTaskQueue(request.TaskQueue) - e.logger.Debug("Received PollActivityTaskQueue for taskQueue", tag.Name(taskQueueName)) -pollLoop: - for { - err := common.IsValidContext(ctx) - if err != nil { - return nil, err - } - - taskQueue, err := newTaskQueueID(namespaceID, taskQueueName, enumspb.TASK_QUEUE_TYPE_ACTIVITY) - if err != nil { - return nil, err - } - - // Add frontend generated pollerID to context so taskqueueMgr can support cancellation of - // long-poll when frontend calls CancelOutstandingPoll API - pollerCtx := context.WithValue(ctx, pollerIDKey, pollerID) - pollerCtx = context.WithValue(pollerCtx, identityKey, request.GetIdentity()) - pollMetadata := &pollMetadata{ - workerVersionCapabilities: request.WorkerVersionCapabilities, - } - if request.TaskQueueMetadata != nil && request.TaskQueueMetadata.MaxTasksPerSecond != nil { - pollMetadata.ratePerSecond = &request.TaskQueueMetadata.MaxTasksPerSecond.Value - } - task, err := e.getTask(pollerCtx, taskQueue, stickyInfo, pollMetadata) - if err != nil { - // TODO: Is empty poll the best reply for errPumpClosed? - if err == ErrNoTasks || err == errPumpClosed { - return emptyPollActivityTaskQueueResponse, nil - } - return nil, err - } - - e.emitForwardedSourceStats(opMetrics, task.isForwarded(), req.GetForwardedSource()) - - if task.isStarted() { - // tasks received from remote are already started. So, simply forward the response - return task.pollActivityTaskQueueResponse(), nil - } - - resp, err := e.recordActivityTaskStarted(ctx, request, task) - if err != nil { - switch err.(type) { - case *serviceerror.NotFound: // mutable state not found, workflow not running or activity info not found - e.logger.Info("Activity task not found", - tag.WorkflowNamespaceID(task.event.Data.GetNamespaceId()), - tag.WorkflowID(task.event.Data.GetWorkflowId()), - tag.WorkflowRunID(task.event.Data.GetRunId()), - tag.WorkflowTaskQueueName(taskQueueName), - tag.TaskID(task.event.GetTaskId()), - tag.TaskVisibilityTimestamp(timestamp.TimeValue(task.event.Data.GetCreateTime())), - tag.WorkflowEventID(task.event.Data.GetScheduledEventId()), - tag.Error(err), - ) - task.finish(nil) - case *serviceerrors.TaskAlreadyStarted: - e.logger.Debug("Duplicated activity task", tag.WorkflowTaskQueueName(taskQueueName), tag.TaskID(task.event.GetTaskId())) - task.finish(nil) - default: - task.finish(err) - if err.Error() == common.ErrNamespaceHandover.Error() { - // do not keep polling new tasks when namespace is in handover state - // as record start request will be rejected by history service - return nil, err - } - } - - continue pollLoop - } - task.finish(nil) - return e.createPollActivityTaskQueueResponse(task, resp, opMetrics), nil - } -} - -type queryResult struct { - workerResponse *matchingservice.RespondQueryTaskCompletedRequest - internalError error -} - -// QueryWorkflow creates a WorkflowTask with query data, send it through sync match channel, wait for that WorkflowTask -// to be processed by worker, and then return the query result. 
-func (e *matchingEngineImpl) QueryWorkflow( - ctx context.Context, - queryRequest *matchingservice.QueryWorkflowRequest, -) (*matchingservice.QueryWorkflowResponse, error) { - namespaceID := namespace.ID(queryRequest.GetNamespaceId()) - taskQueueName := queryRequest.TaskQueue.GetName() - stickyInfo := stickyInfoFromTaskQueue(queryRequest.TaskQueue) - - origTaskQueue, err := newTaskQueueID(namespaceID, taskQueueName, enumspb.TASK_QUEUE_TYPE_WORKFLOW) - if err != nil { - return nil, err - } - - // We don't need the userDataChanged channel here because we either do this sync (local or remote) - // or fail with a relatively short timeout. - taskQueue, _, err := e.redirectToVersionedQueueForAdd(ctx, origTaskQueue, queryRequest.VersionDirective, stickyInfo) - if err != nil { - if errors.Is(err, errUserDataDisabled) { - // Rewrite to nicer error message - err = serviceerror.NewFailedPrecondition("Operations on versioned workflows are disabled") - } - return nil, err - } - - sticky := stickyInfo.kind == enumspb.TASK_QUEUE_KIND_STICKY - // do not load sticky task queue if it is not already loaded, which means it has no poller. - tqm, err := e.getTaskQueueManager(ctx, taskQueue, stickyInfo, !sticky) - if err != nil { - return nil, err - } else if sticky && (tqm == nil || !tqm.HasPollerAfter(time.Now().Add(-stickyPollerUnavailableWindow))) { - return nil, serviceerrors.NewStickyWorkerUnavailable() - } - - taskID := uuid.New() - resp, err := tqm.DispatchQueryTask(ctx, taskID, queryRequest) - - // if get response or error it means that query task was handled by forwarding to another matching host - // this remote host's result can be returned directly - if resp != nil || err != nil { - return resp, err - } - - // if get here it means that dispatch of query task has occurred locally - // must wait on result channel to get query result - queryResultCh := make(chan *queryResult, 1) - e.lockableQueryTaskMap.put(taskID, queryResultCh) - defer e.lockableQueryTaskMap.delete(taskID) - - select { - case result := <-queryResultCh: - if result.internalError != nil { - return nil, result.internalError - } - - workerResponse := result.workerResponse - switch workerResponse.GetCompletedRequest().GetCompletedType() { - case enumspb.QUERY_RESULT_TYPE_ANSWERED: - return &matchingservice.QueryWorkflowResponse{QueryResult: workerResponse.GetCompletedRequest().GetQueryResult()}, nil - case enumspb.QUERY_RESULT_TYPE_FAILED: - return nil, serviceerror.NewQueryFailed(workerResponse.GetCompletedRequest().GetErrorMessage()) - default: - return nil, serviceerror.NewInternal("unknown query completed type") - } - case <-ctx.Done(): - return nil, ctx.Err() - } -} - -func (e *matchingEngineImpl) RespondQueryTaskCompleted( - ctx context.Context, - request *matchingservice.RespondQueryTaskCompletedRequest, - opMetrics metrics.Handler, -) error { - if err := e.deliverQueryResult(request.GetTaskId(), &queryResult{workerResponse: request}); err != nil { - opMetrics.Counter(metrics.RespondQueryTaskFailedPerTaskQueueCounter.GetMetricName()).Record(1) - return err - } - return nil -} - -func (e *matchingEngineImpl) deliverQueryResult(taskID string, queryResult *queryResult) error { - queryResultCh, ok := e.lockableQueryTaskMap.get(taskID) - if !ok { - return serviceerror.NewNotFound("query task not found, or already expired") - } - queryResultCh <- queryResult - return nil -} - -func (e *matchingEngineImpl) CancelOutstandingPoll( - ctx context.Context, - request *matchingservice.CancelOutstandingPollRequest, -) error { - 
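Aside: the QueryWorkflow / RespondQueryTaskCompleted flow above correlates an asynchronous worker response back to the blocked caller through a per-taskID channel stored in lockableQueryTaskMap: the caller registers a buffered channel keyed by a generated task ID, waits on it (or the context), and deliverQueryResult later sends exactly one result. A small sketch of that request/response correlation pattern; resultMux and its methods are invented for illustration:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"sync"
	"time"
)

// resultMux matches asynchronous responses back to blocked callers by task ID,
// in the spirit of lockableQueryTaskMap.
type resultMux struct {
	mu      sync.Mutex
	pending map[string]chan string
}

func newResultMux() *resultMux {
	return &resultMux{pending: map[string]chan string{}}
}

func (r *resultMux) register(taskID string) chan string {
	ch := make(chan string, 1) // buffered so delivery never blocks the responder
	r.mu.Lock()
	r.pending[taskID] = ch
	r.mu.Unlock()
	return ch
}

func (r *resultMux) unregister(taskID string) {
	r.mu.Lock()
	delete(r.pending, taskID)
	r.mu.Unlock()
}

func (r *resultMux) deliver(taskID, result string) error {
	r.mu.Lock()
	ch, ok := r.pending[taskID]
	r.mu.Unlock()
	if !ok {
		return errors.New("task not found, or already expired")
	}
	ch <- result
	return nil
}

func main() {
	mux := newResultMux()
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	ch := mux.register("query-123")
	defer mux.unregister("query-123")

	// Pretend a worker answers the query a little later.
	go func() { _ = mux.deliver("query-123", "workflow state: running") }()

	select {
	case res := <-ch:
		fmt.Println(res)
	case <-ctx.Done():
		fmt.Println("query timed out")
	}
}
```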
e.pollMap.cancel(request.PollerId) - return nil -} - -func (e *matchingEngineImpl) DescribeTaskQueue( - ctx context.Context, - request *matchingservice.DescribeTaskQueueRequest, -) (*matchingservice.DescribeTaskQueueResponse, error) { - namespaceID := namespace.ID(request.GetNamespaceId()) - taskQueueType := request.DescRequest.GetTaskQueueType() - taskQueueName := request.DescRequest.TaskQueue.GetName() - stickyInfo := stickyInfoFromTaskQueue(request.DescRequest.TaskQueue) - taskQueue, err := newTaskQueueID(namespaceID, taskQueueName, taskQueueType) - if err != nil { - return nil, err - } - tlMgr, err := e.getTaskQueueManager(ctx, taskQueue, stickyInfo, true) - if err != nil { - return nil, err - } - - return tlMgr.DescribeTaskQueue(request.DescRequest.GetIncludeTaskQueueStatus()), nil -} - -func (e *matchingEngineImpl) ListTaskQueuePartitions( - ctx context.Context, - request *matchingservice.ListTaskQueuePartitionsRequest, -) (*matchingservice.ListTaskQueuePartitionsResponse, error) { - activityTaskQueueInfo, err := e.listTaskQueuePartitions(request, enumspb.TASK_QUEUE_TYPE_ACTIVITY) - if err != nil { - return nil, err - } - workflowTaskQueueInfo, err := e.listTaskQueuePartitions(request, enumspb.TASK_QUEUE_TYPE_WORKFLOW) - if err != nil { - return nil, err - } - resp := matchingservice.ListTaskQueuePartitionsResponse{ - ActivityTaskQueuePartitions: activityTaskQueueInfo, - WorkflowTaskQueuePartitions: workflowTaskQueueInfo, - } - return &resp, nil -} - -func (e *matchingEngineImpl) listTaskQueuePartitions(request *matchingservice.ListTaskQueuePartitionsRequest, taskQueueType enumspb.TaskQueueType) ([]*taskqueuepb.TaskQueuePartitionMetadata, error) { - partitions, err := e.getAllPartitions( - namespace.Name(request.GetNamespace()), - *request.TaskQueue, - taskQueueType, - ) - - if err != nil { - return nil, err - } - - partitionHostInfo := make([]*taskqueuepb.TaskQueuePartitionMetadata, len(partitions)) - for i, partition := range partitions { - host, err := e.getHostInfo(partition) - if err != nil { - return nil, err - } - - partitionHostInfo[i] = &taskqueuepb.TaskQueuePartitionMetadata{ - Key: partition, - OwnerHostName: host, - } - } - - return partitionHostInfo, nil -} - -func (e *matchingEngineImpl) UpdateWorkerBuildIdCompatibility( - ctx context.Context, - req *matchingservice.UpdateWorkerBuildIdCompatibilityRequest, -) (*matchingservice.UpdateWorkerBuildIdCompatibilityResponse, error) { - namespaceID := namespace.ID(req.GetNamespaceId()) - taskQueueName := req.GetTaskQueue() - taskQueue, err := newTaskQueueID(namespaceID, taskQueueName, enumspb.TASK_QUEUE_TYPE_WORKFLOW) - if err != nil { - return nil, err - } - tqMgr, err := e.getTaskQueueManager(ctx, taskQueue, normalStickyInfo, true) - if err != nil { - return nil, err - } - updateOptions := UserDataUpdateOptions{} - operationCreatedTombstones := false - switch req.GetOperation().(type) { - case *matchingservice.UpdateWorkerBuildIdCompatibilityRequest_ApplyPublicRequest_: - // Only apply the limit when request is initiated by a user. 
- updateOptions.TaskQueueLimitPerBuildId = e.config.TaskQueueLimitPerBuildId() - case *matchingservice.UpdateWorkerBuildIdCompatibilityRequest_RemoveBuildIds_: - updateOptions.KnownVersion = req.GetRemoveBuildIds().GetKnownUserDataVersion() - default: - return nil, serviceerror.NewInvalidArgument(fmt.Sprintf("invalid operation: %v", req.GetOperation())) - } - - err = tqMgr.UpdateUserData(ctx, updateOptions, func(data *persistencespb.TaskQueueUserData) (*persistencespb.TaskQueueUserData, bool, error) { - clock := data.GetClock() - if clock == nil { - tmp := hlc.Zero(e.clusterMeta.GetClusterID()) - clock = &tmp - } - updatedClock := hlc.Next(*clock, e.timeSource) - var versioningData *persistencespb.VersioningData - switch req.GetOperation().(type) { - case *matchingservice.UpdateWorkerBuildIdCompatibilityRequest_ApplyPublicRequest_: - var err error - versioningData, err = UpdateVersionSets( - updatedClock, - data.GetVersioningData(), - req.GetApplyPublicRequest().GetRequest(), - e.config.VersionCompatibleSetLimitPerQueue(), - e.config.VersionBuildIdLimitPerQueue(), - ) - if err != nil { - return nil, false, err - } - case *matchingservice.UpdateWorkerBuildIdCompatibilityRequest_RemoveBuildIds_: - ns, err := e.namespaceRegistry.GetNamespaceByID(namespaceID) - if err != nil { - return nil, false, err - } - versioningData = RemoveBuildIds( - updatedClock, - data.GetVersioningData(), - req.GetRemoveBuildIds().GetBuildIds(), - ) - if ns.ReplicationPolicy() == namespace.ReplicationPolicyMultiCluster { - operationCreatedTombstones = true - } else { - // We don't need to keep the tombstones around if we're not replicating them. - versioningData = ClearTombstones(versioningData) - } - default: - return nil, false, serviceerror.NewInvalidArgument(fmt.Sprintf("invalid operation: %v", req.GetOperation())) - } - // Avoid mutation - ret := *data - ret.Clock = &updatedClock - ret.VersioningData = versioningData - return &ret, true, nil - }) - if err != nil { - return nil, err - } - - // Only clear tombstones after they have been replicated. 
- if operationCreatedTombstones { - err = tqMgr.UpdateUserData(ctx, UserDataUpdateOptions{}, func(data *persistencespb.TaskQueueUserData) (*persistencespb.TaskQueueUserData, bool, error) { - updatedClock := hlc.Next(*data.GetClock(), e.timeSource) - // Avoid mutation - ret := *data - ret.Clock = &updatedClock - ret.VersioningData = ClearTombstones(data.VersioningData) - return &ret, false, nil // Do not replicate the deletion of tombstones - }) - if err != nil { - return nil, err - } - } - return &matchingservice.UpdateWorkerBuildIdCompatibilityResponse{}, nil -} - -func (e *matchingEngineImpl) GetWorkerBuildIdCompatibility( - ctx context.Context, - req *matchingservice.GetWorkerBuildIdCompatibilityRequest, -) (*matchingservice.GetWorkerBuildIdCompatibilityResponse, error) { - namespaceID := namespace.ID(req.GetNamespaceId()) - taskQueueName := req.GetRequest().GetTaskQueue() - taskQueue, err := newTaskQueueID(namespaceID, taskQueueName, enumspb.TASK_QUEUE_TYPE_WORKFLOW) - if err != nil { - return nil, err - } - tqMgr, err := e.getTaskQueueManager(ctx, taskQueue, normalStickyInfo, true) - if err != nil { - if _, ok := err.(*serviceerror.NotFound); ok { - return &matchingservice.GetWorkerBuildIdCompatibilityResponse{}, nil - } - return nil, err - } - userData, _, err := tqMgr.GetUserData(ctx) - if err != nil { - return nil, err - } - return &matchingservice.GetWorkerBuildIdCompatibilityResponse{ - Response: ToBuildIdOrderingResponse(userData.GetData().GetVersioningData(), int(req.GetRequest().GetMaxSets())), - }, nil -} - -func (e *matchingEngineImpl) GetTaskQueueUserData( - ctx context.Context, - req *matchingservice.GetTaskQueueUserDataRequest, -) (*matchingservice.GetTaskQueueUserDataResponse, error) { - namespaceID := namespace.ID(req.GetNamespaceId()) - taskQueue, err := newTaskQueueID(namespaceID, req.GetTaskQueue(), req.GetTaskQueueType()) - if err != nil { - return nil, err - } - tqMgr, err := e.getTaskQueueManager(ctx, taskQueue, normalStickyInfo, true) - if err != nil { - return nil, err - } - version := req.GetLastKnownUserDataVersion() - if version < 0 { - return nil, serviceerror.NewInvalidArgument("last_known_user_data_version must not be negative") - } - - if req.WaitNewData { - var cancel context.CancelFunc - ctx, cancel = newChildContext(ctx, e.config.GetUserDataLongPollTimeout(), returnEmptyTaskTimeBudget) - defer cancel() - } - - for { - resp := &matchingservice.GetTaskQueueUserDataResponse{} - userData, userDataChanged, err := tqMgr.GetUserData(ctx) - if err != nil { - return nil, err - } - if req.WaitNewData && userData.GetVersion() == version { - // long-poll: wait for data to change/appear - select { - case <-ctx.Done(): - resp.TaskQueueHasUserData = userData != nil - return resp, nil - case <-userDataChanged: - continue - } - } - if userData != nil { - resp.TaskQueueHasUserData = true - if userData.Version > version { - resp.UserData = userData - } else if userData.Version < version { - // This is highly unlikely but may happen due to an edge case in during ownership transfer. - // We rely on client retries in this case to let the system eventually self-heal. 
- return nil, serviceerror.NewInvalidArgument( - "requested task queue user data for version greater than known version") - } - } - return resp, nil - } -} - -func (e *matchingEngineImpl) ApplyTaskQueueUserDataReplicationEvent( - ctx context.Context, - req *matchingservice.ApplyTaskQueueUserDataReplicationEventRequest, -) (*matchingservice.ApplyTaskQueueUserDataReplicationEventResponse, error) { - namespaceID := namespace.ID(req.GetNamespaceId()) - ns, err := e.namespaceRegistry.GetNamespaceByID(namespaceID) - if err != nil { - return nil, err - } - taskQueueName := req.GetTaskQueue() - taskQueue, err := newTaskQueueID(namespaceID, taskQueueName, enumspb.TASK_QUEUE_TYPE_WORKFLOW) - if err != nil { - return nil, err - } - tqMgr, err := e.getTaskQueueManager(ctx, taskQueue, normalStickyInfo, true) - if err != nil { - return nil, err - } - updateOptions := UserDataUpdateOptions{ - // Avoid setting a limit to allow the replication event to always be applied - TaskQueueLimitPerBuildId: 0, - } - err = tqMgr.UpdateUserData(ctx, updateOptions, func(current *persistencespb.TaskQueueUserData) (*persistencespb.TaskQueueUserData, bool, error) { - mergedUserData := *current - _, buildIdsRemoved := GetBuildIdDeltas(current.GetVersioningData(), req.GetUserData().GetVersioningData()) - var buildIdsToRevive []string - for _, buildId := range buildIdsRemoved { - // We accept that the user data is locked for updates while running these visibility queries. - // Nothing else is _supposed_ to update it on follower (standby) clusters. - exists, err := worker_versioning.WorkflowsExistForBuildId(ctx, e.visibilityManager, ns, req.TaskQueue, buildId) - if err != nil { - return nil, false, err - } - if exists { - buildIdsToRevive = append(buildIdsToRevive, buildId) - } - } - mergedData := MergeVersioningData(current.GetVersioningData(), req.GetUserData().GetVersioningData()) - - for _, buildId := range buildIdsToRevive { - setIdx, buildIdIdx := worker_versioning.FindBuildId(mergedData, buildId) - if setIdx == -1 { - continue - } - set := mergedData.VersionSets[setIdx] - set.BuildIds[buildIdIdx] = e.reviveBuildId(ns, req.GetTaskQueue(), set.GetBuildIds()[buildIdIdx]) - mergedUserData.Clock = hlc.Ptr(hlc.Max(*mergedUserData.Clock, *set.BuildIds[buildIdIdx].StateUpdateTimestamp)) - - setDefault := set.BuildIds[len(set.BuildIds)-1] - if setDefault.State == persistencespb.STATE_DELETED { - // We merged an update which removed (at least) two build ids: the default for set x and another one for set - // x. We discovered we're still using the other one, so we revive it. now we also have to revive the default - // for set x, or it will be left with the wrong default. - set.BuildIds[len(set.BuildIds)-1] = e.reviveBuildId(ns, req.GetTaskQueue(), setDefault) - mergedUserData.Clock = hlc.Ptr(hlc.Max(*mergedUserData.Clock, *setDefault.StateUpdateTimestamp)) - } - } - - // No need to keep the tombstones around after replication. 
- mergedUserData.VersioningData = ClearTombstones(mergedData) - return &mergedUserData, len(buildIdsToRevive) > 0, nil - }) - return &matchingservice.ApplyTaskQueueUserDataReplicationEventResponse{}, err -} - -func (e *matchingEngineImpl) GetBuildIdTaskQueueMapping( - ctx context.Context, - req *matchingservice.GetBuildIdTaskQueueMappingRequest, -) (*matchingservice.GetBuildIdTaskQueueMappingResponse, error) { - taskQueues, err := e.taskManager.GetTaskQueuesByBuildId(ctx, &persistence.GetTaskQueuesByBuildIdRequest{ - NamespaceID: req.NamespaceId, - BuildID: req.BuildId, - }) - if err != nil { - return nil, err - } - return &matchingservice.GetBuildIdTaskQueueMappingResponse{TaskQueues: taskQueues}, nil -} - -func (e *matchingEngineImpl) ForceUnloadTaskQueue( - ctx context.Context, - req *matchingservice.ForceUnloadTaskQueueRequest, -) (*matchingservice.ForceUnloadTaskQueueResponse, error) { - namespaceID := namespace.ID(req.GetNamespaceId()) - taskQueue, err := newTaskQueueID(namespaceID, req.TaskQueue, req.TaskQueueType) - if err != nil { - return nil, err - } - tqm, err := e.getTaskQueueManager(ctx, taskQueue, normalStickyInfo, false) - if err != nil { - return nil, err - } - if tqm == nil { - return &matchingservice.ForceUnloadTaskQueueResponse{WasLoaded: false}, nil - } - e.unloadTaskQueue(tqm) - return &matchingservice.ForceUnloadTaskQueueResponse{WasLoaded: true}, nil -} - -func (e *matchingEngineImpl) UpdateTaskQueueUserData(ctx context.Context, request *matchingservice.UpdateTaskQueueUserDataRequest) (*matchingservice.UpdateTaskQueueUserDataResponse, error) { - locks := e.getNamespaceUpdateLocks(request.GetNamespaceId()) - locks.updateLock.Lock() - defer locks.updateLock.Unlock() - - err := e.taskManager.UpdateTaskQueueUserData(ctx, &persistence.UpdateTaskQueueUserDataRequest{ - NamespaceID: request.GetNamespaceId(), - TaskQueue: request.GetTaskQueue(), - UserData: request.GetUserData(), - BuildIdsAdded: request.BuildIdsAdded, - BuildIdsRemoved: request.BuildIdsRemoved, - }) - return &matchingservice.UpdateTaskQueueUserDataResponse{}, err -} - -func (e *matchingEngineImpl) ReplicateTaskQueueUserData(ctx context.Context, request *matchingservice.ReplicateTaskQueueUserDataRequest) (*matchingservice.ReplicateTaskQueueUserDataResponse, error) { - if e.namespaceReplicationQueue == nil { - return &matchingservice.ReplicateTaskQueueUserDataResponse{}, nil - } - - locks := e.getNamespaceUpdateLocks(request.GetNamespaceId()) - locks.replicationLock.Lock() - defer locks.replicationLock.Unlock() - - err := e.namespaceReplicationQueue.Publish(ctx, &replicationspb.ReplicationTask{ - TaskType: enumsspb.REPLICATION_TASK_TYPE_TASK_QUEUE_USER_DATA, - Attributes: &replicationspb.ReplicationTask_TaskQueueUserDataAttributes{ - TaskQueueUserDataAttributes: &replicationspb.TaskQueueUserDataAttributes{ - NamespaceId: request.GetNamespaceId(), - TaskQueueName: request.GetTaskQueue(), - UserData: request.GetUserData(), - }, - }, - }) - return &matchingservice.ReplicateTaskQueueUserDataResponse{}, err - -} - -func (e *matchingEngineImpl) getNamespaceUpdateLocks(namespaceId string) *namespaceUpdateLocks { - e.namespaceUpdateLockMapLock.Lock() - defer e.namespaceUpdateLockMapLock.Unlock() - locks, found := e.namespaceUpdateLockMap[namespaceId] - if !found { - locks = &namespaceUpdateLocks{} - e.namespaceUpdateLockMap[namespaceId] = locks - } - return locks -} - -func (e *matchingEngineImpl) getHostInfo(partitionKey string) (string, error) { - host, err := e.keyResolver.Lookup(partitionKey) - if err != nil { - 
return "", err - } - return host.GetAddress(), nil -} - -func (e *matchingEngineImpl) getAllPartitions( - namespace namespace.Name, - taskQueue taskqueuepb.TaskQueue, - taskQueueType enumspb.TaskQueueType, -) ([]string, error) { - var partitionKeys []string - namespaceID, err := e.namespaceRegistry.GetNamespaceID(namespace) - if err != nil { - return partitionKeys, err - } - taskQueueID, err := newTaskQueueID(namespaceID, taskQueue.GetName(), enumspb.TASK_QUEUE_TYPE_WORKFLOW) - if err != nil { - return partitionKeys, err - } - - n := e.config.NumTaskqueueWritePartitions(namespace.String(), taskQueueID.BaseNameString(), taskQueueType) - for i := 0; i < n; i++ { - partitionKeys = append(partitionKeys, taskQueueID.WithPartition(i).FullName()) - } - - return partitionKeys, nil -} - -func (e *matchingEngineImpl) getTask( - ctx context.Context, - origTaskQueue *taskQueueID, - stickyInfo stickyInfo, - pollMetadata *pollMetadata, -) (*internalTask, error) { - baseTqm, err := e.getTaskQueueManager(ctx, origTaskQueue, stickyInfo, true) - if err != nil { - return nil, err - } - - taskQueue, err := e.redirectToVersionedQueueForPoll( - ctx, - baseTqm, - origTaskQueue, - pollMetadata.workerVersionCapabilities, - stickyInfo, - ) - if err != nil { - if errors.Is(err, errUserDataDisabled) { - // Rewrite to nicer error message - err = serviceerror.NewFailedPrecondition("Operations on versioned workflows are disabled") - } - return nil, err - } - tqm, err := e.getTaskQueueManager(ctx, taskQueue, stickyInfo, true) - if err != nil { - return nil, err - } - - // We need to set a shorter timeout than the original ctx; otherwise, by the time ctx deadline is - // reached, instead of emptyTask, context timeout error is returned to the frontend by the rpc stack, - // which counts against our SLO. By shortening the timeout by a very small amount, the emptyTask can be - // returned to the handler before a context timeout error is generated. 
- ctx, cancel := newChildContext(ctx, baseTqm.LongPollExpirationInterval(), returnEmptyTaskTimeBudget) - defer cancel() - - if pollerID, ok := ctx.Value(pollerIDKey).(string); ok && pollerID != "" { - e.pollMap.add(pollerID, cancel) - defer e.pollMap.remove(pollerID) - } - - if identity, ok := ctx.Value(identityKey).(string); ok && identity != "" { - baseTqm.UpdatePollerInfo(pollerIdentity(identity), pollMetadata) - // update timestamp when long poll ends - defer baseTqm.UpdatePollerInfo(pollerIdentity(identity), pollMetadata) - } - - return tqm.GetTask(ctx, pollMetadata) -} - -func (e *matchingEngineImpl) unloadTaskQueue(unloadTQM taskQueueManager) { - queueID := unloadTQM.QueueID() - e.taskQueuesLock.Lock() - foundTQM, ok := e.taskQueues[*queueID] - if !ok || foundTQM != unloadTQM { - e.taskQueuesLock.Unlock() - return - } - delete(e.taskQueues, *queueID) - countKey := taskQueueCounterKey{namespaceID: queueID.namespaceID, taskType: queueID.taskType, kind: foundTQM.TaskQueueKind()} - e.taskQueueCount[countKey]-- - taskQueueCount := e.taskQueueCount[countKey] - e.taskQueuesLock.Unlock() - - e.updateTaskQueueGauge(countKey, taskQueueCount) - foundTQM.Stop() -} - -func (e *matchingEngineImpl) updateTaskQueueGauge(countKey taskQueueCounterKey, taskQueueCount int) { - nsEntry, err := e.namespaceRegistry.GetNamespaceByID(countKey.namespaceID) - namespace := namespace.Name("unknown") - if err == nil { - namespace = nsEntry.Name() - } - - e.metricsHandler.Gauge(metrics.LoadedTaskQueueGauge.GetMetricName()).Record( - float64(taskQueueCount), - metrics.NamespaceTag(namespace.String()), - metrics.TaskTypeTag(countKey.taskType.String()), - metrics.QueueTypeTag(countKey.kind.String()), - ) -} - -// Populate the workflow task response based on context and scheduled/started events. -func (e *matchingEngineImpl) createPollWorkflowTaskQueueResponse( - task *internalTask, - historyResponse *historyservice.RecordWorkflowTaskStartedResponse, - metricsHandler metrics.Handler, -) *matchingservice.PollWorkflowTaskQueueResponse { - - var serializedToken []byte - if task.isQuery() { - // for a query task - queryRequest := task.query.request - queryTaskToken := &tokenspb.QueryTask{ - NamespaceId: queryRequest.GetNamespaceId(), - TaskQueue: queryRequest.TaskQueue.Name, - TaskId: task.query.taskID, - } - serializedToken, _ = e.tokenSerializer.SerializeQueryTaskToken(queryTaskToken) - } else { - taskToken := &tokenspb.Task{ - NamespaceId: task.event.Data.GetNamespaceId(), - WorkflowId: task.event.Data.GetWorkflowId(), - RunId: task.event.Data.GetRunId(), - ScheduledEventId: historyResponse.GetScheduledEventId(), - StartedEventId: historyResponse.GetStartedEventId(), - StartedTime: historyResponse.GetStartedTime(), - Attempt: historyResponse.GetAttempt(), - Clock: historyResponse.GetClock(), - } - serializedToken, _ = e.tokenSerializer.Serialize(taskToken) - if task.responseC == nil { - ct := timestamp.TimeValue(task.event.Data.CreateTime) - metricsHandler.Timer(metrics.AsyncMatchLatencyPerTaskQueue.GetMetricName()).Record(time.Since(ct)) - } - } - - response := common.CreateMatchingPollWorkflowTaskQueueResponse( - historyResponse, - task.workflowExecution(), - serializedToken) - - if task.query != nil { - response.Query = task.query.request.QueryRequest.Query - } - if task.backlogCountHint != nil { - response.BacklogCountHint = task.backlogCountHint() - } - return response -} - -// Populate the activity task response based on context and scheduled/started events. 
-func (e *matchingEngineImpl) createPollActivityTaskQueueResponse( - task *internalTask, - historyResponse *historyservice.RecordActivityTaskStartedResponse, - metricsHandler metrics.Handler, -) *matchingservice.PollActivityTaskQueueResponse { - - scheduledEvent := historyResponse.ScheduledEvent - if scheduledEvent.GetActivityTaskScheduledEventAttributes() == nil { - panic("GetActivityTaskScheduledEventAttributes is not set") - } - attributes := scheduledEvent.GetActivityTaskScheduledEventAttributes() - if attributes.ActivityId == "" { - panic("ActivityTaskScheduledEventAttributes.ActivityID is not set") - } - if task.responseC == nil { - ct := timestamp.TimeValue(task.event.Data.CreateTime) - metricsHandler.Timer(metrics.AsyncMatchLatencyPerTaskQueue.GetMetricName()).Record(time.Since(ct)) - } - - taskToken := &tokenspb.Task{ - NamespaceId: task.event.Data.GetNamespaceId(), - WorkflowId: task.event.Data.GetWorkflowId(), - RunId: task.event.Data.GetRunId(), - ScheduledEventId: task.event.Data.GetScheduledEventId(), - Attempt: historyResponse.GetAttempt(), - ActivityId: attributes.GetActivityId(), - ActivityType: attributes.GetActivityType().GetName(), - Clock: historyResponse.GetClock(), - } - - serializedToken, _ := e.tokenSerializer.Serialize(taskToken) - - return &matchingservice.PollActivityTaskQueueResponse{ - ActivityId: attributes.ActivityId, - ActivityType: attributes.ActivityType, - Header: attributes.Header, - Input: attributes.Input, - WorkflowExecution: task.workflowExecution(), - CurrentAttemptScheduledTime: historyResponse.CurrentAttemptScheduledTime, - ScheduledTime: scheduledEvent.EventTime, - ScheduleToCloseTimeout: attributes.ScheduleToCloseTimeout, - StartedTime: historyResponse.StartedTime, - StartToCloseTimeout: attributes.StartToCloseTimeout, - HeartbeatTimeout: attributes.HeartbeatTimeout, - TaskToken: serializedToken, - Attempt: taskToken.Attempt, - HeartbeatDetails: historyResponse.HeartbeatDetails, - WorkflowType: historyResponse.WorkflowType, - WorkflowNamespace: historyResponse.WorkflowNamespace, - } -} - -func (e *matchingEngineImpl) recordWorkflowTaskStarted( - ctx context.Context, - pollReq *workflowservice.PollWorkflowTaskQueueRequest, - task *internalTask, -) (*historyservice.RecordWorkflowTaskStartedResponse, error) { - ctx, cancel := newRecordTaskStartedContext(ctx, task) - defer cancel() - - return e.historyClient.RecordWorkflowTaskStarted(ctx, &historyservice.RecordWorkflowTaskStartedRequest{ - NamespaceId: task.event.Data.GetNamespaceId(), - WorkflowExecution: task.workflowExecution(), - ScheduledEventId: task.event.Data.GetScheduledEventId(), - Clock: task.event.Data.GetClock(), - TaskId: task.event.GetTaskId(), - RequestId: uuid.New(), - PollRequest: pollReq, - }) -} - -func (e *matchingEngineImpl) recordActivityTaskStarted( - ctx context.Context, - pollReq *workflowservice.PollActivityTaskQueueRequest, - task *internalTask, -) (*historyservice.RecordActivityTaskStartedResponse, error) { - ctx, cancel := newRecordTaskStartedContext(ctx, task) - defer cancel() - - return e.historyClient.RecordActivityTaskStarted(ctx, &historyservice.RecordActivityTaskStartedRequest{ - NamespaceId: task.event.Data.GetNamespaceId(), - WorkflowExecution: task.workflowExecution(), - ScheduledEventId: task.event.Data.GetScheduledEventId(), - Clock: task.event.Data.GetClock(), - TaskId: task.event.GetTaskId(), - RequestId: uuid.New(), - PollRequest: pollReq, - }) -} - -func (e *matchingEngineImpl) emitForwardedSourceStats( - metricsHandler metrics.Handler, - isTaskForwarded 
bool, - pollForwardedSource string, -) { - isPollForwarded := len(pollForwardedSource) > 0 - switch { - case isTaskForwarded && isPollForwarded: - metricsHandler.Counter(metrics.RemoteToRemoteMatchPerTaskQueueCounter.GetMetricName()).Record(1) - case isTaskForwarded: - metricsHandler.Counter(metrics.RemoteToLocalMatchPerTaskQueueCounter.GetMetricName()).Record(1) - case isPollForwarded: - metricsHandler.Counter(metrics.LocalToRemoteMatchPerTaskQueueCounter.GetMetricName()).Record(1) - default: - metricsHandler.Counter(metrics.LocalToLocalMatchPerTaskQueueCounter.GetMetricName()).Record(1) - } -} - -func (e *matchingEngineImpl) redirectToVersionedQueueForPoll( - ctx context.Context, - baseTqm taskQueueManager, - taskQueue *taskQueueID, - workerVersionCapabilities *commonpb.WorkerVersionCapabilities, - stickyInfo stickyInfo, -) (*taskQueueID, error) { - if !workerVersionCapabilities.GetUseVersioning() { - // Either this task queue is versioned, or there are still some workflows running on - // the "unversioned" set. - return taskQueue, nil - } - - // We don't need the userDataChanged channel here because polls have a timeout and the - // client will retry, so if we're blocked on the wrong matcher it'll just take one poll - // timeout to fix itself. - userData, _, err := baseTqm.GetUserData(ctx) - if err != nil { - return nil, err - } - data := userData.GetData().GetVersioningData() - - if stickyInfo.kind == enumspb.TASK_QUEUE_KIND_STICKY { - // In the sticky case we don't redirect, but we may kick off this worker if there's a - // newer one. - err := checkVersionForStickyPoll(data, workerVersionCapabilities) - return taskQueue, err - } - - versionSet, err := lookupVersionSetForPoll(data, workerVersionCapabilities) - if err != nil { - return nil, err - } - return newTaskQueueIDWithVersionSet(taskQueue, versionSet), nil -} - -func (e *matchingEngineImpl) redirectToVersionedQueueForAdd( - ctx context.Context, - taskQueue *taskQueueID, - directive *taskqueuespb.TaskVersionDirective, - stickyInfo stickyInfo, -) (*taskQueueID, chan struct{}, error) { - var buildId string - switch dir := directive.GetValue().(type) { - case *taskqueuespb.TaskVersionDirective_UseDefault: - // leave buildId = "", lookupVersionSetForAdd understands that to mean "default" - case *taskqueuespb.TaskVersionDirective_BuildId: - buildId = dir.BuildId - default: - // Unversioned task, leave on unversioned queue. - return taskQueue, nil, nil - } - - baseTqm, err := e.getTaskQueueManager(ctx, taskQueue, stickyInfo, true) - if err != nil { - return nil, nil, err - } - - // Have to look up versioning data. - userData, userDataChanged, err := baseTqm.GetUserData(ctx) - if err != nil { - if errors.Is(err, errUserDataDisabled) && buildId == "" { - // When user data disabled, send "default" tasks to unversioned queue. - return taskQueue, userDataChanged, nil - } - return nil, nil, err - } - data := userData.GetData().GetVersioningData() - - if stickyInfo.kind == enumspb.TASK_QUEUE_KIND_STICKY { - // In the sticky case we don't redirect, but we may kick off this worker if there's a - // newer one. 
- err := checkVersionForStickyAdd(data, buildId) - return taskQueue, userDataChanged, err - } - - versionSet, err := lookupVersionSetForAdd(data, buildId) - if err == errEmptyVersioningData { - // default was requested for an unversioned queue - return taskQueue, userDataChanged, nil - } else if err != nil { - return nil, nil, err - } - return newTaskQueueIDWithVersionSet(taskQueue, versionSet), userDataChanged, nil -} - -func (m *lockableQueryTaskMap) put(key string, value chan *queryResult) { - m.Lock() - defer m.Unlock() - m.queryTaskMap[key] = value -} - -func (m *lockableQueryTaskMap) get(key string) (chan *queryResult, bool) { - m.RLock() - defer m.RUnlock() - result, ok := m.queryTaskMap[key] - return result, ok -} - -func (m *lockableQueryTaskMap) delete(key string) { - m.Lock() - defer m.Unlock() - delete(m.queryTaskMap, key) -} - -func (m *lockablePollMap) add(cancelId string, cancel context.CancelFunc) { - m.Lock() - defer m.Unlock() - m.polls[cancelId] = cancel -} - -func (m *lockablePollMap) remove(cancelId string) { - m.Lock() - defer m.Unlock() - delete(m.polls, cancelId) -} - -func (m *lockablePollMap) cancel(cancelId string) { - m.Lock() - defer m.Unlock() - if cancel, ok := m.polls[cancelId]; ok { - cancel() - delete(m.polls, cancelId) - } -} - -// newRecordTaskStartedContext creates a context for recording -// activity or workflow task started. The parentCtx from -// pollActivity/WorkflowTaskQueue endpoint (which is a long poll -// API) has long timeout and unsuitable for recording task started, -// especially if the task is doing sync match and has caller -// (history transfer queue) waiting for response. -func newRecordTaskStartedContext( - parentCtx context.Context, - task *internalTask, -) (context.Context, context.CancelFunc) { - timeout := recordTaskStartedDefaultTimeout - if task.isSyncMatchTask() { - timeout = recordTaskStartedSyncMatchTimeout - } - - return context.WithTimeout(parentCtx, timeout) -} - -// Revives a deleted build id updating its HLC timestamp. -// Returns a new build id leaving the provided one untouched. -func (e *matchingEngineImpl) reviveBuildId(ns *namespace.Namespace, taskQueue string, buildId *persistencespb.BuildId) *persistencespb.BuildId { - // Bump the stamp and ensure it's newer than the deletion stamp. - prevStamp := *buildId.StateUpdateTimestamp - stamp := hlc.Next(prevStamp, e.timeSource) - stamp.ClusterId = e.clusterMeta.GetClusterID() - e.logger.Info("Revived build id while applying replication event", - tag.WorkflowNamespace(ns.Name().String()), - tag.WorkflowTaskQueueName(taskQueue), - tag.BuildId(buildId.Id)) - return &persistencespb.BuildId{ - Id: buildId.GetId(), - State: persistencespb.STATE_ACTIVE, - StateUpdateTimestamp: &stamp, - BecameDefaultTimestamp: buildId.BecameDefaultTimestamp, - } -} diff -Nru temporal-1.21.5-1/src/service/matching/matchingEngineInterfaces.go temporal-1.22.5/src/service/matching/matchingEngineInterfaces.go --- temporal-1.21.5-1/src/service/matching/matchingEngineInterfaces.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/matching/matchingEngineInterfaces.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,57 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package matching - -import ( - "context" - - "go.temporal.io/server/api/matchingservice/v1" - "go.temporal.io/server/common/metrics" -) - -type ( - // Engine exposes interfaces for clients to interact with the matching engine - Engine interface { - Start() - Stop() - AddWorkflowTask(ctx context.Context, addRequest *matchingservice.AddWorkflowTaskRequest) (syncMatch bool, err error) - AddActivityTask(ctx context.Context, addRequest *matchingservice.AddActivityTaskRequest) (syncMatch bool, err error) - PollWorkflowTaskQueue(ctx context.Context, request *matchingservice.PollWorkflowTaskQueueRequest, opMetrics metrics.Handler) (*matchingservice.PollWorkflowTaskQueueResponse, error) - PollActivityTaskQueue(ctx context.Context, request *matchingservice.PollActivityTaskQueueRequest, opMetrics metrics.Handler) (*matchingservice.PollActivityTaskQueueResponse, error) - QueryWorkflow(ctx context.Context, request *matchingservice.QueryWorkflowRequest) (*matchingservice.QueryWorkflowResponse, error) - RespondQueryTaskCompleted(ctx context.Context, request *matchingservice.RespondQueryTaskCompletedRequest, opMetrics metrics.Handler) error - CancelOutstandingPoll(ctx context.Context, request *matchingservice.CancelOutstandingPollRequest) error - DescribeTaskQueue(ctx context.Context, request *matchingservice.DescribeTaskQueueRequest) (*matchingservice.DescribeTaskQueueResponse, error) - ListTaskQueuePartitions(ctx context.Context, request *matchingservice.ListTaskQueuePartitionsRequest) (*matchingservice.ListTaskQueuePartitionsResponse, error) - UpdateWorkerBuildIdCompatibility(ctx context.Context, request *matchingservice.UpdateWorkerBuildIdCompatibilityRequest) (*matchingservice.UpdateWorkerBuildIdCompatibilityResponse, error) - GetWorkerBuildIdCompatibility(ctx context.Context, request *matchingservice.GetWorkerBuildIdCompatibilityRequest) (*matchingservice.GetWorkerBuildIdCompatibilityResponse, error) - GetTaskQueueUserData(ctx context.Context, request *matchingservice.GetTaskQueueUserDataRequest) (*matchingservice.GetTaskQueueUserDataResponse, error) - ApplyTaskQueueUserDataReplicationEvent(ctx context.Context, request *matchingservice.ApplyTaskQueueUserDataReplicationEventRequest) (*matchingservice.ApplyTaskQueueUserDataReplicationEventResponse, error) - GetBuildIdTaskQueueMapping(ctx context.Context, request *matchingservice.GetBuildIdTaskQueueMappingRequest) (*matchingservice.GetBuildIdTaskQueueMappingResponse, error) - 
ForceUnloadTaskQueue(ctx context.Context, request *matchingservice.ForceUnloadTaskQueueRequest) (*matchingservice.ForceUnloadTaskQueueResponse, error) - UpdateTaskQueueUserData(ctx context.Context, request *matchingservice.UpdateTaskQueueUserDataRequest) (*matchingservice.UpdateTaskQueueUserDataResponse, error) - ReplicateTaskQueueUserData(ctx context.Context, request *matchingservice.ReplicateTaskQueueUserDataRequest) (*matchingservice.ReplicateTaskQueueUserDataResponse, error) - } -) diff -Nru temporal-1.21.5-1/src/service/matching/matchingEngine_test.go temporal-1.22.5/src/service/matching/matchingEngine_test.go --- temporal-1.21.5-1/src/service/matching/matchingEngine_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/matching/matchingEngine_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,2813 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
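The Engine interface deleted just above is what lets the gRPC handler layer and the deleted test suite that follows program against an abstraction instead of the concrete matchingEngineImpl, which is why the tests can swap in gomock doubles for history, matching and visibility clients. As a rough, self-contained sketch of that dependency-inversion pattern (hypothetical names, not the upstream Temporal API), it could be exercised like this:

    package main

    import (
        "context"
        "fmt"
    )

    // engine is a trimmed-down stand-in for the removed Engine interface:
    // callers see only the behaviour they need, not the implementation.
    type engine interface {
        AddWorkflowTask(ctx context.Context, taskQueue string) (syncMatch bool, err error)
    }

    // handler depends on the interface alone, so tests can inject a double
    // (the deleted matchingEngine_test.go does the same with generated mocks).
    type handler struct {
        e engine
    }

    func (h *handler) add(ctx context.Context, taskQueue string) error {
        syncMatch, err := h.e.AddWorkflowTask(ctx, taskQueue)
        if err != nil {
            return err
        }
        fmt.Printf("task on %q sync-matched: %v\n", taskQueue, syncMatch)
        return nil
    }

    // fakeEngine is the kind of hand-rolled test double a generated mock replaces.
    type fakeEngine struct {
        syncMatch bool
    }

    func (f *fakeEngine) AddWorkflowTask(context.Context, string) (bool, error) {
        return f.syncMatch, nil
    }

    func main() {
        h := &handler{e: &fakeEngine{syncMatch: true}}
        if err := h.add(context.Background(), "makeToast"); err != nil {
            fmt.Println("add failed:", err)
        }
    }

This mirrors, in miniature, how the removed interface decoupled the service handlers and tests from matchingEngineImpl; in 1.22.x the same role is played by the restructured matching engine types rather than a separate interfaces file.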
- -package matching - -import ( - "context" - "errors" - "fmt" - "sync" - "sync/atomic" - "testing" - "time" - - "github.com/emirpasic/gods/maps/treemap" - "github.com/gogo/protobuf/types" - "github.com/golang/mock/gomock" - "github.com/pborman/uuid" - "github.com/stretchr/testify/suite" - "github.com/uber-go/tally/v4" - - commandpb "go.temporal.io/api/command/v1" - commonpb "go.temporal.io/api/common/v1" - enumspb "go.temporal.io/api/enums/v1" - historypb "go.temporal.io/api/history/v1" - "go.temporal.io/api/serviceerror" - taskqueuepb "go.temporal.io/api/taskqueue/v1" - "go.temporal.io/api/workflowservice/v1" - - clockspb "go.temporal.io/server/api/clock/v1" - "go.temporal.io/server/api/enums/v1" - "go.temporal.io/server/api/historyservice/v1" - "go.temporal.io/server/api/historyservicemock/v1" - "go.temporal.io/server/api/matchingservice/v1" - "go.temporal.io/server/api/matchingservicemock/v1" - persistencespb "go.temporal.io/server/api/persistence/v1" - "go.temporal.io/server/api/taskqueue/v1" - tokenspb "go.temporal.io/server/api/token/v1" - "go.temporal.io/server/common" - "go.temporal.io/server/common/clock" - "go.temporal.io/server/common/clock/hybrid_logical_clock" - "go.temporal.io/server/common/cluster" - "go.temporal.io/server/common/dynamicconfig" - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/log/tag" - "go.temporal.io/server/common/metrics" - "go.temporal.io/server/common/namespace" - "go.temporal.io/server/common/payload" - "go.temporal.io/server/common/payloads" - "go.temporal.io/server/common/persistence" - "go.temporal.io/server/common/persistence/visibility/manager" - "go.temporal.io/server/common/primitives" - "go.temporal.io/server/common/primitives/timestamp" - "go.temporal.io/server/common/quotas" - serviceerrors "go.temporal.io/server/common/serviceerror" - "go.temporal.io/server/common/util" -) - -type ( - matchingEngineSuite struct { - suite.Suite - controller *gomock.Controller - mockHistoryClient *historyservicemock.MockHistoryServiceClient - mockMatchingClient *matchingservicemock.MockMatchingServiceClient - mockNamespaceCache *namespace.MockRegistry - mockVisibilityManager *manager.MockVisibilityManager - - matchingEngine *matchingEngineImpl - taskManager *testTaskManager - logger log.Logger - sync.Mutex - } -) - -const ( - matchingTestNamespace = "matching-test" - matchingTestTaskQueue = "matching-test-taskqueue" -) - -func TestMatchingEngineSuite(t *testing.T) { - s := new(matchingEngineSuite) - suite.Run(t, s) -} - -func (s *matchingEngineSuite) SetupSuite() { -} - -func (s *matchingEngineSuite) TearDownSuite() { -} - -func (s *matchingEngineSuite) SetupTest() { - s.logger = log.NewTestLogger() - s.Lock() - defer s.Unlock() - s.controller = gomock.NewController(s.T()) - s.mockHistoryClient = historyservicemock.NewMockHistoryServiceClient(s.controller) - s.mockMatchingClient = matchingservicemock.NewMockMatchingServiceClient(s.controller) - s.mockMatchingClient.EXPECT().GetTaskQueueUserData(gomock.Any(), gomock.Any()). - Return(&matchingservice.GetTaskQueueUserDataResponse{}, nil).AnyTimes() - s.mockMatchingClient.EXPECT().UpdateTaskQueueUserData(gomock.Any(), gomock.Any()). - Return(&matchingservice.UpdateTaskQueueUserDataResponse{}, nil).AnyTimes() - s.mockMatchingClient.EXPECT().ReplicateTaskQueueUserData(gomock.Any(), gomock.Any()). 
- Return(&matchingservice.ReplicateTaskQueueUserDataResponse{}, nil).AnyTimes() - s.taskManager = newTestTaskManager(s.logger) - s.mockNamespaceCache = namespace.NewMockRegistry(s.controller) - ns := namespace.NewLocalNamespaceForTest(&persistencespb.NamespaceInfo{Name: matchingTestNamespace}, nil, "") - s.mockNamespaceCache.EXPECT().GetNamespaceByID(gomock.Any()).Return(ns, nil).AnyTimes() - s.mockNamespaceCache.EXPECT().GetNamespaceName(gomock.Any()).Return(ns.Name(), nil).AnyTimes() - s.mockVisibilityManager = manager.NewMockVisibilityManager(s.controller) - s.mockVisibilityManager.EXPECT().Close().AnyTimes() - - s.matchingEngine = s.newMatchingEngine(defaultTestConfig(), s.taskManager) - s.matchingEngine.Start() -} - -func (s *matchingEngineSuite) TearDownTest() { - s.matchingEngine.Stop() - s.controller.Finish() -} - -func (s *matchingEngineSuite) newMatchingEngine( - config *Config, taskMgr persistence.TaskManager, -) *matchingEngineImpl { - return newMatchingEngine(config, taskMgr, s.mockHistoryClient, s.logger, s.mockNamespaceCache, s.mockMatchingClient, s.mockVisibilityManager) -} - -func newMatchingEngine( - config *Config, taskMgr persistence.TaskManager, mockHistoryClient historyservice.HistoryServiceClient, - logger log.Logger, mockNamespaceCache namespace.Registry, mockMatchingClient matchingservice.MatchingServiceClient, - mockVisibilityManager manager.VisibilityManager, -) *matchingEngineImpl { - return &matchingEngineImpl{ - taskManager: taskMgr, - historyClient: mockHistoryClient, - taskQueues: make(map[taskQueueID]taskQueueManager), - taskQueueCount: make(map[taskQueueCounterKey]int), - logger: logger, - metricsHandler: metrics.NoopMetricsHandler, - matchingClient: mockMatchingClient, - tokenSerializer: common.NewProtoTaskTokenSerializer(), - config: config, - namespaceRegistry: mockNamespaceCache, - clusterMeta: cluster.NewMetadataForTest(cluster.NewTestClusterMetadataConfig(false, true)), - timeSource: clock.NewRealTimeSource(), - visibilityManager: mockVisibilityManager, - } -} - -func (s *matchingEngineSuite) TestAckManager() { - m := newAckManager(s.logger) - m.setAckLevel(100) - s.EqualValues(100, m.getAckLevel()) - s.EqualValues(100, m.getReadLevel()) - const t1 = 200 - const t2 = 220 - const t3 = 320 - const t4 = 340 - const t5 = 360 - const t6 = 380 - - m.addTask(t1) - s.EqualValues(100, m.getAckLevel()) - s.EqualValues(t1, m.getReadLevel()) - - m.addTask(t2) - s.EqualValues(100, m.getAckLevel()) - s.EqualValues(t2, m.getReadLevel()) - - m.completeTask(t2) - s.EqualValues(100, m.getAckLevel()) - s.EqualValues(t2, m.getReadLevel()) - - m.completeTask(t1) - s.EqualValues(t2, m.getAckLevel()) - s.EqualValues(t2, m.getReadLevel()) - - m.setAckLevel(300) - s.EqualValues(300, m.getAckLevel()) - s.EqualValues(300, m.getReadLevel()) - - m.addTask(t3) - s.EqualValues(300, m.getAckLevel()) - s.EqualValues(t3, m.getReadLevel()) - - m.addTask(t4) - s.EqualValues(300, m.getAckLevel()) - s.EqualValues(t4, m.getReadLevel()) - - m.completeTask(t3) - s.EqualValues(t3, m.getAckLevel()) - s.EqualValues(t4, m.getReadLevel()) - - m.completeTask(t4) - s.EqualValues(t4, m.getAckLevel()) - s.EqualValues(t4, m.getReadLevel()) - - m.setReadLevel(t5) - s.EqualValues(t5, m.getReadLevel()) - - m.setAckLevel(t5) - m.setReadLevelAfterGap(t6) - s.EqualValues(t6, m.getReadLevel()) - s.EqualValues(t6, m.getAckLevel()) -} - -func (s *matchingEngineSuite) TestAckManager_Sort() { - m := newAckManager(s.logger) - const t0 = 100 - m.setAckLevel(t0) - s.EqualValues(t0, m.getAckLevel()) - 
s.EqualValues(t0, m.getReadLevel()) - const t1 = 200 - const t2 = 220 - const t3 = 320 - const t4 = 340 - const t5 = 360 - - m.addTask(t1) - m.addTask(t2) - m.addTask(t3) - m.addTask(t4) - m.addTask(t5) - - m.completeTask(t2) - s.EqualValues(t0, m.getAckLevel()) - - m.completeTask(t1) - s.EqualValues(t2, m.getAckLevel()) - - m.completeTask(t5) - s.EqualValues(t2, m.getAckLevel()) - - m.completeTask(t4) - s.EqualValues(t2, m.getAckLevel()) - - m.completeTask(t3) - s.EqualValues(t5, m.getAckLevel()) -} - -func (s *matchingEngineSuite) TestPollActivityTaskQueuesEmptyResult() { - s.PollForTasksEmptyResultTest(context.Background(), enumspb.TASK_QUEUE_TYPE_ACTIVITY) -} - -func (s *matchingEngineSuite) TestPollWorkflowTaskQueuesEmptyResult() { - s.PollForTasksEmptyResultTest(context.Background(), enumspb.TASK_QUEUE_TYPE_WORKFLOW) -} - -func (s *matchingEngineSuite) TestPollActivityTaskQueuesEmptyResultWithShortContext() { - shortContextTimeout := returnEmptyTaskTimeBudget + 10*time.Millisecond - callContext, cancel := context.WithTimeout(context.Background(), shortContextTimeout) - defer cancel() - s.PollForTasksEmptyResultTest(callContext, enumspb.TASK_QUEUE_TYPE_ACTIVITY) -} - -func (s *matchingEngineSuite) TestPollWorkflowTaskQueuesEmptyResultWithShortContext() { - shortContextTimeout := returnEmptyTaskTimeBudget + 10*time.Millisecond - callContext, cancel := context.WithTimeout(context.Background(), shortContextTimeout) - defer cancel() - s.PollForTasksEmptyResultTest(callContext, enumspb.TASK_QUEUE_TYPE_WORKFLOW) -} - -func (s *matchingEngineSuite) TestOnlyUnloadMatchingInstance() { - queueID := newTestTaskQueueID( - namespace.ID(uuid.New()), - "makeToast", - enumspb.TASK_QUEUE_TYPE_ACTIVITY) - tqm, err := s.matchingEngine.getTaskQueueManager( - context.Background(), - queueID, - normalStickyInfo, - true) - s.Require().NoError(err) - - tqm2, err := newTaskQueueManager( - s.matchingEngine, - queueID, // same queueID as above - normalStickyInfo, - s.matchingEngine.config, - s.matchingEngine.clusterMeta, - ) - s.Require().NoError(err) - - // try to unload a different tqm instance with the same taskqueue ID - s.matchingEngine.unloadTaskQueue(tqm2) - - got, err := s.matchingEngine.getTaskQueueManager( - context.Background(), queueID, normalStickyInfo, true) - s.Require().NoError(err) - s.Require().Same(tqm, got, - "Unload call with non-matching taskQueueManager should not cause unload") - - // this time unload the right tqm - s.matchingEngine.unloadTaskQueue(tqm) - - got, err = s.matchingEngine.getTaskQueueManager( - context.Background(), queueID, normalStickyInfo, true) - s.Require().NoError(err) - s.Require().NotSame(tqm, got, - "Unload call with matching incarnation should have caused unload") -} - -func (s *matchingEngineSuite) TestPollWorkflowTaskQueues() { - namespaceID := namespace.ID(uuid.New()) - tl := "makeToast" - stickyTl := "makeStickyToast" - stickyTlKind := enumspb.TASK_QUEUE_KIND_STICKY - identity := "selfDrivingToaster" - - stickyTaskQueue := &taskqueuepb.TaskQueue{Name: stickyTl, Kind: stickyTlKind} - - s.matchingEngine.config.RangeSize = 2 // to test that range is not updated without tasks - s.matchingEngine.config.LongPollExpirationInterval = dynamicconfig.GetDurationPropertyFnFilteredByTaskQueueInfo(10 * time.Millisecond) - - runID := uuid.NewRandom().String() - workflowID := "workflow1" - workflowType := &commonpb.WorkflowType{ - Name: "workflow", - } - execution := &commonpb.WorkflowExecution{RunId: runID, WorkflowId: workflowID} - scheduledEventID := int64(0) - - // 
History service is using mock - s.mockHistoryClient.EXPECT().RecordWorkflowTaskStarted(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn( - func(ctx context.Context, taskRequest *historyservice.RecordWorkflowTaskStartedRequest, arg2 ...interface{}) (*historyservice.RecordWorkflowTaskStartedResponse, error) { - s.logger.Debug("Mock Received RecordWorkflowTaskStartedRequest") - response := &historyservice.RecordWorkflowTaskStartedResponse{ - WorkflowType: workflowType, - PreviousStartedEventId: scheduledEventID, - ScheduledEventId: scheduledEventID + 1, - Attempt: 1, - StickyExecutionEnabled: true, - WorkflowExecutionTaskQueue: &taskqueuepb.TaskQueue{Name: tl, Kind: enumspb.TASK_QUEUE_KIND_NORMAL}, - } - return response, nil - }).AnyTimes() - - addRequest := matchingservice.AddWorkflowTaskRequest{ - NamespaceId: namespaceID.String(), - Execution: execution, - ScheduledEventId: scheduledEventID, - TaskQueue: stickyTaskQueue, - ScheduleToStartTimeout: timestamp.DurationFromSeconds(100), - } - - _, err := s.matchingEngine.AddWorkflowTask(context.Background(), &addRequest) - // fail due to no sticky worker - s.Error(err) - s.ErrorContains(err, "sticky worker unavailable") - // poll the sticky queue, should get no result - resp, err := s.matchingEngine.PollWorkflowTaskQueue(context.Background(), &matchingservice.PollWorkflowTaskQueueRequest{ - NamespaceId: namespaceID.String(), - PollRequest: &workflowservice.PollWorkflowTaskQueueRequest{ - TaskQueue: stickyTaskQueue, - Identity: identity, - }, - }, metrics.NoopMetricsHandler) - s.NoError(err) - s.Equal(emptyPollWorkflowTaskQueueResponse, resp) - - // add task to sticky queue again, this time it should pass - _, err = s.matchingEngine.AddWorkflowTask(context.Background(), &addRequest) - s.NoError(err) - - resp, err = s.matchingEngine.PollWorkflowTaskQueue(context.Background(), &matchingservice.PollWorkflowTaskQueueRequest{ - NamespaceId: namespaceID.String(), - PollRequest: &workflowservice.PollWorkflowTaskQueueRequest{ - TaskQueue: stickyTaskQueue, - Identity: identity, - }, - }, metrics.NoopMetricsHandler) - s.NoError(err) - - expectedResp := &matchingservice.PollWorkflowTaskQueueResponse{ - TaskToken: resp.TaskToken, - WorkflowExecution: execution, - WorkflowType: workflowType, - PreviousStartedEventId: scheduledEventID, - StartedEventId: common.EmptyEventID, - Attempt: 1, - NextEventId: common.EmptyEventID, - BacklogCountHint: 0, - StickyExecutionEnabled: true, - Query: nil, - TransientWorkflowTask: nil, - WorkflowExecutionTaskQueue: &taskqueuepb.TaskQueue{ - Name: tl, - Kind: enumspb.TASK_QUEUE_KIND_NORMAL, - }, - BranchToken: nil, - ScheduledTime: nil, - StartedTime: nil, - Queries: nil, - } - - s.Nil(err) - s.Equal(expectedResp, resp) -} - -func (s *matchingEngineSuite) PollForTasksEmptyResultTest(callContext context.Context, taskType enumspb.TaskQueueType) { - s.matchingEngine.config.RangeSize = 2 // to test that range is not updated without tasks - if _, ok := callContext.Deadline(); !ok { - s.matchingEngine.config.LongPollExpirationInterval = dynamicconfig.GetDurationPropertyFnFilteredByTaskQueueInfo(10 * time.Millisecond) - } - - namespaceID := namespace.ID(uuid.New()) - tl := "makeToast" - identity := "selfDrivingToaster" - - taskQueue := &taskqueuepb.TaskQueue{ - Name: tl, - Kind: enumspb.TASK_QUEUE_KIND_NORMAL, - } - var taskQueueType enumspb.TaskQueueType - tlID := newTestTaskQueueID(namespaceID, tl, taskType) - const pollCount = 10 - for i := 0; i < pollCount; i++ { - if taskType == enumspb.TASK_QUEUE_TYPE_ACTIVITY { - 
pollResp, err := s.matchingEngine.PollActivityTaskQueue(callContext, &matchingservice.PollActivityTaskQueueRequest{ - NamespaceId: namespaceID.String(), - PollRequest: &workflowservice.PollActivityTaskQueueRequest{ - TaskQueue: taskQueue, - Identity: identity, - }, - }, metrics.NoopMetricsHandler) - s.NoError(err) - s.Equal(emptyPollActivityTaskQueueResponse, pollResp) - - taskQueueType = enumspb.TASK_QUEUE_TYPE_ACTIVITY - } else { - resp, err := s.matchingEngine.PollWorkflowTaskQueue(callContext, &matchingservice.PollWorkflowTaskQueueRequest{ - NamespaceId: namespaceID.String(), - PollRequest: &workflowservice.PollWorkflowTaskQueueRequest{ - TaskQueue: taskQueue, - Identity: identity, - }, - }, metrics.NoopMetricsHandler) - s.NoError(err) - s.Equal(emptyPollWorkflowTaskQueueResponse, resp) - - taskQueueType = enumspb.TASK_QUEUE_TYPE_WORKFLOW - } - select { - case <-callContext.Done(): - s.FailNow("Call context has expired.") - default: - } - // check the poller information - descResp, err := s.matchingEngine.DescribeTaskQueue(context.Background(), &matchingservice.DescribeTaskQueueRequest{ - NamespaceId: namespaceID.String(), - DescRequest: &workflowservice.DescribeTaskQueueRequest{ - TaskQueue: taskQueue, - TaskQueueType: taskQueueType, - IncludeTaskQueueStatus: false, - }, - }) - s.NoError(err) - s.Equal(1, len(descResp.Pollers)) - s.Equal(identity, descResp.Pollers[0].GetIdentity()) - s.NotEmpty(descResp.Pollers[0].GetLastAccessTime()) - s.Nil(descResp.GetTaskQueueStatus()) - } - s.EqualValues(1, s.taskManager.getTaskQueueManager(tlID).RangeID()) -} - -func (s *matchingEngineSuite) TestPollWorkflowTaskQueues_NamespaceHandover() { - namespaceID := namespace.ID(uuid.New()) - taskQueue := &taskqueuepb.TaskQueue{Name: "taskQueue", Kind: enumspb.TASK_QUEUE_KIND_NORMAL} - - addRequest := matchingservice.AddWorkflowTaskRequest{ - NamespaceId: namespaceID.String(), - Execution: &commonpb.WorkflowExecution{WorkflowId: "workflowID", RunId: uuid.NewRandom().String()}, - ScheduledEventId: int64(0), - TaskQueue: taskQueue, - ScheduleToStartTimeout: timestamp.DurationFromSeconds(100), - } - - // add multiple workflow tasks, but matching should not keeping polling new tasks - // upon getting namespace handover error when recording start for the first task - _, err := s.matchingEngine.AddWorkflowTask(context.Background(), &addRequest) - s.NoError(err) - _, err = s.matchingEngine.AddWorkflowTask(context.Background(), &addRequest) - s.NoError(err) - - s.mockHistoryClient.EXPECT().RecordWorkflowTaskStarted(gomock.Any(), gomock.Any(), gomock.Any()). 
- Return(nil, common.ErrNamespaceHandover).Times(1) - resp, err := s.matchingEngine.PollWorkflowTaskQueue(context.Background(), &matchingservice.PollWorkflowTaskQueueRequest{ - NamespaceId: namespaceID.String(), - PollRequest: &workflowservice.PollWorkflowTaskQueueRequest{ - TaskQueue: taskQueue, - Identity: "identity", - }, - }, metrics.NoopMetricsHandler) - s.Nil(resp) - s.Equal(common.ErrNamespaceHandover.Error(), err.Error()) -} - -func (s *matchingEngineSuite) TestPollActivityTaskQueues_NamespaceHandover() { - namespaceID := namespace.ID(uuid.New()) - taskQueue := &taskqueuepb.TaskQueue{Name: "taskQueue", Kind: enumspb.TASK_QUEUE_KIND_NORMAL} - - addRequest := matchingservice.AddActivityTaskRequest{ - NamespaceId: namespaceID.String(), - Execution: &commonpb.WorkflowExecution{WorkflowId: "workflowID", RunId: uuid.NewRandom().String()}, - ScheduledEventId: int64(5), - TaskQueue: taskQueue, - ScheduleToStartTimeout: timestamp.DurationFromSeconds(100), - } - - // add multiple activity tasks, but matching should not keeping polling new tasks - // upon getting namespace handover error when recording start for the first task - _, err := s.matchingEngine.AddActivityTask(context.Background(), &addRequest) - s.NoError(err) - _, err = s.matchingEngine.AddActivityTask(context.Background(), &addRequest) - s.NoError(err) - - s.mockHistoryClient.EXPECT().RecordActivityTaskStarted(gomock.Any(), gomock.Any(), gomock.Any()). - Return(nil, common.ErrNamespaceHandover).Times(1) - resp, err := s.matchingEngine.PollActivityTaskQueue(context.Background(), &matchingservice.PollActivityTaskQueueRequest{ - NamespaceId: namespaceID.String(), - PollRequest: &workflowservice.PollActivityTaskQueueRequest{ - TaskQueue: taskQueue, - Identity: "identity", - }, - }, metrics.NoopMetricsHandler) - s.Nil(resp) - s.Equal(common.ErrNamespaceHandover.Error(), err.Error()) -} - -func (s *matchingEngineSuite) TestPollWorkflowTask_UserDataDisabled() { - s.matchingEngine.config.LoadUserData = dynamicconfig.GetBoolPropertyFnFilteredByTaskQueueInfo(false) - taskQueue := s.T().Name() - - resp, err := s.matchingEngine.PollWorkflowTaskQueue(context.Background(), &matchingservice.PollWorkflowTaskQueueRequest{ - NamespaceId: "asdf", - PollRequest: &workflowservice.PollWorkflowTaskQueueRequest{ - Namespace: "asdf", - TaskQueue: &taskqueuepb.TaskQueue{ - Name: taskQueue, - Kind: enumspb.TASK_QUEUE_KIND_NORMAL, - }, - Identity: "identity", - WorkerVersionCapabilities: &commonpb.WorkerVersionCapabilities{ - BuildId: "some_build_id", - UseVersioning: true, - }, - }, - }, metrics.NoopMetricsHandler) - s.Error(err) - s.Nil(resp) - var failedPrecondition *serviceerror.FailedPrecondition - s.ErrorAs(err, &failedPrecondition) -} - -func (s *matchingEngineSuite) TestAddActivityTasks() { - s.AddTasksTest(enumspb.TASK_QUEUE_TYPE_ACTIVITY, false) -} - -func (s *matchingEngineSuite) TestAddWorkflowTasks() { - s.AddTasksTest(enumspb.TASK_QUEUE_TYPE_WORKFLOW, false) -} - -func (s *matchingEngineSuite) TestAddWorkflowTasksForwarded() { - s.AddTasksTest(enumspb.TASK_QUEUE_TYPE_WORKFLOW, true) -} - -func (s *matchingEngineSuite) AddTasksTest(taskType enumspb.TaskQueueType, isForwarded bool) { - s.matchingEngine.config.RangeSize = 300 // override to low number for the test - - namespaceID := namespace.ID(uuid.New()) - tl := "makeToast" - forwardedFrom := "/_sys/makeToast/1" - - taskQueue := &taskqueuepb.TaskQueue{ - Name: tl, - Kind: enumspb.TASK_QUEUE_KIND_NORMAL, - } - - const taskCount = 111 - - runID := uuid.New() - workflowID := "workflow1" - 
execution := &commonpb.WorkflowExecution{RunId: runID, WorkflowId: workflowID} - - for i := int64(0); i < taskCount; i++ { - scheduledEventID := i * 3 - var err error - if taskType == enumspb.TASK_QUEUE_TYPE_ACTIVITY { - addRequest := matchingservice.AddActivityTaskRequest{ - NamespaceId: namespaceID.String(), - Execution: execution, - ScheduledEventId: scheduledEventID, - TaskQueue: taskQueue, - ScheduleToStartTimeout: timestamp.DurationFromSeconds(100), - } - if isForwarded { - addRequest.ForwardedSource = forwardedFrom - } - _, err = s.matchingEngine.AddActivityTask(context.Background(), &addRequest) - } else { - addRequest := matchingservice.AddWorkflowTaskRequest{ - NamespaceId: namespaceID.String(), - Execution: execution, - ScheduledEventId: scheduledEventID, - TaskQueue: taskQueue, - ScheduleToStartTimeout: timestamp.DurationFromSeconds(100), - } - if isForwarded { - addRequest.ForwardedSource = forwardedFrom - } - _, err = s.matchingEngine.AddWorkflowTask(context.Background(), &addRequest) - } - - switch isForwarded { - case false: - s.NoError(err) - case true: - s.Equal(errRemoteSyncMatchFailed, err) - } - } - - switch isForwarded { - case false: - s.EqualValues(taskCount, s.taskManager.getTaskCount(newTestTaskQueueID(namespaceID, tl, taskType))) - case true: - s.EqualValues(0, s.taskManager.getTaskCount(newTestTaskQueueID(namespaceID, tl, taskType))) - } -} - -func (s *matchingEngineSuite) TestTaskWriterShutdown() { - s.matchingEngine.config.RangeSize = 300 // override to low number for the test - - namespaceID := namespace.ID(uuid.New()) - tl := "makeToast" - - taskQueue := &taskqueuepb.TaskQueue{ - Name: tl, - Kind: enumspb.TASK_QUEUE_KIND_NORMAL, - } - - runID := uuid.NewRandom().String() - workflowID := "workflow1" - execution := &commonpb.WorkflowExecution{RunId: runID, WorkflowId: workflowID} - - tlID := newTestTaskQueueID(namespaceID, tl, enumspb.TASK_QUEUE_TYPE_ACTIVITY) - tlm, err := s.matchingEngine.getTaskQueueManager(context.Background(), tlID, normalStickyInfo, true) - s.Nil(err) - - addRequest := matchingservice.AddActivityTaskRequest{ - NamespaceId: namespaceID.String(), - Execution: execution, - TaskQueue: taskQueue, - ScheduleToStartTimeout: timestamp.DurationFromSeconds(100), - } - - // stop the task writer explicitly - tlmImpl := tlm.(*taskQueueManagerImpl) - tlmImpl.taskWriter.Stop() - - // now attempt to add a task - scheduledEventID := int64(5) - addRequest.ScheduledEventId = scheduledEventID - _, err = s.matchingEngine.AddActivityTask(context.Background(), &addRequest) - s.Error(err) -} - -func (s *matchingEngineSuite) TestAddThenConsumeActivities() { - s.matchingEngine.config.LongPollExpirationInterval = dynamicconfig.GetDurationPropertyFnFilteredByTaskQueueInfo(10 * time.Millisecond) - - runID := uuid.NewRandom().String() - workflowID := "workflow1" - workflowExecution := &commonpb.WorkflowExecution{RunId: runID, WorkflowId: workflowID} - - const taskCount = 1000 - const initialRangeID = 102 - // TODO: Understand why publish is low when rangeSize is 3 - const rangeSize = 30 - - namespaceID := namespace.ID(uuid.New()) - tl := "makeToast" - tlID := newTestTaskQueueID(namespaceID, tl, enumspb.TASK_QUEUE_TYPE_ACTIVITY) - s.taskManager.getTaskQueueManager(tlID).rangeID = initialRangeID - s.matchingEngine.config.RangeSize = rangeSize // override to low number for the test - - taskQueue := &taskqueuepb.TaskQueue{ - Name: tl, - Kind: enumspb.TASK_QUEUE_KIND_NORMAL, - } - - for i := int64(0); i < taskCount; i++ { - scheduledEventID := i * 3 - addRequest := 
matchingservice.AddActivityTaskRequest{ - NamespaceId: namespaceID.String(), - Execution: workflowExecution, - ScheduledEventId: scheduledEventID, - TaskQueue: taskQueue, - ScheduleToStartTimeout: timestamp.DurationFromSeconds(100), - } - - _, err := s.matchingEngine.AddActivityTask(context.Background(), &addRequest) - s.NoError(err) - } - s.EqualValues(taskCount, s.taskManager.getTaskCount(tlID)) - - activityTypeName := "activity1" - activityID := "activityId1" - activityType := &commonpb.ActivityType{Name: activityTypeName} - activityInput := payloads.EncodeString("Activity1 Input") - - identity := "nobody" - - // History service is using mock - s.mockHistoryClient.EXPECT().RecordActivityTaskStarted(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn( - func(ctx context.Context, taskRequest *historyservice.RecordActivityTaskStartedRequest, arg2 ...interface{}) (*historyservice.RecordActivityTaskStartedResponse, error) { - s.logger.Debug("Mock Received RecordActivityTaskStartedRequest") - resp := &historyservice.RecordActivityTaskStartedResponse{ - Attempt: 1, - ScheduledEvent: newActivityTaskScheduledEvent(taskRequest.ScheduledEventId, 0, - &commandpb.ScheduleActivityTaskCommandAttributes{ - ActivityId: activityID, - TaskQueue: &taskqueuepb.TaskQueue{ - Name: taskQueue.Name, - Kind: enumspb.TASK_QUEUE_KIND_NORMAL, - }, - ActivityType: activityType, - Input: activityInput, - ScheduleToCloseTimeout: timestamp.DurationPtr(100 * time.Second), - ScheduleToStartTimeout: timestamp.DurationPtr(50 * time.Second), - StartToCloseTimeout: timestamp.DurationPtr(50 * time.Second), - HeartbeatTimeout: timestamp.DurationPtr(10 * time.Second), - }), - } - resp.StartedTime = timestamp.TimeNowPtrUtc() - return resp, nil - }).AnyTimes() - - for i := int64(0); i < taskCount; { - scheduledEventID := i * 3 - - result, err := s.matchingEngine.PollActivityTaskQueue(context.Background(), &matchingservice.PollActivityTaskQueueRequest{ - NamespaceId: namespaceID.String(), - PollRequest: &workflowservice.PollActivityTaskQueueRequest{ - TaskQueue: taskQueue, - Identity: identity, - }, - }, metrics.NoopMetricsHandler) - - s.NoError(err) - s.NotNil(result) - if len(result.TaskToken) == 0 { - s.logger.Debug("empty poll returned") - continue - } - s.EqualValues(activityID, result.ActivityId) - s.EqualValues(activityType, result.ActivityType) - s.EqualValues(activityInput, result.Input) - s.EqualValues(workflowExecution, result.WorkflowExecution) - s.Equal(true, validateTimeRange(*result.ScheduledTime, time.Minute)) - s.EqualValues(time.Second*100, *result.ScheduleToCloseTimeout) - s.Equal(true, validateTimeRange(*result.StartedTime, time.Minute)) - s.EqualValues(time.Second*50, *result.StartToCloseTimeout) - s.EqualValues(time.Second*10, *result.HeartbeatTimeout) - taskToken := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: workflowID, - RunId: runID, - ScheduledEventId: scheduledEventID, - ActivityId: activityID, - ActivityType: activityTypeName, - } - - serializedToken, _ := s.matchingEngine.tokenSerializer.Serialize(taskToken) - s.EqualValues(serializedToken, result.TaskToken) - i++ - } - s.EqualValues(0, s.taskManager.getTaskCount(tlID)) - expectedRange := int64(initialRangeID + taskCount/rangeSize) - if taskCount%rangeSize > 0 { - expectedRange++ - } - // Due to conflicts some ids are skipped and more real ranges are used. 
- s.True(expectedRange <= s.taskManager.getTaskQueueManager(tlID).rangeID) -} - -func (s *matchingEngineSuite) TestSyncMatchActivities() { - // Set a short long poll expiration so we don't have to wait too long for 0 throttling cases - s.matchingEngine.config.LongPollExpirationInterval = dynamicconfig.GetDurationPropertyFnFilteredByTaskQueueInfo(2 * time.Second) - - runID := uuid.NewRandom().String() - workflowID := "workflow1" - workflowExecution := &commonpb.WorkflowExecution{RunId: runID, WorkflowId: workflowID} - - const taskCount = 10 - const initialRangeID = 102 - // TODO: Understand why publish is low when rangeSize is 3 - const rangeSize = 30 - - namespaceID := namespace.ID(uuid.New()) - tl := "makeToast" - tlID := newTestTaskQueueID(namespaceID, tl, enumspb.TASK_QUEUE_TYPE_ACTIVITY) - s.matchingEngine.config.RangeSize = rangeSize // override to low number for the test - // So we can get snapshots - scope := tally.NewTestScope("test", nil) - s.matchingEngine.metricsHandler = metrics.NewTallyMetricsHandler(metrics.ClientConfig{}, scope).WithTags(metrics.ServiceNameTag(primitives.MatchingService)) - - var err error - s.taskManager.getTaskQueueManager(tlID).rangeID = initialRangeID - mgr, err := newTaskQueueManager(s.matchingEngine, tlID, normalStickyInfo, s.matchingEngine.config, s.matchingEngine.clusterMeta) - s.NoError(err) - - mgrImpl, ok := mgr.(*taskQueueManagerImpl) - s.True(ok) - - mgrImpl.matcher.config.MinTaskThrottlingBurstSize = func() int { return 0 } - mgrImpl.matcher.rateLimiter = quotas.NewRateLimiter( - defaultTaskDispatchRPS, - defaultTaskDispatchRPS, - ) - mgrImpl.matcher.dynamicRateBurst = &dynamicRateBurstWrapper{ - MutableRateBurst: quotas.NewMutableRateBurst( - defaultTaskDispatchRPS, - defaultTaskDispatchRPS, - ), - RateLimiterImpl: mgrImpl.matcher.rateLimiter.(*quotas.RateLimiterImpl), - } - s.matchingEngine.updateTaskQueue(tlID, mgr) - - mgr.Start() - - taskQueue := &taskqueuepb.TaskQueue{ - Name: tl, - Kind: enumspb.TASK_QUEUE_KIND_NORMAL, - } - activityTypeName := "activity1" - activityID := "activityId1" - activityType := &commonpb.ActivityType{Name: activityTypeName} - activityInput := payloads.EncodeString("Activity1 Input") - - identity := "nobody" - - // History service is using mock - s.mockHistoryClient.EXPECT().RecordActivityTaskStarted(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn( - func(ctx context.Context, taskRequest *historyservice.RecordActivityTaskStartedRequest, arg2 ...interface{}) (*historyservice.RecordActivityTaskStartedResponse, error) { - s.logger.Debug("Mock Received RecordActivityTaskStartedRequest") - return &historyservice.RecordActivityTaskStartedResponse{ - Attempt: 1, - ScheduledEvent: newActivityTaskScheduledEvent(taskRequest.ScheduledEventId, 0, - &commandpb.ScheduleActivityTaskCommandAttributes{ - ActivityId: activityID, - TaskQueue: &taskqueuepb.TaskQueue{ - Name: taskQueue.Name, - Kind: enumspb.TASK_QUEUE_KIND_NORMAL, - }, - ActivityType: activityType, - Input: activityInput, - ScheduleToStartTimeout: timestamp.DurationPtr(1 * time.Second), - ScheduleToCloseTimeout: timestamp.DurationPtr(2 * time.Second), - StartToCloseTimeout: timestamp.DurationPtr(1 * time.Second), - HeartbeatTimeout: timestamp.DurationPtr(1 * time.Second), - }), - }, nil - }).AnyTimes() - - pollFunc := func(maxDispatch float64) (*matchingservice.PollActivityTaskQueueResponse, error) { - return s.matchingEngine.PollActivityTaskQueue(context.Background(), &matchingservice.PollActivityTaskQueueRequest{ - NamespaceId: namespaceID.String(), - 
PollRequest: &workflowservice.PollActivityTaskQueueRequest{ - TaskQueue: taskQueue, - Identity: identity, - TaskQueueMetadata: &taskqueuepb.TaskQueueMetadata{MaxTasksPerSecond: &types.DoubleValue{Value: maxDispatch}}, - }, - }, metrics.NoopMetricsHandler) - } - - for i := int64(0); i < taskCount; i++ { - scheduledEventID := i * 3 - - var wg sync.WaitGroup - var result *matchingservice.PollActivityTaskQueueResponse - var pollErr error - maxDispatch := defaultTaskDispatchRPS - if i == taskCount/2 { - maxDispatch = 0 - } - wg.Add(1) - go func() { - defer wg.Done() - result, pollErr = pollFunc(maxDispatch) - }() - time.Sleep(20 * time.Millisecond) // Necessary for sync match to happen - - addRequest := matchingservice.AddActivityTaskRequest{ - NamespaceId: namespaceID.String(), - Execution: workflowExecution, - ScheduledEventId: scheduledEventID, - TaskQueue: taskQueue, - ScheduleToStartTimeout: timestamp.DurationFromSeconds(100), - } - _, err := s.matchingEngine.AddActivityTask(context.Background(), &addRequest) - wg.Wait() - s.NoError(err) - s.NoError(pollErr) - s.NotNil(result) - - if len(result.TaskToken) == 0 { - // when ratelimit is set to zero, poller is expected to return empty result - // reset ratelimit, poll again and make sure task is returned this time - s.logger.Debug("empty poll returned") - s.Equal(float64(0), maxDispatch) - maxDispatch = defaultTaskDispatchRPS - wg.Add(1) - go func() { - defer wg.Done() - result, pollErr = pollFunc(maxDispatch) - }() - wg.Wait() - s.NoError(err) - s.NoError(pollErr) - s.NotNil(result) - s.True(len(result.TaskToken) > 0) - } - - s.EqualValues(activityID, result.ActivityId) - s.EqualValues(activityType, result.ActivityType) - s.EqualValues(activityInput, result.Input) - s.EqualValues(workflowExecution, result.WorkflowExecution) - taskToken := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: workflowID, - RunId: runID, - ScheduledEventId: scheduledEventID, - ActivityId: activityID, - ActivityType: activityTypeName, - } - - serializedToken, _ := s.matchingEngine.tokenSerializer.Serialize(taskToken) - // s.EqualValues(scheduledEventID, result.Task) - - s.EqualValues(serializedToken, result.TaskToken) - } - - time.Sleep(20 * time.Millisecond) // So any buffer tasks from 0 rps get picked up - snap := scope.Snapshot() - syncCtr := snap.Counters()["test.sync_throttle_count+namespace="+matchingTestNamespace+",operation=TaskQueueMgr,service_name=matching,task_type=Activity,taskqueue=makeToast"] - s.Equal(1, int(syncCtr.Value())) // Check times zero rps is set = throttle counter - s.EqualValues(1, s.taskManager.getCreateTaskCount(tlID)) // Check times zero rps is set = Tasks stored in persistence - s.EqualValues(0, s.taskManager.getTaskCount(tlID)) - expectedRange := int64(initialRangeID + taskCount/rangeSize) - if taskCount%rangeSize > 0 { - expectedRange++ - } - // Due to conflicts some ids are skipped and more real ranges are used. 
- s.True(expectedRange <= s.taskManager.getTaskQueueManager(tlID).rangeID) - - // check the poller information - tlType := enumspb.TASK_QUEUE_TYPE_ACTIVITY - descResp, err := s.matchingEngine.DescribeTaskQueue(context.Background(), &matchingservice.DescribeTaskQueueRequest{ - NamespaceId: namespaceID.String(), - DescRequest: &workflowservice.DescribeTaskQueueRequest{ - TaskQueue: taskQueue, - TaskQueueType: tlType, - IncludeTaskQueueStatus: true, - }, - }) - s.NoError(err) - s.Equal(1, len(descResp.Pollers)) - s.Equal(identity, descResp.Pollers[0].GetIdentity()) - s.NotEmpty(descResp.Pollers[0].GetLastAccessTime()) - s.Equal(defaultTaskDispatchRPS, descResp.Pollers[0].GetRatePerSecond()) - s.NotNil(descResp.GetTaskQueueStatus()) - numPartitions := float64(s.matchingEngine.config.NumTaskqueueWritePartitions("", "", tlType)) - s.True(descResp.GetTaskQueueStatus().GetRatePerSecond()*numPartitions >= (defaultTaskDispatchRPS - 1)) -} - -func (s *matchingEngineSuite) TestConcurrentPublishConsumeActivities() { - dispatchLimitFn := func(int, int64) float64 { - return defaultTaskDispatchRPS - } - const workerCount = 20 - const taskCount = 100 - throttleCt := s.concurrentPublishConsumeActivities(workerCount, taskCount, dispatchLimitFn) - s.Zero(throttleCt) -} - -func (s *matchingEngineSuite) TestConcurrentPublishConsumeActivitiesWithZeroDispatch() { - s.T().Skip("Racy - times out ~50% of the time running locally with --race") - // Set a short long poll expiration so we don't have to wait too long for 0 throttling cases - s.matchingEngine.config.LongPollExpirationInterval = dynamicconfig.GetDurationPropertyFnFilteredByTaskQueueInfo(20 * time.Millisecond) - dispatchLimitFn := func(wc int, tc int64) float64 { - if tc%50 == 0 && wc%5 == 0 { // Gets triggered atleast 20 times - return 0 - } - return defaultTaskDispatchRPS - } - const workerCount = 20 - const taskCount = 100 - throttleCt := s.concurrentPublishConsumeActivities(workerCount, taskCount, dispatchLimitFn) - s.logger.Info("Number of tasks throttled", tag.Number(throttleCt)) - // atleast once from 0 dispatch poll, and until TTL is hit at which time throttle limit is reset - // hard to predict exactly how many times, since the atomic.Value load might not have updated. 
- s.True(throttleCt >= 1) -} - -func (s *matchingEngineSuite) concurrentPublishConsumeActivities( - workerCount int, - taskCount int64, - dispatchLimitFn func(int, int64) float64, -) int64 { - scope := tally.NewTestScope("test", nil) - s.matchingEngine.metricsHandler = metrics.NewTallyMetricsHandler(metrics.ClientConfig{}, scope).WithTags(metrics.ServiceNameTag(primitives.MatchingService)) - runID := uuid.NewRandom().String() - workflowID := "workflow1" - workflowExecution := &commonpb.WorkflowExecution{RunId: runID, WorkflowId: workflowID} - - const initialRangeID = 0 - const rangeSize = 3 - var scheduledEventID int64 = 123 - namespaceID := namespace.ID(uuid.New()) - tl := "makeToast" - tlID := newTestTaskQueueID(namespaceID, tl, enumspb.TASK_QUEUE_TYPE_ACTIVITY) - s.matchingEngine.config.RangeSize = rangeSize // override to low number for the test - - s.taskManager.getTaskQueueManager(tlID).rangeID = initialRangeID - var err error - mgr, err := newTaskQueueManager(s.matchingEngine, tlID, normalStickyInfo, s.matchingEngine.config, s.matchingEngine.clusterMeta) - s.NoError(err) - - mgrImpl := mgr.(*taskQueueManagerImpl) - mgrImpl.matcher.config.MinTaskThrottlingBurstSize = func() int { return 0 } - mgrImpl.matcher.rateLimiter = quotas.NewRateLimiter( - defaultTaskDispatchRPS, - defaultTaskDispatchRPS, - ) - mgrImpl.matcher.dynamicRateBurst = &dynamicRateBurstWrapper{ - MutableRateBurst: quotas.NewMutableRateBurst( - defaultTaskDispatchRPS, - defaultTaskDispatchRPS, - ), - RateLimiterImpl: mgrImpl.matcher.rateLimiter.(*quotas.RateLimiterImpl), - } - s.matchingEngine.updateTaskQueue(tlID, mgr) - mgr.Start() - - taskQueue := &taskqueuepb.TaskQueue{ - Name: tl, - Kind: enumspb.TASK_QUEUE_KIND_NORMAL, - } - var wg sync.WaitGroup - wg.Add(2 * workerCount) - - for p := 0; p < workerCount; p++ { - go func() { - defer wg.Done() - for i := int64(0); i < taskCount; i++ { - addRequest := matchingservice.AddActivityTaskRequest{ - NamespaceId: namespaceID.String(), - Execution: workflowExecution, - ScheduledEventId: scheduledEventID, - TaskQueue: taskQueue, - ScheduleToStartTimeout: timestamp.DurationFromSeconds(100), - } - - _, err := s.matchingEngine.AddActivityTask(context.Background(), &addRequest) - if err != nil { - s.logger.Info("Failure in AddActivityTask", tag.Error(err)) - i-- - } - } - }() - } - - activityTypeName := "activity1" - activityID := "activityId1" - activityType := &commonpb.ActivityType{Name: activityTypeName} - activityInput := payloads.EncodeString("Activity1 Input") - activityHeader := &commonpb.Header{ - Fields: map[string]*commonpb.Payload{"tracing": payload.EncodeString("tracing data")}, - } - - identity := "nobody" - - // History service is using mock - s.mockHistoryClient.EXPECT().RecordActivityTaskStarted(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn( - func(ctx context.Context, taskRequest *historyservice.RecordActivityTaskStartedRequest, arg2 ...interface{}) (*historyservice.RecordActivityTaskStartedResponse, error) { - s.logger.Debug("Mock Received RecordActivityTaskStartedRequest") - return &historyservice.RecordActivityTaskStartedResponse{ - Attempt: 1, - ScheduledEvent: newActivityTaskScheduledEvent(taskRequest.ScheduledEventId, 0, - &commandpb.ScheduleActivityTaskCommandAttributes{ - ActivityId: activityID, - TaskQueue: &taskqueuepb.TaskQueue{ - Name: taskQueue.Name, - Kind: enumspb.TASK_QUEUE_KIND_NORMAL, - }, - ActivityType: activityType, - Input: activityInput, - Header: activityHeader, - ScheduleToStartTimeout: timestamp.DurationPtr(1 * time.Second), - 
ScheduleToCloseTimeout: timestamp.DurationPtr(2 * time.Second), - StartToCloseTimeout: timestamp.DurationPtr(1 * time.Second), - HeartbeatTimeout: timestamp.DurationPtr(1 * time.Second), - }), - }, nil - }).AnyTimes() - - for p := 0; p < workerCount; p++ { - go func(wNum int) { - defer wg.Done() - for i := int64(0); i < taskCount; { - maxDispatch := dispatchLimitFn(wNum, i) - result, err := s.matchingEngine.PollActivityTaskQueue(context.Background(), &matchingservice.PollActivityTaskQueueRequest{ - NamespaceId: namespaceID.String(), - PollRequest: &workflowservice.PollActivityTaskQueueRequest{ - TaskQueue: taskQueue, - Identity: identity, - TaskQueueMetadata: &taskqueuepb.TaskQueueMetadata{MaxTasksPerSecond: &types.DoubleValue{Value: maxDispatch}}, - }, - }, metrics.NoopMetricsHandler) - s.NoError(err) - s.NotNil(result) - if len(result.TaskToken) == 0 { - s.logger.Debug("empty poll returned") - continue - } - s.EqualValues(activityID, result.ActivityId) - s.EqualValues(activityType, result.ActivityType) - s.EqualValues(activityInput, result.Input) - s.EqualValues(activityHeader, result.Header) - s.EqualValues(workflowExecution, result.WorkflowExecution) - taskToken := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: workflowID, - RunId: runID, - ScheduledEventId: scheduledEventID, - ActivityId: activityID, - ActivityType: activityTypeName, - } - resultToken, err := s.matchingEngine.tokenSerializer.Deserialize(result.TaskToken) - s.NoError(err) - - // taskToken, _ := s.matchingEngine.tokenSerializer.Serialize(token) - // s.EqualValues(taskToken, result.Task, fmt.Sprintf("%v!=%v", string(taskToken))) - s.EqualValues(taskToken, resultToken, fmt.Sprintf("%v!=%v", taskToken, resultToken)) - i++ - } - }(p) - } - wg.Wait() - totalTasks := int(taskCount) * workerCount - persisted := s.taskManager.getCreateTaskCount(tlID) - s.True(persisted < totalTasks) - expectedRange := int64(initialRangeID + persisted/rangeSize) - if persisted%rangeSize > 0 { - expectedRange++ - } - // Due to conflicts some ids are skipped and more real ranges are used. 
- s.True(expectedRange <= s.taskManager.getTaskQueueManager(tlID).rangeID) - s.EqualValues(0, s.taskManager.getTaskCount(tlID)) - - syncCtr := scope.Snapshot().Counters()["test.sync_throttle_count+namespace="+matchingTestNamespace+",operation=TaskQueueMgr,taskqueue=makeToast"] - bufCtr := scope.Snapshot().Counters()["test.buffer_throttle_count+namespace="+matchingTestNamespace+",operation=TaskQueueMgr,taskqueue=makeToast"] - total := int64(0) - if syncCtr != nil { - total += syncCtr.Value() - } - if bufCtr != nil { - total += bufCtr.Value() - } - return total -} - -func (s *matchingEngineSuite) TestConcurrentPublishConsumeWorkflowTasks() { - runID := uuid.NewRandom().String() - workflowID := "workflow1" - workflowExecution := &commonpb.WorkflowExecution{RunId: runID, WorkflowId: workflowID} - - const workerCount = 20 - const taskCount = 100 - const initialRangeID = 0 - const rangeSize = 5 - var scheduledEventID int64 = 123 - var startedEventID int64 = 1412 - - namespaceID := namespace.ID(uuid.New()) - tl := "makeToast" - tlID := newTestTaskQueueID(namespaceID, tl, enumspb.TASK_QUEUE_TYPE_WORKFLOW) - s.taskManager.getTaskQueueManager(tlID).rangeID = initialRangeID - s.matchingEngine.config.RangeSize = rangeSize // override to low number for the test - - taskQueue := &taskqueuepb.TaskQueue{ - Name: tl, - Kind: enumspb.TASK_QUEUE_KIND_NORMAL, - } - - var wg sync.WaitGroup - wg.Add(2 * workerCount) - - for p := 0; p < workerCount; p++ { - go func() { - for i := int64(0); i < taskCount; i++ { - addRequest := matchingservice.AddWorkflowTaskRequest{ - NamespaceId: namespaceID.String(), - Execution: workflowExecution, - ScheduledEventId: scheduledEventID, - TaskQueue: taskQueue, - ScheduleToStartTimeout: timestamp.DurationFromSeconds(100), - } - - _, err := s.matchingEngine.AddWorkflowTask(context.Background(), &addRequest) - if err != nil { - panic(err) - } - } - wg.Done() - }() - } - workflowTypeName := "workflowType1" - workflowType := &commonpb.WorkflowType{Name: workflowTypeName} - - identity := "nobody" - - // History service is using mock - s.mockHistoryClient.EXPECT().RecordWorkflowTaskStarted(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn( - func(ctx context.Context, taskRequest *historyservice.RecordWorkflowTaskStartedRequest, arg2 ...interface{}) (*historyservice.RecordWorkflowTaskStartedResponse, error) { - s.logger.Debug("Mock Received RecordWorkflowTaskStartedRequest") - return &historyservice.RecordWorkflowTaskStartedResponse{ - PreviousStartedEventId: startedEventID, - StartedEventId: startedEventID, - ScheduledEventId: scheduledEventID, - WorkflowType: workflowType, - Attempt: 1, - }, nil - }).AnyTimes() - for p := 0; p < workerCount; p++ { - go func() { - for i := int64(0); i < taskCount; { - result, err := s.matchingEngine.PollWorkflowTaskQueue(context.Background(), &matchingservice.PollWorkflowTaskQueueRequest{ - NamespaceId: namespaceID.String(), - PollRequest: &workflowservice.PollWorkflowTaskQueueRequest{ - TaskQueue: taskQueue, - Identity: identity, - }, - }, metrics.NoopMetricsHandler) - if err != nil { - panic(err) - } - s.NotNil(result) - if len(result.TaskToken) == 0 { - s.logger.Debug("empty poll returned") - continue - } - s.EqualValues(workflowExecution, result.WorkflowExecution) - s.EqualValues(workflowType, result.WorkflowType) - s.EqualValues(startedEventID, result.StartedEventId) - s.EqualValues(workflowExecution, result.WorkflowExecution) - taskToken := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: workflowID, - RunId: 
runID, - ScheduledEventId: scheduledEventID, - StartedEventId: startedEventID, - } - resultToken, err := s.matchingEngine.tokenSerializer.Deserialize(result.TaskToken) - if err != nil { - panic(err) - } - - // taskToken, _ := s.matchingEngine.tokenSerializer.Serialize(token) - // s.EqualValues(taskToken, result.Task, fmt.Sprintf("%v!=%v", string(taskToken))) - s.EqualValues(taskToken, resultToken, fmt.Sprintf("%v!=%v", taskToken, resultToken)) - i++ - } - wg.Done() - }() - } - wg.Wait() - s.EqualValues(0, s.taskManager.getTaskCount(tlID)) - totalTasks := taskCount * workerCount - persisted := s.taskManager.getCreateTaskCount(tlID) - s.True(persisted < totalTasks) - expectedRange := int64(initialRangeID + persisted/rangeSize) - if persisted%rangeSize > 0 { - expectedRange++ - } - // Due to conflicts some ids are skipped and more real ranges are used. - s.True(expectedRange <= s.taskManager.getTaskQueueManager(tlID).rangeID) -} - -func (s *matchingEngineSuite) TestPollWithExpiredContext() { - identity := "nobody" - namespaceID := namespace.ID(uuid.New()) - tl := "makeToast" - - taskQueue := &taskqueuepb.TaskQueue{ - Name: tl, - Kind: enumspb.TASK_QUEUE_KIND_NORMAL, - } - - // Try with cancelled context - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - cancel() - _, err := s.matchingEngine.PollActivityTaskQueue(ctx, &matchingservice.PollActivityTaskQueueRequest{ - NamespaceId: namespaceID.String(), - PollRequest: &workflowservice.PollActivityTaskQueueRequest{ - TaskQueue: taskQueue, - Identity: identity, - }, - }, metrics.NoopMetricsHandler) - - s.Equal(ctx.Err(), err) - - // Try with expired context - ctx, cancel = context.WithTimeout(context.Background(), time.Second) - defer cancel() - resp, err := s.matchingEngine.PollActivityTaskQueue(ctx, &matchingservice.PollActivityTaskQueueRequest{ - NamespaceId: namespaceID.String(), - PollRequest: &workflowservice.PollActivityTaskQueueRequest{ - TaskQueue: taskQueue, - Identity: identity, - }, - }, metrics.NoopMetricsHandler) - s.Nil(err) - s.Equal(emptyPollActivityTaskQueueResponse, resp) -} - -func (s *matchingEngineSuite) TestMultipleEnginesActivitiesRangeStealing() { - runID := uuid.NewRandom().String() - workflowID := "workflow1" - workflowExecution := &commonpb.WorkflowExecution{RunId: runID, WorkflowId: workflowID} - - const engineCount = 2 - const taskCount = 400 - const iterations = 2 - const initialRangeID = 0 - const rangeSize = 10 - var scheduledEventID int64 = 123 - - namespaceID := namespace.ID(uuid.New()) - tl := "makeToast" - tlID := newTestTaskQueueID(namespaceID, tl, enumspb.TASK_QUEUE_TYPE_ACTIVITY) - s.taskManager.getTaskQueueManager(tlID).rangeID = initialRangeID - s.matchingEngine.config.RangeSize = rangeSize // override to low number for the test - - taskQueue := &taskqueuepb.TaskQueue{ - Name: tl, - Kind: enumspb.TASK_QUEUE_KIND_NORMAL, - } - - engines := make([]*matchingEngineImpl, engineCount) - for p := 0; p < engineCount; p++ { - e := s.newMatchingEngine(defaultTestConfig(), s.taskManager) - e.config.RangeSize = rangeSize - engines[p] = e - e.Start() - } - - for j := 0; j < iterations; j++ { - for p := 0; p < engineCount; p++ { - engine := engines[p] - for i := int64(0); i < taskCount; i++ { - addRequest := matchingservice.AddActivityTaskRequest{ - NamespaceId: namespaceID.String(), - Execution: workflowExecution, - ScheduledEventId: scheduledEventID, - TaskQueue: taskQueue, - ScheduleToStartTimeout: timestamp.DurationFromSeconds(600), - } - - _, err := engine.AddActivityTask(context.Background(), 
&addRequest) - if err != nil { - if _, ok := err.(*persistence.ConditionFailedError); ok { - i-- // retry adding - } else { - panic(fmt.Sprintf("errType=%T, err=%v", err, err)) - } - } - } - } - } - - s.EqualValues(iterations*engineCount*taskCount, s.taskManager.getCreateTaskCount(tlID)) - - activityTypeName := "activity1" - activityID := "activityId1" - activityType := &commonpb.ActivityType{Name: activityTypeName} - activityInput := payloads.EncodeString("Activity1 Input") - - identity := "nobody" - - startedTasks := make(map[int64]bool) - - // History service is using mock - s.mockHistoryClient.EXPECT().RecordActivityTaskStarted(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn( - func(ctx context.Context, taskRequest *historyservice.RecordActivityTaskStartedRequest, arg2 ...interface{}) (*historyservice.RecordActivityTaskStartedResponse, error) { - if _, ok := startedTasks[taskRequest.TaskId]; ok { - s.logger.Debug("From error function Mock Received DUPLICATED RecordActivityTaskStartedRequest", tag.TaskID(taskRequest.TaskId)) - return nil, serviceerror.NewNotFound("already started") - } - s.logger.Debug("Mock Received RecordActivityTaskStartedRequest", tag.TaskID(taskRequest.TaskId)) - - startedTasks[taskRequest.TaskId] = true - return &historyservice.RecordActivityTaskStartedResponse{ - Attempt: 1, - ScheduledEvent: newActivityTaskScheduledEvent(taskRequest.ScheduledEventId, 0, - &commandpb.ScheduleActivityTaskCommandAttributes{ - ActivityId: activityID, - TaskQueue: &taskqueuepb.TaskQueue{ - Name: taskQueue.Name, - Kind: enumspb.TASK_QUEUE_KIND_NORMAL, - }, - ActivityType: activityType, - Input: activityInput, - ScheduleToStartTimeout: timestamp.DurationPtr(600 * time.Second), - ScheduleToCloseTimeout: timestamp.DurationPtr(2 * time.Second), - StartToCloseTimeout: timestamp.DurationPtr(1 * time.Second), - HeartbeatTimeout: timestamp.DurationPtr(1 * time.Second), - }), - }, nil - }).AnyTimes() - for j := 0; j < iterations; j++ { - for p := 0; p < engineCount; p++ { - engine := engines[p] - for i := int64(0); i < taskCount; /* incremented explicitly to skip empty polls */ { - result, err := engine.PollActivityTaskQueue(context.Background(), &matchingservice.PollActivityTaskQueueRequest{ - NamespaceId: namespaceID.String(), - PollRequest: &workflowservice.PollActivityTaskQueueRequest{ - TaskQueue: taskQueue, - Identity: identity, - }, - }, metrics.NoopMetricsHandler) - if err != nil { - panic(err) - } - s.NotNil(result) - if len(result.TaskToken) == 0 { - s.logger.Debug("empty poll returned") - continue - } - s.EqualValues(activityID, result.ActivityId) - s.EqualValues(activityType, result.ActivityType) - s.EqualValues(activityInput, result.Input) - s.EqualValues(workflowExecution, result.WorkflowExecution) - taskToken := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: workflowID, - RunId: runID, - ScheduledEventId: scheduledEventID, - ActivityId: activityID, - ActivityType: activityTypeName, - } - resultToken, err := engine.tokenSerializer.Deserialize(result.TaskToken) - if err != nil { - panic(err) - } - // taskToken, _ := s.matchingEngine.tokenSerializer.Serialize(token) - // s.EqualValues(taskToken, result.Task, fmt.Sprintf("%v!=%v", string(taskToken))) - s.EqualValues(taskToken, resultToken, fmt.Sprintf("%v!=%v", taskToken, resultToken)) - i++ - } - } - } - - for _, e := range engines { - e.Stop() - } - - s.EqualValues(0, s.taskManager.getTaskCount(tlID)) - totalTasks := taskCount * engineCount * iterations - persisted := 
s.taskManager.getCreateTaskCount(tlID) - // No sync matching as all messages are published first - s.EqualValues(totalTasks, persisted) - expectedRange := int64(initialRangeID + persisted/rangeSize) - if persisted%rangeSize > 0 { - expectedRange++ - } - // Due to conflicts some ids are skipped and more real ranges are used. - s.True(expectedRange <= s.taskManager.getTaskQueueManager(tlID).rangeID) -} - -func (s *matchingEngineSuite) TestMultipleEnginesWorkflowTasksRangeStealing() { - runID := uuid.NewRandom().String() - workflowID := "workflow1" - workflowExecution := &commonpb.WorkflowExecution{RunId: runID, WorkflowId: workflowID} - - const engineCount = 2 - const taskCount = 400 - const iterations = 2 - const initialRangeID = 0 - const rangeSize = 10 - var scheduledEventID int64 = 123 - - namespaceID := namespace.ID(uuid.New()) - tl := "makeToast" - tlID := newTestTaskQueueID(namespaceID, tl, enumspb.TASK_QUEUE_TYPE_WORKFLOW) - s.taskManager.getTaskQueueManager(tlID).rangeID = initialRangeID - s.matchingEngine.config.RangeSize = rangeSize // override to low number for the test - - taskQueue := &taskqueuepb.TaskQueue{ - Name: tl, - Kind: enumspb.TASK_QUEUE_KIND_NORMAL, - } - - engines := make([]*matchingEngineImpl, engineCount) - for p := 0; p < engineCount; p++ { - e := s.newMatchingEngine(defaultTestConfig(), s.taskManager) - e.config.RangeSize = rangeSize - engines[p] = e - e.Start() - } - - for j := 0; j < iterations; j++ { - for p := 0; p < engineCount; p++ { - engine := engines[p] - for i := int64(0); i < taskCount; i++ { - addRequest := matchingservice.AddWorkflowTaskRequest{ - NamespaceId: namespaceID.String(), - Execution: workflowExecution, - ScheduledEventId: scheduledEventID, - TaskQueue: taskQueue, - ScheduleToStartTimeout: timestamp.DurationFromSeconds(600), - } - - _, err := engine.AddWorkflowTask(context.Background(), &addRequest) - if err != nil { - if _, ok := err.(*persistence.ConditionFailedError); ok { - i-- // retry adding - } else { - panic(fmt.Sprintf("errType=%T, err=%v", err, err)) - } - } - } - } - } - workflowTypeName := "workflowType1" - workflowType := &commonpb.WorkflowType{Name: workflowTypeName} - - identity := "nobody" - var startedEventID int64 = 1412 - - startedTasks := make(map[int64]bool) - - // History service is using mock - s.mockHistoryClient.EXPECT().RecordWorkflowTaskStarted(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn( - func(ctx context.Context, taskRequest *historyservice.RecordWorkflowTaskStartedRequest, arg2 ...interface{}) (*historyservice.RecordWorkflowTaskStartedResponse, error) { - if _, ok := startedTasks[taskRequest.TaskId]; ok { - s.logger.Debug("From error function Mock Received DUPLICATED RecordWorkflowTaskStartedRequest", tag.TaskID(taskRequest.TaskId)) - return nil, serviceerrors.NewTaskAlreadyStarted("Workflow") - } - s.logger.Debug("Mock Received RecordWorkflowTaskStartedRequest", tag.TaskID(taskRequest.TaskId)) - s.logger.Debug("Mock Received RecordWorkflowTaskStartedRequest") - startedTasks[taskRequest.TaskId] = true - return &historyservice.RecordWorkflowTaskStartedResponse{ - PreviousStartedEventId: startedEventID, - StartedEventId: startedEventID, - ScheduledEventId: scheduledEventID, - WorkflowType: workflowType, - Attempt: 1, - }, nil - }).AnyTimes() - for j := 0; j < iterations; j++ { - for p := 0; p < engineCount; p++ { - engine := engines[p] - for i := int64(0); i < taskCount; /* incremented explicitly to skip empty polls */ { - result, err := engine.PollWorkflowTaskQueue(context.Background(), 
&matchingservice.PollWorkflowTaskQueueRequest{ - NamespaceId: namespaceID.String(), - PollRequest: &workflowservice.PollWorkflowTaskQueueRequest{ - TaskQueue: taskQueue, - Identity: identity, - }, - }, metrics.NoopMetricsHandler) - if err != nil { - panic(err) - } - s.NotNil(result) - if len(result.TaskToken) == 0 { - s.logger.Debug("empty poll returned") - continue - } - s.EqualValues(workflowExecution, result.WorkflowExecution) - s.EqualValues(workflowType, result.WorkflowType) - s.EqualValues(startedEventID, result.StartedEventId) - s.EqualValues(workflowExecution, result.WorkflowExecution) - taskToken := &tokenspb.Task{ - Attempt: 1, - NamespaceId: namespaceID.String(), - WorkflowId: workflowID, - RunId: runID, - ScheduledEventId: scheduledEventID, - StartedEventId: startedEventID, - } - resultToken, err := engine.tokenSerializer.Deserialize(result.TaskToken) - if err != nil { - panic(err) - } - - // taskToken, _ := s.matchingEngine.tokenSerializer.Serialize(token) - // s.EqualValues(taskToken, result.Task, fmt.Sprintf("%v!=%v", string(taskToken))) - s.EqualValues(taskToken, resultToken, fmt.Sprintf("%v!=%v", taskToken, resultToken)) - i++ - } - } - } - - for _, e := range engines { - e.Stop() - } - - s.EqualValues(0, s.taskManager.getTaskCount(tlID)) - totalTasks := taskCount * engineCount * iterations - persisted := s.taskManager.getCreateTaskCount(tlID) - // No sync matching as all messages are published first - s.EqualValues(totalTasks, persisted) - expectedRange := int64(initialRangeID + persisted/rangeSize) - if persisted%rangeSize > 0 { - expectedRange++ - } - // Due to conflicts some ids are skipped and more real ranges are used. - s.True(expectedRange <= s.taskManager.getTaskQueueManager(tlID).rangeID) -} - -func (s *matchingEngineSuite) TestAddTaskAfterStartFailure() { - runID := uuid.NewRandom().String() - workflowID := "workflow1" - workflowExecution := &commonpb.WorkflowExecution{RunId: runID, WorkflowId: workflowID} - - namespaceID := namespace.ID(uuid.New()) - tl := "makeToast" - tlID := newTestTaskQueueID(namespaceID, tl, enumspb.TASK_QUEUE_TYPE_ACTIVITY) - - taskQueue := &taskqueuepb.TaskQueue{ - Name: tl, - Kind: enumspb.TASK_QUEUE_KIND_NORMAL, - } - - scheduledEventID := int64(0) - addRequest := matchingservice.AddActivityTaskRequest{ - NamespaceId: namespaceID.String(), - Execution: workflowExecution, - ScheduledEventId: scheduledEventID, - TaskQueue: taskQueue, - ScheduleToStartTimeout: timestamp.DurationFromSeconds(100), - } - - _, err := s.matchingEngine.AddActivityTask(context.Background(), &addRequest) - s.NoError(err) - s.EqualValues(1, s.taskManager.getTaskCount(tlID)) - - ctx, err := s.matchingEngine.getTask(context.Background(), tlID, normalStickyInfo, &pollMetadata{}) - s.NoError(err) - - ctx.finish(errors.New("test error")) - s.EqualValues(1, s.taskManager.getTaskCount(tlID)) - ctx2, err := s.matchingEngine.getTask(context.Background(), tlID, normalStickyInfo, &pollMetadata{}) - s.NoError(err) - - s.NotEqual(ctx.event.GetTaskId(), ctx2.event.GetTaskId()) - s.Equal(ctx.event.Data.GetWorkflowId(), ctx2.event.Data.GetWorkflowId()) - s.Equal(ctx.event.Data.GetRunId(), ctx2.event.Data.GetRunId()) - s.Equal(ctx.event.Data.GetScheduledEventId(), ctx2.event.Data.GetScheduledEventId()) - - ctx2.finish(nil) - s.EqualValues(0, s.taskManager.getTaskCount(tlID)) -} - -func (s *matchingEngineSuite) TestTaskQueueManagerGetTaskBatch() { - runID := uuid.NewRandom().String() - workflowID := "workflow1" - workflowExecution := &commonpb.WorkflowExecution{RunId: runID, 
WorkflowId: workflowID} - - namespaceID := namespace.ID(uuid.New()) - tl := "makeToast" - tlID := newTestTaskQueueID(namespaceID, tl, enumspb.TASK_QUEUE_TYPE_ACTIVITY) - - taskQueue := &taskqueuepb.TaskQueue{ - Name: tl, - Kind: enumspb.TASK_QUEUE_KIND_NORMAL, - } - - const taskCount = 1200 - const rangeSize = 10 - s.matchingEngine.config.RangeSize = rangeSize - - // add taskCount tasks - for i := int64(0); i < taskCount; i++ { - scheduledEventID := i * 3 - addRequest := matchingservice.AddActivityTaskRequest{ - NamespaceId: namespaceID.String(), - Execution: workflowExecution, - ScheduledEventId: scheduledEventID, - TaskQueue: taskQueue, - ScheduleToStartTimeout: timestamp.DurationFromSeconds(100), - } - - _, err := s.matchingEngine.AddActivityTask(context.Background(), &addRequest) - s.NoError(err) - } - - tlMgr, ok := s.matchingEngine.taskQueues[*tlID].(*taskQueueManagerImpl) - s.True(ok, "taskQueueManger doesn't implement taskQueueManager interface") - s.EqualValues(taskCount, s.taskManager.getTaskCount(tlID)) - - // wait until all tasks are read by the task pump and enqeued into the in-memory buffer - // at the end of this step, ackManager readLevel will also be equal to the buffer size - expectedBufSize := util.Min(cap(tlMgr.taskReader.taskBuffer), taskCount) - s.True(s.awaitCondition(func() bool { return len(tlMgr.taskReader.taskBuffer) == expectedBufSize }, time.Second)) - - // stop all goroutines that read / write tasks in the background - // remainder of this test works with the in-memory buffer - tlMgr.Stop() - - // setReadLevel should NEVER be called without updating ackManager.outstandingTasks - // This is only for unit test purpose - tlMgr.taskAckManager.setReadLevel(tlMgr.taskWriter.GetMaxReadLevel()) - tasks, readLevel, isReadBatchDone, err := tlMgr.taskReader.getTaskBatch(context.Background()) - s.Nil(err) - s.EqualValues(0, len(tasks)) - s.EqualValues(tlMgr.taskWriter.GetMaxReadLevel(), readLevel) - s.True(isReadBatchDone) - - tlMgr.taskAckManager.setReadLevel(0) - tasks, readLevel, isReadBatchDone, err = tlMgr.taskReader.getTaskBatch(context.Background()) - s.Nil(err) - s.EqualValues(rangeSize, len(tasks)) - s.EqualValues(rangeSize, readLevel) - s.True(isReadBatchDone) - - s.setupRecordActivityTaskStartedMock(tl) - - // reset the ackManager readLevel to the buffer size and consume - // the in-memory tasks by calling Poll API - assert ackMgr state - // at the end - tlMgr.taskAckManager.setReadLevel(int64(expectedBufSize)) - - // complete rangeSize events - for i := int64(0); i < rangeSize; i++ { - identity := "nobody" - result, err := s.matchingEngine.PollActivityTaskQueue(context.Background(), &matchingservice.PollActivityTaskQueueRequest{ - NamespaceId: namespaceID.String(), - PollRequest: &workflowservice.PollActivityTaskQueueRequest{ - TaskQueue: taskQueue, - Identity: identity, - }, - }, metrics.NoopMetricsHandler) - - s.NoError(err) - s.NotNil(result) - s.NotEqual(emptyPollActivityTaskQueueResponse, result) - if len(result.TaskToken) == 0 { - s.logger.Debug("empty poll returned") - continue - } - } - s.EqualValues(taskCount-rangeSize, s.taskManager.getTaskCount(tlID)) - tasks, _, isReadBatchDone, err = tlMgr.taskReader.getTaskBatch(context.Background()) - s.Nil(err) - s.True(0 < len(tasks) && len(tasks) <= rangeSize) - s.True(isReadBatchDone) -} - -func (s *matchingEngineSuite) TestTaskQueueManagerGetTaskBatch_ReadBatchDone() { - namespaceID := namespace.ID(uuid.New()) - tl := "makeToast" - tlID := newTestTaskQueueID(namespaceID, tl, 
enumspb.TASK_QUEUE_TYPE_ACTIVITY) - - const rangeSize = 10 - const maxReadLevel = int64(120) - config := defaultTestConfig() - config.RangeSize = rangeSize - tlMgr0, err := newTaskQueueManager(s.matchingEngine, tlID, normalStickyInfo, config, s.matchingEngine.clusterMeta) - s.NoError(err) - - tlMgr, ok := tlMgr0.(*taskQueueManagerImpl) - s.True(ok) - - tlMgr.Start() - - // tlMgr.taskWriter startup is async so give it time to complete, otherwise - // the following few lines get clobbered as part of the taskWriter.Start() - time.Sleep(100 * time.Millisecond) - - tlMgr.taskAckManager.setReadLevel(0) - atomic.StoreInt64(&tlMgr.taskWriter.maxReadLevel, maxReadLevel) - tasks, readLevel, isReadBatchDone, err := tlMgr.taskReader.getTaskBatch(context.Background()) - s.Empty(tasks) - s.Equal(int64(rangeSize*10), readLevel) - s.False(isReadBatchDone) - s.NoError(err) - - tlMgr.taskAckManager.setReadLevel(readLevel) - tasks, readLevel, isReadBatchDone, err = tlMgr.taskReader.getTaskBatch(context.Background()) - s.Empty(tasks) - s.Equal(maxReadLevel, readLevel) - s.True(isReadBatchDone) - s.NoError(err) -} - -func (s *matchingEngineSuite) TestTaskQueueManager_CyclingBehavior() { - namespaceID := namespace.ID(uuid.New()) - tl := "makeToast" - tlID := newTestTaskQueueID(namespaceID, tl, enumspb.TASK_QUEUE_TYPE_ACTIVITY) - config := defaultTestConfig() - - for i := 0; i < 4; i++ { - prevGetTasksCount := s.taskManager.getGetTasksCount(tlID) - - tlMgr, err := newTaskQueueManager(s.matchingEngine, tlID, normalStickyInfo, config, s.matchingEngine.clusterMeta) - s.NoError(err) - - tlMgr.Start() - // tlMgr.taskWriter startup is async so give it time to complete - time.Sleep(100 * time.Millisecond) - tlMgr.Stop() - - getTasksCount := s.taskManager.getGetTasksCount(tlID) - prevGetTasksCount - s.LessOrEqual(getTasksCount, 1) - } -} - -func (s *matchingEngineSuite) TestTaskExpiryAndCompletion() { - runID := uuid.NewRandom().String() - workflowID := uuid.New() - workflowExecution := &commonpb.WorkflowExecution{RunId: runID, WorkflowId: workflowID} - - namespaceID := namespace.ID(uuid.New()) - tl := "task-expiry-completion-tl0" - tlID := newTestTaskQueueID(namespaceID, tl, enumspb.TASK_QUEUE_TYPE_ACTIVITY) - - taskQueue := &taskqueuepb.TaskQueue{ - Name: tl, - Kind: enumspb.TASK_QUEUE_KIND_NORMAL, - } - - const taskCount = 20 // must be multiple of 4 - const rangeSize = 10 - s.matchingEngine.config.RangeSize = rangeSize - s.matchingEngine.config.MaxTaskDeleteBatchSize = dynamicconfig.GetIntPropertyFilteredByTaskQueueInfo(2) - - testCases := []struct { - maxTimeBtwnDeletes time.Duration - }{ - {time.Minute}, // test taskGC deleting due to size threshold - {time.Nanosecond}, // test taskGC deleting due to time condition - } - - for _, tc := range testCases { - for i := int64(0); i < taskCount; i++ { - scheduledEventID := i * 3 - addRequest := matchingservice.AddActivityTaskRequest{ - NamespaceId: namespaceID.String(), - Execution: workflowExecution, - ScheduledEventId: scheduledEventID, - TaskQueue: taskQueue, - ScheduleToStartTimeout: timestamp.DurationFromSeconds(100), - } - switch i % 4 { - case 0: - // simulates creating a task whose scheduledToStartTimeout is already expired - addRequest.ScheduleToStartTimeout = timestamp.DurationFromSeconds(-5) - case 2: - // simulates creating a task which will time out in the buffer - addRequest.ScheduleToStartTimeout = timestamp.DurationPtr(250 * time.Millisecond) - } - _, err := s.matchingEngine.AddActivityTask(context.Background(), &addRequest) - s.NoError(err) - } - - 
tlMgr, ok := s.matchingEngine.taskQueues[*tlID].(*taskQueueManagerImpl) - s.True(ok, "failed to load task queue") - s.EqualValues(taskCount, s.taskManager.getTaskCount(tlID)) - - // wait until all tasks are loaded by into in-memory buffers by task queue manager - // the buffer size should be one less than expected because dispatcher will dequeue the head - // 1/4 should be thrown out because they are expired before they hit the buffer - s.True(s.awaitCondition(func() bool { return len(tlMgr.taskReader.taskBuffer) >= (3*taskCount/4 - 1) }, time.Second)) - - // ensure the 1/4 of tasks with small ScheduleToStartTimeout will be expired when they come out of the buffer - time.Sleep(300 * time.Millisecond) - - maxTimeBetweenTaskDeletes = tc.maxTimeBtwnDeletes - - s.setupRecordActivityTaskStartedMock(tl) - - pollReq := &matchingservice.PollActivityTaskQueueRequest{ - NamespaceId: namespaceID.String(), - PollRequest: &workflowservice.PollActivityTaskQueueRequest{TaskQueue: taskQueue, Identity: "test"}, - } - - remaining := taskCount - for i := 0; i < 2; i++ { - // verify that (1) expired tasks are not returned in poll result (2) taskCleaner deletes tasks correctly - for i := int64(0); i < taskCount/4; i++ { - result, err := s.matchingEngine.PollActivityTaskQueue(context.Background(), pollReq, metrics.NoopMetricsHandler) - s.NoError(err) - s.NotNil(result) - s.NotEqual(result, emptyPollActivityTaskQueueResponse) - } - remaining -= taskCount / 2 - // since every other task is expired, we expect half the tasks to be deleted - // after poll consumed 1/4th of what is available. - // however, the gc is best-effort and might not run exactly when we want it to. - // various thread interleavings between the two task reader threads and this one - // might leave the gc behind by up to 3 tasks, or ahead by up to 1. 
- delta := remaining - s.taskManager.getTaskCount(tlID) - s.Truef(-3 <= delta && delta <= 1, "remaining %d, getTaskCount %d", remaining, s.taskManager.getTaskCount(tlID)) - } - // ensure full gc for the next case (twice in case one doesn't get the gc lock) - tlMgr.taskGC.RunNow(context.Background(), tlMgr.taskAckManager.getAckLevel()) - tlMgr.taskGC.RunNow(context.Background(), tlMgr.taskAckManager.getAckLevel()) - } -} - -func (s *matchingEngineSuite) TestGetVersioningData() { - namespaceID := namespace.ID(uuid.New()) - tq := "tupac" - - // Ensure we can fetch without first needing to set anything - res, err := s.matchingEngine.GetWorkerBuildIdCompatibility(context.Background(), &matchingservice.GetWorkerBuildIdCompatibilityRequest{ - NamespaceId: namespaceID.String(), - Request: &workflowservice.GetWorkerBuildIdCompatibilityRequest{ - Namespace: namespaceID.String(), - TaskQueue: tq, - MaxSets: 0, - }, - }) - s.NoError(err) - s.NotNil(res) - - // Set a long list of versions - for i := 0; i < 10; i++ { - id := fmt.Sprintf("%d", i) - res, err := s.matchingEngine.UpdateWorkerBuildIdCompatibility(context.Background(), &matchingservice.UpdateWorkerBuildIdCompatibilityRequest{ - NamespaceId: namespaceID.String(), - TaskQueue: tq, - Operation: &matchingservice.UpdateWorkerBuildIdCompatibilityRequest_ApplyPublicRequest_{ - ApplyPublicRequest: &matchingservice.UpdateWorkerBuildIdCompatibilityRequest_ApplyPublicRequest{ - Request: &workflowservice.UpdateWorkerBuildIdCompatibilityRequest{ - Namespace: namespaceID.String(), - TaskQueue: tq, - Operation: &workflowservice.UpdateWorkerBuildIdCompatibilityRequest_AddNewBuildIdInNewDefaultSet{ - AddNewBuildIdInNewDefaultSet: id, - }, - }, - }, - }, - }) - s.NoError(err) - s.NotNil(res) - } - // Make a long compat-versions chain - for i := 0; i < 80; i++ { - id := fmt.Sprintf("9.%d", i) - prevCompat := fmt.Sprintf("9.%d", i-1) - if i == 0 { - prevCompat = "9" - } - res, err := s.matchingEngine.UpdateWorkerBuildIdCompatibility(context.Background(), &matchingservice.UpdateWorkerBuildIdCompatibilityRequest{ - NamespaceId: namespaceID.String(), - TaskQueue: tq, - Operation: &matchingservice.UpdateWorkerBuildIdCompatibilityRequest_ApplyPublicRequest_{ - ApplyPublicRequest: &matchingservice.UpdateWorkerBuildIdCompatibilityRequest_ApplyPublicRequest{ - Request: &workflowservice.UpdateWorkerBuildIdCompatibilityRequest{ - Namespace: namespaceID.String(), - TaskQueue: tq, - Operation: &workflowservice.UpdateWorkerBuildIdCompatibilityRequest_AddNewCompatibleBuildId{ - AddNewCompatibleBuildId: &workflowservice.UpdateWorkerBuildIdCompatibilityRequest_AddNewCompatibleVersion{ - NewBuildId: id, - ExistingCompatibleBuildId: prevCompat, - MakeSetDefault: false, - }, - }, - }, - }, - }, - }) - s.NoError(err) - s.NotNil(res) - } - - // Ensure they all exist - res, err = s.matchingEngine.GetWorkerBuildIdCompatibility(context.Background(), &matchingservice.GetWorkerBuildIdCompatibilityRequest{ - NamespaceId: namespaceID.String(), - Request: &workflowservice.GetWorkerBuildIdCompatibilityRequest{ - Namespace: namespaceID.String(), - TaskQueue: tq, - MaxSets: 0, - }, - }) - s.NoError(err) - majorSets := res.GetResponse().GetMajorVersionSets() - curDefault := majorSets[len(majorSets)-1] - s.NotNil(curDefault) - s.Equal("9", curDefault.GetBuildIds()[0]) - lastNode := curDefault.GetBuildIds()[len(curDefault.GetBuildIds())-1] - s.Equal("9.79", lastNode) - s.Equal("0", majorSets[0].GetBuildIds()[0]) - - // Ensure depth limiting works - res, err = 
s.matchingEngine.GetWorkerBuildIdCompatibility(context.Background(), &matchingservice.GetWorkerBuildIdCompatibilityRequest{ - NamespaceId: namespaceID.String(), - Request: &workflowservice.GetWorkerBuildIdCompatibilityRequest{ - Namespace: namespaceID.String(), - TaskQueue: tq, - MaxSets: 1, - }, - }) - s.NoError(err) - majorSets = res.GetResponse().GetMajorVersionSets() - curDefault = majorSets[len(majorSets)-1] - s.Equal("9", curDefault.GetBuildIds()[0]) - lastNode = curDefault.GetBuildIds()[len(curDefault.GetBuildIds())-1] - s.Equal("9.79", lastNode) - s.Equal(1, len(majorSets)) - - res, err = s.matchingEngine.GetWorkerBuildIdCompatibility(context.Background(), &matchingservice.GetWorkerBuildIdCompatibilityRequest{ - NamespaceId: namespaceID.String(), - Request: &workflowservice.GetWorkerBuildIdCompatibilityRequest{ - Namespace: namespaceID.String(), - TaskQueue: tq, - MaxSets: 5, - }, - }) - s.NoError(err) - majorSets = res.GetResponse().GetMajorVersionSets() - s.Equal("5", majorSets[0].GetBuildIds()[0]) -} - -func (s *matchingEngineSuite) TestGetTaskQueueUserData_NoData() { - namespaceID := namespace.ID(uuid.New()) - tq := "tupac" - - res, err := s.matchingEngine.GetTaskQueueUserData(context.Background(), &matchingservice.GetTaskQueueUserDataRequest{ - NamespaceId: namespaceID.String(), - TaskQueue: tq, - TaskQueueType: enumspb.TASK_QUEUE_TYPE_WORKFLOW, - LastKnownUserDataVersion: 0, - }) - s.NoError(err) - s.False(res.TaskQueueHasUserData) - s.Nil(res.UserData.GetData()) -} - -func (s *matchingEngineSuite) TestGetTaskQueueUserData_ReturnsData() { - namespaceID := namespace.ID(uuid.New()) - tq := "tupac" - - userData := &persistencespb.VersionedTaskQueueUserData{ - Version: 1, - Data: &persistencespb.TaskQueueUserData{Clock: &clockspb.HybridLogicalClock{WallClock: 123456}}, - } - s.taskManager.UpdateTaskQueueUserData(context.Background(), - &persistence.UpdateTaskQueueUserDataRequest{ - NamespaceID: namespaceID.String(), - TaskQueue: tq, - UserData: userData, - }) - userData.Version++ - - res, err := s.matchingEngine.GetTaskQueueUserData(context.Background(), &matchingservice.GetTaskQueueUserDataRequest{ - NamespaceId: namespaceID.String(), - TaskQueue: tq, - TaskQueueType: enumspb.TASK_QUEUE_TYPE_WORKFLOW, - LastKnownUserDataVersion: 0, - }) - s.NoError(err) - s.True(res.TaskQueueHasUserData) - s.Equal(res.UserData, userData) -} - -func (s *matchingEngineSuite) TestGetTaskQueueUserData_ReturnsEmpty() { - namespaceID := namespace.ID(uuid.New()) - tq := "tupac" - - userData := &persistencespb.VersionedTaskQueueUserData{ - Version: 1, - Data: &persistencespb.TaskQueueUserData{Clock: &clockspb.HybridLogicalClock{WallClock: 123456}}, - } - s.taskManager.UpdateTaskQueueUserData(context.Background(), - &persistence.UpdateTaskQueueUserDataRequest{ - NamespaceID: namespaceID.String(), - TaskQueue: tq, - UserData: userData, - }) - userData.Version++ - - res, err := s.matchingEngine.GetTaskQueueUserData(context.Background(), &matchingservice.GetTaskQueueUserDataRequest{ - NamespaceId: namespaceID.String(), - TaskQueue: tq, - TaskQueueType: enumspb.TASK_QUEUE_TYPE_WORKFLOW, - LastKnownUserDataVersion: userData.Version, - }) - s.NoError(err) - s.True(res.TaskQueueHasUserData) - s.Nil(res.UserData.GetData()) -} - -func (s *matchingEngineSuite) TestGetTaskQueueUserData_LongPoll_Expires() { - namespaceID := namespace.ID(uuid.New()) - tq := "tupac" - - userData := &persistencespb.VersionedTaskQueueUserData{ - Version: 1, - Data: &persistencespb.TaskQueueUserData{Clock: 
&clockspb.HybridLogicalClock{WallClock: 123456}}, - } - s.taskManager.UpdateTaskQueueUserData(context.Background(), - &persistence.UpdateTaskQueueUserDataRequest{ - NamespaceID: namespaceID.String(), - TaskQueue: tq, - UserData: userData, - }) - userData.Version++ - - // GetTaskQueueUserData will try to return 5s with a min of 1s before the deadline, so this will block 1s - ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) - defer cancel() - - start := time.Now() - res, err := s.matchingEngine.GetTaskQueueUserData(ctx, &matchingservice.GetTaskQueueUserDataRequest{ - NamespaceId: namespaceID.String(), - TaskQueue: tq, - TaskQueueType: enumspb.TASK_QUEUE_TYPE_WORKFLOW, - LastKnownUserDataVersion: userData.Version, - WaitNewData: true, - }) - s.NoError(err) - s.True(res.TaskQueueHasUserData) - s.Nil(res.UserData.GetData()) - elapsed := time.Since(start) - s.Greater(elapsed, 900*time.Millisecond) -} - -func (s *matchingEngineSuite) TestGetTaskQueueUserData_LongPoll_WakesUp_FromNothing() { - namespaceID := namespace.ID(uuid.New()) - tq := "tupac" - - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - - go func() { - time.Sleep(500 * time.Millisecond) - - _, err := s.matchingEngine.UpdateWorkerBuildIdCompatibility(context.Background(), &matchingservice.UpdateWorkerBuildIdCompatibilityRequest{ - NamespaceId: namespaceID.String(), - TaskQueue: tq, - Operation: &matchingservice.UpdateWorkerBuildIdCompatibilityRequest_ApplyPublicRequest_{ - ApplyPublicRequest: &matchingservice.UpdateWorkerBuildIdCompatibilityRequest_ApplyPublicRequest{ - Request: &workflowservice.UpdateWorkerBuildIdCompatibilityRequest{ - Namespace: namespaceID.String(), - TaskQueue: tq, - Operation: &workflowservice.UpdateWorkerBuildIdCompatibilityRequest_AddNewBuildIdInNewDefaultSet{ - AddNewBuildIdInNewDefaultSet: "v1", - }, - }, - }, - }, - }) - s.NoError(err) - }() - - res, err := s.matchingEngine.GetTaskQueueUserData(ctx, &matchingservice.GetTaskQueueUserDataRequest{ - NamespaceId: namespaceID.String(), - TaskQueue: tq, - TaskQueueType: enumspb.TASK_QUEUE_TYPE_WORKFLOW, - LastKnownUserDataVersion: 0, // must be zero to start - WaitNewData: true, - }) - s.NoError(err) - s.True(res.TaskQueueHasUserData) - s.NotNil(res.UserData.Data.VersioningData) -} - -func (s *matchingEngineSuite) TestGetTaskQueueUserData_LongPoll_WakesUp_From2to3() { - namespaceID := namespace.ID(uuid.New()) - tq := "tupac" - - userData := &persistencespb.VersionedTaskQueueUserData{ - Version: 1, - Data: &persistencespb.TaskQueueUserData{Clock: &clockspb.HybridLogicalClock{WallClock: 123456}}, - } - s.taskManager.UpdateTaskQueueUserData(context.Background(), - &persistence.UpdateTaskQueueUserDataRequest{ - NamespaceID: namespaceID.String(), - TaskQueue: tq, - UserData: userData, - }) - userData.Version++ - - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - - go func() { - time.Sleep(500 * time.Millisecond) - - _, err := s.matchingEngine.UpdateWorkerBuildIdCompatibility(context.Background(), &matchingservice.UpdateWorkerBuildIdCompatibilityRequest{ - NamespaceId: namespaceID.String(), - TaskQueue: tq, - Operation: &matchingservice.UpdateWorkerBuildIdCompatibilityRequest_ApplyPublicRequest_{ - ApplyPublicRequest: &matchingservice.UpdateWorkerBuildIdCompatibilityRequest_ApplyPublicRequest{ - Request: &workflowservice.UpdateWorkerBuildIdCompatibilityRequest{ - Namespace: namespaceID.String(), - TaskQueue: tq, - Operation: 
&workflowservice.UpdateWorkerBuildIdCompatibilityRequest_AddNewBuildIdInNewDefaultSet{ - AddNewBuildIdInNewDefaultSet: "v1", - }, - }, - }, - }, - }) - s.NoError(err) - }() - - res, err := s.matchingEngine.GetTaskQueueUserData(ctx, &matchingservice.GetTaskQueueUserDataRequest{ - NamespaceId: namespaceID.String(), - TaskQueue: tq, - TaskQueueType: enumspb.TASK_QUEUE_TYPE_WORKFLOW, - LastKnownUserDataVersion: userData.Version, - WaitNewData: true, - }) - s.NoError(err) - s.True(res.TaskQueueHasUserData) - s.True(hybrid_logical_clock.Greater(*res.UserData.Data.Clock, *userData.Data.Clock)) - s.NotNil(res.UserData.Data.VersioningData) -} - -func (s *matchingEngineSuite) TestUpdateUserData_FailsOnKnownVersionMismatch() { - namespaceID := namespace.ID(uuid.New()) - tq := "tupac" - - userData := &persistencespb.VersionedTaskQueueUserData{ - Version: 1, - Data: &persistencespb.TaskQueueUserData{Clock: &clockspb.HybridLogicalClock{WallClock: 123456}}, - } - err := s.taskManager.UpdateTaskQueueUserData(context.Background(), - &persistence.UpdateTaskQueueUserDataRequest{ - NamespaceID: namespaceID.String(), - TaskQueue: tq, - UserData: userData, - }) - s.NoError(err) - - _, err = s.matchingEngine.UpdateWorkerBuildIdCompatibility(context.Background(), &matchingservice.UpdateWorkerBuildIdCompatibilityRequest{ - NamespaceId: namespaceID.String(), - TaskQueue: tq, - Operation: &matchingservice.UpdateWorkerBuildIdCompatibilityRequest_RemoveBuildIds_{ - RemoveBuildIds: &matchingservice.UpdateWorkerBuildIdCompatibilityRequest_RemoveBuildIds{ - KnownUserDataVersion: 1, - }, - }, - }) - var failedPreconditionError *serviceerror.FailedPrecondition - s.ErrorAs(err, &failedPreconditionError) -} - -func (s *matchingEngineSuite) TestAddWorkflowTask_ForVersionedWorkflows_SilentlyDroppedWhenDisablingLoadingUserData() { - namespaceId := uuid.New() - tq := taskqueuepb.TaskQueue{ - Name: "test", - Kind: enumspb.TASK_QUEUE_KIND_NORMAL, - } - s.matchingEngine.config.LoadUserData = dynamicconfig.GetBoolPropertyFnFilteredByTaskQueueInfo(false) - - _, err := s.matchingEngine.AddWorkflowTask(context.Background(), &matchingservice.AddWorkflowTaskRequest{ - NamespaceId: namespaceId, - Execution: &commonpb.WorkflowExecution{ - WorkflowId: "test", - RunId: uuid.New(), - }, - TaskQueue: &tq, - ScheduledEventId: 7, - Source: enums.TASK_SOURCE_HISTORY, - VersionDirective: &taskqueue.TaskVersionDirective{ - Value: &taskqueue.TaskVersionDirective_UseDefault{UseDefault: &types.Empty{}}, - }, - }) - s.Require().NoError(err) -} - -func (s *matchingEngineSuite) TestAddActivityTask_ForVersionedWorkflows_SilentlyDroppedWhenDisablingLoadingUserData() { - namespaceId := uuid.New() - tq := taskqueuepb.TaskQueue{ - Name: "test", - Kind: enumspb.TASK_QUEUE_KIND_NORMAL, - } - s.matchingEngine.config.LoadUserData = dynamicconfig.GetBoolPropertyFnFilteredByTaskQueueInfo(false) - - _, err := s.matchingEngine.AddActivityTask(context.Background(), &matchingservice.AddActivityTaskRequest{ - NamespaceId: namespaceId, - Execution: &commonpb.WorkflowExecution{ - WorkflowId: "test", - RunId: uuid.New(), - }, - TaskQueue: &tq, - ScheduledEventId: 7, - Source: enums.TASK_SOURCE_HISTORY, - VersionDirective: &taskqueue.TaskVersionDirective{ - Value: &taskqueue.TaskVersionDirective_UseDefault{UseDefault: &types.Empty{}}, - }, - }) - s.Require().NoError(err) -} - -func (s *matchingEngineSuite) setupRecordActivityTaskStartedMock(tlName string) { - activityTypeName := "activity1" - activityID := "activityId1" - activityType := &commonpb.ActivityType{Name: 
activityTypeName} - activityInput := payloads.EncodeString("Activity1 Input") - - // History service is using mock - s.mockHistoryClient.EXPECT().RecordActivityTaskStarted(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn( - func(ctx context.Context, taskRequest *historyservice.RecordActivityTaskStartedRequest, arg2 ...interface{}) (*historyservice.RecordActivityTaskStartedResponse, error) { - s.logger.Debug("Mock Received RecordActivityTaskStartedRequest") - return &historyservice.RecordActivityTaskStartedResponse{ - Attempt: 1, - ScheduledEvent: newActivityTaskScheduledEvent(taskRequest.ScheduledEventId, 0, - &commandpb.ScheduleActivityTaskCommandAttributes{ - ActivityId: activityID, - TaskQueue: &taskqueuepb.TaskQueue{ - Name: tlName, - Kind: enumspb.TASK_QUEUE_KIND_NORMAL, - }, - ActivityType: activityType, - Input: activityInput, - ScheduleToCloseTimeout: timestamp.DurationPtr(100 * time.Second), - ScheduleToStartTimeout: timestamp.DurationPtr(50 * time.Second), - StartToCloseTimeout: timestamp.DurationPtr(50 * time.Second), - HeartbeatTimeout: timestamp.DurationPtr(10 * time.Second), - }), - }, nil - }).AnyTimes() -} - -func (s *matchingEngineSuite) awaitCondition(cond func() bool, timeout time.Duration) bool { - expiry := time.Now().UTC().Add(timeout) - for !cond() { - time.Sleep(time.Millisecond * 5) - if time.Now().UTC().After(expiry) { - return false - } - } - return true -} - -func newActivityTaskScheduledEvent(eventID int64, workflowTaskCompletedEventID int64, - scheduleAttributes *commandpb.ScheduleActivityTaskCommandAttributes, -) *historypb.HistoryEvent { - historyEvent := newHistoryEvent(eventID, enumspb.EVENT_TYPE_ACTIVITY_TASK_SCHEDULED) - historyEvent.Attributes = &historypb.HistoryEvent_ActivityTaskScheduledEventAttributes{ActivityTaskScheduledEventAttributes: &historypb.ActivityTaskScheduledEventAttributes{ - ActivityId: scheduleAttributes.ActivityId, - ActivityType: scheduleAttributes.ActivityType, - TaskQueue: scheduleAttributes.TaskQueue, - Input: scheduleAttributes.Input, - Header: scheduleAttributes.Header, - ScheduleToCloseTimeout: scheduleAttributes.ScheduleToCloseTimeout, - ScheduleToStartTimeout: scheduleAttributes.ScheduleToStartTimeout, - StartToCloseTimeout: scheduleAttributes.StartToCloseTimeout, - HeartbeatTimeout: scheduleAttributes.HeartbeatTimeout, - WorkflowTaskCompletedEventId: workflowTaskCompletedEventID, - }} - return historyEvent -} - -func newHistoryEvent(eventID int64, eventType enumspb.EventType) *historypb.HistoryEvent { - historyEvent := &historypb.HistoryEvent{ - EventId: eventID, - EventTime: timestamp.TimePtr(time.Now().UTC()), - EventType: eventType, - } - - return historyEvent -} - -var _ persistence.TaskManager = (*testTaskManager)(nil) // Asserts that interface is indeed implemented - -type testTaskManager struct { - sync.Mutex - taskQueues map[taskQueueID]*testTaskQueueManager - logger log.Logger -} - -func newTestTaskManager(logger log.Logger) *testTaskManager { - return &testTaskManager{taskQueues: make(map[taskQueueID]*testTaskQueueManager), logger: logger} -} - -func (m *testTaskManager) GetName() string { - return "test" -} - -func (m *testTaskManager) Close() { -} - -func (m *testTaskManager) getTaskQueueManager(id *taskQueueID) *testTaskQueueManager { - m.Lock() - defer m.Unlock() - result, ok := m.taskQueues[*id] - if ok { - return result - } - result = newTestTaskQueueManager() - m.taskQueues[*id] = result - return result -} - -type testTaskQueueManager struct { - sync.Mutex - rangeID int64 - ackLevel int64 - 
createTaskCount int - getTasksCount int - getUserDataCount int - updateCount int - tasks *treemap.Map - userData *persistencespb.VersionedTaskQueueUserData -} - -func (m *testTaskQueueManager) RangeID() int64 { - m.Lock() - defer m.Unlock() - return m.rangeID -} - -func Int64Comparator(a, b interface{}) int { - aAsserted := a.(int64) - bAsserted := b.(int64) - switch { - case aAsserted > bAsserted: - return 1 - case aAsserted < bAsserted: - return -1 - default: - return 0 - } -} - -func newTestTaskQueueManager() *testTaskQueueManager { - return &testTaskQueueManager{tasks: treemap.NewWith(Int64Comparator)} -} - -func newTestTaskQueueID(namespaceID namespace.ID, name string, taskType enumspb.TaskQueueType) *taskQueueID { - result, err := newTaskQueueID(namespaceID, name, taskType) - if err != nil { - panic(fmt.Sprintf("newTaskQueueID failed with error %v", err)) - } - return result -} - -func (m *testTaskManager) CreateTaskQueue( - _ context.Context, - request *persistence.CreateTaskQueueRequest, -) (*persistence.CreateTaskQueueResponse, error) { - tli := request.TaskQueueInfo - tlm := m.getTaskQueueManager(newTestTaskQueueID(namespace.ID(tli.GetNamespaceId()), tli.Name, tli.TaskType)) - tlm.Lock() - defer tlm.Unlock() - - if tlm.rangeID != 0 { - return nil, &persistence.ConditionFailedError{ - Msg: fmt.Sprintf("Failed to create task queue: name=%v, type=%v", tli.Name, tli.TaskType), - } - } - - tlm.rangeID = request.RangeID - tlm.ackLevel = tli.AckLevel - return &persistence.CreateTaskQueueResponse{}, nil -} - -// UpdateTaskQueue provides a mock function with given fields: request -func (m *testTaskManager) UpdateTaskQueue( - _ context.Context, - request *persistence.UpdateTaskQueueRequest, -) (*persistence.UpdateTaskQueueResponse, error) { - tli := request.TaskQueueInfo - tlm := m.getTaskQueueManager(newTestTaskQueueID(namespace.ID(tli.GetNamespaceId()), tli.Name, tli.TaskType)) - tlm.Lock() - defer tlm.Unlock() - tlm.updateCount++ - - if tlm.rangeID != request.PrevRangeID { - return nil, &persistence.ConditionFailedError{ - Msg: fmt.Sprintf("Failed to update task queue: name=%v, type=%v", tli.Name, tli.TaskType), - } - } - tlm.ackLevel = tli.AckLevel - tlm.rangeID = request.RangeID - return &persistence.UpdateTaskQueueResponse{}, nil -} - -func (m *testTaskManager) GetTaskQueue( - _ context.Context, - request *persistence.GetTaskQueueRequest, -) (*persistence.GetTaskQueueResponse, error) { - tlm := m.getTaskQueueManager(newTestTaskQueueID(namespace.ID(request.NamespaceID), request.TaskQueue, request.TaskType)) - tlm.Lock() - defer tlm.Unlock() - - if tlm.rangeID == 0 { - return nil, serviceerror.NewNotFound("task queue not found") - } - return &persistence.GetTaskQueueResponse{ - TaskQueueInfo: &persistencespb.TaskQueueInfo{ - NamespaceId: request.NamespaceID, - Name: request.TaskQueue, - TaskType: request.TaskType, - Kind: enumspb.TASK_QUEUE_KIND_NORMAL, - AckLevel: tlm.ackLevel, - ExpiryTime: nil, - LastUpdateTime: timestamp.TimeNowPtrUtc(), - }, - RangeID: tlm.rangeID, - }, nil -} - -// CompleteTask provides a mock function with given fields: request -func (m *testTaskManager) CompleteTask( - _ context.Context, - request *persistence.CompleteTaskRequest, -) error { - m.logger.Debug("CompleteTask", tag.TaskID(request.TaskID), tag.Name(request.TaskQueue.TaskQueueName), tag.WorkflowTaskQueueType(request.TaskQueue.TaskQueueType)) - if request.TaskID <= 0 { - panic(fmt.Errorf("invalid taskID=%v", request.TaskID)) - } - - tli := request.TaskQueue - tlm := 
m.getTaskQueueManager(newTestTaskQueueID(namespace.ID(tli.NamespaceID), tli.TaskQueueName, tli.TaskQueueType)) - - tlm.Lock() - defer tlm.Unlock() - - tlm.tasks.Remove(request.TaskID) - return nil -} - -func (m *testTaskManager) CompleteTasksLessThan( - _ context.Context, - request *persistence.CompleteTasksLessThanRequest, -) (int, error) { - tlm := m.getTaskQueueManager(newTestTaskQueueID(namespace.ID(request.NamespaceID), request.TaskQueueName, request.TaskType)) - tlm.Lock() - defer tlm.Unlock() - keys := tlm.tasks.Keys() - for _, key := range keys { - id := key.(int64) - if id < request.ExclusiveMaxTaskID { - tlm.tasks.Remove(id) - } - } - return persistence.UnknownNumRowsAffected, nil -} - -func (m *testTaskManager) ListTaskQueue( - _ context.Context, - _ *persistence.ListTaskQueueRequest, -) (*persistence.ListTaskQueueResponse, error) { - return nil, fmt.Errorf("unsupported operation") -} - -func (m *testTaskManager) DeleteTaskQueue( - _ context.Context, - request *persistence.DeleteTaskQueueRequest, -) error { - m.Lock() - defer m.Unlock() - key := newTestTaskQueueID(namespace.ID(request.TaskQueue.NamespaceID), request.TaskQueue.TaskQueueName, request.TaskQueue.TaskQueueType) - delete(m.taskQueues, *key) - return nil -} - -// CreateTask provides a mock function with given fields: request -func (m *testTaskManager) CreateTasks( - _ context.Context, - request *persistence.CreateTasksRequest, -) (*persistence.CreateTasksResponse, error) { - namespaceID := namespace.ID(request.TaskQueueInfo.Data.GetNamespaceId()) - taskQueue := request.TaskQueueInfo.Data.Name - taskType := request.TaskQueueInfo.Data.TaskType - rangeID := request.TaskQueueInfo.RangeID - - tlm := m.getTaskQueueManager(newTestTaskQueueID(namespaceID, taskQueue, taskType)) - tlm.Lock() - defer tlm.Unlock() - - // First validate the entire batch - for _, task := range request.Tasks { - m.logger.Debug("testTaskManager.CreateTask", tag.TaskID(task.GetTaskId()), tag.ShardRangeID(rangeID)) - if task.GetTaskId() <= 0 { - panic(fmt.Errorf("invalid taskID=%v", task.GetTaskId())) - } - - if tlm.rangeID != rangeID { - m.logger.Debug("testTaskManager.CreateTask ConditionFailedError", - tag.TaskID(task.GetTaskId()), tag.ShardRangeID(rangeID), tag.ShardRangeID(tlm.rangeID)) - - return nil, &persistence.ConditionFailedError{ - Msg: fmt.Sprintf("testTaskManager.CreateTask failed. 
TaskQueue: %v, taskQueueType: %v, rangeID: %v, db rangeID: %v", - taskQueue, taskType, rangeID, tlm.rangeID), - } - } - _, ok := tlm.tasks.Get(task.GetTaskId()) - if ok { - panic(fmt.Sprintf("Duplicated TaskID %v", task.GetTaskId())) - } - } - - // Then insert all tasks if no errors - for _, task := range request.Tasks { - tlm.tasks.Put(task.GetTaskId(), &persistencespb.AllocatedTaskInfo{ - Data: task.Data, - TaskId: task.GetTaskId(), - }) - tlm.createTaskCount++ - } - - return &persistence.CreateTasksResponse{}, nil -} - -// GetTasks provides a mock function with given fields: request -func (m *testTaskManager) GetTasks( - _ context.Context, - request *persistence.GetTasksRequest, -) (*persistence.GetTasksResponse, error) { - m.logger.Debug("testTaskManager.GetTasks", tag.MinLevel(request.InclusiveMinTaskID), tag.MaxLevel(request.ExclusiveMaxTaskID)) - - tlm := m.getTaskQueueManager(newTestTaskQueueID(namespace.ID(request.NamespaceID), request.TaskQueue, request.TaskType)) - tlm.Lock() - defer tlm.Unlock() - var tasks []*persistencespb.AllocatedTaskInfo - - it := tlm.tasks.Iterator() - for it.Next() { - taskID := it.Key().(int64) - if taskID < request.InclusiveMinTaskID { - continue - } - if taskID >= request.ExclusiveMaxTaskID { - break - } - tasks = append(tasks, it.Value().(*persistencespb.AllocatedTaskInfo)) - } - tlm.getTasksCount++ - return &persistence.GetTasksResponse{ - Tasks: tasks, - }, nil -} - -// getTaskCount returns number of tasks in a task queue -func (m *testTaskManager) getTaskCount(taskQueue *taskQueueID) int { - tlm := m.getTaskQueueManager(taskQueue) - tlm.Lock() - defer tlm.Unlock() - return tlm.tasks.Size() -} - -// getCreateTaskCount returns how many times CreateTask was called -func (m *testTaskManager) getCreateTaskCount(taskQueue *taskQueueID) int { - tlm := m.getTaskQueueManager(taskQueue) - tlm.Lock() - defer tlm.Unlock() - return tlm.createTaskCount -} - -// getGetTasksCount returns how many times GetTasks was called -func (m *testTaskManager) getGetTasksCount(taskQueue *taskQueueID) int { - tlm := m.getTaskQueueManager(taskQueue) - tlm.Lock() - defer tlm.Unlock() - return tlm.getTasksCount -} - -// getGetUserDataCount returns how many times GetUserData was called -func (m *testTaskManager) getGetUserDataCount(taskQueue *taskQueueID) int { - tlm := m.getTaskQueueManager(taskQueue) - tlm.Lock() - defer tlm.Unlock() - return tlm.getUserDataCount -} - -// getUpdateCount returns how many times UpdateTaskQueue was called -func (m *testTaskManager) getUpdateCount(taskQueue *taskQueueID) int { - tlm := m.getTaskQueueManager(taskQueue) - tlm.Lock() - defer tlm.Unlock() - return tlm.updateCount -} - -func (m *testTaskManager) String() string { - m.Lock() - defer m.Unlock() - var result string - for id, tl := range m.taskQueues { - tl.Lock() - if id.taskType == enumspb.TASK_QUEUE_TYPE_ACTIVITY { - result += "Activity" - } else { - result += "Workflow" - } - result += " task queue " + id.FullName() - result += "\n" - result += fmt.Sprintf("AckLevel=%v\n", tl.ackLevel) - result += fmt.Sprintf("CreateTaskCount=%v\n", tl.createTaskCount) - result += fmt.Sprintf("RangeID=%v\n", tl.rangeID) - result += "Tasks=\n" - for _, t := range tl.tasks.Values() { - result += fmt.Sprintf("%v\n", t) - } - tl.Unlock() - } - return result -} - -// GetTaskQueueData implements persistence.TaskManager -func (m *testTaskManager) GetTaskQueueUserData(ctx context.Context, request *persistence.GetTaskQueueUserDataRequest) (*persistence.GetTaskQueueUserDataResponse, error) { - tlm := 
m.getTaskQueueManager(newTestTaskQueueID(namespace.ID(request.NamespaceID), request.TaskQueue, enumspb.TASK_QUEUE_TYPE_WORKFLOW)) - tlm.Lock() - defer tlm.Unlock() - tlm.getUserDataCount++ - return &persistence.GetTaskQueueUserDataResponse{ - UserData: tlm.userData, - }, nil -} - -// UpdateTaskQueueUserData implements persistence.TaskManager -func (m *testTaskManager) UpdateTaskQueueUserData(ctx context.Context, request *persistence.UpdateTaskQueueUserDataRequest) error { - tlm := m.getTaskQueueManager(newTestTaskQueueID(namespace.ID(request.NamespaceID), request.TaskQueue, enumspb.TASK_QUEUE_TYPE_WORKFLOW)) - tlm.Lock() - defer tlm.Unlock() - newData := *request.UserData - newData.Version++ - tlm.userData = &newData - return nil -} - -// ListTaskQueueUserDataEntries implements persistence.TaskManager -func (*testTaskManager) ListTaskQueueUserDataEntries(ctx context.Context, request *persistence.ListTaskQueueUserDataEntriesRequest) (*persistence.ListTaskQueueUserDataEntriesResponse, error) { - // No need to implement this for unit tests - panic("unimplemented") -} - -// GetTaskQueuesByBuildId implements persistence.TaskManager -func (*testTaskManager) GetTaskQueuesByBuildId(ctx context.Context, request *persistence.GetTaskQueuesByBuildIdRequest) ([]string, error) { - // No need to implement this for unit tests - panic("unimplemented") -} - -// CountTaskQueuesByBuildId implements persistence.TaskManager -func (*testTaskManager) CountTaskQueuesByBuildId(ctx context.Context, request *persistence.CountTaskQueuesByBuildIdRequest) (int, error) { - // This is only used to validate that the build id to task queue mapping is enforced (at the time of writing), report 0. - return 0, nil -} - -func validateTimeRange(t time.Time, expectedDuration time.Duration) bool { - currentTime := time.Now().UTC() - diff := time.Duration(currentTime.UnixNano() - t.UnixNano()) - if diff > expectedDuration { - fmt.Printf("Current time: %v, Application time: %v, Difference: %v \n", currentTime, t, diff) - return false - } - return true -} - -func defaultTestConfig() *Config { - config := NewConfig(dynamicconfig.NewNoopCollection(), false, false) - config.LongPollExpirationInterval = dynamicconfig.GetDurationPropertyFnFilteredByTaskQueueInfo(100 * time.Millisecond) - config.MaxTaskDeleteBatchSize = dynamicconfig.GetIntPropertyFilteredByTaskQueueInfo(1) - return config -} - -type ( - dynamicRateBurstWrapper struct { - quotas.MutableRateBurst - *quotas.RateLimiterImpl - } -) - -func (d *dynamicRateBurstWrapper) SetRate(rate float64) { - d.MutableRateBurst.SetRate(rate) - d.RateLimiterImpl.SetRate(rate) -} - -func (d *dynamicRateBurstWrapper) SetBurst(burst int) { - d.MutableRateBurst.SetBurst(burst) - d.RateLimiterImpl.SetBurst(burst) -} - -func (d *dynamicRateBurstWrapper) Rate() float64 { - return d.RateLimiterImpl.Rate() -} - -func (d *dynamicRateBurstWrapper) Burst() int { - return d.RateLimiterImpl.Burst() -} diff -Nru temporal-1.21.5-1/src/service/matching/matching_engine.go temporal-1.22.5/src/service/matching/matching_engine.go --- temporal-1.21.5-1/src/service/matching/matching_engine.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/service/matching/matching_engine.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,1519 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package matching + +import ( + "bytes" + "context" + "errors" + "fmt" + "math" + "sync" + "sync/atomic" + "time" + + "github.com/pborman/uuid" + commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" + "go.temporal.io/api/serviceerror" + taskqueuepb "go.temporal.io/api/taskqueue/v1" + "go.temporal.io/api/workflowservice/v1" + + "go.temporal.io/server/api/historyservice/v1" + "go.temporal.io/server/api/matchingservice/v1" + + enumsspb "go.temporal.io/server/api/enums/v1" + persistencespb "go.temporal.io/server/api/persistence/v1" + replicationspb "go.temporal.io/server/api/replication/v1" + tokenspb "go.temporal.io/server/api/token/v1" + "go.temporal.io/server/common" + "go.temporal.io/server/common/clock" + hlc "go.temporal.io/server/common/clock/hybrid_logical_clock" + "go.temporal.io/server/common/cluster" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/log/tag" + "go.temporal.io/server/common/membership" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/namespace" + "go.temporal.io/server/common/persistence" + "go.temporal.io/server/common/persistence/visibility/manager" + "go.temporal.io/server/common/primitives/timestamp" + "go.temporal.io/server/common/resource" + serviceerrors "go.temporal.io/server/common/serviceerror" + "go.temporal.io/server/common/tasktoken" + "go.temporal.io/server/common/worker_versioning" +) + +const ( + // If sticky poller is not seem in last 10s, we treat it as sticky worker unavailable + // This seems aggressive, but the default sticky schedule_to_start timeout is 5s, so 10s seems reasonable. + stickyPollerUnavailableWindow = 10 * time.Second + + recordTaskStartedDefaultTimeout = 10 * time.Second + recordTaskStartedSyncMatchTimeout = 1 * time.Second +) + +type ( + pollerIDCtxKey string + identityCtxKey string + + // lockableQueryTaskMap maps query TaskID (which is a UUID generated in QueryWorkflow() call) to a channel + // that QueryWorkflow() will block on. The channel is unblocked either by worker sending response through + // RespondQueryTaskCompleted() or through an internal service error causing temporal to be unable to dispatch + // query task to workflow worker. 
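The comment above describes the query dispatch protocol: QueryWorkflow generates a task ID, registers a result channel under it, and blocks until a worker response (or an internal error) is delivered. A minimal, self-contained sketch of that locked-map registration pattern follows; it is illustrative only, and the identifiers (queryOutcome, resultMap, put/get/delete) are stand-ins, not the package's real unexported helpers.

package main

import (
	"fmt"
	"sync"
)

// queryOutcome stands in for the real queryResult (worker response or internal error).
type queryOutcome struct {
	answer string
	err    error
}

// resultMap guards a map of query task ID -> result channel with a RWMutex,
// mirroring the lockableQueryTaskMap idea: writers register and remove channels,
// readers look them up to deliver a result.
type resultMap struct {
	sync.RWMutex
	channels map[string]chan *queryOutcome
}

func newResultMap() *resultMap {
	return &resultMap{channels: make(map[string]chan *queryOutcome)}
}

func (m *resultMap) put(id string, ch chan *queryOutcome) {
	m.Lock()
	defer m.Unlock()
	m.channels[id] = ch
}

func (m *resultMap) get(id string) (chan *queryOutcome, bool) {
	m.RLock()
	defer m.RUnlock()
	ch, ok := m.channels[id]
	return ch, ok
}

func (m *resultMap) delete(id string) {
	m.Lock()
	defer m.Unlock()
	delete(m.channels, id)
}

func main() {
	m := newResultMap()
	ch := make(chan *queryOutcome, 1) // buffered so the deliverer never blocks
	m.put("task-1", ch)
	defer m.delete("task-1")

	// Deliverer side (RespondQueryTaskCompleted analogue).
	if dest, ok := m.get("task-1"); ok {
		dest <- &queryOutcome{answer: "42"}
	}

	// Waiter side (QueryWorkflow analogue).
	fmt.Println((<-ch).answer)
}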
+ lockableQueryTaskMap struct { + sync.RWMutex + queryTaskMap map[string]chan *queryResult + } + + lockablePollMap struct { + sync.Mutex + polls map[string]context.CancelFunc + } + + taskQueueCounterKey struct { + namespaceID namespace.ID + taskType enumspb.TaskQueueType + kind enumspb.TaskQueueKind + versioned bool + } + + pollMetadata struct { + ratePerSecond *float64 + workerVersionCapabilities *commonpb.WorkerVersionCapabilities + } + + namespaceUpdateLocks struct { + updateLock sync.Mutex + replicationLock sync.Mutex + } + + // Implements matching.Engine + matchingEngineImpl struct { + status int32 + taskManager persistence.TaskManager + historyClient resource.HistoryClient + matchingRawClient resource.MatchingRawClient + tokenSerializer common.TaskTokenSerializer + logger log.Logger + throttledLogger log.ThrottledLogger + namespaceRegistry namespace.Registry + keyResolver membership.ServiceResolver + clusterMeta cluster.Metadata + timeSource clock.TimeSource + visibilityManager manager.VisibilityManager + metricsHandler metrics.Handler + taskQueuesLock sync.RWMutex // locks mutation of taskQueues + taskQueues map[taskQueueID]taskQueueManager + taskQueueCountLock sync.Mutex + taskQueueCount map[taskQueueCounterKey]int // per-namespace task queue counter + config *Config + lockableQueryTaskMap lockableQueryTaskMap + // pollMap is needed to keep track of all outstanding pollers for a particular + // taskqueue. PollerID generated by frontend is used as the key and CancelFunc is the + // value. This is used to cancel the context to unblock any outstanding poller when + // the frontend detects client connection is closed to prevent tasks being dispatched + // to zombie pollers. + pollMap lockablePollMap + // Only set if global namespaces are enabled on the cluster. 
+ namespaceReplicationQueue persistence.NamespaceReplicationQueue + // Disables concurrent task queue user data updates and replication requests (due to a cassandra limitation) + namespaceUpdateLockMap map[string]*namespaceUpdateLocks + // Serializes access to the per namespace lock map + namespaceUpdateLockMapLock sync.Mutex + } +) + +var ( + // EmptyPollWorkflowTaskQueueResponse is the response when there are no workflow tasks to hand out + emptyPollWorkflowTaskQueueResponse = &matchingservice.PollWorkflowTaskQueueResponse{} + // EmptyPollActivityTaskQueueResponse is the response when there are no activity tasks to hand out + emptyPollActivityTaskQueueResponse = &matchingservice.PollActivityTaskQueueResponse{} + + errNoTasks = errors.New("no tasks") + + pollerIDKey pollerIDCtxKey = "pollerID" + identityKey identityCtxKey = "identity" +) + +var _ Engine = (*matchingEngineImpl)(nil) // Asserts that interface is indeed implemented + +// NewEngine creates an instance of matching engine +func NewEngine( + taskManager persistence.TaskManager, + historyClient resource.HistoryClient, + matchingRawClient resource.MatchingRawClient, + config *Config, + logger log.Logger, + throttledLogger log.ThrottledLogger, + metricsHandler metrics.Handler, + namespaceRegistry namespace.Registry, + resolver membership.ServiceResolver, + clusterMeta cluster.Metadata, + namespaceReplicationQueue persistence.NamespaceReplicationQueue, + visibilityManager manager.VisibilityManager, +) Engine { + + return &matchingEngineImpl{ + status: common.DaemonStatusInitialized, + taskManager: taskManager, + historyClient: historyClient, + matchingRawClient: matchingRawClient, + tokenSerializer: common.NewProtoTaskTokenSerializer(), + logger: log.With(logger, tag.ComponentMatchingEngine), + throttledLogger: log.With(throttledLogger, tag.ComponentMatchingEngine), + namespaceRegistry: namespaceRegistry, + keyResolver: resolver, + clusterMeta: clusterMeta, + timeSource: clock.NewRealTimeSource(), // No need to mock this at the moment + visibilityManager: visibilityManager, + metricsHandler: metricsHandler.WithTags(metrics.OperationTag(metrics.MatchingEngineScope)), + taskQueues: make(map[taskQueueID]taskQueueManager), + taskQueueCount: make(map[taskQueueCounterKey]int), + config: config, + lockableQueryTaskMap: lockableQueryTaskMap{queryTaskMap: make(map[string]chan *queryResult)}, + pollMap: lockablePollMap{polls: make(map[string]context.CancelFunc)}, + namespaceReplicationQueue: namespaceReplicationQueue, + namespaceUpdateLockMap: make(map[string]*namespaceUpdateLocks), + } +} + +func (e *matchingEngineImpl) Start() { + if !atomic.CompareAndSwapInt32( + &e.status, + common.DaemonStatusInitialized, + common.DaemonStatusStarted, + ) { + return + } +} + +func (e *matchingEngineImpl) Stop() { + if !atomic.CompareAndSwapInt32( + &e.status, + common.DaemonStatusStarted, + common.DaemonStatusStopped, + ) { + return + } + + for _, l := range e.getTaskQueues(math.MaxInt32) { + l.Stop() + } +} + +func (e *matchingEngineImpl) getTaskQueues(maxCount int) (lists []taskQueueManager) { + e.taskQueuesLock.RLock() + defer e.taskQueuesLock.RUnlock() + lists = make([]taskQueueManager, 0, len(e.taskQueues)) + count := 0 + for _, tlMgr := range e.taskQueues { + lists = append(lists, tlMgr) + count++ + if count >= maxCount { + break + } + } + return +} + +func (e *matchingEngineImpl) String() string { + // Executes taskQueue.String() on each task queue outside of lock + buf := new(bytes.Buffer) + for _, l := range e.getTaskQueues(1000) { + 
fmt.Fprintf(buf, "\n%s", l.String()) + } + return buf.String() +} + +// Returns taskQueueManager for a task queue. If not already cached, and create is true, tries +// to get new range from DB and create one. This blocks (up to the context deadline) for the +// task queue to be initialized. +// +// Note that stickyInfo is not used as part of the task queue identity. That means that if +// getTaskQueueManager is called twice with the same taskQueue but different stickyInfo, the +// properties of the taskQueueManager will depend on which call came first. In general, we can +// rely on kind being the same for all calls now, but normalName was a later addition to the +// protocol and is not always set consistently. normalName is only required when using +// versioning, and SDKs that support versioning will always set it. The current server version +// will also set it when adding tasks from history. So that particular inconsistency is okay. +func (e *matchingEngineImpl) getTaskQueueManager( + ctx context.Context, + taskQueue *taskQueueID, + stickyInfo stickyInfo, + create bool, +) (taskQueueManager, error) { + tqm, err := e.getTaskQueueManagerNoWait(taskQueue, stickyInfo, create) + if err != nil || tqm == nil { + return nil, err + } + if err = tqm.WaitUntilInitialized(ctx); err != nil { + return nil, err + } + return tqm, nil +} + +// Returns taskQueueManager for a task queue. If not already cached, and create is true, tries +// to get new range from DB and create one. This does not block for the task queue to be +// initialized. +func (e *matchingEngineImpl) getTaskQueueManagerNoWait( + taskQueue *taskQueueID, + stickyInfo stickyInfo, + create bool, +) (taskQueueManager, error) { + e.taskQueuesLock.RLock() + tqm, ok := e.taskQueues[*taskQueue] + e.taskQueuesLock.RUnlock() + if !ok { + if !create { + return nil, nil + } + + // If it gets here, write lock and check again in case a task queue is created between the two locks + e.taskQueuesLock.Lock() + tqm, ok = e.taskQueues[*taskQueue] + if !ok { + var err error + tqm, err = newTaskQueueManager(e, taskQueue, stickyInfo, e.config) + if err != nil { + e.taskQueuesLock.Unlock() + return nil, err + } + e.taskQueues[*taskQueue] = tqm + } + e.taskQueuesLock.Unlock() + + if !ok { + tqm.Start() + e.updateTaskQueueGauge(tqm, 1) + } + } + return tqm, nil +} + +// For use in tests +func (e *matchingEngineImpl) updateTaskQueue(taskQueue *taskQueueID, mgr taskQueueManager) { + e.taskQueuesLock.Lock() + defer e.taskQueuesLock.Unlock() + e.taskQueues[*taskQueue] = mgr +} + +// AddWorkflowTask either delivers task directly to waiting poller or save it into task queue persistence. +func (e *matchingEngineImpl) AddWorkflowTask( + ctx context.Context, + addRequest *matchingservice.AddWorkflowTaskRequest, +) (bool, error) { + namespaceID := namespace.ID(addRequest.GetNamespaceId()) + taskQueueName := addRequest.TaskQueue.GetName() + stickyInfo := stickyInfoFromTaskQueue(addRequest.TaskQueue) + + origTaskQueue, err := newTaskQueueID(namespaceID, taskQueueName, enumspb.TASK_QUEUE_TYPE_WORKFLOW) + if err != nil { + return false, err + } + + sticky := stickyInfo.kind == enumspb.TASK_QUEUE_KIND_STICKY + // do not load sticky task queue if it is not already loaded, which means it has no poller. 
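The getTaskQueueManagerNoWait helper above uses a read-locked fast path, then re-checks under the write lock before creating a manager, and only the goroutine that actually inserted the entry starts it. A compact sketch of that double-checked pattern, under the assumption of hypothetical types (manager, registry) rather than the real taskQueueManager:

package main

import (
	"fmt"
	"sync"
)

// manager stands in for taskQueueManager; only the lifecycle call matters here.
type manager struct{ name string }

func (m *manager) Start() { fmt.Println("started", m.name) }

// registry sketches the double-checked locking used above: a read-locked fast
// path, then a write-locked re-check before creating, so concurrent callers for
// the same queue ID share one manager and Start runs exactly once.
type registry struct {
	mu       sync.RWMutex
	managers map[string]*manager
}

func (r *registry) getOrCreate(id string) *manager {
	r.mu.RLock()
	m, ok := r.managers[id]
	r.mu.RUnlock()
	if ok {
		return m
	}

	r.mu.Lock()
	m, ok = r.managers[id]
	if !ok {
		m = &manager{name: id}
		r.managers[id] = m
	}
	r.mu.Unlock()

	if !ok {
		// Only the inserting goroutine starts the manager, outside the lock,
		// matching the shape of the code above.
		m.Start()
	}
	return m
}

func main() {
	r := &registry{managers: make(map[string]*manager)}
	a := r.getOrCreate("orders")
	b := r.getOrCreate("orders")
	fmt.Println(a == b) // true: one manager per queue ID
}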
+ baseTqm, err := e.getTaskQueueManager(ctx, origTaskQueue, stickyInfo, !sticky) + if err != nil { + return false, err + } else if sticky && !stickyWorkerAvailable(baseTqm) { + return false, serviceerrors.NewStickyWorkerUnavailable() + } + + // We don't need the userDataChanged channel here because: + // - if we sync match or sticky worker unavailable, we're done + // - if we spool to db, we'll re-resolve when it comes out of the db + tqm, _, err := baseTqm.RedirectToVersionedQueueForAdd(ctx, addRequest.VersionDirective) + if err != nil { + return false, err + } + + // This needs to move to history see - https://go.temporal.io/server/issues/181 + var expirationTime *time.Time + now := timestamp.TimePtr(time.Now().UTC()) + expirationDuration := timestamp.DurationValue(addRequest.GetScheduleToStartTimeout()) + if expirationDuration != 0 { + expirationTime = timestamp.TimePtr(now.Add(expirationDuration)) + } + taskInfo := &persistencespb.TaskInfo{ + NamespaceId: namespaceID.String(), + RunId: addRequest.Execution.GetRunId(), + WorkflowId: addRequest.Execution.GetWorkflowId(), + ScheduledEventId: addRequest.GetScheduledEventId(), + Clock: addRequest.GetClock(), + ExpiryTime: expirationTime, + CreateTime: now, + VersionDirective: addRequest.VersionDirective, + } + + return tqm.AddTask(ctx, addTaskParams{ + execution: addRequest.Execution, + taskInfo: taskInfo, + source: addRequest.GetSource(), + forwardedFrom: addRequest.GetForwardedSource(), + baseTqm: baseTqm, + }) +} + +// AddActivityTask either delivers task directly to waiting poller or save it into task queue persistence. +func (e *matchingEngineImpl) AddActivityTask( + ctx context.Context, + addRequest *matchingservice.AddActivityTaskRequest, +) (bool, error) { + namespaceID := namespace.ID(addRequest.GetNamespaceId()) + taskQueueName := addRequest.TaskQueue.GetName() + stickyInfo := stickyInfoFromTaskQueue(addRequest.TaskQueue) + + origTaskQueue, err := newTaskQueueID(namespaceID, taskQueueName, enumspb.TASK_QUEUE_TYPE_ACTIVITY) + if err != nil { + return false, err + } + + baseTqm, err := e.getTaskQueueManager(ctx, origTaskQueue, stickyInfo, true) + if err != nil { + return false, err + } + // We don't need the userDataChanged channel here because: + // - if we sync match, we're done + // - if we spool to db, we'll re-resolve when it comes out of the db + tqm, _, err := baseTqm.RedirectToVersionedQueueForAdd(ctx, addRequest.VersionDirective) + if err != nil { + return false, err + } + + var expirationTime *time.Time + now := timestamp.TimePtr(time.Now().UTC()) + expirationDuration := timestamp.DurationValue(addRequest.GetScheduleToStartTimeout()) + if expirationDuration != 0 { + expirationTime = timestamp.TimePtr(now.Add(expirationDuration)) + } + taskInfo := &persistencespb.TaskInfo{ + NamespaceId: namespaceID.String(), + RunId: addRequest.Execution.GetRunId(), + WorkflowId: addRequest.Execution.GetWorkflowId(), + ScheduledEventId: addRequest.GetScheduledEventId(), + Clock: addRequest.GetClock(), + CreateTime: now, + ExpiryTime: expirationTime, + VersionDirective: addRequest.VersionDirective, + } + + return tqm.AddTask(ctx, addTaskParams{ + execution: addRequest.Execution, + taskInfo: taskInfo, + source: addRequest.GetSource(), + forwardedFrom: addRequest.GetForwardedSource(), + baseTqm: baseTqm, + }) +} + +func (e *matchingEngineImpl) DispatchSpooledTask( + ctx context.Context, + task *internalTask, + origTaskQueue *taskQueueID, + stickyInfo stickyInfo, +) error { + taskInfo := task.event.GetData() + // This task came from taskReader 
so task.event is always set here. + directive := taskInfo.GetVersionDirective() + // If this came from a versioned queue, ignore the version and re-resolve, in case we're + // going to the default and the default changed. + unversionedOrigTaskQueue := newTaskQueueIDWithVersionSet(origTaskQueue, "") + // Redirect and re-resolve if we're blocked in matcher and user data changes. + for { + // If normal queue: always load the base tqm to get versioning data. + // If sticky queue: sticky is not versioned, so if we got here (by taskReader calling this), + // the queue is already loaded. + // So we can always use true here. + baseTqm, err := e.getTaskQueueManager(ctx, unversionedOrigTaskQueue, stickyInfo, true) + if err != nil { + return err + } + tqm, userDataChanged, err := baseTqm.RedirectToVersionedQueueForAdd(ctx, directive) + if err != nil { + return err + } + err = tqm.DispatchSpooledTask(ctx, task, userDataChanged) + if err != errInterrupted { // nolint:goerr113 + return err + } + } +} + +// PollWorkflowTaskQueue tries to get the workflow task using exponential backoff. +func (e *matchingEngineImpl) PollWorkflowTaskQueue( + ctx context.Context, + req *matchingservice.PollWorkflowTaskQueueRequest, + opMetrics metrics.Handler, +) (*matchingservice.PollWorkflowTaskQueueResponse, error) { + namespaceID := namespace.ID(req.GetNamespaceId()) + pollerID := req.GetPollerId() + request := req.PollRequest + taskQueueName := request.TaskQueue.GetName() + stickyInfo := stickyInfoFromTaskQueue(request.TaskQueue) + e.logger.Debug("Received PollWorkflowTaskQueue for taskQueue", tag.WorkflowTaskQueueName(taskQueueName)) +pollLoop: + for { + err := common.IsValidContext(ctx) + if err != nil { + return nil, err + } + // Add frontend generated pollerID to context so taskqueueMgr can support cancellation of + // long-poll when frontend calls CancelOutstandingPoll API + pollerCtx := context.WithValue(ctx, pollerIDKey, pollerID) + pollerCtx = context.WithValue(pollerCtx, identityKey, request.GetIdentity()) + taskQueue, err := newTaskQueueID(namespaceID, taskQueueName, enumspb.TASK_QUEUE_TYPE_WORKFLOW) + if err != nil { + return nil, err + } + pollMetadata := &pollMetadata{ + workerVersionCapabilities: request.WorkerVersionCapabilities, + } + task, err := e.getTask(pollerCtx, taskQueue, stickyInfo, pollMetadata) + if err != nil { + if err == errNoTasks { + return emptyPollWorkflowTaskQueueResponse, nil + } + return nil, err + } + + e.emitForwardedSourceStats(opMetrics, task.isForwarded(), req.GetForwardedSource()) + + if task.isStarted() { + // tasks received from remote are already started. So, simply forward the response + return task.pollWorkflowTaskQueueResponse(), nil + } + + if task.isQuery() { + task.finish(nil) // this only means query task sync match succeed. + + // for query task, we don't need to update history to record workflow task started. but we need to know + // the NextEventID so front end knows what are the history events to load for this workflow task. 
+ mutableStateResp, err := e.historyClient.GetMutableState(ctx, &historyservice.GetMutableStateRequest{ + NamespaceId: req.GetNamespaceId(), + Execution: task.workflowExecution(), + }) + if err != nil { + // will notify query client that the query task failed + _ = e.deliverQueryResult(task.query.taskID, &queryResult{internalError: err}) + return emptyPollWorkflowTaskQueueResponse, nil + } + + // A non-sticky poll may get task for a workflow that has sticky still set in its mutable state after + // their sticky worker is dead for longer than 10s. In such case, we should set this to false so that + // frontend returns full history. + isStickyEnabled := taskQueueName == mutableStateResp.StickyTaskQueue.GetName() + resp := &historyservice.RecordWorkflowTaskStartedResponse{ + PreviousStartedEventId: mutableStateResp.PreviousStartedEventId, + NextEventId: mutableStateResp.NextEventId, + WorkflowType: mutableStateResp.WorkflowType, + StickyExecutionEnabled: isStickyEnabled, + WorkflowExecutionTaskQueue: mutableStateResp.TaskQueue, + BranchToken: mutableStateResp.CurrentBranchToken, + StartedEventId: common.EmptyEventID, + Attempt: 1, + } + return e.createPollWorkflowTaskQueueResponse(task, resp, opMetrics), nil + } + + resp, err := e.recordWorkflowTaskStarted(ctx, request, task) + if err != nil { + switch err.(type) { + case *serviceerror.NotFound: // mutable state not found, workflow not running or workflow task not found + e.logger.Info("Workflow task not found", + tag.WorkflowTaskQueueName(taskQueueName), + tag.WorkflowNamespaceID(task.event.Data.GetNamespaceId()), + tag.WorkflowID(task.event.Data.GetWorkflowId()), + tag.WorkflowRunID(task.event.Data.GetRunId()), + tag.WorkflowTaskQueueName(taskQueueName), + tag.TaskID(task.event.GetTaskId()), + tag.TaskVisibilityTimestamp(timestamp.TimeValue(task.event.Data.GetCreateTime())), + tag.WorkflowEventID(task.event.Data.GetScheduledEventId()), + tag.Error(err), + ) + task.finish(nil) + case *serviceerrors.TaskAlreadyStarted: + e.logger.Debug("Duplicated workflow task", tag.WorkflowTaskQueueName(taskQueueName), tag.TaskID(task.event.GetTaskId())) + task.finish(nil) + default: + task.finish(err) + if err.Error() == common.ErrNamespaceHandover.Error() { + // do not keep polling new tasks when namespace is in handover state + // as record start request will be rejected by history service + return nil, err + } + } + + continue pollLoop + } + task.finish(nil) + return e.createPollWorkflowTaskQueueResponse(task, resp, opMetrics), nil + } +} + +// PollActivityTaskQueue takes one task from the task manager, update workflow execution history, mark task as +// completed and return it to user. If a task from task manager is already started, return an empty response, without +// error. Timeouts handled by the timer queue. 
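Both poll entry points (PollWorkflowTaskQueue above and PollActivityTaskQueue below) share the same loop shape: stale tasks are finished and dropped, the loop keeps polling, and "no tasks" becomes an empty response rather than an error. A compressed, self-contained sketch of that shape; the error values and pollOnce below are stand-ins for the real service errors and the getTask/recordTaskStarted calls.

package main

import (
	"context"
	"errors"
	"fmt"
)

var (
	errNoTasks        = errors.New("no tasks")        // long poll expired with nothing to hand out
	errTaskNotFound   = errors.New("task not found")  // stands in for *serviceerror.NotFound
	errAlreadyStarted = errors.New("already started") // stands in for TaskAlreadyStarted
)

// pollOnce stands in for getTask plus recordTaskStarted; the real calls go to
// the matching and history services.
func pollOnce(ctx context.Context, attempt int) (string, error) {
	if attempt == 0 {
		return "", errTaskNotFound // e.g. the workflow already completed
	}
	return "task-payload", nil
}

// pollLoop mirrors the poll handlers above: stale tasks (NotFound / AlreadyStarted)
// are dropped and polling continues, errNoTasks becomes an empty response, and any
// other error is returned to the caller.
func pollLoop(ctx context.Context) (string, error) {
	for attempt := 0; ; attempt++ {
		if err := ctx.Err(); err != nil {
			return "", err
		}
		task, err := pollOnce(ctx, attempt)
		switch {
		case err == nil:
			return task, nil
		case errors.Is(err, errNoTasks):
			return "", nil // empty response, not an error
		case errors.Is(err, errTaskNotFound), errors.Is(err, errAlreadyStarted):
			continue // drop the stale task and poll again
		default:
			return "", err
		}
	}
}

func main() {
	task, err := pollLoop(context.Background())
	fmt.Println(task, err)
}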
+func (e *matchingEngineImpl) PollActivityTaskQueue( + ctx context.Context, + req *matchingservice.PollActivityTaskQueueRequest, + opMetrics metrics.Handler, +) (*matchingservice.PollActivityTaskQueueResponse, error) { + namespaceID := namespace.ID(req.GetNamespaceId()) + pollerID := req.GetPollerId() + request := req.PollRequest + taskQueueName := request.TaskQueue.GetName() + stickyInfo := stickyInfoFromTaskQueue(request.TaskQueue) + e.logger.Debug("Received PollActivityTaskQueue for taskQueue", tag.Name(taskQueueName)) +pollLoop: + for { + err := common.IsValidContext(ctx) + if err != nil { + return nil, err + } + + taskQueue, err := newTaskQueueID(namespaceID, taskQueueName, enumspb.TASK_QUEUE_TYPE_ACTIVITY) + if err != nil { + return nil, err + } + + // Add frontend generated pollerID to context so taskqueueMgr can support cancellation of + // long-poll when frontend calls CancelOutstandingPoll API + pollerCtx := context.WithValue(ctx, pollerIDKey, pollerID) + pollerCtx = context.WithValue(pollerCtx, identityKey, request.GetIdentity()) + pollMetadata := &pollMetadata{ + workerVersionCapabilities: request.WorkerVersionCapabilities, + } + if request.TaskQueueMetadata != nil && request.TaskQueueMetadata.MaxTasksPerSecond != nil { + pollMetadata.ratePerSecond = &request.TaskQueueMetadata.MaxTasksPerSecond.Value + } + task, err := e.getTask(pollerCtx, taskQueue, stickyInfo, pollMetadata) + if err != nil { + if err == errNoTasks { + return emptyPollActivityTaskQueueResponse, nil + } + return nil, err + } + + e.emitForwardedSourceStats(opMetrics, task.isForwarded(), req.GetForwardedSource()) + + if task.isStarted() { + // tasks received from remote are already started. So, simply forward the response + return task.pollActivityTaskQueueResponse(), nil + } + + resp, err := e.recordActivityTaskStarted(ctx, request, task) + if err != nil { + switch err.(type) { + case *serviceerror.NotFound: // mutable state not found, workflow not running or activity info not found + e.logger.Info("Activity task not found", + tag.WorkflowNamespaceID(task.event.Data.GetNamespaceId()), + tag.WorkflowID(task.event.Data.GetWorkflowId()), + tag.WorkflowRunID(task.event.Data.GetRunId()), + tag.WorkflowTaskQueueName(taskQueueName), + tag.TaskID(task.event.GetTaskId()), + tag.TaskVisibilityTimestamp(timestamp.TimeValue(task.event.Data.GetCreateTime())), + tag.WorkflowEventID(task.event.Data.GetScheduledEventId()), + tag.Error(err), + ) + task.finish(nil) + case *serviceerrors.TaskAlreadyStarted: + e.logger.Debug("Duplicated activity task", tag.WorkflowTaskQueueName(taskQueueName), tag.TaskID(task.event.GetTaskId())) + task.finish(nil) + default: + task.finish(err) + if err.Error() == common.ErrNamespaceHandover.Error() { + // do not keep polling new tasks when namespace is in handover state + // as record start request will be rejected by history service + return nil, err + } + } + + continue pollLoop + } + task.finish(nil) + return e.createPollActivityTaskQueueResponse(task, resp, opMetrics), nil + } +} + +type queryResult struct { + workerResponse *matchingservice.RespondQueryTaskCompletedRequest + internalError error +} + +// QueryWorkflow creates a WorkflowTask with query data, send it through sync match channel, wait for that WorkflowTask +// to be processed by worker, and then return the query result. 
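The QueryWorkflow doc comment above describes the waiting half of that protocol: after the query task is dispatched locally, the caller blocks on a buffered result channel (capacity 1, so delivery never blocks the responder) until either a result arrives or the request context expires. A minimal sketch of that select, with stand-in names (result, waitForQueryResult) rather than the real types:

package main

import (
	"context"
	"fmt"
	"time"
)

// result stands in for the queryResult carried back from RespondQueryTaskCompleted.
type result struct {
	answer string
	err    error
}

// waitForQueryResult blocks until a result is delivered on ch or ctx expires,
// mirroring the select at the end of QueryWorkflow.
func waitForQueryResult(ctx context.Context, ch <-chan *result) (string, error) {
	select {
	case r := <-ch:
		if r.err != nil {
			return "", r.err
		}
		return r.answer, nil
	case <-ctx.Done():
		return "", ctx.Err()
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	ch := make(chan *result, 1)
	go func() { ch <- &result{answer: "query answered"} }() // worker response path

	fmt.Println(waitForQueryResult(ctx, ch))
}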
+func (e *matchingEngineImpl) QueryWorkflow( + ctx context.Context, + queryRequest *matchingservice.QueryWorkflowRequest, +) (*matchingservice.QueryWorkflowResponse, error) { + namespaceID := namespace.ID(queryRequest.GetNamespaceId()) + taskQueueName := queryRequest.TaskQueue.GetName() + stickyInfo := stickyInfoFromTaskQueue(queryRequest.TaskQueue) + + origTaskQueue, err := newTaskQueueID(namespaceID, taskQueueName, enumspb.TASK_QUEUE_TYPE_WORKFLOW) + if err != nil { + return nil, err + } + + sticky := stickyInfo.kind == enumspb.TASK_QUEUE_KIND_STICKY + // do not load sticky task queue if it is not already loaded, which means it has no poller. + baseTqm, err := e.getTaskQueueManager(ctx, origTaskQueue, stickyInfo, !sticky) + if err != nil { + return nil, err + } else if sticky && !stickyWorkerAvailable(baseTqm) { + return nil, serviceerrors.NewStickyWorkerUnavailable() + } + + // We don't need the userDataChanged channel here because we either do this sync (local or remote) + // or fail with a relatively short timeout. + tqm, _, err := baseTqm.RedirectToVersionedQueueForAdd(ctx, queryRequest.VersionDirective) + if err != nil { + return nil, err + } else if tqm.QueueID().VersionSet() == dlqVersionSet { + return nil, serviceerror.NewFailedPrecondition("Operations on versioned workflows are disabled") + } + + taskID := uuid.New() + resp, err := tqm.DispatchQueryTask(ctx, taskID, queryRequest) + + // if we get a response or error it means that query task was handled by forwarding to another matching host + // this remote host's result can be returned directly + if resp != nil || err != nil { + return resp, err + } + + // if we get here it means that dispatch of query task has occurred locally + // must wait on result channel to get query result + queryResultCh := make(chan *queryResult, 1) + e.lockableQueryTaskMap.put(taskID, queryResultCh) + defer e.lockableQueryTaskMap.delete(taskID) + + select { + case result := <-queryResultCh: + if result.internalError != nil { + return nil, result.internalError + } + + workerResponse := result.workerResponse + switch workerResponse.GetCompletedRequest().GetCompletedType() { + case enumspb.QUERY_RESULT_TYPE_ANSWERED: + return &matchingservice.QueryWorkflowResponse{QueryResult: workerResponse.GetCompletedRequest().GetQueryResult()}, nil + case enumspb.QUERY_RESULT_TYPE_FAILED: + return nil, serviceerror.NewQueryFailed(workerResponse.GetCompletedRequest().GetErrorMessage()) + default: + return nil, serviceerror.NewInternal("unknown query completed type") + } + case <-ctx.Done(): + return nil, ctx.Err() + } +} + +func (e *matchingEngineImpl) RespondQueryTaskCompleted( + _ context.Context, + request *matchingservice.RespondQueryTaskCompletedRequest, + opMetrics metrics.Handler, +) error { + if err := e.deliverQueryResult(request.GetTaskId(), &queryResult{workerResponse: request}); err != nil { + opMetrics.Counter(metrics.RespondQueryTaskFailedPerTaskQueueCounter.GetMetricName()).Record(1) + return err + } + return nil +} + +func (e *matchingEngineImpl) deliverQueryResult(taskID string, queryResult *queryResult) error { + queryResultCh, ok := e.lockableQueryTaskMap.get(taskID) + if !ok { + return serviceerror.NewNotFound("query task not found, or already expired") + } + queryResultCh <- queryResult + return nil +} + +func (e *matchingEngineImpl) CancelOutstandingPoll( + _ context.Context, + request *matchingservice.CancelOutstandingPollRequest, +) error { + e.pollMap.cancel(request.PollerId) + return nil +} + +func (e *matchingEngineImpl) DescribeTaskQueue( + 
ctx context.Context, + request *matchingservice.DescribeTaskQueueRequest, +) (*matchingservice.DescribeTaskQueueResponse, error) { + namespaceID := namespace.ID(request.GetNamespaceId()) + taskQueueType := request.DescRequest.GetTaskQueueType() + taskQueueName := request.DescRequest.TaskQueue.GetName() + stickyInfo := stickyInfoFromTaskQueue(request.DescRequest.TaskQueue) + taskQueue, err := newTaskQueueID(namespaceID, taskQueueName, taskQueueType) + if err != nil { + return nil, err + } + tlMgr, err := e.getTaskQueueManager(ctx, taskQueue, stickyInfo, true) + if err != nil { + return nil, err + } + + return tlMgr.DescribeTaskQueue(request.DescRequest.GetIncludeTaskQueueStatus()), nil +} + +func (e *matchingEngineImpl) ListTaskQueuePartitions( + _ context.Context, + request *matchingservice.ListTaskQueuePartitionsRequest, +) (*matchingservice.ListTaskQueuePartitionsResponse, error) { + activityTaskQueueInfo, err := e.listTaskQueuePartitions(request, enumspb.TASK_QUEUE_TYPE_ACTIVITY) + if err != nil { + return nil, err + } + workflowTaskQueueInfo, err := e.listTaskQueuePartitions(request, enumspb.TASK_QUEUE_TYPE_WORKFLOW) + if err != nil { + return nil, err + } + resp := matchingservice.ListTaskQueuePartitionsResponse{ + ActivityTaskQueuePartitions: activityTaskQueueInfo, + WorkflowTaskQueuePartitions: workflowTaskQueueInfo, + } + return &resp, nil +} + +func (e *matchingEngineImpl) listTaskQueuePartitions(request *matchingservice.ListTaskQueuePartitionsRequest, taskQueueType enumspb.TaskQueueType) ([]*taskqueuepb.TaskQueuePartitionMetadata, error) { + partitions, err := e.getAllPartitions( + namespace.Name(request.GetNamespace()), + *request.TaskQueue, + taskQueueType, + ) + + if err != nil { + return nil, err + } + + partitionHostInfo := make([]*taskqueuepb.TaskQueuePartitionMetadata, len(partitions)) + for i, partition := range partitions { + host, err := e.getHostInfo(partition) + if err != nil { + return nil, err + } + + partitionHostInfo[i] = &taskqueuepb.TaskQueuePartitionMetadata{ + Key: partition, + OwnerHostName: host, + } + } + + return partitionHostInfo, nil +} + +func (e *matchingEngineImpl) UpdateWorkerBuildIdCompatibility( + ctx context.Context, + req *matchingservice.UpdateWorkerBuildIdCompatibilityRequest, +) (*matchingservice.UpdateWorkerBuildIdCompatibilityResponse, error) { + namespaceID := namespace.ID(req.GetNamespaceId()) + ns, err := e.namespaceRegistry.GetNamespaceByID(namespaceID) + if err != nil { + return nil, err + } + taskQueueName := req.GetTaskQueue() + taskQueue, err := newTaskQueueID(namespaceID, taskQueueName, enumspb.TASK_QUEUE_TYPE_WORKFLOW) + if err != nil { + return nil, err + } + tqMgr, err := e.getTaskQueueManager(ctx, taskQueue, normalStickyInfo, true) + if err != nil { + return nil, err + } + updateOptions := UserDataUpdateOptions{} + operationCreatedTombstones := false + switch req.GetOperation().(type) { + case *matchingservice.UpdateWorkerBuildIdCompatibilityRequest_ApplyPublicRequest_: + // Only apply the limit when request is initiated by a user. 
+ updateOptions.TaskQueueLimitPerBuildId = e.config.TaskQueueLimitPerBuildId(ns.Name().String()) + case *matchingservice.UpdateWorkerBuildIdCompatibilityRequest_RemoveBuildIds_: + updateOptions.KnownVersion = req.GetRemoveBuildIds().GetKnownUserDataVersion() + } + + err = tqMgr.UpdateUserData(ctx, updateOptions, func(data *persistencespb.TaskQueueUserData) (*persistencespb.TaskQueueUserData, bool, error) { + clk := data.GetClock() + if clk == nil { + tmp := hlc.Zero(e.clusterMeta.GetClusterID()) + clk = &tmp + } + updatedClock := hlc.Next(*clk, e.timeSource) + var versioningData *persistencespb.VersioningData + switch req.GetOperation().(type) { + case *matchingservice.UpdateWorkerBuildIdCompatibilityRequest_ApplyPublicRequest_: + var err error + versioningData, err = UpdateVersionSets( + updatedClock, + data.GetVersioningData(), + req.GetApplyPublicRequest().GetRequest(), + e.config.VersionCompatibleSetLimitPerQueue(ns.Name().String()), + e.config.VersionBuildIdLimitPerQueue(ns.Name().String()), + ) + if err != nil { + return nil, false, err + } + case *matchingservice.UpdateWorkerBuildIdCompatibilityRequest_RemoveBuildIds_: + versioningData = RemoveBuildIds( + updatedClock, + data.GetVersioningData(), + req.GetRemoveBuildIds().GetBuildIds(), + ) + if ns.ReplicationPolicy() == namespace.ReplicationPolicyMultiCluster { + operationCreatedTombstones = true + } else { + // We don't need to keep the tombstones around if we're not replicating them. + versioningData = ClearTombstones(versioningData) + } + case *matchingservice.UpdateWorkerBuildIdCompatibilityRequest_PersistUnknownBuildId: + versioningData = PersistUnknownBuildId( + updatedClock, + data.GetVersioningData(), + req.GetPersistUnknownBuildId(), + ) + default: + return nil, false, serviceerror.NewInvalidArgument(fmt.Sprintf("invalid operation: %v", req.GetOperation())) + } + // Avoid mutation + ret := *data + ret.Clock = &updatedClock + ret.VersioningData = versioningData + return &ret, true, nil + }) + if err != nil { + return nil, err + } + + // Only clear tombstones after they have been replicated. 
+ if operationCreatedTombstones { + err = tqMgr.UpdateUserData(ctx, UserDataUpdateOptions{}, func(data *persistencespb.TaskQueueUserData) (*persistencespb.TaskQueueUserData, bool, error) { + updatedClock := hlc.Next(*data.GetClock(), e.timeSource) + // Avoid mutation + ret := *data + ret.Clock = &updatedClock + ret.VersioningData = ClearTombstones(data.VersioningData) + return &ret, false, nil // Do not replicate the deletion of tombstones + }) + if err != nil { + return nil, err + } + } + return &matchingservice.UpdateWorkerBuildIdCompatibilityResponse{}, nil +} + +func (e *matchingEngineImpl) GetWorkerBuildIdCompatibility( + ctx context.Context, + req *matchingservice.GetWorkerBuildIdCompatibilityRequest, +) (*matchingservice.GetWorkerBuildIdCompatibilityResponse, error) { + namespaceID := namespace.ID(req.GetNamespaceId()) + taskQueueName := req.GetRequest().GetTaskQueue() + taskQueue, err := newTaskQueueID(namespaceID, taskQueueName, enumspb.TASK_QUEUE_TYPE_WORKFLOW) + if err != nil { + return nil, err + } + tqMgr, err := e.getTaskQueueManager(ctx, taskQueue, normalStickyInfo, true) + if err != nil { + if _, ok := err.(*serviceerror.NotFound); ok { + return &matchingservice.GetWorkerBuildIdCompatibilityResponse{}, nil + } + return nil, err + } + userData, _, err := tqMgr.GetUserData() + if err != nil { + return nil, err + } + return &matchingservice.GetWorkerBuildIdCompatibilityResponse{ + Response: ToBuildIdOrderingResponse(userData.GetData().GetVersioningData(), int(req.GetRequest().GetMaxSets())), + }, nil +} + +func (e *matchingEngineImpl) GetTaskQueueUserData( + ctx context.Context, + req *matchingservice.GetTaskQueueUserDataRequest, +) (*matchingservice.GetTaskQueueUserDataResponse, error) { + namespaceID := namespace.ID(req.GetNamespaceId()) + taskQueue, err := newTaskQueueID(namespaceID, req.GetTaskQueue(), req.GetTaskQueueType()) + if err != nil { + return nil, err + } + tqMgr, err := e.getTaskQueueManager(ctx, taskQueue, normalStickyInfo, true) + if err != nil { + return nil, err + } + version := req.GetLastKnownUserDataVersion() + if version < 0 { + return nil, serviceerror.NewInvalidArgument("last_known_user_data_version must not be negative") + } + + if req.WaitNewData { + var cancel context.CancelFunc + ctx, cancel = newChildContext(ctx, e.config.GetUserDataLongPollTimeout(), returnEmptyTaskTimeBudget) + defer cancel() + } + + for { + resp := &matchingservice.GetTaskQueueUserDataResponse{} + userData, userDataChanged, err := tqMgr.GetUserData() + if err != nil { + return nil, err + } + if req.WaitNewData && userData.GetVersion() == version { + // long-poll: wait for data to change/appear + select { + case <-ctx.Done(): + resp.TaskQueueHasUserData = userData != nil + return resp, nil + case <-userDataChanged: + continue + } + } + if userData != nil { + resp.TaskQueueHasUserData = true + if userData.Version > version { + resp.UserData = userData + } else if userData.Version < version { + // This is highly unlikely but may happen due to an edge case in during ownership transfer. + // We rely on client retries in this case to let the system eventually self-heal. 
+ return nil, serviceerror.NewInvalidArgument( + "requested task queue user data for version greater than known version") + } + } + return resp, nil + } +} + +func (e *matchingEngineImpl) ApplyTaskQueueUserDataReplicationEvent( + ctx context.Context, + req *matchingservice.ApplyTaskQueueUserDataReplicationEventRequest, +) (*matchingservice.ApplyTaskQueueUserDataReplicationEventResponse, error) { + namespaceID := namespace.ID(req.GetNamespaceId()) + ns, err := e.namespaceRegistry.GetNamespaceByID(namespaceID) + if err != nil { + return nil, err + } + taskQueueName := req.GetTaskQueue() + taskQueue, err := newTaskQueueID(namespaceID, taskQueueName, enumspb.TASK_QUEUE_TYPE_WORKFLOW) + if err != nil { + return nil, err + } + tqMgr, err := e.getTaskQueueManager(ctx, taskQueue, normalStickyInfo, true) + if err != nil { + return nil, err + } + updateOptions := UserDataUpdateOptions{ + // Avoid setting a limit to allow the replication event to always be applied + TaskQueueLimitPerBuildId: 0, + } + err = tqMgr.UpdateUserData(ctx, updateOptions, func(current *persistencespb.TaskQueueUserData) (*persistencespb.TaskQueueUserData, bool, error) { + mergedUserData := *current + _, buildIdsRemoved := GetBuildIdDeltas(current.GetVersioningData(), req.GetUserData().GetVersioningData()) + var buildIdsToRevive []string + for _, buildId := range buildIdsRemoved { + // We accept that the user data is locked for updates while running these visibility queries. + // Nothing else is _supposed_ to update it on follower (standby) clusters. + exists, err := worker_versioning.WorkflowsExistForBuildId(ctx, e.visibilityManager, ns, req.TaskQueue, buildId) + if err != nil { + return nil, false, err + } + if exists { + buildIdsToRevive = append(buildIdsToRevive, buildId) + } + } + mergedData := MergeVersioningData(current.GetVersioningData(), req.GetUserData().GetVersioningData()) + + for _, buildId := range buildIdsToRevive { + setIdx, buildIdIdx := worker_versioning.FindBuildId(mergedData, buildId) + if setIdx == -1 { + continue + } + set := mergedData.VersionSets[setIdx] + set.BuildIds[buildIdIdx] = e.reviveBuildId(ns, req.GetTaskQueue(), set.GetBuildIds()[buildIdIdx]) + mergedUserData.Clock = hlc.Ptr(hlc.Max(*mergedUserData.Clock, *set.BuildIds[buildIdIdx].StateUpdateTimestamp)) + + setDefault := set.BuildIds[len(set.BuildIds)-1] + if setDefault.State == persistencespb.STATE_DELETED { + // We merged an update which removed (at least) two build ids: the default for set x and another one for set + // x. We discovered we're still using the other one, so we revive it. now we also have to revive the default + // for set x, or it will be left with the wrong default. + set.BuildIds[len(set.BuildIds)-1] = e.reviveBuildId(ns, req.GetTaskQueue(), setDefault) + mergedUserData.Clock = hlc.Ptr(hlc.Max(*mergedUserData.Clock, *setDefault.StateUpdateTimestamp)) + } + } + + // No need to keep the tombstones around after replication. 
+ mergedUserData.VersioningData = ClearTombstones(mergedData) + return &mergedUserData, len(buildIdsToRevive) > 0, nil + }) + return &matchingservice.ApplyTaskQueueUserDataReplicationEventResponse{}, err +} + +func (e *matchingEngineImpl) GetBuildIdTaskQueueMapping( + ctx context.Context, + req *matchingservice.GetBuildIdTaskQueueMappingRequest, +) (*matchingservice.GetBuildIdTaskQueueMappingResponse, error) { + taskQueues, err := e.taskManager.GetTaskQueuesByBuildId(ctx, &persistence.GetTaskQueuesByBuildIdRequest{ + NamespaceID: req.NamespaceId, + BuildID: req.BuildId, + }) + if err != nil { + return nil, err + } + return &matchingservice.GetBuildIdTaskQueueMappingResponse{TaskQueues: taskQueues}, nil +} + +func (e *matchingEngineImpl) ForceUnloadTaskQueue( + ctx context.Context, + req *matchingservice.ForceUnloadTaskQueueRequest, +) (*matchingservice.ForceUnloadTaskQueueResponse, error) { + namespaceID := namespace.ID(req.GetNamespaceId()) + taskQueue, err := newTaskQueueID(namespaceID, req.TaskQueue, req.TaskQueueType) + if err != nil { + return nil, err + } + tqm, err := e.getTaskQueueManager(ctx, taskQueue, normalStickyInfo, false) + if err != nil { + return nil, err + } + if tqm == nil { + return &matchingservice.ForceUnloadTaskQueueResponse{WasLoaded: false}, nil + } + e.unloadTaskQueue(tqm) + return &matchingservice.ForceUnloadTaskQueueResponse{WasLoaded: true}, nil +} + +func (e *matchingEngineImpl) UpdateTaskQueueUserData(ctx context.Context, request *matchingservice.UpdateTaskQueueUserDataRequest) (*matchingservice.UpdateTaskQueueUserDataResponse, error) { + locks := e.getNamespaceUpdateLocks(request.GetNamespaceId()) + locks.updateLock.Lock() + defer locks.updateLock.Unlock() + + err := e.taskManager.UpdateTaskQueueUserData(ctx, &persistence.UpdateTaskQueueUserDataRequest{ + NamespaceID: request.GetNamespaceId(), + TaskQueue: request.GetTaskQueue(), + UserData: request.GetUserData(), + BuildIdsAdded: request.BuildIdsAdded, + BuildIdsRemoved: request.BuildIdsRemoved, + }) + return &matchingservice.UpdateTaskQueueUserDataResponse{}, err +} + +func (e *matchingEngineImpl) ReplicateTaskQueueUserData(ctx context.Context, request *matchingservice.ReplicateTaskQueueUserDataRequest) (*matchingservice.ReplicateTaskQueueUserDataResponse, error) { + if e.namespaceReplicationQueue == nil { + return &matchingservice.ReplicateTaskQueueUserDataResponse{}, nil + } + + locks := e.getNamespaceUpdateLocks(request.GetNamespaceId()) + locks.replicationLock.Lock() + defer locks.replicationLock.Unlock() + + err := e.namespaceReplicationQueue.Publish(ctx, &replicationspb.ReplicationTask{ + TaskType: enumsspb.REPLICATION_TASK_TYPE_TASK_QUEUE_USER_DATA, + Attributes: &replicationspb.ReplicationTask_TaskQueueUserDataAttributes{ + TaskQueueUserDataAttributes: &replicationspb.TaskQueueUserDataAttributes{ + NamespaceId: request.GetNamespaceId(), + TaskQueueName: request.GetTaskQueue(), + UserData: request.GetUserData(), + }, + }, + }) + return &matchingservice.ReplicateTaskQueueUserDataResponse{}, err + +} + +func (e *matchingEngineImpl) getNamespaceUpdateLocks(namespaceId string) *namespaceUpdateLocks { + e.namespaceUpdateLockMapLock.Lock() + defer e.namespaceUpdateLockMapLock.Unlock() + locks, found := e.namespaceUpdateLockMap[namespaceId] + if !found { + locks = &namespaceUpdateLocks{} + e.namespaceUpdateLockMap[namespaceId] = locks + } + return locks +} + +func (e *matchingEngineImpl) getHostInfo(partitionKey string) (string, error) { + host, err := e.keyResolver.Lookup(partitionKey) + if err != nil { + 
return "", err + } + return host.GetAddress(), nil +} + +func (e *matchingEngineImpl) getAllPartitions( + ns namespace.Name, + taskQueue taskqueuepb.TaskQueue, + taskQueueType enumspb.TaskQueueType, +) ([]string, error) { + var partitionKeys []string + namespaceID, err := e.namespaceRegistry.GetNamespaceID(ns) + if err != nil { + return partitionKeys, err + } + taskQueueID, err := newTaskQueueID(namespaceID, taskQueue.GetName(), enumspb.TASK_QUEUE_TYPE_WORKFLOW) + if err != nil { + return partitionKeys, err + } + + n := e.config.NumTaskqueueWritePartitions(ns.String(), taskQueueID.BaseNameString(), taskQueueType) + for i := 0; i < n; i++ { + partitionKeys = append(partitionKeys, taskQueueID.WithPartition(i).FullName()) + } + + return partitionKeys, nil +} + +func (e *matchingEngineImpl) getTask( + ctx context.Context, + origTaskQueue *taskQueueID, + stickyInfo stickyInfo, + pollMetadata *pollMetadata, +) (*internalTask, error) { + baseTqm, err := e.getTaskQueueManager(ctx, origTaskQueue, stickyInfo, true) + if err != nil { + return nil, err + } + + tqm, err := baseTqm.RedirectToVersionedQueueForPoll(ctx, pollMetadata.workerVersionCapabilities) + if err != nil { + if errors.Is(err, errUserDataDisabled) { + // Rewrite to nicer error message + err = serviceerror.NewFailedPrecondition("Operations on versioned workflows are disabled") + } + return nil, err + } + + // We need to set a shorter timeout than the original ctx; otherwise, by the time ctx deadline is + // reached, instead of emptyTask, context timeout error is returned to the frontend by the rpc stack, + // which counts against our SLO. By shortening the timeout by a very small amount, the emptyTask can be + // returned to the handler before a context timeout error is generated. + ctx, cancel := newChildContext(ctx, baseTqm.LongPollExpirationInterval(), returnEmptyTaskTimeBudget) + defer cancel() + + if pollerID, ok := ctx.Value(pollerIDKey).(string); ok && pollerID != "" { + e.pollMap.add(pollerID, cancel) + defer e.pollMap.remove(pollerID) + } + + if identity, ok := ctx.Value(identityKey).(string); ok && identity != "" { + baseTqm.UpdatePollerInfo(pollerIdentity(identity), pollMetadata) + // update timestamp when long poll ends + defer baseTqm.UpdatePollerInfo(pollerIdentity(identity), pollMetadata) + } + + return tqm.GetTask(ctx, pollMetadata) +} + +func (e *matchingEngineImpl) unloadTaskQueue(unloadTQM taskQueueManager) { + queueID := unloadTQM.QueueID() + e.taskQueuesLock.Lock() + foundTQM, ok := e.taskQueues[*queueID] + if !ok || foundTQM != unloadTQM { + e.taskQueuesLock.Unlock() + return + } + delete(e.taskQueues, *queueID) + e.taskQueuesLock.Unlock() + // This may call unloadTaskQueue again but that's okay, the next call will not find it. 
+ foundTQM.Stop() + e.updateTaskQueueGauge(foundTQM, -1) +} + +func (e *matchingEngineImpl) updateTaskQueueGauge(tqm taskQueueManager, delta int) { + id := tqm.QueueID() + countKey := taskQueueCounterKey{ + namespaceID: id.namespaceID, + taskType: id.taskType, + kind: tqm.TaskQueueKind(), + versioned: id.VersionSet() != "", + } + + e.taskQueueCountLock.Lock() + e.taskQueueCount[countKey] += delta + newCount := e.taskQueueCount[countKey] + e.taskQueueCountLock.Unlock() + + nsEntry, err := e.namespaceRegistry.GetNamespaceByID(countKey.namespaceID) + ns := namespace.Name("unknown") + if err == nil { + ns = nsEntry.Name() + } + + e.metricsHandler.Gauge(metrics.LoadedTaskQueueGauge.GetMetricName()).Record( + float64(newCount), + metrics.NamespaceTag(ns.String()), + metrics.TaskTypeTag(countKey.taskType.String()), + metrics.QueueTypeTag(countKey.kind.String()), + metrics.VersionedTag(countKey.versioned), + ) +} + +// Populate the workflow task response based on context and scheduled/started events. +func (e *matchingEngineImpl) createPollWorkflowTaskQueueResponse( + task *internalTask, + historyResponse *historyservice.RecordWorkflowTaskStartedResponse, + metricsHandler metrics.Handler, +) *matchingservice.PollWorkflowTaskQueueResponse { + + var serializedToken []byte + if task.isQuery() { + // for a query task + queryRequest := task.query.request + queryTaskToken := &tokenspb.QueryTask{ + NamespaceId: queryRequest.GetNamespaceId(), + TaskQueue: queryRequest.TaskQueue.Name, + TaskId: task.query.taskID, + } + serializedToken, _ = e.tokenSerializer.SerializeQueryTaskToken(queryTaskToken) + } else { + taskToken := tasktoken.NewWorkflowTaskToken( + task.event.Data.GetNamespaceId(), + task.event.Data.GetWorkflowId(), + task.event.Data.GetRunId(), + historyResponse.GetScheduledEventId(), + historyResponse.GetStartedEventId(), + historyResponse.GetStartedTime(), + historyResponse.GetAttempt(), + historyResponse.GetClock(), + historyResponse.GetVersion(), + ) + serializedToken, _ = e.tokenSerializer.Serialize(taskToken) + if task.responseC == nil { + ct := timestamp.TimeValue(task.event.Data.CreateTime) + metricsHandler.Timer(metrics.AsyncMatchLatencyPerTaskQueue.GetMetricName()).Record(time.Since(ct)) + } + } + + response := common.CreateMatchingPollWorkflowTaskQueueResponse( + historyResponse, + task.workflowExecution(), + serializedToken) + + if task.query != nil { + response.Query = task.query.request.QueryRequest.Query + } + if task.backlogCountHint != nil { + response.BacklogCountHint = task.backlogCountHint() + } + return response +} + +// Populate the activity task response based on context and scheduled/started events. 
+func (e *matchingEngineImpl) createPollActivityTaskQueueResponse( + task *internalTask, + historyResponse *historyservice.RecordActivityTaskStartedResponse, + metricsHandler metrics.Handler, +) *matchingservice.PollActivityTaskQueueResponse { + + scheduledEvent := historyResponse.ScheduledEvent + if scheduledEvent.GetActivityTaskScheduledEventAttributes() == nil { + panic("GetActivityTaskScheduledEventAttributes is not set") + } + attributes := scheduledEvent.GetActivityTaskScheduledEventAttributes() + if attributes.ActivityId == "" { + panic("ActivityTaskScheduledEventAttributes.ActivityID is not set") + } + if task.responseC == nil { + ct := timestamp.TimeValue(task.event.Data.CreateTime) + metricsHandler.Timer(metrics.AsyncMatchLatencyPerTaskQueue.GetMetricName()).Record(time.Since(ct)) + } + + taskToken := tasktoken.NewActivityTaskToken( + task.event.Data.GetNamespaceId(), + task.event.Data.GetWorkflowId(), + task.event.Data.GetRunId(), + task.event.Data.GetScheduledEventId(), + attributes.GetActivityId(), + attributes.GetActivityType().GetName(), + historyResponse.GetAttempt(), + historyResponse.GetClock(), + historyResponse.GetVersion(), + ) + serializedToken, _ := e.tokenSerializer.Serialize(taskToken) + + // This is here to ensure that this field is never nil as expected by the TS SDK. + // This may happen if ScheduleActivityExecution was recorded in version 1.23. + scheduleToCloseTimeout := attributes.ScheduleToCloseTimeout + if scheduleToCloseTimeout == nil { + scheduleToCloseTimeout = timestamp.DurationPtr(0) + } + + return &matchingservice.PollActivityTaskQueueResponse{ + ActivityId: attributes.ActivityId, + ActivityType: attributes.ActivityType, + Header: attributes.Header, + Input: attributes.Input, + WorkflowExecution: task.workflowExecution(), + CurrentAttemptScheduledTime: historyResponse.CurrentAttemptScheduledTime, + ScheduledTime: scheduledEvent.EventTime, + ScheduleToCloseTimeout: scheduleToCloseTimeout, + StartedTime: historyResponse.StartedTime, + StartToCloseTimeout: attributes.StartToCloseTimeout, + HeartbeatTimeout: attributes.HeartbeatTimeout, + TaskToken: serializedToken, + Attempt: taskToken.Attempt, + HeartbeatDetails: historyResponse.HeartbeatDetails, + WorkflowType: historyResponse.WorkflowType, + WorkflowNamespace: historyResponse.WorkflowNamespace, + } +} + +func (e *matchingEngineImpl) recordWorkflowTaskStarted( + ctx context.Context, + pollReq *workflowservice.PollWorkflowTaskQueueRequest, + task *internalTask, +) (*historyservice.RecordWorkflowTaskStartedResponse, error) { + ctx, cancel := newRecordTaskStartedContext(ctx, task) + defer cancel() + + return e.historyClient.RecordWorkflowTaskStarted(ctx, &historyservice.RecordWorkflowTaskStartedRequest{ + NamespaceId: task.event.Data.GetNamespaceId(), + WorkflowExecution: task.workflowExecution(), + ScheduledEventId: task.event.Data.GetScheduledEventId(), + Clock: task.event.Data.GetClock(), + TaskId: task.event.GetTaskId(), + RequestId: uuid.New(), + PollRequest: pollReq, + }) +} + +func (e *matchingEngineImpl) recordActivityTaskStarted( + ctx context.Context, + pollReq *workflowservice.PollActivityTaskQueueRequest, + task *internalTask, +) (*historyservice.RecordActivityTaskStartedResponse, error) { + ctx, cancel := newRecordTaskStartedContext(ctx, task) + defer cancel() + + return e.historyClient.RecordActivityTaskStarted(ctx, &historyservice.RecordActivityTaskStartedRequest{ + NamespaceId: task.event.Data.GetNamespaceId(), + WorkflowExecution: task.workflowExecution(), + ScheduledEventId: 
task.event.Data.GetScheduledEventId(), + Clock: task.event.Data.GetClock(), + TaskId: task.event.GetTaskId(), + RequestId: uuid.New(), + PollRequest: pollReq, + }) +} + +func (e *matchingEngineImpl) emitForwardedSourceStats( + metricsHandler metrics.Handler, + isTaskForwarded bool, + pollForwardedSource string, +) { + isPollForwarded := len(pollForwardedSource) > 0 + switch { + case isTaskForwarded && isPollForwarded: + metricsHandler.Counter(metrics.RemoteToRemoteMatchPerTaskQueueCounter.GetMetricName()).Record(1) + case isTaskForwarded: + metricsHandler.Counter(metrics.RemoteToLocalMatchPerTaskQueueCounter.GetMetricName()).Record(1) + case isPollForwarded: + metricsHandler.Counter(metrics.LocalToRemoteMatchPerTaskQueueCounter.GetMetricName()).Record(1) + default: + metricsHandler.Counter(metrics.LocalToLocalMatchPerTaskQueueCounter.GetMetricName()).Record(1) + } +} + +func (m *lockableQueryTaskMap) put(key string, value chan *queryResult) { + m.Lock() + defer m.Unlock() + m.queryTaskMap[key] = value +} + +func (m *lockableQueryTaskMap) get(key string) (chan *queryResult, bool) { + m.RLock() + defer m.RUnlock() + result, ok := m.queryTaskMap[key] + return result, ok +} + +func (m *lockableQueryTaskMap) delete(key string) { + m.Lock() + defer m.Unlock() + delete(m.queryTaskMap, key) +} + +func (m *lockablePollMap) add(cancelId string, cancel context.CancelFunc) { + m.Lock() + defer m.Unlock() + m.polls[cancelId] = cancel +} + +func (m *lockablePollMap) remove(cancelId string) { + m.Lock() + defer m.Unlock() + delete(m.polls, cancelId) +} + +func (m *lockablePollMap) cancel(cancelId string) { + m.Lock() + defer m.Unlock() + if cancel, ok := m.polls[cancelId]; ok { + cancel() + delete(m.polls, cancelId) + } +} + +// newRecordTaskStartedContext creates a context for recording +// activity or workflow task started. The parentCtx from the +// pollActivity/WorkflowTaskQueue endpoint (which is a long poll +// API) has a long timeout and is unsuitable for recording task started, +// especially if the task is doing sync match and has a caller +// (history transfer queue) waiting for a response. +func newRecordTaskStartedContext( + parentCtx context.Context, + task *internalTask, +) (context.Context, context.CancelFunc) { + timeout := recordTaskStartedDefaultTimeout + if task.isSyncMatchTask() { + timeout = recordTaskStartedSyncMatchTimeout + } + + return context.WithTimeout(parentCtx, timeout) +} + +// Revives a deleted build id, updating its HLC timestamp. +// Returns a new build id, leaving the provided one untouched. +func (e *matchingEngineImpl) reviveBuildId(ns *namespace.Namespace, taskQueue string, buildId *persistencespb.BuildId) *persistencespb.BuildId { + // Bump the stamp and ensure it's newer than the deletion stamp. + prevStamp := *buildId.StateUpdateTimestamp + stamp := hlc.Next(prevStamp, e.timeSource) + stamp.ClusterId = e.clusterMeta.GetClusterID() + e.logger.Info("Revived build id while applying replication event", + tag.WorkflowNamespace(ns.Name().String()), + tag.WorkflowTaskQueueName(taskQueue), + tag.BuildId(buildId.Id)) + return &persistencespb.BuildId{ + Id: buildId.GetId(), + State: persistencespb.STATE_ACTIVE, + StateUpdateTimestamp: &stamp, + BecameDefaultTimestamp: buildId.BecameDefaultTimestamp, + } +} + +// We use a very short timeout for considering a sticky worker available, since tasks can also +// be processed on the normal queue.
+func stickyWorkerAvailable(tqm taskQueueManager) bool { + return tqm != nil && tqm.HasPollerAfter(time.Now().Add(-stickyPollerUnavailableWindow)) +} diff -Nru temporal-1.21.5-1/src/service/matching/matching_engine_interfaces.go temporal-1.22.5/src/service/matching/matching_engine_interfaces.go --- temporal-1.21.5-1/src/service/matching/matching_engine_interfaces.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/service/matching/matching_engine_interfaces.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,57 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package matching + +import ( + "context" + + "go.temporal.io/server/api/matchingservice/v1" + "go.temporal.io/server/common/metrics" +) + +type ( + // Engine exposes interfaces for clients to interact with the matching engine + Engine interface { + Start() + Stop() + AddWorkflowTask(ctx context.Context, addRequest *matchingservice.AddWorkflowTaskRequest) (syncMatch bool, err error) + AddActivityTask(ctx context.Context, addRequest *matchingservice.AddActivityTaskRequest) (syncMatch bool, err error) + PollWorkflowTaskQueue(ctx context.Context, request *matchingservice.PollWorkflowTaskQueueRequest, opMetrics metrics.Handler) (*matchingservice.PollWorkflowTaskQueueResponse, error) + PollActivityTaskQueue(ctx context.Context, request *matchingservice.PollActivityTaskQueueRequest, opMetrics metrics.Handler) (*matchingservice.PollActivityTaskQueueResponse, error) + QueryWorkflow(ctx context.Context, request *matchingservice.QueryWorkflowRequest) (*matchingservice.QueryWorkflowResponse, error) + RespondQueryTaskCompleted(ctx context.Context, request *matchingservice.RespondQueryTaskCompletedRequest, opMetrics metrics.Handler) error + CancelOutstandingPoll(ctx context.Context, request *matchingservice.CancelOutstandingPollRequest) error + DescribeTaskQueue(ctx context.Context, request *matchingservice.DescribeTaskQueueRequest) (*matchingservice.DescribeTaskQueueResponse, error) + ListTaskQueuePartitions(ctx context.Context, request *matchingservice.ListTaskQueuePartitionsRequest) (*matchingservice.ListTaskQueuePartitionsResponse, error) + UpdateWorkerBuildIdCompatibility(ctx context.Context, request *matchingservice.UpdateWorkerBuildIdCompatibilityRequest) (*matchingservice.UpdateWorkerBuildIdCompatibilityResponse, error) + GetWorkerBuildIdCompatibility(ctx 
context.Context, request *matchingservice.GetWorkerBuildIdCompatibilityRequest) (*matchingservice.GetWorkerBuildIdCompatibilityResponse, error) + GetTaskQueueUserData(ctx context.Context, request *matchingservice.GetTaskQueueUserDataRequest) (*matchingservice.GetTaskQueueUserDataResponse, error) + ApplyTaskQueueUserDataReplicationEvent(ctx context.Context, request *matchingservice.ApplyTaskQueueUserDataReplicationEventRequest) (*matchingservice.ApplyTaskQueueUserDataReplicationEventResponse, error) + GetBuildIdTaskQueueMapping(ctx context.Context, request *matchingservice.GetBuildIdTaskQueueMappingRequest) (*matchingservice.GetBuildIdTaskQueueMappingResponse, error) + ForceUnloadTaskQueue(ctx context.Context, request *matchingservice.ForceUnloadTaskQueueRequest) (*matchingservice.ForceUnloadTaskQueueResponse, error) + UpdateTaskQueueUserData(ctx context.Context, request *matchingservice.UpdateTaskQueueUserDataRequest) (*matchingservice.UpdateTaskQueueUserDataResponse, error) + ReplicateTaskQueueUserData(ctx context.Context, request *matchingservice.ReplicateTaskQueueUserDataRequest) (*matchingservice.ReplicateTaskQueueUserDataResponse, error) + } +) diff -Nru temporal-1.21.5-1/src/service/matching/matching_engine_test.go temporal-1.22.5/src/service/matching/matching_engine_test.go --- temporal-1.21.5-1/src/service/matching/matching_engine_test.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/service/matching/matching_engine_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,3081 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package matching + +import ( + "context" + "errors" + "fmt" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/emirpasic/gods/maps/treemap" + "github.com/gogo/protobuf/types" + "github.com/golang/mock/gomock" + "github.com/pborman/uuid" + "github.com/stretchr/testify/suite" + "github.com/uber-go/tally/v4" + + commandpb "go.temporal.io/api/command/v1" + commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" + historypb "go.temporal.io/api/history/v1" + querypb "go.temporal.io/api/query/v1" + "go.temporal.io/api/serviceerror" + taskqueuepb "go.temporal.io/api/taskqueue/v1" + "go.temporal.io/api/workflowservice/v1" + + clockspb "go.temporal.io/server/api/clock/v1" + "go.temporal.io/server/api/enums/v1" + "go.temporal.io/server/api/historyservice/v1" + "go.temporal.io/server/api/historyservicemock/v1" + "go.temporal.io/server/api/matchingservice/v1" + "go.temporal.io/server/api/matchingservicemock/v1" + persistencespb "go.temporal.io/server/api/persistence/v1" + "go.temporal.io/server/api/taskqueue/v1" + tokenspb "go.temporal.io/server/api/token/v1" + "go.temporal.io/server/common" + "go.temporal.io/server/common/clock" + "go.temporal.io/server/common/clock/hybrid_logical_clock" + hlc "go.temporal.io/server/common/clock/hybrid_logical_clock" + "go.temporal.io/server/common/cluster" + "go.temporal.io/server/common/dynamicconfig" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/log/tag" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/namespace" + "go.temporal.io/server/common/payload" + "go.temporal.io/server/common/payloads" + "go.temporal.io/server/common/persistence" + "go.temporal.io/server/common/persistence/visibility/manager" + "go.temporal.io/server/common/primitives" + "go.temporal.io/server/common/primitives/timestamp" + "go.temporal.io/server/common/quotas" + serviceerrors "go.temporal.io/server/common/serviceerror" + "go.temporal.io/server/common/util" +) + +type ( + matchingEngineSuite struct { + suite.Suite + controller *gomock.Controller + mockHistoryClient *historyservicemock.MockHistoryServiceClient + mockMatchingClient *matchingservicemock.MockMatchingServiceClient + mockNamespaceCache *namespace.MockRegistry + mockVisibilityManager *manager.MockVisibilityManager + + matchingEngine *matchingEngineImpl + taskManager *testTaskManager + logger log.Logger + sync.Mutex + } +) + +const ( + matchingTestNamespace = "matching-test" +) + +func TestMatchingEngineSuite(t *testing.T) { + s := new(matchingEngineSuite) + suite.Run(t, s) +} + +func (s *matchingEngineSuite) SetupSuite() { +} + +func (s *matchingEngineSuite) TearDownSuite() { +} + +func (s *matchingEngineSuite) SetupTest() { + s.logger = log.NewTestLogger() + s.Lock() + defer s.Unlock() + s.controller = gomock.NewController(s.T()) + s.mockHistoryClient = historyservicemock.NewMockHistoryServiceClient(s.controller) + s.mockMatchingClient = matchingservicemock.NewMockMatchingServiceClient(s.controller) + s.mockMatchingClient.EXPECT().GetTaskQueueUserData(gomock.Any(), gomock.Any()). + Return(&matchingservice.GetTaskQueueUserDataResponse{}, nil).AnyTimes() + s.mockMatchingClient.EXPECT().UpdateTaskQueueUserData(gomock.Any(), gomock.Any()). + Return(&matchingservice.UpdateTaskQueueUserDataResponse{}, nil).AnyTimes() + s.mockMatchingClient.EXPECT().ReplicateTaskQueueUserData(gomock.Any(), gomock.Any()). 
+ Return(&matchingservice.ReplicateTaskQueueUserDataResponse{}, nil).AnyTimes() + s.taskManager = newTestTaskManager(s.logger) + s.mockNamespaceCache = namespace.NewMockRegistry(s.controller) + ns := namespace.NewLocalNamespaceForTest(&persistencespb.NamespaceInfo{Name: matchingTestNamespace}, nil, "") + s.mockNamespaceCache.EXPECT().GetNamespaceByID(gomock.Any()).Return(ns, nil).AnyTimes() + s.mockNamespaceCache.EXPECT().GetNamespaceName(gomock.Any()).Return(ns.Name(), nil).AnyTimes() + s.mockVisibilityManager = manager.NewMockVisibilityManager(s.controller) + s.mockVisibilityManager.EXPECT().Close().AnyTimes() + + s.matchingEngine = s.newMatchingEngine(defaultTestConfig(), s.taskManager) + s.matchingEngine.Start() +} + +func (s *matchingEngineSuite) TearDownTest() { + s.matchingEngine.Stop() + s.controller.Finish() +} + +func (s *matchingEngineSuite) newMatchingEngine( + config *Config, taskMgr persistence.TaskManager, +) *matchingEngineImpl { + return newMatchingEngine(config, taskMgr, s.mockHistoryClient, s.logger, s.mockNamespaceCache, s.mockMatchingClient, s.mockVisibilityManager) +} + +func newMatchingEngine( + config *Config, taskMgr persistence.TaskManager, mockHistoryClient historyservice.HistoryServiceClient, + logger log.Logger, mockNamespaceCache namespace.Registry, mockMatchingClient matchingservice.MatchingServiceClient, + mockVisibilityManager manager.VisibilityManager, +) *matchingEngineImpl { + return &matchingEngineImpl{ + taskManager: taskMgr, + historyClient: mockHistoryClient, + taskQueues: make(map[taskQueueID]taskQueueManager), + taskQueueCount: make(map[taskQueueCounterKey]int), + logger: logger, + throttledLogger: log.ThrottledLogger(logger), + metricsHandler: metrics.NoopMetricsHandler, + matchingRawClient: mockMatchingClient, + tokenSerializer: common.NewProtoTaskTokenSerializer(), + config: config, + namespaceRegistry: mockNamespaceCache, + clusterMeta: cluster.NewMetadataForTest(cluster.NewTestClusterMetadataConfig(false, true)), + timeSource: clock.NewRealTimeSource(), + visibilityManager: mockVisibilityManager, + } +} + +func (s *matchingEngineSuite) TestAckManager() { + m := newAckManager(s.logger) + m.setAckLevel(100) + s.EqualValues(100, m.getAckLevel()) + s.EqualValues(100, m.getReadLevel()) + const t1 = 200 + const t2 = 220 + const t3 = 320 + const t4 = 340 + const t5 = 360 + const t6 = 380 + + m.addTask(t1) + s.EqualValues(100, m.getAckLevel()) + s.EqualValues(t1, m.getReadLevel()) + + m.addTask(t2) + s.EqualValues(100, m.getAckLevel()) + s.EqualValues(t2, m.getReadLevel()) + + m.completeTask(t2) + s.EqualValues(100, m.getAckLevel()) + s.EqualValues(t2, m.getReadLevel()) + + m.completeTask(t1) + s.EqualValues(t2, m.getAckLevel()) + s.EqualValues(t2, m.getReadLevel()) + + m.setAckLevel(300) + s.EqualValues(300, m.getAckLevel()) + s.EqualValues(300, m.getReadLevel()) + + m.addTask(t3) + s.EqualValues(300, m.getAckLevel()) + s.EqualValues(t3, m.getReadLevel()) + + m.addTask(t4) + s.EqualValues(300, m.getAckLevel()) + s.EqualValues(t4, m.getReadLevel()) + + m.completeTask(t3) + s.EqualValues(t3, m.getAckLevel()) + s.EqualValues(t4, m.getReadLevel()) + + m.completeTask(t4) + s.EqualValues(t4, m.getAckLevel()) + s.EqualValues(t4, m.getReadLevel()) + + m.setReadLevel(t5) + s.EqualValues(t5, m.getReadLevel()) + + m.setAckLevel(t5) + m.setReadLevelAfterGap(t6) + s.EqualValues(t6, m.getReadLevel()) + s.EqualValues(t6, m.getAckLevel()) +} + +func (s *matchingEngineSuite) TestAckManager_Sort() { + m := newAckManager(s.logger) + const t0 = 100 + m.setAckLevel(t0) + 
s.EqualValues(t0, m.getAckLevel()) + s.EqualValues(t0, m.getReadLevel()) + const t1 = 200 + const t2 = 220 + const t3 = 320 + const t4 = 340 + const t5 = 360 + + m.addTask(t1) + m.addTask(t2) + m.addTask(t3) + m.addTask(t4) + m.addTask(t5) + + m.completeTask(t2) + s.EqualValues(t0, m.getAckLevel()) + + m.completeTask(t1) + s.EqualValues(t2, m.getAckLevel()) + + m.completeTask(t5) + s.EqualValues(t2, m.getAckLevel()) + + m.completeTask(t4) + s.EqualValues(t2, m.getAckLevel()) + + m.completeTask(t3) + s.EqualValues(t5, m.getAckLevel()) +} + +func (s *matchingEngineSuite) TestPollActivityTaskQueuesEmptyResult() { + s.PollForTasksEmptyResultTest(context.Background(), enumspb.TASK_QUEUE_TYPE_ACTIVITY) +} + +func (s *matchingEngineSuite) TestPollWorkflowTaskQueuesEmptyResult() { + s.PollForTasksEmptyResultTest(context.Background(), enumspb.TASK_QUEUE_TYPE_WORKFLOW) +} + +func (s *matchingEngineSuite) TestPollActivityTaskQueuesEmptyResultWithShortContext() { + shortContextTimeout := returnEmptyTaskTimeBudget + 10*time.Millisecond + callContext, cancel := context.WithTimeout(context.Background(), shortContextTimeout) + defer cancel() + s.PollForTasksEmptyResultTest(callContext, enumspb.TASK_QUEUE_TYPE_ACTIVITY) +} + +func (s *matchingEngineSuite) TestPollWorkflowTaskQueuesEmptyResultWithShortContext() { + shortContextTimeout := returnEmptyTaskTimeBudget + 10*time.Millisecond + callContext, cancel := context.WithTimeout(context.Background(), shortContextTimeout) + defer cancel() + s.PollForTasksEmptyResultTest(callContext, enumspb.TASK_QUEUE_TYPE_WORKFLOW) +} + +func (s *matchingEngineSuite) TestOnlyUnloadMatchingInstance() { + queueID := newTestTaskQueueID( + namespace.ID(uuid.New()), + "makeToast", + enumspb.TASK_QUEUE_TYPE_ACTIVITY) + tqm, err := s.matchingEngine.getTaskQueueManager( + context.Background(), + queueID, + normalStickyInfo, + true) + s.Require().NoError(err) + + tqm2, err := newTaskQueueManager( + s.matchingEngine, + queueID, // same queueID as above + normalStickyInfo, + s.matchingEngine.config, + ) + s.Require().NoError(err) + + // try to unload a different tqm instance with the same taskqueue ID + s.matchingEngine.unloadTaskQueue(tqm2) + + got, err := s.matchingEngine.getTaskQueueManager( + context.Background(), queueID, normalStickyInfo, true) + s.Require().NoError(err) + s.Require().Same(tqm, got, + "Unload call with non-matching taskQueueManager should not cause unload") + + // this time unload the right tqm + s.matchingEngine.unloadTaskQueue(tqm) + + got, err = s.matchingEngine.getTaskQueueManager( + context.Background(), queueID, normalStickyInfo, true) + s.Require().NoError(err) + s.Require().NotSame(tqm, got, + "Unload call with matching incarnation should have caused unload") +} + +func (s *matchingEngineSuite) TestPollWorkflowTaskQueues() { + namespaceID := namespace.ID(uuid.New()) + tl := "makeToast" + stickyTl := "makeStickyToast" + stickyTlKind := enumspb.TASK_QUEUE_KIND_STICKY + identity := "selfDrivingToaster" + + stickyTaskQueue := &taskqueuepb.TaskQueue{Name: stickyTl, Kind: stickyTlKind} + + s.matchingEngine.config.RangeSize = 2 // to test that range is not updated without tasks + s.matchingEngine.config.LongPollExpirationInterval = dynamicconfig.GetDurationPropertyFnFilteredByTaskQueueInfo(10 * time.Millisecond) + + runID := uuid.NewRandom().String() + workflowID := "workflow1" + workflowType := &commonpb.WorkflowType{ + Name: "workflow", + } + execution := &commonpb.WorkflowExecution{RunId: runID, WorkflowId: workflowID} + scheduledEventID := int64(0) + + // 
History service is using mock + s.mockHistoryClient.EXPECT().RecordWorkflowTaskStarted(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn( + func(ctx context.Context, taskRequest *historyservice.RecordWorkflowTaskStartedRequest, arg2 ...interface{}) (*historyservice.RecordWorkflowTaskStartedResponse, error) { + s.logger.Debug("Mock Received RecordWorkflowTaskStartedRequest") + response := &historyservice.RecordWorkflowTaskStartedResponse{ + WorkflowType: workflowType, + PreviousStartedEventId: scheduledEventID, + ScheduledEventId: scheduledEventID + 1, + Attempt: 1, + StickyExecutionEnabled: true, + WorkflowExecutionTaskQueue: &taskqueuepb.TaskQueue{Name: tl, Kind: enumspb.TASK_QUEUE_KIND_NORMAL}, + } + return response, nil + }).AnyTimes() + + addRequest := matchingservice.AddWorkflowTaskRequest{ + NamespaceId: namespaceID.String(), + Execution: execution, + ScheduledEventId: scheduledEventID, + TaskQueue: stickyTaskQueue, + ScheduleToStartTimeout: timestamp.DurationFromSeconds(100), + } + + _, err := s.matchingEngine.AddWorkflowTask(context.Background(), &addRequest) + // fail due to no sticky worker + s.ErrorAs(err, new(*serviceerrors.StickyWorkerUnavailable)) + // poll the sticky queue, should get no result + resp, err := s.matchingEngine.PollWorkflowTaskQueue(context.Background(), &matchingservice.PollWorkflowTaskQueueRequest{ + NamespaceId: namespaceID.String(), + PollRequest: &workflowservice.PollWorkflowTaskQueueRequest{ + TaskQueue: stickyTaskQueue, + Identity: identity, + }, + }, metrics.NoopMetricsHandler) + s.NoError(err) + s.Equal(emptyPollWorkflowTaskQueueResponse, resp) + + // add task to sticky queue again, this time it should pass + _, err = s.matchingEngine.AddWorkflowTask(context.Background(), &addRequest) + s.NoError(err) + + resp, err = s.matchingEngine.PollWorkflowTaskQueue(context.Background(), &matchingservice.PollWorkflowTaskQueueRequest{ + NamespaceId: namespaceID.String(), + PollRequest: &workflowservice.PollWorkflowTaskQueueRequest{ + TaskQueue: stickyTaskQueue, + Identity: identity, + }, + }, metrics.NoopMetricsHandler) + s.NoError(err) + + expectedResp := &matchingservice.PollWorkflowTaskQueueResponse{ + TaskToken: resp.TaskToken, + WorkflowExecution: execution, + WorkflowType: workflowType, + PreviousStartedEventId: scheduledEventID, + StartedEventId: common.EmptyEventID, + Attempt: 1, + NextEventId: common.EmptyEventID, + BacklogCountHint: 0, + StickyExecutionEnabled: true, + Query: nil, + TransientWorkflowTask: nil, + WorkflowExecutionTaskQueue: &taskqueuepb.TaskQueue{ + Name: tl, + Kind: enumspb.TASK_QUEUE_KIND_NORMAL, + }, + BranchToken: nil, + ScheduledTime: nil, + StartedTime: nil, + Queries: nil, + } + + s.Nil(err) + s.Equal(expectedResp, resp) +} + +func (s *matchingEngineSuite) PollForTasksEmptyResultTest(callContext context.Context, taskType enumspb.TaskQueueType) { + s.matchingEngine.config.RangeSize = 2 // to test that range is not updated without tasks + if _, ok := callContext.Deadline(); !ok { + s.matchingEngine.config.LongPollExpirationInterval = dynamicconfig.GetDurationPropertyFnFilteredByTaskQueueInfo(10 * time.Millisecond) + } + + namespaceID := namespace.ID(uuid.New()) + tl := "makeToast" + identity := "selfDrivingToaster" + + taskQueue := &taskqueuepb.TaskQueue{ + Name: tl, + Kind: enumspb.TASK_QUEUE_KIND_NORMAL, + } + var taskQueueType enumspb.TaskQueueType + tlID := newTestTaskQueueID(namespaceID, tl, taskType) + const pollCount = 10 + for i := 0; i < pollCount; i++ { + if taskType == enumspb.TASK_QUEUE_TYPE_ACTIVITY { + pollResp, 
err := s.matchingEngine.PollActivityTaskQueue(callContext, &matchingservice.PollActivityTaskQueueRequest{ + NamespaceId: namespaceID.String(), + PollRequest: &workflowservice.PollActivityTaskQueueRequest{ + TaskQueue: taskQueue, + Identity: identity, + }, + }, metrics.NoopMetricsHandler) + s.NoError(err) + s.Equal(emptyPollActivityTaskQueueResponse, pollResp) + + taskQueueType = enumspb.TASK_QUEUE_TYPE_ACTIVITY + } else { + resp, err := s.matchingEngine.PollWorkflowTaskQueue(callContext, &matchingservice.PollWorkflowTaskQueueRequest{ + NamespaceId: namespaceID.String(), + PollRequest: &workflowservice.PollWorkflowTaskQueueRequest{ + TaskQueue: taskQueue, + Identity: identity, + }, + }, metrics.NoopMetricsHandler) + s.NoError(err) + s.Equal(emptyPollWorkflowTaskQueueResponse, resp) + + taskQueueType = enumspb.TASK_QUEUE_TYPE_WORKFLOW + } + select { + case <-callContext.Done(): + s.FailNow("Call context has expired.") + default: + } + // check the poller information + descResp, err := s.matchingEngine.DescribeTaskQueue(context.Background(), &matchingservice.DescribeTaskQueueRequest{ + NamespaceId: namespaceID.String(), + DescRequest: &workflowservice.DescribeTaskQueueRequest{ + TaskQueue: taskQueue, + TaskQueueType: taskQueueType, + IncludeTaskQueueStatus: false, + }, + }) + s.NoError(err) + s.Equal(1, len(descResp.Pollers)) + s.Equal(identity, descResp.Pollers[0].GetIdentity()) + s.NotEmpty(descResp.Pollers[0].GetLastAccessTime()) + s.Nil(descResp.GetTaskQueueStatus()) + } + s.EqualValues(1, s.taskManager.getTaskQueueManager(tlID).RangeID()) +} + +func (s *matchingEngineSuite) TestPollWorkflowTaskQueues_NamespaceHandover() { + namespaceID := namespace.ID(uuid.New()) + taskQueue := &taskqueuepb.TaskQueue{Name: "taskQueue", Kind: enumspb.TASK_QUEUE_KIND_NORMAL} + + addRequest := matchingservice.AddWorkflowTaskRequest{ + NamespaceId: namespaceID.String(), + Execution: &commonpb.WorkflowExecution{WorkflowId: "workflowID", RunId: uuid.NewRandom().String()}, + ScheduledEventId: int64(0), + TaskQueue: taskQueue, + ScheduleToStartTimeout: timestamp.DurationFromSeconds(100), + } + + // add multiple workflow tasks, but matching should not keep polling new tasks + // upon getting namespace handover error when recording start for the first task + _, err := s.matchingEngine.AddWorkflowTask(context.Background(), &addRequest) + s.NoError(err) + _, err = s.matchingEngine.AddWorkflowTask(context.Background(), &addRequest) + s.NoError(err) + + s.mockHistoryClient.EXPECT().RecordWorkflowTaskStarted(gomock.Any(), gomock.Any(), gomock.Any()). 
+ Return(nil, common.ErrNamespaceHandover).Times(1) + resp, err := s.matchingEngine.PollWorkflowTaskQueue(context.Background(), &matchingservice.PollWorkflowTaskQueueRequest{ + NamespaceId: namespaceID.String(), + PollRequest: &workflowservice.PollWorkflowTaskQueueRequest{ + TaskQueue: taskQueue, + Identity: "identity", + }, + }, metrics.NoopMetricsHandler) + s.Nil(resp) + s.Equal(common.ErrNamespaceHandover.Error(), err.Error()) +} + +func (s *matchingEngineSuite) TestPollActivityTaskQueues_NamespaceHandover() { + namespaceID := namespace.ID(uuid.New()) + taskQueue := &taskqueuepb.TaskQueue{Name: "taskQueue", Kind: enumspb.TASK_QUEUE_KIND_NORMAL} + + addRequest := matchingservice.AddActivityTaskRequest{ + NamespaceId: namespaceID.String(), + Execution: &commonpb.WorkflowExecution{WorkflowId: "workflowID", RunId: uuid.NewRandom().String()}, + ScheduledEventId: int64(5), + TaskQueue: taskQueue, + ScheduleToStartTimeout: timestamp.DurationFromSeconds(100), + } + + // add multiple activity tasks, but matching should not keep polling new tasks + // upon getting namespace handover error when recording start for the first task + _, err := s.matchingEngine.AddActivityTask(context.Background(), &addRequest) + s.NoError(err) + _, err = s.matchingEngine.AddActivityTask(context.Background(), &addRequest) + s.NoError(err) + + s.mockHistoryClient.EXPECT().RecordActivityTaskStarted(gomock.Any(), gomock.Any(), gomock.Any()). + Return(nil, common.ErrNamespaceHandover).Times(1) + resp, err := s.matchingEngine.PollActivityTaskQueue(context.Background(), &matchingservice.PollActivityTaskQueueRequest{ + NamespaceId: namespaceID.String(), + PollRequest: &workflowservice.PollActivityTaskQueueRequest{ + TaskQueue: taskQueue, + Identity: "identity", + }, + }, metrics.NoopMetricsHandler) + s.Nil(resp) + s.Equal(common.ErrNamespaceHandover.Error(), err.Error()) +} + +func (s *matchingEngineSuite) TestPollWorkflowTask_UserDataDisabled() { + s.matchingEngine.config.LoadUserData = dynamicconfig.GetBoolPropertyFnFilteredByTaskQueueInfo(false) + taskQueue := s.T().Name() + + resp, err := s.matchingEngine.PollWorkflowTaskQueue(context.Background(), &matchingservice.PollWorkflowTaskQueueRequest{ + NamespaceId: "asdf", + PollRequest: &workflowservice.PollWorkflowTaskQueueRequest{ + Namespace: "asdf", + TaskQueue: &taskqueuepb.TaskQueue{ + Name: taskQueue, + Kind: enumspb.TASK_QUEUE_KIND_NORMAL, + }, + Identity: "identity", + WorkerVersionCapabilities: &commonpb.WorkerVersionCapabilities{ + BuildId: "some_build_id", + UseVersioning: true, + }, + }, + }, metrics.NoopMetricsHandler) + s.Error(err) + s.Nil(resp) + var failedPrecondition *serviceerror.FailedPrecondition + s.ErrorAs(err, &failedPrecondition) +} + +func (s *matchingEngineSuite) TestAddActivityTasks() { + s.AddTasksTest(enumspb.TASK_QUEUE_TYPE_ACTIVITY, false) +} + +func (s *matchingEngineSuite) TestAddWorkflowTasks() { + s.AddTasksTest(enumspb.TASK_QUEUE_TYPE_WORKFLOW, false) +} + +func (s *matchingEngineSuite) TestAddWorkflowTasksForwarded() { + s.AddTasksTest(enumspb.TASK_QUEUE_TYPE_WORKFLOW, true) +} + +func (s *matchingEngineSuite) AddTasksTest(taskType enumspb.TaskQueueType, isForwarded bool) { + s.matchingEngine.config.RangeSize = 300 // override to low number for the test + + namespaceID := namespace.ID(uuid.New()) + tl := "makeToast" + forwardedFrom := "/_sys/makeToast/1" + + taskQueue := &taskqueuepb.TaskQueue{ + Name: tl, + Kind: enumspb.TASK_QUEUE_KIND_NORMAL, + } + + const taskCount = 111 + + runID := uuid.New() + workflowID := "workflow1" + execution 
:= &commonpb.WorkflowExecution{RunId: runID, WorkflowId: workflowID} + + for i := int64(0); i < taskCount; i++ { + scheduledEventID := i * 3 + var err error + if taskType == enumspb.TASK_QUEUE_TYPE_ACTIVITY { + addRequest := matchingservice.AddActivityTaskRequest{ + NamespaceId: namespaceID.String(), + Execution: execution, + ScheduledEventId: scheduledEventID, + TaskQueue: taskQueue, + ScheduleToStartTimeout: timestamp.DurationFromSeconds(100), + } + if isForwarded { + addRequest.ForwardedSource = forwardedFrom + } + _, err = s.matchingEngine.AddActivityTask(context.Background(), &addRequest) + } else { + addRequest := matchingservice.AddWorkflowTaskRequest{ + NamespaceId: namespaceID.String(), + Execution: execution, + ScheduledEventId: scheduledEventID, + TaskQueue: taskQueue, + ScheduleToStartTimeout: timestamp.DurationFromSeconds(100), + } + if isForwarded { + addRequest.ForwardedSource = forwardedFrom + } + _, err = s.matchingEngine.AddWorkflowTask(context.Background(), &addRequest) + } + + switch isForwarded { + case false: + s.NoError(err) + case true: + s.Equal(errRemoteSyncMatchFailed, err) + } + } + + switch isForwarded { + case false: + s.EqualValues(taskCount, s.taskManager.getTaskCount(newTestTaskQueueID(namespaceID, tl, taskType))) + case true: + s.EqualValues(0, s.taskManager.getTaskCount(newTestTaskQueueID(namespaceID, tl, taskType))) + } +} + +func (s *matchingEngineSuite) TestAddWorkflowTaskDoesNotLoadSticky() { + addRequest := matchingservice.AddWorkflowTaskRequest{ + NamespaceId: uuid.New(), + Execution: &commonpb.WorkflowExecution{RunId: uuid.New(), WorkflowId: "wf1"}, + ScheduledEventId: 0, + TaskQueue: &taskqueuepb.TaskQueue{Name: "sticky", Kind: enumspb.TASK_QUEUE_KIND_STICKY}, + ScheduleToStartTimeout: timestamp.DurationFromSeconds(100), + } + _, err := s.matchingEngine.AddWorkflowTask(context.Background(), &addRequest) + s.ErrorAs(err, new(*serviceerrors.StickyWorkerUnavailable)) + // check loaded queues + s.matchingEngine.taskQueuesLock.RLock() + defer s.matchingEngine.taskQueuesLock.RUnlock() + s.Equal(0, len(s.matchingEngine.taskQueues)) +} + +func (s *matchingEngineSuite) TestQueryWorkflowDoesNotLoadSticky() { + query := matchingservice.QueryWorkflowRequest{ + NamespaceId: uuid.New(), + TaskQueue: &taskqueuepb.TaskQueue{Name: "sticky", Kind: enumspb.TASK_QUEUE_KIND_STICKY}, + QueryRequest: &workflowservice.QueryWorkflowRequest{ + Namespace: "ns", + Execution: &commonpb.WorkflowExecution{RunId: uuid.New(), WorkflowId: "wf1"}, + Query: &querypb.WorkflowQuery{QueryType: "q"}, + }, + } + _, err := s.matchingEngine.QueryWorkflow(context.Background(), &query) + s.ErrorAs(err, new(*serviceerrors.StickyWorkerUnavailable)) + // check loaded queues + s.matchingEngine.taskQueuesLock.RLock() + defer s.matchingEngine.taskQueuesLock.RUnlock() + s.Equal(0, len(s.matchingEngine.taskQueues)) +} + +func (s *matchingEngineSuite) TestTaskWriterShutdown() { + s.matchingEngine.config.RangeSize = 300 // override to low number for the test + + namespaceID := namespace.ID(uuid.New()) + tl := "makeToast" + + taskQueue := &taskqueuepb.TaskQueue{ + Name: tl, + Kind: enumspb.TASK_QUEUE_KIND_NORMAL, + } + + runID := uuid.NewRandom().String() + workflowID := "workflow1" + execution := &commonpb.WorkflowExecution{RunId: runID, WorkflowId: workflowID} + + tlID := newTestTaskQueueID(namespaceID, tl, enumspb.TASK_QUEUE_TYPE_ACTIVITY) + tlm, err := s.matchingEngine.getTaskQueueManager(context.Background(), tlID, normalStickyInfo, true) + s.Nil(err) + + addRequest := 
matchingservice.AddActivityTaskRequest{ + NamespaceId: namespaceID.String(), + Execution: execution, + TaskQueue: taskQueue, + ScheduleToStartTimeout: timestamp.DurationFromSeconds(100), + } + + // stop the task writer explicitly + tlmImpl := tlm.(*taskQueueManagerImpl) + tlmImpl.taskWriter.Stop() + + // now attempt to add a task + scheduledEventID := int64(5) + addRequest.ScheduledEventId = scheduledEventID + _, err = s.matchingEngine.AddActivityTask(context.Background(), &addRequest) + s.Error(err) +} + +func (s *matchingEngineSuite) TestAddThenConsumeActivities() { + s.matchingEngine.config.LongPollExpirationInterval = dynamicconfig.GetDurationPropertyFnFilteredByTaskQueueInfo(10 * time.Millisecond) + + runID := uuid.NewRandom().String() + workflowID := "workflow1" + workflowExecution := &commonpb.WorkflowExecution{RunId: runID, WorkflowId: workflowID} + + const taskCount = 1000 + const initialRangeID = 102 + // TODO: Understand why publish is low when rangeSize is 3 + const rangeSize = 30 + + namespaceID := namespace.ID(uuid.New()) + tl := "makeToast" + tlID := newTestTaskQueueID(namespaceID, tl, enumspb.TASK_QUEUE_TYPE_ACTIVITY) + s.taskManager.getTaskQueueManager(tlID).rangeID = initialRangeID + s.matchingEngine.config.RangeSize = rangeSize // override to low number for the test + + taskQueue := &taskqueuepb.TaskQueue{ + Name: tl, + Kind: enumspb.TASK_QUEUE_KIND_NORMAL, + } + + for i := int64(0); i < taskCount; i++ { + scheduledEventID := i * 3 + addRequest := matchingservice.AddActivityTaskRequest{ + NamespaceId: namespaceID.String(), + Execution: workflowExecution, + ScheduledEventId: scheduledEventID, + TaskQueue: taskQueue, + ScheduleToStartTimeout: timestamp.DurationFromSeconds(100), + } + + _, err := s.matchingEngine.AddActivityTask(context.Background(), &addRequest) + s.NoError(err) + } + s.EqualValues(taskCount, s.taskManager.getTaskCount(tlID)) + + activityTypeName := "activity1" + activityID := "activityId1" + activityType := &commonpb.ActivityType{Name: activityTypeName} + activityInput := payloads.EncodeString("Activity1 Input") + + identity := "nobody" + + // History service is using mock + s.mockHistoryClient.EXPECT().RecordActivityTaskStarted(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn( + func(ctx context.Context, taskRequest *historyservice.RecordActivityTaskStartedRequest, arg2 ...interface{}) (*historyservice.RecordActivityTaskStartedResponse, error) { + s.logger.Debug("Mock Received RecordActivityTaskStartedRequest") + resp := &historyservice.RecordActivityTaskStartedResponse{ + Attempt: 1, + ScheduledEvent: newActivityTaskScheduledEvent(taskRequest.ScheduledEventId, 0, + &commandpb.ScheduleActivityTaskCommandAttributes{ + ActivityId: activityID, + TaskQueue: &taskqueuepb.TaskQueue{ + Name: taskQueue.Name, + Kind: enumspb.TASK_QUEUE_KIND_NORMAL, + }, + ActivityType: activityType, + Input: activityInput, + ScheduleToCloseTimeout: timestamp.DurationPtr(100 * time.Second), + ScheduleToStartTimeout: timestamp.DurationPtr(50 * time.Second), + StartToCloseTimeout: timestamp.DurationPtr(50 * time.Second), + HeartbeatTimeout: timestamp.DurationPtr(10 * time.Second), + }), + } + resp.StartedTime = timestamp.TimeNowPtrUtc() + return resp, nil + }).AnyTimes() + + for i := int64(0); i < taskCount; { + scheduledEventID := i * 3 + + result, err := s.matchingEngine.PollActivityTaskQueue(context.Background(), &matchingservice.PollActivityTaskQueueRequest{ + NamespaceId: namespaceID.String(), + PollRequest: &workflowservice.PollActivityTaskQueueRequest{ + TaskQueue: 
taskQueue, + Identity: identity, + }, + }, metrics.NoopMetricsHandler) + + s.NoError(err) + s.NotNil(result) + if len(result.TaskToken) == 0 { + s.logger.Debug("empty poll returned") + continue + } + s.EqualValues(activityID, result.ActivityId) + s.EqualValues(activityType, result.ActivityType) + s.EqualValues(activityInput, result.Input) + s.EqualValues(workflowExecution, result.WorkflowExecution) + s.Equal(true, validateTimeRange(*result.ScheduledTime, time.Minute)) + s.EqualValues(time.Second*100, *result.ScheduleToCloseTimeout) + s.Equal(true, validateTimeRange(*result.StartedTime, time.Minute)) + s.EqualValues(time.Second*50, *result.StartToCloseTimeout) + s.EqualValues(time.Second*10, *result.HeartbeatTimeout) + taskToken := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: workflowID, + RunId: runID, + ScheduledEventId: scheduledEventID, + ActivityId: activityID, + ActivityType: activityTypeName, + } + + serializedToken, _ := s.matchingEngine.tokenSerializer.Serialize(taskToken) + s.EqualValues(serializedToken, result.TaskToken) + i++ + } + s.EqualValues(0, s.taskManager.getTaskCount(tlID)) + expectedRange := int64(initialRangeID + taskCount/rangeSize) + if taskCount%rangeSize > 0 { + expectedRange++ + } + // Due to conflicts some ids are skipped and more real ranges are used. + s.True(expectedRange <= s.taskManager.getTaskQueueManager(tlID).rangeID) +} + +func (s *matchingEngineSuite) TestSyncMatchActivities() { + // Set a short long poll expiration so that we don't have to wait too long for 0 throttling cases + s.matchingEngine.config.LongPollExpirationInterval = dynamicconfig.GetDurationPropertyFnFilteredByTaskQueueInfo(2 * time.Second) + + runID := uuid.NewRandom().String() + workflowID := "workflow1" + workflowExecution := &commonpb.WorkflowExecution{RunId: runID, WorkflowId: workflowID} + + const taskCount = 10 + const initialRangeID = 102 + // TODO: Understand why publish is low when rangeSize is 3 + const rangeSize = 30 + + namespaceID := namespace.ID(uuid.New()) + tl := "makeToast" + tlID := newTestTaskQueueID(namespaceID, tl, enumspb.TASK_QUEUE_TYPE_ACTIVITY) + s.matchingEngine.config.RangeSize = rangeSize // override to low number for the test + // So we can get snapshots + scope := tally.NewTestScope("test", nil) + s.matchingEngine.metricsHandler = metrics.NewTallyMetricsHandler(metrics.ClientConfig{}, scope).WithTags(metrics.ServiceNameTag(primitives.MatchingService)) + + var err error + s.taskManager.getTaskQueueManager(tlID).rangeID = initialRangeID + mgr, err := newTaskQueueManager(s.matchingEngine, tlID, normalStickyInfo, s.matchingEngine.config) + s.NoError(err) + + mgrImpl, ok := mgr.(*taskQueueManagerImpl) + s.True(ok) + + mgrImpl.matcher.config.MinTaskThrottlingBurstSize = func() int { return 0 } + mgrImpl.matcher.rateLimiter = quotas.NewRateLimiter( + defaultTaskDispatchRPS, + defaultTaskDispatchRPS, + ) + mgrImpl.matcher.dynamicRateBurst = &dynamicRateBurstWrapper{ + MutableRateBurst: quotas.NewMutableRateBurst( + defaultTaskDispatchRPS, + defaultTaskDispatchRPS, + ), + RateLimiterImpl: mgrImpl.matcher.rateLimiter.(*quotas.RateLimiterImpl), + } + s.matchingEngine.updateTaskQueue(tlID, mgr) + + mgr.Start() + + taskQueue := &taskqueuepb.TaskQueue{ + Name: tl, + Kind: enumspb.TASK_QUEUE_KIND_NORMAL, + } + activityTypeName := "activity1" + activityID := "activityId1" + activityType := &commonpb.ActivityType{Name: activityTypeName} + activityInput := payloads.EncodeString("Activity1 Input") + + identity := "nobody" + + // History 
service is using mock + s.mockHistoryClient.EXPECT().RecordActivityTaskStarted(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn( + func(ctx context.Context, taskRequest *historyservice.RecordActivityTaskStartedRequest, arg2 ...interface{}) (*historyservice.RecordActivityTaskStartedResponse, error) { + s.logger.Debug("Mock Received RecordActivityTaskStartedRequest") + return &historyservice.RecordActivityTaskStartedResponse{ + Attempt: 1, + ScheduledEvent: newActivityTaskScheduledEvent(taskRequest.ScheduledEventId, 0, + &commandpb.ScheduleActivityTaskCommandAttributes{ + ActivityId: activityID, + TaskQueue: &taskqueuepb.TaskQueue{ + Name: taskQueue.Name, + Kind: enumspb.TASK_QUEUE_KIND_NORMAL, + }, + ActivityType: activityType, + Input: activityInput, + ScheduleToStartTimeout: timestamp.DurationPtr(1 * time.Second), + ScheduleToCloseTimeout: timestamp.DurationPtr(2 * time.Second), + StartToCloseTimeout: timestamp.DurationPtr(1 * time.Second), + HeartbeatTimeout: timestamp.DurationPtr(1 * time.Second), + }), + }, nil + }).AnyTimes() + + pollFunc := func(maxDispatch float64) (*matchingservice.PollActivityTaskQueueResponse, error) { + return s.matchingEngine.PollActivityTaskQueue(context.Background(), &matchingservice.PollActivityTaskQueueRequest{ + NamespaceId: namespaceID.String(), + PollRequest: &workflowservice.PollActivityTaskQueueRequest{ + TaskQueue: taskQueue, + Identity: identity, + TaskQueueMetadata: &taskqueuepb.TaskQueueMetadata{MaxTasksPerSecond: &types.DoubleValue{Value: maxDispatch}}, + }, + }, metrics.NoopMetricsHandler) + } + + for i := int64(0); i < taskCount; i++ { + scheduledEventID := i * 3 + + var wg sync.WaitGroup + var result *matchingservice.PollActivityTaskQueueResponse + var pollErr error + maxDispatch := defaultTaskDispatchRPS + if i == taskCount/2 { + maxDispatch = 0 + } + wg.Add(1) + go func() { + defer wg.Done() + result, pollErr = pollFunc(maxDispatch) + }() + time.Sleep(20 * time.Millisecond) // Necessary for sync match to happen + + addRequest := matchingservice.AddActivityTaskRequest{ + NamespaceId: namespaceID.String(), + Execution: workflowExecution, + ScheduledEventId: scheduledEventID, + TaskQueue: taskQueue, + ScheduleToStartTimeout: timestamp.DurationFromSeconds(100), + } + _, err := s.matchingEngine.AddActivityTask(context.Background(), &addRequest) + wg.Wait() + s.NoError(err) + s.NoError(pollErr) + s.NotNil(result) + + if len(result.TaskToken) == 0 { + // when ratelimit is set to zero, poller is expected to return empty result + // reset ratelimit, poll again and make sure task is returned this time + s.logger.Debug("empty poll returned") + s.Equal(float64(0), maxDispatch) + maxDispatch = defaultTaskDispatchRPS + wg.Add(1) + go func() { + defer wg.Done() + result, pollErr = pollFunc(maxDispatch) + }() + wg.Wait() + s.NoError(err) + s.NoError(pollErr) + s.NotNil(result) + s.True(len(result.TaskToken) > 0) + } + + s.EqualValues(activityID, result.ActivityId) + s.EqualValues(activityType, result.ActivityType) + s.EqualValues(activityInput, result.Input) + s.EqualValues(workflowExecution, result.WorkflowExecution) + taskToken := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: workflowID, + RunId: runID, + ScheduledEventId: scheduledEventID, + ActivityId: activityID, + ActivityType: activityTypeName, + } + + serializedToken, _ := s.matchingEngine.tokenSerializer.Serialize(taskToken) + // s.EqualValues(scheduledEventID, result.Task) + + s.EqualValues(serializedToken, result.TaskToken) + } + + time.Sleep(20 * 
time.Millisecond) // So any buffer tasks from 0 rps get picked up + snap := scope.Snapshot() + syncCtr := snap.Counters()["test.sync_throttle_count+namespace="+matchingTestNamespace+",operation=TaskQueueMgr,service_name=matching,task_type=Activity,taskqueue=makeToast"] + s.Equal(1, int(syncCtr.Value())) // Check times zero rps is set = throttle counter + s.EqualValues(1, s.taskManager.getCreateTaskCount(tlID)) // Check times zero rps is set = Tasks stored in persistence + s.EqualValues(0, s.taskManager.getTaskCount(tlID)) + expectedRange := int64(initialRangeID + taskCount/rangeSize) + if taskCount%rangeSize > 0 { + expectedRange++ + } + // Due to conflicts some ids are skipped and more real ranges are used. + s.True(expectedRange <= s.taskManager.getTaskQueueManager(tlID).rangeID) + + // check the poller information + tlType := enumspb.TASK_QUEUE_TYPE_ACTIVITY + descResp, err := s.matchingEngine.DescribeTaskQueue(context.Background(), &matchingservice.DescribeTaskQueueRequest{ + NamespaceId: namespaceID.String(), + DescRequest: &workflowservice.DescribeTaskQueueRequest{ + TaskQueue: taskQueue, + TaskQueueType: tlType, + IncludeTaskQueueStatus: true, + }, + }) + s.NoError(err) + s.Equal(1, len(descResp.Pollers)) + s.Equal(identity, descResp.Pollers[0].GetIdentity()) + s.NotEmpty(descResp.Pollers[0].GetLastAccessTime()) + s.Equal(defaultTaskDispatchRPS, descResp.Pollers[0].GetRatePerSecond()) + s.NotNil(descResp.GetTaskQueueStatus()) + numPartitions := float64(s.matchingEngine.config.NumTaskqueueWritePartitions("", "", tlType)) + s.True(descResp.GetTaskQueueStatus().GetRatePerSecond()*numPartitions >= (defaultTaskDispatchRPS - 1)) +} + +func (s *matchingEngineSuite) TestConcurrentPublishConsumeActivities() { + dispatchLimitFn := func(int, int64) float64 { + return defaultTaskDispatchRPS + } + const workerCount = 20 + const taskCount = 100 + throttleCt := s.concurrentPublishConsumeActivities(workerCount, taskCount, dispatchLimitFn) + s.Zero(throttleCt) +} + +func (s *matchingEngineSuite) TestConcurrentPublishConsumeActivitiesWithZeroDispatch() { + s.T().Skip("Racy - times out ~50% of the time running locally with --race") + // Set a short long poll expiration so that we don't have to wait too long for 0 throttling cases + s.matchingEngine.config.LongPollExpirationInterval = dynamicconfig.GetDurationPropertyFnFilteredByTaskQueueInfo(20 * time.Millisecond) + dispatchLimitFn := func(wc int, tc int64) float64 { + if tc%50 == 0 && wc%5 == 0 { // Gets triggered at least 20 times + return 0 + } + return defaultTaskDispatchRPS + } + const workerCount = 20 + const taskCount = 100 + throttleCt := s.concurrentPublishConsumeActivities(workerCount, taskCount, dispatchLimitFn) + s.logger.Info("Number of tasks throttled", tag.Number(throttleCt)) + // at least once from the 0-dispatch poll, and until the TTL is hit, at which time the throttle limit is reset; + // hard to predict exactly how many times, since the atomic.Value load might not have updated.
+ s.True(throttleCt >= 1) +} + +func (s *matchingEngineSuite) concurrentPublishConsumeActivities( + workerCount int, + taskCount int64, + dispatchLimitFn func(int, int64) float64, +) int64 { + scope := tally.NewTestScope("test", nil) + s.matchingEngine.metricsHandler = metrics.NewTallyMetricsHandler(metrics.ClientConfig{}, scope).WithTags(metrics.ServiceNameTag(primitives.MatchingService)) + runID := uuid.NewRandom().String() + workflowID := "workflow1" + workflowExecution := &commonpb.WorkflowExecution{RunId: runID, WorkflowId: workflowID} + + const initialRangeID = 0 + const rangeSize = 3 + var scheduledEventID int64 = 123 + namespaceID := namespace.ID(uuid.New()) + tl := "makeToast" + tlID := newTestTaskQueueID(namespaceID, tl, enumspb.TASK_QUEUE_TYPE_ACTIVITY) + s.matchingEngine.config.RangeSize = rangeSize // override to low number for the test + + s.taskManager.getTaskQueueManager(tlID).rangeID = initialRangeID + var err error + mgr, err := newTaskQueueManager(s.matchingEngine, tlID, normalStickyInfo, s.matchingEngine.config) + s.NoError(err) + + mgrImpl := mgr.(*taskQueueManagerImpl) + mgrImpl.matcher.config.MinTaskThrottlingBurstSize = func() int { return 0 } + mgrImpl.matcher.rateLimiter = quotas.NewRateLimiter( + defaultTaskDispatchRPS, + defaultTaskDispatchRPS, + ) + mgrImpl.matcher.dynamicRateBurst = &dynamicRateBurstWrapper{ + MutableRateBurst: quotas.NewMutableRateBurst( + defaultTaskDispatchRPS, + defaultTaskDispatchRPS, + ), + RateLimiterImpl: mgrImpl.matcher.rateLimiter.(*quotas.RateLimiterImpl), + } + s.matchingEngine.updateTaskQueue(tlID, mgr) + mgr.Start() + + taskQueue := &taskqueuepb.TaskQueue{ + Name: tl, + Kind: enumspb.TASK_QUEUE_KIND_NORMAL, + } + var wg sync.WaitGroup + wg.Add(2 * workerCount) + + for p := 0; p < workerCount; p++ { + go func() { + defer wg.Done() + for i := int64(0); i < taskCount; i++ { + addRequest := matchingservice.AddActivityTaskRequest{ + NamespaceId: namespaceID.String(), + Execution: workflowExecution, + ScheduledEventId: scheduledEventID, + TaskQueue: taskQueue, + ScheduleToStartTimeout: timestamp.DurationFromSeconds(100), + } + + _, err := s.matchingEngine.AddActivityTask(context.Background(), &addRequest) + if err != nil { + s.logger.Info("Failure in AddActivityTask", tag.Error(err)) + i-- + } + } + }() + } + + activityTypeName := "activity1" + activityID := "activityId1" + activityType := &commonpb.ActivityType{Name: activityTypeName} + activityInput := payloads.EncodeString("Activity1 Input") + activityHeader := &commonpb.Header{ + Fields: map[string]*commonpb.Payload{"tracing": payload.EncodeString("tracing data")}, + } + + identity := "nobody" + + // History service is using mock + s.mockHistoryClient.EXPECT().RecordActivityTaskStarted(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn( + func(ctx context.Context, taskRequest *historyservice.RecordActivityTaskStartedRequest, arg2 ...interface{}) (*historyservice.RecordActivityTaskStartedResponse, error) { + s.logger.Debug("Mock Received RecordActivityTaskStartedRequest") + return &historyservice.RecordActivityTaskStartedResponse{ + Attempt: 1, + ScheduledEvent: newActivityTaskScheduledEvent(taskRequest.ScheduledEventId, 0, + &commandpb.ScheduleActivityTaskCommandAttributes{ + ActivityId: activityID, + TaskQueue: &taskqueuepb.TaskQueue{ + Name: taskQueue.Name, + Kind: enumspb.TASK_QUEUE_KIND_NORMAL, + }, + ActivityType: activityType, + Input: activityInput, + Header: activityHeader, + ScheduleToStartTimeout: timestamp.DurationPtr(1 * time.Second), + ScheduleToCloseTimeout: 
timestamp.DurationPtr(2 * time.Second), + StartToCloseTimeout: timestamp.DurationPtr(1 * time.Second), + HeartbeatTimeout: timestamp.DurationPtr(1 * time.Second), + }), + }, nil + }).AnyTimes() + + for p := 0; p < workerCount; p++ { + go func(wNum int) { + defer wg.Done() + for i := int64(0); i < taskCount; { + maxDispatch := dispatchLimitFn(wNum, i) + result, err := s.matchingEngine.PollActivityTaskQueue(context.Background(), &matchingservice.PollActivityTaskQueueRequest{ + NamespaceId: namespaceID.String(), + PollRequest: &workflowservice.PollActivityTaskQueueRequest{ + TaskQueue: taskQueue, + Identity: identity, + TaskQueueMetadata: &taskqueuepb.TaskQueueMetadata{MaxTasksPerSecond: &types.DoubleValue{Value: maxDispatch}}, + }, + }, metrics.NoopMetricsHandler) + s.NoError(err) + s.NotNil(result) + if len(result.TaskToken) == 0 { + s.logger.Debug("empty poll returned") + continue + } + s.EqualValues(activityID, result.ActivityId) + s.EqualValues(activityType, result.ActivityType) + s.EqualValues(activityInput, result.Input) + s.EqualValues(activityHeader, result.Header) + s.EqualValues(workflowExecution, result.WorkflowExecution) + taskToken := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: workflowID, + RunId: runID, + ScheduledEventId: scheduledEventID, + ActivityId: activityID, + ActivityType: activityTypeName, + } + resultToken, err := s.matchingEngine.tokenSerializer.Deserialize(result.TaskToken) + s.NoError(err) + + // taskToken, _ := s.matchingEngine.tokenSerializer.Serialize(token) + // s.EqualValues(taskToken, result.Task, fmt.Sprintf("%v!=%v", string(taskToken))) + s.EqualValues(taskToken, resultToken, fmt.Sprintf("%v!=%v", taskToken, resultToken)) + i++ + } + }(p) + } + wg.Wait() + totalTasks := int(taskCount) * workerCount + persisted := s.taskManager.getCreateTaskCount(tlID) + s.True(persisted < totalTasks) + expectedRange := int64(initialRangeID + persisted/rangeSize) + if persisted%rangeSize > 0 { + expectedRange++ + } + // Due to conflicts some ids are skipped and more real ranges are used. 
+ s.True(expectedRange <= s.taskManager.getTaskQueueManager(tlID).rangeID) + s.EqualValues(0, s.taskManager.getTaskCount(tlID)) + + syncCtr := scope.Snapshot().Counters()["test.sync_throttle_count+namespace="+matchingTestNamespace+",operation=TaskQueueMgr,taskqueue=makeToast"] + bufCtr := scope.Snapshot().Counters()["test.buffer_throttle_count+namespace="+matchingTestNamespace+",operation=TaskQueueMgr,taskqueue=makeToast"] + total := int64(0) + if syncCtr != nil { + total += syncCtr.Value() + } + if bufCtr != nil { + total += bufCtr.Value() + } + return total +} + +func (s *matchingEngineSuite) TestConcurrentPublishConsumeWorkflowTasks() { + runID := uuid.NewRandom().String() + workflowID := "workflow1" + workflowExecution := &commonpb.WorkflowExecution{RunId: runID, WorkflowId: workflowID} + + const workerCount = 20 + const taskCount = 100 + const initialRangeID = 0 + const rangeSize = 5 + var scheduledEventID int64 = 123 + var startedEventID int64 = 1412 + + namespaceID := namespace.ID(uuid.New()) + tl := "makeToast" + tlID := newTestTaskQueueID(namespaceID, tl, enumspb.TASK_QUEUE_TYPE_WORKFLOW) + s.taskManager.getTaskQueueManager(tlID).rangeID = initialRangeID + s.matchingEngine.config.RangeSize = rangeSize // override to low number for the test + + taskQueue := &taskqueuepb.TaskQueue{ + Name: tl, + Kind: enumspb.TASK_QUEUE_KIND_NORMAL, + } + + var wg sync.WaitGroup + wg.Add(2 * workerCount) + + for p := 0; p < workerCount; p++ { + go func() { + for i := int64(0); i < taskCount; i++ { + addRequest := matchingservice.AddWorkflowTaskRequest{ + NamespaceId: namespaceID.String(), + Execution: workflowExecution, + ScheduledEventId: scheduledEventID, + TaskQueue: taskQueue, + ScheduleToStartTimeout: timestamp.DurationFromSeconds(100), + } + + _, err := s.matchingEngine.AddWorkflowTask(context.Background(), &addRequest) + if err != nil { + panic(err) + } + } + wg.Done() + }() + } + workflowTypeName := "workflowType1" + workflowType := &commonpb.WorkflowType{Name: workflowTypeName} + + identity := "nobody" + + // History service is using mock + s.mockHistoryClient.EXPECT().RecordWorkflowTaskStarted(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn( + func(ctx context.Context, taskRequest *historyservice.RecordWorkflowTaskStartedRequest, arg2 ...interface{}) (*historyservice.RecordWorkflowTaskStartedResponse, error) { + s.logger.Debug("Mock Received RecordWorkflowTaskStartedRequest") + return &historyservice.RecordWorkflowTaskStartedResponse{ + PreviousStartedEventId: startedEventID, + StartedEventId: startedEventID, + ScheduledEventId: scheduledEventID, + WorkflowType: workflowType, + Attempt: 1, + }, nil + }).AnyTimes() + for p := 0; p < workerCount; p++ { + go func() { + for i := int64(0); i < taskCount; { + result, err := s.matchingEngine.PollWorkflowTaskQueue(context.Background(), &matchingservice.PollWorkflowTaskQueueRequest{ + NamespaceId: namespaceID.String(), + PollRequest: &workflowservice.PollWorkflowTaskQueueRequest{ + TaskQueue: taskQueue, + Identity: identity, + }, + }, metrics.NoopMetricsHandler) + if err != nil { + panic(err) + } + s.NotNil(result) + if len(result.TaskToken) == 0 { + s.logger.Debug("empty poll returned") + continue + } + s.EqualValues(workflowExecution, result.WorkflowExecution) + s.EqualValues(workflowType, result.WorkflowType) + s.EqualValues(startedEventID, result.StartedEventId) + s.EqualValues(workflowExecution, result.WorkflowExecution) + taskToken := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: workflowID, + RunId: 
runID, + ScheduledEventId: scheduledEventID, + StartedEventId: startedEventID, + } + resultToken, err := s.matchingEngine.tokenSerializer.Deserialize(result.TaskToken) + if err != nil { + panic(err) + } + + // taskToken, _ := s.matchingEngine.tokenSerializer.Serialize(token) + // s.EqualValues(taskToken, result.Task, fmt.Sprintf("%v!=%v", string(taskToken))) + s.EqualValues(taskToken, resultToken, fmt.Sprintf("%v!=%v", taskToken, resultToken)) + i++ + } + wg.Done() + }() + } + wg.Wait() + s.EqualValues(0, s.taskManager.getTaskCount(tlID)) + totalTasks := taskCount * workerCount + persisted := s.taskManager.getCreateTaskCount(tlID) + s.True(persisted < totalTasks) + expectedRange := int64(initialRangeID + persisted/rangeSize) + if persisted%rangeSize > 0 { + expectedRange++ + } + // Due to conflicts some ids are skipped and more real ranges are used. + s.True(expectedRange <= s.taskManager.getTaskQueueManager(tlID).rangeID) +} + +func (s *matchingEngineSuite) TestPollWithExpiredContext() { + identity := "nobody" + namespaceID := namespace.ID(uuid.New()) + tl := "makeToast" + + taskQueue := &taskqueuepb.TaskQueue{ + Name: tl, + Kind: enumspb.TASK_QUEUE_KIND_NORMAL, + } + + // Try with cancelled context + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + cancel() + _, err := s.matchingEngine.PollActivityTaskQueue(ctx, &matchingservice.PollActivityTaskQueueRequest{ + NamespaceId: namespaceID.String(), + PollRequest: &workflowservice.PollActivityTaskQueueRequest{ + TaskQueue: taskQueue, + Identity: identity, + }, + }, metrics.NoopMetricsHandler) + + s.Equal(ctx.Err(), err) + + // Try with expired context + ctx, cancel = context.WithTimeout(context.Background(), time.Second) + defer cancel() + resp, err := s.matchingEngine.PollActivityTaskQueue(ctx, &matchingservice.PollActivityTaskQueueRequest{ + NamespaceId: namespaceID.String(), + PollRequest: &workflowservice.PollActivityTaskQueueRequest{ + TaskQueue: taskQueue, + Identity: identity, + }, + }, metrics.NoopMetricsHandler) + s.Nil(err) + s.Equal(emptyPollActivityTaskQueueResponse, resp) +} + +func (s *matchingEngineSuite) TestMultipleEnginesActivitiesRangeStealing() { + runID := uuid.NewRandom().String() + workflowID := "workflow1" + workflowExecution := &commonpb.WorkflowExecution{RunId: runID, WorkflowId: workflowID} + + const engineCount = 2 + const taskCount = 400 + const iterations = 2 + const initialRangeID = 0 + const rangeSize = 10 + var scheduledEventID int64 = 123 + + namespaceID := namespace.ID(uuid.New()) + tl := "makeToast" + tlID := newTestTaskQueueID(namespaceID, tl, enumspb.TASK_QUEUE_TYPE_ACTIVITY) + s.taskManager.getTaskQueueManager(tlID).rangeID = initialRangeID + s.matchingEngine.config.RangeSize = rangeSize // override to low number for the test + + taskQueue := &taskqueuepb.TaskQueue{ + Name: tl, + Kind: enumspb.TASK_QUEUE_KIND_NORMAL, + } + + engines := make([]*matchingEngineImpl, engineCount) + for p := 0; p < engineCount; p++ { + e := s.newMatchingEngine(defaultTestConfig(), s.taskManager) + e.config.RangeSize = rangeSize + engines[p] = e + e.Start() + } + + for j := 0; j < iterations; j++ { + for p := 0; p < engineCount; p++ { + engine := engines[p] + for i := int64(0); i < taskCount; i++ { + addRequest := matchingservice.AddActivityTaskRequest{ + NamespaceId: namespaceID.String(), + Execution: workflowExecution, + ScheduledEventId: scheduledEventID, + TaskQueue: taskQueue, + ScheduleToStartTimeout: timestamp.DurationFromSeconds(600), + } + + _, err := engine.AddActivityTask(context.Background(), 
&addRequest) + if err != nil { + if _, ok := err.(*persistence.ConditionFailedError); ok { + i-- // retry adding + } else { + panic(fmt.Sprintf("errType=%T, err=%v", err, err)) + } + } + } + } + } + + s.EqualValues(iterations*engineCount*taskCount, s.taskManager.getCreateTaskCount(tlID)) + + activityTypeName := "activity1" + activityID := "activityId1" + activityType := &commonpb.ActivityType{Name: activityTypeName} + activityInput := payloads.EncodeString("Activity1 Input") + + identity := "nobody" + + startedTasks := make(map[int64]bool) + + // History service is using mock + s.mockHistoryClient.EXPECT().RecordActivityTaskStarted(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn( + func(ctx context.Context, taskRequest *historyservice.RecordActivityTaskStartedRequest, arg2 ...interface{}) (*historyservice.RecordActivityTaskStartedResponse, error) { + if _, ok := startedTasks[taskRequest.TaskId]; ok { + s.logger.Debug("From error function Mock Received DUPLICATED RecordActivityTaskStartedRequest", tag.TaskID(taskRequest.TaskId)) + return nil, serviceerror.NewNotFound("already started") + } + s.logger.Debug("Mock Received RecordActivityTaskStartedRequest", tag.TaskID(taskRequest.TaskId)) + + startedTasks[taskRequest.TaskId] = true + return &historyservice.RecordActivityTaskStartedResponse{ + Attempt: 1, + ScheduledEvent: newActivityTaskScheduledEvent(taskRequest.ScheduledEventId, 0, + &commandpb.ScheduleActivityTaskCommandAttributes{ + ActivityId: activityID, + TaskQueue: &taskqueuepb.TaskQueue{ + Name: taskQueue.Name, + Kind: enumspb.TASK_QUEUE_KIND_NORMAL, + }, + ActivityType: activityType, + Input: activityInput, + ScheduleToStartTimeout: timestamp.DurationPtr(600 * time.Second), + ScheduleToCloseTimeout: timestamp.DurationPtr(2 * time.Second), + StartToCloseTimeout: timestamp.DurationPtr(1 * time.Second), + HeartbeatTimeout: timestamp.DurationPtr(1 * time.Second), + }), + }, nil + }).AnyTimes() + for j := 0; j < iterations; j++ { + for p := 0; p < engineCount; p++ { + engine := engines[p] + for i := int64(0); i < taskCount; /* incremented explicitly to skip empty polls */ { + result, err := engine.PollActivityTaskQueue(context.Background(), &matchingservice.PollActivityTaskQueueRequest{ + NamespaceId: namespaceID.String(), + PollRequest: &workflowservice.PollActivityTaskQueueRequest{ + TaskQueue: taskQueue, + Identity: identity, + }, + }, metrics.NoopMetricsHandler) + if err != nil { + panic(err) + } + s.NotNil(result) + if len(result.TaskToken) == 0 { + s.logger.Debug("empty poll returned") + continue + } + s.EqualValues(activityID, result.ActivityId) + s.EqualValues(activityType, result.ActivityType) + s.EqualValues(activityInput, result.Input) + s.EqualValues(workflowExecution, result.WorkflowExecution) + taskToken := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: workflowID, + RunId: runID, + ScheduledEventId: scheduledEventID, + ActivityId: activityID, + ActivityType: activityTypeName, + } + resultToken, err := engine.tokenSerializer.Deserialize(result.TaskToken) + if err != nil { + panic(err) + } + // taskToken, _ := s.matchingEngine.tokenSerializer.Serialize(token) + // s.EqualValues(taskToken, result.Task, fmt.Sprintf("%v!=%v", string(taskToken))) + s.EqualValues(taskToken, resultToken, fmt.Sprintf("%v!=%v", taskToken, resultToken)) + i++ + } + } + } + + for _, e := range engines { + e.Stop() + } + + s.EqualValues(0, s.taskManager.getTaskCount(tlID)) + totalTasks := taskCount * engineCount * iterations + persisted := 
s.taskManager.getCreateTaskCount(tlID) + // No sync matching as all messages are published first + s.EqualValues(totalTasks, persisted) + expectedRange := int64(initialRangeID + persisted/rangeSize) + if persisted%rangeSize > 0 { + expectedRange++ + } + // Due to conflicts some ids are skipped and more real ranges are used. + s.True(expectedRange <= s.taskManager.getTaskQueueManager(tlID).rangeID) +} + +func (s *matchingEngineSuite) TestMultipleEnginesWorkflowTasksRangeStealing() { + runID := uuid.NewRandom().String() + workflowID := "workflow1" + workflowExecution := &commonpb.WorkflowExecution{RunId: runID, WorkflowId: workflowID} + + const engineCount = 2 + const taskCount = 400 + const iterations = 2 + const initialRangeID = 0 + const rangeSize = 10 + var scheduledEventID int64 = 123 + + namespaceID := namespace.ID(uuid.New()) + tl := "makeToast" + tlID := newTestTaskQueueID(namespaceID, tl, enumspb.TASK_QUEUE_TYPE_WORKFLOW) + s.taskManager.getTaskQueueManager(tlID).rangeID = initialRangeID + s.matchingEngine.config.RangeSize = rangeSize // override to low number for the test + + taskQueue := &taskqueuepb.TaskQueue{ + Name: tl, + Kind: enumspb.TASK_QUEUE_KIND_NORMAL, + } + + engines := make([]*matchingEngineImpl, engineCount) + for p := 0; p < engineCount; p++ { + e := s.newMatchingEngine(defaultTestConfig(), s.taskManager) + e.config.RangeSize = rangeSize + engines[p] = e + e.Start() + } + + for j := 0; j < iterations; j++ { + for p := 0; p < engineCount; p++ { + engine := engines[p] + for i := int64(0); i < taskCount; i++ { + addRequest := matchingservice.AddWorkflowTaskRequest{ + NamespaceId: namespaceID.String(), + Execution: workflowExecution, + ScheduledEventId: scheduledEventID, + TaskQueue: taskQueue, + ScheduleToStartTimeout: timestamp.DurationFromSeconds(600), + } + + _, err := engine.AddWorkflowTask(context.Background(), &addRequest) + if err != nil { + if _, ok := err.(*persistence.ConditionFailedError); ok { + i-- // retry adding + } else { + panic(fmt.Sprintf("errType=%T, err=%v", err, err)) + } + } + } + } + } + workflowTypeName := "workflowType1" + workflowType := &commonpb.WorkflowType{Name: workflowTypeName} + + identity := "nobody" + var startedEventID int64 = 1412 + + startedTasks := make(map[int64]bool) + + // History service is using mock + s.mockHistoryClient.EXPECT().RecordWorkflowTaskStarted(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn( + func(ctx context.Context, taskRequest *historyservice.RecordWorkflowTaskStartedRequest, arg2 ...interface{}) (*historyservice.RecordWorkflowTaskStartedResponse, error) { + if _, ok := startedTasks[taskRequest.TaskId]; ok { + s.logger.Debug("From error function Mock Received DUPLICATED RecordWorkflowTaskStartedRequest", tag.TaskID(taskRequest.TaskId)) + return nil, serviceerrors.NewTaskAlreadyStarted("Workflow") + } + s.logger.Debug("Mock Received RecordWorkflowTaskStartedRequest", tag.TaskID(taskRequest.TaskId)) + s.logger.Debug("Mock Received RecordWorkflowTaskStartedRequest") + startedTasks[taskRequest.TaskId] = true + return &historyservice.RecordWorkflowTaskStartedResponse{ + PreviousStartedEventId: startedEventID, + StartedEventId: startedEventID, + ScheduledEventId: scheduledEventID, + WorkflowType: workflowType, + Attempt: 1, + }, nil + }).AnyTimes() + for j := 0; j < iterations; j++ { + for p := 0; p < engineCount; p++ { + engine := engines[p] + for i := int64(0); i < taskCount; /* incremented explicitly to skip empty polls */ { + result, err := engine.PollWorkflowTaskQueue(context.Background(), 
&matchingservice.PollWorkflowTaskQueueRequest{ + NamespaceId: namespaceID.String(), + PollRequest: &workflowservice.PollWorkflowTaskQueueRequest{ + TaskQueue: taskQueue, + Identity: identity, + }, + }, metrics.NoopMetricsHandler) + if err != nil { + panic(err) + } + s.NotNil(result) + if len(result.TaskToken) == 0 { + s.logger.Debug("empty poll returned") + continue + } + s.EqualValues(workflowExecution, result.WorkflowExecution) + s.EqualValues(workflowType, result.WorkflowType) + s.EqualValues(startedEventID, result.StartedEventId) + s.EqualValues(workflowExecution, result.WorkflowExecution) + taskToken := &tokenspb.Task{ + Attempt: 1, + NamespaceId: namespaceID.String(), + WorkflowId: workflowID, + RunId: runID, + ScheduledEventId: scheduledEventID, + StartedEventId: startedEventID, + } + resultToken, err := engine.tokenSerializer.Deserialize(result.TaskToken) + if err != nil { + panic(err) + } + + // taskToken, _ := s.matchingEngine.tokenSerializer.Serialize(token) + // s.EqualValues(taskToken, result.Task, fmt.Sprintf("%v!=%v", string(taskToken))) + s.EqualValues(taskToken, resultToken, fmt.Sprintf("%v!=%v", taskToken, resultToken)) + i++ + } + } + } + + for _, e := range engines { + e.Stop() + } + + s.EqualValues(0, s.taskManager.getTaskCount(tlID)) + totalTasks := taskCount * engineCount * iterations + persisted := s.taskManager.getCreateTaskCount(tlID) + // No sync matching as all messages are published first + s.EqualValues(totalTasks, persisted) + expectedRange := int64(initialRangeID + persisted/rangeSize) + if persisted%rangeSize > 0 { + expectedRange++ + } + // Due to conflicts some ids are skipped and more real ranges are used. + s.True(expectedRange <= s.taskManager.getTaskQueueManager(tlID).rangeID) +} + +func (s *matchingEngineSuite) TestAddTaskAfterStartFailure() { + runID := uuid.NewRandom().String() + workflowID := "workflow1" + workflowExecution := &commonpb.WorkflowExecution{RunId: runID, WorkflowId: workflowID} + + namespaceID := namespace.ID(uuid.New()) + tl := "makeToast" + tlID := newTestTaskQueueID(namespaceID, tl, enumspb.TASK_QUEUE_TYPE_ACTIVITY) + + taskQueue := &taskqueuepb.TaskQueue{ + Name: tl, + Kind: enumspb.TASK_QUEUE_KIND_NORMAL, + } + + scheduledEventID := int64(0) + addRequest := matchingservice.AddActivityTaskRequest{ + NamespaceId: namespaceID.String(), + Execution: workflowExecution, + ScheduledEventId: scheduledEventID, + TaskQueue: taskQueue, + ScheduleToStartTimeout: timestamp.DurationFromSeconds(100), + } + + _, err := s.matchingEngine.AddActivityTask(context.Background(), &addRequest) + s.NoError(err) + s.EqualValues(1, s.taskManager.getTaskCount(tlID)) + + ctx, err := s.matchingEngine.getTask(context.Background(), tlID, normalStickyInfo, &pollMetadata{}) + s.NoError(err) + + ctx.finish(errors.New("test error")) + s.EqualValues(1, s.taskManager.getTaskCount(tlID)) + ctx2, err := s.matchingEngine.getTask(context.Background(), tlID, normalStickyInfo, &pollMetadata{}) + s.NoError(err) + + s.NotEqual(ctx.event.GetTaskId(), ctx2.event.GetTaskId()) + s.Equal(ctx.event.Data.GetWorkflowId(), ctx2.event.Data.GetWorkflowId()) + s.Equal(ctx.event.Data.GetRunId(), ctx2.event.Data.GetRunId()) + s.Equal(ctx.event.Data.GetScheduledEventId(), ctx2.event.Data.GetScheduledEventId()) + + ctx2.finish(nil) + s.EqualValues(0, s.taskManager.getTaskCount(tlID)) +} + +func (s *matchingEngineSuite) TestTaskQueueManagerGetTaskBatch() { + runID := uuid.NewRandom().String() + workflowID := "workflow1" + workflowExecution := &commonpb.WorkflowExecution{RunId: runID, 
WorkflowId: workflowID} + + namespaceID := namespace.ID(uuid.New()) + tl := "makeToast" + tlID := newTestTaskQueueID(namespaceID, tl, enumspb.TASK_QUEUE_TYPE_ACTIVITY) + + taskQueue := &taskqueuepb.TaskQueue{ + Name: tl, + Kind: enumspb.TASK_QUEUE_KIND_NORMAL, + } + + const taskCount = 1200 + const rangeSize = 10 + s.matchingEngine.config.RangeSize = rangeSize + + // add taskCount tasks + for i := int64(0); i < taskCount; i++ { + scheduledEventID := i * 3 + addRequest := matchingservice.AddActivityTaskRequest{ + NamespaceId: namespaceID.String(), + Execution: workflowExecution, + ScheduledEventId: scheduledEventID, + TaskQueue: taskQueue, + ScheduleToStartTimeout: timestamp.DurationFromSeconds(100), + } + + _, err := s.matchingEngine.AddActivityTask(context.Background(), &addRequest) + s.NoError(err) + } + + tlMgr, ok := s.matchingEngine.taskQueues[*tlID].(*taskQueueManagerImpl) + s.True(ok, "taskQueueManger doesn't implement taskQueueManager interface") + s.EqualValues(taskCount, s.taskManager.getTaskCount(tlID)) + + // wait until all tasks are read by the task pump and enqueued into the in-memory buffer + // at the end of this step, ackManager readLevel will also be equal to the buffer size + expectedBufSize := util.Min(cap(tlMgr.taskReader.taskBuffer), taskCount) + s.True(s.awaitCondition(func() bool { return len(tlMgr.taskReader.taskBuffer) == expectedBufSize }, time.Second)) + + // stop all goroutines that read / write tasks in the background + // remainder of this test works with the in-memory buffer + tlMgr.Stop() + + // setReadLevel should NEVER be called without updating ackManager.outstandingTasks + // This is only for unit test purpose + tlMgr.taskAckManager.setReadLevel(tlMgr.taskWriter.GetMaxReadLevel()) + batch, err := tlMgr.taskReader.getTaskBatch(context.Background()) + s.Nil(err) + s.EqualValues(0, len(batch.tasks)) + s.EqualValues(tlMgr.taskWriter.GetMaxReadLevel(), batch.readLevel) + s.True(batch.isReadBatchDone) + + tlMgr.taskAckManager.setReadLevel(0) + batch, err = tlMgr.taskReader.getTaskBatch(context.Background()) + s.Nil(err) + s.EqualValues(rangeSize, len(batch.tasks)) + s.EqualValues(rangeSize, batch.readLevel) + s.True(batch.isReadBatchDone) + + s.setupRecordActivityTaskStartedMock(tl) + + // reset the ackManager readLevel to the buffer size and consume + // the in-memory tasks by calling Poll API - assert ackMgr state + // at the end + tlMgr.taskAckManager.setReadLevel(int64(expectedBufSize)) + + // complete rangeSize events + for i := int64(0); i < rangeSize; i++ { + identity := "nobody" + result, err := s.matchingEngine.PollActivityTaskQueue(context.Background(), &matchingservice.PollActivityTaskQueueRequest{ + NamespaceId: namespaceID.String(), + PollRequest: &workflowservice.PollActivityTaskQueueRequest{ + TaskQueue: taskQueue, + Identity: identity, + }, + }, metrics.NoopMetricsHandler) + + s.NoError(err) + s.NotNil(result) + s.NotEqual(emptyPollActivityTaskQueueResponse, result) + if len(result.TaskToken) == 0 { + s.logger.Debug("empty poll returned") + continue + } + } + s.EqualValues(taskCount-rangeSize, s.taskManager.getTaskCount(tlID)) + batch, err = tlMgr.taskReader.getTaskBatch(context.Background()) + s.Nil(err) + s.True(0 < len(batch.tasks) && len(batch.tasks) <= rangeSize) + s.True(batch.isReadBatchDone) +} + +func (s *matchingEngineSuite) TestTaskQueueManagerGetTaskBatch_ReadBatchDone() { + namespaceID := namespace.ID(uuid.New()) + tl := "makeToast" + tlID := newTestTaskQueueID(namespaceID, tl, enumspb.TASK_QUEUE_TYPE_ACTIVITY) + + const rangeSize 
= 10 + const maxReadLevel = int64(120) + config := defaultTestConfig() + config.RangeSize = rangeSize + tlMgr0, err := newTaskQueueManager(s.matchingEngine, tlID, normalStickyInfo, config) + s.NoError(err) + + tlMgr, ok := tlMgr0.(*taskQueueManagerImpl) + s.True(ok) + + tlMgr.Start() + + // tlMgr.taskWriter startup is async so give it time to complete, otherwise + // the following few lines get clobbered as part of the taskWriter.Start() + time.Sleep(100 * time.Millisecond) + + tlMgr.taskAckManager.setReadLevel(0) + atomic.StoreInt64(&tlMgr.taskWriter.maxReadLevel, maxReadLevel) + batch, err := tlMgr.taskReader.getTaskBatch(context.Background()) + s.Empty(batch.tasks) + s.Equal(int64(rangeSize*10), batch.readLevel) + s.False(batch.isReadBatchDone) + s.NoError(err) + + tlMgr.taskAckManager.setReadLevel(batch.readLevel) + batch, err = tlMgr.taskReader.getTaskBatch(context.Background()) + s.Empty(batch.tasks) + s.Equal(maxReadLevel, batch.readLevel) + s.True(batch.isReadBatchDone) + s.NoError(err) +} + +func (s *matchingEngineSuite) TestTaskQueueManager_CyclingBehavior() { + namespaceID := namespace.ID(uuid.New()) + tl := "makeToast" + tlID := newTestTaskQueueID(namespaceID, tl, enumspb.TASK_QUEUE_TYPE_ACTIVITY) + config := defaultTestConfig() + + for i := 0; i < 4; i++ { + prevGetTasksCount := s.taskManager.getGetTasksCount(tlID) + + tlMgr, err := newTaskQueueManager(s.matchingEngine, tlID, normalStickyInfo, config) + s.NoError(err) + + tlMgr.Start() + // tlMgr.taskWriter startup is async so give it time to complete + time.Sleep(100 * time.Millisecond) + tlMgr.Stop() + + getTasksCount := s.taskManager.getGetTasksCount(tlID) - prevGetTasksCount + s.LessOrEqual(getTasksCount, 1) + } +} + +func (s *matchingEngineSuite) TestTaskExpiryAndCompletion() { + runID := uuid.NewRandom().String() + workflowID := uuid.New() + workflowExecution := &commonpb.WorkflowExecution{RunId: runID, WorkflowId: workflowID} + + namespaceID := namespace.ID(uuid.New()) + tl := "task-expiry-completion-tl0" + tlID := newTestTaskQueueID(namespaceID, tl, enumspb.TASK_QUEUE_TYPE_ACTIVITY) + + taskQueue := &taskqueuepb.TaskQueue{ + Name: tl, + Kind: enumspb.TASK_QUEUE_KIND_NORMAL, + } + + const taskCount = 20 // must be multiple of 4 + const rangeSize = 10 + s.matchingEngine.config.RangeSize = rangeSize + s.matchingEngine.config.MaxTaskDeleteBatchSize = dynamicconfig.GetIntPropertyFilteredByTaskQueueInfo(2) + + testCases := []struct { + maxTimeBtwnDeletes time.Duration + }{ + {time.Minute}, // test taskGC deleting due to size threshold + {time.Nanosecond}, // test taskGC deleting due to time condition + } + + for _, tc := range testCases { + for i := int64(0); i < taskCount; i++ { + scheduledEventID := i * 3 + addRequest := matchingservice.AddActivityTaskRequest{ + NamespaceId: namespaceID.String(), + Execution: workflowExecution, + ScheduledEventId: scheduledEventID, + TaskQueue: taskQueue, + ScheduleToStartTimeout: timestamp.DurationFromSeconds(100), + } + switch i % 4 { + case 0: + // simulates creating a task whose scheduledToStartTimeout is already expired + addRequest.ScheduleToStartTimeout = timestamp.DurationFromSeconds(-5) + case 2: + // simulates creating a task which will time out in the buffer + addRequest.ScheduleToStartTimeout = timestamp.DurationPtr(250 * time.Millisecond) + } + _, err := s.matchingEngine.AddActivityTask(context.Background(), &addRequest) + s.NoError(err) + } + + tlMgr, ok := s.matchingEngine.taskQueues[*tlID].(*taskQueueManagerImpl) + s.True(ok, "failed to load task queue") + 
s.EqualValues(taskCount, s.taskManager.getTaskCount(tlID)) + + // wait until all tasks are loaded by into in-memory buffers by task queue manager + // the buffer size should be one less than expected because dispatcher will dequeue the head + // 1/4 should be thrown out because they are expired before they hit the buffer + s.True(s.awaitCondition(func() bool { return len(tlMgr.taskReader.taskBuffer) >= (3*taskCount/4 - 1) }, time.Second)) + + // ensure the 1/4 of tasks with small ScheduleToStartTimeout will be expired when they come out of the buffer + time.Sleep(300 * time.Millisecond) + + maxTimeBetweenTaskDeletes = tc.maxTimeBtwnDeletes + + s.setupRecordActivityTaskStartedMock(tl) + + pollReq := &matchingservice.PollActivityTaskQueueRequest{ + NamespaceId: namespaceID.String(), + PollRequest: &workflowservice.PollActivityTaskQueueRequest{TaskQueue: taskQueue, Identity: "test"}, + } + + remaining := taskCount + for i := 0; i < 2; i++ { + // verify that (1) expired tasks are not returned in poll result (2) taskCleaner deletes tasks correctly + for i := int64(0); i < taskCount/4; i++ { + result, err := s.matchingEngine.PollActivityTaskQueue(context.Background(), pollReq, metrics.NoopMetricsHandler) + s.NoError(err) + s.NotNil(result) + s.NotEqual(result, emptyPollActivityTaskQueueResponse) + } + remaining -= taskCount / 2 + // since every other task is expired, we expect half the tasks to be deleted + // after poll consumed 1/4th of what is available. + // however, the gc is best-effort and might not run exactly when we want it to. + // various thread interleavings between the two task reader threads and this one + // might leave the gc behind by up to 3 tasks, or ahead by up to 1. + delta := remaining - s.taskManager.getTaskCount(tlID) + s.Truef(-3 <= delta && delta <= 1, "remaining %d, getTaskCount %d", remaining, s.taskManager.getTaskCount(tlID)) + } + // ensure full gc for the next case (twice in case one doesn't get the gc lock) + tlMgr.taskGC.RunNow(context.Background(), tlMgr.taskAckManager.getAckLevel()) + tlMgr.taskGC.RunNow(context.Background(), tlMgr.taskAckManager.getAckLevel()) + } +} + +func (s *matchingEngineSuite) TestGetVersioningData() { + namespaceID := namespace.ID(uuid.New()) + tq := "tupac" + + // Ensure we can fetch without first needing to set anything + res, err := s.matchingEngine.GetWorkerBuildIdCompatibility(context.Background(), &matchingservice.GetWorkerBuildIdCompatibilityRequest{ + NamespaceId: namespaceID.String(), + Request: &workflowservice.GetWorkerBuildIdCompatibilityRequest{ + Namespace: namespaceID.String(), + TaskQueue: tq, + MaxSets: 0, + }, + }) + s.NoError(err) + s.NotNil(res) + + // Set a long list of versions + for i := 0; i < 10; i++ { + id := fmt.Sprintf("%d", i) + res, err := s.matchingEngine.UpdateWorkerBuildIdCompatibility(context.Background(), &matchingservice.UpdateWorkerBuildIdCompatibilityRequest{ + NamespaceId: namespaceID.String(), + TaskQueue: tq, + Operation: &matchingservice.UpdateWorkerBuildIdCompatibilityRequest_ApplyPublicRequest_{ + ApplyPublicRequest: &matchingservice.UpdateWorkerBuildIdCompatibilityRequest_ApplyPublicRequest{ + Request: &workflowservice.UpdateWorkerBuildIdCompatibilityRequest{ + Namespace: namespaceID.String(), + TaskQueue: tq, + Operation: &workflowservice.UpdateWorkerBuildIdCompatibilityRequest_AddNewBuildIdInNewDefaultSet{ + AddNewBuildIdInNewDefaultSet: id, + }, + }, + }, + }, + }) + s.NoError(err) + s.NotNil(res) + } + // Make a long compat-versions chain + for i := 0; i < 80; i++ { + id := 
fmt.Sprintf("9.%d", i) + prevCompat := fmt.Sprintf("9.%d", i-1) + if i == 0 { + prevCompat = "9" + } + res, err := s.matchingEngine.UpdateWorkerBuildIdCompatibility(context.Background(), &matchingservice.UpdateWorkerBuildIdCompatibilityRequest{ + NamespaceId: namespaceID.String(), + TaskQueue: tq, + Operation: &matchingservice.UpdateWorkerBuildIdCompatibilityRequest_ApplyPublicRequest_{ + ApplyPublicRequest: &matchingservice.UpdateWorkerBuildIdCompatibilityRequest_ApplyPublicRequest{ + Request: &workflowservice.UpdateWorkerBuildIdCompatibilityRequest{ + Namespace: namespaceID.String(), + TaskQueue: tq, + Operation: &workflowservice.UpdateWorkerBuildIdCompatibilityRequest_AddNewCompatibleBuildId{ + AddNewCompatibleBuildId: &workflowservice.UpdateWorkerBuildIdCompatibilityRequest_AddNewCompatibleVersion{ + NewBuildId: id, + ExistingCompatibleBuildId: prevCompat, + MakeSetDefault: false, + }, + }, + }, + }, + }, + }) + s.NoError(err) + s.NotNil(res) + } + + // Ensure they all exist + res, err = s.matchingEngine.GetWorkerBuildIdCompatibility(context.Background(), &matchingservice.GetWorkerBuildIdCompatibilityRequest{ + NamespaceId: namespaceID.String(), + Request: &workflowservice.GetWorkerBuildIdCompatibilityRequest{ + Namespace: namespaceID.String(), + TaskQueue: tq, + MaxSets: 0, + }, + }) + s.NoError(err) + majorSets := res.GetResponse().GetMajorVersionSets() + curDefault := majorSets[len(majorSets)-1] + s.NotNil(curDefault) + s.Equal("9", curDefault.GetBuildIds()[0]) + lastNode := curDefault.GetBuildIds()[len(curDefault.GetBuildIds())-1] + s.Equal("9.79", lastNode) + s.Equal("0", majorSets[0].GetBuildIds()[0]) + + // Ensure depth limiting works + res, err = s.matchingEngine.GetWorkerBuildIdCompatibility(context.Background(), &matchingservice.GetWorkerBuildIdCompatibilityRequest{ + NamespaceId: namespaceID.String(), + Request: &workflowservice.GetWorkerBuildIdCompatibilityRequest{ + Namespace: namespaceID.String(), + TaskQueue: tq, + MaxSets: 1, + }, + }) + s.NoError(err) + majorSets = res.GetResponse().GetMajorVersionSets() + curDefault = majorSets[len(majorSets)-1] + s.Equal("9", curDefault.GetBuildIds()[0]) + lastNode = curDefault.GetBuildIds()[len(curDefault.GetBuildIds())-1] + s.Equal("9.79", lastNode) + s.Equal(1, len(majorSets)) + + res, err = s.matchingEngine.GetWorkerBuildIdCompatibility(context.Background(), &matchingservice.GetWorkerBuildIdCompatibilityRequest{ + NamespaceId: namespaceID.String(), + Request: &workflowservice.GetWorkerBuildIdCompatibilityRequest{ + Namespace: namespaceID.String(), + TaskQueue: tq, + MaxSets: 5, + }, + }) + s.NoError(err) + majorSets = res.GetResponse().GetMajorVersionSets() + s.Equal("5", majorSets[0].GetBuildIds()[0]) +} + +func (s *matchingEngineSuite) TestGetTaskQueueUserData_NoData() { + namespaceID := namespace.ID(uuid.New()) + tq := "tupac" + + res, err := s.matchingEngine.GetTaskQueueUserData(context.Background(), &matchingservice.GetTaskQueueUserDataRequest{ + NamespaceId: namespaceID.String(), + TaskQueue: tq, + TaskQueueType: enumspb.TASK_QUEUE_TYPE_WORKFLOW, + LastKnownUserDataVersion: 0, + }) + s.NoError(err) + s.False(res.TaskQueueHasUserData) + s.Nil(res.UserData.GetData()) +} + +func (s *matchingEngineSuite) TestGetTaskQueueUserData_ReturnsData() { + namespaceID := namespace.ID(uuid.New()) + tq := "tupac" + + userData := &persistencespb.VersionedTaskQueueUserData{ + Version: 1, + Data: &persistencespb.TaskQueueUserData{Clock: &clockspb.HybridLogicalClock{WallClock: 123456}}, + } + 
s.NoError(s.taskManager.UpdateTaskQueueUserData(context.Background(), + &persistence.UpdateTaskQueueUserDataRequest{ + NamespaceID: namespaceID.String(), + TaskQueue: tq, + UserData: userData, + })) + userData.Version++ + + res, err := s.matchingEngine.GetTaskQueueUserData(context.Background(), &matchingservice.GetTaskQueueUserDataRequest{ + NamespaceId: namespaceID.String(), + TaskQueue: tq, + TaskQueueType: enumspb.TASK_QUEUE_TYPE_WORKFLOW, + LastKnownUserDataVersion: 0, + }) + s.NoError(err) + s.True(res.TaskQueueHasUserData) + s.Equal(res.UserData, userData) +} + +func (s *matchingEngineSuite) TestGetTaskQueueUserData_ReturnsEmpty() { + namespaceID := namespace.ID(uuid.New()) + tq := "tupac" + + userData := &persistencespb.VersionedTaskQueueUserData{ + Version: 1, + Data: &persistencespb.TaskQueueUserData{Clock: &clockspb.HybridLogicalClock{WallClock: 123456}}, + } + s.NoError(s.taskManager.UpdateTaskQueueUserData(context.Background(), + &persistence.UpdateTaskQueueUserDataRequest{ + NamespaceID: namespaceID.String(), + TaskQueue: tq, + UserData: userData, + })) + userData.Version++ + + res, err := s.matchingEngine.GetTaskQueueUserData(context.Background(), &matchingservice.GetTaskQueueUserDataRequest{ + NamespaceId: namespaceID.String(), + TaskQueue: tq, + TaskQueueType: enumspb.TASK_QUEUE_TYPE_WORKFLOW, + LastKnownUserDataVersion: userData.Version, + }) + s.NoError(err) + s.True(res.TaskQueueHasUserData) + s.Nil(res.UserData.GetData()) +} + +func (s *matchingEngineSuite) TestGetTaskQueueUserData_LongPoll_Expires() { + namespaceID := namespace.ID(uuid.New()) + tq := "tupac" + + userData := &persistencespb.VersionedTaskQueueUserData{ + Version: 1, + Data: &persistencespb.TaskQueueUserData{Clock: &clockspb.HybridLogicalClock{WallClock: 123456}}, + } + s.NoError(s.taskManager.UpdateTaskQueueUserData(context.Background(), + &persistence.UpdateTaskQueueUserDataRequest{ + NamespaceID: namespaceID.String(), + TaskQueue: tq, + UserData: userData, + })) + userData.Version++ + + // GetTaskQueueUserData will try to return 5s with a min of 1s before the deadline, so this will block 1s + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + defer cancel() + + start := time.Now() + res, err := s.matchingEngine.GetTaskQueueUserData(ctx, &matchingservice.GetTaskQueueUserDataRequest{ + NamespaceId: namespaceID.String(), + TaskQueue: tq, + TaskQueueType: enumspb.TASK_QUEUE_TYPE_WORKFLOW, + LastKnownUserDataVersion: userData.Version, + WaitNewData: true, + }) + s.NoError(err) + s.True(res.TaskQueueHasUserData) + s.Nil(res.UserData.GetData()) + elapsed := time.Since(start) + s.Greater(elapsed, 900*time.Millisecond) +} + +func (s *matchingEngineSuite) TestGetTaskQueueUserData_LongPoll_WakesUp_FromNothing() { + namespaceID := namespace.ID(uuid.New()) + tq := "tupac" + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + go func() { + time.Sleep(200 * time.Millisecond) + + _, err := s.matchingEngine.UpdateWorkerBuildIdCompatibility(context.Background(), &matchingservice.UpdateWorkerBuildIdCompatibilityRequest{ + NamespaceId: namespaceID.String(), + TaskQueue: tq, + Operation: &matchingservice.UpdateWorkerBuildIdCompatibilityRequest_ApplyPublicRequest_{ + ApplyPublicRequest: &matchingservice.UpdateWorkerBuildIdCompatibilityRequest_ApplyPublicRequest{ + Request: &workflowservice.UpdateWorkerBuildIdCompatibilityRequest{ + Namespace: namespaceID.String(), + TaskQueue: tq, + Operation: 
&workflowservice.UpdateWorkerBuildIdCompatibilityRequest_AddNewBuildIdInNewDefaultSet{ + AddNewBuildIdInNewDefaultSet: "v1", + }, + }, + }, + }, + }) + s.NoError(err) + }() + + res, err := s.matchingEngine.GetTaskQueueUserData(ctx, &matchingservice.GetTaskQueueUserDataRequest{ + NamespaceId: namespaceID.String(), + TaskQueue: tq, + TaskQueueType: enumspb.TASK_QUEUE_TYPE_WORKFLOW, + LastKnownUserDataVersion: 0, // must be zero to start + WaitNewData: true, + }) + s.NoError(err) + s.True(res.TaskQueueHasUserData) + s.NotNil(res.UserData.Data.VersioningData) +} + +func (s *matchingEngineSuite) TestGetTaskQueueUserData_LongPoll_WakesUp_From2to3() { + namespaceID := namespace.ID(uuid.New()) + tq := "tupac" + + userData := &persistencespb.VersionedTaskQueueUserData{ + Version: 1, + Data: &persistencespb.TaskQueueUserData{Clock: &clockspb.HybridLogicalClock{WallClock: 123456}}, + } + s.NoError(s.taskManager.UpdateTaskQueueUserData(context.Background(), + &persistence.UpdateTaskQueueUserDataRequest{ + NamespaceID: namespaceID.String(), + TaskQueue: tq, + UserData: userData, + })) + userData.Version++ + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + go func() { + time.Sleep(200 * time.Millisecond) + + _, err := s.matchingEngine.UpdateWorkerBuildIdCompatibility(context.Background(), &matchingservice.UpdateWorkerBuildIdCompatibilityRequest{ + NamespaceId: namespaceID.String(), + TaskQueue: tq, + Operation: &matchingservice.UpdateWorkerBuildIdCompatibilityRequest_ApplyPublicRequest_{ + ApplyPublicRequest: &matchingservice.UpdateWorkerBuildIdCompatibilityRequest_ApplyPublicRequest{ + Request: &workflowservice.UpdateWorkerBuildIdCompatibilityRequest{ + Namespace: namespaceID.String(), + TaskQueue: tq, + Operation: &workflowservice.UpdateWorkerBuildIdCompatibilityRequest_AddNewBuildIdInNewDefaultSet{ + AddNewBuildIdInNewDefaultSet: "v1", + }, + }, + }, + }, + }) + s.NoError(err) + }() + + res, err := s.matchingEngine.GetTaskQueueUserData(ctx, &matchingservice.GetTaskQueueUserDataRequest{ + NamespaceId: namespaceID.String(), + TaskQueue: tq, + TaskQueueType: enumspb.TASK_QUEUE_TYPE_WORKFLOW, + LastKnownUserDataVersion: userData.Version, + WaitNewData: true, + }) + s.NoError(err) + s.True(res.TaskQueueHasUserData) + s.True(hybrid_logical_clock.Greater(*res.UserData.Data.Clock, *userData.Data.Clock)) + s.NotNil(res.UserData.Data.VersioningData) +} + +func (s *matchingEngineSuite) TestGetTaskQueueUserData_LongPoll_Closes() { + namespaceID := namespace.ID(uuid.New()) + tq := "tupac" + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + go func() { + time.Sleep(200 * time.Millisecond) + _, _ = s.matchingEngine.ForceUnloadTaskQueue(context.Background(), &matchingservice.ForceUnloadTaskQueueRequest{ + NamespaceId: namespaceID.String(), + TaskQueue: tq, + TaskQueueType: enumspb.TASK_QUEUE_TYPE_WORKFLOW, + }) + }() + + _, err := s.matchingEngine.GetTaskQueueUserData(ctx, &matchingservice.GetTaskQueueUserDataRequest{ + NamespaceId: namespaceID.String(), + TaskQueue: tq, + TaskQueueType: enumspb.TASK_QUEUE_TYPE_WORKFLOW, + WaitNewData: true, + }) + s.ErrorAs(err, new(*serviceerror.Unavailable)) + +} + +func (s *matchingEngineSuite) TestUpdateUserData_FailsOnKnownVersionMismatch() { + namespaceID := namespace.ID(uuid.New()) + tq := "tupac" + + userData := &persistencespb.VersionedTaskQueueUserData{ + Version: 1, + Data: &persistencespb.TaskQueueUserData{Clock: &clockspb.HybridLogicalClock{WallClock: 123456}}, + } + err := 
s.taskManager.UpdateTaskQueueUserData(context.Background(), + &persistence.UpdateTaskQueueUserDataRequest{ + NamespaceID: namespaceID.String(), + TaskQueue: tq, + UserData: userData, + }) + s.NoError(err) + + _, err = s.matchingEngine.UpdateWorkerBuildIdCompatibility(context.Background(), &matchingservice.UpdateWorkerBuildIdCompatibilityRequest{ + NamespaceId: namespaceID.String(), + TaskQueue: tq, + Operation: &matchingservice.UpdateWorkerBuildIdCompatibilityRequest_RemoveBuildIds_{ + RemoveBuildIds: &matchingservice.UpdateWorkerBuildIdCompatibilityRequest_RemoveBuildIds{ + KnownUserDataVersion: 1, + }, + }, + }) + var failedPreconditionError *serviceerror.FailedPrecondition + s.ErrorAs(err, &failedPreconditionError) +} + +func (s *matchingEngineSuite) TestAddWorkflowTask_ForVersionedWorkflows_SilentlyDroppedWhenDisablingLoadingUserData() { + namespaceId := uuid.New() + tq := taskqueuepb.TaskQueue{ + Name: "test", + Kind: enumspb.TASK_QUEUE_KIND_NORMAL, + } + s.matchingEngine.config.LoadUserData = dynamicconfig.GetBoolPropertyFnFilteredByTaskQueueInfo(false) + + _, err := s.matchingEngine.AddWorkflowTask(context.Background(), &matchingservice.AddWorkflowTaskRequest{ + NamespaceId: namespaceId, + Execution: &commonpb.WorkflowExecution{ + WorkflowId: "test", + RunId: uuid.New(), + }, + TaskQueue: &tq, + ScheduledEventId: 7, + Source: enums.TASK_SOURCE_HISTORY, + VersionDirective: &taskqueue.TaskVersionDirective{ + Value: &taskqueue.TaskVersionDirective_UseDefault{UseDefault: &types.Empty{}}, + }, + }) + s.Require().NoError(err) +} + +func (s *matchingEngineSuite) TestAddActivityTask_ForVersionedWorkflows_SilentlyDroppedWhenDisablingLoadingUserData() { + namespaceId := uuid.New() + tq := taskqueuepb.TaskQueue{ + Name: "test", + Kind: enumspb.TASK_QUEUE_KIND_NORMAL, + } + s.matchingEngine.config.LoadUserData = dynamicconfig.GetBoolPropertyFnFilteredByTaskQueueInfo(false) + + _, err := s.matchingEngine.AddActivityTask(context.Background(), &matchingservice.AddActivityTaskRequest{ + NamespaceId: namespaceId, + Execution: &commonpb.WorkflowExecution{ + WorkflowId: "test", + RunId: uuid.New(), + }, + TaskQueue: &tq, + ScheduledEventId: 7, + Source: enums.TASK_SOURCE_HISTORY, + VersionDirective: &taskqueue.TaskVersionDirective{ + Value: &taskqueue.TaskVersionDirective_UseDefault{UseDefault: &types.Empty{}}, + }, + }) + s.Require().NoError(err) +} + +func (s *matchingEngineSuite) TestUnknownBuildId_Poll() { + namespaceId := namespace.ID(uuid.New()) + tq := "makeToast" + tqId := newTestTaskQueueID(namespaceId, tq, enumspb.TASK_QUEUE_TYPE_WORKFLOW) + + scope := tally.NewTestScope("test", nil) + s.matchingEngine.metricsHandler = metrics.NewTallyMetricsHandler(metrics.ClientConfig{}, scope) + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) + defer cancel() + + _, err := s.matchingEngine.getTask(ctx, tqId, normalStickyInfo, &pollMetadata{ + workerVersionCapabilities: &commonpb.WorkerVersionCapabilities{ + BuildId: "unknown", + UseVersioning: true, + }, + }) + s.Error(err) // deadline exceeded or canceled + + unknownCtr := scope.Snapshot().Counters()["test.unknown_build_polls+namespace="+matchingTestNamespace+",operation=TaskQueueMgr,task_type=Workflow,taskqueue=makeToast"] + s.Equal(int64(1), unknownCtr.Value()) +} + +func (s *matchingEngineSuite) TestUnknownBuildId_Add() { + namespaceId := namespace.ID(uuid.New()) + tq := "makeToast" + + scope := tally.NewTestScope("test", nil) + s.matchingEngine.metricsHandler = metrics.NewTallyMetricsHandler(metrics.ClientConfig{}, 
scope) + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) + defer cancel() + + s.mockMatchingClient.EXPECT().UpdateWorkerBuildIdCompatibility(gomock.Any(), &matchingservice.UpdateWorkerBuildIdCompatibilityRequest{ + NamespaceId: namespaceId.String(), + TaskQueue: tq, + Operation: &matchingservice.UpdateWorkerBuildIdCompatibilityRequest_PersistUnknownBuildId{ + PersistUnknownBuildId: "unknown", + }, + }).Return(&matchingservice.UpdateWorkerBuildIdCompatibilityResponse{}, nil) + + _, err := s.matchingEngine.AddWorkflowTask(ctx, &matchingservice.AddWorkflowTaskRequest{ + NamespaceId: namespaceId.String(), + Execution: &commonpb.WorkflowExecution{RunId: "run", WorkflowId: "wf"}, + ScheduledEventId: 0, + TaskQueue: &taskqueuepb.TaskQueue{Name: tq, Kind: enumspb.TASK_QUEUE_KIND_NORMAL}, + ScheduleToStartTimeout: timestamp.DurationFromSeconds(100), + ForwardedSource: "somewhere", // force sync match only + VersionDirective: &taskqueue.TaskVersionDirective{ + Value: &taskqueue.TaskVersionDirective_BuildId{ + BuildId: "unknown", + }, + }, + }) + s.ErrorIs(err, errRemoteSyncMatchFailed) + + unknownCtr := scope.Snapshot().Counters()["test.unknown_build_tasks+namespace="+matchingTestNamespace+",operation=TaskQueueMgr,task_type=Workflow,taskqueue=makeToast"] + s.Equal(int64(1), unknownCtr.Value()) +} + +func (s *matchingEngineSuite) TestUnknownBuildId_Match() { + namespaceId := namespace.ID(uuid.New()) + tq := "makeToast" + + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + + s.mockMatchingClient.EXPECT().UpdateWorkerBuildIdCompatibility(gomock.Any(), &matchingservice.UpdateWorkerBuildIdCompatibilityRequest{ + NamespaceId: namespaceId.String(), + TaskQueue: tq, + Operation: &matchingservice.UpdateWorkerBuildIdCompatibilityRequest_PersistUnknownBuildId{ + PersistUnknownBuildId: "unknown", + }, + }).Return(&matchingservice.UpdateWorkerBuildIdCompatibilityResponse{}, nil).AnyTimes() // might get called again on dispatch from spooled + + var wg sync.WaitGroup + wg.Add(2) + + go func() { + _, err := s.matchingEngine.AddWorkflowTask(ctx, &matchingservice.AddWorkflowTaskRequest{ + NamespaceId: namespaceId.String(), + Execution: &commonpb.WorkflowExecution{RunId: "run", WorkflowId: "wf"}, + ScheduledEventId: 123, + TaskQueue: &taskqueuepb.TaskQueue{Name: tq, Kind: enumspb.TASK_QUEUE_KIND_NORMAL}, + ScheduleToStartTimeout: timestamp.DurationFromSeconds(100), + // do not set ForwardedSource, allow to go to db + VersionDirective: &taskqueue.TaskVersionDirective{ + Value: &taskqueue.TaskVersionDirective_BuildId{ + BuildId: "unknown", + }, + }, + }) + s.NoError(err) + wg.Done() + }() + + go func() { + tqId := newTestTaskQueueID(namespaceId, tq, enumspb.TASK_QUEUE_TYPE_WORKFLOW) + task, err := s.matchingEngine.getTask(ctx, tqId, normalStickyInfo, &pollMetadata{ + workerVersionCapabilities: &commonpb.WorkerVersionCapabilities{ + BuildId: "unknown", + UseVersioning: true, + }, + }) + s.NoError(err) + s.Equal("wf", task.event.Data.WorkflowId) + s.Equal(int64(123), task.event.Data.ScheduledEventId) + task.finish(nil) + wg.Done() + }() + + wg.Wait() +} + +func (s *matchingEngineSuite) TestUnknownBuildId_Demoted_Match() { + namespaceId := namespace.ID(uuid.New()) + tq := "makeToast" + unknown := "unknown" + build1 := "build1" + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + s.mockMatchingClient.EXPECT().UpdateWorkerBuildIdCompatibility(gomock.Any(), 
&matchingservice.UpdateWorkerBuildIdCompatibilityRequest{ + NamespaceId: namespaceId.String(), + TaskQueue: tq, + Operation: &matchingservice.UpdateWorkerBuildIdCompatibilityRequest_PersistUnknownBuildId{ + PersistUnknownBuildId: unknown, + }, + }).Return(&matchingservice.UpdateWorkerBuildIdCompatibilityResponse{}, nil).AnyTimes() // might get called again on dispatch from spooled + + // add a task for an unknown build id, will get redirected to guessed set + _, err := s.matchingEngine.AddWorkflowTask(ctx, &matchingservice.AddWorkflowTaskRequest{ + NamespaceId: namespaceId.String(), + Execution: &commonpb.WorkflowExecution{RunId: "run", WorkflowId: "wf"}, + ScheduledEventId: 123, + TaskQueue: &taskqueuepb.TaskQueue{Name: tq, Kind: enumspb.TASK_QUEUE_KIND_NORMAL}, + VersionDirective: &taskqueue.TaskVersionDirective{ + Value: &taskqueue.TaskVersionDirective_BuildId{ + BuildId: unknown, + }, + }, + }) + s.NoError(err) + // allow taskReader to finish starting dispatch loop so we don't get an extra load + time.Sleep(10 * time.Millisecond) + + // unload base and versioned tqm. note: unload versioned first since versioned taskReader + // tries to load base for dispatching. + id := newTestTaskQueueID(namespaceId, tq, enumspb.TASK_QUEUE_TYPE_WORKFLOW) + verId := newTaskQueueIDWithVersionSet(id, hashBuildId(unknown)) + verTqm, err := s.matchingEngine.getTaskQueueManager(ctx, verId, normalStickyInfo, false) + s.NoError(err) + s.NotNil(verTqm) + s.matchingEngine.unloadTaskQueue(verTqm) + // allow taskReader goroutines time to exit + time.Sleep(10 * time.Millisecond) + + // unload base + baseTqm, err := s.matchingEngine.getTaskQueueManager(ctx, id, normalStickyInfo, false) + s.NoError(err) + s.NotNil(baseTqm) + s.matchingEngine.unloadTaskQueue(baseTqm) + // allow taskReader goroutines time to exit + time.Sleep(10 * time.Millisecond) + + // both are now unloaded. change versioning data to merge unknown into another set. + clock := hlc.Zero(1) + userData := &persistencespb.TaskQueueUserData{ + Clock: &clock, + VersioningData: &persistencespb.VersioningData{ + VersionSets: []*persistencespb.CompatibleVersionSet{ + { + // make "unknown" the demoted one to test demoted set loading. + // it works the other way too but doesn't test anything new. 
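+ // Listing both hashed ids in SetIds merges the two sets, so the task spooled earlier under the "unknown" build id's queue remains reachable after the merge; the poll at the end of this test advertises build1 and is expected to receive that task.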
+ SetIds: []string{hashBuildId(build1), hashBuildId(unknown)}, + BuildIds: []*persistencespb.BuildId{ + mkBuildId(unknown, clock), + mkBuildId(build1, clock), + }, + BecameDefaultTimestamp: &clock, + }, + }, + }, + } + err = s.taskManager.UpdateTaskQueueUserData(ctx, &persistence.UpdateTaskQueueUserDataRequest{ + NamespaceID: namespaceId.String(), + TaskQueue: tq, + UserData: &persistencespb.VersionedTaskQueueUserData{ + Data: userData, + Version: 34, + }, + }) + s.NoError(err) + + // now poll for the task + task, err := s.matchingEngine.getTask(ctx, id, normalStickyInfo, &pollMetadata{ + workerVersionCapabilities: &commonpb.WorkerVersionCapabilities{ + BuildId: build1, + UseVersioning: true, + }, + }) + s.Require().NoError(err) + s.Equal("wf", task.event.Data.WorkflowId) + s.Equal(int64(123), task.event.Data.ScheduledEventId) + task.finish(nil) +} + +func (s *matchingEngineSuite) setupRecordActivityTaskStartedMock(tlName string) { + activityTypeName := "activity1" + activityID := "activityId1" + activityType := &commonpb.ActivityType{Name: activityTypeName} + activityInput := payloads.EncodeString("Activity1 Input") + + // History service is using mock + s.mockHistoryClient.EXPECT().RecordActivityTaskStarted(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn( + func(ctx context.Context, taskRequest *historyservice.RecordActivityTaskStartedRequest, arg2 ...interface{}) (*historyservice.RecordActivityTaskStartedResponse, error) { + s.logger.Debug("Mock Received RecordActivityTaskStartedRequest") + return &historyservice.RecordActivityTaskStartedResponse{ + Attempt: 1, + ScheduledEvent: newActivityTaskScheduledEvent(taskRequest.ScheduledEventId, 0, + &commandpb.ScheduleActivityTaskCommandAttributes{ + ActivityId: activityID, + TaskQueue: &taskqueuepb.TaskQueue{ + Name: tlName, + Kind: enumspb.TASK_QUEUE_KIND_NORMAL, + }, + ActivityType: activityType, + Input: activityInput, + ScheduleToCloseTimeout: timestamp.DurationPtr(100 * time.Second), + ScheduleToStartTimeout: timestamp.DurationPtr(50 * time.Second), + StartToCloseTimeout: timestamp.DurationPtr(50 * time.Second), + HeartbeatTimeout: timestamp.DurationPtr(10 * time.Second), + }), + }, nil + }).AnyTimes() +} + +func (s *matchingEngineSuite) awaitCondition(cond func() bool, timeout time.Duration) bool { + expiry := time.Now().UTC().Add(timeout) + for !cond() { + time.Sleep(time.Millisecond * 5) + if time.Now().UTC().After(expiry) { + return false + } + } + return true +} + +func newActivityTaskScheduledEvent(eventID int64, workflowTaskCompletedEventID int64, + scheduleAttributes *commandpb.ScheduleActivityTaskCommandAttributes, +) *historypb.HistoryEvent { + historyEvent := newHistoryEvent(eventID, enumspb.EVENT_TYPE_ACTIVITY_TASK_SCHEDULED) + historyEvent.Attributes = &historypb.HistoryEvent_ActivityTaskScheduledEventAttributes{ActivityTaskScheduledEventAttributes: &historypb.ActivityTaskScheduledEventAttributes{ + ActivityId: scheduleAttributes.ActivityId, + ActivityType: scheduleAttributes.ActivityType, + TaskQueue: scheduleAttributes.TaskQueue, + Input: scheduleAttributes.Input, + Header: scheduleAttributes.Header, + ScheduleToCloseTimeout: scheduleAttributes.ScheduleToCloseTimeout, + ScheduleToStartTimeout: scheduleAttributes.ScheduleToStartTimeout, + StartToCloseTimeout: scheduleAttributes.StartToCloseTimeout, + HeartbeatTimeout: scheduleAttributes.HeartbeatTimeout, + WorkflowTaskCompletedEventId: workflowTaskCompletedEventID, + }} + return historyEvent +} + +func newHistoryEvent(eventID int64, eventType enumspb.EventType) 
*historypb.HistoryEvent { + historyEvent := &historypb.HistoryEvent{ + EventId: eventID, + EventTime: timestamp.TimePtr(time.Now().UTC()), + EventType: eventType, + } + + return historyEvent +} + +var _ persistence.TaskManager = (*testTaskManager)(nil) // Asserts that interface is indeed implemented + +type testTaskManager struct { + sync.Mutex + taskQueues map[taskQueueID]*testTaskQueueManager + logger log.Logger +} + +func newTestTaskManager(logger log.Logger) *testTaskManager { + return &testTaskManager{taskQueues: make(map[taskQueueID]*testTaskQueueManager), logger: logger} +} + +func (m *testTaskManager) GetName() string { + return "test" +} + +func (m *testTaskManager) Close() { +} + +func (m *testTaskManager) getTaskQueueManager(id *taskQueueID) *testTaskQueueManager { + m.Lock() + defer m.Unlock() + result, ok := m.taskQueues[*id] + if ok { + return result + } + result = newTestTaskQueueManager() + m.taskQueues[*id] = result + return result +} + +type testTaskQueueManager struct { + sync.Mutex + rangeID int64 + ackLevel int64 + createTaskCount int + getTasksCount int + getUserDataCount int + updateCount int + tasks *treemap.Map + userData *persistencespb.VersionedTaskQueueUserData +} + +func (m *testTaskQueueManager) RangeID() int64 { + m.Lock() + defer m.Unlock() + return m.rangeID +} + +func Int64Comparator(a, b interface{}) int { + aAsserted := a.(int64) + bAsserted := b.(int64) + switch { + case aAsserted > bAsserted: + return 1 + case aAsserted < bAsserted: + return -1 + default: + return 0 + } +} + +func newTestTaskQueueManager() *testTaskQueueManager { + return &testTaskQueueManager{tasks: treemap.NewWith(Int64Comparator)} +} + +func newTestTaskQueueID(namespaceID namespace.ID, name string, taskType enumspb.TaskQueueType) *taskQueueID { + result, err := newTaskQueueID(namespaceID, name, taskType) + if err != nil { + panic(fmt.Sprintf("newTaskQueueID failed with error %v", err)) + } + return result +} + +func (m *testTaskManager) CreateTaskQueue( + _ context.Context, + request *persistence.CreateTaskQueueRequest, +) (*persistence.CreateTaskQueueResponse, error) { + tli := request.TaskQueueInfo + tlm := m.getTaskQueueManager(newTestTaskQueueID(namespace.ID(tli.GetNamespaceId()), tli.Name, tli.TaskType)) + tlm.Lock() + defer tlm.Unlock() + + if tlm.rangeID != 0 { + return nil, &persistence.ConditionFailedError{ + Msg: fmt.Sprintf("Failed to create task queue: name=%v, type=%v", tli.Name, tli.TaskType), + } + } + + tlm.rangeID = request.RangeID + tlm.ackLevel = tli.AckLevel + return &persistence.CreateTaskQueueResponse{}, nil +} + +// UpdateTaskQueue provides a mock function with given fields: request +func (m *testTaskManager) UpdateTaskQueue( + _ context.Context, + request *persistence.UpdateTaskQueueRequest, +) (*persistence.UpdateTaskQueueResponse, error) { + tli := request.TaskQueueInfo + tlm := m.getTaskQueueManager(newTestTaskQueueID(namespace.ID(tli.GetNamespaceId()), tli.Name, tli.TaskType)) + tlm.Lock() + defer tlm.Unlock() + tlm.updateCount++ + + if tlm.rangeID != request.PrevRangeID { + return nil, &persistence.ConditionFailedError{ + Msg: fmt.Sprintf("Failed to update task queue: name=%v, type=%v", tli.Name, tli.TaskType), + } + } + tlm.ackLevel = tli.AckLevel + tlm.rangeID = request.RangeID + return &persistence.UpdateTaskQueueResponse{}, nil +} + +func (m *testTaskManager) GetTaskQueue( + _ context.Context, + request *persistence.GetTaskQueueRequest, +) (*persistence.GetTaskQueueResponse, error) { + tlm := 
m.getTaskQueueManager(newTestTaskQueueID(namespace.ID(request.NamespaceID), request.TaskQueue, request.TaskType)) + tlm.Lock() + defer tlm.Unlock() + + if tlm.rangeID == 0 { + return nil, serviceerror.NewNotFound("task queue not found") + } + return &persistence.GetTaskQueueResponse{ + TaskQueueInfo: &persistencespb.TaskQueueInfo{ + NamespaceId: request.NamespaceID, + Name: request.TaskQueue, + TaskType: request.TaskType, + Kind: enumspb.TASK_QUEUE_KIND_NORMAL, + AckLevel: tlm.ackLevel, + ExpiryTime: nil, + LastUpdateTime: timestamp.TimeNowPtrUtc(), + }, + RangeID: tlm.rangeID, + }, nil +} + +// CompleteTask provides a mock function with given fields: request +func (m *testTaskManager) CompleteTask( + _ context.Context, + request *persistence.CompleteTaskRequest, +) error { + m.logger.Debug("CompleteTask", tag.TaskID(request.TaskID), tag.Name(request.TaskQueue.TaskQueueName), tag.WorkflowTaskQueueType(request.TaskQueue.TaskQueueType)) + if request.TaskID <= 0 { + panic(fmt.Errorf("invalid taskID=%v", request.TaskID)) + } + + tli := request.TaskQueue + tlm := m.getTaskQueueManager(newTestTaskQueueID(namespace.ID(tli.NamespaceID), tli.TaskQueueName, tli.TaskQueueType)) + + tlm.Lock() + defer tlm.Unlock() + + tlm.tasks.Remove(request.TaskID) + return nil +} + +func (m *testTaskManager) CompleteTasksLessThan( + _ context.Context, + request *persistence.CompleteTasksLessThanRequest, +) (int, error) { + tlm := m.getTaskQueueManager(newTestTaskQueueID(namespace.ID(request.NamespaceID), request.TaskQueueName, request.TaskType)) + tlm.Lock() + defer tlm.Unlock() + keys := tlm.tasks.Keys() + for _, key := range keys { + id := key.(int64) + if id < request.ExclusiveMaxTaskID { + tlm.tasks.Remove(id) + } + } + return persistence.UnknownNumRowsAffected, nil +} + +func (m *testTaskManager) ListTaskQueue( + _ context.Context, + _ *persistence.ListTaskQueueRequest, +) (*persistence.ListTaskQueueResponse, error) { + return nil, fmt.Errorf("unsupported operation") +} + +func (m *testTaskManager) DeleteTaskQueue( + _ context.Context, + request *persistence.DeleteTaskQueueRequest, +) error { + m.Lock() + defer m.Unlock() + key := newTestTaskQueueID(namespace.ID(request.TaskQueue.NamespaceID), request.TaskQueue.TaskQueueName, request.TaskQueue.TaskQueueType) + delete(m.taskQueues, *key) + return nil +} + +// CreateTask provides a mock function with given fields: request +func (m *testTaskManager) CreateTasks( + _ context.Context, + request *persistence.CreateTasksRequest, +) (*persistence.CreateTasksResponse, error) { + namespaceID := namespace.ID(request.TaskQueueInfo.Data.GetNamespaceId()) + taskQueue := request.TaskQueueInfo.Data.Name + taskType := request.TaskQueueInfo.Data.TaskType + rangeID := request.TaskQueueInfo.RangeID + + tlm := m.getTaskQueueManager(newTestTaskQueueID(namespaceID, taskQueue, taskType)) + tlm.Lock() + defer tlm.Unlock() + + // First validate the entire batch + for _, task := range request.Tasks { + m.logger.Debug("testTaskManager.CreateTask", tag.TaskID(task.GetTaskId()), tag.ShardRangeID(rangeID)) + if task.GetTaskId() <= 0 { + panic(fmt.Errorf("invalid taskID=%v", task.GetTaskId())) + } + + if tlm.rangeID != rangeID { + m.logger.Debug("testTaskManager.CreateTask ConditionFailedError", + tag.TaskID(task.GetTaskId()), tag.ShardRangeID(rangeID), tag.ShardRangeID(tlm.rangeID)) + + return nil, &persistence.ConditionFailedError{ + Msg: fmt.Sprintf("testTaskManager.CreateTask failed. 
TaskQueue: %v, taskQueueType: %v, rangeID: %v, db rangeID: %v", + taskQueue, taskType, rangeID, tlm.rangeID), + } + } + _, ok := tlm.tasks.Get(task.GetTaskId()) + if ok { + panic(fmt.Sprintf("Duplicated TaskID %v", task.GetTaskId())) + } + } + + // Then insert all tasks if no errors + for _, task := range request.Tasks { + tlm.tasks.Put(task.GetTaskId(), &persistencespb.AllocatedTaskInfo{ + Data: task.Data, + TaskId: task.GetTaskId(), + }) + tlm.createTaskCount++ + } + + return &persistence.CreateTasksResponse{}, nil +} + +// GetTasks provides a mock function with given fields: request +func (m *testTaskManager) GetTasks( + _ context.Context, + request *persistence.GetTasksRequest, +) (*persistence.GetTasksResponse, error) { + m.logger.Debug("testTaskManager.GetTasks", tag.MinLevel(request.InclusiveMinTaskID), tag.MaxLevel(request.ExclusiveMaxTaskID)) + + tlm := m.getTaskQueueManager(newTestTaskQueueID(namespace.ID(request.NamespaceID), request.TaskQueue, request.TaskType)) + tlm.Lock() + defer tlm.Unlock() + var tasks []*persistencespb.AllocatedTaskInfo + + it := tlm.tasks.Iterator() + for it.Next() { + taskID := it.Key().(int64) + if taskID < request.InclusiveMinTaskID { + continue + } + if taskID >= request.ExclusiveMaxTaskID { + break + } + tasks = append(tasks, it.Value().(*persistencespb.AllocatedTaskInfo)) + } + tlm.getTasksCount++ + return &persistence.GetTasksResponse{ + Tasks: tasks, + }, nil +} + +// getTaskCount returns number of tasks in a task queue +func (m *testTaskManager) getTaskCount(taskQueue *taskQueueID) int { + tlm := m.getTaskQueueManager(taskQueue) + tlm.Lock() + defer tlm.Unlock() + return tlm.tasks.Size() +} + +// getCreateTaskCount returns how many times CreateTask was called +func (m *testTaskManager) getCreateTaskCount(taskQueue *taskQueueID) int { + tlm := m.getTaskQueueManager(taskQueue) + tlm.Lock() + defer tlm.Unlock() + return tlm.createTaskCount +} + +// getGetTasksCount returns how many times GetTasks was called +func (m *testTaskManager) getGetTasksCount(taskQueue *taskQueueID) int { + tlm := m.getTaskQueueManager(taskQueue) + tlm.Lock() + defer tlm.Unlock() + return tlm.getTasksCount +} + +// getGetUserDataCount returns how many times GetUserData was called +func (m *testTaskManager) getGetUserDataCount(taskQueue *taskQueueID) int { + tlm := m.getTaskQueueManager(taskQueue) + tlm.Lock() + defer tlm.Unlock() + return tlm.getUserDataCount +} + +// getUpdateCount returns how many times UpdateTaskQueue was called +func (m *testTaskManager) getUpdateCount(taskQueue *taskQueueID) int { + tlm := m.getTaskQueueManager(taskQueue) + tlm.Lock() + defer tlm.Unlock() + return tlm.updateCount +} + +func (m *testTaskManager) String() string { + m.Lock() + defer m.Unlock() + var result string + for id, tl := range m.taskQueues { + tl.Lock() + if id.taskType == enumspb.TASK_QUEUE_TYPE_ACTIVITY { + result += "Activity" + } else { + result += "Workflow" + } + result += " task queue " + id.FullName() + result += "\n" + result += fmt.Sprintf("AckLevel=%v\n", tl.ackLevel) + result += fmt.Sprintf("CreateTaskCount=%v\n", tl.createTaskCount) + result += fmt.Sprintf("RangeID=%v\n", tl.rangeID) + result += "Tasks=\n" + for _, t := range tl.tasks.Values() { + result += fmt.Sprintf("%v\n", t) + } + tl.Unlock() + } + return result +} + +// GetTaskQueueData implements persistence.TaskManager +func (m *testTaskManager) GetTaskQueueUserData(_ context.Context, request *persistence.GetTaskQueueUserDataRequest) (*persistence.GetTaskQueueUserDataResponse, error) { + tlm := 
m.getTaskQueueManager(newTestTaskQueueID(namespace.ID(request.NamespaceID), request.TaskQueue, enumspb.TASK_QUEUE_TYPE_WORKFLOW)) + tlm.Lock() + defer tlm.Unlock() + tlm.getUserDataCount++ + return &persistence.GetTaskQueueUserDataResponse{ + UserData: tlm.userData, + }, nil +} + +// UpdateTaskQueueUserData implements persistence.TaskManager +func (m *testTaskManager) UpdateTaskQueueUserData(_ context.Context, request *persistence.UpdateTaskQueueUserDataRequest) error { + tlm := m.getTaskQueueManager(newTestTaskQueueID(namespace.ID(request.NamespaceID), request.TaskQueue, enumspb.TASK_QUEUE_TYPE_WORKFLOW)) + tlm.Lock() + defer tlm.Unlock() + newData := *request.UserData + newData.Version++ + tlm.userData = &newData + return nil +} + +// ListTaskQueueUserDataEntries implements persistence.TaskManager +func (*testTaskManager) ListTaskQueueUserDataEntries(context.Context, *persistence.ListTaskQueueUserDataEntriesRequest) (*persistence.ListTaskQueueUserDataEntriesResponse, error) { + // No need to implement this for unit tests + panic("unimplemented") +} + +// GetTaskQueuesByBuildId implements persistence.TaskManager +func (*testTaskManager) GetTaskQueuesByBuildId(context.Context, *persistence.GetTaskQueuesByBuildIdRequest) ([]string, error) { + // No need to implement this for unit tests + panic("unimplemented") +} + +// CountTaskQueuesByBuildId implements persistence.TaskManager +func (*testTaskManager) CountTaskQueuesByBuildId(context.Context, *persistence.CountTaskQueuesByBuildIdRequest) (int, error) { + // This is only used to validate that the build id to task queue mapping is enforced (at the time of writing), report 0. + return 0, nil +} + +func validateTimeRange(t time.Time, expectedDuration time.Duration) bool { + currentTime := time.Now().UTC() + diff := time.Duration(currentTime.UnixNano() - t.UnixNano()) + if diff > expectedDuration { + fmt.Printf("Current time: %v, Application time: %v, Difference: %v \n", currentTime, t, diff) + return false + } + return true +} + +func defaultTestConfig() *Config { + config := NewConfig(dynamicconfig.NewNoopCollection(), false, false) + config.LongPollExpirationInterval = dynamicconfig.GetDurationPropertyFnFilteredByTaskQueueInfo(100 * time.Millisecond) + config.MaxTaskDeleteBatchSize = dynamicconfig.GetIntPropertyFilteredByTaskQueueInfo(1) + return config +} + +type ( + dynamicRateBurstWrapper struct { + quotas.MutableRateBurst + *quotas.RateLimiterImpl + } +) + +func (d *dynamicRateBurstWrapper) SetRate(rate float64) { + d.MutableRateBurst.SetRate(rate) + d.RateLimiterImpl.SetRate(rate) +} + +func (d *dynamicRateBurstWrapper) SetBurst(burst int) { + d.MutableRateBurst.SetBurst(burst) + d.RateLimiterImpl.SetBurst(burst) +} + +func (d *dynamicRateBurstWrapper) Rate() float64 { + return d.RateLimiterImpl.Rate() +} + +func (d *dynamicRateBurstWrapper) Burst() int { + return d.RateLimiterImpl.Burst() +} diff -Nru temporal-1.21.5-1/src/service/matching/pollerHistory.go temporal-1.22.5/src/service/matching/pollerHistory.go --- temporal-1.21.5-1/src/service/matching/pollerHistory.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/matching/pollerHistory.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,99 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package matching - -import ( - "time" - - taskqueuepb "go.temporal.io/api/taskqueue/v1" - - "go.temporal.io/server/common/cache" -) - -const ( - pollerHistoryInitSize = 0 - pollerHistoryInitMaxSize = 1000 - pollerHistoryTTL = 5 * time.Minute -) - -type ( - pollerIdentity string - - pollerInfo struct { - pollMetadata - } -) - -type pollerHistory struct { - // poller ID -> pollerInfo - // pollers map[pollerID]pollerInfo - history cache.Cache -} - -func newPollerHistory() *pollerHistory { - opts := &cache.Options{ - InitialCapacity: pollerHistoryInitSize, - TTL: pollerHistoryTTL, - Pin: false, - } - - return &pollerHistory{ - history: cache.New(pollerHistoryInitMaxSize, opts), - } -} - -func (pollers *pollerHistory) updatePollerInfo(id pollerIdentity, pollMetadata *pollMetadata) { - pollers.history.Put(id, &pollerInfo{pollMetadata: *pollMetadata}) -} - -func (pollers *pollerHistory) getPollerInfo(earliestAccessTime time.Time) []*taskqueuepb.PollerInfo { - var result []*taskqueuepb.PollerInfo - - ite := pollers.history.Iterator() - defer ite.Close() - for ite.HasNext() { - entry := ite.Next() - key := entry.Key().(pollerIdentity) - value := entry.Value().(*pollerInfo) - lastAccessTime := entry.CreateTime() - if earliestAccessTime.Before(lastAccessTime) { - result = append(result, &taskqueuepb.PollerInfo{ - Identity: string(key), - LastAccessTime: &lastAccessTime, - RatePerSecond: defaultValue(value.ratePerSecond, defaultTaskDispatchRPS), - WorkerVersionCapabilities: value.workerVersionCapabilities, - }) - } - } - - return result -} - -func defaultValue[T any, P ~*T](p P, def T) T { - if p == nil { - return def - } - return *p -} diff -Nru temporal-1.21.5-1/src/service/matching/poller_history.go temporal-1.22.5/src/service/matching/poller_history.go --- temporal-1.21.5-1/src/service/matching/poller_history.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/service/matching/poller_history.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,97 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package matching + +import ( + "time" + + taskqueuepb "go.temporal.io/api/taskqueue/v1" + + "go.temporal.io/server/common/cache" +) + +const ( + pollerHistoryInitMaxSize = 1000 + pollerHistoryTTL = 5 * time.Minute +) + +type ( + pollerIdentity string + + pollerInfo struct { + pollMetadata + } +) + +type pollerHistory struct { + // poller ID -> pollerInfo + // pollers map[pollerID]pollerInfo + history cache.Cache +} + +func newPollerHistory() *pollerHistory { + opts := &cache.Options{ + TTL: pollerHistoryTTL, + Pin: false, + } + + return &pollerHistory{ + history: cache.New(pollerHistoryInitMaxSize, opts), + } +} + +func (pollers *pollerHistory) updatePollerInfo(id pollerIdentity, pollMetadata *pollMetadata) { + pollers.history.Put(id, &pollerInfo{pollMetadata: *pollMetadata}) +} + +func (pollers *pollerHistory) getPollerInfo(earliestAccessTime time.Time) []*taskqueuepb.PollerInfo { + var result []*taskqueuepb.PollerInfo + + ite := pollers.history.Iterator() + defer ite.Close() + for ite.HasNext() { + entry := ite.Next() + key := entry.Key().(pollerIdentity) + value := entry.Value().(*pollerInfo) + lastAccessTime := entry.CreateTime() + if earliestAccessTime.Before(lastAccessTime) { + result = append(result, &taskqueuepb.PollerInfo{ + Identity: string(key), + LastAccessTime: &lastAccessTime, + RatePerSecond: defaultValue(value.ratePerSecond, defaultTaskDispatchRPS), + WorkerVersionCapabilities: value.workerVersionCapabilities, + }) + } + } + + return result +} + +func defaultValue[T any, P ~*T](p P, def T) T { + if p == nil { + return def + } + return *p +} diff -Nru temporal-1.21.5-1/src/service/matching/service.go temporal-1.22.5/src/service/matching/service.go --- temporal-1.21.5-1/src/service/matching/service.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/matching/service.go 2024-02-23 09:45:43.000000000 +0000 @@ -27,7 +27,6 @@ import ( "math/rand" "net" - "sync/atomic" "time" "google.golang.org/grpc" @@ -35,7 +34,6 @@ healthpb "google.golang.org/grpc/health/grpc_health_v1" "go.temporal.io/server/api/matchingservice/v1" - "go.temporal.io/server/common" "go.temporal.io/server/common/log" "go.temporal.io/server/common/log/tag" "go.temporal.io/server/common/membership" @@ -46,7 +44,6 @@ // Service represents the matching service type Service struct { - status int32 handler *Handler config *Config @@ -75,7 +72,6 @@ visibilityManager manager.VisibilityManager, ) *Service { return &Service{ - 
status: common.DaemonStatusInitialized, config: serviceConfig, server: grpc.NewServer(grpcServerOptions...), handler: handler, @@ -92,10 +88,6 @@ // Start starts the service func (s *Service) Start() { - if !atomic.CompareAndSwapInt32(&s.status, common.DaemonStatusInitialized, common.DaemonStatusStarted) { - return - } - s.logger.Info("matching starting") // must start base service first @@ -108,20 +100,18 @@ healthpb.RegisterHealthServer(s.server, s.healthServer) s.healthServer.SetServingStatus(serviceName, healthpb.HealthCheckResponse_SERVING) - go s.membershipMonitor.Start() + go func() { + s.logger.Info("Starting to serve on matching listener") + if err := s.server.Serve(s.grpcListener); err != nil { + s.logger.Fatal("Failed to serve on matching listener", tag.Error(err)) + } + }() - s.logger.Info("Starting to serve on matching listener") - if err := s.server.Serve(s.grpcListener); err != nil { - s.logger.Fatal("Failed to serve on matching listener", tag.Error(err)) - } + go s.membershipMonitor.Start() } // Stop stops the service func (s *Service) Stop() { - if !atomic.CompareAndSwapInt32(&s.status, common.DaemonStatusStarted, common.DaemonStatusStopped) { - return - } - // remove self from membership ring and wait for traffic to drain s.logger.Info("ShutdownHandler: Evicting self from membership ring") if err := s.membershipMonitor.EvictSelf(); err != nil { diff -Nru temporal-1.21.5-1/src/service/matching/task.go temporal-1.22.5/src/service/matching/task.go --- temporal-1.21.5-1/src/service/matching/task.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/matching/task.go 2024-02-23 09:45:43.000000000 +0000 @@ -164,7 +164,7 @@ case task.responseC != nil: task.responseC <- err case task.event.completionFunc != nil: - // TODO: this probably should not be done synchronosly in PollWorkflow/ActivityTaskQueue + // TODO: this probably should not be done synchronously in PollWorkflow/ActivityTaskQueue task.event.completionFunc(task.event.AllocatedTaskInfo, err) } } diff -Nru temporal-1.21.5-1/src/service/matching/taskGC.go temporal-1.22.5/src/service/matching/taskGC.go --- temporal-1.21.5-1/src/service/matching/taskGC.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/matching/taskGC.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,110 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
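A few hunks up, in service.go, the change drops the start-once/stop-once guard that sat on an atomic status word (the common.DaemonStatus* compare-and-swap). For readers unfamiliar with that idiom, the following is a minimal, self-contained sketch of it; every name below is invented for the example and nothing in it is taken from either tree.

package main

import (
	"fmt"
	"sync/atomic"
)

// Stand-ins for the DaemonStatus* constants used by the removed guard.
const (
	statusInitialized int32 = iota
	statusStarted
	statusStopped
)

type daemon struct {
	status int32
}

// Start runs its body at most once: only the first caller wins the CAS.
func (d *daemon) Start() {
	if !atomic.CompareAndSwapInt32(&d.status, statusInitialized, statusStarted) {
		return
	}
	fmt.Println("started")
}

// Stop is likewise idempotent and only acts after a successful Start.
func (d *daemon) Stop() {
	if !atomic.CompareAndSwapInt32(&d.status, statusStarted, statusStopped) {
		return
	}
	fmt.Println("stopped")
}

func main() {
	d := &daemon{}
	d.Start()
	d.Start() // no-op: status is already Started
	d.Stop()
	d.Stop() // no-op: status is already Stopped
}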
- -package matching - -import ( - "context" - "sync/atomic" - "time" - - "go.temporal.io/server/common/persistence" -) - -type taskGC struct { - lock int64 - db *taskQueueDB - ackLevel int64 - lastDeleteTime time.Time - config *taskQueueConfig -} - -var maxTimeBetweenTaskDeletes = time.Second - -// newTaskGC returns an instance of a task garbage collector object -// taskGC internally maintains a delete cursor and attempts to delete -// a batch of tasks everytime Run() method is called. -// -// In order for the taskGC to actually delete tasks when Run() is called, one of -// two conditions must be met -// - Size Threshold: More than MaxDeleteBatchSize tasks are waiting to be deleted (rough estimation) -// - Time Threshold: Time since previous delete was attempted exceeds maxTimeBetweenTaskDeletes -// -// Finally, the Run() method is safe to be called from multiple threads. The underlying -// implementation will make sure only one caller executes Run() and others simply bail out -func newTaskGC(db *taskQueueDB, config *taskQueueConfig) *taskGC { - return &taskGC{db: db, config: config} -} - -// Run deletes a batch of completed tasks, if its possible to do so -// Only attempts deletion if size or time thresholds are met -func (tgc *taskGC) Run(ctx context.Context, ackLevel int64) { - tgc.tryDeleteNextBatch(ctx, ackLevel, false) -} - -// RunNow deletes a batch of completed tasks if its possible to do so -// This method attempts deletions without waiting for size/time threshold to be met -func (tgc *taskGC) RunNow(ctx context.Context, ackLevel int64) { - tgc.tryDeleteNextBatch(ctx, ackLevel, true) -} - -func (tgc *taskGC) tryDeleteNextBatch(ctx context.Context, ackLevel int64, ignoreTimeCond bool) { - if !tgc.tryLock() { - return - } - defer tgc.unlock() - batchSize := tgc.config.MaxTaskDeleteBatchSize() - if !tgc.checkPrecond(ackLevel, batchSize, ignoreTimeCond) { - return - } - tgc.lastDeleteTime = time.Now().UTC() - n, err := tgc.db.CompleteTasksLessThan(ctx, ackLevel+1, batchSize) - if err != nil { - return - } - // implementation behavior for CompleteTasksLessThan: - // - unit test, cassandra: always return UnknownNumRowsAffected (in this case means "all") - // - sql: return number of rows affected (should be <= batchSize) - // if we get UnknownNumRowsAffected or a smaller number than our limit, we know we got - // everything <= ackLevel, so we can reset ours. if not, we may have to try again. - if n == persistence.UnknownNumRowsAffected || n < batchSize { - tgc.ackLevel = ackLevel - } -} - -func (tgc *taskGC) checkPrecond(ackLevel int64, batchSize int, ignoreTimeCond bool) bool { - backlog := ackLevel - tgc.ackLevel - if backlog >= int64(batchSize) { - return true - } - return backlog > 0 && (ignoreTimeCond || time.Now().UTC().Sub(tgc.lastDeleteTime) > maxTimeBetweenTaskDeletes) -} - -func (tgc *taskGC) tryLock() bool { - return atomic.CompareAndSwapInt64(&tgc.lock, 0, 1) -} - -func (tgc *taskGC) unlock() { - atomic.StoreInt64(&tgc.lock, 0) -} diff -Nru temporal-1.21.5-1/src/service/matching/taskQueueManager.go temporal-1.22.5/src/service/matching/taskQueueManager.go --- temporal-1.21.5-1/src/service/matching/taskQueueManager.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/matching/taskQueueManager.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,911 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package matching - -import ( - "bytes" - "context" - "errors" - "fmt" - "sync/atomic" - "time" - - "github.com/jonboulle/clockwork" - - commonpb "go.temporal.io/api/common/v1" - enumspb "go.temporal.io/api/enums/v1" - "go.temporal.io/api/serviceerror" - taskqueuepb "go.temporal.io/api/taskqueue/v1" - - enumsspb "go.temporal.io/server/api/enums/v1" - "go.temporal.io/server/api/matchingservice/v1" - persistencespb "go.temporal.io/server/api/persistence/v1" - "go.temporal.io/server/common" - "go.temporal.io/server/common/backoff" - "go.temporal.io/server/common/cluster" - "go.temporal.io/server/common/debug" - "go.temporal.io/server/common/future" - "go.temporal.io/server/common/headers" - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/log/tag" - "go.temporal.io/server/common/metrics" - "go.temporal.io/server/common/namespace" - "go.temporal.io/server/common/persistence" - "go.temporal.io/server/common/tqname" - "go.temporal.io/server/common/util" - "go.temporal.io/server/internal/goro" -) - -const ( - // Time budget for empty task to propagate through the function stack and be returned to - // pollForActivityTask or pollForWorkflowTask handler. - returnEmptyTaskTimeBudget = time.Second - - // Fake Task ID to wrap a task for syncmatch - syncMatchTaskId = -137 - - ioTimeout = 5 * time.Second * debug.TimeoutMultiplier - - // Threshold for counting a AddTask call as a no recent poller call - noPollerThreshold = time.Minute * 2 -) - -var ( - // this retry policy is currenly only used for matching persistence operations - // that, if failed, the entire task queue needs to be reload - persistenceOperationRetryPolicy = backoff.NewExponentialRetryPolicy(50 * time.Millisecond). - WithMaximumInterval(1 * time.Second). 
- WithExpirationInterval(30 * time.Second) -) - -type ( - taskQueueManagerOpt func(*taskQueueManagerImpl) - - idBlockAllocator interface { - RenewLease(context.Context) (taskQueueState, error) - RangeID() int64 - } - - addTaskParams struct { - execution *commonpb.WorkflowExecution - taskInfo *persistencespb.TaskInfo - source enumsspb.TaskSource - forwardedFrom string - baseTqm taskQueueManager - } - - stickyInfo struct { - kind enumspb.TaskQueueKind // sticky taskQueue has different process in persistence - normalName string // if kind is sticky, name of normal queue - } - - UserDataUpdateOptions struct { - TaskQueueLimitPerBuildId int - // Only perform the update if current version equals to supplied version. - // 0 is unset. - KnownVersion int64 - } - // UserDataUpdateFunc accepts the current user data for a task queue and returns the updated user data, a boolean - // indicating whether this data should be replicated, and an error. - // Extra care should be taken to avoid mutating the current user data to avoid keeping uncommitted data in memory. - UserDataUpdateFunc func(*persistencespb.TaskQueueUserData) (*persistencespb.TaskQueueUserData, bool, error) - - taskQueueManager interface { - Start() - Stop() - WaitUntilInitialized(context.Context) error - // AddTask adds a task to the task queue. This method will first attempt a synchronous - // match with a poller. When that fails, task will be written to database and later - // asynchronously matched with a poller - AddTask(ctx context.Context, params addTaskParams) (syncMatch bool, err error) - // GetTask blocks waiting for a task Returns error when context deadline is exceeded - // maxDispatchPerSecond is the max rate at which tasks are allowed to be dispatched - // from this task queue to pollers - GetTask(ctx context.Context, pollMetadata *pollMetadata) (*internalTask, error) - // SpoolTask spools a task to persistence to be matched asynchronously when a poller is available. - SpoolTask(params addTaskParams) error - // DispatchSpooledTask dispatches a task to a poller. When there are no pollers to pick - // up the task, this method will return error. Task will not be persisted to db - DispatchSpooledTask(ctx context.Context, task *internalTask, userDataChanged chan struct{}) error - // DispatchQueryTask will dispatch query to local or remote poller. If forwarded then result or error is returned, - // if dispatched to local poller then nil and nil is returned. - DispatchQueryTask(ctx context.Context, taskID string, request *matchingservice.QueryWorkflowRequest) (*matchingservice.QueryWorkflowResponse, error) - // GetUserData returns the verioned user data for this task queue - GetUserData(ctx context.Context) (*persistencespb.VersionedTaskQueueUserData, chan struct{}, error) - // UpdateUserData updates user data for this task queue and replicates across clusters if necessary. - // Extra care should be taken to avoid mutating the existing data in the update function. 
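To make the copy-on-write contract described above concrete, here is a minimal sketch of an update function with the UserDataUpdateFunc shape from this file. It assumes the surrounding matching package and its persistencespb import; the actual field edit is left as a placeholder rather than copied from any real caller, and the clone-then-edit step mirrors the pattern the test mock's UpdateTaskQueueUserData uses.

// Sketch only: an updateFn that never writes through its argument.
func exampleUserDataUpdateFn(cur *persistencespb.TaskQueueUserData) (*persistencespb.TaskQueueUserData, bool, error) {
	next := &persistencespb.TaskQueueUserData{}
	if cur != nil {
		*next = *cur // shallow copy; leave cur untouched so uncommitted data never lingers
	}
	// ...apply the desired change to next (not cur) here...
	return next, true, nil // true: ask the owner to replicate the new data
}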
- UpdateUserData(ctx context.Context, options UserDataUpdateOptions, updateFn UserDataUpdateFunc) error - UpdatePollerInfo(pollerIdentity, *pollMetadata) - GetAllPollerInfo() []*taskqueuepb.PollerInfo - HasPollerAfter(accessTime time.Time) bool - // DescribeTaskQueue returns information about the target task queue - DescribeTaskQueue(includeTaskQueueStatus bool) *matchingservice.DescribeTaskQueueResponse - String() string - QueueID() *taskQueueID - TaskQueueKind() enumspb.TaskQueueKind - LongPollExpirationInterval() time.Duration - } - - // Single task queue in memory state - taskQueueManagerImpl struct { - status int32 - engine *matchingEngineImpl - taskQueueID *taskQueueID - stickyInfo - config *taskQueueConfig - db *taskQueueDB - taskWriter *taskWriter - taskReader *taskReader // reads tasks from db and async matches it with poller - liveness *liveness - taskGC *taskGC - taskAckManager ackManager // tracks ackLevel for delivered messages - matcher *TaskMatcher // for matching a task producer with a poller - namespaceRegistry namespace.Registry - logger log.Logger - matchingClient matchingservice.MatchingServiceClient - metricsHandler metrics.Handler - namespace namespace.Name - taggedMetricsHandler metrics.Handler // namespace/taskqueue tagged metric scope - // pollerHistory stores poller which poll from this taskqueue in last few minutes - pollerHistory *pollerHistory - currentPolls atomic.Int64 - clusterMeta cluster.Metadata - goroGroup goro.Group - initializedError *future.FutureImpl[struct{}] - // userDataReady is fulfilled once versioning data is fetched from the root partition. If this TQ is - // the root partition, it is fulfilled as soon as it is fetched from db. - userDataReady *future.FutureImpl[struct{}] - // lostOwnership controls behavior on Stop: if it's false, we try to write one final - // update before unloading - lostOwnership atomic.Bool - } -) - -var _ taskQueueManager = (*taskQueueManagerImpl)(nil) - -var ( - errRemoteSyncMatchFailed = serviceerror.NewCanceled("remote sync match failed") - errMissingNormalQueueName = errors.New("missing normal queue name") - - normalStickyInfo = stickyInfo{kind: enumspb.TASK_QUEUE_KIND_NORMAL} -) - -func withIDBlockAllocator(ibl idBlockAllocator) taskQueueManagerOpt { - return func(tqm *taskQueueManagerImpl) { - tqm.taskWriter.idAlloc = ibl - } -} - -func stickyInfoFromTaskQueue(tq *taskqueuepb.TaskQueue) stickyInfo { - return stickyInfo{ - kind: tq.GetKind(), - normalName: tq.GetNormalName(), - } -} - -func newTaskQueueManager( - e *matchingEngineImpl, - taskQueue *taskQueueID, - stickyInfo stickyInfo, - config *Config, - clusterMeta cluster.Metadata, - opts ...taskQueueManagerOpt, -) (taskQueueManager, error) { - namespaceEntry, err := e.namespaceRegistry.GetNamespaceByID(taskQueue.namespaceID) - if err != nil { - return nil, err - } - nsName := namespaceEntry.Name() - - taskQueueConfig := newTaskQueueConfig(taskQueue, config, nsName) - - db := newTaskQueueDB(e.taskManager, e.matchingClient, taskQueue.namespaceID, taskQueue, stickyInfo.kind, e.logger) - logger := log.With(e.logger, - tag.WorkflowTaskQueueName(taskQueue.FullName()), - tag.WorkflowTaskQueueType(taskQueue.taskType), - tag.WorkflowNamespace(nsName.String())) - taggedMetricsHandler := metrics.GetPerTaskQueueScope( - e.metricsHandler.WithTags(metrics.OperationTag(metrics.MatchingTaskQueueMgrScope), metrics.TaskQueueTypeTag(taskQueue.taskType)), - nsName.String(), - taskQueue.FullName(), - stickyInfo.kind, - ) - tlMgr := &taskQueueManagerImpl{ - status: 
common.DaemonStatusInitialized, - engine: e, - namespaceRegistry: e.namespaceRegistry, - matchingClient: e.matchingClient, - metricsHandler: e.metricsHandler, - taskQueueID: taskQueue, - stickyInfo: stickyInfo, - logger: logger, - db: db, - taskAckManager: newAckManager(e.logger), - taskGC: newTaskGC(db, taskQueueConfig), - config: taskQueueConfig, - clusterMeta: clusterMeta, - namespace: nsName, - taggedMetricsHandler: taggedMetricsHandler, - initializedError: future.NewFuture[struct{}](), - userDataReady: future.NewFuture[struct{}](), - } - // poller history is only kept for the base task queue manager - if !tlMgr.managesSpecificVersionSet() { - tlMgr.pollerHistory = newPollerHistory() - } - - tlMgr.liveness = newLiveness( - clockwork.NewRealClock(), - taskQueueConfig.MaxTaskQueueIdleTime, - tlMgr.unloadFromEngine, - ) - tlMgr.taskWriter = newTaskWriter(tlMgr) - tlMgr.taskReader = newTaskReader(tlMgr) - - var fwdr *Forwarder - if tlMgr.isFowardingAllowed(taskQueue, stickyInfo.kind) { - // Forward without version set, the target will resolve the correct version set from - // the build id itself. TODO: check if we still need this here after tqm refactoring - forwardTaskQueue := newTaskQueueIDWithVersionSet(taskQueue, "") - fwdr = newForwarder(&taskQueueConfig.forwarderConfig, forwardTaskQueue, stickyInfo.kind, e.matchingClient) - } - tlMgr.matcher = newTaskMatcher(taskQueueConfig, fwdr, tlMgr.taggedMetricsHandler) - for _, opt := range opts { - opt(tlMgr) - } - return tlMgr, nil -} - -// unloadFromEngine asks the MatchingEngine to unload this task queue. It will cause Stop to be called. -func (c *taskQueueManagerImpl) unloadFromEngine() { - c.engine.unloadTaskQueue(c) -} - -// signalIfFatal calls unloadFromEngine on this taskQueueManagerImpl instance -// if and only if the supplied error represents a fatal condition, e.g. the -// existence of another taskQueueManager newer lease. Returns true if the signal -// is emitted, false otherwise. -func (c *taskQueueManagerImpl) signalIfFatal(err error) bool { - if err == nil { - return false - } - var condfail *persistence.ConditionFailedError - if errors.As(err, &condfail) { - c.taggedMetricsHandler.Counter(metrics.ConditionFailedErrorPerTaskQueueCounter.GetMetricName()).Record(1) - c.lostOwnership.Store(true) - c.unloadFromEngine() - return true - } - return false -} - -func (c *taskQueueManagerImpl) Start() { - if !atomic.CompareAndSwapInt32( - &c.status, - common.DaemonStatusInitialized, - common.DaemonStatusStarted, - ) { - return - } - c.liveness.Start() - c.taskWriter.Start() - c.taskReader.Start() - if c.db.DbStoresUserData() { - c.goroGroup.Go(c.loadUserData) - } else { - c.goroGroup.Go(c.fetchUserData) - } - c.logger.Info("", tag.LifeCycleStarted) - c.taggedMetricsHandler.Counter(metrics.TaskQueueStartedCounter.GetMetricName()).Record(1) -} - -func (c *taskQueueManagerImpl) Stop() { - if !atomic.CompareAndSwapInt32( - &c.status, - common.DaemonStatusStarted, - common.DaemonStatusStopped, - ) { - return - } - // Maybe try to write one final update of ack level and GC some tasks. - // Skip the update if we never initialized (ackLevel will be -1 in that case). - // Also skip if we're stopping due to lost ownership (the update will fail in that case). - // Ignore any errors. - // Note that it's fine to GC even if the update ack level fails because we did match the - // tasks, the next owner will just read over an empty range. 
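The lost-ownership case mentioned just above is detected in signalIfFatal, earlier in this file, by probing the error chain for persistence.ConditionFailedError with errors.As. A self-contained sketch of that probe follows, with the error type stubbed locally so the snippet stands alone; it is an illustration of the pattern, not server code.

package main

import (
	"errors"
	"fmt"
)

// conditionFailedError stands in for persistence.ConditionFailedError.
type conditionFailedError struct{ msg string }

func (e *conditionFailedError) Error() string { return e.msg }

// isFatal reports whether err means another owner holds the lease,
// even when the error is wrapped.
func isFatal(err error) bool {
	var condFailed *conditionFailedError
	return errors.As(err, &condFailed)
}

func main() {
	err := fmt.Errorf("update task queue: %w", &conditionFailedError{msg: "rangeID mismatch"})
	fmt.Println(isFatal(err))                             // true: the manager would mark lostOwnership and unload
	fmt.Println(isFatal(errors.New("transient timeout"))) // false: retryable, no unload
}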
- ackLevel := c.taskAckManager.getAckLevel() - if ackLevel >= 0 && !c.lostOwnership.Load() { - ctx, cancel := c.newIOContext() - defer cancel() - - _ = c.db.UpdateState(ctx, ackLevel) - c.taskGC.RunNow(ctx, ackLevel) - } - c.liveness.Stop() - c.taskWriter.Stop() - c.taskReader.Stop() - c.goroGroup.Cancel() - c.logger.Info("", tag.LifeCycleStopped) - c.taggedMetricsHandler.Counter(metrics.TaskQueueStoppedCounter.GetMetricName()).Record(1) - // This may call Stop again, but the status check above makes that a no-op. - c.unloadFromEngine() -} - -// managesSpecificVersionSet returns true if this is a tqm for a specific version set in the -// build-id-based versioning feature. Note that this is a different concept from the overall -// task queue having versioning data associated with it, which is the usual meaning of -// "versioned task queue". These task queues are not interacted with directly outside outside -// of a single matching node. -func (c *taskQueueManagerImpl) managesSpecificVersionSet() bool { - return c.taskQueueID.VersionSet() != "" -} - -func (c *taskQueueManagerImpl) SetInitializedError(err error) { - c.initializedError.Set(struct{}{}, err) - if err != nil { - // We can't recover from here without starting over, so unload the whole task queue - c.lostOwnership.Store(true) // not really lost ownership but we want to skip the last write - c.unloadFromEngine() - } -} - -// Sets user data enabled/disabled and marks the future ready (if it's not ready yet). -// userDataState controls whether GetUserData return an error, and which. -// futureError is the error to set on the ready future. If this is non-nil, the task queue will -// be unloaded. -// Note that this must only be called from a single goroutine since the Ready/Set sequence is -// potentially racy otherwise. -func (c *taskQueueManagerImpl) SetUserDataState(userDataState userDataState, futureError error) { - // Always set state enabled/disabled even if we're not setting the future since we only set - // the future once but the enabled/disabled state may change over time. - c.db.setUserDataState(userDataState) - - if !c.userDataReady.Ready() { - c.userDataReady.Set(struct{}{}, futureError) - if futureError != nil { - c.lostOwnership.Store(true) // not really lost ownership but we want to skip the last write - c.unloadFromEngine() - } - } -} - -func (c *taskQueueManagerImpl) WaitUntilInitialized(ctx context.Context) error { - _, err := c.initializedError.Get(ctx) - if err != nil { - return err - } - _, err = c.userDataReady.Get(ctx) - return err -} - -// AddTask adds a task to the task queue. This method will first attempt a synchronous -// match with a poller. 
When there are no pollers or if ratelimit is exceeded, task will -// be written to database and later asynchronously matched with a poller -func (c *taskQueueManagerImpl) AddTask( - ctx context.Context, - params addTaskParams, -) (bool, error) { - if params.forwardedFrom == "" { - // request sent by history service - c.liveness.markAlive() - } - - // TODO: make this work for versioned queues too - if c.QueueID().IsRoot() && c.QueueID().VersionSet() == "" && !c.HasPollerAfter(time.Now().Add(-noPollerThreshold)) { - // Only checks recent pollers in the root partition - c.taggedMetricsHandler.Counter(metrics.NoRecentPollerTasksPerTaskQueueCounter.GetMetricName()).Record(1) - } - - taskInfo := params.taskInfo - - namespaceEntry, err := c.namespaceRegistry.GetNamespaceByID(namespace.ID(taskInfo.GetNamespaceId())) - if err != nil { - return false, err - } - - if namespaceEntry.ActiveInCluster(c.clusterMeta.GetCurrentClusterName()) { - syncMatch, err := c.trySyncMatch(ctx, params) - if syncMatch { - return syncMatch, err - } - } - - if params.forwardedFrom != "" { - // forwarded from child partition - only do sync match - // child partition will persist the task when sync match fails - return false, errRemoteSyncMatchFailed - } - - // Ensure that tasks with the "default" versioning directive get spooled in the unversioned queue as they not - // associated with any version set until their execution is touched by a version specific worker. - // "compatible" tasks OTOH are associated with a specific version set and should be stored along with all tasks for - // that version set. - // The task queue default set is dynamic and applies only at dispatch time. Putting "default" tasks into version set - // specific queues could cause them to get stuck behind "compatible" tasks when they should be able to progress - // independently. - if taskInfo.VersionDirective.GetUseDefault() != nil { - err = params.baseTqm.SpoolTask(params) - } else { - err = c.SpoolTask(params) - } - return false, err -} - -func (c *taskQueueManagerImpl) SpoolTask(params addTaskParams) error { - _, err := c.taskWriter.appendTask(params.execution, params.taskInfo) - c.signalIfFatal(err) - if err == nil { - c.taskReader.Signal() - } - return err -} - -// GetTask blocks waiting for a task. -// Returns error when context deadline is exceeded -// maxDispatchPerSecond is the max rate at which tasks are allowed -// to be dispatched from this task queue to pollers -func (c *taskQueueManagerImpl) GetTask( - ctx context.Context, - pollMetadata *pollMetadata, -) (*internalTask, error) { - c.liveness.markAlive() - - c.currentPolls.Add(1) - defer c.currentPolls.Add(-1) - - namespaceEntry, err := c.namespaceRegistry.GetNamespaceByID(c.taskQueueID.namespaceID) - if err != nil { - return nil, err - } - - // the desired global rate limit for the task queue comes from the - // poller, which lives inside the client side worker. There is - // one rateLimiter for this entire task queue and as we get polls, - // we update the ratelimiter rps if it has changed from the last - // value. 
Last poller wins if different pollers provide different values - c.matcher.UpdateRatelimit(pollMetadata.ratePerSecond) - - if !namespaceEntry.ActiveInCluster(c.clusterMeta.GetCurrentClusterName()) { - return c.matcher.PollForQuery(ctx, pollMetadata) - } - - task, err := c.matcher.Poll(ctx, pollMetadata) - if err != nil { - return nil, err - } - - task.namespace = c.namespace - task.backlogCountHint = c.taskAckManager.getBacklogCountHint - return task, nil -} - -// DispatchSpooledTask dispatches a task to a poller. When there are no pollers to pick -// up the task or if rate limit is exceeded, this method will return error. Task -// *will not* be persisted to db -func (c *taskQueueManagerImpl) DispatchSpooledTask( - ctx context.Context, - task *internalTask, - userDataChanged chan struct{}, -) error { - return c.matcher.MustOffer(ctx, task, userDataChanged) -} - -// DispatchQueryTask will dispatch query to local or remote poller. If forwarded then result or error is returned, -// if dispatched to local poller then nil and nil is returned. -func (c *taskQueueManagerImpl) DispatchQueryTask( - ctx context.Context, - taskID string, - request *matchingservice.QueryWorkflowRequest, -) (*matchingservice.QueryWorkflowResponse, error) { - task := newInternalQueryTask(taskID, request) - return c.matcher.OfferQuery(ctx, task) -} - -// GetUserData returns the user data for the task queue if any. -// Note: can return nil value with no error. -func (c *taskQueueManagerImpl) GetUserData(ctx context.Context) (*persistencespb.VersionedTaskQueueUserData, chan struct{}, error) { - return c.db.GetUserData(ctx) -} - -// UpdateUserData updates user data for this task queue and replicates across clusters if necessary. -func (c *taskQueueManagerImpl) UpdateUserData(ctx context.Context, options UserDataUpdateOptions, updateFn UserDataUpdateFunc) error { - newData, shouldReplicate, err := c.db.UpdateUserData(ctx, updateFn, options.KnownVersion, options.TaskQueueLimitPerBuildId) - if err != nil { - return err - } - c.signalIfFatal(err) - if !shouldReplicate { - return nil - } - - // Only replicate if namespace is global and has at least 2 clusters registered. 
- ns, err := c.namespaceRegistry.GetNamespaceByID(c.db.namespaceID) - if err != nil { - return err - } - if ns.ReplicationPolicy() != namespace.ReplicationPolicyMultiCluster { - return nil - } - - _, err = c.matchingClient.ReplicateTaskQueueUserData(ctx, &matchingservice.ReplicateTaskQueueUserDataRequest{ - NamespaceId: c.db.namespaceID.String(), - TaskQueue: c.taskQueueID.BaseNameString(), - UserData: newData.GetData(), - }) - if err != nil { - c.logger.Error("Failed to publish a replication task after updating task queue user data", tag.Error(err)) - return serviceerror.NewUnavailable("storing task queue user data succeeded but publishing to the namespace replication queue failed, please try again") - } - return err -} - -func (c *taskQueueManagerImpl) UpdatePollerInfo(id pollerIdentity, pollMetadata *pollMetadata) { - if c.pollerHistory != nil { - c.pollerHistory.updatePollerInfo(id, pollMetadata) - } -} - -// GetAllPollerInfo returns all pollers that polled from this taskqueue in last few minutes -func (c *taskQueueManagerImpl) GetAllPollerInfo() []*taskqueuepb.PollerInfo { - if c.pollerHistory == nil { - return nil - } - return c.pollerHistory.getPollerInfo(time.Time{}) -} - -func (c *taskQueueManagerImpl) HasPollerAfter(accessTime time.Time) bool { - if c.currentPolls.Load() > 0 { - return true - } - if c.pollerHistory == nil { - return false - } - recentPollers := c.pollerHistory.getPollerInfo(accessTime) - return len(recentPollers) > 0 -} - -// DescribeTaskQueue returns information about the target taskqueue, right now this API returns the -// pollers which polled this taskqueue in last few minutes and status of taskqueue's ackManager -// (readLevel, ackLevel, backlogCountHint and taskIDBlock). -func (c *taskQueueManagerImpl) DescribeTaskQueue(includeTaskQueueStatus bool) *matchingservice.DescribeTaskQueueResponse { - response := &matchingservice.DescribeTaskQueueResponse{Pollers: c.GetAllPollerInfo()} - if !includeTaskQueueStatus { - return response - } - - taskIDBlock := rangeIDToTaskIDBlock(c.db.RangeID(), c.config.RangeSize) - response.TaskQueueStatus = &taskqueuepb.TaskQueueStatus{ - ReadLevel: c.taskAckManager.getReadLevel(), - AckLevel: c.taskAckManager.getAckLevel(), - BacklogCountHint: c.taskAckManager.getBacklogCountHint(), - RatePerSecond: c.matcher.Rate(), - TaskIdBlock: &taskqueuepb.TaskIdBlock{ - StartId: taskIDBlock.start, - EndId: taskIDBlock.end, - }, - } - - return response -} - -func (c *taskQueueManagerImpl) String() string { - buf := new(bytes.Buffer) - if c.taskQueueID.taskType == enumspb.TASK_QUEUE_TYPE_ACTIVITY { - buf.WriteString("Activity") - } else { - buf.WriteString("Workflow") - } - rangeID := c.db.RangeID() - _, _ = fmt.Fprintf(buf, " task queue %v\n", c.taskQueueID.FullName()) - _, _ = fmt.Fprintf(buf, "RangeID=%v\n", rangeID) - _, _ = fmt.Fprintf(buf, "TaskIDBlock=%+v\n", rangeIDToTaskIDBlock(rangeID, c.config.RangeSize)) - _, _ = fmt.Fprintf(buf, "AckLevel=%v\n", c.taskAckManager.ackLevel) - _, _ = fmt.Fprintf(buf, "MaxTaskID=%v\n", c.taskAckManager.getReadLevel()) - - return buf.String() -} - -// completeTask marks a task as processed. Only tasks created by taskReader (i.e. backlog from db) reach -// here. As part of completion: -// - task is deleted from the database when err is nil -// - new task is created and current task is deleted when err is not nil -func (c *taskQueueManagerImpl) completeTask(task *persistencespb.AllocatedTaskInfo, err error) { - if err != nil { - // failed to start the task. 
- // We cannot just remove it from persistence because then it will be lost. - // We handle this by writing the task back to persistence with a higher taskID. - // This will allow subsequent tasks to make progress, and hopefully by the time this task is picked-up - // again the underlying reason for failing to start will be resolved. - // Note that RecordTaskStarted only fails after retrying for a long time, so a single task will not be - // re-written to persistence frequently. - err = executeWithRetry(context.Background(), func(_ context.Context) error { - wf := &commonpb.WorkflowExecution{WorkflowId: task.Data.GetWorkflowId(), RunId: task.Data.GetRunId()} - _, err := c.taskWriter.appendTask(wf, task.Data) - return err - }) - - if err != nil { - // OK, we also failed to write to persistence. - // This should only happen in very extreme cases where persistence is completely down. - // We still can't lose the old task so we just unload the entire task queue - c.logger.Error("Persistent store operation failure", - tag.StoreOperationStopTaskQueue, - tag.Error(err), - tag.WorkflowTaskQueueName(c.taskQueueID.FullName()), - tag.WorkflowTaskQueueType(c.taskQueueID.taskType)) - c.lostOwnership.Store(true) // not really lost ownership but we want to skip the last write - c.unloadFromEngine() - return - } - c.taskReader.Signal() - } - - ackLevel := c.taskAckManager.completeTask(task.GetTaskId()) - - // TODO: completeTaskFunc and task.finish() should take in a context - ctx, cancel := c.newIOContext() - defer cancel() - c.taskGC.Run(ctx, ackLevel) -} - -func rangeIDToTaskIDBlock(rangeID int64, rangeSize int64) taskIDBlock { - return taskIDBlock{ - start: (rangeID-1)*rangeSize + 1, - end: rangeID * rangeSize, - } -} - -// Retry operation on transient error. -func executeWithRetry( - ctx context.Context, - operation func(context.Context) error, -) error { - return backoff.ThrottleRetryContext(ctx, operation, persistenceOperationRetryPolicy, func(err error) bool { - if common.IsContextDeadlineExceededErr(err) || common.IsContextCanceledErr(err) { - return false - } - if _, ok := err.(*persistence.ConditionFailedError); ok { - return false - } - return common.IsPersistenceTransientError(err) - }) -} - -func (c *taskQueueManagerImpl) trySyncMatch(ctx context.Context, params addTaskParams) (bool, error) { - if params.forwardedFrom == "" && c.config.TestDisableSyncMatch() { - return false, nil - } - childCtx, cancel := newChildContext(ctx, c.config.SyncMatchWaitDuration(), time.Second) - defer cancel() - - // Use fake TaskId for sync match as it hasn't been allocated yet - fakeTaskIdWrapper := &persistencespb.AllocatedTaskInfo{ - Data: params.taskInfo, - TaskId: syncMatchTaskId, - } - - task := newInternalTask(fakeTaskIdWrapper, nil, params.source, params.forwardedFrom, true) - return c.matcher.Offer(childCtx, task) -} - -// newChildContext creates a child context with desired timeout. -// if tailroom is non-zero, then child context timeout will be -// the minOf(parentCtx.Deadline()-tailroom, timeout). 
Use this -// method to create child context when childContext cannot use -// all of parent's deadline but instead there is a need to leave -// some time for parent to do some post-work -func newChildContext( - parent context.Context, - timeout time.Duration, - tailroom time.Duration, -) (context.Context, context.CancelFunc) { - if parent.Err() != nil { - return parent, func() {} - } - deadline, ok := parent.Deadline() - if !ok { - return context.WithTimeout(parent, timeout) - } - remaining := time.Until(deadline) - tailroom - if remaining < timeout { - timeout = util.Max(0, remaining) - } - return context.WithTimeout(parent, timeout) -} - -func (c *taskQueueManagerImpl) isFowardingAllowed(taskQueue *taskQueueID, kind enumspb.TaskQueueKind) bool { - return !taskQueue.IsRoot() && kind != enumspb.TASK_QUEUE_KIND_STICKY -} - -func (c *taskQueueManagerImpl) QueueID() *taskQueueID { - return c.taskQueueID -} - -func (c *taskQueueManagerImpl) TaskQueueKind() enumspb.TaskQueueKind { - return c.kind -} - -func (c *taskQueueManagerImpl) LongPollExpirationInterval() time.Duration { - return c.config.LongPollExpirationInterval() -} - -func (c *taskQueueManagerImpl) callerInfoContext(ctx context.Context) context.Context { - namespace, _ := c.namespaceRegistry.GetNamespaceName(c.taskQueueID.namespaceID) - return headers.SetCallerInfo(ctx, headers.NewBackgroundCallerInfo(namespace.String())) -} - -func (c *taskQueueManagerImpl) newIOContext() (context.Context, context.CancelFunc) { - ctx, cancel := context.WithTimeout(context.Background(), ioTimeout) - return c.callerInfoContext(ctx), cancel -} - -func (c *taskQueueManagerImpl) loadUserData(ctx context.Context) error { - ctx = c.callerInfoContext(ctx) - - hasLoadedUserData := false - - for ctx.Err() == nil { - if !c.config.LoadUserData() { - // if disabled, mark disabled and ready - c.SetUserDataState(userDataDisabled, nil) - hasLoadedUserData = false // load again if re-enabled - } else if !hasLoadedUserData { - // otherwise try to load from db once - err := c.db.loadUserData(ctx) - c.SetUserDataState(userDataEnabled, err) - hasLoadedUserData = err == nil - } else { - // if already loaded, set enabled - c.SetUserDataState(userDataEnabled, nil) - } - common.InterruptibleSleep(ctx, c.config.GetUserDataLongPollTimeout()) - } - - return nil -} - -func (c *taskQueueManagerImpl) userDataFetchSource() (string, error) { - if c.kind == enumspb.TASK_QUEUE_KIND_STICKY { - // Sticky queues get data from their corresponding normal queue - if c.normalName == "" { - // Older SDKs don't send the normal name. That's okay, they just can't use versioning. - return "", errMissingNormalQueueName - } - return c.normalName, nil - } - - degree := c.config.ForwarderMaxChildrenPerNode() - parent, err := c.taskQueueID.Parent(degree) - if err == tqname.ErrNoParent { - // we're the root activity task queue, ask the root workflow task queue - return c.taskQueueID.FullName(), nil - } else if err != nil { - // invalid degree - return "", err - } - return parent.FullName(), nil -} - -func (c *taskQueueManagerImpl) fetchUserData(ctx context.Context) error { - ctx = c.callerInfoContext(ctx) - - if c.managesSpecificVersionSet() { - // tqm for specific version set doesn't have its own user data - c.SetUserDataState(userDataSpecificVersion, nil) - return nil - } - - // otherwise fetch from parent partition - - fetchSource, err := c.userDataFetchSource() - if err != nil { - if err == errMissingNormalQueueName { - // pretend we have no user data. 
this is a sticky queue so the only effect is that we can't - // kick off versioned pollers. - c.SetUserDataState(userDataEnabled, nil) - } - return err - } - - // hasFetchedUserData is true if we have gotten a successful reply to GetTaskQueueUserData. - // It's used to control whether we do a long poll or a simple get. - hasFetchedUserData := false - - op := func(ctx context.Context) error { - if !c.config.LoadUserData() { - // if disabled, mark disabled and ready, but allow retries so that we notice if - // it's re-enabled - c.SetUserDataState(userDataDisabled, nil) - return errUserDataDisabled - } - - knownUserData, _, err := c.GetUserData(ctx) - if err != nil { - // Start with a non-long poll after re-enabling after disable, so that we don't have to wait the - // full long poll interval before calling SetUserDataStatus to enable again. - // Leave knownUserData as nil and GetVersion will return 0. - hasFetchedUserData = false - } - - callCtx, cancel := context.WithTimeout(ctx, c.config.GetUserDataLongPollTimeout()) - defer cancel() - - res, err := c.matchingClient.GetTaskQueueUserData(callCtx, &matchingservice.GetTaskQueueUserDataRequest{ - NamespaceId: c.taskQueueID.namespaceID.String(), - TaskQueue: fetchSource, - TaskQueueType: enumspb.TASK_QUEUE_TYPE_WORKFLOW, - LastKnownUserDataVersion: knownUserData.GetVersion(), - WaitNewData: hasFetchedUserData, - }) - if err != nil { - var unimplErr *serviceerror.Unimplemented - var failedPrecondErr *serviceerror.FailedPrecondition - if errors.As(err, &unimplErr) { - // This might happen during a deployment. The older version couldn't have had any user data, - // so we act as if it just returned an empty response and set ourselves ready. - // Return the error so that we backoff with retry, and do not set hasFetchedUserData so that - // we don't do a long poll next time. - c.SetUserDataState(userDataEnabled, nil) - } else if errors.As(err, &failedPrecondErr) { - // This means the parent has the LoadUserData switch turned off. Act like our switch is off also. - c.SetUserDataState(userDataDisabled, nil) - } - return err - } - // If the root partition returns nil here, then that means our data matched, and we don't need to update. - // If it's nil because it never existed, then we'd never have any data. - // It can't be nil due to removing versions, as that would result in a non-nil container with - // nil inner fields. - if res.GetUserData() != nil { - c.db.setUserDataForNonOwningPartition(res.GetUserData()) - } - hasFetchedUserData = true - c.SetUserDataState(userDataEnabled, nil) - return nil - } - - minWaitTime := c.config.GetUserDataMinWaitTime - - for ctx.Err() == nil { - start := time.Now() - _ = backoff.ThrottleRetryContext(ctx, op, c.config.GetUserDataRetryPolicy, nil) - elapsed := time.Since(start) - - // In general we want to start a new call immediately on completion of the previous - // one. But if the remote is broken and returns success immediately, we might end up - // spinning. So enforce a minimum wait time that increases as long as we keep getting - // very fast replies. - if elapsed < minWaitTime { - common.InterruptibleSleep(ctx, minWaitTime-elapsed) - // Don't let this get near our call timeout, otherwise we can't tell the difference - // between a fast reply and a timeout. 
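As a rough illustration of that adaptive floor (the interval values here are invented, not the server's dynamic-config defaults): the floor doubles after every suspiciously fast reply, is capped well below the long-poll timeout, and resets once a call takes a normal amount of time.

package main

import (
	"fmt"
	"time"
)

// minDur avoids relying on the Go 1.21 built-in min.
func minDur(a, b time.Duration) time.Duration {
	if a < b {
		return a
	}
	return b
}

func main() {
	const (
		baseMinWait     = 100 * time.Millisecond // stand-in for config.GetUserDataMinWaitTime
		longPollTimeout = 10 * time.Second       // stand-in for config.GetUserDataLongPollTimeout()
	)
	minWait := baseMinWait
	// Simulated durations of successive fetch calls: three instant replies, then a real long poll.
	for _, elapsed := range []time.Duration{time.Millisecond, time.Millisecond, time.Millisecond, 8 * time.Second} {
		if elapsed < minWait {
			// Fast reply: wait out the floor, then raise it (capped at half the call timeout).
			fmt.Printf("fast reply (%v): sleep %v\n", elapsed, minWait-elapsed)
			minWait = minDur(minWait*2, longPollTimeout/2)
		} else {
			// A reply that took a normal amount of time resets the floor.
			minWait = baseMinWait
			fmt.Printf("normal reply (%v): floor back to %v\n", elapsed, minWait)
		}
	}
	fmt.Println("final floor:", minWait)
}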
- minWaitTime = util.Min(minWaitTime*2, c.config.GetUserDataLongPollTimeout()/2) - } else { - minWaitTime = c.config.GetUserDataMinWaitTime - } - } - - return ctx.Err() -} diff -Nru temporal-1.21.5-1/src/service/matching/taskQueueManager_test.go temporal-1.22.5/src/service/matching/taskQueueManager_test.go --- temporal-1.21.5-1/src/service/matching/taskQueueManager_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/matching/taskQueueManager_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,1366 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package matching - -import ( - "context" - "errors" - "math" - "sync/atomic" - "testing" - "time" - - "github.com/gogo/protobuf/types" - "github.com/golang/mock/gomock" - "github.com/pborman/uuid" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/uber-go/tally/v4" - commonpb "go.temporal.io/api/common/v1" - enumspb "go.temporal.io/api/enums/v1" - "go.temporal.io/api/serviceerror" - "google.golang.org/grpc" - - enumsspb "go.temporal.io/server/api/enums/v1" - "go.temporal.io/server/api/matchingservice/v1" - "go.temporal.io/server/api/matchingservicemock/v1" - persistencespb "go.temporal.io/server/api/persistence/v1" - "go.temporal.io/server/api/taskqueue/v1" - "go.temporal.io/server/common" - "go.temporal.io/server/common/backoff" - "go.temporal.io/server/common/cluster" - "go.temporal.io/server/common/dynamicconfig" - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/metrics" - "go.temporal.io/server/common/namespace" - "go.temporal.io/server/common/persistence" - "go.temporal.io/server/common/persistence/visibility/manager" - "go.temporal.io/server/common/primitives/timestamp" - "go.temporal.io/server/internal/goro" -) - -var rpsInf = math.Inf(1) - -const ( - defaultNamespaceId = namespace.ID("deadbeef-0000-4567-890a-bcdef0123456") - defaultRootTqID = "tq" -) - -type tqmTestOpts struct { - config *Config - tqId *taskQueueID - matchingClientMock *matchingservicemock.MockMatchingServiceClient -} - -func defaultTqmTestOpts(controller *gomock.Controller) *tqmTestOpts { - return &tqmTestOpts{ - config: defaultTestConfig(), - tqId: defaultTqId(), - matchingClientMock: matchingservicemock.NewMockMatchingServiceClient(controller), - } -} - -func TestDeliverBufferTasks(t *testing.T) { - controller 
:= gomock.NewController(t) - defer controller.Finish() - - tests := []func(tlm *taskQueueManagerImpl){ - func(tlm *taskQueueManagerImpl) { close(tlm.taskReader.taskBuffer) }, - func(tlm *taskQueueManagerImpl) { tlm.taskReader.gorogrp.Cancel() }, - func(tlm *taskQueueManagerImpl) { - rps := 0.1 - tlm.matcher.UpdateRatelimit(&rps) - tlm.taskReader.taskBuffer <- &persistencespb.AllocatedTaskInfo{} - err := tlm.matcher.rateLimiter.Wait(context.Background()) // consume the token - assert.NoError(t, err) - tlm.taskReader.gorogrp.Cancel() - }, - } - for _, test := range tests { - tlm := mustCreateTestTaskQueueManager(t, controller) - tlm.taskReader.gorogrp.Go(tlm.taskReader.dispatchBufferedTasks) - test(tlm) - // dispatchBufferedTasks should stop after invocation of the test function - tlm.taskReader.gorogrp.Wait() - } -} - -func TestDeliverBufferTasks_NoPollers(t *testing.T) { - controller := gomock.NewController(t) - defer controller.Finish() - - tlm := mustCreateTestTaskQueueManager(t, controller) - tlm.taskReader.taskBuffer <- &persistencespb.AllocatedTaskInfo{} - tlm.taskReader.gorogrp.Go(tlm.taskReader.dispatchBufferedTasks) - time.Sleep(100 * time.Millisecond) // let go routine run first and block on tasksForPoll - tlm.taskReader.gorogrp.Cancel() - tlm.taskReader.gorogrp.Wait() -} - -func TestDeliverBufferTasks_DisableUserData_SendsVersionedToUnversioned(t *testing.T) { - t.Parallel() - - controller := gomock.NewController(t) - defer controller.Finish() - - tlm := mustCreateTestTaskQueueManager(t, controller) - tlm.config.LoadUserData = dynamicconfig.GetBoolPropertyFn(false) - - scope := tally.NewTestScope("test", nil) - tlm.metricsHandler = metrics.NewTallyMetricsHandler(metrics.ClientConfig{}, scope) - - tlm.taskReader.taskBuffer <- &persistencespb.AllocatedTaskInfo{ - Data: &persistencespb.TaskInfo{ - VersionDirective: &taskqueue.TaskVersionDirective{ - Value: &taskqueue.TaskVersionDirective_BuildId{BuildId: "asdf"}, - }, - }, - } - - tlm.SetInitializedError(nil) - tlm.SetUserDataState(userDataEnabled, nil) - tlm.taskReader.gorogrp.Go(tlm.taskReader.dispatchBufferedTasks) - - time.Sleep(3 * taskReaderOfferThrottleWait) - - // count retries with this metric - errCount := scope.Snapshot().Counters()["test.buffer_throttle_count+"] - require.NotNil(t, errCount, "nil counter probably means dispatch did not get error and blocked trying to load new tqm") - require.GreaterOrEqual(t, errCount.Value(), int64(2)) - - tlm.taskReader.gorogrp.Cancel() - tlm.taskReader.gorogrp.Wait() -} - -func TestDeliverBufferTasks_DisableUserData_SendsDefaultToUnversioned(t *testing.T) { - t.Parallel() - - controller := gomock.NewController(t) - defer controller.Finish() - - tlm := mustCreateTestTaskQueueManager(t, controller) - tlm.config.LoadUserData = dynamicconfig.GetBoolPropertyFn(false) - - scope := tally.NewTestScope("test", nil) - tlm.metricsHandler = metrics.NewTallyMetricsHandler(metrics.ClientConfig{}, scope) - - tlm.taskReader.taskBuffer <- &persistencespb.AllocatedTaskInfo{ - Data: &persistencespb.TaskInfo{ - VersionDirective: &taskqueue.TaskVersionDirective{ - Value: &taskqueue.TaskVersionDirective_UseDefault{UseDefault: &types.Empty{}}, - }, - }, - } - - tlm.SetInitializedError(nil) - tlm.SetUserDataState(userDataEnabled, nil) - tlm.taskReader.gorogrp.Go(tlm.taskReader.dispatchBufferedTasks) - - time.Sleep(taskReaderOfferThrottleWait) - - // should be no retries - errCount := scope.Snapshot().Counters()["test.buffer_throttle_count+"] - require.Nil(t, errCount) - - tlm.taskReader.gorogrp.Cancel() - 
tlm.taskReader.gorogrp.Wait() -} - -func TestReadLevelForAllExpiredTasksInBatch(t *testing.T) { - controller := gomock.NewController(t) - defer controller.Finish() - - tlm := mustCreateTestTaskQueueManager(t, controller) - tlm.db.rangeID = int64(1) - tlm.db.ackLevel = int64(0) - tlm.taskAckManager.setAckLevel(tlm.db.ackLevel) - tlm.taskAckManager.setReadLevel(tlm.db.ackLevel) - require.Equal(t, int64(0), tlm.taskAckManager.getAckLevel()) - require.Equal(t, int64(0), tlm.taskAckManager.getReadLevel()) - - // Add all expired tasks - tasks := []*persistencespb.AllocatedTaskInfo{ - { - Data: &persistencespb.TaskInfo{ - ExpiryTime: timestamp.TimeNowPtrUtcAddSeconds(-60), - CreateTime: timestamp.TimeNowPtrUtcAddSeconds(-60 * 60), - }, - TaskId: 11, - }, - { - Data: &persistencespb.TaskInfo{ - ExpiryTime: timestamp.TimeNowPtrUtcAddSeconds(-60), - CreateTime: timestamp.TimeNowPtrUtcAddSeconds(-60 * 60), - }, - TaskId: 12, - }, - } - - require.NoError(t, tlm.taskReader.addTasksToBuffer(context.TODO(), tasks)) - require.Equal(t, int64(0), tlm.taskAckManager.getAckLevel()) - require.Equal(t, int64(12), tlm.taskAckManager.getReadLevel()) - - // Now add a mix of valid and expired tasks - require.NoError(t, tlm.taskReader.addTasksToBuffer(context.TODO(), []*persistencespb.AllocatedTaskInfo{ - { - Data: &persistencespb.TaskInfo{ - ExpiryTime: timestamp.TimeNowPtrUtcAddSeconds(-60), - CreateTime: timestamp.TimeNowPtrUtcAddSeconds(-60 * 60), - }, - TaskId: 13, - }, - { - Data: &persistencespb.TaskInfo{ - ExpiryTime: timestamp.TimeNowPtrUtcAddSeconds(-60), - CreateTime: timestamp.TimeNowPtrUtcAddSeconds(-60 * 60), - }, - TaskId: 14, - }, - })) - require.Equal(t, int64(0), tlm.taskAckManager.getAckLevel()) - require.Equal(t, int64(14), tlm.taskAckManager.getReadLevel()) -} - -type testIDBlockAlloc struct { - rid int64 - alloc func() (taskQueueState, error) -} - -func (a *testIDBlockAlloc) RangeID() int64 { - return a.rid -} - -func (a *testIDBlockAlloc) RenewLease(_ context.Context) (taskQueueState, error) { - s, err := a.alloc() - if err == nil { - a.rid = s.rangeID - } - return s, err -} - -func makeTestBlocAlloc(f func() (taskQueueState, error)) taskQueueManagerOpt { - return withIDBlockAllocator(&testIDBlockAlloc{alloc: f}) -} - -func TestSyncMatchLeasingUnavailable(t *testing.T) { - tqm := mustCreateTestTaskQueueManager(t, gomock.NewController(t), - makeTestBlocAlloc(func() (taskQueueState, error) { - // any error other than ConditionFailedError indicates an - // availability problem at a lower layer so the TQM should NOT - // unload itself because resilient sync match is enabled. - return taskQueueState{}, errors.New(t.Name()) - })) - tqm.Start() - defer tqm.Stop() - poller, _ := runOneShotPoller(context.Background(), tqm) - defer poller.Cancel() - - sync, err := tqm.AddTask(context.TODO(), addTaskParams{ - execution: &commonpb.WorkflowExecution{}, - taskInfo: &persistencespb.TaskInfo{}, - source: enumsspb.TASK_SOURCE_HISTORY}) - require.NoError(t, err) - require.True(t, sync) -} - -func TestForeignPartitionOwnerCausesUnload(t *testing.T) { - cfg := NewConfig(dynamicconfig.NewNoopCollection(), false, false) - cfg.RangeSize = 1 // TaskID block size - var leaseErr error = nil - tqm := mustCreateTestTaskQueueManager(t, gomock.NewController(t), - makeTestBlocAlloc(func() (taskQueueState, error) { - return taskQueueState{rangeID: 1}, leaseErr - })) - tqm.Start() - defer tqm.Stop() - - // TQM started succesfully with an ID block of size 1. 
Perform one send - // without a poller to consume the one task ID from the reserved block. - sync, err := tqm.AddTask(context.TODO(), addTaskParams{ - execution: &commonpb.WorkflowExecution{}, - taskInfo: &persistencespb.TaskInfo{}, - source: enumsspb.TASK_SOURCE_HISTORY}) - require.False(t, sync) - require.NoError(t, err) - - // TQM's ID block should be empty so the next AddTask will trigger an - // attempt to obtain more IDs. This specific error type indicates that - // another service instance has become the owner of the partition - leaseErr = &persistence.ConditionFailedError{Msg: "should kill the tqm"} - - sync, err = tqm.AddTask(context.TODO(), addTaskParams{ - execution: &commonpb.WorkflowExecution{}, - taskInfo: &persistencespb.TaskInfo{}, - source: enumsspb.TASK_SOURCE_HISTORY, - }) - require.NoError(t, err) - require.False(t, sync) -} - -func TestReaderSignaling(t *testing.T) { - readerNotifications := make(chan struct{}, 1) - clearNotifications := func() { - for len(readerNotifications) > 0 { - <-readerNotifications - } - } - tqm := mustCreateTestTaskQueueManager(t, gomock.NewController(t)) - - // redirect taskReader signals into our local channel - tqm.taskReader.notifyC = readerNotifications - - tqm.Start() - defer tqm.Stop() - - // shut down the taskReader so it doesn't steal notifications from us - tqm.taskReader.gorogrp.Cancel() - tqm.taskReader.gorogrp.Wait() - - clearNotifications() - - sync, err := tqm.AddTask(context.TODO(), addTaskParams{ - execution: &commonpb.WorkflowExecution{}, - taskInfo: &persistencespb.TaskInfo{}, - source: enumsspb.TASK_SOURCE_HISTORY}) - require.NoError(t, err) - require.False(t, sync) - require.Len(t, readerNotifications, 1, - "Sync match failure with successful db write should signal taskReader") - - clearNotifications() - poller, _ := runOneShotPoller(context.Background(), tqm) - defer poller.Cancel() - - sync, err = tqm.AddTask(context.TODO(), addTaskParams{ - execution: &commonpb.WorkflowExecution{}, - taskInfo: &persistencespb.TaskInfo{}, - source: enumsspb.TASK_SOURCE_HISTORY}) - require.NoError(t, err) - require.True(t, sync) - require.Len(t, readerNotifications, 0, - "Sync match should not signal taskReader") -} - -// runOneShotPoller spawns a goroutine to call tqm.GetTask on the provided tqm. -// The second return value is a channel of either error or *internalTask. -func runOneShotPoller(ctx context.Context, tqm taskQueueManager) (*goro.Handle, chan interface{}) { - out := make(chan interface{}, 1) - handle := goro.NewHandle(ctx).Go(func(ctx context.Context) error { - task, err := tqm.GetTask(ctx, &pollMetadata{ratePerSecond: &rpsInf}) - if task == nil { - out <- err - return nil - } - task.finish(err) - out <- task - return nil - }) - // tqm.GetTask() needs some time to attach the goro started above to the - // internal task channel. Sorry for this but it appears unavoidable. - time.Sleep(10 * time.Millisecond) - return handle, out -} - -func defaultTqId() *taskQueueID { - return newTestTaskQueueID(defaultNamespaceId, defaultRootTqID, enumspb.TASK_QUEUE_TYPE_WORKFLOW) -} - -func mustCreateTestTaskQueueManager( - t *testing.T, - controller *gomock.Controller, - opts ...taskQueueManagerOpt, -) *taskQueueManagerImpl { - t.Helper() - return mustCreateTestTaskQueueManagerWithConfig(t, controller, defaultTqmTestOpts(controller), opts...) 
-} - -func mustCreateTestTaskQueueManagerWithConfig( - t *testing.T, - controller *gomock.Controller, - testOpts *tqmTestOpts, - opts ...taskQueueManagerOpt, -) *taskQueueManagerImpl { - t.Helper() - tqm, err := createTestTaskQueueManagerWithConfig(controller, testOpts, opts...) - require.NoError(t, err) - return tqm -} - -func createTestTaskQueueManagerWithConfig( - controller *gomock.Controller, - testOpts *tqmTestOpts, - opts ...taskQueueManagerOpt, -) (*taskQueueManagerImpl, error) { - logger := log.NewTestLogger() - tm := newTestTaskManager(logger) - mockNamespaceCache := namespace.NewMockRegistry(controller) - mockNamespaceCache.EXPECT().GetNamespaceByID(gomock.Any()).Return(&namespace.Namespace{}, nil).AnyTimes() - mockNamespaceCache.EXPECT().GetNamespaceName(gomock.Any()).Return(namespace.Name("ns-name"), nil).AnyTimes() - mockVisibilityManager := manager.NewMockVisibilityManager(controller) - mockVisibilityManager.EXPECT().Close().AnyTimes() - cmeta := cluster.NewMetadataForTest(cluster.NewTestClusterMetadataConfig(false, true)) - me := newMatchingEngine(testOpts.config, tm, nil, logger, mockNamespaceCache, testOpts.matchingClientMock, mockVisibilityManager) - tlMgr, err := newTaskQueueManager(me, testOpts.tqId, normalStickyInfo, testOpts.config, cmeta, opts...) - if err != nil { - return nil, err - } - me.taskQueues[*testOpts.tqId] = tlMgr - return tlMgr.(*taskQueueManagerImpl), nil -} - -func TestDescribeTaskQueue(t *testing.T) { - controller := gomock.NewController(t) - defer controller.Finish() - - startTaskID := int64(1) - taskCount := int64(3) - PollerIdentity := "test-poll" - - // Create taskQueue Manager and set taskQueue state - tlm := mustCreateTestTaskQueueManager(t, controller) - tlm.db.rangeID = int64(1) - tlm.db.ackLevel = int64(0) - tlm.taskAckManager.setAckLevel(tlm.db.ackLevel) - - for i := int64(0); i < taskCount; i++ { - tlm.taskAckManager.addTask(startTaskID + i) - } - - includeTaskStatus := false - descResp := tlm.DescribeTaskQueue(includeTaskStatus) - require.Equal(t, 0, len(descResp.GetPollers())) - require.Nil(t, descResp.GetTaskQueueStatus()) - - includeTaskStatus = true - taskQueueStatus := tlm.DescribeTaskQueue(includeTaskStatus).GetTaskQueueStatus() - require.NotNil(t, taskQueueStatus) - require.Zero(t, taskQueueStatus.GetAckLevel()) - require.Equal(t, taskCount, taskQueueStatus.GetReadLevel()) - require.Equal(t, taskCount, taskQueueStatus.GetBacklogCountHint()) - taskIDBlock := taskQueueStatus.GetTaskIdBlock() - require.Equal(t, int64(1), taskIDBlock.GetStartId()) - require.Equal(t, tlm.config.RangeSize, taskIDBlock.GetEndId()) - - // Add a poller and complete all tasks - tlm.pollerHistory.updatePollerInfo(pollerIdentity(PollerIdentity), &pollMetadata{}) - for i := int64(0); i < taskCount; i++ { - tlm.taskAckManager.completeTask(startTaskID + i) - } - - descResp = tlm.DescribeTaskQueue(includeTaskStatus) - require.Equal(t, 1, len(descResp.GetPollers())) - require.Equal(t, PollerIdentity, descResp.Pollers[0].GetIdentity()) - require.NotEmpty(t, descResp.Pollers[0].GetLastAccessTime()) - - rps := 5.0 - tlm.pollerHistory.updatePollerInfo(pollerIdentity(PollerIdentity), &pollMetadata{ratePerSecond: &rps}) - descResp = tlm.DescribeTaskQueue(includeTaskStatus) - require.Equal(t, 1, len(descResp.GetPollers())) - require.Equal(t, PollerIdentity, descResp.Pollers[0].GetIdentity()) - require.True(t, descResp.Pollers[0].GetRatePerSecond() > 4.0 && descResp.Pollers[0].GetRatePerSecond() < 6.0) - - taskQueueStatus = descResp.GetTaskQueueStatus() - require.NotNil(t, 
taskQueueStatus) - require.Equal(t, taskCount, taskQueueStatus.GetAckLevel()) - require.Zero(t, taskQueueStatus.GetBacklogCountHint()) -} - -func TestCheckIdleTaskQueue(t *testing.T) { - controller := gomock.NewController(t) - defer controller.Finish() - - cfg := NewConfig(dynamicconfig.NewNoopCollection(), false, false) - cfg.MaxTaskQueueIdleTime = dynamicconfig.GetDurationPropertyFnFilteredByTaskQueueInfo(2 * time.Second) - tqCfg := defaultTqmTestOpts(controller) - tqCfg.config = cfg - - // Idle - tlm := mustCreateTestTaskQueueManagerWithConfig(t, controller, tqCfg) - tlm.Start() - time.Sleep(1 * time.Second) - require.Equal(t, common.DaemonStatusStarted, atomic.LoadInt32(&tlm.status)) - - // Active poll-er - tlm = mustCreateTestTaskQueueManagerWithConfig(t, controller, tqCfg) - tlm.Start() - tlm.pollerHistory.updatePollerInfo(pollerIdentity("test-poll"), &pollMetadata{}) - require.Equal(t, 1, len(tlm.GetAllPollerInfo())) - time.Sleep(1 * time.Second) - require.Equal(t, common.DaemonStatusStarted, atomic.LoadInt32(&tlm.status)) - tlm.Stop() - require.Equal(t, common.DaemonStatusStopped, atomic.LoadInt32(&tlm.status)) - - // Active adding task - tlm = mustCreateTestTaskQueueManagerWithConfig(t, controller, tqCfg) - tlm.Start() - require.Equal(t, 0, len(tlm.GetAllPollerInfo())) - tlm.taskReader.Signal() - time.Sleep(1 * time.Second) - require.Equal(t, common.DaemonStatusStarted, atomic.LoadInt32(&tlm.status)) - tlm.Stop() - require.Equal(t, common.DaemonStatusStopped, atomic.LoadInt32(&tlm.status)) -} - -func TestAddTaskStandby(t *testing.T) { - controller := gomock.NewController(t) - defer controller.Finish() - - tlm := mustCreateTestTaskQueueManagerWithConfig( - t, - controller, - defaultTqmTestOpts(controller), - func(tqm *taskQueueManagerImpl) { - ns := namespace.NewGlobalNamespaceForTest( - &persistencespb.NamespaceInfo{}, - &persistencespb.NamespaceConfig{}, - &persistencespb.NamespaceReplicationConfig{ - ActiveClusterName: cluster.TestAlternativeClusterName, - }, - cluster.TestAlternativeClusterInitialFailoverVersion, - ) - - // we need to override the mockNamespaceCache to return a passive namespace - mockNamespaceCache := namespace.NewMockRegistry(controller) - mockNamespaceCache.EXPECT().GetNamespaceByID(gomock.Any()).Return(ns, nil).AnyTimes() - mockNamespaceCache.EXPECT().GetNamespaceName(gomock.Any()).Return(ns.Name(), nil).AnyTimes() - tqm.namespaceRegistry = mockNamespaceCache - }, - ) - tlm.Start() - // stop taskWriter so that we can check if there's any call to it - // otherwise the task persist process is async and hard to test - tlm.taskWriter.Stop() - <-tlm.taskWriter.writeLoop.Done() - - addTaskParam := addTaskParams{ - execution: &commonpb.WorkflowExecution{}, - taskInfo: &persistencespb.TaskInfo{}, - source: enumsspb.TASK_SOURCE_HISTORY, - } - - syncMatch, err := tlm.AddTask(context.Background(), addTaskParam) - require.Equal(t, errShutdown, err) // task writer was stopped above - require.False(t, syncMatch) - - addTaskParam.forwardedFrom = "from child partition" - syncMatch, err = tlm.AddTask(context.Background(), addTaskParam) - require.Equal(t, errRemoteSyncMatchFailed, err) // should not persist the task - require.False(t, syncMatch) -} - -func TestTQMDoesFinalUpdateOnIdleUnload(t *testing.T) { - t.Parallel() - - controller := gomock.NewController(t) - - cfg := NewConfig(dynamicconfig.NewNoopCollection(), false, false) - cfg.MaxTaskQueueIdleTime = dynamicconfig.GetDurationPropertyFnFilteredByTaskQueueInfo(1 * time.Second) - tqCfg := defaultTqmTestOpts(controller) 
- tqCfg.config = cfg - - tqm := mustCreateTestTaskQueueManagerWithConfig(t, controller, tqCfg) - tm := tqm.engine.taskManager.(*testTaskManager) - - tqm.Start() - time.Sleep(2 * time.Second) // will unload due to idleness - require.Equal(t, 1, tm.getUpdateCount(tqCfg.tqId)) -} - -func TestTQMDoesNotDoFinalUpdateOnOwnershipLost(t *testing.T) { - // TODO: use mocks instead of testTaskManager so we can do synchronization better instead of sleeps - t.Parallel() - - controller := gomock.NewController(t) - - cfg := NewConfig(dynamicconfig.NewNoopCollection(), false, false) - cfg.UpdateAckInterval = dynamicconfig.GetDurationPropertyFnFilteredByTaskQueueInfo(2 * time.Second) - tqCfg := defaultTqmTestOpts(controller) - tqCfg.config = cfg - - tqm := mustCreateTestTaskQueueManagerWithConfig(t, controller, tqCfg) - tm := tqm.engine.taskManager.(*testTaskManager) - - tqm.Start() - time.Sleep(1 * time.Second) - - // simulate ownership lost - ttm := tm.getTaskQueueManager(tqCfg.tqId) - ttm.Lock() - ttm.rangeID++ - ttm.Unlock() - - time.Sleep(2 * time.Second) // will attempt to update and fail and not try again - - require.Equal(t, 1, tm.getUpdateCount(tqCfg.tqId)) -} - -func TestUserData_LoadOnInit(t *testing.T) { - t.Parallel() - - controller := gomock.NewController(t) - defer controller.Finish() - ctx := context.Background() - tqId, err := newTaskQueueIDWithPartition(defaultNamespaceId, defaultRootTqID, enumspb.TASK_QUEUE_TYPE_WORKFLOW, 0) - require.NoError(t, err) - tqCfg := defaultTqmTestOpts(controller) - tqCfg.tqId = tqId - - data1 := &persistencespb.VersionedTaskQueueUserData{ - Version: 1, - Data: mkUserData(1), - } - - tq := mustCreateTestTaskQueueManagerWithConfig(t, controller, tqCfg) - - tq.engine.taskManager.UpdateTaskQueueUserData(context.Background(), - &persistence.UpdateTaskQueueUserDataRequest{ - NamespaceID: defaultNamespaceId.String(), - TaskQueue: defaultRootTqID, - UserData: data1, - }) - data1.Version++ - - tq.Start() - require.NoError(t, tq.WaitUntilInitialized(ctx)) - userData, _, err := tq.GetUserData(ctx) - require.NoError(t, err) - require.Equal(t, data1, userData) - tq.Stop() -} - -func TestUserData_DontLoadWhenDisabled(t *testing.T) { - t.Parallel() - - controller := gomock.NewController(t) - defer controller.Finish() - ctx := context.Background() - tqId, err := newTaskQueueIDWithPartition(defaultNamespaceId, defaultRootTqID, enumspb.TASK_QUEUE_TYPE_WORKFLOW, 0) - require.NoError(t, err) - tqCfg := defaultTqmTestOpts(controller) - tqCfg.tqId = tqId - - data1 := &persistencespb.VersionedTaskQueueUserData{ - Version: 1, - Data: mkUserData(1), - } - - tq := mustCreateTestTaskQueueManagerWithConfig(t, controller, tqCfg) - tq.config.LoadUserData = dynamicconfig.GetBoolPropertyFn(false) - - require.NoError(t, tq.engine.taskManager.UpdateTaskQueueUserData(context.Background(), - &persistence.UpdateTaskQueueUserDataRequest{ - NamespaceID: defaultNamespaceId.String(), - TaskQueue: defaultRootTqID, - UserData: data1, - })) - - tq.Start() - require.NoError(t, tq.WaitUntilInitialized(ctx)) - userData, _, err := tq.GetUserData(ctx) - require.Nil(t, userData) - require.Equal(t, err, errUserDataDisabled) - tq.Stop() -} - -func TestUserData_LoadDisableEnable(t *testing.T) { - t.Parallel() - - controller := gomock.NewController(t) - defer controller.Finish() - ctx := context.Background() - tqId, err := newTaskQueueIDWithPartition(defaultNamespaceId, defaultRootTqID, enumspb.TASK_QUEUE_TYPE_WORKFLOW, 0) - require.NoError(t, err) - tqCfg := defaultTqmTestOpts(controller) - tqCfg.tqId = tqId - 
- data1 := &persistencespb.VersionedTaskQueueUserData{ - Version: 1, - Data: mkUserData(1), - } - - loadUserData := make(chan bool) - - tq := mustCreateTestTaskQueueManagerWithConfig(t, controller, tqCfg) - tq.config.GetUserDataLongPollTimeout = dynamicconfig.GetDurationPropertyFn(10 * time.Millisecond) - tq.config.LoadUserData = func() bool { return <-loadUserData } - - require.NoError(t, tq.engine.taskManager.UpdateTaskQueueUserData(context.Background(), - &persistence.UpdateTaskQueueUserDataRequest{ - NamespaceID: defaultNamespaceId.String(), - TaskQueue: defaultRootTqID, - UserData: data1, - })) - data1.Version++ - - tq.Start() - - loadUserData <- true - time.Sleep(100 * time.Millisecond) - - require.NoError(t, tq.WaitUntilInitialized(ctx)) - userData, _, err := tq.GetUserData(ctx) - require.NoError(t, err) - require.Equal(t, data1, userData) - - loadUserData <- false - time.Sleep(100 * time.Millisecond) - - userData, _, err = tq.GetUserData(ctx) - require.Equal(t, err, errUserDataDisabled) - require.Nil(t, userData) - - // check engine-level rpc also - _, err = tq.engine.GetTaskQueueUserData(context.Background(), &matchingservice.GetTaskQueueUserDataRequest{ - NamespaceId: tqId.namespaceID.String(), - TaskQueue: tqId.FullName(), - TaskQueueType: tqId.taskType, - }) - var failedPrecondition *serviceerror.FailedPrecondition - require.True(t, errors.As(err, &failedPrecondition)) - - // updated in db without going through tqm (this shouldn't happen but lets us test that it re-reads) - require.NoError(t, tq.engine.taskManager.UpdateTaskQueueUserData(context.Background(), - &persistence.UpdateTaskQueueUserDataRequest{ - NamespaceID: defaultNamespaceId.String(), - TaskQueue: defaultRootTqID, - UserData: data1, - })) - data1.Version++ - - loadUserData <- true - time.Sleep(100 * time.Millisecond) - - userData, _, err = tq.GetUserData(ctx) - require.NoError(t, err) - require.Equal(t, data1, userData) - - tq.Stop() -} - -func TestUserData_LoadOnInit_OnlyOnceWhenNoData(t *testing.T) { - t.Parallel() - - controller := gomock.NewController(t) - defer controller.Finish() - ctx := context.Background() - tqId, err := newTaskQueueIDWithPartition(defaultNamespaceId, defaultRootTqID, enumspb.TASK_QUEUE_TYPE_WORKFLOW, 0) - require.NoError(t, err) - tqCfg := defaultTqmTestOpts(controller) - tqCfg.tqId = tqId - - tq := mustCreateTestTaskQueueManagerWithConfig(t, controller, tqCfg) - tm := tq.engine.taskManager.(*testTaskManager) - - require.Equal(t, 0, tm.getGetUserDataCount(tqId)) - - tq.Start() - require.NoError(t, tq.WaitUntilInitialized(ctx)) - - require.Equal(t, 1, tm.getGetUserDataCount(tqId)) - - userData, _, err := tq.GetUserData(ctx) - require.NoError(t, err) - require.Nil(t, userData) - - require.Equal(t, 1, tm.getGetUserDataCount(tqId)) - - userData, _, err = tq.GetUserData(ctx) - require.NoError(t, err) - require.Nil(t, userData) - - require.Equal(t, 1, tm.getGetUserDataCount(tqId)) - - tq.Stop() -} - -func TestUserData_FetchesOnInit(t *testing.T) { - t.Parallel() - - controller := gomock.NewController(t) - defer controller.Finish() - ctx := context.Background() - tqId, err := newTaskQueueIDWithPartition(defaultNamespaceId, defaultRootTqID, enumspb.TASK_QUEUE_TYPE_WORKFLOW, 1) - require.NoError(t, err) - tqCfg := defaultTqmTestOpts(controller) - tqCfg.tqId = tqId - - data1 := &persistencespb.VersionedTaskQueueUserData{ - Version: 1, - Data: mkUserData(1), - } - - tqCfg.matchingClientMock.EXPECT().GetTaskQueueUserData( - gomock.Any(), - &matchingservice.GetTaskQueueUserDataRequest{ - NamespaceId: 
defaultNamespaceId.String(), - TaskQueue: defaultRootTqID, - TaskQueueType: enumspb.TASK_QUEUE_TYPE_WORKFLOW, - LastKnownUserDataVersion: 0, - WaitNewData: false, // first fetch is not long poll - }). - Return(&matchingservice.GetTaskQueueUserDataResponse{ - TaskQueueHasUserData: true, - UserData: data1, - }, nil) - - tq := mustCreateTestTaskQueueManagerWithConfig(t, controller, tqCfg) - tq.config.GetUserDataMinWaitTime = 10 * time.Second // only one fetch - - tq.Start() - require.NoError(t, tq.WaitUntilInitialized(ctx)) - userData, _, err := tq.GetUserData(ctx) - require.NoError(t, err) - require.Equal(t, data1, userData) - tq.Stop() -} - -func TestUserData_FetchesAndFetchesAgain(t *testing.T) { - t.Parallel() - - controller := gomock.NewController(t) - defer controller.Finish() - ctx := context.Background() - // note: using activity here - tqId, err := newTaskQueueIDWithPartition(defaultNamespaceId, defaultRootTqID, enumspb.TASK_QUEUE_TYPE_ACTIVITY, 1) - require.NoError(t, err) - tqCfg := defaultTqmTestOpts(controller) - tqCfg.tqId = tqId - - data1 := &persistencespb.VersionedTaskQueueUserData{ - Version: 1, - Data: mkUserData(1), - } - data2 := &persistencespb.VersionedTaskQueueUserData{ - Version: 2, - Data: mkUserData(2), - } - - tqCfg.matchingClientMock.EXPECT().GetTaskQueueUserData( - gomock.Any(), - &matchingservice.GetTaskQueueUserDataRequest{ - NamespaceId: defaultNamespaceId.String(), - TaskQueue: defaultRootTqID, - TaskQueueType: enumspb.TASK_QUEUE_TYPE_WORKFLOW, - LastKnownUserDataVersion: 0, - WaitNewData: false, // first is not long poll - }). - Return(&matchingservice.GetTaskQueueUserDataResponse{ - TaskQueueHasUserData: true, - UserData: data1, - }, nil) - - tqCfg.matchingClientMock.EXPECT().GetTaskQueueUserData( - gomock.Any(), - &matchingservice.GetTaskQueueUserDataRequest{ - NamespaceId: defaultNamespaceId.String(), - TaskQueue: defaultRootTqID, - TaskQueueType: enumspb.TASK_QUEUE_TYPE_WORKFLOW, - LastKnownUserDataVersion: 1, - WaitNewData: true, // second is long poll - }). - Return(&matchingservice.GetTaskQueueUserDataResponse{ - TaskQueueHasUserData: true, - UserData: data2, - }, nil) - - tqCfg.matchingClientMock.EXPECT().GetTaskQueueUserData( - gomock.Any(), - &matchingservice.GetTaskQueueUserDataRequest{ - NamespaceId: defaultNamespaceId.String(), - TaskQueue: defaultRootTqID, - TaskQueueType: enumspb.TASK_QUEUE_TYPE_WORKFLOW, - LastKnownUserDataVersion: 2, - WaitNewData: true, - }). 
- Return(nil, serviceerror.NewUnavailable("hold on")).AnyTimes() - - tq := mustCreateTestTaskQueueManagerWithConfig(t, controller, tqCfg) - tq.config.GetUserDataMinWaitTime = 10 * time.Millisecond // fetch again quickly - tq.Start() - time.Sleep(100 * time.Millisecond) - require.NoError(t, tq.WaitUntilInitialized(ctx)) - userData, _, err := tq.GetUserData(ctx) - require.NoError(t, err) - require.Equal(t, data2, userData) - tq.Stop() -} - -func TestUserData_FetchDisableEnable(t *testing.T) { - t.Parallel() - - controller := gomock.NewController(t) - defer controller.Finish() - ctx := context.Background() - // note: using activity here - tqId, err := newTaskQueueIDWithPartition(defaultNamespaceId, defaultRootTqID, enumspb.TASK_QUEUE_TYPE_ACTIVITY, 1) - require.NoError(t, err) - tqCfg := defaultTqmTestOpts(controller) - tqCfg.tqId = tqId - - loadUserData := make(chan bool) - - tq := mustCreateTestTaskQueueManagerWithConfig(t, controller, tqCfg) - tq.config.GetUserDataMinWaitTime = 10 * time.Millisecond // fetch again quickly - tq.config.GetUserDataRetryPolicy = backoff.NewExponentialRetryPolicy(10 * time.Millisecond).WithMaximumInterval(10 * time.Millisecond) - tq.config.LoadUserData = func() bool { return <-loadUserData } - - data1 := &persistencespb.VersionedTaskQueueUserData{ - Version: 1, - Data: mkUserData(1), - } - data2 := &persistencespb.VersionedTaskQueueUserData{ - Version: 2, - Data: mkUserData(2), - } - data3 := &persistencespb.VersionedTaskQueueUserData{ - Version: 3, - Data: mkUserData(3), - } - - tqCfg.matchingClientMock.EXPECT().GetTaskQueueUserData( - gomock.Any(), - &matchingservice.GetTaskQueueUserDataRequest{ - NamespaceId: defaultNamespaceId.String(), - TaskQueue: defaultRootTqID, - TaskQueueType: enumspb.TASK_QUEUE_TYPE_WORKFLOW, - LastKnownUserDataVersion: 0, - WaitNewData: false, // first is not long poll - }). - Return(&matchingservice.GetTaskQueueUserDataResponse{ - TaskQueueHasUserData: true, - UserData: data1, - }, nil) - - tqCfg.matchingClientMock.EXPECT().GetTaskQueueUserData( - gomock.Any(), - &matchingservice.GetTaskQueueUserDataRequest{ - NamespaceId: defaultNamespaceId.String(), - TaskQueue: defaultRootTqID, - TaskQueueType: enumspb.TASK_QUEUE_TYPE_WORKFLOW, - LastKnownUserDataVersion: 1, - WaitNewData: true, // second is long poll - }). - Return(&matchingservice.GetTaskQueueUserDataResponse{ - TaskQueueHasUserData: true, - UserData: data2, - }, nil) - - // after enabling again: - - tqCfg.matchingClientMock.EXPECT().GetTaskQueueUserData( - gomock.Any(), - &matchingservice.GetTaskQueueUserDataRequest{ - NamespaceId: defaultNamespaceId.String(), - TaskQueue: defaultRootTqID, - TaskQueueType: enumspb.TASK_QUEUE_TYPE_WORKFLOW, - LastKnownUserDataVersion: 0, // sends zero for first request after re-enabling - WaitNewData: false, - }). - Return(&matchingservice.GetTaskQueueUserDataResponse{ - TaskQueueHasUserData: true, - UserData: data3, - }, nil) - - tqCfg.matchingClientMock.EXPECT().GetTaskQueueUserData( - gomock.Any(), - &matchingservice.GetTaskQueueUserDataRequest{ - NamespaceId: defaultNamespaceId.String(), - TaskQueue: defaultRootTqID, - TaskQueueType: enumspb.TASK_QUEUE_TYPE_WORKFLOW, - LastKnownUserDataVersion: 3, - WaitNewData: true, - }). 
- Return(nil, serviceerror.NewUnavailable("hold on")).AnyTimes() - - tq.Start() - - loadUserData <- true - loadUserData <- true - time.Sleep(100 * time.Millisecond) - - userData, _, err := tq.GetUserData(ctx) - require.NoError(t, err) - require.Equal(t, data2, userData) - - loadUserData <- false - time.Sleep(100 * time.Millisecond) - - // should have fetched twice but now user data is disabled - userData, _, err = tq.GetUserData(ctx) - require.Nil(t, userData) - require.Equal(t, err, errUserDataDisabled) - - // enable again - loadUserData <- true - time.Sleep(100 * time.Millisecond) - - // should be available again with data3 - userData, _, err = tq.GetUserData(ctx) - require.NoError(t, err) - require.Equal(t, data3, userData) - - tq.Stop() -} - -func TestUserData_RetriesFetchOnUnavailable(t *testing.T) { - t.Parallel() - - controller := gomock.NewController(t) - defer controller.Finish() - ctx := context.Background() - tqId, err := newTaskQueueIDWithPartition(defaultNamespaceId, defaultRootTqID, enumspb.TASK_QUEUE_TYPE_WORKFLOW, 1) - require.NoError(t, err) - tqCfg := defaultTqmTestOpts(controller) - tqCfg.tqId = tqId - - data1 := &persistencespb.VersionedTaskQueueUserData{ - Version: 1, - Data: mkUserData(1), - } - - ch := make(chan struct{}) - - tqCfg.matchingClientMock.EXPECT().GetTaskQueueUserData( - gomock.Any(), - &matchingservice.GetTaskQueueUserDataRequest{ - NamespaceId: defaultNamespaceId.String(), - TaskQueue: defaultRootTqID, - TaskQueueType: enumspb.TASK_QUEUE_TYPE_WORKFLOW, - LastKnownUserDataVersion: 0, - WaitNewData: false, - }). - DoAndReturn(func(ctx context.Context, in *matchingservice.GetTaskQueueUserDataRequest, opts ...grpc.CallOption) (*matchingservice.GetTaskQueueUserDataResponse, error) { - <-ch - return nil, serviceerror.NewUnavailable("wait a sec") - }).Times(3) - - tqCfg.matchingClientMock.EXPECT().GetTaskQueueUserData( - gomock.Any(), - &matchingservice.GetTaskQueueUserDataRequest{ - NamespaceId: defaultNamespaceId.String(), - TaskQueue: defaultRootTqID, - TaskQueueType: enumspb.TASK_QUEUE_TYPE_WORKFLOW, - LastKnownUserDataVersion: 0, - WaitNewData: false, - }). - DoAndReturn(func(ctx context.Context, in *matchingservice.GetTaskQueueUserDataRequest, opts ...grpc.CallOption) (*matchingservice.GetTaskQueueUserDataResponse, error) { - <-ch - return &matchingservice.GetTaskQueueUserDataResponse{ - TaskQueueHasUserData: true, - UserData: data1, - }, nil - }) - - tq := mustCreateTestTaskQueueManagerWithConfig(t, controller, tqCfg) - tq.config.GetUserDataMinWaitTime = 10 * time.Second // wait on success - tq.config.GetUserDataRetryPolicy = backoff.NewExponentialRetryPolicy(50 * time.Millisecond). - WithMaximumInterval(50 * time.Millisecond) // faster retry on failure - - tq.Start() - - ch <- struct{}{} - ch <- struct{}{} - - // at this point it should have tried two times and gotten unavailable. it should not be ready yet. 
- require.False(t, tq.userDataReady.Ready()) - - ch <- struct{}{} - ch <- struct{}{} - time.Sleep(100 * time.Millisecond) // time to return - - // now it should be ready - require.NoError(t, tq.WaitUntilInitialized(ctx)) - userData, _, err := tq.GetUserData(ctx) - require.NoError(t, err) - require.Equal(t, data1, userData) - tq.Stop() -} - -func TestUserData_RetriesFetchOnUnImplemented(t *testing.T) { - t.Parallel() - - controller := gomock.NewController(t) - defer controller.Finish() - ctx := context.Background() - tqId, err := newTaskQueueIDWithPartition(defaultNamespaceId, defaultRootTqID, enumspb.TASK_QUEUE_TYPE_WORKFLOW, 1) - require.NoError(t, err) - tqCfg := defaultTqmTestOpts(controller) - tqCfg.tqId = tqId - - data1 := &persistencespb.VersionedTaskQueueUserData{ - Version: 1, - Data: mkUserData(1), - } - - ch := make(chan struct{}) - - tqCfg.matchingClientMock.EXPECT().GetTaskQueueUserData( - gomock.Any(), - &matchingservice.GetTaskQueueUserDataRequest{ - NamespaceId: defaultNamespaceId.String(), - TaskQueue: defaultRootTqID, - TaskQueueType: enumspb.TASK_QUEUE_TYPE_WORKFLOW, - LastKnownUserDataVersion: 0, - WaitNewData: false, - }). - DoAndReturn(func(ctx context.Context, in *matchingservice.GetTaskQueueUserDataRequest, opts ...grpc.CallOption) (*matchingservice.GetTaskQueueUserDataResponse, error) { - <-ch - return nil, serviceerror.NewUnimplemented("older version") - }).Times(3) - - tqCfg.matchingClientMock.EXPECT().GetTaskQueueUserData( - gomock.Any(), - &matchingservice.GetTaskQueueUserDataRequest{ - NamespaceId: defaultNamespaceId.String(), - TaskQueue: defaultRootTqID, - TaskQueueType: enumspb.TASK_QUEUE_TYPE_WORKFLOW, - LastKnownUserDataVersion: 0, - WaitNewData: false, - }). - DoAndReturn(func(ctx context.Context, in *matchingservice.GetTaskQueueUserDataRequest, opts ...grpc.CallOption) (*matchingservice.GetTaskQueueUserDataResponse, error) { - <-ch - return &matchingservice.GetTaskQueueUserDataResponse{ - TaskQueueHasUserData: true, - UserData: data1, - }, nil - }) - - tq := mustCreateTestTaskQueueManagerWithConfig(t, controller, tqCfg) - tq.config.GetUserDataMinWaitTime = 10 * time.Second // wait on success - tq.config.GetUserDataRetryPolicy = backoff.NewExponentialRetryPolicy(50 * time.Millisecond). - WithMaximumInterval(50 * time.Millisecond) // faster retry on failure - - tq.Start() - - ch <- struct{}{} - ch <- struct{}{} - - // at this point it should have tried once and gotten unimplemented. it should be ready already. 
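// Illustrative sketch only, not part of the patch, with hypothetical names: the error
// classification these tests exercise, as it appears in the removed fetch loop.
// Unimplemented means an older peer that cannot have user data (become ready with no
// data, keep retrying), FailedPrecondition means user data is switched off upstream
// (mark disabled), and anything else keeps the queue unready while retrying.
package sketch

import (
	"errors"

	"go.temporal.io/api/serviceerror"
)

type fetchOutcome int

const (
	outcomeReadyEmpty fetchOutcome = iota // ready with no data; retry in background
	outcomeDisabled                       // user data disabled upstream
	outcomeRetry                          // stay unready and retry with backoff
)

func classifyFetchErr(err error) fetchOutcome {
	var unimpl *serviceerror.Unimplemented
	var failedPrecond *serviceerror.FailedPrecondition
	switch {
	case errors.As(err, &unimpl):
		return outcomeReadyEmpty
	case errors.As(err, &failedPrecond):
		return outcomeDisabled
	default:
		return outcomeRetry
	}
}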
- require.NoError(t, tq.WaitUntilInitialized(ctx)) - - userData, _, err := tq.GetUserData(ctx) - require.Nil(t, userData) - require.NoError(t, err) - - ch <- struct{}{} - ch <- struct{}{} - time.Sleep(100 * time.Millisecond) // time to return - - userData, _, err = tq.GetUserData(ctx) - require.NoError(t, err) - require.Equal(t, data1, userData) - tq.Stop() -} - -func TestUserData_FetchesUpTree(t *testing.T) { - t.Parallel() - - controller := gomock.NewController(t) - defer controller.Finish() - ctx := context.Background() - tqId, err := newTaskQueueIDWithPartition(defaultNamespaceId, defaultRootTqID, enumspb.TASK_QUEUE_TYPE_WORKFLOW, 31) - require.NoError(t, err) - tqCfg := defaultTqmTestOpts(controller) - tqCfg.config.ForwarderMaxChildrenPerNode = dynamicconfig.GetIntPropertyFilteredByTaskQueueInfo(3) - tqCfg.tqId = tqId - - data1 := &persistencespb.VersionedTaskQueueUserData{ - Version: 1, - Data: mkUserData(1), - } - - tqCfg.matchingClientMock.EXPECT().GetTaskQueueUserData( - gomock.Any(), - &matchingservice.GetTaskQueueUserDataRequest{ - NamespaceId: defaultNamespaceId.String(), - TaskQueue: tqId.Name.WithPartition(10).FullName(), - TaskQueueType: enumspb.TASK_QUEUE_TYPE_WORKFLOW, - LastKnownUserDataVersion: 0, - WaitNewData: false, - }). - Return(&matchingservice.GetTaskQueueUserDataResponse{ - TaskQueueHasUserData: true, - UserData: data1, - }, nil) - - tq := mustCreateTestTaskQueueManagerWithConfig(t, controller, tqCfg) - tq.config.GetUserDataMinWaitTime = 10 * time.Second // wait on success - tq.Start() - require.NoError(t, tq.WaitUntilInitialized(ctx)) - userData, _, err := tq.GetUserData(ctx) - require.NoError(t, err) - require.Equal(t, data1, userData) - tq.Stop() -} - -func TestUserData_FetchesActivityToWorkflow(t *testing.T) { - t.Parallel() - - controller := gomock.NewController(t) - defer controller.Finish() - ctx := context.Background() - // note: activity root - tqId, err := newTaskQueueIDWithPartition(defaultNamespaceId, defaultRootTqID, enumspb.TASK_QUEUE_TYPE_ACTIVITY, 0) - require.NoError(t, err) - tqCfg := defaultTqmTestOpts(controller) - tqCfg.tqId = tqId - - data1 := &persistencespb.VersionedTaskQueueUserData{ - Version: 1, - Data: mkUserData(1), - } - - tqCfg.matchingClientMock.EXPECT().GetTaskQueueUserData( - gomock.Any(), - &matchingservice.GetTaskQueueUserDataRequest{ - NamespaceId: defaultNamespaceId.String(), - TaskQueue: defaultRootTqID, - TaskQueueType: enumspb.TASK_QUEUE_TYPE_WORKFLOW, - LastKnownUserDataVersion: 0, - WaitNewData: false, - }). 
- Return(&matchingservice.GetTaskQueueUserDataResponse{ - TaskQueueHasUserData: true, - UserData: data1, - }, nil) - - tq := mustCreateTestTaskQueueManagerWithConfig(t, controller, tqCfg) - tq.config.GetUserDataMinWaitTime = 10 * time.Second // wait on success - tq.Start() - require.NoError(t, tq.WaitUntilInitialized(ctx)) - userData, _, err := tq.GetUserData(ctx) - require.NoError(t, err) - require.Equal(t, data1, userData) - tq.Stop() -} - -func TestUserData_FetchesStickyToNormal(t *testing.T) { - t.Parallel() - - controller := gomock.NewController(t) - defer controller.Finish() - ctx := context.Background() - tqCfg := defaultTqmTestOpts(controller) - - normalName := "normal-queue" - stickyName := uuid.New() - - tqId, err := newTaskQueueIDWithPartition(defaultNamespaceId, stickyName, enumspb.TASK_QUEUE_TYPE_WORKFLOW, 0) - require.NoError(t, err) - tqCfg.tqId = tqId - - data1 := &persistencespb.VersionedTaskQueueUserData{ - Version: 1, - Data: mkUserData(1), - } - - tqCfg.matchingClientMock.EXPECT().GetTaskQueueUserData( - gomock.Any(), - &matchingservice.GetTaskQueueUserDataRequest{ - NamespaceId: defaultNamespaceId.String(), - TaskQueue: normalName, - TaskQueueType: enumspb.TASK_QUEUE_TYPE_WORKFLOW, - LastKnownUserDataVersion: 0, - WaitNewData: false, - }). - Return(&matchingservice.GetTaskQueueUserDataResponse{ - TaskQueueHasUserData: true, - UserData: data1, - }, nil) - - // have to create manually to get sticky - logger := log.NewTestLogger() - tm := newTestTaskManager(logger) - mockNamespaceCache := namespace.NewMockRegistry(controller) - mockNamespaceCache.EXPECT().GetNamespaceByID(gomock.Any()).Return(&namespace.Namespace{}, nil).AnyTimes() - mockNamespaceCache.EXPECT().GetNamespaceName(gomock.Any()).Return(namespace.Name("ns-name"), nil).AnyTimes() - mockVisibilityManager := manager.NewMockVisibilityManager(controller) - mockVisibilityManager.EXPECT().Close().AnyTimes() - me := newMatchingEngine(tqCfg.config, tm, nil, logger, mockNamespaceCache, tqCfg.matchingClientMock, mockVisibilityManager) - cmeta := cluster.NewMetadataForTest(cluster.NewTestClusterMetadataConfig(false, true)) - stickyInfo := stickyInfo{ - kind: enumspb.TASK_QUEUE_KIND_STICKY, - normalName: normalName, - } - tlMgr, err := newTaskQueueManager(me, tqCfg.tqId, stickyInfo, tqCfg.config, cmeta) - require.NoError(t, err) - tq := tlMgr.(*taskQueueManagerImpl) - - tq.config.GetUserDataMinWaitTime = 10 * time.Second // wait on success - tq.Start() - require.NoError(t, tq.WaitUntilInitialized(ctx)) - userData, _, err := tq.GetUserData(ctx) - require.NoError(t, err) - require.Equal(t, data1, userData) - tq.Stop() -} - -func TestUserData_UpdateOnNonRootFails(t *testing.T) { - t.Parallel() - - controller := gomock.NewController(t) - defer controller.Finish() - ctx := context.Background() - - subTqId, err := newTaskQueueIDWithPartition(defaultNamespaceId, defaultRootTqID, enumspb.TASK_QUEUE_TYPE_WORKFLOW, 1) - require.NoError(t, err) - tqCfg := defaultTqmTestOpts(controller) - tqCfg.tqId = subTqId - subTq := mustCreateTestTaskQueueManagerWithConfig(t, controller, tqCfg) - err = subTq.UpdateUserData(ctx, UserDataUpdateOptions{}, func(data *persistencespb.TaskQueueUserData) (*persistencespb.TaskQueueUserData, bool, error) { - return data, false, nil - }) - require.Error(t, err) - require.ErrorIs(t, err, errUserDataNoMutateNonRoot) - - actTqId, err := newTaskQueueIDWithPartition(defaultNamespaceId, defaultRootTqID, enumspb.TASK_QUEUE_TYPE_ACTIVITY, 0) - require.NoError(t, err) - actTqCfg := defaultTqmTestOpts(controller) - 
actTqCfg.tqId = actTqId - actTq := mustCreateTestTaskQueueManagerWithConfig(t, controller, actTqCfg) - err = actTq.UpdateUserData(ctx, UserDataUpdateOptions{}, func(data *persistencespb.TaskQueueUserData) (*persistencespb.TaskQueueUserData, bool, error) { - return data, false, nil - }) - require.Error(t, err) - require.ErrorIs(t, err, errUserDataNoMutateNonRoot) -} - -func TestUserData_DontFetchWhenDisabled(t *testing.T) { - t.Parallel() - - ctx := context.Background() - controller := gomock.NewController(t) - defer controller.Finish() - taskQueueId, err := newTaskQueueIDWithPartition(defaultNamespaceId, defaultRootTqID, enumspb.TASK_QUEUE_TYPE_WORKFLOW, 1) - require.NoError(t, err) - tqCfg := defaultTqmTestOpts(controller) - tqCfg.tqId = taskQueueId - mgr := mustCreateTestTaskQueueManagerWithConfig(t, controller, tqCfg) - tqCfg.matchingClientMock.EXPECT().GetTaskQueueUserData(gomock.Any(), gomock.Any()).Times(0) - mgr.config.LoadUserData = dynamicconfig.GetBoolPropertyFn(false) - mgr.Start() - err = mgr.WaitUntilInitialized(ctx) - require.NoError(t, err) -} diff -Nru temporal-1.21.5-1/src/service/matching/taskReader.go temporal-1.22.5/src/service/matching/taskReader.go --- temporal-1.21.5-1/src/service/matching/taskReader.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/matching/taskReader.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,333 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- -package matching - -import ( - "context" - "errors" - "sync" - "sync/atomic" - "time" - - enumsspb "go.temporal.io/server/api/enums/v1" - persistencespb "go.temporal.io/server/api/persistence/v1" - "go.temporal.io/server/common" - "go.temporal.io/server/common/backoff" - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/log/tag" - "go.temporal.io/server/common/metrics" - "go.temporal.io/server/internal/goro" - "go.temporal.io/server/service/worker/scanner/taskqueue" -) - -const ( - taskReaderOfferThrottleWait = time.Second - taskReaderThrottleRetryDelay = 3 * time.Second -) - -type ( - taskReader struct { - status int32 - taskBuffer chan *persistencespb.AllocatedTaskInfo // tasks loaded from persistence - notifyC chan struct{} // Used as signal to notify pump of new tasks - tlMgr *taskQueueManagerImpl - gorogrp goro.Group - - backoffTimerLock sync.Mutex - backoffTimer *time.Timer - retrier backoff.Retrier - } -) - -func newTaskReader(tlMgr *taskQueueManagerImpl) *taskReader { - return &taskReader{ - status: common.DaemonStatusInitialized, - tlMgr: tlMgr, - notifyC: make(chan struct{}, 1), - // we always dequeue the head of the buffer and try to dispatch it to a poller - // so allocate one less than desired target buffer size - taskBuffer: make(chan *persistencespb.AllocatedTaskInfo, tlMgr.config.GetTasksBatchSize()-1), - retrier: backoff.NewRetrier( - common.CreateReadTaskRetryPolicy(), - backoff.SystemClock, - ), - } -} - -// Start reading pump for the given task queue. -// The pump fills up taskBuffer from persistence. -func (tr *taskReader) Start() { - if !atomic.CompareAndSwapInt32( - &tr.status, - common.DaemonStatusInitialized, - common.DaemonStatusStarted, - ) { - return - } - - tr.gorogrp.Go(tr.dispatchBufferedTasks) - tr.gorogrp.Go(tr.getTasksPump) -} - -// Stop pump that fills up taskBuffer from persistence. -func (tr *taskReader) Stop() { - if !atomic.CompareAndSwapInt32( - &tr.status, - common.DaemonStatusStarted, - common.DaemonStatusStopped, - ) { - return - } - - tr.gorogrp.Cancel() -} - -func (tr *taskReader) Signal() { - var event struct{} - select { - case tr.notifyC <- event: - default: // channel already has an event, don't block - } -} - -func (tr *taskReader) dispatchBufferedTasks(ctx context.Context) error { - ctx = tr.tlMgr.callerInfoContext(ctx) - -dispatchLoop: - for ctx.Err() == nil { - select { - case taskInfo, ok := <-tr.taskBuffer: - if !ok { // Task queue getTasks pump is shutdown - break dispatchLoop - } - task := newInternalTask(taskInfo, tr.tlMgr.completeTask, enumsspb.TASK_SOURCE_DB_BACKLOG, "", false) - for ctx.Err() == nil { - // We checked if the task was expired before putting it in the buffer, but it - // might have expired while it sat in the buffer, so we should check again. - if taskqueue.IsTaskExpired(taskInfo) { - task.finish(nil) - tr.taggedMetricsHandler().Counter(metrics.ExpiredTasksPerTaskQueueCounter.GetMetricName()).Record(1) - // Don't try to set read level here because it may have been advanced already. - break - } - err := tr.tlMgr.engine.DispatchSpooledTask(ctx, task, tr.tlMgr.taskQueueID, tr.tlMgr.stickyInfo) - if err == nil { - break - } - if err == context.Canceled { - break dispatchLoop - } - // this should never happen unless there is a bug - don't drop the task - tr.taggedMetricsHandler().Counter(metrics.BufferThrottlePerTaskQueueCounter.GetMetricName()).Record(1) - if errors.Is(err, errUserDataDisabled) { - // We're trying to dispatch a versioned task but user data isn't loaded. 
- // Don't log here since it would be too spammy. - } else { - tr.logger().Error("taskReader: unexpected error dispatching task", tag.Error(err)) - } - common.InterruptibleSleep(ctx, taskReaderOfferThrottleWait) - } - - case <-ctx.Done(): - break dispatchLoop - } - } - tr.tlMgr.logger.Info("Taskqueue manager context is cancelled, shutting down") - return nil -} - -func (tr *taskReader) getTasksPump(ctx context.Context) error { - ctx = tr.tlMgr.callerInfoContext(ctx) - - if err := tr.tlMgr.WaitUntilInitialized(ctx); err != nil { - return err - } - - updateAckTimer := time.NewTimer(tr.tlMgr.config.UpdateAckInterval()) - defer updateAckTimer.Stop() - - tr.Signal() // prime pump -Loop: - for { - // Prioritize exiting over other processing - select { - case <-ctx.Done(): - return nil - default: - } - - select { - case <-ctx.Done(): - return nil - - case <-tr.notifyC: - tasks, readLevel, isReadBatchDone, err := tr.getTaskBatch(ctx) - tr.tlMgr.signalIfFatal(err) - if err != nil { - // TODO: Should we ever stop retrying on db errors? - if common.IsResourceExhausted(err) { - tr.backoff(taskReaderThrottleRetryDelay) - } else { - tr.backoff(tr.retrier.NextBackOff()) - } - continue Loop - } - tr.retrier.Reset() - - if len(tasks) == 0 { - tr.tlMgr.taskAckManager.setReadLevelAfterGap(readLevel) - if !isReadBatchDone { - tr.Signal() - } - continue Loop - } - - // only error here is due to context cancelation which we also - // handle above - _ = tr.addTasksToBuffer(ctx, tasks) - // There maybe more tasks. We yield now, but signal pump to check again later. - tr.Signal() - - case <-updateAckTimer.C: - err := tr.persistAckLevel(ctx) - isConditionFailed := tr.tlMgr.signalIfFatal(err) - if err != nil && !isConditionFailed { - tr.logger().Error("Persistent store operation failure", - tag.StoreOperationUpdateTaskQueue, - tag.Error(err)) - // keep going as saving ack is not critical - } - tr.Signal() // periodically signal pump to check persistence for tasks - updateAckTimer = time.NewTimer(tr.tlMgr.config.UpdateAckInterval()) - } - } -} - -func (tr *taskReader) getTaskBatchWithRange( - ctx context.Context, - readLevel int64, - maxReadLevel int64, -) ([]*persistencespb.AllocatedTaskInfo, error) { - response, err := tr.tlMgr.db.GetTasks(ctx, readLevel+1, maxReadLevel+1, tr.tlMgr.config.GetTasksBatchSize()) - if err != nil { - return nil, err - } - return response.Tasks, err -} - -// Returns a batch of tasks from persistence starting form current read level. -// Also return a number that can be used to update readLevel -// Also return a bool to indicate whether read is finished -func (tr *taskReader) getTaskBatch(ctx context.Context) ([]*persistencespb.AllocatedTaskInfo, int64, bool, error) { - var tasks []*persistencespb.AllocatedTaskInfo - readLevel := tr.tlMgr.taskAckManager.getReadLevel() - maxReadLevel := tr.tlMgr.taskWriter.GetMaxReadLevel() - - // counter i is used to break and let caller check whether taskqueue is still alive and need resume read. 
- for i := 0; i < 10 && readLevel < maxReadLevel; i++ { - upper := readLevel + tr.tlMgr.config.RangeSize - if upper > maxReadLevel { - upper = maxReadLevel - } - tasks, err := tr.getTaskBatchWithRange(ctx, readLevel, upper) - if err != nil { - return nil, readLevel, true, err - } - // return as long as it grabs any tasks - if len(tasks) > 0 { - return tasks, upper, true, nil - } - readLevel = upper - } - return tasks, readLevel, readLevel == maxReadLevel, nil // caller will update readLevel when no task grabbed -} - -func (tr *taskReader) addTasksToBuffer( - ctx context.Context, - tasks []*persistencespb.AllocatedTaskInfo, -) error { - for _, t := range tasks { - if taskqueue.IsTaskExpired(t) { - tr.taggedMetricsHandler().Counter(metrics.ExpiredTasksPerTaskQueueCounter.GetMetricName()).Record(1) - // Also increment readLevel for expired tasks otherwise it could result in - // looping over the same tasks if all tasks read in the batch are expired - tr.tlMgr.taskAckManager.setReadLevel(t.GetTaskId()) - continue - } - if err := tr.addSingleTaskToBuffer(ctx, t); err != nil { - return err - } - } - return nil -} - -func (tr *taskReader) addSingleTaskToBuffer( - ctx context.Context, - task *persistencespb.AllocatedTaskInfo, -) error { - tr.tlMgr.taskAckManager.addTask(task.GetTaskId()) - select { - case tr.taskBuffer <- task: - return nil - case <-ctx.Done(): - return ctx.Err() - } -} - -func (tr *taskReader) persistAckLevel(ctx context.Context) error { - ackLevel := tr.tlMgr.taskAckManager.getAckLevel() - tr.emitTaskLagMetric(ackLevel) - return tr.tlMgr.db.UpdateState(ctx, ackLevel) -} - -func (tr *taskReader) logger() log.Logger { - return tr.tlMgr.logger -} - -func (tr *taskReader) taggedMetricsHandler() metrics.Handler { - return tr.tlMgr.metricsHandler -} - -func (tr *taskReader) emitTaskLagMetric(ackLevel int64) { - // note: this metric is only an estimation for the lag. - // taskID in DB may not be continuous, especially when task list ownership changes. - maxReadLevel := tr.tlMgr.taskWriter.GetMaxReadLevel() - tr.taggedMetricsHandler().Gauge(metrics.TaskLagPerTaskQueueGauge.GetMetricName()).Record(float64(maxReadLevel - ackLevel)) -} - -func (tr *taskReader) backoff(duration time.Duration) { - tr.backoffTimerLock.Lock() - defer tr.backoffTimerLock.Unlock() - - if tr.backoffTimer == nil { - tr.backoffTimer = time.AfterFunc(duration, func() { - tr.backoffTimerLock.Lock() - defer tr.backoffTimerLock.Unlock() - - tr.Signal() // re-enqueue the event - tr.backoffTimer = nil - }) - } -} diff -Nru temporal-1.21.5-1/src/service/matching/taskWriter.go temporal-1.22.5/src/service/matching/taskWriter.go --- temporal-1.21.5-1/src/service/matching/taskWriter.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/matching/taskWriter.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,323 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package matching - -import ( - "context" - "fmt" - "sync/atomic" - "time" - - commonpb "go.temporal.io/api/common/v1" - enumspb "go.temporal.io/api/enums/v1" - "go.temporal.io/api/serviceerror" - - persistencespb "go.temporal.io/server/api/persistence/v1" - "go.temporal.io/server/common" - "go.temporal.io/server/common/backoff" - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/log/tag" - "go.temporal.io/server/common/metrics" - "go.temporal.io/server/common/persistence" - "go.temporal.io/server/internal/goro" -) - -type ( - writeTaskResponse struct { - err error - persistenceResponse *persistence.CreateTasksResponse - } - - writeTaskRequest struct { - execution *commonpb.WorkflowExecution - taskInfo *persistencespb.TaskInfo - responseCh chan<- *writeTaskResponse - } - - taskIDBlock struct { - start int64 - end int64 - } - - // taskWriter writes tasks sequentially to persistence - taskWriter struct { - status int32 - tlMgr *taskQueueManagerImpl - config *taskQueueConfig - taskQueueID *taskQueueID - appendCh chan *writeTaskRequest - taskIDBlock taskIDBlock - maxReadLevel int64 - logger log.Logger - writeLoop *goro.Handle - idAlloc idBlockAllocator - } -) - -// errShutdown indicates that the task queue is shutting down -var errShutdown = &persistence.ConditionFailedError{Msg: "task queue shutting down"} - -var noTaskIDs = taskIDBlock{start: 1, end: 0} - -func newTaskWriter( - tlMgr *taskQueueManagerImpl, -) *taskWriter { - return &taskWriter{ - status: common.DaemonStatusInitialized, - tlMgr: tlMgr, - config: tlMgr.config, - taskQueueID: tlMgr.taskQueueID, - appendCh: make(chan *writeTaskRequest, tlMgr.config.OutstandingTaskAppendsThreshold()), - taskIDBlock: noTaskIDs, - maxReadLevel: noTaskIDs.start - 1, - logger: tlMgr.logger, - idAlloc: tlMgr.db, - } -} - -func (w *taskWriter) Start() { - if !atomic.CompareAndSwapInt32( - &w.status, - common.DaemonStatusInitialized, - common.DaemonStatusStarted, - ) { - return - } - - w.writeLoop = goro.NewHandle(w.tlMgr.callerInfoContext(context.Background())) - w.writeLoop.Go(w.taskWriterLoop) -} - -// Stop stops the taskWriter -func (w *taskWriter) Stop() { - if !atomic.CompareAndSwapInt32( - &w.status, - common.DaemonStatusStarted, - common.DaemonStatusStopped, - ) { - return - } - w.writeLoop.Cancel() -} - -func (w *taskWriter) initReadWriteState(ctx context.Context) error { - retryForever := backoff.NewExponentialRetryPolicy(1 * time.Second). 
- WithMaximumInterval(10 * time.Second). - WithExpirationInterval(backoff.NoInterval) - - state, err := w.renewLeaseWithRetry( - ctx, retryForever, common.IsPersistenceTransientError) - if err != nil { - return err - } - w.taskIDBlock = rangeIDToTaskIDBlock(state.rangeID, w.config.RangeSize) - atomic.StoreInt64(&w.maxReadLevel, w.taskIDBlock.start-1) - w.tlMgr.taskAckManager.setAckLevel(state.ackLevel) - return nil -} - -func (w *taskWriter) appendTask( - execution *commonpb.WorkflowExecution, - taskInfo *persistencespb.TaskInfo, -) (*persistence.CreateTasksResponse, error) { - - select { - case <-w.writeLoop.Done(): - return nil, errShutdown - default: - // noop - } - - startTime := time.Now().UTC() - ch := make(chan *writeTaskResponse) - req := &writeTaskRequest{ - execution: execution, - taskInfo: taskInfo, - responseCh: ch, - } - - select { - case w.appendCh <- req: - select { - case r := <-ch: - w.tlMgr.metricsHandler.Timer(metrics.TaskWriteLatencyPerTaskQueue.GetMetricName()).Record(time.Since(startTime)) - return r.persistenceResponse, r.err - case <-w.writeLoop.Done(): - // if we are shutting down, this request will never make - // it to cassandra, just bail out and fail this request - return nil, errShutdown - } - default: // channel is full, throttle - w.tlMgr.metricsHandler.Counter(metrics.TaskWriteThrottlePerTaskQueueCounter.GetMetricName()).Record(1) - return nil, serviceerror.NewResourceExhausted( - enumspb.RESOURCE_EXHAUSTED_CAUSE_SYSTEM_OVERLOADED, - "Too many outstanding appends to the task queue") - } -} - -func (w *taskWriter) GetMaxReadLevel() int64 { - return atomic.LoadInt64(&w.maxReadLevel) -} - -func (w *taskWriter) allocTaskIDs(ctx context.Context, count int) ([]int64, error) { - result := make([]int64, count) - for i := 0; i < count; i++ { - if w.taskIDBlock.start > w.taskIDBlock.end { - // we ran out of current allocation block - newBlock, err := w.allocTaskIDBlock(ctx, w.taskIDBlock.end) - if err != nil { - return nil, err - } - w.taskIDBlock = newBlock - } - result[i] = w.taskIDBlock.start - w.taskIDBlock.start++ - } - return result, nil -} - -func (w *taskWriter) appendTasks( - ctx context.Context, - tasks []*persistencespb.AllocatedTaskInfo, -) (*persistence.CreateTasksResponse, error) { - - resp, err := w.tlMgr.db.CreateTasks(ctx, tasks) - if err != nil { - w.tlMgr.signalIfFatal(err) - w.logger.Error("Persistent store operation failure", - tag.StoreOperationCreateTask, - tag.Error(err), - tag.WorkflowTaskQueueName(w.taskQueueID.FullName()), - tag.WorkflowTaskQueueType(w.taskQueueID.taskType)) - return nil, err - } - return resp, nil -} - -func (w *taskWriter) taskWriterLoop(ctx context.Context) error { - err := w.initReadWriteState(ctx) - w.tlMgr.SetInitializedError(err) - -writerLoop: - for { - select { - case request := <-w.appendCh: - // read a batch of requests from the channel - reqs := []*writeTaskRequest{request} - reqs = w.getWriteBatch(reqs) - batchSize := len(reqs) - - maxReadLevel := int64(0) - - taskIDs, err := w.allocTaskIDs(ctx, batchSize) - if err != nil { - w.sendWriteResponse(reqs, nil, err) - continue writerLoop - } - - var tasks []*persistencespb.AllocatedTaskInfo - for i, req := range reqs { - tasks = append(tasks, &persistencespb.AllocatedTaskInfo{ - TaskId: taskIDs[i], - Data: req.taskInfo, - }) - maxReadLevel = taskIDs[i] - } - - resp, err := w.appendTasks(ctx, tasks) - w.sendWriteResponse(reqs, resp, err) - // Update the maxReadLevel after the writes are completed. 
- if maxReadLevel > 0 { - atomic.StoreInt64(&w.maxReadLevel, maxReadLevel) - } - - case <-ctx.Done(): - return ctx.Err() - } - } -} - -func (w *taskWriter) getWriteBatch(reqs []*writeTaskRequest) []*writeTaskRequest { -readLoop: - for i := 0; i < w.config.MaxTaskBatchSize(); i++ { - select { - case req := <-w.appendCh: - reqs = append(reqs, req) - default: // channel is empty, don't block - break readLoop - } - } - return reqs -} - -func (w *taskWriter) sendWriteResponse( - reqs []*writeTaskRequest, - persistenceResponse *persistence.CreateTasksResponse, - err error, -) { - for _, req := range reqs { - resp := &writeTaskResponse{ - err: err, - persistenceResponse: persistenceResponse, - } - - req.responseCh <- resp - } -} - -func (w *taskWriter) renewLeaseWithRetry( - ctx context.Context, - retryPolicy backoff.RetryPolicy, - retryErrors backoff.IsRetryable, -) (taskQueueState, error) { - var newState taskQueueState - op := func(context.Context) (err error) { - newState, err = w.idAlloc.RenewLease(ctx) - return - } - w.tlMgr.metricsHandler.Counter(metrics.LeaseRequestPerTaskQueueCounter.GetMetricName()).Record(1) - err := backoff.ThrottleRetryContext(ctx, op, retryPolicy, retryErrors) - if err != nil { - w.tlMgr.metricsHandler.Counter(metrics.LeaseFailurePerTaskQueueCounter.GetMetricName()).Record(1) - return newState, err - } - return newState, nil -} - -func (w *taskWriter) allocTaskIDBlock(ctx context.Context, prevBlockEnd int64) (taskIDBlock, error) { - currBlock := rangeIDToTaskIDBlock(w.idAlloc.RangeID(), w.config.RangeSize) - if currBlock.end != prevBlockEnd { - return taskIDBlock{}, - fmt.Errorf("allocTaskIDBlock: invalid state: prevBlockEnd:%v != currTaskIDBlock:%+v", prevBlockEnd, currBlock) - } - state, err := w.renewLeaseWithRetry(ctx, persistenceOperationRetryPolicy, common.IsPersistenceTransientError) - if err != nil { - if w.tlMgr.signalIfFatal(err) { - return taskIDBlock{}, errShutdown - } - return taskIDBlock{}, err - } - return rangeIDToTaskIDBlock(state.rangeID, w.config.RangeSize), nil -} diff -Nru temporal-1.21.5-1/src/service/matching/task_gc.go temporal-1.22.5/src/service/matching/task_gc.go --- temporal-1.21.5-1/src/service/matching/task_gc.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/service/matching/task_gc.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,110 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package matching + +import ( + "context" + "sync/atomic" + "time" + + "go.temporal.io/server/common/persistence" +) + +type taskGC struct { + lock int64 + db *taskQueueDB + ackLevel int64 + lastDeleteTime time.Time + config *taskQueueConfig +} + +var maxTimeBetweenTaskDeletes = time.Second + +// newTaskGC returns an instance of a task garbage collector object +// taskGC internally maintains a delete cursor and attempts to delete +// a batch of tasks everytime Run() method is called. +// +// In order for the taskGC to actually delete tasks when Run() is called, one of +// two conditions must be met +// - Size Threshold: More than MaxDeleteBatchSize tasks are waiting to be deleted (rough estimation) +// - Time Threshold: Time since previous delete was attempted exceeds maxTimeBetweenTaskDeletes +// +// Finally, the Run() method is safe to be called from multiple threads. The underlying +// implementation will make sure only one caller executes Run() and others simply bail out +func newTaskGC(db *taskQueueDB, config *taskQueueConfig) *taskGC { + return &taskGC{db: db, config: config} +} + +// Run deletes a batch of completed tasks, if it's possible to do so +// Only attempts deletion if size or time thresholds are met +func (tgc *taskGC) Run(ctx context.Context, ackLevel int64) { + tgc.tryDeleteNextBatch(ctx, ackLevel, false) +} + +// RunNow deletes a batch of completed tasks if it's possible to do so +// This method attempts deletions without waiting for size/time threshold to be met +func (tgc *taskGC) RunNow(ctx context.Context, ackLevel int64) { + tgc.tryDeleteNextBatch(ctx, ackLevel, true) +} + +func (tgc *taskGC) tryDeleteNextBatch(ctx context.Context, ackLevel int64, ignoreTimeCond bool) { + if !tgc.tryLock() { + return + } + defer tgc.unlock() + batchSize := tgc.config.MaxTaskDeleteBatchSize() + if !tgc.checkPrecond(ackLevel, batchSize, ignoreTimeCond) { + return + } + tgc.lastDeleteTime = time.Now().UTC() + n, err := tgc.db.CompleteTasksLessThan(ctx, ackLevel+1, batchSize) + if err != nil { + return + } + // implementation behavior for CompleteTasksLessThan: + // - unit test, cassandra: always return UnknownNumRowsAffected (in this case means "all") + // - sql: return number of rows affected (should be <= batchSize) + // if we get UnknownNumRowsAffected or a smaller number than our limit, we know we got + // everything <= ackLevel, so we can reset ours. if not, we may have to try again. 
+ if n == persistence.UnknownNumRowsAffected || n < batchSize { + tgc.ackLevel = ackLevel + } +} + +func (tgc *taskGC) checkPrecond(ackLevel int64, batchSize int, ignoreTimeCond bool) bool { + backlog := ackLevel - tgc.ackLevel + if backlog >= int64(batchSize) { + return true + } + return backlog > 0 && (ignoreTimeCond || time.Now().UTC().Sub(tgc.lastDeleteTime) > maxTimeBetweenTaskDeletes) +} + +func (tgc *taskGC) tryLock() bool { + return atomic.CompareAndSwapInt64(&tgc.lock, 0, 1) +} + +func (tgc *taskGC) unlock() { + atomic.StoreInt64(&tgc.lock, 0) +} diff -Nru temporal-1.21.5-1/src/service/matching/task_queue_manager.go temporal-1.22.5/src/service/matching/task_queue_manager.go --- temporal-1.21.5-1/src/service/matching/task_queue_manager.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/service/matching/task_queue_manager.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,1078 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package matching + +import ( + "bytes" + "context" + "errors" + "fmt" + "sync/atomic" + "time" + + "go.temporal.io/server/common/clock" + + commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" + "go.temporal.io/api/serviceerror" + taskqueuepb "go.temporal.io/api/taskqueue/v1" + + enumsspb "go.temporal.io/server/api/enums/v1" + "go.temporal.io/server/api/matchingservice/v1" + persistencespb "go.temporal.io/server/api/persistence/v1" + taskqueuespb "go.temporal.io/server/api/taskqueue/v1" + "go.temporal.io/server/common" + "go.temporal.io/server/common/backoff" + "go.temporal.io/server/common/cluster" + "go.temporal.io/server/common/debug" + "go.temporal.io/server/common/future" + "go.temporal.io/server/common/headers" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/log/tag" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/namespace" + "go.temporal.io/server/common/persistence" + serviceerrors "go.temporal.io/server/common/serviceerror" + "go.temporal.io/server/common/tqname" + "go.temporal.io/server/common/util" + "go.temporal.io/server/internal/goro" +) + +const ( + // Time budget for empty task to propagate through the function stack and be returned to + // pollForActivityTask or pollForWorkflowTask handler. 
+ returnEmptyTaskTimeBudget = time.Second + + // Fake Task ID to wrap a task for syncmatch + syncMatchTaskId = -137 + + ioTimeout = 5 * time.Second * debug.TimeoutMultiplier + + // Threshold for counting a AddTask call as a no recent poller call + noPollerThreshold = time.Minute * 2 +) + +var ( + // this retry policy is currently only used for matching persistence operations + // that, if failed, the entire task queue needs to be reloaded + persistenceOperationRetryPolicy = backoff.NewExponentialRetryPolicy(50 * time.Millisecond). + WithMaximumInterval(1 * time.Second). + WithExpirationInterval(30 * time.Second) +) + +type ( + taskQueueManagerOpt func(*taskQueueManagerImpl) + + idBlockAllocator interface { + RenewLease(context.Context) (taskQueueState, error) + RangeID() int64 + } + + addTaskParams struct { + execution *commonpb.WorkflowExecution + taskInfo *persistencespb.TaskInfo + source enumsspb.TaskSource + forwardedFrom string + baseTqm taskQueueManager + } + + stickyInfo struct { + kind enumspb.TaskQueueKind // sticky taskQueue has different process in persistence + normalName string // if kind is sticky, name of normal queue + } + + UserDataUpdateOptions struct { + TaskQueueLimitPerBuildId int + // Only perform the update if current version equals to supplied version. + // 0 is unset. + KnownVersion int64 + } + // UserDataUpdateFunc accepts the current user data for a task queue and returns the updated user data, a boolean + // indicating whether this data should be replicated, and an error. + // Extra care should be taken to avoid mutating the current user data to avoid keeping uncommitted data in memory. + UserDataUpdateFunc func(*persistencespb.TaskQueueUserData) (*persistencespb.TaskQueueUserData, bool, error) + + taskQueueManager interface { + Start() + Stop() + WaitUntilInitialized(context.Context) error + // AddTask adds a task to the task queue. This method will first attempt a synchronous + // match with a poller. When that fails, task will be written to database and later + // asynchronously matched with a poller + AddTask(ctx context.Context, params addTaskParams) (syncMatch bool, err error) + // GetTask blocks waiting for a task Returns error when context deadline is exceeded + // maxDispatchPerSecond is the max rate at which tasks are allowed to be dispatched + // from this task queue to pollers + GetTask(ctx context.Context, pollMetadata *pollMetadata) (*internalTask, error) + // MarkAlive updates the liveness timer to keep this taskQueueManager alive. + MarkAlive() + // SpoolTask spools a task to persistence to be matched asynchronously when a poller is available. + SpoolTask(params addTaskParams) error + // DispatchSpooledTask dispatches a task to a poller. When there are no pollers to pick + // up the task, this method will return error. Task will not be persisted to db + DispatchSpooledTask(ctx context.Context, task *internalTask, userDataChanged chan struct{}) error + // DispatchQueryTask will dispatch query to local or remote poller. If forwarded then result or error is returned, + // if dispatched to local poller then nil and nil is returned. + DispatchQueryTask(ctx context.Context, taskID string, request *matchingservice.QueryWorkflowRequest) (*matchingservice.QueryWorkflowResponse, error) + // GetUserData returns the versioned user data for this task queue + GetUserData() (*persistencespb.VersionedTaskQueueUserData, chan struct{}, error) + // UpdateUserData updates user data for this task queue and replicates across clusters if necessary. 
+ // Extra care should be taken to avoid mutating the existing data in the update function. + UpdateUserData(ctx context.Context, options UserDataUpdateOptions, updateFn UserDataUpdateFunc) error + UpdatePollerInfo(pollerIdentity, *pollMetadata) + GetAllPollerInfo() []*taskqueuepb.PollerInfo + HasPollerAfter(accessTime time.Time) bool + // DescribeTaskQueue returns information about the target task queue + DescribeTaskQueue(includeTaskQueueStatus bool) *matchingservice.DescribeTaskQueueResponse + String() string + QueueID() *taskQueueID + TaskQueueKind() enumspb.TaskQueueKind + LongPollExpirationInterval() time.Duration + RedirectToVersionedQueueForAdd(context.Context, *taskqueuespb.TaskVersionDirective) (taskQueueManager, chan struct{}, error) + RedirectToVersionedQueueForPoll(context.Context, *commonpb.WorkerVersionCapabilities) (taskQueueManager, error) + } + + // Single task queue in memory state + taskQueueManagerImpl struct { + status int32 + engine *matchingEngineImpl + taskQueueID *taskQueueID + stickyInfo + config *taskQueueConfig + db *taskQueueDB + taskWriter *taskWriter + taskReader *taskReader // reads tasks from db and async matches it with poller + liveness *liveness + taskGC *taskGC + taskAckManager ackManager // tracks ackLevel for delivered messages + matcher *TaskMatcher // for matching a task producer with a poller + namespaceRegistry namespace.Registry + logger log.Logger + throttledLogger log.ThrottledLogger + matchingClient matchingservice.MatchingServiceClient + metricsHandler metrics.Handler + clusterMeta cluster.Metadata + namespace namespace.Name + taggedMetricsHandler metrics.Handler // namespace/taskqueue tagged metric scope + // pollerHistory stores poller which poll from this taskqueue in last few minutes + pollerHistory *pollerHistory + currentPolls atomic.Int64 + goroGroup goro.Group + initializedError *future.FutureImpl[struct{}] + // userDataReady is fulfilled once versioning data is fetched from the root partition. If this TQ is + // the root partition, it is fulfilled as soon as it is fetched from db. 
+ userDataReady *future.FutureImpl[struct{}] + // skipFinalUpdate controls behavior on Stop: if it's false, we try to write one final + // update before unloading + skipFinalUpdate atomic.Bool + } +) + +var _ taskQueueManager = (*taskQueueManagerImpl)(nil) + +var ( + errRemoteSyncMatchFailed = serviceerror.NewCanceled("remote sync match failed") + errMissingNormalQueueName = errors.New("missing normal queue name") + + normalStickyInfo = stickyInfo{kind: enumspb.TASK_QUEUE_KIND_NORMAL} +) + +func withIDBlockAllocator(ibl idBlockAllocator) taskQueueManagerOpt { + return func(tqm *taskQueueManagerImpl) { + tqm.taskWriter.idAlloc = ibl + } +} + +func stickyInfoFromTaskQueue(tq *taskqueuepb.TaskQueue) stickyInfo { + return stickyInfo{ + kind: tq.GetKind(), + normalName: tq.GetNormalName(), + } +} + +func newTaskQueueManager( + e *matchingEngineImpl, + taskQueue *taskQueueID, + stickyInfo stickyInfo, + config *Config, + opts ...taskQueueManagerOpt, +) (taskQueueManager, error) { + namespaceEntry, err := e.namespaceRegistry.GetNamespaceByID(taskQueue.namespaceID) + if err != nil { + return nil, err + } + nsName := namespaceEntry.Name() + + taskQueueConfig := newTaskQueueConfig(taskQueue, config, nsName) + + db := newTaskQueueDB(e.taskManager, e.matchingRawClient, taskQueue.namespaceID, taskQueue, stickyInfo.kind, e.logger) + logger := log.With(e.logger, + tag.WorkflowTaskQueueName(taskQueue.FullName()), + tag.WorkflowTaskQueueType(taskQueue.taskType), + tag.WorkflowNamespace(nsName.String())) + throttledLogger := log.With(e.throttledLogger, + tag.WorkflowTaskQueueName(taskQueue.FullName()), + tag.WorkflowTaskQueueType(taskQueue.taskType), + tag.WorkflowNamespace(nsName.String())) + taggedMetricsHandler := metrics.GetPerTaskQueueScope( + e.metricsHandler.WithTags(metrics.OperationTag(metrics.MatchingTaskQueueMgrScope), metrics.TaskQueueTypeTag(taskQueue.taskType)), + nsName.String(), + taskQueue.FullName(), + stickyInfo.kind, + ) + tlMgr := &taskQueueManagerImpl{ + status: common.DaemonStatusInitialized, + engine: e, + namespaceRegistry: e.namespaceRegistry, + matchingClient: e.matchingRawClient, + metricsHandler: e.metricsHandler, + clusterMeta: e.clusterMeta, + taskQueueID: taskQueue, + stickyInfo: stickyInfo, + logger: logger, + throttledLogger: throttledLogger, + db: db, + taskAckManager: newAckManager(e.logger), + taskGC: newTaskGC(db, taskQueueConfig), + config: taskQueueConfig, + namespace: nsName, + taggedMetricsHandler: taggedMetricsHandler, + initializedError: future.NewFuture[struct{}](), + userDataReady: future.NewFuture[struct{}](), + } + // poller history is only kept for the base task queue manager + if !tlMgr.managesSpecificVersionSet() { + tlMgr.pollerHistory = newPollerHistory() + } + + tlMgr.liveness = newLiveness( + clock.NewRealTimeSource(), + taskQueueConfig.MaxTaskQueueIdleTime, + tlMgr.unloadFromEngine, + ) + tlMgr.taskWriter = newTaskWriter(tlMgr) + tlMgr.taskReader = newTaskReader(tlMgr) + + var fwdr *Forwarder + if tlMgr.isFowardingAllowed(taskQueue, stickyInfo.kind) { + // Forward without version set, the target will resolve the correct version set from + // the build id itself. 
TODO: check if we still need this here after tqm refactoring + forwardTaskQueue := newTaskQueueIDWithVersionSet(taskQueue, "") + fwdr = newForwarder(&taskQueueConfig.forwarderConfig, forwardTaskQueue, stickyInfo.kind, e.matchingRawClient) + } + tlMgr.matcher = newTaskMatcher(taskQueueConfig, fwdr, tlMgr.taggedMetricsHandler) + for _, opt := range opts { + opt(tlMgr) + } + return tlMgr, nil +} + +// unloadFromEngine asks the MatchingEngine to unload this task queue. It will cause Stop to be called. +func (c *taskQueueManagerImpl) unloadFromEngine() { + c.engine.unloadTaskQueue(c) +} + +// signalIfFatal calls unloadFromEngine on this taskQueueManagerImpl instance +// if and only if the supplied error represents a fatal condition, e.g. the +// existence of another taskQueueManager newer lease. Returns true if the signal +// is emitted, false otherwise. +func (c *taskQueueManagerImpl) signalIfFatal(err error) bool { + if err == nil { + return false + } + var condfail *persistence.ConditionFailedError + if errors.As(err, &condfail) { + c.taggedMetricsHandler.Counter(metrics.ConditionFailedErrorPerTaskQueueCounter.GetMetricName()).Record(1) + c.skipFinalUpdate.Store(true) + c.unloadFromEngine() + return true + } + return false +} + +func (c *taskQueueManagerImpl) Start() { + if !atomic.CompareAndSwapInt32( + &c.status, + common.DaemonStatusInitialized, + common.DaemonStatusStarted, + ) { + return + } + c.liveness.Start() + c.taskWriter.Start() + c.taskReader.Start() + if c.db.DbStoresUserData() { + c.goroGroup.Go(c.loadUserData) + } else { + c.goroGroup.Go(c.fetchUserData) + } + c.logger.Info("", tag.LifeCycleStarted) + c.taggedMetricsHandler.Counter(metrics.TaskQueueStartedCounter.GetMetricName()).Record(1) +} + +func (c *taskQueueManagerImpl) Stop() { + if !atomic.CompareAndSwapInt32( + &c.status, + common.DaemonStatusStarted, + common.DaemonStatusStopped, + ) { + return + } + // Maybe try to write one final update of ack level and GC some tasks. + // Skip the update if we never initialized (ackLevel will be -1 in that case). + // Also skip if we're stopping due to lost ownership (the update will fail in that case). + // Ignore any errors. + // Note that it's fine to GC even if the update ack level fails because we did match the + // tasks, the next owner will just read over an empty range. + ackLevel := c.taskAckManager.getAckLevel() + if ackLevel >= 0 && !c.skipFinalUpdate.Load() { + ctx, cancel := c.newIOContext() + defer cancel() + + _ = c.db.UpdateState(ctx, ackLevel) + c.taskGC.RunNow(ctx, ackLevel) + } + c.liveness.Stop() + c.taskWriter.Stop() + c.taskReader.Stop() + c.goroGroup.Cancel() + // Set user data state on stop to wake up anyone blocked on the user data changed channel. + c.db.setUserDataState(userDataClosed) + c.logger.Info("", tag.LifeCycleStopped) + c.taggedMetricsHandler.Counter(metrics.TaskQueueStoppedCounter.GetMetricName()).Record(1) + // This may call Stop again, but the status check above makes that a no-op. + c.unloadFromEngine() +} + +// managesSpecificVersionSet returns true if this is a tqm for a specific version set in the build-id-based versioning +// feature. Note that this is a different concept from the overall task queue having versioning data associated with it, +// which is the usual meaning of "versioned task queue". These task queues are not interacted with directly outside of +// a single matching node. 
+func (c *taskQueueManagerImpl) managesSpecificVersionSet() bool { + return c.taskQueueID.VersionSet() != "" +} + +func (c *taskQueueManagerImpl) SetInitializedError(err error) { + c.initializedError.Set(struct{}{}, err) + if err != nil { + // We can't recover from here without starting over, so unload the whole task queue. + // Skip final update since we never initialized. + c.skipFinalUpdate.Store(true) + c.unloadFromEngine() + } +} + +// Sets user data enabled/disabled and marks the future ready (if it's not ready yet). +// userDataState controls whether GetUserData return an error, and which. +// futureError is the error to set on the ready future. If this is non-nil, the task queue will +// be unloaded. +// Note that this must only be called from a single goroutine since the Ready/Set sequence is +// potentially racy otherwise. +func (c *taskQueueManagerImpl) SetUserDataState(userDataState userDataState, futureError error) { + // Always set state enabled/disabled even if we're not setting the future since we only set + // the future once but the enabled/disabled state may change over time. + c.db.setUserDataState(userDataState) + + if !c.userDataReady.Ready() { + c.userDataReady.Set(struct{}{}, futureError) + if futureError != nil { + // We can't recover from here without starting over, so unload the whole task queue. + // Skip final update since we never initialized. + c.skipFinalUpdate.Store(true) + c.unloadFromEngine() + } + } +} + +func (c *taskQueueManagerImpl) WaitUntilInitialized(ctx context.Context) error { + _, err := c.initializedError.Get(ctx) + if err != nil { + return err + } + _, err = c.userDataReady.Get(ctx) + return err +} + +// AddTask adds a task to the task queue. This method will first attempt a synchronous +// match with a poller. When there are no pollers or if ratelimit is exceeded, task will +// be written to database and later asynchronously matched with a poller +func (c *taskQueueManagerImpl) AddTask( + ctx context.Context, + params addTaskParams, +) (bool, error) { + if params.forwardedFrom == "" { + // request sent by history service + c.liveness.markAlive() + } + + // TODO: make this work for versioned queues too + if c.QueueID().IsRoot() && c.QueueID().VersionSet() == "" && !c.HasPollerAfter(time.Now().Add(-noPollerThreshold)) { + // Only checks recent pollers in the root partition + c.taggedMetricsHandler.Counter(metrics.NoRecentPollerTasksPerTaskQueueCounter.GetMetricName()).Record(1) + } + + taskInfo := params.taskInfo + + namespaceEntry, err := c.namespaceRegistry.GetNamespaceByID(namespace.ID(taskInfo.GetNamespaceId())) + if err != nil { + return false, err + } + + // If this is the versioned task dlq, skip sync match since we know we have no pollers. + isDlq := c.taskQueueID.VersionSet() == dlqVersionSet + if namespaceEntry.ActiveInCluster(c.clusterMeta.GetCurrentClusterName()) && !isDlq { + syncMatch, err := c.trySyncMatch(ctx, params) + if syncMatch { + return syncMatch, err + } + } + + if params.forwardedFrom != "" { + // forwarded from child partition - only do sync match + // child partition will persist the task when sync match fails + return false, errRemoteSyncMatchFailed + } + + // Ensure that tasks with the "default" versioning directive get spooled in the unversioned queue as they are not + // associated with any version set until their execution is touched by a version specific worker. + // "compatible" tasks OTOH are associated with a specific version set and should be stored along with all tasks for + // that version set. 
+ // The task queue default set is dynamic and applies only at dispatch time. Putting "default" tasks into version set + // specific queues could cause them to get stuck behind "compatible" tasks when they should be able to progress + // independently. + if taskInfo.VersionDirective.GetUseDefault() != nil { + err = params.baseTqm.SpoolTask(params) + } else { + err = c.SpoolTask(params) + } + return false, err +} + +func (c *taskQueueManagerImpl) SpoolTask(params addTaskParams) error { + _, err := c.taskWriter.appendTask(params.execution, params.taskInfo) + c.signalIfFatal(err) + if err == nil { + c.taskReader.Signal() + } + return err +} + +// GetTask blocks waiting for a task. +// Returns error when context deadline is exceeded +// maxDispatchPerSecond is the max rate at which tasks are allowed +// to be dispatched from this task queue to pollers +func (c *taskQueueManagerImpl) GetTask( + ctx context.Context, + pollMetadata *pollMetadata, +) (*internalTask, error) { + c.liveness.markAlive() + + c.currentPolls.Add(1) + defer c.currentPolls.Add(-1) + + namespaceEntry, err := c.namespaceRegistry.GetNamespaceByID(c.taskQueueID.namespaceID) + if err != nil { + return nil, err + } + + // the desired global rate limit for the task queue comes from the + // poller, which lives inside the client side worker. There is + // one rateLimiter for this entire task queue and as we get polls, + // we update the ratelimiter rps if it has changed from the last + // value. Last poller wins if different pollers provide different values + c.matcher.UpdateRatelimit(pollMetadata.ratePerSecond) + + if !namespaceEntry.ActiveInCluster(c.clusterMeta.GetCurrentClusterName()) { + return c.matcher.PollForQuery(ctx, pollMetadata) + } + + task, err := c.matcher.Poll(ctx, pollMetadata) + if err != nil { + return nil, err + } + + task.namespace = c.namespace + task.backlogCountHint = c.taskAckManager.getBacklogCountHint + return task, nil +} + +func (c *taskQueueManagerImpl) MarkAlive() { + c.liveness.markAlive() +} + +// DispatchSpooledTask dispatches a task to a poller. When there are no pollers to pick +// up the task or if rate limit is exceeded, this method will return error. Task +// *will not* be persisted to db +func (c *taskQueueManagerImpl) DispatchSpooledTask( + ctx context.Context, + task *internalTask, + userDataChanged chan struct{}, +) error { + return c.matcher.MustOffer(ctx, task, userDataChanged) +} + +// DispatchQueryTask will dispatch query to local or remote poller. If forwarded then result or error is returned, +// if dispatched to local poller then nil and nil is returned. +func (c *taskQueueManagerImpl) DispatchQueryTask( + ctx context.Context, + taskID string, + request *matchingservice.QueryWorkflowRequest, +) (*matchingservice.QueryWorkflowResponse, error) { + task := newInternalQueryTask(taskID, request) + return c.matcher.OfferQuery(ctx, task) +} + +// GetUserData returns the user data for the task queue if any. +// Note: can return nil value with no error. +func (c *taskQueueManagerImpl) GetUserData() (*persistencespb.VersionedTaskQueueUserData, chan struct{}, error) { + return c.db.GetUserData() +} + +// UpdateUserData updates user data for this task queue and replicates across clusters if necessary. 
+func (c *taskQueueManagerImpl) UpdateUserData(ctx context.Context, options UserDataUpdateOptions, updateFn UserDataUpdateFunc) error { + newData, shouldReplicate, err := c.db.UpdateUserData(ctx, updateFn, options.KnownVersion, options.TaskQueueLimitPerBuildId) + if err != nil { + return err + } + if !shouldReplicate { + return nil + } + + // Only replicate if namespace is global and has at least 2 clusters registered. + ns, err := c.namespaceRegistry.GetNamespaceByID(c.db.namespaceID) + if err != nil { + return err + } + if ns.ReplicationPolicy() != namespace.ReplicationPolicyMultiCluster { + return nil + } + + _, err = c.matchingClient.ReplicateTaskQueueUserData(ctx, &matchingservice.ReplicateTaskQueueUserDataRequest{ + NamespaceId: c.db.namespaceID.String(), + TaskQueue: c.taskQueueID.BaseNameString(), + UserData: newData.GetData(), + }) + if err != nil { + c.logger.Error("Failed to publish a replication task after updating task queue user data", tag.Error(err)) + return serviceerror.NewUnavailable("storing task queue user data succeeded but publishing to the namespace replication queue failed, please try again") + } + return err +} + +func (c *taskQueueManagerImpl) UpdatePollerInfo(id pollerIdentity, pollMetadata *pollMetadata) { + if c.pollerHistory != nil { + c.pollerHistory.updatePollerInfo(id, pollMetadata) + } +} + +// GetAllPollerInfo returns all pollers that polled from this taskqueue in last few minutes +func (c *taskQueueManagerImpl) GetAllPollerInfo() []*taskqueuepb.PollerInfo { + if c.pollerHistory == nil { + return nil + } + return c.pollerHistory.getPollerInfo(time.Time{}) +} + +func (c *taskQueueManagerImpl) HasPollerAfter(accessTime time.Time) bool { + if c.currentPolls.Load() > 0 { + return true + } + if c.pollerHistory == nil { + return false + } + recentPollers := c.pollerHistory.getPollerInfo(accessTime) + return len(recentPollers) > 0 +} + +// DescribeTaskQueue returns information about the target taskqueue, right now this API returns the +// pollers which polled this taskqueue in last few minutes and status of taskqueue's ackManager +// (readLevel, ackLevel, backlogCountHint and taskIDBlock). 
+func (c *taskQueueManagerImpl) DescribeTaskQueue(includeTaskQueueStatus bool) *matchingservice.DescribeTaskQueueResponse { + response := &matchingservice.DescribeTaskQueueResponse{Pollers: c.GetAllPollerInfo()} + if !includeTaskQueueStatus { + return response + } + + taskIDBlock := rangeIDToTaskIDBlock(c.db.RangeID(), c.config.RangeSize) + response.TaskQueueStatus = &taskqueuepb.TaskQueueStatus{ + ReadLevel: c.taskAckManager.getReadLevel(), + AckLevel: c.taskAckManager.getAckLevel(), + BacklogCountHint: c.taskAckManager.getBacklogCountHint(), + RatePerSecond: c.matcher.Rate(), + TaskIdBlock: &taskqueuepb.TaskIdBlock{ + StartId: taskIDBlock.start, + EndId: taskIDBlock.end, + }, + } + + return response +} + +func (c *taskQueueManagerImpl) String() string { + buf := new(bytes.Buffer) + if c.taskQueueID.taskType == enumspb.TASK_QUEUE_TYPE_ACTIVITY { + buf.WriteString("Activity") + } else { + buf.WriteString("Workflow") + } + rangeID := c.db.RangeID() + _, _ = fmt.Fprintf(buf, " task queue %v\n", c.taskQueueID.FullName()) + _, _ = fmt.Fprintf(buf, "RangeID=%v\n", rangeID) + _, _ = fmt.Fprintf(buf, "TaskIDBlock=%+v\n", rangeIDToTaskIDBlock(rangeID, c.config.RangeSize)) + _, _ = fmt.Fprintf(buf, "AckLevel=%v\n", c.taskAckManager.ackLevel) + _, _ = fmt.Fprintf(buf, "MaxTaskID=%v\n", c.taskAckManager.getReadLevel()) + + return buf.String() +} + +// completeTask marks a task as processed. Only tasks created by taskReader (i.e. backlog from db) reach +// here. As part of completion: +// - task is deleted from the database when err is nil +// - new task is created and current task is deleted when err is not nil +func (c *taskQueueManagerImpl) completeTask(task *persistencespb.AllocatedTaskInfo, err error) { + if err != nil { + // failed to start the task. + // We cannot just remove it from persistence because then it will be lost. + // We handle this by writing the task back to persistence with a higher taskID. + // This will allow subsequent tasks to make progress, and hopefully by the time this task is picked-up + // again the underlying reason for failing to start will be resolved. + // Note that RecordTaskStarted only fails after retrying for a long time, so a single task will not be + // re-written to persistence frequently. + err = executeWithRetry(context.Background(), func(_ context.Context) error { + wf := &commonpb.WorkflowExecution{WorkflowId: task.Data.GetWorkflowId(), RunId: task.Data.GetRunId()} + _, err := c.taskWriter.appendTask(wf, task.Data) + return err + }) + + if err != nil { + // OK, we also failed to write to persistence. + // This should only happen in very extreme cases where persistence is completely down. + // We still can't lose the old task, so we just unload the entire task queue + c.logger.Error("Persistent store operation failure", + tag.StoreOperationStopTaskQueue, + tag.Error(err), + tag.WorkflowTaskQueueName(c.taskQueueID.FullName()), + tag.WorkflowTaskQueueType(c.taskQueueID.taskType)) + // Skip final update since persistence is having problems. + c.skipFinalUpdate.Store(true) + c.unloadFromEngine() + return + } + c.taskReader.Signal() + } + + ackLevel := c.taskAckManager.completeTask(task.GetTaskId()) + + // TODO: completeTaskFunc and task.finish() should take in a context + ctx, cancel := c.newIOContext() + defer cancel() + c.taskGC.Run(ctx, ackLevel) +} + +func rangeIDToTaskIDBlock(rangeID int64, rangeSize int64) taskIDBlock { + return taskIDBlock{ + start: (rangeID-1)*rangeSize + 1, + end: rangeID * rangeSize, + } +} + +// Retry operation on transient error. 
+func executeWithRetry( + ctx context.Context, + operation func(context.Context) error, +) error { + return backoff.ThrottleRetryContext(ctx, operation, persistenceOperationRetryPolicy, func(err error) bool { + if common.IsContextDeadlineExceededErr(err) || common.IsContextCanceledErr(err) { + return false + } + if _, ok := err.(*persistence.ConditionFailedError); ok { + return false + } + return common.IsPersistenceTransientError(err) + }) +} + +func (c *taskQueueManagerImpl) trySyncMatch(ctx context.Context, params addTaskParams) (bool, error) { + if params.forwardedFrom == "" && c.config.TestDisableSyncMatch() { + return false, nil + } + childCtx, cancel := newChildContext(ctx, c.config.SyncMatchWaitDuration(), time.Second) + defer cancel() + + // Use fake TaskId for sync match as it hasn't been allocated yet + fakeTaskIdWrapper := &persistencespb.AllocatedTaskInfo{ + Data: params.taskInfo, + TaskId: syncMatchTaskId, + } + + task := newInternalTask(fakeTaskIdWrapper, nil, params.source, params.forwardedFrom, true) + return c.matcher.Offer(childCtx, task) +} + +// newChildContext creates a child context with desired timeout. +// if tailroom is non-zero, then child context timeout will be +// the minOf(parentCtx.Deadline()-tailroom, timeout). Use this +// method to create child context when childContext cannot use +// all of parent's deadline but instead there is a need to leave +// some time for parent to do some post-work +func newChildContext( + parent context.Context, + timeout time.Duration, + tailroom time.Duration, +) (context.Context, context.CancelFunc) { + if parent.Err() != nil { + return parent, func() {} + } + deadline, ok := parent.Deadline() + if !ok { + return context.WithTimeout(parent, timeout) + } + remaining := time.Until(deadline) - tailroom + if remaining < timeout { + timeout = util.Max(0, remaining) + } + return context.WithTimeout(parent, timeout) +} + +func (c *taskQueueManagerImpl) isFowardingAllowed(taskQueue *taskQueueID, kind enumspb.TaskQueueKind) bool { + return !taskQueue.IsRoot() && kind != enumspb.TASK_QUEUE_KIND_STICKY +} + +func (c *taskQueueManagerImpl) QueueID() *taskQueueID { + return c.taskQueueID +} + +func (c *taskQueueManagerImpl) TaskQueueKind() enumspb.TaskQueueKind { + return c.kind +} + +func (c *taskQueueManagerImpl) LongPollExpirationInterval() time.Duration { + return c.config.LongPollExpirationInterval() +} + +func (c *taskQueueManagerImpl) RedirectToVersionedQueueForPoll(ctx context.Context, caps *commonpb.WorkerVersionCapabilities) (taskQueueManager, error) { + if !caps.GetUseVersioning() { + // Either this task queue is versioned, or there are still some workflows running on + // the "unversioned" set. + return c, nil + } + // We don't need the userDataChanged channel here because polls have a timeout and the + // client will retry, so if we're blocked on the wrong matcher it'll just take one poll + // timeout to fix itself. + userData, _, err := c.GetUserData() + if err != nil { + return nil, err + } + data := userData.GetData().GetVersioningData() + + if c.kind == enumspb.TASK_QUEUE_KIND_STICKY { + // In the sticky case we don't redirect, but we may kick off this worker if there's a newer one. 
+ unknownBuild, err := checkVersionForStickyPoll(data, caps) + if err != nil { + return nil, err + } + if unknownBuild { + c.recordUnknownBuildPoll(caps.BuildId) + } + return c, nil + } + + primarySetId, demotedSetIds, unknownBuild, err := lookupVersionSetForPoll(data, caps) + if err != nil { + return nil, err + } + if unknownBuild { + c.recordUnknownBuildPoll(caps.BuildId) + } + c.loadDemotedSetIds(demotedSetIds) + + newId := newTaskQueueIDWithVersionSet(c.taskQueueID, primarySetId) + return c.engine.getTaskQueueManager(ctx, newId, c.stickyInfo, true) +} + +func (c *taskQueueManagerImpl) loadDemotedSetIds(demotedSetIds []string) { + // If we have demoted set ids, we need to load all task queues for them because even though + // no new tasks will be sent to them, they might have old tasks in the db. + // Also mark them alive, so that their liveness will be roughly synchronized. + // TODO: once we know a demoted set id has no more tasks, we can remove it from versioning data + for _, demotedSetId := range demotedSetIds { + newId := newTaskQueueIDWithVersionSet(c.taskQueueID, demotedSetId) + tqm, _ := c.engine.getTaskQueueManagerNoWait(newId, c.stickyInfo, true) + if tqm != nil { + tqm.MarkAlive() + } + } +} + +func (c *taskQueueManagerImpl) RedirectToVersionedQueueForAdd(ctx context.Context, directive *taskqueuespb.TaskVersionDirective) (taskQueueManager, chan struct{}, error) { + var buildId string + switch dir := directive.GetValue().(type) { + case *taskqueuespb.TaskVersionDirective_UseDefault: + // leave buildId = "", lookupVersionSetForAdd understands that to mean "default" + case *taskqueuespb.TaskVersionDirective_BuildId: + buildId = dir.BuildId + default: + // Unversioned task, leave on unversioned queue. + return c, nil, nil + } + + // Have to look up versioning data. + userData, userDataChanged, err := c.GetUserData() + if err != nil { + if errors.Is(err, errUserDataDisabled) { + // When user data disabled, send "default" tasks to unversioned queue. + if buildId == "" { + return c, userDataChanged, nil + } + // Send versioned sticky back to regular queue so they can go in the dlq. + if c.kind == enumspb.TASK_QUEUE_KIND_STICKY { + return nil, nil, serviceerrors.NewStickyWorkerUnavailable() + } + // Send versioned tasks to dlq. + newId := newTaskQueueIDWithVersionSet(c.taskQueueID, dlqVersionSet) + // If we're called by QueryWorkflow, then we technically don't need to load the dlq + // tqm here. But it's not a big deal if we do. + tqm, err := c.engine.getTaskQueueManager(ctx, newId, c.stickyInfo, true) + if err != nil { + return nil, nil, err + } + return tqm, userDataChanged, nil + } + return nil, nil, err + } + data := userData.GetData().GetVersioningData() + + if c.kind == enumspb.TASK_QUEUE_KIND_STICKY { + // In the sticky case we don't redirect, but we may kick off this worker if there's a newer one. + unknownBuild, err := checkVersionForStickyAdd(data, buildId) + if err != nil { + return nil, nil, err + } + if unknownBuild { + c.recordUnknownBuildTask(buildId) + // Don't bother persisting the unknown build id in this case: sticky tasks have a + // short timeout, so it doesn't matter if they get lost. 
+ } + return c, userDataChanged, nil + } + + versionSet, unknownBuild, err := lookupVersionSetForAdd(data, buildId) + if err == errEmptyVersioningData { // nolint:goerr113 + // default was requested for an unversioned queue + return c, userDataChanged, nil + } else if err != nil { + return nil, nil, err + } + if unknownBuild { + c.recordUnknownBuildTask(buildId) + // Send rpc to root partition to persist the unknown build id before we return success. + _, err = c.matchingClient.UpdateWorkerBuildIdCompatibility(ctx, &matchingservice.UpdateWorkerBuildIdCompatibilityRequest{ + NamespaceId: c.taskQueueID.namespaceID.String(), + TaskQueue: c.taskQueueID.Root().FullName(), + Operation: &matchingservice.UpdateWorkerBuildIdCompatibilityRequest_PersistUnknownBuildId{ + PersistUnknownBuildId: buildId, + }, + }) + if err != nil { + return nil, nil, err + } + } + + newId := newTaskQueueIDWithVersionSet(c.taskQueueID, versionSet) + tqm, err := c.engine.getTaskQueueManager(ctx, newId, c.stickyInfo, true) + if err != nil { + return nil, nil, err + } + return tqm, userDataChanged, nil +} + +func (c *taskQueueManagerImpl) recordUnknownBuildPoll(buildId string) { + c.logger.Warn("unknown build id in poll", tag.BuildId(buildId)) + c.taggedMetricsHandler.Counter(metrics.UnknownBuildPollsCounter.GetMetricName()).Record(1) +} + +func (c *taskQueueManagerImpl) recordUnknownBuildTask(buildId string) { + c.logger.Warn("unknown build id in task", tag.BuildId(buildId)) + c.taggedMetricsHandler.Counter(metrics.UnknownBuildTasksCounter.GetMetricName()).Record(1) +} + +func (c *taskQueueManagerImpl) callerInfoContext(ctx context.Context) context.Context { + ns, _ := c.namespaceRegistry.GetNamespaceName(c.taskQueueID.namespaceID) + return headers.SetCallerInfo(ctx, headers.NewBackgroundCallerInfo(ns.String())) +} + +func (c *taskQueueManagerImpl) newIOContext() (context.Context, context.CancelFunc) { + ctx, cancel := context.WithTimeout(context.Background(), ioTimeout) + return c.callerInfoContext(ctx), cancel +} + +func (c *taskQueueManagerImpl) loadUserData(ctx context.Context) error { + ctx = c.callerInfoContext(ctx) + + hasLoadedUserData := false + + for ctx.Err() == nil { + if !c.config.LoadUserData() { + // if disabled, mark disabled and ready + c.SetUserDataState(userDataDisabled, nil) + hasLoadedUserData = false // load again if re-enabled + } else if !hasLoadedUserData { + // otherwise try to load from db once + err := c.db.loadUserData(ctx) + c.SetUserDataState(userDataEnabled, err) + hasLoadedUserData = err == nil + } else { + // if already loaded, set enabled + c.SetUserDataState(userDataEnabled, nil) + } + common.InterruptibleSleep(ctx, c.config.GetUserDataLongPollTimeout()) + } + + return nil +} + +func (c *taskQueueManagerImpl) userDataFetchSource() (string, error) { + if c.kind == enumspb.TASK_QUEUE_KIND_STICKY { + // Sticky queues get data from their corresponding normal queue + if c.normalName == "" { + // Older SDKs don't send the normal name. That's okay, they just can't use versioning. 
+ return "", errMissingNormalQueueName + } + return c.normalName, nil + } + + degree := c.config.ForwarderMaxChildrenPerNode() + parent, err := c.taskQueueID.Parent(degree) + if err == tqname.ErrNoParent { // nolint:goerr113 + // we're the root activity task queue, ask the root workflow task queue + return c.taskQueueID.FullName(), nil + } else if err != nil { + // invalid degree + return "", err + } + return parent.FullName(), nil +} + +func (c *taskQueueManagerImpl) fetchUserData(ctx context.Context) error { + ctx = c.callerInfoContext(ctx) + + if c.managesSpecificVersionSet() { + // tqm for specific version set doesn't have its own user data + c.SetUserDataState(userDataSpecificVersion, nil) + return nil + } + + // otherwise fetch from parent partition + + fetchSource, err := c.userDataFetchSource() + if err != nil { + if err == errMissingNormalQueueName { // nolint:goerr113 + // pretend we have no user data. this is a sticky queue so the only effect is that we can't + // kick off versioned pollers. + c.SetUserDataState(userDataEnabled, nil) + } + return err + } + + // hasFetchedUserData is true if we have gotten a successful reply to GetTaskQueueUserData. + // It's used to control whether we do a long poll or a simple get. + hasFetchedUserData := false + + op := func(ctx context.Context) error { + if !c.config.LoadUserData() { + // if disabled, mark disabled and ready, but allow retries so that we notice if + // it's re-enabled + c.SetUserDataState(userDataDisabled, nil) + return errUserDataDisabled + } + + knownUserData, _, err := c.GetUserData() + if err != nil { + // Start with a non-long poll after re-enabling after disable, so that we don't have to wait the + // full long poll interval before calling SetUserDataStatus to enable again. + // Leave knownUserData as nil and GetVersion will return 0. + hasFetchedUserData = false + } + + callCtx, cancel := context.WithTimeout(ctx, c.config.GetUserDataLongPollTimeout()) + defer cancel() + + res, err := c.matchingClient.GetTaskQueueUserData(callCtx, &matchingservice.GetTaskQueueUserDataRequest{ + NamespaceId: c.taskQueueID.namespaceID.String(), + TaskQueue: fetchSource, + TaskQueueType: enumspb.TASK_QUEUE_TYPE_WORKFLOW, + LastKnownUserDataVersion: knownUserData.GetVersion(), + WaitNewData: hasFetchedUserData, + }) + if err != nil { + var unimplErr *serviceerror.Unimplemented + var failedPrecondErr *serviceerror.FailedPrecondition + if errors.As(err, &unimplErr) { + // This might happen during a deployment. The older version couldn't have had any user data, + // so we act as if it just returned an empty response and set ourselves ready. + // Return the error so that we backoff with retry, and do not set hasFetchedUserData so that + // we don't do a long poll next time. + c.SetUserDataState(userDataEnabled, nil) + } else if errors.As(err, &failedPrecondErr) { + // This means the parent has the LoadUserData switch turned off. Act like our switch is off also. + c.SetUserDataState(userDataDisabled, nil) + } + return err + } + // If the root partition returns nil here, then that means our data matched, and we don't need to update. + // If it's nil because it never existed, then we'd never have any data. + // It can't be nil due to removing versions, as that would result in a non-nil container with + // nil inner fields. 
+ if res.GetUserData() != nil { + c.db.setUserDataForNonOwningPartition(res.GetUserData()) + } + hasFetchedUserData = true + c.SetUserDataState(userDataEnabled, nil) + return nil + } + + minWaitTime := c.config.GetUserDataMinWaitTime + + for ctx.Err() == nil { + start := time.Now() + _ = backoff.ThrottleRetryContext(ctx, op, c.config.GetUserDataRetryPolicy, nil) + elapsed := time.Since(start) + + // In general, we want to start a new call immediately on completion of the previous + // one. But if the remote is broken and returns success immediately, we might end up + // spinning. So enforce a minimum wait time that increases as long as we keep getting + // very fast replies. + if elapsed < minWaitTime { + common.InterruptibleSleep(ctx, minWaitTime-elapsed) + // Don't let this get near our call timeout, otherwise we can't tell the difference + // between a fast reply and a timeout. + minWaitTime = util.Min(minWaitTime*2, c.config.GetUserDataLongPollTimeout()/2) + } else { + minWaitTime = c.config.GetUserDataMinWaitTime + } + } + + return ctx.Err() +} diff -Nru temporal-1.21.5-1/src/service/matching/task_queue_manager_test.go temporal-1.22.5/src/service/matching/task_queue_manager_test.go --- temporal-1.21.5-1/src/service/matching/task_queue_manager_test.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/service/matching/task_queue_manager_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,1373 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package matching + +import ( + "context" + "errors" + "math" + "sync/atomic" + "testing" + "time" + + "github.com/gogo/protobuf/types" + "github.com/golang/mock/gomock" + "github.com/pborman/uuid" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/uber-go/tally/v4" + commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" + "go.temporal.io/api/serviceerror" + "google.golang.org/grpc" + + enumsspb "go.temporal.io/server/api/enums/v1" + "go.temporal.io/server/api/historyservice/v1" + "go.temporal.io/server/api/historyservicemock/v1" + "go.temporal.io/server/api/matchingservice/v1" + "go.temporal.io/server/api/matchingservicemock/v1" + persistencespb "go.temporal.io/server/api/persistence/v1" + "go.temporal.io/server/api/taskqueue/v1" + "go.temporal.io/server/common" + "go.temporal.io/server/common/backoff" + "go.temporal.io/server/common/cluster" + "go.temporal.io/server/common/dynamicconfig" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/namespace" + "go.temporal.io/server/common/persistence" + "go.temporal.io/server/common/persistence/visibility/manager" + "go.temporal.io/server/common/primitives/timestamp" + "go.temporal.io/server/internal/goro" +) + +var rpsInf = math.Inf(1) + +const ( + defaultNamespaceId = namespace.ID("deadbeef-0000-4567-890a-bcdef0123456") + defaultRootTqID = "tq" +) + +type tqmTestOpts struct { + config *Config + tqId *taskQueueID + matchingClientMock *matchingservicemock.MockMatchingServiceClient +} + +func defaultTqmTestOpts(controller *gomock.Controller) *tqmTestOpts { + return &tqmTestOpts{ + config: defaultTestConfig(), + tqId: defaultTqId(), + matchingClientMock: matchingservicemock.NewMockMatchingServiceClient(controller), + } +} + +func TestDeliverBufferTasks(t *testing.T) { + controller := gomock.NewController(t) + defer controller.Finish() + + tests := []func(tlm *taskQueueManagerImpl){ + func(tlm *taskQueueManagerImpl) { close(tlm.taskReader.taskBuffer) }, + func(tlm *taskQueueManagerImpl) { tlm.taskReader.gorogrp.Cancel() }, + func(tlm *taskQueueManagerImpl) { + rps := 0.1 + tlm.matcher.UpdateRatelimit(&rps) + tlm.taskReader.taskBuffer <- &persistencespb.AllocatedTaskInfo{ + Data: &persistencespb.TaskInfo{}, + } + err := tlm.matcher.rateLimiter.Wait(context.Background()) // consume the token + assert.NoError(t, err) + tlm.taskReader.gorogrp.Cancel() + }, + } + for _, test := range tests { + tlm := mustCreateTestTaskQueueManager(t, controller) + tlm.taskReader.gorogrp.Go(tlm.taskReader.dispatchBufferedTasks) + test(tlm) + // dispatchBufferedTasks should stop after invocation of the test function + tlm.taskReader.gorogrp.Wait() + } +} + +func TestDeliverBufferTasks_NoPollers(t *testing.T) { + controller := gomock.NewController(t) + defer controller.Finish() + + tlm := mustCreateTestTaskQueueManager(t, controller) + tlm.taskReader.taskBuffer <- &persistencespb.AllocatedTaskInfo{ + Data: &persistencespb.TaskInfo{}, + } + tlm.taskReader.gorogrp.Go(tlm.taskReader.dispatchBufferedTasks) + time.Sleep(100 * time.Millisecond) // let go routine run first and block on tasksForPoll + tlm.taskReader.gorogrp.Cancel() + tlm.taskReader.gorogrp.Wait() +} + +func TestDeliverBufferTasks_DisableUserData_SendsVersionedToDlq(t *testing.T) { + t.Parallel() + + controller := gomock.NewController(t) + defer controller.Finish() + + tlm := mustCreateTestTaskQueueManager(t, controller) + tlm.config.LoadUserData = dynamicconfig.GetBoolPropertyFn(false) + 
+ scope := tally.NewTestScope("test", nil) + tlm.metricsHandler = metrics.NewTallyMetricsHandler(metrics.ClientConfig{}, scope) + + tlm.taskReader.taskBuffer <- &persistencespb.AllocatedTaskInfo{ + Data: &persistencespb.TaskInfo{ + VersionDirective: &taskqueue.TaskVersionDirective{ + Value: &taskqueue.TaskVersionDirective_BuildId{BuildId: "asdf"}, + }, + }, + } + + tlm.SetInitializedError(nil) + tlm.SetUserDataState(userDataDisabled, nil) + tlm.taskReader.gorogrp.Go(tlm.taskReader.dispatchBufferedTasks) + + time.Sleep(taskReaderOfferThrottleWait) + + // should be no retries + // TODO: this test could eventually be improved to check which tqm the task got redirected to. + // for now, this is tested better by integration tests. + errCount := scope.Snapshot().Counters()["test.buffer_throttle_count+"] + require.Nil(t, errCount) + + tlm.taskReader.gorogrp.Cancel() + tlm.taskReader.gorogrp.Wait() +} + +func TestDeliverBufferTasks_DisableUserData_SendsDefaultToUnversioned(t *testing.T) { + t.Parallel() + + controller := gomock.NewController(t) + defer controller.Finish() + + tlm := mustCreateTestTaskQueueManager(t, controller) + tlm.config.LoadUserData = dynamicconfig.GetBoolPropertyFn(false) + + scope := tally.NewTestScope("test", nil) + tlm.metricsHandler = metrics.NewTallyMetricsHandler(metrics.ClientConfig{}, scope) + + tlm.taskReader.taskBuffer <- &persistencespb.AllocatedTaskInfo{ + Data: &persistencespb.TaskInfo{ + VersionDirective: &taskqueue.TaskVersionDirective{ + Value: &taskqueue.TaskVersionDirective_UseDefault{UseDefault: &types.Empty{}}, + }, + }, + } + + tlm.SetInitializedError(nil) + tlm.SetUserDataState(userDataDisabled, nil) + tlm.taskReader.gorogrp.Go(tlm.taskReader.dispatchBufferedTasks) + + time.Sleep(taskReaderOfferThrottleWait) + + // should be no retries + errCount := scope.Snapshot().Counters()["test.buffer_throttle_count+"] + require.Nil(t, errCount) + + tlm.taskReader.gorogrp.Cancel() + tlm.taskReader.gorogrp.Wait() +} + +func TestReadLevelForAllExpiredTasksInBatch(t *testing.T) { + controller := gomock.NewController(t) + defer controller.Finish() + + tlm := mustCreateTestTaskQueueManager(t, controller) + tlm.db.rangeID = int64(1) + tlm.db.ackLevel = int64(0) + tlm.taskAckManager.setAckLevel(tlm.db.ackLevel) + tlm.taskAckManager.setReadLevel(tlm.db.ackLevel) + require.Equal(t, int64(0), tlm.taskAckManager.getAckLevel()) + require.Equal(t, int64(0), tlm.taskAckManager.getReadLevel()) + + // Add all expired tasks + tasks := []*persistencespb.AllocatedTaskInfo{ + { + Data: &persistencespb.TaskInfo{ + ExpiryTime: timestamp.TimeNowPtrUtcAddSeconds(-60), + CreateTime: timestamp.TimeNowPtrUtcAddSeconds(-60 * 60), + }, + TaskId: 11, + }, + { + Data: &persistencespb.TaskInfo{ + ExpiryTime: timestamp.TimeNowPtrUtcAddSeconds(-60), + CreateTime: timestamp.TimeNowPtrUtcAddSeconds(-60 * 60), + }, + TaskId: 12, + }, + } + + require.NoError(t, tlm.taskReader.addTasksToBuffer(context.TODO(), tasks)) + require.Equal(t, int64(0), tlm.taskAckManager.getAckLevel()) + require.Equal(t, int64(12), tlm.taskAckManager.getReadLevel()) + + // Now add a mix of valid and expired tasks + require.NoError(t, tlm.taskReader.addTasksToBuffer(context.TODO(), []*persistencespb.AllocatedTaskInfo{ + { + Data: &persistencespb.TaskInfo{ + ExpiryTime: timestamp.TimeNowPtrUtcAddSeconds(-60), + CreateTime: timestamp.TimeNowPtrUtcAddSeconds(-60 * 60), + }, + TaskId: 13, + }, + { + Data: &persistencespb.TaskInfo{ + ExpiryTime: timestamp.TimeNowPtrUtcAddSeconds(-60), + CreateTime: 
timestamp.TimeNowPtrUtcAddSeconds(-60 * 60), + }, + TaskId: 14, + }, + })) + require.Equal(t, int64(0), tlm.taskAckManager.getAckLevel()) + require.Equal(t, int64(14), tlm.taskAckManager.getReadLevel()) +} + +type testIDBlockAlloc struct { + rid int64 + alloc func() (taskQueueState, error) +} + +func (a *testIDBlockAlloc) RangeID() int64 { + return a.rid +} + +func (a *testIDBlockAlloc) RenewLease(_ context.Context) (taskQueueState, error) { + s, err := a.alloc() + if err == nil { + a.rid = s.rangeID + } + return s, err +} + +func makeTestBlocAlloc(f func() (taskQueueState, error)) taskQueueManagerOpt { + return withIDBlockAllocator(&testIDBlockAlloc{alloc: f}) +} + +func TestSyncMatchLeasingUnavailable(t *testing.T) { + tqm := mustCreateTestTaskQueueManager(t, gomock.NewController(t), + makeTestBlocAlloc(func() (taskQueueState, error) { + // any error other than ConditionFailedError indicates an + // availability problem at a lower layer so the TQM should NOT + // unload itself because resilient sync match is enabled. + return taskQueueState{}, errors.New(t.Name()) + })) + tqm.Start() + defer tqm.Stop() + poller, _ := runOneShotPoller(context.Background(), tqm) + defer poller.Cancel() + + sync, err := tqm.AddTask(context.TODO(), addTaskParams{ + execution: &commonpb.WorkflowExecution{}, + taskInfo: &persistencespb.TaskInfo{}, + source: enumsspb.TASK_SOURCE_HISTORY}) + require.NoError(t, err) + require.True(t, sync) +} + +func TestForeignPartitionOwnerCausesUnload(t *testing.T) { + cfg := NewConfig(dynamicconfig.NewNoopCollection(), false, false) + cfg.RangeSize = 1 // TaskID block size + var leaseErr error + tqm := mustCreateTestTaskQueueManager(t, gomock.NewController(t), + makeTestBlocAlloc(func() (taskQueueState, error) { + return taskQueueState{rangeID: 1}, leaseErr + })) + tqm.Start() + defer tqm.Stop() + + // TQM started successfully with an ID block of size 1. Perform one send + // without a poller to consume the one task ID from the reserved block. + sync, err := tqm.AddTask(context.TODO(), addTaskParams{ + execution: &commonpb.WorkflowExecution{}, + taskInfo: &persistencespb.TaskInfo{}, + source: enumsspb.TASK_SOURCE_HISTORY}) + require.False(t, sync) + require.NoError(t, err) + + // TQM's ID block should be empty so the next AddTask will trigger an + attempt to obtain more IDs.
This specific error type indicates that + // another service instance has become the owner of the partition + leaseErr = &persistence.ConditionFailedError{Msg: "should kill the tqm"} + + sync, err = tqm.AddTask(context.TODO(), addTaskParams{ + execution: &commonpb.WorkflowExecution{}, + taskInfo: &persistencespb.TaskInfo{}, + source: enumsspb.TASK_SOURCE_HISTORY, + }) + require.NoError(t, err) + require.False(t, sync) +} + +func TestReaderSignaling(t *testing.T) { + readerNotifications := make(chan struct{}, 1) + clearNotifications := func() { + for len(readerNotifications) > 0 { + <-readerNotifications + } + } + tqm := mustCreateTestTaskQueueManager(t, gomock.NewController(t)) + + // redirect taskReader signals into our local channel + tqm.taskReader.notifyC = readerNotifications + + tqm.Start() + defer tqm.Stop() + + // shut down the taskReader so it doesn't steal notifications from us + tqm.taskReader.gorogrp.Cancel() + tqm.taskReader.gorogrp.Wait() + + clearNotifications() + + sync, err := tqm.AddTask(context.TODO(), addTaskParams{ + execution: &commonpb.WorkflowExecution{}, + taskInfo: &persistencespb.TaskInfo{}, + source: enumsspb.TASK_SOURCE_HISTORY}) + require.NoError(t, err) + require.False(t, sync) + require.Len(t, readerNotifications, 1, + "Sync match failure with successful db write should signal taskReader") + + clearNotifications() + poller, _ := runOneShotPoller(context.Background(), tqm) + defer poller.Cancel() + + sync, err = tqm.AddTask(context.TODO(), addTaskParams{ + execution: &commonpb.WorkflowExecution{}, + taskInfo: &persistencespb.TaskInfo{}, + source: enumsspb.TASK_SOURCE_HISTORY}) + require.NoError(t, err) + require.True(t, sync) + require.Len(t, readerNotifications, 0, + "Sync match should not signal taskReader") +} + +// runOneShotPoller spawns a goroutine to call tqm.GetTask on the provided tqm. +// The second return value is a channel of either error or *internalTask. +func runOneShotPoller(ctx context.Context, tqm taskQueueManager) (*goro.Handle, chan interface{}) { + out := make(chan interface{}, 1) + handle := goro.NewHandle(ctx).Go(func(ctx context.Context) error { + task, err := tqm.GetTask(ctx, &pollMetadata{ratePerSecond: &rpsInf}) + if task == nil { + out <- err + return nil + } + task.finish(err) + out <- task + return nil + }) + // tqm.GetTask() needs some time to attach the goro started above to the + // internal task channel. Sorry for this but it appears unavoidable. + time.Sleep(10 * time.Millisecond) + return handle, out +} + +func defaultTqId() *taskQueueID { + return newTestTaskQueueID(defaultNamespaceId, defaultRootTqID, enumspb.TASK_QUEUE_TYPE_WORKFLOW) +} + +func mustCreateTestTaskQueueManager( + t *testing.T, + controller *gomock.Controller, + opts ...taskQueueManagerOpt, +) *taskQueueManagerImpl { + t.Helper() + return mustCreateTestTaskQueueManagerWithConfig(t, controller, defaultTqmTestOpts(controller), opts...) +} + +func mustCreateTestTaskQueueManagerWithConfig( + t *testing.T, + controller *gomock.Controller, + testOpts *tqmTestOpts, + opts ...taskQueueManagerOpt, +) *taskQueueManagerImpl { + t.Helper() + tqm, err := createTestTaskQueueManagerWithConfig(controller, testOpts, opts...) 
+ require.NoError(t, err) + return tqm +} + +func createTestTaskQueueManagerWithConfig( + controller *gomock.Controller, + testOpts *tqmTestOpts, + opts ...taskQueueManagerOpt, +) (*taskQueueManagerImpl, error) { + logger := log.NewTestLogger() + tm := newTestTaskManager(logger) + mockNamespaceCache := namespace.NewMockRegistry(controller) + mockNamespaceCache.EXPECT().GetNamespaceByID(gomock.Any()).Return(&namespace.Namespace{}, nil).AnyTimes() + mockNamespaceCache.EXPECT().GetNamespaceName(gomock.Any()).Return(namespace.Name("ns-name"), nil).AnyTimes() + mockVisibilityManager := manager.NewMockVisibilityManager(controller) + mockVisibilityManager.EXPECT().Close().AnyTimes() + mockHistoryClient := historyservicemock.NewMockHistoryServiceClient(controller) + mockHistoryClient.EXPECT().IsWorkflowTaskValid(gomock.Any(), gomock.Any()).Return(&historyservice.IsWorkflowTaskValidResponse{IsValid: true}, nil).AnyTimes() + mockHistoryClient.EXPECT().IsActivityTaskValid(gomock.Any(), gomock.Any()).Return(&historyservice.IsActivityTaskValidResponse{IsValid: true}, nil).AnyTimes() + me := newMatchingEngine(testOpts.config, tm, mockHistoryClient, logger, mockNamespaceCache, testOpts.matchingClientMock, mockVisibilityManager) + tlMgr, err := newTaskQueueManager(me, testOpts.tqId, normalStickyInfo, testOpts.config, opts...) + if err != nil { + return nil, err + } + me.taskQueues[*testOpts.tqId] = tlMgr + return tlMgr.(*taskQueueManagerImpl), nil +} + +func TestDescribeTaskQueue(t *testing.T) { + controller := gomock.NewController(t) + defer controller.Finish() + + startTaskID := int64(1) + taskCount := int64(3) + PollerIdentity := "test-poll" + + // Create taskQueue Manager and set taskQueue state + tlm := mustCreateTestTaskQueueManager(t, controller) + tlm.db.rangeID = int64(1) + tlm.db.ackLevel = int64(0) + tlm.taskAckManager.setAckLevel(tlm.db.ackLevel) + + for i := int64(0); i < taskCount; i++ { + tlm.taskAckManager.addTask(startTaskID + i) + } + + includeTaskStatus := false + descResp := tlm.DescribeTaskQueue(includeTaskStatus) + require.Equal(t, 0, len(descResp.GetPollers())) + require.Nil(t, descResp.GetTaskQueueStatus()) + + includeTaskStatus = true + taskQueueStatus := tlm.DescribeTaskQueue(includeTaskStatus).GetTaskQueueStatus() + require.NotNil(t, taskQueueStatus) + require.Zero(t, taskQueueStatus.GetAckLevel()) + require.Equal(t, taskCount, taskQueueStatus.GetReadLevel()) + require.Equal(t, taskCount, taskQueueStatus.GetBacklogCountHint()) + taskIDBlock := taskQueueStatus.GetTaskIdBlock() + require.Equal(t, int64(1), taskIDBlock.GetStartId()) + require.Equal(t, tlm.config.RangeSize, taskIDBlock.GetEndId()) + + // Add a poller and complete all tasks + tlm.pollerHistory.updatePollerInfo(pollerIdentity(PollerIdentity), &pollMetadata{}) + for i := int64(0); i < taskCount; i++ { + tlm.taskAckManager.completeTask(startTaskID + i) + } + + descResp = tlm.DescribeTaskQueue(includeTaskStatus) + require.Equal(t, 1, len(descResp.GetPollers())) + require.Equal(t, PollerIdentity, descResp.Pollers[0].GetIdentity()) + require.NotEmpty(t, descResp.Pollers[0].GetLastAccessTime()) + + rps := 5.0 + tlm.pollerHistory.updatePollerInfo(pollerIdentity(PollerIdentity), &pollMetadata{ratePerSecond: &rps}) + descResp = tlm.DescribeTaskQueue(includeTaskStatus) + require.Equal(t, 1, len(descResp.GetPollers())) + require.Equal(t, PollerIdentity, descResp.Pollers[0].GetIdentity()) + require.True(t, descResp.Pollers[0].GetRatePerSecond() > 4.0 && descResp.Pollers[0].GetRatePerSecond() < 6.0) + + taskQueueStatus = 
descResp.GetTaskQueueStatus() + require.NotNil(t, taskQueueStatus) + require.Equal(t, taskCount, taskQueueStatus.GetAckLevel()) + require.Zero(t, taskQueueStatus.GetBacklogCountHint()) +} + +func TestCheckIdleTaskQueue(t *testing.T) { + controller := gomock.NewController(t) + defer controller.Finish() + + cfg := NewConfig(dynamicconfig.NewNoopCollection(), false, false) + cfg.MaxTaskQueueIdleTime = dynamicconfig.GetDurationPropertyFnFilteredByTaskQueueInfo(2 * time.Second) + tqCfg := defaultTqmTestOpts(controller) + tqCfg.config = cfg + + // Idle + tlm := mustCreateTestTaskQueueManagerWithConfig(t, controller, tqCfg) + tlm.Start() + time.Sleep(1 * time.Second) + require.Equal(t, common.DaemonStatusStarted, atomic.LoadInt32(&tlm.status)) + + // Active poll-er + tlm = mustCreateTestTaskQueueManagerWithConfig(t, controller, tqCfg) + tlm.Start() + tlm.pollerHistory.updatePollerInfo("test-poll", &pollMetadata{}) + require.Equal(t, 1, len(tlm.GetAllPollerInfo())) + time.Sleep(1 * time.Second) + require.Equal(t, common.DaemonStatusStarted, atomic.LoadInt32(&tlm.status)) + tlm.Stop() + require.Equal(t, common.DaemonStatusStopped, atomic.LoadInt32(&tlm.status)) + + // Active adding task + tlm = mustCreateTestTaskQueueManagerWithConfig(t, controller, tqCfg) + tlm.Start() + require.Equal(t, 0, len(tlm.GetAllPollerInfo())) + tlm.taskReader.Signal() + time.Sleep(1 * time.Second) + require.Equal(t, common.DaemonStatusStarted, atomic.LoadInt32(&tlm.status)) + tlm.Stop() + require.Equal(t, common.DaemonStatusStopped, atomic.LoadInt32(&tlm.status)) +} + +func TestAddTaskStandby(t *testing.T) { + controller := gomock.NewController(t) + defer controller.Finish() + + tlm := mustCreateTestTaskQueueManagerWithConfig( + t, + controller, + defaultTqmTestOpts(controller), + func(tqm *taskQueueManagerImpl) { + ns := namespace.NewGlobalNamespaceForTest( + &persistencespb.NamespaceInfo{}, + &persistencespb.NamespaceConfig{}, + &persistencespb.NamespaceReplicationConfig{ + ActiveClusterName: cluster.TestAlternativeClusterName, + }, + cluster.TestAlternativeClusterInitialFailoverVersion, + ) + + // we need to override the mockNamespaceCache to return a passive namespace + mockNamespaceCache := namespace.NewMockRegistry(controller) + mockNamespaceCache.EXPECT().GetNamespaceByID(gomock.Any()).Return(ns, nil).AnyTimes() + mockNamespaceCache.EXPECT().GetNamespaceName(gomock.Any()).Return(ns.Name(), nil).AnyTimes() + tqm.namespaceRegistry = mockNamespaceCache + }, + ) + tlm.Start() + // stop taskWriter so that we can check if there's any call to it + // otherwise the task persist process is async and hard to test + tlm.taskWriter.Stop() + <-tlm.taskWriter.writeLoop.Done() + + addTaskParam := addTaskParams{ + execution: &commonpb.WorkflowExecution{}, + taskInfo: &persistencespb.TaskInfo{}, + source: enumsspb.TASK_SOURCE_HISTORY, + } + + syncMatch, err := tlm.AddTask(context.Background(), addTaskParam) + require.Equal(t, errShutdown, err) // task writer was stopped above + require.False(t, syncMatch) + + addTaskParam.forwardedFrom = "from child partition" + syncMatch, err = tlm.AddTask(context.Background(), addTaskParam) + require.Equal(t, errRemoteSyncMatchFailed, err) // should not persist the task + require.False(t, syncMatch) +} + +func TestTQMDoesFinalUpdateOnIdleUnload(t *testing.T) { + t.Parallel() + + controller := gomock.NewController(t) + + cfg := NewConfig(dynamicconfig.NewNoopCollection(), false, false) + cfg.MaxTaskQueueIdleTime = dynamicconfig.GetDurationPropertyFnFilteredByTaskQueueInfo(1 * time.Second) + tqCfg 
:= defaultTqmTestOpts(controller) + tqCfg.config = cfg + + tqm := mustCreateTestTaskQueueManagerWithConfig(t, controller, tqCfg) + tm := tqm.engine.taskManager.(*testTaskManager) + + tqm.Start() + time.Sleep(2 * time.Second) // will unload due to idleness + require.Equal(t, 1, tm.getUpdateCount(tqCfg.tqId)) +} + +func TestTQMDoesNotDoFinalUpdateOnOwnershipLost(t *testing.T) { + // TODO: use mocks instead of testTaskManager so we can do synchronization better instead of sleeps + t.Parallel() + + controller := gomock.NewController(t) + + cfg := NewConfig(dynamicconfig.NewNoopCollection(), false, false) + cfg.UpdateAckInterval = dynamicconfig.GetDurationPropertyFnFilteredByTaskQueueInfo(2 * time.Second) + tqCfg := defaultTqmTestOpts(controller) + tqCfg.config = cfg + + tqm := mustCreateTestTaskQueueManagerWithConfig(t, controller, tqCfg) + tm := tqm.engine.taskManager.(*testTaskManager) + + tqm.Start() + time.Sleep(1 * time.Second) + + // simulate ownership lost + ttm := tm.getTaskQueueManager(tqCfg.tqId) + ttm.Lock() + ttm.rangeID++ + ttm.Unlock() + + time.Sleep(2 * time.Second) // will attempt to update and fail and not try again + + require.Equal(t, 1, tm.getUpdateCount(tqCfg.tqId)) +} + +func TestUserData_LoadOnInit(t *testing.T) { + t.Parallel() + + controller := gomock.NewController(t) + defer controller.Finish() + ctx := context.Background() + tqId, err := newTaskQueueIDWithPartition(defaultNamespaceId, defaultRootTqID, enumspb.TASK_QUEUE_TYPE_WORKFLOW, 0) + require.NoError(t, err) + tqCfg := defaultTqmTestOpts(controller) + tqCfg.tqId = tqId + + data1 := &persistencespb.VersionedTaskQueueUserData{ + Version: 1, + Data: mkUserData(1), + } + + tq := mustCreateTestTaskQueueManagerWithConfig(t, controller, tqCfg) + + require.NoError(t, tq.engine.taskManager.UpdateTaskQueueUserData(context.Background(), + &persistence.UpdateTaskQueueUserDataRequest{ + NamespaceID: defaultNamespaceId.String(), + TaskQueue: defaultRootTqID, + UserData: data1, + })) + data1.Version++ + + tq.Start() + require.NoError(t, tq.WaitUntilInitialized(ctx)) + userData, _, err := tq.GetUserData() + require.NoError(t, err) + require.Equal(t, data1, userData) + tq.Stop() +} + +func TestUserData_DontLoadWhenDisabled(t *testing.T) { + t.Parallel() + + controller := gomock.NewController(t) + defer controller.Finish() + ctx := context.Background() + tqId, err := newTaskQueueIDWithPartition(defaultNamespaceId, defaultRootTqID, enumspb.TASK_QUEUE_TYPE_WORKFLOW, 0) + require.NoError(t, err) + tqCfg := defaultTqmTestOpts(controller) + tqCfg.tqId = tqId + + data1 := &persistencespb.VersionedTaskQueueUserData{ + Version: 1, + Data: mkUserData(1), + } + + tq := mustCreateTestTaskQueueManagerWithConfig(t, controller, tqCfg) + tq.config.LoadUserData = dynamicconfig.GetBoolPropertyFn(false) + + require.NoError(t, tq.engine.taskManager.UpdateTaskQueueUserData(context.Background(), + &persistence.UpdateTaskQueueUserDataRequest{ + NamespaceID: defaultNamespaceId.String(), + TaskQueue: defaultRootTqID, + UserData: data1, + })) + + tq.Start() + require.NoError(t, tq.WaitUntilInitialized(ctx)) + userData, _, err := tq.GetUserData() + require.Nil(t, userData) + require.Equal(t, err, errUserDataDisabled) + tq.Stop() +} + +func TestUserData_LoadDisableEnable(t *testing.T) { + t.Parallel() + + controller := gomock.NewController(t) + defer controller.Finish() + ctx := context.Background() + tqId, err := newTaskQueueIDWithPartition(defaultNamespaceId, defaultRootTqID, enumspb.TASK_QUEUE_TYPE_WORKFLOW, 0) + require.NoError(t, err) + tqCfg := 
defaultTqmTestOpts(controller) + tqCfg.tqId = tqId + + data1 := &persistencespb.VersionedTaskQueueUserData{ + Version: 1, + Data: mkUserData(1), + } + + loadUserData := make(chan bool) + + tq := mustCreateTestTaskQueueManagerWithConfig(t, controller, tqCfg) + tq.config.GetUserDataLongPollTimeout = dynamicconfig.GetDurationPropertyFn(10 * time.Millisecond) + tq.config.LoadUserData = func() bool { return <-loadUserData } + + require.NoError(t, tq.engine.taskManager.UpdateTaskQueueUserData(context.Background(), + &persistence.UpdateTaskQueueUserDataRequest{ + NamespaceID: defaultNamespaceId.String(), + TaskQueue: defaultRootTqID, + UserData: data1, + })) + data1.Version++ + + tq.Start() + + loadUserData <- true + time.Sleep(100 * time.Millisecond) + + require.NoError(t, tq.WaitUntilInitialized(ctx)) + userData, _, err := tq.GetUserData() + require.NoError(t, err) + require.Equal(t, data1, userData) + + loadUserData <- false + time.Sleep(100 * time.Millisecond) + + userData, _, err = tq.GetUserData() + require.Equal(t, err, errUserDataDisabled) + require.Nil(t, userData) + + // check engine-level rpc also + _, err = tq.engine.GetTaskQueueUserData(context.Background(), &matchingservice.GetTaskQueueUserDataRequest{ + NamespaceId: tqId.namespaceID.String(), + TaskQueue: tqId.FullName(), + TaskQueueType: tqId.taskType, + }) + var failedPrecondition *serviceerror.FailedPrecondition + require.True(t, errors.As(err, &failedPrecondition)) + + // updated in db without going through tqm (this shouldn't happen but lets us test that it re-reads) + require.NoError(t, tq.engine.taskManager.UpdateTaskQueueUserData(context.Background(), + &persistence.UpdateTaskQueueUserDataRequest{ + NamespaceID: defaultNamespaceId.String(), + TaskQueue: defaultRootTqID, + UserData: data1, + })) + data1.Version++ + + loadUserData <- true + time.Sleep(100 * time.Millisecond) + + userData, _, err = tq.GetUserData() + require.NoError(t, err) + require.Equal(t, data1, userData) + + tq.Stop() +} + +func TestUserData_LoadOnInit_OnlyOnceWhenNoData(t *testing.T) { + t.Parallel() + + controller := gomock.NewController(t) + defer controller.Finish() + ctx := context.Background() + tqId, err := newTaskQueueIDWithPartition(defaultNamespaceId, defaultRootTqID, enumspb.TASK_QUEUE_TYPE_WORKFLOW, 0) + require.NoError(t, err) + tqCfg := defaultTqmTestOpts(controller) + tqCfg.tqId = tqId + + tq := mustCreateTestTaskQueueManagerWithConfig(t, controller, tqCfg) + tm := tq.engine.taskManager.(*testTaskManager) + + require.Equal(t, 0, tm.getGetUserDataCount(tqId)) + + tq.Start() + require.NoError(t, tq.WaitUntilInitialized(ctx)) + + require.Equal(t, 1, tm.getGetUserDataCount(tqId)) + + userData, _, err := tq.GetUserData() + require.NoError(t, err) + require.Nil(t, userData) + + require.Equal(t, 1, tm.getGetUserDataCount(tqId)) + + userData, _, err = tq.GetUserData() + require.NoError(t, err) + require.Nil(t, userData) + + require.Equal(t, 1, tm.getGetUserDataCount(tqId)) + + tq.Stop() +} + +func TestUserData_FetchesOnInit(t *testing.T) { + t.Parallel() + + controller := gomock.NewController(t) + defer controller.Finish() + ctx := context.Background() + tqId, err := newTaskQueueIDWithPartition(defaultNamespaceId, defaultRootTqID, enumspb.TASK_QUEUE_TYPE_WORKFLOW, 1) + require.NoError(t, err) + tqCfg := defaultTqmTestOpts(controller) + tqCfg.tqId = tqId + + data1 := &persistencespb.VersionedTaskQueueUserData{ + Version: 1, + Data: mkUserData(1), + } + + tqCfg.matchingClientMock.EXPECT().GetTaskQueueUserData( + gomock.Any(), + 
&matchingservice.GetTaskQueueUserDataRequest{ + NamespaceId: defaultNamespaceId.String(), + TaskQueue: defaultRootTqID, + TaskQueueType: enumspb.TASK_QUEUE_TYPE_WORKFLOW, + LastKnownUserDataVersion: 0, + WaitNewData: false, // first fetch is not long poll + }). + Return(&matchingservice.GetTaskQueueUserDataResponse{ + TaskQueueHasUserData: true, + UserData: data1, + }, nil) + + tq := mustCreateTestTaskQueueManagerWithConfig(t, controller, tqCfg) + tq.config.GetUserDataMinWaitTime = 10 * time.Second // only one fetch + + tq.Start() + require.NoError(t, tq.WaitUntilInitialized(ctx)) + userData, _, err := tq.GetUserData() + require.NoError(t, err) + require.Equal(t, data1, userData) + tq.Stop() +} + +func TestUserData_FetchesAndFetchesAgain(t *testing.T) { + t.Parallel() + + controller := gomock.NewController(t) + defer controller.Finish() + ctx := context.Background() + // note: using activity here + tqId, err := newTaskQueueIDWithPartition(defaultNamespaceId, defaultRootTqID, enumspb.TASK_QUEUE_TYPE_ACTIVITY, 1) + require.NoError(t, err) + tqCfg := defaultTqmTestOpts(controller) + tqCfg.tqId = tqId + + data1 := &persistencespb.VersionedTaskQueueUserData{ + Version: 1, + Data: mkUserData(1), + } + data2 := &persistencespb.VersionedTaskQueueUserData{ + Version: 2, + Data: mkUserData(2), + } + + tqCfg.matchingClientMock.EXPECT().GetTaskQueueUserData( + gomock.Any(), + &matchingservice.GetTaskQueueUserDataRequest{ + NamespaceId: defaultNamespaceId.String(), + TaskQueue: defaultRootTqID, + TaskQueueType: enumspb.TASK_QUEUE_TYPE_WORKFLOW, + LastKnownUserDataVersion: 0, + WaitNewData: false, // first is not long poll + }). + Return(&matchingservice.GetTaskQueueUserDataResponse{ + TaskQueueHasUserData: true, + UserData: data1, + }, nil) + + tqCfg.matchingClientMock.EXPECT().GetTaskQueueUserData( + gomock.Any(), + &matchingservice.GetTaskQueueUserDataRequest{ + NamespaceId: defaultNamespaceId.String(), + TaskQueue: defaultRootTqID, + TaskQueueType: enumspb.TASK_QUEUE_TYPE_WORKFLOW, + LastKnownUserDataVersion: 1, + WaitNewData: true, // second is long poll + }). + Return(&matchingservice.GetTaskQueueUserDataResponse{ + TaskQueueHasUserData: true, + UserData: data2, + }, nil) + + tqCfg.matchingClientMock.EXPECT().GetTaskQueueUserData( + gomock.Any(), + &matchingservice.GetTaskQueueUserDataRequest{ + NamespaceId: defaultNamespaceId.String(), + TaskQueue: defaultRootTqID, + TaskQueueType: enumspb.TASK_QUEUE_TYPE_WORKFLOW, + LastKnownUserDataVersion: 2, + WaitNewData: true, + }). 
+ Return(nil, serviceerror.NewUnavailable("hold on")).AnyTimes() + + tq := mustCreateTestTaskQueueManagerWithConfig(t, controller, tqCfg) + tq.config.GetUserDataMinWaitTime = 10 * time.Millisecond // fetch again quickly + tq.Start() + time.Sleep(100 * time.Millisecond) + require.NoError(t, tq.WaitUntilInitialized(ctx)) + userData, _, err := tq.GetUserData() + require.NoError(t, err) + require.Equal(t, data2, userData) + tq.Stop() +} + +func TestUserData_FetchDisableEnable(t *testing.T) { + t.Parallel() + + controller := gomock.NewController(t) + defer controller.Finish() + // note: using activity here + tqId, err := newTaskQueueIDWithPartition(defaultNamespaceId, defaultRootTqID, enumspb.TASK_QUEUE_TYPE_ACTIVITY, 1) + require.NoError(t, err) + tqCfg := defaultTqmTestOpts(controller) + tqCfg.tqId = tqId + + loadUserData := make(chan bool) + + tq := mustCreateTestTaskQueueManagerWithConfig(t, controller, tqCfg) + tq.config.GetUserDataMinWaitTime = 10 * time.Millisecond // fetch again quickly + tq.config.GetUserDataRetryPolicy = backoff.NewExponentialRetryPolicy(10 * time.Millisecond).WithMaximumInterval(10 * time.Millisecond) + tq.config.LoadUserData = func() bool { return <-loadUserData } + + data1 := &persistencespb.VersionedTaskQueueUserData{ + Version: 1, + Data: mkUserData(1), + } + data2 := &persistencespb.VersionedTaskQueueUserData{ + Version: 2, + Data: mkUserData(2), + } + data3 := &persistencespb.VersionedTaskQueueUserData{ + Version: 3, + Data: mkUserData(3), + } + + tqCfg.matchingClientMock.EXPECT().GetTaskQueueUserData( + gomock.Any(), + &matchingservice.GetTaskQueueUserDataRequest{ + NamespaceId: defaultNamespaceId.String(), + TaskQueue: defaultRootTqID, + TaskQueueType: enumspb.TASK_QUEUE_TYPE_WORKFLOW, + LastKnownUserDataVersion: 0, + WaitNewData: false, // first is not long poll + }). + Return(&matchingservice.GetTaskQueueUserDataResponse{ + TaskQueueHasUserData: true, + UserData: data1, + }, nil) + + tqCfg.matchingClientMock.EXPECT().GetTaskQueueUserData( + gomock.Any(), + &matchingservice.GetTaskQueueUserDataRequest{ + NamespaceId: defaultNamespaceId.String(), + TaskQueue: defaultRootTqID, + TaskQueueType: enumspb.TASK_QUEUE_TYPE_WORKFLOW, + LastKnownUserDataVersion: 1, + WaitNewData: true, // second is long poll + }). + Return(&matchingservice.GetTaskQueueUserDataResponse{ + TaskQueueHasUserData: true, + UserData: data2, + }, nil) + + // after enabling again: + + tqCfg.matchingClientMock.EXPECT().GetTaskQueueUserData( + gomock.Any(), + &matchingservice.GetTaskQueueUserDataRequest{ + NamespaceId: defaultNamespaceId.String(), + TaskQueue: defaultRootTqID, + TaskQueueType: enumspb.TASK_QUEUE_TYPE_WORKFLOW, + LastKnownUserDataVersion: 0, // sends zero for first request after re-enabling + WaitNewData: false, + }). + Return(&matchingservice.GetTaskQueueUserDataResponse{ + TaskQueueHasUserData: true, + UserData: data3, + }, nil) + + tqCfg.matchingClientMock.EXPECT().GetTaskQueueUserData( + gomock.Any(), + &matchingservice.GetTaskQueueUserDataRequest{ + NamespaceId: defaultNamespaceId.String(), + TaskQueue: defaultRootTqID, + TaskQueueType: enumspb.TASK_QUEUE_TYPE_WORKFLOW, + LastKnownUserDataVersion: 3, + WaitNewData: true, + }). 
+ Return(nil, serviceerror.NewUnavailable("hold on")).AnyTimes() + + tq.Start() + + loadUserData <- true + loadUserData <- true + time.Sleep(100 * time.Millisecond) + + userData, _, err := tq.GetUserData() + require.NoError(t, err) + require.Equal(t, data2, userData) + + loadUserData <- false + time.Sleep(100 * time.Millisecond) + + // should have fetched twice but now user data is disabled + userData, _, err = tq.GetUserData() + require.Nil(t, userData) + require.Equal(t, err, errUserDataDisabled) + + // enable again + loadUserData <- true + time.Sleep(100 * time.Millisecond) + + // should be available again with data3 + userData, _, err = tq.GetUserData() + require.NoError(t, err) + require.Equal(t, data3, userData) + + tq.Stop() +} + +func TestUserData_RetriesFetchOnUnavailable(t *testing.T) { + t.Parallel() + + controller := gomock.NewController(t) + defer controller.Finish() + ctx := context.Background() + tqId, err := newTaskQueueIDWithPartition(defaultNamespaceId, defaultRootTqID, enumspb.TASK_QUEUE_TYPE_WORKFLOW, 1) + require.NoError(t, err) + tqCfg := defaultTqmTestOpts(controller) + tqCfg.tqId = tqId + + data1 := &persistencespb.VersionedTaskQueueUserData{ + Version: 1, + Data: mkUserData(1), + } + + ch := make(chan struct{}) + + tqCfg.matchingClientMock.EXPECT().GetTaskQueueUserData( + gomock.Any(), + &matchingservice.GetTaskQueueUserDataRequest{ + NamespaceId: defaultNamespaceId.String(), + TaskQueue: defaultRootTqID, + TaskQueueType: enumspb.TASK_QUEUE_TYPE_WORKFLOW, + LastKnownUserDataVersion: 0, + WaitNewData: false, + }). + DoAndReturn(func(ctx context.Context, in *matchingservice.GetTaskQueueUserDataRequest, opts ...grpc.CallOption) (*matchingservice.GetTaskQueueUserDataResponse, error) { + <-ch + return nil, serviceerror.NewUnavailable("wait a sec") + }).Times(3) + + tqCfg.matchingClientMock.EXPECT().GetTaskQueueUserData( + gomock.Any(), + &matchingservice.GetTaskQueueUserDataRequest{ + NamespaceId: defaultNamespaceId.String(), + TaskQueue: defaultRootTqID, + TaskQueueType: enumspb.TASK_QUEUE_TYPE_WORKFLOW, + LastKnownUserDataVersion: 0, + WaitNewData: false, + }). + DoAndReturn(func(ctx context.Context, in *matchingservice.GetTaskQueueUserDataRequest, opts ...grpc.CallOption) (*matchingservice.GetTaskQueueUserDataResponse, error) { + <-ch + return &matchingservice.GetTaskQueueUserDataResponse{ + TaskQueueHasUserData: true, + UserData: data1, + }, nil + }) + + tq := mustCreateTestTaskQueueManagerWithConfig(t, controller, tqCfg) + tq.config.GetUserDataMinWaitTime = 10 * time.Second // wait on success + tq.config.GetUserDataRetryPolicy = backoff.NewExponentialRetryPolicy(50 * time.Millisecond). + WithMaximumInterval(50 * time.Millisecond) // faster retry on failure + + tq.Start() + + ch <- struct{}{} + ch <- struct{}{} + + // at this point it should have tried two times and gotten unavailable. it should not be ready yet. 
+ require.False(t, tq.userDataReady.Ready()) + + ch <- struct{}{} + ch <- struct{}{} + time.Sleep(100 * time.Millisecond) // time to return + + // now it should be ready + require.NoError(t, tq.WaitUntilInitialized(ctx)) + userData, _, err := tq.GetUserData() + require.NoError(t, err) + require.Equal(t, data1, userData) + tq.Stop() +} + +func TestUserData_RetriesFetchOnUnImplemented(t *testing.T) { + t.Parallel() + + controller := gomock.NewController(t) + defer controller.Finish() + ctx := context.Background() + tqId, err := newTaskQueueIDWithPartition(defaultNamespaceId, defaultRootTqID, enumspb.TASK_QUEUE_TYPE_WORKFLOW, 1) + require.NoError(t, err) + tqCfg := defaultTqmTestOpts(controller) + tqCfg.tqId = tqId + + data1 := &persistencespb.VersionedTaskQueueUserData{ + Version: 1, + Data: mkUserData(1), + } + + ch := make(chan struct{}) + + tqCfg.matchingClientMock.EXPECT().GetTaskQueueUserData( + gomock.Any(), + &matchingservice.GetTaskQueueUserDataRequest{ + NamespaceId: defaultNamespaceId.String(), + TaskQueue: defaultRootTqID, + TaskQueueType: enumspb.TASK_QUEUE_TYPE_WORKFLOW, + LastKnownUserDataVersion: 0, + WaitNewData: false, + }). + DoAndReturn(func(ctx context.Context, in *matchingservice.GetTaskQueueUserDataRequest, opts ...grpc.CallOption) (*matchingservice.GetTaskQueueUserDataResponse, error) { + <-ch + return nil, serviceerror.NewUnimplemented("older version") + }).Times(3) + + tqCfg.matchingClientMock.EXPECT().GetTaskQueueUserData( + gomock.Any(), + &matchingservice.GetTaskQueueUserDataRequest{ + NamespaceId: defaultNamespaceId.String(), + TaskQueue: defaultRootTqID, + TaskQueueType: enumspb.TASK_QUEUE_TYPE_WORKFLOW, + LastKnownUserDataVersion: 0, + WaitNewData: false, + }). + DoAndReturn(func(ctx context.Context, in *matchingservice.GetTaskQueueUserDataRequest, opts ...grpc.CallOption) (*matchingservice.GetTaskQueueUserDataResponse, error) { + <-ch + return &matchingservice.GetTaskQueueUserDataResponse{ + TaskQueueHasUserData: true, + UserData: data1, + }, nil + }) + + tq := mustCreateTestTaskQueueManagerWithConfig(t, controller, tqCfg) + tq.config.GetUserDataMinWaitTime = 10 * time.Second // wait on success + tq.config.GetUserDataRetryPolicy = backoff.NewExponentialRetryPolicy(50 * time.Millisecond). + WithMaximumInterval(50 * time.Millisecond) // faster retry on failure + + tq.Start() + + ch <- struct{}{} + ch <- struct{}{} + + // at this point it should have tried once and gotten unimplemented. it should be ready already. 
+ require.NoError(t, tq.WaitUntilInitialized(ctx)) + + userData, _, err := tq.GetUserData() + require.Nil(t, userData) + require.NoError(t, err) + + ch <- struct{}{} + ch <- struct{}{} + time.Sleep(100 * time.Millisecond) // time to return + + userData, _, err = tq.GetUserData() + require.NoError(t, err) + require.Equal(t, data1, userData) + tq.Stop() +} + +func TestUserData_FetchesUpTree(t *testing.T) { + t.Parallel() + + controller := gomock.NewController(t) + defer controller.Finish() + ctx := context.Background() + tqId, err := newTaskQueueIDWithPartition(defaultNamespaceId, defaultRootTqID, enumspb.TASK_QUEUE_TYPE_WORKFLOW, 31) + require.NoError(t, err) + tqCfg := defaultTqmTestOpts(controller) + tqCfg.config.ForwarderMaxChildrenPerNode = dynamicconfig.GetIntPropertyFilteredByTaskQueueInfo(3) + tqCfg.tqId = tqId + + data1 := &persistencespb.VersionedTaskQueueUserData{ + Version: 1, + Data: mkUserData(1), + } + + tqCfg.matchingClientMock.EXPECT().GetTaskQueueUserData( + gomock.Any(), + &matchingservice.GetTaskQueueUserDataRequest{ + NamespaceId: defaultNamespaceId.String(), + TaskQueue: tqId.Name.WithPartition(10).FullName(), + TaskQueueType: enumspb.TASK_QUEUE_TYPE_WORKFLOW, + LastKnownUserDataVersion: 0, + WaitNewData: false, + }). + Return(&matchingservice.GetTaskQueueUserDataResponse{ + TaskQueueHasUserData: true, + UserData: data1, + }, nil) + + tq := mustCreateTestTaskQueueManagerWithConfig(t, controller, tqCfg) + tq.config.GetUserDataMinWaitTime = 10 * time.Second // wait on success + tq.Start() + require.NoError(t, tq.WaitUntilInitialized(ctx)) + userData, _, err := tq.GetUserData() + require.NoError(t, err) + require.Equal(t, data1, userData) + tq.Stop() +} + +func TestUserData_FetchesActivityToWorkflow(t *testing.T) { + t.Parallel() + + controller := gomock.NewController(t) + defer controller.Finish() + ctx := context.Background() + // note: activity root + tqId, err := newTaskQueueIDWithPartition(defaultNamespaceId, defaultRootTqID, enumspb.TASK_QUEUE_TYPE_ACTIVITY, 0) + require.NoError(t, err) + tqCfg := defaultTqmTestOpts(controller) + tqCfg.tqId = tqId + + data1 := &persistencespb.VersionedTaskQueueUserData{ + Version: 1, + Data: mkUserData(1), + } + + tqCfg.matchingClientMock.EXPECT().GetTaskQueueUserData( + gomock.Any(), + &matchingservice.GetTaskQueueUserDataRequest{ + NamespaceId: defaultNamespaceId.String(), + TaskQueue: defaultRootTqID, + TaskQueueType: enumspb.TASK_QUEUE_TYPE_WORKFLOW, + LastKnownUserDataVersion: 0, + WaitNewData: false, + }). 
+ Return(&matchingservice.GetTaskQueueUserDataResponse{ + TaskQueueHasUserData: true, + UserData: data1, + }, nil) + + tq := mustCreateTestTaskQueueManagerWithConfig(t, controller, tqCfg) + tq.config.GetUserDataMinWaitTime = 10 * time.Second // wait on success + tq.Start() + require.NoError(t, tq.WaitUntilInitialized(ctx)) + userData, _, err := tq.GetUserData() + require.NoError(t, err) + require.Equal(t, data1, userData) + tq.Stop() +} + +func TestUserData_FetchesStickyToNormal(t *testing.T) { + t.Parallel() + + controller := gomock.NewController(t) + defer controller.Finish() + ctx := context.Background() + tqCfg := defaultTqmTestOpts(controller) + + normalName := "normal-queue" + stickyName := uuid.New() + + tqId, err := newTaskQueueIDWithPartition(defaultNamespaceId, stickyName, enumspb.TASK_QUEUE_TYPE_WORKFLOW, 0) + require.NoError(t, err) + tqCfg.tqId = tqId + + data1 := &persistencespb.VersionedTaskQueueUserData{ + Version: 1, + Data: mkUserData(1), + } + + tqCfg.matchingClientMock.EXPECT().GetTaskQueueUserData( + gomock.Any(), + &matchingservice.GetTaskQueueUserDataRequest{ + NamespaceId: defaultNamespaceId.String(), + TaskQueue: normalName, + TaskQueueType: enumspb.TASK_QUEUE_TYPE_WORKFLOW, + LastKnownUserDataVersion: 0, + WaitNewData: false, + }). + Return(&matchingservice.GetTaskQueueUserDataResponse{ + TaskQueueHasUserData: true, + UserData: data1, + }, nil) + + // have to create manually to get sticky + logger := log.NewTestLogger() + tm := newTestTaskManager(logger) + mockNamespaceCache := namespace.NewMockRegistry(controller) + mockNamespaceCache.EXPECT().GetNamespaceByID(gomock.Any()).Return(&namespace.Namespace{}, nil).AnyTimes() + mockNamespaceCache.EXPECT().GetNamespaceName(gomock.Any()).Return(namespace.Name("ns-name"), nil).AnyTimes() + mockVisibilityManager := manager.NewMockVisibilityManager(controller) + mockVisibilityManager.EXPECT().Close().AnyTimes() + me := newMatchingEngine(tqCfg.config, tm, nil, logger, mockNamespaceCache, tqCfg.matchingClientMock, mockVisibilityManager) + stickyInfo := stickyInfo{ + kind: enumspb.TASK_QUEUE_KIND_STICKY, + normalName: normalName, + } + tlMgr, err := newTaskQueueManager(me, tqCfg.tqId, stickyInfo, tqCfg.config) + require.NoError(t, err) + tq := tlMgr.(*taskQueueManagerImpl) + + tq.config.GetUserDataMinWaitTime = 10 * time.Second // wait on success + tq.Start() + require.NoError(t, tq.WaitUntilInitialized(ctx)) + userData, _, err := tq.GetUserData() + require.NoError(t, err) + require.Equal(t, data1, userData) + tq.Stop() +} + +func TestUserData_UpdateOnNonRootFails(t *testing.T) { + t.Parallel() + + controller := gomock.NewController(t) + defer controller.Finish() + ctx := context.Background() + + subTqId, err := newTaskQueueIDWithPartition(defaultNamespaceId, defaultRootTqID, enumspb.TASK_QUEUE_TYPE_WORKFLOW, 1) + require.NoError(t, err) + tqCfg := defaultTqmTestOpts(controller) + tqCfg.tqId = subTqId + subTq := mustCreateTestTaskQueueManagerWithConfig(t, controller, tqCfg) + err = subTq.UpdateUserData(ctx, UserDataUpdateOptions{}, func(data *persistencespb.TaskQueueUserData) (*persistencespb.TaskQueueUserData, bool, error) { + return data, false, nil + }) + require.Error(t, err) + require.ErrorIs(t, err, errUserDataNoMutateNonRoot) + + actTqId, err := newTaskQueueIDWithPartition(defaultNamespaceId, defaultRootTqID, enumspb.TASK_QUEUE_TYPE_ACTIVITY, 0) + require.NoError(t, err) + actTqCfg := defaultTqmTestOpts(controller) + actTqCfg.tqId = actTqId + actTq := mustCreateTestTaskQueueManagerWithConfig(t, controller, actTqCfg) + 
err = actTq.UpdateUserData(ctx, UserDataUpdateOptions{}, func(data *persistencespb.TaskQueueUserData) (*persistencespb.TaskQueueUserData, bool, error) { + return data, false, nil + }) + require.Error(t, err) + require.ErrorIs(t, err, errUserDataNoMutateNonRoot) +} + +func TestUserData_DontFetchWhenDisabled(t *testing.T) { + t.Parallel() + + ctx := context.Background() + controller := gomock.NewController(t) + defer controller.Finish() + taskQueueId, err := newTaskQueueIDWithPartition(defaultNamespaceId, defaultRootTqID, enumspb.TASK_QUEUE_TYPE_WORKFLOW, 1) + require.NoError(t, err) + tqCfg := defaultTqmTestOpts(controller) + tqCfg.tqId = taskQueueId + mgr := mustCreateTestTaskQueueManagerWithConfig(t, controller, tqCfg) + tqCfg.matchingClientMock.EXPECT().GetTaskQueueUserData(gomock.Any(), gomock.Any()).Times(0) + mgr.config.LoadUserData = dynamicconfig.GetBoolPropertyFn(false) + mgr.Start() + err = mgr.WaitUntilInitialized(ctx) + require.NoError(t, err) +} diff -Nru temporal-1.21.5-1/src/service/matching/task_reader.go temporal-1.22.5/src/service/matching/task_reader.go --- temporal-1.21.5-1/src/service/matching/task_reader.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/service/matching/task_reader.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,348 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package matching + +import ( + "context" + "errors" + "sync" + "sync/atomic" + "time" + + enumsspb "go.temporal.io/server/api/enums/v1" + persistencespb "go.temporal.io/server/api/persistence/v1" + "go.temporal.io/server/common" + "go.temporal.io/server/common/backoff" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/log/tag" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/internal/goro" +) + +const ( + taskReaderOfferThrottleWait = time.Second + taskReaderThrottleRetryDelay = 3 * time.Second +) + +type ( + taskReader struct { + status int32 + taskBuffer chan *persistencespb.AllocatedTaskInfo // tasks loaded from persistence + notifyC chan struct{} // Used as signal to notify pump of new tasks + tlMgr *taskQueueManagerImpl + taskValidator taskValidator + gorogrp goro.Group + + backoffTimerLock sync.Mutex + backoffTimer *time.Timer + retrier backoff.Retrier + } +) + +func newTaskReader(tlMgr *taskQueueManagerImpl) *taskReader { + return &taskReader{ + status: common.DaemonStatusInitialized, + tlMgr: tlMgr, + taskValidator: newTaskValidator(tlMgr.newIOContext, tlMgr.clusterMeta, tlMgr.namespaceRegistry, tlMgr.engine.historyClient), + notifyC: make(chan struct{}, 1), + // we always dequeue the head of the buffer and try to dispatch it to a poller + // so allocate one less than desired target buffer size + taskBuffer: make(chan *persistencespb.AllocatedTaskInfo, tlMgr.config.GetTasksBatchSize()-1), + retrier: backoff.NewRetrier( + common.CreateReadTaskRetryPolicy(), + backoff.SystemClock, + ), + } +} + +// Start reading pump for the given task queue. +// The pump fills up taskBuffer from persistence. +func (tr *taskReader) Start() { + if !atomic.CompareAndSwapInt32( + &tr.status, + common.DaemonStatusInitialized, + common.DaemonStatusStarted, + ) { + return + } + + tr.gorogrp.Go(tr.dispatchBufferedTasks) + tr.gorogrp.Go(tr.getTasksPump) +} + +// Stop pump that fills up taskBuffer from persistence. +func (tr *taskReader) Stop() { + if !atomic.CompareAndSwapInt32( + &tr.status, + common.DaemonStatusStarted, + common.DaemonStatusStopped, + ) { + return + } + + tr.gorogrp.Cancel() +} + +func (tr *taskReader) Signal() { + var event struct{} + select { + case tr.notifyC <- event: + default: // channel already has an event, don't block + } +} + +func (tr *taskReader) dispatchBufferedTasks(ctx context.Context) error { + ctx = tr.tlMgr.callerInfoContext(ctx) + +dispatchLoop: + for ctx.Err() == nil { + select { + case taskInfo, ok := <-tr.taskBuffer: + if !ok { // Task queue getTasks pump is shutdown + break dispatchLoop + } + task := newInternalTask(taskInfo, tr.tlMgr.completeTask, enumsspb.TASK_SOURCE_DB_BACKLOG, "", false) + for ctx.Err() == nil { + if !tr.taskValidator.maybeValidate(taskInfo, tr.tlMgr.taskQueueID.taskType) { + task.finish(nil) + tr.taggedMetricsHandler().Counter(metrics.ExpiredTasksPerTaskQueueCounter.GetMetricName()).Record(1) + // Don't try to set read level here because it may have been advanced already. 
+ continue dispatchLoop + } + + taskCtx, cancel := context.WithTimeout(ctx, taskReaderOfferTimeout) + err := tr.tlMgr.engine.DispatchSpooledTask(taskCtx, task, tr.tlMgr.taskQueueID, tr.tlMgr.stickyInfo) + cancel() + if err == nil { + continue dispatchLoop + } + + // if task is still valid (truly valid or unable to verify if task is valid) + tr.taggedMetricsHandler().Counter(metrics.BufferThrottlePerTaskQueueCounter.GetMetricName()).Record(1) + if !errors.Is(err, errUserDataDisabled) && !errors.Is(err, context.DeadlineExceeded) && !errors.Is(err, context.Canceled) { + // Don't log here if we encounter a missing user data error when dispatching a versioned task. + tr.throttledLogger().Error("taskReader: unexpected error dispatching task", tag.Error(err)) + } + common.InterruptibleSleep(ctx, taskReaderOfferThrottleWait) + } + return ctx.Err() + case <-ctx.Done(): + return ctx.Err() + } + } + return ctx.Err() +} + +func (tr *taskReader) getTasksPump(ctx context.Context) error { + ctx = tr.tlMgr.callerInfoContext(ctx) + + if err := tr.tlMgr.WaitUntilInitialized(ctx); err != nil { + return err + } + + updateAckTimer := time.NewTimer(tr.tlMgr.config.UpdateAckInterval()) + defer updateAckTimer.Stop() + + tr.Signal() // prime pump +Loop: + for { + // Prioritize exiting over other processing + select { + case <-ctx.Done(): + return nil + default: + } + + select { + case <-ctx.Done(): + return nil + + case <-tr.notifyC: + batch, err := tr.getTaskBatch(ctx) + tr.tlMgr.signalIfFatal(err) + if err != nil { + // TODO: Should we ever stop retrying on db errors? + if common.IsResourceExhausted(err) { + tr.reEnqueueAfterDelay(taskReaderThrottleRetryDelay) + } else { + tr.reEnqueueAfterDelay(tr.retrier.NextBackOff()) + } + continue Loop + } + tr.retrier.Reset() + + if len(batch.tasks) == 0 { + tr.tlMgr.taskAckManager.setReadLevelAfterGap(batch.readLevel) + if !batch.isReadBatchDone { + tr.Signal() + } + continue Loop + } + + // only error here is due to context cancellation which we also + // handle above + _ = tr.addTasksToBuffer(ctx, batch.tasks) + // There may be more tasks. We yield now, but signal pump to check again later. + tr.Signal() + + case <-updateAckTimer.C: + err := tr.persistAckLevel(ctx) + isConditionFailed := tr.tlMgr.signalIfFatal(err) + if err != nil && !isConditionFailed { + tr.logger().Error("Persistent store operation failure", + tag.StoreOperationUpdateTaskQueue, + tag.Error(err)) + // keep going as saving ack is not critical + } + tr.Signal() // periodically signal pump to check persistence for tasks + updateAckTimer = time.NewTimer(tr.tlMgr.config.UpdateAckInterval()) + } + } +} + +func (tr *taskReader) getTaskBatchWithRange( + ctx context.Context, + readLevel int64, + maxReadLevel int64, +) ([]*persistencespb.AllocatedTaskInfo, error) { + response, err := tr.tlMgr.db.GetTasks(ctx, readLevel+1, maxReadLevel+1, tr.tlMgr.config.GetTasksBatchSize()) + if err != nil { + return nil, err + } + return response.Tasks, err +} + +type getTasksBatchResponse struct { + tasks []*persistencespb.AllocatedTaskInfo + readLevel int64 + isReadBatchDone bool +} + +// Returns a batch of tasks from persistence starting from current read level.
+// Also return a number that can be used to update readLevel +// Also return a bool to indicate whether read is finished +func (tr *taskReader) getTaskBatch(ctx context.Context) (*getTasksBatchResponse, error) { + var tasks []*persistencespb.AllocatedTaskInfo + readLevel := tr.tlMgr.taskAckManager.getReadLevel() + maxReadLevel := tr.tlMgr.taskWriter.GetMaxReadLevel() + + // counter i is used to break and let caller check whether taskqueue is still alive and need resume read. + for i := 0; i < 10 && readLevel < maxReadLevel; i++ { + upper := readLevel + tr.tlMgr.config.RangeSize + if upper > maxReadLevel { + upper = maxReadLevel + } + tasks, err := tr.getTaskBatchWithRange(ctx, readLevel, upper) + if err != nil { + return nil, err + } + // return as long as it grabs any tasks + if len(tasks) > 0 { + return &getTasksBatchResponse{ + tasks: tasks, + readLevel: upper, + isReadBatchDone: true, + }, nil + } + readLevel = upper + } + return &getTasksBatchResponse{ + tasks: tasks, + readLevel: readLevel, + isReadBatchDone: readLevel == maxReadLevel, + }, nil // caller will update readLevel when no task grabbed +} + +func (tr *taskReader) addTasksToBuffer( + ctx context.Context, + tasks []*persistencespb.AllocatedTaskInfo, +) error { + for _, t := range tasks { + if IsTaskExpired(t) { + tr.taggedMetricsHandler().Counter(metrics.ExpiredTasksPerTaskQueueCounter.GetMetricName()).Record(1) + // Also increment readLevel for expired tasks otherwise it could result in + // looping over the same tasks if all tasks read in the batch are expired + tr.tlMgr.taskAckManager.setReadLevel(t.GetTaskId()) + continue + } + if err := tr.addSingleTaskToBuffer(ctx, t); err != nil { + return err + } + } + return nil +} + +func (tr *taskReader) addSingleTaskToBuffer( + ctx context.Context, + task *persistencespb.AllocatedTaskInfo, +) error { + tr.tlMgr.taskAckManager.addTask(task.GetTaskId()) + select { + case tr.taskBuffer <- task: + return nil + case <-ctx.Done(): + return ctx.Err() + } +} + +func (tr *taskReader) persistAckLevel(ctx context.Context) error { + ackLevel := tr.tlMgr.taskAckManager.getAckLevel() + tr.emitTaskLagMetric(ackLevel) + return tr.tlMgr.db.UpdateState(ctx, ackLevel) +} + +func (tr *taskReader) logger() log.Logger { + return tr.tlMgr.logger +} + +func (tr *taskReader) throttledLogger() log.ThrottledLogger { + return tr.tlMgr.throttledLogger +} + +func (tr *taskReader) taggedMetricsHandler() metrics.Handler { + return tr.tlMgr.metricsHandler +} + +func (tr *taskReader) emitTaskLagMetric(ackLevel int64) { + // note: this metric is only an estimation for the lag. + // taskID in DB may not be continuous, especially when task list ownership changes. 
+ maxReadLevel := tr.tlMgr.taskWriter.GetMaxReadLevel() + tr.taggedMetricsHandler().Gauge(metrics.TaskLagPerTaskQueueGauge.GetMetricName()).Record(float64(maxReadLevel - ackLevel)) +} + +func (tr *taskReader) reEnqueueAfterDelay(duration time.Duration) { + tr.backoffTimerLock.Lock() + defer tr.backoffTimerLock.Unlock() + + if tr.backoffTimer == nil { + tr.backoffTimer = time.AfterFunc(duration, func() { + tr.backoffTimerLock.Lock() + defer tr.backoffTimerLock.Unlock() + + tr.Signal() // re-enqueue the event + tr.backoffTimer = nil + }) + } +} diff -Nru temporal-1.21.5-1/src/service/matching/task_validation.go temporal-1.22.5/src/service/matching/task_validation.go --- temporal-1.21.5-1/src/service/matching/task_validation.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/service/matching/task_validation.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,242 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package matching + +import ( + "context" + "time" + + commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" + "go.temporal.io/api/serviceerror" + + "go.temporal.io/server/api/historyservice/v1" + persistencespb "go.temporal.io/server/api/persistence/v1" + "go.temporal.io/server/common/cluster" + "go.temporal.io/server/common/namespace" + "go.temporal.io/server/common/primitives/timestamp" +) + +const ( + taskReaderOfferTimeout = 60 * time.Second + taskReaderValidationThreshold = 600 * time.Second +) + +type ( + taskValidator interface { + maybeValidate( + task *persistencespb.AllocatedTaskInfo, + taskType enumspb.TaskQueueType, + ) bool + } + + taskValidationInfo struct { + taskID int64 + validationTime time.Time + } + + taskValidatorImpl struct { + newIOContextFn func() (context.Context, context.CancelFunc) + clusterMetadata cluster.Metadata + namespaceRegistry namespace.Registry + historyClient historyservice.HistoryServiceClient + + lastValidatedTaskInfo taskValidationInfo + } +) + +func newTaskValidator( + newIOContextFn func() (context.Context, context.CancelFunc), + clusterMetadata cluster.Metadata, + namespaceRegistry namespace.Registry, + historyClient historyservice.HistoryServiceClient, +) *taskValidatorImpl { + return &taskValidatorImpl{ + newIOContextFn: newIOContextFn, + clusterMetadata: clusterMetadata, + namespaceRegistry: namespaceRegistry, + historyClient: historyClient, + } +} + +// check if a task has expired / is valid +// if return false, then task is invalid and should be discarded +// if return true, then task is *maybe-valid*, and should be dispatched +// +// a task is invalid if this task is already failed; timeout; completed, etc +// a task is *not invalid* if this task can be started, or caller cannot verify the validity +func (v *taskValidatorImpl) maybeValidate( + task *persistencespb.AllocatedTaskInfo, + taskType enumspb.TaskQueueType, +) bool { + if IsTaskExpired(task) { + return false + } + if !v.preValidate(task) { + return true + } + valid, err := v.isTaskValid(task, taskType) + if err != nil { + return true + } + v.postValidate(task) + return valid +} + +// preValidate track a task and return if validation should be done +func (v *taskValidatorImpl) preValidate( + task *persistencespb.AllocatedTaskInfo, +) bool { + namespaceID := task.Data.NamespaceId + namespaceEntry, err := v.namespaceRegistry.GetNamespaceByID(namespace.ID(namespaceID)) + if err != nil { + // if cannot find the namespace entry, treat task as active + return v.preValidateActive(task) + } + if v.clusterMetadata.GetCurrentClusterName() == namespaceEntry.ActiveClusterName() { + return v.preValidateActive(task) + } + return v.preValidatePassive(task) +} + +// preValidateActive track a task and return if validation should be done, if namespace is active +func (v *taskValidatorImpl) preValidateActive( + task *persistencespb.AllocatedTaskInfo, +) bool { + if v.lastValidatedTaskInfo.taskID != task.TaskId { + // first time seen the task, caller should try to dispatch first + if task.Data.CreateTime != nil { + v.lastValidatedTaskInfo = taskValidationInfo{ + taskID: task.TaskId, + validationTime: *task.Data.CreateTime, // task is valid when created + } + } else { + v.lastValidatedTaskInfo = taskValidationInfo{ + taskID: task.TaskId, + validationTime: time.Now().UTC(), // if no creation time specified, use now + } + } + return false + } + + // this task has been validated before + return time.Since(v.lastValidatedTaskInfo.validationTime) > taskReaderValidationThreshold 
+} + +// preValidatePassive track a task and return if validation should be done, if namespace is passive +func (v *taskValidatorImpl) preValidatePassive( + task *persistencespb.AllocatedTaskInfo, +) bool { + if v.lastValidatedTaskInfo.taskID != task.TaskId { + // first time seen the task, make a decision based on task creation time + if task.Data.CreateTime != nil { + v.lastValidatedTaskInfo = taskValidationInfo{ + taskID: task.TaskId, + validationTime: *task.Data.CreateTime, // task is valid when created + } + } else { + v.lastValidatedTaskInfo = taskValidationInfo{ + taskID: task.TaskId, + validationTime: time.Now().UTC(), // if no creation time specified, use now + } + } + } + + // this task has been validated before + return time.Since(v.lastValidatedTaskInfo.validationTime) > taskReaderValidationThreshold +} + +// postValidate update tracked task info +func (v *taskValidatorImpl) postValidate( + task *persistencespb.AllocatedTaskInfo, +) { + v.lastValidatedTaskInfo = taskValidationInfo{ + taskID: task.TaskId, + validationTime: time.Now().UTC(), + } +} + +func (v *taskValidatorImpl) isTaskValid( + task *persistencespb.AllocatedTaskInfo, + taskType enumspb.TaskQueueType, +) (bool, error) { + ctx, cancel := v.newIOContextFn() + defer cancel() + + namespaceID := task.Data.NamespaceId + workflowID := task.Data.WorkflowId + runID := task.Data.RunId + + switch taskType { + case enumspb.TASK_QUEUE_TYPE_ACTIVITY: + resp, err := v.historyClient.IsActivityTaskValid(ctx, &historyservice.IsActivityTaskValidRequest{ + NamespaceId: namespaceID, + Execution: &commonpb.WorkflowExecution{ + WorkflowId: workflowID, + RunId: runID, + }, + Clock: task.Data.Clock, + ScheduledEventId: task.Data.ScheduledEventId, + }) + switch err.(type) { + case nil: + return resp.IsValid, nil + case *serviceerror.NotFound: + return false, nil + default: + return false, err + } + case enumspb.TASK_QUEUE_TYPE_WORKFLOW: + resp, err := v.historyClient.IsWorkflowTaskValid(ctx, &historyservice.IsWorkflowTaskValidRequest{ + NamespaceId: namespaceID, + Execution: &commonpb.WorkflowExecution{ + WorkflowId: workflowID, + RunId: runID, + }, + Clock: task.Data.Clock, + ScheduledEventId: task.Data.ScheduledEventId, + }) + switch err.(type) { + case nil: + return resp.IsValid, nil + case *serviceerror.NotFound: + return false, nil + default: + return false, err + } + default: + return true, nil + } +} + +// TODO https://github.com/temporalio/temporal/issues/1021 +// +// there should be more validation logic here +// 1. if task has valid TTL -> TTL reached -> delete +// 2. if task has 0 TTL / no TTL -> logic need to additionally check if corresponding workflow still exists +func IsTaskExpired(t *persistencespb.AllocatedTaskInfo) bool { + expiry := timestamp.TimeValue(t.GetData().GetExpiryTime()) + return expiry.Unix() > 0 && expiry.Before(time.Now()) +} diff -Nru temporal-1.21.5-1/src/service/matching/task_validation_test.go temporal-1.22.5/src/service/matching/task_validation_test.go --- temporal-1.21.5-1/src/service/matching/task_validation_test.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/service/matching/task_validation_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,320 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package matching + +import ( + "context" + "math/rand" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/google/uuid" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" + "go.temporal.io/api/serviceerror" + + "go.temporal.io/server/api/historyservice/v1" + "go.temporal.io/server/api/historyservicemock/v1" + persistencespb "go.temporal.io/server/api/persistence/v1" + "go.temporal.io/server/common/cluster" + "go.temporal.io/server/common/namespace" + "go.temporal.io/server/common/primitives/timestamp" +) + +type ( + taskValidatorSuite struct { + suite.Suite + *require.Assertions + + controller *gomock.Controller + clusterMetadata *cluster.MockMetadata + historyClient *historyservicemock.MockHistoryServiceClient + namespaceCache *namespace.MockRegistry + + namespaceID string + workflowID string + runID string + scheduleEventID int64 + task *persistencespb.AllocatedTaskInfo + + taskValidator *taskValidatorImpl + } +) + +func TestTaskValidatorSuite(t *testing.T) { + s := new(taskValidatorSuite) + suite.Run(t, s) +} + +func (s *taskValidatorSuite) SetupTest() { + s.Assertions = require.New(s.T()) + + s.controller = gomock.NewController(s.T()) + s.clusterMetadata = cluster.NewMockMetadata(s.controller) + s.historyClient = historyservicemock.NewMockHistoryServiceClient(s.controller) + s.namespaceCache = namespace.NewMockRegistry(s.controller) + + s.namespaceID = uuid.New().String() + s.workflowID = uuid.New().String() + s.runID = uuid.New().String() + s.scheduleEventID = rand.Int63() + s.task = &persistencespb.AllocatedTaskInfo{ + Data: &persistencespb.TaskInfo{ + NamespaceId: s.namespaceID, + WorkflowId: s.workflowID, + RunId: s.runID, + ScheduledEventId: s.scheduleEventID, + CreateTime: timestamp.TimeNowPtrUtc(), + }, + } + + s.taskValidator = newTaskValidator(func() (context.Context, context.CancelFunc) { + return context.WithTimeout(context.Background(), 4*time.Second) + }, s.clusterMetadata, s.namespaceCache, s.historyClient) +} + +func (s *taskValidatorSuite) TeardownTest() { + s.controller.Finish() +} + +func (s *taskValidatorSuite) TestPreValidateActive_NewTask_Skip_WithCreationTime() { + s.taskValidator.lastValidatedTaskInfo = taskValidationInfo{ + taskID: s.task.TaskId - 1, + validationTime: time.Unix(0, rand.Int63()), + } + s.task.Data.CreateTime = timestamp.TimePtr(time.Unix(0, 
rand.Int63())) + + shouldValidate := s.taskValidator.preValidateActive(s.task) + s.False(shouldValidate) + s.Equal(taskValidationInfo{ + taskID: s.task.TaskId, + validationTime: *s.task.Data.CreateTime, + }, s.taskValidator.lastValidatedTaskInfo) +} + +func (s *taskValidatorSuite) TestPreValidateActive_NewTask_Skip_WithoutCreationTime() { + s.taskValidator.lastValidatedTaskInfo = taskValidationInfo{ + taskID: s.task.TaskId - 1, + validationTime: time.Unix(0, rand.Int63()), + } + s.task.Data.CreateTime = nil + + shouldValidate := s.taskValidator.preValidateActive(s.task) + s.False(shouldValidate) + s.Equal(s.task.TaskId, s.taskValidator.lastValidatedTaskInfo.taskID) + s.True(time.Now().Sub(s.taskValidator.lastValidatedTaskInfo.validationTime) < time.Second) +} + +func (s *taskValidatorSuite) TestPreValidateActive_ExistingTask_Validate() { + s.taskValidator.lastValidatedTaskInfo = taskValidationInfo{ + taskID: s.task.TaskId, + validationTime: time.Now().Add(-taskReaderValidationThreshold * 2), + } + + shouldValidate := s.taskValidator.preValidateActive(s.task) + s.True(shouldValidate) +} + +func (s *taskValidatorSuite) TestPreValidateActive_ExistingTask_Skip() { + s.taskValidator.lastValidatedTaskInfo = taskValidationInfo{ + taskID: s.task.TaskId, + validationTime: time.Now().Add(taskReaderValidationThreshold * 2), + } + + shouldValidate := s.taskValidator.preValidateActive(s.task) + s.False(shouldValidate) +} + +func (s *taskValidatorSuite) TestPreValidatePassive_NewTask_Skip_WithCreationTime() { + s.taskValidator.lastValidatedTaskInfo = taskValidationInfo{ + taskID: s.task.TaskId - 1, + validationTime: time.Unix(0, rand.Int63()), + } + s.task.Data.CreateTime = timestamp.TimePtr(time.Now().Add(-taskReaderValidationThreshold / 2)) + + shouldValidate := s.taskValidator.preValidatePassive(s.task) + s.False(shouldValidate) + s.Equal(taskValidationInfo{ + taskID: s.task.TaskId, + validationTime: *s.task.Data.CreateTime, + }, s.taskValidator.lastValidatedTaskInfo) +} + +func (s *taskValidatorSuite) TestPreValidatePassive_NewTask_Validate_WithCreationTime() { + s.taskValidator.lastValidatedTaskInfo = taskValidationInfo{ + taskID: s.task.TaskId - 1, + validationTime: time.Unix(0, rand.Int63()), + } + s.task.Data.CreateTime = timestamp.TimePtr(time.Now().Add(-taskReaderValidationThreshold * 2)) + + shouldValidate := s.taskValidator.preValidatePassive(s.task) + s.True(shouldValidate) + s.Equal(taskValidationInfo{ + taskID: s.task.TaskId, + validationTime: *s.task.Data.CreateTime, + }, s.taskValidator.lastValidatedTaskInfo) +} + +func (s *taskValidatorSuite) TestPreValidatePassive_NewTask_Skip_WithoutCreationTime() { + s.taskValidator.lastValidatedTaskInfo = taskValidationInfo{ + taskID: s.task.TaskId - 1, + validationTime: time.Unix(0, rand.Int63()), + } + s.task.Data.CreateTime = nil + + shouldValidate := s.taskValidator.preValidatePassive(s.task) + s.False(shouldValidate) + s.Equal(s.task.TaskId, s.taskValidator.lastValidatedTaskInfo.taskID) + s.True(time.Now().Sub(s.taskValidator.lastValidatedTaskInfo.validationTime) < time.Second) +} + +func (s *taskValidatorSuite) TestPreValidatePassive_ExistingTask_Validate() { + s.taskValidator.lastValidatedTaskInfo = taskValidationInfo{ + taskID: s.task.TaskId, + validationTime: time.Now().Add(-taskReaderValidationThreshold * 2), + } + + shouldValidate := s.taskValidator.preValidatePassive(s.task) + s.True(shouldValidate) +} + +func (s *taskValidatorSuite) TestPreValidatePassive_ExistingTask_Skip() { + s.taskValidator.lastValidatedTaskInfo = 
taskValidationInfo{ + taskID: s.task.TaskId, + validationTime: time.Now().Add(taskReaderValidationThreshold * 2), + } + + shouldValidate := s.taskValidator.preValidatePassive(s.task) + s.False(shouldValidate) +} + +func (s *taskValidatorSuite) TestIsTaskValid_ActivityTask_Valid() { + taskType := enumspb.TASK_QUEUE_TYPE_ACTIVITY + + s.historyClient.EXPECT().IsActivityTaskValid(gomock.Any(), &historyservice.IsActivityTaskValidRequest{ + NamespaceId: s.namespaceID, + Execution: &commonpb.WorkflowExecution{ + WorkflowId: s.workflowID, + RunId: s.runID, + }, + Clock: s.task.Data.Clock, + ScheduledEventId: s.task.Data.ScheduledEventId, + }).Return(&historyservice.IsActivityTaskValidResponse{IsValid: true}, nil) + + valid, err := s.taskValidator.isTaskValid(s.task, taskType) + s.NoError(err) + s.True(valid) +} + +func (s *taskValidatorSuite) TestIsTaskValid_ActivityTask_NotFound() { + taskType := enumspb.TASK_QUEUE_TYPE_ACTIVITY + + s.historyClient.EXPECT().IsActivityTaskValid(gomock.Any(), &historyservice.IsActivityTaskValidRequest{ + NamespaceId: s.namespaceID, + Execution: &commonpb.WorkflowExecution{ + WorkflowId: s.workflowID, + RunId: s.runID, + }, + Clock: s.task.Data.Clock, + ScheduledEventId: s.task.Data.ScheduledEventId, + }).Return(nil, &serviceerror.NotFound{}) + + valid, err := s.taskValidator.isTaskValid(s.task, taskType) + s.NoError(err) + s.False(valid) +} + +func (s *taskValidatorSuite) TestIsTaskValid_ActivityTask_Error() { + taskType := enumspb.TASK_QUEUE_TYPE_ACTIVITY + + s.historyClient.EXPECT().IsActivityTaskValid(gomock.Any(), &historyservice.IsActivityTaskValidRequest{ + NamespaceId: s.namespaceID, + Execution: &commonpb.WorkflowExecution{ + WorkflowId: s.workflowID, + RunId: s.runID, + }, + Clock: s.task.Data.Clock, + ScheduledEventId: s.task.Data.ScheduledEventId, + }).Return(nil, &serviceerror.Unavailable{}) + + _, err := s.taskValidator.isTaskValid(s.task, taskType) + s.Error(err) +} + +func (s *taskValidatorSuite) TestIsTaskValid_WorkflowTask_Valid() { + taskType := enumspb.TASK_QUEUE_TYPE_WORKFLOW + + s.historyClient.EXPECT().IsWorkflowTaskValid(gomock.Any(), &historyservice.IsWorkflowTaskValidRequest{ + NamespaceId: s.namespaceID, + Execution: &commonpb.WorkflowExecution{ + WorkflowId: s.workflowID, + RunId: s.runID, + }, + Clock: s.task.Data.Clock, + ScheduledEventId: s.task.Data.ScheduledEventId, + }).Return(&historyservice.IsWorkflowTaskValidResponse{IsValid: true}, nil) + + valid, err := s.taskValidator.isTaskValid(s.task, taskType) + s.NoError(err) + s.True(valid) +} + +func (s *taskValidatorSuite) TestIsTaskValid_WorkflowTask_NotFound() { + taskType := enumspb.TASK_QUEUE_TYPE_WORKFLOW + + s.historyClient.EXPECT().IsWorkflowTaskValid(gomock.Any(), &historyservice.IsWorkflowTaskValidRequest{ + NamespaceId: s.namespaceID, + Execution: &commonpb.WorkflowExecution{ + WorkflowId: s.workflowID, + RunId: s.runID, + }, + Clock: s.task.Data.Clock, + ScheduledEventId: s.task.Data.ScheduledEventId, + }).Return(nil, &serviceerror.NotFound{}) + + valid, err := s.taskValidator.isTaskValid(s.task, taskType) + s.NoError(err) + s.False(valid) +} + +func (s *taskValidatorSuite) TestIsTaskValid_WorkflowTask_Error() { + taskType := enumspb.TASK_QUEUE_TYPE_WORKFLOW + + s.historyClient.EXPECT().IsWorkflowTaskValid(gomock.Any(), &historyservice.IsWorkflowTaskValidRequest{ + NamespaceId: s.namespaceID, + Execution: &commonpb.WorkflowExecution{ + WorkflowId: s.workflowID, + RunId: s.runID, + }, + Clock: s.task.Data.Clock, + ScheduledEventId: s.task.Data.ScheduledEventId, + 
}).Return(nil, &serviceerror.Unavailable{}) + + _, err := s.taskValidator.isTaskValid(s.task, taskType) + s.Error(err) +} diff -Nru temporal-1.21.5-1/src/service/matching/task_writer.go temporal-1.22.5/src/service/matching/task_writer.go --- temporal-1.21.5-1/src/service/matching/task_writer.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/service/matching/task_writer.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,332 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package matching + +import ( + "context" + "errors" + "fmt" + "sync/atomic" + "time" + + commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" + "go.temporal.io/api/serviceerror" + + persistencespb "go.temporal.io/server/api/persistence/v1" + "go.temporal.io/server/common" + "go.temporal.io/server/common/backoff" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/log/tag" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/persistence" + "go.temporal.io/server/internal/goro" +) + +type ( + writeTaskResponse struct { + err error + persistenceResponse *persistence.CreateTasksResponse + } + + writeTaskRequest struct { + execution *commonpb.WorkflowExecution + taskInfo *persistencespb.TaskInfo + responseCh chan<- *writeTaskResponse + } + + taskIDBlock struct { + start int64 + end int64 + } + + // taskWriter writes tasks sequentially to persistence + taskWriter struct { + status int32 + tlMgr *taskQueueManagerImpl + config *taskQueueConfig + taskQueueID *taskQueueID + appendCh chan *writeTaskRequest + taskIDBlock taskIDBlock + maxReadLevel int64 + logger log.Logger + writeLoop *goro.Handle + idAlloc idBlockAllocator + } +) + +var ( + // errShutdown indicates that the task queue is shutting down + errShutdown = &persistence.ConditionFailedError{Msg: "task queue shutting down"} + errNonContiguousBlocks = errors.New("previous block end is not equal to current block") + + noTaskIDs = taskIDBlock{start: 1, end: 0} +) + +func newTaskWriter( + tlMgr *taskQueueManagerImpl, +) *taskWriter { + return &taskWriter{ + status: common.DaemonStatusInitialized, + tlMgr: tlMgr, + config: tlMgr.config, + taskQueueID: tlMgr.taskQueueID, + appendCh: make(chan *writeTaskRequest, tlMgr.config.OutstandingTaskAppendsThreshold()), + taskIDBlock: noTaskIDs, + maxReadLevel: noTaskIDs.start - 1, + logger: 
tlMgr.logger, + idAlloc: tlMgr.db, + } +} + +func (w *taskWriter) Start() { + if !atomic.CompareAndSwapInt32( + &w.status, + common.DaemonStatusInitialized, + common.DaemonStatusStarted, + ) { + return + } + + w.writeLoop = goro.NewHandle(w.tlMgr.callerInfoContext(context.Background())) + w.writeLoop.Go(w.taskWriterLoop) +} + +// Stop stops the taskWriter +func (w *taskWriter) Stop() { + if !atomic.CompareAndSwapInt32( + &w.status, + common.DaemonStatusStarted, + common.DaemonStatusStopped, + ) { + return + } + w.writeLoop.Cancel() +} + +func (w *taskWriter) initReadWriteState(ctx context.Context) error { + retryForever := backoff.NewExponentialRetryPolicy(1 * time.Second). + WithMaximumInterval(10 * time.Second). + WithExpirationInterval(backoff.NoInterval) + + state, err := w.renewLeaseWithRetry( + ctx, retryForever, common.IsPersistenceTransientError) + if err != nil { + return err + } + w.taskIDBlock = rangeIDToTaskIDBlock(state.rangeID, w.config.RangeSize) + atomic.StoreInt64(&w.maxReadLevel, w.taskIDBlock.start-1) + w.tlMgr.taskAckManager.setAckLevel(state.ackLevel) + return nil +} + +func (w *taskWriter) appendTask( + execution *commonpb.WorkflowExecution, + taskInfo *persistencespb.TaskInfo, +) (*persistence.CreateTasksResponse, error) { + + select { + case <-w.writeLoop.Done(): + return nil, errShutdown + default: + // noop + } + + startTime := time.Now().UTC() + ch := make(chan *writeTaskResponse) + req := &writeTaskRequest{ + execution: execution, + taskInfo: taskInfo, + responseCh: ch, + } + + select { + case w.appendCh <- req: + select { + case r := <-ch: + w.tlMgr.metricsHandler.Timer(metrics.TaskWriteLatencyPerTaskQueue.GetMetricName()).Record(time.Since(startTime)) + return r.persistenceResponse, r.err + case <-w.writeLoop.Done(): + // if we are shutting down, this request will never make + // it to cassandra, just bail out and fail this request + return nil, errShutdown + } + default: // channel is full, throttle + w.tlMgr.metricsHandler.Counter(metrics.TaskWriteThrottlePerTaskQueueCounter.GetMetricName()).Record(1) + return nil, serviceerror.NewResourceExhausted( + enumspb.RESOURCE_EXHAUSTED_CAUSE_SYSTEM_OVERLOADED, + "Too many outstanding appends to the task queue") + } +} + +func (w *taskWriter) GetMaxReadLevel() int64 { + return atomic.LoadInt64(&w.maxReadLevel) +} + +func (w *taskWriter) allocTaskIDs(ctx context.Context, count int) ([]int64, error) { + result := make([]int64, count) + for i := 0; i < count; i++ { + if w.taskIDBlock.start > w.taskIDBlock.end { + // we ran out of current allocation block + newBlock, err := w.allocTaskIDBlock(ctx, w.taskIDBlock.end) + if err != nil { + return nil, err + } + w.taskIDBlock = newBlock + } + result[i] = w.taskIDBlock.start + w.taskIDBlock.start++ + } + return result, nil +} + +func (w *taskWriter) appendTasks( + ctx context.Context, + tasks []*persistencespb.AllocatedTaskInfo, +) (*persistence.CreateTasksResponse, error) { + + resp, err := w.tlMgr.db.CreateTasks(ctx, tasks) + if err != nil { + w.tlMgr.signalIfFatal(err) + w.logger.Error("Persistent store operation failure", + tag.StoreOperationCreateTask, + tag.Error(err), + tag.WorkflowTaskQueueName(w.taskQueueID.FullName()), + tag.WorkflowTaskQueueType(w.taskQueueID.taskType)) + return nil, err + } + return resp, nil +} + +func (w *taskWriter) taskWriterLoop(ctx context.Context) error { + err := w.initReadWriteState(ctx) + w.tlMgr.SetInitializedError(err) + +writerLoop: + for { + select { + case request := <-w.appendCh: + // read a batch of requests from the channel + reqs 
:= []*writeTaskRequest{request} + reqs = w.getWriteBatch(reqs) + batchSize := len(reqs) + + maxReadLevel := int64(0) + + taskIDs, err := w.allocTaskIDs(ctx, batchSize) + if err != nil { + w.sendWriteResponse(reqs, nil, err) + continue writerLoop + } + + var tasks []*persistencespb.AllocatedTaskInfo + for i, req := range reqs { + tasks = append(tasks, &persistencespb.AllocatedTaskInfo{ + TaskId: taskIDs[i], + Data: req.taskInfo, + }) + maxReadLevel = taskIDs[i] + } + + resp, err := w.appendTasks(ctx, tasks) + w.sendWriteResponse(reqs, resp, err) + // Update the maxReadLevel after the writes are completed. + if maxReadLevel > 0 { + atomic.StoreInt64(&w.maxReadLevel, maxReadLevel) + } + + case <-ctx.Done(): + return ctx.Err() + } + } +} + +func (w *taskWriter) getWriteBatch(reqs []*writeTaskRequest) []*writeTaskRequest { +readLoop: + for i := 0; i < w.config.MaxTaskBatchSize(); i++ { + select { + case req := <-w.appendCh: + reqs = append(reqs, req) + default: // channel is empty, don't block + break readLoop + } + } + return reqs +} + +func (w *taskWriter) sendWriteResponse( + reqs []*writeTaskRequest, + persistenceResponse *persistence.CreateTasksResponse, + err error, +) { + for _, req := range reqs { + resp := &writeTaskResponse{ + err: err, + persistenceResponse: persistenceResponse, + } + + req.responseCh <- resp + } +} + +func (w *taskWriter) renewLeaseWithRetry( + ctx context.Context, + retryPolicy backoff.RetryPolicy, + retryErrors backoff.IsRetryable, +) (taskQueueState, error) { + var newState taskQueueState + op := func(context.Context) (err error) { + newState, err = w.idAlloc.RenewLease(ctx) + return + } + w.tlMgr.metricsHandler.Counter(metrics.LeaseRequestPerTaskQueueCounter.GetMetricName()).Record(1) + err := backoff.ThrottleRetryContext(ctx, op, retryPolicy, retryErrors) + if err != nil { + w.tlMgr.metricsHandler.Counter(metrics.LeaseFailurePerTaskQueueCounter.GetMetricName()).Record(1) + return newState, err + } + return newState, nil +} + +func (w *taskWriter) allocTaskIDBlock(ctx context.Context, prevBlockEnd int64) (taskIDBlock, error) { + currBlock := rangeIDToTaskIDBlock(w.idAlloc.RangeID(), w.config.RangeSize) + if currBlock.end != prevBlockEnd { + return taskIDBlock{}, + fmt.Errorf( + "%w: allocTaskIDBlock: invalid state: prevBlockEnd:%v != currTaskIDBlock:%+v", + errNonContiguousBlocks, + prevBlockEnd, + currBlock, + ) + } + state, err := w.renewLeaseWithRetry(ctx, persistenceOperationRetryPolicy, common.IsPersistenceTransientError) + if err != nil { + if w.tlMgr.signalIfFatal(err) { + return taskIDBlock{}, errShutdown + } + return taskIDBlock{}, err + } + return rangeIDToTaskIDBlock(state.rangeID, w.config.RangeSize), nil +} diff -Nru temporal-1.21.5-1/src/service/matching/taskqueue.go temporal-1.22.5/src/service/matching/taskqueue.go --- temporal-1.21.5-1/src/service/matching/taskqueue.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/matching/taskqueue.go 2024-02-23 09:45:43.000000000 +0000 @@ -42,7 +42,7 @@ } ) -// newTaskQueueID returns taskQueueID which uniquely identfies as task queue +// newTaskQueueID returns taskQueueID which uniquely identifies as task queue func newTaskQueueID(namespaceID namespace.ID, taskQueueName string, taskType enumspb.TaskQueueType) (*taskQueueID, error) { return newTaskQueueIDWithPartition(namespaceID, taskQueueName, taskType, -1) } diff -Nru temporal-1.21.5-1/src/service/matching/version_sets.go temporal-1.22.5/src/service/matching/version_sets.go --- temporal-1.21.5-1/src/service/matching/version_sets.go 
2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/matching/version_sets.go 2024-02-23 09:45:43.000000000 +0000 @@ -34,6 +34,8 @@ "go.temporal.io/api/serviceerror" taskqueuepb "go.temporal.io/api/taskqueue/v1" "go.temporal.io/api/workflowservice/v1" + "golang.org/x/exp/slices" + persistencespb "go.temporal.io/server/api/persistence/v1" "go.temporal.io/server/common" hlc "go.temporal.io/server/common/clock/hybrid_logical_clock" @@ -45,9 +47,6 @@ var ( // Error used to signal that a queue has no versioning data. This shouldn't escape matching. errEmptyVersioningData = serviceerror.NewInternal("versioning data is empty") - - // Temporary until we persist guessed set ids - errUnknownBuildId = serviceerror.NewFailedPrecondition("unknown build id") ) // ToBuildIdOrderingResponse transforms the internal VersioningData representation to public representation. @@ -154,21 +153,17 @@ } func shallowCloneVersioningData(data *persistencespb.VersioningData) *persistencespb.VersioningData { - clone := persistencespb.VersioningData{ - VersionSets: make([]*persistencespb.CompatibleVersionSet, len(data.GetVersionSets())), + return &persistencespb.VersioningData{ + VersionSets: slices.Clone(data.GetVersionSets()), } - copy(clone.VersionSets, data.GetVersionSets()) - return &clone } func shallowCloneVersionSet(set *persistencespb.CompatibleVersionSet) *persistencespb.CompatibleVersionSet { - clone := &persistencespb.CompatibleVersionSet{ - SetIds: set.SetIds, - BuildIds: make([]*persistencespb.BuildId, len(set.BuildIds)), + return &persistencespb.CompatibleVersionSet{ + SetIds: slices.Clone(set.SetIds), + BuildIds: slices.Clone(set.BuildIds), BecameDefaultTimestamp: set.BecameDefaultTimestamp, } - copy(clone.BuildIds, set.BuildIds) - return clone } // UpdateVersionSets updates version sets given existing versioning data and an update request. The request is expected @@ -353,7 +348,7 @@ } // Requires: caps is not nil -func lookupVersionSetForPoll(data *persistencespb.VersioningData, caps *commonpb.WorkerVersionCapabilities) (string, error) { +func lookupVersionSetForPoll(data *persistencespb.VersioningData, caps *commonpb.WorkerVersionCapabilities) (string, []string, bool, error) { // For poll, only the latest version in the compatible set can get tasks. // Find the version set that this worker is in. // Note data may be nil here, findVersion will return -1 then. @@ -367,40 +362,40 @@ // versioning data replicates, we'll redirect the poll to the correct set id. // In the meantime (e.g. during an ungraceful failover) we can at least match tasks // using the exact same build ID. - // TODO: add metric and log to make this situation visible guessedSetId := hashBuildId(caps.BuildId) - return guessedSetId, nil + return guessedSetId, nil, true, nil } set := data.VersionSets[setIdx] lastIndex := len(set.BuildIds) - 1 if indexInSet != lastIndex { - return "", serviceerror.NewNewerBuildExists(set.BuildIds[lastIndex].Id) + return "", nil, false, serviceerror.NewNewerBuildExists(set.BuildIds[lastIndex].Id) } - return getSetID(set), nil + primarySetId, demotedSetIds := getSetIds(set) + return primarySetId, demotedSetIds, false, nil } // Requires: caps is not nil -func checkVersionForStickyPoll(data *persistencespb.VersioningData, caps *commonpb.WorkerVersionCapabilities) error { +func checkVersionForStickyPoll(data *persistencespb.VersioningData, caps *commonpb.WorkerVersionCapabilities) (bool, error) { // For poll, only the latest version in the compatible set can get tasks. 
// Find the version set that this worker is in. // Note data may be nil here, findVersion will return -1 then. setIdx, indexInSet := worker_versioning.FindBuildId(data, caps.BuildId) if setIdx < 0 { - // A poller is using a build ID but we don't know about that build ID. See comments in + // A poller is using a build ID, but we don't know about that build ID. See comments in // lookupVersionSetForPoll. If we consider it the default for its set, then we should // leave it on the sticky queue here. - return nil + return true, nil } set := data.VersionSets[setIdx] lastIndex := len(set.BuildIds) - 1 if indexInSet != lastIndex { - return serviceerror.NewNewerBuildExists(set.BuildIds[lastIndex].Id) + return false, serviceerror.NewNewerBuildExists(set.BuildIds[lastIndex].Id) } - return nil + return false, nil } // For this function, buildId == "" means "use default" -func lookupVersionSetForAdd(data *persistencespb.VersioningData, buildId string) (string, error) { +func lookupVersionSetForAdd(data *persistencespb.VersioningData, buildId string) (string, bool, error) { var set *persistencespb.CompatibleVersionSet if buildId == "" { // If this is a new workflow, assign it to the latest version. @@ -408,7 +403,7 @@ // leave it on the unversioned one. That case is handled already before we get here.) setLen := len(data.GetVersionSets()) if setLen == 0 || data.VersionSets[setLen-1] == nil { - return "", errEmptyVersioningData + return "", false, errEmptyVersioningData } set = data.VersionSets[setLen-1] } else { @@ -416,8 +411,6 @@ // Note data may be nil here, findVersion will return -1 then. setIdx, _ := worker_versioning.FindBuildId(data, buildId) if setIdx < 0 { - // TODO: persist guessed set it and then remove this - return "", errUnknownBuildId // A workflow has a build ID set, but we don't know about that build ID. This can // happen in replication scenario: the workflow itself was migrated and we failed // over, but the versioning data hasn't been migrated yet. Instead of rejecting it, @@ -425,47 +418,50 @@ // its set on the other side, then our guess is right and things will work out. If // not, then we'll guess wrong, but when we get the replication event, we'll merge // the sets and use both ids. - // TODO: add metric and log to make this situation visible - // guessedSetId := hashBuildId(buildId) - // return guessedSetId, nil + // Note that in the add task case, we have to persist this guessed set id before we + // can accept the task. + guessedSetId := hashBuildId(buildId) + return guessedSetId, true, nil } set = data.VersionSets[setIdx] } - return getSetID(set), nil + // Demoted set ids don't matter for add, we always write to the primary. + primarySetId, _ := getSetIds(set) + return primarySetId, false, nil } // For this function, buildId == "" means "use default" -func checkVersionForStickyAdd(data *persistencespb.VersioningData, buildId string) error { +func checkVersionForStickyAdd(data *persistencespb.VersioningData, buildId string) (bool, error) { if buildId == "" { // This shouldn't happen. - return serviceerror.NewInternal("should have a build id directive on versioned sticky queue") + return false, serviceerror.NewInternal("should have a build id directive on versioned sticky queue") } // For add, any version in the compatible set maps to the set. // Note data may be nil here, findVersion will return -1 then. setIdx, indexInSet := worker_versioning.FindBuildId(data, buildId) if setIdx < 0 { - // A poller is using a build ID but we don't know about that build ID. 
See comments in + // A poller is using a build ID, but we don't know about that build ID. See comments in // lookupVersionSetForAdd. If we consider it the default for its set, then we should // leave it on the sticky queue here. - return nil + return true, nil } // If this is not the set's default anymore, we need to kick it back to the regular queue. if indexInSet != len(data.VersionSets[setIdx].BuildIds)-1 { - return serviceerrors.NewStickyWorkerUnavailable() + return false, serviceerrors.NewStickyWorkerUnavailable() } - return nil + return false, nil } -// getSetID returns an arbitrary but consistent member of the set. -// We want Add and Poll requests for the same set to converge on a single id so we can match +// getSetIds returns an arbitrary but consistent member of the set, and the rest of the set. +// We want Add and Poll requests for the same set to converge on a single id, so we can match // them, but we don't have a single id for a set in the general case: in rare cases we may have // multiple ids (due to failovers). We can do this by picking an arbitrary id in the set, e.g. // the first. If the versioning data changes in any way, we'll re-resolve the set id, so this // choice only has to be consistent within one version of the versioning data. (For correct // handling of spooled tasks in Add, this does need to be an actual set id, not an arbitrary // string.) -func getSetID(set *persistencespb.CompatibleVersionSet) string { - return set.SetIds[0] +func getSetIds(set *persistencespb.CompatibleVersionSet) (string, []string) { + return set.SetIds[0], set.SetIds[1:] } // ClearTombstones clears all tombstone build ids (with STATE_DELETED) from versioning data. @@ -485,3 +481,39 @@ }) return modifiedData } + +func PersistUnknownBuildId(clock hlc.Clock, data *persistencespb.VersioningData, buildId string) *persistencespb.VersioningData { + guessedSetId := hashBuildId(buildId) + + if foundSetId, _ := worker_versioning.FindBuildId(data, buildId); foundSetId >= 0 { + // it's already there. make sure its set id is present. + set := data.VersionSets[foundSetId] + if slices.Contains(set.SetIds, guessedSetId) { + return data + } + + // if not, add the guessed set id + newSet := shallowCloneVersionSet(set) + newSet.SetIds = append(newSet.SetIds, guessedSetId) + newData := shallowCloneVersioningData(data) + newData.VersionSets[foundSetId] = newSet + return newData + } + + // insert unknown build id with zero time so that if merged with any other set, the other + // will become the default. 
+ clock = hlc.Zero(clock.ClusterId) + + newData := shallowCloneVersioningData(data) + newData.VersionSets = slices.Insert(newData.VersionSets, 0, &persistencespb.CompatibleVersionSet{ + SetIds: []string{guessedSetId}, + BuildIds: []*persistencespb.BuildId{{ + Id: buildId, + State: persistencespb.STATE_ACTIVE, + StateUpdateTimestamp: &clock, + BecameDefaultTimestamp: &clock, + }}, + BecameDefaultTimestamp: &clock, + }) + return newData +} diff -Nru temporal-1.21.5-1/src/service/matching/version_sets_merge_test.go temporal-1.22.5/src/service/matching/version_sets_merge_test.go --- temporal-1.21.5-1/src/service/matching/version_sets_merge_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/matching/version_sets_merge_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -30,6 +30,7 @@ "github.com/stretchr/testify/assert" persistencespb "go.temporal.io/server/api/persistence/v1" + commonclock "go.temporal.io/server/common/clock" hlc "go.temporal.io/server/common/clock/hybrid_logical_clock" ) @@ -181,3 +182,39 @@ assert.Equal(t, b, MergeVersioningData(a, b)) assert.Equal(t, b, MergeVersioningData(b, a)) } + +func TestPersistUnknownBuildId_Merge(t *testing.T) { + t.Parallel() + clock := hlc.Next(hlc.Zero(1), commonclock.NewRealTimeSource()) + initialData := mkInitialData(2, clock) // ids: "0", "1" + + // on a's side, 1.1 was added as unknown + a := PersistUnknownBuildId(clock, initialData, "1.1") + + // on b's side, 1.1 was added compatible with 1 + req := mkNewCompatReq("1.1", "1", true) + nextClock := hlc.Next(clock, commonclock.NewRealTimeSource()) + b, err := UpdateVersionSets(nextClock, initialData, req, 0, 0) + assert.NoError(t, err) + + // now merge them. we should see 1.1 in a set with 1, but it should have two set ids + ab := MergeVersioningData(a, b) + expected := &persistencespb.VersioningData{ + VersionSets: []*persistencespb.CompatibleVersionSet{ + mkSingleBuildIdSet("0", clock), + { + SetIds: []string{hashBuildId("1"), hashBuildId("1.1")}, + BuildIds: []*persistencespb.BuildId{ + mkBuildId("1", clock), + mkBuildId("1.1", nextClock), + }, + BecameDefaultTimestamp: &nextClock, + }, + }, + } + assert.Equal(t, expected, ab) + + // the other way too + ba := MergeVersioningData(b, a) + assert.Equal(t, expected, ba) +} diff -Nru temporal-1.21.5-1/src/service/matching/version_sets_test.go temporal-1.22.5/src/service/matching/version_sets_test.go --- temporal-1.21.5-1/src/service/matching/version_sets_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/matching/version_sets_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -851,3 +851,46 @@ _, err2 := UpdateVersionSets(nextClock, initialData, req2, 0, 0) assert.Error(t, err2) } + +func TestPersistUnknownBuildId(t *testing.T) { + t.Parallel() + clock := hlc.Next(hlc.Zero(1), commonclock.NewRealTimeSource()) + initialData := mkInitialData(2, clock) + + actual := PersistUnknownBuildId(clock, initialData, "new-build-id") + assert.Equal(t, 3, len(actual.VersionSets)) + newSet := actual.VersionSets[0] + assert.Equal(t, 1, len(newSet.BuildIds)) + assert.Equal(t, "new-build-id", newSet.BuildIds[0].Id) +} + +func TestPersistUnknownBuildIdAlreadyThere(t *testing.T) { + t.Parallel() + clock := hlc.Next(hlc.Zero(1), commonclock.NewRealTimeSource()) + + initial := &persistencespb.VersioningData{ + VersionSets: []*persistencespb.CompatibleVersionSet{ + { + SetIds: []string{hashBuildId("1")}, + BuildIds: []*persistencespb.BuildId{mkBuildId("1", clock), mkBuildId("2", clock)}, + BecameDefaultTimestamp: &clock, + }, + 
}, + } + + actual := PersistUnknownBuildId(clock, initial, "1") + assert.Equal(t, initial, actual) + + // build id is already there but adds set id + actual = PersistUnknownBuildId(clock, initial, "2") + expected := &persistencespb.VersioningData{ + VersionSets: []*persistencespb.CompatibleVersionSet{ + { + SetIds: []string{hashBuildId("1"), hashBuildId("2")}, + BuildIds: []*persistencespb.BuildId{mkBuildId("1", clock), mkBuildId("2", clock)}, + BecameDefaultTimestamp: &clock, + }, + }, + } + assert.Equal(t, expected, actual) +} diff -Nru temporal-1.21.5-1/src/service/worker/deletenamespace/fx.go temporal-1.22.5/src/service/worker/deletenamespace/fx.go --- temporal-1.21.5-1/src/service/worker/deletenamespace/fx.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/worker/deletenamespace/fx.go 2024-02-23 09:45:43.000000000 +0000 @@ -29,11 +29,11 @@ "go.temporal.io/sdk/workflow" "go.uber.org/fx" - "go.temporal.io/server/api/historyservice/v1" "go.temporal.io/server/common/log" "go.temporal.io/server/common/metrics" "go.temporal.io/server/common/persistence" "go.temporal.io/server/common/persistence/visibility/manager" + "go.temporal.io/server/common/resource" workercommon "go.temporal.io/server/service/worker/common" "go.temporal.io/server/service/worker/deletenamespace/deleteexecutions" "go.temporal.io/server/service/worker/deletenamespace/reclaimresources" @@ -44,7 +44,7 @@ deleteNamespaceComponent struct { visibilityManager manager.VisibilityManager metadataManager persistence.MetadataManager - historyClient historyservice.HistoryServiceClient + historyClient resource.HistoryClient metricsHandler metrics.Handler logger log.Logger } @@ -62,7 +62,7 @@ func newComponent( visibilityManager manager.VisibilityManager, metadataManager persistence.MetadataManager, - historyClient historyservice.HistoryServiceClient, + historyClient resource.HistoryClient, metricsHandler metrics.Handler, logger log.Logger, ) component { diff -Nru temporal-1.21.5-1/src/service/worker/fx.go temporal-1.22.5/src/service/worker/fx.go --- temporal-1.21.5-1/src/service/worker/fx.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/worker/fx.go 2024-02-23 09:45:43.000000000 +0000 @@ -25,11 +25,8 @@ package worker import ( - "context" - "go.uber.org/fx" - "go.temporal.io/server/common" "go.temporal.io/server/common/config" "go.temporal.io/server/common/dynamicconfig" "go.temporal.io/server/common/log" @@ -79,6 +76,7 @@ serviceConfig.PersistenceNamespaceMaxQPS, serviceConfig.PersistencePerShardNamespaceMaxQPS, serviceConfig.EnablePersistencePriorityRateLimiting, + serviceConfig.OperatorRPSRatio, serviceConfig.PersistenceDynamicRateLimitingParams, ) } @@ -114,6 +112,7 @@ searchAttributesMapperProvider, serviceConfig.VisibilityPersistenceMaxReadQPS, serviceConfig.VisibilityPersistenceMaxWriteQPS, + serviceConfig.OperatorRPSRatio, serviceConfig.EnableReadFromSecondaryVisibility, dynamicconfig.GetStringPropertyFn(visibility.SecondaryVisibilityWritingModeOff), // worker visibility never write serviceConfig.VisibilityDisableOrderByClause, @@ -123,26 +122,6 @@ ) } -func ServiceLifetimeHooks( - lc fx.Lifecycle, - svcStoppedCh chan struct{}, - svc *Service, -) { - lc.Append( - fx.Hook{ - OnStart: func(context.Context) error { - go func(svc common.Daemon, svcStoppedCh chan<- struct{}) { - // Start is blocked until Stop() is called. 
- svc.Start() - close(svcStoppedCh) - }(svc, svcStoppedCh) - - return nil - }, - OnStop: func(ctx context.Context) error { - svc.Stop() - return nil - }, - }, - ) +func ServiceLifetimeHooks(lc fx.Lifecycle, svc *Service) { + lc.Append(fx.StartStopHook(svc.Start, svc.Stop)) } diff -Nru temporal-1.21.5-1/src/service/worker/migration/activities.go temporal-1.22.5/src/service/worker/migration/activities.go --- temporal-1.21.5-1/src/service/worker/migration/activities.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/worker/migration/activities.go 2024-02-23 09:45:43.000000000 +0000 @@ -39,13 +39,16 @@ "go.temporal.io/sdk/activity" "go.temporal.io/sdk/temporal" + "go.temporal.io/server/api/adminservice/v1" enumsspb "go.temporal.io/server/api/enums/v1" "go.temporal.io/server/api/historyservice/v1" replicationspb "go.temporal.io/server/api/replication/v1" + serverClient "go.temporal.io/server/client" "go.temporal.io/server/client/admin" "go.temporal.io/server/common/definition" "go.temporal.io/server/common/headers" + "go.temporal.io/server/common/log" "go.temporal.io/server/common/log/tag" "go.temporal.io/server/common/metrics" "go.temporal.io/server/common/namespace" @@ -55,6 +58,21 @@ ) type ( + activities struct { + historyShardCount int32 + executionManager persistence.ExecutionManager + taskManager persistence.TaskManager + namespaceRegistry namespace.Registry + historyClient historyservice.HistoryServiceClient + frontendClient workflowservice.WorkflowServiceClient + clientFactory serverClient.Factory + clientBean serverClient.Bean + logger log.Logger + metricsHandler metrics.Handler + forceReplicationMetricsHandler metrics.Handler + namespaceReplicationQueue persistence.NamespaceReplicationQueue + } + SkippedWorkflowExecution struct { WorkflowExecution commonpb.WorkflowExecution Reason string @@ -313,10 +331,14 @@ return err } - stateTransitionCount := resp.StateTransitionCount - for stateTransitionCount > 0 { - token := util.Min(int(stateTransitionCount), rateLimiter.Burst()) - stateTransitionCount -= int64(token) + // If workflow has many activity retries (bug in activity code e.g.,), the state transition count can be + // large but the number of actual state transition that is applied on target cluster can be very small. + // Take the minimum between StateTransitionCount and HistoryLength as heuristic to avoid unnecessary throttling + // in such situation. + count := util.Min(resp.StateTransitionCount, resp.HistoryLength) + for count > 0 { + token := util.Min(int(count), rateLimiter.Burst()) + count -= int64(token) _ = rateLimiter.ReserveN(time.Now(), token) } @@ -638,6 +660,9 @@ Reason: reason, }) + case *serviceerror.NamespaceNotFound: + return false, skippedList, temporal.NewNonRetryableApplicationError("remoteClient.DescribeMutableState call failed", "NamespaceNotFound", err) + default: a.forceReplicationMetricsHandler.WithTags(metrics.NamespaceTag(request.Namespace), metrics.ServiceErrorTypeTag(err)). 
Counter(metrics.VerifyReplicationTaskFailed.GetMetricName()).Record(1) @@ -658,13 +683,24 @@ func (a *activities) VerifyReplicationTasks(ctx context.Context, request *verifyReplicationTasksRequest) (verifyReplicationTasksResponse, error) { ctx = headers.SetCallerInfo(ctx, headers.NewPreemptableCallerInfo(request.Namespace)) - remoteClient := a.clientFactory.NewRemoteAdminClientWithTimeout( - request.TargetClusterEndpoint, - admin.DefaultTimeout, - admin.DefaultLargeTimeout, - ) - var response verifyReplicationTasksResponse + var remoteClient adminservice.AdminServiceClient + var err error + + if len(request.TargetClusterName) > 0 { + remoteClient, err = a.clientBean.GetRemoteAdminClient(request.TargetClusterName) + if err != nil { + return response, err + } + } else { + // TODO: remove once TargetClusterEndpoint is no longer used. + remoteClient = a.clientFactory.NewRemoteAdminClientWithTimeout( + request.TargetClusterEndpoint, + admin.DefaultTimeout, + admin.DefaultLargeTimeout, + ) + } + var details replicationTasksHeartbeatDetails if activity.HasHeartbeatDetails(ctx) { if err := activity.GetHeartbeatDetails(ctx, &details); err != nil { diff -Nru temporal-1.21.5-1/src/service/worker/migration/activities_test.go temporal-1.22.5/src/service/worker/migration/activities_test.go --- temporal-1.21.5-1/src/service/worker/migration/activities_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/worker/migration/activities_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -35,6 +35,7 @@ "go.temporal.io/sdk/interceptor" "go.temporal.io/sdk/testsuite" "go.temporal.io/sdk/worker" + "go.temporal.io/server/api/adminservice/v1" "go.temporal.io/server/api/adminservicemock/v1" enumsspb "go.temporal.io/server/api/enums/v1" @@ -60,6 +61,7 @@ mockNamespaceReplicationQueue *persistence.MockNamespaceReplicationQueue mockNamespaceRegistry *namespace.MockRegistry mockClientFactory *client.MockFactory + mockClientBean *client.MockBean mockFrontendClient *workflowservicemock.MockWorkflowServiceClient mockHistoryClient *historyservicemock.MockHistoryServiceClient @@ -74,7 +76,7 @@ const ( mockedNamespace = "test_namespace" mockedNamespaceID = "test_namespace_id" - remoteRpcAddress = "remote" + remoteCluster = "remote_cluster" ) var ( @@ -123,7 +125,7 @@ s.mockTaskManager = persistence.NewMockTaskManager(s.controller) s.mockNamespaceReplicationQueue = persistence.NewMockNamespaceReplicationQueue(s.controller) s.mockNamespaceRegistry = namespace.NewMockRegistry(s.controller) - s.mockClientFactory = client.NewMockFactory(s.controller) + s.mockClientBean = client.NewMockBean(s.controller) s.mockFrontendClient = workflowservicemock.NewMockWorkflowServiceClient(s.controller) s.mockHistoryClient = historyservicemock.NewMockHistoryServiceClient(s.controller) @@ -134,8 +136,7 @@ s.mockMetricsHandler.EXPECT().WithTags(gomock.Any()).Return(s.mockMetricsHandler).AnyTimes() s.mockMetricsHandler.EXPECT().Timer(gomock.Any()).Return(metrics.NoopTimerMetricFunc).AnyTimes() s.mockMetricsHandler.EXPECT().Counter(gomock.Any()).Return(metrics.NoopCounterMetricFunc).AnyTimes() - s.mockClientFactory.EXPECT().NewRemoteAdminClientWithTimeout(remoteRpcAddress, gomock.Any(), gomock.Any()). - Return(s.mockRemoteAdminClient).AnyTimes() + s.mockClientBean.EXPECT().GetRemoteAdminClient(remoteCluster).Return(s.mockRemoteAdminClient, nil).AnyTimes() s.mockNamespaceRegistry.EXPECT().GetNamespaceName(gomock.Any()). 
Return(namespace.Name(mockedNamespace), nil).AnyTimes() s.mockNamespaceRegistry.EXPECT().GetNamespace(gomock.Any()). @@ -145,6 +146,7 @@ namespaceRegistry: s.mockNamespaceRegistry, namespaceReplicationQueue: s.mockNamespaceReplicationQueue, clientFactory: s.mockClientFactory, + clientBean: s.mockClientBean, taskManager: s.mockTaskManager, frontendClient: s.mockFrontendClient, historyClient: s.mockHistoryClient, @@ -171,10 +173,10 @@ env, iceptor := s.initEnv() request := verifyReplicationTasksRequest{ - Namespace: mockedNamespace, - NamespaceID: mockedNamespaceID, - TargetClusterEndpoint: remoteRpcAddress, - Executions: []commonpb.WorkflowExecution{execution1, execution2}, + Namespace: mockedNamespace, + NamespaceID: mockedNamespaceID, + TargetClusterName: remoteCluster, + Executions: []commonpb.WorkflowExecution{execution1, execution2}, } // Immediately replicated @@ -244,10 +246,10 @@ } request := verifyReplicationTasksRequest{ - Namespace: mockedNamespace, - NamespaceID: mockedNamespaceID, - TargetClusterEndpoint: remoteRpcAddress, - Executions: []commonpb.WorkflowExecution{execution1}, + Namespace: mockedNamespace, + NamespaceID: mockedNamespaceID, + TargetClusterName: remoteCluster, + Executions: []commonpb.WorkflowExecution{execution1}, } start := time.Now() @@ -289,10 +291,10 @@ func (s *activitiesSuite) TestVerifyReplicationTasks_FailedNotFound() { env, iceptor := s.initEnv() request := verifyReplicationTasksRequest{ - Namespace: mockedNamespace, - NamespaceID: mockedNamespaceID, - TargetClusterEndpoint: remoteRpcAddress, - Executions: []commonpb.WorkflowExecution{execution1}, + Namespace: mockedNamespace, + NamespaceID: mockedNamespaceID, + TargetClusterName: remoteCluster, + Executions: []commonpb.WorkflowExecution{execution1}, } s.mockHistoryClient.EXPECT().DescribeMutableState(gomock.Any(), &historyservice.DescribeMutableStateRequest{ @@ -325,10 +327,10 @@ func (s *activitiesSuite) TestVerifyReplicationTasks_AlreadyVerified() { env, iceptor := s.initEnv() request := verifyReplicationTasksRequest{ - Namespace: mockedNamespace, - NamespaceID: mockedNamespaceID, - TargetClusterEndpoint: remoteRpcAddress, - Executions: []commonpb.WorkflowExecution{execution1, execution2}, + Namespace: mockedNamespace, + NamespaceID: mockedNamespaceID, + TargetClusterName: remoteCluster, + Executions: []commonpb.WorkflowExecution{execution1, execution2}, } // Set NextIndex to indicate all executions have been verified. No additional mock is needed. 
@@ -391,9 +393,9 @@ func (s *activitiesSuite) Test_verifyReplicationTasks() { request := verifyReplicationTasksRequest{ - Namespace: mockedNamespace, - NamespaceID: mockedNamespaceID, - TargetClusterEndpoint: remoteRpcAddress, + Namespace: mockedNamespace, + NamespaceID: mockedNamespaceID, + TargetClusterName: remoteCluster, } ctx := context.TODO() @@ -460,13 +462,11 @@ } func (s *activitiesSuite) Test_verifyReplicationTasksSkipRetention() { - bias := time.Minute request := verifyReplicationTasksRequest{ - Namespace: mockedNamespace, - NamespaceID: mockedNamespaceID, - TargetClusterEndpoint: remoteRpcAddress, - RetentionBiasDuration: bias, - Executions: []commonpb.WorkflowExecution{execution1}, + Namespace: mockedNamespace, + NamespaceID: mockedNamespaceID, + TargetClusterName: remoteCluster, + Executions: []commonpb.WorkflowExecution{execution1}, } var tests = []struct { diff -Nru temporal-1.21.5-1/src/service/worker/migration/force_replication_workflow.go temporal-1.22.5/src/service/worker/migration/force_replication_workflow.go --- temporal-1.21.5-1/src/service/worker/migration/force_replication_workflow.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/worker/migration/force_replication_workflow.go 2024-02-23 09:45:43.000000000 +0000 @@ -61,9 +61,10 @@ // Used for verifying workflow executions were replicated successfully on target cluster. EnableVerification bool - TargetClusterEndpoint string `validate:"required"` - VerifyIntervalInSeconds int `validate:"gte=0"` - RetentionBiasInSeconds int `validate:"gte=0"` + TargetClusterEndpoint string + TargetClusterName string + VerifyIntervalInSeconds int `validate:"gte=0"` + RetentionBiasInSeconds int `validate:"gte=0"` // Used by query handler to indicate overall progress of replication LastCloseTime time.Time @@ -107,6 +108,7 @@ Namespace string NamespaceID string TargetClusterEndpoint string + TargetClusterName string VerifyInterval time.Duration `validate:"gte=0"` RetentionBiasDuration time.Duration `validate:"gte=0"` Executions []commonpb.WorkflowExecution @@ -282,6 +284,10 @@ return temporal.NewNonRetryableApplicationError("InvalidArgument: Namespace is required", "InvalidArgument", nil) } + if params.EnableVerification && len(params.TargetClusterEndpoint) == 0 && len(params.TargetClusterName) == 0 { + return temporal.NewNonRetryableApplicationError("InvalidArgument: TargetClusterEndpoint or TargetClusterName is required with verification enabled", "InvalidArgument", nil) + } + if params.ConcurrentActivityCount <= 0 { params.ConcurrentActivityCount = 1 } @@ -405,6 +411,7 @@ if params.EnableVerification { verifyTaskFuture := workflow.ExecuteActivity(actx, a.VerifyReplicationTasks, &verifyReplicationTasksRequest{ TargetClusterEndpoint: params.TargetClusterEndpoint, + TargetClusterName: params.TargetClusterName, Namespace: params.Namespace, NamespaceID: namespaceID, Executions: workflowExecutions, diff -Nru temporal-1.21.5-1/src/service/worker/migration/force_replication_workflow_test.go temporal-1.22.5/src/service/worker/migration/force_replication_workflow_test.go --- temporal-1.21.5-1/src/service/worker/migration/force_replication_workflow_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/worker/migration/force_replication_workflow_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -43,6 +43,7 @@ "go.temporal.io/sdk/temporal" "go.temporal.io/sdk/testsuite" "go.temporal.io/sdk/worker" + replicationspb "go.temporal.io/server/api/replication/v1" "go.temporal.io/server/common/log" 
"go.temporal.io/server/common/persistence" @@ -98,6 +99,7 @@ ListWorkflowsPageSize: 1, PageCountPerExecution: 4, EnableVerification: true, + TargetClusterEndpoint: "test-target", }) require.True(t, env.IsWorkflowCompleted()) @@ -165,6 +167,7 @@ ListWorkflowsPageSize: 1, PageCountPerExecution: maxPageCountPerExecution, EnableVerification: true, + TargetClusterEndpoint: "test-target", }) require.True(t, env.IsWorkflowCompleted()) @@ -307,6 +310,7 @@ ListWorkflowsPageSize: 1, PageCountPerExecution: 4, EnableVerification: true, + TargetClusterEndpoint: "test-target", }) require.True(t, env.IsWorkflowCompleted()) @@ -362,6 +366,7 @@ ListWorkflowsPageSize: 1, PageCountPerExecution: 4, EnableVerification: true, + TargetClusterEndpoint: "test-target", }) require.True(t, env.IsWorkflowCompleted()) diff -Nru temporal-1.21.5-1/src/service/worker/migration/fx.go temporal-1.22.5/src/service/worker/migration/fx.go --- temporal-1.21.5-1/src/service/worker/migration/fx.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/worker/migration/fx.go 2024-02-23 09:45:43.000000000 +0000 @@ -30,13 +30,13 @@ "go.temporal.io/sdk/workflow" "go.uber.org/fx" - "go.temporal.io/server/api/historyservice/v1" serverClient "go.temporal.io/server/client" "go.temporal.io/server/common/config" "go.temporal.io/server/common/log" "go.temporal.io/server/common/metrics" "go.temporal.io/server/common/namespace" "go.temporal.io/server/common/persistence" + "go.temporal.io/server/common/resource" workercommon "go.temporal.io/server/service/worker/common" ) @@ -46,9 +46,10 @@ PersistenceConfig *config.Persistence ExecutionManager persistence.ExecutionManager NamespaceRegistry namespace.Registry - HistoryClient historyservice.HistoryServiceClient + HistoryClient resource.HistoryClient FrontendClient workflowservice.WorkflowServiceClient ClientFactory serverClient.Factory + ClientBean serverClient.Bean NamespaceReplicationQueue persistence.NamespaceReplicationQueue TaskManager persistence.TaskManager Logger log.Logger @@ -98,6 +99,7 @@ historyClient: wc.HistoryClient, frontendClient: wc.FrontendClient, clientFactory: wc.ClientFactory, + clientBean: wc.ClientBean, namespaceReplicationQueue: wc.NamespaceReplicationQueue, taskManager: wc.TaskManager, logger: wc.Logger, diff -Nru temporal-1.21.5-1/src/service/worker/migration/handover_workflow.go temporal-1.22.5/src/service/worker/migration/handover_workflow.go --- temporal-1.21.5-1/src/service/worker/migration/handover_workflow.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/worker/migration/handover_workflow.go 2024-02-23 09:45:43.000000000 +0000 @@ -28,16 +28,8 @@ "time" enumspb "go.temporal.io/api/enums/v1" - "go.temporal.io/api/workflowservice/v1" "go.temporal.io/sdk/temporal" "go.temporal.io/sdk/workflow" - - "go.temporal.io/server/api/historyservice/v1" - serverClient "go.temporal.io/server/client" - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/metrics" - "go.temporal.io/server/common/namespace" - "go.temporal.io/server/common/persistence" ) const ( @@ -60,20 +52,6 @@ HandoverTimeoutSeconds int } - activities struct { - historyShardCount int32 - executionManager persistence.ExecutionManager - taskManager persistence.TaskManager - namespaceRegistry namespace.Registry - historyClient historyservice.HistoryServiceClient - frontendClient workflowservice.WorkflowServiceClient - clientFactory serverClient.Factory - logger log.Logger - metricsHandler metrics.Handler - forceReplicationMetricsHandler metrics.Handler - 
namespaceReplicationQueue persistence.NamespaceReplicationQueue - } - replicationStatus struct { MaxReplicationTaskIds map[int32]int64 // max replication task id for each shard. } diff -Nru temporal-1.21.5-1/src/service/worker/pernamespaceworker.go temporal-1.22.5/src/service/worker/pernamespaceworker.go --- temporal-1.21.5-1/src/service/worker/pernamespaceworker.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/worker/pernamespaceworker.go 2024-02-23 09:45:43.000000000 +0000 @@ -222,12 +222,14 @@ func (wm *perNamespaceWorkerManager) membershipChangedListener() { loop: for { + timer := time.NewTimer(refreshInterval) select { case _, ok := <-wm.membershipChangedCh: + timer.Stop() if !ok { break loop } - case <-time.After(refreshInterval): + case <-timer.C: } wm.refreshAll() } diff -Nru temporal-1.21.5-1/src/service/worker/replicator/namespace_replication_message_processor.go temporal-1.22.5/src/service/worker/replicator/namespace_replication_message_processor.go --- temporal-1.21.5-1/src/service/worker/replicator/namespace_replication_message_processor.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/worker/replicator/namespace_replication_message_processor.go 2024-02-23 09:45:43.000000000 +0000 @@ -242,9 +242,21 @@ switch task.TaskType { case enumsspb.REPLICATION_TASK_TYPE_NAMESPACE_TASK: - return p.namespaceTaskExecutor.Execute(ctx, task.GetNamespaceTaskAttributes()) + attr := task.GetNamespaceTaskAttributes() + err := p.namespaceTaskExecutor.Execute(ctx, attr) + if err != nil { + p.logger.Error("unable to process namespace replication task", + tag.WorkflowNamespaceID(attr.Id)) + } + return err case enumsspb.REPLICATION_TASK_TYPE_TASK_QUEUE_USER_DATA: - return p.handleTaskQueueUserDataReplicationTask(ctx, task.GetTaskQueueUserDataAttributes()) + attr := task.GetTaskQueueUserDataAttributes() + err := p.handleTaskQueueUserDataReplicationTask(ctx, attr) + if err != nil { + p.logger.Error(fmt.Sprintf("unable to process task queue metadata replication task, %v", attr.TaskQueueName), + tag.WorkflowNamespaceID(attr.NamespaceId)) + } + return err default: return fmt.Errorf("cannot handle replication task of type %v", task.TaskType) } diff -Nru temporal-1.21.5-1/src/service/worker/scanner/build_ids/scavenger.go temporal-1.22.5/src/service/worker/scanner/build_ids/scavenger.go --- temporal-1.21.5-1/src/service/worker/scanner/build_ids/scavenger.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/worker/scanner/build_ids/scavenger.go 2024-02-23 09:45:43.000000000 +0000 @@ -36,7 +36,8 @@ "go.temporal.io/server/api/matchingservice/v1" persistencespb "go.temporal.io/server/api/persistence/v1" - "go.temporal.io/server/common/clock/hybrid_logical_clock" + "go.temporal.io/server/common" + hlc "go.temporal.io/server/common/clock/hybrid_logical_clock" "go.temporal.io/server/common/dynamicconfig" "go.temporal.io/server/common/log" "go.temporal.io/server/common/log/tag" @@ -68,6 +69,7 @@ BuildIdScavangerInput struct { NamespaceListPageSize int TaskQueueListPageSize int + IgnoreRetentionTime bool // If true, consider build ids added since retention time also } Activities struct { @@ -206,12 +208,17 @@ return err } for heartbeat.TaskQueueIdx < len(tqResponse.Entries) { - if ctx.Err() != nil { - return ctx.Err() - } entry := tqResponse.Entries[heartbeat.TaskQueueIdx] - if err := a.processUserDataEntry(ctx, rateLimiter, *heartbeat, ns, entry); err != nil { - // Intentionally don't fail the activity on single entry. 
+ if err := a.processUserDataEntry(ctx, rateLimiter, input, *heartbeat, ns, entry); err != nil { + if common.IsContextDeadlineExceededErr(err) { + // This is either a real DeadlineExceeded from the context, or the rate limiter + // thinks there's not enough time left until the deadline. Either way, we're done. + return err + } else if ctx.Err() != nil { + // Also return on context.Canceled. + return ctx.Err() + } + // Intentionally don't fail the activity on other single entry errors. a.logger.Error("Failed to update task queue user data", tag.WorkflowNamespace(ns.Name().String()), tag.WorkflowTaskQueueName(entry.TaskQueue), @@ -233,11 +240,12 @@ func (a *Activities) processUserDataEntry( ctx context.Context, rateLimiter quotas.RateLimiter, + input BuildIdScavangerInput, heartbeat heartbeatDetails, ns *namespace.Namespace, entry *persistence.TaskQueueUserDataEntry, ) error { - buildIdsToRemove, err := a.findBuildIdsToRemove(ctx, rateLimiter, heartbeat, ns, entry) + buildIdsToRemove, err := a.findBuildIdsToRemove(ctx, rateLimiter, input, heartbeat, ns, entry) if err != nil { return err } @@ -261,13 +269,25 @@ func (a *Activities) findBuildIdsToRemove( ctx context.Context, rateLimiter quotas.RateLimiter, + input BuildIdScavangerInput, heartbeat heartbeatDetails, ns *namespace.Namespace, entry *persistence.TaskQueueUserDataEntry, ) ([]string, error) { + // Only consider build ids that have been active at least as long as the retention time. + // This assumes that when a build id is added, it's used soon afterwards. + // This lets us avoid making visibility queries that would probably find some workflows. + retention := ns.Retention() + // Don't consider build ids that were recently the default, since there may be workers + // still processing tasks or data that hasn't made it to visibility yet. + removableBuildIdDurationSinceDefault := a.removableBuildIdDurationSinceDefault() + versioningData := entry.UserData.Data.GetVersioningData() var buildIdsToRemove []string for setIdx, set := range versioningData.GetVersionSets() { + // Note that setActive counts build ids that may have associated workflows, i.e. not + // just all with STATE_ACTIVE. Also note that we always examine the default build id + // for a set last, so setActive will be 1 + the number of active non-default build ids. setActive := len(set.BuildIds) for buildIdIdx, buildId := range set.BuildIds { if buildId.State == persistencespb.STATE_DELETED { @@ -276,21 +296,25 @@ } buildIdIsSetDefault := buildIdIdx == len(set.BuildIds)-1 setIsQueueDefault := setIdx == len(versioningData.VersionSets)-1 - // Don't remove if build id is the queue default of there's another active build id in this set. + // Don't remove if build id is the queue default or there's another active build id in + // this set, since we might need to dispatch new tasks to this set. But if no build ids + // are active for the whole set, we can remove them all. 
if buildIdIsSetDefault && (setIsQueueDefault || setActive > 1) { continue } - timeSinceWasDefault := time.Since(hybrid_logical_clock.UTC(*buildId.BecameDefaultTimestamp)) - if timeSinceWasDefault < a.removableBuildIdDurationSinceDefault() { + if hlc.SincePtr(buildId.BecameDefaultTimestamp) < removableBuildIdDurationSinceDefault { + continue + } + if !input.IgnoreRetentionTime && hlc.SincePtr(buildId.StateUpdateTimestamp) < retention { continue } if err := rateLimiter.Wait(ctx); err != nil { - return buildIdsToRemove, err + return nil, context.DeadlineExceeded } exists, err := worker_versioning.WorkflowsExistForBuildId(ctx, a.visibilityManager, ns, entry.TaskQueue, buildId.Id) if err != nil { - return buildIdsToRemove, err + return nil, err } a.recordHeartbeat(ctx, heartbeat) if !exists { diff -Nru temporal-1.21.5-1/src/service/worker/scanner/build_ids/scavenger_test.go temporal-1.22.5/src/service/worker/scanner/build_ids/scavenger_test.go --- temporal-1.21.5-1/src/service/worker/scanner/build_ids/scavenger_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/worker/scanner/build_ids/scavenger_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -49,13 +49,16 @@ "go.temporal.io/server/common/namespace" "go.temporal.io/server/common/persistence" "go.temporal.io/server/common/persistence/visibility/manager" + "go.temporal.io/server/common/primitives/timestamp" "go.temporal.io/server/common/quotas" "go.temporal.io/server/common/worker_versioning" "google.golang.org/grpc" ) func Test_findBuildIdsToRemove_AcceptsNilVersioningData(t *testing.T) { - a := &Activities{} + a := &Activities{ + removableBuildIdDurationSinceDefault: dynamicconfig.GetDurationPropertyFn(time.Hour), + } ctx := context.Background() c0 := hlc.Zero(0) @@ -64,13 +67,20 @@ VersioningData: nil, } - buildIdsRemoved, err := a.findBuildIdsToRemove(ctx, nil, heartbeatDetails{}, namespace.NewNamespaceForTest(nil, nil, false, nil, 0), &persistence.TaskQueueUserDataEntry{ - TaskQueue: "test", - UserData: &persistencespb.VersionedTaskQueueUserData{ - Version: 0, - Data: userData, + buildIdsRemoved, err := a.findBuildIdsToRemove( + ctx, + nil, + BuildIdScavangerInput{}, + heartbeatDetails{}, + namespace.NewNamespaceForTest(nil, nil, false, nil, 0), + &persistence.TaskQueueUserDataEntry{ + TaskQueue: "test", + UserData: &persistencespb.VersionedTaskQueueUserData{ + Version: 0, + Data: userData, + }, }, - }) + ) require.NoError(t, err) require.Equal(t, []string(nil), buildIdsRemoved) require.True(t, hlc.Equal(c0, *userData.Clock)) @@ -184,6 +194,20 @@ BecameDefaultTimestamp: &c0, }, { + SetIds: []string{"v4.1"}, + BuildIds: []*persistencespb.BuildId{ + { + Id: "v4.1", + State: persistencespb.STATE_ACTIVE, + // We should not even query for this one since we assume it was + // used soon after being added + StateUpdateTimestamp: &c1, + BecameDefaultTimestamp: &c0, + }, + }, + BecameDefaultTimestamp: &c0, + }, + { SetIds: []string{"v5"}, BuildIds: []*persistencespb.BuildId{ { @@ -199,14 +223,24 @@ }, } + ns := namespace.NewNamespaceForTest(nil, &persistencespb.NamespaceConfig{ + Retention: timestamp.DurationPtr(24 * time.Hour), + }, false, nil, 0) act := func(ctx context.Context) ([]string, error) { - return a.findBuildIdsToRemove(ctx, rateLimiter, heartbeatDetails{}, namespace.NewNamespaceForTest(nil, nil, false, nil, 0), &persistence.TaskQueueUserDataEntry{ - TaskQueue: "test", - UserData: &persistencespb.VersionedTaskQueueUserData{ - Version: 0, - Data: userData, + return a.findBuildIdsToRemove( + ctx, + rateLimiter, + 
BuildIdScavangerInput{}, + heartbeatDetails{}, + ns, + &persistence.TaskQueueUserDataEntry{ + TaskQueue: "test", + UserData: &persistencespb.VersionedTaskQueueUserData{ + Version: 0, + Data: userData, + }, }, - }) + ) } env.RegisterActivity(act) removedBuildIDsEncoded, err := env.ExecuteActivity(act) @@ -302,7 +336,6 @@ NextPageToken: []byte{}, }, nil } else { - // nolint:goerr113 return nil, errors.New("invalid NextPageToken") } }, diff -Nru temporal-1.21.5-1/src/service/worker/scanner/executions/scavenger.go temporal-1.22.5/src/service/worker/scanner/executions/scavenger.go --- temporal-1.21.5-1/src/service/worker/scanner/executions/scavenger.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/worker/scanner/executions/scavenger.go 2024-02-23 09:45:43.000000000 +0000 @@ -204,11 +204,13 @@ outstanding := s.executor.TaskCount() for outstanding > 0 { + timer := time.NewTimer(executorPollInterval) select { - case <-time.After(executorPollInterval): + case <-timer.C: outstanding = s.executor.TaskCount() s.metricsHandler.Gauge(metrics.ExecutionsOutstandingCount.GetMetricName()).Record(float64(outstanding)) case <-s.stopC: + timer.Stop() return } } diff -Nru temporal-1.21.5-1/src/service/worker/scanner/taskqueue/handler.go temporal-1.22.5/src/service/worker/scanner/taskqueue/handler.go --- temporal-1.21.5-1/src/service/worker/scanner/taskqueue/handler.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/worker/scanner/taskqueue/handler.go 2024-02-23 09:45:43.000000000 +0000 @@ -29,10 +29,10 @@ "sync/atomic" "time" - persistencespb "go.temporal.io/server/api/persistence/v1" "go.temporal.io/server/common/log/tag" p "go.temporal.io/server/common/persistence" "go.temporal.io/server/common/primitives/timestamp" + "go.temporal.io/server/service/matching" "go.temporal.io/server/service/worker/scanner/executor" ) @@ -82,7 +82,7 @@ for _, task := range resp.Tasks { nProcessed++ - if !IsTaskExpired(task) { + if !matching.IsTaskExpired(task) { return handlerStatusDone } } @@ -141,13 +141,3 @@ tag.WorkflowNamespaceID(key.NamespaceID), tag.WorkflowTaskQueueName(key.TaskQueueName), tag.WorkflowTaskQueueType(key.TaskQueueType), tag.NumberProcessed(nProcessed), tag.NumberDeleted(nDeleted)) } } - -// TODO https://github.com/temporalio/temporal/issues/1021 -// -// there should be more validation logic here -// 1. if task has valid TTL -> TTL reached -> delete -// 2. 
if task has 0 TTL / no TTL -> logic need to additionally check if corresponding workflow still exists -func IsTaskExpired(t *persistencespb.AllocatedTaskInfo) bool { - expiry := timestamp.TimeValue(t.GetData().GetExpiryTime()) - return expiry.Unix() > 0 && expiry.Before(time.Now()) -} diff -Nru temporal-1.21.5-1/src/service/worker/scanner/taskqueue/scavenger.go temporal-1.22.5/src/service/worker/scanner/taskqueue/scavenger.go --- temporal-1.21.5-1/src/service/worker/scanner/taskqueue/scavenger.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/worker/scanner/taskqueue/scavenger.go 2024-02-23 09:45:43.000000000 +0000 @@ -194,11 +194,13 @@ func (s *Scavenger) awaitExecutor() { outstanding := s.executor.TaskCount() for outstanding > 0 { + timer := time.NewTimer(executorPollInterval) select { - case <-time.After(executorPollInterval): + case <-timer.C: outstanding = s.executor.TaskCount() s.metricsHandler.Gauge(metrics.TaskQueueOutstandingCount.GetMetricName()).Record(float64(outstanding)) case <-s.stopC: + timer.Stop() return } } diff -Nru temporal-1.21.5-1/src/service/worker/scheduler/fx.go temporal-1.22.5/src/service/worker/scheduler/fx.go --- temporal-1.21.5-1/src/service/worker/scheduler/fx.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/worker/scheduler/fx.go 2024-02-23 09:45:43.000000000 +0000 @@ -31,12 +31,12 @@ sdkworker "go.temporal.io/sdk/worker" "go.temporal.io/sdk/workflow" - "go.temporal.io/server/api/historyservice/v1" "go.temporal.io/server/common/dynamicconfig" "go.temporal.io/server/common/log" "go.temporal.io/server/common/metrics" "go.temporal.io/server/common/namespace" "go.temporal.io/server/common/quotas" + "go.temporal.io/server/common/resource" workercommon "go.temporal.io/server/service/worker/common" ) @@ -56,7 +56,7 @@ fx.In MetricsHandler metrics.Handler Logger log.Logger - HistoryClient historyservice.HistoryServiceClient + HistoryClient resource.HistoryClient FrontendClient workflowservice.WorkflowServiceClient } diff -Nru temporal-1.21.5-1/src/service/worker/scheduler/replay_test.go temporal-1.22.5/src/service/worker/scheduler/replay_test.go --- temporal-1.21.5-1/src/service/worker/scheduler/replay_test.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/service/worker/scheduler/replay_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,67 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package scheduler_test + +import ( + "compress/gzip" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" + "go.temporal.io/sdk/client" + "go.temporal.io/sdk/worker" + "go.temporal.io/sdk/workflow" + + "go.temporal.io/server/common/log" + "go.temporal.io/server/service/worker/scheduler" +) + +// TestReplays tests workflow logic backwards compatibility from previous versions. +// Whenever there's a change in logic, consider capturing a new history with the +// testdata/generate_history.sh script and checking it in. +func TestReplays(t *testing.T) { + replayer := worker.NewWorkflowReplayer() + replayer.RegisterWorkflowWithOptions(scheduler.SchedulerWorkflow, workflow.RegisterOptions{Name: scheduler.WorkflowType}) + + files, err := filepath.Glob("testdata/replay_*.json.gz") + require.NoError(t, err) + + logger := log.NewSdkLogger(log.NewTestLogger()) + + for _, filename := range files { + logger.Info("Replaying", "file", filename) + f, err := os.Open(filename) + require.NoError(t, err) + r, err := gzip.NewReader(f) + require.NoError(t, err) + history, err := client.HistoryFromJSON(r, client.HistoryJSONOptions{}) + require.NoError(t, err) + err = replayer.ReplayWorkflowHistory(logger, history) + require.NoError(t, err) + _ = r.Close() + _ = f.Close() + } +} diff -Nru temporal-1.21.5-1/src/service/worker/scheduler/testdata/generate_history.sh temporal-1.22.5/src/service/worker/scheduler/testdata/generate_history.sh --- temporal-1.21.5-1/src/service/worker/scheduler/testdata/generate_history.sh 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/service/worker/scheduler/testdata/generate_history.sh 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,57 @@ +#!/usr/bin/env bash +# +# Consider running this script to generate a new history for TestReplays +# whenever there's some change to the scheduler workflow. +# To use it, run a local server (any backend) and run this script. 
+ +set -x + +id=sched1 + +# shellcheck disable=SC2064 +trap "temporal schedule delete -s '$id'" EXIT + +temporal schedule create -s "$id" \ + --overlap-policy bufferall \ + --interval 10s \ + --jitter 8s \ + -w mywf \ + -t mytq \ + --workflow-type mywf \ + --execution-timeout 5 + +sleep 50 # ~5 normal actions, some may be buffered + +# backfill 3 actions +temporal schedule backfill -s "$id" \ + --overlap-policy allowall \ + --start-time 2022-05-09T11:22:22Z \ + --end-time 2022-05-09T11:22:55Z + +sleep 22 # another 2-3 normal actions + +# trigger a couple (will definitely buffer) +temporal schedule trigger -s sched1 +sleep 3 +temporal schedule trigger -s sched1 + +# pause +temporal schedule toggle -s sched1 --pause --reason testing +sleep 21 +temporal schedule toggle -s sched1 --unpause --reason testing + +# update +temporal schedule update -s "$id" \ + --calendar '{"hour":"*","minute":"*","second":"*/5"}' \ + --remaining-actions 1 \ + -w mywf \ + -t mytq \ + --workflow-type mywf \ + --execution-timeout 3 + +sleep 12 +# should have used one action and be idle now + +# capture history +now=$(date +%s) +temporal workflow show -w "temporal-sys-scheduler:$id" -o json | gzip -9c > "replay_$now.json.gz" Binary files /tmp/tmp59wy0n4p/rKeWR6Ifus/temporal-1.21.5-1/src/service/worker/scheduler/testdata/replay_v1.19.1.json.gz and /tmp/tmp59wy0n4p/kO5fuMPlFY/temporal-1.22.5/src/service/worker/scheduler/testdata/replay_v1.19.1.json.gz differ Binary files /tmp/tmp59wy0n4p/rKeWR6Ifus/temporal-1.21.5-1/src/service/worker/scheduler/testdata/replay_v1.20.4.json.gz and /tmp/tmp59wy0n4p/kO5fuMPlFY/temporal-1.22.5/src/service/worker/scheduler/testdata/replay_v1.20.4.json.gz differ Binary files /tmp/tmp59wy0n4p/rKeWR6Ifus/temporal-1.21.5-1/src/service/worker/scheduler/testdata/replay_v1.21.3.json.gz and /tmp/tmp59wy0n4p/kO5fuMPlFY/temporal-1.22.5/src/service/worker/scheduler/testdata/replay_v1.21.3.json.gz differ Binary files /tmp/tmp59wy0n4p/rKeWR6Ifus/temporal-1.21.5-1/src/service/worker/scheduler/testdata/replay_v1.22.0.json.gz and /tmp/tmp59wy0n4p/kO5fuMPlFY/temporal-1.22.5/src/service/worker/scheduler/testdata/replay_v1.22.0.json.gz differ Binary files /tmp/tmp59wy0n4p/rKeWR6Ifus/temporal-1.21.5-1/src/service/worker/scheduler/testdata/replay_v1.23-pre.json.gz and /tmp/tmp59wy0n4p/kO5fuMPlFY/temporal-1.22.5/src/service/worker/scheduler/testdata/replay_v1.23-pre.json.gz differ diff -Nru temporal-1.21.5-1/src/service/worker/scheduler/workflow.go temporal-1.22.5/src/service/worker/scheduler/workflow.go --- temporal-1.21.5-1/src/service/worker/scheduler/workflow.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/worker/scheduler/workflow.go 2024-02-23 09:45:43.000000000 +0000 @@ -56,12 +56,18 @@ type SchedulerWorkflowVersion int64 const ( + // Versions of workflow logic. When introducing a new version, consider generating a new + // history for TestReplays using generate_history.sh. + // represents the state before Version is introduced InitialVersion SchedulerWorkflowVersion = 0 // skip over entire time range if paused and batch and cache getNextTime queries BatchAndCacheTimeQueries = 1 // use cache v2, and include ids in jitter NewCacheAndJitter = 2 + // Don't put possibly-overlapping runs (from SCHEDULE_OVERLAP_POLICY_ALLOW_ALL) in + // RunningWorkflows. + DontTrackOverlapping = 3 ) const ( @@ -72,9 +78,10 @@ // id, used for validation in the frontend. 
AppendedTimestampForValidation = "-2009-11-10T23:00:00Z" - SignalNameUpdate = "update" - SignalNamePatch = "patch" - SignalNameRefresh = "refresh" + SignalNameUpdate = "update" + SignalNamePatch = "patch" + SignalNameRefresh = "refresh" + SignalNameForceCAN = "force-continue-as-new" QueryNameDescribe = "describe" QueryNameListMatchingTimes = "listMatchingTimes" @@ -90,6 +97,8 @@ rateLimitedErrorType = "RateLimited" nextTimeCacheV1Size = 10 + + impossibleHistorySize = 1e6 // just for testing, no real history can be this long ) type ( @@ -115,6 +124,7 @@ // Signal requests pendingPatch *schedpb.SchedulePatch pendingUpdate *schedspb.FullUpdateRequest + forceCAN bool uuidBatch []string @@ -134,8 +144,8 @@ RecentActionCount int // The number of recent actual action results to include in Describe. FutureActionCountForList int // The number of future action times to include in List (search attr). RecentActionCountForList int // The number of recent actual action results to include in List (search attr). - IterationsBeforeContinueAsNew int - SleepWhilePaused bool // If true, don't set timers while paused/out of actions + IterationsBeforeContinueAsNew int // Number of iterations per run, or 0 to use server-suggested + SleepWhilePaused bool // If true, don't set timers while paused/out of actions // MaxBufferSize limits the number of buffered starts. This also limits the number of // workflows that can be backfilled at once (since they all have to fit in the buffer). MaxBufferSize int @@ -144,6 +154,9 @@ NextTimeCacheV2Size int // Size of next time cache (v2) Version SchedulerWorkflowVersion // Used to keep track of schedules version to release new features and for backward compatibility // version 0 corresponds to the schedule version that comes before introducing the Version parameter + + // When introducing a new field with new workflow logic, consider generating a new + // history for TestReplays using generate_history.sh. } nextTimeCacheV2 struct { @@ -173,20 +186,20 @@ currentTweakablePolicies = tweakablePolicies{ DefaultCatchupWindow: 365 * 24 * time.Hour, MinCatchupWindow: 10 * time.Second, - RetentionTime: 0, // TODO: enable later: 7 * 24 * time.Hour, + RetentionTime: 7 * 24 * time.Hour, CanceledTerminatedCountAsFailures: false, AlwaysAppendTimestamp: true, FutureActionCount: 10, RecentActionCount: 10, FutureActionCountForList: 5, RecentActionCountForList: 5, - IterationsBeforeContinueAsNew: 500, + IterationsBeforeContinueAsNew: 0, SleepWhilePaused: true, MaxBufferSize: 1000, AllowZeroSleep: true, ReuseTimer: true, - NextTimeCacheV2Size: 14, // see note below - Version: BatchAndCacheTimeQueries, // TODO: set later: NewCacheAndJitter + NextTimeCacheV2Size: 14, // see note below + Version: NewCacheAndJitter, // TODO: switch to DontTrackOverlapping } // Note on NextTimeCacheV2Size: This value must be > FutureActionCountForList. 
Each @@ -240,7 +253,19 @@ s.pendingPatch = s.InitialPatch s.InitialPatch = nil - for iters := s.tweakables.IterationsBeforeContinueAsNew; iters > 0 || s.pendingUpdate != nil || s.pendingPatch != nil; iters-- { + iters := s.tweakables.IterationsBeforeContinueAsNew + for { + info := workflow.GetInfo(s.ctx) + suggestContinueAsNew := info.GetCurrentHistoryLength() >= impossibleHistorySize + if s.tweakables.IterationsBeforeContinueAsNew > 0 { + suggestContinueAsNew = suggestContinueAsNew || iters <= 0 + iters-- + } else { + suggestContinueAsNew = suggestContinueAsNew || info.GetContinueAsNewSuggested() || s.forceCAN + } + if suggestContinueAsNew && s.pendingUpdate == nil && s.pendingPatch == nil { + break + } t1 := timestamp.TimeValue(s.State.LastProcessedTime) t2 := s.now() @@ -279,7 +304,6 @@ // 3. a workflow that we were watching finished s.sleep(nextWakeup) s.updateTweakables() - } // Any watcher activities will get cancelled automatically if running. @@ -568,6 +592,9 @@ refreshCh := workflow.GetSignalChannel(s.ctx, SignalNameRefresh) sel.AddReceive(refreshCh, s.handleRefreshSignal) + forceCAN := workflow.GetSignalChannel(s.ctx, SignalNameForceCAN) + sel.AddReceive(forceCAN, s.handleForceCANSignal) + // if we're paused or out of actions, we don't need to wake up until we get an update if s.tweakables.SleepWhilePaused && !s.canTakeScheduledAction(false, false) { nextWakeup = time.Time{} @@ -702,6 +729,12 @@ s.State.NeedRefresh = true } +func (s *scheduler) handleForceCANSignal(ch workflow.ReceiveChannel, _ bool) { + ch.Receive(s.ctx, nil) + s.logger.Debug("got force-continue-as-new signal") + s.forceCAN = true +} + func (s *scheduler) processSignals() bool { scheduleChanged := false if s.pendingPatch != nil { @@ -754,6 +787,7 @@ // this is a query handler, don't modify s.Info directly infoCopy := *s.Info infoCopy.FutureActionTimes = s.getFutureActionTimes(false, s.tweakables.FutureActionCount) + infoCopy.BufferSize = int64(len(s.State.BufferedStarts)) return &schedspb.DescribeResponse{ Schedule: s.Schedule, @@ -888,6 +922,7 @@ if s.tweakables.MaxBufferSize > 0 && len(s.State.BufferedStarts) >= s.tweakables.MaxBufferSize { s.logger.Warn("Buffer too large", "start-time", nominalTime, "overlap-policy", overlapPolicy, "manual", manual) s.metrics.Counter(metrics.ScheduleBufferOverruns.GetMetricName()).Inc(1) + s.Info.BufferDropped += 1 return } s.State.BufferedStarts = append(s.State.BufferedStarts, &schedspb.BufferedStart{ @@ -956,7 +991,8 @@ continue } metricsWithTag.Counter(metrics.ScheduleActionSuccess.GetMetricName()).Inc(1) - s.recordAction(result) + nonOverlapping := start == action.nonOverlappingStart + s.recordAction(result, nonOverlapping) } // Terminate or cancel if required (terminate overrides cancel if both are present) @@ -985,10 +1021,11 @@ return tryAgain } -func (s *scheduler) recordAction(result *schedpb.ScheduleActionResult) { +func (s *scheduler) recordAction(result *schedpb.ScheduleActionResult, nonOverlapping bool) { s.Info.ActionCount++ s.Info.RecentActions = util.SliceTail(append(s.Info.RecentActions, result), s.tweakables.RecentActionCount) - if result.StartWorkflowResult != nil { + canTrack := nonOverlapping || !s.hasMinVersion(DontTrackOverlapping) + if canTrack && result.StartWorkflowResult != nil { s.Info.RunningWorkflows = append(s.Info.RunningWorkflows, result.StartWorkflowResult) } } @@ -1021,6 +1058,13 @@ } ctx := workflow.WithLocalActivityOptions(s.ctx, options) + lastCompletionResult, continuedFailure := s.State.LastCompletionResult, s.State.ContinuedFailure + 
if start.OverlapPolicy == enumspb.SCHEDULE_OVERLAP_POLICY_ALLOW_ALL && s.hasMinVersion(DontTrackOverlapping) { + // ALLOW_ALL runs don't participate in lastCompletionResult/continuedFailure at all + lastCompletionResult = nil + continuedFailure = nil + } + req := &schedspb.StartWorkflowRequest{ Request: &workflowservice.StartWorkflowExecutionRequest{ WorkflowId: workflowID, @@ -1037,8 +1081,8 @@ Memo: newWorkflow.Memo, SearchAttributes: s.addSearchAttributes(newWorkflow.SearchAttributes, nominalTimeSec), Header: newWorkflow.Header, - LastCompletionResult: s.State.LastCompletionResult, - ContinuedFailure: s.State.ContinuedFailure, + LastCompletionResult: lastCompletionResult, + ContinuedFailure: continuedFailure, }, } for { diff -Nru temporal-1.21.5-1/src/service/worker/scheduler/workflow_test.go temporal-1.22.5/src/service/worker/scheduler/workflow_test.go --- temporal-1.21.5-1/src/service/worker/scheduler/workflow_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/worker/scheduler/workflow_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -48,7 +48,6 @@ "go.temporal.io/server/common/payloads" "go.temporal.io/server/common/primitives/timestamp" "go.temporal.io/server/common/searchattribute" - "go.temporal.io/server/common/util" ) type ( @@ -192,7 +191,12 @@ result enumspb.WorkflowExecutionStatus } -func (s *workflowSuite) setupMocksForWorkflows(runs []workflowRun, started map[string]time.Time) { +type runAcrossContinueState struct { + started map[string]time.Time + finished bool +} + +func (s *workflowSuite) setupMocksForWorkflows(runs []workflowRun, state *runAcrossContinueState) { for _, run := range runs { run := run // capture fresh value // set up start @@ -201,10 +205,10 @@ }) s.env.OnActivity(new(activities).StartWorkflow, mock.Anything, matchStart).Times(0).Maybe().Return( func(_ context.Context, req *schedspb.StartWorkflowRequest) (*schedspb.StartWorkflowResponse, error) { - if _, ok := started[req.Request.WorkflowId]; ok { + if _, ok := state.started[req.Request.WorkflowId]; ok { s.Failf("multiple starts for %s", req.Request.WorkflowId) } - started[req.Request.WorkflowId] = s.now() + state.started[req.Request.WorkflowId] = s.now() return &schedspb.StartWorkflowResponse{ RunId: uuid.NewString(), RealStartTime: timestamp.TimePtr(time.Now()), @@ -234,13 +238,20 @@ } type delayedCallback struct { - at time.Time - f func() + at time.Time + f func() + finishTest bool } -func (s *workflowSuite) setupDelayedCallbacks(start time.Time, cbs []delayedCallback) { +func (s *workflowSuite) setupDelayedCallbacks(start time.Time, cbs []delayedCallback, state *runAcrossContinueState) { for _, cb := range cbs { if delay := cb.at.Sub(start); delay > 0 { + if cb.finishTest { + cb.f = func() { + s.env.SetCurrentHistoryLength(impossibleHistorySize) // signals workflow loop to exit + state.finished = true // signals test to exit + } + } s.env.RegisterDelayedCallback(cb.f, delay) } } @@ -250,7 +261,6 @@ runs []workflowRun, cbs []delayedCallback, sched *schedpb.Schedule, - maxIterations int, ) { // fill this in so callers don't need to sched.Action = s.defaultAction("myid") @@ -268,21 +278,21 @@ ConflictToken: InitialConflictToken, }, } - iterations := maxIterations - gotRuns := make(map[string]time.Time) + currentTweakablePolicies.IterationsBeforeContinueAsNew = every + state := runAcrossContinueState{ + started: make(map[string]time.Time), + } for { s.env = s.NewTestWorkflowEnvironment() s.env.SetStartTime(startTime) - s.setupMocksForWorkflows(runs, gotRuns) - 
s.setupDelayedCallbacks(startTime, cbs) - - currentTweakablePolicies.IterationsBeforeContinueAsNew = util.Min(iterations, every) + s.setupMocksForWorkflows(runs, &state) + s.setupDelayedCallbacks(startTime, cbs, &state) - s.T().Logf("starting workflow for %d iterations out of %d remaining, %d total, start time %s", - currentTweakablePolicies.IterationsBeforeContinueAsNew, iterations, maxIterations, startTime) + s.T().Logf("starting workflow with CAN every %d iterations, start time %s", + currentTweakablePolicies.IterationsBeforeContinueAsNew, startTime) s.env.ExecuteWorkflow(SchedulerWorkflow, startArgs) - s.T().Logf("finished workflow, time is now %s", s.now()) + s.T().Logf("finished workflow, time is now %s, finished is %v", s.now(), state.finished) s.True(s.env.IsWorkflowCompleted()) result := s.env.GetWorkflowError() @@ -291,8 +301,7 @@ s.env.AssertExpectations(s.T()) - iterations -= currentTweakablePolicies.IterationsBeforeContinueAsNew - if iterations == 0 { + if state.finished { break } @@ -301,9 +310,9 @@ s.NoError(payloads.Decode(canErr.Input, &startArgs)) } // check starts that we actually got - s.Require().Equal(len(runs), len(gotRuns)) + s.Require().Equal(len(runs), len(state.started)) for _, run := range runs { - s.Truef(run.start.Equal(gotRuns[run.id]), "%v != %v", run.start, gotRuns[run.id]) + s.Truef(run.start.Equal(state.started[run.id]), "%v != %v", run.start, state.started[run.id]) } } } @@ -513,6 +522,10 @@ s.Equal([]string{"myid-2022-06-01T00:15:00Z"}, s.runningWorkflows()) }, }, + { + at: time.Date(2022, 6, 1, 0, 18, 0, 0, time.UTC), + finishTest: true, + }, }, &schedpb.Schedule{ Spec: &schedpb.ScheduleSpec{ @@ -524,7 +537,6 @@ OverlapPolicy: enumspb.SCHEDULE_OVERLAP_POLICY_SKIP, }, }, - 4, ) } @@ -544,7 +556,7 @@ end: time.Date(2022, 6, 1, 0, 29, 0, 0, time.UTC), result: enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED, }, - // skipped over :15, :20 + // skipped over :15, :20, :25 { id: "myid-2022-06-01T00:30:00Z", start: time.Date(2022, 6, 1, 0, 30, 0, 0, time.UTC), @@ -558,9 +570,41 @@ f: func() { s.Equal([]string{"myid-2022-06-01T00:05:00Z"}, s.runningWorkflows()) }, }, { + at: time.Date(2022, 6, 1, 0, 11, 0, 0, time.UTC), + f: func() { + s.Equal(int64(1), s.describe().Info.BufferSize) + s.Equal(int64(0), s.describe().Info.OverlapSkipped) + }, + }, + { + at: time.Date(2022, 6, 1, 0, 16, 0, 0, time.UTC), + f: func() { + s.Equal(int64(1), s.describe().Info.BufferSize) + s.Equal(int64(1), s.describe().Info.OverlapSkipped) + }, + }, + { + at: time.Date(2022, 6, 1, 0, 26, 0, 0, time.UTC), + f: func() { + s.Equal(int64(1), s.describe().Info.BufferSize) + s.Equal(int64(3), s.describe().Info.OverlapSkipped) + }, + }, + { at: time.Date(2022, 6, 1, 0, 31, 0, 0, time.UTC), f: func() { s.Equal([]string{"myid-2022-06-01T00:30:00Z"}, s.runningWorkflows()) }, }, + { + at: time.Date(2022, 6, 1, 0, 32, 0, 0, time.UTC), + f: func() { + s.Equal(int64(0), s.describe().Info.BufferSize) + s.Equal(int64(3), s.describe().Info.OverlapSkipped) + }, + }, + { + at: time.Date(2022, 6, 1, 0, 34, 59, 0, time.UTC), + finishTest: true, + }, }, &schedpb.Schedule{ Spec: &schedpb.ScheduleSpec{ @@ -572,7 +616,6 @@ OverlapPolicy: enumspb.SCHEDULE_OVERLAP_POLICY_BUFFER_ONE, }, }, - 8, ) } @@ -623,6 +666,88 @@ at: time.Date(2022, 6, 1, 0, 22, 30, 0, time.UTC), f: func() { s.Equal([]string{"myid-2022-06-01T00:20:00Z"}, s.runningWorkflows()) }, }, + { + at: time.Date(2022, 6, 1, 0, 29, 30, 0, time.UTC), + finishTest: true, + }, + }, + &schedpb.Schedule{ + Spec: &schedpb.ScheduleSpec{ + Interval: 
[]*schedpb.IntervalSpec{{ + Interval: timestamp.DurationPtr(5 * time.Minute), + }}, + }, + Policies: &schedpb.SchedulePolicies{ + OverlapPolicy: enumspb.SCHEDULE_OVERLAP_POLICY_BUFFER_ALL, + }, + }, + ) +} + +func (s *workflowSuite) TestBufferLimit() { + originalMaxBufferSize := currentTweakablePolicies.MaxBufferSize + currentTweakablePolicies.MaxBufferSize = 2 + defer func() { currentTweakablePolicies.MaxBufferSize = originalMaxBufferSize }() + + s.runAcrossContinue( + []workflowRun{ + { + id: "myid-2022-06-01T00:05:00Z", + start: time.Date(2022, 6, 1, 0, 5, 0, 0, time.UTC), + end: time.Date(2022, 6, 1, 0, 22, 0, 0, time.UTC), + result: enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED, + }, + // first buffered one: + { + id: "myid-2022-06-01T00:10:00Z", + start: time.Date(2022, 6, 1, 0, 22, 0, 0, time.UTC), + end: time.Date(2022, 6, 1, 0, 23, 0, 0, time.UTC), + result: enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED, + }, + // next buffered one, and also one more gets buffered: + { + id: "myid-2022-06-01T00:15:00Z", + start: time.Date(2022, 6, 1, 0, 23, 0, 0, time.UTC), + end: time.Date(2022, 6, 1, 0, 24, 0, 0, time.UTC), + result: enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED, + }, + // run :20 does not fit in the buffer. finally back on track for :25 + { + id: "myid-2022-06-01T00:25:00Z", + start: time.Date(2022, 6, 1, 0, 25, 0, 0, time.UTC), + end: time.Date(2022, 6, 1, 0, 27, 0, 0, time.UTC), + result: enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED, + }, + }, + []delayedCallback{ + { + at: time.Date(2022, 6, 1, 0, 20, 30, 0, time.UTC), + f: func() { + s.Equal([]string{"myid-2022-06-01T00:05:00Z"}, s.runningWorkflows()) + s.Equal(int64(2), s.describe().Info.BufferSize) + s.Equal(int64(1), s.describe().Info.BufferDropped) + }, + }, + { + at: time.Date(2022, 6, 1, 0, 23, 30, 0, time.UTC), + f: func() { + s.Equal([]string{"myid-2022-06-01T00:15:00Z"}, s.runningWorkflows()) + s.Equal(int64(0), s.describe().Info.BufferSize) + s.Equal(int64(1), s.describe().Info.BufferDropped) + }, + }, + { + at: time.Date(2022, 6, 1, 0, 25, 30, 0, time.UTC), + f: func() { + s.Equal([]string{"myid-2022-06-01T00:25:00Z"}, s.runningWorkflows()) + s.Equal(int64(0), s.describe().Info.BufferSize) + s.Equal(int64(1), s.describe().Info.BufferDropped) + }, + }, + { + at: time.Date(2022, 6, 1, 0, 29, 30, 0, time.UTC), + finishTest: true, + }, }, &schedpb.Schedule{ Spec: &schedpb.ScheduleSpec{ @@ -634,7 +759,6 @@ OverlapPolicy: enumspb.SCHEDULE_OVERLAP_POLICY_BUFFER_ALL, }, }, - 9, ) } @@ -738,7 +862,6 @@ } func (s *workflowSuite) TestOverlapAllowAll() { - // also contains tests for RunningWorkflows and refresh, since it's convenient to do here s.runAcrossContinue( []workflowRun{ { @@ -768,66 +891,8 @@ }, []delayedCallback{ { - at: time.Date(2022, 6, 1, 0, 6, 0, 0, time.UTC), - f: func() { s.Equal([]string{"myid-2022-06-01T00:05:00Z"}, s.runningWorkflows()) }, - }, - { - at: time.Date(2022, 6, 1, 0, 11, 0, 0, time.UTC), - f: func() { - s.Equal([]string{"myid-2022-06-01T00:05:00Z", "myid-2022-06-01T00:10:00Z"}, s.runningWorkflows()) - }, - }, - { - at: time.Date(2022, 6, 1, 0, 15, 30, 0, time.UTC), - f: func() { - s.Equal([]string{"myid-2022-06-01T00:05:00Z", "myid-2022-06-01T00:10:00Z", "myid-2022-06-01T00:15:00Z"}, s.runningWorkflows()) - }, - }, - { - at: time.Date(2022, 6, 1, 0, 16, 30, 0, time.UTC), - f: func() { - // :15 has ended here, but we won't know until we refresh since we don't have a long-poll watcher - s.Equal([]string{"myid-2022-06-01T00:05:00Z", "myid-2022-06-01T00:10:00Z", "myid-2022-06-01T00:15:00Z"}, 
s.runningWorkflows()) - // poke it to refresh - s.env.SignalWorkflow(SignalNameRefresh, nil) - }, - }, - { - at: time.Date(2022, 6, 1, 0, 16, 31, 0, time.UTC), - f: func() { - // now we'll see it end - s.Equal([]string{"myid-2022-06-01T00:05:00Z", "myid-2022-06-01T00:10:00Z"}, s.runningWorkflows()) - }, - }, - { - at: time.Date(2022, 6, 1, 0, 18, 0, 0, time.UTC), - f: func() { - // :05 has ended, but we won't see it yet - s.Equal([]string{"myid-2022-06-01T00:05:00Z", "myid-2022-06-01T00:10:00Z"}, s.runningWorkflows()) - }, - }, - { - at: time.Date(2022, 6, 1, 0, 21, 0, 0, time.UTC), - f: func() { - // we'll see :05 ended because :20 started and did an implicit refresh - s.Equal([]string{"myid-2022-06-01T00:10:00Z", "myid-2022-06-01T00:20:00Z"}, s.runningWorkflows()) - }, - }, - { - at: time.Date(2022, 6, 1, 0, 23, 0, 0, time.UTC), - f: func() { - // we won't see these ended yet - s.Equal([]string{"myid-2022-06-01T00:10:00Z", "myid-2022-06-01T00:20:00Z"}, s.runningWorkflows()) - // poke it to refresh - s.env.SignalWorkflow(SignalNameRefresh, nil) - }, - }, - { - at: time.Date(2022, 6, 1, 0, 23, 1, 0, time.UTC), - f: func() { - // now we will - s.Equal([]string(nil), s.runningWorkflows()) - }, + at: time.Date(2022, 6, 1, 0, 24, 30, 0, time.UTC), + finishTest: true, }, }, &schedpb.Schedule{ @@ -840,7 +905,6 @@ OverlapPolicy: enumspb.SCHEDULE_OVERLAP_POLICY_ALLOW_ALL, }, }, - 7, ) } @@ -958,6 +1022,45 @@ s.True(workflow.IsContinueAsNewError(s.env.GetWorkflowError())) } +func (s *workflowSuite) TestOnlyStartForAllowAll() { + if currentTweakablePolicies.Version < DontTrackOverlapping { + s.T().Skip("test will run after Version updated") + } + // written using low-level mocks so we can check fields of start workflow requests + + s.expectStart(func(req *schedspb.StartWorkflowRequest) (*schedspb.StartWorkflowResponse, error) { + s.Equal("myid-2022-06-01T00:05:00Z", req.Request.WorkflowId) + s.Nil(req.Request.LastCompletionResult) + s.Nil(req.Request.ContinuedFailure) + return nil, nil + }) + s.expectStart(func(req *schedspb.StartWorkflowRequest) (*schedspb.StartWorkflowResponse, error) { + s.Equal("myid-2022-06-01T00:10:00Z", req.Request.WorkflowId) + s.Nil(req.Request.LastCompletionResult) + s.Nil(req.Request.ContinuedFailure) + return nil, nil + }) + s.expectStart(func(req *schedspb.StartWorkflowRequest) (*schedspb.StartWorkflowResponse, error) { + s.Equal("myid-2022-06-01T00:15:00Z", req.Request.WorkflowId) + s.Nil(req.Request.LastCompletionResult) + s.Nil(req.Request.ContinuedFailure) + return nil, nil + }) + + s.run(&schedpb.Schedule{ + Spec: &schedpb.ScheduleSpec{ + Interval: []*schedpb.IntervalSpec{{ + Interval: timestamp.DurationPtr(5 * time.Minute), + }}, + }, + Policies: &schedpb.SchedulePolicies{ + OverlapPolicy: enumspb.SCHEDULE_OVERLAP_POLICY_ALLOW_ALL, + }, + }, 4) + s.True(s.env.IsWorkflowCompleted()) + s.True(workflow.IsContinueAsNewError(s.env.GetWorkflowError())) +} + func (s *workflowSuite) TestPauseOnFailure() { // written using low-level mocks so we can return failures @@ -1057,6 +1160,10 @@ }) }, }, + { + at: time.Date(2022, 6, 1, 0, 54, 0, 0, time.UTC), + finishTest: true, + }, }, &schedpb.Schedule{ Spec: &schedpb.ScheduleSpec{ @@ -1068,7 +1175,6 @@ OverlapPolicy: enumspb.SCHEDULE_OVERLAP_POLICY_SKIP, }, }, - 4, ) } @@ -1120,6 +1226,10 @@ }) }, }, + { + at: time.Date(2022, 7, 31, 19, 6, 0, 0, time.UTC), + finishTest: true, + }, }, &schedpb.Schedule{ Spec: &schedpb.ScheduleSpec{ @@ -1133,7 +1243,6 @@ OverlapPolicy: enumspb.SCHEDULE_OVERLAP_POLICY_SKIP, }, }, - 6, ) } @@ 
-1193,6 +1302,10 @@ s.Equal("go ahead", desc.Schedule.State.Notes) }, }, + { + at: time.Date(2022, 6, 1, 0, 28, 8, 0, time.UTC), + finishTest: true, + }, }, &schedpb.Schedule{ Spec: &schedpb.ScheduleSpec{ @@ -1204,7 +1317,6 @@ OverlapPolicy: enumspb.SCHEDULE_OVERLAP_POLICY_ALLOW_ALL, }, }, - 12, ) } @@ -1277,6 +1389,10 @@ }) }, }, + { + at: time.Date(2022, 6, 1, 0, 19, 30, 0, time.UTC), + finishTest: true, + }, }, &schedpb.Schedule{ Spec: &schedpb.ScheduleSpec{ @@ -1288,7 +1404,6 @@ OverlapPolicy: enumspb.SCHEDULE_OVERLAP_POLICY_SKIP, }, }, - 10, ) } @@ -1330,6 +1445,10 @@ }) }, }, + { + at: time.Date(2022, 6, 1, 1, 7, 55, 0, time.UTC), + finishTest: true, + }, }, &schedpb.Schedule{ Spec: &schedpb.ScheduleSpec{ @@ -1338,7 +1457,6 @@ }}, }, }, - 5, ) } @@ -1398,7 +1516,7 @@ RemainingActions: 2, }, Policies: &schedpb.SchedulePolicies{ - OverlapPolicy: enumspb.SCHEDULE_OVERLAP_POLICY_ALLOW_ALL, + OverlapPolicy: enumspb.SCHEDULE_OVERLAP_POLICY_SKIP, }, }, 4) s.True(s.env.IsWorkflowCompleted()) @@ -1420,16 +1538,15 @@ result: enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED, } } + testEnd := runs[len(runs)-1].end.Add(time.Second) delayedCallbacks := make([]delayedCallback, backfillIterations) - expected := runIterations // schedule some callbacks to spray backfills among scheduled runs // each call back adds random number of backfills in [10, 20) range for i := range delayedCallbacks { maxRuns := rand.Intn(10) + 10 - expected += maxRuns // a point in time to send the callback request offset := i * runIterations / backfillIterations callbackTime := time.Date(2022, 6, 1, offset, 2, 0, 0, time.UTC) @@ -1461,6 +1578,11 @@ } } + delayedCallbacks = append(delayedCallbacks, delayedCallback{ + at: testEnd, + finishTest: true, + }) + s.runAcrossContinue( runs, delayedCallbacks, @@ -1472,13 +1594,10 @@ }, }, }, - expected+1, ) } func (s *workflowSuite) TestExitScheduleWorkflowWhenNoActions() { - s.T().Skip("re-enable later") - scheduleId := "myschedule" s.expectStart(func(req *schedspb.StartWorkflowRequest) (*schedspb.StartWorkflowResponse, error) { s.True(time.Date(2022, 6, 1, 0, 15, 0, 0, time.UTC).Equal(s.now())) @@ -1525,8 +1644,6 @@ } func (s *workflowSuite) TestExitScheduleWorkflowWhenNoNextTime() { - s.T().Skip("re-enable later") - scheduleId := "myschedule" s.expectStart(func(req *schedspb.StartWorkflowRequest) (*schedspb.StartWorkflowResponse, error) { s.True(time.Date(2022, 6, 1, 1, 0, 0, 0, time.UTC).Equal(s.now())) @@ -1563,8 +1680,6 @@ } func (s *workflowSuite) TestExitScheduleWorkflowWhenEmpty() { - s.T().Skip("re-enable later") - scheduleId := "myschedule" currentTweakablePolicies.IterationsBeforeContinueAsNew = 3 @@ -1585,3 +1700,121 @@ s.False(workflow.IsContinueAsNewError(s.env.GetWorkflowError())) s.True(s.env.Now().Sub(baseStartTime) == currentTweakablePolicies.RetentionTime) } + +func (s *workflowSuite) TestCANByIterations() { + // written using low-level mocks so we can control iteration count + + const iters = 30 + // note: one fewer run than iters since the first doesn't start anything + for i := 1; i < iters; i++ { + t := baseStartTime.Add(5 * time.Minute * time.Duration(i)) + s.expectStart(func(req *schedspb.StartWorkflowRequest) (*schedspb.StartWorkflowResponse, error) { + s.Equal("myid-"+t.Format(time.RFC3339), req.Request.WorkflowId) + return nil, nil + }) + } + // this one catches and fails if we go over + s.expectStart(func(req *schedspb.StartWorkflowRequest) (*schedspb.StartWorkflowResponse, error) { + s.Fail("too many starts") + return nil, nil + }).Times(0).Maybe() + 
s.expectWatch(func(req *schedspb.WatchWorkflowRequest) (*schedspb.WatchWorkflowResponse, error) { + return &schedspb.WatchWorkflowResponse{Status: enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED}, nil + }).Times(0).Maybe() + + // this is ignored because we set iters explicitly + s.env.RegisterDelayedCallback(func() { + s.env.SetContinueAsNewSuggested(true) + }, 5*time.Minute*iters/2-time.Second) + + s.run(&schedpb.Schedule{ + Spec: &schedpb.ScheduleSpec{ + Interval: []*schedpb.IntervalSpec{{ + Interval: timestamp.DurationPtr(5 * time.Minute), + }}, + }, + Policies: &schedpb.SchedulePolicies{ + OverlapPolicy: enumspb.SCHEDULE_OVERLAP_POLICY_ALLOW_ALL, + }, + }, iters) + s.True(s.env.IsWorkflowCompleted()) + s.True(workflow.IsContinueAsNewError(s.env.GetWorkflowError())) +} + +func (s *workflowSuite) TestCANBySuggested() { + // written using low-level mocks so we can control iteration count + + const iters = 30 + // note: one fewer run than iters since the first doesn't start anything + for i := 1; i < iters; i++ { + t := baseStartTime.Add(5 * time.Minute * time.Duration(i)) + s.expectStart(func(req *schedspb.StartWorkflowRequest) (*schedspb.StartWorkflowResponse, error) { + s.Equal("myid-"+t.Format(time.RFC3339), req.Request.WorkflowId) + return nil, nil + }) + } + // this one catches and fails if we go over + s.expectStart(func(req *schedspb.StartWorkflowRequest) (*schedspb.StartWorkflowResponse, error) { + s.Fail("too many starts", req.Request.WorkflowId) + return nil, nil + }).Times(0).Maybe() + s.expectWatch(func(req *schedspb.WatchWorkflowRequest) (*schedspb.WatchWorkflowResponse, error) { + return &schedspb.WatchWorkflowResponse{Status: enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED}, nil + }).Times(0).Maybe() + + s.env.RegisterDelayedCallback(func() { + s.env.SetContinueAsNewSuggested(true) + }, 5*time.Minute*iters-time.Second) + + s.run(&schedpb.Schedule{ + Spec: &schedpb.ScheduleSpec{ + Interval: []*schedpb.IntervalSpec{{ + Interval: timestamp.DurationPtr(5 * time.Minute), + }}, + }, + Policies: &schedpb.SchedulePolicies{ + OverlapPolicy: enumspb.SCHEDULE_OVERLAP_POLICY_ALLOW_ALL, + }, + }, 0) // 0 means use suggested + s.True(s.env.IsWorkflowCompleted()) + s.True(workflow.IsContinueAsNewError(s.env.GetWorkflowError())) +} + +func (s *workflowSuite) TestCANBySignal() { + // written using low-level mocks so we can control iteration count + + const iters = 30 + // note: one fewer run than iters since the first doesn't start anything + for i := 1; i < iters; i++ { + t := baseStartTime.Add(5 * time.Minute * time.Duration(i)) + s.expectStart(func(req *schedspb.StartWorkflowRequest) (*schedspb.StartWorkflowResponse, error) { + s.Equal("myid-"+t.Format(time.RFC3339), req.Request.WorkflowId) + return nil, nil + }) + } + // this one catches and fails if we go over + s.expectStart(func(req *schedspb.StartWorkflowRequest) (*schedspb.StartWorkflowResponse, error) { + s.Fail("too many starts", req.Request.WorkflowId) + return nil, nil + }).Times(0).Maybe() + s.expectWatch(func(req *schedspb.WatchWorkflowRequest) (*schedspb.WatchWorkflowResponse, error) { + return &schedspb.WatchWorkflowResponse{Status: enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED}, nil + }).Times(0).Maybe() + + s.env.RegisterDelayedCallback(func() { + s.env.SignalWorkflow(SignalNameForceCAN, nil) + }, 5*time.Minute*iters-time.Second) + + s.run(&schedpb.Schedule{ + Spec: &schedpb.ScheduleSpec{ + Interval: []*schedpb.IntervalSpec{{ + Interval: timestamp.DurationPtr(5 * time.Minute), + }}, + }, + Policies: &schedpb.SchedulePolicies{ + 
OverlapPolicy: enumspb.SCHEDULE_OVERLAP_POLICY_ALLOW_ALL, + }, + }, 0) // 0 means use suggested + s.True(s.env.IsWorkflowCompleted()) + s.True(workflow.IsContinueAsNewError(s.env.GetWorkflowError())) +} diff -Nru temporal-1.21.5-1/src/service/worker/service.go temporal-1.22.5/src/service/worker/service.go --- temporal-1.21.5-1/src/service/worker/service.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/service/worker/service.go 2024-02-23 09:45:43.000000000 +0000 @@ -27,15 +27,13 @@ import ( "context" "math/rand" - "sync/atomic" "time" "go.temporal.io/api/serviceerror" + "go.temporal.io/server/common" - "go.temporal.io/server/api/historyservice/v1" "go.temporal.io/server/api/matchingservice/v1" "go.temporal.io/server/client" - "go.temporal.io/server/common" carchiver "go.temporal.io/server/common/archiver" "go.temporal.io/server/common/archiver/provider" "go.temporal.io/server/common/cluster" @@ -76,7 +74,7 @@ hostInfo membership.HostInfo executionManager persistence.ExecutionManager taskManager persistence.TaskManager - historyClient historyservice.HistoryServiceClient + historyClient resource.HistoryClient namespaceRegistry namespace.Registry workerServiceResolver membership.ServiceResolver visibilityManager manager.VisibilityManager @@ -89,8 +87,6 @@ metricsHandler metrics.Handler - status int32 - stopC chan struct{} sdkClientFactory sdk.ClientFactory esClient esclient.Client config *Config @@ -113,6 +109,7 @@ PersistencePerShardNamespaceMaxQPS dynamicconfig.IntPropertyFnWithNamespaceFilter EnablePersistencePriorityRateLimiting dynamicconfig.BoolPropertyFn PersistenceDynamicRateLimitingParams dynamicconfig.MapPropertyFn + OperatorRPSRatio dynamicconfig.FloatPropertyFn EnableBatcher dynamicconfig.BoolPropertyFn BatcherRPS dynamicconfig.IntPropertyFnWithNamespaceFilter BatcherConcurrency dynamicconfig.IntPropertyFnWithNamespaceFilter @@ -147,7 +144,7 @@ metricsHandler metrics.Handler, metadataManager persistence.MetadataManager, taskManager persistence.TaskManager, - historyClient historyservice.HistoryServiceClient, + historyClient resource.HistoryClient, workerManager *workerManager, perNamespaceWorkerManager *perNamespaceWorkerManager, visibilityManager manager.VisibilityManager, @@ -159,11 +156,9 @@ } s := &Service{ - status: common.DaemonStatusInitialized, config: serviceConfig, sdkClientFactory: sdkClientFactory, esClient: esClient, - stopC: make(chan struct{}), logger: logger, archivalMetadata: archivalMetadata, clusterMetadata: clusterMetadata, @@ -368,6 +363,7 @@ true, ), PersistenceDynamicRateLimitingParams: dc.GetMapProperty(dynamicconfig.WorkerPersistenceDynamicRateLimitingParams, dynamicconfig.DefaultDynamicRateLimitingParams), + OperatorRPSRatio: dc.GetFloat64Property(dynamicconfig.OperatorRPSRatio, common.DefaultOperatorRPSRatio), VisibilityPersistenceMaxReadQPS: visibility.GetVisibilityPersistenceMaxReadQPS(dc, enableReadFromES), VisibilityPersistenceMaxWriteQPS: visibility.GetVisibilityPersistenceMaxWriteQPS(dc, enableReadFromES), @@ -380,14 +376,6 @@ // Start is called to start the service func (s *Service) Start() { - if !atomic.CompareAndSwapInt32( - &s.status, - common.DaemonStatusInitialized, - common.DaemonStatusStarted, - ) { - return - } - s.logger.Info( "worker starting", tag.ComponentWorker, @@ -432,21 +420,10 @@ tag.ComponentWorker, tag.Address(s.hostInfo.GetAddress()), ) - <-s.stopC } // Stop is called to stop the service func (s *Service) Stop() { - if !atomic.CompareAndSwapInt32( - &s.status, - common.DaemonStatusStarted, - 
common.DaemonStatusStopped, - ) { - return - } - - close(s.stopC) - s.scanner.Stop() s.perNamespaceWorkerManager.Stop() s.workerManager.Stop() diff -Nru temporal-1.21.5-1/src/temporal/fx.go temporal-1.22.5/src/temporal/fx.go --- temporal-1.21.5-1/src/temporal/fx.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/temporal/fx.go 2024-02-23 09:45:43.000000000 +0000 @@ -26,6 +26,7 @@ import ( "context" + "errors" "fmt" "strings" @@ -37,8 +38,10 @@ otelsdktrace "go.opentelemetry.io/otel/sdk/trace" semconv "go.opentelemetry.io/otel/semconv/v1.10.0" "go.opentelemetry.io/otel/trace" + "go.temporal.io/api/serviceerror" "go.uber.org/fx" "go.uber.org/fx/fxevent" + "golang.org/x/exp/maps" "google.golang.org/grpc" persistencespb "go.temporal.io/server/api/persistence/v1" @@ -72,6 +75,11 @@ "go.temporal.io/server/service/worker" ) +var ( + clusterMetadataInitErr = errors.New("failed to initialize current cluster metadata") + missingCurrentClusterMetadataErr = errors.New("missing current cluster metadata under clusterMetadata.ClusterInformation") +) + type ( ServicesGroupOut struct { fx.Out @@ -87,7 +95,6 @@ app *fx.App serviceName primitives.ServiceName logger log.Logger - stopChan chan struct{} } ServerFx struct { @@ -194,7 +201,10 @@ // MetricsHandler metricHandler := so.metricHandler if metricHandler == nil { - metricHandler = metrics.MetricsHandlerFromConfig(logger, so.config.Global.Metrics) + metricHandler, err = metrics.MetricsHandlerFromConfig(logger, so.config.Global.Metrics) + if err != nil { + return serverOptionsProvider{}, fmt.Errorf("unable to create metrics handler: %w", err) + } } // DynamicConfigClient @@ -313,13 +323,6 @@ if err != nil { svc.logger.Error("Failed to stop service", tag.Service(svc.serviceName), tag.Error(err)) } - - // verify "Start" goroutine returned - select { - case <-svc.stopChan: - case <-stopCtx.Done(): - svc.logger.Error("Timed out waiting for service to stop", tag.Service(svc.serviceName), tag.NewDurationTag("timeout", serviceStopTimeout)) - } } type ( @@ -351,13 +354,12 @@ } ) -func NewService(app *fx.App, serviceName primitives.ServiceName, logger log.Logger, stopChan chan struct{}) ServicesGroupOut { +func NewService(app *fx.App, serviceName primitives.ServiceName, logger log.Logger) ServicesGroupOut { return ServicesGroupOut{ Services: &ServicesMetadata{ app: app, serviceName: serviceName, logger: logger, - stopChan: stopChan, }, } } @@ -372,10 +374,8 @@ return ServicesGroupOut{}, nil } - stopChan := make(chan struct{}) app := fx.New( fx.Supply( - stopChan, params.EsConfig, params.PersistenceConfig, params.ClusterMetadata, @@ -409,7 +409,7 @@ FxLogAdapter, ) - return NewService(app, serviceName, params.Logger, stopChan), app.Err() + return NewService(app, serviceName, params.Logger), app.Err() } func MatchingServiceProvider( @@ -422,10 +422,8 @@ return ServicesGroupOut{}, nil } - stopChan := make(chan struct{}) app := fx.New( fx.Supply( - stopChan, params.EsConfig, params.PersistenceConfig, params.ClusterMetadata, @@ -456,7 +454,7 @@ FxLogAdapter, ) - return NewService(app, serviceName, params.Logger, stopChan), app.Err() + return NewService(app, serviceName, params.Logger), app.Err() } func FrontendServiceProvider( @@ -480,10 +478,8 @@ return ServicesGroupOut{}, nil } - stopChan := make(chan struct{}) app := fx.New( fx.Supply( - stopChan, params.EsConfig, params.PersistenceConfig, params.ClusterMetadata, @@ -533,7 +529,7 @@ FxLogAdapter, ) - return NewService(app, serviceName, params.Logger, stopChan), app.Err() + return NewService(app, 
serviceName, params.Logger), app.Err() } func WorkerServiceProvider( @@ -546,10 +542,8 @@ return ServicesGroupOut{}, nil } - stopChan := make(chan struct{}) app := fx.New( fx.Supply( - stopChan, params.EsConfig, params.PersistenceConfig, params.ClusterMetadata, @@ -580,7 +574,7 @@ FxLogAdapter, ) - return NewService(app, serviceName, params.Logger, stopChan), app.Err() + return NewService(app, serviceName, params.Logger), app.Err() } // ApplyClusterMetadataConfigProvider performs a config check against the configured persistence store for cluster metadata. @@ -589,212 +583,109 @@ // TODO: move this to cluster.fx func ApplyClusterMetadataConfigProvider( logger log.Logger, - config *config.Config, + svc *config.Config, persistenceServiceResolver resolver.ServiceResolver, persistenceFactoryProvider persistenceClient.FactoryProviderFn, customDataStoreFactory persistenceClient.AbstractDataStoreFactory, + metricsHandler metrics.Handler, ) (*cluster.Config, config.Persistence, error) { ctx := context.TODO() logger = log.With(logger, tag.ComponentMetadataInitializer) - - clusterName := persistenceClient.ClusterName(config.ClusterMetadata.CurrentClusterName) + metricsHandler = metricsHandler.WithTags(metrics.ServiceNameTag(primitives.ServerService)) + clusterName := persistenceClient.ClusterName(svc.ClusterMetadata.CurrentClusterName) dataStoreFactory, _ := persistenceClient.DataStoreFactoryProvider( clusterName, persistenceServiceResolver, - &config.Persistence, + &svc.Persistence, customDataStoreFactory, logger, - nil, + metricsHandler, ) factory := persistenceFactoryProvider(persistenceClient.NewFactoryParams{ DataStoreFactory: dataStoreFactory, - Cfg: &config.Persistence, + Cfg: &svc.Persistence, PersistenceMaxQPS: nil, PersistenceNamespaceMaxQPS: nil, EnablePriorityRateLimiting: nil, - ClusterName: persistenceClient.ClusterName(config.ClusterMetadata.CurrentClusterName), - MetricsHandler: nil, + ClusterName: persistenceClient.ClusterName(svc.ClusterMetadata.CurrentClusterName), + MetricsHandler: metricsHandler, Logger: logger, }) defer factory.Close() clusterMetadataManager, err := factory.NewClusterMetadataManager() if err != nil { - return config.ClusterMetadata, config.Persistence, fmt.Errorf("error initializing cluster metadata manager: %w", err) + return svc.ClusterMetadata, svc.Persistence, fmt.Errorf("error initializing cluster metadata manager: %w", err) } defer clusterMetadataManager.Close() - var sqlIndexNames []string initialIndexSearchAttributes := make(map[string]*persistencespb.IndexSearchAttributes) - if ds := config.Persistence.GetVisibilityStoreConfig(); ds.SQL != nil { - indexName := ds.GetIndexName() - sqlIndexNames = append(sqlIndexNames, indexName) - initialIndexSearchAttributes[indexName] = searchattribute.GetSqlDbIndexSearchAttributes() - } - if ds := config.Persistence.GetSecondaryVisibilityStoreConfig(); ds.SQL != nil { - indexName := ds.GetIndexName() - sqlIndexNames = append(sqlIndexNames, indexName) - initialIndexSearchAttributes[indexName] = searchattribute.GetSqlDbIndexSearchAttributes() - } - - clusterData := config.ClusterMetadata - for clusterName, clusterInfo := range clusterData.ClusterInformation { - if clusterName != clusterData.CurrentClusterName { - logger.Warn( - "ClusterInformation in ClusterMetadata config is deprecated. 
"+ - "Please use TCTL admin tool to configure remote cluster connections", - tag.Key("clusterInformation"), - tag.ClusterName(clusterName), - tag.IgnoredValue(clusterInfo)) - - // Only configure current cluster metadata from static config file - continue - } - - var clusterId string - if uuid.Parse(clusterInfo.ClusterID) == nil { - if clusterInfo.ClusterID != "" { - logger.Warn("Cluster Id in Cluster Metadata config is not a valid uuid. Generating a new Cluster Id") - } - clusterId = uuid.New() - } else { - clusterId = clusterInfo.ClusterID - } - - applied, err := clusterMetadataManager.SaveClusterMetadata( - ctx, - &persistence.SaveClusterMetadataRequest{ - ClusterMetadata: persistencespb.ClusterMetadata{ - HistoryShardCount: config.Persistence.NumHistoryShards, - ClusterName: clusterName, - ClusterId: clusterId, - ClusterAddress: clusterInfo.RPCAddress, - FailoverVersionIncrement: clusterData.FailoverVersionIncrement, - InitialFailoverVersion: clusterInfo.InitialFailoverVersion, - IsGlobalNamespaceEnabled: clusterData.EnableGlobalNamespace, - IsConnectionEnabled: clusterInfo.Enabled, - UseClusterIdMembership: true, // Enable this for new cluster after 1.19. This is to prevent two clusters join into one ring. - IndexSearchAttributes: initialIndexSearchAttributes, - }, - }) - if err != nil { - logger.Warn("Failed to save cluster metadata.", tag.Error(err), tag.ClusterName(clusterName)) - } - if applied { - logger.Info("Successfully saved cluster metadata.", tag.ClusterName(clusterName)) - continue - } + if ds := svc.Persistence.GetVisibilityStoreConfig(); ds.SQL != nil { + initialIndexSearchAttributes[ds.GetIndexName()] = searchattribute.GetSqlDbIndexSearchAttributes() + } + if ds := svc.Persistence.GetSecondaryVisibilityStoreConfig(); ds.SQL != nil { + initialIndexSearchAttributes[ds.GetIndexName()] = searchattribute.GetSqlDbIndexSearchAttributes() + } - resp, err := clusterMetadataManager.GetClusterMetadata( + clusterMetadata := svc.ClusterMetadata + if len(clusterMetadata.ClusterInformation) > 1 { + logger.Warn( + "All remote cluster settings under ClusterMetadata.ClusterInformation config will be ignored. "+ + "Please use TCTL admin tool to configure remote cluster settings", + tag.Key("clusterInformation")) + } + if _, ok := clusterMetadata.ClusterInformation[clusterMetadata.CurrentClusterName]; !ok { + logger.Error("Current cluster setting is missing under clusterMetadata.ClusterInformation", + tag.ClusterName(clusterMetadata.CurrentClusterName)) + return svc.ClusterMetadata, svc.Persistence, missingCurrentClusterMetadataErr + } + ctx = headers.SetCallerInfo(ctx, headers.SystemBackgroundCallerInfo) + resp, err := clusterMetadataManager.GetClusterMetadata( + ctx, + &persistence.GetClusterMetadataRequest{ClusterName: clusterMetadata.CurrentClusterName}, + ) + switch err.(type) { + case nil: + // Update current record + if updateErr := updateCurrentClusterMetadataRecord( ctx, - &persistence.GetClusterMetadataRequest{ClusterName: clusterName}, + clusterMetadataManager, + svc, + initialIndexSearchAttributes, + resp, + ); updateErr != nil { + return svc.ClusterMetadata, svc.Persistence, updateErr + } + // Ignore invalid cluster metadata + overwriteCurrentClusterMetadataWithDBRecord( + svc, + resp, + logger, ) - if err != nil { - return config.ClusterMetadata, config.Persistence, fmt.Errorf("error while fetching cluster metadata: %w", err) - } - currentMetadata := resp.ClusterMetadata - - // TODO (rodrigozhou): Remove this block for v1.21. 
- // Handle registering custom search attributes when upgrading to v1.20. - if len(sqlIndexNames) > 0 { - needSave := false - if currentMetadata.IndexSearchAttributes == nil { - currentMetadata.IndexSearchAttributes = initialIndexSearchAttributes - needSave = true - } else { - for _, indexName := range sqlIndexNames { - if _, ok := currentMetadata.IndexSearchAttributes[indexName]; !ok { - currentMetadata.IndexSearchAttributes[indexName] = searchattribute.GetSqlDbIndexSearchAttributes() - needSave = true - } - } - } - - if needSave { - _, err := clusterMetadataManager.SaveClusterMetadata( - ctx, - &persistence.SaveClusterMetadataRequest{ - ClusterMetadata: currentMetadata, - Version: resp.Version, - }, - ) - if err != nil { - logger.Warn( - "Failed to register search attributes.", - tag.Error(err), - tag.ClusterName(clusterName), - ) - } - logger.Info("Successfully registered search attributes.", tag.ClusterName(clusterName)) - - // need to re-fetch cluster metadata since it might need to be updated again below - resp, err = clusterMetadataManager.GetClusterMetadata( - ctx, - &persistence.GetClusterMetadataRequest{ClusterName: clusterName}, - ) - if err != nil { - return config.ClusterMetadata, config.Persistence, fmt.Errorf("error while fetching cluster metadata: %w", err) - } - currentMetadata = resp.ClusterMetadata - } - } - - // Allow updating cluster metadata if global namespace is disabled - if !resp.IsGlobalNamespaceEnabled && clusterData.EnableGlobalNamespace { - currentMetadata.IsGlobalNamespaceEnabled = clusterData.EnableGlobalNamespace - currentMetadata.InitialFailoverVersion = clusterInfo.InitialFailoverVersion - currentMetadata.FailoverVersionIncrement = clusterData.FailoverVersionIncrement - - applied, err := clusterMetadataManager.SaveClusterMetadata( - ctx, - &persistence.SaveClusterMetadataRequest{ - ClusterMetadata: currentMetadata, - Version: resp.Version, - }) - if !applied || err != nil { - return config.ClusterMetadata, config.Persistence, fmt.Errorf("error while updating cluster metadata: %w", err) - } - } else if resp.IsGlobalNamespaceEnabled != clusterData.EnableGlobalNamespace { - logger.Warn( - mismatchLogMessage, - tag.Key("clusterMetadata.EnableGlobalNamespace"), - tag.IgnoredValue(clusterData.EnableGlobalNamespace), - tag.Value(resp.IsGlobalNamespaceEnabled)) - config.ClusterMetadata.EnableGlobalNamespace = resp.IsGlobalNamespaceEnabled - } - - // Verify current cluster metadata - persistedShardCount := resp.HistoryShardCount - if config.Persistence.NumHistoryShards != persistedShardCount { - logger.Warn( - mismatchLogMessage, - tag.Key("persistence.numHistoryShards"), - tag.IgnoredValue(config.Persistence.NumHistoryShards), - tag.Value(persistedShardCount)) - config.Persistence.NumHistoryShards = persistedShardCount - } - if resp.FailoverVersionIncrement != clusterData.FailoverVersionIncrement { - logger.Warn( - mismatchLogMessage, - tag.Key("clusterMetadata.FailoverVersionIncrement"), - tag.IgnoredValue(clusterData.FailoverVersionIncrement), - tag.Value(resp.FailoverVersionIncrement)) - config.ClusterMetadata.FailoverVersionIncrement = resp.FailoverVersionIncrement + case *serviceerror.NotFound: + // Initialize current cluster record + if initErr := initCurrentClusterMetadataRecord( + ctx, + clusterMetadataManager, + svc, + initialIndexSearchAttributes, + logger, + ); initErr != nil { + return svc.ClusterMetadata, svc.Persistence, initErr } + default: + return svc.ClusterMetadata, svc.Persistence, fmt.Errorf("error while fetching cluster metadata: %w", err) } 
- err = loadClusterInformationFromStore(ctx, config, clusterMetadataManager, logger) + + err = loadClusterInformationFromStore(ctx, svc, clusterMetadataManager, logger) if err != nil { - return config.ClusterMetadata, config.Persistence, fmt.Errorf("error while loading metadata from cluster: %w", err) + return svc.ClusterMetadata, svc.Persistence, fmt.Errorf("error while loading metadata from cluster: %w", err) } - return config.ClusterMetadata, config.Persistence, nil -} - -func PersistenceFactoryProvider() persistenceClient.FactoryProviderFn { - return persistenceClient.FactoryProvider + return svc.ClusterMetadata, svc.Persistence, nil } // TODO: move this to cluster.fx -func loadClusterInformationFromStore(ctx context.Context, config *config.Config, clusterMsg persistence.ClusterMetadataManager, logger log.Logger) error { +func loadClusterInformationFromStore(ctx context.Context, svc *config.Config, clusterMsg persistence.ClusterMetadataManager, logger log.Logger) error { iter := collection.NewPagingIterator(func(paginationToken []byte) ([]interface{}, []byte, error) { request := &persistence.ListClusterMetadataRequest{ PageSize: 100, @@ -819,19 +710,20 @@ metadata := item.(*persistence.GetClusterMetadataResponse) shardCount := metadata.HistoryShardCount if shardCount == 0 { - // This is to add backward compatibility to the config based cluster connection. - shardCount = config.Persistence.NumHistoryShards + // This is to add backward compatibility to the config-based cluster connection. + shardCount = svc.Persistence.NumHistoryShards } newMetadata := cluster.ClusterInformation{ Enabled: metadata.IsConnectionEnabled, InitialFailoverVersion: metadata.InitialFailoverVersion, RPCAddress: metadata.ClusterAddress, ShardCount: shardCount, + Tags: metadata.Tags, } - if staticClusterMetadata, ok := config.ClusterMetadata.ClusterInformation[metadata.ClusterName]; ok { - if metadata.ClusterName != config.ClusterMetadata.CurrentClusterName { + if staticClusterMetadata, ok := svc.ClusterMetadata.ClusterInformation[metadata.ClusterName]; ok { + if metadata.ClusterName != svc.ClusterMetadata.CurrentClusterName { logger.Warn( - "ClusterInformation in ClusterMetadata config is deprecated. Please use TCTL tool to configure remote cluster connections", + "ClusterInformation in ClusterMetadata config is deprecated. Please use TCTL admin tool to configure remote cluster connections", tag.Key("clusterInformation"), tag.IgnoredValue(staticClusterMetadata), tag.Value(newMetadata)) @@ -840,11 +732,152 @@ logger.Info(fmt.Sprintf("Use rpc address %v for cluster %v.", newMetadata.RPCAddress, metadata.ClusterName)) } } - config.ClusterMetadata.ClusterInformation[metadata.ClusterName] = newMetadata + svc.ClusterMetadata.ClusterInformation[metadata.ClusterName] = newMetadata } return nil } +func initCurrentClusterMetadataRecord( + ctx context.Context, + clusterMetadataManager persistence.ClusterMetadataManager, + svc *config.Config, + initialIndexSearchAttributes map[string]*persistencespb.IndexSearchAttributes, + logger log.Logger, +) error { + var clusterId string + currentClusterName := svc.ClusterMetadata.CurrentClusterName + currentClusterInfo := svc.ClusterMetadata.ClusterInformation[currentClusterName] + if uuid.Parse(currentClusterInfo.ClusterID) == nil { + if currentClusterInfo.ClusterID != "" { + logger.Warn("Cluster Id in Cluster Metadata config is not a valid uuid. 
Generating a new Cluster Id") + } + clusterId = uuid.New() + } else { + clusterId = currentClusterInfo.ClusterID + } + + applied, err := clusterMetadataManager.SaveClusterMetadata( + ctx, + &persistence.SaveClusterMetadataRequest{ + ClusterMetadata: persistencespb.ClusterMetadata{ + HistoryShardCount: svc.Persistence.NumHistoryShards, + ClusterName: currentClusterName, + ClusterId: clusterId, + ClusterAddress: currentClusterInfo.RPCAddress, + FailoverVersionIncrement: svc.ClusterMetadata.FailoverVersionIncrement, + InitialFailoverVersion: currentClusterInfo.InitialFailoverVersion, + IsGlobalNamespaceEnabled: svc.ClusterMetadata.EnableGlobalNamespace, + IsConnectionEnabled: currentClusterInfo.Enabled, + UseClusterIdMembership: true, // Enable this for new cluster after 1.19. This is to prevent two clusters join into one ring. + IndexSearchAttributes: initialIndexSearchAttributes, + Tags: svc.ClusterMetadata.Tags, + }, + }) + if err != nil { + logger.Warn("Failed to save cluster metadata.", tag.Error(err), tag.ClusterName(currentClusterName)) + return err + } + if !applied { + logger.Error("Failed to apply cluster metadata.", tag.ClusterName(currentClusterName)) + return clusterMetadataInitErr + } + return nil +} + +func updateCurrentClusterMetadataRecord( + ctx context.Context, + clusterMetadataManager persistence.ClusterMetadataManager, + svc *config.Config, + initialIndexSearchAttributes map[string]*persistencespb.IndexSearchAttributes, + currentClusterDBRecord *persistence.GetClusterMetadataResponse, +) error { + updateDBRecord := false + currentClusterMetadata := svc.ClusterMetadata + currentClusterName := currentClusterMetadata.CurrentClusterName + currentClusterInfo := currentClusterMetadata.ClusterInformation[currentClusterName] + // Allow updating cluster metadata if global namespace is disabled + if !currentClusterDBRecord.IsGlobalNamespaceEnabled && currentClusterMetadata.EnableGlobalNamespace { + currentClusterDBRecord.IsGlobalNamespaceEnabled = currentClusterMetadata.EnableGlobalNamespace + currentClusterDBRecord.InitialFailoverVersion = currentClusterInfo.InitialFailoverVersion + currentClusterDBRecord.FailoverVersionIncrement = currentClusterMetadata.FailoverVersionIncrement + updateDBRecord = true + } + if currentClusterDBRecord.ClusterAddress != currentClusterInfo.RPCAddress { + currentClusterDBRecord.ClusterAddress = currentClusterInfo.RPCAddress + updateDBRecord = true + } + if !maps.Equal(currentClusterDBRecord.Tags, svc.ClusterMetadata.Tags) { + currentClusterDBRecord.Tags = svc.ClusterMetadata.Tags + updateDBRecord = true + } + + if len(initialIndexSearchAttributes) > 0 { + if currentClusterDBRecord.IndexSearchAttributes == nil { + currentClusterDBRecord.IndexSearchAttributes = initialIndexSearchAttributes + updateDBRecord = true + } else { + for indexName, initialValue := range initialIndexSearchAttributes { + if _, ok := currentClusterDBRecord.IndexSearchAttributes[indexName]; !ok { + currentClusterDBRecord.IndexSearchAttributes[indexName] = initialValue + updateDBRecord = true + } + } + } + } + + if !updateDBRecord { + return nil + } + + applied, err := clusterMetadataManager.SaveClusterMetadata( + ctx, + &persistence.SaveClusterMetadataRequest{ + ClusterMetadata: currentClusterDBRecord.ClusterMetadata, + Version: currentClusterDBRecord.Version, + }) + if !applied || err != nil { + return fmt.Errorf("error while updating cluster metadata: %w", err) + } + return nil +} + +func overwriteCurrentClusterMetadataWithDBRecord( + svc *config.Config, + currentClusterDBRecord 
*persistence.GetClusterMetadataResponse, + logger log.Logger, +) { + clusterMetadata := svc.ClusterMetadata + if currentClusterDBRecord.IsGlobalNamespaceEnabled && !clusterMetadata.EnableGlobalNamespace { + logger.Warn( + mismatchLogMessage, + tag.Key("clusterMetadata.EnableGlobalNamespace"), + tag.IgnoredValue(clusterMetadata.EnableGlobalNamespace), + tag.Value(currentClusterDBRecord.IsGlobalNamespaceEnabled)) + svc.ClusterMetadata.EnableGlobalNamespace = currentClusterDBRecord.IsGlobalNamespaceEnabled + } + persistedShardCount := currentClusterDBRecord.HistoryShardCount + if svc.Persistence.NumHistoryShards != persistedShardCount { + logger.Warn( + mismatchLogMessage, + tag.Key("persistence.numHistoryShards"), + tag.IgnoredValue(svc.Persistence.NumHistoryShards), + tag.Value(persistedShardCount)) + svc.Persistence.NumHistoryShards = persistedShardCount + } + if currentClusterDBRecord.FailoverVersionIncrement != clusterMetadata.FailoverVersionIncrement { + logger.Warn( + mismatchLogMessage, + tag.Key("clusterMetadata.FailoverVersionIncrement"), + tag.IgnoredValue(clusterMetadata.FailoverVersionIncrement), + tag.Value(currentClusterDBRecord.FailoverVersionIncrement)) + svc.ClusterMetadata.FailoverVersionIncrement = currentClusterDBRecord.FailoverVersionIncrement + } +} + +func PersistenceFactoryProvider() persistenceClient.FactoryProviderFn { + return persistenceClient.FactoryProvider +} + func ServerLifetimeHooks( lc fx.Lifecycle, svr *ServerImpl, @@ -1084,6 +1117,16 @@ tag.NewStringTag("module", e.ModuleName), tag.Error(e.Err)) } + case *fxevent.Run: + if e.Err != nil { + l.logger.Error("error returned", + tag.ComponentFX, + tag.NewStringTag("name", e.Name), + tag.NewStringTag("kind", e.Kind), + tag.NewStringTag("module", e.ModuleName), + tag.Error(e.Err), + ) + } case *fxevent.Invoking: // Do not log stack as it will make logs hard to read. l.logger.Debug("invoking", diff -Nru temporal-1.21.5-1/src/temporal/fx_test.go temporal-1.22.5/src/temporal/fx_test.go --- temporal-1.21.5-1/src/temporal/fx_test.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/temporal/fx_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,125 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package temporal + +import ( + "context" + "path" + "testing" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + + persistencespb "go.temporal.io/server/api/persistence/v1" + "go.temporal.io/server/common/config" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/persistence" + "go.temporal.io/server/tests/testutils" +) + +func TestInitCurrentClusterMetadataRecord(t *testing.T) { + configDir := path.Join(testutils.GetRepoRootDirectory(), "config") + cfg, err := config.LoadConfig("development-cass", configDir, "") + require.NoError(t, err) + controller := gomock.NewController(t) + + mockClusterMetadataManager := persistence.NewMockClusterMetadataManager(controller) + mockClusterMetadataManager.EXPECT().SaveClusterMetadata(gomock.Any(), gomock.Any()).DoAndReturn( + func(_ context.Context, request *persistence.SaveClusterMetadataRequest) (bool, error) { + require.Equal(t, cfg.ClusterMetadata.EnableGlobalNamespace, request.IsGlobalNamespaceEnabled) + require.Equal(t, cfg.ClusterMetadata.CurrentClusterName, request.ClusterName) + require.Equal(t, cfg.ClusterMetadata.ClusterInformation[cfg.ClusterMetadata.CurrentClusterName].RPCAddress, request.ClusterAddress) + require.Equal(t, cfg.ClusterMetadata.ClusterInformation[cfg.ClusterMetadata.CurrentClusterName].InitialFailoverVersion, request.InitialFailoverVersion) + require.Equal(t, cfg.Persistence.NumHistoryShards, request.HistoryShardCount) + require.Equal(t, cfg.ClusterMetadata.FailoverVersionIncrement, request.FailoverVersionIncrement) + require.Equal(t, int64(0), request.Version) + return true, nil + }, + ) + err = initCurrentClusterMetadataRecord( + context.TODO(), + mockClusterMetadataManager, + cfg, + nil, + log.NewNoopLogger(), + ) + require.NoError(t, err) +} + +func TestUpdateCurrentClusterMetadataRecord(t *testing.T) { + configDir := path.Join(testutils.GetRepoRootDirectory(), "config") + cfg, err := config.LoadConfig("development-cluster-a", configDir, "") + require.NoError(t, err) + controller := gomock.NewController(t) + + mockClusterMetadataManager := persistence.NewMockClusterMetadataManager(controller) + mockClusterMetadataManager.EXPECT().SaveClusterMetadata(gomock.Any(), gomock.Any()).DoAndReturn( + func(_ context.Context, request *persistence.SaveClusterMetadataRequest) (bool, error) { + require.Equal(t, cfg.ClusterMetadata.EnableGlobalNamespace, request.IsGlobalNamespaceEnabled) + require.Equal(t, "", request.ClusterName) + require.Equal(t, cfg.ClusterMetadata.ClusterInformation[cfg.ClusterMetadata.CurrentClusterName].RPCAddress, request.ClusterAddress) + require.Equal(t, cfg.ClusterMetadata.ClusterInformation[cfg.ClusterMetadata.CurrentClusterName].InitialFailoverVersion, request.InitialFailoverVersion) + require.Equal(t, int32(0), request.HistoryShardCount) + require.Equal(t, cfg.ClusterMetadata.FailoverVersionIncrement, request.FailoverVersionIncrement) + require.Equal(t, int64(1), request.Version) + return true, nil + }, + ) + updateRecord := &persistence.GetClusterMetadataResponse{ + ClusterMetadata: persistencespb.ClusterMetadata{}, + Version: 1, + } + err = updateCurrentClusterMetadataRecord( + context.TODO(), + mockClusterMetadataManager, + cfg, + nil, + updateRecord, + ) + require.NoError(t, err) +} + +func TestOverwriteCurrentClusterMetadataWithDBRecord(t *testing.T) { + configDir := path.Join(testutils.GetRepoRootDirectory(), "config") + cfg, err := config.LoadConfig("development-cass", configDir, "") + require.NoError(t, err) + + dbRecord := 
&persistence.GetClusterMetadataResponse{ + ClusterMetadata: persistencespb.ClusterMetadata{ + HistoryShardCount: 1024, + FailoverVersionIncrement: 10000, + IsGlobalNamespaceEnabled: true, + }, + Version: 1, + } + overwriteCurrentClusterMetadataWithDBRecord( + cfg, + dbRecord, + log.NewNoopLogger(), + ) + require.Equal(t, int64(10000), cfg.ClusterMetadata.FailoverVersionIncrement) + require.True(t, cfg.ClusterMetadata.EnableGlobalNamespace) + require.Equal(t, int32(1024), cfg.Persistence.NumHistoryShards) +} diff -Nru temporal-1.21.5-1/src/temporal/server_impl.go temporal-1.22.5/src/temporal/server_impl.go --- temporal-1.21.5-1/src/temporal/server_impl.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/temporal/server_impl.go 2024-02-23 09:45:43.000000000 +0000 @@ -33,9 +33,12 @@ "go.temporal.io/server/common/cluster" "go.temporal.io/server/common/config" + "go.temporal.io/server/common/headers" "go.temporal.io/server/common/log" "go.temporal.io/server/common/log/tag" + "go.temporal.io/server/common/metrics" persistenceClient "go.temporal.io/server/common/persistence/client" + "go.temporal.io/server/common/primitives" "go.temporal.io/server/common/resolver" "go.temporal.io/server/common/resource" "go.temporal.io/server/common/util" @@ -53,6 +56,7 @@ persistenceConfig config.Persistence clusterMetadata *cluster.Config persistenceFactoryProvider persistenceClient.FactoryProviderFn + metricsHandler metrics.Handler } ) @@ -66,6 +70,7 @@ persistenceConfig config.Persistence, clusterMetadata *cluster.Config, persistenceFactoryProvider persistenceClient.FactoryProviderFn, + metricsHandler metrics.Handler, ) *ServerImpl { s := &ServerImpl{ so: opts, @@ -75,6 +80,7 @@ persistenceConfig: persistenceConfig, clusterMetadata: clusterMetadata, persistenceFactoryProvider: persistenceFactoryProvider, + metricsHandler: metricsHandler, } for _, svcMeta := range servicesGroup.Services { if svcMeta != nil { @@ -96,6 +102,7 @@ s.persistenceFactoryProvider, s.logger, s.so.customDataStoreFactory, + s.metricsHandler, ); err != nil { return fmt.Errorf("unable to initialize system namespace: %w", err) } @@ -165,15 +172,17 @@ persistenceFactoryProvider persistenceClient.FactoryProviderFn, logger log.Logger, customDataStoreFactory persistenceClient.AbstractDataStoreFactory, + metricsHandler metrics.Handler, ) error { clusterName := persistenceClient.ClusterName(currentClusterName) + metricsHandler = metricsHandler.WithTags(metrics.ServiceNameTag(primitives.ServerService)) dataStoreFactory, _ := persistenceClient.DataStoreFactoryProvider( clusterName, persistenceServiceResolver, cfg, customDataStoreFactory, logger, - nil, + metricsHandler, ) factory := persistenceFactoryProvider(persistenceClient.NewFactoryParams{ DataStoreFactory: dataStoreFactory, @@ -182,7 +191,7 @@ PersistenceNamespaceMaxQPS: nil, EnablePriorityRateLimiting: nil, ClusterName: persistenceClient.ClusterName(currentClusterName), - MetricsHandler: nil, + MetricsHandler: metricsHandler, Logger: logger, }) defer factory.Close() @@ -192,6 +201,7 @@ return fmt.Errorf("unable to initialize metadata manager: %w", err) } defer metadataManager.Close() + ctx = headers.SetCallerInfo(ctx, headers.SystemBackgroundCallerInfo) if err = metadataManager.InitializeSystemNamespaces(ctx, currentClusterName); err != nil { return fmt.Errorf("unable to register system namespace: %w", err) } diff -Nru temporal-1.21.5-1/src/temporal/server_test.go temporal-1.22.5/src/temporal/server_test.go --- temporal-1.21.5-1/src/temporal/server_test.go 2023-09-29 
14:03:07.000000000 +0000 +++ temporal-1.22.5/src/temporal/server_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -22,31 +22,167 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. -package temporal +package temporal_test import ( + "fmt" "path" + "strings" + "sync/atomic" "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" + "time" "go.temporal.io/server/common/config" - // need to import this package to register the sqlite plugin - _ "go.temporal.io/server/common/persistence/sql/sqlplugin/sqlite" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/log/tag" + _ "go.temporal.io/server/common/persistence/sql/sqlplugin/sqlite" // needed to register the sqlite plugin + "go.temporal.io/server/temporal" "go.temporal.io/server/tests/testutils" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) -// TestNewServer verifies that NewServer doesn't cause any fx errors +// TestNewServer verifies that NewServer doesn't cause any fx errors, and that there are no unexpected error logs after +// running for a few seconds. func TestNewServer(t *testing.T) { + t.Parallel() + + cfg := loadConfig(t) + logDetector := newErrorLogDetector(t) + logDetector.Start() + + server, err := temporal.NewServer( + temporal.ForServices(temporal.DefaultServices), + temporal.WithConfig(cfg), + temporal.WithLogger(logDetector), + ) + require.NoError(t, err) + t.Cleanup(func() { + logDetector.Stop() + assert.NoError(t, server.Stop()) + }) + require.NoError(t, server.Start()) + time.Sleep(10 * time.Second) +} + +func loadConfig(t *testing.T) *config.Config { + cfg := loadSQLiteConfig(t) + setTestPorts(cfg) + + return cfg +} + +// loadSQLiteConfig loads the config for the sqlite persistence store. We use sqlite because it doesn't require any +// external dependencies, so it's easy to run this test in isolation. +func loadSQLiteConfig(t *testing.T) *config.Config { configDir := path.Join(testutils.GetRepoRootDirectory(), "config") cfg, err := config.LoadConfig("development-sqlite", configDir, "") require.NoError(t, err) + cfg.DynamicConfigClient.Filepath = path.Join(configDir, "dynamicconfig", "development-sql.yaml") - _, err = NewServer( - ForServices(DefaultServices), - WithConfig(cfg), - ) - assert.NoError(t, err) - // TODO: add tests for Server.Run(), etc. + + return cfg +} + +// setTestPorts sets the ports of all services to something different from the default ports, so that we can run the +// tests in parallel. +func setTestPorts(cfg *config.Config) { + port := 10000 + + for k, v := range cfg.Services { + rpc := v.RPC + rpc.GRPCPort = port + port++ + rpc.MembershipPort = port + port++ + v.RPC = rpc + cfg.Services[k] = v + } +} + +type errorLogDetector struct { + t testing.TB + on atomic.Bool + log.Logger +} + +func (d *errorLogDetector) Start() { + d.on.Store(true) +} + +func (d *errorLogDetector) Stop() { + d.on.Store(false) +} + +func (d *errorLogDetector) Warn(msg string, tags ...tag.Tag) { + d.Logger.Warn(msg, tags...) + + if !d.on.Load() { + return + } + + if strings.Contains(msg, "error creating sdk client") { + return + } + + d.t.Errorf("unexpected warning log: %s", msg) +} + +func (d *errorLogDetector) Error(msg string, tags ...tag.Tag) { + d.Logger.Error(msg, tags...) 
+ + if !d.on.Load() { + return + } + + if strings.Contains(msg, "Unable to process new range") { + return + } + + d.t.Errorf("unexpected error log: %s", msg) +} + +// newErrorLogDetector returns a logger that fails the test if it logs any errors or warnings, except for the ones that +// are expected. Ideally, there are no "expected" errors or warnings, but we still want this test to avoid introducing +// any new ones while we are working on removing the existing ones. +func newErrorLogDetector(t testing.TB) *errorLogDetector { + return &errorLogDetector{ + t: t, + Logger: log.NewCLILogger(), + } +} + +type fakeTest struct { + testing.TB + errorfMsgs []string +} + +func (f *fakeTest) Errorf(msg string, args ...any) { + f.errorfMsgs = append(f.errorfMsgs, fmt.Sprintf(msg, args...)) +} + +func TestErrorLogDetector(t *testing.T) { + t.Parallel() + + f := &fakeTest{TB: t} + d := newErrorLogDetector(f) + d.Start() + d.Warn("error creating sdk client") + d.Error("Unable to process new range") + d.Error("unexpected error") + d.Warn("unexpected warning") + + assert.Equal(t, []string{ + "unexpected error log: unexpected error", + "unexpected warning log: unexpected warning", + }, f.errorfMsgs, "should fail the test if there are any unexpected errors or warnings") + + d.Stop() + + f.errorfMsgs = nil + + d.Error("unexpected error") + d.Warn("unexpected warning") + assert.Empty(t, f.errorfMsgs, "should not fail the test if the detector is stopped") } diff -Nru temporal-1.21.5-1/src/tests/activity_test.go temporal-1.22.5/src/tests/activity_test.go --- temporal-1.21.5-1/src/tests/activity_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/tests/activity_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -52,7 +52,6 @@ "go.temporal.io/server/common/payload" "go.temporal.io/server/common/payloads" "go.temporal.io/server/common/primitives/timestamp" - "go.temporal.io/server/service/matching" ) func (s *integrationSuite) TestActivityHeartBeatWorkflow_Success() { @@ -156,16 +155,16 @@ T: s.T(), } - _, err := poller.PollAndProcessWorkflowTask(false, false) - s.True(err == nil || err == matching.ErrNoTasks) + _, err := poller.PollAndProcessWorkflowTask() + s.True(err == nil || err == errNoTasks) err = poller.PollAndProcessActivityTask(false) - s.True(err == nil || err == matching.ErrNoTasks) + s.True(err == nil || err == errNoTasks) s.Logger.Info("Waiting for workflow to complete", tag.WorkflowRunID(we.RunId)) s.False(workflowComplete) - _, err = poller.PollAndProcessWorkflowTask(true, false) + _, err = poller.PollAndProcessWorkflowTask(WithDumpHistory) s.NoError(err) s.True(workflowComplete) s.Equal(1, activityExecutedCount) @@ -339,11 +338,11 @@ }) } - _, err := poller.PollAndProcessWorkflowTask(false, false) + _, err := poller.PollAndProcessWorkflowTask() s.NoError(err) err = poller.PollAndProcessActivityTask(false) - s.True(err == nil || err == matching.ErrNoTasks, err) + s.True(err == nil || err == errNoTasks, err) descResp, err := describeWorkflowExecution() s.NoError(err) @@ -358,7 +357,7 @@ } err = poller2.PollAndProcessActivityTask(false) - s.True(err == nil || err == matching.ErrNoTasks, err) + s.True(err == nil || err == errNoTasks, err) descResp, err = describeWorkflowExecution() s.NoError(err) @@ -377,7 +376,7 @@ s.False(workflowComplete) s.Logger.Info("Processing workflow task:", tag.Counter(i)) - _, err := poller.PollAndProcessWorkflowTaskWithoutRetry(false, false) + _, err := poller.PollAndProcessWorkflowTask(WithRetries(1)) if err != nil { s.printWorkflowHistory(s.namespace, 
&commonpb.WorkflowExecution{ WorkflowId: id, @@ -487,7 +486,7 @@ T: s.T(), } - _, err := poller.PollAndProcessWorkflowTask(false, false) + _, err := poller.PollAndProcessWorkflowTask() s.NoError(err) for i := 0; i <= activityExecutedLimit; i++ { @@ -495,7 +494,7 @@ s.NoError(err) } - _, err = poller.PollAndProcessWorkflowTaskWithoutRetry(false, false) + _, err = poller.PollAndProcessWorkflowTask(WithRetries(1)) s.NoError(err) s.True(workflowComplete) } @@ -588,8 +587,8 @@ T: s.T(), } - _, err := poller.PollAndProcessWorkflowTask(false, false) - s.True(err == nil || err == matching.ErrNoTasks) + _, err := poller.PollAndProcessWorkflowTask() + s.True(err == nil || err == errNoTasks) err = poller.PollAndProcessActivityTask(false) // Not s.ErrorIs() because error goes through RPC. @@ -599,7 +598,7 @@ s.Logger.Info("Waiting for workflow to complete", tag.WorkflowRunID(we.RunId)) s.False(workflowComplete) - _, err = poller.PollAndProcessWorkflowTask(true, false) + _, err = poller.PollAndProcessWorkflowTask(WithDumpHistory) s.NoError(err) s.True(workflowComplete) } @@ -713,8 +712,8 @@ T: s.T(), } - _, err := poller.PollAndProcessWorkflowTask(false, false) - s.True(err == nil || err == matching.ErrNoTasks, err) + _, err := poller.PollAndProcessWorkflowTask() + s.True(err == nil || err == errNoTasks, err) cancelCh := make(chan struct{}) go func() { @@ -734,14 +733,14 @@ scheduleActivity = false requestCancellation = true - _, err2 := poller.PollAndProcessWorkflowTask(false, false) + _, err2 := poller.PollAndProcessWorkflowTask() s.NoError(err2) close(cancelCh) }() s.Logger.Info("Start activity.") err = poller.PollAndProcessActivityTask(false) - s.True(err == nil || err == matching.ErrNoTasks, err) + s.True(err == nil || err == errNoTasks, err) s.Logger.Info("Waiting for cancel to complete.", tag.WorkflowRunID(we.RunId)) <-cancelCh @@ -842,8 +841,8 @@ T: s.T(), } - _, err := poller.PollAndProcessWorkflowTask(false, false) - s.True(err == nil || err == matching.ErrNoTasks) + _, err := poller.PollAndProcessWorkflowTask() + s.True(err == nil || err == errNoTasks) // Send signal so that worker can send an activity cancel signalName := "my signal" @@ -863,13 +862,13 @@ // Process signal in workflow and send request cancellation scheduleActivity = false requestCancellation = true - _, err = poller.PollAndProcessWorkflowTask(true, false) + _, err = poller.PollAndProcessWorkflowTask(WithDumpHistory) s.NoError(err) scheduleActivity = false requestCancellation = false - _, err = poller.PollAndProcessWorkflowTask(false, false) - s.True(err == nil || err == matching.ErrNoTasks) + _, err = poller.PollAndProcessWorkflowTask() + s.True(err == nil || err == errNoTasks) } func (s *clientIntegrationSuite) TestActivityHeartbeatDetailsDuringRetry() { diff -Nru temporal-1.21.5-1/src/tests/advanced_visibility_test.go temporal-1.22.5/src/tests/advanced_visibility_test.go --- temporal-1.21.5-1/src/tests/advanced_visibility_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/tests/advanced_visibility_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -305,16 +305,14 @@ Logger: s.Logger, T: s.T(), } - _, newTask, err := poller.PollAndProcessWorkflowTaskWithAttemptAndRetryAndForceNewWorkflowTask( - false, - false, - true, - true, - 0, - 1, - true, - nil) + res, err := poller.PollAndProcessWorkflowTask( + WithPollSticky, + WithRespondSticky, + WithExpectedAttemptCount(0), + WithRetries(1), + WithForceNewWorkflowTask) s.NoError(err) + newTask := res.NewTask s.NotNil(newTask) s.NotNil(newTask.WorkflowTask) @@ -1165,6 
+1163,87 @@ s.Equal(int64(0), resp.GetCount()) } +func (s *advancedVisibilitySuite) TestCountGroupByWorkflow() { + id := "es-integration-count-groupby-workflow-test" + wt := "es-integration-count-groupby-workflow-test-type" + tl := "es-integration-count-groupby-workflow-test-taskqueue" + + numWorkflows := 10 + numClosedWorkflows := 4 + for i := 0; i < numWorkflows; i++ { + wfid := id + strconv.Itoa(i) + request := s.createStartWorkflowExecutionRequest(wfid, wt, tl) + we, err := s.engine.StartWorkflowExecution(NewContext(), request) + s.NoError(err) + if i < numClosedWorkflows { + _, err := s.engine.TerminateWorkflowExecution( + NewContext(), + &workflowservice.TerminateWorkflowExecutionRequest{ + Namespace: s.namespace, + WorkflowExecution: &commonpb.WorkflowExecution{ + WorkflowId: wfid, + RunId: we.RunId, + }, + }, + ) + s.NoError(err) + } + } + + query := `GROUP BY ExecutionStatus` + countRequest := &workflowservice.CountWorkflowExecutionsRequest{ + Namespace: s.namespace, + Query: query, + } + var resp *workflowservice.CountWorkflowExecutionsResponse + var err error + for i := 0; i < numOfRetry; i++ { + resp, err = s.engine.CountWorkflowExecutions(NewContext(), countRequest) + s.NoError(err) + if resp.GetCount() == int64(numWorkflows) { + break + } + time.Sleep(waitTimeInMs * time.Millisecond) + } + s.Equal(int64(numWorkflows), resp.GetCount()) + s.Equal(2, len(resp.Groups)) + + runningStatusPayload, _ := searchattribute.EncodeValue( + enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING.String(), + enumspb.INDEXED_VALUE_TYPE_KEYWORD, + ) + terminatedStatusPayload, _ := searchattribute.EncodeValue( + enumspb.WORKFLOW_EXECUTION_STATUS_TERMINATED.String(), + enumspb.INDEXED_VALUE_TYPE_KEYWORD, + ) + s.Equal( + &workflowservice.CountWorkflowExecutionsResponse_AggregationGroup{ + GroupValues: []*commonpb.Payload{runningStatusPayload}, + Count: int64(numWorkflows - numClosedWorkflows), + }, + resp.Groups[0], + ) + s.Equal( + &workflowservice.CountWorkflowExecutionsResponse_AggregationGroup{ + GroupValues: []*commonpb.Payload{terminatedStatusPayload}, + Count: int64(numClosedWorkflows), + }, + resp.Groups[1], + ) + + query = `GROUP BY WorkflowType` + countRequest.Query = query + _, err = s.engine.CountWorkflowExecutions(NewContext(), countRequest) + s.Error(err) + s.Contains(err.Error(), "'group by' clause is only supported for ExecutionStatus search attribute") + + query = `GROUP BY ExecutionStatus, WorkflowType` + countRequest.Query = query + _, err = s.engine.CountWorkflowExecutions(NewContext(), countRequest) + s.Error(err) + s.Contains(err.Error(), "'group by' clause supports only a single field") +} + func (s *advancedVisibilitySuite) createStartWorkflowExecutionRequest(id, wt, tl string) *workflowservice.StartWorkflowExecutionRequest { identity := "worker1" workflowType := &commonpb.WorkflowType{Name: wt} @@ -1283,16 +1362,14 @@ } // process 1st workflow task and assert workflow task is handled correctly. - _, newTask, err := poller.PollAndProcessWorkflowTaskWithAttemptAndRetryAndForceNewWorkflowTask( - false, - false, - true, - true, - 0, - 1, - true, - nil) + res, err := poller.PollAndProcessWorkflowTask( + WithPollSticky, + WithRespondSticky, + WithExpectedAttemptCount(0), + WithRetries(1), + WithForceNewWorkflowTask) s.NoError(err) + newTask := res.NewTask s.NotNil(newTask) s.NotNil(newTask.WorkflowTask) s.Equal(int64(3), newTask.WorkflowTask.GetPreviousStartedEventId()) @@ -1333,16 +1410,14 @@ s.True(verified) // process 2nd workflow task and assert workflow task is handled correctly. 
- _, newTask, err = poller.PollAndProcessWorkflowTaskWithAttemptAndRetryAndForceNewWorkflowTask( - false, - false, - true, - true, - 0, - 1, - true, - nil) + res, err = poller.PollAndProcessWorkflowTask( + WithPollSticky, + WithRespondSticky, + WithExpectedAttemptCount(0), + WithRetries(1), + WithForceNewWorkflowTask) s.NoError(err) + newTask = res.NewTask s.NotNil(newTask) s.NotNil(newTask.WorkflowTask) s.Equal(4, len(newTask.WorkflowTask.History.Events)) @@ -1357,16 +1432,14 @@ s.testListResultForUpsertSearchAttributes(listRequest) // process 3rd workflow task and assert workflow task is handled correctly. - _, newTask, err = poller.PollAndProcessWorkflowTaskWithAttemptAndRetryAndForceNewWorkflowTask( - false, - false, - true, - true, - 0, - 1, - true, - nil) + res, err = poller.PollAndProcessWorkflowTask( + WithPollSticky, + WithRespondSticky, + WithExpectedAttemptCount(0), + WithRetries(1), + WithForceNewWorkflowTask) s.NoError(err) + newTask = res.NewTask s.NotNil(newTask) s.NotNil(newTask.WorkflowTask) s.Equal(4, len(newTask.WorkflowTask.History.Events)) @@ -1450,16 +1523,14 @@ } // process close workflow task and assert search attributes is correct after workflow is closed - _, newTask, err = poller.PollAndProcessWorkflowTaskWithAttemptAndRetryAndForceNewWorkflowTask( - false, - false, - true, - true, - 0, - 1, - true, - nil) + res, err = poller.PollAndProcessWorkflowTask( + WithPollSticky, + WithRespondSticky, + WithExpectedAttemptCount(0), + WithRetries(1), + WithForceNewWorkflowTask) s.NoError(err) + newTask = res.NewTask s.NotNil(newTask) s.Nil(newTask.WorkflowTask) @@ -1586,16 +1657,14 @@ } // process 1st workflow task and assert workflow task is handled correctly. - _, newTask, err := poller.PollAndProcessWorkflowTaskWithAttemptAndRetryAndForceNewWorkflowTask( - false, - false, - true, - true, - 0, - 1, - true, - nil) + res, err := poller.PollAndProcessWorkflowTask( + WithPollSticky, + WithRespondSticky, + WithExpectedAttemptCount(0), + WithRetries(1), + WithForceNewWorkflowTask) s.NoError(err) + newTask := res.NewTask s.NotNil(newTask) s.NotNil(newTask.WorkflowTask) s.Equal(int64(3), newTask.WorkflowTask.GetPreviousStartedEventId()) @@ -1636,16 +1705,14 @@ s.True(verified) // process 2nd workflow task and assert workflow task is handled correctly. - _, newTask, err = poller.PollAndProcessWorkflowTaskWithAttemptAndRetryAndForceNewWorkflowTask( - false, - false, - true, - true, - 0, - 1, - true, - nil) + res, err = poller.PollAndProcessWorkflowTask( + WithPollSticky, + WithRespondSticky, + WithExpectedAttemptCount(0), + WithRetries(1), + WithForceNewWorkflowTask) s.NoError(err) + newTask = res.NewTask s.NotNil(newTask) s.NotNil(newTask.WorkflowTask) s.Equal(4, len(newTask.WorkflowTask.History.Events)) @@ -1684,16 +1751,14 @@ s.True(verified) // process close workflow task and assert workflow task is handled correctly. 
- _, newTask, err = poller.PollAndProcessWorkflowTaskWithAttemptAndRetryAndForceNewWorkflowTask( - false, - false, - true, - true, - 0, - 1, - true, - nil) + res, err = poller.PollAndProcessWorkflowTask( + WithPollSticky, + WithRespondSticky, + WithExpectedAttemptCount(0), + WithRetries(1), + WithForceNewWorkflowTask) s.NoError(err) + newTask = res.NewTask s.NotNil(newTask) s.Nil(newTask.WorkflowTask) @@ -1825,7 +1890,7 @@ T: s.T(), } - _, err := poller.PollAndProcessWorkflowTask(false, false) + _, err := poller.PollAndProcessWorkflowTask() s.Error(err) s.IsType(&serviceerror.InvalidArgument{}, err) if s.isElasticsearchEnabled { @@ -2584,7 +2649,9 @@ run, err := s.sysSDKClient.ExecuteWorkflow(ctx, sdkclient.StartWorkflowOptions{ ID: s.T().Name() + "-scavenger", TaskQueue: build_ids.BuildIdScavengerTaskQueueName, - }, build_ids.BuildIdScavangerWorkflowName) + }, build_ids.BuildIdScavangerWorkflowName, build_ids.BuildIdScavangerInput{ + IgnoreRetentionTime: true, + }) s.Require().NoError(err) err = run.Get(ctx, nil) s.Require().NoError(err) diff -Nru temporal-1.21.5-1/src/tests/archival_test.go temporal-1.22.5/src/tests/archival_test.go --- temporal-1.21.5-1/src/tests/archival_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/tests/archival_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -82,30 +82,12 @@ func TestArchivalSuite(t *testing.T) { flag.Parse() - for _, c := range []struct { - Name string - DurableArchivalIsEnabled bool - }{ - { - Name: "DurableArchivalIsDisabled", - DurableArchivalIsEnabled: false, - }, - { - Name: "DurableArchivalIsEnabled", - DurableArchivalIsEnabled: true, - }, - } { - c := c - t.Run(c.Name, func(t *testing.T) { - s := new(archivalSuite) - s.dynamicConfigOverrides = map[dynamicconfig.Key]interface{}{ - dynamicconfig.RetentionTimerJitterDuration: time.Second, - dynamicconfig.ArchivalProcessorArchiveDelay: time.Duration(0), - dynamicconfig.DurableArchivalEnabled: c.DurableArchivalIsEnabled, - } - suite.Run(t, s) - }) + s := new(archivalSuite) + s.dynamicConfigOverrides = map[dynamicconfig.Key]interface{}{ + dynamicconfig.RetentionTimerJitterDuration: time.Second, + dynamicconfig.ArchivalProcessorArchiveDelay: time.Duration(0), } + suite.Run(t, s) } func (s *archivalSuite) TestArchival_TimerQueueProcessor() { @@ -441,7 +423,7 @@ } for run := 0; run < numRuns; run++ { for i := 0; i < numActivities; i++ { - _, err := poller.PollAndProcessWorkflowTask(false, false) + _, err := poller.PollAndProcessWorkflowTask() s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) if i%2 == 0 { @@ -453,7 +435,7 @@ s.NoError(err) } - _, err = poller.PollAndProcessWorkflowTask(true, false) + _, err = poller.PollAndProcessWorkflowTask(WithDumpHistory) s.NoError(err) } diff -Nru temporal-1.21.5-1/src/tests/cancel_workflow_test.go temporal-1.22.5/src/tests/cancel_workflow_test.go --- temporal-1.21.5-1/src/tests/cancel_workflow_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/tests/cancel_workflow_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -119,7 +119,7 @@ }) s.NoError(err) - _, err = poller.PollAndProcessWorkflowTask(true, false) + _, err = poller.PollAndProcessWorkflowTask(WithDumpHistory) s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) @@ -257,19 +257,19 @@ } // Cancel the foreign workflow with this workflow task request. 
- _, err := poller.PollAndProcessWorkflowTask(true, false) + _, err := poller.PollAndProcessWorkflowTask(WithDumpHistory) s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) s.True(cancellationSent) // Finish execution - _, err = poller.PollAndProcessWorkflowTask(true, false) + _, err = poller.PollAndProcessWorkflowTask(WithDumpHistory) s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) // Accept cancellation. - _, err = foreignPoller.PollAndProcessWorkflowTask(false, false) + _, err = foreignPoller.PollAndProcessWorkflowTask() s.Logger.Info("foreign PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) } @@ -389,19 +389,19 @@ } // Complete target workflow - _, err := foreignPoller.PollAndProcessWorkflowTask(false, false) + _, err := foreignPoller.PollAndProcessWorkflowTask() s.Logger.Info("foreign PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) // Cancel the target workflow with this workflow task request. - _, err = poller.PollAndProcessWorkflowTask(true, false) + _, err = poller.PollAndProcessWorkflowTask(WithDumpHistory) s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) s.True(cancellationSent) // Finish execution - _, err = poller.PollAndProcessWorkflowTask(true, false) + _, err = poller.PollAndProcessWorkflowTask(WithDumpHistory) s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) } @@ -474,14 +474,14 @@ } // Cancel the target workflow with this workflow task request. - _, err := poller.PollAndProcessWorkflowTask(true, false) + _, err := poller.PollAndProcessWorkflowTask(WithDumpHistory) s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) s.True(cancellationSent) // Finish execution - _, err = poller.PollAndProcessWorkflowTask(true, false) + _, err = poller.PollAndProcessWorkflowTask(WithDumpHistory) s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) } @@ -622,7 +622,7 @@ } s.Logger.Info("Process first workflow task which starts and request cancels child workflow") - _, err = poller.PollAndProcessWorkflowTask(false, false) + _, err = poller.PollAndProcessWorkflowTask() s.Error(err) s.IsType(&serviceerror.InvalidArgument{}, err) s.Equal("BadRequestCancelExternalWorkflowExecutionAttributes: Start and RequestCancel for child workflow is not allowed in same workflow task.", err.Error()) @@ -632,7 +632,7 @@ }) s.Logger.Info("Process second workflow task which observes child workflow is cancelled and completes") - _, err = poller.PollAndProcessWorkflowTask(false, false) + _, err = poller.PollAndProcessWorkflowTask() s.NoError(err) s.printWorkflowHistory(s.namespace, &commonpb.WorkflowExecution{ diff -Nru temporal-1.21.5-1/src/tests/child_workflow_test.go temporal-1.22.5/src/tests/child_workflow_test.go --- temporal-1.21.5-1/src/tests/child_workflow_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/tests/child_workflow_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -195,17 +195,17 @@ } // Make first workflow task to start child execution - _, err := pollerParent.PollAndProcessWorkflowTask(false, false) + _, err := pollerParent.PollAndProcessWorkflowTask() s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) s.True(childExecutionStarted) // Process ChildExecution Started event and Process Child Execution and complete it - _, err = pollerParent.PollAndProcessWorkflowTask(false, false) + _, err = pollerParent.PollAndProcessWorkflowTask() s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) 
s.NoError(err) - _, err = pollerChild.PollAndProcessWorkflowTask(false, false) + _, err = pollerChild.PollAndProcessWorkflowTask() s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) s.NotNil(startedEvent) @@ -226,7 +226,7 @@ s.Equal(200*time.Second, timestamp.DurationValue(childStartedEvent.GetWorkflowExecutionStartedEventAttributes().GetWorkflowRunTimeout())) // Process ChildExecution completed event and complete parent execution - _, err = pollerParent.PollAndProcessWorkflowTask(false, false) + _, err = pollerParent.PollAndProcessWorkflowTask() s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) s.NotNil(completedEvent) @@ -355,20 +355,20 @@ } // Make first workflow task to start child execution - _, err := pollerParent.PollAndProcessWorkflowTask(false, false) + _, err := pollerParent.PollAndProcessWorkflowTask() s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) s.True(childExecutionStarted) // Process ChildExecution Started event - _, err = pollerParent.PollAndProcessWorkflowTask(false, false) + _, err = pollerParent.PollAndProcessWorkflowTask() s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) s.True(seenChildStarted) // Run through three executions of the child workflow for i := 0; i < 3; i++ { - _, err = pollerChild.PollAndProcessWorkflowTask(false, false) + _, err = pollerChild.PollAndProcessWorkflowTask() s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err), tag.Counter(i)) s.NoError(err) } @@ -383,7 +383,7 @@ s.Nil(terminateErr) // Process ChildExecution terminated event and complete parent execution - _, err = pollerParent.PollAndProcessWorkflowTask(false, false) + _, err = pollerParent.PollAndProcessWorkflowTask() s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) s.NotNil(terminatedEvent) @@ -563,37 +563,37 @@ } // Make first workflow task to start child execution - _, err := pollerParent.PollAndProcessWorkflowTask(false, false) + _, err := pollerParent.PollAndProcessWorkflowTask() s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) s.True(childExecutionStarted) // Process ChildExecution Started event - _, err = pollerParent.PollAndProcessWorkflowTask(false, false) + _, err = pollerParent.PollAndProcessWorkflowTask() s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) s.NotNil(startedEvent) // Process Child Execution #1 - _, err = pollerChild.PollAndProcessWorkflowTask(false, false) + _, err = pollerChild.PollAndProcessWorkflowTask() s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) s.False(childComplete) // Process Child Execution #2 - _, err = pollerChild.PollAndProcessWorkflowTask(false, false) + _, err = pollerChild.PollAndProcessWorkflowTask() s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) s.False(childComplete) // Process Child Execution #3 - _, err = pollerChild.PollAndProcessWorkflowTask(false, false) + _, err = pollerChild.PollAndProcessWorkflowTask() s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) s.True(childComplete) // Parent should see child complete - _, err = pollerParent.PollAndProcessWorkflowTask(false, false) + _, err = pollerParent.PollAndProcessWorkflowTask() s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) @@ -728,34 +728,34 @@ } // Make first workflow task to start child execution - _, err := pollerParent.PollAndProcessWorkflowTask(false, false) + _, err := 
pollerParent.PollAndProcessWorkflowTask() s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) s.True(childExecutionStarted) // Process ChildExecution Started event - _, err = pollerParent.PollAndProcessWorkflowTask(false, false) + _, err = pollerParent.PollAndProcessWorkflowTask() s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) s.NotNil(startedEvent) // Process Child Execution #1 - _, err = pollerChild.PollAndProcessWorkflowTask(false, false) + _, err = pollerChild.PollAndProcessWorkflowTask() s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) // Process Child Execution #2 - _, err = pollerChild.PollAndProcessWorkflowTask(false, false) + _, err = pollerChild.PollAndProcessWorkflowTask() s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) // Process Child Execution #3 - _, err = pollerChild.PollAndProcessWorkflowTask(false, false) + _, err = pollerChild.PollAndProcessWorkflowTask() s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) // Parent should see child complete - _, err = pollerParent.PollAndProcessWorkflowTask(false, false) + _, err = pollerParent.PollAndProcessWorkflowTask() s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) diff -Nru temporal-1.21.5-1/src/tests/continue_as_new_test.go temporal-1.22.5/src/tests/continue_as_new_test.go --- temporal-1.21.5-1/src/tests/continue_as_new_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/tests/continue_as_new_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -139,13 +139,13 @@ } for i := 0; i < 10; i++ { - _, err := poller.PollAndProcessWorkflowTask(false, false) + _, err := poller.PollAndProcessWorkflowTask() s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err, strconv.Itoa(i)) } s.False(workflowComplete) - _, err := poller.PollAndProcessWorkflowTask(true, false) + _, err := poller.PollAndProcessWorkflowTask(WithDumpHistory) s.NoError(err) s.True(workflowComplete) s.Equal(previousRunID, lastRunStartedEvent.GetWorkflowExecutionStartedEventAttributes().GetContinuedExecutionRunId()) @@ -224,7 +224,7 @@ } // process the workflow task and continue as new - _, err := poller.PollAndProcessWorkflowTask(true, false) + _, err := poller.PollAndProcessWorkflowTask(WithDumpHistory) s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) @@ -325,7 +325,7 @@ } minTaskID := int64(0) - _, err := poller.PollAndProcessWorkflowTask(false, false) + _, err := poller.PollAndProcessWorkflowTask() s.NoError(err) events := s.getHistory(s.namespace, executions[0]) s.True(len(events) != 0) @@ -334,7 +334,7 @@ minTaskID = event.GetTaskId() } - _, err = poller.PollAndProcessWorkflowTask(false, false) + _, err = poller.PollAndProcessWorkflowTask() s.NoError(err) events = s.getHistory(s.namespace, executions[1]) s.True(len(events) != 0) @@ -505,7 +505,7 @@ } // Make first command to start child execution - _, err := poller.PollAndProcessWorkflowTask(false, false) + _, err := poller.PollAndProcessWorkflowTask() s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) s.True(definition.childExecutionStarted) @@ -513,7 +513,7 @@ // Process ChildExecution Started event and all generations of child executions for i := 0; i < 11; i++ { s.Logger.Info("workflow task", tag.Counter(i)) - _, err = poller.PollAndProcessWorkflowTask(false, false) + _, err = poller.PollAndProcessWorkflowTask() s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) } 
@@ -522,13 +522,13 @@ s.NotNil(definition.startedEvent) // Process Child Execution final workflow task to complete it - _, err = poller.PollAndProcessWorkflowTask(true, false) + _, err = poller.PollAndProcessWorkflowTask(WithDumpHistory) s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) s.True(definition.childComplete) // Process ChildExecution completed event and complete parent execution - _, err = poller.PollAndProcessWorkflowTask(false, false) + _, err = poller.PollAndProcessWorkflowTask() s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) s.NotNil(definition.completedEvent) @@ -588,7 +588,7 @@ } // Make first command to start child execution - _, err := poller.PollAndProcessWorkflowTask(false, false) + _, err := poller.PollAndProcessWorkflowTask() s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) s.True(definition.childExecutionStarted) @@ -596,7 +596,7 @@ // Process ChildExecution Started event and all generations of child executions for i := 0; i < 11; i++ { s.Logger.Info("workflow task", tag.Counter(i)) - _, err = poller.PollAndProcessWorkflowTask(false, false) + _, err = poller.PollAndProcessWorkflowTask() s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) } diff -Nru temporal-1.21.5-1/src/tests/cron_test.go temporal-1.22.5/src/tests/cron_test.go --- temporal-1.21.5-1/src/tests/cron_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/tests/cron_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -122,11 +122,11 @@ } s.Logger.Info("Process first cron run which fails") - _, err := poller.PollAndProcessWorkflowTask(true, false) + _, err := poller.PollAndProcessWorkflowTask(WithDumpHistory) s.NoError(err) s.Logger.Info("Process first cron run which completes") - _, err = poller.PollAndProcessWorkflowTask(true, false) + _, err = poller.PollAndProcessWorkflowTask(WithDumpHistory) s.NoError(err) s.True(seeRetry) @@ -256,7 +256,7 @@ executionInfo := resp.GetExecutions()[0] s.Equal(targetBackoffDuration, executionInfo.GetExecutionTime().Sub(timestamp.TimeValue(executionInfo.GetStartTime()))) - _, err = poller.PollAndProcessWorkflowTask(false, false) + _, err = poller.PollAndProcessWorkflowTask() s.NoError(err) // Make sure the cron workflow start running at a proper time, in this case 3 seconds after the @@ -265,10 +265,10 @@ s.True(backoffDuration > targetBackoffDuration) s.True(backoffDuration < targetBackoffDuration+backoffDurationTolerance) - _, err = poller.PollAndProcessWorkflowTask(false, false) + _, err = poller.PollAndProcessWorkflowTask() s.NoError(err) - _, err = poller.PollAndProcessWorkflowTask(false, false) + _, err = poller.PollAndProcessWorkflowTask() s.NoError(err) s.Equal(3, len(executions)) diff -Nru temporal-1.21.5-1/src/tests/describe_test.go temporal-1.22.5/src/tests/describe_test.go --- temporal-1.21.5-1/src/tests/describe_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/tests/describe_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -134,7 +134,7 @@ } // first workflow task to schedule new activity - _, err = poller.PollAndProcessWorkflowTask(false, false) + _, err = poller.PollAndProcessWorkflowTask() s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) @@ -156,7 +156,7 @@ s.Equal(0, len(dweResponse.PendingActivities)) // Process signal in workflow - _, err = poller.PollAndProcessWorkflowTask(true, false) + _, err = poller.PollAndProcessWorkflowTask(WithDumpHistory) s.NoError(err) s.True(workflowComplete) @@ -262,7 
+262,7 @@ pollerInfos = testDescribeTaskQueue(s.namespace, &taskqueuepb.TaskQueue{Name: tl}, enumspb.TASK_QUEUE_TYPE_WORKFLOW) s.Empty(pollerInfos) - _, errWorkflowTask := poller.PollAndProcessWorkflowTask(false, false) + _, errWorkflowTask := poller.PollAndProcessWorkflowTask() s.NoError(errWorkflowTask) pollerInfos = testDescribeTaskQueue(s.namespace, &taskqueuepb.TaskQueue{Name: tl}, enumspb.TASK_QUEUE_TYPE_ACTIVITY) s.Empty(pollerInfos) diff -Nru temporal-1.21.5-1/src/tests/flag.go temporal-1.22.5/src/tests/flag.go --- temporal-1.21.5-1/src/tests/flag.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/tests/flag.go 2024-02-23 09:45:43.000000000 +0000 @@ -29,6 +29,7 @@ // TestFlags contains the feature flags for integration tests var TestFlags struct { FrontendAddr string + FrontendHTTPAddr string PersistenceType string PersistenceDriver string TestClusterConfigFile string @@ -37,6 +38,7 @@ func init() { flag.StringVar(&TestFlags.FrontendAddr, "frontendAddress", "", "host:port for temporal frontend service") + flag.StringVar(&TestFlags.FrontendHTTPAddr, "frontendHttpAddress", "", "host:port for temporal frontend HTTP service (only applies when frontendAddress set)") flag.StringVar(&TestFlags.PersistenceType, "persistenceType", "sql", "type of persistence - [nosql or sql]") flag.StringVar(&TestFlags.PersistenceDriver, "persistenceDriver", "sqlite", "driver of nosql / sql- [cassandra, mysql, postgresql, sqlite]") flag.StringVar(&TestFlags.TestClusterConfigFile, "TestClusterConfigFile", "", "test cluster config file location") diff -Nru temporal-1.21.5-1/src/tests/gethistory_test.go temporal-1.22.5/src/tests/gethistory_test.go --- temporal-1.21.5-1/src/tests/gethistory_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/tests/gethistory_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -194,7 +194,7 @@ // here do a long pull and check # of events and time elapsed // Make first command to schedule activity, this should affect the long poll above time.AfterFunc(time.Second*8, func() { - _, errWorkflowTask1 := poller.PollAndProcessWorkflowTask(false, false) + _, errWorkflowTask1 := poller.PollAndProcessWorkflowTask() s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(errWorkflowTask1)) }) start = time.Now().UTC() @@ -210,7 +210,7 @@ s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(errActivity)) }) time.AfterFunc(time.Second*8, func() { - _, errWorkflowTask2 := poller.PollAndProcessWorkflowTask(false, false) + _, errWorkflowTask2 := poller.PollAndProcessWorkflowTask() s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(errWorkflowTask2)) }) for token != nil { @@ -361,7 +361,7 @@ // here do a long pull and check # of events and time elapsed // Make first command to schedule activity, this should affect the long poll above time.AfterFunc(time.Second*8, func() { - _, errWorkflowTask1 := poller.PollAndProcessWorkflowTask(false, false) + _, errWorkflowTask1 := poller.PollAndProcessWorkflowTask() s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(errWorkflowTask1)) }) start = time.Now().UTC() @@ -377,7 +377,7 @@ s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(errActivity)) }) time.AfterFunc(time.Second*8, func() { - _, errWorkflowTask2 := poller.PollAndProcessWorkflowTask(false, false) + _, errWorkflowTask2 := poller.PollAndProcessWorkflowTask() s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(errWorkflowTask2)) }) for token != nil { @@ -552,7 +552,7 @@ // here do a long pull and check # of events and time elapsed // Make first command to schedule 
activity, this should affect the long poll above time.AfterFunc(time.Second*8, func() { - _, errWorkflowTask1 := poller.PollAndProcessWorkflowTask(false, false) + _, errWorkflowTask1 := poller.PollAndProcessWorkflowTask() s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(errWorkflowTask1)) }) start = time.Now().UTC() @@ -569,7 +569,7 @@ s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(errActivity)) }) time.AfterFunc(time.Second*8, func() { - _, errWorkflowTask2 := poller.PollAndProcessWorkflowTask(false, false) + _, errWorkflowTask2 := poller.PollAndProcessWorkflowTask() s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(errWorkflowTask2)) }) for token != nil { diff -Nru temporal-1.21.5-1/src/tests/http_api_test.go temporal-1.22.5/src/tests/http_api_test.go --- temporal-1.21.5-1/src/tests/http_api_test.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/tests/http_api_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,261 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package tests + +import ( + "context" + "encoding/json" + "io" + "net/http" + "strings" + "sync" + + "go.temporal.io/sdk/workflow" + "go.temporal.io/server/common/authorization" + "go.temporal.io/server/common/headers" + "go.temporal.io/server/common/metrics" + "google.golang.org/grpc/metadata" +) + +type SomeJSONStruct struct { + SomeField string `json:"someField"` +} + +func (s *clientIntegrationSuite) TestHTTPAPIBasics() { + if s.httpAPIAddress == "" { + s.T().Skip("HTTP API server not enabled") + } + // Create basic workflow that can answer queries, get signals, etc + workflowFn := func(ctx workflow.Context, arg *SomeJSONStruct) (*SomeJSONStruct, error) { + // Query that just returns query arg + err := workflow.SetQueryHandler(ctx, "some-query", func(queryArg *SomeJSONStruct) (*SomeJSONStruct, error) { + return queryArg, nil + }) + if err != nil { + return nil, err + } + // Wait for signal to complete + var done bool + sel := workflow.NewSelector(ctx) + sel.AddReceive(workflow.GetSignalChannel(ctx, "some-signal"), func(ch workflow.ReceiveChannel, _ bool) { + var signalArg SomeJSONStruct + ch.Receive(ctx, &signalArg) + if signalArg.SomeField != "signal-arg" { + panic("invalid signal arg") + } + done = true + }) + for !done { + sel.Select(ctx) + } + return arg, nil + } + s.worker.RegisterWorkflowWithOptions(workflowFn, workflow.RegisterOptions{Name: "http-basic-workflow"}) + + // Capture metrics + capture := s.testCluster.host.captureMetricsHandler.StartCapture() + defer s.testCluster.host.captureMetricsHandler.StopCapture(capture) + + // Start + workflowID := s.randomizeStr("wf") + _, respBody := s.httpPost(http.StatusOK, "/api/v1/namespaces/"+s.namespace+"/workflows/"+workflowID, `{ + "workflowType": { "name": "http-basic-workflow" }, + "taskQueue": { "name": "`+s.taskQueue+`" }, + "input": [{ "someField": "workflow-arg" }] + }`) + var startResp struct { + RunID string `json:"runId"` + } + s.Require().NoError(json.Unmarshal(respBody, &startResp)) + + // Check that there is a an HTTP call metric with the proper tags/value. We + // can't test overall counts because the metrics handler is shared across + // concurrently executing tests. 
+ var found bool + for _, metric := range capture.Snapshot()[metrics.HTTPServiceRequests.GetMetricName()] { + found = + metric.Tags[metrics.OperationTagName] == "/temporal.api.workflowservice.v1.WorkflowService/StartWorkflowExecution" && + metric.Tags["namespace"] == s.namespace && + metric.Value == int64(1) + if found { + break + } + } + s.Require().True(found) + + // Confirm already exists error with details and proper code + _, respBody = s.httpPost(http.StatusConflict, "/api/v1/namespaces/"+s.namespace+"/workflows/"+workflowID, `{ + "workflowType": { "name": "http-basic-workflow" }, + "taskQueue": { "name": "`+s.taskQueue+`" }, + "input": [{ "someField": "workflow-arg" }], + "requestId": "`+s.randomizeStr("req")+`" + }`) + var errResp struct { + Message string `json:"message"` + Details []struct { + RunID string `json:"runId"` + } `json:"details"` + } + s.Require().NoError(json.Unmarshal(respBody, &errResp)) + s.Require().Contains(errResp.Message, "already running") + s.Require().Equal(startResp.RunID, errResp.Details[0].RunID) + + // Query + _, respBody = s.httpPost( + http.StatusOK, + "/api/v1/namespaces/"+s.namespace+"/workflows/"+workflowID+"/query/some-query", + `{ "query": { "queryArgs": [{ "someField": "query-arg" }] } }`, + ) + var queryResp struct { + QueryResult json.RawMessage `json:"queryResult"` + } + s.Require().NoError(json.Unmarshal(respBody, &queryResp)) + s.Require().JSONEq(`[{ "someField": "query-arg" }]`, string(queryResp.QueryResult)) + + // Signal which also completes the workflow + s.httpPost( + http.StatusOK, + "/api/v1/namespaces/"+s.namespace+"/workflows/"+workflowID+"/signal/some-signal", + `{ "input": [{ "someField": "signal-arg" }] }`, + ) + + // Confirm workflow complete + _, respBody = s.httpGet( + http.StatusOK, + // Our version of gRPC gateway only supports integer enums in queries :-( + "/api/v1/namespaces/"+s.namespace+"/workflows/"+workflowID+"/history?historyEventFilterType=2", + ) + var histResp struct { + History struct { + Events []struct { + EventType string `json:"eventType"` + WorkflowExecutionCompletedEventAttributes struct { + Result json.RawMessage `json:"result"` + } `json:"workflowExecutionCompletedEventAttributes"` + } `json:"events"` + } `json:"history"` + } + s.Require().NoError(json.Unmarshal(respBody, &histResp)) + s.Require().Equal("WorkflowExecutionCompleted", histResp.History.Events[0].EventType) + s.Require().JSONEq( + `[{ "someField": "workflow-arg" }]`, + string(histResp.History.Events[0].WorkflowExecutionCompletedEventAttributes.Result), + ) + +} + +func (s *clientIntegrationSuite) TestHTTPAPIHeaders() { + if s.httpAPIAddress == "" { + s.T().Skip("HTTP API server not enabled") + } + // Make a claim mapper and authorizer that capture info + var lastInfo *authorization.AuthInfo + var listWorkflowMetadata metadata.MD + var callbackLock sync.RWMutex + s.testCluster.host.SetOnGetClaims(func(info *authorization.AuthInfo) (*authorization.Claims, error) { + callbackLock.Lock() + defer callbackLock.Unlock() + if info != nil { + lastInfo = info + } + return &authorization.Claims{System: authorization.RoleAdmin}, nil + }) + s.testCluster.host.SetOnAuthorize(func( + ctx context.Context, + caller *authorization.Claims, + target *authorization.CallTarget, + ) (authorization.Result, error) { + callbackLock.Lock() + defer callbackLock.Unlock() + if target.APIName == "/temporal.api.workflowservice.v1.WorkflowService/ListWorkflowExecutions" { + listWorkflowMetadata, _ = metadata.FromIncomingContext(ctx) + } + return 
authorization.Result{Decision: authorization.DecisionAllow}, nil + }) + + // Make a simple list call that we don't care about the result + req, err := http.NewRequest("GET", "/api/v1/namespaces/"+s.namespace+"/workflows", nil) + s.Require().NoError(err) + req.Header.Set("Authorization", "my-auth-token") + req.Header.Set("X-Forwarded-For", "1.2.3.4:5678") + // The header is set to forward deep in the onebox config + req.Header.Set("This-Header-Forwarded", "some-value") + req.Header.Set("This-Header-Not-Forwarded", "some-value") + s.httpRequest(http.StatusOK, req) + + // Confirm the claims got my auth token + callbackLock.RLock() + defer callbackLock.RUnlock() + s.Require().Equal("my-auth-token", lastInfo.AuthToken) + + // Check headers + s.Require().Equal("my-auth-token", listWorkflowMetadata["authorization"][0]) + s.Require().Contains(listWorkflowMetadata["x-forwarded-for"][0], "1.2.3.4:5678") + s.Require().Equal("some-value", listWorkflowMetadata["this-header-forwarded"][0]) + s.Require().NotContains(listWorkflowMetadata, "this-header-not-forwarded") + s.Require().Equal(headers.ClientNameServerHTTP, listWorkflowMetadata[headers.ClientNameHeaderName][0]) + s.Require().Equal(headers.ServerVersion, listWorkflowMetadata[headers.ClientVersionHeaderName][0]) +} + +func (s *clientIntegrationSuite) TestHTTPAPIPretty() { + if s.httpAPIAddress == "" { + s.T().Skip("HTTP API server not enabled") + } + // Make a call to system info normal, confirm no newline, then ask for pretty + // and confirm newlines + _, b := s.httpGet(http.StatusOK, "/api/v1/system-info") + s.Require().NotContains(b, byte('\n')) + _, b = s.httpGet(http.StatusOK, "/api/v1/system-info?pretty") + s.Require().Contains(b, byte('\n')) +} + +func (s *clientIntegrationSuite) httpGet(expectedStatus int, url string) (*http.Response, []byte) { + req, err := http.NewRequest("GET", url, nil) + s.Require().NoError(err) + return s.httpRequest(expectedStatus, req) +} + +func (s *clientIntegrationSuite) httpPost(expectedStatus int, url string, jsonBody string) (*http.Response, []byte) { + req, err := http.NewRequest("POST", url, strings.NewReader(jsonBody)) + s.Require().NoError(err) + req.Header.Set("Content-Type", "application/json") + return s.httpRequest(expectedStatus, req) +} + +func (s *clientIntegrationSuite) httpRequest(expectedStatus int, req *http.Request) (*http.Response, []byte) { + if req.URL.Scheme == "" { + req.URL.Scheme = "http" + } + if req.URL.Host == "" { + req.URL.Host = s.httpAPIAddress + } + resp, err := http.DefaultClient.Do(req) + s.Require().NoError(err) + body, err := io.ReadAll(resp.Body) + _ = resp.Body.Close() + s.Require().NoError(err) + s.Require().Equal(expectedStatus, resp.StatusCode, "Bad status, body: %s", body) + return resp, body +} diff -Nru temporal-1.21.5-1/src/tests/integrationbase.go temporal-1.22.5/src/tests/integrationbase.go --- temporal-1.21.5-1/src/tests/integrationbase.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/tests/integrationbase.go 2024-02-23 09:45:43.000000000 +0000 @@ -69,6 +69,7 @@ engine FrontendClient adminClient AdminClient operatorClient operatorservice.OperatorServiceClient + httpAPIAddress string Logger log.Logger namespace string foreignNamespace string @@ -96,6 +97,7 @@ s.engine = NewFrontendClient(connection) s.adminClient = NewAdminClient(connection) s.operatorClient = operatorservice.NewOperatorServiceClient(connection) + s.httpAPIAddress = TestFlags.FrontendHTTPAddr } else { s.Logger.Info("Running integration test against test cluster") cluster, err := 
NewCluster(clusterConfig, s.Logger) @@ -104,6 +106,7 @@ s.engine = s.testCluster.GetFrontendClient() s.adminClient = s.testCluster.GetAdminClient() s.operatorClient = s.testCluster.GetOperatorClient() + s.httpAPIAddress = cluster.host.FrontendHTTPAddress() } s.namespace = s.randomizeStr("integration-test-namespace") diff -Nru temporal-1.21.5-1/src/tests/onebox.go temporal-1.22.5/src/tests/onebox.go --- temporal-1.21.5-1/src/tests/onebox.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/tests/onebox.go 2024-02-23 09:45:43.000000000 +0000 @@ -26,9 +26,11 @@ import ( "context" + "crypto/tls" "encoding/json" "fmt" "net" + "strconv" "sync" "time" @@ -57,6 +59,7 @@ "go.temporal.io/server/common/log/tag" "go.temporal.io/server/common/membership" "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/metrics/metricstest" "go.temporal.io/server/common/namespace" "go.temporal.io/server/common/persistence" persistenceClient "go.temporal.io/server/common/persistence/client" @@ -66,6 +69,7 @@ "go.temporal.io/server/common/resolver" "go.temporal.io/server/common/resource" "go.temporal.io/server/common/rpc" + "go.temporal.io/server/common/rpc/encryption" "go.temporal.io/server/common/sdk" "go.temporal.io/server/common/searchattribute" "go.temporal.io/server/service/frontend" @@ -121,6 +125,12 @@ mockAdminClient map[string]adminservice.AdminServiceClient namespaceReplicationTaskExecutor namespace.ReplicationTaskExecutor spanExporters []otelsdktrace.SpanExporter + tlsConfigProvider *encryption.FixedTLSConfigProvider + captureMetricsHandler *metricstest.CaptureHandler + + onGetClaims func(*authorization.AuthInfo) (*authorization.Claims, error) + onAuthorize func(context.Context, *authorization.Claims, *authorization.CallTarget) (authorization.Result, error) + callbackLock sync.RWMutex // Must be used for above callbacks } // HistoryConfig contains configs for history service @@ -158,6 +168,8 @@ NamespaceReplicationTaskExecutor namespace.ReplicationTaskExecutor SpanExporters []otelsdktrace.SpanExporter DynamicConfigOverrides map[dynamicconfig.Key]interface{} + TLSConfigProvider *encryption.FixedTLSConfigProvider + CaptureMetricsHandler *metricstest.CaptureHandler } listenHostPort string @@ -190,6 +202,8 @@ mockAdminClient: params.MockAdminClient, namespaceReplicationTaskExecutor: params.NamespaceReplicationTaskExecutor, spanExporters: params.SpanExporters, + tlsConfigProvider: params.TLSConfigProvider, + captureMetricsHandler: params.CaptureMetricsHandler, dcClient: testDCClient, } impl.overrideHistoryDynamicConfig(testDCClient) @@ -277,6 +291,21 @@ } } +func (c *temporalImpl) FrontendHTTPAddress() string { + host, port := c.FrontendHTTPHostPort() + return net.JoinHostPort(host, strconv.Itoa(port)) +} + +func (c *temporalImpl) FrontendHTTPHostPort() (string, int) { + if host, port, err := net.SplitHostPort(c.FrontendGRPCAddress()); err != nil { + panic(fmt.Errorf("Invalid gRPC frontend address: %w", err)) + } else if portNum, err := strconv.Atoi(port); err != nil { + panic(fmt.Errorf("Invalid gRPC frontend port: %w", err)) + } else { + return host, portNum + 10 + } +} + func (c *temporalImpl) HistoryServiceAddress() []string { var hosts []string var startPort int @@ -367,22 +396,21 @@ } } - stoppedCh := make(chan struct{}) var frontendService *frontend.Service var clientBean client.Bean var namespaceRegistry namespace.Registry var rpcFactory common.RPCFactory feApp := fx.New( fx.Supply( - stoppedCh, persistenceConfig, serviceName, ), + fx.Provide(c.frontendConfigProvider), 
fx.Provide(func() listenHostPort { return listenHostPort(c.FrontendGRPCAddress()) }), fx.Provide(func() config.DCRedirectionPolicy { return config.DCRedirectionPolicy{} }), fx.Provide(func() log.ThrottledLogger { return c.logger }), fx.Provide(func() resource.NamespaceLogger { return c.logger }), - fx.Provide(newRPCFactoryImpl), + fx.Provide(c.newRPCFactory), fx.Provide(func() membership.Monitor { return newSimpleMonitor(hosts) }), @@ -393,10 +421,10 @@ fx.Provide(func() carchiver.ArchivalMetadata { return c.archiverMetadata }), fx.Provide(func() provider.ArchiverProvider { return c.archiverProvider }), fx.Provide(sdkClientFactoryProvider), - fx.Provide(func() metrics.Handler { return metrics.NoopMetricsHandler }), + fx.Provide(c.GetMetricsHandler), fx.Provide(func() []grpc.UnaryServerInterceptor { return nil }), - fx.Provide(func() authorization.Authorizer { return nil }), - fx.Provide(func() authorization.ClaimMapper { return nil }), + fx.Provide(func() authorization.Authorizer { return c }), + fx.Provide(func() authorization.ClaimMapper { return c }), fx.Provide(func() authorization.JWTAudienceMapper { return nil }), fx.Provide(func() client.FactoryProvider { return client.NewFactoryProvider() }), fx.Provide(func() searchattribute.Mapper { return nil }), @@ -410,6 +438,7 @@ fx.Provide(resource.DefaultSnTaggedLoggerProvider), fx.Provide(func() *esclient.Config { return c.esConfig }), fx.Provide(func() esclient.Client { return c.esClient }), + fx.Provide(c.GetTLSConfigProvider), fx.Supply(c.spanExporters), temporal.ServiceTracingModule, frontend.Module, @@ -464,21 +493,19 @@ } } - stoppedCh := make(chan struct{}) var historyService *history.Service var clientBean client.Bean var namespaceRegistry namespace.Registry app := fx.New( fx.Supply( - stoppedCh, persistenceConfig, serviceName, ), - fx.Provide(func() metrics.Handler { return metrics.NoopMetricsHandler }), + fx.Provide(c.GetMetricsHandler), fx.Provide(func() listenHostPort { return listenHostPort(grpcPort) }), fx.Provide(func() config.DCRedirectionPolicy { return config.DCRedirectionPolicy{} }), fx.Provide(func() log.ThrottledLogger { return c.logger }), - fx.Provide(newRPCFactoryImpl), + fx.Provide(c.newRPCFactory), fx.Provide(func() membership.Monitor { return newSimpleMonitor(hosts) }), @@ -501,6 +528,7 @@ fx.Provide(resource.DefaultSnTaggedLoggerProvider), fx.Provide(func() *esclient.Config { return c.esConfig }), fx.Provide(func() esclient.Client { return c.esClient }), + fx.Provide(c.GetTLSConfigProvider), fx.Provide(workflow.NewTaskGeneratorProvider), fx.Supply(c.spanExporters), temporal.ServiceTracingModule, @@ -562,20 +590,18 @@ } } - stoppedCh := make(chan struct{}) var matchingService *matching.Service var clientBean client.Bean var namespaceRegistry namespace.Registry app := fx.New( fx.Supply( - stoppedCh, persistenceConfig, serviceName, ), - fx.Provide(func() metrics.Handler { return metrics.NoopMetricsHandler }), + fx.Provide(c.GetMetricsHandler), fx.Provide(func() listenHostPort { return listenHostPort(c.MatchingGRPCServiceAddress()) }), fx.Provide(func() log.ThrottledLogger { return c.logger }), - fx.Provide(newRPCFactoryImpl), + fx.Provide(c.newRPCFactory), fx.Provide(func() membership.Monitor { return newSimpleMonitor(hosts) }), @@ -593,6 +619,7 @@ fx.Provide(func() dynamicconfig.Client { return c.dcClient }), fx.Provide(func() *esclient.Config { return c.esConfig }), fx.Provide(func() esclient.Client { return c.esClient }), + fx.Provide(c.GetTLSConfigProvider), fx.Provide(func() log.Logger { return c.logger }), 
fx.Provide(resource.DefaultSnTaggedLoggerProvider), fx.Supply(c.spanExporters), @@ -656,21 +683,19 @@ clusterConfigCopy.EnableGlobalNamespace = true } - stoppedCh := make(chan struct{}) var workerService *worker.Service var clientBean client.Bean var namespaceRegistry namespace.Registry app := fx.New( fx.Supply( - stoppedCh, persistenceConfig, serviceName, ), - fx.Provide(func() metrics.Handler { return metrics.NoopMetricsHandler }), + fx.Provide(c.GetMetricsHandler), fx.Provide(func() listenHostPort { return listenHostPort(c.WorkerGRPCServiceAddress()) }), fx.Provide(func() config.DCRedirectionPolicy { return config.DCRedirectionPolicy{} }), fx.Provide(func() log.ThrottledLogger { return c.logger }), - fx.Provide(newRPCFactoryImpl), + fx.Provide(c.newRPCFactory), fx.Provide(func() membership.Monitor { return newSimpleMonitor(hosts) }), @@ -691,6 +716,7 @@ fx.Provide(resource.DefaultSnTaggedLoggerProvider), fx.Provide(func() esclient.Client { return c.esClient }), fx.Provide(func() *esclient.Config { return c.esConfig }), + fx.Provide(c.GetTLSConfigProvider), fx.Supply(c.spanExporters), temporal.ServiceTracingModule, worker.Module, @@ -726,6 +752,37 @@ return c.executionManager } +func (c *temporalImpl) GetTLSConfigProvider() encryption.TLSConfigProvider { + // If we just return this directly, the interface will be non-nil but the + // pointer will be nil + if c.tlsConfigProvider != nil { + return c.tlsConfigProvider + } + return nil +} + +func (c *temporalImpl) GetMetricsHandler() metrics.Handler { + if c.captureMetricsHandler != nil { + return c.captureMetricsHandler + } + return metrics.NoopMetricsHandler +} + +func (c *temporalImpl) frontendConfigProvider() *config.Config { + // Set HTTP port and a test HTTP forwarded header + _, httpPort := c.FrontendHTTPHostPort() + return &config.Config{ + Services: map[string]config.Service{ + string(primitives.FrontendService): { + RPC: config.RPC{ + HTTPPort: httpPort, + HTTPAdditionalForwardedHeaders: []string{"this-header-forwarded"}, + }, + }, + }, + } +} + func (c *temporalImpl) overrideHistoryDynamicConfig(client *dcClient) { client.OverrideValue(dynamicconfig.ReplicationTaskProcessorStartWait, time.Nanosecond) @@ -754,6 +811,78 @@ // For DeleteWorkflowExecution tests client.OverrideValue(dynamicconfig.TransferProcessorUpdateAckInterval, 1*time.Second) client.OverrideValue(dynamicconfig.VisibilityProcessorUpdateAckInterval, 1*time.Second) + + client.OverrideValue(dynamicconfig.EnableAPIGetCurrentRunIDLock, true) +} + +func (c *temporalImpl) newRPCFactory( + sn primitives.ServiceName, + grpcHostPort listenHostPort, + logger log.Logger, + grpcResolver membership.GRPCResolver, + tlsConfigProvider encryption.TLSConfigProvider, +) (common.RPCFactory, error) { + host, portStr, err := net.SplitHostPort(string(grpcHostPort)) + if err != nil { + return nil, fmt.Errorf("failed parsing host:port: %w", err) + } + port, err := strconv.Atoi(portStr) + if err != nil { + return nil, fmt.Errorf("invalid port: %w", err) + } + var frontendTLSConfig *tls.Config + if tlsConfigProvider != nil { + if frontendTLSConfig, err = tlsConfigProvider.GetFrontendClientConfig(); err != nil { + return nil, fmt.Errorf("failed getting client TLS config: %w", err) + } + } + return rpc.NewFactory( + &config.RPC{BindOnIP: host, GRPCPort: port}, + sn, + logger, + tlsConfigProvider, + grpcResolver.MakeURL(primitives.FrontendService), + frontendTLSConfig, + nil, + ), nil +} + +func (c *temporalImpl) SetOnGetClaims(fn func(*authorization.AuthInfo) (*authorization.Claims, error)) { + 
c.callbackLock.Lock() + c.onGetClaims = fn + c.callbackLock.Unlock() +} + +func (c *temporalImpl) GetClaims(authInfo *authorization.AuthInfo) (*authorization.Claims, error) { + c.callbackLock.RLock() + onGetClaims := c.onGetClaims + c.callbackLock.RUnlock() + if onGetClaims != nil { + return onGetClaims(authInfo) + } + return &authorization.Claims{System: authorization.RoleAdmin}, nil +} + +func (c *temporalImpl) SetOnAuthorize( + fn func(context.Context, *authorization.Claims, *authorization.CallTarget) (authorization.Result, error), +) { + c.callbackLock.Lock() + c.onAuthorize = fn + c.callbackLock.Unlock() +} + +func (c *temporalImpl) Authorize( + ctx context.Context, + caller *authorization.Claims, + target *authorization.CallTarget, +) (authorization.Result, error) { + c.callbackLock.RLock() + onAuthorize := c.onAuthorize + c.callbackLock.RUnlock() + if onAuthorize != nil { + return onAuthorize(ctx, caller, target) + } + return authorization.Result{Decision: authorization.DecisionAllow}, nil } // copyPersistenceConfig makes a deepcopy of persistence config. @@ -783,89 +912,24 @@ metricsHandler metrics.Handler, logger log.Logger, dc *dynamicconfig.Collection, + tlsConfigProvider encryption.TLSConfigProvider, ) sdk.ClientFactory { + var tlsConfig *tls.Config + if tlsConfigProvider != nil { + var err error + if tlsConfig, err = tlsConfigProvider.GetFrontendClientConfig(); err != nil { + panic(err) + } + } return sdk.NewClientFactory( resolver.MakeURL(primitives.FrontendService), - nil, + tlsConfig, metricsHandler, logger, dc.GetIntProperty(dynamicconfig.WorkerStickyCacheSize, 0), ) } -type rpcFactoryImpl struct { - serviceName primitives.ServiceName - grpcHostPort string - logger log.Logger - frontendURL string - - sync.RWMutex - listener net.Listener -} - -func (c *rpcFactoryImpl) GetFrontendGRPCServerOptions() ([]grpc.ServerOption, error) { - return nil, nil -} - -func (c *rpcFactoryImpl) GetInternodeGRPCServerOptions() ([]grpc.ServerOption, error) { - return nil, nil -} - -func (c *rpcFactoryImpl) CreateRemoteFrontendGRPCConnection(hostName string) *grpc.ClientConn { - return c.CreateGRPCConnection(hostName) -} - -func (c *rpcFactoryImpl) CreateLocalFrontendGRPCConnection() *grpc.ClientConn { - return c.CreateGRPCConnection(c.frontendURL) -} - -func (c *rpcFactoryImpl) CreateInternodeGRPCConnection(hostName string) *grpc.ClientConn { - return c.CreateGRPCConnection(hostName) -} - -func newRPCFactoryImpl(sn primitives.ServiceName, grpcHostPort listenHostPort, logger log.Logger, resolver membership.GRPCResolver) common.RPCFactory { - return &rpcFactoryImpl{ - serviceName: sn, - grpcHostPort: string(grpcHostPort), - logger: logger, - frontendURL: resolver.MakeURL(primitives.FrontendService), - } -} - -func (c *rpcFactoryImpl) GetGRPCListener() net.Listener { - c.RLock() - if c.listener != nil { - c.RUnlock() - return c.listener - } - c.RUnlock() - - c.Lock() - defer c.Unlock() - - if c.listener == nil { - var err error - c.listener, err = net.Listen("tcp", c.grpcHostPort) - if err != nil { - c.logger.Fatal("Failed create gRPC listener", tag.Error(err), tag.Service(c.serviceName), tag.Address(c.grpcHostPort)) - } - - c.logger.Info("Created gRPC listener", tag.Service(c.serviceName), tag.Address(c.grpcHostPort)) - } - - return c.listener -} - -// CreateGRPCConnection creates connection for gRPC calls -func (c *rpcFactoryImpl) CreateGRPCConnection(hostName string) *grpc.ClientConn { - connection, err := rpc.Dial(hostName, nil, c.logger) - if err != nil { - c.logger.Fatal("Failed to create 
gRPC connection", tag.Error(err)) - } - - return connection -} - func newSimpleHostInfoProvider(serviceName primitives.ServiceName, hosts map[primitives.ServiceName][]string) membership.HostInfoProvider { hostInfo := membership.NewHostInfoFromAddress(hosts[serviceName][0]) return membership.NewHostInfoProvider(hostInfo) diff -Nru temporal-1.21.5-1/src/tests/relay_task_test.go temporal-1.22.5/src/tests/relay_task_test.go --- temporal-1.21.5-1/src/tests/relay_task_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/tests/relay_task_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -97,17 +97,13 @@ } // First workflow task complete with a marker command, and request to relay workflow task (immediately return a new workflow task) - _, newTask, err := poller.PollAndProcessWorkflowTaskWithAttemptAndRetryAndForceNewWorkflowTask( - false, - false, - false, - false, - 0, - 3, - true, - nil) + res, err := poller.PollAndProcessWorkflowTask( + WithExpectedAttemptCount(0), + WithRetries(3), + WithForceNewWorkflowTask) s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) + newTask := res.NewTask s.NotNil(newTask) s.NotNil(newTask.WorkflowTask) @@ -127,7 +123,7 @@ s.True(workflowTaskTimeout) // Now complete workflow - _, err = poller.PollAndProcessWorkflowTaskWithAttempt(true, false, false, false, 2) + _, err = poller.PollAndProcessWorkflowTask(WithDumpHistory, WithExpectedAttemptCount(2)) s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) diff -Nru temporal-1.21.5-1/src/tests/reset_workflow_test.go temporal-1.22.5/src/tests/reset_workflow_test.go --- temporal-1.21.5-1/src/tests/reset_workflow_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/tests/reset_workflow_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -146,7 +146,7 @@ } // Process first workflow task to schedule activities - _, err := poller.PollAndProcessWorkflowTask(false, false) + _, err := poller.PollAndProcessWorkflowTask() s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) @@ -156,7 +156,7 @@ s.NoError(err) // Process second workflow task which checks activity completion - _, err = poller.PollAndProcessWorkflowTask(false, false) + _, err = poller.PollAndProcessWorkflowTask() s.Logger.Info("Poll and process second workflow task", tag.Error(err)) s.NoError(err) @@ -193,7 +193,7 @@ s.Logger.Info("Poll and process third activity", tag.Error(err)) s.NoError(err) - _, err = poller.PollAndProcessWorkflowTask(false, false) + _, err = poller.PollAndProcessWorkflowTask() s.Logger.Info("Poll and process final workflow task", tag.Error(err)) s.NoError(err) @@ -296,7 +296,7 @@ T: s.T(), } - _, err := poller.PollAndProcessWorkflowTask(false, false) + _, err := poller.PollAndProcessWorkflowTask() s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) @@ -305,7 +305,7 @@ _, err = s.engine.SignalWorkflowExecution(NewContext(), signalRequest) s.NoError(err) - _, err = poller.PollAndProcessWorkflowTask(true, false) + _, err = poller.PollAndProcessWorkflowTask(WithDumpHistory) s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) } @@ -463,11 +463,11 @@ T: s.T(), } - _, err := poller.PollAndProcessWorkflowTask(false, false) + _, err := poller.PollAndProcessWorkflowTask() s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.Error(err) // due to workflow termination (reset) - _, err = poller.PollAndProcessWorkflowTask(false, false) + _, err = poller.PollAndProcessWorkflowTask() 
s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) s.True(workflowComplete) @@ -590,7 +590,7 @@ T: s.T(), } - _, err = poller.PollAndProcessWorkflowTask(false, false) + _, err = poller.PollAndProcessWorkflowTask() s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) @@ -614,7 +614,7 @@ }) s.NoError(err) - _, err = poller.PollAndProcessWorkflowTask(false, false) + _, err = poller.PollAndProcessWorkflowTask() s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) s.True(workflowComplete) diff -Nru temporal-1.21.5-1/src/tests/schedule_test.go temporal-1.22.5/src/tests/schedule_test.go --- temporal-1.21.5-1/src/tests/schedule_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/tests/schedule_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -650,9 +650,11 @@ s.NoError(err) s.EqualValues(0, len(describeResp.Info.RunningWorkflows)) - // scheduler has done some stuff - events3 := s.getHistory(s.namespace, &commonpb.WorkflowExecution{WorkflowId: scheduler.WorkflowIDPrefix + sid}) - s.Greater(len(events3), len(events2)) + // check scheduler has gotten the refresh and done some stuff. signal is sent without waiting so we need to wait. + s.Eventually(func() bool { + events3 := s.getHistory(s.namespace, &commonpb.WorkflowExecution{WorkflowId: scheduler.WorkflowIDPrefix + sid}) + return len(events3) > len(events2) + }, 5*time.Second, 100*time.Millisecond) // cleanup _, err = s.engine.DeleteSchedule(NewContext(), &workflowservice.DeleteScheduleRequest{ @@ -816,8 +818,6 @@ } func (s *scheduleIntegrationSuite) TestNextTimeCache() { - s.T().Skip("re-enable after enabling new cache") - sid := "sched-test-next-time-cache" wid := "sched-test-next-time-cache-wf" wt := "sched-test-next-time-cache-wt" diff -Nru temporal-1.21.5-1/src/tests/signal_workflow_test.go temporal-1.22.5/src/tests/signal_workflow_test.go --- temporal-1.21.5-1/src/tests/signal_workflow_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/tests/signal_workflow_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -160,7 +160,7 @@ } // Make first command to schedule activity - _, err := poller.PollAndProcessWorkflowTask(false, false) + _, err := poller.PollAndProcessWorkflowTask() s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) @@ -181,7 +181,7 @@ s.NoError(err) // Process signal in workflow - _, err = poller.PollAndProcessWorkflowTask(true, false) + _, err = poller.PollAndProcessWorkflowTask(WithDumpHistory) s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) @@ -207,7 +207,7 @@ s.NoError(err) // Process signal in workflow - _, err = poller.PollAndProcessWorkflowTask(true, false) + _, err = poller.PollAndProcessWorkflowTask(WithDumpHistory) s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) @@ -338,7 +338,7 @@ } // Make first command to schedule activity - _, err := poller.PollAndProcessWorkflowTask(false, false) + _, err := poller.PollAndProcessWorkflowTask() s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) @@ -361,7 +361,7 @@ s.NoError(err) // Process signal in workflow - _, err = poller.PollAndProcessWorkflowTask(false, false) + _, err = poller.PollAndProcessWorkflowTask() s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) @@ -377,7 +377,7 @@ s.NoError(err) // Process signal in workflow - _, err = poller.PollAndProcessWorkflowTask(true, false) + _, err = poller.PollAndProcessWorkflowTask(WithDumpHistory) 
s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) @@ -542,11 +542,11 @@ } // Start both current and foreign workflows to make some progress. - _, err := poller.PollAndProcessWorkflowTask(false, false) + _, err := poller.PollAndProcessWorkflowTask() s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) - _, err = foreignPoller.PollAndProcessWorkflowTask(false, false) + _, err = foreignPoller.PollAndProcessWorkflowTask() s.Logger.Info("foreign PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) @@ -555,7 +555,7 @@ s.NoError(err) // Signal the foreign workflow with this command. - _, err = poller.PollAndProcessWorkflowTask(true, false) + _, err = poller.PollAndProcessWorkflowTask(WithDumpHistory) s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) @@ -595,7 +595,7 @@ s.True(signalSent) // Process signal in workflow for foreign workflow - _, err = foreignPoller.PollAndProcessWorkflowTask(true, false) + _, err = foreignPoller.PollAndProcessWorkflowTask(WithDumpHistory) s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) @@ -678,7 +678,7 @@ } // Make first command to schedule activity - _, err = poller.PollAndProcessWorkflowTask(false, false) + _, err = poller.PollAndProcessWorkflowTask() s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) s.True(workflowTaskDelay > time.Second*2) @@ -740,7 +740,7 @@ } // process start task - _, err := poller.PollAndProcessWorkflowTask(false, false) + _, err := poller.PollAndProcessWorkflowTask() s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) @@ -773,7 +773,7 @@ s.NoError(err) // process signal and complete workflow - _, err = poller.PollAndProcessWorkflowTask(false, false) + _, err = poller.PollAndProcessWorkflowTask() s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) @@ -876,11 +876,11 @@ T: s.T(), } - _, err = poller.PollAndProcessWorkflowTask(false, false) + _, err = poller.PollAndProcessWorkflowTask() s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.Error(err) - _, err = poller.PollAndProcessWorkflowTask(false, false) + _, err = poller.PollAndProcessWorkflowTask() s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) } @@ -1037,11 +1037,11 @@ } // Start both current and foreign workflows to make some progress. - _, err := poller.PollAndProcessWorkflowTask(false, false) + _, err := poller.PollAndProcessWorkflowTask() s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) - _, err = foreignPoller.PollAndProcessWorkflowTask(false, false) + _, err = foreignPoller.PollAndProcessWorkflowTask() s.Logger.Info("foreign PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) @@ -1050,7 +1050,7 @@ s.NoError(err) // Signal the foreign workflow with this command. - _, err = poller.PollAndProcessWorkflowTask(true, false) + _, err = poller.PollAndProcessWorkflowTask(WithDumpHistory) s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) @@ -1089,7 +1089,7 @@ s.True(signalSent) // Process signal in workflow for foreign workflow - _, err = foreignPoller.PollAndProcessWorkflowTask(true, false) + _, err = foreignPoller.PollAndProcessWorkflowTask(WithDumpHistory) s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) @@ -1183,12 +1183,12 @@ } // Start workflows to make some progress. 
- _, err := poller.PollAndProcessWorkflowTask(false, false) + _, err := poller.PollAndProcessWorkflowTask() s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) // Signal the foreign workflow with this command. - _, err = poller.PollAndProcessWorkflowTask(true, false) + _, err = poller.PollAndProcessWorkflowTask(WithDumpHistory) s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) @@ -1308,12 +1308,12 @@ } // Start workflows to make some progress. - _, err := poller.PollAndProcessWorkflowTask(false, false) + _, err := poller.PollAndProcessWorkflowTask() s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) // Signal the foreign workflow with this command. - _, err = poller.PollAndProcessWorkflowTask(true, false) + _, err = poller.PollAndProcessWorkflowTask(WithDumpHistory) s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) @@ -1463,7 +1463,7 @@ } // Make first command to schedule activity - _, err := poller.PollAndProcessWorkflowTask(false, false) + _, err := poller.PollAndProcessWorkflowTask() s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) @@ -1491,7 +1491,7 @@ s.Equal(we.GetRunId(), resp.GetRunId()) // Process signal in workflow - _, err = poller.PollAndProcessWorkflowTask(true, false) + _, err = poller.PollAndProcessWorkflowTask(WithDumpHistory) s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) @@ -1527,7 +1527,7 @@ newWorkflowStarted = true // Process signal in workflow - _, err = poller.PollAndProcessWorkflowTask(true, false) + _, err = poller.PollAndProcessWorkflowTask(WithDumpHistory) s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) @@ -1552,7 +1552,7 @@ newWorkflowStarted = true // Process signal in workflow - _, err = poller.PollAndProcessWorkflowTask(true, false) + _, err = poller.PollAndProcessWorkflowTask(WithDumpHistory) s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) @@ -1696,10 +1696,10 @@ } // Start workflows, make some progress and complete workflow - _, err := poller.PollAndProcessWorkflowTask(false, false) + _, err := poller.PollAndProcessWorkflowTask() s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) - _, err = poller.PollAndProcessWorkflowTask(false, false) + _, err = poller.PollAndProcessWorkflowTask() s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) s.True(workflowComplete) @@ -1854,7 +1854,7 @@ T: s.T(), } - _, pollErr := poller.PollAndProcessWorkflowTask(true, false) + _, pollErr := poller.PollAndProcessWorkflowTask(WithDumpHistory) s.NoError(pollErr) s.GreaterOrEqual(delayEndTime.Sub(reqStartTime), startDelay) s.NotNil(signalEvent) @@ -1927,7 +1927,7 @@ } // process start task - _, err = poller.PollAndProcessWorkflowTask(true, false) + _, err = poller.PollAndProcessWorkflowTask(WithDumpHistory) s.NoError(err) signalName := "my signal" @@ -1966,7 +1966,7 @@ s.NoError(err) // process signal and complete workflow - _, err = poller.PollAndProcessWorkflowTask(false, false) + _, err = poller.PollAndProcessWorkflowTask() s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) diff -Nru temporal-1.21.5-1/src/tests/simpleMonitor.go temporal-1.22.5/src/tests/simpleMonitor.go --- temporal-1.21.5-1/src/tests/simpleMonitor.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/tests/simpleMonitor.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,76 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal 
Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package tests - -import ( - "context" - - "go.temporal.io/server/common/membership" - "go.temporal.io/server/common/primitives" -) - -type simpleMonitor struct { - hosts map[primitives.ServiceName][]string - resolvers map[primitives.ServiceName]*simpleResolver -} - -// NewSimpleMonitor returns a simple monitor interface -func newSimpleMonitor(hosts map[primitives.ServiceName][]string) *simpleMonitor { - resolvers := make(map[primitives.ServiceName]*simpleResolver, len(hosts)) - for service, hostList := range hosts { - resolvers[service] = newSimpleResolver(service, hostList) - } - - return &simpleMonitor{ - hosts: hosts, - resolvers: resolvers, - } -} - -func (s *simpleMonitor) Start() { - for service, r := range s.resolvers { - r.start(s.hosts[service]) - } -} - -func (s *simpleMonitor) EvictSelf() error { - return nil -} - -func (s *simpleMonitor) GetResolver(service primitives.ServiceName) (membership.ServiceResolver, error) { - resolver, ok := s.resolvers[service] - if !ok { - return nil, membership.ErrUnknownService - } - return resolver, nil -} - -func (s *simpleMonitor) GetReachableMembers() ([]string, error) { - return nil, nil -} - -func (s *simpleMonitor) WaitUntilInitialized(_ context.Context) error { - return nil -} diff -Nru temporal-1.21.5-1/src/tests/simpleServiceResolver.go temporal-1.22.5/src/tests/simpleServiceResolver.go --- temporal-1.21.5-1/src/tests/simpleServiceResolver.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/tests/simpleServiceResolver.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,126 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package tests - -import ( - "sync" - - "github.com/dgryski/go-farm" - - "go.temporal.io/server/common/membership" - "go.temporal.io/server/common/primitives" -) - -type simpleResolver struct { - mu sync.Mutex - hostInfos []membership.HostInfo - listeners map[string]chan<- *membership.ChangedEvent - - hashfunc func([]byte) uint32 -} - -// newSimpleResolver returns a service resolver that maintains static mapping -// between services and host info -func newSimpleResolver(service primitives.ServiceName, hosts []string) *simpleResolver { - hostInfos := make([]membership.HostInfo, 0, len(hosts)) - for _, host := range hosts { - hostInfos = append(hostInfos, membership.NewHostInfoFromAddress(host)) - } - return &simpleResolver{ - hostInfos: hostInfos, - hashfunc: farm.Fingerprint32, - listeners: make(map[string]chan<- *membership.ChangedEvent), - } -} - -func (s *simpleResolver) start(hosts []string) { - hostInfos := make([]membership.HostInfo, 0, len(hosts)) - for _, host := range hosts { - hostInfos = append(hostInfos, membership.NewHostInfoFromAddress(host)) - } - event := &membership.ChangedEvent{ - HostsAdded: hostInfos, - } - - s.mu.Lock() - defer s.mu.Unlock() - - s.hostInfos = hostInfos - - for _, ch := range s.listeners { - select { - case ch <- event: - default: - } - } -} - -func (s *simpleResolver) Lookup(key string) (membership.HostInfo, error) { - s.mu.Lock() - defer s.mu.Unlock() - if len(s.hostInfos) == 0 { - return nil, membership.ErrInsufficientHosts - } - hash := int(s.hashfunc([]byte(key))) - idx := hash % len(s.hostInfos) - return s.hostInfos[idx], nil -} - -func (s *simpleResolver) AddListener(name string, notifyChannel chan<- *membership.ChangedEvent) error { - s.mu.Lock() - defer s.mu.Unlock() - _, ok := s.listeners[name] - if ok { - return membership.ErrListenerAlreadyExist - } - s.listeners[name] = notifyChannel - return nil -} - -func (s *simpleResolver) RemoveListener(name string) error { - s.mu.Lock() - defer s.mu.Unlock() - _, ok := s.listeners[name] - if !ok { - return nil - } - delete(s.listeners, name) - return nil -} - -func (s *simpleResolver) MemberCount() int { - s.mu.Lock() - defer s.mu.Unlock() - return len(s.hostInfos) -} - -func (s *simpleResolver) Members() []membership.HostInfo { - s.mu.Lock() - defer s.mu.Unlock() - return s.hostInfos -} - -func (s *simpleResolver) RequestRefresh() { -} diff -Nru temporal-1.21.5-1/src/tests/simple_monitor.go temporal-1.22.5/src/tests/simple_monitor.go --- temporal-1.21.5-1/src/tests/simple_monitor.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/tests/simple_monitor.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,76 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package tests + +import ( + "context" + + "go.temporal.io/server/common/membership" + "go.temporal.io/server/common/primitives" +) + +type simpleMonitor struct { + hosts map[primitives.ServiceName][]string + resolvers map[primitives.ServiceName]*simpleResolver +} + +// NewSimpleMonitor returns a simple monitor interface +func newSimpleMonitor(hosts map[primitives.ServiceName][]string) *simpleMonitor { + resolvers := make(map[primitives.ServiceName]*simpleResolver, len(hosts)) + for service, hostList := range hosts { + resolvers[service] = newSimpleResolver(service, hostList) + } + + return &simpleMonitor{ + hosts: hosts, + resolvers: resolvers, + } +} + +func (s *simpleMonitor) Start() { + for service, r := range s.resolvers { + r.start(s.hosts[service]) + } +} + +func (s *simpleMonitor) EvictSelf() error { + return nil +} + +func (s *simpleMonitor) GetResolver(service primitives.ServiceName) (membership.ServiceResolver, error) { + resolver, ok := s.resolvers[service] + if !ok { + return nil, membership.ErrUnknownService + } + return resolver, nil +} + +func (s *simpleMonitor) GetReachableMembers() ([]string, error) { + return nil, nil +} + +func (s *simpleMonitor) WaitUntilInitialized(_ context.Context) error { + return nil +} diff -Nru temporal-1.21.5-1/src/tests/simple_service_resolver.go temporal-1.22.5/src/tests/simple_service_resolver.go --- temporal-1.21.5-1/src/tests/simple_service_resolver.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/tests/simple_service_resolver.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,126 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package tests + +import ( + "sync" + + "github.com/dgryski/go-farm" + + "go.temporal.io/server/common/membership" + "go.temporal.io/server/common/primitives" +) + +type simpleResolver struct { + mu sync.Mutex + hostInfos []membership.HostInfo + listeners map[string]chan<- *membership.ChangedEvent + + hashfunc func([]byte) uint32 +} + +// newSimpleResolver returns a service resolver that maintains static mapping +// between services and host info +func newSimpleResolver(service primitives.ServiceName, hosts []string) *simpleResolver { + hostInfos := make([]membership.HostInfo, 0, len(hosts)) + for _, host := range hosts { + hostInfos = append(hostInfos, membership.NewHostInfoFromAddress(host)) + } + return &simpleResolver{ + hostInfos: hostInfos, + hashfunc: farm.Fingerprint32, + listeners: make(map[string]chan<- *membership.ChangedEvent), + } +} + +func (s *simpleResolver) start(hosts []string) { + hostInfos := make([]membership.HostInfo, 0, len(hosts)) + for _, host := range hosts { + hostInfos = append(hostInfos, membership.NewHostInfoFromAddress(host)) + } + event := &membership.ChangedEvent{ + HostsAdded: hostInfos, + } + + s.mu.Lock() + defer s.mu.Unlock() + + s.hostInfos = hostInfos + + for _, ch := range s.listeners { + select { + case ch <- event: + default: + } + } +} + +func (s *simpleResolver) Lookup(key string) (membership.HostInfo, error) { + s.mu.Lock() + defer s.mu.Unlock() + if len(s.hostInfos) == 0 { + return nil, membership.ErrInsufficientHosts + } + hash := int(s.hashfunc([]byte(key))) + idx := hash % len(s.hostInfos) + return s.hostInfos[idx], nil +} + +func (s *simpleResolver) AddListener(name string, notifyChannel chan<- *membership.ChangedEvent) error { + s.mu.Lock() + defer s.mu.Unlock() + _, ok := s.listeners[name] + if ok { + return membership.ErrListenerAlreadyExist + } + s.listeners[name] = notifyChannel + return nil +} + +func (s *simpleResolver) RemoveListener(name string) error { + s.mu.Lock() + defer s.mu.Unlock() + _, ok := s.listeners[name] + if !ok { + return nil + } + delete(s.listeners, name) + return nil +} + +func (s *simpleResolver) MemberCount() int { + s.mu.Lock() + defer s.mu.Unlock() + return len(s.hostInfos) +} + +func (s *simpleResolver) Members() []membership.HostInfo { + s.mu.Lock() + defer s.mu.Unlock() + return s.hostInfos +} + +func (s *simpleResolver) RequestRefresh() { +} diff -Nru temporal-1.21.5-1/src/tests/sizelimit_test.go temporal-1.22.5/src/tests/sizelimit_test.go --- temporal-1.21.5-1/src/tests/sizelimit_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/tests/sizelimit_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -165,7 +165,7 @@ // Poll workflow task only if it is running if dwResp.WorkflowExecutionInfo.Status == enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING { - _, err := poller.PollAndProcessWorkflowTask(false, false) + _, err := poller.PollAndProcessWorkflowTask() s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) @@ -304,7 +304,7 @@ s.NoError(err) go func() { - _, err = 
poller.PollAndProcessWorkflowTask(false, false) + _, err = poller.PollAndProcessWorkflowTask() s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) }() @@ -431,7 +431,7 @@ // Poll workflow task only if it is running if dwResp.WorkflowExecutionInfo.Status == enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING { - _, err := poller.PollAndProcessWorkflowTask(false, false) + _, err := poller.PollAndProcessWorkflowTask() s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) // Workflow should be force terminated at this point diff -Nru temporal-1.21.5-1/src/tests/stickytq_test.go temporal-1.22.5/src/tests/stickytq_test.go --- temporal-1.21.5-1/src/tests/stickytq_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/tests/stickytq_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -131,7 +131,7 @@ StickyScheduleToStartTimeout: stickyScheduleToStartTimeout, } - _, err := poller.PollAndProcessWorkflowTaskWithAttempt(false, false, false, true, 1) + _, err := poller.PollAndProcessWorkflowTask(WithRespondSticky) s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) @@ -162,7 +162,7 @@ s.True(stickyTimeout, "Workflow task not timed out") for i := 1; i <= 3; i++ { - _, err = poller.PollAndProcessWorkflowTaskWithAttempt(true, false, false, true, int32(i)) + _, err = poller.PollAndProcessWorkflowTask(WithDumpHistory, WithRespondSticky, WithExpectedAttemptCount(i)) s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) } @@ -178,7 +178,7 @@ s.NoError(err) for i := 1; i <= 2; i++ { - _, err = poller.PollAndProcessWorkflowTaskWithAttempt(true, false, false, true, int32(i)) + _, err = poller.PollAndProcessWorkflowTask(WithDumpHistory, WithRespondSticky, WithExpectedAttemptCount(i)) s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) } @@ -194,7 +194,7 @@ s.True(workflowTaskFailed) // Complete workflow execution - _, err = poller.PollAndProcessWorkflowTaskWithAttempt(true, false, false, true, 3) + _, err = poller.PollAndProcessWorkflowTask(WithDumpHistory, WithRespondSticky, WithExpectedAttemptCount(3)) s.NoError(err) // Assert for single workflow task failed and workflow completion @@ -291,7 +291,7 @@ StickyScheduleToStartTimeout: stickyScheduleToStartTimeout, } - _, err := poller.PollAndProcessWorkflowTaskWithAttempt(false, false, false, true, 1) + _, err := poller.PollAndProcessWorkflowTask(WithRespondSticky) s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) @@ -329,7 +329,7 @@ s.True(stickyTimeout, "Workflow task not timed out") for i := 1; i <= 3; i++ { - _, err = poller.PollAndProcessWorkflowTaskWithAttempt(true, false, false, true, int32(i)) + _, err = poller.PollAndProcessWorkflowTask(WithDumpHistory, WithRespondSticky, WithExpectedAttemptCount(i)) s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) } @@ -345,7 +345,7 @@ s.NoError(err) for i := 1; i <= 2; i++ { - _, err = poller.PollAndProcessWorkflowTaskWithAttempt(true, false, false, true, int32(i)) + _, err = poller.PollAndProcessWorkflowTask(WithDumpHistory, WithRespondSticky, WithExpectedAttemptCount(i)) s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) } @@ -361,7 +361,7 @@ s.True(workflowTaskFailed) // Complete workflow execution - _, err = poller.PollAndProcessWorkflowTaskWithAttempt(true, false, false, true, 3) + _, err = poller.PollAndProcessWorkflowTask(WithDumpHistory, WithRespondSticky, WithExpectedAttemptCount(3)) s.NoError(err) // Assert for single workflow task failed and workflow 
completion diff -Nru temporal-1.21.5-1/src/tests/taskpoller.go temporal-1.22.5/src/tests/taskpoller.go --- temporal-1.21.5-1/src/tests/taskpoller.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/tests/taskpoller.go 2024-02-23 09:45:43.000000000 +0000 @@ -47,7 +47,6 @@ "go.temporal.io/server/common/log/tag" "go.temporal.io/server/common/payloads" "go.temporal.io/server/service/history/consts" - "go.temporal.io/server/service/matching" ) type ( @@ -75,79 +74,73 @@ Logger log.Logger T *testing.T } + + PollAndProcessWorkflowTaskOptions struct { + DumpHistory bool + DumpCommands bool + DropTask bool + PollSticky bool + RespondSticky bool + ExpectedAttemptCount int + Retries int + ForceNewWorkflowTask bool + QueryResult *querypb.WorkflowQueryResult + } + + PollAndProcessWorkflowTaskOptionFunc func(*PollAndProcessWorkflowTaskOptions) + + PollAndProcessWorkflowTaskResponse struct { + IsQueryTask bool + NewTask *workflowservice.RespondWorkflowTaskCompletedResponse + } ) -// PollAndProcessWorkflowTask for workflow tasks -func (p *TaskPoller) PollAndProcessWorkflowTask(dumpHistory bool, dropTask bool) (isQueryTask bool, err error) { - return p.PollAndProcessWorkflowTaskWithAttempt(dumpHistory, dropTask, false, false, 1) -} +var ( + errNoTasks = errors.New("no tasks") -// PollAndProcessWorkflowTaskWithSticky for workflow tasks -func (p *TaskPoller) PollAndProcessWorkflowTaskWithSticky(dumpHistory bool, dropTask bool) (isQueryTask bool, err error) { - return p.PollAndProcessWorkflowTaskWithAttempt(dumpHistory, dropTask, true, true, 1) -} + defaultPollAndProcessWorkflowTaskOptions = PollAndProcessWorkflowTaskOptions{ + DumpHistory: false, + DumpCommands: true, + DropTask: false, + PollSticky: false, + RespondSticky: false, + ExpectedAttemptCount: 1, + Retries: 5, + ForceNewWorkflowTask: false, + QueryResult: nil, + } +) -// PollAndProcessWorkflowTaskWithoutRetry for workflow tasks -func (p *TaskPoller) PollAndProcessWorkflowTaskWithoutRetry(dumpHistory bool, dropTask bool) (isQueryTask bool, err error) { - return p.PollAndProcessWorkflowTaskWithAttemptAndRetry(dumpHistory, dropTask, false, false, 1, 1) +func WithDumpHistory(o *PollAndProcessWorkflowTaskOptions) { o.DumpHistory = true } +func WithNoDumpCommands(o *PollAndProcessWorkflowTaskOptions) { o.DumpCommands = false } +func WithDropTask(o *PollAndProcessWorkflowTaskOptions) { o.DropTask = true } +func WithPollSticky(o *PollAndProcessWorkflowTaskOptions) { o.PollSticky = true } +func WithRespondSticky(o *PollAndProcessWorkflowTaskOptions) { o.RespondSticky = true } +func WithExpectedAttemptCount(c int) PollAndProcessWorkflowTaskOptionFunc { + return func(o *PollAndProcessWorkflowTaskOptions) { o.ExpectedAttemptCount = c } } - -// PollAndProcessWorkflowTaskWithAttempt for workflow tasks -func (p *TaskPoller) PollAndProcessWorkflowTaskWithAttempt( - dumpHistory bool, - dropTask bool, - pollStickyTaskQueue bool, - respondStickyTaskQueue bool, - workflowTaskAttempt int32, -) (isQueryTask bool, err error) { - - return p.PollAndProcessWorkflowTaskWithAttemptAndRetry( - dumpHistory, - dropTask, - pollStickyTaskQueue, - respondStickyTaskQueue, - workflowTaskAttempt, - 5) +func WithRetries(c int) PollAndProcessWorkflowTaskOptionFunc { + return func(o *PollAndProcessWorkflowTaskOptions) { o.Retries = c } +} +func WithForceNewWorkflowTask(o *PollAndProcessWorkflowTaskOptions) { o.ForceNewWorkflowTask = true } +func WithQueryResult(r *querypb.WorkflowQueryResult) PollAndProcessWorkflowTaskOptionFunc { + return func(o 
*PollAndProcessWorkflowTaskOptions) { o.QueryResult = r } } -// PollAndProcessWorkflowTaskWithAttemptAndRetry for workflow tasks -func (p *TaskPoller) PollAndProcessWorkflowTaskWithAttemptAndRetry( - dumpHistory bool, - dropTask bool, - pollStickyTaskQueue bool, - respondStickyTaskQueue bool, - workflowTaskAttempt int32, - retryCount int, -) (isQueryTask bool, err error) { - - isQueryTask, _, err = p.PollAndProcessWorkflowTaskWithAttemptAndRetryAndForceNewWorkflowTask( - dumpHistory, - dropTask, - pollStickyTaskQueue, - respondStickyTaskQueue, - workflowTaskAttempt, - retryCount, - false, - nil) - return isQueryTask, err +func (p *TaskPoller) PollAndProcessWorkflowTask(funcs ...PollAndProcessWorkflowTaskOptionFunc) (res PollAndProcessWorkflowTaskResponse, err error) { + opts := defaultPollAndProcessWorkflowTaskOptions + for _, f := range funcs { + f(&opts) + } + return p.PollAndProcessWorkflowTaskWithOptions(&opts) } -// PollAndProcessWorkflowTaskWithAttemptAndRetryAndForceNewWorkflowTask for workflow tasks -func (p *TaskPoller) PollAndProcessWorkflowTaskWithAttemptAndRetryAndForceNewWorkflowTask( - dumpHistory bool, - dropTask bool, - pollStickyTaskQueue bool, - respondStickyTaskQueue bool, - workflowTaskAttempt int32, - retryCount int, - forceCreateNewWorkflowTask bool, - queryResult *querypb.WorkflowQueryResult, -) (isQueryTask bool, newTask *workflowservice.RespondWorkflowTaskCompletedResponse, err error) { +func (p *TaskPoller) PollAndProcessWorkflowTaskWithOptions(opts *PollAndProcessWorkflowTaskOptions) (res PollAndProcessWorkflowTaskResponse, err error) { Loop: - for attempt := 1; attempt <= retryCount; attempt++ { + for attempt := 1; attempt <= opts.Retries; attempt++ { taskQueue := p.TaskQueue - if pollStickyTaskQueue { + if opts.PollSticky { taskQueue = p.StickyTaskQueue } @@ -158,7 +151,7 @@ }) if !common.IsServiceTransientError(err1) { - return false, nil, err1 + return PollAndProcessWorkflowTaskResponse{}, err1 } if err1 == consts.ErrDuplicate { @@ -167,7 +160,7 @@ } if err1 != nil { - return false, nil, err1 + return PollAndProcessWorkflowTaskResponse{}, err1 } if response == nil || len(response.TaskToken) == 0 { @@ -176,19 +169,19 @@ } var events []*historypb.HistoryEvent - if response.Query == nil || !pollStickyTaskQueue { + if response.Query == nil || !opts.PollSticky { // if not query task, should have some history events // for non sticky query, there should be events returned history := response.History if history == nil { p.Logger.Fatal("History is nil") - return false, nil, errors.New("history is nil") + return PollAndProcessWorkflowTaskResponse{}, errors.New("history is nil") } events = history.Events if len(events) == 0 { p.Logger.Fatal("History Events are empty") - return false, nil, errors.New("history events are empty") + return PollAndProcessWorkflowTaskResponse{}, errors.New("history events are empty") } nextPageToken := response.NextPageToken @@ -200,7 +193,7 @@ }) if err2 != nil { - return false, nil, err2 + return PollAndProcessWorkflowTaskResponse{}, err2 } events = append(events, resp.History.Events...) 
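For orientation, a minimal sketch of how a call site uses the options-based poller API introduced in the hunks above; it is illustrative only and not part of the patch. It relies solely on identifiers defined in taskpoller.go (TaskPoller, the With* helpers, PollAndProcessWorkflowTaskResponse); the wrapper name pollWithOptionsExample is hypothetical and assumes it sits in package tests.

func pollWithOptionsExample(p *TaskPoller) error {
	// Previously: p.PollAndProcessWorkflowTaskWithAttempt(true, false, false, true, 2)
	res, err := p.PollAndProcessWorkflowTask(
		WithDumpHistory,             // dumpHistory = true
		WithRespondSticky,           // respondStickyTaskQueue = true
		WithExpectedAttemptCount(2), // workflowTaskAttempt = 2
	)
	if err != nil {
		return err
	}
	if res.IsQueryTask {
		// Query tasks are answered via RespondQueryTaskCompleted and carry no new workflow task.
		return nil
	}
	_ = res.NewTask // *workflowservice.RespondWorkflowTaskCompletedResponse returned by the server
	return nil
}
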
@@ -217,12 +210,12 @@ } } - if dropTask { + if opts.DropTask { p.Logger.Info("Dropping Workflow task: ") - return false, nil, nil + return PollAndProcessWorkflowTaskResponse{}, nil } - if dumpHistory { + if opts.DumpHistory { common.PrettyPrint(response.History.Events) } @@ -245,7 +238,7 @@ } _, err = p.Engine.RespondQueryTaskCompleted(NewContext(), completeRequest) - return true, nil, err + return PollAndProcessWorkflowTaskResponse{IsQueryTask: true}, err } // Handle messages. @@ -261,7 +254,7 @@ Failure: newApplicationFailure(err, false, nil), Identity: p.Identity, }) - return false, nil, err + return PollAndProcessWorkflowTaskResponse{}, err } } @@ -272,8 +265,8 @@ lastWorkflowTaskScheduleEvent = e } } - if lastWorkflowTaskScheduleEvent != nil && workflowTaskAttempt > 1 { - require.Equal(p.T, workflowTaskAttempt, lastWorkflowTaskScheduleEvent.GetWorkflowTaskScheduledEventAttributes().GetAttempt()) + if lastWorkflowTaskScheduleEvent != nil && opts.ExpectedAttemptCount > 1 { + require.Equal(p.T, opts.ExpectedAttemptCount, int(lastWorkflowTaskScheduleEvent.GetWorkflowTaskScheduledEventAttributes().GetAttempt())) } commands, err := p.WorkflowTaskHandler(response.WorkflowExecution, response.WorkflowType, response.PreviousStartedEventId, response.StartedEventId, response.History) @@ -286,16 +279,18 @@ Failure: newApplicationFailure(err, false, nil), Identity: p.Identity, }) - return false, nil, err + return PollAndProcessWorkflowTaskResponse{}, err } - if len(commands) > 0 { - common.PrettyPrint(commands, "Send commands to server using RespondWorkflowTaskCompleted:") - } - if len(workerToServerMessages) > 0 { - common.PrettyPrint(workerToServerMessages, "Send messages to server using RespondWorkflowTaskCompleted:") + if opts.DumpCommands { + if len(commands) > 0 { + common.PrettyPrint(commands, "Send commands to server using RespondWorkflowTaskCompleted:") + } + if len(workerToServerMessages) > 0 { + common.PrettyPrint(workerToServerMessages, "Send messages to server using RespondWorkflowTaskCompleted:") + } } - if !respondStickyTaskQueue { + if !opts.RespondSticky { // non sticky taskqueue newTask, err := p.Engine.RespondWorkflowTaskCompleted(NewContext(), &workflowservice.RespondWorkflowTaskCompletedRequest{ Namespace: p.Namespace, @@ -304,10 +299,10 @@ Commands: commands, Messages: workerToServerMessages, ReturnNewWorkflowTask: true, - ForceCreateNewWorkflowTask: forceCreateNewWorkflowTask, - QueryResults: getQueryResults(response.GetQueries(), queryResult), + ForceCreateNewWorkflowTask: opts.ForceNewWorkflowTask, + QueryResults: getQueryResults(response.GetQueries(), opts.QueryResult), }) - return false, newTask, err + return PollAndProcessWorkflowTaskResponse{NewTask: newTask}, err } // sticky taskqueue newTask, err := p.Engine.RespondWorkflowTaskCompleted( @@ -322,15 +317,15 @@ ScheduleToStartTimeout: &p.StickyScheduleToStartTimeout, }, ReturnNewWorkflowTask: true, - ForceCreateNewWorkflowTask: forceCreateNewWorkflowTask, - QueryResults: getQueryResults(response.GetQueries(), queryResult), + ForceCreateNewWorkflowTask: opts.ForceNewWorkflowTask, + QueryResults: getQueryResults(response.GetQueries(), opts.QueryResult), }, ) - return false, newTask, err + return PollAndProcessWorkflowTaskResponse{NewTask: newTask}, err } - return false, nil, matching.ErrNoTasks + return PollAndProcessWorkflowTaskResponse{}, errNoTasks } // HandlePartialWorkflowTask for workflow task @@ -474,7 +469,7 @@ return err } - return matching.ErrNoTasks + return errNoTasks } // PollAndProcessActivityTaskWithID is 
similar to PollAndProcessActivityTask but using RespondActivityTask...ByID @@ -550,7 +545,7 @@ return err } - return matching.ErrNoTasks + return errNoTasks } func getQueryResults(queries map[string]*querypb.WorkflowQuery, queryResult *querypb.WorkflowQueryResult) map[string]*querypb.WorkflowQueryResult { diff -Nru temporal-1.21.5-1/src/tests/test_cluster.go temporal-1.22.5/src/tests/test_cluster.go --- temporal-1.21.5-1/src/tests/test_cluster.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/tests/test_cluster.go 2024-02-23 09:45:43.000000000 +0000 @@ -26,6 +26,9 @@ import ( "context" + "crypto/tls" + "crypto/x509" + "errors" "fmt" "os" "path" @@ -45,6 +48,7 @@ "go.temporal.io/server/common/dynamicconfig" "go.temporal.io/server/common/log" "go.temporal.io/server/common/log/tag" + "go.temporal.io/server/common/metrics/metricstest" "go.temporal.io/server/common/namespace" "go.temporal.io/server/common/persistence" persistencetests "go.temporal.io/server/common/persistence/persistence-tests" @@ -53,6 +57,7 @@ "go.temporal.io/server/common/persistence/sql/sqlplugin/sqlite" esclient "go.temporal.io/server/common/persistence/visibility/store/elasticsearch/client" "go.temporal.io/server/common/pprof" + "go.temporal.io/server/common/rpc/encryption" "go.temporal.io/server/common/searchattribute" "go.temporal.io/server/tests/testutils" ) @@ -89,6 +94,8 @@ MockAdminClient map[string]adminservice.AdminServiceClient FaultInjection config.FaultInjection `yaml:"faultinjection"` DynamicConfigOverrides map[dynamicconfig.Key]interface{} + GenerateMTLS bool + EnableMetricsCapture bool } // WorkerConfig is the config for enabling/disabling Temporal worker @@ -100,13 +107,13 @@ ) const ( - defaultPageSize = 5 - pprofTestPort = 7000 + defaultPageSize = 5 + pprofTestPort = 7000 + tlsCertCommonName = "my-common-name" ) // NewCluster creates and sets up the test cluster func NewCluster(options *TestClusterConfig, logger log.Logger) (*TestCluster, error) { - clusterMetadataConfig := cluster.NewTestClusterMetadataConfig( options.ClusterMetadata.EnableGlobalNamespace, options.IsMasterCluster, @@ -224,6 +231,13 @@ return nil, err } + var tlsConfigProvider *encryption.FixedTLSConfigProvider + if options.GenerateMTLS { + if tlsConfigProvider, err = createFixedTLSConfigProvider(); err != nil { + return nil, err + } + } + temporalParams := &TemporalParams{ ClusterMetadataConfig: clusterMetadataConfig, PersistenceConfig: pConfig, @@ -244,6 +258,11 @@ MockAdminClient: options.MockAdminClient, NamespaceReplicationTaskExecutor: namespace.NewReplicationTaskExecutor(options.ClusterMetadata.CurrentClusterName, testBase.MetadataManager, logger), DynamicConfigOverrides: options.DynamicConfigOverrides, + TLSConfigProvider: tlsConfigProvider, + } + + if options.EnableMetricsCapture { + temporalParams.CaptureMetricsHandler = metricstest.NewCaptureHandler() } err = newPProfInitializerImpl(logger, pprofTestPort).Start() @@ -448,3 +467,52 @@ func (tc *TestCluster) GetHost() *temporalImpl { return tc.host } + +var errCannotAddCACertToPool = errors.New("failed adding CA to pool") + +func createFixedTLSConfigProvider() (*encryption.FixedTLSConfigProvider, error) { + // We use the existing cert generation utilities even though they use slow + // RSA and use disk unnecessarily + tempDir, err := os.MkdirTemp("", "") + if err != nil { + return nil, err + } + defer os.RemoveAll(tempDir) + + certChain, err := testutils.GenerateTestChain(tempDir, tlsCertCommonName) + if err != nil { + return nil, err + } + + // Due to how mTLS is 
built in the server, we have to reuse the CA for server + // and client, therefore we might as well reuse the cert too + + tlsCert, err := tls.LoadX509KeyPair(certChain.CertPubFile, certChain.CertKeyFile) + if err != nil { + return nil, err + } + caCertPool := x509.NewCertPool() + if caCertBytes, err := os.ReadFile(certChain.CaPubFile); err != nil { + return nil, err + } else if !caCertPool.AppendCertsFromPEM(caCertBytes) { + return nil, errCannotAddCACertToPool + } + + serverTLSConfig := &tls.Config{ + Certificates: []tls.Certificate{tlsCert}, + ClientCAs: caCertPool, + ClientAuth: tls.RequireAndVerifyClientCert, + } + clientTLSConfig := &tls.Config{ + ServerName: tlsCertCommonName, + Certificates: []tls.Certificate{tlsCert}, + RootCAs: caCertPool, + } + + return &encryption.FixedTLSConfigProvider{ + InternodeServerConfig: serverTLSConfig, + InternodeClientConfig: clientTLSConfig, + FrontendServerConfig: serverTLSConfig, + FrontendClientConfig: clientTLSConfig, + }, nil +} diff -Nru temporal-1.21.5-1/src/tests/testdata/acquire_shard_deadline_exceeded_error.yaml temporal-1.22.5/src/tests/testdata/acquire_shard_deadline_exceeded_error.yaml --- temporal-1.21.5-1/src/tests/testdata/acquire_shard_deadline_exceeded_error.yaml 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/tests/testdata/acquire_shard_deadline_exceeded_error.yaml 2024-02-23 09:45:43.000000000 +0000 @@ -13,4 +13,4 @@ methods: UpdateShard: errors: - DeadlineExceededError: 1.0 # 100% of the time, return a deadline exceeded error + DeadlineExceeded: 1.0 # 100% of the time, return a deadline exceeded error diff -Nru temporal-1.21.5-1/src/tests/testdata/acquire_shard_eventual_success.yaml temporal-1.22.5/src/tests/testdata/acquire_shard_eventual_success.yaml --- temporal-1.21.5-1/src/tests/testdata/acquire_shard_eventual_success.yaml 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/tests/testdata/acquire_shard_eventual_success.yaml 2024-02-23 09:45:43.000000000 +0000 @@ -14,4 +14,4 @@ UpdateShard: seed: 43 # deterministically generate a deadline exceeded error followed by a success errors: - DeadlineExceededError: 0.5 # 50% of the time, return a deadline exceeded error + DeadlineExceeded: 0.5 # 50% of the time, return a deadline exceeded error diff -Nru temporal-1.21.5-1/src/tests/testdata/acquire_shard_ownership_lost_error.yaml temporal-1.22.5/src/tests/testdata/acquire_shard_ownership_lost_error.yaml --- temporal-1.21.5-1/src/tests/testdata/acquire_shard_ownership_lost_error.yaml 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/tests/testdata/acquire_shard_ownership_lost_error.yaml 2024-02-23 09:45:43.000000000 +0000 @@ -13,4 +13,4 @@ methods: UpdateShard: errors: - ShardOwnershipLostError: 1.0 # 100% of the time, return a ShardOwnershipLostError + ShardOwnershipLost: 1.0 # 100% of the time, return a persistence.ShardOwnershipLost. 
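The test_cluster.go hunks above add two opt-in knobs, GenerateMTLS and EnableMetricsCapture, which the new YAML fixtures below switch on (generatemtls / enablemetricscapture). A hedged sketch of driving the same knobs directly from Go follows; every other TestClusterConfig field is left at its zero value purely for brevity, and the function name is hypothetical rather than part of the patch.

package tests

import "go.temporal.io/server/common/log"

// newMTLSCaptureCluster builds a test cluster that generates a shared mTLS
// chain (exposed through a FixedTLSConfigProvider) and records metrics via
// metricstest.NewCaptureHandler, mirroring tls_integration_test_cluster.yaml.
func newMTLSCaptureCluster(logger log.Logger) (*TestCluster, error) {
	cfg := &TestClusterConfig{
		GenerateMTLS:         true,
		EnableMetricsCapture: true,
	}
	return NewCluster(cfg, logger)
}
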
diff -Nru temporal-1.21.5-1/src/tests/testdata/clientintegrationtestcluster.yaml temporal-1.22.5/src/tests/testdata/clientintegrationtestcluster.yaml --- temporal-1.21.5-1/src/tests/testdata/clientintegrationtestcluster.yaml 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/tests/testdata/clientintegrationtestcluster.yaml 2024-02-23 09:45:43.000000000 +0000 @@ -1,4 +1,5 @@ enablearchival: false +enablemetricscapture: true clusterno: 0 historyconfig: numhistoryshards: 4 diff -Nru temporal-1.21.5-1/src/tests/testdata/tls_integration_test_cluster.yaml temporal-1.22.5/src/tests/testdata/tls_integration_test_cluster.yaml --- temporal-1.21.5-1/src/tests/testdata/tls_integration_test_cluster.yaml 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/tests/testdata/tls_integration_test_cluster.yaml 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,8 @@ +historyconfig: + numhistoryshards: 4 + numhistoryhosts: 1 +workerconfig: + enablearchiver: false + enablereplicator: false + startworkeranyway: true +generatemtls: true \ No newline at end of file diff -Nru temporal-1.21.5-1/src/tests/testutils/certificate.go temporal-1.22.5/src/tests/testutils/certificate.go --- temporal-1.21.5-1/src/tests/testutils/certificate.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/tests/testutils/certificate.go 2024-02-23 09:45:43.000000000 +0000 @@ -63,11 +63,12 @@ if ip.IsLoopback() { template.DNSNames = []string{"localhost"} } + } else { + template.DNSNames = []string{commonName} } if strings.ToLower(commonName) == "localhost" { template.IPAddresses = []net.IP{net.IPv6loopback, net.IPv4(127, 0, 0, 1)} - template.DNSNames = []string{"localhost"} } privateKey, err := rsa.GenerateKey(rand.Reader, keyLengthBits) @@ -116,11 +117,12 @@ if ip.IsLoopback() { template.DNSNames = []string{"localhost"} } + } else { + template.DNSNames = []string{commonName} } if strings.ToLower(commonName) == "localhost" { template.IPAddresses = []net.IP{net.IPv6loopback, net.IPv4(127, 0, 0, 1)} - template.DNSNames = []string{"localhost"} } privateKey, err := rsa.GenerateKey(rand.Reader, 4096) diff -Nru temporal-1.21.5-1/src/tests/tls_test.go temporal-1.22.5/src/tests/tls_test.go --- temporal-1.21.5-1/src/tests/tls_test.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/tests/tls_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,152 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package tests + +import ( + "context" + "flag" + "net/http" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/suite" + "go.temporal.io/api/workflowservice/v1" + sdkclient "go.temporal.io/sdk/client" + "go.temporal.io/server/common/authorization" + "go.temporal.io/server/common/log/tag" + "go.temporal.io/server/common/rpc" +) + +type tlsIntegrationSuite struct { + IntegrationBase + hostPort string + sdkClient sdkclient.Client +} + +func TestTLSIntegrationSuite(t *testing.T) { + flag.Parse() + suite.Run(t, new(tlsIntegrationSuite)) +} + +func (s *tlsIntegrationSuite) SetupSuite() { + s.setupSuite("testdata/tls_integration_test_cluster.yaml") + s.hostPort = "127.0.0.1:7134" + if TestFlags.FrontendAddr != "" { + s.hostPort = TestFlags.FrontendAddr + } +} + +func (s *tlsIntegrationSuite) TearDownSuite() { + s.tearDownSuite() +} + +func (s *tlsIntegrationSuite) SetupTest() { + var err error + s.sdkClient, err = sdkclient.Dial(sdkclient.Options{ + HostPort: s.hostPort, + Namespace: s.namespace, + ConnectionOptions: sdkclient.ConnectionOptions{ + TLS: s.testCluster.host.tlsConfigProvider.FrontendClientConfig, + }, + }) + if err != nil { + s.Logger.Fatal("Error when creating SDK client", tag.Error(err)) + } +} + +func (s *tlsIntegrationSuite) TearDownTest() { + s.sdkClient.Close() +} + +func (s *tlsIntegrationSuite) TestGRPCMTLS() { + ctx, cancel := rpc.NewContextWithTimeoutAndVersionHeaders(time.Minute) + defer cancel() + + // Track auth info + calls := s.trackAuthInfoByCall() + + // Make a list-open call + _, _ = s.sdkClient.ListOpenWorkflow(ctx, &workflowservice.ListOpenWorkflowExecutionsRequest{}) + + // Confirm auth info as expected + authInfo, ok := calls.Load("/temporal.api.workflowservice.v1.WorkflowService/ListOpenWorkflowExecutions") + s.Require().True(ok) + s.Require().Equal(tlsCertCommonName, authInfo.(*authorization.AuthInfo).TLSSubject.CommonName) +} + +func (s *tlsIntegrationSuite) TestHTTPMTLS() { + if s.httpAPIAddress == "" { + s.T().Skip("HTTP API server not enabled") + } + // Track auth info + calls := s.trackAuthInfoByCall() + + // Confirm non-HTTPS call is rejected with 400 + resp, err := http.Get("http://" + s.httpAPIAddress + "/api/v1/namespaces/" + s.namespace + "/workflows") + s.Require().NoError(err) + s.Require().Equal(http.StatusBadRequest, resp.StatusCode) + + // Create HTTP client with TLS config + httpClient := http.Client{ + Transport: &http.Transport{ + TLSClientConfig: s.testCluster.host.tlsConfigProvider.FrontendClientConfig, + }, + } + + // Make a list call + req, err := http.NewRequest("GET", "https://"+s.httpAPIAddress+"/api/v1/namespaces/"+s.namespace+"/workflows", nil) + s.Require().NoError(err) + resp, err = httpClient.Do(req) + s.Require().NoError(err) + s.Require().Equal(http.StatusOK, resp.StatusCode) + + // Confirm auth info as expected + authInfo, ok := calls.Load("/temporal.api.workflowservice.v1.WorkflowService/ListWorkflowExecutions") + s.Require().True(ok) + s.Require().Equal(tlsCertCommonName, authInfo.(*authorization.AuthInfo).TLSSubject.CommonName) +} + +func (s *tlsIntegrationSuite) trackAuthInfoByCall() *sync.Map { + var calls sync.Map + // Put auth info on claim, then use authorizer to set on the map by call + s.testCluster.host.SetOnGetClaims(func(authInfo 
*authorization.AuthInfo) (*authorization.Claims, error) { + return &authorization.Claims{ + System: authorization.RoleAdmin, + Extensions: authInfo, + }, nil + }) + s.testCluster.host.SetOnAuthorize(func( + ctx context.Context, + caller *authorization.Claims, + target *authorization.CallTarget, + ) (authorization.Result, error) { + if authInfo, _ := caller.Extensions.(*authorization.AuthInfo); authInfo != nil { + calls.Store(target.APIName, authInfo) + } + return authorization.Result{Decision: authorization.DecisionAllow}, nil + }) + return &calls +} diff -Nru temporal-1.21.5-1/src/tests/transient_task_test.go temporal-1.22.5/src/tests/transient_task_test.go --- temporal-1.21.5-1/src/tests/transient_task_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/tests/transient_task_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -111,7 +111,7 @@ } // First workflow task immediately fails and schedules a transient workflow task - _, err := poller.PollAndProcessWorkflowTask(false, false) + _, err := poller.PollAndProcessWorkflowTask() s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) @@ -120,12 +120,12 @@ s.NoError(err, "failed to send signal to execution") // Drop workflow task to cause a workflow task timeout - _, err = poller.PollAndProcessWorkflowTask(true, true) + _, err = poller.PollAndProcessWorkflowTask(WithDumpHistory, WithDropTask) s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) // Now process signal and complete workflow execution - _, err = poller.PollAndProcessWorkflowTaskWithAttempt(true, false, false, false, 2) + _, err = poller.PollAndProcessWorkflowTask(WithDumpHistory, WithExpectedAttemptCount(2)) s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) @@ -260,7 +260,7 @@ } // stage 1 - _, err := poller.PollAndProcessWorkflowTask(false, false) + _, err := poller.PollAndProcessWorkflowTask(WithNoDumpCommands) s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) @@ -268,7 +268,7 @@ s.NoError(err, "failed to send signal to execution") // stage 2 - _, err = poller.PollAndProcessWorkflowTask(false, false) + _, err = poller.PollAndProcessWorkflowTask(WithNoDumpCommands) s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) @@ -276,7 +276,7 @@ s.NoError(err, "failed to send signal to execution") // stage 3: this one fails with a panic - _, err = poller.PollAndProcessWorkflowTask(false, false) + _, err = poller.PollAndProcessWorkflowTask(WithNoDumpCommands) s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) @@ -285,7 +285,7 @@ s.testCluster.host.dcClient.OverrideValue(dynamicconfig.HistorySizeSuggestContinueAsNew, 8*1024*1024) // stage 4 - _, err = poller.PollAndProcessWorkflowTask(false, false) + _, err = poller.PollAndProcessWorkflowTask(WithNoDumpCommands) s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) @@ -293,12 +293,12 @@ s.NoError(err, "failed to send signal to execution") // drop workflow task to cause a workflow task timeout - _, err = poller.PollAndProcessWorkflowTask(true, true) + _, err = poller.PollAndProcessWorkflowTask(WithDropTask, WithNoDumpCommands) s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) // stage 5 - _, err = poller.PollAndProcessWorkflowTask(false, false) + _, err = poller.PollAndProcessWorkflowTask(WithNoDumpCommands) s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) @@ -403,7 +403,7 @@ // fist workflow task, this try to do a 
continue as new but there is a buffered event, // so it will fail and create a new workflow task - _, err := poller.PollAndProcessWorkflowTask(true, false) + _, err := poller.PollAndProcessWorkflowTask(WithDumpHistory) s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.Error(err) s.IsType(&serviceerror.InvalidArgument{}, err) @@ -411,7 +411,7 @@ // second workflow task, which will complete the workflow // this expect the workflow task to have attempt == 1 - _, err = poller.PollAndProcessWorkflowTaskWithAttempt(true, false, false, false, 1) + _, err = poller.PollAndProcessWorkflowTask(WithDumpHistory, WithExpectedAttemptCount(1)) s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) diff -Nru temporal-1.21.5-1/src/tests/update_workflow_test.go temporal-1.22.5/src/tests/update_workflow_test.go --- temporal-1.21.5-1/src/tests/update_workflow_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/tests/update_workflow_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -211,7 +211,7 @@ switch wtHandlerCalls { case 1: // Completes first WT with empty command list. - return []*commandpb.Command{}, nil + return nil, nil case 2: s.EqualHistory(` 1 WorkflowExecutionStarted @@ -273,7 +273,7 @@ } // Drain first WT. - _, err := poller.PollAndProcessWorkflowTask(true, false) + _, err := poller.PollAndProcessWorkflowTask(WithDumpHistory) s.NoError(err) updateResultCh := make(chan *workflowservice.UpdateWorkflowExecutionResponse) @@ -282,14 +282,15 @@ }() // Process update in workflow. - _, updateResp, err := poller.PollAndProcessWorkflowTaskWithAttemptAndRetryAndForceNewWorkflowTask(false, false, false, false, 1, 1, true, nil) + res, err := poller.PollAndProcessWorkflowTask(WithRetries(1), WithForceNewWorkflowTask) s.NoError(err) + updateResp := res.NewTask updateResult := <-updateResultCh s.EqualValues(tv.String("success-result", "1"), decodeString(s, updateResult.GetOutcome().GetSuccess())) s.EqualValues(0, updateResp.ResetHistoryEventId) // Complete workflow. - completeWorkflowResp, err := poller.HandlePartialWorkflowTask(updateResp.GetWorkflowTask(), true) + completeWorkflowResp, err := poller.HandlePartialWorkflowTask(updateResp.GetWorkflowTask(), false) s.NoError(err) s.NotNil(completeWorkflowResp) s.Nil(completeWorkflowResp.GetWorkflowTask()) @@ -421,7 +422,7 @@ } // Drain first WT. - _, err := poller.PollAndProcessWorkflowTask(true, false) + _, err := poller.PollAndProcessWorkflowTask(WithDumpHistory) s.NoError(err) updateResultCh := make(chan *workflowservice.UpdateWorkflowExecutionResponse) @@ -430,14 +431,15 @@ }() // Process update in workflow. - _, updateResp, err := poller.PollAndProcessWorkflowTaskWithAttemptAndRetryAndForceNewWorkflowTask(false, false, false, false, 1, 1, true, nil) + res, err := poller.PollAndProcessWorkflowTask(WithRetries(1), WithForceNewWorkflowTask) s.NoError(err) + updateResp := res.NewTask updateResult := <-updateResultCh s.EqualValues(tv.String("success-result", "1"), decodeString(s, updateResult.GetOutcome().GetSuccess())) s.EqualValues(0, updateResp.ResetHistoryEventId) // Complete workflow. 
- completeWorkflowResp, err := poller.HandlePartialWorkflowTask(updateResp.GetWorkflowTask(), true) + completeWorkflowResp, err := poller.HandlePartialWorkflowTask(updateResp.GetWorkflowTask(), false) s.NoError(err) s.NotNil(completeWorkflowResp) s.Nil(completeWorkflowResp.GetWorkflowTask()) @@ -468,7 +470,7 @@ } } -func (s *integrationSuite) TestUpdateWorkflow_FirstNormalStartedWorkflowTask_AcceptComplete() { +func (s *integrationSuite) TestUpdateWorkflow_FirstNormalScheduledWorkflowTask_AcceptComplete() { testCases := []struct { Name string @@ -559,17 +561,16 @@ }() // Process update in workflow. - _, updateResp, err := poller.PollAndProcessWorkflowTaskWithAttemptAndRetryAndForceNewWorkflowTask(false, false, false, false, 1, 5, true, nil) + res, err := poller.PollAndProcessWorkflowTask(WithRetries(1), WithForceNewWorkflowTask) s.NoError(err) + updateResp := res.NewTask updateResult := <-updateResultCh s.EqualValues(tv.String("success-result", "1"), decodeString(s, updateResult.GetOutcome().GetSuccess())) s.EqualValues(0, updateResp.ResetHistoryEventId) - lastWorkflowTask := updateResp.GetWorkflowTask() - s.NotNil(lastWorkflowTask) // Complete workflow. - completeWorkflowResp, err := poller.HandlePartialWorkflowTask(lastWorkflowTask, true) + completeWorkflowResp, err := poller.HandlePartialWorkflowTask(updateResp.GetWorkflowTask(), false) s.NoError(err) s.NotNil(completeWorkflowResp) s.Nil(completeWorkflowResp.GetWorkflowTask()) @@ -626,7 +627,7 @@ switch wtHandlerCalls { case 1: // Completes first WT with empty command list. - return []*commandpb.Command{}, nil + return nil, nil case 2: s.EqualHistory(` 1 WorkflowExecutionStarted @@ -689,7 +690,7 @@ } // Drain first WT. - _, err := poller.PollAndProcessWorkflowTask(false, false) + _, err := poller.PollAndProcessWorkflowTask() s.NoError(err) // Send signal to schedule new WT. @@ -702,17 +703,16 @@ }() // Process update in workflow. It will be attached to existing WT. - _, updateResp, err := poller.PollAndProcessWorkflowTaskWithAttemptAndRetryAndForceNewWorkflowTask(false, false, false, false, 1, 5, true, nil) + res, err := poller.PollAndProcessWorkflowTask(WithRetries(1), WithForceNewWorkflowTask) s.NoError(err) + updateResp := res.NewTask updateResult := <-updateResultCh s.EqualValues(tv.String("success-result", "1"), decodeString(s, updateResult.GetOutcome().GetSuccess())) s.EqualValues(0, updateResp.ResetHistoryEventId) - lastWorkflowTask := updateResp.GetWorkflowTask() - s.NotNil(lastWorkflowTask) // Complete workflow. - completeWorkflowResp, err := poller.HandlePartialWorkflowTask(lastWorkflowTask, true) + completeWorkflowResp, err := poller.HandlePartialWorkflowTask(updateResp.GetWorkflowTask(), false) s.NoError(err) s.NotNil(completeWorkflowResp) s.Nil(completeWorkflowResp.GetWorkflowTask()) @@ -742,6 +742,239 @@ } } +func (s *integrationSuite) TestUpdateWorkflow_NewSpeculativeFromStartedWorkflowTask_Rejected() { + + tv := testvars.New(s.T().Name()) + + tv = s.startWorkflow(tv) + + updateResultCh := make(chan *workflowservice.UpdateWorkflowExecutionResponse) + + wtHandlerCalls := 0 + wtHandler := func(execution *commonpb.WorkflowExecution, wt *commonpb.WorkflowType, previousStartedEventID, startedEventID int64, history *historypb.History) ([]*commandpb.Command, error) { + wtHandlerCalls++ + switch wtHandlerCalls { + case 1: + // Send update after 1st WT has started. + go func() { + updateResultCh <- s.sendUpdateNoError(tv, "1") + }() + // To make sure that 1st update gets to the sever while WT1 is running. 
+ time.Sleep(500 * time.Millisecond) + // Completes WT with empty command list to create next WT as speculative. + return nil, nil + case 2: + s.EqualHistory(` + 4 WorkflowTaskCompleted + 5 WorkflowTaskScheduled // Speculative WT2 which was created while completing WT1. + 6 WorkflowTaskStarted`, history) + // Message handler rejects update. + return nil, nil + case 3: + s.EqualHistory(` + 4 WorkflowTaskCompleted // Speculative WT2 disappeared and new normal WT was created. + 5 WorkflowTaskScheduled + 6 WorkflowTaskStarted`, history) + + return []*commandpb.Command{{ + CommandType: enumspb.COMMAND_TYPE_COMPLETE_WORKFLOW_EXECUTION, + Attributes: &commandpb.Command_CompleteWorkflowExecutionCommandAttributes{CompleteWorkflowExecutionCommandAttributes: &commandpb.CompleteWorkflowExecutionCommandAttributes{}}, + }}, nil + default: + s.Failf("wtHandler called too many times", "wtHandler shouldn't be called %d times", wtHandlerCalls) + return nil, nil + } + } + + msgHandlerCalls := 0 + msgHandler := func(task *workflowservice.PollWorkflowTaskQueueResponse) ([]*protocolpb.Message, error) { + msgHandlerCalls++ + switch msgHandlerCalls { + case 1: + return nil, nil + case 2: + s.Len(task.Messages, 1) + updRequestMsg := task.Messages[0] + s.EqualValues(5, updRequestMsg.GetEventId()) + + return s.rejectUpdateMessages(tv, updRequestMsg, "1"), nil + case 3: + return nil, nil + default: + s.Failf("msgHandler called too many times", "msgHandler shouldn't be called %d times", msgHandlerCalls) + return nil, nil + } + } + + poller := &TaskPoller{ + Engine: s.engine, + Namespace: s.namespace, + TaskQueue: tv.TaskQueue(), + Identity: tv.WorkerIdentity(), + WorkflowTaskHandler: wtHandler, + MessageHandler: msgHandler, + Logger: s.Logger, + T: s.T(), + } + + // Drain first WT which starts 1st update. + res, err := poller.PollAndProcessWorkflowTask(WithRetries(1)) + s.NoError(err) + wt1Resp := res.NewTask + + // Reject update in 2nd WT. + wt2Resp, err := poller.HandlePartialWorkflowTask(wt1Resp.GetWorkflowTask(), true) + s.NoError(err) + updateResult := <-updateResultCh + s.Equal(tv.String("update rejected", "1"), updateResult.GetOutcome().GetFailure().GetMessage()) + s.EqualValues(3, wt2Resp.ResetHistoryEventId) + + // Complete workflow. + completeWorkflowResp, err := poller.HandlePartialWorkflowTask(wt2Resp.GetWorkflowTask(), false) + s.NoError(err) + s.NotNil(completeWorkflowResp) + s.Nil(completeWorkflowResp.GetWorkflowTask()) + s.EqualValues(0, completeWorkflowResp.ResetHistoryEventId) + + s.Equal(3, wtHandlerCalls) + s.Equal(3, msgHandlerCalls) + + events := s.getHistory(s.namespace, tv.WorkflowExecution()) + + s.EqualHistoryEvents(` + 1 WorkflowExecutionStarted + 2 WorkflowTaskScheduled + 3 WorkflowTaskStarted + 4 WorkflowTaskCompleted + 5 WorkflowTaskScheduled + 6 WorkflowTaskStarted + 7 WorkflowTaskCompleted + 8 WorkflowExecutionCompleted`, events) +} + +func (s *integrationSuite) TestUpdateWorkflow_NewNormalFromStartedWorkflowTask_Rejected() { + + tv := testvars.New(s.T().Name()) + + tv = s.startWorkflow(tv) + + updateResultCh := make(chan *workflowservice.UpdateWorkflowExecutionResponse) + + wtHandlerCalls := 0 + wtHandler := func(execution *commonpb.WorkflowExecution, wt *commonpb.WorkflowType, previousStartedEventID, startedEventID int64, history *historypb.History) ([]*commandpb.Command, error) { + wtHandlerCalls++ + switch wtHandlerCalls { + case 1: + // Send update after 1st WT has started. 
+ go func() { + updateResultCh <- s.sendUpdateNoError(tv, "1") + }() + // To make sure that 1st update gets to the sever while WT1 is running. + time.Sleep(500 * time.Millisecond) + // Completes WT with update unrelated commands to prevent next WT to be speculative. + return []*commandpb.Command{{ + CommandType: enumspb.COMMAND_TYPE_SCHEDULE_ACTIVITY_TASK, + Attributes: &commandpb.Command_ScheduleActivityTaskCommandAttributes{ScheduleActivityTaskCommandAttributes: &commandpb.ScheduleActivityTaskCommandAttributes{ + ActivityId: tv.ActivityID(), + ActivityType: tv.ActivityType(), + TaskQueue: tv.TaskQueue(), + ScheduleToCloseTimeout: tv.InfiniteTimeout(), + }}, + }}, nil + case 2: + s.EqualHistory(` + 4 WorkflowTaskCompleted + 5 ActivityTaskScheduled + 6 WorkflowTaskScheduled // Normal WT2 which was created while completing WT1. + 7 WorkflowTaskStarted`, history) + // Message handler rejects update. + return nil, nil + case 3: + s.EqualHistory(` + 8 WorkflowTaskCompleted // New normal WT is created. + 9 WorkflowTaskScheduled + 10 WorkflowTaskStarted`, history) + + return []*commandpb.Command{{ + CommandType: enumspb.COMMAND_TYPE_COMPLETE_WORKFLOW_EXECUTION, + Attributes: &commandpb.Command_CompleteWorkflowExecutionCommandAttributes{CompleteWorkflowExecutionCommandAttributes: &commandpb.CompleteWorkflowExecutionCommandAttributes{}}, + }}, nil + default: + s.Failf("wtHandler called too many times", "wtHandler shouldn't be called %d times", wtHandlerCalls) + return nil, nil + } + } + + msgHandlerCalls := 0 + msgHandler := func(task *workflowservice.PollWorkflowTaskQueueResponse) ([]*protocolpb.Message, error) { + msgHandlerCalls++ + switch msgHandlerCalls { + case 1: + return nil, nil + case 2: + s.Len(task.Messages, 1) + updRequestMsg := task.Messages[0] + s.EqualValues(6, updRequestMsg.GetEventId()) + + return s.rejectUpdateMessages(tv, updRequestMsg, "1"), nil + case 3: + return nil, nil + default: + s.Failf("msgHandler called too many times", "msgHandler shouldn't be called %d times", msgHandlerCalls) + return nil, nil + } + } + + poller := &TaskPoller{ + Engine: s.engine, + Namespace: s.namespace, + TaskQueue: tv.TaskQueue(), + Identity: tv.WorkerIdentity(), + WorkflowTaskHandler: wtHandler, + MessageHandler: msgHandler, + Logger: s.Logger, + T: s.T(), + } + + // Drain first WT which starts 1st update. + res, err := poller.PollAndProcessWorkflowTask(WithRetries(1)) + s.NoError(err) + wt1Resp := res.NewTask + + // Reject update in 2nd WT. + wt2Resp, err := poller.HandlePartialWorkflowTask(wt1Resp.GetWorkflowTask(), true) + s.NoError(err) + updateResult := <-updateResultCh + s.Equal(tv.String("update rejected", "1"), updateResult.GetOutcome().GetFailure().GetMessage()) + s.EqualValues(0, wt2Resp.ResetHistoryEventId) + + // Complete workflow. 
+ completeWorkflowResp, err := poller.HandlePartialWorkflowTask(wt2Resp.GetWorkflowTask(), false) + s.NoError(err) + s.NotNil(completeWorkflowResp) + s.Nil(completeWorkflowResp.GetWorkflowTask()) + s.EqualValues(0, completeWorkflowResp.ResetHistoryEventId) + + s.Equal(3, wtHandlerCalls) + s.Equal(3, msgHandlerCalls) + + events := s.getHistory(s.namespace, tv.WorkflowExecution()) + + s.EqualHistoryEvents(` + 1 WorkflowExecutionStarted + 2 WorkflowTaskScheduled + 3 WorkflowTaskStarted + 4 WorkflowTaskCompleted + 5 ActivityTaskScheduled + 6 WorkflowTaskScheduled + 7 WorkflowTaskStarted + 8 WorkflowTaskCompleted + 9 WorkflowTaskScheduled + 10 WorkflowTaskStarted + 11 WorkflowTaskCompleted + 12 WorkflowExecutionCompleted`, events) +} + func (s *integrationSuite) TestUpdateWorkflow_ValidateWorkerMessages() { testCases := []struct { Name string @@ -1077,7 +1310,7 @@ go updateWorkflowFn(tc.RespondWorkflowTaskError != "") // Process update in workflow. - _, err := poller.PollAndProcessWorkflowTask(false, false) + _, err := poller.PollAndProcessWorkflowTask() if tc.RespondWorkflowTaskError != "" { require.Error(s.T(), err, "RespondWorkflowTaskCompleted should return an error contains `%v`", tc.RespondWorkflowTaskError) require.Contains(s.T(), err.Error(), tc.RespondWorkflowTaskError) @@ -1119,7 +1352,7 @@ switch wtHandlerCalls { case 1: // Completes first WT with empty command list. - return []*commandpb.Command{}, nil + return nil, nil case 2: // This WT contains partial history because sticky was enabled. s.EqualHistory(` @@ -1181,7 +1414,7 @@ } // Drain existing first WT from regular task queue, but respond with sticky queue enabled response, next WT will go to sticky queue. - _, err := poller.PollAndProcessWorkflowTaskWithAttempt(false, false, false, true, 1) + _, err := poller.PollAndProcessWorkflowTask(WithRespondSticky) s.NoError(err) updateResultCh := make(chan *workflowservice.UpdateWorkflowExecutionResponse) @@ -1191,14 +1424,15 @@ }() // Process update in workflow task (it is sticky). - _, updateResp, err := poller.PollAndProcessWorkflowTaskWithAttemptAndRetryAndForceNewWorkflowTask(false, false, true, false, 1, 5, true, nil) + res, err := poller.PollAndProcessWorkflowTask(WithPollSticky, WithRetries(1), WithForceNewWorkflowTask) s.NoError(err) + updateResp := res.NewTask updateResult := <-updateResultCh s.EqualValues(tv.String("success-result", "1"), decodeString(s, updateResult.GetOutcome().GetSuccess())) s.EqualValues(0, updateResp.ResetHistoryEventId) // Complete workflow. - completeWorkflowResp, err := poller.HandlePartialWorkflowTask(updateResp.GetWorkflowTask(), true) + completeWorkflowResp, err := poller.HandlePartialWorkflowTask(updateResp.GetWorkflowTask(), false) s.NoError(err) s.NotNil(completeWorkflowResp) s.Nil(completeWorkflowResp.GetWorkflowTask()) @@ -1238,7 +1472,7 @@ switch wtHandlerCalls { case 1: // Completes first WT with empty command list. - return []*commandpb.Command{}, nil + return nil, nil case 2: // Worker gets full history because update was issued after sticky worker is gone. s.EqualHistory(` @@ -1304,7 +1538,7 @@ } // Drain existing WT from regular task queue, but respond with sticky enabled response to enable stick task queue. 
- _, err := poller.PollAndProcessWorkflowTaskWithAttemptAndRetry(false, false, false, true, 1, 5) + _, err := poller.PollAndProcessWorkflowTask(WithRespondSticky, WithRetries(1)) s.NoError(err) s.Logger.Info("Sleep 10 seconds to make sure stickyPollerUnavailableWindow time has passed.") @@ -1320,14 +1554,15 @@ }() // Process update in workflow task from non-sticky task queue. - _, updateResp, err := poller.PollAndProcessWorkflowTaskWithAttemptAndRetryAndForceNewWorkflowTask(false, false, false, false, 1, 5, true, nil) + res, err := poller.PollAndProcessWorkflowTask(WithRetries(1), WithForceNewWorkflowTask) s.NoError(err) + updateResp := res.NewTask updateResult := <-updateResultCh s.EqualValues(tv.String("success-result", "1"), decodeString(s, updateResult.GetOutcome().GetSuccess())) s.EqualValues(0, updateResp.ResetHistoryEventId) // Complete workflow. - completeWorkflowResp, err := poller.HandlePartialWorkflowTask(updateResp.GetWorkflowTask(), true) + completeWorkflowResp, err := poller.HandlePartialWorkflowTask(updateResp.GetWorkflowTask(), false) s.NoError(err) s.NotNil(completeWorkflowResp) s.Nil(completeWorkflowResp.GetWorkflowTask()) @@ -1354,7 +1589,7 @@ 13 WorkflowExecutionCompleted`, events) } -func (s *integrationSuite) TestUpdateWorkflow_FirstNormalStartedWorkflowTask_Reject() { +func (s *integrationSuite) TestUpdateWorkflow_FirstNormalScheduledWorkflowTask_Reject() { tv := testvars.New(s.T().Name()) tv = s.startWorkflow(tv) @@ -1422,14 +1657,15 @@ }() // Process update in workflow. - _, updateResp, err := poller.PollAndProcessWorkflowTaskWithAttemptAndRetryAndForceNewWorkflowTask(false, false, false, false, 1, 5, true, nil) + res, err := poller.PollAndProcessWorkflowTask(WithRetries(1), WithForceNewWorkflowTask) s.NoError(err) + updateResp := res.NewTask updateResult := <-updateResultCh s.Equal(tv.String("update rejected", "1"), updateResult.GetOutcome().GetFailure().GetMessage()) s.EqualValues(0, updateResp.ResetHistoryEventId) // Complete workflow. - completeWorkflowResp, err := poller.HandlePartialWorkflowTask(updateResp.GetWorkflowTask(), true) + completeWorkflowResp, err := poller.HandlePartialWorkflowTask(updateResp.GetWorkflowTask(), false) s.NoError(err) s.NotNil(completeWorkflowResp) s.Nil(completeWorkflowResp.GetWorkflowTask()) @@ -1462,7 +1698,7 @@ switch wtHandlerCalls { case 1: // Completes first WT with empty command list. - return []*commandpb.Command{}, nil + return nil, nil case 2: s.EqualHistory(` 1 WorkflowExecutionStarted @@ -1522,7 +1758,7 @@ } // Drain first WT. - _, err := poller.PollAndProcessWorkflowTask(true, false) + _, err := poller.PollAndProcessWorkflowTask(WithDumpHistory) s.NoError(err) updateResultCh := make(chan *workflowservice.UpdateWorkflowExecutionResponse) @@ -1531,14 +1767,15 @@ }() // Process update in workflow. - _, updateResp, err := poller.PollAndProcessWorkflowTaskWithAttemptAndRetryAndForceNewWorkflowTask(false, false, false, false, 1, 5, true, nil) + res, err := poller.PollAndProcessWorkflowTask(WithRetries(1), WithForceNewWorkflowTask) s.NoError(err) + updateResp := res.NewTask updateResult := <-updateResultCh s.Equal(tv.String("update rejected", "1"), updateResult.GetOutcome().GetFailure().GetMessage()) s.EqualValues(3, updateResp.ResetHistoryEventId) // Complete workflow. 
- completeWorkflowResp, err := poller.HandlePartialWorkflowTask(updateResp.GetWorkflowTask(), true) + completeWorkflowResp, err := poller.HandlePartialWorkflowTask(updateResp.GetWorkflowTask(), false) s.NoError(err) s.NotNil(completeWorkflowResp) s.Nil(completeWorkflowResp.GetWorkflowTask()) @@ -1639,7 +1876,7 @@ } // Drain first WT. - _, err := poller.PollAndProcessWorkflowTask(true, false) + _, err := poller.PollAndProcessWorkflowTask(WithDumpHistory) s.NoError(err) updateResultCh := make(chan *workflowservice.UpdateWorkflowExecutionResponse) @@ -1648,14 +1885,15 @@ }() // Process update in workflow. - _, updateResp, err := poller.PollAndProcessWorkflowTaskWithAttemptAndRetryAndForceNewWorkflowTask(false, false, false, false, 1, 5, true, nil) + res, err := poller.PollAndProcessWorkflowTask(WithRetries(1), WithForceNewWorkflowTask) s.NoError(err) + updateResp := res.NewTask updateResult := <-updateResultCh s.Equal(tv.String("update rejected", "1"), updateResult.GetOutcome().GetFailure().GetMessage()) s.EqualValues(0, updateResp.ResetHistoryEventId, "no reset of event ID should happened after update rejection if it was delivered with normal workflow task") // Complete workflow. - completeWorkflowResp, err := poller.HandlePartialWorkflowTask(updateResp.GetWorkflowTask(), true) + completeWorkflowResp, err := poller.HandlePartialWorkflowTask(updateResp.GetWorkflowTask(), false) s.NoError(err) s.NotNil(completeWorkflowResp) s.Nil(completeWorkflowResp.GetWorkflowTask()) @@ -1784,7 +2022,7 @@ }() // Accept update1 in normal WT1. - _, err := poller.PollAndProcessWorkflowTask(false, false) + _, err := poller.PollAndProcessWorkflowTask() s.NoError(err) // Send 2nd update and create speculative WT2. @@ -1794,8 +2032,9 @@ }() // Poll for WT2 which 2nd update. Accept update2. - _, updateAcceptResp2, err := poller.PollAndProcessWorkflowTaskWithAttemptAndRetryAndForceNewWorkflowTask(false, false, false, false, 1, 1, true, nil) + res, err := poller.PollAndProcessWorkflowTask(WithRetries(1), WithForceNewWorkflowTask) s.NoError(err) + updateAcceptResp2 := res.NewTask s.NotNil(updateAcceptResp2) s.EqualValues(0, updateAcceptResp2.ResetHistoryEventId) @@ -1816,7 +2055,7 @@ s.EqualValues(0, updateCompleteResp1.ResetHistoryEventId) // Complete WF in WT5. - completeWorkflowResp, err := poller.HandlePartialWorkflowTask(updateCompleteResp1.GetWorkflowTask(), true) + completeWorkflowResp, err := poller.HandlePartialWorkflowTask(updateCompleteResp1.GetWorkflowTask(), false) s.NoError(err) s.NotNil(completeWorkflowResp) s.Nil(completeWorkflowResp.GetWorkflowTask()) @@ -1945,7 +2184,7 @@ }() // Accept update1 in WT1. - _, err := poller.PollAndProcessWorkflowTask(false, false) + _, err := poller.PollAndProcessWorkflowTask() s.NoError(err) // Send 2nd update and create speculative WT2. @@ -1955,8 +2194,9 @@ }() // Poll for WT2 which 2nd update. Reject update2. - _, updateRejectResp2, err := poller.PollAndProcessWorkflowTaskWithAttemptAndRetryAndForceNewWorkflowTask(false, false, false, false, 1, 1, true, nil) + res, err := poller.PollAndProcessWorkflowTask(WithRetries(1), WithForceNewWorkflowTask) s.NoError(err) + updateRejectResp2 := res.NewTask s.NotNil(updateRejectResp2) s.EqualValues(0, updateRejectResp2.ResetHistoryEventId, "no reset of event ID should happened after update rejection if it was delivered with normal workflow task") @@ -1972,7 +2212,7 @@ s.EqualValues(0, updateCompleteResp1.ResetHistoryEventId) // Complete WT4. 
- completeWorkflowResp, err := poller.HandlePartialWorkflowTask(updateCompleteResp1.GetWorkflowTask(), true) + completeWorkflowResp, err := poller.HandlePartialWorkflowTask(updateCompleteResp1.GetWorkflowTask(), false) s.NoError(err) s.NotNil(completeWorkflowResp) s.Nil(completeWorkflowResp.GetWorkflowTask()) @@ -2013,7 +2253,7 @@ switch wtHandlerCalls { case 1: // Completes first WT with empty command list. - return []*commandpb.Command{}, nil + return nil, nil case 2: s.EqualHistory(` 1 WorkflowExecutionStarted @@ -2111,7 +2351,7 @@ } // Drain first WT. - _, err := poller.PollAndProcessWorkflowTask(true, false) + _, err := poller.PollAndProcessWorkflowTask(WithDumpHistory) s.NoError(err) updateResultCh := make(chan struct{}) @@ -2137,13 +2377,13 @@ go updateWorkflowFn() // Try to accept update in workflow: get malformed response. - _, err = poller.PollAndProcessWorkflowTask(false, false) + _, err = poller.PollAndProcessWorkflowTask() s.Error(err) s.Contains(err.Error(), "not found") // New normal (but transient) WT will be created but not returned. // Try to accept update in workflow 2nd time: get error. Poller will fail WT. - _, err = poller.PollAndProcessWorkflowTask(false, false) + _, err = poller.PollAndProcessWorkflowTask() // The error is from RespondWorkflowTaskFailed, which should go w/o error. s.NoError(err) @@ -2152,12 +2392,12 @@ <-updateResultCh // Try to accept update in workflow 3rd time: get error. Poller will fail WT. - _, err = poller.PollAndProcessWorkflowTask(false, false) + _, err = poller.PollAndProcessWorkflowTask() // The error is from RespondWorkflowTaskFailed, which should go w/o error. s.NoError(err) // Complete workflow. - _, err = poller.PollAndProcessWorkflowTask(false, false) + _, err = poller.PollAndProcessWorkflowTask() s.NoError(err) s.Equal(5, wtHandlerCalls) @@ -2189,7 +2429,7 @@ switch wtHandlerCalls { case 1: // Completes first WT with empty command list. - return []*commandpb.Command{}, nil + return nil, nil case 2: s.EqualHistory(` 1 WorkflowExecutionStarted @@ -2252,7 +2492,7 @@ } // Drain first WT. - _, err := poller.PollAndProcessWorkflowTask(true, false) + _, err := poller.PollAndProcessWorkflowTask(WithDumpHistory) s.NoError(err) updateResultCh := make(chan *workflowservice.UpdateWorkflowExecutionResponse) @@ -2261,14 +2501,15 @@ }() // Process update in workflow. - _, updateResp, err := poller.PollAndProcessWorkflowTaskWithAttemptAndRetryAndForceNewWorkflowTask(false, false, false, false, 1, 5, true, nil) + res, err := poller.PollAndProcessWorkflowTask(WithRetries(1), WithForceNewWorkflowTask) s.NoError(err) + updateResp := res.NewTask updateResult := <-updateResultCh s.Equal(tv.String("update rejected", "1"), updateResult.GetOutcome().GetFailure().GetMessage()) s.EqualValues(0, updateResp.ResetHistoryEventId) // Complete workflow. - completeWorkflowResp, err := poller.HandlePartialWorkflowTask(updateResp.GetWorkflowTask(), true) + completeWorkflowResp, err := poller.HandlePartialWorkflowTask(updateResp.GetWorkflowTask(), false) s.NoError(err) s.NotNil(completeWorkflowResp) s.Nil(completeWorkflowResp.GetWorkflowTask()) @@ -2305,7 +2546,7 @@ switch wtHandlerCalls { case 1: // Completes first WT with empty command list. - return []*commandpb.Command{}, nil + return nil, nil case 2: s.EqualHistory(` 1 WorkflowExecutionStarted @@ -2365,7 +2606,7 @@ } // Drain first WT. 
- _, err := poller.PollAndProcessWorkflowTask(true, false) + _, err := poller.PollAndProcessWorkflowTask(WithDumpHistory) s.NoError(err) updateResultCh := make(chan *workflowservice.UpdateWorkflowExecutionResponse) @@ -2380,14 +2621,15 @@ s.NoError(err) // Process update in workflow. - _, updateResp, err := poller.PollAndProcessWorkflowTaskWithAttemptAndRetryAndForceNewWorkflowTask(false, false, false, false, 1, 5, true, nil) + res, err := poller.PollAndProcessWorkflowTask(WithRetries(1), WithForceNewWorkflowTask) s.NoError(err) + updateResp := res.NewTask updateResult := <-updateResultCh s.Equal(tv.String("update rejected", "1"), updateResult.GetOutcome().GetFailure().GetMessage()) s.EqualValues(0, updateResp.ResetHistoryEventId) // Complete workflow. - completeWorkflowResp, err := poller.HandlePartialWorkflowTask(updateResp.GetWorkflowTask(), true) + completeWorkflowResp, err := poller.HandlePartialWorkflowTask(updateResp.GetWorkflowTask(), false) s.NoError(err) s.NotNil(completeWorkflowResp) s.Nil(completeWorkflowResp.GetWorkflowTask()) @@ -2436,7 +2678,7 @@ switch wtHandlerCalls { case 1: // Completes first WT with empty command list. - return []*commandpb.Command{}, nil + return nil, nil case 2: s.EqualHistory(` 1 WorkflowExecutionStarted @@ -2509,7 +2751,7 @@ } // Drain first WT. - _, err = poller.PollAndProcessWorkflowTask(true, false) + _, err = poller.PollAndProcessWorkflowTask(WithDumpHistory) s.NoError(err) updateResultCh := make(chan *workflowservice.UpdateWorkflowExecutionResponse) @@ -2518,14 +2760,15 @@ }() // Try to process update in workflow, but it takes more than WT timeout. So, WT times out. - _, _, err = poller.PollAndProcessWorkflowTaskWithAttemptAndRetryAndForceNewWorkflowTask(false, false, false, false, 1, 5, true, nil) + _, err = poller.PollAndProcessWorkflowTask(WithRetries(1)) s.Error(err) s.Equal("Workflow task not found.", err.Error()) // New normal WT was created on server after speculative WT has timed out. // It will accept and complete update first and workflow itself with the same WT. - _, updateResp, err := poller.PollAndProcessWorkflowTaskWithAttemptAndRetryAndForceNewWorkflowTask(false, false, false, false, 1, 5, true, nil) + res, err := poller.PollAndProcessWorkflowTask(WithRetries(1)) s.NoError(err) + updateResp := res.NewTask updateResult := <-updateResultCh s.EqualValues(tv.String("success-result", "1"), decodeString(s, updateResult.GetOutcome().GetSuccess())) s.EqualValues(0, updateResp.ResetHistoryEventId) @@ -2563,7 +2806,7 @@ switch wtHandlerCalls { case 1: // Completes first WT with empty command list. - return []*commandpb.Command{}, nil + return nil, nil case 2: // Speculative WT timed out on sticky task queue. Server sent full history with sticky timeout event. s.EqualHistory(` @@ -2623,7 +2866,7 @@ } // Drain first WT and respond with sticky enabled response to enable sticky task queue. - _, err := poller.PollAndProcessWorkflowTaskWithAttemptAndRetry(false, false, false, true, 1, 5) + _, err := poller.PollAndProcessWorkflowTask(WithRespondSticky, WithRetries(1)) s.NoError(err) updateResultCh := make(chan *workflowservice.UpdateWorkflowExecutionResponse) @@ -2635,14 +2878,16 @@ time.Sleep(poller.StickyScheduleToStartTimeout + 100*time.Millisecond) // Try to process update in workflow, poll from normal task queue. 
- _, updateResp, err := poller.PollAndProcessWorkflowTaskWithAttemptAndRetryAndForceNewWorkflowTask(false, false, false, false, 1, 5, true, nil) + res, err := poller.PollAndProcessWorkflowTask(WithRetries(1), WithForceNewWorkflowTask) s.NoError(err) + updateResp := res.NewTask s.NotNil(updateResp) // Complete workflow. - completeWorkflowResp, err := poller.HandlePartialWorkflowTask(updateResp.GetWorkflowTask(), true) + completeWorkflowResp, err := poller.HandlePartialWorkflowTask(updateResp.GetWorkflowTask(), false) s.NoError(err) s.NotNil(completeWorkflowResp) + s.Nil(completeWorkflowResp.GetWorkflowTask()) s.Equal(3, wtHandlerCalls) s.Equal(3, msgHandlerCalls) @@ -2676,7 +2921,7 @@ switch wtHandlerCalls { case 1: // Completes first WT with empty command list. - return []*commandpb.Command{}, nil + return nil, nil case 2: // Terminate workflow while speculative WT is running. _, err := s.engine.TerminateWorkflowExecution(NewContext(), &workflowservice.TerminateWorkflowExecutionRequest{ @@ -2727,7 +2972,7 @@ } // Drain first WT. - _, err := poller.PollAndProcessWorkflowTask(true, false) + _, err := poller.PollAndProcessWorkflowTask(WithDumpHistory) s.NoError(err) updateResultCh := make(chan struct{}) @@ -2754,11 +2999,10 @@ go updateWorkflowFn() // Process update in workflow. - _, updateResp, err := poller.PollAndProcessWorkflowTaskWithAttemptAndRetryAndForceNewWorkflowTask(false, false, false, false, 1, 5, true, nil) + _, err = poller.PollAndProcessWorkflowTask(WithRetries(1)) s.Error(err) s.IsType(err, (*serviceerror.NotFound)(nil)) s.ErrorContains(err, "Workflow task not found.") - s.Nil(updateResp) <-updateResultCh s.Equal(2, wtHandlerCalls) @@ -2795,7 +3039,7 @@ switch wtHandlerCalls { case 1: // Completes first WT with empty command list. - return []*commandpb.Command{}, nil + return nil, nil default: s.Failf("wtHandler called too many times", "wtHandler shouldn't be called %d times", wtHandlerCalls) return nil, nil @@ -2826,7 +3070,7 @@ } // Drain first WT. - _, err := poller.PollAndProcessWorkflowTask(true, false) + _, err := poller.PollAndProcessWorkflowTask(WithDumpHistory) s.NoError(err) updateResultCh := make(chan struct{}) @@ -2927,7 +3171,7 @@ switch wtHandlerCalls { case 1: // Completes first WT with empty command list. - return []*commandpb.Command{}, nil + return nil, nil case 2: return append(tc.Commands(tv), &commandpb.Command{ CommandType: enumspb.COMMAND_TYPE_COMPLETE_WORKFLOW_EXECUTION, @@ -2966,7 +3210,7 @@ } // Drain first WT. - _, err := poller.PollAndProcessWorkflowTask(true, false) + _, err := poller.PollAndProcessWorkflowTask(WithDumpHistory) s.NoError(err) updateResultCh := make(chan struct{}) @@ -2998,7 +3242,7 @@ }(tc.UpdateErrMsg) // Complete workflow. - _, err = poller.PollAndProcessWorkflowTask(false, false) + _, err = poller.PollAndProcessWorkflowTask() s.NoError(err) <-updateResultCh @@ -3019,7 +3263,7 @@ switch wtHandlerCalls { case 1: // Completes first WT with empty command list. - return []*commandpb.Command{}, nil + return nil, nil case 2: s.EqualHistory(` 1 WorkflowExecutionStarted @@ -3087,7 +3331,7 @@ } // Drain first WT. - _, err := poller.PollAndProcessWorkflowTask(true, false) + _, err := poller.PollAndProcessWorkflowTask(WithDumpHistory) s.NoError(err) updateResultCh := make(chan *workflowservice.UpdateWorkflowExecutionResponse) @@ -3096,8 +3340,9 @@ }() // Heartbeat from workflow. 
- _, heartbeatResp, err := poller.PollAndProcessWorkflowTaskWithAttemptAndRetryAndForceNewWorkflowTask(false, false, false, false, 1, 1, true, nil) + res, err := poller.PollAndProcessWorkflowTask(WithRetries(1), WithForceNewWorkflowTask) s.NoError(err) + heartbeatResp := res.NewTask // Reject update from workflow. updateResp, err := poller.HandlePartialWorkflowTask(heartbeatResp.GetWorkflowTask(), true) @@ -3107,7 +3352,7 @@ s.EqualValues(0, updateResp.ResetHistoryEventId, "no reset of event ID should happened after update rejection because of heartbeat") // Complete workflow. - completeWorkflowResp, err := poller.HandlePartialWorkflowTask(updateResp.GetWorkflowTask(), true) + completeWorkflowResp, err := poller.HandlePartialWorkflowTask(updateResp.GetWorkflowTask(), false) s.NoError(err) s.NotNil(completeWorkflowResp) s.Nil(completeWorkflowResp.GetWorkflowTask()) @@ -3146,7 +3391,7 @@ switch wtHandlerCalls { case 1: // Completes first WT with empty command list. - return []*commandpb.Command{}, nil + return nil, nil case 2: s.EqualHistory(` 1 WorkflowExecutionStarted @@ -3193,7 +3438,7 @@ } // Drain first WT. - _, err := poller.PollAndProcessWorkflowTask(true, false) + _, err := poller.PollAndProcessWorkflowTask(WithDumpHistory) s.NoError(err) updateResultCh := make(chan struct{}) @@ -3242,7 +3487,7 @@ s.NoError(err) // Complete workflow. - completeWorkflowResp, err := poller.PollAndProcessWorkflowTask(false, false) + completeWorkflowResp, err := poller.PollAndProcessWorkflowTask() s.NoError(err) s.NotNil(completeWorkflowResp) @@ -3274,7 +3519,7 @@ switch wtHandlerCalls { case 1: // Completes first WT with empty command list. - return []*commandpb.Command{}, nil + return nil, nil case 2: s.EqualHistory(` 1 WorkflowExecutionStarted @@ -3340,7 +3585,7 @@ } // Drain first WT. - _, err := poller.PollAndProcessWorkflowTask(true, false) + _, err := poller.PollAndProcessWorkflowTask(WithDumpHistory) s.NoError(err) updateResultCh := make(chan struct{}) @@ -3368,11 +3613,10 @@ go updateWorkflowFn() // Process update in workflow. - _, updateResp, err := poller.PollAndProcessWorkflowTaskWithAttemptAndRetryAndForceNewWorkflowTask(false, false, false, false, 1, 1, true, nil) + _, err = poller.PollAndProcessWorkflowTask(WithRetries(1)) s.Error(err) s.IsType(&serviceerror.NotFound{}, err) s.ErrorContains(err, "Workflow task not found") - s.Nil(updateResp) <-updateResultCh @@ -3381,7 +3625,7 @@ s.NoError(err) // Complete workflow. - completeWorkflowResp, err := poller.PollAndProcessWorkflowTask(false, false) + completeWorkflowResp, err := poller.PollAndProcessWorkflowTask() s.NoError(err) s.NotNil(completeWorkflowResp) @@ -3419,7 +3663,7 @@ `, history) // Close shard. InvalidArgument error will be returned to RespondWorkflowTaskCompleted. s.closeShard(tv.WorkflowID()) - return []*commandpb.Command{}, nil + return nil, nil case 2: s.EqualHistory(` 1 WorkflowExecutionStarted @@ -3493,7 +3737,7 @@ go updateWorkflowFn() // Process update in workflow. Update won't be found on server due to shard reload and server will fail WT. - _, err := poller.PollAndProcessWorkflowTask(false, false) + _, err := poller.PollAndProcessWorkflowTask() s.Error(err) s.IsType(&serviceerror.InvalidArgument{}, err, "workflow task failure must be an InvalidArgument error") s.ErrorContains(err, fmt.Sprintf("update %q not found", tv.UpdateID("1"))) @@ -3501,7 +3745,7 @@ <-updateResultCh // Complete workflow. 
- completeWorkflowResp, err := poller.PollAndProcessWorkflowTask(false, false) + completeWorkflowResp, err := poller.PollAndProcessWorkflowTask() s.NoError(err) s.NotNil(completeWorkflowResp) @@ -3532,7 +3776,7 @@ switch wtHandlerCalls { case 1: // Completes first WT with empty command list. - return []*commandpb.Command{}, nil + return nil, nil case 2: s.EqualHistory(` 1 WorkflowExecutionStarted @@ -3592,7 +3836,7 @@ } // Drain first WT. - _, err := poller.PollAndProcessWorkflowTask(true, false) + _, err := poller.PollAndProcessWorkflowTask(WithDumpHistory) s.NoError(err) updateResultCh := make(chan *workflowservice.UpdateWorkflowExecutionResponse) @@ -3607,8 +3851,9 @@ }() // Process update in workflow. - _, updateResp, err := poller.PollAndProcessWorkflowTaskWithAttemptAndRetryAndForceNewWorkflowTask(false, false, false, false, 1, 1, true, nil) + res, err := poller.PollAndProcessWorkflowTask(WithRetries(1), WithForceNewWorkflowTask) s.NoError(err) + updateResp := res.NewTask updateResult := <-updateResultCh updateResult2 := <-updateResultCh2 s.EqualValues(tv.String("success-result", "1"), decodeString(s, updateResult.GetOutcome().GetSuccess())) @@ -3616,7 +3861,7 @@ s.EqualValues(0, updateResp.ResetHistoryEventId) // Complete workflow. - completeWorkflowResp, err := poller.HandlePartialWorkflowTask(updateResp.GetWorkflowTask(), true) + completeWorkflowResp, err := poller.HandlePartialWorkflowTask(updateResp.GetWorkflowTask(), false) s.NoError(err) s.NotNil(completeWorkflowResp) s.Nil(completeWorkflowResp.GetWorkflowTask()) @@ -3656,7 +3901,7 @@ switch wtHandlerCalls { case 1: // Completes first WT with empty command list. - return []*commandpb.Command{}, nil + return nil, nil case 2: // Send second update with the same ID when WT is started but not completed. go func() { @@ -3719,7 +3964,7 @@ } // Drain first WT. - _, err := poller.PollAndProcessWorkflowTask(true, false) + _, err := poller.PollAndProcessWorkflowTask(WithDumpHistory) s.NoError(err) updateResultCh := make(chan *workflowservice.UpdateWorkflowExecutionResponse) @@ -3728,8 +3973,9 @@ }() // Process update in workflow. - _, updateResp, err := poller.PollAndProcessWorkflowTaskWithAttemptAndRetryAndForceNewWorkflowTask(false, false, false, false, 1, 1, true, nil) + res, err := poller.PollAndProcessWorkflowTask(WithRetries(1), WithForceNewWorkflowTask) s.NoError(err) + updateResp := res.NewTask updateResult := <-updateResultCh s.EqualValues(tv.String("success-result", "1"), decodeString(s, updateResult.GetOutcome().GetSuccess())) s.EqualValues(0, updateResp.ResetHistoryEventId) @@ -3738,7 +3984,7 @@ s.EqualValues(tv.String("success-result", "1"), decodeString(s, updateResult2.GetOutcome().GetSuccess())) // Complete workflow. - completeWorkflowResp, err := poller.HandlePartialWorkflowTask(updateResp.GetWorkflowTask(), true) + completeWorkflowResp, err := poller.HandlePartialWorkflowTask(updateResp.GetWorkflowTask(), false) s.NoError(err) s.NotNil(completeWorkflowResp) s.Nil(completeWorkflowResp.GetWorkflowTask()) @@ -3792,7 +4038,7 @@ switch wtHandlerCalls { case 1: // Completes first WT with empty command list. - return []*commandpb.Command{}, nil + return nil, nil case 2: s.EqualHistory(` 1 WorkflowExecutionStarted @@ -3844,7 +4090,7 @@ } // Drain first WT. - _, err := poller.PollAndProcessWorkflowTask(true, false) + _, err := poller.PollAndProcessWorkflowTask(WithDumpHistory) s.NoError(err) updateResultCh := make(chan *workflowservice.UpdateWorkflowExecutionResponse) @@ -3853,7 +4099,7 @@ }() // Process update in workflow. 
- _, err = poller.PollAndProcessWorkflowTask(false, false) + _, err = poller.PollAndProcessWorkflowTask() s.NoError(err) updateResult := <-updateResultCh s.EqualValues(tv.String("success-result", "1"), decodeString(s, updateResult.GetOutcome().GetSuccess())) @@ -3891,7 +4137,7 @@ s.NoError(err) // Complete workflow. - completeWorkflowResp, err := poller.PollAndProcessWorkflowTask(false, false) + completeWorkflowResp, err := poller.PollAndProcessWorkflowTask() s.NoError(err) s.NotNil(completeWorkflowResp) @@ -3918,3 +4164,751 @@ }) } } + +func (s *integrationSuite) TestUpdateWorkflow_StaleSpeculativeWorkflowTask_CloseShard_DifferentStartedId_Rejected() { + /* + Test scenario: + An update created a speculative WT and WT is dispatched to the worker (started). + Shard is reloaded, speculative WT is disappeared from server. + Another update come in and second speculative WT is scheduled but not dispatched yet. + An activity completes, it converts the 2nd speculative WT into normal one. + The first speculative WT responds back, server fails request it because WorkflowTaskStarted event Id is mismatched. + The second speculative WT responds back and server completes it. + */ + + tv := testvars.New(s.T().Name()) + tv = s.startWorkflow(tv) + + wtHandlerCalls := 0 + wtHandler := func(execution *commonpb.WorkflowExecution, wt *commonpb.WorkflowType, previousStartedEventID, startedEventID int64, history *historypb.History) ([]*commandpb.Command, error) { + wtHandlerCalls++ + switch wtHandlerCalls { + case 1: + // Schedule activity. + return []*commandpb.Command{{ + CommandType: enumspb.COMMAND_TYPE_SCHEDULE_ACTIVITY_TASK, + Attributes: &commandpb.Command_ScheduleActivityTaskCommandAttributes{ScheduleActivityTaskCommandAttributes: &commandpb.ScheduleActivityTaskCommandAttributes{ + ActivityId: tv.ActivityID(), + ActivityType: tv.ActivityType(), + TaskQueue: tv.TaskQueue(), + ScheduleToCloseTimeout: tv.InfiniteTimeout(), + }}, + }}, nil + case 2: + return nil, nil + default: + s.Failf("wtHandler called too many times", "wtHandler shouldn't be called %d times", wtHandlerCalls) + return nil, nil + } + } + + atHandler := func(execution *commonpb.WorkflowExecution, activityType *commonpb.ActivityType, + activityID string, input *commonpb.Payloads, taskToken []byte) (*commonpb.Payloads, bool, error) { + return payloads.EncodeString(tv.String("activity-result")), false, nil + } + + poller := &TaskPoller{ + Engine: s.engine, + Namespace: s.namespace, + TaskQueue: tv.TaskQueue(), + WorkflowTaskHandler: wtHandler, + ActivityTaskHandler: atHandler, + Logger: s.Logger, + T: s.T(), + } + + // First WT will schedule activity and create a new WT. + res, err := poller.PollAndProcessWorkflowTask(WithRetries(1), WithForceNewWorkflowTask) + s.NoError(err) + wt1Resp := res.NewTask + + // Drain 2nd WT (which is force created as requested) to make all events seen by SDK so following update can be speculative. + _, err = poller.HandlePartialWorkflowTask(wt1Resp.GetWorkflowTask(), false) + s.NoError(err) + s.EqualValues(0, wt1Resp.ResetHistoryEventId) + + // Send 1st update. It will create 3rd WT as speculative. + go func() { + _, _ = s.sendUpdate(tv, "1") + }() + + // Poll 3rd speculative WT with 1st update. 
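The new tests trigger speculative workflow tasks by calling s.sendUpdate(tv, "1") from a goroutine; that helper is not part of this hunk. A minimal sketch of what it plausibly wraps, the UpdateWorkflowExecution API, is shown below; the handler name, argument payload and wait policy are illustrative assumptions, and the real helper in the suite may differ.

// Sketch only: a helper equivalent to sendUpdate, assuming it wraps
// UpdateWorkflowExecution (assumed imports: updatepb "go.temporal.io/api/update/v1",
// enumspb "go.temporal.io/api/enums/v1").
func (s *integrationSuite) sendUpdateSketch(tv *testvars.TestVars, updateID string) (*workflowservice.UpdateWorkflowExecutionResponse, error) {
	return s.engine.UpdateWorkflowExecution(NewContext(), &workflowservice.UpdateWorkflowExecutionRequest{
		Namespace:         s.namespace,
		WorkflowExecution: tv.WorkflowExecution(),
		WaitPolicy: &updatepb.WaitPolicy{
			LifecycleStage: enumspb.UPDATE_WORKFLOW_EXECUTION_LIFECYCLE_STAGE_COMPLETED,
		},
		Request: &updatepb.Request{
			Meta: &updatepb.Meta{UpdateId: tv.UpdateID(updateID)},
			Input: &updatepb.Input{
				Name: "update-handler",                     // illustrative handler name
				Args: payloads.EncodeString("update-args"), // illustrative arguments
			},
		},
	})
}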
+ wt3, err := s.engine.PollWorkflowTaskQueue(NewContext(), &workflowservice.PollWorkflowTaskQueueRequest{ + Namespace: s.namespace, + TaskQueue: tv.TaskQueue(), + }) + s.NoError(err) + s.NotNil(wt3) + s.NotEmpty(wt3.TaskToken, "3rd workflow task must have valid task token") + s.Len(wt3.Messages, 1, "3rd workflow task must have a message with 1st update") + s.EqualValues(10, wt3.StartedEventId) + s.EqualValues(9, wt3.Messages[0].GetEventId()) + s.EqualHistory(` + 1 WorkflowExecutionStarted + 2 WorkflowTaskScheduled + 3 WorkflowTaskStarted + 4 WorkflowTaskCompleted + 5 ActivityTaskScheduled + 6 WorkflowTaskScheduled + 7 WorkflowTaskStarted + 8 WorkflowTaskCompleted + 9 WorkflowTaskScheduled + 10 WorkflowTaskStarted`, wt3.History) + + // Close shard, this will clear mutable state and speculative WT will disappear. + s.closeShard(tv.WorkflowID()) + + // Send 2nd update (with SAME updateId). This will create a 4th WT as speculative. + go func() { + _, _ = s.sendUpdate(tv, "1") + }() + + // Before polling for the 4th speculative WT, process activity. This will convert 4th speculative WT to normal WT. + err = poller.PollAndProcessActivityTask(false) + s.NoError(err) + + // Poll the 4th WT (not speculative anymore) but must have 2nd update. + wt4, err := s.engine.PollWorkflowTaskQueue(NewContext(), &workflowservice.PollWorkflowTaskQueueRequest{ + Namespace: s.namespace, + TaskQueue: tv.TaskQueue(), + }) + s.NoError(err) + s.NotNil(wt4) + s.NotEmpty(wt4.TaskToken, "4th workflow task must have valid task token") + s.Len(wt4.Messages, 1, "4th workflow task must have a message with 2nd update") + s.EqualValues(12, wt4.StartedEventId) + s.EqualValues(11, wt4.Messages[0].GetEventId()) + s.EqualHistory(` + 1 WorkflowExecutionStarted + 2 WorkflowTaskScheduled + 3 WorkflowTaskStarted + 4 WorkflowTaskCompleted + 5 ActivityTaskScheduled + 6 WorkflowTaskScheduled + 7 WorkflowTaskStarted + 8 WorkflowTaskCompleted + 9 WorkflowTaskScheduled + 10 ActivityTaskStarted + 11 ActivityTaskCompleted + 12 WorkflowTaskStarted`, wt4.History) + + // Now try to complete 3rd WT (speculative). It should fail because WorkflowTaskStarted event Id is mismatched. + _, err = s.engine.RespondWorkflowTaskCompleted(NewContext(), &workflowservice.RespondWorkflowTaskCompletedRequest{ + Namespace: s.namespace, + TaskToken: wt3.TaskToken, + Commands: s.acceptUpdateCommands(tv, "1"), + Messages: s.acceptUpdateMessages(tv, wt3.Messages[0], "1"), + }) + s.Error(err) + s.Contains(err.Error(), "Workflow task not found") + + // Complete 4th WT. It should succeed. 
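s.acceptUpdateCommands and s.acceptUpdateMessages, used by the RespondWorkflowTaskCompleted calls in these tests, are suite helpers that are not shown in this hunk. Conceptually, a worker accepts an update by pairing a PROTOCOL_MESSAGE command with a protocol message whose body is an update Acceptance; a rough sketch under that reading follows. The message ID scheme and the use of anypb for the Any body are assumptions (the suite may use a different proto runtime helper); the Acceptance field names come from the history assertions in the diff.

// Rough sketch of building the accept-update command/message pair (assumed imports:
// protocolpb "go.temporal.io/api/protocol/v1", updatepb "go.temporal.io/api/update/v1",
// "google.golang.org/protobuf/types/known/anypb").
func acceptUpdateSketch(requestMsg *protocolpb.Message, updateID string) ([]*commandpb.Command, []*protocolpb.Message, error) {
	body, err := anypb.New(&updatepb.Acceptance{
		AcceptedRequestMessageId:         requestMsg.GetId(),
		AcceptedRequestSequencingEventId: requestMsg.GetEventId(),
	})
	if err != nil {
		return nil, nil, err
	}
	msgID := updateID + "/accept" // illustrative message ID scheme
	commands := []*commandpb.Command{{
		CommandType: enumspb.COMMAND_TYPE_PROTOCOL_MESSAGE,
		Attributes: &commandpb.Command_ProtocolMessageCommandAttributes{
			ProtocolMessageCommandAttributes: &commandpb.ProtocolMessageCommandAttributes{MessageId: msgID},
		},
	}}
	messages := []*protocolpb.Message{{
		Id:                 msgID,
		ProtocolInstanceId: updateID,
		Body:               body,
	}}
	return commands, messages, nil
}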
+ _, err = s.engine.RespondWorkflowTaskCompleted(NewContext(), &workflowservice.RespondWorkflowTaskCompletedRequest{ + Namespace: s.namespace, + TaskToken: wt4.TaskToken, + Commands: append( + s.acceptUpdateCommands(tv, "1"), + &commandpb.Command{ + CommandType: enumspb.COMMAND_TYPE_SCHEDULE_ACTIVITY_TASK, + Attributes: &commandpb.Command_ScheduleActivityTaskCommandAttributes{ScheduleActivityTaskCommandAttributes: &commandpb.ScheduleActivityTaskCommandAttributes{ + ActivityId: tv.ActivityID(), + ActivityType: tv.ActivityType(), + TaskQueue: tv.TaskQueue(), + ScheduleToCloseTimeout: tv.InfiniteTimeout(), + }}, + }), + Messages: s.acceptUpdateMessages(tv, wt4.Messages[0], "1"), + }) + s.NoError(err) + + events := s.getHistory(s.namespace, tv.WorkflowExecution()) + s.EqualHistoryEvents(` + 1 WorkflowExecutionStarted + 2 WorkflowTaskScheduled + 3 WorkflowTaskStarted + 4 WorkflowTaskCompleted + 5 ActivityTaskScheduled + 6 WorkflowTaskScheduled + 7 WorkflowTaskStarted + 8 WorkflowTaskCompleted + 9 WorkflowTaskScheduled + 10 ActivityTaskStarted {"ScheduledEventId":5} + 11 ActivityTaskCompleted + 12 WorkflowTaskStarted + 13 WorkflowTaskCompleted + 14 WorkflowExecutionUpdateAccepted {"AcceptedRequestSequencingEventId":11} + 15 ActivityTaskScheduled + `, events) +} + +func (s *integrationSuite) TestUpdateWorkflow_StaleSpeculativeWorkflowTask_CloseShard_SameStartedId_SameUpdateId_Accepted() { + /* + Test scenario: + An update created a speculative WT and WT is dispatched to the worker (started). + Shard is reloaded, speculative WT is disappeared from server. + Another update come in and second speculative WT is dispatched to worker with same WT scheduled/started Id and update Id. + The first speculative WT respond back, server reject it because startTime is different. + The second speculative WT respond back, server accept it. + */ + tv := testvars.New(s.T().Name()) + tv = s.startWorkflow(tv) + + wtHandlerCalls := 0 + wtHandler := func(execution *commonpb.WorkflowExecution, wt *commonpb.WorkflowType, previousStartedEventID, startedEventID int64, history *historypb.History) ([]*commandpb.Command, error) { + wtHandlerCalls++ + switch wtHandlerCalls { + case 1: + // Schedule activity. + return []*commandpb.Command{{ + CommandType: enumspb.COMMAND_TYPE_SCHEDULE_ACTIVITY_TASK, + Attributes: &commandpb.Command_ScheduleActivityTaskCommandAttributes{ScheduleActivityTaskCommandAttributes: &commandpb.ScheduleActivityTaskCommandAttributes{ + ActivityId: tv.ActivityID("1"), + ActivityType: tv.ActivityType(), + TaskQueue: tv.TaskQueue(), + ScheduleToCloseTimeout: tv.InfiniteTimeout(), + }}, + }}, nil + case 2: + return nil, nil + default: + s.Failf("wtHandler called too many times", "wtHandler shouldn't be called %d times", wtHandlerCalls) + return nil, nil + } + } + + atHandler := func(execution *commonpb.WorkflowExecution, activityType *commonpb.ActivityType, + activityID string, input *commonpb.Payloads, taskToken []byte) (*commonpb.Payloads, bool, error) { + return payloads.EncodeString(tv.String("activity-result")), false, nil + } + + poller := &TaskPoller{ + Engine: s.engine, + Namespace: s.namespace, + TaskQueue: tv.TaskQueue(), + WorkflowTaskHandler: wtHandler, + ActivityTaskHandler: atHandler, + Logger: s.Logger, + T: s.T(), + } + + // First WT will schedule activity and create a new WT. 
+ res, err := poller.PollAndProcessWorkflowTask(WithRetries(1), WithForceNewWorkflowTask) + s.NoError(err) + wt1Resp := res.NewTask + + // Drain 2nd WT (which is force created as requested) to make all events seem by SDK so following update can be speculative. + _, err = poller.HandlePartialWorkflowTask(wt1Resp.GetWorkflowTask(), false) + s.NoError(err) + s.EqualValues(0, wt1Resp.ResetHistoryEventId) + + // Send 1st update. It will create 3rd WT as speculative. + go func() { + _, _ = s.sendUpdate(tv, "1") + }() + + // Poll 3rd speculative WT with 1st update. + wt3, err := s.engine.PollWorkflowTaskQueue(NewContext(), &workflowservice.PollWorkflowTaskQueueRequest{ + Namespace: s.namespace, + TaskQueue: tv.TaskQueue(), + }) + s.NoError(err) + s.NotNil(wt3) + s.NotEmpty(wt3.TaskToken, "3rd workflow task must have valid task token") + s.Len(wt3.Messages, 1, "3rd workflow task must have a message with 1st update") + s.EqualValues(10, wt3.StartedEventId) + s.EqualValues(9, wt3.Messages[0].GetEventId()) + s.EqualHistory(` + 1 WorkflowExecutionStarted + 2 WorkflowTaskScheduled + 3 WorkflowTaskStarted + 4 WorkflowTaskCompleted + 5 ActivityTaskScheduled + 6 WorkflowTaskScheduled + 7 WorkflowTaskStarted + 8 WorkflowTaskCompleted + 9 WorkflowTaskScheduled + 10 WorkflowTaskStarted`, wt3.History) + + // Close shard, this will clear mutable state and speculative WT will disappear. + s.closeShard(tv.WorkflowID()) + + // Send 2nd update (with SAME updateId). This will create a 4th WT as speculative. + go func() { + _, _ = s.sendUpdate(tv, "1") + }() + + // Poll for the 4th speculative WT. + wt4, err := s.engine.PollWorkflowTaskQueue(NewContext(), &workflowservice.PollWorkflowTaskQueueRequest{ + Namespace: s.namespace, + TaskQueue: tv.TaskQueue(), + }) + s.NoError(err) + s.NotNil(wt4) + s.NotEmpty(wt4.TaskToken, "4th workflow task must have valid task token") + s.Len(wt4.Messages, 1, "4th workflow task must have a message with 1st update") + s.EqualValues(10, wt4.StartedEventId) + s.EqualValues(9, wt4.Messages[0].GetEventId()) + s.EqualHistory(` + 1 WorkflowExecutionStarted + 2 WorkflowTaskScheduled + 3 WorkflowTaskStarted + 4 WorkflowTaskCompleted + 5 ActivityTaskScheduled + 6 WorkflowTaskScheduled + 7 WorkflowTaskStarted + 8 WorkflowTaskCompleted + 9 WorkflowTaskScheduled + 10 WorkflowTaskStarted`, wt4.History) + + // Now try to complete 3rd (speculative) WT, it should fail. 
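s.closeShard(tv.WorkflowID()), used above and in the neighbouring tests, simulates a shard reload: a speculative workflow task lives only in the shard's in-memory mutable state, so closing the owning shard makes it vanish while persisted history survives. The helper itself is defined elsewhere in the suite; a plausible sketch, assuming it resolves the owning history shard from the workflow ID and calls the admin CloseShard API, is:

// Plausible shape of the closeShard helper (not part of this hunk); the exact way
// the suite obtains the namespace ID and shard count may differ.
func (s *integrationSuite) closeShardSketch(wid string) {
	resp, err := s.engine.DescribeNamespace(NewContext(), &workflowservice.DescribeNamespaceRequest{
		Namespace: s.namespace,
	})
	s.NoError(err)
	shardID := common.WorkflowIDToHistoryShard(resp.NamespaceInfo.Id, wid, s.testClusterConfig.HistoryConfig.NumHistoryShards)
	_, err = s.adminClient.CloseShard(NewContext(), &adminservice.CloseShardRequest{ShardId: shardID})
	s.NoError(err)
}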
+ _, err = s.engine.RespondWorkflowTaskCompleted(NewContext(), &workflowservice.RespondWorkflowTaskCompletedRequest{ + Namespace: s.namespace, + TaskToken: wt3.TaskToken, + Commands: append( + s.acceptUpdateCommands(tv, "1"), + &commandpb.Command{ + CommandType: enumspb.COMMAND_TYPE_SCHEDULE_ACTIVITY_TASK, + Attributes: &commandpb.Command_ScheduleActivityTaskCommandAttributes{ScheduleActivityTaskCommandAttributes: &commandpb.ScheduleActivityTaskCommandAttributes{ + ActivityId: tv.ActivityID("2"), + ActivityType: tv.ActivityType(), + TaskQueue: tv.TaskQueue(), + ScheduleToCloseTimeout: tv.InfiniteTimeout(), + }}, + }), + Messages: s.acceptUpdateMessages(tv, wt3.Messages[0], "1"), + }) + s.IsType(&serviceerror.NotFound{}, err) + + // Try to complete 4th WT, it should succeed + _, err = s.engine.RespondWorkflowTaskCompleted(NewContext(), &workflowservice.RespondWorkflowTaskCompletedRequest{ + Namespace: s.namespace, + TaskToken: wt4.TaskToken, + Commands: append( + s.acceptUpdateCommands(tv, "1"), + &commandpb.Command{ + CommandType: enumspb.COMMAND_TYPE_SCHEDULE_ACTIVITY_TASK, + Attributes: &commandpb.Command_ScheduleActivityTaskCommandAttributes{ScheduleActivityTaskCommandAttributes: &commandpb.ScheduleActivityTaskCommandAttributes{ + ActivityId: tv.ActivityID("2"), + ActivityType: tv.ActivityType(), + TaskQueue: tv.TaskQueue(), + ScheduleToCloseTimeout: tv.InfiniteTimeout(), + }}, + }), + Messages: s.acceptUpdateMessages(tv, wt4.Messages[0], "1"), + }) + s.printWorkflowHistory(s.namespace, tv.WorkflowExecution()) + s.NoError(err, "2nd speculative WT should be completed because it has same WT scheduled/started Id and startTime matches the accepted message is valid (same update Id)") + + events := s.getHistory(s.namespace, tv.WorkflowExecution()) + s.EqualHistoryEvents(` + 1 WorkflowExecutionStarted + 2 WorkflowTaskScheduled + 3 WorkflowTaskStarted + 4 WorkflowTaskCompleted + 5 ActivityTaskScheduled + 6 WorkflowTaskScheduled + 7 WorkflowTaskStarted + 8 WorkflowTaskCompleted + 9 WorkflowTaskScheduled + 10 WorkflowTaskStarted + 11 WorkflowTaskCompleted + 12 WorkflowExecutionUpdateAccepted {"AcceptedRequestSequencingEventId":9} + 13 ActivityTaskScheduled + `, events) +} + +func (s *integrationSuite) TestUpdateWorkflow_StaleSpeculativeWorkflowTask_ClearMutableState_Accepted() { + /* + Test scenario: + An update created a speculative WT and WT is dispatched to the worker (started). + Mutable state cleared, speculative WT is disappeared from server but update registry stays as is. + Another update come in, and second speculative WT is dispatched to worker with same WT scheduled/started Id but different update Id. + The first speculative WT responds back, server rejected it (different start time). + The second speculative WT responds back, server accepted it. + */ + + tv := testvars.New(s.T().Name()) + tv = s.startWorkflow(tv) + + testCtx := NewContext() + wtHandlerCalls := 0 + wtHandler := func(execution *commonpb.WorkflowExecution, wt *commonpb.WorkflowType, previousStartedEventID, startedEventID int64, history *historypb.History) ([]*commandpb.Command, error) { + wtHandlerCalls++ + switch wtHandlerCalls { + case 1: + // Schedule activity. 
+ return []*commandpb.Command{{ + CommandType: enumspb.COMMAND_TYPE_SCHEDULE_ACTIVITY_TASK, + Attributes: &commandpb.Command_ScheduleActivityTaskCommandAttributes{ScheduleActivityTaskCommandAttributes: &commandpb.ScheduleActivityTaskCommandAttributes{ + ActivityId: tv.ActivityID("1"), + ActivityType: tv.ActivityType(), + TaskQueue: tv.TaskQueue(), + ScheduleToCloseTimeout: tv.InfiniteTimeout(), + }}, + }}, nil + case 2: + return nil, nil + default: + s.Failf("wtHandler called too many times", "wtHandler shouldn't be called %d times", wtHandlerCalls) + return nil, nil + } + } + + atHandler := func(execution *commonpb.WorkflowExecution, activityType *commonpb.ActivityType, + activityID string, input *commonpb.Payloads, taskToken []byte) (*commonpb.Payloads, bool, error) { + return payloads.EncodeString(tv.String("activity-result")), false, nil + } + + poller := &TaskPoller{ + Engine: s.engine, + Namespace: s.namespace, + TaskQueue: tv.TaskQueue(), + WorkflowTaskHandler: wtHandler, + ActivityTaskHandler: atHandler, + Logger: s.Logger, + T: s.T(), + } + + // First WT will schedule activity and create a new WT. + res, err := poller.PollAndProcessWorkflowTask(WithRetries(1), WithForceNewWorkflowTask) + s.NoError(err) + wt1Resp := res.NewTask + + // Drain 2nd WT (which is force created as requested) to make all events seen by SDK so following update can be speculative. + _, err = poller.HandlePartialWorkflowTask(wt1Resp.GetWorkflowTask(), false) + s.NoError(err) + s.EqualValues(0, wt1Resp.ResetHistoryEventId) + + // Send 1st update. It will create 3rd WT as speculative. + go func() { + _, _ = s.sendUpdate(tv, "1") + }() + + // Poll 3rd speculative WT with 1st update. + wt3, err := s.engine.PollWorkflowTaskQueue(testCtx, &workflowservice.PollWorkflowTaskQueueRequest{ + Namespace: s.namespace, + TaskQueue: tv.TaskQueue(), + }) + s.NoError(err) + s.NotNil(wt3) + s.NotEmpty(wt3.TaskToken, "3rd workflow task must have valid task token") + s.Len(wt3.Messages, 1, "3rd workflow task must have a message with 1st update") + s.EqualValues(10, wt3.StartedEventId) + s.EqualValues(9, wt3.Messages[0].GetEventId()) + s.EqualHistory(` + 1 WorkflowExecutionStarted + 2 WorkflowTaskScheduled + 3 WorkflowTaskStarted + 4 WorkflowTaskCompleted + 5 ActivityTaskScheduled + 6 WorkflowTaskScheduled + 7 WorkflowTaskStarted + 8 WorkflowTaskCompleted + 9 WorkflowTaskScheduled + 10 WorkflowTaskStarted`, wt3.History) + + // DescribeMutableState will clear MS, cause the speculative WT to disappear but the registry for update "1" will stay. + _, err = s.adminClient.DescribeMutableState(testCtx, &adminservice.DescribeMutableStateRequest{ + Namespace: s.namespace, + Execution: tv.WorkflowExecution(), + }) + s.NoError(err) + + // Send 2nd update (with DIFFERENT updateId). This will create a 4th WT as speculative. + go func() { + _, _ = s.sendUpdate(tv, "2") + }() + + // Poll the 4th speculative WT. 
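In this test the adminClient.DescribeMutableState call is issued purely for its side effect: as the comment in the diff notes, it clears the cached mutable state (and with it the speculative workflow task) while the in-memory update registry survives, unlike closeShard, which drops both. If the suite wanted to name that intent, a thin wrapper over the exact call used in the test could look like this hypothetical helper (not part of the diff):

// Hypothetical wrapper; the request is identical to the one in the test and is
// issued only for its cache-clearing side effect.
func (s *integrationSuite) clearMutableStateCache(tv *testvars.TestVars) {
	_, err := s.adminClient.DescribeMutableState(NewContext(), &adminservice.DescribeMutableStateRequest{
		Namespace: s.namespace,
		Execution: tv.WorkflowExecution(),
	})
	s.NoError(err)
}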
+ wt4, err := s.engine.PollWorkflowTaskQueue(testCtx, &workflowservice.PollWorkflowTaskQueueRequest{ + Namespace: s.namespace, + TaskQueue: tv.TaskQueue(), + }) + s.NoError(err) + s.NotNil(wt4) + s.NotEmpty(wt4.TaskToken, "4th workflow task must have valid task token") + s.Len(wt4.Messages, 2, "4th workflow task must have a message with 1st and 2nd updates") + s.EqualValues(10, wt4.StartedEventId) + s.EqualValues(9, wt4.Messages[0].GetEventId()) + s.EqualHistory(` + 1 WorkflowExecutionStarted + 2 WorkflowTaskScheduled + 3 WorkflowTaskStarted + 4 WorkflowTaskCompleted + 5 ActivityTaskScheduled + 6 WorkflowTaskScheduled + 7 WorkflowTaskStarted + 8 WorkflowTaskCompleted + 9 WorkflowTaskScheduled + 10 WorkflowTaskStarted`, wt4.History) + + // Now try to complete 3rd speculative WT, it should fail because start time does not match. + _, err = s.engine.RespondWorkflowTaskCompleted(testCtx, &workflowservice.RespondWorkflowTaskCompletedRequest{ + Namespace: s.namespace, + TaskToken: wt3.TaskToken, + Commands: append( + s.acceptUpdateCommands(tv, "1"), + &commandpb.Command{ + CommandType: enumspb.COMMAND_TYPE_SCHEDULE_ACTIVITY_TASK, + Attributes: &commandpb.Command_ScheduleActivityTaskCommandAttributes{ScheduleActivityTaskCommandAttributes: &commandpb.ScheduleActivityTaskCommandAttributes{ + ActivityId: tv.ActivityID("2"), + ActivityType: tv.ActivityType(), + TaskQueue: tv.TaskQueue(), + ScheduleToCloseTimeout: tv.InfiniteTimeout(), + }}, + }), + Messages: s.acceptUpdateMessages(tv, wt3.Messages[0], "1"), + ReturnNewWorkflowTask: true, + }) + s.IsType(&serviceerror.NotFound{}, err) + + // Complete of the 4th WT should succeed + wt5Resp, err := s.engine.RespondWorkflowTaskCompleted(testCtx, &workflowservice.RespondWorkflowTaskCompletedRequest{ + Namespace: s.namespace, + TaskToken: wt4.TaskToken, + Commands: append( + s.acceptUpdateCommands(tv, "2"), + &commandpb.Command{ + CommandType: enumspb.COMMAND_TYPE_SCHEDULE_ACTIVITY_TASK, + Attributes: &commandpb.Command_ScheduleActivityTaskCommandAttributes{ScheduleActivityTaskCommandAttributes: &commandpb.ScheduleActivityTaskCommandAttributes{ + ActivityId: tv.ActivityID("3"), + ActivityType: tv.ActivityType(), + TaskQueue: tv.TaskQueue(), + ScheduleToCloseTimeout: tv.InfiniteTimeout(), + }}, + }), + Messages: s.acceptUpdateMessages(tv, wt4.Messages[0], "2"), + ReturnNewWorkflowTask: true, + }) + s.NoError(err) + s.NotNil(wt5Resp) + wt5 := wt5Resp.WorkflowTask + s.NotNil(wt5) + s.NotEmpty(wt5.TaskToken, "5th workflow task must have valid task token") + s.Len(wt5.Messages, 1, "5th workflow task must have a message with 2nd update") + s.EqualValues(15, wt5.StartedEventId) + s.EqualValues(14, wt5.Messages[0].GetEventId()) + s.EqualHistory(` + 11 WorkflowTaskCompleted + 12 WorkflowExecutionUpdateAccepted + 13 ActivityTaskScheduled + 14 WorkflowTaskScheduled + 15 WorkflowTaskStarted`, wt5.History) + + // Complete WT5 should succeed. 
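The wt4 completion above sets ReturnNewWorkflowTask: true, which asks the server to return the next workflow task inline in the RespondWorkflowTaskCompleted response instead of requiring another poll; that is where wt5 (wt5Resp.WorkflowTask) comes from. Stripped of the update commands and messages, the pattern is simply:

// Complete-and-get-next: the response carries the newly scheduled workflow task,
// if any (commands and messages elided here; the test above includes them).
resp, err := s.engine.RespondWorkflowTaskCompleted(testCtx, &workflowservice.RespondWorkflowTaskCompletedRequest{
	Namespace:             s.namespace,
	TaskToken:             wt4.TaskToken,
	ReturnNewWorkflowTask: true,
})
s.NoError(err)
nextWT := resp.WorkflowTask // next workflow task, nil if nothing was scheduled
s.NotNil(nextWT)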
+ _, err = s.engine.RespondWorkflowTaskCompleted(testCtx, &workflowservice.RespondWorkflowTaskCompletedRequest{ + Namespace: s.namespace, + TaskToken: wt5.TaskToken, + Commands: append(s.acceptUpdateCommands(tv, "1"), &commandpb.Command{ + CommandType: enumspb.COMMAND_TYPE_SCHEDULE_ACTIVITY_TASK, + Attributes: &commandpb.Command_ScheduleActivityTaskCommandAttributes{ScheduleActivityTaskCommandAttributes: &commandpb.ScheduleActivityTaskCommandAttributes{ + ActivityId: tv.ActivityID("4"), + ActivityType: tv.ActivityType(), + TaskQueue: tv.TaskQueue(), + ScheduleToCloseTimeout: tv.InfiniteTimeout(), + }}, + }), + Messages: s.acceptUpdateMessages(tv, wt5.Messages[0], "1"), + }) + s.NoError(err) + + events := s.getHistory(s.namespace, tv.WorkflowExecution()) + s.EqualHistoryEvents(` + 1 WorkflowExecutionStarted + 2 WorkflowTaskScheduled + 3 WorkflowTaskStarted + 4 WorkflowTaskCompleted + 5 ActivityTaskScheduled + 6 WorkflowTaskScheduled + 7 WorkflowTaskStarted + 8 WorkflowTaskCompleted + 9 WorkflowTaskScheduled + 10 WorkflowTaskStarted + 11 WorkflowTaskCompleted + 12 WorkflowExecutionUpdateAccepted {"AcceptedRequestSequencingEventId":9} + 13 ActivityTaskScheduled + 14 WorkflowTaskScheduled + 15 WorkflowTaskStarted + 16 WorkflowTaskCompleted + 17 WorkflowExecutionUpdateAccepted {"AcceptedRequestSequencingEventId":14} + 18 ActivityTaskScheduled + `, events) +} + +func (s *integrationSuite) TestUpdateWorkflow_StaleSpeculativeWorkflowTask_SameStartedId_DifferentUpdateId_Rejected() { + /* + Test scenario: + An update created a speculative WT and WT is dispatched to the worker (started). + Shard is reloaded, speculative WT and update registry are disappeared from server. + Another update come in (with different update Id), and second speculative WT is dispatched to worker. + The first speculative WT responds back, server fails WT because start time different. + The second speculative WT responds back, server reject it. + */ + + tv := testvars.New(s.T().Name()) + tv = s.startWorkflow(tv) + + testCtx := NewContext() + wtHandlerCalls := 0 + wtHandler := func(execution *commonpb.WorkflowExecution, wt *commonpb.WorkflowType, previousStartedEventID, startedEventID int64, history *historypb.History) ([]*commandpb.Command, error) { + wtHandlerCalls++ + switch wtHandlerCalls { + case 1: + // Schedule activity. + return []*commandpb.Command{{ + CommandType: enumspb.COMMAND_TYPE_SCHEDULE_ACTIVITY_TASK, + Attributes: &commandpb.Command_ScheduleActivityTaskCommandAttributes{ScheduleActivityTaskCommandAttributes: &commandpb.ScheduleActivityTaskCommandAttributes{ + ActivityId: tv.ActivityID("1"), + ActivityType: tv.ActivityType(), + TaskQueue: tv.TaskQueue(), + ScheduleToCloseTimeout: tv.InfiniteTimeout(), + }}, + }}, nil + case 2: + return nil, nil + default: + s.Failf("wtHandler called too many times", "wtHandler shouldn't be called %d times", wtHandlerCalls) + return nil, nil + } + } + + atHandler := func(execution *commonpb.WorkflowExecution, activityType *commonpb.ActivityType, + activityID string, input *commonpb.Payloads, taskToken []byte) (*commonpb.Payloads, bool, error) { + return payloads.EncodeString(tv.String("activity-result")), false, nil + } + + poller := &TaskPoller{ + Engine: s.engine, + Namespace: s.namespace, + TaskQueue: tv.TaskQueue(), + WorkflowTaskHandler: wtHandler, + ActivityTaskHandler: atHandler, + Logger: s.Logger, + T: s.T(), + } + + // First WT will schedule activity and create a new WT. 
+ res, err := poller.PollAndProcessWorkflowTask(WithRetries(1), WithForceNewWorkflowTask) + s.NoError(err) + wt1Resp := res.NewTask + + // Drain 2nd WT (which is force created as requested) to make all events seen by SDK so following update can be speculative. + _, err = poller.HandlePartialWorkflowTask(wt1Resp.GetWorkflowTask(), false) + s.NoError(err) + s.EqualValues(0, wt1Resp.ResetHistoryEventId) + + // send update wf request, this will trigger speculative wft + go func() { + _, _ = s.sendUpdate(tv, "1") + }() + + // Poll 3rd speculative WT. + wt3, err := s.engine.PollWorkflowTaskQueue(testCtx, &workflowservice.PollWorkflowTaskQueueRequest{ + Namespace: s.namespace, + TaskQueue: tv.TaskQueue(), + }) + s.NoError(err) + s.NotNil(wt3) + s.NotEmpty(wt3.TaskToken, "3rd workflow task must have valid task token") + s.Len(wt3.Messages, 1, "3rd workflow task must have a message with 1st update") + s.EqualValues(10, wt3.StartedEventId) + s.EqualValues(9, wt3.Messages[0].GetEventId()) + s.EqualHistory(` + 1 WorkflowExecutionStarted + 2 WorkflowTaskScheduled + 3 WorkflowTaskStarted + 4 WorkflowTaskCompleted + 5 ActivityTaskScheduled + 6 WorkflowTaskScheduled + 7 WorkflowTaskStarted + 8 WorkflowTaskCompleted + 9 WorkflowTaskScheduled + 10 WorkflowTaskStarted`, wt3.History) + + // Close shard, this will clear mutable state and update registry, and speculative WT3 will disappear. + s.closeShard(tv.WorkflowID()) + + // Send 2nd update (with DIFFERENT updateId). This will create a 4th WT as speculative. + go func() { + _, _ = s.sendUpdate(tv, "2") + }() + + // Poll the 4th speculative WT which must have 2nd update. + wt4, err := s.engine.PollWorkflowTaskQueue(testCtx, &workflowservice.PollWorkflowTaskQueueRequest{ + Namespace: s.namespace, + TaskQueue: tv.TaskQueue(), + }) + s.NoError(err) + s.NotNil(wt4) + s.NotEmpty(wt4.TaskToken, "4th workflow task must have valid task token") + s.Len(wt4.Messages, 1, "4th workflow task must have a message with 1st update") + s.EqualValues(10, wt4.StartedEventId) + s.EqualValues(9, wt4.Messages[0].GetEventId()) + s.EqualHistory(` + 1 WorkflowExecutionStarted + 2 WorkflowTaskScheduled + 3 WorkflowTaskStarted + 4 WorkflowTaskCompleted + 5 ActivityTaskScheduled + 6 WorkflowTaskScheduled + 7 WorkflowTaskStarted + 8 WorkflowTaskCompleted + 9 WorkflowTaskScheduled + 10 WorkflowTaskStarted`, wt4.History) + + // Now try to complete 3rd speculative WT, it should fail. + _, err = s.engine.RespondWorkflowTaskCompleted(testCtx, &workflowservice.RespondWorkflowTaskCompletedRequest{ + Namespace: s.namespace, + TaskToken: wt3.TaskToken, + Commands: append( + s.acceptUpdateCommands(tv, "1"), + &commandpb.Command{ + CommandType: enumspb.COMMAND_TYPE_SCHEDULE_ACTIVITY_TASK, + Attributes: &commandpb.Command_ScheduleActivityTaskCommandAttributes{ScheduleActivityTaskCommandAttributes: &commandpb.ScheduleActivityTaskCommandAttributes{ + ActivityId: tv.ActivityID("2"), + ActivityType: tv.ActivityType(), + TaskQueue: tv.TaskQueue(), + ScheduleToCloseTimeout: tv.InfiniteTimeout(), + }}, + }), + Messages: s.acceptUpdateMessages(tv, wt3.Messages[0], "1"), + }) + s.Error(err, "Must fail because start time is different.") + s.Contains(err.Error(), "Workflow task not found") + + // Now try to complete 4th speculative WT. It should also fail, because the previous attempt already mark the WT as failed. 
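All four stale-speculative-task tests hinge on the same server behaviour: when a response's task token refers to a workflow task whose started event ID or start time no longer matches what the rebuilt mutable state expects, RespondWorkflowTaskCompleted fails with a NotFound error. Written out once against a placeholder, the assertion pattern is:

// Assertion pattern for a stale speculative workflow task completion;
// staleCompletionRequest is a placeholder for the wt3 request built in the test above.
_, err = s.engine.RespondWorkflowTaskCompleted(testCtx, staleCompletionRequest)
var notFound *serviceerror.NotFound
s.ErrorAs(err, &notFound)
s.Contains(err.Error(), "Workflow task not found")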
+ _, err = s.engine.RespondWorkflowTaskCompleted(testCtx, &workflowservice.RespondWorkflowTaskCompletedRequest{ + Namespace: s.namespace, + TaskToken: wt4.TaskToken, + Commands: append( + s.acceptUpdateCommands(tv, "2"), + &commandpb.Command{ + CommandType: enumspb.COMMAND_TYPE_SCHEDULE_ACTIVITY_TASK, + Attributes: &commandpb.Command_ScheduleActivityTaskCommandAttributes{ScheduleActivityTaskCommandAttributes: &commandpb.ScheduleActivityTaskCommandAttributes{ + ActivityId: tv.ActivityID("3"), + ActivityType: tv.ActivityType(), + TaskQueue: tv.TaskQueue(), + ScheduleToCloseTimeout: tv.InfiniteTimeout(), + }}, + }), + Messages: s.acceptUpdateMessages(tv, wt4.Messages[0], "2"), + }) + s.NoError(err) + + events := s.getHistory(s.namespace, tv.WorkflowExecution()) + s.EqualHistoryEvents(` + 1 WorkflowExecutionStarted + 2 WorkflowTaskScheduled + 3 WorkflowTaskStarted + 4 WorkflowTaskCompleted + 5 ActivityTaskScheduled + 6 WorkflowTaskScheduled + 7 WorkflowTaskStarted + 8 WorkflowTaskCompleted + 9 WorkflowTaskScheduled + 10 WorkflowTaskStarted + 11 WorkflowTaskCompleted + 12 WorkflowExecutionUpdateAccepted + 13 ActivityTaskScheduled + `, events) +} diff -Nru temporal-1.21.5-1/src/tests/user_timers_test.go temporal-1.22.5/src/tests/user_timers_test.go --- temporal-1.21.5-1/src/tests/user_timers_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/tests/user_timers_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -105,13 +105,13 @@ } for i := 0; i < 4; i++ { - _, err := poller.PollAndProcessWorkflowTask(false, false) + _, err := poller.PollAndProcessWorkflowTask() s.Logger.Info("PollAndProcessWorkflowTask: completed") s.NoError(err) } s.False(workflowComplete) - _, err := poller.PollAndProcessWorkflowTask(true, false) + _, err := poller.PollAndProcessWorkflowTask(WithDumpHistory) s.NoError(err) s.True(workflowComplete) } diff -Nru temporal-1.21.5-1/src/tests/versioning_test.go temporal-1.22.5/src/tests/versioning_test.go --- temporal-1.21.5-1/src/tests/versioning_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/tests/versioning_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -22,6 +22,7 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. 
+// nolint:revive package tests import ( @@ -37,6 +38,8 @@ "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + commandpb "go.temporal.io/api/command/v1" + commonpb "go.temporal.io/api/common/v1" enumspb "go.temporal.io/api/enums/v1" "go.temporal.io/api/serviceerror" taskqueuepb "go.temporal.io/api/taskqueue/v1" @@ -47,6 +50,7 @@ "go.temporal.io/sdk/worker" "go.temporal.io/sdk/workflow" + "go.temporal.io/server/api/adminservice/v1" "go.temporal.io/server/api/matchingservice/v1" persistencespb "go.temporal.io/server/api/persistence/v1" "go.temporal.io/server/common/dynamicconfig" @@ -398,6 +402,7 @@ } func (s *versioningIntegSuite) TestDisableUserData_DefaultTasksBecomeUnversioned() { + // force one partition so that we can unload the task queue dc := s.testCluster.host.dcClient dc.OverrideValue(dynamicconfig.MatchingNumTaskqueueReadPartitions, 1) defer dc.RemoveOverride(dynamicconfig.MatchingNumTaskqueueReadPartitions) @@ -634,7 +639,6 @@ if act1state.Add(1) == 1 { switch failMode { case failActivity: - // nolint:goerr113 return "", errors.New("try again") case timeoutActivity: time.Sleep(5 * time.Second) @@ -647,7 +651,6 @@ if act2state.Add(1) == 1 { switch failMode { case failActivity: - // nolint:goerr113 return "", errors.New("try again") case timeoutActivity: time.Sleep(5 * time.Second) @@ -800,6 +803,80 @@ s.Equal("v1.1", out) } +func (s *versioningIntegSuite) TestDispatchActivityEager() { + dc := s.testCluster.host.dcClient + dc.OverrideValue(dynamicconfig.EnableActivityEagerExecution, true) + defer dc.RemoveOverride(dynamicconfig.EnableActivityEagerExecution) + + tq := s.randomizeStr(s.T().Name()) + v1 := s.prefixed("v1") + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + _, err := s.sdkClient.ExecuteWorkflow(ctx, sdkclient.StartWorkflowOptions{TaskQueue: tq}, "wf") + s.Require().NoError(err) + + pollResponse, err := s.sdkClient.WorkflowService().PollWorkflowTaskQueue(ctx, &workflowservice.PollWorkflowTaskQueueRequest{ + Namespace: s.namespace, + TaskQueue: &taskqueuepb.TaskQueue{Name: tq}, + Identity: "test", + WorkerVersionCapabilities: &commonpb.WorkerVersionCapabilities{ + BuildId: v1, + }, + }) + s.Require().NoError(err) + startToCloseTimeout := time.Minute + + completionResponse, err := s.sdkClient.WorkflowService().RespondWorkflowTaskCompleted(ctx, &workflowservice.RespondWorkflowTaskCompletedRequest{ + Identity: "test", + WorkerVersionStamp: &commonpb.WorkerVersionStamp{ + BuildId: v1, + UseVersioning: true, + }, + TaskToken: pollResponse.TaskToken, + Commands: []*commandpb.Command{ + { + CommandType: enumspb.COMMAND_TYPE_SCHEDULE_ACTIVITY_TASK, + Attributes: &commandpb.Command_ScheduleActivityTaskCommandAttributes{ + ScheduleActivityTaskCommandAttributes: &commandpb.ScheduleActivityTaskCommandAttributes{ + ActivityId: "compatible", + TaskQueue: &taskqueuepb.TaskQueue{ + Name: tq, + }, + StartToCloseTimeout: &startToCloseTimeout, + ActivityType: &commonpb.ActivityType{ + Name: "ignore", + }, + RequestEagerExecution: true, + UseCompatibleVersion: true, + }, + }, + }, + { + CommandType: enumspb.COMMAND_TYPE_SCHEDULE_ACTIVITY_TASK, + Attributes: &commandpb.Command_ScheduleActivityTaskCommandAttributes{ + ScheduleActivityTaskCommandAttributes: &commandpb.ScheduleActivityTaskCommandAttributes{ + ActivityId: "latest", + TaskQueue: &taskqueuepb.TaskQueue{ + Name: tq, + }, + StartToCloseTimeout: &startToCloseTimeout, + ActivityType: &commonpb.ActivityType{ + Name: "ignore", + }, + RequestEagerExecution: true, + 
UseCompatibleVersion: false, + }, + }, + }, + }, + }) + s.Require().NoError(err) + s.Require().Equal(1, len(completionResponse.ActivityTasks)) + s.Require().Equal("compatible", completionResponse.ActivityTasks[0].ActivityId) +} + func (s *versioningIntegSuite) TestDispatchActivityCrossTQFails() { dc := s.testCluster.host.dcClient defer dc.RemoveOverride(dynamicconfig.MatchingNumTaskqueueReadPartitions) @@ -1073,7 +1150,7 @@ v11 := s.prefixed("v11") v2 := s.prefixed("v2") - started := make(chan struct{}, 2) + started := make(chan struct{}, 10) wf1 := func(ctx workflow.Context) error { if err := workflow.SetQueryHandler(ctx, "query", func() (string, error) { return "v1", nil }); err != nil { @@ -1095,7 +1172,6 @@ if err := workflow.SetQueryHandler(ctx, "query", func() (string, error) { return "v2", nil }); err != nil { return err } - workflow.GetSignalChannel(ctx, "wait").Receive(ctx, nil) return nil } @@ -1155,8 +1231,30 @@ s.NoError(val.Get(&out)) s.Equal("v1.1", out) - // let the workflow exit + // let the workflow complete s.NoError(s.sdkClient.SignalWorkflow(ctx, run.GetID(), run.GetRunID(), "wait", nil)) + + // wait for completion + s.NoError(run.Get(ctx, nil)) + + // query on closed workflow + val, err = s.sdkClient.QueryWorkflow(ctx, run.GetID(), run.GetRunID(), "query") + s.NoError(err) + s.NoError(val.Get(&out)) + s.Equal("v1.1", out) + + // start another wf on v2. should complete immediately. + run2, err := s.sdkClient.ExecuteWorkflow(ctx, sdkclient.StartWorkflowOptions{TaskQueue: tq}, "wf") + s.NoError(err) + + // wait for completion + s.NoError(run2.Get(ctx, nil)) + + // query on closed workflow + val, err = s.sdkClient.QueryWorkflow(ctx, run2.GetID(), run2.GetRunID(), "query") + s.NoError(err) + s.NoError(val.Get(&out)) + s.Equal("v2", out) } func (s *versioningIntegSuite) TestDispatchContinueAsNew() { @@ -1482,13 +1580,11 @@ // First insert some data (we'll try to read it below) s.addNewDefaultBuildId(ctx, tq, v1) - // unload so that we reload and pick up LoadUserData dynamic config - s.unloadTaskQueue(ctx, tq) - dc := s.testCluster.host.dcClient defer dc.RemoveOverride(dynamicconfig.MatchingLoadUserData) dc.OverrideValue(dynamicconfig.MatchingLoadUserData, false) + // unload so that we reload and pick up LoadUserData dynamic config s.unloadTaskQueue(ctx, tq) // Verify update fails @@ -1540,15 +1636,21 @@ } func (s *versioningIntegSuite) TestDisableUserData_WorkflowGetsStuck() { + // force one partition so that we can unload the task queue + dc := s.testCluster.host.dcClient + dc.OverrideValue(dynamicconfig.MatchingNumTaskqueueReadPartitions, 1) + dc.OverrideValue(dynamicconfig.MatchingNumTaskqueueWritePartitions, 1) + defer dc.RemoveOverride(dynamicconfig.MatchingNumTaskqueueReadPartitions) + defer dc.RemoveOverride(dynamicconfig.MatchingNumTaskqueueWritePartitions) + tq := s.T().Name() v1 := s.prefixed("v1") ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() s.addNewDefaultBuildId(ctx, tq, v1) - dc := s.testCluster.host.dcClient - defer dc.RemoveOverride(dynamicconfig.MatchingLoadUserData) dc.OverrideValue(dynamicconfig.MatchingLoadUserData, false) + defer dc.RemoveOverride(dynamicconfig.MatchingLoadUserData) s.unloadTaskQueue(ctx, tq) @@ -1592,19 +1694,28 @@ s.Require().Equal(int32(1), runs.Load()) } -func (s *versioningIntegSuite) TestDisableUserData_QueryTimesOut() { +func (s *versioningIntegSuite) TestDisableUserData_QueryFails() { + // force one partition so that we can unload the task queue + dc := s.testCluster.host.dcClient + 
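In TestDispatchActivityEager above, both ScheduleActivityTask commands request eager execution, but only the one with UseCompatibleVersion: true comes back on the completion response, presumably because an eager dispatch must stay on the compatible build ID the poller advertised. An eagerly returned activity task can then be completed without ever polling the activity task queue; a hedged sketch of that continuation (the result payload and identity are illustrative values):

// Sketch: completing an eagerly dispatched activity straight from the
// RespondWorkflowTaskCompleted response, without an activity poll.
for _, activityTask := range completionResponse.ActivityTasks {
	_, err := s.sdkClient.WorkflowService().RespondActivityTaskCompleted(ctx, &workflowservice.RespondActivityTaskCompletedRequest{
		Namespace: s.namespace,
		TaskToken: activityTask.TaskToken,
		Result:    payloads.EncodeString("eager-activity-result"), // illustrative
		Identity:  "test",
	})
	s.Require().NoError(err)
}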
dc.OverrideValue(dynamicconfig.MatchingNumTaskqueueReadPartitions, 1) + dc.OverrideValue(dynamicconfig.MatchingNumTaskqueueWritePartitions, 1) + defer dc.RemoveOverride(dynamicconfig.MatchingNumTaskqueueReadPartitions) + defer dc.RemoveOverride(dynamicconfig.MatchingNumTaskqueueWritePartitions) + tq := s.T().Name() v1 := s.prefixed("v1") ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() + s.addNewDefaultBuildId(ctx, tq, v1) var runs atomic.Int32 wf := func(ctx workflow.Context) error { - return workflow.SetQueryHandler(ctx, "query", func() (string, error) { + workflow.SetQueryHandler(ctx, "query", func() (string, error) { runs.Add(1) return "response", nil }) + return nil } wrk := worker.New(s.sdkClient, tq, worker.Options{ BuildID: v1, @@ -1621,16 +1732,170 @@ }, "wf") s.Require().NoError(err) + // wait for it to complete + s.NoError(run.Get(ctx, nil)) + + dc.OverrideValue(dynamicconfig.MatchingLoadUserData, false) + defer dc.RemoveOverride(dynamicconfig.MatchingLoadUserData) + + s.unloadTaskQueue(ctx, tq) + + _, err = s.sdkClient.QueryWorkflow(ctx, run.GetID(), run.GetRunID(), "query") + var failedPrecond *serviceerror.FailedPrecondition + s.ErrorAs(err, &failedPrecond, err) + s.Equal(int32(0), runs.Load()) +} + +func (s *versioningIntegSuite) TestDisableUserData_DLQ() { + // force one partition so we can unload easily dc := s.testCluster.host.dcClient + dc.OverrideValue(dynamicconfig.MatchingNumTaskqueueReadPartitions, 1) + defer dc.RemoveOverride(dynamicconfig.MatchingNumTaskqueueReadPartitions) + dc.OverrideValue(dynamicconfig.MatchingNumTaskqueueWritePartitions, 1) + defer dc.RemoveOverride(dynamicconfig.MatchingNumTaskqueueWritePartitions) + + tq := s.randomizeStr(s.T().Name()) + v1 := s.prefixed("v1") + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + started := make(chan struct{}, 1) + + wf1 := func(ctx workflow.Context) (string, error) { + started <- struct{}{} + workflow.GetSignalChannel(ctx, "wait").Receive(ctx, nil) + return "done!", nil + } + + s.addNewDefaultBuildId(ctx, tq, v1) + s.waitForPropagation(ctx, tq, v1) + + w1 := worker.New(s.sdkClient, tq, worker.Options{ + BuildID: v1, + UseBuildIDForVersioning: true, + MaxConcurrentWorkflowTaskPollers: numPollers, + }) + w1.RegisterWorkflowWithOptions(wf1, workflow.RegisterOptions{Name: "wf"}) + s.NoError(w1.Start()) + defer w1.Stop() + + run, err := s.sdkClient.ExecuteWorkflow(ctx, sdkclient.StartWorkflowOptions{ + TaskQueue: tq, + WorkflowTaskTimeout: 1 * time.Minute, // don't let this interfere + }, "wf") + s.NoError(err) + s.waitForChan(ctx, started) + time.Sleep(100 * time.Millisecond) // wait for worker to respond + + // disable user data and unload so it picks it up + dc.OverrideValue(dynamicconfig.MatchingLoadUserData, false) defer dc.RemoveOverride(dynamicconfig.MatchingLoadUserData) + s.unloadTaskQueue(ctx, tq) + s.unloadTaskQueue(ctx, s.getStickyQueueName(ctx, run.GetID())) + + // unblock the workflow. the sticky task will get kicked back to the regular queue and then + // get redirected to the dlq. + s.NoError(s.sdkClient.SignalWorkflow(ctx, run.GetID(), run.GetRunID(), "wait", nil)) + + // workflow is blocked for > 2s + waitCtx, waitCancel := context.WithTimeout(ctx, 2*time.Second) + defer waitCancel() + s.Error(run.Get(waitCtx, nil)) + + // enable user data. task can be dispatched from dlq immediately since dlq is still loaded. 
+ dc.OverrideValue(dynamicconfig.MatchingLoadUserData, true) + s.unloadTaskQueue(ctx, tq) + + // workflow can finish + var out string + s.NoError(run.Get(ctx, &out)) + s.Equal("done!", out) +} + +func (s *versioningIntegSuite) TestDisableUserData_DLQ_WithUnload() { + // force one partition so we can unload easily + dc := s.testCluster.host.dcClient + dc.OverrideValue(dynamicconfig.MatchingNumTaskqueueReadPartitions, 1) + defer dc.RemoveOverride(dynamicconfig.MatchingNumTaskqueueReadPartitions) + dc.OverrideValue(dynamicconfig.MatchingNumTaskqueueWritePartitions, 1) + defer dc.RemoveOverride(dynamicconfig.MatchingNumTaskqueueWritePartitions) + + tq := s.randomizeStr(s.T().Name()) + v1 := s.prefixed("v1") + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + started := make(chan struct{}, 1) + + wf1 := func(ctx workflow.Context) (string, error) { + started <- struct{}{} + workflow.GetSignalChannel(ctx, "wait").Receive(ctx, nil) + return "done!", nil + } + + s.addNewDefaultBuildId(ctx, tq, v1) + s.waitForPropagation(ctx, tq, v1) + + w1 := worker.New(s.sdkClient, tq, worker.Options{ + BuildID: v1, + UseBuildIDForVersioning: true, + MaxConcurrentWorkflowTaskPollers: numPollers, + }) + w1.RegisterWorkflowWithOptions(wf1, workflow.RegisterOptions{Name: "wf"}) + s.NoError(w1.Start()) + defer w1.Stop() + + run, err := s.sdkClient.ExecuteWorkflow(ctx, sdkclient.StartWorkflowOptions{ + TaskQueue: tq, + WorkflowTaskTimeout: 1 * time.Minute, // don't let this interfere + }, "wf") + s.NoError(err) + s.waitForChan(ctx, started) + time.Sleep(100 * time.Millisecond) // wait for worker to respond + + // disable user data and unload so it picks it up dc.OverrideValue(dynamicconfig.MatchingLoadUserData, false) + defer dc.RemoveOverride(dynamicconfig.MatchingLoadUserData) + s.unloadTaskQueue(ctx, tq) + s.unloadTaskQueue(ctx, s.getStickyQueueName(ctx, run.GetID())) + + // unblock the workflow. the sticky task will get kicked back to the regular queue and then + // get redirected to the dlq. 
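These two DLQ tests rely on versioned tasks being parked on a per-task-queue dead-letter queue while MatchingLoadUserData is disabled, rather than being dropped; re-enabling user data lets them be dispatched again. The second test, below, addresses that queue directly by name, deriving it from the base task queue with tqname; pulled out for readability, the essential derivation it uses is:

// The versioned DLQ is addressed as a "dlq" version set of the base task queue
// (this mirrors the derivation used in TestDisableUserData_DLQ_WithUnload below).
dlqName, err := tqname.Parse(tq)
s.NoError(err)
dlqName = dlqName.WithVersionSet("dlq")
fullName := dlqName.FullName() // name under which matching loads the DLQ partition
s.unloadTaskQueue(ctx, fullName)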
+ s.NoError(s.sdkClient.SignalWorkflow(ctx, run.GetID(), run.GetRunID(), "wait", nil)) + + // workflow is blocked for > 2s + waitCtx, waitCancel := context.WithTimeout(ctx, 2*time.Second) + defer waitCancel() + s.Error(run.Get(waitCtx, nil)) + // force unload dlq to test what would happen if it idled out + dlqName, err := tqname.Parse(tq) + s.NoError(err) + dlqName = dlqName.WithVersionSet("dlq") + s.unloadTaskQueue(ctx, dlqName.FullName()) + + // enable user data + dc.OverrideValue(dynamicconfig.MatchingLoadUserData, true) s.unloadTaskQueue(ctx, tq) - _, err = s.sdkClient.QueryWorkflow(ctx, run.GetID(), run.GetRunID(), "query") - var deadlineExceededError *serviceerror.DeadlineExceeded - s.Require().ErrorAs(err, &deadlineExceededError) - s.Require().Equal(int32(0), runs.Load()) + // workflow is still stuck because dlq is unloaded + waitCtx, waitCancel = context.WithTimeout(ctx, 2*time.Second) + defer waitCancel() + s.Error(run.Get(waitCtx, nil)) + + // force dlq to get loaded + _, _ = s.engine.DescribeTaskQueue(ctx, &workflowservice.DescribeTaskQueueRequest{ + Namespace: s.namespace, + TaskQueue: &taskqueuepb.TaskQueue{Name: dlqName.FullName(), Kind: enumspb.TASK_QUEUE_KIND_NORMAL}, + TaskQueueType: enumspb.TASK_QUEUE_TYPE_WORKFLOW, + }) + + // now workflow can finish + var out string + s.NoError(run.Get(ctx, &out)) + s.Equal("done!", out) } func (s *versioningIntegSuite) TestDescribeTaskQueue() { @@ -1879,6 +2144,15 @@ s.Require().NoError(err) } +func (s *versioningIntegSuite) getStickyQueueName(ctx context.Context, id string) string { + ms, err := s.adminClient.DescribeMutableState(ctx, &adminservice.DescribeMutableStateRequest{ + Namespace: s.namespace, + Execution: &commonpb.WorkflowExecution{WorkflowId: id}, + }) + s.NoError(err) + return ms.DatabaseMutableState.ExecutionInfo.StickyTaskQueue +} + func containsBuildId(data *persistencespb.VersioningData, buildId string) bool { for _, set := range data.GetVersionSets() { for _, id := range set.BuildIds { diff -Nru temporal-1.21.5-1/src/tests/workflow_buffered_events_test.go temporal-1.22.5/src/tests/workflow_buffered_events_test.go --- temporal-1.21.5-1/src/tests/workflow_buffered_events_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/tests/workflow_buffered_events_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -127,14 +127,14 @@ } // first workflow task to send 101 signals, the last signal will force fail workflow task and flush buffered events. 
- _, err := poller.PollAndProcessWorkflowTask(false, false) + _, err := poller.PollAndProcessWorkflowTask() s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NotNil(err) s.IsType(&serviceerror.NotFound{}, err) s.Equal("Workflow task not found.", err.Error()) // Process signal in workflow - _, err = poller.PollAndProcessWorkflowTask(true, false) + _, err = poller.PollAndProcessWorkflowTask(WithDumpHistory) s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) @@ -230,7 +230,7 @@ } // first workflow task, which sends signal and the signal event should be buffered to append after first workflow task closed - _, err := poller.PollAndProcessWorkflowTask(false, false) + _, err := poller.PollAndProcessWorkflowTask() s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) @@ -250,7 +250,7 @@ s.Equal(histResp.History.Events[5].GetEventType(), enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_SIGNALED) // Process signal in workflow - _, err = poller.PollAndProcessWorkflowTask(true, false) + _, err = poller.PollAndProcessWorkflowTask(WithDumpHistory) s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) s.NotNil(signalEvent) @@ -358,16 +358,13 @@ } // first workflow task, which will schedule an activity and add marker - _, task, err := poller.PollAndProcessWorkflowTaskWithAttemptAndRetryAndForceNewWorkflowTask( - true, - false, - false, - false, - 0, - 1, - true, - nil) + res, err := poller.PollAndProcessWorkflowTask( + WithDumpHistory, + WithExpectedAttemptCount(0), + WithRetries(1), + WithForceNewWorkflowTask) s.Logger.Info("pollAndProcessWorkflowTask", tag.Error(err)) + task := res.NewTask s.NoError(err) // This will cause activity start and complete to be buffered diff -Nru temporal-1.21.5-1/src/tests/workflow_delete_execution_test.go temporal-1.22.5/src/tests/workflow_delete_execution_test.go --- temporal-1.21.5-1/src/tests/workflow_delete_execution_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/tests/workflow_delete_execution_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -92,7 +92,7 @@ } for range wes { - _, err := poller.PollAndProcessWorkflowTask(false, false) + _, err := poller.PollAndProcessWorkflowTask() s.NoError(err) } diff -Nru temporal-1.21.5-1/src/tests/workflow_failures_test.go temporal-1.22.5/src/tests/workflow_failures_test.go --- temporal-1.21.5-1/src/tests/workflow_failures_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/tests/workflow_failures_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -245,7 +245,7 @@ } // Make first workflow task to schedule activity - _, err := poller.PollAndProcessWorkflowTask(false, false) + _, err := poller.PollAndProcessWorkflowTask() s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) @@ -256,7 +256,7 @@ // fail workflow task 5 times for i := 1; i <= 5; i++ { - _, err := poller.PollAndProcessWorkflowTaskWithAttempt(false, false, false, false, int32(i)) + _, err := poller.PollAndProcessWorkflowTask(WithExpectedAttemptCount(i)) s.NoError(err) } @@ -264,7 +264,7 @@ s.NoError(err, "failed to send signal to execution") // process signal - _, err = poller.PollAndProcessWorkflowTask(true, false) + _, err = poller.PollAndProcessWorkflowTask(WithDumpHistory) s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) s.Equal(1, signalCount) @@ -275,26 +275,26 @@ // fail workflow task 2 more times for i := 1; i <= 2; i++ { - _, err := poller.PollAndProcessWorkflowTaskWithAttempt(false, false, false, false, int32(i)) + _, 
err := poller.PollAndProcessWorkflowTask(WithExpectedAttemptCount(i)) s.NoError(err) } s.Equal(3, signalCount) // now send a signal during failed workflow task sendSignal = true - _, err = poller.PollAndProcessWorkflowTaskWithAttempt(false, false, false, false, 3) + _, err = poller.PollAndProcessWorkflowTask(WithExpectedAttemptCount(3)) s.NoError(err) s.Equal(4, signalCount) // fail workflow task 1 more times for i := 1; i <= 2; i++ { - _, err := poller.PollAndProcessWorkflowTaskWithAttempt(false, false, false, false, int32(i)) + _, err := poller.PollAndProcessWorkflowTask(WithExpectedAttemptCount(i)) s.NoError(err) } s.Equal(12, signalCount) // Make complete workflow workflow task - _, err = poller.PollAndProcessWorkflowTaskWithAttempt(true, false, false, false, 3) + _, err = poller.PollAndProcessWorkflowTask(WithDumpHistory, WithExpectedAttemptCount(3)) s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) s.True(workflowComplete) @@ -372,7 +372,7 @@ T: s.T(), } - _, err := poller.PollAndProcessWorkflowTask(false, false) + _, err := poller.PollAndProcessWorkflowTask() s.Error(err) s.IsType(&serviceerror.InvalidArgument{}, err) s.Equal("BadRecordMarkerAttributes: MarkerName is not set on command.", err.Error()) diff -Nru temporal-1.21.5-1/src/tests/workflow_memo_test.go temporal-1.22.5/src/tests/workflow_memo_test.go --- temporal-1.21.5-1/src/tests/workflow_memo_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/tests/workflow_memo_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -183,7 +183,7 @@ s.Equal(memo, descResp.WorkflowExecutionInfo.Memo) // make progress of workflow - _, err = poller.PollAndProcessWorkflowTask(false, false) + _, err = poller.PollAndProcessWorkflowTask() s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) diff -Nru temporal-1.21.5-1/src/tests/workflow_test.go temporal-1.22.5/src/tests/workflow_test.go --- temporal-1.21.5-1/src/tests/workflow_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/tests/workflow_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -48,7 +48,6 @@ "go.temporal.io/server/common/log/tag" "go.temporal.io/server/common/payloads" "go.temporal.io/server/common/primitives/timestamp" - "go.temporal.io/server/service/matching" ) func (s *integrationSuite) TestStartWorkflowExecution() { @@ -199,7 +198,7 @@ T: s.T(), } - _, pollErr := poller.PollAndProcessWorkflowTask(true, false) + _, pollErr := poller.PollAndProcessWorkflowTask(WithDumpHistory) s.NoError(pollErr) s.GreaterOrEqual(delayEndTime.Sub(reqStartTime), startDelay) @@ -287,7 +286,7 @@ T: s.T(), } - _, err := poller.PollAndProcessWorkflowTask(false, false) + _, err := poller.PollAndProcessWorkflowTask() s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) @@ -449,7 +448,7 @@ } for i := 0; i < 10; i++ { - _, err := poller.PollAndProcessWorkflowTask(false, false) + _, err := poller.PollAndProcessWorkflowTask() s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) if i%2 == 0 { @@ -462,7 +461,7 @@ } s.False(workflowComplete) - _, err := poller.PollAndProcessWorkflowTask(true, false) + _, err := poller.PollAndProcessWorkflowTask(WithDumpHistory) s.NoError(err) s.True(workflowComplete) } @@ -523,16 +522,14 @@ T: s.T(), } - _, newTask, err := poller.PollAndProcessWorkflowTaskWithAttemptAndRetryAndForceNewWorkflowTask( - false, - false, - true, - true, - 0, - 1, - true, - nil) + res, err := poller.PollAndProcessWorkflowTask( + WithPollSticky, + WithRespondSticky, + 
WithExpectedAttemptCount(0), + WithRetries(1), + WithForceNewWorkflowTask) s.NoError(err) + newTask := res.NewTask s.NotNil(newTask) s.NotNil(newTask.WorkflowTask) @@ -630,9 +627,9 @@ s.Logger.Info("Calling Workflow Task", tag.Counter(i)) var err error if dropWorkflowTask { - _, err = poller.PollAndProcessWorkflowTask(true, true) + _, err = poller.PollAndProcessWorkflowTask(WithDumpHistory, WithDropTask) } else { - _, err = poller.PollAndProcessWorkflowTaskWithAttempt(true, false, false, false, 2) + _, err = poller.PollAndProcessWorkflowTask(WithDumpHistory, WithExpectedAttemptCount(2)) } if err != nil { historyResponse, err := s.engine.GetWorkflowExecutionHistory(NewContext(), &workflowservice.GetWorkflowExecutionHistoryRequest{ @@ -646,18 +643,18 @@ history := historyResponse.History common.PrettyPrint(history.Events) } - s.True(err == nil || err == matching.ErrNoTasks, "%v", err) + s.True(err == nil || err == errNoTasks, err) if !dropWorkflowTask { s.Logger.Info("Calling PollAndProcessActivityTask", tag.Counter(i)) err = poller.PollAndProcessActivityTask(i%4 == 0) - s.True(err == nil || err == matching.ErrNoTasks) + s.True(err == nil || err == errNoTasks) } } s.Logger.Info("Waiting for workflow to complete", tag.WorkflowRunID(we.RunId)) s.False(workflowComplete) - _, err := poller.PollAndProcessWorkflowTask(true, false) + _, err := poller.PollAndProcessWorkflowTask(WithDumpHistory) s.NoError(err) s.True(workflowComplete) } @@ -739,7 +736,7 @@ } for i := 1; i <= maximumAttempts; i++ { - _, err := poller.PollAndProcessWorkflowTask(false, false) + _, err := poller.PollAndProcessWorkflowTask() s.NoError(err) events := s.getHistory(s.namespace, executions[i-1]) if i == maximumAttempts { @@ -885,19 +882,19 @@ T: s.T(), } - _, err := poller.PollAndProcessWorkflowTask(false, false) + _, err := poller.PollAndProcessWorkflowTask() s.NoError(err) events := s.getHistory(s.namespace, executions[0]) s.Equal(enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_FAILED, events[len(events)-1].GetEventType()) s.Equal(int32(1), events[0].GetWorkflowExecutionStartedEventAttributes().GetAttempt()) - _, err = poller.PollAndProcessWorkflowTask(false, false) + _, err = poller.PollAndProcessWorkflowTask() s.NoError(err) events = s.getHistory(s.namespace, executions[1]) s.Equal(enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_FAILED, events[len(events)-1].GetEventType()) s.Equal(int32(2), events[0].GetWorkflowExecutionStartedEventAttributes().GetAttempt()) - _, err = poller.PollAndProcessWorkflowTask(false, false) + _, err = poller.PollAndProcessWorkflowTask() s.NoError(err) events = s.getHistory(s.namespace, executions[2]) s.Equal(enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_FAILED, events[len(events)-1].GetEventType()) @@ -940,7 +937,7 @@ T: s.T(), } - _, err = poller.PollAndProcessWorkflowTask(false, false) + _, err = poller.PollAndProcessWorkflowTask() s.NoError(err) events = s.getHistory(s.namespace, executions[0]) s.Equal(enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_FAILED, events[len(events)-1].GetEventType()) diff -Nru temporal-1.21.5-1/src/tests/workflow_timer_test.go temporal-1.22.5/src/tests/workflow_timer_test.go --- temporal-1.21.5-1/src/tests/workflow_timer_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/tests/workflow_timer_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -134,20 +134,20 @@ } // schedule the timer - _, err := poller.PollAndProcessWorkflowTask(false, false) + _, err := poller.PollAndProcessWorkflowTask() s.Logger.Info("PollAndProcessWorkflowTask: completed") s.NoError(err) s.Nil(s.sendSignal(s.namespace, 
workflowExecution, "random signal name", payloads.EncodeString("random signal payload"), identity)) // receive the signal & cancel the timer - _, err = poller.PollAndProcessWorkflowTask(false, false) + _, err = poller.PollAndProcessWorkflowTask() s.Logger.Info("PollAndProcessWorkflowTask: completed") s.NoError(err) s.Nil(s.sendSignal(s.namespace, workflowExecution, "random signal name", payloads.EncodeString("random signal payload"), identity)) // complete the workflow - _, err = poller.PollAndProcessWorkflowTask(false, false) + _, err = poller.PollAndProcessWorkflowTask() s.Logger.Info("PollAndProcessWorkflowTask: completed") s.NoError(err) @@ -266,20 +266,20 @@ } // schedule the timer - _, err := poller.PollAndProcessWorkflowTask(false, false) + _, err := poller.PollAndProcessWorkflowTask() s.Logger.Info("PollAndProcessWorkflowTask: completed") s.NoError(err) s.Nil(s.sendSignal(s.namespace, workflowExecution, "random signal name", payloads.EncodeString("random signal payload"), identity)) // receive the signal & cancel the timer - _, err = poller.PollAndProcessWorkflowTask(false, false) + _, err = poller.PollAndProcessWorkflowTask() s.Logger.Info("PollAndProcessWorkflowTask: completed") s.NoError(err) s.Nil(s.sendSignal(s.namespace, workflowExecution, "random signal name", payloads.EncodeString("random signal payload"), identity)) // complete the workflow - _, err = poller.PollAndProcessWorkflowTask(false, false) + _, err = poller.PollAndProcessWorkflowTask() s.Logger.Info("PollAndProcessWorkflowTask: completed") s.NoError(err) diff -Nru temporal-1.21.5-1/src/tests/workflow_visibility_test.go temporal-1.22.5/src/tests/workflow_visibility_test.go --- temporal-1.21.5-1/src/tests/workflow_visibility_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/tests/workflow_visibility_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -87,7 +87,7 @@ T: s.T(), } - _, err1 := poller.PollAndProcessWorkflowTask(false, false) + _, err1 := poller.PollAndProcessWorkflowTask() s.NoError(err1) // wait until the start workflow is done diff -Nru temporal-1.21.5-1/src/tests/xdc/advanced_visibility_test.go temporal-1.22.5/src/tests/xdc/advanced_visibility_test.go --- temporal-1.21.5-1/src/tests/xdc/advanced_visibility_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/tests/xdc/advanced_visibility_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -302,7 +302,7 @@ T: s.T(), } - _, err = poller.PollAndProcessWorkflowTask(false, false) + _, err = poller.PollAndProcessWorkflowTask() s.logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) diff -Nru temporal-1.21.5-1/src/tests/xdc/integration_failover_test.go temporal-1.22.5/src/tests/xdc/integration_failover_test.go --- temporal-1.21.5-1/src/tests/xdc/integration_failover_test.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/tests/xdc/integration_failover_test.go 2024-02-23 09:45:43.000000000 +0000 @@ -301,7 +301,7 @@ } // make some progress in cluster 1 - _, err = poller.PollAndProcessWorkflowTask(false, false) + _, err = poller.PollAndProcessWorkflowTask() s.logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) @@ -329,10 +329,10 @@ // process that query task, which should respond via RespondQueryTaskCompleted for { // loop until process the query task - isQueryTask, errInner := poller.PollAndProcessWorkflowTask(false, false) + res, errInner := poller.PollAndProcessWorkflowTask() s.logger.Info("PollAndProcessQueryTask", tag.Error(err)) s.NoError(errInner) - if isQueryTask { + if 
res.IsQueryTask { break } } @@ -351,10 +351,10 @@ // process that query task, which should respond via RespondQueryTaskCompleted for { // loop until process the query task - isQueryTask, errInner := poller2.PollAndProcessWorkflowTask(false, false) + res, errInner := poller2.PollAndProcessWorkflowTask() s.logger.Info("PollAndProcessQueryTask", tag.Error(err)) s.NoError(errInner) - if isQueryTask { + if res.IsQueryTask { break } } @@ -394,10 +394,10 @@ // process that query task, which should respond via RespondQueryTaskCompleted for { // loop until process the query task - isQueryTask, errInner := poller.PollAndProcessWorkflowTask(false, false) + res, errInner := poller.PollAndProcessWorkflowTask() s.logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(errInner) - if isQueryTask { + if res.IsQueryTask { break } } @@ -413,10 +413,10 @@ // process that query task, which should respond via RespondQueryTaskCompleted for { // loop until process the query task - isQueryTask, errInner := poller2.PollAndProcessWorkflowTask(false, false) + res, errInner := poller2.PollAndProcessWorkflowTask() s.logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(errInner) - if isQueryTask { + if res.IsQueryTask { break } } @@ -433,7 +433,7 @@ s.NoError(err) s.False(workflowComplete) - _, err = poller2.PollAndProcessWorkflowTask(false, false) + _, err = poller2.PollAndProcessWorkflowTask() s.logger.Info("PollAndProcessWorkflowTask 2", tag.Error(err)) s.NoError(err) s.True(workflowComplete) @@ -555,7 +555,7 @@ T: s.T(), } - _, err = poller1.PollAndProcessWorkflowTaskWithAttemptAndRetry(false, false, false, true, 1, 5) + _, err = poller1.PollAndProcessWorkflowTask(tests.WithRespondSticky) s.logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) s.True(firstCommandMade) @@ -577,7 +577,7 @@ s.failover(namespace, s.clusterNames[1], int64(2), client1) - _, err = poller2.PollAndProcessWorkflowTaskWithAttemptAndRetry(false, false, false, true, 1, 5) + _, err = poller2.PollAndProcessWorkflowTask(tests.WithRespondSticky) s.logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) s.True(secondCommandMade) @@ -596,7 +596,7 @@ s.failover(namespace, s.clusterNames[0], int64(11), client2) - _, err = poller1.PollAndProcessWorkflowTask(false, false) + _, err = poller1.PollAndProcessWorkflowTask() s.logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) s.True(workflowCompleted) @@ -690,7 +690,7 @@ } // Complete the workflow in cluster 1 - _, err = poller.PollAndProcessWorkflowTask(false, false) + _, err = poller.PollAndProcessWorkflowTask() s.logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) s.Equal(1, workflowCompleteTimes) @@ -719,7 +719,7 @@ s.NotNil(we.GetRunId()) s.logger.Info("StartWorkflowExecution in cluster 2: ", tag.WorkflowRunID(we.GetRunId())) - _, err = poller2.PollAndProcessWorkflowTask(false, false) + _, err = poller2.PollAndProcessWorkflowTask() s.logger.Info("PollAndProcessWorkflowTask 2", tag.Error(err)) s.NoError(err) s.Equal(2, workflowCompleteTimes) @@ -822,7 +822,7 @@ } // make some progress in cluster 1 - _, err = poller.PollAndProcessWorkflowTask(false, false) + _, err = poller.PollAndProcessWorkflowTask() s.logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) @@ -1001,7 +1001,7 @@ T: s.T(), } - _, err = poller.PollAndProcessWorkflowTask(false, false) + _, err = poller.PollAndProcessWorkflowTask() s.logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) @@ -1027,7 
+1027,7 @@ s.failover(namespace, s.clusterNames[1], int64(2), client1) - _, err = poller2.PollAndProcessWorkflowTask(false, false) + _, err = poller2.PollAndProcessWorkflowTask() s.logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) s.True(workflowComplete) @@ -1156,7 +1156,7 @@ // make some progress in cluster 1 and did some continueAsNew for i := 0; i < 3; i++ { - _, err := poller.PollAndProcessWorkflowTask(false, false) + _, err := poller.PollAndProcessWorkflowTask() s.logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err, strconv.Itoa(i)) } @@ -1165,13 +1165,13 @@ // finish the rest in cluster 2 for i := 0; i < 2; i++ { - _, err := poller2.PollAndProcessWorkflowTask(false, false) + _, err := poller2.PollAndProcessWorkflowTask() s.logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err, strconv.Itoa(i)) } s.False(workflowComplete) - _, err = poller2.PollAndProcessWorkflowTask(false, false) + _, err = poller2.PollAndProcessWorkflowTask() s.NoError(err) s.True(workflowComplete) s.Equal(previousRunID, lastRunStartedEvent.GetWorkflowExecutionStartedEventAttributes().GetContinuedExecutionRunId()) @@ -1269,7 +1269,7 @@ } // Process start event in cluster 1 - _, err = poller.PollAndProcessWorkflowTask(false, false) + _, err = poller.PollAndProcessWorkflowTask() s.NoError(err) s.False(eventSignaled) @@ -1304,7 +1304,7 @@ // Process signal in cluster 1 s.False(eventSignaled) - _, err = poller.PollAndProcessWorkflowTask(false, false) + _, err = poller.PollAndProcessWorkflowTask() s.logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) s.True(eventSignaled) @@ -1361,7 +1361,7 @@ // Process signal in cluster 2 eventSignaled = false - _, err = poller2.PollAndProcessWorkflowTask(false, false) + _, err = poller2.PollAndProcessWorkflowTask() s.logger.Info("PollAndProcessWorkflowTask 2", tag.Error(err)) s.NoError(err) s.True(eventSignaled) @@ -1516,7 +1516,7 @@ } for i := 0; i < 2; i++ { - _, err = poller1.PollAndProcessWorkflowTask(false, false) + _, err = poller1.PollAndProcessWorkflowTask() if err != nil { timerCreated = false continue @@ -1531,7 +1531,7 @@ for i := 1; i < 20; i++ { if !workflowCompleted { - _, err = poller2.PollAndProcessWorkflowTask(false, false) + _, err = poller2.PollAndProcessWorkflowTask() s.NoError(err) time.Sleep(time.Second) } @@ -1615,7 +1615,7 @@ } // this will fail the workflow task - _, err = poller1.PollAndProcessWorkflowTask(false, true) + _, err = poller1.PollAndProcessWorkflowTask(tests.WithDropTask) s.NoError(err) s.failover(namespace, s.clusterNames[1], int64(2), client1) @@ -1784,7 +1784,7 @@ } // this will fail the workflow task - _, err = poller1.PollAndProcessWorkflowTask(false, false) + _, err = poller1.PollAndProcessWorkflowTask() s.NoError(err) s.failover(namespace, s.clusterNames[1], int64(2), client1) @@ -1792,7 +1792,7 @@ // for failover transient workflow task, it is guaranteed that the transient workflow task // after the failover has attempt 1 // for details see ReplicateTransientWorkflowTaskScheduled - _, err = poller2.PollAndProcessWorkflowTaskWithAttempt(false, false, false, false, 1) + _, err = poller2.PollAndProcessWorkflowTask(tests.WithExpectedAttemptCount(1)) s.NoError(err) s.True(workflowFinished) } @@ -1871,7 +1871,7 @@ s.failover(namespace, s.clusterNames[1], int64(2), client1) - _, err = poller2.PollAndProcessWorkflowTask(false, false) + _, err = poller2.PollAndProcessWorkflowTask() s.NoError(err) s.True(wfCompleted) events := s.getHistory(client2, namespace, 
executions[0]) @@ -1969,7 +1969,7 @@ T: s.T(), } - _, err = poller1.PollAndProcessWorkflowTask(false, false) + _, err = poller1.PollAndProcessWorkflowTask() s.NoError(err) s.Equal(1, wfCompletionCount) events := s.getHistory(client1, namespace, executions[0]) @@ -1978,7 +1978,7 @@ s.failover(namespace, s.clusterNames[1], int64(2), client1) - _, err = poller2.PollAndProcessWorkflowTask(false, false) + _, err = poller2.PollAndProcessWorkflowTask() s.NoError(err) s.Equal(2, wfCompletionCount) events = s.getHistory(client2, namespace, executions[1]) @@ -2073,7 +2073,7 @@ s.failover(namespace, s.clusterNames[1], int64(2), client1) // First attempt - _, err = poller2.PollAndProcessWorkflowTask(false, false) + _, err = poller2.PollAndProcessWorkflowTask() s.NoError(err) events := s.getHistory(client2, namespace, executions[0]) s.Equal(int64(1), events[0].GetVersion()) @@ -2082,7 +2082,7 @@ s.Equal(enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_FAILED, events[len(events)-1].GetEventType()) // second attempt - _, err = poller2.PollAndProcessWorkflowTask(false, false) + _, err = poller2.PollAndProcessWorkflowTask() s.NoError(err) events = s.getHistory(client2, namespace, executions[1]) s.Equal(int64(2), events[0].GetVersion()) @@ -2177,7 +2177,7 @@ T: s.T(), } - _, err = poller1.PollAndProcessWorkflowTask(false, false) + _, err = poller1.PollAndProcessWorkflowTask() s.NoError(err) events := s.getHistory(client1, namespace, executions[0]) s.Equal(int64(1), events[0].GetVersion()) @@ -2187,7 +2187,7 @@ s.failover(namespace, s.clusterNames[1], int64(2), client1) - _, err = poller2.PollAndProcessWorkflowTask(false, false) + _, err = poller2.PollAndProcessWorkflowTask() s.NoError(err) events = s.getHistory(client2, namespace, executions[1]) s.Equal(int64(1), events[0].GetVersion()) diff -Nru temporal-1.21.5-1/src/tools/cassandra/setupTask_tests.go temporal-1.22.5/src/tools/cassandra/setupTask_tests.go --- temporal-1.21.5-1/src/tools/cassandra/setupTask_tests.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/tools/cassandra/setupTask_tests.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,68 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
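
The integration-test hunks above all migrate the same helper: the old poller calls that took long lists of positional booleans (PollAndProcessWorkflowTask(false, false), PollAndProcessWorkflowTaskWithAttempt, PollAndProcessWorkflowTaskWithAttemptAndRetryAndForceNewWorkflowTask) are replaced by a single variadic PollAndProcessWorkflowTask that accepts functional options (WithDumpHistory, WithDropTask, WithPollSticky, WithRespondSticky, WithExpectedAttemptCount, WithRetries, WithForceNewWorkflowTask) and returns a result whose fields such as NewTask and IsQueryTask replace the old extra return values. The option and field names come from the diff; the sketch below is only a plausible illustration of how such a functional-options API is commonly wired in Go, not the actual Temporal test helper.

    // Hypothetical sketch of a functional-options poller API; the With* names
    // mirror the diff, everything else is an assumption for illustration.
    package taskpoller

    // Options replaces the old list of positional booleans.
    type Options struct {
    	DumpHistory          bool
    	DropTask             bool
    	PollSticky           bool
    	RespondSticky        bool
    	ExpectedAttemptCount int
    	Retries              int
    	ForceNewWorkflowTask bool
    }

    // Option is one functional option.
    type Option func(*Options)

    // Flag-style options are plain functions so they can be passed bare,
    // e.g. PollAndProcessWorkflowTask(WithDumpHistory, WithDropTask).
    func WithDumpHistory(o *Options)          { o.DumpHistory = true }
    func WithDropTask(o *Options)             { o.DropTask = true }
    func WithPollSticky(o *Options)           { o.PollSticky = true }
    func WithRespondSticky(o *Options)        { o.RespondSticky = true }
    func WithForceNewWorkflowTask(o *Options) { o.ForceNewWorkflowTask = true }

    // Parameterised options are constructors that return an Option.
    func WithExpectedAttemptCount(n int) Option {
    	return func(o *Options) { o.ExpectedAttemptCount = n }
    }
    func WithRetries(n int) Option {
    	return func(o *Options) { o.Retries = n }
    }

    // WorkflowTask and Result are placeholders for the richer types the tests
    // read as res.NewTask and res.IsQueryTask.
    type WorkflowTask struct{}

    type Result struct {
    	NewTask     *WorkflowTask
    	IsQueryTask bool
    }

    type TaskPoller struct{}

    func (p *TaskPoller) PollAndProcessWorkflowTask(opts ...Option) (Result, error) {
    	o := Options{Retries: 1} // defaults stand in for the old `false, false, ...` arguments
    	for _, apply := range opts {
    		apply(&o)
    	}
    	// The real helper polls the task queue and processes the workflow task
    	// according to o; this sketch only shows how the options are collected.
    	return Result{}, nil
    }

Under this shape, a call such as poller.PollAndProcessWorkflowTask(WithDumpHistory, WithExpectedAttemptCount(3)) reads like the migrated tests above, where it replaces PollAndProcessWorkflowTaskWithAttempt(true, false, false, false, 3).
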
- -package cassandra - -import ( - "os" - - "go.temporal.io/server/common/log/tag" - "go.temporal.io/server/environment" - "go.temporal.io/server/tools/common/schema/test" -) - -type ( - SetupSchemaTestSuite struct { - test.SetupSchemaTestBase - client *cqlClient - } -) - -func (s *SetupSchemaTestSuite) SetupSuite() { - if err := os.Setenv("CASSANDRA_HOST", environment.GetCassandraAddress()); err != nil { - s.Logger.Fatal("Failed to set CASSANDRA_HOST", tag.Error(err)) - } - client, err := newTestCQLClient(systemKeyspace) - if err != nil { - s.Logger.Fatal("Error creating CQLClient", tag.Error(err)) - } - s.client = client - s.SetupSuiteBase(client, "") -} - -func (s *SetupSchemaTestSuite) TearDownSuite() { - s.TearDownSuiteBase() -} - -func (s *SetupSchemaTestSuite) TestCreateKeyspace() { - s.Nil(RunTool([]string{"./tool", "create", "-k", "foobar123", "--rf", "1"})) - err := s.client.dropKeyspace("foobar123") - s.Nil(err) -} - -func (s *SetupSchemaTestSuite) TestSetupSchema() { - client, err := newTestCQLClient(s.DBName) - s.Nil(err) - s.RunSetupTest(buildCLIOptions(), client, "-k", createTestCQLFileContent(), []string{"tasks", "events"}) -} diff -Nru temporal-1.21.5-1/src/tools/cassandra/setup_task_tests.go temporal-1.22.5/src/tools/cassandra/setup_task_tests.go --- temporal-1.21.5-1/src/tools/cassandra/setup_task_tests.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/tools/cassandra/setup_task_tests.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,68 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package cassandra + +import ( + "os" + + "go.temporal.io/server/common/log/tag" + "go.temporal.io/server/environment" + "go.temporal.io/server/tools/common/schema/test" +) + +type ( + SetupSchemaTestSuite struct { + test.SetupSchemaTestBase + client *cqlClient + } +) + +func (s *SetupSchemaTestSuite) SetupSuite() { + if err := os.Setenv("CASSANDRA_HOST", environment.GetCassandraAddress()); err != nil { + s.Logger.Fatal("Failed to set CASSANDRA_HOST", tag.Error(err)) + } + client, err := newTestCQLClient(systemKeyspace) + if err != nil { + s.Logger.Fatal("Error creating CQLClient", tag.Error(err)) + } + s.client = client + s.SetupSuiteBase(client, "") +} + +func (s *SetupSchemaTestSuite) TearDownSuite() { + s.TearDownSuiteBase() +} + +func (s *SetupSchemaTestSuite) TestCreateKeyspace() { + s.Nil(RunTool([]string{"./tool", "create", "-k", "foobar123", "--rf", "1"})) + err := s.client.dropKeyspace("foobar123") + s.Nil(err) +} + +func (s *SetupSchemaTestSuite) TestSetupSchema() { + client, err := newTestCQLClient(s.DBName) + s.Nil(err) + s.RunSetupTest(buildCLIOptions(), client, "-k", createTestCQLFileContent(), []string{"tasks", "events"}) +} diff -Nru temporal-1.21.5-1/src/tools/cassandra/updateTask_tests.go temporal-1.22.5/src/tools/cassandra/updateTask_tests.go --- temporal-1.21.5-1/src/tools/cassandra/updateTask_tests.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/tools/cassandra/updateTask_tests.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,70 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- -package cassandra - -import ( - "go.temporal.io/server/common/log/tag" - "go.temporal.io/server/schema/cassandra" - "go.temporal.io/server/tools/common/schema/test" -) - -type UpdateSchemaTestSuite struct { - test.UpdateSchemaTestBase -} - -func (s *UpdateSchemaTestSuite) SetupSuite() { - client, err := newTestCQLClient(systemKeyspace) - if err != nil { - s.Logger.Fatal("Error creating CQLClient", tag.Error(err)) - } - s.SetupSuiteBase(client, "") -} - -func (s *UpdateSchemaTestSuite) TearDownSuite() { - s.TearDownSuiteBase() -} - -func (s *UpdateSchemaTestSuite) TestUpdateSchema() { - client, err := newTestCQLClient(s.DBName) - s.Nil(err) - defer client.Close() - s.RunUpdateSchemaTest(buildCLIOptions(), client, "-k", createTestCQLFileContent(), []string{"events", "tasks"}) -} - -func (s *UpdateSchemaTestSuite) TestDryrun() { - client, err := newTestCQLClient(s.DBName) - s.Nil(err) - defer client.Close() - dir := "../../schema/cassandra/temporal/versioned" - s.RunDryrunTest(buildCLIOptions(), client, "-k", dir, cassandra.Version) -} - -func (s *UpdateSchemaTestSuite) TestVisibilityDryrun() { - client, err := newTestCQLClient(s.DBName) - s.Nil(err) - defer client.Close() - dir := "../../schema/cassandra/visibility/versioned" - s.RunDryrunTest(buildCLIOptions(), client, "-k", dir, cassandra.VisibilityVersion) -} diff -Nru temporal-1.21.5-1/src/tools/cassandra/update_task_tests.go temporal-1.22.5/src/tools/cassandra/update_task_tests.go --- temporal-1.21.5-1/src/tools/cassandra/update_task_tests.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/tools/cassandra/update_task_tests.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,70 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package cassandra + +import ( + "go.temporal.io/server/common/log/tag" + "go.temporal.io/server/schema/cassandra" + "go.temporal.io/server/tools/common/schema/test" +) + +type UpdateSchemaTestSuite struct { + test.UpdateSchemaTestBase +} + +func (s *UpdateSchemaTestSuite) SetupSuite() { + client, err := newTestCQLClient(systemKeyspace) + if err != nil { + s.Logger.Fatal("Error creating CQLClient", tag.Error(err)) + } + s.SetupSuiteBase(client, "") +} + +func (s *UpdateSchemaTestSuite) TearDownSuite() { + s.TearDownSuiteBase() +} + +func (s *UpdateSchemaTestSuite) TestUpdateSchema() { + client, err := newTestCQLClient(s.DBName) + s.Nil(err) + defer client.Close() + s.RunUpdateSchemaTest(buildCLIOptions(), client, "-k", createTestCQLFileContent(), []string{"events", "tasks"}) +} + +func (s *UpdateSchemaTestSuite) TestDryrun() { + client, err := newTestCQLClient(s.DBName) + s.Nil(err) + defer client.Close() + dir := "../../schema/cassandra/temporal/versioned" + s.RunDryrunTest(buildCLIOptions(), client, "-k", dir, cassandra.Version) +} + +func (s *UpdateSchemaTestSuite) TestVisibilityDryrun() { + client, err := newTestCQLClient(s.DBName) + s.Nil(err) + defer client.Close() + dir := "../../schema/cassandra/visibility/versioned" + s.RunDryrunTest(buildCLIOptions(), client, "-k", dir, cassandra.VisibilityVersion) +} diff -Nru temporal-1.21.5-1/src/tools/common/schema/updatetask.go temporal-1.22.5/src/tools/common/schema/updatetask.go --- temporal-1.21.5-1/src/tools/common/schema/updatetask.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/tools/common/schema/updatetask.go 2024-02-23 09:45:43.000000000 +0000 @@ -38,7 +38,6 @@ "strings" "github.com/blang/semver/v4" - "go.temporal.io/server/common/log" "go.temporal.io/server/common/log/tag" "go.temporal.io/server/common/persistence" @@ -131,7 +130,6 @@ task.logger.Debug(fmt.Sprintf("running %v updates for current version %v", len(updates), currVer)) for _, cs := range updates { - task.logger.Debug("running update", tag.NewAnyTag("cs", cs)) err := task.execStmts(cs.version, cs.cqlStmts) if err != nil { return err @@ -152,9 +150,20 @@ task.logger.Debug(fmt.Sprintf("---- Executing updates for version %v ----", ver)) for _, stmt := range stmts { task.logger.Debug(rmspaceRegex.ReplaceAllString(stmt, " ")) - e := task.db.Exec(stmt) - if e != nil { - return fmt.Errorf("error executing statement:%v", e) + err := task.db.Exec(stmt) + if err != nil { + // To make schema update idempotent, we need to handle error when retry on previous partially succeeded update attempt. + // There are 2 major cases that will be handled: + // 1) Add table or column that already exists (message contains 'already existing' for table or 'already exists' for column) + // 2) Drop column that is not found (message contains 'not found') + alreadyExists := strings.Contains(err.Error(), "already exist") + notFound := strings.Contains(err.Error(), "not found") + if alreadyExists || notFound { + task.logger.Warn("Duplicate update, most likely due to previous partially succeeded update attempt. 
Ignoring it and continue.", tag.Error(err)) + continue + } + + return fmt.Errorf("error executing statement: %w", err) } } task.logger.Debug("---- Done ----") @@ -162,25 +171,15 @@ } func (task *UpdateTask) updateSchemaVersion(oldVer string, cs *changeSet) error { - task.logger.Debug(fmt.Sprintf("updating schema version to %v", cs.version)) err := task.db.UpdateSchemaVersion(cs.version, cs.manifest.MinCompatibleVersion) if err != nil { return fmt.Errorf("failed to update schema_version table, err=%v", err.Error()) } - - task.logger.Debug("adding entry to schema_update_history for version", tag.NewAnyTag("cs", cs)) err = task.db.WriteSchemaUpdateLog(oldVer, cs.manifest.CurrVersion, cs.manifest.md5, cs.manifest.Description) if err != nil { return fmt.Errorf("failed to add entry to schema_update_history, err=%v", err.Error()) } - // todo: for debugging - latestVer, err := task.db.ReadSchemaVersion() - if err != nil { - return fmt.Errorf("error reading current schema version: %v", err.Error()) - } - task.logger.Debug(fmt.Sprintf("schema version now is %v", latestVer)) - return nil } diff -Nru temporal-1.21.5-1/src/tools/sql/clitest/setupTask_tests.go temporal-1.22.5/src/tools/sql/clitest/setupTask_tests.go --- temporal-1.21.5-1/src/tools/sql/clitest/setupTask_tests.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/tools/sql/clitest/setupTask_tests.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,135 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
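
The updatetask.go change above makes repeated schema updates idempotent: when a statement fails because a previous, partially successful attempt already applied it, the error is logged and skipped rather than aborting the whole update. The two tolerated cases are "already exist..." (table or column was already added) and "not found" (column to drop is already gone). Below is a minimal sketch of that classification under an assumed Execer interface; the helper names are illustrative, not the real UpdateTask internals.

    // Hedged sketch of the retry-tolerant statement execution shown in the
    // updatetask.go hunk; Execer and execStatement are illustrative stand-ins.
    package schema

    import (
    	"fmt"
    	"strings"
    )

    // Execer is whatever can run one schema statement (Cassandra or SQL).
    type Execer interface {
    	Exec(stmt string) error
    }

    // execStatement runs one statement and swallows errors that only mean a
    // previous partially successful attempt already applied this change.
    func execStatement(db Execer, stmt string, warn func(msg string, err error)) error {
    	err := db.Exec(stmt)
    	if err == nil {
    		return nil
    	}
    	alreadyExists := strings.Contains(err.Error(), "already exist") // table/column added twice
    	notFound := strings.Contains(err.Error(), "not found")          // column dropped twice
    	if alreadyExists || notFound {
    		warn("duplicate schema update from an earlier partial attempt, ignoring", err)
    		return nil
    	}
    	return fmt.Errorf("error executing statement: %w", err)
    }
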
- -package clitest - -import ( - "go.temporal.io/server/common/shuffle" - "go.temporal.io/server/tools/common/schema/test" - "go.temporal.io/server/tools/sql" -) - -type ( - // SetupSchemaTestSuite defines a test suite - SetupSchemaTestSuite struct { - test.SetupSchemaTestBase - host string - port string - pluginName string - sqlQuery string - - conn *sql.Connection - } -) - -const ( - testCLIDatabasePrefix = "test_" - testCLIDatabaseSuffix = "cli_database" -) - -// NewSetupSchemaTestSuite returns a test suite -func NewSetupSchemaTestSuite( - host string, - port string, - pluginName string, - sqlQuery string, -) *SetupSchemaTestSuite { - return &SetupSchemaTestSuite{ - host: host, - port: port, - pluginName: pluginName, - sqlQuery: sqlQuery, - } -} - -// SetupSuite setup test suite -func (s *SetupSchemaTestSuite) SetupSuite() { - conn, err := newTestConn("", s.host, s.port, s.pluginName) - if err != nil { - s.Fail("error creating sql connection:%v", err) - } - s.conn = conn - s.SetupSuiteBase(conn, s.pluginName) -} - -// TearDownSuite tear down test suite -func (s *SetupSchemaTestSuite) TearDownSuite() { - s.TearDownSuiteBase() -} - -// TestCreateDatabase test -func (s *SetupSchemaTestSuite) TestCreateDatabase() { - testDatabase := testCLIDatabasePrefix + shuffle.String(testCLIDatabaseSuffix) - err := sql.RunTool([]string{ - "./tool", - "--ep", s.host, - "--p", s.port, - "-u", testUser, - "--pw", testPassword, - "--pl", s.pluginName, - "--db", testDatabase, - "create", - }) - s.NoError(err) - err = s.conn.DropDatabase(testDatabase) - s.NoError(err) -} - -func (s *SetupSchemaTestSuite) TestCreateDatabaseIdempotent() { - testDatabase := testCLIDatabasePrefix + shuffle.String(testCLIDatabaseSuffix) - err := sql.RunTool([]string{ - "./tool", - "--ep", s.host, - "--p", s.port, - "-u", testUser, - "--pw", testPassword, - "--pl", s.pluginName, - "--db", testDatabase, - "create", - }) - s.NoError(err) - - err = sql.RunTool([]string{ - "./tool", - "--ep", s.host, - "--p", s.port, - "-u", testUser, - "--pw", testPassword, - "--pl", s.pluginName, - "--db", testDatabase, - "create", - }) - s.NoError(err) - - err = s.conn.DropDatabase(testDatabase) - s.NoError(err) -} - -// TestSetupSchema test -func (s *SetupSchemaTestSuite) TestSetupSchema() { - conn, err := newTestConn(s.DBName, s.host, s.port, s.pluginName) - s.NoError(err) - defer conn.Close() - s.RunSetupTest(sql.BuildCLIOptions(), conn, "--db", s.sqlQuery, []string{"executions", "current_executions"}) -} diff -Nru temporal-1.21.5-1/src/tools/sql/clitest/setup_task_tests.go temporal-1.22.5/src/tools/sql/clitest/setup_task_tests.go --- temporal-1.21.5-1/src/tools/sql/clitest/setup_task_tests.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/tools/sql/clitest/setup_task_tests.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,135 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package clitest + +import ( + "go.temporal.io/server/common/shuffle" + "go.temporal.io/server/tools/common/schema/test" + "go.temporal.io/server/tools/sql" +) + +type ( + // SetupSchemaTestSuite defines a test suite + SetupSchemaTestSuite struct { + test.SetupSchemaTestBase + host string + port string + pluginName string + sqlQuery string + + conn *sql.Connection + } +) + +const ( + testCLIDatabasePrefix = "test_" + testCLIDatabaseSuffix = "cli_database" +) + +// NewSetupSchemaTestSuite returns a test suite +func NewSetupSchemaTestSuite( + host string, + port string, + pluginName string, + sqlQuery string, +) *SetupSchemaTestSuite { + return &SetupSchemaTestSuite{ + host: host, + port: port, + pluginName: pluginName, + sqlQuery: sqlQuery, + } +} + +// SetupSuite setup test suite +func (s *SetupSchemaTestSuite) SetupSuite() { + conn, err := newTestConn("", s.host, s.port, s.pluginName) + if err != nil { + s.Fail("error creating sql connection:%v", err) + } + s.conn = conn + s.SetupSuiteBase(conn, s.pluginName) +} + +// TearDownSuite tear down test suite +func (s *SetupSchemaTestSuite) TearDownSuite() { + s.TearDownSuiteBase() +} + +// TestCreateDatabase test +func (s *SetupSchemaTestSuite) TestCreateDatabase() { + testDatabase := testCLIDatabasePrefix + shuffle.String(testCLIDatabaseSuffix) + err := sql.RunTool([]string{ + "./tool", + "--ep", s.host, + "--p", s.port, + "-u", testUser, + "--pw", testPassword, + "--pl", s.pluginName, + "--db", testDatabase, + "create", + }) + s.NoError(err) + err = s.conn.DropDatabase(testDatabase) + s.NoError(err) +} + +func (s *SetupSchemaTestSuite) TestCreateDatabaseIdempotent() { + testDatabase := testCLIDatabasePrefix + shuffle.String(testCLIDatabaseSuffix) + err := sql.RunTool([]string{ + "./tool", + "--ep", s.host, + "--p", s.port, + "-u", testUser, + "--pw", testPassword, + "--pl", s.pluginName, + "--db", testDatabase, + "create", + }) + s.NoError(err) + + err = sql.RunTool([]string{ + "./tool", + "--ep", s.host, + "--p", s.port, + "-u", testUser, + "--pw", testPassword, + "--pl", s.pluginName, + "--db", testDatabase, + "create", + }) + s.NoError(err) + + err = s.conn.DropDatabase(testDatabase) + s.NoError(err) +} + +// TestSetupSchema test +func (s *SetupSchemaTestSuite) TestSetupSchema() { + conn, err := newTestConn(s.DBName, s.host, s.port, s.pluginName) + s.NoError(err) + defer conn.Close() + s.RunSetupTest(sql.BuildCLIOptions(), conn, "--db", s.sqlQuery, 
[]string{"executions", "current_executions"}) +} diff -Nru temporal-1.21.5-1/src/tools/sql/clitest/updateTask_tests.go temporal-1.22.5/src/tools/sql/clitest/updateTask_tests.go --- temporal-1.21.5-1/src/tools/sql/clitest/updateTask_tests.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/tools/sql/clitest/updateTask_tests.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,111 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package clitest - -import ( - "path/filepath" - - "go.temporal.io/server/common/log/tag" - "go.temporal.io/server/tools/common/schema/test" - "go.temporal.io/server/tools/sql" -) - -// UpdateSchemaTestSuite defines a test suite -type UpdateSchemaTestSuite struct { - test.UpdateSchemaTestBase - host string - port string - pluginName string - sqlQuery string - executionSchemaVersionDir string - executionVersion string - visibilitySchemaVersionDir string - visibilityVersion string -} - -// NewUpdateSchemaTestSuite returns a test suite -func NewUpdateSchemaTestSuite( - host string, - port string, - pluginName string, - sqlQuery string, - executionSchemaVersionDir string, - executionVersion string, - visibilitySchemaVersionDir string, - visibilityVersion string, -) *UpdateSchemaTestSuite { - return &UpdateSchemaTestSuite{ - host: host, - port: port, - pluginName: pluginName, - sqlQuery: sqlQuery, - executionSchemaVersionDir: executionSchemaVersionDir, - executionVersion: executionVersion, - visibilitySchemaVersionDir: visibilitySchemaVersionDir, - visibilityVersion: visibilityVersion, - } -} - -// SetupSuite setups test suite -func (s *UpdateSchemaTestSuite) SetupSuite() { - conn, err := newTestConn("", s.host, s.port, s.pluginName) - if err != nil { - s.Logger.Fatal("Error creating CQLClient", tag.Error(err)) - } - s.SetupSuiteBase(conn, s.pluginName) -} - -// TearDownSuite tear down test suite -func (s *UpdateSchemaTestSuite) TearDownSuite() { - s.TearDownSuiteBase() -} - -// TestUpdateSchema test -func (s *UpdateSchemaTestSuite) TestUpdateSchema() { - conn, err := newTestConn(s.DBName, s.host, s.port, s.pluginName) - s.Nil(err) - defer conn.Close() - s.RunUpdateSchemaTest(sql.BuildCLIOptions(), conn, "--db", s.sqlQuery, []string{"executions", "current_executions"}) -} - -// TestDryrun test -func (s *UpdateSchemaTestSuite) TestDryrun() { - conn, err := newTestConn(s.DBName, s.host, s.port, 
s.pluginName) - s.NoError(err) - defer conn.Close() - dir, err := filepath.Abs(s.executionSchemaVersionDir) - s.NoError(err) - s.RunDryrunTest(sql.BuildCLIOptions(), conn, "--db", dir, s.executionVersion) -} - -// TestVisibilityDryrun test -func (s *UpdateSchemaTestSuite) TestVisibilityDryrun() { - conn, err := newTestConn(s.DBName, s.host, s.port, s.pluginName) - s.NoError(err) - defer conn.Close() - dir, err := filepath.Abs(s.visibilitySchemaVersionDir) - s.NoError(err) - s.RunDryrunTest(sql.BuildCLIOptions(), conn, "--db", dir, s.visibilityVersion) -} diff -Nru temporal-1.21.5-1/src/tools/sql/clitest/update_task_tests.go temporal-1.22.5/src/tools/sql/clitest/update_task_tests.go --- temporal-1.21.5-1/src/tools/sql/clitest/update_task_tests.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/tools/sql/clitest/update_task_tests.go 2024-02-23 09:45:43.000000000 +0000 @@ -0,0 +1,111 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package clitest + +import ( + "path/filepath" + + "go.temporal.io/server/common/log/tag" + "go.temporal.io/server/tools/common/schema/test" + "go.temporal.io/server/tools/sql" +) + +// UpdateSchemaTestSuite defines a test suite +type UpdateSchemaTestSuite struct { + test.UpdateSchemaTestBase + host string + port string + pluginName string + sqlQuery string + executionSchemaVersionDir string + executionVersion string + visibilitySchemaVersionDir string + visibilityVersion string +} + +// NewUpdateSchemaTestSuite returns a test suite +func NewUpdateSchemaTestSuite( + host string, + port string, + pluginName string, + sqlQuery string, + executionSchemaVersionDir string, + executionVersion string, + visibilitySchemaVersionDir string, + visibilityVersion string, +) *UpdateSchemaTestSuite { + return &UpdateSchemaTestSuite{ + host: host, + port: port, + pluginName: pluginName, + sqlQuery: sqlQuery, + executionSchemaVersionDir: executionSchemaVersionDir, + executionVersion: executionVersion, + visibilitySchemaVersionDir: visibilitySchemaVersionDir, + visibilityVersion: visibilityVersion, + } +} + +// SetupSuite setups test suite +func (s *UpdateSchemaTestSuite) SetupSuite() { + conn, err := newTestConn("", s.host, s.port, s.pluginName) + if err != nil { + s.Logger.Fatal("Error creating CQLClient", tag.Error(err)) + } + s.SetupSuiteBase(conn, s.pluginName) +} + +// TearDownSuite tear down test suite +func (s *UpdateSchemaTestSuite) TearDownSuite() { + s.TearDownSuiteBase() +} + +// TestUpdateSchema test +func (s *UpdateSchemaTestSuite) TestUpdateSchema() { + conn, err := newTestConn(s.DBName, s.host, s.port, s.pluginName) + s.Nil(err) + defer conn.Close() + s.RunUpdateSchemaTest(sql.BuildCLIOptions(), conn, "--db", s.sqlQuery, []string{"executions", "current_executions"}) +} + +// TestDryrun test +func (s *UpdateSchemaTestSuite) TestDryrun() { + conn, err := newTestConn(s.DBName, s.host, s.port, s.pluginName) + s.NoError(err) + defer conn.Close() + dir, err := filepath.Abs(s.executionSchemaVersionDir) + s.NoError(err) + s.RunDryrunTest(sql.BuildCLIOptions(), conn, "--db", dir, s.executionVersion) +} + +// TestVisibilityDryrun test +func (s *UpdateSchemaTestSuite) TestVisibilityDryrun() { + conn, err := newTestConn(s.DBName, s.host, s.port, s.pluginName) + s.NoError(err) + defer conn.Close() + dir, err := filepath.Abs(s.visibilitySchemaVersionDir) + s.NoError(err) + s.RunDryrunTest(sql.BuildCLIOptions(), conn, "--db", dir, s.visibilityVersion) +} diff -Nru temporal-1.21.5-1/src/tools/tdbg/commands.go temporal-1.22.5/src/tools/tdbg/commands.go --- temporal-1.21.5-1/src/tools/tdbg/commands.go 2023-09-29 14:03:07.000000000 +0000 +++ temporal-1.22.5/src/tools/tdbg/commands.go 2024-02-23 09:45:43.000000000 +0000 @@ -68,7 +68,6 @@ client := cFactory.AdminClient(c) serializer := serialization.NewSerializer() - var history []*commonpb.DataBlob ctx, cancel := newContext(c) defer cancel() @@ -78,26 +77,32 @@ return err } - resp, err := client.GetWorkflowExecutionRawHistoryV2(ctx, &adminservice.GetWorkflowExecutionRawHistoryV2Request{ - NamespaceId: nsID.String(), - Execution: &commonpb.WorkflowExecution{ - WorkflowId: wid, - RunId: rid, - }, - StartEventId: startEventId, - EndEventId: endEventId, - StartEventVersion: startEventVerion, - EndEventVersion: endEventVersion, - MaximumPageSize: 100, - NextPageToken: nil, - }) - if err != nil { - return fmt.Errorf("unable to read History Branch: %s", err) + var histories []*commonpb.DataBlob + var token []byte + for doContinue := true; doContinue; doContinue = 
len(token) != 0 { + resp, err := client.GetWorkflowExecutionRawHistoryV2(ctx, &adminservice.GetWorkflowExecutionRawHistoryV2Request{ + NamespaceId: nsID.String(), + Execution: &commonpb.WorkflowExecution{ + WorkflowId: wid, + RunId: rid, + }, + StartEventId: startEventId, + EndEventId: endEventId, + StartEventVersion: startEventVerion, + EndEventVersion: endEventVersion, + MaximumPageSize: 100, + NextPageToken: token, + }) + if err != nil { + return fmt.Errorf("unable to read History Branch: %s", err) + } + histories = append(histories, resp.HistoryBatches...) + token = resp.NextPageToken } allEvents := &historypb.History{} totalSize := 0 - for idx, b := range resp.HistoryBatches { + for idx, b := range histories { totalSize += len(b.Data) fmt.Printf("======== batch %v, blob len: %v ======\n", idx+1, len(b.Data)) historyBatch, err := serializer.DeserializeEvents(b) @@ -112,7 +117,7 @@ } fmt.Println(string(data)) } - fmt.Printf("======== total batches %v, total blob len: %v ======\n", len(history), totalSize) + fmt.Printf("======== total batches %v, total blob len: %v ======\n", len(histories), totalSize) if outputFileName != "" { encoder := codec.NewJSONPBEncoder() diff -Nru temporal-1.21.5-1/src/vendor/cloud.google.com/go/compute/internal/version.go temporal-1.22.5/src/vendor/cloud.google.com/go/compute/internal/version.go --- temporal-1.21.5-1/src/vendor/cloud.google.com/go/compute/internal/version.go 2023-09-29 14:03:30.000000000 +0000 +++ temporal-1.22.5/src/vendor/cloud.google.com/go/compute/internal/version.go 2024-02-23 09:46:08.000000000 +0000 @@ -15,4 +15,4 @@ package internal // Version is the current tagged release of the library. -const Version = "1.19.0" +const Version = "1.23.0" diff -Nru temporal-1.21.5-1/src/vendor/cloud.google.com/go/iam/CHANGES.md temporal-1.22.5/src/vendor/cloud.google.com/go/iam/CHANGES.md --- temporal-1.21.5-1/src/vendor/cloud.google.com/go/iam/CHANGES.md 2023-09-29 14:03:30.000000000 +0000 +++ temporal-1.22.5/src/vendor/cloud.google.com/go/iam/CHANGES.md 2024-02-23 09:46:08.000000000 +0000 @@ -1,5 +1,41 @@ # Changes + +## [1.1.2](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.1...iam/v1.1.2) (2023-08-08) + + +### Documentation + +* **iam:** Minor formatting ([b4349cc](https://github.com/googleapis/google-cloud-go/commit/b4349cc507870ff8629bbc07de578b63bb889626)) + +## [1.1.1](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.0...iam/v1.1.1) (2023-06-20) + + +### Bug Fixes + +* **iam:** REST query UpdateMask bug ([df52820](https://github.com/googleapis/google-cloud-go/commit/df52820b0e7721954809a8aa8700b93c5662dc9b)) + +## [1.1.0](https://github.com/googleapis/google-cloud-go/compare/iam/v1.0.1...iam/v1.1.0) (2023-05-30) + + +### Features + +* **iam:** Update all direct dependencies ([b340d03](https://github.com/googleapis/google-cloud-go/commit/b340d030f2b52a4ce48846ce63984b28583abde6)) + +## [1.0.1](https://github.com/googleapis/google-cloud-go/compare/iam/v1.0.0...iam/v1.0.1) (2023-05-08) + + +### Bug Fixes + +* **iam:** Update grpc to v1.55.0 ([1147ce0](https://github.com/googleapis/google-cloud-go/commit/1147ce02a990276ca4f8ab7a1ab65c14da4450ef)) + +## [1.0.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.13.0...iam/v1.0.0) (2023-04-04) + + +### Features + +* **iam:** Promote to GA ([#7627](https://github.com/googleapis/google-cloud-go/issues/7627)) ([b351906](https://github.com/googleapis/google-cloud-go/commit/b351906a10e17a02d7f7e2551bc1585fd9dc3742)) + ## 
[0.13.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.12.0...iam/v0.13.0) (2023-03-15) diff -Nru temporal-1.21.5-1/src/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go temporal-1.22.5/src/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go --- temporal-1.21.5-1/src/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go 2023-09-29 14:03:30.000000000 +0000 +++ temporal-1.22.5/src/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go 2024-02-23 09:46:08.000000000 +0000 @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.26.0 -// protoc v3.21.9 +// protoc-gen-go v1.30.0 +// protoc v4.23.2 // source: google/iam/v1/iam_policy.proto package iampb @@ -342,37 +342,37 @@ 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x29, 0x82, 0xd3, 0xe4, 0x93, - 0x02, 0x23, 0x22, 0x1e, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x3d, 0x2a, 0x2a, 0x7d, 0x3a, 0x73, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x3a, 0x01, 0x2a, 0x12, 0x74, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, + 0x02, 0x23, 0x3a, 0x01, 0x2a, 0x22, 0x1e, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x3d, 0x2a, 0x2a, 0x7d, 0x3a, 0x73, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x74, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x22, 0x29, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x22, 0x1e, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x2a, 0x2a, 0x7d, 0x3a, 0x67, 0x65, 0x74, 0x49, - 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x3a, 0x01, 0x2a, 0x12, 0x9a, 0x01, 0x0a, 0x12, + 0x22, 0x29, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x3a, 0x01, 0x2a, 0x22, 0x1e, 0x2f, 0x76, 0x31, + 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x2a, 0x2a, 0x7d, 0x3a, 0x67, + 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x9a, 0x01, 0x0a, 0x12, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x29, 0x22, - 0x24, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x2a, - 0x2a, 0x7d, 0x3a, 0x74, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, - 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x3a, 0x01, 0x2a, 0x1a, 0x1e, 0xca, 
0x41, 0x1b, 0x69, 0x61, 0x6d, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x29, 0x3a, + 0x01, 0x2a, 0x22, 0x24, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x3d, 0x2a, 0x2a, 0x7d, 0x3a, 0x74, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, + 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x1e, 0xca, 0x41, 0x1b, 0x69, 0x61, 0x6d, 0x2d, 0x6d, 0x65, 0x74, 0x61, 0x2d, 0x61, 0x70, 0x69, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x42, 0x86, 0x01, 0x0a, 0x11, 0x63, 0x6f, 0x6d, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x42, 0x0e, - 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, - 0x5a, 0x30, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, - 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x3b, 0x69, - 0x61, 0x6d, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, - 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x49, 0x61, 0x6d, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x49, 0x61, 0x6d, 0x5c, 0x56, - 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x42, 0x7f, 0x0a, 0x11, 0x63, 0x6f, 0x6d, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x42, 0x0e, 0x49, + 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x29, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x69, + 0x61, 0x6d, 0x70, 0x62, 0x3b, 0x69, 0x61, 0x6d, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, + 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x49, 0x61, 0x6d, + 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, + 0x75, 0x64, 0x5c, 0x49, 0x61, 0x6d, 0x5c, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, } var ( diff -Nru temporal-1.21.5-1/src/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go temporal-1.22.5/src/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go --- temporal-1.21.5-1/src/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go 2023-09-29 14:03:30.000000000 +0000 +++ temporal-1.22.5/src/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go 2024-02-23 09:46:08.000000000 +0000 @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
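
Back in the tdbg/commands.go hunk earlier in this diff, the history dump switches from a single GetWorkflowExecutionRawHistoryV2 request to a loop that keeps requesting pages, carrying the returned NextPageToken forward and appending each page's HistoryBatches, until the token comes back empty. The sketch below shows only that pagination shape; fetchPage is a hypothetical stand-in for the admin-client call, not part of tdbg.

    // Sketch of token-based pagination, assuming a hypothetical fetchPage that
    // issues one request with the previous NextPageToken.
    package tdbgsketch

    type page struct {
    	Batches       [][]byte // raw history blobs for this page
    	NextPageToken []byte   // empty when there are no more pages
    }

    func fetchPage(token []byte) (page, error) {
    	// ... one RPC carrying MaximumPageSize and NextPageToken: token ...
    	return page{}, nil
    }

    func fetchAllBatches() ([][]byte, error) {
    	var batches [][]byte
    	var token []byte
    	// Same loop shape as the diff: run at least once, keep going while the
    	// server returns a non-empty NextPageToken.
    	for doContinue := true; doContinue; doContinue = len(token) != 0 {
    		p, err := fetchPage(token)
    		if err != nil {
    			return nil, err
    		}
    		batches = append(batches, p.Batches...)
    		token = p.NextPageToken
    	}
    	return batches, nil
    }
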
// versions: -// protoc-gen-go v1.26.0 -// protoc v3.21.9 +// protoc-gen-go v1.30.0 +// protoc v4.23.2 // source: google/iam/v1/options.proto package iampb @@ -111,16 +111,16 @@ 0x12, 0x38, 0x0a, 0x18, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x16, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x84, 0x01, 0x0a, 0x11, 0x63, - 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, - 0x42, 0x0c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, - 0x5a, 0x30, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, - 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x3b, 0x69, - 0x61, 0x6d, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, - 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x49, 0x61, 0x6d, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x49, 0x61, 0x6d, 0x5c, 0x56, - 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x69, 0x63, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x7d, 0x0a, 0x11, 0x63, 0x6f, + 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x42, + 0x0c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x29, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x69, + 0x61, 0x6d, 0x70, 0x62, 0x3b, 0x69, 0x61, 0x6d, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, + 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x49, 0x61, 0x6d, + 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, + 0x75, 0x64, 0x5c, 0x49, 0x61, 0x6d, 0x5c, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, } var ( diff -Nru temporal-1.21.5-1/src/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go temporal-1.22.5/src/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go --- temporal-1.21.5-1/src/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go 2023-09-29 14:03:30.000000000 +0000 +++ temporal-1.22.5/src/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go 2024-02-23 09:46:08.000000000 +0000 @@ -1,4 +1,4 @@ -// Copyright 2022 Google LLC +// Copyright 2023 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.26.0 -// protoc v3.21.9 +// protoc-gen-go v1.30.0 +// protoc v4.23.2 // source: google/iam/v1/policy.proto package iampb @@ -214,10 +214,13 @@ // only if the expression evaluates to `true`. A condition can add constraints // based on attributes of the request, the resource, or both. To learn which // resources support conditions in their IAM policies, see the -// [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). 
+// [IAM +// documentation](https://cloud.google.com/iam/help/conditions/resource-policies). // // **JSON example:** // +// ``` +// // { // "bindings": [ // { @@ -237,7 +240,8 @@ // "condition": { // "title": "expirable access", // "description": "Does not grant access after Sep 2020", -// "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", +// "expression": "request.time < +// timestamp('2020-10-01T00:00:00.000Z')", // } // } // ], @@ -245,8 +249,12 @@ // "version": 3 // } // +// ``` +// // **YAML example:** // +// ``` +// // bindings: // - members: // - user:mike@example.com @@ -264,6 +272,8 @@ // etag: BwWWja0YfJA= // version: 3 // +// ``` +// // For a description of IAM and its features, see the // [IAM documentation](https://cloud.google.com/iam/docs/). type Policy struct { @@ -279,11 +289,11 @@ // Any operation that affects conditional role bindings must specify version // `3`. This requirement applies to the following operations: // - // - Getting a policy that includes a conditional role binding - // - Adding a conditional role binding to a policy - // - Changing a conditional role binding in a policy - // - Removing any role binding, with or without a condition, from a policy - // that includes conditions + // * Getting a policy that includes a conditional role binding + // * Adding a conditional role binding to a policy + // * Changing a conditional role binding in a policy + // * Removing any role binding, with or without a condition, from a policy + // that includes conditions // // **Important:** If you use IAM Conditions, you must include the `etag` field // whenever you call `setIamPolicy`. If you omit this field, then IAM allows @@ -294,7 +304,8 @@ // specify any valid version or leave the field unset. // // To learn which resources support conditions in their IAM policies, see the - // [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). + // [IAM + // documentation](https://cloud.google.com/iam/help/conditions/resource-policies). Version int32 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` // Associates a list of `members`, or principals, with a `role`. Optionally, // may specify a `condition` that determines how and when the `bindings` are @@ -393,46 +404,50 @@ // Role that is assigned to the list of `members`, or principals. // For example, `roles/viewer`, `roles/editor`, or `roles/owner`. Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` - // Specifies the principals requesting access for a Cloud Platform resource. + // Specifies the principals requesting access for a Google Cloud resource. // `members` can have the following values: // - // - `allUsers`: A special identifier that represents anyone who is - // on the internet; with or without a Google account. + // * `allUsers`: A special identifier that represents anyone who is + // on the internet; with or without a Google account. + // + // * `allAuthenticatedUsers`: A special identifier that represents anyone + // who is authenticated with a Google account or a service account. + // + // * `user:{emailid}`: An email address that represents a specific Google + // account. For example, `alice@example.com` . + // + // + // * `serviceAccount:{emailid}`: An email address that represents a service + // account. For example, `my-other-app@appspot.gserviceaccount.com`. + // + // * `group:{emailid}`: An email address that represents a Google group. + // For example, `admins@example.com`. 
+ // + // * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique + // identifier) representing a user that has been recently deleted. For + // example, `alice@example.com?uid=123456789012345678901`. If the user is + // recovered, this value reverts to `user:{emailid}` and the recovered user + // retains the role in the binding. + // + // * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus + // unique identifier) representing a service account that has been recently + // deleted. For example, + // `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. + // If the service account is undeleted, this value reverts to + // `serviceAccount:{emailid}` and the undeleted service account retains the + // role in the binding. + // + // * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique + // identifier) representing a Google group that has been recently + // deleted. For example, `admins@example.com?uid=123456789012345678901`. If + // the group is recovered, this value reverts to `group:{emailid}` and the + // recovered group retains the role in the binding. // - // - `allAuthenticatedUsers`: A special identifier that represents anyone - // who is authenticated with a Google account or a service account. // - // - `user:{emailid}`: An email address that represents a specific Google - // account. For example, `alice@example.com` . + // * `domain:{domain}`: The G Suite domain (primary) that represents all the + // users of that domain. For example, `google.com` or `example.com`. // - // - `serviceAccount:{emailid}`: An email address that represents a service - // account. For example, `my-other-app@appspot.gserviceaccount.com`. - // - // - `group:{emailid}`: An email address that represents a Google group. - // For example, `admins@example.com`. - // - // - `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique - // identifier) representing a user that has been recently deleted. For - // example, `alice@example.com?uid=123456789012345678901`. If the user is - // recovered, this value reverts to `user:{emailid}` and the recovered user - // retains the role in the binding. - // - // - `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus - // unique identifier) representing a service account that has been recently - // deleted. For example, - // `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. - // If the service account is undeleted, this value reverts to - // `serviceAccount:{emailid}` and the undeleted service account retains the - // role in the binding. - // - // - `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique - // identifier) representing a Google group that has been recently - // deleted. For example, `admins@example.com?uid=123456789012345678901`. If - // the group is recovered, this value reverts to `group:{emailid}` and the - // recovered group retains the role in the binding. // - // - `domain:{domain}`: The G Suite domain (primary) that represents all the - // users of that domain. For example, `google.com` or `example.com`. Members []string `protobuf:"bytes,2,rep,name=members,proto3" json:"members,omitempty"` // The condition that is associated with this binding. // @@ -551,8 +566,8 @@ // } // // For sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ -// logging. It also exempts jose@example.com from DATA_READ logging, and -// aliya@example.com from DATA_WRITE logging. +// logging. 
It also exempts `jose@example.com` from DATA_READ logging, and +// `aliya@example.com` from DATA_WRITE logging. type AuditConfig struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -640,7 +655,8 @@ LogType AuditLogConfig_LogType `protobuf:"varint,1,opt,name=log_type,json=logType,proto3,enum=google.iam.v1.AuditLogConfig_LogType" json:"log_type,omitempty"` // Specifies the identities that do not cause logging for this type of // permission. - // Follows the same format of [Binding.members][google.iam.v1.Binding.members]. + // Follows the same format of + // [Binding.members][google.iam.v1.Binding.members]. ExemptedMembers []string `protobuf:"bytes,2,rep,name=exempted_members,json=exemptedMembers,proto3" json:"exempted_members,omitempty"` } @@ -762,7 +778,7 @@ // For example, `roles/viewer`, `roles/editor`, or `roles/owner`. // Required Role string `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"` - // A single identity requesting access for a Cloud Platform resource. + // A single identity requesting access for a Google Cloud resource. // Follows the same format of Binding.members. // Required Member string `protobuf:"bytes,3,opt,name=member,proto3" json:"member,omitempty"` @@ -999,16 +1015,15 @@ 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x12, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x44, 0x44, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x52, 0x45, 0x4d, 0x4f, 0x56, - 0x45, 0x10, 0x02, 0x42, 0x83, 0x01, 0x0a, 0x11, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x30, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, - 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x3b, 0x69, 0x61, 0x6d, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, - 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x49, 0x61, 0x6d, - 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, - 0x75, 0x64, 0x5c, 0x49, 0x61, 0x6d, 0x5c, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, + 0x45, 0x10, 0x02, 0x42, 0x7c, 0x0a, 0x11, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x29, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x69, 0x61, 0x6d, + 0x2f, 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x69, 0x61, 0x6d, 0x70, 0x62, 0x3b, 0x69, 0x61, 0x6d, + 0x70, 0x62, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, + 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x49, 0x61, 0x6d, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x49, 0x61, 0x6d, 0x5c, 0x56, + 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff -Nru temporal-1.21.5-1/src/vendor/cloud.google.com/go/internal/.repo-metadata-full.json temporal-1.22.5/src/vendor/cloud.google.com/go/internal/.repo-metadata-full.json --- temporal-1.21.5-1/src/vendor/cloud.google.com/go/internal/.repo-metadata-full.json 2023-09-29 
14:03:30.000000000 +0000 +++ temporal-1.22.5/src/vendor/cloud.google.com/go/internal/.repo-metadata-full.json 2024-02-23 09:46:08.000000000 +0000 @@ -1,2009 +1,2462 @@ { "cloud.google.com/go/accessapproval/apiv1": { + "api_shortname": "accessapproval", "distribution_name": "cloud.google.com/go/accessapproval/apiv1", "description": "Access Approval API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/accessapproval/latest/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/accessapproval/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/accesscontextmanager/apiv1": { + "api_shortname": "accesscontextmanager", "distribution_name": "cloud.google.com/go/accesscontextmanager/apiv1", "description": "Access Context Manager API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/accesscontextmanager/latest/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/accesscontextmanager/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/advisorynotifications/apiv1": { + "api_shortname": "advisorynotifications", + "distribution_name": "cloud.google.com/go/advisorynotifications/apiv1", + "description": "Advisory Notifications API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/advisorynotifications/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/ai/generativelanguage/apiv1beta2": { + "api_shortname": "generativelanguage", + "distribution_name": "cloud.google.com/go/ai/generativelanguage/apiv1beta2", + "description": "Generative Language API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/ai/latest/generativelanguage/apiv1beta2", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/aiplatform/apiv1": { + "api_shortname": "aiplatform", "distribution_name": "cloud.google.com/go/aiplatform/apiv1", "description": "Vertex AI API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/aiplatform/latest/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/aiplatform/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/aiplatform/apiv1beta1": { + "api_shortname": "aiplatform", "distribution_name": "cloud.google.com/go/aiplatform/apiv1beta1", "description": "Vertex AI API", - "language": "Go", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/aiplatform/latest/apiv1beta1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/alloydb/apiv1": { + "api_shortname": "alloydb", + "distribution_name": "cloud.google.com/go/alloydb/apiv1", + "description": "AlloyDB API", + "language": "go", + "client_library_type": "generated", + "client_documentation": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go/alloydb/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/alloydb/apiv1alpha": { + "api_shortname": "alloydb", + "distribution_name": "cloud.google.com/go/alloydb/apiv1alpha", + "description": "AlloyDB API", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/aiplatform/latest/apiv1beta1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/alloydb/latest/apiv1alpha", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/alloydb/apiv1beta": { + "api_shortname": "alloydb", + "distribution_name": "cloud.google.com/go/alloydb/apiv1beta", + "description": "AlloyDB API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/alloydb/latest/apiv1beta", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/alloydb/connectors/apiv1alpha": { + "api_shortname": "connectors", + "distribution_name": "cloud.google.com/go/alloydb/connectors/apiv1alpha", + "description": "AlloyDB connectors", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/alloydb/latest/connectors/apiv1alpha", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/analytics/admin/apiv1alpha": { + "api_shortname": "analyticsadmin", "distribution_name": "cloud.google.com/go/analytics/admin/apiv1alpha", "description": "Google Analytics Admin API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/analytics/latest/admin/apiv1alpha", - "release_level": "alpha", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/analytics/latest/admin/apiv1alpha", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/apigateway/apiv1": { + "api_shortname": "apigateway", "distribution_name": "cloud.google.com/go/apigateway/apiv1", "description": "API Gateway API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/apigateway/latest/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/apigateway/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/apigeeconnect/apiv1": { + "api_shortname": "apigeeconnect", "distribution_name": "cloud.google.com/go/apigeeconnect/apiv1", "description": "Apigee Connect API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/apigeeconnect/latest/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/apigeeconnect/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/apigeeregistry/apiv1": { + "api_shortname": "apigeeregistry", "distribution_name": "cloud.google.com/go/apigeeregistry/apiv1", "description": "Apigee Registry API", - "language": "Go", + "language": "go", "client_library_type": 
"generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/apigeeregistry/latest/apiv1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/apigeeregistry/latest/apiv1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/apikeys/apiv2": { + "api_shortname": "apikeys", "distribution_name": "cloud.google.com/go/apikeys/apiv2", "description": "API Keys API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/apikeys/latest/apiv2", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/apikeys/latest/apiv2", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/appengine/apiv1": { + "api_shortname": "appengine", "distribution_name": "cloud.google.com/go/appengine/apiv1", "description": "App Engine Admin API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/appengine/latest/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/appengine/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/area120/tables/apiv1alpha1": { + "api_shortname": "area120tables", "distribution_name": "cloud.google.com/go/area120/tables/apiv1alpha1", "description": "Area120 Tables API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/area120/latest/tables/apiv1alpha1", - "release_level": "alpha", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/area120/latest/tables/apiv1alpha1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/artifactregistry/apiv1": { + "api_shortname": "artifactregistry", "distribution_name": "cloud.google.com/go/artifactregistry/apiv1", "description": "Artifact Registry API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/artifactregistry/latest/apiv1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/artifactregistry/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/artifactregistry/apiv1beta2": { + "api_shortname": "artifactregistry", "distribution_name": "cloud.google.com/go/artifactregistry/apiv1beta2", "description": "Artifact Registry API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/artifactregistry/latest/apiv1beta2", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/artifactregistry/latest/apiv1beta2", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/asset/apiv1": { + "api_shortname": "cloudasset", "distribution_name": "cloud.google.com/go/asset/apiv1", "description": "Cloud Asset API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/asset/latest/apiv1", 
- "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/asset/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/asset/apiv1p2beta1": { + "api_shortname": "cloudasset", "distribution_name": "cloud.google.com/go/asset/apiv1p2beta1", "description": "Cloud Asset API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/asset/latest/apiv1p2beta1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/asset/latest/apiv1p2beta1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/asset/apiv1p5beta1": { + "api_shortname": "cloudasset", "distribution_name": "cloud.google.com/go/asset/apiv1p5beta1", "description": "Cloud Asset API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/asset/latest/apiv1p5beta1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/asset/latest/apiv1p5beta1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/assuredworkloads/apiv1": { + "api_shortname": "assuredworkloads", "distribution_name": "cloud.google.com/go/assuredworkloads/apiv1", "description": "Assured Workloads API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/assuredworkloads/latest/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/assuredworkloads/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/assuredworkloads/apiv1beta1": { + "api_shortname": "assuredworkloads", "distribution_name": "cloud.google.com/go/assuredworkloads/apiv1beta1", "description": "Assured Workloads API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/assuredworkloads/latest/apiv1beta1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/assuredworkloads/latest/apiv1beta1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/automl/apiv1": { + "api_shortname": "automl", "distribution_name": "cloud.google.com/go/automl/apiv1", "description": "Cloud AutoML API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/automl/latest/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/automl/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/automl/apiv1beta1": { + "api_shortname": "automl", "distribution_name": "cloud.google.com/go/automl/apiv1beta1", "description": "Cloud AutoML API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/automl/latest/apiv1beta1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/automl/latest/apiv1beta1", + 
"release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/baremetalsolution/apiv2": { + "api_shortname": "baremetalsolution", "distribution_name": "cloud.google.com/go/baremetalsolution/apiv2", "description": "Bare Metal Solution API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/baremetalsolution/latest/apiv2", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/baremetalsolution/latest/apiv2", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/batch/apiv1": { + "api_shortname": "batch", "distribution_name": "cloud.google.com/go/batch/apiv1", "description": "Batch API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/batch/latest/apiv1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/batch/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/beyondcorp/appconnections/apiv1": { + "api_shortname": "beyondcorp", "distribution_name": "cloud.google.com/go/beyondcorp/appconnections/apiv1", "description": "BeyondCorp API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/beyondcorp/latest/appconnections/apiv1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/beyondcorp/latest/appconnections/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/beyondcorp/appconnectors/apiv1": { + "api_shortname": "beyondcorp", "distribution_name": "cloud.google.com/go/beyondcorp/appconnectors/apiv1", "description": "BeyondCorp API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/beyondcorp/latest/appconnectors/apiv1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/beyondcorp/latest/appconnectors/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/beyondcorp/appgateways/apiv1": { + "api_shortname": "beyondcorp", "distribution_name": "cloud.google.com/go/beyondcorp/appgateways/apiv1", "description": "BeyondCorp API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/beyondcorp/latest/appgateways/apiv1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/beyondcorp/latest/appgateways/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/beyondcorp/clientconnectorservices/apiv1": { + "api_shortname": "beyondcorp", "distribution_name": "cloud.google.com/go/beyondcorp/clientconnectorservices/apiv1", "description": "BeyondCorp API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/beyondcorp/latest/clientconnectorservices/apiv1", - "release_level": "beta", + "client_documentation": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go/beyondcorp/latest/clientconnectorservices/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/beyondcorp/clientgateways/apiv1": { + "api_shortname": "beyondcorp", "distribution_name": "cloud.google.com/go/beyondcorp/clientgateways/apiv1", "description": "BeyondCorp API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/beyondcorp/latest/clientgateways/apiv1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/beyondcorp/latest/clientgateways/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/bigquery": { + "api_shortname": "bigquery", "distribution_name": "cloud.google.com/go/bigquery", "description": "BigQuery", - "language": "Go", + "language": "go", "client_library_type": "manual", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest", + "release_level": "stable", "library_type": "GAPIC_MANUAL" }, "cloud.google.com/go/bigquery/analyticshub/apiv1": { + "api_shortname": "analyticshub", "distribution_name": "cloud.google.com/go/bigquery/analyticshub/apiv1", "description": "Analytics Hub API", - "language": "Go", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/analyticshub/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/bigquery/biglake/apiv1": { + "api_shortname": "biglake", + "distribution_name": "cloud.google.com/go/bigquery/biglake/apiv1", + "description": "BigLake API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/biglake/apiv1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/bigquery/biglake/apiv1alpha1": { + "api_shortname": "biglake", + "distribution_name": "cloud.google.com/go/bigquery/biglake/apiv1alpha1", + "description": "BigLake API", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/analyticshub/apiv1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/biglake/apiv1alpha1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/bigquery/connection/apiv1": { + "api_shortname": "bigqueryconnection", "distribution_name": "cloud.google.com/go/bigquery/connection/apiv1", "description": "BigQuery Connection API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/connection/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/connection/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/bigquery/connection/apiv1beta1": { + "api_shortname": "bigqueryconnection", "distribution_name": "cloud.google.com/go/bigquery/connection/apiv1beta1", 
"description": "BigQuery Connection API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/connection/apiv1beta1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/connection/apiv1beta1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/bigquery/dataexchange/apiv1beta1": { + "api_shortname": "analyticshub", "distribution_name": "cloud.google.com/go/bigquery/dataexchange/apiv1beta1", "description": "Analytics Hub API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/dataexchange/apiv1beta1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/dataexchange/apiv1beta1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/bigquery/datapolicies/apiv1": { + "api_shortname": "bigquerydatapolicy", "distribution_name": "cloud.google.com/go/bigquery/datapolicies/apiv1", "description": "BigQuery Data Policy API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/datapolicies/apiv1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/datapolicies/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/bigquery/datapolicies/apiv1beta1": { + "api_shortname": "bigquerydatapolicy", "distribution_name": "cloud.google.com/go/bigquery/datapolicies/apiv1beta1", "description": "BigQuery Data Policy API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/datapolicies/apiv1beta1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/datapolicies/apiv1beta1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/bigquery/datatransfer/apiv1": { + "api_shortname": "bigquerydatatransfer", "distribution_name": "cloud.google.com/go/bigquery/datatransfer/apiv1", "description": "BigQuery Data Transfer API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/datatransfer/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/datatransfer/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/bigquery/migration/apiv2": { + "api_shortname": "bigquerymigration", "distribution_name": "cloud.google.com/go/bigquery/migration/apiv2", "description": "BigQuery Migration API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/migration/apiv2", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/migration/apiv2", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, 
"cloud.google.com/go/bigquery/migration/apiv2alpha": { + "api_shortname": "bigquerymigration", "distribution_name": "cloud.google.com/go/bigquery/migration/apiv2alpha", "description": "BigQuery Migration API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/migration/apiv2alpha", - "release_level": "alpha", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/migration/apiv2alpha", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/bigquery/reservation/apiv1": { + "api_shortname": "bigqueryreservation", "distribution_name": "cloud.google.com/go/bigquery/reservation/apiv1", "description": "BigQuery Reservation API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/reservation/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/reservation/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/bigquery/storage/apiv1": { + "api_shortname": "bigquerystorage", "distribution_name": "cloud.google.com/go/bigquery/storage/apiv1", "description": "BigQuery Storage API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/storage/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/storage/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/bigquery/storage/apiv1beta1": { + "api_shortname": "bigquerystorage", "distribution_name": "cloud.google.com/go/bigquery/storage/apiv1beta1", "description": "BigQuery Storage API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/storage/apiv1beta1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/storage/apiv1beta1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/bigquery/storage/apiv1beta2": { + "api_shortname": "bigquerystorage", "distribution_name": "cloud.google.com/go/bigquery/storage/apiv1beta2", "description": "BigQuery Storage API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/storage/apiv1beta2", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/storage/apiv1beta2", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/bigtable": { + "api_shortname": "bigtable", "distribution_name": "cloud.google.com/go/bigtable", "description": "Cloud BigTable", - "language": "Go", + "language": "go", "client_library_type": "manual", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigtable/latest", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigtable/latest", + "release_level": "stable", "library_type": "GAPIC_MANUAL" }, 
"cloud.google.com/go/billing/apiv1": { + "api_shortname": "cloudbilling", "distribution_name": "cloud.google.com/go/billing/apiv1", "description": "Cloud Billing API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/billing/latest/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/billing/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/billing/budgets/apiv1": { + "api_shortname": "billingbudgets", "distribution_name": "cloud.google.com/go/billing/budgets/apiv1", "description": "Cloud Billing Budget API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/billing/latest/budgets/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/billing/latest/budgets/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/billing/budgets/apiv1beta1": { + "api_shortname": "billingbudgets", "distribution_name": "cloud.google.com/go/billing/budgets/apiv1beta1", "description": "Cloud Billing Budget API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/billing/latest/budgets/apiv1beta1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/billing/latest/budgets/apiv1beta1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/binaryauthorization/apiv1": { + "api_shortname": "binaryauthorization", "distribution_name": "cloud.google.com/go/binaryauthorization/apiv1", "description": "Binary Authorization API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/binaryauthorization/latest/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/binaryauthorization/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/binaryauthorization/apiv1beta1": { + "api_shortname": "binaryauthorization", "distribution_name": "cloud.google.com/go/binaryauthorization/apiv1beta1", "description": "Binary Authorization API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/binaryauthorization/latest/apiv1beta1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/binaryauthorization/latest/apiv1beta1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/certificatemanager/apiv1": { + "api_shortname": "certificatemanager", "distribution_name": "cloud.google.com/go/certificatemanager/apiv1", "description": "Certificate Manager API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/certificatemanager/latest/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/certificatemanager/latest/apiv1", + "release_level": "stable", 
"library_type": "GAPIC_AUTO" }, "cloud.google.com/go/channel/apiv1": { + "api_shortname": "cloudchannel", "distribution_name": "cloud.google.com/go/channel/apiv1", "description": "Cloud Channel API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/channel/latest/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/channel/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/cloudbuild/apiv1/v2": { + "api_shortname": "cloudbuild", "distribution_name": "cloud.google.com/go/cloudbuild/apiv1/v2", "description": "Cloud Build API", - "language": "Go", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/cloudbuild/latest/apiv1/v2", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/cloudbuild/apiv2": { + "api_shortname": "cloudbuild", + "distribution_name": "cloud.google.com/go/cloudbuild/apiv2", + "description": "Cloud Build API", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/cloudbuild/latest/apiv1/v2", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/cloudbuild/latest/apiv2", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/clouddms/apiv1": { + "api_shortname": "datamigration", "distribution_name": "cloud.google.com/go/clouddms/apiv1", "description": "Database Migration API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/clouddms/latest/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/clouddms/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/cloudtasks/apiv2": { + "api_shortname": "cloudtasks", "distribution_name": "cloud.google.com/go/cloudtasks/apiv2", "description": "Cloud Tasks API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/cloudtasks/latest/apiv2", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/cloudtasks/latest/apiv2", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/cloudtasks/apiv2beta2": { + "api_shortname": "cloudtasks", "distribution_name": "cloud.google.com/go/cloudtasks/apiv2beta2", "description": "Cloud Tasks API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/cloudtasks/latest/apiv2beta2", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/cloudtasks/latest/apiv2beta2", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/cloudtasks/apiv2beta3": { + "api_shortname": "cloudtasks", "distribution_name": "cloud.google.com/go/cloudtasks/apiv2beta3", "description": "Cloud Tasks API", - "language": "Go", + "language": "go", + "client_library_type": "generated", + "client_documentation": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go/cloudtasks/latest/apiv2beta3", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/commerce/consumer/procurement/apiv1": { + "api_shortname": "cloudcommerceconsumerprocurement", + "distribution_name": "cloud.google.com/go/commerce/consumer/procurement/apiv1", + "description": "Cloud Commerce Consumer Procurement API", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/cloudtasks/latest/apiv2beta3", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/commerce/latest/consumer/procurement/apiv1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/compute/apiv1": { + "api_shortname": "compute", "distribution_name": "cloud.google.com/go/compute/apiv1", "description": "Google Compute Engine API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/compute/latest/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/compute/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/compute/metadata": { + "api_shortname": "compute-metadata", "distribution_name": "cloud.google.com/go/compute/metadata", "description": "Service Metadata API", - "language": "Go", + "language": "go", "client_library_type": "manual", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/compute/latest/metadata", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/compute/latest/metadata", + "release_level": "stable", "library_type": "CORE" }, + "cloud.google.com/go/confidentialcomputing/apiv1": { + "api_shortname": "confidentialcomputing", + "distribution_name": "cloud.google.com/go/confidentialcomputing/apiv1", + "description": "Confidential Computing API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/confidentialcomputing/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/confidentialcomputing/apiv1alpha1": { + "api_shortname": "confidentialcomputing", + "distribution_name": "cloud.google.com/go/confidentialcomputing/apiv1alpha1", + "description": "Confidential Computing API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/confidentialcomputing/latest/apiv1alpha1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/config/apiv1": { + "api_shortname": "config", + "distribution_name": "cloud.google.com/go/config/apiv1", + "description": "Infrastructure Manager API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/config/latest/apiv1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/contactcenterinsights/apiv1": { + "api_shortname": "contactcenterinsights", "distribution_name": "cloud.google.com/go/contactcenterinsights/apiv1", "description": "Contact Center AI Insights API", - "language": "Go", + "language": "go", 
"client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/contactcenterinsights/latest/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/contactcenterinsights/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/container/apiv1": { + "api_shortname": "container", "distribution_name": "cloud.google.com/go/container/apiv1", "description": "Kubernetes Engine API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/container/latest/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/container/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/containeranalysis/apiv1beta1": { + "api_shortname": "containeranalysis", "distribution_name": "cloud.google.com/go/containeranalysis/apiv1beta1", "description": "Container Analysis API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/containeranalysis/latest/apiv1beta1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/containeranalysis/apiv1beta1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/datacatalog/apiv1": { + "api_shortname": "datacatalog", "distribution_name": "cloud.google.com/go/datacatalog/apiv1", "description": "Google Cloud Data Catalog API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/datacatalog/latest/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/datacatalog/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/datacatalog/apiv1beta1": { + "api_shortname": "datacatalog", "distribution_name": "cloud.google.com/go/datacatalog/apiv1beta1", "description": "Google Cloud Data Catalog API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/datacatalog/latest/apiv1beta1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/datacatalog/latest/apiv1beta1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/datacatalog/lineage/apiv1": { + "api_shortname": "datalineage", "distribution_name": "cloud.google.com/go/datacatalog/lineage/apiv1", "description": "Data Lineage API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/datacatalog/latest/lineage/apiv1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/datacatalog/latest/lineage/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/dataflow/apiv1beta3": { + "api_shortname": "dataflow", "distribution_name": "cloud.google.com/go/dataflow/apiv1beta3", "description": "Dataflow API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go/dataflow/latest/apiv1beta3", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dataflow/latest/apiv1beta3", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/dataform/apiv1alpha2": { + "api_shortname": "dataform", "distribution_name": "cloud.google.com/go/dataform/apiv1alpha2", "description": "Dataform API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dataform/latest/apiv1alpha2", - "release_level": "alpha", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dataform/latest/apiv1alpha2", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/dataform/apiv1beta1": { + "api_shortname": "dataform", "distribution_name": "cloud.google.com/go/dataform/apiv1beta1", "description": "Dataform API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dataform/latest/apiv1beta1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dataform/latest/apiv1beta1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/datafusion/apiv1": { + "api_shortname": "datafusion", "distribution_name": "cloud.google.com/go/datafusion/apiv1", "description": "Cloud Data Fusion API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/datafusion/latest/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/datafusion/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/datalabeling/apiv1beta1": { + "api_shortname": "datalabeling", "distribution_name": "cloud.google.com/go/datalabeling/apiv1beta1", "description": "Data Labeling API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/datalabeling/latest/apiv1beta1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/datalabeling/latest/apiv1beta1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/dataplex/apiv1": { + "api_shortname": "dataplex", "distribution_name": "cloud.google.com/go/dataplex/apiv1", "description": "Cloud Dataplex API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dataplex/latest/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dataplex/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, - "cloud.google.com/go/dataproc/apiv1": { - "distribution_name": "cloud.google.com/go/dataproc/apiv1", + "cloud.google.com/go/dataproc/v2/apiv1": { + "api_shortname": "dataproc", + "distribution_name": "cloud.google.com/go/dataproc/v2/apiv1", "description": "Cloud Dataproc API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go/dataproc/latest/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dataproc/v2/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/dataqna/apiv1alpha": { + "api_shortname": "dataqna", "distribution_name": "cloud.google.com/go/dataqna/apiv1alpha", "description": "Data QnA API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dataqna/latest/apiv1alpha", - "release_level": "alpha", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dataqna/latest/apiv1alpha", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/datastore": { + "api_shortname": "datastore", "distribution_name": "cloud.google.com/go/datastore", "description": "Cloud Datastore", - "language": "Go", + "language": "go", "client_library_type": "manual", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/datastore/latest", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/datastore/latest", + "release_level": "stable", "library_type": "GAPIC_MANUAL" }, "cloud.google.com/go/datastore/admin/apiv1": { + "api_shortname": "datastore", "distribution_name": "cloud.google.com/go/datastore/admin/apiv1", "description": "Cloud Datastore API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/datastore/latest/admin/apiv1", - "release_level": "alpha", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/datastore/latest/admin/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/datastream/apiv1": { + "api_shortname": "datastream", "distribution_name": "cloud.google.com/go/datastream/apiv1", "description": "Datastream API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/datastream/latest/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/datastream/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/datastream/apiv1alpha1": { + "api_shortname": "datastream", "distribution_name": "cloud.google.com/go/datastream/apiv1alpha1", "description": "Datastream API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/datastream/latest/apiv1alpha1", - "release_level": "alpha", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/datastream/latest/apiv1alpha1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/debugger/apiv2": { + "api_shortname": "clouddebugger", "distribution_name": "cloud.google.com/go/debugger/apiv2", "description": "Stackdriver Debugger API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/debugger/apiv2", - "release_level": "ga", + "client_documentation": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/debugger/apiv2", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/deploy/apiv1": { + "api_shortname": "clouddeploy", "distribution_name": "cloud.google.com/go/deploy/apiv1", - "description": "Google Cloud Deploy API", - "language": "Go", + "description": "Cloud Deploy API", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/deploy/latest/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/deploy/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/dialogflow/apiv2": { + "api_shortname": "dialogflow", "distribution_name": "cloud.google.com/go/dialogflow/apiv2", "description": "Dialogflow API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dialogflow/latest/apiv2", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dialogflow/latest/apiv2", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/dialogflow/apiv2beta1": { + "api_shortname": "dialogflow", "distribution_name": "cloud.google.com/go/dialogflow/apiv2beta1", "description": "Dialogflow API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dialogflow/latest/apiv2beta1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dialogflow/latest/apiv2beta1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/dialogflow/cx/apiv3": { + "api_shortname": "dialogflow", "distribution_name": "cloud.google.com/go/dialogflow/cx/apiv3", "description": "Dialogflow API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dialogflow/latest/cx/apiv3", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dialogflow/latest/cx/apiv3", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/dialogflow/cx/apiv3beta1": { + "api_shortname": "dialogflow", "distribution_name": "cloud.google.com/go/dialogflow/cx/apiv3beta1", "description": "Dialogflow API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dialogflow/latest/cx/apiv3beta1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dialogflow/latest/cx/apiv3beta1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/discoveryengine/apiv1": { + "api_shortname": "discoveryengine", + "distribution_name": "cloud.google.com/go/discoveryengine/apiv1", + "description": "Discovery Engine API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/discoveryengine/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/discoveryengine/apiv1beta": { + "api_shortname": "discoveryengine", 
"distribution_name": "cloud.google.com/go/discoveryengine/apiv1beta", "description": "Discovery Engine API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/discoveryengine/latest/apiv1beta", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/discoveryengine/latest/apiv1beta", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/dlp/apiv2": { + "api_shortname": "dlp", "distribution_name": "cloud.google.com/go/dlp/apiv2", "description": "Cloud Data Loss Prevention (DLP) API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dlp/latest/apiv2", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dlp/latest/apiv2", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/documentai/apiv1": { + "api_shortname": "documentai", "distribution_name": "cloud.google.com/go/documentai/apiv1", "description": "Cloud Document AI API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/documentai/latest/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/documentai/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/documentai/apiv1beta3": { + "api_shortname": "documentai", "distribution_name": "cloud.google.com/go/documentai/apiv1beta3", "description": "Cloud Document AI API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/documentai/latest/apiv1beta3", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/documentai/latest/apiv1beta3", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/domains/apiv1beta1": { + "api_shortname": "domains", "distribution_name": "cloud.google.com/go/domains/apiv1beta1", "description": "Cloud Domains API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/domains/latest/apiv1beta1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/domains/latest/apiv1beta1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/edgecontainer/apiv1": { + "api_shortname": "edgecontainer", "distribution_name": "cloud.google.com/go/edgecontainer/apiv1", "description": "Distributed Cloud Edge Container API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/edgecontainer/latest/apiv1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/edgecontainer/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/errorreporting": { + "api_shortname": "clouderrorreporting", "distribution_name": "cloud.google.com/go/errorreporting", "description": "Cloud Error Reporting API", - "language": "Go", + 
"language": "go", "client_library_type": "manual", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/errorreporting/latest", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/errorreporting/latest", + "release_level": "preview", "library_type": "GAPIC_MANUAL" }, "cloud.google.com/go/errorreporting/apiv1beta1": { + "api_shortname": "clouderrorreporting", "distribution_name": "cloud.google.com/go/errorreporting/apiv1beta1", "description": "Error Reporting API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/errorreporting/latest/apiv1beta1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/errorreporting/apiv1beta1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/essentialcontacts/apiv1": { + "api_shortname": "essentialcontacts", "distribution_name": "cloud.google.com/go/essentialcontacts/apiv1", "description": "Essential Contacts API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/essentialcontacts/latest/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/essentialcontacts/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/eventarc/apiv1": { + "api_shortname": "eventarc", "distribution_name": "cloud.google.com/go/eventarc/apiv1", "description": "Eventarc API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/eventarc/latest/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/eventarc/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/eventarc/publishing/apiv1": { + "api_shortname": "eventarcpublishing", "distribution_name": "cloud.google.com/go/eventarc/publishing/apiv1", "description": "Eventarc Publishing API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/eventarc/latest/publishing/apiv1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/eventarc/latest/publishing/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/filestore/apiv1": { + "api_shortname": "file", "distribution_name": "cloud.google.com/go/filestore/apiv1", "description": "Cloud Filestore API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/filestore/latest/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/filestore/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/firestore": { + "api_shortname": "firestore", "distribution_name": "cloud.google.com/go/firestore", "description": "Cloud Firestore API", - "language": "Go", + "language": "go", "client_library_type": "manual", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go/firestore/latest", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/firestore/latest", + "release_level": "stable", "library_type": "GAPIC_MANUAL" }, "cloud.google.com/go/firestore/apiv1": { + "api_shortname": "firestore", "distribution_name": "cloud.google.com/go/firestore/apiv1", "description": "Cloud Firestore API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/firestore/latest/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/firestore/apiv1/admin": { - "distribution_name": "cloud.google.com/go/firestore/apiv1/admin", - "description": "Cloud Firestore API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/firestore/latest/apiv1/admin", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/firestore/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/functions/apiv1": { + "api_shortname": "cloudfunctions", "distribution_name": "cloud.google.com/go/functions/apiv1", "description": "Cloud Functions API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/functions/latest/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/functions/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/functions/apiv2": { + "api_shortname": "cloudfunctions", "distribution_name": "cloud.google.com/go/functions/apiv2", "description": "Cloud Functions API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/functions/latest/apiv2", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/functions/latest/apiv2", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/functions/apiv2beta": { + "api_shortname": "cloudfunctions", "distribution_name": "cloud.google.com/go/functions/apiv2beta", "description": "Cloud Functions API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/functions/latest/apiv2beta", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/functions/latest/apiv2beta", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/functions/metadata": { + "api_shortname": "firestore-metadata", "distribution_name": "cloud.google.com/go/functions/metadata", "description": "Cloud Functions", - "language": "Go", + "language": "go", "client_library_type": "manual", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/functions/latest/metadata", - "release_level": "alpha", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/functions/latest/metadata", + "release_level": "preview", "library_type": "CORE" }, - "cloud.google.com/go/gaming/apiv1": { - "distribution_name": 
"cloud.google.com/go/gaming/apiv1", - "description": "Game Services API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/gaming/latest/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/gaming/apiv1beta": { - "distribution_name": "cloud.google.com/go/gaming/apiv1beta", - "description": "Game Services API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/gaming/latest/apiv1beta", - "release_level": "beta", - "library_type": "GAPIC_AUTO" - }, "cloud.google.com/go/gkebackup/apiv1": { + "api_shortname": "gkebackup", "distribution_name": "cloud.google.com/go/gkebackup/apiv1", "description": "Backup for GKE API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/gkebackup/latest/apiv1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/gkebackup/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/gkeconnect/gateway/apiv1beta1": { + "api_shortname": "connectgateway", "distribution_name": "cloud.google.com/go/gkeconnect/gateway/apiv1beta1", "description": "Connect Gateway API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/gkeconnect/latest/gateway/apiv1beta1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/gkeconnect/latest/gateway/apiv1beta1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/gkehub/apiv1beta1": { + "api_shortname": "gkehub", "distribution_name": "cloud.google.com/go/gkehub/apiv1beta1", "description": "GKE Hub API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/gkehub/latest/apiv1beta1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/gkehub/latest/apiv1beta1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/gkemulticloud/apiv1": { + "api_shortname": "gkemulticloud", "distribution_name": "cloud.google.com/go/gkemulticloud/apiv1", "description": "Anthos Multi-Cloud API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/gkemulticloud/latest/apiv1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/gkemulticloud/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/gsuiteaddons/apiv1": { + "api_shortname": "gsuiteaddons", "distribution_name": "cloud.google.com/go/gsuiteaddons/apiv1", "description": "Google Workspace Add-ons API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/gsuiteaddons/latest/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/gsuiteaddons/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, 
"cloud.google.com/go/iam": { + "api_shortname": "iam", "distribution_name": "cloud.google.com/go/iam", "description": "Cloud IAM", - "language": "Go", + "language": "go", "client_library_type": "manual", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/iam/latest", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/iam/latest", + "release_level": "stable", "library_type": "CORE" }, "cloud.google.com/go/iam/apiv1": { + "api_shortname": "iam-meta-api", "distribution_name": "cloud.google.com/go/iam/apiv1", "description": "IAM Meta API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/iam/latest/apiv1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/iam/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/iam/apiv2": { + "api_shortname": "iam", "distribution_name": "cloud.google.com/go/iam/apiv2", "description": "Identity and Access Management (IAM) API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/iam/latest/apiv2", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/iam/latest/apiv2", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/iam/credentials/apiv1": { + "api_shortname": "iamcredentials", "distribution_name": "cloud.google.com/go/iam/credentials/apiv1", "description": "IAM Service Account Credentials API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/iam/latest/credentials/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/iam/latest/credentials/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/iap/apiv1": { + "api_shortname": "iap", "distribution_name": "cloud.google.com/go/iap/apiv1", "description": "Cloud Identity-Aware Proxy API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/iap/latest/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/iap/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/ids/apiv1": { + "api_shortname": "ids", "distribution_name": "cloud.google.com/go/ids/apiv1", "description": "Cloud IDS API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/ids/latest/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/ids/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/iot/apiv1": { + "api_shortname": "cloudiot", "distribution_name": "cloud.google.com/go/iot/apiv1", "description": "Cloud IoT API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/iot/latest/apiv1", - "release_level": "ga", + 
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/iot/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/kms/apiv1": { + "api_shortname": "cloudkms", "distribution_name": "cloud.google.com/go/kms/apiv1", "description": "Cloud Key Management Service (KMS) API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/kms/latest/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/kms/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/kms/inventory/apiv1": { + "api_shortname": "kmsinventory", + "distribution_name": "cloud.google.com/go/kms/inventory/apiv1", + "description": "KMS Inventory API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/kms/latest/inventory/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/language/apiv1": { + "api_shortname": "language", "distribution_name": "cloud.google.com/go/language/apiv1", "description": "Cloud Natural Language API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/language/latest/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/language/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/language/apiv1beta2": { + "api_shortname": "language", "distribution_name": "cloud.google.com/go/language/apiv1beta2", "description": "Cloud Natural Language API", - "language": "Go", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/language/latest/apiv1beta2", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/language/apiv2": { + "api_shortname": "language", + "distribution_name": "cloud.google.com/go/language/apiv2", + "description": "Cloud Natural Language API", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/language/latest/apiv1beta2", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/language/latest/apiv2", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/lifesciences/apiv2beta": { + "api_shortname": "lifesciences", "distribution_name": "cloud.google.com/go/lifesciences/apiv2beta", "description": "Cloud Life Sciences API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/lifesciences/latest/apiv2beta", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/lifesciences/latest/apiv2beta", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/logging": { + "api_shortname": "logging", "distribution_name": "cloud.google.com/go/logging", "description": "Cloud Logging API", - "language": "Go", + "language": "go", "client_library_type": "manual", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go/logging/latest", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/logging/latest", + "release_level": "stable", "library_type": "GAPIC_MANUAL" }, "cloud.google.com/go/logging/apiv2": { + "api_shortname": "logging", "distribution_name": "cloud.google.com/go/logging/apiv2", "description": "Cloud Logging API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/logging/latest/apiv2", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/logging/latest/apiv2", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/longrunning/autogen": { + "api_shortname": "longrunning", "distribution_name": "cloud.google.com/go/longrunning/autogen", "description": "Long Running Operations API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/longrunning/latest/autogen", - "release_level": "alpha", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/longrunning/latest/autogen", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/managedidentities/apiv1": { + "api_shortname": "managedidentities", "distribution_name": "cloud.google.com/go/managedidentities/apiv1", "description": "Managed Service for Microsoft Active Directory API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/managedidentities/latest/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/managedidentities/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/maps/addressvalidation/apiv1": { + "api_shortname": "addressvalidation", "distribution_name": "cloud.google.com/go/maps/addressvalidation/apiv1", "description": "Address Validation API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/maps/latest/addressvalidation/apiv1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/maps/latest/addressvalidation/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/maps/mapsplatformdatasets/apiv1alpha": { + "api_shortname": "mapsplatformdatasets", "distribution_name": "cloud.google.com/go/maps/mapsplatformdatasets/apiv1alpha", "description": "Maps Platform Datasets API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/maps/latest/mapsplatformdatasets/apiv1alpha", - "release_level": "alpha", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/maps/latest/mapsplatformdatasets/apiv1alpha", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/maps/places/apiv1": { + "api_shortname": "places", + "distribution_name": "cloud.google.com/go/maps/places/apiv1", + "description": "Places API (New)", + "language": "go", + "client_library_type": "generated", + "client_documentation": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go/maps/latest/places/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/maps/routing/apiv2": { + "api_shortname": "routes", "distribution_name": "cloud.google.com/go/maps/routing/apiv2", "description": "Routes API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/maps/latest/routing/apiv2", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/maps/latest/routing/apiv2", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/mediatranslation/apiv1beta1": { + "api_shortname": "mediatranslation", "distribution_name": "cloud.google.com/go/mediatranslation/apiv1beta1", "description": "Media Translation API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/mediatranslation/latest/apiv1beta1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/mediatranslation/latest/apiv1beta1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/memcache/apiv1": { + "api_shortname": "memcache", "distribution_name": "cloud.google.com/go/memcache/apiv1", "description": "Cloud Memorystore for Memcached API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/memcache/latest/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/memcache/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/memcache/apiv1beta2": { + "api_shortname": "memcache", "distribution_name": "cloud.google.com/go/memcache/apiv1beta2", "description": "Cloud Memorystore for Memcached API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/memcache/latest/apiv1beta2", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/memcache/latest/apiv1beta2", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/metastore/apiv1": { + "api_shortname": "metastore", "distribution_name": "cloud.google.com/go/metastore/apiv1", "description": "Dataproc Metastore API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/metastore/latest/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/metastore/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/metastore/apiv1alpha": { + "api_shortname": "metastore", "distribution_name": "cloud.google.com/go/metastore/apiv1alpha", "description": "Dataproc Metastore API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/metastore/latest/apiv1alpha", - "release_level": "alpha", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/metastore/latest/apiv1alpha", + "release_level": 
"preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/metastore/apiv1beta": { + "api_shortname": "metastore", "distribution_name": "cloud.google.com/go/metastore/apiv1beta", "description": "Dataproc Metastore API", - "language": "Go", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/metastore/latest/apiv1beta", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/migrationcenter/apiv1": { + "api_shortname": "migrationcenter", + "distribution_name": "cloud.google.com/go/migrationcenter/apiv1", + "description": "Migration Center API", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/metastore/latest/apiv1beta", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/migrationcenter/latest/apiv1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/monitoring/apiv3/v2": { + "api_shortname": "monitoring", "distribution_name": "cloud.google.com/go/monitoring/apiv3/v2", "description": "Cloud Monitoring API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/monitoring/latest/apiv3/v2", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/monitoring/latest/apiv3/v2", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/monitoring/dashboard/apiv1": { + "api_shortname": "monitoring", "distribution_name": "cloud.google.com/go/monitoring/dashboard/apiv1", "description": "Cloud Monitoring API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/monitoring/latest/dashboard/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/monitoring/latest/dashboard/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/monitoring/metricsscope/apiv1": { + "api_shortname": "monitoring", "distribution_name": "cloud.google.com/go/monitoring/metricsscope/apiv1", "description": "Cloud Monitoring API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/monitoring/latest/metricsscope/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/monitoring/latest/metricsscope/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/netapp/apiv1": { + "api_shortname": "netapp", + "distribution_name": "cloud.google.com/go/netapp/apiv1", + "description": "NetApp API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/netapp/latest/apiv1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/networkconnectivity/apiv1": { + "api_shortname": "networkconnectivity", "distribution_name": "cloud.google.com/go/networkconnectivity/apiv1", "description": "Network Connectivity API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go/networkconnectivity/latest/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/networkconnectivity/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/networkconnectivity/apiv1alpha1": { + "api_shortname": "networkconnectivity", "distribution_name": "cloud.google.com/go/networkconnectivity/apiv1alpha1", "description": "Network Connectivity API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/networkconnectivity/latest/apiv1alpha1", - "release_level": "alpha", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/networkconnectivity/latest/apiv1alpha1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/networkmanagement/apiv1": { + "api_shortname": "networkmanagement", "distribution_name": "cloud.google.com/go/networkmanagement/apiv1", "description": "Network Management API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/networkmanagement/latest/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/networkmanagement/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/networksecurity/apiv1beta1": { + "api_shortname": "networksecurity", "distribution_name": "cloud.google.com/go/networksecurity/apiv1beta1", "description": "Network Security API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/networksecurity/latest/apiv1beta1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/networksecurity/latest/apiv1beta1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/notebooks/apiv1": { + "api_shortname": "notebooks", "distribution_name": "cloud.google.com/go/notebooks/apiv1", "description": "Notebooks API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/notebooks/latest/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/notebooks/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/notebooks/apiv1beta1": { + "api_shortname": "notebooks", "distribution_name": "cloud.google.com/go/notebooks/apiv1beta1", "description": "Notebooks API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/notebooks/latest/apiv1beta1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/notebooks/latest/apiv1beta1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/notebooks/apiv2": { + "api_shortname": "notebooks", + "distribution_name": "cloud.google.com/go/notebooks/apiv2", + "description": "Notebooks API", + "language": "go", + "client_library_type": "generated", + "client_documentation": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go/notebooks/latest/apiv2", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/optimization/apiv1": { + "api_shortname": "cloudoptimization", "distribution_name": "cloud.google.com/go/optimization/apiv1", "description": "Cloud Optimization API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/optimization/latest/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/optimization/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/orchestration/airflow/service/apiv1": { + "api_shortname": "composer", "distribution_name": "cloud.google.com/go/orchestration/airflow/service/apiv1", "description": "Cloud Composer API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/orchestration/latest/airflow/service/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/orchestration/latest/airflow/service/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/orgpolicy/apiv2": { + "api_shortname": "orgpolicy", "distribution_name": "cloud.google.com/go/orgpolicy/apiv2", "description": "Organization Policy API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/orgpolicy/latest/apiv2", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/orgpolicy/latest/apiv2", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/osconfig/agentendpoint/apiv1": { + "api_shortname": "osconfig", "distribution_name": "cloud.google.com/go/osconfig/agentendpoint/apiv1", "description": "OS Config API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/osconfig/latest/agentendpoint/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/osconfig/latest/agentendpoint/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/osconfig/agentendpoint/apiv1beta": { + "api_shortname": "osconfig", "distribution_name": "cloud.google.com/go/osconfig/agentendpoint/apiv1beta", "description": "OS Config API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/osconfig/latest/agentendpoint/apiv1beta", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/osconfig/latest/agentendpoint/apiv1beta", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/osconfig/apiv1": { + "api_shortname": "osconfig", "distribution_name": "cloud.google.com/go/osconfig/apiv1", "description": "OS Config API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/osconfig/latest/apiv1", - "release_level": "ga", + "client_documentation": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go/osconfig/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/osconfig/apiv1alpha": { + "api_shortname": "osconfig", "distribution_name": "cloud.google.com/go/osconfig/apiv1alpha", "description": "OS Config API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/osconfig/latest/apiv1alpha", - "release_level": "alpha", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/osconfig/latest/apiv1alpha", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/osconfig/apiv1beta": { + "api_shortname": "osconfig", "distribution_name": "cloud.google.com/go/osconfig/apiv1beta", "description": "OS Config API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/osconfig/latest/apiv1beta", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/osconfig/latest/apiv1beta", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/oslogin/apiv1": { + "api_shortname": "oslogin", "distribution_name": "cloud.google.com/go/oslogin/apiv1", "description": "Cloud OS Login API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/oslogin/latest/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/oslogin/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/oslogin/apiv1beta": { + "api_shortname": "oslogin", "distribution_name": "cloud.google.com/go/oslogin/apiv1beta", "description": "Cloud OS Login API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/oslogin/latest/apiv1beta", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/oslogin/latest/apiv1beta", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/phishingprotection/apiv1beta1": { + "api_shortname": "phishingprotection", "distribution_name": "cloud.google.com/go/phishingprotection/apiv1beta1", "description": "Phishing Protection API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/phishingprotection/latest/apiv1beta1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/phishingprotection/latest/apiv1beta1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/policysimulator/apiv1": { + "api_shortname": "policysimulator", + "distribution_name": "cloud.google.com/go/policysimulator/apiv1", + "description": "Policy Simulator API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/policysimulator/latest/apiv1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/policytroubleshooter/apiv1": { + "api_shortname": "policytroubleshooter", "distribution_name": 
"cloud.google.com/go/policytroubleshooter/apiv1", "description": "Policy Troubleshooter API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/policytroubleshooter/latest/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/policytroubleshooter/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/policytroubleshooter/iam/apiv3": { + "api_shortname": "policytroubleshooter", + "distribution_name": "cloud.google.com/go/policytroubleshooter/iam/apiv3", + "description": "Policy Troubleshooter API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/policytroubleshooter/latest/iam/apiv3", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/privatecatalog/apiv1beta1": { + "api_shortname": "cloudprivatecatalog", "distribution_name": "cloud.google.com/go/privatecatalog/apiv1beta1", "description": "Cloud Private Catalog API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/privatecatalog/latest/apiv1beta1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/privatecatalog/latest/apiv1beta1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/profiler": { + "api_shortname": "cloudprofiler", "distribution_name": "cloud.google.com/go/profiler", "description": "Cloud Profiler", - "language": "Go", + "language": "go", "client_library_type": "manual", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/profiler/latest", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/profiler/latest", + "release_level": "stable", "library_type": "AGENT" }, "cloud.google.com/go/pubsub": { + "api_shortname": "pubsub", "distribution_name": "cloud.google.com/go/pubsub", "description": "Cloud PubSub", - "language": "Go", + "language": "go", "client_library_type": "manual", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/pubsub/latest", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/pubsub/latest", + "release_level": "stable", "library_type": "GAPIC_MANUAL" }, "cloud.google.com/go/pubsub/apiv1": { + "api_shortname": "pubsub", "distribution_name": "cloud.google.com/go/pubsub/apiv1", "description": "Cloud Pub/Sub API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/pubsub/latest/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/pubsub/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/pubsublite": { + "api_shortname": "pubsublite", "distribution_name": "cloud.google.com/go/pubsublite", "description": "Cloud PubSub Lite", - "language": "Go", + "language": "go", "client_library_type": "manual", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/pubsublite/latest", - "release_level": "ga", + "client_documentation": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go/pubsublite/latest", + "release_level": "stable", "library_type": "GAPIC_MANUAL" }, "cloud.google.com/go/pubsublite/apiv1": { + "api_shortname": "pubsublite", "distribution_name": "cloud.google.com/go/pubsublite/apiv1", "description": "Pub/Sub Lite API", - "language": "Go", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/pubsublite/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/rapidmigrationassessment/apiv1": { + "api_shortname": "rapidmigrationassessment", + "distribution_name": "cloud.google.com/go/rapidmigrationassessment/apiv1", + "description": "Rapid Migration Assessment API", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/pubsublite/latest/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/rapidmigrationassessment/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/recaptchaenterprise/v2/apiv1": { + "api_shortname": "recaptchaenterprise", "distribution_name": "cloud.google.com/go/recaptchaenterprise/v2/apiv1", "description": "reCAPTCHA Enterprise API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/recaptchaenterprise/v2/latest/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/recaptchaenterprise/v2/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/recaptchaenterprise/v2/apiv1beta1": { + "api_shortname": "recaptchaenterprise", "distribution_name": "cloud.google.com/go/recaptchaenterprise/v2/apiv1beta1", "description": "reCAPTCHA Enterprise API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/recaptchaenterprise/v2/latest/apiv1beta1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/recaptchaenterprise/v2/latest/apiv1beta1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/recommendationengine/apiv1beta1": { + "api_shortname": "recommendationengine", "distribution_name": "cloud.google.com/go/recommendationengine/apiv1beta1", "description": "Recommendations AI", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/recommendationengine/latest/apiv1beta1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/recommendationengine/latest/apiv1beta1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/recommender/apiv1": { + "api_shortname": "recommender", "distribution_name": "cloud.google.com/go/recommender/apiv1", "description": "Recommender API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/recommender/latest/apiv1", - "release_level": "ga", + "client_documentation": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go/recommender/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/recommender/apiv1beta1": { + "api_shortname": "recommender", "distribution_name": "cloud.google.com/go/recommender/apiv1beta1", "description": "Recommender API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/recommender/latest/apiv1beta1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/recommender/latest/apiv1beta1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/redis/apiv1": { + "api_shortname": "redis", "distribution_name": "cloud.google.com/go/redis/apiv1", "description": "Google Cloud Memorystore for Redis API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/redis/latest/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/redis/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/redis/apiv1beta1": { + "api_shortname": "redis", "distribution_name": "cloud.google.com/go/redis/apiv1beta1", "description": "Google Cloud Memorystore for Redis API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/redis/latest/apiv1beta1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/redis/latest/apiv1beta1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/resourcemanager/apiv2": { + "api_shortname": "cloudresourcemanager", "distribution_name": "cloud.google.com/go/resourcemanager/apiv2", "description": "Cloud Resource Manager API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/resourcemanager/latest/apiv2", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/resourcemanager/latest/apiv2", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/resourcemanager/apiv3": { + "api_shortname": "cloudresourcemanager", "distribution_name": "cloud.google.com/go/resourcemanager/apiv3", "description": "Cloud Resource Manager API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/resourcemanager/latest/apiv3", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/resourcemanager/latest/apiv3", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/resourcesettings/apiv1": { + "api_shortname": "resourcesettings", "distribution_name": "cloud.google.com/go/resourcesettings/apiv1", "description": "Resource Settings API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/resourcesettings/latest/apiv1", - "release_level": "ga", + "client_documentation": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go/resourcesettings/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/retail/apiv2": { + "api_shortname": "retail", "distribution_name": "cloud.google.com/go/retail/apiv2", "description": "Retail API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/retail/latest/apiv2", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/retail/latest/apiv2", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/retail/apiv2alpha": { + "api_shortname": "retail", "distribution_name": "cloud.google.com/go/retail/apiv2alpha", "description": "Retail API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/retail/latest/apiv2alpha", - "release_level": "alpha", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/retail/latest/apiv2alpha", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/retail/apiv2beta": { + "api_shortname": "retail", "distribution_name": "cloud.google.com/go/retail/apiv2beta", "description": "Retail API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/retail/latest/apiv2beta", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/retail/latest/apiv2beta", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/rpcreplay": { + "api_shortname": "rpcreplay", "distribution_name": "cloud.google.com/go/rpcreplay", "description": "RPC Replay", - "language": "Go", + "language": "go", "client_library_type": "manual", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/rpcreplay", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/rpcreplay", + "release_level": "stable", "library_type": "OTHER" }, "cloud.google.com/go/run/apiv2": { + "api_shortname": "run", "distribution_name": "cloud.google.com/go/run/apiv2", "description": "Cloud Run Admin API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/run/latest/apiv2", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/run/latest/apiv2", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/scheduler/apiv1": { + "api_shortname": "cloudscheduler", "distribution_name": "cloud.google.com/go/scheduler/apiv1", "description": "Cloud Scheduler API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/scheduler/latest/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/scheduler/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/scheduler/apiv1beta1": { + "api_shortname": "cloudscheduler", "distribution_name": "cloud.google.com/go/scheduler/apiv1beta1", "description": "Cloud Scheduler 
API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/scheduler/latest/apiv1beta1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/scheduler/latest/apiv1beta1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/secretmanager/apiv1": { + "api_shortname": "secretmanager", "distribution_name": "cloud.google.com/go/secretmanager/apiv1", "description": "Secret Manager API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/secretmanager/latest/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/secretmanager/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/security/privateca/apiv1": { + "api_shortname": "privateca", "distribution_name": "cloud.google.com/go/security/privateca/apiv1", "description": "Certificate Authority API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/security/latest/privateca/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/security/privateca/apiv1beta1": { - "distribution_name": "cloud.google.com/go/security/privateca/apiv1beta1", - "description": "Certificate Authority API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/security/latest/privateca/apiv1beta1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/security/latest/privateca/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/security/publicca/apiv1beta1": { + "api_shortname": "publicca", "distribution_name": "cloud.google.com/go/security/publicca/apiv1beta1", "description": "Public Certificate Authority API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/security/latest/publicca/apiv1beta1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/security/latest/publicca/apiv1beta1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/securitycenter/apiv1": { + "api_shortname": "securitycenter", "distribution_name": "cloud.google.com/go/securitycenter/apiv1", "description": "Security Command Center API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/securitycenter/latest/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/securitycenter/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/securitycenter/apiv1beta1": { + "api_shortname": "securitycenter", "distribution_name": "cloud.google.com/go/securitycenter/apiv1beta1", "description": "Security Command Center API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go/securitycenter/latest/apiv1beta1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/securitycenter/latest/apiv1beta1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/securitycenter/apiv1p1beta1": { + "api_shortname": "securitycenter", "distribution_name": "cloud.google.com/go/securitycenter/apiv1p1beta1", "description": "Security Command Center API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/securitycenter/latest/apiv1p1beta1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/securitycenter/latest/apiv1p1beta1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/securitycenter/settings/apiv1beta1": { + "api_shortname": "securitycenter", "distribution_name": "cloud.google.com/go/securitycenter/settings/apiv1beta1", "description": "Cloud Security Command Center API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/securitycenter/latest/settings/apiv1beta1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/securitycenter/latest/settings/apiv1beta1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/servicecontrol/apiv1": { + "api_shortname": "servicecontrol", "distribution_name": "cloud.google.com/go/servicecontrol/apiv1", "description": "Service Control API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/servicecontrol/latest/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/servicecontrol/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/servicedirectory/apiv1": { + "api_shortname": "servicedirectory", "distribution_name": "cloud.google.com/go/servicedirectory/apiv1", "description": "Service Directory API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/servicedirectory/latest/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/servicedirectory/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/servicedirectory/apiv1beta1": { + "api_shortname": "servicedirectory", "distribution_name": "cloud.google.com/go/servicedirectory/apiv1beta1", "description": "Service Directory API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/servicedirectory/latest/apiv1beta1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/servicedirectory/latest/apiv1beta1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/servicemanagement/apiv1": { + "api_shortname": "servicemanagement", "distribution_name": "cloud.google.com/go/servicemanagement/apiv1", "description": "Service Management API", - 
"language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/servicemanagement/latest/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/servicemanagement/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/serviceusage/apiv1": { + "api_shortname": "serviceusage", "distribution_name": "cloud.google.com/go/serviceusage/apiv1", "description": "Service Usage API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/serviceusage/latest/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/serviceusage/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/shell/apiv1": { + "api_shortname": "cloudshell", "distribution_name": "cloud.google.com/go/shell/apiv1", "description": "Cloud Shell API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/shell/latest/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/shell/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/spanner": { + "api_shortname": "spanner", "distribution_name": "cloud.google.com/go/spanner", "description": "Cloud Spanner", - "language": "Go", + "language": "go", "client_library_type": "manual", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/spanner/latest", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/spanner/latest", + "release_level": "stable", "library_type": "GAPIC_MANUAL" }, "cloud.google.com/go/spanner/admin/database/apiv1": { + "api_shortname": "spanner", "distribution_name": "cloud.google.com/go/spanner/admin/database/apiv1", "description": "Cloud Spanner API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/spanner/latest/admin/database/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/spanner/latest/admin/database/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/spanner/admin/instance/apiv1": { + "api_shortname": "spanner", "distribution_name": "cloud.google.com/go/spanner/admin/instance/apiv1", "description": "Cloud Spanner Instance Admin API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/spanner/latest/admin/instance/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/spanner/latest/admin/instance/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/spanner/apiv1": { + "api_shortname": "spanner", "distribution_name": "cloud.google.com/go/spanner/apiv1", "description": "Cloud Spanner API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/spanner/latest/apiv1", - 
"release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/spanner/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/speech/apiv1": { + "api_shortname": "speech", "distribution_name": "cloud.google.com/go/speech/apiv1", "description": "Cloud Speech-to-Text API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/speech/latest/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/speech/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/speech/apiv1p1beta1": { + "api_shortname": "speech", "distribution_name": "cloud.google.com/go/speech/apiv1p1beta1", "description": "Cloud Speech-to-Text API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/speech/latest/apiv1p1beta1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/speech/latest/apiv1p1beta1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/speech/apiv2": { + "api_shortname": "speech", "distribution_name": "cloud.google.com/go/speech/apiv2", "description": "Cloud Speech-to-Text API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/speech/latest/apiv2", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/speech/latest/apiv2", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/storage": { + "api_shortname": "storage", "distribution_name": "cloud.google.com/go/storage", "description": "Cloud Storage (GCS)", - "language": "Go", + "language": "go", "client_library_type": "manual", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/storage/latest", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/storage/latest", + "release_level": "stable", "library_type": "GAPIC_MANUAL" }, "cloud.google.com/go/storage/internal/apiv2": { + "api_shortname": "storage", "distribution_name": "cloud.google.com/go/storage/internal/apiv2", "description": "Cloud Storage API", - "language": "Go", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/storage/latest/internal/apiv2", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/storageinsights/apiv1": { + "api_shortname": "storageinsights", + "distribution_name": "cloud.google.com/go/storageinsights/apiv1", + "description": "Storage Insights API", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/storage/latest/internal/apiv2", - "release_level": "alpha", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/storageinsights/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/storagetransfer/apiv1": { + "api_shortname": "storagetransfer", "distribution_name": "cloud.google.com/go/storagetransfer/apiv1", "description": 
"Storage Transfer API", - "language": "Go", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/storagetransfer/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/support/apiv2": { + "api_shortname": "cloudsupport", + "distribution_name": "cloud.google.com/go/support/apiv2", + "description": "Google Cloud Support API", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/storagetransfer/latest/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/support/latest/apiv2", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/talent/apiv4": { + "api_shortname": "jobs", "distribution_name": "cloud.google.com/go/talent/apiv4", "description": "Cloud Talent Solution API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/talent/latest/apiv4", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/talent/latest/apiv4", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/talent/apiv4beta1": { + "api_shortname": "jobs", "distribution_name": "cloud.google.com/go/talent/apiv4beta1", "description": "Cloud Talent Solution API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/talent/latest/apiv4beta1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/talent/latest/apiv4beta1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/texttospeech/apiv1": { + "api_shortname": "texttospeech", "distribution_name": "cloud.google.com/go/texttospeech/apiv1", "description": "Cloud Text-to-Speech API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/texttospeech/latest/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/texttospeech/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/tpu/apiv1": { + "api_shortname": "tpu", "distribution_name": "cloud.google.com/go/tpu/apiv1", "description": "Cloud TPU API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/tpu/latest/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/tpu/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/trace/apiv1": { + "api_shortname": "cloudtrace", "distribution_name": "cloud.google.com/go/trace/apiv1", "description": "Stackdriver Trace API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/trace/latest/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/trace/latest/apiv1", + "release_level": "stable", "library_type": 
"GAPIC_AUTO" }, "cloud.google.com/go/trace/apiv2": { + "api_shortname": "cloudtrace", "distribution_name": "cloud.google.com/go/trace/apiv2", "description": "Stackdriver Trace API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/trace/latest/apiv2", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/trace/latest/apiv2", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/translate/apiv3": { + "api_shortname": "translate", "distribution_name": "cloud.google.com/go/translate/apiv3", "description": "Cloud Translation API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/translate/latest/apiv3", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/translate/latest/apiv3", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/video/livestream/apiv1": { + "api_shortname": "livestream", "distribution_name": "cloud.google.com/go/video/livestream/apiv1", "description": "Live Stream API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/video/latest/livestream/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/video/latest/livestream/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/video/stitcher/apiv1": { + "api_shortname": "videostitcher", "distribution_name": "cloud.google.com/go/video/stitcher/apiv1", "description": "Video Stitcher API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/video/latest/stitcher/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/video/stitcher/apiv1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/video/transcoder/apiv1": { + "api_shortname": "transcoder", "distribution_name": "cloud.google.com/go/video/transcoder/apiv1", "description": "Transcoder API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/video/latest/transcoder/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/video/latest/transcoder/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/videointelligence/apiv1": { + "api_shortname": "videointelligence", "distribution_name": "cloud.google.com/go/videointelligence/apiv1", "description": "Cloud Video Intelligence API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/videointelligence/latest/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/videointelligence/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/videointelligence/apiv1beta2": { + "api_shortname": "videointelligence", "distribution_name": 
"cloud.google.com/go/videointelligence/apiv1beta2", "description": "Google Cloud Video Intelligence API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/videointelligence/latest/apiv1beta2", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/videointelligence/latest/apiv1beta2", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/videointelligence/apiv1p3beta1": { + "api_shortname": "videointelligence", "distribution_name": "cloud.google.com/go/videointelligence/apiv1p3beta1", "description": "Cloud Video Intelligence API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/videointelligence/latest/apiv1p3beta1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/videointelligence/latest/apiv1p3beta1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/vision/v2/apiv1": { + "api_shortname": "vision", "distribution_name": "cloud.google.com/go/vision/v2/apiv1", "description": "Cloud Vision API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/vision/v2/latest/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/vision/v2/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/vision/v2/apiv1p1beta1": { + "api_shortname": "vision", "distribution_name": "cloud.google.com/go/vision/v2/apiv1p1beta1", "description": "Cloud Vision API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/vision/v2/latest/apiv1p1beta1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/vision/v2/latest/apiv1p1beta1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/vmmigration/apiv1": { + "api_shortname": "vmmigration", "distribution_name": "cloud.google.com/go/vmmigration/apiv1", "description": "VM Migration API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/vmmigration/latest/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/vmmigration/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/vmwareengine/apiv1": { + "api_shortname": "vmwareengine", "distribution_name": "cloud.google.com/go/vmwareengine/apiv1", "description": "VMware Engine API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/vmwareengine/latest/apiv1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/vmwareengine/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/vpcaccess/apiv1": { + "api_shortname": "vpcaccess", "distribution_name": "cloud.google.com/go/vpcaccess/apiv1", "description": "Serverless VPC 
Access API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/vpcaccess/latest/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/vpcaccess/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/webrisk/apiv1": { + "api_shortname": "webrisk", "distribution_name": "cloud.google.com/go/webrisk/apiv1", "description": "Web Risk API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/webrisk/latest/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/webrisk/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/webrisk/apiv1beta1": { + "api_shortname": "webrisk", "distribution_name": "cloud.google.com/go/webrisk/apiv1beta1", "description": "Web Risk API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/webrisk/latest/apiv1beta1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/webrisk/latest/apiv1beta1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/websecurityscanner/apiv1": { + "api_shortname": "websecurityscanner", "distribution_name": "cloud.google.com/go/websecurityscanner/apiv1", "description": "Web Security Scanner API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/websecurityscanner/latest/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/websecurityscanner/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/workflows/apiv1": { + "api_shortname": "workflows", "distribution_name": "cloud.google.com/go/workflows/apiv1", "description": "Workflows API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/workflows/latest/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/workflows/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/workflows/apiv1beta": { + "api_shortname": "workflows", "distribution_name": "cloud.google.com/go/workflows/apiv1beta", "description": "Workflows API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/workflows/latest/apiv1beta", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/workflows/latest/apiv1beta", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/workflows/executions/apiv1": { + "api_shortname": "workflowexecutions", "distribution_name": "cloud.google.com/go/workflows/executions/apiv1", "description": "Workflow Executions API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go/workflows/latest/executions/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/workflows/latest/executions/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/workflows/executions/apiv1beta": { + "api_shortname": "workflowexecutions", "distribution_name": "cloud.google.com/go/workflows/executions/apiv1beta", "description": "Workflow Executions API", - "language": "Go", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/workflows/latest/executions/apiv1beta", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/workstations/apiv1": { + "api_shortname": "workstations", + "distribution_name": "cloud.google.com/go/workstations/apiv1", + "description": "Cloud Workstations API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/workstations/latest/apiv1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/workstations/apiv1beta": { + "api_shortname": "workstations", + "distribution_name": "cloud.google.com/go/workstations/apiv1beta", + "description": "Cloud Workstations API", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/workflows/latest/executions/apiv1beta", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/workstations/latest/apiv1beta", + "release_level": "preview", "library_type": "GAPIC_AUTO" } } diff -Nru temporal-1.21.5-1/src/vendor/cloud.google.com/go/internal/README.md temporal-1.22.5/src/vendor/cloud.google.com/go/internal/README.md --- temporal-1.21.5-1/src/vendor/cloud.google.com/go/internal/README.md 2023-09-29 14:03:30.000000000 +0000 +++ temporal-1.22.5/src/vendor/cloud.google.com/go/internal/README.md 2024-02-23 09:46:08.000000000 +0000 @@ -17,27 +17,13 @@ list of packages and use the `.repo-metadata.json` files to get the additional metadata required. For now, `.repo-metadata-full.json` includes everything. -## cloudbuild.yaml - -To kick off a build locally run from the repo root: - -```bash -gcloud builds submit --project=cloud-devrel-kokoro-resources --config=internal/cloudbuild.yaml -``` - ### Updating OwlBot SHA -You may want to manually update the which version of the post processor will be -used -- to do this you need to update the SHA in the OwlBot lock file. Start by -running the following commands: - -```bash -docker pull gcr.io/cloud-devrel-public-resources/owlbot-go:latest -docker inspect --format='{{index .RepoDigests 0}}' gcr.io/cloud-devrel-public-resources/owlbot-go:latest -``` +You may want to manually update the which version of the post-processor will be +used -- to do this you need to update the SHA in the OwlBot lock file. -This will give you a SHA. You can use this value to update the value in -`.github/.OwlBot.lock.yaml`. +See the [postprocessor/README](postprocessor/README.md) for detailed +instructions. *Note*: OwlBot will eventually open a pull request to update this value if it discovers a new version of the container. 
diff -Nru temporal-1.21.5-1/src/vendor/cloud.google.com/go/internal/cloudbuild.yaml temporal-1.22.5/src/vendor/cloud.google.com/go/internal/cloudbuild.yaml --- temporal-1.21.5-1/src/vendor/cloud.google.com/go/internal/cloudbuild.yaml 2023-09-29 14:03:30.000000000 +0000 +++ temporal-1.22.5/src/vendor/cloud.google.com/go/internal/cloudbuild.yaml 1970-01-01 00:00:00.000000000 +0000 @@ -1,25 +0,0 @@ -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# note: /workspace is a special directory in the docker image where all the files in this folder -# get placed on your behalf - -timeout: 7200s # 2 hours -steps: -- name: gcr.io/cloud-builders/docker - args: ['build', '-t', 'gcr.io/cloud-devrel-public-resources/owlbot-go', '-f', 'postprocessor/Dockerfile', '.'] - dir: internal - -images: -- gcr.io/cloud-devrel-public-resources/owlbot-go:latest diff -Nru temporal-1.21.5-1/src/vendor/cloud.google.com/go/internal/retry.go temporal-1.22.5/src/vendor/cloud.google.com/go/internal/retry.go --- temporal-1.21.5-1/src/vendor/cloud.google.com/go/internal/retry.go 2023-09-29 14:03:30.000000000 +0000 +++ temporal-1.22.5/src/vendor/cloud.google.com/go/internal/retry.go 2024-02-23 09:46:08.000000000 +0000 @@ -20,7 +20,6 @@ "time" gax "github.com/googleapis/gax-go/v2" - "google.golang.org/grpc/status" ) // Retry calls the supplied function f repeatedly according to the provided @@ -75,11 +74,3 @@ func (e wrappedCallErr) Is(err error) bool { return e.ctxErr == err || e.wrappedErr == err } - -// GRPCStatus allows the wrapped error to be used with status.FromError. -func (e wrappedCallErr) GRPCStatus() *status.Status { - if s, ok := status.FromError(e.wrappedErr); ok { - return s - } - return nil -} diff -Nru temporal-1.21.5-1/src/vendor/cloud.google.com/go/storage/CHANGES.md temporal-1.22.5/src/vendor/cloud.google.com/go/storage/CHANGES.md --- temporal-1.21.5-1/src/vendor/cloud.google.com/go/storage/CHANGES.md 2023-09-29 14:03:30.000000000 +0000 +++ temporal-1.22.5/src/vendor/cloud.google.com/go/storage/CHANGES.md 2024-02-23 09:46:08.000000000 +0000 @@ -1,6 +1,29 @@ # Changes +## [1.30.1](https://github.com/googleapis/google-cloud-go/compare/storage/v1.30.0...storage/v1.30.1) (2023-03-21) + + +### Bug Fixes + +* **storage:** Retract versions with Copier bug ([#7583](https://github.com/googleapis/google-cloud-go/issues/7583)) ([9c10b6f](https://github.com/googleapis/google-cloud-go/commit/9c10b6f8a54cb8447260148b5e4a9b5160281020)) + * Versions v1.25.0-v1.27.0 are retracted due to [#6857](https://github.com/googleapis/google-cloud-go/issues/6857). 
+* **storage:** SignedURL v4 allows headers with colons in value ([#7603](https://github.com/googleapis/google-cloud-go/issues/7603)) ([6b50f9b](https://github.com/googleapis/google-cloud-go/commit/6b50f9b368f5b271ade1706c342865cef46712e6)) + +## [1.30.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.29.0...storage/v1.30.0) (2023-03-15) + + +### Features + +* **storage/internal:** Update routing annotation for CreateBucketRequest docs: Add support for end-to-end checksumming in the gRPC WriteObject flow feat!: BREAKING CHANGE - renaming Notification to NotificationConfig ([2fef56f](https://github.com/googleapis/google-cloud-go/commit/2fef56f75a63dc4ff6e0eea56c7b26d4831c8e27)) +* **storage:** Json downloads ([#7158](https://github.com/googleapis/google-cloud-go/issues/7158)) ([574a86c](https://github.com/googleapis/google-cloud-go/commit/574a86c614445f8c3f5a54446820df774c31cd47)) +* **storage:** Update iam and longrunning deps ([91a1f78](https://github.com/googleapis/google-cloud-go/commit/91a1f784a109da70f63b96414bba8a9b4254cddd)) + + +### Bug Fixes + +* **storage:** Specify credentials with STORAGE_EMULATOR_HOST ([#7271](https://github.com/googleapis/google-cloud-go/issues/7271)) ([940ae15](https://github.com/googleapis/google-cloud-go/commit/940ae15f725ff384e345e627feb03d22e1fd8db5)) + ## [1.29.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.28.1...storage/v1.29.0) (2023-01-19) diff -Nru temporal-1.21.5-1/src/vendor/cloud.google.com/go/storage/client.go temporal-1.22.5/src/vendor/cloud.google.com/go/storage/client.go --- temporal-1.21.5-1/src/vendor/cloud.google.com/go/storage/client.go 2023-09-29 14:03:30.000000000 +0000 +++ temporal-1.22.5/src/vendor/cloud.google.com/go/storage/client.go 2024-02-23 09:46:08.000000000 +0000 @@ -19,9 +19,9 @@ "io" "time" + "cloud.google.com/go/iam/apiv1/iampb" gax "github.com/googleapis/gax-go/v2" "google.golang.org/api/option" - iampb "google.golang.org/genproto/googleapis/iam/v1" ) // TODO(noahdietz): Move existing factory methods to this file. diff -Nru temporal-1.21.5-1/src/vendor/cloud.google.com/go/storage/doc.go temporal-1.22.5/src/vendor/cloud.google.com/go/storage/doc.go --- temporal-1.21.5-1/src/vendor/cloud.google.com/go/storage/doc.go 2023-09-29 14:03:30.000000000 +0000 +++ temporal-1.22.5/src/vendor/cloud.google.com/go/storage/doc.go 2024-02-23 09:46:08.000000000 +0000 @@ -36,6 +36,9 @@ reused instead of created as needed. The methods of [Client] are safe for concurrent use by multiple goroutines. +You may configure the client by passing in options from the [google.golang.org/api/option] +package. You may also use options defined in this package, such as [WithJSONReads]. 
+ If you only wish to access public data, you can create an unauthenticated client with diff -Nru temporal-1.21.5-1/src/vendor/cloud.google.com/go/storage/grpc_client.go temporal-1.22.5/src/vendor/cloud.google.com/go/storage/grpc_client.go --- temporal-1.21.5-1/src/vendor/cloud.google.com/go/storage/grpc_client.go 2023-09-29 14:03:30.000000000 +0000 +++ temporal-1.22.5/src/vendor/cloud.google.com/go/storage/grpc_client.go 2024-02-23 09:46:08.000000000 +0000 @@ -17,11 +17,13 @@ import ( "context" "encoding/base64" + "errors" "fmt" "io" "net/url" "os" + "cloud.google.com/go/iam/apiv1/iampb" "cloud.google.com/go/internal/trace" gapic "cloud.google.com/go/storage/internal/apiv2" storagepb "cloud.google.com/go/storage/internal/apiv2/stubs" @@ -29,7 +31,6 @@ "google.golang.org/api/iterator" "google.golang.org/api/option" "google.golang.org/api/option/internaloption" - iampb "google.golang.org/genproto/googleapis/iam/v1" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" @@ -110,6 +111,11 @@ s := initSettings(opts...) s.clientOption = append(defaultGRPCOptions(), s.clientOption...) + config := newStorageConfig(s.clientOption...) + if config.readAPIWasSet { + return nil, errors.New("storage: GRPC is incompatible with any option that specifies an API for reads") + } + g, err := gapic.NewClient(ctx, s.clientOption...) if err != nil { return nil, err @@ -855,13 +861,6 @@ ctx = setUserProjectMetadata(ctx, s.userProject) } - // A negative length means "read to the end of the object", but the - // read_limit field it corresponds to uses zero to mean the same thing. Thus - // we coerce the length to 0 to read to the end of the object. - if params.length < 0 { - params.length = 0 - } - b := bucketResourceName(globalProjectAlias, params.bucket) req := &storagepb.ReadObjectRequest{ Bucket: b, @@ -884,13 +883,20 @@ cc, cancel := context.WithCancel(ctx) - start := params.offset + seen + req.ReadOffset = params.offset + seen + + // A negative length means "read to the end of the object", but the + // read_limit field it corresponds to uses zero to mean the same thing. Thus + // we coerce the length to 0 to read to the end of the object. + if params.length < 0 { + params.length = 0 + } + // Only set a ReadLimit if length is greater than zero, because zero // means read it all. if params.length > 0 { req.ReadLimit = params.length - seen } - req.ReadOffset = start if err := applyCondsProto("gRPCReader.reopen", params.gen, params.conds, req); err != nil { cancel() @@ -963,7 +969,7 @@ cr := msg.GetContentRange() if cr != nil { r.Attrs.StartOffset = cr.GetStart() - r.remain = cr.GetEnd() - cr.GetStart() + 1 + r.remain = cr.GetEnd() - cr.GetStart() } else { r.remain = size } @@ -1254,12 +1260,12 @@ if s.userProject != "" { ctx = setUserProjectMetadata(ctx, s.userProject) } - req := &storagepb.ListNotificationsRequest{ + req := &storagepb.ListNotificationConfigsRequest{ Parent: bucketResourceName(globalProjectAlias, bucket), } - var notifications []*storagepb.Notification + var notifications []*storagepb.NotificationConfig err = run(ctx, func() error { - gitr := c.raw.ListNotifications(ctx, req, s.gax...) + gitr := c.raw.ListNotificationConfigs(ctx, req, s.gax...) for { // PageSize is not set and fallbacks to the API default pageSize of 100. items, nextPageToken, err := gitr.InternalFetch(int(req.GetPageSize()), req.GetPageToken()) @@ -1286,14 +1292,14 @@ defer func() { trace.EndSpan(ctx, err) }() s := callSettings(c.settings, opts...) 
- req := &storagepb.CreateNotificationRequest{ - Parent: bucketResourceName(globalProjectAlias, bucket), - Notification: toProtoNotification(n), + req := &storagepb.CreateNotificationConfigRequest{ + Parent: bucketResourceName(globalProjectAlias, bucket), + NotificationConfig: toProtoNotification(n), } - var pbn *storagepb.Notification + var pbn *storagepb.NotificationConfig err = run(ctx, func() error { var err error - pbn, err = c.raw.CreateNotification(ctx, req, s.gax...) + pbn, err = c.raw.CreateNotificationConfig(ctx, req, s.gax...) return err }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) if err != nil { @@ -1307,9 +1313,9 @@ defer func() { trace.EndSpan(ctx, err) }() s := callSettings(c.settings, opts...) - req := &storagepb.DeleteNotificationRequest{Name: id} + req := &storagepb.DeleteNotificationConfigRequest{Name: id} return run(ctx, func() error { - return c.raw.DeleteNotification(ctx, req, s.gax...) + return c.raw.DeleteNotificationConfig(ctx, req, s.gax...) }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) } diff -Nru temporal-1.21.5-1/src/vendor/cloud.google.com/go/storage/http_client.go temporal-1.22.5/src/vendor/cloud.google.com/go/storage/http_client.go --- temporal-1.21.5-1/src/vendor/cloud.google.com/go/storage/http_client.go 2023-09-29 14:03:30.000000000 +0000 +++ temporal-1.22.5/src/vendor/cloud.google.com/go/storage/http_client.go 2024-02-23 09:46:08.000000000 +0000 @@ -29,6 +29,7 @@ "strings" "time" + "cloud.google.com/go/iam/apiv1/iampb" "cloud.google.com/go/internal/optional" "cloud.google.com/go/internal/trace" "golang.org/x/oauth2/google" @@ -39,7 +40,6 @@ raw "google.golang.org/api/storage/v1" "google.golang.org/api/transport" htransport "google.golang.org/api/transport/http" - iampb "google.golang.org/genproto/googleapis/iam/v1" ) // httpStorageClient is the HTTP-JSON API implementation of the transport-agnostic @@ -53,6 +53,7 @@ raw *raw.Service scheme string settings *settings + config *storageConfig } // newHTTPStorageClient initializes a new storageClient that uses the HTTP-JSON @@ -62,6 +63,7 @@ func newHTTPStorageClient(ctx context.Context, opts ...storageOption) (storageClient, error) { s := initSettings(opts...) o := s.clientOption + config := newStorageConfig(o...) var creds *google.Credentials // In general, it is recommended to use raw.NewService instead of htransport.NewClient @@ -134,6 +136,7 @@ raw: rawService, scheme: u.Scheme, settings: s, + config: &config, }, nil } @@ -779,6 +782,13 @@ s := callSettings(c.settings, opts...) + if c.config.useJSONforReads { + return c.newRangeReaderJSON(ctx, params, s) + } + return c.newRangeReaderXML(ctx, params, s) +} + +func (c *httpStorageClient) newRangeReaderXML(ctx context.Context, params *newRangeReaderParams, s *settings) (r *Reader, err error) { u := &url.URL{ Scheme: c.scheme, Host: c.readHost, @@ -793,186 +803,51 @@ return nil, err } req = req.WithContext(ctx) + if s.userProject != "" { req.Header.Set("X-Goog-User-Project", s.userProject) } - if params.readCompressed { - req.Header.Set("Accept-Encoding", "gzip") - } - if err := setEncryptionHeaders(req.Header, params.encryptionKey, false); err != nil { + + if err := setRangeReaderHeaders(req.Header, params); err != nil { return nil, err } - // Define a function that initiates a Read with offset and length, assuming we - // have already read seen bytes. - reopen := func(seen int64) (*http.Response, error) { - // If the context has already expired, return immediately without making a - // call. 
- if err := ctx.Err(); err != nil { - return nil, err - } - start := params.offset + seen - if params.length < 0 && start < 0 { - req.Header.Set("Range", fmt.Sprintf("bytes=%d", start)) - } else if params.length < 0 && start > 0 { - req.Header.Set("Range", fmt.Sprintf("bytes=%d-", start)) - } else if params.length > 0 { - // The end character isn't affected by how many bytes we've seen. - req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", start, params.offset+params.length-1)) - } - // We wait to assign conditions here because the generation number can change in between reopen() runs. - if err := setConditionsHeaders(req.Header, params.conds); err != nil { - return nil, err - } - // If an object generation is specified, include generation as query string parameters. - if params.gen >= 0 { - req.URL.RawQuery = fmt.Sprintf("generation=%d", params.gen) - } - - var res *http.Response - err = run(ctx, func() error { - res, err = c.hc.Do(req) - if err != nil { - return err - } - if res.StatusCode == http.StatusNotFound { - res.Body.Close() - return ErrObjectNotExist - } - if res.StatusCode < 200 || res.StatusCode > 299 { - body, _ := ioutil.ReadAll(res.Body) - res.Body.Close() - return &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - Body: string(body), - } - } - - partialContentNotSatisfied := - !decompressiveTranscoding(res) && - start > 0 && params.length != 0 && - res.StatusCode != http.StatusPartialContent - - if partialContentNotSatisfied { - res.Body.Close() - return errors.New("storage: partial request not satisfied") - } - - // With "Content-Encoding": "gzip" aka decompressive transcoding, GCS serves - // back the whole file regardless of the range count passed in as per: - // https://cloud.google.com/storage/docs/transcoding#range, - // thus we have to manually move the body forward by seen bytes. - if decompressiveTranscoding(res) && seen > 0 { - _, _ = io.CopyN(ioutil.Discard, res.Body, seen) - } - - // If a generation hasn't been specified, and this is the first response we get, let's record the - // generation. In future requests we'll use this generation as a precondition to avoid data races. - if params.gen < 0 && res.Header.Get("X-Goog-Generation") != "" { - gen64, err := strconv.ParseInt(res.Header.Get("X-Goog-Generation"), 10, 64) - if err != nil { - return err - } - params.gen = gen64 - } - return nil - }, s.retry, s.idempotent, setRetryHeaderHTTP(nil)) - if err != nil { - return nil, err - } - return res, nil - } + reopen := readerReopen(ctx, req.Header, params, s, + func() (*http.Response, error) { return c.hc.Do(req) }, + func() error { return setConditionsHeaders(req.Header, params.conds) }, + func() { req.URL.RawQuery = fmt.Sprintf("generation=%d", params.gen) }) res, err := reopen(0) if err != nil { return nil, err } - var ( - size int64 // total size of object, even if a range was requested. - checkCRC bool - crc uint32 - startOffset int64 // non-zero if range request. - ) - if res.StatusCode == http.StatusPartialContent { - cr := strings.TrimSpace(res.Header.Get("Content-Range")) - if !strings.HasPrefix(cr, "bytes ") || !strings.Contains(cr, "/") { - return nil, fmt.Errorf("storage: invalid Content-Range %q", cr) - } - // Content range is formatted -/. We take - // the total size. 
- size, err = strconv.ParseInt(cr[strings.LastIndex(cr, "/")+1:], 10, 64) - if err != nil { - return nil, fmt.Errorf("storage: invalid Content-Range %q", cr) - } + return parseReadResponse(res, params, reopen) +} - dashIndex := strings.Index(cr, "-") - if dashIndex >= 0 { - startOffset, err = strconv.ParseInt(cr[len("bytes="):dashIndex], 10, 64) - if err != nil { - return nil, fmt.Errorf("storage: invalid Content-Range %q: %w", cr, err) - } - } - } else { - size = res.ContentLength - // Check the CRC iff all of the following hold: - // - We asked for content (length != 0). - // - We got all the content (status != PartialContent). - // - The server sent a CRC header. - // - The Go http stack did not uncompress the file. - // - We were not served compressed data that was uncompressed on download. - // The problem with the last two cases is that the CRC will not match -- GCS - // computes it on the compressed contents, but we compute it on the - // uncompressed contents. - if params.length != 0 && !res.Uncompressed && !uncompressedByServer(res) { - crc, checkCRC = parseCRC32c(res) - } - } +func (c *httpStorageClient) newRangeReaderJSON(ctx context.Context, params *newRangeReaderParams, s *settings) (r *Reader, err error) { + call := c.raw.Objects.Get(params.bucket, params.object) - remain := res.ContentLength - body := res.Body - if params.length == 0 { - remain = 0 - body.Close() - body = emptyBody - } - var metaGen int64 - if res.Header.Get("X-Goog-Metageneration") != "" { - metaGen, err = strconv.ParseInt(res.Header.Get("X-Goog-Metageneration"), 10, 64) - if err != nil { - return nil, err - } + setClientHeader(call.Header()) + call.Context(ctx) + call.Projection("full") + + if s.userProject != "" { + call.UserProject(s.userProject) } - var lm time.Time - if res.Header.Get("Last-Modified") != "" { - lm, err = http.ParseTime(res.Header.Get("Last-Modified")) - if err != nil { - return nil, err - } + if err := setRangeReaderHeaders(call.Header(), params); err != nil { + return nil, err } - attrs := ReaderObjectAttrs{ - Size: size, - ContentType: res.Header.Get("Content-Type"), - ContentEncoding: res.Header.Get("Content-Encoding"), - CacheControl: res.Header.Get("Cache-Control"), - LastModified: lm, - StartOffset: startOffset, - Generation: params.gen, - Metageneration: metaGen, + reopen := readerReopen(ctx, call.Header(), params, s, func() (*http.Response, error) { return call.Download() }, + func() error { return applyConds("NewReader", params.gen, params.conds, call) }, + func() { call.Generation(params.gen) }) + + res, err := reopen(0) + if err != nil { + return nil, err } - return &Reader{ - Attrs: attrs, - size: size, - remain: remain, - wantCRC: crc, - checkCRC: checkCRC, - reader: &httpReader{ - reopen: reopen, - body: body, - }, - }, nil + return parseReadResponse(res, params, reopen) } func (c *httpStorageClient) OpenWriter(params *openWriterParams, opts ...storageOption) (*io.PipeWriter, error) { @@ -1349,3 +1224,195 @@ func (r *httpReader) Close() error { return r.body.Close() } + +func setRangeReaderHeaders(h http.Header, params *newRangeReaderParams) error { + if params.readCompressed { + h.Set("Accept-Encoding", "gzip") + } + if err := setEncryptionHeaders(h, params.encryptionKey, false); err != nil { + return err + } + return nil +} + +// readerReopen initiates a Read with offset and length, assuming we +// have already read seen bytes. 
+func readerReopen(ctx context.Context, header http.Header, params *newRangeReaderParams, s *settings, + doDownload func() (*http.Response, error), applyConditions func() error, setGeneration func()) func(int64) (*http.Response, error) { + return func(seen int64) (*http.Response, error) { + // If the context has already expired, return immediately without making a + // call. + if err := ctx.Err(); err != nil { + return nil, err + } + start := params.offset + seen + if params.length < 0 && start < 0 { + header.Set("Range", fmt.Sprintf("bytes=%d", start)) + } else if params.length < 0 && start > 0 { + header.Set("Range", fmt.Sprintf("bytes=%d-", start)) + } else if params.length > 0 { + // The end character isn't affected by how many bytes we've seen. + header.Set("Range", fmt.Sprintf("bytes=%d-%d", start, params.offset+params.length-1)) + } + // We wait to assign conditions here because the generation number can change in between reopen() runs. + if err := applyConditions(); err != nil { + return nil, err + } + // If an object generation is specified, include generation as query string parameters. + if params.gen >= 0 { + setGeneration() + } + + var err error + var res *http.Response + err = run(ctx, func() error { + res, err = doDownload() + if err != nil { + var e *googleapi.Error + if errors.As(err, &e) { + if e.Code == http.StatusNotFound { + return ErrObjectNotExist + } + } + return err + } + + if res.StatusCode == http.StatusNotFound { + // this check is necessary only for XML + res.Body.Close() + return ErrObjectNotExist + } + if res.StatusCode < 200 || res.StatusCode > 299 { + body, _ := ioutil.ReadAll(res.Body) + res.Body.Close() + return &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + Body: string(body), + } + } + + partialContentNotSatisfied := + !decompressiveTranscoding(res) && + start > 0 && params.length != 0 && + res.StatusCode != http.StatusPartialContent + + if partialContentNotSatisfied { + res.Body.Close() + return errors.New("storage: partial request not satisfied") + } + + // With "Content-Encoding": "gzip" aka decompressive transcoding, GCS serves + // back the whole file regardless of the range count passed in as per: + // https://cloud.google.com/storage/docs/transcoding#range, + // thus we have to manually move the body forward by seen bytes. + if decompressiveTranscoding(res) && seen > 0 { + _, _ = io.CopyN(ioutil.Discard, res.Body, seen) + } + + // If a generation hasn't been specified, and this is the first response we get, let's record the + // generation. In future requests we'll use this generation as a precondition to avoid data races. + if params.gen < 0 && res.Header.Get("X-Goog-Generation") != "" { + gen64, err := strconv.ParseInt(res.Header.Get("X-Goog-Generation"), 10, 64) + if err != nil { + return err + } + params.gen = gen64 + } + return nil + }, s.retry, s.idempotent, setRetryHeaderHTTP(nil)) + if err != nil { + return nil, err + } + return res, nil + } +} + +func parseReadResponse(res *http.Response, params *newRangeReaderParams, reopen func(int64) (*http.Response, error)) (*Reader, error) { + var err error + var ( + size int64 // total size of object, even if a range was requested. + checkCRC bool + crc uint32 + startOffset int64 // non-zero if range request. 
+ ) + if res.StatusCode == http.StatusPartialContent { + cr := strings.TrimSpace(res.Header.Get("Content-Range")) + if !strings.HasPrefix(cr, "bytes ") || !strings.Contains(cr, "/") { + return nil, fmt.Errorf("storage: invalid Content-Range %q", cr) + } + // Content range is formatted -/. We take + // the total size. + size, err = strconv.ParseInt(cr[strings.LastIndex(cr, "/")+1:], 10, 64) + if err != nil { + return nil, fmt.Errorf("storage: invalid Content-Range %q", cr) + } + + dashIndex := strings.Index(cr, "-") + if dashIndex >= 0 { + startOffset, err = strconv.ParseInt(cr[len("bytes="):dashIndex], 10, 64) + if err != nil { + return nil, fmt.Errorf("storage: invalid Content-Range %q: %w", cr, err) + } + } + } else { + size = res.ContentLength + // Check the CRC iff all of the following hold: + // - We asked for content (length != 0). + // - We got all the content (status != PartialContent). + // - The server sent a CRC header. + // - The Go http stack did not uncompress the file. + // - We were not served compressed data that was uncompressed on download. + // The problem with the last two cases is that the CRC will not match -- GCS + // computes it on the compressed contents, but we compute it on the + // uncompressed contents. + if params.length != 0 && !res.Uncompressed && !uncompressedByServer(res) { + crc, checkCRC = parseCRC32c(res) + } + } + + remain := res.ContentLength + body := res.Body + if params.length == 0 { + remain = 0 + body.Close() + body = emptyBody + } + var metaGen int64 + if res.Header.Get("X-Goog-Metageneration") != "" { + metaGen, err = strconv.ParseInt(res.Header.Get("X-Goog-Metageneration"), 10, 64) + if err != nil { + return nil, err + } + } + + var lm time.Time + if res.Header.Get("Last-Modified") != "" { + lm, err = http.ParseTime(res.Header.Get("Last-Modified")) + if err != nil { + return nil, err + } + } + + attrs := ReaderObjectAttrs{ + Size: size, + ContentType: res.Header.Get("Content-Type"), + ContentEncoding: res.Header.Get("Content-Encoding"), + CacheControl: res.Header.Get("Cache-Control"), + LastModified: lm, + StartOffset: startOffset, + Generation: params.gen, + Metageneration: metaGen, + } + return &Reader{ + Attrs: attrs, + size: size, + remain: remain, + wantCRC: crc, + checkCRC: checkCRC, + reader: &httpReader{ + reopen: reopen, + body: body, + }, + }, nil +} diff -Nru temporal-1.21.5-1/src/vendor/cloud.google.com/go/storage/iam.go temporal-1.22.5/src/vendor/cloud.google.com/go/storage/iam.go --- temporal-1.21.5-1/src/vendor/cloud.google.com/go/storage/iam.go 2023-09-29 14:03:30.000000000 +0000 +++ temporal-1.22.5/src/vendor/cloud.google.com/go/storage/iam.go 2024-02-23 09:46:08.000000000 +0000 @@ -18,9 +18,9 @@ "context" "cloud.google.com/go/iam" + "cloud.google.com/go/iam/apiv1/iampb" "cloud.google.com/go/internal/trace" raw "google.golang.org/api/storage/v1" - iampb "google.golang.org/genproto/googleapis/iam/v1" "google.golang.org/genproto/googleapis/type/expr" ) diff -Nru temporal-1.21.5-1/src/vendor/cloud.google.com/go/storage/internal/apiv2/gapic_metadata.json temporal-1.22.5/src/vendor/cloud.google.com/go/storage/internal/apiv2/gapic_metadata.json --- temporal-1.21.5-1/src/vendor/cloud.google.com/go/storage/internal/apiv2/gapic_metadata.json 2023-09-29 14:03:30.000000000 +0000 +++ temporal-1.22.5/src/vendor/cloud.google.com/go/storage/internal/apiv2/gapic_metadata.json 2024-02-23 09:46:08.000000000 +0000 @@ -30,9 +30,9 @@ "CreateHmacKey" ] }, - "CreateNotification": { + "CreateNotificationConfig": { "methods": [ - "CreateNotification" 
+ "CreateNotificationConfig" ] }, "DeleteBucket": { @@ -45,9 +45,9 @@ "DeleteHmacKey" ] }, - "DeleteNotification": { + "DeleteNotificationConfig": { "methods": [ - "DeleteNotification" + "DeleteNotificationConfig" ] }, "DeleteObject": { @@ -70,9 +70,9 @@ "GetIamPolicy" ] }, - "GetNotification": { + "GetNotificationConfig": { "methods": [ - "GetNotification" + "GetNotificationConfig" ] }, "GetObject": { @@ -95,9 +95,9 @@ "ListHmacKeys" ] }, - "ListNotifications": { + "ListNotificationConfigs": { "methods": [ - "ListNotifications" + "ListNotificationConfigs" ] }, "ListObjects": { diff -Nru temporal-1.21.5-1/src/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go temporal-1.22.5/src/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go --- temporal-1.21.5-1/src/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go 2023-09-29 14:03:30.000000000 +0000 +++ temporal-1.22.5/src/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go 2024-02-23 09:46:08.000000000 +0000 @@ -24,13 +24,13 @@ "regexp" "strings" + iampb "cloud.google.com/go/iam/apiv1/iampb" storagepb "cloud.google.com/go/storage/internal/apiv2/stubs" gax "github.com/googleapis/gax-go/v2" "google.golang.org/api/iterator" "google.golang.org/api/option" "google.golang.org/api/option/internaloption" gtransport "google.golang.org/api/transport/grpc" - iampb "google.golang.org/genproto/googleapis/iam/v1" "google.golang.org/grpc" "google.golang.org/grpc/metadata" "google.golang.org/protobuf/proto" @@ -49,10 +49,10 @@ SetIamPolicy []gax.CallOption TestIamPermissions []gax.CallOption UpdateBucket []gax.CallOption - DeleteNotification []gax.CallOption - GetNotification []gax.CallOption - CreateNotification []gax.CallOption - ListNotifications []gax.CallOption + DeleteNotificationConfig []gax.CallOption + GetNotificationConfig []gax.CallOption + CreateNotificationConfig []gax.CallOption + ListNotificationConfigs []gax.CallOption ComposeObject []gax.CallOption DeleteObject []gax.CallOption CancelResumableWrite []gax.CallOption @@ -95,10 +95,10 @@ SetIamPolicy: []gax.CallOption{}, TestIamPermissions: []gax.CallOption{}, UpdateBucket: []gax.CallOption{}, - DeleteNotification: []gax.CallOption{}, - GetNotification: []gax.CallOption{}, - CreateNotification: []gax.CallOption{}, - ListNotifications: []gax.CallOption{}, + DeleteNotificationConfig: []gax.CallOption{}, + GetNotificationConfig: []gax.CallOption{}, + CreateNotificationConfig: []gax.CallOption{}, + ListNotificationConfigs: []gax.CallOption{}, ComposeObject: []gax.CallOption{}, DeleteObject: []gax.CallOption{}, CancelResumableWrite: []gax.CallOption{}, @@ -133,10 +133,10 @@ SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest, ...gax.CallOption) (*iampb.Policy, error) TestIamPermissions(context.Context, *iampb.TestIamPermissionsRequest, ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) UpdateBucket(context.Context, *storagepb.UpdateBucketRequest, ...gax.CallOption) (*storagepb.Bucket, error) - DeleteNotification(context.Context, *storagepb.DeleteNotificationRequest, ...gax.CallOption) error - GetNotification(context.Context, *storagepb.GetNotificationRequest, ...gax.CallOption) (*storagepb.Notification, error) - CreateNotification(context.Context, *storagepb.CreateNotificationRequest, ...gax.CallOption) (*storagepb.Notification, error) - ListNotifications(context.Context, *storagepb.ListNotificationsRequest, ...gax.CallOption) *NotificationIterator + DeleteNotificationConfig(context.Context, 
*storagepb.DeleteNotificationConfigRequest, ...gax.CallOption) error + GetNotificationConfig(context.Context, *storagepb.GetNotificationConfigRequest, ...gax.CallOption) (*storagepb.NotificationConfig, error) + CreateNotificationConfig(context.Context, *storagepb.CreateNotificationConfigRequest, ...gax.CallOption) (*storagepb.NotificationConfig, error) + ListNotificationConfigs(context.Context, *storagepb.ListNotificationConfigsRequest, ...gax.CallOption) *NotificationConfigIterator ComposeObject(context.Context, *storagepb.ComposeObjectRequest, ...gax.CallOption) (*storagepb.Object, error) DeleteObject(context.Context, *storagepb.DeleteObjectRequest, ...gax.CallOption) error CancelResumableWrite(context.Context, *storagepb.CancelResumableWriteRequest, ...gax.CallOption) (*storagepb.CancelResumableWriteResponse, error) @@ -267,27 +267,27 @@ return c.internalClient.UpdateBucket(ctx, req, opts...) } -// DeleteNotification permanently deletes a notification subscription. -func (c *Client) DeleteNotification(ctx context.Context, req *storagepb.DeleteNotificationRequest, opts ...gax.CallOption) error { - return c.internalClient.DeleteNotification(ctx, req, opts...) +// DeleteNotificationConfig permanently deletes a NotificationConfig. +func (c *Client) DeleteNotificationConfig(ctx context.Context, req *storagepb.DeleteNotificationConfigRequest, opts ...gax.CallOption) error { + return c.internalClient.DeleteNotificationConfig(ctx, req, opts...) } -// GetNotification view a notification config. -func (c *Client) GetNotification(ctx context.Context, req *storagepb.GetNotificationRequest, opts ...gax.CallOption) (*storagepb.Notification, error) { - return c.internalClient.GetNotification(ctx, req, opts...) +// GetNotificationConfig view a NotificationConfig. +func (c *Client) GetNotificationConfig(ctx context.Context, req *storagepb.GetNotificationConfigRequest, opts ...gax.CallOption) (*storagepb.NotificationConfig, error) { + return c.internalClient.GetNotificationConfig(ctx, req, opts...) } -// CreateNotification creates a notification subscription for a given bucket. -// These notifications, when triggered, publish messages to the specified -// Pub/Sub topics. -// See https://cloud.google.com/storage/docs/pubsub-notifications (at https://cloud.google.com/storage/docs/pubsub-notifications). -func (c *Client) CreateNotification(ctx context.Context, req *storagepb.CreateNotificationRequest, opts ...gax.CallOption) (*storagepb.Notification, error) { - return c.internalClient.CreateNotification(ctx, req, opts...) +// CreateNotificationConfig creates a NotificationConfig for a given bucket. +// These NotificationConfigs, when triggered, publish messages to the +// specified Pub/Sub topics. See +// https://cloud.google.com/storage/docs/pubsub-notifications (at https://cloud.google.com/storage/docs/pubsub-notifications). +func (c *Client) CreateNotificationConfig(ctx context.Context, req *storagepb.CreateNotificationConfigRequest, opts ...gax.CallOption) (*storagepb.NotificationConfig, error) { + return c.internalClient.CreateNotificationConfig(ctx, req, opts...) } -// ListNotifications retrieves a list of notification subscriptions for a given bucket. -func (c *Client) ListNotifications(ctx context.Context, req *storagepb.ListNotificationsRequest, opts ...gax.CallOption) *NotificationIterator { - return c.internalClient.ListNotifications(ctx, req, opts...) +// ListNotificationConfigs retrieves a list of NotificationConfigs for a given bucket. 
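// The returned NotificationConfigIterator is consumed with repeated calls to
// Next (see the example loop on the iterator's Next method further down);
// pagination state can be inspected through its PageInfo method.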
+func (c *Client) ListNotificationConfigs(ctx context.Context, req *storagepb.ListNotificationConfigsRequest, opts ...gax.CallOption) *NotificationConfigIterator { + return c.internalClient.ListNotificationConfigs(ctx, req, opts...) } // ComposeObject concatenates a list of existing objects into a new object in the same @@ -365,8 +365,9 @@ // returned persisted_size; in this case, the service will skip data at // offsets that were already persisted (without checking that it matches // the previously written data), and write only the data starting from the -// persisted offset. This behavior can make client-side handling simpler -// in some cases. +// persisted offset. Even though the data isn’t written, it may still +// incur a performance cost over resuming at the correct write offset. +// This behavior can make client-side handling simpler in some cases. // // The service will not view the object as complete until the client has // sent a WriteObjectRequest with finish_write set to true. Sending any @@ -603,6 +604,9 @@ if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetParent()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])) > 0 { routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1]) } + if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetBucket().GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetBucket().GetProject())[1])) > 0 { + routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetBucket().GetProject())[1]) + } for headerName, headerValue := range routingHeadersMap { routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) } @@ -816,7 +820,7 @@ return resp, nil } -func (c *gRPCClient) DeleteNotification(ctx context.Context, req *storagepb.DeleteNotificationRequest, opts ...gax.CallOption) error { +func (c *gRPCClient) DeleteNotificationConfig(ctx context.Context, req *storagepb.DeleteNotificationConfigRequest, opts ...gax.CallOption) error { routingHeaders := "" routingHeadersMap := make(map[string]string) if reg := regexp.MustCompile("(?Pprojects/[^/]+/buckets/[^/]+)(?:/.*)?"); reg.MatchString(req.GetName()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])) > 0 { @@ -829,16 +833,16 @@ md := metadata.Pairs("x-goog-request-params", routingHeaders) ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append((*c.CallOptions).DeleteNotification[0:len((*c.CallOptions).DeleteNotification):len((*c.CallOptions).DeleteNotification)], opts...) + opts = append((*c.CallOptions).DeleteNotificationConfig[0:len((*c.CallOptions).DeleteNotificationConfig):len((*c.CallOptions).DeleteNotificationConfig)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - _, err = c.client.DeleteNotification(ctx, req, settings.GRPC...) + _, err = c.client.DeleteNotificationConfig(ctx, req, settings.GRPC...) return err }, opts...) 
return err } -func (c *gRPCClient) GetNotification(ctx context.Context, req *storagepb.GetNotificationRequest, opts ...gax.CallOption) (*storagepb.Notification, error) { +func (c *gRPCClient) GetNotificationConfig(ctx context.Context, req *storagepb.GetNotificationConfigRequest, opts ...gax.CallOption) (*storagepb.NotificationConfig, error) { routingHeaders := "" routingHeadersMap := make(map[string]string) if reg := regexp.MustCompile("(?Pprojects/[^/]+/buckets/[^/]+)(?:/.*)?"); reg.MatchString(req.GetName()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])) > 0 { @@ -851,11 +855,11 @@ md := metadata.Pairs("x-goog-request-params", routingHeaders) ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append((*c.CallOptions).GetNotification[0:len((*c.CallOptions).GetNotification):len((*c.CallOptions).GetNotification)], opts...) - var resp *storagepb.Notification + opts = append((*c.CallOptions).GetNotificationConfig[0:len((*c.CallOptions).GetNotificationConfig):len((*c.CallOptions).GetNotificationConfig)], opts...) + var resp *storagepb.NotificationConfig err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - resp, err = c.client.GetNotification(ctx, req, settings.GRPC...) + resp, err = c.client.GetNotificationConfig(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { @@ -864,7 +868,7 @@ return resp, nil } -func (c *gRPCClient) CreateNotification(ctx context.Context, req *storagepb.CreateNotificationRequest, opts ...gax.CallOption) (*storagepb.Notification, error) { +func (c *gRPCClient) CreateNotificationConfig(ctx context.Context, req *storagepb.CreateNotificationConfigRequest, opts ...gax.CallOption) (*storagepb.NotificationConfig, error) { routingHeaders := "" routingHeadersMap := make(map[string]string) if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetParent()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])) > 0 { @@ -877,11 +881,11 @@ md := metadata.Pairs("x-goog-request-params", routingHeaders) ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append((*c.CallOptions).CreateNotification[0:len((*c.CallOptions).CreateNotification):len((*c.CallOptions).CreateNotification)], opts...) - var resp *storagepb.Notification + opts = append((*c.CallOptions).CreateNotificationConfig[0:len((*c.CallOptions).CreateNotificationConfig):len((*c.CallOptions).CreateNotificationConfig)], opts...) + var resp *storagepb.NotificationConfig err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - resp, err = c.client.CreateNotification(ctx, req, settings.GRPC...) + resp, err = c.client.CreateNotificationConfig(ctx, req, settings.GRPC...) return err }, opts...) 
if err != nil { @@ -890,7 +894,7 @@ return resp, nil } -func (c *gRPCClient) ListNotifications(ctx context.Context, req *storagepb.ListNotificationsRequest, opts ...gax.CallOption) *NotificationIterator { +func (c *gRPCClient) ListNotificationConfigs(ctx context.Context, req *storagepb.ListNotificationConfigsRequest, opts ...gax.CallOption) *NotificationConfigIterator { routingHeaders := "" routingHeadersMap := make(map[string]string) if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetParent()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])) > 0 { @@ -903,11 +907,11 @@ md := metadata.Pairs("x-goog-request-params", routingHeaders) ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append((*c.CallOptions).ListNotifications[0:len((*c.CallOptions).ListNotifications):len((*c.CallOptions).ListNotifications)], opts...) - it := &NotificationIterator{} - req = proto.Clone(req).(*storagepb.ListNotificationsRequest) - it.InternalFetch = func(pageSize int, pageToken string) ([]*storagepb.Notification, string, error) { - resp := &storagepb.ListNotificationsResponse{} + opts = append((*c.CallOptions).ListNotificationConfigs[0:len((*c.CallOptions).ListNotificationConfigs):len((*c.CallOptions).ListNotificationConfigs)], opts...) + it := &NotificationConfigIterator{} + req = proto.Clone(req).(*storagepb.ListNotificationConfigsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*storagepb.NotificationConfig, string, error) { + resp := &storagepb.ListNotificationConfigsResponse{} if pageToken != "" { req.PageToken = pageToken } @@ -918,7 +922,7 @@ } err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - resp, err = c.client.ListNotifications(ctx, req, settings.GRPC...) + resp, err = c.client.ListNotificationConfigs(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { @@ -926,7 +930,7 @@ } it.Response = resp - return resp.GetNotifications(), resp.GetNextPageToken(), nil + return resp.GetNotificationConfigs(), resp.GetNextPageToken(), nil } fetch := func(pageSize int, pageToken string) (string, error) { items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) @@ -1520,9 +1524,9 @@ return b } -// NotificationIterator manages a stream of *storagepb.Notification. -type NotificationIterator struct { - items []*storagepb.Notification +// NotificationConfigIterator manages a stream of *storagepb.NotificationConfig. +type NotificationConfigIterator struct { + items []*storagepb.NotificationConfig pageInfo *iterator.PageInfo nextFunc func() error @@ -1537,18 +1541,18 @@ // InternalFetch returns results from a single call to the underlying RPC. // The number of results is no greater than pageSize. // If there are no more results, nextPageToken is empty and err is nil. - InternalFetch func(pageSize int, pageToken string) (results []*storagepb.Notification, nextPageToken string, err error) + InternalFetch func(pageSize int, pageToken string) (results []*storagepb.NotificationConfig, nextPageToken string, err error) } // PageInfo supports pagination. See the google.golang.org/api/iterator package for details. -func (it *NotificationIterator) PageInfo() *iterator.PageInfo { +func (it *NotificationConfigIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } // Next returns the next result. Its second return value is iterator.Done if there are no more // results. Once Next returns Done, all subsequent calls will return Done. 
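// A typical consumption loop, following the google.golang.org/api/iterator
// conventions documented above (sketch only, with placeholder error handling):
//
//	for {
//		cfg, err := it.Next()
//		if err == iterator.Done {
//			break
//		}
//		if err != nil {
//			return err
//		}
//		// use cfg (*storagepb.NotificationConfig)
//	}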
-func (it *NotificationIterator) Next() (*storagepb.Notification, error) { - var item *storagepb.Notification +func (it *NotificationConfigIterator) Next() (*storagepb.NotificationConfig, error) { + var item *storagepb.NotificationConfig if err := it.nextFunc(); err != nil { return item, err } @@ -1557,11 +1561,11 @@ return item, nil } -func (it *NotificationIterator) bufLen() int { +func (it *NotificationConfigIterator) bufLen() int { return len(it.items) } -func (it *NotificationIterator) takeBuf() interface{} { +func (it *NotificationConfigIterator) takeBuf() interface{} { b := it.items it.items = nil return b diff -Nru temporal-1.21.5-1/src/vendor/cloud.google.com/go/storage/internal/apiv2/stubs/storage.pb.go temporal-1.22.5/src/vendor/cloud.google.com/go/storage/internal/apiv2/stubs/storage.pb.go --- temporal-1.21.5-1/src/vendor/cloud.google.com/go/storage/internal/apiv2/stubs/storage.pb.go 2023-09-29 14:03:30.000000000 +0000 +++ temporal-1.22.5/src/vendor/cloud.google.com/go/storage/internal/apiv2/stubs/storage.pb.go 2024-02-23 09:46:08.000000000 +0000 @@ -25,8 +25,8 @@ reflect "reflect" sync "sync" + iampb "cloud.google.com/go/iam/apiv1/iampb" _ "google.golang.org/genproto/googleapis/api/annotations" - v1 "google.golang.org/genproto/googleapis/iam/v1" date "google.golang.org/genproto/googleapis/type/date" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" @@ -742,18 +742,18 @@ return nil } -// Request message for DeleteNotification. -type DeleteNotificationRequest struct { +// Request message for DeleteNotificationConfig. +type DeleteNotificationConfigRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Required. The parent bucket of the notification. + // Required. The parent bucket of the NotificationConfig. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` } -func (x *DeleteNotificationRequest) Reset() { - *x = DeleteNotificationRequest{} +func (x *DeleteNotificationConfigRequest) Reset() { + *x = DeleteNotificationConfigRequest{} if protoimpl.UnsafeEnabled { mi := &file_google_storage_v2_storage_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -761,13 +761,13 @@ } } -func (x *DeleteNotificationRequest) String() string { +func (x *DeleteNotificationConfigRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*DeleteNotificationRequest) ProtoMessage() {} +func (*DeleteNotificationConfigRequest) ProtoMessage() {} -func (x *DeleteNotificationRequest) ProtoReflect() protoreflect.Message { +func (x *DeleteNotificationConfigRequest) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -779,32 +779,32 @@ return mi.MessageOf(x) } -// Deprecated: Use DeleteNotificationRequest.ProtoReflect.Descriptor instead. -func (*DeleteNotificationRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use DeleteNotificationConfigRequest.ProtoReflect.Descriptor instead. +func (*DeleteNotificationConfigRequest) Descriptor() ([]byte, []int) { return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{7} } -func (x *DeleteNotificationRequest) GetName() string { +func (x *DeleteNotificationConfigRequest) GetName() string { if x != nil { return x.Name } return "" } -// Request message for GetNotification. -type GetNotificationRequest struct { +// Request message for GetNotificationConfig. 
+type GetNotificationConfigRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Required. The parent bucket of the notification. + // Required. The parent bucket of the NotificationConfig. // Format: - // `projects/{project}/buckets/{bucket}/notificationConfigs/{notification}` + // `projects/{project}/buckets/{bucket}/notificationConfigs/{notificationConfig}` Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` } -func (x *GetNotificationRequest) Reset() { - *x = GetNotificationRequest{} +func (x *GetNotificationConfigRequest) Reset() { + *x = GetNotificationConfigRequest{} if protoimpl.UnsafeEnabled { mi := &file_google_storage_v2_storage_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -812,13 +812,13 @@ } } -func (x *GetNotificationRequest) String() string { +func (x *GetNotificationConfigRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetNotificationRequest) ProtoMessage() {} +func (*GetNotificationConfigRequest) ProtoMessage() {} -func (x *GetNotificationRequest) ProtoReflect() protoreflect.Message { +func (x *GetNotificationConfigRequest) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[8] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -830,32 +830,32 @@ return mi.MessageOf(x) } -// Deprecated: Use GetNotificationRequest.ProtoReflect.Descriptor instead. -func (*GetNotificationRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use GetNotificationConfigRequest.ProtoReflect.Descriptor instead. +func (*GetNotificationConfigRequest) Descriptor() ([]byte, []int) { return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{8} } -func (x *GetNotificationRequest) GetName() string { +func (x *GetNotificationConfigRequest) GetName() string { if x != nil { return x.Name } return "" } -// Request message for CreateNotification. -type CreateNotificationRequest struct { +// Request message for CreateNotificationConfig. +type CreateNotificationConfigRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Required. The bucket to which this notification belongs. + // Required. The bucket to which this NotificationConfig belongs. Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` - // Required. Properties of the notification to be inserted. - Notification *Notification `protobuf:"bytes,2,opt,name=notification,proto3" json:"notification,omitempty"` + // Required. Properties of the NotificationConfig to be inserted. 
+ NotificationConfig *NotificationConfig `protobuf:"bytes,2,opt,name=notification_config,json=notificationConfig,proto3" json:"notification_config,omitempty"` } -func (x *CreateNotificationRequest) Reset() { - *x = CreateNotificationRequest{} +func (x *CreateNotificationConfigRequest) Reset() { + *x = CreateNotificationConfigRequest{} if protoimpl.UnsafeEnabled { mi := &file_google_storage_v2_storage_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -863,13 +863,13 @@ } } -func (x *CreateNotificationRequest) String() string { +func (x *CreateNotificationConfigRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*CreateNotificationRequest) ProtoMessage() {} +func (*CreateNotificationConfigRequest) ProtoMessage() {} -func (x *CreateNotificationRequest) ProtoReflect() protoreflect.Message { +func (x *CreateNotificationConfigRequest) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[9] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -881,48 +881,47 @@ return mi.MessageOf(x) } -// Deprecated: Use CreateNotificationRequest.ProtoReflect.Descriptor instead. -func (*CreateNotificationRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use CreateNotificationConfigRequest.ProtoReflect.Descriptor instead. +func (*CreateNotificationConfigRequest) Descriptor() ([]byte, []int) { return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{9} } -func (x *CreateNotificationRequest) GetParent() string { +func (x *CreateNotificationConfigRequest) GetParent() string { if x != nil { return x.Parent } return "" } -func (x *CreateNotificationRequest) GetNotification() *Notification { +func (x *CreateNotificationConfigRequest) GetNotificationConfig() *NotificationConfig { if x != nil { - return x.Notification + return x.NotificationConfig } return nil } // Request message for ListNotifications. -type ListNotificationsRequest struct { +type ListNotificationConfigsRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // Required. Name of a Google Cloud Storage bucket. Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` - // The maximum number of notifications to return. The service may return fewer - // than this value. - // The default value is 100. Specifying a value above 100 will result in a - // page_size of 100. + // The maximum number of NotificationConfigs to return. The service may + // return fewer than this value. The default value is 100. Specifying a value + // above 100 will result in a page_size of 100. PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` - // A page token, received from a previous `ListNotifications` call. + // A page token, received from a previous `ListNotificationConfigs` call. // Provide this to retrieve the subsequent page. // - // When paginating, all other parameters provided to `ListNotifications` must - // match the call that provided the page token. + // When paginating, all other parameters provided to `ListNotificationConfigs` + // must match the call that provided the page token. 
PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` } -func (x *ListNotificationsRequest) Reset() { - *x = ListNotificationsRequest{} +func (x *ListNotificationConfigsRequest) Reset() { + *x = ListNotificationConfigsRequest{} if protoimpl.UnsafeEnabled { mi := &file_google_storage_v2_storage_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -930,13 +929,13 @@ } } -func (x *ListNotificationsRequest) String() string { +func (x *ListNotificationConfigsRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ListNotificationsRequest) ProtoMessage() {} +func (*ListNotificationConfigsRequest) ProtoMessage() {} -func (x *ListNotificationsRequest) ProtoReflect() protoreflect.Message { +func (x *ListNotificationConfigsRequest) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[10] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -948,47 +947,47 @@ return mi.MessageOf(x) } -// Deprecated: Use ListNotificationsRequest.ProtoReflect.Descriptor instead. -func (*ListNotificationsRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use ListNotificationConfigsRequest.ProtoReflect.Descriptor instead. +func (*ListNotificationConfigsRequest) Descriptor() ([]byte, []int) { return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{10} } -func (x *ListNotificationsRequest) GetParent() string { +func (x *ListNotificationConfigsRequest) GetParent() string { if x != nil { return x.Parent } return "" } -func (x *ListNotificationsRequest) GetPageSize() int32 { +func (x *ListNotificationConfigsRequest) GetPageSize() int32 { if x != nil { return x.PageSize } return 0 } -func (x *ListNotificationsRequest) GetPageToken() string { +func (x *ListNotificationConfigsRequest) GetPageToken() string { if x != nil { return x.PageToken } return "" } -// The result of a call to Notifications.ListNotifications -type ListNotificationsResponse struct { +// The result of a call to ListNotificationConfigs +type ListNotificationConfigsResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // The list of items. - Notifications []*Notification `protobuf:"bytes,1,rep,name=notifications,proto3" json:"notifications,omitempty"` + NotificationConfigs []*NotificationConfig `protobuf:"bytes,1,rep,name=notification_configs,json=notificationConfigs,proto3" json:"notification_configs,omitempty"` // A token, which can be sent as `page_token` to retrieve the next page. // If this field is omitted, there are no subsequent pages. 
NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` } -func (x *ListNotificationsResponse) Reset() { - *x = ListNotificationsResponse{} +func (x *ListNotificationConfigsResponse) Reset() { + *x = ListNotificationConfigsResponse{} if protoimpl.UnsafeEnabled { mi := &file_google_storage_v2_storage_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -996,13 +995,13 @@ } } -func (x *ListNotificationsResponse) String() string { +func (x *ListNotificationConfigsResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ListNotificationsResponse) ProtoMessage() {} +func (*ListNotificationConfigsResponse) ProtoMessage() {} -func (x *ListNotificationsResponse) ProtoReflect() protoreflect.Message { +func (x *ListNotificationConfigsResponse) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -1014,19 +1013,19 @@ return mi.MessageOf(x) } -// Deprecated: Use ListNotificationsResponse.ProtoReflect.Descriptor instead. -func (*ListNotificationsResponse) Descriptor() ([]byte, []int) { +// Deprecated: Use ListNotificationConfigsResponse.ProtoReflect.Descriptor instead. +func (*ListNotificationConfigsResponse) Descriptor() ([]byte, []int) { return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{11} } -func (x *ListNotificationsResponse) GetNotifications() []*Notification { +func (x *ListNotificationConfigsResponse) GetNotificationConfigs() []*NotificationConfig { if x != nil { - return x.Notifications + return x.NotificationConfigs } return nil } -func (x *ListNotificationsResponse) GetNextPageToken() string { +func (x *ListNotificationConfigsResponse) GetNextPageToken() string { if x != nil { return x.NextPageToken } @@ -4432,39 +4431,39 @@ } // A directive to publish Pub/Sub notifications upon changes to a bucket. -type Notification struct { +type NotificationConfig struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Required. The resource name of this notification. + // Required. The resource name of this NotificationConfig. // Format: - // `projects/{project}/buckets/{bucket}/notificationConfigs/{notification}` + // `projects/{project}/buckets/{bucket}/notificationConfigs/{notificationConfig}` // The `{project}` portion may be `_` for globally unique buckets. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Required. The Pub/Sub topic to which this subscription publishes. Formatted // as: // '//pubsub.googleapis.com/projects/{project-identifier}/topics/{my-topic}' Topic string `protobuf:"bytes,2,opt,name=topic,proto3" json:"topic,omitempty"` - // The etag of the Notification. - // If included in the metadata of GetNotificationRequest, the operation will - // only be performed if the etag matches that of the Notification. + // The etag of the NotificationConfig. + // If included in the metadata of GetNotificationConfigRequest, the operation + // will only be performed if the etag matches that of the NotificationConfig. Etag string `protobuf:"bytes,7,opt,name=etag,proto3" json:"etag,omitempty"` - // If present, only send notifications about listed event types. If empty, - // sent notifications for all event types. + // If present, only send notifications about listed event types. If + // empty, sent notifications for all event types. 
EventTypes []string `protobuf:"bytes,3,rep,name=event_types,json=eventTypes,proto3" json:"event_types,omitempty"` // A list of additional attributes to attach to each Pub/Sub - // message published for this notification subscription. + // message published for this NotificationConfig. CustomAttributes map[string]string `protobuf:"bytes,4,rep,name=custom_attributes,json=customAttributes,proto3" json:"custom_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // If present, only apply this notification config to object names that + // If present, only apply this NotificationConfig to object names that // begin with this prefix. ObjectNamePrefix string `protobuf:"bytes,5,opt,name=object_name_prefix,json=objectNamePrefix,proto3" json:"object_name_prefix,omitempty"` // Required. The desired content of the Payload. PayloadFormat string `protobuf:"bytes,6,opt,name=payload_format,json=payloadFormat,proto3" json:"payload_format,omitempty"` } -func (x *Notification) Reset() { - *x = Notification{} +func (x *NotificationConfig) Reset() { + *x = NotificationConfig{} if protoimpl.UnsafeEnabled { mi := &file_google_storage_v2_storage_proto_msgTypes[45] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -4472,13 +4471,13 @@ } } -func (x *Notification) String() string { +func (x *NotificationConfig) String() string { return protoimpl.X.MessageStringOf(x) } -func (*Notification) ProtoMessage() {} +func (*NotificationConfig) ProtoMessage() {} -func (x *Notification) ProtoReflect() protoreflect.Message { +func (x *NotificationConfig) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[45] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -4490,54 +4489,54 @@ return mi.MessageOf(x) } -// Deprecated: Use Notification.ProtoReflect.Descriptor instead. -func (*Notification) Descriptor() ([]byte, []int) { +// Deprecated: Use NotificationConfig.ProtoReflect.Descriptor instead. 
+func (*NotificationConfig) Descriptor() ([]byte, []int) { return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{45} } -func (x *Notification) GetName() string { +func (x *NotificationConfig) GetName() string { if x != nil { return x.Name } return "" } -func (x *Notification) GetTopic() string { +func (x *NotificationConfig) GetTopic() string { if x != nil { return x.Topic } return "" } -func (x *Notification) GetEtag() string { +func (x *NotificationConfig) GetEtag() string { if x != nil { return x.Etag } return "" } -func (x *Notification) GetEventTypes() []string { +func (x *NotificationConfig) GetEventTypes() []string { if x != nil { return x.EventTypes } return nil } -func (x *Notification) GetCustomAttributes() map[string]string { +func (x *NotificationConfig) GetCustomAttributes() map[string]string { if x != nil { return x.CustomAttributes } return nil } -func (x *Notification) GetObjectNamePrefix() string { +func (x *NotificationConfig) GetObjectNamePrefix() string { if x != nil { return x.ObjectNamePrefix } return "" } -func (x *Notification) GetPayloadFormat() string { +func (x *NotificationConfig) GetPayloadFormat() string { if x != nil { return x.PayloadFormat } @@ -6678,1491 +6677,1503 @@ 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, - 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0x5c, 0x0a, 0x19, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x2b, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x25, 0x0a, 0x23, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x53, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x39, - 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, - 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xa4, 0x01, 0x0a, 0x19, 0x43, 0x72, - 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, - 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x12, - 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, - 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, - 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x48, 0x0a, 0x0c, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 
0x6e, 0x42, 0x03, 0xe0, - 0x41, 0x02, 0x52, 0x0c, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x22, 0x95, 0x01, 0x0a, 0x18, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, - 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, - 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x12, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, - 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, - 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, - 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, - 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x8a, 0x01, 0x0a, 0x19, 0x4c, 0x69, 0x73, - 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x45, 0x0a, 0x0d, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, + 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0x68, 0x0a, 0x1f, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x45, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x31, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2b, 0x0a, + 0x29, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x22, 0x59, 0x0a, 0x1c, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x39, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, + 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xbd, 0x01, 0x0a, 0x1f, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x3d, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x12, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x5b, + 0x0a, 0x13, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, + 
0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x12, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x9b, 0x01, 0x0a, 0x1e, + 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, + 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, + 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x12, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, + 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, + 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xa3, 0x01, 0x0a, 0x1f, 0x4c, 0x69, + 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x58, 0x0a, + 0x14, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, + 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x52, 0x13, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, + 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, + 0xc3, 0x07, 0x0a, 0x14, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x40, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, + 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, - 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x26, 0x0a, - 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, - 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xc3, 0x07, 0x0a, 0x14, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, - 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x40, - 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x42, 0x03, - 0xe0, 0x41, 0x02, 0x52, 
0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x5b, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, - 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x0d, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x3c, 0x0a, - 0x1a, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, - 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x18, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, - 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x33, 0x0a, 0x13, 0x69, - 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, - 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x03, 0x48, 0x01, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3f, 0x0a, - 0x07, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, - 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, - 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x06, 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x6d, - 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, - 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x07, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, - 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x4d, 0x0a, - 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, - 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x1a, 0xa8, 0x02, 0x0a, - 0x0c, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x17, 0x0a, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, - 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1e, 
0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7b, 0x0a, 0x14, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x5f, 0x70, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x48, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, - 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x53, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x13, - 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x1a, 0x62, 0x0a, 0x13, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50, 0x72, 0x65, - 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, + 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x64, + 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x5b, 0x0a, 0x0e, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x53, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x3c, 0x0a, 0x1a, 0x64, 0x65, 0x73, 0x74, 0x69, + 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, + 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x18, 0x64, 0x65, 0x73, + 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, + 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, + 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x15, 0x69, + 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3f, 0x0a, 0x07, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, + 0x65, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, + 0x52, 0x06, 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x07, 0x20, 0x01, 
0x28, 0x0b, 0x32, 0x2c, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x1a, 0xa8, 0x02, 0x0a, 0x0c, 0x53, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x7b, 0x0a, 0x14, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x63, 0x6f, + 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x48, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50, 0x72, 0x65, 0x63, 0x6f, + 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x13, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x62, 0x0a, + 0x13, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, - 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x42, - 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, - 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, - 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xc0, 0x04, 0x0a, 0x13, - 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, - 
0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1e, 0x0a, - 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x0a, - 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, - 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, - 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, - 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, - 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, - 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, - 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, - 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, - 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, - 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, - 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, - 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x16, - 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, - 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0x3f, - 0x0a, 0x1b, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, - 0x65, 0x57, 0x72, 0x69, 
0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, - 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x22, - 0x1e, 0x0a, 0x1c, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, - 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0xca, 0x05, 0x0a, 0x11, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, - 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, - 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x72, 0x65, 0x61, 0x64, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, - 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, - 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, - 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, - 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, - 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, - 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x09, 0x20, 0x01, - 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, - 0x01, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, - 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, - 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, - 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 
0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, - 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x0c, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48, - 0x04, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x16, - 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, + 0x68, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, + 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xc0, 0x04, 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, + 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, + 0x41, 0x02, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, + 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, + 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, + 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, + 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, + 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, + 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, + 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, + 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 
0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, + 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, + 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, + 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0x3f, 0x0a, 0x1b, 0x43, 0x61, 0x6e, 0x63, + 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, + 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, + 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x22, 0x1e, 0x0a, 0x1c, 0x43, 0x61, 0x6e, + 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xca, 0x05, 0x0a, 0x11, 0x52, 0x65, + 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x1b, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x06, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, + 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x61, + 0x64, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, + 0x72, 0x65, 0x61, 0x64, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, + 0x61, 0x64, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, + 0x72, 0x65, 0x61, 0x64, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, + 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, + 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, + 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, + 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 
0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, + 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, + 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, + 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, + 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x6d, 0x0a, 0x1c, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, + 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, + 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48, 0x04, 0x52, 0x08, 0x72, 0x65, 0x61, + 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, + 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, + 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, + 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, + 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0x89, 0x05, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x62, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, + 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, + 0x5f, 0x67, 0x65, 0x6e, 
0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, + 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, + 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, + 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, - 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0c, - 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0x89, 0x05, 0x0a, - 0x10, 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, - 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, - 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x67, - 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x0a, 0x13, 0x69, + 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, + 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, + 0x61, 0x73, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, + 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48, 0x04, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, + 0x6b, 0x88, 0x01, 0x01, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, + 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 
0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, + 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, + 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, + 0x73, 0x6b, 0x22, 0xaf, 0x02, 0x0a, 0x12, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4d, 0x0a, 0x10, 0x63, 0x68, 0x65, + 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, + 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x52, 0x0f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, + 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, + 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x44, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x65, + 0x6e, 0x74, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, + 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x35, 0x0a, + 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x22, 0x8c, 0x04, 0x0a, 0x0f, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x3a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, + 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, + 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, + 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 
0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, + 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, - 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, + 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x6d, 0x0a, - 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, - 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x3c, 0x0a, 0x09, - 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48, 0x04, 0x52, 0x08, 0x72, - 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, - 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, - 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, - 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, - 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0xaf, 0x02, 0x0a, 0x12, 0x52, 0x65, 0x61, - 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x4d, 0x0a, 0x10, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x5f, 0x64, - 0x61, 0x74, 0x61, 
0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68, - 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x52, 0x0f, 0x63, - 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, 0x4d, - 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, - 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x44, 0x0a, - 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, - 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x61, - 0x6e, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x8c, 0x04, 0x0a, 0x0f, 0x57, - 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x3a, - 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, - 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, - 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x07, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, - 0x6c, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, - 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, - 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, - 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, - 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, - 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 
0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, - 0x88, 0x01, 0x01, 0x12, 0x24, 0x0a, 0x0b, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x69, - 0x7a, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 0x04, 0x52, 0x0a, 0x6f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x53, 0x69, 0x7a, 0x65, 0x88, 0x01, 0x01, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, - 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, - 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, - 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, - 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x6f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x22, 0xf8, 0x03, 0x0a, 0x12, 0x57, 0x72, - 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x1d, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x12, - 0x50, 0x0a, 0x11, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, - 0x73, 0x70, 0x65, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, - 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x48, 0x00, - 0x52, 0x0f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, - 0x63, 0x12, 0x26, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, - 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x77, 0x72, - 0x69, 0x74, 0x65, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x4f, 0x0a, 0x10, 0x63, 0x68, 0x65, - 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, - 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x48, 0x01, 0x52, 0x0f, 0x63, 0x68, 0x65, 0x63, 0x6b, - 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x06, + 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x24, 0x0a, + 0x0b, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x03, 0x48, 0x04, 0x52, 0x0a, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x69, 0x7a, 0x65, + 0x88, 0x01, 0x01, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, + 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 
0x6f, 0x6e, 0x5f, 0x6e, 0x6f, + 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, + 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, + 0x69, 0x7a, 0x65, 0x22, 0xf8, 0x03, 0x0a, 0x12, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x09, 0x75, 0x70, + 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, + 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x12, 0x50, 0x0a, 0x11, 0x77, 0x72, 0x69, + 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, - 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x69, 0x6e, - 0x69, 0x73, 0x68, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x0b, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x6d, 0x0a, 0x1c, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x48, 0x00, 0x52, 0x0f, 0x77, 0x72, 0x69, 0x74, + 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x26, 0x0a, 0x0c, 0x77, + 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x66, 0x66, + 0x73, 0x65, 0x74, 0x12, 0x4f, 0x0a, 0x10, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, + 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, + 0x61, 0x48, 0x01, 0x52, 0x0f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, + 0x44, 0x61, 0x74, 0x61, 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, + 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, + 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, + 0x75, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x5f, 0x77, 0x72, + 0x69, 0x74, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x66, 0x69, 0x6e, 0x69, 0x73, + 0x68, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, + 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 
0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, + 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x0f, 0x0a, 0x0d, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x06, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x87, + 0x01, 0x0a, 0x13, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, 0x0e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, + 0x74, 0x65, 0x64, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, + 0x52, 0x0d, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x12, + 0x37, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x08, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x0e, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, + 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0xd3, 0x03, 0x0a, 0x12, 0x4c, 0x69, 0x73, + 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x3d, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x12, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, + 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, + 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x65, + 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, + 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x12, 0x3c, 0x0a, 0x1a, 0x69, 0x6e, 0x63, 0x6c, + 0x75, 0x64, 0x65, 0x5f, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x6c, + 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x69, 0x6e, + 0x63, 0x6c, 0x75, 0x64, 0x65, 0x54, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x6c, + 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x1a, + 0x0a, 0x08, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x08, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, + 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x61, + 0x64, 0x4d, 0x61, 
0x73, 0x6b, 0x88, 0x01, 0x01, 0x12, 0x34, 0x0a, 0x13, 0x6c, 0x65, 0x78, 0x69, + 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, + 0x0a, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x12, 0x6c, 0x65, 0x78, 0x69, + 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x30, + 0x0a, 0x11, 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x5f, + 0x65, 0x6e, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x10, + 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x45, 0x6e, 0x64, + 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0xaa, + 0x01, 0x0a, 0x17, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x75, 0x70, + 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, + 0x41, 0x02, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x0f, 0x0a, 0x0d, 0x66, - 0x69, 0x72, 0x73, 0x74, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x06, 0x0a, 0x04, - 0x64, 0x61, 0x74, 0x61, 0x22, 0x87, 0x01, 0x0a, 0x13, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, 0x0e, - 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0d, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, - 0x64, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x0e, - 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0xd3, - 0x03, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x12, 0x1d, 0x73, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x22, 0x8c, 0x01, 0x0a, 0x18, + 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, 0x0e, 0x70, 0x65, 0x72, 0x73, + 0x69, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x69, 0x7a, 
0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, + 0x48, 0x00, 0x52, 0x0d, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x53, 0x69, 0x7a, + 0x65, 0x12, 0x37, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x48, 0x00, + 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x0e, 0x0a, 0x0c, 0x77, 0x72, + 0x69, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x93, 0x0e, 0x0a, 0x14, 0x52, + 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x31, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x18, 0x20, 0x01, 0x28, 0x09, 0x42, 0x06, 0xe0, + 0x41, 0x02, 0xe0, 0x41, 0x05, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x57, 0x0a, 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x19, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x28, 0xe0, 0x41, 0x02, 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, - 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x70, 0x61, - 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, - 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, - 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x12, 0x3c, - 0x0a, 0x1a, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, - 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x18, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x54, 0x72, 0x61, 0x69, 0x6c, - 0x69, 0x6e, 0x67, 0x44, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, - 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, - 0x65, 0x66, 0x69, 0x78, 0x12, 0x1a, 0x0a, 0x08, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, - 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, - 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x08, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48, - 0x00, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x12, 0x34, - 0x0a, 0x13, 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x5f, - 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, - 0x52, 0x12, 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x53, - 0x74, 0x61, 0x72, 0x74, 0x12, 0x30, 0x0a, 0x11, 0x6c, 0x65, 0x78, 0x69, 0x63, 
0x6f, 0x67, 0x72, - 0x61, 0x70, 0x68, 0x69, 0x63, 0x5f, 0x65, 0x6e, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x03, 0xe0, 0x41, 0x01, 0x52, 0x10, 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, - 0x68, 0x69, 0x63, 0x45, 0x6e, 0x64, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, - 0x6d, 0x61, 0x73, 0x6b, 0x22, 0xaa, 0x01, 0x0a, 0x17, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, - 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x20, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, - 0x49, 0x64, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x11, 0x64, 0x65, + 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, + 0x56, 0x0a, 0x13, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, + 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, + 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, + 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x11, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x4b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x3b, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, + 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x62, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, + 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x28, + 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x2b, 0x0a, 0x11, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, + 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, + 0x77, 0x72, 0x69, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x3c, 0x0a, 0x1a, 0x64, 0x65, + 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, + 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x18, + 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x64, 0x65, + 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, + 0x61, 
0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, + 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, + 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, + 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, + 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, + 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, + 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, + 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, + 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, + 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x40, 0x0a, 0x1a, 0x69, 0x66, + 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x03, 0x48, 0x04, + 0x52, 0x17, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x47, 0x0a, 0x1e, + 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0c, + 0x20, 0x01, 0x28, 0x03, 0x48, 0x05, 0x52, 0x1a, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x48, 0x0a, 0x1e, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x03, 0x48, 0x06, 0x52, + 0x1b, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, + 0x4f, 0x0a, 0x22, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, + 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x48, 0x07, 0x52, 0x1e, 0x69, + 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, + 0x12, 0x3e, 0x0a, 0x1c, 0x6d, 0x61, 0x78, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x72, 0x65, + 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x63, 0x61, 0x6c, 0x6c, + 0x18, 0x0f, 0x20, 0x01, 0x28, 0x03, 0x52, 0x18, 0x6d, 0x61, 0x78, 0x42, 0x79, 0x74, 0x65, 0x73, + 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x50, 0x65, 0x72, 0x43, 0x61, 0x6c, 0x6c, + 0x12, 0x47, 0x0a, 0x20, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, + 0x65, 0x6e, 0x63, 0x72, 0x79, 
0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, + 0x69, 0x74, 0x68, 0x6d, 0x18, 0x10, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1d, 0x63, 0x6f, 0x70, 0x79, + 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x46, 0x0a, 0x20, 0x63, 0x6f, 0x70, + 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x15, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x1c, 0x63, 0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, + 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x42, 0x79, 0x74, 0x65, + 0x73, 0x12, 0x53, 0x0a, 0x27, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, + 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x16, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x22, 0x63, 0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x6e, + 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x53, 0x68, 0x61, 0x32, 0x35, + 0x36, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, + 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, + 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, + 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, + 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, + 0x73, 0x75, 0x6d, 0x73, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, + 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, + 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, + 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x42, 0x1d, 0x0a, 0x1b, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x42, 0x21, 0x0a, 0x1f, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, + 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x21, 0x0a, 
0x1f, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x25, 0x0a, 0x23, 0x5f, 0x69, 0x66, + 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x22, 0xd6, 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x15, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x62, 0x79, + 0x74, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x13, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, + 0x65, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x6f, 0x6e, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x12, 0x23, 0x0a, + 0x0d, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x54, 0x6f, 0x6b, + 0x65, 0x6e, 0x12, 0x35, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, + 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, 0xaf, 0x02, 0x0a, 0x1a, 0x53, 0x74, + 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x53, 0x0a, 0x11, 0x77, 0x72, 0x69, 0x74, + 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0f, 0x77, 0x72, + 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x6d, 0x0a, + 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, + 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x4d, 0x0a, 0x10, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 
0x62, 0x6a, 0x65, + 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x22, 0x3a, 0x0a, 0x1b, 0x53, + 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, + 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x70, + 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, + 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x22, 0x87, 0x05, 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x36, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, + 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, + 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, + 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, + 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, + 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, + 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, + 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, + 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, + 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x0a, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, + 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, + 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, + 0x73, 0x6b, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, - 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 
0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, - 0x73, 0x22, 0x8c, 0x01, 0x0a, 0x18, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, - 0x0a, 0x0e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x69, 0x7a, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0d, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, - 0x74, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x42, 0x0e, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x22, 0x93, 0x0e, 0x0a, 0x14, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x31, 0x0a, 0x10, 0x64, 0x65, 0x73, - 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x18, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x06, 0xe0, 0x41, 0x02, 0xe0, 0x41, 0x05, 0x52, 0x0f, 0x64, 0x65, 0x73, - 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x57, 0x0a, 0x12, - 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x18, 0x19, 0x20, 0x01, 0x28, 0x09, 0x42, 0x28, 0xe0, 0x41, 0x02, 0xe0, 0x41, 0x05, - 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x52, 0x11, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, - 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x56, 0x0a, 0x13, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x1b, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, - 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, - 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x11, 0x64, 0x65, 0x73, 0x74, - 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x3b, 0x0a, - 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x0b, 0x64, - 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x0d, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x12, 0x28, 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, - 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 
0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x2b, - 0x0a, 0x11, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x72, - 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, - 0x12, 0x3c, 0x0a, 0x1a, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x1c, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x18, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x50, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x33, - 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, - 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, - 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, - 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, - 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, - 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, - 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0a, 0x20, 0x01, 0x28, - 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, - 0x12, 0x40, 0x0a, 0x1a, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0b, - 0x20, 0x01, 0x28, 0x03, 0x48, 0x04, 0x52, 0x17, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, - 0x01, 0x01, 0x12, 0x47, 0x0a, 0x1e, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, - 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, - 0x61, 0x74, 0x63, 0x68, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x03, 0x48, 0x05, 0x52, 0x1a, 0x69, 0x66, - 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x48, 0x0a, 0x1e, 0x69, - 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0d, 0x20, - 0x01, 0x28, 0x03, 0x48, 0x06, 0x52, 0x1b, 0x69, 0x66, 0x53, 0x6f, 
0x75, 0x72, 0x63, 0x65, 0x4d, - 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, - 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x4f, 0x0a, 0x22, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0e, 0x20, 0x01, 0x28, - 0x03, 0x48, 0x07, 0x52, 0x1e, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, - 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, - 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3e, 0x0a, 0x1c, 0x6d, 0x61, 0x78, 0x5f, 0x62, 0x79, - 0x74, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x70, 0x65, - 0x72, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x03, 0x52, 0x18, 0x6d, 0x61, - 0x78, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x50, - 0x65, 0x72, 0x43, 0x61, 0x6c, 0x6c, 0x12, 0x47, 0x0a, 0x20, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x10, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x1d, 0x63, 0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x6e, 0x63, 0x72, - 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, - 0x46, 0x0a, 0x20, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x65, - 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x62, 0x79, - 0x74, 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x1c, 0x63, 0x6f, 0x70, 0x79, 0x53, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, - 0x65, 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x53, 0x0a, 0x27, 0x63, 0x6f, 0x70, 0x79, 0x5f, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x62, 0x79, 0x74, - 0x65, 0x73, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x22, 0x63, 0x6f, 0x70, 0x79, 0x53, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, - 0x79, 0x53, 0x68, 0x61, 0x32, 0x35, 0x36, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x6d, 0x0a, 0x1c, - 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x13, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, - 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x4d, 0x0a, 0x10, 0x6f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, - 0x1d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 
0x63, - 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, - 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, - 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, - 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1d, 0x0a, 0x1b, 0x5f, 0x69, - 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x21, 0x0a, 0x1f, 0x5f, 0x69, 0x66, - 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x21, 0x0a, 0x1f, - 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, - 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, - 0x25, 0x0a, 0x23, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, - 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, - 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xd6, 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x77, 0x72, 0x69, - 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x15, 0x74, 0x6f, - 0x74, 0x61, 0x6c, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, - 0x74, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x13, 0x74, 0x6f, 0x74, 0x61, 0x6c, - 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x12, 0x1f, - 0x0a, 0x0b, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x0a, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x69, 0x7a, 0x65, 0x12, - 0x12, 0x0a, 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x64, - 0x6f, 0x6e, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x74, - 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x77, 0x72, - 0x69, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x35, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, - 0xaf, 0x02, 0x0a, 0x1a, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, - 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x53, - 0x0a, 0x11, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, - 0x70, 0x65, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, - 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x42, 0x03, 0xe0, - 0x41, 0x02, 0x52, 
0x0f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, - 0x70, 0x65, 0x63, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, - 0x61, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, - 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, - 0x6d, 0x73, 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, - 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, - 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, - 0x73, 0x22, 0x3a, 0x0a, 0x1b, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, - 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x22, 0x87, 0x05, - 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x36, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x33, 0x0a, - 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, - 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, - 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, - 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, - 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, - 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, - 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, - 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 
0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, - 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, - 0x6c, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, - 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, - 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, - 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, - 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, - 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, - 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, - 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, - 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, - 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0x69, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, + 0x73, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, + 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, + 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x22, 0x69, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, + 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, + 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, + 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 
0x2f, 0x50, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x9e, 0x01, 0x0a, + 0x14, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, + 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x12, 0x37, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, + 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x22, 0x81, 0x01, + 0x0a, 0x15, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, + 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x65, 0x63, 0x72, 0x65, + 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x0e, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x42, 0x79, 0x74, 0x65, + 0x73, 0x22, 0x87, 0x01, 0x0a, 0x14, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, + 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x61, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, + 0x41, 0x02, 0x52, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x49, 0x64, 0x12, 0x4d, 0x0a, 0x07, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, + 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x84, 0x01, 0x0a, 0x11, + 0x47, 0x65, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x20, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x49, 0x64, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, - 
0x63, 0x74, 0x22, 0x9e, 0x01, 0x0a, 0x14, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, - 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x07, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, - 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x37, 0x0a, 0x15, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x6d, - 0x61, 0x69, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x13, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x45, 0x6d, - 0x61, 0x69, 0x6c, 0x22, 0x81, 0x01, 0x0a, 0x15, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, - 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a, - 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, - 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x28, 0x0a, - 0x10, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x62, 0x79, 0x74, 0x65, - 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4b, - 0x65, 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x87, 0x01, 0x0a, 0x14, 0x44, 0x65, 0x6c, 0x65, - 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x20, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, - 0x49, 0x64, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, - 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, - 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x22, 0x84, 0x01, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73, - 0x73, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, - 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x49, 0x64, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, + 0x63, 0x74, 0x22, 0x80, 0x02, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, + 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, + 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x6d, 0x61, 
0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, + 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, + 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, + 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, + 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x32, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, + 0x6f, 0x75, 0x6e, 0x74, 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x2a, 0x0a, 0x11, 0x73, 0x68, 0x6f, + 0x77, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x73, 0x68, 0x6f, 0x77, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x64, 0x4b, 0x65, 0x79, 0x73, 0x22, 0x7f, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, + 0x63, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, + 0x09, 0x68, 0x6d, 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x68, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x26, + 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, + 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, + 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x97, 0x01, 0x0a, 0x14, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x42, 0x0a, 0x08, 0x68, 0x6d, 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x07, 0x68, 0x6d, 0x61, 0x63, + 0x4b, 0x65, 0x79, 0x12, 0x3b, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, + 0x73, 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x4d, 0x61, 0x73, 0x6b, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, + 0x22, 0xbf, 0x01, 0x0a, 0x19, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x31, + 0x0a, 0x14, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x6c, 0x67, + 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x65, 0x6e, + 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, + 0x6d, 0x12, 0x30, 0x0a, 0x14, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6b, 0x65, 0x79, 0x5f, 0x62, 0x79, 0x74, 0x65, 
0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x12, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x42, 0x79, + 0x74, 0x65, 0x73, 0x12, 0x3d, 0x0a, 0x1b, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x62, 0x79, 0x74, + 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x18, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x53, 0x68, 0x61, 0x32, 0x35, 0x36, 0x42, 0x79, 0x74, + 0x65, 0x73, 0x22, 0xca, 0x05, 0x0a, 0x10, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, + 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x73, 0x22, 0xb5, 0x05, 0x0a, 0x06, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x73, 0x12, 0x16, 0x0a, 0x12, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x53, 0x5f, 0x55, 0x4e, 0x53, + 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1b, 0x0a, 0x14, 0x4d, 0x41, + 0x58, 0x5f, 0x52, 0x45, 0x41, 0x44, 0x5f, 0x43, 0x48, 0x55, 0x4e, 0x4b, 0x5f, 0x42, 0x59, 0x54, + 0x45, 0x53, 0x10, 0x80, 0x80, 0x80, 0x01, 0x12, 0x1c, 0x0a, 0x15, 0x4d, 0x41, 0x58, 0x5f, 0x57, + 0x52, 0x49, 0x54, 0x45, 0x5f, 0x43, 0x48, 0x55, 0x4e, 0x4b, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, + 0x10, 0x80, 0x80, 0x80, 0x01, 0x12, 0x19, 0x0a, 0x12, 0x4d, 0x41, 0x58, 0x5f, 0x4f, 0x42, 0x4a, + 0x45, 0x43, 0x54, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x5f, 0x4d, 0x42, 0x10, 0x80, 0x80, 0xc0, 0x02, + 0x12, 0x29, 0x0a, 0x24, 0x4d, 0x41, 0x58, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x4d, + 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x4e, 0x41, + 0x4d, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x08, 0x12, 0x2a, 0x0a, 0x25, 0x4d, + 0x41, 0x58, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, + 0x54, 0x41, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x42, + 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x20, 0x12, 0x29, 0x0a, 0x24, 0x4d, 0x41, 0x58, 0x5f, 0x43, + 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x54, + 0x4f, 0x54, 0x41, 0x4c, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, + 0x80, 0x40, 0x12, 0x2a, 0x0a, 0x24, 0x4d, 0x41, 0x58, 0x5f, 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54, + 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x54, 0x4f, 0x54, 0x41, 0x4c, 0x5f, + 0x53, 0x49, 0x5a, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0xa0, 0x01, 0x12, 0x27, + 0x0a, 0x23, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, + 0x4f, 0x4e, 0x5f, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, 0x53, 0x5f, 0x50, 0x45, 0x52, 0x5f, 0x42, + 0x55, 0x43, 0x4b, 0x45, 0x54, 0x10, 0x64, 0x12, 0x22, 0x0a, 0x1e, 0x4d, 0x41, 0x58, 0x5f, 0x4c, + 0x49, 0x46, 0x45, 0x43, 0x59, 0x43, 0x4c, 0x45, 0x5f, 0x52, 0x55, 0x4c, 0x45, 0x53, 0x5f, 0x50, + 0x45, 0x52, 0x5f, 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54, 0x10, 0x64, 0x12, 0x26, 0x0a, 0x22, 0x4d, + 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, + 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x41, 0x54, 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, 0x45, + 0x53, 0x10, 0x05, 0x12, 0x31, 0x0a, 0x2c, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, + 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x41, + 0x54, 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, 0x45, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x4c, 0x45, 0x4e, + 0x47, 0x54, 0x48, 0x10, 0x80, 0x02, 0x12, 0x33, 0x0a, 0x2e, 0x4d, 0x41, 
0x58, 0x5f, 0x4e, 0x4f, + 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, + 0x4d, 0x5f, 0x41, 0x54, 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, 0x45, 0x5f, 0x56, 0x41, 0x4c, 0x55, + 0x45, 0x5f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x10, 0x80, 0x08, 0x12, 0x1c, 0x0a, 0x18, 0x4d, + 0x41, 0x58, 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x53, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x49, 0x45, + 0x53, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x10, 0x40, 0x12, 0x1f, 0x0a, 0x1b, 0x4d, 0x41, 0x58, + 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x53, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x56, 0x41, 0x4c, 0x55, + 0x45, 0x5f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x10, 0x3f, 0x12, 0x1f, 0x0a, 0x1a, 0x4d, 0x41, + 0x58, 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x53, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x56, 0x41, 0x4c, + 0x55, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x01, 0x12, 0x2e, 0x0a, 0x29, 0x4d, + 0x41, 0x58, 0x5f, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x49, 0x44, 0x53, 0x5f, 0x50, 0x45, + 0x52, 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x5f, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x53, + 0x5f, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x10, 0xe8, 0x07, 0x12, 0x1e, 0x0a, 0x1a, 0x53, + 0x50, 0x4c, 0x49, 0x54, 0x5f, 0x54, 0x4f, 0x4b, 0x45, 0x4e, 0x5f, 0x4d, 0x41, 0x58, 0x5f, 0x56, + 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x44, 0x41, 0x59, 0x53, 0x10, 0x0e, 0x1a, 0x02, 0x10, 0x01, 0x22, + 0xf0, 0x1e, 0x0a, 0x06, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x09, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x08, 0x62, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x1d, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, - 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x80, 0x02, 0x0a, 0x13, 0x4c, 0x69, 0x73, - 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, - 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, - 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, - 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x32, 0x0a, 0x15, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x65, - 0x6d, 
0x61, 0x69, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x12, - 0x2a, 0x0a, 0x11, 0x73, 0x68, 0x6f, 0x77, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x5f, - 0x6b, 0x65, 0x79, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x73, 0x68, 0x6f, 0x77, - 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x22, 0x7f, 0x0a, 0x14, 0x4c, - 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, 0x09, 0x68, 0x6d, 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x73, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, - 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x68, 0x6d, 0x61, 0x63, - 0x4b, 0x65, 0x79, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, - 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, - 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x97, 0x01, 0x0a, - 0x14, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x42, 0x0a, 0x08, 0x68, 0x6d, 0x61, 0x63, 0x5f, 0x6b, 0x65, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, - 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x42, 0x03, 0xe0, 0x41, 0x02, - 0x52, 0x07, 0x68, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x3b, 0x0a, 0x0b, 0x75, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0xbf, 0x01, 0x0a, 0x19, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, - 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, - 0x72, 0x61, 0x6d, 0x73, 0x12, 0x31, 0x0a, 0x14, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x13, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6c, - 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x30, 0x0a, 0x14, 0x65, 0x6e, 0x63, 0x72, 0x79, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x12, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x4b, 0x65, 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x3d, 0x0a, 0x1b, 0x65, 0x6e, 0x63, - 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x32, - 0x35, 0x36, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x18, - 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x53, 0x68, 0x61, - 0x32, 0x35, 0x36, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xca, 0x05, 0x0a, 0x10, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x73, 0x22, 0xb5, 0x05, - 0x0a, 0x06, 0x56, 0x61, 0x6c, 
0x75, 0x65, 0x73, 0x12, 0x16, 0x0a, 0x12, 0x56, 0x41, 0x4c, 0x55, - 0x45, 0x53, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, - 0x12, 0x1b, 0x0a, 0x14, 0x4d, 0x41, 0x58, 0x5f, 0x52, 0x45, 0x41, 0x44, 0x5f, 0x43, 0x48, 0x55, - 0x4e, 0x4b, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x80, 0x80, 0x01, 0x12, 0x1c, 0x0a, - 0x15, 0x4d, 0x41, 0x58, 0x5f, 0x57, 0x52, 0x49, 0x54, 0x45, 0x5f, 0x43, 0x48, 0x55, 0x4e, 0x4b, - 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x80, 0x80, 0x01, 0x12, 0x19, 0x0a, 0x12, 0x4d, - 0x41, 0x58, 0x5f, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x5f, 0x4d, - 0x42, 0x10, 0x80, 0x80, 0xc0, 0x02, 0x12, 0x29, 0x0a, 0x24, 0x4d, 0x41, 0x58, 0x5f, 0x43, 0x55, - 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x46, 0x49, - 0x45, 0x4c, 0x44, 0x5f, 0x4e, 0x41, 0x4d, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, - 0x08, 0x12, 0x2a, 0x0a, 0x25, 0x4d, 0x41, 0x58, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, - 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x56, - 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x20, 0x12, 0x29, 0x0a, - 0x24, 0x4d, 0x41, 0x58, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x4d, 0x45, 0x54, 0x41, - 0x44, 0x41, 0x54, 0x41, 0x5f, 0x54, 0x4f, 0x54, 0x41, 0x4c, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x5f, - 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x40, 0x12, 0x2a, 0x0a, 0x24, 0x4d, 0x41, 0x58, 0x5f, - 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54, 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, - 0x54, 0x4f, 0x54, 0x41, 0x4c, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, - 0x10, 0x80, 0xa0, 0x01, 0x12, 0x27, 0x0a, 0x23, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, - 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, 0x53, - 0x5f, 0x50, 0x45, 0x52, 0x5f, 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54, 0x10, 0x64, 0x12, 0x22, 0x0a, - 0x1e, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x49, 0x46, 0x45, 0x43, 0x59, 0x43, 0x4c, 0x45, 0x5f, 0x52, - 0x55, 0x4c, 0x45, 0x53, 0x5f, 0x50, 0x45, 0x52, 0x5f, 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54, 0x10, - 0x64, 0x12, 0x26, 0x0a, 0x22, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, - 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x41, 0x54, 0x54, - 0x52, 0x49, 0x42, 0x55, 0x54, 0x45, 0x53, 0x10, 0x05, 0x12, 0x31, 0x0a, 0x2c, 0x4d, 0x41, 0x58, - 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x55, - 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x41, 0x54, 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, 0x45, 0x5f, 0x4b, - 0x45, 0x59, 0x5f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x10, 0x80, 0x02, 0x12, 0x33, 0x0a, 0x2e, - 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, - 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x41, 0x54, 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, - 0x45, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x10, 0x80, - 0x08, 0x12, 0x1c, 0x0a, 0x18, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x53, 0x5f, - 0x45, 0x4e, 0x54, 0x52, 0x49, 0x45, 0x53, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x10, 0x40, 0x12, - 0x1f, 0x0a, 0x1b, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x53, 0x5f, 0x4b, 0x45, - 0x59, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x10, 0x3f, - 0x12, 0x1f, 0x0a, 0x1a, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 
0x41, 0x42, 0x45, 0x4c, 0x53, 0x5f, 0x4b, - 0x45, 0x59, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, - 0x01, 0x12, 0x2e, 0x0a, 0x29, 0x4d, 0x41, 0x58, 0x5f, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x5f, - 0x49, 0x44, 0x53, 0x5f, 0x50, 0x45, 0x52, 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x5f, 0x4f, - 0x42, 0x4a, 0x45, 0x43, 0x54, 0x53, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x10, 0xe8, - 0x07, 0x12, 0x1e, 0x0a, 0x1a, 0x53, 0x50, 0x4c, 0x49, 0x54, 0x5f, 0x54, 0x4f, 0x4b, 0x45, 0x4e, - 0x5f, 0x4d, 0x41, 0x58, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x44, 0x41, 0x59, 0x53, 0x10, - 0x0e, 0x1a, 0x02, 0x10, 0x01, 0x22, 0xf0, 0x1e, 0x0a, 0x06, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, - 0xe0, 0x41, 0x05, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x09, 0x62, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, - 0x03, 0x52, 0x08, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x65, - 0x74, 0x61, 0x67, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, - 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x33, 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x2b, - 0x0a, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x6d, 0x65, 0x74, - 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x08, 0x6c, - 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, - 0x41, 0x05, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x0d, - 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x72, - 0x70, 0x6f, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x72, 0x70, 0x6f, 0x12, 0x38, 0x0a, - 0x03, 0x61, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, + 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x2b, 0x0a, 0x0e, 0x6d, 0x65, 0x74, 0x61, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, + 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x08, 0x6c, 0x6f, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 
0x42, 0x03, 0xe0, + 0x41, 0x03, 0x52, 0x0c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, + 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x72, 0x70, 0x6f, 0x18, 0x1b, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x72, 0x70, 0x6f, 0x12, 0x38, 0x0a, 0x03, 0x61, 0x63, 0x6c, 0x18, 0x08, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x41, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x03, 0x61, 0x63, + 0x6c, 0x12, 0x54, 0x0a, 0x12, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x6f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, + 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x10, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x6c, 0x12, 0x41, 0x0a, 0x09, 0x6c, 0x69, 0x66, 0x65, 0x63, + 0x79, 0x63, 0x6c, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, - 0x75, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, - 0x6f, 0x6c, 0x52, 0x03, 0x61, 0x63, 0x6c, 0x12, 0x54, 0x0a, 0x12, 0x64, 0x65, 0x66, 0x61, 0x75, - 0x6c, 0x74, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x09, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, - 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x10, 0x64, 0x65, 0x66, - 0x61, 0x75, 0x6c, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x6c, 0x12, 0x41, 0x0a, - 0x09, 0x6c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, - 0x63, 0x79, 0x63, 0x6c, 0x65, 0x52, 0x09, 0x6c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, - 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, - 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x52, + 0x09, 0x6c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, + 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x32, 0x0a, 0x04, + 0x63, 0x6f, 0x72, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 
0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x43, 0x6f, 0x72, 0x73, 0x52, 0x04, 0x63, 0x6f, 0x72, 0x73, + 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, + 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, - 0x6d, 0x65, 0x12, 0x32, 0x0a, 0x04, 0x63, 0x6f, 0x72, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x43, 0x6f, 0x72, 0x73, - 0x52, 0x04, 0x63, 0x6f, 0x72, 0x73, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, - 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x75, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x18, 0x64, 0x65, 0x66, 0x61, - 0x75, 0x6c, 0x74, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, 0x5f, - 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x64, 0x65, 0x66, 0x61, - 0x75, 0x6c, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x48, 0x6f, 0x6c, - 0x64, 0x12, 0x3d, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x0f, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x61, 0x62, - 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, - 0x12, 0x3b, 0x0a, 0x07, 0x77, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x18, 0x10, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x57, 0x65, 0x62, - 0x73, 0x69, 0x74, 0x65, 0x52, 0x07, 0x77, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x12, 0x44, 0x0a, - 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x11, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x69, 0x6e, 0x67, 0x12, 0x3b, 0x0a, 0x07, 0x6c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x18, 0x12, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, - 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x52, 0x07, 0x6c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, - 0x12, 0x33, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, - 0x6f, 0x77, 0x6e, 0x65, 0x72, 
0x12, 0x44, 0x0a, 0x0a, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x0a, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x07, 0x62, - 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, + 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, + 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x18, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x65, 0x76, + 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x0e, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x45, 0x76, 0x65, + 0x6e, 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x48, 0x6f, 0x6c, 0x64, 0x12, 0x3d, 0x0a, 0x06, 0x6c, + 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, + 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x3b, 0x0a, 0x07, 0x77, 0x65, + 0x62, 0x73, 0x69, 0x74, 0x65, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, + 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x57, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x52, 0x07, + 0x77, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, + 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, + 0x67, 0x52, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x3b, 0x0a, + 0x07, 0x6c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e, + 0x67, 0x52, 0x07, 0x6c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x12, 0x33, 0x0a, 0x05, 0x6f, 0x77, + 0x6e, 0x65, 0x72, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x77, + 0x6e, 0x65, 0x72, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, + 0x44, 0x0a, 0x0a, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x14, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x45, + 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x65, 0x6e, 0x63, 0x72, 0x79, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x07, 0x62, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, + 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 
0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x2e, 0x42, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x52, 0x07, 0x62, 0x69, 0x6c, 0x6c, 0x69, + 0x6e, 0x67, 0x12, 0x54, 0x0a, 0x10, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x42, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x52, - 0x07, 0x62, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x54, 0x0a, 0x10, 0x72, 0x65, 0x74, 0x65, - 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x16, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x52, 0x65, - 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0f, 0x72, - 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x42, - 0x0a, 0x0a, 0x69, 0x61, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x17, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x49, 0x61, - 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x09, 0x69, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x61, 0x74, 0x69, 0x73, 0x66, 0x69, 0x65, 0x73, 0x5f, - 0x70, 0x7a, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x73, 0x61, 0x74, 0x69, 0x73, - 0x66, 0x69, 0x65, 0x73, 0x50, 0x7a, 0x73, 0x12, 0x67, 0x0a, 0x17, 0x63, 0x75, 0x73, 0x74, 0x6f, - 0x6d, 0x5f, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x6c, 0x61, 0x63, 0x65, 0x6d, - 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x15, 0x63, 0x75, 0x73, 0x74, 0x6f, - 0x6d, 0x50, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x12, 0x41, 0x0a, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x1c, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x41, - 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x52, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x63, 0x6c, - 0x61, 0x73, 0x73, 0x1a, 0x30, 0x0a, 0x07, 0x42, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x25, - 0x0a, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x61, 0x79, 0x73, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, - 0x72, 0x50, 0x61, 0x79, 0x73, 0x1a, 0x87, 0x01, 0x0a, 0x04, 0x43, 0x6f, 0x72, 0x73, 0x12, 0x16, - 0x0a, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, - 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, - 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x27, - 0x0a, 0x0f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x68, 0x65, 
0x61, 0x64, 0x65, - 0x72, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x26, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x61, - 0x67, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x41, 0x67, 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x1a, - 0x5c, 0x0a, 0x0a, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4e, 0x0a, - 0x0f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, - 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x0d, - 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x1a, 0xb1, 0x02, - 0x0a, 0x09, 0x49, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x7b, 0x0a, 0x1b, 0x75, - 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6c, 0x65, - 0x76, 0x65, 0x6c, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x49, 0x61, 0x6d, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x55, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x18, - 0x75, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4c, 0x65, 0x76, - 0x65, 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x70, 0x75, 0x62, 0x6c, - 0x69, 0x63, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x76, 0x65, 0x6e, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x16, 0x70, 0x75, 0x62, 0x6c, - 0x69, 0x63, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x50, 0x72, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x69, - 0x6f, 0x6e, 0x1a, 0x6d, 0x0a, 0x18, 0x55, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x18, - 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x09, 0x6c, 0x6f, 0x63, 0x6b, - 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x69, 0x6d, - 0x65, 0x1a, 0xdb, 0x07, 0x0a, 0x09, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x12, - 0x3c, 0x0a, 0x04, 0x72, 0x75, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, + 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, + 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, + 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x42, 0x0a, 0x0a, 0x69, 0x61, 0x6d, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 
0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x49, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x52, 0x09, 0x69, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x23, 0x0a, 0x0d, + 0x73, 0x61, 0x74, 0x69, 0x73, 0x66, 0x69, 0x65, 0x73, 0x5f, 0x70, 0x7a, 0x73, 0x18, 0x19, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x0c, 0x73, 0x61, 0x74, 0x69, 0x73, 0x66, 0x69, 0x65, 0x73, 0x50, 0x7a, + 0x73, 0x12, 0x67, 0x0a, 0x17, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x70, 0x6c, 0x61, 0x63, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x1a, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x43, 0x75, + 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x52, 0x15, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x6c, 0x61, 0x63, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x41, 0x0a, 0x09, 0x61, 0x75, + 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, - 0x6c, 0x65, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x04, 0x72, 0x75, 0x6c, 0x65, 0x1a, 0x8f, 0x07, - 0x0a, 0x04, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x47, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x2e, 0x52, 0x75, 0x6c, 0x65, - 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x50, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x69, - 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x2e, 0x43, 0x6f, 0x6e, - 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x1a, 0x41, 0x0a, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, - 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, - 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, - 0x6c, 0x61, 0x73, 0x73, 0x1a, 0xa8, 0x05, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x08, 0x61, 0x67, 0x65, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, 0x07, 0x61, 0x67, 0x65, 0x44, 0x61, 0x79, 0x73, 0x88, - 0x01, 0x01, 0x12, 0x38, 0x0a, 0x0e, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x65, - 0x66, 0x6f, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x65, 0x52, 0x0d, 0x63, - 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x1c, 0x0a, 0x07, - 0x69, 0x73, 0x5f, 0x6c, 0x69, 
0x76, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x48, 0x01, 0x52, - 0x06, 0x69, 0x73, 0x4c, 0x69, 0x76, 0x65, 0x88, 0x01, 0x01, 0x12, 0x31, 0x0a, 0x12, 0x6e, 0x75, - 0x6d, 0x5f, 0x6e, 0x65, 0x77, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x48, 0x02, 0x52, 0x10, 0x6e, 0x75, 0x6d, 0x4e, 0x65, 0x77, - 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x88, 0x01, 0x01, 0x12, 0x32, 0x0a, - 0x15, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x6d, 0x61, - 0x74, 0x63, 0x68, 0x65, 0x73, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, - 0x73, 0x12, 0x38, 0x0a, 0x16, 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, - 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, - 0x05, 0x48, 0x03, 0x52, 0x13, 0x64, 0x61, 0x79, 0x73, 0x53, 0x69, 0x6e, 0x63, 0x65, 0x43, 0x75, - 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x12, 0x3f, 0x0a, 0x12, 0x63, - 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x62, 0x65, 0x66, 0x6f, 0x72, - 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x65, 0x52, 0x10, 0x63, 0x75, 0x73, 0x74, - 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x40, 0x0a, 0x1a, - 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x6f, 0x6e, 0x63, 0x75, - 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, - 0x48, 0x04, 0x52, 0x17, 0x64, 0x61, 0x79, 0x73, 0x53, 0x69, 0x6e, 0x63, 0x65, 0x4e, 0x6f, 0x6e, - 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x12, 0x47, - 0x0a, 0x16, 0x6e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, - 0x65, 0x5f, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x44, 0x61, 0x74, - 0x65, 0x52, 0x14, 0x6e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, - 0x65, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x61, 0x74, 0x63, 0x68, - 0x65, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x25, - 0x0a, 0x0e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, - 0x18, 0x0c, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x53, - 0x75, 0x66, 0x66, 0x69, 0x78, 0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x61, 0x67, 0x65, 0x5f, 0x64, 0x61, - 0x79, 0x73, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x69, 0x73, 0x5f, 0x6c, 0x69, 0x76, 0x65, 0x42, 0x15, - 0x0a, 0x13, 0x5f, 0x6e, 0x75, 0x6d, 0x5f, 0x6e, 0x65, 0x77, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x19, 0x0a, 0x17, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, - 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, - 0x42, 0x1d, 0x0a, 0x1b, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, - 0x6e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x1a, - 0x54, 0x0a, 0x07, 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 
0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, 0x6f, - 0x67, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, - 0x6c, 0x6f, 0x67, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x2a, 0x0a, 0x11, 0x6c, 0x6f, 0x67, - 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x6f, 0x67, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50, - 0x72, 0x65, 0x66, 0x69, 0x78, 0x1a, 0x80, 0x02, 0x0a, 0x0f, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, - 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x41, 0x0a, 0x0e, 0x65, 0x66, 0x66, - 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0d, 0x65, - 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, - 0x69, 0x73, 0x5f, 0x6c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x08, 0x69, 0x73, 0x4c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x72, 0x65, 0x74, - 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0f, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, - 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x88, 0x01, 0x01, 0x12, 0x48, 0x0a, 0x12, 0x72, 0x65, 0x74, - 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x11, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x1a, 0x26, 0x0a, 0x0a, 0x56, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, - 0x1a, 0x59, 0x0a, 0x07, 0x57, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x6d, - 0x61, 0x69, 0x6e, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6d, 0x61, 0x69, 0x6e, 0x50, 0x61, 0x67, 0x65, 0x53, - 0x75, 0x66, 0x66, 0x69, 0x78, 0x12, 0x24, 0x0a, 0x0e, 0x6e, 0x6f, 0x74, 0x5f, 0x66, 0x6f, 0x75, - 0x6e, 0x64, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6e, - 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x50, 0x61, 0x67, 0x65, 0x1a, 0x3e, 0x0a, 0x15, 0x43, - 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x12, 0x25, 0x0a, 0x0e, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6c, 0x6f, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x64, 0x61, - 0x74, 0x61, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x67, 0x0a, 0x09, 0x41, - 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, + 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, + 0x73, 0x73, 0x52, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 
0x1a, 0x30, 0x0a, + 0x07, 0x42, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x25, 0x0a, 0x0e, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x61, 0x79, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0d, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x72, 0x50, 0x61, 0x79, 0x73, 0x1a, + 0x87, 0x01, 0x0a, 0x04, 0x43, 0x6f, 0x72, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x72, 0x69, 0x67, + 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, + 0x12, 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x27, 0x0a, 0x0f, 0x72, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x03, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x0e, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x12, 0x26, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x63, + 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x41, + 0x67, 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x1a, 0x5c, 0x0a, 0x0a, 0x45, 0x6e, 0x63, + 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4e, 0x0a, 0x0f, 0x64, 0x65, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x5f, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, + 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, + 0x74, 0x4b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x1a, 0xb1, 0x02, 0x0a, 0x09, 0x49, 0x61, 0x6d, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x7b, 0x0a, 0x1b, 0x75, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, + 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x61, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x49, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x55, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4c, 0x65, 0x76, + 0x65, 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x18, 0x75, 0x6e, 0x69, 0x66, 0x6f, 0x72, + 0x6d, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x41, 0x63, 0x63, 0x65, + 0x73, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x61, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x16, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x41, 0x63, 0x63, 0x65, + 0x73, 0x73, 0x50, 0x72, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x6d, 0x0a, 0x18, + 0x55, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4c, 0x65, 0x76, + 0x65, 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, - 0x65, 0x64, 0x12, 0x40, 0x0a, 0x0b, 0x74, 0x6f, 0x67, 0x67, 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, - 0x61, 
0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x74, 0x6f, 0x67, 0x67, 0x6c, 0x65, - 0x54, 0x69, 0x6d, 0x65, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, - 0x47, 0xea, 0x41, 0x44, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x12, 0x23, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, - 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x7d, 0x22, 0x97, 0x02, 0x0a, 0x13, 0x42, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, - 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x72, 0x6f, 0x6c, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x22, 0x0a, 0x0a, - 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x61, 0x6c, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x41, 0x6c, 0x74, - 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x12, 0x12, 0x0a, - 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, - 0x67, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, - 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, - 0x41, 0x0a, 0x0c, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x54, 0x65, 0x61, 0x6d, 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, - 0x61, 0x6d, 0x22, 0x53, 0x0a, 0x0f, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, - 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, - 0x1b, 0x0a, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x07, 0x48, - 0x00, 0x52, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x88, 0x01, 0x01, 0x42, 0x09, 0x0a, 0x07, - 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, 0x54, 0x0a, 0x0f, 0x4f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x06, 0x63, 0x72, - 0x63, 0x33, 0x32, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x07, 0x48, 0x00, 0x52, 0x06, 0x63, 0x72, - 0x63, 0x33, 0x32, 0x63, 0x88, 0x01, 0x01, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x64, 0x35, 0x5f, 0x68, - 0x61, 0x73, 0x68, 0x18, 0x02, 
0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6d, 0x64, 0x35, 0x48, 0x61, - 0x73, 0x68, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, 0xfe, 0x02, - 0x0a, 0x0f, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, - 0x61, 0x12, 0x13, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, - 0x41, 0x05, 0x52, 0x02, 0x69, 0x64, 0x12, 0x20, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, - 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x08, - 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x49, 0x64, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x05, 0xfa, 0x41, - 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, - 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x37, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x13, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x45, 0x6d, 0x61, 0x69, 0x6c, - 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, - 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x65, 0x64, 0x12, 0x37, 0x0a, 0x09, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x69, 0x6d, 0x65, 0x1a, 0xdb, 0x07, 0x0a, 0x09, + 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x12, 0x3c, 0x0a, 0x04, 0x72, 0x75, 0x6c, + 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x2e, 0x52, 0x75, 0x6c, + 0x65, 0x52, 0x04, 0x72, 0x75, 0x6c, 0x65, 0x1a, 0x8f, 0x07, 0x0a, 0x04, 0x52, 0x75, 0x6c, 0x65, + 0x12, 0x47, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, + 0x63, 0x79, 0x63, 0x6c, 0x65, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x50, 0x0a, 0x09, 0x63, 0x6f, 0x6e, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, + 0x65, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 
0x6f, 0x6e, 0x1a, 0x41, 0x0a, 0x06, 0x41, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x1a, 0xa8, + 0x05, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x08, + 0x61, 0x67, 0x65, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, + 0x52, 0x07, 0x61, 0x67, 0x65, 0x44, 0x61, 0x79, 0x73, 0x88, 0x01, 0x01, 0x12, 0x38, 0x0a, 0x0e, + 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x65, 0x52, 0x0d, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, + 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x1c, 0x0a, 0x07, 0x69, 0x73, 0x5f, 0x6c, 0x69, 0x76, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x48, 0x01, 0x52, 0x06, 0x69, 0x73, 0x4c, 0x69, 0x76, + 0x65, 0x88, 0x01, 0x01, 0x12, 0x31, 0x0a, 0x12, 0x6e, 0x75, 0x6d, 0x5f, 0x6e, 0x65, 0x77, 0x65, + 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, + 0x48, 0x02, 0x52, 0x10, 0x6e, 0x75, 0x6d, 0x4e, 0x65, 0x77, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x73, 0x88, 0x01, 0x01, 0x12, 0x32, 0x0a, 0x15, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x73, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, + 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x53, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x38, 0x0a, 0x16, 0x64, + 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, + 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x48, 0x03, 0x52, 0x13, 0x64, + 0x61, 0x79, 0x73, 0x53, 0x69, 0x6e, 0x63, 0x65, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, + 0x6d, 0x65, 0x88, 0x01, 0x01, 0x12, 0x3f, 0x0a, 0x12, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x11, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, + 0x44, 0x61, 0x74, 0x65, 0x52, 0x10, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, + 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x40, 0x0a, 0x1a, 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, + 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x48, 0x04, 0x52, 0x17, 0x64, 0x61, + 0x79, 0x73, 0x53, 0x69, 0x6e, 0x63, 0x65, 0x4e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, + 0x74, 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x12, 0x47, 0x0a, 0x16, 0x6e, 0x6f, 0x6e, 0x63, + 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x62, 0x65, 0x66, 0x6f, + 0x72, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x65, 0x52, 0x14, 0x6e, 0x6f, 0x6e, + 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x65, 0x66, 0x6f, 0x72, + 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x5f, 
0x70, 0x72, 0x65, + 0x66, 0x69, 0x78, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x73, 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x53, 0x75, 0x66, 0x66, 0x69, 0x78, 0x42, + 0x0b, 0x0a, 0x09, 0x5f, 0x61, 0x67, 0x65, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x42, 0x0a, 0x0a, 0x08, + 0x5f, 0x69, 0x73, 0x5f, 0x6c, 0x69, 0x76, 0x65, 0x42, 0x15, 0x0a, 0x13, 0x5f, 0x6e, 0x75, 0x6d, + 0x5f, 0x6e, 0x65, 0x77, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x42, + 0x19, 0x0a, 0x17, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x63, + 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x42, 0x1d, 0x0a, 0x1b, 0x5f, 0x64, + 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x6f, 0x6e, 0x63, 0x75, 0x72, + 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x1a, 0x54, 0x0a, 0x07, 0x4c, 0x6f, 0x67, + 0x67, 0x69, 0x6e, 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, 0x6f, 0x67, 0x5f, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6c, 0x6f, 0x67, 0x42, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x12, 0x2a, 0x0a, 0x11, 0x6c, 0x6f, 0x67, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, + 0x6c, 0x6f, 0x67, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x1a, + 0x80, 0x02, 0x0a, 0x0f, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x12, 0x41, 0x0a, 0x0e, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, + 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, - 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, - 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, - 0x61, 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x22, 0xec, - 0x03, 0x0a, 0x0c, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0d, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, + 0x76, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x69, 0x73, 0x5f, 0x6c, 0x6f, 0x63, + 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x69, 0x73, 0x4c, 0x6f, 0x63, + 0x6b, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, + 0x0f, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64, + 0x88, 0x01, 0x01, 0x12, 0x48, 0x0a, 0x12, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x19, 
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x72, 0x65, 0x74, 0x65, + 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x13, 0x0a, + 0x11, 0x5f, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x65, 0x72, 0x69, + 0x6f, 0x64, 0x1a, 0x26, 0x0a, 0x0a, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, + 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x1a, 0x59, 0x0a, 0x07, 0x57, 0x65, + 0x62, 0x73, 0x69, 0x74, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x6d, 0x61, 0x69, 0x6e, 0x5f, 0x70, 0x61, + 0x67, 0x65, 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0e, 0x6d, 0x61, 0x69, 0x6e, 0x50, 0x61, 0x67, 0x65, 0x53, 0x75, 0x66, 0x66, 0x69, 0x78, 0x12, + 0x24, 0x0a, 0x0e, 0x6e, 0x6f, 0x74, 0x5f, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x70, 0x61, 0x67, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, + 0x64, 0x50, 0x61, 0x67, 0x65, 0x1a, 0x3e, 0x0a, 0x15, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, + 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x25, + 0x0a, 0x0e, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x64, 0x61, 0x74, 0x61, 0x4c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x67, 0x0a, 0x09, 0x41, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, + 0x73, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x40, 0x0a, 0x0b, + 0x74, 0x6f, 0x67, 0x67, 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, + 0x41, 0x03, 0x52, 0x0a, 0x74, 0x6f, 0x67, 0x67, 0x6c, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x1a, 0x39, + 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x47, 0xea, 0x41, 0x44, 0x0a, 0x1d, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x23, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x7d, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x7d, 0x22, 0x97, 0x02, 0x0a, 0x13, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, + 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12, 0x0e, + 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, + 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, + 0x65, 0x6e, 0x74, 0x69, 0x74, 
0x79, 0x12, 0x22, 0x0a, 0x0a, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, + 0x5f, 0x61, 0x6c, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, + 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x41, 0x6c, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, + 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x14, 0x0a, 0x05, 0x65, + 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, + 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x41, 0x0a, 0x0c, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x52, + 0x0b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x22, 0x53, 0x0a, 0x0f, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, + 0x18, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x63, 0x72, 0x63, + 0x33, 0x32, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x07, 0x48, 0x00, 0x52, 0x06, 0x63, 0x72, 0x63, + 0x33, 0x32, 0x63, 0x88, 0x01, 0x01, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, + 0x63, 0x22, 0x54, 0x0a, 0x0f, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, + 0x73, 0x75, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x07, 0x48, 0x00, 0x52, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x88, 0x01, + 0x01, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x64, 0x35, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6d, 0x64, 0x35, 0x48, 0x61, 0x73, 0x68, 0x42, 0x09, 0x0a, 0x07, + 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, 0xfe, 0x02, 0x0a, 0x0f, 0x48, 0x6d, 0x61, 0x63, + 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x13, 0x0a, 0x02, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x02, 0x69, 0x64, + 0x12, 0x20, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x49, 0x64, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, + 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x12, 0x37, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, + 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, + 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x45, 0x6d, 0x61, 0x69, 
0x6c, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, + 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, + 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, + 0x6d, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, + 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x54, 0x69, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x22, 0x85, 0x04, 0x0a, 0x12, 0x4e, 0x6f, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x65, 0x76, - 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x62, 0x0a, 0x11, 0x63, 0x75, 0x73, 0x74, + 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x68, 0x0a, 0x11, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x03, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x41, 0x74, 0x74, 0x72, 0x69, - 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x10, 0x63, 0x75, 0x73, 0x74, - 0x6f, 0x6d, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x2c, 0x0a, 0x12, - 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x72, 0x65, 0x66, - 0x69, 0x78, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x2a, 0x0a, 0x0e, 0x70, 0x61, - 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0d, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, - 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x1a, 0x43, 0x0a, 0x15, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, - 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, - 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, - 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 
- 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x70, 0xea, 0x41, 0x6d, - 0x0a, 0x23, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x46, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, - 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x73, 0x2f, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x7d, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x2f, 0x7b, - 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x22, 0x71, 0x0a, - 0x12, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x31, 0x0a, 0x14, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x13, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, - 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x28, 0x0a, 0x10, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x68, - 0x61, 0x32, 0x35, 0x36, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x0e, 0x6b, 0x65, 0x79, 0x53, 0x68, 0x61, 0x32, 0x35, 0x36, 0x42, 0x79, 0x74, 0x65, 0x73, - 0x22, 0xec, 0x0b, 0x0a, 0x06, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x17, 0x0a, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x1b, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x23, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x05, - 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x0e, - 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, - 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x17, - 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, - 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x74, 0x65, - 0x6e, 0x74, 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, - 0x6e, 0x67, 0x12, 0x2f, 0x0a, 0x13, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x64, 0x69, - 0x73, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x12, 0x63, 0x6f, 0x6e, 
0x74, 0x65, 0x6e, 0x74, 0x44, 0x69, 0x73, 0x70, 0x6f, 0x73, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x63, 0x6f, 0x6e, - 0x74, 0x72, 0x6f, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x61, 0x63, 0x68, - 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x38, 0x0a, 0x03, 0x61, 0x63, 0x6c, 0x18, - 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x03, 0x61, - 0x63, 0x6c, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x6c, 0x61, - 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x6f, - 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x12, 0x40, 0x0a, - 0x0b, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0c, 0x20, 0x01, + 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, + 0x6d, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x10, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, + 0x65, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, + 0x12, 0x2a, 0x0a, 0x0e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x66, 0x6f, 0x72, 0x6d, + 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0d, 0x70, + 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x1a, 0x43, 0x0a, 0x15, + 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x3a, 0x7d, 0xea, 0x41, 0x7a, 0x0a, 0x29, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, + 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x4d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x7b, 0x62, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x7d, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x2f, 0x7b, 0x6e, 0x6f, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x7d, + 0x22, 0x71, 0x0a, 0x12, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72, + 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x31, 0x0a, 0x14, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 
0x6d, 0x12, 0x28, 0x0a, 0x10, 0x6b, 0x65, 0x79, + 0x5f, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x6b, 0x65, 0x79, 0x53, 0x68, 0x61, 0x32, 0x35, 0x36, 0x42, 0x79, + 0x74, 0x65, 0x73, 0x22, 0xec, 0x0b, 0x0a, 0x06, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x17, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, + 0x05, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x1f, 0x0a, + 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x1b, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x23, 0x0a, 0x0a, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, + 0xe0, 0x41, 0x05, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x2b, 0x0a, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x6d, 0x65, + 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, + 0x73, 0x12, 0x17, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x42, + 0x03, 0xe0, 0x41, 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x45, 0x6e, 0x63, + 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x2f, 0x0a, 0x13, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, + 0x5f, 0x64, 0x69, 0x73, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x12, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x44, 0x69, 0x73, 0x70, 0x6f, + 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, + 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x63, + 0x61, 0x63, 0x68, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x38, 0x0a, 0x03, 0x61, + 0x63, 0x6c, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, + 0x52, 0x03, 0x61, 0x63, 0x6c, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, + 0x5f, 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, + 0x12, 0x40, 0x0a, 0x0b, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, + 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 
0x73, 0x74, 0x61, 0x6d, + 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x69, + 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, + 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x2c, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, + 0x6e, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x05, + 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, + 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x45, 0x0a, 0x09, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, + 0x6d, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x42, 0x03, 0xe0, 0x41, + 0x03, 0x52, 0x09, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x40, 0x0a, 0x0b, + 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x11, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, + 0x41, 0x03, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3f, + 0x0a, 0x07, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x12, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, + 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x06, 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x12, + 0x5a, 0x0a, 0x19, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, - 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, - 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, - 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, - 0x70, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, - 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, - 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, - 0x54, 0x69, 0x6d, 0x65, 0x12, 0x2c, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, - 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, - 
0x41, 0x03, 0x52, 0x0e, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x75, - 0x6e, 0x74, 0x12, 0x45, 0x0a, 0x09, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, - 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0xe0, 0x41, 0x03, 0x52, 0x16, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x74, + 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x14, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x0d, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x72, 0x79, 0x48, 0x6f, + 0x6c, 0x64, 0x12, 0x4e, 0x0a, 0x15, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x15, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x13, 0x72, + 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x54, 0x69, + 0x6d, 0x65, 0x12, 0x43, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x16, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, + 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x2d, 0x0a, 0x10, 0x65, 0x76, 0x65, 0x6e, 0x74, + 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, + 0x08, 0x48, 0x00, 0x52, 0x0e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x48, + 0x6f, 0x6c, 0x64, 0x88, 0x01, 0x01, 0x12, 0x33, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, + 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x42, + 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x56, 0x0a, 0x13, 0x63, + 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x75, 0x73, + 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x12, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x0b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, + 0x6d, 0x65, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, + 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x13, 0x0a, + 0x11, 0x5f, 0x65, 0x76, 
0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, + 0x6c, 0x64, 0x22, 0x97, 0x02, 0x0a, 0x13, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, + 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12, 0x0e, + 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, + 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, + 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x22, 0x0a, 0x0a, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, + 0x5f, 0x61, 0x6c, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, + 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x41, 0x6c, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, + 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x14, 0x0a, 0x05, 0x65, + 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, + 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x41, 0x0a, 0x0c, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x52, + 0x0b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x22, 0x8e, 0x01, 0x0a, + 0x13, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x07, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, - 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, - 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x07, 0x6b, - 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x12, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, - 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, - 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x06, 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x5a, 0x0a, 0x19, - 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, - 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 
0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, - 0x52, 0x16, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, - 0x6c, 0x61, 0x73, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x65, 0x6d, 0x70, - 0x6f, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x0d, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x72, 0x79, 0x48, 0x6f, 0x6c, 0x64, 0x12, - 0x4e, 0x0a, 0x15, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x70, - 0x69, 0x72, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x13, 0x72, 0x65, 0x74, 0x65, - 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, - 0x43, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x16, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x4d, 0x65, 0x74, - 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0x12, 0x2d, 0x0a, 0x10, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, - 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, - 0x52, 0x0e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x48, 0x6f, 0x6c, 0x64, - 0x88, 0x01, 0x01, 0x12, 0x33, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x18, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x42, 0x03, 0xe0, 0x41, - 0x03, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x56, 0x0a, 0x13, 0x63, 0x75, 0x73, 0x74, - 0x6f, 0x6d, 0x65, 0x72, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, - 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x12, 0x63, 0x75, - 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x3b, 0x0a, 0x0b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, - 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x52, 0x0a, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x1a, 0x3b, 0x0a, - 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, - 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, - 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x65, - 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x22, - 0x97, 0x02, 0x0a, 0x13, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, - 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x72, 
0x6f, 0x6c, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, - 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x65, - 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74, - 0x69, 0x74, 0x79, 0x12, 0x22, 0x0a, 0x0a, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x61, 0x6c, - 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x65, 0x6e, - 0x74, 0x69, 0x74, 0x79, 0x41, 0x6c, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, - 0x79, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, - 0x74, 0x79, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x08, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, - 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x16, - 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, - 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x41, 0x0a, 0x0c, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x52, 0x0b, 0x70, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x22, 0x8e, 0x01, 0x0a, 0x13, 0x4c, 0x69, - 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x33, 0x0a, 0x07, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x6f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, - 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, - 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, - 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, - 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x48, 0x0a, 0x0b, 0x50, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, - 0x12, 0x12, 0x0a, 0x04, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x74, 0x65, 0x61, 0x6d, 0x22, 0x35, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, - 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x5f, - 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, - 0x6d, 0x61, 0x69, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x3c, 0x0a, 0x05, 0x4f, - 0x77, 0x6e, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x1b, 0x0a, 0x09, - 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 
0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x22, 0x5f, 0x0a, 0x0c, 0x43, 0x6f, 0x6e, - 0x74, 0x65, 0x6e, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, - 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, - 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x65, 0x6e, - 0x64, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x6c, 0x65, - 0x6e, 0x67, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x63, 0x6f, 0x6d, 0x70, - 0x6c, 0x65, 0x74, 0x65, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x32, 0xaf, 0x25, 0x0a, 0x07, 0x53, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x72, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, - 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x22, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x15, 0x12, 0x13, - 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, - 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x6f, 0x0a, 0x09, 0x47, 0x65, - 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x42, - 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x22, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x15, 0x12, - 0x13, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x8b, 0x01, 0x0a, 0x0c, - 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, + 0x52, 0x07, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x65, + 0x66, 0x69, 0x78, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x65, + 0x66, 0x69, 0x78, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, + 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, + 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x48, 0x0a, + 0x0b, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x12, 0x25, 0x0a, 0x0e, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x75, 0x6d, + 0x62, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x74, 0x65, 0x61, 0x6d, 0x22, 0x35, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x6d, 0x61, + 0x69, 0x6c, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0c, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x3c, + 0x0a, 0x05, 0x4f, 0x77, 
0x6e, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, + 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x22, 0x5f, 0x0a, 0x0c, + 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, + 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, + 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x03, 0x65, 0x6e, 0x64, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, + 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x63, + 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x32, 0x98, 0x26, + 0x0a, 0x07, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x72, 0x0a, 0x0c, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x22, 0x8a, 0xd3, 0xe4, 0x93, 0x02, + 0x15, 0x12, 0x13, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x6f, 0x0a, + 0x09, 0x47, 0x65, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, + 0x65, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x22, 0x8a, 0xd3, 0xe4, 0x93, + 0x02, 0x15, 0x12, 0x13, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xab, + 0x01, 0x0a, 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, + 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x22, 0x58, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x38, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, + 0x2a, 0x7d, 0x12, 0x1e, 0x0a, 0x0e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x12, 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, + 0x2a, 0x7d, 0xda, 0x41, 0x17, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x2c, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x12, 0x85, 0x01, 0x0a, + 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 
0x6b, 0x65, 0x74, 0x73, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, - 0x38, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x18, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, - 0x74, 0x12, 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, - 0x41, 0x17, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, - 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x12, 0x85, 0x01, 0x0a, 0x0b, 0x4c, 0x69, - 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, - 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x27, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x18, - 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0c, 0x7b, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, - 0x74, 0x12, 0x93, 0x01, 0x0a, 0x19, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, - 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, - 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, - 0x26, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, - 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0xab, 0x01, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x49, - 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x22, 0x60, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x4f, 0x12, 0x17, 0x0a, 0x08, 0x72, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x34, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x12, 0x28, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, - 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2a, 0xda, 0x41, 
0x08, 0x72, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0xb2, 0x01, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, - 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x22, 0x67, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x4f, 0x12, 0x17, 0x0a, 0x08, 0x72, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, - 0x2a, 0x7d, 0x12, 0x34, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x28, - 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, - 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2a, 0xda, 0x41, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x2c, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0xd7, 0x01, 0x0a, 0x12, 0x54, - 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, - 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, - 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, - 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x6c, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x4f, 0x12, 0x17, + 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x27, 0x8a, 0xd3, 0xe4, + 0x93, 0x02, 0x18, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0c, 0x7b, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x06, 0x70, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x12, 0x93, 0x01, 0x0a, 0x19, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x12, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x22, 0x26, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, + 0x7d, 0xda, 0x41, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0xab, 0x01, 0x0a, 0x0c, 0x47, + 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, + 
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49, + 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x60, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x4f, 0x12, 0x17, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x34, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x28, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, - 0x2a, 0x7d, 0x2f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2a, 0xda, 0x41, 0x14, - 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2c, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, - 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x8a, 0x01, 0x0a, 0x0c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, - 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, - 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x37, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x1c, - 0x12, 0x1a, 0x0a, 0x0b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x12, 0x62, - 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, - 0x6b, 0x12, 0x93, 0x01, 0x0a, 0x12, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, - 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x37, - 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x12, 0x28, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, - 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, - 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, - 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x96, 0x01, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x4e, - 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x29, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, - 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x37, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x12, - 0x28, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x3d, 0x70, 0x72, 0x6f, 0x6a, 
0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x12, 0x98, 0x01, 0x0a, 0x12, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, - 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x33, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, - 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x13, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x6e, - 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x96, 0x01, 0x0a, 0x11, - 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0x8a, 0xd3, - 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0b, - 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x06, 0x70, 0x61, - 0x72, 0x65, 0x6e, 0x74, 0x12, 0x7e, 0x0a, 0x0d, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, - 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, + 0x2a, 0x7d, 0x2f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2a, 0xda, 0x41, 0x08, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0xb2, 0x01, 0x0a, 0x0c, 0x53, 0x65, 0x74, + 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x22, 0x67, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x4f, 0x12, 0x17, 0x0a, 0x08, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x34, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x12, 0x28, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, + 0x2f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 
0x2a, 0x2a, 0xda, 0x41, 0x0f, 0x72, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2c, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0xd7, 0x01, + 0x0a, 0x12, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, + 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, + 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, + 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, + 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x6c, 0x8a, 0xd3, 0xe4, 0x93, 0x02, + 0x4f, 0x12, 0x17, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x34, 0x0a, 0x08, 0x72, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x28, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2a, + 0xda, 0x41, 0x14, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2c, 0x70, 0x65, 0x72, 0x6d, + 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x8a, 0x01, 0x0a, 0x0c, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x37, 0x8a, 0xd3, 0xe4, + 0x93, 0x02, 0x1c, 0x12, 0x1a, 0x0a, 0x0b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, + 0x41, 0x12, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, + 0x6d, 0x61, 0x73, 0x6b, 0x12, 0x9f, 0x01, 0x0a, 0x18, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, + 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x37, 0x8a, + 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x12, 0x28, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x7b, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, + 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0xda, + 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xa8, 0x01, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x4e, 0x6f, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x12, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 
0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x37, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2a, + 0x12, 0x28, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0xb1, 0x01, 0x0a, 0x18, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x29, 0x8a, 0xd3, 0xe4, 0x93, 0x02, - 0x23, 0x12, 0x21, 0x0a, 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x2e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x98, 0x01, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x48, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, - 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0xba, 0x01, 0x0a, 0x14, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, - 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x61, 0x6e, - 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, - 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x61, 0x6e, - 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, - 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x41, 0x8a, 0xd3, 0xe4, 0x93, 0x02, - 0x2f, 0x12, 0x2d, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x12, 0x20, - 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, - 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, - 0xda, 0x41, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x12, 0x95, 0x01, 0x0a, - 0x09, 
0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, - 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x48, 0x8a, 0xd3, 0xe4, 0x93, - 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, - 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x0d, 0x62, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0xa5, 0x01, 0x0a, 0x0a, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x12, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, - 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x48, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, - 0x41, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0xda, - 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, - 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x30, 0x01, 0x12, 0x8c, 0x01, 0x0a, - 0x0c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x26, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x22, 0x39, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x1e, 0x12, 0x1c, 0x0a, 0x0d, 0x6f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x2e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x12, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, - 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x12, 0x60, 0x0a, 0x0b, 0x57, - 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, - 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x12, 0x84, 0x01, - 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x25, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 
0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0x8a, 0xd3, - 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0b, - 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x06, 0x70, 0x61, - 0x72, 0x65, 0x6e, 0x74, 0x12, 0x98, 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, - 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x77, 0x72, 0x69, - 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x3a, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x34, 0x12, 0x0f, 0x0a, 0x0d, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x21, 0x0a, 0x12, - 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, - 0xae, 0x01, 0x0a, 0x13, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, - 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x72, - 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, - 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x38, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x32, 0x12, 0x30, - 0x0a, 0x21, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, - 0x70, 0x65, 0x63, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x62, 0x75, 0x63, + 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x3a, 0x8a, 0xd3, 0xe4, 0x93, 0x02, + 0x17, 0x12, 0x15, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x1a, 0x70, 0x61, 0x72, 0x65, 0x6e, + 0x74, 0x2c, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0xa8, 0x01, 0x0a, 0x17, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 
0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x73, 0x12, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, + 0x12, 0x15, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, + 0x12, 0x7e, 0x0a, 0x0d, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x29, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x12, 0x21, 0x0a, + 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, - 0x12, 0xae, 0x01, 0x0a, 0x10, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, - 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x12, 0x98, 0x01, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, + 0x79, 0x22, 0x48, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, + 0xda, 0x41, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0xba, 0x01, 0x0a, 0x14, + 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, + 0x72, 0x69, 0x74, 0x65, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, + 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 
0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, + 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x41, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x12, 0x2d, 0x0a, + 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0xda, 0x41, 0x09, 0x75, + 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x12, 0x95, 0x01, 0x0a, 0x09, 0x47, 0x65, 0x74, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x48, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, + 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0xa5, 0x01, 0x0a, 0x0a, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, + 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x48, 0x8a, 0xd3, + 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, + 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x0d, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x18, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x30, 0x01, 0x12, 0x8c, 0x01, 0x0a, 0x0c, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x39, 0x8a, 0xd3, + 0xe4, 0x93, 0x02, 0x1e, 0x12, 0x1c, 0x0a, 0x0d, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x62, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, + 0x2a, 0x7d, 0xda, 0x41, 0x12, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x75, 0x70, 0x64, 0x61, + 0x74, 
0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x12, 0x60, 0x0a, 0x0b, 0x57, 0x72, 0x69, 0x74, 0x65, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x12, 0x84, 0x01, 0x0a, 0x0b, 0x4c, 0x69, + 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, + 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, + 0x12, 0x15, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, + 0x12, 0x98, 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, + 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x3a, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x34, 0x12, 0x0f, 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x21, 0x0a, 0x12, 0x64, 0x65, 0x73, 0x74, + 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, + 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xae, 0x01, 0x0a, 0x13, + 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, + 0x69, 0x74, 0x65, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, + 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, + 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x38, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x32, 0x12, 0x30, 0x0a, 0x21, 0x77, 0x72, + 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x2e, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, + 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xae, 0x01, 0x0a, + 0x10, 0x51, 0x75, 0x65, 0x72, 
0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x41, - 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x12, 0x2d, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, - 0x5f, 0x69, 0x64, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, - 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0xda, 0x41, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, - 0x64, 0x12, 0x80, 0x01, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x1b, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, - 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x07, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x12, 0x95, 0x01, 0x0a, 0x0d, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, - 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, - 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x31, 0x8a, 0xd3, 0xe4, 0x93, 0x02, - 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x1d, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, - 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x77, 0x0a, 0x0d, - 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x27, 0x2e, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x25, - 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0xda, 0x41, 0x11, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x2c, 0x70, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x7d, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x48, 0x6d, 0x61, 0x63, - 0x4b, 0x65, 0x79, 0x12, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 
0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, + 0x32, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x41, 0x8a, 0xd3, 0xe4, 0x93, + 0x02, 0x2f, 0x12, 0x2d, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x12, + 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, + 0x2a, 0xda, 0x41, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x12, 0x80, 0x01, + 0x0a, 0x11, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x22, 0x1b, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x12, 0x95, 0x01, 0x0a, 0x0d, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, + 0x65, 0x79, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, + 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x31, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, + 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x1d, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x2c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, + 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x77, 0x0a, 0x0d, 0x44, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x25, 0x8a, 0xd3, 0xe4, 0x93, + 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x11, + 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x2c, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x12, 0x7d, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, + 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, 
0x4b, 0x65, + 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x25, 0x8a, 0xd3, 0xe4, 0x93, 0x02, + 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x11, 0x61, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x2c, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x12, 0x7c, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, + 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, + 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x1b, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x9d, + 0x01, 0x0a, 0x0d, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, + 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, - 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x25, 0x8a, - 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0xda, 0x41, 0x11, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x2c, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x12, 0x7c, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, - 0x4b, 0x65, 0x79, 0x73, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, - 0x63, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1b, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, - 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x12, 0x9d, 0x01, 0x0a, 0x0d, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, - 0x63, 0x4b, 0x65, 0x79, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, - 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, - 0x61, 0x22, 0x3f, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x22, 0x12, 0x20, 0x0a, 0x10, 0x68, 0x6d, 0x61, - 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x0c, 0x7b, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x14, 0x68, 0x6d, - 0x61, 0x63, 
0x5f, 0x6b, 0x65, 0x79, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, - 0x73, 0x6b, 0x1a, 0xa7, 0x02, 0xca, 0x41, 0x16, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, - 0x8a, 0x02, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, - 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, - 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, - 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, - 0x72, 0x65, 0x61, 0x64, 0x2d, 0x6f, 0x6e, 0x6c, 0x79, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, - 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, - 0x6c, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, - 0x68, 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x72, 0x65, 0x61, - 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, - 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x42, 0xdc, 0x01, 0x0a, - 0x15, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x42, 0x0c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x38, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, - 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2f, 0x76, 0x32, 0x3b, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0xea, 0x41, 0x78, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, - 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x12, 0x53, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, - 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, - 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, - 0x6e, 0x67, 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x7b, - 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x7d, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, + 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x3f, 0x8a, + 0xd3, 0xe4, 0x93, 0x02, 0x22, 0x12, 0x20, 0x0a, 0x10, 0x68, 0x6d, 0x61, 0x63, 0x5f, 0x6b, 0x65, + 0x79, 0x2e, 0x70, 
0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x14, 0x68, 0x6d, 0x61, 0x63, 0x5f, 0x6b, + 0x65, 0x79, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x1a, 0xa7, + 0x02, 0xca, 0x41, 0x16, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x8a, 0x02, 0x68, 0x74, + 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, + 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, + 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, + 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x72, 0x65, 0x61, 0x64, + 0x2d, 0x6f, 0x6e, 0x6c, 0x79, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, + 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2c, 0x68, 0x74, + 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x64, 0x65, + 0x76, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, + 0x6c, 0x79, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, + 0x74, 0x68, 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x72, 0x65, + 0x61, 0x64, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x42, 0xdc, 0x01, 0x0a, 0x15, 0x63, 0x6f, 0x6d, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x42, 0x0c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x50, 0x01, 0x5a, 0x38, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, + 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2f, 0x76, 0x32, 0x3b, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0xea, 0x41, 0x78, 0x0a, + 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, + 0x65, 0x79, 0x12, 0x53, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6b, 0x65, 0x79, 0x52, + 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x7d, 0x2f, + 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70, + 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x7d, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -8188,11 +8199,11 @@ 
(*ListBucketsResponse)(nil), // 5: google.storage.v2.ListBucketsResponse (*LockBucketRetentionPolicyRequest)(nil), // 6: google.storage.v2.LockBucketRetentionPolicyRequest (*UpdateBucketRequest)(nil), // 7: google.storage.v2.UpdateBucketRequest - (*DeleteNotificationRequest)(nil), // 8: google.storage.v2.DeleteNotificationRequest - (*GetNotificationRequest)(nil), // 9: google.storage.v2.GetNotificationRequest - (*CreateNotificationRequest)(nil), // 10: google.storage.v2.CreateNotificationRequest - (*ListNotificationsRequest)(nil), // 11: google.storage.v2.ListNotificationsRequest - (*ListNotificationsResponse)(nil), // 12: google.storage.v2.ListNotificationsResponse + (*DeleteNotificationConfigRequest)(nil), // 8: google.storage.v2.DeleteNotificationConfigRequest + (*GetNotificationConfigRequest)(nil), // 9: google.storage.v2.GetNotificationConfigRequest + (*CreateNotificationConfigRequest)(nil), // 10: google.storage.v2.CreateNotificationConfigRequest + (*ListNotificationConfigsRequest)(nil), // 11: google.storage.v2.ListNotificationConfigsRequest + (*ListNotificationConfigsResponse)(nil), // 12: google.storage.v2.ListNotificationConfigsResponse (*ComposeObjectRequest)(nil), // 13: google.storage.v2.ComposeObjectRequest (*DeleteObjectRequest)(nil), // 14: google.storage.v2.DeleteObjectRequest (*CancelResumableWriteRequest)(nil), // 15: google.storage.v2.CancelResumableWriteRequest @@ -8226,7 +8237,7 @@ (*ChecksummedData)(nil), // 43: google.storage.v2.ChecksummedData (*ObjectChecksums)(nil), // 44: google.storage.v2.ObjectChecksums (*HmacKeyMetadata)(nil), // 45: google.storage.v2.HmacKeyMetadata - (*Notification)(nil), // 46: google.storage.v2.Notification + (*NotificationConfig)(nil), // 46: google.storage.v2.NotificationConfig (*CustomerEncryption)(nil), // 47: google.storage.v2.CustomerEncryption (*Object)(nil), // 48: google.storage.v2.Object (*ObjectAccessControl)(nil), // 49: google.storage.v2.ObjectAccessControl @@ -8253,18 +8264,18 @@ (*Bucket_Lifecycle_Rule)(nil), // 70: google.storage.v2.Bucket.Lifecycle.Rule (*Bucket_Lifecycle_Rule_Action)(nil), // 71: google.storage.v2.Bucket.Lifecycle.Rule.Action (*Bucket_Lifecycle_Rule_Condition)(nil), // 72: google.storage.v2.Bucket.Lifecycle.Rule.Condition - nil, // 73: google.storage.v2.Notification.CustomAttributesEntry + nil, // 73: google.storage.v2.NotificationConfig.CustomAttributesEntry nil, // 74: google.storage.v2.Object.MetadataEntry (*fieldmaskpb.FieldMask)(nil), // 75: google.protobuf.FieldMask (*timestamppb.Timestamp)(nil), // 76: google.protobuf.Timestamp (*durationpb.Duration)(nil), // 77: google.protobuf.Duration (*date.Date)(nil), // 78: google.type.Date - (*v1.GetIamPolicyRequest)(nil), // 79: google.iam.v1.GetIamPolicyRequest - (*v1.SetIamPolicyRequest)(nil), // 80: google.iam.v1.SetIamPolicyRequest - (*v1.TestIamPermissionsRequest)(nil), // 81: google.iam.v1.TestIamPermissionsRequest + (*iampb.GetIamPolicyRequest)(nil), // 79: google.iam.v1.GetIamPolicyRequest + (*iampb.SetIamPolicyRequest)(nil), // 80: google.iam.v1.SetIamPolicyRequest + (*iampb.TestIamPermissionsRequest)(nil), // 81: google.iam.v1.TestIamPermissionsRequest (*emptypb.Empty)(nil), // 82: google.protobuf.Empty - (*v1.Policy)(nil), // 83: google.iam.v1.Policy - (*v1.TestIamPermissionsResponse)(nil), // 84: google.iam.v1.TestIamPermissionsResponse + (*iampb.Policy)(nil), // 83: google.iam.v1.Policy + (*iampb.TestIamPermissionsResponse)(nil), // 84: google.iam.v1.TestIamPermissionsResponse } var file_google_storage_v2_storage_proto_depIdxs = []int32{ 75, 
// 0: google.storage.v2.GetBucketRequest.read_mask:type_name -> google.protobuf.FieldMask @@ -8273,8 +8284,8 @@ 41, // 3: google.storage.v2.ListBucketsResponse.buckets:type_name -> google.storage.v2.Bucket 41, // 4: google.storage.v2.UpdateBucketRequest.bucket:type_name -> google.storage.v2.Bucket 75, // 5: google.storage.v2.UpdateBucketRequest.update_mask:type_name -> google.protobuf.FieldMask - 46, // 6: google.storage.v2.CreateNotificationRequest.notification:type_name -> google.storage.v2.Notification - 46, // 7: google.storage.v2.ListNotificationsResponse.notifications:type_name -> google.storage.v2.Notification + 46, // 6: google.storage.v2.CreateNotificationConfigRequest.notification_config:type_name -> google.storage.v2.NotificationConfig + 46, // 7: google.storage.v2.ListNotificationConfigsResponse.notification_configs:type_name -> google.storage.v2.NotificationConfig 48, // 8: google.storage.v2.ComposeObjectRequest.destination:type_name -> google.storage.v2.Object 55, // 9: google.storage.v2.ComposeObjectRequest.source_objects:type_name -> google.storage.v2.ComposeObjectRequest.SourceObject 39, // 10: google.storage.v2.ComposeObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams @@ -8331,7 +8342,7 @@ 51, // 61: google.storage.v2.BucketAccessControl.project_team:type_name -> google.storage.v2.ProjectTeam 76, // 62: google.storage.v2.HmacKeyMetadata.create_time:type_name -> google.protobuf.Timestamp 76, // 63: google.storage.v2.HmacKeyMetadata.update_time:type_name -> google.protobuf.Timestamp - 73, // 64: google.storage.v2.Notification.custom_attributes:type_name -> google.storage.v2.Notification.CustomAttributesEntry + 73, // 64: google.storage.v2.NotificationConfig.custom_attributes:type_name -> google.storage.v2.NotificationConfig.CustomAttributesEntry 49, // 65: google.storage.v2.Object.acl:type_name -> google.storage.v2.ObjectAccessControl 76, // 66: google.storage.v2.Object.delete_time:type_name -> google.protobuf.Timestamp 76, // 67: google.storage.v2.Object.create_time:type_name -> google.protobuf.Timestamp @@ -8366,10 +8377,10 @@ 80, // 96: google.storage.v2.Storage.SetIamPolicy:input_type -> google.iam.v1.SetIamPolicyRequest 81, // 97: google.storage.v2.Storage.TestIamPermissions:input_type -> google.iam.v1.TestIamPermissionsRequest 7, // 98: google.storage.v2.Storage.UpdateBucket:input_type -> google.storage.v2.UpdateBucketRequest - 8, // 99: google.storage.v2.Storage.DeleteNotification:input_type -> google.storage.v2.DeleteNotificationRequest - 9, // 100: google.storage.v2.Storage.GetNotification:input_type -> google.storage.v2.GetNotificationRequest - 10, // 101: google.storage.v2.Storage.CreateNotification:input_type -> google.storage.v2.CreateNotificationRequest - 11, // 102: google.storage.v2.Storage.ListNotifications:input_type -> google.storage.v2.ListNotificationsRequest + 8, // 99: google.storage.v2.Storage.DeleteNotificationConfig:input_type -> google.storage.v2.DeleteNotificationConfigRequest + 9, // 100: google.storage.v2.Storage.GetNotificationConfig:input_type -> google.storage.v2.GetNotificationConfigRequest + 10, // 101: google.storage.v2.Storage.CreateNotificationConfig:input_type -> google.storage.v2.CreateNotificationConfigRequest + 11, // 102: google.storage.v2.Storage.ListNotificationConfigs:input_type -> google.storage.v2.ListNotificationConfigsRequest 13, // 103: google.storage.v2.Storage.ComposeObject:input_type -> google.storage.v2.ComposeObjectRequest 14, // 104: 
google.storage.v2.Storage.DeleteObject:input_type -> google.storage.v2.DeleteObjectRequest 15, // 105: google.storage.v2.Storage.CancelResumableWrite:input_type -> google.storage.v2.CancelResumableWriteRequest @@ -8396,10 +8407,10 @@ 83, // 126: google.storage.v2.Storage.SetIamPolicy:output_type -> google.iam.v1.Policy 84, // 127: google.storage.v2.Storage.TestIamPermissions:output_type -> google.iam.v1.TestIamPermissionsResponse 41, // 128: google.storage.v2.Storage.UpdateBucket:output_type -> google.storage.v2.Bucket - 82, // 129: google.storage.v2.Storage.DeleteNotification:output_type -> google.protobuf.Empty - 46, // 130: google.storage.v2.Storage.GetNotification:output_type -> google.storage.v2.Notification - 46, // 131: google.storage.v2.Storage.CreateNotification:output_type -> google.storage.v2.Notification - 12, // 132: google.storage.v2.Storage.ListNotifications:output_type -> google.storage.v2.ListNotificationsResponse + 82, // 129: google.storage.v2.Storage.DeleteNotificationConfig:output_type -> google.protobuf.Empty + 46, // 130: google.storage.v2.Storage.GetNotificationConfig:output_type -> google.storage.v2.NotificationConfig + 46, // 131: google.storage.v2.Storage.CreateNotificationConfig:output_type -> google.storage.v2.NotificationConfig + 12, // 132: google.storage.v2.Storage.ListNotificationConfigs:output_type -> google.storage.v2.ListNotificationConfigsResponse 48, // 133: google.storage.v2.Storage.ComposeObject:output_type -> google.storage.v2.Object 82, // 134: google.storage.v2.Storage.DeleteObject:output_type -> google.protobuf.Empty 16, // 135: google.storage.v2.Storage.CancelResumableWrite:output_type -> google.storage.v2.CancelResumableWriteResponse @@ -8515,7 +8526,7 @@ } } file_google_storage_v2_storage_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteNotificationRequest); i { + switch v := v.(*DeleteNotificationConfigRequest); i { case 0: return &v.state case 1: @@ -8527,7 +8538,7 @@ } } file_google_storage_v2_storage_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetNotificationRequest); i { + switch v := v.(*GetNotificationConfigRequest); i { case 0: return &v.state case 1: @@ -8539,7 +8550,7 @@ } } file_google_storage_v2_storage_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateNotificationRequest); i { + switch v := v.(*CreateNotificationConfigRequest); i { case 0: return &v.state case 1: @@ -8551,7 +8562,7 @@ } } file_google_storage_v2_storage_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListNotificationsRequest); i { + switch v := v.(*ListNotificationConfigsRequest); i { case 0: return &v.state case 1: @@ -8563,7 +8574,7 @@ } } file_google_storage_v2_storage_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListNotificationsResponse); i { + switch v := v.(*ListNotificationConfigsResponse); i { case 0: return &v.state case 1: @@ -8971,7 +8982,7 @@ } } file_google_storage_v2_storage_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Notification); i { + switch v := v.(*NotificationConfig); i { case 0: return &v.state case 1: @@ -9361,31 +9372,31 @@ // The `resource` field in the request should be // projects/_/buckets/ for a bucket or // projects/_/buckets//objects/ for an object. 
- GetIamPolicy(ctx context.Context, in *v1.GetIamPolicyRequest, opts ...grpc.CallOption) (*v1.Policy, error) + GetIamPolicy(ctx context.Context, in *iampb.GetIamPolicyRequest, opts ...grpc.CallOption) (*iampb.Policy, error) // Updates an IAM policy for the specified bucket or object. // The `resource` field in the request should be // projects/_/buckets/ for a bucket or // projects/_/buckets//objects/ for an object. - SetIamPolicy(ctx context.Context, in *v1.SetIamPolicyRequest, opts ...grpc.CallOption) (*v1.Policy, error) + SetIamPolicy(ctx context.Context, in *iampb.SetIamPolicyRequest, opts ...grpc.CallOption) (*iampb.Policy, error) // Tests a set of permissions on the given bucket or object to see which, if // any, are held by the caller. // The `resource` field in the request should be // projects/_/buckets/ for a bucket or // projects/_/buckets//objects/ for an object. - TestIamPermissions(ctx context.Context, in *v1.TestIamPermissionsRequest, opts ...grpc.CallOption) (*v1.TestIamPermissionsResponse, error) + TestIamPermissions(ctx context.Context, in *iampb.TestIamPermissionsRequest, opts ...grpc.CallOption) (*iampb.TestIamPermissionsResponse, error) // Updates a bucket. Equivalent to JSON API's storage.buckets.patch method. UpdateBucket(ctx context.Context, in *UpdateBucketRequest, opts ...grpc.CallOption) (*Bucket, error) - // Permanently deletes a notification subscription. - DeleteNotification(ctx context.Context, in *DeleteNotificationRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) - // View a notification config. - GetNotification(ctx context.Context, in *GetNotificationRequest, opts ...grpc.CallOption) (*Notification, error) - // Creates a notification subscription for a given bucket. - // These notifications, when triggered, publish messages to the specified - // Pub/Sub topics. - // See https://cloud.google.com/storage/docs/pubsub-notifications. - CreateNotification(ctx context.Context, in *CreateNotificationRequest, opts ...grpc.CallOption) (*Notification, error) - // Retrieves a list of notification subscriptions for a given bucket. - ListNotifications(ctx context.Context, in *ListNotificationsRequest, opts ...grpc.CallOption) (*ListNotificationsResponse, error) + // Permanently deletes a NotificationConfig. + DeleteNotificationConfig(ctx context.Context, in *DeleteNotificationConfigRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // View a NotificationConfig. + GetNotificationConfig(ctx context.Context, in *GetNotificationConfigRequest, opts ...grpc.CallOption) (*NotificationConfig, error) + // Creates a NotificationConfig for a given bucket. + // These NotificationConfigs, when triggered, publish messages to the + // specified Pub/Sub topics. See + // https://cloud.google.com/storage/docs/pubsub-notifications. + CreateNotificationConfig(ctx context.Context, in *CreateNotificationConfigRequest, opts ...grpc.CallOption) (*NotificationConfig, error) + // Retrieves a list of NotificationConfigs for a given bucket. + ListNotificationConfigs(ctx context.Context, in *ListNotificationConfigsRequest, opts ...grpc.CallOption) (*ListNotificationConfigsResponse, error) // Concatenates a list of existing objects into a new object in the same // bucket. 
ComposeObject(ctx context.Context, in *ComposeObjectRequest, opts ...grpc.CallOption) (*Object, error) @@ -9440,8 +9451,9 @@ // returned `persisted_size`; in this case, the service will skip data at // offsets that were already persisted (without checking that it matches // the previously written data), and write only the data starting from the - // persisted offset. This behavior can make client-side handling simpler - // in some cases. + // persisted offset. Even though the data isn't written, it may still + // incur a performance cost over resuming at the correct write offset. + // This behavior can make client-side handling simpler in some cases. // // The service will not view the object as complete until the client has // sent a `WriteObjectRequest` with `finish_write` set to `true`. Sending any @@ -9544,8 +9556,8 @@ return out, nil } -func (c *storageClient) GetIamPolicy(ctx context.Context, in *v1.GetIamPolicyRequest, opts ...grpc.CallOption) (*v1.Policy, error) { - out := new(v1.Policy) +func (c *storageClient) GetIamPolicy(ctx context.Context, in *iampb.GetIamPolicyRequest, opts ...grpc.CallOption) (*iampb.Policy, error) { + out := new(iampb.Policy) err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/GetIamPolicy", in, out, opts...) if err != nil { return nil, err @@ -9553,8 +9565,8 @@ return out, nil } -func (c *storageClient) SetIamPolicy(ctx context.Context, in *v1.SetIamPolicyRequest, opts ...grpc.CallOption) (*v1.Policy, error) { - out := new(v1.Policy) +func (c *storageClient) SetIamPolicy(ctx context.Context, in *iampb.SetIamPolicyRequest, opts ...grpc.CallOption) (*iampb.Policy, error) { + out := new(iampb.Policy) err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/SetIamPolicy", in, out, opts...) if err != nil { return nil, err @@ -9562,8 +9574,8 @@ return out, nil } -func (c *storageClient) TestIamPermissions(ctx context.Context, in *v1.TestIamPermissionsRequest, opts ...grpc.CallOption) (*v1.TestIamPermissionsResponse, error) { - out := new(v1.TestIamPermissionsResponse) +func (c *storageClient) TestIamPermissions(ctx context.Context, in *iampb.TestIamPermissionsRequest, opts ...grpc.CallOption) (*iampb.TestIamPermissionsResponse, error) { + out := new(iampb.TestIamPermissionsResponse) err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/TestIamPermissions", in, out, opts...) if err != nil { return nil, err @@ -9580,36 +9592,36 @@ return out, nil } -func (c *storageClient) DeleteNotification(ctx context.Context, in *DeleteNotificationRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { +func (c *storageClient) DeleteNotificationConfig(ctx context.Context, in *DeleteNotificationConfigRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/DeleteNotification", in, out, opts...) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/DeleteNotificationConfig", in, out, opts...) if err != nil { return nil, err } return out, nil } -func (c *storageClient) GetNotification(ctx context.Context, in *GetNotificationRequest, opts ...grpc.CallOption) (*Notification, error) { - out := new(Notification) - err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/GetNotification", in, out, opts...) +func (c *storageClient) GetNotificationConfig(ctx context.Context, in *GetNotificationConfigRequest, opts ...grpc.CallOption) (*NotificationConfig, error) { + out := new(NotificationConfig) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/GetNotificationConfig", in, out, opts...) 
if err != nil { return nil, err } return out, nil } -func (c *storageClient) CreateNotification(ctx context.Context, in *CreateNotificationRequest, opts ...grpc.CallOption) (*Notification, error) { - out := new(Notification) - err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/CreateNotification", in, out, opts...) +func (c *storageClient) CreateNotificationConfig(ctx context.Context, in *CreateNotificationConfigRequest, opts ...grpc.CallOption) (*NotificationConfig, error) { + out := new(NotificationConfig) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/CreateNotificationConfig", in, out, opts...) if err != nil { return nil, err } return out, nil } -func (c *storageClient) ListNotifications(ctx context.Context, in *ListNotificationsRequest, opts ...grpc.CallOption) (*ListNotificationsResponse, error) { - out := new(ListNotificationsResponse) - err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/ListNotifications", in, out, opts...) +func (c *storageClient) ListNotificationConfigs(ctx context.Context, in *ListNotificationConfigsRequest, opts ...grpc.CallOption) (*ListNotificationConfigsResponse, error) { + out := new(ListNotificationConfigsResponse) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/ListNotificationConfigs", in, out, opts...) if err != nil { return nil, err } @@ -9833,31 +9845,31 @@ // The `resource` field in the request should be // projects/_/buckets/ for a bucket or // projects/_/buckets//objects/ for an object. - GetIamPolicy(context.Context, *v1.GetIamPolicyRequest) (*v1.Policy, error) + GetIamPolicy(context.Context, *iampb.GetIamPolicyRequest) (*iampb.Policy, error) // Updates an IAM policy for the specified bucket or object. // The `resource` field in the request should be // projects/_/buckets/ for a bucket or // projects/_/buckets//objects/ for an object. - SetIamPolicy(context.Context, *v1.SetIamPolicyRequest) (*v1.Policy, error) + SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest) (*iampb.Policy, error) // Tests a set of permissions on the given bucket or object to see which, if // any, are held by the caller. // The `resource` field in the request should be // projects/_/buckets/ for a bucket or // projects/_/buckets//objects/ for an object. - TestIamPermissions(context.Context, *v1.TestIamPermissionsRequest) (*v1.TestIamPermissionsResponse, error) + TestIamPermissions(context.Context, *iampb.TestIamPermissionsRequest) (*iampb.TestIamPermissionsResponse, error) // Updates a bucket. Equivalent to JSON API's storage.buckets.patch method. UpdateBucket(context.Context, *UpdateBucketRequest) (*Bucket, error) - // Permanently deletes a notification subscription. - DeleteNotification(context.Context, *DeleteNotificationRequest) (*emptypb.Empty, error) - // View a notification config. - GetNotification(context.Context, *GetNotificationRequest) (*Notification, error) - // Creates a notification subscription for a given bucket. - // These notifications, when triggered, publish messages to the specified - // Pub/Sub topics. - // See https://cloud.google.com/storage/docs/pubsub-notifications. - CreateNotification(context.Context, *CreateNotificationRequest) (*Notification, error) - // Retrieves a list of notification subscriptions for a given bucket. - ListNotifications(context.Context, *ListNotificationsRequest) (*ListNotificationsResponse, error) + // Permanently deletes a NotificationConfig. + DeleteNotificationConfig(context.Context, *DeleteNotificationConfigRequest) (*emptypb.Empty, error) + // View a NotificationConfig. 
+ GetNotificationConfig(context.Context, *GetNotificationConfigRequest) (*NotificationConfig, error) + // Creates a NotificationConfig for a given bucket. + // These NotificationConfigs, when triggered, publish messages to the + // specified Pub/Sub topics. See + // https://cloud.google.com/storage/docs/pubsub-notifications. + CreateNotificationConfig(context.Context, *CreateNotificationConfigRequest) (*NotificationConfig, error) + // Retrieves a list of NotificationConfigs for a given bucket. + ListNotificationConfigs(context.Context, *ListNotificationConfigsRequest) (*ListNotificationConfigsResponse, error) // Concatenates a list of existing objects into a new object in the same // bucket. ComposeObject(context.Context, *ComposeObjectRequest) (*Object, error) @@ -9912,8 +9924,9 @@ // returned `persisted_size`; in this case, the service will skip data at // offsets that were already persisted (without checking that it matches // the previously written data), and write only the data starting from the - // persisted offset. This behavior can make client-side handling simpler - // in some cases. + // persisted offset. Even though the data isn't written, it may still + // incur a performance cost over resuming at the correct write offset. + // This behavior can make client-side handling simpler in some cases. // // The service will not view the object as complete until the client has // sent a `WriteObjectRequest` with `finish_write` set to `true`. Sending any @@ -9982,29 +9995,29 @@ func (*UnimplementedStorageServer) LockBucketRetentionPolicy(context.Context, *LockBucketRetentionPolicyRequest) (*Bucket, error) { return nil, status.Errorf(codes.Unimplemented, "method LockBucketRetentionPolicy not implemented") } -func (*UnimplementedStorageServer) GetIamPolicy(context.Context, *v1.GetIamPolicyRequest) (*v1.Policy, error) { +func (*UnimplementedStorageServer) GetIamPolicy(context.Context, *iampb.GetIamPolicyRequest) (*iampb.Policy, error) { return nil, status.Errorf(codes.Unimplemented, "method GetIamPolicy not implemented") } -func (*UnimplementedStorageServer) SetIamPolicy(context.Context, *v1.SetIamPolicyRequest) (*v1.Policy, error) { +func (*UnimplementedStorageServer) SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest) (*iampb.Policy, error) { return nil, status.Errorf(codes.Unimplemented, "method SetIamPolicy not implemented") } -func (*UnimplementedStorageServer) TestIamPermissions(context.Context, *v1.TestIamPermissionsRequest) (*v1.TestIamPermissionsResponse, error) { +func (*UnimplementedStorageServer) TestIamPermissions(context.Context, *iampb.TestIamPermissionsRequest) (*iampb.TestIamPermissionsResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method TestIamPermissions not implemented") } func (*UnimplementedStorageServer) UpdateBucket(context.Context, *UpdateBucketRequest) (*Bucket, error) { return nil, status.Errorf(codes.Unimplemented, "method UpdateBucket not implemented") } -func (*UnimplementedStorageServer) DeleteNotification(context.Context, *DeleteNotificationRequest) (*emptypb.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method DeleteNotification not implemented") +func (*UnimplementedStorageServer) DeleteNotificationConfig(context.Context, *DeleteNotificationConfigRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteNotificationConfig not implemented") } -func (*UnimplementedStorageServer) GetNotification(context.Context, *GetNotificationRequest) (*Notification, error) { - return 
nil, status.Errorf(codes.Unimplemented, "method GetNotification not implemented") +func (*UnimplementedStorageServer) GetNotificationConfig(context.Context, *GetNotificationConfigRequest) (*NotificationConfig, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetNotificationConfig not implemented") } -func (*UnimplementedStorageServer) CreateNotification(context.Context, *CreateNotificationRequest) (*Notification, error) { - return nil, status.Errorf(codes.Unimplemented, "method CreateNotification not implemented") +func (*UnimplementedStorageServer) CreateNotificationConfig(context.Context, *CreateNotificationConfigRequest) (*NotificationConfig, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateNotificationConfig not implemented") } -func (*UnimplementedStorageServer) ListNotifications(context.Context, *ListNotificationsRequest) (*ListNotificationsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListNotifications not implemented") +func (*UnimplementedStorageServer) ListNotificationConfigs(context.Context, *ListNotificationConfigsRequest) (*ListNotificationConfigsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListNotificationConfigs not implemented") } func (*UnimplementedStorageServer) ComposeObject(context.Context, *ComposeObjectRequest) (*Object, error) { return nil, status.Errorf(codes.Unimplemented, "method ComposeObject not implemented") @@ -10153,7 +10166,7 @@ } func _Storage_GetIamPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(v1.GetIamPolicyRequest) + in := new(iampb.GetIamPolicyRequest) if err := dec(in); err != nil { return nil, err } @@ -10165,13 +10178,13 @@ FullMethod: "/google.storage.v2.Storage/GetIamPolicy", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).GetIamPolicy(ctx, req.(*v1.GetIamPolicyRequest)) + return srv.(StorageServer).GetIamPolicy(ctx, req.(*iampb.GetIamPolicyRequest)) } return interceptor(ctx, in, info, handler) } func _Storage_SetIamPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(v1.SetIamPolicyRequest) + in := new(iampb.SetIamPolicyRequest) if err := dec(in); err != nil { return nil, err } @@ -10183,13 +10196,13 @@ FullMethod: "/google.storage.v2.Storage/SetIamPolicy", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).SetIamPolicy(ctx, req.(*v1.SetIamPolicyRequest)) + return srv.(StorageServer).SetIamPolicy(ctx, req.(*iampb.SetIamPolicyRequest)) } return interceptor(ctx, in, info, handler) } func _Storage_TestIamPermissions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(v1.TestIamPermissionsRequest) + in := new(iampb.TestIamPermissionsRequest) if err := dec(in); err != nil { return nil, err } @@ -10201,7 +10214,7 @@ FullMethod: "/google.storage.v2.Storage/TestIamPermissions", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).TestIamPermissions(ctx, req.(*v1.TestIamPermissionsRequest)) + return srv.(StorageServer).TestIamPermissions(ctx, req.(*iampb.TestIamPermissionsRequest)) } return interceptor(ctx, in, info, handler) } @@ -10224,74 +10237,74 @@ return interceptor(ctx, 
in, info, handler) } -func _Storage_DeleteNotification_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteNotificationRequest) +func _Storage_DeleteNotificationConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteNotificationConfigRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(StorageServer).DeleteNotification(ctx, in) + return srv.(StorageServer).DeleteNotificationConfig(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/google.storage.v2.Storage/DeleteNotification", + FullMethod: "/google.storage.v2.Storage/DeleteNotificationConfig", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).DeleteNotification(ctx, req.(*DeleteNotificationRequest)) + return srv.(StorageServer).DeleteNotificationConfig(ctx, req.(*DeleteNotificationConfigRequest)) } return interceptor(ctx, in, info, handler) } -func _Storage_GetNotification_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetNotificationRequest) +func _Storage_GetNotificationConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetNotificationConfigRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(StorageServer).GetNotification(ctx, in) + return srv.(StorageServer).GetNotificationConfig(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/google.storage.v2.Storage/GetNotification", + FullMethod: "/google.storage.v2.Storage/GetNotificationConfig", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).GetNotification(ctx, req.(*GetNotificationRequest)) + return srv.(StorageServer).GetNotificationConfig(ctx, req.(*GetNotificationConfigRequest)) } return interceptor(ctx, in, info, handler) } -func _Storage_CreateNotification_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CreateNotificationRequest) +func _Storage_CreateNotificationConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateNotificationConfigRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(StorageServer).CreateNotification(ctx, in) + return srv.(StorageServer).CreateNotificationConfig(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/google.storage.v2.Storage/CreateNotification", + FullMethod: "/google.storage.v2.Storage/CreateNotificationConfig", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).CreateNotification(ctx, req.(*CreateNotificationRequest)) + return srv.(StorageServer).CreateNotificationConfig(ctx, req.(*CreateNotificationConfigRequest)) } return interceptor(ctx, in, info, handler) } -func _Storage_ListNotifications_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListNotificationsRequest) 
+func _Storage_ListNotificationConfigs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListNotificationConfigsRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(StorageServer).ListNotifications(ctx, in) + return srv.(StorageServer).ListNotificationConfigs(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/google.storage.v2.Storage/ListNotifications", + FullMethod: "/google.storage.v2.Storage/ListNotificationConfigs", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).ListNotifications(ctx, req.(*ListNotificationsRequest)) + return srv.(StorageServer).ListNotificationConfigs(ctx, req.(*ListNotificationConfigsRequest)) } return interceptor(ctx, in, info, handler) } @@ -10654,20 +10667,20 @@ Handler: _Storage_UpdateBucket_Handler, }, { - MethodName: "DeleteNotification", - Handler: _Storage_DeleteNotification_Handler, + MethodName: "DeleteNotificationConfig", + Handler: _Storage_DeleteNotificationConfig_Handler, }, { - MethodName: "GetNotification", - Handler: _Storage_GetNotification_Handler, + MethodName: "GetNotificationConfig", + Handler: _Storage_GetNotificationConfig_Handler, }, { - MethodName: "CreateNotification", - Handler: _Storage_CreateNotification_Handler, + MethodName: "CreateNotificationConfig", + Handler: _Storage_CreateNotificationConfig_Handler, }, { - MethodName: "ListNotifications", - Handler: _Storage_ListNotifications_Handler, + MethodName: "ListNotificationConfigs", + Handler: _Storage_ListNotificationConfigs_Handler, }, { MethodName: "ComposeObject", diff -Nru temporal-1.21.5-1/src/vendor/cloud.google.com/go/storage/internal/version.go temporal-1.22.5/src/vendor/cloud.google.com/go/storage/internal/version.go --- temporal-1.21.5-1/src/vendor/cloud.google.com/go/storage/internal/version.go 2023-09-29 14:03:30.000000000 +0000 +++ temporal-1.22.5/src/vendor/cloud.google.com/go/storage/internal/version.go 2024-02-23 09:46:08.000000000 +0000 @@ -15,4 +15,4 @@ package internal // Version is the current tagged release of the library. 
-const Version = "1.29.0" +const Version = "1.30.1" diff -Nru temporal-1.21.5-1/src/vendor/cloud.google.com/go/storage/notifications.go temporal-1.22.5/src/vendor/cloud.google.com/go/storage/notifications.go --- temporal-1.21.5-1/src/vendor/cloud.google.com/go/storage/notifications.go 2023-09-29 14:03:30.000000000 +0000 +++ temporal-1.22.5/src/vendor/cloud.google.com/go/storage/notifications.go 2024-02-23 09:46:08.000000000 +0000 @@ -92,7 +92,7 @@ return n } -func toNotificationFromProto(pbn *storagepb.Notification) *Notification { +func toNotificationFromProto(pbn *storagepb.NotificationConfig) *Notification { n := &Notification{ ID: pbn.GetName(), EventTypes: pbn.GetEventTypes(), @@ -104,8 +104,8 @@ return n } -func toProtoNotification(n *Notification) *storagepb.Notification { - return &storagepb.Notification{ +func toProtoNotification(n *Notification) *storagepb.NotificationConfig { + return &storagepb.NotificationConfig{ Name: n.ID, Topic: fmt.Sprintf("//pubsub.googleapis.com/projects/%s/topics/%s", n.TopicProjectID, n.TopicID), @@ -182,7 +182,7 @@ return m } -func notificationsToMapFromProto(ns []*storagepb.Notification) map[string]*Notification { +func notificationsToMapFromProto(ns []*storagepb.NotificationConfig) map[string]*Notification { m := map[string]*Notification{} for _, n := range ns { m[n.Name] = toNotificationFromProto(n) diff -Nru temporal-1.21.5-1/src/vendor/cloud.google.com/go/storage/option.go temporal-1.22.5/src/vendor/cloud.google.com/go/storage/option.go --- temporal-1.21.5-1/src/vendor/cloud.google.com/go/storage/option.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/cloud.google.com/go/storage/option.go 2024-02-23 09:46:08.000000000 +0000 @@ -0,0 +1,75 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "google.golang.org/api/option" + "google.golang.org/api/option/internaloption" +) + +// storageConfig contains the Storage client option configuration that can be +// set through storageClientOptions. +type storageConfig struct { + useJSONforReads bool + readAPIWasSet bool +} + +// newStorageConfig generates a new storageConfig with all the given +// storageClientOptions applied. +func newStorageConfig(opts ...option.ClientOption) storageConfig { + var conf storageConfig + for _, opt := range opts { + if storageOpt, ok := opt.(storageClientOption); ok { + storageOpt.ApplyStorageOpt(&conf) + } + } + return conf +} + +// A storageClientOption is an option for a Google Storage client. +type storageClientOption interface { + option.ClientOption + ApplyStorageOpt(*storageConfig) +} + +// WithJSONReads is an option that may be passed to a Storage Client on creation. +// It sets the client to use the JSON API for object reads. Currently, the +// default API used for reads is XML. +// Setting this option is required to use the GenerationNotMatch condition. 
+// +// Note that when this option is set, reads will return a zero date for +// [ReaderObjectAttrs].LastModified and may return a different value for +// [ReaderObjectAttrs].CacheControl. +func WithJSONReads() option.ClientOption { + return &withReadAPI{useJSON: true} +} + +// WithXMLReads is an option that may be passed to a Storage Client on creation. +// It sets the client to use the XML API for object reads. +// +// This is the current default. +func WithXMLReads() option.ClientOption { + return &withReadAPI{useJSON: false} +} + +type withReadAPI struct { + internaloption.EmbeddableAdapter + useJSON bool +} + +func (w *withReadAPI) ApplyStorageOpt(c *storageConfig) { + c.useJSONforReads = w.useJSON + c.readAPIWasSet = true +} diff -Nru temporal-1.21.5-1/src/vendor/cloud.google.com/go/storage/reader.go temporal-1.22.5/src/vendor/cloud.google.com/go/storage/reader.go --- temporal-1.21.5-1/src/vendor/cloud.google.com/go/storage/reader.go 2023-09-29 14:03:30.000000000 +0000 +++ temporal-1.22.5/src/vendor/cloud.google.com/go/storage/reader.go 2024-02-23 09:46:08.000000000 +0000 @@ -139,15 +139,23 @@ res.Header.Get("Content-Encoding") != "gzip" } +// parseCRC32c parses the crc32c hash from the X-Goog-Hash header. +// It can parse headers in the form [crc32c=xxx md5=xxx] (XML responses) or the +// form [crc32c=xxx,md5=xxx] (JSON responses). The md5 hash is ignored. func parseCRC32c(res *http.Response) (uint32, bool) { const prefix = "crc32c=" for _, spec := range res.Header["X-Goog-Hash"] { - if strings.HasPrefix(spec, prefix) { - c, err := decodeUint32(spec[len(prefix):]) - if err == nil { - return c, true + values := strings.Split(spec, ",") + + for _, v := range values { + if strings.HasPrefix(v, prefix) { + c, err := decodeUint32(v[len(prefix):]) + if err == nil { + return c, true + } } } + } return 0, false } diff -Nru temporal-1.21.5-1/src/vendor/cloud.google.com/go/storage/storage.go temporal-1.22.5/src/vendor/cloud.google.com/go/storage/storage.go --- temporal-1.21.5-1/src/vendor/cloud.google.com/go/storage/storage.go 2023-09-29 14:03:30.000000000 +0000 +++ temporal-1.22.5/src/vendor/cloud.google.com/go/storage/storage.go 2024-02-23 09:46:08.000000000 +0000 @@ -129,8 +129,10 @@ // // Clients should be reused instead of created as needed. The methods of Client // are safe for concurrent use by multiple goroutines. +// +// You may configure the client by passing in options from the [google.golang.org/api/option] +// package. You may also use options defined in this package, such as [WithJSONReads]. func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { - // Use the experimental gRPC client if the env var is set. // This is an experimental API and not intended for public use. if withGRPC := os.Getenv("STORAGE_USE_GRPC"); withGRPC != "" { @@ -179,10 +181,12 @@ endpoint := hostURL.String() // Append the emulator host as default endpoint for the user - opts = append([]option.ClientOption{option.WithoutAuthentication()}, opts...) - - opts = append(opts, internaloption.WithDefaultEndpoint(endpoint)) - opts = append(opts, internaloption.WithDefaultMTLSEndpoint(endpoint)) + opts = append([]option.ClientOption{ + option.WithoutAuthentication(), + internaloption.SkipDialSettingsValidation(), + internaloption.WithDefaultEndpoint(endpoint), + internaloption.WithDefaultMTLSEndpoint(endpoint), + }, opts...) } // htransport selects the correct endpoint among WithEndpoint (user override), WithDefaultEndpoint, and WithDefaultMTLSEndpoint.
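The two hunks above add and document the JSON-reads client option in the vendored cloud.google.com/go/storage package. The following is a minimal usage sketch, not part of the patch, showing how a client could be constructed with WithJSONReads so the GenerationNotMatch read condition (documented later in this diff) takes effect; the bucket and object names are placeholders.

package main

import (
	"context"
	"io"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()

	// WithJSONReads routes object reads through the JSON API (XML remains the default).
	client, err := storage.NewClient(ctx, storage.WithJSONReads())
	if err != nil {
		log.Fatalf("storage.NewClient: %v", err)
	}
	defer client.Close()

	// Per the Conditions documentation in this diff, GenerationNotMatch only
	// applies to reads when WithJSONReads is set on the client.
	r, err := client.Bucket("example-bucket").
		Object("example-object").
		If(storage.Conditions{GenerationNotMatch: 12345}).
		NewReader(ctx)
	if err != nil {
		log.Fatalf("NewReader: %v", err)
	}
	defer r.Close()

	if _, err := io.Copy(io.Discard, r); err != nil {
		log.Fatalf("read: %v", err)
	}
}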
@@ -535,7 +539,7 @@ sanitizedHeader := strings.TrimSpace(hdr) var key, value string - headerMatches := strings.Split(sanitizedHeader, ":") + headerMatches := strings.SplitN(sanitizedHeader, ":", 2) if len(headerMatches) < 2 { continue } @@ -649,7 +653,7 @@ func extractHeaderNames(kvs []string) []string { var res []string for _, header := range kvs { - nameValue := strings.Split(header, ":") + nameValue := strings.SplitN(header, ":", 2) res = append(res, nameValue[0]) } return res @@ -793,7 +797,7 @@ headersMap := map[string]string{} var headersKeys []string for _, h := range hdrs { - parts := strings.Split(h, ":") + parts := strings.SplitN(h, ":", 2) k := parts[0] v := parts[1] headersMap[k] = v @@ -1713,6 +1717,8 @@ // GenerationNotMatch specifies that the object must not have the given // generation for the operation to occur. // If GenerationNotMatch is zero, it has no effect. + // This condition only works for object reads if the WithJSONReads client + // option is set. GenerationNotMatch int64 // DoesNotExist specifies that the object must not exist in the bucket for @@ -1731,6 +1737,8 @@ // MetagenerationNotMatch specifies that the object must not have the given // metageneration for the operation to occur. // If MetagenerationNotMatch is zero, it has no effect. + // This condition only works for object reads if the WithJSONReads client + // option is set. MetagenerationNotMatch int64 } diff -Nru temporal-1.21.5-1/src/vendor/github.com/aws/aws-sdk-go/aws/config.go temporal-1.22.5/src/vendor/github.com/aws/aws-sdk-go/aws/config.go --- temporal-1.21.5-1/src/vendor/github.com/aws/aws-sdk-go/aws/config.go 2023-09-29 14:03:30.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/aws/aws-sdk-go/aws/config.go 2024-02-23 09:46:09.000000000 +0000 @@ -20,16 +20,16 @@ // A Config provides service configuration for service clients. By default, // all clients will use the defaults.DefaultConfig structure. // -// // Create Session with MaxRetries configuration to be shared by multiple -// // service clients. -// sess := session.Must(session.NewSession(&aws.Config{ -// MaxRetries: aws.Int(3), -// })) +// // Create Session with MaxRetries configuration to be shared by multiple +// // service clients. +// sess := session.Must(session.NewSession(&aws.Config{ +// MaxRetries: aws.Int(3), +// })) // -// // Create S3 service client with a specific Region. -// svc := s3.New(sess, &aws.Config{ -// Region: aws.String("us-west-2"), -// }) +// // Create S3 service client with a specific Region. +// svc := s3.New(sess, &aws.Config{ +// Region: aws.String("us-west-2"), +// }) type Config struct { // Enables verbose error printing of all credential chain errors. // Should be used when wanting to see all errors while attempting to @@ -192,6 +192,23 @@ // EC2MetadataDisableTimeoutOverride *bool + // Set this to `false` to disable EC2Metadata client from falling back to IMDSv1. + // By default, EC2 role credentials will fall back to IMDSv1 as needed for backwards compatibility. + // You can disable this behavior by explicitly setting this flag to `false`. When false, the EC2Metadata + // client will return any errors encountered from attempting to fetch a token instead of silently + // using the insecure data flow of IMDSv1. + // + // Example: + // sess := session.Must(session.NewSession(aws.NewConfig() + // .WithEC2MetadataEnableFallback(false))) + // + // svc := s3.New(sess) + // + // See [configuring IMDS] for more information. 
+ // + // [configuring IMDS]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html + EC2MetadataEnableFallback *bool + // Instructs the endpoint to be generated for a service client to // be the dual stack endpoint. The dual stack endpoint will support // both IPv4 and IPv6 addressing. @@ -283,16 +300,16 @@ // NewConfig returns a new Config pointer that can be chained with builder // methods to set multiple configuration values inline without using pointers. // -// // Create Session with MaxRetries configuration to be shared by multiple -// // service clients. -// sess := session.Must(session.NewSession(aws.NewConfig(). -// WithMaxRetries(3), -// )) +// // Create Session with MaxRetries configuration to be shared by multiple +// // service clients. +// sess := session.Must(session.NewSession(aws.NewConfig(). +// WithMaxRetries(3), +// )) // -// // Create S3 service client with a specific Region. -// svc := s3.New(sess, aws.NewConfig(). -// WithRegion("us-west-2"), -// ) +// // Create S3 service client with a specific Region. +// svc := s3.New(sess, aws.NewConfig(). +// WithRegion("us-west-2"), +// ) func NewConfig() *Config { return &Config{} } @@ -432,6 +449,13 @@ return c } +// WithEC2MetadataEnableFallback sets a config EC2MetadataEnableFallback value +// returning a Config pointer for chaining. +func (c *Config) WithEC2MetadataEnableFallback(v bool) *Config { + c.EC2MetadataEnableFallback = &v + return c +} + // WithSleepDelay overrides the function used to sleep while waiting for the // next retry. Defaults to time.Sleep. func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config { @@ -576,6 +600,10 @@ dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride } + if other.EC2MetadataEnableFallback != nil { + dst.EC2MetadataEnableFallback = other.EC2MetadataEnableFallback + } + if other.SleepDelay != nil { dst.SleepDelay = other.SleepDelay } diff -Nru temporal-1.21.5-1/src/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go temporal-1.22.5/src/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go --- temporal-1.21.5-1/src/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go 2023-09-29 14:03:30.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go 2024-02-23 09:46:09.000000000 +0000 @@ -9,7 +9,7 @@ ensure synchronous usage of the AssumeRoleProvider if the value is shared between multiple Credentials, Sessions or service clients. -Assume Role +# Assume Role To assume an IAM role using STS with the SDK you can create a new Credentials with the SDKs's stscreds package. @@ -27,7 +27,7 @@ // from assumed role. svc := s3.New(sess, &aws.Config{Credentials: creds}) -Assume Role with static MFA Token +# Assume Role with static MFA Token To assume an IAM role with a MFA token you can either specify a MFA token code directly or provide a function to prompt the user each time the credentials @@ -49,7 +49,7 @@ // from assumed role. svc := s3.New(sess, &aws.Config{Credentials: creds}) -Assume Role with MFA Token Provider +# Assume Role with MFA Token Provider To assume an IAM role with MFA for longer running tasks where the credentials may need to be refreshed setting the TokenProvider field of AssumeRoleProvider @@ -74,7 +74,6 @@ // Create service client value configured for credentials // from assumed role. 
svc := s3.New(sess, &aws.Config{Credentials: creds}) - */ package stscreds @@ -199,6 +198,10 @@ // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). SerialNumber *string + // The SourceIdentity which is used to identify a persistent identity through the whole session. + // For more details see https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html + SourceIdentity *string + // The value provided by the MFA device, if the trust policy of the role being // assumed requires MFA (that is, if the policy includes a condition that tests // for MFA). If the role being assumed requires MFA and if the TokenCode value @@ -320,6 +323,7 @@ Tags: p.Tags, PolicyArns: p.PolicyArns, TransitiveTagKeys: p.TransitiveTagKeys, + SourceIdentity: p.SourceIdentity, } if p.Policy != nil { input.Policy = p.Policy diff -Nru temporal-1.21.5-1/src/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go temporal-1.22.5/src/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go --- temporal-1.21.5-1/src/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go 2023-09-29 14:03:30.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go 2024-02-23 09:46:09.000000000 +0000 @@ -57,13 +57,13 @@ // New creates a new instance of the EC2Metadata client with a session. // This client is safe to use across multiple goroutines. // -// // Example: -// // Create a EC2Metadata client from just a session. -// svc := ec2metadata.New(mySession) // -// // Create a EC2Metadata client with additional configuration -// svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugHTTPBody)) +// // Create a EC2Metadata client from just a session. +// svc := ec2metadata.New(mySession) +// +// // Create a EC2Metadata client with additional configuration +// svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugHTTPBody)) func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata { c := p.ClientConfig(ServiceName, cfgs...) return NewClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) diff -Nru temporal-1.21.5-1/src/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go temporal-1.22.5/src/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go --- temporal-1.21.5-1/src/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go 2023-09-29 14:03:30.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go 2024-02-23 09:46:09.000000000 +0000 @@ -1,6 +1,7 @@ package ec2metadata import ( + "fmt" "net/http" "sync/atomic" "time" @@ -33,11 +34,15 @@ return &tokenProvider{client: c, configuredTTL: duration} } +// check if fallback is enabled +func (t *tokenProvider) fallbackEnabled() bool { + return t.client.Config.EC2MetadataEnableFallback == nil || *t.client.Config.EC2MetadataEnableFallback +} + // fetchTokenHandler fetches token for EC2Metadata service client by default. func (t *tokenProvider) fetchTokenHandler(r *request.Request) { - // short-circuits to insecure data flow if tokenProvider is disabled.
- if v := atomic.LoadUint32(&t.disabled); v == 1 { + if v := atomic.LoadUint32(&t.disabled); v == 1 && t.fallbackEnabled() { return } @@ -49,23 +54,21 @@ output, err := t.client.getToken(r.Context(), t.configuredTTL) if err != nil { + // only attempt fallback to insecure data flow if IMDSv1 is enabled + if !t.fallbackEnabled() { + r.Error = awserr.New("EC2MetadataError", "failed to get IMDSv2 token and fallback to IMDSv1 is disabled", err) + return + } - // change the disabled flag on token provider to true, - // when error is request timeout error. + // change the disabled flag on token provider to true and fallback if requestFailureError, ok := err.(awserr.RequestFailure); ok { switch requestFailureError.StatusCode() { case http.StatusForbidden, http.StatusNotFound, http.StatusMethodNotAllowed: atomic.StoreUint32(&t.disabled, 1) + t.client.Config.Logger.Log(fmt.Sprintf("WARN: failed to get session token, falling back to IMDSv1: %v", requestFailureError)) case http.StatusBadRequest: r.Error = requestFailureError } - - // Check if request timed out while waiting for response - if e, ok := requestFailureError.OrigErr().(awserr.Error); ok { - if e.Code() == request.ErrCodeRequestError { - atomic.StoreUint32(&t.disabled, 1) - } - } } return } diff -Nru temporal-1.21.5-1/src/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go temporal-1.22.5/src/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go --- temporal-1.21.5-1/src/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go 2023-09-29 14:03:30.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go 2024-02-23 09:46:09.000000000 +0000 @@ -13,6 +13,8 @@ AwsUsGovPartitionID = "aws-us-gov" // AWS GovCloud (US) partition. AwsIsoPartitionID = "aws-iso" // AWS ISO (US) partition. AwsIsoBPartitionID = "aws-iso-b" // AWS ISOB (US) partition. + AwsIsoEPartitionID = "aws-iso-e" // AWS ISOE (Europe) partition. + AwsIsoFPartitionID = "aws-iso-f" // AWS ISOF partition. ) // AWS Standard partition's regions. @@ -69,8 +71,14 @@ UsIsobEast1RegionID = "us-isob-east-1" // US ISOB East (Ohio). ) +// AWS ISOE (Europe) partition's regions. +const () + +// AWS ISOF partition's regions. +const () + // DefaultResolver returns an Endpoint resolver that will be able -// to resolve endpoints for: AWS Standard, AWS China, AWS GovCloud (US), AWS ISO (US), and AWS ISOB (US). +// to resolve endpoints for: AWS Standard, AWS China, AWS GovCloud (US), AWS ISO (US), AWS ISOB (US), AWS ISOE (Europe), and AWS ISOF. // // Use DefaultPartitions() to get the list of the default partitions. func DefaultResolver() Resolver { @@ -78,7 +86,7 @@ } // DefaultPartitions returns a list of the partitions the SDK is bundled -// with. The available partitions are: AWS Standard, AWS China, AWS GovCloud (US), AWS ISO (US), and AWS ISOB (US). +// with. The available partitions are: AWS Standard, AWS China, AWS GovCloud (US), AWS ISO (US), AWS ISOB (US), AWS ISOE (Europe), and AWS ISOF. // // partitions := endpoints.DefaultPartitions // for _, p := range partitions { @@ -94,6 +102,8 @@ awsusgovPartition, awsisoPartition, awsisobPartition, + awsisoePartition, + awsisofPartition, } // AwsPartition returns the Resolver for AWS Standard. 
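Related to the EC2 metadata changes above (the new EC2MetadataEnableFallback field in aws/config.go and the fallback handling in ec2metadata/token_provider.go), here is a minimal sketch, not part of the patch, of opting out of the IMDSv1 fallback so that IMDSv2 token failures surface as errors; the region value is a placeholder.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/ec2metadata"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// With EC2MetadataEnableFallback set to false, the client returns the
	// IMDSv2 token error instead of silently retrying over insecure IMDSv1.
	sess, err := session.NewSession(aws.NewConfig().
		WithRegion("us-east-1").
		WithEC2MetadataEnableFallback(false))
	if err != nil {
		log.Fatal(err)
	}

	doc, err := ec2metadata.New(sess).GetInstanceIdentityDocument()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("running in region:", doc.Region)
}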
@@ -593,6 +603,9 @@ Region: "ap-south-1", }: endpoint{}, endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ @@ -602,6 +615,9 @@ Region: "ap-southeast-3", }: endpoint{}, endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ @@ -614,12 +630,18 @@ Region: "eu-central-1", }: endpoint{}, endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ Region: "eu-west-1", }: endpoint{}, endpointKey{ @@ -857,6 +879,9 @@ Region: "eu-north-1", }: endpoint{}, endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ Region: "eu-west-1", }: endpoint{}, endpointKey{ @@ -912,6 +937,9 @@ Region: "eu-north-1", }: endpoint{}, endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ Region: "eu-west-1", }: endpoint{}, endpointKey{ @@ -1838,6 +1866,9 @@ Region: "ap-south-1", }: endpoint{}, endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ @@ -1847,18 +1878,27 @@ Region: "ap-southeast-3", }: endpoint{}, endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ Region: "eu-west-1", }: endpoint{}, endpointKey{ @@ -2048,6 +2088,9 @@ Deprecated: boxedTrue, }, endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ Region: "me-south-1", }: endpoint{}, endpointKey{ @@ -2391,24 +2434,39 @@ Region: "ap-south-1", }: endpoint{}, endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ Region: "eu-west-1", }: endpoint{}, endpointKey{ @@ -2418,6 +2476,9 @@ Region: "eu-west-3", }: endpoint{}, endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ Region: "me-south-1", }: endpoint{}, endpointKey{ @@ -2962,6 +3023,15 @@ Region: "ap-northeast-1", }: endpoint{}, endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ Region: "eu-west-1", }: endpoint{}, endpointKey{ @@ -3131,6 +3201,9 @@ Region: "ap-south-1", }: endpoint{}, endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ @@ -3146,12 +3219,18 @@ Region: "eu-central-1", }: endpoint{}, endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, endpointKey{ + Region: 
"eu-south-2", + }: endpoint{}, + endpointKey{ Region: "eu-west-1", }: endpoint{}, endpointKey{ @@ -3161,6 +3240,9 @@ Region: "eu-west-3", }: endpoint{}, endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ Region: "me-south-1", }: endpoint{}, endpointKey{ @@ -3191,6 +3273,12 @@ Region: "ap-northeast-1", }: endpoint{}, endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ @@ -3209,6 +3297,12 @@ Region: "eu-west-2", }: endpoint{}, endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ Region: "us-east-1", }: endpoint{}, endpointKey{ @@ -3222,9 +3316,27 @@ "arc-zonal-shift": service{ Endpoints: serviceEndpoints{ endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ Region: "ap-northeast-1", }: endpoint{}, endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ @@ -3234,21 +3346,54 @@ Region: "ap-southeast-3", }: endpoint{}, endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ Region: "eu-central-1", }: endpoint{}, endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ Region: "eu-west-1", }: endpoint{}, endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ Region: "us-east-1", }: endpoint{}, endpointKey{ Region: "us-east-2", }: endpoint{}, endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ Region: "us-west-2", }: endpoint{}, }, @@ -3469,6 +3614,12 @@ Hostname: "athena-fips.us-east-1.amazonaws.com", }, endpointKey{ + Region: "us-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "athena-fips.us-east-1.api.aws", + }, + endpointKey{ Region: "us-east-2", }: endpoint{}, endpointKey{ @@ -3484,6 +3635,12 @@ Hostname: "athena-fips.us-east-2.amazonaws.com", }, endpointKey{ + Region: "us-east-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "athena-fips.us-east-2.api.aws", + }, + endpointKey{ Region: "us-west-1", }: endpoint{}, endpointKey{ @@ -3499,6 +3656,12 @@ Hostname: "athena-fips.us-west-1.amazonaws.com", }, endpointKey{ + Region: "us-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "athena-fips.us-west-1.api.aws", + }, + endpointKey{ Region: "us-west-2", }: endpoint{}, endpointKey{ @@ -3513,6 +3676,12 @@ }: endpoint{ Hostname: "athena-fips.us-west-2.amazonaws.com", }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "athena-fips.us-west-2.api.aws", + }, }, }, "auditmanager": service{ @@ -3741,6 +3910,9 @@ Region: "ap-south-1", }: endpoint{}, endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ Region: "ap-southeast-1", }: 
endpoint{}, endpointKey{ @@ -3750,18 +3922,27 @@ Region: "ap-southeast-3", }: endpoint{}, endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ Region: "eu-west-1", }: endpoint{}, endpointKey{ @@ -3905,6 +4086,9 @@ Region: "ap-south-1", }: endpoint{}, endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ @@ -3914,18 +4098,27 @@ Region: "ap-southeast-3", }: endpoint{}, endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ Region: "eu-west-1", }: endpoint{}, endpointKey{ @@ -5093,6 +5286,9 @@ Region: "ap-south-1", }: endpoint{}, endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ @@ -5102,18 +5298,27 @@ Region: "ap-southeast-3", }: endpoint{}, endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ Region: "eu-west-1", }: endpoint{}, endpointKey{ @@ -5237,12 +5442,18 @@ Region: "ap-south-1", }: endpoint{}, endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ @@ -5288,6 +5499,9 @@ Deprecated: boxedTrue, }, endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ Region: "me-south-1", }: endpoint{}, endpointKey{ @@ -5549,6 +5763,9 @@ "codepipeline": service{ Endpoints: serviceEndpoints{ endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ Region: "ap-east-1", }: endpoint{}, endpointKey{ @@ -5579,6 +5796,9 @@ Region: "eu-central-1", }: endpoint{}, endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ @@ -5639,6 +5859,9 @@ Deprecated: boxedTrue, }, endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ Region: "sa-east-1", }: endpoint{}, endpointKey{ @@ -5749,6 +5972,9 @@ Region: "eu-north-1", }: endpoint{}, endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ Region: "eu-west-1", }: endpoint{}, endpointKey{ @@ -5889,6 +6115,15 @@ Deprecated: boxedTrue, }, endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "cognito-identity-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ Region: "fips-us-west-2", }: endpoint{ Hostname: "cognito-identity-fips.us-west-2.amazonaws.com", @@ -5925,6 +6160,12 @@ Region: 
"us-west-1", }: endpoint{}, endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cognito-identity-fips.us-west-1.amazonaws.com", + }, + endpointKey{ Region: "us-west-2", }: endpoint{}, endpointKey{ @@ -6594,11 +6835,41 @@ Region: "eu-west-2", }: endpoint{}, endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "connect-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "connect-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ Region: "us-east-1", }: endpoint{}, endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "connect-fips.us-east-1.amazonaws.com", + }, + endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "connect-fips.us-west-2.amazonaws.com", + }, }, }, "connect-campaigns": service{ @@ -6681,12 +6952,21 @@ "controltower": service{ Endpoints: serviceEndpoints{ endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ Region: "ap-northeast-1", }: endpoint{}, endpointKey{ Region: "ap-northeast-2", }: endpoint{}, endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ Region: "ap-south-1", }: endpoint{}, endpointKey{ @@ -6696,6 +6976,9 @@ Region: "ap-southeast-2", }: endpoint{}, endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ @@ -6720,6 +7003,9 @@ Region: "eu-north-1", }: endpoint{}, endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ Region: "eu-west-1", }: endpoint{}, endpointKey{ @@ -6729,6 +7015,9 @@ Region: "eu-west-3", }: endpoint{}, endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ Region: "sa-east-1", }: endpoint{}, endpointKey{ @@ -6768,6 +7057,24 @@ Deprecated: boxedTrue, }, endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "controltower-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "controltower-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ Region: "us-west-2", }: endpoint{}, endpointKey{ @@ -7312,6 +7619,9 @@ Region: "ap-southeast-3", }: endpoint{}, endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ @@ -7512,6 +7822,12 @@ Region: "ca-central-1", }: endpoint{}, endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "devops-guru-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ Region: "eu-central-1", }: endpoint{}, endpointKey{ @@ -7527,6 +7843,15 @@ Region: "eu-west-3", }: endpoint{}, endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "devops-guru-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ Region: "fips-us-east-1", }: endpoint{ Hostname: "devops-guru-fips.us-east-1.amazonaws.com", @@ -7545,6 +7870,15 @@ Deprecated: boxedTrue, }, endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: 
"devops-guru-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ Region: "fips-us-west-2", }: endpoint{ Hostname: "devops-guru-fips.us-west-2.amazonaws.com", @@ -7578,6 +7912,12 @@ Region: "us-west-1", }: endpoint{}, endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "devops-guru-fips.us-west-1.amazonaws.com", + }, + endpointKey{ Region: "us-west-2", }: endpoint{}, endpointKey{ @@ -7797,12 +8137,18 @@ Region: "eu-central-1", }: endpoint{}, endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ Region: "eu-west-1", }: endpoint{}, endpointKey{ @@ -8225,6 +8571,9 @@ Region: "ap-southeast-3", }: endpoint{}, endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ @@ -9095,6 +9444,9 @@ Region: "ap-southeast-3", }: endpoint{}, endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ @@ -9545,6 +9897,15 @@ Hostname: "elasticfilesystem-fips.ap-south-1.amazonaws.com", }, endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-south-2.amazonaws.com", + }, + endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ @@ -9572,6 +9933,15 @@ Hostname: "elasticfilesystem-fips.ap-southeast-3.amazonaws.com", }, endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-southeast-4.amazonaws.com", + }, + endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ @@ -9707,6 +10077,15 @@ Deprecated: boxedTrue, }, endpointKey{ + Region: "fips-ap-south-2", + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ Region: "fips-ap-southeast-1", }: endpoint{ Hostname: "elasticfilesystem-fips.ap-southeast-1.amazonaws.com", @@ -9734,6 +10113,15 @@ Deprecated: boxedTrue, }, endpointKey{ + Region: "fips-ap-southeast-4", + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-southeast-4.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + Deprecated: boxedTrue, + }, + endpointKey{ Region: "fips-ca-central-1", }: endpoint{ Hostname: "elasticfilesystem-fips.ca-central-1.amazonaws.com", @@ -10391,6 +10779,9 @@ "emr-containers": service{ Endpoints: serviceEndpoints{ endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ Region: "ap-northeast-1", }: endpoint{}, endpointKey{ @@ -10475,6 +10866,9 @@ Deprecated: boxedTrue, }, endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ Region: "sa-east-1", }: endpoint{}, endpointKey{ @@ -10518,6 +10912,9 @@ "emr-serverless": service{ Endpoints: serviceEndpoints{ endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ Region: "ap-northeast-1", }: endpoint{}, endpointKey{ @@ -10602,6 +10999,9 @@ Deprecated: boxedTrue, }, endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ Region: "sa-east-1", }: endpoint{}, endpointKey{ @@ -11075,6 +11475,9 @@ Region: "ap-southeast-3", }: endpoint{}, endpointKey{ + 
Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ @@ -11240,6 +11643,9 @@ Hostname: "fms-fips.ap-south-1.amazonaws.com", }, endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ @@ -11261,6 +11667,9 @@ Region: "ap-southeast-3", }: endpoint{}, endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ @@ -11279,6 +11688,9 @@ Hostname: "fms-fips.eu-central-1.amazonaws.com", }, endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ @@ -11291,6 +11703,9 @@ Hostname: "fms-fips.eu-south-1.amazonaws.com", }, endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ Region: "eu-west-1", }: endpoint{}, endpointKey{ @@ -11748,6 +12163,9 @@ Region: "ap-south-1", }: endpoint{}, endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ @@ -11757,6 +12175,9 @@ Region: "ap-southeast-3", }: endpoint{}, endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ @@ -11769,12 +12190,18 @@ Region: "eu-central-1", }: endpoint{}, endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ Region: "eu-west-1", }: endpoint{}, endpointKey{ @@ -12316,12 +12743,18 @@ Region: "eu-central-1", }: endpoint{}, endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ Region: "eu-west-1", }: endpoint{}, endpointKey{ @@ -12524,6 +12957,12 @@ Region: "ca-central-1", }: endpoint{}, endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "greengrass-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ Region: "eu-central-1", }: endpoint{}, endpointKey{ @@ -12533,14 +12972,68 @@ Region: "eu-west-2", }: endpoint{}, endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "greengrass-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "greengrass-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "greengrass-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "greengrass-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ Region: "us-east-1", }: endpoint{}, endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "greengrass-fips.us-east-1.amazonaws.com", + }, + endpointKey{ Region: "us-east-2", }: endpoint{}, endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "greengrass-fips.us-east-2.amazonaws.com", + }, + endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + 
Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "greengrass-fips.us-west-2.amazonaws.com", + }, }, }, "groundstation": service{ @@ -12667,6 +13160,9 @@ Region: "ap-southeast-3", }: endpoint{}, endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ @@ -12831,6 +13327,9 @@ }, Endpoints: serviceEndpoints{ endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ Region: "us-east-1", }: endpoint{}, endpointKey{ @@ -13276,16 +13775,124 @@ }: endpoint{}, }, }, - "iot": service{ + "internetmonitor": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ - CredentialScope: credentialScope{ - Service: "execute-api", - }, + DNSSuffix: "api.aws", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", }, }, Endpoints: serviceEndpoints{ endpointKey{ + Region: "af-south-1", + }: endpoint{ + Hostname: "internetmonitor.af-south-1.api.aws", + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "internetmonitor.ap-east-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "internetmonitor.ap-northeast-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "internetmonitor.ap-northeast-2.api.aws", + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "internetmonitor.ap-south-1.api.aws", + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "internetmonitor.ap-southeast-1.api.aws", + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "internetmonitor.ap-southeast-2.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "internetmonitor.ca-central-1.api.aws", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "internetmonitor.eu-central-1.api.aws", + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "internetmonitor.eu-north-1.api.aws", + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{ + Hostname: "internetmonitor.eu-south-1.api.aws", + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "internetmonitor.eu-west-1.api.aws", + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "internetmonitor.eu-west-2.api.aws", + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: "internetmonitor.eu-west-3.api.aws", + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Hostname: "internetmonitor.me-south-1.api.aws", + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "internetmonitor.sa-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "internetmonitor.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "internetmonitor.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Hostname: "internetmonitor.us-west-1.api.aws", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "internetmonitor.us-west-2.api.aws", + }, + }, + }, + "iot": service{ + Endpoints: serviceEndpoints{ + endpointKey{ Region: "ap-east-1", }: endpoint{}, endpointKey{ @@ -13331,45 +13938,35 @@ Region: "fips-ca-central-1", }: endpoint{ Hostname: "iot-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Service: "execute-api", - }, + Deprecated: boxedTrue, }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ Hostname: 
"iot-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Service: "execute-api", - }, + Deprecated: boxedTrue, }, endpointKey{ Region: "fips-us-east-2", }: endpoint{ Hostname: "iot-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Service: "execute-api", - }, + Deprecated: boxedTrue, }, endpointKey{ Region: "fips-us-west-1", }: endpoint{ Hostname: "iot-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Service: "execute-api", - }, + Deprecated: boxedTrue, }, endpointKey{ Region: "fips-us-west-2", }: endpoint{ Hostname: "iot-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Service: "execute-api", - }, + Deprecated: boxedTrue, }, endpointKey{ @@ -14021,11 +14618,41 @@ Region: "eu-west-1", }: endpoint{}, endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "iottwinmaker-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "iottwinmaker-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ Region: "us-east-1", }: endpoint{}, endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iottwinmaker-fips.us-east-1.amazonaws.com", + }, + endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iottwinmaker-fips.us-west-2.amazonaws.com", + }, }, }, "iotwireless": service{ @@ -14122,6 +14749,31 @@ }: endpoint{}, }, }, + "ivsrealtime": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "kafka": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -14143,6 +14795,9 @@ Region: "ap-south-1", }: endpoint{}, endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ @@ -14152,18 +14807,33 @@ Region: "ap-southeast-3", }: endpoint{}, endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kafka-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ Region: "eu-central-1", }: endpoint{}, endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ Region: "eu-west-1", }: endpoint{}, endpointKey{ @@ -14173,6 +14843,54 @@ Region: "eu-west-3", }: endpoint{}, endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "kafka-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "kafka-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: 
"kafka-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "kafka-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "kafka-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ Region: "me-south-1", }: endpoint{}, endpointKey{ @@ -14182,14 +14900,38 @@ Region: "us-east-1", }: endpoint{}, endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kafka-fips.us-east-1.amazonaws.com", + }, + endpointKey{ Region: "us-east-2", }: endpoint{}, endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kafka-fips.us-east-2.amazonaws.com", + }, + endpointKey{ Region: "us-west-1", }: endpoint{}, endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kafka-fips.us-west-1.amazonaws.com", + }, + endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kafka-fips.us-west-2.amazonaws.com", + }, }, }, "kafkaconnect": service{ @@ -14265,6 +15007,9 @@ Region: "eu-west-1", }: endpoint{}, endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ Region: "fips-us-east-1", }: endpoint{ Hostname: "kendra-fips.us-east-1.amazonaws.com", @@ -14394,6 +15139,12 @@ Hostname: "kendra-ranking.ca-central-1.api.aws", }, endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kendra-ranking-fips.ca-central-1.api.aws", + }, + endpointKey{ Region: "eu-central-2", }: endpoint{ Hostname: "kendra-ranking.eu-central-2.api.aws", @@ -14444,11 +15195,23 @@ Hostname: "kendra-ranking.us-east-1.api.aws", }, endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kendra-ranking-fips.us-east-1.api.aws", + }, + endpointKey{ Region: "us-east-2", }: endpoint{ Hostname: "kendra-ranking.us-east-2.api.aws", }, endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kendra-ranking-fips.us-east-2.api.aws", + }, + endpointKey{ Region: "us-west-1", }: endpoint{ Hostname: "kendra-ranking.us-west-1.api.aws", @@ -14458,6 +15221,12 @@ }: endpoint{ Hostname: "kendra-ranking.us-west-2.api.aws", }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kendra-ranking-fips.us-west-2.api.aws", + }, }, }, "kinesis": service{ @@ -14626,6 +15395,9 @@ Region: "ap-south-1", }: endpoint{}, endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ @@ -14635,18 +15407,27 @@ Region: "ap-southeast-3", }: endpoint{}, endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ Region: "eu-west-1", }: endpoint{}, endpointKey{ @@ -14656,6 +15437,9 @@ Region: "eu-west-3", }: endpoint{}, endpointKey{ + Region: "me-central-1", + }: endpoint{}, + 
endpointKey{ Region: "me-south-1", }: endpoint{}, endpointKey{ @@ -15099,6 +15883,14 @@ Deprecated: boxedTrue, }, endpointKey{ + Region: "il-central-1-fips", + }: endpoint{ + Hostname: "kms-fips.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, + endpointKey{ Region: "me-central-1", }: endpoint{}, endpointKey{ @@ -15253,18 +16045,27 @@ Region: "ap-southeast-2", }: endpoint{}, endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ Region: "eu-west-1", }: endpoint{}, endpointKey{ @@ -15310,6 +16111,9 @@ Deprecated: boxedTrue, }, endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ Region: "me-south-1", }: endpoint{}, endpointKey{ @@ -15793,6 +16597,12 @@ "license-manager-linux-subscriptions": service{ Endpoints: serviceEndpoints{ endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ Region: "ap-northeast-1", }: endpoint{}, endpointKey{ @@ -15805,21 +16615,39 @@ Region: "ap-south-1", }: endpoint{}, endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ Region: "eu-west-1", }: endpoint{}, endpointKey{ @@ -15865,6 +16693,12 @@ Deprecated: boxedTrue, }, endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ Region: "sa-east-1", }: endpoint{}, endpointKey{ @@ -17014,6 +17848,55 @@ }: endpoint{}, }, }, + "mediapackagev2": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "mediastore": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -17295,6 +18178,9 @@ Region: "ap-south-1", }: endpoint{}, endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ @@ -17304,24 +18190,36 @@ Region: "ap-southeast-3", }: 
endpoint{}, endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ Region: "eu-west-1", }: endpoint{}, endpointKey{ Region: "eu-west-2", }: endpoint{}, endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ Region: "me-central-1", }: endpoint{}, endpointKey{ @@ -17390,6 +18288,9 @@ Region: "ap-south-1", }: endpoint{}, endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ @@ -17399,18 +18300,27 @@ Region: "ap-southeast-3", }: endpoint{}, endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ Region: "eu-west-1", }: endpoint{}, endpointKey{ @@ -17456,6 +18366,9 @@ Deprecated: boxedTrue, }, endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ Region: "me-south-1", }: endpoint{}, endpointKey{ @@ -17837,6 +18750,9 @@ Region: "ap-south-1", }: endpoint{}, endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ @@ -17846,18 +18762,27 @@ Region: "ap-southeast-3", }: endpoint{}, endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ Region: "eu-west-1", }: endpoint{}, endpointKey{ @@ -18212,6 +19137,9 @@ Deprecated: boxedTrue, }, endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ Region: "me-south-1", }: endpoint{}, endpointKey{ @@ -18275,18 +19203,33 @@ Region: "ap-northeast-1", }: endpoint{}, endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ Region: "ap-southeast-2", }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ Region: "eu-west-2", }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ Region: "us-west-2", }: endpoint{}, }, @@ -18556,6 +19499,94 @@ }, }, }, + "omics": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "omics.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "omics.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "omics.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + 
endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "omics.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "omics-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "omics-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "omics.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "omics-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "omics.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "omics-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, "opsworks": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -18668,6 +19699,40 @@ }, }, }, + "osis": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "outposts": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -19226,6 +20291,9 @@ Region: "ap-northeast-2", }: endpoint{}, endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ Region: "ap-south-1", }: endpoint{}, endpointKey{ @@ -19533,17 +20601,62 @@ Region: "ca-central-1", }: endpoint{}, endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "profile-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ Region: "eu-central-1", }: endpoint{}, endpointKey{ Region: "eu-west-2", }: endpoint{}, endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "profile-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "profile-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "profile-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ Region: "us-east-1", }: endpoint{}, endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "profile-fips.us-east-1.amazonaws.com", + }, + endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"profile-fips.us-west-2.amazonaws.com", + }, }, }, "projects.iot1click": service{ @@ -19723,9 +20836,6 @@ Region: "ap-southeast-2", }: endpoint{}, endpointKey{ - Region: "api", - }: endpoint{}, - endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ @@ -20650,12 +21760,18 @@ Region: "ap-northeast-2", }: endpoint{}, endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ Region: "eu-central-1", }: endpoint{}, endpointKey{ @@ -20668,12 +21784,18 @@ Region: "eu-west-2", }: endpoint{}, endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ Region: "us-east-1", }: endpoint{}, endpointKey{ Region: "us-east-2", }: endpoint{}, endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ Region: "us-west-2", }: endpoint{}, }, @@ -21009,16 +22131,6 @@ }, Endpoints: serviceEndpoints{ endpointKey{ - Region: "af-south-1", - }: endpoint{ - Hostname: "resource-explorer-2.af-south-1.api.aws", - }, - endpointKey{ - Region: "ap-east-1", - }: endpoint{ - Hostname: "resource-explorer-2.ap-east-1.api.aws", - }, - endpointKey{ Region: "ap-northeast-1", }: endpoint{ Hostname: "resource-explorer-2.ap-northeast-1.api.aws", @@ -21437,6 +22549,9 @@ Region: "ap-south-1", }: endpoint{}, endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ @@ -21446,18 +22561,27 @@ Region: "ap-southeast-3", }: endpoint{}, endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ Region: "eu-west-1", }: endpoint{}, endpointKey{ @@ -21662,6 +22786,9 @@ Region: "ap-south-1", }: endpoint{}, endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ @@ -21671,18 +22798,27 @@ Region: "ap-southeast-3", }: endpoint{}, endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ Region: "eu-west-1", }: endpoint{}, endpointKey{ @@ -22849,30 +23985,84 @@ "scheduler": service{ Endpoints: serviceEndpoints{ endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ Region: "ap-northeast-1", }: endpoint{}, endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ Region: "eu-central-1", }: endpoint{}, endpointKey{ + Region: 
"eu-central-2", + }: endpoint{}, + endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ Region: "eu-west-1", }: endpoint{}, endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ Region: "us-east-1", }: endpoint{}, endpointKey{ Region: "us-east-2", }: endpoint{}, endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ Region: "us-west-2", }: endpoint{}, }, @@ -23149,6 +24339,9 @@ Region: "ap-south-1", }: endpoint{}, endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ @@ -23158,18 +24351,27 @@ Region: "ap-southeast-3", }: endpoint{}, endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ Region: "eu-west-1", }: endpoint{}, endpointKey{ @@ -23267,6 +24469,15 @@ Region: "ap-northeast-1", }: endpoint{}, endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ Region: "ap-southeast-2", }: endpoint{}, endpointKey{ @@ -23276,12 +24487,21 @@ Region: "eu-west-1", }: endpoint{}, endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ Region: "us-east-1", }: endpoint{}, endpointKey{ Region: "us-east-2", }: endpoint{}, endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ Region: "us-west-2", }: endpoint{}, }, @@ -23406,6 +24626,9 @@ Region: "ap-south-1", }: endpoint{}, endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ @@ -23415,18 +24638,27 @@ Region: "ap-southeast-3", }: endpoint{}, endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ Region: "eu-west-1", }: endpoint{}, endpointKey{ @@ -23436,6 +24668,9 @@ Region: "eu-west-3", }: endpoint{}, endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ Region: "me-south-1", }: endpoint{}, endpointKey{ @@ -23756,6 +24991,15 @@ Hostname: "servicediscovery.ap-southeast-3.amazonaws.com", }, endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.ap-southeast-4.amazonaws.com", + }, + endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ @@ -24029,6 +25273,9 @@ Region: "ap-south-1", }: endpoint{}, endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ @@ -24038,18 +25285,27 @@ 
Region: "ap-southeast-3", }: endpoint{}, endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ Region: "eu-west-1", }: endpoint{}, endpointKey{ @@ -24201,6 +25457,130 @@ }, }, }, + "signer": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "signer-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "signer-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "signer-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "signer-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "signer-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "signer-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "signer-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "signer-fips.us-west-2.amazonaws.com", + }, + }, + }, "simspaceweaver": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -24693,6 +26073,9 @@ Deprecated: boxedTrue, }, endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ Region: "sa-east-1", }: endpoint{}, endpointKey{ @@ -25288,6 +26671,12 @@ Region: "ca-central-1", }: endpoint{}, endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-sap-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ Region: "eu-central-1", }: endpoint{}, 
endpointKey{ @@ -25306,6 +26695,51 @@ Region: "eu-west-3", }: endpoint{}, endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "ssm-sap-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "ssm-sap-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "ssm-sap-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "ssm-sap-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "ssm-sap-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ Region: "me-south-1", }: endpoint{}, endpointKey{ @@ -25315,14 +26749,38 @@ Region: "us-east-1", }: endpoint{}, endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-sap-fips.us-east-1.amazonaws.com", + }, + endpointKey{ Region: "us-east-2", }: endpoint{}, endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-sap-fips.us-east-2.amazonaws.com", + }, + endpointKey{ Region: "us-west-1", }: endpoint{}, endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-sap-fips.us-west-1.amazonaws.com", + }, + endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-sap-fips.us-west-2.amazonaws.com", + }, }, }, "sso": service{ @@ -25561,6 +27019,9 @@ Region: "ap-south-1", }: endpoint{}, endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ @@ -25570,6 +27031,9 @@ Region: "ap-southeast-3", }: endpoint{}, endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ @@ -26628,12 +28092,21 @@ "transcribestreaming": service{ Endpoints: serviceEndpoints{ endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ Region: "ap-northeast-1", }: endpoint{}, endpointKey{ Region: "ap-northeast-2", }: endpoint{}, endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ Region: "ap-southeast-2", }: endpoint{}, endpointKey{ @@ -26791,6 +28264,9 @@ Region: "ap-south-1", }: endpoint{}, endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ @@ -26812,12 +28288,18 @@ Region: "eu-central-1", }: endpoint{}, endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ Region: "eu-west-1", }: endpoint{}, endpointKey{ @@ -26872,6 +28354,9 @@ Deprecated: boxedTrue, }, endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ Region: "me-south-1", }: endpoint{}, endpointKey{ @@ -27017,6 +28502,91 @@ }, }, }, + "verifiedpermissions": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + 
Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "voice-chime": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -27166,6 +28736,31 @@ }, }, }, + "vpc-lattice": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "waf": service{ PartitionEndpoint: "aws-global", IsRegionalized: boxedFalse, @@ -27330,6 +28925,23 @@ }, }, endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "waf-regional.ap-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + }, + endpointKey{ + Region: "ap-south-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.ap-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + }, + endpointKey{ Region: "ap-southeast-1", }: endpoint{ Hostname: "waf-regional.ap-southeast-1.amazonaws.com", @@ -27381,6 +28993,23 @@ }, }, endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + Hostname: "waf-regional.ap-southeast-4.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + }, + endpointKey{ + Region: "ap-southeast-4", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.ap-southeast-4.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + }, + endpointKey{ Region: "ca-central-1", }: endpoint{ Hostname: "waf-regional.ca-central-1.amazonaws.com", @@ -27415,6 +29044,23 @@ }, }, endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "waf-regional.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + }, + endpointKey{ + Region: "eu-central-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"waf-regional-fips.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + }, + endpointKey{ Region: "eu-north-1", }: endpoint{ Hostname: "waf-regional.eu-north-1.amazonaws.com", @@ -27449,6 +29095,23 @@ }, }, endpointKey{ + Region: "eu-south-2", + }: endpoint{ + Hostname: "waf-regional.eu-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + }, + endpointKey{ + Region: "eu-south-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.eu-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + }, + endpointKey{ Region: "eu-west-1", }: endpoint{ Hostname: "waf-regional.eu-west-1.amazonaws.com", @@ -27554,6 +29217,15 @@ Deprecated: boxedTrue, }, endpointKey{ + Region: "fips-ap-south-2", + }: endpoint{ + Hostname: "waf-regional-fips.ap-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ Region: "fips-ap-southeast-1", }: endpoint{ Hostname: "waf-regional-fips.ap-southeast-1.amazonaws.com", @@ -27581,6 +29253,15 @@ Deprecated: boxedTrue, }, endpointKey{ + Region: "fips-ap-southeast-4", + }: endpoint{ + Hostname: "waf-regional-fips.ap-southeast-4.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + Deprecated: boxedTrue, + }, + endpointKey{ Region: "fips-ca-central-1", }: endpoint{ Hostname: "waf-regional-fips.ca-central-1.amazonaws.com", @@ -27599,6 +29280,15 @@ Deprecated: boxedTrue, }, endpointKey{ + Region: "fips-eu-central-2", + }: endpoint{ + Hostname: "waf-regional-fips.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ Region: "fips-eu-north-1", }: endpoint{ Hostname: "waf-regional-fips.eu-north-1.amazonaws.com", @@ -27617,6 +29307,15 @@ Deprecated: boxedTrue, }, endpointKey{ + Region: "fips-eu-south-2", + }: endpoint{ + Hostname: "waf-regional-fips.eu-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ Region: "fips-eu-west-1", }: endpoint{ Hostname: "waf-regional-fips.eu-west-1.amazonaws.com", @@ -27644,6 +29343,14 @@ Deprecated: boxedTrue, }, endpointKey{ + Region: "fips-il-central-1", + }: endpoint{ + Hostname: "waf-regional-fips.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, + endpointKey{ Region: "fips-me-central-1", }: endpoint{ Hostname: "waf-regional-fips.me-central-1.amazonaws.com", @@ -27932,6 +29639,23 @@ }, }, endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "wafv2.ap-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + }, + endpointKey{ + Region: "ap-south-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.ap-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + }, + endpointKey{ Region: "ap-southeast-1", }: endpoint{ Hostname: "wafv2.ap-southeast-1.amazonaws.com", @@ -27983,6 +29707,23 @@ }, }, endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + Hostname: "wafv2.ap-southeast-4.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + }, + endpointKey{ + Region: "ap-southeast-4", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.ap-southeast-4.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + }, + endpointKey{ Region: 
"ca-central-1", }: endpoint{ Hostname: "wafv2.ca-central-1.amazonaws.com", @@ -28017,6 +29758,23 @@ }, }, endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "wafv2.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + }, + endpointKey{ + Region: "eu-central-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + }, + endpointKey{ Region: "eu-north-1", }: endpoint{ Hostname: "wafv2.eu-north-1.amazonaws.com", @@ -28051,6 +29809,23 @@ }, }, endpointKey{ + Region: "eu-south-2", + }: endpoint{ + Hostname: "wafv2.eu-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + }, + endpointKey{ + Region: "eu-south-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.eu-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + }, + endpointKey{ Region: "eu-west-1", }: endpoint{ Hostname: "wafv2.eu-west-1.amazonaws.com", @@ -28156,6 +29931,15 @@ Deprecated: boxedTrue, }, endpointKey{ + Region: "fips-ap-south-2", + }: endpoint{ + Hostname: "wafv2-fips.ap-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ Region: "fips-ap-southeast-1", }: endpoint{ Hostname: "wafv2-fips.ap-southeast-1.amazonaws.com", @@ -28183,6 +29967,15 @@ Deprecated: boxedTrue, }, endpointKey{ + Region: "fips-ap-southeast-4", + }: endpoint{ + Hostname: "wafv2-fips.ap-southeast-4.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + Deprecated: boxedTrue, + }, + endpointKey{ Region: "fips-ca-central-1", }: endpoint{ Hostname: "wafv2-fips.ca-central-1.amazonaws.com", @@ -28201,6 +29994,15 @@ Deprecated: boxedTrue, }, endpointKey{ + Region: "fips-eu-central-2", + }: endpoint{ + Hostname: "wafv2-fips.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ Region: "fips-eu-north-1", }: endpoint{ Hostname: "wafv2-fips.eu-north-1.amazonaws.com", @@ -28219,6 +30021,15 @@ Deprecated: boxedTrue, }, endpointKey{ + Region: "fips-eu-south-2", + }: endpoint{ + Hostname: "wafv2-fips.eu-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ Region: "fips-eu-west-1", }: endpoint{ Hostname: "wafv2-fips.eu-west-1.amazonaws.com", @@ -28246,6 +30057,14 @@ Deprecated: boxedTrue, }, endpointKey{ + Region: "fips-il-central-1", + }: endpoint{ + Hostname: "wafv2-fips.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, + endpointKey{ Region: "fips-me-central-1", }: endpoint{ Hostname: "wafv2-fips.me-central-1.amazonaws.com", @@ -28961,6 +30780,16 @@ }: endpoint{}, }, }, + "airflow": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "api.ecr": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -29339,7 +31168,10 @@ Endpoints: serviceEndpoints{ endpointKey{ Region: "cn-north-1", - }: endpoint{}, + }: endpoint{ + Hostname: "data.ats.iot.cn-north-1.amazonaws.com.cn", + Protocols: []string{"https"}, + }, endpointKey{ Region: "cn-northwest-1", }: endpoint{}, @@ -29602,6 +31434,16 @@ }: endpoint{}, }, }, + "emr-serverless": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + 
Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "es": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -29767,14 +31609,32 @@ }, }, }, - "iot": service{ + "internetmonitor": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ - CredentialScope: credentialScope{ - Service: "execute-api", - }, + DNSSuffix: "api.amazonwebservices.com.cn", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.amazonwebservices.com.cn", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "internetmonitor.cn-north-1.api.amazonwebservices.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "internetmonitor.cn-northwest-1.api.amazonwebservices.com.cn", }, }, + }, + "iot": service{ Endpoints: serviceEndpoints{ endpointKey{ Region: "cn-north-1", @@ -29941,6 +31801,16 @@ }: endpoint{}, }, }, + "license-manager-linux-subscriptions": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "logs": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -30028,6 +31898,16 @@ }, }, }, + "oam": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "organizations": service{ PartitionEndpoint: "aws-cn-global", IsRegionalized: boxedFalse, @@ -30141,6 +32021,16 @@ }: endpoint{}, }, }, + "rolesanywhere": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "route53": service{ PartitionEndpoint: "aws-cn-global", IsRegionalized: boxedFalse, @@ -30358,6 +32248,16 @@ }: endpoint{}, }, }, + "signer": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "sms": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -30783,6 +32683,24 @@ }, }, endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "access-analyzer.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "access-analyzer.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ Region: "us-gov-west-1", }: endpoint{ Hostname: "access-analyzer.us-gov-west-1.amazonaws.com", @@ -30790,6 +32708,24 @@ Region: "us-gov-west-1", }, }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "access-analyzer.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "access-analyzer.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, }, }, "acm": service{ @@ -31216,13 +33152,45 @@ endpointKey{ Region: "us-gov-east-1", }: endpoint{ + Hostname: "application-autoscaling.us-gov-east-1.amazonaws.com", Protocols: []string{"http", "https"}, }, endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"application-autoscaling.us-gov-east-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "application-autoscaling.us-gov-east-1.amazonaws.com", + Protocols: []string{"http", "https"}, + + Deprecated: boxedTrue, + }, + endpointKey{ Region: "us-gov-west-1", }: endpoint{ + Hostname: "application-autoscaling.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "application-autoscaling.us-gov-west-1.amazonaws.com", Protocols: []string{"http", "https"}, }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "application-autoscaling.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + + Deprecated: boxedTrue, + }, }, }, "applicationinsights": service{ @@ -31265,6 +33233,24 @@ Deprecated: boxedTrue, }, endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appstream2-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "appstream2-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ Region: "us-gov-west-1", }: endpoint{}, endpointKey{ @@ -31320,6 +33306,12 @@ Hostname: "athena-fips.us-gov-east-1.amazonaws.com", }, endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "athena-fips.us-gov-east-1.api.aws", + }, + endpointKey{ Region: "us-gov-west-1", }: endpoint{}, endpointKey{ @@ -31334,6 +33326,12 @@ }: endpoint{ Hostname: "athena-fips.us-gov-west-1.amazonaws.com", }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "athena-fips.us-gov-west-1.api.aws", + }, }, }, "autoscaling": service{ @@ -31456,6 +33454,24 @@ }, }, endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cassandra.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "cassandra.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ Region: "us-gov-west-1", }: endpoint{ Hostname: "cassandra.us-gov-west-1.amazonaws.com", @@ -31463,6 +33479,24 @@ Region: "us-gov-west-1", }, }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cassandra.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "cassandra.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, }, }, "cloudcontrolapi": service{ @@ -31510,6 +33544,21 @@ endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "clouddirectory.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "clouddirectory.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, }, }, "cloudformation": service{ @@ 
-31781,6 +33830,9 @@ Deprecated: boxedTrue, }, endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ Region: "us-gov-west-1", }: endpoint{}, endpointKey{ @@ -31955,8 +34007,23 @@ "connect": service{ Endpoints: serviceEndpoints{ endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "connect.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "connect.us-gov-west-1.amazonaws.com", + }, }, }, "controltower": service{ @@ -32145,8 +34212,38 @@ Region: "us-gov-east-1", }: endpoint{}, endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dlm.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "dlm.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dlm.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "dlm.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, }, }, "dms": service{ @@ -32524,6 +34621,24 @@ }, }, endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticbeanstalk.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "elasticbeanstalk.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ Region: "us-gov-west-1", }: endpoint{ Hostname: "elasticbeanstalk.us-gov-west-1.amazonaws.com", @@ -32531,6 +34646,24 @@ Region: "us-gov-west-1", }, }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticbeanstalk.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "elasticbeanstalk.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, }, }, "elasticfilesystem": service{ @@ -32697,6 +34830,16 @@ }, }, }, + "emr-containers": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, "es": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -33372,31 +35515,45 @@ }: endpoint{}, }, }, - "iot": service{ + "internetmonitor": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ - CredentialScope: credentialScope{ - Service: "execute-api", - }, + DNSSuffix: "api.aws", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "internetmonitor.us-gov-east-1.api.aws", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "internetmonitor.us-gov-west-1.api.aws", }, }, + }, + "iot": service{ Endpoints: 
serviceEndpoints{ endpointKey{ Region: "fips-us-gov-east-1", }: endpoint{ Hostname: "iot-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Service: "execute-api", - }, + Deprecated: boxedTrue, }, endpointKey{ Region: "fips-us-gov-west-1", }: endpoint{ Hostname: "iot-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Service: "execute-api", - }, + Deprecated: boxedTrue, }, endpointKey{ @@ -33541,14 +35698,82 @@ }, }, }, + "iottwinmaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "iottwinmaker-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iottwinmaker-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, "kafka": service{ Endpoints: serviceEndpoints{ endpointKey{ Region: "us-gov-east-1", - }: endpoint{}, + }: endpoint{ + Hostname: "kafka.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kafka.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "kafka.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-gov-west-1", - }: endpoint{}, + }: endpoint{ + Hostname: "kafka.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kafka.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "kafka.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, }, }, "kendra": service{ @@ -33895,12 +36120,22 @@ "mediaconvert": service{ Endpoints: serviceEndpoints{ endpointKey{ - Region: "us-gov-west-1", + Region: "fips-us-gov-west-1", }: endpoint{ Hostname: "mediaconvert.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mediaconvert.us-gov-west-1.amazonaws.com", }, }, }, @@ -33971,6 +36206,46 @@ }: endpoint{}, }, }, + "mgn": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "mgn-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "mgn-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mgn-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + 
Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mgn-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, "models.lex": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -34265,12 +36540,22 @@ "participant.connect": service{ Endpoints: serviceEndpoints{ endpointKey{ - Region: "us-gov-west-1", + Region: "fips-us-gov-west-1", }: endpoint{ Hostname: "participant.connect.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "participant.connect.us-gov-west-1.amazonaws.com", }, }, }, @@ -34722,8 +37007,34 @@ Region: "us-gov-east-1", }: endpoint{}, endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "route53resolver.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "route53resolver.us-gov-east-1.amazonaws.com", + + Deprecated: boxedTrue, + }, + endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "route53resolver.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "route53resolver.us-gov-west-1.amazonaws.com", + + Deprecated: boxedTrue, + }, }, }, "runtime.lex": service{ @@ -35121,20 +37432,44 @@ endpointKey{ Region: "us-gov-east-1", }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ Hostname: "serverlessrepo.us-gov-east-1.amazonaws.com", Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "serverlessrepo.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, endpointKey{ Region: "us-gov-west-1", }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ Hostname: "serverlessrepo.us-gov-west-1.amazonaws.com", Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "serverlessrepo.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, }, }, @@ -35326,6 +37661,16 @@ }, }, }, + "simspaceweaver": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, "sms": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -35460,14 +37805,14 @@ endpointKey{ Region: "us-gov-west-1", }: endpoint{ - Protocols: []string{"http", "https"}, + Protocols: []string{"https"}, }, endpointKey{ Region: "us-gov-west-1", Variant: fipsVariant, }: endpoint{ Hostname: "sns.us-gov-west-1.amazonaws.com", - Protocols: []string{"http", "https"}, + Protocols: []string{"https"}, }, }, }, @@ -36200,6 +38545,15 @@ "workspaces": service{ Endpoints: serviceEndpoints{ endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "workspaces-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ Region: "fips-us-gov-west-1", }: endpoint{ Hostname: "workspaces-fips.us-gov-west-1.amazonaws.com", @@ -36209,6 +38563,15 @@ Deprecated: boxedTrue, }, 
endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "workspaces-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ Region: "us-gov-west-1", }: endpoint{}, endpointKey{ @@ -36370,6 +38733,13 @@ }: endpoint{}, }, }, + "athena": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, "autoscaling": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -36382,6 +38752,16 @@ }: endpoint{}, }, }, + "cloudcontrolapi": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, "cloudformation": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -36451,6 +38831,16 @@ }: endpoint{}, }, }, + "dlm": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, "dms": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{}, @@ -36805,6 +39195,9 @@ endpointKey{ Region: "us-iso-east-1", }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "logs": service{ @@ -36865,6 +39258,28 @@ }: endpoint{}, }, }, + "rbin": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-iso-east-1", + }: endpoint{ + Hostname: "rbin-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rbin-fips.us-iso-east-1.c2s.ic.gov", + }, + }, + }, "rds": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -36904,6 +39319,9 @@ endpointKey{ Region: "us-iso-east-1", }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "runtime.sagemaker": service{ @@ -36936,6 +39354,9 @@ endpointKey{ Region: "us-iso-east-1", }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "snowball": service{ @@ -37054,6 +39475,9 @@ endpointKey{ Region: "us-iso-east-1", }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "transcribe": service{ @@ -37523,6 +39947,28 @@ }: endpoint{}, }, }, + "rbin": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-isob-east-1", + }: endpoint{ + Hostname: "rbin-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rbin-fips.us-isob-east-1.sc2s.sgov.gov", + }, + }, + }, "rds": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -37578,6 +40024,13 @@ }: endpoint{}, }, }, + "secretsmanager": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, "snowball": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -37689,3 +40142,71 @@ }, }, } + +// AwsIsoEPartition returns the Resolver for AWS ISOE (Europe). 
+func AwsIsoEPartition() Partition { + return awsisoePartition.Partition() +} + +var awsisoePartition = partition{ + ID: "aws-iso-e", + Name: "AWS ISOE (Europe)", + DNSSuffix: "cloud.adc-e.uk", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^eu\\-isoe\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "cloud.adc-e.uk", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + Regions: regions{}, + Services: services{}, +} + +// AwsIsoFPartition returns the Resolver for AWS ISOF. +func AwsIsoFPartition() Partition { + return awsisofPartition.Partition() +} + +var awsisofPartition = partition{ + ID: "aws-iso-f", + Name: "AWS ISOF", + DNSSuffix: "csp.hci.ic.gov", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^us\\-isof\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "csp.hci.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + Regions: regions{}, + Services: services{}, +} diff -Nru temporal-1.21.5-1/src/vendor/github.com/aws/aws-sdk-go/aws/session/session.go temporal-1.22.5/src/vendor/github.com/aws/aws-sdk-go/aws/session/session.go --- temporal-1.21.5-1/src/vendor/github.com/aws/aws-sdk-go/aws/session/session.go 2023-09-29 14:03:30.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/aws/aws-sdk-go/aws/session/session.go 2024-02-23 09:46:09.000000000 +0000 @@ -174,7 +174,6 @@ // Options provides the means to control how a Session is created and what // configuration values will be loaded. -// type Options struct { // Provides config values for the SDK to use when creating service clients // and making API requests to services. Any value set in with this field @@ -224,7 +223,7 @@ // from stdin for the MFA token code. // // This field is only used if the shared configuration is enabled, and - // the config enables assume role wit MFA via the mfa_serial field. + // the config enables assume role with MFA via the mfa_serial field. AssumeRoleTokenProvider func() (string, error) // When the SDK's shared config is configured to assume a role this option @@ -322,24 +321,24 @@ // credentials file. Enabling the Shared Config will also allow the Session // to be built with retrieving credentials with AssumeRole set in the config. 
// -// // Equivalent to session.New -// sess := session.Must(session.NewSessionWithOptions(session.Options{})) +// // Equivalent to session.New +// sess := session.Must(session.NewSessionWithOptions(session.Options{})) // -// // Specify profile to load for the session's config -// sess := session.Must(session.NewSessionWithOptions(session.Options{ -// Profile: "profile_name", -// })) +// // Specify profile to load for the session's config +// sess := session.Must(session.NewSessionWithOptions(session.Options{ +// Profile: "profile_name", +// })) // -// // Specify profile for config and region for requests -// sess := session.Must(session.NewSessionWithOptions(session.Options{ -// Config: aws.Config{Region: aws.String("us-east-1")}, -// Profile: "profile_name", -// })) +// // Specify profile for config and region for requests +// sess := session.Must(session.NewSessionWithOptions(session.Options{ +// Config: aws.Config{Region: aws.String("us-east-1")}, +// Profile: "profile_name", +// })) // -// // Force enable Shared Config support -// sess := session.Must(session.NewSessionWithOptions(session.Options{ -// SharedConfigState: session.SharedConfigEnable, -// })) +// // Force enable Shared Config support +// sess := session.Must(session.NewSessionWithOptions(session.Options{ +// SharedConfigState: session.SharedConfigEnable, +// })) func NewSessionWithOptions(opts Options) (*Session, error) { var envCfg envConfig var err error @@ -375,7 +374,7 @@ // This helper is intended to be used in variable initialization to load the // Session and configuration at startup. Such as: // -// var sess = session.Must(session.NewSession()) +// var sess = session.Must(session.NewSession()) func Must(sess *Session, err error) *Session { if err != nil { panic(err) @@ -780,16 +779,6 @@ cfg.EndpointResolver = wrapEC2IMDSEndpoint(cfg.EndpointResolver, ec2IMDSEndpoint, endpointMode) } - // Configure credentials if not already set by the user when creating the - // Session. - if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil { - creds, err := resolveCredentials(cfg, envCfg, sharedCfg, handlers, sessOpts) - if err != nil { - return err - } - cfg.Credentials = creds - } - cfg.S3UseARNRegion = userCfg.S3UseARNRegion if cfg.S3UseARNRegion == nil { cfg.S3UseARNRegion = &envCfg.S3UseARNRegion @@ -812,6 +801,17 @@ } } + // Configure credentials if not already set by the user when creating the Session. + // Credentials are resolved last such that all _resolved_ config values are propagated to credential providers. + // ticket: P83606045 + if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil { + creds, err := resolveCredentials(cfg, envCfg, sharedCfg, handlers, sessOpts) + if err != nil { + return err + } + cfg.Credentials = creds + } + return nil } @@ -845,8 +845,8 @@ // and handlers. If any additional configs are provided they will be merged // on top of the Session's copied config. // -// // Create a copy of the current Session, configured for the us-west-2 region. -// sess.Copy(&aws.Config{Region: aws.String("us-west-2")}) +// // Create a copy of the current Session, configured for the us-west-2 region. 
+// sess.Copy(&aws.Config{Region: aws.String("us-west-2")}) func (s *Session) Copy(cfgs ...*aws.Config) *Session { newSession := &Session{ Config: s.Config.Copy(cfgs...), diff -Nru temporal-1.21.5-1/src/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go temporal-1.22.5/src/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go --- temporal-1.21.5-1/src/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go 2023-09-29 14:03:30.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go 2024-02-23 09:46:09.000000000 +0000 @@ -3,7 +3,7 @@ // Provides request signing for request that need to be signed with // AWS V4 Signatures. // -// Standalone Signer +// # Standalone Signer // // Generally using the signer outside of the SDK should not require any additional // logic when using Go v1.5 or higher. The signer does this by taking advantage @@ -14,10 +14,10 @@ // The signer will first check the URL.Opaque field, and use its value if set. // The signer does require the URL.Opaque field to be set in the form of: // -// "///" +// "///" // -// // e.g. -// "//example.com/some/path" +// // e.g. +// "//example.com/some/path" // // The leading "//" and hostname are required or the URL.Opaque escaping will // not work correctly. @@ -695,7 +695,8 @@ includeSHA256Header := ctx.unsignedPayload || ctx.ServiceName == "s3" || ctx.ServiceName == "s3-object-lambda" || - ctx.ServiceName == "glacier" + ctx.ServiceName == "glacier" || + ctx.ServiceName == "s3-outposts" s3Presign := ctx.isPresign && (ctx.ServiceName == "s3" || diff -Nru temporal-1.21.5-1/src/vendor/github.com/aws/aws-sdk-go/aws/version.go temporal-1.22.5/src/vendor/github.com/aws/aws-sdk-go/aws/version.go --- temporal-1.21.5-1/src/vendor/github.com/aws/aws-sdk-go/aws/version.go 2023-09-29 14:03:30.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/aws/aws-sdk-go/aws/version.go 2024-02-23 09:46:09.000000000 +0000 @@ -5,4 +5,4 @@ const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.44.203" +const SDKVersion = "1.44.289" diff -Nru temporal-1.21.5-1/src/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_error.go temporal-1.22.5/src/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_error.go --- temporal-1.21.5-1/src/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_error.go 2023-09-29 14:03:30.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_error.go 2024-02-23 09:46:09.000000000 +0000 @@ -2,6 +2,7 @@ import ( "bytes" + "encoding/json" "io" "io/ioutil" "net/http" @@ -40,52 +41,30 @@ resp *http.Response, respMeta protocol.ResponseMetadata, ) (error, error) { + code, msg, err := unmarshalErrorInfo(resp) + if err != nil { + return nil, err + } - code := resp.Header.Get(errorTypeHeader) - msg := resp.Header.Get(errorMessageHeader) - - body := resp.Body - if len(code) == 0 { - // If unable to get code from HTTP headers have to parse JSON message - // to determine what kind of exception this will be. 
- var buf bytes.Buffer - var jsonErr jsonErrorResponse - teeReader := io.TeeReader(resp.Body, &buf) - err := jsonutil.UnmarshalJSONError(&jsonErr, teeReader) - if err != nil { - return nil, err - } - - body = ioutil.NopCloser(&buf) - code = jsonErr.Code - msg = jsonErr.Message + fn, ok := u.exceptions[code] + if !ok { + return awserr.NewRequestFailure( + awserr.New(code, msg, nil), + respMeta.StatusCode, + respMeta.RequestID, + ), nil } - // If code has colon separators remove them so can compare against modeled - // exception names. - code = strings.SplitN(code, ":", 2)[0] - - if fn, ok := u.exceptions[code]; ok { - // If exception code is know, use associated constructor to get a value - // for the exception that the JSON body can be unmarshaled into. - v := fn(respMeta) - if err := jsonutil.UnmarshalJSONCaseInsensitive(v, body); err != nil { - return nil, err - } - - if err := rest.UnmarshalResponse(resp, v, true); err != nil { - return nil, err - } + v := fn(respMeta) + if err := jsonutil.UnmarshalJSONCaseInsensitive(v, resp.Body); err != nil { + return nil, err + } - return v, nil + if err := rest.UnmarshalResponse(resp, v, true); err != nil { + return nil, err } - // fallback to unmodeled generic exceptions - return awserr.NewRequestFailure( - awserr.New(code, msg, nil), - respMeta.StatusCode, - respMeta.RequestID, - ), nil + return v, nil } // UnmarshalErrorHandler is a named request handler for unmarshaling restjson @@ -99,36 +78,80 @@ func UnmarshalError(r *request.Request) { defer r.HTTPResponse.Body.Close() - var jsonErr jsonErrorResponse - err := jsonutil.UnmarshalJSONError(&jsonErr, r.HTTPResponse.Body) + code, msg, err := unmarshalErrorInfo(r.HTTPResponse) if err != nil { r.Error = awserr.NewRequestFailure( - awserr.New(request.ErrCodeSerialization, - "failed to unmarshal response error", err), + awserr.New(request.ErrCodeSerialization, "failed to unmarshal response error", err), r.HTTPResponse.StatusCode, r.RequestID, ) return } - code := r.HTTPResponse.Header.Get(errorTypeHeader) - if code == "" { - code = jsonErr.Code - } - msg := r.HTTPResponse.Header.Get(errorMessageHeader) - if msg == "" { - msg = jsonErr.Message - } - - code = strings.SplitN(code, ":", 2)[0] r.Error = awserr.NewRequestFailure( - awserr.New(code, jsonErr.Message, nil), + awserr.New(code, msg, nil), r.HTTPResponse.StatusCode, r.RequestID, ) } type jsonErrorResponse struct { + Type string `json:"__type"` Code string `json:"code"` Message string `json:"message"` } + +func (j *jsonErrorResponse) SanitizedCode() string { + code := j.Code + if len(j.Type) > 0 { + code = j.Type + } + return sanitizeCode(code) +} + +// Remove superfluous components from a restJson error code. +// - If a : character is present, then take only the contents before the +// first : character in the value. +// - If a # character is present, then take only the contents after the first +// # character in the value. 
+// +// All of the following error values resolve to FooError: +// - FooError +// - FooError:http://internal.amazon.com/coral/com.amazon.coral.validate/ +// - aws.protocoltests.restjson#FooError +// - aws.protocoltests.restjson#FooError:http://internal.amazon.com/coral/com.amazon.coral.validate/ +func sanitizeCode(code string) string { + noColon := strings.SplitN(code, ":", 2)[0] + hashSplit := strings.SplitN(noColon, "#", 2) + return hashSplit[len(hashSplit)-1] +} + +// attempt to garner error details from the response, preferring header values +// when present +func unmarshalErrorInfo(resp *http.Response) (code string, msg string, err error) { + code = sanitizeCode(resp.Header.Get(errorTypeHeader)) + msg = resp.Header.Get(errorMessageHeader) + if len(code) > 0 && len(msg) > 0 { + return + } + + // a modeled error will have to be re-deserialized later, so the body must + // be preserved + var buf bytes.Buffer + tee := io.TeeReader(resp.Body, &buf) + defer func() { resp.Body = ioutil.NopCloser(&buf) }() + + var jsonErr jsonErrorResponse + if decodeErr := json.NewDecoder(tee).Decode(&jsonErr); decodeErr != nil && decodeErr != io.EOF { + err = awserr.NewUnmarshalError(decodeErr, "failed to decode response body", buf.Bytes()) + return + } + + if len(code) == 0 { + code = jsonErr.SanitizedCode() + } + if len(msg) == 0 { + msg = jsonErr.Message + } + return +} diff -Nru temporal-1.21.5-1/src/vendor/github.com/aws/aws-sdk-go/service/s3/api.go temporal-1.22.5/src/vendor/github.com/aws/aws-sdk-go/service/s3/api.go --- temporal-1.21.5-1/src/vendor/github.com/aws/aws-sdk-go/service/s3/api.go 2023-09-29 14:03:30.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/aws/aws-sdk-go/service/s3/api.go 2024-02-23 09:46:09.000000000 +0000 @@ -186,9 +186,15 @@ // to complete. After Amazon S3 begins processing the request, it sends an HTTP // response header that specifies a 200 OK response. While processing is in // progress, Amazon S3 periodically sends white space characters to keep the -// connection from timing out. Because a request could fail after the initial -// 200 OK response has been sent, it is important that you check the response -// body to determine whether the request succeeded. +// connection from timing out. A request could fail after the initial 200 OK +// response has been sent. This means that a 200 OK response can contain either +// a success or an error. If you call the S3 API directly, make sure to design +// your application to parse the contents of the response and handle it appropriately. +// If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs +// detect the embedded error and apply error handling per your configuration +// settings (including automatically retrying the request as appropriate). If +// the condition persists, the SDKs throws an exception (or, for the SDKs that +// don't use exceptions, they return the error). // // Note that if CompleteMultipartUpload fails, applications should be prepared // to retry the failed requests. For more information, see Amazon S3 Error Best @@ -324,8 +330,13 @@ // action starts, you receive a standard Amazon S3 error. If the error occurs // during the copy operation, the error response is embedded in the 200 OK response. // This means that a 200 OK response can contain either a success or an error. -// Design your application to parse the contents of the response and handle -// it appropriately. 
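The new unmarshalErrorInfo path above normalizes error codes before matching them against modeled exceptions. A small self-contained illustration of that sanitization rule, copied from the logic shown in the hunk:

package main

import (
	"fmt"
	"strings"
)

// sanitizeCode mirrors the restjson error-code cleanup shown above:
// keep what precedes the first ':' and what follows the first '#'.
func sanitizeCode(code string) string {
	noColon := strings.SplitN(code, ":", 2)[0]
	hashSplit := strings.SplitN(noColon, "#", 2)
	return hashSplit[len(hashSplit)-1]
}

func main() {
	// Both of these resolve to "FooError", per the doc comment in the diff.
	fmt.Println(sanitizeCode("FooError"))
	fmt.Println(sanitizeCode("aws.protocoltests.restjson#FooError:http://internal.amazon.com/coral/com.amazon.coral.validate/"))
}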
+// If you call the S3 API directly, make sure to design your application to +// parse the contents of the response and handle it appropriately. If you use +// Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the +// embedded error and apply error handling per your configuration settings (including +// automatically retrying the request as appropriate). If the condition persists, +// the SDKs throws an exception (or, for the SDKs that don't use exceptions, +// they return the error). // // If the copy is successful, you receive a response with information about // the copied object. @@ -345,11 +356,11 @@ // // # Metadata // -// When copying an object, you can preserve all metadata (default) or specify -// new metadata. However, the ACL is not preserved and is set to private for -// the user making the request. To override the default ACL setting, specify -// a new ACL when generating a copy request. For more information, see Using -// ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html). +// When copying an object, you can preserve all metadata (the default) or specify +// new metadata. However, the access control list (ACL) is not preserved and +// is set to private for the user making the request. To override the default +// ACL setting, specify a new ACL when generating a copy request. For more information, +// see Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html). // // To specify whether you want the object metadata copied from the source object // or replaced with metadata provided in the request, you can optionally add @@ -360,6 +371,9 @@ // in the Amazon S3 User Guide. For a complete list of Amazon S3-specific condition // keys, see Actions, Resources, and Condition Keys for Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html). // +// x-amz-website-redirect-location is unique to each object and must be specified +// in the request headers to copy the value. +// // x-amz-copy-source-if Headers // // To only copy an object under certain conditions, such as whether the Etag @@ -395,13 +409,30 @@ // // # Server-side encryption // -// When you perform a CopyObject operation, you can optionally use the appropriate -// encryption-related headers to encrypt the object using server-side encryption -// with Amazon Web Services managed encryption keys (SSE-S3 or SSE-KMS) or a -// customer-provided encryption key. With server-side encryption, Amazon S3 -// encrypts your data as it writes it to disks in its data centers and decrypts -// the data when you access it. For more information about server-side encryption, -// see Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html). +// Amazon S3 automatically encrypts all new objects that are copied to an S3 +// bucket. When copying an object, if you don't specify encryption information +// in your copy request, the encryption setting of the target object is set +// to the default encryption configuration of the destination bucket. By default, +// all buckets have a base level of encryption configuration that uses server-side +// encryption with Amazon S3 managed keys (SSE-S3). 
If the destination bucket +// has a default encryption configuration that uses server-side encryption with +// Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption +// with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with +// customer-provided encryption keys (SSE-C), Amazon S3 uses the corresponding +// KMS key, or a customer-provided key to encrypt the target object copy. +// +// When you perform a CopyObject operation, if you want to use a different type +// of encryption setting for the target object, you can use other appropriate +// encryption-related headers to encrypt the target object with a KMS key, an +// Amazon S3 managed key, or a customer-provided key. With server-side encryption, +// Amazon S3 encrypts your data as it writes your data to disks in its data +// centers and decrypts the data when you access it. If the encryption setting +// in your request is different from the default encryption configuration of +// the destination bucket, the encryption setting in your request takes precedence. +// If the source object for the copy is stored in Amazon S3 using SSE-C, you +// must provide the necessary encryption information in your request so that +// Amazon S3 can decrypt the object for copying. For more information about +// server-side encryption, see Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html). // // If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the // object. For more information, see Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) @@ -412,9 +443,9 @@ // When copying an object, you can optionally use headers to grant ACL-based // permissions. By default, all objects are private. Only the owner has full // access control. When adding a new object, you can grant permissions to individual -// Amazon Web Services accounts or to predefined groups defined by Amazon S3. -// These permissions are then added to the ACL on the object. For more information, -// see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) +// Amazon Web Services accounts or to predefined groups that are defined by +// Amazon S3. These permissions are then added to the ACL on the object. For +// more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) // and Managing ACLs Using the REST API (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html). // // If the bucket that you're copying objects to uses the bucket owner enforced @@ -435,22 +466,27 @@ // # Checksums // // When copying an object, if it has a checksum, that checksum will be copied -// to the new object by default. When you copy the object over, you may optionally +// to the new object by default. When you copy the object over, you can optionally // specify a different checksum algorithm to use with the x-amz-checksum-algorithm // header. // // # Storage Class Options // // You can use the CopyObject action to change the storage class of an object -// that is already stored in Amazon S3 using the StorageClass parameter. For -// more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) +// that is already stored in Amazon S3 by using the StorageClass parameter. 
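The CopyObject guidance above covers default encryption of the copy target and changing the storage class during a copy. A minimal sketch of such a copy with the Go SDK follows; the bucket names, keys, and storage class are placeholders.

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	// Bucket and key names are placeholders. CopySource takes the form
	// "source-bucket/source-key".
	_, err := svc.CopyObject(&s3.CopyObjectInput{
		Bucket:     aws.String("example-destination-bucket"),
		Key:        aws.String("example-key"),
		CopySource: aws.String("example-source-bucket/example-source-key"),
		// Optionally change the storage class during the copy, as the
		// documentation above describes.
		StorageClass: aws.String(s3.StorageClassStandardIa),
	})
	if err != nil {
		panic(err)
	}
}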
+// For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) // in the Amazon S3 User Guide. // +// If the source object's storage class is GLACIER, you must restore a copy +// of this object before you can use it as a source object for the copy operation. +// For more information, see RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html). +// For more information, see Copying Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjectsExamples.html). +// // # Versioning // -// By default, x-amz-copy-source identifies the current version of an object -// to copy. If the current version is a delete marker, Amazon S3 behaves as -// if the object was deleted. To copy a different version, use the versionId +// By default, x-amz-copy-source header identifies the current version of an +// object to copy. If the current version is a delete marker, Amazon S3 behaves +// as if the object was deleted. To copy a different version, use the versionId // subresource. // // If you enable versioning on the target bucket, Amazon S3 generates a unique @@ -461,18 +497,12 @@ // If you do not enable versioning or suspend it on the target bucket, the version // ID that Amazon S3 generates is always null. // -// If the source object's storage class is GLACIER, you must restore a copy -// of this object before you can use it as a source object for the copy operation. -// For more information, see RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html). -// // The following operations are related to CopyObject: // // - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) // // - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) // -// For more information, see Copying Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjectsExamples.html). -// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -576,68 +606,51 @@ // your application must be able to handle 307 redirect. For more information, // see Virtual hosting of buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html). // -// Access control lists (ACLs) -// -// When creating a bucket using this operation, you can optionally configure -// the bucket ACL to specify the accounts or groups that should be granted specific -// permissions on the bucket. -// -// If your CreateBucket request sets bucket owner enforced for S3 Object Ownership -// and specifies a bucket ACL that provides access to an external Amazon Web -// Services account, your request fails with a 400 error and returns the InvalidBucketAclWithObjectOwnership -// error code. For more information, see Controlling object ownership (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) -// in the Amazon S3 User Guide. -// -// There are two ways to grant the appropriate permissions using the request -// headers. -// -// - Specify a canned ACL using the x-amz-acl request header. Amazon S3 supports -// a set of predefined ACLs, known as canned ACLs. Each canned ACL has a -// predefined set of grantees and permissions. For more information, see -// Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). 
-// -// - Specify access permissions explicitly using the x-amz-grant-read, x-amz-grant-write, -// x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control -// headers. These headers map to the set of permissions Amazon S3 supports -// in an ACL. For more information, see Access control list (ACL) overview -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html). -// You specify each grantee as a type=value pair, where the type is one of -// the following: id – if the value specified is the canonical user ID -// of an Amazon Web Services account uri – if you are granting permissions -// to a predefined group emailAddress – if the value specified is the email -// address of an Amazon Web Services account Using email addresses to specify -// a grantee is only supported in the following Amazon Web Services Regions: -// US East (N. Virginia) US West (N. California) US West (Oregon) Asia Pacific -// (Singapore) Asia Pacific (Sydney) Asia Pacific (Tokyo) Europe (Ireland) -// South America (São Paulo) For a list of all the Amazon S3 supported Regions -// and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) -// in the Amazon Web Services General Reference. For example, the following -// x-amz-grant-read header grants the Amazon Web Services accounts identified -// by account IDs permissions to read object data and its metadata: x-amz-grant-read: -// id="11112222333", id="444455556666" -// -// You can use either a canned ACL or specify access permissions explicitly. -// You cannot do both. -// // # Permissions // // In addition to s3:CreateBucket, the following permissions are required when -// your CreateBucket includes specific headers: +// your CreateBucket request includes specific headers: // -// - ACLs - If your CreateBucket request specifies ACL permissions and the -// ACL is public-read, public-read-write, authenticated-read, or if you specify -// access permissions explicitly through any other ACL, both s3:CreateBucket -// and s3:PutBucketAcl permissions are needed. If the ACL the CreateBucket -// request is private or doesn't specify any ACLs, only s3:CreateBucket permission -// is needed. +// - Access control lists (ACLs) - If your CreateBucket request specifies +// access control list (ACL) permissions and the ACL is public-read, public-read-write, +// authenticated-read, or if you specify access permissions explicitly through +// any other ACL, both s3:CreateBucket and s3:PutBucketAcl permissions are +// needed. If the ACL for the CreateBucket request is private or if the request +// doesn't specify any ACLs, only s3:CreateBucket permission is needed. // // - Object Lock - If ObjectLockEnabledForBucket is set to true in your CreateBucket // request, s3:PutBucketObjectLockConfiguration and s3:PutBucketVersioning // permissions are required. // -// - S3 Object Ownership - If your CreateBucket request includes the the -// x-amz-object-ownership header, s3:PutBucketOwnershipControls permission -// is required. +// - S3 Object Ownership - If your CreateBucket request includes the x-amz-object-ownership +// header, then the s3:PutBucketOwnershipControls permission is required. +// By default, ObjectOwnership is set to BucketOWnerEnforced and ACLs are +// disabled. We recommend keeping ACLs disabled, except in uncommon use cases +// where you must control access for each object individually. 
If you want +// to change the ObjectOwnership setting, you can use the x-amz-object-ownership +// header in your CreateBucket request to set the ObjectOwnership setting +// of your choice. For more information about S3 Object Ownership, see Controlling +// object ownership (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) +// in the Amazon S3 User Guide. +// +// - S3 Block Public Access - If your specific use case requires granting +// public access to your S3 resources, you can disable Block Public Access. +// You can create a new bucket with Block Public Access enabled, then separately +// call the DeletePublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html) +// API. To use this operation, you must have the s3:PutBucketPublicAccessBlock +// permission. By default, all Block Public Access settings are enabled for +// new buckets. To avoid inadvertent exposure of your resources, we recommend +// keeping the S3 Block Public Access settings enabled. For more information +// about S3 Block Public Access, see Blocking public access to your Amazon +// S3 storage (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) +// in the Amazon S3 User Guide. +// +// If your CreateBucket request sets BucketOwnerEnforced for Amazon S3 Object +// Ownership and specifies a bucket ACL that provides access to an external +// Amazon Web Services account, your request fails with a 400 error and returns +// the InvalidBucketAcLWithObjectOwnership error code. For more information, +// see Setting Object Ownership on an existing bucket (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-ownership-existing-bucket.html) +// in the Amazon S3 User Guide. // // The following operations are related to CreateBucket: // @@ -745,7 +758,7 @@ // lifecycle configuration. Otherwise, the incomplete multipart upload becomes // eligible for an abort action and Amazon S3 aborts the multipart upload. For // more information, see Aborting Incomplete Multipart Uploads Using a Bucket -// Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config). +// Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config). // // For information about the permissions required to use the multipart upload // API, see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). @@ -763,22 +776,40 @@ // parts and stop charging you for storing them only after you either complete // or abort a multipart upload. // -// You can optionally request server-side encryption. For server-side encryption, -// Amazon S3 encrypts your data as it writes it to disks in its data centers -// and decrypts it when you access it. You can provide your own encryption key, -// or use Amazon Web Services KMS keys or Amazon S3-managed encryption keys. +// Server-side encryption is for data encryption at rest. Amazon S3 encrypts +// your data as it writes it to disks in its data centers and decrypts it when +// you access it. Amazon S3 automatically encrypts all new objects that are +// uploaded to an S3 bucket. When doing a multipart upload, if you don't specify +// encryption information in your request, the encryption setting of the uploaded +// parts is set to the default encryption configuration of the destination bucket. 
+// By default, all buckets have a base level of encryption configuration that +// uses server-side encryption with Amazon S3 managed keys (SSE-S3). If the +// destination bucket has a default encryption configuration that uses server-side +// encryption with an Key Management Service (KMS) key (SSE-KMS), or a customer-provided +// encryption key (SSE-C), Amazon S3 uses the corresponding KMS key, or a customer-provided +// key to encrypt the uploaded parts. When you perform a CreateMultipartUpload +// operation, if you want to use a different type of encryption setting for +// the uploaded parts, you can request that Amazon S3 encrypts the object with +// a KMS key, an Amazon S3 managed key, or a customer-provided key. If the encryption +// setting in your request is different from the default encryption configuration +// of the destination bucket, the encryption setting in your request takes precedence. // If you choose to provide your own encryption key, the request headers you // provide in UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) // and UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) // requests must match the headers you used in the request to initiate the upload -// by using CreateMultipartUpload. +// by using CreateMultipartUpload. You can request that Amazon S3 save the uploaded +// parts encrypted with server-side encryption with an Amazon S3 managed key +// (SSE-S3), an Key Management Service (KMS) key (SSE-KMS), or a customer-provided +// encryption key (SSE-C). // -// To perform a multipart upload with encryption using an Amazon Web Services +// To perform a multipart upload with encryption by using an Amazon Web Services // KMS key, the requester must have permission to the kms:Decrypt and kms:GenerateDataKey* // actions on the key. These permissions are required because Amazon S3 must // decrypt and read data from the encrypted file parts before it completes the // multipart upload. For more information, see Multipart upload API and permissions // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions) +// and Protecting data using server-side encryption with Amazon Web Services +// KMS (https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html) // in the Amazon S3 User Guide. // // If your Identity and Access Management (IAM) user or role is in the same @@ -808,32 +839,35 @@ // // # Server-Side- Encryption-Specific Request Headers // -// You can optionally tell Amazon S3 to encrypt data at rest using server-side -// encryption. Server-side encryption is for data encryption at rest. Amazon -// S3 encrypts your data as it writes it to disks in its data centers and decrypts -// it when you access it. The option you use depends on whether you want to -// use Amazon Web Services managed encryption keys or provide your own encryption -// key. -// -// - Use encryption keys managed by Amazon S3 or customer managed key stored -// in Amazon Web Services Key Management Service (Amazon Web Services KMS) -// – If you want Amazon Web Services to manage the keys used to encrypt +// Amazon S3 encrypts data by using server-side encryption with an Amazon S3 +// managed key (SSE-S3) by default. Server-side encryption is for data encryption +// at rest. Amazon S3 encrypts your data as it writes it to disks in its data +// centers and decrypts it when you access it. 
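As a rough illustration of the multipart-upload encryption behavior described above, the following sketch starts a multipart upload that explicitly requests SSE-KMS instead of relying on the bucket's default encryption configuration; the bucket, key, and KMS key ID are placeholders.

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	// Request SSE-KMS for the uploaded parts. Omitting these fields would
	// fall back to the destination bucket's default encryption, per the
	// documentation above.
	out, err := svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
		Bucket:               aws.String("example-bucket"),
		Key:                  aws.String("example-large-object"),
		ServerSideEncryption: aws.String(s3.ServerSideEncryptionAwsKms),
		SSEKMSKeyId:          aws.String("example-kms-key-id"),
	})
	if err != nil {
		panic(err)
	}
	_ = out.UploadId // subsequent UploadPart calls must carry this ID
}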
You can request that Amazon S3 +// encrypts data at rest by using server-side encryption with other key options. +// The option you use depends on whether you want to use KMS keys (SSE-KMS) +// or provide your own encryption keys (SSE-C). +// +// - Use KMS keys (SSE-KMS) that include the Amazon Web Services managed +// key (aws/s3) and KMS customer managed keys stored in Key Management Service +// (KMS) – If you want Amazon Web Services to manage the keys used to encrypt // data, specify the following headers in the request. x-amz-server-side-encryption // x-amz-server-side-encryption-aws-kms-key-id x-amz-server-side-encryption-context // If you specify x-amz-server-side-encryption:aws:kms, but don't provide // x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon -// Web Services managed key in Amazon Web Services KMS to protect the data. -// All GET and PUT requests for an object protected by Amazon Web Services -// KMS fail if you don't make them with SSL or by using SigV4. For more information -// about server-side encryption with KMS key (SSE-KMS), see Protecting Data -// Using Server-Side Encryption with KMS keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html). +// Web Services managed key (aws/s3 key) in KMS to protect the data. All +// GET and PUT requests for an object protected by KMS fail if you don't +// make them by using Secure Sockets Layer (SSL), Transport Layer Security +// (TLS), or Signature Version 4. For more information about server-side +// encryption with KMS keys (SSE-KMS), see Protecting Data Using Server-Side +// Encryption with KMS keys (https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html). // -// - Use customer-provided encryption keys – If you want to manage your -// own encryption keys, provide all the following headers in the request. +// - Use customer-provided encryption keys (SSE-C) – If you want to manage +// your own encryption keys, provide all the following headers in the request. // x-amz-server-side-encryption-customer-algorithm x-amz-server-side-encryption-customer-key // x-amz-server-side-encryption-customer-key-MD5 For more information about -// server-side encryption with KMS keys (SSE-KMS), see Protecting Data Using -// Server-Side Encryption with KMS keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html). +// server-side encryption with customer-provided encryption keys (SSE-C), +// see Protecting data using server-side encryption with customer-provided +// encryption keys (SSE-C) (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html). // // # Access-Control-List (ACL)-Specific Request Headers // @@ -960,7 +994,7 @@ // Deletes the S3 bucket. All objects (including all object versions and delete // markers) in the bucket must be deleted before the bucket itself can be deleted. // -// Related Resources +// The following operations are related to DeleteBucket: // // - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) // @@ -1139,7 +1173,7 @@ // For information about cors, see Enabling Cross-Origin Resource Sharing (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) // in the Amazon S3 User Guide. // -// Related Resources: +// Related Resources // // - PutBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html) // @@ -1217,9 +1251,10 @@ // DeleteBucketEncryption API operation for Amazon Simple Storage Service. 
// -// This implementation of the DELETE action removes default encryption from -// the bucket. For information about the Amazon S3 default encryption feature, -// see Amazon S3 Default Bucket Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) +// This implementation of the DELETE action resets the default encryption for +// the bucket as server-side encryption with Amazon S3 managed keys (SSE-S3). +// For information about the bucket default encryption feature, see Amazon S3 +// Bucket Default Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) // in the Amazon S3 User Guide. // // To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration @@ -1229,7 +1264,7 @@ // and Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) // in the Amazon S3 User Guide. // -// Related Resources +// The following operations are related to DeleteBucketEncryption: // // - PutBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html) // @@ -1783,9 +1818,13 @@ // using an identity that belongs to the bucket owner's account, Amazon S3 returns // a 405 Method Not Allowed error. // -// As a security precaution, the root user of the Amazon Web Services account -// that owns a bucket can always use this operation, even if the policy explicitly -// denies the root user the ability to perform this action. +// To ensure that bucket owners don't inadvertently lock themselves out of their +// own buckets, the root principal in a bucket owner's Amazon Web Services account +// can perform the GetBucketPolicy, PutBucketPolicy, and DeleteBucketPolicy +// API actions, even if their bucket policy explicitly denies the root principal's +// access. Bucket owner root principals can only be blocked from performing +// these API actions by VPC endpoint policies and Amazon Web Services Organizations +// policies. // // For more information about bucket policies, see Using Bucket Policies and // UserPolicies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). @@ -2141,10 +2180,10 @@ // null version, Amazon S3 does not remove any objects but will still respond // that the command was successful. // -// To remove a specific version, you must be the bucket owner and you must use -// the version Id subresource. Using this subresource permanently deletes the -// version. If the object deleted is a delete marker, Amazon S3 sets the response -// header, x-amz-delete-marker, to true. +// To remove a specific version, you must use the version Id subresource. Using +// this subresource permanently deletes the version. If the object deleted is +// a delete marker, Amazon S3 sets the response header, x-amz-delete-marker, +// to true. // // If the object you want to delete is in a bucket where the bucket versioning // configuration is MFA Delete enabled, you must include the x-amz-mfa request @@ -2246,7 +2285,7 @@ // in the request. You will need permission for the s3:DeleteObjectVersionTagging // action. // -// The following operations are related to DeleteBucketMetricsConfiguration: +// The following operations are related to DeleteObjectTagging: // // - PutObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html) // @@ -2553,7 +2592,7 @@ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html) // in the Amazon S3 User Guide. 
// -// Related Resources +// The following operations are related to GetBucketAccelerateConfiguration: // // - PutBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAccelerateConfiguration.html) // @@ -2634,13 +2673,22 @@ // is granted to the anonymous user, you can return the ACL of the bucket without // using an authorization header. // +// To use this API operation against an access point, provide the alias of the +// access point in place of the bucket name. +// +// To use this API operation against an Object Lambda access point, provide +// the alias of the Object Lambda access point in place of the bucket name. +// If the Object Lambda access point alias in a request is not valid, the error +// code InvalidAccessPointAliasError is returned. For more information about +// InvalidAccessPointAliasError, see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList). +// // If your bucket uses the bucket owner enforced setting for S3 Object Ownership, // requests to read ACLs are still supported and return the bucket-owner-full-control // ACL with the owner being the account that created the bucket. For more information, // see Controlling object ownership and disabling ACLs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) // in the Amazon S3 User Guide. // -// Related Resources +// The following operations are related to GetBucketAcl: // // - ListObjects (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html) // @@ -2729,7 +2777,7 @@ // – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html) // in the Amazon S3 User Guide. // -// Related Resources +// The following operations are related to GetBucketAnalyticsConfiguration: // // - DeleteBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html) // @@ -2815,6 +2863,15 @@ // action. By default, the bucket owner has this permission and can grant it // to others. // +// To use this API operation against an access point, provide the alias of the +// access point in place of the bucket name. +// +// To use this API operation against an Object Lambda access point, provide +// the alias of the Object Lambda access point in place of the bucket name. +// If the Object Lambda access point alias in a request is not valid, the error +// code InvalidAccessPointAliasError is returned. For more information about +// InvalidAccessPointAliasError, see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList). +// // For more information about CORS, see Enabling Cross-Origin Resource Sharing // (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html). // @@ -2895,12 +2952,12 @@ // GetBucketEncryption API operation for Amazon Simple Storage Service. // -// Returns the default encryption configuration for an Amazon S3 bucket. If -// the bucket does not have a default encryption configuration, GetBucketEncryption -// returns ServerSideEncryptionConfigurationNotFoundError. -// -// For information about the Amazon S3 default encryption feature, see Amazon -// S3 Default Bucket Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html). +// Returns the default encryption configuration for an Amazon S3 bucket. 
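To inspect the bucket default encryption that the updated GetBucketEncryption documentation describes, a call along these lines can be used; the bucket name is a placeholder and this is only a sketch.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	// Bucket name is a placeholder.
	out, err := svc.GetBucketEncryption(&s3.GetBucketEncryptionInput{
		Bucket: aws.String("example-bucket"),
	})
	if err != nil {
		panic(err)
	}
	// With the newer default described above, this typically reports SSE-S3
	// (AES256) even if no configuration was ever set explicitly.
	for _, rule := range out.ServerSideEncryptionConfiguration.Rules {
		if def := rule.ApplyServerSideEncryptionByDefault; def != nil {
			fmt.Println(aws.StringValue(def.SSEAlgorithm))
		}
	}
}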
By +// default, all buckets have a default encryption configuration that uses server-side +// encryption with Amazon S3 managed keys (SSE-S3). For information about the +// bucket default encryption feature, see Amazon S3 Bucket Default Encryption +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) +// in the Amazon S3 User Guide. // // To use this operation, you must have permission to perform the s3:GetEncryptionConfiguration // action. The bucket owner has this permission by default. The bucket owner @@ -3388,10 +3445,18 @@ // the LocationConstraint request parameter in a CreateBucket request. For more // information, see CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html). // -// To use this implementation of the operation, you must be the bucket owner. +// To use this API operation against an access point, provide the alias of the +// access point in place of the bucket name. // -// To use this API against an access point, provide the alias of the access -// point in place of the bucket name. +// To use this API operation against an Object Lambda access point, provide +// the alias of the Object Lambda access point in place of the bucket name. +// If the Object Lambda access point alias in a request is not valid, the error +// code InvalidAccessPointAliasError is returned. For more information about +// InvalidAccessPointAliasError, see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList). +// +// We recommend that you use HeadBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadBucket.html) +// to return the Region that a bucket resides in. For backward compatibility, +// Amazon S3 continues to support GetBucketLocation. // // The following operations are related to GetBucketLocation: // @@ -3471,7 +3536,7 @@ // GetBucketLogging API operation for Amazon Simple Storage Service. // // Returns the logging status of a bucket and the permissions users have to -// view and modify that status. To use GET, you must be the bucket owner. +// view and modify that status. // // The following operations are related to GetBucketLogging: // @@ -3735,6 +3800,15 @@ // to other users to read this configuration with the s3:GetBucketNotification // permission. // +// To use this API operation against an access point, provide the alias of the +// access point in place of the bucket name. +// +// To use this API operation against an Object Lambda access point, provide +// the alias of the Object Lambda access point in place of the bucket name. +// If the Object Lambda access point alias in a request is not valid, the error +// code InvalidAccessPointAliasError is returned. For more information about +// InvalidAccessPointAliasError, see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList). +// // For more information about setting and reading the notification configuration // on a bucket, see Setting Up Notification of Bucket Events (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html). // For more information about bucket policies, see Using Bucket Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). @@ -3908,9 +3982,22 @@ // identity that belongs to the bucket owner's account, Amazon S3 returns a // 405 Method Not Allowed error. 
// -// As a security precaution, the root user of the Amazon Web Services account -// that owns a bucket can always use this operation, even if the policy explicitly -// denies the root user the ability to perform this action. +// To ensure that bucket owners don't inadvertently lock themselves out of their +// own buckets, the root principal in a bucket owner's Amazon Web Services account +// can perform the GetBucketPolicy, PutBucketPolicy, and DeleteBucketPolicy +// API actions, even if their bucket policy explicitly denies the root principal's +// access. Bucket owner root principals can only be blocked from performing +// these API actions by VPC endpoint policies and Amazon Web Services Organizations +// policies. +// +// To use this API operation against an access point, provide the alias of the +// access point in place of the bucket name. +// +// To use this API operation against an Object Lambda access point, provide +// the alias of the Object Lambda access point in place of the bucket name. +// If the Object Lambda access point alias in a request is not valid, the error +// code InvalidAccessPointAliasError is returned. For more information about +// InvalidAccessPointAliasError, see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList). // // For more information about bucket policies, see Using Bucket Policies and // User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). @@ -4440,7 +4527,7 @@ // bucket owners can allow other users to read the website configuration by // writing a bucket policy granting them the S3:GetBucketWebsite permission. // -// The following operations are related to DeleteBucketWebsite: +// The following operations are related to GetBucketWebsite: // // - DeleteBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketWebsite.html) // @@ -4538,18 +4625,19 @@ // For more information about returning the ACL of an object, see GetObjectAcl // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html). // -// If the object you are retrieving is stored in the S3 Glacier or S3 Glacier -// Deep Archive storage class, or S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering -// Deep Archive tiers, before you can retrieve the object you must first restore -// a copy using RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html). -// Otherwise, this action returns an InvalidObjectStateError error. For information +// If the object you are retrieving is stored in the S3 Glacier Flexible Retrieval +// or S3 Glacier Deep Archive storage class, or S3 Intelligent-Tiering Archive +// or S3 Intelligent-Tiering Deep Archive tiers, before you can retrieve the +// object you must first restore a copy using RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html). +// Otherwise, this action returns an InvalidObjectState error. For information // about restoring archived objects, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html). // // Encryption request headers, like x-amz-server-side-encryption, should not // be sent for GET requests if your object uses server-side encryption with -// KMS keys (SSE-KMS) or server-side encryption with Amazon S3–managed encryption -// keys (SSE-S3). If your object does use these types of keys, you’ll get -// an HTTP 400 BadRequest error. 
+// Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption +// with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with +// Amazon S3 managed encryption keys (SSE-S3). If your object does use these +// types of keys, you’ll get an HTTP 400 Bad Request error. // // If you encrypt an object by using server-side encryption with customer-provided // encryption keys (SSE-C) when you store the object in Amazon S3, then when @@ -4573,14 +4661,14 @@ // // You need the relevant read object (or version) permission for this operation. // For more information, see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). -// If the object you request does not exist, the error Amazon S3 returns depends -// on whether you also have the s3:ListBucket permission. +// If the object that you request doesn’t exist, the error that Amazon S3 +// returns depends on whether you also have the s3:ListBucket permission. // -// - If you have the s3:ListBucket permission on the bucket, Amazon S3 will -// return an HTTP status code 404 ("no such key") error. +// If you have the s3:ListBucket permission on the bucket, Amazon S3 returns +// an HTTP status code 404 (Not Found) error. // -// - If you don’t have the s3:ListBucket permission, Amazon S3 will return -// an HTTP status code 403 ("access denied") error. +// If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP +// status code 403 ("access denied") error. // // # Versioning // @@ -4589,7 +4677,9 @@ // // - If you supply a versionId, you need the s3:GetObjectVersion permission // to access a specific version of an object. If you request a specific version, -// you do not need to have the s3:GetObject permission. +// you do not need to have the s3:GetObject permission. If you request the +// current version without a specific version ID, only s3:GetObject permission +// is required. s3:GetObjectVersion permission won't be required. // // - If the current version of the object is a delete marker, Amazon S3 behaves // as if the object was deleted and includes x-amz-delete-marker: true in @@ -4628,7 +4718,7 @@ // // - response-content-encoding // -// # Additional Considerations about Request Headers +// # Overriding Response Header Values // // If both of the If-Match and If-Unmodified-Since headers are present in the // request as follows: If-Match condition evaluates to true, and; If-Unmodified-Since @@ -4734,8 +4824,6 @@ // // This action is not supported by Amazon S3 on Outposts. // -// # Versioning -// // By default, GET returns ACL information about the current version of an object. // To return ACL information about a different version, use the versionId subresource. // @@ -4835,10 +4923,9 @@ // This action is useful if you're interested only in an object's metadata. // To use GetObjectAttributes, you must have READ access to the object. // -// GetObjectAttributes combines the functionality of GetObjectAcl, GetObjectLegalHold, -// GetObjectLockConfiguration, GetObjectRetention, GetObjectTagging, HeadObject, -// and ListParts. All of the data returned with each of those individual calls -// can be returned with a single call to GetObjectAttributes. +// GetObjectAttributes combines the functionality of HeadObject and ListParts. +// All of the data returned with each of those individual calls can be returned +// with a single call to GetObjectAttributes. 
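The permission-dependent 403/404 behavior noted above for GetObject is usually handled by inspecting the awserr.RequestFailure returned by the SDK, roughly as follows; the bucket and key are placeholders.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	_, err := svc.GetObject(&s3.GetObjectInput{
		Bucket: aws.String("example-bucket"),
		Key:    aws.String("missing-key"),
	})
	if err != nil {
		// As the documentation above notes, a missing object surfaces as
		// 404 (NoSuchKey) with s3:ListBucket permission, or 403 without it.
		if reqErr, ok := err.(awserr.RequestFailure); ok {
			fmt.Println(reqErr.StatusCode(), reqErr.Code(), reqErr.Message())
			return
		}
		panic(err)
	}
}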
// // If you encrypt an object by using server-side encryption with customer-provided // encryption keys (SSE-C) when you store the object in Amazon S3, then when @@ -4857,9 +4944,9 @@ // - Encryption request headers, such as x-amz-server-side-encryption, should // not be sent for GET requests if your object uses server-side encryption // with Amazon Web Services KMS keys stored in Amazon Web Services Key Management -// Service (SSE-KMS) or server-side encryption with Amazon S3 managed encryption -// keys (SSE-S3). If your object does use these types of keys, you'll get -// an HTTP 400 Bad Request error. +// Service (SSE-KMS) or server-side encryption with Amazon S3 managed keys +// (SSE-S3). If your object does use these types of keys, you'll get an HTTP +// 400 Bad Request error. // // - The last modified property in this case is the creation date of the // object. @@ -5326,8 +5413,7 @@ // GetObjectTorrent API operation for Amazon Simple Storage Service. // // Returns torrent files from a bucket. BitTorrent can save you bandwidth when -// you're distributing large files. For more information about BitTorrent, see -// Using BitTorrent with Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3Torrent.html). +// you're distributing large files. // // You can get torrent only for objects that are less than 5 GB in size, and // that are not encrypted using server-side encryption with a customer-provided @@ -5514,9 +5600,9 @@ // permission to access it. // // If the bucket does not exist or you do not have permission to access it, -// the HEAD request returns a generic 404 Not Found or 403 Forbidden code. A -// message body is not included, so you cannot determine the exception beyond -// these error codes. +// the HEAD request returns a generic 400 Bad Request, 403 Forbidden or 404 +// Not Found code. A message body is not included, so you cannot determine the +// exception beyond these error codes. // // To use this operation, you must have permissions to perform the s3:ListBucket // action. The bucket owner has this permission by default and can grant this @@ -5524,12 +5610,18 @@ // Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) // and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). // -// To use this API against an access point, you must provide the alias of the -// access point in place of the bucket name or specify the access point ARN. -// When using the access point ARN, you must direct requests to the access point -// hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. +// To use this API operation against an access point, you must provide the alias +// of the access point in place of the bucket name or specify the access point +// ARN. When using the access point ARN, you must direct requests to the access +// point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using the Amazon Web Services SDKs, you provide the ARN in place of -// the bucket name. For more information see, Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html). +// the bucket name. For more information, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html). 
+// +// To use this API operation against an Object Lambda access point, provide +// the alias of the Object Lambda access point in place of the bucket name. +// If the Object Lambda access point alias in a request is not valid, the error +// code InvalidAccessPointAliasError is returned. For more information about +// InvalidAccessPointAliasError, see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5613,9 +5705,9 @@ // // A HEAD request has the same options as a GET action on an object. The response // is identical to the GET response except that there is no response body. Because -// of this, if the HEAD request generates an error, it returns a generic 404 -// Not Found or 403 Forbidden code. It is not possible to retrieve the exact -// exception beyond these error codes. +// of this, if the HEAD request generates an error, it returns a generic 400 +// Bad Request, 403 Forbidden or 404 Not Found code. It is not possible to retrieve +// the exact exception beyond these error codes. // // If you encrypt an object by using server-side encryption with customer-provided // encryption keys (SSE-C) when you store the object in Amazon S3, then when @@ -5632,9 +5724,10 @@ // // - Encryption request headers, like x-amz-server-side-encryption, should // not be sent for GET requests if your object uses server-side encryption -// with KMS keys (SSE-KMS) or server-side encryption with Amazon S3–managed -// encryption keys (SSE-S3). If your object does use these types of keys, -// you’ll get an HTTP 400 BadRequest error. +// with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side +// encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side +// encryption with Amazon S3 managed encryption keys (SSE-S3). If your object +// does use these types of keys, you’ll get an HTTP 400 Bad Request error. // // - The last modified property in this case is the creation date of the // object. @@ -5659,15 +5752,16 @@ // # Permissions // // You need the relevant read object (or version) permission for this operation. -// For more information, see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). -// If the object you request does not exist, the error Amazon S3 returns depends -// on whether you also have the s3:ListBucket permission. +// For more information, see Actions, resources, and condition keys for Amazon +// S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html). +// If the object you request doesn't exist, the error that Amazon S3 returns +// depends on whether you also have the s3:ListBucket permission. // // - If you have the s3:ListBucket permission on the bucket, Amazon S3 returns -// an HTTP status code 404 ("no such key") error. +// an HTTP status code 404 error. // // - If you don’t have the s3:ListBucket permission, Amazon S3 returns -// an HTTP status code 403 ("access denied") error. +// an HTTP status code 403 error. // // The following actions are related to HeadObject: // @@ -6148,6 +6242,9 @@ // Returns a list of all buckets owned by the authenticated sender of the request. // To use this operation, you must have the s3:ListAllMyBuckets permission. 
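A minimal ListBuckets call matching the permission note above might look like the following sketch, which relies on default credential and region resolution.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	// Requires the s3:ListAllMyBuckets permission, as noted above.
	out, err := svc.ListBuckets(&s3.ListBucketsInput{})
	if err != nil {
		panic(err)
	}
	for _, b := range out.Buckets {
		fmt.Println(aws.StringValue(b.Name))
	}
}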
// +// For information about Amazon S3 buckets, see Creating, configuring, and working +// with Amazon S3 buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/creating-buckets-s3.html). +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -7156,9 +7253,9 @@ // object ownership (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) // in the Amazon S3 User Guide. // -// # Access Permissions +// # Permissions // -// You can set access permissions using one of the following methods: +// You can set access permissions by using one of the following methods: // // - Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports // a set of predefined ACLs, known as canned ACLs. Each canned ACL has a @@ -7208,7 +7305,7 @@ // xsi:type="Group"><>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<> // // - By Email address: <>Grantees@email.com<>lt;/Grantee> +// xsi:type="AmazonCustomerByEmail"><>Grantees@email.com<>& // The grantee is resolved to the CanonicalUser and, in a response to a GET // Object acl request, appears as the CanonicalUser. Using email addresses // to specify a grantee is only supported in the following Amazon Web Services @@ -7218,7 +7315,7 @@ // Regions and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) // in the Amazon Web Services General Reference. // -// Related Resources +// The following operations are related to PutBucketAcl: // // - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) // @@ -7323,7 +7420,7 @@ // see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) // and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). // -// Special Errors +// PutBucketAnalyticsConfiguration has the following special errors: // // - HTTP Error: HTTP 400 Bad Request Code: InvalidArgument Cause: Invalid // argument. @@ -7336,7 +7433,7 @@ // the owner of the specified bucket, or you do not have the s3:PutAnalyticsConfiguration // bucket permission to set the configuration on the bucket. // -// Related Resources +// The following operations are related to PutBucketAnalyticsConfiguration: // // - GetBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html) // @@ -7456,7 +7553,7 @@ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon // S3 User Guide. // -// Related Resources +// The following operations are related to PutBucketCors: // // - GetBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketCors.html) // @@ -7541,15 +7638,17 @@ // PutBucketEncryption API operation for Amazon Simple Storage Service. // // This action uses the encryption subresource to configure default encryption -// and Amazon S3 Bucket Key for an existing bucket. +// and Amazon S3 Bucket Keys for an existing bucket. // -// Default encryption for a bucket can use server-side encryption with Amazon -// S3-managed keys (SSE-S3) or customer managed keys (SSE-KMS). If you specify -// default encryption using SSE-KMS, you can also configure Amazon S3 Bucket -// Key. 
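The PutBucketEncryption documentation above describes configuring SSE-KMS as a bucket default, optionally with S3 Bucket Keys. A hedged sketch of such a configuration follows; the bucket name and KMS key ID are placeholders.

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	// Configure SSE-KMS as the bucket default and enable S3 Bucket Keys.
	_, err := svc.PutBucketEncryption(&s3.PutBucketEncryptionInput{
		Bucket: aws.String("example-bucket"),
		ServerSideEncryptionConfiguration: &s3.ServerSideEncryptionConfiguration{
			Rules: []*s3.ServerSideEncryptionRule{{
				ApplyServerSideEncryptionByDefault: &s3.ServerSideEncryptionByDefault{
					SSEAlgorithm:   aws.String(s3.ServerSideEncryptionAwsKms),
					KMSMasterKeyID: aws.String("example-kms-key-id"),
				},
				BucketKeyEnabled: aws.Bool(true),
			}},
		},
	})
	if err != nil {
		panic(err)
	}
}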
When the default encryption is SSE-KMS, if you upload an object to the -// bucket and do not specify the KMS key to use for encryption, Amazon S3 uses -// the default Amazon Web Services managed KMS key for your account. For information -// about default encryption, see Amazon S3 default bucket encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) +// By default, all buckets have a default encryption configuration that uses +// server-side encryption with Amazon S3 managed keys (SSE-S3). You can optionally +// configure default encryption for a bucket by using server-side encryption +// with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side +// encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption +// with customer-provided keys (SSE-C). If you specify default encryption by +// using SSE-KMS, you can also configure Amazon S3 Bucket Keys. For information +// about bucket default encryption, see Amazon S3 bucket default encryption +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) // in the Amazon S3 User Guide. For more information about S3 Bucket Keys, see // Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) // in the Amazon S3 User Guide. @@ -7557,14 +7656,14 @@ // This action requires Amazon Web Services Signature Version 4. For more information, // see Authenticating Requests (Amazon Web Services Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html). // -// To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration +// To use this operation, you must have permission to perform the s3:PutEncryptionConfiguration // action. The bucket owner has this permission by default. The bucket owner // can grant this permission to others. For more information about permissions, // see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) // and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) // in the Amazon S3 User Guide. // -// Related Resources +// The following operations are related to PutBucketEncryption: // // - GetBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html) // @@ -7674,17 +7773,26 @@ // move objects stored in the S3 Intelligent-Tiering storage class to the Archive // Access or Deep Archive Access tier. // -// Special Errors +// PutBucketIntelligentTieringConfiguration has the following special errors: // -// - HTTP 400 Bad Request Error Code: InvalidArgument Cause: Invalid Argument +// # HTTP 400 Bad Request Error // -// - HTTP 400 Bad Request Error Code: TooManyConfigurations Cause: You are -// attempting to create a new configuration but have already reached the -// 1,000-configuration limit. +// Code: InvalidArgument // -// - HTTP 403 Forbidden Error Code: AccessDenied Cause: You are not the owner -// of the specified bucket, or you do not have the s3:PutIntelligentTieringConfiguration -// bucket permission to set the configuration on the bucket. +// Cause: Invalid Argument +// +// # HTTP 400 Bad Request Error +// +// Code: TooManyConfigurations +// +// Cause: You are attempting to create a new configuration but have already +// reached the 1,000-configuration limit. 
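Editor's note: a sketch of the PutBucketEncryption call discussed above, configuring SSE-KMS default encryption with an S3 Bucket Key. It reuses the client from the first sketch; the bucket name and KMS key ID are placeholders.

// Configure bucket default encryption with SSE-KMS and enable an S3 Bucket Key.
func setDefaultEncryption(svc *s3.S3) error {
	_, err := svc.PutBucketEncryption(&s3.PutBucketEncryptionInput{
		Bucket: aws.String("example-bucket"), // placeholder
		ServerSideEncryptionConfiguration: &s3.ServerSideEncryptionConfiguration{
			Rules: []*s3.ServerSideEncryptionRule{{
				ApplyServerSideEncryptionByDefault: &s3.ServerSideEncryptionByDefault{
					SSEAlgorithm:   aws.String(s3.ServerSideEncryptionAwsKms),
					KMSMasterKeyID: aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"), // placeholder key ID
				},
				BucketKeyEnabled: aws.Bool(true),
			}},
		},
	})
	return err
}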
+// +// # HTTP 403 Forbidden Error +// +// Cause: You are not the owner of the specified bucket, or you do not have +// the s3:PutIntelligentTieringConfiguration bucket permission to set the configuration +// on the bucket. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -7780,26 +7888,50 @@ // an example policy, see Granting Permissions for Amazon S3 Inventory and Storage // Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9). // -// To use this operation, you must have permissions to perform the s3:PutInventoryConfiguration +// # Permissions +// +// To use this operation, you must have permission to perform the s3:PutInventoryConfiguration // action. The bucket owner has this permission by default and can grant this -// permission to others. For more information about permissions, see Permissions -// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// permission to others. +// +// The s3:PutInventoryConfiguration permission allows a user to create an S3 +// Inventory (https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-inventory.html) +// report that includes all object metadata fields available and to specify +// the destination bucket to store the inventory. A user with read access to +// objects in the destination bucket can also access all object metadata fields +// that are available in the inventory report. +// +// To restrict access to an inventory report, see Restricting access to an Amazon +// S3 Inventory report (https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html#example-bucket-policies-use-case-10) +// in the Amazon S3 User Guide. For more information about the metadata fields +// available in S3 Inventory, see Amazon S3 Inventory lists (https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-inventory.html#storage-inventory-contents) +// in the Amazon S3 User Guide. For more information about permissions, see +// Permissions related to bucket subresource operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Identity and access management in Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) // in the Amazon S3 User Guide. // -// Special Errors +// PutBucketInventoryConfiguration has the following special errors: // -// - HTTP 400 Bad Request Error Code: InvalidArgument Cause: Invalid Argument +// # HTTP 400 Bad Request Error // -// - HTTP 400 Bad Request Error Code: TooManyConfigurations Cause: You are -// attempting to create a new configuration but have already reached the -// 1,000-configuration limit. +// Code: InvalidArgument // -// - HTTP 403 Forbidden Error Code: AccessDenied Cause: You are not the owner -// of the specified bucket, or you do not have the s3:PutInventoryConfiguration -// bucket permission to set the configuration on the bucket. 
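Editor's note: a rough sketch of the PutBucketInventoryConfiguration request whose permissions and special errors are described around here, reusing the client from the first sketch. Bucket names, the destination ARN, and the configuration ID are placeholders.

// Create a daily CSV inventory report for the bucket.
func putInventoryConfig(svc *s3.S3) error {
	_, err := svc.PutBucketInventoryConfiguration(&s3.PutBucketInventoryConfigurationInput{
		Bucket: aws.String("example-bucket"),   // placeholder
		Id:     aws.String("daily-inventory"),  // placeholder configuration ID
		InventoryConfiguration: &s3.InventoryConfiguration{
			Id:                     aws.String("daily-inventory"),
			IsEnabled:              aws.Bool(true),
			IncludedObjectVersions: aws.String(s3.InventoryIncludedObjectVersionsCurrent),
			Schedule:               &s3.InventorySchedule{Frequency: aws.String(s3.InventoryFrequencyDaily)},
			Destination: &s3.InventoryDestination{
				S3BucketDestination: &s3.InventoryS3BucketDestination{
					Bucket: aws.String("arn:aws:s3:::example-inventory-destination"), // placeholder ARN
					Format: aws.String(s3.InventoryFormatCsv),
					Prefix: aws.String("inventory/"),
				},
			},
		},
	})
	return err
}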
+// Cause: Invalid Argument // -// Related Resources +// # HTTP 400 Bad Request Error +// +// Code: TooManyConfigurations +// +// Cause: You are attempting to create a new configuration but have already +// reached the 1,000-configuration limit. +// +// # HTTP 403 Forbidden Error +// +// Cause: You are not the owner of the specified bucket, or you do not have +// the s3:PutInventoryConfiguration bucket permission to set the configuration +// on the bucket. +// +// The following operations are related to PutBucketInventoryConfiguration: // // - GetBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html) // @@ -7922,7 +8054,7 @@ // For more examples of transitioning objects to storage classes such as STANDARD_IA // or ONEZONE_IA, see Examples of Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#lifecycle-configuration-examples). // -// Related Resources +// The following operations are related to PutBucketLifecycle: // // - GetBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html)(Deprecated) // @@ -8037,11 +8169,11 @@ // S3 Lifecycle configuration can have up to 1,000 rules. This limit is not // adjustable. Each rule consists of the following: // -// - Filter identifying a subset of objects to which the rule applies. The -// filter can be based on a key name prefix, object tags, or a combination +// - A filter identifying a subset of objects to which the rule applies. +// The filter can be based on a key name prefix, object tags, or a combination // of both. // -// - Status whether the rule is in effect. +// - A status indicating whether the rule is in effect. // // - One or more lifecycle transition and expiration actions that you want // Amazon S3 to perform on the objects identified by the filter. If the state @@ -8062,10 +8194,10 @@ // optionally grant access permissions to others by writing an access policy. // For this operation, a user must get the s3:PutLifecycleConfiguration permission. // -// You can also explicitly deny permissions. Explicit deny also supersedes any -// other permissions. If you want to block users or accounts from removing or -// deleting objects from your bucket, you must deny them permissions for the -// following actions: +// You can also explicitly deny permissions. An explicit deny also supersedes +// any other permissions. If you want to block users or accounts from removing +// or deleting objects from your bucket, you must deny them permissions for +// the following actions: // // - s3:DeleteObject // @@ -8076,7 +8208,7 @@ // For more information about permissions, see Managing Access Permissions to // Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). 
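Editor's note: a sketch of a lifecycle rule with the filter, status, and actions enumerated in the PutBucketLifecycleConfiguration hunk above, reusing the client from the first sketch. The bucket name, rule ID, prefix, and day counts are placeholders.

// Expire objects under "logs/" after 30 days and clean up noncurrent versions.
func putLifecycle(svc *s3.S3) error {
	_, err := svc.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{
		Bucket: aws.String("example-bucket"), // placeholder
		LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
			Rules: []*s3.LifecycleRule{{
				ID:         aws.String("expire-logs"),
				Status:     aws.String(s3.ExpirationStatusEnabled),
				Filter:     &s3.LifecycleRuleFilter{Prefix: aws.String("logs/")},
				Expiration: &s3.LifecycleExpiration{Days: aws.Int64(30)},
				NoncurrentVersionExpiration: &s3.NoncurrentVersionExpiration{
					NoncurrentDays: aws.Int64(7),
				},
			}},
		},
	})
	return err
}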
// -// The following are related to PutBucketLifecycleConfiguration: +// The following operations are related to PutBucketLifecycleConfiguration: // // - Examples of Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-configuration-examples.html) // @@ -8178,7 +8310,7 @@ // # Grantee Values // // You can specify the person (grantee) to whom you're assigning access rights -// (using request elements) in the following ways: +// (by using request elements) in the following ways: // // - By the person's ID: <>ID<><>GranteesEmail<> @@ -8186,8 +8318,8 @@ // // - By Email address: <>Grantees@email.com<> -// The grantee is resolved to the CanonicalUser and, in a response to a GET -// Object acl request, appears as the CanonicalUser. +// The grantee is resolved to the CanonicalUser and, in a response to a GETObjectAcl +// request, appears as the CanonicalUser. // // - By URI: <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<> @@ -8310,7 +8442,7 @@ // // - ListBucketMetricsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html) // -// GetBucketLifecycle has the following special error: +// PutBucketMetricsConfiguration has the following special error: // // - Error code: TooManyConfigurations Description: You are attempting to // create a new configuration but have already reached the 1,000-configuration @@ -8511,7 +8643,8 @@ // // By default, only the bucket owner can configure notifications on a bucket. // However, bucket owners can use a bucket policy to grant permission to other -// users to set this configuration with s3:PutBucketNotification permission. +// users to set this configuration with the required s3:PutBucketNotification +// permission. // // The PUT notification is an atomic operation. For example, suppose your notification // configuration includes SNS topic, SQS queue, and Lambda function configurations. @@ -8519,8 +8652,6 @@ // messages to your SNS topic. If the message fails, the entire PUT action will // fail, and Amazon S3 will not add the configuration to your bucket. // -// # Responses -// // If the configuration in the request body includes only one TopicConfiguration // specifying only the s3:ReducedRedundancyLostObject event type, the response // will also include the x-amz-sns-test-message-id header containing the message @@ -8707,9 +8838,13 @@ // identity that belongs to the bucket owner's account, Amazon S3 returns a // 405 Method Not Allowed error. // -// As a security precaution, the root user of the Amazon Web Services account -// that owns a bucket can always use this operation, even if the policy explicitly -// denies the root user the ability to perform this action. +// To ensure that bucket owners don't inadvertently lock themselves out of their +// own buckets, the root principal in a bucket owner's Amazon Web Services account +// can perform the GetBucketPolicy, PutBucketPolicy, and DeleteBucketPolicy +// API actions, even if their bucket policy explicitly denies the root principal's +// access. Bucket owner root principals can only be blocked from performing +// these API actions by VPC endpoint policies and Amazon Web Services Organizations +// policies. // // For more information, see Bucket policy examples (https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html). 
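Editor's note: a sketch of the PutBucketPolicy call discussed above, reusing the client from the first sketch. The policy document is purely illustrative (a deny of non-TLS access); the bucket name and ARNs are placeholders.

// Attach a minimal bucket policy.
func putPolicy(svc *s3.S3) error {
	policy := `{
	  "Version": "2012-10-17",
	  "Statement": [{
	    "Sid": "DenyInsecureTransport",
	    "Effect": "Deny",
	    "Principal": "*",
	    "Action": "s3:*",
	    "Resource": ["arn:aws:s3:::example-bucket", "arn:aws:s3:::example-bucket/*"],
	    "Condition": {"Bool": {"aws:SecureTransport": "false"}}
	  }]
	}`
	_, err := svc.PutBucketPolicy(&s3.PutBucketPolicyInput{
		Bucket: aws.String("example-bucket"), // placeholder
		Policy: aws.String(policy),
	})
	return err
}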
// @@ -9159,15 +9294,15 @@ // you must include the x-amz-mfa request header and the Status and the MfaDelete // request elements in a request to set the versioning state of the bucket. // -// If you have an object expiration lifecycle policy in your non-versioned bucket -// and you want to maintain the same permanent delete behavior when you enable -// versioning, you must add a noncurrent expiration policy. The noncurrent expiration -// lifecycle policy will manage the deletes of the noncurrent object versions -// in the version-enabled bucket. (A version-enabled bucket maintains one current -// and zero or more noncurrent object versions.) For more information, see Lifecycle -// and Versioning (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-and-other-bucket-config). +// If you have an object expiration lifecycle configuration in your non-versioned +// bucket and you want to maintain the same permanent delete behavior when you +// enable versioning, you must add a noncurrent expiration policy. The noncurrent +// expiration lifecycle configuration will manage the deletes of the noncurrent +// object versions in the version-enabled bucket. (A version-enabled bucket +// maintains one current and zero or more noncurrent object versions.) For more +// information, see Lifecycle and Versioning (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-and-other-bucket-config). // -// Related Resources +// The following operations are related to PutBucketVersioning: // // - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) // @@ -9393,12 +9528,14 @@ // add an object to it. // // Amazon S3 never adds partial objects; if you receive a success response, -// Amazon S3 added the entire object to the bucket. +// Amazon S3 added the entire object to the bucket. You cannot use PutObject +// to only update a single piece of metadata for an existing object. You must +// put the entire object with updated metadata if you want to update some values. // // Amazon S3 is a distributed system. If it receives multiple write requests // for the same object simultaneously, it overwrites all but the last object -// written. Amazon S3 does not provide object locking; if you need this, make -// sure to build it into your application layer or use versioning instead. +// written. To prevent objects from being deleted or overwritten, you can use +// Amazon S3 Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html). // // To ensure that data is not corrupted traversing the network, use the Content-MD5 // header. When you use this header, Amazon S3 checks the object against the @@ -9412,34 +9549,29 @@ // - To successfully change the objects acl of your PutObject request, you // must have the s3:PutObjectAcl in your IAM permissions. // +// - To successfully set the tag-set with your PutObject request, you must +// have the s3:PutObjectTagging in your IAM permissions. +// // - The Content-MD5 header is required for any request to upload an object // with a retention period configured using Amazon S3 Object Lock. For more // information about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview // (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html) // in the Amazon S3 User Guide. // -// # Server-side Encryption -// -// You can optionally request server-side encryption. 
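Editor's note: a sketch of the PutBucketVersioning call covered in the hunk above, reusing the client from the first sketch. As the documentation notes, a bucket with an expiration lifecycle rule would also want a noncurrent-version expiration rule (see the lifecycle sketch earlier). The bucket name is a placeholder.

// Enable versioning on the bucket.
func enableVersioning(svc *s3.S3) error {
	_, err := svc.PutBucketVersioning(&s3.PutBucketVersioningInput{
		Bucket: aws.String("example-bucket"), // placeholder
		VersioningConfiguration: &s3.VersioningConfiguration{
			Status: aws.String(s3.BucketVersioningStatusEnabled),
		},
	})
	return err
}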
With server-side encryption, -// Amazon S3 encrypts your data as it writes it to disks in its data centers -// and decrypts the data when you access it. You have the option to provide -// your own encryption key or use Amazon Web Services managed encryption keys -// (SSE-S3 or SSE-KMS). For more information, see Using Server-Side Encryption -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html). -// -// If you request server-side encryption using Amazon Web Services Key Management -// Service (SSE-KMS), you can enable an S3 Bucket Key at the object-level. For -// more information, see Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) -// in the Amazon S3 User Guide. -// -// # Access Control List (ACL)-Specific Request Headers -// -// You can use headers to grant ACL- based permissions. By default, all objects -// are private. Only the owner has full access control. When adding a new object, -// you can grant permissions to individual Amazon Web Services accounts or to -// predefined groups defined by Amazon S3. These permissions are then added -// to the ACL on the object. For more information, see Access Control List (ACL) -// Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) +// You have four mutually exclusive options to protect data using server-side +// encryption in Amazon S3, depending on how you choose to manage the encryption +// keys. Specifically, the encryption key options are Amazon S3 managed keys +// (SSE-S3), Amazon Web Services KMS keys (SSE-KMS or DSSE-KMS), and customer-provided +// keys (SSE-C). Amazon S3 encrypts data with server-side encryption by using +// Amazon S3 managed keys (SSE-S3) by default. You can optionally tell Amazon +// S3 to encrypt data at rest by using server-side encryption with other key +// options. For more information, see Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html). +// +// When adding a new object, you can use headers to grant ACL-based permissions +// to individual Amazon Web Services accounts or to predefined groups defined +// by Amazon S3. These permissions are then added to the ACL on the object. +// By default, all objects are private. Only the owner has full access control. +// For more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) // and Managing ACLs Using the REST API (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html). // // If the bucket that you're uploading objects to uses the bucket owner enforced @@ -9449,18 +9581,15 @@ // as the bucket-owner-full-control canned ACL or an equivalent form of this // ACL expressed in the XML format. PUT requests that contain other ACLs (for // example, custom grants to certain Amazon Web Services accounts) fail and -// return a 400 error with the error code AccessControlListNotSupported. -// -// For more information, see Controlling ownership of objects and disabling -// ACLs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) +// return a 400 error with the error code AccessControlListNotSupported. For +// more information, see Controlling ownership of objects and disabling ACLs +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) // in the Amazon S3 User Guide. 
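Editor's note: a sketch of a PutObject request that selects SSE-KMS at the object level, one of the mutually exclusive encryption options listed in the hunk above. It reuses the client from the first sketch and additionally needs the standard "strings" import; the bucket, key, and KMS key ID are placeholders.

// Upload a small object and request SSE-KMS for it.
func putEncryptedObject(svc *s3.S3) error {
	_, err := svc.PutObject(&s3.PutObjectInput{
		Bucket:               aws.String("example-bucket"),        // placeholder
		Key:                  aws.String("reports/2024-02.txt"),   // placeholder
		Body:                 strings.NewReader("example payload"),
		ContentType:          aws.String("text/plain"),
		ServerSideEncryption: aws.String(s3.ServerSideEncryptionAwsKms),
		SSEKMSKeyId:          aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"), // placeholder key ID
	})
	return err
}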
// // If your bucket uses the bucket owner enforced setting for Object Ownership, // all objects written to the bucket by any account will be owned by the bucket // owner. // -// # Storage Class Options -// // By default, Amazon S3 uses the STANDARD Storage Class to store newly created // objects. The STANDARD storage class provides high durability and high availability. // Depending on performance needs, you can specify a different Storage Class. @@ -9468,20 +9597,16 @@ // see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) // in the Amazon S3 User Guide. // -// # Versioning -// // If you enable versioning for a bucket, Amazon S3 automatically generates // a unique version ID for the object being stored. Amazon S3 returns this ID // in the response. When you enable versioning for a bucket, if Amazon S3 receives // multiple write requests for the same object simultaneously, it stores all -// of the objects. -// -// For more information about versioning, see Adding Objects to Versioning Enabled -// Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html). +// of the objects. For more information about versioning, see Adding Objects +// to Versioning-Enabled Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html). // For information about returning the versioning state of a bucket, see GetBucketVersioning // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html). // -// Related Resources +// For more information about related Amazon S3 APIs, see the following: // // - CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) // @@ -9585,7 +9710,7 @@ // object ownership (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) // in the Amazon S3 User Guide. // -// # Access Permissions +// # Permissions // // You can set access permissions using one of the following methods: // @@ -9651,7 +9776,7 @@ // sets the ACL of the current version of an object. To set the ACL of a different // version, use the versionId subresource. // -// Related Resources +// The following operations are related to PutObjectAcl: // // - CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) // @@ -10009,7 +10134,7 @@ // For information about the Amazon S3 object tagging feature, see Object Tagging // (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html). // -// Special Errors +// PutObjectTagging has the following special errors: // // - Code: InvalidTagError Cause: The tag provided was not a valid tag. This // error can occur if the tag did not pass input validation. For more information, @@ -10023,7 +10148,7 @@ // - Code: InternalError Cause: The service was unable to apply the provided // tag to the object. // -// Related Resources +// The following operations are related to PutObjectTagging: // // - GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) // @@ -10120,7 +10245,7 @@ // For more information about when Amazon S3 considers a bucket or an object // public, see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status). 
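Editor's note: a sketch of the PutObjectTagging request whose special errors (InvalidTagError, InternalError, and so on) are listed above, reusing the client from the first sketch. Bucket, key, and tags are placeholders.

// Replace the tag-set of an existing object.
func tagObject(svc *s3.S3) error {
	_, err := svc.PutObjectTagging(&s3.PutObjectTaggingInput{
		Bucket: aws.String("example-bucket"),      // placeholder
		Key:    aws.String("reports/2024-02.txt"), // placeholder
		Tagging: &s3.Tagging{
			TagSet: []*s3.Tag{
				{Key: aws.String("project"), Value: aws.String("temporal")},
				{Key: aws.String("retention"), Value: aws.String("90d")},
			},
		},
	})
	return err
}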
// -// Related Resources +// The following operations are related to PutPublicAccessBlock: // // - GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) // @@ -10211,55 +10336,34 @@ // // - restore an archive - Restore an archived object // -// To use this operation, you must have permissions to perform the s3:RestoreObject -// action. The bucket owner has this permission by default and can grant this -// permission to others. For more information about permissions, see Permissions -// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// in the Amazon S3 User Guide. -// -// # Querying Archives with Select Requests +// For more information about the S3 structure in the request body, see the +// following: // -// You use a select type of request to perform SQL queries on archived objects. -// The archived objects that are being queried by the select request must be -// formatted as uncompressed comma-separated values (CSV) files. You can run -// queries and custom analytics on your archived data without having to restore -// your data to a hotter Amazon S3 tier. For an overview about select requests, -// see Querying Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/querying-glacier-archives.html) -// in the Amazon S3 User Guide. +// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) // -// When making a select request, do the following: +// - Managing Access with ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html) +// in the Amazon S3 User Guide // -// - Define an output location for the select query's output. This must be -// an Amazon S3 bucket in the same Amazon Web Services Region as the bucket -// that contains the archive object that is being queried. The Amazon Web -// Services account that initiates the job must have permissions to write -// to the S3 bucket. You can specify the storage class and encryption for -// the output objects stored in the bucket. For more information about output, -// see Querying Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/querying-glacier-archives.html) -// in the Amazon S3 User Guide. For more information about the S3 structure -// in the request body, see the following: PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) -// Managing Access with ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html) -// in the Amazon S3 User Guide Protecting Data Using Server-Side Encryption -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) +// - Protecting Data Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) // in the Amazon S3 User Guide // -// - Define the SQL expression for the SELECT type of restoration for your -// query in the request body's SelectParameters structure. You can use expressions -// like the following examples. The following expression returns all records -// from the specified object. SELECT * FROM Object Assuming that you are -// not using any headers for data stored in the object, you can specify columns -// with positional headers. 
SELECT s._1, s._2 FROM Object s WHERE s._3 > -// 100 If you have headers and you set the fileHeaderInfo in the CSV structure +// Define the SQL expression for the SELECT type of restoration for your query +// in the request body's SelectParameters structure. You can use expressions +// like the following examples. +// +// - The following expression returns all records from the specified object. +// SELECT * FROM Object +// +// - Assuming that you are not using any headers for data stored in the object, +// you can specify columns with positional headers. SELECT s._1, s._2 FROM +// Object s WHERE s._3 > 100 +// +// - If you have headers and you set the fileHeaderInfo in the CSV structure // in the request body to USE, you can specify headers in the query. (If // you set the fileHeaderInfo field to IGNORE, the first row is skipped for // the query.) You cannot mix ordinal positions with header column names. // SELECT s.Id, s.FirstName, s.SSN FROM S3Object s // -// For more information about using SQL with S3 Glacier Select restore, see -// SQL Reference for Amazon S3 Select and S3 Glacier Select (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html) -// in the Amazon S3 User Guide. -// // When making a select request, you can also do the following: // // - To expedite your queries, specify the Expedited tier. For more information @@ -10273,59 +10377,74 @@ // // - The output results are new Amazon S3 objects. Unlike archive retrievals, // they are stored until explicitly deleted-manually or through a lifecycle -// policy. +// configuration. // // - You can issue more than one select request on the same Amazon S3 object. -// Amazon S3 doesn't deduplicate requests, so avoid issuing duplicate requests. +// Amazon S3 doesn't duplicate requests, so avoid issuing duplicate requests. // // - Amazon S3 accepts a select request even if the object has already been // restored. A select request doesn’t return error response 409. // +// # Permissions +// +// To use this operation, you must have permissions to perform the s3:RestoreObject +// action. The bucket owner has this permission by default and can grant this +// permission to others. For more information about permissions, see Permissions +// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// in the Amazon S3 User Guide. +// // # Restoring objects // -// Objects that you archive to the S3 Glacier or S3 Glacier Deep Archive storage -// class, and S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep -// Archive tiers are not accessible in real time. For objects in Archive Access -// or Deep Archive Access tiers you must first initiate a restore request, and -// then wait until the object is moved into the Frequent Access tier. For objects -// in S3 Glacier or S3 Glacier Deep Archive storage classes you must first initiate -// a restore request, and then wait until a temporary copy of the object is -// available. To access an archived object, you must restore the object for -// the duration (number of days) that you specify. 
+// Objects that you archive to the S3 Glacier Flexible Retrieval Flexible Retrieval +// or S3 Glacier Deep Archive storage class, and S3 Intelligent-Tiering Archive +// or S3 Intelligent-Tiering Deep Archive tiers, are not accessible in real +// time. For objects in the S3 Glacier Flexible Retrieval Flexible Retrieval +// or S3 Glacier Deep Archive storage classes, you must first initiate a restore +// request, and then wait until a temporary copy of the object is available. +// If you want a permanent copy of the object, create a copy of it in the Amazon +// S3 Standard storage class in your S3 bucket. To access an archived object, +// you must restore the object for the duration (number of days) that you specify. +// For objects in the Archive Access or Deep Archive Access tiers of S3 Intelligent-Tiering, +// you must first initiate a restore request, and then wait until the object +// is moved into the Frequent Access tier. // // To restore a specific object version, you can provide a version ID. If you // don't provide a version ID, Amazon S3 restores the current version. // -// When restoring an archived object (or using a select request), you can specify -// one of the following data access tier options in the Tier element of the -// request body: +// When restoring an archived object, you can specify one of the following data +// access tier options in the Tier element of the request body: // // - Expedited - Expedited retrievals allow you to quickly access your data -// stored in the S3 Glacier storage class or S3 Intelligent-Tiering Archive -// tier when occasional urgent requests for a subset of archives are required. -// For all but the largest archived objects (250 MB+), data accessed using -// Expedited retrievals is typically made available within 1–5 minutes. -// Provisioned capacity ensures that retrieval capacity for Expedited retrievals -// is available when you need it. Expedited retrievals and provisioned capacity -// are not available for objects stored in the S3 Glacier Deep Archive storage -// class or S3 Intelligent-Tiering Deep Archive tier. +// stored in the S3 Glacier Flexible Retrieval Flexible Retrieval storage +// class or S3 Intelligent-Tiering Archive tier when occasional urgent requests +// for restoring archives are required. For all but the largest archived +// objects (250 MB+), data accessed using Expedited retrievals is typically +// made available within 1–5 minutes. Provisioned capacity ensures that +// retrieval capacity for Expedited retrievals is available when you need +// it. Expedited retrievals and provisioned capacity are not available for +// objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering +// Deep Archive tier. // // - Standard - Standard retrievals allow you to access any of your archived // objects within several hours. This is the default option for retrieval // requests that do not specify the retrieval option. Standard retrievals // typically finish within 3–5 hours for objects stored in the S3 Glacier -// storage class or S3 Intelligent-Tiering Archive tier. They typically finish -// within 12 hours for objects stored in the S3 Glacier Deep Archive storage -// class or S3 Intelligent-Tiering Deep Archive tier. Standard retrievals -// are free for objects stored in S3 Intelligent-Tiering. -// -// - Bulk - Bulk retrievals are the lowest-cost retrieval option in S3 Glacier, -// enabling you to retrieve large amounts, even petabytes, of data inexpensively. 
-// Bulk retrievals typically finish within 5–12 hours for objects stored -// in the S3 Glacier storage class or S3 Intelligent-Tiering Archive tier. -// They typically finish within 48 hours for objects stored in the S3 Glacier -// Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier. -// Bulk retrievals are free for objects stored in S3 Intelligent-Tiering. +// Flexible Retrieval Flexible Retrieval storage class or S3 Intelligent-Tiering +// Archive tier. They typically finish within 12 hours for objects stored +// in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering +// Deep Archive tier. Standard retrievals are free for objects stored in +// S3 Intelligent-Tiering. +// +// - Bulk - Bulk retrievals free for objects stored in the S3 Glacier Flexible +// Retrieval and S3 Intelligent-Tiering storage classes, enabling you to +// retrieve large amounts, even petabytes, of data at no cost. Bulk retrievals +// typically finish within 5–12 hours for objects stored in the S3 Glacier +// Flexible Retrieval Flexible Retrieval storage class or S3 Intelligent-Tiering +// Archive tier. Bulk retrievals are also the lowest-cost retrieval option +// when restoring objects from S3 Glacier Deep Archive. They typically finish +// within 48 hours for objects stored in the S3 Glacier Deep Archive storage +// class or S3 Intelligent-Tiering Deep Archive tier. // // For more information about archive retrieval options and provisioned capacity // for Expedited data access, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html) @@ -10368,11 +10487,9 @@ // - If the object is previously restored, Amazon S3 returns 200 OK in the // response. // -// Special Errors -// -// - Code: RestoreAlreadyInProgress Cause: Object restore is already in progress. -// (This error does not apply to SELECT type requests.) HTTP Status Code: -// 409 Conflict SOAP Fault Code Prefix: Client +// - Special errors: Code: RestoreAlreadyInProgress Cause: Object restore +// is already in progress. (This error does not apply to SELECT type requests.) +// HTTP Status Code: 409 Conflict SOAP Fault Code Prefix: Client // // - Code: GlacierExpeditedRetrievalNotAvailable Cause: expedited retrievals // are currently not available. Try again later. (Returned if there is insufficient @@ -10380,15 +10497,12 @@ // Expedited retrievals and not to S3 Standard or Bulk retrievals.) HTTP // Status Code: 503 SOAP Fault Code Prefix: N/A // -// Related Resources +// The following operations are related to RestoreObject: // // - PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) // // - GetBucketNotificationConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html) // -// - SQL Reference for Amazon S3 Select and S3 Glacier Select (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html) -// in the Amazon S3 User Guide -// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -10489,10 +10603,6 @@ // and SELECT Command (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-glacier-select-sql-reference-select.html) // in the Amazon S3 User Guide. 
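Editor's note: a sketch of the RestoreObject request described in the hunks above, restoring an archived object for two days with the Standard retrieval tier. It reuses the client from the first sketch; bucket, key, and durations are placeholders.

// Initiate a temporary restore of an archived object.
func restoreArchivedObject(svc *s3.S3) error {
	_, err := svc.RestoreObject(&s3.RestoreObjectInput{
		Bucket: aws.String("example-bucket"),            // placeholder
		Key:    aws.String("archive/2019-backup.tar"),   // placeholder
		RestoreRequest: &s3.RestoreRequest{
			Days: aws.Int64(2),
			GlacierJobParameters: &s3.GlacierJobParameters{
				Tier: aws.String(s3.TierStandard), // or TierExpedited / TierBulk
			},
		},
	})
	// A restore that is already in progress surfaces as the 409
	// RestoreAlreadyInProgress error noted in the special-errors list above.
	return err
}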
// -// For more information about using SQL with Amazon S3 Select, see SQL Reference -// for Amazon S3 Select and S3 Glacier Select (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html) -// in the Amazon S3 User Guide. -// // # Permissions // // You must have s3:GetObject permission for this operation. Amazon S3 Select @@ -10522,10 +10632,10 @@ // For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided // Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) // in the Amazon S3 User Guide. For objects that are encrypted with Amazon -// S3 managed encryption keys (SSE-S3) and Amazon Web Services KMS keys (SSE-KMS), -// server-side encryption is handled transparently, so you don't need to -// specify anything. For more information about server-side encryption, including -// SSE-S3 and SSE-KMS, see Protecting Data Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) +// S3 managed keys (SSE-S3) and Amazon Web Services KMS keys (SSE-KMS), server-side +// encryption is handled transparently, so you don't need to specify anything. +// For more information about server-side encryption, including SSE-S3 and +// SSE-KMS, see Protecting Data Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) // in the Amazon S3 User Guide. // // # Working with the Response Body @@ -10545,9 +10655,13 @@ // in the request parameters), you cannot specify the range of bytes of an // object to return. // -// - GLACIER, DEEP_ARCHIVE and REDUCED_REDUNDANCY storage classes: You cannot -// specify the GLACIER, DEEP_ARCHIVE, or REDUCED_REDUNDANCY storage classes. -// For more information, about storage classes see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html#storage-class-intro) +// - The GLACIER, DEEP_ARCHIVE, and REDUCED_REDUNDANCY storage classes, or +// the ARCHIVE_ACCESS and DEEP_ARCHIVE_ACCESS access tiers of the INTELLIGENT_TIERING +// storage class: You cannot query objects in the GLACIER, DEEP_ARCHIVE, +// or REDUCED_REDUNDANCY storage classes, nor objects in the ARCHIVE_ACCESS +// or DEEP_ARCHIVE_ACCESS access tiers of the INTELLIGENT_TIERING storage +// class. For more information about storage classes, see Using Amazon S3 +// storage classes (https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-class-intro.html) // in the Amazon S3 User Guide. // // # Special Errors @@ -10555,7 +10669,7 @@ // For a list of special errors for this operation, see List of SELECT Object // Content Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#SelectObjectContentErrorCodeList) // -// Related Resources +// The following operations are related to SelectObjectContent: // // - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) // @@ -10847,24 +10961,32 @@ // go to Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) // in the Amazon S3 User Guide. // -// You can optionally request server-side encryption where Amazon S3 encrypts -// your data as it writes it to disks in its data centers and decrypts it for -// you when you access it. You have the option of providing your own encryption -// key, or you can use the Amazon Web Services managed encryption keys. 
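Editor's note: a sketch of the SelectObjectContent query style shown above, run against a CSV object with a header row. It reuses the client and "fmt" import from the first sketch; bucket, key, and the SQL expression are placeholders, and the event-stream handling follows the vendored SDK's streaming API.

// Stream the records returned by an S3 Select query over a CSV object.
func selectRows(svc *s3.S3) error {
	resp, err := svc.SelectObjectContent(&s3.SelectObjectContentInput{
		Bucket:         aws.String("example-bucket"),   // placeholder
		Key:            aws.String("data/people.csv"),  // placeholder
		ExpressionType: aws.String(s3.ExpressionTypeSql),
		Expression:     aws.String("SELECT s.Id, s.FirstName FROM S3Object s"),
		InputSerialization: &s3.InputSerialization{
			CSV: &s3.CSVInput{FileHeaderInfo: aws.String(s3.FileHeaderInfoUse)},
		},
		OutputSerialization: &s3.OutputSerialization{CSV: &s3.CSVOutput{}},
	})
	if err != nil {
		return err
	}
	defer resp.EventStream.Close()
	for event := range resp.EventStream.Events() {
		if records, ok := event.(*s3.RecordsEvent); ok {
			fmt.Printf("%s", records.Payload)
		}
	}
	return resp.EventStream.Err()
}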
If you -// choose to provide your own encryption key, the request headers you provide -// in the request must match the headers you used in the request to initiate -// the upload by using CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html). +// Server-side encryption is for data encryption at rest. Amazon S3 encrypts +// your data as it writes it to disks in its data centers and decrypts it when +// you access it. You have three mutually exclusive options to protect data +// using server-side encryption in Amazon S3, depending on how you choose to +// manage the encryption keys. Specifically, the encryption key options are +// Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS), +// and Customer-Provided Keys (SSE-C). Amazon S3 encrypts data with server-side +// encryption using Amazon S3 managed keys (SSE-S3) by default. You can optionally +// tell Amazon S3 to encrypt data at rest using server-side encryption with +// other key options. The option you use depends on whether you want to use +// KMS keys (SSE-KMS) or provide your own encryption key (SSE-C). If you choose +// to provide your own encryption key, the request headers you provide in the +// request must match the headers you used in the request to initiate the upload +// by using CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html). // For more information, go to Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) // in the Amazon S3 User Guide. // // Server-side encryption is supported by the S3 Multipart Upload actions. Unless -// you are using a customer-provided encryption key, you don't need to specify -// the encryption parameters in each UploadPart request. Instead, you only need -// to specify the server-side encryption parameters in the initial Initiate -// Multipart request. For more information, see CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html). +// you are using a customer-provided encryption key (SSE-C), you don't need +// to specify the encryption parameters in each UploadPart request. Instead, +// you only need to specify the server-side encryption parameters in the initial +// Initiate Multipart request. For more information, see CreateMultipartUpload +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html). // // If you requested server-side encryption using a customer-provided encryption -// key in your initiate multipart upload request, you must provide identical +// key (SSE-C) in your initiate multipart upload request, you must provide identical // encryption information in each part upload using the following headers. // // - x-amz-server-side-encryption-customer-algorithm @@ -10873,14 +10995,14 @@ // // - x-amz-server-side-encryption-customer-key-MD5 // -// Special Errors +// UploadPart has the following special errors: // // - Code: NoSuchUpload Cause: The specified multipart upload does not exist. // The upload ID might be invalid, or the multipart upload might have been // aborted or completed. 
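Editor's note: a sketch of an UploadPart call for a multipart upload initiated with a customer-provided key (SSE-C), repeating the three x-amz-server-side-encryption-customer-* headers listed above. It reuses the client from the first sketch and needs the "io" import; the upload ID, key material, bucket, and key are placeholders.

// Upload one part of an SSE-C multipart upload.
func uploadEncryptedPart(svc *s3.S3, uploadID string, part io.ReadSeeker) (*s3.UploadPartOutput, error) {
	return svc.UploadPart(&s3.UploadPartInput{
		Bucket:               aws.String("example-bucket"),      // placeholder
		Key:                  aws.String("backups/large.bin"),   // placeholder
		UploadId:             aws.String(uploadID),
		PartNumber:           aws.Int64(1),
		Body:                 part,
		SSECustomerAlgorithm: aws.String("AES256"),
		SSECustomerKey:       aws.String("base64-encoded-256-bit-key"), // placeholder; must match CreateMultipartUpload
		SSECustomerKeyMD5:    aws.String("base64-encoded-md5-of-key"),  // placeholder
	})
}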
HTTP Status Code: 404 Not Found SOAP Fault Code // Prefix: Client // -// Related Resources +// The following operations are related to UploadPart: // // - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) // @@ -11030,7 +11152,7 @@ // // x-amz-copy-source: /bucket/object?versionId=version id // -// Special Errors +// Special errors // // - Code: NoSuchUpload Cause: The specified multipart upload does not exist. // The upload ID might be invalid, or the multipart upload might have been @@ -11039,7 +11161,7 @@ // - Code: InvalidRequest Cause: The specified copy source is not supported // as a byte-range copy source. HTTP Status Code: 400 Bad Request // -// Related Resources +// The following operations are related to UploadPartCopy: // // - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) // @@ -11205,7 +11327,7 @@ // Specifies the days since the initiation of an incomplete multipart upload // that Amazon S3 will wait before permanently removing all parts of the upload. // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket -// Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) +// Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) // in the Amazon S3 User Guide. type AbortIncompleteMultipartUpload struct { _ struct{} `type:"structure"` @@ -11251,12 +11373,12 @@ // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // - // When using this action with Amazon S3 on Outposts, you must direct requests + // When you use this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // you use this action with S3 on Outposts through the Amazon Web Services SDKs, + // you provide the Outposts access point ARN in place of the bucket name. For + // more information about S3 on Outposts ARNs, see What is S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field @@ -12261,7 +12383,9 @@ // A single character used to indicate that a row should be ignored when the // character is present at the start of that row. You can specify any character - // to indicate a comment line. + // to indicate a comment line. The default character is #. + // + // Default: # Comments *string `type:"string"` // A single character used to separate individual fields in a record. You can @@ -12638,12 +12762,12 @@ // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. 
// - // When using this action with Amazon S3 on Outposts, you must direct requests + // When you use this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // you use this action with S3 on Outposts through the Amazon Web Services SDKs, + // you provide the Outposts access point ARN in place of the bucket name. For + // more information about S3 on Outposts ARNs, see What is S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field @@ -12900,17 +13024,17 @@ // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // - // When using this action with Amazon S3 on Outposts, you must direct requests + // When you use this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // you use this action with S3 on Outposts through the Amazon Web Services SDKs, + // you provide the Outposts access point ARN in place of the bucket name. For + // more information about S3 on Outposts ARNs, see What is S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. Bucket *string `type:"string"` // Indicates whether the multipart upload uses an S3 Bucket Key for server-side - // encryption with Amazon Web Services KMS (SSE-KMS). + // encryption with Key Management Service (KMS) keys (SSE-KMS). BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be @@ -12969,19 +13093,16 @@ // request. RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - // If present, specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetric customer managed key that was used for - // the object. + // If present, specifies the ID of the Key Management Service (KMS) symmetric + // encryption customer managed key that was used for the object. // // SSEKMSKeyId is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by CompleteMultipartUploadOutput's // String and GoString methods. 
SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - // If you specified server-side encryption either with an Amazon S3-managed - // encryption key or an Amazon Web Services KMS key in your initiate multipart - // upload request, the response includes this header. It confirms the encryption - // algorithm that Amazon S3 used to encrypt the object. + // The server-side encryption algorithm used when storing this object in Amazon + // S3 (for example, AES256, aws:kms). ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` // Version ID of the newly created object, in case the bucket has versioning @@ -13347,21 +13468,21 @@ // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // - // When using this action with Amazon S3 on Outposts, you must direct requests + // When you use this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // you use this action with S3 on Outposts through the Amazon Web Services SDKs, + // you provide the Outposts access point ARN in place of the bucket name. For + // more information about S3 on Outposts ARNs, see What is S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption - // with server-side encryption using AWS KMS (SSE-KMS). Setting this header - // to true causes Amazon S3 to use an S3 Bucket Key for object encryption with - // SSE-KMS. + // with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). + // Setting this header to true causes Amazon S3 to use an S3 Bucket Key for + // object encryption with SSE-KMS. // // Specifying this header with a COPY action doesn’t affect bucket-level settings // for S3 Bucket Key. @@ -13544,12 +13665,11 @@ // String and GoString methods. SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` - // Specifies the Amazon Web Services KMS key ID to use for object encryption. - // All GET and PUT requests for an object protected by Amazon Web Services KMS - // will fail if not made via SSL or using SigV4. For information about configuring - // using any of the officially supported Amazon Web Services SDKs and Amazon - // Web Services CLI, see Specifying the Signature Version in Request Authentication - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) + // Specifies the KMS key ID to use for object encryption. All GET and PUT requests + // for an object protected by KMS will fail if they're not made via SSL or using + // SigV4. 
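Editor's note: a sketch of a CopyObject request using the CopyObjectInput encryption fields documented in the surrounding hunks (ServerSideEncryption, SSEKMSKeyId, BucketKeyEnabled), reusing the client from the first sketch. All bucket names, keys, and the KMS key ID are placeholders.

// Copy an object and encrypt the destination copy with SSE-KMS.
func copyWithKMS(svc *s3.S3) error {
	_, err := svc.CopyObject(&s3.CopyObjectInput{
		Bucket:               aws.String("example-destination-bucket"),                 // placeholder
		Key:                  aws.String("copied/report.txt"),                          // placeholder
		CopySource:           aws.String("example-source-bucket/reports/2024-02.txt"),  // placeholder
		ServerSideEncryption: aws.String(s3.ServerSideEncryptionAwsKms),
		SSEKMSKeyId:          aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"), // placeholder key ID
		BucketKeyEnabled:     aws.Bool(true),
	})
	return err
}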
For information about configuring any of the officially supported + // Amazon Web Services SDKs and Amazon Web Services CLI, see Specifying the + // Signature Version in Request Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) // in the Amazon S3 User Guide. // // SSEKMSKeyId is a sensitive parameter and its value will be @@ -13558,7 +13678,7 @@ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` // The server-side encryption algorithm used when storing this object in Amazon - // S3 (for example, AES256, aws:kms). + // S3 (for example, AES256, aws:kms, aws:kms:dsse). ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` // By default, Amazon S3 uses the STANDARD Storage Class to store newly created @@ -13580,7 +13700,9 @@ // If the bucket is configured as a website, redirects requests for this object // to another object in the same bucket or to an external URL. Amazon S3 stores - // the value of this header in the object metadata. + // the value of this header in the object metadata. This value is unique to + // each object and is not copied when using the x-amz-metadata-directive header. + // Instead, you may opt to provide this header in combination with the directive. WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` } @@ -13925,7 +14047,7 @@ _ struct{} `type:"structure" payload:"CopyObjectResult"` // Indicates whether the copied object uses an S3 Bucket Key for server-side - // encryption with Amazon Web Services KMS (SSE-KMS). + // encryption with Key Management Service (KMS) keys (SSE-KMS). BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` // Container for all response elements. @@ -13960,9 +14082,8 @@ // String and GoString methods. SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` - // If present, specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetric customer managed key that was used for - // the object. + // If present, specifies the ID of the Key Management Service (KMS) symmetric + // encryption customer managed key that was used for the object. // // SSEKMSKeyId is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by CopyObjectOutput's @@ -13970,7 +14091,7 @@ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` // The server-side encryption algorithm used when storing this object in Amazon - // S3 (for example, AES256, aws:kms). + // S3 (for example, AES256, aws:kms, aws:kms:dsse). ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` // Version ID of the newly created copy. @@ -14491,21 +14612,21 @@ // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // - // When using this action with Amazon S3 on Outposts, you must direct requests + // When you use this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. 
The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // you use this action with S3 on Outposts through the Amazon Web Services SDKs, + // you provide the Outposts access point ARN in place of the bucket name. For + // more information about S3 on Outposts ARNs, see What is S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption - // with server-side encryption using AWS KMS (SSE-KMS). Setting this header - // to true causes Amazon S3 to use an S3 Bucket Key for object encryption with - // SSE-KMS. + // with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). + // Setting this header to true causes Amazon S3 to use an S3 Bucket Key for + // object encryption with SSE-KMS. // // Specifying this header with an object action doesn’t affect bucket-level // settings for S3 Bucket Key. @@ -14614,12 +14735,12 @@ // String and GoString methods. SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` - // Specifies the ID of the symmetric customer managed key to use for object - // encryption. All GET and PUT requests for an object protected by Amazon Web - // Services KMS will fail if not made via SSL or using SigV4. For information - // about configuring using any of the officially supported Amazon Web Services - // SDKs and Amazon Web Services CLI, see Specifying the Signature Version in - // Request Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) + // Specifies the ID of the symmetric encryption customer managed key to use + // for object encryption. All GET and PUT requests for an object protected by + // KMS will fail if they're not made via SSL or using SigV4. For information + // about configuring any of the officially supported Amazon Web Services SDKs + // and Amazon Web Services CLI, see Specifying the Signature Version in Request + // Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) // in the Amazon S3 User Guide. // // SSEKMSKeyId is a sensitive parameter and its value will be @@ -14917,7 +15038,7 @@ // name in the request, the response includes this header. The header indicates // when the initiated multipart upload becomes eligible for an abort operation. // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket - // Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config). + // Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config). // // The response also includes the x-amz-abort-rule-id header that provides the // ID of the lifecycle configuration rule that defines this action. 
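[editor's note, not part of the patch] The hunks above update the doc comments for the CopyObject/CreateMultipartUpload encryption fields (ServerSideEncryption, SSEKMSKeyId, BucketKeyEnabled, i.e. the x-amz-server-side-encryption* headers). A minimal, illustrative sketch of how those fields are populated with the vendored aws-sdk-go v1 S3 client follows; bucket names, object keys and the KMS key alias are placeholders, not values taken from this patch.

// Illustrative sketch only: exercises the CopyObjectInput fields whose
// documentation is updated above. All resource names are placeholders.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	out, err := svc.CopyObject(&s3.CopyObjectInput{
		Bucket:               aws.String("destination-bucket"),          // placeholder
		Key:                  aws.String("copied-object"),               // placeholder
		CopySource:           aws.String("source-bucket/source-object"), // placeholder
		ServerSideEncryption: aws.String(s3.ServerSideEncryptionAwsKms), // "aws:kms"
		SSEKMSKeyId:          aws.String("alias/my-kms-key"),            // placeholder KMS key ID/alias
		BucketKeyEnabled:     aws.Bool(true),                            // request an S3 Bucket Key for SSE-KMS
	})
	if err != nil {
		log.Fatal(err)
	}
	// CopyObjectOutput echoes the encryption settings described in the doc comments.
	fmt.Println(aws.StringValue(out.ServerSideEncryption), aws.BoolValue(out.BucketKeyEnabled))
}
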
@@ -14938,17 +15059,17 @@ // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // - // When using this action with Amazon S3 on Outposts, you must direct requests + // When you use this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // you use this action with S3 on Outposts through the Amazon Web Services SDKs, + // you provide the Outposts access point ARN in place of the bucket name. For + // more information about S3 on Outposts ARNs, see What is S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. Bucket *string `locationName:"Bucket" type:"string"` // Indicates whether the multipart upload uses an S3 Bucket Key for server-side - // encryption with Amazon Web Services KMS (SSE-KMS). + // encryption with Key Management Service (KMS) keys (SSE-KMS). BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` // The algorithm that was used to create a checksum of the object. @@ -14980,9 +15101,8 @@ // String and GoString methods. SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` - // If present, specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetric customer managed key that was used for - // the object. + // If present, specifies the ID of the Key Management Service (KMS) symmetric + // encryption customer managed key that was used for the object. // // SSEKMSKeyId is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by CreateMultipartUploadOutput's @@ -15163,7 +15283,7 @@ type Delete struct { _ struct{} `type:"structure"` - // The objects to delete. + // The object to delete. // // Objects is a required field Objects []*ObjectIdentifier `locationName:"Object" type:"list" flattened:"true" required:"true"` @@ -16062,7 +16182,8 @@ // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - // The ID used to identify the metrics configuration. + // The ID used to identify the metrics configuration. The ID has a 64 character + // limit and can only contain letters, numbers, periods, dashes, and underscores. // // Id is a required field Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` @@ -16908,12 +17029,12 @@ // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // - // When using this action with Amazon S3 on Outposts, you must direct requests + // When you use this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. 
When - // using this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // you use this action with S3 on Outposts through the Amazon Web Services SDKs, + // you provide the Outposts access point ARN in place of the bucket name. For + // more information about S3 on Outposts ARNs, see What is S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field @@ -17131,12 +17252,12 @@ // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // - // When using this action with Amazon S3 on Outposts, you must direct requests + // When you use this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // you use this action with S3 on Outposts through the Amazon Web Services SDKs, + // you provide the Outposts access point ARN in place of the bucket name. For + // more information about S3 on Outposts ARNs, see What is S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field @@ -17298,12 +17419,12 @@ // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // - // When using this action with Amazon S3 on Outposts, you must direct requests + // When you use this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // you use this action with S3 on Outposts through the Amazon Web Services SDKs, + // you provide the Outposts access point ARN in place of the bucket name. For + // more information about S3 on Outposts ARNs, see What is S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field @@ -17871,9 +17992,9 @@ KMSContext *string `type:"string"` // If the encryption type is aws:kms, this optional value specifies the ID of - // the symmetric customer managed key to use for encryption of job results. - // Amazon S3 only supports symmetric keys. 
For more information, see Using symmetric - // and asymmetric keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) + // the symmetric encryption customer managed key to use for encryption of job + // results. Amazon S3 only supports symmetric encryption KMS keys. For more + // information, see Asymmetric keys in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) // in the Amazon Web Services Key Management Service Developer Guide. // // KMSKeyId is a sensitive parameter and its value will be @@ -17939,8 +18060,8 @@ // Specifies the ID (Key ARN or Alias ARN) of the customer managed Amazon Web // Services KMS key stored in Amazon Web Services Key Management Service (KMS) // for the destination bucket. Amazon S3 uses this key to encrypt replica objects. - // Amazon S3 only supports symmetric, customer managed KMS keys. For more information, - // see Using symmetric and asymmetric keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) + // Amazon S3 only supports symmetric encryption KMS keys. For more information, + // see Asymmetric keys in Amazon Web Services KMS (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) // in the Amazon Web Services Key Management Service Developer Guide. ReplicaKmsKeyID *string `type:"string"` } @@ -18019,9 +18140,8 @@ // The error code is a string that uniquely identifies an error condition. It // is meant to be read and understood by programs that detect and handle errors - // by type. - // - // Amazon S3 error codes + // by type. The following is a list of Amazon S3 error codes. For more information, + // see Error responses (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html). // // * Code: AccessDenied Description: Access Denied HTTP Status Code: 403 // Forbidden SOAP Fault Code Prefix: Client @@ -18341,8 +18461,8 @@ // and SOAP Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/SOAPAuthentication.html) // for details. HTTP Status Code: 403 Forbidden SOAP Fault Code Prefix: Client // - // * Code: ServiceUnavailable Description: Reduce your request rate. HTTP - // Status Code: 503 Service Unavailable SOAP Fault Code Prefix: Server + // * Code: ServiceUnavailable Description: Service is unable to handle request. + // HTTP Status Code: 503 Service Unavailable SOAP Fault Code Prefix: Server // // * Code: SlowDown Description: Reduce your request rate. HTTP Status Code: // 503 Slow Down SOAP Fault Code Prefix: Server @@ -18510,6 +18630,8 @@ type ExistingObjectReplication struct { _ struct{} `type:"structure"` + // Specifies whether Amazon S3 replicates existing source bucket objects. + // // Status is a required field Status *string `type:"string" required:"true" enum:"ExistingObjectReplicationStatus"` } @@ -18609,6 +18731,13 @@ // different account, the request fails with the HTTP status code 403 Forbidden // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. 
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` } // String returns the string representation. @@ -18664,6 +18793,12 @@ return s } +// SetRequestPayer sets the RequestPayer field's value. +func (s *GetBucketAccelerateConfigurationInput) SetRequestPayer(v string) *GetBucketAccelerateConfigurationInput { + s.RequestPayer = &v + return s +} + func (s *GetBucketAccelerateConfigurationInput) getEndpointARN() (arn.Resource, error) { if s.Bucket == nil { return nil, fmt.Errorf("member Bucket is nil") @@ -18694,6 +18829,10 @@ type GetBucketAccelerateConfigurationOutput struct { _ struct{} `type:"structure"` + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + // The accelerate configuration of the bucket. Status *string `type:"string" enum:"BucketAccelerateStatus"` } @@ -18716,6 +18855,12 @@ return s.String() } +// SetRequestCharged sets the RequestCharged field's value. +func (s *GetBucketAccelerateConfigurationOutput) SetRequestCharged(v string) *GetBucketAccelerateConfigurationOutput { + s.RequestCharged = &v + return s +} + // SetStatus sets the Status field's value. func (s *GetBucketAccelerateConfigurationOutput) SetStatus(v string) *GetBucketAccelerateConfigurationOutput { s.Status = &v @@ -18727,6 +18872,15 @@ // Specifies the S3 bucket whose ACL is being requested. // + // To use this API operation against an access point, provide the alias of the + // access point in place of the bucket name. + // + // To use this API operation against an Object Lambda access point, provide + // the alias of the Object Lambda access point in place of the bucket name. + // If the Object Lambda access point alias in a request is not valid, the error + // code InvalidAccessPointAliasError is returned. For more information about + // InvalidAccessPointAliasError, see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList). + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -19000,6 +19154,15 @@ // The bucket name for which to get the cors configuration. // + // To use this API operation against an access point, provide the alias of the + // access point in place of the bucket name. + // + // To use this API operation against an Object Lambda access point, provide + // the alias of the Object Lambda access point in place of the bucket name. + // If the Object Lambda access point alias in a request is not valid, the error + // code InvalidAccessPointAliasError is returned. For more information about + // InvalidAccessPointAliasError, see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList). + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -19770,6 +19933,15 @@ // The name of the bucket for which to get the location. // + // To use this API operation against an access point, provide the alias of the + // access point in place of the bucket name. + // + // To use this API operation against an Object Lambda access point, provide + // the alias of the Object Lambda access point in place of the bucket name. + // If the Object Lambda access point alias in a request is not valid, the error + // code InvalidAccessPointAliasError is returned. 
For more information about + // InvalidAccessPointAliasError, see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList). + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -20033,7 +20205,8 @@ // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - // The ID used to identify the metrics configuration. + // The ID used to identify the metrics configuration. The ID has a 64 character + // limit and can only contain letters, numbers, periods, dashes, and underscores. // // Id is a required field Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` @@ -20164,6 +20337,15 @@ // The name of the bucket for which to get the notification configuration. // + // To use this API operation against an access point, provide the alias of the + // access point in place of the bucket name. + // + // To use this API operation against an Object Lambda access point, provide + // the alias of the Object Lambda access point in place of the bucket name. + // If the Object Lambda access point alias in a request is not valid, the error + // code InvalidAccessPointAliasError is returned. For more information about + // InvalidAccessPointAliasError, see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList). + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -20384,6 +20566,15 @@ // The bucket name for which to get the bucket policy. // + // To use this API operation against an access point, provide the alias of the + // access point in place of the bucket name. + // + // To use this API operation against an Object Lambda access point, provide + // the alias of the Object Lambda access point in place of the bucket name. + // If the Object Lambda access point alias in a request is not valid, the error + // code InvalidAccessPointAliasError is returned. For more information about + // InvalidAccessPointAliasError, see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList). + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -21498,12 +21689,12 @@ // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // - // When using this action with Amazon S3 on Outposts, you must direct requests + // When you use this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // you use this action with S3 on Outposts through the Amazon Web Services SDKs, + // you provide the Outposts access point ARN in place of the bucket name. 
For + // more information about S3 on Outposts ARNs, see What is S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field @@ -21917,12 +22108,12 @@ // // When using an Object Lambda access point the hostname takes the form AccessPointName-AccountId.s3-object-lambda.Region.amazonaws.com. // - // When using this action with Amazon S3 on Outposts, you must direct requests + // When you use this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // you use this action with S3 on Outposts through the Amazon Web Services SDKs, + // you provide the Outposts access point ARN in place of the bucket name. For + // more information about S3 on Outposts ARNs, see What is S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field @@ -21966,8 +22157,8 @@ PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer"` // Downloads the specified range bytes of an object. For more information about - // the HTTP Range header, see https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35 - // (https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35). + // the HTTP Range header, see https://www.rfc-editor.org/rfc/rfc9110.html#name-range + // (https://www.rfc-editor.org/rfc/rfc9110.html#name-range). // // Amazon S3 doesn't support retrieving multiple ranges of data per GET request. Range *string `location:"header" locationName:"Range" type:"string"` @@ -22542,7 +22733,7 @@ Body io.ReadCloser `type:"blob"` // Indicates whether the object uses an S3 Bucket Key for server-side encryption - // with Amazon Web Services KMS (SSE-KMS). + // with Key Management Service (KMS) keys (SSE-KMS). BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` // Specifies caching behavior along the request/reply chain. @@ -22668,9 +22859,8 @@ // verification of the customer-provided encryption key. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - // If present, specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetric customer managed key that was used for - // the object. + // If present, specifies the ID of the Key Management Service (KMS) symmetric + // encryption customer managed key that was used for the object. // // SSEKMSKeyId is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by GetObjectOutput's @@ -22678,7 +22868,7 @@ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` // The server-side encryption algorithm used when storing this object in Amazon - // S3 (for example, AES256, aws:kms). + // S3 (for example, AES256, aws:kms, aws:kms:dsse). 
ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` // Provides storage class information of the object. Amazon S3 returns this @@ -23115,12 +23305,12 @@ // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // - // When using this action with Amazon S3 on Outposts, you must direct requests + // When you use this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // you use this action with S3 on Outposts through the Amazon Web Services SDKs, + // you provide the Outposts access point ARN in place of the bucket name. For + // more information about S3 on Outposts ARNs, see What is S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field @@ -23807,12 +23997,18 @@ // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // - // When using this action with Amazon S3 on Outposts, you must direct requests + // When you use this action with an Object Lambda access point, provide the + // alias of the Object Lambda access point in place of the bucket name. If the + // Object Lambda access point alias in a request is not valid, the error code + // InvalidAccessPointAliasError is returned. For more information about InvalidAccessPointAliasError, + // see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList). + // + // When you use this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // you use this action with S3 on Outposts through the Amazon Web Services SDKs, + // you provide the Outposts access point ARN in place of the bucket name. For + // more information about S3 on Outposts ARNs, see What is S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field @@ -23938,12 +24134,12 @@ // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // - // When using this action with Amazon S3 on Outposts, you must direct requests + // When you use this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. 
The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // you use this action with S3 on Outposts through the Amazon Web Services SDKs, + // you provide the Outposts access point ARN in place of the bucket name. For + // more information about S3 on Outposts ARNs, see What is S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field @@ -23988,8 +24184,9 @@ // object. PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer"` - // Because HeadObject returns only the metadata for an object, this parameter - // has no effect. + // HeadObject returns only the metadata for an object. If the Range is satisfiable, + // only the ContentLength is affected in the response. If the Range is not satisfiable, + // S3 returns a 416 - Requested Range Not Satisfiable error. Range *string `location:"header" locationName:"Range" type:"string"` // Confirms that the requester knows that they will be charged for the request. @@ -24204,7 +24401,7 @@ ArchiveStatus *string `location:"header" locationName:"x-amz-archive-status" type:"string" enum:"ArchiveStatus"` // Indicates whether the object uses an S3 Bucket Key for server-side encryption - // with Amazon Web Services KMS (SSE-KMS). + // with Key Management Service (KMS) keys (SSE-KMS). BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` // Specifies caching behavior along the request/reply chain. @@ -24376,19 +24573,16 @@ // verification of the customer-provided encryption key. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - // If present, specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetric customer managed key that was used for - // the object. + // If present, specifies the ID of the Key Management Service (KMS) symmetric + // encryption customer managed key that was used for the object. // // SSEKMSKeyId is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by HeadObjectOutput's // String and GoString methods. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - // If the object is stored using server-side encryption either with an Amazon - // Web Services KMS key or an Amazon S3-managed encryption key, the response - // includes this header with the value of the server-side encryption algorithm - // used when storing this object in Amazon S3 (for example, AES256, aws:kms). + // The server-side encryption algorithm used when storing this object in Amazon + // S3 (for example, AES256, aws:kms, aws:kms:dsse). ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` // Provides storage class information of the object. 
Amazon S3 returns this @@ -25596,7 +25790,8 @@ Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true" enum:"Event"` // Specifies object key name filtering rules. For information about key name - // filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // filtering, see Configuring event notifications using object key name filtering + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html) // in the Amazon S3 User Guide. Filter *NotificationConfigurationFilter `type:"structure"` @@ -25670,6 +25865,9 @@ } // Container for lifecycle rules. You can add as many as 1000 rules. +// +// For more information see, Managing your storage lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html) +// in the Amazon S3 User Guide. type LifecycleConfiguration struct { _ struct{} `type:"structure"` @@ -25727,11 +25925,14 @@ } // Container for the expiration for the lifecycle of the object. +// +// For more information see, Managing your storage lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html) +// in the Amazon S3 User Guide. type LifecycleExpiration struct { _ struct{} `type:"structure"` - // Indicates at what date the object is to be moved or deleted. Should be in - // GMT ISO 8601 Format. + // Indicates at what date the object is to be moved or deleted. The date value + // must conform to the ISO 8601 format. The time is always midnight UTC. Date *time.Time `type:"timestamp" timestampFormat:"iso8601"` // Indicates the lifetime, in days, of the objects that are subject to the rule. @@ -25782,13 +25983,16 @@ } // A lifecycle rule for individual objects in an Amazon S3 bucket. +// +// For more information see, Managing your storage lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html) +// in the Amazon S3 User Guide. type LifecycleRule struct { _ struct{} `type:"structure"` // Specifies the days since the initiation of an incomplete multipart upload // that Amazon S3 will wait before permanently removing all parts of the upload. // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket - // Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) + // Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) // in the Amazon S3 User Guide. AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"` @@ -26842,12 +27046,12 @@ // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // - // When using this action with Amazon S3 on Outposts, you must direct requests + // When you use this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts bucket ARN in place of the bucket name. 
For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // you use this action with S3 on Outposts through the Amazon Web Services SDKs, + // you provide the Outposts access point ARN in place of the bucket name. For + // more information about S3 on Outposts ARNs, see What is S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field @@ -26898,6 +27102,13 @@ // use a folder in a file system.) Prefix *string `location:"querystring" locationName:"prefix" type:"string"` + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + // Together with key-marker, specifies the multipart upload after which listing // should begin. If key-marker is not specified, the upload-id-marker parameter // is ignored. Otherwise, any multipart uploads for a key equal to the key-marker @@ -26989,6 +27200,12 @@ return s } +// SetRequestPayer sets the RequestPayer field's value. +func (s *ListMultipartUploadsInput) SetRequestPayer(v string) *ListMultipartUploadsInput { + s.RequestPayer = &v + return s +} + // SetUploadIdMarker sets the UploadIdMarker field's value. func (s *ListMultipartUploadsInput) SetUploadIdMarker(v string) *ListMultipartUploadsInput { s.UploadIdMarker = &v @@ -27072,6 +27289,10 @@ // prefix. The result contains only keys starting with the specified prefix. Prefix *string `type:"string"` + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + // Upload ID after which listing began. UploadIdMarker *string `type:"string"` @@ -27165,6 +27386,12 @@ return s } +// SetRequestCharged sets the RequestCharged field's value. +func (s *ListMultipartUploadsOutput) SetRequestCharged(v string) *ListMultipartUploadsOutput { + s.RequestCharged = &v + return s +} + // SetUploadIdMarker sets the UploadIdMarker field's value. func (s *ListMultipartUploadsOutput) SetUploadIdMarker(v string) *ListMultipartUploadsOutput { s.UploadIdMarker = &v @@ -27223,6 +27450,13 @@ // up numerous objects into a single result under CommonPrefixes. Prefix *string `location:"querystring" locationName:"prefix" type:"string"` + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + // Specifies the object version you want to start listing from. 
VersionIdMarker *string `location:"querystring" locationName:"version-id-marker" type:"string"` } @@ -27310,6 +27544,12 @@ return s } +// SetRequestPayer sets the RequestPayer field's value. +func (s *ListObjectVersionsInput) SetRequestPayer(v string) *ListObjectVersionsInput { + s.RequestPayer = &v + return s +} + // SetVersionIdMarker sets the VersionIdMarker field's value. func (s *ListObjectVersionsInput) SetVersionIdMarker(v string) *ListObjectVersionsInput { s.VersionIdMarker = &v @@ -27400,6 +27640,10 @@ // Selects objects that start with the value supplied by this parameter. Prefix *string `type:"string"` + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + // Marks the last version of the key returned in a truncated response. VersionIdMarker *string `type:"string"` @@ -27491,6 +27735,12 @@ return s } +// SetRequestCharged sets the RequestCharged field's value. +func (s *ListObjectVersionsOutput) SetRequestCharged(v string) *ListObjectVersionsOutput { + s.RequestCharged = &v + return s +} + // SetVersionIdMarker sets the VersionIdMarker field's value. func (s *ListObjectVersionsOutput) SetVersionIdMarker(v string) *ListObjectVersionsOutput { s.VersionIdMarker = &v @@ -27515,12 +27765,12 @@ // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // - // When using this action with Amazon S3 on Outposts, you must direct requests + // When you use this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // you use this action with S3 on Outposts through the Amazon Web Services SDKs, + // you provide the Outposts access point ARN in place of the bucket name. For + // more information about S3 on Outposts ARNs, see What is S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field @@ -27734,6 +27984,10 @@ // Keys that begin with the indicated prefix. Prefix *string `type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` } // String returns the string representation. @@ -27814,6 +28068,12 @@ return s } +// SetRequestCharged sets the RequestCharged field's value. +func (s *ListObjectsOutput) SetRequestCharged(v string) *ListObjectsOutput { + s.RequestCharged = &v + return s +} + type ListObjectsV2Input struct { _ struct{} `locationName:"ListObjectsV2Request" type:"structure"` @@ -27826,12 +28086,12 @@ // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. 
// - // When using this action with Amazon S3 on Outposts, you must direct requests + // When you use this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // you use this action with S3 on Outposts through the Amazon Web Services SDKs, + // you provide the Outposts access point ARN in place of the bucket name. For + // more information about S3 on Outposts ARNs, see What is S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field @@ -28052,8 +28312,8 @@ IsTruncated *bool `type:"boolean"` // KeyCount is the number of keys returned with this request. KeyCount will - // always be less than or equals to MaxKeys field. Say you ask for 50 keys, - // your result will include less than equals 50 keys + // always be less than or equal to the MaxKeys field. Say you ask for 50 keys, + // your result will include 50 keys or fewer. KeyCount *int64 `type:"integer"` // Sets the maximum number of keys returned in the response. By default the @@ -28070,12 +28330,12 @@ // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // - // When using this action with Amazon S3 on Outposts, you must direct requests + // When you use this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // you use this action with S3 on Outposts through the Amazon Web Services SDKs, + // you provide the Outposts access point ARN in place of the bucket name. For + // more information about S3 on Outposts ARNs, see What is S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. Name *string `type:"string"` @@ -28088,6 +28348,10 @@ // Keys that begin with the indicated prefix. Prefix *string `type:"string"` + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + // If StartAfter was sent with the request, it is included in the response. StartAfter *string `type:"string"` } @@ -28176,6 +28440,12 @@ return s } +// SetRequestCharged sets the RequestCharged field's value. +func (s *ListObjectsV2Output) SetRequestCharged(v string) *ListObjectsV2Output { + s.RequestCharged = &v + return s +} + // SetStartAfter sets the StartAfter field's value. 
func (s *ListObjectsV2Output) SetStartAfter(v string) *ListObjectsV2Output { s.StartAfter = &v @@ -28194,12 +28464,12 @@ // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // - // When using this action with Amazon S3 on Outposts, you must direct requests + // When you use this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // you use this action with S3 on Outposts through the Amazon Web Services SDKs, + // you provide the Outposts access point ARN in place of the bucket name. For + // more information about S3 on Outposts ARNs, see What is S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field @@ -28409,7 +28679,7 @@ // name in the request, then the response includes this header indicating when // the initiated multipart upload will become eligible for abort operation. // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket - // Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config). + // Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config). // // The response will also include the x-amz-abort-rule-id header that will provide // the ID of the lifecycle configuration rule that defines this action. @@ -29006,7 +29276,8 @@ // prefix, an object tag, an access point ARN, or a conjunction (MetricsAndOperator). Filter *MetricsFilter `type:"structure"` - // The ID used to identify the metrics configuration. + // The ID used to identify the metrics configuration. The ID has a 64 character + // limit and can only contain letters, numbers, periods, dashes, and underscores. // // Id is a required field Id *string `type:"string" required:"true"` @@ -29504,7 +29775,8 @@ } // Specifies object key name filtering rules. For information about key name -// filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) +// filtering, see Configuring event notifications using object key name filtering +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html) // in the Amazon S3 User Guide. type NotificationConfigurationFilter struct { _ struct{} `type:"structure"` @@ -30153,7 +30425,24 @@ type Owner struct { _ struct{} `type:"structure"` - // Container for the display name of the owner. + // Container for the display name of the owner. This value is only supported + // in the following Amazon Web Services Regions: + // + // * US East (N. Virginia) + // + // * US West (N. 
California) + // + // * US West (Oregon) + // + // * Asia Pacific (Singapore) + // + // * Asia Pacific (Sydney) + // + // * Asia Pacific (Tokyo) + // + // * Europe (Ireland) + // + // * South America (São Paulo) DisplayName *string `type:"string"` // Container for the ID of the owner. @@ -31364,9 +31653,12 @@ _ struct{} `locationName:"PutBucketEncryptionRequest" type:"structure" payload:"ServerSideEncryptionConfiguration"` // Specifies default encryption for a bucket using server-side encryption with - // Amazon S3-managed keys (SSE-S3) or customer managed keys (SSE-KMS). For information - // about the Amazon S3 default encryption feature, see Amazon S3 Default Bucket - // Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) + // different key options. By default, all buckets have a default encryption + // configuration that uses server-side encryption with Amazon S3 managed keys + // (SSE-S3). You can optionally configure default encryption for a bucket by + // using server-side encryption with an Amazon Web Services KMS key (SSE-KMS) + // or a customer-provided key (SSE-C). For information about the bucket default + // encryption feature, see Amazon S3 Bucket Default Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) // in the Amazon S3 User Guide. // // Bucket is a required field @@ -32004,6 +32296,9 @@ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // Container for lifecycle rules. You can add as many as 1000 rules. + // + // For more information see, Managing your storage lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html) + // in the Amazon S3 User Guide. LifecycleConfiguration *LifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } @@ -32301,7 +32596,8 @@ // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - // The ID used to identify the metrics configuration. + // The ID used to identify the metrics configuration. The ID has a 64 character + // limit and can only contain letters, numbers, periods, dashes, and underscores. // // Id is a required field Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` @@ -33953,12 +34249,12 @@ // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // - // When using this action with Amazon S3 on Outposts, you must direct requests + // When you use this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // you use this action with S3 on Outposts through the Amazon Web Services SDKs, + // you provide the Outposts access point ARN in place of the bucket name. 
For + // more information about S3 on Outposts ARNs, see What is S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Key is a required field @@ -34185,21 +34481,21 @@ // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // - // When using this action with Amazon S3 on Outposts, you must direct requests + // When you use this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // you use this action with S3 on Outposts through the Amazon Web Services SDKs, + // you provide the Outposts access point ARN in place of the bucket name. For + // more information about S3 on Outposts ARNs, see What is S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption - // with server-side encryption using AWS KMS (SSE-KMS). Setting this header - // to true causes Amazon S3 to use an S3 Bucket Key for object encryption with - // SSE-KMS. + // with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). + // Setting this header to true causes Amazon S3 to use an S3 Bucket Key for + // object encryption with SSE-KMS. // // Specifying this header with a PUT action doesn’t affect bucket-level settings // for S3 Bucket Key. @@ -34256,21 +34552,21 @@ ChecksumSHA256 *string `location:"header" locationName:"x-amz-checksum-sha256" type:"string"` // Specifies presentational information for the object. For more information, - // see http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1). + // see https://www.rfc-editor.org/rfc/rfc6266#section-4 (https://www.rfc-editor.org/rfc/rfc6266#section-4). ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` // Specifies what content encodings have been applied to the object and thus // what decoding mechanisms must be applied to obtain the media-type referenced - // by the Content-Type header field. For more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11 - // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11). + // by the Content-Type header field. For more information, see https://www.rfc-editor.org/rfc/rfc9110.html#field.content-encoding + // (https://www.rfc-editor.org/rfc/rfc9110.html#field.content-encoding). ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` // The language the content is in. ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` // Size of the body in bytes. This parameter is useful when the size of the - // body cannot be determined automatically. 
For more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13 - // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13). + // body cannot be determined automatically. For more information, see https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length + // (https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length). ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` // The base64-encoded 128-bit MD5 digest of the message (without the headers) @@ -34282,7 +34578,7 @@ ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"` // A standard MIME type describing the format of the contents. For more information, - // see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17). + // see https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type (https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type). ContentType *string `location:"header" locationName:"Content-Type" type:"string"` // The account ID of the expected bucket owner. If the bucket is owned by a @@ -34291,7 +34587,7 @@ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The date and time at which the object is no longer cacheable. For more information, - // see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21). + // see https://www.rfc-editor.org/rfc/rfc7234#section-5.3 (https://www.rfc-editor.org/rfc/rfc7234#section-5.3). Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"` // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. @@ -34362,21 +34658,23 @@ // Specifies the Amazon Web Services KMS Encryption Context to use for object // encryption. The value of this header is a base64-encoded UTF-8 string holding - // JSON with the encryption context key-value pairs. + // JSON with the encryption context key-value pairs. This value is stored as + // object metadata and automatically gets passed on to Amazon Web Services KMS + // for future GetObject or CopyObject operations on this object. // // SSEKMSEncryptionContext is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by PutObjectInput's // String and GoString methods. SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` - // If x-amz-server-side-encryption is present and has the value of aws:kms, - // this header specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetrical customer managed key that was used - // for the object. If you specify x-amz-server-side-encryption:aws:kms, but - // do not providex-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses - // the Amazon Web Services managed key to protect the data. If the KMS key does - // not exist in the same account issuing the command, you must use the full - // ARN and not just the ID. + // If x-amz-server-side-encryption has a valid value of aws:kms or aws:kms:dsse, + // this header specifies the ID of the Key Management Service (KMS) symmetric + // encryption customer managed key that was used for the object. 
If you specify + // x-amz-server-side-encryption:aws:kms or x-amz-server-side-encryption:aws:kms:dsse, + // but do not providex-amz-server-side-encryption-aws-kms-key-id, Amazon S3 + // uses the Amazon Web Services managed key (aws/s3) to protect the data. If + // the KMS key does not exist in the same account that's issuing the command, + // you must use the full ARN and not just the ID. // // SSEKMSKeyId is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by PutObjectInput's @@ -34384,7 +34682,7 @@ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` // The server-side encryption algorithm used when storing this object in Amazon - // S3 (for example, AES256, aws:kms). + // S3 (for example, AES256, aws:kms, aws:kms:dsse). ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` // By default, Amazon S3 uses the STANDARD Storage Class to store newly created @@ -35121,7 +35419,7 @@ _ struct{} `type:"structure"` // Indicates whether the uploaded object uses an S3 Bucket Key for server-side - // encryption with Amazon Web Services KMS (SSE-KMS). + // encryption with Key Management Service (KMS) keys (SSE-KMS). BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be @@ -35182,27 +35480,26 @@ // If present, specifies the Amazon Web Services KMS Encryption Context to use // for object encryption. The value of this header is a base64-encoded UTF-8 - // string holding JSON with the encryption context key-value pairs. + // string holding JSON with the encryption context key-value pairs. This value + // is stored as object metadata and automatically gets passed on to Amazon Web + // Services KMS for future GetObject or CopyObject operations on this object. // // SSEKMSEncryptionContext is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by PutObjectOutput's // String and GoString methods. SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` - // If x-amz-server-side-encryption is present and has the value of aws:kms, - // this header specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetric customer managed key that was used for - // the object. + // If x-amz-server-side-encryption has a valid value of aws:kms or aws:kms:dsse, + // this header specifies the ID of the Key Management Service (KMS) symmetric + // encryption customer managed key that was used for the object. // // SSEKMSKeyId is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by PutObjectOutput's // String and GoString methods. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - // If you specified server-side encryption either with an Amazon Web Services - // KMS key or Amazon S3-managed encryption key in your PUT request, the response - // includes this header. It confirms the encryption algorithm that Amazon S3 - // used to encrypt the object. + // The server-side encryption algorithm used when storing this object in Amazon + // S3 (for example, AES256, aws:kms, aws:kms:dsse). 
ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` // Version of the object. @@ -35543,12 +35840,12 @@ // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // - // When using this action with Amazon S3 on Outposts, you must direct requests + // When you use this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // you use this action with S3 on Outposts through the Amazon Web Services SDKs, + // you provide the Outposts access point ARN in place of the bucket name. For + // more information about S3 on Outposts ARNs, see What is S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field @@ -35929,7 +36226,8 @@ Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true" enum:"Event"` // Specifies object key name filtering rules. For information about key name - // filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // filtering, see Configuring event notifications using object key name filtering + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html) // in the Amazon S3 User Guide. Filter *NotificationConfigurationFilter `type:"structure"` @@ -36929,12 +37227,12 @@ // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // - // When using this action with Amazon S3 on Outposts, you must direct requests + // When you use this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // you use this action with S3 on Outposts through the Amazon Web Services SDKs, + // you provide the Outposts access point ARN in place of the bucket name. For + // more information about S3 on Outposts ARNs, see What is S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field @@ -37334,7 +37632,7 @@ // Specifies the days since the initiation of an incomplete multipart upload // that Amazon S3 will wait before permanently removing all parts of the upload. 
// For more information, see Aborting Incomplete Multipart Uploads Using a Bucket - // Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) + // Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) // in the Amazon S3 User Guide. AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"` @@ -37469,9 +37767,8 @@ type SSEKMS struct { _ struct{} `locationName:"SSE-KMS" type:"structure"` - // Specifies the ID of the Amazon Web Services Key Management Service (Amazon - // Web Services KMS) symmetric customer managed key to use for encrypting inventory - // reports. + // Specifies the ID of the Key Management Service (KMS) symmetric encryption + // customer managed key to use for encrypting inventory reports. // // KeyId is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by SSEKMS's @@ -38157,18 +38454,19 @@ // and only if SSEAlgorithm is set to aws:kms. // // You can specify the key ID or the Amazon Resource Name (ARN) of the KMS key. - // However, if you are using encryption with cross-account or Amazon Web Services - // service operations you must use a fully qualified KMS key ARN. For more information, - // see Using encryption for cross-account operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html#bucket-encryption-update-bucket-policy). + // If you use a key ID, you can run into a LogDestination undeliverable error + // when creating a VPC flow log. // - // For example: + // If you are using encryption with cross-account or Amazon Web Services service + // operations you must use a fully qualified KMS key ARN. For more information, + // see Using encryption for cross-account operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html#bucket-encryption-update-bucket-policy). // // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab // // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab // - // Amazon S3 only supports symmetric KMS keys and not asymmetric KMS keys. For - // more information, see Using symmetric and asymmetric keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) + // Amazon S3 only supports symmetric encryption KMS keys. For more information, + // see Asymmetric keys in Amazon Web Services KMS (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) // in the Amazon Web Services Key Management Service Developer Guide. // // KMSMasterKeyID is a sensitive parameter and its value will be @@ -38966,7 +39264,8 @@ Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true" enum:"Event"` // Specifies object key name filtering rules. For information about key name - // filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // filtering, see Configuring event notifications using object key name filtering + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html) // in the Amazon S3 User Guide. Filter *NotificationConfigurationFilter `type:"structure"` @@ -39173,12 +39472,12 @@ // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. 
// - // When using this action with Amazon S3 on Outposts, you must direct requests + // When you use this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // you use this action with S3 on Outposts through the Amazon Web Services SDKs, + // you provide the Outposts access point ARN in place of the bucket name. For + // more information about S3 on Outposts ARNs, see What is S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field @@ -39525,7 +39824,7 @@ _ struct{} `type:"structure" payload:"CopyPartResult"` // Indicates whether the multipart upload uses an S3 Bucket Key for server-side - // encryption with Amazon Web Services KMS (SSE-KMS). + // encryption with Key Management Service (KMS) keys (SSE-KMS). BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` // Container for all response elements. @@ -39549,9 +39848,8 @@ // verification of the customer-provided encryption key. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - // If present, specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetric customer managed key that was used for - // the object. + // If present, specifies the ID of the Key Management Service (KMS) symmetric + // encryption customer managed key that was used for the object. // // SSEKMSKeyId is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by UploadPartCopyOutput's @@ -39644,12 +39942,12 @@ // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // - // When using this action with Amazon S3 on Outposts, you must direct requests + // When you use this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // you use this action with S3 on Outposts through the Amazon Web Services SDKs, + // you provide the Outposts access point ARN in place of the bucket name. For + // more information about S3 on Outposts ARNs, see What is S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field @@ -39955,7 +40253,7 @@ _ struct{} `type:"structure"` // Indicates whether the multipart upload uses an S3 Bucket Key for server-side - // encryption with Amazon Web Services KMS (SSE-KMS). 
+ // encryption with Key Management Service (KMS) keys (SSE-KMS). BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be @@ -40007,9 +40305,8 @@ // verification of the customer-provided encryption key. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - // If present, specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetric customer managed key was used for the - // object. + // If present, specifies the ID of the Key Management Service (KMS) symmetric + // encryption customer managed key was used for the object. // // SSEKMSKeyId is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by UploadPartOutput's @@ -40428,8 +40725,8 @@ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-fwd-header-x-amz-server-side-encryption-customer-key-MD5" type:"string"` // If present, specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetric customer managed key that was used for - // stored in Amazon S3 object. + // (Amazon Web Services KMS) symmetric encryption customer managed key that + // was used for stored in Amazon S3 object. // // SSEKMSKeyId is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by WriteGetObjectResponseInput's @@ -40441,9 +40738,7 @@ ServerSideEncryption *string `location:"header" locationName:"x-amz-fwd-header-x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` // The integer status code for an HTTP response of a corresponding GetObject - // request. - // - // Status Codes + // request. The following is a list of status codes. 
// // * 200 - OK // @@ -41704,6 +41999,9 @@ // ObjectStorageClassGlacierIr is a ObjectStorageClass enum value ObjectStorageClassGlacierIr = "GLACIER_IR" + + // ObjectStorageClassSnow is a ObjectStorageClass enum value + ObjectStorageClassSnow = "SNOW" ) // ObjectStorageClass_Values returns all elements of the ObjectStorageClass enum @@ -41718,6 +42016,7 @@ ObjectStorageClassDeepArchive, ObjectStorageClassOutposts, ObjectStorageClassGlacierIr, + ObjectStorageClassSnow, } } @@ -41942,6 +42241,9 @@ // ServerSideEncryptionAwsKms is a ServerSideEncryption enum value ServerSideEncryptionAwsKms = "aws:kms" + + // ServerSideEncryptionAwsKmsDsse is a ServerSideEncryption enum value + ServerSideEncryptionAwsKmsDsse = "aws:kms:dsse" ) // ServerSideEncryption_Values returns all elements of the ServerSideEncryption enum @@ -41949,6 +42251,7 @@ return []string{ ServerSideEncryptionAes256, ServerSideEncryptionAwsKms, + ServerSideEncryptionAwsKmsDsse, } } @@ -41995,6 +42298,9 @@ // StorageClassGlacierIr is a StorageClass enum value StorageClassGlacierIr = "GLACIER_IR" + + // StorageClassSnow is a StorageClass enum value + StorageClassSnow = "SNOW" ) // StorageClass_Values returns all elements of the StorageClass enum @@ -42009,6 +42315,7 @@ StorageClassDeepArchive, StorageClassOutposts, StorageClassGlacierIr, + StorageClassSnow, } } diff -Nru temporal-1.21.5-1/src/vendor/github.com/aws/aws-sdk-go/service/sts/api.go temporal-1.22.5/src/vendor/github.com/aws/aws-sdk-go/service/sts/api.go --- temporal-1.21.5-1/src/vendor/github.com/aws/aws-sdk-go/service/sts/api.go 2023-09-29 14:03:30.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/aws/aws-sdk-go/service/sts/api.go 2024-02-23 09:46:09.000000000 +0000 @@ -85,9 +85,9 @@ // assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. // -// When you create a role, you create two policies: A role trust policy that -// specifies who can assume the role and a permissions policy that specifies -// what can be done with the role. You specify the trusted principal who is +// When you create a role, you create two policies: a role trust policy that +// specifies who can assume the role, and a permissions policy that specifies +// what can be done with the role. You specify the trusted principal that is // allowed to assume the role in the role trust policy. // // To assume a role from a different account, your Amazon Web Services account @@ -96,9 +96,9 @@ // are allowed to delegate that access to users in the account. // // A user who wants to access a role in a different account must also have permissions -// that are delegated from the user account administrator. The administrator -// must attach a policy that allows the user to call AssumeRole for the ARN -// of the role in the other account. +// that are delegated from the account administrator. The administrator must +// attach a policy that allows the user to call AssumeRole for the ARN of the +// role in the other account. // // To allow a user to assume a role in the same account, you can do either of // the following: @@ -517,10 +517,8 @@ // a user. You can also supply the user with a consistent identity throughout // the lifetime of an application. 
// -// To learn more about Amazon Cognito, see Amazon Cognito Overview (https://docs.aws.amazon.com/mobile/sdkforandroid/developerguide/cognito-auth.html#d0e840) -// in Amazon Web Services SDK for Android Developer Guide and Amazon Cognito -// Overview (https://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-auth.html#d0e664) -// in the Amazon Web Services SDK for iOS Developer Guide. +// To learn more about Amazon Cognito, see Amazon Cognito identity pools (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html) +// in Amazon Cognito Developer Guide. // // Calling AssumeRoleWithWebIdentity does not require the use of Amazon Web // Services security credentials. Therefore, you can distribute an application @@ -984,11 +982,11 @@ // call the operation. // // No permissions are required to perform this operation. If an administrator -// adds a policy to your IAM user or role that explicitly denies access to the -// sts:GetCallerIdentity action, you can still perform this operation. Permissions -// are not required because the same information is returned when an IAM user -// or role is denied access. To view an example response, see I Am Not Authorized -// to Perform: iam:DeleteVirtualMFADevice (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_access-denied-delete-mfa) +// attaches a policy to your identity that explicitly denies access to the sts:GetCallerIdentity +// action, you can still perform this operation. Permissions are not required +// because the same information is returned when access is denied. To view an +// example response, see I Am Not Authorized to Perform: iam:DeleteVirtualMFADevice +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_access-denied-delete-mfa) // in the IAM User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -1063,18 +1061,26 @@ // GetFederationToken API operation for AWS Security Token Service. // // Returns a set of temporary security credentials (consisting of an access -// key ID, a secret access key, and a security token) for a federated user. -// A typical use is in a proxy application that gets temporary security credentials -// on behalf of distributed applications inside a corporate network. You must -// call the GetFederationToken operation using the long-term security credentials -// of an IAM user. As a result, this call is appropriate in contexts where those -// credentials can be safely stored, usually in a server-based application. +// key ID, a secret access key, and a security token) for a user. A typical +// use is in a proxy application that gets temporary security credentials on +// behalf of distributed applications inside a corporate network. +// +// You must call the GetFederationToken operation using the long-term security +// credentials of an IAM user. As a result, this call is appropriate in contexts +// where those credentials can be safeguarded, usually in a server-based application. // For a comparison of GetFederationToken with the other API operations that // produce temporary credentials, see Requesting Temporary Security Credentials // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) // and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) // in the IAM User Guide. 
// +// Although it is possible to call GetFederationToken using the security credentials +// of an Amazon Web Services account root user rather than an IAM user that +// you create for the purpose of a proxy application, we do not recommend it. +// For more information, see Safeguard your root user credentials and don't +// use them for everyday tasks (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#lock-away-credentials) +// in the IAM User Guide. +// // You can create a mobile-based or browser-based app that can authenticate // users using a web identity provider like Login with Amazon, Facebook, Google, // or an OpenID Connect-compatible identity provider. In this case, we recommend @@ -1083,21 +1089,13 @@ // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity) // in the IAM User Guide. // -// You can also call GetFederationToken using the security credentials of an -// Amazon Web Services account root user, but we do not recommend it. Instead, -// we recommend that you create an IAM user for the purpose of the proxy application. -// Then attach a policy to the IAM user that limits federated users to only -// the actions and resources that they need to access. For more information, -// see IAM Best Practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html) -// in the IAM User Guide. -// // # Session duration // // The temporary credentials are valid for the specified duration, from 900 // seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours). The default // session duration is 43,200 seconds (12 hours). Temporary credentials obtained -// by using the Amazon Web Services account root user credentials have a maximum -// duration of 3,600 seconds (1 hour). +// by using the root user credentials have a maximum duration of 3,600 seconds +// (1 hour). // // # Permissions // @@ -1267,12 +1265,13 @@ // or IAM user. The credentials consist of an access key ID, a secret access // key, and a security token. Typically, you use GetSessionToken if you want // to use MFA to protect programmatic calls to specific Amazon Web Services -// API operations like Amazon EC2 StopInstances. MFA-enabled IAM users would -// need to call GetSessionToken and submit an MFA code that is associated with -// their MFA device. Using the temporary security credentials that are returned -// from the call, IAM users can then make programmatic calls to API operations -// that require MFA authentication. If you do not supply a correct MFA code, -// then the API returns an access denied error. For a comparison of GetSessionToken +// API operations like Amazon EC2 StopInstances. +// +// MFA-enabled IAM users must call GetSessionToken and submit an MFA code that +// is associated with their MFA device. Using the temporary security credentials +// that the call returns, IAM users can then make programmatic calls to API +// operations that require MFA authentication. An incorrect MFA code causes +// the API to return an access denied error. 
For a comparison of GetSessionToken // with the other API operations that produce temporary credentials, see Requesting // Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) // and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) @@ -1287,13 +1286,12 @@ // # Session Duration // // The GetSessionToken operation must be called by using the long-term Amazon -// Web Services security credentials of the Amazon Web Services account root -// user or an IAM user. Credentials that are created by IAM users are valid -// for the duration that you specify. This duration can range from 900 seconds -// (15 minutes) up to a maximum of 129,600 seconds (36 hours), with a default -// of 43,200 seconds (12 hours). Credentials based on account credentials can -// range from 900 seconds (15 minutes) up to 3,600 seconds (1 hour), with a -// default of 1 hour. +// Web Services security credentials of an IAM user. Credentials that are created +// by IAM users are valid for the duration that you specify. This duration can +// range from 900 seconds (15 minutes) up to a maximum of 129,600 seconds (36 +// hours), with a default of 43,200 seconds (12 hours). Credentials based on +// account credentials can range from 900 seconds (15 minutes) up to 3,600 seconds +// (1 hour), with a default of 1 hour. // // # Permissions // @@ -1305,20 +1303,20 @@ // // - You cannot call any STS API except AssumeRole or GetCallerIdentity. // -// We recommend that you do not call GetSessionToken with Amazon Web Services -// account root user credentials. Instead, follow our best practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users) -// by creating one or more IAM users, giving them the necessary permissions, -// and using IAM users for everyday interaction with Amazon Web Services. -// -// The credentials that are returned by GetSessionToken are based on permissions -// associated with the user whose credentials were used to call the operation. -// If GetSessionToken is called using Amazon Web Services account root user -// credentials, the temporary credentials have root user permissions. Similarly, -// if GetSessionToken is called using the credentials of an IAM user, the temporary -// credentials have the same permissions as the IAM user. +// The credentials that GetSessionToken returns are based on permissions associated +// with the IAM user whose credentials were used to call the operation. The +// temporary credentials have the same permissions as the IAM user. +// +// Although it is possible to call GetSessionToken using the security credentials +// of an Amazon Web Services account root user rather than an IAM user, we do +// not recommend it. If GetSessionToken is called using root user credentials, +// the temporary credentials have root user permissions. 
For more information, +// see Safeguard your root user credentials and don't use them for everyday +// tasks (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#lock-away-credentials) +// in the IAM User Guide // // For more information about using GetSessionToken to create temporary credentials, -// go to Temporary Credentials for Users in Untrusted Environments (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken) +// see Temporary Credentials for Users in Untrusted Environments (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken) // in the IAM User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -1900,8 +1898,12 @@ // For more information, see Configuring a Relying Party and Adding Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html) // in the IAM User Guide. // + // SAMLAssertion is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by AssumeRoleWithSAMLInput's + // String and GoString methods. + // // SAMLAssertion is a required field - SAMLAssertion *string `min:"4" type:"string" required:"true"` + SAMLAssertion *string `min:"4" type:"string" required:"true" sensitive:"true"` } // String returns the string representation. @@ -2036,7 +2038,7 @@ // IAM. // // The combination of NameQualifier and Subject can be used to uniquely identify - // a federated user. + // a user. // // The following pseudocode shows how the hash value is calculated: // @@ -2266,8 +2268,12 @@ // the user who is using your application with a web identity provider before // the application makes an AssumeRoleWithWebIdentity call. // + // WebIdentityToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by AssumeRoleWithWebIdentityInput's + // String and GoString methods. + // // WebIdentityToken is a required field - WebIdentityToken *string `min:"4" type:"string" required:"true"` + WebIdentityToken *string `min:"4" type:"string" required:"true" sensitive:"true"` } // String returns the string representation. @@ -2573,8 +2579,12 @@ // The secret access key that can be used to sign requests. // + // SecretAccessKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by Credentials's + // String and GoString methods. + // // SecretAccessKey is a required field - SecretAccessKey *string `type:"string" required:"true"` + SecretAccessKey *string `type:"string" required:"true" sensitive:"true"` // The token that users must pass to the service API to use the temporary credentials. // @@ -2922,10 +2932,9 @@ // The duration, in seconds, that the session should last. Acceptable durations // for federation sessions range from 900 seconds (15 minutes) to 129,600 seconds // (36 hours), with 43,200 seconds (12 hours) as the default. Sessions obtained - // using Amazon Web Services account root user credentials are restricted to - // a maximum of 3,600 seconds (one hour). If the specified duration is longer - // than one hour, the session obtained by using root user credentials defaults - // to one hour. + // using root user credentials are restricted to a maximum of 3,600 seconds + // (one hour). If the specified duration is longer than one hour, the session + // obtained by using root user credentials defaults to one hour. 
DurationSeconds *int64 `min:"900" type:"integer"` // The name of the federated user. The name is used as an identifier for the diff -Nru temporal-1.21.5-1/src/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go temporal-1.22.5/src/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go --- temporal-1.21.5-1/src/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go 2023-09-29 14:03:30.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go 2024-02-23 09:46:09.000000000 +0000 @@ -4,10 +4,9 @@ // requests to AWS Security Token Service. // // Security Token Service (STS) enables you to request temporary, limited-privilege -// credentials for Identity and Access Management (IAM) users or for users that -// you authenticate (federated users). This guide provides descriptions of the -// STS API. For more information about using this service, see Temporary Security -// Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html). +// credentials for users. This guide provides descriptions of the STS API. For +// more information about using this service, see Temporary Security Credentials +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html). // // See https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15 for more information on this service. // diff -Nru temporal-1.21.5-1/src/vendor/github.com/benbjohnson/clock/clock.go temporal-1.22.5/src/vendor/github.com/benbjohnson/clock/clock.go --- temporal-1.21.5-1/src/vendor/github.com/benbjohnson/clock/clock.go 2023-09-29 14:03:30.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/benbjohnson/clock/clock.go 2024-02-23 09:46:09.000000000 +0000 @@ -74,7 +74,10 @@ // Mock represents a mock clock that only moves forward programmically. // It can be preferable to a real-time clock when testing time-based functionality. type Mock struct { - mu sync.Mutex + // mu protects all other fields in this struct, and the data that they + // point to. + mu sync.Mutex + now time.Time // current time timers clockTimers // tickers & timers } @@ -89,7 +92,9 @@ // This should only be called from a single goroutine at a time. func (m *Mock) Add(d time.Duration) { // Calculate the final current time. + m.mu.Lock() t := m.now.Add(d) + m.mu.Unlock() // Continue to execute timers until there are no more before the new time. for { @@ -126,6 +131,23 @@ gosched() } +// WaitForAllTimers sets the clock until all timers are expired +func (m *Mock) WaitForAllTimers() time.Time { + // Continue to execute timers until there are no more + for { + m.mu.Lock() + if len(m.timers) == 0 { + m.mu.Unlock() + return m.Now() + } + + sort.Sort(m.timers) + next := m.timers[len(m.timers)-1].Next() + m.mu.Unlock() + m.Set(next) + } +} + // runNextTimer executes the next timer in chronological order and moves the // current time to the timer's next tick time. The next time is not executed if // its next time is after the max time. Returns true if a timer was executed. @@ -150,10 +172,11 @@ // Move "now" forward and unlock clock. m.now = t.Next() + now := m.now m.mu.Unlock() // Execute timer. - t.Tick(m.now) + t.Tick(now) return true } @@ -162,12 +185,20 @@ return m.Timer(d).C } -// AfterFunc waits for the duration to elapse and then executes a function. +// AfterFunc waits for the duration to elapse and then executes a function in its own goroutine. // A Timer is returned that can be stopped. 
func (m *Mock) AfterFunc(d time.Duration, f func()) *Timer { - t := m.Timer(d) - t.C = nil - t.fn = f + m.mu.Lock() + defer m.mu.Unlock() + ch := make(chan time.Time, 1) + t := &Timer{ + c: ch, + fn: f, + mock: m, + next: m.now.Add(d), + stopped: false, + } + m.timers = append(m.timers, (*internalTimer)(t)) return t } @@ -219,7 +250,6 @@ // Timer creates a new instance of Timer. func (m *Mock) Timer(d time.Duration) *Timer { m.mu.Lock() - defer m.mu.Unlock() ch := make(chan time.Time, 1) t := &Timer{ C: ch, @@ -229,9 +259,14 @@ stopped: false, } m.timers = append(m.timers, (*internalTimer)(t)) + now := m.now + m.mu.Unlock() + m.runNextTimer(now) return t } +// removeClockTimer removes a timer from m.timers. m.mu MUST be held +// when this method is called. func (m *Mock) removeClockTimer(t clockTimer) { for i, timer := range m.timers { if timer == t { @@ -313,7 +348,7 @@ t.mock.mu.Lock() if t.fn != nil { // defer function execution until the lock is released, and - defer t.fn() + defer func() { go t.fn() }() } else { t.c <- now } @@ -324,12 +359,13 @@ // Ticker holds a channel that receives "ticks" at regular intervals. type Ticker struct { - C <-chan time.Time - c chan time.Time - ticker *time.Ticker // realtime impl, if set - next time.Time // next tick time - mock *Mock // mock clock, if set - d time.Duration // time between ticks + C <-chan time.Time + c chan time.Time + ticker *time.Ticker // realtime impl, if set + next time.Time // next tick time + mock *Mock // mock clock, if set + d time.Duration // time between ticks + stopped bool // True if stopped, false if running } // Stop turns off the ticker. @@ -339,6 +375,7 @@ } else { t.mock.mu.Lock() t.mock.removeClockTimer((*internalTicker)(t)) + t.stopped = true t.mock.mu.Unlock() } } @@ -353,6 +390,11 @@ t.mock.mu.Lock() defer t.mock.mu.Unlock() + if t.stopped { + t.mock.timers = append(t.mock.timers, (*internalTicker)(t)) + t.stopped = false + } + t.d = dur t.next = t.mock.now.Add(dur) } @@ -365,7 +407,9 @@ case t.c <- now: default: } + t.mock.mu.Lock() t.next = now.Add(t.d) + t.mock.mu.Unlock() gosched() } diff -Nru temporal-1.21.5-1/src/vendor/github.com/brianvoe/gofakeit/v6/BENCHMARKS.md temporal-1.22.5/src/vendor/github.com/brianvoe/gofakeit/v6/BENCHMARKS.md --- temporal-1.21.5-1/src/vendor/github.com/brianvoe/gofakeit/v6/BENCHMARKS.md 2023-09-29 14:03:30.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/brianvoe/gofakeit/v6/BENCHMARKS.md 2024-02-23 09:46:09.000000000 +0000 @@ -1,176 +1,458 @@ go test -bench=. 
-benchmem -goos: darwin -goarch: amd64 -pkg: github.com/brianvoe/gofakeit +goos: linux +goarch: amd64 +pkg: github.com/brianvoe/gofakeit/v6 +cpu: 11th Gen Intel(R) Core(TM) i7-1165G7 @ 2.80GHz Table generated with tablesgenerator.com/markdown_tables -| Benchmark | Ops | CPU | MEM | MEM alloc | -|---------------------------------|-----------|-------------|------------|--------------| -| BenchmarkAddress-16 | 797298 | 1649 ns/op | 248 B/op | 7 allocs/op | -| BenchmarkStreet-16 | 1987233 | 601 ns/op | 62 B/op | 3 allocs/op | -| BenchmarkStreetNumber-16 | 4808812 | 252 ns/op | 36 B/op | 2 allocs/op | -| BenchmarkStreetPrefix-16 | 12750800 | 83.0 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkStreetName-16 | 14026328 | 81.7 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkStreetSuffix-16 | 13836478 | 81.2 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkCity-16 | 4617508 | 245 ns/op | 15 B/op | 1 allocs/op | -| BenchmarkState-16 | 12095868 | 86.3 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkStateAbr-16 | 13337152 | 83.5 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkZip-16 | 6264339 | 201 ns/op | 5 B/op | 1 allocs/op | -| BenchmarkCountry-16 | 12378775 | 85.4 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkCountryAbr-16 | 12770610 | 86.2 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkLatitude-16 | 30935530 | 32.7 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkLongitude-16 | 35305698 | 32.4 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkLatitudeInRange-16 | 35285907 | 34.0 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkLongitudeInRange-16 | 29716158 | 34.0 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkPetName-16 | 15559858 | 69.6 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkAnimal-16 | 15493616 | 71.8 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkAnimalType-16 | 15802927 | 72.9 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkFarmAnimal-16 | 13610484 | 81.9 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkCat-16 | 14966256 | 76.7 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkDog-16 | 12833390 | 88.0 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkUsername-16 | 5720742 | 220 ns/op | 16 B/op | 2 allocs/op | -| BenchmarkPassword-16 | 2016339 | 593 ns/op | 304 B/op | 6 allocs/op | -| BenchmarkBeerName-16 | 15339746 | 72.4 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkBeerStyle-16 | 12902784 | 86.1 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkBeerHop-16 | 15131584 | 71.5 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkBeerYeast-16 | 14747238 | 73.8 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkBeerMalt-16 | 14982421 | 78.0 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkBeerIbu-16 | 20595496 | 53.4 ns/op | 8 B/op | 1 allocs/op | -| BenchmarkBeerAlcohol-16 | 3921880 | 299 ns/op | 40 B/op | 3 allocs/op | -| BenchmarkBeerBlg-16 | 4150712 | 300 ns/op | 48 B/op | 3 allocs/op | -| BenchmarkCar-16 | 1996923 | 597 ns/op | 96 B/op | 1 allocs/op | -| BenchmarkCarType-16 | 17076163 | 65.8 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkCarFuelType-16 | 14844217 | 73.2 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkCarTransmissionType-16 | 16047379 | 68.6 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkCarMaker-16 | 14501310 | 76.8 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkCarModel-16 | 12503469 | 87.9 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkColor-16 | 14812000 | 75.8 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkSafeColor-16 | 17647850 | 66.7 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkHexColor-16 | 3710928 | 326 ns/op | 24 B/op | 3 allocs/op | -| BenchmarkRGBColor-16 | 12641104 | 83.3 ns/op | 32 B/op | 1 allocs/op | -| BenchmarkCompany-16 | 17277220 | 67.9 ns/op | 0 B/op | 0 allocs/op | -| 
BenchmarkCompanySuffix-16 | 17099479 | 60.3 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkBuzzWord-16 | 15963231 | 68.7 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkBS-16 | 15149085 | 71.2 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkJob-16 | 3203989 | 383 ns/op | 64 B/op | 1 allocs/op | -| BenchmarkJobTitle-16 | 15232904 | 70.7 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkJobDescriptor-16 | 15603652 | 69.8 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkJobLevel-16 | 14281743 | 77.7 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkEmoji-16 | 15016417 | 75.3 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkEmojiDescription-16 | 14764699 | 76.0 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkEmojiCategory-16 | 13463936 | 78.2 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkEmojiAlias-16 | 12291789 | 87.5 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkEmojiTag-16 | 13451284 | 82.3 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkFileMimeType-16 | 16654501 | 67.0 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkFileExtension-16 | 13656126 | 73.4 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkFruit-16 | 15039096 | 74.8 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkVegetable-16 | 14397543 | 76.5 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkBreakfast-16 | 5960095 | 181 ns/op | 35 B/op | 1 allocs/op | -| BenchmarkLunch-16 | 6350367 | 189 ns/op | 37 B/op | 1 allocs/op | -| BenchmarkDinner-16 | 6245412 | 177 ns/op | 37 B/op | 1 allocs/op | -| BenchmarkSnack-16 | 5891965 | 192 ns/op | 36 B/op | 1 allocs/op | -| BenchmarkDessert-16 | 6603031 | 186 ns/op | 34 B/op | 2 allocs/op | -| BenchmarkGamertag-16 | 3237366 | 352 ns/op | 36 B/op | 3 allocs/op | -| BenchmarkGenerate-16 | 457622 | 2652 ns/op | 916 B/op | 23 allocs/op | -| BenchmarkMap-16 | 290334 | 4145 ns/op | 1082 B/op | 16 allocs/op | -| BenchmarkHackerPhrase-16 | 200760 | 5623 ns/op | 2909 B/op | 37 allocs/op | -| BenchmarkHackerAbbreviation-16 | 15939142 | 71.7 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkHackerAdjective-16 | 14837203 | 70.6 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkHackerNoun-16 | 14633212 | 72.7 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkHackerVerb-16 | 13376676 | 82.3 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkHackerIngverb-16 | 14869647 | 72.6 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkReplaceWithNumbers-16 | 4214044 | 287 ns/op | 32 B/op | 1 allocs/op | -| BenchmarkHipsterWord-16 | 14753112 | 71.2 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkHipsterSentence-16 | 871815 | 1396 ns/op | 305 B/op | 3 allocs/op | -| BenchmarkHipsterParagraph-16 | 42579 | 28624 ns/op | 10560 B/op | 48 allocs/op | -| BenchmarkImageURL-16 | 10556988 | 121 ns/op | 38 B/op | 3 allocs/op | -| BenchmarkDomainName-16 | 2852584 | 428 ns/op | 53 B/op | 2 allocs/op | -| BenchmarkDomainSuffix-16 | 15614646 | 70.2 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkURL-16 | 1128352 | 1056 ns/op | 189 B/op | 4 allocs/op | -| BenchmarkHTTPMethod-16 | 15604741 | 72.3 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkIPv4Address-16 | 3539780 | 332 ns/op | 48 B/op | 5 allocs/op | -| BenchmarkIPv6Address-16 | 2419968 | 490 ns/op | 96 B/op | 7 allocs/op | -| BenchmarkMacAddress-16 | 2003596 | 619 ns/op | 79 B/op | 6 allocs/op | -| BenchmarkHTTPStatusCode-16 | 22232200 | 50.6 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkHTTPStatusCodeSimple-16 | 21198192 | 48.4 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkLogLevel-16 | 13729278 | 78.7 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkUserAgent-16 | 1000000 | 1044 ns/op | 300 B/op | 5 allocs/op | -| BenchmarkChromeUserAgent-16 | 1591886 | 783 ns/op | 188 B/op | 5 allocs/op | -| 
BenchmarkFirefoxUserAgent-16 | 742941 | 1400 ns/op | 362 B/op | 6 allocs/op | -| BenchmarkSafariUserAgent-16 | 930159 | 1306 ns/op | 551 B/op | 7 allocs/op | -| BenchmarkOperaUserAgent-16 | 1454205 | 829 ns/op | 216 B/op | 5 allocs/op | -| BenchmarkLanguage-16 | 15265677 | 71.6 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkLanguageAbbreviation-16 | 16144437 | 68.1 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkProgrammingLanguage-16 | 16125788 | 71.6 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkProgrammingLanguageBest-16 | 1000000000 | 0.229 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkBool-16 | 79448815 | 15.1 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkUUID-16 | 11553009 | 106 ns/op | 48 B/op | 1 allocs/op | -| BenchmarkNumber-16 | 59585859 | 17.8 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkUint8-16 | 79947612 | 15.1 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkUint16-16 | 79249844 | 15.0 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkUint32-16 | 81112372 | 14.8 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkUint64-16 | 50800922 | 20.5 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkInt8-16 | 66494482 | 15.1 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkInt16-16 | 79505629 | 15.1 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkInt32-16 | 79967979 | 15.1 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkInt64-16 | 60566858 | 19.8 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkFloat32-16 | 84251548 | 14.1 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkFloat32Range-16 | 80528571 | 14.9 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkFloat64-16 | 94149510 | 12.8 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkFloat64Range-16 | 94346104 | 12.9 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkShuffleInts-16 | 9075564 | 130 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkCurrency-16 | 11273227 | 115 ns/op | 32 B/op | 1 allocs/op | -| BenchmarkCurrencyShort-16 | 15565836 | 72.2 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkCurrencyLong-16 | 15420937 | 71.3 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkPrice-16 | 80781411 | 15.0 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkCreditCard-16 | 1637452 | 751 ns/op | 88 B/op | 4 allocs/op | -| BenchmarkCreditCardType-16 | 17678868 | 64.4 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkCreditCardNumber-16 | 3361524 | 349 ns/op | 16 B/op | 1 allocs/op | -| BenchmarkCreditCardNumberLuhn-16 | 279769 | 3620 ns/op | 160 B/op | 10 allocs/op | -| BenchmarkCreditCardExp-16 | 7523824 | 150 ns/op | 5 B/op | 1 allocs/op | -| BenchmarkCreditCardCvv-16 | 15185973 | 73.6 ns/op | 3 B/op | 1 allocs/op | -| BenchmarkName-16 | 5436148 | 215 ns/op | 17 B/op | 1 allocs/op | -| BenchmarkFirstName-16 | 14362125 | 72.6 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkLastName-16 | 15530926 | 72.7 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkNamePrefix-16 | 18074462 | 66.0 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkNameSuffix-16 | 14657012 | 73.9 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkSSN-16 | 18693813 | 68.9 ns/op | 16 B/op | 1 allocs/op | -| BenchmarkGender-16 | 72023787 | 15.7 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkPerson-16 | 282908 | 4377 ns/op | 752 B/op | 24 allocs/op | -| BenchmarkContact-16 | 1369327 | 911 ns/op | 146 B/op | 6 allocs/op | -| BenchmarkPhone-16 | 6015615 | 204 ns/op | 16 B/op | 1 allocs/op | -| BenchmarkPhoneFormatted-16 | 3928914 | 296 ns/op | 16 B/op | 1 allocs/op | -| BenchmarkEmail-16 | 1901041 | 653 ns/op | 98 B/op | 4 allocs/op | -| BenchmarkLetter-16 | 65959573 | 17.3 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkDigit-16 | 58815334 | 17.4 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkNumerify-16 | 5526314 | 207 ns/op | 16 B/op | 1 
allocs/op | -| BenchmarkLexify-16 | 9712312 | 129 ns/op | 8 B/op | 1 allocs/op | -| BenchmarkShuffleStrings-16 | 8997901 | 119 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkDate-16 | 5949220 | 194 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkDateRange-16 | 7122076 | 166 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkMonth-16 | 56979296 | 19.3 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkDay-16 | 61808844 | 17.0 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkWeekDay-16 | 62598561 | 19.3 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkYear-16 | 14533374 | 76.6 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkHour-16 | 62130793 | 17.0 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkMinute-16 | 66836017 | 17.1 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkSecond-16 | 69860632 | 17.1 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkNanoSecond-16 | 66957362 | 17.8 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkTimeZone-16 | 13841594 | 78.7 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkTimeZoneFull-16 | 12788362 | 89.2 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkTimeZoneAbv-16 | 14413452 | 77.8 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkTimeZoneOffset-16 | 10699014 | 103 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkNoun-16 | 15025992 | 74.6 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkVerb-16 | 13394044 | 82.2 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkAdverb-16 | 13968967 | 78.9 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkPreposition-16 | 14575834 | 81.1 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkAdjective-16 | 13957762 | 82.9 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkWord-16 | 11083752 | 98.2 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkSentence-16 | 647954 | 1642 ns/op | 251 B/op | 2 allocs/op | -| BenchmarkParagraph-16 | 34026 | 35489 ns/op | 9587 B/op | 47 allocs/op | -| BenchmarkLoremIpsumWord-16 | 15156211 | 70.8 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkLoremIpsumSentence-16 | 908371 | 1293 ns/op | 229 B/op | 2 allocs/op | -| BenchmarkLoremIpsumParagraph-16 | 41920 | 27860 ns/op | 9214 B/op | 45 allocs/op | -| BenchmarkQuestion-16 | 1000000 | 1152 ns/op | 315 B/op | 4 allocs/op | -| BenchmarkQuote-16 | 924054 | 1263 ns/op | 268 B/op | 3 allocs/op | -| BenchmarkPhrase-16 | 11034157 | 94.6 ns/op | 0 B/op | 0 allocs/op | \ No newline at end of file +| Benchmark | Ops | CPU | MEM | MEM alloc | +|--------------------------------------------------|------------|------------------|----------------|-------------------| +| BenchmarkAddress/package-8 | 1270872 | 940.1 ns/op | 197 B/op | 5 allocs/op | +| BenchmarkAddress/Faker_math-8 | 1238563 | 1042 ns/op | 197 B/op | 5 allocs/op | +| BenchmarkAddress/Faker_crypto-8 | 139857 | 7862 ns/op | 197 B/op | 5 allocs/op | +| BenchmarkStreet/package-8 | 2955518 | 422.6 ns/op | 26 B/op | 2 allocs/op | +| BenchmarkStreet/Faker_math-8 | 3027224 | 427.3 ns/op | 26 B/op | 2 allocs/op | +| BenchmarkStreet/Faker_crypto-8 | 352165 | 3559 ns/op | 26 B/op | 2 allocs/op | +| BenchmarkStreetNumber/package-8 | 6842211 | 149.2 ns/op | 4 B/op | 1 allocs/op | +| BenchmarkStreetNumber/Faker_math-8 | 6924288 | 158.8 ns/op | 4 B/op | 1 allocs/op | +| BenchmarkStreetNumber/Faker_crypto-8 | 549988 | 1900 ns/op | 4 B/op | 1 allocs/op | +| BenchmarkStreetPrefix/package-8 | 18441643 | 74.12 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkStreetPrefix/Faker_math-8 | 17888110 | 67.51 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkStreetPrefix/Faker_crypto-8 | 2650390 | 458.9 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkStreetName/package-8 | 18799832 | 62.90 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkStreetName/Faker_math-8 | 16124620 | 63.57 
ns/op | 0 B/op | 0 allocs/op | +| BenchmarkStreetName/Faker_crypto-8 | 2873138 | 428.2 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkStreetSuffix/package-8 | 17192164 | 72.19 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkStreetSuffix/Faker_math-8 | 16545355 | 65.44 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkStreetSuffix/Faker_crypto-8 | 2986934 | 450.9 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkCity/package-8 | 18553683 | 64.93 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkCity/Faker_math-8 | 17648109 | 63.77 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkCity/Faker_crypto-8 | 2567427 | 470.8 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkState/package-8 | 18262387 | 66.25 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkState/Faker_math-8 | 16690209 | 73.21 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkState/Faker_crypto-8 | 2599795 | 401.3 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkStateAbr/package-8 | 17492332 | 63.87 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkStateAbr/Faker_math-8 | 18612169 | 64.82 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkStateAbr/Faker_crypto-8 | 2821579 | 460.0 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkZip/package-8 | 7573238 | 157.1 ns/op | 5 B/op | 1 allocs/op | +| BenchmarkZip/Faker_math-8 | 6644562 | 163.4 ns/op | 5 B/op | 1 allocs/op | +| BenchmarkZip/Faker_crypto-8 | 484525 | 2470 ns/op | 5 B/op | 1 allocs/op | +| BenchmarkCountry/package-8 | 15623450 | 65.65 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkCountry/Faker_math-8 | 17786485 | 76.22 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkCountry/Faker_crypto-8 | 3002818 | 400.3 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkCountryAbr/package-8 | 17296935 | 66.75 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkCountryAbr/Faker_math-8 | 17862819 | 67.41 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkCountryAbr/Faker_crypto-8 | 2931120 | 426.0 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkLatitude/package-8 | 46248466 | 26.11 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkLatitude/Faker_math-8 | 46120956 | 26.00 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkLatitude/Faker_crypto-8 | 3512108 | 366.7 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkLongitude/package-8 | 47443129 | 24.03 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkLongitude/Faker_math-8 | 46691144 | 24.64 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkLongitude/Faker_crypto-8 | 3501789 | 365.8 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkLatitudeInRange/package-8 | 44125588 | 26.96 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkLatitudeInRange/Faker_math-8 | 40113348 | 27.36 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkLatitudeInRange/Faker_crypto-8 | 3227358 | 378.4 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkLongitudeInRange/package-8 | 38948743 | 32.36 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkLongitudeInRange/Faker_math-8 | 36491187 | 27.86 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkLongitudeInRange/Faker_crypto-8 | 3004773 | 350.4 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkPetName/package-8 | 23445927 | 60.81 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkPetName/Faker_math-8 | 23982228 | 53.68 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkPetName/Faker_crypto-8 | 2681886 | 458.0 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkAnimal/package-8 | 23230071 | 55.13 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkAnimal/Faker_math-8 | 21923606 | 53.10 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkAnimal/Faker_crypto-8 | 2680177 | 411.6 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkAnimalType/package-8 | 18826995 | 53.45 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkAnimalType/Faker_math-8 | 22170756 | 63.39 ns/op | 0 B/op | 0 allocs/op | +| 
BenchmarkAnimalType/Faker_crypto-8 | 2780270 | 399.6 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkFarmAnimal/package-8 | 18548028 | 64.87 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkFarmAnimal/Faker_math-8 | 17291526 | 62.47 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkFarmAnimal/Faker_crypto-8 | 2543520 | 409.9 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkCat/package-8 | 21213028 | 68.91 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkCat/Faker_math-8 | 19973062 | 58.74 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkCat/Faker_crypto-8 | 2985601 | 405.2 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkDog/package-8 | 16995627 | 68.15 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkDog/Faker_math-8 | 17296502 | 81.35 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkDog/Faker_crypto-8 | 2530860 | 433.1 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkBird/package-8 | 14445968 | 81.31 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkBird/Faker_math-8 | 14545851 | 82.69 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkBird/Faker_crypto-8 | 2892721 | 420.4 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkAppName/package-8 | 2799828 | 438.6 ns/op | 25 B/op | 1 allocs/op | +| BenchmarkAppName/Faker_math-8 | 2784135 | 431.1 ns/op | 25 B/op | 1 allocs/op | +| BenchmarkAppName/Faker_crypto-8 | 611072 | 1923 ns/op | 25 B/op | 1 allocs/op | +| BenchmarkAppVersion/package-8 | 7552165 | 154.1 ns/op | 7 B/op | 1 allocs/op | +| BenchmarkAppVersion/Faker_math-8 | 8020767 | 156.6 ns/op | 7 B/op | 1 allocs/op | +| BenchmarkAppVersion/Faker_crypto-8 | 875899 | 1209 ns/op | 7 B/op | 1 allocs/op | +| BenchmarkAppAuthor/package-8 | 9596493 | 119.7 ns/op | 8 B/op | 0 allocs/op | +| BenchmarkAppAuthor/Faker_math-8 | 10068729 | 121.0 ns/op | 8 B/op | 0 allocs/op | +| BenchmarkAppAuthor/Faker_crypto-8 | 1212542 | 983.7 ns/op | 8 B/op | 0 allocs/op | +| BenchmarkUsername/package-8 | 6687600 | 174.6 ns/op | 16 B/op | 2 allocs/op | +| BenchmarkUsername/Faker_math-8 | 7233685 | 173.3 ns/op | 16 B/op | 2 allocs/op | +| BenchmarkUsername/Faker_crypto-8 | 616884 | 2166 ns/op | 16 B/op | 2 allocs/op | +| BenchmarkPassword/package-8 | 2966407 | 401.0 ns/op | 336 B/op | 6 allocs/op | +| BenchmarkPassword/Faker_math-8 | 3080845 | 399.8 ns/op | 336 B/op | 6 allocs/op | +| BenchmarkPassword/Faker_crypto-8 | 182074 | 5963 ns/op | 336 B/op | 6 allocs/op | +| BenchmarkBeerName/package-8 | 23768442 | 53.26 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkBeerName/Faker_math-8 | 22010898 | 63.87 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkBeerName/Faker_crypto-8 | 2569424 | 392.6 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkBeerStyle/package-8 | 17567354 | 69.64 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkBeerStyle/Faker_math-8 | 16695721 | 80.73 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkBeerStyle/Faker_crypto-8 | 2710214 | 407.6 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkBeerHop/package-8 | 20877854 | 56.43 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkBeerHop/Faker_math-8 | 22603234 | 65.04 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkBeerHop/Faker_crypto-8 | 2618493 | 419.3 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkBeerYeast/package-8 | 20738073 | 67.89 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkBeerYeast/Faker_math-8 | 21325231 | 67.34 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkBeerYeast/Faker_crypto-8 | 3042529 | 399.8 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkBeerMalt/package-8 | 15756969 | 65.67 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkBeerMalt/Faker_math-8 | 18026910 | 71.42 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkBeerMalt/Faker_crypto-8 | 2949741 | 429.4 ns/op | 0 B/op | 0 allocs/op | +| 
BenchmarkBeerIbu/package-8 | 32683443 | 35.57 ns/op | 8 B/op | 1 allocs/op | +| BenchmarkBeerIbu/Faker_math-8 | 29983339 | 36.03 ns/op | 8 B/op | 1 allocs/op | +| BenchmarkBeerIbu/Faker_crypto-8 | 3094896 | 386.6 ns/op | 8 B/op | 1 allocs/op | +| BenchmarkBeerAlcohol/package-8 | 4744302 | 243.6 ns/op | 32 B/op | 3 allocs/op | +| BenchmarkBeerAlcohol/Faker_math-8 | 4718923 | 252.0 ns/op | 32 B/op | 3 allocs/op | +| BenchmarkBeerAlcohol/Faker_crypto-8 | 1952740 | 656.0 ns/op | 32 B/op | 3 allocs/op | +| BenchmarkBeerBlg/package-8 | 4086861 | 270.6 ns/op | 40 B/op | 3 allocs/op | +| BenchmarkBeerBlg/Faker_math-8 | 4488897 | 259.5 ns/op | 40 B/op | 3 allocs/op | +| BenchmarkBeerBlg/Faker_crypto-8 | 1865367 | 646.7 ns/op | 40 B/op | 3 allocs/op | +| BenchmarkCar/package-8 | 2800782 | 400.5 ns/op | 96 B/op | 1 allocs/op | +| BenchmarkCar/Faker_math-8 | 2938509 | 396.5 ns/op | 96 B/op | 1 allocs/op | +| BenchmarkCar/Faker_crypto-8 | 461906 | 2590 ns/op | 96 B/op | 1 allocs/op | +| BenchmarkCarType/package-8 | 23655384 | 51.72 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkCarType/Faker_math-8 | 25902462 | 50.55 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkCarType/Faker_crypto-8 | 3035287 | 455.8 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkCarFuelType/package-8 | 18750069 | 63.80 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkCarFuelType/Faker_math-8 | 18858705 | 63.15 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkCarFuelType/Faker_crypto-8 | 3028026 | 387.0 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkCarTransmissionType/package-8 | 22570701 | 54.01 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkCarTransmissionType/Faker_math-8 | 21484246 | 64.27 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkCarTransmissionType/Faker_crypto-8 | 3061364 | 387.6 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkCarMaker/package-8 | 17628445 | 68.23 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkCarMaker/Faker_math-8 | 21573310 | 64.19 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkCarMaker/Faker_crypto-8 | 2688284 | 475.5 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkCarModel/package-8 | 18500498 | 73.43 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkCarModel/Faker_math-8 | 16116993 | 66.91 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkCarModel/Faker_crypto-8 | 2487638 | 440.0 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkCelebrityActor/package-8 | 18712833 | 74.12 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkCelebrityActor/Faker_math-8 | 18564168 | 68.96 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkCelebrityActor/Faker_crypto-8 | 2593150 | 415.5 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkCelebrityBusiness/package-8 | 18721152 | 68.98 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkCelebrityBusiness/Faker_math-8 | 16916186 | 70.66 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkCelebrityBusiness/Faker_crypto-8 | 2578786 | 407.7 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkCelebritySport/package-8 | 16716724 | 87.51 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkCelebritySport/Faker_math-8 | 16602294 | 86.41 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkCelebritySport/Faker_crypto-8 | 2919696 | 419.0 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkColor/package-8 | 17871778 | 62.28 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkColor/Faker_math-8 | 21601353 | 62.63 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkColor/Faker_crypto-8 | 3040459 | 463.1 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkNiceColors/package-8 | 81438092 | 14.86 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkNiceColors/Faker_math-8 | 75775309 | 18.52 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkNiceColors/Faker_crypto-8 | 3450939 | 353.9 
ns/op | 0 B/op | 0 allocs/op | +| BenchmarkSafeColor/package-8 | 22775230 | 53.52 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkSafeColor/Faker_math-8 | 24526308 | 59.40 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkSafeColor/Faker_crypto-8 | 3103851 | 413.3 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkHexColor/package-8 | 4640522 | 255.2 ns/op | 24 B/op | 3 allocs/op | +| BenchmarkHexColor/Faker_math-8 | 4723542 | 257.2 ns/op | 24 B/op | 3 allocs/op | +| BenchmarkHexColor/Faker_crypto-8 | 283828 | 4447 ns/op | 24 B/op | 3 allocs/op | +| BenchmarkRGBColor/package-8 | 19721971 | 59.64 ns/op | 24 B/op | 1 allocs/op | +| BenchmarkRGBColor/Faker_math-8 | 18808492 | 67.35 ns/op | 24 B/op | 1 allocs/op | +| BenchmarkRGBColor/Faker_crypto-8 | 1000000 | 1066 ns/op | 24 B/op | 1 allocs/op | +| BenchmarkCompany/package-8 | 22072651 | 48.06 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkCompany/Faker_math-8 | 22528284 | 53.94 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkCompany/Faker_crypto-8 | 2690668 | 402.4 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkCompanySuffix/package-8 | 28169413 | 48.00 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkCompanySuffix/Faker_math-8 | 20685153 | 52.20 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkCompanySuffix/Faker_crypto-8 | 3018765 | 418.0 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkBuzzWord/package-8 | 24238677 | 54.55 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkBuzzWord/Faker_math-8 | 22195419 | 52.30 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkBuzzWord/Faker_crypto-8 | 2840428 | 392.1 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkBS/package-8 | 23481436 | 56.33 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkBS/Faker_math-8 | 23195737 | 65.66 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkBS/Faker_crypto-8 | 3027972 | 419.8 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkJob/package-8 | 4432520 | 253.5 ns/op | 64 B/op | 1 allocs/op | +| BenchmarkJob/Faker_math-8 | 4513154 | 253.7 ns/op | 64 B/op | 1 allocs/op | +| BenchmarkJob/Faker_crypto-8 | 686028 | 1716 ns/op | 64 B/op | 1 allocs/op | +| BenchmarkJobTitle/package-8 | 20079558 | 54.21 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkJobTitle/Faker_math-8 | 21871627 | 54.86 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkJobTitle/Faker_crypto-8 | 3017896 | 413.3 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkJobDescriptor/package-8 | 21579855 | 53.36 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkJobDescriptor/Faker_math-8 | 24638751 | 55.91 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkJobDescriptor/Faker_crypto-8 | 2984810 | 415.9 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkJobLevel/package-8 | 18311070 | 59.35 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkJobLevel/Faker_math-8 | 17051210 | 59.53 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkJobLevel/Faker_crypto-8 | 2991106 | 426.8 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkCSVLookup100-8 | 1468 | 780852 ns/op | 437416 B/op | 5933 allocs/op | +| BenchmarkCSVLookup1000-8 | 151 | 7853471 ns/op | 4224820 B/op | 59612 allocs/op | +| BenchmarkCSVLookup10000-8 | 14 | 78165009 ns/op | 41208010 B/op | 597842 allocs/op | +| BenchmarkCSVLookup100000-8 | 2 | 768800840 ns/op | 437275164 B/op | 5980461 allocs/op | +| BenchmarkEmoji/package-8 | 22212386 | 54.40 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkEmoji/Faker_math-8 | 21471013 | 51.55 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkEmoji/Faker_crypto-8 | 3036081 | 458.1 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkEmojiDescription/package-8 | 18250413 | 57.08 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkEmojiDescription/Faker_math-8 | 21924381 | 57.58 ns/op | 0 B/op | 0 allocs/op | +| 
BenchmarkEmojiDescription/Faker_crypto-8 | 2837050 | 387.5 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkEmojiCategory/package-8 | 21270252 | 55.87 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkEmojiCategory/Faker_math-8 | 21421813 | 59.59 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkEmojiCategory/Faker_crypto-8 | 2635878 | 491.0 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkEmojiAlias/package-8 | 18760875 | 68.20 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkEmojiAlias/Faker_math-8 | 16918242 | 67.60 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkEmojiAlias/Faker_crypto-8 | 2854717 | 488.9 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkEmojiTag/package-8 | 19953885 | 65.43 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkEmojiTag/Faker_math-8 | 18220396 | 72.91 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkEmojiTag/Faker_crypto-8 | 2802847 | 426.2 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkError/package-8 | 1547610 | 786.6 ns/op | 279 B/op | 8 allocs/op | +| BenchmarkError/Faker_math-8 | 1504578 | 794.1 ns/op | 279 B/op | 8 allocs/op | +| BenchmarkError/Faker_crypto-8 | 800712 | 1476 ns/op | 279 B/op | 8 allocs/op | +| BenchmarkErrorObject/package-8 | 6054552 | 190.3 ns/op | 32 B/op | 3 allocs/op | +| BenchmarkErrorObject/Faker_math-8 | 5968180 | 190.3 ns/op | 32 B/op | 3 allocs/op | +| BenchmarkErrorObject/Faker_crypto-8 | 2088008 | 618.0 ns/op | 32 B/op | 3 allocs/op | +| BenchmarkErrorDatabase/package-8 | 5275713 | 212.8 ns/op | 64 B/op | 3 allocs/op | +| BenchmarkErrorDatabase/Faker_math-8 | 5407803 | 217.3 ns/op | 64 B/op | 3 allocs/op | +| BenchmarkErrorDatabase/Faker_crypto-8 | 2005333 | 628.7 ns/op | 63 B/op | 3 allocs/op | +| BenchmarkErrorGRPC/package-8 | 5700810 | 202.9 ns/op | 64 B/op | 3 allocs/op | +| BenchmarkErrorGRPC/Faker_math-8 | 5907589 | 202.5 ns/op | 64 B/op | 3 allocs/op | +| BenchmarkErrorGRPC/Faker_crypto-8 | 2027650 | 643.3 ns/op | 64 B/op | 3 allocs/op | +| BenchmarkErrorHTTP/package-8 | 3182026 | 321.6 ns/op | 157 B/op | 4 allocs/op | +| BenchmarkErrorHTTP/Faker_math-8 | 3667356 | 314.9 ns/op | 157 B/op | 4 allocs/op | +| BenchmarkErrorHTTP/Faker_crypto-8 | 1590696 | 720.2 ns/op | 157 B/op | 4 allocs/op | +| BenchmarkErrorHTTPClient/package-8 | 5745494 | 204.0 ns/op | 52 B/op | 3 allocs/op | +| BenchmarkErrorHTTPClient/Faker_math-8 | 5549187 | 212.8 ns/op | 52 B/op | 3 allocs/op | +| BenchmarkErrorHTTPClient/Faker_crypto-8 | 2011905 | 596.7 ns/op | 52 B/op | 3 allocs/op | +| BenchmarkErrorHTTPServer/package-8 | 5466012 | 214.7 ns/op | 59 B/op | 3 allocs/op | +| BenchmarkErrorHTTPServer/Faker_math-8 | 5542838 | 207.3 ns/op | 59 B/op | 3 allocs/op | +| BenchmarkErrorHTTPServer/Faker_crypto-8 | 1939080 | 633.9 ns/op | 59 B/op | 3 allocs/op | +| BenchmarkErrorRuntime/package-8 | 4245986 | 263.4 ns/op | 150 B/op | 3 allocs/op | +| BenchmarkErrorRuntime/Faker_math-8 | 4355534 | 263.1 ns/op | 150 B/op | 3 allocs/op | +| BenchmarkErrorRuntime/Faker_crypto-8 | 1782044 | 651.4 ns/op | 150 B/op | 3 allocs/op | +| BenchmarkErrorValidation/package-8 | 1659858 | 715.7 ns/op | 268 B/op | 7 allocs/op | +| BenchmarkErrorValidation/Faker_math-8 | 1690849 | 716.4 ns/op | 268 B/op | 7 allocs/op | +| BenchmarkErrorValidation/Faker_crypto-8 | 883600 | 1348 ns/op | 268 B/op | 7 allocs/op | +| BenchmarkFileMimeType/package-8 | 18005230 | 56.88 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkFileMimeType/Faker_math-8 | 21229381 | 54.62 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkFileMimeType/Faker_crypto-8 | 2605701 | 462.5 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkFileExtension/package-8 | 19272264 | 73.07 ns/op | 0 
B/op | 0 allocs/op | +| BenchmarkFileExtension/Faker_math-8 | 20149288 | 60.79 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkFileExtension/Faker_crypto-8 | 2627210 | 423.1 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkCusip/package-8 | 5402995 | 224.9 ns/op | 24 B/op | 2 allocs/op | +| BenchmarkCusip/Faker_math-8 | 5367218 | 221.1 ns/op | 24 B/op | 2 allocs/op | +| BenchmarkCusip/Faker_crypto-8 | 363460 | 2967 ns/op | 24 B/op | 2 allocs/op | +| BenchmarkIsin/package-8 | 1742368 | 701.4 ns/op | 533 B/op | 8 allocs/op | +| BenchmarkIsin/Faker_math-8 | 1653408 | 715.5 ns/op | 533 B/op | 8 allocs/op | +| BenchmarkIsin/Faker_crypto-8 | 330396 | 3583 ns/op | 533 B/op | 8 allocs/op | +| BenchmarkFruit/package-8 | 21421066 | 55.23 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkFruit/Faker_math-8 | 22680361 | 55.68 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkFruit/Faker_crypto-8 | 2914611 | 486.7 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkVegetable/package-8 | 21113413 | 56.44 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkVegetable/Faker_math-8 | 21101716 | 60.98 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkVegetable/Faker_crypto-8 | 2811384 | 467.1 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkBreakfast/package-8 | 8954784 | 127.7 ns/op | 32 B/op | 1 allocs/op | +| BenchmarkBreakfast/Faker_math-8 | 9430814 | 128.8 ns/op | 32 B/op | 1 allocs/op | +| BenchmarkBreakfast/Faker_crypto-8 | 2132481 | 496.5 ns/op | 32 B/op | 1 allocs/op | +| BenchmarkLunch/package-8 | 8934501 | 125.9 ns/op | 34 B/op | 1 allocs/op | +| BenchmarkLunch/Faker_math-8 | 8668546 | 128.9 ns/op | 34 B/op | 1 allocs/op | +| BenchmarkLunch/Faker_crypto-8 | 2216348 | 518.3 ns/op | 34 B/op | 1 allocs/op | +| BenchmarkDinner/package-8 | 9317936 | 125.2 ns/op | 36 B/op | 1 allocs/op | +| BenchmarkDinner/Faker_math-8 | 9023473 | 126.3 ns/op | 36 B/op | 1 allocs/op | +| BenchmarkDinner/Faker_crypto-8 | 2435984 | 518.9 ns/op | 36 B/op | 1 allocs/op | +| BenchmarkDrink/package-8 | 7698025 | 143.4 ns/op | 7 B/op | 2 allocs/op | +| BenchmarkDrink/Faker_math-8 | 8096294 | 139.8 ns/op | 7 B/op | 2 allocs/op | +| BenchmarkDrink/Faker_crypto-8 | 2247427 | 536.2 ns/op | 7 B/op | 2 allocs/op | +| BenchmarkSnack/package-8 | 8109601 | 149.2 ns/op | 32 B/op | 1 allocs/op | +| BenchmarkSnack/Faker_math-8 | 7993006 | 150.5 ns/op | 32 B/op | 1 allocs/op | +| BenchmarkSnack/Faker_crypto-8 | 2214736 | 535.7 ns/op | 32 B/op | 1 allocs/op | +| BenchmarkDessert/package-8 | 8295364 | 133.9 ns/op | 31 B/op | 2 allocs/op | +| BenchmarkDessert/Faker_math-8 | 8610325 | 134.1 ns/op | 31 B/op | 2 allocs/op | +| BenchmarkDessert/Faker_crypto-8 | 2205777 | 507.4 ns/op | 31 B/op | 2 allocs/op | +| BenchmarkGamertag/package-8 | 2111506 | 544.8 ns/op | 83 B/op | 5 allocs/op | +| BenchmarkGamertag/Faker_math-8 | 2203573 | 551.4 ns/op | 83 B/op | 5 allocs/op | +| BenchmarkGamertag/Faker_crypto-8 | 487366 | 2428 ns/op | 83 B/op | 5 allocs/op | +| BenchmarkDice/package-8 | 43259642 | 26.58 ns/op | 8 B/op | 1 allocs/op | +| BenchmarkDice/Faker_math-8 | 42908084 | 26.84 ns/op | 8 B/op | 1 allocs/op | +| BenchmarkDice/Faker_crypto-8 | 2953483 | 395.5 ns/op | 8 B/op | 1 allocs/op | +| BenchmarkGenerate/package-8 | 383122 | 2767 ns/op | 1139 B/op | 29 allocs/op | +| BenchmarkGenerate/Complex-8 | 135508 | 8555 ns/op | 4440 B/op | 80 allocs/op | +| BenchmarkGenerate/Faker_math-8 | 377151 | 2817 ns/op | 1139 B/op | 29 allocs/op | +| BenchmarkGenerate/Faker_crypto-8 | 152226 | 7234 ns/op | 1139 B/op | 29 allocs/op | +| BenchmarkRegex/package-8 | 628683 | 1922 ns/op | 1632 B/op | 27 allocs/op | +| 
BenchmarkRegex/Faker_math-8 | 591548 | 1940 ns/op | 1632 B/op | 27 allocs/op | +| BenchmarkRegex/Faker_crypto-8 | 616701 | 1934 ns/op | 1632 B/op | 27 allocs/op | +| BenchmarkRegexEmail/package-8 | 174812 | 6607 ns/op | 4084 B/op | 90 allocs/op | +| BenchmarkRegexEmail/Faker_math-8 | 174512 | 6619 ns/op | 4084 B/op | 90 allocs/op | +| BenchmarkRegexEmail/Faker_crypto-8 | 62312 | 18793 ns/op | 4083 B/op | 90 allocs/op | +| BenchmarkMap/package-8 | 318559 | 3275 ns/op | 1113 B/op | 16 allocs/op | +| BenchmarkMap/Faker_math-8 | 315990 | 3319 ns/op | 1113 B/op | 16 allocs/op | +| BenchmarkMap/Faker_crypto-8 | 46202 | 23997 ns/op | 1115 B/op | 16 allocs/op | +| BenchmarkHackerPhrase/package-8 | 155998 | 7191 ns/op | 3004 B/op | 50 allocs/op | +| BenchmarkHackerPhrase/Faker_math-8 | 154675 | 7305 ns/op | 3008 B/op | 50 allocs/op | +| BenchmarkHackerPhrase/Faker_crypto-8 | 109282 | 10268 ns/op | 3007 B/op | 50 allocs/op | +| BenchmarkHackerAbbreviation/package-8 | 21881574 | 57.57 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkHackerAbbreviation/Faker_math-8 | 18534495 | 59.55 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkHackerAbbreviation/Faker_crypto-8 | 2607735 | 401.6 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkHackerAdjective/package-8 | 24286845 | 55.74 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkHackerAdjective/Faker_math-8 | 22684101 | 55.22 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkHackerAdjective/Faker_crypto-8 | 2953530 | 490.5 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkHackerNoun/package-8 | 22554241 | 55.35 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkHackerNoun/Faker_math-8 | 18360708 | 56.78 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkHackerNoun/Faker_crypto-8 | 2823256 | 464.8 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkHackerVerb/package-8 | 19236123 | 65.49 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkHackerVerb/Faker_math-8 | 18090754 | 68.18 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkHackerVerb/Faker_crypto-8 | 2880181 | 439.2 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkHackeringVerb/package-8 | 19090326 | 71.74 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkHackeringVerb/Faker_math-8 | 19048659 | 63.31 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkHackeringVerb/Faker_crypto-8 | 3020748 | 404.5 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkReplaceWithNumbers-8 | 162931 | 7098 ns/op | 32 B/op | 2 allocs/op | +| BenchmarkHipsterWord/package-8 | 24059244 | 54.69 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkHipsterWord/Faker_math-8 | 21708511 | 52.98 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkHipsterWord/Faker_crypto-8 | 2870858 | 396.1 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkHipsterSentence/package-8 | 1278764 | 927.7 ns/op | 288 B/op | 3 allocs/op | +| BenchmarkHipsterSentence/Faker_math-8 | 1287939 | 955.0 ns/op | 288 B/op | 3 allocs/op | +| BenchmarkHipsterSentence/Faker_crypto-8 | 237703 | 4595 ns/op | 288 B/op | 3 allocs/op | +| BenchmarkHipsterParagraph/package-8 | 57895 | 18466 ns/op | 10521 B/op | 48 allocs/op | +| BenchmarkHipsterParagraph/Faker_math-8 | 61772 | 19188 ns/op | 10520 B/op | 48 allocs/op | +| BenchmarkHipsterParagraph/Faker_crypto-8 | 12978 | 91733 ns/op | 10522 B/op | 48 allocs/op | +| BenchmarkInputName/package-8 | 15728428 | 74.49 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkInputName/Faker_math-8 | 13243030 | 89.75 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkInputName/Faker_crypto-8 | 2736225 | 478.4 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkSvg/package-8 | 172828 | 7906 ns/op | 8871 B/op | 52 allocs/op | +| BenchmarkSvg/Faker_math-8 | 161821 | 6754 ns/op | 8875 B/op | 52 
allocs/op | +| BenchmarkSvg/Faker_crypto-8 | 29023 | 40910 ns/op | 8862 B/op | 52 allocs/op | +| BenchmarkImageURL/package-8 | 11692422 | 94.34 ns/op | 38 B/op | 3 allocs/op | +| BenchmarkImageURL/Faker_math-8 | 11451087 | 91.39 ns/op | 38 B/op | 3 allocs/op | +| BenchmarkImageURL/Faker_crypto-8 | 12107578 | 92.30 ns/op | 38 B/op | 3 allocs/op | +| BenchmarkImage/package-8 | 50 | 20495942 ns/op | 2457673 B/op | 307202 allocs/op | +| BenchmarkImage/Faker_math-8 | 51 | 20349126 ns/op | 2457780 B/op | 307202 allocs/op | +| BenchmarkImage/Faker_crypto-8 | 3 | 393591549 ns/op | 2457685 B/op | 307202 allocs/op | +| BenchmarkImageJpeg/package-8 | 31 | 32857846 ns/op | 2982318 B/op | 307214 allocs/op | +| BenchmarkImageJpeg/Faker_math-8 | 34 | 31873165 ns/op | 2982479 B/op | 307214 allocs/op | +| BenchmarkImageJpeg/Faker_crypto-8 | 3 | 387670345 ns/op | 2982357 B/op | 307215 allocs/op | +| BenchmarkImagePng/package-8 | 16 | 65425256 ns/op | 5899024 B/op | 307270 allocs/op | +| BenchmarkImagePng/Faker_math-8 | 18 | 67804235 ns/op | 5899314 B/op | 307270 allocs/op | +| BenchmarkImagePng/Faker_crypto-8 | 3 | 396378778 ns/op | 5899005 B/op | 307270 allocs/op | +| BenchmarkDomainName/package-8 | 2344912 | 505.6 ns/op | 95 B/op | 5 allocs/op | +| BenchmarkDomainName/Faker_math-8 | 2265744 | 512.5 ns/op | 95 B/op | 5 allocs/op | +| BenchmarkDomainName/Faker_crypto-8 | 639775 | 1788 ns/op | 95 B/op | 5 allocs/op | +| BenchmarkDomainSuffix/package-8 | 19431498 | 59.95 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkDomainSuffix/Faker_math-8 | 20097267 | 59.04 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkDomainSuffix/Faker_crypto-8 | 2498906 | 437.0 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkURL/package-8 | 1000000 | 1155 ns/op | 277 B/op | 10 allocs/op | +| BenchmarkURL/Faker_math-8 | 1000000 | 1165 ns/op | 277 B/op | 10 allocs/op | +| BenchmarkURL/Faker_crypto-8 | 275793 | 4371 ns/op | 276 B/op | 10 allocs/op | +| BenchmarkHTTPMethod/package-8 | 17651594 | 59.20 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkHTTPMethod/Faker_math-8 | 20081227 | 61.28 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkHTTPMethod/Faker_crypto-8 | 2844322 | 460.1 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkIPv4Address/package-8 | 5215255 | 229.2 ns/op | 16 B/op | 1 allocs/op | +| BenchmarkIPv4Address/Faker_math-8 | 4852905 | 224.9 ns/op | 16 B/op | 1 allocs/op | +| BenchmarkIPv4Address/Faker_crypto-8 | 670951 | 1827 ns/op | 16 B/op | 1 allocs/op | +| BenchmarkIPv6Address/package-8 | 2312482 | 510.0 ns/op | 111 B/op | 8 allocs/op | +| BenchmarkIPv6Address/Faker_math-8 | 2261472 | 521.2 ns/op | 111 B/op | 8 allocs/op | +| BenchmarkIPv6Address/Faker_crypto-8 | 338601 | 3623 ns/op | 111 B/op | 8 allocs/op | +| BenchmarkMacAddress/package-8 | 2809762 | 426.2 ns/op | 24 B/op | 1 allocs/op | +| BenchmarkMacAddress/Faker_math-8 | 2863842 | 425.5 ns/op | 24 B/op | 1 allocs/op | +| BenchmarkMacAddress/Faker_crypto-8 | 376604 | 2688 ns/op | 24 B/op | 1 allocs/op | +| BenchmarkHTTPStatusCode/package-8 | 13488582 | 88.27 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkHTTPStatusCode/Faker_math-8 | 14188726 | 73.23 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkHTTPStatusCode/Faker_crypto-8 | 2497014 | 463.7 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkHTTPStatusCodeSimple/package-8 | 17822486 | 81.54 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkHTTPStatusCodeSimple/Faker_math-8 | 16282341 | 70.72 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkHTTPStatusCodeSimple/Faker_crypto-8 | 2360576 | 451.7 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkLogLevel/package-8 | 19343472 | 
67.40 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkLogLevel/Faker_math-8 | 19445798 | 61.84 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkLogLevel/Faker_crypto-8 | 2296162 | 468.5 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkUserAgent/package-8 | 1503814 | 813.9 ns/op | 297 B/op | 5 allocs/op | +| BenchmarkUserAgent/Faker_math-8 | 1462177 | 803.6 ns/op | 298 B/op | 5 allocs/op | +| BenchmarkUserAgent/Faker_crypto-8 | 181178 | 6157 ns/op | 298 B/op | 5 allocs/op | +| BenchmarkChromeUserAgent/package-8 | 1911201 | 596.8 ns/op | 184 B/op | 5 allocs/op | +| BenchmarkChromeUserAgent/Faker_math-8 | 1969712 | 598.1 ns/op | 184 B/op | 5 allocs/op | +| BenchmarkChromeUserAgent/Faker_crypto-8 | 264816 | 4433 ns/op | 184 B/op | 5 allocs/op | +| BenchmarkFirefoxUserAgent/package-8 | 1000000 | 1043 ns/op | 362 B/op | 6 allocs/op | +| BenchmarkFirefoxUserAgent/Faker_math-8 | 1000000 | 1054 ns/op | 362 B/op | 6 allocs/op | +| BenchmarkFirefoxUserAgent/Faker_crypto-8 | 166128 | 7646 ns/op | 362 B/op | 6 allocs/op | +| BenchmarkSafariUserAgent/package-8 | 1000000 | 1022 ns/op | 551 B/op | 7 allocs/op | +| BenchmarkSafariUserAgent/Faker_math-8 | 1000000 | 1017 ns/op | 551 B/op | 7 allocs/op | +| BenchmarkSafariUserAgent/Faker_crypto-8 | 146463 | 7525 ns/op | 551 B/op | 7 allocs/op | +| BenchmarkOperaUserAgent/package-8 | 1844185 | 643.8 ns/op | 212 B/op | 5 allocs/op | +| BenchmarkOperaUserAgent/Faker_math-8 | 1805168 | 654.3 ns/op | 212 B/op | 5 allocs/op | +| BenchmarkOperaUserAgent/Faker_crypto-8 | 219927 | 5257 ns/op | 212 B/op | 5 allocs/op | +| BenchmarkJSONLookup100-8 | 894 | 1194698 ns/op | 537673 B/op | 8141 allocs/op | +| BenchmarkJSONLookup1000-8 | 91 | 12099728 ns/op | 5616708 B/op | 81606 allocs/op | +| BenchmarkJSONLookup10000-8 | 8 | 128144166 ns/op | 62638763 B/op | 817708 allocs/op | +| BenchmarkJSONLookup100000-8 | 1 | 1324756016 ns/op | 616116744 B/op | 8179136 allocs/op | +| BenchmarkLanguage/package-8 | 20946056 | 68.53 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkLanguage/Faker_math-8 | 16884613 | 61.06 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkLanguage/Faker_crypto-8 | 2889944 | 442.0 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkLanguageAbbreviation/package-8 | 20782443 | 53.79 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkLanguageAbbreviation/Faker_math-8 | 17936367 | 56.26 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkLanguageAbbreviation/Faker_crypto-8 | 2630406 | 423.8 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkLanguageBCP/package-8 | 19858063 | 59.00 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkLanguageBCP/Faker_math-8 | 20712447 | 60.02 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkLanguageBCP/Faker_crypto-8 | 2654044 | 469.2 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkProgrammingLanguage/package-8 | 17849598 | 58.34 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkProgrammingLanguage/Faker_math-8 | 20090289 | 70.59 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkProgrammingLanguage/Faker_crypto-8 | 2628798 | 424.4 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkProgrammingLanguageBest/package-8 | 1000000000 | 0.4044 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkProgrammingLanguageBest/Faker_math-8 | 1000000000 | 0.2975 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkProgrammingLanguageBest/Faker_crypto-8 | 1000000000 | 0.2543 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkLoremIpsumWord-8 | 22434632 | 54.96 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkLoremIpsumSentence-8 | 1000000 | 1038 ns/op | 219 B/op | 2 allocs/op | +| BenchmarkLoremIpsumParagraph-8 | 59320 | 19442 ns/op | 8479 B/op | 40 allocs/op | +| 
BenchmarkMinecraftOre/package-8 | 14624242 | 90.01 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMinecraftOre/Faker_math-8 | 16379578 | 86.91 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMinecraftOre/Faker_crypto-8 | 2757652 | 477.0 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMinecraftWood/package-8 | 15815132 | 83.23 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMinecraftWood/Faker_math-8 | 14872902 | 75.36 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMinecraftWood/Faker_crypto-8 | 2524514 | 514.4 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMinecraftArmorTier/package-8 | 15296107 | 78.58 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMinecraftArmorTier/Faker_math-8 | 14341870 | 86.33 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMinecraftArmorTier/Faker_crypto-8 | 2344278 | 473.1 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMinecraftArmorPart/package-8 | 16863422 | 82.04 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMinecraftArmorPart/Faker_math-8 | 14052031 | 76.92 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMinecraftArmorPart/Faker_crypto-8 | 2770314 | 474.5 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMinecraftWeapon/package-8 | 15759004 | 77.42 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMinecraftWeapon/Faker_math-8 | 15945940 | 81.48 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMinecraftWeapon/Faker_crypto-8 | 2254436 | 464.5 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMinecraftTool/package-8 | 15887787 | 76.39 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMinecraftTool/Faker_math-8 | 14269508 | 91.01 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMinecraftTool/Faker_crypto-8 | 2718507 | 525.7 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMinecraftDye/package-8 | 16131942 | 71.06 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMinecraftDye/Faker_math-8 | 16802478 | 73.40 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMinecraftDye/Faker_crypto-8 | 2584966 | 476.4 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMinecraftFood/package-8 | 14680048 | 87.15 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMinecraftFood/Faker_math-8 | 13558227 | 86.71 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMinecraftFood/Faker_crypto-8 | 2329946 | 435.6 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMinecraftAnimal/package-8 | 15871832 | 85.92 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMinecraftAnimal/Faker_math-8 | 12411510 | 83.88 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMinecraftAnimal/Faker_crypto-8 | 2528960 | 441.9 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMinecraftVillagerJob/package-8 | 13549438 | 80.41 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMinecraftVillagerJob/Faker_math-8 | 13769702 | 104.5 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMinecraftVillagerJob/Faker_crypto-8 | 2397300 | 452.2 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMinecraftVillagerStation/package-8 | 15069139 | 93.65 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMinecraftVillagerStation/Faker_math-8 | 15468883 | 82.27 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMinecraftVillagerStation/Faker_crypto-8 | 2469778 | 453.9 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMinecraftVillagerLevel/package-8 | 13468396 | 102.1 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMinecraftVillagerLevel/Faker_math-8 | 14354506 | 92.55 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMinecraftVillagerLevel/Faker_crypto-8 | 2416441 | 544.5 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMinecraftMobPassive/package-8 | 13299806 | 84.84 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMinecraftMobPassive/Faker_math-8 | 14181126 | 87.18 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMinecraftMobPassive/Faker_crypto-8 | 2539264 | 510.0 ns/op | 
0 B/op | 0 allocs/op | +| BenchmarkMinecraftMobNeutral/package-8 | 11043175 | 110.7 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMinecraftMobNeutral/Faker_math-8 | 13059249 | 99.36 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMinecraftMobNeutral/Faker_crypto-8 | 2394342 | 544.6 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMinecraftMobHostile/package-8 | 13963809 | 95.66 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMinecraftMobHostile/Faker_math-8 | 15182318 | 96.90 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMinecraftMobHostile/Faker_crypto-8 | 2204600 | 538.3 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMinecraftMobBoss/package-8 | 12737437 | 89.68 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMinecraftMobBoss/Faker_math-8 | 13494093 | 90.65 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMinecraftMobBoss/Faker_crypto-8 | 2671172 | 461.3 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMinecraftBiome/package-8 | 13233918 | 81.47 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMinecraftBiome/Faker_math-8 | 16109408 | 85.68 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMinecraftBiome/Faker_crypto-8 | 2205704 | 499.4 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMinecraftWeather/package-8 | 13371518 | 79.93 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMinecraftWeather/Faker_math-8 | 14987182 | 80.69 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMinecraftWeather/Faker_crypto-8 | 2373735 | 473.6 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkBool/package-8 | 75772935 | 15.03 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkBool/Faker_math-8 | 76893664 | 19.04 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkBool/Faker_crypto-8 | 3141634 | 376.4 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkUUID/package-8 | 9382911 | 115.3 ns/op | 64 B/op | 2 allocs/op | +| BenchmarkUUID/Faker_math-8 | 9492183 | 114.1 ns/op | 64 B/op | 2 allocs/op | +| BenchmarkUUID/Faker_crypto-8 | 1000000 | 1039 ns/op | 64 B/op | 2 allocs/op | +| BenchmarkShuffleAnySlice/package-8 | 2234314 | 511.5 ns/op | 24 B/op | 1 allocs/op | \ No newline at end of file diff -Nru temporal-1.21.5-1/src/vendor/github.com/brianvoe/gofakeit/v6/README.md temporal-1.22.5/src/vendor/github.com/brianvoe/gofakeit/v6/README.md --- temporal-1.21.5-1/src/vendor/github.com/brianvoe/gofakeit/v6/README.md 2023-09-29 14:03:30.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/brianvoe/gofakeit/v6/README.md 2024-02-23 09:46:09.000000000 +0000 @@ -20,6 +20,7 @@ - [Issue](https://github.com/brianvoe/gofakeit/issues) ## Contributors + Thanks to everyone who has contributed to Gofakeit! @@ -168,6 +169,47 @@ // Nested Struct Fields and Embedded Fields ``` +## Fakeable types + +It is possible to extend a struct by implementing the `Fakeable` interface +in order to control the generation. + +For example, this is useful when it is not possible to modify the struct that you want to fake by adding struct tags to a field but you still need to be able to control the generation process. + +```go +// Custom string that you want to generate your own data for +// or just return a static value +type CustomString string + +func (c *CustomString) Fake(faker *gofakeit.Faker) interface{} { + return CustomString("my custom string") +} + +// Imagine a CustomTime type that is needed to support a custom JSON Marshaler +type CustomTime time.Time + +func (c *CustomTime) Fake(faker *gofakeit.Faker) interface{} { + return CustomTime(time.Now()) +} + +func (c *CustomTime) MarshalJSON() ([]byte, error) { + //... 
+} + +// This is the struct that we cannot modify to add struct tags +type NotModifiable struct { + Token string + Value CustomString + Creation *CustomTime +} + +var f NotModifiable +gofakeit.Struct(&f) +fmt.Printf("%s", f.Token) // yvqqdH +fmt.Printf("%s", f.Value) // my custom string +fmt.Printf("%s", f.Creation) // 2023-04-02 23:00:00 +0000 UTC m=+0.000000001 +``` + ## Custom Functions In a lot of situations you may need to use your own random function usage for your specific needs. @@ -226,7 +268,10 @@ ### File +Passing `nil` to `CSV`, `JSON` or `XML` it will auto generate data using a random set of generators. + ```go +CSV(co *CSVOptions) ([]byte, error) JSON(jo *JSONOptions) ([]byte, error) XML(xo *XMLOptions) ([]byte, error) FileExtension() string @@ -241,6 +286,7 @@ NamePrefix() string NameSuffix() string FirstName() string +MiddleName() string LastName() string Gender() string SSN() string @@ -533,10 +579,18 @@ BitcoinPrivateKey() string ``` +### Finance + +```go +Cusip() string +Isin() string +``` + ### Company ```go BS() string +Blurb() string BuzzWord() string Company() string CompanySuffix() string @@ -544,6 +598,7 @@ JobDescriptor() string JobLevel() string JobTitle() string +Slogan() string ``` ### Hacker @@ -674,6 +729,23 @@ MinecraftWeather() string ``` +### Book + +```go +Book() *BookInfo +BookTitle() string +BookAuthor() string +BookGenre() string +``` + +### Movie + +```go +Movie() *MovieInfo +MovieName() string +MovieGenre() string +``` + ### Error Unlike most `gofakeit` methods which return a `string`, the error methods return a Go `error`. Access the error message as a string by chaining the `.Error()` method. @@ -687,4 +759,4 @@ ErrorHTTPServer() error ErrorInput() error ErrorRuntime() error -``` \ No newline at end of file +``` diff -Nru temporal-1.21.5-1/src/vendor/github.com/brianvoe/gofakeit/v6/book.go temporal-1.22.5/src/vendor/github.com/brianvoe/gofakeit/v6/book.go --- temporal-1.21.5-1/src/vendor/github.com/brianvoe/gofakeit/v6/book.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/brianvoe/gofakeit/v6/book.go 2024-02-23 09:46:09.000000000 +0000 @@ -0,0 +1,85 @@ +package gofakeit + +import "math/rand" + +func BookTitle() string { return bookTitle(globalFaker.Rand) } + +func (f *Faker) BookTitle() string { return bookTitle(f.Rand) } + +func bookTitle(r *rand.Rand) string { return getRandValue(r, []string{"book", "title"}) } + +func BookAuthor() string { return bookAuthor(globalFaker.Rand) } + +func (f *Faker) BookAuthor() string { return bookAuthor(f.Rand) } + +func bookAuthor(r *rand.Rand) string { return getRandValue(r, []string{"book", "author"}) } + +func BookGenre() string { return bookGenre(globalFaker.Rand) } + +func (f *Faker) BookGenre() string { return bookGenre(f.Rand) } + +func bookGenre(r *rand.Rand) string { return getRandValue(r, []string{"book", "genre"}) } + +type BookInfo struct { + Title string `json:"title" xml:"name"` + Author string `json:"author" xml:"author"` + Genre string `json:"genre" xml:"genre"` +} + +func Book() *BookInfo { return book(globalFaker.Rand) } + +func (f *Faker) Book() *BookInfo { return book(f.Rand) } + +func book(r *rand.Rand) *BookInfo { + return &BookInfo{ + Title: bookTitle(r), + Author: bookAuthor(r), + Genre: bookGenre(r), + } +} + +func addBookLookup() { + AddFuncLookup("book", Info{ + Display: "Book", + Category: "book", + Description: "Random Book data set", + Example: `{title: "Hamlet", author: "Mark Twain", genre: "Adventure"}`, + Output: "map[string]string", + Generate: 
func(r *rand.Rand, m *MapParams, info *Info) (interface{}, error) { + return book(r), nil + }, + }) + + AddFuncLookup("booktitle", Info{ + Display: "Title", + Category: "book", + Description: "Random Book title", + Example: "Hamlet", + Output: "string", + Generate: func(r *rand.Rand, m *MapParams, info *Info) (interface{}, error) { + return bookTitle(r), nil + }, + }) + + AddFuncLookup("bookauthor", Info{ + Display: "Author", + Category: "book", + Description: "Random Book author", + Example: "Mark Twain", + Output: "string", + Generate: func(r *rand.Rand, m *MapParams, info *Info) (interface{}, error) { + return bookAuthor(r), nil + }, + }) + + AddFuncLookup("bookgenre", Info{ + Display: "Genre", + Category: "book", + Description: "Random Book genre", + Example: "Adventure", + Output: "string", + Generate: func(r *rand.Rand, m *MapParams, info *Info) (interface{}, error) { + return bookGenre(r), nil + }, + }) +} \ No newline at end of file diff -Nru temporal-1.21.5-1/src/vendor/github.com/brianvoe/gofakeit/v6/company.go temporal-1.22.5/src/vendor/github.com/brianvoe/gofakeit/v6/company.go --- temporal-1.21.5-1/src/vendor/github.com/brianvoe/gofakeit/v6/company.go 2023-09-29 14:03:30.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/brianvoe/gofakeit/v6/company.go 2024-02-23 09:46:09.000000000 +0000 @@ -18,6 +18,13 @@ func companySuffix(r *rand.Rand) string { return getRandValue(r, []string{"company", "suffix"}) } +// Blurb will generate a random company blurb string +func Blurb() string { return blurb(globalFaker.Rand) } + +func (f *Faker) Blurb() string { return blurb(f.Rand) } + +func blurb(r *rand.Rand) string { return getRandValue(r, []string{"company", "blurb"}) } + // BuzzWord will generate a random company buzz word string func BuzzWord() string { return buzzWord(globalFaker.Rand) } @@ -81,6 +88,30 @@ func jobLevel(r *rand.Rand) string { return getRandValue(r, []string{"job", "level"}) } +// Slogan will generate a random company slogan +func Slogan() string { return slogan(globalFaker.Rand) } + +// Slogan will generate a random company slogan +func (f *Faker) Slogan() string { return slogan(f.Rand) } + +// Slogan will generate a random company slogan +func slogan(r *rand.Rand) string { + slogan := "" + var sloganStyle = number(r, 0, 2) + switch sloganStyle { + // Noun. Buzzword! + case 0: + slogan = getRandValue(r, []string{"company", "blurb"}) + ". " + getRandValue(r, []string{"company", "buzzwords"}) + "!" + // Buzzword Noun, Buzzword Noun. + case 1: + slogan = getRandValue(r, []string{"company", "buzzwords"}) + " " + getRandValue(r, []string{"company", "blurb"}) + ", " + getRandValue(r, []string{"company", "buzzwords"}) + " " + getRandValue(r, []string{"company", "blurb"}) + "." + // Buzzword bs Noun, Buzzword. + case 2: + slogan = getRandValue(r, []string{"company", "buzzwords"}) + " " + getRandValue(r, []string{"company", "bs"}) + " " + getRandValue(r, []string{"company", "blurb"}) + ", " + getRandValue(r, []string{"company", "buzzwords"}) + "." 
+ } + return slogan +} + func addCompanyLookup() { AddFuncLookup("company", Info{ Display: "Company", @@ -115,6 +146,17 @@ }, }) + AddFuncLookup("blurb", Info{ + Display: "Blurb", + Category: "company", + Description: "Random company blurb", + Example: "word", + Output: "string", + Generate: func(r *rand.Rand, m *MapParams, info *Info) (interface{}, error) { + return blurb(r), nil + }, + }) + AddFuncLookup("buzzword", Info{ Display: "Buzzword", Category: "company", @@ -169,4 +211,15 @@ return jobLevel(r), nil }, }) + + AddFuncLookup("slogan", Info{ + Display: "Slogan", + Category: "comapny", + Description: "Random company slogan", + Example: "Universal seamless Focus, interactive.", + Output: "string", + Generate: func(r *rand.Rand, m *MapParams, info *Info) (interface{}, error) { + return slogan(r), nil + }, + }) } diff -Nru temporal-1.21.5-1/src/vendor/github.com/brianvoe/gofakeit/v6/csv.go temporal-1.22.5/src/vendor/github.com/brianvoe/gofakeit/v6/csv.go --- temporal-1.21.5-1/src/vendor/github.com/brianvoe/gofakeit/v6/csv.go 2023-09-29 14:03:30.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/brianvoe/gofakeit/v6/csv.go 2024-02-23 09:46:09.000000000 +0000 @@ -13,18 +13,28 @@ // CSVOptions defines values needed for csv generation type CSVOptions struct { - Delimiter string `json:"delimiter" xml:"delimiter"` - RowCount int `json:"row_count" xml:"row_count"` - Fields []Field `json:"fields" xml:"fields"` + Delimiter string `json:"delimiter" xml:"delimiter" fake:"{randomstring:[,,tab]}"` + RowCount int `json:"row_count" xml:"row_count" fake:"{number:1,10}"` + Fields []Field `json:"fields" xml:"fields" fake:"{internal_exampleFields}"` } // CSV generates an object or an array of objects in json format -func CSV(co *CSVOptions) ([]byte, error) { return csvFunc(globalFaker.Rand, co) } +// A nil CSVOptions returns a randomly structured CSV. +func CSV(co *CSVOptions) ([]byte, error) { return csvFunc(globalFaker, co) } // CSV generates an object or an array of objects in json format -func (f *Faker) CSV(co *CSVOptions) ([]byte, error) { return csvFunc(f.Rand, co) } +// A nil CSVOptions returns a randomly structured CSV. 
+func (f *Faker) CSV(co *CSVOptions) ([]byte, error) { return csvFunc(f, co) } + +func csvFunc(f *Faker, co *CSVOptions) ([]byte, error) { + if co == nil { + // We didn't get a CSVOptions, so create a new random one + err := f.Struct(&co) + if err != nil { + return nil, err + } + } -func csvFunc(r *rand.Rand, co *CSVOptions) ([]byte, error) { // Check delimiter if co.Delimiter == "" { co.Delimiter = "," @@ -74,7 +84,7 @@ return nil, errors.New("invalid function, " + field.Function + " does not exist") } - value, err := funcInfo.Generate(r, &field.Params, funcInfo) + value, err := funcInfo.Generate(f.Rand, &field.Params, funcInfo) if err != nil { return nil, err } @@ -165,7 +175,8 @@ } co.Delimiter = delimiter - csvOut, err := csvFunc(r, &co) + f := &Faker{Rand: r} + csvOut, err := csvFunc(f, &co) if err != nil { return nil, err } diff -Nru temporal-1.21.5-1/src/vendor/github.com/brianvoe/gofakeit/v6/data/book.go temporal-1.22.5/src/vendor/github.com/brianvoe/gofakeit/v6/data/book.go --- temporal-1.21.5-1/src/vendor/github.com/brianvoe/gofakeit/v6/data/book.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/brianvoe/gofakeit/v6/data/book.go 2024-02-23 09:46:09.000000000 +0000 @@ -0,0 +1,101 @@ +package data + +var Books = map[string][]string{ + "title": { + "Anna Karenina", + "Beloved", + "Blindness", + "Bostan", + "Buddenbrooks", + "Crime and Punishment", + "Don Quijote De La Mancha", + "Fairy tales", + "Faust", + "Gulliver's Travels", + "Gypsy Ballads", + "Hamlet", + "Harry potter and the sorcerer's stone", + "King Lear", + "Leaves of Grass", + "Lolita", + "Madame Bovary", + "Memoirs of Hadrian", + "Metamorphoses", + "Moby Dick", + "Nineteen Eighty-Four", + "Odyssey", + "Oedipus the King", + "One Hundred Years of Solitude", + "One Thousand and One Nights", + "Othello", + "Pippi Longstocking", + "Pride and Prejudice", + "Romeo & Juliet", + "Sherlock Holmes", + "Sons and Lovers", + "The Adventures of Huckleberry Finn", + "The Book Of Job", + "The Brothers Karamazov", + "The Golden Notebook", + "The Idiot", + "The Old Man and the Sea", + "The Stranger", + "Things Fall Apart", + "Ulysses", + "War and Peace", + "Wuthering Heights", + "Zorba the Greek", + }, + "author": { + "Albert Camus", + "Astrid Lindgren", + "Charles Dickens", + "D. H. Lawrence", + "Edgar Allan Poe", + "Emily Brontë", + "Ernest Hemingway", + "Franz Kafka", + "Fyodor Dostoevsky", + "George Orwell", + "Hans Christian Andersen", + "Homer", + "James Joyce", + "Jane Austen", + "Johann Wolfgang von Goethe", + "Jorge Luis Borges", + "Joanne K. 
Rowling", + "Leo Tolstoy", + "Marcel Proust", + "Mark Twain", + "Paul Celan", + "Salman Rushdie", + "Sophocles", + "Thomas Mann", + "Toni Morrison", + "Vladimir Nabokov", + "William Faulkner", + "William Shakespeare", + "Yasunari Kawabata", + }, + "genre": { + "Adventure", + "Comic", + "Crime", + "Erotic", + "Fiction", + "Fantasy", + "Historical", + "Horror", + "Magic", + "Mystery", + "Philosophical", + "Political", + "Romance", + "Saga", + "Satire", + "Science", + "Speculative", + "Thriller", + "Urban", + }, +} \ No newline at end of file diff -Nru temporal-1.21.5-1/src/vendor/github.com/brianvoe/gofakeit/v6/data/company.go temporal-1.22.5/src/vendor/github.com/brianvoe/gofakeit/v6/data/company.go --- temporal-1.21.5-1/src/vendor/github.com/brianvoe/gofakeit/v6/data/company.go 2023-09-29 14:03:30.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/brianvoe/gofakeit/v6/data/company.go 2024-02-23 09:46:09.000000000 +0000 @@ -6,4 +6,5 @@ "suffix": {"Inc", "and Sons", "LLC", "Group"}, "buzzwords": {"Adaptive", "Advanced", "Ameliorated", "Assimilated", "Automated", "Balanced", "Business-focused", "Centralized", "Cloned", "Compatible", "Configurable", "Cross-group", "Cross-platform", "Customer-focused", "Customizable", "De-engineered", "Decentralized", "Devolved", "Digitized", "Distributed", "Diverse", "Down-sized", "Enhanced", "Enterprise-wide", "Ergonomic", "Exclusive", "Expanded", "Extended", "Face to face", "Focused", "Front-line", "Fully-configurable", "Function-based", "Fundamental", "Future-proofed", "Grass-roots", "Horizontal", "Implemented", "Innovative", "Integrated", "Intuitive", "Inverse", "Managed", "Mandatory", "Monitored", "Multi-channelled", "Multi-lateral", "Multi-layered", "Multi-tiered", "Networked", "Object-based", "Open-architected", "Open-source", "Operative", "Optimized", "Optional", "Organic", "Organized", "Persevering", "Persistent", "Phased", "Polarised", "Pre-emptive", "Proactive", "Profit-focused", "Profound", "Programmable", "Progressive", "Public-key", "Quality-focused", "Re-contextualized", "Re-engineered", "Reactive", "Realigned", "Reduced", "Reverse-engineered", "Right-sized", "Robust", "Seamless", "Secured", "Self-enabling", "Sharable", "Stand-alone", "Streamlined", "Switchable", "Synchronised", "Synergistic", "Synergized", "Team-oriented", "Total", "Triple-buffered", "Universal", "Up-sized", "Upgradable", "User-centric", "User-friendly", "Versatile", "Virtual", "Vision-oriented", "Visionary", "24 hour", "24/7", "3rd generation", "4th generation", "5th generation", "6th generation", "actuating", "analyzing", "asymmetric", "asynchronous", "attitude-oriented", "background", "bandwidth-monitored", "bi-directional", "bifurcated", "bottom-line", "clear-thinking", "client-driven", "client-server", "coherent", "cohesive", "composite", "content-based", "context-sensitive", "contextually-based", "dedicated", "demand-driven", "didactic", "directional", "discrete", "disintermediate", "dynamic", "eco-centric", "empowering", "encompassing", "even-keeled", "executive", "explicit", "exuding", "fault-tolerant", "foreground", "fresh-thinking", "full-range", "global", "grid-enabled", "heuristic", "high-level", "holistic", "homogeneous", "human-resource", "hybrid", "impactful", "incremental", "intangible", "interactive", "intermediate", "leading edge", "local", "logistical", "maximized", "methodical", "mission-critical", "mobile", "modular", "motivating", "multi-state", "multi-tasking", "multimedia", "national", "needs-based", "neutral", "next generation", "non-volatile", 
"object-oriented", "optimal", "optimizing", "radical", "real-time", "reciprocal", "regional", "responsive", "scalable", "secondary", "solution-oriented", "stable", "static", "system-worthy", "systematic", "systemic", "tangible", "tertiary", "transitional", "uniform", "upward-trending", "user-facing", "value-added", "web-enabled", "well-modulated", "zero administration", "zero defect", "zero tolerance", "Graphic Interface", "Graphical User Interface", "ability", "access", "adapter", "algorithm", "alliance", "analyzer", "application", "approach", "architecture", "archive", "array", "artificial intelligence", "attitude", "benchmark", "budgetary management", "capability", "capacity", "challenge", "circuit", "collaboration", "complexity", "concept", "conglomeration", "contingency", "core", "customer loyalty", "data-warehouse", "database", "definition", "emulation", "encoding", "encryption", "extranet", "firmware", "flexibility", "focus group", "forecast", "frame", "framework", "function", "functionalities", "groupware", "hardware", "help-desk", "hierarchy", "hub", "implementation", "info-mediaries", "infrastructure", "initiative", "installation", "instruction set", "interface", "internet solution", "intranet", "knowledge base", "knowledge user", "leverage", "local area network", "matrices", "matrix", "methodology", "middleware", "migration", "model", "moderator", "monitoring", "moratorium", "neural-net", "open architecture", "open system", "orchestration", "paradigm", "parallelism", "policy", "portal", "pricing structure", "process improvement", "product", "productivity", "project", "projection", "protocol", "secured line", "service-desk", "software", "solution", "standardization", "strategy", "structure", "success", "superstructure", "support", "synergy", "system engine", "task-force", "throughput", "time-frame", "toolset", "utilisation", "website", "workforce"}, "bs": {"aggregate", "architect", "benchmark", "brand", "cultivate", "deliver", "deploy", "disintermediate", "drive", "e-enable", "embrace", "empower", "enable", "engage", "engineer", "enhance", "envisioneer", "evolve", "expedite", "exploit", "extend", "facilitate", "generate", "grow", "harness", "implement", "incentivize", "incubate", "innovate", "integrate", "iterate", "leverage", "matrix", "maximize", "mesh", "monetize", "morph", "optimize", "orchestrate", "productize", "recontextualize", "redefine", "reintermediate", "reinvent", "repurpose", "revolutionize", "scale", "seize", "strategize", "streamline", "syndicate", "synergize", "synthesize", "target", "transform", "transition", "unleash", "utilize", "visualize", "whiteboard", "24-365", "24-7", "B2B", "B2C", "back-end", "best-of-breed", "bleeding-edge", "bricks-and-clicks", "clicks-and-mortar", "collaborative", "compelling", "cross-media", "cross-platform", "customized", "cutting-edge", "distributed", "dot-com", "dynamic", "e-business", "efficient", "end-to-end", "enterprise", "extensible", "frictionless", "front-end", "global", "granular", "holistic", "impactful", "innovative", "integrated", "interactive", "intuitive", "killer", "leading-edge", "magnetic", "mission-critical", "next-generation", "one-to-one", "open-source", "out-of-the-box", "plug-and-play", "proactive", "real-time", "revolutionary", "rich", "robust", "scalable", "seamless", "sexy", "sticky", "strategic", "synergistic", "transparent", "turn-key", "ubiquitous", "user-centric", "value-added", "vertical", "viral", "virtual", "visionary", "web-enabled", "wireless", "world-class", "ROI", "action-items", "applications", 
"architectures", "bandwidth", "channels", "communities", "content", "convergence", "deliverables", "e-business", "e-commerce", "e-markets", "e-services", "e-tailers", "experiences", "eyeballs", "functionalities", "infomediaries", "infrastructures", "initiatives", "interfaces", "markets", "methodologies", "metrics", "mindshare", "models", "networks", "niches", "paradigms", "partnerships", "platforms", "portals", "relationships", "schemas", "solutions", "supply-chains", "synergies", "systems", "technologies", "users", "vortals", "web services", "web-readiness"}, + "blurb": {"Advancement", "Advantage", "Ambition", "Balance", "Belief", "Benefits", "Care", "Challenge", "Change", "Choice", "Commitment", "Comfort", "Connection", "Consistency", "Creativity", "Dedication", "Discovery", "Diversity", "Dream", "Dreams", "Drive", "Ease", "Efficiency", "Empowerment", "Endurance", "Energy", "Engagement", "Environment", "Enterprise", "Excellence", "Exclusivity", "Experience", "Exploration", "Expression", "Family", "Flexibility", "Focus", "Freedom", "Future", "Future", "Growth", "Harmony", "Health", "Heart", "History", "Home", "Honesty", "Hope", "Impact", "Innovation", "Inspiration", "Integrity", "Joy", "Journey", "Knowledge", "Leadership", "Legacy", "Life", "Luxury", "Money", "Motivation", "Optimism", "Partnership", "Passion", "Peace", "People", "Performance", "Perseverance", "Pleasure", "Power", "Pride", "Progress", "Promise", "Quality", "Quality", "Reliability", "Resilience", "Respect", "Revolution", "Safety", "Service", "Simplicity", "Solutions", "Solidarity", "Strength", "Style", "Success", "Sustainability", "Taste", "Teamwork", "Technology", "Time", "Transformation", "Trust", "Unity", "Value", "Versatility", "Vision", "Wellness", "World"}, } diff -Nru temporal-1.21.5-1/src/vendor/github.com/brianvoe/gofakeit/v6/data/data.go temporal-1.22.5/src/vendor/github.com/brianvoe/gofakeit/v6/data/data.go --- temporal-1.21.5-1/src/vendor/github.com/brianvoe/gofakeit/v6/data/data.go 2023-09-29 14:03:30.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/brianvoe/gofakeit/v6/data/data.go 2024-02-23 09:46:09.000000000 +0000 @@ -28,6 +28,8 @@ "celebrity": Celebrity, "error": Error, "html": Html, + "book": Books, + "movie": Movies, } func List() map[string][]string { diff -Nru temporal-1.21.5-1/src/vendor/github.com/brianvoe/gofakeit/v6/data/movie.go temporal-1.22.5/src/vendor/github.com/brianvoe/gofakeit/v6/data/movie.go --- temporal-1.21.5-1/src/vendor/github.com/brianvoe/gofakeit/v6/data/movie.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/brianvoe/gofakeit/v6/data/movie.go 2024-02-23 09:46:09.000000000 +0000 @@ -0,0 +1,130 @@ +package data + +// From IMDB - Top 250 Movies subset to 100 +var Movies = map[string][]string{ + "name": { + "12 Years a Slave", + "1917", + "2001: A Space Odyssey", + "3 Idiots", + "A Beautiful Mind", + "A Clockwork Orange", + "Alien", + "American Beauty", + "American History X", + "Apocalypse Now", + "Avengers: Infinity War", + "Back to the Future", + "Batman Begins", + "Ben-Hur", + "Blade Runner", + "Casablanca", + "Casino", + "Catch Me If You Can", + "Das Leben der Anderen", + "Dead Poets Society", + "Die Hard", + "Django Unchained", + "Fight Club", + "Finding Nemo", + "Forrest Gump", + "Full Metal Jacket", + "Gandhi", + "Gladiator", + "Gone with the Wind", + "Good Will Hunting", + "Goodfellas", + "Green Book", + "Groundhog Day", + "Harry Potter and the Deathly Hallows - Part 2", + "Heat", + "Inception", + "Indiana Jones and the Last Crusade", + 
"Inglourious Basterds", + "Interstellar", + "Into the Wild", + "Intouchables", + "Joker", + "Judgment at Nuremberg", + "Jurassic Park", + "Kill Bill: Vol. 1", + "L.A. Confidential", + "La vita è bella", + "Lock, Stock and Two Smoking Barrels", + "Léon", + "Mad Max: Fury Road", + "Memento", + "Million Dollar Baby", + "Monsters, Inc.", + "Monty Python and the Holy Grail", + "No Country for Old Men", + "Once Upon a Time in America", + "One Flew Over the Cuckoo's Nest", + "Pirates of the Caribbean: The Curse of the Black Pearl", + "Platoon", + "Prisoners", + "Psycho", + "Pulp Fiction", + "Raiders of the Lost Ark", + "Ratatouille", + "Reservoir Dogs", + "Rocky", + "Saving Private Ryan", + "Scarface", + "Schindler's List", + "Se7en", + "Sherlock Jr.", + "Shutter Island", + "Snatch", + "Spider-Man: No Way Home", + "Star Wars: Episode VI - Return of the Jedi", + "Taxi Driver", + "Terminator 2: Judgment Day", + "The Big Lebowski", + "The Dark Knight", + "The Departed", + "The Empire Strikes Back", + "The Godfather", + "The Green Mile", + "The Lion King", + "The Lord of the Rings: The Fellowship of the Ring", + "The Matrix", + "The Pianist", + "The Prestige", + "The Shawshank Redemption", + "The Terminator", + "The Usual Suspects", + "The Wolf of Wall Street", + "Top Gun: Maverick", + "Toy Story", + "Unforgiven", + "Up", + "V for Vendetta", + "WALL·E", + "Warrior", + "Whiplash", + }, + "genre": { + "Action", + "Adventure", + "Animation", + "Biography", + "Comedy", + "Crime", + "Drama", + "Family", + "Fantasy", + "Film-Noir", + "History", + "Horror", + "Music", + "Musical", + "Mystery", + "Romance", + "Sci-Fi", + "Sport", + "Thriller", + "War", + "Western", + }, +} diff -Nru temporal-1.21.5-1/src/vendor/github.com/brianvoe/gofakeit/v6/data/person.go temporal-1.22.5/src/vendor/github.com/brianvoe/gofakeit/v6/data/person.go --- temporal-1.21.5-1/src/vendor/github.com/brianvoe/gofakeit/v6/data/person.go 2023-09-29 14:03:30.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/brianvoe/gofakeit/v6/data/person.go 2024-02-23 09:46:09.000000000 +0000 @@ -5,6 +5,7 @@ "prefix": {"Mr.", "Mrs.", "Ms.", "Miss", "Dr."}, "suffix": {"Jr.", "Sr.", "I", "II", "III", "IV", "V", "MD", "DDS", "PhD", "DVM"}, "first": {"Aaliyah", "Aaron", "Abagail", "Abbey", "Abbie", "Abbigail", "Abby", "Abdiel", "Abdul", "Abdullah", "Abe", "Abel", "Abelardo", "Abigail", "Abigale", "Abigayle", "Abner", "Abraham", "Ada", "Adah", "Adalberto", "Adaline", "Adam", "Adan", "Addie", "Addison", "Adela", "Adelbert", "Adele", "Adelia", "Adeline", "Adell", "Adella", "Adelle", "Aditya", "Adolf", "Adolfo", "Adolph", "Adolphus", "Adonis", "Adrain", "Adrian", "Adriana", "Adrianna", "Adriel", "Adrien", "Adrienne", "Afton", "Aglae", "Agnes", "Agustin", "Agustina", "Ahmad", "Ahmed", "Aida", "Aidan", "Aiden", "Aileen", "Aimee", "Aisha", "Aiyana", "Akeem", "Al", "Alaina", "Alan", "Alana", "Alanis", "Alanna", "Alayna", "Alba", "Albert", "Alberta", "Albertha", "Alberto", "Albin", "Albina", "Alda", "Alden", "Alec", "Aleen", "Alejandra", "Alejandrin", "Alek", "Alena", "Alene", "Alessandra", "Alessandro", "Alessia", "Aletha", "Alex", "Alexa", "Alexander", "Alexandra", "Alexandre", "Alexandrea", "Alexandria", "Alexandrine", "Alexandro", "Alexane", "Alexanne", "Alexie", "Alexis", "Alexys", "Alexzander", "Alf", "Alfonso", "Alfonzo", "Alford", "Alfred", "Alfreda", "Alfredo", "Ali", "Alia", "Alice", "Alicia", "Alisa", "Alisha", "Alison", "Alivia", "Aliya", "Aliyah", "Aliza", "Alize", "Allan", "Allen", "Allene", "Allie", "Allison", "Ally", "Alphonso", "Alta", 
"Althea", "Alva", "Alvah", "Alvena", "Alvera", "Alverta", "Alvina", "Alvis", "Alyce", "Alycia", "Alysa", "Alysha", "Alyson", "Alysson", "Amalia", "Amanda", "Amani", "Amara", "Amari", "Amaya", "Amber", "Ambrose", "Amelia", "Amelie", "Amely", "America", "Americo", "Amie", "Amina", "Amir", "Amira", "Amiya", "Amos", "Amparo", "Amy", "Amya", "Ana", "Anabel", "Anabelle", "Anahi", "Anais", "Anastacio", "Anastasia", "Anderson", "Andre", "Andreane", "Andreanne", "Andres", "Andrew", "Andy", "Angel", "Angela", "Angelica", "Angelina", "Angeline", "Angelita", "Angelo", "Angie", "Angus", "Anibal", "Anika", "Anissa", "Anita", "Aniya", "Aniyah", "Anjali", "Anna", "Annabel", "Annabell", "Annabelle", "Annalise", "Annamae", "Annamarie", "Anne", "Annetta", "Annette", "Annie", "Ansel", "Ansley", "Anthony", "Antoinette", "Antone", "Antonetta", "Antonette", "Antonia", "Antonietta", "Antonina", "Antonio", "Antwan", "Antwon", "Anya", "April", "Ara", "Araceli", "Aracely", "Arch", "Archibald", "Ardella", "Arden", "Ardith", "Arely", "Ari", "Ariane", "Arianna", "Aric", "Ariel", "Arielle", "Arjun", "Arlene", "Arlie", "Arlo", "Armand", "Armando", "Armani", "Arnaldo", "Arne", "Arno", "Arnold", "Arnoldo", "Arnulfo", "Aron", "Art", "Arthur", "Arturo", "Arvel", "Arvid", "Arvilla", "Aryanna", "Asa", "Asha", "Ashlee", "Ashleigh", "Ashley", "Ashly", "Ashlynn", "Ashton", "Ashtyn", "Asia", "Assunta", "Astrid", "Athena", "Aubree", "Aubrey", "Audie", "Audra", "Audreanne", "Audrey", "August", "Augusta", "Augustine", "Augustus", "Aurelia", "Aurelie", "Aurelio", "Aurore", "Austen", "Austin", "Austyn", "Autumn", "Ava", "Avery", "Avis", "Axel", "Ayana", "Ayden", "Ayla", "Aylin", "Baby", "Bailee", "Bailey", "Barbara", "Barney", "Baron", "Barrett", "Barry", "Bart", "Bartholome", "Barton", "Baylee", "Beatrice", "Beau", "Beaulah", "Bell", "Bella", "Belle", "Ben", "Benedict", "Benjamin", "Bennett", "Bennie", "Benny", "Benton", "Berenice", "Bernadette", "Bernadine", "Bernard", "Bernardo", "Berneice", "Bernhard", "Bernice", "Bernie", "Berniece", "Bernita", "Berry", "Bert", "Berta", "Bertha", "Bertram", "Bertrand", "Beryl", "Bessie", "Beth", "Bethany", "Bethel", "Betsy", "Bette", "Bettie", "Betty", "Bettye", "Beulah", "Beverly", "Bianka", "Bill", "Billie", "Billy", "Birdie", "Blair", "Blaise", "Blake", "Blanca", "Blanche", "Blaze", "Bo", "Bobbie", "Bobby", "Bonita", "Bonnie", "Boris", "Boyd", "Brad", "Braden", "Bradford", "Bradley", "Bradly", "Brady", "Braeden", "Brain", "Brandi", "Brando", "Brandon", "Brandt", "Brandy", "Brandyn", "Brannon", "Branson", "Brant", "Braulio", "Braxton", "Brayan", "Breana", "Breanna", "Breanne", "Brenda", "Brendan", "Brenden", "Brendon", "Brenna", "Brennan", "Brennon", "Brent", "Bret", "Brett", "Bria", "Brian", "Briana", "Brianne", "Brice", "Bridget", "Bridgette", "Bridie", "Brielle", "Brigitte", "Brionna", "Brisa", "Britney", "Brittany", "Brock", "Broderick", "Brody", "Brook", "Brooke", "Brooklyn", "Brooks", "Brown", "Bruce", "Bryana", "Bryce", "Brycen", "Bryon", "Buck", "Bud", "Buddy", "Buford", "Bulah", "Burdette", "Burley", "Burnice", "Buster", "Cade", "Caden", "Caesar", "Caitlyn", "Cale", "Caleb", "Caleigh", "Cali", "Calista", "Callie", "Camden", "Cameron", "Camila", "Camilla", "Camille", "Camren", "Camron", "Camryn", "Camylle", "Candace", "Candelario", "Candice", "Candida", "Candido", "Cara", "Carey", "Carissa", "Carlee", "Carleton", "Carley", "Carli", "Carlie", "Carlo", "Carlos", "Carlotta", "Carmel", "Carmela", "Carmella", "Carmelo", "Carmen", "Carmine", "Carol", "Carolanne", "Carole", "Carolina", 
"Caroline", "Carolyn", "Carolyne", "Carrie", "Carroll", "Carson", "Carter", "Cary", "Casandra", "Casey", "Casimer", "Casimir", "Casper", "Cassandra", "Cassandre", "Cassidy", "Cassie", "Catalina", "Caterina", "Catharine", "Catherine", "Cathrine", "Cathryn", "Cathy", "Cayla", "Ceasar", "Cecelia", "Cecil", "Cecile", "Cecilia", "Cedrick", "Celestine", "Celestino", "Celia", "Celine", "Cesar", "Chad", "Chadd", "Chadrick", "Chaim", "Chance", "Chandler", "Chanel", "Chanelle", "Charity", "Charlene", "Charles", "Charley", "Charlie", "Charlotte", "Chase", "Chasity", "Chauncey", "Chaya", "Chaz", "Chelsea", "Chelsey", "Chelsie", "Chesley", "Chester", "Chet", "Cheyanne", "Cheyenne", "Chloe", "Chris", "Christ", "Christa", "Christelle", "Christian", "Christiana", "Christina", "Christine", "Christop", "Christophe", "Christopher", "Christy", "Chyna", "Ciara", "Cicero", "Cielo", "Cierra", "Cindy", "Citlalli", "Clair", "Claire", "Clara", "Clarabelle", "Clare", "Clarissa", "Clark", "Claud", "Claude", "Claudia", "Claudie", "Claudine", "Clay", "Clemens", "Clement", "Clementina", "Clementine", "Clemmie", "Cleo", "Cleora", "Cleta", "Cletus", "Cleve", "Cleveland", "Clifford", "Clifton", "Clint", "Clinton", "Clotilde", "Clovis", "Cloyd", "Clyde", "Coby", "Cody", "Colby", "Cole", "Coleman", "Colin", "Colleen", "Collin", "Colt", "Colten", "Colton", "Columbus", "Concepcion", "Conner", "Connie", "Connor", "Conor", "Conrad", "Constance", "Constantin", "Consuelo", "Cooper", "Cora", "Coralie", "Corbin", "Cordelia", "Cordell", "Cordia", "Cordie", "Corene", "Corine", "Cornelius", "Cornell", "Corrine", "Cortez", "Cortney", "Cory", "Coty", "Courtney", "Coy", "Craig", "Crawford", "Creola", "Cristal", "Cristian", "Cristina", "Cristobal", "Cristopher", "Cruz", "Crystal", "Crystel", "Cullen", "Curt", "Curtis", "Cydney", "Cynthia", "Cyril", "Cyrus", "Dagmar", "Dahlia", "Daija", "Daisha", "Daisy", "Dakota", "Dale", "Dallas", "Dallin", "Dalton", "Damaris", "Dameon", "Damian", "Damien", "Damion", "Damon", "Dan", "Dana", "Dandre", "Dane", "Dangelo", "Dangelo", "Danial", "Daniela", "Daniella", "Danielle", "Danika", "Dannie", "Danny", "Dante", "Danyka", "Daphne", "Daphnee", "Daphney", "Darby", "Daren", "Darian", "Dariana", "Darien", "Dario", "Darion", "Darius", "Darlene", "Daron", "Darrel", "Darrell", "Darren", "Darrick", "Darrin", "Darrion", "Darron", "Darryl", "Darwin", "Daryl", "Dashawn", "Dasia", "Dave", "David", "Davin", "Davion", "Davon", "Davonte", "Dawn", "Dawson", "Dax", "Dayana", "Dayna", "Dayne", "Dayton", "Dean", "Deangelo", "Deanna", "Deborah", "Declan", "Dedric", "Dedrick", "Dee", "Deion", "Deja", "Dejah", "Dejon", "Dejuan", "Delaney", "Delbert", "Delfina", "Delia", "Delilah", "Dell", "Della", "Delmer", "Delores", "Delpha", "Delphia", "Delphine", "Delta", "Demarco", "Demarcus", "Demario", "Demetris", "Demetrius", "Demond", "Dena", "Denis", "Dennis", "Deon", "Deondre", "Deontae", "Deonte", "Dereck", "Derek", "Derick", "Deron", "Derrick", "Deshaun", "Deshawn", "Desiree", "Desmond", "Dessie", "Destany", "Destin", "Destinee", "Destiney", "Destini", "Destiny", "Devan", "Devante", "Deven", "Devin", "Devon", "Devonte", "Devyn", "Dewayne", "Dewitt", "Dexter", "Diamond", "Diana", "Dianna", "Diego", "Dillan", "Dillon", "Dimitri", "Dina", "Dino", "Dion", "Dixie", "Dock", "Dolly", "Dolores", "Domenic", "Domenica", "Domenick", "Domenico", "Domingo", "Dominic", "Dominique", "Don", "Donald", "Donato", "Donavon", "Donna", "Donnell", "Donnie", "Donny", "Dora", "Dorcas", "Dorian", "Doris", "Dorothea", "Dorothy", "Dorris", "Dortha", "Dorthy", 
"Doug", "Douglas", "Dovie", "Doyle", "Drake", "Drew", "Duane", "Dudley", "Dulce", "Duncan", "Durward", "Dustin", "Dusty", "Dwight", "Dylan", "Earl", "Earlene", "Earline", "Earnest", "Earnestine", "Easter", "Easton", "Ebba", "Ebony", "Ed", "Eda", "Edd", "Eddie", "Eden", "Edgar", "Edgardo", "Edison", "Edmond", "Edmund", "Edna", "Eduardo", "Edward", "Edwardo", "Edwin", "Edwina", "Edyth", "Edythe", "Effie", "Efrain", "Efren", "Eileen", "Einar", "Eino", "Eladio", "Elaina", "Elbert", "Elda", "Eldon", "Eldora", "Eldred", "Eldridge", "Eleanora", "Eleanore", "Eleazar", "Electa", "Elena", "Elenor", "Elenora", "Eleonore", "Elfrieda", "Eli", "Elian", "Eliane", "Elias", "Eliezer", "Elijah", "Elinor", "Elinore", "Elisa", "Elisabeth", "Elise", "Eliseo", "Elisha", "Elissa", "Eliza", "Elizabeth", "Ella", "Ellen", "Ellie", "Elliot", "Elliott", "Ellis", "Ellsworth", "Elmer", "Elmira", "Elmo", "Elmore", "Elna", "Elnora", "Elody", "Eloisa", "Eloise", "Elouise", "Eloy", "Elroy", "Elsa", "Else", "Elsie", "Elta", "Elton", "Elva", "Elvera", "Elvie", "Elvis", "Elwin", "Elwyn", "Elyse", "Elyssa", "Elza", "Emanuel", "Emelia", "Emelie", "Emely", "Emerald", "Emerson", "Emery", "Emie", "Emil", "Emile", "Emilia", "Emiliano", "Emilie", "Emilio", "Emily", "Emma", "Emmalee", "Emmanuel", "Emmanuelle", "Emmet", "Emmett", "Emmie", "Emmitt", "Emmy", "Emory", "Ena", "Enid", "Enoch", "Enola", "Enos", "Enrico", "Enrique", "Ephraim", "Era", "Eriberto", "Eric", "Erica", "Erich", "Erick", "Ericka", "Erik", "Erika", "Erin", "Erling", "Erna", "Ernest", "Ernestina", "Ernestine", "Ernesto", "Ernie", "Ervin", "Erwin", "Eryn", "Esmeralda", "Esperanza", "Esta", "Esteban", "Estefania", "Estel", "Estell", "Estella", "Estelle", "Estevan", "Esther", "Estrella", "Etha", "Ethan", "Ethel", "Ethelyn", "Ethyl", "Ettie", "Eudora", "Eugene", "Eugenia", "Eula", "Eulah", "Eulalia", "Euna", "Eunice", "Eusebio", "Eva", "Evalyn", "Evan", "Evangeline", "Evans", "Eve", "Eveline", "Evelyn", "Everardo", "Everett", "Everette", "Evert", "Evie", "Ewald", "Ewell", "Ezekiel", "Ezequiel", "Ezra", "Fabian", "Fabiola", "Fae", "Fannie", "Fanny", "Fatima", "Faustino", "Fausto", "Favian", "Fay", "Faye", "Federico", "Felicia", "Felicita", "Felicity", "Felipa", "Felipe", "Felix", "Felton", "Fermin", "Fern", "Fernando", "Ferne", "Fidel", "Filiberto", "Filomena", "Finn", "Fiona", "Flavie", "Flavio", "Fleta", "Fletcher", "Flo", "Florence", "Florencio", "Florian", "Florida", "Florine", "Flossie", "Floy", "Floyd", "Ford", "Forest", "Forrest", "Foster", "Frances", "Francesca", "Francesco", "Francis", "Francisca", "Francisco", "Franco", "Frank", "Frankie", "Franz", "Fred", "Freda", "Freddie", "Freddy", "Frederic", "Frederick", "Frederik", "Frederique", "Fredrick", "Fredy", "Freeda", "Freeman", "Freida", "Frida", "Frieda", "Friedrich", "Fritz", "Furman", "Gabe", "Gabriel", "Gabriella", "Gabrielle", "Gaetano", "Gage", "Gail", "Gardner", "Garett", "Garfield", "Garland", "Garnet", "Garnett", "Garret", "Garrett", "Garrick", "Garrison", "Garry", "Garth", "Gaston", "Gavin", "Gay", "Gayle", "Gaylord", "Gene", "General", "Genesis", "Genevieve", "Gennaro", "Genoveva", "Geo", "Geoffrey", "George", "Georgette", "Georgiana", "Georgianna", "Geovanni", "Geovanny", "Geovany", "Gerald", "Geraldine", "Gerard", "Gerardo", "Gerda", "Gerhard", "Germaine", "German", "Gerry", "Gerson", "Gertrude", "Gia", "Gianni", "Gideon", "Gilbert", "Gilberto", "Gilda", "Giles", "Gillian", "Gina", "Gino", "Giovani", "Giovanna", "Giovanni", "Giovanny", "Gisselle", "Giuseppe", "Gladyce", "Gladys", "Glen", "Glenda", 
"Glenna", "Glennie", "Gloria", "Godfrey", "Golda", "Golden", "Gonzalo", "Gordon", "Grace", "Gracie", "Graciela", "Grady", "Graham", "Grant", "Granville", "Grayce", "Grayson", "Green", "Greg", "Gregg", "Gregoria", "Gregorio", "Gregory", "Greta", "Gretchen", "Greyson", "Griffin", "Grover", "Guadalupe", "Gudrun", "Guido", "Guillermo", "Guiseppe", "Gunnar", "Gunner", "Gus", "Gussie", "Gust", "Gustave", "Guy", "Gwen", "Gwendolyn", "Hadley", "Hailee", "Hailey", "Hailie", "Hal", "Haleigh", "Haley", "Halie", "Halle", "Hallie", "Hank", "Hanna", "Hannah", "Hans", "Hardy", "Harley", "Harmon", "Harmony", "Harold", "Harrison", "Harry", "Harvey", "Haskell", "Hassan", "Hassie", "Hattie", "Haven", "Hayden", "Haylee", "Hayley", "Haylie", "Hazel", "Hazle", "Heath", "Heather", "Heaven", "Heber", "Hector", "Heidi", "Helen", "Helena", "Helene", "Helga", "Hellen", "Helmer", "Heloise", "Henderson", "Henri", "Henriette", "Henry", "Herbert", "Herman", "Hermann", "Hermina", "Herminia", "Herminio", "Hershel", "Herta", "Hertha", "Hester", "Hettie", "Hilario", "Hilbert", "Hilda", "Hildegard", "Hillard", "Hillary", "Hilma", "Hilton", "Hipolito", "Hiram", "Hobart", "Holden", "Hollie", "Hollis", "Holly", "Hope", "Horace", "Horacio", "Hortense", "Hosea", "Houston", "Howard", "Howell", "Hoyt", "Hubert", "Hudson", "Hugh", "Hulda", "Humberto", "Hunter", "Hyman", "Ian", "Ibrahim", "Icie", "Ida", "Idell", "Idella", "Ignacio", "Ignatius", "Ike", "Ila", "Ilene", "Iliana", "Ima", "Imani", "Imelda", "Immanuel", "Imogene", "Ines", "Irma", "Irving", "Irwin", "Isaac", "Isabel", "Isabell", "Isabella", "Isabelle", "Isac", "Isadore", "Isai", "Isaiah", "Isaias", "Isidro", "Ismael", "Isobel", "Isom", "Israel", "Issac", "Itzel", "Iva", "Ivah", "Ivory", "Ivy", "Izabella", "Izaiah", "Jabari", "Jace", "Jacey", "Jacinthe", "Jacinto", "Jack", "Jackeline", "Jackie", "Jacklyn", "Jackson", "Jacky", "Jaclyn", "Jacquelyn", "Jacques", "Jacynthe", "Jada", "Jade", "Jaden", "Jadon", "Jadyn", "Jaeden", "Jaida", "Jaiden", "Jailyn", "Jaime", "Jairo", "Jakayla", "Jake", "Jakob", "Jaleel", "Jalen", "Jalon", "Jalyn", "Jamaal", "Jamal", "Jamar", "Jamarcus", "Jamel", "Jameson", "Jamey", "Jamie", "Jamil", "Jamir", "Jamison", "Jammie", "Jan", "Jana", "Janae", "Jane", "Janelle", "Janessa", "Janet", "Janice", "Janick", "Janie", "Janis", "Janiya", "Jannie", "Jany", "Jaquan", "Jaquelin", "Jaqueline", "Jared", "Jaren", "Jarod", "Jaron", "Jarred", "Jarrell", "Jarret", "Jarrett", "Jarrod", "Jarvis", "Jasen", "Jasmin", "Jason", "Jasper", "Jaunita", "Javier", "Javon", "Javonte", "Jay", "Jayce", "Jaycee", "Jayda", "Jayde", "Jayden", "Jaydon", "Jaylan", "Jaylen", "Jaylin", "Jaylon", "Jayme", "Jayne", "Jayson", "Jazlyn", "Jazmin", "Jazmyn", "Jazmyne", "Jean", "Jeanette", "Jeanie", "Jeanne", "Jed", "Jedediah", "Jedidiah", "Jeff", "Jefferey", "Jeffery", "Jeffrey", "Jeffry", "Jena", "Jenifer", "Jennie", "Jennifer", "Jennings", "Jennyfer", "Jensen", "Jerad", "Jerald", "Jeramie", "Jeramy", "Jerel", "Jeremie", "Jeremy", "Jermain", "Jermaine", "Jermey", "Jerod", "Jerome", "Jeromy", "Jerrell", "Jerrod", "Jerrold", "Jerry", "Jess", "Jesse", "Jessica", "Jessie", "Jessika", "Jessy", "Jessyca", "Jesus", "Jett", "Jettie", "Jevon", "Jewel", "Jewell", "Jillian", "Jimmie", "Jimmy", "Jo", "Joan", "Joana", "Joanie", "Joanne", "Joannie", "Joanny", "Joany", "Joaquin", "Jocelyn", "Jodie", "Jody", "Joe", "Joel", "Joelle", "Joesph", "Joey", "Johan", "Johann", "Johanna", "Johathan", "John", "Johnathan", "Johnathon", "Johnnie", "Johnny", "Johnpaul", "Johnson", "Jolie", "Jon", "Jonas", "Jonatan", 
"Jonathan", "Jonathon", "Jordan", "Jordane", "Jordi", "Jordon", "Jordy", "Jordyn", "Jorge", "Jose", "Josefa", "Josefina", "Joseph", "Josephine", "Josh", "Joshua", "Joshuah", "Josiah", "Josiane", "Josianne", "Josie", "Josue", "Jovan", "Jovani", "Jovanny", "Jovany", "Joy", "Joyce", "Juana", "Juanita", "Judah", "Judd", "Jude", "Judge", "Judson", "Judy", "Jules", "Julia", "Julian", "Juliana", "Julianne", "Julie", "Julien", "Juliet", "Julio", "Julius", "June", "Junior", "Junius", "Justen", "Justice", "Justina", "Justine", "Juston", "Justus", "Justyn", "Juvenal", "Juwan", "Kacey", "Kaci", "Kacie", "Kade", "Kaden", "Kadin", "Kaela", "Kaelyn", "Kaia", "Kailee", "Kailey", "Kailyn", "Kaitlin", "Kaitlyn", "Kale", "Kaleb", "Kaleigh", "Kaley", "Kali", "Kallie", "Kameron", "Kamille", "Kamren", "Kamron", "Kamryn", "Kane", "Kara", "Kareem", "Karelle", "Karen", "Kari", "Kariane", "Karianne", "Karina", "Karine", "Karl", "Karlee", "Karley", "Karli", "Karlie", "Karolann", "Karson", "Kasandra", "Kasey", "Kassandra", "Katarina", "Katelin", "Katelyn", "Katelynn", "Katharina", "Katherine", "Katheryn", "Kathleen", "Kathlyn", "Kathryn", "Kathryne", "Katlyn", "Katlynn", "Katrina", "Katrine", "Kattie", "Kavon", "Kay", "Kaya", "Kaycee", "Kayden", "Kayla", "Kaylah", "Kaylee", "Kayleigh", "Kayley", "Kayli", "Kaylie", "Kaylin", "Keagan", "Keanu", "Keara", "Keaton", "Keegan", "Keeley", "Keely", "Keenan", "Keira", "Keith", "Kellen", "Kelley", "Kelli", "Kellie", "Kelly", "Kelsi", "Kelsie", "Kelton", "Kelvin", "Ken", "Kendall", "Kendra", "Kendrick", "Kenna", "Kennedi", "Kennedy", "Kenneth", "Kennith", "Kenny", "Kenton", "Kenya", "Kenyatta", "Kenyon", "Keon", "Keshaun", "Keshawn", "Keven", "Kevin", "Kevon", "Keyon", "Keyshawn", "Khalid", "Khalil", "Kian", "Kiana", "Kianna", "Kiara", "Kiarra", "Kiel", "Kiera", "Kieran", "Kiley", "Kim", "Kimberly", "King", "Kip", "Kira", "Kirk", "Kirsten", "Kirstin", "Kitty", "Kobe", "Koby", "Kody", "Kolby", "Kole", "Korbin", "Korey", "Kory", "Kraig", "Kris", "Krista", "Kristian", "Kristin", "Kristina", "Kristofer", "Kristoffer", "Kristopher", "Kristy", "Krystal", "Krystel", "Krystina", "Kurt", "Kurtis", "Kyla", "Kyle", "Kylee", "Kyleigh", "Kyler", "Kylie", "Kyra", "Lacey", "Lacy", "Ladarius", "Lafayette", "Laila", "Laisha", "Lamar", "Lambert", "Lamont", "Lance", "Landen", "Lane", "Laney", "Larissa", "Laron", "Larry", "Larue", "Laura", "Laurel", "Lauren", "Laurence", "Lauretta", "Lauriane", "Laurianne", "Laurie", "Laurine", "Laury", "Lauryn", "Lavada", "Lavern", "Laverna", "Laverne", "Lavina", "Lavinia", "Lavon", "Lavonne", "Lawrence", "Lawson", "Layla", "Layne", "Lazaro", "Lea", "Leann", "Leanna", "Leanne", "Leatha", "Leda", "Lee", "Leif", "Leila", "Leilani", "Lela", "Lelah", "Leland", "Lelia", "Lempi", "Lemuel", "Lenna", "Lennie", "Lenny", "Lenora", "Lenore", "Leo", "Leola", "Leon", "Leonard", "Leonardo", "Leone", "Leonel", "Leonie", "Leonor", "Leonora", "Leopold", "Leopoldo", "Leora", "Lera", "Lesley", "Leslie", "Lesly", "Lessie", "Lester", "Leta", "Letha", "Letitia", "Levi", "Lew", "Lewis", "Lexi", "Lexie", "Lexus", "Lia", "Liam", "Liana", "Libbie", "Libby", "Lila", "Lilian", "Liliana", "Liliane", "Lilla", "Lillian", "Lilliana", "Lillie", "Lilly", "Lily", "Lilyan", "Lina", "Lincoln", "Linda", "Lindsay", "Lindsey", "Linnea", "Linnie", "Linwood", "Lionel", "Lisa", "Lisandro", "Lisette", "Litzy", "Liza", "Lizeth", "Lizzie", "Llewellyn", "Lloyd", "Logan", "Lois", "Lola", "Lolita", "Loma", "Lon", "London", "Lonie", "Lonnie", "Lonny", "Lonzo", "Lora", "Loraine", "Loren", "Lorena", "Lorenz", 
"Lorenza", "Lorenzo", "Lori", "Lorine", "Lorna", "Lottie", "Lou", "Louie", "Louisa", "Lourdes", "Louvenia", "Lowell", "Loy", "Loyal", "Loyce", "Lucas", "Luciano", "Lucie", "Lucienne", "Lucile", "Lucinda", "Lucio", "Lucious", "Lucius", "Lucy", "Ludie", "Ludwig", "Lue", "Luella", "Luigi", "Luis", "Luisa", "Lukas", "Lula", "Lulu", "Luna", "Lupe", "Lura", "Lurline", "Luther", "Luz", "Lyda", "Lydia", "Lyla", "Lynn", "Lyric", "Lysanne", "Mabel", "Mabelle", "Mable", "Mac", "Macey", "Maci", "Macie", "Mack", "Mackenzie", "Macy", "Madaline", "Madalyn", "Maddison", "Madeline", "Madelyn", "Madelynn", "Madge", "Madie", "Madilyn", "Madisen", "Madison", "Madisyn", "Madonna", "Madyson", "Mae", "Maegan", "Maeve", "Mafalda", "Magali", "Magdalen", "Magdalena", "Maggie", "Magnolia", "Magnus", "Maia", "Maida", "Maiya", "Major", "Makayla", "Makenna", "Makenzie", "Malachi", "Malcolm", "Malika", "Malinda", "Mallie", "Mallory", "Malvina", "Mandy", "Manley", "Manuel", "Manuela", "Mara", "Marc", "Marcel", "Marcelina", "Marcelino", "Marcella", "Marcelle", "Marcellus", "Marcelo", "Marcia", "Marco", "Marcos", "Marcus", "Margaret", "Margarete", "Margarett", "Margaretta", "Margarette", "Margarita", "Marge", "Margie", "Margot", "Margret", "Marguerite", "Maria", "Mariah", "Mariam", "Marian", "Mariana", "Mariane", "Marianna", "Marianne", "Mariano", "Maribel", "Marie", "Mariela", "Marielle", "Marietta", "Marilie", "Marilou", "Marilyne", "Marina", "Mario", "Marion", "Marisa", "Marisol", "Maritza", "Marjolaine", "Marjorie", "Marjory", "Mark", "Markus", "Marlee", "Marlen", "Marlene", "Marley", "Marlin", "Marlon", "Marques", "Marquis", "Marquise", "Marshall", "Marta", "Martin", "Martina", "Martine", "Marty", "Marvin", "Mary", "Maryam", "Maryjane", "Maryse", "Mason", "Mateo", "Mathew", "Mathias", "Mathilde", "Matilda", "Matilde", "Matt", "Matteo", "Mattie", "Maud", "Maude", "Maudie", "Maureen", "Maurice", "Mauricio", "Maurine", "Maverick", "Mavis", "Max", "Maxie", "Maxime", "Maximilian", "Maximillia", "Maximillian", "Maximo", "Maximus", "Maxine", "Maxwell", "May", "Maya", "Maybell", "Maybelle", "Maye", "Maymie", "Maynard", "Mayra", "Mazie", "Mckayla", "Mckenna", "Mckenzie", "Meagan", "Meaghan", "Meda", "Megane", "Meggie", "Meghan", "Mekhi", "Melany", "Melba", "Melisa", "Melissa", "Mellie", "Melody", "Melvin", "Melvina", "Melyna", "Melyssa", "Mercedes", "Meredith", "Merl", "Merle", "Merlin", "Merritt", "Mertie", "Mervin", "Meta", "Mia", "Micaela", "Micah", "Michael", "Michaela", "Michale", "Micheal", "Michel", "Michele", "Michelle", "Miguel", "Mikayla", "Mike", "Mikel", "Milan", "Miles", "Milford", "Miller", "Millie", "Milo", "Milton", "Mina", "Minerva", "Minnie", "Miracle", "Mireille", "Mireya", "Misael", "Missouri", "Misty", "Mitchel", "Mitchell", "Mittie", "Modesta", "Modesto", "Mohamed", "Mohammad", "Mohammed", "Moises", "Mollie", "Molly", "Mona", "Monica", "Monique", "Monroe", "Monserrat", "Monserrate", "Montana", "Monte", "Monty", "Morgan", "Moriah", "Morris", "Mortimer", "Morton", "Mose", "Moses", "Moshe", "Mossie", "Mozell", "Mozelle", "Muhammad", "Muriel", "Murl", "Murphy", "Murray", "Mustafa", "Mya", "Myah", "Mylene", "Myles", "Myra", "Myriam", "Myrl", "Myrna", "Myron", "Myrtice", "Myrtie", "Myrtis", "Myrtle", "Nadia", "Nakia", "Name", "Nannie", "Naomi", "Naomie", "Napoleon", "Narciso", "Nash", "Nasir", "Nat", "Natalia", "Natalie", "Natasha", "Nathan", "Nathanael", "Nathanial", "Nathaniel", "Nathen", "Nayeli", "Neal", "Ned", "Nedra", "Neha", "Neil", "Nelda", "Nella", "Nelle", "Nellie", "Nels", "Nelson", "Neoma", 
"Nestor", "Nettie", "Neva", "Newell", "Newton", "Nia", "Nicholas", "Nicholaus", "Nichole", "Nick", "Nicklaus", "Nickolas", "Nico", "Nicola", "Nicolas", "Nicole", "Nicolette", "Nigel", "Nikita", "Nikki", "Nikko", "Niko", "Nikolas", "Nils", "Nina", "Noah", "Noble", "Noe", "Noel", "Noelia", "Noemi", "Noemie", "Noemy", "Nola", "Nolan", "Nona", "Nora", "Norbert", "Norberto", "Norene", "Norma", "Norris", "Norval", "Norwood", "Nova", "Novella", "Nya", "Nyah", "Nyasia", "Obie", "Oceane", "Ocie", "Octavia", "Oda", "Odell", "Odessa", "Odie", "Ofelia", "Okey", "Ola", "Olaf", "Ole", "Olen", "Oleta", "Olga", "Olin", "Oliver", "Ollie", "Oma", "Omari", "Omer", "Ona", "Onie", "Opal", "Ophelia", "Ora", "Oral", "Oran", "Oren", "Orie", "Orin", "Orion", "Orland", "Orlando", "Orlo", "Orpha", "Orrin", "Orval", "Orville", "Osbaldo", "Osborne", "Oscar", "Osvaldo", "Oswald", "Oswaldo", "Otha", "Otho", "Otilia", "Otis", "Ottilie", "Ottis", "Otto", "Ova", "Owen", "Ozella", "Pablo", "Paige", "Palma", "Pamela", "Pansy", "Paolo", "Paris", "Parker", "Pascale", "Pasquale", "Pat", "Patience", "Patricia", "Patrick", "Patsy", "Pattie", "Paul", "Paula", "Pauline", "Paxton", "Payton", "Pearl", "Pearlie", "Pearline", "Pedro", "Peggie", "Penelope", "Percival", "Percy", "Perry", "Pete", "Peter", "Petra", "Peyton", "Philip", "Phoebe", "Phyllis", "Pierce", "Pierre", "Pietro", "Pink", "Pinkie", "Piper", "Polly", "Porter", "Precious", "Presley", "Preston", "Price", "Prince", "Princess", "Priscilla", "Providenci", "Prudence", "Queen", "Queenie", "Quentin", "Quincy", "Quinn", "Quinten", "Quinton", "Rachael", "Rachel", "Rachelle", "Rae", "Raegan", "Rafael", "Rafaela", "Raheem", "Rahsaan", "Rahul", "Raina", "Raleigh", "Ralph", "Ramiro", "Ramon", "Ramona", "Randal", "Randall", "Randi", "Randy", "Ransom", "Raoul", "Raphael", "Raphaelle", "Raquel", "Rashad", "Rashawn", "Rasheed", "Raul", "Raven", "Ray", "Raymond", "Raymundo", "Reagan", "Reanna", "Reba", "Rebeca", "Rebecca", "Rebeka", "Rebekah", "Reece", "Reed", "Reese", "Regan", "Reggie", "Reginald", "Reid", "Reilly", "Reina", "Reinhold", "Remington", "Rene", "Renee", "Ressie", "Reta", "Retha", "Retta", "Reuben", "Reva", "Rex", "Rey", "Reyes", "Reymundo", "Reyna", "Reynold", "Rhea", "Rhett", "Rhianna", "Rhiannon", "Rhoda", "Ricardo", "Richard", "Richie", "Richmond", "Rick", "Rickey", "Rickie", "Ricky", "Rico", "Rigoberto", "Riley", "Rita", "River", "Robb", "Robbie", "Robert", "Roberta", "Roberto", "Robin", "Robyn", "Rocio", "Rocky", "Rod", "Roderick", "Rodger", "Rodolfo", "Rodrick", "Rodrigo", "Roel", "Rogelio", "Roger", "Rogers", "Rolando", "Rollin", "Roma", "Romaine", "Roman", "Ron", "Ronaldo", "Ronny", "Roosevelt", "Rory", "Rosa", "Rosalee", "Rosalia", "Rosalind", "Rosalinda", "Rosalyn", "Rosamond", "Rosanna", "Rosario", "Roscoe", "Rose", "Rosella", "Roselyn", "Rosemarie", "Rosemary", "Rosendo", "Rosetta", "Rosie", "Rosina", "Roslyn", "Ross", "Rossie", "Rowan", "Rowena", "Rowland", "Roxane", "Roxanne", "Roy", "Royal", "Royce", "Rozella", "Ruben", "Rubie", "Ruby", "Rubye", "Rudolph", "Rudy", "Rupert", "Russ", "Russel", "Russell", "Rusty", "Ruth", "Ruthe", "Ruthie", "Ryan", "Ryann", "Ryder", "Rylan", "Rylee", "Ryleigh", "Ryley", "Sabina", "Sabrina", "Sabryna", "Sadie", "Sadye", "Sage", "Saige", "Sallie", "Sally", "Salma", "Salvador", "Salvatore", "Sam", "Samanta", "Samantha", "Samara", "Samir", "Sammie", "Sammy", "Samson", "Sandra", "Sandrine", "Sandy", "Sanford", "Santa", "Santiago", "Santina", "Santino", "Santos", "Sarah", "Sarai", "Sarina", "Sasha", "Saul", "Savanah", "Savanna", 
"Savannah", "Savion", "Scarlett", "Schuyler", "Scot", "Scottie", "Scotty", "Seamus", "Sean", "Sebastian", "Sedrick", "Selena", "Selina", "Selmer", "Serena", "Serenity", "Seth", "Shad", "Shaina", "Shakira", "Shana", "Shane", "Shanel", "Shanelle", "Shania", "Shanie", "Shaniya", "Shanna", "Shannon", "Shanny", "Shanon", "Shany", "Sharon", "Shaun", "Shawn", "Shawna", "Shaylee", "Shayna", "Shayne", "Shea", "Sheila", "Sheldon", "Shemar", "Sheridan", "Sherman", "Sherwood", "Shirley", "Shyann", "Shyanne", "Sibyl", "Sid", "Sidney", "Sienna", "Sierra", "Sigmund", "Sigrid", "Sigurd", "Silas", "Sim", "Simeon", "Simone", "Sincere", "Sister", "Skye", "Skyla", "Skylar", "Sofia", "Soledad", "Solon", "Sonia", "Sonny", "Sonya", "Sophia", "Sophie", "Spencer", "Stacey", "Stacy", "Stan", "Stanford", "Stanley", "Stanton", "Stefan", "Stefanie", "Stella", "Stephan", "Stephania", "Stephanie", "Stephany", "Stephen", "Stephon", "Sterling", "Steve", "Stevie", "Stewart", "Stone", "Stuart", "Summer", "Sunny", "Susan", "Susana", "Susanna", "Susie", "Suzanne", "Sven", "Syble", "Sydnee", "Sydney", "Sydni", "Sydnie", "Sylvan", "Sylvester", "Sylvia", "Tabitha", "Tad", "Talia", "Talon", "Tamara", "Tamia", "Tania", "Tanner", "Tanya", "Tara", "Taryn", "Tate", "Tatum", "Tatyana", "Taurean", "Tavares", "Taya", "Taylor", "Teagan", "Ted", "Telly", "Terence", "Teresa", "Terrance", "Terrell", "Terrence", "Terrill", "Terry", "Tess", "Tessie", "Tevin", "Thad", "Thaddeus", "Thalia", "Thea", "Thelma", "Theo", "Theodora", "Theodore", "Theresa", "Therese", "Theresia", "Theron", "Thomas", "Thora", "Thurman", "Tia", "Tiana", "Tianna", "Tiara", "Tierra", "Tiffany", "Tillman", "Timmothy", "Timmy", "Timothy", "Tina", "Tito", "Titus", "Tobin", "Toby", "Tod", "Tom", "Tomas", "Tomasa", "Tommie", "Toney", "Toni", "Tony", "Torey", "Torrance", "Torrey", "Toy", "Trace", "Tracey", "Tracy", "Travis", "Travon", "Tre", "Tremaine", "Tremayne", "Trent", "Trenton", "Tressa", "Tressie", "Treva", "Trever", "Trevion", "Trevor", "Trey", "Trinity", "Trisha", "Tristian", "Tristin", "Triston", "Troy", "Trudie", "Trycia", "Trystan", "Turner", "Twila", "Tyler", "Tyra", "Tyree", "Tyreek", "Tyrel", "Tyrell", "Tyrese", "Tyrique", "Tyshawn", "Tyson", "Ubaldo", "Ulices", "Ulises", "Una", "Unique", "Urban", "Uriah", "Uriel", "Ursula", "Vada", "Valentin", "Valentina", "Valentine", "Valerie", "Vallie", "Van", "Vance", "Vanessa", "Vaughn", "Veda", "Velda", "Vella", "Velma", "Velva", "Vena", "Verda", "Verdie", "Vergie", "Verla", "Verlie", "Vern", "Verna", "Verner", "Vernice", "Vernie", "Vernon", "Verona", "Veronica", "Vesta", "Vicenta", "Vicente", "Vickie", "Vicky", "Victor", "Victoria", "Vida", "Vidal", "Vilma", "Vince", "Vincent", "Vincenza", "Vincenzo", "Vinnie", "Viola", "Violet", "Violette", "Virgie", "Virgil", "Virginia", "Virginie", "Vita", "Vito", "Viva", "Vivian", "Viviane", "Vivianne", "Vivien", "Vivienne", "Vladimir", "Wade", "Waino", "Waldo", "Walker", "Wallace", "Walter", "Walton", "Wanda", "Ward", "Warren", "Watson", "Wava", "Waylon", "Wayne", "Webster", "Weldon", "Wellington", "Wendell", "Wendy", "Werner", "Westley", "Weston", "Whitney", "Wilber", "Wilbert", "Wilburn", "Wiley", "Wilford", "Wilfred", "Wilfredo", "Wilfrid", "Wilhelm", "Wilhelmine", "Will", "Willa", "Willard", "William", "Willie", "Willis", "Willow", "Willy", "Wilma", "Wilmer", "Wilson", "Wilton", "Winfield", "Winifred", "Winnifred", "Winona", "Winston", "Woodrow", "Wyatt", "Wyman", "Xander", "Xavier", "Xzavier", "Yadira", "Yasmeen", "Yasmin", "Yasmine", "Yazmin", "Yesenia", "Yessenia", "Yolanda", 
"Yoshiko", "Yvette", "Yvonne", "Zachariah", "Zachary", "Zachery", "Zack", "Zackary", "Zackery", "Zakary", "Zander", "Zane", "Zaria", "Zechariah", "Zelda", "Zella", "Zelma", "Zena", "Zetta", "Zion", "Zita", "Zoe", "Zoey", "Zoie", "Zoila", "Zola", "Zora", "Zula"}, + "middle": {"Abdul", "Abdullah", "Abigail", "Ada", "Adam", "Adelaide", "Adele", "Adelina", "Adrian", "Adriana", "Agnes", "Agnolo", "Ahmed", "Aida", "Aileen", "Aimee", "Akilesh", "Akio", "Alan", "Alana", "Alejandro", "Alex", "Ali", "Alice", "Alicia", "Alina", "Alison", "Alita", "Allegretta", "Alonzo", "Alyssa", "Aman", "Amara", "Amelda", "Amelia", "Amenra", "Amina", "Amir", "Amitabh", "Amy", "Ana", "Anastasia", "André", "Andrea", "Andrei", "Andrew", "Andy", "Angel", "Angela", "Anita", "Ann", "Anna", "Anne", "Annette", "Anthony", "Antioco", "Antonio", "Arduino", "Aria", "Ariana", "Ariel", "Aris", "Arjun", "Armando", "Asha", "Ashton", "Asong", "Athena", "Audrey", "August", "Aura", "Aurelia", "Austen", "Ava", "Avery", "Avril", "Badru", "Bailey", "Bakul", "Baldwin", "Bao", "Barack", "Bear", "Beatrice", "Beau", "Belinda", "Bella", "Belle", "Ben", "Benjamin", "Bertha", "Beverly", "Bharati", "Bhoja", "Bhuma", "Bianca", "Bird", "Birdie", "Bishvajit", "Bjorn", "Blair", "Blake", "Blanca", "Bliss", "Blue", "Bo", "Bobbie", "Bonnie", "Boris", "Bradley", "Brandt", "Braulia", "Breck", "Bree", "Brett", "Brianna", "Bridget", "Brie", "Brielle", "Brittany", "Brizio", "Brook", "Brooke", "Brooks", "Bruce", "Bryce", "Bryn", "Brynn", "Burke", "Cajetan", "Calvin", "Cameron", "Camilla", "Candice", "Carla", "Carlos", "Carmen", "Caroline", "Carson", "Casey", "Cash", "Cassandra", "Cassidy", "Catherine", "Cecelia", "Cecilia", "Cedric", "Celeste", "Celia", "Celso", "Chahna", "Chance", "Chander", "Chandler", "Chang", "Charles", "Charlie", "Charlotte", "Chen", "Chintak", "Chloe", "Chris", "Christine", "Chung", "Cimeron", "Cindy", "Ciprianna", "Ciro", "Claire", "Clara", "Clarissa", "Clark", "Clarke", "Claude", "Claudia", "Clay", "Clementine", "Clint", "Cody", "Cole", "Colette", "Cora", "Cordelia", "Corey", "Corinne", "Cory", "Cosme", "Courtney", "Cree", "Crew", "Cynthia", "Cyprienne", "Cyrus", "Daan", "Dada", "Daisy", "Dakota", "Dale", "Damodar", "Dan", "Dana", "Dane", "Daniel", "Danielle", "Danveer", "Daphne", "Darla", "David", "Davide", "Dawn", "Dax", "Dean", "Deborah", "Delilah", "Denise", "Denver", "Deshal", "Deshawn", "Dev", "Devin", "Dhavala", "Diana", "Diane", "Diego", "Dmitri", "Dolores", "Dolorita", "Donato", "Dong", "Donna", "Donte", "Donya", "Dora", "Doris", "Dorothy", "Drake", "Drew", "Dru", "Dylan", "Ean", "Edith", "Eduardo", "Edward", "Eila", "Eileen", "Elaine", "Elda", "Eleanor", "Elena", "Eliana", "Elias", "Elise", "Eliza", "Elizabeth", "Ella", "Elle", "Ellen", "Ellie", "Ellis", "Eloise", "Elsa", "Elsie", "Em", "Emerson", "Emery", "Emilie", "Emilio", "Emily", "Emma", "Emmett", "Enrico", "Enrique", "Epifania", "Erica", "Erik", "Erin", "Eroica", "Esperanza", "Estelle", "Esther", "Etta", "Ettore", "Eva", "Evan", "Eve", "Evelyn", "Everett", "Faith", "Farid", "Faye", "Federico", "Felicity", "Felipe", "Felix", "Fern", "Fernando", "Finley", "Finn", "Fiona", "Fitz", "Flint", "Flora", "Florence", "Flynn", "Folke", "Fonzo", "Fox", "Frances", "Francis", "Francisco", "Francois", "François", "Frank", "Frankie", "Freya", "Fumio", "Fynn", "Gabriel", "Gabriella", "Gael", "Gage", "Gail", "Gemma", "Genevieve", "George", "Georgia", "Geraldine", "Giannino", "Ginetta", "Gioia", "Giselle", "Giuseppe", "Giustino", "Glenn", "Gloria", "Glory", "Grace", "Grant", "Gray", 
"Greer", "Greta", "Guido", "Guillermo", "Gulshan", "Gus", "Gwen", "Gyula", "Hank", "Hannah", "Hans", "Harley", "Harper", "Harriet", "Harrison", "Harshad", "Haruki", "Hayden", "Hayes", "Haze", "Hazel", "Heath", "Heather", "Hector", "Helen", "Helena", "Henry", "Hideki", "Hidetoshi", "Himesh", "Hiro", "Hiroaki", "Hirofumi", "Hirokazu", "Hiroshi", "Hiroto", "Hiroyuki", "Holly", "Honor", "Hope", "Hugh", "Hugo", "Hunter", "Ida", "Ignacio", "Imogen", "Ingrid", "Irene", "Iris", "Isaac", "Isabel", "Isabella", "Isabelle", "Ivan", "Ivy", "Jace", "Jack", "Jacqueline", "Jade", "Jaden", "Jae", "Jai", "Jaime", "Jamal", "James", "Jamie", "Jan", "Janak", "Jane", "Janet", "Janice", "Jasmine", "Jasper", "Javier", "Jax", "Jay", "Jayden", "Jayne", "Jean", "Jeanne", "Jed", "Jenna", "Jennifer", "Jesse", "Jessica", "Jill", "Jin", "Joan", "Joanna", "João", "Jocelyn", "Jodi", "Jody", "Joe", "Joey", "Johanna", "Johar", "John", "Jolene", "Jordan", "Jorge", "Jose", "José", "Joseph", "Josephine", "Josie", "Joy", "Joyce", "Juan", "Juanita", "Judd", "Jude", "Judith", "Jules", "Julia", "Julian", "Juliana", "Julianne", "Julie", "June", "Justine", "Kael", "Kai", "Kane", "Karen", "Kate", "Katherine", "Kathleen", "Kathryn", "Katie", "Katrina", "Kay", "Kayla", "Kazuki", "Keira", "Kelly", "Kelsey", "Kendall", "Kendra", "Kennedy", "Kent", "Kenta", "Kerry", "Khaled", "Khloe", "Kiara", "Kim", "Kimberly", "Kit", "Kiyoshi", "Klaus", "Knight", "Knox", "Koen", "Koi", "Koichi", "Koji", "Kolt", "Kristen", "Kristina", "Kurt", "Kwame", "Kye", "Kylie", "Lacey", "Laine", "Lake", "Lakshman", "Lalika", "Lane", "Lark", "Lars", "Laurel", "Layne", "Lee", "Leif", "Lennon", "Leo", "Leon", "Leslie", "Liam", "Liberty", "Lilian", "Lillian", "Lillie", "Link", "Liz", "Locke", "Logan", "Lona", "Lorena", "Lorenzo", "Lou", "Louise", "Love", "Lucia", "Lucy", "Luis", "Luiz", "Luke", "Lupita", "Lux", "Luz", "Lydia", "Lynn", "Mabel", "Mac", "Mack", "Mackenzie", "Madeline", "Madison", "Madona", "Mae", "Mael", "Makoto", "Manuel", "Manuela", "Maple", "Marc", "Marco", "Margaret", "Margo", "Margot", "Maria", "Mariano", "Maricela", "Marilyn", "Mario", "Mark", "Marley", "Mars", "Marti", "Mary", "Mason", "Matthew", "Mavis", "Max", "May", "Mazie", "Mei", "Melody", "Mercy", "Merle", "Micah", "Michael", "Miguel", "Mina", "Ming", "Mohamed", "Mollie", "Monroe", "Morgan", "Muhammad", "Musetta", "Myra", "Nadine", "Naomi", "Nardo", "Nat", "Natalie", "Neal", "Neil", "Nellie", "Nerola", "Nevada", "Neve", "Nikolai", "Niles", "Noel", "Nola", "Nora", "Nuru", "Oakley", "Olive", "Oliver", "Opal", "Orazio", "Ortensa", "Ortensia", "Osamu", "Oscar", "Otto", "Pablo", "Paige", "Pancho", "Paris", "Parker", "Pat", "Patrick", "Paul", "Pauli", "Pax", "Peace", "Pearl", "Pedro", "Penelope", "Penn", "Penny", "Peter", "Petra", "Peyton", "Phoenix", "Pierce", "Pierre", "Pilar", "Porter", "Praise", "Pratap", "Presley", "Priscilla", "Quinn", "Rachanna", "Radames", "Rae", "Rafael", "Rain", "Raine", "Ramiro", "Ramon", "Ramona", "Raphael", "Raul", "Ravi", "Ray", "Rayne", "Reagan", "Reece", "Reed", "Reese", "Rei", "Reid", "Reilly", "Remy", "Ren", "Reyes", "Rhodes", "Ricardo", "Richard", "Riley", "Rita", "River", "Rivera", "Roan", "Robert", "Roberto", "Robin", "Robt", "Rodrigo", "Roma", "Romelia", "Rory", "Rosa", "Rosalee", "Rosalie", "Rosalynn", "Rosario", "Rose", "Ross", "Rowan", "Ruben", "Ruby", "Rue", "Rush", "Russell", "Ruth", "Ryan", "Saad", "Saariq", "Sade", "Sadie", "Sagara", "Sage", "Saige", "Saint", "Salvadora", "Sam", "Samir", "Samuel", "Sante", "Santiago", "Sara", "Sasha", "Satoshi", 
"Scott", "Sean", "Sebastian", "Sergei", "Sergio", "Seth", "Shae", "Shai", "Shane", "Shannon", "Shashi", "Shaun", "Shawn", "Shawnee", "Shay", "Shea", "Shelby", "Shin", "Sidney", "Simon", "Sky", "Skye", "Skyler", "Sol", "Sophie", "Spencer", "Star", "Starr", "Stella", "Steve", "Stevie", "Storm", "Susan", "Sven", "Sybil", "Sydney", "Tahj", "Takashi", "Takeshi", "Taryn", "Tatum", "Taylor", "Teagan", "Terry", "Tess", "Thea", "Theodore", "Thomas", "Tilly", "Timothy", "Tosca", "Trent", "Tripp", "Tristan", "Truth", "Tyler", "Tyrone", "Uberto", "Ursus", "Val", "Vandelia", "Vaughn", "Vera", "Vernon", "Verona", "Vianna", "Victoria", "Vida", "Vieda", "Vince", "Vincent", "Violet", "Virginia", "Vivian", "Vladimir", "Wade", "Wayne", "Wes", "Wesley", "West", "Whitney", "Will", "Willa", "William", "Willie", "Winston", "Winter", "Wolf", "Wren", "Wynn", "Xavier", "Yasuo", "Yoel", "Yolanda", "Yoshi", "Yoshiaki", "Yoshihiro", "Yoshiki", "Yoshinori", "Yoshio", "Yusuf", "Yutaka", "Zain", "Zane", "Zayd", "Zelda", "Zeus", "Zev", "Zhang", "Zhen", "Zola", "Zora", "Zuni"}, "last": {"Abbott", "Abernathy", "Abshire", "Adams", "Altenwerth", "Anderson", "Ankunding", "Armstrong", "Auer", "Aufderhar", "Bahringer", "Bailey", "Balistreri", "Barrows", "Bartell", "Bartoletti", "Barton", "Bashirian", "Batz", "Bauch", "Baumbach", "Bayer", "Beahan", "Beatty", "Bechtelar", "Becker", "Bednar", "Beer", "Beier", "Berge", "Bergnaum", "Bergstrom", "Bernhard", "Bernier", "Bins", "Blanda", "Blick", "Block", "Bode", "Boehm", "Bogan", "Bogisich", "Borer", "Bosco", "Botsford", "Boyer", "Boyle", "Bradtke", "Brakus", "Braun", "Breitenberg", "Brekke", "Brown", "Bruen", "Buckridge", "Carroll", "Carter", "Cartwright", "Casper", "Cassin", "Champlin", "Christiansen", "Cole", "Collier", "Collins", "Conn", "Connelly", "Conroy", "Considine", "Corkery", "Cormier", "Corwin", "Cremin", "Crist", "Crona", "Cronin", "Crooks", "Cruickshank", "Cummerata", "Cummings", "Dach", "Damore", "Daniel", "Dare", "Daugherty", "Davis", "Deckow", "Denesik", "Dibbert", "Dickens", "Dicki", "Dickinson", "Dietrich", "Donnelly", "Dooley", "Douglas", "Doyle", "DuBuque", "Durgan", "Ebert", "Effertz", "Eichmann", "Emard", "Emmerich", "Erdman", "Ernser", "Fadel", "Fahey", "Farrell", "Fay", "Feeney", "Feest", "Feil", "Ferry", "Fisher", "Flatley", "Frami", "Franecki", "Friesen", "Fritsch", "Funk", "Gaylord", "Gerhold", "Gerlach", "Gibson", "Gislason", "Gleason", "Gleichner", "Glover", "Goldner", "Goodwin", "Gorczany", "Gottlieb", "Goyette", "Grady", "Graham", "Grant", "Green", "Greenfelder", "Greenholt", "Grimes", "Gulgowski", "Gusikowski", "Gutkowski", "Gutmann", "Haag", "Hackett", "Hagenes", "Hahn", "Haley", "Halvorson", "Hamill", "Hammes", "Hand", "Hane", "Hansen", "Harber", "Harris", "Hartmann", "Harvey", "Hauck", "Hayes", "Heaney", "Heathcote", "Hegmann", "Heidenreich", "Heller", "Herman", "Hermann", "Hermiston", "Herzog", "Hessel", "Hettinger", "Hickle", "Hilll", "Hills", "Hilpert", "Hintz", "Hirthe", "Hodkiewicz", "Hoeger", "Homenick", "Hoppe", "Howe", "Howell", "Hudson", "Huel", "Huels", "Hyatt", "Jacobi", "Jacobs", "Jacobson", "Jakubowski", "Jaskolski", "Jast", "Jenkins", "Jerde", "Jewess", "Johns", "Johnson", "Johnston", "Jones", "Kassulke", "Kautzer", "Keebler", "Keeling", "Kemmer", "Kerluke", "Kertzmann", "Kessler", "Kiehn", "Kihn", "Kilback", "King", "Kirlin", "Klein", "Kling", "Klocko", "Koch", "Koelpin", "Koepp", "Kohler", "Konopelski", "Koss", "Kovacek", "Kozey", "Krajcik", "Kreiger", "Kris", "Kshlerin", "Kub", "Kuhic", "Kuhlman", "Kuhn", "Kulas", "Kunde", "Kunze", 
"Kuphal", "Kutch", "Kuvalis", "Labadie", "Lakin", "Lang", "Langosh", "Langworth", "Larkin", "Larson", "Leannon", "Lebsack", "Ledner", "Leffler", "Legros", "Lehner", "Lemke", "Lesch", "Leuschke", "Lind", "Lindgren", "Littel", "Little", "Lockman", "Lowe", "Lubowitz", "Lueilwitz", "Luettgen", "Lynch", "Macejkovic", "Maggio", "Mann", "Mante", "Marks", "Marquardt", "Marvin", "Mayer", "Mayert", "McClure", "McCullough", "McDermott", "McGlynn", "McKenzie", "McLaughlin", "Medhurst", "Mertz", "Metz", "Miller", "Mills", "Mitchell", "Moen", "Mohr", "Monahan", "Moore", "Morar", "Morissette", "Mosciski", "Mraz", "Mueller", "Muller", "Murazik", "Murphy", "Murray", "Nader", "Nicolas", "Nienow", "Nikolaus", "Nitzsche", "Nolan", "Oberbrunner", "Okuneva", "Olson", "Ondricka", "OReilly", "Orn", "Ortiz", "Osinski", "Pacocha", "Padberg", "Pagac", "Parisian", "Parker", "Paucek", "Pfannerstill", "Pfeffer", "Pollich", "Pouros", "Powlowski", "Predovic", "Price", "Prohaska", "Prosacco", "Purdy", "Quigley", "Quitzon", "Rath", "Ratke", "Rau", "Raynor", "Reichel", "Reichert", "Reilly", "Reinger", "Rempel", "Renner", "Reynolds", "Rice", "Rippin", "Ritchie", "Robel", "Roberts", "Rodriguez", "Rogahn", "Rohan", "Rolfson", "Romaguera", "Roob", "Rosenbaum", "Rowe", "Ruecker", "Runolfsdottir", "Runolfsson", "Runte", "Russel", "Rutherford", "Ryan", "Sanford", "Satterfield", "Sauer", "Sawayn", "Schaden", "Schaefer", "Schamberger", "Schiller", "Schimmel", "Schinner", "Schmeler", "Schmidt", "Schmitt", "Schneider", "Schoen", "Schowalter", "Schroeder", "Schulist", "Schultz", "Schumm", "Schuppe", "Schuster", "Senger", "Shanahan", "Shields", "Simonis", "Sipes", "Skiles", "Smith", "Smitham", "Spencer", "Spinka", "Sporer", "Stamm", "Stanton", "Stark", "Stehr", "Steuber", "Stiedemann", "Stokes", "Stoltenberg", "Stracke", "Streich", "Stroman", "Strosin", "Swaniawski", "Swift", "Terry", "Thiel", "Thompson", "Tillman", "Torp", "Torphy", "Towne", "Toy", "Trantow", "Tremblay", "Treutel", "Tromp", "Turcotte", "Turner", "Ullrich", "Upton", "Vandervort", "Veum", "Volkman", "Von", "VonRueden", "Waelchi", "Walker", "Walsh", "Walter", "Ward", "Waters", "Watsica", "Weber", "Wehner", "Weimann", "Weissnat", "Welch", "West", "White", "Wiegand", "Wilderman", "Wilkinson", "Will", "Williamson", "Willms", "Windler", "Wintheiser", "Wisoky", "Wisozk", "Witting", "Wiza", "Wolf", "Wolff", "Wuckert", "Wunsch", "Wyman", "Yost", "Yundt", "Zboncak", "Zemlak", "Ziemann", "Zieme", "Zulauf"}, "hobby": {"3D printing", "Acrobatics", "Acting", "Amateur radio", "Animation", "Aquascaping", "Astrology", "Astronomy", "Baking", "Baton twirling", "Blogging", "Building", "Board/tabletop games", "Book discussion clubs", "Book restoration", "Bowling", "Brazilian jiu-jitsu", "Breadmaking", "Bullet journaling", "Cabaret", "Calligraphy", "Candle making", "Candy making", "Car fixing & building", "Card games", "Cheesemaking", "Cleaning", "Clothesmaking", "Coffee roasting", "Collecting", "Coloring", "Computer programming", "Confectionery", "Cooking", "Cosplaying", "Couponing", "Craft", "Creative writing", "Crocheting", "Cross-stitch", "Crossword puzzles", "Cryptography", "Cue sports", "Dance", "Digital arts", "Distro Hopping", "DJing", "Do it yourself", "Drama", "Drawing", "Drink mixing", "Drinking", "Electronic games", "Electronics", "Embroidery", "Experimenting", "Fantasy sports", "Fashion", "Fashion design", "Fishkeeping", "Filmmaking", "Flower arranging", "Fly tying", "Foreign language learning", "Furniture building", "Gaming", "Genealogy", "Gingerbread house making", 
"Glassblowing", "Graphic design", "Gunsmithing", "Gymnastics", "Hacking", "Herp keeping", "Home improvement", "Homebrewing", "Houseplant care", "Hula hooping", "Humor", "Hydroponics", "Ice skating", "Jewelry making", "Jigsaw puzzles", "Journaling", "Juggling", "Karaoke", "Karate", "Kendama", "Knife making", "Knitting", "Knot tying", "Kombucha brewing", "Lace making", "Lapidary", "Leather crafting", "Lego building", "Lock picking", "Listening to music", "Listening to podcasts", "Machining", "Macrame", "Magic", "Makeup", "Mazes (indoor/outdoor)", "Metalworking", "Model building", "Model engineering", "Nail art", "Needlepoint", "Origami", "Painting", "Palmistry", "Pet adoption & fostering", "Philately", "Photography", "Practical jokes", "Pressed flower craft", "Playing musical instruments", "Poi", "Pottery", "Powerlifting", "Puzzles", "Quilling", "Quilting", "Quizzes", "Radio-controlled model", "Rail transport modeling", "Rapping", "Reading", "Refinishing", "Reiki", "Robot combat", "Rubik's Cube", "Scrapbooking", "Sculpting", "Sewing", "Shoemaking", "Singing", "Sketching", "Skipping rope", "Slot car", "Soapmaking", "Social media", "Spreadsheets", "Stand-up comedy", "Stamp collecting", "Table tennis", "Tarot", "Taxidermy", "Thrifting", "Video editing", "Video game developing", "Video gaming", "Watching movies", "Watching television", "Videography", "Virtual reality", "Waxing", "Weaving", "Weight training", "Welding", "Whittling", "Wikipedia editing", "Winemaking", "Wood carving", "Woodworking", "Worldbuilding", "Writing", "Word searches", "Yo-yoing", "Yoga", "Zumba", "Amusement park visiting", "Air sports", "Airsoft", "Amateur geology", "Archery", "Astronomy", "Backpacking", "Badminton", "BASE jumping", "Baseball", "Basketball", "Beekeeping", "Birdwatching", "Blacksmithing", "BMX", "Board sports", "Bodybuilding", "Bonsai", "Butterfly watching", "Bus riding", "Camping", "Canoeing", "Canyoning", "Car riding", "Caving", "Composting", "Cycling", "Dowsing", "Driving", "Farming", "Fishing", "Flag football", "Flower growing", "Flying", "Flying disc", "Foraging", "Fossicking", "Freestyle football", "Gardening", "Geocaching", "Ghost hunting", "Gold prospecting", "Graffiti", "Handball", "Herbalism", "Herping", "High-power rocketry", "Hiking", "Hobby horsing", "Hobby tunneling", "Hooping", "Horseback riding", "Hunting", "Inline skating", "Jogging", "Jumping rope", "Kayaking", "Kite flying", "Kitesurfing", "Lacrosse", "LARPing", "Letterboxing", "Longboarding", "Martial arts", "Metal detecting", "Meteorology", "Motor sports", "Mountain biking", "Mountaineering", "Museum visiting", "Mushroom hunting", "Netball", "Nordic skating", "Orienteering", "Paintball", "Parkour", "Photography", "Podcast hosting", "Polo", "Public transport riding", "Rafting", "Railway journeys", "Rappelling", "Road biking", "Rock climbing", "Roller skating", "Rugby", "Running", "Radio-controlled model", "Sailing", "Sand art", "Scouting", "Scuba diving", "Sculling", "Shooting", "Shopping", "Shuffleboard", "Skateboarding", "Skiing", "Skimboarding", "Skydiving", "Slacklining", "Snowboarding", "Snowmobiling", "Snowshoeing", "Soccer", "Stone skipping", "Sun bathing", "Surfing", "Survivalism", "Swimming", "Taekwondo", "Tai chi", "Tennis", "Topiary", "Tourism", "Thru-hiking", "Trade fair visiting", "Travel", "Urban exploration", "Vacation", "Vegetable farming", "Videography", "Vehicle restoration", "Walking", "Water sports", "Astronomy", "Biology", "Chemistry", "Electrochemistry", "Physics", "Psychology", "Sports science", "Geography", 
"History", "Mathematics", "Railway studies", "Action figure", "Antiquing", "Ant-keeping", "Art collecting", "Book collecting", "Button collecting", "Cartophily", "Coin collecting", "Comic book collecting", "Deltiology", "Die-cast toy", "Digital hoarding", "Dolls", "Element collecting", "Ephemera collecting", "Fusilately", "Knife collecting", "Lotology", "Movie and movie memorabilia collecting", "Fingerprint collecting", "Perfume", "Phillumeny", "Radio-controlled model", "Rail transport modelling", "Record collecting", "Rock tumbling", "Scutelliphily", "Shoes", "Slot car", "Sports memorabilia", "Stamp collecting", "Stuffed toy collecting", "Tea bag collecting", "Ticket collecting", "Toys", "Transit map collecting", "Video game collecting", "Vintage cars", "Vintage clothing", "Vinyl Records", "Antiquities", "Auto audiophilia", "Flower collecting and pressing", "Fossil hunting", "Insect collecting", "Magnet fishing", "Metal detecting", "Mineral collecting", "Rock balancing", "Sea glass collecting", "Seashell collecting", "Stone collecting", "Animal fancy", "Axe throwing", "Backgammon", "Badminton", "Baton twirling", "Beauty pageants", "Billiards", "Bowling", "Boxing", "Bridge", "Checkers (draughts)", "Cheerleading", "Chess", "Color guard", "Cribbage", "Curling", "Dancing", "Darts", "Debate", "Dominoes", "Eating", "Esports", "Fencing", "Go", "Gymnastics", "Ice hockey", "Ice skating", "Judo", "Jujitsu", "Kabaddi", "Knowledge/word games", "Laser tag", "Longboarding", "Mahjong", "Marbles", "Martial arts", "Model United Nations", "Poker", "Pool", "Role-playing games", "Shogi", "Slot car racing", "Speedcubing", "Sport stacking", "Table football", "Table tennis", "Volleyball", "Weightlifting", "Wrestling", "Airsoft", "Archery", "Association football", "Australian rules football", "Auto racing", "Baseball", "Beach volleyball", "Breakdancing", "Climbing", "Cricket", "Croquet", "Cycling", "Disc golf", "Dog sport", "Equestrianism", "Exhibition drill", "Field hockey", "Figure skating", "Fishing", "Footbag", "Frisbee", "Golfing", "Handball", "Horseback riding", "Horseshoes", "Iceboat racing", "Jukskei", "Kart racing", "Knife throwing", "Lacrosse", "Longboarding", "Long-distance running", "Marching band", "Model aircraft", "Orienteering", "Pickleball", "Quidditch", "Race walking", "Racquetball", "Radio-controlled car racing", "Roller derby", "Rugby league football", "Sculling", "Shooting sport", "Skateboarding", "Skiing", "Sled dog racing", "Softball", "Speed skating", "Squash", "Surfing", "Swimming", "Table tennis", "Tennis", "Tennis polo", "Tether car", "Tour skating", "Tourism", "Trapshooting", "Triathlon", "Ultimate frisbee", "Volleyball", "Water polo", "Fishkeeping", "Learning", "Meditation", "Microscopy", "Reading", "Research", "Shortwave listening", "Audiophile", "Aircraft spotting", "Amateur astronomy", "Birdwatching", "Bus spotting", "Geocaching", "Gongoozling", "Herping", "Hiking", "Meteorology", "Photography", "Satellite watching", "Trainspotting", "Whale watching"}, "phone": {"###-###-####", "(###)###-####", "1-###-###-####", "###.###.####"}, diff -Nru temporal-1.21.5-1/src/vendor/github.com/brianvoe/gofakeit/v6/fakeable.go temporal-1.22.5/src/vendor/github.com/brianvoe/gofakeit/v6/fakeable.go --- temporal-1.21.5-1/src/vendor/github.com/brianvoe/gofakeit/v6/fakeable.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/brianvoe/gofakeit/v6/fakeable.go 2024-02-23 09:46:09.000000000 +0000 @@ -0,0 +1,80 @@ +package gofakeit + +import ( + "errors" + "fmt" + "reflect" +) + 
+// Fakeable is an interface that can be implemented by a type to provide a custom fake value. +type Fakeable interface { + // Fake returns a fake value for the type. + Fake(faker *Faker) interface{} +} + +func isFakeable(t reflect.Type) bool { + fakeableTyp := reflect.TypeOf((*Fakeable)(nil)).Elem() + + return t.Implements(fakeableTyp) || reflect.PtrTo(t).Implements(fakeableTyp) +} + +func callFake(faker *Faker, v reflect.Value, possibleKinds ...reflect.Kind) (interface{}, error) { + f, ok := v.Addr().Interface().(Fakeable) + if !ok { + return nil, errors.New("not a Fakeable type") + } + + fakedValue := f.Fake(faker) + k := reflect.TypeOf(fakedValue).Kind() + if !containsKind(possibleKinds, k) { + return nil, fmt.Errorf("returned value kind %q is not amongst the valid ones: %v", k, possibleKinds) + } + + switch k { + case reflect.String: + return reflect.ValueOf(fakedValue).String(), nil + case reflect.Bool: + return reflect.ValueOf(fakedValue).Bool(), nil + case reflect.Int: + return int(reflect.ValueOf(fakedValue).Int()), nil + case reflect.Int8: + return int8(reflect.ValueOf(fakedValue).Int()), nil + case reflect.Int16: + return int16(reflect.ValueOf(fakedValue).Int()), nil + case reflect.Int32: + return int32(reflect.ValueOf(fakedValue).Int()), nil + case reflect.Int64: + return int64(reflect.ValueOf(fakedValue).Int()), nil + case reflect.Uint: + return uint(reflect.ValueOf(fakedValue).Uint()), nil + case reflect.Uint8: + return uint8(reflect.ValueOf(fakedValue).Uint()), nil + case reflect.Uint16: + return uint16(reflect.ValueOf(fakedValue).Uint()), nil + case reflect.Uint32: + return uint32(reflect.ValueOf(fakedValue).Uint()), nil + case reflect.Uint64: + return uint64(reflect.ValueOf(fakedValue).Uint()), nil + case reflect.Float32: + return float32(reflect.ValueOf(fakedValue).Float()), nil + case reflect.Float64: + return float64(reflect.ValueOf(fakedValue).Float()), nil + case reflect.Slice: + return reflect.ValueOf(fakedValue).Interface(), nil + case reflect.Map: + return reflect.ValueOf(fakedValue).Interface(), nil + case reflect.Struct: + return reflect.ValueOf(fakedValue).Interface(), nil + default: + return nil, fmt.Errorf("unsupported type %q", k) + } +} + +func containsKind(possibleKinds []reflect.Kind, kind reflect.Kind) bool { + for _, k := range possibleKinds { + if k == kind { + return true + } + } + return false +} diff -Nru temporal-1.21.5-1/src/vendor/github.com/brianvoe/gofakeit/v6/finance.go temporal-1.22.5/src/vendor/github.com/brianvoe/gofakeit/v6/finance.go --- temporal-1.21.5-1/src/vendor/github.com/brianvoe/gofakeit/v6/finance.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/brianvoe/gofakeit/v6/finance.go 2024-02-23 09:46:09.000000000 +0000 @@ -0,0 +1,128 @@ +package gofakeit + +import ( + "math/rand" + "strconv" + "unicode" +) + +const cusipStr = upperStr + numericStr + +// CUSIP +func Cusip() string { + return cusip(globalFaker.Rand) +} + +func (f *Faker) Cusip() string { + return cusip(f.Rand) +} + +func cusip(r *rand.Rand) string { + cusipBytes := make([]byte, 8) + for i := 0; i < len(cusipBytes); i++ { + cusipBytes[i] = byte(cusipStr[r.Intn(len(cusipStr))]) + } + + baseCusip := string(cusipBytes) + + chkDigit := cusipChecksumDigit(baseCusip) + return baseCusip + chkDigit +} + +// ISIN +func Isin() string { + return isin(globalFaker.Rand) +} + +func (f *Faker) Isin() string { + return isin(f.Rand) +} + +func isin(r *rand.Rand) string { + countryCode := CountryAbr() + nsin := cusip(r) + isinChkDig := 
isinChecksumDigit(countryCode + nsin) + return countryCode + nsin + isinChkDig +} + +// cusipChecksumDigit returns the checksum digit for a CUSIP +func cusipChecksumDigit(cusip string) string { + sum := 0 + for i, c := range cusip { + v := 0 + if unicode.IsDigit(c) { + v = int(c - '0') + } + if unicode.IsLetter(c) { + //0-indexed ordinal position of Letter + 10 + v = int(c-'A') + 10 + } + if i%2 != 0 { + // Multiply odd digits by two + v = v * 2 + } + + sum = sum + int(v/10) + v%10 + } + + return strconv.Itoa((10 - (sum % 10)) % 10) +} + +// isinChecksumDigit returns the checksum digit for an ISIN +func isinChecksumDigit(isin string) string { + isinDigits := make([]int, 0) + for _, c := range isin { + if unicode.IsLetter(c) { + letterVal := int(c) - 55 + // Each digit is added as a separate value + isinDigits = append(isinDigits, letterVal/10) + isinDigits = append(isinDigits, letterVal%10) + } + if unicode.IsDigit(c) { + isinDigits = append(isinDigits, int(c-'0')) + } + } + + oddSum := 0 + evenSum := 0 + + // Take the per digit sum of the digitized ISIN, doubling even indexed digits + for i, d := range isinDigits { + if i%2 == 0 { + elem := 2 * d + if elem > 9 { + // If the element now has two digits, sum those digits + elem = (elem % 10) + (elem / 10) + } + evenSum += elem + } else { + oddSum += d + } + } + + return strconv.Itoa((10 - (oddSum+evenSum)%10) % 10) +} + +// Lookup Adds +func addFinanceLookup() { + AddFuncLookup("cusip", Info{ + Display: "CUSIP", + Category: "finance", + Description: "Random CUSIP", + Example: "38259P508", + Output: "string", + Generate: func(r *rand.Rand, m *MapParams, info *Info) (interface{}, error) { + return cusip(r), nil + }, + }) + AddFuncLookup("isin", Info{ + Display: "ISIN", + Category: "finance", + Description: "Random ISIN", + Example: "", + Output: "string", + Generate: func(r *rand.Rand, m *MapParams, info *Info) (interface{}, error) { + return isin(r), nil + }, + }) +} diff -Nru temporal-1.21.5-1/src/vendor/github.com/brianvoe/gofakeit/v6/json.go temporal-1.22.5/src/vendor/github.com/brianvoe/gofakeit/v6/json.go --- temporal-1.21.5-1/src/vendor/github.com/brianvoe/gofakeit/v6/json.go 2023-09-29 14:03:30.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/brianvoe/gofakeit/v6/json.go 2024-02-23 09:46:09.000000000 +0000 @@ -4,14 +4,17 @@ "bytes" "encoding/json" "errors" + "fmt" "math/rand" + "reflect" + "strconv" ) // JSONOptions defines values needed for json generation type JSONOptions struct { - Type string `json:"type" xml:"type"` // array or object - RowCount int `json:"row_count" xml:"row_count"` - Fields []Field `json:"fields" xml:"fields"` + Type string `json:"type" xml:"type" fake:"{randomstring:[array,object]}"` // array or object + RowCount int `json:"row_count" xml:"row_count" fake:"{number:1,10}"` + Fields []Field `json:"fields" xml:"fields" fake:"{internal_exampleFields}"` Indent bool `json:"indent" xml:"indent"` } @@ -54,14 +57,24 @@ return buf.Bytes(), nil } -// JSON generates an object or an array of objects in json format -func JSON(jo *JSONOptions) ([]byte, error) { return jsonFunc(globalFaker.Rand, jo) } +// JSON generates an object or an array of objects in json format. +// A nil JSONOptions returns a randomly structured JSON. +func JSON(jo *JSONOptions) ([]byte, error) { return jsonFunc(globalFaker, jo) } + +// JSON generates an object or an array of objects in json format. +// A nil JSONOptions returns a randomly structured JSON. 
+func (f *Faker) JSON(jo *JSONOptions) ([]byte, error) { return jsonFunc(f, jo) } // JSON generates an object or an array of objects in json format -func (f *Faker) JSON(jo *JSONOptions) ([]byte, error) { return jsonFunc(f.Rand, jo) } +func jsonFunc(f *Faker, jo *JSONOptions) ([]byte, error) { + if jo == nil { + // We didn't get a JSONOptions, so create a new random one + err := f.Struct(&jo) + if err != nil { + return nil, err + } + } -// JSON generates an object or an array of objects in json format -func jsonFunc(r *rand.Rand, jo *JSONOptions) ([]byte, error) { // Check to make sure they passed in a type if jo.Type != "array" && jo.Type != "object" { return nil, errors.New("invalid type, must be array or object") @@ -89,7 +102,7 @@ } // Call function value - value, err := funcInfo.Generate(r, &field.Params, funcInfo) + value, err := funcInfo.Generate(f.Rand, &field.Params, funcInfo) if err != nil { return nil, err } @@ -143,7 +156,7 @@ } // Call function value - value, err := funcInfo.Generate(r, &field.Params, funcInfo) + value, err := funcInfo.Generate(f.Rand, &field.Params, funcInfo) if err != nil { return nil, err } @@ -234,7 +247,89 @@ } jo.Indent = indent - return jsonFunc(r, &jo) + f := &Faker{Rand: r} + return jsonFunc(f, &jo) }, }) } + +// encoding/json.RawMessage is a special case of []byte +// it cannot be handled as a reflect.Array/reflect.Slice +// because it needs additional structure in the output +func rJsonRawMessage(f *Faker, t reflect.Type, v reflect.Value, tag string, size int) error { + b, err := f.JSON(nil) + if err != nil { + return err + } + + v.SetBytes(b) + return nil +} + +// encoding/json.Number is a special case of string +// that represents a JSON number literal. +// It cannot be handled as a string because it needs to +// represent an integer or a floating-point number. 
+func rJsonNumber(f *Faker, t reflect.Type, v reflect.Value, tag string, size int) error { + var ret json.Number + + var numberType string + + if tag == "" { + numberType = f.RandomString([]string{"int", "float"}) + + switch numberType { + case "int": + retInt := f.Int16() + ret = json.Number(strconv.Itoa(int(retInt))) + case "float": + retFloat := f.Float64() + ret = json.Number(strconv.FormatFloat(retFloat, 'f', -1, 64)) + } + } else { + fName, fParams := parseNameAndParamsFromTag(tag) + info := GetFuncLookup(fName) + if info == nil { + return fmt.Errorf("invalid function, %s does not exist", fName) + } + + // Parse map params + mapParams := parseMapParams(info, fParams) + + valueIface, err := info.Generate(f.Rand, mapParams, info) + if err != nil { + return err + } + + switch value := valueIface.(type) { + case int: + ret = json.Number(strconv.FormatInt(int64(value), 10)) + case int8: + ret = json.Number(strconv.FormatInt(int64(value), 10)) + case int16: + ret = json.Number(strconv.FormatInt(int64(value), 10)) + case int32: + ret = json.Number(strconv.FormatInt(int64(value), 10)) + case int64: + ret = json.Number(strconv.FormatInt(int64(value), 10)) + case uint: + ret = json.Number(strconv.FormatUint(uint64(value), 10)) + case uint8: + ret = json.Number(strconv.FormatUint(uint64(value), 10)) + case uint16: + ret = json.Number(strconv.FormatUint(uint64(value), 10)) + case uint32: + ret = json.Number(strconv.FormatUint(uint64(value), 10)) + case uint64: + ret = json.Number(strconv.FormatUint(uint64(value), 10)) + case float32: + ret = json.Number(strconv.FormatFloat(float64(value), 'f', -1, 64)) + case float64: + ret = json.Number(strconv.FormatFloat(float64(value), 'f', -1, 64)) + default: + return fmt.Errorf("invalid type, %s is not a valid type for json.Number", reflect.TypeOf(value)) + } + } + v.Set(reflect.ValueOf(ret)) + return nil +} diff -Nru temporal-1.21.5-1/src/vendor/github.com/brianvoe/gofakeit/v6/lookup.go temporal-1.22.5/src/vendor/github.com/brianvoe/gofakeit/v6/lookup.go --- temporal-1.21.5-1/src/vendor/github.com/brianvoe/gofakeit/v6/lookup.go 2023-09-29 14:03:30.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/brianvoe/gofakeit/v6/lookup.go 2024-02-23 09:46:09.000000000 +0000 @@ -5,6 +5,7 @@ "fmt" "math/rand" "reflect" + "sort" "strconv" "strings" "sync" @@ -14,6 +15,60 @@ var FuncLookups map[string]Info var lockFuncLookups sync.Mutex +// internalFuncLookups is the internal map array with mapping to all available data +var internalFuncLookups map[string]Info = map[string]Info{ + "internal_exampleFields": { + Description: "Example fields for generating csv, json and xml", + Output: "gofakeit.Field", + Generate: func(r *rand.Rand, m *MapParams, info *Info) (interface{}, error) { + name, _ := getRandomFuncLookup(r, excludeWithParams, + validTypes("string", "int", "[]string", "[]int")) + return Field{ + Name: name, + Function: name, + }, nil + }, + }, +} + +// filterFuncLookup returns true when the lookup should be accepted +type filterFuncLookup func(Info) bool + +var ( + excludeWithParams filterFuncLookup = func(info Info) bool { + return len(info.Params) == 0 + } + + validTypes = func(acceptedTypes ...string) filterFuncLookup { + return func(info Info) bool { + for _, t := range acceptedTypes { + if info.Output == t { + return true + } + } + return false + } + } +) + +func getRandomFuncLookup(r *rand.Rand, filters ...filterFuncLookup) (string, Info) { + var keys []string + for k, v := range FuncLookups { + isValid := true + for _, filter := range filters { + 
isValid = isValid && filter(v) + } + if isValid { + keys = append(keys, k) + } + } + + sort.Stable(sort.StringSlice(keys)) + + selected := keys[r.Intn(len(keys))] + return selected, FuncLookups[selected] +} + // MapParams is the values to pass into a lookup generate type MapParams map[string]MapParamsValue @@ -98,6 +153,9 @@ addDatabaseSQLLookup() addErrorLookup() addHtmlLookup() + addFinanceLookup() + addBookLookup() + addMovieLookup() } // NewMapParams will create a new MapParams @@ -170,6 +228,10 @@ // AddFuncLookup takes a field and adds it to map func AddFuncLookup(functionName string, info Info) { + if _, ok := internalFuncLookups[functionName]; ok { + panic(fmt.Sprintf("Function %s is used internally and cannot be overwritten", functionName)) + } + if FuncLookups == nil { FuncLookups = make(map[string]Info) } @@ -186,16 +248,28 @@ // GetFuncLookup will lookup func GetFuncLookup(functionName string) *Info { - info, ok := FuncLookups[functionName] - if !ok { - return nil + var info Info + var ok bool + + info, ok = internalFuncLookups[functionName] + if ok { + return &info + } + + info, ok = FuncLookups[functionName] + if ok { + return &info } - return &info + return nil } // RemoveFuncLookup will remove a function from lookup func RemoveFuncLookup(functionName string) { + if _, ok := internalFuncLookups[functionName]; ok { + panic(fmt.Sprintf("Function %s is used internally and cannot be overwritten", functionName)) + } + _, ok := FuncLookups[functionName] if !ok { return diff -Nru temporal-1.21.5-1/src/vendor/github.com/brianvoe/gofakeit/v6/movie.go temporal-1.22.5/src/vendor/github.com/brianvoe/gofakeit/v6/movie.go --- temporal-1.21.5-1/src/vendor/github.com/brianvoe/gofakeit/v6/movie.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/brianvoe/gofakeit/v6/movie.go 2024-02-23 09:46:09.000000000 +0000 @@ -0,0 +1,66 @@ +package gofakeit + +import "math/rand" + +func MovieName() string { return movieName(globalFaker.Rand) } + +func (f *Faker) MovieName() string { return movieName(f.Rand) } + +func movieName(r *rand.Rand) string { return getRandValue(r, []string{"movie", "name"}) } + +func MovieGenre() string { return movieGenre(globalFaker.Rand) } + +func (f *Faker) MovieGenre() string { return movieGenre(f.Rand) } + +func movieGenre(r *rand.Rand) string { return getRandValue(r, []string{"movie", "genre"}) } + +type MovieInfo struct { + Name string `json:"name" xml:"name"` + Genre string `json:"genre" xml:"genre"` +} + +func Movie() *MovieInfo { return movie(globalFaker.Rand) } + +func (f *Faker) Movie() *MovieInfo { return movie(f.Rand) } + +func movie(r *rand.Rand) *MovieInfo { + return &MovieInfo{ + Name: movieName(r), + Genre: movieGenre(r), + } +} + +func addMovieLookup() { + AddFuncLookup("movie", Info{ + Display: "Movie", + Category: "movie", + Description: "Random Movie data set", + Example: `{name: "The Matrix", genre: "Action"}`, + Output: "map[string]string", + Generate: func(r *rand.Rand, m *MapParams, info *Info) (interface{}, error) { + return movie(r), nil + }, + }) + + AddFuncLookup("moviename", Info{ + Display: "Movie Name", + Category: "movie", + Description: "Random movie name", + Example: "The Matrix", + Output: "string", + Generate: func(r *rand.Rand, m *MapParams, info *Info) (interface{}, error) { + return movieName(r), nil + }, + }) + + AddFuncLookup("moviegenre", Info{ + Display: "Genre", + Category: "movie", + Description: "Random movie genre", + Example: "Action", + Output: "string", + Generate: func(r *rand.Rand, m *MapParams, 
info *Info) (interface{}, error) { + return movieGenre(r), nil + }, + }) +} diff -Nru temporal-1.21.5-1/src/vendor/github.com/brianvoe/gofakeit/v6/person.go temporal-1.22.5/src/vendor/github.com/brianvoe/gofakeit/v6/person.go --- temporal-1.21.5-1/src/vendor/github.com/brianvoe/gofakeit/v6/person.go 2023-09-29 14:03:30.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/brianvoe/gofakeit/v6/person.go 2024-02-23 09:46:09.000000000 +0000 @@ -60,6 +60,14 @@ func firstName(r *rand.Rand) string { return getRandValue(r, []string{"person", "first"}) } +// MiddleName will generate a random middle name +func MiddleName() string { return middleName(globalFaker.Rand) } + +// MiddleName will generate a random middle name +func (f *Faker) MiddleName() string { return middleName(f.Rand) } + +func middleName(r *rand.Rand) string { return getRandValue(r, []string{"person", "middle"}) } + // LastName will generate a random last name func LastName() string { return lastName(globalFaker.Rand) } @@ -287,6 +295,17 @@ }, }) + AddFuncLookup("middlename", Info{ + Display: "Middle Name", + Category: "person", + Description: "Random middle name", + Example: "Belinda", + Output: "string", + Generate: func(r *rand.Rand, m *MapParams, info *Info) (interface{}, error) { + return middleName(r), nil + }, + }) + AddFuncLookup("lastname", Info{ Display: "Last Name", Category: "person", diff -Nru temporal-1.21.5-1/src/vendor/github.com/brianvoe/gofakeit/v6/slice.go temporal-1.22.5/src/vendor/github.com/brianvoe/gofakeit/v6/slice.go --- temporal-1.21.5-1/src/vendor/github.com/brianvoe/gofakeit/v6/slice.go 2023-09-29 14:03:30.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/brianvoe/gofakeit/v6/slice.go 2024-02-23 09:46:09.000000000 +0000 @@ -1,16 +1,15 @@ package gofakeit import ( - "math/rand" "reflect" ) // Slice fills built-in types and exported fields of a struct with random data. -func Slice(v interface{}) { sliceFunc(globalFaker.Rand, v) } +func Slice(v interface{}) { sliceFunc(globalFaker, v) } // Slice fills built-in types and exported fields of a struct with random data. -func (f *Faker) Slice(v interface{}) { sliceFunc(f.Rand, v) } +func (f *Faker) Slice(v interface{}) { sliceFunc(f, v) } -func sliceFunc(ra *rand.Rand, v interface{}) { - r(ra, reflect.TypeOf(v), reflect.ValueOf(v), "", -1) +func sliceFunc(f *Faker, v interface{}) { + r(f, reflect.TypeOf(v), reflect.ValueOf(v), "", -1) } diff -Nru temporal-1.21.5-1/src/vendor/github.com/brianvoe/gofakeit/v6/struct.go temporal-1.22.5/src/vendor/github.com/brianvoe/gofakeit/v6/struct.go --- temporal-1.21.5-1/src/vendor/github.com/brianvoe/gofakeit/v6/struct.go 2023-09-29 14:03:30.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/brianvoe/gofakeit/v6/struct.go 2024-02-23 09:46:09.000000000 +0000 @@ -2,7 +2,7 @@ import ( "errors" - "math/rand" + "fmt" "reflect" "strconv" "strings" @@ -10,62 +10,82 @@ ) // Struct fills in exported fields of a struct with random data -// based on the value of `fake` tag of exported fields. +// based on the value of `fake` tag of exported fields +// or with the result of a call to the Fake() method +// if the field type implements `Fakeable`. // Use `fake:"skip"` to explicitly skip an element. // All built-in types are supported, with templating support // for string types. 
-func Struct(v interface{}) error { return structFunc(globalFaker.Rand, v) } +func Struct(v interface{}) error { return structFunc(globalFaker, v) } // Struct fills in exported fields of a struct with random data // based on the value of `fake` tag of exported fields. // Use `fake:"skip"` to explicitly skip an element. // All built-in types are supported, with templating support // for string types. -func (f *Faker) Struct(v interface{}) error { return structFunc(f.Rand, v) } +func (f *Faker) Struct(v interface{}) error { return structFunc(f, v) } -func structFunc(ra *rand.Rand, v interface{}) error { - return r(ra, reflect.TypeOf(v), reflect.ValueOf(v), "", 0) +func structFunc(f *Faker, v interface{}) error { + return r(f, reflect.TypeOf(v), reflect.ValueOf(v), "", 0) } -func r(ra *rand.Rand, t reflect.Type, v reflect.Value, tag string, size int) error { +func r(f *Faker, t reflect.Type, v reflect.Value, tag string, size int) error { + // Handle special types + + if t.PkgPath() == "encoding/json" { + // encoding/json has two special types: + // - RawMessage + // - Number + + switch t.Name() { + case "RawMessage": + return rJsonRawMessage(f, t, v, tag, size) + case "Number": + return rJsonNumber(f, t, v, tag, size) + default: + return errors.New("unknown encoding/json type: " + t.Name()) + } + } + + // Handle generic types switch t.Kind() { case reflect.Ptr: - return rPointer(ra, t, v, tag, size) + return rPointer(f, t, v, tag, size) case reflect.Struct: - return rStruct(ra, t, v, tag) + return rStruct(f, t, v, tag) case reflect.String: - return rString(ra, v, tag) + return rString(f, t, v, tag) case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - return rUint(ra, t, v, tag) + return rUint(f, t, v, tag) case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return rInt(ra, t, v, tag) + return rInt(f, t, v, tag) case reflect.Float32, reflect.Float64: - return rFloat(ra, t, v, tag) + return rFloat(f, t, v, tag) case reflect.Bool: - return rBool(ra, v, tag) + return rBool(f, t, v, tag) case reflect.Array, reflect.Slice: - return rSlice(ra, t, v, tag, size) + return rSlice(f, t, v, tag, size) case reflect.Map: - return rMap(ra, t, v, tag, size) + return rMap(f, t, v, tag, size) } return nil } -func rCustom(ra *rand.Rand, t reflect.Type, v reflect.Value, tag string) error { +func rCustom(f *Faker, t reflect.Type, v reflect.Value, tag string) error { // If tag is empty return error if tag == "" { return errors.New("tag is empty") } fName, fParams := parseNameAndParamsFromTag(tag) - // Check to see if its a replaceable lookup function + // Check to see if it's a replaceable lookup function if info := GetFuncLookup(fName); info != nil { // Parse map params mapParams := parseMapParams(info, fParams) // Call function - fValue, err := info.Generate(ra, mapParams, info) + fValue, err := info.Generate(f.Rand, mapParams, info) if err != nil { return err } @@ -75,7 +95,7 @@ field.Elem().Set(reflect.ValueOf(fValue)) // Check if element is pointer if so - // grab the underlyning value + // grab the underlying value fieldElem := field.Elem() if fieldElem.Kind() == reflect.Ptr { fieldElem = fieldElem.Elem() @@ -88,80 +108,88 @@ } // Set the value - v.Set(fieldElem) + v.Set(fieldElem.Convert(v.Type())) // If a function is called to set the struct // stop from going through sub fields return nil } - return errors.New("function not found") + return fmt.Errorf("function %q not found", tag) } -func rStruct(ra *rand.Rand, t reflect.Type, v reflect.Value, tag 
string) error { +func rStruct(f *Faker, t reflect.Type, v reflect.Value, tag string) error { // Check if tag exists, if so run custom function if t.Name() != "" && tag != "" { - return rCustom(ra, t, v, tag) - } + return rCustom(f, t, v, tag) + } else if isFakeable(t) { + value, err := callFake(f, v, reflect.Struct) + if err != nil { + return err + } - n := t.NumField() - for i := 0; i < n; i++ { - elementT := t.Field(i) - elementV := v.Field(i) - fakeTag, ok := elementT.Tag.Lookup("fake") - - // Check whether or not to skip this field - if ok && fakeTag == "skip" { - // Do nothing, skip it - continue - } - - // Check to make sure you can set it or that its an embeded(anonymous) field - if elementV.CanSet() || elementT.Anonymous { - // Check if reflect type is of values we can specifically set - switch elementT.Type.String() { - case "time.Time": - err := rTime(ra, elementT, elementV, fakeTag) - if err != nil { - return err - } + v.Set(reflect.ValueOf(value)) + } else { + + n := t.NumField() + for i := 0; i < n; i++ { + elementT := t.Field(i) + elementV := v.Field(i) + fakeTag, ok := elementT.Tag.Lookup("fake") + + // Check whether or not to skip this field + if ok && fakeTag == "skip" { + // Do nothing, skip it continue } - // Check if fakesize is set - size := -1 // Set to -1 to indicate fakesize was not set - fs, ok := elementT.Tag.Lookup("fakesize") - if ok { - var err error - - // Check if size has params separated by , - if strings.Contains(fs, ",") { - sizeSplit := strings.SplitN(fs, ",", 2) - if len(sizeSplit) == 2 { - var sizeMin int - var sizeMax int + // Check to make sure you can set it or that it's an embedded(anonymous) field + if elementV.CanSet() || elementT.Anonymous { + // Check if reflect type is of values we can specifically set + switch elementT.Type.String() { + case "time.Time": + err := rTime(f, elementT, elementV, fakeTag) + if err != nil { + return err + } + continue + } - sizeMin, err = strconv.Atoi(sizeSplit[0]) - if err != nil { - return err + // Check if fakesize is set + size := -1 // Set to -1 to indicate fakesize was not set + fs, ok := elementT.Tag.Lookup("fakesize") + if ok { + var err error + + // Check if size has params separated by , + if strings.Contains(fs, ",") { + sizeSplit := strings.SplitN(fs, ",", 2) + if len(sizeSplit) == 2 { + var sizeMin int + var sizeMax int + + sizeMin, err = strconv.Atoi(sizeSplit[0]) + if err != nil { + return err + } + sizeMax, err = strconv.Atoi(sizeSplit[1]) + if err != nil { + return err + } + + size = f.Rand.Intn(sizeMax-sizeMin+1) + sizeMin } - sizeMax, err = strconv.Atoi(sizeSplit[1]) + } else { + size, err = strconv.Atoi(fs) if err != nil { return err } - - size = ra.Intn(sizeMax-sizeMin+1) + sizeMin - } - } else { - size, err = strconv.Atoi(fs) - if err != nil { - return err } } - } - err := r(ra, elementT.Type, elementV, fakeTag, size) - if err != nil { - return err + err := r(f, elementT.Type, elementV, fakeTag, size) + if err != nil { + return err + } } } } @@ -169,17 +197,18 @@ return nil } -func rPointer(ra *rand.Rand, t reflect.Type, v reflect.Value, tag string, size int) error { +func rPointer(f *Faker, t reflect.Type, v reflect.Value, tag string, size int) error { elemT := t.Elem() if v.IsNil() { - nv := reflect.New(elemT) - err := r(ra, elemT, nv.Elem(), tag, size) + nv := reflect.New(elemT).Elem() + err := r(f, elemT, nv, tag, size) if err != nil { return err } - v.Set(nv) + + v.Set(nv.Addr()) } else { - err := r(ra, elemT, v.Elem(), tag, size) + err := r(f, elemT, v.Elem(), tag, size) if err != nil { 
return err } @@ -188,7 +217,7 @@ return nil } -func rSlice(ra *rand.Rand, t reflect.Type, v reflect.Value, tag string, size int) error { +func rSlice(f *Faker, t reflect.Type, v reflect.Value, tag string, size int) error { // If you cant even set it dont even try if !v.CanSet() { return errors.New("cannot set slice") @@ -197,10 +226,18 @@ // Check if tag exists, if so run custom function if t.Name() != "" && tag != "" { // Check to see if custom function works if not continue to normal loop of values - err := rCustom(ra, t, v, tag) + err := rCustom(f, t, v, tag) if err == nil { return nil } + } else if isFakeable(t) { + value, err := callFake(f, v, reflect.Slice) + if err != nil { + return err + } + + v.Set(reflect.ValueOf(value)) + return nil } // Grab original size to use if needed for sub arrays @@ -210,7 +247,7 @@ // use that instead of the requested size elemLen := v.Len() if elemLen == 0 && size == -1 { - size = number(ra, 1, 10) + size = number(f.Rand, 1, 10) } else if elemLen != 0 && (size == -1 || elemLen < size) { size = elemLen } @@ -221,7 +258,7 @@ // Loop through the elements length and set based upon the index for i := 0; i < size; i++ { nv := reflect.New(elemT) - err := r(ra, elemT, nv.Elem(), tag, ogSize) + err := r(f, elemT, nv.Elem(), tag, ogSize) if err != nil { return err } @@ -237,7 +274,7 @@ return nil } -func rMap(ra *rand.Rand, t reflect.Type, v reflect.Value, tag string, size int) error { +func rMap(f *Faker, t reflect.Type, v reflect.Value, tag string, size int) error { // If you cant even set it dont even try if !v.CanSet() { return errors.New("cannot set slice") @@ -245,13 +282,23 @@ // Check if tag exists, if so run custom function if t.Name() != "" && tag != "" { - return rCustom(ra, t, v, tag) + return rCustom(f, t, v, tag) + } else if size > 0 { + // NOOP + } else if isFakeable(t) { + value, err := callFake(f, v, reflect.Map) + if err != nil { + return err + } + + v.Set(reflect.ValueOf(value)) + return nil } // Set a size newSize := size if newSize == -1 { - newSize = number(ra, 1, 10) + newSize = number(f.Rand, 1, 10) } // Create new map based upon map key value type @@ -261,14 +308,14 @@ for i := 0; i < newSize; i++ { // Create new key mapIndex := reflect.New(t.Key()) - err := r(ra, t.Key(), mapIndex.Elem(), "", -1) + err := r(f, t.Key(), mapIndex.Elem(), "", -1) if err != nil { return err } // Create new value mapValue := reflect.New(t.Elem()) - err = r(ra, t.Elem(), mapValue.Elem(), "", -1) + err = r(f, t.Elem(), mapValue.Elem(), "", -1) if err != nil { return err } @@ -286,116 +333,196 @@ return nil } -func rString(ra *rand.Rand, v reflect.Value, tag string) error { +func rString(f *Faker, t reflect.Type, v reflect.Value, tag string) error { if tag != "" { - v.SetString(generate(ra, tag)) + v.SetString(generate(f.Rand, tag)) + } else if isFakeable(t) { + value, err := callFake(f, v, reflect.String) + if err != nil { + return err + } + + valueStr, ok := value.(string) + if !ok { + return errors.New("call to Fake method did not return a string") + } + v.SetString(valueStr) } else { - v.SetString(generate(ra, strings.Repeat("?", number(ra, 4, 10)))) + v.SetString(generate(f.Rand, strings.Repeat("?", number(f.Rand, 4, 10)))) } return nil } -func rInt(ra *rand.Rand, t reflect.Type, v reflect.Value, tag string) error { +func rInt(f *Faker, t reflect.Type, v reflect.Value, tag string) error { if tag != "" { - i, err := strconv.ParseInt(generate(ra, tag), 10, 64) + i, err := strconv.ParseInt(generate(f.Rand, tag), 10, 64) if err != nil { return err } 
v.SetInt(i) - return nil - } + } else if isFakeable(t) { + value, err := callFake(f, v, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64) + if err != nil { + return err + } - // If no tag or error converting to int, set with random value - switch t.Kind() { - case reflect.Int: - v.SetInt(int64Func(ra)) - case reflect.Int8: - v.SetInt(int64(int8Func(ra))) - case reflect.Int16: - v.SetInt(int64(int16Func(ra))) - case reflect.Int32: - v.SetInt(int64(int32Func(ra))) - case reflect.Int64: - v.SetInt(int64Func(ra)) + switch i := value.(type) { + case int: + v.SetInt(int64(i)) + case int8: + v.SetInt(int64(i)) + case int16: + v.SetInt(int64(i)) + case int32: + v.SetInt(int64(i)) + case int64: + v.SetInt(int64(i)) + default: + return errors.New("call to Fake method did not return an integer") + } + } else { + // If no tag or error converting to int, set with random value + switch t.Kind() { + case reflect.Int: + v.SetInt(int64Func(f.Rand)) + case reflect.Int8: + v.SetInt(int64(int8Func(f.Rand))) + case reflect.Int16: + v.SetInt(int64(int16Func(f.Rand))) + case reflect.Int32: + v.SetInt(int64(int32Func(f.Rand))) + case reflect.Int64: + v.SetInt(int64Func(f.Rand)) + } } return nil } -func rUint(ra *rand.Rand, t reflect.Type, v reflect.Value, tag string) error { +func rUint(f *Faker, t reflect.Type, v reflect.Value, tag string) error { if tag != "" { - u, err := strconv.ParseUint(generate(ra, tag), 10, 64) + u, err := strconv.ParseUint(generate(f.Rand, tag), 10, 64) if err != nil { return err } v.SetUint(u) - return nil - } + } else if isFakeable(t) { + value, err := callFake(f, v, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64) + if err != nil { + return err + } - // If no tag or error converting to uint, set with random value - switch t.Kind() { - case reflect.Uint: - v.SetUint(uint64Func(ra)) - case reflect.Uint8: - v.SetUint(uint64(uint8Func(ra))) - case reflect.Uint16: - v.SetUint(uint64(uint16Func(ra))) - case reflect.Uint32: - v.SetUint(uint64(uint32Func(ra))) - case reflect.Uint64: - v.SetUint(uint64Func(ra)) + switch i := value.(type) { + case uint: + v.SetUint(uint64(i)) + case uint8: + v.SetUint(uint64(i)) + case uint16: + v.SetUint(uint64(i)) + case uint32: + v.SetUint(uint64(i)) + case uint64: + v.SetUint(uint64(i)) + default: + return errors.New("call to Fake method did not return an unsigned integer") + } + } else { + // If no tag or error converting to uint, set with random value + switch t.Kind() { + case reflect.Uint: + v.SetUint(uint64Func(f.Rand)) + case reflect.Uint8: + v.SetUint(uint64(uint8Func(f.Rand))) + case reflect.Uint16: + v.SetUint(uint64(uint16Func(f.Rand))) + case reflect.Uint32: + v.SetUint(uint64(uint32Func(f.Rand))) + case reflect.Uint64: + v.SetUint(uint64Func(f.Rand)) + } } return nil } -func rFloat(ra *rand.Rand, t reflect.Type, v reflect.Value, tag string) error { +func rFloat(f *Faker, t reflect.Type, v reflect.Value, tag string) error { if tag != "" { - f, err := strconv.ParseFloat(generate(ra, tag), 64) + f, err := strconv.ParseFloat(generate(f.Rand, tag), 64) if err != nil { return err } v.SetFloat(f) - return nil - } + } else if isFakeable(t) { + value, err := callFake(f, v, reflect.Float32, reflect.Float64) + if err != nil { + return err + } - // If no tag or error converting to float, set with random value - switch t.Kind() { - case reflect.Float64: - v.SetFloat(float64Func(ra)) - case reflect.Float32: - v.SetFloat(float64(float32Func(ra))) + switch i := value.(type) { + case float32: + 
v.SetFloat(float64(i)) + case float64: + v.SetFloat(float64(i)) + default: + return errors.New("call to Fake method did not return a float") + } + } else { + // If no tag or error converting to float, set with random value + switch t.Kind() { + case reflect.Float64: + v.SetFloat(float64Func(f.Rand)) + case reflect.Float32: + v.SetFloat(float64(float32Func(f.Rand))) + } } return nil } -func rBool(ra *rand.Rand, v reflect.Value, tag string) error { +func rBool(f *Faker, t reflect.Type, v reflect.Value, tag string) error { if tag != "" { - b, err := strconv.ParseBool(generate(ra, tag)) + b, err := strconv.ParseBool(generate(f.Rand, tag)) if err != nil { return err } v.SetBool(b) - return nil - } + } else if isFakeable(t) { + value, err := callFake(f, v, reflect.Bool) + if err != nil { + return err + } - // If no tag or error converting to boolean, set with random value - v.SetBool(boolFunc(ra)) + switch i := value.(type) { + case bool: + v.SetBool(bool(i)) + default: + return errors.New("call to Fake method did not return a boolean") + } + } else { + // If no tag or error converting to boolean, set with random value + v.SetBool(boolFunc(f.Rand)) + } return nil } // rTime will set a time.Time field the best it can from either the default date tag or from the generate tag -func rTime(ra *rand.Rand, t reflect.StructField, v reflect.Value, tag string) error { +func rTime(f *Faker, t reflect.StructField, v reflect.Value, tag string) error { if tag != "" { // Generate time - timeOutput := generate(ra, tag) + timeOutput := generate(f.Rand, tag) + + // Check to see if timeOutput has monotonic clock reading + // if so, remove it. This is because time.Parse() does not + // support parsing the monotonic clock reading + if strings.Contains(timeOutput, " m=") { + timeOutput = strings.Split(timeOutput, " m=")[0] + } // Check to see if they are passing in a format to parse the time timeFormat, timeFormatOK := t.Tag.Lookup("format") @@ -428,6 +555,6 @@ return nil } - v.Set(reflect.ValueOf(date(ra))) + v.Set(reflect.ValueOf(date(f.Rand))) return nil } diff -Nru temporal-1.21.5-1/src/vendor/github.com/brianvoe/gofakeit/v6/xml.go temporal-1.22.5/src/vendor/github.com/brianvoe/gofakeit/v6/xml.go --- temporal-1.21.5-1/src/vendor/github.com/brianvoe/gofakeit/v6/xml.go 2023-09-29 14:03:30.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/brianvoe/gofakeit/v6/xml.go 2024-02-23 09:46:09.000000000 +0000 @@ -11,11 +11,11 @@ // XMLOptions defines values needed for json generation type XMLOptions struct { - Type string `json:"type" xml:"type"` // single or multiple + Type string `json:"type" xml:"type" fake:"{randomstring:[array,single]}"` // single or array RootElement string `json:"root_element" xml:"root_element"` RecordElement string `json:"record_element" xml:"record_element"` - RowCount int `json:"row_count" xml:"row_count"` - Fields []Field `json:"fields" xml:"fields"` + RowCount int `json:"row_count" xml:"row_count" fake:"{number:1,10}"` + Fields []Field `json:"fields" xml:"fields" fake:"{internal_exampleFields}"` Indent bool `json:"indent" xml:"indent"` } @@ -128,12 +128,22 @@ } // XML generates an object or an array of objects in json format -func XML(xo *XMLOptions) ([]byte, error) { return xmlFunc(globalFaker.Rand, xo) } +// A nil XMLOptions returns a randomly structured XML. 
+func XML(xo *XMLOptions) ([]byte, error) { return xmlFunc(globalFaker, xo) } // XML generates an object or an array of objects in json format -func (f *Faker) XML(xo *XMLOptions) ([]byte, error) { return xmlFunc(f.Rand, xo) } +// A nil XMLOptions returns a randomly structured XML. +func (f *Faker) XML(xo *XMLOptions) ([]byte, error) { return xmlFunc(f, xo) } + +func xmlFunc(f *Faker, xo *XMLOptions) ([]byte, error) { + if xo == nil { + // We didn't get a XMLOptions, so create a new random one + err := f.Struct(&xo) + if err != nil { + return nil, err + } + } -func xmlFunc(r *rand.Rand, xo *XMLOptions) ([]byte, error) { // Check to make sure they passed in a type if xo.Type != "single" && xo.Type != "array" { return nil, errors.New("invalid type, must be array or object") @@ -175,7 +185,7 @@ return nil, errors.New("invalid function, " + field.Function + " does not exist") } - value, err := funcInfo.Generate(r, &field.Params, funcInfo) + value, err := funcInfo.Generate(f.Rand, &field.Params, funcInfo) if err != nil { return nil, err } @@ -228,7 +238,7 @@ return nil, errors.New("invalid function, " + field.Function + " does not exist") } - value, err := funcInfo.Generate(r, &field.Params, funcInfo) + value, err := funcInfo.Generate(f.Rand, &field.Params, funcInfo) if err != nil { return nil, err } @@ -336,7 +346,8 @@ } xo.Indent = indent - return xmlFunc(r, &xo) + f := &Faker{Rand: r} + return xmlFunc(f, &xo) }, }) } diff -Nru temporal-1.21.5-1/src/vendor/github.com/cactus/go-statsd-client/v5/LICENSE.md temporal-1.22.5/src/vendor/github.com/cactus/go-statsd-client/v5/LICENSE.md --- temporal-1.21.5-1/src/vendor/github.com/cactus/go-statsd-client/v5/LICENSE.md 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/cactus/go-statsd-client/v5/LICENSE.md 2024-02-23 09:46:09.000000000 +0000 @@ -0,0 +1,19 @@ +Copyright (c) 2012-2016 Eli Janssen + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff -Nru temporal-1.21.5-1/src/vendor/github.com/cactus/go-statsd-client/v5/statsd/buffer_pool.go temporal-1.22.5/src/vendor/github.com/cactus/go-statsd-client/v5/statsd/buffer_pool.go --- temporal-1.21.5-1/src/vendor/github.com/cactus/go-statsd-client/v5/statsd/buffer_pool.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/cactus/go-statsd-client/v5/statsd/buffer_pool.go 2024-02-23 09:46:09.000000000 +0000 @@ -0,0 +1,31 @@ +// Copyright (c) 2012-2016 Eli Janssen +// Use of this source code is governed by an MIT-style +// license that can be found in the LICENSE file. 
+ +package statsd + +import ( + "bytes" + "sync" +) + +type bufferPool struct { + *sync.Pool +} + +func newBufferPool() *bufferPool { + return &bufferPool{ + &sync.Pool{New: func() interface{} { + return bytes.NewBuffer(make([]byte, 0, 1700)) + }}, + } +} + +func (bp *bufferPool) Get() *bytes.Buffer { + return (bp.Pool.Get()).(*bytes.Buffer) +} + +func (bp *bufferPool) Put(b *bytes.Buffer) { + b.Truncate(0) + bp.Pool.Put(b) +} diff -Nru temporal-1.21.5-1/src/vendor/github.com/cactus/go-statsd-client/v5/statsd/client.go temporal-1.22.5/src/vendor/github.com/cactus/go-statsd-client/v5/statsd/client.go --- temporal-1.21.5-1/src/vendor/github.com/cactus/go-statsd-client/v5/statsd/client.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/cactus/go-statsd-client/v5/statsd/client.go 2024-02-23 09:46:09.000000000 +0000 @@ -0,0 +1,315 @@ +// Copyright (c) 2012-2016 Eli Janssen +// Use of this source code is governed by an MIT-style +// license that can be found in the LICENSE file. + +package statsd + +import ( + "fmt" + "math/rand" + "strconv" + "strings" + "time" +) + +var bufPool = newBufferPool() + +// The StatSender interface wraps all the statsd metric methods +type StatSender interface { + Inc(string, int64, float32, ...Tag) error + Dec(string, int64, float32, ...Tag) error + Gauge(string, int64, float32, ...Tag) error + GaugeDelta(string, int64, float32, ...Tag) error + Timing(string, int64, float32, ...Tag) error + TimingDuration(string, time.Duration, float32, ...Tag) error + Set(string, string, float32, ...Tag) error + SetInt(string, int64, float32, ...Tag) error + Raw(string, string, float32, ...Tag) error +} + +// The Statter interface defines the behavior of a stat client +type Statter interface { + StatSender + NewSubStatter(string) SubStatter + SetPrefix(string) + Close() error +} + +// The SubStatter interface defines the behavior of a stat child/subclient +type SubStatter interface { + StatSender + SetSamplerFunc(SamplerFunc) + NewSubStatter(string) SubStatter +} + +// The SamplerFunc type defines a function that can serve +// as a Client sampler function. +type SamplerFunc func(float32) bool + +// DefaultSampler is the default rate sampler function +func DefaultSampler(rate float32) bool { + if rate < 1 { + return rand.Float32() < rate + } + return true +} + +// A Client is a statsd client. +type Client struct { + // prefix for statsd name + prefix string + // packet sender + sender Sender + // sampler method + sampler SamplerFunc + // tag handler + tagFormat TagFormat +} + +// Close closes the connection and cleans up. +func (s *Client) Close() error { + if s == nil { + return nil + } + + err := s.sender.Close() + return err +} + +// Inc increments a statsd count type. +// stat is a string name for the metric. +// value is the integer value +// rate is the sample rate (0.0 to 1.0) +// tags is a []Tag +func (s *Client) Inc(stat string, value int64, rate float32, tags ...Tag) error { + if !s.includeStat(rate) { + return nil + } + + return s.submit(stat, "", value, "|c", rate, tags) +} + +// Dec decrements a statsd count type. +// stat is a string name for the metric. +// value is the integer value. +// rate is the sample rate (0.0 to 1.0). +func (s *Client) Dec(stat string, value int64, rate float32, tags ...Tag) error { + if !s.includeStat(rate) { + return nil + } + + return s.submit(stat, "", -value, "|c", rate, tags) +} + +// Gauge submits/updates a statsd gauge type. +// stat is a string name for the metric. +// value is the integer value. 
+// rate is the sample rate (0.0 to 1.0). +func (s *Client) Gauge(stat string, value int64, rate float32, tags ...Tag) error { + if !s.includeStat(rate) { + return nil + } + + return s.submit(stat, "", value, "|g", rate, tags) +} + +// GaugeDelta submits a delta to a statsd gauge. +// stat is the string name for the metric. +// value is the (positive or negative) change. +// rate is the sample rate (0.0 to 1.0). +func (s *Client) GaugeDelta(stat string, value int64, rate float32, tags ...Tag) error { + if !s.includeStat(rate) { + return nil + } + + // if negative, the submit formatter will prefix with a - already + // so only special case the positive value. + // don't pull out the prefix here, avoids some tiny amount of stack space by + // inlining like this. performance + if value >= 0 { + return s.submit(stat, "+", value, "|g", rate, tags) + } + return s.submit(stat, "", value, "|g", rate, tags) +} + +// Timing submits a statsd timing type. +// stat is a string name for the metric. +// delta is the time duration value in milliseconds +// rate is the sample rate (0.0 to 1.0). +func (s *Client) Timing(stat string, delta int64, rate float32, tags ...Tag) error { + if !s.includeStat(rate) { + return nil + } + + return s.submit(stat, "", delta, "|ms", rate, tags) +} + +// TimingDuration submits a statsd timing type. +// stat is a string name for the metric. +// delta is the timing value as time.Duration +// rate is the sample rate (0.0 to 1.0). +func (s *Client) TimingDuration(stat string, delta time.Duration, rate float32, tags ...Tag) error { + if !s.includeStat(rate) { + return nil + } + + ms := float64(delta) / float64(time.Millisecond) + return s.submit(stat, "", ms, "|ms", rate, tags) +} + +// Set submits a stats set type +// stat is a string name for the metric. +// value is the string value +// rate is the sample rate (0.0 to 1.0). +func (s *Client) Set(stat string, value string, rate float32, tags ...Tag) error { + if !s.includeStat(rate) { + return nil + } + + return s.submit(stat, "", value, "|s", rate, tags) +} + +// SetInt submits a number as a stats set type. +// stat is a string name for the metric. +// value is the integer value +// rate is the sample rate (0.0 to 1.0). +func (s *Client) SetInt(stat string, value int64, rate float32, tags ...Tag) error { + if !s.includeStat(rate) { + return nil + } + + return s.submit(stat, "", value, "|s", rate, tags) +} + +// Raw submits a preformatted value. +// stat is the string name for the metric. +// value is a preformatted "raw" value string. +// rate is the sample rate (0.0 to 1.0). +func (s *Client) Raw(stat string, value string, rate float32, tags ...Tag) error { + if !s.includeStat(rate) { + return nil + } + + return s.submit(stat, "", value, "", rate, tags) +} + +// SetSamplerFunc sets a sampler function to something other than the default +// sampler is a function that determines whether the metric is +// to be accepted, or discarded. +// An example use case is for submitted pre-sampled metrics. +func (s *Client) SetSamplerFunc(sampler SamplerFunc) { + s.sampler = sampler +} + +// submit an already sampled raw stat +func (s *Client) submit(stat, vprefix string, value interface{}, suffix string, rate float32, tags []Tag) error { + skiptags := false + if len(tags) == 0 { + skiptags = true + } + + buf := bufPool.Get() + defer bufPool.Put(buf) + // sadly, no way to jam this back into the bytes.Buffer without + // doing a few allocations... avoiding those is the whole point here... 
+ // so from here on out just use it as a raw []byte + data := buf.Bytes() + + if s.prefix != "" { + data = append(data, s.prefix...) + data = append(data, '.') + } + + data = append(data, stat...) + + // infix tags, if present + if !skiptags && s.tagFormat&AllInfix != 0 { + data = s.tagFormat.WriteInfix(data, tags) + // if we did infix already, no suffix also. + skiptags = true + } + + data = append(data, ':') + + if vprefix != "" { + data = append(data, vprefix...) + } + + switch v := value.(type) { + case string: + data = append(data, v...) + case int64: + data = strconv.AppendInt(data, v, 10) + case float64: + data = strconv.AppendFloat(data, v, 'f', -1, 64) + default: + return fmt.Errorf("No matching type format") + } + + if suffix != "" { + data = append(data, suffix...) + } + + if rate < 1 { + data = append(data, "|@"...) + data = strconv.AppendFloat(data, float64(rate), 'f', 6, 32) + } + + // suffix tags if present + if !skiptags && s.tagFormat&AllSuffix != 0 { + data = s.tagFormat.WriteSuffix(data, tags) + } + + _, err := s.sender.Send(data) + return err +} + +// check for nil client, and perform sampling calculation +func (s *Client) includeStat(rate float32) bool { + if s == nil { + return false + } + + // test for nil in case someone builds their own + // client without calling new (result is nil sampler) + if s.sampler != nil { + return s.sampler(rate) + } + return DefaultSampler(rate) +} + +// SetPrefix sets/updates the statsd client prefix. +// Note: Does not change the prefix of any SubStatters. +func (s *Client) SetPrefix(prefix string) { + if s == nil { + return + } + + s.prefix = prefix +} + +// NewSubStatter returns a SubStatter with appended prefix +func (s *Client) NewSubStatter(prefix string) SubStatter { + var c *Client + if s != nil { + c = &Client{ + prefix: joinPathComp(s.prefix, prefix), + sender: s.sender, + sampler: s.sampler, + } + } + return c +} + +// joinPathComp is a helper that ensures we combine path components with a dot +// when it's appropriate to do so; prefix is the existing prefix and suffix is +// the new component being added. +// +// It returns the joined prefix. +func joinPathComp(prefix, suffix string) string { + suffix = strings.TrimLeft(suffix, ".") + if prefix != "" && suffix != "" { + return prefix + "." + suffix + } + return prefix + suffix +} diff -Nru temporal-1.21.5-1/src/vendor/github.com/cactus/go-statsd-client/v5/statsd/client_config.go temporal-1.22.5/src/vendor/github.com/cactus/go-statsd-client/v5/statsd/client_config.go --- temporal-1.21.5-1/src/vendor/github.com/cactus/go-statsd-client/v5/statsd/client_config.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/cactus/go-statsd-client/v5/statsd/client_config.go 2024-02-23 09:46:09.000000000 +0000 @@ -0,0 +1,132 @@ +// Copyright (c) 2012-2016 Eli Janssen +// Use of this source code is governed by an MIT-style +// license that can be found in the LICENSE file. + +package statsd + +import ( + "fmt" + "time" +) + +type ClientConfig struct { + // addr is a string of the format "hostname:port", and must be something + // validly parsable by net.ResolveUDPAddr. + Address string + + // prefix is the statsd client prefix. Can be "" if no prefix is desired. + Prefix string + + // ResInterval is the interval over which the addr is re-resolved. + // Do note that this /does/ add overhead! + // If you need higher performance, leave unset (or set to 0), + // in which case the address will not be re-resolved. 
+ // + // Note that if Address is an {ip}:{port} and not a {hostname}:{port}, then + // ResInterval will be ignored. + ResInterval time.Duration + + // UseBuffered determines whether a buffered sender is used or not. + // If a buffered sender is /not/ used, FlushInterval and FlushBytes values are + // ignored. Default is false. + UseBuffered bool + + // FlushInterval is a time.Duration, and specifies the maximum interval for + // packet sending. Note that if you send lots of metrics, you will send more + // often. This is just a maximal threshold. + // If FlushInterval is 0, defaults to 300ms. + FlushInterval time.Duration + + // If flushBytes is 0, defaults to 1432 bytes, which is considered safe + // for local traffic. If sending over the public internet, 512 bytes is + // the recommended value. + FlushBytes int + + // The desired tag format to use for tags (note: statsd tag support varies) + // Supported formats are one of: statsd.DataDog, statsd.Grahpite, statsd.Influx + TagFormat TagFormat +} + +// NewClientWithConfig returns a new BufferedClient +// +// config is a ClientConfig, which holds various configuration values. +func NewClientWithConfig(config *ClientConfig) (Statter, error) { + var sender Sender + var err error + + // guard against nil config + if config == nil { + return nil, fmt.Errorf("config cannot be nil") + } + + // Use a re-resolving simple sender iff: + // * The time duration greater than 0 + // * The Address is not an ip (eg. {ip}:{port}). + // Otherwise, re-resolution is not required. + if config.ResInterval > 0 && !mustBeIP(config.Address) { + sender, err = NewResolvingSimpleSender(config.Address, config.ResInterval) + } else { + sender, err = NewSimpleSender(config.Address) + } + if err != nil { + return nil, err + } + + if config.UseBuffered { + return newBufferedC(sender, config) + } else { + return NewClientWithSender(sender, config.Prefix, config.TagFormat) + } +} + +func newBufferedC(baseSender Sender, config *ClientConfig) (Statter, error) { + + flushBytes := config.FlushBytes + if flushBytes <= 0 { + // ref: + // github.com/etsy/statsd/blob/master/docs/metric_types.md#multi-metric-packets + flushBytes = 1432 + } + + flushInterval := config.FlushInterval + if flushInterval <= time.Duration(0) { + flushInterval = 300 * time.Millisecond + } + + bufsender, err := NewBufferedSenderWithSender(baseSender, flushInterval, flushBytes) + if err != nil { + return nil, err + } + + return NewClientWithSender(bufsender, config.Prefix, config.TagFormat) +} + +// NewClientWithSender returns a pointer to a new Client and an error. +// +// sender is an instance of a statsd.Sender interface and may not be nil +// +// prefix is the stastd client prefix. Can be "" if no prefix is desired. +// +// tagFormat is the desired tag format, if any. If you don't plan on using +// tags, use 0 to use the default. 
+func NewClientWithSender(sender Sender, prefix string, tagFormat TagFormat) (Statter, error) { + if sender == nil { + return nil, fmt.Errorf("Client sender may not be nil") + } + + // if zero value is supplied, pick something as a default + if tagFormat == 0 { + tagFormat = SuffixOctothorpe + } + + if tagFormat&(AllInfix|AllSuffix) == 0 { + return nil, fmt.Errorf("Invalid tagFormat section") + } + + client := &Client{ + prefix: prefix, + sender: sender, + tagFormat: tagFormat, + } + return client, nil +} diff -Nru temporal-1.21.5-1/src/vendor/github.com/cactus/go-statsd-client/v5/statsd/client_legacy.go temporal-1.22.5/src/vendor/github.com/cactus/go-statsd-client/v5/statsd/client_legacy.go --- temporal-1.21.5-1/src/vendor/github.com/cactus/go-statsd-client/v5/statsd/client_legacy.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/cactus/go-statsd-client/v5/statsd/client_legacy.go 2024-02-23 09:46:09.000000000 +0000 @@ -0,0 +1,73 @@ +// Copyright (c) 2012-2016 Eli Janssen +// Use of this source code is governed by an MIT-style +// license that can be found in the LICENSE file. + +package statsd + +import "time" + +// Deprecated stuff here... + +// NewBufferedClient returns a new BufferedClient +// +// addr is a string of the format "hostname:port", and must be parsable by +// net.ResolveUDPAddr. +// +// prefix is the statsd client prefix. Can be "" if no prefix is desired. +// +// flushInterval is a time.Duration, and specifies the maximum interval for +// packet sending. Note that if you send lots of metrics, you will send more +// often. This is just a maximal threshold. +// +// If flushInterval is 0ms, defaults to 300ms. +// +// flushBytes specifies the maximum udp packet size you wish to send. If adding +// a metric would result in a larger packet than flushBytes, the packet will +// first be send, then the new data will be added to the next packet. +// +// If flushBytes is 0, defaults to 1432 bytes, which is considered safe +// for local traffic. If sending over the public internet, 512 bytes is +// the recommended value. +// +// Deprecated: This interface is "legacy", and it is recommented to migrate to +// using NewClientWithConfig in the future. +func NewBufferedClient(addr, prefix string, flushInterval time.Duration, flushBytes int) (Statter, error) { + config := &ClientConfig{ + Address: addr, + Prefix: prefix, + UseBuffered: true, + FlushInterval: flushInterval, + FlushBytes: flushBytes, + } + return NewClientWithConfig(config) +} + +// NewClient returns a pointer to a new Client, and an error. +// +// addr is a string of the format "hostname:port", and must be parsable by +// net.ResolveUDPAddr. +// +// prefix is the statsd client prefix. Can be "" if no prefix is desired. +// +// Deprecated: This interface is "legacy", and it is recommented to migrate to +// using NewClientWithConfig in the future. +func NewClient(addr, prefix string) (Statter, error) { + config := &ClientConfig{ + Address: addr, + Prefix: prefix, + UseBuffered: false, + } + return NewClientWithConfig(config) +} + +// Dial is a compatibility alias for NewClient +// +// Deprecated: This interface is "legacy", and it is recommented to migrate to +// using NewClientWithConfig in the future. +var Dial = NewClient + +// New is a compatibility alias for NewClient +// +// Deprecated: This interface is "legacy", and it is recommented to migrate to +// using NewClientWithConfig in the future. 
+var New = NewClient diff -Nru temporal-1.21.5-1/src/vendor/github.com/cactus/go-statsd-client/v5/statsd/doc.go temporal-1.22.5/src/vendor/github.com/cactus/go-statsd-client/v5/statsd/doc.go --- temporal-1.21.5-1/src/vendor/github.com/cactus/go-statsd-client/v5/statsd/doc.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/cactus/go-statsd-client/v5/statsd/doc.go 2024-02-23 09:46:09.000000000 +0000 @@ -0,0 +1,38 @@ +// Copyright (c) 2012-2016 Eli Janssen +// Use of this source code is governed by an MIT-style +// license that can be found in the LICENSE file. + +/* +Package statsd provides a StatsD client implementation that is safe for +concurrent use by multiple goroutines and for efficiency can be created and +reused. + +Example usage: + + // First create a client config. Here is a simple config that sends one + // stat per packet (for compatibility). + config := &statsd.ClientConfig{ + Address: "127.0.0.1:8125", + Prefix: "test-client", + } + + // Now create the client + client, err := statsd.NewClientWithConfig(config) + + // and handle any initialization errors + if err != nil { + log.Fatal(err) + } + + // make sure to clean up + defer client.Close() + + // Send a stat + err = client.Inc("stat1", 42, 1.0) + // handle any errors + if err != nil { + log.Printf("Error sending metric: %+v", err) + } + +*/ +package statsd diff -Nru temporal-1.21.5-1/src/vendor/github.com/cactus/go-statsd-client/v5/statsd/sender.go temporal-1.22.5/src/vendor/github.com/cactus/go-statsd-client/v5/statsd/sender.go --- temporal-1.21.5-1/src/vendor/github.com/cactus/go-statsd-client/v5/statsd/sender.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/cactus/go-statsd-client/v5/statsd/sender.go 2024-02-23 09:46:09.000000000 +0000 @@ -0,0 +1,69 @@ +// Copyright (c) 2012-2016 Eli Janssen +// Use of this source code is governed by an MIT-style +// license that can be found in the LICENSE file. + +package statsd + +import ( + "errors" + "net" +) + +// The Sender interface wraps a Send and Close +type Sender interface { + Send(data []byte) (int, error) + Close() error +} + +// SimpleSender provides a socket send interface. +type SimpleSender struct { + // underlying connection + c net.PacketConn + // resolved udp address + ra *net.UDPAddr +} + +// Send sends the data to the server endpoint. +func (s *SimpleSender) Send(data []byte) (int, error) { + // no need for locking here, as the underlying fdNet + // already serialized writes + n, err := s.c.(*net.UDPConn).WriteToUDP(data, s.ra) + if err != nil { + return 0, err + } + if n == 0 { + return n, errors.New("Wrote no bytes") + } + return n, nil +} + +// Close closes the SimpleSender and cleans up. +func (s *SimpleSender) Close() error { + err := s.c.Close() + return err +} + +// NewSimpleSender returns a new SimpleSender for sending to the supplied +// addresss. +// +// addr is a string of the format "hostname:port", and must be parsable by +// net.ResolveUDPAddr. 
+func NewSimpleSender(addr string) (Sender, error) { + c, err := net.ListenPacket("udp", ":0") + if err != nil { + return nil, err + } + + ra, err := net.ResolveUDPAddr("udp", addr) + if err != nil { + c.Close() + return nil, err + } + + sender := &SimpleSender{ + c: c, + ra: ra, + } + + return sender, nil +} diff -Nru temporal-1.21.5-1/src/vendor/github.com/cactus/go-statsd-client/v5/statsd/sender_buffered.go temporal-1.22.5/src/vendor/github.com/cactus/go-statsd-client/v5/statsd/sender_buffered.go --- temporal-1.21.5-1/src/vendor/github.com/cactus/go-statsd-client/v5/statsd/sender_buffered.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/cactus/go-statsd-client/v5/statsd/sender_buffered.go 2024-02-23 09:46:09.000000000 +0000 @@ -0,0 +1,194 @@ +// Copyright (c) 2012-2016 Eli Janssen +// Use of this source code is governed by an MIT-style +// license that can be found in the LICENSE file. + +package statsd + +import ( + "bytes" + "fmt" + "sync" + "time" +) + +var senderPool = newBufferPool() + +// BufferedSender provides a buffered statsd udp, sending multiple +// metrics, where possible. +type BufferedSender struct { + sender Sender + flushBytes int + flushInterval time.Duration + // buffers + bufmx sync.Mutex + buffer *bytes.Buffer + bufs chan *bytes.Buffer + // lifecycle + runmx sync.RWMutex + shutdown chan chan error + running bool +} + +// Send bytes. +func (s *BufferedSender) Send(data []byte) (int, error) { + s.runmx.RLock() + if !s.running { + s.runmx.RUnlock() + return 0, fmt.Errorf("BufferedSender is not running") + } + + s.withBufferLock(func() { + blen := s.buffer.Len() + if blen > 0 && blen+len(data)+1 >= s.flushBytes { + s.swapnqueue() + } + + s.buffer.Write(data) + s.buffer.WriteByte('\n') + + if s.buffer.Len() >= s.flushBytes { + s.swapnqueue() + } + }) + s.runmx.RUnlock() + return len(data), nil +} + +// Close closes the Buffered Sender and cleans up. 
+func (s *BufferedSender) Close() error { + // since we are running, write lock during cleanup + s.runmx.Lock() + defer s.runmx.Unlock() + if !s.running { + return nil + } + + errChan := make(chan error) + s.running = false + s.shutdown <- errChan + return <-errChan +} + +// Start Buffered Sender +// Begins ticker and read loop +func (s *BufferedSender) Start() { + // write lock to start running + s.runmx.Lock() + defer s.runmx.Unlock() + if s.running { + return + } + + s.running = true + s.bufs = make(chan *bytes.Buffer, 32) + go s.run() +} + +func (s *BufferedSender) withBufferLock(fn func()) { + s.bufmx.Lock() + fn() + s.bufmx.Unlock() +} + +func (s *BufferedSender) swapnqueue() { + if s.buffer.Len() == 0 { + return + } + ob := s.buffer + nb := senderPool.Get() + s.buffer = nb + s.bufs <- ob +} + +func (s *BufferedSender) run() { + ticker := time.NewTicker(s.flushInterval) + defer ticker.Stop() + + doneChan := make(chan bool) + go func() { + for buf := range s.bufs { + s.flush(buf) + senderPool.Put(buf) + } + doneChan <- true + }() + + for { + select { + case <-ticker.C: + s.withBufferLock(func() { + s.swapnqueue() + }) + case errChan := <-s.shutdown: + s.withBufferLock(func() { + s.swapnqueue() + }) + close(s.bufs) + <-doneChan + errChan <- s.sender.Close() + return + } + } +} + +// send to remove endpoint and truncate buffer +func (s *BufferedSender) flush(b *bytes.Buffer) (int, error) { + bb := b.Bytes() + bbl := len(bb) + if bb[bbl-1] == '\n' { + bb = bb[:bbl-1] + } + //n, err := s.sender.Send(bytes.TrimSuffix(b.Bytes(), []byte("\n"))) + n, err := s.sender.Send(bb) + b.Truncate(0) // clear the buffer + return n, err +} + +// NewBufferedSender returns a new BufferedSender +// +// addr is a string of the format "hostname:port", and must be parsable by +// net.ResolveUDPAddr. +// +// flushInterval is a time.Duration, and specifies the maximum interval for +// packet sending. Note that if you send lots of metrics, you will send more +// often. This is just a maximal threshold. +// +// flushBytes specifies the maximum udp packet size you wish to send. If adding +// a metric would result in a larger packet than flushBytes, the packet will +// first be send, then the new data will be added to the next packet. +func NewBufferedSender(addr string, flushInterval time.Duration, flushBytes int) (Sender, error) { + simpleSender, err := NewSimpleSender(addr) + if err != nil { + return nil, err + } + return NewBufferedSenderWithSender(simpleSender, flushInterval, flushBytes) +} + +// NewBufferedSenderWithSender returns a new BufferedSender, wrapping the +// provided sender. +// +// sender is an instance of a statsd.Sender interface. Sender is required. +// +// flushInterval is a time.Duration, and specifies the maximum interval for +// packet sending. Note that if you send lots of metrics, you will send more +// often. This is just a maximal threshold. +// +// flushBytes specifies the maximum udp packet size you wish to send. If adding +// a metric would result in a larger packet than flushBytes, the packet will +// first be send, then the new data will be added to the next packet. 
+func NewBufferedSenderWithSender(sender Sender, flushInterval time.Duration, flushBytes int) (Sender, error) { + if sender == nil { + return nil, fmt.Errorf("sender may not be nil") + } + + bufSender := &BufferedSender{ + flushBytes: flushBytes, + flushInterval: flushInterval, + sender: sender, + buffer: senderPool.Get(), + shutdown: make(chan chan error), + } + + bufSender.Start() + return bufSender, nil +} diff -Nru temporal-1.21.5-1/src/vendor/github.com/cactus/go-statsd-client/v5/statsd/sender_resolving.go temporal-1.22.5/src/vendor/github.com/cactus/go-statsd-client/v5/statsd/sender_resolving.go --- temporal-1.21.5-1/src/vendor/github.com/cactus/go-statsd-client/v5/statsd/sender_resolving.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/cactus/go-statsd-client/v5/statsd/sender_resolving.go 2024-02-23 09:46:09.000000000 +0000 @@ -0,0 +1,170 @@ +// Copyright (c) 2012-2016 Eli Janssen +// Use of this source code is governed by an MIT-style +// license that can be found in the LICENSE file. + +package statsd + +import ( + "errors" + "fmt" + "net" + "sync" + "time" +) + +// ResolvingSimpleSender provides a socket send interface that re-resolves and +// reconnects. +type ResolvingSimpleSender struct { + // underlying connection + conn net.PacketConn + // resolved udp address + addrResolved *net.UDPAddr + // unresolved addr + addrUnresolved string + // interval time + reresolveInterval time.Duration + // lifecycle + mx sync.RWMutex + doneChan chan struct{} + running bool +} + +// Send sends the data to the server endpoint. +func (s *ResolvingSimpleSender) Send(data []byte) (int, error) { + s.mx.RLock() + if !s.running { + s.mx.RUnlock() + return 0, fmt.Errorf("ResolvingSimpleSender is not running") + } + + // no need for locking here, as the underlying fdNet + // already serialized writes + n, err := s.conn.(*net.UDPConn).WriteToUDP(data, s.addrResolved) + + // unlock manually, and early (vs doing a defer) to avoid some overhead + s.mx.RUnlock() + + if err != nil { + return 0, err + } + if n == 0 { + return n, errors.New("Wrote no bytes") + } + return n, nil +} + +// Close closes the ResolvingSender and cleans up +func (s *ResolvingSimpleSender) Close() error { + // lock to guard against ra reconnection modification + s.mx.Lock() + defer s.mx.Unlock() + + if !s.running { + return nil + } + + s.running = false + close(s.doneChan) + + err := s.conn.Close() + return err +} + +func (s *ResolvingSimpleSender) Reconnect() { + // lock to guard against s.running mutation + s.mx.RLock() + + if !s.running { + s.mx.RUnlock() + return + } + + // get old addr for comparison, then release lock (asap) + oldAddr := s.addrResolved.String() + + // done with rlock for now + s.mx.RUnlock() + + // ro doesn't change, so no need to lock + addrResolved, err := net.ResolveUDPAddr("udp", s.addrUnresolved) + + if err != nil { + // no good new address.. so continue with old address + return + } + + if oldAddr == addrResolved.String() { + // got same address.. so continue with old address + return + } + + // acquire write lock to both guard against s.running having been mutated in the + // meantime, as well as for safely setting s.ra + s.mx.Lock() + + // check running again, just to be sure nothing was terminated in the meantime... 
+ if s.running { + s.addrResolved = addrResolved + } + s.mx.Unlock() +} + +// Start Resolving Simple Sender +// Begins ticker and read loop +func (s *ResolvingSimpleSender) Start() { + // write lock to start running + s.mx.Lock() + defer s.mx.Unlock() + + if s.running { + return + } + + s.running = true + go s.run() +} + +func (s *ResolvingSimpleSender) run() { + ticker := time.NewTicker(s.reresolveInterval) + defer ticker.Stop() + + for { + select { + case <-s.doneChan: + return + case <-ticker.C: + // reconnect locks/checks running, so no need to do it here + s.Reconnect() + } + } +} + +// NewResolvingSimpleSender returns a new ResolvingSimpleSender for +// sending to the supplied addresss. +// +// addr is a string of the format "hostname:port", and must be parsable by +// net.ResolveUDPAddr. +func NewResolvingSimpleSender(addr string, interval time.Duration) (Sender, error) { + conn, err := net.ListenPacket("udp", ":0") + if err != nil { + return nil, err + } + + addrResolved, err := net.ResolveUDPAddr("udp", addr) + if err != nil { + conn.Close() + return nil, err + } + + sender := &ResolvingSimpleSender{ + conn: conn, + addrResolved: addrResolved, + addrUnresolved: addr, + reresolveInterval: interval, + doneChan: make(chan struct{}), + running: false, + } + + sender.Start() + return sender, nil +} diff -Nru temporal-1.21.5-1/src/vendor/github.com/cactus/go-statsd-client/v5/statsd/tags.go temporal-1.22.5/src/vendor/github.com/cactus/go-statsd-client/v5/statsd/tags.go --- temporal-1.21.5-1/src/vendor/github.com/cactus/go-statsd-client/v5/statsd/tags.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/cactus/go-statsd-client/v5/statsd/tags.go 2024-02-23 09:46:09.000000000 +0000 @@ -0,0 +1,54 @@ +package statsd + +type Tag [2]string +type TagFormat uint8 + +func (tf TagFormat) WriteInfix(data []byte, tags []Tag) []byte { + switch { + case tf&InfixComma != 0: + for _, v := range tags { + data = append(data, ',') + data = append(data, v[0]...) + data = append(data, '=') + data = append(data, v[1]...) + } + return data + case tf&InfixSemicolon != 0: + for _, v := range tags { + data = append(data, ';') + data = append(data, v[0]...) + data = append(data, '=') + data = append(data, v[1]...) + } + } + + return data +} + +func (tf TagFormat) WriteSuffix(data []byte, tags []Tag) []byte { + switch { + // make the zero value useful + case tf == 0, tf&SuffixOctothorpe != 0: + data = append(data, "|#"...) + tlen := len(tags) + for i, v := range tags { + data = append(data, v[0]...) + data = append(data, ':') + data = append(data, v[1]...) + if tlen > 1 && i < tlen-1 { + data = append(data, ',') + } + } + } + + return data +} + +const ( + SuffixOctothorpe TagFormat = 1 << iota + InfixSemicolon + InfixComma + + AllInfix = InfixSemicolon | InfixComma + AllSuffix = SuffixOctothorpe +) diff -Nru temporal-1.21.5-1/src/vendor/github.com/cactus/go-statsd-client/v5/statsd/validator.go temporal-1.22.5/src/vendor/github.com/cactus/go-statsd-client/v5/statsd/validator.go --- temporal-1.21.5-1/src/vendor/github.com/cactus/go-statsd-client/v5/statsd/validator.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/cactus/go-statsd-client/v5/statsd/validator.go 2024-02-23 09:46:09.000000000 +0000 @@ -0,0 +1,37 @@ +// Copyright (c) 2012-2016 Eli Janssen +// Use of this source code is governed by an MIT-style +// license that can be found in the LICENSE file. 
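The tags.go file added above defines two wire formats for metric tags: the infix formats splice ",key=value" or ";key=value" into the metric name, while the suffix (octothorpe) format, which is also the zero value of TagFormat, appends "|#key:value,..." after the metric payload. Below is a minimal, hypothetical sketch of what those formats produce, assuming the vendored package is imported by its module path; the metric name and tag values are invented for illustration and are not part of this diff.

package main

import (
	"fmt"

	"github.com/cactus/go-statsd-client/v5/statsd"
)

func main() {
	tags := []statsd.Tag{{"region", "us-east"}, {"host", "web01"}}

	// Infix comma format splices tags into the metric name:
	// prints "requests,region=us-east,host=web01"
	fmt.Println(string(statsd.InfixComma.WriteInfix([]byte("requests"), tags)))

	// Suffix octothorpe format (also the zero value) appends tags after the
	// metric payload: prints "requests:1|c|#region:us-east,host:web01"
	fmt.Println(string(statsd.SuffixOctothorpe.WriteSuffix([]byte("requests:1|c"), tags)))
}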
+ +package statsd + +import ( + "fmt" + "net" + "regexp" +) + +// The ValidatorFunc type defines a function that can serve +// as a stat name validation function. +type ValidatorFunc func(string) error + +var safeName = regexp.MustCompile(`^[a-zA-Z0-9\-_.]+$`) + +// CheckName may be used to validate whether a stat name contains invalid +// characters. If invalid characters are found, the function will return an +// error. +func CheckName(stat string) error { + if !safeName.MatchString(stat) { + return fmt.Errorf("invalid stat name: %s", stat) + } + return nil +} + +func mustBeIP(hostport string) bool { + host, _, err := net.SplitHostPort(hostport) + if err != nil { + return false + } + + ip := net.ParseIP(host) + return ip != nil +} diff -Nru temporal-1.21.5-1/src/vendor/github.com/fatih/color/color_windows.go temporal-1.22.5/src/vendor/github.com/fatih/color/color_windows.go --- temporal-1.21.5-1/src/vendor/github.com/fatih/color/color_windows.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/fatih/color/color_windows.go 2024-02-23 09:46:09.000000000 +0000 @@ -0,0 +1,19 @@ +package color + +import ( + "os" + + "golang.org/x/sys/windows" +) + +func init() { + // Opt-in for ansi color support for current process. + // https://learn.microsoft.com/en-us/windows/console/console-virtual-terminal-sequences#output-sequences + var outMode uint32 + out := windows.Handle(os.Stdout.Fd()) + if err := windows.GetConsoleMode(out, &outMode); err != nil { + return + } + outMode |= windows.ENABLE_PROCESSED_OUTPUT | windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING + _ = windows.SetConsoleMode(out, outMode) +} diff -Nru temporal-1.21.5-1/src/vendor/github.com/gocql/gocql/AUTHORS temporal-1.22.5/src/vendor/github.com/gocql/gocql/AUTHORS --- temporal-1.21.5-1/src/vendor/github.com/gocql/gocql/AUTHORS 2023-09-29 14:03:31.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/gocql/gocql/AUTHORS 2024-02-23 09:46:10.000000000 +0000 @@ -137,3 +137,5 @@ Wojciech Przytuła João Reis Lauro Ramos Venancio +Dmitry Kropachev +Oliver Boyle diff -Nru temporal-1.21.5-1/src/vendor/github.com/gocql/gocql/CHANGELOG.md temporal-1.22.5/src/vendor/github.com/gocql/gocql/CHANGELOG.md --- temporal-1.21.5-1/src/vendor/github.com/gocql/gocql/CHANGELOG.md 2023-09-29 14:03:31.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/gocql/gocql/CHANGELOG.md 2024-02-23 09:46:10.000000000 +0000 @@ -12,6 +12,21 @@ ### Fixed +## [1.5.0] - 2023-06-12 + +### Added + +- gocql now advertises the driver name and version in the STARTUP message to the server. + The values are taken from the Go module's path and version + (or from the replacement module, if used). (#1702) + That allows the server to track which fork of the driver is being used. +- Query.Values() to retrieve the values bound to the Query. + This makes writing wrappers around Query easier. (#1700) + +### Fixed +- Potential panic on deserialization (#1695) +- Unmarshalling of dates outside of `[1677-09-22, 2262-04-11]` range. 
(#1692) + ## [1.4.0] - 2023-04-26 ### Added diff -Nru temporal-1.21.5-1/src/vendor/github.com/gocql/gocql/conn.go temporal-1.22.5/src/vendor/github.com/gocql/gocql/conn.go --- temporal-1.21.5-1/src/vendor/github.com/gocql/gocql/conn.go 2023-09-29 14:03:31.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/gocql/gocql/conn.go 2024-02-23 09:46:10.000000000 +0000 @@ -422,7 +422,9 @@ func (s *startupCoordinator) startup(ctx context.Context, supported map[string][]string) error { m := map[string]string{ - "CQL_VERSION": s.conn.cfg.CQLVersion, + "CQL_VERSION": s.conn.cfg.CQLVersion, + "DRIVER_NAME": driverName, + "DRIVER_VERSION": driverVersion, } if s.conn.compressor != nil { diff -Nru temporal-1.21.5-1/src/vendor/github.com/gocql/gocql/doc.go temporal-1.22.5/src/vendor/github.com/gocql/gocql/doc.go --- temporal-1.21.5-1/src/vendor/github.com/gocql/gocql/doc.go 2023-09-29 14:03:31.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/gocql/gocql/doc.go 2024-02-23 09:46:10.000000000 +0000 @@ -30,6 +30,9 @@ // protocol version explicitly, as it's not defined which version will be used in certain situations (for example // during upgrade of the cluster when some of the nodes support different set of protocol versions than other nodes). // +// The driver advertises the module name and version in the STARTUP message, so servers are able to detect the version. +// If you use replace directive in go.mod, the driver will send information about the replacement module instead. +// // When ready, create a session from the configuration. Don't forget to Close the session once you are done with it: // // session, err := cluster.CreateSession() diff -Nru temporal-1.21.5-1/src/vendor/github.com/gocql/gocql/marshal.go temporal-1.22.5/src/vendor/github.com/gocql/gocql/marshal.go --- temporal-1.21.5-1/src/vendor/github.com/gocql/gocql/marshal.go 2023-09-29 14:03:31.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/gocql/gocql/marshal.go 2024-02-23 09:46:10.000000000 +0000 @@ -1169,6 +1169,9 @@ case Unmarshaler: return v.UnmarshalCQL(info, data) case *inf.Dec: + if len(data) < 4 { + return unmarshalErrorf("inf.Dec needs at least 4 bytes, while value has only %d", len(data)) + } scale := decInt(data[0:4]) unscaled := decBigInt2C(data[4:], nil) *v = *inf.NewDecBig(unscaled, inf.Scale(scale)) @@ -1326,6 +1329,8 @@ return unmarshalErrorf("can not unmarshal %s into %T", info, value) } +const millisecondsInADay int64 = 24 * 60 * 60 * 1000 + func marshalDate(info TypeInfo, value interface{}) ([]byte, error) { var timestamp int64 switch v := value.(type) { @@ -1335,21 +1340,21 @@ return nil, nil case int64: timestamp = v - x := timestamp/86400000 + int64(1<<31) + x := timestamp/millisecondsInADay + int64(1<<31) return encInt(int32(x)), nil case time.Time: if v.IsZero() { return []byte{}, nil } timestamp = int64(v.UTC().Unix()*1e3) + int64(v.UTC().Nanosecond()/1e6) - x := timestamp/86400000 + int64(1<<31) + x := timestamp/millisecondsInADay + int64(1<<31) return encInt(int32(x)), nil case *time.Time: if v.IsZero() { return []byte{}, nil } timestamp = int64(v.UTC().Unix()*1e3) + int64(v.UTC().Nanosecond()/1e6) - x := timestamp/86400000 + int64(1<<31) + x := timestamp/millisecondsInADay + int64(1<<31) return encInt(int32(x)), nil case string: if v == "" { @@ -1360,7 +1365,7 @@ return nil, marshalErrorf("can not marshal %T into %s, date layout must be '2006-01-02'", value, info) } timestamp = int64(t.UTC().Unix()*1e3) + int64(t.UTC().Nanosecond()/1e6) - x := timestamp/86400000 + int64(1<<31) + x := 
timestamp/millisecondsInADay + int64(1<<31) return encInt(int32(x)), nil } @@ -1381,8 +1386,8 @@ } var origin uint32 = 1 << 31 var current uint32 = binary.BigEndian.Uint32(data) - timestamp := (int64(current) - int64(origin)) * 86400000 - *v = time.Unix(0, timestamp*int64(time.Millisecond)).In(time.UTC) + timestamp := (int64(current) - int64(origin)) * millisecondsInADay + *v = time.UnixMilli(timestamp).In(time.UTC) return nil case *string: if len(data) == 0 { @@ -1391,8 +1396,8 @@ } var origin uint32 = 1 << 31 var current uint32 = binary.BigEndian.Uint32(data) - timestamp := (int64(current) - int64(origin)) * 86400000 - *v = time.Unix(0, timestamp*int64(time.Millisecond)).In(time.UTC).Format("2006-01-02") + timestamp := (int64(current) - int64(origin)) * millisecondsInADay + *v = time.UnixMilli(timestamp).In(time.UTC).Format("2006-01-02") return nil } return unmarshalErrorf("can not unmarshal %s into %T", info, value) @@ -1443,7 +1448,10 @@ } return nil } - months, days, nanos := decVints(data) + months, days, nanos, err := decVints(data) + if err != nil { + return unmarshalErrorf("failed to unmarshal %s into %T: %s", info, value, err.Error()) + } *v = Duration{ Months: months, Days: days, @@ -1454,25 +1462,40 @@ return unmarshalErrorf("can not unmarshal %s into %T", info, value) } -func decVints(data []byte) (int32, int32, int64) { - month, i := decVint(data) - days, j := decVint(data[i:]) - nanos, _ := decVint(data[i+j:]) - return int32(month), int32(days), nanos +func decVints(data []byte) (int32, int32, int64, error) { + month, i, err := decVint(data, 0) + if err != nil { + return 0, 0, 0, fmt.Errorf("failed to extract month: %s", err.Error()) + } + days, i, err := decVint(data, i) + if err != nil { + return 0, 0, 0, fmt.Errorf("failed to extract days: %s", err.Error()) + } + nanos, _, err := decVint(data, i) + if err != nil { + return 0, 0, 0, fmt.Errorf("failed to extract nanoseconds: %s", err.Error()) + } + return int32(month), int32(days), nanos, err } -func decVint(data []byte) (int64, int) { - firstByte := data[0] +func decVint(data []byte, start int) (int64, int, error) { + if len(data) <= start { + return 0, 0, errors.New("unexpected eof") + } + firstByte := data[start] if firstByte&0x80 == 0 { - return decIntZigZag(uint64(firstByte)), 1 + return decIntZigZag(uint64(firstByte)), start + 1, nil } numBytes := bits.LeadingZeros32(uint32(^firstByte)) - 24 ret := uint64(firstByte & (0xff >> uint(numBytes))) - for i := 0; i < numBytes; i++ { + if len(data) < start+numBytes+1 { + return 0, 0, fmt.Errorf("data expect to have %d bytes, but it has only %d", start+numBytes+1, len(data)) + } + for i := start; i < start+numBytes; i++ { ret <<= 8 ret |= uint64(data[i+1] & 0xff) } - return decIntZigZag(ret), numBytes + 1 + return decIntZigZag(ret), start + numBytes + 1, nil } func decIntZigZag(n uint64) int64 { @@ -1648,13 +1671,12 @@ return err } data = data[p:] - if len(data) < m { - return unmarshalErrorf("unmarshal list: unexpected eof") - } - // In case m < 0, the value is null, and unmarshalData should be nil. var unmarshalData []byte if m >= 0 { + if len(data) < m { + return unmarshalErrorf("unmarshal list: unexpected eof") + } unmarshalData = data[:m] data = data[m:] } @@ -1764,14 +1786,13 @@ return err } data = data[p:] - if len(data) < m { - return unmarshalErrorf("unmarshal map: unexpected eof") - } key := reflect.New(t.Key()) - // In case m < 0, the key is null, and unmarshalData should be nil. 
var unmarshalData []byte if m >= 0 { + if len(data) < m { + return unmarshalErrorf("unmarshal map: unexpected eof") + } unmarshalData = data[:m] data = data[m:] } @@ -1784,14 +1805,14 @@ return err } data = data[p:] - if len(data) < m { - return unmarshalErrorf("unmarshal map: unexpected eof") - } val := reflect.New(t.Elem()) // In case m < 0, the value is null, and unmarshalData should be nil. unmarshalData = nil if m >= 0 { + if len(data) < m { + return unmarshalErrorf("unmarshal map: unexpected eof") + } unmarshalData = data[:m] data = data[m:] } @@ -2281,14 +2302,16 @@ case UDTUnmarshaler: udt := info.(UDTTypeInfo) - for _, e := range udt.Elements { + for id, e := range udt.Elements { if len(data) == 0 { return nil } + if len(data) < 4 { + return unmarshalErrorf("can not unmarshal %s: field [%d]%s: unexpected eof", info, id, e.Name) + } var p []byte p, data = readBytes(data) - if err := v.UnmarshalUDT(e.Name, e.Type, p); err != nil { return err } @@ -2315,10 +2338,13 @@ rv.Set(reflect.MakeMap(t)) m := *v - for _, e := range udt.Elements { + for id, e := range udt.Elements { if len(data) == 0 { return nil } + if len(data) < 4 { + return unmarshalErrorf("can not unmarshal %s: field [%d]%s: unexpected eof", info, id, e.Name) + } valType, err := goType(e.Type) if err != nil { @@ -2368,10 +2394,13 @@ } udt := info.(UDTTypeInfo) - for _, e := range udt.Elements { + for id, e := range udt.Elements { + if len(data) == 0 { + return nil + } if len(data) < 4 { // UDT def does not match the column value - return nil + return unmarshalErrorf("can not unmarshal %s: field [%d]%s: unexpected eof", info, id, e.Name) } var p []byte diff -Nru temporal-1.21.5-1/src/vendor/github.com/gocql/gocql/session.go temporal-1.22.5/src/vendor/github.com/gocql/gocql/session.go --- temporal-1.21.5-1/src/vendor/github.com/gocql/gocql/session.go 2023-09-29 14:03:31.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/gocql/gocql/session.go 2024-02-23 09:46:10.000000000 +0000 @@ -935,6 +935,12 @@ return q.stmt } +// Values returns the values passed in via Bind. +// This can be used by a wrapper type that needs to access the bound values. +func (q Query) Values() []interface{} { + return q.values +} + // String implements the stringer interface. 
func (q Query) String() string { return fmt.Sprintf("[query statement=%q values=%+v consistency=%s]", q.stmt, q.values, q.cons) diff -Nru temporal-1.21.5-1/src/vendor/github.com/gocql/gocql/version.go temporal-1.22.5/src/vendor/github.com/gocql/gocql/version.go --- temporal-1.21.5-1/src/vendor/github.com/gocql/gocql/version.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/gocql/gocql/version.go 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,28 @@ +package gocql + +import "runtime/debug" + +const ( + mainModule = "github.com/gocql/gocql" +) + +var driverName string + +var driverVersion string + +func init() { + buildInfo, ok := debug.ReadBuildInfo() + if ok { + for _, d := range buildInfo.Deps { + if d.Path == mainModule { + driverName = mainModule + driverVersion = d.Version + if d.Replace != nil { + driverName = d.Replace.Path + driverVersion = d.Replace.Version + } + break + } + } + } +} diff -Nru temporal-1.21.5-1/src/vendor/github.com/golang/protobuf/descriptor/descriptor.go temporal-1.22.5/src/vendor/github.com/golang/protobuf/descriptor/descriptor.go --- temporal-1.21.5-1/src/vendor/github.com/golang/protobuf/descriptor/descriptor.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/golang/protobuf/descriptor/descriptor.go 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,180 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package descriptor provides functions for obtaining the protocol buffer +// descriptors of generated Go types. +// +// Deprecated: See the "google.golang.org/protobuf/reflect/protoreflect" package +// for how to obtain an EnumDescriptor or MessageDescriptor in order to +// programatically interact with the protobuf type system. +package descriptor + +import ( + "bytes" + "compress/gzip" + "io/ioutil" + "sync" + + "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/reflect/protodesc" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoimpl" + + descriptorpb "github.com/golang/protobuf/protoc-gen-go/descriptor" +) + +// Message is proto.Message with a method to return its descriptor. +// +// Deprecated: The Descriptor method may not be generated by future +// versions of protoc-gen-go, meaning that this interface may not +// be implemented by many concrete message types. +type Message interface { + proto.Message + Descriptor() ([]byte, []int) +} + +// ForMessage returns the file descriptor proto containing +// the message and the message descriptor proto for the message itself. +// The returned proto messages must not be mutated. +// +// Deprecated: Not all concrete message types satisfy the Message interface. +// Use MessageDescriptorProto instead. If possible, the calling code should +// be rewritten to use protobuf reflection instead. +// See package "google.golang.org/protobuf/reflect/protoreflect" for details. +func ForMessage(m Message) (*descriptorpb.FileDescriptorProto, *descriptorpb.DescriptorProto) { + return MessageDescriptorProto(m) +} + +type rawDesc struct { + fileDesc []byte + indexes []int +} + +var rawDescCache sync.Map // map[protoreflect.Descriptor]*rawDesc + +func deriveRawDescriptor(d protoreflect.Descriptor) ([]byte, []int) { + // Fast-path: check whether raw descriptors are already cached. 
+ origDesc := d + if v, ok := rawDescCache.Load(origDesc); ok { + return v.(*rawDesc).fileDesc, v.(*rawDesc).indexes + } + + // Slow-path: derive the raw descriptor from the v2 descriptor. + + // Start with the leaf (a given enum or message declaration) and + // ascend upwards until we hit the parent file descriptor. + var idxs []int + for { + idxs = append(idxs, d.Index()) + d = d.Parent() + if d == nil { + // TODO: We could construct a FileDescriptor stub for standalone + // descriptors to satisfy the API. + return nil, nil + } + if _, ok := d.(protoreflect.FileDescriptor); ok { + break + } + } + + // Obtain the raw file descriptor. + fd := d.(protoreflect.FileDescriptor) + b, _ := proto.Marshal(protodesc.ToFileDescriptorProto(fd)) + file := protoimpl.X.CompressGZIP(b) + + // Reverse the indexes, since we populated it in reverse. + for i, j := 0, len(idxs)-1; i < j; i, j = i+1, j-1 { + idxs[i], idxs[j] = idxs[j], idxs[i] + } + + if v, ok := rawDescCache.LoadOrStore(origDesc, &rawDesc{file, idxs}); ok { + return v.(*rawDesc).fileDesc, v.(*rawDesc).indexes + } + return file, idxs +} + +// EnumRawDescriptor returns the GZIP'd raw file descriptor representing +// the enum and the index path to reach the enum declaration. +// The returned slices must not be mutated. +func EnumRawDescriptor(e proto.GeneratedEnum) ([]byte, []int) { + if ev, ok := e.(interface{ EnumDescriptor() ([]byte, []int) }); ok { + return ev.EnumDescriptor() + } + ed := protoimpl.X.EnumTypeOf(e) + return deriveRawDescriptor(ed.Descriptor()) +} + +// MessageRawDescriptor returns the GZIP'd raw file descriptor representing +// the message and the index path to reach the message declaration. +// The returned slices must not be mutated. +func MessageRawDescriptor(m proto.GeneratedMessage) ([]byte, []int) { + if mv, ok := m.(interface{ Descriptor() ([]byte, []int) }); ok { + return mv.Descriptor() + } + md := protoimpl.X.MessageTypeOf(m) + return deriveRawDescriptor(md.Descriptor()) +} + +var fileDescCache sync.Map // map[*byte]*descriptorpb.FileDescriptorProto + +func deriveFileDescriptor(rawDesc []byte) *descriptorpb.FileDescriptorProto { + // Fast-path: check whether descriptor protos are already cached. + if v, ok := fileDescCache.Load(&rawDesc[0]); ok { + return v.(*descriptorpb.FileDescriptorProto) + } + + // Slow-path: derive the descriptor proto from the GZIP'd message. + zr, err := gzip.NewReader(bytes.NewReader(rawDesc)) + if err != nil { + panic(err) + } + b, err := ioutil.ReadAll(zr) + if err != nil { + panic(err) + } + fd := new(descriptorpb.FileDescriptorProto) + if err := proto.Unmarshal(b, fd); err != nil { + panic(err) + } + if v, ok := fileDescCache.LoadOrStore(&rawDesc[0], fd); ok { + return v.(*descriptorpb.FileDescriptorProto) + } + return fd +} + +// EnumDescriptorProto returns the file descriptor proto representing +// the enum and the enum descriptor proto for the enum itself. +// The returned proto messages must not be mutated. 
+func EnumDescriptorProto(e proto.GeneratedEnum) (*descriptorpb.FileDescriptorProto, *descriptorpb.EnumDescriptorProto) { + rawDesc, idxs := EnumRawDescriptor(e) + if rawDesc == nil || idxs == nil { + return nil, nil + } + fd := deriveFileDescriptor(rawDesc) + if len(idxs) == 1 { + return fd, fd.EnumType[idxs[0]] + } + md := fd.MessageType[idxs[0]] + for _, i := range idxs[1 : len(idxs)-1] { + md = md.NestedType[i] + } + ed := md.EnumType[idxs[len(idxs)-1]] + return fd, ed +} + +// MessageDescriptorProto returns the file descriptor proto representing +// the message and the message descriptor proto for the message itself. +// The returned proto messages must not be mutated. +func MessageDescriptorProto(m proto.GeneratedMessage) (*descriptorpb.FileDescriptorProto, *descriptorpb.DescriptorProto) { + rawDesc, idxs := MessageRawDescriptor(m) + if rawDesc == nil || idxs == nil { + return nil, nil + } + fd := deriveFileDescriptor(rawDesc) + md := fd.MessageType[idxs[0]] + for _, i := range idxs[1:] { + md = md.NestedType[i] + } + return fd, md +} diff -Nru temporal-1.21.5-1/src/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go temporal-1.22.5/src/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go --- temporal-1.21.5-1/src/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,200 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto + +package descriptor + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + reflect "reflect" +) + +// Symbols defined in public import of google/protobuf/descriptor.proto. 
+ +type FieldDescriptorProto_Type = descriptorpb.FieldDescriptorProto_Type + +const FieldDescriptorProto_TYPE_DOUBLE = descriptorpb.FieldDescriptorProto_TYPE_DOUBLE +const FieldDescriptorProto_TYPE_FLOAT = descriptorpb.FieldDescriptorProto_TYPE_FLOAT +const FieldDescriptorProto_TYPE_INT64 = descriptorpb.FieldDescriptorProto_TYPE_INT64 +const FieldDescriptorProto_TYPE_UINT64 = descriptorpb.FieldDescriptorProto_TYPE_UINT64 +const FieldDescriptorProto_TYPE_INT32 = descriptorpb.FieldDescriptorProto_TYPE_INT32 +const FieldDescriptorProto_TYPE_FIXED64 = descriptorpb.FieldDescriptorProto_TYPE_FIXED64 +const FieldDescriptorProto_TYPE_FIXED32 = descriptorpb.FieldDescriptorProto_TYPE_FIXED32 +const FieldDescriptorProto_TYPE_BOOL = descriptorpb.FieldDescriptorProto_TYPE_BOOL +const FieldDescriptorProto_TYPE_STRING = descriptorpb.FieldDescriptorProto_TYPE_STRING +const FieldDescriptorProto_TYPE_GROUP = descriptorpb.FieldDescriptorProto_TYPE_GROUP +const FieldDescriptorProto_TYPE_MESSAGE = descriptorpb.FieldDescriptorProto_TYPE_MESSAGE +const FieldDescriptorProto_TYPE_BYTES = descriptorpb.FieldDescriptorProto_TYPE_BYTES +const FieldDescriptorProto_TYPE_UINT32 = descriptorpb.FieldDescriptorProto_TYPE_UINT32 +const FieldDescriptorProto_TYPE_ENUM = descriptorpb.FieldDescriptorProto_TYPE_ENUM +const FieldDescriptorProto_TYPE_SFIXED32 = descriptorpb.FieldDescriptorProto_TYPE_SFIXED32 +const FieldDescriptorProto_TYPE_SFIXED64 = descriptorpb.FieldDescriptorProto_TYPE_SFIXED64 +const FieldDescriptorProto_TYPE_SINT32 = descriptorpb.FieldDescriptorProto_TYPE_SINT32 +const FieldDescriptorProto_TYPE_SINT64 = descriptorpb.FieldDescriptorProto_TYPE_SINT64 + +var FieldDescriptorProto_Type_name = descriptorpb.FieldDescriptorProto_Type_name +var FieldDescriptorProto_Type_value = descriptorpb.FieldDescriptorProto_Type_value + +type FieldDescriptorProto_Label = descriptorpb.FieldDescriptorProto_Label + +const FieldDescriptorProto_LABEL_OPTIONAL = descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL +const FieldDescriptorProto_LABEL_REQUIRED = descriptorpb.FieldDescriptorProto_LABEL_REQUIRED +const FieldDescriptorProto_LABEL_REPEATED = descriptorpb.FieldDescriptorProto_LABEL_REPEATED + +var FieldDescriptorProto_Label_name = descriptorpb.FieldDescriptorProto_Label_name +var FieldDescriptorProto_Label_value = descriptorpb.FieldDescriptorProto_Label_value + +type FileOptions_OptimizeMode = descriptorpb.FileOptions_OptimizeMode + +const FileOptions_SPEED = descriptorpb.FileOptions_SPEED +const FileOptions_CODE_SIZE = descriptorpb.FileOptions_CODE_SIZE +const FileOptions_LITE_RUNTIME = descriptorpb.FileOptions_LITE_RUNTIME + +var FileOptions_OptimizeMode_name = descriptorpb.FileOptions_OptimizeMode_name +var FileOptions_OptimizeMode_value = descriptorpb.FileOptions_OptimizeMode_value + +type FieldOptions_CType = descriptorpb.FieldOptions_CType + +const FieldOptions_STRING = descriptorpb.FieldOptions_STRING +const FieldOptions_CORD = descriptorpb.FieldOptions_CORD +const FieldOptions_STRING_PIECE = descriptorpb.FieldOptions_STRING_PIECE + +var FieldOptions_CType_name = descriptorpb.FieldOptions_CType_name +var FieldOptions_CType_value = descriptorpb.FieldOptions_CType_value + +type FieldOptions_JSType = descriptorpb.FieldOptions_JSType + +const FieldOptions_JS_NORMAL = descriptorpb.FieldOptions_JS_NORMAL +const FieldOptions_JS_STRING = descriptorpb.FieldOptions_JS_STRING +const FieldOptions_JS_NUMBER = descriptorpb.FieldOptions_JS_NUMBER + +var FieldOptions_JSType_name = descriptorpb.FieldOptions_JSType_name +var 
FieldOptions_JSType_value = descriptorpb.FieldOptions_JSType_value + +type MethodOptions_IdempotencyLevel = descriptorpb.MethodOptions_IdempotencyLevel + +const MethodOptions_IDEMPOTENCY_UNKNOWN = descriptorpb.MethodOptions_IDEMPOTENCY_UNKNOWN +const MethodOptions_NO_SIDE_EFFECTS = descriptorpb.MethodOptions_NO_SIDE_EFFECTS +const MethodOptions_IDEMPOTENT = descriptorpb.MethodOptions_IDEMPOTENT + +var MethodOptions_IdempotencyLevel_name = descriptorpb.MethodOptions_IdempotencyLevel_name +var MethodOptions_IdempotencyLevel_value = descriptorpb.MethodOptions_IdempotencyLevel_value + +type FileDescriptorSet = descriptorpb.FileDescriptorSet +type FileDescriptorProto = descriptorpb.FileDescriptorProto +type DescriptorProto = descriptorpb.DescriptorProto +type ExtensionRangeOptions = descriptorpb.ExtensionRangeOptions +type FieldDescriptorProto = descriptorpb.FieldDescriptorProto +type OneofDescriptorProto = descriptorpb.OneofDescriptorProto +type EnumDescriptorProto = descriptorpb.EnumDescriptorProto +type EnumValueDescriptorProto = descriptorpb.EnumValueDescriptorProto +type ServiceDescriptorProto = descriptorpb.ServiceDescriptorProto +type MethodDescriptorProto = descriptorpb.MethodDescriptorProto + +const Default_MethodDescriptorProto_ClientStreaming = descriptorpb.Default_MethodDescriptorProto_ClientStreaming +const Default_MethodDescriptorProto_ServerStreaming = descriptorpb.Default_MethodDescriptorProto_ServerStreaming + +type FileOptions = descriptorpb.FileOptions + +const Default_FileOptions_JavaMultipleFiles = descriptorpb.Default_FileOptions_JavaMultipleFiles +const Default_FileOptions_JavaStringCheckUtf8 = descriptorpb.Default_FileOptions_JavaStringCheckUtf8 +const Default_FileOptions_OptimizeFor = descriptorpb.Default_FileOptions_OptimizeFor +const Default_FileOptions_CcGenericServices = descriptorpb.Default_FileOptions_CcGenericServices +const Default_FileOptions_JavaGenericServices = descriptorpb.Default_FileOptions_JavaGenericServices +const Default_FileOptions_PyGenericServices = descriptorpb.Default_FileOptions_PyGenericServices +const Default_FileOptions_PhpGenericServices = descriptorpb.Default_FileOptions_PhpGenericServices +const Default_FileOptions_Deprecated = descriptorpb.Default_FileOptions_Deprecated +const Default_FileOptions_CcEnableArenas = descriptorpb.Default_FileOptions_CcEnableArenas + +type MessageOptions = descriptorpb.MessageOptions + +const Default_MessageOptions_MessageSetWireFormat = descriptorpb.Default_MessageOptions_MessageSetWireFormat +const Default_MessageOptions_NoStandardDescriptorAccessor = descriptorpb.Default_MessageOptions_NoStandardDescriptorAccessor +const Default_MessageOptions_Deprecated = descriptorpb.Default_MessageOptions_Deprecated + +type FieldOptions = descriptorpb.FieldOptions + +const Default_FieldOptions_Ctype = descriptorpb.Default_FieldOptions_Ctype +const Default_FieldOptions_Jstype = descriptorpb.Default_FieldOptions_Jstype +const Default_FieldOptions_Lazy = descriptorpb.Default_FieldOptions_Lazy +const Default_FieldOptions_Deprecated = descriptorpb.Default_FieldOptions_Deprecated +const Default_FieldOptions_Weak = descriptorpb.Default_FieldOptions_Weak + +type OneofOptions = descriptorpb.OneofOptions +type EnumOptions = descriptorpb.EnumOptions + +const Default_EnumOptions_Deprecated = descriptorpb.Default_EnumOptions_Deprecated + +type EnumValueOptions = descriptorpb.EnumValueOptions + +const Default_EnumValueOptions_Deprecated = descriptorpb.Default_EnumValueOptions_Deprecated + +type ServiceOptions = 
descriptorpb.ServiceOptions + +const Default_ServiceOptions_Deprecated = descriptorpb.Default_ServiceOptions_Deprecated + +type MethodOptions = descriptorpb.MethodOptions + +const Default_MethodOptions_Deprecated = descriptorpb.Default_MethodOptions_Deprecated +const Default_MethodOptions_IdempotencyLevel = descriptorpb.Default_MethodOptions_IdempotencyLevel + +type UninterpretedOption = descriptorpb.UninterpretedOption +type SourceCodeInfo = descriptorpb.SourceCodeInfo +type GeneratedCodeInfo = descriptorpb.GeneratedCodeInfo +type DescriptorProto_ExtensionRange = descriptorpb.DescriptorProto_ExtensionRange +type DescriptorProto_ReservedRange = descriptorpb.DescriptorProto_ReservedRange +type EnumDescriptorProto_EnumReservedRange = descriptorpb.EnumDescriptorProto_EnumReservedRange +type UninterpretedOption_NamePart = descriptorpb.UninterpretedOption_NamePart +type SourceCodeInfo_Location = descriptorpb.SourceCodeInfo_Location +type GeneratedCodeInfo_Annotation = descriptorpb.GeneratedCodeInfo_Annotation + +var File_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto protoreflect.FileDescriptor + +var file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_rawDesc = []byte{ + 0x0a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x40, 0x5a, 0x3e, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, + 0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x3b, + 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x32, +} + +var file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_goTypes = []interface{}{} +var file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_init() } +func file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_init() { + if File_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_goTypes, + DependencyIndexes: 
file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_depIdxs, + }.Build() + File_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto = out.File + file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_rawDesc = nil + file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_goTypes = nil + file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_depIdxs = nil +} diff -Nru temporal-1.21.5-1/src/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go temporal-1.22.5/src/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go --- temporal-1.21.5-1/src/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,71 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: github.com/golang/protobuf/ptypes/wrappers/wrappers.proto + +package wrappers + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" +) + +// Symbols defined in public import of google/protobuf/wrappers.proto. + +type DoubleValue = wrapperspb.DoubleValue +type FloatValue = wrapperspb.FloatValue +type Int64Value = wrapperspb.Int64Value +type UInt64Value = wrapperspb.UInt64Value +type Int32Value = wrapperspb.Int32Value +type UInt32Value = wrapperspb.UInt32Value +type BoolValue = wrapperspb.BoolValue +type StringValue = wrapperspb.StringValue +type BytesValue = wrapperspb.BytesValue + +var File_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto protoreflect.FileDescriptor + +var file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_rawDesc = []byte{ + 0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2f, 0x77, 0x72, 0x61, + 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, + 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x35, 0x5a, 0x33, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, + 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x3b, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, + 0x72, 0x73, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_goTypes = []interface{}{} +var file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_init() } +func file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_init() { + if File_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto != 
nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_goTypes, + DependencyIndexes: file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_depIdxs, + }.Build() + File_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto = out.File + file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_rawDesc = nil + file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_goTypes = nil + file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_depIdxs = nil +} diff -Nru temporal-1.21.5-1/src/vendor/github.com/golang-jwt/jwt/v4/token.go temporal-1.22.5/src/vendor/github.com/golang-jwt/jwt/v4/token.go --- temporal-1.21.5-1/src/vendor/github.com/golang-jwt/jwt/v4/token.go 2023-09-29 14:03:31.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/golang-jwt/jwt/v4/token.go 2024-02-23 09:46:10.000000000 +0000 @@ -14,6 +14,12 @@ // To use the non-recommended decoding, set this boolean to `true` prior to using this package. var DecodePaddingAllowed bool +// DecodeStrict will switch the codec used for decoding JWTs into strict mode. +// In this mode, the decoder requires that trailing padding bits are zero, as described in RFC 4648 section 3.5. +// Note that this is a global variable, and updating it will change the behavior on a package level, and is also NOT go-routine safe. +// To use strict decoding, set this boolean to `true` prior to using this package. +var DecodeStrict bool + // TimeFunc provides the current time when parsing token to validate "exp" claim (expiration time). // You can override it to use another time value. This is useful for testing or if your // server uses a different time zone than your tokens. 
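The DecodeStrict flag added above, together with the existing DecodePaddingAllowed switch, changes how DecodeSegment (rewired in the next hunk) picks its base64 codec. A hedged sketch of how a caller would toggle them, using the well-known base64url encoding of {"alg":"HS256","typ":"JWT"} purely as sample input; DecodeSegment is the still-exported (deprecated) entry point for that decode path.

package main

import (
	"fmt"

	"github.com/golang-jwt/jwt/v4"
)

func main() {
	// Both switches are package-level globals and are not goroutine safe,
	// so set them once, before the package is used anywhere else.
	jwt.DecodePaddingAllowed = true // also accept padded base64 segments
	jwt.DecodeStrict = true         // reject non-zero trailing padding bits (RFC 4648 section 3.5)

	// base64url encoding of {"alg":"HS256","typ":"JWT"}, used only as sample input.
	seg := "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9"
	header, err := jwt.DecodeSegment(seg)
	if err != nil {
		fmt.Println("decode error:", err)
		return
	}
	fmt.Println(string(header))
}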
@@ -121,12 +127,17 @@ // Deprecated: In a future release, we will demote this function to a non-exported function, since it // should only be used internally func DecodeSegment(seg string) ([]byte, error) { + encoding := base64.RawURLEncoding + if DecodePaddingAllowed { if l := len(seg) % 4; l > 0 { seg += strings.Repeat("=", 4-l) } - return base64.URLEncoding.DecodeString(seg) + encoding = base64.URLEncoding } - return base64.RawURLEncoding.DecodeString(seg) + if DecodeStrict { + encoding = encoding.Strict() + } + return encoding.DecodeString(seg) } diff -Nru temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/.gitignore temporal-1.22.5/src/vendor/github.com/google/s2a-go/.gitignore --- temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/google/s2a-go/.gitignore 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,6 @@ +# Ignore binaries without extension +//example/client/client +//example/server/server +//internal/v2/fakes2av2_server/fakes2av2_server + +.idea/ \ No newline at end of file diff -Nru temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/CODE_OF_CONDUCT.md temporal-1.22.5/src/vendor/github.com/google/s2a-go/CODE_OF_CONDUCT.md --- temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/CODE_OF_CONDUCT.md 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/google/s2a-go/CODE_OF_CONDUCT.md 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,93 @@ +# Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of +experience, education, socio-economic status, nationality, personal appearance, +race, religion, or sexual identity and orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or + advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, or to ban temporarily or permanently any +contributor for other behaviors that they deem inappropriate, threatening, +offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. 
Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +This Code of Conduct also applies outside the project spaces when the Project +Steward has a reasonable belief that an individual's behavior may have a +negative impact on the project or its community. + +## Conflict Resolution + +We do not believe that all conflict is bad; healthy debate and disagreement +often yield positive results. However, it is never okay to be disrespectful or +to engage in behavior that violates the project’s code of conduct. + +If you see someone violating the code of conduct, you are encouraged to address +the behavior directly with those involved. Many issues can be resolved quickly +and easily, and this gives people more control over the outcome of their +dispute. If you are unable to resolve the matter for any reason, or if the +behavior is threatening or harassing, report it. We are dedicated to providing +an environment where participants feel welcome and safe. + +Reports should be directed to *[PROJECT STEWARD NAME(s) AND EMAIL(s)]*, the +Project Steward(s) for *[PROJECT NAME]*. It is the Project Steward’s duty to +receive and address reported violations of the code of conduct. They will then +work with a committee consisting of representatives from the Open Source +Programs Office and the Google Open Source Strategy team. If for any reason you +are uncomfortable reaching out to the Project Steward, please email +opensource@google.com. + +We will investigate every complaint, but you may not receive a direct response. +We will use our discretion in determining when and how to follow up on reported +incidents, which may range from not taking action to permanent expulsion from +the project and project-sponsored spaces. We will notify the accused of the +report and provide them an opportunity to discuss it before any action is taken. +The identity of the reporter will be omitted from the details of the report +supplied to the accused. In potentially harmful situations, such as ongoing +harassment or threats to anyone's safety, we may take action without notice. + +## Attribution + +This Code of Conduct is adapted from the Contributor Covenant, version 1.4, +available at +https://www.contributor-covenant.org/version/1/4/code-of-conduct.html diff -Nru temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/CONTRIBUTING.md temporal-1.22.5/src/vendor/github.com/google/s2a-go/CONTRIBUTING.md --- temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/CONTRIBUTING.md 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/google/s2a-go/CONTRIBUTING.md 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,29 @@ +# How to Contribute + +We'd love to accept your patches and contributions to this project. There are +just a few small guidelines you need to follow. + +## Contributor License Agreement + +Contributions to this project must be accompanied by a Contributor License +Agreement (CLA). You (or your employer) retain the copyright to your +contribution; this simply gives us permission to use and redistribute your +contributions as part of the project. Head over to + to see your current agreements on file or +to sign a new one. 
+ +You generally only need to submit a CLA once, so if you've already submitted one +(even if it was for a different project), you probably don't need to do it +again. + +## Code reviews + +All submissions, including submissions by project members, require review. We +use GitHub pull requests for this purpose. Consult +[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more +information on using pull requests. + +## Community Guidelines + +This project follows +[Google's Open Source Community Guidelines](https://opensource.google/conduct/). diff -Nru temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/LICENSE.md temporal-1.22.5/src/vendor/github.com/google/s2a-go/LICENSE.md --- temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/LICENSE.md 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/google/s2a-go/LICENSE.md 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff -Nru temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/README.md temporal-1.22.5/src/vendor/github.com/google/s2a-go/README.md --- temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/README.md 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/google/s2a-go/README.md 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,17 @@ +# Secure Session Agent Client Libraries + +The Secure Session Agent is a service that enables a workload to offload select +operations from the mTLS handshake and protects a workload's private key +material from exfiltration. Specifically, the workload asks the Secure Session +Agent for the TLS configuration to use during the handshake, to perform private +key operations, and to validate the peer certificate chain. The Secure Session +Agent's client libraries enable applications to communicate with the Secure +Session Agent during the TLS handshake, and to encrypt traffic to the peer +after the TLS handshake is complete. + +This repository contains the source code for the Secure Session Agent's Go +client libraries, which allow gRPC-Go applications to use the Secure Session +Agent. This repository supports the Bazel and Golang build systems. + +All code in this repository is experimental and subject to change. We do not +guarantee API stability at this time. diff -Nru temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/fallback/s2a_fallback.go temporal-1.22.5/src/vendor/github.com/google/s2a-go/fallback/s2a_fallback.go --- temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/fallback/s2a_fallback.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/google/s2a-go/fallback/s2a_fallback.go 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,167 @@ +/* + * + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +// Package fallback provides default implementations of fallback options when S2A fails. +package fallback + +import ( + "context" + "crypto/tls" + "fmt" + "net" + + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" +) + +const ( + alpnProtoStrH2 = "h2" + alpnProtoStrHTTP = "http/1.1" + defaultHTTPSPort = "443" +) + +// FallbackTLSConfigGRPC is a tls.Config used by the DefaultFallbackClientHandshakeFunc function. +// It supports GRPC use case, thus the alpn is set to 'h2'. +var FallbackTLSConfigGRPC = tls.Config{ + MinVersion: tls.VersionTLS13, + ClientSessionCache: nil, + NextProtos: []string{alpnProtoStrH2}, +} + +// FallbackTLSConfigHTTP is a tls.Config used by the DefaultFallbackDialerAndAddress func. +// It supports the HTTP use case and the alpn is set to both 'http/1.1' and 'h2'. +var FallbackTLSConfigHTTP = tls.Config{ + MinVersion: tls.VersionTLS13, + ClientSessionCache: nil, + NextProtos: []string{alpnProtoStrH2, alpnProtoStrHTTP}, +} + +// ClientHandshake establishes a TLS connection and returns it, plus its auth info. +// Inputs: +// +// targetServer: the server attempted with S2A. +// conn: the tcp connection to the server at address targetServer that was passed into S2A's ClientHandshake func. +// If fallback is successful, the `conn` should be closed. +// err: the error encountered when performing the client-side TLS handshake with S2A. +type ClientHandshake func(ctx context.Context, targetServer string, conn net.Conn, err error) (net.Conn, credentials.AuthInfo, error) + +// DefaultFallbackClientHandshakeFunc returns a ClientHandshake function, +// which establishes a TLS connection to the provided fallbackAddr, returns the new connection and its auth info. +// Example use: +// +// transportCreds, _ = s2a.NewClientCreds(&s2a.ClientOptions{ +// S2AAddress: s2aAddress, +// FallbackOpts: &s2a.FallbackOptions{ // optional +// FallbackClientHandshakeFunc: fallback.DefaultFallbackClientHandshakeFunc(fallbackAddr), +// }, +// }) +// +// The fallback server's certificate must be verifiable using OS root store. +// The fallbackAddr is expected to be a network address, e.g. example.com:port. If port is not specified, +// it uses default port 443. +// In the returned function's TLS config, ClientSessionCache is explicitly set to nil to disable TLS resumption, +// and min TLS version is set to 1.3. 
+func DefaultFallbackClientHandshakeFunc(fallbackAddr string) (ClientHandshake, error) { + var fallbackDialer = tls.Dialer{Config: &FallbackTLSConfigGRPC} + return defaultFallbackClientHandshakeFuncInternal(fallbackAddr, fallbackDialer.DialContext) +} + +func defaultFallbackClientHandshakeFuncInternal(fallbackAddr string, dialContextFunc func(context.Context, string, string) (net.Conn, error)) (ClientHandshake, error) { + fallbackServerAddr, err := processFallbackAddr(fallbackAddr) + if err != nil { + if grpclog.V(1) { + grpclog.Infof("error processing fallback address [%s]: %v", fallbackAddr, err) + } + return nil, err + } + return func(ctx context.Context, targetServer string, conn net.Conn, s2aErr error) (net.Conn, credentials.AuthInfo, error) { + fbConn, fbErr := dialContextFunc(ctx, "tcp", fallbackServerAddr) + if fbErr != nil { + grpclog.Infof("dialing to fallback server %s failed: %v", fallbackServerAddr, fbErr) + return nil, nil, fmt.Errorf("dialing to fallback server %s failed: %v; S2A client handshake with %s error: %w", fallbackServerAddr, fbErr, targetServer, s2aErr) + } + + tc, success := fbConn.(*tls.Conn) + if !success { + grpclog.Infof("the connection with fallback server is expected to be tls but isn't") + return nil, nil, fmt.Errorf("the connection with fallback server is expected to be tls but isn't; S2A client handshake with %s error: %w", targetServer, s2aErr) + } + + tlsInfo := credentials.TLSInfo{ + State: tc.ConnectionState(), + CommonAuthInfo: credentials.CommonAuthInfo{ + SecurityLevel: credentials.PrivacyAndIntegrity, + }, + } + if grpclog.V(1) { + grpclog.Infof("ConnectionState.NegotiatedProtocol: %v", tc.ConnectionState().NegotiatedProtocol) + grpclog.Infof("ConnectionState.HandshakeComplete: %v", tc.ConnectionState().HandshakeComplete) + grpclog.Infof("ConnectionState.ServerName: %v", tc.ConnectionState().ServerName) + } + conn.Close() + return fbConn, tlsInfo, nil + }, nil +} + +// DefaultFallbackDialerAndAddress returns a TLS dialer and the network address to dial. +// Example use: +// +// fallbackDialer, fallbackServerAddr := fallback.DefaultFallbackDialerAndAddress(fallbackAddr) +// dialTLSContext := s2a.NewS2aDialTLSContextFunc(&s2a.ClientOptions{ +// S2AAddress: s2aAddress, // required +// FallbackOpts: &s2a.FallbackOptions{ +// FallbackDialer: &s2a.FallbackDialer{ +// Dialer: fallbackDialer, +// ServerAddr: fallbackServerAddr, +// }, +// }, +// }) +// +// The fallback server's certificate should be verifiable using OS root store. +// The fallbackAddr is expected to be a network address, e.g. example.com:port. If port is not specified, +// it uses default port 443. +// In the returned function's TLS config, ClientSessionCache is explicitly set to nil to disable TLS resumption, +// and min TLS version is set to 1.3. 
+func DefaultFallbackDialerAndAddress(fallbackAddr string) (*tls.Dialer, string, error) { + fallbackServerAddr, err := processFallbackAddr(fallbackAddr) + if err != nil { + if grpclog.V(1) { + grpclog.Infof("error processing fallback address [%s]: %v", fallbackAddr, err) + } + return nil, "", err + } + return &tls.Dialer{Config: &FallbackTLSConfigHTTP}, fallbackServerAddr, nil +} + +func processFallbackAddr(fallbackAddr string) (string, error) { + var fallbackServerAddr string + var err error + + if fallbackAddr == "" { + return "", fmt.Errorf("empty fallback address") + } + _, _, err = net.SplitHostPort(fallbackAddr) + if err != nil { + // fallbackAddr does not have port suffix + fallbackServerAddr = net.JoinHostPort(fallbackAddr, defaultHTTPSPort) + } else { + // FallbackServerAddr already has port suffix + fallbackServerAddr = fallbackAddr + } + return fallbackServerAddr, nil +} diff -Nru temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/authinfo/authinfo.go temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/authinfo/authinfo.go --- temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/authinfo/authinfo.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/authinfo/authinfo.go 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,119 @@ +/* + * + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package authinfo provides authentication and authorization information that +// results from the TLS handshake. +package authinfo + +import ( + "errors" + + commonpb "github.com/google/s2a-go/internal/proto/common_go_proto" + contextpb "github.com/google/s2a-go/internal/proto/s2a_context_go_proto" + grpcpb "github.com/google/s2a-go/internal/proto/s2a_go_proto" + "google.golang.org/grpc/credentials" +) + +var _ credentials.AuthInfo = (*S2AAuthInfo)(nil) + +const s2aAuthType = "s2a" + +// S2AAuthInfo exposes authentication and authorization information from the +// S2A session result to the gRPC stack. +type S2AAuthInfo struct { + s2aContext *contextpb.S2AContext + commonAuthInfo credentials.CommonAuthInfo +} + +// NewS2AAuthInfo returns a new S2AAuthInfo object from the S2A session result. 
+func NewS2AAuthInfo(result *grpcpb.SessionResult) (credentials.AuthInfo, error) { + return newS2AAuthInfo(result) +} + +func newS2AAuthInfo(result *grpcpb.SessionResult) (*S2AAuthInfo, error) { + if result == nil { + return nil, errors.New("NewS2aAuthInfo given nil session result") + } + return &S2AAuthInfo{ + s2aContext: &contextpb.S2AContext{ + ApplicationProtocol: result.GetApplicationProtocol(), + TlsVersion: result.GetState().GetTlsVersion(), + Ciphersuite: result.GetState().GetTlsCiphersuite(), + PeerIdentity: result.GetPeerIdentity(), + LocalIdentity: result.GetLocalIdentity(), + PeerCertFingerprint: result.GetPeerCertFingerprint(), + LocalCertFingerprint: result.GetLocalCertFingerprint(), + IsHandshakeResumed: result.GetState().GetIsHandshakeResumed(), + }, + commonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}, + }, nil +} + +// AuthType returns the authentication type. +func (s *S2AAuthInfo) AuthType() string { + return s2aAuthType +} + +// ApplicationProtocol returns the application protocol, e.g. "grpc". +func (s *S2AAuthInfo) ApplicationProtocol() string { + return s.s2aContext.GetApplicationProtocol() +} + +// TLSVersion returns the TLS version negotiated during the handshake. +func (s *S2AAuthInfo) TLSVersion() commonpb.TLSVersion { + return s.s2aContext.GetTlsVersion() +} + +// Ciphersuite returns the ciphersuite negotiated during the handshake. +func (s *S2AAuthInfo) Ciphersuite() commonpb.Ciphersuite { + return s.s2aContext.GetCiphersuite() +} + +// PeerIdentity returns the authenticated identity of the peer. +func (s *S2AAuthInfo) PeerIdentity() *commonpb.Identity { + return s.s2aContext.GetPeerIdentity() +} + +// LocalIdentity returns the local identity of the application used during +// session setup. +func (s *S2AAuthInfo) LocalIdentity() *commonpb.Identity { + return s.s2aContext.GetLocalIdentity() +} + +// PeerCertFingerprint returns the SHA256 hash of the peer certificate used in +// the S2A handshake. +func (s *S2AAuthInfo) PeerCertFingerprint() []byte { + return s.s2aContext.GetPeerCertFingerprint() +} + +// LocalCertFingerprint returns the SHA256 hash of the local certificate used +// in the S2A handshake. +func (s *S2AAuthInfo) LocalCertFingerprint() []byte { + return s.s2aContext.GetLocalCertFingerprint() +} + +// IsHandshakeResumed returns true if a cached session was used to resume +// the handshake. +func (s *S2AAuthInfo) IsHandshakeResumed() bool { + return s.s2aContext.GetIsHandshakeResumed() +} + +// SecurityLevel returns the security level of the connection. +func (s *S2AAuthInfo) SecurityLevel() credentials.SecurityLevel { + return s.commonAuthInfo.SecurityLevel +} diff -Nru temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/handshaker/handshaker.go temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/handshaker/handshaker.go --- temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/handshaker/handshaker.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/handshaker/handshaker.go 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,438 @@ +/* + * + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package handshaker communicates with the S2A handshaker service. +package handshaker + +import ( + "context" + "errors" + "fmt" + "io" + "net" + "sync" + + "github.com/google/s2a-go/internal/authinfo" + commonpb "github.com/google/s2a-go/internal/proto/common_go_proto" + s2apb "github.com/google/s2a-go/internal/proto/s2a_go_proto" + "github.com/google/s2a-go/internal/record" + "github.com/google/s2a-go/internal/tokenmanager" + grpc "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" +) + +var ( + // appProtocol contains the application protocol accepted by the handshaker. + appProtocol = "grpc" + // frameLimit is the maximum size of a frame in bytes. + frameLimit = 1024 * 64 + // peerNotRespondingError is the error thrown when the peer doesn't respond. + errPeerNotResponding = errors.New("peer is not responding and re-connection should be attempted") +) + +// Handshaker defines a handshaker interface. +type Handshaker interface { + // ClientHandshake starts and completes a TLS handshake from the client side, + // and returns a secure connection along with additional auth information. + ClientHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) + // ServerHandshake starts and completes a TLS handshake from the server side, + // and returns a secure connection along with additional auth information. + ServerHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) + // Close terminates the Handshaker. It should be called when the handshake + // is complete. + Close() error +} + +// ClientHandshakerOptions contains the options needed to configure the S2A +// handshaker service on the client-side. +type ClientHandshakerOptions struct { + // MinTLSVersion specifies the min TLS version supported by the client. + MinTLSVersion commonpb.TLSVersion + // MaxTLSVersion specifies the max TLS version supported by the client. + MaxTLSVersion commonpb.TLSVersion + // TLSCiphersuites is the ordered list of ciphersuites supported by the + // client. + TLSCiphersuites []commonpb.Ciphersuite + // TargetIdentities contains a list of allowed server identities. One of the + // target identities should match the peer identity in the handshake + // result; otherwise, the handshake fails. + TargetIdentities []*commonpb.Identity + // LocalIdentity is the local identity of the client application. If none is + // provided, then the S2A will choose the default identity. + LocalIdentity *commonpb.Identity + // TargetName is the allowed server name, which may be used for server + // authorization check by the S2A if it is provided. + TargetName string + // EnsureProcessSessionTickets allows users to wait and ensure that all + // available session tickets are sent to S2A before a process completes. + EnsureProcessSessionTickets *sync.WaitGroup +} + +// ServerHandshakerOptions contains the options needed to configure the S2A +// handshaker service on the server-side. +type ServerHandshakerOptions struct { + // MinTLSVersion specifies the min TLS version supported by the server. 
+ MinTLSVersion commonpb.TLSVersion + // MaxTLSVersion specifies the max TLS version supported by the server. + MaxTLSVersion commonpb.TLSVersion + // TLSCiphersuites is the ordered list of ciphersuites supported by the + // server. + TLSCiphersuites []commonpb.Ciphersuite + // LocalIdentities is the list of local identities that may be assumed by + // the server. If no local identity is specified, then the S2A chooses a + // default local identity. + LocalIdentities []*commonpb.Identity +} + +// s2aHandshaker performs a TLS handshake using the S2A handshaker service. +type s2aHandshaker struct { + // stream is used to communicate with the S2A handshaker service. + stream s2apb.S2AService_SetUpSessionClient + // conn is the connection to the peer. + conn net.Conn + // clientOpts should be non-nil iff the handshaker is client-side. + clientOpts *ClientHandshakerOptions + // serverOpts should be non-nil iff the handshaker is server-side. + serverOpts *ServerHandshakerOptions + // isClient determines if the handshaker is client or server side. + isClient bool + // hsAddr stores the address of the S2A handshaker service. + hsAddr string + // tokenManager manages access tokens for authenticating to S2A. + tokenManager tokenmanager.AccessTokenManager + // localIdentities is the set of local identities for whom the + // tokenManager should fetch a token when preparing a request to be + // sent to S2A. + localIdentities []*commonpb.Identity +} + +// NewClientHandshaker creates an s2aHandshaker instance that performs a +// client-side TLS handshake using the S2A handshaker service. +func NewClientHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, hsAddr string, opts *ClientHandshakerOptions) (Handshaker, error) { + stream, err := s2apb.NewS2AServiceClient(conn).SetUpSession(ctx, grpc.WaitForReady(true)) + if err != nil { + return nil, err + } + tokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager() + if err != nil { + grpclog.Infof("failed to create single token access token manager: %v", err) + } + return newClientHandshaker(stream, c, hsAddr, opts, tokenManager), nil +} + +func newClientHandshaker(stream s2apb.S2AService_SetUpSessionClient, c net.Conn, hsAddr string, opts *ClientHandshakerOptions, tokenManager tokenmanager.AccessTokenManager) *s2aHandshaker { + var localIdentities []*commonpb.Identity + if opts != nil { + localIdentities = []*commonpb.Identity{opts.LocalIdentity} + } + return &s2aHandshaker{ + stream: stream, + conn: c, + clientOpts: opts, + isClient: true, + hsAddr: hsAddr, + tokenManager: tokenManager, + localIdentities: localIdentities, + } +} + +// NewServerHandshaker creates an s2aHandshaker instance that performs a +// server-side TLS handshake using the S2A handshaker service. 
+func NewServerHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, hsAddr string, opts *ServerHandshakerOptions) (Handshaker, error) { + stream, err := s2apb.NewS2AServiceClient(conn).SetUpSession(ctx, grpc.WaitForReady(true)) + if err != nil { + return nil, err + } + tokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager() + if err != nil { + grpclog.Infof("failed to create single token access token manager: %v", err) + } + return newServerHandshaker(stream, c, hsAddr, opts, tokenManager), nil +} + +func newServerHandshaker(stream s2apb.S2AService_SetUpSessionClient, c net.Conn, hsAddr string, opts *ServerHandshakerOptions, tokenManager tokenmanager.AccessTokenManager) *s2aHandshaker { + var localIdentities []*commonpb.Identity + if opts != nil { + localIdentities = opts.LocalIdentities + } + return &s2aHandshaker{ + stream: stream, + conn: c, + serverOpts: opts, + isClient: false, + hsAddr: hsAddr, + tokenManager: tokenManager, + localIdentities: localIdentities, + } +} + +// ClientHandshake performs a client-side TLS handshake using the S2A handshaker +// service. When complete, returns a TLS connection. +func (h *s2aHandshaker) ClientHandshake(_ context.Context) (net.Conn, credentials.AuthInfo, error) { + if !h.isClient { + return nil, nil, errors.New("only handshakers created using NewClientHandshaker can perform a client-side handshake") + } + // Extract the hostname from the target name. The target name is assumed to be an authority. + hostname, _, err := net.SplitHostPort(h.clientOpts.TargetName) + if err != nil { + // If the target name had no host port or could not be parsed, use it as is. + hostname = h.clientOpts.TargetName + } + + // Prepare a client start message to send to the S2A handshaker service. + req := &s2apb.SessionReq{ + ReqOneof: &s2apb.SessionReq_ClientStart{ + ClientStart: &s2apb.ClientSessionStartReq{ + ApplicationProtocols: []string{appProtocol}, + MinTlsVersion: h.clientOpts.MinTLSVersion, + MaxTlsVersion: h.clientOpts.MaxTLSVersion, + TlsCiphersuites: h.clientOpts.TLSCiphersuites, + TargetIdentities: h.clientOpts.TargetIdentities, + LocalIdentity: h.clientOpts.LocalIdentity, + TargetName: hostname, + }, + }, + AuthMechanisms: h.getAuthMechanisms(), + } + conn, result, err := h.setUpSession(req) + if err != nil { + return nil, nil, err + } + authInfo, err := authinfo.NewS2AAuthInfo(result) + if err != nil { + return nil, nil, err + } + return conn, authInfo, nil +} + +// ServerHandshake performs a server-side TLS handshake using the S2A handshaker +// service. When complete, returns a TLS connection. +func (h *s2aHandshaker) ServerHandshake(_ context.Context) (net.Conn, credentials.AuthInfo, error) { + if h.isClient { + return nil, nil, errors.New("only handshakers created using NewServerHandshaker can perform a server-side handshake") + } + p := make([]byte, frameLimit) + n, err := h.conn.Read(p) + if err != nil { + return nil, nil, err + } + // Prepare a server start message to send to the S2A handshaker service. 
+ req := &s2apb.SessionReq{ + ReqOneof: &s2apb.SessionReq_ServerStart{ + ServerStart: &s2apb.ServerSessionStartReq{ + ApplicationProtocols: []string{appProtocol}, + MinTlsVersion: h.serverOpts.MinTLSVersion, + MaxTlsVersion: h.serverOpts.MaxTLSVersion, + TlsCiphersuites: h.serverOpts.TLSCiphersuites, + LocalIdentities: h.serverOpts.LocalIdentities, + InBytes: p[:n], + }, + }, + AuthMechanisms: h.getAuthMechanisms(), + } + conn, result, err := h.setUpSession(req) + if err != nil { + return nil, nil, err + } + authInfo, err := authinfo.NewS2AAuthInfo(result) + if err != nil { + return nil, nil, err + } + return conn, authInfo, nil +} + +// setUpSession proxies messages between the peer and the S2A handshaker +// service. +func (h *s2aHandshaker) setUpSession(req *s2apb.SessionReq) (net.Conn, *s2apb.SessionResult, error) { + resp, err := h.accessHandshakerService(req) + if err != nil { + return nil, nil, err + } + // Check if the returned status is an error. + if resp.GetStatus() != nil { + if got, want := resp.GetStatus().Code, uint32(codes.OK); got != want { + return nil, nil, fmt.Errorf("%v", resp.GetStatus().Details) + } + } + // Calculate the extra unread bytes from the Session. Attempting to consume + // more than the bytes sent will throw an error. + var extra []byte + if req.GetServerStart() != nil { + if resp.GetBytesConsumed() > uint32(len(req.GetServerStart().GetInBytes())) { + return nil, nil, errors.New("handshaker service consumed bytes value is out-of-bounds") + } + extra = req.GetServerStart().GetInBytes()[resp.GetBytesConsumed():] + } + result, extra, err := h.processUntilDone(resp, extra) + if err != nil { + return nil, nil, err + } + if result.GetLocalIdentity() == nil { + return nil, nil, errors.New("local identity must be populated in session result") + } + + // Create a new TLS record protocol using the Session Result. + newConn, err := record.NewConn(&record.ConnParameters{ + NetConn: h.conn, + Ciphersuite: result.GetState().GetTlsCiphersuite(), + TLSVersion: result.GetState().GetTlsVersion(), + InTrafficSecret: result.GetState().GetInKey(), + OutTrafficSecret: result.GetState().GetOutKey(), + UnusedBuf: extra, + InSequence: result.GetState().GetInSequence(), + OutSequence: result.GetState().GetOutSequence(), + HSAddr: h.hsAddr, + ConnectionID: result.GetState().GetConnectionId(), + LocalIdentity: result.GetLocalIdentity(), + EnsureProcessSessionTickets: h.ensureProcessSessionTickets(), + }) + if err != nil { + return nil, nil, err + } + return newConn, result, nil +} + +func (h *s2aHandshaker) ensureProcessSessionTickets() *sync.WaitGroup { + if h.clientOpts == nil { + return nil + } + return h.clientOpts.EnsureProcessSessionTickets +} + +// accessHandshakerService sends the session request to the S2A handshaker +// service and returns the session response. +func (h *s2aHandshaker) accessHandshakerService(req *s2apb.SessionReq) (*s2apb.SessionResp, error) { + if err := h.stream.Send(req); err != nil { + return nil, err + } + resp, err := h.stream.Recv() + if err != nil { + return nil, err + } + return resp, nil +} + +// processUntilDone continues proxying messages between the peer and the S2A +// handshaker service until the handshaker service returns the SessionResult at +// the end of the handshake or an error occurs. 
+func (h *s2aHandshaker) processUntilDone(resp *s2apb.SessionResp, unusedBytes []byte) (*s2apb.SessionResult, []byte, error) { + for { + if len(resp.OutFrames) > 0 { + if _, err := h.conn.Write(resp.OutFrames); err != nil { + return nil, nil, err + } + } + if resp.Result != nil { + return resp.Result, unusedBytes, nil + } + buf := make([]byte, frameLimit) + n, err := h.conn.Read(buf) + if err != nil && err != io.EOF { + return nil, nil, err + } + // If there is nothing to send to the handshaker service and nothing is + // received from the peer, then we are stuck. This covers the case when + // the peer is not responding. Note that handshaker service connection + // issues are caught in accessHandshakerService before we even get + // here. + if len(resp.OutFrames) == 0 && n == 0 { + return nil, nil, errPeerNotResponding + } + // Append extra bytes from the previous interaction with the handshaker + // service with the current buffer read from conn. + p := append(unusedBytes, buf[:n]...) + // From here on, p and unusedBytes point to the same slice. + resp, err = h.accessHandshakerService(&s2apb.SessionReq{ + ReqOneof: &s2apb.SessionReq_Next{ + Next: &s2apb.SessionNextReq{ + InBytes: p, + }, + }, + AuthMechanisms: h.getAuthMechanisms(), + }) + if err != nil { + return nil, nil, err + } + + // Cache the local identity returned by S2A, if it is populated. This + // overwrites any existing local identities. This is done because, once the + // S2A has selected a local identity, then only that local identity should + // be asserted in future requests until the end of the current handshake. + if resp.GetLocalIdentity() != nil { + h.localIdentities = []*commonpb.Identity{resp.GetLocalIdentity()} + } + + // Set unusedBytes based on the handshaker service response. + if resp.GetBytesConsumed() > uint32(len(p)) { + return nil, nil, errors.New("handshaker service consumed bytes value is out-of-bounds") + } + unusedBytes = p[resp.GetBytesConsumed():] + } +} + +// Close shuts down the handshaker and the stream to the S2A handshaker service +// when the handshake is complete. It should be called when the caller obtains +// the secure connection at the end of the handshake. +func (h *s2aHandshaker) Close() error { + return h.stream.CloseSend() +} + +func (h *s2aHandshaker) getAuthMechanisms() []*s2apb.AuthenticationMechanism { + if h.tokenManager == nil { + return nil + } + // First handle the special case when no local identities have been provided + // by the application. In this case, an AuthenticationMechanism with no local + // identity will be sent. + if len(h.localIdentities) == 0 { + token, err := h.tokenManager.DefaultToken() + if err != nil { + grpclog.Infof("unable to get token for empty local identity: %v", err) + return nil + } + return []*s2apb.AuthenticationMechanism{ + { + MechanismOneof: &s2apb.AuthenticationMechanism_Token{ + Token: token, + }, + }, + } + } + + // Next, handle the case where the application (or the S2A) has provided + // one or more local identities. 
+ var authMechanisms []*s2apb.AuthenticationMechanism + for _, localIdentity := range h.localIdentities { + token, err := h.tokenManager.Token(localIdentity) + if err != nil { + grpclog.Infof("unable to get token for local identity %v: %v", localIdentity, err) + continue + } + + authMechanism := &s2apb.AuthenticationMechanism{ + Identity: localIdentity, + MechanismOneof: &s2apb.AuthenticationMechanism_Token{ + Token: token, + }, + } + authMechanisms = append(authMechanisms, authMechanism) + } + return authMechanisms +} diff -Nru temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/handshaker/service/service.go temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/handshaker/service/service.go --- temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/handshaker/service/service.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/handshaker/service/service.go 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,99 @@ +/* + * + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package service is a utility for calling the S2A handshaker service. +package service + +import ( + "context" + "net" + "os" + "strings" + "sync" + "time" + + "google.golang.org/appengine" + "google.golang.org/appengine/socket" + grpc "google.golang.org/grpc" + "google.golang.org/grpc/grpclog" +) + +// An environment variable, if true, opportunistically use AppEngine-specific dialer to call S2A. +const enableAppEngineDialerEnv = "S2A_ENABLE_APP_ENGINE_DIALER" + +var ( + // appEngineDialerHook is an AppEngine-specific dial option that is set + // during init time. If nil, then the application is not running on Google + // AppEngine. + appEngineDialerHook func(context.Context) grpc.DialOption + // mu guards hsConnMap and hsDialer. + mu sync.Mutex + // hsConnMap represents a mapping from an S2A handshaker service address + // to a corresponding connection to an S2A handshaker service instance. + hsConnMap = make(map[string]*grpc.ClientConn) + // hsDialer will be reassigned in tests. + hsDialer = grpc.Dial +) + +func init() { + if !appengine.IsAppEngine() && !appengine.IsDevAppServer() { + return + } + appEngineDialerHook = func(ctx context.Context) grpc.DialOption { + return grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) { + return socket.DialTimeout(ctx, "tcp", addr, timeout) + }) + } +} + +// Dial dials the S2A handshaker service. If a connection has already been +// established, this function returns it. Otherwise, a new connection is +// created. +func Dial(handshakerServiceAddress string) (*grpc.ClientConn, error) { + mu.Lock() + defer mu.Unlock() + + hsConn, ok := hsConnMap[handshakerServiceAddress] + if !ok { + // Create a new connection to the S2A handshaker service. Note that + // this connection stays open until the application is closed. 
+ grpcOpts := []grpc.DialOption{ + grpc.WithInsecure(), + } + if enableAppEngineDialer() && appEngineDialerHook != nil { + if grpclog.V(1) { + grpclog.Info("Using AppEngine-specific dialer to talk to S2A.") + } + grpcOpts = append(grpcOpts, appEngineDialerHook(context.Background())) + } + var err error + hsConn, err = hsDialer(handshakerServiceAddress, grpcOpts...) + if err != nil { + return nil, err + } + hsConnMap[handshakerServiceAddress] = hsConn + } + return hsConn, nil +} + +func enableAppEngineDialer() bool { + if strings.ToLower(os.Getenv(enableAppEngineDialerEnv)) == "true" { + return true + } + return false +} diff -Nru temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/proto/common_go_proto/common.pb.go temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/proto/common_go_proto/common.pb.go --- temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/proto/common_go_proto/common.pb.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/proto/common_go_proto/common.pb.go 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,389 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v3.21.12 +// source: internal/proto/common/common.proto + +package common_go_proto + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The ciphersuites supported by S2A. The name determines the confidentiality, +// and authentication ciphers as well as the hash algorithm used for PRF in +// TLS 1.2 or HKDF in TLS 1.3. Thus, the components of the name are: +// - AEAD -- for encryption and authentication, e.g., AES_128_GCM. +// - Hash algorithm -- used in PRF or HKDF, e.g., SHA256. +type Ciphersuite int32 + +const ( + Ciphersuite_AES_128_GCM_SHA256 Ciphersuite = 0 + Ciphersuite_AES_256_GCM_SHA384 Ciphersuite = 1 + Ciphersuite_CHACHA20_POLY1305_SHA256 Ciphersuite = 2 +) + +// Enum value maps for Ciphersuite. 
+var ( + Ciphersuite_name = map[int32]string{ + 0: "AES_128_GCM_SHA256", + 1: "AES_256_GCM_SHA384", + 2: "CHACHA20_POLY1305_SHA256", + } + Ciphersuite_value = map[string]int32{ + "AES_128_GCM_SHA256": 0, + "AES_256_GCM_SHA384": 1, + "CHACHA20_POLY1305_SHA256": 2, + } +) + +func (x Ciphersuite) Enum() *Ciphersuite { + p := new(Ciphersuite) + *p = x + return p +} + +func (x Ciphersuite) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Ciphersuite) Descriptor() protoreflect.EnumDescriptor { + return file_internal_proto_common_common_proto_enumTypes[0].Descriptor() +} + +func (Ciphersuite) Type() protoreflect.EnumType { + return &file_internal_proto_common_common_proto_enumTypes[0] +} + +func (x Ciphersuite) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Ciphersuite.Descriptor instead. +func (Ciphersuite) EnumDescriptor() ([]byte, []int) { + return file_internal_proto_common_common_proto_rawDescGZIP(), []int{0} +} + +// The TLS versions supported by S2A's handshaker module. +type TLSVersion int32 + +const ( + TLSVersion_TLS1_2 TLSVersion = 0 + TLSVersion_TLS1_3 TLSVersion = 1 +) + +// Enum value maps for TLSVersion. +var ( + TLSVersion_name = map[int32]string{ + 0: "TLS1_2", + 1: "TLS1_3", + } + TLSVersion_value = map[string]int32{ + "TLS1_2": 0, + "TLS1_3": 1, + } +) + +func (x TLSVersion) Enum() *TLSVersion { + p := new(TLSVersion) + *p = x + return p +} + +func (x TLSVersion) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (TLSVersion) Descriptor() protoreflect.EnumDescriptor { + return file_internal_proto_common_common_proto_enumTypes[1].Descriptor() +} + +func (TLSVersion) Type() protoreflect.EnumType { + return &file_internal_proto_common_common_proto_enumTypes[1] +} + +func (x TLSVersion) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use TLSVersion.Descriptor instead. +func (TLSVersion) EnumDescriptor() ([]byte, []int) { + return file_internal_proto_common_common_proto_rawDescGZIP(), []int{1} +} + +type Identity struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to IdentityOneof: + // + // *Identity_SpiffeId + // *Identity_Hostname + // *Identity_Uid + // *Identity_MdbUsername + // *Identity_GaiaId + IdentityOneof isIdentity_IdentityOneof `protobuf_oneof:"identity_oneof"` + // Additional identity-specific attributes. + Attributes map[string]string `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *Identity) Reset() { + *x = Identity{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_common_common_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Identity) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Identity) ProtoMessage() {} + +func (x *Identity) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_common_common_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Identity.ProtoReflect.Descriptor instead. 
+func (*Identity) Descriptor() ([]byte, []int) { + return file_internal_proto_common_common_proto_rawDescGZIP(), []int{0} +} + +func (m *Identity) GetIdentityOneof() isIdentity_IdentityOneof { + if m != nil { + return m.IdentityOneof + } + return nil +} + +func (x *Identity) GetSpiffeId() string { + if x, ok := x.GetIdentityOneof().(*Identity_SpiffeId); ok { + return x.SpiffeId + } + return "" +} + +func (x *Identity) GetHostname() string { + if x, ok := x.GetIdentityOneof().(*Identity_Hostname); ok { + return x.Hostname + } + return "" +} + +func (x *Identity) GetUid() string { + if x, ok := x.GetIdentityOneof().(*Identity_Uid); ok { + return x.Uid + } + return "" +} + +func (x *Identity) GetMdbUsername() string { + if x, ok := x.GetIdentityOneof().(*Identity_MdbUsername); ok { + return x.MdbUsername + } + return "" +} + +func (x *Identity) GetGaiaId() string { + if x, ok := x.GetIdentityOneof().(*Identity_GaiaId); ok { + return x.GaiaId + } + return "" +} + +func (x *Identity) GetAttributes() map[string]string { + if x != nil { + return x.Attributes + } + return nil +} + +type isIdentity_IdentityOneof interface { + isIdentity_IdentityOneof() +} + +type Identity_SpiffeId struct { + // The SPIFFE ID of a connection endpoint. + SpiffeId string `protobuf:"bytes,1,opt,name=spiffe_id,json=spiffeId,proto3,oneof"` +} + +type Identity_Hostname struct { + // The hostname of a connection endpoint. + Hostname string `protobuf:"bytes,2,opt,name=hostname,proto3,oneof"` +} + +type Identity_Uid struct { + // The UID of a connection endpoint. + Uid string `protobuf:"bytes,4,opt,name=uid,proto3,oneof"` +} + +type Identity_MdbUsername struct { + // The MDB username of a connection endpoint. + MdbUsername string `protobuf:"bytes,5,opt,name=mdb_username,json=mdbUsername,proto3,oneof"` +} + +type Identity_GaiaId struct { + // The Gaia ID of a connection endpoint. 
+ GaiaId string `protobuf:"bytes,6,opt,name=gaia_id,json=gaiaId,proto3,oneof"` +} + +func (*Identity_SpiffeId) isIdentity_IdentityOneof() {} + +func (*Identity_Hostname) isIdentity_IdentityOneof() {} + +func (*Identity_Uid) isIdentity_IdentityOneof() {} + +func (*Identity_MdbUsername) isIdentity_IdentityOneof() {} + +func (*Identity_GaiaId) isIdentity_IdentityOneof() {} + +var File_internal_proto_common_common_proto protoreflect.FileDescriptor + +var file_internal_proto_common_common_proto_rawDesc = []byte{ + 0x0a, 0x22, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, + 0xb1, 0x02, 0x0a, 0x08, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x1d, 0x0a, 0x09, + 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, + 0x00, 0x52, 0x08, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x08, 0x68, + 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, + 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x03, 0x75, 0x69, 0x64, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, 0x75, 0x69, 0x64, 0x12, 0x23, 0x0a, + 0x0c, 0x6d, 0x64, 0x62, 0x5f, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x6d, 0x64, 0x62, 0x55, 0x73, 0x65, 0x72, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x07, 0x67, 0x61, 0x69, 0x61, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x06, 0x67, 0x61, 0x69, 0x61, 0x49, 0x64, 0x12, 0x43, 0x0a, + 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x23, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, + 0x65, 0x73, 0x1a, 0x3d, 0x0a, 0x0f, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x42, 0x10, 0x0a, 0x0e, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x6f, 0x6e, + 0x65, 0x6f, 0x66, 0x2a, 0x5b, 0x0a, 0x0b, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, + 0x74, 0x65, 0x12, 0x16, 0x0a, 0x12, 0x41, 0x45, 0x53, 0x5f, 0x31, 0x32, 0x38, 0x5f, 0x47, 0x43, + 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x00, 0x12, 0x16, 0x0a, 0x12, 0x41, 0x45, + 0x53, 0x5f, 0x32, 0x35, 0x36, 0x5f, 0x47, 0x43, 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, + 0x10, 0x01, 0x12, 0x1c, 0x0a, 0x18, 0x43, 0x48, 0x41, 0x43, 0x48, 0x41, 0x32, 0x30, 0x5f, 0x50, + 0x4f, 0x4c, 0x59, 0x31, 0x33, 0x30, 0x35, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x02, + 0x2a, 0x24, 0x0a, 0x0a, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0a, + 0x0a, 0x06, 0x54, 0x4c, 0x53, 0x31, 0x5f, 0x32, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x54, 0x4c, + 0x53, 0x31, 0x5f, 0x33, 0x10, 0x01, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 
0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, 0x61, 0x2f, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_internal_proto_common_common_proto_rawDescOnce sync.Once + file_internal_proto_common_common_proto_rawDescData = file_internal_proto_common_common_proto_rawDesc +) + +func file_internal_proto_common_common_proto_rawDescGZIP() []byte { + file_internal_proto_common_common_proto_rawDescOnce.Do(func() { + file_internal_proto_common_common_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_proto_common_common_proto_rawDescData) + }) + return file_internal_proto_common_common_proto_rawDescData +} + +var file_internal_proto_common_common_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_internal_proto_common_common_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_internal_proto_common_common_proto_goTypes = []interface{}{ + (Ciphersuite)(0), // 0: s2a.proto.Ciphersuite + (TLSVersion)(0), // 1: s2a.proto.TLSVersion + (*Identity)(nil), // 2: s2a.proto.Identity + nil, // 3: s2a.proto.Identity.AttributesEntry +} +var file_internal_proto_common_common_proto_depIdxs = []int32{ + 3, // 0: s2a.proto.Identity.attributes:type_name -> s2a.proto.Identity.AttributesEntry + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_internal_proto_common_common_proto_init() } +func file_internal_proto_common_common_proto_init() { + if File_internal_proto_common_common_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_internal_proto_common_common_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Identity); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_internal_proto_common_common_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*Identity_SpiffeId)(nil), + (*Identity_Hostname)(nil), + (*Identity_Uid)(nil), + (*Identity_MdbUsername)(nil), + (*Identity_GaiaId)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_internal_proto_common_common_proto_rawDesc, + NumEnums: 2, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_internal_proto_common_common_proto_goTypes, + DependencyIndexes: file_internal_proto_common_common_proto_depIdxs, + EnumInfos: file_internal_proto_common_common_proto_enumTypes, + MessageInfos: file_internal_proto_common_common_proto_msgTypes, + }.Build() + File_internal_proto_common_common_proto = out.File + file_internal_proto_common_common_proto_rawDesc = nil + file_internal_proto_common_common_proto_goTypes = nil + file_internal_proto_common_common_proto_depIdxs = nil +} diff -Nru temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/proto/s2a_context_go_proto/s2a_context.pb.go temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/proto/s2a_context_go_proto/s2a_context.pb.go --- temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/proto/s2a_context_go_proto/s2a_context.pb.go 1970-01-01 
00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/proto/s2a_context_go_proto/s2a_context.pb.go 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,267 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v3.21.12 +// source: internal/proto/s2a_context/s2a_context.proto + +package s2a_context_go_proto + +import ( + common_go_proto "github.com/google/s2a-go/internal/proto/common_go_proto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type S2AContext struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The application protocol negotiated for this connection, e.g., 'grpc'. + ApplicationProtocol string `protobuf:"bytes,1,opt,name=application_protocol,json=applicationProtocol,proto3" json:"application_protocol,omitempty"` + // The TLS version number that the S2A's handshaker module used to set up the + // session. + TlsVersion common_go_proto.TLSVersion `protobuf:"varint,2,opt,name=tls_version,json=tlsVersion,proto3,enum=s2a.proto.TLSVersion" json:"tls_version,omitempty"` + // The TLS ciphersuite negotiated by the S2A's handshaker module. + Ciphersuite common_go_proto.Ciphersuite `protobuf:"varint,3,opt,name=ciphersuite,proto3,enum=s2a.proto.Ciphersuite" json:"ciphersuite,omitempty"` + // The authenticated identity of the peer. + PeerIdentity *common_go_proto.Identity `protobuf:"bytes,4,opt,name=peer_identity,json=peerIdentity,proto3" json:"peer_identity,omitempty"` + // The local identity used during session setup. This could be: + // - The local identity that the client specifies in ClientSessionStartReq. + // - One of the local identities that the server specifies in + // ServerSessionStartReq. + // - If neither client or server specifies local identities, the S2A picks the + // default one. In this case, this field will contain that identity. + LocalIdentity *common_go_proto.Identity `protobuf:"bytes,5,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` + // The SHA256 hash of the peer certificate used in the handshake. + PeerCertFingerprint []byte `protobuf:"bytes,6,opt,name=peer_cert_fingerprint,json=peerCertFingerprint,proto3" json:"peer_cert_fingerprint,omitempty"` + // The SHA256 hash of the local certificate used in the handshake. 
+ LocalCertFingerprint []byte `protobuf:"bytes,7,opt,name=local_cert_fingerprint,json=localCertFingerprint,proto3" json:"local_cert_fingerprint,omitempty"` + // Set to true if a cached session was reused to resume the handshake. + IsHandshakeResumed bool `protobuf:"varint,8,opt,name=is_handshake_resumed,json=isHandshakeResumed,proto3" json:"is_handshake_resumed,omitempty"` +} + +func (x *S2AContext) Reset() { + *x = S2AContext{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_s2a_context_s2a_context_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *S2AContext) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*S2AContext) ProtoMessage() {} + +func (x *S2AContext) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_s2a_context_s2a_context_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use S2AContext.ProtoReflect.Descriptor instead. +func (*S2AContext) Descriptor() ([]byte, []int) { + return file_internal_proto_s2a_context_s2a_context_proto_rawDescGZIP(), []int{0} +} + +func (x *S2AContext) GetApplicationProtocol() string { + if x != nil { + return x.ApplicationProtocol + } + return "" +} + +func (x *S2AContext) GetTlsVersion() common_go_proto.TLSVersion { + if x != nil { + return x.TlsVersion + } + return common_go_proto.TLSVersion(0) +} + +func (x *S2AContext) GetCiphersuite() common_go_proto.Ciphersuite { + if x != nil { + return x.Ciphersuite + } + return common_go_proto.Ciphersuite(0) +} + +func (x *S2AContext) GetPeerIdentity() *common_go_proto.Identity { + if x != nil { + return x.PeerIdentity + } + return nil +} + +func (x *S2AContext) GetLocalIdentity() *common_go_proto.Identity { + if x != nil { + return x.LocalIdentity + } + return nil +} + +func (x *S2AContext) GetPeerCertFingerprint() []byte { + if x != nil { + return x.PeerCertFingerprint + } + return nil +} + +func (x *S2AContext) GetLocalCertFingerprint() []byte { + if x != nil { + return x.LocalCertFingerprint + } + return nil +} + +func (x *S2AContext) GetIsHandshakeResumed() bool { + if x != nil { + return x.IsHandshakeResumed + } + return false +} + +var File_internal_proto_s2a_context_s2a_context_proto protoreflect.FileDescriptor + +var file_internal_proto_s2a_context_s2a_context_proto_rawDesc = []byte{ + 0x0a, 0x2c, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2f, 0x73, 0x32, 0x61, + 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, + 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x22, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc3, 0x03, + 0x0a, 0x0a, 0x53, 0x32, 0x41, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x31, 0x0a, 0x14, + 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x61, 0x70, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, + 0x36, 0x0a, 0x0b, 0x74, 0x6c, 0x73, 0x5f, 
0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x74, 0x6c, 0x73, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x38, 0x0a, 0x0b, 0x63, 0x69, 0x70, 0x68, 0x65, + 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x73, + 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, + 0x75, 0x69, 0x74, 0x65, 0x52, 0x0b, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, + 0x65, 0x12, 0x38, 0x0a, 0x0d, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x74, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0c, 0x70, + 0x65, 0x65, 0x72, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x3a, 0x0a, 0x0e, 0x6c, + 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x65, 0x65, 0x72, 0x5f, + 0x63, 0x65, 0x72, 0x74, 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x13, 0x70, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, + 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x12, 0x34, 0x0a, 0x16, 0x6c, + 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, + 0x70, 0x72, 0x69, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x14, 0x6c, 0x6f, 0x63, + 0x61, 0x6c, 0x43, 0x65, 0x72, 0x74, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, + 0x74, 0x12, 0x30, 0x0a, 0x14, 0x69, 0x73, 0x5f, 0x68, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, + 0x65, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x12, 0x69, 0x73, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x73, 0x75, + 0x6d, 0x65, 0x64, 0x42, 0x3b, 0x5a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x73, 0x32, 0x61, 0x5f, + 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_internal_proto_s2a_context_s2a_context_proto_rawDescOnce sync.Once + file_internal_proto_s2a_context_s2a_context_proto_rawDescData = file_internal_proto_s2a_context_s2a_context_proto_rawDesc +) + +func file_internal_proto_s2a_context_s2a_context_proto_rawDescGZIP() []byte { + file_internal_proto_s2a_context_s2a_context_proto_rawDescOnce.Do(func() { + file_internal_proto_s2a_context_s2a_context_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_proto_s2a_context_s2a_context_proto_rawDescData) + }) + return file_internal_proto_s2a_context_s2a_context_proto_rawDescData +} + +var file_internal_proto_s2a_context_s2a_context_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_internal_proto_s2a_context_s2a_context_proto_goTypes = []interface{}{ + 
(*S2AContext)(nil), // 0: s2a.proto.S2AContext + (common_go_proto.TLSVersion)(0), // 1: s2a.proto.TLSVersion + (common_go_proto.Ciphersuite)(0), // 2: s2a.proto.Ciphersuite + (*common_go_proto.Identity)(nil), // 3: s2a.proto.Identity +} +var file_internal_proto_s2a_context_s2a_context_proto_depIdxs = []int32{ + 1, // 0: s2a.proto.S2AContext.tls_version:type_name -> s2a.proto.TLSVersion + 2, // 1: s2a.proto.S2AContext.ciphersuite:type_name -> s2a.proto.Ciphersuite + 3, // 2: s2a.proto.S2AContext.peer_identity:type_name -> s2a.proto.Identity + 3, // 3: s2a.proto.S2AContext.local_identity:type_name -> s2a.proto.Identity + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name +} + +func init() { file_internal_proto_s2a_context_s2a_context_proto_init() } +func file_internal_proto_s2a_context_s2a_context_proto_init() { + if File_internal_proto_s2a_context_s2a_context_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_internal_proto_s2a_context_s2a_context_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*S2AContext); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_internal_proto_s2a_context_s2a_context_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_internal_proto_s2a_context_s2a_context_proto_goTypes, + DependencyIndexes: file_internal_proto_s2a_context_s2a_context_proto_depIdxs, + MessageInfos: file_internal_proto_s2a_context_s2a_context_proto_msgTypes, + }.Build() + File_internal_proto_s2a_context_s2a_context_proto = out.File + file_internal_proto_s2a_context_s2a_context_proto_rawDesc = nil + file_internal_proto_s2a_context_s2a_context_proto_goTypes = nil + file_internal_proto_s2a_context_s2a_context_proto_depIdxs = nil +} diff -Nru temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a.pb.go temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a.pb.go --- temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a.pb.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a.pb.go 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,1377 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.30.0 +// protoc v3.21.12 +// source: internal/proto/s2a/s2a.proto + +package s2a_go_proto + +import ( + common_go_proto "github.com/google/s2a-go/internal/proto/common_go_proto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type AuthenticationMechanism struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // (Optional) Application may specify an identity associated to an + // authentication mechanism. Otherwise, S2A assumes that the authentication + // mechanism is associated with the default identity. If the default identity + // cannot be determined, session setup fails. + Identity *common_go_proto.Identity `protobuf:"bytes,1,opt,name=identity,proto3" json:"identity,omitempty"` + // Types that are assignable to MechanismOneof: + // + // *AuthenticationMechanism_Token + MechanismOneof isAuthenticationMechanism_MechanismOneof `protobuf_oneof:"mechanism_oneof"` +} + +func (x *AuthenticationMechanism) Reset() { + *x = AuthenticationMechanism{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_s2a_s2a_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AuthenticationMechanism) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AuthenticationMechanism) ProtoMessage() {} + +func (x *AuthenticationMechanism) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_s2a_s2a_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AuthenticationMechanism.ProtoReflect.Descriptor instead. +func (*AuthenticationMechanism) Descriptor() ([]byte, []int) { + return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{0} +} + +func (x *AuthenticationMechanism) GetIdentity() *common_go_proto.Identity { + if x != nil { + return x.Identity + } + return nil +} + +func (m *AuthenticationMechanism) GetMechanismOneof() isAuthenticationMechanism_MechanismOneof { + if m != nil { + return m.MechanismOneof + } + return nil +} + +func (x *AuthenticationMechanism) GetToken() string { + if x, ok := x.GetMechanismOneof().(*AuthenticationMechanism_Token); ok { + return x.Token + } + return "" +} + +type isAuthenticationMechanism_MechanismOneof interface { + isAuthenticationMechanism_MechanismOneof() +} + +type AuthenticationMechanism_Token struct { + // A token that the application uses to authenticate itself to the S2A. + Token string `protobuf:"bytes,2,opt,name=token,proto3,oneof"` +} + +func (*AuthenticationMechanism_Token) isAuthenticationMechanism_MechanismOneof() {} + +type ClientSessionStartReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The application protocols supported by the client, e.g., "grpc". 
+ ApplicationProtocols []string `protobuf:"bytes,1,rep,name=application_protocols,json=applicationProtocols,proto3" json:"application_protocols,omitempty"` + // (Optional) The minimum TLS version number that the S2A's handshaker module + // will use to set up the session. If this field is not provided, S2A will use + // the minimum version it supports. + MinTlsVersion common_go_proto.TLSVersion `protobuf:"varint,2,opt,name=min_tls_version,json=minTlsVersion,proto3,enum=s2a.proto.TLSVersion" json:"min_tls_version,omitempty"` + // (Optional) The maximum TLS version number that the S2A's handshaker module + // will use to set up the session. If this field is not provided, S2A will use + // the maximum version it supports. + MaxTlsVersion common_go_proto.TLSVersion `protobuf:"varint,3,opt,name=max_tls_version,json=maxTlsVersion,proto3,enum=s2a.proto.TLSVersion" json:"max_tls_version,omitempty"` + // The TLS ciphersuites that the client is willing to support. + TlsCiphersuites []common_go_proto.Ciphersuite `protobuf:"varint,4,rep,packed,name=tls_ciphersuites,json=tlsCiphersuites,proto3,enum=s2a.proto.Ciphersuite" json:"tls_ciphersuites,omitempty"` + // (Optional) Describes which server identities are acceptable by the client. + // If target identities are provided and none of them matches the peer + // identity of the server, session setup fails. + TargetIdentities []*common_go_proto.Identity `protobuf:"bytes,5,rep,name=target_identities,json=targetIdentities,proto3" json:"target_identities,omitempty"` + // (Optional) Application may specify a local identity. Otherwise, S2A chooses + // the default local identity. If the default identity cannot be determined, + // session setup fails. + LocalIdentity *common_go_proto.Identity `protobuf:"bytes,6,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` + // The target name that is used by S2A to configure SNI in the TLS handshake. + // It is also used to perform server authorization check if avaiable. This + // check is intended to verify that the peer authenticated identity is + // authorized to run a service with the target name. + // This field MUST only contain the host portion of the server address. It + // MUST not contain the scheme or the port number. For example, if the server + // address is dns://www.example.com:443, the value of this field should be + // set to www.example.com. + TargetName string `protobuf:"bytes,7,opt,name=target_name,json=targetName,proto3" json:"target_name,omitempty"` +} + +func (x *ClientSessionStartReq) Reset() { + *x = ClientSessionStartReq{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_s2a_s2a_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClientSessionStartReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClientSessionStartReq) ProtoMessage() {} + +func (x *ClientSessionStartReq) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_s2a_s2a_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClientSessionStartReq.ProtoReflect.Descriptor instead. 
+func (*ClientSessionStartReq) Descriptor() ([]byte, []int) { + return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{1} +} + +func (x *ClientSessionStartReq) GetApplicationProtocols() []string { + if x != nil { + return x.ApplicationProtocols + } + return nil +} + +func (x *ClientSessionStartReq) GetMinTlsVersion() common_go_proto.TLSVersion { + if x != nil { + return x.MinTlsVersion + } + return common_go_proto.TLSVersion(0) +} + +func (x *ClientSessionStartReq) GetMaxTlsVersion() common_go_proto.TLSVersion { + if x != nil { + return x.MaxTlsVersion + } + return common_go_proto.TLSVersion(0) +} + +func (x *ClientSessionStartReq) GetTlsCiphersuites() []common_go_proto.Ciphersuite { + if x != nil { + return x.TlsCiphersuites + } + return nil +} + +func (x *ClientSessionStartReq) GetTargetIdentities() []*common_go_proto.Identity { + if x != nil { + return x.TargetIdentities + } + return nil +} + +func (x *ClientSessionStartReq) GetLocalIdentity() *common_go_proto.Identity { + if x != nil { + return x.LocalIdentity + } + return nil +} + +func (x *ClientSessionStartReq) GetTargetName() string { + if x != nil { + return x.TargetName + } + return "" +} + +type ServerSessionStartReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The application protocols supported by the server, e.g., "grpc". + ApplicationProtocols []string `protobuf:"bytes,1,rep,name=application_protocols,json=applicationProtocols,proto3" json:"application_protocols,omitempty"` + // (Optional) The minimum TLS version number that the S2A's handshaker module + // will use to set up the session. If this field is not provided, S2A will use + // the minimum version it supports. + MinTlsVersion common_go_proto.TLSVersion `protobuf:"varint,2,opt,name=min_tls_version,json=minTlsVersion,proto3,enum=s2a.proto.TLSVersion" json:"min_tls_version,omitempty"` + // (Optional) The maximum TLS version number that the S2A's handshaker module + // will use to set up the session. If this field is not provided, S2A will use + // the maximum version it supports. + MaxTlsVersion common_go_proto.TLSVersion `protobuf:"varint,3,opt,name=max_tls_version,json=maxTlsVersion,proto3,enum=s2a.proto.TLSVersion" json:"max_tls_version,omitempty"` + // The TLS ciphersuites that the server is willing to support. + TlsCiphersuites []common_go_proto.Ciphersuite `protobuf:"varint,4,rep,packed,name=tls_ciphersuites,json=tlsCiphersuites,proto3,enum=s2a.proto.Ciphersuite" json:"tls_ciphersuites,omitempty"` + // (Optional) A list of local identities supported by the server, if + // specified. Otherwise, S2A chooses the default local identity. If the + // default identity cannot be determined, session setup fails. + LocalIdentities []*common_go_proto.Identity `protobuf:"bytes,5,rep,name=local_identities,json=localIdentities,proto3" json:"local_identities,omitempty"` + // The byte representation of the first handshake message received from the + // client peer. It is possible that this first message is split into multiple + // chunks. In this case, the first chunk is sent using this field and the + // following chunks are sent using the in_bytes field of SessionNextReq + // Specifically, if the client peer is using S2A, this field contains the + // bytes in the out_frames field of SessionResp message that the client peer + // received from its S2A after initiating the handshake. 
+ InBytes []byte `protobuf:"bytes,6,opt,name=in_bytes,json=inBytes,proto3" json:"in_bytes,omitempty"` +} + +func (x *ServerSessionStartReq) Reset() { + *x = ServerSessionStartReq{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_s2a_s2a_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServerSessionStartReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServerSessionStartReq) ProtoMessage() {} + +func (x *ServerSessionStartReq) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_s2a_s2a_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServerSessionStartReq.ProtoReflect.Descriptor instead. +func (*ServerSessionStartReq) Descriptor() ([]byte, []int) { + return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{2} +} + +func (x *ServerSessionStartReq) GetApplicationProtocols() []string { + if x != nil { + return x.ApplicationProtocols + } + return nil +} + +func (x *ServerSessionStartReq) GetMinTlsVersion() common_go_proto.TLSVersion { + if x != nil { + return x.MinTlsVersion + } + return common_go_proto.TLSVersion(0) +} + +func (x *ServerSessionStartReq) GetMaxTlsVersion() common_go_proto.TLSVersion { + if x != nil { + return x.MaxTlsVersion + } + return common_go_proto.TLSVersion(0) +} + +func (x *ServerSessionStartReq) GetTlsCiphersuites() []common_go_proto.Ciphersuite { + if x != nil { + return x.TlsCiphersuites + } + return nil +} + +func (x *ServerSessionStartReq) GetLocalIdentities() []*common_go_proto.Identity { + if x != nil { + return x.LocalIdentities + } + return nil +} + +func (x *ServerSessionStartReq) GetInBytes() []byte { + if x != nil { + return x.InBytes + } + return nil +} + +type SessionNextReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The byte representation of session setup, i.e., handshake messages. + // Specifically: + // - All handshake messages sent from the server to the client. + // - All, except for the first, handshake messages sent from the client to + // the server. Note that the first message is communicated to S2A using the + // in_bytes field of ServerSessionStartReq. + // + // If the peer is using S2A, this field contains the bytes in the out_frames + // field of SessionResp message that the peer received from its S2A. + InBytes []byte `protobuf:"bytes,1,opt,name=in_bytes,json=inBytes,proto3" json:"in_bytes,omitempty"` +} + +func (x *SessionNextReq) Reset() { + *x = SessionNextReq{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_s2a_s2a_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SessionNextReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SessionNextReq) ProtoMessage() {} + +func (x *SessionNextReq) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_s2a_s2a_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SessionNextReq.ProtoReflect.Descriptor instead. 
+func (*SessionNextReq) Descriptor() ([]byte, []int) { + return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{3} +} + +func (x *SessionNextReq) GetInBytes() []byte { + if x != nil { + return x.InBytes + } + return nil +} + +type ResumptionTicketReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The byte representation of a NewSessionTicket message received from the + // server. + InBytes [][]byte `protobuf:"bytes,1,rep,name=in_bytes,json=inBytes,proto3" json:"in_bytes,omitempty"` + // A connection identifier that was created and sent by S2A at the end of a + // handshake. + ConnectionId uint64 `protobuf:"varint,2,opt,name=connection_id,json=connectionId,proto3" json:"connection_id,omitempty"` + // The local identity that was used by S2A during session setup and included + // in |SessionResult|. + LocalIdentity *common_go_proto.Identity `protobuf:"bytes,3,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` +} + +func (x *ResumptionTicketReq) Reset() { + *x = ResumptionTicketReq{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_s2a_s2a_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResumptionTicketReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResumptionTicketReq) ProtoMessage() {} + +func (x *ResumptionTicketReq) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_s2a_s2a_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResumptionTicketReq.ProtoReflect.Descriptor instead. +func (*ResumptionTicketReq) Descriptor() ([]byte, []int) { + return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{4} +} + +func (x *ResumptionTicketReq) GetInBytes() [][]byte { + if x != nil { + return x.InBytes + } + return nil +} + +func (x *ResumptionTicketReq) GetConnectionId() uint64 { + if x != nil { + return x.ConnectionId + } + return 0 +} + +func (x *ResumptionTicketReq) GetLocalIdentity() *common_go_proto.Identity { + if x != nil { + return x.LocalIdentity + } + return nil +} + +type SessionReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to ReqOneof: + // + // *SessionReq_ClientStart + // *SessionReq_ServerStart + // *SessionReq_Next + // *SessionReq_ResumptionTicket + ReqOneof isSessionReq_ReqOneof `protobuf_oneof:"req_oneof"` + // (Optional) The authentication mechanisms that the client wishes to use to + // authenticate to the S2A, ordered by preference. The S2A will always use the + // first authentication mechanism that appears in the list and is supported by + // the S2A. 
+ AuthMechanisms []*AuthenticationMechanism `protobuf:"bytes,5,rep,name=auth_mechanisms,json=authMechanisms,proto3" json:"auth_mechanisms,omitempty"` +} + +func (x *SessionReq) Reset() { + *x = SessionReq{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_s2a_s2a_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SessionReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SessionReq) ProtoMessage() {} + +func (x *SessionReq) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_s2a_s2a_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SessionReq.ProtoReflect.Descriptor instead. +func (*SessionReq) Descriptor() ([]byte, []int) { + return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{5} +} + +func (m *SessionReq) GetReqOneof() isSessionReq_ReqOneof { + if m != nil { + return m.ReqOneof + } + return nil +} + +func (x *SessionReq) GetClientStart() *ClientSessionStartReq { + if x, ok := x.GetReqOneof().(*SessionReq_ClientStart); ok { + return x.ClientStart + } + return nil +} + +func (x *SessionReq) GetServerStart() *ServerSessionStartReq { + if x, ok := x.GetReqOneof().(*SessionReq_ServerStart); ok { + return x.ServerStart + } + return nil +} + +func (x *SessionReq) GetNext() *SessionNextReq { + if x, ok := x.GetReqOneof().(*SessionReq_Next); ok { + return x.Next + } + return nil +} + +func (x *SessionReq) GetResumptionTicket() *ResumptionTicketReq { + if x, ok := x.GetReqOneof().(*SessionReq_ResumptionTicket); ok { + return x.ResumptionTicket + } + return nil +} + +func (x *SessionReq) GetAuthMechanisms() []*AuthenticationMechanism { + if x != nil { + return x.AuthMechanisms + } + return nil +} + +type isSessionReq_ReqOneof interface { + isSessionReq_ReqOneof() +} + +type SessionReq_ClientStart struct { + // The client session setup request message. + ClientStart *ClientSessionStartReq `protobuf:"bytes,1,opt,name=client_start,json=clientStart,proto3,oneof"` +} + +type SessionReq_ServerStart struct { + // The server session setup request message. + ServerStart *ServerSessionStartReq `protobuf:"bytes,2,opt,name=server_start,json=serverStart,proto3,oneof"` +} + +type SessionReq_Next struct { + // The next session setup message request message. + Next *SessionNextReq `protobuf:"bytes,3,opt,name=next,proto3,oneof"` +} + +type SessionReq_ResumptionTicket struct { + // The resumption ticket that is received from the server. This message is + // only accepted by S2A if it is running as a client and if it is received + // after session setup is complete. If S2A is running as a server and it + // receives this message, the session is terminated. + ResumptionTicket *ResumptionTicketReq `protobuf:"bytes,4,opt,name=resumption_ticket,json=resumptionTicket,proto3,oneof"` +} + +func (*SessionReq_ClientStart) isSessionReq_ReqOneof() {} + +func (*SessionReq_ServerStart) isSessionReq_ReqOneof() {} + +func (*SessionReq_Next) isSessionReq_ReqOneof() {} + +func (*SessionReq_ResumptionTicket) isSessionReq_ReqOneof() {} + +type SessionState struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The TLS version number that the S2A's handshaker module used to set up the + // session. 
+ TlsVersion common_go_proto.TLSVersion `protobuf:"varint,1,opt,name=tls_version,json=tlsVersion,proto3,enum=s2a.proto.TLSVersion" json:"tls_version,omitempty"` + // The TLS ciphersuite negotiated by the S2A's handshaker module. + TlsCiphersuite common_go_proto.Ciphersuite `protobuf:"varint,2,opt,name=tls_ciphersuite,json=tlsCiphersuite,proto3,enum=s2a.proto.Ciphersuite" json:"tls_ciphersuite,omitempty"` + // The sequence number of the next, incoming, TLS record. + InSequence uint64 `protobuf:"varint,3,opt,name=in_sequence,json=inSequence,proto3" json:"in_sequence,omitempty"` + // The sequence number of the next, outgoing, TLS record. + OutSequence uint64 `protobuf:"varint,4,opt,name=out_sequence,json=outSequence,proto3" json:"out_sequence,omitempty"` + // The key for the inbound direction. + InKey []byte `protobuf:"bytes,5,opt,name=in_key,json=inKey,proto3" json:"in_key,omitempty"` + // The key for the outbound direction. + OutKey []byte `protobuf:"bytes,6,opt,name=out_key,json=outKey,proto3" json:"out_key,omitempty"` + // The constant part of the record nonce for the outbound direction. + InFixedNonce []byte `protobuf:"bytes,7,opt,name=in_fixed_nonce,json=inFixedNonce,proto3" json:"in_fixed_nonce,omitempty"` + // The constant part of the record nonce for the inbound direction. + OutFixedNonce []byte `protobuf:"bytes,8,opt,name=out_fixed_nonce,json=outFixedNonce,proto3" json:"out_fixed_nonce,omitempty"` + // A connection identifier that can be provided to S2A to perform operations + // related to this connection. This identifier will be stored by the record + // protocol, and included in the |ResumptionTicketReq| message that is later + // sent back to S2A. This field is set only for client-side connections. + ConnectionId uint64 `protobuf:"varint,9,opt,name=connection_id,json=connectionId,proto3" json:"connection_id,omitempty"` + // Set to true if a cached session was reused to do an abbreviated handshake. + IsHandshakeResumed bool `protobuf:"varint,10,opt,name=is_handshake_resumed,json=isHandshakeResumed,proto3" json:"is_handshake_resumed,omitempty"` +} + +func (x *SessionState) Reset() { + *x = SessionState{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_s2a_s2a_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SessionState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SessionState) ProtoMessage() {} + +func (x *SessionState) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_s2a_s2a_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SessionState.ProtoReflect.Descriptor instead. 
+func (*SessionState) Descriptor() ([]byte, []int) { + return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{6} +} + +func (x *SessionState) GetTlsVersion() common_go_proto.TLSVersion { + if x != nil { + return x.TlsVersion + } + return common_go_proto.TLSVersion(0) +} + +func (x *SessionState) GetTlsCiphersuite() common_go_proto.Ciphersuite { + if x != nil { + return x.TlsCiphersuite + } + return common_go_proto.Ciphersuite(0) +} + +func (x *SessionState) GetInSequence() uint64 { + if x != nil { + return x.InSequence + } + return 0 +} + +func (x *SessionState) GetOutSequence() uint64 { + if x != nil { + return x.OutSequence + } + return 0 +} + +func (x *SessionState) GetInKey() []byte { + if x != nil { + return x.InKey + } + return nil +} + +func (x *SessionState) GetOutKey() []byte { + if x != nil { + return x.OutKey + } + return nil +} + +func (x *SessionState) GetInFixedNonce() []byte { + if x != nil { + return x.InFixedNonce + } + return nil +} + +func (x *SessionState) GetOutFixedNonce() []byte { + if x != nil { + return x.OutFixedNonce + } + return nil +} + +func (x *SessionState) GetConnectionId() uint64 { + if x != nil { + return x.ConnectionId + } + return 0 +} + +func (x *SessionState) GetIsHandshakeResumed() bool { + if x != nil { + return x.IsHandshakeResumed + } + return false +} + +type SessionResult struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The application protocol negotiated for this session. + ApplicationProtocol string `protobuf:"bytes,1,opt,name=application_protocol,json=applicationProtocol,proto3" json:"application_protocol,omitempty"` + // The session state at the end. This state contains all cryptographic + // material required to initialize the record protocol object. + State *SessionState `protobuf:"bytes,2,opt,name=state,proto3" json:"state,omitempty"` + // The authenticated identity of the peer. + PeerIdentity *common_go_proto.Identity `protobuf:"bytes,4,opt,name=peer_identity,json=peerIdentity,proto3" json:"peer_identity,omitempty"` + // The local identity used during session setup. This could be: + // - The local identity that the client specifies in ClientSessionStartReq. + // - One of the local identities that the server specifies in + // ServerSessionStartReq. + // - If neither client or server specifies local identities, the S2A picks the + // default one. In this case, this field will contain that identity. + LocalIdentity *common_go_proto.Identity `protobuf:"bytes,5,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` + // The SHA256 hash of the local certificate used in the handshake. + LocalCertFingerprint []byte `protobuf:"bytes,6,opt,name=local_cert_fingerprint,json=localCertFingerprint,proto3" json:"local_cert_fingerprint,omitempty"` + // The SHA256 hash of the peer certificate used in the handshake. 
+ PeerCertFingerprint []byte `protobuf:"bytes,7,opt,name=peer_cert_fingerprint,json=peerCertFingerprint,proto3" json:"peer_cert_fingerprint,omitempty"` +} + +func (x *SessionResult) Reset() { + *x = SessionResult{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_s2a_s2a_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SessionResult) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SessionResult) ProtoMessage() {} + +func (x *SessionResult) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_s2a_s2a_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SessionResult.ProtoReflect.Descriptor instead. +func (*SessionResult) Descriptor() ([]byte, []int) { + return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{7} +} + +func (x *SessionResult) GetApplicationProtocol() string { + if x != nil { + return x.ApplicationProtocol + } + return "" +} + +func (x *SessionResult) GetState() *SessionState { + if x != nil { + return x.State + } + return nil +} + +func (x *SessionResult) GetPeerIdentity() *common_go_proto.Identity { + if x != nil { + return x.PeerIdentity + } + return nil +} + +func (x *SessionResult) GetLocalIdentity() *common_go_proto.Identity { + if x != nil { + return x.LocalIdentity + } + return nil +} + +func (x *SessionResult) GetLocalCertFingerprint() []byte { + if x != nil { + return x.LocalCertFingerprint + } + return nil +} + +func (x *SessionResult) GetPeerCertFingerprint() []byte { + if x != nil { + return x.PeerCertFingerprint + } + return nil +} + +type SessionStatus struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The status code that is specific to the application and the implementation + // of S2A, e.g., gRPC status code. + Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + // The status details. + Details string `protobuf:"bytes,2,opt,name=details,proto3" json:"details,omitempty"` +} + +func (x *SessionStatus) Reset() { + *x = SessionStatus{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_s2a_s2a_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SessionStatus) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SessionStatus) ProtoMessage() {} + +func (x *SessionStatus) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_s2a_s2a_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SessionStatus.ProtoReflect.Descriptor instead. +func (*SessionStatus) Descriptor() ([]byte, []int) { + return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{8} +} + +func (x *SessionStatus) GetCode() uint32 { + if x != nil { + return x.Code + } + return 0 +} + +func (x *SessionStatus) GetDetails() string { + if x != nil { + return x.Details + } + return "" +} + +type SessionResp struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The local identity used during session setup. 
This could be: + // - The local identity that the client specifies in ClientSessionStartReq. + // - One of the local identities that the server specifies in + // ServerSessionStartReq. + // - If neither client or server specifies local identities, the S2A picks the + // default one. In this case, this field will contain that identity. + // + // If the SessionResult is populated, then this must coincide with the local + // identity specified in the SessionResult; otherwise, the handshake must + // fail. + LocalIdentity *common_go_proto.Identity `protobuf:"bytes,1,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` + // The byte representation of the frames that should be sent to the peer. May + // be empty if nothing needs to be sent to the peer or if in_bytes in the + // SessionReq is incomplete. All bytes in a non-empty out_frames must be sent + // to the peer even if the session setup status is not OK as these frames may + // contain appropriate alerts. + OutFrames []byte `protobuf:"bytes,2,opt,name=out_frames,json=outFrames,proto3" json:"out_frames,omitempty"` + // Number of bytes in the in_bytes field that are consumed by S2A. It is + // possible that part of in_bytes is unrelated to the session setup process. + BytesConsumed uint32 `protobuf:"varint,3,opt,name=bytes_consumed,json=bytesConsumed,proto3" json:"bytes_consumed,omitempty"` + // This is set if the session is successfully set up. out_frames may + // still be set to frames that needs to be forwarded to the peer. + Result *SessionResult `protobuf:"bytes,4,opt,name=result,proto3" json:"result,omitempty"` + // Status of session setup at the current stage. + Status *SessionStatus `protobuf:"bytes,5,opt,name=status,proto3" json:"status,omitempty"` +} + +func (x *SessionResp) Reset() { + *x = SessionResp{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_s2a_s2a_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SessionResp) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SessionResp) ProtoMessage() {} + +func (x *SessionResp) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_s2a_s2a_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SessionResp.ProtoReflect.Descriptor instead. 
+func (*SessionResp) Descriptor() ([]byte, []int) { + return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{9} +} + +func (x *SessionResp) GetLocalIdentity() *common_go_proto.Identity { + if x != nil { + return x.LocalIdentity + } + return nil +} + +func (x *SessionResp) GetOutFrames() []byte { + if x != nil { + return x.OutFrames + } + return nil +} + +func (x *SessionResp) GetBytesConsumed() uint32 { + if x != nil { + return x.BytesConsumed + } + return 0 +} + +func (x *SessionResp) GetResult() *SessionResult { + if x != nil { + return x.Result + } + return nil +} + +func (x *SessionResp) GetStatus() *SessionStatus { + if x != nil { + return x.Status + } + return nil +} + +var File_internal_proto_s2a_s2a_proto protoreflect.FileDescriptor + +var file_internal_proto_s2a_s2a_proto_rawDesc = []byte{ + 0x0a, 0x1c, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, + 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x22, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x75, 0x0a, + 0x17, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, + 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x12, 0x2f, 0x0a, 0x08, 0x69, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, + 0x08, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x16, 0x0a, 0x05, 0x74, 0x6f, 0x6b, + 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, + 0x6e, 0x42, 0x11, 0x0a, 0x0f, 0x6d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x5f, 0x6f, + 0x6e, 0x65, 0x6f, 0x66, 0x22, 0xac, 0x03, 0x0a, 0x15, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, + 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x71, 0x12, 0x33, + 0x0a, 0x15, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x61, + 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x6f, 0x6c, 0x73, 0x12, 0x3d, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x73, + 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x73, 0x32, + 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x41, 0x0a, 0x10, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, + 0x75, 0x69, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x73, 0x32, + 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 
0x75, + 0x69, 0x74, 0x65, 0x52, 0x0f, 0x74, 0x6c, 0x73, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, + 0x69, 0x74, 0x65, 0x73, 0x12, 0x40, 0x0a, 0x11, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x69, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x52, 0x10, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x49, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x3a, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, + 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, + 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x74, 0x79, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4e, + 0x61, 0x6d, 0x65, 0x22, 0xe8, 0x02, 0x0a, 0x15, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x65, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x71, 0x12, 0x33, 0x0a, + 0x15, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x61, 0x70, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6c, 0x73, 0x12, 0x3d, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x73, 0x32, + 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x3d, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x73, 0x32, 0x61, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x41, 0x0a, 0x10, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, + 0x69, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x73, 0x32, 0x61, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, + 0x74, 0x65, 0x52, 0x0f, 0x74, 0x6c, 0x73, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, + 0x74, 0x65, 0x73, 0x12, 0x3e, 0x0a, 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, + 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, + 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x74, 0x79, 0x52, 0x0f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, + 0x69, 0x65, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x2b, + 0x0a, 0x0e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x65, 0x78, 0x74, 0x52, 0x65, 0x71, + 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0c, 0x52, 
0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x91, 0x01, 0x0a, 0x13, + 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, + 0x52, 0x65, 0x71, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x23, + 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x49, 0x64, 0x12, 0x3a, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, + 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x32, + 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, + 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x22, + 0xf4, 0x02, 0x0a, 0x0a, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x45, + 0x0a, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, + 0x61, 0x72, 0x74, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x45, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, + 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x73, 0x32, + 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x65, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, + 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x2f, 0x0a, 0x04, + 0x6e, 0x65, 0x78, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x73, 0x32, 0x61, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x65, + 0x78, 0x74, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x04, 0x6e, 0x65, 0x78, 0x74, 0x12, 0x4d, 0x0a, + 0x11, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x63, 0x6b, + 0x65, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, + 0x69, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x10, 0x72, 0x65, 0x73, 0x75, + 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x4b, 0x0a, 0x0f, + 0x61, 0x75, 0x74, 0x68, 0x5f, 0x6d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x73, 0x18, + 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x52, 0x0e, 0x61, 0x75, 0x74, 0x68, 0x4d, + 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x73, 0x42, 0x0b, 0x0a, 0x09, 0x72, 0x65, 0x71, + 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0xa0, 0x03, 0x0a, 0x0c, 0x53, 0x65, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x36, 0x0a, 0x0b, 0x74, 0x6c, 0x73, 0x5f, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x73, + 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 
0x6f, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x74, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, + 0x3f, 0x0a, 0x0f, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, + 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, + 0x52, 0x0e, 0x74, 0x6c, 0x73, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, + 0x12, 0x1f, 0x0a, 0x0b, 0x69, 0x6e, 0x5f, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x69, 0x6e, 0x53, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, + 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x6f, 0x75, 0x74, 0x53, 0x65, 0x71, 0x75, + 0x65, 0x6e, 0x63, 0x65, 0x12, 0x15, 0x0a, 0x06, 0x69, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x69, 0x6e, 0x4b, 0x65, 0x79, 0x12, 0x17, 0x0a, 0x07, 0x6f, + 0x75, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x6f, 0x75, + 0x74, 0x4b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x0e, 0x69, 0x6e, 0x5f, 0x66, 0x69, 0x78, 0x65, 0x64, + 0x5f, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x69, 0x6e, + 0x46, 0x69, 0x78, 0x65, 0x64, 0x4e, 0x6f, 0x6e, 0x63, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x6f, 0x75, + 0x74, 0x5f, 0x66, 0x69, 0x78, 0x65, 0x64, 0x5f, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x0d, 0x6f, 0x75, 0x74, 0x46, 0x69, 0x78, 0x65, 0x64, 0x4e, 0x6f, 0x6e, + 0x63, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x69, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x30, 0x0a, 0x14, 0x69, 0x73, 0x5f, 0x68, 0x61, + 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x18, + 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x69, 0x73, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, + 0x6b, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x22, 0xd1, 0x02, 0x0a, 0x0d, 0x53, 0x65, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x31, 0x0a, 0x14, 0x61, + 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x61, 0x70, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x2d, + 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, + 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, + 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x38, 0x0a, + 0x0d, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0c, 0x70, 0x65, 0x65, 0x72, 0x49, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x3a, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, + 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 
0x2e, 0x49, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x74, 0x79, 0x12, 0x34, 0x0a, 0x16, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x63, 0x65, 0x72, + 0x74, 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x14, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x43, 0x65, 0x72, 0x74, 0x46, 0x69, + 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x65, 0x65, + 0x72, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, + 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x13, 0x70, 0x65, 0x65, 0x72, 0x43, 0x65, + 0x72, 0x74, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x22, 0x3d, 0x0a, + 0x0d, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, + 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, + 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x22, 0xf3, 0x01, 0x0a, + 0x0b, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x3a, 0x0a, 0x0e, + 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, + 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x1d, 0x0a, 0x0a, 0x6f, 0x75, 0x74, 0x5f, + 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x6f, 0x75, + 0x74, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x62, 0x79, 0x74, 0x65, 0x73, + 0x5f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x0d, 0x62, 0x79, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x12, 0x30, + 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, + 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x12, 0x30, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x65, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x32, 0x51, 0x0a, 0x0a, 0x53, 0x32, 0x41, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x12, 0x43, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x55, 0x70, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x15, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x65, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x1a, 0x16, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x22, + 0x00, 0x28, 0x01, 0x30, 0x01, 0x42, 0x33, 0x5a, 0x31, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x73, 0x32, + 0x61, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 
0x74, + 0x6f, 0x33, +} + +var ( + file_internal_proto_s2a_s2a_proto_rawDescOnce sync.Once + file_internal_proto_s2a_s2a_proto_rawDescData = file_internal_proto_s2a_s2a_proto_rawDesc +) + +func file_internal_proto_s2a_s2a_proto_rawDescGZIP() []byte { + file_internal_proto_s2a_s2a_proto_rawDescOnce.Do(func() { + file_internal_proto_s2a_s2a_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_proto_s2a_s2a_proto_rawDescData) + }) + return file_internal_proto_s2a_s2a_proto_rawDescData +} + +var file_internal_proto_s2a_s2a_proto_msgTypes = make([]protoimpl.MessageInfo, 10) +var file_internal_proto_s2a_s2a_proto_goTypes = []interface{}{ + (*AuthenticationMechanism)(nil), // 0: s2a.proto.AuthenticationMechanism + (*ClientSessionStartReq)(nil), // 1: s2a.proto.ClientSessionStartReq + (*ServerSessionStartReq)(nil), // 2: s2a.proto.ServerSessionStartReq + (*SessionNextReq)(nil), // 3: s2a.proto.SessionNextReq + (*ResumptionTicketReq)(nil), // 4: s2a.proto.ResumptionTicketReq + (*SessionReq)(nil), // 5: s2a.proto.SessionReq + (*SessionState)(nil), // 6: s2a.proto.SessionState + (*SessionResult)(nil), // 7: s2a.proto.SessionResult + (*SessionStatus)(nil), // 8: s2a.proto.SessionStatus + (*SessionResp)(nil), // 9: s2a.proto.SessionResp + (*common_go_proto.Identity)(nil), // 10: s2a.proto.Identity + (common_go_proto.TLSVersion)(0), // 11: s2a.proto.TLSVersion + (common_go_proto.Ciphersuite)(0), // 12: s2a.proto.Ciphersuite +} +var file_internal_proto_s2a_s2a_proto_depIdxs = []int32{ + 10, // 0: s2a.proto.AuthenticationMechanism.identity:type_name -> s2a.proto.Identity + 11, // 1: s2a.proto.ClientSessionStartReq.min_tls_version:type_name -> s2a.proto.TLSVersion + 11, // 2: s2a.proto.ClientSessionStartReq.max_tls_version:type_name -> s2a.proto.TLSVersion + 12, // 3: s2a.proto.ClientSessionStartReq.tls_ciphersuites:type_name -> s2a.proto.Ciphersuite + 10, // 4: s2a.proto.ClientSessionStartReq.target_identities:type_name -> s2a.proto.Identity + 10, // 5: s2a.proto.ClientSessionStartReq.local_identity:type_name -> s2a.proto.Identity + 11, // 6: s2a.proto.ServerSessionStartReq.min_tls_version:type_name -> s2a.proto.TLSVersion + 11, // 7: s2a.proto.ServerSessionStartReq.max_tls_version:type_name -> s2a.proto.TLSVersion + 12, // 8: s2a.proto.ServerSessionStartReq.tls_ciphersuites:type_name -> s2a.proto.Ciphersuite + 10, // 9: s2a.proto.ServerSessionStartReq.local_identities:type_name -> s2a.proto.Identity + 10, // 10: s2a.proto.ResumptionTicketReq.local_identity:type_name -> s2a.proto.Identity + 1, // 11: s2a.proto.SessionReq.client_start:type_name -> s2a.proto.ClientSessionStartReq + 2, // 12: s2a.proto.SessionReq.server_start:type_name -> s2a.proto.ServerSessionStartReq + 3, // 13: s2a.proto.SessionReq.next:type_name -> s2a.proto.SessionNextReq + 4, // 14: s2a.proto.SessionReq.resumption_ticket:type_name -> s2a.proto.ResumptionTicketReq + 0, // 15: s2a.proto.SessionReq.auth_mechanisms:type_name -> s2a.proto.AuthenticationMechanism + 11, // 16: s2a.proto.SessionState.tls_version:type_name -> s2a.proto.TLSVersion + 12, // 17: s2a.proto.SessionState.tls_ciphersuite:type_name -> s2a.proto.Ciphersuite + 6, // 18: s2a.proto.SessionResult.state:type_name -> s2a.proto.SessionState + 10, // 19: s2a.proto.SessionResult.peer_identity:type_name -> s2a.proto.Identity + 10, // 20: s2a.proto.SessionResult.local_identity:type_name -> s2a.proto.Identity + 10, // 21: s2a.proto.SessionResp.local_identity:type_name -> s2a.proto.Identity + 7, // 22: s2a.proto.SessionResp.result:type_name -> s2a.proto.SessionResult + 8, // 
23: s2a.proto.SessionResp.status:type_name -> s2a.proto.SessionStatus + 5, // 24: s2a.proto.S2AService.SetUpSession:input_type -> s2a.proto.SessionReq + 9, // 25: s2a.proto.S2AService.SetUpSession:output_type -> s2a.proto.SessionResp + 25, // [25:26] is the sub-list for method output_type + 24, // [24:25] is the sub-list for method input_type + 24, // [24:24] is the sub-list for extension type_name + 24, // [24:24] is the sub-list for extension extendee + 0, // [0:24] is the sub-list for field type_name +} + +func init() { file_internal_proto_s2a_s2a_proto_init() } +func file_internal_proto_s2a_s2a_proto_init() { + if File_internal_proto_s2a_s2a_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_internal_proto_s2a_s2a_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AuthenticationMechanism); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_s2a_s2a_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClientSessionStartReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_s2a_s2a_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServerSessionStartReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_s2a_s2a_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SessionNextReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_s2a_s2a_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResumptionTicketReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_s2a_s2a_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SessionReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_s2a_s2a_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SessionState); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_s2a_s2a_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SessionResult); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_s2a_s2a_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SessionStatus); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_s2a_s2a_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SessionResp); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_internal_proto_s2a_s2a_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*AuthenticationMechanism_Token)(nil), + } + 
file_internal_proto_s2a_s2a_proto_msgTypes[5].OneofWrappers = []interface{}{ + (*SessionReq_ClientStart)(nil), + (*SessionReq_ServerStart)(nil), + (*SessionReq_Next)(nil), + (*SessionReq_ResumptionTicket)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_internal_proto_s2a_s2a_proto_rawDesc, + NumEnums: 0, + NumMessages: 10, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_internal_proto_s2a_s2a_proto_goTypes, + DependencyIndexes: file_internal_proto_s2a_s2a_proto_depIdxs, + MessageInfos: file_internal_proto_s2a_s2a_proto_msgTypes, + }.Build() + File_internal_proto_s2a_s2a_proto = out.File + file_internal_proto_s2a_s2a_proto_rawDesc = nil + file_internal_proto_s2a_s2a_proto_goTypes = nil + file_internal_proto_s2a_s2a_proto_depIdxs = nil +} diff -Nru temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a_grpc.pb.go temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a_grpc.pb.go --- temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a_grpc.pb.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a_grpc.pb.go 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,173 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc v3.21.12 +// source: internal/proto/s2a/s2a.proto + +package s2a_go_proto + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + S2AService_SetUpSession_FullMethodName = "/s2a.proto.S2AService/SetUpSession" +) + +// S2AServiceClient is the client API for S2AService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type S2AServiceClient interface { + // S2A service accepts a stream of session setup requests and returns a stream + // of session setup responses. The client of this service is expected to send + // exactly one client_start or server_start message followed by at least one + // next message. Applications running TLS clients can send requests with + // resumption_ticket messages only after the session is successfully set up. + // + // Every time S2A client sends a request, this service sends a response. + // However, clients do not have to wait for service response before sending + // the next request. 
+ SetUpSession(ctx context.Context, opts ...grpc.CallOption) (S2AService_SetUpSessionClient, error) +} + +type s2AServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewS2AServiceClient(cc grpc.ClientConnInterface) S2AServiceClient { + return &s2AServiceClient{cc} +} + +func (c *s2AServiceClient) SetUpSession(ctx context.Context, opts ...grpc.CallOption) (S2AService_SetUpSessionClient, error) { + stream, err := c.cc.NewStream(ctx, &S2AService_ServiceDesc.Streams[0], S2AService_SetUpSession_FullMethodName, opts...) + if err != nil { + return nil, err + } + x := &s2AServiceSetUpSessionClient{stream} + return x, nil +} + +type S2AService_SetUpSessionClient interface { + Send(*SessionReq) error + Recv() (*SessionResp, error) + grpc.ClientStream +} + +type s2AServiceSetUpSessionClient struct { + grpc.ClientStream +} + +func (x *s2AServiceSetUpSessionClient) Send(m *SessionReq) error { + return x.ClientStream.SendMsg(m) +} + +func (x *s2AServiceSetUpSessionClient) Recv() (*SessionResp, error) { + m := new(SessionResp) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// S2AServiceServer is the server API for S2AService service. +// All implementations must embed UnimplementedS2AServiceServer +// for forward compatibility +type S2AServiceServer interface { + // S2A service accepts a stream of session setup requests and returns a stream + // of session setup responses. The client of this service is expected to send + // exactly one client_start or server_start message followed by at least one + // next message. Applications running TLS clients can send requests with + // resumption_ticket messages only after the session is successfully set up. + // + // Every time S2A client sends a request, this service sends a response. + // However, clients do not have to wait for service response before sending + // the next request. + SetUpSession(S2AService_SetUpSessionServer) error + mustEmbedUnimplementedS2AServiceServer() +} + +// UnimplementedS2AServiceServer must be embedded to have forward compatible implementations. +type UnimplementedS2AServiceServer struct { +} + +func (UnimplementedS2AServiceServer) SetUpSession(S2AService_SetUpSessionServer) error { + return status.Errorf(codes.Unimplemented, "method SetUpSession not implemented") +} +func (UnimplementedS2AServiceServer) mustEmbedUnimplementedS2AServiceServer() {} + +// UnsafeS2AServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to S2AServiceServer will +// result in compilation errors. 
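// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of the vendored diff): the
// generated client and server types in this file can be wired up roughly as
// follows. The helper names (setUpSessionOnce, echoS2AService), the idea of
// answering every request with an empty SessionResp, and the extra "io"
// import are assumptions for the example only; a real S2A handshaker fills in
// actual handshake state. The context and grpc imports already exist in this
// file.

// Client side: open the bidirectional stream, send one prepared *SessionReq
// (a client_start or server_start message per the contract described above),
// and read the corresponding *SessionResp. A real caller keeps sending "next"
// requests on the same stream until the handshake completes.
func setUpSessionOnce(ctx context.Context, cc grpc.ClientConnInterface, req *SessionReq) (*SessionResp, error) {
	stream, err := NewS2AServiceClient(cc).SetUpSession(ctx)
	if err != nil {
		return nil, err
	}
	if err := stream.Send(req); err != nil {
		return nil, err
	}
	return stream.Recv()
}

// Server side: embed UnimplementedS2AServiceServer for forward compatibility
// and override SetUpSession.
type echoS2AService struct {
	UnimplementedS2AServiceServer
}

func (echoS2AService) SetUpSession(stream S2AService_SetUpSessionServer) error {
	for {
		if _, err := stream.Recv(); err != nil {
			if err == io.EOF {
				return nil // the client closed its side of the stream
			}
			return err
		}
		if err := stream.Send(&SessionResp{}); err != nil {
			return err
		}
	}
}

// Registration (grpcServer is assumed to be a *grpc.Server created elsewhere):
//
//	RegisterS2AServiceServer(grpcServer, echoS2AService{})
// ---------------------------------------------------------------------------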
+type UnsafeS2AServiceServer interface { + mustEmbedUnimplementedS2AServiceServer() +} + +func RegisterS2AServiceServer(s grpc.ServiceRegistrar, srv S2AServiceServer) { + s.RegisterService(&S2AService_ServiceDesc, srv) +} + +func _S2AService_SetUpSession_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(S2AServiceServer).SetUpSession(&s2AServiceSetUpSessionServer{stream}) +} + +type S2AService_SetUpSessionServer interface { + Send(*SessionResp) error + Recv() (*SessionReq, error) + grpc.ServerStream +} + +type s2AServiceSetUpSessionServer struct { + grpc.ServerStream +} + +func (x *s2AServiceSetUpSessionServer) Send(m *SessionResp) error { + return x.ServerStream.SendMsg(m) +} + +func (x *s2AServiceSetUpSessionServer) Recv() (*SessionReq, error) { + m := new(SessionReq) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// S2AService_ServiceDesc is the grpc.ServiceDesc for S2AService service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var S2AService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "s2a.proto.S2AService", + HandlerType: (*S2AServiceServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "SetUpSession", + Handler: _S2AService_SetUpSession_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "internal/proto/s2a/s2a.proto", +} diff -Nru temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/proto/v2/common_go_proto/common.pb.go temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/proto/v2/common_go_proto/common.pb.go --- temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/proto/v2/common_go_proto/common.pb.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/proto/v2/common_go_proto/common.pb.go 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,367 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v3.21.12 +// source: internal/proto/v2/common/common.proto + +package common_go_proto + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The TLS 1.0-1.2 ciphersuites that the application can negotiate when using +// S2A. 
+type Ciphersuite int32 + +const ( + Ciphersuite_CIPHERSUITE_UNSPECIFIED Ciphersuite = 0 + Ciphersuite_CIPHERSUITE_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 Ciphersuite = 1 + Ciphersuite_CIPHERSUITE_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 Ciphersuite = 2 + Ciphersuite_CIPHERSUITE_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 Ciphersuite = 3 + Ciphersuite_CIPHERSUITE_ECDHE_RSA_WITH_AES_128_GCM_SHA256 Ciphersuite = 4 + Ciphersuite_CIPHERSUITE_ECDHE_RSA_WITH_AES_256_GCM_SHA384 Ciphersuite = 5 + Ciphersuite_CIPHERSUITE_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 Ciphersuite = 6 +) + +// Enum value maps for Ciphersuite. +var ( + Ciphersuite_name = map[int32]string{ + 0: "CIPHERSUITE_UNSPECIFIED", + 1: "CIPHERSUITE_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + 2: "CIPHERSUITE_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", + 3: "CIPHERSUITE_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", + 4: "CIPHERSUITE_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + 5: "CIPHERSUITE_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + 6: "CIPHERSUITE_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", + } + Ciphersuite_value = map[string]int32{ + "CIPHERSUITE_UNSPECIFIED": 0, + "CIPHERSUITE_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256": 1, + "CIPHERSUITE_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384": 2, + "CIPHERSUITE_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256": 3, + "CIPHERSUITE_ECDHE_RSA_WITH_AES_128_GCM_SHA256": 4, + "CIPHERSUITE_ECDHE_RSA_WITH_AES_256_GCM_SHA384": 5, + "CIPHERSUITE_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256": 6, + } +) + +func (x Ciphersuite) Enum() *Ciphersuite { + p := new(Ciphersuite) + *p = x + return p +} + +func (x Ciphersuite) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Ciphersuite) Descriptor() protoreflect.EnumDescriptor { + return file_internal_proto_v2_common_common_proto_enumTypes[0].Descriptor() +} + +func (Ciphersuite) Type() protoreflect.EnumType { + return &file_internal_proto_v2_common_common_proto_enumTypes[0] +} + +func (x Ciphersuite) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Ciphersuite.Descriptor instead. +func (Ciphersuite) EnumDescriptor() ([]byte, []int) { + return file_internal_proto_v2_common_common_proto_rawDescGZIP(), []int{0} +} + +// The TLS versions supported by S2A's handshaker module. +type TLSVersion int32 + +const ( + TLSVersion_TLS_VERSION_UNSPECIFIED TLSVersion = 0 + TLSVersion_TLS_VERSION_1_0 TLSVersion = 1 + TLSVersion_TLS_VERSION_1_1 TLSVersion = 2 + TLSVersion_TLS_VERSION_1_2 TLSVersion = 3 + TLSVersion_TLS_VERSION_1_3 TLSVersion = 4 +) + +// Enum value maps for TLSVersion. 
+var ( + TLSVersion_name = map[int32]string{ + 0: "TLS_VERSION_UNSPECIFIED", + 1: "TLS_VERSION_1_0", + 2: "TLS_VERSION_1_1", + 3: "TLS_VERSION_1_2", + 4: "TLS_VERSION_1_3", + } + TLSVersion_value = map[string]int32{ + "TLS_VERSION_UNSPECIFIED": 0, + "TLS_VERSION_1_0": 1, + "TLS_VERSION_1_1": 2, + "TLS_VERSION_1_2": 3, + "TLS_VERSION_1_3": 4, + } +) + +func (x TLSVersion) Enum() *TLSVersion { + p := new(TLSVersion) + *p = x + return p +} + +func (x TLSVersion) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (TLSVersion) Descriptor() protoreflect.EnumDescriptor { + return file_internal_proto_v2_common_common_proto_enumTypes[1].Descriptor() +} + +func (TLSVersion) Type() protoreflect.EnumType { + return &file_internal_proto_v2_common_common_proto_enumTypes[1] +} + +func (x TLSVersion) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use TLSVersion.Descriptor instead. +func (TLSVersion) EnumDescriptor() ([]byte, []int) { + return file_internal_proto_v2_common_common_proto_rawDescGZIP(), []int{1} +} + +// The side in the TLS connection. +type ConnectionSide int32 + +const ( + ConnectionSide_CONNECTION_SIDE_UNSPECIFIED ConnectionSide = 0 + ConnectionSide_CONNECTION_SIDE_CLIENT ConnectionSide = 1 + ConnectionSide_CONNECTION_SIDE_SERVER ConnectionSide = 2 +) + +// Enum value maps for ConnectionSide. +var ( + ConnectionSide_name = map[int32]string{ + 0: "CONNECTION_SIDE_UNSPECIFIED", + 1: "CONNECTION_SIDE_CLIENT", + 2: "CONNECTION_SIDE_SERVER", + } + ConnectionSide_value = map[string]int32{ + "CONNECTION_SIDE_UNSPECIFIED": 0, + "CONNECTION_SIDE_CLIENT": 1, + "CONNECTION_SIDE_SERVER": 2, + } +) + +func (x ConnectionSide) Enum() *ConnectionSide { + p := new(ConnectionSide) + *p = x + return p +} + +func (x ConnectionSide) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ConnectionSide) Descriptor() protoreflect.EnumDescriptor { + return file_internal_proto_v2_common_common_proto_enumTypes[2].Descriptor() +} + +func (ConnectionSide) Type() protoreflect.EnumType { + return &file_internal_proto_v2_common_common_proto_enumTypes[2] +} + +func (x ConnectionSide) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ConnectionSide.Descriptor instead. +func (ConnectionSide) EnumDescriptor() ([]byte, []int) { + return file_internal_proto_v2_common_common_proto_rawDescGZIP(), []int{2} +} + +// The ALPN protocols that the application can negotiate during a TLS handshake. +type AlpnProtocol int32 + +const ( + AlpnProtocol_ALPN_PROTOCOL_UNSPECIFIED AlpnProtocol = 0 + AlpnProtocol_ALPN_PROTOCOL_GRPC AlpnProtocol = 1 + AlpnProtocol_ALPN_PROTOCOL_HTTP2 AlpnProtocol = 2 + AlpnProtocol_ALPN_PROTOCOL_HTTP1_1 AlpnProtocol = 3 +) + +// Enum value maps for AlpnProtocol. 
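// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of the vendored diff): the
// enums in this file behave like ordinary protoc-gen-go enums; the helper
// name below is an assumption for the example only.
func exampleCommonEnums() {
	v := TLSVersion_TLS_VERSION_1_3
	_ = v.String()                          // "TLS_VERSION_1_3"
	_ = TLSVersion_value["TLS_VERSION_1_3"] // 4, from the generated value map
	_ = AlpnProtocol_ALPN_PROTOCOL_GRPC     // enum constants are ordinary Go constants
}
// ---------------------------------------------------------------------------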
+var ( + AlpnProtocol_name = map[int32]string{ + 0: "ALPN_PROTOCOL_UNSPECIFIED", + 1: "ALPN_PROTOCOL_GRPC", + 2: "ALPN_PROTOCOL_HTTP2", + 3: "ALPN_PROTOCOL_HTTP1_1", + } + AlpnProtocol_value = map[string]int32{ + "ALPN_PROTOCOL_UNSPECIFIED": 0, + "ALPN_PROTOCOL_GRPC": 1, + "ALPN_PROTOCOL_HTTP2": 2, + "ALPN_PROTOCOL_HTTP1_1": 3, + } +) + +func (x AlpnProtocol) Enum() *AlpnProtocol { + p := new(AlpnProtocol) + *p = x + return p +} + +func (x AlpnProtocol) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (AlpnProtocol) Descriptor() protoreflect.EnumDescriptor { + return file_internal_proto_v2_common_common_proto_enumTypes[3].Descriptor() +} + +func (AlpnProtocol) Type() protoreflect.EnumType { + return &file_internal_proto_v2_common_common_proto_enumTypes[3] +} + +func (x AlpnProtocol) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use AlpnProtocol.Descriptor instead. +func (AlpnProtocol) EnumDescriptor() ([]byte, []int) { + return file_internal_proto_v2_common_common_proto_rawDescGZIP(), []int{3} +} + +var File_internal_proto_v2_common_common_proto protoreflect.FileDescriptor + +var file_internal_proto_v2_common_common_proto_rawDesc = []byte{ + 0x0a, 0x25, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2a, 0xee, 0x02, 0x0a, 0x0b, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, + 0x73, 0x75, 0x69, 0x74, 0x65, 0x12, 0x1b, 0x0a, 0x17, 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, + 0x55, 0x49, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, + 0x10, 0x00, 0x12, 0x33, 0x0a, 0x2f, 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, 0x54, + 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x57, 0x49, + 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x31, 0x32, 0x38, 0x5f, 0x47, 0x43, 0x4d, 0x5f, 0x53, + 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x01, 0x12, 0x33, 0x0a, 0x2f, 0x43, 0x49, 0x50, 0x48, 0x45, + 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, 0x45, 0x43, 0x44, + 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36, 0x5f, + 0x47, 0x43, 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x02, 0x12, 0x39, 0x0a, 0x35, + 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, + 0x45, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x43, 0x48, 0x41, + 0x43, 0x48, 0x41, 0x32, 0x30, 0x5f, 0x50, 0x4f, 0x4c, 0x59, 0x31, 0x33, 0x30, 0x35, 0x5f, 0x53, + 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x03, 0x12, 0x31, 0x0a, 0x2d, 0x43, 0x49, 0x50, 0x48, 0x45, + 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, 0x52, 0x53, 0x41, + 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x31, 0x32, 0x38, 0x5f, 0x47, 0x43, + 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x04, 0x12, 0x31, 0x0a, 0x2d, 0x43, 0x49, + 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, + 0x52, 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36, + 0x5f, 0x47, 0x43, 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x05, 0x12, 0x37, 0x0a, + 0x33, 0x43, 0x49, 0x50, 0x48, 0x45, 
0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, + 0x48, 0x45, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x43, 0x48, 0x41, 0x43, + 0x48, 0x41, 0x32, 0x30, 0x5f, 0x50, 0x4f, 0x4c, 0x59, 0x31, 0x33, 0x30, 0x35, 0x5f, 0x53, 0x48, + 0x41, 0x32, 0x35, 0x36, 0x10, 0x06, 0x2a, 0x7d, 0x0a, 0x0a, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, + 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, + 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, + 0x5f, 0x31, 0x5f, 0x30, 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, + 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x31, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x54, + 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x32, 0x10, 0x03, + 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, + 0x31, 0x5f, 0x33, 0x10, 0x04, 0x2a, 0x69, 0x0a, 0x0e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x53, 0x69, 0x64, 0x65, 0x12, 0x1f, 0x0a, 0x1b, 0x43, 0x4f, 0x4e, 0x4e, 0x45, + 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, + 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1a, 0x0a, 0x16, 0x43, 0x4f, 0x4e, 0x4e, + 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, + 0x4e, 0x54, 0x10, 0x01, 0x12, 0x1a, 0x0a, 0x16, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x49, + 0x4f, 0x4e, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x02, + 0x2a, 0x79, 0x0a, 0x0c, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, + 0x12, 0x1d, 0x0a, 0x19, 0x41, 0x4c, 0x50, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, + 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, + 0x16, 0x0a, 0x12, 0x41, 0x4c, 0x50, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, + 0x5f, 0x47, 0x52, 0x50, 0x43, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x41, 0x4c, 0x50, 0x4e, 0x5f, + 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x48, 0x54, 0x54, 0x50, 0x32, 0x10, 0x02, + 0x12, 0x19, 0x0a, 0x15, 0x41, 0x4c, 0x50, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, + 0x4c, 0x5f, 0x48, 0x54, 0x54, 0x50, 0x31, 0x5f, 0x31, 0x10, 0x03, 0x42, 0x39, 0x5a, 0x37, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x67, 0x6f, + 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_internal_proto_v2_common_common_proto_rawDescOnce sync.Once + file_internal_proto_v2_common_common_proto_rawDescData = file_internal_proto_v2_common_common_proto_rawDesc +) + +func file_internal_proto_v2_common_common_proto_rawDescGZIP() []byte { + file_internal_proto_v2_common_common_proto_rawDescOnce.Do(func() { + file_internal_proto_v2_common_common_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_proto_v2_common_common_proto_rawDescData) + }) + return file_internal_proto_v2_common_common_proto_rawDescData +} + +var file_internal_proto_v2_common_common_proto_enumTypes = make([]protoimpl.EnumInfo, 4) +var 
file_internal_proto_v2_common_common_proto_goTypes = []interface{}{ + (Ciphersuite)(0), // 0: s2a.proto.v2.Ciphersuite + (TLSVersion)(0), // 1: s2a.proto.v2.TLSVersion + (ConnectionSide)(0), // 2: s2a.proto.v2.ConnectionSide + (AlpnProtocol)(0), // 3: s2a.proto.v2.AlpnProtocol +} +var file_internal_proto_v2_common_common_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_internal_proto_v2_common_common_proto_init() } +func file_internal_proto_v2_common_common_proto_init() { + if File_internal_proto_v2_common_common_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_internal_proto_v2_common_common_proto_rawDesc, + NumEnums: 4, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_internal_proto_v2_common_common_proto_goTypes, + DependencyIndexes: file_internal_proto_v2_common_common_proto_depIdxs, + EnumInfos: file_internal_proto_v2_common_common_proto_enumTypes, + }.Build() + File_internal_proto_v2_common_common_proto = out.File + file_internal_proto_v2_common_common_proto_rawDesc = nil + file_internal_proto_v2_common_common_proto_goTypes = nil + file_internal_proto_v2_common_common_proto_depIdxs = nil +} diff -Nru temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto/s2a_context.pb.go temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto/s2a_context.pb.go --- temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto/s2a_context.pb.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto/s2a_context.pb.go 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,248 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v3.21.12 +// source: internal/proto/v2/s2a_context/s2a_context.proto + +package s2a_context_go_proto + +import ( + common_go_proto "github.com/google/s2a-go/internal/proto/common_go_proto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type S2AContext struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The SPIFFE ID from the peer leaf certificate, if present. + // + // This field is only populated if the leaf certificate is a valid SPIFFE + // SVID; in particular, there is a unique URI SAN and this URI SAN is a valid + // SPIFFE ID. + LeafCertSpiffeId string `protobuf:"bytes,1,opt,name=leaf_cert_spiffe_id,json=leafCertSpiffeId,proto3" json:"leaf_cert_spiffe_id,omitempty"` + // The URIs that are present in the SubjectAltName extension of the peer leaf + // certificate. + // + // Note that the extracted URIs are not validated and may not be properly + // formatted. + LeafCertUris []string `protobuf:"bytes,2,rep,name=leaf_cert_uris,json=leafCertUris,proto3" json:"leaf_cert_uris,omitempty"` + // The DNSNames that are present in the SubjectAltName extension of the peer + // leaf certificate. + LeafCertDnsnames []string `protobuf:"bytes,3,rep,name=leaf_cert_dnsnames,json=leafCertDnsnames,proto3" json:"leaf_cert_dnsnames,omitempty"` + // The (ordered) list of fingerprints in the certificate chain used to verify + // the given leaf certificate. The order MUST be from leaf certificate + // fingerprint to root certificate fingerprint. + // + // A fingerprint is the base-64 encoding of the SHA256 hash of the + // DER-encoding of a certificate. The list MAY be populated even if the peer + // certificate chain was NOT validated successfully. + PeerCertificateChainFingerprints []string `protobuf:"bytes,4,rep,name=peer_certificate_chain_fingerprints,json=peerCertificateChainFingerprints,proto3" json:"peer_certificate_chain_fingerprints,omitempty"` + // The local identity used during session setup. + LocalIdentity *common_go_proto.Identity `protobuf:"bytes,5,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` + // The SHA256 hash of the DER-encoding of the local leaf certificate used in + // the handshake. + LocalLeafCertFingerprint []byte `protobuf:"bytes,6,opt,name=local_leaf_cert_fingerprint,json=localLeafCertFingerprint,proto3" json:"local_leaf_cert_fingerprint,omitempty"` +} + +func (x *S2AContext) Reset() { + *x = S2AContext{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_s2a_context_s2a_context_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *S2AContext) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*S2AContext) ProtoMessage() {} + +func (x *S2AContext) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_s2a_context_s2a_context_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use S2AContext.ProtoReflect.Descriptor instead. 
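// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of the vendored diff): the
// nil-safe getters defined below can be used to summarise the peer captured
// in an S2AContext after the handshake. The helper name describePeer and the
// use of fmt (not imported by this generated file) are assumptions for the
// example only.
func describePeer(sc *S2AContext) string {
	return fmt.Sprintf("spiffe_id=%q uris=%v dnsnames=%v chain_fingerprints=%d",
		sc.GetLeafCertSpiffeId(),
		sc.GetLeafCertUris(),
		sc.GetLeafCertDnsnames(),
		len(sc.GetPeerCertificateChainFingerprints()))
}
// ---------------------------------------------------------------------------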
+func (*S2AContext) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescGZIP(), []int{0} +} + +func (x *S2AContext) GetLeafCertSpiffeId() string { + if x != nil { + return x.LeafCertSpiffeId + } + return "" +} + +func (x *S2AContext) GetLeafCertUris() []string { + if x != nil { + return x.LeafCertUris + } + return nil +} + +func (x *S2AContext) GetLeafCertDnsnames() []string { + if x != nil { + return x.LeafCertDnsnames + } + return nil +} + +func (x *S2AContext) GetPeerCertificateChainFingerprints() []string { + if x != nil { + return x.PeerCertificateChainFingerprints + } + return nil +} + +func (x *S2AContext) GetLocalIdentity() *common_go_proto.Identity { + if x != nil { + return x.LocalIdentity + } + return nil +} + +func (x *S2AContext) GetLocalLeafCertFingerprint() []byte { + if x != nil { + return x.LocalLeafCertFingerprint + } + return nil +} + +var File_internal_proto_v2_s2a_context_s2a_context_proto protoreflect.FileDescriptor + +var file_internal_proto_v2_s2a_context_s2a_context_proto_rawDesc = []byte{ + 0x0a, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2f, + 0x73, 0x32, 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x0c, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x1a, + 0x22, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x22, 0xd9, 0x02, 0x0a, 0x0a, 0x53, 0x32, 0x41, 0x43, 0x6f, 0x6e, 0x74, 0x65, + 0x78, 0x74, 0x12, 0x2d, 0x0a, 0x13, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, + 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x10, 0x6c, 0x65, 0x61, 0x66, 0x43, 0x65, 0x72, 0x74, 0x53, 0x70, 0x69, 0x66, 0x66, 0x65, 0x49, + 0x64, 0x12, 0x24, 0x0a, 0x0e, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x75, + 0x72, 0x69, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x6c, 0x65, 0x61, 0x66, 0x43, + 0x65, 0x72, 0x74, 0x55, 0x72, 0x69, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6c, 0x65, 0x61, 0x66, 0x5f, + 0x63, 0x65, 0x72, 0x74, 0x5f, 0x64, 0x6e, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x03, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x10, 0x6c, 0x65, 0x61, 0x66, 0x43, 0x65, 0x72, 0x74, 0x44, 0x6e, 0x73, + 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x4d, 0x0a, 0x23, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, + 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, + 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x20, 0x70, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, + 0x69, 0x6e, 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, + 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, + 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, + 0x12, 0x3d, 0x0a, 0x1b, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x63, + 0x65, 0x72, 0x74, 0x5f, 
0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x18, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x4c, 0x65, 0x61, 0x66, + 0x43, 0x65, 0x72, 0x74, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x42, + 0x3e, 0x5a, 0x3c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, + 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, + 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescOnce sync.Once + file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescData = file_internal_proto_v2_s2a_context_s2a_context_proto_rawDesc +) + +func file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescGZIP() []byte { + file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescOnce.Do(func() { + file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescData) + }) + return file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescData +} + +var file_internal_proto_v2_s2a_context_s2a_context_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_internal_proto_v2_s2a_context_s2a_context_proto_goTypes = []interface{}{ + (*S2AContext)(nil), // 0: s2a.proto.v2.S2AContext + (*common_go_proto.Identity)(nil), // 1: s2a.proto.Identity +} +var file_internal_proto_v2_s2a_context_s2a_context_proto_depIdxs = []int32{ + 1, // 0: s2a.proto.v2.S2AContext.local_identity:type_name -> s2a.proto.Identity + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_internal_proto_v2_s2a_context_s2a_context_proto_init() } +func file_internal_proto_v2_s2a_context_s2a_context_proto_init() { + if File_internal_proto_v2_s2a_context_s2a_context_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_internal_proto_v2_s2a_context_s2a_context_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*S2AContext); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_internal_proto_v2_s2a_context_s2a_context_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_internal_proto_v2_s2a_context_s2a_context_proto_goTypes, + DependencyIndexes: file_internal_proto_v2_s2a_context_s2a_context_proto_depIdxs, + MessageInfos: file_internal_proto_v2_s2a_context_s2a_context_proto_msgTypes, + }.Build() + File_internal_proto_v2_s2a_context_s2a_context_proto = out.File + file_internal_proto_v2_s2a_context_s2a_context_proto_rawDesc = nil + file_internal_proto_v2_s2a_context_s2a_context_proto_goTypes = nil + file_internal_proto_v2_s2a_context_s2a_context_proto_depIdxs = nil +} diff -Nru temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go 
temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go --- temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,2494 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v3.21.12 +// source: internal/proto/v2/s2a/s2a.proto + +package s2a_go_proto + +import ( + common_go_proto1 "github.com/google/s2a-go/internal/proto/common_go_proto" + common_go_proto "github.com/google/s2a-go/internal/proto/v2/common_go_proto" + s2a_context_go_proto "github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type SignatureAlgorithm int32 + +const ( + SignatureAlgorithm_S2A_SSL_SIGN_UNSPECIFIED SignatureAlgorithm = 0 + // RSA Public-Key Cryptography Standards #1. + SignatureAlgorithm_S2A_SSL_SIGN_RSA_PKCS1_SHA256 SignatureAlgorithm = 1 + SignatureAlgorithm_S2A_SSL_SIGN_RSA_PKCS1_SHA384 SignatureAlgorithm = 2 + SignatureAlgorithm_S2A_SSL_SIGN_RSA_PKCS1_SHA512 SignatureAlgorithm = 3 + // ECDSA. + SignatureAlgorithm_S2A_SSL_SIGN_ECDSA_SECP256R1_SHA256 SignatureAlgorithm = 4 + SignatureAlgorithm_S2A_SSL_SIGN_ECDSA_SECP384R1_SHA384 SignatureAlgorithm = 5 + SignatureAlgorithm_S2A_SSL_SIGN_ECDSA_SECP521R1_SHA512 SignatureAlgorithm = 6 + // RSA Probabilistic Signature Scheme. + SignatureAlgorithm_S2A_SSL_SIGN_RSA_PSS_RSAE_SHA256 SignatureAlgorithm = 7 + SignatureAlgorithm_S2A_SSL_SIGN_RSA_PSS_RSAE_SHA384 SignatureAlgorithm = 8 + SignatureAlgorithm_S2A_SSL_SIGN_RSA_PSS_RSAE_SHA512 SignatureAlgorithm = 9 + // ED25519. + SignatureAlgorithm_S2A_SSL_SIGN_ED25519 SignatureAlgorithm = 10 +) + +// Enum value maps for SignatureAlgorithm. 
+var ( + SignatureAlgorithm_name = map[int32]string{ + 0: "S2A_SSL_SIGN_UNSPECIFIED", + 1: "S2A_SSL_SIGN_RSA_PKCS1_SHA256", + 2: "S2A_SSL_SIGN_RSA_PKCS1_SHA384", + 3: "S2A_SSL_SIGN_RSA_PKCS1_SHA512", + 4: "S2A_SSL_SIGN_ECDSA_SECP256R1_SHA256", + 5: "S2A_SSL_SIGN_ECDSA_SECP384R1_SHA384", + 6: "S2A_SSL_SIGN_ECDSA_SECP521R1_SHA512", + 7: "S2A_SSL_SIGN_RSA_PSS_RSAE_SHA256", + 8: "S2A_SSL_SIGN_RSA_PSS_RSAE_SHA384", + 9: "S2A_SSL_SIGN_RSA_PSS_RSAE_SHA512", + 10: "S2A_SSL_SIGN_ED25519", + } + SignatureAlgorithm_value = map[string]int32{ + "S2A_SSL_SIGN_UNSPECIFIED": 0, + "S2A_SSL_SIGN_RSA_PKCS1_SHA256": 1, + "S2A_SSL_SIGN_RSA_PKCS1_SHA384": 2, + "S2A_SSL_SIGN_RSA_PKCS1_SHA512": 3, + "S2A_SSL_SIGN_ECDSA_SECP256R1_SHA256": 4, + "S2A_SSL_SIGN_ECDSA_SECP384R1_SHA384": 5, + "S2A_SSL_SIGN_ECDSA_SECP521R1_SHA512": 6, + "S2A_SSL_SIGN_RSA_PSS_RSAE_SHA256": 7, + "S2A_SSL_SIGN_RSA_PSS_RSAE_SHA384": 8, + "S2A_SSL_SIGN_RSA_PSS_RSAE_SHA512": 9, + "S2A_SSL_SIGN_ED25519": 10, + } +) + +func (x SignatureAlgorithm) Enum() *SignatureAlgorithm { + p := new(SignatureAlgorithm) + *p = x + return p +} + +func (x SignatureAlgorithm) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SignatureAlgorithm) Descriptor() protoreflect.EnumDescriptor { + return file_internal_proto_v2_s2a_s2a_proto_enumTypes[0].Descriptor() +} + +func (SignatureAlgorithm) Type() protoreflect.EnumType { + return &file_internal_proto_v2_s2a_s2a_proto_enumTypes[0] +} + +func (x SignatureAlgorithm) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use SignatureAlgorithm.Descriptor instead. +func (SignatureAlgorithm) EnumDescriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{0} +} + +type GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate int32 + +const ( + GetTlsConfigurationResp_ServerTlsConfiguration_UNSPECIFIED GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate = 0 + GetTlsConfigurationResp_ServerTlsConfiguration_DONT_REQUEST_CLIENT_CERTIFICATE GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate = 1 + GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_CLIENT_CERTIFICATE_BUT_DONT_VERIFY GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate = 2 + GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_CLIENT_CERTIFICATE_AND_VERIFY GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate = 3 + GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_BUT_DONT_VERIFY GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate = 4 + GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate = 5 +) + +// Enum value maps for GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate. 
+var ( + GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate_name = map[int32]string{ + 0: "UNSPECIFIED", + 1: "DONT_REQUEST_CLIENT_CERTIFICATE", + 2: "REQUEST_CLIENT_CERTIFICATE_BUT_DONT_VERIFY", + 3: "REQUEST_CLIENT_CERTIFICATE_AND_VERIFY", + 4: "REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_BUT_DONT_VERIFY", + 5: "REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY", + } + GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate_value = map[string]int32{ + "UNSPECIFIED": 0, + "DONT_REQUEST_CLIENT_CERTIFICATE": 1, + "REQUEST_CLIENT_CERTIFICATE_BUT_DONT_VERIFY": 2, + "REQUEST_CLIENT_CERTIFICATE_AND_VERIFY": 3, + "REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_BUT_DONT_VERIFY": 4, + "REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY": 5, + } +) + +func (x GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate) Enum() *GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate { + p := new(GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate) + *p = x + return p +} + +func (x GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate) Descriptor() protoreflect.EnumDescriptor { + return file_internal_proto_v2_s2a_s2a_proto_enumTypes[1].Descriptor() +} + +func (GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate) Type() protoreflect.EnumType { + return &file_internal_proto_v2_s2a_s2a_proto_enumTypes[1] +} + +func (x GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate.Descriptor instead. +func (GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate) EnumDescriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{4, 1, 0} +} + +type OffloadPrivateKeyOperationReq_PrivateKeyOperation int32 + +const ( + OffloadPrivateKeyOperationReq_UNSPECIFIED OffloadPrivateKeyOperationReq_PrivateKeyOperation = 0 + // When performing a TLS 1.2 or 1.3 handshake, the (partial) transcript of + // the TLS handshake must be signed to prove possession of the private key. + // + // See https://www.rfc-editor.org/rfc/rfc8446.html#section-4.4.3. + OffloadPrivateKeyOperationReq_SIGN OffloadPrivateKeyOperationReq_PrivateKeyOperation = 1 + // When performing a TLS 1.2 handshake using an RSA algorithm, the key + // exchange algorithm involves the client generating a premaster secret, + // encrypting it using the server's public key, and sending this encrypted + // blob to the server in a ClientKeyExchange message. + // + // See https://www.rfc-editor.org/rfc/rfc4346#section-7.4.7.1. + OffloadPrivateKeyOperationReq_DECRYPT OffloadPrivateKeyOperationReq_PrivateKeyOperation = 2 +) + +// Enum value maps for OffloadPrivateKeyOperationReq_PrivateKeyOperation. 
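// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of the vendored diff): a
// signing request for a handshake transcript would set the operation and
// signature algorithm as below; the digest itself travels in the in_bytes
// oneof of OffloadPrivateKeyOperationReq (defined later in this file, e.g. an
// *OffloadPrivateKeyOperationReq_Sha256Digest value) and is omitted here. The
// helper name is an assumption for the example only.
func exampleSignRequest() *OffloadPrivateKeyOperationReq {
	return &OffloadPrivateKeyOperationReq{
		Operation:          OffloadPrivateKeyOperationReq_SIGN,
		SignatureAlgorithm: SignatureAlgorithm_S2A_SSL_SIGN_ECDSA_SECP256R1_SHA256,
	}
}
// ---------------------------------------------------------------------------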
+var ( + OffloadPrivateKeyOperationReq_PrivateKeyOperation_name = map[int32]string{ + 0: "UNSPECIFIED", + 1: "SIGN", + 2: "DECRYPT", + } + OffloadPrivateKeyOperationReq_PrivateKeyOperation_value = map[string]int32{ + "UNSPECIFIED": 0, + "SIGN": 1, + "DECRYPT": 2, + } +) + +func (x OffloadPrivateKeyOperationReq_PrivateKeyOperation) Enum() *OffloadPrivateKeyOperationReq_PrivateKeyOperation { + p := new(OffloadPrivateKeyOperationReq_PrivateKeyOperation) + *p = x + return p +} + +func (x OffloadPrivateKeyOperationReq_PrivateKeyOperation) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (OffloadPrivateKeyOperationReq_PrivateKeyOperation) Descriptor() protoreflect.EnumDescriptor { + return file_internal_proto_v2_s2a_s2a_proto_enumTypes[2].Descriptor() +} + +func (OffloadPrivateKeyOperationReq_PrivateKeyOperation) Type() protoreflect.EnumType { + return &file_internal_proto_v2_s2a_s2a_proto_enumTypes[2] +} + +func (x OffloadPrivateKeyOperationReq_PrivateKeyOperation) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use OffloadPrivateKeyOperationReq_PrivateKeyOperation.Descriptor instead. +func (OffloadPrivateKeyOperationReq_PrivateKeyOperation) EnumDescriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{5, 0} +} + +type OffloadResumptionKeyOperationReq_ResumptionKeyOperation int32 + +const ( + OffloadResumptionKeyOperationReq_UNSPECIFIED OffloadResumptionKeyOperationReq_ResumptionKeyOperation = 0 + OffloadResumptionKeyOperationReq_ENCRYPT OffloadResumptionKeyOperationReq_ResumptionKeyOperation = 1 + OffloadResumptionKeyOperationReq_DECRYPT OffloadResumptionKeyOperationReq_ResumptionKeyOperation = 2 +) + +// Enum value maps for OffloadResumptionKeyOperationReq_ResumptionKeyOperation. +var ( + OffloadResumptionKeyOperationReq_ResumptionKeyOperation_name = map[int32]string{ + 0: "UNSPECIFIED", + 1: "ENCRYPT", + 2: "DECRYPT", + } + OffloadResumptionKeyOperationReq_ResumptionKeyOperation_value = map[string]int32{ + "UNSPECIFIED": 0, + "ENCRYPT": 1, + "DECRYPT": 2, + } +) + +func (x OffloadResumptionKeyOperationReq_ResumptionKeyOperation) Enum() *OffloadResumptionKeyOperationReq_ResumptionKeyOperation { + p := new(OffloadResumptionKeyOperationReq_ResumptionKeyOperation) + *p = x + return p +} + +func (x OffloadResumptionKeyOperationReq_ResumptionKeyOperation) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (OffloadResumptionKeyOperationReq_ResumptionKeyOperation) Descriptor() protoreflect.EnumDescriptor { + return file_internal_proto_v2_s2a_s2a_proto_enumTypes[3].Descriptor() +} + +func (OffloadResumptionKeyOperationReq_ResumptionKeyOperation) Type() protoreflect.EnumType { + return &file_internal_proto_v2_s2a_s2a_proto_enumTypes[3] +} + +func (x OffloadResumptionKeyOperationReq_ResumptionKeyOperation) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use OffloadResumptionKeyOperationReq_ResumptionKeyOperation.Descriptor instead. +func (OffloadResumptionKeyOperationReq_ResumptionKeyOperation) EnumDescriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{7, 0} +} + +type ValidatePeerCertificateChainReq_VerificationMode int32 + +const ( + // The default verification mode supported by S2A. 
+ ValidatePeerCertificateChainReq_UNSPECIFIED ValidatePeerCertificateChainReq_VerificationMode = 0 + // The SPIFFE verification mode selects the set of trusted certificates to + // use for path building based on the SPIFFE trust domain in the peer's leaf + // certificate. + ValidatePeerCertificateChainReq_SPIFFE ValidatePeerCertificateChainReq_VerificationMode = 1 + // The connect-to-Google verification mode uses the trust bundle for + // connecting to Google, e.g. *.mtls.googleapis.com endpoints. + ValidatePeerCertificateChainReq_CONNECT_TO_GOOGLE ValidatePeerCertificateChainReq_VerificationMode = 2 +) + +// Enum value maps for ValidatePeerCertificateChainReq_VerificationMode. +var ( + ValidatePeerCertificateChainReq_VerificationMode_name = map[int32]string{ + 0: "UNSPECIFIED", + 1: "SPIFFE", + 2: "CONNECT_TO_GOOGLE", + } + ValidatePeerCertificateChainReq_VerificationMode_value = map[string]int32{ + "UNSPECIFIED": 0, + "SPIFFE": 1, + "CONNECT_TO_GOOGLE": 2, + } +) + +func (x ValidatePeerCertificateChainReq_VerificationMode) Enum() *ValidatePeerCertificateChainReq_VerificationMode { + p := new(ValidatePeerCertificateChainReq_VerificationMode) + *p = x + return p +} + +func (x ValidatePeerCertificateChainReq_VerificationMode) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ValidatePeerCertificateChainReq_VerificationMode) Descriptor() protoreflect.EnumDescriptor { + return file_internal_proto_v2_s2a_s2a_proto_enumTypes[4].Descriptor() +} + +func (ValidatePeerCertificateChainReq_VerificationMode) Type() protoreflect.EnumType { + return &file_internal_proto_v2_s2a_s2a_proto_enumTypes[4] +} + +func (x ValidatePeerCertificateChainReq_VerificationMode) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ValidatePeerCertificateChainReq_VerificationMode.Descriptor instead. +func (ValidatePeerCertificateChainReq_VerificationMode) EnumDescriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{9, 0} +} + +type ValidatePeerCertificateChainResp_ValidationResult int32 + +const ( + ValidatePeerCertificateChainResp_UNSPECIFIED ValidatePeerCertificateChainResp_ValidationResult = 0 + ValidatePeerCertificateChainResp_SUCCESS ValidatePeerCertificateChainResp_ValidationResult = 1 + ValidatePeerCertificateChainResp_FAILURE ValidatePeerCertificateChainResp_ValidationResult = 2 +) + +// Enum value maps for ValidatePeerCertificateChainResp_ValidationResult. 
+var ( + ValidatePeerCertificateChainResp_ValidationResult_name = map[int32]string{ + 0: "UNSPECIFIED", + 1: "SUCCESS", + 2: "FAILURE", + } + ValidatePeerCertificateChainResp_ValidationResult_value = map[string]int32{ + "UNSPECIFIED": 0, + "SUCCESS": 1, + "FAILURE": 2, + } +) + +func (x ValidatePeerCertificateChainResp_ValidationResult) Enum() *ValidatePeerCertificateChainResp_ValidationResult { + p := new(ValidatePeerCertificateChainResp_ValidationResult) + *p = x + return p +} + +func (x ValidatePeerCertificateChainResp_ValidationResult) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ValidatePeerCertificateChainResp_ValidationResult) Descriptor() protoreflect.EnumDescriptor { + return file_internal_proto_v2_s2a_s2a_proto_enumTypes[5].Descriptor() +} + +func (ValidatePeerCertificateChainResp_ValidationResult) Type() protoreflect.EnumType { + return &file_internal_proto_v2_s2a_s2a_proto_enumTypes[5] +} + +func (x ValidatePeerCertificateChainResp_ValidationResult) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ValidatePeerCertificateChainResp_ValidationResult.Descriptor instead. +func (ValidatePeerCertificateChainResp_ValidationResult) EnumDescriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{10, 0} +} + +type AlpnPolicy struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // If true, the application MUST perform ALPN negotiation. + EnableAlpnNegotiation bool `protobuf:"varint,1,opt,name=enable_alpn_negotiation,json=enableAlpnNegotiation,proto3" json:"enable_alpn_negotiation,omitempty"` + // The ordered list of ALPN protocols that specify how the application SHOULD + // negotiate ALPN during the TLS handshake. + // + // The application MAY ignore any ALPN protocols in this list that are not + // supported by the application. + AlpnProtocols []common_go_proto.AlpnProtocol `protobuf:"varint,2,rep,packed,name=alpn_protocols,json=alpnProtocols,proto3,enum=s2a.proto.v2.AlpnProtocol" json:"alpn_protocols,omitempty"` +} + +func (x *AlpnPolicy) Reset() { + *x = AlpnPolicy{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AlpnPolicy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AlpnPolicy) ProtoMessage() {} + +func (x *AlpnPolicy) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AlpnPolicy.ProtoReflect.Descriptor instead. +func (*AlpnPolicy) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{0} +} + +func (x *AlpnPolicy) GetEnableAlpnNegotiation() bool { + if x != nil { + return x.EnableAlpnNegotiation + } + return false +} + +func (x *AlpnPolicy) GetAlpnProtocols() []common_go_proto.AlpnProtocol { + if x != nil { + return x.AlpnProtocols + } + return nil +} + +type AuthenticationMechanism struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Applications may specify an identity associated to an authentication + // mechanism. 
Otherwise, S2A assumes that the authentication mechanism is + // associated with the default identity. If the default identity cannot be + // determined, the request is rejected. + Identity *common_go_proto1.Identity `protobuf:"bytes,1,opt,name=identity,proto3" json:"identity,omitempty"` + // Types that are assignable to MechanismOneof: + // + // *AuthenticationMechanism_Token + MechanismOneof isAuthenticationMechanism_MechanismOneof `protobuf_oneof:"mechanism_oneof"` +} + +func (x *AuthenticationMechanism) Reset() { + *x = AuthenticationMechanism{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AuthenticationMechanism) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AuthenticationMechanism) ProtoMessage() {} + +func (x *AuthenticationMechanism) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AuthenticationMechanism.ProtoReflect.Descriptor instead. +func (*AuthenticationMechanism) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{1} +} + +func (x *AuthenticationMechanism) GetIdentity() *common_go_proto1.Identity { + if x != nil { + return x.Identity + } + return nil +} + +func (m *AuthenticationMechanism) GetMechanismOneof() isAuthenticationMechanism_MechanismOneof { + if m != nil { + return m.MechanismOneof + } + return nil +} + +func (x *AuthenticationMechanism) GetToken() string { + if x, ok := x.GetMechanismOneof().(*AuthenticationMechanism_Token); ok { + return x.Token + } + return "" +} + +type isAuthenticationMechanism_MechanismOneof interface { + isAuthenticationMechanism_MechanismOneof() +} + +type AuthenticationMechanism_Token struct { + // A token that the application uses to authenticate itself to S2A. + Token string `protobuf:"bytes,2,opt,name=token,proto3,oneof"` +} + +func (*AuthenticationMechanism_Token) isAuthenticationMechanism_MechanismOneof() {} + +type Status struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The status code that is specific to the application and the implementation + // of S2A, e.g., gRPC status code. + Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + // The status details. + Details string `protobuf:"bytes,2,opt,name=details,proto3" json:"details,omitempty"` +} + +func (x *Status) Reset() { + *x = Status{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Status) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Status) ProtoMessage() {} + +func (x *Status) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Status.ProtoReflect.Descriptor instead. 
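// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of the vendored diff): an
// application authenticating itself to S2A with a token populates the
// mechanism_oneof as below; the token value is a placeholder and the helper
// name is an assumption for the example only.
func exampleTokenMechanism() *AuthenticationMechanism {
	return &AuthenticationMechanism{
		MechanismOneof: &AuthenticationMechanism_Token{Token: "example-access-token"},
	}
}
// ---------------------------------------------------------------------------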
+func (*Status) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{2} +} + +func (x *Status) GetCode() uint32 { + if x != nil { + return x.Code + } + return 0 +} + +func (x *Status) GetDetails() string { + if x != nil { + return x.Details + } + return "" +} + +type GetTlsConfigurationReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The role of the application in the TLS connection. + ConnectionSide common_go_proto.ConnectionSide `protobuf:"varint,1,opt,name=connection_side,json=connectionSide,proto3,enum=s2a.proto.v2.ConnectionSide" json:"connection_side,omitempty"` + // The server name indication (SNI) extension, which MAY be populated when a + // server is offloading to S2A. The SNI is used to determine the server + // identity if the local identity in the request is empty. + Sni string `protobuf:"bytes,2,opt,name=sni,proto3" json:"sni,omitempty"` +} + +func (x *GetTlsConfigurationReq) Reset() { + *x = GetTlsConfigurationReq{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetTlsConfigurationReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetTlsConfigurationReq) ProtoMessage() {} + +func (x *GetTlsConfigurationReq) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetTlsConfigurationReq.ProtoReflect.Descriptor instead. +func (*GetTlsConfigurationReq) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{3} +} + +func (x *GetTlsConfigurationReq) GetConnectionSide() common_go_proto.ConnectionSide { + if x != nil { + return x.ConnectionSide + } + return common_go_proto.ConnectionSide(0) +} + +func (x *GetTlsConfigurationReq) GetSni() string { + if x != nil { + return x.Sni + } + return "" +} + +type GetTlsConfigurationResp struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to TlsConfiguration: + // + // *GetTlsConfigurationResp_ClientTlsConfiguration_ + // *GetTlsConfigurationResp_ServerTlsConfiguration_ + TlsConfiguration isGetTlsConfigurationResp_TlsConfiguration `protobuf_oneof:"tls_configuration"` +} + +func (x *GetTlsConfigurationResp) Reset() { + *x = GetTlsConfigurationResp{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetTlsConfigurationResp) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetTlsConfigurationResp) ProtoMessage() {} + +func (x *GetTlsConfigurationResp) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetTlsConfigurationResp.ProtoReflect.Descriptor instead. 
+func (*GetTlsConfigurationResp) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{4} +} + +func (m *GetTlsConfigurationResp) GetTlsConfiguration() isGetTlsConfigurationResp_TlsConfiguration { + if m != nil { + return m.TlsConfiguration + } + return nil +} + +func (x *GetTlsConfigurationResp) GetClientTlsConfiguration() *GetTlsConfigurationResp_ClientTlsConfiguration { + if x, ok := x.GetTlsConfiguration().(*GetTlsConfigurationResp_ClientTlsConfiguration_); ok { + return x.ClientTlsConfiguration + } + return nil +} + +func (x *GetTlsConfigurationResp) GetServerTlsConfiguration() *GetTlsConfigurationResp_ServerTlsConfiguration { + if x, ok := x.GetTlsConfiguration().(*GetTlsConfigurationResp_ServerTlsConfiguration_); ok { + return x.ServerTlsConfiguration + } + return nil +} + +type isGetTlsConfigurationResp_TlsConfiguration interface { + isGetTlsConfigurationResp_TlsConfiguration() +} + +type GetTlsConfigurationResp_ClientTlsConfiguration_ struct { + ClientTlsConfiguration *GetTlsConfigurationResp_ClientTlsConfiguration `protobuf:"bytes,1,opt,name=client_tls_configuration,json=clientTlsConfiguration,proto3,oneof"` +} + +type GetTlsConfigurationResp_ServerTlsConfiguration_ struct { + ServerTlsConfiguration *GetTlsConfigurationResp_ServerTlsConfiguration `protobuf:"bytes,2,opt,name=server_tls_configuration,json=serverTlsConfiguration,proto3,oneof"` +} + +func (*GetTlsConfigurationResp_ClientTlsConfiguration_) isGetTlsConfigurationResp_TlsConfiguration() { +} + +func (*GetTlsConfigurationResp_ServerTlsConfiguration_) isGetTlsConfigurationResp_TlsConfiguration() { +} + +type OffloadPrivateKeyOperationReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The operation the private key is used for. + Operation OffloadPrivateKeyOperationReq_PrivateKeyOperation `protobuf:"varint,1,opt,name=operation,proto3,enum=s2a.proto.v2.OffloadPrivateKeyOperationReq_PrivateKeyOperation" json:"operation,omitempty"` + // The signature algorithm to be used for signing operations. + SignatureAlgorithm SignatureAlgorithm `protobuf:"varint,2,opt,name=signature_algorithm,json=signatureAlgorithm,proto3,enum=s2a.proto.v2.SignatureAlgorithm" json:"signature_algorithm,omitempty"` + // The input bytes to be signed or decrypted. + // + // Types that are assignable to InBytes: + // + // *OffloadPrivateKeyOperationReq_RawBytes + // *OffloadPrivateKeyOperationReq_Sha256Digest + // *OffloadPrivateKeyOperationReq_Sha384Digest + // *OffloadPrivateKeyOperationReq_Sha512Digest + InBytes isOffloadPrivateKeyOperationReq_InBytes `protobuf_oneof:"in_bytes"` +} + +func (x *OffloadPrivateKeyOperationReq) Reset() { + *x = OffloadPrivateKeyOperationReq{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OffloadPrivateKeyOperationReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OffloadPrivateKeyOperationReq) ProtoMessage() {} + +func (x *OffloadPrivateKeyOperationReq) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OffloadPrivateKeyOperationReq.ProtoReflect.Descriptor instead. 
+func (*OffloadPrivateKeyOperationReq) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{5} +} + +func (x *OffloadPrivateKeyOperationReq) GetOperation() OffloadPrivateKeyOperationReq_PrivateKeyOperation { + if x != nil { + return x.Operation + } + return OffloadPrivateKeyOperationReq_UNSPECIFIED +} + +func (x *OffloadPrivateKeyOperationReq) GetSignatureAlgorithm() SignatureAlgorithm { + if x != nil { + return x.SignatureAlgorithm + } + return SignatureAlgorithm_S2A_SSL_SIGN_UNSPECIFIED +} + +func (m *OffloadPrivateKeyOperationReq) GetInBytes() isOffloadPrivateKeyOperationReq_InBytes { + if m != nil { + return m.InBytes + } + return nil +} + +func (x *OffloadPrivateKeyOperationReq) GetRawBytes() []byte { + if x, ok := x.GetInBytes().(*OffloadPrivateKeyOperationReq_RawBytes); ok { + return x.RawBytes + } + return nil +} + +func (x *OffloadPrivateKeyOperationReq) GetSha256Digest() []byte { + if x, ok := x.GetInBytes().(*OffloadPrivateKeyOperationReq_Sha256Digest); ok { + return x.Sha256Digest + } + return nil +} + +func (x *OffloadPrivateKeyOperationReq) GetSha384Digest() []byte { + if x, ok := x.GetInBytes().(*OffloadPrivateKeyOperationReq_Sha384Digest); ok { + return x.Sha384Digest + } + return nil +} + +func (x *OffloadPrivateKeyOperationReq) GetSha512Digest() []byte { + if x, ok := x.GetInBytes().(*OffloadPrivateKeyOperationReq_Sha512Digest); ok { + return x.Sha512Digest + } + return nil +} + +type isOffloadPrivateKeyOperationReq_InBytes interface { + isOffloadPrivateKeyOperationReq_InBytes() +} + +type OffloadPrivateKeyOperationReq_RawBytes struct { + // Raw bytes to be hashed and signed, or decrypted. + RawBytes []byte `protobuf:"bytes,4,opt,name=raw_bytes,json=rawBytes,proto3,oneof"` +} + +type OffloadPrivateKeyOperationReq_Sha256Digest struct { + // A SHA256 hash to be signed. Must be 32 bytes. + Sha256Digest []byte `protobuf:"bytes,5,opt,name=sha256_digest,json=sha256Digest,proto3,oneof"` +} + +type OffloadPrivateKeyOperationReq_Sha384Digest struct { + // A SHA384 hash to be signed. Must be 48 bytes. + Sha384Digest []byte `protobuf:"bytes,6,opt,name=sha384_digest,json=sha384Digest,proto3,oneof"` +} + +type OffloadPrivateKeyOperationReq_Sha512Digest struct { + // A SHA512 hash to be signed. Must be 64 bytes. + Sha512Digest []byte `protobuf:"bytes,7,opt,name=sha512_digest,json=sha512Digest,proto3,oneof"` +} + +func (*OffloadPrivateKeyOperationReq_RawBytes) isOffloadPrivateKeyOperationReq_InBytes() {} + +func (*OffloadPrivateKeyOperationReq_Sha256Digest) isOffloadPrivateKeyOperationReq_InBytes() {} + +func (*OffloadPrivateKeyOperationReq_Sha384Digest) isOffloadPrivateKeyOperationReq_InBytes() {} + +func (*OffloadPrivateKeyOperationReq_Sha512Digest) isOffloadPrivateKeyOperationReq_InBytes() {} + +type OffloadPrivateKeyOperationResp struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The signed or decrypted output bytes. 
+ OutBytes []byte `protobuf:"bytes,1,opt,name=out_bytes,json=outBytes,proto3" json:"out_bytes,omitempty"` +} + +func (x *OffloadPrivateKeyOperationResp) Reset() { + *x = OffloadPrivateKeyOperationResp{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OffloadPrivateKeyOperationResp) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OffloadPrivateKeyOperationResp) ProtoMessage() {} + +func (x *OffloadPrivateKeyOperationResp) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OffloadPrivateKeyOperationResp.ProtoReflect.Descriptor instead. +func (*OffloadPrivateKeyOperationResp) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{6} +} + +func (x *OffloadPrivateKeyOperationResp) GetOutBytes() []byte { + if x != nil { + return x.OutBytes + } + return nil +} + +type OffloadResumptionKeyOperationReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The operation the resumption key is used for. + Operation OffloadResumptionKeyOperationReq_ResumptionKeyOperation `protobuf:"varint,1,opt,name=operation,proto3,enum=s2a.proto.v2.OffloadResumptionKeyOperationReq_ResumptionKeyOperation" json:"operation,omitempty"` + // The bytes to be encrypted or decrypted. + InBytes []byte `protobuf:"bytes,2,opt,name=in_bytes,json=inBytes,proto3" json:"in_bytes,omitempty"` +} + +func (x *OffloadResumptionKeyOperationReq) Reset() { + *x = OffloadResumptionKeyOperationReq{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OffloadResumptionKeyOperationReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OffloadResumptionKeyOperationReq) ProtoMessage() {} + +func (x *OffloadResumptionKeyOperationReq) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OffloadResumptionKeyOperationReq.ProtoReflect.Descriptor instead. +func (*OffloadResumptionKeyOperationReq) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{7} +} + +func (x *OffloadResumptionKeyOperationReq) GetOperation() OffloadResumptionKeyOperationReq_ResumptionKeyOperation { + if x != nil { + return x.Operation + } + return OffloadResumptionKeyOperationReq_UNSPECIFIED +} + +func (x *OffloadResumptionKeyOperationReq) GetInBytes() []byte { + if x != nil { + return x.InBytes + } + return nil +} + +type OffloadResumptionKeyOperationResp struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The encrypted or decrypted bytes. 
+ OutBytes []byte `protobuf:"bytes,1,opt,name=out_bytes,json=outBytes,proto3" json:"out_bytes,omitempty"` +} + +func (x *OffloadResumptionKeyOperationResp) Reset() { + *x = OffloadResumptionKeyOperationResp{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OffloadResumptionKeyOperationResp) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OffloadResumptionKeyOperationResp) ProtoMessage() {} + +func (x *OffloadResumptionKeyOperationResp) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OffloadResumptionKeyOperationResp.ProtoReflect.Descriptor instead. +func (*OffloadResumptionKeyOperationResp) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{8} +} + +func (x *OffloadResumptionKeyOperationResp) GetOutBytes() []byte { + if x != nil { + return x.OutBytes + } + return nil +} + +type ValidatePeerCertificateChainReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The verification mode that S2A MUST use to validate the peer certificate + // chain. + Mode ValidatePeerCertificateChainReq_VerificationMode `protobuf:"varint,1,opt,name=mode,proto3,enum=s2a.proto.v2.ValidatePeerCertificateChainReq_VerificationMode" json:"mode,omitempty"` + // Types that are assignable to PeerOneof: + // + // *ValidatePeerCertificateChainReq_ClientPeer_ + // *ValidatePeerCertificateChainReq_ServerPeer_ + PeerOneof isValidatePeerCertificateChainReq_PeerOneof `protobuf_oneof:"peer_oneof"` +} + +func (x *ValidatePeerCertificateChainReq) Reset() { + *x = ValidatePeerCertificateChainReq{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidatePeerCertificateChainReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidatePeerCertificateChainReq) ProtoMessage() {} + +func (x *ValidatePeerCertificateChainReq) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidatePeerCertificateChainReq.ProtoReflect.Descriptor instead. 
+func (*ValidatePeerCertificateChainReq) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{9} +} + +func (x *ValidatePeerCertificateChainReq) GetMode() ValidatePeerCertificateChainReq_VerificationMode { + if x != nil { + return x.Mode + } + return ValidatePeerCertificateChainReq_UNSPECIFIED +} + +func (m *ValidatePeerCertificateChainReq) GetPeerOneof() isValidatePeerCertificateChainReq_PeerOneof { + if m != nil { + return m.PeerOneof + } + return nil +} + +func (x *ValidatePeerCertificateChainReq) GetClientPeer() *ValidatePeerCertificateChainReq_ClientPeer { + if x, ok := x.GetPeerOneof().(*ValidatePeerCertificateChainReq_ClientPeer_); ok { + return x.ClientPeer + } + return nil +} + +func (x *ValidatePeerCertificateChainReq) GetServerPeer() *ValidatePeerCertificateChainReq_ServerPeer { + if x, ok := x.GetPeerOneof().(*ValidatePeerCertificateChainReq_ServerPeer_); ok { + return x.ServerPeer + } + return nil +} + +type isValidatePeerCertificateChainReq_PeerOneof interface { + isValidatePeerCertificateChainReq_PeerOneof() +} + +type ValidatePeerCertificateChainReq_ClientPeer_ struct { + ClientPeer *ValidatePeerCertificateChainReq_ClientPeer `protobuf:"bytes,2,opt,name=client_peer,json=clientPeer,proto3,oneof"` +} + +type ValidatePeerCertificateChainReq_ServerPeer_ struct { + ServerPeer *ValidatePeerCertificateChainReq_ServerPeer `protobuf:"bytes,3,opt,name=server_peer,json=serverPeer,proto3,oneof"` +} + +func (*ValidatePeerCertificateChainReq_ClientPeer_) isValidatePeerCertificateChainReq_PeerOneof() {} + +func (*ValidatePeerCertificateChainReq_ServerPeer_) isValidatePeerCertificateChainReq_PeerOneof() {} + +type ValidatePeerCertificateChainResp struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The result of validating the peer certificate chain. + ValidationResult ValidatePeerCertificateChainResp_ValidationResult `protobuf:"varint,1,opt,name=validation_result,json=validationResult,proto3,enum=s2a.proto.v2.ValidatePeerCertificateChainResp_ValidationResult" json:"validation_result,omitempty"` + // The validation details. This field is only populated when the validation + // result is NOT SUCCESS. + ValidationDetails string `protobuf:"bytes,2,opt,name=validation_details,json=validationDetails,proto3" json:"validation_details,omitempty"` + // The S2A context contains information from the peer certificate chain. + // + // The S2A context MAY be populated even if validation of the peer certificate + // chain fails. 
+ Context *s2a_context_go_proto.S2AContext `protobuf:"bytes,3,opt,name=context,proto3" json:"context,omitempty"` +} + +func (x *ValidatePeerCertificateChainResp) Reset() { + *x = ValidatePeerCertificateChainResp{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidatePeerCertificateChainResp) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidatePeerCertificateChainResp) ProtoMessage() {} + +func (x *ValidatePeerCertificateChainResp) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidatePeerCertificateChainResp.ProtoReflect.Descriptor instead. +func (*ValidatePeerCertificateChainResp) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{10} +} + +func (x *ValidatePeerCertificateChainResp) GetValidationResult() ValidatePeerCertificateChainResp_ValidationResult { + if x != nil { + return x.ValidationResult + } + return ValidatePeerCertificateChainResp_UNSPECIFIED +} + +func (x *ValidatePeerCertificateChainResp) GetValidationDetails() string { + if x != nil { + return x.ValidationDetails + } + return "" +} + +func (x *ValidatePeerCertificateChainResp) GetContext() *s2a_context_go_proto.S2AContext { + if x != nil { + return x.Context + } + return nil +} + +type SessionReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The identity corresponding to the TLS configurations that MUST be used for + // the TLS handshake. + // + // If a managed identity already exists, the local identity and authentication + // mechanisms are ignored. If a managed identity doesn't exist and the local + // identity is not populated, S2A will try to deduce the managed identity to + // use from the SNI extension. If that also fails, S2A uses the default + // identity (if one exists). + LocalIdentity *common_go_proto1.Identity `protobuf:"bytes,1,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` + // The authentication mechanisms that the application wishes to use to + // authenticate to S2A, ordered by preference. S2A will always use the first + // authentication mechanism that matches the managed identity. 
+ AuthenticationMechanisms []*AuthenticationMechanism `protobuf:"bytes,2,rep,name=authentication_mechanisms,json=authenticationMechanisms,proto3" json:"authentication_mechanisms,omitempty"` + // Types that are assignable to ReqOneof: + // + // *SessionReq_GetTlsConfigurationReq + // *SessionReq_OffloadPrivateKeyOperationReq + // *SessionReq_OffloadResumptionKeyOperationReq + // *SessionReq_ValidatePeerCertificateChainReq + ReqOneof isSessionReq_ReqOneof `protobuf_oneof:"req_oneof"` +} + +func (x *SessionReq) Reset() { + *x = SessionReq{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SessionReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SessionReq) ProtoMessage() {} + +func (x *SessionReq) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SessionReq.ProtoReflect.Descriptor instead. +func (*SessionReq) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{11} +} + +func (x *SessionReq) GetLocalIdentity() *common_go_proto1.Identity { + if x != nil { + return x.LocalIdentity + } + return nil +} + +func (x *SessionReq) GetAuthenticationMechanisms() []*AuthenticationMechanism { + if x != nil { + return x.AuthenticationMechanisms + } + return nil +} + +func (m *SessionReq) GetReqOneof() isSessionReq_ReqOneof { + if m != nil { + return m.ReqOneof + } + return nil +} + +func (x *SessionReq) GetGetTlsConfigurationReq() *GetTlsConfigurationReq { + if x, ok := x.GetReqOneof().(*SessionReq_GetTlsConfigurationReq); ok { + return x.GetTlsConfigurationReq + } + return nil +} + +func (x *SessionReq) GetOffloadPrivateKeyOperationReq() *OffloadPrivateKeyOperationReq { + if x, ok := x.GetReqOneof().(*SessionReq_OffloadPrivateKeyOperationReq); ok { + return x.OffloadPrivateKeyOperationReq + } + return nil +} + +func (x *SessionReq) GetOffloadResumptionKeyOperationReq() *OffloadResumptionKeyOperationReq { + if x, ok := x.GetReqOneof().(*SessionReq_OffloadResumptionKeyOperationReq); ok { + return x.OffloadResumptionKeyOperationReq + } + return nil +} + +func (x *SessionReq) GetValidatePeerCertificateChainReq() *ValidatePeerCertificateChainReq { + if x, ok := x.GetReqOneof().(*SessionReq_ValidatePeerCertificateChainReq); ok { + return x.ValidatePeerCertificateChainReq + } + return nil +} + +type isSessionReq_ReqOneof interface { + isSessionReq_ReqOneof() +} + +type SessionReq_GetTlsConfigurationReq struct { + // Requests the certificate chain and TLS configuration corresponding to the + // local identity, which the application MUST use to negotiate the TLS + // handshake. + GetTlsConfigurationReq *GetTlsConfigurationReq `protobuf:"bytes,3,opt,name=get_tls_configuration_req,json=getTlsConfigurationReq,proto3,oneof"` +} + +type SessionReq_OffloadPrivateKeyOperationReq struct { + // Signs or decrypts the input bytes using a private key corresponding to + // the local identity in the request. + // + // WARNING: More than one OffloadPrivateKeyOperationReq may be sent to the + // S2Av2 by a server during a TLS 1.2 handshake. 
+ OffloadPrivateKeyOperationReq *OffloadPrivateKeyOperationReq `protobuf:"bytes,4,opt,name=offload_private_key_operation_req,json=offloadPrivateKeyOperationReq,proto3,oneof"` +} + +type SessionReq_OffloadResumptionKeyOperationReq struct { + // Encrypts or decrypts the input bytes using a resumption key corresponding + // to the local identity in the request. + OffloadResumptionKeyOperationReq *OffloadResumptionKeyOperationReq `protobuf:"bytes,5,opt,name=offload_resumption_key_operation_req,json=offloadResumptionKeyOperationReq,proto3,oneof"` +} + +type SessionReq_ValidatePeerCertificateChainReq struct { + // Verifies the peer's certificate chain using + // (a) trust bundles corresponding to the local identity in the request, and + // (b) the verification mode in the request. + ValidatePeerCertificateChainReq *ValidatePeerCertificateChainReq `protobuf:"bytes,6,opt,name=validate_peer_certificate_chain_req,json=validatePeerCertificateChainReq,proto3,oneof"` +} + +func (*SessionReq_GetTlsConfigurationReq) isSessionReq_ReqOneof() {} + +func (*SessionReq_OffloadPrivateKeyOperationReq) isSessionReq_ReqOneof() {} + +func (*SessionReq_OffloadResumptionKeyOperationReq) isSessionReq_ReqOneof() {} + +func (*SessionReq_ValidatePeerCertificateChainReq) isSessionReq_ReqOneof() {} + +type SessionResp struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Status of the session response. + // + // The status field is populated so that if an error occurs when making an + // individual request, then communication with the S2A may continue. If an + // error is returned directly (e.g. at the gRPC layer), then it may result + // that the bidirectional stream being closed. + Status *Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` + // Types that are assignable to RespOneof: + // + // *SessionResp_GetTlsConfigurationResp + // *SessionResp_OffloadPrivateKeyOperationResp + // *SessionResp_OffloadResumptionKeyOperationResp + // *SessionResp_ValidatePeerCertificateChainResp + RespOneof isSessionResp_RespOneof `protobuf_oneof:"resp_oneof"` +} + +func (x *SessionResp) Reset() { + *x = SessionResp{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SessionResp) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SessionResp) ProtoMessage() {} + +func (x *SessionResp) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SessionResp.ProtoReflect.Descriptor instead. 
+func (*SessionResp) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{12} +} + +func (x *SessionResp) GetStatus() *Status { + if x != nil { + return x.Status + } + return nil +} + +func (m *SessionResp) GetRespOneof() isSessionResp_RespOneof { + if m != nil { + return m.RespOneof + } + return nil +} + +func (x *SessionResp) GetGetTlsConfigurationResp() *GetTlsConfigurationResp { + if x, ok := x.GetRespOneof().(*SessionResp_GetTlsConfigurationResp); ok { + return x.GetTlsConfigurationResp + } + return nil +} + +func (x *SessionResp) GetOffloadPrivateKeyOperationResp() *OffloadPrivateKeyOperationResp { + if x, ok := x.GetRespOneof().(*SessionResp_OffloadPrivateKeyOperationResp); ok { + return x.OffloadPrivateKeyOperationResp + } + return nil +} + +func (x *SessionResp) GetOffloadResumptionKeyOperationResp() *OffloadResumptionKeyOperationResp { + if x, ok := x.GetRespOneof().(*SessionResp_OffloadResumptionKeyOperationResp); ok { + return x.OffloadResumptionKeyOperationResp + } + return nil +} + +func (x *SessionResp) GetValidatePeerCertificateChainResp() *ValidatePeerCertificateChainResp { + if x, ok := x.GetRespOneof().(*SessionResp_ValidatePeerCertificateChainResp); ok { + return x.ValidatePeerCertificateChainResp + } + return nil +} + +type isSessionResp_RespOneof interface { + isSessionResp_RespOneof() +} + +type SessionResp_GetTlsConfigurationResp struct { + // Contains the certificate chain and TLS configurations corresponding to + // the local identity. + GetTlsConfigurationResp *GetTlsConfigurationResp `protobuf:"bytes,2,opt,name=get_tls_configuration_resp,json=getTlsConfigurationResp,proto3,oneof"` +} + +type SessionResp_OffloadPrivateKeyOperationResp struct { + // Contains the signed or encrypted output bytes using the private key + // corresponding to the local identity. + OffloadPrivateKeyOperationResp *OffloadPrivateKeyOperationResp `protobuf:"bytes,3,opt,name=offload_private_key_operation_resp,json=offloadPrivateKeyOperationResp,proto3,oneof"` +} + +type SessionResp_OffloadResumptionKeyOperationResp struct { + // Contains the encrypted or decrypted output bytes using the resumption key + // corresponding to the local identity. + OffloadResumptionKeyOperationResp *OffloadResumptionKeyOperationResp `protobuf:"bytes,4,opt,name=offload_resumption_key_operation_resp,json=offloadResumptionKeyOperationResp,proto3,oneof"` +} + +type SessionResp_ValidatePeerCertificateChainResp struct { + // Contains the validation result, peer identity and fingerprints of peer + // certificates. + ValidatePeerCertificateChainResp *ValidatePeerCertificateChainResp `protobuf:"bytes,5,opt,name=validate_peer_certificate_chain_resp,json=validatePeerCertificateChainResp,proto3,oneof"` +} + +func (*SessionResp_GetTlsConfigurationResp) isSessionResp_RespOneof() {} + +func (*SessionResp_OffloadPrivateKeyOperationResp) isSessionResp_RespOneof() {} + +func (*SessionResp_OffloadResumptionKeyOperationResp) isSessionResp_RespOneof() {} + +func (*SessionResp_ValidatePeerCertificateChainResp) isSessionResp_RespOneof() {} + +// Next ID: 8 +type GetTlsConfigurationResp_ClientTlsConfiguration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The certificate chain that the client MUST use for the TLS handshake. + // It's a list of PEM-encoded certificates, ordered from leaf to root, + // excluding the root. 
+ CertificateChain []string `protobuf:"bytes,1,rep,name=certificate_chain,json=certificateChain,proto3" json:"certificate_chain,omitempty"` + // The minimum TLS version number that the client MUST use for the TLS + // handshake. If this field is not provided, the client MUST use the default + // minimum version of the client's TLS library. + MinTlsVersion common_go_proto.TLSVersion `protobuf:"varint,2,opt,name=min_tls_version,json=minTlsVersion,proto3,enum=s2a.proto.v2.TLSVersion" json:"min_tls_version,omitempty"` + // The maximum TLS version number that the client MUST use for the TLS + // handshake. If this field is not provided, the client MUST use the default + // maximum version of the client's TLS library. + MaxTlsVersion common_go_proto.TLSVersion `protobuf:"varint,3,opt,name=max_tls_version,json=maxTlsVersion,proto3,enum=s2a.proto.v2.TLSVersion" json:"max_tls_version,omitempty"` + // The ordered list of TLS 1.0-1.2 ciphersuites that the client MAY offer to + // negotiate in the TLS handshake. + Ciphersuites []common_go_proto.Ciphersuite `protobuf:"varint,6,rep,packed,name=ciphersuites,proto3,enum=s2a.proto.v2.Ciphersuite" json:"ciphersuites,omitempty"` + // The policy that dictates how the client negotiates ALPN during the TLS + // handshake. + AlpnPolicy *AlpnPolicy `protobuf:"bytes,7,opt,name=alpn_policy,json=alpnPolicy,proto3" json:"alpn_policy,omitempty"` +} + +func (x *GetTlsConfigurationResp_ClientTlsConfiguration) Reset() { + *x = GetTlsConfigurationResp_ClientTlsConfiguration{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetTlsConfigurationResp_ClientTlsConfiguration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetTlsConfigurationResp_ClientTlsConfiguration) ProtoMessage() {} + +func (x *GetTlsConfigurationResp_ClientTlsConfiguration) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetTlsConfigurationResp_ClientTlsConfiguration.ProtoReflect.Descriptor instead. 
+func (*GetTlsConfigurationResp_ClientTlsConfiguration) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{4, 0} +} + +func (x *GetTlsConfigurationResp_ClientTlsConfiguration) GetCertificateChain() []string { + if x != nil { + return x.CertificateChain + } + return nil +} + +func (x *GetTlsConfigurationResp_ClientTlsConfiguration) GetMinTlsVersion() common_go_proto.TLSVersion { + if x != nil { + return x.MinTlsVersion + } + return common_go_proto.TLSVersion(0) +} + +func (x *GetTlsConfigurationResp_ClientTlsConfiguration) GetMaxTlsVersion() common_go_proto.TLSVersion { + if x != nil { + return x.MaxTlsVersion + } + return common_go_proto.TLSVersion(0) +} + +func (x *GetTlsConfigurationResp_ClientTlsConfiguration) GetCiphersuites() []common_go_proto.Ciphersuite { + if x != nil { + return x.Ciphersuites + } + return nil +} + +func (x *GetTlsConfigurationResp_ClientTlsConfiguration) GetAlpnPolicy() *AlpnPolicy { + if x != nil { + return x.AlpnPolicy + } + return nil +} + +// Next ID: 12 +type GetTlsConfigurationResp_ServerTlsConfiguration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The certificate chain that the server MUST use for the TLS handshake. + // It's a list of PEM-encoded certificates, ordered from leaf to root, + // excluding the root. + CertificateChain []string `protobuf:"bytes,1,rep,name=certificate_chain,json=certificateChain,proto3" json:"certificate_chain,omitempty"` + // The minimum TLS version number that the server MUST use for the TLS + // handshake. If this field is not provided, the server MUST use the default + // minimum version of the server's TLS library. + MinTlsVersion common_go_proto.TLSVersion `protobuf:"varint,2,opt,name=min_tls_version,json=minTlsVersion,proto3,enum=s2a.proto.v2.TLSVersion" json:"min_tls_version,omitempty"` + // The maximum TLS version number that the server MUST use for the TLS + // handshake. If this field is not provided, the server MUST use the default + // maximum version of the server's TLS library. + MaxTlsVersion common_go_proto.TLSVersion `protobuf:"varint,3,opt,name=max_tls_version,json=maxTlsVersion,proto3,enum=s2a.proto.v2.TLSVersion" json:"max_tls_version,omitempty"` + // The ordered list of TLS 1.0-1.2 ciphersuites that the server MAY offer to + // negotiate in the TLS handshake. + Ciphersuites []common_go_proto.Ciphersuite `protobuf:"varint,10,rep,packed,name=ciphersuites,proto3,enum=s2a.proto.v2.Ciphersuite" json:"ciphersuites,omitempty"` + // Whether to enable TLS resumption. + TlsResumptionEnabled bool `protobuf:"varint,6,opt,name=tls_resumption_enabled,json=tlsResumptionEnabled,proto3" json:"tls_resumption_enabled,omitempty"` + // Whether the server MUST request a client certificate (i.e. to negotiate + // TLS vs. mTLS). + RequestClientCertificate GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate `protobuf:"varint,7,opt,name=request_client_certificate,json=requestClientCertificate,proto3,enum=s2a.proto.v2.GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate" json:"request_client_certificate,omitempty"` + // Returns the maximum number of extra bytes that + // |OffloadResumptionKeyOperation| can add to the number of unencrypted + // bytes to form the encrypted bytes. 
+ MaxOverheadOfTicketAead uint32 `protobuf:"varint,9,opt,name=max_overhead_of_ticket_aead,json=maxOverheadOfTicketAead,proto3" json:"max_overhead_of_ticket_aead,omitempty"` + // The policy that dictates how the server negotiates ALPN during the TLS + // handshake. + AlpnPolicy *AlpnPolicy `protobuf:"bytes,11,opt,name=alpn_policy,json=alpnPolicy,proto3" json:"alpn_policy,omitempty"` +} + +func (x *GetTlsConfigurationResp_ServerTlsConfiguration) Reset() { + *x = GetTlsConfigurationResp_ServerTlsConfiguration{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetTlsConfigurationResp_ServerTlsConfiguration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetTlsConfigurationResp_ServerTlsConfiguration) ProtoMessage() {} + +func (x *GetTlsConfigurationResp_ServerTlsConfiguration) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetTlsConfigurationResp_ServerTlsConfiguration.ProtoReflect.Descriptor instead. +func (*GetTlsConfigurationResp_ServerTlsConfiguration) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{4, 1} +} + +func (x *GetTlsConfigurationResp_ServerTlsConfiguration) GetCertificateChain() []string { + if x != nil { + return x.CertificateChain + } + return nil +} + +func (x *GetTlsConfigurationResp_ServerTlsConfiguration) GetMinTlsVersion() common_go_proto.TLSVersion { + if x != nil { + return x.MinTlsVersion + } + return common_go_proto.TLSVersion(0) +} + +func (x *GetTlsConfigurationResp_ServerTlsConfiguration) GetMaxTlsVersion() common_go_proto.TLSVersion { + if x != nil { + return x.MaxTlsVersion + } + return common_go_proto.TLSVersion(0) +} + +func (x *GetTlsConfigurationResp_ServerTlsConfiguration) GetCiphersuites() []common_go_proto.Ciphersuite { + if x != nil { + return x.Ciphersuites + } + return nil +} + +func (x *GetTlsConfigurationResp_ServerTlsConfiguration) GetTlsResumptionEnabled() bool { + if x != nil { + return x.TlsResumptionEnabled + } + return false +} + +func (x *GetTlsConfigurationResp_ServerTlsConfiguration) GetRequestClientCertificate() GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate { + if x != nil { + return x.RequestClientCertificate + } + return GetTlsConfigurationResp_ServerTlsConfiguration_UNSPECIFIED +} + +func (x *GetTlsConfigurationResp_ServerTlsConfiguration) GetMaxOverheadOfTicketAead() uint32 { + if x != nil { + return x.MaxOverheadOfTicketAead + } + return 0 +} + +func (x *GetTlsConfigurationResp_ServerTlsConfiguration) GetAlpnPolicy() *AlpnPolicy { + if x != nil { + return x.AlpnPolicy + } + return nil +} + +type ValidatePeerCertificateChainReq_ClientPeer struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The certificate chain to be verified. The chain MUST be a list of + // DER-encoded certificates, ordered from leaf to root, excluding the root. 
+ CertificateChain [][]byte `protobuf:"bytes,1,rep,name=certificate_chain,json=certificateChain,proto3" json:"certificate_chain,omitempty"` +} + +func (x *ValidatePeerCertificateChainReq_ClientPeer) Reset() { + *x = ValidatePeerCertificateChainReq_ClientPeer{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidatePeerCertificateChainReq_ClientPeer) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidatePeerCertificateChainReq_ClientPeer) ProtoMessage() {} + +func (x *ValidatePeerCertificateChainReq_ClientPeer) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidatePeerCertificateChainReq_ClientPeer.ProtoReflect.Descriptor instead. +func (*ValidatePeerCertificateChainReq_ClientPeer) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{9, 0} +} + +func (x *ValidatePeerCertificateChainReq_ClientPeer) GetCertificateChain() [][]byte { + if x != nil { + return x.CertificateChain + } + return nil +} + +type ValidatePeerCertificateChainReq_ServerPeer struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The certificate chain to be verified. The chain MUST be a list of + // DER-encoded certificates, ordered from leaf to root, excluding the root. + CertificateChain [][]byte `protobuf:"bytes,1,rep,name=certificate_chain,json=certificateChain,proto3" json:"certificate_chain,omitempty"` + // The expected hostname of the server. + ServerHostname string `protobuf:"bytes,2,opt,name=server_hostname,json=serverHostname,proto3" json:"server_hostname,omitempty"` + // The UnrestrictedClientPolicy specified by the user. + SerializedUnrestrictedClientPolicy []byte `protobuf:"bytes,3,opt,name=serialized_unrestricted_client_policy,json=serializedUnrestrictedClientPolicy,proto3" json:"serialized_unrestricted_client_policy,omitempty"` +} + +func (x *ValidatePeerCertificateChainReq_ServerPeer) Reset() { + *x = ValidatePeerCertificateChainReq_ServerPeer{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidatePeerCertificateChainReq_ServerPeer) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidatePeerCertificateChainReq_ServerPeer) ProtoMessage() {} + +func (x *ValidatePeerCertificateChainReq_ServerPeer) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidatePeerCertificateChainReq_ServerPeer.ProtoReflect.Descriptor instead. 
+func (*ValidatePeerCertificateChainReq_ServerPeer) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{9, 1} +} + +func (x *ValidatePeerCertificateChainReq_ServerPeer) GetCertificateChain() [][]byte { + if x != nil { + return x.CertificateChain + } + return nil +} + +func (x *ValidatePeerCertificateChainReq_ServerPeer) GetServerHostname() string { + if x != nil { + return x.ServerHostname + } + return "" +} + +func (x *ValidatePeerCertificateChainReq_ServerPeer) GetSerializedUnrestrictedClientPolicy() []byte { + if x != nil { + return x.SerializedUnrestrictedClientPolicy + } + return nil +} + +var File_internal_proto_v2_s2a_s2a_proto protoreflect.FileDescriptor + +var file_internal_proto_v2_s2a_s2a_proto_rawDesc = []byte{ + 0x0a, 0x1f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x0c, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x1a, + 0x22, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x25, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2f, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, + 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x87, 0x01, 0x0a, 0x0a, + 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x36, 0x0a, 0x17, 0x65, 0x6e, + 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x6e, 0x65, 0x67, 0x6f, 0x74, 0x69, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x65, 0x6e, 0x61, + 0x62, 0x6c, 0x65, 0x41, 0x6c, 0x70, 0x6e, 0x4e, 0x65, 0x67, 0x6f, 0x74, 0x69, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0e, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x73, 0x32, 0x61, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x0d, 0x61, 0x6c, 0x70, 0x6e, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x22, 0x75, 0x0a, 0x17, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, + 0x12, 0x2f, 0x0a, 0x08, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x08, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, + 0x79, 0x12, 0x16, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x48, 0x00, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x11, 0x0a, 0x0f, 0x6d, 0x65, 0x63, + 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x36, 0x0a, 0x06, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0d, 0x52, 
0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, + 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x74, + 0x61, 0x69, 0x6c, 0x73, 0x22, 0x71, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x45, + 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x69, 0x64, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x53, 0x69, 0x64, 0x65, 0x52, 0x0e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x53, 0x69, 0x64, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x6e, 0x69, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x73, 0x6e, 0x69, 0x22, 0xf1, 0x0b, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x54, + 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x73, 0x70, 0x12, 0x78, 0x0a, 0x18, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x6c, + 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x43, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x16, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x6c, 0x73, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x78, 0x0a, + 0x18, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x3c, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, + 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x6c, 0x73, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, + 0x16, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xcf, 0x02, 0x0a, 0x16, 0x43, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x63, + 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x12, + 0x40, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 
0x0e, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, + 0x74, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x73, 0x32, 0x61, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, + 0x75, 0x69, 0x74, 0x65, 0x52, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, + 0x65, 0x73, 0x12, 0x39, 0x0a, 0x0b, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x52, 0x0a, 0x61, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4a, 0x04, 0x08, + 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x1a, 0xfa, 0x06, 0x0a, 0x16, 0x53, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, + 0x6e, 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x73, + 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, 0x53, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x54, 0x6c, 0x73, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, + 0x75, 0x69, 0x74, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x73, 0x32, + 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x69, 0x70, 0x68, 0x65, + 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, 0x52, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, + 0x69, 0x74, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x16, 0x74, 0x6c, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x75, + 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x74, 0x6c, 0x73, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x93, 0x01, 0x0a, 0x1a, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x65, + 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x55, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, + 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 
0x54, 0x6c, 0x73, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x18, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x12, 0x3c, 0x0a, 0x1b, 0x6d, 0x61, 0x78, 0x5f, 0x6f, 0x76, 0x65, 0x72, 0x68, 0x65, 0x61, 0x64, + 0x5f, 0x6f, 0x66, 0x5f, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x61, 0x65, 0x61, 0x64, 0x18, + 0x09, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x17, 0x6d, 0x61, 0x78, 0x4f, 0x76, 0x65, 0x72, 0x68, 0x65, + 0x61, 0x64, 0x4f, 0x66, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x65, 0x61, 0x64, 0x12, 0x39, + 0x0a, 0x0b, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x0b, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x76, 0x32, 0x2e, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0a, 0x61, + 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x9e, 0x02, 0x0a, 0x18, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, + 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x23, 0x0a, 0x1f, 0x44, 0x4f, 0x4e, 0x54, 0x5f, + 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x43, + 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x10, 0x01, 0x12, 0x2e, 0x0a, 0x2a, + 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x43, + 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x42, 0x55, 0x54, 0x5f, 0x44, + 0x4f, 0x4e, 0x54, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x02, 0x12, 0x29, 0x0a, 0x25, + 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x43, + 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x56, + 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x03, 0x12, 0x3a, 0x0a, 0x36, 0x52, 0x45, 0x51, 0x55, 0x45, + 0x53, 0x54, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x5f, 0x43, + 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, + 0x45, 0x5f, 0x42, 0x55, 0x54, 0x5f, 0x44, 0x4f, 0x4e, 0x54, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, + 0x59, 0x10, 0x04, 0x12, 0x35, 0x0a, 0x31, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x41, + 0x4e, 0x44, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, + 0x54, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x41, 0x4e, + 0x44, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, + 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x42, 0x13, 0x0a, 0x11, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xb0, 0x03, 0x0a, 0x1d, + 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, + 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x5d, 0x0a, + 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x3f, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, + 0x4f, 
0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, + 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x50, 0x72, + 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x51, 0x0a, 0x13, + 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, + 0x74, 0x68, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, 0x73, 0x32, 0x61, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x52, 0x12, 0x73, 0x69, 0x67, + 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, + 0x1d, 0x0a, 0x09, 0x72, 0x61, 0x77, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0c, 0x48, 0x00, 0x52, 0x08, 0x72, 0x61, 0x77, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x25, + 0x0a, 0x0d, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0c, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x44, + 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0d, 0x73, 0x68, 0x61, 0x33, 0x38, 0x34, 0x5f, + 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0c, + 0x73, 0x68, 0x61, 0x33, 0x38, 0x34, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0d, + 0x73, 0x68, 0x61, 0x35, 0x31, 0x32, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0c, 0x73, 0x68, 0x61, 0x35, 0x31, 0x32, 0x44, 0x69, 0x67, + 0x65, 0x73, 0x74, 0x22, 0x3d, 0x0a, 0x13, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, + 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, + 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x53, + 0x49, 0x47, 0x4e, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45, 0x43, 0x52, 0x59, 0x50, 0x54, + 0x10, 0x02, 0x42, 0x0a, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x22, 0x3d, + 0x0a, 0x1e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, + 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, + 0x12, 0x1b, 0x0a, 0x09, 0x6f, 0x75, 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x08, 0x6f, 0x75, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xe7, 0x01, + 0x0a, 0x20, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x71, 0x12, 0x63, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x45, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, + 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x6f, 0x70, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, + 0x74, 0x65, 0x73, 0x18, 0x02, 
0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, + 0x65, 0x73, 0x22, 0x43, 0x0a, 0x16, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0f, 0x0a, 0x0b, + 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0b, 0x0a, + 0x07, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45, + 0x43, 0x52, 0x59, 0x50, 0x54, 0x10, 0x02, 0x22, 0x40, 0x0a, 0x21, 0x4f, 0x66, 0x66, 0x6c, 0x6f, + 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1b, 0x0a, 0x09, + 0x6f, 0x75, 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x08, 0x6f, 0x75, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xf8, 0x04, 0x0a, 0x1f, 0x56, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x52, 0x0a, + 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3e, 0x2e, 0x73, 0x32, + 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6d, 0x6f, 0x64, + 0x65, 0x12, 0x5b, 0x0a, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x65, 0x65, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, + 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, + 0x69, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x65, 0x65, 0x72, + 0x48, 0x00, 0x52, 0x0a, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x65, 0x65, 0x72, 0x12, 0x5b, + 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, + 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, + 0x65, 0x71, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x50, 0x65, 0x65, 0x72, 0x48, 0x00, 0x52, + 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x50, 0x65, 0x65, 0x72, 0x1a, 0x39, 0x0a, 0x0a, 0x43, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x65, 0x65, 0x72, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0c, 0x52, 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x1a, 0xb5, 0x01, 0x0a, 0x0a, 0x53, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x50, 0x65, 0x65, 0x72, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, + 0x52, 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, + 0x69, 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x65, 0x72, 
0x76, 0x65, 0x72, 0x5f, 0x68, 0x6f, 0x73, + 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x51, 0x0a, 0x25, 0x73, + 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x75, 0x6e, 0x72, 0x65, 0x73, 0x74, + 0x72, 0x69, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x22, 0x73, 0x65, 0x72, 0x69, + 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x55, 0x6e, 0x72, 0x65, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, + 0x65, 0x64, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x46, + 0x0a, 0x10, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, + 0x64, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, + 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x50, 0x49, 0x46, 0x46, 0x45, 0x10, 0x01, 0x12, + 0x15, 0x0a, 0x11, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x5f, 0x54, 0x4f, 0x5f, 0x47, 0x4f, + 0x4f, 0x47, 0x4c, 0x45, 0x10, 0x02, 0x42, 0x0c, 0x0a, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x6f, + 0x6e, 0x65, 0x6f, 0x66, 0x22, 0xb2, 0x02, 0x0a, 0x20, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, + 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x6c, 0x0a, 0x11, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, + 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, + 0x52, 0x65, 0x73, 0x70, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x10, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x2d, 0x0a, 0x12, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x11, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x44, + 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x32, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, + 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x32, 0x41, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, + 0x74, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x22, 0x3d, 0x0a, 0x10, 0x56, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x0f, + 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, + 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, + 0x46, 0x41, 0x49, 0x4c, 0x55, 0x52, 0x45, 0x10, 0x02, 0x22, 0x97, 0x05, 0x0a, 0x0a, 0x53, 0x65, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x3a, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, + 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, + 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 
0x64, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x12, 0x62, 0x0a, 0x19, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x52, 0x18, + 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, + 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x73, 0x12, 0x61, 0x0a, 0x19, 0x67, 0x65, 0x74, 0x5f, + 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x73, 0x32, + 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6c, + 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x71, 0x48, 0x00, 0x52, 0x16, 0x67, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x77, 0x0a, 0x21, 0x6f, + 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x5f, 0x6b, + 0x65, 0x79, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x71, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, + 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x1d, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, + 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x71, 0x12, 0x80, 0x01, 0x0a, 0x24, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, + 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, + 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x20, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, + 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x7d, 0x0a, 0x23, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, + 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, + 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x1f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, + 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, + 0x61, 
0x69, 0x6e, 0x52, 0x65, 0x71, 0x42, 0x0b, 0x0a, 0x09, 0x72, 0x65, 0x71, 0x5f, 0x6f, 0x6e, + 0x65, 0x6f, 0x66, 0x22, 0xb4, 0x04, 0x0a, 0x0b, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x73, 0x70, 0x12, 0x2c, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x12, 0x64, 0x0a, 0x1a, 0x67, 0x65, 0x74, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x17, + 0x67, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x7a, 0x0a, 0x22, 0x6f, 0x66, 0x66, 0x6c, 0x6f, + 0x61, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, + 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, + 0x70, 0x48, 0x00, 0x52, 0x1e, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, + 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x73, 0x70, 0x12, 0x83, 0x01, 0x0a, 0x25, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x5f, + 0x72, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x21, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, + 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x80, 0x01, 0x0a, 0x24, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, 0x72, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x72, 0x65, + 0x73, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, + 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x20, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x42, 0x0c, 0x0a, 0x0a, + 0x72, 0x65, 0x73, 0x70, 0x5f, 
0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x2a, 0xa2, 0x03, 0x0a, 0x12, 0x53, + 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, + 0x6d, 0x12, 0x1c, 0x0a, 0x18, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, + 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, + 0x21, 0x0a, 0x1d, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, + 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, + 0x10, 0x01, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, + 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x53, 0x48, 0x41, + 0x33, 0x38, 0x34, 0x10, 0x02, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, + 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, + 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, 0x10, 0x03, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x32, 0x41, 0x5f, + 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x53, + 0x45, 0x43, 0x50, 0x32, 0x35, 0x36, 0x52, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, + 0x04, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, + 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x53, 0x45, 0x43, 0x50, 0x33, 0x38, 0x34, 0x52, + 0x31, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x05, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x32, + 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, + 0x5f, 0x53, 0x45, 0x43, 0x50, 0x35, 0x32, 0x31, 0x52, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x35, 0x31, + 0x32, 0x10, 0x06, 0x12, 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, + 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, + 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x07, 0x12, 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41, + 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, + 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x08, 0x12, + 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, + 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, 0x5f, 0x53, 0x48, 0x41, + 0x35, 0x31, 0x32, 0x10, 0x09, 0x12, 0x18, 0x0a, 0x14, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, + 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x44, 0x32, 0x35, 0x35, 0x31, 0x39, 0x10, 0x0a, 0x32, + 0x57, 0x0a, 0x0a, 0x53, 0x32, 0x41, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x49, 0x0a, + 0x0c, 0x53, 0x65, 0x74, 0x55, 0x70, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x2e, + 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x1a, 0x19, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x73, 0x70, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, + 0x61, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + 
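The byte slice above is the raw descriptor that protoc-gen-go emits for internal/proto/v2/s2a/s2a.proto: a wire-format serialization of the file's FileDescriptorProto, which the generated rawDescGZIP helper below compresses lazily on first use. As a minimal sketch (not part of the vendored code, and assuming only the google.golang.org/protobuf runtime), such a blob can be decoded and inspected like this:

package rawdescinspect // hypothetical package name, illustrative only

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

// describeRawDesc decodes a generated rawDesc blob and lists the messages
// and services it declares.
func describeRawDesc(rawDesc []byte) error {
	fd := &descriptorpb.FileDescriptorProto{}
	if err := proto.Unmarshal(rawDesc, fd); err != nil {
		return err
	}
	fmt.Println("file:", fd.GetName())
	for _, m := range fd.GetMessageType() {
		fmt.Println("  message:", m.GetName())
	}
	for _, s := range fd.GetService() {
		fmt.Println("  service:", s.GetName())
	}
	return nil
}

Fed with file_internal_proto_v2_s2a_s2a_proto_rawDesc, a helper like this would list the SessionReq/SessionResp messages and the S2AService service that the goTypes and depIdxs tables below refer to by index.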
+var ( + file_internal_proto_v2_s2a_s2a_proto_rawDescOnce sync.Once + file_internal_proto_v2_s2a_s2a_proto_rawDescData = file_internal_proto_v2_s2a_s2a_proto_rawDesc +) + +func file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP() []byte { + file_internal_proto_v2_s2a_s2a_proto_rawDescOnce.Do(func() { + file_internal_proto_v2_s2a_s2a_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_proto_v2_s2a_s2a_proto_rawDescData) + }) + return file_internal_proto_v2_s2a_s2a_proto_rawDescData +} + +var file_internal_proto_v2_s2a_s2a_proto_enumTypes = make([]protoimpl.EnumInfo, 6) +var file_internal_proto_v2_s2a_s2a_proto_msgTypes = make([]protoimpl.MessageInfo, 17) +var file_internal_proto_v2_s2a_s2a_proto_goTypes = []interface{}{ + (SignatureAlgorithm)(0), // 0: s2a.proto.v2.SignatureAlgorithm + (GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate)(0), // 1: s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration.RequestClientCertificate + (OffloadPrivateKeyOperationReq_PrivateKeyOperation)(0), // 2: s2a.proto.v2.OffloadPrivateKeyOperationReq.PrivateKeyOperation + (OffloadResumptionKeyOperationReq_ResumptionKeyOperation)(0), // 3: s2a.proto.v2.OffloadResumptionKeyOperationReq.ResumptionKeyOperation + (ValidatePeerCertificateChainReq_VerificationMode)(0), // 4: s2a.proto.v2.ValidatePeerCertificateChainReq.VerificationMode + (ValidatePeerCertificateChainResp_ValidationResult)(0), // 5: s2a.proto.v2.ValidatePeerCertificateChainResp.ValidationResult + (*AlpnPolicy)(nil), // 6: s2a.proto.v2.AlpnPolicy + (*AuthenticationMechanism)(nil), // 7: s2a.proto.v2.AuthenticationMechanism + (*Status)(nil), // 8: s2a.proto.v2.Status + (*GetTlsConfigurationReq)(nil), // 9: s2a.proto.v2.GetTlsConfigurationReq + (*GetTlsConfigurationResp)(nil), // 10: s2a.proto.v2.GetTlsConfigurationResp + (*OffloadPrivateKeyOperationReq)(nil), // 11: s2a.proto.v2.OffloadPrivateKeyOperationReq + (*OffloadPrivateKeyOperationResp)(nil), // 12: s2a.proto.v2.OffloadPrivateKeyOperationResp + (*OffloadResumptionKeyOperationReq)(nil), // 13: s2a.proto.v2.OffloadResumptionKeyOperationReq + (*OffloadResumptionKeyOperationResp)(nil), // 14: s2a.proto.v2.OffloadResumptionKeyOperationResp + (*ValidatePeerCertificateChainReq)(nil), // 15: s2a.proto.v2.ValidatePeerCertificateChainReq + (*ValidatePeerCertificateChainResp)(nil), // 16: s2a.proto.v2.ValidatePeerCertificateChainResp + (*SessionReq)(nil), // 17: s2a.proto.v2.SessionReq + (*SessionResp)(nil), // 18: s2a.proto.v2.SessionResp + (*GetTlsConfigurationResp_ClientTlsConfiguration)(nil), // 19: s2a.proto.v2.GetTlsConfigurationResp.ClientTlsConfiguration + (*GetTlsConfigurationResp_ServerTlsConfiguration)(nil), // 20: s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration + (*ValidatePeerCertificateChainReq_ClientPeer)(nil), // 21: s2a.proto.v2.ValidatePeerCertificateChainReq.ClientPeer + (*ValidatePeerCertificateChainReq_ServerPeer)(nil), // 22: s2a.proto.v2.ValidatePeerCertificateChainReq.ServerPeer + (common_go_proto.AlpnProtocol)(0), // 23: s2a.proto.v2.AlpnProtocol + (*common_go_proto1.Identity)(nil), // 24: s2a.proto.Identity + (common_go_proto.ConnectionSide)(0), // 25: s2a.proto.v2.ConnectionSide + (*s2a_context_go_proto.S2AContext)(nil), // 26: s2a.proto.v2.S2AContext + (common_go_proto.TLSVersion)(0), // 27: s2a.proto.v2.TLSVersion + (common_go_proto.Ciphersuite)(0), // 28: s2a.proto.v2.Ciphersuite +} +var file_internal_proto_v2_s2a_s2a_proto_depIdxs = []int32{ + 23, // 0: s2a.proto.v2.AlpnPolicy.alpn_protocols:type_name -> s2a.proto.v2.AlpnProtocol + 
24, // 1: s2a.proto.v2.AuthenticationMechanism.identity:type_name -> s2a.proto.Identity + 25, // 2: s2a.proto.v2.GetTlsConfigurationReq.connection_side:type_name -> s2a.proto.v2.ConnectionSide + 19, // 3: s2a.proto.v2.GetTlsConfigurationResp.client_tls_configuration:type_name -> s2a.proto.v2.GetTlsConfigurationResp.ClientTlsConfiguration + 20, // 4: s2a.proto.v2.GetTlsConfigurationResp.server_tls_configuration:type_name -> s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration + 2, // 5: s2a.proto.v2.OffloadPrivateKeyOperationReq.operation:type_name -> s2a.proto.v2.OffloadPrivateKeyOperationReq.PrivateKeyOperation + 0, // 6: s2a.proto.v2.OffloadPrivateKeyOperationReq.signature_algorithm:type_name -> s2a.proto.v2.SignatureAlgorithm + 3, // 7: s2a.proto.v2.OffloadResumptionKeyOperationReq.operation:type_name -> s2a.proto.v2.OffloadResumptionKeyOperationReq.ResumptionKeyOperation + 4, // 8: s2a.proto.v2.ValidatePeerCertificateChainReq.mode:type_name -> s2a.proto.v2.ValidatePeerCertificateChainReq.VerificationMode + 21, // 9: s2a.proto.v2.ValidatePeerCertificateChainReq.client_peer:type_name -> s2a.proto.v2.ValidatePeerCertificateChainReq.ClientPeer + 22, // 10: s2a.proto.v2.ValidatePeerCertificateChainReq.server_peer:type_name -> s2a.proto.v2.ValidatePeerCertificateChainReq.ServerPeer + 5, // 11: s2a.proto.v2.ValidatePeerCertificateChainResp.validation_result:type_name -> s2a.proto.v2.ValidatePeerCertificateChainResp.ValidationResult + 26, // 12: s2a.proto.v2.ValidatePeerCertificateChainResp.context:type_name -> s2a.proto.v2.S2AContext + 24, // 13: s2a.proto.v2.SessionReq.local_identity:type_name -> s2a.proto.Identity + 7, // 14: s2a.proto.v2.SessionReq.authentication_mechanisms:type_name -> s2a.proto.v2.AuthenticationMechanism + 9, // 15: s2a.proto.v2.SessionReq.get_tls_configuration_req:type_name -> s2a.proto.v2.GetTlsConfigurationReq + 11, // 16: s2a.proto.v2.SessionReq.offload_private_key_operation_req:type_name -> s2a.proto.v2.OffloadPrivateKeyOperationReq + 13, // 17: s2a.proto.v2.SessionReq.offload_resumption_key_operation_req:type_name -> s2a.proto.v2.OffloadResumptionKeyOperationReq + 15, // 18: s2a.proto.v2.SessionReq.validate_peer_certificate_chain_req:type_name -> s2a.proto.v2.ValidatePeerCertificateChainReq + 8, // 19: s2a.proto.v2.SessionResp.status:type_name -> s2a.proto.v2.Status + 10, // 20: s2a.proto.v2.SessionResp.get_tls_configuration_resp:type_name -> s2a.proto.v2.GetTlsConfigurationResp + 12, // 21: s2a.proto.v2.SessionResp.offload_private_key_operation_resp:type_name -> s2a.proto.v2.OffloadPrivateKeyOperationResp + 14, // 22: s2a.proto.v2.SessionResp.offload_resumption_key_operation_resp:type_name -> s2a.proto.v2.OffloadResumptionKeyOperationResp + 16, // 23: s2a.proto.v2.SessionResp.validate_peer_certificate_chain_resp:type_name -> s2a.proto.v2.ValidatePeerCertificateChainResp + 27, // 24: s2a.proto.v2.GetTlsConfigurationResp.ClientTlsConfiguration.min_tls_version:type_name -> s2a.proto.v2.TLSVersion + 27, // 25: s2a.proto.v2.GetTlsConfigurationResp.ClientTlsConfiguration.max_tls_version:type_name -> s2a.proto.v2.TLSVersion + 28, // 26: s2a.proto.v2.GetTlsConfigurationResp.ClientTlsConfiguration.ciphersuites:type_name -> s2a.proto.v2.Ciphersuite + 6, // 27: s2a.proto.v2.GetTlsConfigurationResp.ClientTlsConfiguration.alpn_policy:type_name -> s2a.proto.v2.AlpnPolicy + 27, // 28: s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration.min_tls_version:type_name -> s2a.proto.v2.TLSVersion + 27, // 29: 
s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration.max_tls_version:type_name -> s2a.proto.v2.TLSVersion + 28, // 30: s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration.ciphersuites:type_name -> s2a.proto.v2.Ciphersuite + 1, // 31: s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration.request_client_certificate:type_name -> s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration.RequestClientCertificate + 6, // 32: s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration.alpn_policy:type_name -> s2a.proto.v2.AlpnPolicy + 17, // 33: s2a.proto.v2.S2AService.SetUpSession:input_type -> s2a.proto.v2.SessionReq + 18, // 34: s2a.proto.v2.S2AService.SetUpSession:output_type -> s2a.proto.v2.SessionResp + 34, // [34:35] is the sub-list for method output_type + 33, // [33:34] is the sub-list for method input_type + 33, // [33:33] is the sub-list for extension type_name + 33, // [33:33] is the sub-list for extension extendee + 0, // [0:33] is the sub-list for field type_name +} + +func init() { file_internal_proto_v2_s2a_s2a_proto_init() } +func file_internal_proto_v2_s2a_s2a_proto_init() { + if File_internal_proto_v2_s2a_s2a_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AlpnPolicy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AuthenticationMechanism); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Status); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetTlsConfigurationReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetTlsConfigurationResp); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OffloadPrivateKeyOperationReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OffloadPrivateKeyOperationResp); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OffloadResumptionKeyOperationReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + 
switch v := v.(*OffloadResumptionKeyOperationResp); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidatePeerCertificateChainReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidatePeerCertificateChainResp); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SessionReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SessionResp); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetTlsConfigurationResp_ClientTlsConfiguration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetTlsConfigurationResp_ServerTlsConfiguration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidatePeerCertificateChainReq_ClientPeer); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidatePeerCertificateChainReq_ServerPeer); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*AuthenticationMechanism_Token)(nil), + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[4].OneofWrappers = []interface{}{ + (*GetTlsConfigurationResp_ClientTlsConfiguration_)(nil), + (*GetTlsConfigurationResp_ServerTlsConfiguration_)(nil), + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[5].OneofWrappers = []interface{}{ + (*OffloadPrivateKeyOperationReq_RawBytes)(nil), + (*OffloadPrivateKeyOperationReq_Sha256Digest)(nil), + (*OffloadPrivateKeyOperationReq_Sha384Digest)(nil), + (*OffloadPrivateKeyOperationReq_Sha512Digest)(nil), + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[9].OneofWrappers = []interface{}{ + (*ValidatePeerCertificateChainReq_ClientPeer_)(nil), + (*ValidatePeerCertificateChainReq_ServerPeer_)(nil), + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[11].OneofWrappers = []interface{}{ + (*SessionReq_GetTlsConfigurationReq)(nil), + (*SessionReq_OffloadPrivateKeyOperationReq)(nil), + 
(*SessionReq_OffloadResumptionKeyOperationReq)(nil), + (*SessionReq_ValidatePeerCertificateChainReq)(nil), + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[12].OneofWrappers = []interface{}{ + (*SessionResp_GetTlsConfigurationResp)(nil), + (*SessionResp_OffloadPrivateKeyOperationResp)(nil), + (*SessionResp_OffloadResumptionKeyOperationResp)(nil), + (*SessionResp_ValidatePeerCertificateChainResp)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_internal_proto_v2_s2a_s2a_proto_rawDesc, + NumEnums: 6, + NumMessages: 17, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_internal_proto_v2_s2a_s2a_proto_goTypes, + DependencyIndexes: file_internal_proto_v2_s2a_s2a_proto_depIdxs, + EnumInfos: file_internal_proto_v2_s2a_s2a_proto_enumTypes, + MessageInfos: file_internal_proto_v2_s2a_s2a_proto_msgTypes, + }.Build() + File_internal_proto_v2_s2a_s2a_proto = out.File + file_internal_proto_v2_s2a_s2a_proto_rawDesc = nil + file_internal_proto_v2_s2a_s2a_proto_goTypes = nil + file_internal_proto_v2_s2a_s2a_proto_depIdxs = nil +} diff -Nru temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a_grpc.pb.go temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a_grpc.pb.go --- temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a_grpc.pb.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a_grpc.pb.go 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,159 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc v3.21.12 +// source: internal/proto/v2/s2a/s2a.proto + +package s2a_go_proto + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + S2AService_SetUpSession_FullMethodName = "/s2a.proto.v2.S2AService/SetUpSession" +) + +// S2AServiceClient is the client API for S2AService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type S2AServiceClient interface { + // SetUpSession is a bidirectional stream used by applications to offload + // operations from the TLS handshake. 
+ SetUpSession(ctx context.Context, opts ...grpc.CallOption) (S2AService_SetUpSessionClient, error) +} + +type s2AServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewS2AServiceClient(cc grpc.ClientConnInterface) S2AServiceClient { + return &s2AServiceClient{cc} +} + +func (c *s2AServiceClient) SetUpSession(ctx context.Context, opts ...grpc.CallOption) (S2AService_SetUpSessionClient, error) { + stream, err := c.cc.NewStream(ctx, &S2AService_ServiceDesc.Streams[0], S2AService_SetUpSession_FullMethodName, opts...) + if err != nil { + return nil, err + } + x := &s2AServiceSetUpSessionClient{stream} + return x, nil +} + +type S2AService_SetUpSessionClient interface { + Send(*SessionReq) error + Recv() (*SessionResp, error) + grpc.ClientStream +} + +type s2AServiceSetUpSessionClient struct { + grpc.ClientStream +} + +func (x *s2AServiceSetUpSessionClient) Send(m *SessionReq) error { + return x.ClientStream.SendMsg(m) +} + +func (x *s2AServiceSetUpSessionClient) Recv() (*SessionResp, error) { + m := new(SessionResp) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// S2AServiceServer is the server API for S2AService service. +// All implementations must embed UnimplementedS2AServiceServer +// for forward compatibility +type S2AServiceServer interface { + // SetUpSession is a bidirectional stream used by applications to offload + // operations from the TLS handshake. + SetUpSession(S2AService_SetUpSessionServer) error + mustEmbedUnimplementedS2AServiceServer() +} + +// UnimplementedS2AServiceServer must be embedded to have forward compatible implementations. +type UnimplementedS2AServiceServer struct { +} + +func (UnimplementedS2AServiceServer) SetUpSession(S2AService_SetUpSessionServer) error { + return status.Errorf(codes.Unimplemented, "method SetUpSession not implemented") +} +func (UnimplementedS2AServiceServer) mustEmbedUnimplementedS2AServiceServer() {} + +// UnsafeS2AServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to S2AServiceServer will +// result in compilation errors. +type UnsafeS2AServiceServer interface { + mustEmbedUnimplementedS2AServiceServer() +} + +func RegisterS2AServiceServer(s grpc.ServiceRegistrar, srv S2AServiceServer) { + s.RegisterService(&S2AService_ServiceDesc, srv) +} + +func _S2AService_SetUpSession_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(S2AServiceServer).SetUpSession(&s2AServiceSetUpSessionServer{stream}) +} + +type S2AService_SetUpSessionServer interface { + Send(*SessionResp) error + Recv() (*SessionReq, error) + grpc.ServerStream +} + +type s2AServiceSetUpSessionServer struct { + grpc.ServerStream +} + +func (x *s2AServiceSetUpSessionServer) Send(m *SessionResp) error { + return x.ServerStream.SendMsg(m) +} + +func (x *s2AServiceSetUpSessionServer) Recv() (*SessionReq, error) { + m := new(SessionReq) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// S2AService_ServiceDesc is the grpc.ServiceDesc for S2AService service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var S2AService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "s2a.proto.v2.S2AService", + HandlerType: (*S2AServiceServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "SetUpSession", + Handler: _S2AService_SetUpSession_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "internal/proto/v2/s2a/s2a.proto", +} diff -Nru temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/aeadcrypter.go temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/aeadcrypter.go --- temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/aeadcrypter.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/aeadcrypter.go 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,34 @@ +/* + * + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package aeadcrypter provides the interface for AEAD cipher implementations +// used by S2A's record protocol. +package aeadcrypter + +// S2AAEADCrypter is the interface for an AEAD cipher used by the S2A record +// protocol. +type S2AAEADCrypter interface { + // Encrypt encrypts the plaintext and computes the tag of dst and plaintext. + // dst and plaintext may fully overlap or not at all. + Encrypt(dst, plaintext, nonce, aad []byte) ([]byte, error) + // Decrypt decrypts ciphertext and verifies the tag. dst and ciphertext may + // fully overlap or not at all. + Decrypt(dst, ciphertext, nonce, aad []byte) ([]byte, error) + // TagSize returns the tag size in bytes. + TagSize() int +} diff -Nru temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/aesgcm.go temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/aesgcm.go --- temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/aesgcm.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/aesgcm.go 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,70 @@ +/* + * + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package aeadcrypter + +import ( + "crypto/aes" + "crypto/cipher" + "fmt" +) + +// Supported key sizes in bytes. +const ( + AES128GCMKeySize = 16 + AES256GCMKeySize = 32 +) + +// aesgcm is the struct that holds an AES-GCM cipher for the S2A AEAD crypter. +type aesgcm struct { + aead cipher.AEAD +} + +// NewAESGCM creates an AES-GCM crypter instance. Note that the key must be +// either 128 bits or 256 bits. +func NewAESGCM(key []byte) (S2AAEADCrypter, error) { + if len(key) != AES128GCMKeySize && len(key) != AES256GCMKeySize { + return nil, fmt.Errorf("%d or %d bytes, given: %d", AES128GCMKeySize, AES256GCMKeySize, len(key)) + } + c, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + a, err := cipher.NewGCM(c) + if err != nil { + return nil, err + } + return &aesgcm{aead: a}, nil +} + +// Encrypt is the encryption function. dst can contain bytes at the beginning of +// the ciphertext that will not be encrypted but will be authenticated. If dst +// has enough capacity to hold these bytes, the ciphertext and the tag, no +// allocation and copy operations will be performed. dst and plaintext may +// fully overlap or not at all. +func (s *aesgcm) Encrypt(dst, plaintext, nonce, aad []byte) ([]byte, error) { + return encrypt(s.aead, dst, plaintext, nonce, aad) +} + +func (s *aesgcm) Decrypt(dst, ciphertext, nonce, aad []byte) ([]byte, error) { + return decrypt(s.aead, dst, ciphertext, nonce, aad) +} + +func (s *aesgcm) TagSize() int { + return TagSize +} diff -Nru temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/chachapoly.go temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/chachapoly.go --- temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/chachapoly.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/chachapoly.go 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,67 @@ +/* + * + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package aeadcrypter + +import ( + "crypto/cipher" + "fmt" + + "golang.org/x/crypto/chacha20poly1305" +) + +// Supported key size in bytes. +const ( + Chacha20Poly1305KeySize = 32 +) + +// chachapoly is the struct that holds a CHACHA-POLY cipher for the S2A AEAD +// crypter. +type chachapoly struct { + aead cipher.AEAD +} + +// NewChachaPoly creates a Chacha-Poly crypter instance. Note that the key must +// be Chacha20Poly1305KeySize bytes in length. +func NewChachaPoly(key []byte) (S2AAEADCrypter, error) { + if len(key) != Chacha20Poly1305KeySize { + return nil, fmt.Errorf("%d bytes, given: %d", Chacha20Poly1305KeySize, len(key)) + } + c, err := chacha20poly1305.New(key) + if err != nil { + return nil, err + } + return &chachapoly{aead: c}, nil +} + +// Encrypt is the encryption function. 
dst can contain bytes at the beginning of +// the ciphertext that will not be encrypted but will be authenticated. If dst +// has enough capacity to hold these bytes, the ciphertext and the tag, no +// allocation and copy operations will be performed. dst and plaintext may +// fully overlap or not at all. +func (s *chachapoly) Encrypt(dst, plaintext, nonce, aad []byte) ([]byte, error) { + return encrypt(s.aead, dst, plaintext, nonce, aad) +} + +func (s *chachapoly) Decrypt(dst, ciphertext, nonce, aad []byte) ([]byte, error) { + return decrypt(s.aead, dst, ciphertext, nonce, aad) +} + +func (s *chachapoly) TagSize() int { + return TagSize +} diff -Nru temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/common.go temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/common.go --- temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/common.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/common.go 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,92 @@ +/* + * + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package aeadcrypter + +import ( + "crypto/cipher" + "fmt" +) + +const ( + // TagSize is the tag size in bytes for AES-128-GCM-SHA256, + // AES-256-GCM-SHA384, and CHACHA20-POLY1305-SHA256. + TagSize = 16 + // NonceSize is the size of the nonce in number of bytes for + // AES-128-GCM-SHA256, AES-256-GCM-SHA384, and CHACHA20-POLY1305-SHA256. + NonceSize = 12 + // SHA256DigestSize is the digest size of sha256 in bytes. + SHA256DigestSize = 32 + // SHA384DigestSize is the digest size of sha384 in bytes. + SHA384DigestSize = 48 +) + +// sliceForAppend takes a slice and a requested number of bytes. It returns a +// slice with the contents of the given slice followed by that many bytes and a +// second slice that aliases into it and contains only the extra bytes. If the +// original slice has sufficient capacity then no allocation is performed. +func sliceForAppend(in []byte, n int) (head, tail []byte) { + if total := len(in) + n; cap(in) >= total { + head = in[:total] + } else { + head = make([]byte, total) + copy(head, in) + } + tail = head[len(in):] + return head, tail +} + +// encrypt is the encryption function for an AEAD crypter. aead determines +// the type of AEAD crypter. dst can contain bytes at the beginning of the +// ciphertext that will not be encrypted but will be authenticated. If dst has +// enough capacity to hold these bytes, the ciphertext and the tag, no +// allocation and copy operations will be performed. dst and plaintext may +// fully overlap or not at all. +func encrypt(aead cipher.AEAD, dst, plaintext, nonce, aad []byte) ([]byte, error) { + if len(nonce) != NonceSize { + return nil, fmt.Errorf("nonce size must be %d bytes. 
received: %d", NonceSize, len(nonce)) + } + // If we need to allocate an output buffer, we want to include space for + // the tag to avoid forcing the caller to reallocate as well. + dlen := len(dst) + dst, out := sliceForAppend(dst, len(plaintext)+TagSize) + data := out[:len(plaintext)] + copy(data, plaintext) // data may fully overlap plaintext + + // Seal appends the ciphertext and the tag to its first argument and + // returns the updated slice. However, sliceForAppend above ensures that + // dst has enough capacity to avoid a reallocation and copy due to the + // append. + dst = aead.Seal(dst[:dlen], nonce, data, aad) + return dst, nil +} + +// decrypt is the decryption function for an AEAD crypter, where aead determines +// the type of AEAD crypter, and dst the destination bytes for the decrypted +// ciphertext. The dst buffer may fully overlap with plaintext or not at all. +func decrypt(aead cipher.AEAD, dst, ciphertext, nonce, aad []byte) ([]byte, error) { + if len(nonce) != NonceSize { + return nil, fmt.Errorf("nonce size must be %d bytes. received: %d", NonceSize, len(nonce)) + } + // If dst is equal to ciphertext[:0], ciphertext storage is reused. + plaintext, err := aead.Open(dst, nonce, ciphertext, aad) + if err != nil { + return nil, fmt.Errorf("message auth failed: %v", err) + } + return plaintext, nil +} diff -Nru temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/ciphersuite.go temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/ciphersuite.go --- temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/ciphersuite.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/ciphersuite.go 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,98 @@ +/* + * + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package halfconn + +import ( + "crypto/sha256" + "crypto/sha512" + "fmt" + "hash" + + s2apb "github.com/google/s2a-go/internal/proto/common_go_proto" + "github.com/google/s2a-go/internal/record/internal/aeadcrypter" +) + +// ciphersuite is the interface for retrieving ciphersuite-specific information +// and utilities. +type ciphersuite interface { + // keySize returns the key size in bytes. This refers to the key used by + // the AEAD crypter. This is derived by calling HKDF expand on the traffic + // secret. + keySize() int + // nonceSize returns the nonce size in bytes. + nonceSize() int + // trafficSecretSize returns the traffic secret size in bytes. This refers + // to the secret used to derive the traffic key and nonce, as specified in + // https://tools.ietf.org/html/rfc8446#section-7. + trafficSecretSize() int + // hashFunction returns the hash function for the ciphersuite. + hashFunction() func() hash.Hash + // aeadCrypter takes a key and creates an AEAD crypter for the ciphersuite + // using that key. 
+ aeadCrypter(key []byte) (aeadcrypter.S2AAEADCrypter, error) +} + +func newCiphersuite(ciphersuite s2apb.Ciphersuite) (ciphersuite, error) { + switch ciphersuite { + case s2apb.Ciphersuite_AES_128_GCM_SHA256: + return &aesgcm128sha256{}, nil + case s2apb.Ciphersuite_AES_256_GCM_SHA384: + return &aesgcm256sha384{}, nil + case s2apb.Ciphersuite_CHACHA20_POLY1305_SHA256: + return &chachapolysha256{}, nil + default: + return nil, fmt.Errorf("unrecognized ciphersuite: %v", ciphersuite) + } +} + +// aesgcm128sha256 is the AES-128-GCM-SHA256 implementation of the ciphersuite +// interface. +type aesgcm128sha256 struct{} + +func (aesgcm128sha256) keySize() int { return aeadcrypter.AES128GCMKeySize } +func (aesgcm128sha256) nonceSize() int { return aeadcrypter.NonceSize } +func (aesgcm128sha256) trafficSecretSize() int { return aeadcrypter.SHA256DigestSize } +func (aesgcm128sha256) hashFunction() func() hash.Hash { return sha256.New } +func (aesgcm128sha256) aeadCrypter(key []byte) (aeadcrypter.S2AAEADCrypter, error) { + return aeadcrypter.NewAESGCM(key) +} + +// aesgcm256sha384 is the AES-256-GCM-SHA384 implementation of the ciphersuite +// interface. +type aesgcm256sha384 struct{} + +func (aesgcm256sha384) keySize() int { return aeadcrypter.AES256GCMKeySize } +func (aesgcm256sha384) nonceSize() int { return aeadcrypter.NonceSize } +func (aesgcm256sha384) trafficSecretSize() int { return aeadcrypter.SHA384DigestSize } +func (aesgcm256sha384) hashFunction() func() hash.Hash { return sha512.New384 } +func (aesgcm256sha384) aeadCrypter(key []byte) (aeadcrypter.S2AAEADCrypter, error) { + return aeadcrypter.NewAESGCM(key) +} + +// chachapolysha256 is the ChaChaPoly-SHA256 implementation of the ciphersuite +// interface. +type chachapolysha256 struct{} + +func (chachapolysha256) keySize() int { return aeadcrypter.Chacha20Poly1305KeySize } +func (chachapolysha256) nonceSize() int { return aeadcrypter.NonceSize } +func (chachapolysha256) trafficSecretSize() int { return aeadcrypter.SHA256DigestSize } +func (chachapolysha256) hashFunction() func() hash.Hash { return sha256.New } +func (chachapolysha256) aeadCrypter(key []byte) (aeadcrypter.S2AAEADCrypter, error) { + return aeadcrypter.NewChachaPoly(key) +} diff -Nru temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/counter.go temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/counter.go --- temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/counter.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/counter.go 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,60 @@ +/* + * + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package halfconn + +import "errors" + +// counter is a 64-bit counter. +type counter struct { + val uint64 + hasOverflowed bool +} + +// newCounter creates a new counter with the initial value set to val. 
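Taken together, the vendored aeadcrypter and ciphersuite files above act as a small factory: newCiphersuite maps the negotiated s2apb.Ciphersuite value to key, nonce and traffic-secret sizes plus an AEAD constructor, and the constructors (NewAESGCM, NewChachaPoly) return the S2AAEADCrypter interface. A brief sketch of that exported surface, written as an in-package helper because the package lives under internal/ (the function name, zero-valued key and nonce, and AAD string are placeholders, not code from this diff):

package aeadcrypter

import "fmt"

// roundTripExample shows how the constructors and the S2AAEADCrypter
// interface fit together; illustrative only.
func roundTripExample() error {
	key := make([]byte, AES128GCMKeySize) // 16 bytes selects AES-128-GCM
	nonce := make([]byte, NonceSize)      // encrypt/decrypt require a 12-byte nonce
	aad := []byte("record header")

	crypter, err := NewAESGCM(key)
	if err != nil {
		return err
	}
	// Encrypt appends ciphertext||tag to dst (nil here), as documented above.
	ct, err := crypter.Encrypt(nil, []byte("hello"), nonce, aad)
	if err != nil {
		return err
	}
	pt, err := crypter.Decrypt(nil, ct, nonce, aad)
	if err != nil {
		return err
	}
	fmt.Printf("tag size %d, roundtrip %q\n", crypter.TagSize(), pt)
	return nil
}

Swapping NewAESGCM for NewChachaPoly (with a 32-byte key) would exercise the CHACHA20-POLY1305 path through exactly the same interface.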
+func newCounter(val uint64) counter { + return counter{val: val} +} + +// value returns the current value of the counter. +func (c *counter) value() (uint64, error) { + if c.hasOverflowed { + return 0, errors.New("counter has overflowed") + } + return c.val, nil +} + +// increment increments the counter and checks for overflow. +func (c *counter) increment() { + // If the counter is already invalid due to overflow, there is no need to + // increase it. We check for the hasOverflowed flag in the call to value(). + if c.hasOverflowed { + return + } + c.val++ + if c.val == 0 { + c.hasOverflowed = true + } +} + +// reset sets the counter value to zero and sets the hasOverflowed flag to +// false. +func (c *counter) reset() { + c.val = 0 + c.hasOverflowed = false +} diff -Nru temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/expander.go temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/expander.go --- temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/expander.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/expander.go 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,59 @@ +/* + * + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package halfconn + +import ( + "fmt" + "hash" + + "golang.org/x/crypto/hkdf" +) + +// hkdfExpander is the interface for the HKDF expansion function; see +// https://tools.ietf.org/html/rfc5869 for details. its use in TLS 1.3 is +// specified in https://tools.ietf.org/html/rfc8446#section-7.2 +type hkdfExpander interface { + // expand takes a secret, a label, and the output length in bytes, and + // returns the resulting expanded key. + expand(secret, label []byte, length int) ([]byte, error) +} + +// defaultHKDFExpander is the default HKDF expander which uses Go's crypto/hkdf +// for HKDF expansion. +type defaultHKDFExpander struct { + h func() hash.Hash +} + +// newDefaultHKDFExpander creates an instance of the default HKDF expander +// using the given hash function. 
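The overflow flag exists because TLS 1.3 record sequence numbers must never wrap (RFC 8446, section 5.3); once the counter would wrap, the half connection stops encrypting or decrypting. A minimal sketch of the wrap detection, separate from the vendored code:

package main

import (
	"fmt"
	"math"
)

func main() {
	var val uint64 = math.MaxUint64 // largest possible sequence number
	overflowed := false
	val++ // unsigned arithmetic wraps to zero in Go
	if val == 0 {
		overflowed = true
	}
	fmt.Println(val, overflowed) // 0 true
}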
+func newDefaultHKDFExpander(h func() hash.Hash) hkdfExpander { + return &defaultHKDFExpander{h: h} +} + +func (d *defaultHKDFExpander) expand(secret, label []byte, length int) ([]byte, error) { + outBuf := make([]byte, length) + n, err := hkdf.Expand(d.h, secret, label).Read(outBuf) + if err != nil { + return nil, fmt.Errorf("hkdf.Expand.Read failed with error: %v", err) + } + if n < length { + return nil, fmt.Errorf("hkdf.Expand.Read returned unexpected length, got %d, want %d", n, length) + } + return outBuf, nil +} diff -Nru temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/halfconn.go temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/halfconn.go --- temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/halfconn.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/halfconn.go 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,193 @@ +/* + * + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package halfconn manages the inbound or outbound traffic of a TLS 1.3 +// connection. +package halfconn + +import ( + "fmt" + "sync" + + s2apb "github.com/google/s2a-go/internal/proto/common_go_proto" + "github.com/google/s2a-go/internal/record/internal/aeadcrypter" + "golang.org/x/crypto/cryptobyte" +) + +// The constants below were taken from Section 7.2 and 7.3 in +// https://tools.ietf.org/html/rfc8446#section-7. They are used as the label +// in HKDF-Expand-Label. +const ( + tls13Key = "tls13 key" + tls13Nonce = "tls13 iv" + tls13Update = "tls13 traffic upd" +) + +// S2AHalfConnection stores the state of the TLS 1.3 connection in the +// inbound or outbound direction. +type S2AHalfConnection struct { + cs ciphersuite + expander hkdfExpander + // mutex guards sequence, aeadCrypter, trafficSecret, and nonce. + mutex sync.Mutex + aeadCrypter aeadcrypter.S2AAEADCrypter + sequence counter + trafficSecret []byte + nonce []byte +} + +// New creates a new instance of S2AHalfConnection given a ciphersuite and a +// traffic secret. +func New(ciphersuite s2apb.Ciphersuite, trafficSecret []byte, sequence uint64) (*S2AHalfConnection, error) { + cs, err := newCiphersuite(ciphersuite) + if err != nil { + return nil, fmt.Errorf("failed to create new ciphersuite: %v", ciphersuite) + } + if cs.trafficSecretSize() != len(trafficSecret) { + return nil, fmt.Errorf("supplied traffic secret must be %v bytes, given: %v bytes", cs.trafficSecretSize(), len(trafficSecret)) + } + + hc := &S2AHalfConnection{cs: cs, expander: newDefaultHKDFExpander(cs.hashFunction()), sequence: newCounter(sequence), trafficSecret: trafficSecret} + if err = hc.updateCrypterAndNonce(hc.trafficSecret); err != nil { + return nil, fmt.Errorf("failed to create half connection using traffic secret: %v", err) + } + + return hc, nil +} + +// Encrypt encrypts the plaintext and computes the tag of dst and plaintext. 
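For orientation, the expander is a thin wrapper over golang.org/x/crypto/hkdf; below is a standalone sketch (not part of the vendored files) of the same expansion step. Note that the half connection first serializes its label into the HKDF-Expand-Label structure (see deriveSecret further down); the sketch uses a raw placeholder label and secret.

package main

import (
	"crypto/sha256"
	"fmt"
	"io"

	"golang.org/x/crypto/hkdf"
)

func main() {
	secret := make([]byte, 32)   // placeholder traffic secret
	label := []byte("tls13 key") // placeholder; real callers pass a serialized HkdfLabel
	out := make([]byte, 16)      // AES-128-GCM key size

	// hkdf.Expand returns an io.Reader; reading N bytes yields N bytes of
	// expanded key material.
	if _, err := io.ReadFull(hkdf.Expand(sha256.New, secret, label), out); err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", out)
}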
+// dst and plaintext may fully overlap or not at all. Note that the sequence +// number will still be incremented on failure, unless the sequence has +// overflowed. +func (hc *S2AHalfConnection) Encrypt(dst, plaintext, aad []byte) ([]byte, error) { + hc.mutex.Lock() + sequence, err := hc.getAndIncrementSequence() + if err != nil { + hc.mutex.Unlock() + return nil, err + } + nonce := hc.maskedNonce(sequence) + crypter := hc.aeadCrypter + hc.mutex.Unlock() + return crypter.Encrypt(dst, plaintext, nonce, aad) +} + +// Decrypt decrypts ciphertext and verifies the tag. dst and ciphertext may +// fully overlap or not at all. Note that the sequence number will still be +// incremented on failure, unless the sequence has overflowed. +func (hc *S2AHalfConnection) Decrypt(dst, ciphertext, aad []byte) ([]byte, error) { + hc.mutex.Lock() + sequence, err := hc.getAndIncrementSequence() + if err != nil { + hc.mutex.Unlock() + return nil, err + } + nonce := hc.maskedNonce(sequence) + crypter := hc.aeadCrypter + hc.mutex.Unlock() + return crypter.Decrypt(dst, ciphertext, nonce, aad) +} + +// UpdateKey advances the traffic secret key, as specified in +// https://tools.ietf.org/html/rfc8446#section-7.2. In addition, it derives +// a new key and nonce, and resets the sequence number. +func (hc *S2AHalfConnection) UpdateKey() error { + hc.mutex.Lock() + defer hc.mutex.Unlock() + + var err error + hc.trafficSecret, err = hc.deriveSecret(hc.trafficSecret, []byte(tls13Update), hc.cs.trafficSecretSize()) + if err != nil { + return fmt.Errorf("failed to derive traffic secret: %v", err) + } + + if err = hc.updateCrypterAndNonce(hc.trafficSecret); err != nil { + return fmt.Errorf("failed to update half connection: %v", err) + } + + hc.sequence.reset() + return nil +} + +// TagSize returns the tag size in bytes of the underlying AEAD crypter. +func (hc *S2AHalfConnection) TagSize() int { + return hc.aeadCrypter.TagSize() +} + +// updateCrypterAndNonce takes a new traffic secret and updates the crypter +// and nonce. Note that the mutex must be held while calling this function. +func (hc *S2AHalfConnection) updateCrypterAndNonce(newTrafficSecret []byte) error { + key, err := hc.deriveSecret(newTrafficSecret, []byte(tls13Key), hc.cs.keySize()) + if err != nil { + return fmt.Errorf("failed to update key: %v", err) + } + + hc.nonce, err = hc.deriveSecret(newTrafficSecret, []byte(tls13Nonce), hc.cs.nonceSize()) + if err != nil { + return fmt.Errorf("failed to update nonce: %v", err) + } + + hc.aeadCrypter, err = hc.cs.aeadCrypter(key) + if err != nil { + return fmt.Errorf("failed to update AEAD crypter: %v", err) + } + return nil +} + +// getAndIncrement returns the current sequence number and increments it. Note +// that the mutex must be held while calling this function. +func (hc *S2AHalfConnection) getAndIncrementSequence() (uint64, error) { + sequence, err := hc.sequence.value() + if err != nil { + return 0, err + } + hc.sequence.increment() + return sequence, nil +} + +// maskedNonce creates a copy of the nonce that is masked with the sequence +// number. Note that the mutex must be held while calling this function. 
+func (hc *S2AHalfConnection) maskedNonce(sequence uint64) []byte { + const uint64Size = 8 + nonce := make([]byte, len(hc.nonce)) + copy(nonce, hc.nonce) + for i := 0; i < uint64Size; i++ { + nonce[aeadcrypter.NonceSize-uint64Size+i] ^= byte(sequence >> uint64(56-uint64Size*i)) + } + return nonce +} + +// deriveSecret implements the Derive-Secret function, as specified in +// https://tools.ietf.org/html/rfc8446#section-7.1. +func (hc *S2AHalfConnection) deriveSecret(secret, label []byte, length int) ([]byte, error) { + var hkdfLabel cryptobyte.Builder + hkdfLabel.AddUint16(uint16(length)) + hkdfLabel.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) { + b.AddBytes(label) + }) + // Append an empty `Context` field to the label, as specified in the RFC. + // The half connection does not use the `Context` field. + hkdfLabel.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) { + b.AddBytes([]byte("")) + }) + hkdfLabelBytes, err := hkdfLabel.Bytes() + if err != nil { + return nil, fmt.Errorf("deriveSecret failed: %v", err) + } + return hc.expander.expand(secret, hkdfLabelBytes, length) +} diff -Nru temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/record/record.go temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/record/record.go --- temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/record/record.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/record/record.go 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,757 @@ +/* + * + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package record implements the TLS 1.3 record protocol used by the S2A +// transport credentials. +package record + +import ( + "encoding/binary" + "errors" + "fmt" + "math" + "net" + "sync" + + commonpb "github.com/google/s2a-go/internal/proto/common_go_proto" + "github.com/google/s2a-go/internal/record/internal/halfconn" + "github.com/google/s2a-go/internal/tokenmanager" + "google.golang.org/grpc/grpclog" +) + +// recordType is the `ContentType` as described in +// https://tools.ietf.org/html/rfc8446#section-5.1. +type recordType byte + +const ( + alert recordType = 21 + handshake recordType = 22 + applicationData recordType = 23 +) + +// keyUpdateRequest is the `KeyUpdateRequest` as described in +// https://tools.ietf.org/html/rfc8446#section-4.6.3. +type keyUpdateRequest byte + +const ( + updateNotRequested keyUpdateRequest = 0 + updateRequested keyUpdateRequest = 1 +) + +// alertDescription is the `AlertDescription` as described in +// https://tools.ietf.org/html/rfc8446#section-6. +type alertDescription byte + +const ( + closeNotify alertDescription = 0 +) + +// sessionTicketState is used to determine whether session tickets have not yet +// been received, are in the process of being received, or have finished +// receiving. 
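To see how the two directions fit together, the sketch below (illustrative only; these are internal packages and the secret is a placeholder) pairs an outbound and an inbound half connection created from the same ciphersuite, traffic secret, and starting sequence number, so a record sealed by one opens in the other.

package sketch

import (
	s2apb "github.com/google/s2a-go/internal/proto/common_go_proto"
	"github.com/google/s2a-go/internal/record/internal/halfconn"
)

// roundTrip seals plaintext with a sender-side half connection and opens the
// result with a matching receiver-side half connection. The 32-byte secret is
// a placeholder (SHA-256 digest size for AES-128-GCM-SHA256).
func roundTrip(plaintext, aad []byte) ([]byte, error) {
	secret := make([]byte, 32)
	out, err := halfconn.New(s2apb.Ciphersuite_AES_128_GCM_SHA256, secret, 0)
	if err != nil {
		return nil, err
	}
	in, err := halfconn.New(s2apb.Ciphersuite_AES_128_GCM_SHA256, secret, 0)
	if err != nil {
		return nil, err
	}
	ciphertext, err := out.Encrypt(nil, plaintext, aad)
	if err != nil {
		return nil, err
	}
	return in.Decrypt(nil, ciphertext, aad)
}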
+type sessionTicketState byte + +const ( + ticketsNotYetReceived sessionTicketState = 0 + receivingTickets sessionTicketState = 1 + notReceivingTickets sessionTicketState = 2 +) + +const ( + // The TLS 1.3-specific constants below (tlsRecordMaxPlaintextSize, + // tlsRecordHeaderSize, tlsRecordTypeSize) were taken from + // https://tools.ietf.org/html/rfc8446#section-5.1. + + // tlsRecordMaxPlaintextSize is the maximum size in bytes of the plaintext + // in a single TLS 1.3 record. + tlsRecordMaxPlaintextSize = 16384 // 2^14 + // tlsRecordTypeSize is the size in bytes of the TLS 1.3 record type. + tlsRecordTypeSize = 1 + // tlsTagSize is the size in bytes of the tag of the following three + // ciphersuites: AES-128-GCM-SHA256, AES-256-GCM-SHA384, + // CHACHA20-POLY1305-SHA256. + tlsTagSize = 16 + // tlsRecordMaxPayloadSize is the maximum size in bytes of the payload in a + // single TLS 1.3 record. This is the maximum size of the plaintext plus the + // record type byte and 16 bytes of the tag. + tlsRecordMaxPayloadSize = tlsRecordMaxPlaintextSize + tlsRecordTypeSize + tlsTagSize + // tlsRecordHeaderTypeSize is the size in bytes of the TLS 1.3 record + // header type. + tlsRecordHeaderTypeSize = 1 + // tlsRecordHeaderLegacyRecordVersionSize is the size in bytes of the TLS + // 1.3 record header legacy record version. + tlsRecordHeaderLegacyRecordVersionSize = 2 + // tlsRecordHeaderPayloadLengthSize is the size in bytes of the TLS 1.3 + // record header payload length. + tlsRecordHeaderPayloadLengthSize = 2 + // tlsRecordHeaderSize is the size in bytes of the TLS 1.3 record header. + tlsRecordHeaderSize = tlsRecordHeaderTypeSize + tlsRecordHeaderLegacyRecordVersionSize + tlsRecordHeaderPayloadLengthSize + // tlsRecordMaxSize + tlsRecordMaxSize = tlsRecordMaxPayloadSize + tlsRecordHeaderSize + // tlsApplicationData is the application data type of the TLS 1.3 record + // header. + tlsApplicationData = 23 + // tlsLegacyRecordVersion is the legacy record version of the TLS record. + tlsLegacyRecordVersion = 3 + // tlsAlertSize is the size in bytes of an alert of TLS 1.3. + tlsAlertSize = 2 +) + +const ( + // These are TLS 1.3 handshake-specific constants. + + // tlsHandshakeNewSessionTicketType is the prefix of a handshake new session + // ticket message of TLS 1.3. + tlsHandshakeNewSessionTicketType = 4 + // tlsHandshakeKeyUpdateType is the prefix of a handshake key update message + // of TLS 1.3. + tlsHandshakeKeyUpdateType = 24 + // tlsHandshakeMsgTypeSize is the size in bytes of the TLS 1.3 handshake + // message type field. + tlsHandshakeMsgTypeSize = 1 + // tlsHandshakeLengthSize is the size in bytes of the TLS 1.3 handshake + // message length field. + tlsHandshakeLengthSize = 3 + // tlsHandshakeKeyUpdateMsgSize is the size in bytes of the TLS 1.3 + // handshake key update message. + tlsHandshakeKeyUpdateMsgSize = 1 + // tlsHandshakePrefixSize is the size in bytes of the prefix of the TLS 1.3 + // handshake message. + tlsHandshakePrefixSize = 4 + // tlsMaxSessionTicketSize is the maximum size of a NewSessionTicket message + // in TLS 1.3. This is the sum of the max sizes of all the fields in the + // NewSessionTicket struct specified in + // https://tools.ietf.org/html/rfc8446#section-4.6.1. + tlsMaxSessionTicketSize = 131338 +) + +const ( + // outBufMaxRecords is the maximum number of records that can fit in the + // ourRecordsBuf buffer. + outBufMaxRecords = 16 + // outBufMaxSize is the maximum size (in bytes) of the outRecordsBuf buffer. 
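In concrete numbers: the record header is 1 + 2 + 2 = 5 bytes (type, legacy version, length), the maximum payload is 16384 + 1 + 16 = 16401 bytes (plaintext, record-type byte, tag), and the maximum record is 16401 + 5 = 16406 bytes. A tiny sketch of the same arithmetic, separate from the vendored constants:

package main

import "fmt"

func main() {
	const headerSize = 1 + 2 + 2      // type + legacy record version + length
	const maxPayload = 16384 + 1 + 16 // max plaintext + record-type byte + tag
	fmt.Println(headerSize, maxPayload, headerSize+maxPayload) // 5 16401 16406
}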
+ outBufMaxSize = outBufMaxRecords * tlsRecordMaxSize + // maxAllowedTickets is the maximum number of session tickets that are + // allowed. The number of tickets are limited to ensure that the size of the + // ticket queue does not grow indefinitely. S2A also keeps a limit on the + // number of tickets that it caches. + maxAllowedTickets = 5 +) + +// preConstructedKeyUpdateMsg holds the key update message. This is needed as an +// optimization so that the same message does not need to be constructed every +// time a key update message is sent. +var preConstructedKeyUpdateMsg = buildKeyUpdateRequest() + +// conn represents a secured TLS connection. It implements the net.Conn +// interface. +type conn struct { + net.Conn + // inConn is the half connection responsible for decrypting incoming bytes. + inConn *halfconn.S2AHalfConnection + // outConn is the half connection responsible for encrypting outgoing bytes. + outConn *halfconn.S2AHalfConnection + // pendingApplicationData holds data that has been read from the connection + // and decrypted, but has not yet been returned by Read. + pendingApplicationData []byte + // unusedBuf holds data read from the network that has not yet been + // decrypted. This data might not consist of a complete record. It may + // consist of several records, the last of which could be incomplete. + unusedBuf []byte + // outRecordsBuf is a buffer used to store outgoing TLS records before + // they are written to the network. + outRecordsBuf []byte + // nextRecord stores the next record info in the unusedBuf buffer. + nextRecord []byte + // overheadSize is the overhead size in bytes of each TLS 1.3 record, which + // is computed as overheadSize = header size + record type byte + tag size. + // Note that there is no padding by zeros in the overhead calculation. + overheadSize int + // readMutex guards against concurrent calls to Read. This is required since + // Close may be called during a Read. + readMutex sync.Mutex + // writeMutex guards against concurrent calls to Write. This is required + // since Close may be called during a Write, and also because a key update + // message may be written during a Read. + writeMutex sync.Mutex + // handshakeBuf holds handshake messages while they are being processed. + handshakeBuf []byte + // ticketState is the current processing state of the session tickets. + ticketState sessionTicketState + // sessionTickets holds the completed session tickets until they are sent to + // the handshaker service for processing. + sessionTickets [][]byte + // ticketSender sends session tickets to the S2A handshaker service. + ticketSender s2aTicketSender + // callComplete is a channel that blocks closing the record protocol until a + // pending call to the S2A completes. + callComplete chan bool +} + +// ConnParameters holds the parameters used for creating a new conn object. +type ConnParameters struct { + // NetConn is the TCP connection to the peer. This parameter is required. + NetConn net.Conn + // Ciphersuite is the TLS ciphersuite negotiated by the S2A handshaker + // service. This parameter is required. + Ciphersuite commonpb.Ciphersuite + // TLSVersion is the TLS version number negotiated by the S2A handshaker + // service. This parameter is required. + TLSVersion commonpb.TLSVersion + // InTrafficSecret is the traffic secret used to derive the session key for + // the inbound direction. This parameter is required. 
+ InTrafficSecret []byte + // OutTrafficSecret is the traffic secret used to derive the session key + // for the outbound direction. This parameter is required. + OutTrafficSecret []byte + // UnusedBuf is the data read from the network that has not yet been + // decrypted. This parameter is optional. If not provided, then no + // application data was sent in the same flight of messages as the final + // handshake message. + UnusedBuf []byte + // InSequence is the sequence number of the next, incoming, TLS record. + // This parameter is required. + InSequence uint64 + // OutSequence is the sequence number of the next, outgoing, TLS record. + // This parameter is required. + OutSequence uint64 + // HSAddr stores the address of the S2A handshaker service. This parameter + // is optional. If not provided, then TLS resumption is disabled. + HSAddr string + // ConnectionId is the connection identifier that was created and sent by + // S2A at the end of a handshake. + ConnectionID uint64 + // LocalIdentity is the local identity that was used by S2A during session + // setup and included in the session result. + LocalIdentity *commonpb.Identity + // EnsureProcessSessionTickets allows users to wait and ensure that all + // available session tickets are sent to S2A before a process completes. + EnsureProcessSessionTickets *sync.WaitGroup +} + +// NewConn creates a TLS record protocol that wraps the TCP connection. +func NewConn(o *ConnParameters) (net.Conn, error) { + if o == nil { + return nil, errors.New("conn options must not be nil") + } + if o.TLSVersion != commonpb.TLSVersion_TLS1_3 { + return nil, errors.New("TLS version must be TLS 1.3") + } + + inConn, err := halfconn.New(o.Ciphersuite, o.InTrafficSecret, o.InSequence) + if err != nil { + return nil, fmt.Errorf("failed to create inbound half connection: %v", err) + } + outConn, err := halfconn.New(o.Ciphersuite, o.OutTrafficSecret, o.OutSequence) + if err != nil { + return nil, fmt.Errorf("failed to create outbound half connection: %v", err) + } + + // The tag size for the in/out connections should be the same. + overheadSize := tlsRecordHeaderSize + tlsRecordTypeSize + inConn.TagSize() + var unusedBuf []byte + if o.UnusedBuf == nil { + // We pre-allocate unusedBuf to be of size + // 2*tlsRecordMaxSize-1 during initialization. We only read from the + // network into unusedBuf when unusedBuf does not contain a complete + // record and the incomplete record is at most tlsRecordMaxSize-1 + // (bytes). And we read at most tlsRecordMaxSize bytes of data from the + // network into unusedBuf at one time. Therefore, 2*tlsRecordMaxSize-1 + // is large enough to buffer data read from the network. + unusedBuf = make([]byte, 0, 2*tlsRecordMaxSize-1) + } else { + unusedBuf = make([]byte, len(o.UnusedBuf)) + copy(unusedBuf, o.UnusedBuf) + } + + tokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager() + if err != nil { + grpclog.Infof("failed to create single token access token manager: %v", err) + } + + s2aConn := &conn{ + Conn: o.NetConn, + inConn: inConn, + outConn: outConn, + unusedBuf: unusedBuf, + outRecordsBuf: make([]byte, tlsRecordMaxSize), + nextRecord: unusedBuf, + overheadSize: overheadSize, + ticketState: ticketsNotYetReceived, + // Pre-allocate the buffer for one session ticket message and the max + // plaintext size. This is the largest size that handshakeBuf will need + // to hold. The largest incomplete handshake message is the + // [handshake header size] + [max session ticket size] - 1. 
+ // Then, tlsRecordMaxPlaintextSize is the maximum size that will be + // appended to the handshakeBuf before the handshake message is + // completed. Therefore, the buffer size below should be large enough to + // buffer any handshake messages. + handshakeBuf: make([]byte, 0, tlsHandshakePrefixSize+tlsMaxSessionTicketSize+tlsRecordMaxPlaintextSize-1), + ticketSender: &ticketSender{ + hsAddr: o.HSAddr, + connectionID: o.ConnectionID, + localIdentity: o.LocalIdentity, + tokenManager: tokenManager, + ensureProcessSessionTickets: o.EnsureProcessSessionTickets, + }, + callComplete: make(chan bool), + } + return s2aConn, nil +} + +// Read reads and decrypts a TLS 1.3 record from the underlying connection, and +// copies any application data received from the peer into b. If the size of the +// payload is greater than len(b), Read retains the remaining bytes in an +// internal buffer, and subsequent calls to Read will read from this buffer +// until it is exhausted. At most 1 TLS record worth of application data is +// written to b for each call to Read. +// +// Note that for the user to efficiently call this method, the user should +// ensure that the buffer b is allocated such that the buffer does not have any +// unused segments. This can be done by calling Read via io.ReadFull, which +// continually calls Read until the specified buffer has been filled. Also note +// that the user should close the connection via Close() if an error is thrown +// by a call to Read. +func (p *conn) Read(b []byte) (n int, err error) { + p.readMutex.Lock() + defer p.readMutex.Unlock() + // Check if p.pendingApplication data has leftover application data from + // the previous call to Read. + if len(p.pendingApplicationData) == 0 { + // Read a full record from the wire. + record, err := p.readFullRecord() + if err != nil { + return 0, err + } + // Now we have a complete record, so split the header and validate it + // The TLS record is split into 2 pieces: the record header and the + // payload. The payload has the following form: + // [payload] = [ciphertext of application data] + // + [ciphertext of record type byte] + // + [(optionally) ciphertext of padding by zeros] + // + [tag] + header, payload, err := splitAndValidateHeader(record) + if err != nil { + return 0, err + } + // Decrypt the ciphertext. + p.pendingApplicationData, err = p.inConn.Decrypt(payload[:0], payload, header) + if err != nil { + return 0, err + } + // Remove the padding by zeros and the record type byte from the + // p.pendingApplicationData buffer. + msgType, err := p.stripPaddingAndType() + if err != nil { + return 0, err + } + // Check that the length of the plaintext after stripping the padding + // and record type byte is under the maximum plaintext size. + if len(p.pendingApplicationData) > tlsRecordMaxPlaintextSize { + return 0, errors.New("plaintext size larger than maximum") + } + // The expected message types are application data, alert, and + // handshake. For application data, the bytes are directly copied into + // b. For an alert, the type of the alert is checked and the connection + // is closed on a close notify alert. For a handshake message, the + // handshake message type is checked. The handshake message type can be + // a key update type, for which we advance the traffic secret, and a + // new session ticket type, for which we send the received ticket to S2A + // for processing. 
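Since a single Read returns at most one record's worth of application data, callers that need an exact number of bytes are expected to loop; the sketch below (not part of the vendored files; c is any net.Conn, such as the one returned by NewConn) shows the io.ReadFull pattern the Read documentation recommends.

package sketch

import (
	"io"
	"net"
)

// readExactly fills buf by looping over c.Read via io.ReadFull, which keeps
// calling Read until buf is full or an error occurs.
func readExactly(c net.Conn, buf []byte) (int, error) {
	return io.ReadFull(c, buf)
}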
+ switch msgType { + case applicationData: + if len(p.handshakeBuf) > 0 { + return 0, errors.New("application data received while processing fragmented handshake messages") + } + if p.ticketState == receivingTickets { + p.ticketState = notReceivingTickets + grpclog.Infof("Sending session tickets to S2A.") + p.ticketSender.sendTicketsToS2A(p.sessionTickets, p.callComplete) + } + case alert: + return 0, p.handleAlertMessage() + case handshake: + if err = p.handleHandshakeMessage(); err != nil { + return 0, err + } + return 0, nil + default: + return 0, errors.New("unknown record type") + } + } + // Write as much application data as possible to b, the output buffer. + n = copy(b, p.pendingApplicationData) + p.pendingApplicationData = p.pendingApplicationData[n:] + return n, nil +} + +// Write divides b into segments of size tlsRecordMaxPlaintextSize, builds a +// TLS 1.3 record (of type "application data") from each segment, and sends +// the record to the peer. It returns the number of plaintext bytes that were +// successfully sent to the peer. +func (p *conn) Write(b []byte) (n int, err error) { + p.writeMutex.Lock() + defer p.writeMutex.Unlock() + return p.writeTLSRecord(b, tlsApplicationData) +} + +// writeTLSRecord divides b into segments of size maxPlaintextBytesPerRecord, +// builds a TLS 1.3 record (of type recordType) from each segment, and sends +// the record to the peer. It returns the number of plaintext bytes that were +// successfully sent to the peer. +func (p *conn) writeTLSRecord(b []byte, recordType byte) (n int, err error) { + // Create a record of only header, record type, and tag if given empty + // byte array. + if len(b) == 0 { + recordEndIndex, _, err := p.buildRecord(b, recordType, 0) + if err != nil { + return 0, err + } + + // Write the bytes stored in outRecordsBuf to p.Conn. Since we return + // the number of plaintext bytes written without overhead, we will + // always return 0 while p.Conn.Write returns the entire record length. + _, err = p.Conn.Write(p.outRecordsBuf[:recordEndIndex]) + return 0, err + } + + numRecords := int(math.Ceil(float64(len(b)) / float64(tlsRecordMaxPlaintextSize))) + totalRecordsSize := len(b) + numRecords*p.overheadSize + partialBSize := len(b) + if totalRecordsSize > outBufMaxSize { + totalRecordsSize = outBufMaxSize + partialBSize = outBufMaxRecords * tlsRecordMaxPlaintextSize + } + if len(p.outRecordsBuf) < totalRecordsSize { + p.outRecordsBuf = make([]byte, totalRecordsSize) + } + for bStart := 0; bStart < len(b); bStart += partialBSize { + bEnd := bStart + partialBSize + if bEnd > len(b) { + bEnd = len(b) + } + partialB := b[bStart:bEnd] + recordEndIndex := 0 + for len(partialB) > 0 { + recordEndIndex, partialB, err = p.buildRecord(partialB, recordType, recordEndIndex) + if err != nil { + // Return the amount of bytes written prior to the error. + return bStart, err + } + } + // Write the bytes stored in outRecordsBuf to p.Conn. If there is an + // error, calculate the total number of plaintext bytes of complete + // records successfully written to the peer and return it. + nn, err := p.Conn.Write(p.outRecordsBuf[:recordEndIndex]) + if err != nil { + numberOfCompletedRecords := int(math.Floor(float64(nn) / float64(tlsRecordMaxSize))) + return bStart + numberOfCompletedRecords*tlsRecordMaxPlaintextSize, err + } + } + return len(b), nil +} + +// buildRecord builds a TLS 1.3 record of type recordType from plaintext, +// and writes the record to outRecordsBuf at recordStartIndex. 
The record will +// have at most tlsRecordMaxPlaintextSize bytes of payload. It returns the +// index of outRecordsBuf where the current record ends, as well as any +// remaining plaintext bytes. +func (p *conn) buildRecord(plaintext []byte, recordType byte, recordStartIndex int) (n int, remainingPlaintext []byte, err error) { + // Construct the payload, which consists of application data and record type. + dataLen := len(plaintext) + if dataLen > tlsRecordMaxPlaintextSize { + dataLen = tlsRecordMaxPlaintextSize + } + remainingPlaintext = plaintext[dataLen:] + newRecordBuf := p.outRecordsBuf[recordStartIndex:] + + copy(newRecordBuf[tlsRecordHeaderSize:], plaintext[:dataLen]) + newRecordBuf[tlsRecordHeaderSize+dataLen] = recordType + payload := newRecordBuf[tlsRecordHeaderSize : tlsRecordHeaderSize+dataLen+1] // 1 is for the recordType. + // Construct the header. + newRecordBuf[0] = tlsApplicationData + newRecordBuf[1] = tlsLegacyRecordVersion + newRecordBuf[2] = tlsLegacyRecordVersion + binary.BigEndian.PutUint16(newRecordBuf[3:], uint16(len(payload)+tlsTagSize)) + header := newRecordBuf[:tlsRecordHeaderSize] + + // Encrypt the payload using header as aad. + encryptedPayload, err := p.outConn.Encrypt(newRecordBuf[tlsRecordHeaderSize:][:0], payload, header) + if err != nil { + return 0, plaintext, err + } + recordStartIndex += len(header) + len(encryptedPayload) + return recordStartIndex, remainingPlaintext, nil +} + +func (p *conn) Close() error { + p.readMutex.Lock() + defer p.readMutex.Unlock() + p.writeMutex.Lock() + defer p.writeMutex.Unlock() + // If p.ticketState is equal to notReceivingTickets, then S2A has + // been sent a flight of session tickets, and we must wait for the + // call to S2A to complete before closing the record protocol. + if p.ticketState == notReceivingTickets { + <-p.callComplete + grpclog.Infof("Safe to close the connection because sending tickets to S2A is (already) complete.") + } + return p.Conn.Close() +} + +// stripPaddingAndType strips the padding by zeros and record type from +// p.pendingApplicationData and returns the record type. Note that +// p.pendingApplicationData should be of the form: +// [application data] + [record type byte] + [trailing zeros] +func (p *conn) stripPaddingAndType() (recordType, error) { + if len(p.pendingApplicationData) == 0 { + return 0, errors.New("application data had length 0") + } + i := len(p.pendingApplicationData) - 1 + // Search for the index of the record type byte. + for i > 0 { + if p.pendingApplicationData[i] != 0 { + break + } + i-- + } + rt := recordType(p.pendingApplicationData[i]) + p.pendingApplicationData = p.pendingApplicationData[:i] + return rt, nil +} + +// readFullRecord reads from the wire until a record is completed and returns +// the full record. +func (p *conn) readFullRecord() (fullRecord []byte, err error) { + fullRecord, p.nextRecord, err = parseReadBuffer(p.nextRecord, tlsRecordMaxPayloadSize) + if err != nil { + return nil, err + } + // Check whether the next record to be decrypted has been completely + // received. + if len(fullRecord) == 0 { + copy(p.unusedBuf, p.nextRecord) + p.unusedBuf = p.unusedBuf[:len(p.nextRecord)] + // Always copy next incomplete record to the beginning of the + // unusedBuf buffer and reset nextRecord to it. + p.nextRecord = p.unusedBuf + } + // Keep reading from the wire until we have a complete record. 
+ for len(fullRecord) == 0 { + if len(p.unusedBuf) == cap(p.unusedBuf) { + tmp := make([]byte, len(p.unusedBuf), cap(p.unusedBuf)+tlsRecordMaxSize) + copy(tmp, p.unusedBuf) + p.unusedBuf = tmp + } + n, err := p.Conn.Read(p.unusedBuf[len(p.unusedBuf):min(cap(p.unusedBuf), len(p.unusedBuf)+tlsRecordMaxSize)]) + if err != nil { + return nil, err + } + p.unusedBuf = p.unusedBuf[:len(p.unusedBuf)+n] + fullRecord, p.nextRecord, err = parseReadBuffer(p.unusedBuf, tlsRecordMaxPayloadSize) + if err != nil { + return nil, err + } + } + return fullRecord, nil +} + +// parseReadBuffer parses the provided buffer and returns a full record and any +// remaining bytes in that buffer. If the record is incomplete, nil is returned +// for the first return value and the given byte buffer is returned for the +// second return value. The length of the payload specified by the header should +// not be greater than maxLen, otherwise an error is returned. Note that this +// function does not allocate or copy any buffers. +func parseReadBuffer(b []byte, maxLen uint16) (fullRecord, remaining []byte, err error) { + // If the header is not complete, return the provided buffer as remaining + // buffer. + if len(b) < tlsRecordHeaderSize { + return nil, b, nil + } + msgLenField := b[tlsRecordHeaderTypeSize+tlsRecordHeaderLegacyRecordVersionSize : tlsRecordHeaderSize] + length := binary.BigEndian.Uint16(msgLenField) + if length > maxLen { + return nil, nil, fmt.Errorf("record length larger than the limit %d", maxLen) + } + if len(b) < int(length)+tlsRecordHeaderSize { + // Record is not complete yet. + return nil, b, nil + } + return b[:tlsRecordHeaderSize+length], b[tlsRecordHeaderSize+length:], nil +} + +// splitAndValidateHeader splits the header from the payload in the TLS 1.3 +// record and returns them. Note that the header is checked for validity, and an +// error is returned when an invalid header is parsed. Also note that this +// function does not allocate or copy any buffers. +func splitAndValidateHeader(record []byte) (header, payload []byte, err error) { + if len(record) < tlsRecordHeaderSize { + return nil, nil, fmt.Errorf("record was smaller than the header size") + } + header = record[:tlsRecordHeaderSize] + payload = record[tlsRecordHeaderSize:] + if header[0] != tlsApplicationData { + return nil, nil, fmt.Errorf("incorrect type in the header") + } + // Check the legacy record version, which should be 0x03, 0x03. + if header[1] != 0x03 || header[2] != 0x03 { + return nil, nil, fmt.Errorf("incorrect legacy record version in the header") + } + return header, payload, nil +} + +// handleAlertMessage handles an alert message. +func (p *conn) handleAlertMessage() error { + if len(p.pendingApplicationData) != tlsAlertSize { + return errors.New("invalid alert message size") + } + alertType := p.pendingApplicationData[1] + // Clear the body of the alert message. + p.pendingApplicationData = p.pendingApplicationData[:0] + if alertType == byte(closeNotify) { + return errors.New("received a close notify alert") + } + // TODO(matthewstevenson88): Add support for more alert types. + return fmt.Errorf("received an unrecognized alert type: %v", alertType) +} + +// parseHandshakeHeader parses a handshake message from the handshake buffer. +// It returns the message type, the message length, the message, the raw message +// that includes the type and length bytes and a flag indicating whether the +// handshake message has been fully parsed. i.e. whether the entire handshake +// message was in the handshake buffer. 
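For reference, a minimal sketch (separate from the vendored code) of the 5-byte record header that splitAndValidateHeader checks: the outer type is always application_data (23) once the handshake completes, the legacy version bytes are fixed at 0x03 0x03, and the last two bytes carry the payload length in big-endian order.

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	header := []byte{23, 0x03, 0x03, 0x00, 0x21} // type, legacy version, length
	payloadLen := binary.BigEndian.Uint16(header[3:5])
	fmt.Println(header[0] == 23, payloadLen) // true 33
}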
+func (p *conn) parseHandshakeMsg() (msgType byte, msgLen uint32, msg []byte, rawMsg []byte, ok bool) { + // Handle the case where the 4 byte handshake header is fragmented. + if len(p.handshakeBuf) < tlsHandshakePrefixSize { + return 0, 0, nil, nil, false + } + msgType = p.handshakeBuf[0] + msgLen = bigEndianInt24(p.handshakeBuf[tlsHandshakeMsgTypeSize : tlsHandshakeMsgTypeSize+tlsHandshakeLengthSize]) + if msgLen > uint32(len(p.handshakeBuf)-tlsHandshakePrefixSize) { + return 0, 0, nil, nil, false + } + msg = p.handshakeBuf[tlsHandshakePrefixSize : tlsHandshakePrefixSize+msgLen] + rawMsg = p.handshakeBuf[:tlsHandshakeMsgTypeSize+tlsHandshakeLengthSize+msgLen] + p.handshakeBuf = p.handshakeBuf[tlsHandshakePrefixSize+msgLen:] + return msgType, msgLen, msg, rawMsg, true +} + +// handleHandshakeMessage handles a handshake message. Note that the first +// complete handshake message from the handshake buffer is removed, if it +// exists. +func (p *conn) handleHandshakeMessage() error { + // Copy the pending application data to the handshake buffer. At this point, + // we are guaranteed that the pending application data contains only parts + // of a handshake message. + p.handshakeBuf = append(p.handshakeBuf, p.pendingApplicationData...) + p.pendingApplicationData = p.pendingApplicationData[:0] + // Several handshake messages may be coalesced into a single record. + // Continue reading them until the handshake buffer is empty. + for len(p.handshakeBuf) > 0 { + handshakeMsgType, msgLen, msg, rawMsg, ok := p.parseHandshakeMsg() + if !ok { + // The handshake could not be fully parsed, so read in another + // record and try again later. + break + } + switch handshakeMsgType { + case tlsHandshakeKeyUpdateType: + if msgLen != tlsHandshakeKeyUpdateMsgSize { + return errors.New("invalid handshake key update message length") + } + if len(p.handshakeBuf) != 0 { + return errors.New("key update message must be the last message of a handshake record") + } + if err := p.handleKeyUpdateMsg(msg); err != nil { + return err + } + case tlsHandshakeNewSessionTicketType: + // Ignore tickets that are received after a batch of tickets has + // been sent to S2A. + if p.ticketState == notReceivingTickets { + continue + } + if p.ticketState == ticketsNotYetReceived { + p.ticketState = receivingTickets + } + p.sessionTickets = append(p.sessionTickets, rawMsg) + if len(p.sessionTickets) == maxAllowedTickets { + p.ticketState = notReceivingTickets + grpclog.Infof("Sending session tickets to S2A.") + p.ticketSender.sendTicketsToS2A(p.sessionTickets, p.callComplete) + } + default: + return errors.New("unknown handshake message type") + } + } + return nil +} + +func buildKeyUpdateRequest() []byte { + b := make([]byte, tlsHandshakePrefixSize+tlsHandshakeKeyUpdateMsgSize) + b[0] = tlsHandshakeKeyUpdateType + b[1] = 0 + b[2] = 0 + b[3] = tlsHandshakeKeyUpdateMsgSize + b[4] = byte(updateNotRequested) + return b +} + +// handleKeyUpdateMsg handles a key update message. +func (p *conn) handleKeyUpdateMsg(msg []byte) error { + keyUpdateRequest := msg[0] + if keyUpdateRequest != byte(updateNotRequested) && + keyUpdateRequest != byte(updateRequested) { + return errors.New("invalid handshake key update message") + } + if err := p.inConn.UpdateKey(); err != nil { + return err + } + // Send a key update message back to the peer if requested. 
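As a worked example of the handshake framing used here (illustrative, not part of the vendored files): a handshake message is a 1-byte type followed by a 3-byte big-endian length and the body, so the pre-constructed KeyUpdate message is exactly the five bytes built by buildKeyUpdateRequest.

package main

import "fmt"

func main() {
	msg := []byte{24, 0, 0, 1, 0} // key_update(24), length = 1, update_not_requested(0)
	// Same arithmetic as the bigEndianInt24 helper used by parseHandshakeMsg.
	length := uint32(msg[1])<<16 | uint32(msg[2])<<8 | uint32(msg[3])
	fmt.Println(length, msg[4]) // 1 0
}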
+ if keyUpdateRequest == byte(updateRequested) { + p.writeMutex.Lock() + defer p.writeMutex.Unlock() + n, err := p.writeTLSRecord(preConstructedKeyUpdateMsg, byte(handshake)) + if err != nil { + return err + } + if n != tlsHandshakePrefixSize+tlsHandshakeKeyUpdateMsgSize { + return errors.New("key update request message wrote less bytes than expected") + } + if err = p.outConn.UpdateKey(); err != nil { + return err + } + } + return nil +} + +// bidEndianInt24 converts the given byte buffer of at least size 3 and +// outputs the resulting 24 bit integer as a uint32. This is needed because +// TLS 1.3 requires 3 byte integers, and the binary.BigEndian package does +// not provide a way to transform a byte buffer into a 3 byte integer. +func bigEndianInt24(b []byte) uint32 { + _ = b[2] // bounds check hint to compiler; see golang.org/issue/14808 + return uint32(b[2]) | uint32(b[1])<<8 | uint32(b[0])<<16 +} + +func min(a, b int) int { + if a < b { + return a + } + return b +} diff -Nru temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/record/ticketsender.go temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/record/ticketsender.go --- temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/record/ticketsender.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/record/ticketsender.go 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,176 @@ +/* + * + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package record + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/google/s2a-go/internal/handshaker/service" + commonpb "github.com/google/s2a-go/internal/proto/common_go_proto" + s2apb "github.com/google/s2a-go/internal/proto/s2a_go_proto" + "github.com/google/s2a-go/internal/tokenmanager" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" +) + +// sessionTimeout is the timeout for creating a session with the S2A handshaker +// service. +const sessionTimeout = time.Second * 5 + +// s2aTicketSender sends session tickets to the S2A handshaker service. +type s2aTicketSender interface { + // sendTicketsToS2A sends the given session tickets to the S2A handshaker + // service. + sendTicketsToS2A(sessionTickets [][]byte, callComplete chan bool) +} + +// ticketStream is the stream used to send and receive session information. +type ticketStream interface { + Send(*s2apb.SessionReq) error + Recv() (*s2apb.SessionResp, error) +} + +type ticketSender struct { + // hsAddr stores the address of the S2A handshaker service. + hsAddr string + // connectionID is the connection identifier that was created and sent by + // S2A at the end of a handshake. + connectionID uint64 + // localIdentity is the local identity that was used by S2A during session + // setup and included in the session result. + localIdentity *commonpb.Identity + // tokenManager manages access tokens for authenticating to S2A. 
+ tokenManager tokenmanager.AccessTokenManager + // ensureProcessSessionTickets allows users to wait and ensure that all + // available session tickets are sent to S2A before a process completes. + ensureProcessSessionTickets *sync.WaitGroup +} + +// sendTicketsToS2A sends the given sessionTickets to the S2A handshaker +// service. This is done asynchronously and writes to the error logs if an error +// occurs. +func (t *ticketSender) sendTicketsToS2A(sessionTickets [][]byte, callComplete chan bool) { + // Note that the goroutine is in the function rather than at the caller + // because the fake ticket sender used for testing must run synchronously + // so that the session tickets can be accessed from it after the tests have + // been run. + if t.ensureProcessSessionTickets != nil { + t.ensureProcessSessionTickets.Add(1) + } + go func() { + if err := func() error { + defer func() { + if t.ensureProcessSessionTickets != nil { + t.ensureProcessSessionTickets.Done() + } + }() + hsConn, err := service.Dial(t.hsAddr) + if err != nil { + return err + } + client := s2apb.NewS2AServiceClient(hsConn) + ctx, cancel := context.WithTimeout(context.Background(), sessionTimeout) + defer cancel() + session, err := client.SetUpSession(ctx) + if err != nil { + return err + } + defer func() { + if err := session.CloseSend(); err != nil { + grpclog.Error(err) + } + }() + return t.writeTicketsToStream(session, sessionTickets) + }(); err != nil { + grpclog.Errorf("failed to send resumption tickets to S2A with identity: %v, %v", + t.localIdentity, err) + } + callComplete <- true + close(callComplete) + }() +} + +// writeTicketsToStream writes the given session tickets to the given stream. +func (t *ticketSender) writeTicketsToStream(stream ticketStream, sessionTickets [][]byte) error { + if err := stream.Send( + &s2apb.SessionReq{ + ReqOneof: &s2apb.SessionReq_ResumptionTicket{ + ResumptionTicket: &s2apb.ResumptionTicketReq{ + InBytes: sessionTickets, + ConnectionId: t.connectionID, + LocalIdentity: t.localIdentity, + }, + }, + AuthMechanisms: t.getAuthMechanisms(), + }, + ); err != nil { + return err + } + sessionResp, err := stream.Recv() + if err != nil { + return err + } + if sessionResp.GetStatus().GetCode() != uint32(codes.OK) { + return fmt.Errorf("s2a session ticket response had error status: %v, %v", + sessionResp.GetStatus().GetCode(), sessionResp.GetStatus().GetDetails()) + } + return nil +} + +func (t *ticketSender) getAuthMechanisms() []*s2apb.AuthenticationMechanism { + if t.tokenManager == nil { + return nil + } + // First handle the special case when no local identity has been provided + // by the application. In this case, an AuthenticationMechanism with no local + // identity will be sent. + if t.localIdentity == nil { + token, err := t.tokenManager.DefaultToken() + if err != nil { + grpclog.Infof("unable to get token for empty local identity: %v", err) + return nil + } + return []*s2apb.AuthenticationMechanism{ + { + MechanismOneof: &s2apb.AuthenticationMechanism_Token{ + Token: token, + }, + }, + } + } + + // Next, handle the case where the application (or the S2A) has specified + // a local identity. 
+ token, err := t.tokenManager.Token(t.localIdentity) + if err != nil { + grpclog.Infof("unable to get token for local identity %v: %v", t.localIdentity, err) + return nil + } + return []*s2apb.AuthenticationMechanism{ + { + Identity: t.localIdentity, + MechanismOneof: &s2apb.AuthenticationMechanism_Token{ + Token: token, + }, + }, + } +} diff -Nru temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/tokenmanager/tokenmanager.go temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/tokenmanager/tokenmanager.go --- temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/tokenmanager/tokenmanager.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/tokenmanager/tokenmanager.go 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,70 @@ +/* + * + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package tokenmanager provides tokens for authenticating to S2A. +package tokenmanager + +import ( + "fmt" + "os" + + commonpb "github.com/google/s2a-go/internal/proto/common_go_proto" +) + +const ( + s2aAccessTokenEnvironmentVariable = "S2A_ACCESS_TOKEN" +) + +// AccessTokenManager manages tokens for authenticating to S2A. +type AccessTokenManager interface { + // DefaultToken returns a token that an application with no specified local + // identity must use to authenticate to S2A. + DefaultToken() (token string, err error) + // Token returns a token that an application with local identity equal to + // identity must use to authenticate to S2A. + Token(identity *commonpb.Identity) (token string, err error) +} + +type singleTokenAccessTokenManager struct { + token string +} + +// NewSingleTokenAccessTokenManager returns a new AccessTokenManager instance +// that will always manage the same token. +// +// The token to be managed is read from the s2aAccessTokenEnvironmentVariable +// environment variable. If this environment variable is not set, then this +// function returns an error. +func NewSingleTokenAccessTokenManager() (AccessTokenManager, error) { + token, variableExists := os.LookupEnv(s2aAccessTokenEnvironmentVariable) + if !variableExists { + return nil, fmt.Errorf("%s environment variable is not set", s2aAccessTokenEnvironmentVariable) + } + return &singleTokenAccessTokenManager{token: token}, nil +} + +// DefaultToken always returns the token managed by the +// singleTokenAccessTokenManager. +func (m *singleTokenAccessTokenManager) DefaultToken() (string, error) { + return m.token, nil +} + +// Token always returns the token managed by the singleTokenAccessTokenManager. 
+func (m *singleTokenAccessTokenManager) Token(*commonpb.Identity) (string, error) { + return m.token, nil +} diff -Nru temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/v2/README.md temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/v2/README.md --- temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/v2/README.md 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/v2/README.md 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1 @@ +**This directory has the implementation of the S2Av2's gRPC-Go client libraries** diff -Nru temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/v2/certverifier/certverifier.go temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/v2/certverifier/certverifier.go --- temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/v2/certverifier/certverifier.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/v2/certverifier/certverifier.go 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,122 @@ +/* + * + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package certverifier offloads verifications to S2Av2. +package certverifier + +import ( + "crypto/x509" + "fmt" + + "github.com/google/s2a-go/stream" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + + s2av2pb "github.com/google/s2a-go/internal/proto/v2/s2a_go_proto" +) + +// VerifyClientCertificateChain builds a SessionReq, sends it to S2Av2 and +// receives a SessionResp. +func VerifyClientCertificateChain(verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, s2AStream stream.S2AStream) func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { + return func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { + // Offload verification to S2Av2. + if grpclog.V(1) { + grpclog.Infof("Sending request to S2Av2 for client peer cert chain validation.") + } + if err := s2AStream.Send(&s2av2pb.SessionReq{ + ReqOneof: &s2av2pb.SessionReq_ValidatePeerCertificateChainReq{ + ValidatePeerCertificateChainReq: &s2av2pb.ValidatePeerCertificateChainReq{ + Mode: verificationMode, + PeerOneof: &s2av2pb.ValidatePeerCertificateChainReq_ClientPeer_{ + ClientPeer: &s2av2pb.ValidatePeerCertificateChainReq_ClientPeer{ + CertificateChain: rawCerts, + }, + }, + }, + }, + }); err != nil { + grpclog.Infof("Failed to send request to S2Av2 for client peer cert chain validation.") + return err + } + + // Get the response from S2Av2. + resp, err := s2AStream.Recv() + if err != nil { + grpclog.Infof("Failed to receive client peer cert chain validation response from S2Av2.") + return err + } + + // Parse the response. 
+ if (resp.GetStatus() != nil) && (resp.GetStatus().Code != uint32(codes.OK)) { + return fmt.Errorf("failed to offload client cert verification to S2A: %d, %v", resp.GetStatus().Code, resp.GetStatus().Details) + + } + + if resp.GetValidatePeerCertificateChainResp().ValidationResult != s2av2pb.ValidatePeerCertificateChainResp_SUCCESS { + return fmt.Errorf("client cert verification failed: %v", resp.GetValidatePeerCertificateChainResp().ValidationDetails) + } + + return nil + } +} + +// VerifyServerCertificateChain builds a SessionReq, sends it to S2Av2 and +// receives a SessionResp. +func VerifyServerCertificateChain(hostname string, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, s2AStream stream.S2AStream, serverAuthorizationPolicy []byte) func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { + return func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { + // Offload verification to S2Av2. + if grpclog.V(1) { + grpclog.Infof("Sending request to S2Av2 for server peer cert chain validation.") + } + if err := s2AStream.Send(&s2av2pb.SessionReq{ + ReqOneof: &s2av2pb.SessionReq_ValidatePeerCertificateChainReq{ + ValidatePeerCertificateChainReq: &s2av2pb.ValidatePeerCertificateChainReq{ + Mode: verificationMode, + PeerOneof: &s2av2pb.ValidatePeerCertificateChainReq_ServerPeer_{ + ServerPeer: &s2av2pb.ValidatePeerCertificateChainReq_ServerPeer{ + CertificateChain: rawCerts, + ServerHostname: hostname, + SerializedUnrestrictedClientPolicy: serverAuthorizationPolicy, + }, + }, + }, + }, + }); err != nil { + grpclog.Infof("Failed to send request to S2Av2 for server peer cert chain validation.") + return err + } + + // Get the response from S2Av2. + resp, err := s2AStream.Recv() + if err != nil { + grpclog.Infof("Failed to receive server peer cert chain validation response from S2Av2.") + return err + } + + // Parse the response. 
+ if (resp.GetStatus() != nil) && (resp.GetStatus().Code != uint32(codes.OK)) { + return fmt.Errorf("failed to offload server cert verification to S2A: %d, %v", resp.GetStatus().Code, resp.GetStatus().Details) + } + + if resp.GetValidatePeerCertificateChainResp().ValidationResult != s2av2pb.ValidatePeerCertificateChainResp_SUCCESS { + return fmt.Errorf("server cert verification failed: %v", resp.GetValidatePeerCertificateChainResp().ValidationDetails) + } + + return nil + } +} Binary files /tmp/tmp59wy0n4p/rKeWR6Ifus/temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_intermediate_cert.der and /tmp/tmp59wy0n4p/kO5fuMPlFY/temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_intermediate_cert.der differ Binary files /tmp/tmp59wy0n4p/rKeWR6Ifus/temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_leaf_cert.der and /tmp/tmp59wy0n4p/kO5fuMPlFY/temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_leaf_cert.der differ Binary files /tmp/tmp59wy0n4p/rKeWR6Ifus/temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_root_cert.der and /tmp/tmp59wy0n4p/kO5fuMPlFY/temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_root_cert.der differ Binary files /tmp/tmp59wy0n4p/rKeWR6Ifus/temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_intermediate_cert.der and /tmp/tmp59wy0n4p/kO5fuMPlFY/temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_intermediate_cert.der differ Binary files /tmp/tmp59wy0n4p/rKeWR6Ifus/temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_leaf_cert.der and /tmp/tmp59wy0n4p/kO5fuMPlFY/temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_leaf_cert.der differ Binary files /tmp/tmp59wy0n4p/rKeWR6Ifus/temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_root_cert.der and /tmp/tmp59wy0n4p/kO5fuMPlFY/temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_root_cert.der differ diff -Nru temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/v2/remotesigner/remotesigner.go temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/v2/remotesigner/remotesigner.go --- temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/v2/remotesigner/remotesigner.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/v2/remotesigner/remotesigner.go 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,186 @@ +/* + * + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package remotesigner offloads private key operations to S2Av2. 
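Both verifier constructors return closures with the func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error shape, which is the signature of crypto/tls's Config.VerifyPeerCertificate hook. The sketch below is illustrative only: the internal import paths are shown for context, and the stream and verification mode are placeholders supplied by the caller.

package sketch

import (
	"crypto/tls"

	"github.com/google/s2a-go/internal/v2/certverifier"
	"github.com/google/s2a-go/stream"

	s2av2pb "github.com/google/s2a-go/internal/proto/v2/s2a_go_proto"
)

// serverVerifyingConfig installs the S2A-offloaded verifier as the
// VerifyPeerCertificate callback; default chain verification is skipped
// because S2Av2 performs it instead.
func serverVerifyingConfig(hostname string, mode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, s2AStream stream.S2AStream) *tls.Config {
	return &tls.Config{
		InsecureSkipVerify:    true,
		VerifyPeerCertificate: certverifier.VerifyServerCertificateChain(hostname, mode, s2AStream, nil),
	}
}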
+package remotesigner + +import ( + "crypto" + "crypto/rsa" + "crypto/x509" + "fmt" + "io" + + "github.com/google/s2a-go/stream" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + + s2av2pb "github.com/google/s2a-go/internal/proto/v2/s2a_go_proto" +) + +// remoteSigner implementes the crypto.Signer interface. +type remoteSigner struct { + leafCert *x509.Certificate + s2AStream stream.S2AStream +} + +// New returns an instance of RemoteSigner, an implementation of the +// crypto.Signer interface. +func New(leafCert *x509.Certificate, s2AStream stream.S2AStream) crypto.Signer { + return &remoteSigner{leafCert, s2AStream} +} + +func (s *remoteSigner) Public() crypto.PublicKey { + return s.leafCert.PublicKey +} + +func (s *remoteSigner) Sign(rand io.Reader, digest []byte, opts crypto.SignerOpts) (signature []byte, err error) { + signatureAlgorithm, err := getSignatureAlgorithm(opts, s.leafCert) + if err != nil { + return nil, err + } + + req, err := getSignReq(signatureAlgorithm, digest) + if err != nil { + return nil, err + } + if grpclog.V(1) { + grpclog.Infof("Sending request to S2Av2 for signing operation.") + } + if err := s.s2AStream.Send(&s2av2pb.SessionReq{ + ReqOneof: &s2av2pb.SessionReq_OffloadPrivateKeyOperationReq{ + OffloadPrivateKeyOperationReq: req, + }, + }); err != nil { + grpclog.Infof("Failed to send request to S2Av2 for signing operation.") + return nil, err + } + + resp, err := s.s2AStream.Recv() + if err != nil { + grpclog.Infof("Failed to receive signing operation response from S2Av2.") + return nil, err + } + + if (resp.GetStatus() != nil) && (resp.GetStatus().Code != uint32(codes.OK)) { + return nil, fmt.Errorf("failed to offload signing with private key to S2A: %d, %v", resp.GetStatus().Code, resp.GetStatus().Details) + } + + return resp.GetOffloadPrivateKeyOperationResp().GetOutBytes(), nil +} + +// getCert returns the leafCert field in s. +func (s *remoteSigner) getCert() *x509.Certificate { + return s.leafCert +} + +// getStream returns the s2AStream field in s. 
+func (s *remoteSigner) getStream() stream.S2AStream { + return s.s2AStream +} + +func getSignReq(signatureAlgorithm s2av2pb.SignatureAlgorithm, digest []byte) (*s2av2pb.OffloadPrivateKeyOperationReq, error) { + if (signatureAlgorithm == s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_RSA_PKCS1_SHA256) || (signatureAlgorithm == s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_ECDSA_SECP256R1_SHA256) || (signatureAlgorithm == s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_RSA_PSS_RSAE_SHA256) { + return &s2av2pb.OffloadPrivateKeyOperationReq{ + Operation: s2av2pb.OffloadPrivateKeyOperationReq_SIGN, + SignatureAlgorithm: signatureAlgorithm, + InBytes: &s2av2pb.OffloadPrivateKeyOperationReq_Sha256Digest{ + Sha256Digest: digest, + }, + }, nil + } else if (signatureAlgorithm == s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_RSA_PKCS1_SHA384) || (signatureAlgorithm == s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_ECDSA_SECP384R1_SHA384) || (signatureAlgorithm == s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_RSA_PSS_RSAE_SHA384) { + return &s2av2pb.OffloadPrivateKeyOperationReq{ + Operation: s2av2pb.OffloadPrivateKeyOperationReq_SIGN, + SignatureAlgorithm: signatureAlgorithm, + InBytes: &s2av2pb.OffloadPrivateKeyOperationReq_Sha384Digest{ + Sha384Digest: digest, + }, + }, nil + } else if (signatureAlgorithm == s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_RSA_PKCS1_SHA512) || (signatureAlgorithm == s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_ECDSA_SECP521R1_SHA512) || (signatureAlgorithm == s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_RSA_PSS_RSAE_SHA512) || (signatureAlgorithm == s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_ED25519) { + return &s2av2pb.OffloadPrivateKeyOperationReq{ + Operation: s2av2pb.OffloadPrivateKeyOperationReq_SIGN, + SignatureAlgorithm: signatureAlgorithm, + InBytes: &s2av2pb.OffloadPrivateKeyOperationReq_Sha512Digest{ + Sha512Digest: digest, + }, + }, nil + } else { + return nil, fmt.Errorf("unknown signature algorithm: %v", signatureAlgorithm) + } +} + +// getSignatureAlgorithm returns the signature algorithm that S2A must use when +// performing a signing operation that has been offloaded by an application +// using the crypto/tls libraries. 
+func getSignatureAlgorithm(opts crypto.SignerOpts, leafCert *x509.Certificate) (s2av2pb.SignatureAlgorithm, error) { + if opts == nil || leafCert == nil { + return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_UNSPECIFIED, fmt.Errorf("unknown signature algorithm") + } + switch leafCert.PublicKeyAlgorithm { + case x509.RSA: + if rsaPSSOpts, ok := opts.(*rsa.PSSOptions); ok { + return rsaPSSAlgorithm(rsaPSSOpts) + } + return rsaPPKCS1Algorithm(opts) + case x509.ECDSA: + return ecdsaAlgorithm(opts) + case x509.Ed25519: + return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_ED25519, nil + default: + return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_UNSPECIFIED, fmt.Errorf("unknown signature algorithm: %q", leafCert.PublicKeyAlgorithm) + } +} + +func rsaPSSAlgorithm(opts *rsa.PSSOptions) (s2av2pb.SignatureAlgorithm, error) { + switch opts.HashFunc() { + case crypto.SHA256: + return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_RSA_PSS_RSAE_SHA256, nil + case crypto.SHA384: + return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_RSA_PSS_RSAE_SHA384, nil + case crypto.SHA512: + return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_RSA_PSS_RSAE_SHA512, nil + default: + return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_UNSPECIFIED, fmt.Errorf("unknown signature algorithm") + } +} + +func rsaPPKCS1Algorithm(opts crypto.SignerOpts) (s2av2pb.SignatureAlgorithm, error) { + switch opts.HashFunc() { + case crypto.SHA256: + return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_RSA_PKCS1_SHA256, nil + case crypto.SHA384: + return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_RSA_PKCS1_SHA384, nil + case crypto.SHA512: + return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_RSA_PKCS1_SHA512, nil + default: + return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_UNSPECIFIED, fmt.Errorf("unknown signature algorithm") + } +} + +func ecdsaAlgorithm(opts crypto.SignerOpts) (s2av2pb.SignatureAlgorithm, error) { + switch opts.HashFunc() { + case crypto.SHA256: + return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_ECDSA_SECP256R1_SHA256, nil + case crypto.SHA384: + return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_ECDSA_SECP384R1_SHA384, nil + case crypto.SHA512: + return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_ECDSA_SECP521R1_SHA512, nil + default: + return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_UNSPECIFIED, fmt.Errorf("unknown signature algorithm") + } +} Binary files /tmp/tmp59wy0n4p/rKeWR6Ifus/temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.der and /tmp/tmp59wy0n4p/kO5fuMPlFY/temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.der differ diff -Nru temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.pem temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.pem --- temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.pem 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.pem 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIID8TCCAtmgAwIBAgIUKXNlBRVe6UepjQUijIFPZBd/4qYwDQYJKoZIhvcNAQEL +BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2 +YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE +AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN +MjIwNTMxMjAwMzE1WhcNNDIwNTI2MjAwMzE1WjCBhzELMAkGA1UEBhMCVVMxCzAJ +BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx 
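Editorial aside (not part of the patch): the remoteSigner type vendored above satisfies crypto.Signer, which is the hook Go's TLS stack uses to delegate private-key operations to an external service. A minimal sketch, using only the standard library, of how any such signer is attached to a tls.Certificate; the helper name buildCert and its arguments are hypothetical and not part of s2a-go.

package example

import (
    "crypto"
    "crypto/tls"
    "crypto/x509"
)

// buildCert attaches a delegating signer (for example the remote signer
// above) to a tls.Certificate: Certificate carries the raw DER chain, Leaf
// is the parsed end-entity certificate, and PrivateKey may be any
// crypto.Signer, so the private key itself never has to live in-process.
func buildCert(leafDER []byte, signer crypto.Signer) (tls.Certificate, error) {
    leaf, err := x509.ParseCertificate(leafDER)
    if err != nil {
        return tls.Certificate{}, err
    }
    return tls.Certificate{
        Certificate: [][]byte{leafDER},
        Leaf:        leaf,
        PrivateKey:  signer,
    }, nil
}

This mirrors what the tlsconfigstore file later in this patch does with remotesigner.New(cert.Leaf, s2AStream).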
+ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ +KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBAOOFuIucH7XXfohGxKd3uR/ihUA/LdduR9I8kfpUEbq5BOt8xZe5/Yn9 +a1ozEHVW6cOAbHbnwAR8tkSgZ/t42QIA2k77HWU1Jh2xiEIsJivo3imm4/kZWuR0 +OqPh7MhzxpR/hvNwpI5mJsAVBWFMa5KtecFZLnyZtwHylrRN1QXzuLrOxuKFufK3 +RKbTABScn5RbZL976H/jgfSeXrbt242NrIoBnVe6fRbekbq2DQ6zFArbQMUgHjHK +P0UqBgdr1QmHfi9KytFyx9BTP3gXWnWIu+bY7/v7qKJMHFwGETo+dCLWYevJL316 +HnLfhApDMfP8U+Yv/y1N/YvgaSOSlEcCAwEAAaNTMFEwHQYDVR0OBBYEFKhAU4nu +0h/lrnggbIGvx4ej0WklMB8GA1UdIwQYMBaAFKhAU4nu0h/lrnggbIGvx4ej0Wkl +MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAE/6NghzQ5fu6yR6 +EHKbj/YMrFdT7aGn5n2sAf7wJ33LIhiFHkpWBsVlm7rDtZtwhe891ZK/P60anlg9 +/P0Ua53tSRVRmCvTnEbXWOVMN4is6MsR7BlmzUxl4AtIn7jbeifEwRL7B4xDYmdA +QrQnsqoz45dLgS5xK4WDqXATP09Q91xQDuhud/b+A4jrvgwFASmL7rMIZbp4f1JQ +nlnl/9VoTBQBvJiWkDUtQDMpRLtauddEkv4AGz75p5IspXWD6cOemuh2iQec11xD +X20rs2WZbAcAiUa3nmy8OKYw435vmpj8gp39WYbX/Yx9TymrFFbVY92wYn+quTco +pKklVz0= +-----END CERTIFICATE----- diff -Nru temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_key.pem temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_key.pem --- temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_key.pem 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_key.pem 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEogIBAAKCAQEA44W4i5wftdd+iEbEp3e5H+KFQD8t125H0jyR+lQRurkE63zF +l7n9if1rWjMQdVbpw4BsdufABHy2RKBn+3jZAgDaTvsdZTUmHbGIQiwmK+jeKabj ++Rla5HQ6o+HsyHPGlH+G83CkjmYmwBUFYUxrkq15wVkufJm3AfKWtE3VBfO4us7G +4oW58rdEptMAFJyflFtkv3vof+OB9J5etu3bjY2sigGdV7p9Ft6RurYNDrMUCttA +xSAeMco/RSoGB2vVCYd+L0rK0XLH0FM/eBdadYi75tjv+/uookwcXAYROj50ItZh +68kvfXoect+ECkMx8/xT5i//LU39i+BpI5KURwIDAQABAoIBABgyjo/6iLzUMFbZ +/+w3pW6orrdIgN2akvTfED9pVYFgUA+jc3hRhY95bkNnjuaL2cy7Cc4Tk65mfRQL +Y0OxdJLr+EvSFSxAXM9npDA1ddHRsF8JqtFBSxNk8R+g1Yf0GDiO35Fgd3/ViWWA +VtQkRoSRApP3oiQKTRZd8H04keFR+PvmDk/Lq11l3Kc24A1PevKIPX1oI990ggw9 +9i4uSV+cnuMxmcI9xxJtgwdDFdjr39l2arLOHr4s6LGoV2IOdXHNlv5xRqWUZ0FH +MDHowkLgwDrdSTnNeaVNkce14Gqx+bd4hNaLCdKXMpedBTEmrut3f3hdV1kKjaKt +aqRYr8ECgYEA/YDGZY2jvFoHHBywlqmEMFrrCvQGH51m5R1Ntpkzr+Rh3YCmrpvq +xgwJXING0PUw3dz+xrH5lJICrfNE5Kt3fPu1rAEy+13mYsNowghtUq2Rtu0Hsjjx +2E3Bf8vEB6RNBMmGkUpTTIAroGF5tpJoRvfnWax+k4pFdrKYFtyZdNcCgYEA5cNv +EPltvOobjTXlUmtVP3n27KZN2aXexTcagLzRxE9CV4cYySENl3KuOMmccaZpIl6z +aHk6BT4X+M0LqElNUczrInfVqI+SGAFLGy7W6CJaqSr6cpyFUP/fosKpm6wKGgLq +udHfpvz5rckhKd8kJxFLvhGOK9yN5qpzih0gfhECgYAJfwRvk3G5wYmYpP58dlcs +VIuPenqsPoI3PPTHTU/hW+XKnWIhElgmGRdUrto9Q6IT/Y5RtSMLTLjq+Tzwb/fm +56rziYv2XJsfwgAvnI8z1Kqrto9ePsHYf3krJ1/thVsZPc9bq/QY3ohD1sLvcuaT +GgBBnLOVJU3a12/ZE2RwOwKBgF0csWMAoj8/5IB6if+3ral2xOGsl7oPZVMo/J2V +Z7EVqb4M6rd/pKFugTpUQgkwtkSOekhpcGD1hAN5HTNK2YG/+L5UMAsKe9sskwJm +HgOfAHy0BSDzW3ey6i9skg2bT9Cww+0gJ3Hl7U1HSCBO5LjMYpSZSrNtwzfqdb5Q +BX3xAoGARZdR28Ej3+/+0+fz47Yu2h4z0EI/EbrudLOWY936jIeAVwHckI3+BuqH +qR4poj1gfbnMxNuI9UzIXzjEmGewx9kDZ7IYnvloZKqoVQODO5GlKF2ja6IcMNlh +GCNdD6PSAS6HcmalmWo9sj+1YMkrl+GJikKZqVBHrHNwMGAG67w= +-----END RSA PRIVATE KEY----- Binary files /tmp/tmp59wy0n4p/rKeWR6Ifus/temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.der and /tmp/tmp59wy0n4p/kO5fuMPlFY/temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.der differ diff -Nru 
temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.pem temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.pem --- temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.pem 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.pem 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIID8TCCAtmgAwIBAgIUKCoDuLtiZXvhsBY2RoDm0ugizJ8wDQYJKoZIhvcNAQEL +BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2 +YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE +AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN +MjIwNTMxMjAwODI1WhcNNDIwNTI2MjAwODI1WjCBhzELMAkGA1UEBhMCVVMxCzAJ +BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx +ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ +KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBAKK1++PXQ+M3hjYH/v0K4UEYl5ljzpNM1i52eQM+gFooojT87PDSaphT +fs0PXy/PTAjHBEvPhWpOpmQXfJNYzjwcCvg66hbqkv++/VTZiFLAsHagzkEz+FRJ +qT5Eq7G5FLyw1izX1uxyPN7tAEWEEg7eqsiaXD3Cq8+TYN9cjirPeF7RZF8yFCYE +xqvbo+Yc6RL6xw19iXVTfctRgQe581KQuIY5/LXo3dWDEilFdsADAe8XAEcO64es +Ow0g1UvXLnpXSE151kXBFb3sKH/ZjCecDYMCIMEb4sWLSblkSxJ5sNSmXIG4wtr2 +Qnii7CXZgnVYraQE/Jyh+NMQANuoSdMCAwEAAaNTMFEwHQYDVR0OBBYEFAyQQQuM +ab+YUQqjK8dVVOoHVFmXMB8GA1UdIwQYMBaAFAyQQQuMab+YUQqjK8dVVOoHVFmX +MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBADj0vQ6ykWhicoqR +e6VZMwlEJV7/DSvWWKBd9MUjfKye0A4565ya5lmnzP3DiD3nqGe3miqmLsXKDs+X +POqlPXTWIamP7D4MJ32XtSLwZB4ru+I+Ao/P/VngPepoRPQoBnzHe7jww0rokqxl +AZERjlbTUwUAy/BPWPSzSJZ2j0tcs6ZLDNyYzpK4ao8R9/1VmQ92Tcp3feJs1QTg +odRQc3om/AkWOwsll+oyX0UbJeHkFHiLanUPXbdh+/BkSvZJ8ynL+feSDdaurPe+ +PSfnqLtQft9/neecGRdEaQzzzSFVQUVQzTdK1Q7hA7b55b2HvIa3ktDiks+sJsYN +Dhm6uZM= +-----END CERTIFICATE----- diff -Nru temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_key.pem temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_key.pem --- temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_key.pem 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_key.pem 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAorX749dD4zeGNgf+/QrhQRiXmWPOk0zWLnZ5Az6AWiiiNPzs +8NJqmFN+zQ9fL89MCMcES8+Fak6mZBd8k1jOPBwK+DrqFuqS/779VNmIUsCwdqDO +QTP4VEmpPkSrsbkUvLDWLNfW7HI83u0ARYQSDt6qyJpcPcKrz5Ng31yOKs94XtFk +XzIUJgTGq9uj5hzpEvrHDX2JdVN9y1GBB7nzUpC4hjn8tejd1YMSKUV2wAMB7xcA +Rw7rh6w7DSDVS9cueldITXnWRcEVvewof9mMJ5wNgwIgwRvixYtJuWRLEnmw1KZc +gbjC2vZCeKLsJdmCdVitpAT8nKH40xAA26hJ0wIDAQABAoIBACaNR+lsD8G+XiZf +LqN1+HkcAo9tfnyYMAdCOtnx7SdviT9Uzi8hK/B7mAeuJLeHPlS2EuaDfPD7QaFl +jza6S+MiIdc+3kgfvESsVAnOoOY6kZUJ9NSuI6CU82y1iJjLaYZrv9NQMLRFPPb0 +4KOX709mosB1EnXvshW0rbc+jtDFhrm1SxMt+k9TuzmMxjbOeW4LOLXPgU8X1T3Q +Xy0hMZZtcgBs9wFIo8yCtmOixax9pnFE8rRltgDxTodn9LLdz1FieyntNgDksZ0P +nt4kV7Mqly7ELaea+Foaj244mKsesic2e3GhAlMRLun/VSunSf7mOCxfpITB8dp1 +drDhOYECgYEA19151dVxRcviuovN6Dar+QszMTnU8pDJ8BjLFjXjP/hNBBwMTHDE +duMuWk2qnwZqMooI/shxrF/ufmTgS0CFrh2+ANBZu27vWConJNXcyNtdigI4wt50 +L0Y2qcZn2mg67qFXHwoR3QNwrwnPwEjRXA09at9CSRZzcwDQ0ETXhYsCgYEAwPaG +06QdK8Zyly7TTzZJwxzv9uGiqzodmGtX6NEKjgij2JaCxHpukqZBJoqa0jKeK1cm 
+eNVkOvT5ff9TMzarSHQLr3pZen2/oVLb5gaFkbcJt/klv9Fd+ZRilHY3i6QwS6pD +uMiPOWS4DrLHDRVoVlAZTDjT1RVwwTs+P2NhJdkCgYEAsriXysbxBYyMp05gqEW7 +lHIFbFgpSrs9th+Q5U6wW6JEgYaHWDJ1NslY80MiZI93FWjbkbZ7BvBWESeL3EIL +a+EMErht0pVCbIhZ6FF4foPAqia0wAJVx14mm+G80kNBp5jE/NnleEsE3KcO7nBb +hg8gLn+x7bk81JZ0TDrzBYkCgYEAuQKluv47SeF3tSScTfKLPpvcKCWmxe1uutkQ +7JShPhVioyOMNb39jnYBOWbjkm4d4QgqRuiytSR0oi3QI+Ziy5EYMyNn713qAk9j +r2TJZDDPDKnBW+zt4YI4EohWMXk3JRUW4XDKggjjwJQA7bZ812TtHHvP/xoThfG7 +eSNb3eECgYBw6ssgCtMrdvQiEmjKVX/9yI38mvC2kSGyzbrQnGUfgqRGomRpeZuD +B5E3kysA4td5pT5lvcLgSW0TbOz+YbiriXjwOihPIelCvc9gE2eOUI71/byUWPFz +7u5F/xQ4NaGr5suLF+lBC6h7pSbM4El9lIHQAQadpuEdzHqrw+hs3g== +-----END RSA PRIVATE KEY----- diff -Nru temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/v2/s2av2.go temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/v2/s2av2.go --- temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/v2/s2av2.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/v2/s2av2.go 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,354 @@ +/* + * + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package v2 provides the S2Av2 transport credentials used by a gRPC +// application. +package v2 + +import ( + "context" + "crypto/tls" + "errors" + "net" + "os" + "time" + + "github.com/golang/protobuf/proto" + "github.com/google/s2a-go/fallback" + "github.com/google/s2a-go/internal/handshaker/service" + "github.com/google/s2a-go/internal/tokenmanager" + "github.com/google/s2a-go/internal/v2/tlsconfigstore" + "github.com/google/s2a-go/stream" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" + + commonpbv1 "github.com/google/s2a-go/internal/proto/common_go_proto" + s2av2pb "github.com/google/s2a-go/internal/proto/v2/s2a_go_proto" +) + +const ( + s2aSecurityProtocol = "tls" + defaultS2ATimeout = 3 * time.Second +) + +// An environment variable, which sets the timeout enforced on the connection to the S2A service for handshake. +const s2aTimeoutEnv = "S2A_TIMEOUT" + +type s2av2TransportCreds struct { + info *credentials.ProtocolInfo + isClient bool + serverName string + s2av2Address string + tokenManager *tokenmanager.AccessTokenManager + // localIdentity should only be used by the client. + localIdentity *commonpbv1.Identity + // localIdentities should only be used by the server. + localIdentities []*commonpbv1.Identity + verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode + fallbackClientHandshake fallback.ClientHandshake + getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error) + serverAuthorizationPolicy []byte +} + +// NewClientCreds returns a client-side transport credentials object that uses +// the S2Av2 to establish a secure connection with a server. 
+func NewClientCreds(s2av2Address string, localIdentity *commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, fallbackClientHandshakeFunc fallback.ClientHandshake, getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error), serverAuthorizationPolicy []byte) (credentials.TransportCredentials, error) { + // Create an AccessTokenManager instance to use to authenticate to S2Av2. + accessTokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager() + + creds := &s2av2TransportCreds{ + info: &credentials.ProtocolInfo{ + SecurityProtocol: s2aSecurityProtocol, + }, + isClient: true, + serverName: "", + s2av2Address: s2av2Address, + localIdentity: localIdentity, + verificationMode: verificationMode, + fallbackClientHandshake: fallbackClientHandshakeFunc, + getS2AStream: getS2AStream, + serverAuthorizationPolicy: serverAuthorizationPolicy, + } + if err != nil { + creds.tokenManager = nil + } else { + creds.tokenManager = &accessTokenManager + } + if grpclog.V(1) { + grpclog.Info("Created client S2Av2 transport credentials.") + } + return creds, nil +} + +// NewServerCreds returns a server-side transport credentials object that uses +// the S2Av2 to establish a secure connection with a client. +func NewServerCreds(s2av2Address string, localIdentities []*commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error)) (credentials.TransportCredentials, error) { + // Create an AccessTokenManager instance to use to authenticate to S2Av2. + accessTokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager() + creds := &s2av2TransportCreds{ + info: &credentials.ProtocolInfo{ + SecurityProtocol: s2aSecurityProtocol, + }, + isClient: false, + s2av2Address: s2av2Address, + localIdentities: localIdentities, + verificationMode: verificationMode, + getS2AStream: getS2AStream, + } + if err != nil { + creds.tokenManager = nil + } else { + creds.tokenManager = &accessTokenManager + } + if grpclog.V(1) { + grpclog.Info("Created server S2Av2 transport credentials.") + } + return creds, nil +} + +// ClientHandshake performs a client-side mTLS handshake using the S2Av2. +func (c *s2av2TransportCreds) ClientHandshake(ctx context.Context, serverAuthority string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { + if !c.isClient { + return nil, nil, errors.New("client handshake called using server transport credentials") + } + // Remove the port from serverAuthority. 
+ serverName := removeServerNamePort(serverAuthority) + timeoutCtx, cancel := context.WithTimeout(ctx, GetS2ATimeout()) + defer cancel() + s2AStream, err := createStream(timeoutCtx, c.s2av2Address, c.getS2AStream) + if err != nil { + grpclog.Infof("Failed to connect to S2Av2: %v", err) + if c.fallbackClientHandshake != nil { + return c.fallbackClientHandshake(ctx, serverAuthority, rawConn, err) + } + return nil, nil, err + } + defer s2AStream.CloseSend() + if grpclog.V(1) { + grpclog.Infof("Connected to S2Av2.") + } + var config *tls.Config + + var tokenManager tokenmanager.AccessTokenManager + if c.tokenManager == nil { + tokenManager = nil + } else { + tokenManager = *c.tokenManager + } + + if c.serverName == "" { + config, err = tlsconfigstore.GetTLSConfigurationForClient(serverName, s2AStream, tokenManager, c.localIdentity, c.verificationMode, c.serverAuthorizationPolicy) + if err != nil { + grpclog.Info("Failed to get client TLS config from S2Av2: %v", err) + if c.fallbackClientHandshake != nil { + return c.fallbackClientHandshake(ctx, serverAuthority, rawConn, err) + } + return nil, nil, err + } + } else { + config, err = tlsconfigstore.GetTLSConfigurationForClient(c.serverName, s2AStream, tokenManager, c.localIdentity, c.verificationMode, c.serverAuthorizationPolicy) + if err != nil { + grpclog.Info("Failed to get client TLS config from S2Av2: %v", err) + if c.fallbackClientHandshake != nil { + return c.fallbackClientHandshake(ctx, serverAuthority, rawConn, err) + } + return nil, nil, err + } + } + if grpclog.V(1) { + grpclog.Infof("Got client TLS config from S2Av2.") + } + creds := credentials.NewTLS(config) + + conn, authInfo, err := creds.ClientHandshake(ctx, serverName, rawConn) + if err != nil { + grpclog.Infof("Failed to do client handshake using S2Av2: %v", err) + if c.fallbackClientHandshake != nil { + return c.fallbackClientHandshake(ctx, serverAuthority, rawConn, err) + } + return nil, nil, err + } + grpclog.Infof("Successfully done client handshake using S2Av2 to: %s", serverName) + + return conn, authInfo, err +} + +// ServerHandshake performs a server-side mTLS handshake using the S2Av2. +func (c *s2av2TransportCreds) ServerHandshake(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { + if c.isClient { + return nil, nil, errors.New("server handshake called using client transport credentials") + } + ctx, cancel := context.WithTimeout(context.Background(), GetS2ATimeout()) + defer cancel() + s2AStream, err := createStream(ctx, c.s2av2Address, c.getS2AStream) + if err != nil { + grpclog.Infof("Failed to connect to S2Av2: %v", err) + return nil, nil, err + } + defer s2AStream.CloseSend() + if grpclog.V(1) { + grpclog.Infof("Connected to S2Av2.") + } + + var tokenManager tokenmanager.AccessTokenManager + if c.tokenManager == nil { + tokenManager = nil + } else { + tokenManager = *c.tokenManager + } + + config, err := tlsconfigstore.GetTLSConfigurationForServer(s2AStream, tokenManager, c.localIdentities, c.verificationMode) + if err != nil { + grpclog.Infof("Failed to get server TLS config from S2Av2: %v", err) + return nil, nil, err + } + if grpclog.V(1) { + grpclog.Infof("Got server TLS config from S2Av2.") + } + creds := credentials.NewTLS(config) + return creds.ServerHandshake(rawConn) +} + +// Info returns protocol info of s2av2TransportCreds. +func (c *s2av2TransportCreds) Info() credentials.ProtocolInfo { + return *c.info +} + +// Clone makes a deep copy of s2av2TransportCreds. 
+func (c *s2av2TransportCreds) Clone() credentials.TransportCredentials { + info := *c.info + serverName := c.serverName + fallbackClientHandshake := c.fallbackClientHandshake + + s2av2Address := c.s2av2Address + var tokenManager tokenmanager.AccessTokenManager + if c.tokenManager == nil { + tokenManager = nil + } else { + tokenManager = *c.tokenManager + } + verificationMode := c.verificationMode + var localIdentity *commonpbv1.Identity + if c.localIdentity != nil { + localIdentity = proto.Clone(c.localIdentity).(*commonpbv1.Identity) + } + var localIdentities []*commonpbv1.Identity + if c.localIdentities != nil { + localIdentities = make([]*commonpbv1.Identity, len(c.localIdentities)) + for i, localIdentity := range c.localIdentities { + localIdentities[i] = proto.Clone(localIdentity).(*commonpbv1.Identity) + } + } + creds := &s2av2TransportCreds{ + info: &info, + isClient: c.isClient, + serverName: serverName, + fallbackClientHandshake: fallbackClientHandshake, + s2av2Address: s2av2Address, + localIdentity: localIdentity, + localIdentities: localIdentities, + verificationMode: verificationMode, + } + if c.tokenManager == nil { + creds.tokenManager = nil + } else { + creds.tokenManager = &tokenManager + } + return creds +} + +// NewClientTLSConfig returns a tls.Config instance that uses S2Av2 to establish a TLS connection as +// a client. The tls.Config MUST only be used to establish a single TLS connection. +func NewClientTLSConfig( + ctx context.Context, + s2av2Address string, + tokenManager tokenmanager.AccessTokenManager, + verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, + serverName string, + serverAuthorizationPolicy []byte) (*tls.Config, error) { + s2AStream, err := createStream(ctx, s2av2Address, nil) + if err != nil { + grpclog.Infof("Failed to connect to S2Av2: %v", err) + return nil, err + } + + return tlsconfigstore.GetTLSConfigurationForClient(removeServerNamePort(serverName), s2AStream, tokenManager, nil, verificationMode, serverAuthorizationPolicy) +} + +// OverrideServerName sets the ServerName in the s2av2TransportCreds protocol +// info. The ServerName MUST be a hostname. +func (c *s2av2TransportCreds) OverrideServerName(serverNameOverride string) error { + serverName := removeServerNamePort(serverNameOverride) + c.info.ServerName = serverName + c.serverName = serverName + return nil +} + +// Remove the trailing port from server name. +func removeServerNamePort(serverName string) string { + name, _, err := net.SplitHostPort(serverName) + if err != nil { + name = serverName + } + return name +} + +type s2AGrpcStream struct { + stream s2av2pb.S2AService_SetUpSessionClient +} + +func (x s2AGrpcStream) Send(m *s2av2pb.SessionReq) error { + return x.stream.Send(m) +} + +func (x s2AGrpcStream) Recv() (*s2av2pb.SessionResp, error) { + return x.stream.Recv() +} + +func (x s2AGrpcStream) CloseSend() error { + return x.stream.CloseSend() +} + +func createStream(ctx context.Context, s2av2Address string, getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error)) (stream.S2AStream, error) { + if getS2AStream != nil { + return getS2AStream(ctx, s2av2Address) + } + // TODO(rmehta19): Consider whether to close the connection to S2Av2. + conn, err := service.Dial(s2av2Address) + if err != nil { + return nil, err + } + client := s2av2pb.NewS2AServiceClient(conn) + gRPCStream, err := client.SetUpSession(ctx, []grpc.CallOption{}...) 
+ if err != nil { + return nil, err + } + return &s2AGrpcStream{ + stream: gRPCStream, + }, nil +} + +// GetS2ATimeout returns the timeout enforced on the connection to the S2A service for handshake. +func GetS2ATimeout() time.Duration { + timeout, err := time.ParseDuration(os.Getenv(s2aTimeoutEnv)) + if err != nil { + return defaultS2ATimeout + } + return timeout +} diff -Nru temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/v2/testdata/client_cert.pem temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/v2/testdata/client_cert.pem --- temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/v2/testdata/client_cert.pem 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/v2/testdata/client_cert.pem 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIID8TCCAtmgAwIBAgIUKXNlBRVe6UepjQUijIFPZBd/4qYwDQYJKoZIhvcNAQEL +BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2 +YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE +AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN +MjIwNTMxMjAwMzE1WhcNNDIwNTI2MjAwMzE1WjCBhzELMAkGA1UEBhMCVVMxCzAJ +BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx +ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ +KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBAOOFuIucH7XXfohGxKd3uR/ihUA/LdduR9I8kfpUEbq5BOt8xZe5/Yn9 +a1ozEHVW6cOAbHbnwAR8tkSgZ/t42QIA2k77HWU1Jh2xiEIsJivo3imm4/kZWuR0 +OqPh7MhzxpR/hvNwpI5mJsAVBWFMa5KtecFZLnyZtwHylrRN1QXzuLrOxuKFufK3 +RKbTABScn5RbZL976H/jgfSeXrbt242NrIoBnVe6fRbekbq2DQ6zFArbQMUgHjHK +P0UqBgdr1QmHfi9KytFyx9BTP3gXWnWIu+bY7/v7qKJMHFwGETo+dCLWYevJL316 +HnLfhApDMfP8U+Yv/y1N/YvgaSOSlEcCAwEAAaNTMFEwHQYDVR0OBBYEFKhAU4nu +0h/lrnggbIGvx4ej0WklMB8GA1UdIwQYMBaAFKhAU4nu0h/lrnggbIGvx4ej0Wkl +MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAE/6NghzQ5fu6yR6 +EHKbj/YMrFdT7aGn5n2sAf7wJ33LIhiFHkpWBsVlm7rDtZtwhe891ZK/P60anlg9 +/P0Ua53tSRVRmCvTnEbXWOVMN4is6MsR7BlmzUxl4AtIn7jbeifEwRL7B4xDYmdA +QrQnsqoz45dLgS5xK4WDqXATP09Q91xQDuhud/b+A4jrvgwFASmL7rMIZbp4f1JQ +nlnl/9VoTBQBvJiWkDUtQDMpRLtauddEkv4AGz75p5IspXWD6cOemuh2iQec11xD +X20rs2WZbAcAiUa3nmy8OKYw435vmpj8gp39WYbX/Yx9TymrFFbVY92wYn+quTco +pKklVz0= +-----END CERTIFICATE----- diff -Nru temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/v2/testdata/client_key.pem temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/v2/testdata/client_key.pem --- temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/v2/testdata/client_key.pem 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/v2/testdata/client_key.pem 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEogIBAAKCAQEA44W4i5wftdd+iEbEp3e5H+KFQD8t125H0jyR+lQRurkE63zF +l7n9if1rWjMQdVbpw4BsdufABHy2RKBn+3jZAgDaTvsdZTUmHbGIQiwmK+jeKabj ++Rla5HQ6o+HsyHPGlH+G83CkjmYmwBUFYUxrkq15wVkufJm3AfKWtE3VBfO4us7G +4oW58rdEptMAFJyflFtkv3vof+OB9J5etu3bjY2sigGdV7p9Ft6RurYNDrMUCttA +xSAeMco/RSoGB2vVCYd+L0rK0XLH0FM/eBdadYi75tjv+/uookwcXAYROj50ItZh +68kvfXoect+ECkMx8/xT5i//LU39i+BpI5KURwIDAQABAoIBABgyjo/6iLzUMFbZ +/+w3pW6orrdIgN2akvTfED9pVYFgUA+jc3hRhY95bkNnjuaL2cy7Cc4Tk65mfRQL +Y0OxdJLr+EvSFSxAXM9npDA1ddHRsF8JqtFBSxNk8R+g1Yf0GDiO35Fgd3/ViWWA +VtQkRoSRApP3oiQKTRZd8H04keFR+PvmDk/Lq11l3Kc24A1PevKIPX1oI990ggw9 +9i4uSV+cnuMxmcI9xxJtgwdDFdjr39l2arLOHr4s6LGoV2IOdXHNlv5xRqWUZ0FH +MDHowkLgwDrdSTnNeaVNkce14Gqx+bd4hNaLCdKXMpedBTEmrut3f3hdV1kKjaKt 
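Editorial aside (not part of the patch): GetS2ATimeout above reads the S2A_TIMEOUT environment variable with time.ParseDuration and falls back to the 3-second default when the variable is unset or malformed. A minimal, hypothetical sketch of raising that deadline before the first handshake; the "10s" value is only an example.

package example

import (
    "os"
    "time"
)

// ensureS2ATimeout bumps the S2A handshake deadline when no valid value has
// been configured; any string accepted by time.ParseDuration works.
func ensureS2ATimeout() {
    if _, err := time.ParseDuration(os.Getenv("S2A_TIMEOUT")); err != nil {
        os.Setenv("S2A_TIMEOUT", "10s") // hypothetical override of the 3s default
    }
}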
+aqRYr8ECgYEA/YDGZY2jvFoHHBywlqmEMFrrCvQGH51m5R1Ntpkzr+Rh3YCmrpvq +xgwJXING0PUw3dz+xrH5lJICrfNE5Kt3fPu1rAEy+13mYsNowghtUq2Rtu0Hsjjx +2E3Bf8vEB6RNBMmGkUpTTIAroGF5tpJoRvfnWax+k4pFdrKYFtyZdNcCgYEA5cNv +EPltvOobjTXlUmtVP3n27KZN2aXexTcagLzRxE9CV4cYySENl3KuOMmccaZpIl6z +aHk6BT4X+M0LqElNUczrInfVqI+SGAFLGy7W6CJaqSr6cpyFUP/fosKpm6wKGgLq +udHfpvz5rckhKd8kJxFLvhGOK9yN5qpzih0gfhECgYAJfwRvk3G5wYmYpP58dlcs +VIuPenqsPoI3PPTHTU/hW+XKnWIhElgmGRdUrto9Q6IT/Y5RtSMLTLjq+Tzwb/fm +56rziYv2XJsfwgAvnI8z1Kqrto9ePsHYf3krJ1/thVsZPc9bq/QY3ohD1sLvcuaT +GgBBnLOVJU3a12/ZE2RwOwKBgF0csWMAoj8/5IB6if+3ral2xOGsl7oPZVMo/J2V +Z7EVqb4M6rd/pKFugTpUQgkwtkSOekhpcGD1hAN5HTNK2YG/+L5UMAsKe9sskwJm +HgOfAHy0BSDzW3ey6i9skg2bT9Cww+0gJ3Hl7U1HSCBO5LjMYpSZSrNtwzfqdb5Q +BX3xAoGARZdR28Ej3+/+0+fz47Yu2h4z0EI/EbrudLOWY936jIeAVwHckI3+BuqH +qR4poj1gfbnMxNuI9UzIXzjEmGewx9kDZ7IYnvloZKqoVQODO5GlKF2ja6IcMNlh +GCNdD6PSAS6HcmalmWo9sj+1YMkrl+GJikKZqVBHrHNwMGAG67w= +-----END RSA PRIVATE KEY----- diff -Nru temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/v2/testdata/server_cert.pem temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/v2/testdata/server_cert.pem --- temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/v2/testdata/server_cert.pem 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/v2/testdata/server_cert.pem 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIID8TCCAtmgAwIBAgIUKCoDuLtiZXvhsBY2RoDm0ugizJ8wDQYJKoZIhvcNAQEL +BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2 +YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE +AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN +MjIwNTMxMjAwODI1WhcNNDIwNTI2MjAwODI1WjCBhzELMAkGA1UEBhMCVVMxCzAJ +BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx +ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ +KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBAKK1++PXQ+M3hjYH/v0K4UEYl5ljzpNM1i52eQM+gFooojT87PDSaphT +fs0PXy/PTAjHBEvPhWpOpmQXfJNYzjwcCvg66hbqkv++/VTZiFLAsHagzkEz+FRJ +qT5Eq7G5FLyw1izX1uxyPN7tAEWEEg7eqsiaXD3Cq8+TYN9cjirPeF7RZF8yFCYE +xqvbo+Yc6RL6xw19iXVTfctRgQe581KQuIY5/LXo3dWDEilFdsADAe8XAEcO64es +Ow0g1UvXLnpXSE151kXBFb3sKH/ZjCecDYMCIMEb4sWLSblkSxJ5sNSmXIG4wtr2 +Qnii7CXZgnVYraQE/Jyh+NMQANuoSdMCAwEAAaNTMFEwHQYDVR0OBBYEFAyQQQuM +ab+YUQqjK8dVVOoHVFmXMB8GA1UdIwQYMBaAFAyQQQuMab+YUQqjK8dVVOoHVFmX +MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBADj0vQ6ykWhicoqR +e6VZMwlEJV7/DSvWWKBd9MUjfKye0A4565ya5lmnzP3DiD3nqGe3miqmLsXKDs+X +POqlPXTWIamP7D4MJ32XtSLwZB4ru+I+Ao/P/VngPepoRPQoBnzHe7jww0rokqxl +AZERjlbTUwUAy/BPWPSzSJZ2j0tcs6ZLDNyYzpK4ao8R9/1VmQ92Tcp3feJs1QTg +odRQc3om/AkWOwsll+oyX0UbJeHkFHiLanUPXbdh+/BkSvZJ8ynL+feSDdaurPe+ +PSfnqLtQft9/neecGRdEaQzzzSFVQUVQzTdK1Q7hA7b55b2HvIa3ktDiks+sJsYN +Dhm6uZM= +-----END CERTIFICATE----- diff -Nru temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/v2/testdata/server_key.pem temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/v2/testdata/server_key.pem --- temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/v2/testdata/server_key.pem 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/v2/testdata/server_key.pem 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAorX749dD4zeGNgf+/QrhQRiXmWPOk0zWLnZ5Az6AWiiiNPzs +8NJqmFN+zQ9fL89MCMcES8+Fak6mZBd8k1jOPBwK+DrqFuqS/779VNmIUsCwdqDO 
+QTP4VEmpPkSrsbkUvLDWLNfW7HI83u0ARYQSDt6qyJpcPcKrz5Ng31yOKs94XtFk +XzIUJgTGq9uj5hzpEvrHDX2JdVN9y1GBB7nzUpC4hjn8tejd1YMSKUV2wAMB7xcA +Rw7rh6w7DSDVS9cueldITXnWRcEVvewof9mMJ5wNgwIgwRvixYtJuWRLEnmw1KZc +gbjC2vZCeKLsJdmCdVitpAT8nKH40xAA26hJ0wIDAQABAoIBACaNR+lsD8G+XiZf +LqN1+HkcAo9tfnyYMAdCOtnx7SdviT9Uzi8hK/B7mAeuJLeHPlS2EuaDfPD7QaFl +jza6S+MiIdc+3kgfvESsVAnOoOY6kZUJ9NSuI6CU82y1iJjLaYZrv9NQMLRFPPb0 +4KOX709mosB1EnXvshW0rbc+jtDFhrm1SxMt+k9TuzmMxjbOeW4LOLXPgU8X1T3Q +Xy0hMZZtcgBs9wFIo8yCtmOixax9pnFE8rRltgDxTodn9LLdz1FieyntNgDksZ0P +nt4kV7Mqly7ELaea+Foaj244mKsesic2e3GhAlMRLun/VSunSf7mOCxfpITB8dp1 +drDhOYECgYEA19151dVxRcviuovN6Dar+QszMTnU8pDJ8BjLFjXjP/hNBBwMTHDE +duMuWk2qnwZqMooI/shxrF/ufmTgS0CFrh2+ANBZu27vWConJNXcyNtdigI4wt50 +L0Y2qcZn2mg67qFXHwoR3QNwrwnPwEjRXA09at9CSRZzcwDQ0ETXhYsCgYEAwPaG +06QdK8Zyly7TTzZJwxzv9uGiqzodmGtX6NEKjgij2JaCxHpukqZBJoqa0jKeK1cm +eNVkOvT5ff9TMzarSHQLr3pZen2/oVLb5gaFkbcJt/klv9Fd+ZRilHY3i6QwS6pD +uMiPOWS4DrLHDRVoVlAZTDjT1RVwwTs+P2NhJdkCgYEAsriXysbxBYyMp05gqEW7 +lHIFbFgpSrs9th+Q5U6wW6JEgYaHWDJ1NslY80MiZI93FWjbkbZ7BvBWESeL3EIL +a+EMErht0pVCbIhZ6FF4foPAqia0wAJVx14mm+G80kNBp5jE/NnleEsE3KcO7nBb +hg8gLn+x7bk81JZ0TDrzBYkCgYEAuQKluv47SeF3tSScTfKLPpvcKCWmxe1uutkQ +7JShPhVioyOMNb39jnYBOWbjkm4d4QgqRuiytSR0oi3QI+Ziy5EYMyNn713qAk9j +r2TJZDDPDKnBW+zt4YI4EohWMXk3JRUW4XDKggjjwJQA7bZ812TtHHvP/xoThfG7 +eSNb3eECgYBw6ssgCtMrdvQiEmjKVX/9yI38mvC2kSGyzbrQnGUfgqRGomRpeZuD +B5E3kysA4td5pT5lvcLgSW0TbOz+YbiriXjwOihPIelCvc9gE2eOUI71/byUWPFz +7u5F/xQ4NaGr5suLF+lBC6h7pSbM4El9lIHQAQadpuEdzHqrw+hs3g== +-----END RSA PRIVATE KEY----- diff -Nru temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/client_cert.pem temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/client_cert.pem --- temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/client_cert.pem 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/client_cert.pem 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIID8TCCAtmgAwIBAgIUKXNlBRVe6UepjQUijIFPZBd/4qYwDQYJKoZIhvcNAQEL +BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2 +YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE +AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN +MjIwNTMxMjAwMzE1WhcNNDIwNTI2MjAwMzE1WjCBhzELMAkGA1UEBhMCVVMxCzAJ +BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx +ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ +KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBAOOFuIucH7XXfohGxKd3uR/ihUA/LdduR9I8kfpUEbq5BOt8xZe5/Yn9 +a1ozEHVW6cOAbHbnwAR8tkSgZ/t42QIA2k77HWU1Jh2xiEIsJivo3imm4/kZWuR0 +OqPh7MhzxpR/hvNwpI5mJsAVBWFMa5KtecFZLnyZtwHylrRN1QXzuLrOxuKFufK3 +RKbTABScn5RbZL976H/jgfSeXrbt242NrIoBnVe6fRbekbq2DQ6zFArbQMUgHjHK +P0UqBgdr1QmHfi9KytFyx9BTP3gXWnWIu+bY7/v7qKJMHFwGETo+dCLWYevJL316 +HnLfhApDMfP8U+Yv/y1N/YvgaSOSlEcCAwEAAaNTMFEwHQYDVR0OBBYEFKhAU4nu +0h/lrnggbIGvx4ej0WklMB8GA1UdIwQYMBaAFKhAU4nu0h/lrnggbIGvx4ej0Wkl +MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAE/6NghzQ5fu6yR6 +EHKbj/YMrFdT7aGn5n2sAf7wJ33LIhiFHkpWBsVlm7rDtZtwhe891ZK/P60anlg9 +/P0Ua53tSRVRmCvTnEbXWOVMN4is6MsR7BlmzUxl4AtIn7jbeifEwRL7B4xDYmdA +QrQnsqoz45dLgS5xK4WDqXATP09Q91xQDuhud/b+A4jrvgwFASmL7rMIZbp4f1JQ +nlnl/9VoTBQBvJiWkDUtQDMpRLtauddEkv4AGz75p5IspXWD6cOemuh2iQec11xD +X20rs2WZbAcAiUa3nmy8OKYw435vmpj8gp39WYbX/Yx9TymrFFbVY92wYn+quTco +pKklVz0= +-----END CERTIFICATE----- diff -Nru 
temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/client_key.pem temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/client_key.pem --- temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/client_key.pem 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/client_key.pem 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEogIBAAKCAQEA44W4i5wftdd+iEbEp3e5H+KFQD8t125H0jyR+lQRurkE63zF +l7n9if1rWjMQdVbpw4BsdufABHy2RKBn+3jZAgDaTvsdZTUmHbGIQiwmK+jeKabj ++Rla5HQ6o+HsyHPGlH+G83CkjmYmwBUFYUxrkq15wVkufJm3AfKWtE3VBfO4us7G +4oW58rdEptMAFJyflFtkv3vof+OB9J5etu3bjY2sigGdV7p9Ft6RurYNDrMUCttA +xSAeMco/RSoGB2vVCYd+L0rK0XLH0FM/eBdadYi75tjv+/uookwcXAYROj50ItZh +68kvfXoect+ECkMx8/xT5i//LU39i+BpI5KURwIDAQABAoIBABgyjo/6iLzUMFbZ +/+w3pW6orrdIgN2akvTfED9pVYFgUA+jc3hRhY95bkNnjuaL2cy7Cc4Tk65mfRQL +Y0OxdJLr+EvSFSxAXM9npDA1ddHRsF8JqtFBSxNk8R+g1Yf0GDiO35Fgd3/ViWWA +VtQkRoSRApP3oiQKTRZd8H04keFR+PvmDk/Lq11l3Kc24A1PevKIPX1oI990ggw9 +9i4uSV+cnuMxmcI9xxJtgwdDFdjr39l2arLOHr4s6LGoV2IOdXHNlv5xRqWUZ0FH +MDHowkLgwDrdSTnNeaVNkce14Gqx+bd4hNaLCdKXMpedBTEmrut3f3hdV1kKjaKt +aqRYr8ECgYEA/YDGZY2jvFoHHBywlqmEMFrrCvQGH51m5R1Ntpkzr+Rh3YCmrpvq +xgwJXING0PUw3dz+xrH5lJICrfNE5Kt3fPu1rAEy+13mYsNowghtUq2Rtu0Hsjjx +2E3Bf8vEB6RNBMmGkUpTTIAroGF5tpJoRvfnWax+k4pFdrKYFtyZdNcCgYEA5cNv +EPltvOobjTXlUmtVP3n27KZN2aXexTcagLzRxE9CV4cYySENl3KuOMmccaZpIl6z +aHk6BT4X+M0LqElNUczrInfVqI+SGAFLGy7W6CJaqSr6cpyFUP/fosKpm6wKGgLq +udHfpvz5rckhKd8kJxFLvhGOK9yN5qpzih0gfhECgYAJfwRvk3G5wYmYpP58dlcs +VIuPenqsPoI3PPTHTU/hW+XKnWIhElgmGRdUrto9Q6IT/Y5RtSMLTLjq+Tzwb/fm +56rziYv2XJsfwgAvnI8z1Kqrto9ePsHYf3krJ1/thVsZPc9bq/QY3ohD1sLvcuaT +GgBBnLOVJU3a12/ZE2RwOwKBgF0csWMAoj8/5IB6if+3ral2xOGsl7oPZVMo/J2V +Z7EVqb4M6rd/pKFugTpUQgkwtkSOekhpcGD1hAN5HTNK2YG/+L5UMAsKe9sskwJm +HgOfAHy0BSDzW3ey6i9skg2bT9Cww+0gJ3Hl7U1HSCBO5LjMYpSZSrNtwzfqdb5Q +BX3xAoGARZdR28Ej3+/+0+fz47Yu2h4z0EI/EbrudLOWY936jIeAVwHckI3+BuqH +qR4poj1gfbnMxNuI9UzIXzjEmGewx9kDZ7IYnvloZKqoVQODO5GlKF2ja6IcMNlh +GCNdD6PSAS6HcmalmWo9sj+1YMkrl+GJikKZqVBHrHNwMGAG67w= +-----END RSA PRIVATE KEY----- diff -Nru temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/server_cert.pem temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/server_cert.pem --- temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/server_cert.pem 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/server_cert.pem 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIID8TCCAtmgAwIBAgIUKCoDuLtiZXvhsBY2RoDm0ugizJ8wDQYJKoZIhvcNAQEL +BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2 +YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE +AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN +MjIwNTMxMjAwODI1WhcNNDIwNTI2MjAwODI1WjCBhzELMAkGA1UEBhMCVVMxCzAJ +BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx +ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ +KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBAKK1++PXQ+M3hjYH/v0K4UEYl5ljzpNM1i52eQM+gFooojT87PDSaphT +fs0PXy/PTAjHBEvPhWpOpmQXfJNYzjwcCvg66hbqkv++/VTZiFLAsHagzkEz+FRJ +qT5Eq7G5FLyw1izX1uxyPN7tAEWEEg7eqsiaXD3Cq8+TYN9cjirPeF7RZF8yFCYE 
+xqvbo+Yc6RL6xw19iXVTfctRgQe581KQuIY5/LXo3dWDEilFdsADAe8XAEcO64es +Ow0g1UvXLnpXSE151kXBFb3sKH/ZjCecDYMCIMEb4sWLSblkSxJ5sNSmXIG4wtr2 +Qnii7CXZgnVYraQE/Jyh+NMQANuoSdMCAwEAAaNTMFEwHQYDVR0OBBYEFAyQQQuM +ab+YUQqjK8dVVOoHVFmXMB8GA1UdIwQYMBaAFAyQQQuMab+YUQqjK8dVVOoHVFmX +MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBADj0vQ6ykWhicoqR +e6VZMwlEJV7/DSvWWKBd9MUjfKye0A4565ya5lmnzP3DiD3nqGe3miqmLsXKDs+X +POqlPXTWIamP7D4MJ32XtSLwZB4ru+I+Ao/P/VngPepoRPQoBnzHe7jww0rokqxl +AZERjlbTUwUAy/BPWPSzSJZ2j0tcs6ZLDNyYzpK4ao8R9/1VmQ92Tcp3feJs1QTg +odRQc3om/AkWOwsll+oyX0UbJeHkFHiLanUPXbdh+/BkSvZJ8ynL+feSDdaurPe+ +PSfnqLtQft9/neecGRdEaQzzzSFVQUVQzTdK1Q7hA7b55b2HvIa3ktDiks+sJsYN +Dhm6uZM= +-----END CERTIFICATE----- diff -Nru temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/server_key.pem temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/server_key.pem --- temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/server_key.pem 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/server_key.pem 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAorX749dD4zeGNgf+/QrhQRiXmWPOk0zWLnZ5Az6AWiiiNPzs +8NJqmFN+zQ9fL89MCMcES8+Fak6mZBd8k1jOPBwK+DrqFuqS/779VNmIUsCwdqDO +QTP4VEmpPkSrsbkUvLDWLNfW7HI83u0ARYQSDt6qyJpcPcKrz5Ng31yOKs94XtFk +XzIUJgTGq9uj5hzpEvrHDX2JdVN9y1GBB7nzUpC4hjn8tejd1YMSKUV2wAMB7xcA +Rw7rh6w7DSDVS9cueldITXnWRcEVvewof9mMJ5wNgwIgwRvixYtJuWRLEnmw1KZc +gbjC2vZCeKLsJdmCdVitpAT8nKH40xAA26hJ0wIDAQABAoIBACaNR+lsD8G+XiZf +LqN1+HkcAo9tfnyYMAdCOtnx7SdviT9Uzi8hK/B7mAeuJLeHPlS2EuaDfPD7QaFl +jza6S+MiIdc+3kgfvESsVAnOoOY6kZUJ9NSuI6CU82y1iJjLaYZrv9NQMLRFPPb0 +4KOX709mosB1EnXvshW0rbc+jtDFhrm1SxMt+k9TuzmMxjbOeW4LOLXPgU8X1T3Q +Xy0hMZZtcgBs9wFIo8yCtmOixax9pnFE8rRltgDxTodn9LLdz1FieyntNgDksZ0P +nt4kV7Mqly7ELaea+Foaj244mKsesic2e3GhAlMRLun/VSunSf7mOCxfpITB8dp1 +drDhOYECgYEA19151dVxRcviuovN6Dar+QszMTnU8pDJ8BjLFjXjP/hNBBwMTHDE +duMuWk2qnwZqMooI/shxrF/ufmTgS0CFrh2+ANBZu27vWConJNXcyNtdigI4wt50 +L0Y2qcZn2mg67qFXHwoR3QNwrwnPwEjRXA09at9CSRZzcwDQ0ETXhYsCgYEAwPaG +06QdK8Zyly7TTzZJwxzv9uGiqzodmGtX6NEKjgij2JaCxHpukqZBJoqa0jKeK1cm +eNVkOvT5ff9TMzarSHQLr3pZen2/oVLb5gaFkbcJt/klv9Fd+ZRilHY3i6QwS6pD +uMiPOWS4DrLHDRVoVlAZTDjT1RVwwTs+P2NhJdkCgYEAsriXysbxBYyMp05gqEW7 +lHIFbFgpSrs9th+Q5U6wW6JEgYaHWDJ1NslY80MiZI93FWjbkbZ7BvBWESeL3EIL +a+EMErht0pVCbIhZ6FF4foPAqia0wAJVx14mm+G80kNBp5jE/NnleEsE3KcO7nBb +hg8gLn+x7bk81JZ0TDrzBYkCgYEAuQKluv47SeF3tSScTfKLPpvcKCWmxe1uutkQ +7JShPhVioyOMNb39jnYBOWbjkm4d4QgqRuiytSR0oi3QI+Ziy5EYMyNn713qAk9j +r2TJZDDPDKnBW+zt4YI4EohWMXk3JRUW4XDKggjjwJQA7bZ812TtHHvP/xoThfG7 +eSNb3eECgYBw6ssgCtMrdvQiEmjKVX/9yI38mvC2kSGyzbrQnGUfgqRGomRpeZuD +B5E3kysA4td5pT5lvcLgSW0TbOz+YbiriXjwOihPIelCvc9gE2eOUI71/byUWPFz +7u5F/xQ4NaGr5suLF+lBC6h7pSbM4El9lIHQAQadpuEdzHqrw+hs3g== +-----END RSA PRIVATE KEY----- diff -Nru temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/tlsconfigstore.go temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/tlsconfigstore.go --- temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/tlsconfigstore.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/tlsconfigstore.go 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,404 @@ +/* + * + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not 
use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package tlsconfigstore offloads operations to S2Av2. +package tlsconfigstore + +import ( + "crypto/tls" + "crypto/x509" + "encoding/pem" + "errors" + "fmt" + + "github.com/google/s2a-go/internal/tokenmanager" + "github.com/google/s2a-go/internal/v2/certverifier" + "github.com/google/s2a-go/internal/v2/remotesigner" + "github.com/google/s2a-go/stream" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + + commonpbv1 "github.com/google/s2a-go/internal/proto/common_go_proto" + commonpb "github.com/google/s2a-go/internal/proto/v2/common_go_proto" + s2av2pb "github.com/google/s2a-go/internal/proto/v2/s2a_go_proto" +) + +const ( + // HTTP/2 + h2 = "h2" +) + +// GetTLSConfigurationForClient returns a tls.Config instance for use by a client application. +func GetTLSConfigurationForClient(serverHostname string, s2AStream stream.S2AStream, tokenManager tokenmanager.AccessTokenManager, localIdentity *commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, serverAuthorizationPolicy []byte) (*tls.Config, error) { + authMechanisms := getAuthMechanisms(tokenManager, []*commonpbv1.Identity{localIdentity}) + + if grpclog.V(1) { + grpclog.Infof("Sending request to S2Av2 for client TLS config.") + } + // Send request to S2Av2 for config. + if err := s2AStream.Send(&s2av2pb.SessionReq{ + LocalIdentity: localIdentity, + AuthenticationMechanisms: authMechanisms, + ReqOneof: &s2av2pb.SessionReq_GetTlsConfigurationReq{ + GetTlsConfigurationReq: &s2av2pb.GetTlsConfigurationReq{ + ConnectionSide: commonpb.ConnectionSide_CONNECTION_SIDE_CLIENT, + }, + }, + }); err != nil { + grpclog.Infof("Failed to send request to S2Av2 for client TLS config") + return nil, err + } + + // Get the response containing config from S2Av2. + resp, err := s2AStream.Recv() + if err != nil { + grpclog.Infof("Failed to receive client TLS config response from S2Av2.") + return nil, err + } + + // TODO(rmehta19): Add unit test for this if statement. + if (resp.GetStatus() != nil) && (resp.GetStatus().Code != uint32(codes.OK)) { + return nil, fmt.Errorf("failed to get TLS configuration from S2A: %d, %v", resp.GetStatus().Code, resp.GetStatus().Details) + } + + // Extract TLS configiguration from SessionResp. + tlsConfig := resp.GetGetTlsConfigurationResp().GetClientTlsConfiguration() + + var cert tls.Certificate + for i, v := range tlsConfig.CertificateChain { + // Populate Certificates field. 
+ block, _ := pem.Decode([]byte(v)) + if block == nil { + return nil, errors.New("certificate in CertificateChain obtained from S2Av2 is empty") + } + x509Cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, err + } + cert.Certificate = append(cert.Certificate, x509Cert.Raw) + if i == 0 { + cert.Leaf = x509Cert + } + } + + if len(tlsConfig.CertificateChain) > 0 { + cert.PrivateKey = remotesigner.New(cert.Leaf, s2AStream) + if cert.PrivateKey == nil { + return nil, errors.New("failed to retrieve Private Key from Remote Signer Library") + } + } + + minVersion, maxVersion, err := getTLSMinMaxVersionsClient(tlsConfig) + if err != nil { + return nil, err + } + + // Create mTLS credentials for client. + config := &tls.Config{ + VerifyPeerCertificate: certverifier.VerifyServerCertificateChain(serverHostname, verificationMode, s2AStream, serverAuthorizationPolicy), + ServerName: serverHostname, + InsecureSkipVerify: true, // NOLINT + ClientSessionCache: nil, + SessionTicketsDisabled: true, + MinVersion: minVersion, + MaxVersion: maxVersion, + NextProtos: []string{h2}, + } + if len(tlsConfig.CertificateChain) > 0 { + config.Certificates = []tls.Certificate{cert} + } + return config, nil +} + +// GetTLSConfigurationForServer returns a tls.Config instance for use by a server application. +func GetTLSConfigurationForServer(s2AStream stream.S2AStream, tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode) (*tls.Config, error) { + return &tls.Config{ + GetConfigForClient: ClientConfig(tokenManager, localIdentities, verificationMode, s2AStream), + }, nil +} + +// ClientConfig builds a TLS config for a server to establish a secure +// connection with a client, based on SNI communicated during ClientHello. +// Ensures that server presents the correct certificate to establish a TLS +// connection. +func ClientConfig(tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, s2AStream stream.S2AStream) func(chi *tls.ClientHelloInfo) (*tls.Config, error) { + return func(chi *tls.ClientHelloInfo) (*tls.Config, error) { + tlsConfig, err := getServerConfigFromS2Av2(tokenManager, localIdentities, chi.ServerName, s2AStream) + if err != nil { + return nil, err + } + + var cert tls.Certificate + for i, v := range tlsConfig.CertificateChain { + // Populate Certificates field. + block, _ := pem.Decode([]byte(v)) + if block == nil { + return nil, errors.New("certificate in CertificateChain obtained from S2Av2 is empty") + } + x509Cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, err + } + cert.Certificate = append(cert.Certificate, x509Cert.Raw) + if i == 0 { + cert.Leaf = x509Cert + } + } + + cert.PrivateKey = remotesigner.New(cert.Leaf, s2AStream) + if cert.PrivateKey == nil { + return nil, errors.New("failed to retrieve Private Key from Remote Signer Library") + } + + minVersion, maxVersion, err := getTLSMinMaxVersionsServer(tlsConfig) + if err != nil { + return nil, err + } + + clientAuth := getTLSClientAuthType(tlsConfig) + + var cipherSuites []uint16 + cipherSuites = getCipherSuites(tlsConfig.Ciphersuites) + + // Create mTLS credentials for server. 
+ return &tls.Config{ + Certificates: []tls.Certificate{cert}, + VerifyPeerCertificate: certverifier.VerifyClientCertificateChain(verificationMode, s2AStream), + ClientAuth: clientAuth, + CipherSuites: cipherSuites, + SessionTicketsDisabled: true, + MinVersion: minVersion, + MaxVersion: maxVersion, + NextProtos: []string{h2}, + }, nil + } +} + +func getCipherSuites(tlsConfigCipherSuites []commonpb.Ciphersuite) []uint16 { + var tlsGoCipherSuites []uint16 + for _, v := range tlsConfigCipherSuites { + s := getTLSCipherSuite(v) + if s != 0xffff { + tlsGoCipherSuites = append(tlsGoCipherSuites, s) + } + } + return tlsGoCipherSuites +} + +func getTLSCipherSuite(tlsCipherSuite commonpb.Ciphersuite) uint16 { + switch tlsCipherSuite { + case commonpb.Ciphersuite_CIPHERSUITE_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: + return tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 + case commonpb.Ciphersuite_CIPHERSUITE_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: + return tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 + case commonpb.Ciphersuite_CIPHERSUITE_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256: + return tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 + case commonpb.Ciphersuite_CIPHERSUITE_ECDHE_RSA_WITH_AES_128_GCM_SHA256: + return tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + case commonpb.Ciphersuite_CIPHERSUITE_ECDHE_RSA_WITH_AES_256_GCM_SHA384: + return tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 + case commonpb.Ciphersuite_CIPHERSUITE_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256: + return tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 + default: + return 0xffff + } +} + +func getServerConfigFromS2Av2(tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpbv1.Identity, sni string, s2AStream stream.S2AStream) (*s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration, error) { + authMechanisms := getAuthMechanisms(tokenManager, localIdentities) + var locID *commonpbv1.Identity + if localIdentities != nil { + locID = localIdentities[0] + } + + if err := s2AStream.Send(&s2av2pb.SessionReq{ + LocalIdentity: locID, + AuthenticationMechanisms: authMechanisms, + ReqOneof: &s2av2pb.SessionReq_GetTlsConfigurationReq{ + GetTlsConfigurationReq: &s2av2pb.GetTlsConfigurationReq{ + ConnectionSide: commonpb.ConnectionSide_CONNECTION_SIDE_SERVER, + Sni: sni, + }, + }, + }); err != nil { + return nil, err + } + + resp, err := s2AStream.Recv() + if err != nil { + return nil, err + } + + // TODO(rmehta19): Add unit test for this if statement. + if (resp.GetStatus() != nil) && (resp.GetStatus().Code != uint32(codes.OK)) { + return nil, fmt.Errorf("failed to get TLS configuration from S2A: %d, %v", resp.GetStatus().Code, resp.GetStatus().Details) + } + + return resp.GetGetTlsConfigurationResp().GetServerTlsConfiguration(), nil +} + +func getTLSClientAuthType(tlsConfig *s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration) tls.ClientAuthType { + var clientAuth tls.ClientAuthType + switch x := tlsConfig.RequestClientCertificate; x { + case s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration_DONT_REQUEST_CLIENT_CERTIFICATE: + clientAuth = tls.NoClientCert + case s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_CLIENT_CERTIFICATE_BUT_DONT_VERIFY: + clientAuth = tls.RequestClientCert + case s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_CLIENT_CERTIFICATE_AND_VERIFY: + // This case actually maps to tls.VerifyClientCertIfGiven. However this + // mapping triggers normal verification, followed by custom verification, + // specified in VerifyPeerCertificate. 
To bypass normal verification, and + // only do custom verification we set clientAuth to RequireAnyClientCert or + // RequestClientCert. See https://github.com/google/s2a-go/pull/43 for full + // discussion. + clientAuth = tls.RequireAnyClientCert + case s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_BUT_DONT_VERIFY: + clientAuth = tls.RequireAnyClientCert + case s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY: + // This case actually maps to tls.RequireAndVerifyClientCert. However this + // mapping triggers normal verification, followed by custom verification, + // specified in VerifyPeerCertificate. To bypass normal verification, and + // only do custom verification we set clientAuth to RequireAnyClientCert or + // RequestClientCert. See https://github.com/google/s2a-go/pull/43 for full + // discussion. + clientAuth = tls.RequireAnyClientCert + default: + clientAuth = tls.RequireAnyClientCert + } + return clientAuth +} + +func getAuthMechanisms(tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpbv1.Identity) []*s2av2pb.AuthenticationMechanism { + if tokenManager == nil { + return nil + } + if len(localIdentities) == 0 { + token, err := tokenManager.DefaultToken() + if err != nil { + grpclog.Infof("Unable to get token for empty local identity: %v", err) + return nil + } + return []*s2av2pb.AuthenticationMechanism{ + { + MechanismOneof: &s2av2pb.AuthenticationMechanism_Token{ + Token: token, + }, + }, + } + } + var authMechanisms []*s2av2pb.AuthenticationMechanism + for _, localIdentity := range localIdentities { + if localIdentity == nil { + token, err := tokenManager.DefaultToken() + if err != nil { + grpclog.Infof("Unable to get default token for local identity %v: %v", localIdentity, err) + continue + } + authMechanisms = append(authMechanisms, &s2av2pb.AuthenticationMechanism{ + Identity: localIdentity, + MechanismOneof: &s2av2pb.AuthenticationMechanism_Token{ + Token: token, + }, + }) + } else { + token, err := tokenManager.Token(localIdentity) + if err != nil { + grpclog.Infof("Unable to get token for local identity %v: %v", localIdentity, err) + continue + } + authMechanisms = append(authMechanisms, &s2av2pb.AuthenticationMechanism{ + Identity: localIdentity, + MechanismOneof: &s2av2pb.AuthenticationMechanism_Token{ + Token: token, + }, + }) + } + } + return authMechanisms +} + +// TODO(rmehta19): refactor switch statements into a helper function. +func getTLSMinMaxVersionsClient(tlsConfig *s2av2pb.GetTlsConfigurationResp_ClientTlsConfiguration) (uint16, uint16, error) { + // Map S2Av2 TLSVersion to consts defined in tls package. 
+ var minVersion uint16 + var maxVersion uint16 + switch x := tlsConfig.MinTlsVersion; x { + case commonpb.TLSVersion_TLS_VERSION_1_0: + minVersion = tls.VersionTLS10 + case commonpb.TLSVersion_TLS_VERSION_1_1: + minVersion = tls.VersionTLS11 + case commonpb.TLSVersion_TLS_VERSION_1_2: + minVersion = tls.VersionTLS12 + case commonpb.TLSVersion_TLS_VERSION_1_3: + minVersion = tls.VersionTLS13 + default: + return minVersion, maxVersion, fmt.Errorf("S2Av2 provided invalid MinTlsVersion: %v", x) + } + + switch x := tlsConfig.MaxTlsVersion; x { + case commonpb.TLSVersion_TLS_VERSION_1_0: + maxVersion = tls.VersionTLS10 + case commonpb.TLSVersion_TLS_VERSION_1_1: + maxVersion = tls.VersionTLS11 + case commonpb.TLSVersion_TLS_VERSION_1_2: + maxVersion = tls.VersionTLS12 + case commonpb.TLSVersion_TLS_VERSION_1_3: + maxVersion = tls.VersionTLS13 + default: + return minVersion, maxVersion, fmt.Errorf("S2Av2 provided invalid MaxTlsVersion: %v", x) + } + if minVersion > maxVersion { + return minVersion, maxVersion, errors.New("S2Av2 provided minVersion > maxVersion") + } + return minVersion, maxVersion, nil +} + +func getTLSMinMaxVersionsServer(tlsConfig *s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration) (uint16, uint16, error) { + // Map S2Av2 TLSVersion to consts defined in tls package. + var minVersion uint16 + var maxVersion uint16 + switch x := tlsConfig.MinTlsVersion; x { + case commonpb.TLSVersion_TLS_VERSION_1_0: + minVersion = tls.VersionTLS10 + case commonpb.TLSVersion_TLS_VERSION_1_1: + minVersion = tls.VersionTLS11 + case commonpb.TLSVersion_TLS_VERSION_1_2: + minVersion = tls.VersionTLS12 + case commonpb.TLSVersion_TLS_VERSION_1_3: + minVersion = tls.VersionTLS13 + default: + return minVersion, maxVersion, fmt.Errorf("S2Av2 provided invalid MinTlsVersion: %v", x) + } + + switch x := tlsConfig.MaxTlsVersion; x { + case commonpb.TLSVersion_TLS_VERSION_1_0: + maxVersion = tls.VersionTLS10 + case commonpb.TLSVersion_TLS_VERSION_1_1: + maxVersion = tls.VersionTLS11 + case commonpb.TLSVersion_TLS_VERSION_1_2: + maxVersion = tls.VersionTLS12 + case commonpb.TLSVersion_TLS_VERSION_1_3: + maxVersion = tls.VersionTLS13 + default: + return minVersion, maxVersion, fmt.Errorf("S2Av2 provided invalid MaxTlsVersion: %v", x) + } + if minVersion > maxVersion { + return minVersion, maxVersion, errors.New("S2Av2 provided minVersion > maxVersion") + } + return minVersion, maxVersion, nil +} diff -Nru temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/s2a.go temporal-1.22.5/src/vendor/github.com/google/s2a-go/s2a.go --- temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/s2a.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/google/s2a-go/s2a.go 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,412 @@ +/* + * + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package s2a provides the S2A transport credentials used by a gRPC +// application. 
+package s2a + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + "net" + "sync" + "time" + + "github.com/golang/protobuf/proto" + "github.com/google/s2a-go/fallback" + "github.com/google/s2a-go/internal/handshaker" + "github.com/google/s2a-go/internal/handshaker/service" + "github.com/google/s2a-go/internal/tokenmanager" + "github.com/google/s2a-go/internal/v2" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" + + commonpb "github.com/google/s2a-go/internal/proto/common_go_proto" + s2av2pb "github.com/google/s2a-go/internal/proto/v2/s2a_go_proto" +) + +const ( + s2aSecurityProtocol = "tls" + // defaultTimeout specifies the default server handshake timeout. + defaultTimeout = 30.0 * time.Second +) + +// s2aTransportCreds are the transport credentials required for establishing +// a secure connection using the S2A. They implement the +// credentials.TransportCredentials interface. +type s2aTransportCreds struct { + info *credentials.ProtocolInfo + minTLSVersion commonpb.TLSVersion + maxTLSVersion commonpb.TLSVersion + // tlsCiphersuites contains the ciphersuites used in the S2A connection. + // Note that these are currently unconfigurable. + tlsCiphersuites []commonpb.Ciphersuite + // localIdentity should only be used by the client. + localIdentity *commonpb.Identity + // localIdentities should only be used by the server. + localIdentities []*commonpb.Identity + // targetIdentities should only be used by the client. + targetIdentities []*commonpb.Identity + isClient bool + s2aAddr string + ensureProcessSessionTickets *sync.WaitGroup +} + +// NewClientCreds returns a client-side transport credentials object that uses +// the S2A to establish a secure connection with a server. +func NewClientCreds(opts *ClientOptions) (credentials.TransportCredentials, error) { + if opts == nil { + return nil, errors.New("nil client options") + } + var targetIdentities []*commonpb.Identity + for _, targetIdentity := range opts.TargetIdentities { + protoTargetIdentity, err := toProtoIdentity(targetIdentity) + if err != nil { + return nil, err + } + targetIdentities = append(targetIdentities, protoTargetIdentity) + } + localIdentity, err := toProtoIdentity(opts.LocalIdentity) + if err != nil { + return nil, err + } + if opts.EnableLegacyMode { + return &s2aTransportCreds{ + info: &credentials.ProtocolInfo{ + SecurityProtocol: s2aSecurityProtocol, + }, + minTLSVersion: commonpb.TLSVersion_TLS1_3, + maxTLSVersion: commonpb.TLSVersion_TLS1_3, + tlsCiphersuites: []commonpb.Ciphersuite{ + commonpb.Ciphersuite_AES_128_GCM_SHA256, + commonpb.Ciphersuite_AES_256_GCM_SHA384, + commonpb.Ciphersuite_CHACHA20_POLY1305_SHA256, + }, + localIdentity: localIdentity, + targetIdentities: targetIdentities, + isClient: true, + s2aAddr: opts.S2AAddress, + ensureProcessSessionTickets: opts.EnsureProcessSessionTickets, + }, nil + } + verificationMode := getVerificationMode(opts.VerificationMode) + var fallbackFunc fallback.ClientHandshake + if opts.FallbackOpts != nil && opts.FallbackOpts.FallbackClientHandshakeFunc != nil { + fallbackFunc = opts.FallbackOpts.FallbackClientHandshakeFunc + } + return v2.NewClientCreds(opts.S2AAddress, localIdentity, verificationMode, fallbackFunc, opts.getS2AStream, opts.serverAuthorizationPolicy) +} + +// NewServerCreds returns a server-side transport credentials object that uses +// the S2A to establish a secure connection with a client. 
+func NewServerCreds(opts *ServerOptions) (credentials.TransportCredentials, error) { + if opts == nil { + return nil, errors.New("nil server options") + } + var localIdentities []*commonpb.Identity + for _, localIdentity := range opts.LocalIdentities { + protoLocalIdentity, err := toProtoIdentity(localIdentity) + if err != nil { + return nil, err + } + localIdentities = append(localIdentities, protoLocalIdentity) + } + if opts.EnableLegacyMode { + return &s2aTransportCreds{ + info: &credentials.ProtocolInfo{ + SecurityProtocol: s2aSecurityProtocol, + }, + minTLSVersion: commonpb.TLSVersion_TLS1_3, + maxTLSVersion: commonpb.TLSVersion_TLS1_3, + tlsCiphersuites: []commonpb.Ciphersuite{ + commonpb.Ciphersuite_AES_128_GCM_SHA256, + commonpb.Ciphersuite_AES_256_GCM_SHA384, + commonpb.Ciphersuite_CHACHA20_POLY1305_SHA256, + }, + localIdentities: localIdentities, + isClient: false, + s2aAddr: opts.S2AAddress, + }, nil + } + verificationMode := getVerificationMode(opts.VerificationMode) + return v2.NewServerCreds(opts.S2AAddress, localIdentities, verificationMode, opts.getS2AStream) +} + +// ClientHandshake initiates a client-side TLS handshake using the S2A. +func (c *s2aTransportCreds) ClientHandshake(ctx context.Context, serverAuthority string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { + if !c.isClient { + return nil, nil, errors.New("client handshake called using server transport credentials") + } + + // Connect to the S2A. + hsConn, err := service.Dial(c.s2aAddr) + if err != nil { + grpclog.Infof("Failed to connect to S2A: %v", err) + return nil, nil, err + } + + var cancel context.CancelFunc + ctx, cancel = context.WithCancel(ctx) + defer cancel() + + opts := &handshaker.ClientHandshakerOptions{ + MinTLSVersion: c.minTLSVersion, + MaxTLSVersion: c.maxTLSVersion, + TLSCiphersuites: c.tlsCiphersuites, + TargetIdentities: c.targetIdentities, + LocalIdentity: c.localIdentity, + TargetName: serverAuthority, + EnsureProcessSessionTickets: c.ensureProcessSessionTickets, + } + chs, err := handshaker.NewClientHandshaker(ctx, hsConn, rawConn, c.s2aAddr, opts) + if err != nil { + grpclog.Infof("Call to handshaker.NewClientHandshaker failed: %v", err) + return nil, nil, err + } + defer func() { + if err != nil { + if closeErr := chs.Close(); closeErr != nil { + grpclog.Infof("Close failed unexpectedly: %v", err) + err = fmt.Errorf("%v: close unexpectedly failed: %v", err, closeErr) + } + } + }() + + secConn, authInfo, err := chs.ClientHandshake(context.Background()) + if err != nil { + grpclog.Infof("Handshake failed: %v", err) + return nil, nil, err + } + return secConn, authInfo, nil +} + +// ServerHandshake initiates a server-side TLS handshake using the S2A. +func (c *s2aTransportCreds) ServerHandshake(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { + if c.isClient { + return nil, nil, errors.New("server handshake called using client transport credentials") + } + + // Connect to the S2A. 
+ hsConn, err := service.Dial(c.s2aAddr) + if err != nil { + grpclog.Infof("Failed to connect to S2A: %v", err) + return nil, nil, err + } + + ctx, cancel := context.WithTimeout(context.Background(), defaultTimeout) + defer cancel() + + opts := &handshaker.ServerHandshakerOptions{ + MinTLSVersion: c.minTLSVersion, + MaxTLSVersion: c.maxTLSVersion, + TLSCiphersuites: c.tlsCiphersuites, + LocalIdentities: c.localIdentities, + } + shs, err := handshaker.NewServerHandshaker(ctx, hsConn, rawConn, c.s2aAddr, opts) + if err != nil { + grpclog.Infof("Call to handshaker.NewServerHandshaker failed: %v", err) + return nil, nil, err + } + defer func() { + if err != nil { + if closeErr := shs.Close(); closeErr != nil { + grpclog.Infof("Close failed unexpectedly: %v", err) + err = fmt.Errorf("%v: close unexpectedly failed: %v", err, closeErr) + } + } + }() + + secConn, authInfo, err := shs.ServerHandshake(context.Background()) + if err != nil { + grpclog.Infof("Handshake failed: %v", err) + return nil, nil, err + } + return secConn, authInfo, nil +} + +func (c *s2aTransportCreds) Info() credentials.ProtocolInfo { + return *c.info +} + +func (c *s2aTransportCreds) Clone() credentials.TransportCredentials { + info := *c.info + var localIdentity *commonpb.Identity + if c.localIdentity != nil { + localIdentity = proto.Clone(c.localIdentity).(*commonpb.Identity) + } + var localIdentities []*commonpb.Identity + if c.localIdentities != nil { + localIdentities = make([]*commonpb.Identity, len(c.localIdentities)) + for i, localIdentity := range c.localIdentities { + localIdentities[i] = proto.Clone(localIdentity).(*commonpb.Identity) + } + } + var targetIdentities []*commonpb.Identity + if c.targetIdentities != nil { + targetIdentities = make([]*commonpb.Identity, len(c.targetIdentities)) + for i, targetIdentity := range c.targetIdentities { + targetIdentities[i] = proto.Clone(targetIdentity).(*commonpb.Identity) + } + } + return &s2aTransportCreds{ + info: &info, + minTLSVersion: c.minTLSVersion, + maxTLSVersion: c.maxTLSVersion, + tlsCiphersuites: c.tlsCiphersuites, + localIdentity: localIdentity, + localIdentities: localIdentities, + targetIdentities: targetIdentities, + isClient: c.isClient, + s2aAddr: c.s2aAddr, + } +} + +func (c *s2aTransportCreds) OverrideServerName(serverNameOverride string) error { + c.info.ServerName = serverNameOverride + return nil +} + +// TLSClientConfigOptions specifies parameters for creating client TLS config. +type TLSClientConfigOptions struct { + // ServerName is required by s2a as the expected name when verifying the hostname found in server's certificate. + // tlsConfig, _ := factory.Build(ctx, &s2a.TLSClientConfigOptions{ + // ServerName: "example.com", + // }) + ServerName string +} + +// TLSClientConfigFactory defines the interface for a client TLS config factory. +type TLSClientConfigFactory interface { + Build(ctx context.Context, opts *TLSClientConfigOptions) (*tls.Config, error) +} + +// NewTLSClientConfigFactory returns an instance of s2aTLSClientConfigFactory. +func NewTLSClientConfigFactory(opts *ClientOptions) (TLSClientConfigFactory, error) { + if opts == nil { + return nil, fmt.Errorf("opts must be non-nil") + } + if opts.EnableLegacyMode { + return nil, fmt.Errorf("NewTLSClientConfigFactory only supports S2Av2") + } + tokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager() + if err != nil { + // The only possible error is: access token not set in the environment, + // which is okay in environments other than serverless. 
+ grpclog.Infof("Access token manager not initialized: %v", err) + return &s2aTLSClientConfigFactory{ + s2av2Address: opts.S2AAddress, + tokenManager: nil, + verificationMode: getVerificationMode(opts.VerificationMode), + serverAuthorizationPolicy: opts.serverAuthorizationPolicy, + }, nil + } + return &s2aTLSClientConfigFactory{ + s2av2Address: opts.S2AAddress, + tokenManager: tokenManager, + verificationMode: getVerificationMode(opts.VerificationMode), + serverAuthorizationPolicy: opts.serverAuthorizationPolicy, + }, nil +} + +type s2aTLSClientConfigFactory struct { + s2av2Address string + tokenManager tokenmanager.AccessTokenManager + verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode + serverAuthorizationPolicy []byte +} + +func (f *s2aTLSClientConfigFactory) Build( + ctx context.Context, opts *TLSClientConfigOptions) (*tls.Config, error) { + serverName := "" + if opts != nil && opts.ServerName != "" { + serverName = opts.ServerName + } + return v2.NewClientTLSConfig(ctx, f.s2av2Address, f.tokenManager, f.verificationMode, serverName, f.serverAuthorizationPolicy) +} + +func getVerificationMode(verificationMode VerificationModeType) s2av2pb.ValidatePeerCertificateChainReq_VerificationMode { + switch verificationMode { + case ConnectToGoogle: + return s2av2pb.ValidatePeerCertificateChainReq_CONNECT_TO_GOOGLE + case Spiffe: + return s2av2pb.ValidatePeerCertificateChainReq_SPIFFE + default: + return s2av2pb.ValidatePeerCertificateChainReq_UNSPECIFIED + } +} + +// NewS2ADialTLSContextFunc returns a dialer which establishes an MTLS connection using S2A. +// Example use with http.RoundTripper: +// +// dialTLSContext := s2a.NewS2aDialTLSContextFunc(&s2a.ClientOptions{ +// S2AAddress: s2aAddress, // required +// }) +// transport := http.DefaultTransport +// transport.DialTLSContext = dialTLSContext +func NewS2ADialTLSContextFunc(opts *ClientOptions) func(ctx context.Context, network, addr string) (net.Conn, error) { + + return func(ctx context.Context, network, addr string) (net.Conn, error) { + + fallback := func(err error) (net.Conn, error) { + if opts.FallbackOpts != nil && opts.FallbackOpts.FallbackDialer != nil && + opts.FallbackOpts.FallbackDialer.Dialer != nil && opts.FallbackOpts.FallbackDialer.ServerAddr != "" { + fbDialer := opts.FallbackOpts.FallbackDialer + grpclog.Infof("fall back to dial: %s", fbDialer.ServerAddr) + fbConn, fbErr := fbDialer.Dialer.DialContext(ctx, network, fbDialer.ServerAddr) + if fbErr != nil { + return nil, fmt.Errorf("error fallback to %s: %v; S2A error: %w", fbDialer.ServerAddr, fbErr, err) + } + return fbConn, nil + } + return nil, err + } + + factory, err := NewTLSClientConfigFactory(opts) + if err != nil { + grpclog.Infof("error creating S2A client config factory: %v", err) + return fallback(err) + } + + serverName, _, err := net.SplitHostPort(addr) + if err != nil { + serverName = addr + } + timeoutCtx, cancel := context.WithTimeout(ctx, v2.GetS2ATimeout()) + defer cancel() + s2aTLSConfig, err := factory.Build(timeoutCtx, &TLSClientConfigOptions{ + ServerName: serverName, + }) + if err != nil { + grpclog.Infof("error building S2A TLS config: %v", err) + return fallback(err) + } + + s2aDialer := &tls.Dialer{ + Config: s2aTLSConfig, + } + c, err := s2aDialer.DialContext(ctx, network, addr) + if err != nil { + grpclog.Infof("error dialing with S2A to %s: %v", addr, err) + return fallback(err) + } + grpclog.Infof("success dialing MTLS to %s with S2A", addr) + return c, nil + } +} diff -Nru 
temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/s2a_options.go temporal-1.22.5/src/vendor/github.com/google/s2a-go/s2a_options.go --- temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/s2a_options.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/google/s2a-go/s2a_options.go 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,208 @@ +/* + * + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package s2a + +import ( + "context" + "crypto/tls" + "errors" + "sync" + + "github.com/google/s2a-go/fallback" + "github.com/google/s2a-go/stream" + + s2apb "github.com/google/s2a-go/internal/proto/common_go_proto" +) + +// Identity is the interface for S2A identities. +type Identity interface { + // Name returns the name of the identity. + Name() string +} + +type spiffeID struct { + spiffeID string +} + +func (s *spiffeID) Name() string { return s.spiffeID } + +// NewSpiffeID creates a SPIFFE ID from id. +func NewSpiffeID(id string) Identity { + return &spiffeID{spiffeID: id} +} + +type hostname struct { + hostname string +} + +func (h *hostname) Name() string { return h.hostname } + +// NewHostname creates a hostname from name. +func NewHostname(name string) Identity { + return &hostname{hostname: name} +} + +type uid struct { + uid string +} + +func (h *uid) Name() string { return h.uid } + +// NewUID creates a UID from name. +func NewUID(name string) Identity { + return &uid{uid: name} +} + +// VerificationModeType specifies the mode that S2A must use to verify the peer +// certificate chain. +type VerificationModeType int + +// Three types of verification modes. +const ( + Unspecified = iota + ConnectToGoogle + Spiffe +) + +// ClientOptions contains the client-side options used to establish a secure +// channel using the S2A handshaker service. +type ClientOptions struct { + // TargetIdentities contains a list of allowed server identities. One of the + // target identities should match the peer identity in the handshake + // result; otherwise, the handshake fails. + TargetIdentities []Identity + // LocalIdentity is the local identity of the client application. If none is + // provided, then the S2A will choose the default identity, if one exists. + LocalIdentity Identity + // S2AAddress is the address of the S2A. + S2AAddress string + // EnsureProcessSessionTickets waits for all session tickets to be sent to + // S2A before a process completes. + // + // This functionality is crucial for processes that complete very soon after + // using S2A to establish a TLS connection, but it can be ignored for longer + // lived processes. + // + // Usage example: + // func main() { + // var ensureProcessSessionTickets sync.WaitGroup + // clientOpts := &s2a.ClientOptions{ + // EnsureProcessSessionTickets: &ensureProcessSessionTickets, + // // Set other members. 
+ // } + // creds, _ := s2a.NewClientCreds(clientOpts) + // conn, _ := grpc.Dial(serverAddr, grpc.WithTransportCredentials(creds)) + // defer conn.Close() + // + // // Make RPC call. + // + // // The process terminates right after the RPC call ends. + // // ensureProcessSessionTickets can be used to ensure resumption + // // tickets are fully processed. If the process is long-lived, using + // // ensureProcessSessionTickets is not necessary. + // ensureProcessSessionTickets.Wait() + // } + EnsureProcessSessionTickets *sync.WaitGroup + // If true, enables the use of legacy S2Av1. + EnableLegacyMode bool + // VerificationMode specifies the mode that S2A must use to verify the + // peer certificate chain. + VerificationMode VerificationModeType + + // Optional fallback after dialing with S2A fails. + FallbackOpts *FallbackOptions + + // Generates an S2AStream interface for talking to the S2A server. + getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error) + + // Serialized user specified policy for server authorization. + serverAuthorizationPolicy []byte +} + +// FallbackOptions prescribes the fallback logic that should be taken if the application fails to connect with S2A. +type FallbackOptions struct { + // FallbackClientHandshakeFunc is used to specify fallback behavior when calling s2a.NewClientCreds(). + // It will be called by ClientHandshake function, after handshake with S2A fails. + // s2a.NewClientCreds() ignores the other FallbackDialer field. + FallbackClientHandshakeFunc fallback.ClientHandshake + + // FallbackDialer is used to specify fallback behavior when calling s2a.NewS2aDialTLSContextFunc(). + // It passes in a custom fallback dialer and server address to use after dialing with S2A fails. + // s2a.NewS2aDialTLSContextFunc() ignores the other FallbackClientHandshakeFunc field. + FallbackDialer *FallbackDialer +} + +// FallbackDialer contains a fallback tls.Dialer and a server address to connect to. +type FallbackDialer struct { + // Dialer specifies a fallback tls.Dialer. + Dialer *tls.Dialer + // ServerAddr is used by Dialer to establish fallback connection. + ServerAddr string +} + +// DefaultClientOptions returns the default client options. +func DefaultClientOptions(s2aAddress string) *ClientOptions { + return &ClientOptions{ + S2AAddress: s2aAddress, + VerificationMode: ConnectToGoogle, + } +} + +// ServerOptions contains the server-side options used to establish a secure +// channel using the S2A handshaker service. +type ServerOptions struct { + // LocalIdentities is the list of local identities that may be assumed by + // the server. If no local identity is specified, then the S2A chooses a + // default local identity, if one exists. + LocalIdentities []Identity + // S2AAddress is the address of the S2A. + S2AAddress string + // If true, enables the use of legacy S2Av1. + EnableLegacyMode bool + // VerificationMode specifies the mode that S2A must use to verify the + // peer certificate chain. + VerificationMode VerificationModeType + + // Generates an S2AStream interface for talking to the S2A server. + getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error) +} + +// DefaultServerOptions returns the default server options. 
+func DefaultServerOptions(s2aAddress string) *ServerOptions { + return &ServerOptions{ + S2AAddress: s2aAddress, + VerificationMode: ConnectToGoogle, + } +} + +func toProtoIdentity(identity Identity) (*s2apb.Identity, error) { + if identity == nil { + return nil, nil + } + switch id := identity.(type) { + case *spiffeID: + return &s2apb.Identity{IdentityOneof: &s2apb.Identity_SpiffeId{SpiffeId: id.Name()}}, nil + case *hostname: + return &s2apb.Identity{IdentityOneof: &s2apb.Identity_Hostname{Hostname: id.Name()}}, nil + case *uid: + return &s2apb.Identity{IdentityOneof: &s2apb.Identity_Uid{Uid: id.Name()}}, nil + default: + return nil, errors.New("unrecognized identity type") + } +} diff -Nru temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/s2a_utils.go temporal-1.22.5/src/vendor/github.com/google/s2a-go/s2a_utils.go --- temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/s2a_utils.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/google/s2a-go/s2a_utils.go 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,79 @@ +/* + * + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package s2a + +import ( + "context" + "errors" + + commonpb "github.com/google/s2a-go/internal/proto/common_go_proto" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/peer" +) + +// AuthInfo exposes security information from the S2A to the application. +type AuthInfo interface { + // AuthType returns the authentication type. + AuthType() string + // ApplicationProtocol returns the application protocol, e.g. "grpc". + ApplicationProtocol() string + // TLSVersion returns the TLS version negotiated during the handshake. + TLSVersion() commonpb.TLSVersion + // Ciphersuite returns the ciphersuite negotiated during the handshake. + Ciphersuite() commonpb.Ciphersuite + // PeerIdentity returns the authenticated identity of the peer. + PeerIdentity() *commonpb.Identity + // LocalIdentity returns the local identity of the application used during + // session setup. + LocalIdentity() *commonpb.Identity + // PeerCertFingerprint returns the SHA256 hash of the peer certificate used in + // the S2A handshake. + PeerCertFingerprint() []byte + // LocalCertFingerprint returns the SHA256 hash of the local certificate used + // in the S2A handshake. + LocalCertFingerprint() []byte + // IsHandshakeResumed returns true if a cached session was used to resume + // the handshake. + IsHandshakeResumed() bool + // SecurityLevel returns the security level of the connection. + SecurityLevel() credentials.SecurityLevel +} + +// AuthInfoFromPeer extracts the authinfo.S2AAuthInfo object from the given +// peer, if it exists. This API should be used by gRPC clients after +// obtaining a peer object using the grpc.Peer() CallOption. 
+func AuthInfoFromPeer(p *peer.Peer) (AuthInfo, error) { + s2aAuthInfo, ok := p.AuthInfo.(AuthInfo) + if !ok { + return nil, errors.New("no S2AAuthInfo found in Peer") + } + return s2aAuthInfo, nil +} + +// AuthInfoFromContext extracts the authinfo.S2AAuthInfo object from the given +// context, if it exists. This API should be used by gRPC server RPC handlers +// to get information about the peer. On the client-side, use the grpc.Peer() +// CallOption and the AuthInfoFromPeer function. +func AuthInfoFromContext(ctx context.Context) (AuthInfo, error) { + p, ok := peer.FromContext(ctx) + if !ok { + return nil, errors.New("no Peer found in Context") + } + return AuthInfoFromPeer(p) +} diff -Nru temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/stream/s2a_stream.go temporal-1.22.5/src/vendor/github.com/google/s2a-go/stream/s2a_stream.go --- temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/stream/s2a_stream.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/google/s2a-go/stream/s2a_stream.go 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,34 @@ +/* + * + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package stream provides an interface for bidirectional streaming to the S2A server. +package stream + +import ( + s2av2pb "github.com/google/s2a-go/internal/proto/v2/s2a_go_proto" +) + +// S2AStream defines the operation for communicating with the S2A server over a bidirectional stream. +type S2AStream interface { + // Send sends the message to the S2A server. + Send(*s2av2pb.SessionReq) error + // Recv receives the message from the S2A server. + Recv() (*s2av2pb.SessionResp, error) + // Closes the channel to the S2A server. 
+ CloseSend() error +} diff -Nru temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/testdata/client_cert.pem temporal-1.22.5/src/vendor/github.com/google/s2a-go/testdata/client_cert.pem --- temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/testdata/client_cert.pem 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/google/s2a-go/testdata/client_cert.pem 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIID8TCCAtmgAwIBAgIUKXNlBRVe6UepjQUijIFPZBd/4qYwDQYJKoZIhvcNAQEL +BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2 +YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE +AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN +MjIwNTMxMjAwMzE1WhcNNDIwNTI2MjAwMzE1WjCBhzELMAkGA1UEBhMCVVMxCzAJ +BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx +ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ +KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBAOOFuIucH7XXfohGxKd3uR/ihUA/LdduR9I8kfpUEbq5BOt8xZe5/Yn9 +a1ozEHVW6cOAbHbnwAR8tkSgZ/t42QIA2k77HWU1Jh2xiEIsJivo3imm4/kZWuR0 +OqPh7MhzxpR/hvNwpI5mJsAVBWFMa5KtecFZLnyZtwHylrRN1QXzuLrOxuKFufK3 +RKbTABScn5RbZL976H/jgfSeXrbt242NrIoBnVe6fRbekbq2DQ6zFArbQMUgHjHK +P0UqBgdr1QmHfi9KytFyx9BTP3gXWnWIu+bY7/v7qKJMHFwGETo+dCLWYevJL316 +HnLfhApDMfP8U+Yv/y1N/YvgaSOSlEcCAwEAAaNTMFEwHQYDVR0OBBYEFKhAU4nu +0h/lrnggbIGvx4ej0WklMB8GA1UdIwQYMBaAFKhAU4nu0h/lrnggbIGvx4ej0Wkl +MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAE/6NghzQ5fu6yR6 +EHKbj/YMrFdT7aGn5n2sAf7wJ33LIhiFHkpWBsVlm7rDtZtwhe891ZK/P60anlg9 +/P0Ua53tSRVRmCvTnEbXWOVMN4is6MsR7BlmzUxl4AtIn7jbeifEwRL7B4xDYmdA +QrQnsqoz45dLgS5xK4WDqXATP09Q91xQDuhud/b+A4jrvgwFASmL7rMIZbp4f1JQ +nlnl/9VoTBQBvJiWkDUtQDMpRLtauddEkv4AGz75p5IspXWD6cOemuh2iQec11xD +X20rs2WZbAcAiUa3nmy8OKYw435vmpj8gp39WYbX/Yx9TymrFFbVY92wYn+quTco +pKklVz0= +-----END CERTIFICATE----- diff -Nru temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/testdata/client_key.pem temporal-1.22.5/src/vendor/github.com/google/s2a-go/testdata/client_key.pem --- temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/testdata/client_key.pem 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/google/s2a-go/testdata/client_key.pem 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEogIBAAKCAQEA44W4i5wftdd+iEbEp3e5H+KFQD8t125H0jyR+lQRurkE63zF +l7n9if1rWjMQdVbpw4BsdufABHy2RKBn+3jZAgDaTvsdZTUmHbGIQiwmK+jeKabj ++Rla5HQ6o+HsyHPGlH+G83CkjmYmwBUFYUxrkq15wVkufJm3AfKWtE3VBfO4us7G +4oW58rdEptMAFJyflFtkv3vof+OB9J5etu3bjY2sigGdV7p9Ft6RurYNDrMUCttA +xSAeMco/RSoGB2vVCYd+L0rK0XLH0FM/eBdadYi75tjv+/uookwcXAYROj50ItZh +68kvfXoect+ECkMx8/xT5i//LU39i+BpI5KURwIDAQABAoIBABgyjo/6iLzUMFbZ +/+w3pW6orrdIgN2akvTfED9pVYFgUA+jc3hRhY95bkNnjuaL2cy7Cc4Tk65mfRQL +Y0OxdJLr+EvSFSxAXM9npDA1ddHRsF8JqtFBSxNk8R+g1Yf0GDiO35Fgd3/ViWWA +VtQkRoSRApP3oiQKTRZd8H04keFR+PvmDk/Lq11l3Kc24A1PevKIPX1oI990ggw9 +9i4uSV+cnuMxmcI9xxJtgwdDFdjr39l2arLOHr4s6LGoV2IOdXHNlv5xRqWUZ0FH +MDHowkLgwDrdSTnNeaVNkce14Gqx+bd4hNaLCdKXMpedBTEmrut3f3hdV1kKjaKt +aqRYr8ECgYEA/YDGZY2jvFoHHBywlqmEMFrrCvQGH51m5R1Ntpkzr+Rh3YCmrpvq +xgwJXING0PUw3dz+xrH5lJICrfNE5Kt3fPu1rAEy+13mYsNowghtUq2Rtu0Hsjjx +2E3Bf8vEB6RNBMmGkUpTTIAroGF5tpJoRvfnWax+k4pFdrKYFtyZdNcCgYEA5cNv +EPltvOobjTXlUmtVP3n27KZN2aXexTcagLzRxE9CV4cYySENl3KuOMmccaZpIl6z +aHk6BT4X+M0LqElNUczrInfVqI+SGAFLGy7W6CJaqSr6cpyFUP/fosKpm6wKGgLq +udHfpvz5rckhKd8kJxFLvhGOK9yN5qpzih0gfhECgYAJfwRvk3G5wYmYpP58dlcs +VIuPenqsPoI3PPTHTU/hW+XKnWIhElgmGRdUrto9Q6IT/Y5RtSMLTLjq+Tzwb/fm 
+56rziYv2XJsfwgAvnI8z1Kqrto9ePsHYf3krJ1/thVsZPc9bq/QY3ohD1sLvcuaT +GgBBnLOVJU3a12/ZE2RwOwKBgF0csWMAoj8/5IB6if+3ral2xOGsl7oPZVMo/J2V +Z7EVqb4M6rd/pKFugTpUQgkwtkSOekhpcGD1hAN5HTNK2YG/+L5UMAsKe9sskwJm +HgOfAHy0BSDzW3ey6i9skg2bT9Cww+0gJ3Hl7U1HSCBO5LjMYpSZSrNtwzfqdb5Q +BX3xAoGARZdR28Ej3+/+0+fz47Yu2h4z0EI/EbrudLOWY936jIeAVwHckI3+BuqH +qR4poj1gfbnMxNuI9UzIXzjEmGewx9kDZ7IYnvloZKqoVQODO5GlKF2ja6IcMNlh +GCNdD6PSAS6HcmalmWo9sj+1YMkrl+GJikKZqVBHrHNwMGAG67w= +-----END RSA PRIVATE KEY----- diff -Nru temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/testdata/server_cert.pem temporal-1.22.5/src/vendor/github.com/google/s2a-go/testdata/server_cert.pem --- temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/testdata/server_cert.pem 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/google/s2a-go/testdata/server_cert.pem 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIID8TCCAtmgAwIBAgIUKCoDuLtiZXvhsBY2RoDm0ugizJ8wDQYJKoZIhvcNAQEL +BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2 +YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE +AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN +MjIwNTMxMjAwODI1WhcNNDIwNTI2MjAwODI1WjCBhzELMAkGA1UEBhMCVVMxCzAJ +BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx +ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ +KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBAKK1++PXQ+M3hjYH/v0K4UEYl5ljzpNM1i52eQM+gFooojT87PDSaphT +fs0PXy/PTAjHBEvPhWpOpmQXfJNYzjwcCvg66hbqkv++/VTZiFLAsHagzkEz+FRJ +qT5Eq7G5FLyw1izX1uxyPN7tAEWEEg7eqsiaXD3Cq8+TYN9cjirPeF7RZF8yFCYE +xqvbo+Yc6RL6xw19iXVTfctRgQe581KQuIY5/LXo3dWDEilFdsADAe8XAEcO64es +Ow0g1UvXLnpXSE151kXBFb3sKH/ZjCecDYMCIMEb4sWLSblkSxJ5sNSmXIG4wtr2 +Qnii7CXZgnVYraQE/Jyh+NMQANuoSdMCAwEAAaNTMFEwHQYDVR0OBBYEFAyQQQuM +ab+YUQqjK8dVVOoHVFmXMB8GA1UdIwQYMBaAFAyQQQuMab+YUQqjK8dVVOoHVFmX +MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBADj0vQ6ykWhicoqR +e6VZMwlEJV7/DSvWWKBd9MUjfKye0A4565ya5lmnzP3DiD3nqGe3miqmLsXKDs+X +POqlPXTWIamP7D4MJ32XtSLwZB4ru+I+Ao/P/VngPepoRPQoBnzHe7jww0rokqxl +AZERjlbTUwUAy/BPWPSzSJZ2j0tcs6ZLDNyYzpK4ao8R9/1VmQ92Tcp3feJs1QTg +odRQc3om/AkWOwsll+oyX0UbJeHkFHiLanUPXbdh+/BkSvZJ8ynL+feSDdaurPe+ +PSfnqLtQft9/neecGRdEaQzzzSFVQUVQzTdK1Q7hA7b55b2HvIa3ktDiks+sJsYN +Dhm6uZM= +-----END CERTIFICATE----- diff -Nru temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/testdata/server_key.pem temporal-1.22.5/src/vendor/github.com/google/s2a-go/testdata/server_key.pem --- temporal-1.21.5-1/src/vendor/github.com/google/s2a-go/testdata/server_key.pem 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/google/s2a-go/testdata/server_key.pem 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAorX749dD4zeGNgf+/QrhQRiXmWPOk0zWLnZ5Az6AWiiiNPzs +8NJqmFN+zQ9fL89MCMcES8+Fak6mZBd8k1jOPBwK+DrqFuqS/779VNmIUsCwdqDO +QTP4VEmpPkSrsbkUvLDWLNfW7HI83u0ARYQSDt6qyJpcPcKrz5Ng31yOKs94XtFk +XzIUJgTGq9uj5hzpEvrHDX2JdVN9y1GBB7nzUpC4hjn8tejd1YMSKUV2wAMB7xcA +Rw7rh6w7DSDVS9cueldITXnWRcEVvewof9mMJ5wNgwIgwRvixYtJuWRLEnmw1KZc +gbjC2vZCeKLsJdmCdVitpAT8nKH40xAA26hJ0wIDAQABAoIBACaNR+lsD8G+XiZf +LqN1+HkcAo9tfnyYMAdCOtnx7SdviT9Uzi8hK/B7mAeuJLeHPlS2EuaDfPD7QaFl +jza6S+MiIdc+3kgfvESsVAnOoOY6kZUJ9NSuI6CU82y1iJjLaYZrv9NQMLRFPPb0 +4KOX709mosB1EnXvshW0rbc+jtDFhrm1SxMt+k9TuzmMxjbOeW4LOLXPgU8X1T3Q +Xy0hMZZtcgBs9wFIo8yCtmOixax9pnFE8rRltgDxTodn9LLdz1FieyntNgDksZ0P +nt4kV7Mqly7ELaea+Foaj244mKsesic2e3GhAlMRLun/VSunSf7mOCxfpITB8dp1 
+drDhOYECgYEA19151dVxRcviuovN6Dar+QszMTnU8pDJ8BjLFjXjP/hNBBwMTHDE +duMuWk2qnwZqMooI/shxrF/ufmTgS0CFrh2+ANBZu27vWConJNXcyNtdigI4wt50 +L0Y2qcZn2mg67qFXHwoR3QNwrwnPwEjRXA09at9CSRZzcwDQ0ETXhYsCgYEAwPaG +06QdK8Zyly7TTzZJwxzv9uGiqzodmGtX6NEKjgij2JaCxHpukqZBJoqa0jKeK1cm +eNVkOvT5ff9TMzarSHQLr3pZen2/oVLb5gaFkbcJt/klv9Fd+ZRilHY3i6QwS6pD +uMiPOWS4DrLHDRVoVlAZTDjT1RVwwTs+P2NhJdkCgYEAsriXysbxBYyMp05gqEW7 +lHIFbFgpSrs9th+Q5U6wW6JEgYaHWDJ1NslY80MiZI93FWjbkbZ7BvBWESeL3EIL +a+EMErht0pVCbIhZ6FF4foPAqia0wAJVx14mm+G80kNBp5jE/NnleEsE3KcO7nBb +hg8gLn+x7bk81JZ0TDrzBYkCgYEAuQKluv47SeF3tSScTfKLPpvcKCWmxe1uutkQ +7JShPhVioyOMNb39jnYBOWbjkm4d4QgqRuiytSR0oi3QI+Ziy5EYMyNn713qAk9j +r2TJZDDPDKnBW+zt4YI4EohWMXk3JRUW4XDKggjjwJQA7bZ812TtHHvP/xoThfG7 +eSNb3eECgYBw6ssgCtMrdvQiEmjKVX/9yI38mvC2kSGyzbrQnGUfgqRGomRpeZuD +B5E3kysA4td5pT5lvcLgSW0TbOz+YbiriXjwOihPIelCvc9gE2eOUI71/byUWPFz +7u5F/xQ4NaGr5suLF+lBC6h7pSbM4El9lIHQAQadpuEdzHqrw+hs3g== +-----END RSA PRIVATE KEY----- diff -Nru temporal-1.21.5-1/src/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json temporal-1.22.5/src/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json --- temporal-1.21.5-1/src/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json 2023-09-29 14:03:31.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json 2024-02-23 09:46:10.000000000 +0000 @@ -1,3 +1,3 @@ { - "v2": "2.7.1" + "v2": "2.12.0" } diff -Nru temporal-1.21.5-1/src/vendor/github.com/googleapis/gax-go/v2/CHANGES.md temporal-1.22.5/src/vendor/github.com/googleapis/gax-go/v2/CHANGES.md --- temporal-1.21.5-1/src/vendor/github.com/googleapis/gax-go/v2/CHANGES.md 2023-09-29 14:03:31.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/googleapis/gax-go/v2/CHANGES.md 2024-02-23 09:46:10.000000000 +0000 @@ -1,5 +1,58 @@ # Changelog +## [2.12.0](https://github.com/googleapis/gax-go/compare/v2.11.0...v2.12.0) (2023-06-26) + + +### Features + +* **v2/callctx:** add new callctx package ([#291](https://github.com/googleapis/gax-go/issues/291)) ([11503ed](https://github.com/googleapis/gax-go/commit/11503ed98df4ae1bbdedf91ff64d47e63f187d68)) +* **v2:** add BuildHeaders and InsertMetadataIntoOutgoingContext to header ([#290](https://github.com/googleapis/gax-go/issues/290)) ([6a4b89f](https://github.com/googleapis/gax-go/commit/6a4b89f5551a40262e7c3caf2e1bdc7321b76ea1)) + +## [2.11.0](https://github.com/googleapis/gax-go/compare/v2.10.0...v2.11.0) (2023-06-13) + + +### Features + +* **v2:** add GoVersion package variable ([#283](https://github.com/googleapis/gax-go/issues/283)) ([26553cc](https://github.com/googleapis/gax-go/commit/26553ccadb4016b189881f52e6c253b68bb3e3d5)) + + +### Bug Fixes + +* **v2:** handle space in non-devel go version ([#288](https://github.com/googleapis/gax-go/issues/288)) ([fd7bca0](https://github.com/googleapis/gax-go/commit/fd7bca029a1c5e63def8f0a5fd1ec3f725d92f75)) + +## [2.10.0](https://github.com/googleapis/gax-go/compare/v2.9.1...v2.10.0) (2023-05-30) + + +### Features + +* update dependencies ([#280](https://github.com/googleapis/gax-go/issues/280)) ([4514281](https://github.com/googleapis/gax-go/commit/4514281058590f3637c36bfd49baa65c4d3cfb21)) + +## [2.9.1](https://github.com/googleapis/gax-go/compare/v2.9.0...v2.9.1) (2023-05-23) + + +### Bug Fixes + +* **v2:** drop cloud lro test dep ([#276](https://github.com/googleapis/gax-go/issues/276)) ([c67eeba](https://github.com/googleapis/gax-go/commit/c67eeba0f10a3294b1d93c1b8fbe40211a55ae5f)), refs 
[#270](https://github.com/googleapis/gax-go/issues/270) + +## [2.9.0](https://github.com/googleapis/gax-go/compare/v2.8.0...v2.9.0) (2023-05-22) + + +### Features + +* **apierror:** add method to return HTTP status code conditionally ([#274](https://github.com/googleapis/gax-go/issues/274)) ([5874431](https://github.com/googleapis/gax-go/commit/587443169acd10f7f86d1989dc8aaf189e645e98)), refs [#229](https://github.com/googleapis/gax-go/issues/229) + + +### Documentation + +* add ref to usage with clients ([#272](https://github.com/googleapis/gax-go/issues/272)) ([ea4d72d](https://github.com/googleapis/gax-go/commit/ea4d72d514beba4de450868b5fb028601a29164e)), refs [#228](https://github.com/googleapis/gax-go/issues/228) + +## [2.8.0](https://github.com/googleapis/gax-go/compare/v2.7.1...v2.8.0) (2023-03-15) + + +### Features + +* **v2:** add WithTimeout option ([#259](https://github.com/googleapis/gax-go/issues/259)) ([9a8da43](https://github.com/googleapis/gax-go/commit/9a8da43693002448b1e8758023699387481866d1)) + ## [2.7.1](https://github.com/googleapis/gax-go/compare/v2.7.0...v2.7.1) (2023-03-06) diff -Nru temporal-1.21.5-1/src/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go temporal-1.22.5/src/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go --- temporal-1.21.5-1/src/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go 2023-09-29 14:03:31.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go 2024-02-23 09:46:10.000000000 +0000 @@ -29,6 +29,10 @@ // Package apierror implements a wrapper error for parsing error details from // API calls. Both HTTP & gRPC status errors are supported. +// +// For examples of how to use [APIError] with client libraries please reference +// [Inspecting errors](https://pkg.go.dev/cloud.google.com/go#hdr-Inspecting_errors) +// in the client library documentation. package apierror import ( @@ -345,3 +349,13 @@ return parseDetails(details) } + +// HTTPCode returns the underlying HTTP response status code. This method returns +// `-1` if the underlying error is a [google.golang.org/grpc/status.Status]. To +// check gRPC error codes use [google.golang.org/grpc/status.Code]. +func (a *APIError) HTTPCode() int { + if a.httpErr == nil { + return -1 + } + return a.httpErr.Code +} diff -Nru temporal-1.21.5-1/src/vendor/github.com/googleapis/gax-go/v2/call_option.go temporal-1.22.5/src/vendor/github.com/googleapis/gax-go/v2/call_option.go --- temporal-1.21.5-1/src/vendor/github.com/googleapis/gax-go/v2/call_option.go 2023-09-29 14:03:31.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/googleapis/gax-go/v2/call_option.go 2024-02-23 09:46:10.000000000 +0000 @@ -218,6 +218,14 @@ s.Path = p.p } +type timeoutOpt struct { + t time.Duration +} + +func (t timeoutOpt) Resolve(s *CallSettings) { + s.timeout = t.t +} + // WithPath applies a Path override to the HTTP-based APICall. // // This is for internal use only. @@ -230,6 +238,15 @@ return grpcOpt(append([]grpc.CallOption(nil), opt...)) } +// WithTimeout is a convenience option for setting a context.WithTimeout on the +// singular context.Context used for **all** APICall attempts. Calculated from +// the start of the first APICall attempt. +// If the context.Context provided to Invoke already has a Deadline set, that +// will always be respected over the deadline calculated using this option. +func WithTimeout(t time.Duration) CallOption { + return &timeoutOpt{t: t} +} + // CallSettings allow fine-grained control over how calls are made. 
type CallSettings struct { // Retry returns a Retryer to be used to control retry logic of a method call. @@ -241,4 +258,8 @@ // Path is an HTTP override for an APICall. Path string + + // Timeout defines the amount of time that Invoke has to complete. + // Unexported so it cannot be changed by the code in an APICall. + timeout time.Duration } diff -Nru temporal-1.21.5-1/src/vendor/github.com/googleapis/gax-go/v2/callctx/callctx.go temporal-1.22.5/src/vendor/github.com/googleapis/gax-go/v2/callctx/callctx.go --- temporal-1.21.5-1/src/vendor/github.com/googleapis/gax-go/v2/callctx/callctx.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/googleapis/gax-go/v2/callctx/callctx.go 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,74 @@ +// Copyright 2023, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package callctx provides helpers for storing and retrieving values out of +// [context.Context]. These values are used by our client libraries in various +// ways across the stack. +package callctx + +import ( + "context" + "fmt" +) + +const ( + headerKey = contextKey("header") +) + +// contextKey is a private type used to store/retrieve context values. +type contextKey string + +// HeadersFromContext retrieves headers set from [SetHeaders]. These headers +// can then be cast to http.Header or metadata.MD to send along on requests. +func HeadersFromContext(ctx context.Context) map[string][]string { + m, ok := ctx.Value(headerKey).(map[string][]string) + if !ok { + return nil + } + return m +} + +// SetHeaders stores key value pairs in the returned context that can later +// be retrieved by [HeadersFromContext]. Values stored in this manner will +// automatically be retrieved by client libraries and sent as outgoing headers +// on all requests. keyvals should have a corresponding value for every key +// provided. If there is an odd number of keyvals this method will panic. 
+func SetHeaders(ctx context.Context, keyvals ...string) context.Context { + if len(keyvals)%2 != 0 { + panic(fmt.Sprintf("callctx: an even number of key value pairs must be provided, got %d", len(keyvals))) + } + h, ok := ctx.Value(headerKey).(map[string][]string) + if !ok { + h = make(map[string][]string) + } + for i := 0; i < len(keyvals); i = i + 2 { + h[keyvals[i]] = append(h[keyvals[i]], keyvals[i+1]) + } + return context.WithValue(ctx, headerKey, h) +} diff -Nru temporal-1.21.5-1/src/vendor/github.com/googleapis/gax-go/v2/header.go temporal-1.22.5/src/vendor/github.com/googleapis/gax-go/v2/header.go --- temporal-1.21.5-1/src/vendor/github.com/googleapis/gax-go/v2/header.go 2023-09-29 14:03:31.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/googleapis/gax-go/v2/header.go 2024-02-23 09:46:10.000000000 +0000 @@ -29,7 +29,79 @@ package gax -import "bytes" +import ( + "bytes" + "context" + "fmt" + "net/http" + "runtime" + "strings" + "unicode" + + "github.com/googleapis/gax-go/v2/callctx" + "google.golang.org/grpc/metadata" +) + +var ( + // GoVersion is a header-safe representation of the current runtime + // environment's Go version. This is for GAX consumers that need to + // report the Go runtime version in API calls. + GoVersion string + // version is a package internal global variable for testing purposes. + version = runtime.Version +) + +// versionUnknown is only used when the runtime version cannot be determined. +const versionUnknown = "UNKNOWN" + +func init() { + GoVersion = goVersion() +} + +// goVersion returns a Go runtime version derived from the runtime environment +// that is modified to be suitable for reporting in a header, meaning it has no +// whitespace. If it is unable to determine the Go runtime version, it returns +// versionUnknown. +func goVersion() string { + const develPrefix = "devel +" + + s := version() + if strings.HasPrefix(s, develPrefix) { + s = s[len(develPrefix):] + if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { + s = s[:p] + } + return s + } else if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { + s = s[:p] + } + + notSemverRune := func(r rune) bool { + return !strings.ContainsRune("0123456789.", r) + } + + if strings.HasPrefix(s, "go1") { + s = s[2:] + var prerelease string + if p := strings.IndexFunc(s, notSemverRune); p >= 0 { + s, prerelease = s[:p], s[p:] + } + if strings.HasSuffix(s, ".") { + s += "0" + } else if strings.Count(s, ".") < 2 { + s += ".0" + } + if prerelease != "" { + // Some release candidates already have a dash in them. + if !strings.HasPrefix(prerelease, "-") { + prerelease = "-" + prerelease + } + s += prerelease + } + return s + } + return "UNKNOWN" +} // XGoogHeader is for use by the Google Cloud Libraries only. // @@ -51,3 +123,46 @@ } return buf.String()[1:] } + +// InsertMetadataIntoOutgoingContext is for use by the Google Cloud Libraries +// only. +// +// InsertMetadataIntoOutgoingContext returns a new context that merges the +// provided keyvals metadata pairs with any existing metadata/headers in the +// provided context. keyvals should have a corresponding value for every key +// provided. If there is an odd number of keyvals this method will panic. +// Existing values for keys will not be overwritten, instead provided values +// will be appended to the list of existing values. 
+func InsertMetadataIntoOutgoingContext(ctx context.Context, keyvals ...string) context.Context { + return metadata.NewOutgoingContext(ctx, insertMetadata(ctx, keyvals...)) +} + +// BuildHeaders is for use by the Google Cloud Libraries only. +// +// BuildHeaders returns a new http.Header that merges the provided +// keyvals header pairs with any existing metadata/headers in the provided +// context. keyvals should have a corresponding value for every key provided. +// If there is an odd number of keyvals this method will panic. +// Existing values for keys will not be overwritten, instead provided values +// will be appended to the list of existing values. +func BuildHeaders(ctx context.Context, keyvals ...string) http.Header { + return http.Header(insertMetadata(ctx, keyvals...)) +} + +func insertMetadata(ctx context.Context, keyvals ...string) metadata.MD { + if len(keyvals)%2 != 0 { + panic(fmt.Sprintf("gax: an even number of key value pairs must be provided, got %d", len(keyvals))) + } + out, ok := metadata.FromOutgoingContext(ctx) + if !ok { + out = metadata.MD(make(map[string][]string)) + } + headers := callctx.HeadersFromContext(ctx) + for k, v := range headers { + out[k] = append(out[k], v...) + } + for i := 0; i < len(keyvals); i = i + 2 { + out[keyvals[i]] = append(out[keyvals[i]], keyvals[i+1]) + } + return out +} diff -Nru temporal-1.21.5-1/src/vendor/github.com/googleapis/gax-go/v2/internal/version.go temporal-1.22.5/src/vendor/github.com/googleapis/gax-go/v2/internal/version.go --- temporal-1.21.5-1/src/vendor/github.com/googleapis/gax-go/v2/internal/version.go 2023-09-29 14:03:31.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/googleapis/gax-go/v2/internal/version.go 2024-02-23 09:46:10.000000000 +0000 @@ -30,4 +30,4 @@ package internal // Version is the current tagged release of the library. -const Version = "2.7.1" +const Version = "2.12.0" diff -Nru temporal-1.21.5-1/src/vendor/github.com/googleapis/gax-go/v2/invoke.go temporal-1.22.5/src/vendor/github.com/googleapis/gax-go/v2/invoke.go --- temporal-1.21.5-1/src/vendor/github.com/googleapis/gax-go/v2/invoke.go 2023-09-29 14:03:31.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/googleapis/gax-go/v2/invoke.go 2024-02-23 09:46:10.000000000 +0000 @@ -68,6 +68,16 @@ // invoke implements Invoke, taking an additional sleeper argument for testing. func invoke(ctx context.Context, call APICall, settings CallSettings, sp sleeper) error { var retryer Retryer + + // Only use the value provided via WithTimeout if the context doesn't + // already have a deadline. This is important for backwards compatibility if + // the user already set a deadline on the context given to Invoke. + if _, ok := ctx.Deadline(); !ok && settings.timeout != 0 { + c, cc := context.WithTimeout(ctx, settings.timeout) + defer cc() + ctx = c + } + for { err := call(ctx, settings) if err == nil { diff -Nru temporal-1.21.5-1/src/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/doc.go temporal-1.22.5/src/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/doc.go --- temporal-1.21.5-1/src/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/doc.go 2023-09-29 14:03:31.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/doc.go 2024-02-23 09:46:10.000000000 +0000 @@ -18,7 +18,7 @@ linear backoff with 10% jitter. For chained interceptors, the retry interceptor will call every interceptor that follows it -whenever when a retry happens. +whenever a retry happens. 
Please see examples for more advanced use. */ diff -Nru temporal-1.21.5-1/src/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/retry.go temporal-1.22.5/src/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/retry.go --- temporal-1.21.5-1/src/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/retry.go 2023-09-29 14:03:31.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/retry.go 2024-02-23 09:46:10.000000000 +0000 @@ -5,8 +5,8 @@ import ( "context" - "fmt" "io" + "strconv" "sync" "time" @@ -136,7 +136,6 @@ type serverStreamingRetryingStream struct { grpc.ClientStream bufferedSends []interface{} // single message that the client can sen - receivedGood bool // indicates whether any prior receives were successful wasClosedSend bool // indicates that CloseSend was closed parentCtx context.Context callOpts *options @@ -209,17 +208,8 @@ } func (s *serverStreamingRetryingStream) receiveMsgAndIndicateRetry(m interface{}) (bool, error) { - s.mu.RLock() - wasGood := s.receivedGood - s.mu.RUnlock() err := s.getStream().RecvMsg(m) if err == nil || err == io.EOF { - s.mu.Lock() - s.receivedGood = true - s.mu.Unlock() - return false, err - } else if wasGood { - // previous RecvMsg in the stream succeeded, no retry logic should interfere return false, err } if isContextError(err) { @@ -303,7 +293,7 @@ ctx, _ = context.WithTimeout(ctx, callOpts.perCallTimeout) } if attempt > 0 && callOpts.includeHeader { - mdClone := metautils.ExtractOutgoing(ctx).Clone().Set(AttemptMetadataKey, fmt.Sprintf("%d", attempt)) + mdClone := metautils.ExtractOutgoing(ctx).Clone().Set(AttemptMetadataKey, strconv.FormatUint(uint64(attempt), 10)) ctx = mdClone.ToOutgoing(ctx) } return ctx diff -Nru temporal-1.21.5-1/src/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/nicemd.go temporal-1.22.5/src/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/nicemd.go --- temporal-1.21.5-1/src/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/nicemd.go 2023-09-29 14:03:31.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/nicemd.go 2024-02-23 09:46:10.000000000 +0000 @@ -10,7 +10,7 @@ "google.golang.org/grpc/metadata" ) -// NiceMD is a convenience wrapper definiting extra functions on the metadata. +// NiceMD is a convenience wrapper defining extra functions on the metadata. type NiceMD metadata.MD // ExtractIncoming extracts an inbound metadata from the server-side context. @@ -39,7 +39,7 @@ // Clone performs a *deep* copy of the metadata.MD. // -// You can specify the lower-case copiedKeys to only copy certain whitelisted keys. If no keys are explicitly whitelisted +// You can specify the lower-case copiedKeys to only copy certain allow-listed keys. If no keys are explicitly allow-listed // all keys get copied. func (m NiceMD) Clone(copiedKeys ...string) NiceMD { newMd := NiceMD(metadata.Pairs()) @@ -61,7 +61,7 @@ newMd[k] = make([]string, len(vv)) copy(newMd[k], vv) } - return NiceMD(newMd) + return newMd } // ToOutgoing sets the given NiceMD as a client-side context for dispatching. 
diff -Nru temporal-1.21.5-1/src/vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt temporal-1.22.5/src/vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt --- temporal-1.21.5-1/src/vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,27 @@ +Copyright (c) 2015, Gengo, Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + * Neither the name of Gengo, Inc. nor the names of its + contributors may be used to endorse or promote products derived from this + software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff -Nru temporal-1.21.5-1/src/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/BUILD.bazel temporal-1.22.5/src/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/BUILD.bazel --- temporal-1.21.5-1/src/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/BUILD.bazel 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/BUILD.bazel 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,23 @@ +load("@rules_proto//proto:defs.bzl", "proto_library") +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") + +package(default_visibility = ["//visibility:public"]) + +proto_library( + name = "internal_proto", + srcs = ["errors.proto"], + deps = ["@com_google_protobuf//:any_proto"], +) + +go_proto_library( + name = "internal_go_proto", + importpath = "github.com/grpc-ecosystem/grpc-gateway/internal", + proto = ":internal_proto", +) + +go_library( + name = "go_default_library", + embed = [":internal_go_proto"], + importpath = "github.com/grpc-ecosystem/grpc-gateway/internal", +) diff -Nru temporal-1.21.5-1/src/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/errors.pb.go temporal-1.22.5/src/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/errors.pb.go --- temporal-1.21.5-1/src/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/errors.pb.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/errors.pb.go 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,189 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: internal/errors.proto + +package internal + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + any "github.com/golang/protobuf/ptypes/any" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Error is the generic error returned from unary RPCs. +type Error struct { + Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` + // This is to make the error more compatible with users that expect errors to be Status objects: + // https://github.com/grpc/grpc/blob/master/src/proto/grpc/status/status.proto + // It should be the exact same message as the Error field. + Code int32 `protobuf:"varint,2,opt,name=code,proto3" json:"code,omitempty"` + Message string `protobuf:"bytes,3,opt,name=message,proto3" json:"message,omitempty"` + Details []*any.Any `protobuf:"bytes,4,rep,name=details,proto3" json:"details,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Error) Reset() { *m = Error{} } +func (m *Error) String() string { return proto.CompactTextString(m) } +func (*Error) ProtoMessage() {} +func (*Error) Descriptor() ([]byte, []int) { + return fileDescriptor_9b093362ca6d1e03, []int{0} +} + +func (m *Error) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Error.Unmarshal(m, b) +} +func (m *Error) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Error.Marshal(b, m, deterministic) +} +func (m *Error) XXX_Merge(src proto.Message) { + xxx_messageInfo_Error.Merge(m, src) +} +func (m *Error) XXX_Size() int { + return xxx_messageInfo_Error.Size(m) +} +func (m *Error) XXX_DiscardUnknown() { + xxx_messageInfo_Error.DiscardUnknown(m) +} + +var xxx_messageInfo_Error proto.InternalMessageInfo + +func (m *Error) GetError() string { + if m != nil { + return m.Error + } + return "" +} + +func (m *Error) GetCode() int32 { + if m != nil { + return m.Code + } + return 0 +} + +func (m *Error) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +func (m *Error) GetDetails() []*any.Any { + if m != nil { + return m.Details + } + return nil +} + +// StreamError is a response type which is returned when +// streaming rpc returns an error. 
+type StreamError struct { + GrpcCode int32 `protobuf:"varint,1,opt,name=grpc_code,json=grpcCode,proto3" json:"grpc_code,omitempty"` + HttpCode int32 `protobuf:"varint,2,opt,name=http_code,json=httpCode,proto3" json:"http_code,omitempty"` + Message string `protobuf:"bytes,3,opt,name=message,proto3" json:"message,omitempty"` + HttpStatus string `protobuf:"bytes,4,opt,name=http_status,json=httpStatus,proto3" json:"http_status,omitempty"` + Details []*any.Any `protobuf:"bytes,5,rep,name=details,proto3" json:"details,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StreamError) Reset() { *m = StreamError{} } +func (m *StreamError) String() string { return proto.CompactTextString(m) } +func (*StreamError) ProtoMessage() {} +func (*StreamError) Descriptor() ([]byte, []int) { + return fileDescriptor_9b093362ca6d1e03, []int{1} +} + +func (m *StreamError) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StreamError.Unmarshal(m, b) +} +func (m *StreamError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StreamError.Marshal(b, m, deterministic) +} +func (m *StreamError) XXX_Merge(src proto.Message) { + xxx_messageInfo_StreamError.Merge(m, src) +} +func (m *StreamError) XXX_Size() int { + return xxx_messageInfo_StreamError.Size(m) +} +func (m *StreamError) XXX_DiscardUnknown() { + xxx_messageInfo_StreamError.DiscardUnknown(m) +} + +var xxx_messageInfo_StreamError proto.InternalMessageInfo + +func (m *StreamError) GetGrpcCode() int32 { + if m != nil { + return m.GrpcCode + } + return 0 +} + +func (m *StreamError) GetHttpCode() int32 { + if m != nil { + return m.HttpCode + } + return 0 +} + +func (m *StreamError) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +func (m *StreamError) GetHttpStatus() string { + if m != nil { + return m.HttpStatus + } + return "" +} + +func (m *StreamError) GetDetails() []*any.Any { + if m != nil { + return m.Details + } + return nil +} + +func init() { + proto.RegisterType((*Error)(nil), "grpc.gateway.runtime.Error") + proto.RegisterType((*StreamError)(nil), "grpc.gateway.runtime.StreamError") +} + +func init() { proto.RegisterFile("internal/errors.proto", fileDescriptor_9b093362ca6d1e03) } + +var fileDescriptor_9b093362ca6d1e03 = []byte{ + // 252 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x90, 0xc1, 0x4a, 0xc4, 0x30, + 0x10, 0x86, 0x89, 0xbb, 0x75, 0xdb, 0xe9, 0x2d, 0x54, 0x88, 0xee, 0xc1, 0xb2, 0xa7, 0x9e, 0x52, + 0xd0, 0x27, 0xd0, 0xc5, 0x17, 0xe8, 0xde, 0xbc, 0x2c, 0xd9, 0xdd, 0x31, 0x16, 0xda, 0xa4, 0x24, + 0x53, 0xa4, 0xf8, 0x56, 0x3e, 0xa1, 0x24, 0xa5, 0xb0, 0x27, 0xf1, 0xd6, 0xf9, 0xfb, 0xcf, 0x7c, + 0x1f, 0x81, 0xbb, 0xd6, 0x10, 0x3a, 0xa3, 0xba, 0x1a, 0x9d, 0xb3, 0xce, 0xcb, 0xc1, 0x59, 0xb2, + 0xbc, 0xd0, 0x6e, 0x38, 0x4b, 0xad, 0x08, 0xbf, 0xd4, 0x24, 0xdd, 0x68, 0xa8, 0xed, 0xf1, 0xe1, + 0x5e, 0x5b, 0xab, 0x3b, 0xac, 0x63, 0xe7, 0x34, 0x7e, 0xd4, 0xca, 0x4c, 0xf3, 0xc2, 0xee, 0x1b, + 0x92, 0xb7, 0x70, 0x80, 0x17, 0x90, 0xc4, 0x4b, 0x82, 0x95, 0xac, 0xca, 0x9a, 0x79, 0xe0, 0x1c, + 0xd6, 0x67, 0x7b, 0x41, 0x71, 0x53, 0xb2, 0x2a, 0x69, 0xe2, 0x37, 0x17, 0xb0, 0xe9, 0xd1, 0x7b, + 0xa5, 0x51, 0xac, 0x62, 0x77, 0x19, 0xb9, 0x84, 0xcd, 0x05, 0x49, 0xb5, 0x9d, 0x17, 0xeb, 0x72, + 0x55, 0xe5, 0x4f, 0x85, 0x9c, 0xc9, 0x72, 0x21, 0xcb, 0x17, 0x33, 0x35, 0x4b, 0x69, 0xf7, 0xc3, + 0x20, 0x3f, 0x90, 0x43, 0xd5, 0xcf, 0x0e, 0x5b, 0xc8, 0x82, 
0xff, 0x31, 0x22, 0x59, 0x44, 0xa6, + 0x21, 0xd8, 0x07, 0xec, 0x16, 0xb2, 0x4f, 0xa2, 0xe1, 0x78, 0xe5, 0x93, 0x86, 0x60, 0xff, 0xb7, + 0xd3, 0x23, 0xe4, 0x71, 0xcd, 0x93, 0xa2, 0x31, 0x78, 0x85, 0xbf, 0x10, 0xa2, 0x43, 0x4c, 0xae, + 0xa5, 0x93, 0x7f, 0x48, 0xbf, 0xc2, 0x7b, 0xba, 0xbc, 0xfd, 0xe9, 0x36, 0x56, 0x9e, 0x7f, 0x03, + 0x00, 0x00, 0xff, 0xff, 0xde, 0x72, 0x6b, 0x83, 0x8e, 0x01, 0x00, 0x00, +} diff -Nru temporal-1.21.5-1/src/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/errors.proto temporal-1.22.5/src/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/errors.proto --- temporal-1.21.5-1/src/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/errors.proto 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/errors.proto 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,26 @@ +syntax = "proto3"; +package grpc.gateway.runtime; +option go_package = "internal"; + +import "google/protobuf/any.proto"; + +// Error is the generic error returned from unary RPCs. +message Error { + string error = 1; + // This is to make the error more compatible with users that expect errors to be Status objects: + // https://github.com/grpc/grpc/blob/master/src/proto/grpc/status/status.proto + // It should be the exact same message as the Error field. + int32 code = 2; + string message = 3; + repeated google.protobuf.Any details = 4; +} + +// StreamError is a response type which is returned when +// streaming rpc returns an error. +message StreamError { + int32 grpc_code = 1; + int32 http_code = 2; + string message = 3; + string http_status = 4; + repeated google.protobuf.Any details = 5; +} diff -Nru temporal-1.21.5-1/src/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel temporal-1.22.5/src/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel --- temporal-1.21.5-1/src/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,85 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +package(default_visibility = ["//visibility:public"]) + +go_library( + name = "go_default_library", + srcs = [ + "context.go", + "convert.go", + "doc.go", + "errors.go", + "fieldmask.go", + "handler.go", + "marshal_httpbodyproto.go", + "marshal_json.go", + "marshal_jsonpb.go", + "marshal_proto.go", + "marshaler.go", + "marshaler_registry.go", + "mux.go", + "pattern.go", + "proto2_convert.go", + "proto_errors.go", + "query.go", + ], + importpath = "github.com/grpc-ecosystem/grpc-gateway/runtime", + deps = [ + "//internal:go_default_library", + "//utilities:go_default_library", + "@com_github_golang_protobuf//descriptor:go_default_library_gen", + "@com_github_golang_protobuf//jsonpb:go_default_library_gen", + "@com_github_golang_protobuf//proto:go_default_library", + "@go_googleapis//google/api:httpbody_go_proto", + "@io_bazel_rules_go//proto/wkt:any_go_proto", + "@io_bazel_rules_go//proto/wkt:descriptor_go_proto", + "@io_bazel_rules_go//proto/wkt:duration_go_proto", + "@io_bazel_rules_go//proto/wkt:field_mask_go_proto", + "@io_bazel_rules_go//proto/wkt:timestamp_go_proto", + "@io_bazel_rules_go//proto/wkt:wrappers_go_proto", + "@org_golang_google_grpc//codes:go_default_library", + "@org_golang_google_grpc//grpclog:go_default_library", + "@org_golang_google_grpc//metadata:go_default_library", + 
"@org_golang_google_grpc//status:go_default_library", + ], +) + +go_test( + name = "go_default_test", + size = "small", + srcs = [ + "context_test.go", + "convert_test.go", + "errors_test.go", + "fieldmask_test.go", + "handler_test.go", + "marshal_httpbodyproto_test.go", + "marshal_json_test.go", + "marshal_jsonpb_test.go", + "marshal_proto_test.go", + "marshaler_registry_test.go", + "mux_test.go", + "pattern_test.go", + "query_test.go", + ], + embed = [":go_default_library"], + deps = [ + "//internal:go_default_library", + "//runtime/internal/examplepb:go_default_library", + "//utilities:go_default_library", + "@com_github_golang_protobuf//jsonpb:go_default_library_gen", + "@com_github_golang_protobuf//proto:go_default_library", + "@com_github_golang_protobuf//ptypes:go_default_library_gen", + "@go_googleapis//google/api:httpbody_go_proto", + "@go_googleapis//google/rpc:errdetails_go_proto", + "@io_bazel_rules_go//proto/wkt:duration_go_proto", + "@io_bazel_rules_go//proto/wkt:empty_go_proto", + "@io_bazel_rules_go//proto/wkt:field_mask_go_proto", + "@io_bazel_rules_go//proto/wkt:struct_go_proto", + "@io_bazel_rules_go//proto/wkt:timestamp_go_proto", + "@io_bazel_rules_go//proto/wkt:wrappers_go_proto", + "@org_golang_google_grpc//codes:go_default_library", + "@org_golang_google_grpc//metadata:go_default_library", + "@org_golang_google_grpc//status:go_default_library", + ], +) diff -Nru temporal-1.21.5-1/src/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go temporal-1.22.5/src/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go --- temporal-1.21.5-1/src/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,291 @@ +package runtime + +import ( + "context" + "encoding/base64" + "fmt" + "net" + "net/http" + "net/textproto" + "strconv" + "strings" + "sync" + "time" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// MetadataHeaderPrefix is the http prefix that represents custom metadata +// parameters to or from a gRPC call. +const MetadataHeaderPrefix = "Grpc-Metadata-" + +// MetadataPrefix is prepended to permanent HTTP header keys (as specified +// by the IANA) when added to the gRPC context. +const MetadataPrefix = "grpcgateway-" + +// MetadataTrailerPrefix is prepended to gRPC metadata as it is converted to +// HTTP headers in a response handled by grpc-gateway +const MetadataTrailerPrefix = "Grpc-Trailer-" + +const metadataGrpcTimeout = "Grpc-Timeout" +const metadataHeaderBinarySuffix = "-Bin" + +const xForwardedFor = "X-Forwarded-For" +const xForwardedHost = "X-Forwarded-Host" + +var ( + // DefaultContextTimeout is used for gRPC call context.WithTimeout whenever a Grpc-Timeout inbound + // header isn't present. If the value is 0 the sent `context` will not have a timeout. + DefaultContextTimeout = 0 * time.Second +) + +func decodeBinHeader(v string) ([]byte, error) { + if len(v)%4 == 0 { + // Input was padded, or padding was not necessary. + return base64.StdEncoding.DecodeString(v) + } + return base64.RawStdEncoding.DecodeString(v) +} + +/* +AnnotateContext adds context information such as metadata from the request. + +At a minimum, the RemoteAddr is included in the fashion of "X-Forwarded-For", +except that the forwarded destination is not another HTTP service but rather +a gRPC service. 
+*/ +func AnnotateContext(ctx context.Context, mux *ServeMux, req *http.Request) (context.Context, error) { + ctx, md, err := annotateContext(ctx, mux, req) + if err != nil { + return nil, err + } + if md == nil { + return ctx, nil + } + + return metadata.NewOutgoingContext(ctx, md), nil +} + +// AnnotateIncomingContext adds context information such as metadata from the request. +// Attach metadata as incoming context. +func AnnotateIncomingContext(ctx context.Context, mux *ServeMux, req *http.Request) (context.Context, error) { + ctx, md, err := annotateContext(ctx, mux, req) + if err != nil { + return nil, err + } + if md == nil { + return ctx, nil + } + + return metadata.NewIncomingContext(ctx, md), nil +} + +func annotateContext(ctx context.Context, mux *ServeMux, req *http.Request) (context.Context, metadata.MD, error) { + var pairs []string + timeout := DefaultContextTimeout + if tm := req.Header.Get(metadataGrpcTimeout); tm != "" { + var err error + timeout, err = timeoutDecode(tm) + if err != nil { + return nil, nil, status.Errorf(codes.InvalidArgument, "invalid grpc-timeout: %s", tm) + } + } + + for key, vals := range req.Header { + key = textproto.CanonicalMIMEHeaderKey(key) + for _, val := range vals { + // For backwards-compatibility, pass through 'authorization' header with no prefix. + if key == "Authorization" { + pairs = append(pairs, "authorization", val) + } + if h, ok := mux.incomingHeaderMatcher(key); ok { + // Handles "-bin" metadata in grpc, since grpc will do another base64 + // encode before sending to server, we need to decode it first. + if strings.HasSuffix(key, metadataHeaderBinarySuffix) { + b, err := decodeBinHeader(val) + if err != nil { + return nil, nil, status.Errorf(codes.InvalidArgument, "invalid binary header %s: %s", key, err) + } + + val = string(b) + } + pairs = append(pairs, h, val) + } + } + } + if host := req.Header.Get(xForwardedHost); host != "" { + pairs = append(pairs, strings.ToLower(xForwardedHost), host) + } else if req.Host != "" { + pairs = append(pairs, strings.ToLower(xForwardedHost), req.Host) + } + + if addr := req.RemoteAddr; addr != "" { + if remoteIP, _, err := net.SplitHostPort(addr); err == nil { + if fwd := req.Header.Get(xForwardedFor); fwd == "" { + pairs = append(pairs, strings.ToLower(xForwardedFor), remoteIP) + } else { + pairs = append(pairs, strings.ToLower(xForwardedFor), fmt.Sprintf("%s, %s", fwd, remoteIP)) + } + } + } + + if timeout != 0 { + ctx, _ = context.WithTimeout(ctx, timeout) + } + if len(pairs) == 0 { + return ctx, nil, nil + } + md := metadata.Pairs(pairs...) + for _, mda := range mux.metadataAnnotators { + md = metadata.Join(md, mda(ctx, req)) + } + return ctx, md, nil +} + +// ServerMetadata consists of metadata sent from gRPC server. +type ServerMetadata struct { + HeaderMD metadata.MD + TrailerMD metadata.MD +} + +type serverMetadataKey struct{} + +// NewServerMetadataContext creates a new context with ServerMetadata +func NewServerMetadataContext(ctx context.Context, md ServerMetadata) context.Context { + return context.WithValue(ctx, serverMetadataKey{}, md) +} + +// ServerMetadataFromContext returns the ServerMetadata in ctx +func ServerMetadataFromContext(ctx context.Context) (md ServerMetadata, ok bool) { + md, ok = ctx.Value(serverMetadataKey{}).(ServerMetadata) + return +} + +// ServerTransportStream implements grpc.ServerTransportStream. +// It should only be used by the generated files to support grpc.SendHeader +// outside of gRPC server use. 
+type ServerTransportStream struct { + mu sync.Mutex + header metadata.MD + trailer metadata.MD +} + +// Method returns the method for the stream. +func (s *ServerTransportStream) Method() string { + return "" +} + +// Header returns the header metadata of the stream. +func (s *ServerTransportStream) Header() metadata.MD { + s.mu.Lock() + defer s.mu.Unlock() + return s.header.Copy() +} + +// SetHeader sets the header metadata. +func (s *ServerTransportStream) SetHeader(md metadata.MD) error { + if md.Len() == 0 { + return nil + } + + s.mu.Lock() + s.header = metadata.Join(s.header, md) + s.mu.Unlock() + return nil +} + +// SendHeader sets the header metadata. +func (s *ServerTransportStream) SendHeader(md metadata.MD) error { + return s.SetHeader(md) +} + +// Trailer returns the cached trailer metadata. +func (s *ServerTransportStream) Trailer() metadata.MD { + s.mu.Lock() + defer s.mu.Unlock() + return s.trailer.Copy() +} + +// SetTrailer sets the trailer metadata. +func (s *ServerTransportStream) SetTrailer(md metadata.MD) error { + if md.Len() == 0 { + return nil + } + + s.mu.Lock() + s.trailer = metadata.Join(s.trailer, md) + s.mu.Unlock() + return nil +} + +func timeoutDecode(s string) (time.Duration, error) { + size := len(s) + if size < 2 { + return 0, fmt.Errorf("timeout string is too short: %q", s) + } + d, ok := timeoutUnitToDuration(s[size-1]) + if !ok { + return 0, fmt.Errorf("timeout unit is not recognized: %q", s) + } + t, err := strconv.ParseInt(s[:size-1], 10, 64) + if err != nil { + return 0, err + } + return d * time.Duration(t), nil +} + +func timeoutUnitToDuration(u uint8) (d time.Duration, ok bool) { + switch u { + case 'H': + return time.Hour, true + case 'M': + return time.Minute, true + case 'S': + return time.Second, true + case 'm': + return time.Millisecond, true + case 'u': + return time.Microsecond, true + case 'n': + return time.Nanosecond, true + default: + } + return +} + +// isPermanentHTTPHeader checks whether hdr belongs to the list of +// permanent request headers maintained by IANA. +// http://www.iana.org/assignments/message-headers/message-headers.xml +func isPermanentHTTPHeader(hdr string) bool { + switch hdr { + case + "Accept", + "Accept-Charset", + "Accept-Language", + "Accept-Ranges", + "Authorization", + "Cache-Control", + "Content-Type", + "Cookie", + "Date", + "Expect", + "From", + "Host", + "If-Match", + "If-Modified-Since", + "If-None-Match", + "If-Schedule-Tag-Match", + "If-Unmodified-Since", + "Max-Forwards", + "Origin", + "Pragma", + "Referer", + "User-Agent", + "Via", + "Warning": + return true + } + return false +} diff -Nru temporal-1.21.5-1/src/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go temporal-1.22.5/src/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go --- temporal-1.21.5-1/src/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,318 @@ +package runtime + +import ( + "encoding/base64" + "fmt" + "strconv" + "strings" + + "github.com/golang/protobuf/jsonpb" + "github.com/golang/protobuf/ptypes/duration" + "github.com/golang/protobuf/ptypes/timestamp" + "github.com/golang/protobuf/ptypes/wrappers" +) + +// String just returns the given string. +// It is just for compatibility to other types. 
+func String(val string) (string, error) { + return val, nil +} + +// StringSlice converts 'val' where individual strings are separated by +// 'sep' into a string slice. +func StringSlice(val, sep string) ([]string, error) { + return strings.Split(val, sep), nil +} + +// Bool converts the given string representation of a boolean value into bool. +func Bool(val string) (bool, error) { + return strconv.ParseBool(val) +} + +// BoolSlice converts 'val' where individual booleans are separated by +// 'sep' into a bool slice. +func BoolSlice(val, sep string) ([]bool, error) { + s := strings.Split(val, sep) + values := make([]bool, len(s)) + for i, v := range s { + value, err := Bool(v) + if err != nil { + return values, err + } + values[i] = value + } + return values, nil +} + +// Float64 converts the given string representation into representation of a floating point number into float64. +func Float64(val string) (float64, error) { + return strconv.ParseFloat(val, 64) +} + +// Float64Slice converts 'val' where individual floating point numbers are separated by +// 'sep' into a float64 slice. +func Float64Slice(val, sep string) ([]float64, error) { + s := strings.Split(val, sep) + values := make([]float64, len(s)) + for i, v := range s { + value, err := Float64(v) + if err != nil { + return values, err + } + values[i] = value + } + return values, nil +} + +// Float32 converts the given string representation of a floating point number into float32. +func Float32(val string) (float32, error) { + f, err := strconv.ParseFloat(val, 32) + if err != nil { + return 0, err + } + return float32(f), nil +} + +// Float32Slice converts 'val' where individual floating point numbers are separated by +// 'sep' into a float32 slice. +func Float32Slice(val, sep string) ([]float32, error) { + s := strings.Split(val, sep) + values := make([]float32, len(s)) + for i, v := range s { + value, err := Float32(v) + if err != nil { + return values, err + } + values[i] = value + } + return values, nil +} + +// Int64 converts the given string representation of an integer into int64. +func Int64(val string) (int64, error) { + return strconv.ParseInt(val, 0, 64) +} + +// Int64Slice converts 'val' where individual integers are separated by +// 'sep' into a int64 slice. +func Int64Slice(val, sep string) ([]int64, error) { + s := strings.Split(val, sep) + values := make([]int64, len(s)) + for i, v := range s { + value, err := Int64(v) + if err != nil { + return values, err + } + values[i] = value + } + return values, nil +} + +// Int32 converts the given string representation of an integer into int32. +func Int32(val string) (int32, error) { + i, err := strconv.ParseInt(val, 0, 32) + if err != nil { + return 0, err + } + return int32(i), nil +} + +// Int32Slice converts 'val' where individual integers are separated by +// 'sep' into a int32 slice. +func Int32Slice(val, sep string) ([]int32, error) { + s := strings.Split(val, sep) + values := make([]int32, len(s)) + for i, v := range s { + value, err := Int32(v) + if err != nil { + return values, err + } + values[i] = value + } + return values, nil +} + +// Uint64 converts the given string representation of an integer into uint64. +func Uint64(val string) (uint64, error) { + return strconv.ParseUint(val, 0, 64) +} + +// Uint64Slice converts 'val' where individual integers are separated by +// 'sep' into a uint64 slice. 
+func Uint64Slice(val, sep string) ([]uint64, error) { + s := strings.Split(val, sep) + values := make([]uint64, len(s)) + for i, v := range s { + value, err := Uint64(v) + if err != nil { + return values, err + } + values[i] = value + } + return values, nil +} + +// Uint32 converts the given string representation of an integer into uint32. +func Uint32(val string) (uint32, error) { + i, err := strconv.ParseUint(val, 0, 32) + if err != nil { + return 0, err + } + return uint32(i), nil +} + +// Uint32Slice converts 'val' where individual integers are separated by +// 'sep' into a uint32 slice. +func Uint32Slice(val, sep string) ([]uint32, error) { + s := strings.Split(val, sep) + values := make([]uint32, len(s)) + for i, v := range s { + value, err := Uint32(v) + if err != nil { + return values, err + } + values[i] = value + } + return values, nil +} + +// Bytes converts the given string representation of a byte sequence into a slice of bytes +// A bytes sequence is encoded in URL-safe base64 without padding +func Bytes(val string) ([]byte, error) { + b, err := base64.StdEncoding.DecodeString(val) + if err != nil { + b, err = base64.URLEncoding.DecodeString(val) + if err != nil { + return nil, err + } + } + return b, nil +} + +// BytesSlice converts 'val' where individual bytes sequences, encoded in URL-safe +// base64 without padding, are separated by 'sep' into a slice of bytes slices slice. +func BytesSlice(val, sep string) ([][]byte, error) { + s := strings.Split(val, sep) + values := make([][]byte, len(s)) + for i, v := range s { + value, err := Bytes(v) + if err != nil { + return values, err + } + values[i] = value + } + return values, nil +} + +// Timestamp converts the given RFC3339 formatted string into a timestamp.Timestamp. +func Timestamp(val string) (*timestamp.Timestamp, error) { + var r timestamp.Timestamp + err := jsonpb.UnmarshalString(val, &r) + if err != nil { + return nil, err + } + return &r, nil +} + +// Duration converts the given string into a timestamp.Duration. +func Duration(val string) (*duration.Duration, error) { + var r duration.Duration + err := jsonpb.UnmarshalString(val, &r) + if err != nil { + return nil, err + } + return &r, nil +} + +// Enum converts the given string into an int32 that should be type casted into the +// correct enum proto type. +func Enum(val string, enumValMap map[string]int32) (int32, error) { + e, ok := enumValMap[val] + if ok { + return e, nil + } + + i, err := Int32(val) + if err != nil { + return 0, fmt.Errorf("%s is not valid", val) + } + for _, v := range enumValMap { + if v == i { + return i, nil + } + } + return 0, fmt.Errorf("%s is not valid", val) +} + +// EnumSlice converts 'val' where individual enums are separated by 'sep' +// into a int32 slice. Each individual int32 should be type casted into the +// correct enum proto type. 
+func EnumSlice(val, sep string, enumValMap map[string]int32) ([]int32, error) { + s := strings.Split(val, sep) + values := make([]int32, len(s)) + for i, v := range s { + value, err := Enum(v, enumValMap) + if err != nil { + return values, err + } + values[i] = value + } + return values, nil +} + +/* + Support fot google.protobuf.wrappers on top of primitive types +*/ + +// StringValue well-known type support as wrapper around string type +func StringValue(val string) (*wrappers.StringValue, error) { + return &wrappers.StringValue{Value: val}, nil +} + +// FloatValue well-known type support as wrapper around float32 type +func FloatValue(val string) (*wrappers.FloatValue, error) { + parsedVal, err := Float32(val) + return &wrappers.FloatValue{Value: parsedVal}, err +} + +// DoubleValue well-known type support as wrapper around float64 type +func DoubleValue(val string) (*wrappers.DoubleValue, error) { + parsedVal, err := Float64(val) + return &wrappers.DoubleValue{Value: parsedVal}, err +} + +// BoolValue well-known type support as wrapper around bool type +func BoolValue(val string) (*wrappers.BoolValue, error) { + parsedVal, err := Bool(val) + return &wrappers.BoolValue{Value: parsedVal}, err +} + +// Int32Value well-known type support as wrapper around int32 type +func Int32Value(val string) (*wrappers.Int32Value, error) { + parsedVal, err := Int32(val) + return &wrappers.Int32Value{Value: parsedVal}, err +} + +// UInt32Value well-known type support as wrapper around uint32 type +func UInt32Value(val string) (*wrappers.UInt32Value, error) { + parsedVal, err := Uint32(val) + return &wrappers.UInt32Value{Value: parsedVal}, err +} + +// Int64Value well-known type support as wrapper around int64 type +func Int64Value(val string) (*wrappers.Int64Value, error) { + parsedVal, err := Int64(val) + return &wrappers.Int64Value{Value: parsedVal}, err +} + +// UInt64Value well-known type support as wrapper around uint64 type +func UInt64Value(val string) (*wrappers.UInt64Value, error) { + parsedVal, err := Uint64(val) + return &wrappers.UInt64Value{Value: parsedVal}, err +} + +// BytesValue well-known type support as wrapper around bytes[] type +func BytesValue(val string) (*wrappers.BytesValue, error) { + parsedVal, err := Bytes(val) + return &wrappers.BytesValue{Value: parsedVal}, err +} diff -Nru temporal-1.21.5-1/src/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/doc.go temporal-1.22.5/src/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/doc.go --- temporal-1.21.5-1/src/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/doc.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/doc.go 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,5 @@ +/* +Package runtime contains runtime helper functions used by +servers which protoc-gen-grpc-gateway generates. 
+*/ +package runtime diff -Nru temporal-1.21.5-1/src/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go temporal-1.22.5/src/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go --- temporal-1.21.5-1/src/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,186 @@ +package runtime + +import ( + "context" + "io" + "net/http" + "strings" + + "github.com/grpc-ecosystem/grpc-gateway/internal" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" +) + +// HTTPStatusFromCode converts a gRPC error code into the corresponding HTTP response status. +// See: https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto +func HTTPStatusFromCode(code codes.Code) int { + switch code { + case codes.OK: + return http.StatusOK + case codes.Canceled: + return http.StatusRequestTimeout + case codes.Unknown: + return http.StatusInternalServerError + case codes.InvalidArgument: + return http.StatusBadRequest + case codes.DeadlineExceeded: + return http.StatusGatewayTimeout + case codes.NotFound: + return http.StatusNotFound + case codes.AlreadyExists: + return http.StatusConflict + case codes.PermissionDenied: + return http.StatusForbidden + case codes.Unauthenticated: + return http.StatusUnauthorized + case codes.ResourceExhausted: + return http.StatusTooManyRequests + case codes.FailedPrecondition: + // Note, this deliberately doesn't translate to the similarly named '412 Precondition Failed' HTTP response status. + return http.StatusBadRequest + case codes.Aborted: + return http.StatusConflict + case codes.OutOfRange: + return http.StatusBadRequest + case codes.Unimplemented: + return http.StatusNotImplemented + case codes.Internal: + return http.StatusInternalServerError + case codes.Unavailable: + return http.StatusServiceUnavailable + case codes.DataLoss: + return http.StatusInternalServerError + } + + grpclog.Infof("Unknown gRPC error code: %v", code) + return http.StatusInternalServerError +} + +var ( + // HTTPError replies to the request with an error. + // + // HTTPError is called: + // - From generated per-endpoint gateway handler code, when calling the backend results in an error. + // - From gateway runtime code, when forwarding the response message results in an error. + // + // The default value for HTTPError calls the custom error handler configured on the ServeMux via the + // WithProtoErrorHandler serve option if that option was used, calling GlobalHTTPErrorHandler otherwise. + // + // To customize the error handling of a particular ServeMux instance, use the WithProtoErrorHandler + // serve option. + // + // To customize the error format for all ServeMux instances not using the WithProtoErrorHandler serve + // option, set GlobalHTTPErrorHandler to a custom function. + // + // Setting this variable directly to customize error format is deprecated. + HTTPError = MuxOrGlobalHTTPError + + // GlobalHTTPErrorHandler is the HTTPError handler for all ServeMux instances not using the + // WithProtoErrorHandler serve option. + // + // You can set a custom function to this variable to customize error format. + GlobalHTTPErrorHandler = DefaultHTTPError + + // OtherErrorHandler handles gateway errors from parsing and routing client requests for all + // ServeMux instances not using the WithProtoErrorHandler serve option. 
+ // + // It returns the following error codes: StatusMethodNotAllowed StatusNotFound StatusBadRequest + // + // To customize parsing and routing error handling of a particular ServeMux instance, use the + // WithProtoErrorHandler serve option. + // + // To customize parsing and routing error handling of all ServeMux instances not using the + // WithProtoErrorHandler serve option, set a custom function to this variable. + OtherErrorHandler = DefaultOtherErrorHandler +) + +// MuxOrGlobalHTTPError uses the mux-configured error handler, falling back to GlobalErrorHandler. +func MuxOrGlobalHTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) { + if mux.protoErrorHandler != nil { + mux.protoErrorHandler(ctx, mux, marshaler, w, r, err) + } else { + GlobalHTTPErrorHandler(ctx, mux, marshaler, w, r, err) + } +} + +// DefaultHTTPError is the default implementation of HTTPError. +// If "err" is an error from gRPC system, the function replies with the status code mapped by HTTPStatusFromCode. +// If otherwise, it replies with http.StatusInternalServerError. +// +// The response body returned by this function is a JSON object, +// which contains a member whose key is "error" and whose value is err.Error(). +func DefaultHTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) { + const fallback = `{"error": "failed to marshal error message"}` + + s, ok := status.FromError(err) + if !ok { + s = status.New(codes.Unknown, err.Error()) + } + + w.Header().Del("Trailer") + w.Header().Del("Transfer-Encoding") + + contentType := marshaler.ContentType() + // Check marshaler on run time in order to keep backwards compatibility + // An interface param needs to be added to the ContentType() function on + // the Marshal interface to be able to remove this check + if typeMarshaler, ok := marshaler.(contentTypeMarshaler); ok { + pb := s.Proto() + contentType = typeMarshaler.ContentTypeFromMessage(pb) + } + w.Header().Set("Content-Type", contentType) + + body := &internal.Error{ + Error: s.Message(), + Message: s.Message(), + Code: int32(s.Code()), + Details: s.Proto().GetDetails(), + } + + buf, merr := marshaler.Marshal(body) + if merr != nil { + grpclog.Infof("Failed to marshal error message %q: %v", body, merr) + w.WriteHeader(http.StatusInternalServerError) + if _, err := io.WriteString(w, fallback); err != nil { + grpclog.Infof("Failed to write response: %v", err) + } + return + } + + md, ok := ServerMetadataFromContext(ctx) + if !ok { + grpclog.Infof("Failed to extract ServerMetadata from context") + } + + handleForwardResponseServerMetadata(w, mux, md) + + // RFC 7230 https://tools.ietf.org/html/rfc7230#section-4.1.2 + // Unless the request includes a TE header field indicating "trailers" + // is acceptable, as described in Section 4.3, a server SHOULD NOT + // generate trailer fields that it believes are necessary for the user + // agent to receive. + var wantsTrailers bool + + if te := r.Header.Get("TE"); strings.Contains(strings.ToLower(te), "trailers") { + wantsTrailers = true + handleForwardResponseTrailerHeader(w, md) + w.Header().Set("Transfer-Encoding", "chunked") + } + + st := HTTPStatusFromCode(s.Code()) + w.WriteHeader(st) + if _, err := w.Write(buf); err != nil { + grpclog.Infof("Failed to write response: %v", err) + } + + if wantsTrailers { + handleForwardResponseTrailer(w, md) + } +} + +// DefaultOtherErrorHandler is the default implementation of OtherErrorHandler. 
+// It simply writes a string representation of the given error into "w". +func DefaultOtherErrorHandler(w http.ResponseWriter, _ *http.Request, msg string, code int) { + http.Error(w, msg, code) +} diff -Nru temporal-1.21.5-1/src/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/fieldmask.go temporal-1.22.5/src/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/fieldmask.go --- temporal-1.21.5-1/src/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/fieldmask.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/fieldmask.go 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,89 @@ +package runtime + +import ( + "encoding/json" + "io" + "strings" + + descriptor2 "github.com/golang/protobuf/descriptor" + "github.com/golang/protobuf/protoc-gen-go/descriptor" + "google.golang.org/genproto/protobuf/field_mask" +) + +func translateName(name string, md *descriptor.DescriptorProto) (string, *descriptor.DescriptorProto) { + // TODO - should really gate this with a test that the marshaller has used json names + if md != nil { + for _, f := range md.Field { + if f.JsonName != nil && f.Name != nil && *f.JsonName == name { + var subType *descriptor.DescriptorProto + + // If the field has a TypeName then we retrieve the nested type for translating the embedded message names. + if f.TypeName != nil { + typeSplit := strings.Split(*f.TypeName, ".") + typeName := typeSplit[len(typeSplit)-1] + for _, t := range md.NestedType { + if typeName == *t.Name { + subType = t + } + } + } + return *f.Name, subType + } + } + } + return name, nil +} + +// FieldMaskFromRequestBody creates a FieldMask printing all complete paths from the JSON body. +func FieldMaskFromRequestBody(r io.Reader, md *descriptor.DescriptorProto) (*field_mask.FieldMask, error) { + fm := &field_mask.FieldMask{} + var root interface{} + if err := json.NewDecoder(r).Decode(&root); err != nil { + if err == io.EOF { + return fm, nil + } + return nil, err + } + + queue := []fieldMaskPathItem{{node: root, md: md}} + for len(queue) > 0 { + // dequeue an item + item := queue[0] + queue = queue[1:] + + if m, ok := item.node.(map[string]interface{}); ok { + // if the item is an object, then enqueue all of its children + for k, v := range m { + protoName, subMd := translateName(k, item.md) + if subMsg, ok := v.(descriptor2.Message); ok { + _, subMd = descriptor2.ForMessage(subMsg) + } + + var path string + if item.path == "" { + path = protoName + } else { + path = item.path + "." 
+ protoName + } + queue = append(queue, fieldMaskPathItem{path: path, node: v, md: subMd}) + } + } else if len(item.path) > 0 { + // otherwise, it's a leaf node so print its path + fm.Paths = append(fm.Paths, item.path) + } + } + + return fm, nil +} + +// fieldMaskPathItem stores a in-progress deconstruction of a path for a fieldmask +type fieldMaskPathItem struct { + // the list of prior fields leading up to node connected by dots + path string + + // a generic decoded json object the current item to inspect for further path extraction + node interface{} + + // descriptor for parent message + md *descriptor.DescriptorProto +} diff -Nru temporal-1.21.5-1/src/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go temporal-1.22.5/src/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go --- temporal-1.21.5-1/src/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,212 @@ +package runtime + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "net/textproto" + + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/internal" + "google.golang.org/grpc/grpclog" +) + +var errEmptyResponse = errors.New("empty response") + +// ForwardResponseStream forwards the stream from gRPC server to REST client. +func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, recv func() (proto.Message, error), opts ...func(context.Context, http.ResponseWriter, proto.Message) error) { + f, ok := w.(http.Flusher) + if !ok { + grpclog.Infof("Flush not supported in %T", w) + http.Error(w, "unexpected type of web server", http.StatusInternalServerError) + return + } + + md, ok := ServerMetadataFromContext(ctx) + if !ok { + grpclog.Infof("Failed to extract ServerMetadata from context") + http.Error(w, "unexpected error", http.StatusInternalServerError) + return + } + handleForwardResponseServerMetadata(w, mux, md) + + w.Header().Set("Transfer-Encoding", "chunked") + w.Header().Set("Content-Type", marshaler.ContentType()) + if err := handleForwardResponseOptions(ctx, w, nil, opts); err != nil { + HTTPError(ctx, mux, marshaler, w, req, err) + return + } + + var delimiter []byte + if d, ok := marshaler.(Delimited); ok { + delimiter = d.Delimiter() + } else { + delimiter = []byte("\n") + } + + var wroteHeader bool + for { + resp, err := recv() + if err == io.EOF { + return + } + if err != nil { + handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err) + return + } + if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil { + handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err) + return + } + + var buf []byte + switch { + case resp == nil: + buf, err = marshaler.Marshal(errorChunk(streamError(ctx, mux.streamErrorHandler, errEmptyResponse))) + default: + result := map[string]interface{}{"result": resp} + if rb, ok := resp.(responseBody); ok { + result["result"] = rb.XXX_ResponseBody() + } + + buf, err = marshaler.Marshal(result) + } + + if err != nil { + grpclog.Infof("Failed to marshal response chunk: %v", err) + handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err) + return + } + if _, err = w.Write(buf); err != nil { + grpclog.Infof("Failed to send response chunk: %v", err) + return + } + wroteHeader = true + if _, err = 
w.Write(delimiter); err != nil { + grpclog.Infof("Failed to send delimiter chunk: %v", err) + return + } + f.Flush() + } +} + +func handleForwardResponseServerMetadata(w http.ResponseWriter, mux *ServeMux, md ServerMetadata) { + for k, vs := range md.HeaderMD { + if h, ok := mux.outgoingHeaderMatcher(k); ok { + for _, v := range vs { + w.Header().Add(h, v) + } + } + } +} + +func handleForwardResponseTrailerHeader(w http.ResponseWriter, md ServerMetadata) { + for k := range md.TrailerMD { + tKey := textproto.CanonicalMIMEHeaderKey(fmt.Sprintf("%s%s", MetadataTrailerPrefix, k)) + w.Header().Add("Trailer", tKey) + } +} + +func handleForwardResponseTrailer(w http.ResponseWriter, md ServerMetadata) { + for k, vs := range md.TrailerMD { + tKey := fmt.Sprintf("%s%s", MetadataTrailerPrefix, k) + for _, v := range vs { + w.Header().Add(tKey, v) + } + } +} + +// responseBody interface contains method for getting field for marshaling to the response body +// this method is generated for response struct from the value of `response_body` in the `google.api.HttpRule` +type responseBody interface { + XXX_ResponseBody() interface{} +} + +// ForwardResponseMessage forwards the message "resp" from gRPC server to REST client. +func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, resp proto.Message, opts ...func(context.Context, http.ResponseWriter, proto.Message) error) { + md, ok := ServerMetadataFromContext(ctx) + if !ok { + grpclog.Infof("Failed to extract ServerMetadata from context") + } + + handleForwardResponseServerMetadata(w, mux, md) + handleForwardResponseTrailerHeader(w, md) + + contentType := marshaler.ContentType() + // Check marshaler on run time in order to keep backwards compatibility + // An interface param needs to be added to the ContentType() function on + // the Marshal interface to be able to remove this check + if typeMarshaler, ok := marshaler.(contentTypeMarshaler); ok { + contentType = typeMarshaler.ContentTypeFromMessage(resp) + } + w.Header().Set("Content-Type", contentType) + + if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil { + HTTPError(ctx, mux, marshaler, w, req, err) + return + } + var buf []byte + var err error + if rb, ok := resp.(responseBody); ok { + buf, err = marshaler.Marshal(rb.XXX_ResponseBody()) + } else { + buf, err = marshaler.Marshal(resp) + } + if err != nil { + grpclog.Infof("Marshal error: %v", err) + HTTPError(ctx, mux, marshaler, w, req, err) + return + } + + if _, err = w.Write(buf); err != nil { + grpclog.Infof("Failed to write response: %v", err) + } + + handleForwardResponseTrailer(w, md) +} + +func handleForwardResponseOptions(ctx context.Context, w http.ResponseWriter, resp proto.Message, opts []func(context.Context, http.ResponseWriter, proto.Message) error) error { + if len(opts) == 0 { + return nil + } + for _, opt := range opts { + if err := opt(ctx, w, resp); err != nil { + grpclog.Infof("Error handling ForwardResponseOptions: %v", err) + return err + } + } + return nil +} + +func handleForwardResponseStreamError(ctx context.Context, wroteHeader bool, marshaler Marshaler, w http.ResponseWriter, req *http.Request, mux *ServeMux, err error) { + serr := streamError(ctx, mux.streamErrorHandler, err) + if !wroteHeader { + w.WriteHeader(int(serr.HttpCode)) + } + buf, merr := marshaler.Marshal(errorChunk(serr)) + if merr != nil { + grpclog.Infof("Failed to marshal an error: %v", merr) + return + } + if _, werr := w.Write(buf); werr != nil { + 
grpclog.Infof("Failed to notify error to client: %v", werr) + return + } +} + +// streamError returns the payload for the final message in a response stream +// that represents the given err. +func streamError(ctx context.Context, errHandler StreamErrorHandlerFunc, err error) *StreamError { + serr := errHandler(ctx, err) + if serr != nil { + return serr + } + // TODO: log about misbehaving stream error handler? + return DefaultHTTPStreamErrorHandler(ctx, err) +} + +func errorChunk(err *StreamError) map[string]proto.Message { + return map[string]proto.Message{"error": (*internal.StreamError)(err)} +} diff -Nru temporal-1.21.5-1/src/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_httpbodyproto.go temporal-1.22.5/src/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_httpbodyproto.go --- temporal-1.21.5-1/src/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_httpbodyproto.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_httpbodyproto.go 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,43 @@ +package runtime + +import ( + "google.golang.org/genproto/googleapis/api/httpbody" +) + +// SetHTTPBodyMarshaler overwrite the default marshaler with the HTTPBodyMarshaler +func SetHTTPBodyMarshaler(serveMux *ServeMux) { + serveMux.marshalers.mimeMap[MIMEWildcard] = &HTTPBodyMarshaler{ + Marshaler: &JSONPb{OrigName: true}, + } +} + +// HTTPBodyMarshaler is a Marshaler which supports marshaling of a +// google.api.HttpBody message as the full response body if it is +// the actual message used as the response. If not, then this will +// simply fallback to the Marshaler specified as its default Marshaler. +type HTTPBodyMarshaler struct { + Marshaler +} + +// ContentType implementation to keep backwards compatibility with marshal interface +func (h *HTTPBodyMarshaler) ContentType() string { + return h.ContentTypeFromMessage(nil) +} + +// ContentTypeFromMessage in case v is a google.api.HttpBody message it returns +// its specified content type otherwise fall back to the default Marshaler. +func (h *HTTPBodyMarshaler) ContentTypeFromMessage(v interface{}) string { + if httpBody, ok := v.(*httpbody.HttpBody); ok { + return httpBody.GetContentType() + } + return h.Marshaler.ContentType() +} + +// Marshal marshals "v" by returning the body bytes if v is a +// google.api.HttpBody message, otherwise it falls back to the default Marshaler. +func (h *HTTPBodyMarshaler) Marshal(v interface{}) ([]byte, error) { + if httpBody, ok := v.(*httpbody.HttpBody); ok { + return httpBody.Data, nil + } + return h.Marshaler.Marshal(v) +} diff -Nru temporal-1.21.5-1/src/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go temporal-1.22.5/src/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go --- temporal-1.21.5-1/src/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,45 @@ +package runtime + +import ( + "encoding/json" + "io" +) + +// JSONBuiltin is a Marshaler which marshals/unmarshals into/from JSON +// with the standard "encoding/json" package of Golang. +// Although it is generally faster for simple proto messages than JSONPb, +// it does not support advanced features of protobuf, e.g. map, oneof, .... 
+// +// The NewEncoder and NewDecoder types return *json.Encoder and +// *json.Decoder respectively. +type JSONBuiltin struct{} + +// ContentType always Returns "application/json". +func (*JSONBuiltin) ContentType() string { + return "application/json" +} + +// Marshal marshals "v" into JSON +func (j *JSONBuiltin) Marshal(v interface{}) ([]byte, error) { + return json.Marshal(v) +} + +// Unmarshal unmarshals JSON data into "v". +func (j *JSONBuiltin) Unmarshal(data []byte, v interface{}) error { + return json.Unmarshal(data, v) +} + +// NewDecoder returns a Decoder which reads JSON stream from "r". +func (j *JSONBuiltin) NewDecoder(r io.Reader) Decoder { + return json.NewDecoder(r) +} + +// NewEncoder returns an Encoder which writes JSON stream into "w". +func (j *JSONBuiltin) NewEncoder(w io.Writer) Encoder { + return json.NewEncoder(w) +} + +// Delimiter for newline encoded JSON streams. +func (j *JSONBuiltin) Delimiter() []byte { + return []byte("\n") +} diff -Nru temporal-1.21.5-1/src/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go temporal-1.22.5/src/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go --- temporal-1.21.5-1/src/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,262 @@ +package runtime + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "reflect" + + "github.com/golang/protobuf/jsonpb" + "github.com/golang/protobuf/proto" +) + +// JSONPb is a Marshaler which marshals/unmarshals into/from JSON +// with the "github.com/golang/protobuf/jsonpb". +// It supports fully functionality of protobuf unlike JSONBuiltin. +// +// The NewDecoder method returns a DecoderWrapper, so the underlying +// *json.Decoder methods can be used. +type JSONPb jsonpb.Marshaler + +// ContentType always returns "application/json". +func (*JSONPb) ContentType() string { + return "application/json" +} + +// Marshal marshals "v" into JSON. +func (j *JSONPb) Marshal(v interface{}) ([]byte, error) { + if _, ok := v.(proto.Message); !ok { + return j.marshalNonProtoField(v) + } + + var buf bytes.Buffer + if err := j.marshalTo(&buf, v); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +func (j *JSONPb) marshalTo(w io.Writer, v interface{}) error { + p, ok := v.(proto.Message) + if !ok { + buf, err := j.marshalNonProtoField(v) + if err != nil { + return err + } + _, err = w.Write(buf) + return err + } + return (*jsonpb.Marshaler)(j).Marshal(w, p) +} + +var ( + // protoMessageType is stored to prevent constant lookup of the same type at runtime. + protoMessageType = reflect.TypeOf((*proto.Message)(nil)).Elem() +) + +// marshalNonProto marshals a non-message field of a protobuf message. +// This function does not correctly marshals arbitrary data structure into JSON, +// but it is only capable of marshaling non-message field values of protobuf, +// i.e. primitive types, enums; pointers to primitives or enums; maps from +// integer/string types to primitives/enums/pointers to messages. 
+func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) { + if v == nil { + return []byte("null"), nil + } + rv := reflect.ValueOf(v) + for rv.Kind() == reflect.Ptr { + if rv.IsNil() { + return []byte("null"), nil + } + rv = rv.Elem() + } + + if rv.Kind() == reflect.Slice { + if rv.IsNil() { + if j.EmitDefaults { + return []byte("[]"), nil + } + return []byte("null"), nil + } + + if rv.Type().Elem().Implements(protoMessageType) { + var buf bytes.Buffer + err := buf.WriteByte('[') + if err != nil { + return nil, err + } + for i := 0; i < rv.Len(); i++ { + if i != 0 { + err = buf.WriteByte(',') + if err != nil { + return nil, err + } + } + if err = (*jsonpb.Marshaler)(j).Marshal(&buf, rv.Index(i).Interface().(proto.Message)); err != nil { + return nil, err + } + } + err = buf.WriteByte(']') + if err != nil { + return nil, err + } + + return buf.Bytes(), nil + } + } + + if rv.Kind() == reflect.Map { + m := make(map[string]*json.RawMessage) + for _, k := range rv.MapKeys() { + buf, err := j.Marshal(rv.MapIndex(k).Interface()) + if err != nil { + return nil, err + } + m[fmt.Sprintf("%v", k.Interface())] = (*json.RawMessage)(&buf) + } + if j.Indent != "" { + return json.MarshalIndent(m, "", j.Indent) + } + return json.Marshal(m) + } + if enum, ok := rv.Interface().(protoEnum); ok && !j.EnumsAsInts { + return json.Marshal(enum.String()) + } + return json.Marshal(rv.Interface()) +} + +// Unmarshal unmarshals JSON "data" into "v" +func (j *JSONPb) Unmarshal(data []byte, v interface{}) error { + return unmarshalJSONPb(data, v) +} + +// NewDecoder returns a Decoder which reads JSON stream from "r". +func (j *JSONPb) NewDecoder(r io.Reader) Decoder { + d := json.NewDecoder(r) + return DecoderWrapper{Decoder: d} +} + +// DecoderWrapper is a wrapper around a *json.Decoder that adds +// support for protos to the Decode method. +type DecoderWrapper struct { + *json.Decoder +} + +// Decode wraps the embedded decoder's Decode method to support +// protos using a jsonpb.Unmarshaler. +func (d DecoderWrapper) Decode(v interface{}) error { + return decodeJSONPb(d.Decoder, v) +} + +// NewEncoder returns an Encoder which writes JSON stream into "w". 
+func (j *JSONPb) NewEncoder(w io.Writer) Encoder { + return EncoderFunc(func(v interface{}) error { + if err := j.marshalTo(w, v); err != nil { + return err + } + // mimic json.Encoder by adding a newline (makes output + // easier to read when it contains multiple encoded items) + _, err := w.Write(j.Delimiter()) + return err + }) +} + +func unmarshalJSONPb(data []byte, v interface{}) error { + d := json.NewDecoder(bytes.NewReader(data)) + return decodeJSONPb(d, v) +} + +func decodeJSONPb(d *json.Decoder, v interface{}) error { + p, ok := v.(proto.Message) + if !ok { + return decodeNonProtoField(d, v) + } + unmarshaler := &jsonpb.Unmarshaler{AllowUnknownFields: allowUnknownFields} + return unmarshaler.UnmarshalNext(d, p) +} + +func decodeNonProtoField(d *json.Decoder, v interface{}) error { + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr { + return fmt.Errorf("%T is not a pointer", v) + } + for rv.Kind() == reflect.Ptr { + if rv.IsNil() { + rv.Set(reflect.New(rv.Type().Elem())) + } + if rv.Type().ConvertibleTo(typeProtoMessage) { + unmarshaler := &jsonpb.Unmarshaler{AllowUnknownFields: allowUnknownFields} + return unmarshaler.UnmarshalNext(d, rv.Interface().(proto.Message)) + } + rv = rv.Elem() + } + if rv.Kind() == reflect.Map { + if rv.IsNil() { + rv.Set(reflect.MakeMap(rv.Type())) + } + conv, ok := convFromType[rv.Type().Key().Kind()] + if !ok { + return fmt.Errorf("unsupported type of map field key: %v", rv.Type().Key()) + } + + m := make(map[string]*json.RawMessage) + if err := d.Decode(&m); err != nil { + return err + } + for k, v := range m { + result := conv.Call([]reflect.Value{reflect.ValueOf(k)}) + if err := result[1].Interface(); err != nil { + return err.(error) + } + bk := result[0] + bv := reflect.New(rv.Type().Elem()) + if err := unmarshalJSONPb([]byte(*v), bv.Interface()); err != nil { + return err + } + rv.SetMapIndex(bk, bv.Elem()) + } + return nil + } + if _, ok := rv.Interface().(protoEnum); ok { + var repr interface{} + if err := d.Decode(&repr); err != nil { + return err + } + switch repr.(type) { + case string: + // TODO(yugui) Should use proto.StructProperties? + return fmt.Errorf("unmarshaling of symbolic enum %q not supported: %T", repr, rv.Interface()) + case float64: + rv.Set(reflect.ValueOf(int32(repr.(float64))).Convert(rv.Type())) + return nil + default: + return fmt.Errorf("cannot assign %#v into Go type %T", repr, rv.Interface()) + } + } + return d.Decode(v) +} + +type protoEnum interface { + fmt.Stringer + EnumDescriptor() ([]byte, []int) +} + +var typeProtoMessage = reflect.TypeOf((*proto.Message)(nil)).Elem() + +// Delimiter for newline encoded JSON streams. +func (j *JSONPb) Delimiter() []byte { + return []byte("\n") +} + +// allowUnknownFields helps not to return an error when the destination +// is a struct and the input contains object keys which do not match any +// non-ignored, exported fields in the destination. +var allowUnknownFields = true + +// DisallowUnknownFields enables option in decoder (unmarshaller) to +// return an error when it finds an unknown field. This function must be +// called before using the JSON marshaller. 
+func DisallowUnknownFields() { + allowUnknownFields = false +} diff -Nru temporal-1.21.5-1/src/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_proto.go temporal-1.22.5/src/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_proto.go --- temporal-1.21.5-1/src/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_proto.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_proto.go 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,62 @@ +package runtime + +import ( + "io" + + "errors" + "github.com/golang/protobuf/proto" + "io/ioutil" +) + +// ProtoMarshaller is a Marshaller which marshals/unmarshals into/from serialize proto bytes +type ProtoMarshaller struct{} + +// ContentType always returns "application/octet-stream". +func (*ProtoMarshaller) ContentType() string { + return "application/octet-stream" +} + +// Marshal marshals "value" into Proto +func (*ProtoMarshaller) Marshal(value interface{}) ([]byte, error) { + message, ok := value.(proto.Message) + if !ok { + return nil, errors.New("unable to marshal non proto field") + } + return proto.Marshal(message) +} + +// Unmarshal unmarshals proto "data" into "value" +func (*ProtoMarshaller) Unmarshal(data []byte, value interface{}) error { + message, ok := value.(proto.Message) + if !ok { + return errors.New("unable to unmarshal non proto field") + } + return proto.Unmarshal(data, message) +} + +// NewDecoder returns a Decoder which reads proto stream from "reader". +func (marshaller *ProtoMarshaller) NewDecoder(reader io.Reader) Decoder { + return DecoderFunc(func(value interface{}) error { + buffer, err := ioutil.ReadAll(reader) + if err != nil { + return err + } + return marshaller.Unmarshal(buffer, value) + }) +} + +// NewEncoder returns an Encoder which writes proto stream into "writer". +func (marshaller *ProtoMarshaller) NewEncoder(writer io.Writer) Encoder { + return EncoderFunc(func(value interface{}) error { + buffer, err := marshaller.Marshal(value) + if err != nil { + return err + } + _, err = writer.Write(buffer) + if err != nil { + return err + } + + return nil + }) +} diff -Nru temporal-1.21.5-1/src/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go temporal-1.22.5/src/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go --- temporal-1.21.5-1/src/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,55 @@ +package runtime + +import ( + "io" +) + +// Marshaler defines a conversion between byte sequence and gRPC payloads / fields. +type Marshaler interface { + // Marshal marshals "v" into byte sequence. + Marshal(v interface{}) ([]byte, error) + // Unmarshal unmarshals "data" into "v". + // "v" must be a pointer value. + Unmarshal(data []byte, v interface{}) error + // NewDecoder returns a Decoder which reads byte sequence from "r". + NewDecoder(r io.Reader) Decoder + // NewEncoder returns an Encoder which writes bytes sequence into "w". + NewEncoder(w io.Writer) Encoder + // ContentType returns the Content-Type which this marshaler is responsible for. 
+ ContentType() string +} + +// Marshalers that implement contentTypeMarshaler will have their ContentTypeFromMessage method called +// to set the Content-Type header on the response +type contentTypeMarshaler interface { + // ContentTypeFromMessage returns the Content-Type this marshaler produces from the provided message + ContentTypeFromMessage(v interface{}) string +} + +// Decoder decodes a byte sequence +type Decoder interface { + Decode(v interface{}) error +} + +// Encoder encodes gRPC payloads / fields into byte sequence. +type Encoder interface { + Encode(v interface{}) error +} + +// DecoderFunc adapts an decoder function into Decoder. +type DecoderFunc func(v interface{}) error + +// Decode delegates invocations to the underlying function itself. +func (f DecoderFunc) Decode(v interface{}) error { return f(v) } + +// EncoderFunc adapts an encoder function into Encoder +type EncoderFunc func(v interface{}) error + +// Encode delegates invocations to the underlying function itself. +func (f EncoderFunc) Encode(v interface{}) error { return f(v) } + +// Delimited defines the streaming delimiter. +type Delimited interface { + // Delimiter returns the record separator for the stream. + Delimiter() []byte +} diff -Nru temporal-1.21.5-1/src/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go temporal-1.22.5/src/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go --- temporal-1.21.5-1/src/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,99 @@ +package runtime + +import ( + "errors" + "mime" + "net/http" + + "google.golang.org/grpc/grpclog" +) + +// MIMEWildcard is the fallback MIME type used for requests which do not match +// a registered MIME type. +const MIMEWildcard = "*" + +var ( + acceptHeader = http.CanonicalHeaderKey("Accept") + contentTypeHeader = http.CanonicalHeaderKey("Content-Type") + + defaultMarshaler = &JSONPb{OrigName: true} +) + +// MarshalerForRequest returns the inbound/outbound marshalers for this request. +// It checks the registry on the ServeMux for the MIME type set by the Content-Type header. +// If it isn't set (or the request Content-Type is empty), checks for "*". +// If there are multiple Content-Type headers set, choose the first one that it can +// exactly match in the registry. +// Otherwise, it follows the above logic for "*"/InboundMarshaler/OutboundMarshaler. +func MarshalerForRequest(mux *ServeMux, r *http.Request) (inbound Marshaler, outbound Marshaler) { + for _, acceptVal := range r.Header[acceptHeader] { + if m, ok := mux.marshalers.mimeMap[acceptVal]; ok { + outbound = m + break + } + } + + for _, contentTypeVal := range r.Header[contentTypeHeader] { + contentType, _, err := mime.ParseMediaType(contentTypeVal) + if err != nil { + grpclog.Infof("Failed to parse Content-Type %s: %v", contentTypeVal, err) + continue + } + if m, ok := mux.marshalers.mimeMap[contentType]; ok { + inbound = m + break + } + } + + if inbound == nil { + inbound = mux.marshalers.mimeMap[MIMEWildcard] + } + if outbound == nil { + outbound = inbound + } + + return inbound, outbound +} + +// marshalerRegistry is a mapping from MIME types to Marshalers. +type marshalerRegistry struct { + mimeMap map[string]Marshaler +} + +// add adds a marshaler for a case-sensitive MIME type string ("*" to match any +// MIME type). 
+func (m marshalerRegistry) add(mime string, marshaler Marshaler) error { + if len(mime) == 0 { + return errors.New("empty MIME type") + } + + m.mimeMap[mime] = marshaler + + return nil +} + +// makeMarshalerMIMERegistry returns a new registry of marshalers. +// It allows for a mapping of case-sensitive Content-Type MIME type string to runtime.Marshaler interfaces. +// +// For example, you could allow the client to specify the use of the runtime.JSONPb marshaler +// with a "application/jsonpb" Content-Type and the use of the runtime.JSONBuiltin marshaler +// with a "application/json" Content-Type. +// "*" can be used to match any Content-Type. +// This can be attached to a ServerMux with the marshaler option. +func makeMarshalerMIMERegistry() marshalerRegistry { + return marshalerRegistry{ + mimeMap: map[string]Marshaler{ + MIMEWildcard: defaultMarshaler, + }, + } +} + +// WithMarshalerOption returns a ServeMuxOption which associates inbound and outbound +// Marshalers to a MIME type in mux. +func WithMarshalerOption(mime string, marshaler Marshaler) ServeMuxOption { + return func(mux *ServeMux) { + if err := mux.marshalers.add(mime, marshaler); err != nil { + panic(err) + } + } +} diff -Nru temporal-1.21.5-1/src/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go temporal-1.22.5/src/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go --- temporal-1.21.5-1/src/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,300 @@ +package runtime + +import ( + "context" + "fmt" + "net/http" + "net/textproto" + "strings" + + "github.com/golang/protobuf/proto" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// A HandlerFunc handles a specific pair of path pattern and HTTP method. +type HandlerFunc func(w http.ResponseWriter, r *http.Request, pathParams map[string]string) + +// ErrUnknownURI is the error supplied to a custom ProtoErrorHandlerFunc when +// a request is received with a URI path that does not match any registered +// service method. +// +// Since gRPC servers return an "Unimplemented" code for requests with an +// unrecognized URI path, this error also has a gRPC "Unimplemented" code. +var ErrUnknownURI = status.Error(codes.Unimplemented, http.StatusText(http.StatusNotImplemented)) + +// ServeMux is a request multiplexer for grpc-gateway. +// It matches http requests to patterns and invokes the corresponding handler. +type ServeMux struct { + // handlers maps HTTP method to a list of handlers. + handlers map[string][]handler + forwardResponseOptions []func(context.Context, http.ResponseWriter, proto.Message) error + marshalers marshalerRegistry + incomingHeaderMatcher HeaderMatcherFunc + outgoingHeaderMatcher HeaderMatcherFunc + metadataAnnotators []func(context.Context, *http.Request) metadata.MD + streamErrorHandler StreamErrorHandlerFunc + protoErrorHandler ProtoErrorHandlerFunc + disablePathLengthFallback bool + lastMatchWins bool +} + +// ServeMuxOption is an option that can be given to a ServeMux on construction. +type ServeMuxOption func(*ServeMux) + +// WithForwardResponseOption returns a ServeMuxOption representing the forwardResponseOption. +// +// forwardResponseOption is an option that will be called on the relevant context.Context, +// http.ResponseWriter, and proto.Message before every forwarded response. 
+// +// The message may be nil in the case where just a header is being sent. +func WithForwardResponseOption(forwardResponseOption func(context.Context, http.ResponseWriter, proto.Message) error) ServeMuxOption { + return func(serveMux *ServeMux) { + serveMux.forwardResponseOptions = append(serveMux.forwardResponseOptions, forwardResponseOption) + } +} + +// SetQueryParameterParser sets the query parameter parser, used to populate message from query parameters. +// Configuring this will mean the generated swagger output is no longer correct, and it should be +// done with careful consideration. +func SetQueryParameterParser(queryParameterParser QueryParameterParser) ServeMuxOption { + return func(serveMux *ServeMux) { + currentQueryParser = queryParameterParser + } +} + +// HeaderMatcherFunc checks whether a header key should be forwarded to/from gRPC context. +type HeaderMatcherFunc func(string) (string, bool) + +// DefaultHeaderMatcher is used to pass http request headers to/from gRPC context. This adds permanent HTTP header +// keys (as specified by the IANA) to gRPC context with grpcgateway- prefix. HTTP headers that start with +// 'Grpc-Metadata-' are mapped to gRPC metadata after removing prefix 'Grpc-Metadata-'. +func DefaultHeaderMatcher(key string) (string, bool) { + key = textproto.CanonicalMIMEHeaderKey(key) + if isPermanentHTTPHeader(key) { + return MetadataPrefix + key, true + } else if strings.HasPrefix(key, MetadataHeaderPrefix) { + return key[len(MetadataHeaderPrefix):], true + } + return "", false +} + +// WithIncomingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for incoming request to gateway. +// +// This matcher will be called with each header in http.Request. If matcher returns true, that header will be +// passed to gRPC context. To transform the header before passing to gRPC context, matcher should return modified header. +func WithIncomingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption { + return func(mux *ServeMux) { + mux.incomingHeaderMatcher = fn + } +} + +// WithOutgoingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for outgoing response from gateway. +// +// This matcher will be called with each header in response header metadata. If matcher returns true, that header will be +// passed to http response returned from gateway. To transform the header before passing to response, +// matcher should return modified header. +func WithOutgoingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption { + return func(mux *ServeMux) { + mux.outgoingHeaderMatcher = fn + } +} + +// WithMetadata returns a ServeMuxOption for passing metadata to a gRPC context. +// +// This can be used by services that need to read from http.Request and modify gRPC context. A common use case +// is reading token from cookie and adding it in gRPC context. +func WithMetadata(annotator func(context.Context, *http.Request) metadata.MD) ServeMuxOption { + return func(serveMux *ServeMux) { + serveMux.metadataAnnotators = append(serveMux.metadataAnnotators, annotator) + } +} + +// WithProtoErrorHandler returns a ServeMuxOption for configuring a custom error handler. +// +// This can be used to handle an error as general proto message defined by gRPC. +// When this option is used, the mux uses the configured error handler instead of HTTPError and +// OtherErrorHandler. 
+func WithProtoErrorHandler(fn ProtoErrorHandlerFunc) ServeMuxOption { + return func(serveMux *ServeMux) { + serveMux.protoErrorHandler = fn + } +} + +// WithDisablePathLengthFallback returns a ServeMuxOption for disable path length fallback. +func WithDisablePathLengthFallback() ServeMuxOption { + return func(serveMux *ServeMux) { + serveMux.disablePathLengthFallback = true + } +} + +// WithStreamErrorHandler returns a ServeMuxOption that will use the given custom stream +// error handler, which allows for customizing the error trailer for server-streaming +// calls. +// +// For stream errors that occur before any response has been written, the mux's +// ProtoErrorHandler will be invoked. However, once data has been written, the errors must +// be handled differently: they must be included in the response body. The response body's +// final message will include the error details returned by the stream error handler. +func WithStreamErrorHandler(fn StreamErrorHandlerFunc) ServeMuxOption { + return func(serveMux *ServeMux) { + serveMux.streamErrorHandler = fn + } +} + +// WithLastMatchWins returns a ServeMuxOption that will enable "last +// match wins" behavior, where if multiple path patterns match a +// request path, the last one defined in the .proto file will be used. +func WithLastMatchWins() ServeMuxOption { + return func(serveMux *ServeMux) { + serveMux.lastMatchWins = true + } +} + +// NewServeMux returns a new ServeMux whose internal mapping is empty. +func NewServeMux(opts ...ServeMuxOption) *ServeMux { + serveMux := &ServeMux{ + handlers: make(map[string][]handler), + forwardResponseOptions: make([]func(context.Context, http.ResponseWriter, proto.Message) error, 0), + marshalers: makeMarshalerMIMERegistry(), + streamErrorHandler: DefaultHTTPStreamErrorHandler, + } + + for _, opt := range opts { + opt(serveMux) + } + + if serveMux.incomingHeaderMatcher == nil { + serveMux.incomingHeaderMatcher = DefaultHeaderMatcher + } + + if serveMux.outgoingHeaderMatcher == nil { + serveMux.outgoingHeaderMatcher = func(key string) (string, bool) { + return fmt.Sprintf("%s%s", MetadataHeaderPrefix, key), true + } + } + + return serveMux +} + +// Handle associates "h" to the pair of HTTP method and path pattern. +func (s *ServeMux) Handle(meth string, pat Pattern, h HandlerFunc) { + if s.lastMatchWins { + s.handlers[meth] = append([]handler{handler{pat: pat, h: h}}, s.handlers[meth]...) + } else { + s.handlers[meth] = append(s.handlers[meth], handler{pat: pat, h: h}) + } +} + +// ServeHTTP dispatches the request to the first handler whose pattern matches to r.Method and r.Path. 
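NewServeMux and the ServeMuxOption helpers above are the public surface most callers touch. A minimal construction sketch follows; it is not part of the diff, the port and header name are hypothetical, and it only uses identifiers defined in the hunks above (NewServeMux, WithMarshalerOption, MIMEWildcard, JSONPb, ProtoMarshaller, WithIncomingHeaderMatcher, DefaultHeaderMatcher).

    package main

    import (
    	"net/http"
    	"strings"

    	"github.com/grpc-ecosystem/grpc-gateway/runtime"
    )

    func main() {
    	mux := runtime.NewServeMux(
    		// JSON for any unmatched Content-Type, raw proto bytes otherwise.
    		runtime.WithMarshalerOption(runtime.MIMEWildcard, &runtime.JSONPb{OrigName: true}),
    		runtime.WithMarshalerOption("application/octet-stream", &runtime.ProtoMarshaller{}),
    		// Pass X-Request-Id through to gRPC metadata, defer to the
    		// default matcher for everything else.
    		runtime.WithIncomingHeaderMatcher(func(key string) (string, bool) {
    			if strings.EqualFold(key, "X-Request-Id") {
    				return key, true
    			}
    			return runtime.DefaultHeaderMatcher(key)
    		}),
    	)

    	// Handlers are normally registered by generated Register*Handler code.
    	_ = http.ListenAndServe(":8080", mux)
    }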
+func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + path := r.URL.Path + if !strings.HasPrefix(path, "/") { + if s.protoErrorHandler != nil { + _, outboundMarshaler := MarshalerForRequest(s, r) + sterr := status.Error(codes.InvalidArgument, http.StatusText(http.StatusBadRequest)) + s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) + } else { + OtherErrorHandler(w, r, http.StatusText(http.StatusBadRequest), http.StatusBadRequest) + } + return + } + + components := strings.Split(path[1:], "/") + l := len(components) + var verb string + if idx := strings.LastIndex(components[l-1], ":"); idx == 0 { + if s.protoErrorHandler != nil { + _, outboundMarshaler := MarshalerForRequest(s, r) + s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, ErrUnknownURI) + } else { + OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound) + } + return + } else if idx > 0 { + c := components[l-1] + components[l-1], verb = c[:idx], c[idx+1:] + } + + if override := r.Header.Get("X-HTTP-Method-Override"); override != "" && s.isPathLengthFallback(r) { + r.Method = strings.ToUpper(override) + if err := r.ParseForm(); err != nil { + if s.protoErrorHandler != nil { + _, outboundMarshaler := MarshalerForRequest(s, r) + sterr := status.Error(codes.InvalidArgument, err.Error()) + s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) + } else { + OtherErrorHandler(w, r, err.Error(), http.StatusBadRequest) + } + return + } + } + for _, h := range s.handlers[r.Method] { + pathParams, err := h.pat.Match(components, verb) + if err != nil { + continue + } + h.h(w, r, pathParams) + return + } + + // lookup other methods to handle fallback from GET to POST and + // to determine if it is MethodNotAllowed or NotFound. + for m, handlers := range s.handlers { + if m == r.Method { + continue + } + for _, h := range handlers { + pathParams, err := h.pat.Match(components, verb) + if err != nil { + continue + } + // X-HTTP-Method-Override is optional. Always allow fallback to POST. + if s.isPathLengthFallback(r) { + if err := r.ParseForm(); err != nil { + if s.protoErrorHandler != nil { + _, outboundMarshaler := MarshalerForRequest(s, r) + sterr := status.Error(codes.InvalidArgument, err.Error()) + s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) + } else { + OtherErrorHandler(w, r, err.Error(), http.StatusBadRequest) + } + return + } + h.h(w, r, pathParams) + return + } + if s.protoErrorHandler != nil { + _, outboundMarshaler := MarshalerForRequest(s, r) + s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, ErrUnknownURI) + } else { + OtherErrorHandler(w, r, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed) + } + return + } + } + + if s.protoErrorHandler != nil { + _, outboundMarshaler := MarshalerForRequest(s, r) + s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, ErrUnknownURI) + } else { + OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound) + } +} + +// GetForwardResponseOptions returns the ForwardResponseOptions associated with this ServeMux. 
+func (s *ServeMux) GetForwardResponseOptions() []func(context.Context, http.ResponseWriter, proto.Message) error { + return s.forwardResponseOptions +} + +func (s *ServeMux) isPathLengthFallback(r *http.Request) bool { + return !s.disablePathLengthFallback && r.Method == "POST" && r.Header.Get("Content-Type") == "application/x-www-form-urlencoded" +} + +type handler struct { + pat Pattern + h HandlerFunc +} diff -Nru temporal-1.21.5-1/src/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go temporal-1.22.5/src/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go --- temporal-1.21.5-1/src/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,262 @@ +package runtime + +import ( + "errors" + "fmt" + "strings" + + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc/grpclog" +) + +var ( + // ErrNotMatch indicates that the given HTTP request path does not match to the pattern. + ErrNotMatch = errors.New("not match to the path pattern") + // ErrInvalidPattern indicates that the given definition of Pattern is not valid. + ErrInvalidPattern = errors.New("invalid pattern") +) + +type op struct { + code utilities.OpCode + operand int +} + +// Pattern is a template pattern of http request paths defined in github.com/googleapis/googleapis/google/api/http.proto. +type Pattern struct { + // ops is a list of operations + ops []op + // pool is a constant pool indexed by the operands or vars. + pool []string + // vars is a list of variables names to be bound by this pattern + vars []string + // stacksize is the max depth of the stack + stacksize int + // tailLen is the length of the fixed-size segments after a deep wildcard + tailLen int + // verb is the VERB part of the path pattern. It is empty if the pattern does not have VERB part. + verb string + // assumeColonVerb indicates whether a path suffix after a final + // colon may only be interpreted as a verb. + assumeColonVerb bool +} + +type patternOptions struct { + assumeColonVerb bool +} + +// PatternOpt is an option for creating Patterns. +type PatternOpt func(*patternOptions) + +// NewPattern returns a new Pattern from the given definition values. +// "ops" is a sequence of op codes. "pool" is a constant pool. +// "verb" is the verb part of the pattern. It is empty if the pattern does not have the part. +// "version" must be 1 for now. +// It returns an error if the given definition is invalid. 
+func NewPattern(version int, ops []int, pool []string, verb string, opts ...PatternOpt) (Pattern, error) { + options := patternOptions{ + assumeColonVerb: true, + } + for _, o := range opts { + o(&options) + } + + if version != 1 { + grpclog.Infof("unsupported version: %d", version) + return Pattern{}, ErrInvalidPattern + } + + l := len(ops) + if l%2 != 0 { + grpclog.Infof("odd number of ops codes: %d", l) + return Pattern{}, ErrInvalidPattern + } + + var ( + typedOps []op + stack, maxstack int + tailLen int + pushMSeen bool + vars []string + ) + for i := 0; i < l; i += 2 { + op := op{code: utilities.OpCode(ops[i]), operand: ops[i+1]} + switch op.code { + case utilities.OpNop: + continue + case utilities.OpPush: + if pushMSeen { + tailLen++ + } + stack++ + case utilities.OpPushM: + if pushMSeen { + grpclog.Infof("pushM appears twice") + return Pattern{}, ErrInvalidPattern + } + pushMSeen = true + stack++ + case utilities.OpLitPush: + if op.operand < 0 || len(pool) <= op.operand { + grpclog.Infof("negative literal index: %d", op.operand) + return Pattern{}, ErrInvalidPattern + } + if pushMSeen { + tailLen++ + } + stack++ + case utilities.OpConcatN: + if op.operand <= 0 { + grpclog.Infof("negative concat size: %d", op.operand) + return Pattern{}, ErrInvalidPattern + } + stack -= op.operand + if stack < 0 { + grpclog.Print("stack underflow") + return Pattern{}, ErrInvalidPattern + } + stack++ + case utilities.OpCapture: + if op.operand < 0 || len(pool) <= op.operand { + grpclog.Infof("variable name index out of bound: %d", op.operand) + return Pattern{}, ErrInvalidPattern + } + v := pool[op.operand] + op.operand = len(vars) + vars = append(vars, v) + stack-- + if stack < 0 { + grpclog.Infof("stack underflow") + return Pattern{}, ErrInvalidPattern + } + default: + grpclog.Infof("invalid opcode: %d", op.code) + return Pattern{}, ErrInvalidPattern + } + + if maxstack < stack { + maxstack = stack + } + typedOps = append(typedOps, op) + } + return Pattern{ + ops: typedOps, + pool: pool, + vars: vars, + stacksize: maxstack, + tailLen: tailLen, + verb: verb, + assumeColonVerb: options.assumeColonVerb, + }, nil +} + +// MustPattern is a helper function which makes it easier to call NewPattern in variable initialization. +func MustPattern(p Pattern, err error) Pattern { + if err != nil { + grpclog.Fatalf("Pattern initialization failed: %v", err) + } + return p +} + +// Match examines components if it matches to the Pattern. +// If it matches, the function returns a mapping from field paths to their captured values. +// If otherwise, the function returns an error. +func (p Pattern) Match(components []string, verb string) (map[string]string, error) { + if p.verb != verb { + if p.assumeColonVerb || p.verb != "" { + return nil, ErrNotMatch + } + if len(components) == 0 { + components = []string{":" + verb} + } else { + components = append([]string{}, components...) 
+ components[len(components)-1] += ":" + verb + } + verb = "" + } + + var pos int + stack := make([]string, 0, p.stacksize) + captured := make([]string, len(p.vars)) + l := len(components) + for _, op := range p.ops { + switch op.code { + case utilities.OpNop: + continue + case utilities.OpPush, utilities.OpLitPush: + if pos >= l { + return nil, ErrNotMatch + } + c := components[pos] + if op.code == utilities.OpLitPush { + if lit := p.pool[op.operand]; c != lit { + return nil, ErrNotMatch + } + } + stack = append(stack, c) + pos++ + case utilities.OpPushM: + end := len(components) + if end < pos+p.tailLen { + return nil, ErrNotMatch + } + end -= p.tailLen + stack = append(stack, strings.Join(components[pos:end], "/")) + pos = end + case utilities.OpConcatN: + n := op.operand + l := len(stack) - n + stack = append(stack[:l], strings.Join(stack[l:], "/")) + case utilities.OpCapture: + n := len(stack) - 1 + captured[op.operand] = stack[n] + stack = stack[:n] + } + } + if pos < l { + return nil, ErrNotMatch + } + bindings := make(map[string]string) + for i, val := range captured { + bindings[p.vars[i]] = val + } + return bindings, nil +} + +// Verb returns the verb part of the Pattern. +func (p Pattern) Verb() string { return p.verb } + +func (p Pattern) String() string { + var stack []string + for _, op := range p.ops { + switch op.code { + case utilities.OpNop: + continue + case utilities.OpPush: + stack = append(stack, "*") + case utilities.OpLitPush: + stack = append(stack, p.pool[op.operand]) + case utilities.OpPushM: + stack = append(stack, "**") + case utilities.OpConcatN: + n := op.operand + l := len(stack) - n + stack = append(stack[:l], strings.Join(stack[l:], "/")) + case utilities.OpCapture: + n := len(stack) - 1 + stack[n] = fmt.Sprintf("{%s=%s}", p.vars[op.operand], stack[n]) + } + } + segs := strings.Join(stack, "/") + if p.verb != "" { + return fmt.Sprintf("/%s:%s", segs, p.verb) + } + return "/" + segs +} + +// AssumeColonVerbOpt indicates whether a path suffix after a final +// colon may only be interpreted as a verb. +func AssumeColonVerbOpt(val bool) PatternOpt { + return PatternOpt(func(o *patternOptions) { + o.assumeColonVerb = val + }) +} diff -Nru temporal-1.21.5-1/src/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go temporal-1.22.5/src/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go --- temporal-1.21.5-1/src/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,80 @@ +package runtime + +import ( + "github.com/golang/protobuf/proto" +) + +// StringP returns a pointer to a string whose pointee is same as the given string value. +func StringP(val string) (*string, error) { + return proto.String(val), nil +} + +// BoolP parses the given string representation of a boolean value, +// and returns a pointer to a bool whose value is same as the parsed value. +func BoolP(val string) (*bool, error) { + b, err := Bool(val) + if err != nil { + return nil, err + } + return proto.Bool(b), nil +} + +// Float64P parses the given string representation of a floating point number, +// and returns a pointer to a float64 whose value is same as the parsed number. 
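NewPattern, Match and the Pattern opcodes are normally driven by generated gateway code, but they can be exercised directly. The sketch below, which is not part of the diff, compiles the equivalent of the HTTP rule /v1/{name} and matches a request path against it; the pool strings are hypothetical, and the opcode constants come from the utilities package added further down in this diff.

    package main

    import (
    	"fmt"

    	"github.com/grpc-ecosystem/grpc-gateway/runtime"
    	"github.com/grpc-ecosystem/grpc-gateway/utilities"
    )

    func main() {
    	pat := runtime.MustPattern(runtime.NewPattern(
    		1,
    		[]int{
    			int(utilities.OpLitPush), 0, // literal segment "v1"
    			int(utilities.OpPush), 0,    // one wildcard segment
    			int(utilities.OpConcatN), 1, // collapse it into a single value
    			int(utilities.OpCapture), 1, // bind it to the variable "name"
    		},
    		[]string{"v1", "name"},
    		"",
    	))

    	fmt.Println(pat.String()) // /v1/{name=*}

    	params, err := pat.Match([]string{"v1", "workflows"}, "")
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(params["name"]) // workflows
    }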
+func Float64P(val string) (*float64, error) { + f, err := Float64(val) + if err != nil { + return nil, err + } + return proto.Float64(f), nil +} + +// Float32P parses the given string representation of a floating point number, +// and returns a pointer to a float32 whose value is same as the parsed number. +func Float32P(val string) (*float32, error) { + f, err := Float32(val) + if err != nil { + return nil, err + } + return proto.Float32(f), nil +} + +// Int64P parses the given string representation of an integer +// and returns a pointer to a int64 whose value is same as the parsed integer. +func Int64P(val string) (*int64, error) { + i, err := Int64(val) + if err != nil { + return nil, err + } + return proto.Int64(i), nil +} + +// Int32P parses the given string representation of an integer +// and returns a pointer to a int32 whose value is same as the parsed integer. +func Int32P(val string) (*int32, error) { + i, err := Int32(val) + if err != nil { + return nil, err + } + return proto.Int32(i), err +} + +// Uint64P parses the given string representation of an integer +// and returns a pointer to a uint64 whose value is same as the parsed integer. +func Uint64P(val string) (*uint64, error) { + i, err := Uint64(val) + if err != nil { + return nil, err + } + return proto.Uint64(i), err +} + +// Uint32P parses the given string representation of an integer +// and returns a pointer to a uint32 whose value is same as the parsed integer. +func Uint32P(val string) (*uint32, error) { + i, err := Uint32(val) + if err != nil { + return nil, err + } + return proto.Uint32(i), err +} diff -Nru temporal-1.21.5-1/src/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go temporal-1.22.5/src/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go --- temporal-1.21.5-1/src/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,106 @@ +package runtime + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/ptypes/any" + "github.com/grpc-ecosystem/grpc-gateway/internal" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" +) + +// StreamErrorHandlerFunc accepts an error as a gRPC error generated via status package and translates it into a +// a proto struct used to represent error at the end of a stream. +type StreamErrorHandlerFunc func(context.Context, error) *StreamError + +// StreamError is the payload for the final message in a server stream in the event that the server returns an +// error after a response message has already been sent. +type StreamError internal.StreamError + +// ProtoErrorHandlerFunc handles the error as a gRPC error generated via status package and replies to the request. +type ProtoErrorHandlerFunc func(context.Context, *ServeMux, Marshaler, http.ResponseWriter, *http.Request, error) + +var _ ProtoErrorHandlerFunc = DefaultHTTPProtoErrorHandler + +// DefaultHTTPProtoErrorHandler is an implementation of HTTPError. +// If "err" is an error from gRPC system, the function replies with the status code mapped by HTTPStatusFromCode. +// If otherwise, it replies with http.StatusInternalServerError. +// +// The response body returned by this function is a Status message marshaled by a Marshaler. +// +// Do not set this function to HTTPError variable directly, use WithProtoErrorHandler option instead. 
+func DefaultHTTPProtoErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, _ *http.Request, err error) { + // return Internal when Marshal failed + const fallback = `{"code": 13, "message": "failed to marshal error message"}` + + s, ok := status.FromError(err) + if !ok { + s = status.New(codes.Unknown, err.Error()) + } + + w.Header().Del("Trailer") + + contentType := marshaler.ContentType() + // Check marshaler on run time in order to keep backwards compatibility + // An interface param needs to be added to the ContentType() function on + // the Marshal interface to be able to remove this check + if typeMarshaler, ok := marshaler.(contentTypeMarshaler); ok { + pb := s.Proto() + contentType = typeMarshaler.ContentTypeFromMessage(pb) + } + w.Header().Set("Content-Type", contentType) + + buf, merr := marshaler.Marshal(s.Proto()) + if merr != nil { + grpclog.Infof("Failed to marshal error message %q: %v", s.Proto(), merr) + w.WriteHeader(http.StatusInternalServerError) + if _, err := io.WriteString(w, fallback); err != nil { + grpclog.Infof("Failed to write response: %v", err) + } + return + } + + md, ok := ServerMetadataFromContext(ctx) + if !ok { + grpclog.Infof("Failed to extract ServerMetadata from context") + } + + handleForwardResponseServerMetadata(w, mux, md) + handleForwardResponseTrailerHeader(w, md) + st := HTTPStatusFromCode(s.Code()) + w.WriteHeader(st) + if _, err := w.Write(buf); err != nil { + grpclog.Infof("Failed to write response: %v", err) + } + + handleForwardResponseTrailer(w, md) +} + +// DefaultHTTPStreamErrorHandler converts the given err into a *StreamError via +// default logic. +// +// It extracts the gRPC status from err if possible. The fields of the status are +// used to populate the returned StreamError, and the HTTP status code is derived +// from the gRPC code via HTTPStatusFromCode. If the given err does not contain a +// gRPC status, an "Unknown" gRPC code is used and "Internal Server Error" HTTP code. 
+func DefaultHTTPStreamErrorHandler(_ context.Context, err error) *StreamError { + grpcCode := codes.Unknown + grpcMessage := err.Error() + var grpcDetails []*any.Any + if s, ok := status.FromError(err); ok { + grpcCode = s.Code() + grpcMessage = s.Message() + grpcDetails = s.Proto().GetDetails() + } + httpCode := HTTPStatusFromCode(grpcCode) + return &StreamError{ + GrpcCode: int32(grpcCode), + HttpCode: int32(httpCode), + Message: grpcMessage, + HttpStatus: http.StatusText(httpCode), + Details: grpcDetails, + } +} diff -Nru temporal-1.21.5-1/src/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go temporal-1.22.5/src/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go --- temporal-1.21.5-1/src/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,406 @@ +package runtime + +import ( + "encoding/base64" + "fmt" + "net/url" + "reflect" + "regexp" + "strconv" + "strings" + "time" + + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc/grpclog" +) + +var valuesKeyRegexp = regexp.MustCompile("^(.*)\\[(.*)\\]$") + +var currentQueryParser QueryParameterParser = &defaultQueryParser{} + +// QueryParameterParser defines interface for all query parameter parsers +type QueryParameterParser interface { + Parse(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error +} + +// PopulateQueryParameters parses query parameters +// into "msg" using current query parser +func PopulateQueryParameters(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error { + return currentQueryParser.Parse(msg, values, filter) +} + +type defaultQueryParser struct{} + +// Parse populates "values" into "msg". +// A value is ignored if its key starts with one of the elements in "filter". +func (*defaultQueryParser) Parse(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error { + for key, values := range values { + match := valuesKeyRegexp.FindStringSubmatch(key) + if len(match) == 3 { + key = match[1] + values = append([]string{match[2]}, values...) + } + fieldPath := strings.Split(key, ".") + if filter.HasCommonPrefix(fieldPath) { + continue + } + if err := populateFieldValueFromPath(msg, fieldPath, values); err != nil { + return err + } + } + return nil +} + +// PopulateFieldFromPath sets a value in a nested Protobuf structure. +// It instantiates missing protobuf fields as it goes. 
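WithProtoErrorHandler (above) and DefaultHTTPProtoErrorHandler together let a gateway reply to errors with a marshaled gRPC Status instead of the plain-text fallback. A small sketch of wrapping the default handler follows; it is illustrative only, not part of the diff, and the header name and port are hypothetical.

    package main

    import (
    	"context"
    	"net/http"

    	"github.com/grpc-ecosystem/grpc-gateway/runtime"
    )

    func main() {
    	errHandler := func(ctx context.Context, mux *runtime.ServeMux, m runtime.Marshaler,
    		w http.ResponseWriter, r *http.Request, err error) {
    		// Mark gateway-generated errors, then defer to the default handler,
    		// which writes the gRPC Status as the response body.
    		w.Header().Set("X-Gateway-Error", "true")
    		runtime.DefaultHTTPProtoErrorHandler(ctx, mux, m, w, r, err)
    	}

    	mux := runtime.NewServeMux(runtime.WithProtoErrorHandler(errHandler))
    	_ = http.ListenAndServe(":8080", mux)
    }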
+func PopulateFieldFromPath(msg proto.Message, fieldPathString string, value string) error { + fieldPath := strings.Split(fieldPathString, ".") + return populateFieldValueFromPath(msg, fieldPath, []string{value}) +} + +func populateFieldValueFromPath(msg proto.Message, fieldPath []string, values []string) error { + m := reflect.ValueOf(msg) + if m.Kind() != reflect.Ptr { + return fmt.Errorf("unexpected type %T: %v", msg, msg) + } + var props *proto.Properties + m = m.Elem() + for i, fieldName := range fieldPath { + isLast := i == len(fieldPath)-1 + if !isLast && m.Kind() != reflect.Struct { + return fmt.Errorf("non-aggregate type in the mid of path: %s", strings.Join(fieldPath, ".")) + } + var f reflect.Value + var err error + f, props, err = fieldByProtoName(m, fieldName) + if err != nil { + return err + } else if !f.IsValid() { + grpclog.Infof("field not found in %T: %s", msg, strings.Join(fieldPath, ".")) + return nil + } + + switch f.Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, reflect.String, reflect.Uint32, reflect.Uint64: + if !isLast { + return fmt.Errorf("unexpected nested field %s in %s", fieldPath[i+1], strings.Join(fieldPath[:i+1], ".")) + } + m = f + case reflect.Slice: + if !isLast { + return fmt.Errorf("unexpected repeated field in %s", strings.Join(fieldPath, ".")) + } + // Handle []byte + if f.Type().Elem().Kind() == reflect.Uint8 { + m = f + break + } + return populateRepeatedField(f, values, props) + case reflect.Ptr: + if f.IsNil() { + m = reflect.New(f.Type().Elem()) + f.Set(m.Convert(f.Type())) + } + m = f.Elem() + continue + case reflect.Struct: + m = f + continue + case reflect.Map: + if !isLast { + return fmt.Errorf("unexpected nested field %s in %s", fieldPath[i+1], strings.Join(fieldPath[:i+1], ".")) + } + return populateMapField(f, values, props) + default: + return fmt.Errorf("unexpected type %s in %T", f.Type(), msg) + } + } + switch len(values) { + case 0: + return fmt.Errorf("no value of field: %s", strings.Join(fieldPath, ".")) + case 1: + default: + grpclog.Infof("too many field values: %s", strings.Join(fieldPath, ".")) + } + return populateField(m, values[0], props) +} + +// fieldByProtoName looks up a field whose corresponding protobuf field name is "name". +// "m" must be a struct value. It returns zero reflect.Value if no such field found. 
+func fieldByProtoName(m reflect.Value, name string) (reflect.Value, *proto.Properties, error) { + props := proto.GetProperties(m.Type()) + + // look up field name in oneof map + for _, op := range props.OneofTypes { + if name == op.Prop.OrigName || name == op.Prop.JSONName { + v := reflect.New(op.Type.Elem()) + field := m.Field(op.Field) + if !field.IsNil() { + return reflect.Value{}, nil, fmt.Errorf("field already set for %s oneof", props.Prop[op.Field].OrigName) + } + field.Set(v) + return v.Elem().Field(0), op.Prop, nil + } + } + + for _, p := range props.Prop { + if p.OrigName == name { + return m.FieldByName(p.Name), p, nil + } + if p.JSONName == name { + return m.FieldByName(p.Name), p, nil + } + } + return reflect.Value{}, nil, nil +} + +func populateMapField(f reflect.Value, values []string, props *proto.Properties) error { + if len(values) != 2 { + return fmt.Errorf("more than one value provided for key %s in map %s", values[0], props.Name) + } + + key, value := values[0], values[1] + keyType := f.Type().Key() + valueType := f.Type().Elem() + if f.IsNil() { + f.Set(reflect.MakeMap(f.Type())) + } + + keyConv, ok := convFromType[keyType.Kind()] + if !ok { + return fmt.Errorf("unsupported key type %s in map %s", keyType, props.Name) + } + valueConv, ok := convFromType[valueType.Kind()] + if !ok { + return fmt.Errorf("unsupported value type %s in map %s", valueType, props.Name) + } + + keyV := keyConv.Call([]reflect.Value{reflect.ValueOf(key)}) + if err := keyV[1].Interface(); err != nil { + return err.(error) + } + valueV := valueConv.Call([]reflect.Value{reflect.ValueOf(value)}) + if err := valueV[1].Interface(); err != nil { + return err.(error) + } + + f.SetMapIndex(keyV[0].Convert(keyType), valueV[0].Convert(valueType)) + + return nil +} + +func populateRepeatedField(f reflect.Value, values []string, props *proto.Properties) error { + elemType := f.Type().Elem() + + // is the destination field a slice of an enumeration type? + if enumValMap := proto.EnumValueMap(props.Enum); enumValMap != nil { + return populateFieldEnumRepeated(f, values, enumValMap) + } + + conv, ok := convFromType[elemType.Kind()] + if !ok { + return fmt.Errorf("unsupported field type %s", elemType) + } + f.Set(reflect.MakeSlice(f.Type(), len(values), len(values)).Convert(f.Type())) + for i, v := range values { + result := conv.Call([]reflect.Value{reflect.ValueOf(v)}) + if err := result[1].Interface(); err != nil { + return err.(error) + } + f.Index(i).Set(result[0].Convert(f.Index(i).Type())) + } + return nil +} + +func populateField(f reflect.Value, value string, props *proto.Properties) error { + i := f.Addr().Interface() + + // Handle protobuf well known types + var name string + switch m := i.(type) { + case interface{ XXX_WellKnownType() string }: + name = m.XXX_WellKnownType() + case proto.Message: + const wktPrefix = "google.protobuf." 
+ if fullName := proto.MessageName(m); strings.HasPrefix(fullName, wktPrefix) { + name = fullName[len(wktPrefix):] + } + } + switch name { + case "Timestamp": + if value == "null" { + f.FieldByName("Seconds").SetInt(0) + f.FieldByName("Nanos").SetInt(0) + return nil + } + + t, err := time.Parse(time.RFC3339Nano, value) + if err != nil { + return fmt.Errorf("bad Timestamp: %v", err) + } + f.FieldByName("Seconds").SetInt(int64(t.Unix())) + f.FieldByName("Nanos").SetInt(int64(t.Nanosecond())) + return nil + case "Duration": + if value == "null" { + f.FieldByName("Seconds").SetInt(0) + f.FieldByName("Nanos").SetInt(0) + return nil + } + d, err := time.ParseDuration(value) + if err != nil { + return fmt.Errorf("bad Duration: %v", err) + } + + ns := d.Nanoseconds() + s := ns / 1e9 + ns %= 1e9 + f.FieldByName("Seconds").SetInt(s) + f.FieldByName("Nanos").SetInt(ns) + return nil + case "DoubleValue": + fallthrough + case "FloatValue": + float64Val, err := strconv.ParseFloat(value, 64) + if err != nil { + return fmt.Errorf("bad DoubleValue: %s", value) + } + f.FieldByName("Value").SetFloat(float64Val) + return nil + case "Int64Value": + fallthrough + case "Int32Value": + int64Val, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return fmt.Errorf("bad DoubleValue: %s", value) + } + f.FieldByName("Value").SetInt(int64Val) + return nil + case "UInt64Value": + fallthrough + case "UInt32Value": + uint64Val, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return fmt.Errorf("bad DoubleValue: %s", value) + } + f.FieldByName("Value").SetUint(uint64Val) + return nil + case "BoolValue": + if value == "true" { + f.FieldByName("Value").SetBool(true) + } else if value == "false" { + f.FieldByName("Value").SetBool(false) + } else { + return fmt.Errorf("bad BoolValue: %s", value) + } + return nil + case "StringValue": + f.FieldByName("Value").SetString(value) + return nil + case "BytesValue": + bytesVal, err := base64.StdEncoding.DecodeString(value) + if err != nil { + return fmt.Errorf("bad BytesValue: %s", value) + } + f.FieldByName("Value").SetBytes(bytesVal) + return nil + case "FieldMask": + p := f.FieldByName("Paths") + for _, v := range strings.Split(value, ",") { + if v != "" { + p.Set(reflect.Append(p, reflect.ValueOf(v))) + } + } + return nil + } + + // Handle Time and Duration stdlib types + switch t := i.(type) { + case *time.Time: + pt, err := time.Parse(time.RFC3339Nano, value) + if err != nil { + return fmt.Errorf("bad Timestamp: %v", err) + } + *t = pt + return nil + case *time.Duration: + d, err := time.ParseDuration(value) + if err != nil { + return fmt.Errorf("bad Duration: %v", err) + } + *t = d + return nil + } + + // is the destination field an enumeration type? 
+ if enumValMap := proto.EnumValueMap(props.Enum); enumValMap != nil { + return populateFieldEnum(f, value, enumValMap) + } + + conv, ok := convFromType[f.Kind()] + if !ok { + return fmt.Errorf("field type %T is not supported in query parameters", i) + } + result := conv.Call([]reflect.Value{reflect.ValueOf(value)}) + if err := result[1].Interface(); err != nil { + return err.(error) + } + f.Set(result[0].Convert(f.Type())) + return nil +} + +func convertEnum(value string, t reflect.Type, enumValMap map[string]int32) (reflect.Value, error) { + // see if it's an enumeration string + if enumVal, ok := enumValMap[value]; ok { + return reflect.ValueOf(enumVal).Convert(t), nil + } + + // check for an integer that matches an enumeration value + eVal, err := strconv.Atoi(value) + if err != nil { + return reflect.Value{}, fmt.Errorf("%s is not a valid %s", value, t) + } + for _, v := range enumValMap { + if v == int32(eVal) { + return reflect.ValueOf(eVal).Convert(t), nil + } + } + return reflect.Value{}, fmt.Errorf("%s is not a valid %s", value, t) +} + +func populateFieldEnum(f reflect.Value, value string, enumValMap map[string]int32) error { + cval, err := convertEnum(value, f.Type(), enumValMap) + if err != nil { + return err + } + f.Set(cval) + return nil +} + +func populateFieldEnumRepeated(f reflect.Value, values []string, enumValMap map[string]int32) error { + elemType := f.Type().Elem() + f.Set(reflect.MakeSlice(f.Type(), len(values), len(values)).Convert(f.Type())) + for i, v := range values { + result, err := convertEnum(v, elemType, enumValMap) + if err != nil { + return err + } + f.Index(i).Set(result) + } + return nil +} + +var ( + convFromType = map[reflect.Kind]reflect.Value{ + reflect.String: reflect.ValueOf(String), + reflect.Bool: reflect.ValueOf(Bool), + reflect.Float64: reflect.ValueOf(Float64), + reflect.Float32: reflect.ValueOf(Float32), + reflect.Int64: reflect.ValueOf(Int64), + reflect.Int32: reflect.ValueOf(Int32), + reflect.Uint64: reflect.ValueOf(Uint64), + reflect.Uint32: reflect.ValueOf(Uint32), + reflect.Slice: reflect.ValueOf(Bytes), + } +) diff -Nru temporal-1.21.5-1/src/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel temporal-1.22.5/src/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel --- temporal-1.21.5-1/src/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,21 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +package(default_visibility = ["//visibility:public"]) + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "pattern.go", + "readerfactory.go", + "trie.go", + ], + importpath = "github.com/grpc-ecosystem/grpc-gateway/utilities", +) + +go_test( + name = "go_default_test", + size = "small", + srcs = ["trie_test.go"], + embed = [":go_default_library"], +) diff -Nru temporal-1.21.5-1/src/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/doc.go temporal-1.22.5/src/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/doc.go --- temporal-1.21.5-1/src/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/doc.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/doc.go 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,2 @@ +// Package utilities provides members for internal use in grpc-gateway. 
+package utilities diff -Nru temporal-1.21.5-1/src/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/pattern.go temporal-1.22.5/src/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/pattern.go --- temporal-1.21.5-1/src/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/pattern.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/pattern.go 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,22 @@ +package utilities + +// An OpCode is a opcode of compiled path patterns. +type OpCode int + +// These constants are the valid values of OpCode. +const ( + // OpNop does nothing + OpNop = OpCode(iota) + // OpPush pushes a component to stack + OpPush + // OpLitPush pushes a component to stack if it matches to the literal + OpLitPush + // OpPushM concatenates the remaining components and pushes it to stack + OpPushM + // OpConcatN pops N items from stack, concatenates them and pushes it back to stack + OpConcatN + // OpCapture pops an item and binds it to the variable + OpCapture + // OpEnd is the least positive invalid opcode. + OpEnd +) diff -Nru temporal-1.21.5-1/src/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/readerfactory.go temporal-1.22.5/src/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/readerfactory.go --- temporal-1.21.5-1/src/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/readerfactory.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/readerfactory.go 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,20 @@ +package utilities + +import ( + "bytes" + "io" + "io/ioutil" +) + +// IOReaderFactory takes in an io.Reader and returns a function that will allow you to create a new reader that begins +// at the start of the stream +func IOReaderFactory(r io.Reader) (func() io.Reader, error) { + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, err + } + + return func() io.Reader { + return bytes.NewReader(b) + }, nil +} diff -Nru temporal-1.21.5-1/src/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/trie.go temporal-1.22.5/src/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/trie.go --- temporal-1.21.5-1/src/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/trie.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/trie.go 2024-02-23 09:46:10.000000000 +0000 @@ -0,0 +1,177 @@ +package utilities + +import ( + "sort" +) + +// DoubleArray is a Double Array implementation of trie on sequences of strings. +type DoubleArray struct { + // Encoding keeps an encoding from string to int + Encoding map[string]int + // Base is the base array of Double Array + Base []int + // Check is the check array of Double Array + Check []int +} + +// NewDoubleArray builds a DoubleArray from a set of sequences of strings. 
+func NewDoubleArray(seqs [][]string) *DoubleArray { + da := &DoubleArray{Encoding: make(map[string]int)} + if len(seqs) == 0 { + return da + } + + encoded := registerTokens(da, seqs) + sort.Sort(byLex(encoded)) + + root := node{row: -1, col: -1, left: 0, right: len(encoded)} + addSeqs(da, encoded, 0, root) + + for i := len(da.Base); i > 0; i-- { + if da.Check[i-1] != 0 { + da.Base = da.Base[:i] + da.Check = da.Check[:i] + break + } + } + return da +} + +func registerTokens(da *DoubleArray, seqs [][]string) [][]int { + var result [][]int + for _, seq := range seqs { + var encoded []int + for _, token := range seq { + if _, ok := da.Encoding[token]; !ok { + da.Encoding[token] = len(da.Encoding) + } + encoded = append(encoded, da.Encoding[token]) + } + result = append(result, encoded) + } + for i := range result { + result[i] = append(result[i], len(da.Encoding)) + } + return result +} + +type node struct { + row, col int + left, right int +} + +func (n node) value(seqs [][]int) int { + return seqs[n.row][n.col] +} + +func (n node) children(seqs [][]int) []*node { + var result []*node + lastVal := int(-1) + last := new(node) + for i := n.left; i < n.right; i++ { + if lastVal == seqs[i][n.col+1] { + continue + } + last.right = i + last = &node{ + row: i, + col: n.col + 1, + left: i, + } + result = append(result, last) + } + last.right = n.right + return result +} + +func addSeqs(da *DoubleArray, seqs [][]int, pos int, n node) { + ensureSize(da, pos) + + children := n.children(seqs) + var i int + for i = 1; ; i++ { + ok := func() bool { + for _, child := range children { + code := child.value(seqs) + j := i + code + ensureSize(da, j) + if da.Check[j] != 0 { + return false + } + } + return true + }() + if ok { + break + } + } + da.Base[pos] = i + for _, child := range children { + code := child.value(seqs) + j := i + code + da.Check[j] = pos + 1 + } + terminator := len(da.Encoding) + for _, child := range children { + code := child.value(seqs) + if code == terminator { + continue + } + j := i + code + addSeqs(da, seqs, j, *child) + } +} + +func ensureSize(da *DoubleArray, i int) { + for i >= len(da.Base) { + da.Base = append(da.Base, make([]int, len(da.Base)+1)...) + da.Check = append(da.Check, make([]int, len(da.Check)+1)...) + } +} + +type byLex [][]int + +func (l byLex) Len() int { return len(l) } +func (l byLex) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l byLex) Less(i, j int) bool { + si := l[i] + sj := l[j] + var k int + for k = 0; k < len(si) && k < len(sj); k++ { + if si[k] < sj[k] { + return true + } + if si[k] > sj[k] { + return false + } + } + if k < len(sj) { + return true + } + return false +} + +// HasCommonPrefix determines if any sequence in the DoubleArray is a prefix of the given sequence. 
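The DoubleArray trie above is what PopulateQueryParameters uses as its filter: query keys whose dotted field path has a registered prefix are skipped. A short sketch, not part of the diff and using hypothetical field names, shows the behaviour documented for HasCommonPrefix.

    package main

    import (
    	"fmt"

    	"github.com/grpc-ecosystem/grpc-gateway/utilities"
    )

    func main() {
    	filter := utilities.NewDoubleArray([][]string{
    		{"namespace"},
    		{"workflow_execution", "workflow_id"},
    	})

    	// "namespace" is a registered prefix of "namespace.name", so a query
    	// parameter with that key would be filtered out.
    	fmt.Println(filter.HasCommonPrefix([]string{"namespace", "name"})) // true

    	// No registered sequence is a prefix of "workflow_execution" alone.
    	fmt.Println(filter.HasCommonPrefix([]string{"workflow_execution"})) // false
    }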
+func (da *DoubleArray) HasCommonPrefix(seq []string) bool { + if len(da.Base) == 0 { + return false + } + + var i int + for _, t := range seq { + code, ok := da.Encoding[t] + if !ok { + break + } + j := da.Base[i] + code + if len(da.Check) <= j || da.Check[j] != i+1 { + break + } + i = j + } + j := da.Base[i] + len(da.Encoding) + if len(da.Check) <= j || da.Check[j] != i+1 { + return false + } + return true +} diff -Nru temporal-1.21.5-1/src/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel temporal-1.22.5/src/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel --- temporal-1.21.5-1/src/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel 2023-09-29 14:03:31.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel 2024-02-23 09:46:10.000000000 +0000 @@ -27,7 +27,6 @@ "//internal/httprule", "//utilities", "@go_googleapis//google/api:httpbody_go_proto", - "@io_bazel_rules_go//proto/wkt:field_mask_go_proto", "@org_golang_google_grpc//codes", "@org_golang_google_grpc//grpclog", "@org_golang_google_grpc//health/grpc_health_v1", @@ -38,6 +37,7 @@ "@org_golang_google_protobuf//reflect/protoreflect", "@org_golang_google_protobuf//reflect/protoregistry", "@org_golang_google_protobuf//types/known/durationpb", + "@org_golang_google_protobuf//types/known/fieldmaskpb", "@org_golang_google_protobuf//types/known/structpb", "@org_golang_google_protobuf//types/known/timestamppb", "@org_golang_google_protobuf//types/known/wrapperspb", @@ -73,7 +73,6 @@ "@go_googleapis//google/api:httpbody_go_proto", "@go_googleapis//google/rpc:errdetails_go_proto", "@go_googleapis//google/rpc:status_go_proto", - "@io_bazel_rules_go//proto/wkt:field_mask_go_proto", "@org_golang_google_grpc//:go_default_library", "@org_golang_google_grpc//codes", "@org_golang_google_grpc//health/grpc_health_v1", @@ -84,6 +83,7 @@ "@org_golang_google_protobuf//testing/protocmp", "@org_golang_google_protobuf//types/known/durationpb", "@org_golang_google_protobuf//types/known/emptypb", + "@org_golang_google_protobuf//types/known/fieldmaskpb", "@org_golang_google_protobuf//types/known/structpb", "@org_golang_google_protobuf//types/known/timestamppb", "@org_golang_google_protobuf//types/known/wrapperspb", diff -Nru temporal-1.21.5-1/src/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go temporal-1.22.5/src/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go --- temporal-1.21.5-1/src/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go 2023-09-29 14:03:31.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go 2024-02-23 09:46:10.000000000 +0000 @@ -7,9 +7,9 @@ "io" "sort" - "google.golang.org/genproto/protobuf/field_mask" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/reflect/protoreflect" + field_mask "google.golang.org/protobuf/types/known/fieldmaskpb" ) func getFieldByName(fields protoreflect.FieldDescriptors, name string) protoreflect.FieldDescriptor { diff -Nru temporal-1.21.5-1/src/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go temporal-1.22.5/src/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go --- temporal-1.21.5-1/src/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go 2023-09-29 14:03:31.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go 2024-02-23 09:46:10.000000000 +0000 @@ -389,8 +389,12 @@ return } - // lookup 
other methods to handle fallback from GET to POST and - // to determine if it is NotImplemented or NotFound. + // if no handler has found for the request, lookup for other methods + // to handle POST -> GET fallback if the request is subject to path + // length fallback. + // Note we are not eagerly checking the request here as we want to return the + // right HTTP status code, and we need to process the fallback candidates in + // order to do that. for m, handlers := range s.handlers { if m == r.Method { continue @@ -423,8 +427,11 @@ } continue } + // X-HTTP-Method-Override is optional. Always allow fallback to POST. - if s.isPathLengthFallback(r) { + // Also, only consider POST -> GET fallbacks, and avoid falling back to + // potentially dangerous operations like DELETE. + if s.isPathLengthFallback(r) && m == http.MethodGet { if err := r.ParseForm(); err != nil { _, outboundMarshaler := MarshalerForRequest(s, r) sterr := status.Error(codes.InvalidArgument, err.Error()) diff -Nru temporal-1.21.5-1/src/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go temporal-1.22.5/src/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go --- temporal-1.21.5-1/src/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go 2023-09-29 14:03:31.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go 2024-02-23 09:46:10.000000000 +0000 @@ -10,13 +10,13 @@ "time" "github.com/grpc-ecosystem/grpc-gateway/v2/utilities" - "google.golang.org/genproto/protobuf/field_mask" "google.golang.org/grpc/grpclog" "google.golang.org/protobuf/encoding/protojson" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/reflect/protoregistry" "google.golang.org/protobuf/types/known/durationpb" + field_mask "google.golang.org/protobuf/types/known/fieldmaskpb" "google.golang.org/protobuf/types/known/structpb" "google.golang.org/protobuf/types/known/timestamppb" "google.golang.org/protobuf/types/known/wrapperspb" diff -Nru temporal-1.21.5-1/src/vendor/github.com/jonboulle/clockwork/.editorconfig temporal-1.22.5/src/vendor/github.com/jonboulle/clockwork/.editorconfig --- temporal-1.21.5-1/src/vendor/github.com/jonboulle/clockwork/.editorconfig 2023-09-29 14:03:31.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/jonboulle/clockwork/.editorconfig 1970-01-01 00:00:00.000000000 +0000 @@ -1,12 +0,0 @@ -root = true - -[*] -charset = utf-8 -end_of_line = lf -indent_size = 4 -indent_style = space -insert_final_newline = true -trim_trailing_whitespace = true - -[*.go] -indent_style = tab diff -Nru temporal-1.21.5-1/src/vendor/github.com/jonboulle/clockwork/.gitignore temporal-1.22.5/src/vendor/github.com/jonboulle/clockwork/.gitignore --- temporal-1.21.5-1/src/vendor/github.com/jonboulle/clockwork/.gitignore 2023-09-29 14:03:31.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/jonboulle/clockwork/.gitignore 1970-01-01 00:00:00.000000000 +0000 @@ -1,27 +0,0 @@ -/.idea/ - -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test - -*.swp diff -Nru temporal-1.21.5-1/src/vendor/github.com/jonboulle/clockwork/LICENSE temporal-1.22.5/src/vendor/github.com/jonboulle/clockwork/LICENSE --- temporal-1.21.5-1/src/vendor/github.com/jonboulle/clockwork/LICENSE 2023-09-29 
14:03:31.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/jonboulle/clockwork/LICENSE 1970-01-01 00:00:00.000000000 +0000 @@ -1,201 +0,0 @@ -Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. 
Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff -Nru temporal-1.21.5-1/src/vendor/github.com/jonboulle/clockwork/README.md temporal-1.22.5/src/vendor/github.com/jonboulle/clockwork/README.md --- temporal-1.21.5-1/src/vendor/github.com/jonboulle/clockwork/README.md 2023-09-29 14:03:31.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/jonboulle/clockwork/README.md 1970-01-01 00:00:00.000000000 +0000 @@ -1,80 +0,0 @@ -# clockwork - -[![Mentioned in Awesome Go](https://awesome.re/mentioned-badge-flat.svg)](https://github.com/avelino/awesome-go#utilities) - -[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/jonboulle/clockwork/ci.yaml?style=flat-square)](https://github.com/jonboulle/clockwork/actions?query=workflow%3ACI) -[![Go Report Card](https://goreportcard.com/badge/github.com/jonboulle/clockwork?style=flat-square)](https://goreportcard.com/report/github.com/jonboulle/clockwork) -![Go Version](https://img.shields.io/badge/go%20version-%3E=1.15-61CFDD.svg?style=flat-square) -[![go.dev reference](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/mod/github.com/jonboulle/clockwork) - -**A simple fake clock for Go.** - - -## Usage - -Replace uses of the `time` package with the `clockwork.Clock` interface instead. - -For example, instead of using `time.Sleep` directly: - -```go -func myFunc() { - time.Sleep(3 * time.Second) - doSomething() -} -``` - -Inject a clock and use its `Sleep` method instead: - -```go -func myFunc(clock clockwork.Clock) { - clock.Sleep(3 * time.Second) - doSomething() -} -``` - -Now you can easily test `myFunc` with a `FakeClock`: - -```go -func TestMyFunc(t *testing.T) { - c := clockwork.NewFakeClock() - - // Start our sleepy function - var wg sync.WaitGroup - wg.Add(1) - go func() { - myFunc(c) - wg.Done() - }() - - // Ensure we wait until myFunc is sleeping - c.BlockUntil(1) - - assertState() - - // Advance the FakeClock forward in time - c.Advance(3 * time.Second) - - // Wait until the function completes - wg.Wait() - - assertState() -} -``` - -and in production builds, simply inject the real clock instead: - -```go -myFunc(clockwork.NewRealClock()) -``` - -See [example_test.go](example_test.go) for a full example. - - -# Credits - -clockwork is inspired by @wickman's [threaded fake clock](https://gist.github.com/wickman/3840816), and the [Golang playground](https://blog.golang.org/playground#TOC_3.1.) - - -## License - -Apache License, Version 2.0. Please see [License File](LICENSE) for more information. 
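The clockwork README removed just above documents the clock-injection pattern that the vendored package supported: code takes a clockwork.Clock instead of calling the time package directly, and tests drive it with a FakeClock. For reference, a minimal sketch of that pattern, using only the API visible in the deleted files (Clock.Sleep, NewFakeClock, BlockUntil, Advance); the poll/work names are illustrative, not from the source:

package example

import (
	"sync"
	"testing"
	"time"

	"github.com/jonboulle/clockwork"
)

// poll waits one interval on the injected clock before doing work, so a test
// can advance time deterministically instead of sleeping for real.
func poll(clock clockwork.Clock, work func()) {
	clock.Sleep(time.Second)
	work()
}

func TestPoll(t *testing.T) {
	fc := clockwork.NewFakeClock()

	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		poll(fc, func() {})
	}()

	fc.BlockUntil(1)        // wait until poll is sleeping on the fake clock
	fc.Advance(time.Second) // move fake time forward; poll wakes up
	wg.Wait()
}

In production code the same function would simply be called with clockwork.NewRealClock().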
diff -Nru temporal-1.21.5-1/src/vendor/github.com/jonboulle/clockwork/clockwork.go temporal-1.22.5/src/vendor/github.com/jonboulle/clockwork/clockwork.go --- temporal-1.21.5-1/src/vendor/github.com/jonboulle/clockwork/clockwork.go 2023-09-29 14:03:31.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/jonboulle/clockwork/clockwork.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,349 +0,0 @@ -package clockwork - -import ( - "context" - "sort" - "sync" - "time" -) - -// Clock provides an interface that packages can use instead of directly using -// the [time] module, so that chronology-related behavior can be tested. -type Clock interface { - After(d time.Duration) <-chan time.Time - Sleep(d time.Duration) - Now() time.Time - Since(t time.Time) time.Duration - NewTicker(d time.Duration) Ticker - NewTimer(d time.Duration) Timer - AfterFunc(d time.Duration, f func()) Timer -} - -// FakeClock provides an interface for a clock which can be manually advanced -// through time. -// -// FakeClock maintains a list of "waiters," which consists of all callers -// waiting on the underlying clock (i.e. Tickers and Timers including callers of -// Sleep or After). Users can call BlockUntil to block until the clock has an -// expected number of waiters. -type FakeClock interface { - Clock - // Advance advances the FakeClock to a new point in time, ensuring any existing - // waiters are notified appropriately before returning. - Advance(d time.Duration) - // BlockUntil blocks until the FakeClock has the given number of waiters. - BlockUntil(waiters int) -} - -// NewRealClock returns a Clock which simply delegates calls to the actual time -// package; it should be used by packages in production. -func NewRealClock() Clock { - return &realClock{} -} - -// NewFakeClock returns a FakeClock implementation which can be -// manually advanced through time for testing. The initial time of the -// FakeClock will be the current system time. -// -// Tests that require a deterministic time must use NewFakeClockAt. -func NewFakeClock() FakeClock { - return NewFakeClockAt(time.Now()) -} - -// NewFakeClockAt returns a FakeClock initialised at the given time.Time. -func NewFakeClockAt(t time.Time) FakeClock { - return &fakeClock{ - time: t, - } -} - -type realClock struct{} - -func (rc *realClock) After(d time.Duration) <-chan time.Time { - return time.After(d) -} - -func (rc *realClock) Sleep(d time.Duration) { - time.Sleep(d) -} - -func (rc *realClock) Now() time.Time { - return time.Now() -} - -func (rc *realClock) Since(t time.Time) time.Duration { - return rc.Now().Sub(t) -} - -func (rc *realClock) NewTicker(d time.Duration) Ticker { - return realTicker{time.NewTicker(d)} -} - -func (rc *realClock) NewTimer(d time.Duration) Timer { - return realTimer{time.NewTimer(d)} -} - -func (rc *realClock) AfterFunc(d time.Duration, f func()) Timer { - return realTimer{time.AfterFunc(d, f)} -} - -type fakeClock struct { - // l protects all attributes of the clock, including all attributes of all - // waiters and blockers. - l sync.RWMutex - waiters []expirer - blockers []*blocker - time time.Time -} - -// blocker is a caller of BlockUntil. -type blocker struct { - count int - - // ch is closed when the underlying clock has the specificed number of blockers. - ch chan struct{} -} - -// expirer is a timer or ticker that expires at some point in the future. -type expirer interface { - // expire the expirer at the given time, returning the desired duration until - // the next expiration, if any. 
- expire(now time.Time) (next *time.Duration) - - // Get and set the expiration time. - expiry() time.Time - setExpiry(time.Time) -} - -// After mimics [time.After]; it waits for the given duration to elapse on the -// fakeClock, then sends the current time on the returned channel. -func (fc *fakeClock) After(d time.Duration) <-chan time.Time { - return fc.NewTimer(d).Chan() -} - -// Sleep blocks until the given duration has passed on the fakeClock. -func (fc *fakeClock) Sleep(d time.Duration) { - <-fc.After(d) -} - -// Now returns the current time of the fakeClock -func (fc *fakeClock) Now() time.Time { - fc.l.RLock() - defer fc.l.RUnlock() - return fc.time -} - -// Since returns the duration that has passed since the given time on the -// fakeClock. -func (fc *fakeClock) Since(t time.Time) time.Duration { - return fc.Now().Sub(t) -} - -// NewTicker returns a Ticker that will expire only after calls to -// fakeClock.Advance() have moved the clock past the given duration. -func (fc *fakeClock) NewTicker(d time.Duration) Ticker { - var ft *fakeTicker - ft = &fakeTicker{ - firer: newFirer(), - d: d, - reset: func(d time.Duration) { fc.set(ft, d) }, - stop: func() { fc.stop(ft) }, - } - fc.set(ft, d) - return ft -} - -// NewTimer returns a Timer that will fire only after calls to -// fakeClock.Advance() have moved the clock past the given duration. -func (fc *fakeClock) NewTimer(d time.Duration) Timer { - return fc.newTimer(d, nil) -} - -// AfterFunc mimics [time.AfterFunc]; it returns a Timer that will invoke the -// given function only after calls to fakeClock.Advance() have moved the clock -// past the given duration. -func (fc *fakeClock) AfterFunc(d time.Duration, f func()) Timer { - return fc.newTimer(d, f) -} - -// newTimer returns a new timer, using an optional afterFunc. -func (fc *fakeClock) newTimer(d time.Duration, afterfunc func()) *fakeTimer { - var ft *fakeTimer - ft = &fakeTimer{ - firer: newFirer(), - reset: func(d time.Duration) bool { - fc.l.Lock() - defer fc.l.Unlock() - // fc.l must be held across the calls to stopExpirer & setExpirer. - stopped := fc.stopExpirer(ft) - fc.setExpirer(ft, d) - return stopped - }, - stop: func() bool { return fc.stop(ft) }, - - afterFunc: afterfunc, - } - fc.set(ft, d) - return ft -} - -// Advance advances fakeClock to a new point in time, ensuring waiters and -// blockers are notified appropriately before returning. -func (fc *fakeClock) Advance(d time.Duration) { - fc.l.Lock() - defer fc.l.Unlock() - end := fc.time.Add(d) - // Expire the earliest waiter until the earliest waiter's expiration is after - // end. - // - // We don't iterate because the callback of the waiter might register a new - // waiter, so the list of waiters might change as we execute this. - for len(fc.waiters) > 0 && !end.Before(fc.waiters[0].expiry()) { - w := fc.waiters[0] - fc.waiters = fc.waiters[1:] - - // Use the waiter's expriation as the current time for this expiration. - now := w.expiry() - fc.time = now - if d := w.expire(now); d != nil { - // Set the new exipration if needed. - fc.setExpirer(w, *d) - } - } - fc.time = end -} - -// BlockUntil blocks until the fakeClock has the given number of waiters. -// -// Prefer BlockUntilContext, which offers context cancellation to prevent -// deadlock. -// -// Deprecation warning: This function might be deprecated in later versions. 
-func (fc *fakeClock) BlockUntil(n int) { - b := fc.newBlocker(n) - if b == nil { - return - } - <-b.ch -} - -// BlockUntilContext blocks until the fakeClock has the given number of waiters -// or the context is cancelled. -func (fc *fakeClock) BlockUntilContext(ctx context.Context, n int) error { - b := fc.newBlocker(n) - if b == nil { - return nil - } - - select { - case <-b.ch: - return nil - case <-ctx.Done(): - return ctx.Err() - } -} - -func (fc *fakeClock) newBlocker(n int) *blocker { - fc.l.Lock() - defer fc.l.Unlock() - // Fast path: we already have >= n waiters. - if len(fc.waiters) >= n { - return nil - } - // Set up a new blocker to wait for more waiters. - b := &blocker{ - count: n, - ch: make(chan struct{}), - } - fc.blockers = append(fc.blockers, b) - return b -} - -// stop stops an expirer, returning true if the expirer was stopped. -func (fc *fakeClock) stop(e expirer) bool { - fc.l.Lock() - defer fc.l.Unlock() - return fc.stopExpirer(e) -} - -// stopExpirer stops an expirer, returning true if the expirer was stopped. -// -// The caller must hold fc.l. -func (fc *fakeClock) stopExpirer(e expirer) bool { - for i, t := range fc.waiters { - if t == e { - // Remove element, maintaining order. - copy(fc.waiters[i:], fc.waiters[i+1:]) - fc.waiters[len(fc.waiters)-1] = nil - fc.waiters = fc.waiters[:len(fc.waiters)-1] - return true - } - } - return false -} - -// set sets an expirer to expire at a future point in time. -func (fc *fakeClock) set(e expirer, d time.Duration) { - fc.l.Lock() - defer fc.l.Unlock() - fc.setExpirer(e, d) -} - -// setExpirer sets an expirer to expire at a future point in time. -// -// The caller must hold fc.l. -func (fc *fakeClock) setExpirer(e expirer, d time.Duration) { - if d.Nanoseconds() <= 0 { - // special case - trigger immediately, never reset. - // - // TODO: Explain what cases this covers. - e.expire(fc.time) - return - } - // Add the expirer to the set of waiters and notify any blockers. - e.setExpiry(fc.time.Add(d)) - fc.waiters = append(fc.waiters, e) - sort.Slice(fc.waiters, func(i int, j int) bool { - return fc.waiters[i].expiry().Before(fc.waiters[j].expiry()) - }) - - // Notify blockers of our new waiter. - var blocked []*blocker - count := len(fc.waiters) - for _, b := range fc.blockers { - if b.count <= count { - close(b.ch) - continue - } - blocked = append(blocked, b) - } - fc.blockers = blocked -} - -// firer is used by fakeTimer and fakeTicker used to help implement expirer. -type firer struct { - // The channel associated with the firer, used to send expriation times. - c chan time.Time - - // The time when the firer expires. Only meaningful if the firer is currently - // one of a fakeClock's waiters. - exp time.Time -} - -func newFirer() firer { - return firer{c: make(chan time.Time, 1)} -} - -func (f *firer) Chan() <-chan time.Time { - return f.c -} - -// expiry implements expirer. -func (f *firer) expiry() time.Time { - return f.exp -} - -// setExpiry implements expirer. 
-func (f *firer) setExpiry(t time.Time) { - f.exp = t -} diff -Nru temporal-1.21.5-1/src/vendor/github.com/jonboulle/clockwork/context.go temporal-1.22.5/src/vendor/github.com/jonboulle/clockwork/context.go --- temporal-1.21.5-1/src/vendor/github.com/jonboulle/clockwork/context.go 2023-09-29 14:03:31.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/jonboulle/clockwork/context.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,25 +0,0 @@ -package clockwork - -import ( - "context" -) - -// contextKey is private to this package so we can ensure uniqueness here. This -// type identifies context values provided by this package. -type contextKey string - -// keyClock provides a clock for injecting during tests. If absent, a real clock should be used. -var keyClock = contextKey("clock") // clockwork.Clock - -// AddToContext creates a derived context that references the specified clock. -func AddToContext(ctx context.Context, clock Clock) context.Context { - return context.WithValue(ctx, keyClock, clock) -} - -// FromContext extracts a clock from the context. If not present, a real clock is returned. -func FromContext(ctx context.Context) Clock { - if clock, ok := ctx.Value(keyClock).(Clock); ok { - return clock - } - return NewRealClock() -} diff -Nru temporal-1.21.5-1/src/vendor/github.com/jonboulle/clockwork/ticker.go temporal-1.22.5/src/vendor/github.com/jonboulle/clockwork/ticker.go --- temporal-1.21.5-1/src/vendor/github.com/jonboulle/clockwork/ticker.go 2023-09-29 14:03:31.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/jonboulle/clockwork/ticker.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,48 +0,0 @@ -package clockwork - -import "time" - -// Ticker provides an interface which can be used instead of directly using -// [time.Ticker]. The real-time ticker t provides ticks through t.C which -// becomes t.Chan() to make this channel requirement definable in this -// interface. -type Ticker interface { - Chan() <-chan time.Time - Reset(d time.Duration) - Stop() -} - -type realTicker struct{ *time.Ticker } - -func (r realTicker) Chan() <-chan time.Time { - return r.C -} - -type fakeTicker struct { - firer - - // reset and stop provide the implementation of the respective exported - // functions. - reset func(d time.Duration) - stop func() - - // The duration of the ticker. - d time.Duration -} - -func (f *fakeTicker) Reset(d time.Duration) { - f.reset(d) -} - -func (f *fakeTicker) Stop() { - f.stop() -} - -func (f *fakeTicker) expire(now time.Time) *time.Duration { - // Never block on expiration. - select { - case f.c <- now: - default: - } - return &f.d -} diff -Nru temporal-1.21.5-1/src/vendor/github.com/jonboulle/clockwork/timer.go temporal-1.22.5/src/vendor/github.com/jonboulle/clockwork/timer.go --- temporal-1.21.5-1/src/vendor/github.com/jonboulle/clockwork/timer.go 2023-09-29 14:03:31.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/jonboulle/clockwork/timer.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,53 +0,0 @@ -package clockwork - -import "time" - -// Timer provides an interface which can be used instead of directly using -// [time.Timer]. The real-time timer t provides events through t.C which becomes -// t.Chan() to make this channel requirement definable in this interface. 
-type Timer interface { - Chan() <-chan time.Time - Reset(d time.Duration) bool - Stop() bool -} - -type realTimer struct{ *time.Timer } - -func (r realTimer) Chan() <-chan time.Time { - return r.C -} - -type fakeTimer struct { - firer - - // reset and stop provide the implmenetation of the respective exported - // functions. - reset func(d time.Duration) bool - stop func() bool - - // If present when the timer fires, the timer calls afterFunc in its own - // goroutine rather than sending the time on Chan(). - afterFunc func() -} - -func (f *fakeTimer) Reset(d time.Duration) bool { - return f.reset(d) -} - -func (f *fakeTimer) Stop() bool { - return f.stop() -} - -func (f *fakeTimer) expire(now time.Time) *time.Duration { - if f.afterFunc != nil { - go f.afterFunc() - return nil - } - - // Never block on expiration. - select { - case f.c <- now: - default: - } - return nil -} diff -Nru temporal-1.21.5-1/src/vendor/github.com/lib/pq/conn.go temporal-1.22.5/src/vendor/github.com/lib/pq/conn.go --- temporal-1.21.5-1/src/vendor/github.com/lib/pq/conn.go 2023-09-29 14:03:31.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/lib/pq/conn.go 2024-02-23 09:46:11.000000000 +0000 @@ -2,6 +2,7 @@ import ( "bufio" + "bytes" "context" "crypto/md5" "crypto/sha256" @@ -112,7 +113,9 @@ func (d defaultDialer) Dial(network, address string) (net.Conn, error) { return d.d.Dial(network, address) } -func (d defaultDialer) DialTimeout(network, address string, timeout time.Duration) (net.Conn, error) { +func (d defaultDialer) DialTimeout( + network, address string, timeout time.Duration, +) (net.Conn, error) { ctx, cancel := context.WithTimeout(context.Background(), timeout) defer cancel() return d.DialContext(ctx, network, address) @@ -260,47 +263,56 @@ } defer file.Close() scanner := bufio.NewScanner(io.Reader(file)) + // From: https://github.com/tg/pgpass/blob/master/reader.go + for scanner.Scan() { + if scanText(scanner.Text(), o) { + break + } + } +} + +// GetFields is a helper function for scanText. +func getFields(s string) []string { + fs := make([]string, 0, 5) + f := make([]rune, 0, len(s)) + + var esc bool + for _, c := range s { + switch { + case esc: + f = append(f, c) + esc = false + case c == '\\': + esc = true + case c == ':': + fs = append(fs, string(f)) + f = f[:0] + default: + f = append(f, c) + } + } + return append(fs, string(f)) +} + +// ScanText assists HandlePgpass in it's objective. 
+func scanText(line string, o values) bool { hostname := o["host"] ntw, _ := network(o) port := o["port"] db := o["dbname"] username := o["user"] - // From: https://github.com/tg/pgpass/blob/master/reader.go - getFields := func(s string) []string { - fs := make([]string, 0, 5) - f := make([]rune, 0, len(s)) - - var esc bool - for _, c := range s { - switch { - case esc: - f = append(f, c) - esc = false - case c == '\\': - esc = true - case c == ':': - fs = append(fs, string(f)) - f = f[:0] - default: - f = append(f, c) - } - } - return append(fs, string(f)) + if len(line) == 0 || line[0] == '#' { + return false } - for scanner.Scan() { - line := scanner.Text() - if len(line) == 0 || line[0] == '#' { - continue - } - split := getFields(line) - if len(split) != 5 { - continue - } - if (split[0] == "*" || split[0] == hostname || (split[0] == "localhost" && (hostname == "" || ntw == "unix"))) && (split[1] == "*" || split[1] == port) && (split[2] == "*" || split[2] == db) && (split[3] == "*" || split[3] == username) { - o["password"] = split[4] - return - } + split := getFields(line) + if len(split) != 5 { + return false + } + if (split[0] == "*" || split[0] == hostname || (split[0] == "localhost" && (hostname == "" || ntw == "unix"))) && (split[1] == "*" || split[1] == port) && (split[2] == "*" || split[2] == db) && (split[3] == "*" || split[3] == username) { + o["password"] = split[4] + return true } + return false } func (cn *conn) writeBuf(b byte) *writeBuf { @@ -765,7 +777,9 @@ // Decides which column formats to use for a prepared statement. The input is // an array of type oids, one element per result column. -func decideColumnFormats(colTyps []fieldDesc, forceText bool) (colFmts []format, colFmtData []byte) { +func decideColumnFormats( + colTyps []fieldDesc, forceText bool, +) (colFmts []format, colFmtData []byte) { if len(colTyps) == 0 { return nil, colFmtDataAllText } @@ -1631,10 +1645,10 @@ // QuoteIdentifier quotes an "identifier" (e.g. a table or a column name) to be // used as part of an SQL statement. For example: // -// tblname := "my_table" -// data := "my_data" -// quoted := pq.QuoteIdentifier(tblname) -// err := db.Exec(fmt.Sprintf("INSERT INTO %s VALUES ($1)", quoted), data) +// tblname := "my_table" +// data := "my_data" +// quoted := pq.QuoteIdentifier(tblname) +// err := db.Exec(fmt.Sprintf("INSERT INTO %s VALUES ($1)", quoted), data) // // Any double quotes in name will be escaped. The quoted identifier will be // case sensitive when used in a query. If the input string contains a zero @@ -1647,12 +1661,24 @@ return `"` + strings.Replace(name, `"`, `""`, -1) + `"` } +// BufferQuoteIdentifier satisfies the same purpose as QuoteIdentifier, but backed by a +// byte buffer. +func BufferQuoteIdentifier(name string, buffer *bytes.Buffer) { + end := strings.IndexRune(name, 0) + if end > -1 { + name = name[:end] + } + buffer.WriteRune('"') + buffer.WriteString(strings.Replace(name, `"`, `""`, -1)) + buffer.WriteRune('"') +} + // QuoteLiteral quotes a 'literal' (e.g. a parameter, often used to pass literal // to DDL and other statements that do not accept parameters) to be used as part // of an SQL statement. For example: // -// exp_date := pq.QuoteLiteral("2023-01-05 15:00:00Z") -// err := db.Exec(fmt.Sprintf("CREATE ROLE my_user VALID UNTIL %s", exp_date)) +// exp_date := pq.QuoteLiteral("2023-01-05 15:00:00Z") +// err := db.Exec(fmt.Sprintf("CREATE ROLE my_user VALID UNTIL %s", exp_date)) // // Any single quotes in name will be escaped. Any backslashes (i.e. 
"\") will be // replaced by two backslashes (i.e. "\\") and the C-style escape identifier @@ -1808,7 +1834,11 @@ } } -func (cn *conn) readStatementDescribeResponse() (paramTyps []oid.Oid, colNames []string, colTyps []fieldDesc) { +func (cn *conn) readStatementDescribeResponse() ( + paramTyps []oid.Oid, + colNames []string, + colTyps []fieldDesc, +) { for { t, r := cn.recv1() switch t { @@ -1896,7 +1926,9 @@ } // Only for Exec(), since we ignore the returned data -func (cn *conn) readExecuteResponse(protocolState string) (res driver.Result, commandTag string, err error) { +func (cn *conn) readExecuteResponse( + protocolState string, +) (res driver.Result, commandTag string, err error) { for { t, r := cn.recv1() switch t { @@ -2062,3 +2094,19 @@ } return -1 // discard } + +// The database/sql/driver package says: +// All Conn implementations should implement the following interfaces: Pinger, SessionResetter, and Validator. +var _ driver.Pinger = &conn{} +var _ driver.SessionResetter = &conn{} + +func (cn *conn) ResetSession(ctx context.Context) error { + // Ensure bad connections are reported: From database/sql/driver: + // If a connection is never returned to the connection pool but immediately reused, then + // ResetSession is called prior to reuse but IsValid is not called. + return cn.err.get() +} + +func (cn *conn) IsValid() bool { + return cn.err.get() == nil +} diff -Nru temporal-1.21.5-1/src/vendor/github.com/lib/pq/conn_go115.go temporal-1.22.5/src/vendor/github.com/lib/pq/conn_go115.go --- temporal-1.21.5-1/src/vendor/github.com/lib/pq/conn_go115.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/lib/pq/conn_go115.go 2024-02-23 09:46:11.000000000 +0000 @@ -0,0 +1,8 @@ +//go:build go1.15 +// +build go1.15 + +package pq + +import "database/sql/driver" + +var _ driver.Validator = &conn{} diff -Nru temporal-1.21.5-1/src/vendor/github.com/lib/pq/copy.go temporal-1.22.5/src/vendor/github.com/lib/pq/copy.go --- temporal-1.21.5-1/src/vendor/github.com/lib/pq/copy.go 2023-09-29 14:03:31.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/lib/pq/copy.go 2024-02-23 09:46:11.000000000 +0000 @@ -1,6 +1,7 @@ package pq import ( + "bytes" "context" "database/sql/driver" "encoding/binary" @@ -20,29 +21,35 @@ // CopyIn creates a COPY FROM statement which can be prepared with // Tx.Prepare(). The target table should be visible in search_path. func CopyIn(table string, columns ...string) string { - stmt := "COPY " + QuoteIdentifier(table) + " (" + buffer := bytes.NewBufferString("COPY ") + BufferQuoteIdentifier(table, buffer) + buffer.WriteString(" (") + makeStmt(buffer, columns...) + return buffer.String() +} + +// MakeStmt makes the stmt string for CopyIn and CopyInSchema. +func makeStmt(buffer *bytes.Buffer, columns ...string) { + //s := bytes.NewBufferString() for i, col := range columns { if i != 0 { - stmt += ", " + buffer.WriteString(", ") } - stmt += QuoteIdentifier(col) + BufferQuoteIdentifier(col, buffer) } - stmt += ") FROM STDIN" - return stmt + buffer.WriteString(") FROM STDIN") } // CopyInSchema creates a COPY FROM statement which can be prepared with // Tx.Prepare(). func CopyInSchema(schema, table string, columns ...string) string { - stmt := "COPY " + QuoteIdentifier(schema) + "." 
+ QuoteIdentifier(table) + " (" - for i, col := range columns { - if i != 0 { - stmt += ", " - } - stmt += QuoteIdentifier(col) - } - stmt += ") FROM STDIN" - return stmt + buffer := bytes.NewBufferString("COPY ") + BufferQuoteIdentifier(schema, buffer) + buffer.WriteRune('.') + BufferQuoteIdentifier(table, buffer) + buffer.WriteString(" (") + makeStmt(buffer, columns...) + return buffer.String() } type copyin struct { diff -Nru temporal-1.21.5-1/src/vendor/github.com/prometheus/client_golang/prometheus/desc.go temporal-1.22.5/src/vendor/github.com/prometheus/client_golang/prometheus/desc.go --- temporal-1.21.5-1/src/vendor/github.com/prometheus/client_golang/prometheus/desc.go 2023-09-29 14:03:32.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/prometheus/client_golang/prometheus/desc.go 2024-02-23 09:46:11.000000000 +0000 @@ -18,12 +18,12 @@ "sort" "strings" - "github.com/prometheus/client_golang/prometheus/internal" - "github.com/cespare/xxhash/v2" dto "github.com/prometheus/client_model/go" "github.com/prometheus/common/model" "google.golang.org/protobuf/proto" + + "github.com/prometheus/client_golang/prometheus/internal" ) // Desc is the descriptor used by every Prometheus Metric. It is essentially diff -Nru temporal-1.21.5-1/src/vendor/github.com/prometheus/client_golang/prometheus/histogram.go temporal-1.22.5/src/vendor/github.com/prometheus/client_golang/prometheus/histogram.go --- temporal-1.21.5-1/src/vendor/github.com/prometheus/client_golang/prometheus/histogram.go 2023-09-29 14:03:32.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/prometheus/client_golang/prometheus/histogram.go 2024-02-23 09:46:11.000000000 +0000 @@ -401,7 +401,7 @@ // Histogram by a Prometheus server with that feature enabled (requires // Prometheus v2.40+). Sparse buckets are exponential buckets covering // the whole float64 range (with the exception of the “zero” bucket, see - // SparseBucketsZeroThreshold below). From any one bucket to the next, + // NativeHistogramZeroThreshold below). From any one bucket to the next, // the width of the bucket grows by a constant // factor. NativeHistogramBucketFactor provides an upper bound for this // factor (exception see below). The smaller @@ -432,7 +432,7 @@ // bucket. For best results, this should be close to a bucket // boundary. This is usually the case if picking a power of two. If // NativeHistogramZeroThreshold is left at zero, - // DefSparseBucketsZeroThreshold is used as the threshold. To configure + // DefNativeHistogramZeroThreshold is used as the threshold. To configure // a zero bucket with an actual threshold of zero (i.e. only // observations of precisely zero will go into the zero bucket), set // NativeHistogramZeroThreshold to the NativeHistogramZeroThresholdZero @@ -639,8 +639,8 @@ if frac == 0.5 { key-- } - div := 1 << -schema - key = (key + div - 1) / div + offset := (1 << -schema) - 1 + key = (key + offset) >> -schema } if isInf { key++ @@ -817,7 +817,7 @@ } } -// limitSparsebuckets applies a strategy to limit the number of populated sparse +// limitBuckets applies a strategy to limit the number of populated sparse // buckets. 
It's generally best effort, and there are situations where the // number can go higher (if even the lowest resolution isn't enough to reduce // the number sufficiently, or if the provided counts aren't fully updated yet diff -Nru temporal-1.21.5-1/src/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go temporal-1.22.5/src/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go --- temporal-1.21.5-1/src/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go 2023-09-29 14:03:32.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go 2024-02-23 09:46:11.000000000 +0000 @@ -37,6 +37,7 @@ "fmt" "io" "net/http" + "strconv" "strings" "sync" "time" @@ -47,9 +48,10 @@ ) const ( - contentTypeHeader = "Content-Type" - contentEncodingHeader = "Content-Encoding" - acceptEncodingHeader = "Accept-Encoding" + contentTypeHeader = "Content-Type" + contentEncodingHeader = "Content-Encoding" + acceptEncodingHeader = "Accept-Encoding" + processStartTimeHeader = "Process-Start-Time-Unix" ) var gzipPool = sync.Pool{ @@ -121,6 +123,9 @@ } h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) { + if !opts.ProcessStartTime.IsZero() { + rsp.Header().Set(processStartTimeHeader, strconv.FormatInt(opts.ProcessStartTime.Unix(), 10)) + } if inFlightSem != nil { select { case inFlightSem <- struct{}{}: // All good, carry on. @@ -366,6 +371,14 @@ // (which changes the identity of the resulting series on the Prometheus // server). EnableOpenMetrics bool + // ProcessStartTime allows setting process start timevalue that will be exposed + // with "Process-Start-Time-Unix" response header along with the metrics + // payload. This allow callers to have efficient transformations to cumulative + // counters (e.g. OpenTelemetry) or generally _created timestamp estimation per + // scrape target. + // NOTE: This feature is experimental and not covered by OpenMetrics or Prometheus + // exposition format. + ProcessStartTime time.Time } // gzipAccepted returns whether the client will accept gzip-encoded content. diff -Nru temporal-1.21.5-1/src/vendor/github.com/prometheus/client_golang/prometheus/vec.go temporal-1.22.5/src/vendor/github.com/prometheus/client_golang/prometheus/vec.go --- temporal-1.21.5-1/src/vendor/github.com/prometheus/client_golang/prometheus/vec.go 2023-09-29 14:03:32.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/prometheus/client_golang/prometheus/vec.go 2024-02-23 09:46:11.000000000 +0000 @@ -20,6 +20,24 @@ "github.com/prometheus/common/model" ) +var labelsPool = &sync.Pool{ + New: func() interface{} { + return make(Labels) + }, +} + +func getLabelsFromPool() Labels { + return labelsPool.Get().(Labels) +} + +func putLabelsToPool(labels Labels) { + for k := range labels { + delete(labels, k) + } + + labelsPool.Put(labels) +} + // MetricVec is a Collector to bundle metrics of the same name that differ in // their label values. MetricVec is not used directly but as a building block // for implementations of vectors of a given metric type, like GaugeVec, @@ -93,6 +111,8 @@ // there for pros and cons of the two methods. func (m *MetricVec) Delete(labels Labels) bool { labels = constrainLabels(m.desc, labels) + defer putLabelsToPool(labels) + h, err := m.hashLabels(labels) if err != nil { return false @@ -109,6 +129,8 @@ // To match curried labels with DeletePartialMatch, it must be called on the base vector. 
func (m *MetricVec) DeletePartialMatch(labels Labels) int { labels = constrainLabels(m.desc, labels) + defer putLabelsToPool(labels) + return m.metricMap.deleteByLabels(labels, m.curry) } @@ -229,6 +251,8 @@ // for example GaugeVec. func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) { labels = constrainLabels(m.desc, labels) + defer putLabelsToPool(labels) + h, err := m.hashLabels(labels) if err != nil { return nil, err @@ -647,15 +671,16 @@ } func constrainLabels(desc *Desc, labels Labels) Labels { - constrainedValues := make(Labels, len(labels)) + constrainedLabels := getLabelsFromPool() for l, v := range labels { if i, ok := indexOf(l, desc.variableLabels.labelNames()); ok { - constrainedValues[l] = desc.variableLabels[i].Constrain(v) - continue + v = desc.variableLabels[i].Constrain(v) } - constrainedValues[l] = v + + constrainedLabels[l] = v } - return constrainedValues + + return constrainedLabels } func constrainLabelValues(desc *Desc, lvs []string, curry []curriedLabelValue) []string { diff -Nru temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/.golangci.yml temporal-1.22.5/src/vendor/github.com/prometheus/procfs/.golangci.yml --- temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/.golangci.yml 2023-09-29 14:03:32.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/prometheus/procfs/.golangci.yml 2024-02-23 09:46:11.000000000 +0000 @@ -2,6 +2,7 @@ linters: enable: - godot + - misspell - revive linter-settings: @@ -10,3 +11,5 @@ exclude: # Ignore "See: URL" - 'See:' + misspell: + locale: US diff -Nru temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/README.md temporal-1.22.5/src/vendor/github.com/prometheus/procfs/README.md --- temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/README.md 2023-09-29 14:03:32.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/prometheus/procfs/README.md 2024-02-23 09:46:11.000000000 +0000 @@ -51,11 +51,11 @@ extracting the ttar file using `make fixtures/.unpacked` or just `make test`. ```bash -rm -rf fixtures +rm -rf testdata/fixtures make test ``` Next, make the required changes to the extracted files in the `fixtures` directory. When the changes are complete, run `make update_fixtures` to create a new `fixtures.ttar` file based on the updated `fixtures` directory. And finally, verify the changes using -`git diff fixtures.ttar`. +`git diff testdata/fixtures.ttar`. 
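The client_golang vec.go hunk above switches constrainLabels to maps recycled through a sync.Pool (labelsPool, getLabelsFromPool, putLabelsToPool) instead of allocating a fresh Labels map on every Delete/GetMetricWith call. A minimal, self-contained sketch of that map-recycling pattern, with illustrative names rather than the library's actual API:

package example

import "sync"

// pool hands out empty map[string]string values for reuse, mirroring the
// labelsPool introduced in the vec.go hunk above.
var pool = sync.Pool{
	New: func() interface{} { return make(map[string]string) },
}

func getMap() map[string]string {
	return pool.Get().(map[string]string)
}

// putMap clears the map before returning it to the pool so no stale
// key/value pairs leak into the next borrower.
func putMap(m map[string]string) {
	for k := range m {
		delete(m, k)
	}
	pool.Put(m)
}

// withScratch borrows a map for the duration of fn, then returns it.
func withScratch(fn func(map[string]string)) {
	m := getMap()
	defer putMap(m)
	fn(m)
}

The deferred put mirrors the `defer putLabelsToPool(labels)` calls added to the MetricVec methods: the borrowed map must only live for the duration of the call that obtained it.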
diff -Nru temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/arp.go temporal-1.22.5/src/vendor/github.com/prometheus/procfs/arp.go --- temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/arp.go 2023-09-29 14:03:32.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/prometheus/procfs/arp.go 2024-02-23 09:46:11.000000000 +0000 @@ -55,7 +55,7 @@ func (fs FS) GatherARPEntries() ([]ARPEntry, error) { data, err := os.ReadFile(fs.proc.Path("net/arp")) if err != nil { - return nil, fmt.Errorf("error reading arp %q: %w", fs.proc.Path("net/arp"), err) + return nil, fmt.Errorf("%s: error reading arp %s: %w", ErrFileRead, fs.proc.Path("net/arp"), err) } return parseARPEntries(data) @@ -78,11 +78,11 @@ } else if width == expectedDataWidth { entry, err := parseARPEntry(columns) if err != nil { - return []ARPEntry{}, fmt.Errorf("failed to parse ARP entry: %w", err) + return []ARPEntry{}, fmt.Errorf("%s: Failed to parse ARP entry: %v: %w", ErrFileParse, entry, err) } entries = append(entries, entry) } else { - return []ARPEntry{}, fmt.Errorf("%d columns were detected, but %d were expected", width, expectedDataWidth) + return []ARPEntry{}, fmt.Errorf("%s: %d columns found, but expected %d: %w", ErrFileParse, width, expectedDataWidth, err) } } diff -Nru temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/buddyinfo.go temporal-1.22.5/src/vendor/github.com/prometheus/procfs/buddyinfo.go --- temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/buddyinfo.go 2023-09-29 14:03:32.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/prometheus/procfs/buddyinfo.go 2024-02-23 09:46:11.000000000 +0000 @@ -55,7 +55,7 @@ parts := strings.Fields(line) if len(parts) < 4 { - return nil, fmt.Errorf("invalid number of fields when parsing buddyinfo") + return nil, fmt.Errorf("%w: Invalid number of fields, found: %v", ErrFileParse, parts) } node := strings.TrimRight(parts[1], ",") @@ -66,7 +66,7 @@ bucketCount = arraySize } else { if bucketCount != arraySize { - return nil, fmt.Errorf("mismatch in number of buddyinfo buckets, previous count %d, new count %d", bucketCount, arraySize) + return nil, fmt.Errorf("%w: mismatch in number of buddyinfo buckets, previous count %d, new count %d", ErrFileParse, bucketCount, arraySize) } } @@ -74,7 +74,7 @@ for i := 0; i < arraySize; i++ { sizes[i], err = strconv.ParseFloat(parts[i+4], 64) if err != nil { - return nil, fmt.Errorf("invalid value in buddyinfo: %w", err) + return nil, fmt.Errorf("%s: Invalid valid in buddyinfo: %f: %w", ErrFileParse, sizes[i], err) } } diff -Nru temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/cpuinfo.go temporal-1.22.5/src/vendor/github.com/prometheus/procfs/cpuinfo.go --- temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/cpuinfo.go 2023-09-29 14:03:32.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/prometheus/procfs/cpuinfo.go 2024-02-23 09:46:11.000000000 +0000 @@ -79,7 +79,7 @@ // find the first "processor" line firstLine := firstNonEmptyLine(scanner) if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") { - return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) + return nil, fmt.Errorf("%w: Cannot parse line: %q", ErrFileParse, firstLine) } field := strings.SplitN(firstLine, ": ", 2) v, err := strconv.ParseUint(field[1], 0, 32) @@ -192,9 +192,10 @@ scanner := bufio.NewScanner(bytes.NewReader(info)) firstLine := firstNonEmptyLine(scanner) - match, _ := regexp.MatchString("^[Pp]rocessor", firstLine) + match, err := regexp.MatchString("^[Pp]rocessor", 
firstLine) if !match || !strings.Contains(firstLine, ":") { - return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) + return nil, fmt.Errorf("%s: Cannot parse line: %q: %w", ErrFileParse, firstLine, err) + } field := strings.SplitN(firstLine, ": ", 2) cpuinfo := []CPUInfo{} @@ -258,7 +259,7 @@ firstLine := firstNonEmptyLine(scanner) if !strings.HasPrefix(firstLine, "vendor_id") || !strings.Contains(firstLine, ":") { - return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) + return nil, fmt.Errorf("%w: Cannot parse line: %q", ErrFileParse, firstLine) } field := strings.SplitN(firstLine, ": ", 2) cpuinfo := []CPUInfo{} @@ -283,7 +284,7 @@ if strings.HasPrefix(line, "processor") { match := cpuinfoS390XProcessorRegexp.FindStringSubmatch(line) if len(match) < 2 { - return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) + return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine) } cpu := commonCPUInfo v, err := strconv.ParseUint(match[1], 0, 32) @@ -343,7 +344,7 @@ // find the first "processor" line firstLine := firstNonEmptyLine(scanner) if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") { - return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) + return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine) } field := strings.SplitN(firstLine, ": ", 2) cpuinfo := []CPUInfo{} @@ -421,7 +422,7 @@ firstLine := firstNonEmptyLine(scanner) if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") { - return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) + return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine) } field := strings.SplitN(firstLine, ": ", 2) v, err := strconv.ParseUint(field[1], 0, 32) @@ -466,7 +467,7 @@ firstLine := firstNonEmptyLine(scanner) if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") { - return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) + return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine) } field := strings.SplitN(firstLine, ": ", 2) v, err := strconv.ParseUint(field[1], 0, 32) diff -Nru temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/crypto.go temporal-1.22.5/src/vendor/github.com/prometheus/procfs/crypto.go --- temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/crypto.go 2023-09-29 14:03:32.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/prometheus/procfs/crypto.go 2024-02-23 09:46:11.000000000 +0000 @@ -55,12 +55,13 @@ path := fs.proc.Path("crypto") b, err := util.ReadFileNoStat(path) if err != nil { - return nil, fmt.Errorf("error reading crypto %q: %w", path, err) + return nil, fmt.Errorf("%s: Cannot read file %v: %w", ErrFileRead, b, err) + } crypto, err := parseCrypto(bytes.NewReader(b)) if err != nil { - return nil, fmt.Errorf("error parsing crypto %q: %w", path, err) + return nil, fmt.Errorf("%s: Cannot parse %v: %w", ErrFileParse, crypto, err) } return crypto, nil @@ -83,7 +84,7 @@ kv := strings.Split(text, ":") if len(kv) != 2 { - return nil, fmt.Errorf("malformed crypto line: %q", text) + return nil, fmt.Errorf("%w: Cannot parae line: %q", ErrFileParse, text) } k := strings.TrimSpace(kv[0]) diff -Nru temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/fscache.go temporal-1.22.5/src/vendor/github.com/prometheus/procfs/fscache.go --- temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/fscache.go 2023-09-29 14:03:32.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/prometheus/procfs/fscache.go 2024-02-23 09:46:11.000000000 +0000 @@ -236,7 +236,7 @@ m, err := 
parseFscacheinfo(bytes.NewReader(b)) if err != nil { - return Fscacheinfo{}, fmt.Errorf("failed to parse Fscacheinfo: %w", err) + return Fscacheinfo{}, fmt.Errorf("%s: Cannot parse %v: %w", ErrFileParse, m, err) } return *m, nil @@ -245,7 +245,7 @@ func setFSCacheFields(fields []string, setFields ...*uint64) error { var err error if len(fields) < len(setFields) { - return fmt.Errorf("Insufficient number of fields, expected %v, got %v", len(setFields), len(fields)) + return fmt.Errorf("%s: Expected %d, but got %d: %w", ErrFileParse, len(setFields), len(fields), err) } for i := range setFields { @@ -263,7 +263,7 @@ for s.Scan() { fields := strings.Fields(s.Text()) if len(fields) < 2 { - return nil, fmt.Errorf("malformed Fscacheinfo line: %q", s.Text()) + return nil, fmt.Errorf("%w: malformed Fscacheinfo line: %q", ErrFileParse, s.Text()) } switch fields[0] { diff -Nru temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/ipvs.go temporal-1.22.5/src/vendor/github.com/prometheus/procfs/ipvs.go --- temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/ipvs.go 2023-09-29 14:03:32.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/prometheus/procfs/ipvs.go 2024-02-23 09:46:11.000000000 +0000 @@ -221,15 +221,16 @@ case 46: ip = net.ParseIP(s[1:40]) if ip == nil { - return nil, 0, fmt.Errorf("invalid IPv6 address: %s", s[1:40]) + return nil, 0, fmt.Errorf("%s: Invalid IPv6 addr %s: %w", ErrFileParse, s[1:40], err) } default: - return nil, 0, fmt.Errorf("unexpected IP:Port: %s", s) + return nil, 0, fmt.Errorf("%s: Unexpected IP:Port %s: %w", ErrFileParse, s, err) } portString := s[len(s)-4:] if len(portString) != 4 { - return nil, 0, fmt.Errorf("unexpected port string format: %s", portString) + return nil, 0, + fmt.Errorf("%s: Unexpected port string format %s: %w", ErrFileParse, portString, err) } port, err := strconv.ParseUint(portString, 16, 16) if err != nil { diff -Nru temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/loadavg.go temporal-1.22.5/src/vendor/github.com/prometheus/procfs/loadavg.go --- temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/loadavg.go 2023-09-29 14:03:32.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/prometheus/procfs/loadavg.go 2024-02-23 09:46:11.000000000 +0000 @@ -44,14 +44,14 @@ loads := make([]float64, 3) parts := strings.Fields(string(loadavgBytes)) if len(parts) < 3 { - return nil, fmt.Errorf("malformed loadavg line: too few fields in loadavg string: %q", string(loadavgBytes)) + return nil, fmt.Errorf("%w: Malformed line %q", ErrFileParse, string(loadavgBytes)) } var err error for i, load := range parts[0:3] { loads[i], err = strconv.ParseFloat(load, 64) if err != nil { - return nil, fmt.Errorf("could not parse load %q: %w", load, err) + return nil, fmt.Errorf("%s: Cannot parse load: %f: %w", ErrFileParse, loads[i], err) } } return &LoadAvg{ diff -Nru temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/mdstat.go temporal-1.22.5/src/vendor/github.com/prometheus/procfs/mdstat.go --- temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/mdstat.go 2023-09-29 14:03:32.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/prometheus/procfs/mdstat.go 2024-02-23 09:46:11.000000000 +0000 @@ -70,7 +70,7 @@ } mdstat, err := parseMDStat(data) if err != nil { - return nil, fmt.Errorf("error parsing mdstat %q: %w", fs.proc.Path("mdstat"), err) + return nil, fmt.Errorf("%s: Cannot parse %v: %w", ErrFileParse, fs.proc.Path("mdstat"), err) } return mdstat, nil } @@ -90,13 +90,13 @@ deviceFields := strings.Fields(line) if 
len(deviceFields) < 3 { - return nil, fmt.Errorf("not enough fields in mdline (expected at least 3): %s", line) + return nil, fmt.Errorf("%s: Expected 3+ lines, got %q", ErrFileParse, line) } mdName := deviceFields[0] // mdx state := deviceFields[2] // active or inactive if len(lines) <= i+3 { - return nil, fmt.Errorf("error parsing %q: too few lines for md device", mdName) + return nil, fmt.Errorf("%w: Too few lines for md device: %q", ErrFileParse, mdName) } // Failed disks have the suffix (F) & Spare disks have the suffix (S). @@ -105,7 +105,7 @@ active, total, down, size, err := evalStatusLine(lines[i], lines[i+1]) if err != nil { - return nil, fmt.Errorf("error parsing md device lines: %w", err) + return nil, fmt.Errorf("%s: Cannot parse md device lines: %v: %w", ErrFileParse, active, err) } syncLineIdx := i + 2 @@ -140,7 +140,7 @@ } else { syncedBlocks, pct, finish, speed, err = evalRecoveryLine(lines[syncLineIdx]) if err != nil { - return nil, fmt.Errorf("error parsing sync line in md device %q: %w", mdName, err) + return nil, fmt.Errorf("%s: Cannot parse sync line in md device: %q: %w", ErrFileParse, mdName, err) } } } @@ -168,13 +168,13 @@ func evalStatusLine(deviceLine, statusLine string) (active, total, down, size int64, err error) { statusFields := strings.Fields(statusLine) if len(statusFields) < 1 { - return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q", statusLine) + return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected statusline %q: %w", ErrFileParse, statusLine, err) } sizeStr := statusFields[0] size, err = strconv.ParseInt(sizeStr, 10, 64) if err != nil { - return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err) + return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected statusline %q: %w", ErrFileParse, statusLine, err) } if strings.Contains(deviceLine, "raid0") || strings.Contains(deviceLine, "linear") { @@ -189,17 +189,17 @@ matches := statusLineRE.FindStringSubmatch(statusLine) if len(matches) != 5 { - return 0, 0, 0, 0, fmt.Errorf("couldn't find all the substring matches: %s", statusLine) + return 0, 0, 0, 0, fmt.Errorf("%s: Could not fild all substring matches %s: %w", ErrFileParse, statusLine, err) } total, err = strconv.ParseInt(matches[2], 10, 64) if err != nil { - return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err) + return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected statusline %q: %w", ErrFileParse, statusLine, err) } active, err = strconv.ParseInt(matches[3], 10, 64) if err != nil { - return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err) + return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected active %d: %w", ErrFileParse, active, err) } down = int64(strings.Count(matches[4], "_")) @@ -209,42 +209,42 @@ func evalRecoveryLine(recoveryLine string) (syncedBlocks int64, pct float64, finish float64, speed float64, err error) { matches := recoveryLineBlocksRE.FindStringSubmatch(recoveryLine) if len(matches) != 2 { - return 0, 0, 0, 0, fmt.Errorf("unexpected recoveryLine: %s", recoveryLine) + return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected recoveryLine %s: %w", ErrFileParse, recoveryLine, err) } syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64) if err != nil { - return 0, 0, 0, 0, fmt.Errorf("error parsing int from recoveryLine %q: %w", recoveryLine, err) + return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected parsing of recoveryLine %q: %w", ErrFileParse, recoveryLine, err) } // Get percentage complete matches = recoveryLinePctRE.FindStringSubmatch(recoveryLine) if len(matches) != 2 { - return syncedBlocks, 0, 0, 0, 
fmt.Errorf("unexpected recoveryLine matching percentage: %s", recoveryLine) + return syncedBlocks, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching percentage %s", ErrFileParse, recoveryLine) } pct, err = strconv.ParseFloat(strings.TrimSpace(matches[1]), 64) if err != nil { - return syncedBlocks, 0, 0, 0, fmt.Errorf("error parsing float from recoveryLine %q: %w", recoveryLine, err) + return syncedBlocks, 0, 0, 0, fmt.Errorf("%w: Error parsing float from recoveryLine %q", ErrFileParse, recoveryLine) } // Get time expected left to complete matches = recoveryLineFinishRE.FindStringSubmatch(recoveryLine) if len(matches) != 2 { - return syncedBlocks, pct, 0, 0, fmt.Errorf("unexpected recoveryLine matching est. finish time: %s", recoveryLine) + return syncedBlocks, pct, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching est. finish time: %s", ErrFileParse, recoveryLine) } finish, err = strconv.ParseFloat(matches[1], 64) if err != nil { - return syncedBlocks, pct, 0, 0, fmt.Errorf("error parsing float from recoveryLine %q: %w", recoveryLine, err) + return syncedBlocks, pct, 0, 0, fmt.Errorf("%w: Unable to parse float from recoveryLine: %q", ErrFileParse, recoveryLine) } // Get recovery speed matches = recoveryLineSpeedRE.FindStringSubmatch(recoveryLine) if len(matches) != 2 { - return syncedBlocks, pct, finish, 0, fmt.Errorf("unexpected recoveryLine matching speed: %s", recoveryLine) + return syncedBlocks, pct, finish, 0, fmt.Errorf("%w: Unexpected recoveryLine value: %s", ErrFileParse, recoveryLine) } speed, err = strconv.ParseFloat(matches[1], 64) if err != nil { - return syncedBlocks, pct, finish, 0, fmt.Errorf("error parsing float from recoveryLine %q: %w", recoveryLine, err) + return syncedBlocks, pct, finish, 0, fmt.Errorf("%s: Error parsing float from recoveryLine: %q: %w", ErrFileParse, recoveryLine, err) } return syncedBlocks, pct, finish, speed, nil diff -Nru temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/meminfo.go temporal-1.22.5/src/vendor/github.com/prometheus/procfs/meminfo.go --- temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/meminfo.go 2023-09-29 14:03:32.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/prometheus/procfs/meminfo.go 2024-02-23 09:46:11.000000000 +0000 @@ -152,7 +152,7 @@ m, err := parseMemInfo(bytes.NewReader(b)) if err != nil { - return Meminfo{}, fmt.Errorf("failed to parse meminfo: %w", err) + return Meminfo{}, fmt.Errorf("%s: %w", ErrFileParse, err) } return *m, nil @@ -165,7 +165,7 @@ // Each line has at least a name and value; we ignore the unit. 
fields := strings.Fields(s.Text()) if len(fields) < 2 { - return nil, fmt.Errorf("malformed meminfo line: %q", s.Text()) + return nil, fmt.Errorf("%w: Malformed line %q", ErrFileParse, s.Text()) } v, err := strconv.ParseUint(fields[1], 0, 64) diff -Nru temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/mountinfo.go temporal-1.22.5/src/vendor/github.com/prometheus/procfs/mountinfo.go --- temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/mountinfo.go 2023-09-29 14:03:32.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/prometheus/procfs/mountinfo.go 2024-02-23 09:46:11.000000000 +0000 @@ -78,11 +78,11 @@ mountInfo := strings.Split(mountString, " ") mountInfoLength := len(mountInfo) if mountInfoLength < 10 { - return nil, fmt.Errorf("couldn't find enough fields in mount string: %s", mountString) + return nil, fmt.Errorf("%w: Too few fields in mount string: %s", ErrFileParse, mountString) } if mountInfo[mountInfoLength-4] != "-" { - return nil, fmt.Errorf("couldn't find separator in expected field: %s", mountInfo[mountInfoLength-4]) + return nil, fmt.Errorf("%w: couldn't find separator in expected field: %s", ErrFileParse, mountInfo[mountInfoLength-4]) } mount := &MountInfo{ @@ -98,18 +98,18 @@ mount.MountID, err = strconv.Atoi(mountInfo[0]) if err != nil { - return nil, fmt.Errorf("failed to parse mount ID") + return nil, fmt.Errorf("%w: mount ID: %q", ErrFileParse, mount.MountID) } mount.ParentID, err = strconv.Atoi(mountInfo[1]) if err != nil { - return nil, fmt.Errorf("failed to parse parent ID") + return nil, fmt.Errorf("%w: parent ID: %q", ErrFileParse, mount.ParentID) } // Has optional fields, which is a space separated list of values. // Example: shared:2 master:7 if mountInfo[6] != "" { mount.OptionalFields, err = mountOptionsParseOptionalFields(mountInfo[6 : mountInfoLength-4]) if err != nil { - return nil, err + return nil, fmt.Errorf("%s: %w", ErrFileParse, err) } } return mount, nil diff -Nru temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/mountstats.go temporal-1.22.5/src/vendor/github.com/prometheus/procfs/mountstats.go --- temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/mountstats.go 2023-09-29 14:03:32.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/prometheus/procfs/mountstats.go 2024-02-23 09:46:11.000000000 +0000 @@ -266,7 +266,7 @@ if len(ss) > deviceEntryLen { // Only NFSv3 and v4 are supported for parsing statistics if m.Type != nfs3Type && m.Type != nfs4Type { - return nil, fmt.Errorf("cannot parse MountStats for fstype %q", m.Type) + return nil, fmt.Errorf("%w: Cannot parse MountStats for %q", ErrFileParse, m.Type) } statVersion := strings.TrimPrefix(ss[8], statVersionPrefix) @@ -290,7 +290,7 @@ // device [device] mounted on [mount] with fstype [type] func parseMount(ss []string) (*Mount, error) { if len(ss) < deviceEntryLen { - return nil, fmt.Errorf("invalid device entry: %v", ss) + return nil, fmt.Errorf("%w: Invalid device %q", ErrFileParse, ss) } // Check for specific words appearing at specific indices to ensure @@ -308,7 +308,7 @@ for _, f := range format { if ss[f.i] != f.s { - return nil, fmt.Errorf("invalid device entry: %v", ss) + return nil, fmt.Errorf("%w: Invalid device %q", ErrFileParse, ss) } } @@ -345,7 +345,7 @@ switch ss[0] { case fieldOpts: if len(ss) < 2 { - return nil, fmt.Errorf("not enough information for NFS stats: %v", ss) + return nil, fmt.Errorf("%w: Incomplete information for NFS stats: %v", ErrFileParse, ss) } if stats.Opts == nil { stats.Opts = map[string]string{} @@ -360,7 +360,7 @@ } 
case fieldAge: if len(ss) < 2 { - return nil, fmt.Errorf("not enough information for NFS stats: %v", ss) + return nil, fmt.Errorf("%w: Incomplete information for NFS stats: %v", ErrFileParse, ss) } // Age integer is in seconds d, err := time.ParseDuration(ss[1] + "s") @@ -371,7 +371,7 @@ stats.Age = d case fieldBytes: if len(ss) < 2 { - return nil, fmt.Errorf("not enough information for NFS stats: %v", ss) + return nil, fmt.Errorf("%w: Incomplete information for NFS stats: %v", ErrFileParse, ss) } bstats, err := parseNFSBytesStats(ss[1:]) if err != nil { @@ -381,7 +381,7 @@ stats.Bytes = *bstats case fieldEvents: if len(ss) < 2 { - return nil, fmt.Errorf("not enough information for NFS stats: %v", ss) + return nil, fmt.Errorf("%w: Incomplete information for NFS events: %v", ErrFileParse, ss) } estats, err := parseNFSEventsStats(ss[1:]) if err != nil { @@ -391,7 +391,7 @@ stats.Events = *estats case fieldTransport: if len(ss) < 3 { - return nil, fmt.Errorf("not enough information for NFS transport stats: %v", ss) + return nil, fmt.Errorf("%w: Incomplete information for NFS transport stats: %v", ErrFileParse, ss) } tstats, err := parseNFSTransportStats(ss[1:], statVersion) @@ -430,7 +430,7 @@ // integer fields. func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) { if len(ss) != fieldBytesLen { - return nil, fmt.Errorf("invalid NFS bytes stats: %v", ss) + return nil, fmt.Errorf("%w: Invalid NFS bytes stats: %v", ErrFileParse, ss) } ns := make([]uint64, 0, fieldBytesLen) @@ -459,7 +459,7 @@ // integer fields. func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) { if len(ss) != fieldEventsLen { - return nil, fmt.Errorf("invalid NFS events stats: %v", ss) + return nil, fmt.Errorf("%w: invalid NFS events stats: %v", ErrFileParse, ss) } ns := make([]uint64, 0, fieldEventsLen) @@ -523,7 +523,7 @@ } if len(ss) < minFields { - return nil, fmt.Errorf("invalid NFS per-operations stats: %v", ss) + return nil, fmt.Errorf("%w: invalid NFS per-operations stats: %v", ErrFileParse, ss) } // Skip string operation name for integers @@ -576,10 +576,10 @@ } else if protocol == "udp" { expectedLength = fieldTransport10UDPLen } else { - return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.0 statement: %v", protocol, ss) + return nil, fmt.Errorf("%w: Invalid NFS protocol \"%s\" in stats 1.0 statement: %v", ErrFileParse, protocol, ss) } if len(ss) != expectedLength { - return nil, fmt.Errorf("invalid NFS transport stats 1.0 statement: %v", ss) + return nil, fmt.Errorf("%w: Invalid NFS transport stats 1.0 statement: %v", ErrFileParse, ss) } case statVersion11: var expectedLength int @@ -588,13 +588,13 @@ } else if protocol == "udp" { expectedLength = fieldTransport11UDPLen } else { - return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.1 statement: %v", protocol, ss) + return nil, fmt.Errorf("%w: invalid NFS protocol \"%s\" in stats 1.1 statement: %v", ErrFileParse, protocol, ss) } if len(ss) != expectedLength { - return nil, fmt.Errorf("invalid NFS transport stats 1.1 statement: %v", ss) + return nil, fmt.Errorf("%w: invalid NFS transport stats 1.1 statement: %v", ErrFileParse, ss) } default: - return nil, fmt.Errorf("unrecognized NFS transport stats version: %q", statVersion) + return nil, fmt.Errorf("%s: Unrecognized NFS transport stats version: %q", ErrFileParse, statVersion) } // Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay diff -Nru temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/net_conntrackstat.go 
temporal-1.22.5/src/vendor/github.com/prometheus/procfs/net_conntrackstat.go --- temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/net_conntrackstat.go 2023-09-29 14:03:32.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/prometheus/procfs/net_conntrackstat.go 2024-02-23 09:46:11.000000000 +0000 @@ -58,7 +58,7 @@ stat, err := parseConntrackStat(bytes.NewReader(b)) if err != nil { - return nil, fmt.Errorf("failed to read conntrack stats from %q: %w", path, err) + return nil, fmt.Errorf("%s: Cannot read file: %v: %w", ErrFileRead, path, err) } return stat, nil @@ -86,11 +86,12 @@ func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) { entries, err := util.ParseHexUint64s(fields) if err != nil { - return nil, fmt.Errorf("invalid conntrackstat entry, couldn't parse fields: %s", err) + return nil, fmt.Errorf("%s: Cannot parse entry: %d: %w", ErrFileParse, entries, err) } numEntries := len(entries) if numEntries < 16 || numEntries > 17 { - return nil, fmt.Errorf("invalid conntrackstat entry, invalid number of fields: %d", numEntries) + return nil, + fmt.Errorf("%w: invalid conntrackstat entry, invalid number of fields: %d", ErrFileParse, numEntries) } stats := &ConntrackStatEntry{ diff -Nru temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/net_ip_socket.go temporal-1.22.5/src/vendor/github.com/prometheus/procfs/net_ip_socket.go --- temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/net_ip_socket.go 2023-09-29 14:03:32.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/prometheus/procfs/net_ip_socket.go 2024-02-23 09:46:11.000000000 +0000 @@ -130,7 +130,7 @@ var byteIP []byte byteIP, err := hex.DecodeString(hexIP) if err != nil { - return nil, fmt.Errorf("cannot parse address field in socket line %q", hexIP) + return nil, fmt.Errorf("%s: Cannot parse socket field in %q: %w", ErrFileParse, hexIP, err) } switch len(byteIP) { case 4: @@ -144,7 +144,7 @@ } return i, nil default: - return nil, fmt.Errorf("Unable to parse IP %s", hexIP) + return nil, fmt.Errorf("%s: Unable to parse IP %s: %w", ErrFileParse, hexIP, nil) } } @@ -153,7 +153,8 @@ line := &netIPSocketLine{} if len(fields) < 10 { return nil, fmt.Errorf( - "cannot parse net socket line as it has less then 10 columns %q", + "%w: Less than 10 columns found %q", + ErrFileParse, strings.Join(fields, " "), ) } @@ -162,64 +163,65 @@ // sl s := strings.Split(fields[0], ":") if len(s) != 2 { - return nil, fmt.Errorf("cannot parse sl field in socket line %q", fields[0]) + return nil, fmt.Errorf("%w: Unable to parse sl field in line %q", ErrFileParse, fields[0]) } if line.Sl, err = strconv.ParseUint(s[0], 0, 64); err != nil { - return nil, fmt.Errorf("cannot parse sl value in socket line: %w", err) + return nil, fmt.Errorf("%s: Unable to parse sl field in %q: %w", ErrFileParse, line.Sl, err) } // local_address l := strings.Split(fields[1], ":") if len(l) != 2 { - return nil, fmt.Errorf("cannot parse local_address field in socket line %q", fields[1]) + return nil, fmt.Errorf("%w: Unable to parse local_address field in %q", ErrFileParse, fields[1]) } if line.LocalAddr, err = parseIP(l[0]); err != nil { return nil, err } if line.LocalPort, err = strconv.ParseUint(l[1], 16, 64); err != nil { - return nil, fmt.Errorf("cannot parse local_address port value in socket line: %w", err) + return nil, fmt.Errorf("%s: Unable to parse local_address port value line %q: %w", ErrFileParse, line.LocalPort, err) } // remote_address r := strings.Split(fields[2], ":") if len(r) != 2 { - return nil, 
fmt.Errorf("cannot parse rem_address field in socket line %q", fields[1]) + return nil, fmt.Errorf("%w: Unable to parse rem_address field in %q", ErrFileParse, fields[1]) } if line.RemAddr, err = parseIP(r[0]); err != nil { return nil, err } if line.RemPort, err = strconv.ParseUint(r[1], 16, 64); err != nil { - return nil, fmt.Errorf("cannot parse rem_address port value in socket line: %w", err) + return nil, fmt.Errorf("%s: Cannot parse rem_address port value in %q: %w", ErrFileParse, line.RemPort, err) } // st if line.St, err = strconv.ParseUint(fields[3], 16, 64); err != nil { - return nil, fmt.Errorf("cannot parse st value in socket line: %w", err) + return nil, fmt.Errorf("%s: Cannot parse st value in %q: %w", ErrFileParse, line.St, err) } // tx_queue and rx_queue q := strings.Split(fields[4], ":") if len(q) != 2 { return nil, fmt.Errorf( - "cannot parse tx/rx queues in socket line as it has a missing colon %q", + "%w: Missing colon for tx/rx queues in socket line %q", + ErrFileParse, fields[4], ) } if line.TxQueue, err = strconv.ParseUint(q[0], 16, 64); err != nil { - return nil, fmt.Errorf("cannot parse tx_queue value in socket line: %w", err) + return nil, fmt.Errorf("%s: Cannot parse tx_queue value in %q: %w", ErrFileParse, line.TxQueue, err) } if line.RxQueue, err = strconv.ParseUint(q[1], 16, 64); err != nil { - return nil, fmt.Errorf("cannot parse rx_queue value in socket line: %w", err) + return nil, fmt.Errorf("%s: Cannot parse trx_queue value in %q: %w", ErrFileParse, line.RxQueue, err) } // uid if line.UID, err = strconv.ParseUint(fields[7], 0, 64); err != nil { - return nil, fmt.Errorf("cannot parse uid value in socket line: %w", err) + return nil, fmt.Errorf("%s: Cannot parse UID value in %q: %w", ErrFileParse, line.UID, err) } // inode if line.Inode, err = strconv.ParseUint(fields[9], 0, 64); err != nil { - return nil, fmt.Errorf("cannot parse inode value in socket line: %w", err) + return nil, fmt.Errorf("%s: Cannot parse inode value in %q: %w", ErrFileParse, line.Inode, err) } return line, nil diff -Nru temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/net_protocols.go temporal-1.22.5/src/vendor/github.com/prometheus/procfs/net_protocols.go --- temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/net_protocols.go 2023-09-29 14:03:32.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/prometheus/procfs/net_protocols.go 2024-02-23 09:46:11.000000000 +0000 @@ -131,7 +131,7 @@ } else if fields[6] == disabled { line.Slab = false } else { - return nil, fmt.Errorf("unable to parse capability for protocol: %s", line.Name) + return nil, fmt.Errorf("%w: capability for protocol: %s", ErrFileParse, line.Name) } line.ModuleName = fields[7] @@ -173,7 +173,7 @@ } else if capabilities[i] == "n" { *capabilityFields[i] = false } else { - return fmt.Errorf("unable to parse capability block for protocol: position %d", i) + return fmt.Errorf("%w: capability block for protocol: position %d", ErrFileParse, i) } } return nil diff -Nru temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/net_route.go temporal-1.22.5/src/vendor/github.com/prometheus/procfs/net_route.go --- temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/net_route.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/prometheus/procfs/net_route.go 2024-02-23 09:46:11.000000000 +0000 @@ -0,0 +1,143 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the 
License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +const ( + blackholeRepresentation string = "*" + blackholeIfaceName string = "blackhole" + routeLineColumns int = 11 +) + +// A NetRouteLine represents one line from net/route. +type NetRouteLine struct { + Iface string + Destination uint32 + Gateway uint32 + Flags uint32 + RefCnt uint32 + Use uint32 + Metric uint32 + Mask uint32 + MTU uint32 + Window uint32 + IRTT uint32 +} + +func (fs FS) NetRoute() ([]NetRouteLine, error) { + return readNetRoute(fs.proc.Path("net", "route")) +} + +func readNetRoute(path string) ([]NetRouteLine, error) { + b, err := util.ReadFileNoStat(path) + if err != nil { + return nil, err + } + + routelines, err := parseNetRoute(bytes.NewReader(b)) + if err != nil { + return nil, fmt.Errorf("failed to read net route from %s: %w", path, err) + } + return routelines, nil +} + +func parseNetRoute(r io.Reader) ([]NetRouteLine, error) { + var routelines []NetRouteLine + + scanner := bufio.NewScanner(r) + scanner.Scan() + for scanner.Scan() { + fields := strings.Fields(scanner.Text()) + routeline, err := parseNetRouteLine(fields) + if err != nil { + return nil, err + } + routelines = append(routelines, *routeline) + } + return routelines, nil +} + +func parseNetRouteLine(fields []string) (*NetRouteLine, error) { + if len(fields) != routeLineColumns { + return nil, fmt.Errorf("invalid routeline, num of digits: %d", len(fields)) + } + iface := fields[0] + if iface == blackholeRepresentation { + iface = blackholeIfaceName + } + destination, err := strconv.ParseUint(fields[1], 16, 32) + if err != nil { + return nil, err + } + gateway, err := strconv.ParseUint(fields[2], 16, 32) + if err != nil { + return nil, err + } + flags, err := strconv.ParseUint(fields[3], 10, 32) + if err != nil { + return nil, err + } + refcnt, err := strconv.ParseUint(fields[4], 10, 32) + if err != nil { + return nil, err + } + use, err := strconv.ParseUint(fields[5], 10, 32) + if err != nil { + return nil, err + } + metric, err := strconv.ParseUint(fields[6], 10, 32) + if err != nil { + return nil, err + } + mask, err := strconv.ParseUint(fields[7], 16, 32) + if err != nil { + return nil, err + } + mtu, err := strconv.ParseUint(fields[8], 10, 32) + if err != nil { + return nil, err + } + window, err := strconv.ParseUint(fields[9], 10, 32) + if err != nil { + return nil, err + } + irtt, err := strconv.ParseUint(fields[10], 10, 32) + if err != nil { + return nil, err + } + routeline := &NetRouteLine{ + Iface: iface, + Destination: uint32(destination), + Gateway: uint32(gateway), + Flags: uint32(flags), + RefCnt: uint32(refcnt), + Use: uint32(use), + Metric: uint32(metric), + Mask: uint32(mask), + MTU: uint32(mtu), + Window: uint32(window), + IRTT: uint32(irtt), + } + return routeline, nil +} diff -Nru temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/net_sockstat.go temporal-1.22.5/src/vendor/github.com/prometheus/procfs/net_sockstat.go --- temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/net_sockstat.go 2023-09-29 
14:03:32.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/prometheus/procfs/net_sockstat.go 2024-02-23 09:46:11.000000000 +0000 @@ -16,7 +16,6 @@ import ( "bufio" "bytes" - "errors" "fmt" "io" "strings" @@ -70,7 +69,7 @@ stat, err := parseSockstat(bytes.NewReader(b)) if err != nil { - return nil, fmt.Errorf("failed to read sockstats from %q: %w", name, err) + return nil, fmt.Errorf("%s: sockstats from %q: %w", ErrFileRead, name, err) } return stat, nil @@ -84,13 +83,13 @@ // Expect a minimum of a protocol and one key/value pair. fields := strings.Split(s.Text(), " ") if len(fields) < 3 { - return nil, fmt.Errorf("malformed sockstat line: %q", s.Text()) + return nil, fmt.Errorf("%w: Malformed sockstat line: %q", ErrFileParse, s.Text()) } // The remaining fields are key/value pairs. kvs, err := parseSockstatKVs(fields[1:]) if err != nil { - return nil, fmt.Errorf("error parsing sockstat key/value pairs from %q: %w", s.Text(), err) + return nil, fmt.Errorf("%s: sockstat key/value pairs from %q: %w", ErrFileParse, s.Text(), err) } // The first field is the protocol. We must trim its colon suffix. @@ -119,7 +118,7 @@ // parseSockstatKVs parses a string slice into a map of key/value pairs. func parseSockstatKVs(kvs []string) (map[string]int, error) { if len(kvs)%2 != 0 { - return nil, errors.New("odd number of fields in key/value pairs") + return nil, fmt.Errorf("%w:: Odd number of fields in key/value pairs %q", ErrFileParse, kvs) } // Iterate two values at a time to gather key/value pairs. diff -Nru temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/net_softnet.go temporal-1.22.5/src/vendor/github.com/prometheus/procfs/net_softnet.go --- temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/net_softnet.go 2023-09-29 14:03:32.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/prometheus/procfs/net_softnet.go 2024-02-23 09:46:11.000000000 +0000 @@ -64,7 +64,7 @@ entries, err := parseSoftnet(bytes.NewReader(b)) if err != nil { - return nil, fmt.Errorf("failed to parse /proc/net/softnet_stat: %w", err) + return nil, fmt.Errorf("%s: /proc/net/softnet_stat: %w", ErrFileParse, err) } return entries, nil @@ -83,7 +83,7 @@ softnetStat := SoftnetStat{} if width < minColumns { - return nil, fmt.Errorf("%d columns were detected, but at least %d were expected", width, minColumns) + return nil, fmt.Errorf("%w: detected %d columns, but expected at least %d", ErrFileParse, width, minColumns) } // Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2347 diff -Nru temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/net_unix.go temporal-1.22.5/src/vendor/github.com/prometheus/procfs/net_unix.go --- temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/net_unix.go 2023-09-29 14:03:32.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/prometheus/procfs/net_unix.go 2024-02-23 09:46:11.000000000 +0000 @@ -108,14 +108,14 @@ line := s.Text() item, err := nu.parseLine(line, hasInode, minFields) if err != nil { - return nil, fmt.Errorf("failed to parse /proc/net/unix data %q: %w", line, err) + return nil, fmt.Errorf("%s: /proc/net/unix encountered data %q: %w", ErrFileParse, line, err) } nu.Rows = append(nu.Rows, item) } if err := s.Err(); err != nil { - return nil, fmt.Errorf("failed to scan /proc/net/unix data: %w", err) + return nil, fmt.Errorf("%s: /proc/net/unix encountered data: %w", ErrFileParse, err) } return &nu, nil @@ -126,7 +126,7 @@ l := len(fields) if l < min { - return nil, fmt.Errorf("expected at least %d fields but got %d", min, l) + 
return nil, fmt.Errorf("%w: expected at least %d fields but got %d", ErrFileParse, min, l) } // Field offsets are as follows: @@ -136,29 +136,29 @@ users, err := u.parseUsers(fields[1]) if err != nil { - return nil, fmt.Errorf("failed to parse ref count %q: %w", fields[1], err) + return nil, fmt.Errorf("%s: ref count %q: %w", ErrFileParse, fields[1], err) } flags, err := u.parseFlags(fields[3]) if err != nil { - return nil, fmt.Errorf("failed to parse flags %q: %w", fields[3], err) + return nil, fmt.Errorf("%s: Unable to parse flags %q: %w", ErrFileParse, fields[3], err) } typ, err := u.parseType(fields[4]) if err != nil { - return nil, fmt.Errorf("failed to parse type %q: %w", fields[4], err) + return nil, fmt.Errorf("%s: Failed to parse type %q: %w", ErrFileParse, fields[4], err) } state, err := u.parseState(fields[5]) if err != nil { - return nil, fmt.Errorf("failed to parse state %q: %w", fields[5], err) + return nil, fmt.Errorf("%s: Failed to parse state %q: %w", ErrFileParse, fields[5], err) } var inode uint64 if hasInode { inode, err = u.parseInode(fields[6]) if err != nil { - return nil, fmt.Errorf("failed to parse inode %q: %w", fields[6], err) + return nil, fmt.Errorf("%s failed to parse inode %q: %w", ErrFileParse, fields[6], err) } } diff -Nru temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/net_wireless.go temporal-1.22.5/src/vendor/github.com/prometheus/procfs/net_wireless.go --- temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/net_wireless.go 2023-09-29 14:03:32.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/prometheus/procfs/net_wireless.go 2024-02-23 09:46:11.000000000 +0000 @@ -68,7 +68,7 @@ m, err := parseWireless(bytes.NewReader(b)) if err != nil { - return nil, fmt.Errorf("failed to parse wireless: %w", err) + return nil, fmt.Errorf("%s: wireless: %w", ErrFileParse, err) } return m, nil @@ -97,64 +97,64 @@ parts := strings.Split(line, ":") if len(parts) != 2 { - return nil, fmt.Errorf("expected 2 parts after splitting line by ':', got %d for line %q", len(parts), line) + return nil, fmt.Errorf("%w: expected 2 parts after splitting line by ':', got %d for line %q", ErrFileParse, len(parts), line) } name := strings.TrimSpace(parts[0]) stats := strings.Fields(parts[1]) if len(stats) < 10 { - return nil, fmt.Errorf("invalid number of fields in line %d, expected at least 10, got %d: %q", n, len(stats), line) + return nil, fmt.Errorf("%w: invalid number of fields in line %d, expected 10+, got %d: %q", ErrFileParse, n, len(stats), line) } status, err := strconv.ParseUint(stats[0], 16, 16) if err != nil { - return nil, fmt.Errorf("invalid status in line %d: %q", n, line) + return nil, fmt.Errorf("%w: invalid status in line %d: %q", ErrFileParse, n, line) } qlink, err := strconv.Atoi(strings.TrimSuffix(stats[1], ".")) if err != nil { - return nil, fmt.Errorf("failed to parse Quality:link as integer %q: %w", qlink, err) + return nil, fmt.Errorf("%s: parse Quality:link as integer %q: %w", ErrFileParse, qlink, err) } qlevel, err := strconv.Atoi(strings.TrimSuffix(stats[2], ".")) if err != nil { - return nil, fmt.Errorf("failed to parse Quality:level as integer %q: %w", qlevel, err) + return nil, fmt.Errorf("%s: Quality:level as integer %q: %w", ErrFileParse, qlevel, err) } qnoise, err := strconv.Atoi(strings.TrimSuffix(stats[3], ".")) if err != nil { - return nil, fmt.Errorf("failed to parse Quality:noise as integer %q: %w", qnoise, err) + return nil, fmt.Errorf("%s: Quality:noise as integer %q: %w", ErrFileParse, qnoise, err) } dnwid, err := 
strconv.Atoi(stats[4]) if err != nil { - return nil, fmt.Errorf("failed to parse Discarded:nwid as integer %q: %w", dnwid, err) + return nil, fmt.Errorf("%s: Discarded:nwid as integer %q: %w", ErrFileParse, dnwid, err) } dcrypt, err := strconv.Atoi(stats[5]) if err != nil { - return nil, fmt.Errorf("failed to parse Discarded:crypt as integer %q: %w", dcrypt, err) + return nil, fmt.Errorf("%s: Discarded:crypt as integer %q: %w", ErrFileParse, dcrypt, err) } dfrag, err := strconv.Atoi(stats[6]) if err != nil { - return nil, fmt.Errorf("failed to parse Discarded:frag as integer %q: %w", dfrag, err) + return nil, fmt.Errorf("%s: Discarded:frag as integer %q: %w", ErrFileParse, dfrag, err) } dretry, err := strconv.Atoi(stats[7]) if err != nil { - return nil, fmt.Errorf("failed to parse Discarded:retry as integer %q: %w", dretry, err) + return nil, fmt.Errorf("%s: Discarded:retry as integer %q: %w", ErrFileParse, dretry, err) } dmisc, err := strconv.Atoi(stats[8]) if err != nil { - return nil, fmt.Errorf("failed to parse Discarded:misc as integer %q: %w", dmisc, err) + return nil, fmt.Errorf("%s: Discarded:misc as integer %q: %w", ErrFileParse, dmisc, err) } mbeacon, err := strconv.Atoi(stats[9]) if err != nil { - return nil, fmt.Errorf("failed to parse Missed:beacon as integer %q: %w", mbeacon, err) + return nil, fmt.Errorf("%s: Missed:beacon as integer %q: %w", ErrFileParse, mbeacon, err) } w := &Wireless{ @@ -175,7 +175,7 @@ } if err := scanner.Err(); err != nil { - return nil, fmt.Errorf("failed to scan /proc/net/wireless: %w", err) + return nil, fmt.Errorf("%s: Failed to scan /proc/net/wireless: %w", ErrFileRead, err) } return interfaces, nil diff -Nru temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/net_xfrm.go temporal-1.22.5/src/vendor/github.com/prometheus/procfs/net_xfrm.go --- temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/net_xfrm.go 2023-09-29 14:03:32.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/prometheus/procfs/net_xfrm.go 2024-02-23 09:46:11.000000000 +0000 @@ -115,7 +115,7 @@ fields := strings.Fields(s.Text()) if len(fields) != 2 { - return XfrmStat{}, fmt.Errorf("couldn't parse %q line %q", file.Name(), s.Text()) + return XfrmStat{}, fmt.Errorf("%w: %q line %q", ErrFileParse, file.Name(), s.Text()) } name := fields[0] diff -Nru temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/proc.go temporal-1.22.5/src/vendor/github.com/prometheus/procfs/proc.go --- temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/proc.go 2023-09-29 14:03:32.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/prometheus/procfs/proc.go 2024-02-23 09:46:11.000000000 +0000 @@ -15,6 +15,7 @@ import ( "bytes" + "errors" "fmt" "io" "os" @@ -35,6 +36,12 @@ // Procs represents a list of Proc structs. type Procs []Proc +var ( + ErrFileParse = errors.New("Error Parsing File") + ErrFileRead = errors.New("Error Reading File") + ErrMountPoint = errors.New("Error Accessing Mount point") +) + func (p Procs) Len() int { return len(p) } func (p Procs) Swap(i, j int) { p[i], p[j] = p[j], p[i] } func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID } @@ -42,7 +49,7 @@ // Self returns a process for the current process read via /proc/self. 
func Self() (Proc, error) { fs, err := NewFS(DefaultMountPoint) - if err != nil { + if err != nil || errors.Unwrap(err) == ErrMountPoint { return Proc{}, err } return fs.Self() @@ -104,7 +111,7 @@ names, err := d.Readdirnames(-1) if err != nil { - return Procs{}, fmt.Errorf("could not read %q: %w", d.Name(), err) + return Procs{}, fmt.Errorf("%s: Cannot read file: %v: %w", ErrFileRead, names, err) } p := Procs{} @@ -205,7 +212,7 @@ for i, n := range names { fd, err := strconv.ParseInt(n, 10, 32) if err != nil { - return nil, fmt.Errorf("could not parse fd %q: %w", n, err) + return nil, fmt.Errorf("%s: Cannot parse line: %v: %w", ErrFileParse, i, err) } fds[i] = uintptr(fd) } @@ -290,7 +297,7 @@ names, err := d.Readdirnames(-1) if err != nil { - return nil, fmt.Errorf("could not read %q: %w", d.Name(), err) + return nil, fmt.Errorf("%s: Cannot read file: %v: %w", ErrFileRead, names, err) } return names, nil diff -Nru temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/proc_cgroup.go temporal-1.22.5/src/vendor/github.com/prometheus/procfs/proc_cgroup.go --- temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/proc_cgroup.go 2023-09-29 14:03:32.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/prometheus/procfs/proc_cgroup.go 2024-02-23 09:46:11.000000000 +0000 @@ -51,7 +51,7 @@ fields := strings.SplitN(cgroupStr, ":", 3) if len(fields) < 3 { - return nil, fmt.Errorf("at least 3 fields required, found %d fields in cgroup string: %s", len(fields), cgroupStr) + return nil, fmt.Errorf("%w: 3+ fields required, found %d fields in cgroup string: %s", ErrFileParse, len(fields), cgroupStr) } cgroup := &Cgroup{ @@ -60,7 +60,7 @@ } cgroup.HierarchyID, err = strconv.Atoi(fields[0]) if err != nil { - return nil, fmt.Errorf("failed to parse hierarchy ID") + return nil, fmt.Errorf("%w: hierarchy ID: %q", ErrFileParse, cgroup.HierarchyID) } if fields[1] != "" { ssNames := strings.Split(fields[1], ",") diff -Nru temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/proc_cgroups.go temporal-1.22.5/src/vendor/github.com/prometheus/procfs/proc_cgroups.go --- temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/proc_cgroups.go 2023-09-29 14:03:32.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/prometheus/procfs/proc_cgroups.go 2024-02-23 09:46:11.000000000 +0000 @@ -46,7 +46,7 @@ fields := strings.Fields(CgroupSummaryStr) // require at least 4 fields if len(fields) < 4 { - return nil, fmt.Errorf("at least 4 fields required, found %d fields in cgroup info string: %s", len(fields), CgroupSummaryStr) + return nil, fmt.Errorf("%w: 4+ fields required, found %d fields in cgroup info string: %s", ErrFileParse, len(fields), CgroupSummaryStr) } CgroupSummary := &CgroupSummary{ @@ -54,15 +54,15 @@ } CgroupSummary.Hierarchy, err = strconv.Atoi(fields[1]) if err != nil { - return nil, fmt.Errorf("failed to parse hierarchy ID") + return nil, fmt.Errorf("%w: Unable to parse hierarchy ID from %q", ErrFileParse, fields[1]) } CgroupSummary.Cgroups, err = strconv.Atoi(fields[2]) if err != nil { - return nil, fmt.Errorf("failed to parse Cgroup Num") + return nil, fmt.Errorf("%w: Unable to parse Cgroup Num from %q", ErrFileParse, fields[2]) } CgroupSummary.Enabled, err = strconv.Atoi(fields[3]) if err != nil { - return nil, fmt.Errorf("failed to parse Enabled") + return nil, fmt.Errorf("%w: Unable to parse Enabled from %q", ErrFileParse, fields[3]) } return CgroupSummary, nil } diff -Nru temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/proc_fdinfo.go 
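[editor's note] The substantive change across this procfs block is the introduction of the exported sentinels ErrFileParse, ErrFileRead and ErrMountPoint in proc.go, with most fmt.Errorf sites rewritten to mention them. A caller-side sketch of what that enables, with the caveat that only the call sites using the %w verb actually wrap the sentinel (the %s sites merely embed its text), assuming the standard procfs entry points:

```go
// Hypothetical caller sketch: branch on the new sentinel errors instead of
// matching message strings. errors.Is only succeeds for call sites that wrap
// the sentinel with %w; sites using %s just include its text.
package main

import (
	"errors"
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		fmt.Println("cannot open /proc:", err)
		return
	}
	if _, err := fs.Meminfo(); err != nil {
		switch {
		case errors.Is(err, procfs.ErrFileParse):
			fmt.Println("meminfo was readable but malformed:", err)
		case errors.Is(err, procfs.ErrFileRead):
			fmt.Println("meminfo could not be read:", err)
		default:
			fmt.Println("unexpected failure:", err)
		}
	}
}
```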
temporal-1.22.5/src/vendor/github.com/prometheus/procfs/proc_fdinfo.go --- temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/proc_fdinfo.go 2023-09-29 14:03:32.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/prometheus/procfs/proc_fdinfo.go 2024-02-23 09:46:11.000000000 +0000 @@ -111,7 +111,7 @@ } return i, nil } - return nil, fmt.Errorf("invalid inode entry: %q", line) + return nil, fmt.Errorf("%w: invalid inode entry: %q", ErrFileParse, line) } // ProcFDInfos represents a list of ProcFDInfo structs. diff -Nru temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/proc_interrupts.go temporal-1.22.5/src/vendor/github.com/prometheus/procfs/proc_interrupts.go --- temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/proc_interrupts.go 2023-09-29 14:03:32.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/prometheus/procfs/proc_interrupts.go 2024-02-23 09:46:11.000000000 +0000 @@ -66,7 +66,7 @@ continue } if len(parts) < 2 { - return nil, fmt.Errorf("not enough fields in interrupts (expected at least 2 fields but got %d): %s", len(parts), parts) + return nil, fmt.Errorf("%w: Not enough fields in interrupts (expected 2+ fields but got %d): %s", ErrFileParse, len(parts), parts) } intName := parts[0][:len(parts[0])-1] // remove trailing : diff -Nru temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/proc_limits.go temporal-1.22.5/src/vendor/github.com/prometheus/procfs/proc_limits.go --- temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/proc_limits.go 2023-09-29 14:03:32.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/prometheus/procfs/proc_limits.go 2024-02-23 09:46:11.000000000 +0000 @@ -103,7 +103,7 @@ //fields := limitsMatch.Split(s.Text(), limitsFields) fields := limitsMatch.FindStringSubmatch(s.Text()) if len(fields) != limitsFields { - return ProcLimits{}, fmt.Errorf("couldn't parse %q line %q", f.Name(), s.Text()) + return ProcLimits{}, fmt.Errorf("%w: couldn't parse %q line %q", ErrFileParse, f.Name(), s.Text()) } switch fields[1] { @@ -154,7 +154,7 @@ } i, err := strconv.ParseUint(s, 10, 64) if err != nil { - return 0, fmt.Errorf("couldn't parse value %q: %w", s, err) + return 0, fmt.Errorf("%s: couldn't parse value %q: %w", ErrFileParse, s, err) } return i, nil } diff -Nru temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/proc_maps.go temporal-1.22.5/src/vendor/github.com/prometheus/procfs/proc_maps.go --- temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/proc_maps.go 2023-09-29 14:03:32.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/prometheus/procfs/proc_maps.go 2024-02-23 09:46:11.000000000 +0000 @@ -65,7 +65,7 @@ func parseDevice(s string) (uint64, error) { toks := strings.Split(s, ":") if len(toks) < 2 { - return 0, fmt.Errorf("unexpected number of fields") + return 0, fmt.Errorf("%w: unexpected number of fields, expected: 2, got: %q", ErrFileParse, len(toks)) } major, err := strconv.ParseUint(toks[0], 16, 0) @@ -95,7 +95,7 @@ func parseAddresses(s string) (uintptr, uintptr, error) { toks := strings.Split(s, "-") if len(toks) < 2 { - return 0, 0, fmt.Errorf("invalid address") + return 0, 0, fmt.Errorf("%w: invalid address", ErrFileParse) } saddr, err := parseAddress(toks[0]) @@ -114,7 +114,7 @@ // parsePermissions parses a token and returns any that are set. 
func parsePermissions(s string) (*ProcMapPermissions, error) { if len(s) < 4 { - return nil, fmt.Errorf("invalid permissions token") + return nil, fmt.Errorf("%w: invalid permissions token", ErrFileParse) } perms := ProcMapPermissions{} @@ -141,7 +141,7 @@ func parseProcMap(text string) (*ProcMap, error) { fields := strings.Fields(text) if len(fields) < 5 { - return nil, fmt.Errorf("truncated procmap entry") + return nil, fmt.Errorf("%w: truncated procmap entry", ErrFileParse) } saddr, eaddr, err := parseAddresses(fields[0]) diff -Nru temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/proc_netstat.go temporal-1.22.5/src/vendor/github.com/prometheus/procfs/proc_netstat.go --- temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/proc_netstat.go 2023-09-29 14:03:32.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/prometheus/procfs/proc_netstat.go 2024-02-23 09:46:11.000000000 +0000 @@ -195,8 +195,8 @@ // Remove trailing :. protocol := strings.TrimSuffix(nameParts[0], ":") if len(nameParts) != len(valueParts) { - return procNetstat, fmt.Errorf("mismatch field count mismatch in %s: %s", - fileName, protocol) + return procNetstat, fmt.Errorf("%w: mismatch field count mismatch in %s: %s", + ErrFileParse, fileName, protocol) } for i := 1; i < len(nameParts); i++ { value, err := strconv.ParseFloat(valueParts[i], 64) diff -Nru temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/proc_ns.go temporal-1.22.5/src/vendor/github.com/prometheus/procfs/proc_ns.go --- temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/proc_ns.go 2023-09-29 14:03:32.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/prometheus/procfs/proc_ns.go 2024-02-23 09:46:11.000000000 +0000 @@ -40,7 +40,7 @@ names, err := d.Readdirnames(-1) if err != nil { - return nil, fmt.Errorf("failed to read contents of ns dir: %w", err) + return nil, fmt.Errorf("%s: failed to read contents of ns dir: %w", ErrFileRead, err) } ns := make(Namespaces, len(names)) @@ -52,13 +52,13 @@ fields := strings.SplitN(target, ":", 2) if len(fields) != 2 { - return nil, fmt.Errorf("failed to parse namespace type and inode from %q", target) + return nil, fmt.Errorf("%w: namespace type and inode from %q", ErrFileParse, target) } typ := fields[0] inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32) if err != nil { - return nil, fmt.Errorf("failed to parse inode from %q: %w", fields[1], err) + return nil, fmt.Errorf("%s: inode from %q: %w", ErrFileParse, fields[1], err) } ns[name] = Namespace{typ, uint32(inode)} diff -Nru temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/proc_psi.go temporal-1.22.5/src/vendor/github.com/prometheus/procfs/proc_psi.go --- temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/proc_psi.go 2023-09-29 14:03:32.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/prometheus/procfs/proc_psi.go 2024-02-23 09:46:11.000000000 +0000 @@ -61,7 +61,7 @@ func (fs FS) PSIStatsForResource(resource string) (PSIStats, error) { data, err := util.ReadFileNoStat(fs.proc.Path(fmt.Sprintf("%s/%s", "pressure", resource))) if err != nil { - return PSIStats{}, fmt.Errorf("psi_stats: unavailable for %q: %w", resource, err) + return PSIStats{}, fmt.Errorf("%s: psi_stats: unavailable for %q: %w", ErrFileRead, resource, err) } return parsePSIStats(resource, bytes.NewReader(data)) diff -Nru temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/proc_snmp.go temporal-1.22.5/src/vendor/github.com/prometheus/procfs/proc_snmp.go --- 
temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/proc_snmp.go 2023-09-29 14:03:32.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/prometheus/procfs/proc_snmp.go 2024-02-23 09:46:11.000000000 +0000 @@ -159,8 +159,8 @@ // Remove trailing :. protocol := strings.TrimSuffix(nameParts[0], ":") if len(nameParts) != len(valueParts) { - return procSnmp, fmt.Errorf("mismatch field count mismatch in %s: %s", - fileName, protocol) + return procSnmp, fmt.Errorf("%w: mismatch field count mismatch in %s: %s", + ErrFileParse, fileName, protocol) } for i := 1; i < len(nameParts); i++ { value, err := strconv.ParseFloat(valueParts[i], 64) diff -Nru temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/proc_stat.go temporal-1.22.5/src/vendor/github.com/prometheus/procfs/proc_stat.go --- temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/proc_stat.go 2023-09-29 14:03:32.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/prometheus/procfs/proc_stat.go 2024-02-23 09:46:11.000000000 +0000 @@ -138,7 +138,7 @@ ) if l < 0 || r < 0 { - return ProcStat{}, fmt.Errorf("unexpected format, couldn't extract comm %q", data) + return ProcStat{}, fmt.Errorf("%w: unexpected format, couldn't extract comm %q", ErrFileParse, data) } s.Comm = string(data[l+1 : r]) diff -Nru temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/proc_sys.go temporal-1.22.5/src/vendor/github.com/prometheus/procfs/proc_sys.go --- temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/proc_sys.go 2023-09-29 14:03:32.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/prometheus/procfs/proc_sys.go 2024-02-23 09:46:11.000000000 +0000 @@ -44,7 +44,7 @@ vp := util.NewValueParser(f) values[i] = vp.Int() if err := vp.Err(); err != nil { - return nil, fmt.Errorf("field %d in sysctl %s is not a valid int: %w", i, sysctl, err) + return nil, fmt.Errorf("%s: field %d in sysctl %s is not a valid int: %w", ErrFileParse, i, sysctl, err) } } return values, nil diff -Nru temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/slab.go temporal-1.22.5/src/vendor/github.com/prometheus/procfs/slab.go --- temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/slab.go 2023-09-29 14:03:32.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/prometheus/procfs/slab.go 2024-02-23 09:46:11.000000000 +0000 @@ -68,7 +68,7 @@ l := slabSpace.ReplaceAllString(line, " ") s := strings.Split(l, " ") if len(s) != 16 { - return nil, fmt.Errorf("unable to parse: %q", line) + return nil, fmt.Errorf("%w: unable to parse: %q", ErrFileParse, line) } var err error i := &Slab{Name: s[0]} diff -Nru temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/softirqs.go temporal-1.22.5/src/vendor/github.com/prometheus/procfs/softirqs.go --- temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/softirqs.go 2023-09-29 14:03:32.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/prometheus/procfs/softirqs.go 2024-02-23 09:46:11.000000000 +0000 @@ -57,7 +57,7 @@ ) if !scanner.Scan() { - return Softirqs{}, fmt.Errorf("softirqs empty") + return Softirqs{}, fmt.Errorf("%w: softirqs empty", ErrFileRead) } for scanner.Scan() { @@ -74,7 +74,7 @@ softirqs.Hi = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.Hi[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("couldn't parse %q (HI%d): %w", count, i, err) + return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (HI%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "TIMER:": @@ -82,7 +82,7 @@ softirqs.Timer = make([]uint64, 
len(perCPU)) for i, count := range perCPU { if softirqs.Timer[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("couldn't parse %q (TIMER%d): %w", count, i, err) + return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (TIMER%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "NET_TX:": @@ -90,7 +90,7 @@ softirqs.NetTx = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.NetTx[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("couldn't parse %q (NET_TX%d): %w", count, i, err) + return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (NET_TX%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "NET_RX:": @@ -98,7 +98,7 @@ softirqs.NetRx = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.NetRx[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("couldn't parse %q (NET_RX%d): %w", count, i, err) + return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (NET_RX%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "BLOCK:": @@ -106,7 +106,7 @@ softirqs.Block = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.Block[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("couldn't parse %q (BLOCK%d): %w", count, i, err) + return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (BLOCK%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "IRQ_POLL:": @@ -114,7 +114,7 @@ softirqs.IRQPoll = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.IRQPoll[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("couldn't parse %q (IRQ_POLL%d): %w", count, i, err) + return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (IRQ_POLL%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "TASKLET:": @@ -122,7 +122,7 @@ softirqs.Tasklet = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.Tasklet[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("couldn't parse %q (TASKLET%d): %w", count, i, err) + return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (TASKLET%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "SCHED:": @@ -130,7 +130,7 @@ softirqs.Sched = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.Sched[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("couldn't parse %q (SCHED%d): %w", count, i, err) + return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (SCHED%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "HRTIMER:": @@ -138,7 +138,7 @@ softirqs.HRTimer = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.HRTimer[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("couldn't parse %q (HRTIMER%d): %w", count, i, err) + return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (HRTIMER%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "RCU:": @@ -146,14 +146,14 @@ softirqs.RCU = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.RCU[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("couldn't parse %q (RCU%d): %w", count, i, err) + return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (RCU%d): %w", ErrFileParse, count, i, err) } } } } if err := scanner.Err(); err != nil { - return Softirqs{}, fmt.Errorf("couldn't parse softirqs: %w", err) + return Softirqs{}, fmt.Errorf("%s: 
couldn't parse softirqs: %w", ErrFileParse, err) } return softirqs, scanner.Err() diff -Nru temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/stat.go temporal-1.22.5/src/vendor/github.com/prometheus/procfs/stat.go --- temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/stat.go 2023-09-29 14:03:32.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/prometheus/procfs/stat.go 2024-02-23 09:46:11.000000000 +0000 @@ -93,10 +93,10 @@ &cpuStat.Guest, &cpuStat.GuestNice) if err != nil && err != io.EOF { - return CPUStat{}, -1, fmt.Errorf("couldn't parse %q (cpu): %w", line, err) + return CPUStat{}, -1, fmt.Errorf("%s: couldn't parse %q (cpu): %w", ErrFileParse, line, err) } if count == 0 { - return CPUStat{}, -1, fmt.Errorf("couldn't parse %q (cpu): 0 elements parsed", line) + return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu): 0 elements parsed", ErrFileParse, line) } cpuStat.User /= userHZ @@ -116,7 +116,7 @@ cpuID, err := strconv.ParseInt(cpu[3:], 10, 64) if err != nil { - return CPUStat{}, -1, fmt.Errorf("couldn't parse %q (cpu/cpuid): %w", line, err) + return CPUStat{}, -1, fmt.Errorf("%s: couldn't parse %q (cpu/cpuid): %w", ErrFileParse, line, err) } return cpuStat, cpuID, nil @@ -136,7 +136,7 @@ &softIRQStat.Hrtimer, &softIRQStat.Rcu) if err != nil { - return SoftIRQStat{}, 0, fmt.Errorf("couldn't parse %q (softirq): %w", line, err) + return SoftIRQStat{}, 0, fmt.Errorf("%s: couldn't parse %q (softirq): %w", ErrFileParse, line, err) } return softIRQStat, total, nil @@ -197,34 +197,34 @@ switch { case parts[0] == "btime": if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %q (btime): %w", parts[1], err) + return Stat{}, fmt.Errorf("%s: couldn't parse %q (btime): %w", ErrFileParse, parts[1], err) } case parts[0] == "intr": if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %q (intr): %w", parts[1], err) + return Stat{}, fmt.Errorf("%s: couldn't parse %q (intr): %w", ErrFileParse, parts[1], err) } numberedIRQs := parts[2:] stat.IRQ = make([]uint64, len(numberedIRQs)) for i, count := range numberedIRQs { if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %q (intr%d): %w", count, i, err) + return Stat{}, fmt.Errorf("%s: couldn't parse %q (intr%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "ctxt": if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %q (ctxt): %w", parts[1], err) + return Stat{}, fmt.Errorf("%s: couldn't parse %q (ctxt): %w", ErrFileParse, parts[1], err) } case parts[0] == "processes": if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %q (processes): %w", parts[1], err) + return Stat{}, fmt.Errorf("%s: couldn't parse %q (processes): %w", ErrFileParse, parts[1], err) } case parts[0] == "procs_running": if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %q (procs_running): %w", parts[1], err) + return Stat{}, fmt.Errorf("%s: couldn't parse %q (procs_running): %w", ErrFileParse, parts[1], err) } case parts[0] == "procs_blocked": if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %q (procs_blocked): %w", parts[1], err) + return Stat{}, fmt.Errorf("%s: 
couldn't parse %q (procs_blocked): %w", ErrFileParse, parts[1], err) } case parts[0] == "softirq": softIRQStats, total, err := parseSoftIRQStat(line) @@ -247,7 +247,7 @@ } if err := scanner.Err(); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %q: %w", fileName, err) + return Stat{}, fmt.Errorf("%s: couldn't parse %q: %w", ErrFileParse, fileName, err) } return stat, nil diff -Nru temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/swaps.go temporal-1.22.5/src/vendor/github.com/prometheus/procfs/swaps.go --- temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/swaps.go 2023-09-29 14:03:32.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/prometheus/procfs/swaps.go 2024-02-23 09:46:11.000000000 +0000 @@ -64,7 +64,7 @@ swapFields := strings.Fields(swapString) swapLength := len(swapFields) if swapLength < 5 { - return nil, fmt.Errorf("too few fields in swap string: %s", swapString) + return nil, fmt.Errorf("%w: too few fields in swap string: %s", ErrFileParse, swapString) } swap := &Swap{ @@ -74,15 +74,15 @@ swap.Size, err = strconv.Atoi(swapFields[2]) if err != nil { - return nil, fmt.Errorf("invalid swap size: %s", swapFields[2]) + return nil, fmt.Errorf("%s: invalid swap size: %s: %w", ErrFileParse, swapFields[2], err) } swap.Used, err = strconv.Atoi(swapFields[3]) if err != nil { - return nil, fmt.Errorf("invalid swap used: %s", swapFields[3]) + return nil, fmt.Errorf("%s: invalid swap used: %s: %w", ErrFileParse, swapFields[3], err) } swap.Priority, err = strconv.Atoi(swapFields[4]) if err != nil { - return nil, fmt.Errorf("invalid swap priority: %s", swapFields[4]) + return nil, fmt.Errorf("%s: invalid swap priority: %s: %w", ErrFileParse, swapFields[4], err) } return swap, nil diff -Nru temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/thread.go temporal-1.22.5/src/vendor/github.com/prometheus/procfs/thread.go --- temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/thread.go 2023-09-29 14:03:32.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/prometheus/procfs/thread.go 2024-02-23 09:46:11.000000000 +0000 @@ -45,7 +45,7 @@ names, err := d.Readdirnames(-1) if err != nil { - return Procs{}, fmt.Errorf("could not read %q: %w", d.Name(), err) + return Procs{}, fmt.Errorf("%s: could not read %q: %w", ErrFileRead, d.Name(), err) } t := Procs{} diff -Nru temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/vm.go temporal-1.22.5/src/vendor/github.com/prometheus/procfs/vm.go --- temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/vm.go 2023-09-29 14:03:32.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/prometheus/procfs/vm.go 2024-02-23 09:46:11.000000000 +0000 @@ -86,7 +86,7 @@ return nil, err } if !file.Mode().IsDir() { - return nil, fmt.Errorf("%s is not a directory", path) + return nil, fmt.Errorf("%w: %s is not a directory", ErrFileRead, path) } files, err := os.ReadDir(path) diff -Nru temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/zoneinfo.go temporal-1.22.5/src/vendor/github.com/prometheus/procfs/zoneinfo.go --- temporal-1.21.5-1/src/vendor/github.com/prometheus/procfs/zoneinfo.go 2023-09-29 14:03:32.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/prometheus/procfs/zoneinfo.go 2024-02-23 09:46:11.000000000 +0000 @@ -75,11 +75,11 @@ func (fs FS) Zoneinfo() ([]Zoneinfo, error) { data, err := os.ReadFile(fs.proc.Path("zoneinfo")) if err != nil { - return nil, fmt.Errorf("error reading zoneinfo %q: %w", fs.proc.Path("zoneinfo"), err) + return nil, fmt.Errorf("%s: error reading zoneinfo %q: %w", 
ErrFileRead, fs.proc.Path("zoneinfo"), err) } zoneinfo, err := parseZoneinfo(data) if err != nil { - return nil, fmt.Errorf("error parsing zoneinfo %q: %w", fs.proc.Path("zoneinfo"), err) + return nil, fmt.Errorf("%s: error parsing zoneinfo %q: %w", ErrFileParse, fs.proc.Path("zoneinfo"), err) } return zoneinfo, nil } diff -Nru temporal-1.21.5-1/src/vendor/github.com/rivo/uniseg/grapheme.go temporal-1.22.5/src/vendor/github.com/rivo/uniseg/grapheme.go --- temporal-1.21.5-1/src/vendor/github.com/rivo/uniseg/grapheme.go 2023-09-29 14:03:32.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/rivo/uniseg/grapheme.go 2024-02-23 09:46:11.000000000 +0000 @@ -187,8 +187,8 @@ const shiftGraphemePropState = 4 // FirstGraphemeCluster returns the first grapheme cluster found in the given -// byte slice according to the rules of Unicode Standard Annex #29, Grapheme -// Cluster Boundaries. This function can be called continuously to extract all +// byte slice according to the rules of [Unicode Standard Annex #29, Grapheme +// Cluster Boundaries]. This function can be called continuously to extract all // grapheme clusters from a byte slice, as illustrated in the example below. // // If you don't know the current state, for example when calling the function @@ -209,6 +209,8 @@ // While slightly less convenient than using the Graphemes class, this function // has much better performance and makes no allocations. It lends itself well to // large byte slices. +// +// [Unicode Standard Annex #29, Grapheme Cluster Boundaries]: http://unicode.org/reports/tr29/#Grapheme_Cluster_Boundaries func FirstGraphemeCluster(b []byte, state int) (cluster, rest []byte, width, newState int) { // An empty byte slice returns nothing. if len(b) == 0 { diff -Nru temporal-1.21.5-1/src/vendor/github.com/rivo/uniseg/graphemerules.go temporal-1.22.5/src/vendor/github.com/rivo/uniseg/graphemerules.go --- temporal-1.21.5-1/src/vendor/github.com/rivo/uniseg/graphemerules.go 2023-09-29 14:03:32.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/rivo/uniseg/graphemerules.go 2024-02-23 09:46:11.000000000 +0000 @@ -48,7 +48,7 @@ {grControlLF, prAny}: {grAny, grBoundary, 40}, // GB3. - {grCR, prLF}: {grAny, grNoBoundary, 30}, + {grCR, prLF}: {grControlLF, grNoBoundary, 30}, // GB6. {grAny, prL}: {grL, grBoundary, 9990}, diff -Nru temporal-1.21.5-1/src/vendor/github.com/rivo/uniseg/line.go temporal-1.22.5/src/vendor/github.com/rivo/uniseg/line.go --- temporal-1.21.5-1/src/vendor/github.com/rivo/uniseg/line.go 2023-09-29 14:03:32.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/rivo/uniseg/line.go 2024-02-23 09:46:11.000000000 +0000 @@ -4,7 +4,7 @@ // FirstLineSegment returns the prefix of the given byte slice after which a // decision to break the string over to the next line can or must be made, -// according to the rules of Unicode Standard Annex #14. This is used to +// according to the rules of [Unicode Standard Annex #14]. This is used to // implement line breaking. // // Line breaking, also known as word wrapping, is the process of breaking a @@ -35,7 +35,7 @@ // // Given an empty byte slice "b", the function returns nil values. // -// Note that in accordance with UAX #14 LB3, the final segment will end with +// Note that in accordance with [UAX #14 LB3], the final segment will end with // "mustBreak" set to true. You can choose to ignore this by checking if the // length of the "rest" slice is 0 and calling [HasTrailingLineBreak] or // [HasTrailingLineBreakInString] on the last rune. 
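
Note on the prometheus/procfs hunks above (stat.go, swaps.go, thread.go, vm.go, zoneinfo.go): the error messages gain the package's sentinel errors ErrFileParse and ErrFileRead. Where the sentinel is formatted with %w (as in swaps.go and vm.go) it joins the error chain and can be detected with errors.Is; where %s is used only the message text changes. A minimal caller sketch, assuming only the public procfs API shown in these hunks (not part of the patch):

package main

import (
	"errors"
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		panic(err)
	}
	if _, err := fs.Stat(); err != nil {
		switch {
		case errors.Is(err, procfs.ErrFileParse):
			// The file was read but a line could not be parsed.
			fmt.Println("parse failure:", err)
		case errors.Is(err, procfs.ErrFileRead):
			// The file could not be read at all.
			fmt.Println("read failure:", err)
		default:
			fmt.Println("other failure:", err)
		}
	}
}
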
@@ -43,6 +43,9 @@ // Note also that this algorithm may break within grapheme clusters. This is // addressed in Section 8.2 Example 6 of UAX #14. To avoid this, you can use // the [Step] function instead. +// +// [Unicode Standard Annex #14]: https://www.unicode.org/reports/tr14/ +// [UAX #14 LB3]: https://www.unicode.org/reports/tr14/#Algorithm func FirstLineSegment(b []byte, state int) (segment, rest []byte, mustBreak bool, newState int) { // An empty byte slice returns nothing. if len(b) == 0 { diff -Nru temporal-1.21.5-1/src/vendor/github.com/rivo/uniseg/sentence.go temporal-1.22.5/src/vendor/github.com/rivo/uniseg/sentence.go --- temporal-1.21.5-1/src/vendor/github.com/rivo/uniseg/sentence.go 2023-09-29 14:03:32.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/rivo/uniseg/sentence.go 2024-02-23 09:46:11.000000000 +0000 @@ -3,7 +3,7 @@ import "unicode/utf8" // FirstSentence returns the first sentence found in the given byte slice -// according to the rules of Unicode Standard Annex #29, Sentence Boundaries. +// according to the rules of [Unicode Standard Annex #29, Sentence Boundaries]. // This function can be called continuously to extract all sentences from a byte // slice, as illustrated in the example below. // @@ -17,6 +17,8 @@ // slice is the sub-slice of the input slice containing the identified sentence. // // Given an empty byte slice "b", the function returns nil values. +// +// [Unicode Standard Annex #29, Sentence Boundaries]: http://unicode.org/reports/tr29/#Sentence_Boundaries func FirstSentence(b []byte, state int) (sentence, rest []byte, newState int) { // An empty byte slice returns nothing. if len(b) == 0 { diff -Nru temporal-1.21.5-1/src/vendor/github.com/rivo/uniseg/step.go temporal-1.22.5/src/vendor/github.com/rivo/uniseg/step.go --- temporal-1.21.5-1/src/vendor/github.com/rivo/uniseg/step.go 2023-09-29 14:03:32.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/rivo/uniseg/step.go 2024-02-23 09:46:11.000000000 +0000 @@ -83,10 +83,12 @@ // has much better performance and makes no allocations. It lends itself well to // large byte slices. // -// Note that in accordance with UAX #14 LB3, the final segment will end with +// Note that in accordance with [UAX #14 LB3], the final segment will end with // a mandatory line break (boundaries&MaskLine == LineMustBreak). You can choose // to ignore this by checking if the length of the "rest" slice is 0 and calling // [HasTrailingLineBreak] or [HasTrailingLineBreakInString] on the last rune. +// +// [UAX #14 LB3]: https://www.unicode.org/reports/tr14/#Algorithm func Step(b []byte, state int) (cluster, rest []byte, boundaries int, newState int) { // An empty byte slice returns nothing. if len(b) == 0 { diff -Nru temporal-1.21.5-1/src/vendor/github.com/rivo/uniseg/word.go temporal-1.22.5/src/vendor/github.com/rivo/uniseg/word.go --- temporal-1.21.5-1/src/vendor/github.com/rivo/uniseg/word.go 2023-09-29 14:03:32.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/rivo/uniseg/word.go 2024-02-23 09:46:11.000000000 +0000 @@ -3,7 +3,7 @@ import "unicode/utf8" // FirstWord returns the first word found in the given byte slice according to -// the rules of Unicode Standard Annex #29, Word Boundaries. This function can +// the rules of [Unicode Standard Annex #29, Word Boundaries]. This function can // be called continuously to extract all words from a byte slice, as illustrated // in the example below. // @@ -17,6 +17,8 @@ // the sub-slice of the input slice containing the identified word. 
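
Note on the rivo/uniseg hunks above and below: they only convert plain UAX references in the doc comments into Go doc links; the function signatures are unchanged. For orientation, a minimal usage sketch of the documented word iterator (the input string is illustrative):

package main

import (
	"fmt"

	"github.com/rivo/uniseg"
)

func main() {
	b := []byte("Hello, 世界! How are you?")
	state := -1 // -1 means the segmentation state is unknown (first call)
	var word []byte
	for len(b) > 0 {
		// FirstWord returns the next word, the remaining bytes, and the new state.
		word, b, state = uniseg.FirstWord(b, state)
		fmt.Printf("%q\n", word)
	}
}
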
// // Given an empty byte slice "b", the function returns nil values. +// +// [Unicode Standard Annex #29, Word Boundaries]: http://unicode.org/reports/tr29/#Word_Boundaries func FirstWord(b []byte, state int) (word, rest []byte, newState int) { // An empty byte slice returns nothing. if len(b) == 0 { diff -Nru temporal-1.21.5-1/src/vendor/github.com/sirupsen/logrus/README.md temporal-1.22.5/src/vendor/github.com/sirupsen/logrus/README.md --- temporal-1.21.5-1/src/vendor/github.com/sirupsen/logrus/README.md 2023-09-29 14:03:32.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/sirupsen/logrus/README.md 2024-02-23 09:46:11.000000000 +0000 @@ -9,7 +9,7 @@ This does not mean Logrus is dead. Logrus will continue to be maintained for security, (backwards compatible) bug fixes, and performance (where we are -limited by the interface). +limited by the interface). I believe Logrus' biggest contribution is to have played a part in today's widespread use of structured logging in Golang. There doesn't seem to be a @@ -43,7 +43,7 @@ With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash or Splunk: -```json +```text {"animal":"walrus","level":"info","msg":"A group of walrus emerges from the ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"} @@ -99,7 +99,7 @@ ``` Note that this does add measurable overhead - the cost will depend on the version of Go, but is between 20 and 40% in recent tests with 1.6 and 1.7. You can validate this in your -environment via benchmarks: +environment via benchmarks: ``` go test -bench=.*CallerTracing ``` @@ -317,6 +317,8 @@ It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose environment if your application has that. +Note: If you want different log levels for global (`log.SetLevel(...)`) and syslog logging, please check the [syslog hook README](hooks/syslog/README.md#different-log-levels-for-local-and-remote-logging). + #### Entries Besides the fields added with `WithField` or `WithFields` some fields are diff -Nru temporal-1.21.5-1/src/vendor/github.com/sirupsen/logrus/writer.go temporal-1.22.5/src/vendor/github.com/sirupsen/logrus/writer.go --- temporal-1.21.5-1/src/vendor/github.com/sirupsen/logrus/writer.go 2023-09-29 14:03:32.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/sirupsen/logrus/writer.go 2024-02-23 09:46:11.000000000 +0000 @@ -4,6 +4,7 @@ "bufio" "io" "runtime" + "strings" ) // Writer at INFO level. See WriterLevel for details. @@ -20,15 +21,18 @@ return NewEntry(logger).WriterLevel(level) } +// Writer returns an io.Writer that writes to the logger at the info log level func (entry *Entry) Writer() *io.PipeWriter { return entry.WriterLevel(InfoLevel) } +// WriterLevel returns an io.Writer that writes to the logger at the given log level func (entry *Entry) WriterLevel(level Level) *io.PipeWriter { reader, writer := io.Pipe() var printFunc func(args ...interface{}) + // Determine which log function to use based on the specified log level switch level { case TraceLevel: printFunc = entry.Trace @@ -48,23 +52,51 @@ printFunc = entry.Print } + // Start a new goroutine to scan the input and write it to the logger using the specified print function. + // It splits the input into chunks of up to 64KB to avoid buffer overflows. 
go entry.writerScanner(reader, printFunc) + + // Set a finalizer function to close the writer when it is garbage collected runtime.SetFinalizer(writer, writerFinalizer) return writer } +// writerScanner scans the input from the reader and writes it to the logger func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) { scanner := bufio.NewScanner(reader) + + // Set the buffer size to the maximum token size to avoid buffer overflows + scanner.Buffer(make([]byte, bufio.MaxScanTokenSize), bufio.MaxScanTokenSize) + + // Define a split function to split the input into chunks of up to 64KB + chunkSize := bufio.MaxScanTokenSize // 64KB + splitFunc := func(data []byte, atEOF bool) (int, []byte, error) { + if len(data) >= chunkSize { + return chunkSize, data[:chunkSize], nil + } + + return bufio.ScanLines(data, atEOF) + } + + // Use the custom split function to split the input + scanner.Split(splitFunc) + + // Scan the input and write it to the logger using the specified print function for scanner.Scan() { - printFunc(scanner.Text()) + printFunc(strings.TrimRight(scanner.Text(), "\r\n")) } + + // If there was an error while scanning the input, log an error if err := scanner.Err(); err != nil { entry.Errorf("Error while reading from Writer: %s", err) } + + // Close the reader when we are done reader.Close() } +// WriterFinalizer is a finalizer function that closes then given writer when it is garbage collected func writerFinalizer(writer *io.PipeWriter) { writer.Close() } diff -Nru temporal-1.21.5-1/src/vendor/github.com/twmb/murmur3/.travis.yml temporal-1.22.5/src/vendor/github.com/twmb/murmur3/.travis.yml --- temporal-1.21.5-1/src/vendor/github.com/twmb/murmur3/.travis.yml 2023-09-29 14:03:32.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/twmb/murmur3/.travis.yml 1970-01-01 00:00:00.000000000 +0000 @@ -1,10 +0,0 @@ -sudo: false - -language: go - -go: - - "1.10" - - "1.11" - -notifications: - email: false diff -Nru temporal-1.21.5-1/src/vendor/github.com/twmb/murmur3/murmur.go temporal-1.22.5/src/vendor/github.com/twmb/murmur3/murmur.go --- temporal-1.21.5-1/src/vendor/github.com/twmb/murmur3/murmur.go 2023-09-29 14:03:32.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/twmb/murmur3/murmur.go 2024-02-23 09:46:12.000000000 +0000 @@ -9,11 +9,6 @@ // architectures. 
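
Note on the sirupsen/logrus writer.go hunk above: writerScanner now caps the scanner buffer at bufio.MaxScanTokenSize and installs a split function that emits 64KB chunks, so a single very long line no longer aborts the pipe with bufio.ErrTooLong. A usage sketch relying only on the public logrus API (the field name and the final sleep are illustrative):

package main

import (
	stdlog "log"
	"time"

	log "github.com/sirupsen/logrus"
)

func main() {
	// Bridge the standard library logger into logrus at WARN level.
	w := log.WithField("component", "demo").WriterLevel(log.WarnLevel)
	defer w.Close()

	stdlog.SetOutput(w)
	stdlog.Println("this line is re-emitted as a logrus WARN entry")

	// The writer feeds a goroutine through an io.Pipe; give it a moment
	// to flush before the program exits (sketch only).
	time.Sleep(100 * time.Millisecond)
}
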
package murmur3 -import ( - "reflect" - "unsafe" -) - type bmixer interface { bmix(p []byte) (tail []byte) Size() (n int) @@ -61,12 +56,3 @@ d.tail = nil d.bmixer.reset() } - -func strslice(slice []byte) string { - var str string - slicehdr := ((*reflect.SliceHeader)(unsafe.Pointer(&slice))) - strhdr := (*reflect.StringHeader)(unsafe.Pointer(&str)) - strhdr.Data = slicehdr.Data - strhdr.Len = slicehdr.Len - return str -} diff -Nru temporal-1.21.5-1/src/vendor/github.com/twmb/murmur3/murmur128_amd64.s temporal-1.22.5/src/vendor/github.com/twmb/murmur3/murmur128_amd64.s --- temporal-1.21.5-1/src/vendor/github.com/twmb/murmur3/murmur128_amd64.s 2023-09-29 14:03:32.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/twmb/murmur3/murmur128_amd64.s 2024-02-23 09:46:12.000000000 +0000 @@ -1,4 +1,5 @@ -// +build go1.5,amd64 +//go:build go1.5 && amd64 && !gccgo +// +build go1.5,amd64,!gccgo // SeedSum128(seed1, seed2 uint64, data []byte) (h1 uint64, h2 uint64) TEXT ·SeedSum128(SB), $0-56 diff -Nru temporal-1.21.5-1/src/vendor/github.com/twmb/murmur3/murmur128_decl.go temporal-1.22.5/src/vendor/github.com/twmb/murmur3/murmur128_decl.go --- temporal-1.21.5-1/src/vendor/github.com/twmb/murmur3/murmur128_decl.go 2023-09-29 14:03:32.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/twmb/murmur3/murmur128_decl.go 2024-02-23 09:46:12.000000000 +0000 @@ -1,4 +1,5 @@ -// +build go1.5,amd64 +//go:build go1.5 && amd64 && !gccgo +// +build go1.5,amd64,!gccgo package murmur3 @@ -6,9 +7,10 @@ // Sum128 returns the murmur3 sum of data. It is equivalent to the following // sequence (without the extra burden and the extra allocation): -// hasher := New128() -// hasher.Write(data) -// return hasher.Sum128() +// +// hasher := New128() +// hasher.Write(data) +// return hasher.Sum128() func Sum128(data []byte) (h1 uint64, h2 uint64) //go:noescape diff -Nru temporal-1.21.5-1/src/vendor/github.com/twmb/murmur3/murmur128_gen.go temporal-1.22.5/src/vendor/github.com/twmb/murmur3/murmur128_gen.go --- temporal-1.21.5-1/src/vendor/github.com/twmb/murmur3/murmur128_gen.go 2023-09-29 14:03:32.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/twmb/murmur3/murmur128_gen.go 2024-02-23 09:46:12.000000000 +0000 @@ -1,9 +1,20 @@ -// +build !go1.5 !amd64 +//go:build !go1.5 || !amd64 || gccgo +// +build !go1.5 !amd64 gccgo package murmur3 import "math/bits" +// Sum128 returns the murmur3 sum of data. It is equivalent to the following +// sequence (without the extra burden and the extra allocation): +// +// hasher := New128() +// hasher.Write(data) +// return hasher.Sum128() +func Sum128(data []byte) (h1 uint64, h2 uint64) { + return SeedSum128(0, 0, data) +} + // SeedSum128 returns the murmur3 sum of data with digests initialized to seed1 // and seed2. // @@ -13,16 +24,106 @@ // This reads and processes the data in chunks of little endian uint64s; // thus, the returned hashes are portable across architectures. 
func SeedSum128(seed1, seed2 uint64, data []byte) (h1 uint64, h2 uint64) { - return SeedStringSum128(seed1, seed2, strslice(data)) -} + h1, h2 = seed1, seed2 + clen := len(data) + for len(data) >= 16 { + // yes, this is faster than using binary.LittleEndian.Uint64 + k1 := uint64(data[0]) | uint64(data[1])<<8 | uint64(data[2])<<16 | uint64(data[3])<<24 | uint64(data[4])<<32 | uint64(data[5])<<40 | uint64(data[6])<<48 | uint64(data[7])<<56 + k2 := uint64(data[8]) | uint64(data[9])<<8 | uint64(data[10])<<16 | uint64(data[11])<<24 | uint64(data[12])<<32 | uint64(data[13])<<40 | uint64(data[14])<<48 | uint64(data[15])<<56 -// Sum128 returns the murmur3 sum of data. It is equivalent to the following -// sequence (without the extra burden and the extra allocation): -// hasher := New128() -// hasher.Write(data) -// return hasher.Sum128() -func Sum128(data []byte) (h1 uint64, h2 uint64) { - return SeedStringSum128(0, 0, strslice(data)) + data = data[16:] + + k1 *= c1_128 + k1 = bits.RotateLeft64(k1, 31) + k1 *= c2_128 + h1 ^= k1 + + h1 = bits.RotateLeft64(h1, 27) + h1 += h2 + h1 = h1*5 + 0x52dce729 + + k2 *= c2_128 + k2 = bits.RotateLeft64(k2, 33) + k2 *= c1_128 + h2 ^= k2 + + h2 = bits.RotateLeft64(h2, 31) + h2 += h1 + h2 = h2*5 + 0x38495ab5 + } + + var k1, k2 uint64 + switch len(data) { + case 15: + k2 ^= uint64(data[14]) << 48 + fallthrough + case 14: + k2 ^= uint64(data[13]) << 40 + fallthrough + case 13: + k2 ^= uint64(data[12]) << 32 + fallthrough + case 12: + k2 ^= uint64(data[11]) << 24 + fallthrough + case 11: + k2 ^= uint64(data[10]) << 16 + fallthrough + case 10: + k2 ^= uint64(data[9]) << 8 + fallthrough + case 9: + k2 ^= uint64(data[8]) << 0 + + k2 *= c2_128 + k2 = bits.RotateLeft64(k2, 33) + k2 *= c1_128 + h2 ^= k2 + + fallthrough + + case 8: + k1 ^= uint64(data[7]) << 56 + fallthrough + case 7: + k1 ^= uint64(data[6]) << 48 + fallthrough + case 6: + k1 ^= uint64(data[5]) << 40 + fallthrough + case 5: + k1 ^= uint64(data[4]) << 32 + fallthrough + case 4: + k1 ^= uint64(data[3]) << 24 + fallthrough + case 3: + k1 ^= uint64(data[2]) << 16 + fallthrough + case 2: + k1 ^= uint64(data[1]) << 8 + fallthrough + case 1: + k1 ^= uint64(data[0]) << 0 + k1 *= c1_128 + k1 = bits.RotateLeft64(k1, 31) + k1 *= c2_128 + h1 ^= k1 + } + + h1 ^= uint64(clen) + h2 ^= uint64(clen) + + h1 += h2 + h2 += h1 + + h1 = fmix64(h1) + h2 = fmix64(h2) + + h1 += h2 + h2 += h1 + + return h1, h2 } // StringSum128 is the string version of Sum128. diff -Nru temporal-1.21.5-1/src/vendor/github.com/twmb/murmur3/murmur32_gen.go temporal-1.22.5/src/vendor/github.com/twmb/murmur3/murmur32_gen.go --- temporal-1.21.5-1/src/vendor/github.com/twmb/murmur3/murmur32_gen.go 2023-09-29 14:03:32.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/twmb/murmur3/murmur32_gen.go 2024-02-23 09:46:12.000000000 +0000 @@ -2,22 +2,61 @@ import "math/bits" +// Sum32 returns the murmur3 sum of data. It is equivalent to the following +// sequence (without the extra burden and the extra allocation): +// +// hasher := New32() +// hasher.Write(data) +// return hasher.Sum32() +func Sum32(data []byte) uint32 { + return SeedSum32(0, data) +} + // SeedSum32 returns the murmur3 sum of data with the digest initialized to // seed. // // This reads and processes the data in chunks of little endian uint32s; // thus, the returned hash is portable across architectures. 
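
Note on the twmb/murmur3 hunks above and below: the reflect/unsafe strslice helper is removed, the byte-slice entry points (Sum32, Sum128 and their seeded variants) now compute the hash directly, and the build tags gain a !gccgo constraint on the assembly path. A minimal usage sketch of the public API shown in these hunks (the input bytes are illustrative):

package main

import (
	"fmt"

	"github.com/twmb/murmur3"
)

func main() {
	data := []byte("hello, temporal")

	h32 := murmur3.Sum32(data)
	h1, h2 := murmur3.Sum128(data)

	// Seeded variants remain available; per the hunks above, the byte-slice
	// paths no longer round-trip through an unsafe []byte-to-string cast.
	s32 := murmur3.SeedSum32(42, data)

	fmt.Println(h32, h1, h2, s32)
}
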
func SeedSum32(seed uint32, data []byte) (h1 uint32) { - return SeedStringSum32(seed, strslice(data)) -} + h1 = seed + clen := uint32(len(data)) + for len(data) >= 4 { + k1 := uint32(data[0]) | uint32(data[1])<<8 | uint32(data[2])<<16 | uint32(data[3])<<24 + data = data[4:] -// Sum32 returns the murmur3 sum of data. It is equivalent to the following -// sequence (without the extra burden and the extra allocation): -// hasher := New32() -// hasher.Write(data) -// return hasher.Sum32() -func Sum32(data []byte) uint32 { - return SeedStringSum32(0, strslice(data)) + k1 *= c1_32 + k1 = bits.RotateLeft32(k1, 15) + k1 *= c2_32 + + h1 ^= k1 + h1 = bits.RotateLeft32(h1, 13) + h1 = h1*5 + 0xe6546b64 + } + var k1 uint32 + switch len(data) { + case 3: + k1 ^= uint32(data[2]) << 16 + fallthrough + case 2: + k1 ^= uint32(data[1]) << 8 + fallthrough + case 1: + k1 ^= uint32(data[0]) + k1 *= c1_32 + k1 = bits.RotateLeft32(k1, 15) + k1 *= c2_32 + h1 ^= k1 + } + + h1 ^= uint32(clen) + + h1 ^= h1 >> 16 + h1 *= 0x85ebca6b + h1 ^= h1 >> 13 + h1 *= 0xc2b2ae35 + h1 ^= h1 >> 16 + + return h1 } // StringSum32 is the string version of Sum32. diff -Nru temporal-1.21.5-1/src/vendor/github.com/uber-go/tally/v4/README.md temporal-1.22.5/src/vendor/github.com/uber-go/tally/v4/README.md --- temporal-1.21.5-1/src/vendor/github.com/uber-go/tally/v4/README.md 2023-09-29 14:03:32.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/uber-go/tally/v4/README.md 2024-02-23 09:46:12.000000000 +0000 @@ -58,7 +58,7 @@ ```go import ( "io" - "github.com/cactus/go-statsd-client/statsd" + "github.com/cactus/go-statsd-client/v5/statsd" "github.com/uber-go/tally" tallystatsd "github.com/uber-go/tally/statsd" // ... diff -Nru temporal-1.21.5-1/src/vendor/github.com/uber-go/tally/v4/scope_registry.go temporal-1.22.5/src/vendor/github.com/uber-go/tally/v4/scope_registry.go --- temporal-1.21.5-1/src/vendor/github.com/uber-go/tally/v4/scope_registry.go 2023-09-29 14:03:32.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/uber-go/tally/v4/scope_registry.go 2024-02-23 09:46:12.000000000 +0000 @@ -22,7 +22,6 @@ import ( "hash/maphash" - "log" "runtime" "sync" "unsafe" @@ -126,11 +125,11 @@ func (r *scopeRegistry) ForEachScope(f func(*scope)) { for _, subscopeBucket := range r.subscopes { + subscopeBucket.mu.RLock() for _, s := range subscopeBucket.s { - subscopeBucket.mu.RLock() f(s) - subscopeBucket.mu.RUnlock() } + subscopeBucket.mu.RUnlock() } } @@ -262,7 +261,6 @@ counters.Add(rootCounters.Load()) gauges.Add(rootGauges.Load()) histograms.Add(rootHistograms.Load()) - log.Printf("counters: %v, gauges: %v, histograms: %v\n", counters.Load(), gauges.Load(), histograms.Load()) if r.root.reporter != nil { r.root.reporter.ReportCounter(r.sanitizedCounterCardinalityName, internalTags, counters.Load()) diff -Nru temporal-1.21.5-1/src/vendor/github.com/uber-go/tally/v4/statsd/reporter.go temporal-1.22.5/src/vendor/github.com/uber-go/tally/v4/statsd/reporter.go --- temporal-1.21.5-1/src/vendor/github.com/uber-go/tally/v4/statsd/reporter.go 2023-09-29 14:03:32.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/uber-go/tally/v4/statsd/reporter.go 2024-02-23 09:46:12.000000000 +0000 @@ -26,7 +26,7 @@ "strconv" "time" - "github.com/cactus/go-statsd-client/statsd" + "github.com/cactus/go-statsd-client/v5/statsd" tally "github.com/uber-go/tally/v4" ) diff -Nru temporal-1.21.5-1/src/vendor/github.com/uber-go/tally/v4/version.go temporal-1.22.5/src/vendor/github.com/uber-go/tally/v4/version.go --- 
temporal-1.21.5-1/src/vendor/github.com/uber-go/tally/v4/version.go 2023-09-29 14:03:32.000000000 +0000 +++ temporal-1.22.5/src/vendor/github.com/uber-go/tally/v4/version.go 2024-02-23 09:46:12.000000000 +0000 @@ -21,4 +21,4 @@ package tally // Version is the current version of the library. -const Version = "4.1.6" +const Version = "4.1.7" diff -Nru temporal-1.21.5-1/src/vendor/go.opentelemetry.io/proto/otlp/collector/metrics/v1/metrics_service.pb.go temporal-1.22.5/src/vendor/go.opentelemetry.io/proto/otlp/collector/metrics/v1/metrics_service.pb.go --- temporal-1.21.5-1/src/vendor/go.opentelemetry.io/proto/otlp/collector/metrics/v1/metrics_service.pb.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.opentelemetry.io/proto/otlp/collector/metrics/v1/metrics_service.pb.go 2024-02-23 09:46:13.000000000 +0000 @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.17.3 +// protoc v3.21.6 // source: opentelemetry/proto/collector/metrics/v1/metrics_service.proto package v1 diff -Nru temporal-1.21.5-1/src/vendor/go.opentelemetry.io/proto/otlp/collector/metrics/v1/metrics_service.pb.gw.go temporal-1.22.5/src/vendor/go.opentelemetry.io/proto/otlp/collector/metrics/v1/metrics_service.pb.gw.go --- temporal-1.21.5-1/src/vendor/go.opentelemetry.io/proto/otlp/collector/metrics/v1/metrics_service.pb.gw.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.opentelemetry.io/proto/otlp/collector/metrics/v1/metrics_service.pb.gw.go 2024-02-23 09:46:13.000000000 +0000 @@ -77,20 +77,22 @@ var stream runtime.ServerTransportStream ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/opentelemetry.proto.collector.metrics.v1.MetricsService/Export", runtime.WithHTTPPathPattern("/v1/metrics")) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/opentelemetry.proto.collector.metrics.v1.MetricsService/Export", runtime.WithHTTPPathPattern("/v1/metrics")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_MetricsService_Export_0(rctx, inboundMarshaler, server, req, pathParams) + resp, md, err := local_request_MetricsService_Export_0(annotatedContext, inboundMarshaler, server, req, pathParams) md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_MetricsService_Export_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_MetricsService_Export_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -139,19 +141,21 @@ ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req, "/opentelemetry.proto.collector.metrics.v1.MetricsService/Export", runtime.WithHTTPPathPattern("/v1/metrics")) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/opentelemetry.proto.collector.metrics.v1.MetricsService/Export", runtime.WithHTTPPathPattern("/v1/metrics")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := request_MetricsService_Export_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := request_MetricsService_Export_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_MetricsService_Export_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_MetricsService_Export_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) diff -Nru temporal-1.21.5-1/src/vendor/go.opentelemetry.io/proto/otlp/collector/metrics/v1/metrics_service_grpc.pb.go temporal-1.22.5/src/vendor/go.opentelemetry.io/proto/otlp/collector/metrics/v1/metrics_service_grpc.pb.go --- temporal-1.21.5-1/src/vendor/go.opentelemetry.io/proto/otlp/collector/metrics/v1/metrics_service_grpc.pb.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.opentelemetry.io/proto/otlp/collector/metrics/v1/metrics_service_grpc.pb.go 2024-02-23 09:46:13.000000000 +0000 @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: // - protoc-gen-go-grpc v1.1.0 -// - protoc v3.17.3 +// - protoc v3.21.6 // source: opentelemetry/proto/collector/metrics/v1/metrics_service.proto package v1 diff -Nru temporal-1.21.5-1/src/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.go temporal-1.22.5/src/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.go --- temporal-1.21.5-1/src/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.go 2024-02-23 09:46:13.000000000 +0000 @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
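
Note on the regenerated OTLP collector trace code that follows: besides the protoc version bump and the annotated-context handling, the trace gateway's HTTP path pattern changes from "/v1/trace" to "/v1/traces", the plural form used by the OTLP/HTTP specification. A hedged client sketch; the localhost:4318 endpoint is the conventional OTLP/HTTP port and is an assumption, not part of the patch:

package main

import (
	"bytes"
	"fmt"
	"net/http"

	collectortracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1"
	"google.golang.org/protobuf/proto"
)

func main() {
	// An empty export request; a real client would populate ResourceSpans.
	req := &collectortracepb.ExportTraceServiceRequest{}
	body, err := proto.Marshal(req)
	if err != nil {
		panic(err)
	}
	// Note the plural path segment introduced by this change: /v1/traces.
	resp, err := http.Post("http://localhost:4318/v1/traces",
		"application/x-protobuf", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
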
// versions: // protoc-gen-go v1.26.0 -// protoc v3.17.3 +// protoc v3.21.6 // source: opentelemetry/proto/collector/trace/v1/trace_service.proto package v1 diff -Nru temporal-1.21.5-1/src/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.gw.go temporal-1.22.5/src/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.gw.go --- temporal-1.21.5-1/src/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.gw.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.gw.go 2024-02-23 09:46:13.000000000 +0000 @@ -77,20 +77,22 @@ var stream runtime.ServerTransportStream ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/opentelemetry.proto.collector.trace.v1.TraceService/Export", runtime.WithHTTPPathPattern("/v1/trace")) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/opentelemetry.proto.collector.trace.v1.TraceService/Export", runtime.WithHTTPPathPattern("/v1/traces")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_TraceService_Export_0(rctx, inboundMarshaler, server, req, pathParams) + resp, md, err := local_request_TraceService_Export_0(annotatedContext, inboundMarshaler, server, req, pathParams) md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_TraceService_Export_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_TraceService_Export_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -139,19 +141,21 @@ ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req, "/opentelemetry.proto.collector.trace.v1.TraceService/Export", runtime.WithHTTPPathPattern("/v1/trace")) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/opentelemetry.proto.collector.trace.v1.TraceService/Export", runtime.WithHTTPPathPattern("/v1/traces")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := request_TraceService_Export_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := request_TraceService_Export_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_TraceService_Export_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ forward_TraceService_Export_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -159,7 +163,7 @@ } var ( - pattern_TraceService_Export_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "trace"}, "")) + pattern_TraceService_Export_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "traces"}, "")) ) var ( diff -Nru temporal-1.21.5-1/src/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go temporal-1.22.5/src/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go --- temporal-1.21.5-1/src/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go 2024-02-23 09:46:13.000000000 +0000 @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: // - protoc-gen-go-grpc v1.1.0 -// - protoc v3.17.3 +// - protoc v3.21.6 // source: opentelemetry/proto/collector/trace/v1/trace_service.proto package v1 diff -Nru temporal-1.21.5-1/src/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go temporal-1.22.5/src/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go --- temporal-1.21.5-1/src/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go 2024-02-23 09:46:13.000000000 +0000 @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.17.3 +// protoc v3.21.6 // source: opentelemetry/proto/common/v1/common.proto package v1 @@ -361,8 +361,11 @@ unknownFields protoimpl.UnknownFields // An empty instrumentation scope name means the name is unknown. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` + // Additional attributes that describe the scope. [Optional]. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). Attributes []*KeyValue `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty"` DroppedAttributesCount uint32 `protobuf:"varint,4,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` } diff -Nru temporal-1.21.5-1/src/vendor/go.opentelemetry.io/proto/otlp/metrics/v1/metrics.pb.go temporal-1.22.5/src/vendor/go.opentelemetry.io/proto/otlp/metrics/v1/metrics.pb.go --- temporal-1.21.5-1/src/vendor/go.opentelemetry.io/proto/otlp/metrics/v1/metrics.pb.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.opentelemetry.io/proto/otlp/metrics/v1/metrics.pb.go 2024-02-23 09:46:13.000000000 +0000 @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.17.3 +// protoc v3.21.6 // source: opentelemetry/proto/metrics/v1/metrics.proto package v1 @@ -153,27 +153,29 @@ // enum is a bit-mask. 
To test the presence of a single flag in the flags of // a data point, for example, use an expression like: // -// (point.flags & FLAG_NO_RECORDED_VALUE) == FLAG_NO_RECORDED_VALUE +// (point.flags & DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK) == DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK // type DataPointFlags int32 const ( - DataPointFlags_FLAG_NONE DataPointFlags = 0 + // The zero value for the enum. Should not be used for comparisons. + // Instead use bitwise "and" with the appropriate mask as shown above. + DataPointFlags_DATA_POINT_FLAGS_DO_NOT_USE DataPointFlags = 0 // This DataPoint is valid but has no recorded value. This value // SHOULD be used to reflect explicitly missing data in a series, as // for an equivalent to the Prometheus "staleness marker". - DataPointFlags_FLAG_NO_RECORDED_VALUE DataPointFlags = 1 + DataPointFlags_DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK DataPointFlags = 1 ) // Enum value maps for DataPointFlags. var ( DataPointFlags_name = map[int32]string{ - 0: "FLAG_NONE", - 1: "FLAG_NO_RECORDED_VALUE", + 0: "DATA_POINT_FLAGS_DO_NOT_USE", + 1: "DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK", } DataPointFlags_value = map[string]int32{ - "FLAG_NONE": 0, - "FLAG_NO_RECORDED_VALUE": 1, + "DATA_POINT_FLAGS_DO_NOT_USE": 0, + "DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK": 1, } ) @@ -1312,8 +1314,8 @@ // base = (2^(2^-scale)) // // The histogram bucket identified by `index`, a signed integer, - // contains values that are greater than or equal to (base^index) and - // less than (base^(index+1)). + // contains values that are greater than (base^index) and + // less than or equal to (base^(index+1)). // // The positive and negative ranges of the histogram are expressed // separately. Negative values are mapped by their absolute value @@ -1345,6 +1347,13 @@ Min *float64 `protobuf:"fixed64,12,opt,name=min,proto3,oneof" json:"min,omitempty"` // max is the maximum value over (start_time, end_time]. Max *float64 `protobuf:"fixed64,13,opt,name=max,proto3,oneof" json:"max,omitempty"` + // ZeroThreshold may be optionally set to convey the width of the zero + // region. Where the zero region is defined as the closed interval + // [-ZeroThreshold, ZeroThreshold]. + // When ZeroThreshold is 0, zero count bucket stores values that cannot be + // expressed using the standard exponential formula as well as values that + // have been rounded to zero. + ZeroThreshold float64 `protobuf:"fixed64,14,opt,name=zero_threshold,json=zeroThreshold,proto3" json:"zero_threshold,omitempty"` } func (x *ExponentialHistogramDataPoint) Reset() { @@ -1470,6 +1479,13 @@ return 0 } +func (x *ExponentialHistogramDataPoint) GetZeroThreshold() float64 { + if x != nil { + return x.ZeroThreshold + } + return 0 +} + // SummaryDataPoint is a single data point in a timeseries that describes the // time-varying values of a Summary metric. type SummaryDataPoint struct { @@ -1737,9 +1753,9 @@ // // Note: This uses a varint encoding as a simple form of compression. Offset int32 `protobuf:"zigzag32,1,opt,name=offset,proto3" json:"offset,omitempty"` - // Count is an array of counts, where count[i] carries the count - // of the bucket at index (offset+i). count[i] is the count of - // values greater than or equal to base^(offset+i) and less than + // bucket_counts is an array of count values, where bucket_counts[i] carries + // the count of the bucket at index (offset+i). bucket_counts[i] is the count + // of values greater than base^(offset+i) and less than or equal to // base^(offset+i+1). 
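
Note on the metrics.pb.go hunk above: the DataPointFlags values are renamed (FLAG_NONE becomes DATA_POINT_FLAGS_DO_NOT_USE, FLAG_NO_RECORDED_VALUE becomes DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK) and ExponentialHistogramDataPoint gains a zero_threshold field. A sketch of the bit-mask check described in the comment above; the helper and sample data points are hypothetical:

package main

import (
	"fmt"

	metricspb "go.opentelemetry.io/proto/otlp/metrics/v1"
)

// hasNoRecordedValue applies the documented mask comparison to a data point.
func hasNoRecordedValue(p *metricspb.NumberDataPoint) bool {
	mask := uint32(metricspb.DataPointFlags_DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK)
	return p.GetFlags()&mask == mask
}

func main() {
	fmt.Println(hasNoRecordedValue(&metricspb.NumberDataPoint{Flags: 1})) // true
	fmt.Println(hasNoRecordedValue(&metricspb.NumberDataPoint{}))         // false
}
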
// // Note: By contrast, the explicit HistogramDataPoint uses @@ -2044,7 +2060,7 @@ 0x12, 0x15, 0x0a, 0x03, 0x6d, 0x61, 0x78, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x01, 0x48, 0x02, 0x52, 0x03, 0x6d, 0x61, 0x78, 0x88, 0x01, 0x01, 0x42, 0x06, 0x0a, 0x04, 0x5f, 0x73, 0x75, 0x6d, 0x42, 0x06, 0x0a, 0x04, 0x5f, 0x6d, 0x69, 0x6e, 0x42, 0x06, 0x0a, 0x04, 0x5f, 0x6d, 0x61, 0x78, 0x4a, - 0x04, 0x08, 0x01, 0x10, 0x02, 0x22, 0xd3, 0x05, 0x0a, 0x1d, 0x45, 0x78, 0x70, 0x6f, 0x6e, 0x65, + 0x04, 0x08, 0x01, 0x10, 0x02, 0x22, 0xfa, 0x05, 0x0a, 0x1d, 0x45, 0x78, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x44, 0x61, 0x74, 0x61, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, @@ -2083,78 +2099,82 @@ 0x61, 0x72, 0x52, 0x09, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x73, 0x12, 0x15, 0x0a, 0x03, 0x6d, 0x69, 0x6e, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x01, 0x48, 0x01, 0x52, 0x03, 0x6d, 0x69, 0x6e, 0x88, 0x01, 0x01, 0x12, 0x15, 0x0a, 0x03, 0x6d, 0x61, 0x78, 0x18, 0x0d, 0x20, 0x01, 0x28, - 0x01, 0x48, 0x02, 0x52, 0x03, 0x6d, 0x61, 0x78, 0x88, 0x01, 0x01, 0x1a, 0x46, 0x0a, 0x07, 0x42, - 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x11, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x23, - 0x0a, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x04, 0x52, 0x0c, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x43, 0x6f, 0x75, - 0x6e, 0x74, 0x73, 0x42, 0x06, 0x0a, 0x04, 0x5f, 0x73, 0x75, 0x6d, 0x42, 0x06, 0x0a, 0x04, 0x5f, - 0x6d, 0x69, 0x6e, 0x42, 0x06, 0x0a, 0x04, 0x5f, 0x6d, 0x61, 0x78, 0x22, 0xa6, 0x03, 0x0a, 0x10, - 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x44, 0x61, 0x74, 0x61, 0x50, 0x6f, 0x69, 0x6e, 0x74, - 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x07, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, - 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, - 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x61, - 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x2f, 0x0a, 0x14, 0x73, 0x74, 0x61, - 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x6e, 0x61, 0x6e, - 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x06, 0x52, 0x11, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, - 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e, 0x6f, 0x12, 0x24, 0x0a, 0x0e, 0x74, 0x69, - 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x06, 0x52, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e, 0x6f, - 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x06, 0x52, - 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x75, 0x6d, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x01, 0x52, 0x03, 0x73, 0x75, 0x6d, 0x12, 0x69, 0x0a, 0x0f, 0x71, 0x75, 0x61, 0x6e, - 0x74, 0x69, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x40, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, - 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, - 0x76, 0x31, 0x2e, 0x53, 0x75, 
0x6d, 0x6d, 0x61, 0x72, 0x79, 0x44, 0x61, 0x74, 0x61, 0x50, 0x6f, - 0x69, 0x6e, 0x74, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x41, 0x74, 0x51, 0x75, 0x61, 0x6e, 0x74, - 0x69, 0x6c, 0x65, 0x52, 0x0e, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x08, 0x20, 0x01, - 0x28, 0x0d, 0x52, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x1a, 0x43, 0x0a, 0x0f, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x41, 0x74, 0x51, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x12, 0x1a, 0x0a, 0x08, - 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x08, - 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x4a, 0x04, - 0x08, 0x01, 0x10, 0x02, 0x22, 0x85, 0x02, 0x0a, 0x08, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, - 0x72, 0x12, 0x58, 0x0a, 0x13, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x65, 0x64, 0x5f, 0x61, 0x74, - 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, - 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, - 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x12, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x65, - 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x74, - 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x06, 0x52, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e, - 0x6f, 0x12, 0x1d, 0x0a, 0x09, 0x61, 0x73, 0x5f, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x01, 0x48, 0x00, 0x52, 0x08, 0x61, 0x73, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, - 0x12, 0x17, 0x0a, 0x06, 0x61, 0x73, 0x5f, 0x69, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x10, - 0x48, 0x00, 0x52, 0x05, 0x61, 0x73, 0x49, 0x6e, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x73, 0x70, 0x61, - 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x73, 0x70, 0x61, 0x6e, - 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x74, 0x72, 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, 0x07, 0x0a, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x2a, 0x8c, 0x01, 0x0a, - 0x16, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x65, 0x6d, 0x70, - 0x6f, 0x72, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x27, 0x0a, 0x23, 0x41, 0x47, 0x47, 0x52, 0x45, + 0x01, 0x48, 0x02, 0x52, 0x03, 0x6d, 0x61, 0x78, 0x88, 0x01, 0x01, 0x12, 0x25, 0x0a, 0x0e, 0x7a, + 0x65, 0x72, 0x6f, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x0e, 0x20, + 0x01, 0x28, 0x01, 0x52, 0x0d, 0x7a, 0x65, 0x72, 0x6f, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, + 0x6c, 0x64, 0x1a, 0x46, 0x0a, 0x07, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x16, 0x0a, + 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x11, 0x52, 0x06, 0x6f, + 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, + 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x04, 0x52, 0x0c, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x42, 0x06, 0x0a, 0x04, 0x5f, 0x73, + 0x75, 0x6d, 0x42, 0x06, 0x0a, 0x04, 0x5f, 0x6d, 0x69, 
0x6e, 0x42, 0x06, 0x0a, 0x04, 0x5f, 0x6d, + 0x61, 0x78, 0x22, 0xa6, 0x03, 0x0a, 0x10, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x44, 0x61, + 0x74, 0x61, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, + 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, + 0x12, 0x2f, 0x0a, 0x14, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, + 0x6e, 0x69, 0x78, 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x06, 0x52, 0x11, + 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e, + 0x6f, 0x12, 0x24, 0x0a, 0x0e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x6e, + 0x61, 0x6e, 0x6f, 0x18, 0x03, 0x20, 0x01, 0x28, 0x06, 0x52, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x55, + 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e, 0x6f, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x06, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x10, 0x0a, + 0x03, 0x73, 0x75, 0x6d, 0x18, 0x05, 0x20, 0x01, 0x28, 0x01, 0x52, 0x03, 0x73, 0x75, 0x6d, 0x12, + 0x69, 0x0a, 0x0f, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, + 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, + 0x79, 0x44, 0x61, 0x74, 0x61, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x41, 0x74, 0x51, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x52, 0x0e, 0x71, 0x75, 0x61, 0x6e, + 0x74, 0x69, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6c, + 0x61, 0x67, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, + 0x1a, 0x43, 0x0a, 0x0f, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x41, 0x74, 0x51, 0x75, 0x61, 0x6e, 0x74, + 0x69, 0x6c, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x12, + 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x22, 0x85, 0x02, 0x0a, 0x08, + 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x12, 0x58, 0x0a, 0x13, 0x66, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, + 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, + 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x12, + 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x65, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, + 0x65, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f, + 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x06, 0x52, 0x0c, 0x74, 0x69, 0x6d, 0x65, + 0x55, 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e, 0x6f, 0x12, 0x1d, 0x0a, 0x09, 0x61, 
0x73, 0x5f, 0x64, + 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x01, 0x48, 0x00, 0x52, 0x08, 0x61, + 0x73, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x12, 0x17, 0x0a, 0x06, 0x61, 0x73, 0x5f, 0x69, 0x6e, + 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x10, 0x48, 0x00, 0x52, 0x05, 0x61, 0x73, 0x49, 0x6e, 0x74, + 0x12, 0x17, 0x0a, 0x07, 0x73, 0x70, 0x61, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x06, 0x73, 0x70, 0x61, 0x6e, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x72, 0x61, + 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x74, 0x72, 0x61, + 0x63, 0x65, 0x49, 0x64, 0x42, 0x07, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x4a, 0x04, 0x08, + 0x01, 0x10, 0x02, 0x2a, 0x8c, 0x01, 0x0a, 0x16, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x54, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x27, + 0x0a, 0x23, 0x41, 0x47, 0x47, 0x52, 0x45, 0x47, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x54, 0x45, + 0x4d, 0x50, 0x4f, 0x52, 0x41, 0x4c, 0x49, 0x54, 0x59, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, + 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x21, 0x0a, 0x1d, 0x41, 0x47, 0x47, 0x52, 0x45, 0x47, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x54, 0x45, 0x4d, 0x50, 0x4f, 0x52, 0x41, 0x4c, 0x49, - 0x54, 0x59, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, - 0x12, 0x21, 0x0a, 0x1d, 0x41, 0x47, 0x47, 0x52, 0x45, 0x47, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, - 0x54, 0x45, 0x4d, 0x50, 0x4f, 0x52, 0x41, 0x4c, 0x49, 0x54, 0x59, 0x5f, 0x44, 0x45, 0x4c, 0x54, - 0x41, 0x10, 0x01, 0x12, 0x26, 0x0a, 0x22, 0x41, 0x47, 0x47, 0x52, 0x45, 0x47, 0x41, 0x54, 0x49, - 0x4f, 0x4e, 0x5f, 0x54, 0x45, 0x4d, 0x50, 0x4f, 0x52, 0x41, 0x4c, 0x49, 0x54, 0x59, 0x5f, 0x43, - 0x55, 0x4d, 0x55, 0x4c, 0x41, 0x54, 0x49, 0x56, 0x45, 0x10, 0x02, 0x2a, 0x3b, 0x0a, 0x0e, 0x44, - 0x61, 0x74, 0x61, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x0d, 0x0a, - 0x09, 0x46, 0x4c, 0x41, 0x47, 0x5f, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x1a, 0x0a, 0x16, - 0x46, 0x4c, 0x41, 0x47, 0x5f, 0x4e, 0x4f, 0x5f, 0x52, 0x45, 0x43, 0x4f, 0x52, 0x44, 0x45, 0x44, - 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x10, 0x01, 0x42, 0x7f, 0x0a, 0x21, 0x69, 0x6f, 0x2e, 0x6f, - 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x42, 0x0c, 0x4d, - 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x29, 0x67, - 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, - 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x6d, 0x65, - 0x74, 0x72, 0x69, 0x63, 0x73, 0x2f, 0x76, 0x31, 0xaa, 0x02, 0x1e, 0x4f, 0x70, 0x65, 0x6e, 0x54, - 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4d, - 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, + 0x54, 0x59, 0x5f, 0x44, 0x45, 0x4c, 0x54, 0x41, 0x10, 0x01, 0x12, 0x26, 0x0a, 0x22, 0x41, 0x47, + 0x47, 0x52, 0x45, 0x47, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x54, 0x45, 0x4d, 0x50, 0x4f, 0x52, + 0x41, 0x4c, 0x49, 0x54, 0x59, 0x5f, 0x43, 0x55, 0x4d, 0x55, 0x4c, 0x41, 0x54, 0x49, 0x56, 0x45, + 0x10, 0x02, 0x2a, 0x5e, 0x0a, 0x0e, 0x44, 0x61, 0x74, 0x61, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x46, + 0x6c, 0x61, 0x67, 0x73, 0x12, 0x1f, 0x0a, 0x1b, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x50, 0x4f, 0x49, + 
0x4e, 0x54, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x53, 0x5f, 0x44, 0x4f, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, + 0x55, 0x53, 0x45, 0x10, 0x00, 0x12, 0x2b, 0x0a, 0x27, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x50, 0x4f, + 0x49, 0x4e, 0x54, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x53, 0x5f, 0x4e, 0x4f, 0x5f, 0x52, 0x45, 0x43, + 0x4f, 0x52, 0x44, 0x45, 0x44, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x4d, 0x41, 0x53, 0x4b, + 0x10, 0x01, 0x42, 0x7f, 0x0a, 0x21, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, + 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x42, 0x0c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x29, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2f, + 0x76, 0x31, 0xaa, 0x02, 0x1e, 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, + 0x72, 0x79, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, + 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff -Nru temporal-1.21.5-1/src/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go temporal-1.22.5/src/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go --- temporal-1.21.5-1/src/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go 2024-02-23 09:46:13.000000000 +0000 @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.17.3 +// protoc v3.21.6 // source: opentelemetry/proto/resource/v1/resource.proto package v1 diff -Nru temporal-1.21.5-1/src/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go temporal-1.22.5/src/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go --- temporal-1.21.5-1/src/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go 2024-02-23 09:46:13.000000000 +0000 @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.17.3 +// protoc v3.21.6 // source: opentelemetry/proto/trace/v1/trace.proto package v1 @@ -117,8 +117,8 @@ const ( // The default status. Status_STATUS_CODE_UNSET Status_StatusCode = 0 - // The Span has been validated by an Application developers or Operator to have - // completed successfully. + // The Span has been validated by an Application developer or Operator to + // have completed successfully. Status_STATUS_CODE_OK Status_StatusCode = 1 // The Span contains an error. Status_STATUS_CODE_ERROR Status_StatusCode = 2 @@ -374,20 +374,16 @@ unknownFields protoimpl.UnknownFields // A unique identifier for a trace. All spans from the same trace share - // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes - // is considered invalid. - // - // This field is semantically required. Receiver should generate new - // random trace_id if empty or invalid trace_id was received. + // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR + // of length other than 16 bytes is considered invalid (empty string in OTLP/JSON + // is zero-length and thus is also invalid). 
// // This field is required. TraceId []byte `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"` // A unique identifier for a span within a trace, assigned when the span - // is created. The ID is an 8-byte array. An ID with all zeroes is considered - // invalid. - // - // This field is semantically required. Receiver should generate new - // random span_id if empty or invalid span_id was received. + // is created. The ID is an 8-byte array. An ID with all zeroes OR of length + // other than 8 bytes is considered invalid (empty string in OTLP/JSON + // is zero-length and thus is also invalid). // // This field is required. SpanId []byte `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"` @@ -433,8 +429,8 @@ // // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" // "/http/server_latency": 300 - // "abc.com/myattribute": true - // "abc.com/score": 10.239 + // "example.com/myattribute": true + // "example.com/score": 10.239 // // The OpenTelemetry API specification further restricts the allowed value types: // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute diff -Nru temporal-1.21.5-1/src/vendor/go.temporal.io/api/common/v1/payload_json.go temporal-1.22.5/src/vendor/go.temporal.io/api/common/v1/payload_json.go --- temporal-1.21.5-1/src/vendor/go.temporal.io/api/common/v1/payload_json.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.temporal.io/api/common/v1/payload_json.go 2024-02-23 09:46:13.000000000 +0000 @@ -0,0 +1,228 @@ +// The MIT License +// +// Copyright (c) 2022 Temporal Technologies Inc. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package common + +import ( + "bytes" + "encoding/json" + + gogojsonpb "github.com/gogo/protobuf/jsonpb" + jsonpb "go.temporal.io/api/internal/temporaljsonpb" +) + +// !!! This file is copied from internal/temporalcommonv1 to common/v1. +// !!! DO NOT EDIT at common/v1/payload_json.go. + +// Key on the marshaler metadata specifying whether shorthand is disabled. +// +// WARNING: This is internal API and should not be called externally. +const DisablePayloadShorthandMetadataKey = "__temporal_disable_payload_shorthand" + +// MaybeMarshalJSONPB implements +// [go.temporal.io/api/internal/temporaljsonpb.JSONPBMaybeMarshaler.MaybeMarshalJSONPB]. 
+// +// WARNING: This is internal API and should not be called externally. +func (p *Payloads) MaybeMarshalJSONPB(m *jsonpb.Marshaler, currIndent string) (handled bool, b []byte, err error) { + // If this is nil, ignore + if p == nil { + return false, nil, nil + } + // If shorthand is disabled, ignore + if disabled, _ := m.Metadata[DisablePayloadShorthandMetadataKey].(bool); disabled { + return false, nil, nil + } + + // We only support marshalling to shorthand if all payloads are handled or + // there are no payloads + payloads := make([]interface{}, len(p.Payloads)) + for i, payload := range p.Payloads { + // If any are not handled or there is an error, return + if handled, payloads[i], err = payload.toJSONShorthand(); !handled || err != nil { + return handled, nil, err + } + } + // If we're indenting, use the current indent as prefix. Note, regardless of + // m.EmitDefaults, we always use an explicit empty array here if there are no + // values. + if m.Indent == "" { + b, err = json.Marshal(payloads) + } else { + b, err = json.MarshalIndent(payloads, currIndent, m.Indent) + } + return true, b, err +} + +// MaybeUnmarshalJSONPB implements +// [go.temporal.io/api/internal/temporaljsonpb.JSONPBMaybeUnmarshaler.MaybeUnmarshalJSONPB]. +// +// WARNING: This is internal API and should not be called externally. +func (p *Payloads) MaybeUnmarshalJSONPB(u *jsonpb.Unmarshaler, b []byte) (handled bool, err error) { + // If this is nil, ignore (should never be) + if p == nil { + return false, nil + } + // If shorthand is disabled, ignore + if disabled, _ := u.Metadata[DisablePayloadShorthandMetadataKey].(bool); disabled { + return false, nil + } + // Try to deserialize into slice. If this fails, it is not shorthand and this + // does not handle it. This means on invalid JSON, we let the proto JSON + // handler fail instead of here. + var payloadJSONs []json.RawMessage + if json.Unmarshal(b, &payloadJSONs) != nil { + return false, nil + } + // Convert each (some may be shorthand, some may not) + p.Payloads = make([]*Payload, len(payloadJSONs)) + for i, payloadJSON := range payloadJSONs { + p.Payloads[i] = &Payload{} + p.Payloads[i].fromJSONMaybeShorthand(payloadJSON) + } + return true, nil +} + +// MaybeMarshalJSONPB implements +// [go.temporal.io/api/internal/temporaljsonpb.JSONPBMaybeMarshaler.MaybeMarshalJSONPB]. +// +// WARNING: This is internal API and should not be called externally. +func (p *Payload) MaybeMarshalJSONPB(m *jsonpb.Marshaler, currIndent string) (handled bool, b []byte, err error) { + // If this is nil, ignore + if p == nil { + return false, nil, nil + } + // If shorthand is disabled, ignore + if disabled, _ := m.Metadata[DisablePayloadShorthandMetadataKey].(bool); disabled { + return false, nil, nil + } + handled, value, err := p.toJSONShorthand() + if !handled || err != nil { + return handled, nil, err + } + if m.Indent == "" { + b, err = json.Marshal(value) + } else { + b, err = json.MarshalIndent(value, currIndent, m.Indent) + } + return true, b, err +} + +// MaybeUnmarshalJSONPB implements +// [go.temporal.io/api/internal/temporaljsonpb.JSONPBMaybeUnmarshaler.MaybeUnmarshalJSONPB]. +// +// WARNING: This is internal API and should not be called externally. 
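+//
+// Illustrative behaviour (a sketch of what fromJSONMaybeShorthand below does):
+// the shorthand input `{"foo":"bar"}` yields a Payload whose
+// Metadata["encoding"] is "json/plain" and whose Data is `{"foo":"bar"}`;
+// the input `null` yields Metadata["encoding"] = "binary/null" with no Data;
+// and an object carrying a "_protoMessageType" key is re-encoded as
+// "json/protobuf" with that key moved into Metadata["messageType"].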
+func (p *Payload) MaybeUnmarshalJSONPB(u *jsonpb.Unmarshaler, b []byte) (handled bool, err error) { + // If this is nil, ignore (should never be) + if p == nil { + return false, nil + } + // If shorthand is disabled, ignore + if disabled, _ := u.Metadata[DisablePayloadShorthandMetadataKey].(bool); disabled { + return false, nil + } + // Always considered handled, unmarshaler ignored (unknown fields always + // disallowed for non-shorthand payloads at this time) + p.fromJSONMaybeShorthand(b) + return true, nil +} + +func (p *Payload) toJSONShorthand() (handled bool, value interface{}, err error) { + // Only support binary null, plain JSON and proto JSON + switch string(p.Metadata["encoding"]) { + case "binary/null": + // Leave value as nil + handled = true + case "json/plain": + // Must only have this single metadata + if len(p.Metadata) != 1 { + return false, nil, nil + } + // We unmarshal because we may have to indent. We let this error fail the + // marshaller. + handled = true + err = json.Unmarshal(p.Data, &value) + case "json/protobuf": + // Must have the message type and no other metadata + msgType := string(p.Metadata["messageType"]) + if msgType == "" || len(p.Metadata) != 2 { + return false, nil, nil + } + // Since this is a proto object, this must unmarshal to a object. We let + // this error fail the marshaller. + var valueMap map[string]interface{} + handled = true + err = json.Unmarshal(p.Data, &valueMap) + // Put the message type on the object + if valueMap != nil { + valueMap["_protoMessageType"] = msgType + } + value = valueMap + } + return +} + +func (p *Payload) fromJSONMaybeShorthand(b []byte) { + // We need to try to deserialize into the regular payload first. If it works + // and there is metadata _and_ data actually present (or null with a null + // metadata encoding), we assume it's a non-shorthand payload. If it fails + // (which it will if not an object or there is an unknown field or if + // 'metadata' is not string + base64 or if 'data' is not base64), we assume + // shorthand. We are ok disallowing unknown fields for payloads here even if + // the outer unmarshaler allows them. + if gogojsonpb.Unmarshal(bytes.NewReader(b), p) == nil && len(p.Metadata) > 0 { + // A raw payload must either have data or a binary/null encoding + if len(p.Data) > 0 || string(p.Metadata["encoding"]) == "binary/null" { + return + } + } + + // If it's "null", set no data and just metadata + if string(b) == "null" { + p.Data = nil + p.Metadata = map[string][]byte{"encoding": []byte("binary/null")} + return + } + + // Now that we know it is shorthand, it might be a proto JSON with a message + // type. If it does have the message type, we need to remove it and + // re-serialize it to data. So the quickest way to check whether it has the + // message type is to search for the key. + p.Data = b + p.Metadata = map[string][]byte{"encoding": []byte("json/plain")} + if bytes.Contains(p.Data, []byte(`"_protoMessageType"`)) { + // Try to unmarshal into map, extract and remove key, and re-serialize + var valueMap map[string]interface{} + if json.Unmarshal(p.Data, &valueMap) == nil { + if msgType, _ := valueMap["_protoMessageType"].(string); msgType != "" { + // Now we know it's a proto JSON, so remove the key and re-serialize + delete(valueMap, "_protoMessageType") + // This won't error. The resulting JSON payload data may not be exactly + // what user passed in sans message type (e.g. 
user may have indented or + // did not have same field order), but that is acceptable when going + // from shorthand to non-shorthand. + p.Data, _ = json.Marshal(valueMap) + p.Metadata["encoding"] = []byte("json/protobuf") + p.Metadata["messageType"] = []byte(msgType) + } + } + } +} diff -Nru temporal-1.21.5-1/src/vendor/go.temporal.io/api/enums/v1/failed_cause.pb.go temporal-1.22.5/src/vendor/go.temporal.io/api/enums/v1/failed_cause.pb.go --- temporal-1.21.5-1/src/vendor/go.temporal.io/api/enums/v1/failed_cause.pb.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.temporal.io/api/enums/v1/failed_cause.pb.go 2024-02-23 09:46:13.000000000 +0000 @@ -270,6 +270,8 @@ RESOURCE_EXHAUSTED_CAUSE_PERSISTENCE_LIMIT ResourceExhaustedCause = 4 // Workflow is busy RESOURCE_EXHAUSTED_CAUSE_BUSY_WORKFLOW ResourceExhaustedCause = 5 + // Caller exceeds action per second limit. + RESOURCE_EXHAUSTED_CAUSE_APS_LIMIT ResourceExhaustedCause = 6 ) var ResourceExhaustedCause_name = map[int32]string{ @@ -279,6 +281,7 @@ 3: "SystemOverloaded", 4: "PersistenceLimit", 5: "BusyWorkflow", + 6: "ApsLimit", } var ResourceExhaustedCause_value = map[string]int32{ @@ -288,6 +291,7 @@ "SystemOverloaded": 3, "PersistenceLimit": 4, "BusyWorkflow": 5, + "ApsLimit": 6, } func (ResourceExhaustedCause) EnumDescriptor() ([]byte, []int) { @@ -307,12 +311,12 @@ } var fileDescriptor_b293cf8d1d965f2d = []byte{ - // 1042 bytes of a gzipped FileDescriptorProto + // 1048 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x57, 0xdd, 0x72, 0xdb, 0x44, 0x14, 0xb6, 0xdc, 0x1f, 0x60, 0xf9, 0xa9, 0x58, 0x68, 0x53, 0x0a, 0x88, 0x81, 0x81, 0x4c, 0x6b, 0xc0, 0x69, 0x5a, 0xda, 0x4c, 0x1d, 0x98, 0x74, 0xbd, 0x3a, 0x8e, 0x77, 0x22, 0xad, 0xd4, 0xdd, 0x55, 0x62, 0xf7, 0x66, 0x47, 0xa4, 0x6e, 0xab, 0xa9, 0x1b, 0x7b, 0x1c, 0xa7, 0xe4, 0x92, 0x47, - 0x80, 0x87, 0x60, 0x86, 0xe1, 0x19, 0x78, 0x00, 0x2e, 0x73, 0xd9, 0x3b, 0x88, 0x73, 0xc3, 0x70, + 0x80, 0xb7, 0x60, 0x78, 0x06, 0x1e, 0x80, 0x1b, 0x66, 0x72, 0xd9, 0x3b, 0x88, 0x73, 0xc3, 0x70, 0xd5, 0x19, 0x5e, 0x80, 0x91, 0x63, 0x27, 0x4a, 0x62, 0x4b, 0x32, 0x77, 0x4a, 0xf6, 0xfb, 0xbe, 0x3d, 0xe7, 0xdb, 0xb3, 0xe7, 0x78, 0xd1, 0xf5, 0x7e, 0xeb, 0x79, 0xb7, 0xd3, 0x0b, 0xdb, 0x0b, 0x61, 0x37, 0x5a, 0x68, 0x6d, 0xed, 0x3c, 0xdf, 0x5e, 0x78, 0xb1, 0xb8, 0xf0, 0x38, 0x8c, 0xda, @@ -358,22 +362,22 @@ 0x8e, 0x71, 0x92, 0x92, 0x7d, 0x13, 0x05, 0xb3, 0x0a, 0xa4, 0x01, 0x8f, 0x53, 0x31, 0x62, 0x63, 0x67, 0x95, 0x9e, 0xec, 0xc9, 0xa0, 0x88, 0x4a, 0x32, 0x7a, 0xb2, 0x15, 0xe6, 0xf6, 0x64, 0xd4, 0x25, 0xff, 0xbf, 0x27, 0xb3, 0x0a, 0xcc, 0xe0, 0xc9, 0xac, 0xd2, 0x13, 0x3d, 0xc1, 0x1e, 0x5a, - 0x9b, 0x55, 0x68, 0x84, 0xa7, 0x5e, 0xc0, 0xd5, 0xe9, 0x5b, 0x7e, 0xae, 0xf4, 0x4b, 0x11, 0x5d, + 0x9b, 0x55, 0x68, 0x84, 0xa7, 0x5e, 0xc0, 0xd5, 0xe9, 0x5b, 0x7e, 0xae, 0xf4, 0x47, 0x11, 0x5d, 0x11, 0xad, 0xed, 0xce, 0x4e, 0x6f, 0xb3, 0x05, 0xbb, 0x4f, 0xc3, 0x9d, 0xed, 0xfe, 0xd8, 0xd0, 0xeb, 0xe8, 0x73, 0x01, 0xd2, 0x0b, 0xe2, 0x41, 0x07, 0x8d, 0x3a, 0x09, 0xa4, 0x9a, 0xe2, 0xdc, 0x3c, 0xfa, 0x6c, 0x2a, 0x52, 0xf8, 0xa3, 0x0e, 0x65, 0x1a, 0xf1, 0xc4, 0x9a, 0x8a, 0xa3, 0x1e, 0xa7, 0x81, 0x10, 0x30, 0x0e, 0xd2, 0x2c, 0xe2, 0x32, 0x2a, 0x4d, 0x85, 0xcb, 0xa6, 0x54, 0xe0, 0xea, 0x78, 0x9c, 0x3a, 0x1e, 0x19, 0xe6, 0x92, 0x8a, 0xf7, 0x41, 0x48, 0x26, 0x15, 0x70, 0x0a, - 0x23, 0xfd, 0xf3, 0xf1, 0x83, 0x61, 0x2a, 0xbe, 0x1a, 0xc8, 0xe3, 0x89, 0x61, 0x5e, 0xa8, 0xfe, - 0x6e, 0xec, 0xed, 0x5b, 0x85, 0x97, 0xfb, 0x56, 0xe1, 0xd5, 0xbe, 0x65, 0xfc, 0x38, 0xb0, 
0x8c, - 0x5f, 0x07, 0x96, 0xf1, 0xc7, 0xc0, 0x32, 0xf6, 0x06, 0x96, 0xf1, 0xd7, 0xc0, 0x32, 0xfe, 0x1e, - 0x58, 0x85, 0x57, 0x03, 0xcb, 0xf8, 0xe9, 0xc0, 0x2a, 0xec, 0x1d, 0x58, 0x85, 0x97, 0x07, 0x56, - 0x01, 0x5d, 0x8d, 0x3a, 0xe5, 0x89, 0x2f, 0x99, 0xaa, 0x99, 0xa8, 0x5d, 0x3f, 0x7e, 0xf2, 0xf8, - 0xc6, 0xc3, 0x4f, 0x9f, 0x24, 0xd0, 0x51, 0xe7, 0xc4, 0x23, 0x69, 0x79, 0xf8, 0xf1, 0x5b, 0x71, - 0x4e, 0x8d, 0x00, 0x51, 0xa7, 0x4c, 0xba, 0x51, 0x19, 0x86, 0x82, 0xeb, 0x8b, 0xff, 0x14, 0xaf, - 0x1d, 0xaf, 0x54, 0x2a, 0xa4, 0x1b, 0x55, 0x2a, 0xc3, 0xb5, 0x4a, 0x65, 0x7d, 0xf1, 0xfb, 0x8b, - 0xc3, 0x57, 0xd5, 0xed, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0x14, 0xbe, 0x77, 0x73, 0x81, 0x0d, - 0x00, 0x00, + 0x23, 0xfd, 0xf3, 0xf1, 0x83, 0x61, 0x2a, 0xbe, 0x1a, 0xc8, 0xe3, 0x89, 0x61, 0x5e, 0x48, 0x4d, + 0x91, 0x1c, 0xa5, 0x78, 0xb1, 0xfa, 0x9b, 0xb1, 0xb7, 0x6f, 0x15, 0x5e, 0xee, 0x5b, 0x85, 0x57, + 0xfb, 0x96, 0xf1, 0xe3, 0xc0, 0x32, 0x7e, 0x19, 0x58, 0xc6, 0xef, 0x03, 0xcb, 0xd8, 0x1b, 0x58, + 0xc6, 0x5f, 0x03, 0xcb, 0xf8, 0x7b, 0x60, 0x15, 0x5e, 0x0d, 0x2c, 0xe3, 0xa7, 0x03, 0xab, 0xb0, + 0x77, 0x60, 0x15, 0x5e, 0x1e, 0x58, 0x05, 0x74, 0x35, 0xea, 0x94, 0x27, 0xbe, 0x78, 0xaa, 0x66, + 0xa2, 0xc6, 0xfd, 0xf8, 0x69, 0xe4, 0x1b, 0x0f, 0x3f, 0x7d, 0x92, 0x40, 0x47, 0x9d, 0x13, 0x8f, + 0xa9, 0xe5, 0xe1, 0xc7, 0xaf, 0xc5, 0x39, 0x35, 0x02, 0x44, 0x9d, 0x32, 0xe9, 0x46, 0x65, 0x18, + 0x0a, 0xae, 0x2f, 0xfe, 0x53, 0xbc, 0x76, 0xbc, 0x52, 0xa9, 0x90, 0x6e, 0x54, 0xa9, 0x0c, 0xd7, + 0x2a, 0x95, 0xf5, 0xc5, 0xef, 0x2f, 0x0e, 0x5f, 0x5f, 0xb7, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff, + 0x6f, 0x85, 0x37, 0xfd, 0xa9, 0x0d, 0x00, 0x00, } func (x WorkflowTaskFailedCause) String() string { diff -Nru temporal-1.21.5-1/src/vendor/go.temporal.io/api/internal/temporalgateway/README.md temporal-1.22.5/src/vendor/go.temporal.io/api/internal/temporalgateway/README.md --- temporal-1.21.5-1/src/vendor/go.temporal.io/api/internal/temporalgateway/README.md 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.temporal.io/api/internal/temporalgateway/README.md 2024-02-23 09:46:13.000000000 +0000 @@ -0,0 +1,4 @@ +# temporalgateway + +This package contains a gRPC gateway implementation of Temporal-specific proto JSON formatting. It is mostly equivalent +to https://github.com/gogo/gateway but altered for our JSON formatter that supports shorthand payloads. \ No newline at end of file diff -Nru temporal-1.21.5-1/src/vendor/go.temporal.io/api/internal/temporalgateway/marshal_jsonpb.go temporal-1.22.5/src/vendor/go.temporal.io/api/internal/temporalgateway/marshal_jsonpb.go --- temporal-1.21.5-1/src/vendor/go.temporal.io/api/internal/temporalgateway/marshal_jsonpb.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.temporal.io/api/internal/temporalgateway/marshal_jsonpb.go 2024-02-23 09:46:13.000000000 +0000 @@ -0,0 +1,281 @@ +// The MIT License +// +// Copyright (c) 2022 Temporal Technologies Inc. All rights reserved. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// This file taken from +// https://github.com/grpc-ecosystem/grpc-gateway/blob/v1.16.0/runtime/marshal_jsonpb.go +// and altered to support Temporal JSONPB implementation. Specifically, the +// JSONPb struct was altered to accept an unmarshaler also and to change the +// marshaler to Temporal's. + +package temporalgateway + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "reflect" + + "github.com/gogo/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + jsonpb "go.temporal.io/api/internal/temporaljsonpb" +) + +// JSONPb is a Marshaler which marshals/unmarshals into/from JSON +// with the "github.com/golang/protobuf/jsonpb". +// It supports fully functionality of protobuf unlike JSONBuiltin. +// +// The NewDecoder method returns a DecoderWrapper, so the underlying +// *json.Decoder methods can be used. +type JSONPb struct { + Marshaler jsonpb.Marshaler + Unmarshaler jsonpb.Unmarshaler +} + +// ContentType always returns "application/json". +func (*JSONPb) ContentType() string { + return "application/json" +} + +// Marshal marshals "v" into JSON. +func (j *JSONPb) Marshal(v interface{}) ([]byte, error) { + if _, ok := v.(proto.Message); !ok { + return j.marshalNonProtoField(v) + } + + var buf bytes.Buffer + if err := j.marshalTo(&buf, v); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +func (j *JSONPb) marshalTo(w io.Writer, v interface{}) error { + p, ok := v.(proto.Message) + if !ok { + buf, err := j.marshalNonProtoField(v) + if err != nil { + return err + } + _, err = w.Write(buf) + return err + } + return j.Marshaler.Marshal(w, p) +} + +var ( + // protoMessageType is stored to prevent constant lookup of the same type at runtime. + protoMessageType = reflect.TypeOf((*proto.Message)(nil)).Elem() +) + +// marshalNonProto marshals a non-message field of a protobuf message. +// This function does not correctly marshals arbitrary data structure into JSON, +// but it is only capable of marshaling non-message field values of protobuf, +// i.e. primitive types, enums; pointers to primitives or enums; maps from +// integer/string types to primitives/enums/pointers to messages. 
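+//
+// For example (illustrative): a nil slice is rendered as [] when
+// j.Marshaler.EmitDefaults is set and as null otherwise; a slice of
+// proto.Message values is written element by element with j.Marshaler; and a
+// map value is emitted as a JSON object keyed by the stringified map key.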
+func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) { + if v == nil { + return []byte("null"), nil + } + rv := reflect.ValueOf(v) + for rv.Kind() == reflect.Ptr { + if rv.IsNil() { + return []byte("null"), nil + } + rv = rv.Elem() + } + + if rv.Kind() == reflect.Slice { + if rv.IsNil() { + if j.Marshaler.EmitDefaults { + return []byte("[]"), nil + } + return []byte("null"), nil + } + + if rv.Type().Elem().Implements(protoMessageType) { + var buf bytes.Buffer + err := buf.WriteByte('[') + if err != nil { + return nil, err + } + for i := 0; i < rv.Len(); i++ { + if i != 0 { + err = buf.WriteByte(',') + if err != nil { + return nil, err + } + } + if err = j.Marshaler.Marshal(&buf, rv.Index(i).Interface().(proto.Message)); err != nil { + return nil, err + } + } + err = buf.WriteByte(']') + if err != nil { + return nil, err + } + + return buf.Bytes(), nil + } + } + + if rv.Kind() == reflect.Map { + m := make(map[string]*json.RawMessage) + for _, k := range rv.MapKeys() { + buf, err := j.Marshal(rv.MapIndex(k).Interface()) + if err != nil { + return nil, err + } + m[fmt.Sprintf("%v", k.Interface())] = (*json.RawMessage)(&buf) + } + if j.Marshaler.Indent != "" { + return json.MarshalIndent(m, "", j.Marshaler.Indent) + } + return json.Marshal(m) + } + if enum, ok := rv.Interface().(protoEnum); ok && !j.Marshaler.EnumsAsInts { + return json.Marshal(enum.String()) + } + return json.Marshal(rv.Interface()) +} + +// Unmarshal unmarshals JSON "data" into "v" +func (j *JSONPb) Unmarshal(data []byte, v interface{}) error { + return j.unmarshalJSONPb(data, v) +} + +// NewDecoder returns a Decoder which reads JSON stream from "r". +func (j *JSONPb) NewDecoder(r io.Reader) runtime.Decoder { + d := json.NewDecoder(r) + return DecoderWrapper{Decoder: d, jsonpb: j} +} + +// DecoderWrapper is a wrapper around a *json.Decoder that adds +// support for protos to the Decode method. +type DecoderWrapper struct { + *json.Decoder + jsonpb *JSONPb +} + +// Decode wraps the embedded decoder's Decode method to support +// protos using a jsonpb.Unmarshaler. +func (d DecoderWrapper) Decode(v interface{}) error { + return d.jsonpb.decodeJSONPb(d.Decoder, v) +} + +// NewEncoder returns an Encoder which writes JSON stream into "w". 
+func (j *JSONPb) NewEncoder(w io.Writer) runtime.Encoder { + return runtime.EncoderFunc(func(v interface{}) error { + if err := j.marshalTo(w, v); err != nil { + return err + } + // mimic json.Encoder by adding a newline (makes output + // easier to read when it contains multiple encoded items) + _, err := w.Write(j.Delimiter()) + return err + }) +} + +func (j *JSONPb) unmarshalJSONPb(data []byte, v interface{}) error { + d := json.NewDecoder(bytes.NewReader(data)) + return j.decodeJSONPb(d, v) +} + +func (j *JSONPb) decodeJSONPb(d *json.Decoder, v interface{}) error { + p, ok := v.(proto.Message) + if !ok { + return j.decodeNonProtoField(d, v) + } + return j.Unmarshaler.UnmarshalNext(d, p) +} + +func (j *JSONPb) decodeNonProtoField(d *json.Decoder, v interface{}) error { + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr { + return fmt.Errorf("%T is not a pointer", v) + } + for rv.Kind() == reflect.Ptr { + if rv.IsNil() { + rv.Set(reflect.New(rv.Type().Elem())) + } + if rv.Type().ConvertibleTo(typeProtoMessage) { + return j.Unmarshaler.UnmarshalNext(d, rv.Interface().(proto.Message)) + } + rv = rv.Elem() + } + if rv.Kind() == reflect.Map { + if rv.IsNil() { + rv.Set(reflect.MakeMap(rv.Type())) + } + conv, ok := convFromType[rv.Type().Key().Kind()] + if !ok { + return fmt.Errorf("unsupported type of map field key: %v", rv.Type().Key()) + } + + m := make(map[string]*json.RawMessage) + if err := d.Decode(&m); err != nil { + return err + } + for k, v := range m { + result := conv.Call([]reflect.Value{reflect.ValueOf(k)}) + if err := result[1].Interface(); err != nil { + return err.(error) + } + bk := result[0] + bv := reflect.New(rv.Type().Elem()) + if err := j.unmarshalJSONPb([]byte(*v), bv.Interface()); err != nil { + return err + } + rv.SetMapIndex(bk, bv.Elem()) + } + return nil + } + if _, ok := rv.Interface().(protoEnum); ok { + var repr interface{} + if err := d.Decode(&repr); err != nil { + return err + } + switch repr.(type) { + case string: + // TODO(yugui) Should use proto.StructProperties? + return fmt.Errorf("unmarshaling of symbolic enum %q not supported: %T", repr, rv.Interface()) + case float64: + rv.Set(reflect.ValueOf(int32(repr.(float64))).Convert(rv.Type())) + return nil + default: + return fmt.Errorf("cannot assign %#v into Go type %T", repr, rv.Interface()) + } + } + return d.Decode(v) +} + +type protoEnum interface { + fmt.Stringer + EnumDescriptor() ([]byte, []int) +} + +var typeProtoMessage = reflect.TypeOf((*proto.Message)(nil)).Elem() + +// Delimiter for newline encoded JSON streams. +func (j *JSONPb) Delimiter() []byte { + return []byte("\n") +} diff -Nru temporal-1.21.5-1/src/vendor/go.temporal.io/api/internal/temporalgateway/query.go temporal-1.22.5/src/vendor/go.temporal.io/api/internal/temporalgateway/query.go --- temporal-1.21.5-1/src/vendor/go.temporal.io/api/internal/temporalgateway/query.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.temporal.io/api/internal/temporalgateway/query.go 2024-02-23 09:46:13.000000000 +0000 @@ -0,0 +1,46 @@ +// The MIT License +// +// Copyright (c) 2022 Temporal Technologies Inc. All rights reserved. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// This code taken from the bottom of +// https://github.com/grpc-ecosystem/grpc-gateway/blob/v1.16.0/runtime/query.go. + +package temporalgateway + +import ( + "reflect" + + "github.com/grpc-ecosystem/grpc-gateway/runtime" +) + +var ( + convFromType = map[reflect.Kind]reflect.Value{ + reflect.String: reflect.ValueOf(runtime.String), + reflect.Bool: reflect.ValueOf(runtime.Bool), + reflect.Float64: reflect.ValueOf(runtime.Float64), + reflect.Float32: reflect.ValueOf(runtime.Float32), + reflect.Int64: reflect.ValueOf(runtime.Int64), + reflect.Int32: reflect.ValueOf(runtime.Int32), + reflect.Uint64: reflect.ValueOf(runtime.Uint64), + reflect.Uint32: reflect.ValueOf(runtime.Uint32), + reflect.Slice: reflect.ValueOf(runtime.Bytes), + } +) diff -Nru temporal-1.21.5-1/src/vendor/go.temporal.io/api/internal/temporaljsonpb/README.md temporal-1.22.5/src/vendor/go.temporal.io/api/internal/temporaljsonpb/README.md --- temporal-1.21.5-1/src/vendor/go.temporal.io/api/internal/temporaljsonpb/README.md 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.temporal.io/api/internal/temporaljsonpb/README.md 2024-02-23 09:46:13.000000000 +0000 @@ -0,0 +1,4 @@ +# temporaljsonpb + +This package contains essentially a copy of https://github.com/gogo/protobuf/blob/v1.3.2/jsonpb/jsonpb.go but altered +to support custom, optional JSON formatting/parsing for protobuf types. \ No newline at end of file diff -Nru temporal-1.21.5-1/src/vendor/go.temporal.io/api/internal/temporaljsonpb/jsonpb.go temporal-1.22.5/src/vendor/go.temporal.io/api/internal/temporaljsonpb/jsonpb.go --- temporal-1.21.5-1/src/vendor/go.temporal.io/api/internal/temporaljsonpb/jsonpb.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.temporal.io/api/internal/temporaljsonpb/jsonpb.go 2024-02-23 09:46:13.000000000 +0000 @@ -0,0 +1,1482 @@ +// The MIT License +// +// Copyright (c) 2022 Temporal Technologies Inc. All rights reserved. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// This file comes from https://github.com/gogo/protobuf/blob/v1.3.2/jsonpb/jsonpb.go +// with slight changes to support JSONPBMaybeMarshaler and JSONPBMaybeUnmarshaler. + +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2015 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package jsonpb provides marshaling and unmarshaling between protocol buffers and JSON. +It follows the specification at https://developers.google.com/protocol-buffers/docs/proto3#json. + +This package produces a different output than the standard "encoding/json" package, +which does not operate correctly on protocol buffers. 
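+
+In this vendored copy, a message may additionally implement the
+JSONPBMaybeMarshaler / JSONPBMaybeUnmarshaler interfaces to conditionally take
+over its own JSON encoding; this is the hook used by the Temporal payload
+shorthand in go.temporal.io/api/common/v1/payload_json.go.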
+*/ +package jsonpb + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "math" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "github.com/gogo/protobuf/proto" + "github.com/gogo/protobuf/types" +) + +const secondInNanos = int64(time.Second / time.Nanosecond) +const maxSecondsInDuration = 315576000000 + +// Marshaler is a configurable object for converting between +// protocol buffer objects and a JSON representation for them. +type Marshaler struct { + // Whether to render enum values as integers, as opposed to string values. + EnumsAsInts bool + + // Whether to render fields with zero values. + EmitDefaults bool + + // A string to indent each level by. The presence of this field will + // also cause a space to appear between the field separator and + // value, and for newlines to be appear between fields and array + // elements. + Indent string + + // Whether to use the original (.proto) name for fields. + OrigName bool + + // A custom URL resolver to use when marshaling Any messages to JSON. + // If unset, the default resolution strategy is to extract the + // fully-qualified type name from the type URL and pass that to + // proto.MessageType(string). + AnyResolver AnyResolver + + Metadata map[string]interface{} +} + +// AnyResolver takes a type URL, present in an Any message, and resolves it into +// an instance of the associated message. +type AnyResolver interface { + Resolve(typeUrl string) (proto.Message, error) +} + +func defaultResolveAny(typeUrl string) (proto.Message, error) { + // Only the part of typeUrl after the last slash is relevant. + mname := typeUrl + if slash := strings.LastIndex(mname, "/"); slash >= 0 { + mname = mname[slash+1:] + } + mt := proto.MessageType(mname) + if mt == nil { + return nil, fmt.Errorf("unknown message type %q", mname) + } + return reflect.New(mt.Elem()).Interface().(proto.Message), nil +} + +// JSONPBMarshaler is implemented by protobuf messages that customize the +// way they are marshaled to JSON. Messages that implement this should +// also implement JSONPBUnmarshaler so that the custom format can be +// parsed. +// +// The JSON marshaling must follow the proto to JSON specification: +// +// https://developers.google.com/protocol-buffers/docs/proto3#json +type JSONPBMarshaler interface { + MarshalJSONPB(*Marshaler) ([]byte, error) +} + +// JSONPBUnmarshaler is implemented by protobuf messages that customize +// the way they are unmarshaled from JSON. Messages that implement this +// should also implement JSONPBMarshaler so that the custom format can be +// produced. +// +// The JSON unmarshaling must follow the JSON to proto specification: +// +// https://developers.google.com/protocol-buffers/docs/proto3#json +type JSONPBUnmarshaler interface { + UnmarshalJSONPB(*Unmarshaler, []byte) error +} + +// Marshal marshals a protocol buffer into JSON. +func (m *Marshaler) Marshal(out io.Writer, pb proto.Message) error { + v := reflect.ValueOf(pb) + if pb == nil || (v.Kind() == reflect.Ptr && v.IsNil()) { + return errors.New("Marshal called with nil") + } + // Check for unset required fields first. + if err := checkRequiredFields(pb); err != nil { + return err + } + writer := &errWriter{writer: out} + return m.marshalObject(writer, pb, "", "") +} + +// MarshalToString converts a protocol buffer object to JSON string. 
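+//
+// Illustrative usage (a sketch; msg stands for any generated proto.Message):
+//
+//	m := &Marshaler{EmitDefaults: true, Indent: "  "}
+//	s, err := m.MarshalToString(msg)
+//	// s holds the indented proto-JSON form of msg, or err is non-nil.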
+func (m *Marshaler) MarshalToString(pb proto.Message) (string, error) { + var buf bytes.Buffer + if err := m.Marshal(&buf, pb); err != nil { + return "", err + } + return buf.String(), nil +} + +type int32Slice []int32 + +var nonFinite = map[string]float64{ + `"NaN"`: math.NaN(), + `"Infinity"`: math.Inf(1), + `"-Infinity"`: math.Inf(-1), +} + +// For sorting extensions ids to ensure stable output. +func (s int32Slice) Len() int { return len(s) } +func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } +func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +type isWkt interface { + XXX_WellKnownType() string +} + +var ( + wktType = reflect.TypeOf((*isWkt)(nil)).Elem() + messageType = reflect.TypeOf((*proto.Message)(nil)).Elem() +) + +// marshalObject writes a struct to the Writer. +func (m *Marshaler) marshalObject(out *errWriter, v proto.Message, indent, typeURL string) error { + if jsm, ok := v.(JSONPBMarshaler); ok { + b, err := jsm.MarshalJSONPB(m) + if err != nil { + return err + } + if typeURL != "" { + // we are marshaling this object to an Any type + var js map[string]*json.RawMessage + if err = json.Unmarshal(b, &js); err != nil { + return fmt.Errorf("type %T produced invalid JSON: %v", v, err) + } + turl, err := json.Marshal(typeURL) + if err != nil { + return fmt.Errorf("failed to marshal type URL %q to JSON: %v", typeURL, err) + } + js["@type"] = (*json.RawMessage)(&turl) + if m.Indent != "" { + b, err = json.MarshalIndent(js, indent, m.Indent) + } else { + b, err = json.Marshal(js) + } + if err != nil { + return err + } + } + + out.write(string(b)) + return out.err + } + + if jsm, ok := v.(JSONPBMaybeMarshaler); ok { + if handled, b, err := jsm.MaybeMarshalJSONPB(m, indent); handled && err != nil { + return err + } else if handled { + out.write(string(b)) + return out.err + } + } + + s := reflect.ValueOf(v).Elem() + + // Handle well-known types. + if wkt, ok := v.(isWkt); ok { + switch wkt.XXX_WellKnownType() { + case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value", + "Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue": + // "Wrappers use the same representation in JSON + // as the wrapped primitive type, ..." + sprop := proto.GetProperties(s.Type()) + return m.marshalValue(out, sprop.Prop[0], s.Field(0), indent) + case "Any": + // Any is a bit more involved. + return m.marshalAny(out, v, indent) + case "Duration": + s, ns := s.Field(0).Int(), s.Field(1).Int() + if s < -maxSecondsInDuration || s > maxSecondsInDuration { + return fmt.Errorf("seconds out of range %v", s) + } + if ns <= -secondInNanos || ns >= secondInNanos { + return fmt.Errorf("ns out of range (%v, %v)", -secondInNanos, secondInNanos) + } + if (s > 0 && ns < 0) || (s < 0 && ns > 0) { + return errors.New("signs of seconds and nanos do not match") + } + // Generated output always contains 0, 3, 6, or 9 fractional digits, + // depending on required precision, followed by the suffix "s". + f := "%d.%09d" + if ns < 0 { + ns = -ns + if s == 0 { + f = "-%d.%09d" + } + } + x := fmt.Sprintf(f, s, ns) + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + out.write(`"`) + out.write(x) + out.write(`s"`) + return out.err + case "Struct", "ListValue": + // Let marshalValue handle the `Struct.fields` map or the `ListValue.values` slice. + // TODO: pass the correct Properties if needed. 
+ return m.marshalValue(out, &proto.Properties{}, s.Field(0), indent) + case "Timestamp": + // "RFC 3339, where generated output will always be Z-normalized + // and uses 0, 3, 6 or 9 fractional digits." + s, ns := s.Field(0).Int(), s.Field(1).Int() + if ns < 0 || ns >= secondInNanos { + return fmt.Errorf("ns out of range [0, %v)", secondInNanos) + } + t := time.Unix(s, ns).UTC() + // time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits). + x := t.Format("2006-01-02T15:04:05.000000000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + out.write(`"`) + out.write(x) + out.write(`Z"`) + return out.err + case "Value": + // Value has a single oneof. + kind := s.Field(0) + if kind.IsNil() { + // "absence of any variant indicates an error" + return errors.New("nil Value") + } + // oneof -> *T -> T -> T.F + x := kind.Elem().Elem().Field(0) + // TODO: pass the correct Properties if needed. + return m.marshalValue(out, &proto.Properties{}, x, indent) + } + } + + out.write("{") + if m.Indent != "" { + out.write("\n") + } + + firstField := true + + if typeURL != "" { + if err := m.marshalTypeURL(out, indent, typeURL); err != nil { + return err + } + firstField = false + } + + for i := 0; i < s.NumField(); i++ { + value := s.Field(i) + valueField := s.Type().Field(i) + if strings.HasPrefix(valueField.Name, "XXX_") { + continue + } + + //this is not a protobuf field + if valueField.Tag.Get("protobuf") == "" && valueField.Tag.Get("protobuf_oneof") == "" { + continue + } + + // IsNil will panic on most value kinds. + switch value.Kind() { + case reflect.Chan, reflect.Func, reflect.Interface: + if value.IsNil() { + continue + } + } + + if !m.EmitDefaults { + switch value.Kind() { + case reflect.Bool: + if !value.Bool() { + continue + } + case reflect.Int32, reflect.Int64: + if value.Int() == 0 { + continue + } + case reflect.Uint32, reflect.Uint64: + if value.Uint() == 0 { + continue + } + case reflect.Float32, reflect.Float64: + if value.Float() == 0 { + continue + } + case reflect.String: + if value.Len() == 0 { + continue + } + case reflect.Map, reflect.Ptr, reflect.Slice: + if value.IsNil() { + continue + } + } + } + + // Oneof fields need special handling. + if valueField.Tag.Get("protobuf_oneof") != "" { + // value is an interface containing &T{real_value}. + sv := value.Elem().Elem() // interface -> *T -> T + value = sv.Field(0) + valueField = sv.Type().Field(0) + } + prop := jsonProperties(valueField, m.OrigName) + if !firstField { + m.writeSep(out) + } + // If the map value is a cast type, it may not implement proto.Message, therefore + // allow the struct tag to declare the underlying message type. Change the property + // of the child types, use CustomType as a passer. CastType currently property is + // not used in json encoding. + if value.Kind() == reflect.Map { + if tag := valueField.Tag.Get("protobuf"); tag != "" { + for _, v := range strings.Split(tag, ",") { + if !strings.HasPrefix(v, "castvaluetype=") { + continue + } + v = strings.TrimPrefix(v, "castvaluetype=") + prop.MapValProp.CustomType = v + break + } + } + } + if err := m.marshalField(out, prop, value, indent); err != nil { + return err + } + firstField = false + } + + // Handle proto2 extensions. + if ep, ok := v.(proto.Message); ok { + extensions := proto.RegisteredExtensions(v) + // Sort extensions for stable output. 
+ ids := make([]int32, 0, len(extensions)) + for id, desc := range extensions { + if !proto.HasExtension(ep, desc) { + continue + } + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) + for _, id := range ids { + desc := extensions[id] + if desc == nil { + // unknown extension + continue + } + ext, extErr := proto.GetExtension(ep, desc) + if extErr != nil { + return extErr + } + value := reflect.ValueOf(ext) + var prop proto.Properties + prop.Parse(desc.Tag) + prop.JSONName = fmt.Sprintf("[%s]", desc.Name) + if !firstField { + m.writeSep(out) + } + if err := m.marshalField(out, &prop, value, indent); err != nil { + return err + } + firstField = false + } + + } + + if m.Indent != "" { + out.write("\n") + out.write(indent) + } + out.write("}") + return out.err +} + +func (m *Marshaler) writeSep(out *errWriter) { + if m.Indent != "" { + out.write(",\n") + } else { + out.write(",") + } +} + +func (m *Marshaler) marshalAny(out *errWriter, any proto.Message, indent string) error { + // "If the Any contains a value that has a special JSON mapping, + // it will be converted as follows: {"@type": xxx, "value": yyy}. + // Otherwise, the value will be converted into a JSON object, + // and the "@type" field will be inserted to indicate the actual data type." + v := reflect.ValueOf(any).Elem() + turl := v.Field(0).String() + val := v.Field(1).Bytes() + + var msg proto.Message + var err error + if m.AnyResolver != nil { + msg, err = m.AnyResolver.Resolve(turl) + } else { + msg, err = defaultResolveAny(turl) + } + if err != nil { + return err + } + + if err := proto.Unmarshal(val, msg); err != nil { + return err + } + + if _, ok := msg.(isWkt); ok { + out.write("{") + if m.Indent != "" { + out.write("\n") + } + if err := m.marshalTypeURL(out, indent, turl); err != nil { + return err + } + m.writeSep(out) + if m.Indent != "" { + out.write(indent) + out.write(m.Indent) + out.write(`"value": `) + } else { + out.write(`"value":`) + } + if err := m.marshalObject(out, msg, indent+m.Indent, ""); err != nil { + return err + } + if m.Indent != "" { + out.write("\n") + out.write(indent) + } + out.write("}") + return out.err + } + + return m.marshalObject(out, msg, indent, turl) +} + +func (m *Marshaler) marshalTypeURL(out *errWriter, indent, typeURL string) error { + if m.Indent != "" { + out.write(indent) + out.write(m.Indent) + } + out.write(`"@type":`) + if m.Indent != "" { + out.write(" ") + } + b, err := json.Marshal(typeURL) + if err != nil { + return err + } + out.write(string(b)) + return out.err +} + +// marshalField writes field description and value to the Writer. +func (m *Marshaler) marshalField(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error { + if m.Indent != "" { + out.write(indent) + out.write(m.Indent) + } + out.write(`"`) + out.write(prop.JSONName) + out.write(`":`) + if m.Indent != "" { + out.write(" ") + } + if err := m.marshalValue(out, prop, v, indent); err != nil { + return err + } + return nil +} + +// marshalValue writes the value to the Writer. +func (m *Marshaler) marshalValue(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error { + + v = reflect.Indirect(v) + + // Handle nil pointer + if v.Kind() == reflect.Invalid { + out.write("null") + return out.err + } + + // Handle repeated elements. 
+ if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 { + out.write("[") + comma := "" + for i := 0; i < v.Len(); i++ { + sliceVal := v.Index(i) + out.write(comma) + if m.Indent != "" { + out.write("\n") + out.write(indent) + out.write(m.Indent) + out.write(m.Indent) + } + if err := m.marshalValue(out, prop, sliceVal, indent+m.Indent); err != nil { + return err + } + comma = "," + } + if m.Indent != "" { + out.write("\n") + out.write(indent) + out.write(m.Indent) + } + out.write("]") + return out.err + } + + // Handle well-known types. + // Most are handled up in marshalObject (because 99% are messages). + if v.Type().Implements(wktType) { + wkt := v.Interface().(isWkt) + switch wkt.XXX_WellKnownType() { + case "NullValue": + out.write("null") + return out.err + } + } + + if t, ok := v.Interface().(time.Time); ok { + ts, err := types.TimestampProto(t) + if err != nil { + return err + } + return m.marshalValue(out, prop, reflect.ValueOf(ts), indent) + } + + if d, ok := v.Interface().(time.Duration); ok { + dur := types.DurationProto(d) + return m.marshalValue(out, prop, reflect.ValueOf(dur), indent) + } + + // Handle enumerations. + if !m.EnumsAsInts && prop.Enum != "" { + // Unknown enum values will are stringified by the proto library as their + // value. Such values should _not_ be quoted or they will be interpreted + // as an enum string instead of their value. + enumStr := v.Interface().(fmt.Stringer).String() + var valStr string + if v.Kind() == reflect.Ptr { + valStr = strconv.Itoa(int(v.Elem().Int())) + } else { + valStr = strconv.Itoa(int(v.Int())) + } + + if m, ok := v.Interface().(interface { + MarshalJSON() ([]byte, error) + }); ok { + data, err := m.MarshalJSON() + if err != nil { + return err + } + enumStr = string(data) + enumStr, err = strconv.Unquote(enumStr) + if err != nil { + return err + } + } + + isKnownEnum := enumStr != valStr + + if isKnownEnum { + out.write(`"`) + } + out.write(enumStr) + if isKnownEnum { + out.write(`"`) + } + return out.err + } + + // Handle nested messages. + if v.Kind() == reflect.Struct { + i := v + if v.CanAddr() { + i = v.Addr() + } else { + i = reflect.New(v.Type()) + i.Elem().Set(v) + } + iface := i.Interface() + if iface == nil { + out.write(`null`) + return out.err + } + + if m, ok := v.Interface().(interface { + MarshalJSON() ([]byte, error) + }); ok { + data, err := m.MarshalJSON() + if err != nil { + return err + } + out.write(string(data)) + return nil + } + + pm, ok := iface.(proto.Message) + if !ok { + if prop.CustomType == "" { + return fmt.Errorf("%v does not implement proto.Message", v.Type()) + } + t := proto.MessageType(prop.CustomType) + if t == nil || !i.Type().ConvertibleTo(t) { + return fmt.Errorf("%v declared custom type %s but it is not convertible to %v", v.Type(), prop.CustomType, t) + } + pm = i.Convert(t).Interface().(proto.Message) + } + return m.marshalObject(out, pm, indent+m.Indent, "") + } + + // Handle maps. + // Since Go randomizes map iteration, we sort keys for stable output. + if v.Kind() == reflect.Map { + out.write(`{`) + keys := v.MapKeys() + sort.Sort(mapKeys(keys)) + for i, k := range keys { + if i > 0 { + out.write(`,`) + } + if m.Indent != "" { + out.write("\n") + out.write(indent) + out.write(m.Indent) + out.write(m.Indent) + } + + // TODO handle map key prop properly + b, err := json.Marshal(k.Interface()) + if err != nil { + return err + } + s := string(b) + + // If the JSON is not a string value, encode it again to make it one. 
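+ // e.g. an int32 map key 42 marshals to 42 and is re-quoted to "42" so
+ // that it is a valid JSON object key.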
+ if !strings.HasPrefix(s, `"`) { + b, err := json.Marshal(s) + if err != nil { + return err + } + s = string(b) + } + + out.write(s) + out.write(`:`) + if m.Indent != "" { + out.write(` `) + } + + vprop := prop + if prop != nil && prop.MapValProp != nil { + vprop = prop.MapValProp + } + if err := m.marshalValue(out, vprop, v.MapIndex(k), indent+m.Indent); err != nil { + return err + } + } + if m.Indent != "" { + out.write("\n") + out.write(indent) + out.write(m.Indent) + } + out.write(`}`) + return out.err + } + + // Handle non-finite floats, e.g. NaN, Infinity and -Infinity. + if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { + f := v.Float() + var sval string + switch { + case math.IsInf(f, 1): + sval = `"Infinity"` + case math.IsInf(f, -1): + sval = `"-Infinity"` + case math.IsNaN(f): + sval = `"NaN"` + } + if sval != "" { + out.write(sval) + return out.err + } + } + + // Default handling defers to the encoding/json library. + b, err := json.Marshal(v.Interface()) + if err != nil { + return err + } + needToQuote := string(b[0]) != `"` && (v.Kind() == reflect.Int64 || v.Kind() == reflect.Uint64) + if needToQuote { + out.write(`"`) + } + out.write(string(b)) + if needToQuote { + out.write(`"`) + } + return out.err +} + +// Unmarshaler is a configurable object for converting from a JSON +// representation to a protocol buffer object. +type Unmarshaler struct { + // Whether to allow messages to contain unknown fields, as opposed to + // failing to unmarshal. + AllowUnknownFields bool + + // A custom URL resolver to use when unmarshaling Any messages from JSON. + // If unset, the default resolution strategy is to extract the + // fully-qualified type name from the type URL and pass that to + // proto.MessageType(string). + AnyResolver AnyResolver + + Metadata map[string]interface{} +} + +// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream. +// This function is lenient and will decode any options permutations of the +// related Marshaler. +func (u *Unmarshaler) UnmarshalNext(dec *json.Decoder, pb proto.Message) error { + inputValue := json.RawMessage{} + if err := dec.Decode(&inputValue); err != nil { + return err + } + if err := u.unmarshalValue(reflect.ValueOf(pb).Elem(), inputValue, nil); err != nil { + return err + } + return checkRequiredFields(pb) +} + +// Unmarshal unmarshals a JSON object stream into a protocol +// buffer. This function is lenient and will decode any options +// permutations of the related Marshaler. +func (u *Unmarshaler) Unmarshal(r io.Reader, pb proto.Message) error { + dec := json.NewDecoder(r) + return u.UnmarshalNext(dec, pb) +} + +// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream. +// This function is lenient and will decode any options permutations of the +// related Marshaler. +func UnmarshalNext(dec *json.Decoder, pb proto.Message) error { + return new(Unmarshaler).UnmarshalNext(dec, pb) +} + +// Unmarshal unmarshals a JSON object stream into a protocol +// buffer. This function is lenient and will decode any options +// permutations of the related Marshaler. +func Unmarshal(r io.Reader, pb proto.Message) error { + return new(Unmarshaler).Unmarshal(r, pb) +} + +// UnmarshalString will populate the fields of a protocol buffer based +// on a JSON string. This function is lenient and will decode any options +// permutations of the related Marshaler. 
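+//
+// Illustrative usage (a sketch; somepb.SomeMessage is a hypothetical
+// generated type):
+//
+//	var msg somepb.SomeMessage
+//	err := UnmarshalString(`{"name":"example"}`, &msg)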
+func UnmarshalString(str string, pb proto.Message) error { + return new(Unmarshaler).Unmarshal(strings.NewReader(str), pb) +} + +// unmarshalValue converts/copies a value into the target. +// prop may be nil. +func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMessage, prop *proto.Properties) error { + targetType := target.Type() + + // Allocate memory for pointer fields. + if targetType.Kind() == reflect.Ptr { + // If input value is "null" and target is a pointer type, then the field should be treated as not set + // UNLESS the target is structpb.Value, in which case it should be set to structpb.NullValue. + _, isJSONPBUnmarshaler := target.Interface().(JSONPBUnmarshaler) + _, isMaybeJSONPBUnmarshaler := target.Interface().(JSONPBMaybeUnmarshaler) + if string(inputValue) == "null" && targetType != reflect.TypeOf(&types.Value{}) && !isJSONPBUnmarshaler && !isMaybeJSONPBUnmarshaler { + return nil + } + target.Set(reflect.New(targetType.Elem())) + + return u.unmarshalValue(target.Elem(), inputValue, prop) + } + + if jsu, ok := target.Addr().Interface().(JSONPBUnmarshaler); ok { + return jsu.UnmarshalJSONPB(u, []byte(inputValue)) + } + + if jsu, ok := target.Addr().Interface().(JSONPBMaybeUnmarshaler); ok { + if handled, err := jsu.MaybeUnmarshalJSONPB(u, []byte(inputValue)); handled { + return err + } + } + + // Handle well-known types that are not pointers. + if w, ok := target.Addr().Interface().(isWkt); ok { + switch w.XXX_WellKnownType() { + case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value", + "Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue": + return u.unmarshalValue(target.Field(0), inputValue, prop) + case "Any": + // Use json.RawMessage pointer type instead of value to support pre-1.8 version. 
+ // 1.8 changed RawMessage.MarshalJSON from pointer type to value type, see + // https://github.com/golang/go/issues/14493 + var jsonFields map[string]*json.RawMessage + if err := json.Unmarshal(inputValue, &jsonFields); err != nil { + return err + } + + val, ok := jsonFields["@type"] + if !ok || val == nil { + return errors.New("Any JSON doesn't have '@type'") + } + + var turl string + if err := json.Unmarshal([]byte(*val), &turl); err != nil { + return fmt.Errorf("can't unmarshal Any's '@type': %q", *val) + } + target.Field(0).SetString(turl) + + var m proto.Message + var err error + if u.AnyResolver != nil { + m, err = u.AnyResolver.Resolve(turl) + } else { + m, err = defaultResolveAny(turl) + } + if err != nil { + return err + } + + if _, ok := m.(isWkt); ok { + val, ok := jsonFields["value"] + if !ok { + return errors.New("Any JSON doesn't have 'value'") + } + + if err = u.unmarshalValue(reflect.ValueOf(m).Elem(), *val, nil); err != nil { + return fmt.Errorf("can't unmarshal Any nested proto %T: %v", m, err) + } + } else { + delete(jsonFields, "@type") + nestedProto, uerr := json.Marshal(jsonFields) + if uerr != nil { + return fmt.Errorf("can't generate JSON for Any's nested proto to be unmarshaled: %v", uerr) + } + + if err = u.unmarshalValue(reflect.ValueOf(m).Elem(), nestedProto, nil); err != nil { + return fmt.Errorf("can't unmarshal Any nested proto %T: %v", m, err) + } + } + + b, err := proto.Marshal(m) + if err != nil { + return fmt.Errorf("can't marshal proto %T into Any.Value: %v", m, err) + } + target.Field(1).SetBytes(b) + + return nil + case "Duration": + unq, err := unquote(string(inputValue)) + if err != nil { + return err + } + + d, err := time.ParseDuration(unq) + if err != nil { + return fmt.Errorf("bad Duration: %v", err) + } + + ns := d.Nanoseconds() + s := ns / 1e9 + ns %= 1e9 + target.Field(0).SetInt(s) + target.Field(1).SetInt(ns) + return nil + case "Timestamp": + unq, err := unquote(string(inputValue)) + if err != nil { + return err + } + + t, err := time.Parse(time.RFC3339Nano, unq) + if err != nil { + return fmt.Errorf("bad Timestamp: %v", err) + } + + target.Field(0).SetInt(t.Unix()) + target.Field(1).SetInt(int64(t.Nanosecond())) + return nil + case "Struct": + var m map[string]json.RawMessage + if err := json.Unmarshal(inputValue, &m); err != nil { + return fmt.Errorf("bad StructValue: %v", err) + } + target.Field(0).Set(reflect.ValueOf(map[string]*types.Value{})) + for k, jv := range m { + pv := &types.Value{} + if err := u.unmarshalValue(reflect.ValueOf(pv).Elem(), jv, prop); err != nil { + return fmt.Errorf("bad value in StructValue for key %q: %v", k, err) + } + target.Field(0).SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(pv)) + } + return nil + case "ListValue": + var s []json.RawMessage + if err := json.Unmarshal(inputValue, &s); err != nil { + return fmt.Errorf("bad ListValue: %v", err) + } + + target.Field(0).Set(reflect.ValueOf(make([]*types.Value, len(s)))) + for i, sv := range s { + if err := u.unmarshalValue(target.Field(0).Index(i), sv, prop); err != nil { + return err + } + } + return nil + case "Value": + ivStr := string(inputValue) + if ivStr == "null" { + target.Field(0).Set(reflect.ValueOf(&types.Value_NullValue{})) + } else if v, err := strconv.ParseFloat(ivStr, 0); err == nil { + target.Field(0).Set(reflect.ValueOf(&types.Value_NumberValue{NumberValue: v})) + } else if v, err := unquote(ivStr); err == nil { + target.Field(0).Set(reflect.ValueOf(&types.Value_StringValue{StringValue: v})) + } else if v, err := 
strconv.ParseBool(ivStr); err == nil { + target.Field(0).Set(reflect.ValueOf(&types.Value_BoolValue{BoolValue: v})) + } else if err := json.Unmarshal(inputValue, &[]json.RawMessage{}); err == nil { + lv := &types.ListValue{} + target.Field(0).Set(reflect.ValueOf(&types.Value_ListValue{ListValue: lv})) + return u.unmarshalValue(reflect.ValueOf(lv).Elem(), inputValue, prop) + } else if err := json.Unmarshal(inputValue, &map[string]json.RawMessage{}); err == nil { + sv := &types.Struct{} + target.Field(0).Set(reflect.ValueOf(&types.Value_StructValue{StructValue: sv})) + return u.unmarshalValue(reflect.ValueOf(sv).Elem(), inputValue, prop) + } else { + return fmt.Errorf("unrecognized type for Value %q", ivStr) + } + return nil + } + } + + if t, ok := target.Addr().Interface().(*time.Time); ok { + ts := &types.Timestamp{} + if err := u.unmarshalValue(reflect.ValueOf(ts).Elem(), inputValue, prop); err != nil { + return err + } + tt, err := types.TimestampFromProto(ts) + if err != nil { + return err + } + *t = tt + return nil + } + + if d, ok := target.Addr().Interface().(*time.Duration); ok { + dur := &types.Duration{} + if err := u.unmarshalValue(reflect.ValueOf(dur).Elem(), inputValue, prop); err != nil { + return err + } + dd, err := types.DurationFromProto(dur) + if err != nil { + return err + } + *d = dd + return nil + } + + // Handle enums, which have an underlying type of int32, + // and may appear as strings. + // The case of an enum appearing as a number is handled + // at the bottom of this function. + if inputValue[0] == '"' && prop != nil && prop.Enum != "" { + vmap := proto.EnumValueMap(prop.Enum) + // Don't need to do unquoting; valid enum names + // are from a limited character set. + s := inputValue[1 : len(inputValue)-1] + n, ok := vmap[string(s)] + if !ok { + return fmt.Errorf("unknown value %q for enum %s", s, prop.Enum) + } + if target.Kind() == reflect.Ptr { // proto2 + target.Set(reflect.New(targetType.Elem())) + target = target.Elem() + } + if targetType.Kind() != reflect.Int32 { + return fmt.Errorf("invalid target %q for enum %s", targetType.Kind(), prop.Enum) + } + target.SetInt(int64(n)) + return nil + } + + if prop != nil && len(prop.CustomType) > 0 && target.CanAddr() { + if m, ok := target.Addr().Interface().(interface { + UnmarshalJSON([]byte) error + }); ok { + return json.Unmarshal(inputValue, m) + } + } + + // Handle nested messages. + if targetType.Kind() == reflect.Struct { + var jsonFields map[string]json.RawMessage + if err := json.Unmarshal(inputValue, &jsonFields); err != nil { + return err + } + + consumeField := func(prop *proto.Properties) (json.RawMessage, bool) { + // Be liberal in what names we accept; both orig_name and camelName are okay. + fieldNames := acceptedJSONFieldNames(prop) + + vOrig, okOrig := jsonFields[fieldNames.orig] + vCamel, okCamel := jsonFields[fieldNames.camel] + if !okOrig && !okCamel { + return nil, false + } + // If, for some reason, both are present in the data, favour the camelName. 
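+ // e.g. a field declared as some_field (hypothetical) is accepted both as
+ // "some_field" and as "someField"; if both keys are present, the
+ // "someField" value wins.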
+ var raw json.RawMessage + if okOrig { + raw = vOrig + delete(jsonFields, fieldNames.orig) + } + if okCamel { + raw = vCamel + delete(jsonFields, fieldNames.camel) + } + return raw, true + } + + sprops := proto.GetProperties(targetType) + for i := 0; i < target.NumField(); i++ { + ft := target.Type().Field(i) + if strings.HasPrefix(ft.Name, "XXX_") { + continue + } + valueForField, ok := consumeField(sprops.Prop[i]) + if !ok { + continue + } + + if err := u.unmarshalValue(target.Field(i), valueForField, sprops.Prop[i]); err != nil { + return err + } + } + // Check for any oneof fields. + if len(jsonFields) > 0 { + for _, oop := range sprops.OneofTypes { + raw, ok := consumeField(oop.Prop) + if !ok { + continue + } + nv := reflect.New(oop.Type.Elem()) + target.Field(oop.Field).Set(nv) + if err := u.unmarshalValue(nv.Elem().Field(0), raw, oop.Prop); err != nil { + return err + } + } + } + // Handle proto2 extensions. + if len(jsonFields) > 0 { + if ep, ok := target.Addr().Interface().(proto.Message); ok { + for _, ext := range proto.RegisteredExtensions(ep) { + name := fmt.Sprintf("[%s]", ext.Name) + raw, ok := jsonFields[name] + if !ok { + continue + } + delete(jsonFields, name) + nv := reflect.New(reflect.TypeOf(ext.ExtensionType).Elem()) + if err := u.unmarshalValue(nv.Elem(), raw, nil); err != nil { + return err + } + if err := proto.SetExtension(ep, ext, nv.Interface()); err != nil { + return err + } + } + } + } + if !u.AllowUnknownFields && len(jsonFields) > 0 { + // Pick any field to be the scapegoat. + var f string + for fname := range jsonFields { + f = fname + break + } + return fmt.Errorf("unknown field %q in %v", f, targetType) + } + return nil + } + + // Handle arrays + if targetType.Kind() == reflect.Slice { + if targetType.Elem().Kind() == reflect.Uint8 { + outRef := reflect.New(targetType) + outVal := outRef.Interface() + //CustomType with underlying type []byte + if _, ok := outVal.(interface { + UnmarshalJSON([]byte) error + }); ok { + if err := json.Unmarshal(inputValue, outVal); err != nil { + return err + } + target.Set(outRef.Elem()) + return nil + } + // Special case for encoded bytes. Pre-go1.5 doesn't support unmarshalling + // strings into aliased []byte types. + // https://github.com/golang/go/commit/4302fd0409da5e4f1d71471a6770dacdc3301197 + // https://github.com/golang/go/commit/c60707b14d6be26bf4213114d13070bff00d0b0a + var out []byte + if err := json.Unmarshal(inputValue, &out); err != nil { + return err + } + target.SetBytes(out) + return nil + } + + var slc []json.RawMessage + if err := json.Unmarshal(inputValue, &slc); err != nil { + return err + } + if slc != nil { + l := len(slc) + target.Set(reflect.MakeSlice(targetType, l, l)) + for i := 0; i < l; i++ { + if err := u.unmarshalValue(target.Index(i), slc[i], prop); err != nil { + return err + } + } + } + return nil + } + + // Handle maps (whose keys are always strings) + if targetType.Kind() == reflect.Map { + var mp map[string]json.RawMessage + if err := json.Unmarshal(inputValue, &mp); err != nil { + return err + } + if mp != nil { + target.Set(reflect.MakeMap(targetType)) + for ks, raw := range mp { + // Unmarshal map key. The core json library already decoded the key into a + // string, so we handle that specially. Other types were quoted post-serialization. 
+ var k reflect.Value + if targetType.Key().Kind() == reflect.String { + k = reflect.ValueOf(ks) + } else { + k = reflect.New(targetType.Key()).Elem() + var kprop *proto.Properties + if prop != nil && prop.MapKeyProp != nil { + kprop = prop.MapKeyProp + } + if err := u.unmarshalValue(k, json.RawMessage(ks), kprop); err != nil { + return err + } + } + + if !k.Type().AssignableTo(targetType.Key()) { + k = k.Convert(targetType.Key()) + } + + // Unmarshal map value. + v := reflect.New(targetType.Elem()).Elem() + var vprop *proto.Properties + if prop != nil && prop.MapValProp != nil { + vprop = prop.MapValProp + } + if err := u.unmarshalValue(v, raw, vprop); err != nil { + return err + } + target.SetMapIndex(k, v) + } + } + return nil + } + + // Non-finite numbers can be encoded as strings. + isFloat := targetType.Kind() == reflect.Float32 || targetType.Kind() == reflect.Float64 + if isFloat { + if num, ok := nonFinite[string(inputValue)]; ok { + target.SetFloat(num) + return nil + } + } + + // integers & floats can be encoded as strings. In this case we drop + // the quotes and proceed as normal. + isNum := targetType.Kind() == reflect.Int64 || targetType.Kind() == reflect.Uint64 || + targetType.Kind() == reflect.Int32 || targetType.Kind() == reflect.Uint32 || + targetType.Kind() == reflect.Float32 || targetType.Kind() == reflect.Float64 + if isNum && strings.HasPrefix(string(inputValue), `"`) { + inputValue = inputValue[1 : len(inputValue)-1] + } + + // Use the encoding/json for parsing other value types. + return json.Unmarshal(inputValue, target.Addr().Interface()) +} + +func unquote(s string) (string, error) { + var ret string + err := json.Unmarshal([]byte(s), &ret) + return ret, err +} + +// jsonProperties returns parsed proto.Properties for the field and corrects JSONName attribute. +func jsonProperties(f reflect.StructField, origName bool) *proto.Properties { + var prop proto.Properties + prop.Init(f.Type, f.Name, f.Tag.Get("protobuf"), &f) + if origName || prop.JSONName == "" { + prop.JSONName = prop.OrigName + } + return &prop +} + +type fieldNames struct { + orig, camel string +} + +func acceptedJSONFieldNames(prop *proto.Properties) fieldNames { + opts := fieldNames{orig: prop.OrigName, camel: prop.OrigName} + if prop.JSONName != "" { + opts.camel = prop.JSONName + } + return opts +} + +// Writer wrapper inspired by https://blog.golang.org/errors-are-values +type errWriter struct { + writer io.Writer + err error +} + +func (w *errWriter) write(str string) { + if w.err != nil { + return + } + _, w.err = w.writer.Write([]byte(str)) +} + +// Map fields may have key types of non-float scalars, strings and enums. +// The easiest way to sort them in some deterministic order is to use fmt. +// If this turns out to be inefficient we can always consider other options, +// such as doing a Schwartzian transform. +// +// Numeric keys are sorted in numeric order per +// https://developers.google.com/protocol-buffers/docs/proto#maps. 
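(Editorial aside, not part of the patch: the two blocks just above encode the proto3 JSON rule that 64-bit integers and floats may arrive as quoted strings, and that NaN and the infinities are only representable as strings. A hedged sketch of what that buys, again via the stock gogo jsonpb package and its wrapper types:)

package main

import (
	"strings"

	"github.com/gogo/protobuf/jsonpb"
	"github.com/gogo/protobuf/types"
)

func main() {
	u := &jsonpb.Unmarshaler{}

	// A quoted 64-bit integer keeps full precision; the quotes are stripped
	// before the value is handed to encoding/json.
	var i types.Int64Value
	_ = u.Unmarshal(strings.NewReader(`"9007199254740993"`), &i)

	// Non-finite floats only exist as the quoted strings "NaN",
	// "Infinity" and "-Infinity".
	var d types.DoubleValue
	_ = u.Unmarshal(strings.NewReader(`"NaN"`), &d)
}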
+type mapKeys []reflect.Value + +func (s mapKeys) Len() int { return len(s) } +func (s mapKeys) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s mapKeys) Less(i, j int) bool { + if k := s[i].Kind(); k == s[j].Kind() { + switch k { + case reflect.String: + return s[i].String() < s[j].String() + case reflect.Int32, reflect.Int64: + return s[i].Int() < s[j].Int() + case reflect.Uint32, reflect.Uint64: + return s[i].Uint() < s[j].Uint() + } + } + return fmt.Sprint(s[i].Interface()) < fmt.Sprint(s[j].Interface()) +} + +// checkRequiredFields returns an error if any required field in the given proto message is not set. +// This function is used by both Marshal and Unmarshal. While required fields only exist in a +// proto2 message, a proto3 message can contain proto2 message(s). +func checkRequiredFields(pb proto.Message) error { + // Most well-known type messages do not contain required fields. The "Any" type may contain + // a message that has required fields. + // + // When an Any message is being marshaled, the code will invoked proto.Unmarshal on Any.Value + // field in order to transform that into JSON, and that should have returned an error if a + // required field is not set in the embedded message. + // + // When an Any message is being unmarshaled, the code will have invoked proto.Marshal on the + // embedded message to store the serialized message in Any.Value field, and that should have + // returned an error if a required field is not set. + if _, ok := pb.(isWkt); ok { + return nil + } + + v := reflect.ValueOf(pb) + // Skip message if it is not a struct pointer. + if v.Kind() != reflect.Ptr { + return nil + } + v = v.Elem() + if v.Kind() != reflect.Struct { + return nil + } + + for i := 0; i < v.NumField(); i++ { + field := v.Field(i) + sfield := v.Type().Field(i) + + if sfield.PkgPath != "" { + // blank PkgPath means the field is exported; skip if not exported + continue + } + + if strings.HasPrefix(sfield.Name, "XXX_") { + continue + } + + // Oneof field is an interface implemented by wrapper structs containing the actual oneof + // field, i.e. an interface containing &T{real_value}. + if sfield.Tag.Get("protobuf_oneof") != "" { + if field.Kind() != reflect.Interface { + continue + } + v := field.Elem() + if v.Kind() != reflect.Ptr || v.IsNil() { + continue + } + v = v.Elem() + if v.Kind() != reflect.Struct || v.NumField() < 1 { + continue + } + field = v.Field(0) + sfield = v.Type().Field(0) + } + + protoTag := sfield.Tag.Get("protobuf") + if protoTag == "" { + continue + } + var prop proto.Properties + prop.Init(sfield.Type, sfield.Name, protoTag, &sfield) + + switch field.Kind() { + case reflect.Map: + if field.IsNil() { + continue + } + // Check each map value. + keys := field.MapKeys() + for _, k := range keys { + v := field.MapIndex(k) + if err := checkRequiredFieldsInValue(v); err != nil { + return err + } + } + case reflect.Slice: + // Handle non-repeated type, e.g. bytes. + if !prop.Repeated { + if prop.Required && field.IsNil() { + return fmt.Errorf("required field %q is not set", prop.Name) + } + continue + } + + // Handle repeated type. + if field.IsNil() { + continue + } + // Check each slice item. 
+ for i := 0; i < field.Len(); i++ { + v := field.Index(i) + if err := checkRequiredFieldsInValue(v); err != nil { + return err + } + } + case reflect.Ptr: + if field.IsNil() { + if prop.Required { + return fmt.Errorf("required field %q is not set", prop.Name) + } + continue + } + if err := checkRequiredFieldsInValue(field); err != nil { + return err + } + } + } + + // Handle proto2 extensions. + for _, ext := range proto.RegisteredExtensions(pb) { + if !proto.HasExtension(pb, ext) { + continue + } + ep, err := proto.GetExtension(pb, ext) + if err != nil { + return err + } + err = checkRequiredFieldsInValue(reflect.ValueOf(ep)) + if err != nil { + return err + } + } + + return nil +} + +func checkRequiredFieldsInValue(v reflect.Value) error { + if v.Type().Implements(messageType) { + return checkRequiredFields(v.Interface().(proto.Message)) + } + return nil +} diff -Nru temporal-1.21.5-1/src/vendor/go.temporal.io/api/internal/temporaljsonpb/marshal.go temporal-1.22.5/src/vendor/go.temporal.io/api/internal/temporaljsonpb/marshal.go --- temporal-1.21.5-1/src/vendor/go.temporal.io/api/internal/temporaljsonpb/marshal.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.temporal.io/api/internal/temporaljsonpb/marshal.go 2024-02-23 09:46:13.000000000 +0000 @@ -0,0 +1,42 @@ +// The MIT License +// +// Copyright (c) 2022 Temporal Technologies Inc. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package jsonpb + +// JSONPBMaybeMarshaler is implemented by any proto struct that wants to +// customize optional Temporal-specific JSON conversion. +type JSONPBMaybeMarshaler interface { + // MaybeMarshalJSONPB is for formatting the proto message as JSON. If the + // "handled" result value is false, "b" and "err" are ignored and the default + // proto JSON behavior occurs. currIndent is the current prefix depth but + // should not be applied if m.Indent is empty. + MaybeMarshalJSONPB(m *Marshaler, currIndent string) (handled bool, b []byte, err error) +} + +// JSONPBMaybeUnmarshaler is implemented by any proto struct that wants to +// customize optional Temporal-specific JSON conversion. +type JSONPBMaybeUnmarshaler interface { + // MaybeUnmarshalJSONPB is for parsing the given JSON into the proto message. + // If the "handled" result value is false, "err" is ignored and the default + // behavior proto JSON occurs. 
+ MaybeUnmarshalJSONPB(u *Unmarshaler, b []byte) (handled bool, err error) +} diff -Nru temporal-1.21.5-1/src/vendor/go.temporal.io/api/operatorservice/v1/request_response.pb.go temporal-1.22.5/src/vendor/go.temporal.io/api/operatorservice/v1/request_response.pb.go --- temporal-1.21.5-1/src/vendor/go.temporal.io/api/operatorservice/v1/request_response.pb.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.temporal.io/api/operatorservice/v1/request_response.pb.go 2024-02-23 09:46:13.000000000 +0000 @@ -334,7 +334,9 @@ // (-- api-linter: core::0135::request-name-required=disabled // aip.dev/not-precedent: DeleteNamespace RPC doesn't follow Google API format. --) type DeleteNamespaceRequest struct { - Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` + // Only one of namespace or namespace_id must be specified to identify namespace. + Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` + NamespaceId string `protobuf:"bytes,2,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` } func (m *DeleteNamespaceRequest) Reset() { *m = DeleteNamespaceRequest{} } @@ -376,6 +378,13 @@ return "" } +func (m *DeleteNamespaceRequest) GetNamespaceId() string { + if m != nil { + return m.NamespaceId + } + return "" +} + type DeleteNamespaceResponse struct { // Temporary namespace name that is used during reclaim resources step. DeletedNamespace string `protobuf:"bytes,1,opt,name=deleted_namespace,json=deletedNamespace,proto3" json:"deleted_namespace,omitempty"` @@ -806,65 +815,66 @@ } var fileDescriptor_43cdd5e82c482041 = []byte{ - // 917 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0x4f, 0x6f, 0x1b, 0x45, - 0x14, 0xf7, 0xda, 0xa4, 0xc4, 0xaf, 0x7f, 0x92, 0x6c, 0xd2, 0x62, 0x25, 0xcd, 0xd6, 0x5d, 0x21, - 0x28, 0xaa, 0xb4, 0x21, 0x41, 0x54, 0x95, 0x2b, 0x04, 0x6e, 0x68, 0x51, 0xa5, 0x42, 0xc3, 0x3a, - 0xe4, 0xd0, 0xcb, 0x6a, 0xb2, 0xfb, 0x9a, 0x0c, 0xb5, 0x77, 0x96, 0x99, 0xb1, 0xa9, 0x7b, 0x40, - 0xbd, 0xc3, 0x81, 0x0b, 0xdf, 0xa1, 0xe2, 0x2b, 0xf0, 0x05, 0x38, 0xe6, 0xd8, 0x23, 0x71, 0x2e, - 0x88, 0x53, 0xc5, 0x27, 0x40, 0x33, 0x3b, 0xeb, 0xd8, 0xeb, 0x35, 0x2e, 0xa2, 0xbd, 0xd9, 0xbf, - 0xf7, 0xde, 0xef, 0xfd, 0xf6, 0xcd, 0x6f, 0x9e, 0x06, 0x6e, 0x48, 0xec, 0x24, 0x8c, 0x93, 0xf6, - 0x06, 0x49, 0xe8, 0x06, 0x4b, 0x90, 0x13, 0xc9, 0xb8, 0x40, 0xde, 0xa3, 0x21, 0x6e, 0xf4, 0x36, - 0x37, 0x38, 0x7e, 0xd7, 0x45, 0x21, 0x03, 0x8e, 0x22, 0x61, 0xb1, 0x40, 0x2f, 0xe1, 0x4c, 0x32, - 0xfb, 0x4a, 0x56, 0xe7, 0x91, 0x84, 0x7a, 0xb9, 0x3a, 0xaf, 0xb7, 0xb9, 0xea, 0x8e, 0x11, 0x63, - 0xdc, 0xed, 0x08, 0x45, 0x17, 0xb2, 0x4e, 0x87, 0xc5, 0x29, 0x89, 0xfb, 0xbc, 0x0c, 0xab, 0xcd, - 0x28, 0x6a, 0x21, 0xe1, 0xe1, 0x61, 0x53, 0x4a, 0x4e, 0xf7, 0xbb, 0x12, 0x85, 0x9f, 0xb6, 0xb4, - 0x7f, 0x80, 0x25, 0xa1, 0x43, 0x01, 0x19, 0xc6, 0x6a, 0x56, 0xbd, 0x72, 0xed, 0xec, 0xd6, 0xd7, - 0xde, 0x8c, 0xfe, 0xde, 0x74, 0x5e, 0x2f, 0x8f, 0xdf, 0x89, 0x25, 0xef, 0xfb, 0x8b, 0x22, 0x07, - 0xdb, 0x97, 0xa1, 0x1a, 0x93, 0x0e, 0x8a, 0x84, 0x84, 0x58, 0x2b, 0xd7, 0xad, 0x6b, 0x55, 0xff, - 0x14, 0x58, 0x6d, 0xc3, 0xc5, 0x42, 0x22, 0x7b, 0x11, 0x2a, 0x8f, 0xb1, 0x5f, 0xb3, 0x74, 0x81, - 0xfa, 0x69, 0x7f, 0x02, 0x73, 0x3d, 0xd2, 0xee, 0xa6, 0x24, 0x17, 0xb6, 0xde, 0x1f, 0x17, 0xaf, - 0x67, 0xa3, 0x24, 0xdf, 0x8b, 0x23, 0x7c, 0x82, 0xd1, 0x9e, 0x4a, 0xdd, 0xed, 0x27, 0xe8, 0xa7, - 0x55, 0x8d, 0xf2, 0x4d, 0xcb, 0x5d, 0x87, 0xb5, 0xc2, 0x2f, 0x4a, 0x0f, 0xc5, 
0xfd, 0x16, 0xd6, - 0x7d, 0xec, 0xb0, 0x1e, 0x4e, 0x9b, 0xe5, 0xf5, 0x69, 0xb3, 0xac, 0xfe, 0xd7, 0x0f, 0x77, 0xeb, - 0xe0, 0x4c, 0xeb, 0x65, 0xd4, 0xdc, 0x82, 0xb5, 0xfb, 0x54, 0xc8, 0x69, 0x5a, 0xc6, 0xe8, 0xad, - 0x3c, 0xfd, 0x6f, 0x73, 0x70, 0xb9, 0xb8, 0x3a, 0x65, 0xb7, 0x9f, 0x59, 0xb0, 0x14, 0x76, 0x85, - 0x64, 0x9d, 0x49, 0x5f, 0xb4, 0x66, 0xfa, 0xe2, 0xdf, 0xa8, 0xbd, 0x6d, 0x4d, 0x3b, 0xe1, 0x8c, - 0x30, 0x07, 0x6b, 0x09, 0xa2, 0x2f, 0x24, 0x8e, 0x49, 0x28, 0xbf, 0x0e, 0x09, 0x2d, 0x4d, 0x3b, - 0x69, 0xce, 0x1c, 0x6c, 0x7f, 0x0f, 0x17, 0x84, 0x64, 0x9c, 0x1c, 0x60, 0x20, 0xc2, 0x43, 0xec, - 0x90, 0x5a, 0x45, 0xb7, 0xdf, 0xf9, 0x9f, 0xed, 0x53, 0xce, 0x96, 0xa6, 0x4c, 0x7b, 0x9f, 0x17, - 0xa3, 0x98, 0xf2, 0x7d, 0xe1, 0x98, 0xde, 0x88, 0xef, 0xf5, 0x2d, 0x2b, 0x9a, 0xc8, 0x9b, 0xe9, - 0xf6, 0x19, 0xd8, 0x93, 0x03, 0x28, 0x68, 0xb5, 0x32, 0xda, 0xaa, 0x3a, 0x7a, 0x4f, 0x6f, 0xc0, - 0xa5, 0xcf, 0xb1, 0x8d, 0x12, 0xbf, 0xca, 0x0c, 0xfd, 0x6a, 0xae, 0xbf, 0x0b, 0xef, 0x4c, 0xd4, - 0x19, 0xbf, 0x5f, 0x87, 0xa5, 0x48, 0x87, 0xa2, 0x20, 0x4f, 0xb0, 0x68, 0x02, 0xc3, 0x22, 0xf7, - 0x17, 0x0b, 0xae, 0x34, 0xa3, 0xe8, 0x01, 0xff, 0x26, 0x89, 0x88, 0x44, 0x75, 0x51, 0x25, 0x6e, - 0xb7, 0xbb, 0x42, 0x22, 0xcf, 0x94, 0x7c, 0x00, 0x8b, 0x8f, 0x38, 0x8b, 0x25, 0xc6, 0x51, 0x40, - 0xa2, 0x88, 0xa3, 0x10, 0x86, 0x6f, 0x21, 0xc3, 0x9b, 0x29, 0x6c, 0x7f, 0x01, 0x75, 0x8c, 0xc9, - 0x7e, 0x1b, 0x03, 0xae, 0x99, 0x82, 0x30, 0xa5, 0x0a, 0x42, 0x16, 0xc7, 0x18, 0x4a, 0xca, 0x62, - 0x3d, 0x83, 0x79, 0x7f, 0x3d, 0xcd, 0x1b, 0x6b, 0xb8, 0x3d, 0x4c, 0x72, 0x5d, 0xa8, 0x4f, 0x97, - 0x65, 0xd6, 0xc6, 0xa7, 0xb0, 0x9a, 0x2e, 0x96, 0x42, 0xd5, 0x57, 0xe1, 0x5c, 0xd6, 0x5c, 0x8d, - 0xc1, 0x28, 0x3e, 0x6b, 0x30, 0x35, 0x01, 0xb5, 0x24, 0x0b, 0x09, 0x0c, 0xff, 0x43, 0x58, 0x56, - 0xde, 0x37, 0xf0, 0x70, 0x1d, 0xad, 0x41, 0x35, 0xd1, 0xd7, 0x88, 0x3e, 0x4d, 0x59, 0xe7, 0xfc, - 0x79, 0x05, 0xb4, 0xe8, 0x53, 0xb4, 0xdf, 0x83, 0x85, 0x18, 0x9f, 0xc8, 0x40, 0x67, 0x48, 0xf6, - 0x18, 0xd3, 0xef, 0x3d, 0xe7, 0x9f, 0x57, 0xf0, 0x0e, 0x39, 0xc0, 0x5d, 0x05, 0xba, 0x3f, 0x59, - 0xb0, 0x32, 0x4e, 0x6e, 0x4e, 0xef, 0x3e, 0xcc, 0x1b, 0x89, 0xd9, 0x8e, 0xfa, 0x70, 0xe6, 0x0d, - 0x35, 0x24, 0x5f, 0xa2, 0x24, 0x11, 0x91, 0xc4, 0x1f, 0x32, 0x14, 0xc9, 0x79, 0xab, 0x48, 0xce, - 0x8f, 0x65, 0x58, 0xc8, 0xb1, 0xbc, 0xc2, 0x00, 0xed, 0x75, 0x80, 0x2c, 0x85, 0x46, 0xd9, 0xe6, - 0x37, 0xc8, 0xbd, 0xc8, 0xae, 0xc1, 0xdb, 0x99, 0x5f, 0x2a, 0x3a, 0x96, 0xfd, 0xb5, 0x6f, 0x42, - 0x8d, 0xc6, 0x54, 0x52, 0xd2, 0x0e, 0x1e, 0x11, 0xda, 0x66, 0x3d, 0xe4, 0x41, 0x0f, 0xb9, 0x50, - 0xfe, 0x50, 0x02, 0x2b, 0xfe, 0x25, 0x13, 0xbf, 0x6b, 0xc2, 0x7b, 0x69, 0xd4, 0xf6, 0x60, 0xf9, - 0x90, 0xaa, 0x0d, 0xd3, 0x0f, 0xc4, 0x21, 0xe1, 0x51, 0x10, 0xb2, 0x6e, 0x2c, 0x6b, 0x73, 0xfa, - 0x1c, 0x96, 0x4c, 0xa8, 0xa5, 0x22, 0xdb, 0x2a, 0x60, 0x6f, 0xc1, 0x45, 0x2a, 0x46, 0xec, 0x17, - 0xa4, 0xbe, 0x8b, 0x6a, 0x67, 0xb4, 0x0d, 0x97, 0xa9, 0x38, 0x75, 0xdd, 0x9d, 0x34, 0x74, 0xfb, - 0x6f, 0xeb, 0xe8, 0xd8, 0x29, 0xbd, 0x38, 0x76, 0x4a, 0x2f, 0x8f, 0x1d, 0xeb, 0xd9, 0xc0, 0xb1, - 0x9e, 0x0f, 0x1c, 0xeb, 0xf7, 0x81, 0x63, 0x1d, 0x0d, 0x1c, 0xeb, 0x8f, 0x81, 0x63, 0xfd, 0x39, - 0x70, 0x4a, 0x2f, 0x07, 0x8e, 0xf5, 0xf3, 0x89, 0x53, 0x3a, 0x3a, 0x71, 0x4a, 0x2f, 0x4e, 0x9c, - 0x12, 0xb8, 0x94, 0xcd, 0x3a, 0xaa, 0xdb, 0x2b, 0xc6, 0x45, 0xd9, 0x79, 0xef, 0xa8, 0x87, 0xcd, - 0x8e, 0xf5, 0xf0, 0xe3, 0x83, 0x91, 0x5a, 0xca, 0xa6, 0xbc, 0xae, 0x6e, 0xe5, 0xa0, 0x5f, 0xcb, - 0x57, 0x77, 0x4d, 0x11, 0x65, 0x5e, 0x33, 0xa1, 0xde, 0x03, 0x93, 0xd0, 0x32, 0x2d, 0xf7, 0x36, - 0xff, 
0x2a, 0xbf, 0x7b, 0x9a, 0xd3, 0x68, 0x34, 0x13, 0xda, 0x68, 0xe4, 0xb2, 0x1a, 0x8d, 0xbd, - 0xcd, 0xfd, 0x33, 0xfa, 0x8d, 0xf5, 0xd1, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x68, 0xc9, - 0xfb, 0xe2, 0x09, 0x00, 0x00, + // 931 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0x41, 0x6f, 0x1b, 0x45, + 0x14, 0xf6, 0x3a, 0xa4, 0xc4, 0x2f, 0x69, 0x93, 0x6c, 0xd2, 0x62, 0x25, 0xcd, 0xd6, 0x59, 0x21, + 0x28, 0xaa, 0xb4, 0x21, 0x41, 0xa0, 0xca, 0x15, 0x02, 0x37, 0xb4, 0x28, 0x52, 0xa1, 0x61, 0x1d, + 0x22, 0xd1, 0xcb, 0x6a, 0xb2, 0xfb, 0x9a, 0x0c, 0xb5, 0x77, 0x96, 0x99, 0xb1, 0xa9, 0x7b, 0x40, + 0xbd, 0xc3, 0x81, 0x0b, 0xff, 0xa1, 0xe2, 0x2f, 0xf0, 0x07, 0x38, 0xe6, 0xd8, 0x23, 0x71, 0x2e, + 0x88, 0x53, 0xc5, 0x2f, 0x40, 0x33, 0x3b, 0xeb, 0xd8, 0xeb, 0x35, 0x29, 0xa2, 0xbd, 0xd9, 0xdf, + 0x7b, 0xef, 0x7b, 0xdf, 0xbc, 0xf9, 0xde, 0x68, 0xe1, 0x23, 0x89, 0xed, 0x84, 0x71, 0xd2, 0xda, + 0x20, 0x09, 0xdd, 0x60, 0x09, 0x72, 0x22, 0x19, 0x17, 0xc8, 0xbb, 0x34, 0xc4, 0x8d, 0xee, 0xe6, + 0x06, 0xc7, 0xef, 0x3a, 0x28, 0x64, 0xc0, 0x51, 0x24, 0x2c, 0x16, 0xe8, 0x25, 0x9c, 0x49, 0x66, + 0x5f, 0xcb, 0xea, 0x3c, 0x92, 0x50, 0x2f, 0x57, 0xe7, 0x75, 0x37, 0x57, 0xdc, 0x11, 0x62, 0x8c, + 0x3b, 0x6d, 0xa1, 0xe8, 0x42, 0xd6, 0x6e, 0xb3, 0x38, 0x25, 0x71, 0x9f, 0x95, 0x61, 0xa5, 0x11, + 0x45, 0x4d, 0x24, 0x3c, 0x3c, 0x6a, 0x48, 0xc9, 0xe9, 0x41, 0x47, 0xa2, 0xf0, 0xd3, 0x96, 0xf6, + 0x0f, 0xb0, 0x28, 0x74, 0x28, 0x20, 0x83, 0x58, 0xd5, 0xaa, 0x4d, 0x5d, 0x9f, 0xdd, 0xfa, 0xca, + 0x3b, 0xa7, 0xbf, 0x37, 0x99, 0xd7, 0xcb, 0xe3, 0x77, 0x62, 0xc9, 0x7b, 0xfe, 0x82, 0xc8, 0xc1, + 0xf6, 0x55, 0xa8, 0xc4, 0xa4, 0x8d, 0x22, 0x21, 0x21, 0x56, 0xcb, 0x35, 0xeb, 0x7a, 0xc5, 0x3f, + 0x03, 0x56, 0x5a, 0x70, 0xb9, 0x90, 0xc8, 0x5e, 0x80, 0xa9, 0x47, 0xd8, 0xab, 0x5a, 0xba, 0x40, + 0xfd, 0xb4, 0x3f, 0x86, 0xe9, 0x2e, 0x69, 0x75, 0x52, 0x92, 0x4b, 0x5b, 0xef, 0x8e, 0x8a, 0xd7, + 0xb3, 0x51, 0x92, 0x77, 0xe2, 0x08, 0x1f, 0x63, 0xb4, 0xaf, 0x52, 0xf7, 0x7a, 0x09, 0xfa, 0x69, + 0x55, 0xbd, 0x7c, 0xd3, 0x72, 0xd7, 0x60, 0xb5, 0xf0, 0x44, 0xe9, 0xa5, 0xb8, 0xdf, 0xc2, 0x9a, + 0x8f, 0x6d, 0xd6, 0xc5, 0x49, 0xb3, 0xbc, 0x31, 0x69, 0x96, 0x95, 0xff, 0x7a, 0x70, 0xb7, 0x06, + 0xce, 0xa4, 0x5e, 0x46, 0xcd, 0x2d, 0x58, 0xbd, 0x47, 0x85, 0x9c, 0xa4, 0x65, 0x84, 0xde, 0xca, + 0xd3, 0xff, 0x36, 0x0d, 0x57, 0x8b, 0xab, 0x53, 0x76, 0xfb, 0xa9, 0x05, 0x8b, 0x61, 0x47, 0x48, + 0xd6, 0x1e, 0xf7, 0x45, 0xf3, 0x5c, 0x5f, 0xfc, 0x1b, 0xb5, 0xb7, 0xad, 0x69, 0xc7, 0x9c, 0x11, + 0xe6, 0x60, 0x2d, 0x41, 0xf4, 0x84, 0xc4, 0x11, 0x09, 0xe5, 0x57, 0x21, 0xa1, 0xa9, 0x69, 0xc7, + 0xcd, 0x99, 0x83, 0xed, 0xef, 0xe1, 0x92, 0x90, 0x8c, 0x93, 0x43, 0x0c, 0x44, 0x78, 0x84, 0x6d, + 0x52, 0x9d, 0xd2, 0xed, 0x77, 0xff, 0x67, 0xfb, 0x94, 0xb3, 0xa9, 0x29, 0xd3, 0xde, 0x17, 0xc5, + 0x30, 0xa6, 0x7c, 0x5f, 0x38, 0xa6, 0xd7, 0xe2, 0x7b, 0xbd, 0x65, 0x45, 0x13, 0x79, 0x3d, 0xdd, + 0x3e, 0x05, 0x7b, 0x7c, 0x00, 0x05, 0xad, 0x96, 0x87, 0x5b, 0x55, 0x86, 0xf7, 0xf4, 0x1b, 0xb8, + 0xf2, 0x19, 0xb6, 0x50, 0xe2, 0x97, 0x99, 0xa1, 0x5f, 0xca, 0xf5, 0xf6, 0x3a, 0xcc, 0x0d, 0xfe, + 0x04, 0x34, 0x32, 0xc4, 0xb3, 0x03, 0x6c, 0x27, 0x72, 0xef, 0xc2, 0x5b, 0x63, 0xd4, 0x66, 0x25, + 0x6e, 0xc0, 0x62, 0xa4, 0x43, 0x51, 0x90, 0xef, 0xb1, 0x60, 0x02, 0x83, 0x22, 0xf7, 0x17, 0x0b, + 0xae, 0x35, 0xa2, 0xe8, 0x3e, 0xff, 0x3a, 0x89, 0x88, 0x44, 0xb5, 0xcb, 0x12, 0xb7, 0x5b, 0x1d, + 0x21, 0x91, 0x67, 0x62, 0xdf, 0x83, 0x85, 0x87, 0x9c, 0xc5, 0x12, 0xe3, 0x28, 0x20, 0x51, 0xc4, + 0x51, 0x08, 0xc3, 0x37, 0x9f, 0xe1, 0x8d, 0x14, 
0xb6, 0x3f, 0x87, 0x1a, 0xc6, 0xe4, 0xa0, 0x85, + 0x01, 0xd7, 0x4c, 0x41, 0x98, 0x52, 0x05, 0x21, 0x8b, 0x63, 0x0c, 0x25, 0x65, 0xb1, 0x3e, 0xcd, + 0x8c, 0xbf, 0x96, 0xe6, 0x8d, 0x34, 0xdc, 0x1e, 0x24, 0xb9, 0x2e, 0xd4, 0x26, 0xcb, 0x32, 0x2f, + 0xcb, 0x27, 0xb0, 0x92, 0xbe, 0x3d, 0x85, 0xaa, 0xd7, 0x61, 0x2e, 0x6b, 0xae, 0xc6, 0x60, 0x14, + 0xcf, 0x1a, 0x4c, 0x4d, 0x40, 0xbd, 0xa3, 0x85, 0x04, 0x86, 0xff, 0x01, 0x2c, 0xa9, 0xf5, 0x30, + 0xf0, 0xe0, 0xc5, 0x5a, 0x85, 0x4a, 0xa2, 0x37, 0x8d, 0x3e, 0x49, 0x59, 0xa7, 0xfd, 0x19, 0x05, + 0x34, 0xe9, 0x13, 0xb4, 0xdf, 0x81, 0xf9, 0x18, 0x1f, 0xcb, 0x40, 0x67, 0x48, 0xf6, 0x08, 0xd3, + 0xf3, 0xce, 0xf9, 0x17, 0x15, 0xbc, 0x4b, 0x0e, 0x71, 0x4f, 0x81, 0xee, 0x4f, 0x16, 0x2c, 0x8f, + 0x92, 0x9b, 0xdb, 0xbb, 0x07, 0x33, 0x46, 0x62, 0xf6, 0x8c, 0xbd, 0x7f, 0xee, 0x12, 0x1b, 0x92, + 0x2f, 0x50, 0x92, 0x88, 0x48, 0xe2, 0x0f, 0x18, 0x8a, 0xe4, 0xbc, 0x51, 0x24, 0xe7, 0xc7, 0x32, + 0xcc, 0xe7, 0x58, 0x5e, 0x62, 0x80, 0xf6, 0x1a, 0x40, 0x96, 0x32, 0xb0, 0x69, 0xc5, 0x20, 0x3b, + 0x91, 0x5d, 0x85, 0x37, 0x33, 0xbf, 0x4c, 0xe9, 0x58, 0xf6, 0xd7, 0xbe, 0x09, 0x55, 0x1a, 0x53, + 0x49, 0x49, 0x2b, 0x78, 0x48, 0x68, 0x8b, 0x75, 0x91, 0x07, 0x5d, 0xe4, 0x42, 0xf9, 0x43, 0x09, + 0x9c, 0xf2, 0xaf, 0x98, 0xf8, 0x5d, 0x13, 0xde, 0x4f, 0xa3, 0xb6, 0x07, 0x4b, 0x47, 0x54, 0x3d, + 0x42, 0xbd, 0x40, 0x1c, 0x11, 0x1e, 0x05, 0x21, 0xeb, 0xc4, 0xb2, 0x3a, 0xad, 0xef, 0x61, 0xd1, + 0x84, 0x9a, 0x2a, 0xb2, 0xad, 0x02, 0xf6, 0x16, 0x5c, 0xa6, 0x62, 0xc8, 0x7e, 0x41, 0xea, 0xbb, + 0xa8, 0x7a, 0x41, 0xdb, 0x70, 0x89, 0x8a, 0x33, 0xd7, 0xdd, 0x49, 0x43, 0xb7, 0xff, 0xb6, 0x8e, + 0x4f, 0x9c, 0xd2, 0xf3, 0x13, 0xa7, 0xf4, 0xe2, 0xc4, 0xb1, 0x9e, 0xf6, 0x1d, 0xeb, 0x59, 0xdf, + 0xb1, 0x7e, 0xef, 0x3b, 0xd6, 0x71, 0xdf, 0xb1, 0xfe, 0xe8, 0x3b, 0xd6, 0x9f, 0x7d, 0xa7, 0xf4, + 0xa2, 0xef, 0x58, 0x3f, 0x9f, 0x3a, 0xa5, 0xe3, 0x53, 0xa7, 0xf4, 0xfc, 0xd4, 0x29, 0x81, 0x4b, + 0xd9, 0x79, 0x57, 0x75, 0x7b, 0xd9, 0xb8, 0x28, 0xbb, 0xef, 0x5d, 0xf5, 0xed, 0xb3, 0x6b, 0x3d, + 0xf8, 0xf0, 0x70, 0xa8, 0x96, 0xb2, 0x09, 0x1f, 0x60, 0xb7, 0x72, 0xd0, 0xaf, 0xe5, 0xf5, 0x3d, + 0x53, 0x44, 0x99, 0xd7, 0x48, 0xa8, 0x77, 0xdf, 0x24, 0x34, 0x4d, 0xcb, 0xfd, 0xcd, 0xbf, 0xca, + 0x6f, 0x9f, 0xe5, 0xd4, 0xeb, 0x8d, 0x84, 0xd6, 0xeb, 0xb9, 0xac, 0x7a, 0x7d, 0x7f, 0xf3, 0xe0, + 0x82, 0xfe, 0x0c, 0xfb, 0xe0, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x68, 0x18, 0x1a, 0x9d, 0x05, + 0x0a, 0x00, 0x00, } func (this *AddSearchAttributesRequest) Equal(that interface{}) bool { @@ -1064,6 +1074,9 @@ if this.Namespace != that1.Namespace { return false } + if this.NamespaceId != that1.NamespaceId { + return false + } return true } func (this *DeleteNamespaceResponse) Equal(that interface{}) bool { @@ -1395,9 +1408,10 @@ if this == nil { return "nil" } - s := make([]string, 0, 5) + s := make([]string, 0, 6) s = append(s, "&operatorservice.DeleteNamespaceRequest{") s = append(s, "Namespace: "+fmt.Sprintf("%#v", this.Namespace)+",\n") + s = append(s, "NamespaceId: "+fmt.Sprintf("%#v", this.NamespaceId)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -1755,6 +1769,13 @@ _ = i var l int _ = l + if len(m.NamespaceId) > 0 { + i -= len(m.NamespaceId) + copy(dAtA[i:], m.NamespaceId) + i = encodeVarintRequestResponse(dAtA, i, uint64(len(m.NamespaceId))) + i-- + dAtA[i] = 0x12 + } if len(m.Namespace) > 0 { i -= len(m.Namespace) copy(dAtA[i:], m.Namespace) @@ -2179,6 +2200,10 @@ if l > 0 { n += 1 + l + sovRequestResponse(uint64(l)) } + l = len(m.NamespaceId) + if l > 0 { + n += 1 + l + sovRequestResponse(uint64(l)) + } 
return n } @@ -2421,6 +2446,7 @@ } s := strings.Join([]string{`&DeleteNamespaceRequest{`, `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `NamespaceId:` + fmt.Sprintf("%v", this.NamespaceId) + `,`, `}`, }, "") return s @@ -3497,6 +3523,38 @@ } m.Namespace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamespaceId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRequestResponse + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRequestResponse + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRequestResponse + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NamespaceId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipRequestResponse(dAtA[iNdEx:]) diff -Nru temporal-1.21.5-1/src/vendor/go.temporal.io/api/proxy/interceptor.go temporal-1.22.5/src/vendor/go.temporal.io/api/proxy/interceptor.go --- temporal-1.21.5-1/src/vendor/go.temporal.io/api/proxy/interceptor.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.temporal.io/api/proxy/interceptor.go 2024-02-23 09:46:13.000000000 +0000 @@ -1197,6 +1197,41 @@ return err } + case *workflowservice.CountWorkflowExecutionsResponse: + + if o == nil { + continue + } + ctx.Parent = o + if err := visitPayloads( + ctx, + options, + o.GetGroups(), + ); err != nil { + return err + } + + case []*workflowservice.CountWorkflowExecutionsResponse_AggregationGroup: + for _, x := range o { + if err := visitPayloads(ctx, options, x); err != nil { + return err + } + } + + case *workflowservice.CountWorkflowExecutionsResponse_AggregationGroup: + + if o == nil { + continue + } + ctx.Parent = o + if err := visitPayloads( + ctx, + options, + o.GetGroupValues(), + ); err != nil { + return err + } + case *workflowservice.CreateScheduleRequest: if o == nil { diff -Nru temporal-1.21.5-1/src/vendor/go.temporal.io/api/proxy/marshal.go temporal-1.22.5/src/vendor/go.temporal.io/api/proxy/marshal.go --- temporal-1.21.5-1/src/vendor/go.temporal.io/api/proxy/marshal.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.temporal.io/api/proxy/marshal.go 2024-02-23 09:46:13.000000000 +0000 @@ -0,0 +1,169 @@ +// The MIT License +// +// Copyright (c) 2022 Temporal Technologies Inc. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package proxy + +import ( + "encoding/json" + "io" + + gogojsonpb "github.com/gogo/protobuf/jsonpb" + "github.com/gogo/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "go.temporal.io/api/common/v1" + "go.temporal.io/api/internal/temporalgateway" + jsonpb "go.temporal.io/api/internal/temporaljsonpb" +) + +// JSONPBMarshaler is a protobuf JSON marshaler that supports Temporal-specific +// features. This is mostly equivalent to +// [github.com/gogo/protobuf/jsonpb.Marshaler]. +// +// One feature is "shorthand payloads". During marshal when shorthand payloads +// are enabled (which is the default), JSON payloads are represented as their +// actual data instead of the protobuf default which would be a base64'd data +// field and base64'd metadata fields. For JSON proto payloads, the same occurs +// but a special field in the object of "_protoMessageType" is present with the +// qualified protobuf message name. +type JSONPBMarshaler struct{ underlying jsonpb.Marshaler } + +// JSONPBMarshalerOptions is used for [NewJSONPBMarshaler]. Most of the options +// are copied from [github.com/gogo/protobuf/jsonpb.Marshaler]. +type JSONPBMarshalerOptions struct { + // Whether to render enum values as integers, as opposed to string values. + EnumsAsInts bool + + // Whether to render fields with zero values. + EmitDefaults bool + + // A string to indent each level by. The presence of this field will + // also cause a space to appear between the field separator and + // value, and for newlines to be appear between fields and array + // elements. + Indent string + + // Whether to use the original (.proto) name for fields. + OrigName bool + + // A custom URL resolver to use when marshaling Any messages to JSON. + // If unset, the default resolution strategy is to extract the + // fully-qualified type name from the type URL and pass that to + // proto.MessageType(string). + AnyResolver gogojsonpb.AnyResolver + + // If true, this will never marshal to shorthand payloads. See + // [JSONPBMarshaler] for more detail. + DisablePayloadShorthand bool +} + +// NewJSONPBMarshaler creates a marshaler that supports Temporal-specific +// features. See [JSONPBMarshaler] for more detail. +func NewJSONPBMarshaler(options JSONPBMarshalerOptions) (*JSONPBMarshaler, error) { + ret := &JSONPBMarshaler{} + ret.underlying.EnumsAsInts = options.EnumsAsInts + ret.underlying.EmitDefaults = options.EmitDefaults + ret.underlying.Indent = options.Indent + ret.underlying.OrigName = options.OrigName + ret.underlying.AnyResolver = options.AnyResolver + if options.DisablePayloadShorthand { + ret.underlying.Metadata = map[string]interface{}{ + common.DisablePayloadShorthandMetadataKey: true, + } + } + return ret, nil +} + +// Marshal is the Temporal-specific equivalent of +// [github.com/gogo/protobuf/jsonpb.Marshaler.Marshal]. +func (j *JSONPBMarshaler) Marshal(out io.Writer, pb proto.Message) error { + return j.underlying.Marshal(out, pb) +} + +// Marshal is the Temporal-specific equivalent of +// [github.com/gogo/protobuf/jsonpb.Marshaler.MarshalToString]. 
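(Editorial aside, not part of the patch: the shorthand-payload behaviour documented above is easiest to see with a concrete Payload. The sketch below assumes the shorthand path also applies to a bare Payload at the top level, which this hunk does not show, so treat the output shapes as illustrative only:)

package main

import (
	"fmt"

	common "go.temporal.io/api/common/v1"
	"go.temporal.io/api/proxy"
)

func main() {
	m, err := proxy.NewJSONPBMarshaler(proxy.JSONPBMarshalerOptions{})
	if err != nil {
		panic(err)
	}
	p := &common.Payload{
		Metadata: map[string][]byte{"encoding": []byte("json/plain")},
		Data:     []byte(`{"hello":"world"}`),
	}
	s, err := m.MarshalToString(p)
	if err != nil {
		panic(err)
	}
	fmt.Println(s)
	// With shorthand enabled (the default) the payload is expected to render
	// as its data, e.g. {"hello":"world"}; with DisablePayloadShorthand: true
	// it falls back to plain proto JSON with base64'd metadata and data fields.
}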
+func (j *JSONPBMarshaler) MarshalToString(pb proto.Message) (string, error) { + return j.underlying.MarshalToString(pb) +} + +// JSONPBUnmarshaler is a protobuf JSON unmarshaler that supports +// Temporal-specific features. This is mostly equivalent to +// [github.com/gogo/protobuf/jsonpb.Unmarshaler]. +// +// One feature is "shorthand payloads". During unmarshal when a JSON is +// encountered that cannot be converted to a traditional protobuf JSON payload +// with metadata and data, it is assumed to be "shorthand". This means the JSON +// itself is the payload and it is turned into a payload with the proper +// metadata set. If the JSON is an object with a "_protoMessageType" field, it +// is assumed to be a proto JSON payload with that field containing the +// qualified message name. +type JSONPBUnmarshaler struct{ underlying jsonpb.Unmarshaler } + +// JSONPBUnmarshalerOptions is used for [NewJSONPBUnmarshaler]. Most of the +// options are copied from [github.com/gogo/protobuf/jsonpb.Unmarshaler]. +type JSONPBUnmarshalerOptions struct { + // Whether to allow messages to contain unknown fields, as opposed to + // failing to unmarshal. + AllowUnknownFields bool + + // A custom URL resolver to use when unmarshaling Any messages from JSON. + // If unset, the default resolution strategy is to extract the + // fully-qualified type name from the type URL and pass that to + // proto.MessageType(string). + AnyResolver gogojsonpb.AnyResolver + + // If true, this will never unmarshal from shorthand payloads. See + // [JSONPBUnmarshaler] for more detail. + DisablePayloadShorthand bool +} + +func NewJSONPBUnmarshaler(options JSONPBUnmarshalerOptions) (*JSONPBUnmarshaler, error) { + ret := &JSONPBUnmarshaler{} + ret.underlying.AllowUnknownFields = options.AllowUnknownFields + ret.underlying.AnyResolver = options.AnyResolver + if options.DisablePayloadShorthand { + ret.underlying.Metadata = map[string]interface{}{ + common.DisablePayloadShorthandMetadataKey: true, + } + } + return ret, nil +} + +// Unmarshal is the Temporal-specific equivalent of +// [github.com/gogo/protobuf/jsonpb.Unmarshaler.Unmarshal]. +func (j *JSONPBUnmarshaler) Unmarshal(r io.Reader, pb proto.Message) error { + return j.underlying.Unmarshal(r, pb) +} + +// UnmarshalNext is the Temporal-specific equivalent of +// [github.com/gogo/protobuf/jsonpb.Unmarshaler.UnmarshalNext]. +func (j *JSONPBUnmarshaler) UnmarshalNext(dec *json.Decoder, pb proto.Message) error { + return j.underlying.UnmarshalNext(dec, pb) +} + +// NewGRPCGatewayJSONPBMarshaler creates a new gRPC gateway marshaler for the +// given marshaler/unmarshaler pair. +func NewGRPCGatewayJSONPBMarshaler(marshaler *JSONPBMarshaler, unmarshaler *JSONPBUnmarshaler) runtime.Marshaler { + return &temporalgateway.JSONPb{ + Marshaler: marshaler.underlying, + Unmarshaler: unmarshaler.underlying, + } +} diff -Nru temporal-1.21.5-1/src/vendor/go.temporal.io/api/schedule/v1/message.pb.go temporal-1.22.5/src/vendor/go.temporal.io/api/schedule/v1/message.pb.go --- temporal-1.21.5-1/src/vendor/go.temporal.io/api/schedule/v1/message.pb.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.temporal.io/api/schedule/v1/message.pb.go 2024-02-23 09:46:13.000000000 +0000 @@ -444,6 +444,9 @@ // On input, calendar and cron_string fields will be compiled into // structured_calendar (and maybe interval and timezone_name), so if you // Describe a schedule, you'll see only structured_calendar, interval, etc. 
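(Editorial aside, not part of the patch: NewGRPCGatewayJSONPBMarshaler above returns a grpc-gateway runtime.Marshaler, so the natural use is to register it on a gateway mux. A hedged wiring sketch using the v1 grpc-gateway API that this file already imports:)

package main

import (
	"log"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"go.temporal.io/api/proxy"
)

func main() {
	m, err := proxy.NewJSONPBMarshaler(proxy.JSONPBMarshalerOptions{EmitDefaults: true})
	if err != nil {
		log.Fatal(err)
	}
	u, err := proxy.NewJSONPBUnmarshaler(proxy.JSONPBUnmarshalerOptions{})
	if err != nil {
		log.Fatal(err)
	}
	// Serve Temporal-flavoured JSON (shorthand payloads included) for every
	// content type handled by the gateway.
	mux := runtime.NewServeMux(
		runtime.WithMarshalerOption(runtime.MIMEWildcard, proxy.NewGRPCGatewayJSONPBMarshaler(m, u)),
	)
	_ = mux
}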
+// +// If a spec has no matching times after the current time, then the schedule +// will be subject to automatic deletion (after several days). type ScheduleSpec struct { // Calendar-based specifications of times. StructuredCalendar []*StructuredCalendarSpec `protobuf:"bytes,7,rep,name=structured_calendar,json=structuredCalendar,proto3" json:"structured_calendar,omitempty"` @@ -629,7 +632,7 @@ // If the Temporal server misses an action due to one or more components // being down, and comes back up, the action will be run if the scheduled // time is within this window from the current time. - // This value defaults to 60 seconds, and can't be less than 10 seconds. + // This value defaults to one year, and can't be less than 10 seconds. CatchupWindow *time.Duration `protobuf:"bytes,2,opt,name=catchup_window,json=catchupWindow,proto3,stdduration" json:"catchup_window,omitempty"` // If true, and a workflow run fails or times out, turn on "paused". // This applies after retry policies: the full chain of retries must fail to @@ -836,6 +839,8 @@ // is zero. Actions may still be taken by explicit request (i.e. trigger // immediately or backfill). Skipped actions (due to overlap policy) do not // count against remaining actions. + // If a schedule has no more remaining actions, then the schedule will be + // subject to automatic deletion (after several days). LimitedActions bool `protobuf:"varint,3,opt,name=limited_actions,json=limitedActions,proto3" json:"limited_actions,omitempty"` RemainingActions int64 `protobuf:"varint,4,opt,name=remaining_actions,json=remainingActions,proto3" json:"remaining_actions,omitempty"` } @@ -901,7 +906,7 @@ } type TriggerImmediatelyRequest struct { - // Override overlap policy for this one request. + // If set, override overlap policy for this one request. OverlapPolicy v1.ScheduleOverlapPolicy `protobuf:"varint,1,opt,name=overlap_policy,json=overlapPolicy,proto3,enum=temporal.api.enums.v1.ScheduleOverlapPolicy" json:"overlap_policy,omitempty"` } @@ -945,10 +950,15 @@ } type BackfillRequest struct { - // Time range to evaluate schedule in. + // Time range to evaluate schedule in. Currently, this time range is + // exclusive on start_time and inclusive on end_time. (This is admittedly + // counterintuitive and it may change in the future, so to be safe, use a + // start time strictly before a scheduled time.) Also note that an action + // nominally scheduled in the interval but with jitter that pushes it after + // end_time will not be included. StartTime *time.Time `protobuf:"bytes,1,opt,name=start_time,json=startTime,proto3,stdtime" json:"start_time,omitempty"` EndTime *time.Time `protobuf:"bytes,2,opt,name=end_time,json=endTime,proto3,stdtime" json:"end_time,omitempty"` - // Override overlap policy for this request. + // If set, override overlap policy for this request. OverlapPolicy v1.ScheduleOverlapPolicy `protobuf:"varint,3,opt,name=overlap_policy,json=overlapPolicy,proto3,enum=temporal.api.enums.v1.ScheduleOverlapPolicy" json:"overlap_policy,omitempty"` } @@ -1085,6 +1095,12 @@ MissedCatchupWindow int64 `protobuf:"varint,2,opt,name=missed_catchup_window,json=missedCatchupWindow,proto3" json:"missed_catchup_window,omitempty"` // Number of skipped actions due to overlap. OverlapSkipped int64 `protobuf:"varint,3,opt,name=overlap_skipped,json=overlapSkipped,proto3" json:"overlap_skipped,omitempty"` + // Number of dropped actions due to buffer limit. 
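(Editorial aside, not part of the patch: the new BackfillRequest comment above is easier to apply with a concrete case. For an hourly schedule whose 12:00 UTC action should be replayed, the start time must be strictly before the nominal time because the range is exclusive on start_time; the dates below are invented:)

package main

import (
	"time"

	schedulepb "go.temporal.io/api/schedule/v1"
)

func main() {
	start := time.Date(2024, 2, 23, 11, 59, 59, 0, time.UTC) // strictly before the nominal 12:00 action
	end := time.Date(2024, 2, 23, 12, 0, 0, 0, time.UTC)      // end_time is inclusive
	req := &schedulepb.BackfillRequest{
		StartTime: &start,
		EndTime:   &end,
		// An action nominally inside the range but pushed past end_time by
		// jitter will not be included, per the comment above.
	}
	_ = req
}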
+ BufferDropped int64 `protobuf:"varint,10,opt,name=buffer_dropped,json=bufferDropped,proto3" json:"buffer_dropped,omitempty"` + // Number of actions in the buffer. The buffer holds the actions that cannot + // be immediately triggered (due to the overlap policy). These actions can be a result of + // the normal schedule or a backfill. + BufferSize int64 `protobuf:"varint,11,opt,name=buffer_size,json=bufferSize,proto3" json:"buffer_size,omitempty"` // Currently-running workflows started by this schedule. (There might be // more than one if the overlap policy allows overlaps.) // Note that the run_ids in here are the original execution run ids as @@ -1154,6 +1170,20 @@ return 0 } +func (m *ScheduleInfo) GetBufferDropped() int64 { + if m != nil { + return m.BufferDropped + } + return 0 +} + +func (m *ScheduleInfo) GetBufferSize() int64 { + if m != nil { + return m.BufferSize + } + return 0 +} + func (m *ScheduleInfo) GetRunningWorkflows() []*v12.WorkflowExecution { if m != nil { return m.RunningWorkflows @@ -1448,111 +1478,113 @@ } var fileDescriptor_e6aeef3f4b308dee = []byte{ - // 1651 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x58, 0x5b, 0x6f, 0x1b, 0x4b, - 0x1d, 0xcf, 0xfa, 0x92, 0x38, 0x63, 0x3b, 0x71, 0x26, 0x39, 0x95, 0x9b, 0x03, 0x4e, 0x8e, 0x39, - 0x6a, 0x72, 0x00, 0x39, 0x27, 0x8e, 0x50, 0x51, 0x8e, 0xe0, 0x10, 0xa7, 0xa9, 0x88, 0x44, 0xdb, - 0x68, 0x93, 0xb6, 0x12, 0x08, 0xad, 0x26, 0xbb, 0x63, 0x67, 0xc8, 0xee, 0xcc, 0xb2, 0x3b, 0x1b, - 0xd7, 0x3c, 0x21, 0xbe, 0x00, 0x95, 0x78, 0xe1, 0x23, 0x20, 0xbe, 0x08, 0xf0, 0x44, 0x1f, 0xcb, - 0x13, 0x34, 0xed, 0x03, 0xe2, 0xa9, 0xef, 0x08, 0x81, 0xe6, 0xb6, 0xb6, 0xe3, 0x98, 0x6c, 0x5b, - 0xf5, 0x6d, 0xe7, 0x7f, 0x9b, 0xf9, 0xff, 0xfe, 0x57, 0x1b, 0xdc, 0xe1, 0x38, 0x08, 0x59, 0x84, - 0xfc, 0x2d, 0x14, 0x92, 0xad, 0xd8, 0x3d, 0xc3, 0x5e, 0xe2, 0xe3, 0xad, 0x8b, 0xed, 0xad, 0x00, - 0xc7, 0x31, 0xea, 0xe1, 0x56, 0x18, 0x31, 0xce, 0x60, 0xdd, 0xc8, 0xb5, 0x50, 0x48, 0x5a, 0x46, - 0xae, 0x75, 0xb1, 0xbd, 0xda, 0xe8, 0x31, 0xd6, 0xf3, 0xf1, 0x96, 0x94, 0x3b, 0x4d, 0xba, 0x5b, - 0x5e, 0x12, 0x21, 0x4e, 0x18, 0x55, 0x9a, 0xab, 0x6b, 0x57, 0xf9, 0x9c, 0x04, 0x38, 0xe6, 0x28, - 0x08, 0xb5, 0xc0, 0x67, 0x1e, 0x0e, 0x31, 0xf5, 0x30, 0x75, 0x09, 0x8e, 0xb7, 0x7a, 0xac, 0xc7, - 0x24, 0x5d, 0x7e, 0x69, 0x91, 0xcf, 0xc7, 0x5e, 0xe9, 0xb2, 0x20, 0x60, 0x74, 0xe2, 0x8d, 0x57, - 0xa4, 0x30, 0x4d, 0x82, 0x58, 0x08, 0xa5, 0x8f, 0x55, 0x52, 0xe3, 0x1e, 0xf7, 0x59, 0x74, 0xde, - 0xf5, 0x59, 0x7f, 0xc2, 0x5a, 0xf3, 0xa5, 0x05, 0x2a, 0xfb, 0xc8, 0xc7, 0xd4, 0x43, 0xd1, 0x71, - 0x88, 0x5d, 0x78, 0x0b, 0xcc, 0xc6, 0xd8, 0x65, 0xd4, 0xab, 0x5b, 0xeb, 0xd6, 0xe6, 0xbc, 0xad, - 0x4f, 0x82, 0x1e, 0x10, 0x9a, 0x70, 0x5c, 0xcf, 0x29, 0xba, 0x3a, 0x41, 0x08, 0x0a, 0x67, 0x2c, - 0x89, 0xea, 0x79, 0x49, 0x95, 0xdf, 0x70, 0x1d, 0x54, 0x3c, 0x34, 0x70, 0x58, 0xd7, 0x09, 0x18, - 0xe5, 0x67, 0xf5, 0x82, 0xe4, 0x01, 0x0f, 0x0d, 0x1e, 0x75, 0x1f, 0x08, 0x0a, 0x5c, 0x01, 0x45, - 0xc5, 0x2a, 0x4a, 0x96, 0x3a, 0x08, 0x5b, 0x03, 0x8c, 0xa2, 0xfa, 0xac, 0xb2, 0x25, 0xbe, 0x61, - 0x03, 0x94, 0xb5, 0xad, 0x3e, 0xc6, 0xe7, 0xf5, 0x39, 0xc9, 0x9a, 0x97, 0xa6, 0x9e, 0x62, 0x7c, - 0x0e, 0xeb, 0x60, 0x4e, 0x20, 0x85, 0x29, 0xaf, 0x97, 0x24, 0xcf, 0x1c, 0x9b, 0xfb, 0xa0, 0x68, - 0x23, 0xda, 0xc3, 0xe2, 0xb2, 0x98, 0xa3, 0x88, 0x4b, 0x8f, 0x8a, 0xb6, 0x3a, 0xc0, 0x1a, 0xc8, - 0x63, 0xea, 0x49, 0x6f, 0x8a, 0xb6, 0xf8, 0x14, 0xd7, 0xc7, 0x1c, 0x87, 0xd2, 0x95, 0xa2, 0x2d, - 0xbf, 0x9b, 0x7f, 0xc9, 0x83, 0x5b, 0xc7, 0x3c, 0x4a, 0x5c, 
0x9e, 0x44, 0xd8, 0x1b, 0x43, 0xea, - 0xee, 0x08, 0x52, 0xf9, 0xcd, 0x72, 0x7b, 0xad, 0x35, 0x2d, 0x7b, 0x5a, 0xf2, 0x1d, 0x29, 0x94, - 0x77, 0x47, 0xa0, 0xcc, 0xa6, 0xa8, 0xb1, 0xde, 0x49, 0xb1, 0xce, 0xa4, 0xa6, 0x82, 0xb1, 0x37, - 0x11, 0x8c, 0x4c, 0xca, 0xa3, 0xd1, 0xfa, 0xde, 0x30, 0x5a, 0x99, 0x74, 0x75, 0x38, 0x77, 0xd2, - 0x70, 0x66, 0x7b, 0xae, 0x8c, 0xf7, 0xd7, 0x57, 0xe3, 0x9d, 0x49, 0x37, 0x53, 0x42, 0xfc, 0xc6, - 0x02, 0x95, 0x43, 0xca, 0x71, 0x74, 0x81, 0x7c, 0x19, 0xc1, 0xaf, 0x40, 0x89, 0xe8, 0xb3, 0xcc, - 0x8d, 0x72, 0xfb, 0x76, 0x4b, 0xd5, 0x71, 0xcb, 0xd4, 0x71, 0xeb, 0x9e, 0xae, 0xf3, 0x4e, 0xe1, - 0xf7, 0x7f, 0x5f, 0xb3, 0xec, 0x54, 0x41, 0x80, 0x12, 0x9e, 0xa1, 0x58, 0xd5, 0x43, 0x06, 0x4d, - 0x25, 0xdd, 0x7c, 0x53, 0x04, 0x95, 0x63, 0xfd, 0x7e, 0xf9, 0x08, 0x04, 0x96, 0xe3, 0x34, 0xc1, - 0x1c, 0x57, 0x67, 0x98, 0x76, 0xfc, 0xcb, 0xe9, 0x8e, 0x5f, 0x9f, 0x95, 0x36, 0x8c, 0x27, 0xe8, - 0x70, 0x0d, 0x94, 0xdd, 0x88, 0x51, 0x27, 0xe6, 0x11, 0xa1, 0xbd, 0x7a, 0x69, 0x3d, 0x2f, 0xca, - 0x51, 0x90, 0x8e, 0x25, 0x05, 0x76, 0x40, 0x29, 0xbd, 0x58, 0x25, 0xf3, 0x9d, 0xe9, 0x17, 0x8f, - 0x5d, 0x97, 0xea, 0x09, 0x1b, 0x29, 0x98, 0xb9, 0x9b, 0x6c, 0x8c, 0x86, 0x61, 0x04, 0xd3, 0xc7, - 0xa0, 0x86, 0x9f, 0xb9, 0x7e, 0xe2, 0xe1, 0x21, 0x10, 0xf9, 0x77, 0x79, 0x4f, 0x27, 0x57, 0xb7, - 0xec, 0x45, 0x6d, 0x23, 0xf5, 0x3f, 0x04, 0x9f, 0x1a, 0xb3, 0xd7, 0x41, 0x3d, 0xff, 0x9e, 0x50, - 0xdf, 0xd6, 0x46, 0x27, 0xd9, 0xf0, 0x6b, 0x00, 0x64, 0x97, 0x71, 0xc4, 0x18, 0x90, 0xfd, 0xaf, - 0xdc, 0x5e, 0x9d, 0xc8, 0x90, 0x13, 0x33, 0x23, 0x3a, 0x85, 0xe7, 0x22, 0x45, 0xe6, 0xa5, 0x8e, - 0xa0, 0x8a, 0xd4, 0xc4, 0xd4, 0x53, 0xea, 0xc5, 0x8c, 0xea, 0x73, 0x98, 0x7a, 0x52, 0xf9, 0x2e, - 0x98, 0xfd, 0x05, 0xe1, 0x1c, 0xab, 0x4e, 0x9a, 0x21, 0x37, 0xb5, 0x38, 0xfc, 0x16, 0xa8, 0x8a, - 0x1b, 0x7f, 0xc5, 0x28, 0x76, 0x28, 0x0a, 0x70, 0x1d, 0xc8, 0x0a, 0xaa, 0x18, 0xe2, 0x43, 0x14, - 0xe0, 0x31, 0x21, 0x0f, 0x71, 0x54, 0x2f, 0xaf, 0x5b, 0x9b, 0x95, 0xa1, 0xd0, 0x3d, 0xc4, 0x51, - 0xf3, 0x6f, 0x16, 0xa8, 0x99, 0x34, 0x3f, 0x62, 0x3e, 0x11, 0x53, 0x0f, 0x1e, 0x83, 0x05, 0x76, - 0x81, 0x23, 0x1f, 0x85, 0x4e, 0x28, 0x68, 0x03, 0x59, 0x75, 0x0b, 0xed, 0xef, 0x8e, 0x43, 0x2f, - 0x67, 0x9a, 0xc4, 0x5d, 0x1b, 0x78, 0xa4, 0x94, 0xa4, 0x9d, 0x81, 0x5d, 0x65, 0xa3, 0x47, 0x78, - 0x1f, 0x2c, 0xb8, 0x88, 0xbb, 0x67, 0x49, 0xe8, 0xf4, 0x09, 0xf5, 0x58, 0x3f, 0x6b, 0x41, 0x56, - 0xb5, 0xda, 0x53, 0xa9, 0x05, 0x37, 0x41, 0x2d, 0x44, 0x49, 0x8c, 0x1d, 0x46, 0x9d, 0x2e, 0x22, - 0x7e, 0x12, 0x61, 0x39, 0x09, 0x4a, 0xf6, 0x82, 0xa4, 0x3f, 0xa2, 0xf7, 0x15, 0xb5, 0xd9, 0x07, - 0x0b, 0xe6, 0x65, 0x7b, 0xae, 0x30, 0x08, 0x7f, 0x06, 0x16, 0x54, 0xb8, 0xcd, 0xa0, 0xd5, 0xed, - 0xa4, 0x3d, 0xee, 0x98, 0xe1, 0x0a, 0xdf, 0x1e, 0xe2, 0xfe, 0x53, 0x7d, 0x3c, 0x78, 0x86, 0xdd, - 0x44, 0xd8, 0x39, 0xa4, 0x5d, 0xf6, 0xe3, 0x19, 0xbb, 0x2a, 0x6d, 0x19, 0x6e, 0xa7, 0x04, 0x66, - 0x91, 0xbc, 0xa6, 0xf9, 0x5f, 0x0b, 0xac, 0x8c, 0xdf, 0x6c, 0xe3, 0x38, 0xf1, 0x39, 0x3c, 0x00, - 0x55, 0x93, 0xaf, 0x2a, 0x65, 0xac, 0x8c, 0x29, 0x53, 0x31, 0x6a, 0x32, 0x6f, 0xf6, 0x40, 0x19, - 0xb9, 0x3c, 0x41, 0xbe, 0x32, 0x92, 0xcb, 0x68, 0x04, 0x28, 0x25, 0x69, 0xe2, 0xe7, 0xe0, 0x93, - 0x71, 0x24, 0x9c, 0x48, 0x3e, 0x51, 0x26, 0x49, 0xb9, 0xfd, 0xc5, 0x38, 0x20, 0x6a, 0xc7, 0x11, - 0x70, 0x4c, 0x60, 0x61, 0x2f, 0x8f, 0xa1, 0xa0, 0x1c, 0x6d, 0xfe, 0xce, 0x02, 0xd5, 0xb4, 0x7b, - 0x72, 0xc4, 0xe5, 0x70, 0xa7, 0x8c, 0xe3, 0x58, 0xaf, 0x2b, 0xea, 0x20, 0xb6, 0x15, 0x19, 0x34, - 0x35, 0xdf, 0x4b, 0xb6, 0x3e, 0xc1, 0x0d, 0xb0, 0xe8, 0x93, 0x80, 0x70, 0xec, 0x39, 
0x0a, 0xd3, - 0xd8, 0xc4, 0x58, 0x93, 0x15, 0xac, 0x31, 0xfc, 0x0e, 0x58, 0x8a, 0x70, 0x80, 0x08, 0x25, 0xb4, - 0x97, 0x8a, 0x8a, 0x3a, 0xce, 0xdb, 0xb5, 0x94, 0xa1, 0x85, 0x9b, 0x21, 0xb8, 0x7d, 0x12, 0x91, - 0x5e, 0x0f, 0x47, 0x87, 0x41, 0x80, 0x3d, 0x82, 0x38, 0xf6, 0x07, 0x36, 0xfe, 0x65, 0x82, 0x63, - 0xfe, 0x51, 0x92, 0xbe, 0xf9, 0xc6, 0x02, 0x8b, 0x1d, 0xe4, 0x9e, 0x77, 0x89, 0xef, 0x9b, 0x8b, - 0xc6, 0x7b, 0x8e, 0xf5, 0x61, 0x3d, 0x27, 0xf7, 0xae, 0x3d, 0x67, 0xd2, 0xcd, 0xfc, 0x87, 0xbb, - 0xf9, 0xef, 0x91, 0x70, 0x1f, 0x89, 0x6a, 0x85, 0x1e, 0x58, 0xe6, 0x0a, 0x6a, 0x87, 0x0c, 0xb1, - 0xd6, 0xde, 0xee, 0x4c, 0x6f, 0xe1, 0x53, 0xe3, 0x63, 0x43, 0x3e, 0xc1, 0x82, 0x27, 0xa0, 0x76, - 0xaa, 0xd1, 0x75, 0x22, 0x25, 0xa7, 0x67, 0xda, 0x17, 0xd3, 0xaf, 0xb8, 0x12, 0x0f, 0x7b, 0xf1, - 0xf4, 0x4a, 0x80, 0x56, 0x40, 0x51, 0xa6, 0xa1, 0xde, 0x95, 0xd5, 0x41, 0xec, 0x2b, 0x09, 0x55, - 0x74, 0xb5, 0x27, 0x9b, 0x63, 0xf3, 0xaf, 0x85, 0xe1, 0xaa, 0x20, 0x5a, 0x03, 0xfc, 0x0c, 0x54, - 0x54, 0x2a, 0x3a, 0x2e, 0x4b, 0xa8, 0xda, 0x67, 0xf3, 0x76, 0x59, 0xd1, 0xf6, 0x05, 0x09, 0xb6, - 0xc1, 0x27, 0x01, 0x89, 0x63, 0x39, 0xde, 0x26, 0x9a, 0x62, 0xde, 0x5e, 0x56, 0xcc, 0xfd, 0xb1, - 0xce, 0xb7, 0x01, 0x16, 0x4d, 0xe8, 0xe2, 0x73, 0x12, 0x86, 0xd8, 0x93, 0x2f, 0xcc, 0xdb, 0x26, - 0xa2, 0xc7, 0x8a, 0x0a, 0x9f, 0x80, 0xa5, 0x28, 0xa1, 0xb2, 0x24, 0x4c, 0x79, 0xc7, 0x7a, 0x7a, - 0xbe, 0x43, 0x61, 0xd7, 0xb4, 0x0d, 0xc3, 0x89, 0xe1, 0x63, 0xb0, 0x10, 0x61, 0x17, 0x53, 0x3e, - 0x52, 0x69, 0xc2, 0x68, 0xeb, 0xff, 0x8c, 0xe4, 0x6b, 0xda, 0xa0, 0x5d, 0x55, 0x56, 0x4c, 0x0d, - 0x1f, 0x81, 0xe5, 0x6e, 0x22, 0xe6, 0xb2, 0x36, 0x2b, 0x33, 0x3b, 0xd6, 0x4b, 0xec, 0xcd, 0xa9, - 0xbd, 0xa4, 0x94, 0x95, 0x35, 0xc9, 0x14, 0x0d, 0xd2, 0x8d, 0x30, 0xe2, 0xba, 0xcb, 0xce, 0x66, - 0x6d, 0x90, 0x4a, 0xc9, 0xf4, 0xd8, 0x24, 0xf4, 0x52, 0x13, 0x73, 0x59, 0x4d, 0x28, 0x25, 0x69, - 0xe2, 0xfb, 0xe0, 0x16, 0xa1, 0x17, 0xc8, 0x27, 0x9e, 0x93, 0x76, 0x7d, 0x1c, 0x45, 0x2c, 0x52, - 0x0b, 0xaf, 0xdc, 0x81, 0x56, 0xb4, 0x84, 0xc1, 0xe9, 0x40, 0xf0, 0x9b, 0xbf, 0xcd, 0x81, 0x92, - 0xa1, 0xc0, 0x5d, 0x50, 0x88, 0x43, 0xec, 0xea, 0xda, 0xb9, 0x73, 0x33, 0xd6, 0x72, 0xe9, 0x91, - 0x3a, 0xf0, 0x47, 0x66, 0x26, 0xe9, 0x46, 0xb1, 0x99, 0x39, 0x52, 0x5a, 0x0f, 0xde, 0x07, 0xa5, - 0x50, 0xef, 0x05, 0x32, 0xdb, 0xca, 0xed, 0x6f, 0xdf, 0x6c, 0xc3, 0x6c, 0x12, 0x76, 0xaa, 0x0b, - 0x7f, 0x20, 0x7f, 0xdc, 0x71, 0xb3, 0x64, 0x6d, 0x64, 0x70, 0x43, 0x88, 0xdb, 0x4a, 0xab, 0xf9, - 0x36, 0x37, 0xdc, 0x53, 0x7e, 0x42, 0x62, 0x2e, 0xeb, 0xec, 0x43, 0x90, 0x39, 0x04, 0xd5, 0x74, - 0xf4, 0xf1, 0x41, 0x68, 0x3a, 0xe9, 0xe7, 0x37, 0xd5, 0xc7, 0xc9, 0x20, 0xc4, 0x76, 0xa5, 0x3f, - 0x72, 0x1a, 0x8e, 0xb6, 0xfc, 0xf5, 0xa3, 0xad, 0x30, 0x36, 0xda, 0x26, 0x8b, 0xa8, 0xf8, 0x11, - 0x8b, 0x68, 0xf6, 0xbd, 0x8b, 0xa8, 0xf9, 0x1f, 0x0b, 0x2c, 0x8d, 0x42, 0x7e, 0x40, 0x79, 0x34, - 0x10, 0xbf, 0x51, 0xd2, 0x64, 0x26, 0xe6, 0xcf, 0x07, 0x60, 0x48, 0x87, 0x1e, 0xfc, 0x12, 0x14, - 0x02, 0x1c, 0x30, 0x8d, 0xe7, 0x37, 0xa6, 0xe1, 0xf9, 0x00, 0x07, 0xcc, 0x96, 0x92, 0xf0, 0x31, - 0x58, 0x8a, 0x31, 0x8a, 0xdc, 0x33, 0x07, 0x71, 0x1e, 0x91, 0xd3, 0x84, 0xa7, 0xb9, 0xb6, 0x39, - 0x4d, 0xfd, 0x58, 0x2a, 0xec, 0xa5, 0xf2, 0x76, 0x2d, 0xbe, 0x42, 0x81, 0x3f, 0x04, 0x05, 0x42, - 0xbb, 0x4c, 0x27, 0x5c, 0x86, 0xac, 0x35, 0x79, 0x65, 0x4b, 0xbd, 0xce, 0x9f, 0xac, 0x17, 0xaf, - 0x1a, 0x33, 0x2f, 0x5f, 0x35, 0x66, 0xde, 0xbe, 0x6a, 0x58, 0xbf, 0xbe, 0x6c, 0x58, 0x7f, 0xb8, - 0x6c, 0x58, 0x7f, 0xbe, 0x6c, 0x58, 0x2f, 0x2e, 0x1b, 0xd6, 0x3f, 0x2e, 0x1b, 0xd6, 0x3f, 0x2f, - 0x1b, 0x33, 
0x6f, 0x2f, 0x1b, 0xd6, 0xf3, 0xd7, 0x8d, 0x99, 0x17, 0xaf, 0x1b, 0x33, 0x2f, 0x5f, - 0x37, 0x66, 0xc0, 0xa7, 0x84, 0x4d, 0xbd, 0xaa, 0x53, 0x79, 0xa0, 0xfe, 0xd5, 0x39, 0x12, 0x71, - 0x38, 0xb2, 0x7e, 0xba, 0xd1, 0x1b, 0x11, 0x26, 0xec, 0xea, 0xdf, 0x5e, 0x5f, 0x99, 0xef, 0x3f, - 0xe6, 0x56, 0x4f, 0xb4, 0x18, 0x61, 0xad, 0xbd, 0x90, 0xa4, 0x8f, 0x6e, 0x3d, 0xd9, 0xfe, 0x57, - 0xee, 0x9b, 0x43, 0xe6, 0xee, 0xee, 0x5e, 0x48, 0x76, 0x77, 0x0d, 0x7b, 0x77, 0xf7, 0xc9, 0xf6, - 0xe9, 0xac, 0x0c, 0xfb, 0xce, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0x29, 0xe3, 0x0c, 0x77, 0x5f, - 0x13, 0x00, 0x00, + // 1688 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x58, 0xdb, 0x6f, 0x1b, 0x59, + 0x19, 0xcf, 0xd8, 0x71, 0xea, 0x7e, 0xb6, 0x13, 0xe7, 0xa4, 0x5b, 0xb9, 0x5d, 0x70, 0xbb, 0x66, + 0x69, 0xb3, 0x80, 0x9c, 0xad, 0x2b, 0x54, 0x94, 0x15, 0x2c, 0x71, 0x2f, 0x22, 0x12, 0xdd, 0x46, + 0xe3, 0xb4, 0x95, 0x40, 0x68, 0x74, 0x32, 0x73, 0xec, 0x1c, 0x32, 0x73, 0xce, 0x30, 0x73, 0x26, + 0x5e, 0xf7, 0x09, 0xf1, 0x0f, 0xb0, 0x12, 0x0f, 0xf0, 0x27, 0x20, 0xfe, 0x11, 0xe0, 0xad, 0x8f, + 0xe5, 0x09, 0x9a, 0xee, 0x03, 0xe2, 0xa9, 0xef, 0x08, 0x81, 0xce, 0x6d, 0x62, 0xc7, 0x35, 0x99, + 0x6e, 0xb5, 0x6f, 0x73, 0xbe, 0xdb, 0x39, 0xdf, 0xef, 0xbb, 0xda, 0x70, 0x43, 0x90, 0x28, 0xe6, + 0x09, 0x0e, 0xb7, 0x70, 0x4c, 0xb7, 0x52, 0xff, 0x90, 0x04, 0x59, 0x48, 0xb6, 0x8e, 0x6f, 0x6d, + 0x45, 0x24, 0x4d, 0xf1, 0x88, 0x74, 0xe3, 0x84, 0x0b, 0x8e, 0x5a, 0x56, 0xae, 0x8b, 0x63, 0xda, + 0xb5, 0x72, 0xdd, 0xe3, 0x5b, 0x57, 0xdb, 0x23, 0xce, 0x47, 0x21, 0xd9, 0x52, 0x72, 0x07, 0xd9, + 0x70, 0x2b, 0xc8, 0x12, 0x2c, 0x28, 0x67, 0x5a, 0xf3, 0xea, 0xb5, 0xb3, 0x7c, 0x41, 0x23, 0x92, + 0x0a, 0x1c, 0xc5, 0x46, 0xe0, 0x83, 0x80, 0xc4, 0x84, 0x05, 0x84, 0xf9, 0x94, 0xa4, 0x5b, 0x23, + 0x3e, 0xe2, 0x8a, 0xae, 0xbe, 0x8c, 0xc8, 0x87, 0x33, 0xaf, 0xf4, 0x79, 0x14, 0x71, 0x36, 0xf7, + 0xc6, 0x33, 0x52, 0x84, 0x65, 0x51, 0x2a, 0x85, 0xf2, 0xc7, 0x6a, 0xa9, 0x59, 0x8f, 0xc7, 0x3c, + 0x39, 0x1a, 0x86, 0x7c, 0x3c, 0x67, 0xad, 0xf3, 0xc2, 0x81, 0xfa, 0x5d, 0x1c, 0x12, 0x16, 0xe0, + 0x64, 0x10, 0x13, 0x1f, 0x5d, 0x86, 0x95, 0x94, 0xf8, 0x9c, 0x05, 0x2d, 0xe7, 0xba, 0xb3, 0x79, + 0xd1, 0x35, 0x27, 0x49, 0x8f, 0x28, 0xcb, 0x04, 0x69, 0x95, 0x34, 0x5d, 0x9f, 0x10, 0x82, 0xe5, + 0x43, 0x9e, 0x25, 0xad, 0xb2, 0xa2, 0xaa, 0x6f, 0x74, 0x1d, 0xea, 0x01, 0x9e, 0x78, 0x7c, 0xe8, + 0x45, 0x9c, 0x89, 0xc3, 0xd6, 0xb2, 0xe2, 0x41, 0x80, 0x27, 0x8f, 0x86, 0x0f, 0x25, 0x05, 0x5d, + 0x82, 0x8a, 0x66, 0x55, 0x14, 0x4b, 0x1f, 0xa4, 0xad, 0x09, 0xc1, 0x49, 0x6b, 0x45, 0xdb, 0x92, + 0xdf, 0xa8, 0x0d, 0x35, 0x63, 0x6b, 0x4c, 0xc8, 0x51, 0xeb, 0x82, 0x62, 0x5d, 0x54, 0xa6, 0x9e, + 0x12, 0x72, 0x84, 0x5a, 0x70, 0x41, 0x22, 0x45, 0x98, 0x68, 0x55, 0x15, 0xcf, 0x1e, 0x3b, 0x77, + 0xa1, 0xe2, 0x62, 0x36, 0x22, 0xf2, 0xb2, 0x54, 0xe0, 0x44, 0x28, 0x8f, 0x2a, 0xae, 0x3e, 0xa0, + 0x26, 0x94, 0x09, 0x0b, 0x94, 0x37, 0x15, 0x57, 0x7e, 0xca, 0xeb, 0x53, 0x41, 0x62, 0xe5, 0x4a, + 0xc5, 0x55, 0xdf, 0x9d, 0xbf, 0x96, 0xe1, 0xf2, 0x40, 0x24, 0x99, 0x2f, 0xb2, 0x84, 0x04, 0x33, + 0x48, 0xdd, 0x99, 0x42, 0xaa, 0xbc, 0x59, 0xeb, 0x5d, 0xeb, 0x2e, 0xca, 0x9e, 0xae, 0x7a, 0x47, + 0x0e, 0xe5, 0x9d, 0x29, 0x28, 0x8b, 0x29, 0x1a, 0xac, 0x6f, 0xe7, 0x58, 0x17, 0x52, 0xd3, 0xc1, + 0xd8, 0x99, 0x0b, 0x46, 0x21, 0xe5, 0xe9, 0x68, 0x7d, 0xff, 0x34, 0x5a, 0x85, 0x74, 0x4d, 0x38, + 0x6f, 0xe7, 0xe1, 0x2c, 0xf6, 0x5c, 0x15, 0xef, 0x4f, 0xcf, 0xc6, 0xbb, 0x90, 0x6e, 0xa1, 0x84, + 0xf8, 0x8d, 0x03, 0xf5, 0x5d, 0x26, 0x48, 0x72, 0x8c, 0x43, 0x15, 
0xc1, 0x4f, 0xa0, 0x4a, 0xcd, + 0x59, 0xe5, 0x46, 0xad, 0x77, 0xa5, 0xab, 0xeb, 0xb8, 0x6b, 0xeb, 0xb8, 0x7b, 0xcf, 0xd4, 0x79, + 0x7f, 0xf9, 0x0f, 0x7f, 0xbf, 0xe6, 0xb8, 0xb9, 0x82, 0x04, 0x25, 0x3e, 0xc4, 0xa9, 0xae, 0x87, + 0x02, 0x9a, 0x5a, 0xba, 0xf3, 0x65, 0x05, 0xea, 0x03, 0xf3, 0x7e, 0xf5, 0x08, 0x0c, 0x1b, 0x69, + 0x9e, 0x60, 0x9e, 0x6f, 0x32, 0xcc, 0x38, 0xfe, 0xf1, 0x62, 0xc7, 0xdf, 0x9c, 0x95, 0x2e, 0x4a, + 0xe7, 0xe8, 0xe8, 0x1a, 0xd4, 0xfc, 0x84, 0x33, 0x2f, 0x15, 0x09, 0x65, 0xa3, 0x56, 0xf5, 0x7a, + 0x59, 0x96, 0xa3, 0x24, 0x0d, 0x14, 0x05, 0xf5, 0xa1, 0x9a, 0x5f, 0xac, 0x93, 0xf9, 0xc6, 0xe2, + 0x8b, 0x67, 0xae, 0xcb, 0xf5, 0xa4, 0x8d, 0x1c, 0xcc, 0xd2, 0x79, 0x36, 0xa6, 0xc3, 0x30, 0x85, + 0xe9, 0x63, 0x68, 0x92, 0xcf, 0xfd, 0x30, 0x0b, 0xc8, 0x29, 0x10, 0xe5, 0xb7, 0x79, 0x4f, 0xbf, + 0xd4, 0x72, 0xdc, 0x35, 0x63, 0x23, 0xf7, 0x3f, 0x86, 0xf7, 0xad, 0xd9, 0x37, 0x41, 0x7d, 0xf1, + 0x2b, 0x42, 0x7d, 0xc5, 0x18, 0x9d, 0x67, 0xa3, 0x4f, 0x01, 0x54, 0x97, 0xf1, 0xe4, 0x18, 0x50, + 0xfd, 0xaf, 0xd6, 0xbb, 0x3a, 0x97, 0x21, 0xfb, 0x76, 0x46, 0xf4, 0x97, 0xbf, 0x90, 0x29, 0x72, + 0x51, 0xe9, 0x48, 0xaa, 0x4c, 0x4d, 0xc2, 0x02, 0xad, 0x5e, 0x29, 0xa8, 0x7e, 0x81, 0xb0, 0x40, + 0x29, 0xdf, 0x81, 0x95, 0x5f, 0x52, 0x21, 0x88, 0xee, 0xa4, 0x05, 0x72, 0xd3, 0x88, 0xa3, 0x6f, + 0x41, 0x43, 0xde, 0xf8, 0x8c, 0x33, 0xe2, 0x31, 0x1c, 0x91, 0x16, 0xa8, 0x0a, 0xaa, 0x5b, 0xe2, + 0x67, 0x38, 0x22, 0x33, 0x42, 0x01, 0x16, 0xb8, 0x55, 0xbb, 0xee, 0x6c, 0xd6, 0x4f, 0x85, 0xee, + 0x61, 0x81, 0x3b, 0x7f, 0x73, 0xa0, 0x69, 0xd3, 0x7c, 0x8f, 0x87, 0x54, 0x4e, 0x3d, 0x34, 0x80, + 0x55, 0x7e, 0x4c, 0x92, 0x10, 0xc7, 0x5e, 0x2c, 0x69, 0x13, 0x55, 0x75, 0xab, 0xbd, 0xef, 0xcd, + 0x42, 0xaf, 0x66, 0x9a, 0xc2, 0xdd, 0x18, 0x78, 0xa4, 0x95, 0x94, 0x9d, 0x89, 0xdb, 0xe0, 0xd3, + 0x47, 0xf4, 0x00, 0x56, 0x7d, 0x2c, 0xfc, 0xc3, 0x2c, 0xf6, 0xc6, 0x94, 0x05, 0x7c, 0x5c, 0xb4, + 0x20, 0x1b, 0x46, 0xed, 0xa9, 0xd2, 0x42, 0x9b, 0xd0, 0x8c, 0x71, 0x96, 0x12, 0x8f, 0x33, 0x6f, + 0x88, 0x69, 0x98, 0x25, 0x44, 0x4d, 0x82, 0xaa, 0xbb, 0xaa, 0xe8, 0x8f, 0xd8, 0x03, 0x4d, 0xed, + 0x8c, 0x61, 0xd5, 0xbe, 0x6c, 0xc7, 0x97, 0x06, 0xd1, 0xcf, 0x61, 0x55, 0x87, 0xdb, 0x0e, 0x5a, + 0xd3, 0x4e, 0x7a, 0xb3, 0x8e, 0x59, 0xae, 0xf4, 0xed, 0x33, 0x32, 0x7e, 0x6a, 0x8e, 0xf7, 0x3f, + 0x27, 0x7e, 0x26, 0xed, 0xec, 0xb2, 0x21, 0xff, 0xc9, 0x92, 0xdb, 0x50, 0xb6, 0x2c, 0xb7, 0x5f, + 0x85, 0x15, 0xac, 0xae, 0xe9, 0xfc, 0xd7, 0x81, 0x4b, 0xb3, 0x37, 0xbb, 0x24, 0xcd, 0x42, 0x81, + 0xee, 0x43, 0xc3, 0xe6, 0xab, 0x4e, 0x19, 0xa7, 0x60, 0xca, 0xd4, 0xad, 0x9a, 0xca, 0x9b, 0x1d, + 0xa8, 0x61, 0x5f, 0x64, 0x38, 0xd4, 0x46, 0x4a, 0x05, 0x8d, 0x80, 0x56, 0x52, 0x26, 0x7e, 0x01, + 0xef, 0xcd, 0x22, 0xe1, 0x25, 0xea, 0x89, 0x2a, 0x49, 0x6a, 0xbd, 0x8f, 0x66, 0x01, 0xd1, 0x3b, + 0x8e, 0x84, 0x63, 0x0e, 0x0b, 0x77, 0x63, 0x06, 0x05, 0xed, 0x68, 0xe7, 0x77, 0x0e, 0x34, 0xf2, + 0xee, 0x29, 0xb0, 0x50, 0xc3, 0x9d, 0x71, 0x41, 0x52, 0xb3, 0xae, 0xe8, 0x83, 0xdc, 0x56, 0x54, + 0xd0, 0xf4, 0x7c, 0xaf, 0xba, 0xe6, 0x84, 0x6e, 0xc2, 0x5a, 0x48, 0x23, 0x2a, 0x48, 0xe0, 0x69, + 0x4c, 0x53, 0x1b, 0x63, 0x43, 0xd6, 0xb0, 0xa6, 0xe8, 0xbb, 0xb0, 0x9e, 0x90, 0x08, 0x53, 0x46, + 0xd9, 0x28, 0x17, 0x95, 0x75, 0x5c, 0x76, 0x9b, 0x39, 0xc3, 0x08, 0x77, 0x62, 0xb8, 0xb2, 0x9f, + 0xd0, 0xd1, 0x88, 0x24, 0xbb, 0x51, 0x44, 0x02, 0x8a, 0x05, 0x09, 0x27, 0x2e, 0xf9, 0x55, 0x46, + 0x52, 0xf1, 0xb5, 0x24, 0x7d, 0xe7, 0x4b, 0x07, 0xd6, 0xfa, 0xd8, 0x3f, 0x1a, 0xd2, 0x30, 0xb4, + 0x17, 0xcd, 0xf6, 0x1c, 0xe7, 0xdd, 0x7a, 0x4e, 0xe9, 0x6d, 0x7b, 0xce, 0xbc, 0x9b, 0xe5, 
0x77, + 0x77, 0xf3, 0xdf, 0x53, 0xe1, 0xde, 0x93, 0xd5, 0x8a, 0x02, 0xd8, 0x10, 0x1a, 0x6a, 0x8f, 0x9e, + 0x62, 0x6d, 0xbc, 0xbd, 0xbd, 0xb8, 0x85, 0x2f, 0x8c, 0x8f, 0x8b, 0xc4, 0x1c, 0x0b, 0xed, 0x43, + 0xf3, 0xc0, 0xa0, 0xeb, 0x25, 0x5a, 0xce, 0xcc, 0xb4, 0x8f, 0x16, 0x5f, 0x71, 0x26, 0x1e, 0xee, + 0xda, 0xc1, 0x99, 0x00, 0x5d, 0x82, 0x8a, 0x4a, 0x43, 0xb3, 0x2b, 0xeb, 0x83, 0xdc, 0x57, 0x32, + 0xa6, 0xe9, 0x7a, 0x4f, 0xb6, 0xc7, 0xce, 0xef, 0xa7, 0x56, 0x05, 0xd9, 0x1a, 0xd0, 0x07, 0x50, + 0xd7, 0xa9, 0xe8, 0xf9, 0x3c, 0x63, 0x7a, 0x9f, 0x2d, 0xbb, 0x35, 0x4d, 0xbb, 0x2b, 0x49, 0xa8, + 0x07, 0xef, 0x45, 0x34, 0x4d, 0xd5, 0x78, 0x9b, 0x6b, 0x8a, 0x65, 0x77, 0x43, 0x33, 0xef, 0xce, + 0x74, 0xbe, 0x9b, 0xb0, 0x66, 0x43, 0x97, 0x1e, 0xd1, 0x38, 0x26, 0x81, 0x7a, 0x61, 0xd9, 0xb5, + 0x11, 0x1d, 0x68, 0x2a, 0xfa, 0x36, 0xac, 0x1e, 0x64, 0xc3, 0x21, 0x49, 0xbc, 0x20, 0xe1, 0x4a, + 0x0e, 0x94, 0x5c, 0x43, 0x53, 0xef, 0x69, 0xa2, 0x5c, 0x37, 0x8c, 0x58, 0x4a, 0x9f, 0x11, 0x55, + 0xf9, 0x65, 0x17, 0x34, 0x69, 0x40, 0x9f, 0x11, 0xf4, 0x04, 0xd6, 0x93, 0x8c, 0xa9, 0xd2, 0xb2, + 0x6d, 0x22, 0x35, 0x53, 0xf8, 0x2d, 0x1a, 0x44, 0xd3, 0xd8, 0xb0, 0x9c, 0x14, 0x3d, 0x86, 0xd5, + 0x84, 0xf8, 0x84, 0x89, 0xa9, 0x8a, 0x95, 0x46, 0xbb, 0xff, 0x67, 0xb4, 0xbf, 0xa1, 0x9d, 0xba, + 0x0d, 0x6d, 0xc5, 0xf6, 0x82, 0x3d, 0xd8, 0x18, 0x66, 0x72, 0xbe, 0x1b, 0xb3, 0xaa, 0x42, 0x52, + 0xb3, 0x0c, 0x9f, 0x5f, 0x22, 0xeb, 0x5a, 0x59, 0x5b, 0x53, 0x4c, 0xd9, 0x68, 0xfd, 0x84, 0x60, + 0x61, 0xba, 0xf5, 0x4a, 0xd1, 0x46, 0xab, 0x95, 0x6c, 0xaf, 0xce, 0xe2, 0x20, 0x37, 0x71, 0xa1, + 0xa8, 0x09, 0xad, 0xa4, 0x4c, 0xfc, 0x00, 0x2e, 0x53, 0x76, 0x8c, 0x43, 0x1a, 0x78, 0xf9, 0xf4, + 0x20, 0x49, 0xc2, 0x13, 0xbd, 0x38, 0xab, 0x5d, 0xea, 0x92, 0x91, 0xb0, 0x38, 0xdd, 0x97, 0xfc, + 0xce, 0x6f, 0x4b, 0x50, 0xb5, 0x14, 0xb4, 0x0d, 0xcb, 0x69, 0x4c, 0x7c, 0x53, 0x83, 0x37, 0xce, + 0xc7, 0x5a, 0x2d, 0x4f, 0x4a, 0x07, 0xfd, 0xd8, 0xce, 0x36, 0xd3, 0x70, 0x36, 0x0b, 0x47, 0xca, + 0xe8, 0xa1, 0x07, 0x50, 0x8d, 0xcd, 0x7e, 0xa1, 0xb2, 0xb6, 0xd6, 0xfb, 0xce, 0xf9, 0x36, 0xec, + 0x46, 0xe2, 0xe6, 0xba, 0xe8, 0x87, 0xea, 0x47, 0xa2, 0xb0, 0xcb, 0xda, 0xcd, 0x02, 0x6e, 0x48, + 0x71, 0x57, 0x6b, 0x75, 0x5e, 0x97, 0x4e, 0xf7, 0x9d, 0x9f, 0xd2, 0x54, 0xa8, 0x7a, 0x7d, 0x17, + 0x64, 0x76, 0xa1, 0x91, 0x8f, 0x50, 0x31, 0x89, 0x6d, 0x47, 0xfe, 0xf0, 0xbc, 0xfa, 0xd8, 0x9f, + 0xc4, 0xc4, 0xad, 0x8f, 0xa7, 0x4e, 0xa7, 0x23, 0xb2, 0xfc, 0xe6, 0x11, 0xb9, 0x3c, 0x33, 0x22, + 0xe7, 0x8b, 0xa8, 0xf2, 0x35, 0x16, 0xd1, 0xca, 0x57, 0x2e, 0xa2, 0xce, 0x7f, 0x1c, 0x58, 0x9f, + 0x86, 0xfc, 0x3e, 0x13, 0xc9, 0x44, 0x36, 0x9f, 0x3c, 0x99, 0xa9, 0xfd, 0x13, 0x03, 0x2c, 0x69, + 0x37, 0x40, 0x1f, 0xc3, 0x72, 0x44, 0x22, 0x6e, 0xf0, 0xfc, 0xc6, 0x22, 0x3c, 0x1f, 0x92, 0x88, + 0xbb, 0x4a, 0x12, 0x3d, 0x86, 0xf5, 0x94, 0xe0, 0xc4, 0x3f, 0xf4, 0xb0, 0x10, 0x09, 0x3d, 0xc8, + 0x44, 0x9e, 0x6b, 0x9b, 0x8b, 0xd4, 0x07, 0x4a, 0x61, 0x27, 0x97, 0x77, 0x9b, 0xe9, 0x19, 0x0a, + 0xfa, 0x11, 0x2c, 0x53, 0x36, 0xe4, 0x26, 0xe1, 0x0a, 0x64, 0xad, 0xcd, 0x2b, 0x57, 0xe9, 0xf5, + 0xff, 0xec, 0x3c, 0x7f, 0xd9, 0x5e, 0x7a, 0xf1, 0xb2, 0xbd, 0xf4, 0xfa, 0x65, 0xdb, 0xf9, 0xf5, + 0x49, 0xdb, 0xf9, 0xe3, 0x49, 0xdb, 0xf9, 0xcb, 0x49, 0xdb, 0x79, 0x7e, 0xd2, 0x76, 0xfe, 0x71, + 0xd2, 0x76, 0xfe, 0x79, 0xd2, 0x5e, 0x7a, 0x7d, 0xd2, 0x76, 0xbe, 0x78, 0xd5, 0x5e, 0x7a, 0xfe, + 0xaa, 0xbd, 0xf4, 0xe2, 0x55, 0x7b, 0x09, 0xde, 0xa7, 0x7c, 0xe1, 0x55, 0xfd, 0xfa, 0x43, 0xfd, + 0xef, 0xd0, 0x9e, 0x8c, 0xc3, 0x9e, 0xf3, 0xb3, 0x9b, 0xa3, 0x29, 0x61, 0xca, 0xcf, 0xfe, 0x7d, + 0xf6, 0x89, 0xfd, 
0xfe, 0x53, 0xe9, 0xea, 0xbe, 0x11, 0xa3, 0xbc, 0xbb, 0x13, 0xd3, 0xfc, 0xd1, + 0xdd, 0x27, 0xb7, 0xfe, 0x55, 0xfa, 0xe6, 0x29, 0x73, 0x7b, 0x7b, 0x27, 0xa6, 0xdb, 0xdb, 0x96, + 0xbd, 0xbd, 0xfd, 0xe4, 0xd6, 0xc1, 0x8a, 0x0a, 0xfb, 0xed, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, + 0xfa, 0x29, 0xa8, 0x2f, 0xa7, 0x13, 0x00, 0x00, } func (this *CalendarSpec) Equal(that interface{}) bool { @@ -2136,6 +2168,12 @@ if this.OverlapSkipped != that1.OverlapSkipped { return false } + if this.BufferDropped != that1.BufferDropped { + return false + } + if this.BufferSize != that1.BufferSize { + return false + } if len(this.RunningWorkflows) != len(that1.RunningWorkflows) { return false } @@ -2497,11 +2535,13 @@ if this == nil { return "nil" } - s := make([]string, 0, 13) + s := make([]string, 0, 15) s = append(s, "&schedule.ScheduleInfo{") s = append(s, "ActionCount: "+fmt.Sprintf("%#v", this.ActionCount)+",\n") s = append(s, "MissedCatchupWindow: "+fmt.Sprintf("%#v", this.MissedCatchupWindow)+",\n") s = append(s, "OverlapSkipped: "+fmt.Sprintf("%#v", this.OverlapSkipped)+",\n") + s = append(s, "BufferDropped: "+fmt.Sprintf("%#v", this.BufferDropped)+",\n") + s = append(s, "BufferSize: "+fmt.Sprintf("%#v", this.BufferSize)+",\n") if this.RunningWorkflows != nil { s = append(s, "RunningWorkflows: "+fmt.Sprintf("%#v", this.RunningWorkflows)+",\n") } @@ -3388,6 +3428,16 @@ _ = i var l int _ = l + if m.BufferSize != 0 { + i = encodeVarintMessage(dAtA, i, uint64(m.BufferSize)) + i-- + dAtA[i] = 0x58 + } + if m.BufferDropped != 0 { + i = encodeVarintMessage(dAtA, i, uint64(m.BufferDropped)) + i-- + dAtA[i] = 0x50 + } if len(m.RunningWorkflows) > 0 { for iNdEx := len(m.RunningWorkflows) - 1; iNdEx >= 0; iNdEx-- { { @@ -4097,6 +4147,12 @@ n += 1 + l + sovMessage(uint64(l)) } } + if m.BufferDropped != 0 { + n += 1 + sovMessage(uint64(m.BufferDropped)) + } + if m.BufferSize != 0 { + n += 1 + sovMessage(uint64(m.BufferSize)) + } return n } @@ -4455,6 +4511,8 @@ `UpdateTime:` + strings.Replace(fmt.Sprintf("%v", this.UpdateTime), "Timestamp", "types.Timestamp", 1) + `,`, `InvalidScheduleError:` + fmt.Sprintf("%v", this.InvalidScheduleError) + `,`, `RunningWorkflows:` + repeatedStringForRunningWorkflows + `,`, + `BufferDropped:` + fmt.Sprintf("%v", this.BufferDropped) + `,`, + `BufferSize:` + fmt.Sprintf("%v", this.BufferSize) + `,`, `}`, }, "") return s @@ -7030,6 +7088,44 @@ return err } iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BufferDropped", wireType) + } + m.BufferDropped = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.BufferDropped |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BufferSize", wireType) + } + m.BufferSize = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.BufferSize |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipMessage(dAtA[iNdEx:]) diff -Nru temporal-1.21.5-1/src/vendor/go.temporal.io/api/sdk/v1/task_complete_metadata.pb.go temporal-1.22.5/src/vendor/go.temporal.io/api/sdk/v1/task_complete_metadata.pb.go --- temporal-1.21.5-1/src/vendor/go.temporal.io/api/sdk/v1/task_complete_metadata.pb.go 2023-09-29 14:03:33.000000000 
+0000 +++ temporal-1.22.5/src/vendor/go.temporal.io/api/sdk/v1/task_complete_metadata.pb.go 2024-02-23 09:46:13.000000000 +0000 @@ -75,6 +75,17 @@ // (-- api-linter: core::0141::forbidden-types=disabled // aip.dev/not-precedent: These really shouldn't have negative values. --) LangUsedFlags []uint32 `protobuf:"varint,2,rep,packed,name=lang_used_flags,json=langUsedFlags,proto3" json:"lang_used_flags,omitempty"` + // Name of the SDK that processed the task. This is usually something like "temporal-go" and is + // usually the same as client-name gRPC header. This should only be set if its value changed + // since the last time recorded on the workflow (or be set on the first task). + // + // (-- api-linter: core::0122::name-suffix=disabled + // aip.dev/not-precedent: We're ok with a name suffix here. --) + SdkName string `protobuf:"bytes,3,opt,name=sdk_name,json=sdkName,proto3" json:"sdk_name,omitempty"` + // Version of the SDK that processed the task. This is usually something like "1.20.0" and is + // usually the same as client-version gRPC header. This should only be set if its value changed + // since the last time recorded on the workflow (or be set on the first task). + SdkVersion string `protobuf:"bytes,4,opt,name=sdk_version,json=sdkVersion,proto3" json:"sdk_version,omitempty"` } func (m *WorkflowTaskCompletedMetadata) Reset() { *m = WorkflowTaskCompletedMetadata{} } @@ -123,6 +134,20 @@ return nil } +func (m *WorkflowTaskCompletedMetadata) GetSdkName() string { + if m != nil { + return m.SdkName + } + return "" +} + +func (m *WorkflowTaskCompletedMetadata) GetSdkVersion() string { + if m != nil { + return m.SdkVersion + } + return "" +} + func init() { proto.RegisterType((*WorkflowTaskCompletedMetadata)(nil), "temporal.api.sdk.v1.WorkflowTaskCompletedMetadata") } @@ -132,26 +157,29 @@ } var fileDescriptor_4c87c329f13a1874 = []byte{ - // 295 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0xb1, 0x4a, 0x03, 0x31, - 0x1c, 0x87, 0x93, 0x0a, 0x0e, 0x07, 0x45, 0xa8, 0x28, 0x75, 0xe8, 0x1f, 0x71, 0x10, 0xa7, 0x9c, - 0x87, 0x5b, 0x9c, 0x5a, 0xc1, 0x4d, 0x28, 0xb6, 0x56, 0x70, 0x39, 0x62, 0x93, 0x96, 0x90, 0x3b, - 0x13, 0x2e, 0xb1, 0xae, 0x3e, 0x82, 0x8f, 0x21, 0xbe, 0x82, 0x2f, 0xe0, 0x78, 0x63, 0x47, 0x2f, - 0xb7, 0x88, 0x53, 0x1f, 0x41, 0x4e, 0xaf, 0x7a, 0x83, 0xeb, 0x97, 0xef, 0x83, 0x5f, 0xfe, 0xc1, - 0xb1, 0x13, 0xa9, 0xd1, 0x19, 0x4b, 0x42, 0x66, 0x64, 0x68, 0xb9, 0x0a, 0x17, 0x51, 0xe8, 0x98, - 0x55, 0xf1, 0x54, 0xa7, 0x26, 0x11, 0x4e, 0xc4, 0xa9, 0x70, 0x8c, 0x33, 0xc7, 0x88, 0xc9, 0xb4, - 0xd3, 0x9d, 0xed, 0x75, 0x41, 0x98, 0x91, 0xc4, 0x72, 0x45, 0x16, 0xd1, 0x81, 0x0e, 0x7a, 0xd7, - 0x3a, 0x53, 0xb3, 0x44, 0x3f, 0x8c, 0x99, 0x55, 0x67, 0x75, 0xcb, 0x2f, 0xea, 0xb6, 0x73, 0x18, - 0x6c, 0x4d, 0x75, 0x26, 0xe2, 0x7b, 0x2b, 0x78, 0x3c, 0x4b, 0xd8, 0xdc, 0x76, 0xf1, 0xfe, 0xc6, - 0x51, 0xfb, 0xb2, 0x5d, 0xe1, 0x2b, 0x2b, 0xf8, 0x79, 0x05, 0x2b, 0x2f, 0x61, 0x77, 0xf3, 0xa6, - 0xd7, 0xfa, 0xf1, 0x2a, 0xfc, 0xeb, 0x0d, 0x5e, 0x71, 0x5e, 0x00, 0x5a, 0x16, 0x80, 0x56, 0x05, - 0xe0, 0x47, 0x0f, 0xf8, 0xd9, 0x03, 0x7e, 0xf3, 0x80, 0x73, 0x0f, 0xf8, 0xdd, 0x03, 0xfe, 0xf0, - 0x80, 0x56, 0x1e, 0xf0, 0x53, 0x09, 0x28, 0x2f, 0x01, 0x2d, 0x4b, 0x40, 0xc1, 0xae, 0xd4, 0xe4, - 0x9f, 0xfd, 0x83, 0xbd, 0xe6, 0xea, 0xf5, 0xe8, 0x61, 0xf5, 0xdf, 0x21, 0xbe, 0xe9, 0xcd, 0x1b, - 0x91, 0xd4, 0x8d, 0x4b, 0x9d, 0x5a, 0xae, 0x5e, 0x5a, 0x3b, 0xe3, 0xfa, 0x51, 0x6a, 0xd2, 0x37, - 0x92, 0x8c, 0xb8, 0x22, 0x93, 0xe8, 0xb3, 0xd5, 0xfd, 0xe3, 0x94, 
0xf6, 0x8d, 0xa4, 0x74, 0xc4, - 0x15, 0xa5, 0x93, 0xe8, 0x76, 0xf3, 0xfb, 0x94, 0x27, 0x5f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x14, - 0x42, 0x65, 0x68, 0x7e, 0x01, 0x00, 0x00, + // 341 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x91, 0xbf, 0x4e, 0x2a, 0x41, + 0x14, 0xc6, 0x77, 0xe0, 0xe6, 0xde, 0xeb, 0x18, 0x62, 0x82, 0xd1, 0x2c, 0x05, 0x47, 0x62, 0x61, + 0xa8, 0x66, 0xdd, 0xd8, 0xad, 0x15, 0x98, 0xd8, 0x69, 0x08, 0x20, 0x26, 0x36, 0x9b, 0x91, 0x19, + 0xc8, 0x64, 0x76, 0x99, 0xc9, 0xce, 0x8a, 0xad, 0x8f, 0xe0, 0x63, 0x18, 0x7d, 0x04, 0x5f, 0xc0, + 0x92, 0x92, 0x52, 0x86, 0xc6, 0x58, 0xf1, 0x08, 0x66, 0xf9, 0x13, 0xb6, 0xb0, 0xfd, 0x7d, 0xdf, + 0xaf, 0xf8, 0xce, 0xc1, 0xa7, 0x29, 0x8f, 0xb5, 0x4a, 0x68, 0xe4, 0x51, 0x2d, 0x3c, 0xc3, 0xa4, + 0x37, 0xf6, 0xbd, 0x94, 0x1a, 0x19, 0xf6, 0x55, 0xac, 0x23, 0x9e, 0xf2, 0x30, 0xe6, 0x29, 0x65, + 0x34, 0xa5, 0x44, 0x27, 0x2a, 0x55, 0xe5, 0xfd, 0x8d, 0x41, 0xa8, 0x16, 0xc4, 0x30, 0x49, 0xc6, + 0xfe, 0xf1, 0x1b, 0xc2, 0xd5, 0x5b, 0x95, 0xc8, 0x41, 0xa4, 0x1e, 0xbb, 0xd4, 0xc8, 0x8b, 0xb5, + 0xcc, 0xae, 0xd6, 0x72, 0xf9, 0x04, 0xef, 0xf5, 0x55, 0xc2, 0xc3, 0x07, 0xc3, 0x59, 0x38, 0x88, + 0xe8, 0xd0, 0xb8, 0xa8, 0x56, 0xac, 0x97, 0xda, 0xa5, 0x0c, 0xdf, 0x18, 0xce, 0x2e, 0x33, 0x98, + 0xf5, 0x22, 0x3a, 0x1a, 0xe6, 0x7b, 0x85, 0x55, 0x2f, 0xc3, 0xdb, 0x5e, 0x05, 0xff, 0x37, 0x4c, + 0x86, 0x23, 0x1a, 0x73, 0xb7, 0x58, 0x43, 0xf5, 0x9d, 0xf6, 0x3f, 0xc3, 0xe4, 0x35, 0x8d, 0x79, + 0xf9, 0x08, 0xef, 0x66, 0xd1, 0x98, 0x27, 0x46, 0xa8, 0x91, 0xfb, 0x67, 0x99, 0x62, 0xc3, 0x64, + 0x6f, 0x45, 0x9a, 0xef, 0x68, 0x32, 0x03, 0x67, 0x3a, 0x03, 0x67, 0x31, 0x03, 0xf4, 0x64, 0x01, + 0xbd, 0x58, 0x40, 0x1f, 0x16, 0xd0, 0xc4, 0x02, 0xfa, 0xb4, 0x80, 0xbe, 0x2c, 0x38, 0x0b, 0x0b, + 0xe8, 0x79, 0x0e, 0xce, 0x64, 0x0e, 0xce, 0x74, 0x0e, 0x0e, 0x3e, 0x14, 0x8a, 0xfc, 0x32, 0xbe, + 0x59, 0xc9, 0x2f, 0xde, 0x0c, 0x6e, 0x65, 0xc7, 0x6a, 0xa1, 0xbb, 0xea, 0x30, 0x27, 0x09, 0x95, + 0x3b, 0xf3, 0xb9, 0x61, 0xf2, 0xb5, 0x70, 0xd0, 0x5d, 0x87, 0x42, 0x91, 0x86, 0x16, 0xa4, 0xc3, + 0x24, 0xe9, 0xf9, 0xdf, 0x05, 0x77, 0xcb, 0x83, 0xa0, 0xa1, 0x45, 0x10, 0x74, 0x98, 0x0c, 0x82, + 0x9e, 0x7f, 0xff, 0x77, 0xf9, 0x87, 0xb3, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc0, 0x67, 0xe7, + 0x35, 0xbb, 0x01, 0x00, 0x00, } func (this *WorkflowTaskCompletedMetadata) Equal(that interface{}) bool { @@ -189,16 +217,24 @@ return false } } + if this.SdkName != that1.SdkName { + return false + } + if this.SdkVersion != that1.SdkVersion { + return false + } return true } func (this *WorkflowTaskCompletedMetadata) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 6) + s := make([]string, 0, 8) s = append(s, "&sdk.WorkflowTaskCompletedMetadata{") s = append(s, "CoreUsedFlags: "+fmt.Sprintf("%#v", this.CoreUsedFlags)+",\n") s = append(s, "LangUsedFlags: "+fmt.Sprintf("%#v", this.LangUsedFlags)+",\n") + s = append(s, "SdkName: "+fmt.Sprintf("%#v", this.SdkName)+",\n") + s = append(s, "SdkVersion: "+fmt.Sprintf("%#v", this.SdkVersion)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -230,6 +266,20 @@ _ = i var l int _ = l + if len(m.SdkVersion) > 0 { + i -= len(m.SdkVersion) + copy(dAtA[i:], m.SdkVersion) + i = encodeVarintTaskCompleteMetadata(dAtA, i, uint64(len(m.SdkVersion))) + i-- + dAtA[i] = 0x22 + } + if len(m.SdkName) > 0 { + i -= len(m.SdkName) + copy(dAtA[i:], m.SdkName) + i = encodeVarintTaskCompleteMetadata(dAtA, i, uint64(len(m.SdkName))) + i-- + dAtA[i] = 0x1a + } if len(m.LangUsedFlags) > 0 { dAtA2 := 
make([]byte, len(m.LangUsedFlags)*10) var j1 int @@ -300,6 +350,14 @@ } n += 1 + sovTaskCompleteMetadata(uint64(l)) + l } + l = len(m.SdkName) + if l > 0 { + n += 1 + l + sovTaskCompleteMetadata(uint64(l)) + } + l = len(m.SdkVersion) + if l > 0 { + n += 1 + l + sovTaskCompleteMetadata(uint64(l)) + } return n } @@ -316,6 +374,8 @@ s := strings.Join([]string{`&WorkflowTaskCompletedMetadata{`, `CoreUsedFlags:` + fmt.Sprintf("%v", this.CoreUsedFlags) + `,`, `LangUsedFlags:` + fmt.Sprintf("%v", this.LangUsedFlags) + `,`, + `SdkName:` + fmt.Sprintf("%v", this.SdkName) + `,`, + `SdkVersion:` + fmt.Sprintf("%v", this.SdkVersion) + `,`, `}`, }, "") return s @@ -509,6 +569,70 @@ } else { return fmt.Errorf("proto: wrong wireType = %d for field LangUsedFlags", wireType) } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SdkName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskCompleteMetadata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTaskCompleteMetadata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTaskCompleteMetadata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SdkName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SdkVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskCompleteMetadata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTaskCompleteMetadata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTaskCompleteMetadata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SdkVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTaskCompleteMetadata(dAtA[iNdEx:]) diff -Nru temporal-1.21.5-1/src/vendor/go.temporal.io/api/workflowservice/v1/request_response.pb.go temporal-1.22.5/src/vendor/go.temporal.io/api/workflowservice/v1/request_response.pb.go --- temporal-1.21.5-1/src/vendor/go.temporal.io/api/workflowservice/v1/request_response.pb.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.temporal.io/api/workflowservice/v1/request_response.pb.go 2024-02-23 09:46:13.000000000 +0000 @@ -4884,7 +4884,15 @@ } type CountWorkflowExecutionsResponse struct { + // If `query` is not grouping by any field, the count is an approximate number + // of workflows that matches the query. + // If `query` is grouping by a field, the count is simply the sum of the counts + // of the groups returned in the response. This number can be smaller than the + // total number of workflows matching the query. Count int64 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` + // `groups` contains the groups if the request is grouping by a field. + // The list might not be complete, and the counts of each group is approximate. 
+ Groups []*CountWorkflowExecutionsResponse_AggregationGroup `protobuf:"bytes,2,rep,name=groups,proto3" json:"groups,omitempty"` } func (m *CountWorkflowExecutionsResponse) Reset() { *m = CountWorkflowExecutionsResponse{} } @@ -4926,6 +4934,66 @@ return 0 } +func (m *CountWorkflowExecutionsResponse) GetGroups() []*CountWorkflowExecutionsResponse_AggregationGroup { + if m != nil { + return m.Groups + } + return nil +} + +type CountWorkflowExecutionsResponse_AggregationGroup struct { + GroupValues []*v13.Payload `protobuf:"bytes,1,rep,name=group_values,json=groupValues,proto3" json:"group_values,omitempty"` + Count int64 `protobuf:"varint,2,opt,name=count,proto3" json:"count,omitempty"` +} + +func (m *CountWorkflowExecutionsResponse_AggregationGroup) Reset() { + *m = CountWorkflowExecutionsResponse_AggregationGroup{} +} +func (*CountWorkflowExecutionsResponse_AggregationGroup) ProtoMessage() {} +func (*CountWorkflowExecutionsResponse_AggregationGroup) Descriptor() ([]byte, []int) { + return fileDescriptor_2c30b34f996ae016, []int{63, 0} +} +func (m *CountWorkflowExecutionsResponse_AggregationGroup) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CountWorkflowExecutionsResponse_AggregationGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CountWorkflowExecutionsResponse_AggregationGroup.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CountWorkflowExecutionsResponse_AggregationGroup) XXX_Merge(src proto.Message) { + xxx_messageInfo_CountWorkflowExecutionsResponse_AggregationGroup.Merge(m, src) +} +func (m *CountWorkflowExecutionsResponse_AggregationGroup) XXX_Size() int { + return m.Size() +} +func (m *CountWorkflowExecutionsResponse_AggregationGroup) XXX_DiscardUnknown() { + xxx_messageInfo_CountWorkflowExecutionsResponse_AggregationGroup.DiscardUnknown(m) +} + +var xxx_messageInfo_CountWorkflowExecutionsResponse_AggregationGroup proto.InternalMessageInfo + +func (m *CountWorkflowExecutionsResponse_AggregationGroup) GetGroupValues() []*v13.Payload { + if m != nil { + return m.GroupValues + } + return nil +} + +func (m *CountWorkflowExecutionsResponse_AggregationGroup) GetCount() int64 { + if m != nil { + return m.Count + } + return 0 +} + type GetSearchAttributesRequest struct { } @@ -5447,8 +5515,9 @@ } type DescribeTaskQueueRequest struct { - Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` - TaskQueue *v14.TaskQueue `protobuf:"bytes,2,opt,name=task_queue,json=taskQueue,proto3" json:"task_queue,omitempty"` + Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` + TaskQueue *v14.TaskQueue `protobuf:"bytes,2,opt,name=task_queue,json=taskQueue,proto3" json:"task_queue,omitempty"` + // If unspecified (TASK_QUEUE_TYPE_UNSPECIFIED), then default value (TASK_QUEUE_TYPE_WORKFLOW) will be used. 
TaskQueueType v11.TaskQueueType `protobuf:"varint,3,opt,name=task_queue_type,json=taskQueueType,proto3,enum=temporal.api.enums.v1.TaskQueueType" json:"task_queue_type,omitempty"` IncludeTaskQueueStatus bool `protobuf:"varint,4,opt,name=include_task_queue_status,json=includeTaskQueueStatus,proto3" json:"include_task_queue_status,omitempty"` } @@ -5816,6 +5885,9 @@ // True if the server knows about the sdk metadata field on WFT completions and will record // it in history SdkMetadata bool `protobuf:"varint,9,opt,name=sdk_metadata,json=sdkMetadata,proto3" json:"sdk_metadata,omitempty"` + // True if the server supports count group by execution status + // (-- api-linter: core::0140::prepositions=disabled --) + CountGroupByExecutionStatus bool `protobuf:"varint,10,opt,name=count_group_by_execution_status,json=countGroupByExecutionStatus,proto3" json:"count_group_by_execution_status,omitempty"` } func (m *GetSystemInfoResponse_Capabilities) Reset() { *m = GetSystemInfoResponse_Capabilities{} } @@ -5913,6 +5985,13 @@ return false } +func (m *GetSystemInfoResponse_Capabilities) GetCountGroupByExecutionStatus() bool { + if m != nil { + return m.CountGroupByExecutionStatus + } + return false +} + type ListTaskQueuePartitionsRequest struct { Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` TaskQueue *v14.TaskQueue `protobuf:"bytes,2,opt,name=task_queue,json=taskQueue,proto3" json:"task_queue,omitempty"` @@ -7161,9 +7240,6 @@ } type UpdateWorkerBuildIdCompatibilityResponse struct { - // The id of the compatible set that the updated version was added to, or exists in. Users don't - // need to understand or care about this value, but it has value for debugging purposes. - VersionSetId string `protobuf:"bytes,1,opt,name=version_set_id,json=versionSetId,proto3" json:"version_set_id,omitempty"` } func (m *UpdateWorkerBuildIdCompatibilityResponse) Reset() { @@ -7200,13 +7276,6 @@ var xxx_messageInfo_UpdateWorkerBuildIdCompatibilityResponse proto.InternalMessageInfo -func (m *UpdateWorkerBuildIdCompatibilityResponse) GetVersionSetId() string { - if m != nil { - return m.VersionSetId - } - return "" -} - // (-- api-linter: core::0134::request-resource-required=disabled // aip.dev/not-precedent: GetWorkerBuildIdCompatibilityRequest RPC doesn't follow Google API format. 
--) type GetWorkerBuildIdCompatibilityRequest struct { @@ -8391,6 +8460,7 @@ proto.RegisterType((*ScanWorkflowExecutionsResponse)(nil), "temporal.api.workflowservice.v1.ScanWorkflowExecutionsResponse") proto.RegisterType((*CountWorkflowExecutionsRequest)(nil), "temporal.api.workflowservice.v1.CountWorkflowExecutionsRequest") proto.RegisterType((*CountWorkflowExecutionsResponse)(nil), "temporal.api.workflowservice.v1.CountWorkflowExecutionsResponse") + proto.RegisterType((*CountWorkflowExecutionsResponse_AggregationGroup)(nil), "temporal.api.workflowservice.v1.CountWorkflowExecutionsResponse.AggregationGroup") proto.RegisterType((*GetSearchAttributesRequest)(nil), "temporal.api.workflowservice.v1.GetSearchAttributesRequest") proto.RegisterType((*GetSearchAttributesResponse)(nil), "temporal.api.workflowservice.v1.GetSearchAttributesResponse") proto.RegisterMapType((map[string]v11.IndexedValueType)(nil), "temporal.api.workflowservice.v1.GetSearchAttributesResponse.KeysEntry") @@ -8453,409 +8523,415 @@ } var fileDescriptor_2c30b34f996ae016 = []byte{ - // 6429 bytes of a gzipped FileDescriptorProto + // 6517 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x7d, 0x5b, 0x6c, 0x24, 0xc7, - 0x75, 0x28, 0x7b, 0x1e, 0xe4, 0xcc, 0xe1, 0xf0, 0xd5, 0x7c, 0xec, 0x90, 0xbb, 0x3b, 0xe4, 0xf6, - 0xbe, 0xb8, 0x2b, 0x6b, 0x56, 0xbb, 0xba, 0x92, 0x2c, 0xda, 0xb2, 0xb4, 0xe4, 0x4a, 0x4b, 0xde, - 0xd5, 0x4a, 0x54, 0x93, 0xbb, 0x32, 0x04, 0xd9, 0xad, 0x66, 0x77, 0x91, 0x6c, 0x71, 0xa6, 0x7b, - 0xb6, 0xbb, 0x87, 0x5c, 0xfa, 0x02, 0xf7, 0x5e, 0x20, 0x81, 0xf3, 0x04, 0x62, 0x20, 0x06, 0x62, - 0x03, 0x8e, 0xe3, 0xe4, 0x23, 0x72, 0xf2, 0xe3, 0x0f, 0xe7, 0x23, 0xfe, 0x08, 0x90, 0xaf, 0x20, - 0x80, 0x61, 0xc4, 0xc8, 0x97, 0x81, 0x7c, 0x38, 0x5e, 0x23, 0x89, 0x11, 0x04, 0x81, 0x9d, 0x0f, - 0x27, 0xc8, 0x4f, 0x82, 0x7a, 0x76, 0x75, 0x4f, 0xcf, 0x8b, 0xa4, 0x2c, 0xc6, 0xd0, 0xdf, 0x74, - 0xd5, 0xa9, 0x53, 0xa7, 0xce, 0x39, 0x75, 0xea, 0x9c, 0xaa, 0x53, 0x35, 0xf0, 0x7c, 0x88, 0xea, - 0x0d, 0xcf, 0x37, 0x6b, 0x37, 0xcc, 0x86, 0x73, 0xe3, 0xc0, 0xf3, 0xf7, 0xb6, 0x6b, 0xde, 0x41, - 0x80, 0xfc, 0x7d, 0xc7, 0x42, 0x37, 0xf6, 0x6f, 0xde, 0xf0, 0xd1, 0xa3, 0x26, 0x0a, 0x42, 0xc3, - 0x47, 0x41, 0xc3, 0x73, 0x03, 0x54, 0x6d, 0xf8, 0x5e, 0xe8, 0xa9, 0xf3, 0xbc, 0x5d, 0xd5, 0x6c, - 0x38, 0xd5, 0x44, 0xbb, 0xea, 0xfe, 0xcd, 0xb9, 0xa7, 0x62, 0x88, 0x91, 0xdb, 0xac, 0x07, 0x18, - 0xdd, 0x96, 0x19, 0x5a, 0xbb, 0x86, 0xd7, 0x40, 0xbe, 0x19, 0x3a, 0x9e, 0x4b, 0xb1, 0xcd, 0x5d, - 0x4a, 0x07, 0xe6, 0x68, 0x19, 0xd4, 0xe5, 0x74, 0x28, 0xd7, 0xac, 0xa3, 0xa0, 0x61, 0x5a, 0x8c, - 0xb4, 0xb9, 0xc5, 0x74, 0xb0, 0x6d, 0xd3, 0xa9, 0x21, 0xdb, 0xb0, 0xcc, 0x26, 0x1f, 0xc4, 0x9c, - 0x96, 0x0e, 0x69, 0x79, 0xf5, 0xba, 0x20, 0xed, 0x42, 0x3a, 0xcc, 0xa3, 0x26, 0xf2, 0x0f, 0x3b, - 0x83, 0xf8, 0x28, 0x40, 0x21, 0x03, 0xb9, 0x92, 0x0e, 0x12, 0x9a, 0xc1, 0x9e, 0xf1, 0xa8, 0x89, - 0x9a, 0x28, 0x95, 0x11, 0x94, 0x10, 0x0c, 0x58, 0x47, 0x41, 0x60, 0xee, 0xa0, 0x54, 0x46, 0xec, - 0x3a, 0x41, 0xe8, 0xf9, 0x87, 0xad, 0x60, 0x57, 0x52, 0x65, 0xdb, 0x0d, 0x1d, 0xee, 0xd4, 0x74, - 0xed, 0x6e, 0x60, 0x98, 0x9d, 0x4d, 0x1f, 0xb5, 0x82, 0xc5, 0x87, 0xb0, 0xed, 0xd4, 0x42, 0xe4, - 0x77, 0xa3, 0x8d, 0x94, 0x59, 0x5e, 0xad, 0x15, 0xee, 0x6a, 0x0c, 0x4e, 0x88, 0xba, 0x15, 0xf0, - 0x62, 0x0c, 0x90, 0x88, 0xa7, 0x15, 0xe8, 0x5a, 0x0c, 0xc8, 0x47, 0x8d, 0x9a, 0x63, 0x11, 0x3d, - 0xec, 0x46, 0x60, 0x60, 0xed, 0x22, 0xbb, 0x59, 0x43, 0xdd, 0x08, 0xc4, 0x02, 0x25, 0xf2, 0xec, - 0xc6, 0x97, 0x66, 0xc3, 0x36, 0x43, 0xd4, 
0x8d, 0xc9, 0xfb, 0xc8, 0x0f, 0x52, 0xa9, 0x8b, 0x8f, - 0x96, 0x4c, 0xaa, 0x56, 0xa0, 0x67, 0xe2, 0x43, 0xb0, 0xf7, 0x84, 0xca, 0x59, 0x5e, 0xbd, 0x51, - 0x43, 0x21, 0x32, 0xea, 0x28, 0x34, 0x6d, 0x33, 0x34, 0x59, 0x8b, 0xca, 0x8e, 0xe7, 0xed, 0xd4, - 0x10, 0x95, 0xc7, 0x56, 0x73, 0xfb, 0x86, 0xdd, 0x8c, 0xcd, 0xd3, 0xf9, 0x64, 0x7d, 0xe8, 0xd4, - 0x51, 0x10, 0x9a, 0xf5, 0x06, 0x9f, 0x0a, 0x36, 0x6a, 0x20, 0xd7, 0x46, 0xae, 0xe5, 0xa0, 0xe0, - 0xc6, 0x8e, 0xb7, 0xe3, 0x91, 0x72, 0xf2, 0x8b, 0x82, 0x68, 0xff, 0x35, 0x08, 0x65, 0x1d, 0xed, - 0x38, 0x41, 0x88, 0xfc, 0x37, 0xb8, 0x3c, 0x75, 0x6a, 0x65, 0xd4, 0x73, 0x50, 0x14, 0x32, 0x2e, - 0x2b, 0x0b, 0xca, 0x62, 0x51, 0x8f, 0x0a, 0xd4, 0x05, 0x18, 0xb6, 0x51, 0x60, 0xf9, 0x4e, 0x03, - 0xd3, 0x54, 0xce, 0x90, 0x7a, 0xb9, 0x48, 0x9d, 0x87, 0x61, 0xef, 0xc0, 0x45, 0xbe, 0x81, 0xea, - 0xa6, 0x53, 0x2b, 0x67, 0x09, 0x04, 0x90, 0xa2, 0x57, 0x71, 0x89, 0xea, 0xc2, 0x45, 0x3e, 0x11, - 0x0c, 0xf4, 0x18, 0x59, 0x4d, 0xdc, 0xcc, 0xf0, 0x51, 0x88, 0x5c, 0xf2, 0xab, 0x81, 0x7c, 0xc7, - 0xb3, 0xcb, 0xb9, 0x05, 0x65, 0x71, 0xf8, 0xd6, 0x6c, 0x95, 0x8e, 0xb7, 0xca, 0xc7, 0x5b, 0xbd, - 0xc3, 0xf8, 0xb1, 0x9c, 0xfb, 0xca, 0x0f, 0xe7, 0x15, 0x7d, 0x81, 0xe3, 0x7a, 0x95, 0xa3, 0xd2, - 0x39, 0xa6, 0x75, 0x82, 0x48, 0x7d, 0x0b, 0x0a, 0x56, 0xad, 0x89, 0xc7, 0x1a, 0x94, 0xf3, 0x0b, - 0xd9, 0xc5, 0xe1, 0x5b, 0xcf, 0x55, 0x63, 0xa6, 0x53, 0x52, 0xc2, 0xea, 0xfe, 0xcd, 0xea, 0x0a, - 0x05, 0xd6, 0xa3, 0xd2, 0x15, 0xcf, 0xdd, 0x76, 0x76, 0x74, 0x81, 0x46, 0xad, 0xc2, 0xa4, 0x69, - 0x85, 0xce, 0x3e, 0x32, 0x58, 0x91, 0x81, 0x39, 0x54, 0x1e, 0x24, 0x63, 0x9d, 0xa0, 0x55, 0x0c, - 0x0d, 0xe6, 0xaf, 0xfa, 0x36, 0xe4, 0xb0, 0x88, 0xcb, 0x43, 0xa4, 0xfb, 0x95, 0x6a, 0x17, 0xcb, - 0x5d, 0x6d, 0x27, 0x9c, 0xea, 0x1d, 0x33, 0x34, 0x5f, 0x75, 0x43, 0xff, 0x50, 0x27, 0x08, 0xd5, - 0xcb, 0x30, 0x1a, 0x20, 0xab, 0xe9, 0x3b, 0xe1, 0xa1, 0x11, 0x7a, 0x7b, 0xc8, 0x2d, 0x17, 0x08, - 0x0d, 0x23, 0xbc, 0x74, 0x13, 0x17, 0x62, 0x7a, 0x9d, 0xc0, 0xd8, 0xa9, 0x79, 0x5b, 0x66, 0xcd, - 0x88, 0xa4, 0x5b, 0x5c, 0x50, 0x16, 0x0b, 0xfa, 0x84, 0x13, 0xdc, 0x25, 0x35, 0xa2, 0x37, 0xf5, - 0x1d, 0x98, 0x61, 0x26, 0xcd, 0x30, 0x7d, 0x6b, 0xd7, 0xd9, 0x37, 0x6b, 0x46, 0x10, 0x9a, 0x21, - 0x2a, 0xc3, 0x82, 0xb2, 0x38, 0x7a, 0xeb, 0x52, 0x7c, 0x04, 0xc4, 0x98, 0x62, 0xba, 0x6f, 0x33, - 0xe0, 0x0d, 0x0c, 0xab, 0x4f, 0x31, 0x1c, 0xb1, 0x52, 0xf5, 0x19, 0x98, 0x6a, 0xc1, 0xdd, 0xf4, - 0x9d, 0xf2, 0x30, 0x21, 0x5c, 0x4d, 0xb4, 0x79, 0xe0, 0x3b, 0xea, 0x7b, 0x30, 0xbb, 0xef, 0x04, - 0xce, 0x96, 0x53, 0xc3, 0xc3, 0x4c, 0x10, 0x54, 0xea, 0x83, 0xa0, 0x33, 0x11, 0x9a, 0x38, 0x4d, - 0xcf, 0xc3, 0x99, 0xb4, 0x1e, 0x30, 0x59, 0x23, 0x84, 0xac, 0xe9, 0xd6, 0x96, 0x0f, 0x7c, 0x67, - 0xee, 0x05, 0x28, 0x0a, 0x89, 0xa8, 0xe3, 0x90, 0xdd, 0x43, 0x87, 0x6c, 0xca, 0xe0, 0x9f, 0xea, - 0x14, 0xe4, 0xf7, 0xcd, 0x5a, 0x13, 0xb1, 0x69, 0x42, 0x3f, 0x96, 0x32, 0x9f, 0x54, 0xb4, 0xb3, - 0x30, 0x9b, 0x22, 0x63, 0xba, 0xbc, 0x6b, 0xdf, 0x56, 0x60, 0xfa, 0x75, 0x27, 0x08, 0x45, 0x4d, - 0xc0, 0xe7, 0xe6, 0x59, 0x28, 0x36, 0xcc, 0x1d, 0x64, 0x04, 0xce, 0x17, 0xe8, 0xdc, 0xcc, 0xeb, - 0x05, 0x5c, 0xb0, 0xe1, 0x7c, 0x01, 0xa9, 0x57, 0x60, 0xcc, 0x45, 0x8f, 0x43, 0x83, 0x40, 0x50, - 0x65, 0xc0, 0xfd, 0x96, 0xf4, 0x11, 0x5c, 0xbc, 0x6e, 0xee, 0x20, 0xaa, 0x0c, 0x0f, 0x60, 0x5c, - 0xa8, 0x80, 0x41, 0x17, 0x07, 0x32, 0x4b, 0x87, 0x6f, 0x5d, 0x8f, 0x73, 0x31, 0x5a, 0xd5, 0xf7, - 0x6f, 0x56, 0x05, 0x31, 0xaf, 0x91, 0x16, 0xfa, 0x98, 0x1b, 0x2f, 0xd0, 0xbe, 0xa6, 0xc0, 0x4c, - 0x92, 0x6a, 0x3a, 0x20, 0xf5, 0x1d, 0x00, 0x01, 0x1d, 0x94, 0x15, 
0x32, 0x09, 0x96, 0xba, 0x4e, - 0x82, 0x3b, 0xc4, 0xa8, 0x6c, 0xa1, 0x16, 0x06, 0xe9, 0x12, 0xb6, 0x5e, 0x47, 0xad, 0xad, 0x42, - 0x39, 0x05, 0x61, 0x2f, 0x26, 0x6f, 0x14, 0x32, 0x8e, 0xcd, 0x44, 0x98, 0x71, 0x6c, 0xed, 0xbb, - 0x59, 0x98, 0x6d, 0x4b, 0x9b, 0xfa, 0x26, 0x8c, 0x46, 0xdc, 0x75, 0xdc, 0x6d, 0x8f, 0x20, 0x1c, - 0xbe, 0xb5, 0xd8, 0x0b, 0x6f, 0xd7, 0xdc, 0x6d, 0x4f, 0x1f, 0x71, 0xe5, 0x4f, 0x75, 0x19, 0x06, - 0x2d, 0x62, 0x7f, 0x08, 0x09, 0x3d, 0x0a, 0x89, 0x59, 0x2c, 0xd6, 0x52, 0xdd, 0x06, 0x55, 0x32, - 0x72, 0x06, 0xc3, 0x47, 0x85, 0xfe, 0x42, 0x47, 0x63, 0x28, 0x0d, 0x30, 0x69, 0x0e, 0x27, 0xfc, - 0x64, 0x91, 0x7a, 0x0d, 0xc6, 0xb1, 0x53, 0xe2, 0xed, 0x23, 0xdf, 0x60, 0x0b, 0x27, 0xb1, 0xe3, - 0x59, 0x7d, 0x8c, 0x97, 0x3f, 0xa4, 0xc5, 0xed, 0x4c, 0x52, 0xbe, 0x9d, 0x49, 0x7a, 0x28, 0xa1, - 0x66, 0x36, 0xa2, 0x3c, 0x48, 0x34, 0xe9, 0xa9, 0x8e, 0x03, 0x78, 0x8d, 0x35, 0xc2, 0x13, 0xbd, - 0x19, 0x44, 0x74, 0xac, 0x52, 0x1c, 0xda, 0xb7, 0xb3, 0x30, 0xf3, 0x80, 0x78, 0x02, 0x7d, 0xaa, - 0xc5, 0x9b, 0x30, 0x4c, 0x3d, 0x08, 0x2a, 0x65, 0x2a, 0x9c, 0x6a, 0x07, 0xe1, 0x24, 0x7a, 0x21, - 0xb2, 0x06, 0x8a, 0x22, 0x21, 0xe8, 0xec, 0x09, 0x0b, 0x3a, 0x77, 0xe2, 0x82, 0x6e, 0x5d, 0x77, - 0xf2, 0x69, 0xeb, 0xce, 0x75, 0x98, 0xb0, 0x11, 0xf1, 0x72, 0xb6, 0x4c, 0xdb, 0xd8, 0x72, 0x5c, - 0x93, 0x48, 0x0d, 0x43, 0x8e, 0xd1, 0x8a, 0x65, 0xd3, 0x5e, 0x26, 0xc5, 0xea, 0x53, 0x30, 0xd1, - 0xf0, 0xbd, 0xba, 0x17, 0x22, 0x49, 0x1d, 0x86, 0x88, 0x3a, 0x8c, 0xb3, 0x0a, 0x41, 0xa3, 0xf6, - 0xb3, 0x0c, 0x9c, 0x69, 0x91, 0xda, 0xc7, 0x33, 0xf0, 0xc3, 0x9c, 0x81, 0xda, 0x7b, 0xd8, 0xec, - 0x35, 0x7c, 0x64, 0xf5, 0x3f, 0x57, 0x5a, 0xd5, 0x25, 0x93, 0xa2, 0x2e, 0xda, 0x39, 0x98, 0x4b, - 0xeb, 0x81, 0x2d, 0x8b, 0xdf, 0x02, 0x38, 0xbf, 0x11, 0x9a, 0x7e, 0xf8, 0x76, 0xab, 0xc7, 0xd7, - 0x0b, 0x11, 0xf3, 0x30, 0x2c, 0xfc, 0x4e, 0x61, 0xd0, 0x81, 0x17, 0xad, 0xd9, 0xea, 0x1a, 0x8c, - 0x08, 0x80, 0xf0, 0xb0, 0x81, 0x98, 0x78, 0x12, 0xbe, 0x05, 0x0b, 0x4d, 0xf7, 0x6f, 0x56, 0x39, - 0x1d, 0x9b, 0x87, 0x0d, 0xa4, 0x97, 0x0e, 0xa4, 0x2f, 0x75, 0x05, 0x20, 0x0a, 0x2c, 0xd9, 0xfc, - 0x4b, 0xe0, 0x11, 0x71, 0x0a, 0x46, 0xb5, 0x69, 0x06, 0x7b, 0x6f, 0xe1, 0x0f, 0xbd, 0x18, 0xf2, - 0x9f, 0xea, 0xf3, 0x90, 0x77, 0xdc, 0x46, 0x33, 0x24, 0x22, 0x19, 0xbe, 0xb5, 0xd0, 0x8e, 0x8e, - 0x75, 0xf3, 0xb0, 0xe6, 0x99, 0x76, 0xa0, 0x53, 0x70, 0xf5, 0x73, 0x30, 0x97, 0xe2, 0x60, 0xe3, - 0x38, 0xc1, 0x6b, 0x86, 0x64, 0xfa, 0xf5, 0xe0, 0x57, 0x97, 0x5b, 0xfc, 0xea, 0x4d, 0x8a, 0x40, - 0x7d, 0x0b, 0xa6, 0x04, 0x7a, 0xbf, 0x19, 0x21, 0x1e, 0xea, 0x0d, 0xb1, 0xca, 0x1b, 0xeb, 0x4d, - 0x81, 0x72, 0x03, 0xa6, 0x23, 0xce, 0x63, 0xbe, 0x71, 0x9c, 0x85, 0xde, 0x70, 0x4e, 0x0a, 0xe6, - 0x9b, 0xc1, 0x1e, 0x47, 0x3a, 0x07, 0x05, 0xc7, 0xc6, 0x91, 0x40, 0x78, 0x48, 0x3c, 0xdd, 0xa2, - 0x2e, 0xbe, 0xd5, 0xf3, 0x00, 0x7c, 0x57, 0xc5, 0xb1, 0x89, 0x53, 0x5b, 0xd4, 0x8b, 0xac, 0x64, - 0xcd, 0x56, 0x11, 0x94, 0x25, 0x55, 0x31, 0x7c, 0xd4, 0x0c, 0x90, 0xd1, 0xf0, 0x6a, 0x8e, 0x75, - 0x48, 0xfc, 0xd4, 0xd1, 0x5b, 0x9f, 0x68, 0xe3, 0x70, 0xbe, 0x2d, 0xd4, 0x49, 0xc7, 0x8d, 0xd6, - 0x49, 0x1b, 0x7d, 0xfa, 0x20, 0xad, 0x58, 0x7d, 0x0d, 0x4a, 0x3e, 0x0a, 0xfd, 0x43, 0x8e, 0xba, - 0x44, 0x46, 0x7b, 0xb1, 0x9d, 0x9c, 0x75, 0x0c, 0xcb, 0x30, 0x0e, 0xfb, 0xd1, 0x87, 0x7a, 0x11, - 0x46, 0x2c, 0xdf, 0x73, 0x0d, 0x1e, 0x22, 0x33, 0xa7, 0xb5, 0x84, 0x0b, 0x37, 0x58, 0x99, 0xfa, - 0x0c, 0xe4, 0xea, 0xa8, 0xee, 0x95, 0x47, 0x49, 0x27, 0xe7, 0xda, 0x75, 0x72, 0x1f, 0xd5, 0x3d, - 0x9d, 0x40, 0xaa, 0x0f, 0x60, 0x22, 0x40, 0xd8, 0x19, 0x36, 0xcc, 0x30, 0xf4, 0x9d, 0xad, 
0x66, - 0x88, 0x82, 0xf2, 0x58, 0x9a, 0x2d, 0x8d, 0x9a, 0x6f, 0x90, 0x06, 0xb7, 0x05, 0xbc, 0x3e, 0x1e, - 0x24, 0x4a, 0xd4, 0xe7, 0x61, 0x70, 0x17, 0x99, 0x36, 0xf2, 0xcb, 0xe3, 0x04, 0x57, 0xa5, 0x1d, - 0xae, 0x55, 0x02, 0xa5, 0x33, 0x68, 0xec, 0xa4, 0x73, 0x99, 0x21, 0x73, 0x07, 0x07, 0x98, 0x5c, - 0x33, 0xcb, 0x13, 0xc4, 0x66, 0x4d, 0xb3, 0xea, 0x57, 0x71, 0xad, 0x50, 0x5b, 0xf5, 0x3e, 0x4c, - 0x58, 0x9e, 0x1b, 0x3a, 0x6e, 0x13, 0xd9, 0x06, 0xdb, 0x33, 0x29, 0xab, 0x69, 0x53, 0x8a, 0x55, - 0x72, 0xb7, 0xa1, 0xe9, 0x23, 0x7d, 0x5c, 0x34, 0x65, 0x25, 0xea, 0x43, 0x98, 0xa9, 0x99, 0x41, - 0xc8, 0x03, 0x78, 0x1a, 0xbb, 0x06, 0xcd, 0x5a, 0x58, 0x9e, 0xec, 0x71, 0x9a, 0x4e, 0xe1, 0xf6, - 0x2b, 0xa2, 0xb9, 0x4e, 0x5a, 0xc7, 0xa6, 0x55, 0x80, 0xcd, 0x9c, 0x61, 0xa3, 0x9a, 0x79, 0x58, - 0x9e, 0xea, 0x73, 0x5a, 0x11, 0x13, 0x79, 0x07, 0x37, 0xd5, 0x3e, 0x50, 0xa0, 0xd2, 0xce, 0x62, - 0xb2, 0xc5, 0x72, 0x1a, 0x06, 0xf1, 0x1c, 0x76, 0x6c, 0x66, 0x2f, 0xf3, 0x7e, 0xd3, 0x5d, 0xb3, - 0x55, 0x17, 0x26, 0x29, 0x8f, 0x63, 0xd3, 0x92, 0xad, 0x7f, 0x9f, 0xe9, 0xea, 0xba, 0xaf, 0x7b, - 0xb5, 0xda, 0xdb, 0xd2, 0x94, 0xa4, 0xa6, 0x8d, 0xbb, 0xef, 0x13, 0x04, 0xb5, 0x5c, 0xaf, 0x7d, - 0x39, 0x0b, 0xda, 0x5d, 0xd4, 0x4a, 0x27, 0xf3, 0xd2, 0x7a, 0x33, 0xf0, 0x77, 0xa1, 0x18, 0xa9, - 0x04, 0x25, 0xf5, 0x5a, 0x37, 0xdb, 0x1d, 0x71, 0x24, 0x6a, 0x8b, 0xdd, 0x96, 0xba, 0xf9, 0xd8, - 0xa9, 0x37, 0xeb, 0x46, 0x14, 0x6e, 0x65, 0x49, 0xb8, 0x35, 0xc6, 0x2a, 0xd6, 0x3b, 0x44, 0x5d, - 0xb9, 0xb4, 0xa8, 0xeb, 0x12, 0x8c, 0x1e, 0x98, 0x4e, 0x68, 0xb8, 0xe8, 0xc0, 0x40, 0xfb, 0xc8, - 0x0d, 0xd9, 0x42, 0x5b, 0xc2, 0xa5, 0x6f, 0xa0, 0x83, 0x57, 0x71, 0x99, 0xba, 0x0b, 0xb3, 0x3c, - 0x38, 0x26, 0x40, 0x2c, 0x3e, 0xa3, 0xcb, 0xd1, 0x20, 0xb1, 0x3c, 0x4f, 0xb7, 0xb1, 0x3c, 0x8c, - 0x55, 0x04, 0x0f, 0x0d, 0xc9, 0xc8, 0xba, 0xc4, 0x03, 0xf9, 0x44, 0x39, 0xb6, 0x19, 0xc1, 0x9e, - 0xd3, 0x10, 0xc1, 0x2e, 0x73, 0xb5, 0x4a, 0xb8, 0x90, 0x87, 0xb8, 0xda, 0x4f, 0x14, 0xb8, 0xd8, - 0x51, 0x2c, 0x4c, 0x8b, 0x96, 0x60, 0x88, 0xfb, 0xe4, 0x4a, 0xda, 0x24, 0x60, 0x95, 0x12, 0x99, - 0x3a, 0x6f, 0xa0, 0xde, 0x86, 0x61, 0xdf, 0x3c, 0x10, 0x3e, 0x7d, 0x86, 0xf8, 0xf4, 0x6d, 0x27, - 0x11, 0x0e, 0xb7, 0x97, 0x6b, 0xde, 0x96, 0x0e, 0xbe, 0x79, 0xc0, 0x70, 0xa5, 0xc9, 0x20, 0x9b, - 0x26, 0x83, 0x39, 0x28, 0xd0, 0xe1, 0x22, 0xba, 0xbd, 0x54, 0xd0, 0xc5, 0xb7, 0xf6, 0x8f, 0x0a, - 0x5c, 0xeb, 0x38, 0x54, 0xec, 0x4d, 0xa1, 0xff, 0xf9, 0x8a, 0xa8, 0x7d, 0x49, 0x81, 0xeb, 0xbd, - 0x0c, 0xf4, 0x04, 0x44, 0xdb, 0xa3, 0x5c, 0xb4, 0xef, 0x64, 0xe0, 0x5c, 0x1b, 0x93, 0xd1, 0x0b, - 0xbb, 0xe3, 0xce, 0x56, 0xe6, 0x68, 0xce, 0x96, 0xec, 0x2d, 0x64, 0x13, 0xde, 0xc2, 0x55, 0x18, - 0xa3, 0xb1, 0x8b, 0x61, 0xed, 0x22, 0x6b, 0x2f, 0x68, 0xd6, 0x09, 0x6b, 0x8b, 0xfa, 0x28, 0x2d, - 0x5e, 0x61, 0xa5, 0xea, 0x23, 0x38, 0x8b, 0xad, 0x61, 0xe4, 0x7b, 0x1b, 0x96, 0xd9, 0x30, 0xc9, - 0xbe, 0x91, 0x83, 0x02, 0xe6, 0xc7, 0xdd, 0xec, 0xa4, 0x0a, 0xc2, 0x3d, 0x5f, 0x91, 0x1a, 0xea, - 0xb3, 0x07, 0xed, 0xaa, 0xb4, 0x6f, 0x15, 0xe0, 0x7c, 0x47, 0x73, 0x8b, 0x7d, 0x1d, 0xea, 0x53, - 0x11, 0x01, 0x28, 0x44, 0x00, 0x64, 0xe0, 0x74, 0x52, 0x7c, 0x16, 0xd4, 0x56, 0x6f, 0xb1, 0x7f, - 0xad, 0x9d, 0x68, 0xf1, 0x17, 0x4f, 0xd2, 0x9f, 0x7e, 0x11, 0x66, 0x1b, 0x3e, 0xda, 0x77, 0xbc, - 0x66, 0x40, 0x17, 0x47, 0x64, 0x33, 0x03, 0xe9, 0xd8, 0x2c, 0xbe, 0x99, 0xe1, 0x00, 0x1b, 0xb4, - 0x9e, 0x18, 0xbc, 0x35, 0x5b, 0x5d, 0x84, 0xf1, 0x96, 0x16, 0x79, 0xd2, 0x62, 0x34, 0x88, 0x43, - 0x96, 0x61, 0xc8, 0x0c, 0x31, 0x6d, 0xd4, 0x49, 0xce, 0xeb, 0xfc, 0x53, 0xfd, 0x04, 0xa8, 0x5b, - 0xa6, 0xb5, 0x57, 
0xf3, 0x76, 0x0c, 0xcb, 0x6b, 0xba, 0xa1, 0xb1, 0xeb, 0xb8, 0xd4, 0xe1, 0xcd, - 0xea, 0xe3, 0xac, 0x66, 0x05, 0x57, 0xac, 0x3a, 0x6e, 0x28, 0x4f, 0x99, 0xc2, 0x09, 0x4c, 0x99, - 0x62, 0x9a, 0x29, 0x5b, 0x82, 0x3c, 0x39, 0x60, 0x21, 0xbe, 0x6b, 0x0b, 0x4f, 0xe9, 0xd1, 0x98, - 0xc4, 0xd2, 0xb7, 0x70, 0x81, 0x4e, 0x9b, 0xa8, 0x3b, 0x70, 0x3e, 0x2d, 0x3e, 0x88, 0xa6, 0xd0, - 0x70, 0x1f, 0x53, 0x68, 0xae, 0x35, 0x4e, 0x10, 0x73, 0xea, 0x2e, 0x8c, 0x72, 0x97, 0xd4, 0x26, - 0x2e, 0x3d, 0xf3, 0x70, 0xe7, 0x5a, 0x9c, 0x99, 0x4d, 0x7e, 0x88, 0xb1, 0x9c, 0xfb, 0x12, 0xf6, - 0x66, 0x46, 0x44, 0x3b, 0x5c, 0xa3, 0xae, 0x40, 0x89, 0xcb, 0x90, 0xa0, 0x19, 0xe9, 0x11, 0xcd, - 0x30, 0x6b, 0x45, 0x90, 0x20, 0x18, 0xc2, 0xe3, 0xc7, 0x13, 0x71, 0x94, 0x2c, 0x32, 0xf7, 0x8e, - 0xe7, 0xc7, 0x54, 0xdf, 0xa2, 0xd8, 0xe8, 0x7e, 0x3c, 0xc7, 0xad, 0xbe, 0x04, 0x05, 0x76, 0x06, - 0x84, 0x9d, 0x65, 0xdc, 0xcf, 0x85, 0x78, 0x3f, 0xfc, 0xa4, 0x8d, 0x7a, 0xdb, 0x04, 0x52, 0x17, - 0x4d, 0xe6, 0xde, 0x83, 0x92, 0x8c, 0x37, 0x65, 0x57, 0x79, 0x49, 0xde, 0x55, 0xee, 0x59, 0xf4, - 0xd1, 0xde, 0xf3, 0x7f, 0x0e, 0xc1, 0x45, 0x3a, 0x06, 0x5b, 0x1e, 0x1b, 0x73, 0x48, 0x91, 0xcd, - 0x8d, 0x6e, 0x17, 0xbb, 0xf1, 0x69, 0x28, 0xb0, 0x73, 0xca, 0xa0, 0xfd, 0xa2, 0x6d, 0xba, 0x36, - 0x39, 0x52, 0xa1, 0x3f, 0x75, 0xd1, 0xa2, 0xa3, 0xb9, 0x35, 0x61, 0x22, 0x08, 0x1d, 0x6b, 0xef, - 0x50, 0x8e, 0x3b, 0x68, 0x0c, 0xfd, 0xbf, 0x3a, 0xe8, 0xe4, 0x06, 0x69, 0x23, 0xd4, 0x30, 0x16, - 0x83, 0x90, 0x2a, 0x29, 0x06, 0x79, 0x01, 0xca, 0x3e, 0x0a, 0x9b, 0xbe, 0x4b, 0xfc, 0xb1, 0xb8, - 0x93, 0x9b, 0xe7, 0xc1, 0x04, 0xae, 0x7f, 0x03, 0x1d, 0xc8, 0x4c, 0x52, 0x97, 0xa1, 0xb2, 0xed, - 0xf9, 0x16, 0x32, 0x2c, 0x1f, 0x99, 0x21, 0x4a, 0x69, 0x3e, 0x48, 0x9a, 0xcf, 0x11, 0xa8, 0x15, - 0x02, 0x94, 0xc4, 0x91, 0xb2, 0x9c, 0x0c, 0xa5, 0x2e, 0x27, 0xff, 0x07, 0x46, 0x88, 0x38, 0x59, - 0x80, 0x11, 0x94, 0x0b, 0x84, 0xcf, 0x0f, 0x7b, 0x38, 0x3f, 0xea, 0x2a, 0xde, 0x2a, 0xd5, 0x0b, - 0x8a, 0x98, 0xaa, 0x70, 0xe9, 0x91, 0x54, 0x14, 0x5f, 0x73, 0x8b, 0xc9, 0x35, 0xf7, 0x5d, 0x1a, - 0xad, 0x48, 0x2b, 0x1d, 0x99, 0x77, 0xcc, 0x1c, 0x5d, 0xef, 0x69, 0x89, 0xdb, 0xc0, 0x2d, 0x68, - 0xe0, 0x12, 0x2f, 0x8b, 0xcd, 0xa1, 0xe1, 0xbe, 0xe7, 0x90, 0xfa, 0x00, 0x4a, 0x81, 0xbd, 0x27, - 0x4e, 0x56, 0x99, 0xd5, 0xb9, 0x15, 0x47, 0x11, 0xd8, 0x7b, 0xb1, 0x45, 0x47, 0xe6, 0xd1, 0x7d, - 0xd6, 0x52, 0x1f, 0x0e, 0xec, 0x3d, 0xfe, 0x81, 0xe3, 0xe1, 0x3a, 0x0a, 0x91, 0xef, 0xb8, 0x3b, - 0x11, 0xee, 0x91, 0xce, 0xf1, 0xf0, 0x7d, 0xd6, 0x40, 0x60, 0x1c, 0xaf, 0x27, 0x4a, 0xe6, 0xf6, - 0x60, 0xa2, 0x45, 0x16, 0x29, 0xd3, 0xfe, 0x95, 0xf8, 0xb4, 0xbf, 0xde, 0xd3, 0xb4, 0x27, 0x28, - 0xe5, 0xc9, 0xff, 0x41, 0x06, 0x2e, 0x75, 0xd6, 0x0e, 0xe6, 0x35, 0x58, 0xf2, 0xe2, 0x8d, 0xf5, - 0x5a, 0x39, 0x91, 0xd8, 0xaf, 0x24, 0xef, 0xd4, 0xa8, 0x08, 0x46, 0xc9, 0x61, 0x29, 0xd9, 0x17, - 0x34, 0x83, 0x3d, 0x6e, 0x49, 0x7a, 0xeb, 0xe5, 0x36, 0x6b, 0xda, 0xda, 0xcb, 0x88, 0x29, 0x55, - 0x05, 0xea, 0xb3, 0x30, 0x43, 0x32, 0x41, 0x8c, 0x78, 0x6c, 0xe5, 0xd8, 0xc4, 0xf4, 0x64, 0xf5, - 0x49, 0x52, 0x2b, 0x07, 0x50, 0x6b, 0xb6, 0xf6, 0xe7, 0x59, 0x58, 0x48, 0xe1, 0xd4, 0x6b, 0x24, - 0x87, 0xa5, 0x47, 0x1b, 0x79, 0x07, 0xf2, 0x24, 0xd9, 0x85, 0xc8, 0x6c, 0x34, 0x79, 0x3a, 0xd0, - 0xb2, 0x69, 0x14, 0xe1, 0x5f, 0xc1, 0xad, 0x74, 0xda, 0x18, 0xfb, 0x13, 0x7c, 0xdb, 0x22, 0xdb, - 0xe3, 0xb6, 0x05, 0x6f, 0x10, 0xb3, 0xb3, 0xb9, 0xee, 0x6e, 0x6d, 0x3e, 0xd5, 0x0e, 0xc5, 0x4c, - 0xc1, 0x60, 0xd2, 0x14, 0xc8, 0x93, 0x75, 0xa8, 0xff, 0xc9, 0xfa, 0x16, 0x8c, 0xc6, 0x2d, 0x09, - 0x73, 0x9a, 0xfa, 0xb1, 0x21, 0x23, 0x31, 
0x1b, 0xa2, 0x5d, 0x84, 0x0b, 0x1d, 0x24, 0xc7, 0xb6, - 0x93, 0xff, 0x95, 0x05, 0x1d, 0x29, 0x5a, 0x74, 0x2a, 0x82, 0x8e, 0x77, 0x61, 0x32, 0xea, 0x20, - 0xb2, 0x37, 0x74, 0x1d, 0xfc, 0x44, 0x2f, 0x3d, 0x09, 0x9b, 0x33, 0x11, 0x26, 0x8b, 0x3e, 0x8a, - 0x48, 0xe5, 0xcb, 0x45, 0x1a, 0xa9, 0xb4, 0x9d, 0xb6, 0xdd, 0x66, 0xd3, 0xd3, 0x52, 0xa4, 0x12, - 0x49, 0x86, 0xee, 0xe3, 0x8b, 0xf0, 0x23, 0x3a, 0x31, 0x3c, 0xc1, 0xf0, 0x23, 0x3d, 0x46, 0xca, - 0x9d, 0x4c, 0x8c, 0x14, 0x59, 0x40, 0x4c, 0x64, 0xbe, 0x33, 0x91, 0x82, 0x79, 0x84, 0x48, 0x53, - 0xfa, 0x52, 0xe7, 0x61, 0x58, 0xa0, 0x72, 0x6c, 0x36, 0x4f, 0x81, 0x17, 0xad, 0xd9, 0xd2, 0xc6, - 0xeb, 0x50, 0x9f, 0x1b, 0xaf, 0xec, 0x1c, 0xa2, 0xd0, 0xdf, 0x39, 0xc4, 0x7d, 0x98, 0xd8, 0x45, - 0xa6, 0x1f, 0x6e, 0x21, 0x33, 0x34, 0x6c, 0x14, 0x9a, 0x4e, 0x2d, 0x20, 0x9e, 0x44, 0x2f, 0x38, - 0xc6, 0x45, 0xd3, 0x3b, 0xb4, 0x65, 0x4a, 0x34, 0x01, 0x47, 0x8b, 0x26, 0x10, 0x54, 0xac, 0xa6, - 0xef, 0xe3, 0x25, 0x80, 0x05, 0x78, 0x46, 0x02, 0xf1, 0x70, 0x8f, 0x88, 0xcf, 0x32, 0x3c, 0xb7, - 0x29, 0x9a, 0x8d, 0x8e, 0x41, 0x4b, 0xe9, 0x28, 0x41, 0x8b, 0x14, 0x93, 0x8e, 0xc4, 0x63, 0xd2, - 0x77, 0x60, 0x96, 0x53, 0x6d, 0x84, 0x9e, 0x61, 0xd5, 0xbc, 0x00, 0x89, 0x73, 0x93, 0xd1, 0xde, - 0x36, 0x8d, 0x67, 0x38, 0x86, 0x4d, 0x6f, 0x05, 0xb7, 0xe7, 0x47, 0x27, 0x9b, 0x30, 0x43, 0xb7, - 0xa0, 0x5b, 0x10, 0x8f, 0xf5, 0x78, 0x20, 0x43, 0x9a, 0x27, 0xb0, 0xbe, 0x2e, 0xeb, 0x03, 0x47, - 0x38, 0xde, 0x1b, 0xc2, 0x48, 0x1d, 0x38, 0xb6, 0xe4, 0xe1, 0xc9, 0xc4, 0xd1, 0x0e, 0x4f, 0xb4, - 0xef, 0x28, 0xa0, 0xe9, 0xc8, 0xf2, 0x7c, 0x5b, 0x36, 0x4c, 0xab, 0xbc, 0xbb, 0x1e, 0x57, 0xfa, - 0x25, 0x18, 0xe2, 0x1a, 0x9e, 0xe9, 0x51, 0xc3, 0x79, 0x83, 0x8e, 0xab, 0x40, 0x6c, 0x11, 0xca, - 0x25, 0x16, 0x21, 0x6d, 0x1d, 0x47, 0x72, 0x1d, 0x48, 0x67, 0x76, 0xf5, 0x1a, 0x8c, 0x5b, 0xa6, - 0x6b, 0xa1, 0x9a, 0xc1, 0x4e, 0x48, 0x10, 0xdd, 0xee, 0x2f, 0xe8, 0x63, 0xb4, 0x5c, 0xe7, 0xc5, - 0xda, 0xbf, 0x2b, 0x70, 0xa5, 0x03, 0xca, 0xe5, 0xc3, 0x35, 0xfb, 0x84, 0x4e, 0x5b, 0xa3, 0x93, - 0x87, 0xac, 0x7c, 0xf2, 0x90, 0xb0, 0x62, 0xb9, 0x16, 0x2b, 0x26, 0x71, 0x3a, 0x7f, 0x1c, 0x4e, - 0x0f, 0xc6, 0x39, 0xad, 0x6d, 0xc2, 0xd5, 0xae, 0x03, 0xef, 0x9f, 0x9f, 0xbf, 0x95, 0x11, 0xc1, - 0xb6, 0x8c, 0xb7, 0xdf, 0x60, 0xfb, 0x93, 0x30, 0xc8, 0x0e, 0x99, 0x7a, 0xd5, 0x2e, 0x06, 0x7f, - 0x74, 0xe5, 0x4a, 0x71, 0xcc, 0xf2, 0xc7, 0x75, 0xcc, 0xae, 0x88, 0xe0, 0xa3, 0x0d, 0x33, 0x98, - 0x6f, 0xf6, 0x73, 0x05, 0x0b, 0xa3, 0x3d, 0xe0, 0x29, 0x50, 0xc3, 0x48, 0x22, 0xf9, 0x63, 0x48, - 0x24, 0xa9, 0x84, 0xd7, 0x61, 0xb1, 0xfb, 0xb8, 0x19, 0x93, 0x9e, 0x64, 0x44, 0x80, 0x22, 0x03, - 0xf7, 0x15, 0xa0, 0x48, 0xa1, 0x45, 0xe6, 0x38, 0xa1, 0x45, 0x7f, 0x9a, 0xc5, 0x8f, 0x50, 0x5b, - 0xbd, 0x83, 0x7c, 0x3f, 0x47, 0xa8, 0xab, 0x49, 0x0f, 0xa1, 0x55, 0x63, 0x07, 0x8f, 0xab, 0xb1, - 0xa6, 0x08, 0x25, 0xd2, 0x78, 0xcc, 0xec, 0xc1, 0xa7, 0xa1, 0xc0, 0x98, 0xc2, 0xb3, 0x1b, 0xbb, - 0xb3, 0x51, 0xb4, 0xd0, 0xbe, 0x97, 0x49, 0x9d, 0x15, 0xb4, 0x8f, 0xd3, 0x61, 0x70, 0xb9, 0x8e, - 0xe4, 0x8f, 0xa3, 0x23, 0x09, 0x5d, 0xef, 0xa0, 0x05, 0x43, 0xc7, 0xd1, 0x02, 0x0d, 0xc1, 0xe5, - 0x2e, 0xec, 0x3c, 0x11, 0xb1, 0xfd, 0x76, 0x06, 0xfb, 0x0d, 0xad, 0x73, 0x95, 0x2c, 0x00, 0xfd, - 0x4c, 0xc0, 0x5f, 0xbc, 0xdf, 0xf0, 0x61, 0x98, 0xf6, 0xcb, 0xe9, 0xeb, 0x9c, 0xe0, 0x06, 0x33, - 0x5a, 0xd4, 0xbf, 0x68, 0x0b, 0xf7, 0x4b, 0xec, 0x5f, 0x5c, 0x4b, 0x5f, 0xd2, 0x62, 0x03, 0x67, - 0x4c, 0xfa, 0x20, 0x83, 0x55, 0x98, 0x70, 0x81, 0xd6, 0x1f, 0x31, 0xe3, 0xed, 0xc3, 0x3b, 0xda, - 0xeb, 0xa4, 0x7a, 0xf1, 0xdc, 0xaa, 0x5c, 0x32, 0xb7, 0xea, 0x59, 
0x98, 0xd9, 0x76, 0xfc, 0x20, - 0x94, 0xef, 0x7e, 0x50, 0x39, 0xd1, 0xcd, 0xa7, 0x49, 0x52, 0x1b, 0xf5, 0x4e, 0xa4, 0x36, 0x83, - 0x57, 0x5b, 0x33, 0x60, 0x16, 0xbd, 0xa8, 0xb3, 0x2f, 0x6d, 0x11, 0x6b, 0x53, 0x67, 0x46, 0x31, - 0x9e, 0xfe, 0x45, 0x16, 0x2a, 0x1b, 0xce, 0x8e, 0x6b, 0x9e, 0x3e, 0x66, 0xce, 0xc3, 0x70, 0x40, - 0x28, 0xa3, 0xb7, 0x48, 0xd8, 0x8d, 0x19, 0x5a, 0x44, 0xae, 0x8f, 0x88, 0x00, 0x3c, 0xd7, 0x5f, - 0x00, 0x2e, 0x4b, 0x29, 0xdf, 0x51, 0x4a, 0x83, 0x49, 0x29, 0x95, 0x61, 0xc8, 0xf2, 0xdc, 0xd0, - 0xf7, 0x6a, 0xec, 0x6c, 0x82, 0x7f, 0x4a, 0xbb, 0x08, 0x85, 0xbe, 0x76, 0x11, 0x5e, 0x82, 0xb3, - 0x24, 0xe1, 0x64, 0x07, 0xb9, 0xc8, 0x37, 0x43, 0x94, 0x38, 0x36, 0xa1, 0x77, 0x51, 0xca, 0x18, - 0xe4, 0x2e, 0x83, 0x88, 0x65, 0x08, 0x5d, 0x80, 0xf9, 0xb6, 0xe2, 0x63, 0x22, 0xfe, 0x2a, 0xc0, - 0x55, 0x06, 0xe3, 0x84, 0xbb, 0x1f, 0xa7, 0x8a, 0x7e, 0x9c, 0x2a, 0xfa, 0xcb, 0x93, 0x2a, 0x9a, - 0xb0, 0x11, 0xa5, 0x16, 0x1b, 0xb1, 0x02, 0x25, 0x06, 0x40, 0x15, 0x61, 0xa4, 0x47, 0x45, 0x60, - 0x68, 0xd7, 0x88, 0x3a, 0x48, 0xb3, 0x7e, 0x34, 0x3e, 0xeb, 0x93, 0xbb, 0x2d, 0x63, 0x27, 0x95, - 0xaa, 0x3a, 0xde, 0x21, 0x55, 0x75, 0xe2, 0x78, 0xa9, 0xaa, 0xea, 0x09, 0xa6, 0xaa, 0x4e, 0xf6, - 0x65, 0xeb, 0x4e, 0x3e, 0x97, 0xb3, 0x9b, 0xf9, 0x9c, 0xee, 0x62, 0x3e, 0x6f, 0xc3, 0x62, 0x77, - 0xd3, 0xd8, 0x31, 0x27, 0x54, 0xfb, 0xa7, 0x0c, 0x9c, 0xd7, 0x51, 0x80, 0xc2, 0x53, 0xb7, 0x80, - 0x46, 0xde, 0x41, 0x56, 0xf6, 0x0e, 0xd4, 0x57, 0xa4, 0x44, 0x17, 0x62, 0x2b, 0xb6, 0x1d, 0xd7, - 0x09, 0x76, 0x93, 0x99, 0x43, 0xb3, 0xb2, 0x49, 0x78, 0x8d, 0x80, 0xf0, 0x94, 0xa0, 0xf8, 0xe4, - 0xcf, 0x27, 0x27, 0xff, 0x03, 0x50, 0xe9, 0xc1, 0xa2, 0x8f, 0xcc, 0x46, 0xa3, 0x76, 0x28, 0xe7, - 0x69, 0x5e, 0x6d, 0x33, 0xed, 0x09, 0x0b, 0x75, 0x0a, 0x4f, 0x96, 0x83, 0x71, 0x3f, 0x51, 0xa2, - 0xbd, 0x00, 0x95, 0x76, 0x8c, 0xee, 0x2c, 0xa2, 0xbf, 0xcc, 0xc0, 0x85, 0x4d, 0xe4, 0xd7, 0x1d, - 0x57, 0x92, 0xff, 0xa9, 0x17, 0x93, 0xe4, 0x71, 0xe7, 0x8e, 0xe3, 0x71, 0x27, 0x5d, 0x9c, 0xf6, - 0x9e, 0xe6, 0x60, 0x5b, 0x4f, 0x53, 0xbb, 0x04, 0x5a, 0x27, 0x0e, 0x32, 0x57, 0xe3, 0x2b, 0x0a, - 0x54, 0xee, 0x90, 0x0b, 0x4c, 0xa7, 0x8d, 0xcb, 0xd8, 0x51, 0x6a, 0x4b, 0x19, 0xa3, 0xfe, 0x8f, - 0xb2, 0x70, 0xe1, 0x75, 0x27, 0x08, 0xdf, 0x6c, 0x20, 0xb7, 0x05, 0x2a, 0xe8, 0x6d, 0x00, 0xa9, - 0xa9, 0xa9, 0x99, 0x9e, 0x53, 0x53, 0x53, 0xf3, 0x73, 0x37, 0x60, 0x82, 0x1d, 0x3b, 0x38, 0x75, - 0x71, 0x35, 0x95, 0xaa, 0x44, 0x62, 0x36, 0xd1, 0x3a, 0x9a, 0xf5, 0x63, 0xfa, 0xe4, 0x4c, 0x80, - 0xdf, 0x4b, 0x0d, 0xe2, 0x05, 0xea, 0xbb, 0x30, 0x1e, 0xc9, 0x9f, 0xe1, 0xa4, 0x5e, 0xd2, 0x8d, - 0x76, 0x38, 0x5b, 0x78, 0x42, 0x51, 0xad, 0x0e, 0xe8, 0x63, 0x28, 0x5e, 0xa4, 0xde, 0x87, 0x61, - 0x3c, 0xe7, 0x39, 0xe2, 0xd4, 0xfd, 0xa6, 0x56, 0xc4, 0x78, 0x96, 0x0b, 0x9c, 0x10, 0x8a, 0xaf, - 0xe5, 0x22, 0x0c, 0x51, 0xe8, 0x40, 0xfb, 0x7d, 0x05, 0xb4, 0x4e, 0x42, 0x12, 0xb7, 0xdd, 0x40, - 0xd0, 0xc4, 0xb7, 0x31, 0x6e, 0xa4, 0xa7, 0x4f, 0xa4, 0x0e, 0x8d, 0x5e, 0x43, 0x8c, 0x50, 0xf4, - 0x7c, 0xa1, 0xf6, 0x67, 0x59, 0xb8, 0x88, 0xe9, 0x23, 0x47, 0x3c, 0xf6, 0xc7, 0x6a, 0x74, 0x8a, - 0xd4, 0x48, 0xbd, 0x07, 0x23, 0x01, 0xb9, 0xef, 0xca, 0x11, 0x0e, 0xa5, 0x85, 0x15, 0xb1, 0xd1, - 0x87, 0xcd, 0x40, 0xa0, 0x2a, 0x05, 0xd2, 0xb7, 0xac, 0x93, 0x7f, 0xa0, 0xc0, 0xa5, 0xce, 0x32, - 0xff, 0xa8, 0xb5, 0xf2, 0x2b, 0x0a, 0x9c, 0xc7, 0x14, 0x1e, 0x55, 0x1f, 0x63, 0x37, 0xec, 0x33, - 0xdd, 0x6f, 0xd8, 0xa7, 0x2a, 0xe0, 0x14, 0x4f, 0xce, 0xa5, 0x9b, 0x1f, 0xf4, 0x43, 0xfb, 0xaa, - 0x02, 0x95, 0x76, 0xa4, 0x7d, 0xd4, 0x6c, 0xfb, 0x86, 0x02, 0x97, 0x31, 0x6d, 0xb7, 0xd9, 
0x75, - 0x88, 0x53, 0xc8, 0xbe, 0x3f, 0x54, 0xe0, 0x4a, 0x37, 0x12, 0x4f, 0x83, 0xf6, 0x6d, 0x58, 0xa6, - 0x7b, 0x4a, 0xb5, 0xaf, 0x1d, 0x69, 0x1f, 0x35, 0xdb, 0x36, 0xa1, 0x42, 0xb2, 0xe7, 0x8f, 0xca, - 0x36, 0x31, 0xe2, 0x8c, 0x3c, 0xe2, 0x17, 0x60, 0xbe, 0x2d, 0x56, 0x36, 0xe2, 0x29, 0xc8, 0x93, - 0x7c, 0x7e, 0x82, 0x32, 0xab, 0xd3, 0x0f, 0xed, 0x1c, 0xcc, 0xdd, 0x45, 0x61, 0x4b, 0x78, 0x48, - 0x49, 0xd1, 0xfe, 0x4e, 0x81, 0xb3, 0xa9, 0xd5, 0xe2, 0xb1, 0x8b, 0xdc, 0x1e, 0x3a, 0xe4, 0xfc, - 0x7b, 0xad, 0x6b, 0x26, 0x63, 0x07, 0x5c, 0xd5, 0x7b, 0xe8, 0x90, 0xe5, 0xe6, 0x12, 0x9c, 0x73, - 0xef, 0x41, 0x51, 0x14, 0xa5, 0xa4, 0x88, 0xbe, 0x24, 0xa7, 0x88, 0xb6, 0x8f, 0x40, 0xd6, 0x5c, - 0x1b, 0x3d, 0x46, 0xf6, 0x43, 0x0c, 0x4a, 0x22, 0x10, 0x29, 0x3f, 0xf4, 0xeb, 0xd1, 0xa1, 0x22, - 0xc9, 0x20, 0x3d, 0xca, 0x61, 0xf5, 0x7d, 0x18, 0xe5, 0xaf, 0x1b, 0xd9, 0x34, 0x22, 0xa2, 0xf4, - 0x5c, 0x69, 0x43, 0x8f, 0x94, 0xaa, 0x4a, 0xc8, 0x19, 0x11, 0xad, 0xd9, 0x06, 0x59, 0x49, 0xce, - 0x82, 0x4e, 0xcf, 0x81, 0x4c, 0xdb, 0xd9, 0x90, 0xd2, 0x99, 0xd5, 0x8b, 0x30, 0x82, 0x7c, 0xdf, - 0xf3, 0x0d, 0x96, 0x77, 0xc8, 0x26, 0x47, 0x89, 0x14, 0xb2, 0xa4, 0xc4, 0xce, 0x79, 0x8e, 0xff, - 0x3b, 0x57, 0xc8, 0x8f, 0x0f, 0x4a, 0xb9, 0x85, 0x69, 0xfc, 0x61, 0x0e, 0xf6, 0xaf, 0x2a, 0x70, - 0x96, 0x44, 0x70, 0x34, 0x27, 0xbd, 0xcf, 0xd4, 0xc2, 0x93, 0xba, 0x3e, 0xa6, 0x55, 0xe0, 0x5c, - 0x3a, 0x15, 0x8c, 0xcc, 0xaf, 0x65, 0x60, 0x8a, 0x8c, 0x82, 0x63, 0xf9, 0x05, 0x5f, 0x6f, 0x13, - 0x97, 0x58, 0xb2, 0xfd, 0x5f, 0x62, 0x31, 0x61, 0x86, 0x6b, 0xc5, 0xfb, 0xc8, 0x0a, 0x0d, 0xcb, - 0x73, 0x6d, 0x47, 0xa4, 0xe5, 0x8d, 0x26, 0x5f, 0x05, 0x49, 0x2a, 0x1b, 0x6e, 0xb3, 0xc2, 0x9b, - 0xe8, 0x53, 0x8f, 0x52, 0x4a, 0xb5, 0x3f, 0x51, 0x60, 0x3a, 0xc1, 0x1e, 0x36, 0xc7, 0x93, 0x2a, - 0xa9, 0x1c, 0x45, 0x25, 0xef, 0xc1, 0xa8, 0x3c, 0x02, 0x64, 0x77, 0xb9, 0xd0, 0x21, 0x51, 0x8e, - 0x6c, 0x7d, 0xe4, 0x91, 0xfc, 0xa9, 0xfd, 0x86, 0x02, 0x0b, 0xfc, 0x51, 0x9a, 0x23, 0x86, 0xa4, - 0x27, 0xa6, 0x76, 0xbf, 0x93, 0x83, 0x0b, 0x1d, 0x68, 0x61, 0x3c, 0x8c, 0x39, 0xd4, 0xec, 0x3d, - 0x0c, 0x25, 0x2d, 0xed, 0xb4, 0xe3, 0x9a, 0xc3, 0x5e, 0xc2, 0x88, 0x1c, 0x6a, 0xf6, 0x0e, 0xc6, - 0x0e, 0x9c, 0x49, 0xd9, 0xd8, 0x96, 0x5e, 0x6a, 0xe9, 0x7b, 0x61, 0x9b, 0x3e, 0x48, 0x2b, 0x56, - 0xdf, 0x05, 0xb5, 0x81, 0x5c, 0xdb, 0x71, 0x77, 0x0c, 0x76, 0x88, 0xe8, 0xa0, 0xa0, 0x9c, 0x25, - 0xc6, 0xff, 0xe9, 0xf6, 0x7d, 0xac, 0xd3, 0x36, 0xfc, 0x28, 0x90, 0xf4, 0x30, 0xd1, 0x88, 0x15, - 0x3a, 0x28, 0x50, 0x3f, 0x0f, 0xe3, 0x1c, 0xbb, 0xb5, 0xeb, 0xd4, 0x6c, 0x9f, 0xdc, 0xea, 0xc4, - 0xb8, 0x9f, 0xed, 0x8a, 0x7b, 0x05, 0x37, 0x88, 0x8f, 0x61, 0xac, 0x21, 0x55, 0xf9, 0xc8, 0x55, - 0x11, 0x4c, 0x73, 0xfc, 0xad, 0x97, 0x60, 0x3a, 0x4a, 0x82, 0x75, 0x22, 0x6f, 0x32, 0x92, 0x2e, - 0x26, 0x1b, 0xad, 0x15, 0xda, 0x17, 0x33, 0xd1, 0xeb, 0x4b, 0x1f, 0x45, 0x9e, 0xf5, 0xeb, 0x30, - 0x26, 0xe5, 0x52, 0x8b, 0x03, 0x9b, 0xf6, 0xef, 0x86, 0x09, 0x2c, 0x74, 0x41, 0x0a, 0xe5, 0x4f, - 0xf5, 0x45, 0x98, 0x75, 0x5c, 0xab, 0xd6, 0xb4, 0x91, 0x74, 0x69, 0xce, 0xa0, 0x31, 0x13, 0xbb, - 0x57, 0x3c, 0xc3, 0x00, 0x04, 0x1e, 0x1a, 0x61, 0x69, 0x7f, 0xa6, 0x44, 0x6f, 0x47, 0xb5, 0xe6, - 0x3f, 0xbf, 0x0c, 0x43, 0x0d, 0xaf, 0x56, 0x43, 0x3e, 0xf7, 0x1e, 0x2e, 0x77, 0x18, 0xe8, 0x3a, - 0x81, 0x24, 0x3c, 0xe7, 0xad, 0xd4, 0x87, 0x30, 0xd1, 0x4a, 0x51, 0xea, 0x7d, 0x91, 0x74, 0x9e, - 0xf1, 0x47, 0x92, 0xc2, 0x04, 0xd9, 0x67, 0x60, 0xfa, 0x2e, 0x0a, 0xd9, 0x8b, 0x76, 0xa4, 0x4b, - 0xe6, 0x0c, 0xfd, 0x6e, 0x0e, 0x66, 0x92, 0x35, 0x6c, 0x30, 0x5f, 0x80, 0x89, 0xa0, 0xd9, 0x68, - 0x78, 0x24, 0x01, 
0xd6, 0xaa, 0x39, 0xc8, 0x0d, 0xf9, 0xb0, 0xee, 0xf7, 0xe2, 0x14, 0xa5, 0xe0, - 0xac, 0x6e, 0x70, 0x84, 0x2b, 0x14, 0x1f, 0xf5, 0x8d, 0xc6, 0x83, 0x44, 0x31, 0x7d, 0x6f, 0xc6, - 0x97, 0xdf, 0xc0, 0x11, 0xef, 0xcd, 0xf8, 0xd2, 0x0b, 0x38, 0xe7, 0x01, 0xf8, 0xfb, 0x7d, 0x22, - 0x4d, 0xa0, 0xc8, 0x4a, 0xd6, 0x6c, 0xf5, 0x2e, 0x94, 0x78, 0x72, 0x3c, 0x31, 0x1c, 0xa9, 0x67, - 0x73, 0x0c, 0x02, 0x13, 0xcd, 0xd0, 0x12, 0x8a, 0x87, 0xf7, 0xa3, 0x0f, 0xf5, 0x02, 0x94, 0x62, - 0xef, 0x04, 0xd2, 0x8d, 0xca, 0x61, 0x4b, 0x7a, 0x21, 0xb0, 0x0a, 0x93, 0xfc, 0x72, 0x4a, 0xb0, - 0x6b, 0xfa, 0x36, 0xbd, 0x67, 0xca, 0xee, 0xa1, 0x4e, 0xb0, 0xaa, 0x0d, 0x5c, 0x43, 0x7c, 0x5a, - 0xf2, 0x5a, 0x12, 0xee, 0x21, 0x08, 0x91, 0x6b, 0x61, 0x51, 0x7b, 0x3e, 0x62, 0x27, 0xb5, 0xe3, - 0x52, 0xc5, 0x06, 0x2e, 0x57, 0xaf, 0xc1, 0xb8, 0xf4, 0xbc, 0x1d, 0x85, 0xa5, 0xef, 0x04, 0x8e, - 0x45, 0xe5, 0x04, 0x74, 0x6e, 0x05, 0xa6, 0x53, 0x99, 0xdc, 0xd7, 0xeb, 0x76, 0x33, 0x30, 0x85, - 0xbd, 0xda, 0xc3, 0x20, 0x44, 0x75, 0x59, 0x5b, 0xbe, 0x91, 0x27, 0x7a, 0x24, 0x57, 0x30, 0x65, - 0x69, 0x15, 0x98, 0x92, 0x26, 0xb0, 0x1d, 0x28, 0xc5, 0xae, 0x29, 0x50, 0xd5, 0x5e, 0xe9, 0xc9, - 0xc7, 0x6e, 0xe9, 0xb4, 0x1a, 0xbb, 0xb8, 0x10, 0x43, 0x3c, 0xf7, 0xf3, 0x2c, 0x94, 0xe4, 0x6a, - 0xf5, 0x39, 0x38, 0xc3, 0x8e, 0xd7, 0x4c, 0xd7, 0x36, 0xe8, 0xba, 0xcd, 0x8e, 0x86, 0x68, 0xe6, - 0xe7, 0x14, 0xad, 0xbe, 0xcd, 0xfc, 0x42, 0x7a, 0x20, 0xa4, 0xde, 0x81, 0x8a, 0xe3, 0x86, 0xc8, - 0xc7, 0x0d, 0xa9, 0xff, 0x69, 0x3b, 0xdb, 0xdb, 0xc8, 0x47, 0x6e, 0xe8, 0x98, 0x62, 0xa1, 0x2d, - 0xe8, 0xe7, 0x38, 0xd4, 0xab, 0x18, 0xe8, 0x4e, 0x1c, 0x46, 0xbd, 0x07, 0x9a, 0xc8, 0x59, 0x61, - 0xf9, 0x47, 0x06, 0xb7, 0x40, 0x22, 0x7b, 0x8a, 0xe8, 0x6f, 0x41, 0x9f, 0xe7, 0x90, 0x2c, 0x63, - 0x69, 0x8d, 0xc2, 0x89, 0x2c, 0x29, 0xf5, 0x69, 0x50, 0xd9, 0x7c, 0x09, 0xc4, 0x29, 0x1c, 0x37, - 0x5b, 0x7c, 0xc6, 0x06, 0xfc, 0x28, 0x2e, 0x50, 0x3f, 0x0d, 0x73, 0xc8, 0xb5, 0x3c, 0x3b, 0x7a, - 0x3b, 0x45, 0x3e, 0x62, 0xa3, 0x77, 0x25, 0xcb, 0x0c, 0x82, 0x75, 0x29, 0x1d, 0xa0, 0xbd, 0x08, - 0xb3, 0x5b, 0x4d, 0xa7, 0x66, 0x1b, 0x8e, 0x6d, 0x6c, 0x99, 0x01, 0xb2, 0xb9, 0x7c, 0x1d, 0x77, - 0x87, 0xdd, 0x94, 0x9c, 0x21, 0x00, 0x6b, 0xf6, 0x32, 0xae, 0x7e, 0x28, 0x6a, 0xd5, 0x79, 0x18, - 0x6e, 0x36, 0x02, 0xe4, 0x87, 0x06, 0x39, 0x0b, 0xa4, 0xcf, 0x53, 0x00, 0x2d, 0xba, 0x8f, 0xea, - 0x9e, 0xfa, 0x0c, 0x4c, 0x25, 0xde, 0x28, 0x21, 0x7b, 0x76, 0x44, 0xb3, 0x0b, 0xba, 0x1a, 0x7b, - 0x64, 0x84, 0x6c, 0xef, 0xe1, 0x79, 0x18, 0xbb, 0x17, 0x48, 0x73, 0x0e, 0xe4, 0x3b, 0x7e, 0xda, - 0xaf, 0xb0, 0x4d, 0x1a, 0x61, 0x12, 0xd7, 0x4d, 0x3f, 0x74, 0xfa, 0x88, 0x45, 0x4f, 0x62, 0xbd, - 0xd2, 0xbe, 0x9a, 0x81, 0xf9, 0xb6, 0x54, 0x08, 0xfb, 0x5a, 0x89, 0xdd, 0x9d, 0x63, 0x46, 0xbf, - 0x21, 0x20, 0x99, 0xb1, 0x7d, 0xae, 0x97, 0xce, 0x05, 0x7e, 0x71, 0x67, 0xe8, 0xac, 0x99, 0xbc, - 0xa6, 0x13, 0xd1, 0x80, 0xfb, 0x8e, 0x1f, 0xac, 0xb5, 0xf4, 0x9d, 0x39, 0x56, 0xdf, 0x07, 0xc9, - 0xfb, 0x83, 0x51, 0xdf, 0xda, 0x37, 0xb3, 0x30, 0x4d, 0xef, 0xd5, 0x72, 0x25, 0xed, 0x39, 0xa7, - 0x43, 0xdc, 0x97, 0x88, 0x72, 0x3a, 0x78, 0xd1, 0x9a, 0xad, 0x7e, 0x06, 0x0a, 0xe2, 0x54, 0x9a, - 0x06, 0x24, 0x5a, 0xe2, 0xc6, 0x28, 0xab, 0x25, 0x3b, 0xa6, 0xbc, 0x6f, 0xd1, 0x46, 0x7d, 0x1d, - 0x46, 0x1c, 0xd7, 0x09, 0x1d, 0xb3, 0x66, 0x34, 0xcc, 0xd0, 0xda, 0x4d, 0xdf, 0x72, 0x4e, 0x43, - 0xb2, 0x8e, 0xc1, 0xf5, 0x12, 0x6b, 0x4d, 0xbe, 0x8e, 0x93, 0xbb, 0xc3, 0x8f, 0xcf, 0x87, 0x8e, - 0x77, 0x7c, 0x5e, 0x38, 0xee, 0xf1, 0xb9, 0xf6, 0x32, 0xcc, 0x24, 0x25, 0x15, 0xd9, 0x7b, 0xec, - 0xf2, 0xd7, 0x1c, 0x2b, 0x8c, 0xed, 0x22, 
0x8c, 0xf0, 0x52, 0xba, 0x31, 0xf4, 0x59, 0x38, 0xc3, - 0xbd, 0xa5, 0x93, 0x15, 0xb6, 0xf6, 0x57, 0x92, 0x47, 0xda, 0x42, 0x9d, 0xac, 0x09, 0xca, 0x11, - 0x34, 0x61, 0x09, 0x72, 0x52, 0xa4, 0x71, 0xa5, 0x7b, 0x5b, 0xb2, 0x2c, 0x91, 0x36, 0x42, 0x78, - 0xd9, 0xe3, 0x09, 0x2f, 0x77, 0xec, 0xdc, 0x87, 0x56, 0x11, 0xe5, 0xd3, 0x44, 0xf4, 0x1f, 0x0a, - 0x4c, 0xd3, 0x97, 0x18, 0x4f, 0xd9, 0x74, 0x6c, 0xa5, 0x3f, 0x97, 0x42, 0xff, 0x31, 0xe6, 0x99, - 0x56, 0xe6, 0x2f, 0x87, 0x26, 0x15, 0x48, 0xfb, 0x1b, 0x05, 0xa6, 0xc8, 0x34, 0x3e, 0x61, 0x9e, - 0xbc, 0x04, 0x79, 0x6a, 0x5a, 0xb2, 0xfd, 0x99, 0x16, 0xda, 0xaa, 0xe3, 0x65, 0xe0, 0xce, 0x99, - 0x0e, 0x38, 0x02, 0x48, 0x0c, 0x88, 0x0d, 0xf5, 0x87, 0x0a, 0x2c, 0xe0, 0xa5, 0x8a, 0x57, 0xdc, - 0xc7, 0x50, 0x8e, 0xbb, 0x43, 0x6e, 0xb5, 0x9d, 0xd0, 0xb0, 0x5f, 0x06, 0x88, 0x0e, 0xf4, 0xd8, - 0xd8, 0xbb, 0xdf, 0xa3, 0x2b, 0x8a, 0x23, 0x3c, 0xf5, 0x53, 0x50, 0x40, 0x2e, 0xbb, 0x86, 0x97, - 0xeb, 0xb1, 0xf9, 0x10, 0x72, 0xc9, 0x15, 0x3c, 0xcd, 0xa6, 0x87, 0xe5, 0x6d, 0x06, 0x28, 0x42, - 0x37, 0x99, 0x44, 0xba, 0xf2, 0xf6, 0x43, 0xa2, 0xe6, 0xc3, 0x34, 0x3d, 0xb6, 0x3f, 0x61, 0x95, - 0xe9, 0x90, 0xa9, 0x8b, 0x15, 0x38, 0xd9, 0x27, 0x93, 0xea, 0xaf, 0x2b, 0x30, 0x25, 0x0f, 0xfa, - 0xa3, 0x3b, 0xcd, 0xd5, 0x7e, 0x93, 0xbd, 0x86, 0x2d, 0x91, 0xc2, 0x78, 0xbe, 0x06, 0xc5, 0xc8, - 0x81, 0x55, 0xd2, 0xde, 0x02, 0x4e, 0x9b, 0x11, 0x18, 0x17, 0x8d, 0x1b, 0xa3, 0xd6, 0x3d, 0x9f, - 0x54, 0xfc, 0xcb, 0x20, 0x5c, 0xa5, 0x73, 0x9e, 0x66, 0xc4, 0x2f, 0x53, 0xdf, 0x75, 0xc5, 0xab, - 0x37, 0xcc, 0x90, 0x05, 0x52, 0xbd, 0xb1, 0xea, 0x7c, 0x8b, 0x9f, 0x58, 0x94, 0x77, 0x2c, 0xee, - 0xc1, 0x45, 0xd3, 0xb6, 0xc9, 0xf3, 0x22, 0xc2, 0x81, 0x76, 0xe8, 0x6b, 0x25, 0x36, 0xda, 0x36, - 0x9b, 0xb5, 0xd0, 0x08, 0x10, 0xf5, 0xf9, 0x8b, 0xab, 0x03, 0xfa, 0x39, 0xd3, 0xb6, 0xdf, 0x40, - 0x07, 0x8c, 0x9c, 0x35, 0xf7, 0x0d, 0x74, 0x70, 0x87, 0x82, 0x6d, 0xa0, 0x50, 0xfd, 0x86, 0x02, - 0x67, 0x39, 0x36, 0x8b, 0x91, 0x5a, 0x43, 0x02, 0x31, 0x9b, 0x12, 0xef, 0x75, 0x0d, 0xa3, 0x7a, - 0x1c, 0x79, 0xf5, 0x36, 0x21, 0x66, 0x45, 0xf4, 0xc4, 0xbc, 0xfa, 0xd5, 0x01, 0xfd, 0x8c, 0x99, - 0xa8, 0x62, 0x68, 0xd4, 0xe7, 0xe1, 0x0c, 0x7f, 0xfd, 0x37, 0x40, 0xa1, 0xb1, 0x75, 0x18, 0x51, - 0x97, 0x67, 0x63, 0x9c, 0x64, 0x00, 0x1b, 0x28, 0x5c, 0x3e, 0xe4, 0xed, 0x3e, 0x03, 0x67, 0x79, - 0x3b, 0xc1, 0xa7, 0x03, 0x27, 0xdc, 0x75, 0x5c, 0xc2, 0x9f, 0x41, 0xd6, 0x96, 0x23, 0x67, 0xcd, - 0xde, 0x26, 0x10, 0x98, 0x35, 0x01, 0x40, 0x1d, 0xf9, 0x3b, 0xa4, 0x57, 0x7e, 0xf1, 0x44, 0x3f, - 0x31, 0x46, 0xdc, 0xc7, 0xa8, 0x37, 0x50, 0x18, 0xac, 0x0e, 0xe8, 0xc5, 0x3a, 0xff, 0x98, 0xfb, - 0x63, 0x05, 0xce, 0xb4, 0xe1, 0x91, 0xba, 0x00, 0x25, 0x59, 0xe8, 0x4c, 0x71, 0xc0, 0x15, 0xc2, - 0x55, 0x5f, 0x86, 0x73, 0xe8, 0xb1, 0x13, 0x84, 0x64, 0x53, 0x30, 0x45, 0x9a, 0x54, 0x97, 0x66, - 0x39, 0x4c, 0x2b, 0xaf, 0x17, 0x61, 0xbc, 0x6e, 0xee, 0x51, 0x46, 0x33, 0x65, 0x62, 0xc1, 0xe3, - 0x28, 0x2e, 0xdf, 0x40, 0x21, 0xd3, 0x9d, 0xb9, 0x47, 0x50, 0x14, 0x43, 0x50, 0x6f, 0xc0, 0x54, - 0xc3, 0x77, 0xea, 0xa6, 0x7f, 0x48, 0x45, 0x14, 0xa7, 0x70, 0x82, 0xd5, 0x61, 0xe9, 0xb0, 0x7e, - 0x9e, 0x85, 0x99, 0x00, 0x59, 0x9e, 0x6b, 0xb7, 0x34, 0xa1, 0x24, 0x4e, 0x8a, 0xda, 0xa8, 0xd1, - 0xf2, 0x30, 0x14, 0xc5, 0x5f, 0xd3, 0x68, 0xeb, 0xb0, 0xd8, 0x9d, 0xd5, 0xcc, 0x1a, 0x5c, 0x82, - 0x51, 0xf1, 0x14, 0x0d, 0x0a, 0x23, 0xc2, 0xf8, 0x1e, 0xce, 0x06, 0xc2, 0xeb, 0xd8, 0xff, 0x85, - 0x4b, 0xec, 0xf1, 0xbb, 0x0f, 0x71, 0xf2, 0xce, 0x42, 0xa1, 0x6e, 0x3e, 0xa6, 0x2a, 0x45, 0x5f, - 0xeb, 0x1b, 0xaa, 0x9b, 0x8f, 0x31, 0x13, 0xb5, 0x2f, 0x2a, 0x70, 
0xb9, 0x0b, 0x01, 0x6c, 0x3c, - 0x9f, 0x03, 0xb5, 0x6e, 0xbe, 0xef, 0x49, 0x0f, 0xec, 0xa0, 0xb0, 0xcd, 0xa9, 0x6c, 0x2c, 0xae, - 0x6a, 0x51, 0xa9, 0x0d, 0x14, 0xea, 0xe3, 0x04, 0x55, 0x54, 0x10, 0x68, 0xdf, 0x55, 0x60, 0x41, - 0x10, 0x82, 0xe3, 0x2c, 0x1d, 0x99, 0xd6, 0xae, 0xd9, 0x0f, 0x17, 0xce, 0x42, 0x91, 0x4b, 0x94, - 0x06, 0x7c, 0x45, 0xbd, 0xc0, 0x82, 0xf9, 0x00, 0x2f, 0x4c, 0x11, 0x8b, 0xe8, 0x86, 0x78, 0x51, - 0x07, 0xc1, 0xa3, 0x40, 0xbd, 0x07, 0x25, 0x5f, 0xea, 0x92, 0x1d, 0xdb, 0x5c, 0xed, 0xb0, 0x21, - 0x1b, 0xa3, 0x30, 0xd6, 0x58, 0xfb, 0x35, 0x05, 0x2e, 0x74, 0x18, 0x0d, 0x63, 0xe9, 0x16, 0x4c, - 0x0b, 0x23, 0x11, 0xeb, 0x9b, 0x72, 0xb5, 0xda, 0x81, 0xab, 0x4c, 0x54, 0x31, 0xb4, 0x93, 0x5b, - 0xad, 0x85, 0xda, 0xf7, 0x32, 0x50, 0x89, 0x74, 0xf6, 0x54, 0xe5, 0x5f, 0xb6, 0xcf, 0x87, 0xcc, - 0xb6, 0xbf, 0x79, 0xb3, 0x02, 0xc3, 0xe4, 0xdd, 0x52, 0x96, 0xf7, 0x9d, 0x4b, 0xf3, 0xc4, 0xe9, - 0x23, 0xf6, 0x84, 0x0e, 0xd3, 0x09, 0x59, 0xda, 0x37, 0x1c, 0x88, 0xdf, 0xea, 0x8b, 0x30, 0xc4, - 0x5c, 0x49, 0x76, 0xb0, 0x30, 0xdf, 0x0e, 0x01, 0xe3, 0x91, 0xce, 0xe1, 0xb5, 0xaf, 0x2b, 0x30, - 0xdf, 0x96, 0x9f, 0x4c, 0xae, 0xaf, 0x00, 0x7b, 0x47, 0xdf, 0xf0, 0xd1, 0x36, 0x8b, 0xd8, 0x2e, - 0xb4, 0xeb, 0x81, 0x22, 0xd3, 0xd1, 0xb6, 0x5e, 0x6c, 0xf2, 0x9f, 0x98, 0x40, 0xaf, 0x19, 0x5a, - 0x5e, 0x9d, 0x6f, 0xd9, 0xb4, 0x25, 0xf0, 0x4d, 0x0a, 0xa6, 0x73, 0x78, 0xed, 0x6f, 0xf3, 0x30, - 0x47, 0xb6, 0x97, 0x96, 0xb1, 0x63, 0xf8, 0x26, 0xb7, 0x5d, 0xbd, 0x09, 0x3b, 0xbe, 0x33, 0x2b, - 0x27, 0x31, 0x48, 0x3b, 0xb3, 0x64, 0x43, 0x51, 0x9d, 0x86, 0xc1, 0xf7, 0xbd, 0x2d, 0xe9, 0x3e, - 0xdb, 0xfb, 0xde, 0x56, 0xec, 0x66, 0x54, 0x2e, 0x96, 0x54, 0xbb, 0x16, 0x4b, 0xe6, 0xa0, 0xff, - 0x7b, 0xd3, 0x87, 0xfa, 0xc8, 0x69, 0x1c, 0x3b, 0x30, 0x1d, 0xb2, 0x94, 0x58, 0xac, 0x33, 0xc2, - 0x3c, 0xb3, 0xf7, 0x37, 0x9e, 0x89, 0x63, 0x25, 0xff, 0x84, 0x44, 0xa6, 0x4c, 0x8c, 0x1f, 0x9b, - 0x11, 0x86, 0xd5, 0x01, 0x7d, 0x4a, 0x42, 0x28, 0x40, 0xd4, 0xcf, 0xc2, 0x38, 0xdb, 0x64, 0x8d, - 0xfa, 0xa0, 0x4f, 0x71, 0x3c, 0xd5, 0x53, 0x1f, 0x34, 0xc3, 0x7d, 0x75, 0x40, 0x1f, 0xa3, 0x68, - 0x22, 0xcc, 0xef, 0xc3, 0x0c, 0xbd, 0x99, 0x5f, 0x4b, 0x8e, 0xa1, 0x94, 0x76, 0xd0, 0xd5, 0x06, - 0xff, 0x8a, 0x84, 0x62, 0x75, 0x40, 0x9f, 0x96, 0x51, 0x46, 0x7d, 0x7d, 0x1e, 0x54, 0xf2, 0xdf, - 0x06, 0xf1, 0x7e, 0xe8, 0x7d, 0x8c, 0xa7, 0x7b, 0xea, 0xe7, 0x0e, 0x6b, 0xbe, 0x3a, 0xa0, 0x4f, - 0x70, 0x54, 0x11, 0xfe, 0x07, 0x30, 0x46, 0x93, 0xce, 0x23, 0xe4, 0xa3, 0x69, 0x47, 0x3c, 0x6d, - 0x90, 0x93, 0x8c, 0x80, 0xd5, 0x01, 0x7d, 0x94, 0x20, 0x11, 0xc5, 0xf1, 0x85, 0xf7, 0x3c, 0x9c, - 0x4d, 0xd5, 0xe9, 0x28, 0xbf, 0x61, 0x76, 0x23, 0xf4, 0x1a, 0x47, 0x51, 0xf9, 0x48, 0x8f, 0x33, - 0xe9, 0x7a, 0x1c, 0x4f, 0x0e, 0xef, 0x10, 0xb3, 0x6a, 0xe7, 0xf0, 0xcc, 0x6b, 0xa5, 0x82, 0x11, - 0xb9, 0x09, 0xe7, 0xf9, 0x0e, 0xcf, 0xc9, 0xd1, 0xa9, 0x7d, 0x90, 0x83, 0x4a, 0x3b, 0xb4, 0xcc, - 0x1c, 0xad, 0xc3, 0xa8, 0xe0, 0x24, 0x3d, 0x6c, 0x54, 0xc8, 0xda, 0x76, 0xad, 0xcd, 0xda, 0x96, - 0x98, 0x28, 0xe4, 0xc4, 0xd1, 0x93, 0x3f, 0xdb, 0xf1, 0xec, 0x15, 0xc8, 0xd3, 0x3f, 0x41, 0xa2, - 0x87, 0x99, 0xd7, 0x7b, 0xc2, 0x4f, 0xff, 0x0a, 0x89, 0x36, 0x4c, 0x84, 0xad, 0xb9, 0xfe, 0x23, - 0xeb, 0x97, 0x01, 0xa2, 0x07, 0x62, 0x98, 0x71, 0xef, 0x01, 0x81, 0xc5, 0xdf, 0x85, 0x51, 0x6f, - 0xc1, 0x74, 0xe8, 0x85, 0xf2, 0x94, 0x97, 0x8e, 0xbe, 0xb2, 0xfa, 0x24, 0xa9, 0x8c, 0x66, 0x20, - 0x39, 0xfc, 0xfa, 0x24, 0x94, 0xc5, 0xdf, 0xa7, 0x25, 0x9b, 0xd1, 0x47, 0x59, 0x67, 0x78, 0x7d, - 0xa2, 0xe5, 0xf3, 0x70, 0x86, 0x9f, 0x62, 0x24, 0x1b, 0x16, 0x48, 0xc3, 0x69, 0x56, 0x9d, 
0x68, - 0xd7, 0xe9, 0x82, 0x58, 0xa4, 0xb9, 0x10, 0xbb, 0x9b, 0xfa, 0xff, 0x60, 0x0e, 0xc7, 0x9a, 0x71, - 0xee, 0xff, 0x02, 0x13, 0x01, 0xb5, 0xdf, 0x53, 0xe0, 0x6c, 0x2a, 0x05, 0x69, 0x7a, 0xca, 0xfe, - 0x28, 0x25, 0x65, 0x99, 0x68, 0x63, 0x47, 0xe8, 0x3f, 0xa5, 0x78, 0xf2, 0x67, 0xcf, 0x61, 0xf4, - 0x3f, 0x28, 0xa0, 0xc9, 0xef, 0x04, 0x8a, 0x75, 0x87, 0x2f, 0xce, 0xbd, 0xf0, 0x28, 0xbe, 0xea, - 0x67, 0x8e, 0xb0, 0xea, 0x77, 0xba, 0xc5, 0x7c, 0x12, 0x7e, 0x8f, 0xf6, 0x1e, 0x5c, 0xec, 0x38, - 0x4c, 0x26, 0x08, 0xc9, 0xfb, 0x50, 0xfa, 0xf3, 0x3e, 0x96, 0xff, 0x4d, 0xf9, 0xfe, 0x8f, 0x2a, - 0x03, 0x3f, 0xf8, 0x51, 0x65, 0xe0, 0xa7, 0x3f, 0xaa, 0x28, 0xff, 0xff, 0x49, 0x45, 0xf9, 0xe6, - 0x93, 0x8a, 0xf2, 0xd7, 0x4f, 0x2a, 0xca, 0xf7, 0x9f, 0x54, 0x94, 0xbf, 0x7f, 0x52, 0x51, 0x7e, - 0xf2, 0xa4, 0x32, 0xf0, 0xd3, 0x27, 0x15, 0xe5, 0x4b, 0x3f, 0xae, 0x0c, 0x7c, 0xff, 0xc7, 0x95, - 0x81, 0x1f, 0xfc, 0xb8, 0x32, 0x00, 0x9a, 0xe3, 0x75, 0x0b, 0x72, 0x97, 0xa7, 0xb8, 0x2f, 0xc6, - 0xc8, 0x5c, 0xc7, 0xb3, 0x79, 0x5d, 0x79, 0xe7, 0xb9, 0x1d, 0xa9, 0xad, 0xe3, 0xb5, 0xf9, 0xd7, - 0xd2, 0x4f, 0x25, 0x8a, 0xfe, 0x34, 0x73, 0x61, 0x93, 0x35, 0x72, 0xbc, 0xea, 0xed, 0x86, 0x23, - 0x9c, 0x8e, 0x0d, 0xd6, 0xe5, 0xc3, 0x9b, 0xff, 0x9c, 0xb9, 0x14, 0xc1, 0x2c, 0x2d, 0xdd, 0x6e, - 0x38, 0x4b, 0x4b, 0x09, 0xa8, 0xa5, 0xa5, 0x87, 0x37, 0xb7, 0x06, 0x89, 0x61, 0x79, 0xf6, 0xbf, - 0x03, 0x00, 0x00, 0xff, 0xff, 0xd3, 0xdd, 0x31, 0x84, 0x3a, 0x75, 0x00, 0x00, + 0x75, 0x28, 0x7b, 0x1e, 0xe4, 0xcc, 0xe1, 0xf0, 0xd5, 0x7c, 0xec, 0x90, 0xdc, 0x1d, 0xee, 0xf6, + 0xbe, 0x57, 0xd6, 0xac, 0x76, 0x75, 0x25, 0x59, 0x6b, 0xcb, 0xd2, 0x92, 0x2b, 0x2d, 0xe9, 0xd5, + 0x4a, 0xdc, 0xe6, 0xee, 0xca, 0x10, 0x64, 0xb7, 0x9a, 0xdd, 0xc5, 0x61, 0x8b, 0x33, 0xdd, 0xb3, + 0xdd, 0x3d, 0xe4, 0xd2, 0x17, 0xb8, 0xf7, 0x02, 0x37, 0x70, 0x9e, 0x40, 0x04, 0xc4, 0x40, 0x6c, + 0xc0, 0x71, 0x9c, 0x7c, 0x44, 0x4e, 0x7e, 0xfc, 0xe1, 0x7c, 0xc4, 0x40, 0x02, 0xe4, 0x2b, 0x08, + 0x60, 0x18, 0x31, 0xf2, 0x65, 0x20, 0x1f, 0x8e, 0xd7, 0x88, 0x63, 0x04, 0x41, 0x60, 0xe7, 0x23, + 0x09, 0xf2, 0x93, 0xa0, 0x9e, 0x5d, 0xdd, 0xd3, 0xf3, 0x22, 0x29, 0x8b, 0x31, 0xf4, 0x37, 0x5d, + 0x75, 0xea, 0xd4, 0xa9, 0x73, 0x4e, 0x9d, 0x3a, 0xa7, 0xea, 0x54, 0x0d, 0x3c, 0x1f, 0xa2, 0x46, + 0xd3, 0xf3, 0xcd, 0xfa, 0x55, 0xb3, 0xe9, 0x5c, 0xdd, 0xf3, 0xfc, 0x9d, 0xad, 0xba, 0xb7, 0x17, + 0x20, 0x7f, 0xd7, 0xb1, 0xd0, 0xd5, 0xdd, 0x6b, 0x57, 0x7d, 0xf4, 0xa8, 0x85, 0x82, 0xd0, 0xf0, + 0x51, 0xd0, 0xf4, 0xdc, 0x00, 0x55, 0x9b, 0xbe, 0x17, 0x7a, 0xea, 0x12, 0x6f, 0x57, 0x35, 0x9b, + 0x4e, 0x35, 0xd1, 0xae, 0xba, 0x7b, 0x6d, 0xe1, 0xa9, 0x18, 0x62, 0xe4, 0xb6, 0x1a, 0x01, 0x46, + 0xb7, 0x69, 0x86, 0xd6, 0xb6, 0xe1, 0x35, 0x91, 0x6f, 0x86, 0x8e, 0xe7, 0x52, 0x6c, 0x0b, 0xe7, + 0xd2, 0x81, 0x39, 0x5a, 0x06, 0x75, 0x3e, 0x1d, 0xca, 0x35, 0x1b, 0x28, 0x68, 0x9a, 0x16, 0x23, + 0x6d, 0xe1, 0x52, 0x3a, 0xd8, 0x96, 0xe9, 0xd4, 0x91, 0x6d, 0x58, 0x66, 0x8b, 0x0f, 0x62, 0x41, + 0x4b, 0x87, 0xb4, 0xbc, 0x46, 0x43, 0x90, 0x76, 0x26, 0x1d, 0xe6, 0x51, 0x0b, 0xf9, 0xfb, 0xdd, + 0x41, 0x7c, 0x14, 0xa0, 0x90, 0x81, 0x5c, 0x48, 0x07, 0x09, 0xcd, 0x60, 0xc7, 0x78, 0xd4, 0x42, + 0x2d, 0x94, 0xca, 0x08, 0x4a, 0x08, 0x06, 0x6c, 0xa0, 0x20, 0x30, 0x6b, 0x28, 0x95, 0x11, 0xdb, + 0x4e, 0x10, 0x7a, 0xfe, 0x7e, 0x3b, 0xd8, 0x85, 0x54, 0xd9, 0xf6, 0x42, 0x87, 0x3b, 0x35, 0x5d, + 0xbb, 0x17, 0x18, 0x66, 0x67, 0xcb, 0x47, 0xed, 0x60, 0xf1, 0x21, 0x6c, 0x39, 0xf5, 0x10, 0xf9, + 0xbd, 0x68, 0x23, 0x65, 0x96, 0x57, 0x6f, 0x87, 0xbb, 0x18, 0x83, 0x13, 0xa2, 0x6e, 0x07, 0x3c, + 0x1b, 0x03, 0x24, 0xe2, 0x69, 0x07, 
0xba, 0x1c, 0x03, 0xf2, 0x51, 0xb3, 0xee, 0x58, 0x44, 0x0f, + 0x7b, 0x11, 0x18, 0x58, 0xdb, 0xc8, 0x6e, 0xd5, 0x51, 0x2f, 0x02, 0xb1, 0x40, 0x89, 0x3c, 0x7b, + 0xf1, 0xa5, 0xd5, 0xb4, 0xcd, 0x10, 0xf5, 0x62, 0xf2, 0x2e, 0xf2, 0x83, 0x54, 0xea, 0xe2, 0xa3, + 0x25, 0x93, 0xaa, 0x1d, 0xe8, 0x99, 0xf8, 0x10, 0xec, 0x1d, 0xa1, 0x72, 0x96, 0xd7, 0x68, 0xd6, + 0x51, 0x88, 0x8c, 0x06, 0x0a, 0x4d, 0xdb, 0x0c, 0x4d, 0xd6, 0xa2, 0x52, 0xf3, 0xbc, 0x5a, 0x1d, + 0x51, 0x79, 0x6c, 0xb6, 0xb6, 0xae, 0xda, 0xad, 0xd8, 0x3c, 0x5d, 0x4a, 0xd6, 0x87, 0x4e, 0x03, + 0x05, 0xa1, 0xd9, 0x68, 0xf2, 0xa9, 0x60, 0xa3, 0x26, 0x72, 0x6d, 0xe4, 0x5a, 0x0e, 0x0a, 0xae, + 0xd6, 0xbc, 0x9a, 0x47, 0xca, 0xc9, 0x2f, 0x0a, 0xa2, 0xfd, 0xd7, 0x30, 0x94, 0x75, 0x54, 0x73, + 0x82, 0x10, 0xf9, 0x6f, 0x70, 0x79, 0xea, 0xd4, 0xca, 0xa8, 0x27, 0xa1, 0x28, 0x64, 0x5c, 0x56, + 0x4e, 0x2b, 0x97, 0x8a, 0x7a, 0x54, 0xa0, 0x9e, 0x86, 0x51, 0x1b, 0x05, 0x96, 0xef, 0x34, 0x31, + 0x4d, 0xe5, 0x0c, 0xa9, 0x97, 0x8b, 0xd4, 0x25, 0x18, 0xf5, 0xf6, 0x5c, 0xe4, 0x1b, 0xa8, 0x61, + 0x3a, 0xf5, 0x72, 0x96, 0x40, 0x00, 0x29, 0x7a, 0x15, 0x97, 0xa8, 0x2e, 0x9c, 0xe5, 0x13, 0xc1, + 0x40, 0x8f, 0x91, 0xd5, 0xc2, 0xcd, 0x0c, 0x1f, 0x85, 0xc8, 0x25, 0xbf, 0x9a, 0xc8, 0x77, 0x3c, + 0xbb, 0x9c, 0x3b, 0xad, 0x5c, 0x1a, 0xbd, 0x3e, 0x5f, 0xa5, 0xe3, 0xad, 0xf2, 0xf1, 0x56, 0x6f, + 0x31, 0x7e, 0x2c, 0xe7, 0xbe, 0xf2, 0xc3, 0x25, 0x45, 0x3f, 0xcd, 0x71, 0xbd, 0xca, 0x51, 0xe9, + 0x1c, 0xd3, 0x3a, 0x41, 0xa4, 0xde, 0x83, 0x82, 0x55, 0x6f, 0xe1, 0xb1, 0x06, 0xe5, 0xfc, 0xe9, + 0xec, 0xa5, 0xd1, 0xeb, 0xcf, 0x55, 0x63, 0xa6, 0x53, 0x52, 0xc2, 0xea, 0xee, 0xb5, 0xea, 0x0a, + 0x05, 0xd6, 0xa3, 0xd2, 0x15, 0xcf, 0xdd, 0x72, 0x6a, 0xba, 0x40, 0xa3, 0x56, 0x61, 0xda, 0xb4, + 0x42, 0x67, 0x17, 0x19, 0xac, 0xc8, 0xc0, 0x1c, 0x2a, 0x0f, 0x93, 0xb1, 0x4e, 0xd1, 0x2a, 0x86, + 0x06, 0xf3, 0x57, 0x7d, 0x0b, 0x72, 0x58, 0xc4, 0xe5, 0x11, 0xd2, 0xfd, 0x4a, 0xb5, 0x87, 0xe5, + 0xae, 0x76, 0x12, 0x4e, 0xf5, 0x96, 0x19, 0x9a, 0xaf, 0xba, 0xa1, 0xbf, 0xaf, 0x13, 0x84, 0xea, + 0x79, 0x18, 0x0f, 0x90, 0xd5, 0xf2, 0x9d, 0x70, 0xdf, 0x08, 0xbd, 0x1d, 0xe4, 0x96, 0x0b, 0x84, + 0x86, 0x31, 0x5e, 0x7a, 0x1f, 0x17, 0x62, 0x7a, 0x9d, 0xc0, 0xa8, 0xd5, 0xbd, 0x4d, 0xb3, 0x6e, + 0x44, 0xd2, 0x2d, 0x9e, 0x56, 0x2e, 0x15, 0xf4, 0x29, 0x27, 0xb8, 0x4d, 0x6a, 0x44, 0x6f, 0xea, + 0xdb, 0x30, 0xc7, 0x4c, 0x9a, 0x61, 0xfa, 0xd6, 0xb6, 0xb3, 0x6b, 0xd6, 0x8d, 0x20, 0x34, 0x43, + 0x54, 0x86, 0xd3, 0xca, 0xa5, 0xf1, 0xeb, 0xe7, 0xe2, 0x23, 0x20, 0xc6, 0x14, 0xd3, 0x7d, 0x93, + 0x01, 0x6f, 0x60, 0x58, 0x7d, 0x86, 0xe1, 0x88, 0x95, 0xaa, 0xcf, 0xc0, 0x4c, 0x1b, 0xee, 0x96, + 0xef, 0x94, 0x47, 0x09, 0xe1, 0x6a, 0xa2, 0xcd, 0x03, 0xdf, 0x51, 0xdf, 0x85, 0xf9, 0x5d, 0x27, + 0x70, 0x36, 0x9d, 0x3a, 0x1e, 0x66, 0x82, 0xa0, 0xd2, 0x00, 0x04, 0x9d, 0x88, 0xd0, 0xc4, 0x69, + 0x7a, 0x1e, 0x4e, 0xa4, 0xf5, 0x80, 0xc9, 0x1a, 0x23, 0x64, 0xcd, 0xb6, 0xb7, 0x7c, 0xe0, 0x3b, + 0x0b, 0x2f, 0x40, 0x51, 0x48, 0x44, 0x9d, 0x84, 0xec, 0x0e, 0xda, 0x67, 0x53, 0x06, 0xff, 0x54, + 0x67, 0x20, 0xbf, 0x6b, 0xd6, 0x5b, 0x88, 0x4d, 0x13, 0xfa, 0x71, 0x23, 0xf3, 0x49, 0x45, 0x5b, + 0x84, 0xf9, 0x14, 0x19, 0xd3, 0xe5, 0x5d, 0xfb, 0xb6, 0x02, 0xb3, 0xaf, 0x3b, 0x41, 0x28, 0x6a, + 0x02, 0x3e, 0x37, 0x17, 0xa1, 0xd8, 0x34, 0x6b, 0xc8, 0x08, 0x9c, 0x2f, 0xd2, 0xb9, 0x99, 0xd7, + 0x0b, 0xb8, 0x60, 0xc3, 0xf9, 0x22, 0x52, 0x2f, 0xc0, 0x84, 0x8b, 0x1e, 0x87, 0x06, 0x81, 0xa0, + 0xca, 0x80, 0xfb, 0x2d, 0xe9, 0x63, 0xb8, 0x78, 0xdd, 0xac, 0x21, 0xaa, 0x0c, 0x0f, 0x60, 0x52, + 0xa8, 0x80, 0x41, 0x17, 0x07, 0x32, 0x4b, 0x47, 0xaf, 0x5f, 
0x89, 0x73, 0x31, 0x5a, 0xd5, 0x77, + 0xaf, 0x55, 0x05, 0x31, 0xaf, 0x91, 0x16, 0xfa, 0x84, 0x1b, 0x2f, 0xd0, 0xbe, 0xa6, 0xc0, 0x5c, + 0x92, 0x6a, 0x3a, 0x20, 0xf5, 0x6d, 0x00, 0x01, 0x1d, 0x94, 0x15, 0x32, 0x09, 0x6e, 0xf4, 0x9c, + 0x04, 0xb7, 0x88, 0x51, 0xd9, 0x44, 0x6d, 0x0c, 0xd2, 0x25, 0x6c, 0xfd, 0x8e, 0x5a, 0x5b, 0x85, + 0x72, 0x0a, 0xc2, 0x7e, 0x4c, 0xde, 0x38, 0x64, 0x1c, 0x9b, 0x89, 0x30, 0xe3, 0xd8, 0xda, 0x77, + 0xb3, 0x30, 0xdf, 0x91, 0x36, 0xf5, 0x4d, 0x18, 0x8f, 0xb8, 0xeb, 0xb8, 0x5b, 0x1e, 0x41, 0x38, + 0x7a, 0xfd, 0x52, 0x3f, 0xbc, 0x5d, 0x73, 0xb7, 0x3c, 0x7d, 0xcc, 0x95, 0x3f, 0xd5, 0x65, 0x18, + 0xb6, 0x88, 0xfd, 0x21, 0x24, 0xf4, 0x29, 0x24, 0x66, 0xb1, 0x58, 0x4b, 0x75, 0x0b, 0x54, 0xc9, + 0xc8, 0x19, 0x0c, 0x1f, 0x15, 0xfa, 0x0b, 0x5d, 0x8d, 0xa1, 0x34, 0xc0, 0xa4, 0x39, 0x9c, 0xf2, + 0x93, 0x45, 0xea, 0x65, 0x98, 0xc4, 0x4e, 0x89, 0xb7, 0x8b, 0x7c, 0x83, 0x2d, 0x9c, 0xc4, 0x8e, + 0x67, 0xf5, 0x09, 0x5e, 0xfe, 0x90, 0x16, 0x77, 0x32, 0x49, 0xf9, 0x4e, 0x26, 0xe9, 0xa1, 0x84, + 0x9a, 0xd9, 0x88, 0xf2, 0x30, 0xd1, 0xa4, 0xa7, 0xba, 0x0e, 0xe0, 0x35, 0xd6, 0x08, 0x4f, 0xf4, + 0x56, 0x10, 0xd1, 0xb1, 0x4a, 0x71, 0x68, 0xdf, 0xce, 0xc2, 0xdc, 0x03, 0xe2, 0x09, 0x0c, 0xa8, + 0x16, 0x6f, 0xc2, 0x28, 0xf5, 0x20, 0xa8, 0x94, 0xa9, 0x70, 0xaa, 0x5d, 0x84, 0x93, 0xe8, 0x85, + 0xc8, 0x1a, 0x28, 0x8a, 0x84, 0xa0, 0xb3, 0x47, 0x2c, 0xe8, 0xdc, 0x91, 0x0b, 0xba, 0x7d, 0xdd, + 0xc9, 0xa7, 0xad, 0x3b, 0x57, 0x60, 0xca, 0x46, 0xc4, 0xcb, 0xd9, 0x34, 0x6d, 0x63, 0xd3, 0x71, + 0x4d, 0x22, 0x35, 0x0c, 0x39, 0x41, 0x2b, 0x96, 0x4d, 0x7b, 0x99, 0x14, 0xab, 0x4f, 0xc1, 0x54, + 0xd3, 0xf7, 0x1a, 0x5e, 0x88, 0x24, 0x75, 0x18, 0x21, 0xea, 0x30, 0xc9, 0x2a, 0x04, 0x8d, 0xda, + 0xcf, 0x33, 0x70, 0xa2, 0x4d, 0x6a, 0x1f, 0xcf, 0xc0, 0x0f, 0x73, 0x06, 0x6a, 0xef, 0x62, 0xb3, + 0xd7, 0xf4, 0x91, 0x35, 0xf8, 0x5c, 0x69, 0x57, 0x97, 0x4c, 0x8a, 0xba, 0x68, 0x27, 0x61, 0x21, + 0xad, 0x07, 0xb6, 0x2c, 0x7e, 0x0b, 0xe0, 0xd4, 0x46, 0x68, 0xfa, 0xe1, 0x5b, 0xed, 0x1e, 0x5f, + 0x3f, 0x44, 0x2c, 0xc1, 0xa8, 0xf0, 0x3b, 0x85, 0x41, 0x07, 0x5e, 0xb4, 0x66, 0xab, 0x6b, 0x30, + 0x26, 0x00, 0xc2, 0xfd, 0x26, 0x62, 0xe2, 0x49, 0xf8, 0x16, 0x2c, 0x34, 0xdd, 0xbd, 0x56, 0xe5, + 0x74, 0xdc, 0xdf, 0x6f, 0x22, 0xbd, 0xb4, 0x27, 0x7d, 0xa9, 0x2b, 0x00, 0x51, 0x60, 0xc9, 0xe6, + 0x5f, 0x02, 0x8f, 0x88, 0x53, 0x30, 0xaa, 0xfb, 0x66, 0xb0, 0x73, 0x0f, 0x7f, 0xe8, 0xc5, 0x90, + 0xff, 0x54, 0x9f, 0x87, 0xbc, 0xe3, 0x36, 0x5b, 0x21, 0x11, 0xc9, 0xe8, 0xf5, 0xd3, 0x9d, 0xe8, + 0x58, 0x37, 0xf7, 0xeb, 0x9e, 0x69, 0x07, 0x3a, 0x05, 0x57, 0x3f, 0x0f, 0x0b, 0x29, 0x0e, 0x36, + 0x8e, 0x13, 0xbc, 0x56, 0x48, 0xa6, 0x5f, 0x1f, 0x7e, 0x75, 0xb9, 0xcd, 0xaf, 0xbe, 0x4f, 0x11, + 0xa8, 0xf7, 0x60, 0x46, 0xa0, 0xf7, 0x5b, 0x11, 0xe2, 0x91, 0xfe, 0x10, 0xab, 0xbc, 0xb1, 0xde, + 0x12, 0x28, 0x37, 0x60, 0x36, 0xe2, 0x3c, 0xe6, 0x1b, 0xc7, 0x59, 0xe8, 0x0f, 0xe7, 0xb4, 0x60, + 0xbe, 0x19, 0xec, 0x70, 0xa4, 0x0b, 0x50, 0x70, 0x6c, 0x1c, 0x09, 0x84, 0xfb, 0xc4, 0xd3, 0x2d, + 0xea, 0xe2, 0x5b, 0x3d, 0x05, 0xc0, 0x77, 0x55, 0x1c, 0x9b, 0x38, 0xb5, 0x45, 0xbd, 0xc8, 0x4a, + 0xd6, 0x6c, 0x15, 0x41, 0x59, 0x52, 0x15, 0xc3, 0x47, 0xad, 0x00, 0x19, 0x4d, 0xaf, 0xee, 0x58, + 0xfb, 0xc4, 0x4f, 0x1d, 0xbf, 0xfe, 0x89, 0x0e, 0x0e, 0xe7, 0x5b, 0x42, 0x9d, 0x74, 0xdc, 0x68, + 0x9d, 0xb4, 0xd1, 0x67, 0xf7, 0xd2, 0x8a, 0xd5, 0xd7, 0xa0, 0xe4, 0xa3, 0xd0, 0xdf, 0xe7, 0xa8, + 0x4b, 0x64, 0xb4, 0x67, 0x3b, 0xc9, 0x59, 0xc7, 0xb0, 0x0c, 0xe3, 0xa8, 0x1f, 0x7d, 0xa8, 0x67, + 0x61, 0xcc, 0xf2, 0x3d, 0xd7, 0xe0, 0x21, 0x32, 0x73, 0x5a, 0x4b, 0xb8, 0x70, 0x83, 
0x95, 0xa9, + 0xcf, 0x40, 0xae, 0x81, 0x1a, 0x5e, 0x79, 0x9c, 0x74, 0x72, 0xb2, 0x53, 0x27, 0x77, 0x51, 0xc3, + 0xd3, 0x09, 0xa4, 0xfa, 0x00, 0xa6, 0x02, 0x84, 0x9d, 0x61, 0xc3, 0x0c, 0x43, 0xdf, 0xd9, 0x6c, + 0x85, 0x28, 0x28, 0x4f, 0xa4, 0xd9, 0xd2, 0xa8, 0xf9, 0x06, 0x69, 0x70, 0x53, 0xc0, 0xeb, 0x93, + 0x41, 0xa2, 0x44, 0x7d, 0x1e, 0x86, 0xb7, 0x91, 0x69, 0x23, 0xbf, 0x3c, 0x49, 0x70, 0x55, 0x3a, + 0xe1, 0x5a, 0x25, 0x50, 0x3a, 0x83, 0xc6, 0x4e, 0x3a, 0x97, 0x19, 0x32, 0x6b, 0x38, 0xc0, 0xe4, + 0x9a, 0x59, 0x9e, 0x22, 0x36, 0x6b, 0x96, 0x55, 0xbf, 0x8a, 0x6b, 0x85, 0xda, 0xaa, 0x77, 0x61, + 0xca, 0xf2, 0xdc, 0xd0, 0x71, 0x5b, 0xc8, 0x36, 0xd8, 0x9e, 0x49, 0x59, 0x4d, 0x9b, 0x52, 0xac, + 0x92, 0xbb, 0x0d, 0x2d, 0x1f, 0xe9, 0x93, 0xa2, 0x29, 0x2b, 0x51, 0x1f, 0xc2, 0x5c, 0xdd, 0x0c, + 0x42, 0x1e, 0xc0, 0xd3, 0xd8, 0x35, 0x68, 0xd5, 0xc3, 0xf2, 0x74, 0x9f, 0xd3, 0x74, 0x06, 0xb7, + 0x5f, 0x11, 0xcd, 0x75, 0xd2, 0x3a, 0x36, 0xad, 0x02, 0x6c, 0xe6, 0x0c, 0x1b, 0xd5, 0xcd, 0xfd, + 0xf2, 0xcc, 0x80, 0xd3, 0x8a, 0x98, 0xc8, 0x5b, 0xb8, 0xa9, 0xf6, 0x81, 0x02, 0x95, 0x4e, 0x16, + 0x93, 0x2d, 0x96, 0xb3, 0x30, 0x8c, 0xe7, 0xb0, 0x63, 0x33, 0x7b, 0x99, 0xf7, 0x5b, 0xee, 0x9a, + 0xad, 0xba, 0x30, 0x4d, 0x79, 0x1c, 0x9b, 0x96, 0x6c, 0xfd, 0xfb, 0x4c, 0x4f, 0xd7, 0x7d, 0xdd, + 0xab, 0xd7, 0xdf, 0x92, 0xa6, 0x24, 0x35, 0x6d, 0xdc, 0x7d, 0x9f, 0x22, 0xa8, 0xe5, 0x7a, 0xed, + 0xcb, 0x59, 0xd0, 0x6e, 0xa3, 0x76, 0x3a, 0x99, 0x97, 0xd6, 0x9f, 0x81, 0xbf, 0x0d, 0xc5, 0x48, + 0x25, 0x28, 0xa9, 0x97, 0x7b, 0xd9, 0xee, 0x88, 0x23, 0x51, 0x5b, 0xec, 0xb6, 0x34, 0xcc, 0xc7, + 0x4e, 0xa3, 0xd5, 0x30, 0xa2, 0x70, 0x2b, 0x4b, 0xc2, 0xad, 0x09, 0x56, 0xb1, 0xde, 0x25, 0xea, + 0xca, 0xa5, 0x45, 0x5d, 0xe7, 0x60, 0x7c, 0xcf, 0x74, 0x42, 0xc3, 0x45, 0x7b, 0x06, 0xda, 0x45, + 0x6e, 0xc8, 0x16, 0xda, 0x12, 0x2e, 0x7d, 0x03, 0xed, 0xbd, 0x8a, 0xcb, 0xd4, 0x6d, 0x98, 0xe7, + 0xc1, 0x31, 0x01, 0x62, 0xf1, 0x19, 0x5d, 0x8e, 0x86, 0x89, 0xe5, 0x79, 0xba, 0x83, 0xe5, 0x61, + 0xac, 0x22, 0x78, 0x68, 0x48, 0x46, 0xd6, 0x25, 0x1e, 0xc8, 0x27, 0xca, 0xb1, 0xcd, 0x08, 0x76, + 0x9c, 0xa6, 0x08, 0x76, 0x99, 0xab, 0x55, 0xc2, 0x85, 0x3c, 0xc4, 0xd5, 0x7e, 0xaa, 0xc0, 0xd9, + 0xae, 0x62, 0x61, 0x5a, 0x74, 0x03, 0x46, 0xb8, 0x4f, 0xae, 0xa4, 0x4d, 0x02, 0x56, 0x29, 0x91, + 0xa9, 0xf3, 0x06, 0xea, 0x4d, 0x18, 0xf5, 0xcd, 0x3d, 0xe1, 0xd3, 0x67, 0x88, 0x4f, 0xdf, 0x71, + 0x12, 0xe1, 0x70, 0x7b, 0xb9, 0xee, 0x6d, 0xea, 0xe0, 0x9b, 0x7b, 0x0c, 0x57, 0x9a, 0x0c, 0xb2, + 0x69, 0x32, 0x58, 0x80, 0x02, 0x1d, 0x2e, 0xa2, 0xdb, 0x4b, 0x05, 0x5d, 0x7c, 0x6b, 0x3f, 0x51, + 0xe0, 0x72, 0xd7, 0xa1, 0x62, 0x6f, 0x0a, 0xfd, 0xcf, 0x57, 0x44, 0xed, 0x7d, 0x05, 0xae, 0xf4, + 0x33, 0xd0, 0x23, 0x10, 0x6d, 0x9f, 0x72, 0xd1, 0xbe, 0x93, 0x81, 0x93, 0x1d, 0x4c, 0x46, 0x3f, + 0xec, 0x8e, 0x3b, 0x5b, 0x99, 0x83, 0x39, 0x5b, 0xb2, 0xb7, 0x90, 0x4d, 0x78, 0x0b, 0x17, 0x61, + 0x82, 0xc6, 0x2e, 0x86, 0xb5, 0x8d, 0xac, 0x9d, 0xa0, 0xd5, 0x20, 0xac, 0x2d, 0xea, 0xe3, 0xb4, + 0x78, 0x85, 0x95, 0xaa, 0x8f, 0x60, 0x11, 0x5b, 0xc3, 0xc8, 0xf7, 0x36, 0x2c, 0xb3, 0x69, 0x92, + 0x7d, 0x23, 0x07, 0x05, 0xcc, 0x8f, 0xbb, 0xd6, 0x4d, 0x15, 0x84, 0x7b, 0xbe, 0x22, 0x35, 0xd4, + 0xe7, 0xf7, 0x3a, 0x55, 0x69, 0xdf, 0x2a, 0xc0, 0xa9, 0xae, 0xe6, 0x16, 0xfb, 0x3a, 0xd4, 0xa7, + 0x22, 0x02, 0x50, 0x88, 0x00, 0xc8, 0xc0, 0xe9, 0xa4, 0xf8, 0x1c, 0xa8, 0xed, 0xde, 0xe2, 0xe0, + 0x5a, 0x3b, 0xd5, 0xe6, 0x2f, 0x1e, 0xa5, 0x3f, 0xfd, 0x22, 0xcc, 0x37, 0x7d, 0xb4, 0xeb, 0x78, + 0xad, 0x80, 0x2e, 0x8e, 0xc8, 0x66, 0x06, 0xd2, 0xb1, 0x59, 0x7c, 0x33, 0xc7, 0x01, 0x36, 0x68, + 0x3d, 0x31, 
0x78, 0x6b, 0xb6, 0x7a, 0x09, 0x26, 0xdb, 0x5a, 0xe4, 0x49, 0x8b, 0xf1, 0x20, 0x0e, + 0x59, 0x86, 0x11, 0x33, 0xc4, 0xb4, 0x51, 0x27, 0x39, 0xaf, 0xf3, 0x4f, 0xf5, 0x13, 0xa0, 0x6e, + 0x9a, 0xd6, 0x4e, 0xdd, 0xab, 0x19, 0x96, 0xd7, 0x72, 0x43, 0x63, 0xdb, 0x71, 0xa9, 0xc3, 0x9b, + 0xd5, 0x27, 0x59, 0xcd, 0x0a, 0xae, 0x58, 0x75, 0xdc, 0x50, 0x9e, 0x32, 0x85, 0x23, 0x98, 0x32, + 0xc5, 0x34, 0x53, 0x76, 0x03, 0xf2, 0xe4, 0x80, 0x85, 0xf8, 0xae, 0x6d, 0x3c, 0xa5, 0x47, 0x63, + 0x12, 0x4b, 0xef, 0xe1, 0x02, 0x9d, 0x36, 0x51, 0x6b, 0x70, 0x2a, 0x2d, 0x3e, 0x88, 0xa6, 0xd0, + 0xe8, 0x00, 0x53, 0x68, 0xa1, 0x3d, 0x4e, 0x10, 0x73, 0xea, 0x36, 0x8c, 0x73, 0x97, 0xd4, 0x26, + 0x2e, 0x3d, 0xf3, 0x70, 0x17, 0xda, 0x9c, 0x99, 0xfb, 0xfc, 0x10, 0x63, 0x39, 0xf7, 0x3e, 0xf6, + 0x66, 0xc6, 0x44, 0x3b, 0x5c, 0xa3, 0xae, 0x40, 0x89, 0xcb, 0x90, 0xa0, 0x19, 0xeb, 0x13, 0xcd, + 0x28, 0x6b, 0x45, 0x90, 0x20, 0x18, 0xc1, 0xe3, 0xc7, 0x13, 0x71, 0x9c, 0x2c, 0x32, 0x77, 0x0e, + 0xe7, 0xc7, 0x54, 0xef, 0x51, 0x6c, 0x74, 0x3f, 0x9e, 0xe3, 0x56, 0x5f, 0x82, 0x02, 0x3b, 0x03, + 0xc2, 0xce, 0x32, 0xee, 0xe7, 0x4c, 0xbc, 0x1f, 0x7e, 0xd2, 0x46, 0xbd, 0x6d, 0x02, 0xa9, 0x8b, + 0x26, 0x0b, 0xef, 0x42, 0x49, 0xc6, 0x9b, 0xb2, 0xab, 0x7c, 0x43, 0xde, 0x55, 0xee, 0x5b, 0xf4, + 0xd1, 0xde, 0xf3, 0x7f, 0x8e, 0xc0, 0x59, 0x3a, 0x06, 0x5b, 0x1e, 0x1b, 0x73, 0x48, 0x91, 0xcd, + 0x8d, 0x6e, 0x0f, 0xbb, 0xf1, 0x69, 0x28, 0xb0, 0x73, 0xca, 0xa0, 0xf3, 0xa2, 0x6d, 0xba, 0x36, + 0x39, 0x52, 0xa1, 0x3f, 0x75, 0xd1, 0xa2, 0xab, 0xb9, 0x35, 0x61, 0x2a, 0x08, 0x1d, 0x6b, 0x67, + 0x5f, 0x8e, 0x3b, 0x68, 0x0c, 0xfd, 0xbf, 0xba, 0xe8, 0xe4, 0x06, 0x69, 0x23, 0xd4, 0x30, 0x16, + 0x83, 0x90, 0x2a, 0x29, 0x06, 0x79, 0x01, 0xca, 0x3e, 0x0a, 0x5b, 0xbe, 0x4b, 0xfc, 0xb1, 0xb8, + 0x93, 0x9b, 0xe7, 0xc1, 0x04, 0xae, 0x7f, 0x03, 0xed, 0xc9, 0x4c, 0x52, 0x97, 0xa1, 0xb2, 0xe5, + 0xf9, 0x16, 0x32, 0x2c, 0x1f, 0x99, 0x21, 0x4a, 0x69, 0x3e, 0x4c, 0x9a, 0x2f, 0x10, 0xa8, 0x15, + 0x02, 0x94, 0xc4, 0x91, 0xb2, 0x9c, 0x8c, 0xa4, 0x2e, 0x27, 0xff, 0x1b, 0xc6, 0x88, 0x38, 0x59, + 0x80, 0x11, 0x94, 0x0b, 0x84, 0xcf, 0x0f, 0xfb, 0x38, 0x3f, 0xea, 0x29, 0xde, 0x2a, 0xd5, 0x0b, + 0x8a, 0x98, 0xaa, 0x70, 0xe9, 0x91, 0x54, 0x14, 0x5f, 0x73, 0x8b, 0xc9, 0x35, 0xf7, 0x1d, 0x1a, + 0xad, 0x48, 0x2b, 0x1d, 0x99, 0x77, 0xcc, 0x1c, 0x5d, 0xe9, 0x6b, 0x89, 0xdb, 0xc0, 0x2d, 0x68, + 0xe0, 0x12, 0x2f, 0x8b, 0xcd, 0xa1, 0xd1, 0x81, 0xe7, 0x90, 0xfa, 0x00, 0x4a, 0x81, 0xbd, 0x23, + 0x4e, 0x56, 0x99, 0xd5, 0xb9, 0x1e, 0x47, 0x11, 0xd8, 0x3b, 0xb1, 0x45, 0x47, 0xe6, 0xd1, 0x5d, + 0xd6, 0x52, 0x1f, 0x0d, 0xec, 0x1d, 0xfe, 0x81, 0xe3, 0xe1, 0x06, 0x0a, 0x91, 0xef, 0xb8, 0xb5, + 0x08, 0xf7, 0x58, 0xf7, 0x78, 0xf8, 0x2e, 0x6b, 0x20, 0x30, 0x4e, 0x36, 0x12, 0x25, 0x0b, 0x3b, + 0x30, 0xd5, 0x26, 0x8b, 0x94, 0x69, 0xff, 0x4a, 0x7c, 0xda, 0x5f, 0xe9, 0x6b, 0xda, 0x13, 0x94, + 0xf2, 0xe4, 0xff, 0x20, 0x03, 0xe7, 0xba, 0x6b, 0x07, 0xf3, 0x1a, 0x2c, 0x79, 0xf1, 0xc6, 0x7a, + 0xad, 0x1c, 0x49, 0xec, 0x57, 0x92, 0x77, 0x6a, 0x54, 0x04, 0xe3, 0xe4, 0xb0, 0x94, 0xec, 0x0b, + 0x9a, 0xc1, 0x0e, 0xb7, 0x24, 0xfd, 0xf5, 0x72, 0x93, 0x35, 0x6d, 0xef, 0x65, 0xcc, 0x94, 0xaa, + 0x02, 0xf5, 0x59, 0x98, 0x23, 0x99, 0x20, 0x46, 0x3c, 0xb6, 0x72, 0x6c, 0x62, 0x7a, 0xb2, 0xfa, + 0x34, 0xa9, 0x95, 0x03, 0xa8, 0x35, 0x5b, 0xfb, 0xb3, 0x2c, 0x9c, 0x4e, 0xe1, 0xd4, 0x6b, 0x24, + 0x87, 0xa5, 0x4f, 0x1b, 0x79, 0x0b, 0xf2, 0x24, 0xd9, 0x85, 0xc8, 0x6c, 0x3c, 0x79, 0x3a, 0xd0, + 0xb6, 0x69, 0x14, 0xe1, 0x5f, 0xc1, 0xad, 0x74, 0xda, 0x18, 0xfb, 0x13, 0x7c, 0xdb, 0x22, 0xdb, + 0xe7, 0xb6, 0x05, 0x6f, 0x10, 0xb3, 
0xb3, 0xb9, 0xde, 0x6e, 0x6d, 0x3e, 0xd5, 0x0e, 0xc5, 0x4c, + 0xc1, 0x70, 0xd2, 0x14, 0xc8, 0x93, 0x75, 0x64, 0xf0, 0xc9, 0x7a, 0x0f, 0xc6, 0xe3, 0x96, 0x84, + 0x39, 0x4d, 0x83, 0xd8, 0x90, 0xb1, 0x98, 0x0d, 0xd1, 0xce, 0xc2, 0x99, 0x2e, 0x92, 0x63, 0xdb, + 0xc9, 0xff, 0xc2, 0x82, 0x8e, 0x14, 0x2d, 0x3a, 0x16, 0x41, 0xc7, 0x3b, 0x30, 0x1d, 0x75, 0x10, + 0xd9, 0x1b, 0xba, 0x0e, 0x7e, 0xa2, 0x9f, 0x9e, 0x84, 0xcd, 0x99, 0x0a, 0x93, 0x45, 0x1f, 0x45, + 0xa4, 0xf2, 0xe5, 0x22, 0x8d, 0x54, 0x3a, 0x4e, 0xdb, 0x5e, 0xb3, 0xe9, 0x69, 0x29, 0x52, 0x89, + 0x24, 0x43, 0xf7, 0xf1, 0x45, 0xf8, 0x11, 0x9d, 0x18, 0x1e, 0x61, 0xf8, 0x91, 0x1e, 0x23, 0xe5, + 0x8e, 0x26, 0x46, 0x8a, 0x2c, 0x20, 0x26, 0x32, 0xdf, 0x9d, 0x48, 0xc1, 0x3c, 0x42, 0xa4, 0x29, + 0x7d, 0xa9, 0x4b, 0x30, 0x2a, 0x50, 0x39, 0x36, 0x9b, 0xa7, 0xc0, 0x8b, 0xd6, 0x6c, 0x69, 0xe3, + 0x75, 0x64, 0xc0, 0x8d, 0x57, 0x76, 0x0e, 0x51, 0x18, 0xec, 0x1c, 0xe2, 0x2e, 0x4c, 0x6d, 0x23, + 0xd3, 0x0f, 0x37, 0x91, 0x19, 0x1a, 0x36, 0x0a, 0x4d, 0xa7, 0x1e, 0x10, 0x4f, 0xa2, 0x1f, 0x1c, + 0x93, 0xa2, 0xe9, 0x2d, 0xda, 0x32, 0x25, 0x9a, 0x80, 0x83, 0x45, 0x13, 0x08, 0x2a, 0x56, 0xcb, + 0xf7, 0xf1, 0x12, 0xc0, 0x02, 0x3c, 0x23, 0x81, 0x78, 0xb4, 0x4f, 0xc4, 0x8b, 0x0c, 0xcf, 0x4d, + 0x8a, 0x66, 0xa3, 0x6b, 0xd0, 0x52, 0x3a, 0x48, 0xd0, 0x22, 0xc5, 0xa4, 0x63, 0xf1, 0x98, 0xf4, + 0x6d, 0x98, 0xe7, 0x54, 0x1b, 0xa1, 0x67, 0x58, 0x75, 0x2f, 0x40, 0xe2, 0xdc, 0x64, 0xbc, 0xbf, + 0x4d, 0xe3, 0x39, 0x8e, 0xe1, 0xbe, 0xb7, 0x82, 0xdb, 0xf3, 0xa3, 0x93, 0xfb, 0x30, 0x47, 0xb7, + 0xa0, 0xdb, 0x10, 0x4f, 0xf4, 0x79, 0x20, 0x43, 0x9a, 0x27, 0xb0, 0xbe, 0x2e, 0xeb, 0x03, 0x47, + 0x38, 0xd9, 0x1f, 0xc2, 0x48, 0x1d, 0x38, 0xb6, 0xe4, 0xe1, 0xc9, 0xd4, 0xc1, 0x0e, 0x4f, 0xb4, + 0xef, 0x28, 0xa0, 0xe9, 0xc8, 0xf2, 0x7c, 0x5b, 0x36, 0x4c, 0xab, 0xbc, 0xbb, 0x3e, 0x57, 0xfa, + 0x1b, 0x30, 0xc2, 0x35, 0x3c, 0xd3, 0xa7, 0x86, 0xf3, 0x06, 0x5d, 0x57, 0x81, 0xd8, 0x22, 0x94, + 0x4b, 0x2c, 0x42, 0xda, 0x3a, 0x8e, 0xe4, 0xba, 0x90, 0xce, 0xec, 0xea, 0x65, 0x98, 0xb4, 0x4c, + 0xd7, 0x42, 0x75, 0x83, 0x9d, 0x90, 0x20, 0xba, 0xdd, 0x5f, 0xd0, 0x27, 0x68, 0xb9, 0xce, 0x8b, + 0xb5, 0x7f, 0x57, 0xe0, 0x42, 0x17, 0x94, 0xcb, 0xfb, 0x6b, 0xf6, 0x11, 0x9d, 0xb6, 0x46, 0x27, + 0x0f, 0x59, 0xf9, 0xe4, 0x21, 0x61, 0xc5, 0x72, 0x6d, 0x56, 0x4c, 0xe2, 0x74, 0xfe, 0x30, 0x9c, + 0x1e, 0x8e, 0x73, 0x5a, 0xbb, 0x0f, 0x17, 0x7b, 0x0e, 0x7c, 0x70, 0x7e, 0xfe, 0x66, 0x46, 0x04, + 0xdb, 0x32, 0xde, 0x41, 0x83, 0xed, 0x4f, 0xc2, 0x30, 0x3b, 0x64, 0xea, 0x57, 0xbb, 0x18, 0xfc, + 0xc1, 0x95, 0x2b, 0xc5, 0x31, 0xcb, 0x1f, 0xd6, 0x31, 0xbb, 0x20, 0x82, 0x8f, 0x0e, 0xcc, 0x60, + 0xbe, 0xd9, 0xbf, 0x29, 0x58, 0x18, 0x9d, 0x01, 0x8f, 0x81, 0x1a, 0x46, 0x12, 0xc9, 0x1f, 0x42, + 0x22, 0x49, 0x25, 0xbc, 0x02, 0x97, 0x7a, 0x8f, 0x9b, 0x31, 0xe9, 0x49, 0x46, 0x04, 0x28, 0x32, + 0xf0, 0x40, 0x01, 0x8a, 0x14, 0x5a, 0x64, 0x0e, 0x13, 0x5a, 0x0c, 0xa6, 0x59, 0xfc, 0x08, 0xb5, + 0xdd, 0x3b, 0xc8, 0x0f, 0x72, 0x84, 0xba, 0x9a, 0xf4, 0x10, 0xda, 0x35, 0x76, 0xf8, 0xb0, 0x1a, + 0x6b, 0x8a, 0x50, 0x22, 0x8d, 0xc7, 0xcc, 0x1e, 0x7c, 0x1a, 0x0a, 0x8c, 0x29, 0x3c, 0xbb, 0xb1, + 0x37, 0x1b, 0x45, 0x0b, 0xed, 0x7b, 0x99, 0xd4, 0x59, 0x41, 0xfb, 0x38, 0x1e, 0x06, 0x97, 0xeb, + 0x48, 0xfe, 0x30, 0x3a, 0x92, 0xd0, 0xf5, 0x2e, 0x5a, 0x30, 0x72, 0x18, 0x2d, 0xd0, 0x10, 0x9c, + 0xef, 0xc1, 0xce, 0x23, 0x11, 0xdb, 0x6f, 0x65, 0xb0, 0xdf, 0xd0, 0x3e, 0x57, 0xc9, 0x02, 0x30, + 0xc8, 0x04, 0xfc, 0xc5, 0xfb, 0x0d, 0x1f, 0x86, 0x69, 0x3f, 0x9f, 0xbe, 0xce, 0x09, 0x6e, 0x30, + 0xa3, 0x45, 0xfd, 0x8b, 0x8e, 0x70, 0xbf, 0xc4, 0xfe, 0xc5, 
0xe5, 0xf4, 0x25, 0x2d, 0x36, 0x70, + 0xc6, 0xa4, 0x0f, 0x32, 0x58, 0x85, 0x09, 0x17, 0x68, 0xfd, 0x01, 0x33, 0xde, 0x3e, 0xbc, 0xa3, + 0xbd, 0x6e, 0xaa, 0x17, 0xcf, 0xad, 0xca, 0x25, 0x73, 0xab, 0x9e, 0x85, 0xb9, 0x2d, 0xc7, 0x0f, + 0x42, 0xf9, 0xee, 0x07, 0x95, 0x13, 0xdd, 0x7c, 0x9a, 0x26, 0xb5, 0x51, 0xef, 0x44, 0x6a, 0x73, + 0x78, 0xb5, 0x35, 0x03, 0x66, 0xd1, 0x8b, 0x3a, 0xfb, 0xd2, 0x2e, 0x61, 0x6d, 0xea, 0xce, 0x28, + 0xc6, 0xd3, 0xbf, 0xc8, 0x42, 0x65, 0xc3, 0xa9, 0xb9, 0xe6, 0xf1, 0x63, 0xe6, 0x12, 0x8c, 0x06, + 0x84, 0x32, 0x7a, 0x8b, 0x84, 0xdd, 0x98, 0xa1, 0x45, 0xe4, 0xfa, 0x88, 0x08, 0xc0, 0x73, 0x83, + 0x05, 0xe0, 0xb2, 0x94, 0xf2, 0x5d, 0xa5, 0x34, 0x9c, 0x94, 0x52, 0x19, 0x46, 0x2c, 0xcf, 0x0d, + 0x7d, 0xaf, 0xce, 0xce, 0x26, 0xf8, 0xa7, 0xb4, 0x8b, 0x50, 0x18, 0x68, 0x17, 0xe1, 0x25, 0x58, + 0x24, 0x09, 0x27, 0x35, 0xe4, 0x22, 0xdf, 0x0c, 0x51, 0xe2, 0xd8, 0x84, 0xde, 0x45, 0x29, 0x63, + 0x90, 0xdb, 0x0c, 0x22, 0x96, 0x21, 0x74, 0x06, 0x96, 0x3a, 0x8a, 0x8f, 0x89, 0xf8, 0xab, 0x00, + 0x17, 0x19, 0x8c, 0x13, 0x6e, 0x7f, 0x9c, 0x2a, 0xfa, 0x71, 0xaa, 0xe8, 0x2f, 0x4f, 0xaa, 0x68, + 0xc2, 0x46, 0x94, 0xda, 0x6c, 0xc4, 0x0a, 0x94, 0x18, 0x00, 0x55, 0x84, 0xb1, 0x3e, 0x15, 0x81, + 0xa1, 0x5d, 0x23, 0xea, 0x20, 0xcd, 0xfa, 0xf1, 0xf8, 0xac, 0x4f, 0xee, 0xb6, 0x4c, 0x1c, 0x55, + 0xaa, 0xea, 0x64, 0x97, 0x54, 0xd5, 0xa9, 0xc3, 0xa5, 0xaa, 0xaa, 0x47, 0x98, 0xaa, 0x3a, 0x3d, + 0x90, 0xad, 0x3b, 0xfa, 0x5c, 0xce, 0x5e, 0xe6, 0x73, 0xb6, 0x87, 0xf9, 0xbc, 0x09, 0x97, 0x7a, + 0x9b, 0xc6, 0xae, 0x39, 0xa1, 0xda, 0x3f, 0x66, 0xe0, 0x94, 0x8e, 0x02, 0x14, 0x1e, 0xbb, 0x05, + 0x34, 0xf2, 0x0e, 0xb2, 0xb2, 0x77, 0xa0, 0xbe, 0x22, 0x25, 0xba, 0x10, 0x5b, 0xb1, 0xe5, 0xb8, + 0x4e, 0xb0, 0x9d, 0xcc, 0x1c, 0x9a, 0x97, 0x4d, 0xc2, 0x6b, 0x04, 0x84, 0xa7, 0x04, 0xc5, 0x27, + 0x7f, 0x3e, 0x39, 0xf9, 0x1f, 0x80, 0x4a, 0x0f, 0x16, 0x7d, 0x64, 0x36, 0x9b, 0xf5, 0x7d, 0x39, + 0x4f, 0xf3, 0x62, 0x87, 0x69, 0x4f, 0x58, 0xa8, 0x53, 0x78, 0xb2, 0x1c, 0x4c, 0xfa, 0x89, 0x12, + 0xed, 0x05, 0xa8, 0x74, 0x62, 0x74, 0x77, 0x11, 0xfd, 0x65, 0x06, 0xce, 0xdc, 0x47, 0x7e, 0xc3, + 0x71, 0x25, 0xf9, 0x1f, 0x7b, 0x31, 0x49, 0x1e, 0x77, 0xee, 0x30, 0x1e, 0x77, 0xd2, 0xc5, 0xe9, + 0xec, 0x69, 0x0e, 0x77, 0xf4, 0x34, 0xb5, 0x73, 0xa0, 0x75, 0xe3, 0x20, 0x73, 0x35, 0xbe, 0xa2, + 0x40, 0xe5, 0x16, 0xb9, 0xc0, 0x74, 0xdc, 0xb8, 0x8c, 0x1d, 0xa5, 0x8e, 0x94, 0x31, 0xea, 0xff, + 0x30, 0x0b, 0x67, 0x5e, 0x77, 0x82, 0xf0, 0xcd, 0x26, 0x72, 0xdb, 0xa0, 0x82, 0xfe, 0x06, 0x90, + 0x9a, 0x9a, 0x9a, 0xe9, 0x3b, 0x35, 0x35, 0x35, 0x3f, 0x77, 0x03, 0xa6, 0xd8, 0xb1, 0x83, 0xd3, + 0x10, 0x57, 0x53, 0xa9, 0x4a, 0x24, 0x66, 0x13, 0xad, 0xa3, 0x59, 0x3f, 0xa6, 0x4f, 0xce, 0x04, + 0xf8, 0xbd, 0xd4, 0x20, 0x5e, 0xa0, 0xbe, 0x03, 0x93, 0x91, 0xfc, 0x19, 0x4e, 0xea, 0x25, 0x5d, + 0xed, 0x84, 0xb3, 0x8d, 0x27, 0x14, 0xd5, 0xea, 0x90, 0x3e, 0x81, 0xe2, 0x45, 0xea, 0x5d, 0x18, + 0xc5, 0x73, 0x9e, 0x23, 0x4e, 0xdd, 0x6f, 0x6a, 0x47, 0x8c, 0x67, 0xb9, 0xc0, 0x09, 0xa1, 0xf8, + 0x5a, 0x2e, 0xc2, 0x08, 0x85, 0x0e, 0xb4, 0xdf, 0x53, 0x40, 0xeb, 0x26, 0x24, 0x71, 0xdb, 0x0d, + 0x04, 0x4d, 0x7c, 0x1b, 0xe3, 0x6a, 0x7a, 0xfa, 0x44, 0xea, 0xd0, 0xe8, 0x35, 0xc4, 0x08, 0x45, + 0xdf, 0x17, 0x6a, 0x7f, 0x9e, 0x85, 0xb3, 0x98, 0x3e, 0x72, 0xc4, 0x63, 0x7f, 0xac, 0x46, 0xc7, + 0x48, 0x8d, 0xd4, 0x3b, 0x30, 0x16, 0x90, 0xfb, 0xae, 0x1c, 0xe1, 0x48, 0x5a, 0x58, 0x11, 0x1b, + 0x7d, 0xd8, 0x0a, 0x04, 0xaa, 0x52, 0x20, 0x7d, 0xcb, 0x3a, 0xf9, 0xfb, 0x0a, 0x9c, 0xeb, 0x2e, + 0xf3, 0x8f, 0x5a, 0x2b, 0xbf, 0xa2, 0xc0, 0x29, 0x4c, 0xe1, 0x41, 0xf5, 0x31, 0x76, 
0xc3, 0x3e, + 0xd3, 0xfb, 0x86, 0x7d, 0xaa, 0x02, 0xce, 0xf0, 0xe4, 0x5c, 0xba, 0xf9, 0x41, 0x3f, 0xb4, 0xaf, + 0x2a, 0x50, 0xe9, 0x44, 0xda, 0x47, 0xcd, 0xb6, 0x6f, 0x28, 0x70, 0x1e, 0xd3, 0x76, 0x93, 0x5d, + 0x87, 0x38, 0x86, 0xec, 0xfb, 0x03, 0x05, 0x2e, 0xf4, 0x22, 0xf1, 0x38, 0x68, 0xdf, 0x86, 0x65, + 0xba, 0xc7, 0x54, 0xfb, 0x3a, 0x91, 0xf6, 0x51, 0xb3, 0xed, 0x3e, 0x54, 0x48, 0xf6, 0xfc, 0x41, + 0xd9, 0x26, 0x46, 0x9c, 0x91, 0x47, 0xfc, 0x7e, 0x06, 0x96, 0x3a, 0xa2, 0x65, 0x43, 0x9e, 0x81, + 0x3c, 0x49, 0xe8, 0x27, 0x38, 0xb3, 0x3a, 0xfd, 0x50, 0x1d, 0x18, 0xae, 0xf9, 0x5e, 0xab, 0xc9, + 0xd3, 0x11, 0xef, 0xf5, 0x4c, 0x47, 0xec, 0xd1, 0x4f, 0xf5, 0x66, 0xad, 0xe6, 0xa3, 0x1a, 0x89, + 0xf1, 0x6e, 0x63, 0xcc, 0x3a, 0xeb, 0x60, 0xa1, 0x0e, 0x93, 0xc9, 0x3a, 0x75, 0x19, 0x4a, 0xa4, + 0xd6, 0x20, 0x69, 0x9b, 0x5c, 0x12, 0x4b, 0x3d, 0x9c, 0x62, 0x7d, 0x94, 0x34, 0x7a, 0x48, 0xda, + 0x44, 0x03, 0xcb, 0x48, 0x03, 0xd3, 0x4e, 0xc2, 0xc2, 0x6d, 0x14, 0xb6, 0x05, 0xbe, 0x94, 0xc9, + 0xda, 0xdf, 0x29, 0xb0, 0x98, 0x5a, 0x2d, 0x9e, 0xf1, 0xc8, 0xed, 0xa0, 0x7d, 0x4e, 0xcf, 0x6b, + 0x3d, 0x99, 0xd2, 0x05, 0x57, 0xf5, 0x0e, 0xda, 0x67, 0x59, 0xc7, 0x04, 0xe7, 0xc2, 0xbb, 0x50, + 0x14, 0x45, 0x29, 0xc9, 0xaf, 0x2f, 0xc9, 0xc9, 0xaf, 0x9d, 0x63, 0xab, 0x35, 0xd7, 0x46, 0x8f, + 0x91, 0x4d, 0x78, 0x40, 0x62, 0x2b, 0x29, 0xf3, 0xf5, 0xeb, 0xd1, 0x71, 0x29, 0xc9, 0x8d, 0x3d, + 0xc8, 0x31, 0xfc, 0x5d, 0x18, 0xe7, 0xef, 0x36, 0xd9, 0x34, 0xd6, 0xa3, 0xf4, 0x5c, 0xe8, 0x40, + 0x8f, 0x94, 0x84, 0x4b, 0xc8, 0x19, 0x13, 0xad, 0xd9, 0xd6, 0x5f, 0x49, 0xce, 0xef, 0x4e, 0xcf, + 0xee, 0x4c, 0xdb, 0xb3, 0x91, 0x12, 0xb5, 0xd5, 0xb3, 0x30, 0x86, 0x7c, 0xdf, 0xf3, 0x0d, 0x96, + 0x51, 0xc9, 0xa6, 0x7d, 0x89, 0x14, 0xb2, 0x74, 0xcb, 0xee, 0x19, 0x9c, 0x9f, 0xcd, 0x15, 0xf2, + 0x93, 0xc3, 0x52, 0xd6, 0x64, 0x1a, 0x7f, 0x58, 0xe8, 0xf0, 0x2b, 0x0a, 0x2c, 0x92, 0xd8, 0x94, + 0x66, 0xdb, 0x0f, 0x98, 0x34, 0x79, 0x54, 0x17, 0xe3, 0xb4, 0x0a, 0x9c, 0x4c, 0xa7, 0x82, 0x91, + 0xf9, 0xb5, 0x0c, 0xcc, 0x90, 0x51, 0x70, 0x2c, 0xbf, 0xe0, 0x8b, 0x7b, 0xe2, 0x7a, 0x4e, 0x76, + 0xf0, 0xeb, 0x39, 0x26, 0xcc, 0x71, 0xad, 0x78, 0x0f, 0x59, 0xa1, 0x61, 0x79, 0xae, 0xed, 0x88, + 0x84, 0xc3, 0xf1, 0xe4, 0x7b, 0x27, 0x49, 0x65, 0xc3, 0x6d, 0x56, 0x78, 0x13, 0x7d, 0xe6, 0x51, + 0x4a, 0xa9, 0xf6, 0xc7, 0x0a, 0xcc, 0x26, 0xd8, 0xc3, 0xe6, 0x78, 0x52, 0x25, 0x95, 0x83, 0xa8, + 0xe4, 0x1d, 0x18, 0x97, 0x47, 0x80, 0xec, 0x1e, 0x57, 0x55, 0x24, 0xca, 0x91, 0xad, 0x8f, 0x3d, + 0x92, 0x3f, 0xb5, 0x5f, 0x57, 0xe0, 0x34, 0x7f, 0x6e, 0xe7, 0x80, 0xc1, 0xf6, 0x91, 0xa9, 0xdd, + 0x6f, 0xe7, 0xe0, 0x4c, 0x17, 0x5a, 0x18, 0x0f, 0x63, 0xa1, 0x02, 0x7b, 0xe9, 0x43, 0x49, 0x4b, + 0xa8, 0xed, 0xba, 0x9a, 0xb2, 0x37, 0x3e, 0xa2, 0x50, 0x81, 0xbd, 0xf0, 0x51, 0x83, 0x13, 0x29, + 0x5b, 0xf6, 0xd2, 0x1b, 0x34, 0x03, 0x2f, 0xd9, 0xb3, 0x7b, 0x69, 0xc5, 0xea, 0x3b, 0xa0, 0x36, + 0x91, 0x6b, 0x3b, 0x6e, 0xcd, 0x60, 0xc7, 0xa3, 0x0e, 0x0a, 0xca, 0x59, 0x62, 0xfc, 0x9f, 0xee, + 0xdc, 0xc7, 0x3a, 0x6d, 0xc3, 0x0f, 0x39, 0x49, 0x0f, 0x53, 0xcd, 0x58, 0xa1, 0x83, 0x02, 0xf5, + 0x0b, 0x30, 0xc9, 0xb1, 0x5b, 0xdb, 0x4e, 0xdd, 0xf6, 0xc9, 0x7d, 0x55, 0x8c, 0xfb, 0xd9, 0x9e, + 0xb8, 0x57, 0x70, 0x83, 0xf8, 0x18, 0x26, 0x9a, 0x52, 0x95, 0x8f, 0x5c, 0x15, 0xc1, 0x2c, 0xc7, + 0xdf, 0x7e, 0xbd, 0xa7, 0xab, 0x24, 0x58, 0x27, 0xf2, 0xf6, 0x29, 0xe9, 0x62, 0xba, 0xd9, 0x5e, + 0xa1, 0x7d, 0x29, 0x13, 0xbd, 0x2b, 0xf5, 0x51, 0x64, 0x90, 0xbf, 0x0e, 0x13, 0x52, 0x96, 0xb8, + 0x38, 0x8a, 0xea, 0xfc, 0x22, 0x9a, 0xc0, 0x42, 0x17, 0xa4, 0x50, 0xfe, 0x54, 0x5f, 0x84, 0x79, + 0xc7, 0xb5, 
0xea, 0x2d, 0x1b, 0x49, 0xd7, 0x01, 0x0d, 0x1a, 0x0d, 0xb2, 0x1b, 0xd3, 0x73, 0x0c, + 0x40, 0xe0, 0xa1, 0xb1, 0xa3, 0xf6, 0xa7, 0x4a, 0xf4, 0x2a, 0x56, 0x7b, 0x66, 0xf7, 0xcb, 0x30, + 0xd2, 0xf4, 0xea, 0x75, 0xe4, 0x73, 0xef, 0xe1, 0x7c, 0x97, 0x81, 0xae, 0x13, 0x48, 0xc2, 0x73, + 0xde, 0x4a, 0x7d, 0x08, 0x53, 0xed, 0x14, 0xa5, 0xde, 0x84, 0x49, 0xe7, 0x19, 0x7f, 0xfe, 0x29, + 0x4c, 0x90, 0x7d, 0x02, 0x66, 0x6f, 0xa3, 0x90, 0xbd, 0xd5, 0x47, 0xba, 0x64, 0xce, 0xd0, 0xef, + 0xe4, 0x60, 0x2e, 0x59, 0xc3, 0x06, 0xf3, 0x45, 0x98, 0x0a, 0x5a, 0xcd, 0xa6, 0x47, 0x52, 0x7b, + 0xad, 0xba, 0x83, 0xdc, 0x90, 0x0f, 0xeb, 0x6e, 0x3f, 0x4e, 0x51, 0x0a, 0xce, 0xea, 0x06, 0x47, + 0xb8, 0x42, 0xf1, 0x51, 0xdf, 0x68, 0x32, 0x48, 0x14, 0xd3, 0x97, 0x74, 0x7c, 0xf9, 0x75, 0x1f, + 0xf1, 0x92, 0x8e, 0x2f, 0xbd, 0xed, 0x73, 0x0a, 0x80, 0xbf, 0x4c, 0x28, 0x12, 0x20, 0x8a, 0xac, + 0x64, 0xcd, 0x56, 0x6f, 0x43, 0x89, 0xa7, 0xfd, 0x13, 0xc3, 0x91, 0x7a, 0xea, 0xc8, 0x20, 0x30, + 0xd1, 0x0c, 0x2d, 0xa1, 0x78, 0x74, 0x37, 0xfa, 0x50, 0xcf, 0x40, 0x29, 0xf6, 0x02, 0x22, 0xdd, + 0x82, 0x1d, 0xb5, 0xa4, 0xb7, 0x0f, 0xab, 0x30, 0xcd, 0xaf, 0xdd, 0x04, 0xdb, 0xa6, 0x6f, 0xd3, + 0x1b, 0xb4, 0xec, 0x86, 0xed, 0x14, 0xab, 0xda, 0xc0, 0x35, 0xc4, 0x89, 0x26, 0xef, 0x40, 0xe1, + 0x1e, 0x82, 0x10, 0xb9, 0x16, 0x16, 0xb5, 0xe7, 0x23, 0x76, 0x06, 0x3d, 0x29, 0x55, 0x6c, 0xe0, + 0x72, 0xf5, 0x32, 0x4c, 0x4a, 0x0f, 0xf7, 0x51, 0x58, 0xfa, 0x02, 0xe2, 0x44, 0x54, 0x4e, 0x40, + 0x17, 0x56, 0x60, 0x36, 0x95, 0xc9, 0x03, 0xbd, 0xdb, 0x37, 0x07, 0x33, 0xd8, 0xab, 0xdd, 0x0f, + 0x42, 0xd4, 0x90, 0xb5, 0xe5, 0x27, 0x79, 0xa2, 0x47, 0x72, 0x05, 0x53, 0x96, 0x76, 0x81, 0x29, + 0x69, 0x02, 0xab, 0x41, 0x29, 0x76, 0x01, 0x83, 0xaa, 0xf6, 0x4a, 0x5f, 0x3e, 0x76, 0x5b, 0xa7, + 0xd5, 0xd8, 0x95, 0x8c, 0x18, 0xe2, 0x85, 0x3f, 0xcf, 0x41, 0x49, 0xae, 0x56, 0x9f, 0x83, 0x13, + 0xec, 0xe0, 0xd0, 0x74, 0x6d, 0x83, 0xae, 0xdb, 0xec, 0xd0, 0x8b, 0xe6, 0xb4, 0xce, 0xd0, 0xea, + 0x9b, 0xcc, 0x2f, 0xa4, 0x47, 0x5d, 0xea, 0x2d, 0xa8, 0x38, 0x6e, 0x88, 0x7c, 0xdc, 0x90, 0xfa, + 0x9f, 0xb6, 0xb3, 0xb5, 0x85, 0x7c, 0xe4, 0x86, 0x8e, 0x29, 0x16, 0xda, 0x82, 0x7e, 0x92, 0x43, + 0xbd, 0x8a, 0x81, 0x6e, 0xc5, 0x61, 0xd4, 0x3b, 0xa0, 0x89, 0x6c, 0x1c, 0x96, 0x59, 0x65, 0x70, + 0x0b, 0x24, 0xf2, 0xc2, 0x88, 0xfe, 0x16, 0xf4, 0x25, 0x0e, 0xc9, 0x72, 0xb1, 0xd6, 0x28, 0x9c, + 0xc8, 0xff, 0x52, 0x9f, 0x06, 0x95, 0xcd, 0x97, 0x40, 0x9c, 0x2f, 0x72, 0xb3, 0xc5, 0x67, 0x6c, + 0xc0, 0x0f, 0x19, 0x03, 0xf5, 0xd3, 0xb0, 0x80, 0x5c, 0xcb, 0xb3, 0xa3, 0x57, 0x61, 0xe4, 0xc3, + 0x43, 0x7a, 0x0b, 0xb4, 0xcc, 0x20, 0x58, 0x97, 0xd2, 0xd1, 0xe0, 0x8b, 0x30, 0xbf, 0xd9, 0x72, + 0xea, 0xb6, 0xe1, 0xd8, 0xc6, 0xa6, 0x19, 0x20, 0x9b, 0xcb, 0xd7, 0x71, 0x6b, 0xec, 0x0e, 0xe8, + 0x1c, 0x01, 0x58, 0xb3, 0x97, 0x71, 0xf5, 0x43, 0x51, 0xab, 0x2e, 0xc1, 0x68, 0xab, 0x19, 0x20, + 0x3f, 0x34, 0xc8, 0x29, 0x27, 0x7d, 0x78, 0x03, 0x68, 0xd1, 0x5d, 0xd4, 0xf0, 0xd4, 0x67, 0x60, + 0x26, 0xf1, 0xfa, 0x0a, 0xd9, 0x8d, 0x24, 0x9a, 0x5d, 0xd0, 0xd5, 0xd8, 0xf3, 0x29, 0x64, 0xe3, + 0x12, 0xcf, 0xc3, 0xd8, 0x8d, 0x47, 0x9a, 0x4d, 0x11, 0xbb, 0xbd, 0x78, 0x0b, 0x96, 0xe8, 0xdd, + 0x75, 0x1a, 0x5b, 0x6e, 0xee, 0x4b, 0xde, 0x03, 0xb3, 0xa7, 0x40, 0x5a, 0x2d, 0x12, 0x30, 0x12, + 0x8a, 0x2e, 0x47, 0xd7, 0x69, 0x99, 0xbd, 0xfc, 0xff, 0x6c, 0x13, 0x4b, 0x18, 0xd6, 0x75, 0xd3, + 0x0f, 0x9d, 0x01, 0x62, 0xf5, 0xa3, 0x58, 0xf5, 0xb4, 0xaf, 0x66, 0x60, 0xa9, 0x23, 0x15, 0xc2, + 0x4a, 0x57, 0x62, 0x77, 0x0b, 0xd9, 0xd2, 0xd1, 0x14, 0x90, 0xcc, 0x64, 0x3f, 0xd7, 0x4f, 0xe7, + 0x02, 0xbf, 0xb8, 0x53, 0xb5, 0x68, 
0x26, 0xaf, 0x31, 0x45, 0x34, 0xe0, 0xbe, 0xe3, 0x07, 0x8f, + 0x6d, 0x7d, 0x67, 0x0e, 0xd5, 0xf7, 0x5e, 0xf2, 0x7e, 0x65, 0xd4, 0xb7, 0xf6, 0xcd, 0x2c, 0xcc, + 0xd2, 0x7b, 0xc7, 0x5c, 0xd5, 0xfb, 0xce, 0x79, 0x11, 0xf7, 0x49, 0xa2, 0x9c, 0x17, 0x5e, 0xb4, + 0x66, 0xab, 0x9f, 0x81, 0x82, 0x38, 0xb5, 0xa7, 0x61, 0x8d, 0x96, 0xb8, 0x51, 0xcb, 0x6a, 0xc9, + 0x8e, 0x32, 0xef, 0x5b, 0xb4, 0x51, 0x5f, 0x87, 0x31, 0xc7, 0x75, 0x42, 0xc7, 0xac, 0x1b, 0x4d, + 0x33, 0xb4, 0xb6, 0xd3, 0xb7, 0xe4, 0xd3, 0x90, 0xac, 0x63, 0x70, 0xbd, 0xc4, 0x5a, 0x93, 0xaf, + 0xc3, 0xe4, 0x36, 0xf1, 0xf4, 0x82, 0x91, 0xc3, 0xa5, 0x17, 0x14, 0x0e, 0x9b, 0x5e, 0xa0, 0xbd, + 0x0c, 0x73, 0x49, 0x49, 0x45, 0xab, 0x06, 0x0e, 0x1c, 0xea, 0x8e, 0x15, 0xc6, 0xf6, 0x22, 0xc6, + 0x78, 0x29, 0xdd, 0x38, 0xfb, 0x1c, 0x9c, 0xe0, 0x3e, 0xd7, 0xd1, 0x0a, 0x5b, 0xfb, 0x2b, 0xc9, + 0xaf, 0x6d, 0xa3, 0x4e, 0xd6, 0x04, 0xe5, 0x00, 0x9a, 0x70, 0x03, 0x72, 0x52, 0xbc, 0x72, 0xa1, + 0x77, 0x5b, 0xb2, 0xb8, 0x91, 0x36, 0x42, 0x78, 0xd9, 0xc3, 0x09, 0x2f, 0x77, 0xe8, 0xdc, 0x90, + 0x76, 0x11, 0xe5, 0xd3, 0x44, 0xf4, 0x1f, 0x0a, 0xcc, 0xd2, 0x97, 0x2a, 0x8f, 0xd9, 0x74, 0x6c, + 0xa7, 0x3f, 0x97, 0x42, 0xff, 0x21, 0xe6, 0x99, 0x56, 0xe6, 0x2f, 0xab, 0x26, 0x15, 0x48, 0xfb, + 0x1b, 0x05, 0x66, 0xc8, 0x34, 0x3e, 0x62, 0x9e, 0xbc, 0x04, 0x79, 0x6a, 0x5a, 0xb2, 0x83, 0x99, + 0x16, 0xda, 0xaa, 0xeb, 0x65, 0xe9, 0xee, 0x99, 0x20, 0x38, 0x8e, 0x48, 0x0c, 0x88, 0x0d, 0xf5, + 0x87, 0x0a, 0x9c, 0xc6, 0x4b, 0x15, 0xaf, 0xb8, 0x8b, 0xa1, 0x1c, 0xb7, 0x46, 0x6e, 0xfd, 0x1d, + 0xd1, 0xb0, 0x5f, 0x06, 0x88, 0x0e, 0x3c, 0xd9, 0xd8, 0x7b, 0xdf, 0x33, 0x2c, 0x8a, 0x23, 0x4e, + 0xf5, 0x53, 0x50, 0x40, 0x2e, 0xbb, 0xa6, 0x98, 0xeb, 0xb3, 0xf9, 0x08, 0x72, 0xc9, 0x15, 0x45, + 0xcd, 0xa6, 0xc9, 0x04, 0x1d, 0x06, 0x28, 0x02, 0x40, 0x99, 0x44, 0xba, 0xf2, 0x0e, 0x42, 0xa2, + 0xe6, 0xc3, 0x2c, 0x4d, 0x6b, 0x38, 0x62, 0x95, 0xe9, 0x92, 0xc9, 0x8c, 0x15, 0x38, 0xd9, 0x27, + 0x93, 0xea, 0xaf, 0x29, 0x30, 0x23, 0x0f, 0xfa, 0xa3, 0x3b, 0xed, 0xd6, 0x7e, 0x83, 0xbd, 0x16, + 0x2e, 0x91, 0xc2, 0x78, 0xbe, 0x06, 0xc5, 0xc8, 0x0d, 0x56, 0xd2, 0xde, 0x4a, 0x4e, 0x9b, 0x11, + 0x18, 0x17, 0x8d, 0x3e, 0xa3, 0xd6, 0x7d, 0x9f, 0xe4, 0xfc, 0xf3, 0x30, 0x5c, 0xa4, 0x73, 0x9e, + 0xde, 0x18, 0x58, 0xa6, 0x1e, 0xf0, 0x8a, 0xd7, 0x68, 0x9a, 0x21, 0x0b, 0xc7, 0xfa, 0x63, 0xd5, + 0xa9, 0x36, 0x3f, 0xb1, 0x28, 0xef, 0x7b, 0xdc, 0x81, 0xb3, 0xa6, 0x6d, 0x93, 0xe7, 0x57, 0x84, + 0x1b, 0xee, 0xd0, 0xd7, 0x5c, 0x6c, 0xb4, 0x65, 0xb6, 0xea, 0xa1, 0x11, 0x20, 0x1a, 0x39, 0x14, + 0x57, 0x87, 0xf4, 0x93, 0xa6, 0x6d, 0xbf, 0x81, 0xf6, 0x18, 0x39, 0x6b, 0xee, 0x1b, 0x68, 0xef, + 0x16, 0x05, 0xdb, 0x40, 0xa1, 0xfa, 0x0d, 0x05, 0x16, 0x39, 0x36, 0x8b, 0x91, 0x5a, 0x47, 0x02, + 0x31, 0x9b, 0x12, 0xef, 0xf6, 0x0c, 0xc6, 0xfa, 0x1c, 0x79, 0xf5, 0x26, 0x21, 0x66, 0x45, 0xf4, + 0xc4, 0x62, 0x83, 0xd5, 0x21, 0xfd, 0x84, 0x99, 0xa8, 0x62, 0x68, 0xd4, 0xe7, 0xe1, 0x04, 0x7f, + 0x1d, 0x39, 0x40, 0x21, 0xf6, 0xde, 0x05, 0x75, 0x79, 0x36, 0xc6, 0x69, 0x06, 0xb0, 0x81, 0xc2, + 0xe5, 0x7d, 0xde, 0xee, 0x33, 0xb0, 0xc8, 0xdb, 0x09, 0x3e, 0xed, 0x39, 0xe1, 0xb6, 0xe3, 0x12, + 0xfe, 0x0c, 0xb3, 0xb6, 0x1c, 0x39, 0x6b, 0xf6, 0x16, 0x81, 0xc0, 0xac, 0x09, 0x00, 0x1a, 0xc8, + 0xaf, 0x91, 0x5e, 0xf9, 0xc5, 0x1c, 0xfd, 0xc8, 0x18, 0x71, 0x17, 0xa3, 0xde, 0x40, 0x61, 0xb0, + 0x3a, 0xa4, 0x17, 0x1b, 0xfc, 0x63, 0xe1, 0x8f, 0x14, 0x38, 0xd1, 0x81, 0x47, 0xea, 0x69, 0x28, + 0xc9, 0x42, 0x67, 0x8a, 0x03, 0xae, 0x10, 0xae, 0xfa, 0x32, 0x9c, 0x44, 0x8f, 0x9d, 0x20, 0x24, + 0x5b, 0x8b, 0x29, 0xd2, 0xa4, 0xba, 0x34, 0xcf, 0x61, 0xda, 
0x79, 0x7d, 0x09, 0x26, 0x1b, 0xe6, + 0x0e, 0x65, 0x34, 0x53, 0x26, 0x16, 0x82, 0x8e, 0xe3, 0xf2, 0x0d, 0x14, 0x32, 0xdd, 0x59, 0x78, + 0x04, 0x45, 0x31, 0x04, 0xf5, 0x2a, 0xcc, 0x34, 0x7d, 0xa7, 0x61, 0xfa, 0xfb, 0x54, 0x44, 0x71, + 0x0a, 0xa7, 0x58, 0x1d, 0x96, 0x0e, 0xeb, 0xe7, 0x59, 0x98, 0x0b, 0x90, 0xe5, 0xb9, 0x76, 0x5b, + 0x13, 0x4a, 0xe2, 0xb4, 0xa8, 0x8d, 0x1a, 0x2d, 0x8f, 0x42, 0x51, 0xfc, 0x75, 0x8f, 0xf6, 0x0a, + 0x5c, 0xea, 0xcd, 0x6a, 0x6a, 0x0d, 0x3e, 0x9b, 0x2b, 0x28, 0x93, 0x19, 0x7d, 0x5c, 0x3c, 0xd7, + 0x83, 0xf0, 0x42, 0xa6, 0xfd, 0x1f, 0x38, 0xc7, 0x1e, 0x03, 0xfc, 0x10, 0x27, 0xeb, 0x3c, 0x14, + 0x1a, 0xe6, 0x63, 0xaa, 0x42, 0xf4, 0xf5, 0xc2, 0x91, 0x86, 0xf9, 0x18, 0x33, 0x4d, 0xfb, 0x92, + 0x02, 0xe7, 0x7b, 0x10, 0xc0, 0xac, 0xd9, 0xe7, 0x41, 0x6d, 0x98, 0xef, 0x79, 0xd2, 0x83, 0x43, + 0x28, 0xec, 0x70, 0x4a, 0x1d, 0x8b, 0xa3, 0xda, 0x54, 0x68, 0x03, 0x85, 0xfa, 0x24, 0x41, 0x15, + 0x15, 0x04, 0xda, 0x77, 0x15, 0x38, 0x2d, 0x08, 0xc1, 0x71, 0x95, 0x8e, 0x4c, 0x6b, 0xdb, 0x1c, + 0x84, 0x0b, 0x8b, 0x50, 0xe4, 0x12, 0xa4, 0x01, 0x5e, 0x51, 0x2f, 0xb0, 0x2d, 0x80, 0x00, 0x2f, + 0x44, 0x11, 0x8b, 0xe8, 0x36, 0x7a, 0x51, 0x07, 0xc1, 0xa3, 0x40, 0xbd, 0x03, 0x25, 0x5f, 0xea, + 0x92, 0x1d, 0xf6, 0x5c, 0xec, 0xb2, 0x8d, 0x1b, 0xa3, 0x30, 0xd6, 0x58, 0xfb, 0x55, 0x05, 0xce, + 0x74, 0x19, 0x0d, 0x63, 0xe9, 0x26, 0xcc, 0x0a, 0xa3, 0x10, 0xeb, 0x9b, 0x72, 0xb5, 0xda, 0x85, + 0xab, 0x4c, 0x54, 0x31, 0xb4, 0xd3, 0x9b, 0xed, 0x85, 0xda, 0xf7, 0x32, 0x50, 0x89, 0x74, 0xf4, + 0x58, 0xe5, 0xa3, 0x76, 0xce, 0x0f, 0xcd, 0x76, 0xbe, 0x89, 0xb4, 0x02, 0xa3, 0xe4, 0x1d, 0x57, + 0x96, 0x07, 0x9f, 0x4b, 0xf3, 0xbc, 0xe9, 0xa3, 0xfe, 0x84, 0x0e, 0xd3, 0x09, 0x59, 0x1a, 0x3c, + 0xec, 0x89, 0xdf, 0xea, 0x8b, 0x30, 0xc2, 0x5c, 0x47, 0x76, 0x1c, 0xb1, 0xd4, 0x09, 0x01, 0xe3, + 0x91, 0xce, 0xe1, 0xb5, 0xaf, 0x2b, 0xb0, 0xd4, 0x91, 0x9f, 0x4c, 0xae, 0xaf, 0x00, 0xfb, 0x5f, + 0x01, 0xc3, 0x47, 0x5b, 0x2c, 0x42, 0x3b, 0xd3, 0xa9, 0x07, 0x8a, 0x4c, 0x47, 0x5b, 0x7a, 0xb1, + 0xc5, 0x7f, 0x62, 0x02, 0xbd, 0x56, 0x68, 0x79, 0x0d, 0xbe, 0x45, 0xd3, 0x91, 0xc0, 0x37, 0x29, + 0x98, 0xce, 0xe1, 0xb5, 0xbf, 0xcd, 0xc3, 0x02, 0xd9, 0x94, 0x5a, 0xc6, 0x8e, 0xe0, 0x9b, 0xdc, + 0x56, 0xf5, 0x27, 0xec, 0xf8, 0x7e, 0xae, 0x9c, 0xd4, 0x21, 0xed, 0xe7, 0x92, 0x6d, 0x48, 0x75, + 0x16, 0x86, 0xdf, 0xf3, 0x36, 0xa5, 0xfb, 0x7d, 0xef, 0x79, 0x9b, 0xb1, 0x9b, 0x62, 0xb9, 0x58, + 0x92, 0xf1, 0x5a, 0x2c, 0xb9, 0x85, 0xfe, 0x0f, 0xd0, 0x00, 0xea, 0x23, 0xa7, 0xb5, 0xd4, 0x60, + 0x36, 0x64, 0x29, 0xc2, 0x58, 0x67, 0x84, 0x39, 0x66, 0xef, 0x91, 0x3c, 0x13, 0xc7, 0x4a, 0xfe, + 0x19, 0x8a, 0x4c, 0x99, 0x18, 0x3f, 0xee, 0x47, 0x18, 0x56, 0x87, 0xf4, 0x19, 0x09, 0xa1, 0x00, + 0x51, 0x3f, 0x07, 0x93, 0x6c, 0x6b, 0x36, 0xea, 0x83, 0x3e, 0x4d, 0xf2, 0x54, 0x5f, 0x7d, 0xd0, + 0x8c, 0xff, 0xd5, 0x21, 0x7d, 0x82, 0xa2, 0x89, 0x30, 0xbf, 0x07, 0x73, 0xf4, 0xa5, 0x82, 0x7a, + 0x72, 0x0c, 0xa5, 0xb4, 0xe3, 0xb1, 0x0e, 0xf8, 0x57, 0x24, 0x14, 0xab, 0x43, 0xfa, 0xac, 0x8c, + 0x32, 0xea, 0xeb, 0x0b, 0xa0, 0x92, 0xff, 0x7a, 0x88, 0xf7, 0x43, 0xef, 0xa7, 0x3c, 0xdd, 0x57, + 0x3f, 0xb7, 0x58, 0xf3, 0xd5, 0x21, 0x7d, 0x8a, 0xa3, 0x8a, 0xf0, 0x3f, 0x80, 0x09, 0x9a, 0x84, + 0x1f, 0x21, 0x1f, 0x4f, 0x3b, 0x18, 0xea, 0x80, 0x9c, 0xe4, 0x11, 0xac, 0x0e, 0xe9, 0xe3, 0x04, + 0x89, 0x28, 0x8e, 0x2f, 0xb4, 0xa7, 0x60, 0x31, 0x55, 0xa7, 0xa3, 0xac, 0x88, 0xf9, 0x8d, 0xd0, + 0x6b, 0x1e, 0x44, 0xe5, 0x23, 0x3d, 0xce, 0xa4, 0xeb, 0x71, 0x3c, 0x59, 0xbe, 0x4b, 0x8c, 0xaa, + 0x9d, 0xc4, 0x33, 0xaf, 0x9d, 0x0a, 0x46, 0xe4, 0x7d, 0x38, 0xc5, 0x77, 0x74, 0x8e, 
0x8e, 0x4e, + 0xed, 0x83, 0x1c, 0x54, 0x3a, 0xa1, 0x65, 0xe6, 0x68, 0x1d, 0xc6, 0x05, 0x27, 0xe9, 0x11, 0xa5, + 0x42, 0xd6, 0xb6, 0xcb, 0x1d, 0xd6, 0xb6, 0xc4, 0x44, 0x21, 0xe7, 0x94, 0x9e, 0xfc, 0xd9, 0x89, + 0x67, 0xaf, 0x40, 0x9e, 0xfe, 0x29, 0x14, 0x3d, 0x02, 0xbd, 0xd2, 0x17, 0x7e, 0xfa, 0xd7, 0x50, + 0xb4, 0x61, 0x22, 0x4c, 0xcd, 0x0d, 0x1e, 0x49, 0xbf, 0x0c, 0x10, 0x3d, 0x98, 0xc3, 0x8c, 0x7b, + 0x1f, 0x08, 0x2c, 0xfe, 0x4e, 0x8e, 0x7a, 0x1d, 0x66, 0x43, 0x2f, 0x94, 0xa7, 0xbc, 0x74, 0x60, + 0x96, 0xd5, 0xa7, 0x49, 0x65, 0x34, 0x03, 0xc9, 0x91, 0xd9, 0x27, 0xa1, 0x2c, 0xfe, 0x4e, 0x2e, + 0xd9, 0x8c, 0x3e, 0x52, 0x3b, 0xc7, 0xeb, 0x13, 0x2d, 0x9f, 0x87, 0x13, 0xfc, 0xec, 0x23, 0xd9, + 0xb0, 0x40, 0x1a, 0xce, 0xb2, 0xea, 0x44, 0xbb, 0x6e, 0x17, 0xe6, 0x22, 0xcd, 0x85, 0xd8, 0x5d, + 0xdd, 0xff, 0x0b, 0x0b, 0x38, 0xb6, 0x8c, 0x73, 0xff, 0x17, 0x98, 0x18, 0xa9, 0xfd, 0xae, 0x02, + 0x8b, 0xa9, 0x14, 0xa4, 0xe9, 0x29, 0xfb, 0xe3, 0x98, 0x94, 0x65, 0xa2, 0x83, 0x1d, 0xa1, 0xff, + 0x1c, 0xe3, 0xc9, 0x9f, 0x7d, 0x87, 0xcd, 0xff, 0xa0, 0x80, 0x26, 0xbf, 0x9b, 0x28, 0xd6, 0x1d, + 0xbe, 0x38, 0xf7, 0xc3, 0xa3, 0xf8, 0xaa, 0x9f, 0x39, 0xc0, 0xaa, 0xdf, 0xed, 0x56, 0xf7, 0x51, + 0xf8, 0x3d, 0xda, 0xbb, 0x70, 0xb6, 0xeb, 0x30, 0x99, 0x20, 0x24, 0xef, 0x43, 0x19, 0xcc, 0xfb, + 0x58, 0xfe, 0x57, 0xe5, 0xfb, 0x3f, 0xaa, 0x0c, 0xfd, 0xe0, 0x47, 0x95, 0xa1, 0x9f, 0xfd, 0xa8, + 0xa2, 0xfc, 0xbf, 0x27, 0x15, 0xe5, 0x9b, 0x4f, 0x2a, 0xca, 0x5f, 0x3f, 0xa9, 0x28, 0xdf, 0x7f, + 0x52, 0x51, 0xfe, 0xfe, 0x49, 0x45, 0xf9, 0xe9, 0x93, 0xca, 0xd0, 0xcf, 0x9e, 0x54, 0x94, 0xf7, + 0x7f, 0x5c, 0x19, 0xfa, 0xfe, 0x8f, 0x2b, 0x43, 0x3f, 0xf8, 0x71, 0x65, 0x08, 0x34, 0xc7, 0xeb, + 0x15, 0xd4, 0x2e, 0xcf, 0x70, 0x5f, 0x8c, 0x91, 0xb9, 0x8e, 0x67, 0xf3, 0xba, 0xf2, 0xf6, 0x73, + 0x35, 0xa9, 0xad, 0xe3, 0x75, 0xf8, 0x17, 0xd7, 0x4f, 0x25, 0x8a, 0xfe, 0x24, 0x73, 0xe6, 0x3e, + 0x6b, 0xe4, 0x78, 0xd5, 0x9b, 0x4d, 0x47, 0x38, 0x1d, 0x1b, 0xac, 0xcb, 0x87, 0xd7, 0xfe, 0x29, + 0x73, 0x2e, 0x82, 0xb9, 0x71, 0xe3, 0x66, 0xd3, 0xb9, 0x71, 0x23, 0x01, 0x75, 0xe3, 0xc6, 0xc3, + 0x6b, 0x9b, 0xc3, 0xc4, 0xb0, 0x3c, 0xfb, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xaa, 0x2b, 0x3e, + 0x4f, 0x4a, 0x76, 0x00, 0x00, } func (this *RegisterNamespaceRequest) Equal(that interface{}) bool { @@ -11362,6 +11438,46 @@ if this.Count != that1.Count { return false } + if len(this.Groups) != len(that1.Groups) { + return false + } + for i := range this.Groups { + if !this.Groups[i].Equal(that1.Groups[i]) { + return false + } + } + return true +} +func (this *CountWorkflowExecutionsResponse_AggregationGroup) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*CountWorkflowExecutionsResponse_AggregationGroup) + if !ok { + that2, ok := that.(CountWorkflowExecutionsResponse_AggregationGroup) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.GroupValues) != len(that1.GroupValues) { + return false + } + for i := range this.GroupValues { + if !this.GroupValues[i].Equal(that1.GroupValues[i]) { + return false + } + } + if this.Count != that1.Count { + return false + } return true } func (this *GetSearchAttributesRequest) Equal(that interface{}) bool { @@ -11882,6 +11998,9 @@ if this.SdkMetadata != that1.SdkMetadata { return false } + if this.CountGroupByExecutionStatus != that1.CountGroupByExecutionStatus { + return false + } return true } func (this *ListTaskQueuePartitionsRequest) Equal(that interface{}) bool 
{ @@ -12612,9 +12731,6 @@ } else if this == nil { return false } - if this.VersionSetId != that1.VersionSetId { - return false - } return true } func (this *GetWorkerBuildIdCompatibilityRequest) Equal(that interface{}) bool { @@ -14382,9 +14498,25 @@ if this == nil { return "nil" } - s := make([]string, 0, 5) + s := make([]string, 0, 6) s = append(s, "&workflowservice.CountWorkflowExecutionsResponse{") s = append(s, "Count: "+fmt.Sprintf("%#v", this.Count)+",\n") + if this.Groups != nil { + s = append(s, "Groups: "+fmt.Sprintf("%#v", this.Groups)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *CountWorkflowExecutionsResponse_AggregationGroup) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&workflowservice.CountWorkflowExecutionsResponse_AggregationGroup{") + if this.GroupValues != nil { + s = append(s, "GroupValues: "+fmt.Sprintf("%#v", this.GroupValues)+",\n") + } + s = append(s, "Count: "+fmt.Sprintf("%#v", this.Count)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -14631,7 +14763,7 @@ if this == nil { return "nil" } - s := make([]string, 0, 13) + s := make([]string, 0, 14) s = append(s, "&workflowservice.GetSystemInfoResponse_Capabilities{") s = append(s, "SignalAndQueryHeader: "+fmt.Sprintf("%#v", this.SignalAndQueryHeader)+",\n") s = append(s, "InternalErrorDifferentiation: "+fmt.Sprintf("%#v", this.InternalErrorDifferentiation)+",\n") @@ -14642,6 +14774,7 @@ s = append(s, "UpsertMemo: "+fmt.Sprintf("%#v", this.UpsertMemo)+",\n") s = append(s, "EagerWorkflowStart: "+fmt.Sprintf("%#v", this.EagerWorkflowStart)+",\n") s = append(s, "SdkMetadata: "+fmt.Sprintf("%#v", this.SdkMetadata)+",\n") + s = append(s, "CountGroupByExecutionStatus: "+fmt.Sprintf("%#v", this.CountGroupByExecutionStatus)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -14942,9 +15075,8 @@ if this == nil { return "nil" } - s := make([]string, 0, 5) + s := make([]string, 0, 4) s = append(s, "&workflowservice.UpdateWorkerBuildIdCompatibilityResponse{") - s = append(s, "VersionSetId: "+fmt.Sprintf("%#v", this.VersionSetId)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -19380,6 +19512,20 @@ _ = i var l int _ = l + if len(m.Groups) > 0 { + for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Groups[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRequestResponse(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } if m.Count != 0 { i = encodeVarintRequestResponse(dAtA, i, uint64(m.Count)) i-- @@ -19388,6 +19534,48 @@ return len(dAtA) - i, nil } +func (m *CountWorkflowExecutionsResponse_AggregationGroup) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CountWorkflowExecutionsResponse_AggregationGroup) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CountWorkflowExecutionsResponse_AggregationGroup) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Count != 0 { + i = encodeVarintRequestResponse(dAtA, i, uint64(m.Count)) + i-- + dAtA[i] = 0x10 + } + if len(m.GroupValues) > 0 { + for iNdEx := len(m.GroupValues) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.GroupValues[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= 
size + i = encodeVarintRequestResponse(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + func (m *GetSearchAttributesRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -20143,6 +20331,16 @@ _ = i var l int _ = l + if m.CountGroupByExecutionStatus { + i-- + if m.CountGroupByExecutionStatus { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x50 + } if m.SdkMetadata { i-- if m.SdkMetadata { @@ -21231,13 +21429,6 @@ _ = i var l int _ = l - if len(m.VersionSetId) > 0 { - i -= len(m.VersionSetId) - copy(dAtA[i:], m.VersionSetId) - i = encodeVarintRequestResponse(dAtA, i, uint64(len(m.VersionSetId))) - i-- - dAtA[i] = 0xa - } return len(dAtA) - i, nil } @@ -23895,6 +24086,30 @@ if m.Count != 0 { n += 1 + sovRequestResponse(uint64(m.Count)) } + if len(m.Groups) > 0 { + for _, e := range m.Groups { + l = e.Size() + n += 1 + l + sovRequestResponse(uint64(l)) + } + } + return n +} + +func (m *CountWorkflowExecutionsResponse_AggregationGroup) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.GroupValues) > 0 { + for _, e := range m.GroupValues { + l = e.Size() + n += 1 + l + sovRequestResponse(uint64(l)) + } + } + if m.Count != 0 { + n += 1 + sovRequestResponse(uint64(m.Count)) + } return n } @@ -24232,6 +24447,9 @@ if m.SdkMetadata { n += 2 } + if m.CountGroupByExecutionStatus { + n += 2 + } return n } @@ -24679,10 +24897,6 @@ } var l int _ = l - l = len(m.VersionSetId) - if l > 0 { - n += 1 + l + sovRequestResponse(uint64(l)) - } return n } @@ -26102,8 +26316,30 @@ if this == nil { return "nil" } + repeatedStringForGroups := "[]*CountWorkflowExecutionsResponse_AggregationGroup{" + for _, f := range this.Groups { + repeatedStringForGroups += strings.Replace(fmt.Sprintf("%v", f), "CountWorkflowExecutionsResponse_AggregationGroup", "CountWorkflowExecutionsResponse_AggregationGroup", 1) + "," + } + repeatedStringForGroups += "}" s := strings.Join([]string{`&CountWorkflowExecutionsResponse{`, `Count:` + fmt.Sprintf("%v", this.Count) + `,`, + `Groups:` + repeatedStringForGroups + `,`, + `}`, + }, "") + return s +} +func (this *CountWorkflowExecutionsResponse_AggregationGroup) String() string { + if this == nil { + return "nil" + } + repeatedStringForGroupValues := "[]*Payload{" + for _, f := range this.GroupValues { + repeatedStringForGroupValues += strings.Replace(fmt.Sprintf("%v", f), "Payload", "v13.Payload", 1) + "," + } + repeatedStringForGroupValues += "}" + s := strings.Join([]string{`&CountWorkflowExecutionsResponse_AggregationGroup{`, + `GroupValues:` + repeatedStringForGroupValues + `,`, + `Count:` + fmt.Sprintf("%v", this.Count) + `,`, `}`, }, "") return s @@ -26338,6 +26574,7 @@ `UpsertMemo:` + fmt.Sprintf("%v", this.UpsertMemo) + `,`, `EagerWorkflowStart:` + fmt.Sprintf("%v", this.EagerWorkflowStart) + `,`, `SdkMetadata:` + fmt.Sprintf("%v", this.SdkMetadata) + `,`, + `CountGroupByExecutionStatus:` + fmt.Sprintf("%v", this.CountGroupByExecutionStatus) + `,`, `}`, }, "") return s @@ -26640,7 +26877,6 @@ return "nil" } s := strings.Join([]string{`&UpdateWorkerBuildIdCompatibilityResponse{`, - `VersionSetId:` + fmt.Sprintf("%v", this.VersionSetId) + `,`, `}`, }, "") return s @@ -39648,6 +39884,146 @@ break } } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRequestResponse + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRequestResponse + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRequestResponse + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Groups = append(m.Groups, &CountWorkflowExecutionsResponse_AggregationGroup{}) + if err := m.Groups[len(m.Groups)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRequestResponse(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRequestResponse + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthRequestResponse + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CountWorkflowExecutionsResponse_AggregationGroup) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRequestResponse + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AggregationGroup: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AggregationGroup: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GroupValues", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRequestResponse + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRequestResponse + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRequestResponse + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.GroupValues = append(m.GroupValues, &v13.Payload{}) + if err := m.GroupValues[len(m.GroupValues)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + m.Count = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRequestResponse + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Count |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipRequestResponse(dAtA[iNdEx:]) @@ -42089,6 +42465,26 @@ } } m.SdkMetadata = bool(v != 0) + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CountGroupByExecutionStatus", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRequestResponse + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.CountGroupByExecutionStatus = bool(v != 0) default: iNdEx = preIndex skippy, err := skipRequestResponse(dAtA[iNdEx:]) @@ -44992,38 +45388,6 @@ return fmt.Errorf("proto: 
UpdateWorkerBuildIdCompatibilityResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VersionSetId", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRequestResponse - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRequestResponse - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRequestResponse - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.VersionSetId = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipRequestResponse(dAtA[iNdEx:]) diff -Nru temporal-1.21.5-1/src/vendor/go.temporal.io/api/workflowservice/v1/service.pb.go temporal-1.22.5/src/vendor/go.temporal.io/api/workflowservice/v1/service.pb.go --- temporal-1.21.5-1/src/vendor/go.temporal.io/api/workflowservice/v1/service.pb.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.temporal.io/api/workflowservice/v1/service.pb.go 2024-02-23 09:46:13.000000000 +0000 @@ -31,6 +31,7 @@ math "math" proto "github.com/gogo/protobuf/proto" + _ "google.golang.org/genproto/googleapis/api/annotations" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" @@ -52,98 +53,141 @@ } var fileDescriptor_bded41be6e20a31f = []byte{ - // 1447 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x9a, 0xcd, 0x8b, 0x1c, 0xc5, - 0x1b, 0xc7, 0xa7, 0x72, 0xf8, 0x1d, 0x9a, 0x9f, 0x46, 0xdb, 0x37, 0x8c, 0xda, 0xd1, 0xe0, 0xd5, - 0x59, 0x56, 0xc9, 0x9a, 0xcc, 0x26, 0xbb, 0xce, 0xcb, 0xee, 0xec, 0xfa, 0x96, 0xcd, 0xce, 0x1a, - 0xc1, 0x8b, 0xf4, 0xf6, 0x3c, 0xd9, 0x29, 0x76, 0x66, 0xba, 0xad, 0xae, 0x99, 0xb8, 0xb7, 0x80, - 0x20, 0x08, 0x82, 0x22, 0x08, 0x82, 0xe0, 0x49, 0x51, 0x02, 0x82, 0x20, 0x08, 0x9e, 0xbc, 0x7a, - 0xdc, 0x63, 0x8e, 0xee, 0xec, 0x45, 0x72, 0xca, 0x9f, 0x20, 0x3d, 0x33, 0x55, 0x3b, 0xd5, 0x5d, - 0xd5, 0xfd, 0x74, 0x77, 0x6e, 0x49, 0xe6, 0xf9, 0x7c, 0xeb, 0x9b, 0xe7, 0xa9, 0x7a, 0xaa, 0xab, - 0xba, 0xad, 0xd7, 0x38, 0x0c, 0x02, 0x9f, 0xb9, 0xfd, 0x25, 0x37, 0xa0, 0x4b, 0x77, 0x7c, 0x76, - 0x78, 0xbb, 0xef, 0xdf, 0x09, 0x81, 0x8d, 0xa9, 0x07, 0x4b, 0xe3, 0xe5, 0xa5, 0xf9, 0x1f, 0xab, - 0x01, 0xf3, 0xb9, 0x6f, 0x5f, 0x14, 0xe1, 0x55, 0x37, 0xa0, 0xd5, 0x58, 0x78, 0x75, 0xbc, 0x7c, - 0x61, 0x25, 0x4b, 0x8f, 0xc1, 0x27, 0x23, 0x08, 0xf9, 0xc7, 0x0c, 0xc2, 0xc0, 0x1f, 0x86, 0x73, - 0xe1, 0xd7, 0x7f, 0x6a, 0x5b, 0xe7, 0x3f, 0x9c, 0x47, 0x77, 0x66, 0xd1, 0xf6, 0x97, 0xc4, 0x7a, - 0x72, 0x17, 0x0e, 0x68, 0xc8, 0x81, 0xbd, 0xef, 0x0e, 0x20, 0x0c, 0x5c, 0x0f, 0xec, 0xab, 0xd5, - 0x0c, 0x0f, 0xd5, 0x04, 0xb3, 0x3b, 0x1b, 0xf3, 0x42, 0xad, 0x08, 0x3a, 0xb3, 0x79, 0xa9, 0x32, - 0xb5, 0xd3, 0x82, 0xd0, 0x63, 0x74, 0x1f, 0xf2, 0xd8, 0x49, 0x30, 0x78, 0x3b, 0x1a, 0x54, 0xda, - 0xf9, 0x8c, 0x58, 0x8f, 0xbf, 0x4b, 0x43, 0x2e, 0x7f, 0x0b, 0xed, 0x95, 0x4c, 0x41, 0x15, 0x10, - 0x46, 0xde, 0xcc, 0xcd, 0x49, 0x17, 0x9f, 0x13, 0xeb, 0xfc, 0x07, 0x41, 0xd7, 0xe5, 0x0b, 0x29, - 0xc9, 0x96, 0x8b, 0x11, 0xc2, 0xc7, 0x95, 0xfc, 0xa0, 0x34, 0xf2, 0x15, 0xb1, 0xec, 0x16, 0x04, - 0x0c, 0x3c, 0xc5, 0x0b, 0x26, 0xc7, 0x71, 0x48, 0xd8, 0x59, 0x2d, 0xc4, 0x4a, 0x47, 0xdf, 0x13, - 0xeb, 0xd9, 0x0e, 
0x77, 0x19, 0x17, 0xf3, 0x7a, 0xe3, 0x53, 0xf0, 0x46, 0x9c, 0xfa, 0x43, 0x7b, - 0x2d, 0x53, 0x59, 0x0f, 0x0a, 0x67, 0xeb, 0x85, 0x79, 0xe9, 0xee, 0x1e, 0xb1, 0x5e, 0x68, 0x43, - 0x32, 0x64, 0x8b, 0x86, 0xdc, 0x67, 0x47, 0x76, 0x33, 0x73, 0x88, 0x14, 0x5a, 0xf8, 0x6c, 0x95, - 0x13, 0x91, 0x66, 0xff, 0x22, 0xd6, 0xa5, 0xd4, 0xc8, 0x31, 0xb0, 0x10, 0xec, 0xb7, 0xcb, 0x0d, - 0x37, 0x15, 0x11, 0xd6, 0xdf, 0x79, 0x24, 0x5a, 0xf2, 0x7f, 0xf0, 0x1d, 0xb1, 0x9e, 0xd9, 0xf1, - 0xfb, 0x7d, 0x41, 0xec, 0xb9, 0xe1, 0xe1, 0xcd, 0x11, 0x8c, 0xc0, 0xbe, 0x9e, 0x39, 0x90, 0x96, - 0x13, 0x3e, 0xd7, 0x8a, 0xe2, 0xd2, 0xda, 0xaf, 0xc4, 0x7a, 0x71, 0xf6, 0xd7, 0xee, 0x62, 0x58, - 0xd3, 0x1f, 0x04, 0x7d, 0xe0, 0xd0, 0xb5, 0x5b, 0x88, 0xb6, 0x69, 0xc6, 0x85, 0xd1, 0x8d, 0x92, - 0x2a, 0xd2, 0xef, 0x8f, 0xc4, 0x7a, 0x5e, 0x13, 0xba, 0xe9, 0xd2, 0x3e, 0x74, 0xed, 0x7a, 0x91, - 0x61, 0x66, 0xac, 0x70, 0xda, 0x28, 0x23, 0x91, 0xa8, 0x78, 0xdd, 0xe3, 0x74, 0x4c, 0xf9, 0x51, - 0xde, 0x8a, 0x27, 0xb8, 0x7c, 0x15, 0xd7, 0xe0, 0xca, 0xda, 0xdf, 0x05, 0xcf, 0x67, 0xdd, 0xc5, - 0xa8, 0x2d, 0x70, 0x19, 0xdf, 0x07, 0x97, 0x23, 0xd6, 0x7e, 0x0a, 0x8d, 0x5f, 0xfb, 0xa9, 0x22, - 0xd2, 0xec, 0x1f, 0xc4, 0xba, 0x98, 0x12, 0xd9, 0x38, 0xda, 0xee, 0xda, 0xed, 0x32, 0x63, 0x45, - 0x0a, 0xc2, 0xf4, 0x56, 0x79, 0x21, 0xdd, 0xba, 0x5a, 0x0c, 0x2f, 0xb0, 0xae, 0xb4, 0x78, 0xee, - 0x75, 0x65, 0x50, 0x91, 0x7e, 0xff, 0x24, 0xd6, 0xcb, 0x69, 0xa1, 0xd3, 0x4c, 0x6f, 0x95, 0x1a, - 0x6d, 0x31, 0xd5, 0xdb, 0x8f, 0x40, 0x49, 0xd7, 0x13, 0x16, 0xc3, 0xf3, 0xf6, 0x84, 0x24, 0x9b, - 0xbb, 0x27, 0xe8, 0x24, 0xa4, 0xcd, 0xdf, 0x88, 0xf5, 0x92, 0x31, 0x6e, 0x9a, 0xdf, 0x8d, 0xe2, - 0xe3, 0x2c, 0x26, 0x77, 0xb3, 0xac, 0x4c, 0xac, 0x57, 0x24, 0x0b, 0xe1, 0x0e, 0x3d, 0x88, 0x72, - 0xdb, 0x2c, 0x54, 0xc6, 0x39, 0x9d, 0xa7, 0x57, 0xa4, 0x88, 0xc4, 0x7a, 0x85, 0x31, 0x12, 0xdd, - 0x2b, 0x52, 0x15, 0xf2, 0xf4, 0x8a, 0x0c, 0x21, 0x69, 0xfc, 0x77, 0x62, 0x39, 0x73, 0xdd, 0x59, - 0x44, 0xf2, 0x99, 0x11, 0x53, 0xd2, 0x34, 0x01, 0x61, 0xbb, 0x5d, 0x5a, 0x47, 0xba, 0xfe, 0x81, - 0x58, 0xcf, 0x75, 0xe8, 0xc1, 0xd0, 0xd5, 0xd8, 0x45, 0x3c, 0xa2, 0xea, 0x49, 0xe1, 0xf3, 0xad, - 0xe2, 0x02, 0x4a, 0x4b, 0x9b, 0x47, 0x51, 0xde, 0x33, 0x3c, 0x8c, 0x6f, 0x61, 0x07, 0x32, 0x4a, - 0xe0, 0x5b, 0x5a, 0xb6, 0x92, 0x72, 0x7c, 0xd8, 0x85, 0x10, 0x0a, 0x1d, 0x1f, 0xf4, 0x20, 0xfe, - 0xf8, 0x60, 0xe2, 0xa5, 0xbb, 0x9f, 0x89, 0x75, 0x61, 0x0f, 0xd8, 0x80, 0x0e, 0x5d, 0x0e, 0x49, - 0x87, 0xd9, 0xed, 0xd2, 0x0c, 0x0b, 0x97, 0xcd, 0x52, 0x1a, 0xca, 0x24, 0x6d, 0x41, 0xb4, 0x69, - 0x14, 0x99, 0xa4, 0x06, 0x12, 0x3f, 0x49, 0x8d, 0x02, 0x4a, 0x2a, 0xa3, 0xf3, 0xf5, 0x8d, 0x00, - 0x86, 0x89, 0xb8, 0x10, 0x91, 0x4a, 0x33, 0x8c, 0x4f, 0x65, 0x9a, 0x86, 0xf2, 0x44, 0x13, 0x05, - 0x36, 0xfb, 0x7e, 0x08, 0x5d, 0x8d, 0xd7, 0x16, 0x6a, 0x1c, 0x13, 0x8e, 0x7f, 0xa2, 0x49, 0x57, - 0x51, 0x96, 0x50, 0x14, 0xaa, 0x71, 0xba, 0x86, 0x1a, 0xc3, 0xec, 0x71, 0xbd, 0x30, 0xaf, 0xf4, - 0xfc, 0x28, 0xa8, 0xce, 0xbc, 0x1e, 0x1d, 0x6b, 0xf3, 0xb9, 0x89, 0x1a, 0xc5, 0x2c, 0x80, 0xef, - 0xf9, 0x59, 0x3a, 0xea, 0xad, 0x86, 0xe7, 0x0e, 0x0b, 0xe5, 0x54, 0x0f, 0xe6, 0xb8, 0xd5, 0x30, - 0xf0, 0xca, 0x62, 0x6f, 0xfa, 0xa3, 0xa1, 0xae, 0xe4, 0xd9, 0xf2, 0x06, 0x12, 0xbf, 0xd8, 0x8d, - 0x02, 0xd2, 0xe0, 0x37, 0xc4, 0x7a, 0xaa, 0x0d, 0xbc, 0x03, 0x2e, 0xf3, 0x7a, 0x75, 0xce, 0x19, - 0xdd, 0x1f, 0x71, 0x08, 0xed, 0x55, 0xcc, 0x75, 0x43, 0x9c, 0x12, 0xc6, 0xae, 0x15, 0x83, 0x75, - 0x4f, 0xcf, 0x37, 0x47, 0xc0, 0x62, 0xc7, 0x14, 0xf4, 0xd3, 0x73, 0x92, 0xcd, 0xfd, 0xf4, 0xac, - 0x93, 0x90, 0x36, 0xbf, 0x25, 0xd6, 0xd3, 
0xd3, 0x8d, 0xa9, 0xc3, 0xa9, 0x77, 0xb8, 0x70, 0xa0, - 0xbe, 0x86, 0xdb, 0xcf, 0x62, 0x98, 0x30, 0x77, 0xbd, 0x20, 0x2d, 0x7d, 0xdd, 0x25, 0xd6, 0x63, - 0x53, 0xe3, 0xa2, 0xf2, 0xf6, 0xe5, 0x4c, 0x49, 0x25, 0x5e, 0x38, 0x59, 0xc9, 0x8b, 0x29, 0x15, - 0x14, 0x97, 0xc5, 0xc9, 0x6d, 0xae, 0x8e, 0xbe, 0x68, 0x36, 0x6e, 0x74, 0x8d, 0x32, 0x12, 0xda, - 0x2b, 0xf4, 0xb3, 0xf2, 0xe1, 0xaf, 0xd0, 0x13, 0xb5, 0xab, 0x15, 0x41, 0x95, 0x2b, 0xf4, 0x36, - 0xf0, 0x66, 0x7f, 0x14, 0x72, 0x60, 0xdb, 0xc3, 0xdb, 0x3e, 0xe2, 0x0a, 0x5d, 0x05, 0xf0, 0x57, - 0xe8, 0x71, 0x4e, 0x99, 0x3e, 0xd1, 0xfa, 0x3c, 0x0a, 0x39, 0x0c, 0xa6, 0x26, 0x2e, 0xa3, 0xd6, - 0xb3, 0x8c, 0xc7, 0x4f, 0x9f, 0x18, 0xa6, 0xb4, 0xcd, 0x68, 0x07, 0x90, 0x49, 0xda, 0x71, 0x19, - 0xa7, 0xd8, 0xb6, 0x69, 0x20, 0xf1, 0x6d, 0xd3, 0x28, 0xa0, 0x54, 0xaa, 0xc9, 0xc0, 0xe5, 0xd0, - 0xf1, 0x7a, 0xd0, 0x1d, 0xf5, 0x01, 0x51, 0x29, 0x15, 0xc0, 0x57, 0x2a, 0xce, 0x49, 0x17, 0x5f, - 0x10, 0xeb, 0x09, 0x31, 0x9f, 0xa4, 0x8f, 0x2b, 0xe8, 0x29, 0x18, 0x77, 0x72, 0xb5, 0x00, 0xa9, - 0x64, 0x64, 0xf6, 0x36, 0x24, 0x47, 0x46, 0x54, 0x00, 0x9f, 0x91, 0x38, 0xa7, 0xcc, 0xdd, 0x1d, - 0x97, 0x7b, 0x3d, 0x69, 0x22, 0x7b, 0xee, 0x2a, 0xf1, 0xf8, 0xb9, 0x1b, 0xc3, 0x94, 0xd6, 0x17, - 0x4d, 0x20, 0xf1, 0xd3, 0x7b, 0x51, 0x1c, 0x1d, 0x1e, 0xec, 0xd1, 0x01, 0x84, 0x88, 0xd6, 0x67, - 0x64, 0xf1, 0xad, 0x2f, 0x45, 0x42, 0xa9, 0xd7, 0xec, 0x2c, 0x90, 0xa3, 0x5e, 0x2a, 0x80, 0xaf, - 0x57, 0x9c, 0x53, 0xea, 0xb5, 0xe8, 0x36, 0x44, 0xd4, 0x4b, 0x89, 0xc7, 0xd7, 0x2b, 0x86, 0x29, - 0x67, 0xf2, 0xd9, 0x7c, 0x8a, 0x76, 0x0a, 0x60, 0x8d, 0x11, 0xed, 0x77, 0xb7, 0xbb, 0xd1, 0x86, - 0xef, 0x72, 0xba, 0x4f, 0xfb, 0x94, 0x1f, 0x21, 0xce, 0xe4, 0x59, 0x12, 0xf8, 0x33, 0x79, 0xb6, - 0x92, 0x72, 0x7f, 0x37, 0x7f, 0xed, 0x63, 0x30, 0xbe, 0x81, 0x7d, 0x6d, 0x94, 0xee, 0x7a, 0xb3, - 0xac, 0x8c, 0xb2, 0x3c, 0x64, 0x6c, 0xd4, 0x64, 0x77, 0xc1, 0xf5, 0x7a, 0xee, 0xdc, 0x6e, 0x1d, - 0x3f, 0x4e, 0x9c, 0xc5, 0x2f, 0x8f, 0x14, 0x09, 0x65, 0x07, 0x3a, 0x2b, 0x44, 0xde, 0x53, 0xba, - 0x81, 0xc4, 0xef, 0x40, 0x46, 0x01, 0xe5, 0x1e, 0x74, 0xf1, 0x4d, 0x9a, 0x8c, 0x99, 0xa1, 0x88, - 0x7b, 0xd0, 0x14, 0x1a, 0x7f, 0x0f, 0x9a, 0x2a, 0xa2, 0x9c, 0x32, 0xa6, 0x17, 0x4c, 0x8d, 0xa8, - 0x1b, 0xdd, 0x08, 0x80, 0xb9, 0xd3, 0x4c, 0xae, 0xe2, 0xde, 0x1b, 0xab, 0x14, 0xfe, 0x94, 0xa1, - 0x85, 0x95, 0x37, 0xf4, 0x1d, 0xee, 0x07, 0x31, 0x4f, 0x35, 0x84, 0x6c, 0x1c, 0xc2, 0xbf, 0xa1, - 0xd7, 0xb1, 0xca, 0x59, 0x56, 0x6c, 0xb1, 0x31, 0x57, 0x6b, 0xe8, 0xbd, 0x59, 0xef, 0x6c, 0xbd, - 0x30, 0xaf, 0x14, 0x31, 0x6a, 0xa2, 0x6a, 0x00, 0xe6, 0xa8, 0xa8, 0xa1, 0xf0, 0x45, 0xd4, 0xc2, - 0xc2, 0x54, 0xe3, 0x01, 0x39, 0x3e, 0x71, 0x2a, 0xf7, 0x4f, 0x9c, 0xca, 0xc3, 0x13, 0x87, 0xdc, - 0x9d, 0x38, 0xe4, 0x97, 0x89, 0x43, 0xfe, 0x9e, 0x38, 0xe4, 0x78, 0xe2, 0x90, 0x7f, 0x26, 0x0e, - 0xf9, 0x77, 0xe2, 0x54, 0x1e, 0x4e, 0x1c, 0xf2, 0xf5, 0xa9, 0x53, 0x39, 0x3e, 0x75, 0x2a, 0xf7, - 0x4f, 0x9d, 0x8a, 0x75, 0x89, 0xfa, 0x59, 0x03, 0x37, 0xfe, 0x3f, 0xff, 0xee, 0x67, 0x87, 0xf9, - 0xdc, 0xdf, 0x21, 0x1f, 0x5d, 0x3e, 0x58, 0x60, 0xa8, 0x6f, 0xf8, 0xa0, 0x68, 0x35, 0xf6, 0x4f, - 0xf7, 0xce, 0xbd, 0xb2, 0x37, 0x87, 0xa8, 0x5f, 0xad, 0x07, 0xb4, 0x1a, 0xfb, 0xac, 0xa8, 0x7a, - 0x6b, 0xf9, 0xc1, 0xb9, 0x57, 0xcf, 0x62, 0x6a, 0xb5, 0x7a, 0x40, 0x6b, 0xb5, 0x58, 0x54, 0xad, - 0x76, 0x6b, 0x79, 0xff, 0x7f, 0xd3, 0x6f, 0x93, 0xde, 0xf8, 0x2f, 0x00, 0x00, 0xff, 0xff, 0x1b, - 0xe8, 0x9b, 0x71, 0x25, 0x25, 0x00, 0x00, + // 2129 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x5a, 
0x5f, 0x6c, 0x1c, 0x47, + 0x1d, 0xf6, 0xdc, 0x03, 0x0f, 0x23, 0xe2, 0xd2, 0x49, 0x6a, 0x2b, 0x4e, 0xb2, 0x0d, 0x2b, 0x40, + 0x28, 0xd2, 0xdd, 0xd6, 0x0d, 0x4e, 0xe2, 0x73, 0xec, 0xe4, 0x7c, 0x76, 0xec, 0x94, 0x96, 0xb8, + 0x3e, 0x37, 0x94, 0x46, 0x8a, 0xb5, 0xde, 0x9b, 0xd8, 0xd3, 0x9c, 0x6f, 0x37, 0x3b, 0x73, 0x0e, + 0x96, 0x55, 0x09, 0x21, 0x21, 0xde, 0x0a, 0x02, 0x21, 0x10, 0x15, 0xf0, 0x82, 0x04, 0x54, 0x42, + 0xaa, 0x78, 0x40, 0xe2, 0x11, 0x09, 0xa9, 0x20, 0x5e, 0x22, 0x55, 0x88, 0x4a, 0x80, 0x44, 0x9c, + 0x22, 0x55, 0x51, 0x05, 0xe1, 0x01, 0x21, 0x21, 0x1e, 0xaa, 0x99, 0x9d, 0xd9, 0xbb, 0xdd, 0xdb, + 0x3f, 0xb3, 0x7b, 0x7e, 0x89, 0xef, 0x72, 0xf3, 0x7d, 0xf3, 0x7d, 0xbf, 0xdf, 0xfc, 0x9f, 0x81, + 0x55, 0x86, 0x77, 0x3d, 0xd7, 0xb7, 0x3b, 0x96, 0xed, 0x11, 0xeb, 0xbe, 0xeb, 0xdf, 0xbd, 0xd3, + 0x71, 0xef, 0x53, 0xec, 0xef, 0x11, 0x07, 0x5b, 0x7b, 0xd3, 0x96, 0xfc, 0x58, 0xf3, 0x7c, 0x97, + 0xb9, 0xe8, 0x59, 0x55, 0xbc, 0x66, 0x7b, 0xa4, 0x16, 0x2b, 0x5e, 0xdb, 0x9b, 0x9e, 0xba, 0x90, + 0xc7, 0xe7, 0xe3, 0x7b, 0x3d, 0x4c, 0xd9, 0xa6, 0x8f, 0xa9, 0xe7, 0x76, 0xa9, 0x24, 0x9e, 0x3a, + 0xbd, 0xed, 0xba, 0xdb, 0x1d, 0x2c, 0x50, 0x76, 0xb7, 0xeb, 0x32, 0x9b, 0x11, 0xb7, 0x4b, 0x83, + 0x5f, 0x9f, 0xff, 0xe7, 0x2d, 0xf8, 0xd4, 0x97, 0x25, 0x57, 0x2b, 0xe0, 0x42, 0xbf, 0x00, 0xf0, + 0xe9, 0x75, 0xbc, 0x4d, 0x28, 0xc3, 0xfe, 0x97, 0xec, 0x5d, 0x4c, 0x3d, 0xdb, 0xc1, 0x68, 0xb6, + 0x96, 0xa3, 0xb0, 0x36, 0x84, 0x59, 0x0f, 0x14, 0x4d, 0xd5, 0xcb, 0x40, 0x03, 0x13, 0xe6, 0x99, + 0xaf, 0xbf, 0xf7, 0xc1, 0x77, 0x2b, 0x93, 0x26, 0x12, 0x06, 0xf6, 0xa6, 0xad, 0xae, 0x2a, 0x42, + 0xeb, 0xe0, 0x1c, 0xfa, 0x15, 0x80, 0x4f, 0x2f, 0x61, 0xea, 0xf8, 0x64, 0x0b, 0x17, 0xd1, 0x3a, + 0x84, 0xd1, 0xd7, 0x9a, 0x00, 0x95, 0x5a, 0x3f, 0x27, 0xb4, 0x9e, 0x45, 0xc6, 0xb0, 0x56, 0xeb, + 0x20, 0xfc, 0xfc, 0x06, 0xfa, 0x11, 0x80, 0xe3, 0x2f, 0x12, 0xca, 0x42, 0x06, 0x8a, 0x2e, 0xe4, + 0x56, 0x1b, 0x05, 0x28, 0xb9, 0x17, 0x0b, 0xe3, 0xa4, 0xd6, 0x29, 0xa1, 0xf5, 0x04, 0x4a, 0x88, + 0x2b, 0xfa, 0x35, 0x80, 0x4f, 0xbd, 0xe2, 0xb5, 0x6d, 0x36, 0x10, 0xd2, 0xfc, 0x8a, 0x62, 0x08, + 0xa5, 0xf0, 0x52, 0x71, 0xa0, 0x94, 0xf8, 0x9c, 0x90, 0x78, 0xce, 0xfc, 0x6c, 0x76, 0x38, 0xad, + 0x9e, 0xc0, 0xf3, 0xd6, 0xf0, 0x2d, 0x00, 0xd1, 0x12, 0xf6, 0x7c, 0xec, 0x44, 0xb4, 0xeb, 0xe4, + 0x34, 0x0e, 0x52, 0xf2, 0xe7, 0x4a, 0x61, 0xa5, 0x83, 0x31, 0xf4, 0x57, 0x00, 0x27, 0x5a, 0xcc, + 0xf6, 0x99, 0xea, 0x64, 0xcb, 0x5f, 0xc5, 0x4e, 0x8f, 0xf7, 0x40, 0xb4, 0x90, 0xcb, 0x9c, 0x0c, + 0x54, 0xca, 0xae, 0x94, 0xc6, 0x4b, 0x75, 0x0d, 0x11, 0xdf, 0x39, 0xf3, 0x42, 0x4e, 0x7c, 0x43, + 0x66, 0xeb, 0x40, 0x7d, 0xdc, 0x24, 0xed, 0x37, 0x78, 0xc0, 0xff, 0x07, 0xe0, 0xa9, 0x15, 0x3c, + 0x5c, 0xc7, 0x2a, 0xa1, 0xcc, 0xf5, 0xf7, 0x51, 0x33, 0x57, 0x63, 0x06, 0x5a, 0x19, 0x5d, 0x1a, + 0x8d, 0x44, 0xba, 0x5d, 0x13, 0x6e, 0x5f, 0x40, 0xab, 0xfa, 0x6e, 0xb1, 0xe2, 0xaa, 0x0d, 0xfa, + 0xb6, 0x76, 0xa4, 0xb9, 0xef, 0x57, 0xa0, 0x99, 0x59, 0xf3, 0x1e, 0xf6, 0x29, 0x46, 0x2f, 0x8c, + 0x26, 0x5f, 0x90, 0xa8, 0x50, 0x7c, 0xf1, 0x48, 0xb8, 0x64, 0x44, 0x5e, 0x15, 0x11, 0x59, 0x47, + 0x6b, 0x47, 0x15, 0x91, 0xaa, 0x2f, 0x2d, 0xff, 0x00, 0xc0, 0x67, 0xd6, 0xdc, 0x4e, 0x47, 0x29, + 0xd9, 0xb0, 0xe9, 0xdd, 0x97, 0x7b, 0xb8, 0x87, 0xd1, 0x7c, 0xae, 0x81, 0x44, 0x9c, 0xf2, 0xbf, + 0x50, 0x16, 0x1e, 0x76, 0xc8, 0x5f, 0x02, 0x78, 0x3a, 0xf8, 0xda, 0x1e, 0x2c, 0xd6, 0x74, 0x77, + 0xbd, 0x0e, 0x66, 0xb8, 0x8d, 0x96, 0x34, 0x26, 0xab, 0x74, 0xb8, 0x12, 0xba, 0x3c, 0x22, 0x4b, + 0xa8, 0xf7, 0xa7, 0x00, 0x9e, 0x4c, 0x28, 0x7a, 0xcd, 0x26, 0x1d, 0xdc, 0x46, 0x8d, 0x32, 0xd5, + 
0x04, 0x58, 0xa5, 0x74, 0x71, 0x14, 0x8a, 0x50, 0xa6, 0xca, 0x78, 0xc3, 0x61, 0x64, 0x8f, 0xb0, + 0xfd, 0xa2, 0x19, 0x1f, 0xc2, 0x15, 0xcb, 0x78, 0x02, 0x3c, 0x94, 0xf6, 0x21, 0x80, 0xa7, 0xd6, + 0xb1, 0xe3, 0xfa, 0xed, 0xc1, 0x52, 0xab, 0xd8, 0xf6, 0xd9, 0x16, 0xb6, 0x99, 0xc6, 0x18, 0x95, + 0x81, 0xd6, 0x1f, 0xa3, 0x32, 0x49, 0xa4, 0xd8, 0x05, 0xd1, 0x23, 0x2f, 0x99, 0xe7, 0x73, 0x7a, + 0xa4, 0x1d, 0xb0, 0x10, 0x4c, 0xad, 0x1d, 0x45, 0x22, 0x87, 0xe3, 0x67, 0x33, 0xea, 0x59, 0xdc, + 0xbf, 0xde, 0x46, 0x2b, 0xa3, 0x28, 0xe5, 0x0c, 0xca, 0xf2, 0xea, 0xe8, 0x44, 0xd2, 0xf6, 0x92, + 0xb0, 0xbd, 0x60, 0xce, 0x96, 0xb0, 0x5d, 0xdd, 0xda, 0xaf, 0x92, 0x36, 0x37, 0xff, 0xb8, 0xdf, + 0xb3, 0x07, 0xab, 0x2c, 0xd1, 0xb3, 0x13, 0xe1, 0x85, 0x7b, 0x76, 0x0a, 0x8b, 0xf4, 0x3c, 0x2f, + 0x3c, 0x5f, 0x34, 0x9f, 0xd7, 0xf7, 0xec, 0x48, 0x12, 0x6e, 0xf6, 0xff, 0x00, 0x9e, 0xcd, 0xaa, + 0x47, 0xa4, 0x7a, 0x75, 0x24, 0xa9, 0x83, 0xb9, 0xbe, 0x7e, 0x04, 0x4c, 0xd2, 0x78, 0x53, 0x18, + 0x9f, 0x37, 0x2f, 0x15, 0x37, 0xde, 0xcf, 0xf5, 0xdf, 0xfa, 0xa3, 0xe2, 0x60, 0x8d, 0x45, 0x47, + 0xc5, 0x61, 0x6c, 0xe1, 0x51, 0x31, 0x89, 0x42, 0x3a, 0x9d, 0x15, 0x4e, 0xcf, 0x9b, 0x35, 0x7d, + 0xa7, 0x77, 0x6c, 0xd2, 0xe1, 0xfe, 0xfe, 0x05, 0xe0, 0x99, 0xd4, 0x0a, 0x44, 0x6e, 0x97, 0xcb, + 0x0b, 0x1c, 0x4c, 0xec, 0xb5, 0x51, 0x69, 0xa4, 0xd7, 0x2b, 0xc2, 0xeb, 0xac, 0xf9, 0x85, 0x62, + 0x5e, 0xfb, 0x19, 0xfd, 0x87, 0x18, 0xa5, 0x87, 0xdb, 0x90, 0xdd, 0x75, 0x30, 0xcf, 0x69, 0xb3, + 0x54, 0x0b, 0x94, 0xe8, 0x22, 0xa3, 0x74, 0x06, 0x89, 0xf4, 0x3a, 0x27, 0xbc, 0xce, 0x98, 0xcf, + 0x15, 0x68, 0xc1, 0x82, 0x83, 0xfb, 0xfc, 0xaf, 0x18, 0xa2, 0x53, 0x2b, 0xd1, 0x1e, 0xa2, 0x33, + 0x19, 0x8a, 0x0c, 0xd1, 0x39, 0x44, 0x05, 0xf7, 0x0a, 0x43, 0x9e, 0xfb, 0x19, 0x7e, 0xb3, 0x02, + 0x0d, 0x29, 0x2c, 0xa8, 0x62, 0x78, 0x4b, 0xa4, 0xd3, 0x1a, 0xb3, 0x08, 0x94, 0xef, 0x95, 0x91, + 0x79, 0xa4, 0xed, 0x5b, 0xc2, 0xf6, 0x2b, 0xe6, 0x5a, 0x89, 0x2d, 0x52, 0xca, 0x5a, 0xb9, 0xdf, + 0x14, 0xbe, 0x59, 0x81, 0x93, 0x2d, 0xb2, 0xdd, 0xb5, 0x13, 0x22, 0xa1, 0xb1, 0xb9, 0x4b, 0x46, + 0xaa, 0x10, 0x5c, 0x2d, 0x4f, 0x20, 0xbd, 0x13, 0xe1, 0xdd, 0x31, 0x6f, 0x1f, 0x9d, 0x77, 0x2a, + 0xaa, 0xb4, 0x0e, 0x82, 0xbf, 0x9b, 0x9c, 0x44, 0x6c, 0x23, 0x7f, 0x58, 0x81, 0x67, 0xa5, 0x1c, + 0xc2, 0x76, 0x52, 0xf6, 0xcb, 0xab, 0xba, 0x8e, 0x52, 0x29, 0xf4, 0x67, 0xb3, 0x7c, 0x26, 0x19, + 0x24, 0x5b, 0x04, 0xe9, 0x96, 0x79, 0xb3, 0xdc, 0x1e, 0x5a, 0x46, 0xa4, 0x7a, 0x9f, 0xb0, 0x9d, + 0x2a, 0xe5, 0x55, 0x0d, 0x07, 0xe7, 0x3f, 0x00, 0x4e, 0xac, 0x63, 0x8a, 0x4b, 0x1d, 0x21, 0x24, + 0x03, 0xf5, 0x8f, 0x10, 0xd2, 0xf0, 0xd2, 0xfe, 0x6b, 0xc2, 0xfe, 0x86, 0x79, 0xe3, 0xe8, 0xda, + 0x88, 0xcf, 0x6b, 0xe4, 0xbe, 0xbf, 0x51, 0x81, 0x53, 0x1b, 0xd8, 0xdf, 0x25, 0x5d, 0x9b, 0xe1, + 0x61, 0xef, 0xf9, 0x33, 0x74, 0x3a, 0x58, 0xf9, 0x6f, 0x8e, 0xc4, 0x21, 0x63, 0x70, 0x5b, 0xc4, + 0xe0, 0x55, 0xb3, 0x75, 0x74, 0x31, 0x60, 0xaa, 0x56, 0x1e, 0x87, 0x1f, 0x03, 0x38, 0xb9, 0x84, + 0xf9, 0xfa, 0xa7, 0xcc, 0x30, 0x91, 0x82, 0xd4, 0x1f, 0x26, 0x52, 0x09, 0xc2, 0x0d, 0xd6, 0xcf, + 0x00, 0x9c, 0x7a, 0x91, 0x50, 0x76, 0xc3, 0xc3, 0xdd, 0xa1, 0x72, 0x54, 0x23, 0x51, 0xe9, 0x60, + 0xfd, 0x44, 0x65, 0x71, 0x44, 0x36, 0xff, 0xbc, 0x60, 0xb3, 0xe3, 0x52, 0xdc, 0x4e, 0xd0, 0xba, + 0xa4, 0x55, 0x4f, 0x1a, 0x5c, 0x7f, 0x8b, 0x90, 0xcd, 0x12, 0xea, 0xfd, 0x23, 0x80, 0x13, 0xbc, + 0x68, 0x82, 0xd2, 0x05, 0xad, 0x3a, 0xd2, 0x35, 0x5e, 0x29, 0x8d, 0x8f, 0x9e, 0xce, 0xa2, 0xcf, + 0xeb, 0x36, 0x7b, 0xf4, 0x11, 0x80, 0x06, 0x27, 0x6d, 0xf8, 0xce, 0x0e, 0xd9, 0x4b, 0x8c, 0xff, + 0x35, 0x2d, 0x55, 0xe9, 
0x04, 0xfa, 0x0b, 0x80, 0x3c, 0x9e, 0xe8, 0x1a, 0x1e, 0x4d, 0xe7, 0xad, + 0x7b, 0x24, 0x55, 0xb5, 0x6f, 0xf7, 0x2d, 0x00, 0x27, 0x5a, 0x8e, 0xdd, 0x2d, 0x95, 0xbc, 0x64, + 0x60, 0x81, 0xa3, 0xdf, 0x14, 0x7c, 0xd8, 0xb4, 0xde, 0x03, 0x70, 0xb2, 0xe9, 0xf6, 0xba, 0x49, + 0x6d, 0x2b, 0x9f, 0x3e, 0x05, 0xa9, 0x3f, 0xaa, 0xa4, 0x12, 0x48, 0x81, 0x33, 0x22, 0xee, 0x16, + 0xaa, 0x6a, 0xb6, 0xae, 0xaa, 0xc3, 0x09, 0xd1, 0x77, 0x00, 0x3c, 0xbe, 0x82, 0x59, 0x0b, 0xf3, + 0x7c, 0x34, 0x18, 0xf3, 0xc9, 0x56, 0x8f, 0x61, 0x8a, 0xe6, 0x74, 0xce, 0x4d, 0xe3, 0x28, 0xe5, + 0xe6, 0x72, 0x39, 0x70, 0xd2, 0x11, 0xde, 0xcb, 0x3d, 0xec, 0xc7, 0x4e, 0x25, 0xb4, 0x37, 0xab, + 0xc3, 0xd8, 0xc2, 0x9b, 0xd5, 0x24, 0x8a, 0x50, 0xe6, 0xf7, 0x00, 0x3c, 0x21, 0xa6, 0xfb, 0x16, + 0x23, 0xce, 0xdd, 0x81, 0x13, 0xbc, 0xcb, 0x7a, 0xab, 0x84, 0x18, 0x4c, 0x89, 0x9b, 0x2f, 0x89, + 0x0e, 0x75, 0x7d, 0x00, 0xe0, 0x31, 0x21, 0x5c, 0x35, 0x17, 0x34, 0x93, 0x4b, 0x19, 0x29, 0xaf, + 0x94, 0x5c, 0x28, 0x0a, 0x93, 0x12, 0xda, 0xa2, 0x2d, 0xde, 0x36, 0xbf, 0x32, 0xf2, 0x39, 0xf9, + 0x3d, 0xce, 0x6f, 0x1d, 0x88, 0x3f, 0x35, 0xf1, 0xef, 0x26, 0xdb, 0xf7, 0x82, 0x65, 0xde, 0x47, + 0x00, 0x9e, 0x54, 0x57, 0x8b, 0xc3, 0x13, 0x7d, 0x43, 0xfb, 0x5a, 0x32, 0x75, 0xaa, 0x5f, 0x1c, + 0x85, 0x42, 0x86, 0x62, 0x55, 0x84, 0x62, 0x11, 0x5d, 0x1d, 0x35, 0x14, 0xe8, 0xc1, 0xc0, 0xc5, + 0x6d, 0xbf, 0xa9, 0xe9, 0x5f, 0xdc, 0x0e, 0xb5, 0xb3, 0x7a, 0x19, 0x68, 0xf4, 0x00, 0x12, 0x5d, + 0xce, 0xb1, 0xc5, 0x6c, 0x7a, 0xb7, 0x7a, 0x8f, 0x43, 0xa9, 0x75, 0xc0, 0xbf, 0x6c, 0x8a, 0x2f, + 0x35, 0xb1, 0x56, 0x47, 0x3f, 0x01, 0x70, 0x7c, 0x05, 0xb3, 0x66, 0xa7, 0x47, 0x19, 0xf6, 0xaf, + 0x77, 0xef, 0xb8, 0x1a, 0xd7, 0xba, 0x51, 0x80, 0xfe, 0xb5, 0x6e, 0x1c, 0x27, 0x9d, 0x9c, 0x16, + 0x4e, 0x26, 0xd0, 0x09, 0xe5, 0xc4, 0x09, 0x0a, 0x55, 0x09, 0x97, 0xf3, 0x16, 0x80, 0xc7, 0xf8, + 0x58, 0xb5, 0x4f, 0x19, 0xde, 0x15, 0x02, 0x67, 0xb4, 0xc6, 0xb6, 0xb0, 0xbc, 0x7e, 0x57, 0x8a, + 0xc1, 0xa4, 0xbc, 0x53, 0x42, 0xde, 0x33, 0xe8, 0xb8, 0x92, 0x47, 0x45, 0x99, 0x40, 0x1d, 0x5f, + 0xe8, 0xf2, 0x69, 0x39, 0xcc, 0xcf, 0x9a, 0xed, 0x33, 0xa2, 0x3b, 0x25, 0xa5, 0x20, 0xf5, 0xa7, + 0xa4, 0x54, 0x82, 0x70, 0x24, 0xfa, 0x1d, 0x80, 0xe3, 0x4d, 0x1f, 0xdb, 0x0c, 0xb7, 0x9c, 0x1d, + 0xdc, 0xee, 0x75, 0xb0, 0x46, 0x82, 0xa3, 0x00, 0xfd, 0x04, 0xc7, 0x71, 0x05, 0x0f, 0x62, 0xa8, + 0x04, 0x52, 0xeb, 0x40, 0x7d, 0x54, 0x97, 0xb6, 0xef, 0x02, 0xf8, 0x29, 0xd5, 0x17, 0x42, 0x23, + 0x97, 0xb4, 0xbb, 0x4f, 0xdc, 0xca, 0x6c, 0x09, 0x64, 0xf4, 0xbe, 0x03, 0x95, 0x34, 0xc3, 0xd7, + 0xc7, 0xe3, 0xc1, 0xeb, 0x81, 0x02, 0x09, 0x89, 0x02, 0xf4, 0x13, 0x12, 0xc7, 0x45, 0x87, 0x44, + 0x73, 0xbe, 0x9c, 0x87, 0x81, 0xd7, 0x0b, 0xef, 0x02, 0x78, 0x6c, 0xcd, 0x66, 0xce, 0x4e, 0x68, + 0x26, 0xbf, 0x77, 0x46, 0xca, 0xeb, 0xf7, 0xce, 0x18, 0x4c, 0x5a, 0x59, 0x11, 0x56, 0x1a, 0xe6, + 0xe5, 0x92, 0x56, 0x3c, 0xce, 0xca, 0x9d, 0xfc, 0x1b, 0xc0, 0x93, 0xbc, 0x3b, 0xa9, 0x1a, 0x5e, + 0xe2, 0x3f, 0x90, 0xee, 0xf6, 0x06, 0xd9, 0xc5, 0x54, 0x63, 0x2e, 0x4b, 0xc5, 0xea, 0xcf, 0x65, + 0x19, 0x14, 0xd2, 0xed, 0x4b, 0xc2, 0xed, 0x0a, 0x5a, 0x2e, 0xe9, 0x76, 0x57, 0xb2, 0x56, 0x99, + 0x70, 0xf5, 0x5b, 0x00, 0xc7, 0x83, 0xbd, 0x72, 0x81, 0xb6, 0x18, 0x05, 0xe8, 0xb7, 0xc5, 0x38, + 0x2e, 0xda, 0x9f, 0xce, 0x95, 0xed, 0x4f, 0xef, 0x00, 0x78, 0x6c, 0x30, 0x70, 0x54, 0xa3, 0x05, + 0x46, 0xca, 0xeb, 0xb7, 0xc0, 0x18, 0xac, 0xe0, 0xa6, 0x32, 0x34, 0x80, 0x7e, 0x03, 0xe0, 0xd9, + 0xa0, 0x67, 0xf2, 0x55, 0x0b, 0xf6, 0x17, 0x7b, 0xa4, 0xd3, 0xbe, 0xde, 0xe6, 0x0b, 0x5c, 0x9b, + 0x91, 0x2d, 0xd2, 0x21, 0x6c, 0x5f, 0xe3, 0xe8, 
0x30, 0x8f, 0x42, 0xff, 0xe8, 0x30, 0x9f, 0x29, + 0x9c, 0x4f, 0xde, 0x01, 0xf0, 0x8c, 0x7c, 0xaf, 0x91, 0x22, 0x7c, 0x59, 0xf7, 0xbd, 0x47, 0xb6, + 0xea, 0x6b, 0xa3, 0xd2, 0x44, 0xf6, 0x32, 0x61, 0x59, 0x3e, 0x5b, 0xae, 0x63, 0xdb, 0xd9, 0xb1, + 0xa5, 0xdc, 0x86, 0x7e, 0x3d, 0x71, 0xac, 0x7e, 0xcf, 0xce, 0xa0, 0x08, 0x65, 0xbe, 0x59, 0x81, + 0x93, 0xfd, 0x44, 0x14, 0x3d, 0x33, 0x4b, 0x41, 0xea, 0x2f, 0x25, 0x52, 0x09, 0xa4, 0x40, 0x4f, + 0x34, 0xf3, 0xd7, 0x4d, 0x7c, 0x74, 0x47, 0x86, 0xc1, 0x34, 0x62, 0x1d, 0xc8, 0x57, 0xa0, 0x35, + 0xd2, 0xf5, 0x7a, 0xac, 0x16, 0x1e, 0x22, 0xbf, 0x0d, 0xe0, 0xa9, 0xc1, 0xa7, 0x31, 0xa1, 0xa6, + 0x40, 0xaa, 0xc6, 0xf5, 0x5a, 0x06, 0x5a, 0xff, 0x7a, 0x2d, 0x93, 0x24, 0xcc, 0xde, 0x9f, 0x00, + 0x3c, 0x2e, 0xce, 0xdd, 0x17, 0xf9, 0x10, 0x7b, 0xc3, 0xc3, 0xbe, 0x78, 0xb3, 0xaa, 0xb1, 0x8b, + 0x4f, 0x40, 0xe9, 0xef, 0xe2, 0x13, 0xc1, 0x05, 0x6f, 0xad, 0xb7, 0x38, 0xbc, 0xea, 0x2a, 0x3c, + 0xb5, 0x0e, 0x5e, 0x77, 0xb7, 0xd4, 0xc2, 0xeb, 0xcf, 0x00, 0xa2, 0x16, 0x73, 0xbd, 0x98, 0xad, + 0xba, 0x86, 0xb2, 0x38, 0x48, 0xff, 0x79, 0x62, 0x12, 0xb6, 0xe0, 0xd2, 0x25, 0xd5, 0x94, 0x45, + 0x99, 0xeb, 0x71, 0x67, 0x7f, 0x01, 0x70, 0x42, 0xad, 0xf2, 0x62, 0xee, 0x16, 0xb4, 0x97, 0x87, + 0xc9, 0x0e, 0xaf, 0x94, 0xc6, 0x4b, 0x97, 0x57, 0x85, 0xcb, 0x3a, 0x2a, 0x9d, 0x3a, 0xf4, 0x07, + 0x00, 0x8f, 0xf3, 0xf9, 0x2a, 0x5a, 0x81, 0xce, 0xa9, 0x52, 0x02, 0x4a, 0xbf, 0x3d, 0x26, 0x82, + 0xa5, 0xa9, 0x8b, 0xc2, 0xd4, 0x34, 0xb2, 0x0a, 0x9a, 0x5a, 0x7c, 0x0c, 0x1e, 0x3c, 0x34, 0xc6, + 0xde, 0x7f, 0x68, 0x8c, 0x3d, 0x79, 0x68, 0x80, 0xaf, 0x1d, 0x1a, 0xe0, 0xe7, 0x87, 0x06, 0xf8, + 0xfd, 0xa1, 0x01, 0x1e, 0x1c, 0x1a, 0xe0, 0xef, 0x87, 0x06, 0xf8, 0xf0, 0xd0, 0x18, 0x7b, 0x72, + 0x68, 0x80, 0x6f, 0x3f, 0x32, 0xc6, 0x1e, 0x3c, 0x32, 0xc6, 0xde, 0x7f, 0x64, 0x8c, 0x41, 0x93, + 0xb8, 0x79, 0x7a, 0x17, 0x3f, 0x29, 0x1f, 0x90, 0xaf, 0xf9, 0x2e, 0x73, 0xd7, 0xc0, 0x6b, 0x33, + 0xdb, 0x03, 0x18, 0xe2, 0xa6, 0xbc, 0x5b, 0x9f, 0x8b, 0xfd, 0xd7, 0xdb, 0x95, 0x4f, 0x6f, 0x48, + 0x10, 0x71, 0x6b, 0x0d, 0x8f, 0xd4, 0x62, 0xef, 0xd3, 0x6b, 0x37, 0xa7, 0x1f, 0x57, 0x3e, 0xd3, + 0x2f, 0x53, 0xaf, 0x37, 0x3c, 0x52, 0xaf, 0xc7, 0x4a, 0xd5, 0xeb, 0x37, 0xa7, 0xb7, 0x3e, 0x21, + 0x1e, 0xb9, 0x9f, 0xff, 0x38, 0x00, 0x00, 0xff, 0xff, 0xfd, 0x04, 0x1c, 0x95, 0x8c, 0x2f, 0x00, + 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -171,17 +215,15 @@ ListNamespaces(ctx context.Context, in *ListNamespacesRequest, opts ...grpc.CallOption) (*ListNamespacesResponse, error) // UpdateNamespace is used to update the information and configuration of a registered // namespace. - // - // (-- api-linter: core::0134::method-signature=disabled - // aip.dev/not-precedent: UpdateNamespace RPC doesn't follow Google API format. --) - // (-- api-linter: core::0134::response-message-name=disabled - // aip.dev/not-precedent: UpdateNamespace RPC doesn't follow Google API format. --) UpdateNamespace(ctx context.Context, in *UpdateNamespaceRequest, opts ...grpc.CallOption) (*UpdateNamespaceResponse, error) // DeprecateNamespace is used to update the state of a registered namespace to DEPRECATED. // // Once the namespace is deprecated it cannot be used to start new workflow executions. Existing // workflow executions will continue to run on deprecated namespaces. // Deprecated. + // + // (-- api-linter: core::0127::http-annotation=disabled + // aip.dev/not-precedent: Deprecated. 
--) DeprecateNamespace(ctx context.Context, in *DeprecateNamespaceRequest, opts ...grpc.CallOption) (*DeprecateNamespaceResponse, error) // StartWorkflowExecution starts a new workflow execution. // @@ -202,6 +244,9 @@ // tasks. The worker is expected to call `RespondWorkflowTaskCompleted` when it is done // processing the task. The service will create a `WorkflowTaskStarted` event in the history for // this task before handing it to the worker. + // + // (-- api-linter: core::0127::http-annotation=disabled + // aip.dev/not-precedent: We do not expose worker API to HTTP. --) PollWorkflowTaskQueue(ctx context.Context, in *PollWorkflowTaskQueueRequest, opts ...grpc.CallOption) (*PollWorkflowTaskQueueResponse, error) // RespondWorkflowTaskCompleted is called by workers to successfully complete workflow tasks // they received from `PollWorkflowTaskQueue`. @@ -209,6 +254,9 @@ // Completing a WorkflowTask will write a `WORKFLOW_TASK_COMPLETED` event to the workflow's // history, along with events corresponding to whatever commands the SDK generated while // executing the task (ex timer started, activity task scheduled, etc). + // + // (-- api-linter: core::0127::http-annotation=disabled + // aip.dev/not-precedent: We do not expose worker API to HTTP. --) RespondWorkflowTaskCompleted(ctx context.Context, in *RespondWorkflowTaskCompletedRequest, opts ...grpc.CallOption) (*RespondWorkflowTaskCompletedResponse, error) // RespondWorkflowTaskFailed is called by workers to indicate the processing of a workflow task // failed. @@ -219,6 +267,9 @@ // // Temporal will only append first WorkflowTaskFailed event to the history of workflow execution // for consecutive failures. + // + // (-- api-linter: core::0127::http-annotation=disabled + // aip.dev/not-precedent: We do not expose worker API to HTTP. --) RespondWorkflowTaskFailed(ctx context.Context, in *RespondWorkflowTaskFailedRequest, opts ...grpc.CallOption) (*RespondWorkflowTaskFailedResponse, error) // PollActivityTaskQueue is called by workers to process activity tasks from a specific task // queue. @@ -232,6 +283,9 @@ // (`ACTIVITY_TASK_COMPLETED` / `ACTIVITY_TASK_FAILED` / `ACTIVITY_TASK_TIMED_OUT`) will both be // written permanently to Workflow execution history when Activity is finished. This is done to // avoid writing many events in the case of a failure/retry loop. + // + // (-- api-linter: core::0127::http-annotation=disabled + // aip.dev/not-precedent: We do not expose worker API to HTTP. --) PollActivityTaskQueue(ctx context.Context, in *PollActivityTaskQueueRequest, opts ...grpc.CallOption) (*PollActivityTaskQueueResponse, error) // RecordActivityTaskHeartbeat is optionally called by workers while they execute activities. // @@ -321,30 +375,44 @@ // WorkflowExecution.run_id is provided) or the latest Workflow Execution (when // WorkflowExecution.run_id is not provided). If the Workflow Execution is Running, it will be // terminated before deletion. - // (-- api-linter: core::0135::method-signature=disabled - // aip.dev/not-precedent: DeleteNamespace RPC doesn't follow Google API format. --) - // (-- api-linter: core::0135::response-message-name=disabled - // aip.dev/not-precedent: DeleteNamespace RPC doesn't follow Google API format. --) + // + // (-- api-linter: core::0127::http-annotation=disabled + // aip.dev/not-precedent: Workflow deletion not exposed to HTTP, users should use cancel or terminate. 
--) DeleteWorkflowExecution(ctx context.Context, in *DeleteWorkflowExecutionRequest, opts ...grpc.CallOption) (*DeleteWorkflowExecutionResponse, error) // ListOpenWorkflowExecutions is a visibility API to list the open executions in a specific namespace. + // + // (-- api-linter: core::0127::http-annotation=disabled + // aip.dev/not-precedent: HTTP users should use ListWorkflowExecutions instead. --) ListOpenWorkflowExecutions(ctx context.Context, in *ListOpenWorkflowExecutionsRequest, opts ...grpc.CallOption) (*ListOpenWorkflowExecutionsResponse, error) // ListClosedWorkflowExecutions is a visibility API to list the closed executions in a specific namespace. + // + // (-- api-linter: core::0127::http-annotation=disabled + // aip.dev/not-precedent: HTTP users should use ListWorkflowExecutions instead. --) ListClosedWorkflowExecutions(ctx context.Context, in *ListClosedWorkflowExecutionsRequest, opts ...grpc.CallOption) (*ListClosedWorkflowExecutionsResponse, error) // ListWorkflowExecutions is a visibility API to list workflow executions in a specific namespace. ListWorkflowExecutions(ctx context.Context, in *ListWorkflowExecutionsRequest, opts ...grpc.CallOption) (*ListWorkflowExecutionsResponse, error) // ListArchivedWorkflowExecutions is a visibility API to list archived workflow executions in a specific namespace. ListArchivedWorkflowExecutions(ctx context.Context, in *ListArchivedWorkflowExecutionsRequest, opts ...grpc.CallOption) (*ListArchivedWorkflowExecutionsResponse, error) // ScanWorkflowExecutions is a visibility API to list large amount of workflow executions in a specific namespace without order. + // + // (-- api-linter: core::0127::http-annotation=disabled + // aip.dev/not-precedent: HTTP users should use ListWorkflowExecutions instead. --) ScanWorkflowExecutions(ctx context.Context, in *ScanWorkflowExecutionsRequest, opts ...grpc.CallOption) (*ScanWorkflowExecutionsResponse, error) // CountWorkflowExecutions is a visibility API to count of workflow executions in a specific namespace. CountWorkflowExecutions(ctx context.Context, in *CountWorkflowExecutionsRequest, opts ...grpc.CallOption) (*CountWorkflowExecutionsResponse, error) // GetSearchAttributes is a visibility API to get all legal keys that could be used in list APIs + // + // (-- api-linter: core::0127::http-annotation=disabled + // aip.dev/not-precedent: We do not expose this search attribute API to HTTP (but may expose on OperatorService). --) GetSearchAttributes(ctx context.Context, in *GetSearchAttributesRequest, opts ...grpc.CallOption) (*GetSearchAttributesResponse, error) // RespondQueryTaskCompleted is called by workers to complete queries which were delivered on // the `query` (not `queries`) field of a `PollWorkflowTaskQueueResponse`. // // Completing the query will unblock the corresponding client call to `QueryWorkflow` and return // the query result a response. + // + // (-- api-linter: core::0127::http-annotation=disabled + // aip.dev/not-precedent: We do not expose worker API to HTTP. --) RespondQueryTaskCompleted(ctx context.Context, in *RespondQueryTaskCompletedRequest, opts ...grpc.CallOption) (*RespondQueryTaskCompletedResponse, error) // ResetStickyTaskQueue resets the sticky task queue related information in the mutable state of // a given workflow. This is prudent for workers to perform if a workflow has been paged out of @@ -353,6 +421,9 @@ // Things cleared are: // 1. StickyTaskQueue // 2. 
StickyScheduleToStartTimeout + // + // (-- api-linter: core::0127::http-annotation=disabled + // aip.dev/not-precedent: We do not expose worker API to HTTP. --) ResetStickyTaskQueue(ctx context.Context, in *ResetStickyTaskQueueRequest, opts ...grpc.CallOption) (*ResetStickyTaskQueueResponse, error) // QueryWorkflow requests a query be executed for a specified workflow execution. QueryWorkflow(ctx context.Context, in *QueryWorkflowRequest, opts ...grpc.CallOption) (*QueryWorkflowResponse, error) @@ -364,34 +435,20 @@ GetClusterInfo(ctx context.Context, in *GetClusterInfoRequest, opts ...grpc.CallOption) (*GetClusterInfoResponse, error) // GetSystemInfo returns information about the system. GetSystemInfo(ctx context.Context, in *GetSystemInfoRequest, opts ...grpc.CallOption) (*GetSystemInfoResponse, error) + // (-- api-linter: core::0127::http-annotation=disabled + // aip.dev/not-precedent: We do not expose this low-level API to HTTP. --) ListTaskQueuePartitions(ctx context.Context, in *ListTaskQueuePartitionsRequest, opts ...grpc.CallOption) (*ListTaskQueuePartitionsResponse, error) // Creates a new schedule. - // (-- api-linter: core::0133::method-signature=disabled - // aip.dev/not-precedent: CreateSchedule doesn't follow Google API format --) - // (-- api-linter: core::0133::response-message-name=disabled - // aip.dev/not-precedent: CreateSchedule doesn't follow Google API format --) - // (-- api-linter: core::0133::http-uri-parent=disabled - // aip.dev/not-precedent: CreateSchedule doesn't follow Google API format --) CreateSchedule(ctx context.Context, in *CreateScheduleRequest, opts ...grpc.CallOption) (*CreateScheduleResponse, error) // Returns the schedule description and current state of an existing schedule. DescribeSchedule(ctx context.Context, in *DescribeScheduleRequest, opts ...grpc.CallOption) (*DescribeScheduleResponse, error) // Changes the configuration or state of an existing schedule. - // (-- api-linter: core::0134::response-message-name=disabled - // aip.dev/not-precedent: UpdateSchedule RPC doesn't follow Google API format. --) - // (-- api-linter: core::0134::method-signature=disabled - // aip.dev/not-precedent: UpdateSchedule RPC doesn't follow Google API format. --) UpdateSchedule(ctx context.Context, in *UpdateScheduleRequest, opts ...grpc.CallOption) (*UpdateScheduleResponse, error) // Makes a specific change to a schedule or triggers an immediate action. - // (-- api-linter: core::0134::synonyms=disabled - // aip.dev/not-precedent: we have both patch and update. --) PatchSchedule(ctx context.Context, in *PatchScheduleRequest, opts ...grpc.CallOption) (*PatchScheduleResponse, error) // Lists matching times within a range. ListScheduleMatchingTimes(ctx context.Context, in *ListScheduleMatchingTimesRequest, opts ...grpc.CallOption) (*ListScheduleMatchingTimesResponse, error) // Deletes a schedule, removing it from the system. - // (-- api-linter: core::0135::method-signature=disabled - // aip.dev/not-precedent: DeleteSchedule doesn't follow Google API format --) - // (-- api-linter: core::0135::response-message-name=disabled - // aip.dev/not-precedent: DeleteSchedule doesn't follow Google API format --) DeleteSchedule(ctx context.Context, in *DeleteScheduleRequest, opts ...grpc.CallOption) (*DeleteScheduleResponse, error) // List all schedules in a namespace. 
ListSchedules(ctx context.Context, in *ListSchedulesRequest, opts ...grpc.CallOption) (*ListSchedulesResponse, error) @@ -408,12 +465,13 @@ // NOTE: The number of task queues mapped to a single build id is limited by the `limit.taskQueuesPerBuildId` // (default is 20), if this limit is exceeded this API will error with a FailedPrecondition. // - // (-- api-linter: core::0134::response-message-name=disabled - // aip.dev/not-precedent: UpdateWorkerBuildIdCompatibility RPC doesn't follow Google API format. --) - // (-- api-linter: core::0134::method-signature=disabled - // aip.dev/not-precedent: UpdateWorkerBuildIdCompatibility RPC doesn't follow Google API format. --) + // (-- api-linter: core::0127::http-annotation=disabled + // aip.dev/not-precedent: We do yet expose versioning API to HTTP. --) UpdateWorkerBuildIdCompatibility(ctx context.Context, in *UpdateWorkerBuildIdCompatibilityRequest, opts ...grpc.CallOption) (*UpdateWorkerBuildIdCompatibilityResponse, error) // Fetches the worker build id versioning sets for a task queue. + // + // (-- api-linter: core::0127::http-annotation=disabled + // aip.dev/not-precedent: We do yet expose versioning API to HTTP. --) GetWorkerBuildIdCompatibility(ctx context.Context, in *GetWorkerBuildIdCompatibilityRequest, opts ...grpc.CallOption) (*GetWorkerBuildIdCompatibilityResponse, error) // Fetches task reachability to determine whether a worker may be retired. // The request may specify task queues to query for or let the server fetch all task queues mapped to the given @@ -427,17 +485,19 @@ // // Open source users can adjust this limit by setting the server's dynamic config value for // `limit.reachabilityTaskQueueScan` with the caveat that this call can strain the visibility store. + // + // (-- api-linter: core::0127::http-annotation=disabled + // aip.dev/not-precedent: We do yet expose versioning API to HTTP. --) GetWorkerTaskReachability(ctx context.Context, in *GetWorkerTaskReachabilityRequest, opts ...grpc.CallOption) (*GetWorkerTaskReachabilityResponse, error) // Invokes the specified update function on user workflow code. - // (-- api-linter: core::0134=disabled - // aip.dev/not-precedent: UpdateWorkflowExecution doesn't follow Google API format --) UpdateWorkflowExecution(ctx context.Context, in *UpdateWorkflowExecutionRequest, opts ...grpc.CallOption) (*UpdateWorkflowExecutionResponse, error) // Polls a workflow execution for the outcome of a workflow execution update // previously issued through the UpdateWorkflowExecution RPC. The effective // timeout on this call will be shorter of the the caller-supplied gRPC // timeout and the server's configured long-poll timeout. - // (-- api-linter: core::0134=disabled - // aip.dev/not-precedent: UpdateWorkflowExecution doesn't follow Google API format --) + // + // (-- api-linter: core::0127::http-annotation=disabled + // aip.dev/not-precedent: We don't expose update polling API to HTTP in favor of a potential future non-blocking form. --) PollWorkflowExecutionUpdate(ctx context.Context, in *PollWorkflowExecutionUpdateRequest, opts ...grpc.CallOption) (*PollWorkflowExecutionUpdateResponse, error) // StartBatchOperation starts a new batch operation StartBatchOperation(ctx context.Context, in *StartBatchOperationRequest, opts ...grpc.CallOption) (*StartBatchOperationResponse, error) @@ -985,17 +1045,15 @@ ListNamespaces(context.Context, *ListNamespacesRequest) (*ListNamespacesResponse, error) // UpdateNamespace is used to update the information and configuration of a registered // namespace. 
- // - // (-- api-linter: core::0134::method-signature=disabled - // aip.dev/not-precedent: UpdateNamespace RPC doesn't follow Google API format. --) - // (-- api-linter: core::0134::response-message-name=disabled - // aip.dev/not-precedent: UpdateNamespace RPC doesn't follow Google API format. --) UpdateNamespace(context.Context, *UpdateNamespaceRequest) (*UpdateNamespaceResponse, error) // DeprecateNamespace is used to update the state of a registered namespace to DEPRECATED. // // Once the namespace is deprecated it cannot be used to start new workflow executions. Existing // workflow executions will continue to run on deprecated namespaces. // Deprecated. + // + // (-- api-linter: core::0127::http-annotation=disabled + // aip.dev/not-precedent: Deprecated. --) DeprecateNamespace(context.Context, *DeprecateNamespaceRequest) (*DeprecateNamespaceResponse, error) // StartWorkflowExecution starts a new workflow execution. // @@ -1016,6 +1074,9 @@ // tasks. The worker is expected to call `RespondWorkflowTaskCompleted` when it is done // processing the task. The service will create a `WorkflowTaskStarted` event in the history for // this task before handing it to the worker. + // + // (-- api-linter: core::0127::http-annotation=disabled + // aip.dev/not-precedent: We do not expose worker API to HTTP. --) PollWorkflowTaskQueue(context.Context, *PollWorkflowTaskQueueRequest) (*PollWorkflowTaskQueueResponse, error) // RespondWorkflowTaskCompleted is called by workers to successfully complete workflow tasks // they received from `PollWorkflowTaskQueue`. @@ -1023,6 +1084,9 @@ // Completing a WorkflowTask will write a `WORKFLOW_TASK_COMPLETED` event to the workflow's // history, along with events corresponding to whatever commands the SDK generated while // executing the task (ex timer started, activity task scheduled, etc). + // + // (-- api-linter: core::0127::http-annotation=disabled + // aip.dev/not-precedent: We do not expose worker API to HTTP. --) RespondWorkflowTaskCompleted(context.Context, *RespondWorkflowTaskCompletedRequest) (*RespondWorkflowTaskCompletedResponse, error) // RespondWorkflowTaskFailed is called by workers to indicate the processing of a workflow task // failed. @@ -1033,6 +1097,9 @@ // // Temporal will only append first WorkflowTaskFailed event to the history of workflow execution // for consecutive failures. + // + // (-- api-linter: core::0127::http-annotation=disabled + // aip.dev/not-precedent: We do not expose worker API to HTTP. --) RespondWorkflowTaskFailed(context.Context, *RespondWorkflowTaskFailedRequest) (*RespondWorkflowTaskFailedResponse, error) // PollActivityTaskQueue is called by workers to process activity tasks from a specific task // queue. @@ -1046,6 +1113,9 @@ // (`ACTIVITY_TASK_COMPLETED` / `ACTIVITY_TASK_FAILED` / `ACTIVITY_TASK_TIMED_OUT`) will both be // written permanently to Workflow execution history when Activity is finished. This is done to // avoid writing many events in the case of a failure/retry loop. + // + // (-- api-linter: core::0127::http-annotation=disabled + // aip.dev/not-precedent: We do not expose worker API to HTTP. --) PollActivityTaskQueue(context.Context, *PollActivityTaskQueueRequest) (*PollActivityTaskQueueResponse, error) // RecordActivityTaskHeartbeat is optionally called by workers while they execute activities. // @@ -1135,30 +1205,44 @@ // WorkflowExecution.run_id is provided) or the latest Workflow Execution (when // WorkflowExecution.run_id is not provided). 
If the Workflow Execution is Running, it will be // terminated before deletion. - // (-- api-linter: core::0135::method-signature=disabled - // aip.dev/not-precedent: DeleteNamespace RPC doesn't follow Google API format. --) - // (-- api-linter: core::0135::response-message-name=disabled - // aip.dev/not-precedent: DeleteNamespace RPC doesn't follow Google API format. --) + // + // (-- api-linter: core::0127::http-annotation=disabled + // aip.dev/not-precedent: Workflow deletion not exposed to HTTP, users should use cancel or terminate. --) DeleteWorkflowExecution(context.Context, *DeleteWorkflowExecutionRequest) (*DeleteWorkflowExecutionResponse, error) // ListOpenWorkflowExecutions is a visibility API to list the open executions in a specific namespace. + // + // (-- api-linter: core::0127::http-annotation=disabled + // aip.dev/not-precedent: HTTP users should use ListWorkflowExecutions instead. --) ListOpenWorkflowExecutions(context.Context, *ListOpenWorkflowExecutionsRequest) (*ListOpenWorkflowExecutionsResponse, error) // ListClosedWorkflowExecutions is a visibility API to list the closed executions in a specific namespace. + // + // (-- api-linter: core::0127::http-annotation=disabled + // aip.dev/not-precedent: HTTP users should use ListWorkflowExecutions instead. --) ListClosedWorkflowExecutions(context.Context, *ListClosedWorkflowExecutionsRequest) (*ListClosedWorkflowExecutionsResponse, error) // ListWorkflowExecutions is a visibility API to list workflow executions in a specific namespace. ListWorkflowExecutions(context.Context, *ListWorkflowExecutionsRequest) (*ListWorkflowExecutionsResponse, error) // ListArchivedWorkflowExecutions is a visibility API to list archived workflow executions in a specific namespace. ListArchivedWorkflowExecutions(context.Context, *ListArchivedWorkflowExecutionsRequest) (*ListArchivedWorkflowExecutionsResponse, error) // ScanWorkflowExecutions is a visibility API to list large amount of workflow executions in a specific namespace without order. + // + // (-- api-linter: core::0127::http-annotation=disabled + // aip.dev/not-precedent: HTTP users should use ListWorkflowExecutions instead. --) ScanWorkflowExecutions(context.Context, *ScanWorkflowExecutionsRequest) (*ScanWorkflowExecutionsResponse, error) // CountWorkflowExecutions is a visibility API to count of workflow executions in a specific namespace. CountWorkflowExecutions(context.Context, *CountWorkflowExecutionsRequest) (*CountWorkflowExecutionsResponse, error) // GetSearchAttributes is a visibility API to get all legal keys that could be used in list APIs + // + // (-- api-linter: core::0127::http-annotation=disabled + // aip.dev/not-precedent: We do not expose this search attribute API to HTTP (but may expose on OperatorService). --) GetSearchAttributes(context.Context, *GetSearchAttributesRequest) (*GetSearchAttributesResponse, error) // RespondQueryTaskCompleted is called by workers to complete queries which were delivered on // the `query` (not `queries`) field of a `PollWorkflowTaskQueueResponse`. // // Completing the query will unblock the corresponding client call to `QueryWorkflow` and return // the query result a response. + // + // (-- api-linter: core::0127::http-annotation=disabled + // aip.dev/not-precedent: We do not expose worker API to HTTP. 
--) RespondQueryTaskCompleted(context.Context, *RespondQueryTaskCompletedRequest) (*RespondQueryTaskCompletedResponse, error) // ResetStickyTaskQueue resets the sticky task queue related information in the mutable state of // a given workflow. This is prudent for workers to perform if a workflow has been paged out of @@ -1167,6 +1251,9 @@ // Things cleared are: // 1. StickyTaskQueue // 2. StickyScheduleToStartTimeout + // + // (-- api-linter: core::0127::http-annotation=disabled + // aip.dev/not-precedent: We do not expose worker API to HTTP. --) ResetStickyTaskQueue(context.Context, *ResetStickyTaskQueueRequest) (*ResetStickyTaskQueueResponse, error) // QueryWorkflow requests a query be executed for a specified workflow execution. QueryWorkflow(context.Context, *QueryWorkflowRequest) (*QueryWorkflowResponse, error) @@ -1178,34 +1265,20 @@ GetClusterInfo(context.Context, *GetClusterInfoRequest) (*GetClusterInfoResponse, error) // GetSystemInfo returns information about the system. GetSystemInfo(context.Context, *GetSystemInfoRequest) (*GetSystemInfoResponse, error) + // (-- api-linter: core::0127::http-annotation=disabled + // aip.dev/not-precedent: We do not expose this low-level API to HTTP. --) ListTaskQueuePartitions(context.Context, *ListTaskQueuePartitionsRequest) (*ListTaskQueuePartitionsResponse, error) // Creates a new schedule. - // (-- api-linter: core::0133::method-signature=disabled - // aip.dev/not-precedent: CreateSchedule doesn't follow Google API format --) - // (-- api-linter: core::0133::response-message-name=disabled - // aip.dev/not-precedent: CreateSchedule doesn't follow Google API format --) - // (-- api-linter: core::0133::http-uri-parent=disabled - // aip.dev/not-precedent: CreateSchedule doesn't follow Google API format --) CreateSchedule(context.Context, *CreateScheduleRequest) (*CreateScheduleResponse, error) // Returns the schedule description and current state of an existing schedule. DescribeSchedule(context.Context, *DescribeScheduleRequest) (*DescribeScheduleResponse, error) // Changes the configuration or state of an existing schedule. - // (-- api-linter: core::0134::response-message-name=disabled - // aip.dev/not-precedent: UpdateSchedule RPC doesn't follow Google API format. --) - // (-- api-linter: core::0134::method-signature=disabled - // aip.dev/not-precedent: UpdateSchedule RPC doesn't follow Google API format. --) UpdateSchedule(context.Context, *UpdateScheduleRequest) (*UpdateScheduleResponse, error) // Makes a specific change to a schedule or triggers an immediate action. - // (-- api-linter: core::0134::synonyms=disabled - // aip.dev/not-precedent: we have both patch and update. --) PatchSchedule(context.Context, *PatchScheduleRequest) (*PatchScheduleResponse, error) // Lists matching times within a range. ListScheduleMatchingTimes(context.Context, *ListScheduleMatchingTimesRequest) (*ListScheduleMatchingTimesResponse, error) // Deletes a schedule, removing it from the system. - // (-- api-linter: core::0135::method-signature=disabled - // aip.dev/not-precedent: DeleteSchedule doesn't follow Google API format --) - // (-- api-linter: core::0135::response-message-name=disabled - // aip.dev/not-precedent: DeleteSchedule doesn't follow Google API format --) DeleteSchedule(context.Context, *DeleteScheduleRequest) (*DeleteScheduleResponse, error) // List all schedules in a namespace. 
ListSchedules(context.Context, *ListSchedulesRequest) (*ListSchedulesResponse, error) @@ -1222,12 +1295,13 @@ // NOTE: The number of task queues mapped to a single build id is limited by the `limit.taskQueuesPerBuildId` // (default is 20), if this limit is exceeded this API will error with a FailedPrecondition. // - // (-- api-linter: core::0134::response-message-name=disabled - // aip.dev/not-precedent: UpdateWorkerBuildIdCompatibility RPC doesn't follow Google API format. --) - // (-- api-linter: core::0134::method-signature=disabled - // aip.dev/not-precedent: UpdateWorkerBuildIdCompatibility RPC doesn't follow Google API format. --) + // (-- api-linter: core::0127::http-annotation=disabled + // aip.dev/not-precedent: We do yet expose versioning API to HTTP. --) UpdateWorkerBuildIdCompatibility(context.Context, *UpdateWorkerBuildIdCompatibilityRequest) (*UpdateWorkerBuildIdCompatibilityResponse, error) // Fetches the worker build id versioning sets for a task queue. + // + // (-- api-linter: core::0127::http-annotation=disabled + // aip.dev/not-precedent: We do yet expose versioning API to HTTP. --) GetWorkerBuildIdCompatibility(context.Context, *GetWorkerBuildIdCompatibilityRequest) (*GetWorkerBuildIdCompatibilityResponse, error) // Fetches task reachability to determine whether a worker may be retired. // The request may specify task queues to query for or let the server fetch all task queues mapped to the given @@ -1241,17 +1315,19 @@ // // Open source users can adjust this limit by setting the server's dynamic config value for // `limit.reachabilityTaskQueueScan` with the caveat that this call can strain the visibility store. + // + // (-- api-linter: core::0127::http-annotation=disabled + // aip.dev/not-precedent: We do yet expose versioning API to HTTP. --) GetWorkerTaskReachability(context.Context, *GetWorkerTaskReachabilityRequest) (*GetWorkerTaskReachabilityResponse, error) // Invokes the specified update function on user workflow code. - // (-- api-linter: core::0134=disabled - // aip.dev/not-precedent: UpdateWorkflowExecution doesn't follow Google API format --) UpdateWorkflowExecution(context.Context, *UpdateWorkflowExecutionRequest) (*UpdateWorkflowExecutionResponse, error) // Polls a workflow execution for the outcome of a workflow execution update // previously issued through the UpdateWorkflowExecution RPC. The effective // timeout on this call will be shorter of the the caller-supplied gRPC // timeout and the server's configured long-poll timeout. - // (-- api-linter: core::0134=disabled - // aip.dev/not-precedent: UpdateWorkflowExecution doesn't follow Google API format --) + // + // (-- api-linter: core::0127::http-annotation=disabled + // aip.dev/not-precedent: We don't expose update polling API to HTTP in favor of a potential future non-blocking form. 
--) PollWorkflowExecutionUpdate(context.Context, *PollWorkflowExecutionUpdateRequest) (*PollWorkflowExecutionUpdateResponse, error) // StartBatchOperation starts a new batch operation StartBatchOperation(context.Context, *StartBatchOperationRequest) (*StartBatchOperationResponse, error) diff -Nru temporal-1.21.5-1/src/vendor/go.temporal.io/api/workflowservice/v1/service.pb.gw.go temporal-1.22.5/src/vendor/go.temporal.io/api/workflowservice/v1/service.pb.gw.go --- temporal-1.21.5-1/src/vendor/go.temporal.io/api/workflowservice/v1/service.pb.gw.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.temporal.io/api/workflowservice/v1/service.pb.gw.go 2024-02-23 09:46:13.000000000 +0000 @@ -0,0 +1,5158 @@ +// The MIT License +// +// Copyright (c) 2022 Temporal Technologies Inc. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: temporal/api/workflowservice/v1/service.proto + +/* +Package workflowservice is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. 
+*/ +package workflowservice + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/descriptor" + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = descriptor.ForMessage +var _ = metadata.Join + +func request_WorkflowService_RegisterNamespace_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq RegisterNamespaceRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.RegisterNamespace(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_WorkflowService_RegisterNamespace_0(ctx context.Context, marshaler runtime.Marshaler, server WorkflowServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq RegisterNamespaceRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.RegisterNamespace(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_WorkflowService_DescribeNamespace_0 = &utilities.DoubleArray{Encoding: map[string]int{"namespace": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}} +) + +func request_WorkflowService_DescribeNamespace_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq DescribeNamespaceRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_WorkflowService_DescribeNamespace_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.DescribeNamespace(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func 
local_request_WorkflowService_DescribeNamespace_0(ctx context.Context, marshaler runtime.Marshaler, server WorkflowServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq DescribeNamespaceRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_WorkflowService_DescribeNamespace_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.DescribeNamespace(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_WorkflowService_ListNamespaces_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_WorkflowService_ListNamespaces_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ListNamespacesRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_WorkflowService_ListNamespaces_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.ListNamespaces(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_WorkflowService_ListNamespaces_0(ctx context.Context, marshaler runtime.Marshaler, server WorkflowServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ListNamespacesRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_WorkflowService_ListNamespaces_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.ListNamespaces(ctx, &protoReq) + return msg, metadata, err + +} + +func request_WorkflowService_UpdateNamespace_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq UpdateNamespaceRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, 
"missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + msg, err := client.UpdateNamespace(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_WorkflowService_UpdateNamespace_0(ctx context.Context, marshaler runtime.Marshaler, server WorkflowServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq UpdateNamespaceRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + msg, err := server.UpdateNamespace(ctx, &protoReq) + return msg, metadata, err + +} + +func request_WorkflowService_StartWorkflowExecution_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq StartWorkflowExecutionRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + val, ok = pathParams["workflow_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "workflow_id") + } + + protoReq.WorkflowId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "workflow_id", err) + } + + msg, err := client.StartWorkflowExecution(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_WorkflowService_StartWorkflowExecution_0(ctx context.Context, marshaler runtime.Marshaler, server WorkflowServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq StartWorkflowExecutionRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, 
"%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + val, ok = pathParams["workflow_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "workflow_id") + } + + protoReq.WorkflowId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "workflow_id", err) + } + + msg, err := server.StartWorkflowExecution(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_WorkflowService_GetWorkflowExecutionHistory_0 = &utilities.DoubleArray{Encoding: map[string]int{"namespace": 0, "execution": 1, "workflow_id": 2}, Base: []int{1, 1, 1, 2, 0, 0}, Check: []int{0, 1, 1, 3, 2, 4}} +) + +func request_WorkflowService_GetWorkflowExecutionHistory_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetWorkflowExecutionHistoryRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + val, ok = pathParams["execution.workflow_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "execution.workflow_id") + } + + err = runtime.PopulateFieldFromPath(&protoReq, "execution.workflow_id", val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "execution.workflow_id", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_WorkflowService_GetWorkflowExecutionHistory_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.GetWorkflowExecutionHistory(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_WorkflowService_GetWorkflowExecutionHistory_0(ctx context.Context, marshaler runtime.Marshaler, server WorkflowServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetWorkflowExecutionHistoryRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, 
metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + val, ok = pathParams["execution.workflow_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "execution.workflow_id") + } + + err = runtime.PopulateFieldFromPath(&protoReq, "execution.workflow_id", val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "execution.workflow_id", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_WorkflowService_GetWorkflowExecutionHistory_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.GetWorkflowExecutionHistory(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_WorkflowService_GetWorkflowExecutionHistoryReverse_0 = &utilities.DoubleArray{Encoding: map[string]int{"namespace": 0, "execution": 1, "workflow_id": 2}, Base: []int{1, 1, 1, 2, 0, 0}, Check: []int{0, 1, 1, 3, 2, 4}} +) + +func request_WorkflowService_GetWorkflowExecutionHistoryReverse_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetWorkflowExecutionHistoryReverseRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + val, ok = pathParams["execution.workflow_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "execution.workflow_id") + } + + err = runtime.PopulateFieldFromPath(&protoReq, "execution.workflow_id", val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "execution.workflow_id", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_WorkflowService_GetWorkflowExecutionHistoryReverse_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.GetWorkflowExecutionHistoryReverse(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_WorkflowService_GetWorkflowExecutionHistoryReverse_0(ctx context.Context, marshaler runtime.Marshaler, server WorkflowServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetWorkflowExecutionHistoryReverseRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, 
"type mismatch, parameter: %s, error: %v", "namespace", err) + } + + val, ok = pathParams["execution.workflow_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "execution.workflow_id") + } + + err = runtime.PopulateFieldFromPath(&protoReq, "execution.workflow_id", val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "execution.workflow_id", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_WorkflowService_GetWorkflowExecutionHistoryReverse_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.GetWorkflowExecutionHistoryReverse(ctx, &protoReq) + return msg, metadata, err + +} + +func request_WorkflowService_RecordActivityTaskHeartbeat_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq RecordActivityTaskHeartbeatRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + msg, err := client.RecordActivityTaskHeartbeat(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_WorkflowService_RecordActivityTaskHeartbeat_0(ctx context.Context, marshaler runtime.Marshaler, server WorkflowServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq RecordActivityTaskHeartbeatRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + msg, err := server.RecordActivityTaskHeartbeat(ctx, &protoReq) + return msg, metadata, err + +} + +func request_WorkflowService_RecordActivityTaskHeartbeatById_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, 
error) { + var protoReq RecordActivityTaskHeartbeatByIdRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + msg, err := client.RecordActivityTaskHeartbeatById(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_WorkflowService_RecordActivityTaskHeartbeatById_0(ctx context.Context, marshaler runtime.Marshaler, server WorkflowServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq RecordActivityTaskHeartbeatByIdRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + msg, err := server.RecordActivityTaskHeartbeatById(ctx, &protoReq) + return msg, metadata, err + +} + +func request_WorkflowService_RespondActivityTaskCompleted_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq RespondActivityTaskCompletedRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + msg, err := client.RespondActivityTaskCompleted(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_WorkflowService_RespondActivityTaskCompleted_0(ctx context.Context, marshaler runtime.Marshaler, server 
WorkflowServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq RespondActivityTaskCompletedRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + msg, err := server.RespondActivityTaskCompleted(ctx, &protoReq) + return msg, metadata, err + +} + +func request_WorkflowService_RespondActivityTaskCompletedById_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq RespondActivityTaskCompletedByIdRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + msg, err := client.RespondActivityTaskCompletedById(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_WorkflowService_RespondActivityTaskCompletedById_0(ctx context.Context, marshaler runtime.Marshaler, server WorkflowServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq RespondActivityTaskCompletedByIdRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + msg, err := server.RespondActivityTaskCompletedById(ctx, &protoReq) + return msg, metadata, err + +} + +func request_WorkflowService_RespondActivityTaskFailed_0(ctx context.Context, 
marshaler runtime.Marshaler, client WorkflowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq RespondActivityTaskFailedRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + msg, err := client.RespondActivityTaskFailed(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_WorkflowService_RespondActivityTaskFailed_0(ctx context.Context, marshaler runtime.Marshaler, server WorkflowServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq RespondActivityTaskFailedRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + msg, err := server.RespondActivityTaskFailed(ctx, &protoReq) + return msg, metadata, err + +} + +func request_WorkflowService_RespondActivityTaskFailedById_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq RespondActivityTaskFailedByIdRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + msg, err := client.RespondActivityTaskFailedById(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func 
local_request_WorkflowService_RespondActivityTaskFailedById_0(ctx context.Context, marshaler runtime.Marshaler, server WorkflowServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq RespondActivityTaskFailedByIdRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + msg, err := server.RespondActivityTaskFailedById(ctx, &protoReq) + return msg, metadata, err + +} + +func request_WorkflowService_RespondActivityTaskCanceled_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq RespondActivityTaskCanceledRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + msg, err := client.RespondActivityTaskCanceled(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_WorkflowService_RespondActivityTaskCanceled_0(ctx context.Context, marshaler runtime.Marshaler, server WorkflowServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq RespondActivityTaskCanceledRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + msg, err := server.RespondActivityTaskCanceled(ctx, &protoReq) + return msg, metadata, err + 
+} + +func request_WorkflowService_RespondActivityTaskCanceledById_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq RespondActivityTaskCanceledByIdRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + msg, err := client.RespondActivityTaskCanceledById(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_WorkflowService_RespondActivityTaskCanceledById_0(ctx context.Context, marshaler runtime.Marshaler, server WorkflowServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq RespondActivityTaskCanceledByIdRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + msg, err := server.RespondActivityTaskCanceledById(ctx, &protoReq) + return msg, metadata, err + +} + +func request_WorkflowService_RequestCancelWorkflowExecution_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq RequestCancelWorkflowExecutionRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + val, ok = pathParams["workflow_execution.workflow_id"] + if !ok 
{ + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "workflow_execution.workflow_id") + } + + err = runtime.PopulateFieldFromPath(&protoReq, "workflow_execution.workflow_id", val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "workflow_execution.workflow_id", err) + } + + msg, err := client.RequestCancelWorkflowExecution(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_WorkflowService_RequestCancelWorkflowExecution_0(ctx context.Context, marshaler runtime.Marshaler, server WorkflowServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq RequestCancelWorkflowExecutionRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + val, ok = pathParams["workflow_execution.workflow_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "workflow_execution.workflow_id") + } + + err = runtime.PopulateFieldFromPath(&protoReq, "workflow_execution.workflow_id", val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "workflow_execution.workflow_id", err) + } + + msg, err := server.RequestCancelWorkflowExecution(ctx, &protoReq) + return msg, metadata, err + +} + +func request_WorkflowService_SignalWorkflowExecution_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq SignalWorkflowExecutionRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + val, ok = pathParams["workflow_execution.workflow_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "workflow_execution.workflow_id") + } + + err = runtime.PopulateFieldFromPath(&protoReq, "workflow_execution.workflow_id", val) + + if err != nil { + return nil, 
metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "workflow_execution.workflow_id", err) + } + + val, ok = pathParams["signal_name"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "signal_name") + } + + protoReq.SignalName, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "signal_name", err) + } + + msg, err := client.SignalWorkflowExecution(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_WorkflowService_SignalWorkflowExecution_0(ctx context.Context, marshaler runtime.Marshaler, server WorkflowServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq SignalWorkflowExecutionRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + val, ok = pathParams["workflow_execution.workflow_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "workflow_execution.workflow_id") + } + + err = runtime.PopulateFieldFromPath(&protoReq, "workflow_execution.workflow_id", val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "workflow_execution.workflow_id", err) + } + + val, ok = pathParams["signal_name"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "signal_name") + } + + protoReq.SignalName, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "signal_name", err) + } + + msg, err := server.SignalWorkflowExecution(ctx, &protoReq) + return msg, metadata, err + +} + +func request_WorkflowService_SignalWithStartWorkflowExecution_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq SignalWithStartWorkflowExecutionRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + 
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + val, ok = pathParams["workflow_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "workflow_id") + } + + protoReq.WorkflowId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "workflow_id", err) + } + + val, ok = pathParams["signal_name"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "signal_name") + } + + protoReq.SignalName, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "signal_name", err) + } + + msg, err := client.SignalWithStartWorkflowExecution(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_WorkflowService_SignalWithStartWorkflowExecution_0(ctx context.Context, marshaler runtime.Marshaler, server WorkflowServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq SignalWithStartWorkflowExecutionRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + val, ok = pathParams["workflow_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "workflow_id") + } + + protoReq.WorkflowId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "workflow_id", err) + } + + val, ok = pathParams["signal_name"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "signal_name") + } + + protoReq.SignalName, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "signal_name", err) + } + + msg, err := server.SignalWithStartWorkflowExecution(ctx, &protoReq) + return msg, metadata, err + +} + +func request_WorkflowService_ResetWorkflowExecution_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ResetWorkflowExecutionRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val 
string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + val, ok = pathParams["workflow_execution.workflow_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "workflow_execution.workflow_id") + } + + err = runtime.PopulateFieldFromPath(&protoReq, "workflow_execution.workflow_id", val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "workflow_execution.workflow_id", err) + } + + msg, err := client.ResetWorkflowExecution(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_WorkflowService_ResetWorkflowExecution_0(ctx context.Context, marshaler runtime.Marshaler, server WorkflowServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ResetWorkflowExecutionRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + val, ok = pathParams["workflow_execution.workflow_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "workflow_execution.workflow_id") + } + + err = runtime.PopulateFieldFromPath(&protoReq, "workflow_execution.workflow_id", val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "workflow_execution.workflow_id", err) + } + + msg, err := server.ResetWorkflowExecution(ctx, &protoReq) + return msg, metadata, err + +} + +func request_WorkflowService_TerminateWorkflowExecution_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq TerminateWorkflowExecutionRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, 
metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + val, ok = pathParams["workflow_execution.workflow_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "workflow_execution.workflow_id") + } + + err = runtime.PopulateFieldFromPath(&protoReq, "workflow_execution.workflow_id", val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "workflow_execution.workflow_id", err) + } + + msg, err := client.TerminateWorkflowExecution(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_WorkflowService_TerminateWorkflowExecution_0(ctx context.Context, marshaler runtime.Marshaler, server WorkflowServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq TerminateWorkflowExecutionRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + val, ok = pathParams["workflow_execution.workflow_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "workflow_execution.workflow_id") + } + + err = runtime.PopulateFieldFromPath(&protoReq, "workflow_execution.workflow_id", val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "workflow_execution.workflow_id", err) + } + + msg, err := server.TerminateWorkflowExecution(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_WorkflowService_ListWorkflowExecutions_0 = &utilities.DoubleArray{Encoding: map[string]int{"namespace": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}} +) + +func request_WorkflowService_ListWorkflowExecutions_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ListWorkflowExecutionsRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_WorkflowService_ListWorkflowExecutions_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", 
err) + } + + msg, err := client.ListWorkflowExecutions(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_WorkflowService_ListWorkflowExecutions_0(ctx context.Context, marshaler runtime.Marshaler, server WorkflowServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ListWorkflowExecutionsRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_WorkflowService_ListWorkflowExecutions_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.ListWorkflowExecutions(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_WorkflowService_ListArchivedWorkflowExecutions_0 = &utilities.DoubleArray{Encoding: map[string]int{"namespace": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}} +) + +func request_WorkflowService_ListArchivedWorkflowExecutions_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ListArchivedWorkflowExecutionsRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_WorkflowService_ListArchivedWorkflowExecutions_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.ListArchivedWorkflowExecutions(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_WorkflowService_ListArchivedWorkflowExecutions_0(ctx context.Context, marshaler runtime.Marshaler, server WorkflowServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ListArchivedWorkflowExecutionsRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + if err := req.ParseForm(); err != nil { + return nil, 
metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_WorkflowService_ListArchivedWorkflowExecutions_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.ListArchivedWorkflowExecutions(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_WorkflowService_CountWorkflowExecutions_0 = &utilities.DoubleArray{Encoding: map[string]int{"namespace": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}} +) + +func request_WorkflowService_CountWorkflowExecutions_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq CountWorkflowExecutionsRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_WorkflowService_CountWorkflowExecutions_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.CountWorkflowExecutions(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_WorkflowService_CountWorkflowExecutions_0(ctx context.Context, marshaler runtime.Marshaler, server WorkflowServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq CountWorkflowExecutionsRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_WorkflowService_CountWorkflowExecutions_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.CountWorkflowExecutions(ctx, &protoReq) + return msg, metadata, err + +} + +func request_WorkflowService_QueryWorkflow_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryWorkflowRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var 
( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + val, ok = pathParams["execution.workflow_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "execution.workflow_id") + } + + err = runtime.PopulateFieldFromPath(&protoReq, "execution.workflow_id", val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "execution.workflow_id", err) + } + + val, ok = pathParams["query.query_type"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "query.query_type") + } + + err = runtime.PopulateFieldFromPath(&protoReq, "query.query_type", val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "query.query_type", err) + } + + msg, err := client.QueryWorkflow(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_WorkflowService_QueryWorkflow_0(ctx context.Context, marshaler runtime.Marshaler, server WorkflowServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryWorkflowRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + val, ok = pathParams["execution.workflow_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "execution.workflow_id") + } + + err = runtime.PopulateFieldFromPath(&protoReq, "execution.workflow_id", val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "execution.workflow_id", err) + } + + val, ok = pathParams["query.query_type"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "query.query_type") + } + + err = runtime.PopulateFieldFromPath(&protoReq, "query.query_type", val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "query.query_type", err) + } + + msg, err := server.QueryWorkflow(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_WorkflowService_DescribeWorkflowExecution_0 = &utilities.DoubleArray{Encoding: map[string]int{"namespace": 0, "execution": 1, "workflow_id": 2}, Base: []int{1, 1, 1, 2, 0, 0}, Check: []int{0, 1, 1, 3, 2, 4}} +) + +func 
request_WorkflowService_DescribeWorkflowExecution_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq DescribeWorkflowExecutionRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + val, ok = pathParams["execution.workflow_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "execution.workflow_id") + } + + err = runtime.PopulateFieldFromPath(&protoReq, "execution.workflow_id", val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "execution.workflow_id", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_WorkflowService_DescribeWorkflowExecution_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.DescribeWorkflowExecution(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_WorkflowService_DescribeWorkflowExecution_0(ctx context.Context, marshaler runtime.Marshaler, server WorkflowServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq DescribeWorkflowExecutionRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + val, ok = pathParams["execution.workflow_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "execution.workflow_id") + } + + err = runtime.PopulateFieldFromPath(&protoReq, "execution.workflow_id", val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "execution.workflow_id", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_WorkflowService_DescribeWorkflowExecution_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.DescribeWorkflowExecution(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_WorkflowService_DescribeTaskQueue_0 = &utilities.DoubleArray{Encoding: map[string]int{"namespace": 0, "task_queue": 1, "name": 2}, Base: []int{1, 1, 1, 2, 0, 0}, Check: []int{0, 1, 1, 3, 2, 4}} +) + +func request_WorkflowService_DescribeTaskQueue_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowServiceClient, req 
*http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq DescribeTaskQueueRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + val, ok = pathParams["task_queue.name"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "task_queue.name") + } + + err = runtime.PopulateFieldFromPath(&protoReq, "task_queue.name", val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "task_queue.name", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_WorkflowService_DescribeTaskQueue_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.DescribeTaskQueue(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_WorkflowService_DescribeTaskQueue_0(ctx context.Context, marshaler runtime.Marshaler, server WorkflowServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq DescribeTaskQueueRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + val, ok = pathParams["task_queue.name"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "task_queue.name") + } + + err = runtime.PopulateFieldFromPath(&protoReq, "task_queue.name", val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "task_queue.name", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_WorkflowService_DescribeTaskQueue_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.DescribeTaskQueue(ctx, &protoReq) + return msg, metadata, err + +} + +func request_WorkflowService_GetClusterInfo_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetClusterInfoRequest + var metadata runtime.ServerMetadata + + msg, err := client.GetClusterInfo(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_WorkflowService_GetClusterInfo_0(ctx context.Context, marshaler runtime.Marshaler, server 
WorkflowServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetClusterInfoRequest + var metadata runtime.ServerMetadata + + msg, err := server.GetClusterInfo(ctx, &protoReq) + return msg, metadata, err + +} + +func request_WorkflowService_GetSystemInfo_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetSystemInfoRequest + var metadata runtime.ServerMetadata + + msg, err := client.GetSystemInfo(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_WorkflowService_GetSystemInfo_0(ctx context.Context, marshaler runtime.Marshaler, server WorkflowServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetSystemInfoRequest + var metadata runtime.ServerMetadata + + msg, err := server.GetSystemInfo(ctx, &protoReq) + return msg, metadata, err + +} + +func request_WorkflowService_CreateSchedule_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq CreateScheduleRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + val, ok = pathParams["schedule_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "schedule_id") + } + + protoReq.ScheduleId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "schedule_id", err) + } + + msg, err := client.CreateSchedule(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_WorkflowService_CreateSchedule_0(ctx context.Context, marshaler runtime.Marshaler, server WorkflowServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq CreateScheduleRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = 
runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + val, ok = pathParams["schedule_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "schedule_id") + } + + protoReq.ScheduleId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "schedule_id", err) + } + + msg, err := server.CreateSchedule(ctx, &protoReq) + return msg, metadata, err + +} + +func request_WorkflowService_DescribeSchedule_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq DescribeScheduleRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + val, ok = pathParams["schedule_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "schedule_id") + } + + protoReq.ScheduleId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "schedule_id", err) + } + + msg, err := client.DescribeSchedule(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_WorkflowService_DescribeSchedule_0(ctx context.Context, marshaler runtime.Marshaler, server WorkflowServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq DescribeScheduleRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + val, ok = pathParams["schedule_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "schedule_id") + } + + protoReq.ScheduleId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "schedule_id", err) + } + + msg, err := server.DescribeSchedule(ctx, &protoReq) + return msg, metadata, err + +} + +func request_WorkflowService_UpdateSchedule_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq UpdateScheduleRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return 
nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + val, ok = pathParams["schedule_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "schedule_id") + } + + protoReq.ScheduleId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "schedule_id", err) + } + + msg, err := client.UpdateSchedule(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_WorkflowService_UpdateSchedule_0(ctx context.Context, marshaler runtime.Marshaler, server WorkflowServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq UpdateScheduleRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + val, ok = pathParams["schedule_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "schedule_id") + } + + protoReq.ScheduleId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "schedule_id", err) + } + + msg, err := server.UpdateSchedule(ctx, &protoReq) + return msg, metadata, err + +} + +func request_WorkflowService_PatchSchedule_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq PatchScheduleRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + val, ok = pathParams["schedule_id"] + if !ok { + return 
nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "schedule_id") + } + + protoReq.ScheduleId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "schedule_id", err) + } + + msg, err := client.PatchSchedule(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_WorkflowService_PatchSchedule_0(ctx context.Context, marshaler runtime.Marshaler, server WorkflowServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq PatchScheduleRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + val, ok = pathParams["schedule_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "schedule_id") + } + + protoReq.ScheduleId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "schedule_id", err) + } + + msg, err := server.PatchSchedule(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_WorkflowService_ListScheduleMatchingTimes_0 = &utilities.DoubleArray{Encoding: map[string]int{"namespace": 0, "schedule_id": 1}, Base: []int{1, 1, 2, 0, 0}, Check: []int{0, 1, 1, 2, 3}} +) + +func request_WorkflowService_ListScheduleMatchingTimes_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ListScheduleMatchingTimesRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + val, ok = pathParams["schedule_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "schedule_id") + } + + protoReq.ScheduleId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "schedule_id", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_WorkflowService_ListScheduleMatchingTimes_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, 
err := client.ListScheduleMatchingTimes(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_WorkflowService_ListScheduleMatchingTimes_0(ctx context.Context, marshaler runtime.Marshaler, server WorkflowServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ListScheduleMatchingTimesRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + val, ok = pathParams["schedule_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "schedule_id") + } + + protoReq.ScheduleId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "schedule_id", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_WorkflowService_ListScheduleMatchingTimes_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.ListScheduleMatchingTimes(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_WorkflowService_DeleteSchedule_0 = &utilities.DoubleArray{Encoding: map[string]int{"namespace": 0, "schedule_id": 1}, Base: []int{1, 1, 2, 0, 0}, Check: []int{0, 1, 1, 2, 3}} +) + +func request_WorkflowService_DeleteSchedule_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq DeleteScheduleRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + val, ok = pathParams["schedule_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "schedule_id") + } + + protoReq.ScheduleId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "schedule_id", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_WorkflowService_DeleteSchedule_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.DeleteSchedule(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_WorkflowService_DeleteSchedule_0(ctx context.Context, marshaler runtime.Marshaler, server WorkflowServiceServer, req *http.Request, 
pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq DeleteScheduleRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + val, ok = pathParams["schedule_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "schedule_id") + } + + protoReq.ScheduleId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "schedule_id", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_WorkflowService_DeleteSchedule_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.DeleteSchedule(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_WorkflowService_ListSchedules_0 = &utilities.DoubleArray{Encoding: map[string]int{"namespace": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}} +) + +func request_WorkflowService_ListSchedules_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ListSchedulesRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_WorkflowService_ListSchedules_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.ListSchedules(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_WorkflowService_ListSchedules_0(ctx context.Context, marshaler runtime.Marshaler, server WorkflowServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ListSchedulesRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, 
filter_WorkflowService_ListSchedules_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.ListSchedules(ctx, &protoReq) + return msg, metadata, err + +} + +func request_WorkflowService_UpdateWorkflowExecution_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq UpdateWorkflowExecutionRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + val, ok = pathParams["workflow_execution.workflow_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "workflow_execution.workflow_id") + } + + err = runtime.PopulateFieldFromPath(&protoReq, "workflow_execution.workflow_id", val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "workflow_execution.workflow_id", err) + } + + val, ok = pathParams["request.input.name"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "request.input.name") + } + + err = runtime.PopulateFieldFromPath(&protoReq, "request.input.name", val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "request.input.name", err) + } + + msg, err := client.UpdateWorkflowExecution(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_WorkflowService_UpdateWorkflowExecution_0(ctx context.Context, marshaler runtime.Marshaler, server WorkflowServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq UpdateWorkflowExecutionRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + val, ok = pathParams["workflow_execution.workflow_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "workflow_execution.workflow_id") + } + + err = 
runtime.PopulateFieldFromPath(&protoReq, "workflow_execution.workflow_id", val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "workflow_execution.workflow_id", err) + } + + val, ok = pathParams["request.input.name"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "request.input.name") + } + + err = runtime.PopulateFieldFromPath(&protoReq, "request.input.name", val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "request.input.name", err) + } + + msg, err := server.UpdateWorkflowExecution(ctx, &protoReq) + return msg, metadata, err + +} + +func request_WorkflowService_StartBatchOperation_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq StartBatchOperationRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + val, ok = pathParams["job_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "job_id") + } + + protoReq.JobId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "job_id", err) + } + + msg, err := client.StartBatchOperation(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_WorkflowService_StartBatchOperation_0(ctx context.Context, marshaler runtime.Marshaler, server WorkflowServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq StartBatchOperationRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + val, ok = pathParams["job_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "job_id") + } + + protoReq.JobId, err = runtime.String(val) + + if err != nil { + return 
nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "job_id", err) + } + + msg, err := server.StartBatchOperation(ctx, &protoReq) + return msg, metadata, err + +} + +func request_WorkflowService_StopBatchOperation_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq StopBatchOperationRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + val, ok = pathParams["job_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "job_id") + } + + protoReq.JobId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "job_id", err) + } + + msg, err := client.StopBatchOperation(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_WorkflowService_StopBatchOperation_0(ctx context.Context, marshaler runtime.Marshaler, server WorkflowServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq StopBatchOperationRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + val, ok = pathParams["job_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "job_id") + } + + protoReq.JobId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "job_id", err) + } + + msg, err := server.StopBatchOperation(ctx, &protoReq) + return msg, metadata, err + +} + +func request_WorkflowService_DescribeBatchOperation_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq DescribeBatchOperationRequest + var metadata runtime.ServerMetadata + + var ( + val string 
+ ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + val, ok = pathParams["job_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "job_id") + } + + protoReq.JobId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "job_id", err) + } + + msg, err := client.DescribeBatchOperation(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_WorkflowService_DescribeBatchOperation_0(ctx context.Context, marshaler runtime.Marshaler, server WorkflowServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq DescribeBatchOperationRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + val, ok = pathParams["job_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "job_id") + } + + protoReq.JobId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "job_id", err) + } + + msg, err := server.DescribeBatchOperation(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_WorkflowService_ListBatchOperations_0 = &utilities.DoubleArray{Encoding: map[string]int{"namespace": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}} +) + +func request_WorkflowService_ListBatchOperations_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ListBatchOperationsRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_WorkflowService_ListBatchOperations_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.ListBatchOperations(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_WorkflowService_ListBatchOperations_0(ctx context.Context, marshaler runtime.Marshaler, server WorkflowServiceServer, req 
*http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ListBatchOperationsRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_WorkflowService_ListBatchOperations_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.ListBatchOperations(ctx, &protoReq) + return msg, metadata, err + +} + +// RegisterWorkflowServiceHandlerServer registers the http handlers for service WorkflowService to "mux". +// UnaryRPC :call WorkflowServiceServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterWorkflowServiceHandlerFromEndpoint instead. +func RegisterWorkflowServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server WorkflowServiceServer) error { + + mux.Handle("POST", pattern_WorkflowService_RegisterNamespace_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_WorkflowService_RegisterNamespace_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_RegisterNamespace_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_WorkflowService_DescribeNamespace_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_WorkflowService_DescribeNamespace_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_DescribeNamespace_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_WorkflowService_ListNamespaces_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_WorkflowService_ListNamespaces_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_ListNamespaces_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_WorkflowService_UpdateNamespace_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_WorkflowService_UpdateNamespace_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_UpdateNamespace_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("POST", pattern_WorkflowService_StartWorkflowExecution_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_WorkflowService_StartWorkflowExecution_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_StartWorkflowExecution_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_WorkflowService_GetWorkflowExecutionHistory_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_WorkflowService_GetWorkflowExecutionHistory_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_GetWorkflowExecutionHistory_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_WorkflowService_GetWorkflowExecutionHistoryReverse_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_WorkflowService_GetWorkflowExecutionHistoryReverse_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_GetWorkflowExecutionHistoryReverse_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("POST", pattern_WorkflowService_RecordActivityTaskHeartbeat_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_WorkflowService_RecordActivityTaskHeartbeat_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_RecordActivityTaskHeartbeat_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_WorkflowService_RecordActivityTaskHeartbeatById_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_WorkflowService_RecordActivityTaskHeartbeatById_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_RecordActivityTaskHeartbeatById_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_WorkflowService_RespondActivityTaskCompleted_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_WorkflowService_RespondActivityTaskCompleted_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_RespondActivityTaskCompleted_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("POST", pattern_WorkflowService_RespondActivityTaskCompletedById_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_WorkflowService_RespondActivityTaskCompletedById_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_RespondActivityTaskCompletedById_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_WorkflowService_RespondActivityTaskFailed_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_WorkflowService_RespondActivityTaskFailed_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_RespondActivityTaskFailed_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_WorkflowService_RespondActivityTaskFailedById_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_WorkflowService_RespondActivityTaskFailedById_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_RespondActivityTaskFailedById_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("POST", pattern_WorkflowService_RespondActivityTaskCanceled_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_WorkflowService_RespondActivityTaskCanceled_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_RespondActivityTaskCanceled_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_WorkflowService_RespondActivityTaskCanceledById_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_WorkflowService_RespondActivityTaskCanceledById_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_RespondActivityTaskCanceledById_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_WorkflowService_RequestCancelWorkflowExecution_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_WorkflowService_RequestCancelWorkflowExecution_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_RequestCancelWorkflowExecution_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("POST", pattern_WorkflowService_SignalWorkflowExecution_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_WorkflowService_SignalWorkflowExecution_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_SignalWorkflowExecution_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_WorkflowService_SignalWithStartWorkflowExecution_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_WorkflowService_SignalWithStartWorkflowExecution_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_SignalWithStartWorkflowExecution_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_WorkflowService_ResetWorkflowExecution_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_WorkflowService_ResetWorkflowExecution_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_ResetWorkflowExecution_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("POST", pattern_WorkflowService_TerminateWorkflowExecution_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_WorkflowService_TerminateWorkflowExecution_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_TerminateWorkflowExecution_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_WorkflowService_ListWorkflowExecutions_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_WorkflowService_ListWorkflowExecutions_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_ListWorkflowExecutions_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_WorkflowService_ListArchivedWorkflowExecutions_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_WorkflowService_ListArchivedWorkflowExecutions_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_ListArchivedWorkflowExecutions_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_WorkflowService_CountWorkflowExecutions_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_WorkflowService_CountWorkflowExecutions_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_CountWorkflowExecutions_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_WorkflowService_QueryWorkflow_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_WorkflowService_QueryWorkflow_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_QueryWorkflow_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_WorkflowService_DescribeWorkflowExecution_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_WorkflowService_DescribeWorkflowExecution_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_DescribeWorkflowExecution_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_WorkflowService_DescribeTaskQueue_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_WorkflowService_DescribeTaskQueue_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_DescribeTaskQueue_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_WorkflowService_GetClusterInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_WorkflowService_GetClusterInfo_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_GetClusterInfo_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_WorkflowService_GetSystemInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_WorkflowService_GetSystemInfo_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_GetSystemInfo_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("POST", pattern_WorkflowService_CreateSchedule_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_WorkflowService_CreateSchedule_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_CreateSchedule_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_WorkflowService_DescribeSchedule_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_WorkflowService_DescribeSchedule_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_DescribeSchedule_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_WorkflowService_UpdateSchedule_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_WorkflowService_UpdateSchedule_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_UpdateSchedule_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("POST", pattern_WorkflowService_PatchSchedule_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_WorkflowService_PatchSchedule_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_PatchSchedule_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_WorkflowService_ListScheduleMatchingTimes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_WorkflowService_ListScheduleMatchingTimes_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_ListScheduleMatchingTimes_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("DELETE", pattern_WorkflowService_DeleteSchedule_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_WorkflowService_DeleteSchedule_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_DeleteSchedule_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_WorkflowService_ListSchedules_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_WorkflowService_ListSchedules_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_ListSchedules_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_WorkflowService_UpdateWorkflowExecution_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_WorkflowService_UpdateWorkflowExecution_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_UpdateWorkflowExecution_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_WorkflowService_StartBatchOperation_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_WorkflowService_StartBatchOperation_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_StartBatchOperation_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("POST", pattern_WorkflowService_StopBatchOperation_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_WorkflowService_StopBatchOperation_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_StopBatchOperation_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_WorkflowService_DescribeBatchOperation_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_WorkflowService_DescribeBatchOperation_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_DescribeBatchOperation_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_WorkflowService_ListBatchOperations_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_WorkflowService_ListBatchOperations_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_ListBatchOperations_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterWorkflowServiceHandlerFromEndpoint is same as RegisterWorkflowServiceHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. 
+func RegisterWorkflowServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterWorkflowServiceHandler(ctx, mux, conn) +} + +// RegisterWorkflowServiceHandler registers the http handlers for service WorkflowService to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterWorkflowServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterWorkflowServiceHandlerClient(ctx, mux, NewWorkflowServiceClient(conn)) +} + +// RegisterWorkflowServiceHandlerClient registers the http handlers for service WorkflowService +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "WorkflowServiceClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "WorkflowServiceClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "WorkflowServiceClient" to call the correct interceptors. +func RegisterWorkflowServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client WorkflowServiceClient) error { + + mux.Handle("POST", pattern_WorkflowService_RegisterNamespace_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_WorkflowService_RegisterNamespace_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_RegisterNamespace_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_WorkflowService_DescribeNamespace_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_WorkflowService_DescribeNamespace_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_DescribeNamespace_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
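[Editorial note, not part of the patch.] RegisterWorkflowServiceHandlerFromEndpoint, defined just above, is the variant most callers use: it dials the gRPC frontend, ties the connection's lifetime to ctx, and registers every pattern_WorkflowService_* route on the ServeMux via the client-side request_* helpers. A hedged usage sketch follows; the import paths, the 7233/8080 addresses, and the dial option are illustrative assumptions, not taken from this patch.

package main

import (
	"context"
	"log"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/runtime" // grpc-gateway v1 runtime, matching this generated code
	"google.golang.org/grpc"

	workflowservice "go.temporal.io/api/workflowservice/v1" // assumed import path for the generated package
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	mux := runtime.NewServeMux()
	opts := []grpc.DialOption{grpc.WithInsecure()} // plaintext dial for the sketch only

	// Dials the frontend's gRPC port and registers every generated HTTP route on
	// mux, proxying each request through the corresponding request_* helper.
	if err := workflowservice.RegisterWorkflowServiceHandlerFromEndpoint(ctx, mux, "localhost:7233", opts); err != nil {
		log.Fatal(err)
	}

	log.Fatal(http.ListenAndServe(":8080", mux))
}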
+ + }) + + mux.Handle("GET", pattern_WorkflowService_ListNamespaces_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_WorkflowService_ListNamespaces_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_ListNamespaces_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_WorkflowService_UpdateNamespace_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_WorkflowService_UpdateNamespace_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_UpdateNamespace_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_WorkflowService_StartWorkflowExecution_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_WorkflowService_StartWorkflowExecution_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_StartWorkflowExecution_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_WorkflowService_GetWorkflowExecutionHistory_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_WorkflowService_GetWorkflowExecutionHistory_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_GetWorkflowExecutionHistory_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_WorkflowService_GetWorkflowExecutionHistoryReverse_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_WorkflowService_GetWorkflowExecutionHistoryReverse_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_GetWorkflowExecutionHistoryReverse_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_WorkflowService_RecordActivityTaskHeartbeat_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_WorkflowService_RecordActivityTaskHeartbeat_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_RecordActivityTaskHeartbeat_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_WorkflowService_RecordActivityTaskHeartbeatById_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_WorkflowService_RecordActivityTaskHeartbeatById_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_RecordActivityTaskHeartbeatById_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_WorkflowService_RespondActivityTaskCompleted_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_WorkflowService_RespondActivityTaskCompleted_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_RespondActivityTaskCompleted_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("POST", pattern_WorkflowService_RespondActivityTaskCompletedById_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_WorkflowService_RespondActivityTaskCompletedById_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_RespondActivityTaskCompletedById_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_WorkflowService_RespondActivityTaskFailed_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_WorkflowService_RespondActivityTaskFailed_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_RespondActivityTaskFailed_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_WorkflowService_RespondActivityTaskFailedById_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_WorkflowService_RespondActivityTaskFailedById_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_RespondActivityTaskFailedById_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_WorkflowService_RespondActivityTaskCanceled_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_WorkflowService_RespondActivityTaskCanceled_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_RespondActivityTaskCanceled_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("POST", pattern_WorkflowService_RespondActivityTaskCanceledById_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_WorkflowService_RespondActivityTaskCanceledById_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_RespondActivityTaskCanceledById_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_WorkflowService_RequestCancelWorkflowExecution_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_WorkflowService_RequestCancelWorkflowExecution_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_RequestCancelWorkflowExecution_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_WorkflowService_SignalWorkflowExecution_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_WorkflowService_SignalWorkflowExecution_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_SignalWorkflowExecution_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_WorkflowService_SignalWithStartWorkflowExecution_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_WorkflowService_SignalWithStartWorkflowExecution_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_SignalWithStartWorkflowExecution_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("POST", pattern_WorkflowService_ResetWorkflowExecution_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_WorkflowService_ResetWorkflowExecution_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_ResetWorkflowExecution_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_WorkflowService_TerminateWorkflowExecution_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_WorkflowService_TerminateWorkflowExecution_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_TerminateWorkflowExecution_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_WorkflowService_ListWorkflowExecutions_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_WorkflowService_ListWorkflowExecutions_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_ListWorkflowExecutions_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_WorkflowService_ListArchivedWorkflowExecutions_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_WorkflowService_ListArchivedWorkflowExecutions_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_ListArchivedWorkflowExecutions_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_WorkflowService_CountWorkflowExecutions_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_WorkflowService_CountWorkflowExecutions_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_CountWorkflowExecutions_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_WorkflowService_QueryWorkflow_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_WorkflowService_QueryWorkflow_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_QueryWorkflow_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_WorkflowService_DescribeWorkflowExecution_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_WorkflowService_DescribeWorkflowExecution_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_DescribeWorkflowExecution_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_WorkflowService_DescribeTaskQueue_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_WorkflowService_DescribeTaskQueue_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_DescribeTaskQueue_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_WorkflowService_GetClusterInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_WorkflowService_GetClusterInfo_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_GetClusterInfo_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_WorkflowService_GetSystemInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_WorkflowService_GetSystemInfo_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_GetSystemInfo_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_WorkflowService_CreateSchedule_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_WorkflowService_CreateSchedule_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_CreateSchedule_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_WorkflowService_DescribeSchedule_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_WorkflowService_DescribeSchedule_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_DescribeSchedule_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("POST", pattern_WorkflowService_UpdateSchedule_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_WorkflowService_UpdateSchedule_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_UpdateSchedule_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_WorkflowService_PatchSchedule_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_WorkflowService_PatchSchedule_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_PatchSchedule_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_WorkflowService_ListScheduleMatchingTimes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_WorkflowService_ListScheduleMatchingTimes_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_ListScheduleMatchingTimes_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("DELETE", pattern_WorkflowService_DeleteSchedule_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_WorkflowService_DeleteSchedule_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_DeleteSchedule_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_WorkflowService_ListSchedules_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_WorkflowService_ListSchedules_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_ListSchedules_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_WorkflowService_UpdateWorkflowExecution_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_WorkflowService_UpdateWorkflowExecution_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_UpdateWorkflowExecution_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_WorkflowService_StartBatchOperation_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_WorkflowService_StartBatchOperation_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_StartBatchOperation_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_WorkflowService_StopBatchOperation_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_WorkflowService_StopBatchOperation_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_StopBatchOperation_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_WorkflowService_DescribeBatchOperation_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_WorkflowService_DescribeBatchOperation_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_DescribeBatchOperation_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_WorkflowService_ListBatchOperations_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_WorkflowService_ListBatchOperations_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_ListBatchOperations_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +var ( + pattern_WorkflowService_RegisterNamespace_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "namespaces"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_WorkflowService_DescribeNamespace_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"api", "v1", "namespaces", "namespace"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_WorkflowService_ListNamespaces_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "namespaces"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_WorkflowService_UpdateNamespace_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"api", "v1", "namespaces", "namespace", "update"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_WorkflowService_StartWorkflowExecution_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4, 1, 0, 4, 1, 5, 5}, []string{"api", "v1", "namespaces", "namespace", "workflows", "workflow_id"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_WorkflowService_GetWorkflowExecutionHistory_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4, 1, 0, 4, 1, 5, 5, 2, 6}, []string{"api", "v1", "namespaces", "namespace", "workflows", "execution.workflow_id", "history"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_WorkflowService_GetWorkflowExecutionHistoryReverse_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4, 1, 0, 4, 1, 5, 5, 2, 6}, []string{"api", "v1", "namespaces", "namespace", "workflows", "execution.workflow_id", "history-reverse"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_WorkflowService_RecordActivityTaskHeartbeat_0 = 
runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4, 2, 5}, []string{"api", "v1", "namespaces", "namespace", "activities", "heartbeat"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_WorkflowService_RecordActivityTaskHeartbeatById_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4, 2, 5}, []string{"api", "v1", "namespaces", "namespace", "activities", "heartbeat-by-id"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_WorkflowService_RespondActivityTaskCompleted_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4, 2, 5}, []string{"api", "v1", "namespaces", "namespace", "activities", "complete"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_WorkflowService_RespondActivityTaskCompletedById_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4, 2, 5}, []string{"api", "v1", "namespaces", "namespace", "activities", "complete-by-id"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_WorkflowService_RespondActivityTaskFailed_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4, 2, 5}, []string{"api", "v1", "namespaces", "namespace", "activities", "fail"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_WorkflowService_RespondActivityTaskFailedById_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4, 2, 5}, []string{"api", "v1", "namespaces", "namespace", "activities", "fail-by-id"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_WorkflowService_RespondActivityTaskCanceled_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4, 2, 5}, []string{"api", "v1", "namespaces", "namespace", "activities", "cancel"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_WorkflowService_RespondActivityTaskCanceledById_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4, 2, 5}, []string{"api", "v1", "namespaces", "namespace", "activities", "cancel-by-id"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_WorkflowService_RequestCancelWorkflowExecution_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4, 1, 0, 4, 1, 5, 5, 2, 6}, []string{"api", "v1", "namespaces", "namespace", "workflows", "workflow_execution.workflow_id", "cancel"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_WorkflowService_SignalWorkflowExecution_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4, 1, 0, 4, 1, 5, 5, 2, 6, 1, 0, 4, 1, 5, 7}, []string{"api", "v1", "namespaces", "namespace", "workflows", "workflow_execution.workflow_id", "signal", "signal_name"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_WorkflowService_SignalWithStartWorkflowExecution_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4, 1, 0, 4, 1, 5, 5, 2, 6, 1, 0, 4, 1, 5, 7}, []string{"api", "v1", "namespaces", "namespace", "workflows", "workflow_id", "signal-with-start", "signal_name"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_WorkflowService_ResetWorkflowExecution_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4, 1, 0, 4, 1, 5, 5, 2, 6}, []string{"api", "v1", "namespaces", "namespace", "workflows", "workflow_execution.workflow_id", "reset"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_WorkflowService_TerminateWorkflowExecution_0 = 
runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4, 1, 0, 4, 1, 5, 5, 2, 6}, []string{"api", "v1", "namespaces", "namespace", "workflows", "workflow_execution.workflow_id", "terminate"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_WorkflowService_ListWorkflowExecutions_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"api", "v1", "namespaces", "namespace", "workflows"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_WorkflowService_ListArchivedWorkflowExecutions_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"api", "v1", "namespaces", "namespace", "archived-workflows"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_WorkflowService_CountWorkflowExecutions_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"api", "v1", "namespaces", "namespace", "workflow-count"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_WorkflowService_QueryWorkflow_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4, 1, 0, 4, 1, 5, 5, 2, 6, 1, 0, 4, 1, 5, 7}, []string{"api", "v1", "namespaces", "namespace", "workflows", "execution.workflow_id", "query", "query.query_type"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_WorkflowService_DescribeWorkflowExecution_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4, 1, 0, 4, 1, 5, 5}, []string{"api", "v1", "namespaces", "namespace", "workflows", "execution.workflow_id"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_WorkflowService_DescribeTaskQueue_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4, 1, 0, 4, 1, 5, 5}, []string{"api", "v1", "namespaces", "namespace", "task-queues", "task_queue.name"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_WorkflowService_GetClusterInfo_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "cluster-info"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_WorkflowService_GetSystemInfo_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "system-info"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_WorkflowService_CreateSchedule_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4, 1, 0, 4, 1, 5, 5}, []string{"api", "v1", "namespaces", "namespace", "schedules", "schedule_id"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_WorkflowService_DescribeSchedule_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4, 1, 0, 4, 1, 5, 5}, []string{"api", "v1", "namespaces", "namespace", "schedules", "schedule_id"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_WorkflowService_UpdateSchedule_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4, 1, 0, 4, 1, 5, 5, 2, 6}, []string{"api", "v1", "namespaces", "namespace", "schedules", "schedule_id", "update"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_WorkflowService_PatchSchedule_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4, 1, 0, 4, 1, 5, 5, 2, 6}, []string{"api", "v1", "namespaces", "namespace", "schedules", "schedule_id", "patch"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_WorkflowService_ListScheduleMatchingTimes_0 = runtime.MustPattern(runtime.NewPattern(1, 
[]int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4, 1, 0, 4, 1, 5, 5, 2, 6}, []string{"api", "v1", "namespaces", "namespace", "schedules", "schedule_id", "matching-times"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_WorkflowService_DeleteSchedule_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4, 1, 0, 4, 1, 5, 5}, []string{"api", "v1", "namespaces", "namespace", "schedules", "schedule_id"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_WorkflowService_ListSchedules_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"api", "v1", "namespaces", "namespace", "schedules"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_WorkflowService_UpdateWorkflowExecution_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4, 1, 0, 4, 1, 5, 5, 2, 6, 1, 0, 4, 1, 5, 7}, []string{"api", "v1", "namespaces", "namespace", "workflows", "workflow_execution.workflow_id", "update", "request.input.name"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_WorkflowService_StartBatchOperation_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4, 1, 0, 4, 1, 5, 5}, []string{"api", "v1", "namespaces", "namespace", "batch-operations", "job_id"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_WorkflowService_StopBatchOperation_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4, 1, 0, 4, 1, 5, 5, 2, 6}, []string{"api", "v1", "namespaces", "namespace", "batch-operations", "job_id", "stop"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_WorkflowService_DescribeBatchOperation_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4, 1, 0, 4, 1, 5, 5}, []string{"api", "v1", "namespaces", "namespace", "batch-operations", "job_id"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_WorkflowService_ListBatchOperations_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"api", "v1", "namespaces", "namespace", "batch-operations"}, "", runtime.AssumeColonVerbOpt(true))) +) + +var ( + forward_WorkflowService_RegisterNamespace_0 = runtime.ForwardResponseMessage + + forward_WorkflowService_DescribeNamespace_0 = runtime.ForwardResponseMessage + + forward_WorkflowService_ListNamespaces_0 = runtime.ForwardResponseMessage + + forward_WorkflowService_UpdateNamespace_0 = runtime.ForwardResponseMessage + + forward_WorkflowService_StartWorkflowExecution_0 = runtime.ForwardResponseMessage + + forward_WorkflowService_GetWorkflowExecutionHistory_0 = runtime.ForwardResponseMessage + + forward_WorkflowService_GetWorkflowExecutionHistoryReverse_0 = runtime.ForwardResponseMessage + + forward_WorkflowService_RecordActivityTaskHeartbeat_0 = runtime.ForwardResponseMessage + + forward_WorkflowService_RecordActivityTaskHeartbeatById_0 = runtime.ForwardResponseMessage + + forward_WorkflowService_RespondActivityTaskCompleted_0 = runtime.ForwardResponseMessage + + forward_WorkflowService_RespondActivityTaskCompletedById_0 = runtime.ForwardResponseMessage + + forward_WorkflowService_RespondActivityTaskFailed_0 = runtime.ForwardResponseMessage + + forward_WorkflowService_RespondActivityTaskFailedById_0 = runtime.ForwardResponseMessage + + forward_WorkflowService_RespondActivityTaskCanceled_0 = runtime.ForwardResponseMessage + + forward_WorkflowService_RespondActivityTaskCanceledById_0 = runtime.ForwardResponseMessage + + 
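The pattern variables above bind REST-style routes (for example /api/v1/namespaces/{namespace}/schedules for ListSchedules) to the corresponding WorkflowService RPCs through the handlers registered earlier. A minimal sketch of calling one of these routes over plain HTTP, assuming the frontend actually serves the generated handlers; the host, port and namespace are illustrative only:

package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// GET /api/v1/namespaces/{namespace}/schedules is the route registered
	// for ListSchedules above; localhost:7243 and "default" are assumptions.
	resp, err := http.Get("http://localhost:7243/api/v1/namespaces/default/schedules")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(body))
}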
forward_WorkflowService_RequestCancelWorkflowExecution_0 = runtime.ForwardResponseMessage + + forward_WorkflowService_SignalWorkflowExecution_0 = runtime.ForwardResponseMessage + + forward_WorkflowService_SignalWithStartWorkflowExecution_0 = runtime.ForwardResponseMessage + + forward_WorkflowService_ResetWorkflowExecution_0 = runtime.ForwardResponseMessage + + forward_WorkflowService_TerminateWorkflowExecution_0 = runtime.ForwardResponseMessage + + forward_WorkflowService_ListWorkflowExecutions_0 = runtime.ForwardResponseMessage + + forward_WorkflowService_ListArchivedWorkflowExecutions_0 = runtime.ForwardResponseMessage + + forward_WorkflowService_CountWorkflowExecutions_0 = runtime.ForwardResponseMessage + + forward_WorkflowService_QueryWorkflow_0 = runtime.ForwardResponseMessage + + forward_WorkflowService_DescribeWorkflowExecution_0 = runtime.ForwardResponseMessage + + forward_WorkflowService_DescribeTaskQueue_0 = runtime.ForwardResponseMessage + + forward_WorkflowService_GetClusterInfo_0 = runtime.ForwardResponseMessage + + forward_WorkflowService_GetSystemInfo_0 = runtime.ForwardResponseMessage + + forward_WorkflowService_CreateSchedule_0 = runtime.ForwardResponseMessage + + forward_WorkflowService_DescribeSchedule_0 = runtime.ForwardResponseMessage + + forward_WorkflowService_UpdateSchedule_0 = runtime.ForwardResponseMessage + + forward_WorkflowService_PatchSchedule_0 = runtime.ForwardResponseMessage + + forward_WorkflowService_ListScheduleMatchingTimes_0 = runtime.ForwardResponseMessage + + forward_WorkflowService_DeleteSchedule_0 = runtime.ForwardResponseMessage + + forward_WorkflowService_ListSchedules_0 = runtime.ForwardResponseMessage + + forward_WorkflowService_UpdateWorkflowExecution_0 = runtime.ForwardResponseMessage + + forward_WorkflowService_StartBatchOperation_0 = runtime.ForwardResponseMessage + + forward_WorkflowService_StopBatchOperation_0 = runtime.ForwardResponseMessage + + forward_WorkflowService_DescribeBatchOperation_0 = runtime.ForwardResponseMessage + + forward_WorkflowService_ListBatchOperations_0 = runtime.ForwardResponseMessage +) diff -Nru temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/activity/activity.go temporal-1.22.5/src/vendor/go.temporal.io/sdk/activity/activity.go --- temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/activity/activity.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.temporal.io/sdk/activity/activity.go 2024-02-23 09:46:13.000000000 +0000 @@ -71,6 +71,9 @@ // // details - the details that you provided here can be seen in the workflow when it receives TimeoutError, you // can check error with TimeoutType()/Details(). +// +// Note: If using asynchronous activity completion, +// after returning [ErrResultPending] users should heartbeat with [client.Client.RecordActivityHeartbeat] func RecordHeartbeat(ctx context.Context, details ...interface{}) { internal.RecordActivityHeartbeat(ctx, details...) } @@ -101,3 +104,8 @@ func GetWorkerStopChannel(ctx context.Context) <-chan struct{} { return internal.GetWorkerStopChannel(ctx) } + +// IsActivity check if the context is an activity context from a normal or local activity. 
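The new RecordHeartbeat note above concerns asynchronous activity completion: once an activity returns ErrResultPending, heartbeats and the final result have to go through the client API using the activity's task token. A minimal sketch under that reading, with the external completer and connection details left as assumptions:

package sample

import (
	"context"

	"go.temporal.io/sdk/activity"
	"go.temporal.io/sdk/client"
)

// AsyncActivity hands its task token to an external completer and returns
// ErrResultPending, so the SDK leaves the activity open.
func AsyncActivity(ctx context.Context) (string, error) {
	token := activity.GetInfo(ctx).TaskToken
	go completeLater(token) // stand-in for whatever process finishes the work
	return "", activity.ErrResultPending
}

// completeLater heartbeats and completes the activity through the client API,
// as the note above requires once ErrResultPending has been returned.
func completeLater(taskToken []byte) {
	c, err := client.Dial(client.Options{}) // connection details omitted
	if err != nil {
		return
	}
	defer c.Close()
	ctx := context.Background()
	_ = c.RecordActivityHeartbeat(ctx, taskToken, "still working")
	_ = c.CompleteActivity(ctx, taskToken, "done", nil)
}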
+func IsActivity(ctx context.Context) bool { + return internal.IsActivity(ctx) +} diff -Nru temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/activity/doc.go temporal-1.22.5/src/vendor/go.temporal.io/sdk/activity/doc.go --- temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/activity/doc.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.temporal.io/sdk/activity/doc.go 2024-02-23 09:46:13.000000000 +0000 @@ -74,6 +74,14 @@ other Go service code. You can use the usual loggers and metrics collectors. You can use the standard Go concurrency constructs. +# Context Cancellation + +The first parameter to an activity function can be an optional context.Context. The context will be cancelled when: +* The activity function returns. +* The context deadline is exceeded. The deadline is calculated based on the minimum of the ScheduleToClose timeout plus +the activity task scheduled time and the StartToClose timeout plus the activity task start time. +* The activity calls RecordHeartbeat after being cancelled by the Temporal server. + # Failing the Activity To mark an Activity as failed, all that needs to happen is for the Activity function to return an error via the error @@ -94,7 +102,7 @@ } When the Activity times out due to a missed heartbeat, the last value of the details (progress in the above sample) is -returned from the workflow.ExecuteActivity function as the details field of TimeoutError with TimeoutType_HEARTBEAT. +returned from the [workflow.ExecuteActivity] function as the details field of [temporal.TimeoutError] with TimeoutType_HEARTBEAT. It is also possible to heartbeat an Activity from an external source: @@ -105,7 +113,7 @@ err := client.RecordActivityHeartbeat(ctx, taskToken, details) It expects an additional parameter, "taskToken", which is the value of the binary "TaskToken" field of the -"ActivityInfo" struct retrieved inside the Activity (GetActivityInfo(ctx).TaskToken). "details" is the serializable +[activity.Info] retrieved inside the Activity (GetActivityInfo(ctx).TaskToken). "details" is the serializable payload containing progress information. # Activity Cancellation diff -Nru temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/client/client.go temporal-1.22.5/src/vendor/go.temporal.io/sdk/client/client.go --- temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/client/client.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.temporal.io/sdk/client/client.go 2024-02-23 09:46:13.000000000 +0000 @@ -25,6 +25,7 @@ //go:generate mockgen -copyright_file ../LICENSE -package client -source client.go -destination client_mock.go // Package client is used by external programs to communicate with Temporal service. +// // NOTE: DO NOT USE THIS API INSIDE OF ANY WORKFLOW CODE!!! package client @@ -43,6 +44,25 @@ "go.temporal.io/sdk/internal/common/metrics" ) +// TaskReachability specifies which category of tasks may reach a worker on a versioned task queue. +// Used both in a reachability query and its response. +// WARNING: Worker versioning is currently experimental +type TaskReachability = internal.TaskReachability + +const ( + // TaskReachabilityUnspecified indicates the reachability was not specified + TaskReachabilityUnspecified = internal.TaskReachabilityUnspecified + // TaskReachabilityNewWorkflows indicates the Build Id might be used by new workflows + TaskReachabilityNewWorkflows = internal.TaskReachabilityNewWorkflows + // TaskReachabilityExistingWorkflows indicates the Build Id might be used by open workflows + // and/or closed workflows. 
+ TaskReachabilityExistingWorkflows = internal.TaskReachabilityExistingWorkflows + // TaskReachabilityOpenWorkflows indicates the Build Id might be used by open workflows. + TaskReachabilityOpenWorkflows = internal.TaskReachabilityOpenWorkflows + // TaskReachabilityClosedWorkflows indicates the Build Id might be used by closed workflows + TaskReachabilityClosedWorkflows = internal.TaskReachabilityClosedWorkflows +) + const ( // DefaultHostPort is the host:port which is used if not passed with options. DefaultHostPort = internal.LocalHostPort @@ -57,6 +77,10 @@ // QueryTypeOpenSessions is the build in query type for Client.QueryWorkflow() call. Use this query type to get all open // sessions in the workflow. The result will be a list of SessionInfo encoded in the converter.EncodedValue. QueryTypeOpenSessions string = internal.QueryTypeOpenSessions + + // UnversionedBuildID is a stand-in for a Build Id for unversioned Workers. + // WARNING: Worker versioning is currently experimental + UnversionedBuildID string = internal.UnversionedBuildID ) type ( @@ -102,6 +126,12 @@ // ScheduleSpec describes when a schedules action should occur. ScheduleSpec = internal.ScheduleSpec + // SchedulePolicies describes the current polcies of a schedule. + SchedulePolicies = internal.SchedulePolicies + + // ScheduleState describes the current state of a schedule. + ScheduleState = internal.ScheduleState + // ScheduleBackfill desribes a time periods and policy and takes Actions as if that time passed by right now, all at once. ScheduleBackfill = internal.ScheduleBackfill @@ -209,6 +239,22 @@ // WARNING: Worker versioning is currently experimental BuildIDOpPromoteIDWithinSet = internal.BuildIDOpPromoteIDWithinSet + // GetWorkerTaskReachabilityOptions is the input to Client.GetWorkerTaskReachability. + // WARNING: Worker versioning is currently experimental + GetWorkerTaskReachabilityOptions = internal.GetWorkerTaskReachabilityOptions + + // WorkerTaskReachability is the response for Client.GetWorkerTaskReachability. + // WARNING: Worker versioning is currently experimental + WorkerTaskReachability = internal.WorkerTaskReachability + + // BuildIDReachability describes the reachability of a buildID + // WARNING: Worker versioning is currently experimental + BuildIDReachability = internal.BuildIDReachability + + // TaskQueueReachability Describes how the Build ID may be reachable from the task queue. + // WARNING: Worker versioning is currently experimental + TaskQueueReachability = internal.TaskQueueReachability + // Client is the client for starting and getting information about a workflow executions as well as // completing activities asynchronously. Client interface { @@ -394,8 +440,8 @@ // ListWorkflow gets workflow executions based on query. The query is basically the SQL WHERE clause, examples: // - "(WorkflowID = 'wid1' or (WorkflowType = 'type2' and WorkflowID = 'wid2'))". // - "CloseTime between '2019-08-27T15:04:05+00:00' and '2019-08-28T15:04:05+00:00'". - // - to list only open workflow use "CloseTime = missing" - // Advanced queries require ElasticSearch, but simple queries do not. + // - to list only open workflow use "CloseTime is null" + // For supported operations on different server versions see https://docs.temporal.io/visibility. // Retrieved workflow executions are sorted by StartTime in descending order when list open workflow, // and sorted by CloseTime in descending order for other queries. 
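The updated ListWorkflow comment replaces the old "CloseTime = missing" syntax with "CloseTime is null" for open executions and defers the rest to the visibility docs. A short sketch of such a query, assuming an already-connected client and an illustrative namespace:

package sample

import (
	"context"

	"go.temporal.io/api/workflowservice/v1"
	"go.temporal.io/sdk/client"
)

// listOpenWorkflows lists open executions of one workflow type using the
// "CloseTime is null" form from the comment above.
func listOpenWorkflows(ctx context.Context, c client.Client) ([]string, error) {
	resp, err := c.ListWorkflow(ctx, &workflowservice.ListWorkflowExecutionsRequest{
		Namespace: "default", // illustrative namespace
		Query:     "WorkflowType = 'type2' and CloseTime is null",
	})
	if err != nil {
		return nil, err
	}
	ids := make([]string, 0, len(resp.Executions))
	for _, e := range resp.Executions {
		// Open executions come back sorted by StartTime, newest first.
		ids = append(ids, e.Execution.WorkflowId)
	}
	return ids, nil
}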
// The errors it can return: @@ -414,11 +460,11 @@ // - serviceerror.Unavailable ListArchivedWorkflow(ctx context.Context, request *workflowservice.ListArchivedWorkflowExecutionsRequest) (*workflowservice.ListArchivedWorkflowExecutionsResponse, error) - // ScanWorkflow gets workflow executions based on query. This API only works with ElasticSearch, - // and will return serviceerror.InvalidArgument when using Cassandra or MySQL. The query is basically the SQL WHERE clause + // ScanWorkflow gets workflow executions based on query. The query is basically the SQL WHERE clause // (see ListWorkflow for query examples). + // For supported operations on different server versions see https://docs.temporal.io/visibility. // ScanWorkflow should be used when retrieving large amount of workflows and order is not needed. - // It will use more ElasticSearch resources than ListWorkflow, but will be several times faster + // It will use more resources than ListWorkflow, but will be several times faster // when retrieving millions of workflows. // The errors it can return: // - serviceerror.InvalidArgument @@ -426,9 +472,9 @@ // - serviceerror.Unavailable ScanWorkflow(ctx context.Context, request *workflowservice.ScanWorkflowExecutionsRequest) (*workflowservice.ScanWorkflowExecutionsResponse, error) - // CountWorkflow gets number of workflow executions based on query. This API only works with ElasticSearch, - // and will return serviceerror.InvalidArgument when using Cassandra or MySQL. The query is basically the SQL WHERE clause + // CountWorkflow gets number of workflow executions based on query. The query is basically the SQL WHERE clause // (see ListWorkflow for query examples). + // For supported operations on different server versions see https://docs.temporal.io/visibility. // The errors it can return: // - serviceerror.InvalidArgument // - serviceerror.Internal @@ -506,6 +552,11 @@ // WARNING: Worker versioning is currently experimental GetWorkerBuildIdCompatibility(ctx context.Context, options *GetWorkerBuildIdCompatibilityOptions) (*WorkerBuildIDVersionSets, error) + // GetWorkerTaskReachability + // Returns which versions are is still in use by open or closed workflows + // WARNING: Worker versioning is currently experimental + GetWorkerTaskReachability(ctx context.Context, options *GetWorkerTaskReachabilityOptions) (*WorkerTaskReachability, error) + // CheckHealth performs a server health check using the gRPC health check // API. If the check fails, an error is returned. CheckHealth(ctx context.Context, request *CheckHealthRequest) (*CheckHealthResponse, error) @@ -666,7 +717,7 @@ _ internal.NamespaceClient = NamespaceClient(nil) ) -// NewValue creates a new converter.EncodedValue which can be used to decode binary data returned by Temporal. For example: +// NewValue creates a new [converter.EncodedValue] which can be used to decode binary data returned by Temporal. For example: // User had Activity.RecordHeartbeat(ctx, "my-heartbeat") and then got response from calling Client.DescribeWorkflowExecution. // The response contains binary field PendingActivityInfo.HeartbeatDetails, // which can be decoded by using: @@ -677,7 +728,7 @@ return internal.NewValue(data) } -// NewValues creates a new converter.EncodedValues which can be used to decode binary data returned by Temporal. For example: +// NewValues creates a new [converter.EncodedValues] which can be used to decode binary data returned by Temporal. 
For example: // User had Activity.RecordHeartbeat(ctx, "my-heartbeat", 123) and then got response from calling Client.DescribeWorkflowExecution. // The response contains binary field PendingActivityInfo.HeartbeatDetails, // which can be decoded by using: diff -Nru temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/interceptor/interceptor.go temporal-1.22.5/src/vendor/go.temporal.io/sdk/interceptor/interceptor.go --- temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/interceptor/interceptor.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.temporal.io/sdk/interceptor/interceptor.go 2024-02-23 09:46:13.000000000 +0000 @@ -37,12 +37,12 @@ // calls will be intercepted by it. If an implementation of this interceptor is // provided via worker options, all worker calls will be intercepted by it. // -// All implementations of this should embed InterceptorBase but are not required +// All implementations of this should embed [InterceptorBase] but are not required // to. type Interceptor = internal.Interceptor // InterceptorBase is a default implementation of Interceptor meant for -// embedding. It simply embeds ClientInterceptorBase and WorkerInterceptorBase. +// embedding. It simply embeds [ClientInterceptorBase] and [WorkerInterceptorBase]. type InterceptorBase = internal.InterceptorBase // WorkerInterceptor is an interface for all calls that can be intercepted @@ -55,9 +55,9 @@ // changes. type WorkerInterceptor = internal.WorkerInterceptor -// WorkerInterceptorBase is a default implementation of WorkerInterceptor that -// simply instantiates ActivityInboundInterceptorBase or -// WorkflowInboundInterceptorBase when called to intercept activities or +// WorkerInterceptorBase is a default implementation of [WorkerInterceptor] that +// simply instantiates [ActivityInboundInterceptorBase] or +// [WorkflowInboundInterceptorBase] when called to intercept activities or // workflows respectively. // // This must be embedded into all WorkerInterceptor implementations to safely @@ -69,15 +69,15 @@ // activity calls, can change the outbound interceptor in Init before the next // call in the chain. // -// All implementations must embed ActivityInboundInterceptorBase to safely +// All implementations must embed [ActivityInboundInterceptorBase] to safely // handle future changes. type ActivityInboundInterceptor = internal.ActivityInboundInterceptor // ActivityInboundInterceptorBase is a default implementation of -// ActivityInboundInterceptor that forwards calls to the next inbound +// [ActivityInboundInterceptor] that forwards calls to the next inbound // interceptor and uses an ActivityOutboundInterceptorBase on Init. // -// This must be embedded into all ActivityInboundInterceptor implementations to +// This must be embedded into all [ActivityInboundInterceptor] implementations to // safely handle future changes. type ActivityInboundInterceptorBase = internal.ActivityInboundInterceptorBase @@ -87,12 +87,12 @@ // ActivityOutboundInterceptor is an interface for all activity calls // originating from the SDK. // -// All implementations must embed ActivityOutboundInterceptorBase to safely +// All implementations must embed [ActivityOutboundInterceptorBase] to safely // handle future changes. type ActivityOutboundInterceptor = internal.ActivityOutboundInterceptor // ActivityOutboundInterceptorBase is a default implementation of -// ActivityOutboundInterceptor that forwards calls to the next outbound +// [ActivityOutboundInterceptor] that forwards calls to the next outbound // interceptor. 
// // This must be embedded into all ActivityOutboundInterceptor implementations to @@ -104,15 +104,15 @@ // workflow calls, can change the outbound interceptor in Init before the next // call in the chain. // -// All implementations must embed WorkflowInboundInterceptorBase to safely +// All implementations must embed [WorkflowInboundInterceptorBase] to safely // handle future changes. type WorkflowInboundInterceptor = internal.WorkflowInboundInterceptor // WorkflowInboundInterceptorBase is a default implementation of -// WorkflowInboundInterceptor that forwards calls to the next inbound +// [WorkflowInboundInterceptor] that forwards calls to the next inbound // interceptor and uses an WorkflowOutboundInterceptorBase on Init. // -// This must be embedded into all WorkflowInboundInterceptor implementations to +// This must be embedded into all [WorkflowInboundInterceptor] implementations to // safely handle future changes. type WorkflowInboundInterceptorBase = internal.WorkflowInboundInterceptorBase @@ -125,50 +125,56 @@ // HandleQueryInput is input for WorkflowInboundInterceptor.HandleQuery. type HandleQueryInput = internal.HandleQueryInput +// UpdateInput is input for WorkflowInboundInterceptor.ExecuteUpdate +// and WorkflowInboundInterceptor.ValidateUpdate. +// +// NOTE: Experimental +type UpdateInput = internal.UpdateInput + // WorkflowOutboundInterceptor is an interface for all workflow calls // originating from the SDK. // -// All implementations must embed WorkflowOutboundInterceptorBase to safely +// All implementations must embed [WorkflowOutboundInterceptorBase] to safely // handle future changes. type WorkflowOutboundInterceptor = internal.WorkflowOutboundInterceptor // WorkflowOutboundInterceptorBase is a default implementation of -// WorkflowOutboundInterceptor that forwards calls to the next outbound +// [WorkflowOutboundInterceptor] that forwards calls to the next outbound // interceptor. // -// This must be embedded into all WorkflowOutboundInterceptor implementations to +// This must be embedded into all [WorkflowOutboundInterceptor] implementations to // safely handle future changes. type WorkflowOutboundInterceptorBase = internal.WorkflowOutboundInterceptorBase -// ClientInterceptor for providing a ClientOutboundInterceptor to intercept +// ClientInterceptor for providing a [ClientOutboundInterceptor] to intercept // certain workflow-specific client calls from the SDK. If an implementation of // this is provided via client or worker options, certain client calls will be // intercepted by it. // -// All implementations must embed ClientInterceptorBase to safely handle future +// All implementations must embed [ClientInterceptorBase] to safely handle future // changes. type ClientInterceptor = internal.ClientInterceptor -// ClientInterceptorBase is a default implementation of ClientInterceptor that -// simply instantiates ClientOutboundInterceptorBase when called to intercept +// ClientInterceptorBase is a default implementation of [ClientInterceptor] that +// simply instantiates [ClientOutboundInterceptorBase] when called to intercept // the client. // -// This must be embedded into all ClientInterceptor implementations to safely +// This must be embedded into all [ClientInterceptor] implementations to safely // handle future changes. type ClientInterceptorBase = internal.ClientInterceptorBase // ClientOutboundInterceptor is an interface for certain workflow-specific calls // originating from the SDK. 
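These comments all state the same embedding rule: every interceptor implementation embeds its Base type so that methods added in later SDK versions keep a working default. A minimal sketch of a worker interceptor that follows that rule and overrides only activity execution; the type names are illustrative:

package sample

import (
	"context"

	"go.temporal.io/sdk/activity"
	"go.temporal.io/sdk/interceptor"
)

type loggingWorkerInterceptor struct {
	interceptor.WorkerInterceptorBase // embedding keeps unimplemented methods forward-compatible
}

func (*loggingWorkerInterceptor) InterceptActivity(
	ctx context.Context, next interceptor.ActivityInboundInterceptor,
) interceptor.ActivityInboundInterceptor {
	return &loggingActivityInbound{
		ActivityInboundInterceptorBase: interceptor.ActivityInboundInterceptorBase{Next: next},
	}
}

type loggingActivityInbound struct {
	interceptor.ActivityInboundInterceptorBase
}

func (a *loggingActivityInbound) ExecuteActivity(
	ctx context.Context, in *interceptor.ExecuteActivityInput,
) (interface{}, error) {
	activity.GetLogger(ctx).Info("executing activity",
		"activityType", activity.GetInfo(ctx).ActivityType.Name)
	return a.Next.ExecuteActivity(ctx, in) // forward to the next interceptor in the chain
}

Such an interceptor would typically be registered through the worker's Interceptors option.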
// -// All implementations must embed ClientOutboundInterceptorBase to safely handle +// All implementations must embed [ClientOutboundInterceptorBase] to safely handle // future changes. type ClientOutboundInterceptor = internal.ClientOutboundInterceptor // ClientOutboundInterceptorBase is a default implementation of -// ClientOutboundInterceptor that forwards calls to the next outbound +// [ClientOutboundInterceptor] that forwards calls to the next outbound // interceptor. // -// This must be embedded into all ActivityInboundInterceptor implementations to +// This must be embedded into all [ClientOutboundInterceptor] implementations to // safely handle future changes. type ClientOutboundInterceptorBase = internal.ClientOutboundInterceptorBase diff -Nru temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/interceptor/tracing_interceptor.go temporal-1.22.5/src/vendor/go.temporal.io/sdk/interceptor/tracing_interceptor.go --- temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/interceptor/tracing_interceptor.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.temporal.io/sdk/interceptor/tracing_interceptor.go 2024-02-23 09:46:13.000000000 +0000 @@ -100,7 +100,7 @@ // never be nil. // // This is used internally to set the span on contexts not natively supported - // by tracing systems such as workflow.Context. + // by tracing systems such as [workflow.Context]. SpanContextKey interface{} // HeaderKey is the key name on the Temporal header to serialize the span to. @@ -112,6 +112,11 @@ // DisableQueryTracing can be set to disable query tracing. DisableQueryTracing bool + + // AllowInvalidParentSpans will swallow errors interpreting parent + // spans from headers. Useful when migrating from one tracing library + // to another, while workflows/activities may be in progress. + AllowInvalidParentSpans bool } // TracerStartSpanOptions are options for Tracer.StartSpan. @@ -693,7 +698,7 @@ // Get parent span from header if not already present and allowed if options.Parent == nil && options.FromHeader { - if span, err := t.readSpanFromHeader(header); err != nil { + if span, err := t.readSpanFromHeader(header); err != nil && !t.options.AllowInvalidParentSpans { return nil, err } else if span != nil { options.Parent = span diff -Nru temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/internal/activity.go temporal-1.22.5/src/vendor/go.temporal.io/sdk/internal/activity.go --- temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/internal/activity.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.temporal.io/sdk/internal/activity.go 2024-02-23 09:46:13.000000000 +0000 @@ -58,6 +58,7 @@ StartedTime time.Time // Time of activity start Deadline time.Time // Time of activity timeout Attempt int32 // Attempt starts from 1, and increased by 1 for every retry if retry policy is specified. + IsLocalActivity bool // true if it is a local activity } // RegisterActivityOptions consists of options for registering an activity @@ -99,7 +100,6 @@ // better to rely on the default value. // ScheduleToStartTimeout is always non-retryable. Retrying after this timeout doesn't make sense as it would // just put the Activity Task back into the same Task Queue. - // If ScheduleToClose is not provided then this timeout is required. // Optional: Defaults to unlimited. ScheduleToStartTimeout time.Duration @@ -108,7 +108,7 @@ // to detect that an Activity that didn't complete on time. So this timeout should be as short as the longest // possible execution of the Activity body. 
Potentially long running Activities must specify HeartbeatTimeout // and call Activity.RecordHeartbeat(ctx, "my-heartbeat") periodically for timely failure detection. - // If ScheduleToClose is not provided then this timeout is required: Defaults to the ScheduleToCloseTimeout value. + // Either this option or ScheduleToClose is required: Defaults to the ScheduleToCloseTimeout value. StartToCloseTimeout time.Duration // HeartbeatTimeout - Heartbeat interval. Activity must call Activity.RecordHeartbeat(ctx, "my-heartbeat") @@ -155,11 +155,13 @@ // LocalActivityOptions stores local activity specific parameters that will be stored inside of a context. LocalActivityOptions struct { // ScheduleToCloseTimeout - The end to end timeout for the local activity including retries. - // This field is required. + // At least one of ScheduleToCloseTimeout or StartToCloseTimeout is required. + // defaults to StartToCloseTimeout if not set. ScheduleToCloseTimeout time.Duration // StartToCloseTimeout - The timeout for a single execution of the local activity. - // Optional: defaults to ScheduleToClose + // At least one of ScheduleToCloseTimeout or StartToCloseTimeout is required. + // defaults to ScheduleToCloseTimeout if not set. StartToCloseTimeout time.Duration // RetryPolicy specify how to retry activity if error happens. @@ -179,6 +181,12 @@ return getActivityOutboundInterceptor(ctx).HasHeartbeatDetails(ctx) } +// IsActivity check if the context is an activity context from a normal or local activity. +func IsActivity(ctx context.Context) bool { + a := ctx.Value(activityInterceptorContextKey) + return a != nil +} + // GetHeartbeatDetails extract heartbeat details from last failed attempt. This is used in combination with retry policy. // An activity could be scheduled with an optional retry policy on ActivityOptions. If the activity failed then server // would attempt to dispatch another activity task to retry according to the retry policy. If there was heartbeat @@ -247,25 +255,12 @@ contextPropagators []ContextPropagator, interceptors []WorkerInterceptor, ) (context.Context, error) { - var deadline time.Time scheduled := common.TimeValue(task.GetScheduledTime()) started := common.TimeValue(task.GetStartedTime()) scheduleToCloseTimeout := common.DurationValue(task.GetScheduleToCloseTimeout()) startToCloseTimeout := common.DurationValue(task.GetStartToCloseTimeout()) heartbeatTimeout := common.DurationValue(task.GetHeartbeatTimeout()) - - startToCloseDeadline := started.Add(startToCloseTimeout) - if scheduleToCloseTimeout > 0 { - scheduleToCloseDeadline := scheduled.Add(scheduleToCloseTimeout) - // Minimum of the two deadlines. 
- if scheduleToCloseDeadline.Before(startToCloseDeadline) { - deadline = scheduleToCloseDeadline - } else { - deadline = startToCloseDeadline - } - } else { - deadline = startToCloseDeadline - } + deadline := calculateActivityDeadline(scheduled, started, scheduleToCloseTimeout, startToCloseTimeout) logger = log.With(logger, tagActivityID, task.ActivityId, @@ -326,6 +321,21 @@ tagWorkflowID, task.params.WorkflowInfo.WorkflowExecution.ID, tagRunID, task.params.WorkflowInfo.WorkflowExecution.RunID, ) + startedTime := time.Now() + scheduleToCloseTimeout := task.params.ScheduleToCloseTimeout + startToCloseTimeout := task.params.StartToCloseTimeout + + if startToCloseTimeout == 0 { + startToCloseTimeout = scheduleToCloseTimeout + } + if scheduleToCloseTimeout == 0 { + scheduleToCloseTimeout = startToCloseTimeout + } + deadline := calculateActivityDeadline(task.scheduledTime, startedTime, scheduleToCloseTimeout, startToCloseTimeout) + if task.attempt > 1 && !task.expireTime.IsZero() && task.expireTime.Before(deadline) { + // this is attempt and expire time is before SCHEDULE_TO_CLOSE timeout + deadline = task.expireTime + } return newActivityContext(ctx, interceptors, &activityEnvironment{ workflowType: &workflowTypeLocal, workflowNamespace: task.params.WorkflowInfo.Namespace, @@ -336,6 +346,9 @@ logger: logger, metricsHandler: metricsHandler, isLocalActivity: true, + deadline: deadline, + scheduledTime: task.scheduledTime, + startedTime: startedTime, dataConverter: dataConverter, attempt: task.attempt, }) @@ -368,3 +381,15 @@ return ctx, nil } + +func calculateActivityDeadline(scheduled, started time.Time, scheduleToCloseTimeout, startToCloseTimeout time.Duration) time.Time { + startToCloseDeadline := started.Add(startToCloseTimeout) + if scheduleToCloseTimeout > 0 { + scheduleToCloseDeadline := scheduled.Add(scheduleToCloseTimeout) + // Minimum of the two deadlines. + if scheduleToCloseDeadline.Before(startToCloseDeadline) { + return scheduleToCloseDeadline + } + } + return startToCloseDeadline +} diff -Nru temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/internal/client.go temporal-1.22.5/src/vendor/go.temporal.io/sdk/internal/client.go --- temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/internal/client.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.temporal.io/sdk/internal/client.go 2024-02-23 09:46:13.000000000 +0000 @@ -172,7 +172,7 @@ GetWorkflowHistory(ctx context.Context, workflowID string, runID string, isLongPoll bool, filterType enumspb.HistoryEventFilterType) HistoryEventIterator // CompleteActivity reports activity completed. - // activity Execute method can return acitivity.activity.ErrResultPending to + // activity Execute method can return activity.ErrResultPending to // indicate the activity is not completed when it's Execute method returns. In that case, this CompleteActivity() method // should be called when that activity is completed with the actual result and error. If err is nil, activity task // completed event will be reported; if err is CanceledError, activity task canceled event will be reported; otherwise, @@ -233,18 +233,20 @@ // - serviceerror.NamespaceNotFound ListOpenWorkflow(ctx context.Context, request *workflowservice.ListOpenWorkflowExecutionsRequest) (*workflowservice.ListOpenWorkflowExecutionsResponse, error) - // ListWorkflow gets workflow executions based on query. This API only works with ElasticSearch, - // and will return serviceerror.InvalidArgument when using Cassandra or MySQL. 
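With the relaxed timeout rules above, a local activity now needs only one of the two timeouts; the missing one defaults to the other, and the deadline is the earlier of the two resulting times. A small sketch under that reading, with workflow and activity names illustrative:

package sample

import (
	"context"
	"time"

	"go.temporal.io/sdk/workflow"
)

func SampleWorkflow(ctx workflow.Context) error {
	lao := workflow.LocalActivityOptions{
		// Only StartToCloseTimeout is set; ScheduleToCloseTimeout now
		// defaults to it instead of being mandatory.
		StartToCloseTimeout: 5 * time.Second,
	}
	ctx = workflow.WithLocalActivityOptions(ctx, lao)
	return workflow.ExecuteLocalActivity(ctx, SomeLocalActivity).Get(ctx, nil)
}

func SomeLocalActivity(ctx context.Context) error { return nil }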
The query is basically the SQL WHERE clause, + // ListWorkflow gets workflow executions based on query.The query is basically the SQL WHERE clause, // examples: // - "(WorkflowID = 'wid1' or (WorkflowType = 'type2' and WorkflowID = 'wid2'))". // - "CloseTime between '2019-08-27T15:04:05+00:00' and '2019-08-28T15:04:05+00:00'". - // - to list only open workflow use "CloseTime = missing" + // - to list only open workflow use "CloseTime is null" // Retrieved workflow executions are sorted by StartTime in descending order when list open workflow, // and sorted by CloseTime in descending order for other queries. + // For supported operations on different server versions see [Visibility]. // The errors it can return: // - serviceerror.InvalidArgument // - serviceerror.Internal // - serviceerror.Unavailable + // + // [Visibility]: https://docs.temporal.io/visibility ListWorkflow(ctx context.Context, request *workflowservice.ListWorkflowExecutionsRequest) (*workflowservice.ListWorkflowExecutionsResponse, error) // ListArchivedWorkflow gets archived workflow executions based on query. This API will return BadRequest if Temporal @@ -257,25 +259,28 @@ // - serviceerror.Unavailable ListArchivedWorkflow(ctx context.Context, request *workflowservice.ListArchivedWorkflowExecutionsRequest) (*workflowservice.ListArchivedWorkflowExecutionsResponse, error) - // ScanWorkflow gets workflow executions based on query. This API only works with ElasticSearch, - // and will return serviceerror.InvalidArgument when using Cassandra or MySQL. The query is basically the SQL WHERE clause + // ScanWorkflow gets workflow executions based on query. The query is basically the SQL WHERE clause // (see ListWorkflow for query examples). // ScanWorkflow should be used when retrieving large amount of workflows and order is not needed. - // It will use more ElasticSearch resources than ListWorkflow, but will be several times faster + // It will use more resources than ListWorkflow, but will be several times faster // when retrieving millions of workflows. + // For supported operations on different server versions see [Visibility]. // The errors it can return: // - serviceerror.InvalidArgument // - serviceerror.Internal // - serviceerror.Unavailable + // [Visibility]: https://docs.temporal.io/visibility ScanWorkflow(ctx context.Context, request *workflowservice.ScanWorkflowExecutionsRequest) (*workflowservice.ScanWorkflowExecutionsResponse, error) - // CountWorkflow gets number of workflow executions based on query. This API only works with ElasticSearch, - // and will return serviceerror.InvalidArgument when using Cassandra or MySQL. The query is basically the SQL WHERE clause + // CountWorkflow gets number of workflow executions based on query. The query is basically the SQL WHERE clause // (see ListWorkflow for query examples). + // For supported operations on different server versions see [Visibility]. // The errors it can return: // - serviceerror.InvalidArgument // - serviceerror.Internal // - serviceerror.Unavailable + // + // [Visibility]: https://docs.temporal.io/visibility CountWorkflow(ctx context.Context, request *workflowservice.CountWorkflowExecutionsRequest) (*workflowservice.CountWorkflowExecutionsResponse, error) // GetSearchAttributes returns valid search attributes keys and value types. @@ -344,6 +349,9 @@ // GetWorkerBuildIdCompatibility returns the worker-build-id based version sets for a particular task queue. 
GetWorkerBuildIdCompatibility(ctx context.Context, options *GetWorkerBuildIdCompatibilityOptions) (*WorkerBuildIDVersionSets, error) + // GetWorkerTaskReachability returns which versions are is still in use by open or closed workflows. + GetWorkerTaskReachability(ctx context.Context, options *GetWorkerTaskReachabilityOptions) (*WorkerTaskReachability, error) + // CheckHealth performs a server health check using the gRPC health check // API. If the check fails, an error is returned. CheckHealth(ctx context.Context, request *CheckHealthRequest) (*CheckHealthResponse, error) @@ -592,10 +600,28 @@ // Memo - Optional non-indexed info that will be shown in list workflow. Memo map[string]interface{} - // SearchAttributes - Optional indexed info that can be used in query of List/Scan/Count workflow APIs (only - // supported when Temporal server is using ElasticSearch). The key and value type must be registered on Temporal server side. + // SearchAttributes - Optional indexed info that can be used in query of List/Scan/Count workflow APIs. The key and value type must be registered on Temporal server side. // Use GetSearchAttributes API to get valid key and corresponding value type. + // For supported operations on different server versions see [Visibility]. + // + // [Visibility]: https://docs.temporal.io/visibility SearchAttributes map[string]interface{} + + // EnableEagerStart - request eager execution for this workflow, if a local worker is available. + // + // WARNING: Eager start does not respect worker versioning. An eagerly started workflow may run on + // any available local worker even if that worker is not in the default build ID set. + // + // NOTE: Experimental + EnableEagerStart bool + + // StartDelay - Time to wait before dispatching the first workflow task. + // If the workflow gets a signal before the delay, a workflow task will be dispatched and the rest + // of the delay will be ignored. A signal from signal with start will not trigger a workflow task. + // Cannot be set the same time as a CronSchedule. + // + // NOTE: Experimental + StartDelay time.Duration } // RetryPolicy defines the retry policy. @@ -813,6 +839,9 @@ contextPropagators: options.ContextPropagators, workerInterceptors: workerInterceptors, excludeInternalFromRetry: options.ConnectionOptions.excludeInternalFromRetry, + eagerDispatcher: &eagerWorkflowDispatcher{ + workersByTaskQueue: make(map[string][]eagerWorker), + }, } // Create outbound interceptor by wrapping backwards through chain diff -Nru temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/internal/error.go temporal-1.22.5/src/vendor/go.temporal.io/sdk/internal/error.go --- temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/internal/error.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.temporal.io/sdk/internal/error.go 2024-02-23 09:46:13.000000000 +0000 @@ -49,7 +49,7 @@ what kind of error it was and take actions based on it. The details is encoded payload which workflow code could extract to strong typed variable. Workflow code needs to know what the types of the encoded details are before extracting them. - If activity implementation returns errors other than from NewApplicationError() API. In this case GetOriginalType() - will return orginal type of an error represented as string. Workflow code could check this type to determine what kind of error it was + will return original type of an error represented as string. Workflow code could check this type to determine what kind of error it was and take actions based on the type. 
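The new StartWorkflowOptions fields documented above are set by the client when starting a workflow. A short sketch, assuming an existing client and illustrative identifiers; per the comments above, StartDelay cannot be combined with CronSchedule and eager start only helps when a local worker is available:

package sample

import (
	"context"
	"time"

	"go.temporal.io/sdk/client"
)

func startDelayed(ctx context.Context, c client.Client) error {
	opts := client.StartWorkflowOptions{
		ID:        "delayed-workflow",  // illustrative workflow ID
		TaskQueue: "sample-task-queue", // illustrative task queue
		// First workflow task is dispatched after the delay unless a signal
		// arrives earlier; not allowed together with CronSchedule.
		StartDelay: 10 * time.Minute,
		// Experimental: lets a local worker pick up the first task eagerly,
		// but (per the warning above) does not respect worker versioning.
		EnableEagerStart: true,
	}
	_, err := c.ExecuteWorkflow(ctx, opts, "SampleWorkflow")
	return err
}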
These errors are retryable by default, unless error type is specified in retry policy. 2) *CanceledError: If activity was canceled, internal error will be an instance of *CanceledError. When activity cancels itself by diff -Nru temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/internal/interceptor.go temporal-1.22.5/src/vendor/go.temporal.io/sdk/internal/interceptor.go --- temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/internal/interceptor.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.temporal.io/sdk/internal/interceptor.go 2024-02-23 09:46:13.000000000 +0000 @@ -27,6 +27,7 @@ "time" commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" updatepb "go.temporal.io/api/update/v1" "go.temporal.io/sdk/converter" "go.temporal.io/sdk/internal/common/metrics" @@ -127,12 +128,16 @@ // as part of its optional configuration. The same prohibition against // mutating workflow state that is demanded of UpdateOptions.Validator // functions also applies to this function. + // + // NOTE: Experimental ValidateUpdate(ctx Context, in *UpdateInput) error // ExecuteUpdate is called after ValidateUpdate if and only if the latter // returns nil. interceptor.WorkflowHeader will return a non-nil map for // this context. ExecuteUpdate is allowed to mutate workflow state and // perform workflow actions such as scheduling activities, timers, etc. + // + // NOTE: Experimental ExecuteUpdate(ctx Context, in *UpdateInput) (interface{}, error) mustEmbedWorkflowInboundInterceptorBase() @@ -171,6 +176,12 @@ // Go intercepts workflow.Go. Go(ctx Context, name string, f func(ctx Context)) Context + // Await intercepts workflow.Await. + Await(ctx Context, condition func() bool) error + + // AwaitWithTimeout intercepts workflow.AwaitWithTimeout. + AwaitWithTimeout(ctx Context, timeout time.Duration, condition func() bool) (bool, error) + // ExecuteActivity intercepts workflow.ExecuteActivity. // interceptor.WorkflowHeader will return a non-nil map for this context. ExecuteActivity(ctx Context, activityType string, args ...interface{}) Future @@ -186,6 +197,11 @@ // GetInfo intercepts workflow.GetInfo. GetInfo(ctx Context) *WorkflowInfo + // GetUpdateInfo intercepts workflow.GetUpdateInfo. + // + // NOTE: Experimental + GetUpdateInfo(ctx Context) *UpdateInfo + // GetLogger intercepts workflow.GetLogger. GetLogger(ctx Context) log.Logger @@ -390,8 +406,9 @@ // ClientQueryWorkflowInput is the input to // ClientOutboundInterceptor.QueryWorkflow. type ClientQueryWorkflowInput struct { - WorkflowID string - RunID string - QueryType string - Args []interface{} + WorkflowID string + RunID string + QueryType string + Args []interface{} + QueryRejectCondition enumspb.QueryRejectCondition } diff -Nru temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/internal/interceptor_base.go temporal-1.22.5/src/vendor/go.temporal.io/sdk/internal/interceptor_base.go --- temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/internal/interceptor_base.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.temporal.io/sdk/internal/interceptor_base.go 2024-02-23 09:46:13.000000000 +0000 @@ -195,6 +195,16 @@ return w.Next.ExecuteActivity(ctx, activityType, args...) } +// Await implements WorkflowOutboundInterceptor.Await. +func (w *WorkflowOutboundInterceptorBase) Await(ctx Context, condition func() bool) error { + return w.Next.Await(ctx, condition) +} + +// AwaitWithTimeout implements WorkflowOutboundInterceptor.AwaitWithTimeout. 
+func (w *WorkflowOutboundInterceptorBase) AwaitWithTimeout(ctx Context, timeout time.Duration, condition func() bool) (bool, error) { + return w.Next.AwaitWithTimeout(ctx, timeout, condition) +} + // ExecuteLocalActivity implements WorkflowOutboundInterceptor.ExecuteLocalActivity. func (w *WorkflowOutboundInterceptorBase) ExecuteLocalActivity( ctx Context, @@ -218,6 +228,11 @@ return w.Next.GetInfo(ctx) } +// GetUpdateInfo implements WorkflowOutboundInterceptor.GetUpdateInfo. +func (w *WorkflowOutboundInterceptorBase) GetUpdateInfo(ctx Context) *UpdateInfo { + return w.Next.GetUpdateInfo(ctx) +} + // GetLogger implements WorkflowOutboundInterceptor.GetLogger. func (w *WorkflowOutboundInterceptorBase) GetLogger(ctx Context) log.Logger { return w.Next.GetLogger(ctx) diff -Nru temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/internal/internal_activity.go temporal-1.22.5/src/vendor/go.temporal.io/sdk/internal/internal_activity.go --- temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/internal/internal_activity.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.temporal.io/sdk/internal/internal_activity.go 2024-02-23 09:46:13.000000000 +0000 @@ -217,7 +217,8 @@ } if p.ScheduleToCloseTimeout == 0 { p.ScheduleToCloseTimeout = p.StartToCloseTimeout - } else { + } + if p.StartToCloseTimeout == 0 { p.StartToCloseTimeout = p.ScheduleToCloseTimeout } return p, nil @@ -378,6 +379,7 @@ Attempt: a.env.attempt, WorkflowType: a.env.workflowType, WorkflowNamespace: a.env.workflowNamespace, + IsLocalActivity: a.env.isLocalActivity, } } diff -Nru temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/internal/internal_command_state_machine.go temporal-1.22.5/src/vendor/go.temporal.io/sdk/internal/internal_command_state_machine.go --- temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/internal/internal_command_state_machine.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.temporal.io/sdk/internal/internal_command_state_machine.go 2024-02-23 09:46:13.000000000 +0000 @@ -85,10 +85,6 @@ cancelActivityStateMachine struct { *commandStateMachineBase attributes *commandpb.RequestCancelActivityTaskCommandAttributes - - // The commandsHelper.nextCommandEventIDResetCounter when this command - // incremented commandsHelper.commandsCancelledDuringWFCancellation. - cancelledOnEventIDResetCounter uint64 } timerCommandStateMachine struct { @@ -99,10 +95,6 @@ cancelTimerCommandStateMachine struct { *commandStateMachineBase attributes *commandpb.CancelTimerCommandAttributes - - // The commandsHelper.nextCommandEventIDResetCounter when this command - // incremented commandsHelper.commandsCancelledDuringWFCancellation. - cancelledOnEventIDResetCounter uint64 } childWorkflowCommandStateMachine struct { @@ -150,21 +142,13 @@ orderedCommands *list.List commands map[commandID]*list.Element - scheduledEventIDToActivityID map[int64]string - scheduledEventIDToCancellationID map[int64]string - scheduledEventIDToSignalID map[int64]string - versionMarkerLookup map[int64]versionMarker - commandsCancelledDuringWFCancellation int64 - workflowExecutionIsCancelling bool - - // Incremented everytime nextCommandEventID and - // commandsCancelledDuringWFCancellation is reset (i.e. on new workflow - // task). Won't ever happen, but technically the way this value is compared - // is safe for overflow wrap around. 
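// The internal_activity.go hunk above turns the old else-branch into an independent
// check: previously a non-zero ScheduleToCloseTimeout always overwrote
// StartToCloseTimeout, whereas now each timeout is only defaulted from the other
// when it is unset. A small workflow-side sketch of the effect, assuming this hunk
// sits in the local-activity options validation (consistent with the surrounding file).
// Imports assumed: "time", "go.temporal.io/sdk/workflow".
func withLocalActivityTimeouts(ctx workflow.Context) workflow.Context {
    return workflow.WithLocalActivityOptions(ctx, workflow.LocalActivityOptions{
        // Overall budget including retries.
        ScheduleToCloseTimeout: 2 * time.Minute,
        // Per-attempt budget; with this patch it is no longer silently
        // replaced by ScheduleToCloseTimeout when both are set.
        StartToCloseTimeout: 10 * time.Second,
    })
}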
- nextCommandEventIDResetCounter uint64 + scheduledEventIDToActivityID map[int64]string + scheduledEventIDToCancellationID map[int64]string + scheduledEventIDToSignalID map[int64]string + versionMarkerLookup map[int64]versionMarker } - // panic when command state machine is in illegal state + // panic when command or message state machine is in illegal state stateMachineIllegalStatePanic struct { message string } @@ -477,9 +461,6 @@ case commandStateCommandSent: d.moveState(commandStateCancellationCommandSent, eventCancel) case commandStateInitiated: - if d.helper.workflowExecutionIsCancelling { - d.helper.commandsCancelledDuringWFCancellation++ - } d.moveState(commandStateCanceledAfterInitiated, eventCancel) default: d.failStateTransition(eventCancel) @@ -589,10 +570,6 @@ } cancelCmd := d.helper.newCancelActivityStateMachine(attribs) d.helper.addCommand(cancelCmd) - // We must mark the event ID reset counter for when we performed this - // increment so a potential decrement can only decrement if it wasn't - // reset - cancelCmd.cancelledOnEventIDResetCounter = d.helper.nextCommandEventIDResetCounter // We also mark the schedule command as not eager if we haven't sent it yet. // Server behavior differs on eager vs non-eager when scheduling and // cancelling during the same task completion. If it has not been sent this @@ -614,10 +591,6 @@ } cancelCmd := d.helper.newCancelTimerCommandStateMachine(attribs) d.helper.addCommand(cancelCmd) - // We must mark the event ID reset counter for when we performed this - // increment so a potential decrement can only decrement if it wasn't - // reset - cancelCmd.cancelledOnEventIDResetCounter = d.helper.nextCommandEventIDResetCounter } d.commandStateMachineBase.cancel() @@ -729,9 +702,6 @@ func (d *childWorkflowCommandStateMachine) cancel() { switch d.state { case commandStateStarted: - if d.helper.workflowExecutionIsCancelling { - d.helper.commandsCancelledDuringWFCancellation++ - } d.moveState(commandStateCanceledAfterStarted, eventCancel) // A child workflow may be canceled _after_ something like an activity start // happens inside a simulated goroutine. However, since the state of the @@ -888,11 +858,10 @@ orderedCommands: list.New(), commands: make(map[commandID]*list.Element), - scheduledEventIDToActivityID: make(map[int64]string), - scheduledEventIDToCancellationID: make(map[int64]string), - scheduledEventIDToSignalID: make(map[int64]string), - versionMarkerLookup: make(map[int64]versionMarker), - commandsCancelledDuringWFCancellation: 0, + scheduledEventIDToActivityID: make(map[int64]string), + scheduledEventIDToCancellationID: make(map[int64]string), + scheduledEventIDToSignalID: make(map[int64]string), + versionMarkerLookup: make(map[int64]versionMarker), } } @@ -905,13 +874,20 @@ // corresponding history event after processing. So we can use workflow task started event id + 2 as the offset as // workflow task completed event is always the first event in the workflow task followed by events generated from // commands. This allows client sdk to deterministically predict history event ids generated by processing of the - // command. We must also add the number of cancel commands that were spawned during cancellation of the workflow - // execution as those canceled command events will show up *after* the workflow task completed event. 
- h.nextCommandEventID = workflowTaskStartedEventID + 2 + h.commandsCancelledDuringWFCancellation - h.commandsCancelledDuringWFCancellation = 0 - // We must change the counter here so that others who mutate - // commandsCancelledDuringWFCancellation know it has since been reset - h.nextCommandEventIDResetCounter++ + // command. It is possible, notably during workflow cancellation, that commands are generated before the workflow + // task started event is processed. In this case we need to adjust the nextCommandEventID to account for these unsent + // commands.git + var uncountedCommands int64 + for curr := h.orderedCommands.Front(); curr != nil; { + d := curr.Value.(commandStateMachine) + command := d.getCommand() + if command != nil { + uncountedCommands += 1 + } + curr = curr.Next() + } + + h.nextCommandEventID = workflowTaskStartedEventID + 2 + uncountedCommands } func (h *commandsHelper) getNextID() int64 { @@ -974,24 +950,7 @@ orderedCmdEl, ok := h.commands[commandID] if ok { delete(h.commands, commandID) - command := h.orderedCommands.Remove(orderedCmdEl) - // Sometimes commandsCancelledDuringWFCancellation was incremented before - // it was reset and sometimes not. We make sure the workflow execution is - // actually cancelling since that's the only time we increment the counter - // in the first place. Also, we use the reset counter to see if we're still - // on the same iteration where we may have incremented it before. - if h.workflowExecutionIsCancelling { - switch command := command.(type) { - case *cancelActivityStateMachine: - if command.cancelledOnEventIDResetCounter == h.nextCommandEventIDResetCounter { - h.commandsCancelledDuringWFCancellation-- - } - case *cancelTimerCommandStateMachine: - if command.cancelledOnEventIDResetCounter == h.nextCommandEventIDResetCounter { - h.commandsCancelledDuringWFCancellation-- - } - } - } + _ = h.orderedCommands.Remove(orderedCmdEl) } } @@ -1171,7 +1130,7 @@ func (h *commandsHelper) recordMutableSideEffectMarker(mutableSideEffectID string, callCountHint int, data *commonpb.Payloads, dc converter.DataConverter) commandStateMachine { // In order to avoid duplicate marker IDs, we must append the counter to the // user-provided ID - mutableSideEffectID = fmt.Sprintf("%v_%v", mutableSideEffectID, h.nextCommandEventID) + mutableSideEffectID = fmt.Sprintf("%v_%v", mutableSideEffectID, h.getNextID()) markerID := fmt.Sprintf("%v_%v", mutableSideEffectMarkerName, mutableSideEffectID) mutableSideEffectIDPayload, err := dc.ToPayloads(mutableSideEffectID) diff -Nru temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/internal/internal_eager.go temporal-1.22.5/src/vendor/go.temporal.io/sdk/internal/internal_eager.go --- temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/internal/internal_eager.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.temporal.io/sdk/internal/internal_eager.go 2024-02-23 09:46:13.000000000 +0000 @@ -0,0 +1,35 @@ +// The MIT License +// +// Copyright (c) 2022 Temporal Technologies Inc. All rights reserved. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package internal + +// eagerWorker is the minimal worker interface needed for eager activities and workflows +type eagerWorker interface { + // tryReserveSlot tries to reserver a task slot on the worker without blocking + // caller is expected to release the slot with releaseSlot + tryReserveSlot() bool + // releaseSlot release a task slot acquired by tryReserveSlot + releaseSlot() + // pushEagerTask pushes a new eager workflow task to the workers task queue. + // should only be called with a reserved slot. + pushEagerTask(task eagerTask) +} diff -Nru temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/internal/internal_eager_activity.go temporal-1.22.5/src/vendor/go.temporal.io/sdk/internal/internal_eager_activity.go --- temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/internal/internal_eager_activity.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.temporal.io/sdk/internal/internal_eager_activity.go 2024-02-23 09:46:13.000000000 +0000 @@ -34,7 +34,7 @@ type eagerActivityExecutor struct { eagerActivityExecutorOptions - activityWorker *activityWorker + activityWorker eagerWorker heldSlotCount int countLock sync.Mutex } @@ -97,11 +97,8 @@ // No more room return false } - // Reserve a spot for our request via a non-blocking attempt to take a poller - // request entry which essentially reserves a spot - select { - case <-e.activityWorker.worker.pollerRequestCh: - default: + // Reserve a spot for our request via a non-blocking attempt + if !e.activityWorker.tryReserveSlot() { return false } @@ -131,35 +128,23 @@ // Put every unfulfilled slot back on the poller channel for i := 0; i < unfulfilledSlots; i++ { - // Like other parts that push onto this channel, we assume there is room - // because we took it, so we do a blocking send - e.activityWorker.worker.pollerRequestCh <- struct{}{} + e.activityWorker.releaseSlot() } // Start each activity asynchronously for _, activity := range resp.GetActivityTasks() { - // Before starting the goroutine we have to increase the wait group counter - // that the poller would have otherwise increased - e.activityWorker.worker.stopWG.Add(1) // Asynchronously execute - task := &activityTask{activity} - go func() { - // Mark completed when complete - defer func() { - // Like other sends to this channel, we assume there is room because we - // reserved it, so we make a blocking send. 
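// The eagerWorker interface above is the seam that lets eager activity and eager
// workflow dispatch share slot accounting with the underlying worker. Below is an
// illustrative, self-contained implementation of that contract (not part of this
// patch) using a buffered channel as the slot pool; it assumes the internal
// eagerTask type introduced later in this patch.
type toySlotWorker struct {
    slots chan struct{}  // capacity == maximum concurrent tasks
    tasks chan eagerTask // buffered to the same capacity, so pushes never block
}

func newToySlotWorker(maxConcurrent int) *toySlotWorker {
    w := &toySlotWorker{
        slots: make(chan struct{}, maxConcurrent),
        tasks: make(chan eagerTask, maxConcurrent),
    }
    for i := 0; i < maxConcurrent; i++ {
        w.slots <- struct{}{} // pre-fill the pool with free slots
    }
    return w
}

// tryReserveSlot takes a free slot without blocking; a successful reservation must
// later be paired with either pushEagerTask or releaseSlot.
func (w *toySlotWorker) tryReserveSlot() bool {
    select {
    case <-w.slots:
        return true
    default:
        return false
    }
}

// releaseSlot returns a previously reserved slot to the pool.
func (w *toySlotWorker) releaseSlot() { w.slots <- struct{}{} }

// pushEagerTask hands a task to the worker; it must only be called with a reserved
// slot, which is why the buffered send below cannot block.
func (w *toySlotWorker) pushEagerTask(t eagerTask) { w.tasks <- t }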
The processTask does not do - // this itself because our task is *activityTask, not *polledTask. - e.activityWorker.worker.pollerRequestCh <- struct{}{} - // Decrement executing count - e.countLock.Lock() - e.heldSlotCount-- - e.countLock.Unlock() - }() - - // Process the task synchronously. We call the processor on the base - // worker instead of a higher level so we can get the benefits of metrics, - // stop wait group update, etc. - e.activityWorker.worker.processTask(task) - }() + e.activityWorker.pushEagerTask( + eagerTask{ + task: &activityTask{activity}, + callback: func() { + // The processTaskAsync does not do this itself because our task is *activityTask, not *polledTask. + e.activityWorker.releaseSlot() + // Decrement executing count + e.countLock.Lock() + e.heldSlotCount-- + e.countLock.Unlock() + }, + }) } } diff -Nru temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/internal/internal_eager_workflow.go temporal-1.22.5/src/vendor/go.temporal.io/sdk/internal/internal_eager_workflow.go --- temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/internal/internal_eager_workflow.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.temporal.io/sdk/internal/internal_eager_workflow.go 2024-02-23 09:46:13.000000000 +0000 @@ -0,0 +1,100 @@ +// The MIT License +// +// Copyright (c) 2022 Temporal Technologies Inc. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package internal + +import ( + "math/rand" + "sync" + "sync/atomic" + + "go.temporal.io/api/workflowservice/v1" +) + +// eagerWorkflowDispatcher is responsible for finding an available worker for an eager workflow task. +type eagerWorkflowDispatcher struct { + lock sync.RWMutex + workersByTaskQueue map[string][]eagerWorker +} + +// registerWorker registers a worker that can be used for eager workflow dispatch +func (e *eagerWorkflowDispatcher) registerWorker(worker *workflowWorker) { + e.lock.Lock() + defer e.lock.Unlock() + e.workersByTaskQueue[worker.executionParameters.TaskQueue] = append(e.workersByTaskQueue[worker.executionParameters.TaskQueue], worker.worker) +} + +// applyToRequest updates request if eager workflow dispatch is possible and returns the eagerWorkflowExecutor to use +func (e *eagerWorkflowDispatcher) applyToRequest(request *workflowservice.StartWorkflowExecutionRequest) *eagerWorkflowExecutor { + // Try every worker that is assigned to the desired task queue. 
+ e.lock.RLock() + workers := e.workersByTaskQueue[request.GetTaskQueue().Name] + randWorkers := make([]eagerWorker, len(workers)) + // Copy the slice so we can release the lock. + copy(randWorkers, workers) + e.lock.RUnlock() + rand.Shuffle(len(randWorkers), func(i, j int) { randWorkers[i], randWorkers[j] = randWorkers[j], randWorkers[i] }) + for _, worker := range randWorkers { + if worker.tryReserveSlot() { + request.RequestEagerExecution = true + return &eagerWorkflowExecutor{ + worker: worker, + } + } + } + return nil +} + +// eagerWorkflowExecutor is a worker-scoped executor for an eager workflow task. +type eagerWorkflowExecutor struct { + handledResponse atomic.Bool + worker eagerWorker +} + +// handleResponse of an eager workflow task from a StartWorkflowExecution request. +func (e *eagerWorkflowExecutor) handleResponse(response *workflowservice.PollWorkflowTaskQueueResponse) { + if !e.handledResponse.CompareAndSwap(false, true) { + panic("eagerWorkflowExecutor trying to handle multiple responses") + } + // Asynchronously execute the task + e.worker.pushEagerTask( + eagerTask{ + task: &eagerWorkflowTask{ + task: response, + }, + // The processTaskAsync does not do this itself because our task is *eagerWorkflowTask, not *polledTask. + callback: e.worker.releaseSlot, + }) +} + +// release the executor task slot this eagerWorkflowExecutor was holding. +// If it is currently handling a responses or has already released the task slot +// then do nothing. +func (e *eagerWorkflowExecutor) release() { + if e.handledResponse.CompareAndSwap(false, true) { + // Assume there is room because it is reserved on creation, so we make a blocking send. + // The processTask does not do this itself because our task is not *polledTask. + e.worker.releaseSlot() + } else { + panic("trying to release an eagerWorkflowExecutor that has already been released") + } +} diff -Nru temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/internal/internal_event_handlers.go temporal-1.22.5/src/vendor/go.temporal.io/sdk/internal/internal_event_handlers.go --- temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/internal/internal_event_handlers.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.temporal.io/sdk/internal/internal_event_handlers.go 2024-02-23 09:46:13.000000000 +0000 @@ -153,7 +153,7 @@ cancelHandler func() // A cancel handler to be invoked on a cancel notification signalHandler func(name string, input *commonpb.Payloads, header *commonpb.Header) error // A signal handler to be invoked on a signal event queryHandler func(queryType string, queryArgs *commonpb.Payloads, header *commonpb.Header) (*commonpb.Payloads, error) - updateHandler func(name string, args *commonpb.Payloads, header *commonpb.Header, callbacks UpdateCallbacks) + updateHandler func(name string, id string, args *commonpb.Payloads, header *commonpb.Header, callbacks UpdateCallbacks) logger log.Logger isReplay bool // flag to indicate if workflow is in replay mode @@ -166,6 +166,10 @@ contextPropagators []ContextPropagator deadlockDetectionTimeout time.Duration sdkFlags *sdkFlags + sdkVersionUpdated bool + sdkVersion string + sdkNameUpdated bool + sdkName string protocols *protocol.Registry } @@ -184,6 +188,7 @@ pastFirstWFT bool // Set true once this LA has lived for more than one workflow task retryPolicy *RetryPolicy expireTime time.Time + scheduledTime time.Time // Time the activity was scheduled initially. 
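// The dispatcher and executor above are what the client-side EnableEagerStart
// option added earlier in this patch hooks into: the start request is only flagged
// for eager execution when a same-process worker on that task queue has a free
// slot, and the first workflow task is then pushed to that worker instead of being
// polled. A hypothetical client-side sketch (workflow name and task queue are
// illustrative). StartDelay, also added above, is unrelated to eager start and
// cannot be combined with a CronSchedule.
// Imports assumed: "context", "go.temporal.io/sdk/client".
func startEagerly(ctx context.Context, c client.Client) error {
    _, err := c.ExecuteWorkflow(ctx, client.StartWorkflowOptions{
        ID:               "eager-example",
        TaskQueue:        "example-queue", // a worker.New(...) for this queue runs in this process
        EnableEagerStart: true,            // experimental, see the option comment above
    }, "ExampleWorkflow")
    return err
}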
header *commonpb.Header } @@ -238,7 +243,8 @@ mutableSideEffectCallCounter: make(map[string]int), sdkFlags: newSDKFlags(capabilities), } - context.logger = ilog.NewReplayLogger( + // Attempt to skip 1 log level to remove the ReplayLogger from the stack. + context.logger = log.Skip(ilog.NewReplayLogger( log.With(logger, tagWorkflowType, workflowInfo.WorkflowType.Name, tagWorkflowID, workflowInfo.WorkflowExecution.ID, @@ -246,7 +252,7 @@ tagAttempt, workflowInfo.Attempt, ), &context.isReplay, - &context.enableLoggingInReplay) + &context.enableLoggingInReplay), 1) if metricsHandler != nil { context.metricsHandler = metrics.NewReplayAwareHandler(&context.isReplay, metricsHandler). @@ -323,8 +329,8 @@ return retval } -func (wc *workflowEnvironmentImpl) ScheduleUpdate(name string, args *commonpb.Payloads, hdr *commonpb.Header, callbacks UpdateCallbacks) { - wc.updateHandler(name, args, hdr, callbacks) +func (wc *workflowEnvironmentImpl) ScheduleUpdate(name string, id string, args *commonpb.Payloads, hdr *commonpb.Header, callbacks UpdateCallbacks) { + wc.updateHandler(name, id, args, hdr, callbacks) } func withExpectedEventPredicate(pred func(*historypb.HistoryEvent) bool) msgSendOpt { @@ -348,6 +354,22 @@ wc.outbox = append(wc.outbox, outboxEntry{msg: msg, eventPredicate: sendCfg.pred}) } +func (wc *workflowEnvironmentImpl) getNewSdkNameAndReset() string { + if wc.sdkNameUpdated { + wc.sdkNameUpdated = false + return wc.sdkName + } + return "" +} + +func (wc *workflowEnvironmentImpl) getNewSdkVersionAndReset() string { + if wc.sdkVersionUpdated { + wc.sdkVersionUpdated = false + return wc.sdkVersion + } + return "" +} + func (wc *workflowEnvironmentImpl) getNextLocalActivityID() string { wc.localActivityCounterID++ return getStringID(wc.localActivityCounterID) @@ -494,7 +516,6 @@ func (wc *workflowEnvironmentImpl) RegisterCancelHandler(handler func()) { wrappedHandler := func() { - wc.commandsHelper.workflowExecutionIsCancelling = true handler() } wc.cancelHandler = wrappedHandler @@ -577,7 +598,7 @@ } func (wc *workflowEnvironmentImpl) RegisterUpdateHandler( - handler func(string, *commonpb.Payloads, *commonpb.Header, UpdateCallbacks), + handler func(string, string, *commonpb.Payloads, *commonpb.Header, UpdateCallbacks), ) { wc.updateHandler = handler } @@ -683,12 +704,13 @@ func newLocalActivityTask(params ExecuteLocalActivityParams, callback LocalActivityResultHandler, activityID string) *localActivityTask { task := &localActivityTask{ - activityID: activityID, - params: ¶ms, - callback: callback, - retryPolicy: params.RetryPolicy, - attempt: params.Attempt, - header: params.Header, + activityID: activityID, + params: ¶ms, + callback: callback, + retryPolicy: params.RetryPolicy, + attempt: params.Attempt, + header: params.Header, + scheduledTime: time.Now(), } if params.ScheduleToCloseTimeout > 0 { @@ -899,7 +921,7 @@ // During replay, we only generate a command if there was a known marker // recorded on the next task. We have to append the current command // counter to the user-provided ID to avoid duplicates. - if wc.mutableSideEffectsRecorded[fmt.Sprintf("%v_%v", id, wc.commandsHelper.nextCommandEventID)] { + if wc.mutableSideEffectsRecorded[fmt.Sprintf("%v_%v", id, wc.commandsHelper.getNextID())] { return wc.recordMutableSideEffect(id, callCount, result) } return encodedResult @@ -1052,8 +1074,10 @@ case enumspb.EVENT_TYPE_WORKFLOW_TASK_STARTED: // Set replay clock. 
weh.SetCurrentReplayTime(common.TimeValue(event.GetEventTime())) - // Set history length as this event's ID + // Update workflow info fields weh.workflowInfo.currentHistoryLength = int(event.EventId) + weh.workflowInfo.continueAsNewSuggested = event.GetWorkflowTaskStartedEventAttributes().GetSuggestContinueAsNew() + weh.workflowInfo.currentHistorySize = int(event.GetWorkflowTaskStartedEventAttributes().GetHistorySizeBytes()) // Reset the counter on command helper used for generating ID for commands weh.commandsHelper.setCurrentWorkflowTaskStartedEventID(event.GetEventId()) weh.workflowDefinition.OnWorkflowTaskStarted(weh.deadlockDetectionTimeout) @@ -1199,6 +1223,15 @@ isReplay bool, isLast bool, ) error { + defer func() { + if p := recover(); p != nil { + weh.metricsHandler.Counter(metrics.WorkflowTaskExecutionFailureCounter).Inc(1) + topLine := fmt.Sprintf("process message for %s [panic]:", weh.workflowInfo.TaskQueueName) + st := getStackTraceRaw(topLine, 7, 0) + weh.Complete(nil, newWorkflowPanicError(p, st)) + } + }() + ctor, err := weh.protocolConstructorForMessage(msg) if err != nil { return nil diff -Nru temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/internal/internal_public.go temporal-1.22.5/src/vendor/go.temporal.io/sdk/internal/internal_public.go --- temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/internal/internal_public.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.temporal.io/sdk/internal/internal_public.go 2024-02-23 09:46:13.000000000 +0000 @@ -78,6 +78,8 @@ // WorkflowTaskHandler represents workflow task handlers. WorkflowTaskHandler interface { + WorkflowContextManager + // Processes the workflow task // The response could be: // - RespondWorkflowTaskCompletedRequest @@ -85,8 +87,21 @@ // - RespondQueryTaskCompletedRequest ProcessWorkflowTask( task *workflowTask, + ctx *workflowExecutionContextImpl, f workflowTaskHeartbeatFunc, - ) (response interface{}, resetter EventLevelResetter, err error) + ) (response interface{}, err error) + } + + WorkflowContextManager interface { + // GetOrCreateWorkflowContext finds an existing cached context object + // for the provided task's run ID or creates a new object, adds it to + // cache, and returns it. In all non-error cases the returned context + // object is in a locked state (i.e. + // workflowExecutionContextImpl.Lock() has been called). + GetOrCreateWorkflowContext( + task *workflowservice.PollWorkflowTaskQueueResponse, + historyIterator HistoryIterator, + ) (*workflowExecutionContextImpl, error) } // ActivityTaskHandler represents activity task handlers. diff -Nru temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/internal/internal_task_handlers.go temporal-1.22.5/src/vendor/go.temporal.io/sdk/internal/internal_task_handlers.go --- temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/internal/internal_task_handlers.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.temporal.io/sdk/internal/internal_task_handlers.go 2024-02-23 09:46:13.000000000 +0000 @@ -49,6 +49,7 @@ "go.temporal.io/api/workflowservice/v1" "go.temporal.io/sdk/internal/common/retry" + "go.temporal.io/sdk/internal/protocol" "go.temporal.io/sdk/converter" "go.temporal.io/sdk/internal/common" @@ -93,6 +94,11 @@ laRetryCh chan *localActivityTask } + // eagerWorkflowTask represents a workflow task sent from an eager workflow executor + eagerWorkflowTask struct { + task *workflowservice.PollWorkflowTaskQueueResponse + } + // activityTask wraps a activity task. 
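// The workflow info fields populated above (continueAsNewSuggested,
// currentHistorySize) give workflow code a server-driven signal for when to roll
// over via continue-as-new. A hedged sketch, assuming public accessors along the
// lines of GetContinueAsNewSuggested on workflow.GetInfo's result, with a
// hypothetical "ProcessBatch" activity.
// Imports assumed: "time", "go.temporal.io/sdk/workflow".
func LoopingWorkflow(ctx workflow.Context, state int) error {
    actCtx := workflow.WithActivityOptions(ctx, workflow.ActivityOptions{
        StartToCloseTimeout: time.Minute,
    })
    for {
        if err := workflow.ExecuteActivity(actCtx, "ProcessBatch", state).Get(ctx, &state); err != nil {
            return err
        }
        // Roll over when the server hints that history is getting large,
        // rather than hard-coding an iteration count.
        if workflow.GetInfo(ctx).GetContinueAsNewSuggested() {
            return workflow.NewContinueAsNewError(ctx, LoopingWorkflow, state)
        }
    }
}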
activityTask struct { task *workflowservice.PollActivityTaskQueueResponse @@ -173,6 +179,8 @@ next []*historypb.HistoryEvent nextFlags []sdkFlag binaryChecksum string + sdkVersion string + sdkName string } workflowTaskHeartbeatError struct { @@ -182,6 +190,24 @@ historyMismatchError struct { message string } + + preparedTask struct { + events []*historypb.HistoryEvent + markers []*historypb.HistoryEvent + flags []sdkFlag + msgs []*protocolpb.Message + binaryChecksum string + sdkVersion string + sdkName string + } + + finishedTask struct { + isFailed bool + binaryChecksum string + flags []sdkFlag + sdkVersion string + sdkName string + } ) func newHistory(task *workflowTask, eventsHandler *workflowExecutionEventHandlerImpl) *history { @@ -223,14 +249,19 @@ return event.GetEventId() <= eh.workflowTask.task.GetPreviousStartedEventId() || isCommandEvent(event.GetEventType()) } -func (eh *history) IsNextWorkflowTaskFailed() (isFailed bool, binaryChecksum string, flags []sdkFlag, err error) { +// isNextWorkflowTaskFailed checks if the workflow task failed or completed. If it did complete returns some information +// on the completed workflow task. +func (eh *history) isNextWorkflowTaskFailed() (task finishedTask, err error) { nextIndex := eh.currentIndex + 1 - if nextIndex >= len(eh.loadedEvents) && eh.hasMoreEvents() { // current page ends and there is more pages + // Server can return an empty page so if we need the next event we must keep checking until we either get it + // or know we have no more pages to check + for nextIndex >= len(eh.loadedEvents) && eh.hasMoreEvents() { // current page ends and there is more pages if err := eh.loadMoreEvents(); err != nil { - return false, "", nil, err + return finishedTask{}, err } } + // If not replaying we should not expect to find any more events if nextIndex < len(eh.loadedEvents) { nextEvent := eh.loadedEvents[nextIndex] nextEventType := nextEvent.GetEventType() @@ -243,14 +274,20 @@ f := sdkFlagFromUint(flag) if !f.isValid() { // If a flag is not recognized (value is too high or not defined), it must fail the workflow task - return false, "", nil, errors.New("could not recognize SDK flag") + return finishedTask{}, errors.New("could not recognize SDK flag") } flags = append(flags, f) } } - return isFailed, binaryChecksum, flags, nil + return finishedTask{ + isFailed: isFailed, + binaryChecksum: binaryChecksum, + flags: flags, + sdkName: nextEvent.GetWorkflowTaskCompletedEventAttributes().GetSdkMetadata().GetSdkName(), + sdkVersion: nextEvent.GetWorkflowTaskCompletedEventAttributes().GetSdkMetadata().GetSdkVersion(), + }, nil } - return false, "", nil, nil + return finishedTask{}, nil } func (eh *history) loadMoreEvents() error { @@ -290,23 +327,48 @@ } } -// NextCommandEvents returns events that there processed as new by the next command. -// TODO(maxim): Refactor to return a struct instead of multiple parameters -func (eh *history) NextCommandEvents() (result []*historypb.HistoryEvent, markers []*historypb.HistoryEvent, binaryChecksum string, sdkFlags []sdkFlag, err error) { +// nextTask returns the next task to be processed. 
+func (eh *history) nextTask() (*preparedTask, error) { if eh.next == nil { - eh.next, _, eh.nextFlags, err = eh.nextCommandEvents() + firstTask, err := eh.prepareTask() if err != nil { - return result, markers, eh.binaryChecksum, sdkFlags, err + return nil, err } + eh.next = firstTask.events + eh.nextFlags = firstTask.flags + eh.sdkName = firstTask.sdkName + eh.sdkVersion = firstTask.sdkVersion } - result = eh.next + result := eh.next checksum := eh.binaryChecksum - sdkFlags = eh.nextFlags + sdkFlags := eh.nextFlags + sdkName := eh.sdkName + sdkVersion := eh.sdkVersion + + var markers []*historypb.HistoryEvent + var msgs []*protocolpb.Message if len(result) > 0 { - eh.next, markers, eh.nextFlags, err = eh.nextCommandEvents() - } - return result, markers, checksum, sdkFlags, err + nextTaskEvents, err := eh.prepareTask() + if err != nil { + return nil, err + } + eh.next = nextTaskEvents.events + eh.nextFlags = nextTaskEvents.flags + eh.sdkName = nextTaskEvents.sdkName + eh.sdkVersion = nextTaskEvents.sdkVersion + markers = nextTaskEvents.markers + msgs = nextTaskEvents.msgs + } + return &preparedTask{ + events: result, + markers: markers, + flags: sdkFlags, + msgs: msgs, + binaryChecksum: checksum, + sdkName: sdkName, + sdkVersion: sdkVersion, + }, nil } func (eh *history) hasMoreEvents() bool { @@ -334,54 +396,60 @@ return nil } -func (eh *history) nextCommandEvents() (nextEvents []*historypb.HistoryEvent, markers []*historypb.HistoryEvent, sdkFlags []sdkFlag, err error) { +func (eh *history) prepareTask() (*preparedTask, error) { if eh.currentIndex == len(eh.loadedEvents) && !eh.hasMoreEvents() { if err := eh.verifyAllEventsProcessed(); err != nil { - return nil, nil, nil, err + return nil, err } - return []*historypb.HistoryEvent{}, []*historypb.HistoryEvent{}, []sdkFlag{}, nil + return &preparedTask{}, nil } // Process events - + var taskEvents preparedTask OrderEvents: for { // load more history events if needed for eh.currentIndex == len(eh.loadedEvents) { if !eh.hasMoreEvents() { - if err = eh.verifyAllEventsProcessed(); err != nil { - return + if err := eh.verifyAllEventsProcessed(); err != nil { + return nil, err } break OrderEvents } - if err = eh.loadMoreEvents(); err != nil { - return + if err := eh.loadMoreEvents(); err != nil { + return nil, err } } event := eh.loadedEvents[eh.currentIndex] eventID := event.GetEventId() if eventID != eh.nextEventID { - err = fmt.Errorf( + err := fmt.Errorf( "missing history events, expectedNextEventID=%v but receivedNextEventID=%v", eh.nextEventID, eventID) - return + return nil, err } eh.nextEventID++ switch event.GetEventType() { case enumspb.EVENT_TYPE_WORKFLOW_TASK_STARTED: - isFailed, binaryChecksum, newFlags, err1 := eh.IsNextWorkflowTaskFailed() + finishedTask, err1 := eh.isNextWorkflowTaskFailed() if err1 != nil { - err = err1 - return + err := err1 + return nil, err } - if !isFailed { - eh.binaryChecksum = binaryChecksum + if !finishedTask.isFailed { + eh.binaryChecksum = finishedTask.binaryChecksum eh.currentIndex++ - nextEvents = append(nextEvents, event) - sdkFlags = append(sdkFlags, newFlags...) + taskEvents.events = append(taskEvents.events, event) + taskEvents.flags = append(taskEvents.flags, finishedTask.flags...) 
+ if finishedTask.sdkName != "" { + taskEvents.sdkName = finishedTask.sdkName + } + if finishedTask.sdkVersion != "" { + taskEvents.sdkVersion = finishedTask.sdkVersion + } break OrderEvents } case enumspb.EVENT_TYPE_WORKFLOW_TASK_SCHEDULED, @@ -390,9 +458,11 @@ // Skip default: if isPreloadMarkerEvent(event) { - markers = append(markers, event) + taskEvents.markers = append(taskEvents.markers, event) + } else if attrs := event.GetWorkflowExecutionUpdateAcceptedEventAttributes(); attrs != nil { + taskEvents.msgs = append(taskEvents.msgs, inferMessage(attrs)) } - nextEvents = append(nextEvents, event) + taskEvents.events = append(taskEvents.events, event) } eh.currentIndex++ } @@ -408,13 +478,24 @@ eh.currentIndex = 0 - return nextEvents, markers, sdkFlags, nil + return &taskEvents, nil } func isPreloadMarkerEvent(event *historypb.HistoryEvent) bool { return event.GetEventType() == enumspb.EVENT_TYPE_MARKER_RECORDED } +func inferMessage(attrs *historypb.WorkflowExecutionUpdateAcceptedEventAttributes) *protocolpb.Message { + return &protocolpb.Message{ + Id: attrs.GetAcceptedRequestMessageId(), + ProtocolInstanceId: attrs.GetProtocolInstanceId(), + SequencingId: &protocolpb.Message_EventId{ + EventId: attrs.GetAcceptedRequestSequencingEventId(), + }, + Body: protocol.MustMarshalAny(attrs.GetAcceptedRequest()), + } +} + // newWorkflowTaskHandler returns an implementation of workflow task handler. func newWorkflowTaskHandler(params workerExecutionParameters, ppMgr pressurePointMgr, registry *registry) WorkflowTaskHandler { ensureRequiredParams(¶ms) @@ -450,11 +531,19 @@ return workflowContext } +// Lock acquires the lock on this context object, use Unlock(error) to release +// the lock. func (w *workflowExecutionContextImpl) Lock() { w.mutex.Lock() } +// Unlock cleans up after the provided error and it's own internal view of the +// workflow error state by clearing itself and removing itself from cache as +// needed. It is an error to call this function without having called the Lock +// function first and the behavior is undefined. Regardless of the error +// handling involved, the context will be unlocked when this call returns. func (w *workflowExecutionContextImpl) Unlock(err error) { + defer w.mutex.Unlock() if err != nil || w.err != nil || w.isWorkflowCompleted || (w.wth.cache.MaxWorkflowCacheSize() <= 0 && !w.hasPendingLocalActivityWork()) { // TODO: in case of closed, it asumes the close command always succeed. need server side change to return @@ -472,8 +561,6 @@ // exited w.clearState() } - - w.mutex.Unlock() } func (w *workflowExecutionContextImpl) getEventHandler() *workflowExecutionEventHandlerImpl { @@ -566,7 +653,6 @@ if taskQueue == nil || taskQueue.Name == "" { return nil, errors.New("nil or empty TaskQueue in WorkflowExecutionStarted event") } - task.Messages = append(inferMessages(task.GetHistory().GetEvents()), task.Messages...) runID := task.WorkflowExecution.GetRunId() workflowID := task.WorkflowExecution.GetWorkflowId() @@ -608,7 +694,7 @@ return newWorkflowExecutionContext(workflowInfo, wth), nil } -func (wth *workflowTaskHandlerImpl) getOrCreateWorkflowContext( +func (wth *workflowTaskHandlerImpl) GetOrCreateWorkflowContext( task *workflowservice.PollWorkflowTaskQueueResponse, historyIterator HistoryIterator, ) (workflowContext *workflowExecutionContextImpl, err error) { @@ -713,7 +799,6 @@ return err } } - task.Messages = append(inferMessages(task.GetHistory().GetEvents()), task.Messages...) 
if w.workflowInfo != nil { // Reset the search attributes and memos from the WorkflowExecutionStartedEvent. // The search attributes and memo may have been modified by calls like UpsertMemo @@ -734,10 +819,11 @@ // ProcessWorkflowTask processes all the events of the workflow task. func (wth *workflowTaskHandlerImpl) ProcessWorkflowTask( workflowTask *workflowTask, + workflowContext *workflowExecutionContextImpl, heartbeatFunc workflowTaskHeartbeatFunc, -) (completeRequest interface{}, resetter EventLevelResetter, errRet error) { +) (completeRequest interface{}, errRet error) { if workflowTask == nil || workflowTask.task == nil { - return nil, nil, errors.New("nil workflow task provided") + return nil, errors.New("nil workflow task provided") } task := workflowTask.task if task.History == nil || len(task.History.Events) == 0 { @@ -746,11 +832,11 @@ } } if task.Query == nil && len(task.History.Events) == 0 { - return nil, nil, errors.New("nil or empty history") + return nil, errors.New("nil or empty history") } if task.Query != nil && len(task.Queries) != 0 { - return nil, nil, errors.New("invalid query workflow task") + return nil, errors.New("invalid query workflow task") } runID := task.WorkflowExecution.GetRunId() @@ -764,18 +850,12 @@ tagPreviousStartedEventID, task.GetPreviousStartedEventId()) }) - workflowContext, err := wth.getOrCreateWorkflowContext(task, workflowTask.historyIterator) - if err != nil { - return nil, nil, err - } - - defer func() { - workflowContext.Unlock(errRet) - }() - - var response interface{} + var ( + response interface{} + err error + heartbeatTimer *time.Timer + ) - var heartbeatTimer *time.Timer defer func() { if heartbeatTimer != nil { heartbeatTimer.Stop() @@ -860,7 +940,6 @@ } errRet = err completeRequest = response - resetter = workflowContext.SetPreviousStartedEventID return } @@ -878,8 +957,7 @@ var replayCommands []*commandpb.Command var respondEvents []*historypb.HistoryEvent - msgs := indexMessagesByEventID(workflowTask.task.GetMessages()) - + taskMessages := workflowTask.task.GetMessages() skipReplayCheck := w.skipReplayCheck() shouldForceReplayCheck := func() bool { isInReplayer := IsReplayNamespace(w.wth.namespace) @@ -899,10 +977,36 @@ ProcessEvents: for { - reorderedEvents, markers, binaryChecksum, flags, err := reorderedHistory.NextCommandEvents() + nextTask, err := reorderedHistory.nextTask() if err != nil { return nil, err } + reorderedEvents := nextTask.events + markers := nextTask.markers + historyMessages := nextTask.msgs + flags := nextTask.flags + binaryChecksum := nextTask.binaryChecksum + // Check if we are replaying so we know if we should use the messages in the WFT or the history + isReplay := len(reorderedEvents) > 0 && reorderedHistory.IsReplayEvent(reorderedEvents[len(reorderedEvents)-1]) + var msgs *eventMsgIndex + if isReplay { + msgs = indexMessagesByEventID(historyMessages) + + eventHandler.sdkVersion = nextTask.sdkVersion + eventHandler.sdkName = nextTask.sdkName + } else { + msgs = indexMessagesByEventID(taskMessages) + taskMessages = []*protocolpb.Message{} + if eventHandler.sdkVersion != SDKVersion { + eventHandler.sdkVersionUpdated = true + eventHandler.sdkVersion = SDKVersion + } + if eventHandler.sdkName != SDKName { + eventHandler.sdkNameUpdated = true + eventHandler.sdkName = SDKName + } + } + eventHandler.sdkFlags.set(flags...) 
if len(reorderedEvents) == 0 { break ProcessEvents @@ -962,22 +1066,27 @@ if err != nil { return nil, err } + if w.isWorkflowCompleted && !shouldForceReplayCheck() { + break ProcessEvents + } } err = eventHandler.ProcessEvent(event, isInReplay, isLast) if err != nil { return nil, err } + if w.isWorkflowCompleted && !shouldForceReplayCheck() { + break ProcessEvents + } for _, msg := range msgs.takeLTE(event.GetEventId()) { err := eventHandler.ProcessMessage(msg, isInReplay, isLast) if err != nil { return nil, err } - } - - if w.isWorkflowCompleted && !shouldForceReplayCheck() { - break ProcessEvents + if w.isWorkflowCompleted && !shouldForceReplayCheck() { + break ProcessEvents + } } } @@ -993,7 +1102,6 @@ } } } - isReplay := len(reorderedEvents) > 0 && reorderedHistory.IsReplayEvent(reorderedEvents[len(reorderedEvents)-1]) if isReplay { eventCommands := eventHandler.commandsHelper.getCommands(true) if !skipReplayCheck { @@ -1164,6 +1272,9 @@ task := eventHandler.pendingLaTasks[activityID] task.wc = w task.workflowTask = workflowTask + + task.scheduledTime = time.Now() + if !w.laTunnel.sendTask(task) { unstartedLaTasks[activityID] = struct{}{} task.wc = nil @@ -1220,8 +1331,6 @@ } func (w *workflowExecutionContextImpl) SetPreviousStartedEventID(eventID int64) { - w.mutex.Lock() // This call can race against the cache eviction thread - see clearState - defer w.mutex.Unlock() w.previousStartedEventID = eventID } @@ -1668,6 +1777,8 @@ MeteringMetadata: &commonpb.MeteringMetadata{NonfirstLocalActivityExecutionAttempts: nonfirstLAAttempts}, SdkMetadata: &sdk.WorkflowTaskCompletedMetadata{ LangUsedFlags: langUsedFlags, + SdkName: eventHandler.getNewSdkNameAndReset(), + SdkVersion: eventHandler.getNewSdkVersionAndReset(), }, WorkerVersionStamp: &commonpb.WorkerVersionStamp{ BuildId: wth.workerBuildID, diff -Nru temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/internal/internal_task_pollers.go temporal-1.22.5/src/vendor/go.temporal.io/sdk/internal/internal_task_pollers.go --- temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/internal/internal_task_pollers.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.temporal.io/sdk/internal/internal_task_pollers.go 2024-02-23 09:46:13.000000000 +0000 @@ -94,6 +94,7 @@ identity string service workflowservice.WorkflowServiceClient taskHandler WorkflowTaskHandler + contextManager WorkflowContextManager logger log.Logger dataConverter converter.DataConverter failureConverter converter.FailureConverter @@ -265,6 +266,7 @@ // newWorkflowTaskPoller creates a new workflow task poller which must have a one to one relationship to workflow worker func newWorkflowTaskPoller( taskHandler WorkflowTaskHandler, + contextManager WorkflowContextManager, service workflowservice.WorkflowServiceClient, params workerExecutionParameters, ) *workflowTaskPoller { @@ -281,6 +283,7 @@ taskQueueName: params.TaskQueue, identity: params.Identity, taskHandler: taskHandler, + contextManager: contextManager, logger: params.Logger, dataConverter: params.DataConverter, failureConverter: params.FailureConverter, @@ -313,6 +316,8 @@ switch task := task.(type) { case *workflowTask: return wtp.processWorkflowTask(task) + case *eagerWorkflowTask: + return wtp.processWorkflowTask(wtp.toWorkflowTask(task.task)) default: panic("unknown task type.") } @@ -333,14 +338,22 @@ // close doneCh so local activity worker won't get blocked forever when trying to send back result to laResultCh. 
defer close(doneCh) + wfctx, err := wtp.contextManager.GetOrCreateWorkflowContext(task.task, task.historyIterator) + if err != nil { + return err + } + var taskErr error + defer func() { wfctx.Unlock(taskErr) }() + for { - var response *workflowservice.RespondWorkflowTaskCompletedResponse startTime := time.Now() task.doneCh = doneCh task.laResultCh = laResultCh task.laRetryCh = laRetryCh - completedRequest, resetter, err := wtp.taskHandler.ProcessWorkflowTask( + var completedRequest interface{} + completedRequest, taskErr = wtp.taskHandler.ProcessWorkflowTask( task, + wfctx, func(response interface{}, startTime time.Time) (*workflowTask, error) { wtp.logger.Debug("Force RespondWorkflowTaskCompleted.", "TaskStartedEventID", task.task.GetStartedEventId()) heartbeatResponse, err := wtp.RespondTaskCompletedWithMetrics(response, nil, task.task, startTime) @@ -357,22 +370,22 @@ return task, nil }, ) - if completedRequest == nil && err == nil { + if completedRequest == nil && taskErr == nil { return nil } - if _, ok := err.(workflowTaskHeartbeatError); ok { - return err + if _, ok := taskErr.(workflowTaskHeartbeatError); ok { + return taskErr } - response, err = wtp.RespondTaskCompletedWithMetrics(completedRequest, err, task.task, startTime) + response, err := wtp.RespondTaskCompletedWithMetrics(completedRequest, taskErr, task.task, startTime) if err != nil { return err } if eventLevel := response.GetResetHistoryEventId(); eventLevel != 0 { - resetter(eventLevel) + wfctx.SetPreviousStartedEventID(eventLevel) } - if response == nil || response.WorkflowTask == nil { + if response == nil || response.WorkflowTask == nil || taskErr != nil { return nil } @@ -405,7 +418,10 @@ return } -func (wtp *workflowTaskPoller) RespondTaskCompleted(completedRequest interface{}, task *workflowservice.PollWorkflowTaskQueueResponse) (response *workflowservice.RespondWorkflowTaskCompletedResponse, err error) { +func (wtp *workflowTaskPoller) RespondTaskCompleted( + completedRequest interface{}, + task *workflowservice.PollWorkflowTaskQueueResponse, +) (response *workflowservice.RespondWorkflowTaskCompletedResponse, err error) { ctx := context.Background() // Respond task completion. grpcCtx, cancel := newGRPCContext(ctx, grpcMetricsHandler( @@ -562,18 +578,8 @@ return &localActivityResult{task: task, err: err} } - timeout := task.params.ScheduleToCloseTimeout - if task.params.StartToCloseTimeout != 0 && task.params.StartToCloseTimeout < timeout { - timeout = task.params.StartToCloseTimeout - } - timeoutDuration := timeout - deadline := time.Now().Add(timeoutDuration) - if task.attempt > 1 && !task.expireTime.IsZero() && task.expireTime.Before(deadline) { - // this is attempt and expire time is before SCHEDULE_TO_CLOSE timeout - deadline = task.expireTime - } - - ctx, cancel := context.WithDeadline(ctx, deadline) + info := getActivityEnv(ctx) + ctx, cancel := context.WithDeadline(ctx, info.deadline) defer cancel() task.Lock() @@ -615,13 +621,14 @@ laResult, err = ae.ExecuteWithActualArgs(ctx, task.params.InputArgs) executionLatency := time.Since(laStartTime) metricsHandler.Timer(metrics.LocalActivityExecutionLatency).Record(executionLatency) - if executionLatency > timeoutDuration { + if time.Now().After(info.deadline) { // If local activity takes longer than expected timeout, the context would already be DeadlineExceeded and // the result would be discarded. Print a warning in this case. 
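// With the change above, a local activity's context deadline is taken from the
// activity environment prepared by the caller instead of being recomputed here
// from the raw options, and the slow-execution warning now reports both timeouts.
// A hypothetical workflow-side configuration showing the two budgets; as the
// following hunk shows, exceeding only the per-attempt budget is now reported as a
// start-to-close timeout instead of the terminal deadline-exceeded error.
// Imports assumed: "context", "time", "go.temporal.io/sdk/temporal", "go.temporal.io/sdk/workflow".
func someLocalActivity(ctx context.Context, n int) (int, error) { return n + 1, nil }

func callLocalActivity(ctx workflow.Context, n int) (int, error) {
    ctx = workflow.WithLocalActivityOptions(ctx, workflow.LocalActivityOptions{
        ScheduleToCloseTimeout: time.Minute,     // total budget across retries
        StartToCloseTimeout:    5 * time.Second, // budget for a single attempt
        RetryPolicy:            &temporal.RetryPolicy{MaximumAttempts: 5},
    })
    var out int
    err := workflow.ExecuteLocalActivity(ctx, someLocalActivity, n).Get(ctx, &out)
    return out, err
}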
lath.logger.Warn("LocalActivity takes too long to complete.", "LocalActivityID", task.activityID, "LocalActivityType", activityType, "ScheduleToCloseTimeout", task.params.ScheduleToCloseTimeout, + "StartToCloseTimeout", task.params.StartToCloseTimeout, "ActualExecutionDuration", executionLatency) } }(doneCh) @@ -642,7 +649,11 @@ metricsHandler.Counter(metrics.LocalActivityExecutionCanceledCounter).Inc(1) return &localActivityResult{err: ErrCanceled, task: task} } else if ctx.Err() == context.DeadlineExceeded { - return &localActivityResult{err: ErrDeadlineExceeded, task: task} + if task.params.ScheduleToCloseTimeout != 0 && time.Now().After(info.scheduledTime.Add(task.params.ScheduleToCloseTimeout)) { + return &localActivityResult{err: ErrDeadlineExceeded, task: task} + } else { + return &localActivityResult{err: NewTimeoutError("deadline exceeded", enumspb.TIMEOUT_TYPE_START_TO_CLOSE, nil), task: task} + } } else { // should not happen return &localActivityResult{err: NewApplicationError("unexpected context done", "", true, nil), task: task} diff -Nru temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/internal/internal_update.go temporal-1.22.5/src/vendor/go.temporal.io/sdk/internal/internal_update.go --- temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/internal/internal_update.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.temporal.io/sdk/internal/internal_update.go 2024-02-23 09:46:13.000000000 +0000 @@ -66,7 +66,7 @@ Complete(success interface{}, err error) } - // UpdateScheduluer allows an update state machine to spawn coroutines and + // UpdateScheduler allows an update state machine to spawn coroutines and // yield itself as necessary. UpdateScheduler interface { // Spawn starts a new named coroutine, executing the given function f. @@ -88,13 +88,13 @@ // updateProtocol wraps an updateEnv and some protocol metadata to // implement the UpdateCallbacks abstraction. It handles callbacks by - // sending protocol lmessages. + // sending protocol messages. updateProtocol struct { protoInstanceID string + clientIdentity string requestMsgID string requestSeqID int64 - initialRequest updatepb.Request - scheduleUpdate func(name string, args *commonpb.Payloads, header *commonpb.Header, callbacks UpdateCallbacks) + scheduleUpdate func(name string, id string, args *commonpb.Payloads, header *commonpb.Header, callbacks UpdateCallbacks) env updateEnv state updateState } @@ -114,7 +114,7 @@ // update callbacks. 
func newUpdateProtocol( protoInstanceID string, - scheduleUpdate func(name string, args *commonpb.Payloads, header *commonpb.Header, callbacks UpdateCallbacks), + scheduleUpdate func(name string, id string, args *commonpb.Payloads, header *commonpb.Header, callbacks UpdateCallbacks), env updateEnv, ) *updateProtocol { return &updateProtocol{ @@ -131,18 +131,19 @@ return } } - panic(fmt.Sprintf("invalid action %q in update protocol from state %s", action, up.state)) + panicIllegalState(fmt.Sprintf("invalid action %q in update protocol %v", action, up)) } func (up *updateProtocol) HandleMessage(msg *protocolpb.Message) error { - if err := types.UnmarshalAny(msg.Body, &up.initialRequest); err != nil { + var req updatepb.Request + if err := types.UnmarshalAny(msg.Body, &req); err != nil { return err } up.requireState("update request", updateStateNew) up.requestMsgID = msg.GetId() up.requestSeqID = msg.GetEventId() - input := up.initialRequest.GetInput() - up.scheduleUpdate(input.GetName(), input.GetArgs(), input.GetHeader(), up) + input := req.GetInput() + up.scheduleUpdate(input.GetName(), req.GetMeta().GetUpdateId(), input.GetArgs(), input.GetHeader(), up) up.state = updateStateRequestInitiated return nil } @@ -157,7 +158,7 @@ Body: protocol.MustMarshalAny(&updatepb.Acceptance{ AcceptedRequestMessageId: up.requestMsgID, AcceptedRequestSequencingEventId: up.requestSeqID, - AcceptedRequest: &up.initialRequest, + // AcceptedRequest field no longer read by server - will be removed from API soon }), }, withExpectedEventPredicate(up.checkAcceptedEvent)) up.state = updateStateAccepted @@ -172,8 +173,8 @@ Body: protocol.MustMarshalAny(&updatepb.Rejection{ RejectedRequestMessageId: up.requestMsgID, RejectedRequestSequencingEventId: up.requestSeqID, - RejectedRequest: &up.initialRequest, Failure: up.env.GetFailureConverter().ErrorToFailure(err), + // RejectedRequest field no longer read by server - will be removed from API soon }), }) up.state = updateStateCompleted @@ -201,7 +202,10 @@ Id: up.protoInstanceID + "/complete", ProtocolInstanceId: up.protoInstanceID, Body: protocol.MustMarshalAny(&updatepb.Response{ - Meta: up.initialRequest.GetMeta(), + Meta: &updatepb.Meta{ + UpdateId: up.protoInstanceID, + Identity: up.clientIdentity, + }, Outcome: outcome, }), }, withExpectedEventPredicate(up.checkCompletedEvent)) @@ -223,10 +227,11 @@ } return attrs.AcceptedRequest.GetMeta().GetUpdateId() == up.protoInstanceID && attrs.AcceptedRequestMessageId == up.requestMsgID && - attrs.AcceptedRequestSequencingEventId == up.requestSeqID + attrs.AcceptedRequestSequencingEventId == up.requestSeqID && + attrs.AcceptedRequest != nil } -// defaultHandler receives the initial invocation of an upate during WFT +// defaultHandler receives the initial invocation of an update during WFT // processing. The implementation will verify that an updateHandler exists for // the supplied name (rejecting the update otherwise) and use the provided spawn // function to create a new coroutine that will execute in the workflow context. @@ -236,6 +241,7 @@ func defaultUpdateHandler( rootCtx Context, name string, + id string, serializedArgs *commonpb.Payloads, header *commonpb.Header, callbacks UpdateCallbacks, @@ -248,6 +254,10 @@ return } scheduler.Spawn(ctx, name, func(ctx Context) { + ctx = WithValue(ctx, updateInfoContextKey, &UpdateInfo{ + ID: id, + }) + eo := getWorkflowEnvOptions(ctx) // If we suspect that handler registration has not occurred (e.g. 
@@ -284,7 +294,12 @@ if !IsReplaying(ctx) { // we don't execute update validation during replay so that // validation routines can change across versions - if err := envInterceptor.inboundInterceptor.ValidateUpdate(ctx, &input); err != nil { + err = func() error { + defer getState(ctx).dispatcher.setIsReadOnly(false) + getState(ctx).dispatcher.setIsReadOnly(true) + return envInterceptor.inboundInterceptor.ValidateUpdate(ctx, &input) + }() + if err != nil { callbacks.Reject(err) return } diff -Nru temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/internal/internal_worker.go temporal-1.22.5/src/vendor/go.temporal.io/sdk/internal/internal_worker.go --- temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/internal/internal_worker.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.temporal.io/sdk/internal/internal_worker.go 2024-02-23 09:46:13.000000000 +0000 @@ -49,7 +49,6 @@ commonpb "go.temporal.io/api/common/v1" enumspb "go.temporal.io/api/enums/v1" historypb "go.temporal.io/api/history/v1" - protocolpb "go.temporal.io/api/protocol/v1" "go.temporal.io/api/workflowservice/v1" "go.temporal.io/api/workflowservicemock/v1" @@ -58,7 +57,6 @@ "go.temporal.io/sdk/internal/common/serializer" "go.temporal.io/sdk/internal/common/util" ilog "go.temporal.io/sdk/internal/log" - "go.temporal.io/sdk/internal/protocol" "go.temporal.io/sdk/log" ) @@ -292,18 +290,19 @@ } else { taskHandler = newWorkflowTaskHandler(params, ppMgr, registry) } - return newWorkflowTaskWorkerInternal(taskHandler, service, params, workerStopChannel, registry.interceptors) + return newWorkflowTaskWorkerInternal(taskHandler, taskHandler, service, params, workerStopChannel, registry.interceptors) } func newWorkflowTaskWorkerInternal( taskHandler WorkflowTaskHandler, + contextManager WorkflowContextManager, service workflowservice.WorkflowServiceClient, params workerExecutionParameters, stopC chan struct{}, interceptors []WorkerInterceptor, ) *workflowWorker { ensureRequiredParams(¶ms) - poller := newWorkflowTaskPoller(taskHandler, service, params) + poller := newWorkflowTaskPoller(taskHandler, contextManager, service, params) worker := newBaseWorker(baseWorkerOptions{ pollerCount: params.MaxConcurrentWorkflowTaskQueuePollers, pollerRate: defaultPollerRate, @@ -952,6 +951,9 @@ if err := aw.workflowWorker.Start(); err != nil { return err } + if aw.client.eagerDispatcher != nil { + aw.client.eagerDispatcher.registerWorker(aw.workflowWorker) + } } if !util.IsInterfaceNil(aw.activityWorker) { if err := aw.activityWorker.Start(); err != nil { @@ -1120,6 +1122,7 @@ failureConverter converter.FailureConverter contextPropagators []ContextPropagator enableLoggingInReplay bool + disableDeadlockDetection bool mu sync.Mutex workflowExecutionResults map[string]*commonpb.Payloads } @@ -1151,6 +1154,10 @@ // This is only useful for debugging purpose. // default: false EnableLoggingInReplay bool + + // Optional: Disable the default 1 second deadlock detection timeout. This option can be used to step through + // workflow code with multiple breakpoints in a debugger. + DisableDeadlockDetection bool } // ReplayWorkflowHistoryOptions are options for replaying a workflow. 
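// The DisableDeadlockDetection option documented above exists for debugger
// sessions: without it the 1 second deadlock detector fails a replay while the
// workflow goroutine is parked on a breakpoint. A hypothetical replay harness,
// assuming the public worker.WorkflowReplayerOptions exposes this field; it reuses
// the LoopingWorkflow sketch from earlier and a hypothetical history file path.
// Imports assumed: "go.temporal.io/sdk/worker".
func replayForDebugging(historyFile string) error {
    r, err := worker.NewWorkflowReplayerWithOptions(worker.WorkflowReplayerOptions{
        DisableDeadlockDetection: true, // effectively raises the timeout to math.MaxInt64, per the hunk below
    })
    if err != nil {
        return err
    }
    r.RegisterWorkflow(LoopingWorkflow)
    // nil logger: the replayer falls back to its default logger.
    return r.ReplayWorkflowHistoryFromJSONFile(nil, historyFile)
}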
@@ -1170,6 +1177,7 @@ failureConverter: options.FailureConverter, contextPropagators: options.ContextPropagators, enableLoggingInReplay: options.EnableLoggingInReplay, + disableDeadlockDetection: options.DisableDeadlockDetection, workflowExecutionResults: make(map[string]*commonpb.Payloads), }, nil } @@ -1289,27 +1297,6 @@ return dc.FromPayloads(payloads, valuePtr) } -// inferMessages extracts the set of *interactionpb.Invocation objects that -// should be attached to a workflow task (i.e. the -// PollWorkflowTaskQueueResponse.Messages) if that task were to carry the -// provided slice of history events. -func inferMessages(events []*historypb.HistoryEvent) []*protocolpb.Message { - var messages []*protocolpb.Message - for _, e := range events { - if attrs := e.GetWorkflowExecutionUpdateAcceptedEventAttributes(); attrs != nil { - messages = append(messages, &protocolpb.Message{ - Id: attrs.GetAcceptedRequestMessageId(), - ProtocolInstanceId: attrs.GetProtocolInstanceId(), - SequencingId: &protocolpb.Message_EventId{ - EventId: attrs.GetAcceptedRequestSequencingEventId(), - }, - Body: protocol.MustMarshalAny(attrs.GetAcceptedRequest()), - }) - } - } - return messages -} - func (aw *WorkflowReplayer) replayWorkflowHistory(logger log.Logger, service workflowservice.WorkflowServiceClient, namespace string, originalExecution WorkflowExecution, history *historypb.History) error { taskQueue := "ReplayTaskQueue" events := history.Events @@ -1375,6 +1362,9 @@ FailureConverter: aw.failureConverter, ContextPropagators: aw.contextPropagators, EnableLoggingInReplay: aw.enableLoggingInReplay, + // Hardcoding NopHandler avoids "No metrics handler configured for temporal worker" + // logs during replay. + MetricsHandler: metrics.NopHandler, capabilities: &workflowservice.GetSystemInfoResponse_Capabilities{ SignalAndQueryHeader: true, InternalErrorDifferentiation: true, @@ -1386,8 +1376,16 @@ SdkMetadata: true, }, } + if aw.disableDeadlockDetection { + params.DeadlockDetectionTimeout = math.MaxInt64 + } taskHandler := newWorkflowTaskHandler(params, nil, aw.registry) - resp, _, err := taskHandler.ProcessWorkflowTask(&workflowTask{task: task, historyIterator: iterator}, nil) + wfctx, err := taskHandler.GetOrCreateWorkflowContext(task, iterator) + defer wfctx.Unlock(err) + if err != nil { + return err + } + resp, err := taskHandler.ProcessWorkflowTask(&workflowTask{task: task, historyIterator: iterator}, wfctx, nil) if err != nil { return err } @@ -1485,6 +1483,12 @@ panic("cannot set MaxConcurrentWorkflowTaskExecutionSize to 1") } + // Sessions are not currently compatible with worker versioning + // See: https://github.com/temporalio/sdk-go/issues/1227 + if options.EnableSessionWorker && options.UseBuildIDForVersioning { + panic("cannot set both EnableSessionWorker and UseBuildIDForVersioning") + } + // Need reference to result for fatal error handler var aw *AggregatedWorker fatalErrorCallback := func(err error) { @@ -1589,7 +1593,7 @@ if !options.LocalActivityWorkerOnly { activityWorker = newActivityWorker(client.workflowService, workerParams, nil, registry, nil) // Set the activity worker on the eager executor - workerParams.eagerActivityExecutor.activityWorker = activityWorker + workerParams.eagerActivityExecutor.activityWorker = activityWorker.worker } var sessionWorker *sessionWorker diff -Nru temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/internal/internal_worker_base.go temporal-1.22.5/src/vendor/go.temporal.io/sdk/internal/internal_worker_base.go --- 
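// Worker versioning and the session worker are mutually exclusive as of this patch
// (the panic added above, tracking https://github.com/temporalio/sdk-go/issues/1227).
// A hypothetical worker configuration that opts into versioning therefore leaves
// EnableSessionWorker unset; the task queue and build ID values are illustrative.
// Imports assumed: "go.temporal.io/sdk/client", "go.temporal.io/sdk/worker".
func newVersionedWorker(c client.Client) worker.Worker {
    return worker.New(c, "example-queue", worker.Options{
        BuildID:                 "2024-02-23.1",
        UseBuildIDForVersioning: true,
        // EnableSessionWorker: true, // would panic in worker.New when combined with versioning
    })
}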
temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/internal/internal_worker_base.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.temporal.io/sdk/internal/internal_worker_base.go 2024-02-23 09:46:13.000000000 +0000 @@ -112,7 +112,7 @@ handler func(queryType string, queryArgs *commonpb.Payloads, header *commonpb.Header) (*commonpb.Payloads, error), ) RegisterUpdateHandler( - handler func(string, *commonpb.Payloads, *commonpb.Header, UpdateCallbacks), + handler func(string, string, *commonpb.Payloads, *commonpb.Header, UpdateCallbacks), ) IsReplaying() bool MutableSideEffect(id string, f func() interface{}, equals func(a, b interface{}) bool) converter.EncodedValue @@ -181,6 +181,7 @@ pollerRequestCh chan struct{} taskQueueCh chan interface{} + eagerTaskQueueCh chan eagerTask fatalErrCb func(error) sessionTokenBucket *sessionTokenBucket @@ -192,9 +193,16 @@ polledTask struct { task interface{} } + + eagerTask struct { + // task to process. + task interface{} + // callback to run once the task is processed. + callback func() + } ) -// SetRetryLongPollGracePeriod sets the amount of time a long poller retrys on +// SetRetryLongPollGracePeriod sets the amount of time a long poller retries on // fatal errors before it actually fails. For test use only, // not safe to call with a running worker. func SetRetryLongPollGracePeriod(period time.Duration) { @@ -240,7 +248,8 @@ metricsHandler: metricsHandler.WithTags(metrics.WorkerTags(options.workerType)), taskSlotsAvailable: int32(options.maxConcurrentTask), pollerRequestCh: make(chan struct{}, options.maxConcurrentTask), - taskQueueCh: make(chan interface{}), // no buffer, so poller only able to poll new task after previous is dispatched. + taskQueueCh: make(chan interface{}), // no buffer, so poller only able to poll new task after previous is dispatched. + eagerTaskQueueCh: make(chan eagerTask, options.maxConcurrentTask), // allow enough capacity so that eager dispatch will not block fatalErrCb: options.fatalErrCb, limiterContext: ctx, @@ -274,6 +283,9 @@ bw.stopWG.Add(1) go bw.runTaskDispatcher() + bw.stopWG.Add(1) + go bw.runEagerTaskDispatcher() + bw.isWorkerStarted = true traceLog(func() { bw.logger.Info("Started Worker", @@ -310,6 +322,41 @@ } } +func (bw *baseWorker) tryReserveSlot() bool { + if bw.isStop() { + return false + } + // Reserve a executor slot via a non-blocking attempt to take a poller + // request entry which essentially reserves a slot + select { + case <-bw.pollerRequestCh: + return true + default: + return false + } +} + +func (bw *baseWorker) releaseSlot() { + // Like other sends to this channel, we assume there is room because we + // reserved it, so we make a blocking send. + bw.pollerRequestCh <- struct{}{} +} + +func (bw *baseWorker) pushEagerTask(task eagerTask) { + // Should always be non blocking if a slot was reserved. + bw.eagerTaskQueueCh <- task +} + +func (bw *baseWorker) processTaskAsync(task interface{}, callback func()) { + bw.stopWG.Add(1) + go func() { + if callback != nil { + defer callback() + } + bw.processTask(task) + }() +} + func (bw *baseWorker) runTaskDispatcher() { defer bw.stopWG.Done() @@ -321,17 +368,35 @@ // wait for new task or worker stop select { case <-bw.stopCh: + // Currently we can drop any tasks received when closing. 
+ // https://github.com/temporalio/sdk-go/issues/1197 return case task := <-bw.taskQueueCh: - // for non-polled-task (local activity result as task), we don't need to rate limit + // for non-polled-task (local activity result as task or eager task), we don't need to rate limit _, isPolledTask := task.(*polledTask) if isPolledTask && bw.taskLimiter.Wait(bw.limiterContext) != nil { if bw.isStop() { return } } - bw.stopWG.Add(1) - go bw.processTask(task) + bw.processTaskAsync(task, nil) + } + } +} + +func (bw *baseWorker) runEagerTaskDispatcher() { + defer bw.stopWG.Done() + for { + select { + case <-bw.stopCh: + // drain eager dispatch queue + for len(bw.eagerTaskQueueCh) > 0 { + eagerTask := <-bw.eagerTaskQueueCh + bw.processTaskAsync(eagerTask.task, eagerTask.callback) + } + return + case eagerTask := <-bw.eagerTaskQueueCh: + bw.processTaskAsync(eagerTask.task, eagerTask.callback) } } } diff -Nru temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/internal/internal_workflow.go temporal-1.22.5/src/vendor/go.temporal.io/sdk/internal/internal_workflow.go --- temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/internal/internal_workflow.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.temporal.io/sdk/internal/internal_workflow.go 2024-02-23 09:46:13.000000000 +0000 @@ -47,7 +47,7 @@ const ( defaultSignalChannelSize = 100000 // really large buffering size(100K) - panicIllegalAccessCoroutinueState = "getState: illegal access from outside of workflow context" + panicIllegalAccessCoroutineState = "getState: illegal access from outside of workflow context" ) type ( @@ -170,6 +170,7 @@ closed bool interceptor WorkflowOutboundInterceptor deadlockDetector *deadlockDetector + readOnly bool } // WorkflowOptions options passed to the workflow function @@ -259,6 +260,7 @@ workflowResultContextKey = "workflowResult" coroutinesContextKey = "coroutines" workflowEnvOptionsContextKey = "wfEnvOptions" + updateInfoContextKey = "updateInfo" ) // Assert that structs do indeed implement the interfaces @@ -315,6 +317,7 @@ } func (f *futureImpl) Get(ctx Context, valuePtr interface{}) error { + assertNotInReadOnlyState(ctx) more := f.channel.Receive(ctx, nil) if more { panic("not closed") @@ -436,6 +439,7 @@ } func (f *childWorkflowFutureImpl) SignalChildWorkflow(ctx Context, signalName string, data interface{}) Future { + assertNotInReadOnlyState(ctx) var childExec WorkflowExecution if err := f.GetChildWorkflowExecution().Get(ctx, &childExec); err != nil { return f.GetChildWorkflowExecution() @@ -538,8 +542,8 @@ ) getWorkflowEnvironment(d.rootCtx).RegisterUpdateHandler( - func(name string, serializedArgs *commonpb.Payloads, header *commonpb.Header, callbacks UpdateCallbacks) { - defaultUpdateHandler(d.rootCtx, name, serializedArgs, header, callbacks, coroScheduler{d.dispatcher}) + func(name string, id string, serializedArgs *commonpb.Payloads, header *commonpb.Header, callbacks UpdateCallbacks) { + defaultUpdateHandler(d.rootCtx, name, id, serializedArgs, header, callbacks, coroScheduler{d.dispatcher}) }) getWorkflowEnvironment(d.rootCtx).RegisterQueryHandler( @@ -646,11 +650,38 @@ } state := s.(*coroutineState) if !state.dispatcher.IsExecuting() { - panic(panicIllegalAccessCoroutinueState) + panic(panicIllegalAccessCoroutineState) } return state } +func assertNotInReadOnlyState(ctx Context) { + state := getState(ctx) + // use the dispatcher state instead of the coroutine state because contexts can be + // shared + if state.dispatcher.getIsReadOnly() { + panic(panicIllegalAccessCoroutineState) + } +} 
+ +func assertNotInReadOnlyStateCancellation(ctx Context) { + s := ctx.Value(coroutinesContextKey) + if s == nil { + panic("assertNotInReadOnlyStateCtxCancellation: not workflow context") + } + state := s.(*coroutineState) + // For cancellation the dispatcher may not be running because workflow cancellation + // is sent outside of the dispatchers loop. + if state.dispatcher.IsClosed() { + panic(panicIllegalAccessCoroutineState) + } + // use the dispatcher state instead of the coroutine state because contexts can be + // shared + if state.dispatcher.getIsReadOnly() { + panic(panicIllegalAccessCoroutineState) + } +} + func getStateIfRunning(ctx Context) *coroutineState { if ctx == nil { return nil @@ -675,6 +706,7 @@ } func (c *channelImpl) Receive(ctx Context, valuePtr interface{}) (more bool) { + assertNotInReadOnlyState(ctx) state := getState(ctx) hasResult := false var result interface{} @@ -1039,12 +1071,20 @@ return c } +func (d *dispatcherImpl) IsClosed() bool { + d.mutex.Lock() + defer d.mutex.Unlock() + return d.closed +} + func (d *dispatcherImpl) ExecuteUntilAllBlocked(deadlockDetectionTimeout time.Duration) (err error) { d.mutex.Lock() if d.closed { + d.mutex.Unlock() panic("dispatcher is closed") } if d.executing { + d.mutex.Unlock() panic("call to ExecuteUntilAllBlocked (possibly from a coroutine) while it is already running") } d.executing = true @@ -1103,6 +1143,18 @@ return d.executing } +func (d *dispatcherImpl) getIsReadOnly() bool { + d.mutex.Lock() + defer d.mutex.Unlock() + return d.readOnly +} + +func (d *dispatcherImpl) setIsReadOnly(readOnly bool) { + d.mutex.Lock() + defer d.mutex.Unlock() + d.readOnly = readOnly +} + func (d *dispatcherImpl) Close() { d.mutex.Lock() if d.closed { @@ -1111,6 +1163,10 @@ } d.closed = true d.mutex.Unlock() + // This loop breaks our expectation that only one workflow + // coroutine is running at any time because it triggers all workflow goroutines + // to call their defers at once. Adding synchronization seemed more problematic because + // it could block eviction if there was a deadlock. for i := 0; i < len(d.coroutines); i++ { c := d.coroutines[i] if !c.closed.Load() { @@ -1170,6 +1226,7 @@ } func (s *selectorImpl) Select(ctx Context) { + assertNotInReadOnlyState(ctx) state := getState(ctx) var readyBranch func() var cleanups []func() @@ -1521,7 +1578,7 @@ if p := recover(); p != nil { result = nil st := getStackTraceRaw("query handler [panic]:", 7, 0) - if p == panicIllegalAccessCoroutinueState { + if p == panicIllegalAccessCoroutineState { // query handler code try to access workflow functions outside of workflow context, make error message // more descriptive and clear. p = "query handler must not use temporal context to do things like workflow.NewChannel(), " + @@ -1567,11 +1624,12 @@ wg.Add(-1) } -// Wait blocks and waits for specified number of couritines to +// Wait blocks and waits for specified number of coroutines to // finish executing and then unblocks once the counter has reached 0. 
// // param ctx Context -> workflow context func (wg *waitGroupImpl) Wait(ctx Context) { + assertNotInReadOnlyState(ctx) if wg.n <= 0 { return } diff -Nru temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/internal/internal_workflow_client.go temporal-1.22.5/src/vendor/go.temporal.io/sdk/internal/internal_workflow_client.go --- temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/internal/internal_workflow_client.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.temporal.io/sdk/internal/internal_workflow_client.go 2024-02-23 09:46:13.000000000 +0000 @@ -92,6 +92,7 @@ excludeInternalFromRetry *uberatomic.Bool capabilities *workflowservice.GetSystemInfoResponse_Capabilities capabilitiesLock sync.RWMutex + eagerDispatcher *eagerWorkflowDispatcher // The pointer value is shared across multiple clients. If non-nil, only // access/mutate atomically. @@ -194,6 +195,11 @@ // func which use a next token to get next page of history events paginate func(nexttoken []byte) (*workflowservice.GetWorkflowExecutionHistoryResponse, error) } + + // queryRejectedError is a wrapper for QueryRejected + queryRejectedError struct { + queryRejected *querypb.QueryRejected + } ) // ExecuteWorkflow starts a workflow execution and returns a WorkflowRun that will allow you to wait until this workflow @@ -879,43 +885,28 @@ return nil, err } - var input *commonpb.Payloads - if len(request.Args) > 0 { - var err error - if input, err = encodeArgs(wc.dataConverter, request.Args); err != nil { - return nil, err - } - } - req := &workflowservice.QueryWorkflowRequest{ - Namespace: wc.namespace, - Execution: &commonpb.WorkflowExecution{ - WorkflowId: request.WorkflowID, - RunId: request.RunID, - }, - Query: &querypb.WorkflowQuery{ - QueryType: request.QueryType, - QueryArgs: input, - Header: request.Header, - }, - QueryRejectCondition: request.QueryRejectCondition, - } - - grpcCtx, cancel := newGRPCContext(ctx, defaultGrpcRetryParameters(ctx)) - defer cancel() - resp, err := wc.workflowService.QueryWorkflow(grpcCtx, req) + // Set header before interceptor run + ctx, err := contextWithHeaderPropagated(ctx, request.Header, wc.contextPropagators) if err != nil { return nil, err } - if resp.QueryRejected != nil { - return &QueryWorkflowWithOptionsResponse{ - QueryRejected: resp.QueryRejected, - QueryResult: nil, - }, nil + result, err := wc.interceptor.QueryWorkflow(ctx, &ClientQueryWorkflowInput{ + WorkflowID: request.WorkflowID, + RunID: request.RunID, + QueryType: request.QueryType, + Args: request.Args, + }) + if err != nil { + if err, ok := err.(*queryRejectedError); ok { + return &QueryWorkflowWithOptionsResponse{ + QueryRejected: err.queryRejected, + }, nil + } + return nil, err } return &QueryWorkflowWithOptionsResponse{ - QueryRejected: nil, - QueryResult: newEncodedValue(resp.QueryResult, wc.dataConverter), + QueryResult: result, }, nil } @@ -1016,6 +1007,29 @@ return converted, nil } +// GetWorkerTaskReachability returns which versions are is still in use by open or closed workflows. 
+func (wc *WorkflowClient) GetWorkerTaskReachability(ctx context.Context, options *GetWorkerTaskReachabilityOptions) (*WorkerTaskReachability, error) { + if err := wc.ensureInitialized(); err != nil { + return nil, err + } + + grpcCtx, cancel := newGRPCContext(ctx, defaultGrpcRetryParameters(ctx)) + defer cancel() + + request := &workflowservice.GetWorkerTaskReachabilityRequest{ + Namespace: wc.namespace, + BuildIds: options.BuildIDs, + TaskQueues: options.TaskQueues, + Reachability: taskReachabilityToProto(options.Reachability), + } + resp, err := wc.workflowService.GetWorkerTaskReachability(grpcCtx, request) + if err != nil { + return nil, err + } + converted := workerTaskReachabilityFromProtoResponse(resp) + return converted, nil +} + func (wc *WorkflowClient) UpdateWorkflowWithOptions( ctx context.Context, req *UpdateWorkflowWithOptionsRequest, @@ -1453,7 +1467,9 @@ return &commonpb.SearchAttributes{IndexedFields: attr}, nil } -type workflowClientInterceptor struct{ client *WorkflowClient } +type workflowClientInterceptor struct { + client *WorkflowClient +} func (w *workflowClientInterceptor) ExecuteWorkflow( ctx context.Context, @@ -1516,6 +1532,15 @@ Header: header, } + var eagerExecutor *eagerWorkflowExecutor + if in.Options.EnableEagerStart && w.client.capabilities.GetEagerWorkflowStart() && w.client.eagerDispatcher != nil { + eagerExecutor = w.client.eagerDispatcher.applyToRequest(startRequest) + } + + if in.Options.StartDelay != 0 { + startRequest.WorkflowStartDelay = &in.Options.StartDelay + } + var response *workflowservice.StartWorkflowExecutionResponse grpcCtx, cancel := newGRPCContext(ctx, grpcMetricsHandler( @@ -1524,7 +1549,12 @@ defer cancel() response, err = w.client.workflowService.StartWorkflowExecution(grpcCtx, startRequest) - + eagerWorkflowTask := response.GetEagerWorkflowTask() + if eagerWorkflowTask != nil && eagerExecutor != nil { + eagerExecutor.handleResponse(eagerWorkflowTask) + } else if eagerExecutor != nil { + eagerExecutor.release() + } // Allow already-started error var runID string if e, ok := err.(*serviceerror.WorkflowExecutionAlreadyStarted); ok && !in.Options.WorkflowExecutionErrorWhenAlreadyStarted { @@ -1644,6 +1674,10 @@ Header: header, } + if in.Options.StartDelay != 0 { + signalWithStartRequest.WorkflowStartDelay = &in.Options.StartDelay + } + var response *workflowservice.SignalWithStartWorkflowExecutionResponse // Start creating workflow request. 
@@ -1724,17 +1758,40 @@ return nil, err } - result, err := w.client.QueryWorkflowWithOptions(ctx, &QueryWorkflowWithOptionsRequest{ - WorkflowID: in.WorkflowID, - RunID: in.RunID, - QueryType: in.QueryType, - Args: in.Args, - Header: header, - }) + var input *commonpb.Payloads + if len(in.Args) > 0 { + var err error + if input, err = encodeArgs(w.client.dataConverter, in.Args); err != nil { + return nil, err + } + } + req := &workflowservice.QueryWorkflowRequest{ + Namespace: w.client.namespace, + Execution: &commonpb.WorkflowExecution{ + WorkflowId: in.WorkflowID, + RunId: in.RunID, + }, + Query: &querypb.WorkflowQuery{ + QueryType: in.QueryType, + QueryArgs: input, + Header: header, + }, + QueryRejectCondition: in.QueryRejectCondition, + } + + grpcCtx, cancel := newGRPCContext(ctx, defaultGrpcRetryParameters(ctx)) + defer cancel() + resp, err := w.client.workflowService.QueryWorkflow(grpcCtx, req) if err != nil { return nil, err } - return result.QueryResult, nil + + if resp.QueryRejected != nil { + return nil, &queryRejectedError{ + queryRejected: resp.QueryRejected, + } + } + return newEncodedValue(resp.QueryResult, w.client.dataConverter), nil } func (w *workflowClientInterceptor) UpdateWorkflow( @@ -1745,9 +1802,11 @@ if err != nil { return nil, err } - header, _ := headerPropagated(ctx, w.client.contextPropagators) - - grpcCtx, cancel := newGRPCContext(ctx, defaultGrpcRetryParameters(ctx)) + header, err := headerPropagated(ctx, w.client.contextPropagators) + if err != nil { + return nil, err + } + grpcCtx, cancel := newGRPCContext(ctx, grpcTimeout(pollUpdateTimeout), grpcLongPoll(true), defaultGrpcRetryParameters(ctx)) defer cancel() wfexec := &commonpb.WorkflowExecution{ WorkflowId: in.WorkflowID, @@ -1873,3 +1932,7 @@ } return enc.Get(valuePtr) } + +func (q *queryRejectedError) Error() string { + return q.queryRejected.GoString() +} diff -Nru temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/internal/internal_workflow_testsuite.go temporal-1.22.5/src/vendor/go.temporal.io/sdk/internal/internal_workflow_testsuite.go --- temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/internal/internal_workflow_testsuite.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.temporal.io/sdk/internal/internal_workflow_testsuite.go 2024-02-23 09:46:13.000000000 +0000 @@ -185,7 +185,7 @@ workflowCancelHandler func() signalHandler func(name string, input *commonpb.Payloads, header *commonpb.Header) error queryHandler func(string, *commonpb.Payloads, *commonpb.Header) (*commonpb.Payloads, error) - updateHandler func(name string, input *commonpb.Payloads, header *commonpb.Header, resp UpdateCallbacks) + updateHandler func(name string, id string, input *commonpb.Payloads, header *commonpb.Header, resp UpdateCallbacks) startedHandler func(r WorkflowExecution, e error) isWorkflowCompleted bool @@ -345,6 +345,18 @@ env.workflowInfo.currentHistoryLength = length } +func (env *testWorkflowEnvironmentImpl) setCurrentHistorySize(size int) { + env.workflowInfo.currentHistorySize = size +} + +func (env *testWorkflowEnvironmentImpl) setContinueAsNewSuggested(suggest bool) { + env.workflowInfo.continueAsNewSuggested = suggest +} + +func (env *testWorkflowEnvironmentImpl) setContinuedExecutionRunID(rid string) { + env.workflowInfo.ContinuedExecutionRunID = rid +} + func (env *testWorkflowEnvironmentImpl) newTestWorkflowEnvironmentForChild(params *ExecuteWorkflowParams, callback ResultHandler, startedHandler func(r WorkflowExecution, e error)) (*testWorkflowEnvironmentImpl, error) { // create a new test 
env childEnv := newTestWorkflowEnvironmentImpl(env.testSuite, env.registry) @@ -634,7 +646,8 @@ params: ¶ms, callback: func(lar *LocalActivityResultWrapper) { }, - attempt: 1, + attempt: 1, + scheduledTime: time.Now(), } taskHandler := localActivityTaskHandler{ userContext: env.workerOptions.BackgroundActivityContext, @@ -2021,7 +2034,7 @@ } func (env *testWorkflowEnvironmentImpl) RegisterUpdateHandler( - handler func(name string, input *commonpb.Payloads, header *commonpb.Header, resp UpdateCallbacks), + handler func(name string, id string, input *commonpb.Payloads, header *commonpb.Header, resp UpdateCallbacks), ) { env.updateHandler = handler } @@ -2361,12 +2374,28 @@ return newEncodedValue(blob, env.GetDataConverter()), nil } -func (env *testWorkflowEnvironmentImpl) updateWorkflow(name string, uc UpdateCallbacks, args ...interface{}) { +func (env *testWorkflowEnvironmentImpl) updateWorkflow(name string, id string, uc UpdateCallbacks, args ...interface{}) { data, err := encodeArgs(env.GetDataConverter(), args) if err != nil { panic(err) } - env.updateHandler(name, data, nil, uc) + env.updateHandler(name, id, data, nil, uc) +} + +func (env *testWorkflowEnvironmentImpl) updateWorkflowByID(workflowID, name, id string, uc UpdateCallbacks, args ...interface{}) error { + if workflowHandle, ok := env.runningWorkflows[workflowID]; ok { + if workflowHandle.handled { + return serviceerror.NewNotFound(fmt.Sprintf("Workflow %v already completed", workflowID)) + } + data, err := encodeArgs(env.GetDataConverter(), args) + if err != nil { + panic(err) + } + env.updateHandler(name, id, data, nil, uc) + return nil + } + + return serviceerror.NewNotFound(fmt.Sprintf("Workflow %v not exists", workflowID)) } func (env *testWorkflowEnvironmentImpl) queryWorkflowByID(workflowID, queryType string, args ...interface{}) (converter.EncodedValue, error) { @@ -2431,6 +2460,10 @@ wf.WorkflowTaskTimeout = options.WorkflowTaskTimeout } if len(options.ID) > 0 { + // Reassign the ID in running Workflows so SignalWorkflowByID can find the workflow + originalID := wf.WorkflowExecution.ID + env.runningWorkflows[options.ID] = env.runningWorkflows[wf.WorkflowExecution.ID] + delete(env.runningWorkflows, originalID) wf.WorkflowExecution.ID = options.ID } if len(options.TaskQueue) > 0 { diff -Nru temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/internal/log/memory_logger.go temporal-1.22.5/src/vendor/go.temporal.io/sdk/internal/log/memory_logger.go --- temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/internal/log/memory_logger.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.temporal.io/sdk/internal/log/memory_logger.go 2024-02-23 09:46:13.000000000 +0000 @@ -31,21 +31,21 @@ "go.temporal.io/sdk/log" ) -// MemoryLogger is Logger implementation that stores logs in memory (useful for testing). Use Lines() to get log lines. -type MemoryLogger struct { +// MemoryLoggerWithoutWith is a Logger implementation that stores logs in memory (useful for testing). Use Lines() to get log lines. +type MemoryLoggerWithoutWith struct { lines *[]string globalKeyvals string } -// NewMemoryLogger creates new instance of MemoryLogger. -func NewMemoryLogger() *MemoryLogger { +// NewMemoryLoggerWithoutWith creates new instance of MemoryLoggerWithoutWith. 
+func NewMemoryLoggerWithoutWith() *MemoryLoggerWithoutWith { var lines []string - return &MemoryLogger{ + return &MemoryLoggerWithoutWith{ lines: &lines, } } -func (l *MemoryLogger) println(level, msg string, keyvals []interface{}) { +func (l *MemoryLoggerWithoutWith) println(level, msg string, keyvals []interface{}) { // To avoid extra space when globalKeyvals is not specified. if l.globalKeyvals == "" { *l.lines = append(*l.lines, fmt.Sprintln(append([]interface{}{level, msg}, keyvals...)...)) @@ -55,28 +55,44 @@ } // Debug appends message to the log. -func (l *MemoryLogger) Debug(msg string, keyvals ...interface{}) { +func (l *MemoryLoggerWithoutWith) Debug(msg string, keyvals ...interface{}) { l.println("DEBUG", msg, keyvals) } // Info appends message to the log. -func (l *MemoryLogger) Info(msg string, keyvals ...interface{}) { +func (l *MemoryLoggerWithoutWith) Info(msg string, keyvals ...interface{}) { l.println("INFO ", msg, keyvals) } // Warn appends message to the log. -func (l *MemoryLogger) Warn(msg string, keyvals ...interface{}) { +func (l *MemoryLoggerWithoutWith) Warn(msg string, keyvals ...interface{}) { l.println("WARN ", msg, keyvals) } // Error appends message to the log. -func (l *MemoryLogger) Error(msg string, keyvals ...interface{}) { +func (l *MemoryLoggerWithoutWith) Error(msg string, keyvals ...interface{}) { l.println("ERROR", msg, keyvals) } -// With returns new logger the prepend every log entry with keyvals. +// Lines returns written log lines. +func (l *MemoryLoggerWithoutWith) Lines() []string { + return *l.lines +} + +type MemoryLogger struct { + *MemoryLoggerWithoutWith +} + +// NewMemoryLogger creates new instance of MemoryLogger. +func NewMemoryLogger() *MemoryLogger { + return &MemoryLogger{ + NewMemoryLoggerWithoutWith(), + } +} + +// With returns new logger that prepend every log entry with keyvals. func (l *MemoryLogger) With(keyvals ...interface{}) log.Logger { - logger := &MemoryLogger{ + logger := &MemoryLoggerWithoutWith{ lines: l.lines, } @@ -88,8 +104,3 @@ return logger } - -// Lines returns written log lines. -func (l *MemoryLogger) Lines() []string { - return *l.lines -} diff -Nru temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/internal/log/replay_logger.go temporal-1.22.5/src/vendor/go.temporal.io/sdk/internal/log/replay_logger.go --- temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/internal/log/replay_logger.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.temporal.io/sdk/internal/log/replay_logger.go 2024-02-23 09:46:13.000000000 +0000 @@ -28,6 +28,10 @@ "go.temporal.io/sdk/log" ) +var _ log.Logger = (*ReplayLogger)(nil) +var _ log.WithLogger = (*ReplayLogger)(nil) +var _ log.WithSkipCallers = (*ReplayLogger)(nil) + // ReplayLogger is Logger implementation that is aware of replay. type ReplayLogger struct { logger log.Logger @@ -76,7 +80,14 @@ } } -// With returns new logger the prepend every log entry with keyvals. +// With returns new logger that prepend every log entry with keyvals. 
func (l *ReplayLogger) With(keyvals ...interface{}) log.Logger { return NewReplayLogger(log.With(l.logger, keyvals...), l.isReplay, l.enableLoggingInReplay) } + +func (l *ReplayLogger) WithCallerSkip(depth int) log.Logger { + if sl, ok := l.logger.(log.WithSkipCallers); ok { + return NewReplayLogger(sl.WithCallerSkip(depth), l.isReplay, l.enableLoggingInReplay) + } + return l +} diff -Nru temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/internal/schedule_client.go temporal-1.22.5/src/vendor/go.temporal.io/sdk/internal/schedule_client.go --- temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/internal/schedule_client.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.temporal.io/sdk/internal/schedule_client.go 2024-02-23 09:46:13.000000000 +0000 @@ -269,10 +269,12 @@ // On ScheduleHandle.Describe() or ScheduleHandle.Update() Memo will be returned as *commonpb.Payload. Memo map[string]interface{} - // SearchAttributes - Optional indexed info that can be used in query of List/Scan/Count workflow APIs (only - // supported when Temporal server is using advanced visiblity). The key and value type must be registered on Temporal server side. + // SearchAttributes - Optional indexed info that can be used in query of List/Scan/Count workflow APIs. The key and value type must be registered on Temporal server side. // Use GetSearchAttributes API to get valid key and corresponding value type. // On ScheduleHandle.Describe() or ScheduleHandle.Update() SearchAttributes will be returned as *commonpb.Payload. + // For supported operations on different server versions see [Visibility]. + // + // [Visibility]: https://docs.temporal.io/visibility SearchAttributes map[string]interface{} } @@ -338,9 +340,11 @@ // Memo - Optional non-indexed info that will be shown in list schedules. Memo map[string]interface{} - // SearchAttributes - Optional indexed info that can be used in query of List schedules APIs (only - // supported when Temporal server is using advanced visibility). The key and value type must be registered on Temporal server side. + // SearchAttributes - Optional indexed info that can be used in query of List schedules APIs. The key and value type must be registered on Temporal server side. // Use GetSearchAttributes API to get valid key and corresponding value type. + // For supported operations on different server versions see [Visibility]. + // + // [Visibility]: https://docs.temporal.io/visibility SearchAttributes map[string]interface{} } @@ -395,9 +399,11 @@ // Memo - Non-indexed user supplied information. Memo *commonpb.Memo - // SearchAttributes - Indexed info that can be used in query of List schedules APIs (only - // supported when Temporal server is using advanced visibility). The key and value type must be registered on Temporal server side. + // SearchAttributes - Indexed info that can be used in query of List schedules APIs. The key and value type must be registered on Temporal server side. // Use GetSearchAttributes API to get valid key and corresponding value type. + // For supported operations on different server versions see [Visibility]. + // + // [Visibility]: https://docs.temporal.io/visibility SearchAttributes *commonpb.SearchAttributes } @@ -497,7 +503,7 @@ // ScheduleHandle represents a created schedule. ScheduleHandle interface { - // GetID returns the schedule ID asssociated with this handle. + // GetID returns the schedule ID associated with this handle. 
GetID() string // Delete the Schedule @@ -571,9 +577,11 @@ // Memo - Non-indexed user supplied information. Memo *commonpb.Memo - // SearchAttributes - Indexed info that can be used in query of List schedules APIs (only - // supported when Temporal server is using advanced visibility). The key and value type must be registered on Temporal server side. + // SearchAttributes - Indexed info that can be used in query of List schedules APIs. The key and value type must be registered on Temporal server side. // Use GetSearchAttributes API to get valid key and corresponding value type. + // For supported operations on different server versions see [Visibility]. + // + // [Visibility]: https://docs.temporal.io/visibility SearchAttributes *commonpb.SearchAttributes } @@ -599,7 +607,7 @@ // Create a new Schedule. Create(ctx context.Context, options ScheduleOptions) (ScheduleHandle, error) - // List returns an interator to list all schedules + // List returns an iterator to list all schedules // // Note: When using advanced visibility List is eventually consistent. List(ctx context.Context, options ScheduleListOptions) (ScheduleListIterator, error) diff -Nru temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/internal/session.go temporal-1.22.5/src/vendor/go.temporal.io/sdk/internal/session.go --- temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/internal/session.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.temporal.io/sdk/internal/session.go 2024-02-23 09:46:13.000000000 +0000 @@ -195,7 +195,7 @@ // returns an error under the same situation as CreateSession() or the token passed in is invalid. // It also has the same usage as CreateSession(). // -// The main usage of RecreateSession is for long sessions that are splited into multiple runs. At the end of +// The main usage of RecreateSession is for long sessions that are split into multiple runs. At the end of // one run, complete the current session, get recreateToken from sessionInfo by calling SessionInfo.GetRecreateToken() // and pass the token to the next run. In the new run, session can be recreated using that token. func RecreateSession(ctx Context, recreateToken []byte, sessionOptions *SessionOptions) (Context, error) { @@ -232,7 +232,7 @@ // even though the creation activity has been canceled, the session worker doesn't know. The worker will wait until // next heartbeat to figure out that the workflow is completed and then release the resource. We need to make sure the // completion activity is executed before the workflow exits. - // the taskqueue will be overrided to use the one stored in sessionInfo. + // the taskqueue will be overridden to use the one stored in sessionInfo. err := ExecuteActivity(completionCtx, sessionCompletionActivityName, sessionInfo.SessionID).Get(completionCtx, nil) if err != nil { GetLogger(completionCtx).Warn("Complete session activity failed", tagError, err) diff -Nru temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/internal/version.go temporal-1.22.5/src/vendor/go.temporal.io/sdk/internal/version.go --- temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/internal/version.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.temporal.io/sdk/internal/version.go 2024-02-23 09:46:13.000000000 +0000 @@ -30,7 +30,10 @@ const ( // SDKVersion is a semver (https://semver.org/) that represents the version of this Temporal GoSDK. // Server validates if SDKVersion fits its supported range and rejects request if it doesn't. 
- SDKVersion = "1.23.0" + SDKVersion = "1.25.1" + + // SDKName represents the name of the SDK. + SDKName = clientNameHeaderValue // SupportedServerVersions is a semver rages (https://github.com/blang/semver#ranges) of server versions that // are supported by this Temporal SDK. diff -Nru temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/internal/worker.go temporal-1.22.5/src/vendor/go.temporal.io/sdk/internal/worker.go --- temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/internal/worker.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.temporal.io/sdk/internal/worker.go 2024-02-23 09:46:13.000000000 +0000 @@ -238,6 +238,7 @@ // operate on workflows it claims to be compatible with. You must set BuildID if this flag // is true. // NOTE: Experimental + // Note: Cannot be enabled at the same time as EnableSessionWorker UseBuildIDForVersioning bool } ) diff -Nru temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/internal/worker_version_sets.go temporal-1.22.5/src/vendor/go.temporal.io/sdk/internal/worker_version_sets.go --- temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/internal/worker_version_sets.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.temporal.io/sdk/internal/worker_version_sets.go 2024-02-23 09:46:13.000000000 +0000 @@ -25,10 +25,14 @@ import ( "errors" + enumspb "go.temporal.io/api/enums/v1" taskqueuepb "go.temporal.io/api/taskqueue/v1" "go.temporal.io/api/workflowservice/v1" ) +// A stand-in for a Build Id for unversioned Workers. +const UnversionedBuildID = "" + // VersioningIntent indicates whether the user intends certain commands to be run on // a compatible worker build ID version or not. type VersioningIntent int @@ -47,6 +51,24 @@ VersioningIntentDefault ) +// TaskReachability specifies which category of tasks may reach a worker on a versioned task queue. +// Used both in a reachability query and its response. +type TaskReachability int + +const ( + // TaskReachabilityUnspecified indicates the reachability was not specified + TaskReachabilityUnspecified = iota + // TaskReachabilityNewWorkflows indicates the Build Id might be used by new workflows + TaskReachabilityNewWorkflows + // TaskReachabilityExistingWorkflows indicates the Build Id might be used by open workflows + // and/or closed workflows. + TaskReachabilityExistingWorkflows + // TaskReachabilityOpenWorkflows indicates the Build Id might be used by open workflows. + TaskReachabilityOpenWorkflows + // TaskReachabilityClosedWorkflows indicates the Build Id might be used by closed workflows + TaskReachabilityClosedWorkflows +) + type ( // UpdateWorkerBuildIdCompatibilityOptions is the input to // Client.UpdateWorkerBuildIdCompatibility. 
@@ -72,7 +94,7 @@ } BuildIDOpAddNewCompatibleVersion struct { BuildID string - ExistingCompatibleBuildId string + ExistingCompatibleBuildID string MakeSetDefault bool } BuildIDOpPromoteSet struct { @@ -102,13 +124,13 @@ } case *BuildIDOpAddNewCompatibleVersion: - if v.ExistingCompatibleBuildId == "" { - return nil, errors.New("missing ExistingCompatibleBuildId") + if v.ExistingCompatibleBuildID == "" { + return nil, errors.New("missing ExistingCompatibleBuildID") } req.Operation = &workflowservice.UpdateWorkerBuildIdCompatibilityRequest_AddNewCompatibleBuildId{ AddNewCompatibleBuildId: &workflowservice.UpdateWorkerBuildIdCompatibilityRequest_AddNewCompatibleVersion{ NewBuildId: v.BuildID, - ExistingCompatibleBuildId: v.ExistingCompatibleBuildId, + ExistingCompatibleBuildId: v.ExistingCompatibleBuildID, MakeSetDefault: v.MakeSetDefault, }, } @@ -130,6 +152,38 @@ MaxSets int } +type GetWorkerTaskReachabilityOptions struct { + // BuildIDs - The build IDs to query the reachability of. At least one build ID must be provided. + BuildIDs []string + // TaskQueues - The task queues with Build IDs defined on them that the request is + // concerned with. + // Optional: defaults to all task queues + TaskQueues []string + // Reachability - The reachability this request is concerned with. + // Optional: defaults to all types of reachability + Reachability TaskReachability +} + +type WorkerTaskReachability struct { + // BuildIDReachability - map of build IDs and their reachability information + // May contain an entry with UnversionedBuildID for an unversioned worker + BuildIDReachability map[string]*BuildIDReachability +} + +type BuildIDReachability struct { + // TaskQueueReachable map of task queues and their reachability information. + TaskQueueReachable map[string]*TaskQueueReachability + // UnretrievedTaskQueues is a list of task queues not retrieved because the server limits + // the number that can be queried at once. + UnretrievedTaskQueues []string +} + +type TaskQueueReachability struct { + // TaskQueueReachability for a worker in a single task queue. + // If TaskQueueReachability is empty, this worker is considered unreachable in this task queue. + TaskQueueReachability []TaskReachability +} + // WorkerBuildIDVersionSets is the response for Client.GetWorkerBuildIdCompatibility and represents the sets // of worker build id based versions. 
type WorkerBuildIDVersionSets struct { @@ -176,6 +230,88 @@ return result } +func workerTaskReachabilityFromProtoResponse(response *workflowservice.GetWorkerTaskReachabilityResponse) *WorkerTaskReachability { + if response == nil { + return nil + } + return &WorkerTaskReachability{ + BuildIDReachability: buildIDReachabilityFromProto(response.GetBuildIdReachability()), + } +} + +func buildIDReachabilityFromProto(sets []*taskqueuepb.BuildIdReachability) map[string]*BuildIDReachability { + if sets == nil { + return nil + } + result := make(map[string]*BuildIDReachability, len(sets)) + for _, s := range sets { + retrievedTaskQueues, unretrievedTaskQueues := taskQueueReachabilityFromProto(s.GetTaskQueueReachability()) + result[s.GetBuildId()] = &BuildIDReachability{ + TaskQueueReachable: retrievedTaskQueues, + UnretrievedTaskQueues: unretrievedTaskQueues, + } + } + return result +} + +func taskQueueReachabilityFromProto(sets []*taskqueuepb.TaskQueueReachability) (map[string]*TaskQueueReachability, []string) { + if sets == nil { + return nil, nil + } + retrievedTaskQueues := make(map[string]*TaskQueueReachability, len(sets)) + unretrievedTaskQueues := make([]string, 0, len(sets)) + for _, s := range sets { + reachability := make([]TaskReachability, len(s.GetReachability())) + for i, r := range s.GetReachability() { + reachability[i] = taskReachabilityFromProto(r) + } + if len(reachability) == 1 && reachability[0] == TaskReachabilityUnspecified { + unretrievedTaskQueues = append(unretrievedTaskQueues, s.GetTaskQueue()) + } else { + retrievedTaskQueues[s.GetTaskQueue()] = &TaskQueueReachability{ + TaskQueueReachability: reachability, + } + } + + } + return retrievedTaskQueues, unretrievedTaskQueues +} + +func taskReachabilityToProto(r TaskReachability) enumspb.TaskReachability { + switch r { + case TaskReachabilityUnspecified: + return enumspb.TASK_REACHABILITY_UNSPECIFIED + case TaskReachabilityNewWorkflows: + return enumspb.TASK_REACHABILITY_NEW_WORKFLOWS + case TaskReachabilityExistingWorkflows: + return enumspb.TASK_REACHABILITY_EXISTING_WORKFLOWS + case TaskReachabilityOpenWorkflows: + return enumspb.TASK_REACHABILITY_OPEN_WORKFLOWS + case TaskReachabilityClosedWorkflows: + return enumspb.TASK_REACHABILITY_CLOSED_WORKFLOWS + default: + panic("unknown task reachability") + + } +} + +func taskReachabilityFromProto(r enumspb.TaskReachability) TaskReachability { + switch r { + case enumspb.TASK_REACHABILITY_UNSPECIFIED: + return TaskReachabilityUnspecified + case enumspb.TASK_REACHABILITY_NEW_WORKFLOWS: + return TaskReachabilityNewWorkflows + case enumspb.TASK_REACHABILITY_EXISTING_WORKFLOWS: + return TaskReachabilityExistingWorkflows + case enumspb.TASK_REACHABILITY_OPEN_WORKFLOWS: + return TaskReachabilityOpenWorkflows + case enumspb.TASK_REACHABILITY_CLOSED_WORKFLOWS: + return TaskReachabilityClosedWorkflows + default: + panic("unknown task reachability") + } +} + func (v *BuildIDOpAddNewIDInNewDefaultSet) targetedBuildId() string { return v.BuildID } func (v *BuildIDOpAddNewCompatibleVersion) targetedBuildId() string { return v.BuildID } func (v *BuildIDOpPromoteSet) targetedBuildId() string { return v.BuildID } @@ -188,8 +324,9 @@ if intent == VersioningIntentDefault { useCompat = false } else if intent == VersioningIntentUnspecified { - // If the target task queue doesn't match ours, use the default version - if workerTq != TargetTq { + // If the target task queue doesn't match ours, use the default version. Empty target counts + // as matching. 
+ if TargetTq != "" && workerTq != TargetTq { useCompat = false } } diff -Nru temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/internal/workflow.go temporal-1.22.5/src/vendor/go.temporal.io/sdk/internal/workflow.go --- temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/internal/workflow.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.temporal.io/sdk/internal/workflow.go 2024-02-23 09:46:13.000000000 +0000 @@ -124,9 +124,8 @@ // The branch is automatically removed after the channel is closed and callback function is called once // with more parameter set to false. AddReceive(c ReceiveChannel, f func(c ReceiveChannel, more bool)) Selector - // AddSend registers a callback function to be called when sending message to channel is not going to block. - // The callback is called when Select(ctx) is called. - // The sending message to the channel is expected to be done by the callback function + // AddSend registers a callback function to be called when a message is sent on a channel. + // The callback is called after the message is sent to the channel and Select(ctx) is called AddSend(c SendChannel, v interface{}, f func()) Selector // AddFuture registers a callback function to be called when a future is ready. // The callback is called when Select(ctx) is called. @@ -289,9 +288,11 @@ // Memo - Optional non-indexed info that will be shown in list workflow. Memo map[string]interface{} - // SearchAttributes - Optional indexed info that can be used in query of List/Scan/Count workflow APIs (only - // supported when Temporal server is using ElasticSearch). The key and value type must be registered on Temporal server side. + // SearchAttributes - Optional indexed info that can be used in query of List/Scan/Count workflow APIs. The key and value type must be registered on Temporal server side. // Use GetSearchAttributes API to get valid key and corresponding value type. + // For supported operations on different server versions see [Visibility]. + // + // [Visibility]: https://docs.temporal.io/visibility SearchAttributes map[string]interface{} // ParentClosePolicy - Optional policy to decide what to do for the child. @@ -343,6 +344,12 @@ // Await blocks the calling thread until condition() returns true // Returns CanceledError if the ctx is canceled. func Await(ctx Context, condition func() bool) error { + assertNotInReadOnlyState(ctx) + state := getState(ctx) + return state.dispatcher.interceptor.Await(ctx, condition) +} + +func (wc *workflowEnvironmentInterceptor) Await(ctx Context, condition func() bool) error { state := getState(ctx) defer state.unblocked() @@ -362,6 +369,12 @@ // AwaitWithTimeout blocks the calling thread until condition() returns true // Returns ok equals to false if timed out and err equals to CanceledError if the ctx is canceled. func AwaitWithTimeout(ctx Context, timeout time.Duration, condition func() bool) (ok bool, err error) { + assertNotInReadOnlyState(ctx) + state := getState(ctx) + return state.dispatcher.interceptor.AwaitWithTimeout(ctx, timeout, condition) +} + +func (wc *workflowEnvironmentInterceptor) AwaitWithTimeout(ctx Context, timeout time.Duration, condition func() bool) (ok bool, err error) { state := getState(ctx) defer state.unblocked() timer := NewTimer(ctx, timeout) @@ -417,18 +430,21 @@ // NewNamedSelector creates a new Selector instance with a given human readable name. // Name appears in stack traces that are blocked on this Selector. 
-func NewNamedSelector(_ Context, name string) Selector { +func NewNamedSelector(ctx Context, name string) Selector { + assertNotInReadOnlyState(ctx) return &selectorImpl{name: name} } // NewWaitGroup creates a new WaitGroup instance. func NewWaitGroup(ctx Context) WaitGroup { + assertNotInReadOnlyState(ctx) f, s := NewFuture(ctx) return &waitGroupImpl{future: f, settable: s} } // Go creates a new coroutine. It has similar semantic to goroutine in a context of the workflow. func Go(ctx Context, f func(ctx Context)) { + assertNotInReadOnlyState(ctx) state := getState(ctx) state.dispatcher.interceptor.Go(ctx, "", f) } @@ -437,12 +453,14 @@ // It has similar semantic to goroutine in a context of the workflow. // Name appears in stack traces that are blocked on this Channel. func GoNamed(ctx Context, name string, f func(ctx Context)) { + assertNotInReadOnlyState(ctx) state := getState(ctx) state.dispatcher.interceptor.Go(ctx, name, f) } // NewFuture creates a new future as well as associated Settable that is used to set its value. func NewFuture(ctx Context) (Future, Settable) { + assertNotInReadOnlyState(ctx) impl := &futureImpl{channel: NewChannel(ctx).(*channelImpl)} return impl, impl } @@ -547,6 +565,7 @@ // // ExecuteActivity returns Future with activity result or failure. func ExecuteActivity(ctx Context, activity interface{}, args ...interface{}) Future { + assertNotInReadOnlyState(ctx) i := getWorkflowOutboundInterceptor(ctx) registry := getRegistryFromWorkflowContext(ctx) activityType := getActivityFunctionName(registry, activity) @@ -618,6 +637,7 @@ if cancellable { cancellationCallback.fn = func(v interface{}, more bool) bool { + assertNotInReadOnlyStateCancellation(ctx) if ctx.Err() == ErrCanceled { wc.env.RequestCancelActivity(a) } @@ -667,6 +687,7 @@ // // ExecuteLocalActivity returns Future with local activity result or failure. func ExecuteLocalActivity(ctx Context, activity interface{}, args ...interface{}) Future { + assertNotInReadOnlyState(ctx) i := getWorkflowOutboundInterceptor(ctx) env := getWorkflowEnvironment(ctx) activityType, isMethod := getFunctionName(activity) @@ -817,6 +838,7 @@ if cancellable { cancellationCallback.fn = func(v interface{}, more bool) bool { + assertNotInReadOnlyStateCancellation(ctx) if ctx.Err() == ErrCanceled { getWorkflowEnvironment(ctx).RequestCancelLocalActivity(la) } @@ -855,6 +877,7 @@ // // ExecuteChildWorkflow returns ChildWorkflowFuture. func ExecuteChildWorkflow(ctx Context, childWorkflow interface{}, args ...interface{}) ChildWorkflowFuture { + assertNotInReadOnlyState(ctx) i := getWorkflowOutboundInterceptor(ctx) env := getWorkflowEnvironment(ctx) workflowType, err := getWorkflowFunctionName(env.GetRegistry(), childWorkflow) @@ -929,6 +952,7 @@ // which would result in an uncanceled workflow. if cancellable { cancellationCallback.fn = func(v interface{}, _ bool) bool { + assertNotInReadOnlyStateCancellation(ctx) if ctx.Err() == ErrCanceled && !mainFuture.IsReady() { // child workflow started, and ctx canceled getWorkflowEnvironment(ctx).RequestCancelChildWorkflow(options.Namespace, r.ID) @@ -980,7 +1004,14 @@ // workflow, it is this worker's current value. BinaryChecksum string - currentHistoryLength int + continueAsNewSuggested bool + currentHistorySize int + currentHistoryLength int +} + +// UpdateInfo information about a currently running update +type UpdateInfo struct { + ID string } // GetBinaryChecksum return binary checksum. 
@@ -997,6 +1028,19 @@ return wInfo.currentHistoryLength } +// GetCurrentHistorySize returns the current byte size of history when called. +// This value may change throughout the life of the workflow. +func (wInfo *WorkflowInfo) GetCurrentHistorySize() int { + return wInfo.currentHistorySize +} + +// GetContinueAsNewSuggested returns true if the server is configured to suggest continue as new +// and it is suggested. +// This value may change throughout the life of the workflow. +func (wInfo *WorkflowInfo) GetContinueAsNewSuggested() bool { + return wInfo.continueAsNewSuggested +} + // GetWorkflowInfo extracts info of a current workflow from a context. func GetWorkflowInfo(ctx Context) *WorkflowInfo { i := getWorkflowOutboundInterceptor(ctx) @@ -1007,6 +1051,20 @@ return wc.env.WorkflowInfo() } +// GetUpdateInfo extracts info of a currently running update from a context. +func GetUpdateInfo(ctx Context) *UpdateInfo { + i := getWorkflowOutboundInterceptor(ctx) + return i.GetUpdateInfo(ctx) +} + +func (wc *workflowEnvironmentInterceptor) GetUpdateInfo(ctx Context) *UpdateInfo { + uc := ctx.Value(updateInfoContextKey) + if uc == nil { + panic("getWorkflowOutboundInterceptor: No update associated with this context") + } + return uc.(*UpdateInfo) +} + // GetLogger returns a logger to be used in workflow's context func GetLogger(ctx Context) log.Logger { i := getWorkflowOutboundInterceptor(ctx) @@ -1043,6 +1101,7 @@ // timer by cancel the Context (using context from workflow.WithCancel(ctx)) and that will cancel the timer. After timer // is canceled, the returned Future become ready, and Future.Get() will return *CanceledError. func NewTimer(ctx Context, d time.Duration) Future { + assertNotInReadOnlyState(ctx) i := getWorkflowOutboundInterceptor(ctx) return i.NewTimer(ctx, d) } @@ -1066,6 +1125,7 @@ if timerID != nil && cancellable { cancellationCallback.fn = func(v interface{}, more bool) bool { + assertNotInReadOnlyStateCancellation(ctx) if !future.IsReady() { wc.env.RequestCancelTimer(*timerID) } @@ -1086,6 +1146,7 @@ // reasons the ctx could be canceled: 1) your workflow code cancel the ctx (with workflow.WithCancel(ctx)); // 2) your workflow itself is canceled by external request. func Sleep(ctx Context, d time.Duration) (err error) { + assertNotInReadOnlyState(ctx) i := getWorkflowOutboundInterceptor(ctx) return i.Sleep(ctx, d) } @@ -1107,6 +1168,7 @@ // // RequestCancelExternalWorkflow return Future with failure or empty success result. func RequestCancelExternalWorkflow(ctx Context, workflowID, runID string) Future { + assertNotInReadOnlyState(ctx) i := getWorkflowOutboundInterceptor(ctx) return i.RequestCancelExternalWorkflow(ctx, workflowID, runID) } @@ -1146,6 +1208,7 @@ // // SignalExternalWorkflow return Future with failure or empty success result. func SignalExternalWorkflow(ctx Context, workflowID, runID, signalName string, arg interface{}) Future { + assertNotInReadOnlyState(ctx) i := getWorkflowOutboundInterceptor(ctx) // Put header on context before executing ctx = workflowContextWithNewHeader(ctx) @@ -1235,8 +1298,11 @@ // "CustomKeywordField": "seattle", // } // -// This is only supported when using ElasticSearch. +// For supported operations on different server versions see [Visibility]. 
+// +// [Visibility]: https://docs.temporal.io/visibility func UpsertSearchAttributes(ctx Context, attributes map[string]interface{}) error { + assertNotInReadOnlyState(ctx) i := getWorkflowOutboundInterceptor(ctx) return i.UpsertSearchAttributes(ctx, attributes) } @@ -1275,6 +1341,7 @@ // // This is only supported with Temporal Server 1.18+ func UpsertMemo(ctx Context, memo map[string]interface{}) error { + assertNotInReadOnlyState(ctx) i := getWorkflowOutboundInterceptor(ctx) return i.UpsertMemo(ctx, memo) } @@ -1405,6 +1472,7 @@ // GetSignalChannel returns channel corresponding to the signal name. func GetSignalChannel(ctx Context, signalName string) ReceiveChannel { + assertNotInReadOnlyState(ctx) i := getWorkflowOutboundInterceptor(ctx) return i.GetSignalChannel(ctx, signalName) } @@ -1473,6 +1541,7 @@ // .... // } func SideEffect(ctx Context, f func(ctx Context) interface{}) converter.EncodedValue { + assertNotInReadOnlyState(ctx) i := getWorkflowOutboundInterceptor(ctx) return i.SideEffect(ctx, f) } @@ -1481,6 +1550,9 @@ dc := getDataConverterFromWorkflowContext(ctx) future, settable := NewFuture(ctx) wrapperFunc := func() (*commonpb.Payloads, error) { + coroutineState := getState(ctx) + defer coroutineState.dispatcher.setIsReadOnly(false) + coroutineState.dispatcher.setIsReadOnly(true) r := f(ctx) return encodeArg(dc, r) } @@ -1498,7 +1570,7 @@ // MutableSideEffect executes the provided function once, then it looks up the history for the value with the given id. // If there is no existing value, then it records the function result as a value with the given id on history; // otherwise, it compares whether the existing value from history has changed from the new function result by calling -// theprovided equals function. If they are equal, it returns the value without recording a new one in history; +// the provided equals function. If they are equal, it returns the value without recording a new one in history; // // otherwise, it records the new value with the same id on history. // @@ -1512,12 +1584,16 @@ // // One good use case of MutableSideEffect() is to access dynamically changing config without breaking determinism. 
func MutableSideEffect(ctx Context, id string, f func(ctx Context) interface{}, equals func(a, b interface{}) bool) converter.EncodedValue { + assertNotInReadOnlyState(ctx) i := getWorkflowOutboundInterceptor(ctx) return i.MutableSideEffect(ctx, id, f, equals) } func (wc *workflowEnvironmentInterceptor) MutableSideEffect(ctx Context, id string, f func(ctx Context) interface{}, equals func(a, b interface{}) bool) converter.EncodedValue { wrapperFunc := func() interface{} { + coroutineState := getState(ctx) + defer coroutineState.dispatcher.setIsReadOnly(false) + coroutineState.dispatcher.setIsReadOnly(true) return f(ctx) } return wc.env.MutableSideEffect(id, wrapperFunc, equals) @@ -1546,7 +1622,7 @@ // // The backwards compatible way to execute the update is // -// v := GetVersion(ctx, "fooChange", DefaultVersion, 1) +// v := GetVersion(ctx, "fooChange", DefaultVersion, 0) // if v == DefaultVersion { // err = workflow.ExecuteActivity(ctx, foo).Get(ctx, nil) // } else { @@ -1555,10 +1631,10 @@ // // Then bar has to be changed to baz: // -// v := GetVersion(ctx, "fooChange", DefaultVersion, 2) +// v := GetVersion(ctx, "fooChange", DefaultVersion, 1) // if v == DefaultVersion { // err = workflow.ExecuteActivity(ctx, foo).Get(ctx, nil) -// } else if v == 1 { +// } else if v == 0 { // err = workflow.ExecuteActivity(ctx, bar).Get(ctx, nil) // } else { // err = workflow.ExecuteActivity(ctx, baz).Get(ctx, nil) @@ -1566,8 +1642,8 @@ // // Later when there are no workflow executions running DefaultVersion the correspondent branch can be removed: // -// v := GetVersion(ctx, "fooChange", 1, 2) -// if v == 1 { +// v := GetVersion(ctx, "fooChange", 0, 1) +// if v == 0 { // err = workflow.ExecuteActivity(ctx, bar).Get(ctx, nil) // } else { // err = workflow.ExecuteActivity(ctx, baz).Get(ctx, nil) @@ -1575,12 +1651,12 @@ // // It is recommended to keep the GetVersion() call even if single branch is left: // -// GetVersion(ctx, "fooChange", 2, 2) +// GetVersion(ctx, "fooChange", 1, 1) // err = workflow.ExecuteActivity(ctx, baz).Get(ctx, nil) // // The reason to keep it is: 1) it ensures that if there is older version execution still running, it will fail here // and not proceed; 2) if you ever need to make more changes for “fooChange”, for example change activity from baz to qux, -// you just need to update the maxVersion from 2 to 3. +// you just need to update the maxVersion from 1 to 2. // // Note that, you only need to preserve the first call to GetVersion() for each changeID. All subsequent call to GetVersion() // with same changeID are safe to remove. However, if you really want to get rid of the first GetVersion() call as well, @@ -1588,13 +1664,14 @@ // as changeID. If you ever need to make changes to that same part like change from baz to qux, you would need to use a // different changeID like “fooChange-fix2”, and start minVersion from DefaultVersion again. 
The code would looks like: // -// v := workflow.GetVersion(ctx, "fooChange-fix2", workflow.DefaultVersion, 1) +// v := workflow.GetVersion(ctx, "fooChange-fix2", workflow.DefaultVersion, 0) // if v == workflow.DefaultVersion { // err = workflow.ExecuteActivity(ctx, baz, data).Get(ctx, nil) // } else { // err = workflow.ExecuteActivity(ctx, qux, data).Get(ctx, nil) // } func GetVersion(ctx Context, changeID string, minSupported, maxSupported Version) Version { + assertNotInReadOnlyState(ctx) i := getWorkflowOutboundInterceptor(ctx) return i.GetVersion(ctx, changeID, minSupported, maxSupported) } @@ -1643,6 +1720,7 @@ // return nil // } func SetQueryHandler(ctx Context, queryType string, handler interface{}) error { + assertNotInReadOnlyState(ctx) i := getWorkflowOutboundInterceptor(ctx) return i.SetQueryHandler(ctx, queryType, handler) } @@ -1672,6 +1750,7 @@ // // NOTE: Experimental func SetUpdateHandler(ctx Context, updateName string, handler interface{}, opts UpdateHandlerOptions) error { + assertNotInReadOnlyState(ctx) i := getWorkflowOutboundInterceptor(ctx) return i.SetUpdateHandler(ctx, updateName, handler, opts) } diff -Nru temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/internal/workflow_testsuite.go temporal-1.22.5/src/vendor/go.temporal.io/sdk/internal/workflow_testsuite.go --- temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/internal/workflow_testsuite.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.temporal.io/sdk/internal/workflow_testsuite.go 2024-02-23 09:46:13.000000000 +0000 @@ -296,10 +296,34 @@ // SetCurrentHistoryLength sets the value that is returned from // GetInfo(ctx).GetCurrentHistoryLength(). +// +// Note: this value may not be up to date if accessed inside a query. func (e *TestWorkflowEnvironment) SetCurrentHistoryLength(length int) { e.impl.setCurrentHistoryLength(length) } +// setCurrentHistoryLength sets the value that is returned from +// GetInfo(ctx).GetCurrentHistorySize(). +// +// Note: this value may not be up to date if accessed inside a query. +func (e *TestWorkflowEnvironment) SetCurrentHistorySize(length int) { + e.impl.setCurrentHistorySize(length) +} + +// SetContinueAsNewSuggested set sets the value that is returned from +// GetInfo(ctx).GetContinueAsNewSuggested(). +// +// Note: this value may not be up to date if accessed inside a query. +func (e *TestWorkflowEnvironment) SetContinueAsNewSuggested(suggest bool) { + e.impl.setContinueAsNewSuggested(suggest) +} + +// SetContinuedExecutionRunID sets the value that is returned from +// GetInfo(ctx).ContinuedExecutionRunID +func (e *TestWorkflowEnvironment) SetContinuedExecutionRunID(rid string) { + e.impl.setContinuedExecutionRunID(rid) +} + // OnActivity setup a mock call for activity. Parameter activity must be activity function (func) or activity name (string). // You must call Return() with appropriate parameters on the returned *MockCallWrapper instance. 
The supplied parameters to // the Return() call should either be a function that has exact same signature as the mocked activity, or it should be @@ -388,19 +412,19 @@ var call *mock.Call switch fType.Kind() { case reflect.Func: - fnType := reflect.TypeOf(workflow) - if err := validateFnFormat(fnType, true); err != nil { + if err := validateFnFormat(fType, true); err != nil { panic(err) } fnName, _ := getWorkflowFunctionName(e.impl.registry, workflow) if alias, ok := e.impl.registry.getWorkflowAlias(fnName); ok { fnName = alias } + e.impl.registry.RegisterWorkflowWithOptions(workflow, RegisterWorkflowOptions{DisableAlreadyRegisteredCheck: true}) call = e.mock.On(fnName, args...) case reflect.String: call = e.mock.On(workflow.(string), args...) default: - panic("activity must be function or string") + panic("workflow must be function or string") } return e.wrapCall(call) @@ -818,7 +842,7 @@ e.impl.signalWorkflow(name, input, false) } -// SignalWorkflowByID sends signal to the currently running test workflow. +// SignalWorkflowByID signals a workflow by its ID. func (e *TestWorkflowEnvironment) SignalWorkflowByID(workflowID, signalName string, input interface{}) error { return e.impl.signalWorkflowByID(workflowID, signalName, input) } @@ -828,8 +852,14 @@ return e.impl.queryWorkflow(queryType, args...) } -func (e *TestWorkflowEnvironment) UpdateWorkflow(name string, uc UpdateCallbacks, args ...interface{}) { - e.impl.updateWorkflow(name, uc, args...) +// UpdateWorkflow sends an update to the currently running workflow. +func (e *TestWorkflowEnvironment) UpdateWorkflow(updateName, updateID string, uc UpdateCallbacks, args ...interface{}) { + e.impl.updateWorkflow(updateName, updateID, uc, args...) +} + +// UpdateWorkflowByID sends an update to a running workflow by its ID. +func (e *TestWorkflowEnvironment) UpdateWorkflowByID(workflowID, updateName, updateID string, uc UpdateCallbacks, args interface{}) error { + return e.impl.updateWorkflowByID(workflowID, updateName, updateID, uc, args) } // QueryWorkflowByID queries a child workflow by its ID and returns the result synchronously diff -Nru temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/log/logger.go temporal-1.22.5/src/vendor/go.temporal.io/sdk/log/logger.go --- temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/log/logger.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.temporal.io/sdk/log/logger.go 2024-02-23 09:46:13.000000000 +0000 @@ -32,4 +32,11 @@ Warn(msg string, keyvals ...interface{}) Error(msg string, keyvals ...interface{}) } + + // WithSkipCallers is an optional interface that a Logger can implement that + // may create a new child logger that skips the number of stack frames of the caller. + // This call must not mutate the original logger. + WithSkipCallers interface { + WithCallerSkip(int) Logger + } ) diff -Nru temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/log/slog.go temporal-1.22.5/src/vendor/go.temporal.io/sdk/log/slog.go --- temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/log/slog.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.temporal.io/sdk/log/slog.go 2024-02-23 09:46:13.000000000 +0000 @@ -0,0 +1,94 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +//go:build go1.21 + +package log + +import ( + "context" + "log/slog" + "runtime" + "time" +) + +type slogLogger struct { + logger *slog.Logger + depth int +} + +// NewStructuredLogger creates an adapter around the given logger to be passed to Temporal. +func NewStructuredLogger(logger *slog.Logger) Logger { + return &slogLogger{ + logger: logger, + depth: 3, + } +} + +func (s *slogLogger) Debug(msg string, keyvals ...interface{}) { + s.log(context.Background(), slog.LevelDebug, msg, keyvals...) +} + +func (s *slogLogger) Info(msg string, keyvals ...interface{}) { + s.log(context.Background(), slog.LevelInfo, msg, keyvals...) +} + +func (s *slogLogger) Warn(msg string, keyvals ...interface{}) { + s.log(context.Background(), slog.LevelWarn, msg, keyvals...) +} + +func (s *slogLogger) Error(msg string, keyvals ...interface{}) { + s.log(context.Background(), slog.LevelError, msg, keyvals...) +} + +func (s *slogLogger) log(ctx context.Context, level slog.Level, msg string, args ...any) { + if !s.logger.Enabled(ctx, level) { + return + } + + var pcs [1]uintptr + runtime.Callers(s.depth, pcs[:]) + + record := slog.NewRecord(time.Now(), level, msg, pcs[0]) + record.Add(args...) + + if ctx == nil { + ctx = context.Background() + } + _ = s.logger.Handler().Handle(ctx, record) +} + +func (s *slogLogger) With(keyvals ...interface{}) Logger { + return &slogLogger{ + logger: s.logger.With(keyvals...), + depth: s.depth, + } +} + +func (s *slogLogger) WithCallerSkip(depth int) Logger { + return &slogLogger{ + logger: s.logger, + depth: s.depth + depth, + } +} diff -Nru temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/log/with_logger.go temporal-1.22.5/src/vendor/go.temporal.io/sdk/log/with_logger.go --- temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/log/with_logger.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.temporal.io/sdk/log/with_logger.go 2024-02-23 09:46:13.000000000 +0000 @@ -39,6 +39,18 @@ return newWithLogger(logger, keyvals...) } +// Skip creates a child Logger that increase increases its' caller skip depth if it +// implements [WithSkipCallers]. Otherwise returns the original logger. 
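[Editor's note] The slog adapter added above (slog.go, gated behind go1.21) lets a standard library *slog.Logger back the SDK's log.Logger interface. A minimal, illustrative sketch of wiring it into a client follows; only log.NewStructuredLogger and the client.Options.Logger field come from the SDK, while the handler configuration and connection defaults are assumptions:

    package main

    import (
        "log/slog"
        "os"

        "go.temporal.io/sdk/client"
        "go.temporal.io/sdk/log"
    )

    func main() {
        // Any slog.Handler works here; a JSON handler on stderr is just an example.
        base := slog.New(slog.NewJSONHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelInfo}))

        c, err := client.Dial(client.Options{
            Logger: log.NewStructuredLogger(base), // adapter introduced in this release
        })
        if err != nil {
            panic(err)
        }
        defer c.Close()
    }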
+func Skip(logger Logger, depth int) Logger { + if sl, ok := logger.(WithSkipCallers); ok { + return sl.WithCallerSkip(depth) + } + return logger +} + +var _ Logger = (*withLogger)(nil) +var _ WithSkipCallers = (*withLogger)(nil) + type withLogger struct { logger Logger keyvals []interface{} @@ -71,3 +83,10 @@ func (l *withLogger) Error(msg string, keyvals ...interface{}) { l.logger.Error(msg, l.prependKeyvals(keyvals)...) } + +func (l *withLogger) WithCallerSkip(depth int) Logger { + if sl, ok := l.logger.(WithSkipCallers); ok { + return newWithLogger(sl.WithCallerSkip(depth), l.keyvals...) + } + return l +} diff -Nru temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/temporal/doc.go temporal-1.22.5/src/vendor/go.temporal.io/sdk/temporal/doc.go --- temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/temporal/doc.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.temporal.io/sdk/temporal/doc.go 2024-02-23 09:46:13.000000000 +0000 @@ -47,7 +47,7 @@ activity code. - testsuite - unit testing framework for activity and workflow testing -How Temporal works +# How Temporal works The Temporal hosted service brokers and persists events generated during workflow execution. Worker nodes owned and operated by customers execute the coordination and task logic. To facilitate the implementation of worker nodes Temporal @@ -56,7 +56,7 @@ In Temporal, you can code the logical flow of events separately as a workflow and code business logic as activities. The workflow identifies the activities and sequences them, while an activity executes the logic. -Key Features +# Key Features Dynamic workflow execution graphs - Determine the workflow execution graphs at runtime based on the data you are processing. Temporal does not pre-compute the execution graphs at compile time or at workflow start time. Therefore, you diff -Nru temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/testsuite/devserver.go temporal-1.22.5/src/vendor/go.temporal.io/sdk/testsuite/devserver.go --- temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/testsuite/devserver.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.temporal.io/sdk/testsuite/devserver.go 2024-02-23 09:46:13.000000000 +0000 @@ -39,7 +39,6 @@ "path/filepath" "runtime" "strings" - "syscall" "time" "go.temporal.io/sdk/client" @@ -110,8 +109,8 @@ } args := prepareCommand(&options, host, port, clientOptions.Namespace) - cmd := exec.Command(exePath, args...) - cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr + + cmd := newCmd(exePath, args...) clientOptions.Logger.Info("Starting DevServer", "ExePath", exePath, "Args", args) if err := cmd.Start(); err != nil { return nil, fmt.Errorf("failed starting: %w", err) @@ -361,7 +360,7 @@ // Stop the running server and wait for shutdown to complete. Error is propagated from server shutdown. 
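[Editor's note] The log.Skip helper added above only has an effect when the wrapped logger implements WithSkipCallers (as the slog adapter does); otherwise it returns the logger unchanged. A small hedged sketch of a helper that keeps caller attribution on its own caller, with logThroughHelper being an illustrative name:

    // Without Skip the reported caller would be this helper itself;
    // with Skip(l, 1) it is the helper's caller.
    func logThroughHelper(l log.Logger, msg string, keyvals ...interface{}) {
        log.Skip(l, 1).Info(msg, keyvals...)
    }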
func (s *DevServer) Stop() error { - if err := s.cmd.Process.Signal(syscall.SIGTERM); err != nil { + if err := sendInterrupt(s.cmd.Process); err != nil { return err } return s.cmd.Wait() diff -Nru temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/testsuite/freeport.go temporal-1.22.5/src/vendor/go.temporal.io/sdk/testsuite/freeport.go --- temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/testsuite/freeport.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.temporal.io/sdk/testsuite/freeport.go 2024-02-23 09:46:13.000000000 +0000 @@ -19,6 +19,7 @@ // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. + package testsuite import ( diff -Nru temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/testsuite/process_nonwindows.go temporal-1.22.5/src/vendor/go.temporal.io/sdk/testsuite/process_nonwindows.go --- temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/testsuite/process_nonwindows.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.temporal.io/sdk/testsuite/process_nonwindows.go 2024-02-23 09:46:13.000000000 +0000 @@ -0,0 +1,43 @@ +// The MIT License +// +// Copyright (c) 2023 Temporal Technologies Inc. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +//go:build !windows + +package testsuite + +import ( + "os" + "os/exec" + "syscall" +) + +// newCmd creates a new command with the given executable path and arguments. +func newCmd(exePath string, args ...string) *exec.Cmd { + cmd := exec.Command(exePath, args...) + cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr + return cmd +} + +// sendInterrupt sends an interrupt signal to the given process for graceful shutdown. +func sendInterrupt(process *os.Process) error { + return process.Signal(syscall.SIGINT) +} diff -Nru temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/testsuite/process_windows.go temporal-1.22.5/src/vendor/go.temporal.io/sdk/testsuite/process_windows.go --- temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/testsuite/process_windows.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.temporal.io/sdk/testsuite/process_windows.go 2024-02-23 09:46:13.000000000 +0000 @@ -0,0 +1,59 @@ +// The MIT License +// +// Copyright (c) 2023 Temporal Technologies Inc. All rights reserved. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package testsuite + +import ( + "os" + "os/exec" + "syscall" + + "golang.org/x/sys/windows" +) + +// newCmd creates a new command with the given executable path and arguments. +func newCmd(exePath string, args ...string) *exec.Cmd { + cmd := exec.Command(exePath, args...) + cmd.SysProcAttr = &syscall.SysProcAttr{ + // isolate the process and signals sent to it from the current console + CreationFlags: syscall.CREATE_NEW_PROCESS_GROUP, + } + cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr + return cmd +} + +// sendInterrupt calls the break event on the given process for graceful shutdown. +func sendInterrupt(process *os.Process) error { + dll, err := windows.LoadDLL("kernel32.dll") + if err != nil { + return err + } + p, err := dll.FindProc("GenerateConsoleCtrlEvent") + if err != nil { + return err + } + r, _, err := p.Call(uintptr(windows.CTRL_BREAK_EVENT), uintptr(process.Pid)) + if r == 0 { + return err + } + return nil +} diff -Nru temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/worker/worker.go temporal-1.22.5/src/vendor/go.temporal.io/sdk/worker/worker.go --- temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/worker/worker.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.temporal.io/sdk/worker/worker.go 2024-02-23 09:46:13.000000000 +0000 @@ -78,7 +78,7 @@ // WorkflowRegistry exposes workflow registration functions to consumers. WorkflowRegistry interface { // RegisterWorkflow - registers a workflow function with the worker. - // A workflow takes a workflow.Context and input and returns a (result, error) or just error. + // A workflow takes a [workflow.Context] and input and returns a (result, error) or just error. // Examples: // func sampleWorkflow(ctx workflow.Context, input []byte) (result []byte, err error) // func sampleWorkflow(ctx workflow.Context, arg1 int, arg2 string) (result []byte, err error) @@ -155,7 +155,7 @@ // For example if a workflow failed in production then its history can be downloaded through UI or CLI // and replayed in a debugger as many times as necessary. // Use this class to create unit tests that check if workflow changes are backwards compatible. - // It is important to maintain backwards compatibility through use of workflow.GetVersion + // It is important to maintain backwards compatibility through use of [workflow.GetVersion] // to ensure that new deployments are not going to break open workflows. 
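[Editor's note] The comment above recommends replay testing to catch non-backwards-compatible workflow changes. A minimal sketch, assuming MyWorkflow is the workflow under test and history.json was exported from the UI or CLI; passing nil for the logger falls back to the SDK default:

    import (
        "testing"

        "go.temporal.io/sdk/worker"
    )

    func TestReplayMyWorkflow(t *testing.T) {
        replayer := worker.NewWorkflowReplayer()
        replayer.RegisterWorkflow(MyWorkflow) // MyWorkflow is a placeholder

        // Replaying the recorded history fails if the current code is no
        // longer compatible with it.
        if err := replayer.ReplayWorkflowHistoryFromJSONFile(nil, "history.json"); err != nil {
            t.Fatal(err)
        }
    }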
WorkflowReplayer interface { // RegisterWorkflow registers workflow that is going to be replayed @@ -206,7 +206,7 @@ // WorkflowPanicPolicy is used for configuring how worker deals with workflow // code panicking which includes non backwards compatible changes to the workflow code without appropriate - // versioning (see workflow.GetVersion). + // versioning (see [workflow.GetVersion]). // The default behavior is to block workflow execution until the problem is fixed. WorkflowPanicPolicy = internal.WorkflowPanicPolicy diff -Nru temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/workflow/deterministic_wrappers.go temporal-1.22.5/src/vendor/go.temporal.io/sdk/workflow/deterministic_wrappers.go --- temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/workflow/deterministic_wrappers.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.temporal.io/sdk/workflow/deterministic_wrappers.go 2024-02-23 09:46:13.000000000 +0000 @@ -32,27 +32,27 @@ type ( - // Channel must be used instead of native go channel by workflow code. - // Use workflow.NewChannel(ctx) method to create Channel instance. - // Channel extends both ReadChanel and SendChannel. Prefer to use one of these interfaces - // to share Channel with consumers or producers. + // Channel must be used instead of a native go channel by workflow code. + // Use [workflow.NewChannel] to create a Channel instance. + // Channel extends both [ReceiveChannel] and [SendChannel]. Prefer using one of these interfaces + // to share a Channel with consumers or producers. Channel = internal.Channel - // ReceiveChannel is a read only view of the Channel + // ReceiveChannel is a read-only view of the Channel ReceiveChannel = internal.ReceiveChannel - // SendChannel is a write only view of the Channel + // SendChannel is a write-only view of the Channel SendChannel = internal.SendChannel // Selector must be used instead of native go select by workflow code. - // Use workflow.NewSelector(ctx) method to create a Selector instance. + // Use [workflow.NewSelector] method to create a Selector instance. Selector = internal.Selector // Future represents the result of an asynchronous computation. Future = internal.Future // Settable is used to set value or error on a future. - // See more: workflow.NewFuture(ctx). + // See more: [workflow.NewFuture]. Settable = internal.Settable // WaitGroup is used to wait for a collection of @@ -63,56 +63,58 @@ // Await blocks the calling thread until condition() returns true. // Do not mutate values or trigger side effects inside condition. // Returns CanceledError if the ctx is canceled. -// The following code is going to block until the captured count +// The following code will block until the captured count // variable is set to 5: -// workflow.Await(ctx, func() bool { -// return count == 5 -// }) +// +// workflow.Await(ctx, func() bool { +// return count == 5 +// }) // // The trigger is evaluated on every workflow state transition. // Note that conditions that wait for time can be error-prone as nothing might cause evaluation. // For example: -// workflow.Await(ctx, func() bool { -// return workflow.Now() > someTime -// }) -// might never return true unless some other event like Signal or activity completion would force the condition evaluation. +// +// workflow.Await(ctx, func() bool { +// return workflow.Now() > someTime +// }) +// +// might never return true unless some other event like a Signal or activity completion forces the condition evaluation. 
// For a time-based wait use workflow.AwaitWithTimeout function. func Await(ctx Context, condition func() bool) error { return internal.Await(ctx, condition) } // AwaitWithTimeout blocks the calling thread until condition() returns true -// or blocking time exceeds the passed timeout value -// Returns ok equals to false if timed out and err equals to -// CanceledError if the ctx is canceled. -// The following code is going to block until the captured count -// variable is set to 5 or one hour passes. +// or blocking time exceeds the passed timeout value. +// Returns ok=false if timed out, and err CanceledError if the ctx is canceled. +// The following code will block until the captured count +// variable is set to 5, or one hour passes. // -// workflow.AwaitWithTimeout(ctx, time.Hour, func() bool { -// return count == 5 -// }) +// workflow.AwaitWithTimeout(ctx, time.Hour, func() bool { +// return count == 5 +// }) func AwaitWithTimeout(ctx Context, timeout time.Duration, condition func() bool) (ok bool, err error) { return internal.AwaitWithTimeout(ctx, timeout, condition) } -// NewChannel create new Channel instance +// NewChannel creates a new Channel instance func NewChannel(ctx Context) Channel { return internal.NewChannel(ctx) } -// NewNamedChannel create new Channel instance with a given human readable name. -// Name appears in stack traces that are blocked on this channel. +// NewNamedChannel creates a new Channel instance with a given human-readable name. +// The name appears in stack traces that are blocked on this channel. func NewNamedChannel(ctx Context, name string) Channel { return internal.NewNamedChannel(ctx, name) } -// NewBufferedChannel create new buffered Channel instance +// NewBufferedChannel creates a new buffered Channel instance func NewBufferedChannel(ctx Context, size int) Channel { return internal.NewBufferedChannel(ctx, size) } -// NewNamedBufferedChannel create new BufferedChannel instance with a given human readable name. -// Name appears in stack traces that are blocked on this Channel. +// NewNamedBufferedChannel creates a new BufferedChannel instance with a given human-readable name. +// The name appears in stack traces that are blocked on this Channel. func NewNamedBufferedChannel(ctx Context, name string, size int) Channel { return internal.NewNamedBufferedChannel(ctx, name, size) } @@ -122,8 +124,8 @@ return internal.NewSelector(ctx) } -// NewNamedSelector creates a new Selector instance with a given human readable name. -// Name appears in stack traces that are blocked on this Selector. +// NewNamedSelector creates a new Selector instance with a given human-readable name. +// The name appears in stack traces that are blocked on this Selector. func NewNamedSelector(ctx Context, name string) Selector { return internal.NewNamedSelector(ctx, name) } @@ -133,43 +135,43 @@ return internal.NewWaitGroup(ctx) } -// Go creates a new coroutine. It has similar semantic to goroutine in a context of the workflow. +// Go creates a new coroutine. It has similar semantics to a goroutine, but in the context of the workflow. func Go(ctx Context, f func(ctx Context)) { internal.Go(ctx, f) } -// GoNamed creates a new coroutine with a given human readable name. -// It has similar semantic to goroutine in a context of the workflow. -// Name appears in stack traces that include this coroutine. +// GoNamed creates a new coroutine with a given human-readable name. +// It has similar semantics to a goroutine, but in the context of the workflow. 
+// The name appears in stack traces that include this coroutine. func GoNamed(ctx Context, name string, f func(ctx Context)) { internal.GoNamed(ctx, name, f) } -// NewFuture creates a new future as well as associated Settable that is used to set its value. +// NewFuture creates a new future as well as an associated Settable that is used to set its value. func NewFuture(ctx Context) (Future, Settable) { return internal.NewFuture(ctx) } // Now returns the current time when the workflow task is started or replayed. -// The workflow needs to use this Now() to get the wall clock time instead of the Go lang library one. +// Workflows must use this Now() to get the wall clock time, instead of Go's time.Now(). func Now(ctx Context) time.Time { return internal.Now(ctx) } -// NewTimer returns immediately and the future becomes ready after the specified duration d. The workflow needs to use -// this NewTimer() to get the timer instead of the Go lang library one(timer.NewTimer()). You can cancel the pending -// timer by cancel the Context (using context from workflow.WithCancel(ctx)) and that will cancel the timer. After timer -// is canceled, the returned Future become ready, and Future.Get() will return *CanceledError. +// NewTimer returns immediately and the future becomes ready after the specified duration d. Workflows must use +// this NewTimer() to get the timer, instead of Go's timer.NewTimer(). You can cancel the pending +// timer by canceling the Context (using the context from workflow.WithCancel(ctx)) and that will cancel the timer. After the timer +// is canceled, the returned Future becomes ready, and Future.Get() will return *CanceledError. func NewTimer(ctx Context, d time.Duration) Future { return internal.NewTimer(ctx, d) } // Sleep pauses the current workflow for at least the duration d. A negative or zero duration causes Sleep to return -// immediately. Workflow code needs to use this Sleep() to sleep instead of the Go lang library one(timer.Sleep()). -// You can cancel the pending sleep by cancel the Context (using context from workflow.WithCancel(ctx)). -// Sleep() returns nil if the duration d is passed, or it returns *CanceledError if the ctx is canceled. There are 2 -// reasons the ctx could be canceled: 1) your workflow code cancel the ctx (with workflow.WithCancel(ctx)); -// 2) your workflow itself is canceled by external request. +// immediately. Workflow code must use this Sleep() to sleep, instead of Go's timer.Sleep(). +// You can cancel the pending sleep by canceling the Context (using the context from workflow.WithCancel(ctx)). +// Sleep() returns nil if the duration d is passed, or *CanceledError if the ctx is canceled. There are two +// reasons the ctx might be canceled: 1) your workflow code canceled the ctx (with workflow.WithCancel(ctx)); +// 2) your workflow itself was canceled by external request. 
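[Editor's note] A short sketch combining the deterministic wrappers documented above (NewTimer, Selector, signal channels); the signal name "pause" is illustrative and the workflow and time packages are assumed imports:

    func WaitForSignalOrTimeout(ctx workflow.Context) (string, error) {
        outcome := "timed out"
        sigCh := workflow.GetSignalChannel(ctx, "pause")

        s := workflow.NewSelector(ctx)
        s.AddFuture(workflow.NewTimer(ctx, time.Hour), func(f workflow.Future) {
            // Timer fired first; keep the default outcome.
        })
        s.AddReceive(sigCh, func(c workflow.ReceiveChannel, more bool) {
            c.Receive(ctx, nil)
            outcome = "signaled"
        })
        s.Select(ctx)
        return outcome, nil
    }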
func Sleep(ctx Context, d time.Duration) (err error) { return internal.Sleep(ctx, d) } diff -Nru temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/workflow/doc.go temporal-1.22.5/src/vendor/go.temporal.io/sdk/workflow/doc.go --- temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/workflow/doc.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.temporal.io/sdk/workflow/doc.go 2024-02-23 09:46:13.000000000 +0000 @@ -60,7 +60,7 @@ } ctx = workflow.WithActivityOptions(ctx, ao) - future := workflow.ExecuteActivity(ctx, SimpleActivity, value) + future := [workflow.ExecuteActivity](ctx, SimpleActivity, value) var result string if err := future.Get(ctx, &result); err != nil { return err @@ -73,16 +73,16 @@ # Declaration -In the Temporal programing model a workflow is implemented with a function. The function declaration specifies the +In the Temporal programming model a workflow is implemented with a function. The function declaration specifies the parameters the workflow accepts as well as any values it might return. func SimpleWorkflow(ctx workflow.Context, value string) error -The first parameter to the function is ctx workflow.Context. This is a required parameter for all workflow functions +The first parameter to the function is ctx [workflow.Context]. This is a required parameter for all workflow functions and is used by the Temporal client library to pass execution context. Virtually all the client library functions that are callable from the workflow functions require this ctx parameter. This **context** parameter is the same concept as the standard context.Context provided by Go. The only difference between workflow.Context and context.Context is that -the Done() function in workflow.Context returns workflow.Channel instead of the standard go chan. +the Done() function in [workflow.Context] returns [workflow.Channel] instead of the standard go chan. The second string parameter is a custom workflow parameter that can be used to pass in data into the workflow on start. A workflow can have one or more such parameters. All parameters to an workflow function must be serializable, which @@ -107,13 +107,13 @@ - Should really not affect changes in external systems other than through invocation of activities - Should interact with time only through the functions provided by the - Temporal client library (i.e. workflow.Now(), workflow.Sleep()) + Temporal client library (i.e. [workflow.Now](), [workflow.Sleep]()) - Should not create and interact with goroutines directly, it should instead use the functions provided by the Temporal client library. (i.e. - workflow.Go() instead of go, workflow.Channel instead of chan, - workflow.Selector instead of select) + [workflow.Go]() instead of go, [workflow.Channel] instead of chan, + [workflow.Selector] instead of select) - Should do all logging via the logger provided by the Temporal client - library (i.e. workflow.GetLogger()) + library (i.e. [workflow.GetLogger]()) - Should not iterate over maps using range as order of map iteration is randomized @@ -127,25 +127,26 @@ Coroutine related constructs: - - workflow.Go : This is a replacement for the the go statement - - workflow.Channel : This is a replacement for the native chan type. Temporal + - [workflow.Go] : This is a replacement for the the go statement + - [workflow.Channel] : This is a replacement for the native chan type. 
Temporal provides support for both buffered and unbuffered channels - - workflow.Selector : This is a replacement for the select statement + - [workflow.Selector] : This is a replacement for the select statement Time related functions: - - workflow.Now() : This is a replacement for time.Now() - - workflow.Sleep() : This is a replacement for time.Sleep() + - [workflow.Now]() : This is a replacement for [time.Now]() + - [workflow.Sleep]() : This is a replacement for [time.Sleep]() # Failing a Workflow To mark a workflow as failed all that needs to happen is for the workflow function to return an error via the err -return value. +return value. Returning an error and a result from a workflow are mutually exclusive. If an error is returned from a +workflow then any results returned are ignored. # Execute Activity The primary responsibility of the workflow implementation is to schedule activities for execution. The most -straightforward way to do that is via the library method workflow.ExecuteActivity: +straightforward way to do that is via the library method [workflow.ExecuteActivity]: ao := workflow.ActivityOptions{ TaskQueue: "sampleTaskQueue", @@ -157,20 +158,20 @@ } ctx = workflow.WithActivityOptions(ctx, ao) - future := workflow.ExecuteActivity(ctx, SimpleActivity, value) + future := [workflow.ExecuteActivity](ctx, SimpleActivity, value) var result string if err := future.Get(ctx, &result); err != nil { return err } -Before calling workflow.ExecuteActivity(), ActivityOptions must be configured for the invocation. These are for the +Before calling [workflow.ExecuteActivity](), [ActivityOptions] must be configured for the invocation. These are for the most part options to customize various execution timeouts. These options are passed in by creating a child context from the initial context and overwriting the desired values. The child context is then passed into the -workflow.ExecuteActivity() call. If multiple activities are sharing the same exact option values then the same context -instance can be used when calling workflow.ExecuteActivity(). +[workflow.ExecuteActivity]() call. If multiple activities are sharing the same exact option values then the same context +instance can be used when calling [workflow.ExecuteActivity](). The first parameter to the call is the required workflow.Context object. This type is an exact copy of context.Context -with the Done() method returning workflow.Channel instead of native go chan. +with the Done() method returning [workflow.Channel] instead of native go chan. The second parameter is the function that we registered as an activity function. This parameter can also be the a string representing the fully qualified name of the activity function. The benefit of passing in the actual function @@ -180,15 +181,15 @@ single parameter: **value**. This list of parameters must match the list of parameters declared by the activity function. Like mentioned above the Temporal client library will validate that this is indeed the case. -The method call returns immediately and returns a workflow.Future. This allows for more code to be executed without +The method call returns immediately and returns a [workflow.Future]. This allows for more code to be executed without having to wait for the scheduled activity to complete. When we are ready to process the results of the activity we call the Get() method on the future object returned. 
The -parameters to this method are the ctx object we passed to the workflow.ExecuteActivity() call and an output parameter +parameters to this method are the ctx object we passed to the [workflow.ExecuteActivity]() call and an output parameter that will receive the output of the activity. The type of the output parameter must match the type of the return value declared by the activity function. The Get() method will block until the activity completes and results are available. -The result value returned by workflow.ExecuteActivity() can be retrieved from the future and used like any normal +The result value returned by [workflow.ExecuteActivity]() can be retrieved from the future and used like any normal result from a synchronous function call. If the result above is a string value we could use it as follows: var result string @@ -205,17 +206,17 @@ return err } -In the example above we called the Get() method on the returned future immediately after workflow.ExecuteActivity(). +In the example above we called the Get() method on the returned future immediately after [workflow.ExecuteActivity](). However, this is not necessary. If we wish to execute multiple activities in parallel we can repeatedly call -workflow.ExecuteActivity() store the futures returned and then wait for all activities to complete by calling the +[workflow.ExecuteActivity]() store the futures returned and then wait for all activities to complete by calling the Get() methods of the future at a later time. -To implement more complex wait conditions on the returned future objects, use the workflow.Selector class. Take a look -at our Pickfirst sample for an example of how to use of workflow.Selector. +To implement more complex wait conditions on the returned future objects, use the [workflow.Selector] class. Take a look +at our Pickfirst sample for an example of how to use of [workflow.Selector]. # Child Workflow -workflow.ExecuteChildWorkflow enables the scheduling of other workflows from within a workflow's implementation. The +[workflow.ExecuteChildWorkflow] enables the scheduling of other workflows from within a workflow's implementation. The parent workflow has the ability to "monitor" and impact the life-cycle of the child workflow in a similar way it can do for an activity it invoked. @@ -233,14 +234,14 @@ return err } -Before calling workflow.ExecuteChildWorkflow(), ChildWorkflowOptions must be configured for the invocation. These are +Before calling [workflow.ExecuteChildWorkflow](), [ChildWorkflowOptions] must be configured for the invocation. These are for the most part options to customize various execution timeouts. These options are passed in by creating a child context from the initial context and overwriting the desired values. The child context is then passed into the -workflow.ExecuteChildWorkflow() call. If multiple activities are sharing the same exact option values then the same -context instance can be used when calling workflow.ExecuteChildWorkflow(). +[workflow.ExecuteChildWorkflow]() call. If multiple activities are sharing the same exact option values then the same +context instance can be used when calling [workflow.ExecuteChildWorkflow](). -The first parameter to the call is the required workflow.Context object. This type is an exact copy of context.Context -with the Done() method returning workflow.Channel instead of the native go chan. +The first parameter to the call is the required [workflow.Context] object. 
This type is an exact copy of context.Context +with the Done() method returning [workflow.Channel] instead of the native go chan. The second parameter is the function that we registered as a workflow function. This parameter can also be a string representing the fully qualified name of the workflow function. What's the benefit? When you pass in the actual @@ -253,13 +254,13 @@ having to wait for the scheduled workflow to complete. When we are ready to process the results of the workflow we call the Get() method on the future object returned. The -parameters to this method are the ctx object we passed to the workflow.ExecuteChildWorkflow() call and an output +parameters to this method are the ctx object we passed to the [workflow.ExecuteChildWorkflow]() call and an output parameter that will receive the output of the workflow. The type of the output parameter must match the type of the return value declared by the workflow function. The Get() method will block until the workflow completes and results are available. -The workflow.ExecuteChildWorkflow() function is very similar to the workflow.ExecuteActivity() function. All the -patterns described for using the workflow.ExecuteActivity() apply to the workflow.ExecuteChildWorkflow() function as +The [workflow.ExecuteChildWorkflow]() function is very similar to the [workflow.ExecuteActivity]() function. All the +patterns described for using the [workflow.ExecuteActivity]() apply to the [workflow.ExecuteChildWorkflow]() function as well. Child workflows can also be configured to continue to exist once their parent workflow is closed. When using this @@ -282,11 +283,11 @@ # Error Handling -Activities and child workflows can fail. Activity errors are *temporal.ActivityError and errors during child workflow -execution are *temporal.ChildWorkflowExecutionError. The cause of the errors may be types like -*temporal.ApplicationError, *temporal.TimeoutError, *temporal.CanceledError, and *temporal.PanicError. +Activities and child workflows can fail. Activity errors are *[temporal.ActivityError] and errors during child workflow +execution are *[temporal.ChildWorkflowExecutionError]. The cause of the errors may be types like +*[temporal.ApplicationError], *[temporal.TimeoutError], *[temporal.CanceledError], and *[temporal.PanicError]. -See ExecuteActivity() and ExecuteChildWorkflow() for details. +See [ExecuteActivity]() and [ExecuteChildWorkflow]() for details. # Signals @@ -312,7 +313,7 @@ signalChan := workflow.GetSignalChannel(ctx, signalName) s := workflow.NewSelector(ctx) - s.AddReceive(signalChan, func(c workflow.Channel, more bool) { + s.AddReceive(signalChan, func(c [workflow.Channel], more bool) { c.Receive(ctx, &signalVal) workflow.GetLogger(ctx).Info("Received signal!", "signal", signalName, "value", signalVal) }) @@ -322,8 +323,57 @@ return errors.New("signalVal") } -In the example above, the workflow code uses workflow.GetSignalChannel to open a workflow.Channel for the named signal. -We then use a workflow.Selector to wait on this channel and process the payload received with the signal. +In the example above, the workflow code uses [workflow.GetSignalChannel] to open a [workflow.Channel] for the named signal. +We then use a [workflow.Selector] to wait on this channel and process the payload received with the signal. + +# Updates + +## Handle Update + +Updates provide a fully async and durable mechanism to send data directly to a running workflow and receive a response +back. 
Unlike a Query handler and update handler has no restriction over normal workflow code so you can modify +workflow state, schedule activities, launch child workflow, etc. + + counter := param.StartCount + err := workflow.SetUpdateHandler(ctx, YourUpdateName, func(ctx workflow.Context, arg YourUpdateArg) (YourUpdateResult, error) { + counter += arg.Add + result := YourUpdateResult{ + Total: counter, + } + return result, nil + }) + +For more information see our docs on [handling updates] + +## Validate Updates + +Note: This is a feature for advanced users for pre-persistence, read-only validation. Other more advanced validation +can and should be done in the handler. + +Update validators provide a mechanism to perform read-only validation (i.e. not modify workflow state or schedule any commands). If +the update validator returns any error the update will fail and not be written into history. + + if err := workflow.SetUpdateHandlerWithOptions( + ctx, + FetchAndAdd, + func(ctx workflow.Context, i int) (int, error) { + tmp := counter + counter += i + return tmp, nil + }, + workflow.UpdateHandlerOptions{Validator: nonNegative}, + ); err != nil { + return 0, err + } + + func nonNegative(ctx workflow.Context, i int) error { + if i < 0 { + return fmt.Errorf("addend must be non-negative (%v)", i) + } + return nil + } + +For more information see our docs on [validator functions] # ContinueAsNew Workflow Completion @@ -331,7 +381,7 @@ logic of the workflow is inside the body of the for loop. The problem with this approach is that the history for that workflow will keep growing to a point where it reaches the maximum size enforced by the service. -ContinueAsNew is the low level construct that enables implementing such workflows without the risk of failures down the +[ContinueAsNew] is the low level construct that enables implementing such workflows without the risk of failures down the road. The operation atomically completes the current execution and starts a new execution of the workflow with the same workflow ID. The new execution will not carry over any history from the old execution. To trigger this behavior, the workflow function should terminate by returning the special ContinueAsNewError error: @@ -341,19 +391,19 @@ return workflow.NewContinueAsNewError(ctx, SimpleWorkflow, value) } -For a complete example implementing this pattern please refer to the Cron example. +For a complete example implementing this pattern please refer to our Cron example. # SideEffect API -workflow.SideEffect executes the provided function once, records its result into the workflow history, and doesn't +[workflow.SideEffect] executes the provided function once, records its result into the workflow history, and doesn't re-execute upon replay. Instead, it returns the recorded result. Use it only for short, nondeterministic code snippets, like getting a random value or generating a UUID. It can be seen as an "inline" activity. However, one thing to note -about workflow.SideEffect is that whereas for activities Temporal guarantees "at-most-once" execution, no such guarantee -exists for workflow.SideEffect. Under certain failure conditions, workflow.SideEffect can end up executing the function +about [workflow.SideEffect] is that whereas for activities Temporal guarantees "at-most-once" execution, no such guarantee +exists for [workflow.SideEffect]. Under certain failure conditions, [workflow.SideEffect] can end up executing the function more than once. 
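[Editor's note] The related workflow.MutableSideEffect, whose wrapper this diff also changes to run its function in read-only mode, behaves like SideEffect but records a new marker only when the returned value differs from the previously recorded one according to the supplied equals function. A hedged sketch, with loadConfigVersion purely illustrative:

    v := workflow.MutableSideEffect(ctx, "config-version",
        func(ctx workflow.Context) interface{} { return loadConfigVersion() },
        func(a, b interface{}) bool { return a.(int) == b.(int) },
    )
    var version int
    if err := v.Get(&version); err != nil {
        return err
    }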
-The only way to fail SideEffect is to panic, which causes workflow task failure. The workflow task after timeout is -rescheduled and re-executed giving SideEffect another chance to succeed. Be careful to not return any data from the +The only way to fail [SideEffect] is to panic, which causes workflow task failure. The workflow task after timeout is +rescheduled and re-executed giving [SideEffect] another chance to succeed. Be careful to not return any data from the SideEffect function any other way than through its recorded return value. encodedRandom := SideEffect(func(ctx workflow.Context) interface{} { @@ -378,7 +428,7 @@ The above cli command uses __stack_trace as the query type. The __stack_trace is a built-in query type that is supported by temporal client library. You can also add your own custom query types to support thing like query current state of the workflow, or query how many activities the workflow has completed. To do so, you need to setup your own -query handler using workflow.SetQueryHandler in your workflow code: +query handler using [workflow.SetQueryHandler] in your workflow code: func MyWorkflow(ctx workflow.Context, input string) error { currentState := "started" // this could be any serializable struct @@ -500,11 +550,11 @@ # Setup -First, we define a "test suite" struct that absorbs both the basic suite functionality from testify -http://godoc.org/github.com/stretchr/testify/suite via suite.Suite and the suite functionality from the Temporal test -framework via testsuite.WorkflowTestSuite. Since every test in this suite will test our workflow we add a property to +First, we define a "test suite" struct that absorbs both the basic suite functionality from [testify] +via suite.Suite and the suite functionality from the Temporal test +framework via [testsuite.WorkflowTestSuite]. Since every test in this suite will test our workflow we add a property to our struct to hold an instance of the test environment. This will allow us to initialize the test environment in a -setup method. For testing workflows we use a testsuite.TestWorkflowEnvironment. +setup method. For testing workflows we use a [testsuite.TestWorkflowEnvironment]. We then implement a SetupTest method to setup a new test environment before each test. Doing so ensure that each test runs in it's own isolated sandbox. We also implement an AfterTest function where we assert that all mocks we setup were @@ -591,5 +641,9 @@ NOTE: The default MaximumAttempts for retry policy set by server is 0 which means unlimited retries. However, during a unit test the default MaximumAttempts is 10 to avoid a test getting stuck. 
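[Editor's note] A minimal sketch of the test-suite setup described above; SimpleWorkflow stands in for the workflow under test:

    import (
        "testing"

        "github.com/stretchr/testify/suite"
        "go.temporal.io/sdk/testsuite"
    )

    type UnitTestSuite struct {
        suite.Suite
        testsuite.WorkflowTestSuite

        env *testsuite.TestWorkflowEnvironment
    }

    func (s *UnitTestSuite) SetupTest() {
        // Fresh, isolated environment for every test.
        s.env = s.NewTestWorkflowEnvironment()
    }

    func (s *UnitTestSuite) AfterTest(suiteName, testName string) {
        s.env.AssertExpectations(s.T())
    }

    func (s *UnitTestSuite) Test_SimpleWorkflow() {
        s.env.ExecuteWorkflow(SimpleWorkflow, "test-input")

        s.True(s.env.IsWorkflowCompleted())
        s.NoError(s.env.GetWorkflowError())
    }

    func TestUnitTestSuite(t *testing.T) {
        suite.Run(t, new(UnitTestSuite))
    }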
+ +[testify]: http://godoc.org/github.com/stretchr/testify/suite +[handling updates]: https://docs.temporal.io/dev-guide/go/features#handle-update +[validator functions]: https://docs.temporal.io/dev-guide/go/features#validator-function */ package workflow diff -Nru temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/workflow/workflow.go temporal-1.22.5/src/vendor/go.temporal.io/sdk/workflow/workflow.go --- temporal-1.21.5-1/src/vendor/go.temporal.io/sdk/workflow/workflow.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.temporal.io/sdk/workflow/workflow.go 2024-02-23 09:46:13.000000000 +0000 @@ -56,6 +56,11 @@ // Info information about currently executing workflow Info = internal.WorkflowInfo + // UpdateInfo information about a currently running update + // + // NOTE: Experimental + UpdateInfo = internal.UpdateInfo + // ContinueAsNewError can be returned by a workflow implementation function and indicates that // the workflow should continue as new with the same WorkflowID, but new RunID and new history. ContinueAsNewError = internal.ContinueAsNewError @@ -192,6 +197,10 @@ return internal.GetWorkflowInfo(ctx) } +func GetUpdateInfo(ctx Context) *UpdateInfo { + return internal.GetUpdateInfo(ctx) +} + // GetLogger returns a logger to be used in workflow's context func GetLogger(ctx Context) log.Logger { return internal.GetLogger(ctx) @@ -323,7 +332,7 @@ // // The backwards compatible way to execute the update is // -// v := GetVersion(ctx, "fooChange", DefaultVersion, 1) +// v := GetVersion(ctx, "fooChange", DefaultVersion, 0) // if v == DefaultVersion { // err = workflow.ExecuteActivity(ctx, foo).Get(ctx, nil) // } else { @@ -332,10 +341,10 @@ // // Then bar has to be changed to baz: // -// v := GetVersion(ctx, "fooChange", DefaultVersion, 2) +// v := GetVersion(ctx, "fooChange", DefaultVersion, 1) // if v == DefaultVersion { // err = workflow.ExecuteActivity(ctx, foo).Get(ctx, nil) -// } else if v == 1 { +// } else if v == 0 { // err = workflow.ExecuteActivity(ctx, bar).Get(ctx, nil) // } else { // err = workflow.ExecuteActivity(ctx, baz).Get(ctx, nil) @@ -343,8 +352,8 @@ // // Later when there are no workflow executions running DefaultVersion the correspondent branch can be removed: // -// v := GetVersion(ctx, "fooChange", 1, 2) -// if v == 1 { +// v := GetVersion(ctx, "fooChange", 0, 1) +// if v == 0 { // err = workflow.ExecuteActivity(ctx, bar).Get(ctx, nil) // } else { // err = workflow.ExecuteActivity(ctx, baz).Get(ctx, nil) @@ -352,12 +361,12 @@ // // It is recommended to keep the GetVersion() call even if single branch is left: // -// GetVersion(ctx, "fooChange", 2, 2) +// GetVersion(ctx, "fooChange", 1, 1) // err = workflow.ExecuteActivity(ctx, baz).Get(ctx, nil) // // The reason to keep it is: 1) it ensures that if there is older version execution still running, it will fail here // and not proceed; 2) if you ever need to make more changes for “fooChange”, for example change activity from baz to qux, -// you just need to update the maxVersion from 2 to 3. +// you just need to update the maxVersion from 1 to 2. // // Note that, you only need to preserve the first call to GetVersion() for each changeID. All subsequent call to GetVersion() // with same changeID are safe to remove. However, if you really want to get rid of the first GetVersion() call as well, @@ -365,7 +374,7 @@ // as changeID. 
If you ever need to make changes to that same part like change from baz to qux, you would need to use a // different changeID like “fooChange-fix2”, and start minVersion from DefaultVersion again. The code would looks like: // -// v := workflow.GetVersion(ctx, "fooChange-fix2", workflow.DefaultVersion, 1) +// v := workflow.GetVersion(ctx, "fooChange-fix2", workflow.DefaultVersion, 0) // if v == workflow.DefaultVersion { // err = workflow.ExecuteActivity(ctx, baz, data).Get(ctx, nil) // } else { @@ -382,7 +391,7 @@ // code. When client calls Client.QueryWorkflow() to temporal server, a task will be generated on server that will be dispatched // to a workflow worker, which will replay the history events and then execute a query handler based on the query type. // The query handler will be invoked out of the context of the workflow, meaning that the handler code must not use workflow -// context to do things like workflow.NewChannel(), workflow.Go() or to call any workflow blocking functions like +// context to do things like [workflow.NewChannel](), [workflow.Go]() or to call any workflow blocking functions like // Channel.Get() or Future.Get(). Trying to do so in query handler code will fail the query and client will receive // QueryFailedError. // Example of workflow code that support query type "current_state": @@ -431,7 +440,7 @@ // name such that update invocations specifying that name will invoke the // handler. The handler function can take as input any number of parameters so // long as they can be serialized/deserialized by the system. The handler can -// take a workflow.Context as its first parameter but this is not required. The +// take a [workflow.Context] as its first parameter but this is not required. The // update handler must return either a single error or a single serializable // object along with a single error. The update handler function is invoked in // the context of the workflow and thus is subject to the same restrictions as @@ -445,7 +454,7 @@ // the update request will be considered to have been rejected and as such will // not occupy any space in the workflow history. Validation functions must take // as inputs the same parameters as the associated update handler but my vary -// from said handler by the presence/absence of a workflow.Context as the first +// from said handler by the presence/absence of a [workflow.Context] as the first // parameter. Validation handlers must only return a single error. Validation // handlers must be deterministic and can observe workflow state but must not // mutate workflow state in any way. @@ -486,8 +495,8 @@ // Warning! Never make commands, like schedule activity/childWorkflow/timer or send/wait on future/channel, based on // this flag as it is going to break workflow determinism requirement. // The only reasonable use case for this flag is to avoid some external actions during replay, like custom logging or -// metric reporting. Please note that Temporal already provide standard logging/metric via workflow.GetLogger(ctx) and -// workflow.GetMetricsHandler(ctx), and those standard mechanism are replay-aware and it will automatically suppress +// metric reporting. Please note that Temporal already provide standard logging/metric via [workflow.GetLogger] and +// [workflow.GetMetricsHandler], and those standard mechanism are replay-aware and it will automatically suppress // during replay. Only use this flag if you need custom logging/metrics reporting, for example if you want to log to // kafka. 
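[Editor's note] A brief sketch of the only sanctioned use of the replay flag described above: suppressing a custom, non-replay-aware side channel. emitAuditEvent is an illustrative stand-in for something like a Kafka producer:

    if !workflow.IsReplaying(ctx) {
        // Standard logging and metrics via workflow.GetLogger and
        // workflow.GetMetricsHandler are already replay-aware; this guard is
        // only for custom sinks.
        emitAuditEvent(workflow.GetInfo(ctx).WorkflowExecution.ID)
    }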
// @@ -508,8 +517,8 @@ return internal.HasLastCompletionResult(ctx) } -// GetLastCompletionResult extract last completion result from the last successful run for this cron workflow. -// This is used in combination with cron schedule. A workflow can be started with an optional cron schedule. +// GetLastCompletionResult extract last completion result from the last successful run for this cron or schedule workflow. +// This is used in combination with cron schedule or schedule workflow. A workflow can be started with an optional cron schedule. // If a cron workflow wants to pass some data to next schedule, it can return any data and that data will become // available when next run starts. This will contain the last successful result even if the most recent run failed. // This GetLastCompletionResult() extract the data into expected data structure. @@ -523,7 +532,7 @@ } // GetLastError extracts the error from the last run of this workflow. If the last run of this workflow did not fail or -// this is the first run, this will be nil. +// this is the first run, this will be nil. This is used in combination with cron schedule or schedule workflow. // // See TestWorkflowEnvironment.SetLastError() for unit test support. func GetLastError(ctx Context) error { @@ -559,7 +568,9 @@ // "CustomKeywordField": "seattle", // } // -// This is only supported when using ElasticSearch. +// For supported operations on different server versions see [Visibility]. +// +// [Visibility]: https://docs.temporal.io/visibility func UpsertSearchAttributes(ctx Context, attributes map[string]interface{}) error { return internal.UpsertSearchAttributes(ctx, attributes) } diff -Nru temporal-1.21.5-1/src/vendor/go.uber.org/atomic/CHANGELOG.md temporal-1.22.5/src/vendor/go.uber.org/atomic/CHANGELOG.md --- temporal-1.21.5-1/src/vendor/go.uber.org/atomic/CHANGELOG.md 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.uber.org/atomic/CHANGELOG.md 2024-02-23 09:46:13.000000000 +0000 @@ -4,6 +4,16 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## [1.11.0] - 2023-05-02 +### Fixed +- Fix initialization of `Value` wrappers. + +### Added +- Add `String` method to `atomic.Pointer[T]` type allowing users to safely print +underlying values of pointers. + +[1.11.0]: https://github.com/uber-go/atomic/compare/v1.10.0...v1.11.0 + ## [1.10.0] - 2022-08-11 ### Added - Add `atomic.Float32` type for atomic operations on `float32`. diff -Nru temporal-1.21.5-1/src/vendor/go.uber.org/atomic/bool.go temporal-1.22.5/src/vendor/go.uber.org/atomic/bool.go --- temporal-1.21.5-1/src/vendor/go.uber.org/atomic/bool.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.uber.org/atomic/bool.go 2024-02-23 09:46:13.000000000 +0000 @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicwrapper. -// Copyright (c) 2020-2022 Uber Technologies, Inc. +// Copyright (c) 2020-2023 Uber Technologies, Inc. 
// // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal diff -Nru temporal-1.21.5-1/src/vendor/go.uber.org/atomic/duration.go temporal-1.22.5/src/vendor/go.uber.org/atomic/duration.go --- temporal-1.21.5-1/src/vendor/go.uber.org/atomic/duration.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.uber.org/atomic/duration.go 2024-02-23 09:46:13.000000000 +0000 @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicwrapper. -// Copyright (c) 2020-2022 Uber Technologies, Inc. +// Copyright (c) 2020-2023 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal diff -Nru temporal-1.21.5-1/src/vendor/go.uber.org/atomic/error.go temporal-1.22.5/src/vendor/go.uber.org/atomic/error.go --- temporal-1.21.5-1/src/vendor/go.uber.org/atomic/error.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.uber.org/atomic/error.go 2024-02-23 09:46:13.000000000 +0000 @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicwrapper. -// Copyright (c) 2020-2022 Uber Technologies, Inc. +// Copyright (c) 2020-2023 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -52,7 +52,17 @@ // CompareAndSwap is an atomic compare-and-swap for error values. func (x *Error) CompareAndSwap(old, new error) (swapped bool) { - return x.v.CompareAndSwap(packError(old), packError(new)) + if x.v.CompareAndSwap(packError(old), packError(new)) { + return true + } + + if old == _zeroError { + // If the old value is the empty value, then it's possible the + // underlying Value hasn't been set and is nil, so retry with nil. + return x.v.CompareAndSwap(nil, packError(new)) + } + + return false } // Swap atomically stores the given error and returns the old diff -Nru temporal-1.21.5-1/src/vendor/go.uber.org/atomic/float32.go temporal-1.22.5/src/vendor/go.uber.org/atomic/float32.go --- temporal-1.21.5-1/src/vendor/go.uber.org/atomic/float32.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.uber.org/atomic/float32.go 2024-02-23 09:46:13.000000000 +0000 @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicwrapper. -// Copyright (c) 2020-2022 Uber Technologies, Inc. +// Copyright (c) 2020-2023 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal diff -Nru temporal-1.21.5-1/src/vendor/go.uber.org/atomic/float64.go temporal-1.22.5/src/vendor/go.uber.org/atomic/float64.go --- temporal-1.21.5-1/src/vendor/go.uber.org/atomic/float64.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.uber.org/atomic/float64.go 2024-02-23 09:46:13.000000000 +0000 @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicwrapper. -// Copyright (c) 2020-2022 Uber Technologies, Inc. +// Copyright (c) 2020-2023 Uber Technologies, Inc. 
// // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal diff -Nru temporal-1.21.5-1/src/vendor/go.uber.org/atomic/int32.go temporal-1.22.5/src/vendor/go.uber.org/atomic/int32.go --- temporal-1.21.5-1/src/vendor/go.uber.org/atomic/int32.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.uber.org/atomic/int32.go 2024-02-23 09:46:13.000000000 +0000 @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicint. -// Copyright (c) 2020-2022 Uber Technologies, Inc. +// Copyright (c) 2020-2023 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal diff -Nru temporal-1.21.5-1/src/vendor/go.uber.org/atomic/int64.go temporal-1.22.5/src/vendor/go.uber.org/atomic/int64.go --- temporal-1.21.5-1/src/vendor/go.uber.org/atomic/int64.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.uber.org/atomic/int64.go 2024-02-23 09:46:13.000000000 +0000 @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicint. -// Copyright (c) 2020-2022 Uber Technologies, Inc. +// Copyright (c) 2020-2023 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal diff -Nru temporal-1.21.5-1/src/vendor/go.uber.org/atomic/pointer_go118.go temporal-1.22.5/src/vendor/go.uber.org/atomic/pointer_go118.go --- temporal-1.21.5-1/src/vendor/go.uber.org/atomic/pointer_go118.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.uber.org/atomic/pointer_go118.go 2024-02-23 09:46:13.000000000 +0000 @@ -18,43 +18,14 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. -//go:build go1.18 && !go1.19 -// +build go1.18,!go1.19 +//go:build go1.18 +// +build go1.18 package atomic -import "unsafe" +import "fmt" -type Pointer[T any] struct { - _ nocmp // disallow non-atomic comparison - p UnsafePointer -} - -// NewPointer creates a new Pointer. -func NewPointer[T any](v *T) *Pointer[T] { - var p Pointer[T] - if v != nil { - p.p.Store(unsafe.Pointer(v)) - } - return &p -} - -// Load atomically loads the wrapped value. -func (p *Pointer[T]) Load() *T { - return (*T)(p.p.Load()) -} - -// Store atomically stores the passed value. -func (p *Pointer[T]) Store(val *T) { - p.p.Store(unsafe.Pointer(val)) -} - -// Swap atomically swaps the wrapped pointer and returns the old value. -func (p *Pointer[T]) Swap(val *T) (old *T) { - return (*T)(p.p.Swap(unsafe.Pointer(val))) -} - -// CompareAndSwap is an atomic compare-and-swap. -func (p *Pointer[T]) CompareAndSwap(old, new *T) (swapped bool) { - return p.p.CompareAndSwap(unsafe.Pointer(old), unsafe.Pointer(new)) +// String returns a human readable representation of a Pointer's underlying value. +func (p *Pointer[T]) String() string { + return fmt.Sprint(p.Load()) } diff -Nru temporal-1.21.5-1/src/vendor/go.uber.org/atomic/pointer_go118_pre119.go temporal-1.22.5/src/vendor/go.uber.org/atomic/pointer_go118_pre119.go --- temporal-1.21.5-1/src/vendor/go.uber.org/atomic/pointer_go118_pre119.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.uber.org/atomic/pointer_go118_pre119.go 2024-02-23 09:46:13.000000000 +0000 @@ -0,0 +1,60 @@ +// Copyright (c) 2022 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +//go:build go1.18 && !go1.19 +// +build go1.18,!go1.19 + +package atomic + +import "unsafe" + +type Pointer[T any] struct { + _ nocmp // disallow non-atomic comparison + p UnsafePointer +} + +// NewPointer creates a new Pointer. +func NewPointer[T any](v *T) *Pointer[T] { + var p Pointer[T] + if v != nil { + p.p.Store(unsafe.Pointer(v)) + } + return &p +} + +// Load atomically loads the wrapped value. +func (p *Pointer[T]) Load() *T { + return (*T)(p.p.Load()) +} + +// Store atomically stores the passed value. +func (p *Pointer[T]) Store(val *T) { + p.p.Store(unsafe.Pointer(val)) +} + +// Swap atomically swaps the wrapped pointer and returns the old value. +func (p *Pointer[T]) Swap(val *T) (old *T) { + return (*T)(p.p.Swap(unsafe.Pointer(val))) +} + +// CompareAndSwap is an atomic compare-and-swap. +func (p *Pointer[T]) CompareAndSwap(old, new *T) (swapped bool) { + return p.p.CompareAndSwap(unsafe.Pointer(old), unsafe.Pointer(new)) +} diff -Nru temporal-1.21.5-1/src/vendor/go.uber.org/atomic/string.go temporal-1.22.5/src/vendor/go.uber.org/atomic/string.go --- temporal-1.21.5-1/src/vendor/go.uber.org/atomic/string.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.uber.org/atomic/string.go 2024-02-23 09:46:13.000000000 +0000 @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicwrapper. -// Copyright (c) 2020-2022 Uber Technologies, Inc. +// Copyright (c) 2020-2023 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -42,24 +42,31 @@ // Load atomically loads the wrapped string. func (x *String) Load() string { - if v := x.v.Load(); v != nil { - return v.(string) - } - return _zeroString + return unpackString(x.v.Load()) } // Store atomically stores the passed string. func (x *String) Store(val string) { - x.v.Store(val) + x.v.Store(packString(val)) } // CompareAndSwap is an atomic compare-and-swap for string values. func (x *String) CompareAndSwap(old, new string) (swapped bool) { - return x.v.CompareAndSwap(old, new) + if x.v.CompareAndSwap(packString(old), packString(new)) { + return true + } + + if old == _zeroString { + // If the old value is the empty value, then it's possible the + // underlying Value hasn't been set and is nil, so retry with nil. 
+ return x.v.CompareAndSwap(nil, packString(new)) + } + + return false } // Swap atomically stores the given string and returns the old // value. func (x *String) Swap(val string) (old string) { - return x.v.Swap(val).(string) + return unpackString(x.v.Swap(packString(val))) } diff -Nru temporal-1.21.5-1/src/vendor/go.uber.org/atomic/string_ext.go temporal-1.22.5/src/vendor/go.uber.org/atomic/string_ext.go --- temporal-1.21.5-1/src/vendor/go.uber.org/atomic/string_ext.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.uber.org/atomic/string_ext.go 2024-02-23 09:46:13.000000000 +0000 @@ -1,4 +1,4 @@ -// Copyright (c) 2020-2022 Uber Technologies, Inc. +// Copyright (c) 2020-2023 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -20,7 +20,18 @@ package atomic -//go:generate bin/gen-atomicwrapper -name=String -type=string -wrapped=Value -compareandswap -swap -file=string.go +//go:generate bin/gen-atomicwrapper -name=String -type=string -wrapped Value -pack packString -unpack unpackString -compareandswap -swap -file=string.go + +func packString(s string) interface{} { + return s +} + +func unpackString(v interface{}) string { + if s, ok := v.(string); ok { + return s + } + return "" +} // String returns the wrapped value. func (s *String) String() string { diff -Nru temporal-1.21.5-1/src/vendor/go.uber.org/atomic/time.go temporal-1.22.5/src/vendor/go.uber.org/atomic/time.go --- temporal-1.21.5-1/src/vendor/go.uber.org/atomic/time.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.uber.org/atomic/time.go 2024-02-23 09:46:13.000000000 +0000 @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicwrapper. -// Copyright (c) 2020-2022 Uber Technologies, Inc. +// Copyright (c) 2020-2023 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal diff -Nru temporal-1.21.5-1/src/vendor/go.uber.org/atomic/uint32.go temporal-1.22.5/src/vendor/go.uber.org/atomic/uint32.go --- temporal-1.21.5-1/src/vendor/go.uber.org/atomic/uint32.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.uber.org/atomic/uint32.go 2024-02-23 09:46:13.000000000 +0000 @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicint. -// Copyright (c) 2020-2022 Uber Technologies, Inc. +// Copyright (c) 2020-2023 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal diff -Nru temporal-1.21.5-1/src/vendor/go.uber.org/atomic/uint64.go temporal-1.22.5/src/vendor/go.uber.org/atomic/uint64.go --- temporal-1.21.5-1/src/vendor/go.uber.org/atomic/uint64.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.uber.org/atomic/uint64.go 2024-02-23 09:46:13.000000000 +0000 @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicint. -// Copyright (c) 2020-2022 Uber Technologies, Inc. +// Copyright (c) 2020-2023 Uber Technologies, Inc. 
// // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal diff -Nru temporal-1.21.5-1/src/vendor/go.uber.org/atomic/uintptr.go temporal-1.22.5/src/vendor/go.uber.org/atomic/uintptr.go --- temporal-1.21.5-1/src/vendor/go.uber.org/atomic/uintptr.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.uber.org/atomic/uintptr.go 2024-02-23 09:46:13.000000000 +0000 @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicint. -// Copyright (c) 2020-2022 Uber Technologies, Inc. +// Copyright (c) 2020-2023 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal diff -Nru temporal-1.21.5-1/src/vendor/go.uber.org/dig/CHANGELOG.md temporal-1.22.5/src/vendor/go.uber.org/dig/CHANGELOG.md --- temporal-1.21.5-1/src/vendor/go.uber.org/dig/CHANGELOG.md 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.uber.org/dig/CHANGELOG.md 2024-02-23 09:46:13.000000000 +0000 @@ -4,6 +4,16 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). +## [1.17.0] - 2023-05-02 +### Added +- Allow using `dig.As` with `dig.Group`. +- Add `FillInvokeInfo` Option and `InvokeInfo` struct to help + extract the types requested by an `Invoke` statement. +- To get visibility into constructor and decorator calls, introduce + `WithCallback` Option to provide callback functions. + +[1.17.0]: https://github.com/uber-go/dig/compare/v1.16.1...v1.17.0 + ## [1.16.1] - 2023-01-10 ### Fixed - A panic when `DryRun` was used with `Decorate`. diff -Nru temporal-1.21.5-1/src/vendor/go.uber.org/dig/callback.go temporal-1.22.5/src/vendor/go.uber.org/dig/callback.go --- temporal-1.21.5-1/src/vendor/go.uber.org/dig/callback.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.uber.org/dig/callback.go 2024-02-23 09:46:13.000000000 +0000 @@ -0,0 +1,108 @@ +// Copyright (c) 2023 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package dig + +// CallbackInfo contains information about a provided function or decorator +// called by Dig, and is passed to a [Callback] registered with +// [WithProviderCallback] or [WithDecoratorCallback]. 
+type CallbackInfo struct { + + // Name is the name of the function in the format: + // . + Name string + + // Error contains the error returned by the [Callback]'s associated + // function, if any. When used in conjunction with [RecoverFromPanics], + // this will be set to a [PanicError] when the function panics. + Error error +} + +// Callback is a function that can be registered with a provided function +// or decorator with [WithCallback] to cause it to be called after the +// provided function or decorator is run. +type Callback func(CallbackInfo) + +// WithProviderCallback returns a [ProvideOption] which has Dig call +// the passed in [Callback] after the corresponding constructor finishes running. +// +// For example, the following prints a completion message +// after "myConstructor" finishes, including the error if any: +// +// c := dig.New() +// myCallback := func(ci CallbackInfo) { +// var errorAdd string +// if ci.Error != nil { +// errorAdd = fmt.Sprintf("with error: %v", ci.Error) +// } +// fmt.Printf("%q finished%v", ci.Name, errorAdd) +// } +// c.Provide(myConstructor, WithProviderCallback(myCallback)), +// +// Callbacks can also be specified for Decorators with [WithDecoratorCallback]. +// +// See [CallbackInfo] for more info on the information passed to the [Callback]. +func WithProviderCallback(callback Callback) ProvideOption { + return withCallbackOption{ + callback: callback, + } +} + +// WithDecoratorCallback returns a [DecorateOption] which has Dig call +// the passed in [Callback] after the corresponding decorator finishes running. +// +// For example, the following prints a completion message +// after "myDecorator" finishes, including the error if any: +// +// c := dig.New() +// myCallback := func(ci CallbackInfo) { +// var errorAdd string +// if ci.Error != nil { +// errorAdd = fmt.Sprintf("with error: %v", ci.Error) +// } +// fmt.Printf("%q finished%v", ci.Name, errorAdd) +// } +// c.Decorate(myDecorator, WithDecoratorCallback(myCallback)), +// +// Callbacks can also be specified for Constructors with [WithProviderCallback]. +// +// See [CallbackInfo] for more info on the information passed to the [Callback]. +func WithDecoratorCallback(callback Callback) DecorateOption { + return withCallbackOption{ + callback: callback, + } +} + +type withCallbackOption struct { + callback Callback +} + +var ( + _ ProvideOption = withCallbackOption{} + _ DecorateOption = withCallbackOption{} +) + +func (o withCallbackOption) applyProvideOption(po *provideOptions) { + po.Callback = o.callback +} + +func (o withCallbackOption) apply(do *decorateOptions) { + do.Callback = o.callback +} diff -Nru temporal-1.21.5-1/src/vendor/go.uber.org/dig/constructor.go temporal-1.22.5/src/vendor/go.uber.org/dig/constructor.go --- temporal-1.21.5-1/src/vendor/go.uber.org/dig/constructor.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.uber.org/dig/constructor.go 2024-02-23 09:46:13.000000000 +0000 @@ -54,15 +54,18 @@ // Type information about constructor results. resultList resultList - // order of this node in each Scopes' graphHolders. + // Order of this node in each Scopes' graphHolders. orders map[*Scope]int - // scope this node is part of + // Scope this node is part of. s *Scope - // scope this node was originally provided to. + // Scope this node was originally provided to. // This is different from s if and only if the constructor was Provided with ExportOption. origS *Scope + + // Callback for this provided function, if there is one. 
+ callback Callback } type constructorOptions struct { @@ -72,6 +75,7 @@ ResultGroup string ResultAs []interface{} Location *digreflect.Func + Callback Callback } func newConstructorNode(ctor interface{}, s *Scope, origS *Scope, opts constructorOptions) (*constructorNode, error) { @@ -111,6 +115,7 @@ orders: make(map[*Scope]int), s: s, origS: origS, + callback: opts.Callback, } s.newGraphNode(n, n.orders) return n, nil @@ -142,6 +147,24 @@ } } + args, err := n.paramList.BuildList(c) + if err != nil { + return errArgumentsFailed{ + Func: n.location, + Reason: err, + } + } + + if n.callback != nil { + // Wrap in separate func to include PanicErrors + defer func() { + n.callback(CallbackInfo{ + Name: fmt.Sprintf("%v.%v", n.location.Package, n.location.Name), + Error: err, + }) + }() + } + if n.s.recoverFromPanics { defer func() { if p := recover(); p != nil { @@ -153,17 +176,9 @@ }() } - args, err := n.paramList.BuildList(c) - if err != nil { - return errArgumentsFailed{ - Func: n.location, - Reason: err, - } - } - receiver := newStagingContainerWriter() results := c.invoker()(reflect.ValueOf(n.ctor), args) - if err := n.resultList.ExtractList(receiver, false /* decorating */, results); err != nil { + if err = n.resultList.ExtractList(receiver, false /* decorating */, results); err != nil { return errConstructorFailed{Func: n.location, Reason: err} } @@ -173,7 +188,6 @@ // container. receiver.Commit(n.s) n.called = true - return nil } diff -Nru temporal-1.21.5-1/src/vendor/go.uber.org/dig/decorate.go temporal-1.22.5/src/vendor/go.uber.org/dig/decorate.go --- temporal-1.21.5-1/src/vendor/go.uber.org/dig/decorate.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.uber.org/dig/decorate.go 2024-02-23 09:46:13.000000000 +0000 @@ -60,14 +60,17 @@ // Results of the decorator. results resultList - // order of this node in each Scopes' graphHolders. + // Order of this node in each Scopes' graphHolders. orders map[*Scope]int - // scope this node was originally provided to. + // Scope this node was originally provided to. s *Scope + + // Callback for this decorator, if there is one. 
+ callback Callback } -func newDecoratorNode(dcor interface{}, s *Scope) (*decoratorNode, error) { +func newDecoratorNode(dcor interface{}, s *Scope, opts decorateOptions) (*decoratorNode, error) { dval := reflect.ValueOf(dcor) dtype := dval.Type() dptr := dval.Pointer() @@ -91,6 +94,7 @@ params: pl, results: rl, s: s, + callback: opts.Callback, } return n, nil } @@ -109,6 +113,24 @@ } } + args, err := n.params.BuildList(n.s) + if err != nil { + return errArgumentsFailed{ + Func: n.location, + Reason: err, + } + } + + if n.callback != nil { + // Wrap in separate func to include PanicErrors + defer func() { + n.callback(CallbackInfo{ + Name: fmt.Sprintf("%v.%v", n.location.Package, n.location.Name), + Error: err, + }) + }() + } + if n.s.recoverFromPanics { defer func() { if p := recover(); p != nil { @@ -120,16 +142,8 @@ }() } - args, err := n.params.BuildList(n.s) - if err != nil { - return errArgumentsFailed{ - Func: n.location, - Reason: err, - } - } - results := s.invoker()(reflect.ValueOf(n.dcor), args) - if err := n.results.ExtractList(n.s, true /* decorated */, results); err != nil { + if err = n.results.ExtractList(n.s, true /* decorated */, results); err != nil { return err } n.state = decoratorCalled @@ -146,7 +160,8 @@ } type decorateOptions struct { - Info *DecorateInfo + Info *DecorateInfo + Callback Callback } // FillDecorateInfo is a DecorateOption that writes info on what Dig was @@ -223,7 +238,7 @@ opt.apply(&options) } - dn, err := newDecoratorNode(decorator, s) + dn, err := newDecoratorNode(decorator, s, options) if err != nil { return err } diff -Nru temporal-1.21.5-1/src/vendor/go.uber.org/dig/invoke.go temporal-1.22.5/src/vendor/go.uber.org/dig/invoke.go --- temporal-1.21.5-1/src/vendor/go.uber.org/dig/invoke.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.uber.org/dig/invoke.go 2024-02-23 09:46:13.000000000 +0000 @@ -22,16 +22,48 @@ import ( "fmt" - "reflect" - "go.uber.org/dig/internal/digreflect" "go.uber.org/dig/internal/graph" + "reflect" ) -// An InvokeOption modifies the default behavior of Invoke. It's included for -// future functionality; currently, there are no concrete implementations. +// An InvokeOption modifies the default behavior of Invoke. type InvokeOption interface { - unimplemented() + applyInvokeOption(*invokeOptions) +} + +type invokeOptions struct { + Info *InvokeInfo +} + +// InvokeInfo provides information about an Invoke. +type InvokeInfo struct { + Inputs []*Input +} + +// FillInvokeInfo is an InvokeOption that writes information on the types +// accepted by the Invoke function into the specified InvokeInfo. +// For example: +// +// var info dig.InvokeInfo +// err := c.Invoke(func(string, int){}, dig.FillInvokeInfo(&info)) +// +// info.Inputs[0].String() will be string. +// info.Inputs[1].String() will be int. +func FillInvokeInfo(info *InvokeInfo) InvokeOption { + return fillInvokeInfoOption{info: info} +} + +type fillInvokeInfoOption struct { + info *InvokeInfo +} + +func (o fillInvokeInfoOption) String() string { + return fmt.Sprintf("FillInvokeInfo(%p)", o.info) +} + +func (o fillInvokeInfoOption) applyInvokeOption(opts *invokeOptions) { + opts.Info = o.info } // Invoke runs the given function after instantiating its dependencies. 
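The FillInvokeInfo option added in the invoke.go hunk above can be exercised roughly as follows. This is a minimal sketch against the dig API shown in this diff; the container contents and printed values are illustrative assumptions, not taken from Temporal's code.

    package main

    import (
    	"fmt"

    	"go.uber.org/dig"
    )

    func main() {
    	c := dig.New()
    	_ = c.Provide(func() string { return "hello" })
    	_ = c.Provide(func() int { return 42 })

    	// Ask dig to record the parameter types requested by this Invoke.
    	var info dig.InvokeInfo
    	if err := c.Invoke(func(s string, n int) {
    		fmt.Println(s, n)
    	}, dig.FillInvokeInfo(&info)); err != nil {
    		panic(err)
    	}

    	for _, in := range info.Inputs {
    		fmt.Println(in.String()) // prints "string", then "int"
    	}
    }
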
@@ -105,6 +137,26 @@ }() } + var options invokeOptions + for _, o := range opts { + o.applyInvokeOption(&options) + } + + // Record info for the invoke if requested + if info := options.Info; info != nil { + params := pl.DotParam() + info.Inputs = make([]*Input, len(params)) + for i, p := range params { + info.Inputs[i] = &Input{ + t: p.Type, + optional: p.Optional, + name: p.Name, + group: p.Group, + } + } + + } + returned := s.invokerFn(reflect.ValueOf(function), args) if len(returned) == 0 { return nil diff -Nru temporal-1.21.5-1/src/vendor/go.uber.org/dig/provide.go temporal-1.22.5/src/vendor/go.uber.org/dig/provide.go --- temporal-1.21.5-1/src/vendor/go.uber.org/dig/provide.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.uber.org/dig/provide.go 2024-02-23 09:46:13.000000000 +0000 @@ -43,6 +43,7 @@ As []interface{} Location *digreflect.Func Exported bool + Callback Callback } func (o *provideOptions) Validate() error { @@ -51,10 +52,6 @@ return newErrInvalidInput( fmt.Sprintf("cannot use named values with value groups: name:%q provided with group:%q", o.Name, o.Group), nil) } - if len(o.As) > 0 { - return newErrInvalidInput( - fmt.Sprintf("cannot use dig.As with value groups: dig.As provided with group:%q", o.Group), nil) - } } // Names must be representable inside a backquoted string. The only @@ -471,6 +468,7 @@ ResultGroup: opts.Group, ResultAs: opts.As, Location: opts.Location, + Callback: opts.Callback, }, ) if err != nil { @@ -639,6 +637,10 @@ // value there. k := key{group: r.Group, t: r.Type} cv.keyPaths[k] = path + for _, asType := range r.As { + k := key{group: r.Group, t: asType} + cv.keyPaths[k] = path + } } return cv diff -Nru temporal-1.21.5-1/src/vendor/go.uber.org/dig/result.go temporal-1.22.5/src/vendor/go.uber.org/dig/result.go --- temporal-1.21.5-1/src/vendor/go.uber.org/dig/result.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.uber.org/dig/result.go 2024-02-23 09:46:13.000000000 +0000 @@ -88,6 +88,24 @@ fmt.Sprintf("cannot parse group %q", opts.Group), err) } rg := resultGrouped{Type: t, Group: g.Name, Flatten: g.Flatten} + if len(opts.As) > 0 { + var asTypes []reflect.Type + for _, as := range opts.As { + ifaceType := reflect.TypeOf(as).Elem() + if ifaceType == t { + continue + } + if !t.Implements(ifaceType) { + return nil, newErrInvalidInput( + fmt.Sprintf("invalid dig.As: %v does not implement %v", t, ifaceType), nil) + } + asTypes = append(asTypes, ifaceType) + } + if len(asTypes) > 0 { + rg.Type = asTypes[0] + rg.As = asTypes[1:] + } + } if g.Soft { return nil, newErrInvalidInput(fmt.Sprintf( "cannot use soft with result value groups: soft was used with group:%q", g.Name), nil) @@ -441,17 +459,27 @@ // as a group. Requires the value's slice to be a group. If set, Type will be // the type of individual elements rather than the group. Flatten bool + + // If specified, this is a list of types which the value will be made + // available as, in addition to its own type. 
+ As []reflect.Type } func (rt resultGrouped) DotResult() []*dot.Result { - return []*dot.Result{ - { - Node: &dot.Node{ - Type: rt.Type, - Group: rt.Group, - }, + dotResults := make([]*dot.Result, 0, len(rt.As)+1) + dotResults = append(dotResults, &dot.Result{ + Node: &dot.Node{ + Type: rt.Type, + Group: rt.Group, }, + }) + + for _, asType := range rt.As { + dotResults = append(dotResults, &dot.Result{ + Node: &dot.Node{Type: asType, Group: rt.Group}, + }) } + return dotResults } // newResultGrouped(f) builds a new resultGrouped from the provided field. @@ -491,6 +519,9 @@ // Decorated values are always flattened. if !decorated && !rt.Flatten { cw.submitGroupedValue(rt.Group, rt.Type, v) + for _, asType := range rt.As { + cw.submitGroupedValue(rt.Group, asType, v) + } return } diff -Nru temporal-1.21.5-1/src/vendor/go.uber.org/dig/version.go temporal-1.22.5/src/vendor/go.uber.org/dig/version.go --- temporal-1.21.5-1/src/vendor/go.uber.org/dig/version.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.uber.org/dig/version.go 2024-02-23 09:46:13.000000000 +0000 @@ -21,4 +21,4 @@ package dig // Version of the library. -const Version = "1.16.1" +const Version = "1.17.0" diff -Nru temporal-1.21.5-1/src/vendor/go.uber.org/fx/.codecov.yml temporal-1.22.5/src/vendor/go.uber.org/fx/.codecov.yml --- temporal-1.21.5-1/src/vendor/go.uber.org/fx/.codecov.yml 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.uber.org/fx/.codecov.yml 2024-02-23 09:46:13.000000000 +0000 @@ -1,5 +1,6 @@ ignore: - "docs/ex/**/*.go" + - "internal/e2e/**/*.go" coverage: range: 80..100 diff -Nru temporal-1.21.5-1/src/vendor/go.uber.org/fx/CHANGELOG.md temporal-1.22.5/src/vendor/go.uber.org/fx/CHANGELOG.md --- temporal-1.21.5-1/src/vendor/go.uber.org/fx/CHANGELOG.md 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.uber.org/fx/CHANGELOG.md 2024-02-23 09:46:13.000000000 +0000 @@ -7,10 +7,41 @@ All notable changes to this project will be documented in this file. -The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) -and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [1.20.0](https://github.com/uber-go/fx/compare/v1.19.3...v1.20.0) - 2023-06-12 + +### Added +- A new event `fxevent.Run` is now emitted when Fx runs a constructor, decorator, + or supply/replace stub. + +### Changed +- `fx.Populate` now works with `fx.Annotate`. +- Upgrade Dig dependency to v1.17.0. + +## [1.19.3](https://github.com/uber-go/fx/compare/v1.19.2...v1.19.3) - 2023-04-17 + +### Changed +- Fixed several typos in docs. +- WASM build support. +- Annotating In and Out structs with From/As annotations generated invalid results. + The annotation check now blocks this. +- `Shutdown`: Support calling from `Invoke`. + +### Deprecated +- Deprecate `ShutdownTimeout` option. + +### Fixed +- Respect Shutdowner ExitCode from calling `Run`. + +## [1.19.2](https://github.com/uber-go/fx/compare/v1.19.1...v1.19.2) - 2023-02-21 + +### Changed +- Update Dig dependency to v1.16.1. + +## [1.19.1](https://github.com/uber-go/fx/compare/v1.19.0...v1.19.1) - 2023-01-10 -## [1.19.1](https://github.com/uber-go/fx/compare/v1.18.0...v1.19.1) - 2023-01-10 ### Changed - Calling `fx.Stop()` after the `App` has already stopped no longer errors out. 
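The fx 1.20.0 changelog entry above notes that fx.Populate now works with fx.Annotate (the corresponding populate.go change appears later in this diff). A minimal sketch of that combination; the Config type and the "server" name tag are assumptions made purely for illustration.

    package main

    import (
    	"fmt"

    	"go.uber.org/fx"
    )

    type Config struct{ Addr string }

    func main() {
    	var cfg *Config
    	app := fx.New(
    		fx.Provide(
    			// Provide a *Config under the name "server".
    			fx.Annotate(
    				func() *Config { return &Config{Addr: ":8080"} },
    				fx.ResultTags(`name:"server"`),
    			),
    		),
    		// As of fx v1.20.0, Populate accepts annotated pointers directly.
    		fx.Populate(
    			fx.Annotate(&cfg, fx.ParamTags(`name:"server"`)),
    		),
    	)
    	if err := app.Err(); err != nil {
    		panic(err)
    	}
    	fmt.Println(cfg.Addr) // ":8080"
    }
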
@@ -19,6 +50,7 @@ after running for startTimeout duration. ## [1.19.0](https://github.com/uber-go/fx/compare/v1.18.2...v1.19.0) - 2023-01-03 + ### Added - `fx.RecoverFromPanics` Option which allows Fx to recover from user-provided constructors and invoked functions. diff -Nru temporal-1.21.5-1/src/vendor/go.uber.org/fx/CONTRIBUTING.md temporal-1.22.5/src/vendor/go.uber.org/fx/CONTRIBUTING.md --- temporal-1.21.5-1/src/vendor/go.uber.org/fx/CONTRIBUTING.md 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.uber.org/fx/CONTRIBUTING.md 2024-02-23 09:46:13.000000000 +0000 @@ -104,7 +104,7 @@ The review process will go more smoothly if you: - add tests for new functionality -- write a [good commit message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html) +- write a [good commit message](https://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html) - maintain backward compatibility - follow our [style guide](https://github.com/uber-go/guide/blob/master/style.md) diff -Nru temporal-1.21.5-1/src/vendor/go.uber.org/fx/Makefile temporal-1.22.5/src/vendor/go.uber.org/fx/Makefile --- temporal-1.21.5-1/src/vendor/go.uber.org/fx/Makefile 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.uber.org/fx/Makefile 2024-02-23 09:46:13.000000000 +0000 @@ -9,7 +9,7 @@ find . '(' -path '*/.*' -o -path './vendor' -o -path '*/testdata/*' ')' -prune \ -o -name '*.go' -print | cut -b3-) -MODULES = . ./tools ./docs +MODULES = . ./tools ./docs ./internal/e2e # 'make cover' should not run on docs by default. # We run that separately explicitly on a specific platform. diff -Nru temporal-1.21.5-1/src/vendor/go.uber.org/fx/README.md temporal-1.22.5/src/vendor/go.uber.org/fx/README.md --- temporal-1.21.5-1/src/vendor/go.uber.org/fx/README.md 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.uber.org/fx/README.md 2024-02-23 09:46:13.000000000 +0000 @@ -27,7 +27,7 @@ ## Stability -This library is `v1` and follows [SemVer](http://semver.org/) strictly. +This library is `v1` and follows [SemVer](https://semver.org/) strictly. No breaking changes will be made to exported APIs before `v2.0.0`. diff -Nru temporal-1.21.5-1/src/vendor/go.uber.org/fx/annotated.go temporal-1.22.5/src/vendor/go.uber.org/fx/annotated.go --- temporal-1.21.5-1/src/vendor/go.uber.org/fx/annotated.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.uber.org/fx/annotated.go 2024-02-23 09:46:13.000000000 +0000 @@ -134,11 +134,85 @@ return e.err.Error() } +// Unwrap the wrapped error. +func (e *annotationError) Unwrap() error { + return e.err +} + type paramTagsAnnotation struct { tags []string } var _ Annotation = paramTagsAnnotation{} +var ( + errTagSyntaxSpace = errors.New(`multiple tags are not separated by space`) + errTagKeySyntax = errors.New("tag key is invalid, Use group, name or optional as tag keys") + errTagValueSyntaxQuote = errors.New(`tag value should start with double quote. i.e. key:"value" `) + errTagValueSyntaxEndingQuote = errors.New(`tag value should end in double quote. i.e. key:"value" `) +) + +// Collections of key value pairs within a tag should be separated by a space. +// Eg: `group:"some" optional:"true"`. +func verifyTagsSpaceSeparated(tagIdx int, tag string) error { + if tagIdx > 0 && tag != "" && tag[0] != ' ' { + return errTagSyntaxSpace + } + return nil +} + +// verify tag values are delimited with double quotes. 
+func verifyValueQuote(value string) (string, error) { + // starting quote should be a double quote + if value[0] != '"' { + return "", errTagValueSyntaxQuote + } + // validate tag value is within quotes + i := 1 + for i < len(value) && value[i] != '"' { + if value[i] == '\\' { + i++ + } + i++ + } + if i >= len(value) { + return "", errTagValueSyntaxEndingQuote + } + return value[i+1:], nil + +} + +// Check whether the tag follows valid struct. +// format and returns an error if it's invalid. (i.e. not following +// tag:"value" space-separated list ) +// Currently dig accepts only 'name', 'group', 'optional' as valid tag keys. +func verifyAnnotateTag(tag string) error { + tagIdx := 0 + validKeys := map[string]struct{}{"group": {}, "optional": {}, "name": {}} + for ; tag != ""; tagIdx++ { + if err := verifyTagsSpaceSeparated(tagIdx, tag); err != nil { + return err + } + i := 0 + if strings.TrimSpace(tag) == "" { + return nil + } + // parsing the key i.e. till reaching colon : + for i < len(tag) && tag[i] != ':' { + i++ + } + key := strings.TrimSpace(tag[:i]) + if _, ok := validKeys[key]; !ok { + return errTagKeySyntax + } + value, err := verifyValueQuote(tag[i+1:]) + if err != nil { + return err + } + tag = value + } + return nil + +} // Given func(T1, T2, T3, ..., TN), this generates a type roughly // equivalent to, @@ -154,11 +228,19 @@ // // If there has already been a ParamTag that was applied, this // will return an error. +// +// If the tag is invalid and has mismatched quotation for example, +// (`tag_name:"tag_value') , this will return an error. func (pt paramTagsAnnotation) apply(ann *annotated) error { if len(ann.ParamTags) > 0 { return errors.New("cannot apply more than one line of ParamTags") } + for _, tag := range pt.tags { + if err := verifyAnnotateTag(tag); err != nil { + return err + } + } ann.ParamTags = pt.tags return nil } @@ -256,6 +338,9 @@ // ParamTags is an Annotation that annotates the parameter(s) of a function. // When multiple tags are specified, each tag is mapped to the corresponding // positional parameter. +// +// ParamTags cannot be used in a function that takes an fx.In struct as a +// parameter. func ParamTags(tags ...string) Annotation { return paramTagsAnnotation{tags} } @@ -280,10 +365,18 @@ // // If there has already been a ResultTag that was applied, this // will return an error. +// +// If the tag is invalid and has mismatched quotation for example, +// (`tag_name:"tag_value') , this will return an error. func (rt resultTagsAnnotation) apply(ann *annotated) error { if len(ann.ResultTags) > 0 { return errors.New("cannot apply more than one line of ResultTags") } + for _, tag := range rt.tags { + if err := verifyAnnotateTag(tag); err != nil { + return err + } + } ann.ResultTags = rt.tags return nil } @@ -430,6 +523,8 @@ // ResultTags is an Annotation that annotates the result(s) of a function. // When multiple tags are specified, each tag is mapped to the corresponding // positional result. +// +// ResultTags cannot be used on a function that returns an fx.Out struct. 
func ResultTags(tags ...string) Annotation { return resultTagsAnnotation{tags} } @@ -554,7 +649,7 @@ ) // buildHookInstaller returns a function that appends a hook to Lifecycle when called, -// along with the new paramter types and a function that maps arguments to the annotated constructor +// along with the new parameter types and a function that maps arguments to the annotated constructor func (la *lifecycleHookAnnotation) buildHookInstaller(ann *annotated) ( hookInstaller reflect.Value, paramTypes []reflect.Type, @@ -802,7 +897,7 @@ return args } } - // If params are tagged or there's an untagged variadic arguement, + // If params are tagged or there's an untagged variadic argument, // add a Lifecycle field to the param struct if len(paramTypes) > 0 && isIn(paramTypes[0]) { taggedParam := paramTypes[0] @@ -911,8 +1006,8 @@ // } // // Only one OnStart annotation may be applied to a given function at a time, -// however functions may be annotated with other types of lifecylce Hooks, such -// as OnStart. The hook function passed into OnStart cannot take any arguments +// however functions may be annotated with other types of lifecycle Hooks, such +// as OnStop. The hook function passed into OnStart cannot take any arguments // outside of the annotated constructor's existing dependencies or results, except // a context.Context. func OnStart(onStart interface{}) Annotation { @@ -975,8 +1070,8 @@ // } // // Only one OnStop annotation may be applied to a given function at a time, -// however functions may be annotated with other types of lifecylce Hooks, such -// as OnStop. The hook function passed into OnStop cannot take any arguments +// however functions may be annotated with other types of lifecycle Hooks, such +// as OnStart. The hook function passed into OnStop cannot take any arguments // outside of the annotated constructor's existing dependencies or results, except // a context.Context. func OnStop(onStop interface{}) Annotation { @@ -1042,6 +1137,8 @@ // w, r := a() // return w, r // } +// +// As annotation cannot be used in a function that returns an [Out] struct as a return type. func As(interfaces ...interface{}) Annotation { return &asAnnotation{targets: interfaces} } @@ -1227,6 +1324,9 @@ // fx.Provide(func(r1 *FooRunner, r2 *BarRunner) *RunnerWraps { // return NewRunnerWraps(r1, r2) // }) +// +// From annotation cannot be used in a function that takes an [In] struct as a +// parameter. func From(interfaces ...interface{}) Annotation { return &fromAnnotation{targets: interfaces} } @@ -1496,8 +1596,8 @@ } // checks and returns a non-nil error if the target function: -// - returns an fx.Out struct as a result. -// - takes in an fx.In struct as a parameter. +// - returns an fx.Out struct as a result and has either a ResultTags or an As annotation +// - takes in an fx.In struct as a parameter and has either a ParamTags or a From annotation // - has an error result not as the last result. 
func (ann *annotated) typeCheckOrigFn() error { ft := reflect.TypeOf(ann.Target) @@ -1513,18 +1613,23 @@ if ot.Kind() != reflect.Struct { continue } - if dig.IsOut(reflect.New(ft.Out(i)).Elem().Interface()) { - return errors.New("fx.Out structs cannot be annotated") + if !dig.IsOut(reflect.New(ft.Out(i)).Elem().Interface()) { + continue + } + if len(ann.ResultTags) > 0 || len(ann.As) > 0 { + return errors.New("fx.Out structs cannot be annotated with fx.ResultTags or fx.As") } } - for i := 0; i < ft.NumIn(); i++ { it := ft.In(i) if it.Kind() != reflect.Struct { continue } - if dig.IsIn(reflect.New(ft.In(i)).Elem().Interface()) { - return errors.New("fx.In structs cannot be annotated") + if !dig.IsIn(reflect.New(ft.In(i)).Elem().Interface()) { + continue + } + if len(ann.ParamTags) > 0 || len(ann.From) > 0 { + return errors.New("fx.In structs cannot be annotated with fx.ParamTags or fx.From") } } return nil @@ -1587,9 +1692,6 @@ // return result{GW: NewGateway(p.RO, p.RW)} // }) // -// Annotate cannot be used on functions that takes in or returns -// [In] or [Out] structs. -// // Using the same annotation multiple times is invalid. // For example, the following will fail with an error: // diff -Nru temporal-1.21.5-1/src/vendor/go.uber.org/fx/app.go temporal-1.22.5/src/vendor/go.uber.org/fx/app.go --- temporal-1.21.5-1/src/vendor/go.uber.org/fx/app.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.uber.org/fx/app.go 2024-02-23 09:46:13.000000000 +0000 @@ -482,7 +482,7 @@ // Run decorators before executing any Invokes -- including the one // inside constructCustomLogger. - app.err = multierr.Append(app.err, app.root.decorate()) + app.err = multierr.Append(app.err, app.root.decorateAll()) // If you are thinking about returning here after provides: do not (just yet)! // If a custom logger was being used, we're still buffering messages. @@ -574,12 +574,12 @@ // Historically, we do not os.Exit(0) even though most applications // cede control to Fx with they call app.Run. To avoid a breaking // change, never os.Exit for success. - if code := app.run(app.Done()); code != 0 { + if code := app.run(app.Wait); code != 0 { app.exit(code) } } -func (app *App) run(done <-chan os.Signal) (exitCode int) { +func (app *App) run(done func() <-chan ShutdownSignal) (exitCode int) { startCtx, cancel := app.clock.WithTimeout(context.Background(), app.StartTimeout()) defer cancel() @@ -587,8 +587,9 @@ return 1 } - sig := <-done - app.log().LogEvent(&fxevent.Stopping{Signal: sig}) + sig := <-done() + app.log().LogEvent(&fxevent.Stopping{Signal: sig.Signal}) + exitCode = sig.ExitCode stopCtx, cancel := app.clock.WithTimeout(context.Background(), app.StopTimeout()) defer cancel() @@ -597,7 +598,7 @@ return 1 } - return 0 + return exitCode } // Err returns any error encountered during New's initialization. 
See the @@ -762,7 +763,7 @@ } // errHookCallbackExited is returned when a hook callback does not finish executing -var errHookCallbackExited = fmt.Errorf("goroutine exited without returning") +var errHookCallbackExited = errors.New("goroutine exited without returning") func withTimeout(ctx context.Context, param *withTimeoutParams) error { c := make(chan error, 1) diff -Nru temporal-1.21.5-1/src/vendor/go.uber.org/fx/app_wasm.go temporal-1.22.5/src/vendor/go.uber.org/fx/app_wasm.go --- temporal-1.21.5-1/src/vendor/go.uber.org/fx/app_wasm.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.uber.org/fx/app_wasm.go 2024-02-23 09:46:13.000000000 +0000 @@ -0,0 +1,29 @@ +// Copyright (c) 2023 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +//go:build js && wasm +// +build js,wasm + +package fx + +import "syscall" + +const _sigINT = syscall.SIGINT +const _sigTERM = syscall.SIGTERM diff -Nru temporal-1.21.5-1/src/vendor/go.uber.org/fx/decorate.go temporal-1.22.5/src/vendor/go.uber.org/fx/decorate.go --- temporal-1.21.5-1/src/vendor/go.uber.org/fx/decorate.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.uber.org/fx/decorate.go 2024-02-23 09:46:13.000000000 +0000 @@ -22,6 +22,7 @@ import ( "fmt" + "reflect" "strings" "go.uber.org/dig" @@ -207,7 +208,8 @@ Stack fxreflect.Stack // Whether this decorator was specified via fx.Replace - IsReplace bool + IsReplace bool + ReplaceType reflect.Type // set only if IsReplace } func runDecorator(c container, d decorator, opts ...dig.DecorateOption) (err error) { diff -Nru temporal-1.21.5-1/src/vendor/go.uber.org/fx/fxevent/console.go temporal-1.22.5/src/vendor/go.uber.org/fx/fxevent/console.go --- temporal-1.21.5-1/src/vendor/go.uber.org/fx/fxevent/console.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.uber.org/fx/fxevent/console.go 2024-02-23 09:46:14.000000000 +0000 @@ -82,7 +82,6 @@ if e.Err != nil { l.logf("Error after options were applied: %+v", e.Err) } - case *Replaced: for _, rtype := range e.OutputTypeNames { if e.ModuleName != "" { @@ -105,6 +104,16 @@ if e.Err != nil { l.logf("Error after options were applied: %+v", e.Err) } + case *Run: + var moduleStr string + if e.ModuleName != "" { + moduleStr = fmt.Sprintf(" from module %q", e.ModuleName) + } + l.logf("RUN\t%v: %v%v", e.Kind, e.Name, moduleStr) + if e.Err != nil { + l.logf("Error returned: %+v", e.Err) + } + case *Invoking: if e.ModuleName != "" { l.logf("INVOKE\t\t%s from module %q", e.FunctionName, e.ModuleName) diff -Nru temporal-1.21.5-1/src/vendor/go.uber.org/fx/fxevent/event.go temporal-1.22.5/src/vendor/go.uber.org/fx/fxevent/event.go --- temporal-1.21.5-1/src/vendor/go.uber.org/fx/fxevent/event.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.uber.org/fx/fxevent/event.go 2024-02-23 09:46:14.000000000 +0000 @@ -39,6 +39,7 @@ func (*Provided) event() {} func (*Replaced) event() {} func (*Decorated) event() {} +func (*Run) event() {} func (*Invoking) event() {} func (*Invoked) event() {} func (*Stopping) event() {} @@ -48,7 +49,7 @@ func (*Started) event() {} func (*LoggerInitialized) event() {} -// OnStartExecuting is emitted before an OnStart hook is exeucted. +// OnStartExecuting is emitted before an OnStart hook is executed. type OnStartExecuting struct { // FunctionName is the name of the function that will be executed. FunctionName string @@ -78,7 +79,7 @@ Err error } -// OnStopExecuting is emitted before an OnStop hook is exeucted. +// OnStopExecuting is emitted before an OnStop hook is executed. type OnStopExecuting struct { // FunctionName is the name of the function that will be executed. FunctionName string @@ -109,6 +110,9 @@ // TypeName is the name of the type of value that was added. TypeName string + // StackTrace is the stack trace of the call to Supply. + StackTrace []string + // ModuleName is the name of the module in which the value was added to. ModuleName string @@ -122,6 +126,9 @@ // Fx. ConstructorName string + // StackTrace is the stack trace of where the constructor was provided to Fx. + StackTrace []string + // OutputTypeNames is a list of names of types that are produced by // this constructor. OutputTypeNames []string @@ -142,6 +149,9 @@ // OutputTypeNames is a list of names of types that were replaced. 
OutputTypeNames []string + // StackTrace is the stack trace of the call to Replace. + StackTrace []string + // ModuleName is the name of the module in which the value was added to. ModuleName string @@ -155,6 +165,9 @@ // provided to Fx. DecoratorName string + // StackTrace is the stack trace of where the decorator was given to Fx. + StackTrace []string + // ModuleName is the name of the module in which the value was added to. ModuleName string @@ -166,6 +179,23 @@ Err error } +// Run is emitted after a constructor, decorator, or supply/replace stub is run by Fx. +type Run struct { + // Name is the name of the function that was run. + Name string + + // Kind indicates which Fx option was used to pass along the function. + // It is either "provide", "decorate", "supply", or "replace". + Kind string + + // ModuleName is the name of the module in which the function belongs. + ModuleName string + + // Err is non-nil if the function returned an error. + // If fx.RecoverFromPanics is used, this will include panics. + Err error +} + // Invoking is emitted before we invoke a function specified with fx.Invoke. type Invoking struct { // FunctionName is the name of the function that will be invoked. diff -Nru temporal-1.21.5-1/src/vendor/go.uber.org/fx/fxevent/zap.go temporal-1.22.5/src/vendor/go.uber.org/fx/fxevent/zap.go --- temporal-1.21.5-1/src/vendor/go.uber.org/fx/fxevent/zap.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.uber.org/fx/fxevent/zap.go 2024-02-23 09:46:14.000000000 +0000 @@ -104,11 +104,13 @@ if e.Err != nil { l.logError("error encountered while applying options", zap.String("type", e.TypeName), + zap.Strings("stacktrace", e.StackTrace), moduleField(e.ModuleName), zap.Error(e.Err)) } else { l.logEvent("supplied", zap.String("type", e.TypeName), + zap.Strings("stacktrace", e.StackTrace), moduleField(e.ModuleName), ) } @@ -116,6 +118,7 @@ for _, rtype := range e.OutputTypeNames { l.logEvent("provided", zap.String("constructor", e.ConstructorName), + zap.Strings("stacktrace", e.StackTrace), moduleField(e.ModuleName), zap.String("type", rtype), maybeBool("private", e.Private), @@ -124,17 +127,20 @@ if e.Err != nil { l.logError("error encountered while applying options", moduleField(e.ModuleName), + zap.Strings("stacktrace", e.StackTrace), zap.Error(e.Err)) } case *Replaced: for _, rtype := range e.OutputTypeNames { l.logEvent("replaced", + zap.Strings("stacktrace", e.StackTrace), moduleField(e.ModuleName), zap.String("type", rtype), ) } if e.Err != nil { l.logError("error encountered while replacing", + zap.Strings("stacktrace", e.StackTrace), moduleField(e.ModuleName), zap.Error(e.Err)) } @@ -142,15 +148,32 @@ for _, rtype := range e.OutputTypeNames { l.logEvent("decorated", zap.String("decorator", e.DecoratorName), + zap.Strings("stacktrace", e.StackTrace), moduleField(e.ModuleName), zap.String("type", rtype), ) } if e.Err != nil { l.logError("error encountered while applying options", + zap.Strings("stacktrace", e.StackTrace), moduleField(e.ModuleName), zap.Error(e.Err)) } + case *Run: + if e.Err != nil { + l.logError("error returned", + zap.String("name", e.Name), + zap.String("kind", e.Kind), + moduleField(e.ModuleName), + zap.Error(e.Err), + ) + } else { + l.logEvent("run", + zap.String("name", e.Name), + zap.String("kind", e.Kind), + moduleField(e.ModuleName), + ) + } case *Invoking: // Do not log stack as it will make logs hard to read. 
l.logEvent("invoking", diff -Nru temporal-1.21.5-1/src/vendor/go.uber.org/fx/internal/fxreflect/stack.go temporal-1.22.5/src/vendor/go.uber.org/fx/internal/fxreflect/stack.go --- temporal-1.21.5-1/src/vendor/go.uber.org/fx/internal/fxreflect/stack.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.uber.org/fx/internal/fxreflect/stack.go 2024-02-23 09:46:14.000000000 +0000 @@ -85,14 +85,23 @@ // bar/baz/qux.go:12 type Stack []Frame -// Returns a single-line, semi-colon representation of a Stack. For a -// multi-line representation, use %+v. +// String returns a single-line, semi-colon representation of a Stack. +// For a list of strings where each represents one frame, use Strings. +// For a cleaner multi-line representation, use %+v. func (fs Stack) String() string { + return strings.Join(fs.Strings(), "; ") +} + +// Strings returns a list of strings, each representing a frame in the stack. +// Each line will be in the form, +// +// foo/bar.Baz() (path/to/foo.go:42) +func (fs Stack) Strings() []string { items := make([]string, len(fs)) for i, f := range fs { items[i] = f.String() } - return strings.Join(items, "; ") + return items } // Format implements fmt.Formatter to handle "%+v". diff -Nru temporal-1.21.5-1/src/vendor/go.uber.org/fx/internal/lifecycle/lifecycle.go temporal-1.22.5/src/vendor/go.uber.org/fx/internal/lifecycle/lifecycle.go --- temporal-1.21.5-1/src/vendor/go.uber.org/fx/internal/lifecycle/lifecycle.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.uber.org/fx/internal/lifecycle/lifecycle.go 2024-02-23 09:46:14.000000000 +0000 @@ -265,7 +265,7 @@ } l.mu.Lock() - if l.state != started && l.state != incompleteStart { + if l.state != started && l.state != incompleteStart && l.state != starting { defer l.mu.Unlock() return nil } @@ -280,15 +280,18 @@ l.mu.Lock() l.stopRecords = make(HookRecords, 0, l.numStarted) + // Take a snapshot of hook state to avoid races. + allHooks := l.hooks[:] + numStarted := l.numStarted l.mu.Unlock() // Run backward from last successful OnStart. 
var errs []error - for ; l.numStarted > 0; l.numStarted-- { + for ; numStarted > 0; numStarted-- { if err := ctx.Err(); err != nil { return err } - hook := l.hooks[l.numStarted-1] + hook := allHooks[numStarted-1] if hook.OnStop == nil { continue } diff -Nru temporal-1.21.5-1/src/vendor/go.uber.org/fx/module.go temporal-1.22.5/src/vendor/go.uber.org/fx/module.go --- temporal-1.21.5-1/src/vendor/go.uber.org/fx/module.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.uber.org/fx/module.go 2024-02-23 09:46:13.000000000 +0000 @@ -146,34 +146,67 @@ return } + if p.IsSupply { + m.supply(p) + return + } + + funcName := fxreflect.FuncName(p.Target) var info dig.ProvideInfo - if err := runProvide(m.scope, p, dig.FillProvideInfo(&info), dig.Export(!p.Private)); err != nil { + opts := []dig.ProvideOption{ + dig.FillProvideInfo(&info), + dig.Export(!p.Private), + dig.WithProviderCallback(func(ci dig.CallbackInfo) { + m.log.LogEvent(&fxevent.Run{ + Name: funcName, + Kind: "provide", + ModuleName: m.name, + Err: ci.Error, + }) + }), + } + + if err := runProvide(m.scope, p, opts...); err != nil { m.app.err = err } - var ev fxevent.Event - switch { - case p.IsSupply: - ev = &fxevent.Supplied{ - TypeName: p.SupplyType.String(), - ModuleName: m.name, - Err: m.app.err, - } + outputNames := make([]string, len(info.Outputs)) + for i, o := range info.Outputs { + outputNames[i] = o.String() + } - default: - outputNames := make([]string, len(info.Outputs)) - for i, o := range info.Outputs { - outputNames[i] = o.String() - } + m.log.LogEvent(&fxevent.Provided{ + ConstructorName: funcName, + StackTrace: p.Stack.Strings(), + ModuleName: m.name, + OutputTypeNames: outputNames, + Err: m.app.err, + Private: p.Private, + }) +} - ev = &fxevent.Provided{ - ConstructorName: fxreflect.FuncName(p.Target), - ModuleName: m.name, - OutputTypeNames: outputNames, - Err: m.app.err, - Private: p.Private, - } +func (m *module) supply(p provide) { + typeName := p.SupplyType.String() + opts := []dig.ProvideOption{ + dig.Export(!p.Private), + dig.WithProviderCallback(func(ci dig.CallbackInfo) { + m.log.LogEvent(&fxevent.Run{ + Name: fmt.Sprintf("stub(%v)", typeName), + Kind: "supply", + ModuleName: m.name, + }) + }), + } + + if err := runProvide(m.scope, p, opts...); err != nil { + m.app.err = err } - m.log.LogEvent(ev) + + m.log.LogEvent(&fxevent.Supplied{ + TypeName: typeName, + StackTrace: p.Stack.Strings(), + ModuleName: m.name, + Err: m.app.err, + }) } // Constructs custom loggers for all modules in the tree @@ -253,38 +286,76 @@ return err } -func (m *module) decorate() (err error) { - for _, decorator := range m.decorators { - var info dig.DecorateInfo - err := runDecorator(m.scope, decorator, dig.FillDecorateInfo(&info)) - outputNames := make([]string, len(info.Outputs)) - for i, o := range info.Outputs { - outputNames[i] = o.String() - } - - if decorator.IsReplace { - m.log.LogEvent(&fxevent.Replaced{ - ModuleName: m.name, - OutputTypeNames: outputNames, - Err: err, - }) - } else { - - m.log.LogEvent(&fxevent.Decorated{ - DecoratorName: fxreflect.FuncName(decorator.Target), - ModuleName: m.name, - OutputTypeNames: outputNames, - Err: err, - }) - } - if err != nil { +func (m *module) decorateAll() error { + for _, d := range m.decorators { + if err := m.decorate(d); err != nil { return err } } + for _, m := range m.modules { - if err := m.decorate(); err != nil { + if err := m.decorateAll(); err != nil { return err } } return nil } + +func (m *module) decorate(d decorator) (err error) { + if d.IsReplace { 
+ return m.replace(d) + } + + funcName := fxreflect.FuncName(d.Target) + var info dig.DecorateInfo + opts := []dig.DecorateOption{ + dig.FillDecorateInfo(&info), + dig.WithDecoratorCallback(func(ci dig.CallbackInfo) { + m.log.LogEvent(&fxevent.Run{ + Name: funcName, + Kind: "decorate", + ModuleName: m.name, + Err: ci.Error, + }) + }), + } + + err = runDecorator(m.scope, d, opts...) + outputNames := make([]string, len(info.Outputs)) + for i, o := range info.Outputs { + outputNames[i] = o.String() + } + + m.log.LogEvent(&fxevent.Decorated{ + DecoratorName: funcName, + StackTrace: d.Stack.Strings(), + ModuleName: m.name, + OutputTypeNames: outputNames, + Err: err, + }) + + return err +} + +func (m *module) replace(d decorator) error { + typeName := d.ReplaceType.String() + opts := []dig.DecorateOption{ + dig.WithDecoratorCallback(func(ci dig.CallbackInfo) { + m.log.LogEvent(&fxevent.Run{ + Name: fmt.Sprintf("stub(%v)", typeName), + Kind: "replace", + ModuleName: m.name, + Err: ci.Error, + }) + }), + } + + err := runDecorator(m.scope, d, opts...) + m.log.LogEvent(&fxevent.Replaced{ + ModuleName: m.name, + StackTrace: d.Stack.Strings(), + OutputTypeNames: []string{typeName}, + Err: err, + }) + return err +} diff -Nru temporal-1.21.5-1/src/vendor/go.uber.org/fx/populate.go temporal-1.22.5/src/vendor/go.uber.org/fx/populate.go --- temporal-1.21.5-1/src/vendor/go.uber.org/fx/populate.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.uber.org/fx/populate.go 2024-02-23 09:46:14.000000000 +0000 @@ -30,22 +30,69 @@ // values that must be populated. Pointers to structs that embed In are // supported, which can be used to populate multiple values in a struct. // +// Annotating each pointer with ParamTags is also supported as a shorthand +// to passing a pointer to a struct that embeds In with field tags. For example: +// +// var a A +// var b B +// fx.Populate( +// fx.Annotate( +// &a, +// fx.ParamTags(`name:"A"`) +// ), +// fx.Annotate( +// &b, +// fx.ParamTags(`name:"B"`) +// ) +// ) +// +// Code above is equivalent to the following: +// +// type Target struct { +// fx.In +// +// a A `name:"A"` +// b B `name:"B"` +// } +// var target Target +// ... +// fx.Populate(&target) +// // This is most helpful in unit tests: it lets tests leverage Fx's automatic // constructor wiring to build a few structs, but then extract those structs // for further testing. func Populate(targets ...interface{}) Option { // Validate all targets are non-nil pointers. - targetTypes := make([]reflect.Type, len(targets)) + fields := make([]reflect.StructField, len(targets)+1) + fields[0] = reflect.StructField{ + Name: "In", + Type: reflect.TypeOf(In{}), + Anonymous: true, + } for i, t := range targets { if t == nil { return Error(fmt.Errorf("failed to Populate: target %v is nil", i+1)) } - rt := reflect.TypeOf(t) + var ( + rt reflect.Type + tag reflect.StructTag + ) + switch t := t.(type) { + case annotated: + rt = reflect.TypeOf(t.Target) + tag = reflect.StructTag(t.ParamTags[0]) + targets[i] = t.Target + default: + rt = reflect.TypeOf(t) + } if rt.Kind() != reflect.Ptr { return Error(fmt.Errorf("failed to Populate: target %v is not a pointer type, got %T", i+1, t)) } - - targetTypes[i] = reflect.TypeOf(t).Elem() + fields[i+1] = reflect.StructField{ + Name: fmt.Sprintf("Field%d", i), + Type: rt.Elem(), + Tag: tag, + } } // Build a function that looks like: @@ -56,10 +103,11 @@ // [...] 
// } // - fnType := reflect.FuncOf(targetTypes, nil, false /* variadic */) + fnType := reflect.FuncOf([]reflect.Type{reflect.StructOf(fields)}, nil, false /* variadic */) fn := reflect.MakeFunc(fnType, func(args []reflect.Value) []reflect.Value { - for i, arg := range args { - reflect.ValueOf(targets[i]).Elem().Set(arg) + arg := args[0] + for i, target := range targets { + reflect.ValueOf(target).Elem().Set(arg.Field(i + 1)) } return nil }) diff -Nru temporal-1.21.5-1/src/vendor/go.uber.org/fx/provide.go temporal-1.22.5/src/vendor/go.uber.org/fx/provide.go --- temporal-1.21.5-1/src/vendor/go.uber.org/fx/provide.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.uber.org/fx/provide.go 2024-02-23 09:46:14.000000000 +0000 @@ -131,18 +131,18 @@ case annotationError: // fx.Annotate failed. Turn it into an Fx error. return fmt.Errorf( - "encountered error while applying annotation using fx.Annotate to %s: %+v", + "encountered error while applying annotation using fx.Annotate to %s: %w", fxreflect.FuncName(constructor.target), constructor.err) case annotated: ctor, err := constructor.Build() if err != nil { - return fmt.Errorf("fx.Provide(%v) from:\n%+vFailed: %v", constructor, p.Stack, err) + return fmt.Errorf("fx.Provide(%v) from:\n%+vFailed: %w", constructor, p.Stack, err) } opts = append(opts, dig.LocationForPC(constructor.FuncPtr)) if err := c.Provide(ctor, opts...); err != nil { - return fmt.Errorf("fx.Provide(%v) from:\n%+vFailed: %v", constructor, p.Stack, err) + return fmt.Errorf("fx.Provide(%v) from:\n%+vFailed: %w", constructor, p.Stack, err) } case Annotated: @@ -159,7 +159,7 @@ } if err := c.Provide(ann.Target, opts...); err != nil { - return fmt.Errorf("fx.Provide(%v) from:\n%+vFailed: %v", ann, p.Stack, err) + return fmt.Errorf("fx.Provide(%v) from:\n%+vFailed: %w", ann, p.Stack, err) } default: @@ -180,7 +180,7 @@ } if err := c.Provide(constructor, opts...); err != nil { - return fmt.Errorf("fx.Provide(%v) from:\n%+vFailed: %v", fxreflect.FuncName(constructor), p.Stack, err) + return fmt.Errorf("fx.Provide(%v) from:\n%+vFailed: %w", fxreflect.FuncName(constructor), p.Stack, err) } } return nil diff -Nru temporal-1.21.5-1/src/vendor/go.uber.org/fx/replace.go temporal-1.22.5/src/vendor/go.uber.org/fx/replace.go --- temporal-1.21.5-1/src/vendor/go.uber.org/fx/replace.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.uber.org/fx/replace.go 2024-02-23 09:46:14.000000000 +0000 @@ -103,11 +103,12 @@ } func (o replaceOption) apply(m *module) { - for _, target := range o.Targets { + for i, target := range o.Targets { m.decorators = append(m.decorators, decorator{ - Target: target, - Stack: o.Stack, - IsReplace: true, + Target: target, + Stack: o.Stack, + IsReplace: true, + ReplaceType: o.Types[i], }) } } diff -Nru temporal-1.21.5-1/src/vendor/go.uber.org/fx/shutdown.go temporal-1.22.5/src/vendor/go.uber.org/fx/shutdown.go --- temporal-1.21.5-1/src/vendor/go.uber.org/fx/shutdown.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.uber.org/fx/shutdown.go 2024-02-23 09:46:14.000000000 +0000 @@ -21,7 +21,6 @@ package fx import ( - "context" "time" ) @@ -57,9 +56,7 @@ type shutdownTimeoutOption time.Duration -func (to shutdownTimeoutOption) apply(s *shutdowner) { - s.shutdownTimeout = time.Duration(to) -} +func (shutdownTimeoutOption) apply(*shutdowner) {} var _ ShutdownOption = shutdownTimeoutOption(0) @@ -67,39 +64,25 @@ // for a given call to Shutdown method of the [Shutdowner] interface. 
As the // Shutdown method will block while waiting for a signal receiver relay // goroutine to stop. +// +// Deprecated: This option has no effect. Shutdown is not a blocking operation. func ShutdownTimeout(timeout time.Duration) ShutdownOption { return shutdownTimeoutOption(timeout) } type shutdowner struct { - app *App - exitCode int - shutdownTimeout time.Duration + app *App + exitCode int } // Shutdown broadcasts a signal to all of the application's Done channels // and begins the Stop process. Applications can be shut down only after they // have finished starting up. -// In practice this means Shutdowner.Shutdown should not be called from an -// fx.Invoke, but from a fx.Lifecycle.OnStart hook. func (s *shutdowner) Shutdown(opts ...ShutdownOption) error { for _, opt := range opts { opt.apply(s) } - ctx := context.Background() - - if s.shutdownTimeout != time.Duration(0) { - c, cancel := context.WithTimeout( - context.Background(), - s.shutdownTimeout, - ) - defer cancel() - ctx = c - } - - defer s.app.receivers.Stop(ctx) - return s.app.receivers.Broadcast(ShutdownSignal{ Signal: _sigTERM, ExitCode: s.exitCode, diff -Nru temporal-1.21.5-1/src/vendor/go.uber.org/fx/signal.go temporal-1.22.5/src/vendor/go.uber.org/fx/signal.go --- temporal-1.21.5-1/src/vendor/go.uber.org/fx/signal.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.uber.org/fx/signal.go 2024-02-23 09:46:14.000000000 +0000 @@ -109,7 +109,6 @@ return } - recv.last = nil recv.finished = make(chan struct{}, 1) recv.shutdown = make(chan struct{}, 1) recv.notify(recv.signals, os.Interrupt, _sigINT, _sigTERM) @@ -135,11 +134,12 @@ close(recv.finished) recv.shutdown = nil recv.finished = nil + recv.last = nil return nil } } -func (recv *signalReceivers) Done() chan os.Signal { +func (recv *signalReceivers) Done() <-chan os.Signal { recv.m.Lock() defer recv.m.Unlock() @@ -157,7 +157,7 @@ return ch } -func (recv *signalReceivers) Wait() chan ShutdownSignal { +func (recv *signalReceivers) Wait() <-chan ShutdownSignal { recv.m.Lock() defer recv.m.Unlock() diff -Nru temporal-1.21.5-1/src/vendor/go.uber.org/fx/version.go temporal-1.22.5/src/vendor/go.uber.org/fx/version.go --- temporal-1.21.5-1/src/vendor/go.uber.org/fx/version.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.uber.org/fx/version.go 2024-02-23 09:46:14.000000000 +0000 @@ -21,4 +21,4 @@ package fx // Version is exported for runtime compatibility checks. -const Version = "1.19.1" +const Version = "1.20.0" diff -Nru temporal-1.21.5-1/src/vendor/go.uber.org/multierr/CHANGELOG.md temporal-1.22.5/src/vendor/go.uber.org/multierr/CHANGELOG.md --- temporal-1.21.5-1/src/vendor/go.uber.org/multierr/CHANGELOG.md 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.uber.org/multierr/CHANGELOG.md 2024-02-23 09:46:14.000000000 +0000 @@ -1,6 +1,21 @@ Releases ======== +v1.11.0 (2023-03-28) +==================== +- `Errors` now supports any error that implements multiple-error + interface. +- Add `Every` function to allow checking if all errors in the chain + satisfies `errors.Is` against the target error. + +v1.10.0 (2023-03-08) +==================== + +- Comply with Go 1.20's multiple-error interface. +- Drop Go 1.18 support. + Per the support policy, only Go 1.19 and 1.20 are supported now. +- Drop all non-test external dependencies. 
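The two changelog entries above (compliance with Go 1.20's multiple-error interface and the new Every helper) are the user-visible part of this multierr bump. A minimal usage sketch, not taken from the Temporal sources; the sentinel error and the Combine call are illustrative only:

    package main

    import (
    	"errors"
    	"fmt"

    	"go.uber.org/multierr"
    )

    var errTimeout = errors.New("timeout")

    func main() {
    	// Combine two wrapped errors into a single multierr error.
    	err := multierr.Combine(
    		fmt.Errorf("fetch a: %w", errTimeout),
    		fmt.Errorf("fetch b: %w", errTimeout),
    	)

    	// errors.Is succeeds if at least one branch matches; on Go 1.20+
    	// this traverses the standard Unwrap() []error interface.
    	fmt.Println(errors.Is(err, errTimeout)) // true

    	// Every (new in v1.11.0) succeeds only if all branches match.
    	fmt.Println(multierr.Every(err, errTimeout)) // true

    	// Errors still returns the individual errors.
    	fmt.Println(len(multierr.Errors(err))) // 2
    }
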
+ v1.9.0 (2022-12-12) =================== diff -Nru temporal-1.21.5-1/src/vendor/go.uber.org/multierr/README.md temporal-1.22.5/src/vendor/go.uber.org/multierr/README.md --- temporal-1.21.5-1/src/vendor/go.uber.org/multierr/README.md 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.uber.org/multierr/README.md 2024-02-23 09:46:14.000000000 +0000 @@ -2,9 +2,29 @@ `multierr` allows combining one or more Go `error`s together. +## Features + +- **Idiomatic**: + multierr follows best practices in Go, and keeps your code idiomatic. + - It keeps the underlying error type hidden, + allowing you to deal in `error` values exclusively. + - It provides APIs to safely append into an error from a `defer` statement. +- **Performant**: + multierr is optimized for performance: + - It avoids allocations where possible. + - It utilizes slice resizing semantics to optimize common cases + like appending into the same error object from a loop. +- **Interoperable**: + multierr interoperates with the Go standard library's error APIs seamlessly: + - The `errors.Is` and `errors.As` functions *just work*. +- **Lightweight**: + multierr comes with virtually no dependencies. + ## Installation - go get -u go.uber.org/multierr +```bash +go get -u go.uber.org/multierr@latest +``` ## Status diff -Nru temporal-1.21.5-1/src/vendor/go.uber.org/multierr/error.go temporal-1.22.5/src/vendor/go.uber.org/multierr/error.go --- temporal-1.21.5-1/src/vendor/go.uber.org/multierr/error.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.uber.org/multierr/error.go 2024-02-23 09:46:14.000000000 +0000 @@ -1,4 +1,4 @@ -// Copyright (c) 2017-2021 Uber Technologies, Inc. +// Copyright (c) 2017-2023 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -147,8 +147,7 @@ "io" "strings" "sync" - - "go.uber.org/atomic" + "sync/atomic" ) var ( @@ -196,23 +195,7 @@ // // Callers of this function are free to modify the returned slice. func Errors(err error) []error { - if err == nil { - return nil - } - - // Note that we're casting to multiError, not errorGroup. Our contract is - // that returned errors MAY implement errorGroup. Errors, however, only - // has special behavior for multierr-specific error objects. - // - // This behavior can be expanded in the future but I think it's prudent to - // start with as little as possible in terms of contract and possibility - // of misuse. - eg, ok := err.(*multiError) - if !ok { - return []error{err} - } - - return append(([]error)(nil), eg.Errors()...) + return extractErrors(err) } // multiError is an error that holds one or more errors. @@ -227,8 +210,6 @@ errors []error } -var _ errorGroup = (*multiError)(nil) - // Errors returns the list of underlying errors. // // This slice MUST NOT be modified. @@ -239,33 +220,6 @@ return merr.errors } -// As attempts to find the first error in the error list that matches the type -// of the value that target points to. -// -// This function allows errors.As to traverse the values stored on the -// multierr error. -func (merr *multiError) As(target interface{}) bool { - for _, err := range merr.Errors() { - if errors.As(err, target) { - return true - } - } - return false -} - -// Is attempts to match the provided error against errors in the error list. -// -// This function allows errors.Is to traverse the values stored on the -// multierr error. 
-func (merr *multiError) Is(target error) bool { - for _, err := range merr.Errors() { - if errors.Is(err, target) { - return true - } - } - return false -} - func (merr *multiError) Error() string { if merr == nil { return "" @@ -281,6 +235,17 @@ return result } +// Every compares every error in the given err against the given target error +// using [errors.Is], and returns true only if every comparison returned true. +func Every(err error, target error) bool { + for _, e := range extractErrors(err) { + if !errors.Is(e, target) { + return false + } + } + return true +} + func (merr *multiError) Format(f fmt.State, c rune) { if c == 'v' && f.Flag('+') { merr.writeMultiline(f) diff -Nru temporal-1.21.5-1/src/vendor/go.uber.org/multierr/error_post_go120.go temporal-1.22.5/src/vendor/go.uber.org/multierr/error_post_go120.go --- temporal-1.21.5-1/src/vendor/go.uber.org/multierr/error_post_go120.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.uber.org/multierr/error_post_go120.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,48 @@ +// Copyright (c) 2017-2023 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +//go:build go1.20 +// +build go1.20 + +package multierr + +// Unwrap returns a list of errors wrapped by this multierr. +func (merr *multiError) Unwrap() []error { + return merr.Errors() +} + +type multipleErrors interface { + Unwrap() []error +} + +func extractErrors(err error) []error { + if err == nil { + return nil + } + + // check if the given err is an Unwrapable error that + // implements multipleErrors interface. + eg, ok := err.(multipleErrors) + if !ok { + return []error{err} + } + + return append(([]error)(nil), eg.Unwrap()...) +} diff -Nru temporal-1.21.5-1/src/vendor/go.uber.org/multierr/error_pre_go120.go temporal-1.22.5/src/vendor/go.uber.org/multierr/error_pre_go120.go --- temporal-1.21.5-1/src/vendor/go.uber.org/multierr/error_pre_go120.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.uber.org/multierr/error_pre_go120.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,79 @@ +// Copyright (c) 2017-2023 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +//go:build !go1.20 +// +build !go1.20 + +package multierr + +import "errors" + +// Versions of Go before 1.20 did not support the Unwrap() []error method. +// This provides a similar behavior by implementing the Is(..) and As(..) +// methods. +// See the errors.Join proposal for details: +// https://github.com/golang/go/issues/53435 + +// As attempts to find the first error in the error list that matches the type +// of the value that target points to. +// +// This function allows errors.As to traverse the values stored on the +// multierr error. +func (merr *multiError) As(target interface{}) bool { + for _, err := range merr.Errors() { + if errors.As(err, target) { + return true + } + } + return false +} + +// Is attempts to match the provided error against errors in the error list. +// +// This function allows errors.Is to traverse the values stored on the +// multierr error. +func (merr *multiError) Is(target error) bool { + for _, err := range merr.Errors() { + if errors.Is(err, target) { + return true + } + } + return false +} + +func extractErrors(err error) []error { + if err == nil { + return nil + } + + // Note that we're casting to multiError, not errorGroup. Our contract is + // that returned errors MAY implement errorGroup. Errors, however, only + // has special behavior for multierr-specific error objects. + // + // This behavior can be expanded in the future but I think it's prudent to + // start with as little as possible in terms of contract and possibility + // of misuse. + eg, ok := err.(*multiError) + if !ok { + return []error{err} + } + + return append(([]error)(nil), eg.Errors()...) 
+} diff -Nru temporal-1.21.5-1/src/vendor/go.uber.org/multierr/glide.yaml temporal-1.22.5/src/vendor/go.uber.org/multierr/glide.yaml --- temporal-1.21.5-1/src/vendor/go.uber.org/multierr/glide.yaml 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/go.uber.org/multierr/glide.yaml 1970-01-01 00:00:00.000000000 +0000 @@ -1,8 +0,0 @@ -package: go.uber.org/multierr -import: -- package: go.uber.org/atomic - version: ^1 -testImport: -- package: github.com/stretchr/testify - subpackages: - - assert diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go temporal-1.22.5/src/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go --- temporal-1.21.5-1/src/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,17 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc && !purego +// +build gc,!purego + +package chacha20 + +const bufSize = 256 + +//go:noescape +func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32) + +func (c *Cipher) xorKeyStreamBlocks(dst, src []byte) { + xorKeyStreamVX(dst, src, &c.key, &c.nonce, &c.counter) +} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s temporal-1.22.5/src/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s --- temporal-1.21.5-1/src/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,308 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build gc && !purego +// +build gc,!purego + +#include "textflag.h" + +#define NUM_ROUNDS 10 + +// func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32) +TEXT ·xorKeyStreamVX(SB), NOSPLIT, $0 + MOVD dst+0(FP), R1 + MOVD src+24(FP), R2 + MOVD src_len+32(FP), R3 + MOVD key+48(FP), R4 + MOVD nonce+56(FP), R6 + MOVD counter+64(FP), R7 + + MOVD $·constants(SB), R10 + MOVD $·incRotMatrix(SB), R11 + + MOVW (R7), R20 + + AND $~255, R3, R13 + ADD R2, R13, R12 // R12 for block end + AND $255, R3, R13 +loop: + MOVD $NUM_ROUNDS, R21 + VLD1 (R11), [V30.S4, V31.S4] + + // load contants + // VLD4R (R10), [V0.S4, V1.S4, V2.S4, V3.S4] + WORD $0x4D60E940 + + // load keys + // VLD4R 16(R4), [V4.S4, V5.S4, V6.S4, V7.S4] + WORD $0x4DFFE884 + // VLD4R 16(R4), [V8.S4, V9.S4, V10.S4, V11.S4] + WORD $0x4DFFE888 + SUB $32, R4 + + // load counter + nonce + // VLD1R (R7), [V12.S4] + WORD $0x4D40C8EC + + // VLD3R (R6), [V13.S4, V14.S4, V15.S4] + WORD $0x4D40E8CD + + // update counter + VADD V30.S4, V12.S4, V12.S4 + +chacha: + // V0..V3 += V4..V7 + // V12..V15 <<<= ((V12..V15 XOR V0..V3), 16) + VADD V0.S4, V4.S4, V0.S4 + VADD V1.S4, V5.S4, V1.S4 + VADD V2.S4, V6.S4, V2.S4 + VADD V3.S4, V7.S4, V3.S4 + VEOR V12.B16, V0.B16, V12.B16 + VEOR V13.B16, V1.B16, V13.B16 + VEOR V14.B16, V2.B16, V14.B16 + VEOR V15.B16, V3.B16, V15.B16 + VREV32 V12.H8, V12.H8 + VREV32 V13.H8, V13.H8 + VREV32 V14.H8, V14.H8 + VREV32 V15.H8, V15.H8 + // V8..V11 += V12..V15 + // V4..V7 <<<= ((V4..V7 XOR V8..V11), 12) + VADD V8.S4, V12.S4, V8.S4 + VADD V9.S4, V13.S4, V9.S4 + VADD V10.S4, V14.S4, V10.S4 + VADD V11.S4, V15.S4, V11.S4 + VEOR V8.B16, V4.B16, V16.B16 + VEOR V9.B16, V5.B16, V17.B16 + VEOR V10.B16, V6.B16, V18.B16 + VEOR V11.B16, V7.B16, V19.B16 + VSHL $12, V16.S4, V4.S4 + VSHL $12, V17.S4, V5.S4 + VSHL $12, V18.S4, V6.S4 + VSHL $12, V19.S4, V7.S4 + VSRI $20, V16.S4, V4.S4 + VSRI $20, V17.S4, V5.S4 + VSRI $20, V18.S4, V6.S4 + VSRI $20, V19.S4, V7.S4 + + // V0..V3 += V4..V7 + // V12..V15 <<<= ((V12..V15 XOR V0..V3), 8) + VADD V0.S4, V4.S4, V0.S4 + VADD V1.S4, V5.S4, V1.S4 + VADD V2.S4, V6.S4, V2.S4 + VADD V3.S4, V7.S4, V3.S4 + VEOR V12.B16, V0.B16, V12.B16 + VEOR V13.B16, V1.B16, V13.B16 + VEOR V14.B16, V2.B16, V14.B16 + VEOR V15.B16, V3.B16, V15.B16 + VTBL V31.B16, [V12.B16], V12.B16 + VTBL V31.B16, [V13.B16], V13.B16 + VTBL V31.B16, [V14.B16], V14.B16 + VTBL V31.B16, [V15.B16], V15.B16 + + // V8..V11 += V12..V15 + // V4..V7 <<<= ((V4..V7 XOR V8..V11), 7) + VADD V12.S4, V8.S4, V8.S4 + VADD V13.S4, V9.S4, V9.S4 + VADD V14.S4, V10.S4, V10.S4 + VADD V15.S4, V11.S4, V11.S4 + VEOR V8.B16, V4.B16, V16.B16 + VEOR V9.B16, V5.B16, V17.B16 + VEOR V10.B16, V6.B16, V18.B16 + VEOR V11.B16, V7.B16, V19.B16 + VSHL $7, V16.S4, V4.S4 + VSHL $7, V17.S4, V5.S4 + VSHL $7, V18.S4, V6.S4 + VSHL $7, V19.S4, V7.S4 + VSRI $25, V16.S4, V4.S4 + VSRI $25, V17.S4, V5.S4 + VSRI $25, V18.S4, V6.S4 + VSRI $25, V19.S4, V7.S4 + + // V0..V3 += V5..V7, V4 + // V15,V12-V14 <<<= ((V15,V12-V14 XOR V0..V3), 16) + VADD V0.S4, V5.S4, V0.S4 + VADD V1.S4, V6.S4, V1.S4 + VADD V2.S4, V7.S4, V2.S4 + VADD V3.S4, V4.S4, V3.S4 + VEOR V15.B16, V0.B16, V15.B16 + VEOR V12.B16, V1.B16, V12.B16 + VEOR V13.B16, V2.B16, V13.B16 + VEOR V14.B16, V3.B16, V14.B16 + VREV32 V12.H8, V12.H8 + VREV32 V13.H8, V13.H8 + VREV32 V14.H8, V14.H8 + VREV32 V15.H8, V15.H8 + + // V10 += V15; V5 <<<= ((V10 XOR V5), 12) + // ... 
+ VADD V15.S4, V10.S4, V10.S4 + VADD V12.S4, V11.S4, V11.S4 + VADD V13.S4, V8.S4, V8.S4 + VADD V14.S4, V9.S4, V9.S4 + VEOR V10.B16, V5.B16, V16.B16 + VEOR V11.B16, V6.B16, V17.B16 + VEOR V8.B16, V7.B16, V18.B16 + VEOR V9.B16, V4.B16, V19.B16 + VSHL $12, V16.S4, V5.S4 + VSHL $12, V17.S4, V6.S4 + VSHL $12, V18.S4, V7.S4 + VSHL $12, V19.S4, V4.S4 + VSRI $20, V16.S4, V5.S4 + VSRI $20, V17.S4, V6.S4 + VSRI $20, V18.S4, V7.S4 + VSRI $20, V19.S4, V4.S4 + + // V0 += V5; V15 <<<= ((V0 XOR V15), 8) + // ... + VADD V5.S4, V0.S4, V0.S4 + VADD V6.S4, V1.S4, V1.S4 + VADD V7.S4, V2.S4, V2.S4 + VADD V4.S4, V3.S4, V3.S4 + VEOR V0.B16, V15.B16, V15.B16 + VEOR V1.B16, V12.B16, V12.B16 + VEOR V2.B16, V13.B16, V13.B16 + VEOR V3.B16, V14.B16, V14.B16 + VTBL V31.B16, [V12.B16], V12.B16 + VTBL V31.B16, [V13.B16], V13.B16 + VTBL V31.B16, [V14.B16], V14.B16 + VTBL V31.B16, [V15.B16], V15.B16 + + // V10 += V15; V5 <<<= ((V10 XOR V5), 7) + // ... + VADD V15.S4, V10.S4, V10.S4 + VADD V12.S4, V11.S4, V11.S4 + VADD V13.S4, V8.S4, V8.S4 + VADD V14.S4, V9.S4, V9.S4 + VEOR V10.B16, V5.B16, V16.B16 + VEOR V11.B16, V6.B16, V17.B16 + VEOR V8.B16, V7.B16, V18.B16 + VEOR V9.B16, V4.B16, V19.B16 + VSHL $7, V16.S4, V5.S4 + VSHL $7, V17.S4, V6.S4 + VSHL $7, V18.S4, V7.S4 + VSHL $7, V19.S4, V4.S4 + VSRI $25, V16.S4, V5.S4 + VSRI $25, V17.S4, V6.S4 + VSRI $25, V18.S4, V7.S4 + VSRI $25, V19.S4, V4.S4 + + SUB $1, R21 + CBNZ R21, chacha + + // VLD4R (R10), [V16.S4, V17.S4, V18.S4, V19.S4] + WORD $0x4D60E950 + + // VLD4R 16(R4), [V20.S4, V21.S4, V22.S4, V23.S4] + WORD $0x4DFFE894 + VADD V30.S4, V12.S4, V12.S4 + VADD V16.S4, V0.S4, V0.S4 + VADD V17.S4, V1.S4, V1.S4 + VADD V18.S4, V2.S4, V2.S4 + VADD V19.S4, V3.S4, V3.S4 + // VLD4R 16(R4), [V24.S4, V25.S4, V26.S4, V27.S4] + WORD $0x4DFFE898 + // restore R4 + SUB $32, R4 + + // load counter + nonce + // VLD1R (R7), [V28.S4] + WORD $0x4D40C8FC + // VLD3R (R6), [V29.S4, V30.S4, V31.S4] + WORD $0x4D40E8DD + + VADD V20.S4, V4.S4, V4.S4 + VADD V21.S4, V5.S4, V5.S4 + VADD V22.S4, V6.S4, V6.S4 + VADD V23.S4, V7.S4, V7.S4 + VADD V24.S4, V8.S4, V8.S4 + VADD V25.S4, V9.S4, V9.S4 + VADD V26.S4, V10.S4, V10.S4 + VADD V27.S4, V11.S4, V11.S4 + VADD V28.S4, V12.S4, V12.S4 + VADD V29.S4, V13.S4, V13.S4 + VADD V30.S4, V14.S4, V14.S4 + VADD V31.S4, V15.S4, V15.S4 + + VZIP1 V1.S4, V0.S4, V16.S4 + VZIP2 V1.S4, V0.S4, V17.S4 + VZIP1 V3.S4, V2.S4, V18.S4 + VZIP2 V3.S4, V2.S4, V19.S4 + VZIP1 V5.S4, V4.S4, V20.S4 + VZIP2 V5.S4, V4.S4, V21.S4 + VZIP1 V7.S4, V6.S4, V22.S4 + VZIP2 V7.S4, V6.S4, V23.S4 + VZIP1 V9.S4, V8.S4, V24.S4 + VZIP2 V9.S4, V8.S4, V25.S4 + VZIP1 V11.S4, V10.S4, V26.S4 + VZIP2 V11.S4, V10.S4, V27.S4 + VZIP1 V13.S4, V12.S4, V28.S4 + VZIP2 V13.S4, V12.S4, V29.S4 + VZIP1 V15.S4, V14.S4, V30.S4 + VZIP2 V15.S4, V14.S4, V31.S4 + VZIP1 V18.D2, V16.D2, V0.D2 + VZIP2 V18.D2, V16.D2, V4.D2 + VZIP1 V19.D2, V17.D2, V8.D2 + VZIP2 V19.D2, V17.D2, V12.D2 + VLD1.P 64(R2), [V16.B16, V17.B16, V18.B16, V19.B16] + + VZIP1 V22.D2, V20.D2, V1.D2 + VZIP2 V22.D2, V20.D2, V5.D2 + VZIP1 V23.D2, V21.D2, V9.D2 + VZIP2 V23.D2, V21.D2, V13.D2 + VLD1.P 64(R2), [V20.B16, V21.B16, V22.B16, V23.B16] + VZIP1 V26.D2, V24.D2, V2.D2 + VZIP2 V26.D2, V24.D2, V6.D2 + VZIP1 V27.D2, V25.D2, V10.D2 + VZIP2 V27.D2, V25.D2, V14.D2 + VLD1.P 64(R2), [V24.B16, V25.B16, V26.B16, V27.B16] + VZIP1 V30.D2, V28.D2, V3.D2 + VZIP2 V30.D2, V28.D2, V7.D2 + VZIP1 V31.D2, V29.D2, V11.D2 + VZIP2 V31.D2, V29.D2, V15.D2 + VLD1.P 64(R2), [V28.B16, V29.B16, V30.B16, V31.B16] + VEOR V0.B16, V16.B16, V16.B16 + VEOR V1.B16, V17.B16, V17.B16 + VEOR V2.B16, 
V18.B16, V18.B16 + VEOR V3.B16, V19.B16, V19.B16 + VST1.P [V16.B16, V17.B16, V18.B16, V19.B16], 64(R1) + VEOR V4.B16, V20.B16, V20.B16 + VEOR V5.B16, V21.B16, V21.B16 + VEOR V6.B16, V22.B16, V22.B16 + VEOR V7.B16, V23.B16, V23.B16 + VST1.P [V20.B16, V21.B16, V22.B16, V23.B16], 64(R1) + VEOR V8.B16, V24.B16, V24.B16 + VEOR V9.B16, V25.B16, V25.B16 + VEOR V10.B16, V26.B16, V26.B16 + VEOR V11.B16, V27.B16, V27.B16 + VST1.P [V24.B16, V25.B16, V26.B16, V27.B16], 64(R1) + VEOR V12.B16, V28.B16, V28.B16 + VEOR V13.B16, V29.B16, V29.B16 + VEOR V14.B16, V30.B16, V30.B16 + VEOR V15.B16, V31.B16, V31.B16 + VST1.P [V28.B16, V29.B16, V30.B16, V31.B16], 64(R1) + + ADD $4, R20 + MOVW R20, (R7) // update counter + + CMP R2, R12 + BGT loop + + RET + + +DATA ·constants+0x00(SB)/4, $0x61707865 +DATA ·constants+0x04(SB)/4, $0x3320646e +DATA ·constants+0x08(SB)/4, $0x79622d32 +DATA ·constants+0x0c(SB)/4, $0x6b206574 +GLOBL ·constants(SB), NOPTR|RODATA, $32 + +DATA ·incRotMatrix+0x00(SB)/4, $0x00000000 +DATA ·incRotMatrix+0x04(SB)/4, $0x00000001 +DATA ·incRotMatrix+0x08(SB)/4, $0x00000002 +DATA ·incRotMatrix+0x0c(SB)/4, $0x00000003 +DATA ·incRotMatrix+0x10(SB)/4, $0x02010003 +DATA ·incRotMatrix+0x14(SB)/4, $0x06050407 +DATA ·incRotMatrix+0x18(SB)/4, $0x0A09080B +DATA ·incRotMatrix+0x1c(SB)/4, $0x0E0D0C0F +GLOBL ·incRotMatrix(SB), NOPTR|RODATA, $32 diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/crypto/chacha20/chacha_generic.go temporal-1.22.5/src/vendor/golang.org/x/crypto/chacha20/chacha_generic.go --- temporal-1.21.5-1/src/vendor/golang.org/x/crypto/chacha20/chacha_generic.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/crypto/chacha20/chacha_generic.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,398 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package chacha20 implements the ChaCha20 and XChaCha20 encryption algorithms +// as specified in RFC 8439 and draft-irtf-cfrg-xchacha-01. +package chacha20 + +import ( + "crypto/cipher" + "encoding/binary" + "errors" + "math/bits" + + "golang.org/x/crypto/internal/alias" +) + +const ( + // KeySize is the size of the key used by this cipher, in bytes. + KeySize = 32 + + // NonceSize is the size of the nonce used with the standard variant of this + // cipher, in bytes. + // + // Note that this is too short to be safely generated at random if the same + // key is reused more than 2³² times. + NonceSize = 12 + + // NonceSizeX is the size of the nonce used with the XChaCha20 variant of + // this cipher, in bytes. + NonceSizeX = 24 +) + +// Cipher is a stateful instance of ChaCha20 or XChaCha20 using a particular key +// and nonce. A *Cipher implements the cipher.Stream interface. +type Cipher struct { + // The ChaCha20 state is 16 words: 4 constant, 8 of key, 1 of counter + // (incremented after each block), and 3 of nonce. + key [8]uint32 + counter uint32 + nonce [3]uint32 + + // The last len bytes of buf are leftover key stream bytes from the previous + // XORKeyStream invocation. The size of buf depends on how many blocks are + // computed at a time by xorKeyStreamBlocks. + buf [bufSize]byte + len int + + // overflow is set when the counter overflowed, no more blocks can be + // generated, and the next XORKeyStream call should panic. + overflow bool + + // The counter-independent results of the first round are cached after they + // are computed the first time. 
+ precompDone bool + p1, p5, p9, p13 uint32 + p2, p6, p10, p14 uint32 + p3, p7, p11, p15 uint32 +} + +var _ cipher.Stream = (*Cipher)(nil) + +// NewUnauthenticatedCipher creates a new ChaCha20 stream cipher with the given +// 32 bytes key and a 12 or 24 bytes nonce. If a nonce of 24 bytes is provided, +// the XChaCha20 construction will be used. It returns an error if key or nonce +// have any other length. +// +// Note that ChaCha20, like all stream ciphers, is not authenticated and allows +// attackers to silently tamper with the plaintext. For this reason, it is more +// appropriate as a building block than as a standalone encryption mechanism. +// Instead, consider using package golang.org/x/crypto/chacha20poly1305. +func NewUnauthenticatedCipher(key, nonce []byte) (*Cipher, error) { + // This function is split into a wrapper so that the Cipher allocation will + // be inlined, and depending on how the caller uses the return value, won't + // escape to the heap. + c := &Cipher{} + return newUnauthenticatedCipher(c, key, nonce) +} + +func newUnauthenticatedCipher(c *Cipher, key, nonce []byte) (*Cipher, error) { + if len(key) != KeySize { + return nil, errors.New("chacha20: wrong key size") + } + if len(nonce) == NonceSizeX { + // XChaCha20 uses the ChaCha20 core to mix 16 bytes of the nonce into a + // derived key, allowing it to operate on a nonce of 24 bytes. See + // draft-irtf-cfrg-xchacha-01, Section 2.3. + key, _ = HChaCha20(key, nonce[0:16]) + cNonce := make([]byte, NonceSize) + copy(cNonce[4:12], nonce[16:24]) + nonce = cNonce + } else if len(nonce) != NonceSize { + return nil, errors.New("chacha20: wrong nonce size") + } + + key, nonce = key[:KeySize], nonce[:NonceSize] // bounds check elimination hint + c.key = [8]uint32{ + binary.LittleEndian.Uint32(key[0:4]), + binary.LittleEndian.Uint32(key[4:8]), + binary.LittleEndian.Uint32(key[8:12]), + binary.LittleEndian.Uint32(key[12:16]), + binary.LittleEndian.Uint32(key[16:20]), + binary.LittleEndian.Uint32(key[20:24]), + binary.LittleEndian.Uint32(key[24:28]), + binary.LittleEndian.Uint32(key[28:32]), + } + c.nonce = [3]uint32{ + binary.LittleEndian.Uint32(nonce[0:4]), + binary.LittleEndian.Uint32(nonce[4:8]), + binary.LittleEndian.Uint32(nonce[8:12]), + } + return c, nil +} + +// The constant first 4 words of the ChaCha20 state. +const ( + j0 uint32 = 0x61707865 // expa + j1 uint32 = 0x3320646e // nd 3 + j2 uint32 = 0x79622d32 // 2-by + j3 uint32 = 0x6b206574 // te k +) + +const blockSize = 64 + +// quarterRound is the core of ChaCha20. It shuffles the bits of 4 state words. +// It's executed 4 times for each of the 20 ChaCha20 rounds, operating on all 16 +// words each round, in columnar or diagonal groups of 4 at a time. +func quarterRound(a, b, c, d uint32) (uint32, uint32, uint32, uint32) { + a += b + d ^= a + d = bits.RotateLeft32(d, 16) + c += d + b ^= c + b = bits.RotateLeft32(b, 12) + a += b + d ^= a + d = bits.RotateLeft32(d, 8) + c += d + b ^= c + b = bits.RotateLeft32(b, 7) + return a, b, c, d +} + +// SetCounter sets the Cipher counter. The next invocation of XORKeyStream will +// behave as if (64 * counter) bytes had been encrypted so far. +// +// To prevent accidental counter reuse, SetCounter panics if counter is less +// than the current value. +// +// Note that the execution time of XORKeyStream is not independent of the +// counter value. +func (s *Cipher) SetCounter(counter uint32) { + // Internally, s may buffer multiple blocks, which complicates this + // implementation slightly. 
When checking whether the counter has rolled + // back, we must use both s.counter and s.len to determine how many blocks + // we have already output. + outputCounter := s.counter - uint32(s.len)/blockSize + if s.overflow || counter < outputCounter { + panic("chacha20: SetCounter attempted to rollback counter") + } + + // In the general case, we set the new counter value and reset s.len to 0, + // causing the next call to XORKeyStream to refill the buffer. However, if + // we're advancing within the existing buffer, we can save work by simply + // setting s.len. + if counter < s.counter { + s.len = int(s.counter-counter) * blockSize + } else { + s.counter = counter + s.len = 0 + } +} + +// XORKeyStream XORs each byte in the given slice with a byte from the +// cipher's key stream. Dst and src must overlap entirely or not at all. +// +// If len(dst) < len(src), XORKeyStream will panic. It is acceptable +// to pass a dst bigger than src, and in that case, XORKeyStream will +// only update dst[:len(src)] and will not touch the rest of dst. +// +// Multiple calls to XORKeyStream behave as if the concatenation of +// the src buffers was passed in a single run. That is, Cipher +// maintains state and does not reset at each XORKeyStream call. +func (s *Cipher) XORKeyStream(dst, src []byte) { + if len(src) == 0 { + return + } + if len(dst) < len(src) { + panic("chacha20: output smaller than input") + } + dst = dst[:len(src)] + if alias.InexactOverlap(dst, src) { + panic("chacha20: invalid buffer overlap") + } + + // First, drain any remaining key stream from a previous XORKeyStream. + if s.len != 0 { + keyStream := s.buf[bufSize-s.len:] + if len(src) < len(keyStream) { + keyStream = keyStream[:len(src)] + } + _ = src[len(keyStream)-1] // bounds check elimination hint + for i, b := range keyStream { + dst[i] = src[i] ^ b + } + s.len -= len(keyStream) + dst, src = dst[len(keyStream):], src[len(keyStream):] + } + if len(src) == 0 { + return + } + + // If we'd need to let the counter overflow and keep generating output, + // panic immediately. If instead we'd only reach the last block, remember + // not to generate any more output after the buffer is drained. + numBlocks := (uint64(len(src)) + blockSize - 1) / blockSize + if s.overflow || uint64(s.counter)+numBlocks > 1<<32 { + panic("chacha20: counter overflow") + } else if uint64(s.counter)+numBlocks == 1<<32 { + s.overflow = true + } + + // xorKeyStreamBlocks implementations expect input lengths that are a + // multiple of bufSize. Platform-specific ones process multiple blocks at a + // time, so have bufSizes that are a multiple of blockSize. + + full := len(src) - len(src)%bufSize + if full > 0 { + s.xorKeyStreamBlocks(dst[:full], src[:full]) + } + dst, src = dst[full:], src[full:] + + // If using a multi-block xorKeyStreamBlocks would overflow, use the generic + // one that does one block at a time. + const blocksPerBuf = bufSize / blockSize + if uint64(s.counter)+blocksPerBuf > 1<<32 { + s.buf = [bufSize]byte{} + numBlocks := (len(src) + blockSize - 1) / blockSize + buf := s.buf[bufSize-numBlocks*blockSize:] + copy(buf, src) + s.xorKeyStreamBlocksGeneric(buf, buf) + s.len = len(buf) - copy(dst, buf) + return + } + + // If we have a partial (multi-)block, pad it for xorKeyStreamBlocks, and + // keep the leftover keystream for the next XORKeyStream invocation. 
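The buffering described just above is what makes XORKeyStream composable: encrypting a message in pieces produces the same bytes as a single call, because leftover keystream is carried over between invocations. A small sketch against the vendored package (key, nonce and message are placeholders for illustration; a fixed all-zero key and nonce must never be used in real code):

    package main

    import (
    	"bytes"
    	"fmt"

    	"golang.org/x/crypto/chacha20"
    )

    func main() {
    	key := make([]byte, chacha20.KeySize)     // 32 bytes, all zero (demo only)
    	nonce := make([]byte, chacha20.NonceSize) // 12 bytes, all zero (demo only)
    	msg := []byte("hello, buffered keystream")

    	// One-shot encryption.
    	c1, err := chacha20.NewUnauthenticatedCipher(key, nonce)
    	if err != nil {
    		panic(err)
    	}
    	whole := make([]byte, len(msg))
    	c1.XORKeyStream(whole, msg)

    	// Same keystream consumed across two calls.
    	c2, _ := chacha20.NewUnauthenticatedCipher(key, nonce)
    	split := make([]byte, len(msg))
    	c2.XORKeyStream(split[:7], msg[:7])
    	c2.XORKeyStream(split[7:], msg[7:])

    	fmt.Println(bytes.Equal(whole, split)) // true
    }
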
+ if len(src) > 0 { + s.buf = [bufSize]byte{} + copy(s.buf[:], src) + s.xorKeyStreamBlocks(s.buf[:], s.buf[:]) + s.len = bufSize - copy(dst, s.buf[:]) + } +} + +func (s *Cipher) xorKeyStreamBlocksGeneric(dst, src []byte) { + if len(dst) != len(src) || len(dst)%blockSize != 0 { + panic("chacha20: internal error: wrong dst and/or src length") + } + + // To generate each block of key stream, the initial cipher state + // (represented below) is passed through 20 rounds of shuffling, + // alternatively applying quarterRounds by columns (like 1, 5, 9, 13) + // or by diagonals (like 1, 6, 11, 12). + // + // 0:cccccccc 1:cccccccc 2:cccccccc 3:cccccccc + // 4:kkkkkkkk 5:kkkkkkkk 6:kkkkkkkk 7:kkkkkkkk + // 8:kkkkkkkk 9:kkkkkkkk 10:kkkkkkkk 11:kkkkkkkk + // 12:bbbbbbbb 13:nnnnnnnn 14:nnnnnnnn 15:nnnnnnnn + // + // c=constant k=key b=blockcount n=nonce + var ( + c0, c1, c2, c3 = j0, j1, j2, j3 + c4, c5, c6, c7 = s.key[0], s.key[1], s.key[2], s.key[3] + c8, c9, c10, c11 = s.key[4], s.key[5], s.key[6], s.key[7] + _, c13, c14, c15 = s.counter, s.nonce[0], s.nonce[1], s.nonce[2] + ) + + // Three quarters of the first round don't depend on the counter, so we can + // calculate them here, and reuse them for multiple blocks in the loop, and + // for future XORKeyStream invocations. + if !s.precompDone { + s.p1, s.p5, s.p9, s.p13 = quarterRound(c1, c5, c9, c13) + s.p2, s.p6, s.p10, s.p14 = quarterRound(c2, c6, c10, c14) + s.p3, s.p7, s.p11, s.p15 = quarterRound(c3, c7, c11, c15) + s.precompDone = true + } + + // A condition of len(src) > 0 would be sufficient, but this also + // acts as a bounds check elimination hint. + for len(src) >= 64 && len(dst) >= 64 { + // The remainder of the first column round. + fcr0, fcr4, fcr8, fcr12 := quarterRound(c0, c4, c8, s.counter) + + // The second diagonal round. + x0, x5, x10, x15 := quarterRound(fcr0, s.p5, s.p10, s.p15) + x1, x6, x11, x12 := quarterRound(s.p1, s.p6, s.p11, fcr12) + x2, x7, x8, x13 := quarterRound(s.p2, s.p7, fcr8, s.p13) + x3, x4, x9, x14 := quarterRound(s.p3, fcr4, s.p9, s.p14) + + // The remaining 18 rounds. + for i := 0; i < 9; i++ { + // Column round. + x0, x4, x8, x12 = quarterRound(x0, x4, x8, x12) + x1, x5, x9, x13 = quarterRound(x1, x5, x9, x13) + x2, x6, x10, x14 = quarterRound(x2, x6, x10, x14) + x3, x7, x11, x15 = quarterRound(x3, x7, x11, x15) + + // Diagonal round. + x0, x5, x10, x15 = quarterRound(x0, x5, x10, x15) + x1, x6, x11, x12 = quarterRound(x1, x6, x11, x12) + x2, x7, x8, x13 = quarterRound(x2, x7, x8, x13) + x3, x4, x9, x14 = quarterRound(x3, x4, x9, x14) + } + + // Add back the initial state to generate the key stream, then + // XOR the key stream with the source and write out the result. + addXor(dst[0:4], src[0:4], x0, c0) + addXor(dst[4:8], src[4:8], x1, c1) + addXor(dst[8:12], src[8:12], x2, c2) + addXor(dst[12:16], src[12:16], x3, c3) + addXor(dst[16:20], src[16:20], x4, c4) + addXor(dst[20:24], src[20:24], x5, c5) + addXor(dst[24:28], src[24:28], x6, c6) + addXor(dst[28:32], src[28:32], x7, c7) + addXor(dst[32:36], src[32:36], x8, c8) + addXor(dst[36:40], src[36:40], x9, c9) + addXor(dst[40:44], src[40:44], x10, c10) + addXor(dst[44:48], src[44:48], x11, c11) + addXor(dst[48:52], src[48:52], x12, s.counter) + addXor(dst[52:56], src[52:56], x13, c13) + addXor(dst[56:60], src[56:60], x14, c14) + addXor(dst[60:64], src[60:64], x15, c15) + + s.counter += 1 + + src, dst = src[blockSize:], dst[blockSize:] + } +} + +// HChaCha20 uses the ChaCha20 core to generate a derived key from a 32 bytes +// key and a 16 bytes nonce. 
It returns an error if key or nonce have any other +// length. It is used as part of the XChaCha20 construction. +func HChaCha20(key, nonce []byte) ([]byte, error) { + // This function is split into a wrapper so that the slice allocation will + // be inlined, and depending on how the caller uses the return value, won't + // escape to the heap. + out := make([]byte, 32) + return hChaCha20(out, key, nonce) +} + +func hChaCha20(out, key, nonce []byte) ([]byte, error) { + if len(key) != KeySize { + return nil, errors.New("chacha20: wrong HChaCha20 key size") + } + if len(nonce) != 16 { + return nil, errors.New("chacha20: wrong HChaCha20 nonce size") + } + + x0, x1, x2, x3 := j0, j1, j2, j3 + x4 := binary.LittleEndian.Uint32(key[0:4]) + x5 := binary.LittleEndian.Uint32(key[4:8]) + x6 := binary.LittleEndian.Uint32(key[8:12]) + x7 := binary.LittleEndian.Uint32(key[12:16]) + x8 := binary.LittleEndian.Uint32(key[16:20]) + x9 := binary.LittleEndian.Uint32(key[20:24]) + x10 := binary.LittleEndian.Uint32(key[24:28]) + x11 := binary.LittleEndian.Uint32(key[28:32]) + x12 := binary.LittleEndian.Uint32(nonce[0:4]) + x13 := binary.LittleEndian.Uint32(nonce[4:8]) + x14 := binary.LittleEndian.Uint32(nonce[8:12]) + x15 := binary.LittleEndian.Uint32(nonce[12:16]) + + for i := 0; i < 10; i++ { + // Diagonal round. + x0, x4, x8, x12 = quarterRound(x0, x4, x8, x12) + x1, x5, x9, x13 = quarterRound(x1, x5, x9, x13) + x2, x6, x10, x14 = quarterRound(x2, x6, x10, x14) + x3, x7, x11, x15 = quarterRound(x3, x7, x11, x15) + + // Column round. + x0, x5, x10, x15 = quarterRound(x0, x5, x10, x15) + x1, x6, x11, x12 = quarterRound(x1, x6, x11, x12) + x2, x7, x8, x13 = quarterRound(x2, x7, x8, x13) + x3, x4, x9, x14 = quarterRound(x3, x4, x9, x14) + } + + _ = out[31] // bounds check elimination hint + binary.LittleEndian.PutUint32(out[0:4], x0) + binary.LittleEndian.PutUint32(out[4:8], x1) + binary.LittleEndian.PutUint32(out[8:12], x2) + binary.LittleEndian.PutUint32(out[12:16], x3) + binary.LittleEndian.PutUint32(out[16:20], x12) + binary.LittleEndian.PutUint32(out[20:24], x13) + binary.LittleEndian.PutUint32(out[24:28], x14) + binary.LittleEndian.PutUint32(out[28:32], x15) + return out, nil +} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go temporal-1.22.5/src/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go --- temporal-1.21.5-1/src/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,14 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (!arm64 && !s390x && !ppc64le) || !gc || purego +// +build !arm64,!s390x,!ppc64le !gc purego + +package chacha20 + +const bufSize = blockSize + +func (s *Cipher) xorKeyStreamBlocks(dst, src []byte) { + s.xorKeyStreamBlocksGeneric(dst, src) +} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go temporal-1.22.5/src/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go --- temporal-1.21.5-1/src/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,17 @@ +// Copyright 2019 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc && !purego +// +build gc,!purego + +package chacha20 + +const bufSize = 256 + +//go:noescape +func chaCha20_ctr32_vsx(out, inp *byte, len int, key *[8]uint32, counter *uint32) + +func (c *Cipher) xorKeyStreamBlocks(dst, src []byte) { + chaCha20_ctr32_vsx(&dst[0], &src[0], len(src), &c.key, &c.counter) +} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s temporal-1.22.5/src/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s --- temporal-1.21.5-1/src/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,450 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Based on CRYPTOGAMS code with the following comment: +// # ==================================================================== +// # Written by Andy Polyakov for the OpenSSL +// # project. The module is, however, dual licensed under OpenSSL and +// # CRYPTOGAMS licenses depending on where you obtain it. For further +// # details see http://www.openssl.org/~appro/cryptogams/. +// # ==================================================================== + +// Code for the perl script that generates the ppc64 assembler +// can be found in the cryptogams repository at the link below. It is based on +// the original from openssl. + +// https://github.com/dot-asm/cryptogams/commit/a60f5b50ed908e91 + +// The differences in this and the original implementation are +// due to the calling conventions and initialization of constants. 
+ +//go:build gc && !purego +// +build gc,!purego + +#include "textflag.h" + +#define OUT R3 +#define INP R4 +#define LEN R5 +#define KEY R6 +#define CNT R7 +#define TMP R15 + +#define CONSTBASE R16 +#define BLOCKS R17 + +DATA consts<>+0x00(SB)/8, $0x3320646e61707865 +DATA consts<>+0x08(SB)/8, $0x6b20657479622d32 +DATA consts<>+0x10(SB)/8, $0x0000000000000001 +DATA consts<>+0x18(SB)/8, $0x0000000000000000 +DATA consts<>+0x20(SB)/8, $0x0000000000000004 +DATA consts<>+0x28(SB)/8, $0x0000000000000000 +DATA consts<>+0x30(SB)/8, $0x0a0b08090e0f0c0d +DATA consts<>+0x38(SB)/8, $0x0203000106070405 +DATA consts<>+0x40(SB)/8, $0x090a0b080d0e0f0c +DATA consts<>+0x48(SB)/8, $0x0102030005060704 +DATA consts<>+0x50(SB)/8, $0x6170786561707865 +DATA consts<>+0x58(SB)/8, $0x6170786561707865 +DATA consts<>+0x60(SB)/8, $0x3320646e3320646e +DATA consts<>+0x68(SB)/8, $0x3320646e3320646e +DATA consts<>+0x70(SB)/8, $0x79622d3279622d32 +DATA consts<>+0x78(SB)/8, $0x79622d3279622d32 +DATA consts<>+0x80(SB)/8, $0x6b2065746b206574 +DATA consts<>+0x88(SB)/8, $0x6b2065746b206574 +DATA consts<>+0x90(SB)/8, $0x0000000100000000 +DATA consts<>+0x98(SB)/8, $0x0000000300000002 +GLOBL consts<>(SB), RODATA, $0xa0 + +//func chaCha20_ctr32_vsx(out, inp *byte, len int, key *[8]uint32, counter *uint32) +TEXT ·chaCha20_ctr32_vsx(SB),NOSPLIT,$64-40 + MOVD out+0(FP), OUT + MOVD inp+8(FP), INP + MOVD len+16(FP), LEN + MOVD key+24(FP), KEY + MOVD counter+32(FP), CNT + + // Addressing for constants + MOVD $consts<>+0x00(SB), CONSTBASE + MOVD $16, R8 + MOVD $32, R9 + MOVD $48, R10 + MOVD $64, R11 + SRD $6, LEN, BLOCKS + // V16 + LXVW4X (CONSTBASE)(R0), VS48 + ADD $80,CONSTBASE + + // Load key into V17,V18 + LXVW4X (KEY)(R0), VS49 + LXVW4X (KEY)(R8), VS50 + + // Load CNT, NONCE into V19 + LXVW4X (CNT)(R0), VS51 + + // Clear V27 + VXOR V27, V27, V27 + + // V28 + LXVW4X (CONSTBASE)(R11), VS60 + + // splat slot from V19 -> V26 + VSPLTW $0, V19, V26 + + VSLDOI $4, V19, V27, V19 + VSLDOI $12, V27, V19, V19 + + VADDUWM V26, V28, V26 + + MOVD $10, R14 + MOVD R14, CTR + +loop_outer_vsx: + // V0, V1, V2, V3 + LXVW4X (R0)(CONSTBASE), VS32 + LXVW4X (R8)(CONSTBASE), VS33 + LXVW4X (R9)(CONSTBASE), VS34 + LXVW4X (R10)(CONSTBASE), VS35 + + // splat values from V17, V18 into V4-V11 + VSPLTW $0, V17, V4 + VSPLTW $1, V17, V5 + VSPLTW $2, V17, V6 + VSPLTW $3, V17, V7 + VSPLTW $0, V18, V8 + VSPLTW $1, V18, V9 + VSPLTW $2, V18, V10 + VSPLTW $3, V18, V11 + + // VOR + VOR V26, V26, V12 + + // splat values from V19 -> V13, V14, V15 + VSPLTW $1, V19, V13 + VSPLTW $2, V19, V14 + VSPLTW $3, V19, V15 + + // splat const values + VSPLTISW $-16, V27 + VSPLTISW $12, V28 + VSPLTISW $8, V29 + VSPLTISW $7, V30 + +loop_vsx: + VADDUWM V0, V4, V0 + VADDUWM V1, V5, V1 + VADDUWM V2, V6, V2 + VADDUWM V3, V7, V3 + + VXOR V12, V0, V12 + VXOR V13, V1, V13 + VXOR V14, V2, V14 + VXOR V15, V3, V15 + + VRLW V12, V27, V12 + VRLW V13, V27, V13 + VRLW V14, V27, V14 + VRLW V15, V27, V15 + + VADDUWM V8, V12, V8 + VADDUWM V9, V13, V9 + VADDUWM V10, V14, V10 + VADDUWM V11, V15, V11 + + VXOR V4, V8, V4 + VXOR V5, V9, V5 + VXOR V6, V10, V6 + VXOR V7, V11, V7 + + VRLW V4, V28, V4 + VRLW V5, V28, V5 + VRLW V6, V28, V6 + VRLW V7, V28, V7 + + VADDUWM V0, V4, V0 + VADDUWM V1, V5, V1 + VADDUWM V2, V6, V2 + VADDUWM V3, V7, V3 + + VXOR V12, V0, V12 + VXOR V13, V1, V13 + VXOR V14, V2, V14 + VXOR V15, V3, V15 + + VRLW V12, V29, V12 + VRLW V13, V29, V13 + VRLW V14, V29, V14 + VRLW V15, V29, V15 + + VADDUWM V8, V12, V8 + VADDUWM V9, V13, V9 + VADDUWM V10, V14, V10 + VADDUWM V11, V15, V11 + + VXOR V4, V8, 
V4 + VXOR V5, V9, V5 + VXOR V6, V10, V6 + VXOR V7, V11, V7 + + VRLW V4, V30, V4 + VRLW V5, V30, V5 + VRLW V6, V30, V6 + VRLW V7, V30, V7 + + VADDUWM V0, V5, V0 + VADDUWM V1, V6, V1 + VADDUWM V2, V7, V2 + VADDUWM V3, V4, V3 + + VXOR V15, V0, V15 + VXOR V12, V1, V12 + VXOR V13, V2, V13 + VXOR V14, V3, V14 + + VRLW V15, V27, V15 + VRLW V12, V27, V12 + VRLW V13, V27, V13 + VRLW V14, V27, V14 + + VADDUWM V10, V15, V10 + VADDUWM V11, V12, V11 + VADDUWM V8, V13, V8 + VADDUWM V9, V14, V9 + + VXOR V5, V10, V5 + VXOR V6, V11, V6 + VXOR V7, V8, V7 + VXOR V4, V9, V4 + + VRLW V5, V28, V5 + VRLW V6, V28, V6 + VRLW V7, V28, V7 + VRLW V4, V28, V4 + + VADDUWM V0, V5, V0 + VADDUWM V1, V6, V1 + VADDUWM V2, V7, V2 + VADDUWM V3, V4, V3 + + VXOR V15, V0, V15 + VXOR V12, V1, V12 + VXOR V13, V2, V13 + VXOR V14, V3, V14 + + VRLW V15, V29, V15 + VRLW V12, V29, V12 + VRLW V13, V29, V13 + VRLW V14, V29, V14 + + VADDUWM V10, V15, V10 + VADDUWM V11, V12, V11 + VADDUWM V8, V13, V8 + VADDUWM V9, V14, V9 + + VXOR V5, V10, V5 + VXOR V6, V11, V6 + VXOR V7, V8, V7 + VXOR V4, V9, V4 + + VRLW V5, V30, V5 + VRLW V6, V30, V6 + VRLW V7, V30, V7 + VRLW V4, V30, V4 + BC 16, LT, loop_vsx + + VADDUWM V12, V26, V12 + + WORD $0x13600F8C // VMRGEW V0, V1, V27 + WORD $0x13821F8C // VMRGEW V2, V3, V28 + + WORD $0x10000E8C // VMRGOW V0, V1, V0 + WORD $0x10421E8C // VMRGOW V2, V3, V2 + + WORD $0x13A42F8C // VMRGEW V4, V5, V29 + WORD $0x13C63F8C // VMRGEW V6, V7, V30 + + XXPERMDI VS32, VS34, $0, VS33 + XXPERMDI VS32, VS34, $3, VS35 + XXPERMDI VS59, VS60, $0, VS32 + XXPERMDI VS59, VS60, $3, VS34 + + WORD $0x10842E8C // VMRGOW V4, V5, V4 + WORD $0x10C63E8C // VMRGOW V6, V7, V6 + + WORD $0x13684F8C // VMRGEW V8, V9, V27 + WORD $0x138A5F8C // VMRGEW V10, V11, V28 + + XXPERMDI VS36, VS38, $0, VS37 + XXPERMDI VS36, VS38, $3, VS39 + XXPERMDI VS61, VS62, $0, VS36 + XXPERMDI VS61, VS62, $3, VS38 + + WORD $0x11084E8C // VMRGOW V8, V9, V8 + WORD $0x114A5E8C // VMRGOW V10, V11, V10 + + WORD $0x13AC6F8C // VMRGEW V12, V13, V29 + WORD $0x13CE7F8C // VMRGEW V14, V15, V30 + + XXPERMDI VS40, VS42, $0, VS41 + XXPERMDI VS40, VS42, $3, VS43 + XXPERMDI VS59, VS60, $0, VS40 + XXPERMDI VS59, VS60, $3, VS42 + + WORD $0x118C6E8C // VMRGOW V12, V13, V12 + WORD $0x11CE7E8C // VMRGOW V14, V15, V14 + + VSPLTISW $4, V27 + VADDUWM V26, V27, V26 + + XXPERMDI VS44, VS46, $0, VS45 + XXPERMDI VS44, VS46, $3, VS47 + XXPERMDI VS61, VS62, $0, VS44 + XXPERMDI VS61, VS62, $3, VS46 + + VADDUWM V0, V16, V0 + VADDUWM V4, V17, V4 + VADDUWM V8, V18, V8 + VADDUWM V12, V19, V12 + + CMPU LEN, $64 + BLT tail_vsx + + // Bottom of loop + LXVW4X (INP)(R0), VS59 + LXVW4X (INP)(R8), VS60 + LXVW4X (INP)(R9), VS61 + LXVW4X (INP)(R10), VS62 + + VXOR V27, V0, V27 + VXOR V28, V4, V28 + VXOR V29, V8, V29 + VXOR V30, V12, V30 + + STXVW4X VS59, (OUT)(R0) + STXVW4X VS60, (OUT)(R8) + ADD $64, INP + STXVW4X VS61, (OUT)(R9) + ADD $-64, LEN + STXVW4X VS62, (OUT)(R10) + ADD $64, OUT + BEQ done_vsx + + VADDUWM V1, V16, V0 + VADDUWM V5, V17, V4 + VADDUWM V9, V18, V8 + VADDUWM V13, V19, V12 + + CMPU LEN, $64 + BLT tail_vsx + + LXVW4X (INP)(R0), VS59 + LXVW4X (INP)(R8), VS60 + LXVW4X (INP)(R9), VS61 + LXVW4X (INP)(R10), VS62 + VXOR V27, V0, V27 + + VXOR V28, V4, V28 + VXOR V29, V8, V29 + VXOR V30, V12, V30 + + STXVW4X VS59, (OUT)(R0) + STXVW4X VS60, (OUT)(R8) + ADD $64, INP + STXVW4X VS61, (OUT)(R9) + ADD $-64, LEN + STXVW4X VS62, (OUT)(V10) + ADD $64, OUT + BEQ done_vsx + + VADDUWM V2, V16, V0 + VADDUWM V6, V17, V4 + VADDUWM V10, V18, V8 + VADDUWM V14, V19, V12 + + CMPU LEN, $64 + BLT tail_vsx + + LXVW4X 
(INP)(R0), VS59 + LXVW4X (INP)(R8), VS60 + LXVW4X (INP)(R9), VS61 + LXVW4X (INP)(R10), VS62 + + VXOR V27, V0, V27 + VXOR V28, V4, V28 + VXOR V29, V8, V29 + VXOR V30, V12, V30 + + STXVW4X VS59, (OUT)(R0) + STXVW4X VS60, (OUT)(R8) + ADD $64, INP + STXVW4X VS61, (OUT)(R9) + ADD $-64, LEN + STXVW4X VS62, (OUT)(R10) + ADD $64, OUT + BEQ done_vsx + + VADDUWM V3, V16, V0 + VADDUWM V7, V17, V4 + VADDUWM V11, V18, V8 + VADDUWM V15, V19, V12 + + CMPU LEN, $64 + BLT tail_vsx + + LXVW4X (INP)(R0), VS59 + LXVW4X (INP)(R8), VS60 + LXVW4X (INP)(R9), VS61 + LXVW4X (INP)(R10), VS62 + + VXOR V27, V0, V27 + VXOR V28, V4, V28 + VXOR V29, V8, V29 + VXOR V30, V12, V30 + + STXVW4X VS59, (OUT)(R0) + STXVW4X VS60, (OUT)(R8) + ADD $64, INP + STXVW4X VS61, (OUT)(R9) + ADD $-64, LEN + STXVW4X VS62, (OUT)(R10) + ADD $64, OUT + + MOVD $10, R14 + MOVD R14, CTR + BNE loop_outer_vsx + +done_vsx: + // Increment counter by number of 64 byte blocks + MOVD (CNT), R14 + ADD BLOCKS, R14 + MOVD R14, (CNT) + RET + +tail_vsx: + ADD $32, R1, R11 + MOVD LEN, CTR + + // Save values on stack to copy from + STXVW4X VS32, (R11)(R0) + STXVW4X VS36, (R11)(R8) + STXVW4X VS40, (R11)(R9) + STXVW4X VS44, (R11)(R10) + ADD $-1, R11, R12 + ADD $-1, INP + ADD $-1, OUT + +looptail_vsx: + // Copying the result to OUT + // in bytes. + MOVBZU 1(R12), KEY + MOVBZU 1(INP), TMP + XOR KEY, TMP, KEY + MOVBU KEY, 1(OUT) + BC 16, LT, looptail_vsx + + // Clear the stack values + STXVW4X VS48, (R11)(R0) + STXVW4X VS48, (R11)(R8) + STXVW4X VS48, (R11)(R9) + STXVW4X VS48, (R11)(R10) + BR done_vsx diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go temporal-1.22.5/src/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go --- temporal-1.21.5-1/src/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,28 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc && !purego +// +build gc,!purego + +package chacha20 + +import "golang.org/x/sys/cpu" + +var haveAsm = cpu.S390X.HasVX + +const bufSize = 256 + +// xorKeyStreamVX is an assembly implementation of XORKeyStream. It must only +// be called when the vector facility is available. Implementation in asm_s390x.s. +// +//go:noescape +func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32) + +func (c *Cipher) xorKeyStreamBlocks(dst, src []byte) { + if cpu.S390X.HasVX { + xorKeyStreamVX(dst, src, &c.key, &c.nonce, &c.counter) + } else { + c.xorKeyStreamBlocksGeneric(dst, src) + } +} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/crypto/chacha20/chacha_s390x.s temporal-1.22.5/src/vendor/golang.org/x/crypto/chacha20/chacha_s390x.s --- temporal-1.21.5-1/src/vendor/golang.org/x/crypto/chacha20/chacha_s390x.s 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/crypto/chacha20/chacha_s390x.s 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,225 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc && !purego +// +build gc,!purego + +#include "go_asm.h" +#include "textflag.h" + +// This is an implementation of the ChaCha20 encryption algorithm as +// specified in RFC 7539. 
It uses vector instructions to compute +// 4 keystream blocks in parallel (256 bytes) which are then XORed +// with the bytes in the input slice. + +GLOBL ·constants<>(SB), RODATA|NOPTR, $32 +// BSWAP: swap bytes in each 4-byte element +DATA ·constants<>+0x00(SB)/4, $0x03020100 +DATA ·constants<>+0x04(SB)/4, $0x07060504 +DATA ·constants<>+0x08(SB)/4, $0x0b0a0908 +DATA ·constants<>+0x0c(SB)/4, $0x0f0e0d0c +// J0: [j0, j1, j2, j3] +DATA ·constants<>+0x10(SB)/4, $0x61707865 +DATA ·constants<>+0x14(SB)/4, $0x3320646e +DATA ·constants<>+0x18(SB)/4, $0x79622d32 +DATA ·constants<>+0x1c(SB)/4, $0x6b206574 + +#define BSWAP V5 +#define J0 V6 +#define KEY0 V7 +#define KEY1 V8 +#define NONCE V9 +#define CTR V10 +#define M0 V11 +#define M1 V12 +#define M2 V13 +#define M3 V14 +#define INC V15 +#define X0 V16 +#define X1 V17 +#define X2 V18 +#define X3 V19 +#define X4 V20 +#define X5 V21 +#define X6 V22 +#define X7 V23 +#define X8 V24 +#define X9 V25 +#define X10 V26 +#define X11 V27 +#define X12 V28 +#define X13 V29 +#define X14 V30 +#define X15 V31 + +#define NUM_ROUNDS 20 + +#define ROUND4(a0, a1, a2, a3, b0, b1, b2, b3, c0, c1, c2, c3, d0, d1, d2, d3) \ + VAF a1, a0, a0 \ + VAF b1, b0, b0 \ + VAF c1, c0, c0 \ + VAF d1, d0, d0 \ + VX a0, a2, a2 \ + VX b0, b2, b2 \ + VX c0, c2, c2 \ + VX d0, d2, d2 \ + VERLLF $16, a2, a2 \ + VERLLF $16, b2, b2 \ + VERLLF $16, c2, c2 \ + VERLLF $16, d2, d2 \ + VAF a2, a3, a3 \ + VAF b2, b3, b3 \ + VAF c2, c3, c3 \ + VAF d2, d3, d3 \ + VX a3, a1, a1 \ + VX b3, b1, b1 \ + VX c3, c1, c1 \ + VX d3, d1, d1 \ + VERLLF $12, a1, a1 \ + VERLLF $12, b1, b1 \ + VERLLF $12, c1, c1 \ + VERLLF $12, d1, d1 \ + VAF a1, a0, a0 \ + VAF b1, b0, b0 \ + VAF c1, c0, c0 \ + VAF d1, d0, d0 \ + VX a0, a2, a2 \ + VX b0, b2, b2 \ + VX c0, c2, c2 \ + VX d0, d2, d2 \ + VERLLF $8, a2, a2 \ + VERLLF $8, b2, b2 \ + VERLLF $8, c2, c2 \ + VERLLF $8, d2, d2 \ + VAF a2, a3, a3 \ + VAF b2, b3, b3 \ + VAF c2, c3, c3 \ + VAF d2, d3, d3 \ + VX a3, a1, a1 \ + VX b3, b1, b1 \ + VX c3, c1, c1 \ + VX d3, d1, d1 \ + VERLLF $7, a1, a1 \ + VERLLF $7, b1, b1 \ + VERLLF $7, c1, c1 \ + VERLLF $7, d1, d1 + +#define PERMUTE(mask, v0, v1, v2, v3) \ + VPERM v0, v0, mask, v0 \ + VPERM v1, v1, mask, v1 \ + VPERM v2, v2, mask, v2 \ + VPERM v3, v3, mask, v3 + +#define ADDV(x, v0, v1, v2, v3) \ + VAF x, v0, v0 \ + VAF x, v1, v1 \ + VAF x, v2, v2 \ + VAF x, v3, v3 + +#define XORV(off, dst, src, v0, v1, v2, v3) \ + VLM off(src), M0, M3 \ + PERMUTE(BSWAP, v0, v1, v2, v3) \ + VX v0, M0, M0 \ + VX v1, M1, M1 \ + VX v2, M2, M2 \ + VX v3, M3, M3 \ + VSTM M0, M3, off(dst) + +#define SHUFFLE(a, b, c, d, t, u, v, w) \ + VMRHF a, c, t \ // t = {a[0], c[0], a[1], c[1]} + VMRHF b, d, u \ // u = {b[0], d[0], b[1], d[1]} + VMRLF a, c, v \ // v = {a[2], c[2], a[3], c[3]} + VMRLF b, d, w \ // w = {b[2], d[2], b[3], d[3]} + VMRHF t, u, a \ // a = {a[0], b[0], c[0], d[0]} + VMRLF t, u, b \ // b = {a[1], b[1], c[1], d[1]} + VMRHF v, w, c \ // c = {a[2], b[2], c[2], d[2]} + VMRLF v, w, d // d = {a[3], b[3], c[3], d[3]} + +// func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32) +TEXT ·xorKeyStreamVX(SB), NOSPLIT, $0 + MOVD $·constants<>(SB), R1 + MOVD dst+0(FP), R2 // R2=&dst[0] + LMG src+24(FP), R3, R4 // R3=&src[0] R4=len(src) + MOVD key+48(FP), R5 // R5=key + MOVD nonce+56(FP), R6 // R6=nonce + MOVD counter+64(FP), R7 // R7=counter + + // load BSWAP and J0 + VLM (R1), BSWAP, J0 + + // setup + MOVD $95, R0 + VLM (R5), KEY0, KEY1 + VLL R0, (R6), NONCE + VZERO M0 + VLEIB $7, $32, M0 + VSRLB M0, NONCE, NONCE + + // 
initialize counter values + VLREPF (R7), CTR + VZERO INC + VLEIF $1, $1, INC + VLEIF $2, $2, INC + VLEIF $3, $3, INC + VAF INC, CTR, CTR + VREPIF $4, INC + +chacha: + VREPF $0, J0, X0 + VREPF $1, J0, X1 + VREPF $2, J0, X2 + VREPF $3, J0, X3 + VREPF $0, KEY0, X4 + VREPF $1, KEY0, X5 + VREPF $2, KEY0, X6 + VREPF $3, KEY0, X7 + VREPF $0, KEY1, X8 + VREPF $1, KEY1, X9 + VREPF $2, KEY1, X10 + VREPF $3, KEY1, X11 + VLR CTR, X12 + VREPF $1, NONCE, X13 + VREPF $2, NONCE, X14 + VREPF $3, NONCE, X15 + + MOVD $(NUM_ROUNDS/2), R1 + +loop: + ROUND4(X0, X4, X12, X8, X1, X5, X13, X9, X2, X6, X14, X10, X3, X7, X15, X11) + ROUND4(X0, X5, X15, X10, X1, X6, X12, X11, X2, X7, X13, X8, X3, X4, X14, X9) + + ADD $-1, R1 + BNE loop + + // decrement length + ADD $-256, R4 + + // rearrange vectors + SHUFFLE(X0, X1, X2, X3, M0, M1, M2, M3) + ADDV(J0, X0, X1, X2, X3) + SHUFFLE(X4, X5, X6, X7, M0, M1, M2, M3) + ADDV(KEY0, X4, X5, X6, X7) + SHUFFLE(X8, X9, X10, X11, M0, M1, M2, M3) + ADDV(KEY1, X8, X9, X10, X11) + VAF CTR, X12, X12 + SHUFFLE(X12, X13, X14, X15, M0, M1, M2, M3) + ADDV(NONCE, X12, X13, X14, X15) + + // increment counters + VAF INC, CTR, CTR + + // xor keystream with plaintext + XORV(0*64, R2, R3, X0, X4, X8, X12) + XORV(1*64, R2, R3, X1, X5, X9, X13) + XORV(2*64, R2, R3, X2, X6, X10, X14) + XORV(3*64, R2, R3, X3, X7, X11, X15) + + // increment pointers + MOVD $256(R2), R2 + MOVD $256(R3), R3 + + CMPBNE R4, $0, chacha + + VSTEF $0, CTR, (R7) + RET diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/crypto/chacha20/xor.go temporal-1.22.5/src/vendor/golang.org/x/crypto/chacha20/xor.go --- temporal-1.21.5-1/src/vendor/golang.org/x/crypto/chacha20/xor.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/crypto/chacha20/xor.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,42 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package chacha20 + +import "runtime" + +// Platforms that have fast unaligned 32-bit little endian accesses. +const unaligned = runtime.GOARCH == "386" || + runtime.GOARCH == "amd64" || + runtime.GOARCH == "arm64" || + runtime.GOARCH == "ppc64le" || + runtime.GOARCH == "s390x" + +// addXor reads a little endian uint32 from src, XORs it with (a + b) and +// places the result in little endian byte order in dst. +func addXor(dst, src []byte, a, b uint32) { + _, _ = src[3], dst[3] // bounds check elimination hint + if unaligned { + // The compiler should optimize this code into + // 32-bit unaligned little endian loads and stores. + // TODO: delete once the compiler does a reliably + // good job with the generic code below. + // See issue #25111 for more details.
+ v := uint32(src[0]) + v |= uint32(src[1]) << 8 + v |= uint32(src[2]) << 16 + v |= uint32(src[3]) << 24 + v ^= a + b + dst[0] = byte(v) + dst[1] = byte(v >> 8) + dst[2] = byte(v >> 16) + dst[3] = byte(v >> 24) + } else { + a += b + dst[0] = src[0] ^ byte(a) + dst[1] = src[1] ^ byte(a>>8) + dst[2] = src[2] ^ byte(a>>16) + dst[3] = src[3] ^ byte(a>>24) + } +} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go temporal-1.22.5/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go --- temporal-1.21.5-1/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,98 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package chacha20poly1305 implements the ChaCha20-Poly1305 AEAD and its +// extended nonce variant XChaCha20-Poly1305, as specified in RFC 8439 and +// draft-irtf-cfrg-xchacha-01. +package chacha20poly1305 // import "golang.org/x/crypto/chacha20poly1305" + +import ( + "crypto/cipher" + "errors" +) + +const ( + // KeySize is the size of the key used by this AEAD, in bytes. + KeySize = 32 + + // NonceSize is the size of the nonce used with the standard variant of this + // AEAD, in bytes. + // + // Note that this is too short to be safely generated at random if the same + // key is reused more than 2³² times. + NonceSize = 12 + + // NonceSizeX is the size of the nonce used with the XChaCha20-Poly1305 + // variant of this AEAD, in bytes. + NonceSizeX = 24 + + // Overhead is the size of the Poly1305 authentication tag, and the + // difference between a ciphertext length and its plaintext. + Overhead = 16 +) + +type chacha20poly1305 struct { + key [KeySize]byte +} + +// New returns a ChaCha20-Poly1305 AEAD that uses the given 256-bit key. +func New(key []byte) (cipher.AEAD, error) { + if len(key) != KeySize { + return nil, errors.New("chacha20poly1305: bad key length") + } + ret := new(chacha20poly1305) + copy(ret.key[:], key) + return ret, nil +} + +func (c *chacha20poly1305) NonceSize() int { + return NonceSize +} + +func (c *chacha20poly1305) Overhead() int { + return Overhead +} + +func (c *chacha20poly1305) Seal(dst, nonce, plaintext, additionalData []byte) []byte { + if len(nonce) != NonceSize { + panic("chacha20poly1305: bad nonce length passed to Seal") + } + + if uint64(len(plaintext)) > (1<<38)-64 { + panic("chacha20poly1305: plaintext too large") + } + + return c.seal(dst, nonce, plaintext, additionalData) +} + +var errOpen = errors.New("chacha20poly1305: message authentication failed") + +func (c *chacha20poly1305) Open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) { + if len(nonce) != NonceSize { + panic("chacha20poly1305: bad nonce length passed to Open") + } + if len(ciphertext) < 16 { + return nil, errOpen + } + if uint64(len(ciphertext)) > (1<<38)-48 { + panic("chacha20poly1305: ciphertext too large") + } + + return c.open(dst, nonce, ciphertext, additionalData) +} + +// sliceForAppend takes a slice and a requested number of bytes. It returns a +// slice with the contents of the given slice followed by that many bytes and a +// second slice that aliases into it and contains only the extra bytes. If the +// original slice has sufficient capacity then no allocation is performed. 
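The aliasing behaviour described above is what lets Seal and Open append output into a caller-supplied buffer without copying when it already has enough capacity. A minimal, self-contained Go sketch of that pattern follows; the helper body mirrors the sliceForAppend function defined immediately below, while main, the buffer sizes and the sample bytes are purely illustrative:

package main

import "fmt"

// sliceForAppend mirrors the vendored helper: it extends in by n bytes,
// reusing capacity when possible, and returns the full slice plus a tail
// that aliases the newly added region.
func sliceForAppend(in []byte, n int) (head, tail []byte) {
	if total := len(in) + n; cap(in) >= total {
		head = in[:total]
	} else {
		head = make([]byte, total)
		copy(head, in)
	}
	tail = head[len(in):]
	return
}

func main() {
	dst := make([]byte, 0, 64) // caller-provided buffer with spare capacity
	ret, out := sliceForAppend(dst, 16)
	copy(out, []byte("ciphertext+tag--")) // e.g. Seal writes ciphertext||tag here
	// ret shares dst's backing array, so no allocation was needed.
	fmt.Println(len(ret), cap(ret) == cap(dst)) // prints: 16 true
}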
+func sliceForAppend(in []byte, n int) (head, tail []byte) { + if total := len(in) + n; cap(in) >= total { + head = in[:total] + } else { + head = make([]byte, total) + copy(head, in) + } + tail = head[len(in):] + return +} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go temporal-1.22.5/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go --- temporal-1.21.5-1/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,87 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc && !purego +// +build gc,!purego + +package chacha20poly1305 + +import ( + "encoding/binary" + + "golang.org/x/crypto/internal/alias" + "golang.org/x/sys/cpu" +) + +//go:noescape +func chacha20Poly1305Open(dst []byte, key []uint32, src, ad []byte) bool + +//go:noescape +func chacha20Poly1305Seal(dst []byte, key []uint32, src, ad []byte) + +var ( + useAVX2 = cpu.X86.HasAVX2 && cpu.X86.HasBMI2 +) + +// setupState writes a ChaCha20 input matrix to state. See +// https://tools.ietf.org/html/rfc7539#section-2.3. +func setupState(state *[16]uint32, key *[32]byte, nonce []byte) { + state[0] = 0x61707865 + state[1] = 0x3320646e + state[2] = 0x79622d32 + state[3] = 0x6b206574 + + state[4] = binary.LittleEndian.Uint32(key[0:4]) + state[5] = binary.LittleEndian.Uint32(key[4:8]) + state[6] = binary.LittleEndian.Uint32(key[8:12]) + state[7] = binary.LittleEndian.Uint32(key[12:16]) + state[8] = binary.LittleEndian.Uint32(key[16:20]) + state[9] = binary.LittleEndian.Uint32(key[20:24]) + state[10] = binary.LittleEndian.Uint32(key[24:28]) + state[11] = binary.LittleEndian.Uint32(key[28:32]) + + state[12] = 0 + state[13] = binary.LittleEndian.Uint32(nonce[0:4]) + state[14] = binary.LittleEndian.Uint32(nonce[4:8]) + state[15] = binary.LittleEndian.Uint32(nonce[8:12]) +} + +func (c *chacha20poly1305) seal(dst, nonce, plaintext, additionalData []byte) []byte { + if !cpu.X86.HasSSSE3 { + return c.sealGeneric(dst, nonce, plaintext, additionalData) + } + + var state [16]uint32 + setupState(&state, &c.key, nonce) + + ret, out := sliceForAppend(dst, len(plaintext)+16) + if alias.InexactOverlap(out, plaintext) { + panic("chacha20poly1305: invalid buffer overlap") + } + chacha20Poly1305Seal(out[:], state[:], plaintext, additionalData) + return ret +} + +func (c *chacha20poly1305) open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) { + if !cpu.X86.HasSSSE3 { + return c.openGeneric(dst, nonce, ciphertext, additionalData) + } + + var state [16]uint32 + setupState(&state, &c.key, nonce) + + ciphertext = ciphertext[:len(ciphertext)-16] + ret, out := sliceForAppend(dst, len(ciphertext)) + if alias.InexactOverlap(out, ciphertext) { + panic("chacha20poly1305: invalid buffer overlap") + } + if !chacha20Poly1305Open(out, state[:], ciphertext, additionalData) { + for i := range out { + out[i] = 0 + } + return nil, errOpen + } + + return ret, nil +} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s temporal-1.22.5/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s --- temporal-1.21.5-1/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s 1970-01-01 
00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,2696 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file was originally from https://golang.org/cl/24717 by Vlad Krasnov of CloudFlare. + +//go:build gc && !purego +// +build gc,!purego + +#include "textflag.h" +// General register allocation +#define oup DI +#define inp SI +#define inl BX +#define adp CX // free to reuse, after we hash the additional data +#define keyp R8 // free to reuse, when we copy the key to stack +#define itr2 R9 // general iterator +#define itr1 CX // general iterator +#define acc0 R10 +#define acc1 R11 +#define acc2 R12 +#define t0 R13 +#define t1 R14 +#define t2 R15 +#define t3 R8 +// Register and stack allocation for the SSE code +#define rStore (0*16)(BP) +#define sStore (1*16)(BP) +#define state1Store (2*16)(BP) +#define state2Store (3*16)(BP) +#define tmpStore (4*16)(BP) +#define ctr0Store (5*16)(BP) +#define ctr1Store (6*16)(BP) +#define ctr2Store (7*16)(BP) +#define ctr3Store (8*16)(BP) +#define A0 X0 +#define A1 X1 +#define A2 X2 +#define B0 X3 +#define B1 X4 +#define B2 X5 +#define C0 X6 +#define C1 X7 +#define C2 X8 +#define D0 X9 +#define D1 X10 +#define D2 X11 +#define T0 X12 +#define T1 X13 +#define T2 X14 +#define T3 X15 +#define A3 T0 +#define B3 T1 +#define C3 T2 +#define D3 T3 +// Register and stack allocation for the AVX2 code +#define rsStoreAVX2 (0*32)(BP) +#define state1StoreAVX2 (1*32)(BP) +#define state2StoreAVX2 (2*32)(BP) +#define ctr0StoreAVX2 (3*32)(BP) +#define ctr1StoreAVX2 (4*32)(BP) +#define ctr2StoreAVX2 (5*32)(BP) +#define ctr3StoreAVX2 (6*32)(BP) +#define tmpStoreAVX2 (7*32)(BP) // 256 bytes on stack +#define AA0 Y0 +#define AA1 Y5 +#define AA2 Y6 +#define AA3 Y7 +#define BB0 Y14 +#define BB1 Y9 +#define BB2 Y10 +#define BB3 Y11 +#define CC0 Y12 +#define CC1 Y13 +#define CC2 Y8 +#define CC3 Y15 +#define DD0 Y4 +#define DD1 Y1 +#define DD2 Y2 +#define DD3 Y3 +#define TT0 DD3 +#define TT1 AA3 +#define TT2 BB3 +#define TT3 CC3 +// ChaCha20 constants +DATA ·chacha20Constants<>+0x00(SB)/4, $0x61707865 +DATA ·chacha20Constants<>+0x04(SB)/4, $0x3320646e +DATA ·chacha20Constants<>+0x08(SB)/4, $0x79622d32 +DATA ·chacha20Constants<>+0x0c(SB)/4, $0x6b206574 +DATA ·chacha20Constants<>+0x10(SB)/4, $0x61707865 +DATA ·chacha20Constants<>+0x14(SB)/4, $0x3320646e +DATA ·chacha20Constants<>+0x18(SB)/4, $0x79622d32 +DATA ·chacha20Constants<>+0x1c(SB)/4, $0x6b206574 +// <<< 16 with PSHUFB +DATA ·rol16<>+0x00(SB)/8, $0x0504070601000302 +DATA ·rol16<>+0x08(SB)/8, $0x0D0C0F0E09080B0A +DATA ·rol16<>+0x10(SB)/8, $0x0504070601000302 +DATA ·rol16<>+0x18(SB)/8, $0x0D0C0F0E09080B0A +// <<< 8 with PSHUFB +DATA ·rol8<>+0x00(SB)/8, $0x0605040702010003 +DATA ·rol8<>+0x08(SB)/8, $0x0E0D0C0F0A09080B +DATA ·rol8<>+0x10(SB)/8, $0x0605040702010003 +DATA ·rol8<>+0x18(SB)/8, $0x0E0D0C0F0A09080B + +DATA ·avx2InitMask<>+0x00(SB)/8, $0x0 +DATA ·avx2InitMask<>+0x08(SB)/8, $0x0 +DATA ·avx2InitMask<>+0x10(SB)/8, $0x1 +DATA ·avx2InitMask<>+0x18(SB)/8, $0x0 + +DATA ·avx2IncMask<>+0x00(SB)/8, $0x2 +DATA ·avx2IncMask<>+0x08(SB)/8, $0x0 +DATA ·avx2IncMask<>+0x10(SB)/8, $0x2 +DATA ·avx2IncMask<>+0x18(SB)/8, $0x0 +// Poly1305 key clamp +DATA ·polyClampMask<>+0x00(SB)/8, $0x0FFFFFFC0FFFFFFF +DATA ·polyClampMask<>+0x08(SB)/8, $0x0FFFFFFC0FFFFFFC +DATA ·polyClampMask<>+0x10(SB)/8, 
$0xFFFFFFFFFFFFFFFF +DATA ·polyClampMask<>+0x18(SB)/8, $0xFFFFFFFFFFFFFFFF + +DATA ·sseIncMask<>+0x00(SB)/8, $0x1 +DATA ·sseIncMask<>+0x08(SB)/8, $0x0 +// To load/store the last < 16 bytes in a buffer +DATA ·andMask<>+0x00(SB)/8, $0x00000000000000ff +DATA ·andMask<>+0x08(SB)/8, $0x0000000000000000 +DATA ·andMask<>+0x10(SB)/8, $0x000000000000ffff +DATA ·andMask<>+0x18(SB)/8, $0x0000000000000000 +DATA ·andMask<>+0x20(SB)/8, $0x0000000000ffffff +DATA ·andMask<>+0x28(SB)/8, $0x0000000000000000 +DATA ·andMask<>+0x30(SB)/8, $0x00000000ffffffff +DATA ·andMask<>+0x38(SB)/8, $0x0000000000000000 +DATA ·andMask<>+0x40(SB)/8, $0x000000ffffffffff +DATA ·andMask<>+0x48(SB)/8, $0x0000000000000000 +DATA ·andMask<>+0x50(SB)/8, $0x0000ffffffffffff +DATA ·andMask<>+0x58(SB)/8, $0x0000000000000000 +DATA ·andMask<>+0x60(SB)/8, $0x00ffffffffffffff +DATA ·andMask<>+0x68(SB)/8, $0x0000000000000000 +DATA ·andMask<>+0x70(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+0x78(SB)/8, $0x0000000000000000 +DATA ·andMask<>+0x80(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+0x88(SB)/8, $0x00000000000000ff +DATA ·andMask<>+0x90(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+0x98(SB)/8, $0x000000000000ffff +DATA ·andMask<>+0xa0(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+0xa8(SB)/8, $0x0000000000ffffff +DATA ·andMask<>+0xb0(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+0xb8(SB)/8, $0x00000000ffffffff +DATA ·andMask<>+0xc0(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+0xc8(SB)/8, $0x000000ffffffffff +DATA ·andMask<>+0xd0(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+0xd8(SB)/8, $0x0000ffffffffffff +DATA ·andMask<>+0xe0(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+0xe8(SB)/8, $0x00ffffffffffffff + +GLOBL ·chacha20Constants<>(SB), (NOPTR+RODATA), $32 +GLOBL ·rol16<>(SB), (NOPTR+RODATA), $32 +GLOBL ·rol8<>(SB), (NOPTR+RODATA), $32 +GLOBL ·sseIncMask<>(SB), (NOPTR+RODATA), $16 +GLOBL ·avx2IncMask<>(SB), (NOPTR+RODATA), $32 +GLOBL ·avx2InitMask<>(SB), (NOPTR+RODATA), $32 +GLOBL ·polyClampMask<>(SB), (NOPTR+RODATA), $32 +GLOBL ·andMask<>(SB), (NOPTR+RODATA), $240 +// No PALIGNR in Go ASM yet (but VPALIGNR is present). 
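The byte-encoded macros below perform the standard SSE diagonalization trick: after a column round, the B, C and D rows are rotated by one, two and three 32-bit lanes (PALIGNR $4, $8, $12) so the next round operates on diagonals, and the inverse rotation restores column order. A rough Go model of that lane rotation, with illustrative names and values only:

package main

import "fmt"

// rotLanes rotates the four 32-bit lanes of a ChaCha row left by n lanes,
// which is what the PALIGNR $4/$8/$12 byte sequences below do to X3..X15.
func rotLanes(row [4]uint32, n int) [4]uint32 {
	var out [4]uint32
	for i := 0; i < 4; i++ {
		out[i] = row[(i+n)%4]
	}
	return out
}

func main() {
	b := [4]uint32{0xb0, 0xb1, 0xb2, 0xb3}
	fmt.Printf("%x\n", rotLanes(b, 1)) // [b1 b2 b3 b0], as shiftB*Left does
	fmt.Printf("%x\n", rotLanes(b, 3)) // [b3 b0 b1 b2], as shiftB*Right does
}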
+#define shiftB0Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xdb; BYTE $0x04 // PALIGNR $4, X3, X3 +#define shiftB1Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xe4; BYTE $0x04 // PALIGNR $4, X4, X4 +#define shiftB2Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xed; BYTE $0x04 // PALIGNR $4, X5, X5 +#define shiftB3Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xed; BYTE $0x04 // PALIGNR $4, X13, X13 +#define shiftC0Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xf6; BYTE $0x08 // PALIGNR $8, X6, X6 +#define shiftC1Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xff; BYTE $0x08 // PALIGNR $8, X7, X7 +#define shiftC2Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xc0; BYTE $0x08 // PALIGNR $8, X8, X8 +#define shiftC3Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xf6; BYTE $0x08 // PALIGNR $8, X14, X14 +#define shiftD0Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xc9; BYTE $0x0c // PALIGNR $12, X9, X9 +#define shiftD1Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xd2; BYTE $0x0c // PALIGNR $12, X10, X10 +#define shiftD2Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xdb; BYTE $0x0c // PALIGNR $12, X11, X11 +#define shiftD3Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xff; BYTE $0x0c // PALIGNR $12, X15, X15 +#define shiftB0Right BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xdb; BYTE $0x0c // PALIGNR $12, X3, X3 +#define shiftB1Right BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xe4; BYTE $0x0c // PALIGNR $12, X4, X4 +#define shiftB2Right BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xed; BYTE $0x0c // PALIGNR $12, X5, X5 +#define shiftB3Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xed; BYTE $0x0c // PALIGNR $12, X13, X13 +#define shiftC0Right shiftC0Left +#define shiftC1Right shiftC1Left +#define shiftC2Right shiftC2Left +#define shiftC3Right shiftC3Left +#define shiftD0Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xc9; BYTE $0x04 // PALIGNR $4, X9, X9 +#define shiftD1Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xd2; BYTE $0x04 // PALIGNR $4, X10, X10 +#define shiftD2Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xdb; BYTE $0x04 // PALIGNR $4, X11, X11 +#define shiftD3Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xff; BYTE $0x04 // PALIGNR $4, X15, X15 +// Some macros +#define chachaQR(A, B, C, D, T) \ + PADDD B, A; PXOR A, D; PSHUFB ·rol16<>(SB), D \ + PADDD D, C; PXOR C, B; MOVO B, T; PSLLL $12, T; PSRLL $20, B; PXOR T, B \ + PADDD B, A; PXOR A, D; PSHUFB ·rol8<>(SB), D \ + PADDD D, C; PXOR C, B; MOVO B, T; PSLLL $7, T; PSRLL $25, B; PXOR T, B + +#define chachaQR_AVX2(A, B, C, D, T) \ + VPADDD B, A, A; VPXOR A, D, D; VPSHUFB ·rol16<>(SB), D, D \ + VPADDD D, C, C; VPXOR C, B, B; VPSLLD $12, B, T; VPSRLD $20, B, B; VPXOR T, B, B \ + VPADDD B, A, A; VPXOR A, D, D; VPSHUFB ·rol8<>(SB), D, D \ + VPADDD D, C, C; VPXOR C, B, B; VPSLLD $7, B, T; VPSRLD $25, B, B; VPXOR T, B, B + +#define polyAdd(S) ADDQ S, acc0; ADCQ 8+S, acc1; ADCQ $1, acc2 +#define polyMulStage1 MOVQ (0*8)(BP), AX; MOVQ AX, t2; MULQ acc0; MOVQ AX, t0; MOVQ DX, t1; MOVQ (0*8)(BP), AX; MULQ acc1; IMULQ acc2, t2; ADDQ AX, t1; ADCQ DX, t2 +#define polyMulStage2 MOVQ (1*8)(BP), AX; MOVQ AX, t3; MULQ acc0; ADDQ AX, t1; ADCQ $0, DX; MOVQ 
DX, acc0; MOVQ (1*8)(BP), AX; MULQ acc1; ADDQ AX, t2; ADCQ $0, DX +#define polyMulStage3 IMULQ acc2, t3; ADDQ acc0, t2; ADCQ DX, t3 +#define polyMulReduceStage MOVQ t0, acc0; MOVQ t1, acc1; MOVQ t2, acc2; ANDQ $3, acc2; MOVQ t2, t0; ANDQ $-4, t0; MOVQ t3, t1; SHRQ $2, t3, t2; SHRQ $2, t3; ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $0, acc2; ADDQ t2, acc0; ADCQ t3, acc1; ADCQ $0, acc2 + +#define polyMulStage1_AVX2 MOVQ (0*8)(BP), DX; MOVQ DX, t2; MULXQ acc0, t0, t1; IMULQ acc2, t2; MULXQ acc1, AX, DX; ADDQ AX, t1; ADCQ DX, t2 +#define polyMulStage2_AVX2 MOVQ (1*8)(BP), DX; MULXQ acc0, acc0, AX; ADDQ acc0, t1; MULXQ acc1, acc1, t3; ADCQ acc1, t2; ADCQ $0, t3 +#define polyMulStage3_AVX2 IMULQ acc2, DX; ADDQ AX, t2; ADCQ DX, t3 + +#define polyMul polyMulStage1; polyMulStage2; polyMulStage3; polyMulReduceStage +#define polyMulAVX2 polyMulStage1_AVX2; polyMulStage2_AVX2; polyMulStage3_AVX2; polyMulReduceStage +// ---------------------------------------------------------------------------- +TEXT polyHashADInternal<>(SB), NOSPLIT, $0 + // adp points to beginning of additional data + // itr2 holds ad length + XORQ acc0, acc0 + XORQ acc1, acc1 + XORQ acc2, acc2 + CMPQ itr2, $13 + JNE hashADLoop + +openFastTLSAD: + // Special treatment for the TLS case of 13 bytes + MOVQ (adp), acc0 + MOVQ 5(adp), acc1 + SHRQ $24, acc1 + MOVQ $1, acc2 + polyMul + RET + +hashADLoop: + // Hash in 16 byte chunks + CMPQ itr2, $16 + JB hashADTail + polyAdd(0(adp)) + LEAQ (1*16)(adp), adp + SUBQ $16, itr2 + polyMul + JMP hashADLoop + +hashADTail: + CMPQ itr2, $0 + JE hashADDone + + // Hash last < 16 byte tail + XORQ t0, t0 + XORQ t1, t1 + XORQ t2, t2 + ADDQ itr2, adp + +hashADTailLoop: + SHLQ $8, t0, t1 + SHLQ $8, t0 + MOVB -1(adp), t2 + XORQ t2, t0 + DECQ adp + DECQ itr2 + JNE hashADTailLoop + +hashADTailFinish: + ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $1, acc2 + polyMul + + // Finished AD +hashADDone: + RET + +// ---------------------------------------------------------------------------- +// func chacha20Poly1305Open(dst, key, src, ad []byte) bool +TEXT ·chacha20Poly1305Open(SB), 0, $288-97 + // For aligned stack access + MOVQ SP, BP + ADDQ $32, BP + ANDQ $-32, BP + MOVQ dst+0(FP), oup + MOVQ key+24(FP), keyp + MOVQ src+48(FP), inp + MOVQ src_len+56(FP), inl + MOVQ ad+72(FP), adp + + // Check for AVX2 support + CMPB ·useAVX2(SB), $1 + JE chacha20Poly1305Open_AVX2 + + // Special optimization, for very short buffers + CMPQ inl, $128 + JBE openSSE128 // About 16% faster + + // For long buffers, prepare the poly key first + MOVOU ·chacha20Constants<>(SB), A0 + MOVOU (1*16)(keyp), B0 + MOVOU (2*16)(keyp), C0 + MOVOU (3*16)(keyp), D0 + MOVO D0, T1 + + // Store state on stack for future use + MOVO B0, state1Store + MOVO C0, state2Store + MOVO D0, ctr3Store + MOVQ $10, itr2 + +openSSEPreparePolyKey: + chachaQR(A0, B0, C0, D0, T0) + shiftB0Left; shiftC0Left; shiftD0Left + chachaQR(A0, B0, C0, D0, T0) + shiftB0Right; shiftC0Right; shiftD0Right + DECQ itr2 + JNE openSSEPreparePolyKey + + // A0|B0 hold the Poly1305 32-byte key, C0,D0 can be discarded + PADDL ·chacha20Constants<>(SB), A0; PADDL state1Store, B0 + + // Clamp and store the key + PAND ·polyClampMask<>(SB), A0 + MOVO A0, rStore; MOVO B0, sStore + + // Hash AAD + MOVQ ad_len+80(FP), itr2 + CALL polyHashADInternal<>(SB) + +openSSEMainLoop: + CMPQ inl, $256 + JB openSSEMainLoopDone + + // Load state, increment counter blocks + MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0 + MOVO A0, A1; MOVO B0, B1; MOVO 
C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 + MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 + MOVO A2, A3; MOVO B2, B3; MOVO C2, C3; MOVO D2, D3; PADDL ·sseIncMask<>(SB), D3 + + // Store counters + MOVO D0, ctr0Store; MOVO D1, ctr1Store; MOVO D2, ctr2Store; MOVO D3, ctr3Store + + // There are 10 ChaCha20 iterations of 2QR each, so for 6 iterations we hash 2 blocks, and for the remaining 4 only 1 block - for a total of 16 + MOVQ $4, itr1 + MOVQ inp, itr2 + +openSSEInternalLoop: + MOVO C3, tmpStore + chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) + MOVO tmpStore, C3 + MOVO C1, tmpStore + chachaQR(A3, B3, C3, D3, C1) + MOVO tmpStore, C1 + polyAdd(0(itr2)) + shiftB0Left; shiftB1Left; shiftB2Left; shiftB3Left + shiftC0Left; shiftC1Left; shiftC2Left; shiftC3Left + shiftD0Left; shiftD1Left; shiftD2Left; shiftD3Left + polyMulStage1 + polyMulStage2 + LEAQ (2*8)(itr2), itr2 + MOVO C3, tmpStore + chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) + MOVO tmpStore, C3 + MOVO C1, tmpStore + polyMulStage3 + chachaQR(A3, B3, C3, D3, C1) + MOVO tmpStore, C1 + polyMulReduceStage + shiftB0Right; shiftB1Right; shiftB2Right; shiftB3Right + shiftC0Right; shiftC1Right; shiftC2Right; shiftC3Right + shiftD0Right; shiftD1Right; shiftD2Right; shiftD3Right + DECQ itr1 + JGE openSSEInternalLoop + + polyAdd(0(itr2)) + polyMul + LEAQ (2*8)(itr2), itr2 + + CMPQ itr1, $-6 + JG openSSEInternalLoop + + // Add in the state + PADDD ·chacha20Constants<>(SB), A0; PADDD ·chacha20Constants<>(SB), A1; PADDD ·chacha20Constants<>(SB), A2; PADDD ·chacha20Constants<>(SB), A3 + PADDD state1Store, B0; PADDD state1Store, B1; PADDD state1Store, B2; PADDD state1Store, B3 + PADDD state2Store, C0; PADDD state2Store, C1; PADDD state2Store, C2; PADDD state2Store, C3 + PADDD ctr0Store, D0; PADDD ctr1Store, D1; PADDD ctr2Store, D2; PADDD ctr3Store, D3 + + // Load - xor - store + MOVO D3, tmpStore + MOVOU (0*16)(inp), D3; PXOR D3, A0; MOVOU A0, (0*16)(oup) + MOVOU (1*16)(inp), D3; PXOR D3, B0; MOVOU B0, (1*16)(oup) + MOVOU (2*16)(inp), D3; PXOR D3, C0; MOVOU C0, (2*16)(oup) + MOVOU (3*16)(inp), D3; PXOR D3, D0; MOVOU D0, (3*16)(oup) + MOVOU (4*16)(inp), D0; PXOR D0, A1; MOVOU A1, (4*16)(oup) + MOVOU (5*16)(inp), D0; PXOR D0, B1; MOVOU B1, (5*16)(oup) + MOVOU (6*16)(inp), D0; PXOR D0, C1; MOVOU C1, (6*16)(oup) + MOVOU (7*16)(inp), D0; PXOR D0, D1; MOVOU D1, (7*16)(oup) + MOVOU (8*16)(inp), D0; PXOR D0, A2; MOVOU A2, (8*16)(oup) + MOVOU (9*16)(inp), D0; PXOR D0, B2; MOVOU B2, (9*16)(oup) + MOVOU (10*16)(inp), D0; PXOR D0, C2; MOVOU C2, (10*16)(oup) + MOVOU (11*16)(inp), D0; PXOR D0, D2; MOVOU D2, (11*16)(oup) + MOVOU (12*16)(inp), D0; PXOR D0, A3; MOVOU A3, (12*16)(oup) + MOVOU (13*16)(inp), D0; PXOR D0, B3; MOVOU B3, (13*16)(oup) + MOVOU (14*16)(inp), D0; PXOR D0, C3; MOVOU C3, (14*16)(oup) + MOVOU (15*16)(inp), D0; PXOR tmpStore, D0; MOVOU D0, (15*16)(oup) + LEAQ 256(inp), inp + LEAQ 256(oup), oup + SUBQ $256, inl + JMP openSSEMainLoop + +openSSEMainLoopDone: + // Handle the various tail sizes efficiently + TESTQ inl, inl + JE openSSEFinalize + CMPQ inl, $64 + JBE openSSETail64 + CMPQ inl, $128 + JBE openSSETail128 + CMPQ inl, $192 + JBE openSSETail192 + JMP openSSETail256 + +openSSEFinalize: + // Hash in the PT, AAD lengths + ADDQ ad_len+80(FP), acc0; ADCQ src_len+56(FP), acc1; ADCQ $1, acc2 + polyMul + + // Final reduce + MOVQ acc0, t0 + MOVQ acc1, t1 + MOVQ acc2, t2 + SUBQ $-5, acc0 + SBBQ $-1, acc1 + SBBQ $3, acc2 + CMOVQCS 
t0, acc0 + CMOVQCS t1, acc1 + CMOVQCS t2, acc2 + + // Add in the "s" part of the key + ADDQ 0+sStore, acc0 + ADCQ 8+sStore, acc1 + + // Finally, constant time compare to the tag at the end of the message + XORQ AX, AX + MOVQ $1, DX + XORQ (0*8)(inp), acc0 + XORQ (1*8)(inp), acc1 + ORQ acc1, acc0 + CMOVQEQ DX, AX + + // Return true iff tags are equal + MOVB AX, ret+96(FP) + RET + +// ---------------------------------------------------------------------------- +// Special optimization for buffers smaller than 129 bytes +openSSE128: + // For up to 128 bytes of ciphertext and 64 bytes for the poly key, we require to process three blocks + MOVOU ·chacha20Constants<>(SB), A0; MOVOU (1*16)(keyp), B0; MOVOU (2*16)(keyp), C0; MOVOU (3*16)(keyp), D0 + MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 + MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 + MOVO B0, T1; MOVO C0, T2; MOVO D1, T3 + MOVQ $10, itr2 + +openSSE128InnerCipherLoop: + chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) + shiftB0Left; shiftB1Left; shiftB2Left + shiftC0Left; shiftC1Left; shiftC2Left + shiftD0Left; shiftD1Left; shiftD2Left + chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) + shiftB0Right; shiftB1Right; shiftB2Right + shiftC0Right; shiftC1Right; shiftC2Right + shiftD0Right; shiftD1Right; shiftD2Right + DECQ itr2 + JNE openSSE128InnerCipherLoop + + // A0|B0 hold the Poly1305 32-byte key, C0,D0 can be discarded + PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1; PADDL ·chacha20Constants<>(SB), A2 + PADDL T1, B0; PADDL T1, B1; PADDL T1, B2 + PADDL T2, C1; PADDL T2, C2 + PADDL T3, D1; PADDL ·sseIncMask<>(SB), T3; PADDL T3, D2 + + // Clamp and store the key + PAND ·polyClampMask<>(SB), A0 + MOVOU A0, rStore; MOVOU B0, sStore + + // Hash + MOVQ ad_len+80(FP), itr2 + CALL polyHashADInternal<>(SB) + +openSSE128Open: + CMPQ inl, $16 + JB openSSETail16 + SUBQ $16, inl + + // Load for hashing + polyAdd(0(inp)) + + // Load for decryption + MOVOU (inp), T0; PXOR T0, A1; MOVOU A1, (oup) + LEAQ (1*16)(inp), inp + LEAQ (1*16)(oup), oup + polyMul + + // Shift the stream "left" + MOVO B1, A1 + MOVO C1, B1 + MOVO D1, C1 + MOVO A2, D1 + MOVO B2, A2 + MOVO C2, B2 + MOVO D2, C2 + JMP openSSE128Open + +openSSETail16: + TESTQ inl, inl + JE openSSEFinalize + + // We can safely load the CT from the end, because it is padded with the MAC + MOVQ inl, itr2 + SHLQ $4, itr2 + LEAQ ·andMask<>(SB), t0 + MOVOU (inp), T0 + ADDQ inl, inp + PAND -16(t0)(itr2*1), T0 + MOVO T0, 0+tmpStore + MOVQ T0, t0 + MOVQ 8+tmpStore, t1 + PXOR A1, T0 + + // We can only store one byte at a time, since plaintext can be shorter than 16 bytes +openSSETail16Store: + MOVQ T0, t3 + MOVB t3, (oup) + PSRLDQ $1, T0 + INCQ oup + DECQ inl + JNE openSSETail16Store + ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $1, acc2 + polyMul + JMP openSSEFinalize + +// ---------------------------------------------------------------------------- +// Special optimization for the last 64 bytes of ciphertext +openSSETail64: + // Need to decrypt up to 64 bytes - prepare single block + MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr0Store + XORQ itr2, itr2 + MOVQ inl, itr1 + CMPQ itr1, $16 + JB openSSETail64LoopB + +openSSETail64LoopA: + // Perform ChaCha rounds, while hashing the remaining input + polyAdd(0(inp)(itr2*1)) + polyMul + SUBQ $16, itr1 + 
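The polyAdd/polyMul pairs used throughout these loops are the Poly1305 block function: polyAdd folds the next 16 input bytes plus the implicit 2^128 padding bit into the three-limb accumulator, and polyMul multiplies by the clamped key half r and reduces modulo 2^130 - 5. A self-contained Go model of one such step, under the assumption that a fully reduced big.Int stands in for the lazily reduced acc0/acc1/acc2 limbs; clampR, polyStep and the sample inputs are illustrative names, not part of the vendored code:

package main

import (
	"encoding/binary"
	"fmt"
	"math/big"
)

var p = new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 130), big.NewInt(5))

// clampR applies the same mask as ·polyClampMask<> to the first key half.
func clampR(key16 []byte) *big.Int {
	lo := binary.LittleEndian.Uint64(key16[0:8]) & 0x0ffffffc0fffffff
	hi := binary.LittleEndian.Uint64(key16[8:16]) & 0x0ffffffc0ffffffc
	r := new(big.Int).SetUint64(hi)
	r.Lsh(r, 64)
	return r.Or(r, new(big.Int).SetUint64(lo))
}

// polyStep models one polyAdd/polyMul pair: h = (h + chunk + 2^128) * r mod p.
func polyStep(h, r *big.Int, chunk16 []byte) *big.Int {
	m := new(big.Int).SetUint64(binary.LittleEndian.Uint64(chunk16[8:16]))
	m.Lsh(m, 64)
	m.Or(m, new(big.Int).SetUint64(binary.LittleEndian.Uint64(chunk16[0:8])))
	m.Add(m, new(big.Int).Lsh(big.NewInt(1), 128)) // the ADCQ $1, acc2 bit
	h = new(big.Int).Add(h, m)                     // polyAdd
	h.Mul(h, r)                                    // polyMulStage1..3
	return h.Mod(h, p)                             // polyMulReduceStage
}

func main() {
	key := make([]byte, 16) // illustrative r half; the real one comes from the first ChaCha20 block
	for i := range key {
		key[i] = byte(i + 1)
	}
	h := polyStep(big.NewInt(0), clampR(key), []byte("sixteen byte blk"))
	fmt.Printf("h = %x\n", h)
}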
+openSSETail64LoopB: + ADDQ $16, itr2 + chachaQR(A0, B0, C0, D0, T0) + shiftB0Left; shiftC0Left; shiftD0Left + chachaQR(A0, B0, C0, D0, T0) + shiftB0Right; shiftC0Right; shiftD0Right + + CMPQ itr1, $16 + JAE openSSETail64LoopA + + CMPQ itr2, $160 + JNE openSSETail64LoopB + + PADDL ·chacha20Constants<>(SB), A0; PADDL state1Store, B0; PADDL state2Store, C0; PADDL ctr0Store, D0 + +openSSETail64DecLoop: + CMPQ inl, $16 + JB openSSETail64DecLoopDone + SUBQ $16, inl + MOVOU (inp), T0 + PXOR T0, A0 + MOVOU A0, (oup) + LEAQ 16(inp), inp + LEAQ 16(oup), oup + MOVO B0, A0 + MOVO C0, B0 + MOVO D0, C0 + JMP openSSETail64DecLoop + +openSSETail64DecLoopDone: + MOVO A0, A1 + JMP openSSETail16 + +// ---------------------------------------------------------------------------- +// Special optimization for the last 128 bytes of ciphertext +openSSETail128: + // Need to decrypt up to 128 bytes - prepare two blocks + MOVO ·chacha20Constants<>(SB), A1; MOVO state1Store, B1; MOVO state2Store, C1; MOVO ctr3Store, D1; PADDL ·sseIncMask<>(SB), D1; MOVO D1, ctr0Store + MOVO A1, A0; MOVO B1, B0; MOVO C1, C0; MOVO D1, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr1Store + XORQ itr2, itr2 + MOVQ inl, itr1 + ANDQ $-16, itr1 + +openSSETail128LoopA: + // Perform ChaCha rounds, while hashing the remaining input + polyAdd(0(inp)(itr2*1)) + polyMul + +openSSETail128LoopB: + ADDQ $16, itr2 + chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0) + shiftB0Left; shiftC0Left; shiftD0Left + shiftB1Left; shiftC1Left; shiftD1Left + chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0) + shiftB0Right; shiftC0Right; shiftD0Right + shiftB1Right; shiftC1Right; shiftD1Right + + CMPQ itr2, itr1 + JB openSSETail128LoopA + + CMPQ itr2, $160 + JNE openSSETail128LoopB + + PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1 + PADDL state1Store, B0; PADDL state1Store, B1 + PADDL state2Store, C0; PADDL state2Store, C1 + PADDL ctr1Store, D0; PADDL ctr0Store, D1 + + MOVOU (0*16)(inp), T0; MOVOU (1*16)(inp), T1; MOVOU (2*16)(inp), T2; MOVOU (3*16)(inp), T3 + PXOR T0, A1; PXOR T1, B1; PXOR T2, C1; PXOR T3, D1 + MOVOU A1, (0*16)(oup); MOVOU B1, (1*16)(oup); MOVOU C1, (2*16)(oup); MOVOU D1, (3*16)(oup) + + SUBQ $64, inl + LEAQ 64(inp), inp + LEAQ 64(oup), oup + JMP openSSETail64DecLoop + +// ---------------------------------------------------------------------------- +// Special optimization for the last 192 bytes of ciphertext +openSSETail192: + // Need to decrypt up to 192 bytes - prepare three blocks + MOVO ·chacha20Constants<>(SB), A2; MOVO state1Store, B2; MOVO state2Store, C2; MOVO ctr3Store, D2; PADDL ·sseIncMask<>(SB), D2; MOVO D2, ctr0Store + MOVO A2, A1; MOVO B2, B1; MOVO C2, C1; MOVO D2, D1; PADDL ·sseIncMask<>(SB), D1; MOVO D1, ctr1Store + MOVO A1, A0; MOVO B1, B0; MOVO C1, C0; MOVO D1, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr2Store + + MOVQ inl, itr1 + MOVQ $160, itr2 + CMPQ itr1, $160 + CMOVQGT itr2, itr1 + ANDQ $-16, itr1 + XORQ itr2, itr2 + +openSSLTail192LoopA: + // Perform ChaCha rounds, while hashing the remaining input + polyAdd(0(inp)(itr2*1)) + polyMul + +openSSLTail192LoopB: + ADDQ $16, itr2 + chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) + shiftB0Left; shiftC0Left; shiftD0Left + shiftB1Left; shiftC1Left; shiftD1Left + shiftB2Left; shiftC2Left; shiftD2Left + + chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) + shiftB0Right; shiftC0Right; shiftD0Right + shiftB1Right; shiftC1Right; shiftD1Right + shiftB2Right; 
shiftC2Right; shiftD2Right + + CMPQ itr2, itr1 + JB openSSLTail192LoopA + + CMPQ itr2, $160 + JNE openSSLTail192LoopB + + CMPQ inl, $176 + JB openSSLTail192Store + + polyAdd(160(inp)) + polyMul + + CMPQ inl, $192 + JB openSSLTail192Store + + polyAdd(176(inp)) + polyMul + +openSSLTail192Store: + PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1; PADDL ·chacha20Constants<>(SB), A2 + PADDL state1Store, B0; PADDL state1Store, B1; PADDL state1Store, B2 + PADDL state2Store, C0; PADDL state2Store, C1; PADDL state2Store, C2 + PADDL ctr2Store, D0; PADDL ctr1Store, D1; PADDL ctr0Store, D2 + + MOVOU (0*16)(inp), T0; MOVOU (1*16)(inp), T1; MOVOU (2*16)(inp), T2; MOVOU (3*16)(inp), T3 + PXOR T0, A2; PXOR T1, B2; PXOR T2, C2; PXOR T3, D2 + MOVOU A2, (0*16)(oup); MOVOU B2, (1*16)(oup); MOVOU C2, (2*16)(oup); MOVOU D2, (3*16)(oup) + + MOVOU (4*16)(inp), T0; MOVOU (5*16)(inp), T1; MOVOU (6*16)(inp), T2; MOVOU (7*16)(inp), T3 + PXOR T0, A1; PXOR T1, B1; PXOR T2, C1; PXOR T3, D1 + MOVOU A1, (4*16)(oup); MOVOU B1, (5*16)(oup); MOVOU C1, (6*16)(oup); MOVOU D1, (7*16)(oup) + + SUBQ $128, inl + LEAQ 128(inp), inp + LEAQ 128(oup), oup + JMP openSSETail64DecLoop + +// ---------------------------------------------------------------------------- +// Special optimization for the last 256 bytes of ciphertext +openSSETail256: + // Need to decrypt up to 256 bytes - prepare four blocks + MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0 + MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 + MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 + MOVO A2, A3; MOVO B2, B3; MOVO C2, C3; MOVO D2, D3; PADDL ·sseIncMask<>(SB), D3 + + // Store counters + MOVO D0, ctr0Store; MOVO D1, ctr1Store; MOVO D2, ctr2Store; MOVO D3, ctr3Store + XORQ itr2, itr2 + +openSSETail256Loop: + // This loop inteleaves 8 ChaCha quarter rounds with 1 poly multiplication + polyAdd(0(inp)(itr2*1)) + MOVO C3, tmpStore + chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) + MOVO tmpStore, C3 + MOVO C1, tmpStore + chachaQR(A3, B3, C3, D3, C1) + MOVO tmpStore, C1 + shiftB0Left; shiftB1Left; shiftB2Left; shiftB3Left + shiftC0Left; shiftC1Left; shiftC2Left; shiftC3Left + shiftD0Left; shiftD1Left; shiftD2Left; shiftD3Left + polyMulStage1 + polyMulStage2 + MOVO C3, tmpStore + chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) + MOVO tmpStore, C3 + MOVO C1, tmpStore + chachaQR(A3, B3, C3, D3, C1) + MOVO tmpStore, C1 + polyMulStage3 + polyMulReduceStage + shiftB0Right; shiftB1Right; shiftB2Right; shiftB3Right + shiftC0Right; shiftC1Right; shiftC2Right; shiftC3Right + shiftD0Right; shiftD1Right; shiftD2Right; shiftD3Right + ADDQ $2*8, itr2 + CMPQ itr2, $160 + JB openSSETail256Loop + MOVQ inl, itr1 + ANDQ $-16, itr1 + +openSSETail256HashLoop: + polyAdd(0(inp)(itr2*1)) + polyMul + ADDQ $2*8, itr2 + CMPQ itr2, itr1 + JB openSSETail256HashLoop + + // Add in the state + PADDD ·chacha20Constants<>(SB), A0; PADDD ·chacha20Constants<>(SB), A1; PADDD ·chacha20Constants<>(SB), A2; PADDD ·chacha20Constants<>(SB), A3 + PADDD state1Store, B0; PADDD state1Store, B1; PADDD state1Store, B2; PADDD state1Store, B3 + PADDD state2Store, C0; PADDD state2Store, C1; PADDD state2Store, C2; PADDD state2Store, C3 + PADDD ctr0Store, D0; PADDD ctr1Store, D1; PADDD ctr2Store, D2; PADDD ctr3Store, D3 + MOVO D3, tmpStore + + // Load - xor - store + MOVOU (0*16)(inp), 
D3; PXOR D3, A0 + MOVOU (1*16)(inp), D3; PXOR D3, B0 + MOVOU (2*16)(inp), D3; PXOR D3, C0 + MOVOU (3*16)(inp), D3; PXOR D3, D0 + MOVOU A0, (0*16)(oup) + MOVOU B0, (1*16)(oup) + MOVOU C0, (2*16)(oup) + MOVOU D0, (3*16)(oup) + MOVOU (4*16)(inp), A0; MOVOU (5*16)(inp), B0; MOVOU (6*16)(inp), C0; MOVOU (7*16)(inp), D0 + PXOR A0, A1; PXOR B0, B1; PXOR C0, C1; PXOR D0, D1 + MOVOU A1, (4*16)(oup); MOVOU B1, (5*16)(oup); MOVOU C1, (6*16)(oup); MOVOU D1, (7*16)(oup) + MOVOU (8*16)(inp), A0; MOVOU (9*16)(inp), B0; MOVOU (10*16)(inp), C0; MOVOU (11*16)(inp), D0 + PXOR A0, A2; PXOR B0, B2; PXOR C0, C2; PXOR D0, D2 + MOVOU A2, (8*16)(oup); MOVOU B2, (9*16)(oup); MOVOU C2, (10*16)(oup); MOVOU D2, (11*16)(oup) + LEAQ 192(inp), inp + LEAQ 192(oup), oup + SUBQ $192, inl + MOVO A3, A0 + MOVO B3, B0 + MOVO C3, C0 + MOVO tmpStore, D0 + + JMP openSSETail64DecLoop + +// ---------------------------------------------------------------------------- +// ------------------------- AVX2 Code ---------------------------------------- +chacha20Poly1305Open_AVX2: + VZEROUPPER + VMOVDQU ·chacha20Constants<>(SB), AA0 + BYTE $0xc4; BYTE $0x42; BYTE $0x7d; BYTE $0x5a; BYTE $0x70; BYTE $0x10 // broadcasti128 16(r8), ymm14 + BYTE $0xc4; BYTE $0x42; BYTE $0x7d; BYTE $0x5a; BYTE $0x60; BYTE $0x20 // broadcasti128 32(r8), ymm12 + BYTE $0xc4; BYTE $0xc2; BYTE $0x7d; BYTE $0x5a; BYTE $0x60; BYTE $0x30 // broadcasti128 48(r8), ymm4 + VPADDD ·avx2InitMask<>(SB), DD0, DD0 + + // Special optimization, for very short buffers + CMPQ inl, $192 + JBE openAVX2192 + CMPQ inl, $320 + JBE openAVX2320 + + // For the general key prepare the key first - as a byproduct we have 64 bytes of cipher stream + VMOVDQA BB0, state1StoreAVX2 + VMOVDQA CC0, state2StoreAVX2 + VMOVDQA DD0, ctr3StoreAVX2 + MOVQ $10, itr2 + +openAVX2PreparePolyKey: + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0) + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $12, DD0, DD0, DD0 + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0) + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $4, DD0, DD0, DD0 + DECQ itr2 + JNE openAVX2PreparePolyKey + + VPADDD ·chacha20Constants<>(SB), AA0, AA0 + VPADDD state1StoreAVX2, BB0, BB0 + VPADDD state2StoreAVX2, CC0, CC0 + VPADDD ctr3StoreAVX2, DD0, DD0 + + VPERM2I128 $0x02, AA0, BB0, TT0 + + // Clamp and store poly key + VPAND ·polyClampMask<>(SB), TT0, TT0 + VMOVDQA TT0, rsStoreAVX2 + + // Stream for the first 64 bytes + VPERM2I128 $0x13, AA0, BB0, AA0 + VPERM2I128 $0x13, CC0, DD0, BB0 + + // Hash AD + first 64 bytes + MOVQ ad_len+80(FP), itr2 + CALL polyHashADInternal<>(SB) + XORQ itr1, itr1 + +openAVX2InitialHash64: + polyAdd(0(inp)(itr1*1)) + polyMulAVX2 + ADDQ $16, itr1 + CMPQ itr1, $64 + JNE openAVX2InitialHash64 + + // Decrypt the first 64 bytes + VPXOR (0*32)(inp), AA0, AA0 + VPXOR (1*32)(inp), BB0, BB0 + VMOVDQU AA0, (0*32)(oup) + VMOVDQU BB0, (1*32)(oup) + LEAQ (2*32)(inp), inp + LEAQ (2*32)(oup), oup + SUBQ $64, inl + +openAVX2MainLoop: + CMPQ inl, $512 + JB openAVX2MainLoopDone + + // Load state, increment counter blocks, store the incremented counters + VMOVDQU ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 + VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3 + VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3 + VMOVDQA ctr3StoreAVX2, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3 + VMOVDQA DD0, ctr0StoreAVX2; 
VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2 + XORQ itr1, itr1 + +openAVX2InternalLoop: + // Lets just say this spaghetti loop interleaves 2 quarter rounds with 3 poly multiplications + // Effectively per 512 bytes of stream we hash 480 bytes of ciphertext + polyAdd(0*8(inp)(itr1*1)) + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + polyMulStage1_AVX2 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 + polyMulStage2_AVX2 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + polyMulStage3_AVX2 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + polyMulReduceStage + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 + polyAdd(2*8(inp)(itr1*1)) + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + polyMulStage1_AVX2 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + polyMulStage2_AVX2 + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $4, BB3, BB3, BB3 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 + VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2; VPALIGNR $12, DD3, DD3, DD3 + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + polyMulStage3_AVX2 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 + polyMulReduceStage + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + polyAdd(4*8(inp)(itr1*1)) + LEAQ (6*8)(itr1), itr1 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + polyMulStage1_AVX2 + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + polyMulStage2_AVX2 + VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB 
·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + polyMulStage3_AVX2 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + polyMulReduceStage + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $12, BB3, BB3, BB3 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 + VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2; VPALIGNR $4, DD3, DD3, DD3 + CMPQ itr1, $480 + JNE openAVX2InternalLoop + + VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3 + VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3 + VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3 + VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3 + VMOVDQA CC3, tmpStoreAVX2 + + // We only hashed 480 of the 512 bytes available - hash the remaining 32 here + polyAdd(480(inp)) + polyMulAVX2 + VPERM2I128 $0x02, AA0, BB0, CC3; VPERM2I128 $0x13, AA0, BB0, BB0; VPERM2I128 $0x02, CC0, DD0, AA0; VPERM2I128 $0x13, CC0, DD0, CC0 + VPXOR (0*32)(inp), CC3, CC3; VPXOR (1*32)(inp), AA0, AA0; VPXOR (2*32)(inp), BB0, BB0; VPXOR (3*32)(inp), CC0, CC0 + VMOVDQU CC3, (0*32)(oup); VMOVDQU AA0, (1*32)(oup); VMOVDQU BB0, (2*32)(oup); VMOVDQU CC0, (3*32)(oup) + VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 + VPXOR (4*32)(inp), AA0, AA0; VPXOR (5*32)(inp), BB0, BB0; VPXOR (6*32)(inp), CC0, CC0; VPXOR (7*32)(inp), DD0, DD0 + VMOVDQU AA0, (4*32)(oup); VMOVDQU BB0, (5*32)(oup); VMOVDQU CC0, (6*32)(oup); VMOVDQU DD0, (7*32)(oup) + + // and here + polyAdd(496(inp)) + polyMulAVX2 + VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0 + VPXOR (8*32)(inp), AA0, AA0; VPXOR (9*32)(inp), BB0, BB0; VPXOR (10*32)(inp), CC0, CC0; VPXOR (11*32)(inp), DD0, DD0 + VMOVDQU AA0, (8*32)(oup); VMOVDQU BB0, (9*32)(oup); VMOVDQU CC0, (10*32)(oup); VMOVDQU DD0, (11*32)(oup) + VPERM2I128 $0x02, AA3, BB3, AA0; VPERM2I128 $0x02, tmpStoreAVX2, DD3, BB0; VPERM2I128 $0x13, AA3, BB3, CC0; VPERM2I128 $0x13, tmpStoreAVX2, DD3, DD0 + VPXOR (12*32)(inp), AA0, AA0; VPXOR (13*32)(inp), BB0, BB0; VPXOR (14*32)(inp), CC0, CC0; VPXOR (15*32)(inp), DD0, DD0 + VMOVDQU AA0, (12*32)(oup); VMOVDQU BB0, (13*32)(oup); VMOVDQU CC0, (14*32)(oup); VMOVDQU DD0, (15*32)(oup) + LEAQ (32*16)(inp), inp + LEAQ (32*16)(oup), oup + SUBQ $(32*16), inl + JMP openAVX2MainLoop + +openAVX2MainLoopDone: + // Handle the various tail sizes efficiently + TESTQ inl, inl + JE openSSEFinalize + CMPQ inl, $128 + JBE openAVX2Tail128 + CMPQ inl, $256 + JBE openAVX2Tail256 + CMPQ inl, $384 + JBE openAVX2Tail384 + JMP 
openAVX2Tail512 + +// ---------------------------------------------------------------------------- +// Special optimization for buffers smaller than 193 bytes +openAVX2192: + // For up to 192 bytes of ciphertext and 64 bytes for the poly key, we process four blocks + VMOVDQA AA0, AA1 + VMOVDQA BB0, BB1 + VMOVDQA CC0, CC1 + VPADDD ·avx2IncMask<>(SB), DD0, DD1 + VMOVDQA AA0, AA2 + VMOVDQA BB0, BB2 + VMOVDQA CC0, CC2 + VMOVDQA DD0, DD2 + VMOVDQA DD1, TT3 + MOVQ $10, itr2 + +openAVX2192InnerCipherLoop: + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 + VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1 + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 + VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1 + DECQ itr2 + JNE openAVX2192InnerCipherLoop + VPADDD AA2, AA0, AA0; VPADDD AA2, AA1, AA1 + VPADDD BB2, BB0, BB0; VPADDD BB2, BB1, BB1 + VPADDD CC2, CC0, CC0; VPADDD CC2, CC1, CC1 + VPADDD DD2, DD0, DD0; VPADDD TT3, DD1, DD1 + VPERM2I128 $0x02, AA0, BB0, TT0 + + // Clamp and store poly key + VPAND ·polyClampMask<>(SB), TT0, TT0 + VMOVDQA TT0, rsStoreAVX2 + + // Stream for up to 192 bytes + VPERM2I128 $0x13, AA0, BB0, AA0 + VPERM2I128 $0x13, CC0, DD0, BB0 + VPERM2I128 $0x02, AA1, BB1, CC0 + VPERM2I128 $0x02, CC1, DD1, DD0 + VPERM2I128 $0x13, AA1, BB1, AA1 + VPERM2I128 $0x13, CC1, DD1, BB1 + +openAVX2ShortOpen: + // Hash + MOVQ ad_len+80(FP), itr2 + CALL polyHashADInternal<>(SB) + +openAVX2ShortOpenLoop: + CMPQ inl, $32 + JB openAVX2ShortTail32 + SUBQ $32, inl + + // Load for hashing + polyAdd(0*8(inp)) + polyMulAVX2 + polyAdd(2*8(inp)) + polyMulAVX2 + + // Load for decryption + VPXOR (inp), AA0, AA0 + VMOVDQU AA0, (oup) + LEAQ (1*32)(inp), inp + LEAQ (1*32)(oup), oup + + // Shift stream left + VMOVDQA BB0, AA0 + VMOVDQA CC0, BB0 + VMOVDQA DD0, CC0 + VMOVDQA AA1, DD0 + VMOVDQA BB1, AA1 + VMOVDQA CC1, BB1 + VMOVDQA DD1, CC1 + VMOVDQA AA2, DD1 + VMOVDQA BB2, AA2 + JMP openAVX2ShortOpenLoop + +openAVX2ShortTail32: + CMPQ inl, $16 + VMOVDQA A0, A1 + JB openAVX2ShortDone + + SUBQ $16, inl + + // Load for hashing + polyAdd(0*8(inp)) + polyMulAVX2 + + // Load for decryption + VPXOR (inp), A0, T0 + VMOVDQU T0, (oup) + LEAQ (1*16)(inp), inp + LEAQ (1*16)(oup), oup + VPERM2I128 $0x11, AA0, AA0, AA0 + VMOVDQA A0, A1 + +openAVX2ShortDone: + VZEROUPPER + JMP openSSETail16 + +// ---------------------------------------------------------------------------- +// Special optimization for buffers smaller than 321 bytes +openAVX2320: + // For up to 320 bytes of ciphertext and 64 bytes for the poly key, we process six blocks + VMOVDQA AA0, AA1; VMOVDQA BB0, BB1; VMOVDQA CC0, CC1; VPADDD ·avx2IncMask<>(SB), DD0, DD1 + VMOVDQA AA0, AA2; VMOVDQA BB0, BB2; VMOVDQA CC0, CC2; VPADDD ·avx2IncMask<>(SB), DD1, DD2 + VMOVDQA BB0, TT1; VMOVDQA CC0, TT2; VMOVDQA DD0, TT3 + MOVQ $10, itr2 + +openAVX2320InnerCipherLoop: + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 + VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2 + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); 
chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 + VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2 + DECQ itr2 + JNE openAVX2320InnerCipherLoop + + VMOVDQA ·chacha20Constants<>(SB), TT0 + VPADDD TT0, AA0, AA0; VPADDD TT0, AA1, AA1; VPADDD TT0, AA2, AA2 + VPADDD TT1, BB0, BB0; VPADDD TT1, BB1, BB1; VPADDD TT1, BB2, BB2 + VPADDD TT2, CC0, CC0; VPADDD TT2, CC1, CC1; VPADDD TT2, CC2, CC2 + VMOVDQA ·avx2IncMask<>(SB), TT0 + VPADDD TT3, DD0, DD0; VPADDD TT0, TT3, TT3 + VPADDD TT3, DD1, DD1; VPADDD TT0, TT3, TT3 + VPADDD TT3, DD2, DD2 + + // Clamp and store poly key + VPERM2I128 $0x02, AA0, BB0, TT0 + VPAND ·polyClampMask<>(SB), TT0, TT0 + VMOVDQA TT0, rsStoreAVX2 + + // Stream for up to 320 bytes + VPERM2I128 $0x13, AA0, BB0, AA0 + VPERM2I128 $0x13, CC0, DD0, BB0 + VPERM2I128 $0x02, AA1, BB1, CC0 + VPERM2I128 $0x02, CC1, DD1, DD0 + VPERM2I128 $0x13, AA1, BB1, AA1 + VPERM2I128 $0x13, CC1, DD1, BB1 + VPERM2I128 $0x02, AA2, BB2, CC1 + VPERM2I128 $0x02, CC2, DD2, DD1 + VPERM2I128 $0x13, AA2, BB2, AA2 + VPERM2I128 $0x13, CC2, DD2, BB2 + JMP openAVX2ShortOpen + +// ---------------------------------------------------------------------------- +// Special optimization for the last 128 bytes of ciphertext +openAVX2Tail128: + // Need to decrypt up to 128 bytes - prepare two blocks + VMOVDQA ·chacha20Constants<>(SB), AA1 + VMOVDQA state1StoreAVX2, BB1 + VMOVDQA state2StoreAVX2, CC1 + VMOVDQA ctr3StoreAVX2, DD1 + VPADDD ·avx2IncMask<>(SB), DD1, DD1 + VMOVDQA DD1, DD0 + + XORQ itr2, itr2 + MOVQ inl, itr1 + ANDQ $-16, itr1 + TESTQ itr1, itr1 + JE openAVX2Tail128LoopB + +openAVX2Tail128LoopA: + // Perform ChaCha rounds, while hashing the remaining input + polyAdd(0(inp)(itr2*1)) + polyMulAVX2 + +openAVX2Tail128LoopB: + ADDQ $16, itr2 + chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) + VPALIGNR $4, BB1, BB1, BB1 + VPALIGNR $8, CC1, CC1, CC1 + VPALIGNR $12, DD1, DD1, DD1 + chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) + VPALIGNR $12, BB1, BB1, BB1 + VPALIGNR $8, CC1, CC1, CC1 + VPALIGNR $4, DD1, DD1, DD1 + CMPQ itr2, itr1 + JB openAVX2Tail128LoopA + CMPQ itr2, $160 + JNE openAVX2Tail128LoopB + + VPADDD ·chacha20Constants<>(SB), AA1, AA1 + VPADDD state1StoreAVX2, BB1, BB1 + VPADDD state2StoreAVX2, CC1, CC1 + VPADDD DD0, DD1, DD1 + VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 + +openAVX2TailLoop: + CMPQ inl, $32 + JB openAVX2Tail + SUBQ $32, inl + + // Load for decryption + VPXOR (inp), AA0, AA0 + VMOVDQU AA0, (oup) + LEAQ (1*32)(inp), inp + LEAQ (1*32)(oup), oup + VMOVDQA BB0, AA0 + VMOVDQA CC0, BB0 + VMOVDQA DD0, CC0 + JMP openAVX2TailLoop + +openAVX2Tail: + CMPQ inl, $16 + VMOVDQA A0, A1 + JB openAVX2TailDone + SUBQ $16, inl + + // Load for decryption + VPXOR (inp), A0, T0 + VMOVDQU T0, (oup) + LEAQ (1*16)(inp), inp + LEAQ (1*16)(oup), oup + VPERM2I128 $0x11, AA0, AA0, AA0 + VMOVDQA A0, A1 + +openAVX2TailDone: + VZEROUPPER + JMP openSSETail16 + +// ---------------------------------------------------------------------------- +// Special optimization for the last 256 bytes of ciphertext +openAVX2Tail256: + // Need to decrypt up to 256 bytes - prepare four blocks + VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1 + VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1 + VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, 
CC1 + VMOVDQA ctr3StoreAVX2, DD0 + VPADDD ·avx2IncMask<>(SB), DD0, DD0 + VPADDD ·avx2IncMask<>(SB), DD0, DD1 + VMOVDQA DD0, TT1 + VMOVDQA DD1, TT2 + + // Compute the number of iterations that will hash data + MOVQ inl, tmpStoreAVX2 + MOVQ inl, itr1 + SUBQ $128, itr1 + SHRQ $4, itr1 + MOVQ $10, itr2 + CMPQ itr1, $10 + CMOVQGT itr2, itr1 + MOVQ inp, inl + XORQ itr2, itr2 + +openAVX2Tail256LoopA: + polyAdd(0(inl)) + polyMulAVX2 + LEAQ 16(inl), inl + + // Perform ChaCha rounds, while hashing the remaining input +openAVX2Tail256LoopB: + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 + VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1 + INCQ itr2 + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 + VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1 + CMPQ itr2, itr1 + JB openAVX2Tail256LoopA + + CMPQ itr2, $10 + JNE openAVX2Tail256LoopB + + MOVQ inl, itr2 + SUBQ inp, inl + MOVQ inl, itr1 + MOVQ tmpStoreAVX2, inl + + // Hash the remainder of data (if any) +openAVX2Tail256Hash: + ADDQ $16, itr1 + CMPQ itr1, inl + JGT openAVX2Tail256HashEnd + polyAdd (0(itr2)) + polyMulAVX2 + LEAQ 16(itr2), itr2 + JMP openAVX2Tail256Hash + +// Store 128 bytes safely, then go to store loop +openAVX2Tail256HashEnd: + VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1 + VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1 + VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1 + VPADDD TT1, DD0, DD0; VPADDD TT2, DD1, DD1 + VPERM2I128 $0x02, AA0, BB0, AA2; VPERM2I128 $0x02, CC0, DD0, BB2; VPERM2I128 $0x13, AA0, BB0, CC2; VPERM2I128 $0x13, CC0, DD0, DD2 + VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 + + VPXOR (0*32)(inp), AA2, AA2; VPXOR (1*32)(inp), BB2, BB2; VPXOR (2*32)(inp), CC2, CC2; VPXOR (3*32)(inp), DD2, DD2 + VMOVDQU AA2, (0*32)(oup); VMOVDQU BB2, (1*32)(oup); VMOVDQU CC2, (2*32)(oup); VMOVDQU DD2, (3*32)(oup) + LEAQ (4*32)(inp), inp + LEAQ (4*32)(oup), oup + SUBQ $4*32, inl + + JMP openAVX2TailLoop + +// ---------------------------------------------------------------------------- +// Special optimization for the last 384 bytes of ciphertext +openAVX2Tail384: + // Need to decrypt up to 384 bytes - prepare six blocks + VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2 + VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2 + VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2 + VMOVDQA ctr3StoreAVX2, DD0 + VPADDD ·avx2IncMask<>(SB), DD0, DD0 + VPADDD ·avx2IncMask<>(SB), DD0, DD1 + VPADDD ·avx2IncMask<>(SB), DD1, DD2 + VMOVDQA DD0, ctr0StoreAVX2 + VMOVDQA DD1, ctr1StoreAVX2 + VMOVDQA DD2, ctr2StoreAVX2 + + // Compute the number of iterations that will hash two blocks of data + MOVQ inl, tmpStoreAVX2 + MOVQ inl, itr1 + SUBQ $256, itr1 + SHRQ $4, itr1 + ADDQ $6, itr1 + MOVQ $10, itr2 + CMPQ itr1, $10 + CMOVQGT itr2, itr1 + MOVQ inp, inl + XORQ itr2, itr2 + + // Perform ChaCha rounds, while hashing the remaining input +openAVX2Tail384LoopB: + polyAdd(0(inl)) + polyMulAVX2 + LEAQ 16(inl), inl + +openAVX2Tail384LoopA: + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, 
DD2, TT0) + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 + VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2 + polyAdd(0(inl)) + polyMulAVX2 + LEAQ 16(inl), inl + INCQ itr2 + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 + VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2 + + CMPQ itr2, itr1 + JB openAVX2Tail384LoopB + + CMPQ itr2, $10 + JNE openAVX2Tail384LoopA + + MOVQ inl, itr2 + SUBQ inp, inl + MOVQ inl, itr1 + MOVQ tmpStoreAVX2, inl + +openAVX2Tail384Hash: + ADDQ $16, itr1 + CMPQ itr1, inl + JGT openAVX2Tail384HashEnd + polyAdd(0(itr2)) + polyMulAVX2 + LEAQ 16(itr2), itr2 + JMP openAVX2Tail384Hash + +// Store 256 bytes safely, then go to store loop +openAVX2Tail384HashEnd: + VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2 + VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2 + VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2 + VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2 + VPERM2I128 $0x02, AA0, BB0, TT0; VPERM2I128 $0x02, CC0, DD0, TT1; VPERM2I128 $0x13, AA0, BB0, TT2; VPERM2I128 $0x13, CC0, DD0, TT3 + VPXOR (0*32)(inp), TT0, TT0; VPXOR (1*32)(inp), TT1, TT1; VPXOR (2*32)(inp), TT2, TT2; VPXOR (3*32)(inp), TT3, TT3 + VMOVDQU TT0, (0*32)(oup); VMOVDQU TT1, (1*32)(oup); VMOVDQU TT2, (2*32)(oup); VMOVDQU TT3, (3*32)(oup) + VPERM2I128 $0x02, AA1, BB1, TT0; VPERM2I128 $0x02, CC1, DD1, TT1; VPERM2I128 $0x13, AA1, BB1, TT2; VPERM2I128 $0x13, CC1, DD1, TT3 + VPXOR (4*32)(inp), TT0, TT0; VPXOR (5*32)(inp), TT1, TT1; VPXOR (6*32)(inp), TT2, TT2; VPXOR (7*32)(inp), TT3, TT3 + VMOVDQU TT0, (4*32)(oup); VMOVDQU TT1, (5*32)(oup); VMOVDQU TT2, (6*32)(oup); VMOVDQU TT3, (7*32)(oup) + VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0 + LEAQ (8*32)(inp), inp + LEAQ (8*32)(oup), oup + SUBQ $8*32, inl + JMP openAVX2TailLoop + +// ---------------------------------------------------------------------------- +// Special optimization for the last 512 bytes of ciphertext +openAVX2Tail512: + VMOVDQU ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 + VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3 + VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3 + VMOVDQA ctr3StoreAVX2, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3 + VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2 + XORQ itr1, itr1 + MOVQ inp, itr2 + +openAVX2Tail512LoopB: + polyAdd(0(itr2)) + polyMulAVX2 + LEAQ (2*8)(itr2), itr2 + +openAVX2Tail512LoopA: + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol16<>(SB), DD0, 
DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + polyAdd(0*8(itr2)) + polyMulAVX2 + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $4, BB3, BB3, BB3 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 + VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2; VPALIGNR $12, DD3, DD3, DD3 + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + polyAdd(2*8(itr2)) + polyMulAVX2 + LEAQ (4*8)(itr2), itr2 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $12, BB3, BB3, BB3 + VPALIGNR $8, CC0, CC0, CC0; 
VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 + VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2; VPALIGNR $4, DD3, DD3, DD3 + INCQ itr1 + CMPQ itr1, $4 + JLT openAVX2Tail512LoopB + + CMPQ itr1, $10 + JNE openAVX2Tail512LoopA + + MOVQ inl, itr1 + SUBQ $384, itr1 + ANDQ $-16, itr1 + +openAVX2Tail512HashLoop: + TESTQ itr1, itr1 + JE openAVX2Tail512HashEnd + polyAdd(0(itr2)) + polyMulAVX2 + LEAQ 16(itr2), itr2 + SUBQ $16, itr1 + JMP openAVX2Tail512HashLoop + +openAVX2Tail512HashEnd: + VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3 + VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3 + VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3 + VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3 + VMOVDQA CC3, tmpStoreAVX2 + VPERM2I128 $0x02, AA0, BB0, CC3; VPERM2I128 $0x13, AA0, BB0, BB0; VPERM2I128 $0x02, CC0, DD0, AA0; VPERM2I128 $0x13, CC0, DD0, CC0 + VPXOR (0*32)(inp), CC3, CC3; VPXOR (1*32)(inp), AA0, AA0; VPXOR (2*32)(inp), BB0, BB0; VPXOR (3*32)(inp), CC0, CC0 + VMOVDQU CC3, (0*32)(oup); VMOVDQU AA0, (1*32)(oup); VMOVDQU BB0, (2*32)(oup); VMOVDQU CC0, (3*32)(oup) + VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 + VPXOR (4*32)(inp), AA0, AA0; VPXOR (5*32)(inp), BB0, BB0; VPXOR (6*32)(inp), CC0, CC0; VPXOR (7*32)(inp), DD0, DD0 + VMOVDQU AA0, (4*32)(oup); VMOVDQU BB0, (5*32)(oup); VMOVDQU CC0, (6*32)(oup); VMOVDQU DD0, (7*32)(oup) + VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0 + VPXOR (8*32)(inp), AA0, AA0; VPXOR (9*32)(inp), BB0, BB0; VPXOR (10*32)(inp), CC0, CC0; VPXOR (11*32)(inp), DD0, DD0 + VMOVDQU AA0, (8*32)(oup); VMOVDQU BB0, (9*32)(oup); VMOVDQU CC0, (10*32)(oup); VMOVDQU DD0, (11*32)(oup) + VPERM2I128 $0x02, AA3, BB3, AA0; VPERM2I128 $0x02, tmpStoreAVX2, DD3, BB0; VPERM2I128 $0x13, AA3, BB3, CC0; VPERM2I128 $0x13, tmpStoreAVX2, DD3, DD0 + + LEAQ (12*32)(inp), inp + LEAQ (12*32)(oup), oup + SUBQ $12*32, inl + + JMP openAVX2TailLoop + +// ---------------------------------------------------------------------------- +// ---------------------------------------------------------------------------- +// func chacha20Poly1305Seal(dst, key, src, ad []byte) +TEXT ·chacha20Poly1305Seal(SB), 0, $288-96 + // For aligned stack access + MOVQ SP, BP + ADDQ $32, BP + ANDQ $-32, BP + MOVQ dst+0(FP), oup + MOVQ key+24(FP), keyp + MOVQ src+48(FP), inp + MOVQ src_len+56(FP), inl + MOVQ ad+72(FP), adp + + CMPB ·useAVX2(SB), $1 + JE chacha20Poly1305Seal_AVX2 + + // Special optimization, for very short buffers + CMPQ inl, $128 + JBE sealSSE128 // About 15% faster + + // In the seal case - prepare the poly key + 3 blocks of stream in the first iteration + MOVOU ·chacha20Constants<>(SB), A0 + MOVOU (1*16)(keyp), B0 + MOVOU (2*16)(keyp), C0 + MOVOU (3*16)(keyp), D0 + + // Store state on stack for future use + MOVO B0, state1Store + MOVO C0, state2Store + + // Load state, increment counter blocks + MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 + MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; 
MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 + MOVO A2, A3; MOVO B2, B3; MOVO C2, C3; MOVO D2, D3; PADDL ·sseIncMask<>(SB), D3 + + // Store counters + MOVO D0, ctr0Store; MOVO D1, ctr1Store; MOVO D2, ctr2Store; MOVO D3, ctr3Store + MOVQ $10, itr2 + +sealSSEIntroLoop: + MOVO C3, tmpStore + chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) + MOVO tmpStore, C3 + MOVO C1, tmpStore + chachaQR(A3, B3, C3, D3, C1) + MOVO tmpStore, C1 + shiftB0Left; shiftB1Left; shiftB2Left; shiftB3Left + shiftC0Left; shiftC1Left; shiftC2Left; shiftC3Left + shiftD0Left; shiftD1Left; shiftD2Left; shiftD3Left + + MOVO C3, tmpStore + chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) + MOVO tmpStore, C3 + MOVO C1, tmpStore + chachaQR(A3, B3, C3, D3, C1) + MOVO tmpStore, C1 + shiftB0Right; shiftB1Right; shiftB2Right; shiftB3Right + shiftC0Right; shiftC1Right; shiftC2Right; shiftC3Right + shiftD0Right; shiftD1Right; shiftD2Right; shiftD3Right + DECQ itr2 + JNE sealSSEIntroLoop + + // Add in the state + PADDD ·chacha20Constants<>(SB), A0; PADDD ·chacha20Constants<>(SB), A1; PADDD ·chacha20Constants<>(SB), A2; PADDD ·chacha20Constants<>(SB), A3 + PADDD state1Store, B0; PADDD state1Store, B1; PADDD state1Store, B2; PADDD state1Store, B3 + PADDD state2Store, C1; PADDD state2Store, C2; PADDD state2Store, C3 + PADDD ctr1Store, D1; PADDD ctr2Store, D2; PADDD ctr3Store, D3 + + // Clamp and store the key + PAND ·polyClampMask<>(SB), A0 + MOVO A0, rStore + MOVO B0, sStore + + // Hash AAD + MOVQ ad_len+80(FP), itr2 + CALL polyHashADInternal<>(SB) + + MOVOU (0*16)(inp), A0; MOVOU (1*16)(inp), B0; MOVOU (2*16)(inp), C0; MOVOU (3*16)(inp), D0 + PXOR A0, A1; PXOR B0, B1; PXOR C0, C1; PXOR D0, D1 + MOVOU A1, (0*16)(oup); MOVOU B1, (1*16)(oup); MOVOU C1, (2*16)(oup); MOVOU D1, (3*16)(oup) + MOVOU (4*16)(inp), A0; MOVOU (5*16)(inp), B0; MOVOU (6*16)(inp), C0; MOVOU (7*16)(inp), D0 + PXOR A0, A2; PXOR B0, B2; PXOR C0, C2; PXOR D0, D2 + MOVOU A2, (4*16)(oup); MOVOU B2, (5*16)(oup); MOVOU C2, (6*16)(oup); MOVOU D2, (7*16)(oup) + + MOVQ $128, itr1 + SUBQ $128, inl + LEAQ 128(inp), inp + + MOVO A3, A1; MOVO B3, B1; MOVO C3, C1; MOVO D3, D1 + + CMPQ inl, $64 + JBE sealSSE128SealHash + + MOVOU (0*16)(inp), A0; MOVOU (1*16)(inp), B0; MOVOU (2*16)(inp), C0; MOVOU (3*16)(inp), D0 + PXOR A0, A3; PXOR B0, B3; PXOR C0, C3; PXOR D0, D3 + MOVOU A3, (8*16)(oup); MOVOU B3, (9*16)(oup); MOVOU C3, (10*16)(oup); MOVOU D3, (11*16)(oup) + + ADDQ $64, itr1 + SUBQ $64, inl + LEAQ 64(inp), inp + + MOVQ $2, itr1 + MOVQ $8, itr2 + + CMPQ inl, $64 + JBE sealSSETail64 + CMPQ inl, $128 + JBE sealSSETail128 + CMPQ inl, $192 + JBE sealSSETail192 + +sealSSEMainLoop: + // Load state, increment counter blocks + MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0 + MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 + MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 + MOVO A2, A3; MOVO B2, B3; MOVO C2, C3; MOVO D2, D3; PADDL ·sseIncMask<>(SB), D3 + + // Store counters + MOVO D0, ctr0Store; MOVO D1, ctr1Store; MOVO D2, ctr2Store; MOVO D3, ctr3Store + +sealSSEInnerLoop: + MOVO C3, tmpStore + chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) + MOVO tmpStore, C3 + MOVO C1, tmpStore + chachaQR(A3, B3, C3, D3, C1) + MOVO tmpStore, C1 + polyAdd(0(oup)) + shiftB0Left; shiftB1Left; shiftB2Left; shiftB3Left + shiftC0Left; shiftC1Left; 
shiftC2Left; shiftC3Left + shiftD0Left; shiftD1Left; shiftD2Left; shiftD3Left + polyMulStage1 + polyMulStage2 + LEAQ (2*8)(oup), oup + MOVO C3, tmpStore + chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) + MOVO tmpStore, C3 + MOVO C1, tmpStore + polyMulStage3 + chachaQR(A3, B3, C3, D3, C1) + MOVO tmpStore, C1 + polyMulReduceStage + shiftB0Right; shiftB1Right; shiftB2Right; shiftB3Right + shiftC0Right; shiftC1Right; shiftC2Right; shiftC3Right + shiftD0Right; shiftD1Right; shiftD2Right; shiftD3Right + DECQ itr2 + JGE sealSSEInnerLoop + polyAdd(0(oup)) + polyMul + LEAQ (2*8)(oup), oup + DECQ itr1 + JG sealSSEInnerLoop + + // Add in the state + PADDD ·chacha20Constants<>(SB), A0; PADDD ·chacha20Constants<>(SB), A1; PADDD ·chacha20Constants<>(SB), A2; PADDD ·chacha20Constants<>(SB), A3 + PADDD state1Store, B0; PADDD state1Store, B1; PADDD state1Store, B2; PADDD state1Store, B3 + PADDD state2Store, C0; PADDD state2Store, C1; PADDD state2Store, C2; PADDD state2Store, C3 + PADDD ctr0Store, D0; PADDD ctr1Store, D1; PADDD ctr2Store, D2; PADDD ctr3Store, D3 + MOVO D3, tmpStore + + // Load - xor - store + MOVOU (0*16)(inp), D3; PXOR D3, A0 + MOVOU (1*16)(inp), D3; PXOR D3, B0 + MOVOU (2*16)(inp), D3; PXOR D3, C0 + MOVOU (3*16)(inp), D3; PXOR D3, D0 + MOVOU A0, (0*16)(oup) + MOVOU B0, (1*16)(oup) + MOVOU C0, (2*16)(oup) + MOVOU D0, (3*16)(oup) + MOVO tmpStore, D3 + + MOVOU (4*16)(inp), A0; MOVOU (5*16)(inp), B0; MOVOU (6*16)(inp), C0; MOVOU (7*16)(inp), D0 + PXOR A0, A1; PXOR B0, B1; PXOR C0, C1; PXOR D0, D1 + MOVOU A1, (4*16)(oup); MOVOU B1, (5*16)(oup); MOVOU C1, (6*16)(oup); MOVOU D1, (7*16)(oup) + MOVOU (8*16)(inp), A0; MOVOU (9*16)(inp), B0; MOVOU (10*16)(inp), C0; MOVOU (11*16)(inp), D0 + PXOR A0, A2; PXOR B0, B2; PXOR C0, C2; PXOR D0, D2 + MOVOU A2, (8*16)(oup); MOVOU B2, (9*16)(oup); MOVOU C2, (10*16)(oup); MOVOU D2, (11*16)(oup) + ADDQ $192, inp + MOVQ $192, itr1 + SUBQ $192, inl + MOVO A3, A1 + MOVO B3, B1 + MOVO C3, C1 + MOVO D3, D1 + CMPQ inl, $64 + JBE sealSSE128SealHash + MOVOU (0*16)(inp), A0; MOVOU (1*16)(inp), B0; MOVOU (2*16)(inp), C0; MOVOU (3*16)(inp), D0 + PXOR A0, A3; PXOR B0, B3; PXOR C0, C3; PXOR D0, D3 + MOVOU A3, (12*16)(oup); MOVOU B3, (13*16)(oup); MOVOU C3, (14*16)(oup); MOVOU D3, (15*16)(oup) + LEAQ 64(inp), inp + SUBQ $64, inl + MOVQ $6, itr1 + MOVQ $4, itr2 + CMPQ inl, $192 + JG sealSSEMainLoop + + MOVQ inl, itr1 + TESTQ inl, inl + JE sealSSE128SealHash + MOVQ $6, itr1 + CMPQ inl, $64 + JBE sealSSETail64 + CMPQ inl, $128 + JBE sealSSETail128 + JMP sealSSETail192 + +// ---------------------------------------------------------------------------- +// Special optimization for the last 64 bytes of plaintext +sealSSETail64: + // Need to encrypt up to 64 bytes - prepare single block, hash 192 or 256 bytes + MOVO ·chacha20Constants<>(SB), A1 + MOVO state1Store, B1 + MOVO state2Store, C1 + MOVO ctr3Store, D1 + PADDL ·sseIncMask<>(SB), D1 + MOVO D1, ctr0Store + +sealSSETail64LoopA: + // Perform ChaCha rounds, while hashing the previously encrypted ciphertext + polyAdd(0(oup)) + polyMul + LEAQ 16(oup), oup + +sealSSETail64LoopB: + chachaQR(A1, B1, C1, D1, T1) + shiftB1Left; shiftC1Left; shiftD1Left + chachaQR(A1, B1, C1, D1, T1) + shiftB1Right; shiftC1Right; shiftD1Right + polyAdd(0(oup)) + polyMul + LEAQ 16(oup), oup + + DECQ itr1 + JG sealSSETail64LoopA + + DECQ itr2 + JGE sealSSETail64LoopB + PADDL ·chacha20Constants<>(SB), A1 + PADDL state1Store, B1 + PADDL state2Store, C1 + PADDL ctr0Store, D1 + + JMP sealSSE128Seal + +// 
---------------------------------------------------------------------------- +// Special optimization for the last 128 bytes of plaintext +sealSSETail128: + // Need to encrypt up to 128 bytes - prepare two blocks, hash 192 or 256 bytes + MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr0Store + MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1; MOVO D1, ctr1Store + +sealSSETail128LoopA: + // Perform ChaCha rounds, while hashing the previously encrypted ciphertext + polyAdd(0(oup)) + polyMul + LEAQ 16(oup), oup + +sealSSETail128LoopB: + chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0) + shiftB0Left; shiftC0Left; shiftD0Left + shiftB1Left; shiftC1Left; shiftD1Left + polyAdd(0(oup)) + polyMul + LEAQ 16(oup), oup + chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0) + shiftB0Right; shiftC0Right; shiftD0Right + shiftB1Right; shiftC1Right; shiftD1Right + + DECQ itr1 + JG sealSSETail128LoopA + + DECQ itr2 + JGE sealSSETail128LoopB + + PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1 + PADDL state1Store, B0; PADDL state1Store, B1 + PADDL state2Store, C0; PADDL state2Store, C1 + PADDL ctr0Store, D0; PADDL ctr1Store, D1 + + MOVOU (0*16)(inp), T0; MOVOU (1*16)(inp), T1; MOVOU (2*16)(inp), T2; MOVOU (3*16)(inp), T3 + PXOR T0, A0; PXOR T1, B0; PXOR T2, C0; PXOR T3, D0 + MOVOU A0, (0*16)(oup); MOVOU B0, (1*16)(oup); MOVOU C0, (2*16)(oup); MOVOU D0, (3*16)(oup) + + MOVQ $64, itr1 + LEAQ 64(inp), inp + SUBQ $64, inl + + JMP sealSSE128SealHash + +// ---------------------------------------------------------------------------- +// Special optimization for the last 192 bytes of plaintext +sealSSETail192: + // Need to encrypt up to 192 bytes - prepare three blocks, hash 192 or 256 bytes + MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr0Store + MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1; MOVO D1, ctr1Store + MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2; MOVO D2, ctr2Store + +sealSSETail192LoopA: + // Perform ChaCha rounds, while hashing the previously encrypted ciphertext + polyAdd(0(oup)) + polyMul + LEAQ 16(oup), oup + +sealSSETail192LoopB: + chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) + shiftB0Left; shiftC0Left; shiftD0Left + shiftB1Left; shiftC1Left; shiftD1Left + shiftB2Left; shiftC2Left; shiftD2Left + + polyAdd(0(oup)) + polyMul + LEAQ 16(oup), oup + + chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) + shiftB0Right; shiftC0Right; shiftD0Right + shiftB1Right; shiftC1Right; shiftD1Right + shiftB2Right; shiftC2Right; shiftD2Right + + DECQ itr1 + JG sealSSETail192LoopA + + DECQ itr2 + JGE sealSSETail192LoopB + + PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1; PADDL ·chacha20Constants<>(SB), A2 + PADDL state1Store, B0; PADDL state1Store, B1; PADDL state1Store, B2 + PADDL state2Store, C0; PADDL state2Store, C1; PADDL state2Store, C2 + PADDL ctr0Store, D0; PADDL ctr1Store, D1; PADDL ctr2Store, D2 + + MOVOU (0*16)(inp), T0; MOVOU (1*16)(inp), T1; MOVOU (2*16)(inp), T2; MOVOU (3*16)(inp), T3 + PXOR T0, A0; PXOR T1, B0; PXOR T2, C0; PXOR T3, D0 + MOVOU A0, (0*16)(oup); MOVOU B0, (1*16)(oup); MOVOU C0, (2*16)(oup); MOVOU D0, (3*16)(oup) + MOVOU (4*16)(inp), T0; MOVOU (5*16)(inp), T1; MOVOU 
(6*16)(inp), T2; MOVOU (7*16)(inp), T3 + PXOR T0, A1; PXOR T1, B1; PXOR T2, C1; PXOR T3, D1 + MOVOU A1, (4*16)(oup); MOVOU B1, (5*16)(oup); MOVOU C1, (6*16)(oup); MOVOU D1, (7*16)(oup) + + MOVO A2, A1 + MOVO B2, B1 + MOVO C2, C1 + MOVO D2, D1 + MOVQ $128, itr1 + LEAQ 128(inp), inp + SUBQ $128, inl + + JMP sealSSE128SealHash + +// ---------------------------------------------------------------------------- +// Special seal optimization for buffers smaller than 129 bytes +sealSSE128: + // For up to 128 bytes of ciphertext and 64 bytes for the poly key, we require to process three blocks + MOVOU ·chacha20Constants<>(SB), A0; MOVOU (1*16)(keyp), B0; MOVOU (2*16)(keyp), C0; MOVOU (3*16)(keyp), D0 + MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 + MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 + MOVO B0, T1; MOVO C0, T2; MOVO D1, T3 + MOVQ $10, itr2 + +sealSSE128InnerCipherLoop: + chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) + shiftB0Left; shiftB1Left; shiftB2Left + shiftC0Left; shiftC1Left; shiftC2Left + shiftD0Left; shiftD1Left; shiftD2Left + chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) + shiftB0Right; shiftB1Right; shiftB2Right + shiftC0Right; shiftC1Right; shiftC2Right + shiftD0Right; shiftD1Right; shiftD2Right + DECQ itr2 + JNE sealSSE128InnerCipherLoop + + // A0|B0 hold the Poly1305 32-byte key, C0,D0 can be discarded + PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1; PADDL ·chacha20Constants<>(SB), A2 + PADDL T1, B0; PADDL T1, B1; PADDL T1, B2 + PADDL T2, C1; PADDL T2, C2 + PADDL T3, D1; PADDL ·sseIncMask<>(SB), T3; PADDL T3, D2 + PAND ·polyClampMask<>(SB), A0 + MOVOU A0, rStore + MOVOU B0, sStore + + // Hash + MOVQ ad_len+80(FP), itr2 + CALL polyHashADInternal<>(SB) + XORQ itr1, itr1 + +sealSSE128SealHash: + // itr1 holds the number of bytes encrypted but not yet hashed + CMPQ itr1, $16 + JB sealSSE128Seal + polyAdd(0(oup)) + polyMul + + SUBQ $16, itr1 + ADDQ $16, oup + + JMP sealSSE128SealHash + +sealSSE128Seal: + CMPQ inl, $16 + JB sealSSETail + SUBQ $16, inl + + // Load for decryption + MOVOU (inp), T0 + PXOR T0, A1 + MOVOU A1, (oup) + LEAQ (1*16)(inp), inp + LEAQ (1*16)(oup), oup + + // Extract for hashing + MOVQ A1, t0 + PSRLDQ $8, A1 + MOVQ A1, t1 + ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $1, acc2 + polyMul + + // Shift the stream "left" + MOVO B1, A1 + MOVO C1, B1 + MOVO D1, C1 + MOVO A2, D1 + MOVO B2, A2 + MOVO C2, B2 + MOVO D2, C2 + JMP sealSSE128Seal + +sealSSETail: + TESTQ inl, inl + JE sealSSEFinalize + + // We can only load the PT one byte at a time to avoid read after end of buffer + MOVQ inl, itr2 + SHLQ $4, itr2 + LEAQ ·andMask<>(SB), t0 + MOVQ inl, itr1 + LEAQ -1(inp)(inl*1), inp + XORQ t2, t2 + XORQ t3, t3 + XORQ AX, AX + +sealSSETailLoadLoop: + SHLQ $8, t2, t3 + SHLQ $8, t2 + MOVB (inp), AX + XORQ AX, t2 + LEAQ -1(inp), inp + DECQ itr1 + JNE sealSSETailLoadLoop + MOVQ t2, 0+tmpStore + MOVQ t3, 8+tmpStore + PXOR 0+tmpStore, A1 + MOVOU A1, (oup) + MOVOU -16(t0)(itr2*1), T0 + PAND T0, A1 + MOVQ A1, t0 + PSRLDQ $8, A1 + MOVQ A1, t1 + ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $1, acc2 + polyMul + + ADDQ inl, oup + +sealSSEFinalize: + // Hash in the buffer lengths + ADDQ ad_len+80(FP), acc0 + ADCQ src_len+56(FP), acc1 + ADCQ $1, acc2 + polyMul + + // Final reduce + MOVQ acc0, t0 + MOVQ acc1, t1 + MOVQ acc2, t2 + SUBQ $-5, acc0 + SBBQ $-1, acc1 + SBBQ $3, acc2 + CMOVQCS t0, acc0 + CMOVQCS t1, acc1 + CMOVQCS t2, 
acc2 + + // Add in the "s" part of the key + ADDQ 0+sStore, acc0 + ADCQ 8+sStore, acc1 + + // Finally store the tag at the end of the message + MOVQ acc0, (0*8)(oup) + MOVQ acc1, (1*8)(oup) + RET + +// ---------------------------------------------------------------------------- +// ------------------------- AVX2 Code ---------------------------------------- +chacha20Poly1305Seal_AVX2: + VZEROUPPER + VMOVDQU ·chacha20Constants<>(SB), AA0 + BYTE $0xc4; BYTE $0x42; BYTE $0x7d; BYTE $0x5a; BYTE $0x70; BYTE $0x10 // broadcasti128 16(r8), ymm14 + BYTE $0xc4; BYTE $0x42; BYTE $0x7d; BYTE $0x5a; BYTE $0x60; BYTE $0x20 // broadcasti128 32(r8), ymm12 + BYTE $0xc4; BYTE $0xc2; BYTE $0x7d; BYTE $0x5a; BYTE $0x60; BYTE $0x30 // broadcasti128 48(r8), ymm4 + VPADDD ·avx2InitMask<>(SB), DD0, DD0 + + // Special optimizations, for very short buffers + CMPQ inl, $192 + JBE seal192AVX2 // 33% faster + CMPQ inl, $320 + JBE seal320AVX2 // 17% faster + + // For the general key prepare the key first - as a byproduct we have 64 bytes of cipher stream + VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 + VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3; VMOVDQA BB0, state1StoreAVX2 + VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3; VMOVDQA CC0, state2StoreAVX2 + VPADDD ·avx2IncMask<>(SB), DD0, DD1; VMOVDQA DD0, ctr0StoreAVX2 + VPADDD ·avx2IncMask<>(SB), DD1, DD2; VMOVDQA DD1, ctr1StoreAVX2 + VPADDD ·avx2IncMask<>(SB), DD2, DD3; VMOVDQA DD2, ctr2StoreAVX2 + VMOVDQA DD3, ctr3StoreAVX2 + MOVQ $10, itr2 + +sealAVX2IntroLoop: + VMOVDQA CC3, tmpStoreAVX2 + chachaQR_AVX2(AA0, BB0, CC0, DD0, CC3); chachaQR_AVX2(AA1, BB1, CC1, DD1, CC3); chachaQR_AVX2(AA2, BB2, CC2, DD2, CC3) + VMOVDQA tmpStoreAVX2, CC3 + VMOVDQA CC1, tmpStoreAVX2 + chachaQR_AVX2(AA3, BB3, CC3, DD3, CC1) + VMOVDQA tmpStoreAVX2, CC1 + + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $12, DD0, DD0, DD0 + VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $12, DD1, DD1, DD1 + VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $12, DD2, DD2, DD2 + VPALIGNR $4, BB3, BB3, BB3; VPALIGNR $8, CC3, CC3, CC3; VPALIGNR $12, DD3, DD3, DD3 + + VMOVDQA CC3, tmpStoreAVX2 + chachaQR_AVX2(AA0, BB0, CC0, DD0, CC3); chachaQR_AVX2(AA1, BB1, CC1, DD1, CC3); chachaQR_AVX2(AA2, BB2, CC2, DD2, CC3) + VMOVDQA tmpStoreAVX2, CC3 + VMOVDQA CC1, tmpStoreAVX2 + chachaQR_AVX2(AA3, BB3, CC3, DD3, CC1) + VMOVDQA tmpStoreAVX2, CC1 + + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $4, DD0, DD0, DD0 + VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $4, DD1, DD1, DD1 + VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $4, DD2, DD2, DD2 + VPALIGNR $12, BB3, BB3, BB3; VPALIGNR $8, CC3, CC3, CC3; VPALIGNR $4, DD3, DD3, DD3 + DECQ itr2 + JNE sealAVX2IntroLoop + + VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3 + VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3 + VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3 + VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3 + + VPERM2I128 $0x13, CC0, DD0, CC0 // Stream bytes 96 - 127 + VPERM2I128 $0x02, AA0, BB0, DD0 // The Poly1305 key + VPERM2I128 $0x13, AA0, BB0, AA0 // Stream bytes 
64 - 95 + + // Clamp and store poly key + VPAND ·polyClampMask<>(SB), DD0, DD0 + VMOVDQA DD0, rsStoreAVX2 + + // Hash AD + MOVQ ad_len+80(FP), itr2 + CALL polyHashADInternal<>(SB) + + // Can store at least 320 bytes + VPXOR (0*32)(inp), AA0, AA0 + VPXOR (1*32)(inp), CC0, CC0 + VMOVDQU AA0, (0*32)(oup) + VMOVDQU CC0, (1*32)(oup) + + VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 + VPXOR (2*32)(inp), AA0, AA0; VPXOR (3*32)(inp), BB0, BB0; VPXOR (4*32)(inp), CC0, CC0; VPXOR (5*32)(inp), DD0, DD0 + VMOVDQU AA0, (2*32)(oup); VMOVDQU BB0, (3*32)(oup); VMOVDQU CC0, (4*32)(oup); VMOVDQU DD0, (5*32)(oup) + VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0 + VPXOR (6*32)(inp), AA0, AA0; VPXOR (7*32)(inp), BB0, BB0; VPXOR (8*32)(inp), CC0, CC0; VPXOR (9*32)(inp), DD0, DD0 + VMOVDQU AA0, (6*32)(oup); VMOVDQU BB0, (7*32)(oup); VMOVDQU CC0, (8*32)(oup); VMOVDQU DD0, (9*32)(oup) + + MOVQ $320, itr1 + SUBQ $320, inl + LEAQ 320(inp), inp + + VPERM2I128 $0x02, AA3, BB3, AA0; VPERM2I128 $0x02, CC3, DD3, BB0; VPERM2I128 $0x13, AA3, BB3, CC0; VPERM2I128 $0x13, CC3, DD3, DD0 + CMPQ inl, $128 + JBE sealAVX2SealHash + + VPXOR (0*32)(inp), AA0, AA0; VPXOR (1*32)(inp), BB0, BB0; VPXOR (2*32)(inp), CC0, CC0; VPXOR (3*32)(inp), DD0, DD0 + VMOVDQU AA0, (10*32)(oup); VMOVDQU BB0, (11*32)(oup); VMOVDQU CC0, (12*32)(oup); VMOVDQU DD0, (13*32)(oup) + SUBQ $128, inl + LEAQ 128(inp), inp + + MOVQ $8, itr1 + MOVQ $2, itr2 + + CMPQ inl, $128 + JBE sealAVX2Tail128 + CMPQ inl, $256 + JBE sealAVX2Tail256 + CMPQ inl, $384 + JBE sealAVX2Tail384 + CMPQ inl, $512 + JBE sealAVX2Tail512 + + // We have 448 bytes to hash, but main loop hashes 512 bytes at a time - perform some rounds, before the main loop + VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 + VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3 + VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3 + VMOVDQA ctr3StoreAVX2, DD0 + VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3 + VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2 + + VMOVDQA CC3, tmpStoreAVX2 + chachaQR_AVX2(AA0, BB0, CC0, DD0, CC3); chachaQR_AVX2(AA1, BB1, CC1, DD1, CC3); chachaQR_AVX2(AA2, BB2, CC2, DD2, CC3) + VMOVDQA tmpStoreAVX2, CC3 + VMOVDQA CC1, tmpStoreAVX2 + chachaQR_AVX2(AA3, BB3, CC3, DD3, CC1) + VMOVDQA tmpStoreAVX2, CC1 + + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $12, DD0, DD0, DD0 + VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $12, DD1, DD1, DD1 + VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $12, DD2, DD2, DD2 + VPALIGNR $4, BB3, BB3, BB3; VPALIGNR $8, CC3, CC3, CC3; VPALIGNR $12, DD3, DD3, DD3 + + VMOVDQA CC3, tmpStoreAVX2 + chachaQR_AVX2(AA0, BB0, CC0, DD0, CC3); chachaQR_AVX2(AA1, BB1, CC1, DD1, CC3); chachaQR_AVX2(AA2, BB2, CC2, DD2, CC3) + VMOVDQA tmpStoreAVX2, CC3 + VMOVDQA CC1, tmpStoreAVX2 + chachaQR_AVX2(AA3, BB3, CC3, DD3, CC1) + VMOVDQA tmpStoreAVX2, CC1 + + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $4, DD0, DD0, DD0 + VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $4, DD1, DD1, DD1 + VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $4, 
DD2, DD2, DD2 + VPALIGNR $12, BB3, BB3, BB3; VPALIGNR $8, CC3, CC3, CC3; VPALIGNR $4, DD3, DD3, DD3 + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + + SUBQ $16, oup // Adjust the pointer + MOVQ $9, itr1 + JMP sealAVX2InternalLoopStart + +sealAVX2MainLoop: + // Load state, increment counter blocks, store the incremented counters + VMOVDQU ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 + VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3 + VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3 + VMOVDQA ctr3StoreAVX2, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3 + VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2 + MOVQ $10, itr1 + +sealAVX2InternalLoop: + polyAdd(0*8(oup)) + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + polyMulStage1_AVX2 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 + polyMulStage2_AVX2 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + polyMulStage3_AVX2 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + polyMulReduceStage + +sealAVX2InternalLoopStart: + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 + polyAdd(2*8(oup)) + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + polyMulStage1_AVX2 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + polyMulStage2_AVX2 + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2; VPALIGNR 
$4, BB3, BB3, BB3 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 + VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2; VPALIGNR $12, DD3, DD3, DD3 + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + polyMulStage3_AVX2 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 + polyMulReduceStage + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + polyAdd(4*8(oup)) + LEAQ (6*8)(oup), oup + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + polyMulStage1_AVX2 + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + polyMulStage2_AVX2 + VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + polyMulStage3_AVX2 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + polyMulReduceStage + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $12, BB3, BB3, BB3 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 + VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2; VPALIGNR $4, DD3, DD3, DD3 + DECQ itr1 + JNE sealAVX2InternalLoop + + VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3 + VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3 + VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3 + VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3 + VMOVDQA CC3, tmpStoreAVX2 + + // We only hashed 480 of the 512 bytes available - hash the remaining 32 here + polyAdd(0*8(oup)) + polyMulAVX2 + LEAQ (4*8)(oup), oup + VPERM2I128 $0x02, AA0, BB0, CC3; VPERM2I128 $0x13, AA0, BB0, BB0; VPERM2I128 $0x02, CC0, DD0, AA0; VPERM2I128 $0x13, CC0, DD0, CC0 + VPXOR (0*32)(inp), CC3, CC3; VPXOR (1*32)(inp), AA0, AA0; VPXOR (2*32)(inp), BB0, BB0; VPXOR (3*32)(inp), CC0, CC0 + VMOVDQU CC3, (0*32)(oup); VMOVDQU AA0, (1*32)(oup); VMOVDQU BB0, (2*32)(oup); VMOVDQU CC0, (3*32)(oup) + VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; 
VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 + VPXOR (4*32)(inp), AA0, AA0; VPXOR (5*32)(inp), BB0, BB0; VPXOR (6*32)(inp), CC0, CC0; VPXOR (7*32)(inp), DD0, DD0 + VMOVDQU AA0, (4*32)(oup); VMOVDQU BB0, (5*32)(oup); VMOVDQU CC0, (6*32)(oup); VMOVDQU DD0, (7*32)(oup) + + // and here + polyAdd(-2*8(oup)) + polyMulAVX2 + VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0 + VPXOR (8*32)(inp), AA0, AA0; VPXOR (9*32)(inp), BB0, BB0; VPXOR (10*32)(inp), CC0, CC0; VPXOR (11*32)(inp), DD0, DD0 + VMOVDQU AA0, (8*32)(oup); VMOVDQU BB0, (9*32)(oup); VMOVDQU CC0, (10*32)(oup); VMOVDQU DD0, (11*32)(oup) + VPERM2I128 $0x02, AA3, BB3, AA0; VPERM2I128 $0x02, tmpStoreAVX2, DD3, BB0; VPERM2I128 $0x13, AA3, BB3, CC0; VPERM2I128 $0x13, tmpStoreAVX2, DD3, DD0 + VPXOR (12*32)(inp), AA0, AA0; VPXOR (13*32)(inp), BB0, BB0; VPXOR (14*32)(inp), CC0, CC0; VPXOR (15*32)(inp), DD0, DD0 + VMOVDQU AA0, (12*32)(oup); VMOVDQU BB0, (13*32)(oup); VMOVDQU CC0, (14*32)(oup); VMOVDQU DD0, (15*32)(oup) + LEAQ (32*16)(inp), inp + SUBQ $(32*16), inl + CMPQ inl, $512 + JG sealAVX2MainLoop + + // Tail can only hash 480 bytes + polyAdd(0*8(oup)) + polyMulAVX2 + polyAdd(2*8(oup)) + polyMulAVX2 + LEAQ 32(oup), oup + + MOVQ $10, itr1 + MOVQ $0, itr2 + CMPQ inl, $128 + JBE sealAVX2Tail128 + CMPQ inl, $256 + JBE sealAVX2Tail256 + CMPQ inl, $384 + JBE sealAVX2Tail384 + JMP sealAVX2Tail512 + +// ---------------------------------------------------------------------------- +// Special optimization for buffers smaller than 193 bytes +seal192AVX2: + // For up to 192 bytes of ciphertext and 64 bytes for the poly key, we process four blocks + VMOVDQA AA0, AA1 + VMOVDQA BB0, BB1 + VMOVDQA CC0, CC1 + VPADDD ·avx2IncMask<>(SB), DD0, DD1 + VMOVDQA AA0, AA2 + VMOVDQA BB0, BB2 + VMOVDQA CC0, CC2 + VMOVDQA DD0, DD2 + VMOVDQA DD1, TT3 + MOVQ $10, itr2 + +sealAVX2192InnerCipherLoop: + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 + VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1 + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 + VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1 + DECQ itr2 + JNE sealAVX2192InnerCipherLoop + VPADDD AA2, AA0, AA0; VPADDD AA2, AA1, AA1 + VPADDD BB2, BB0, BB0; VPADDD BB2, BB1, BB1 + VPADDD CC2, CC0, CC0; VPADDD CC2, CC1, CC1 + VPADDD DD2, DD0, DD0; VPADDD TT3, DD1, DD1 + VPERM2I128 $0x02, AA0, BB0, TT0 + + // Clamp and store poly key + VPAND ·polyClampMask<>(SB), TT0, TT0 + VMOVDQA TT0, rsStoreAVX2 + + // Stream for up to 192 bytes + VPERM2I128 $0x13, AA0, BB0, AA0 + VPERM2I128 $0x13, CC0, DD0, BB0 + VPERM2I128 $0x02, AA1, BB1, CC0 + VPERM2I128 $0x02, CC1, DD1, DD0 + VPERM2I128 $0x13, AA1, BB1, AA1 + VPERM2I128 $0x13, CC1, DD1, BB1 + +sealAVX2ShortSeal: + // Hash aad + MOVQ ad_len+80(FP), itr2 + CALL polyHashADInternal<>(SB) + XORQ itr1, itr1 + +sealAVX2SealHash: + // itr1 holds the number of bytes encrypted but not yet hashed + CMPQ itr1, $16 + JB sealAVX2ShortSealLoop + polyAdd(0(oup)) + polyMul + SUBQ $16, itr1 + ADDQ $16, oup + JMP sealAVX2SealHash + +sealAVX2ShortSealLoop: + CMPQ inl, $32 + JB sealAVX2ShortTail32 + SUBQ $32, inl + + // Load for encryption + VPXOR (inp), AA0, AA0 + VMOVDQU AA0, (oup) + LEAQ (1*32)(inp), 
inp + + // Now can hash + polyAdd(0*8(oup)) + polyMulAVX2 + polyAdd(2*8(oup)) + polyMulAVX2 + LEAQ (1*32)(oup), oup + + // Shift stream left + VMOVDQA BB0, AA0 + VMOVDQA CC0, BB0 + VMOVDQA DD0, CC0 + VMOVDQA AA1, DD0 + VMOVDQA BB1, AA1 + VMOVDQA CC1, BB1 + VMOVDQA DD1, CC1 + VMOVDQA AA2, DD1 + VMOVDQA BB2, AA2 + JMP sealAVX2ShortSealLoop + +sealAVX2ShortTail32: + CMPQ inl, $16 + VMOVDQA A0, A1 + JB sealAVX2ShortDone + + SUBQ $16, inl + + // Load for encryption + VPXOR (inp), A0, T0 + VMOVDQU T0, (oup) + LEAQ (1*16)(inp), inp + + // Hash + polyAdd(0*8(oup)) + polyMulAVX2 + LEAQ (1*16)(oup), oup + VPERM2I128 $0x11, AA0, AA0, AA0 + VMOVDQA A0, A1 + +sealAVX2ShortDone: + VZEROUPPER + JMP sealSSETail + +// ---------------------------------------------------------------------------- +// Special optimization for buffers smaller than 321 bytes +seal320AVX2: + // For up to 320 bytes of ciphertext and 64 bytes for the poly key, we process six blocks + VMOVDQA AA0, AA1; VMOVDQA BB0, BB1; VMOVDQA CC0, CC1; VPADDD ·avx2IncMask<>(SB), DD0, DD1 + VMOVDQA AA0, AA2; VMOVDQA BB0, BB2; VMOVDQA CC0, CC2; VPADDD ·avx2IncMask<>(SB), DD1, DD2 + VMOVDQA BB0, TT1; VMOVDQA CC0, TT2; VMOVDQA DD0, TT3 + MOVQ $10, itr2 + +sealAVX2320InnerCipherLoop: + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 + VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2 + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 + VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2 + DECQ itr2 + JNE sealAVX2320InnerCipherLoop + + VMOVDQA ·chacha20Constants<>(SB), TT0 + VPADDD TT0, AA0, AA0; VPADDD TT0, AA1, AA1; VPADDD TT0, AA2, AA2 + VPADDD TT1, BB0, BB0; VPADDD TT1, BB1, BB1; VPADDD TT1, BB2, BB2 + VPADDD TT2, CC0, CC0; VPADDD TT2, CC1, CC1; VPADDD TT2, CC2, CC2 + VMOVDQA ·avx2IncMask<>(SB), TT0 + VPADDD TT3, DD0, DD0; VPADDD TT0, TT3, TT3 + VPADDD TT3, DD1, DD1; VPADDD TT0, TT3, TT3 + VPADDD TT3, DD2, DD2 + + // Clamp and store poly key + VPERM2I128 $0x02, AA0, BB0, TT0 + VPAND ·polyClampMask<>(SB), TT0, TT0 + VMOVDQA TT0, rsStoreAVX2 + + // Stream for up to 320 bytes + VPERM2I128 $0x13, AA0, BB0, AA0 + VPERM2I128 $0x13, CC0, DD0, BB0 + VPERM2I128 $0x02, AA1, BB1, CC0 + VPERM2I128 $0x02, CC1, DD1, DD0 + VPERM2I128 $0x13, AA1, BB1, AA1 + VPERM2I128 $0x13, CC1, DD1, BB1 + VPERM2I128 $0x02, AA2, BB2, CC1 + VPERM2I128 $0x02, CC2, DD2, DD1 + VPERM2I128 $0x13, AA2, BB2, AA2 + VPERM2I128 $0x13, CC2, DD2, BB2 + JMP sealAVX2ShortSeal + +// ---------------------------------------------------------------------------- +// Special optimization for the last 128 bytes of ciphertext +sealAVX2Tail128: + // Need to decrypt up to 128 bytes - prepare two blocks + // If we got here after the main loop - there are 512 encrypted bytes waiting to be hashed + // If we got here before the main loop - there are 448 encrpyred bytes waiting to be hashed + VMOVDQA ·chacha20Constants<>(SB), AA0 + VMOVDQA state1StoreAVX2, BB0 + VMOVDQA state2StoreAVX2, CC0 + VMOVDQA ctr3StoreAVX2, DD0 + VPADDD ·avx2IncMask<>(SB), DD0, DD0 + VMOVDQA 
DD0, DD1 + +sealAVX2Tail128LoopA: + polyAdd(0(oup)) + polyMul + LEAQ 16(oup), oup + +sealAVX2Tail128LoopB: + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0) + polyAdd(0(oup)) + polyMul + VPALIGNR $4, BB0, BB0, BB0 + VPALIGNR $8, CC0, CC0, CC0 + VPALIGNR $12, DD0, DD0, DD0 + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0) + polyAdd(16(oup)) + polyMul + LEAQ 32(oup), oup + VPALIGNR $12, BB0, BB0, BB0 + VPALIGNR $8, CC0, CC0, CC0 + VPALIGNR $4, DD0, DD0, DD0 + DECQ itr1 + JG sealAVX2Tail128LoopA + DECQ itr2 + JGE sealAVX2Tail128LoopB + + VPADDD ·chacha20Constants<>(SB), AA0, AA1 + VPADDD state1StoreAVX2, BB0, BB1 + VPADDD state2StoreAVX2, CC0, CC1 + VPADDD DD1, DD0, DD1 + + VPERM2I128 $0x02, AA1, BB1, AA0 + VPERM2I128 $0x02, CC1, DD1, BB0 + VPERM2I128 $0x13, AA1, BB1, CC0 + VPERM2I128 $0x13, CC1, DD1, DD0 + JMP sealAVX2ShortSealLoop + +// ---------------------------------------------------------------------------- +// Special optimization for the last 256 bytes of ciphertext +sealAVX2Tail256: + // Need to decrypt up to 256 bytes - prepare two blocks + // If we got here after the main loop - there are 512 encrypted bytes waiting to be hashed + // If we got here before the main loop - there are 448 encrpyred bytes waiting to be hashed + VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA ·chacha20Constants<>(SB), AA1 + VMOVDQA state1StoreAVX2, BB0; VMOVDQA state1StoreAVX2, BB1 + VMOVDQA state2StoreAVX2, CC0; VMOVDQA state2StoreAVX2, CC1 + VMOVDQA ctr3StoreAVX2, DD0 + VPADDD ·avx2IncMask<>(SB), DD0, DD0 + VPADDD ·avx2IncMask<>(SB), DD0, DD1 + VMOVDQA DD0, TT1 + VMOVDQA DD1, TT2 + +sealAVX2Tail256LoopA: + polyAdd(0(oup)) + polyMul + LEAQ 16(oup), oup + +sealAVX2Tail256LoopB: + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) + polyAdd(0(oup)) + polyMul + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 + VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1 + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) + polyAdd(16(oup)) + polyMul + LEAQ 32(oup), oup + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 + VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1 + DECQ itr1 + JG sealAVX2Tail256LoopA + DECQ itr2 + JGE sealAVX2Tail256LoopB + + VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1 + VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1 + VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1 + VPADDD TT1, DD0, DD0; VPADDD TT2, DD1, DD1 + VPERM2I128 $0x02, AA0, BB0, TT0 + VPERM2I128 $0x02, CC0, DD0, TT1 + VPERM2I128 $0x13, AA0, BB0, TT2 + VPERM2I128 $0x13, CC0, DD0, TT3 + VPXOR (0*32)(inp), TT0, TT0; VPXOR (1*32)(inp), TT1, TT1; VPXOR (2*32)(inp), TT2, TT2; VPXOR (3*32)(inp), TT3, TT3 + VMOVDQU TT0, (0*32)(oup); VMOVDQU TT1, (1*32)(oup); VMOVDQU TT2, (2*32)(oup); VMOVDQU TT3, (3*32)(oup) + MOVQ $128, itr1 + LEAQ 128(inp), inp + SUBQ $128, inl + VPERM2I128 $0x02, AA1, BB1, AA0 + VPERM2I128 $0x02, CC1, DD1, BB0 + VPERM2I128 $0x13, AA1, BB1, CC0 + VPERM2I128 $0x13, CC1, DD1, DD0 + + JMP sealAVX2SealHash + +// ---------------------------------------------------------------------------- +// Special optimization for the last 384 bytes of ciphertext +sealAVX2Tail384: + // Need to decrypt up to 384 bytes - prepare two blocks + // If we got here after the main loop - there are 512 encrypted bytes waiting to be hashed + // If we got here before the main loop - there are 448 
encrpyred bytes waiting to be hashed + VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2 + VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2 + VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2 + VMOVDQA ctr3StoreAVX2, DD0 + VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2 + VMOVDQA DD0, TT1; VMOVDQA DD1, TT2; VMOVDQA DD2, TT3 + +sealAVX2Tail384LoopA: + polyAdd(0(oup)) + polyMul + LEAQ 16(oup), oup + +sealAVX2Tail384LoopB: + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) + polyAdd(0(oup)) + polyMul + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 + VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2 + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) + polyAdd(16(oup)) + polyMul + LEAQ 32(oup), oup + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 + VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2 + DECQ itr1 + JG sealAVX2Tail384LoopA + DECQ itr2 + JGE sealAVX2Tail384LoopB + + VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2 + VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2 + VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2 + VPADDD TT1, DD0, DD0; VPADDD TT2, DD1, DD1; VPADDD TT3, DD2, DD2 + VPERM2I128 $0x02, AA0, BB0, TT0 + VPERM2I128 $0x02, CC0, DD0, TT1 + VPERM2I128 $0x13, AA0, BB0, TT2 + VPERM2I128 $0x13, CC0, DD0, TT3 + VPXOR (0*32)(inp), TT0, TT0; VPXOR (1*32)(inp), TT1, TT1; VPXOR (2*32)(inp), TT2, TT2; VPXOR (3*32)(inp), TT3, TT3 + VMOVDQU TT0, (0*32)(oup); VMOVDQU TT1, (1*32)(oup); VMOVDQU TT2, (2*32)(oup); VMOVDQU TT3, (3*32)(oup) + VPERM2I128 $0x02, AA1, BB1, TT0 + VPERM2I128 $0x02, CC1, DD1, TT1 + VPERM2I128 $0x13, AA1, BB1, TT2 + VPERM2I128 $0x13, CC1, DD1, TT3 + VPXOR (4*32)(inp), TT0, TT0; VPXOR (5*32)(inp), TT1, TT1; VPXOR (6*32)(inp), TT2, TT2; VPXOR (7*32)(inp), TT3, TT3 + VMOVDQU TT0, (4*32)(oup); VMOVDQU TT1, (5*32)(oup); VMOVDQU TT2, (6*32)(oup); VMOVDQU TT3, (7*32)(oup) + MOVQ $256, itr1 + LEAQ 256(inp), inp + SUBQ $256, inl + VPERM2I128 $0x02, AA2, BB2, AA0 + VPERM2I128 $0x02, CC2, DD2, BB0 + VPERM2I128 $0x13, AA2, BB2, CC0 + VPERM2I128 $0x13, CC2, DD2, DD0 + + JMP sealAVX2SealHash + +// ---------------------------------------------------------------------------- +// Special optimization for the last 512 bytes of ciphertext +sealAVX2Tail512: + // Need to decrypt up to 512 bytes - prepare two blocks + // If we got here after the main loop - there are 512 encrypted bytes waiting to be hashed + // If we got here before the main loop - there are 448 encrpyred bytes waiting to be hashed + VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 + VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3 + VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3 + VMOVDQA ctr3StoreAVX2, DD0 + VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD 
·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3 + VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2 + +sealAVX2Tail512LoopA: + polyAdd(0(oup)) + polyMul + LEAQ 16(oup), oup + +sealAVX2Tail512LoopB: + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + polyAdd(0*8(oup)) + polyMulAVX2 + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $4, BB3, BB3, BB3 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 + VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2; VPALIGNR $12, DD3, DD3, DD3 + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + polyAdd(2*8(oup)) + polyMulAVX2 + LEAQ (4*8)(oup), oup + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, 
BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $12, BB3, BB3, BB3 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 + VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2; VPALIGNR $4, DD3, DD3, DD3 + + DECQ itr1 + JG sealAVX2Tail512LoopA + DECQ itr2 + JGE sealAVX2Tail512LoopB + + VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3 + VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3 + VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3 + VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3 + VMOVDQA CC3, tmpStoreAVX2 + VPERM2I128 $0x02, AA0, BB0, CC3 + VPXOR (0*32)(inp), CC3, CC3 + VMOVDQU CC3, (0*32)(oup) + VPERM2I128 $0x02, CC0, DD0, CC3 + VPXOR (1*32)(inp), CC3, CC3 + VMOVDQU CC3, (1*32)(oup) + VPERM2I128 $0x13, AA0, BB0, CC3 + VPXOR (2*32)(inp), CC3, CC3 + VMOVDQU CC3, (2*32)(oup) + VPERM2I128 $0x13, CC0, DD0, CC3 + VPXOR (3*32)(inp), CC3, CC3 + VMOVDQU CC3, (3*32)(oup) + + VPERM2I128 $0x02, AA1, BB1, AA0 + VPERM2I128 $0x02, CC1, DD1, BB0 + VPERM2I128 $0x13, AA1, BB1, CC0 + VPERM2I128 $0x13, CC1, DD1, DD0 + VPXOR (4*32)(inp), AA0, AA0; VPXOR (5*32)(inp), BB0, BB0; VPXOR (6*32)(inp), CC0, CC0; VPXOR (7*32)(inp), DD0, DD0 + VMOVDQU AA0, (4*32)(oup); VMOVDQU BB0, (5*32)(oup); VMOVDQU CC0, (6*32)(oup); VMOVDQU DD0, (7*32)(oup) + + VPERM2I128 $0x02, AA2, BB2, AA0 + VPERM2I128 $0x02, CC2, DD2, BB0 + VPERM2I128 $0x13, AA2, BB2, CC0 + VPERM2I128 $0x13, CC2, DD2, DD0 + VPXOR (8*32)(inp), AA0, AA0; VPXOR (9*32)(inp), BB0, BB0; VPXOR (10*32)(inp), CC0, CC0; VPXOR (11*32)(inp), DD0, DD0 + VMOVDQU AA0, (8*32)(oup); VMOVDQU BB0, (9*32)(oup); VMOVDQU CC0, (10*32)(oup); VMOVDQU DD0, (11*32)(oup) + + MOVQ $384, itr1 + LEAQ 384(inp), inp + SUBQ $384, inl + VPERM2I128 $0x02, AA3, BB3, AA0 + VPERM2I128 $0x02, tmpStoreAVX2, DD3, BB0 + VPERM2I128 $0x13, AA3, BB3, CC0 + VPERM2I128 $0x13, tmpStoreAVX2, DD3, DD0 + + JMP sealAVX2SealHash diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go temporal-1.22.5/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go --- temporal-1.21.5-1/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,81 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
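The generic implementation added below (sealGeneric/openGeneric) feeds Poly1305 the RFC 8439 layout: the additional data zero-padded to a 16-byte boundary, the ciphertext zero-padded the same way, then the two lengths as little-endian 64-bit integers; the SSE/AVX2 assembly above maintains the same running MAC incrementally while it encrypts or decrypts. As a rough, self-contained Go sketch (not part of the vendored sources; the helper name buildMACInput is invented for illustration), the following only lays out those bytes, without computing the MAC itself:

package main

import (
	"encoding/binary"
	"fmt"
)

// buildMACInput is a hypothetical helper illustrating the byte layout that
// Poly1305 authenticates in ChaCha20-Poly1305 (RFC 8439), the same layout
// writeWithPadding/writeUint64 produce below: AAD, zero padding to a 16-byte
// boundary, ciphertext, zero padding to a 16-byte boundary, then the two
// lengths as little-endian 64-bit integers.
func buildMACInput(aad, ciphertext []byte) []byte {
	pad := func(n int) int { // zero bytes needed to reach a 16-byte boundary
		if r := n % 16; r != 0 {
			return 16 - r
		}
		return 0
	}
	out := make([]byte, 0, len(aad)+pad(len(aad))+len(ciphertext)+pad(len(ciphertext))+16)
	out = append(out, aad...)
	out = append(out, make([]byte, pad(len(aad)))...)
	out = append(out, ciphertext...)
	out = append(out, make([]byte, pad(len(ciphertext)))...)
	var lens [16]byte
	binary.LittleEndian.PutUint64(lens[0:8], uint64(len(aad)))
	binary.LittleEndian.PutUint64(lens[8:16], uint64(len(ciphertext)))
	return append(out, lens[:]...)
}

func main() {
	in := buildMACInput([]byte("header"), []byte("some ciphertext"))
	fmt.Printf("%d bytes of Poly1305 input\n", len(in)) // 6+10 + 15+1 + 16 = 48
}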
+ +package chacha20poly1305 + +import ( + "encoding/binary" + + "golang.org/x/crypto/chacha20" + "golang.org/x/crypto/internal/alias" + "golang.org/x/crypto/internal/poly1305" +) + +func writeWithPadding(p *poly1305.MAC, b []byte) { + p.Write(b) + if rem := len(b) % 16; rem != 0 { + var buf [16]byte + padLen := 16 - rem + p.Write(buf[:padLen]) + } +} + +func writeUint64(p *poly1305.MAC, n int) { + var buf [8]byte + binary.LittleEndian.PutUint64(buf[:], uint64(n)) + p.Write(buf[:]) +} + +func (c *chacha20poly1305) sealGeneric(dst, nonce, plaintext, additionalData []byte) []byte { + ret, out := sliceForAppend(dst, len(plaintext)+poly1305.TagSize) + ciphertext, tag := out[:len(plaintext)], out[len(plaintext):] + if alias.InexactOverlap(out, plaintext) { + panic("chacha20poly1305: invalid buffer overlap") + } + + var polyKey [32]byte + s, _ := chacha20.NewUnauthenticatedCipher(c.key[:], nonce) + s.XORKeyStream(polyKey[:], polyKey[:]) + s.SetCounter(1) // set the counter to 1, skipping 32 bytes + s.XORKeyStream(ciphertext, plaintext) + + p := poly1305.New(&polyKey) + writeWithPadding(p, additionalData) + writeWithPadding(p, ciphertext) + writeUint64(p, len(additionalData)) + writeUint64(p, len(plaintext)) + p.Sum(tag[:0]) + + return ret +} + +func (c *chacha20poly1305) openGeneric(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) { + tag := ciphertext[len(ciphertext)-16:] + ciphertext = ciphertext[:len(ciphertext)-16] + + var polyKey [32]byte + s, _ := chacha20.NewUnauthenticatedCipher(c.key[:], nonce) + s.XORKeyStream(polyKey[:], polyKey[:]) + s.SetCounter(1) // set the counter to 1, skipping 32 bytes + + p := poly1305.New(&polyKey) + writeWithPadding(p, additionalData) + writeWithPadding(p, ciphertext) + writeUint64(p, len(additionalData)) + writeUint64(p, len(ciphertext)) + + ret, out := sliceForAppend(dst, len(ciphertext)) + if alias.InexactOverlap(out, ciphertext) { + panic("chacha20poly1305: invalid buffer overlap") + } + if !p.Verify(tag) { + for i := range out { + out[i] = 0 + } + return nil, errOpen + } + + s.XORKeyStream(out, ciphertext) + return ret, nil +} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_noasm.go temporal-1.22.5/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_noasm.go --- temporal-1.21.5-1/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_noasm.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_noasm.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,16 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
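
The sealGeneric and openGeneric functions above authenticate the additional data and ciphertext in the RFC 8439 layout: each part zero-padded to a 16-byte boundary, followed by both lengths as little-endian 64-bit integers. A standalone sketch of that layout, illustrative only (macData and pad16 are hypothetical helper names, not part of the diff):

package aeadsketch

import (
	"bytes"
	"encoding/binary"
)

// pad16 appends the zero padding that writeWithPadding produces.
func pad16(b *bytes.Buffer, n int) {
	if rem := n % 16; rem != 0 {
		b.Write(make([]byte, 16-rem))
	}
}

// macData assembles the byte string that the one-time Poly1305 key
// authenticates: aad, padding, ciphertext, padding, then both lengths.
func macData(additionalData, ciphertext []byte) []byte {
	var b bytes.Buffer
	b.Write(additionalData)
	pad16(&b, len(additionalData))
	b.Write(ciphertext)
	pad16(&b, len(ciphertext))
	var l [8]byte
	binary.LittleEndian.PutUint64(l[:], uint64(len(additionalData)))
	b.Write(l[:])
	binary.LittleEndian.PutUint64(l[:], uint64(len(ciphertext)))
	b.Write(l[:])
	return b.Bytes()
}
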
+ +//go:build !amd64 || !gc || purego +// +build !amd64 !gc purego + +package chacha20poly1305 + +func (c *chacha20poly1305) seal(dst, nonce, plaintext, additionalData []byte) []byte { + return c.sealGeneric(dst, nonce, plaintext, additionalData) +} + +func (c *chacha20poly1305) open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) { + return c.openGeneric(dst, nonce, ciphertext, additionalData) +} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/crypto/chacha20poly1305/xchacha20poly1305.go temporal-1.22.5/src/vendor/golang.org/x/crypto/chacha20poly1305/xchacha20poly1305.go --- temporal-1.21.5-1/src/vendor/golang.org/x/crypto/chacha20poly1305/xchacha20poly1305.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/crypto/chacha20poly1305/xchacha20poly1305.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,86 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package chacha20poly1305 + +import ( + "crypto/cipher" + "errors" + + "golang.org/x/crypto/chacha20" +) + +type xchacha20poly1305 struct { + key [KeySize]byte +} + +// NewX returns a XChaCha20-Poly1305 AEAD that uses the given 256-bit key. +// +// XChaCha20-Poly1305 is a ChaCha20-Poly1305 variant that takes a longer nonce, +// suitable to be generated randomly without risk of collisions. It should be +// preferred when nonce uniqueness cannot be trivially ensured, or whenever +// nonces are randomly generated. +func NewX(key []byte) (cipher.AEAD, error) { + if len(key) != KeySize { + return nil, errors.New("chacha20poly1305: bad key length") + } + ret := new(xchacha20poly1305) + copy(ret.key[:], key) + return ret, nil +} + +func (*xchacha20poly1305) NonceSize() int { + return NonceSizeX +} + +func (*xchacha20poly1305) Overhead() int { + return Overhead +} + +func (x *xchacha20poly1305) Seal(dst, nonce, plaintext, additionalData []byte) []byte { + if len(nonce) != NonceSizeX { + panic("chacha20poly1305: bad nonce length passed to Seal") + } + + // XChaCha20-Poly1305 technically supports a 64-bit counter, so there is no + // size limit. However, since we reuse the ChaCha20-Poly1305 implementation, + // the second half of the counter is not available. This is unlikely to be + // an issue because the cipher.AEAD API requires the entire message to be in + // memory, and the counter overflows at 256 GB. + if uint64(len(plaintext)) > (1<<38)-64 { + panic("chacha20poly1305: plaintext too large") + } + + c := new(chacha20poly1305) + hKey, _ := chacha20.HChaCha20(x.key[:], nonce[0:16]) + copy(c.key[:], hKey) + + // The first 4 bytes of the final nonce are unused counter space. + cNonce := make([]byte, NonceSize) + copy(cNonce[4:12], nonce[16:24]) + + return c.seal(dst, cNonce[:], plaintext, additionalData) +} + +func (x *xchacha20poly1305) Open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) { + if len(nonce) != NonceSizeX { + panic("chacha20poly1305: bad nonce length passed to Open") + } + if len(ciphertext) < 16 { + return nil, errOpen + } + if uint64(len(ciphertext)) > (1<<38)-48 { + panic("chacha20poly1305: ciphertext too large") + } + + c := new(chacha20poly1305) + hKey, _ := chacha20.HChaCha20(x.key[:], nonce[0:16]) + copy(c.key[:], hKey) + + // The first 4 bytes of the final nonce are unused counter space. 
+ cNonce := make([]byte, NonceSize) + copy(cNonce[4:12], nonce[16:24]) + + return c.open(dst, cNonce[:], ciphertext, additionalData) +} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go temporal-1.22.5/src/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go --- temporal-1.21.5-1/src/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,46 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package asn1 contains supporting types for parsing and building ASN.1 +// messages with the cryptobyte package. +package asn1 // import "golang.org/x/crypto/cryptobyte/asn1" + +// Tag represents an ASN.1 identifier octet, consisting of a tag number +// (indicating a type) and class (such as context-specific or constructed). +// +// Methods in the cryptobyte package only support the low-tag-number form, i.e. +// a single identifier octet with bits 7-8 encoding the class and bits 1-6 +// encoding the tag number. +type Tag uint8 + +const ( + classConstructed = 0x20 + classContextSpecific = 0x80 +) + +// Constructed returns t with the constructed class bit set. +func (t Tag) Constructed() Tag { return t | classConstructed } + +// ContextSpecific returns t with the context-specific class bit set. +func (t Tag) ContextSpecific() Tag { return t | classContextSpecific } + +// The following is a list of standard tag and class combinations. +const ( + BOOLEAN = Tag(1) + INTEGER = Tag(2) + BIT_STRING = Tag(3) + OCTET_STRING = Tag(4) + NULL = Tag(5) + OBJECT_IDENTIFIER = Tag(6) + ENUM = Tag(10) + UTF8String = Tag(12) + SEQUENCE = Tag(16 | classConstructed) + SET = Tag(17 | classConstructed) + PrintableString = Tag(19) + T61String = Tag(20) + IA5String = Tag(22) + UTCTime = Tag(23) + GeneralizedTime = Tag(24) + GeneralString = Tag(27) +) diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/crypto/cryptobyte/asn1.go temporal-1.22.5/src/vendor/golang.org/x/crypto/cryptobyte/asn1.go --- temporal-1.21.5-1/src/vendor/golang.org/x/crypto/cryptobyte/asn1.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/crypto/cryptobyte/asn1.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,824 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cryptobyte + +import ( + encoding_asn1 "encoding/asn1" + "fmt" + "math/big" + "reflect" + "time" + + "golang.org/x/crypto/cryptobyte/asn1" +) + +// This file contains ASN.1-related methods for String and Builder. + +// Builder + +// AddASN1Int64 appends a DER-encoded ASN.1 INTEGER. +func (b *Builder) AddASN1Int64(v int64) { + b.addASN1Signed(asn1.INTEGER, v) +} + +// AddASN1Int64WithTag appends a DER-encoded ASN.1 INTEGER with the +// given tag. +func (b *Builder) AddASN1Int64WithTag(v int64, tag asn1.Tag) { + b.addASN1Signed(tag, v) +} + +// AddASN1Enum appends a DER-encoded ASN.1 ENUMERATION. 
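
Returning to the xchacha20poly1305.go hunk above: its NewX documentation recommends the X variant whenever nonces are drawn at random. A minimal caller might look like the following sketch (sealWithRandomNonce is a hypothetical name, and the key is assumed to be a 32-byte secret already in hand):

package aeadsketch

import (
	"crypto/rand"

	"golang.org/x/crypto/chacha20poly1305"
)

// sealWithRandomNonce encrypts plaintext under a freshly drawn 24-byte nonce
// and prepends that nonce to the returned ciphertext so Open can recover it.
func sealWithRandomNonce(key, plaintext, additionalData []byte) ([]byte, error) {
	aead, err := chacha20poly1305.NewX(key)
	if err != nil {
		return nil, err
	}
	nonce := make([]byte, chacha20poly1305.NonceSizeX)
	if _, err := rand.Read(nonce); err != nil {
		return nil, err
	}
	return aead.Seal(nonce, nonce, plaintext, additionalData), nil
}
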
+func (b *Builder) AddASN1Enum(v int64) { + b.addASN1Signed(asn1.ENUM, v) +} + +func (b *Builder) addASN1Signed(tag asn1.Tag, v int64) { + b.AddASN1(tag, func(c *Builder) { + length := 1 + for i := v; i >= 0x80 || i < -0x80; i >>= 8 { + length++ + } + + for ; length > 0; length-- { + i := v >> uint((length-1)*8) & 0xff + c.AddUint8(uint8(i)) + } + }) +} + +// AddASN1Uint64 appends a DER-encoded ASN.1 INTEGER. +func (b *Builder) AddASN1Uint64(v uint64) { + b.AddASN1(asn1.INTEGER, func(c *Builder) { + length := 1 + for i := v; i >= 0x80; i >>= 8 { + length++ + } + + for ; length > 0; length-- { + i := v >> uint((length-1)*8) & 0xff + c.AddUint8(uint8(i)) + } + }) +} + +// AddASN1BigInt appends a DER-encoded ASN.1 INTEGER. +func (b *Builder) AddASN1BigInt(n *big.Int) { + if b.err != nil { + return + } + + b.AddASN1(asn1.INTEGER, func(c *Builder) { + if n.Sign() < 0 { + // A negative number has to be converted to two's-complement form. So we + // invert and subtract 1. If the most-significant-bit isn't set then + // we'll need to pad the beginning with 0xff in order to keep the number + // negative. + nMinus1 := new(big.Int).Neg(n) + nMinus1.Sub(nMinus1, bigOne) + bytes := nMinus1.Bytes() + for i := range bytes { + bytes[i] ^= 0xff + } + if len(bytes) == 0 || bytes[0]&0x80 == 0 { + c.add(0xff) + } + c.add(bytes...) + } else if n.Sign() == 0 { + c.add(0) + } else { + bytes := n.Bytes() + if bytes[0]&0x80 != 0 { + c.add(0) + } + c.add(bytes...) + } + }) +} + +// AddASN1OctetString appends a DER-encoded ASN.1 OCTET STRING. +func (b *Builder) AddASN1OctetString(bytes []byte) { + b.AddASN1(asn1.OCTET_STRING, func(c *Builder) { + c.AddBytes(bytes) + }) +} + +const generalizedTimeFormatStr = "20060102150405Z0700" + +// AddASN1GeneralizedTime appends a DER-encoded ASN.1 GENERALIZEDTIME. +func (b *Builder) AddASN1GeneralizedTime(t time.Time) { + if t.Year() < 0 || t.Year() > 9999 { + b.err = fmt.Errorf("cryptobyte: cannot represent %v as a GeneralizedTime", t) + return + } + b.AddASN1(asn1.GeneralizedTime, func(c *Builder) { + c.AddBytes([]byte(t.Format(generalizedTimeFormatStr))) + }) +} + +// AddASN1UTCTime appends a DER-encoded ASN.1 UTCTime. +func (b *Builder) AddASN1UTCTime(t time.Time) { + b.AddASN1(asn1.UTCTime, func(c *Builder) { + // As utilized by the X.509 profile, UTCTime can only + // represent the years 1950 through 2049. + if t.Year() < 1950 || t.Year() >= 2050 { + b.err = fmt.Errorf("cryptobyte: cannot represent %v as a UTCTime", t) + return + } + c.AddBytes([]byte(t.Format(defaultUTCTimeFormatStr))) + }) +} + +// AddASN1BitString appends a DER-encoded ASN.1 BIT STRING. This does not +// support BIT STRINGs that are not a whole number of bytes. 
+func (b *Builder) AddASN1BitString(data []byte) { + b.AddASN1(asn1.BIT_STRING, func(b *Builder) { + b.AddUint8(0) + b.AddBytes(data) + }) +} + +func (b *Builder) addBase128Int(n int64) { + var length int + if n == 0 { + length = 1 + } else { + for i := n; i > 0; i >>= 7 { + length++ + } + } + + for i := length - 1; i >= 0; i-- { + o := byte(n >> uint(i*7)) + o &= 0x7f + if i != 0 { + o |= 0x80 + } + + b.add(o) + } +} + +func isValidOID(oid encoding_asn1.ObjectIdentifier) bool { + if len(oid) < 2 { + return false + } + + if oid[0] > 2 || (oid[0] <= 1 && oid[1] >= 40) { + return false + } + + for _, v := range oid { + if v < 0 { + return false + } + } + + return true +} + +func (b *Builder) AddASN1ObjectIdentifier(oid encoding_asn1.ObjectIdentifier) { + b.AddASN1(asn1.OBJECT_IDENTIFIER, func(b *Builder) { + if !isValidOID(oid) { + b.err = fmt.Errorf("cryptobyte: invalid OID: %v", oid) + return + } + + b.addBase128Int(int64(oid[0])*40 + int64(oid[1])) + for _, v := range oid[2:] { + b.addBase128Int(int64(v)) + } + }) +} + +func (b *Builder) AddASN1Boolean(v bool) { + b.AddASN1(asn1.BOOLEAN, func(b *Builder) { + if v { + b.AddUint8(0xff) + } else { + b.AddUint8(0) + } + }) +} + +func (b *Builder) AddASN1NULL() { + b.add(uint8(asn1.NULL), 0) +} + +// MarshalASN1 calls encoding_asn1.Marshal on its input and appends the result if +// successful or records an error if one occurred. +func (b *Builder) MarshalASN1(v interface{}) { + // NOTE(martinkr): This is somewhat of a hack to allow propagation of + // encoding_asn1.Marshal errors into Builder.err. N.B. if you call MarshalASN1 with a + // value embedded into a struct, its tag information is lost. + if b.err != nil { + return + } + bytes, err := encoding_asn1.Marshal(v) + if err != nil { + b.err = err + return + } + b.AddBytes(bytes) +} + +// AddASN1 appends an ASN.1 object. The object is prefixed with the given tag. +// Tags greater than 30 are not supported and result in an error (i.e. +// low-tag-number form only). The child builder passed to the +// BuilderContinuation can be used to build the content of the ASN.1 object. +func (b *Builder) AddASN1(tag asn1.Tag, f BuilderContinuation) { + if b.err != nil { + return + } + // Identifiers with the low five bits set indicate high-tag-number format + // (two or more octets), which we don't support. + if tag&0x1f == 0x1f { + b.err = fmt.Errorf("cryptobyte: high-tag number identifier octects not supported: 0x%x", tag) + return + } + b.AddUint8(uint8(tag)) + b.addLengthPrefixed(1, true, f) +} + +// String + +// ReadASN1Boolean decodes an ASN.1 BOOLEAN and converts it to a boolean +// representation into out and advances. It reports whether the read +// was successful. +func (s *String) ReadASN1Boolean(out *bool) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.BOOLEAN) || len(bytes) != 1 { + return false + } + + switch bytes[0] { + case 0: + *out = false + case 0xff: + *out = true + default: + return false + } + + return true +} + +// ReadASN1Integer decodes an ASN.1 INTEGER into out and advances. If out does +// not point to an integer, to a big.Int, or to a []byte it panics. Only +// positive and zero values can be decoded into []byte, and they are returned as +// big-endian binary values that share memory with s. Positive values will have +// no leading zeroes, and zero will be returned as a single zero byte. +// ReadASN1Integer reports whether the read was successful. 
+func (s *String) ReadASN1Integer(out interface{}) bool { + switch out := out.(type) { + case *int, *int8, *int16, *int32, *int64: + var i int64 + if !s.readASN1Int64(&i) || reflect.ValueOf(out).Elem().OverflowInt(i) { + return false + } + reflect.ValueOf(out).Elem().SetInt(i) + return true + case *uint, *uint8, *uint16, *uint32, *uint64: + var u uint64 + if !s.readASN1Uint64(&u) || reflect.ValueOf(out).Elem().OverflowUint(u) { + return false + } + reflect.ValueOf(out).Elem().SetUint(u) + return true + case *big.Int: + return s.readASN1BigInt(out) + case *[]byte: + return s.readASN1Bytes(out) + default: + panic("out does not point to an integer type") + } +} + +func checkASN1Integer(bytes []byte) bool { + if len(bytes) == 0 { + // An INTEGER is encoded with at least one octet. + return false + } + if len(bytes) == 1 { + return true + } + if bytes[0] == 0 && bytes[1]&0x80 == 0 || bytes[0] == 0xff && bytes[1]&0x80 == 0x80 { + // Value is not minimally encoded. + return false + } + return true +} + +var bigOne = big.NewInt(1) + +func (s *String) readASN1BigInt(out *big.Int) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) { + return false + } + if bytes[0]&0x80 == 0x80 { + // Negative number. + neg := make([]byte, len(bytes)) + for i, b := range bytes { + neg[i] = ^b + } + out.SetBytes(neg) + out.Add(out, bigOne) + out.Neg(out) + } else { + out.SetBytes(bytes) + } + return true +} + +func (s *String) readASN1Bytes(out *[]byte) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) { + return false + } + if bytes[0]&0x80 == 0x80 { + return false + } + for len(bytes) > 1 && bytes[0] == 0 { + bytes = bytes[1:] + } + *out = bytes + return true +} + +func (s *String) readASN1Int64(out *int64) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) || !asn1Signed(out, bytes) { + return false + } + return true +} + +func asn1Signed(out *int64, n []byte) bool { + length := len(n) + if length > 8 { + return false + } + for i := 0; i < length; i++ { + *out <<= 8 + *out |= int64(n[i]) + } + // Shift up and down in order to sign extend the result. + *out <<= 64 - uint8(length)*8 + *out >>= 64 - uint8(length)*8 + return true +} + +func (s *String) readASN1Uint64(out *uint64) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) || !asn1Unsigned(out, bytes) { + return false + } + return true +} + +func asn1Unsigned(out *uint64, n []byte) bool { + length := len(n) + if length > 9 || length == 9 && n[0] != 0 { + // Too large for uint64. + return false + } + if n[0]&0x80 != 0 { + // Negative number. + return false + } + for i := 0; i < length; i++ { + *out <<= 8 + *out |= uint64(n[i]) + } + return true +} + +// ReadASN1Int64WithTag decodes an ASN.1 INTEGER with the given tag into out +// and advances. It reports whether the read was successful and resulted in a +// value that can be represented in an int64. +func (s *String) ReadASN1Int64WithTag(out *int64, tag asn1.Tag) bool { + var bytes String + return s.ReadASN1(&bytes, tag) && checkASN1Integer(bytes) && asn1Signed(out, bytes) +} + +// ReadASN1Enum decodes an ASN.1 ENUMERATION into out and advances. It reports +// whether the read was successful. 
+func (s *String) ReadASN1Enum(out *int) bool { + var bytes String + var i int64 + if !s.ReadASN1(&bytes, asn1.ENUM) || !checkASN1Integer(bytes) || !asn1Signed(&i, bytes) { + return false + } + if int64(int(i)) != i { + return false + } + *out = int(i) + return true +} + +func (s *String) readBase128Int(out *int) bool { + ret := 0 + for i := 0; len(*s) > 0; i++ { + if i == 5 { + return false + } + // Avoid overflowing int on a 32-bit platform. + // We don't want different behavior based on the architecture. + if ret >= 1<<(31-7) { + return false + } + ret <<= 7 + b := s.read(1)[0] + + // ITU-T X.690, section 8.19.2: + // The subidentifier shall be encoded in the fewest possible octets, + // that is, the leading octet of the subidentifier shall not have the value 0x80. + if i == 0 && b == 0x80 { + return false + } + + ret |= int(b & 0x7f) + if b&0x80 == 0 { + *out = ret + return true + } + } + return false // truncated +} + +// ReadASN1ObjectIdentifier decodes an ASN.1 OBJECT IDENTIFIER into out and +// advances. It reports whether the read was successful. +func (s *String) ReadASN1ObjectIdentifier(out *encoding_asn1.ObjectIdentifier) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.OBJECT_IDENTIFIER) || len(bytes) == 0 { + return false + } + + // In the worst case, we get two elements from the first byte (which is + // encoded differently) and then every varint is a single byte long. + components := make([]int, len(bytes)+1) + + // The first varint is 40*value1 + value2: + // According to this packing, value1 can take the values 0, 1 and 2 only. + // When value1 = 0 or value1 = 1, then value2 is <= 39. When value1 = 2, + // then there are no restrictions on value2. + var v int + if !bytes.readBase128Int(&v) { + return false + } + if v < 80 { + components[0] = v / 40 + components[1] = v % 40 + } else { + components[0] = 2 + components[1] = v - 80 + } + + i := 2 + for ; len(bytes) > 0; i++ { + if !bytes.readBase128Int(&v) { + return false + } + components[i] = v + } + *out = components[:i] + return true +} + +// ReadASN1GeneralizedTime decodes an ASN.1 GENERALIZEDTIME into out and +// advances. It reports whether the read was successful. +func (s *String) ReadASN1GeneralizedTime(out *time.Time) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.GeneralizedTime) { + return false + } + t := string(bytes) + res, err := time.Parse(generalizedTimeFormatStr, t) + if err != nil { + return false + } + if serialized := res.Format(generalizedTimeFormatStr); serialized != t { + return false + } + *out = res + return true +} + +const defaultUTCTimeFormatStr = "060102150405Z0700" + +// ReadASN1UTCTime decodes an ASN.1 UTCTime into out and advances. +// It reports whether the read was successful. +func (s *String) ReadASN1UTCTime(out *time.Time) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.UTCTime) { + return false + } + t := string(bytes) + + formatStr := defaultUTCTimeFormatStr + var err error + res, err := time.Parse(formatStr, t) + if err != nil { + // Fallback to minute precision if we can't parse second + // precision. If we are following X.509 or X.690 we shouldn't + // support this, but we do. + formatStr = "0601021504Z0700" + res, err = time.Parse(formatStr, t) + } + if err != nil { + return false + } + + if serialized := res.Format(formatStr); serialized != t { + return false + } + + if res.Year() >= 2050 { + // UTCTime interprets the low order digits 50-99 as 1950-99. + // This only applies to its use in the X.509 profile. 
+ // See https://tools.ietf.org/html/rfc5280#section-4.1.2.5.1 + res = res.AddDate(-100, 0, 0) + } + *out = res + return true +} + +// ReadASN1BitString decodes an ASN.1 BIT STRING into out and advances. +// It reports whether the read was successful. +func (s *String) ReadASN1BitString(out *encoding_asn1.BitString) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.BIT_STRING) || len(bytes) == 0 || + len(bytes)*8/8 != len(bytes) { + return false + } + + paddingBits := bytes[0] + bytes = bytes[1:] + if paddingBits > 7 || + len(bytes) == 0 && paddingBits != 0 || + len(bytes) > 0 && bytes[len(bytes)-1]&(1< 4 || len(*s) < int(2+lenLen) { + return false + } + + lenBytes := String((*s)[2 : 2+lenLen]) + if !lenBytes.readUnsigned(&len32, int(lenLen)) { + return false + } + + // ITU-T X.690 section 10.1 (DER length forms) requires encoding the length + // with the minimum number of octets. + if len32 < 128 { + // Length should have used short-form encoding. + return false + } + if len32>>((lenLen-1)*8) == 0 { + // Leading octet is 0. Length should have been at least one byte shorter. + return false + } + + headerLen = 2 + uint32(lenLen) + if headerLen+len32 < len32 { + // Overflow. + return false + } + length = headerLen + len32 + } + + if int(length) < 0 || !s.ReadBytes((*[]byte)(out), int(length)) { + return false + } + if skipHeader && !out.Skip(int(headerLen)) { + panic("cryptobyte: internal error") + } + + return true +} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/crypto/cryptobyte/builder.go temporal-1.22.5/src/vendor/golang.org/x/crypto/cryptobyte/builder.go --- temporal-1.21.5-1/src/vendor/golang.org/x/crypto/cryptobyte/builder.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/crypto/cryptobyte/builder.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,350 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cryptobyte + +import ( + "errors" + "fmt" +) + +// A Builder builds byte strings from fixed-length and length-prefixed values. +// Builders either allocate space as needed, or are ‘fixed’, which means that +// they write into a given buffer and produce an error if it's exhausted. +// +// The zero value is a usable Builder that allocates space as needed. +// +// Simple values are marshaled and appended to a Builder using methods on the +// Builder. Length-prefixed values are marshaled by providing a +// BuilderContinuation, which is a function that writes the inner contents of +// the value to a given Builder. See the documentation for BuilderContinuation +// for details. +type Builder struct { + err error + result []byte + fixedSize bool + child *Builder + offset int + pendingLenLen int + pendingIsASN1 bool + inContinuation *bool +} + +// NewBuilder creates a Builder that appends its output to the given buffer. +// Like append(), the slice will be reallocated if its capacity is exceeded. +// Use Bytes to get the final buffer. +func NewBuilder(buffer []byte) *Builder { + return &Builder{ + result: buffer, + } +} + +// NewFixedBuilder creates a Builder that appends its output into the given +// buffer. This builder does not reallocate the output buffer. Writes that +// would exceed the buffer's capacity are treated as an error. +func NewFixedBuilder(buffer []byte) *Builder { + return &Builder{ + result: buffer, + fixedSize: true, + } +} + +// SetError sets the value to be returned as the error from Bytes. 
Writes +// performed after calling SetError are ignored. +func (b *Builder) SetError(err error) { + b.err = err +} + +// Bytes returns the bytes written by the builder or an error if one has +// occurred during building. +func (b *Builder) Bytes() ([]byte, error) { + if b.err != nil { + return nil, b.err + } + return b.result[b.offset:], nil +} + +// BytesOrPanic returns the bytes written by the builder or panics if an error +// has occurred during building. +func (b *Builder) BytesOrPanic() []byte { + if b.err != nil { + panic(b.err) + } + return b.result[b.offset:] +} + +// AddUint8 appends an 8-bit value to the byte string. +func (b *Builder) AddUint8(v uint8) { + b.add(byte(v)) +} + +// AddUint16 appends a big-endian, 16-bit value to the byte string. +func (b *Builder) AddUint16(v uint16) { + b.add(byte(v>>8), byte(v)) +} + +// AddUint24 appends a big-endian, 24-bit value to the byte string. The highest +// byte of the 32-bit input value is silently truncated. +func (b *Builder) AddUint24(v uint32) { + b.add(byte(v>>16), byte(v>>8), byte(v)) +} + +// AddUint32 appends a big-endian, 32-bit value to the byte string. +func (b *Builder) AddUint32(v uint32) { + b.add(byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) +} + +// AddUint48 appends a big-endian, 48-bit value to the byte string. +func (b *Builder) AddUint48(v uint64) { + b.add(byte(v>>40), byte(v>>32), byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) +} + +// AddUint64 appends a big-endian, 64-bit value to the byte string. +func (b *Builder) AddUint64(v uint64) { + b.add(byte(v>>56), byte(v>>48), byte(v>>40), byte(v>>32), byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) +} + +// AddBytes appends a sequence of bytes to the byte string. +func (b *Builder) AddBytes(v []byte) { + b.add(v...) +} + +// BuilderContinuation is a continuation-passing interface for building +// length-prefixed byte sequences. Builder methods for length-prefixed +// sequences (AddUint8LengthPrefixed etc) will invoke the BuilderContinuation +// supplied to them. The child builder passed to the continuation can be used +// to build the content of the length-prefixed sequence. For example: +// +// parent := cryptobyte.NewBuilder() +// parent.AddUint8LengthPrefixed(func (child *Builder) { +// child.AddUint8(42) +// child.AddUint8LengthPrefixed(func (grandchild *Builder) { +// grandchild.AddUint8(5) +// }) +// }) +// +// It is an error to write more bytes to the child than allowed by the reserved +// length prefix. After the continuation returns, the child must be considered +// invalid, i.e. users must not store any copies or references of the child +// that outlive the continuation. +// +// If the continuation panics with a value of type BuildError then the inner +// error will be returned as the error from Bytes. If the child panics +// otherwise then Bytes will repanic with the same value. +type BuilderContinuation func(child *Builder) + +// BuildError wraps an error. If a BuilderContinuation panics with this value, +// the panic will be recovered and the inner error will be returned from +// Builder.Bytes. +type BuildError struct { + Err error +} + +// AddUint8LengthPrefixed adds a 8-bit length-prefixed byte sequence. +func (b *Builder) AddUint8LengthPrefixed(f BuilderContinuation) { + b.addLengthPrefixed(1, false, f) +} + +// AddUint16LengthPrefixed adds a big-endian, 16-bit length-prefixed byte sequence. 
+func (b *Builder) AddUint16LengthPrefixed(f BuilderContinuation) { + b.addLengthPrefixed(2, false, f) +} + +// AddUint24LengthPrefixed adds a big-endian, 24-bit length-prefixed byte sequence. +func (b *Builder) AddUint24LengthPrefixed(f BuilderContinuation) { + b.addLengthPrefixed(3, false, f) +} + +// AddUint32LengthPrefixed adds a big-endian, 32-bit length-prefixed byte sequence. +func (b *Builder) AddUint32LengthPrefixed(f BuilderContinuation) { + b.addLengthPrefixed(4, false, f) +} + +func (b *Builder) callContinuation(f BuilderContinuation, arg *Builder) { + if !*b.inContinuation { + *b.inContinuation = true + + defer func() { + *b.inContinuation = false + + r := recover() + if r == nil { + return + } + + if buildError, ok := r.(BuildError); ok { + b.err = buildError.Err + } else { + panic(r) + } + }() + } + + f(arg) +} + +func (b *Builder) addLengthPrefixed(lenLen int, isASN1 bool, f BuilderContinuation) { + // Subsequent writes can be ignored if the builder has encountered an error. + if b.err != nil { + return + } + + offset := len(b.result) + b.add(make([]byte, lenLen)...) + + if b.inContinuation == nil { + b.inContinuation = new(bool) + } + + b.child = &Builder{ + result: b.result, + fixedSize: b.fixedSize, + offset: offset, + pendingLenLen: lenLen, + pendingIsASN1: isASN1, + inContinuation: b.inContinuation, + } + + b.callContinuation(f, b.child) + b.flushChild() + if b.child != nil { + panic("cryptobyte: internal error") + } +} + +func (b *Builder) flushChild() { + if b.child == nil { + return + } + b.child.flushChild() + child := b.child + b.child = nil + + if child.err != nil { + b.err = child.err + return + } + + length := len(child.result) - child.pendingLenLen - child.offset + + if length < 0 { + panic("cryptobyte: internal error") // result unexpectedly shrunk + } + + if child.pendingIsASN1 { + // For ASN.1, we reserved a single byte for the length. If that turned out + // to be incorrect, we have to move the contents along in order to make + // space. + if child.pendingLenLen != 1 { + panic("cryptobyte: internal error") + } + var lenLen, lenByte uint8 + if int64(length) > 0xfffffffe { + b.err = errors.New("pending ASN.1 child too long") + return + } else if length > 0xffffff { + lenLen = 5 + lenByte = 0x80 | 4 + } else if length > 0xffff { + lenLen = 4 + lenByte = 0x80 | 3 + } else if length > 0xff { + lenLen = 3 + lenByte = 0x80 | 2 + } else if length > 0x7f { + lenLen = 2 + lenByte = 0x80 | 1 + } else { + lenLen = 1 + lenByte = uint8(length) + length = 0 + } + + // Insert the initial length byte, make space for successive length bytes, + // and adjust the offset. + child.result[child.offset] = lenByte + extraBytes := int(lenLen - 1) + if extraBytes != 0 { + child.add(make([]byte, extraBytes)...) 
+ childStart := child.offset + child.pendingLenLen + copy(child.result[childStart+extraBytes:], child.result[childStart:]) + } + child.offset++ + child.pendingLenLen = extraBytes + } + + l := length + for i := child.pendingLenLen - 1; i >= 0; i-- { + child.result[child.offset+i] = uint8(l) + l >>= 8 + } + if l != 0 { + b.err = fmt.Errorf("cryptobyte: pending child length %d exceeds %d-byte length prefix", length, child.pendingLenLen) + return + } + + if b.fixedSize && &b.result[0] != &child.result[0] { + panic("cryptobyte: BuilderContinuation reallocated a fixed-size buffer") + } + + b.result = child.result +} + +func (b *Builder) add(bytes ...byte) { + if b.err != nil { + return + } + if b.child != nil { + panic("cryptobyte: attempted write while child is pending") + } + if len(b.result)+len(bytes) < len(bytes) { + b.err = errors.New("cryptobyte: length overflow") + } + if b.fixedSize && len(b.result)+len(bytes) > cap(b.result) { + b.err = errors.New("cryptobyte: Builder is exceeding its fixed-size buffer") + return + } + b.result = append(b.result, bytes...) +} + +// Unwrite rolls back non-negative n bytes written directly to the Builder. +// An attempt by a child builder passed to a continuation to unwrite bytes +// from its parent will panic. +func (b *Builder) Unwrite(n int) { + if b.err != nil { + return + } + if b.child != nil { + panic("cryptobyte: attempted unwrite while child is pending") + } + length := len(b.result) - b.pendingLenLen - b.offset + if length < 0 { + panic("cryptobyte: internal error") + } + if n < 0 { + panic("cryptobyte: attempted to unwrite negative number of bytes") + } + if n > length { + panic("cryptobyte: attempted to unwrite more than was written") + } + b.result = b.result[:len(b.result)-n] +} + +// A MarshalingValue marshals itself into a Builder. +type MarshalingValue interface { + // Marshal is called by Builder.AddValue. It receives a pointer to a builder + // to marshal itself into. It may return an error that occurred during + // marshaling, such as unset or invalid values. + Marshal(b *Builder) error +} + +// AddValue calls Marshal on v, passing a pointer to the builder to append to. +// If Marshal returns an error, it is set on the Builder so that subsequent +// appends don't have an effect. +func (b *Builder) AddValue(v MarshalingValue) { + err := v.Marshal(b) + if err != nil { + b.err = err + } +} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/crypto/cryptobyte/string.go temporal-1.22.5/src/vendor/golang.org/x/crypto/cryptobyte/string.go --- temporal-1.21.5-1/src/vendor/golang.org/x/crypto/cryptobyte/string.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/crypto/cryptobyte/string.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,183 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cryptobyte contains types that help with parsing and constructing +// length-prefixed, binary messages, including ASN.1 DER. (The asn1 subpackage +// contains useful ASN.1 constants.) +// +// The String type is for parsing. It wraps a []byte slice and provides helper +// functions for consuming structures, value by value. +// +// The Builder type is for constructing messages. It providers helper functions +// for appending values and also for appending length-prefixed submessages – +// without having to worry about calculating the length prefix ahead of time. 
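
The Builder and String halves of cryptobyte described above compose naturally; a small round trip through the ASN.1 helpers from this hunk, a sketch only with arbitrary example values, would look like:

package main

import (
	"fmt"

	"golang.org/x/crypto/cryptobyte"
	"golang.org/x/crypto/cryptobyte/asn1"
)

func main() {
	// Build a DER SEQUENCE holding two INTEGERs; the zero-value Builder
	// allocates as needed and fixes up the length prefix on Bytes.
	var b cryptobyte.Builder
	b.AddASN1(asn1.SEQUENCE, func(body *cryptobyte.Builder) {
		body.AddASN1Int64(42)
		body.AddASN1Int64(-7)
	})
	der, err := b.Bytes()
	if err != nil {
		panic(err)
	}

	// Parse it back with String; every Read* reports success as a bool.
	s := cryptobyte.String(der)
	var seq cryptobyte.String
	var x, y int64
	if !s.ReadASN1(&seq, asn1.SEQUENCE) ||
		!seq.ReadASN1Integer(&x) ||
		!seq.ReadASN1Integer(&y) {
		panic("malformed DER")
	}
	fmt.Println(x, y) // 42 -7
}
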
+// +// See the documentation and examples for the Builder and String types to get +// started. +package cryptobyte // import "golang.org/x/crypto/cryptobyte" + +// String represents a string of bytes. It provides methods for parsing +// fixed-length and length-prefixed values from it. +type String []byte + +// read advances a String by n bytes and returns them. If less than n bytes +// remain, it returns nil. +func (s *String) read(n int) []byte { + if len(*s) < n || n < 0 { + return nil + } + v := (*s)[:n] + *s = (*s)[n:] + return v +} + +// Skip advances the String by n byte and reports whether it was successful. +func (s *String) Skip(n int) bool { + return s.read(n) != nil +} + +// ReadUint8 decodes an 8-bit value into out and advances over it. +// It reports whether the read was successful. +func (s *String) ReadUint8(out *uint8) bool { + v := s.read(1) + if v == nil { + return false + } + *out = uint8(v[0]) + return true +} + +// ReadUint16 decodes a big-endian, 16-bit value into out and advances over it. +// It reports whether the read was successful. +func (s *String) ReadUint16(out *uint16) bool { + v := s.read(2) + if v == nil { + return false + } + *out = uint16(v[0])<<8 | uint16(v[1]) + return true +} + +// ReadUint24 decodes a big-endian, 24-bit value into out and advances over it. +// It reports whether the read was successful. +func (s *String) ReadUint24(out *uint32) bool { + v := s.read(3) + if v == nil { + return false + } + *out = uint32(v[0])<<16 | uint32(v[1])<<8 | uint32(v[2]) + return true +} + +// ReadUint32 decodes a big-endian, 32-bit value into out and advances over it. +// It reports whether the read was successful. +func (s *String) ReadUint32(out *uint32) bool { + v := s.read(4) + if v == nil { + return false + } + *out = uint32(v[0])<<24 | uint32(v[1])<<16 | uint32(v[2])<<8 | uint32(v[3]) + return true +} + +// ReadUint48 decodes a big-endian, 48-bit value into out and advances over it. +// It reports whether the read was successful. +func (s *String) ReadUint48(out *uint64) bool { + v := s.read(6) + if v == nil { + return false + } + *out = uint64(v[0])<<40 | uint64(v[1])<<32 | uint64(v[2])<<24 | uint64(v[3])<<16 | uint64(v[4])<<8 | uint64(v[5]) + return true +} + +// ReadUint64 decodes a big-endian, 64-bit value into out and advances over it. +// It reports whether the read was successful. +func (s *String) ReadUint64(out *uint64) bool { + v := s.read(8) + if v == nil { + return false + } + *out = uint64(v[0])<<56 | uint64(v[1])<<48 | uint64(v[2])<<40 | uint64(v[3])<<32 | uint64(v[4])<<24 | uint64(v[5])<<16 | uint64(v[6])<<8 | uint64(v[7]) + return true +} + +func (s *String) readUnsigned(out *uint32, length int) bool { + v := s.read(length) + if v == nil { + return false + } + var result uint32 + for i := 0; i < length; i++ { + result <<= 8 + result |= uint32(v[i]) + } + *out = result + return true +} + +func (s *String) readLengthPrefixed(lenLen int, outChild *String) bool { + lenBytes := s.read(lenLen) + if lenBytes == nil { + return false + } + var length uint32 + for _, b := range lenBytes { + length = length << 8 + length = length | uint32(b) + } + v := s.read(int(length)) + if v == nil { + return false + } + *outChild = v + return true +} + +// ReadUint8LengthPrefixed reads the content of an 8-bit length-prefixed value +// into out and advances over it. It reports whether the read was successful. 
+func (s *String) ReadUint8LengthPrefixed(out *String) bool { + return s.readLengthPrefixed(1, out) +} + +// ReadUint16LengthPrefixed reads the content of a big-endian, 16-bit +// length-prefixed value into out and advances over it. It reports whether the +// read was successful. +func (s *String) ReadUint16LengthPrefixed(out *String) bool { + return s.readLengthPrefixed(2, out) +} + +// ReadUint24LengthPrefixed reads the content of a big-endian, 24-bit +// length-prefixed value into out and advances over it. It reports whether +// the read was successful. +func (s *String) ReadUint24LengthPrefixed(out *String) bool { + return s.readLengthPrefixed(3, out) +} + +// ReadBytes reads n bytes into out and advances over them. It reports +// whether the read was successful. +func (s *String) ReadBytes(out *[]byte, n int) bool { + v := s.read(n) + if v == nil { + return false + } + *out = v + return true +} + +// CopyBytes copies len(out) bytes into out and advances over them. It reports +// whether the copy operation was successful +func (s *String) CopyBytes(out []byte) bool { + n := len(out) + v := s.read(n) + if v == nil { + return false + } + return copy(out, v) == n +} + +// Empty reports whether the string does not contain any bytes. +func (s String) Empty() bool { + return len(s) == 0 +} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/crypto/hkdf/hkdf.go temporal-1.22.5/src/vendor/golang.org/x/crypto/hkdf/hkdf.go --- temporal-1.21.5-1/src/vendor/golang.org/x/crypto/hkdf/hkdf.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/crypto/hkdf/hkdf.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,93 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package hkdf implements the HMAC-based Extract-and-Expand Key Derivation +// Function (HKDF) as defined in RFC 5869. +// +// HKDF is a cryptographic key derivation function (KDF) with the goal of +// expanding limited input keying material into one or more cryptographically +// strong secret keys. +package hkdf // import "golang.org/x/crypto/hkdf" + +import ( + "crypto/hmac" + "errors" + "hash" + "io" +) + +// Extract generates a pseudorandom key for use with Expand from an input secret +// and an optional independent salt. +// +// Only use this function if you need to reuse the extracted key with multiple +// Expand invocations and different context values. Most common scenarios, +// including the generation of multiple keys, should use New instead. 
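
Back to the string.go hunk that closed just above: its fixed-width and length-prefixed readers are aimed at wire formats such as TLS, where parsing is a sequence of Read calls that each report success. A sketch with a made-up two-level encoding (parseItems and the sample bytes are illustrative, not from the diff):

package main

import (
	"fmt"

	"golang.org/x/crypto/cryptobyte"
)

// parseItems reads a 16-bit length-prefixed list of 8-bit length-prefixed items.
func parseItems(wire []byte) ([][]byte, bool) {
	s := cryptobyte.String(wire)
	var list cryptobyte.String
	if !s.ReadUint16LengthPrefixed(&list) || !s.Empty() {
		return nil, false
	}
	var items [][]byte
	for !list.Empty() {
		var item cryptobyte.String
		if !list.ReadUint8LengthPrefixed(&item) {
			return nil, false
		}
		items = append(items, item)
	}
	return items, true
}

func main() {
	wire := []byte{0x00, 0x06, 0x02, 'a', 'b', 0x02, 'c', 'd'}
	items, ok := parseItems(wire)
	fmt.Println(ok, len(items)) // true 2
}
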
+func Extract(hash func() hash.Hash, secret, salt []byte) []byte { + if salt == nil { + salt = make([]byte, hash().Size()) + } + extractor := hmac.New(hash, salt) + extractor.Write(secret) + return extractor.Sum(nil) +} + +type hkdf struct { + expander hash.Hash + size int + + info []byte + counter byte + + prev []byte + buf []byte +} + +func (f *hkdf) Read(p []byte) (int, error) { + // Check whether enough data can be generated + need := len(p) + remains := len(f.buf) + int(255-f.counter+1)*f.size + if remains < need { + return 0, errors.New("hkdf: entropy limit reached") + } + // Read any leftover from the buffer + n := copy(p, f.buf) + p = p[n:] + + // Fill the rest of the buffer + for len(p) > 0 { + f.expander.Reset() + f.expander.Write(f.prev) + f.expander.Write(f.info) + f.expander.Write([]byte{f.counter}) + f.prev = f.expander.Sum(f.prev[:0]) + f.counter++ + + // Copy the new batch into p + f.buf = f.prev + n = copy(p, f.buf) + p = p[n:] + } + // Save leftovers for next run + f.buf = f.buf[n:] + + return need, nil +} + +// Expand returns a Reader, from which keys can be read, using the given +// pseudorandom key and optional context info, skipping the extraction step. +// +// The pseudorandomKey should have been generated by Extract, or be a uniformly +// random or pseudorandom cryptographically strong key. See RFC 5869, Section +// 3.3. Most common scenarios will want to use New instead. +func Expand(hash func() hash.Hash, pseudorandomKey, info []byte) io.Reader { + expander := hmac.New(hash, pseudorandomKey) + return &hkdf{expander, expander.Size(), info, 1, nil, nil} +} + +// New returns a Reader, from which keys can be read, using the given hash, +// secret, salt and context info. Salt and info can be nil. +func New(hash func() hash.Hash, secret, salt, info []byte) io.Reader { + prk := Extract(hash, secret, salt) + return Expand(hash, prk, info) +} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/crypto/internal/alias/alias.go temporal-1.22.5/src/vendor/golang.org/x/crypto/internal/alias/alias.go --- temporal-1.21.5-1/src/vendor/golang.org/x/crypto/internal/alias/alias.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/crypto/internal/alias/alias.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,32 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !purego +// +build !purego + +// Package alias implements memory aliasing tests. +package alias + +import "unsafe" + +// AnyOverlap reports whether x and y share memory at any (not necessarily +// corresponding) index. The memory beyond the slice length is ignored. +func AnyOverlap(x, y []byte) bool { + return len(x) > 0 && len(y) > 0 && + uintptr(unsafe.Pointer(&x[0])) <= uintptr(unsafe.Pointer(&y[len(y)-1])) && + uintptr(unsafe.Pointer(&y[0])) <= uintptr(unsafe.Pointer(&x[len(x)-1])) +} + +// InexactOverlap reports whether x and y share memory at any non-corresponding +// index. The memory beyond the slice length is ignored. Note that x and y can +// have different lengths and still not have any inexact overlap. +// +// InexactOverlap can be used to implement the requirements of the crypto/cipher +// AEAD, Block, BlockMode and Stream interfaces. 
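
The hkdf package added above exposes key derivation as an io.Reader, and per its comments most callers want New rather than a separate Extract/Expand. A usage sketch (SHA-256, the salt handling, and the info string are arbitrary example choices):

package hkdfsketch

import (
	"crypto/sha256"
	"io"

	"golang.org/x/crypto/hkdf"
)

// deriveKeys expands one input secret into two independent 32-byte keys.
func deriveKeys(secret, salt []byte) (encKey, macKey []byte, err error) {
	r := hkdf.New(sha256.New, secret, salt, []byte("example/v1 keys"))
	encKey = make([]byte, 32)
	macKey = make([]byte, 32)
	if _, err = io.ReadFull(r, encKey); err != nil {
		return nil, nil, err
	}
	if _, err = io.ReadFull(r, macKey); err != nil {
		return nil, nil, err
	}
	return encKey, macKey, nil
}
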
+func InexactOverlap(x, y []byte) bool { + if len(x) == 0 || len(y) == 0 || &x[0] == &y[0] { + return false + } + return AnyOverlap(x, y) +} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/crypto/internal/alias/alias_purego.go temporal-1.22.5/src/vendor/golang.org/x/crypto/internal/alias/alias_purego.go --- temporal-1.21.5-1/src/vendor/golang.org/x/crypto/internal/alias/alias_purego.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/crypto/internal/alias/alias_purego.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,35 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build purego +// +build purego + +// Package alias implements memory aliasing tests. +package alias + +// This is the Google App Engine standard variant based on reflect +// because the unsafe package and cgo are disallowed. + +import "reflect" + +// AnyOverlap reports whether x and y share memory at any (not necessarily +// corresponding) index. The memory beyond the slice length is ignored. +func AnyOverlap(x, y []byte) bool { + return len(x) > 0 && len(y) > 0 && + reflect.ValueOf(&x[0]).Pointer() <= reflect.ValueOf(&y[len(y)-1]).Pointer() && + reflect.ValueOf(&y[0]).Pointer() <= reflect.ValueOf(&x[len(x)-1]).Pointer() +} + +// InexactOverlap reports whether x and y share memory at any non-corresponding +// index. The memory beyond the slice length is ignored. Note that x and y can +// have different lengths and still not have any inexact overlap. +// +// InexactOverlap can be used to implement the requirements of the crypto/cipher +// AEAD, Block, BlockMode and Stream interfaces. +func InexactOverlap(x, y []byte) bool { + if len(x) == 0 || len(y) == 0 || &x[0] == &y[0] { + return false + } + return AnyOverlap(x, y) +} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go temporal-1.22.5/src/vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go --- temporal-1.21.5-1/src/vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,40 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.13 +// +build !go1.13 + +package poly1305 + +// Generic fallbacks for the math/bits intrinsics, copied from +// src/math/bits/bits.go. They were added in Go 1.12, but Add64 and Sum64 had +// variable time fallbacks until Go 1.13. 
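
Both alias variants above answer the same question the cipher interfaces care about: do two buffers overlap at non-corresponding indices? A tiny sketch of the distinction (the package is internal to x/crypto, so this would only compile from within that module; buf and the offsets are arbitrary):

package aliassketch

import "golang.org/x/crypto/internal/alias"

func overlapExamples() (identical, shifted bool) {
	buf := make([]byte, 32)
	dst := buf[:16]
	identical = alias.InexactOverlap(dst, dst)     // false: same slice, indices correspond
	shifted = alias.InexactOverlap(dst, buf[8:24]) // true: same array, offsets differ
	return identical, shifted
}
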
+ +func bitsAdd64(x, y, carry uint64) (sum, carryOut uint64) { + sum = x + y + carry + carryOut = ((x & y) | ((x | y) &^ sum)) >> 63 + return +} + +func bitsSub64(x, y, borrow uint64) (diff, borrowOut uint64) { + diff = x - y - borrow + borrowOut = ((^x & y) | (^(x ^ y) & diff)) >> 63 + return +} + +func bitsMul64(x, y uint64) (hi, lo uint64) { + const mask32 = 1<<32 - 1 + x0 := x & mask32 + x1 := x >> 32 + y0 := y & mask32 + y1 := y >> 32 + w0 := x0 * y0 + t := x1*y0 + w0>>32 + w1 := t & mask32 + w2 := t >> 32 + w1 += x0 * y1 + hi = x1*y1 + w2 + w1>>32 + lo = x * y + return +} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go temporal-1.22.5/src/vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go --- temporal-1.21.5-1/src/vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,22 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.13 +// +build go1.13 + +package poly1305 + +import "math/bits" + +func bitsAdd64(x, y, carry uint64) (sum, carryOut uint64) { + return bits.Add64(x, y, carry) +} + +func bitsSub64(x, y, borrow uint64) (diff, borrowOut uint64) { + return bits.Sub64(x, y, borrow) +} + +func bitsMul64(x, y uint64) (hi, lo uint64) { + return bits.Mul64(x, y) +} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go temporal-1.22.5/src/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go --- temporal-1.21.5-1/src/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,10 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (!amd64 && !ppc64le && !s390x) || !gc || purego +// +build !amd64,!ppc64le,!s390x !gc purego + +package poly1305 + +type mac struct{ macGeneric } diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/crypto/internal/poly1305/poly1305.go temporal-1.22.5/src/vendor/golang.org/x/crypto/internal/poly1305/poly1305.go --- temporal-1.21.5-1/src/vendor/golang.org/x/crypto/internal/poly1305/poly1305.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/crypto/internal/poly1305/poly1305.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,99 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package poly1305 implements Poly1305 one-time message authentication code as +// specified in https://cr.yp.to/mac/poly1305-20050329.pdf. +// +// Poly1305 is a fast, one-time authentication function. It is infeasible for an +// attacker to generate an authenticator for a message without the key. However, a +// key must only be used for a single message. Authenticating two different +// messages with the same key allows an attacker to forge authenticators for other +// messages with the same key. +// +// Poly1305 was originally coupled with AES in order to make Poly1305-AES. AES was +// used with a fixed key in order to generate one-time keys from an nonce. 
+// However, in this package AES isn't used and the one-time key is specified +// directly. +package poly1305 + +import "crypto/subtle" + +// TagSize is the size, in bytes, of a poly1305 authenticator. +const TagSize = 16 + +// Sum generates an authenticator for msg using a one-time key and puts the +// 16-byte result into out. Authenticating two different messages with the same +// key allows an attacker to forge messages at will. +func Sum(out *[16]byte, m []byte, key *[32]byte) { + h := New(key) + h.Write(m) + h.Sum(out[:0]) +} + +// Verify returns true if mac is a valid authenticator for m with the given key. +func Verify(mac *[16]byte, m []byte, key *[32]byte) bool { + var tmp [16]byte + Sum(&tmp, m, key) + return subtle.ConstantTimeCompare(tmp[:], mac[:]) == 1 +} + +// New returns a new MAC computing an authentication +// tag of all data written to it with the given key. +// This allows writing the message progressively instead +// of passing it as a single slice. Common users should use +// the Sum function instead. +// +// The key must be unique for each message, as authenticating +// two different messages with the same key allows an attacker +// to forge messages at will. +func New(key *[32]byte) *MAC { + m := &MAC{} + initialize(key, &m.macState) + return m +} + +// MAC is an io.Writer computing an authentication tag +// of the data written to it. +// +// MAC cannot be used like common hash.Hash implementations, +// because using a poly1305 key twice breaks its security. +// Therefore writing data to a running MAC after calling +// Sum or Verify causes it to panic. +type MAC struct { + mac // platform-dependent implementation + + finalized bool +} + +// Size returns the number of bytes Sum will return. +func (h *MAC) Size() int { return TagSize } + +// Write adds more data to the running message authentication code. +// It never returns an error. +// +// It must not be called after the first call of Sum or Verify. +func (h *MAC) Write(p []byte) (n int, err error) { + if h.finalized { + panic("poly1305: write to MAC after Sum or Verify") + } + return h.mac.Write(p) +} + +// Sum computes the authenticator of all data written to the +// message authentication code. +func (h *MAC) Sum(b []byte) []byte { + var mac [TagSize]byte + h.mac.Sum(&mac) + h.finalized = true + return append(b, mac[:]...) +} + +// Verify returns whether the authenticator of all data written to +// the message authentication code matches the expected value. +func (h *MAC) Verify(expected []byte) bool { + var mac [TagSize]byte + h.mac.Sum(&mac) + h.finalized = true + return subtle.ConstantTimeCompare(expected, mac[:]) == 1 +} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go temporal-1.22.5/src/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go --- temporal-1.21.5-1/src/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,48 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc && !purego +// +build gc,!purego + +package poly1305 + +//go:noescape +func update(state *macState, msg []byte) + +// mac is a wrapper for macGeneric that redirects calls that would have gone to +// updateGeneric to update. 
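
The MAC type above insists on a fresh one-time key per message; for whole messages the package-level Sum and Verify pair is the simplest entry point. A sketch of that one-shot use (the poly1305 package is internal to x/crypto, so this would live inside that module; tagAndCheck is a hypothetical name):

package polysketch

import "golang.org/x/crypto/internal/poly1305"

// tagAndCheck authenticates msg with a single-use key and immediately
// verifies the tag it just produced.
func tagAndCheck(msg []byte, oneTimeKey *[32]byte) bool {
	var tag [16]byte
	poly1305.Sum(&tag, msg, oneTimeKey) // the key must never authenticate a second message
	return poly1305.Verify(&tag, msg, oneTimeKey)
}
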
+// +// Its Write and Sum methods are otherwise identical to the macGeneric ones, but +// using function pointers would carry a major performance cost. +type mac struct{ macGeneric } + +func (h *mac) Write(p []byte) (int, error) { + nn := len(p) + if h.offset > 0 { + n := copy(h.buffer[h.offset:], p) + if h.offset+n < TagSize { + h.offset += n + return nn, nil + } + p = p[n:] + h.offset = 0 + update(&h.macState, h.buffer[:]) + } + if n := len(p) - (len(p) % TagSize); n > 0 { + update(&h.macState, p[:n]) + p = p[n:] + } + if len(p) > 0 { + h.offset += copy(h.buffer[h.offset:], p) + } + return nn, nil +} + +func (h *mac) Sum(out *[16]byte) { + state := h.macState + if h.offset > 0 { + update(&state, h.buffer[:h.offset]) + } + finalize(out, &state.h, &state.s) +} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s temporal-1.22.5/src/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s --- temporal-1.21.5-1/src/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,109 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc && !purego +// +build gc,!purego + +#include "textflag.h" + +#define POLY1305_ADD(msg, h0, h1, h2) \ + ADDQ 0(msg), h0; \ + ADCQ 8(msg), h1; \ + ADCQ $1, h2; \ + LEAQ 16(msg), msg + +#define POLY1305_MUL(h0, h1, h2, r0, r1, t0, t1, t2, t3) \ + MOVQ r0, AX; \ + MULQ h0; \ + MOVQ AX, t0; \ + MOVQ DX, t1; \ + MOVQ r0, AX; \ + MULQ h1; \ + ADDQ AX, t1; \ + ADCQ $0, DX; \ + MOVQ r0, t2; \ + IMULQ h2, t2; \ + ADDQ DX, t2; \ + \ + MOVQ r1, AX; \ + MULQ h0; \ + ADDQ AX, t1; \ + ADCQ $0, DX; \ + MOVQ DX, h0; \ + MOVQ r1, t3; \ + IMULQ h2, t3; \ + MOVQ r1, AX; \ + MULQ h1; \ + ADDQ AX, t2; \ + ADCQ DX, t3; \ + ADDQ h0, t2; \ + ADCQ $0, t3; \ + \ + MOVQ t0, h0; \ + MOVQ t1, h1; \ + MOVQ t2, h2; \ + ANDQ $3, h2; \ + MOVQ t2, t0; \ + ANDQ $0xFFFFFFFFFFFFFFFC, t0; \ + ADDQ t0, h0; \ + ADCQ t3, h1; \ + ADCQ $0, h2; \ + SHRQ $2, t3, t2; \ + SHRQ $2, t3; \ + ADDQ t2, h0; \ + ADCQ t3, h1; \ + ADCQ $0, h2 + +// func update(state *[7]uint64, msg []byte) +TEXT ·update(SB), $0-32 + MOVQ state+0(FP), DI + MOVQ msg_base+8(FP), SI + MOVQ msg_len+16(FP), R15 + + MOVQ 0(DI), R8 // h0 + MOVQ 8(DI), R9 // h1 + MOVQ 16(DI), R10 // h2 + MOVQ 24(DI), R11 // r0 + MOVQ 32(DI), R12 // r1 + + CMPQ R15, $16 + JB bytes_between_0_and_15 + +loop: + POLY1305_ADD(SI, R8, R9, R10) + +multiply: + POLY1305_MUL(R8, R9, R10, R11, R12, BX, CX, R13, R14) + SUBQ $16, R15 + CMPQ R15, $16 + JAE loop + +bytes_between_0_and_15: + TESTQ R15, R15 + JZ done + MOVQ $1, BX + XORQ CX, CX + XORQ R13, R13 + ADDQ R15, SI + +flush_buffer: + SHLQ $8, BX, CX + SHLQ $8, BX + MOVB -1(SI), R13 + XORQ R13, BX + DECQ SI + DECQ R15 + JNZ flush_buffer + + ADDQ BX, R8 + ADCQ CX, R9 + ADCQ $0, R10 + MOVQ $16, R15 + JMP multiply + +done: + MOVQ R8, 0(DI) + MOVQ R9, 8(DI) + MOVQ R10, 16(DI) + RET diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go temporal-1.22.5/src/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go --- temporal-1.21.5-1/src/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,309 @@ 
+// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file provides the generic implementation of Sum and MAC. Other files +// might provide optimized assembly implementations of some of this code. + +package poly1305 + +import "encoding/binary" + +// Poly1305 [RFC 7539] is a relatively simple algorithm: the authentication tag +// for a 64 bytes message is approximately +// +// s + m[0:16] * r⁴ + m[16:32] * r³ + m[32:48] * r² + m[48:64] * r mod 2¹³⁰ - 5 +// +// for some secret r and s. It can be computed sequentially like +// +// for len(msg) > 0: +// h += read(msg, 16) +// h *= r +// h %= 2¹³⁰ - 5 +// return h + s +// +// All the complexity is about doing performant constant-time math on numbers +// larger than any available numeric type. + +func sumGeneric(out *[TagSize]byte, msg []byte, key *[32]byte) { + h := newMACGeneric(key) + h.Write(msg) + h.Sum(out) +} + +func newMACGeneric(key *[32]byte) macGeneric { + m := macGeneric{} + initialize(key, &m.macState) + return m +} + +// macState holds numbers in saturated 64-bit little-endian limbs. That is, +// the value of [x0, x1, x2] is x[0] + x[1] * 2⁶⁴ + x[2] * 2¹²⁸. +type macState struct { + // h is the main accumulator. It is to be interpreted modulo 2¹³⁰ - 5, but + // can grow larger during and after rounds. It must, however, remain below + // 2 * (2¹³⁰ - 5). + h [3]uint64 + // r and s are the private key components. + r [2]uint64 + s [2]uint64 +} + +type macGeneric struct { + macState + + buffer [TagSize]byte + offset int +} + +// Write splits the incoming message into TagSize chunks, and passes them to +// update. It buffers incomplete chunks. +func (h *macGeneric) Write(p []byte) (int, error) { + nn := len(p) + if h.offset > 0 { + n := copy(h.buffer[h.offset:], p) + if h.offset+n < TagSize { + h.offset += n + return nn, nil + } + p = p[n:] + h.offset = 0 + updateGeneric(&h.macState, h.buffer[:]) + } + if n := len(p) - (len(p) % TagSize); n > 0 { + updateGeneric(&h.macState, p[:n]) + p = p[n:] + } + if len(p) > 0 { + h.offset += copy(h.buffer[h.offset:], p) + } + return nn, nil +} + +// Sum flushes the last incomplete chunk from the buffer, if any, and generates +// the MAC output. It does not modify its state, in order to allow for multiple +// calls to Sum, even if no Write is allowed after Sum. +func (h *macGeneric) Sum(out *[TagSize]byte) { + state := h.macState + if h.offset > 0 { + updateGeneric(&state, h.buffer[:h.offset]) + } + finalize(out, &state.h, &state.s) +} + +// [rMask0, rMask1] is the specified Poly1305 clamping mask in little-endian. It +// clears some bits of the secret coefficient to make it possible to implement +// multiplication more efficiently. +const ( + rMask0 = 0x0FFFFFFC0FFFFFFF + rMask1 = 0x0FFFFFFC0FFFFFFC +) + +// initialize loads the 256-bit key into the two 128-bit secret values r and s. +func initialize(key *[32]byte, m *macState) { + m.r[0] = binary.LittleEndian.Uint64(key[0:8]) & rMask0 + m.r[1] = binary.LittleEndian.Uint64(key[8:16]) & rMask1 + m.s[0] = binary.LittleEndian.Uint64(key[16:24]) + m.s[1] = binary.LittleEndian.Uint64(key[24:32]) +} + +// uint128 holds a 128-bit number as two 64-bit limbs, for use with the +// bits.Mul64 and bits.Add64 intrinsics. 
+type uint128 struct { + lo, hi uint64 +} + +func mul64(a, b uint64) uint128 { + hi, lo := bitsMul64(a, b) + return uint128{lo, hi} +} + +func add128(a, b uint128) uint128 { + lo, c := bitsAdd64(a.lo, b.lo, 0) + hi, c := bitsAdd64(a.hi, b.hi, c) + if c != 0 { + panic("poly1305: unexpected overflow") + } + return uint128{lo, hi} +} + +func shiftRightBy2(a uint128) uint128 { + a.lo = a.lo>>2 | (a.hi&3)<<62 + a.hi = a.hi >> 2 + return a +} + +// updateGeneric absorbs msg into the state.h accumulator. For each chunk m of +// 128 bits of message, it computes +// +// h₊ = (h + m) * r mod 2¹³⁰ - 5 +// +// If the msg length is not a multiple of TagSize, it assumes the last +// incomplete chunk is the final one. +func updateGeneric(state *macState, msg []byte) { + h0, h1, h2 := state.h[0], state.h[1], state.h[2] + r0, r1 := state.r[0], state.r[1] + + for len(msg) > 0 { + var c uint64 + + // For the first step, h + m, we use a chain of bits.Add64 intrinsics. + // The resulting value of h might exceed 2¹³⁰ - 5, but will be partially + // reduced at the end of the multiplication below. + // + // The spec requires us to set a bit just above the message size, not to + // hide leading zeroes. For full chunks, that's 1 << 128, so we can just + // add 1 to the most significant (2¹²⁸) limb, h2. + if len(msg) >= TagSize { + h0, c = bitsAdd64(h0, binary.LittleEndian.Uint64(msg[0:8]), 0) + h1, c = bitsAdd64(h1, binary.LittleEndian.Uint64(msg[8:16]), c) + h2 += c + 1 + + msg = msg[TagSize:] + } else { + var buf [TagSize]byte + copy(buf[:], msg) + buf[len(msg)] = 1 + + h0, c = bitsAdd64(h0, binary.LittleEndian.Uint64(buf[0:8]), 0) + h1, c = bitsAdd64(h1, binary.LittleEndian.Uint64(buf[8:16]), c) + h2 += c + + msg = nil + } + + // Multiplication of big number limbs is similar to elementary school + // columnar multiplication. Instead of digits, there are 64-bit limbs. + // + // We are multiplying a 3 limbs number, h, by a 2 limbs number, r. + // + // h2 h1 h0 x + // r1 r0 = + // ---------------- + // h2r0 h1r0 h0r0 <-- individual 128-bit products + // + h2r1 h1r1 h0r1 + // ------------------------ + // m3 m2 m1 m0 <-- result in 128-bit overlapping limbs + // ------------------------ + // m3.hi m2.hi m1.hi m0.hi <-- carry propagation + // + m3.lo m2.lo m1.lo m0.lo + // ------------------------------- + // t4 t3 t2 t1 t0 <-- final result in 64-bit limbs + // + // The main difference from pen-and-paper multiplication is that we do + // carry propagation in a separate step, as if we wrote two digit sums + // at first (the 128-bit limbs), and then carried the tens all at once. + + h0r0 := mul64(h0, r0) + h1r0 := mul64(h1, r0) + h2r0 := mul64(h2, r0) + h0r1 := mul64(h0, r1) + h1r1 := mul64(h1, r1) + h2r1 := mul64(h2, r1) + + // Since h2 is known to be at most 7 (5 + 1 + 1), and r0 and r1 have their + // top 4 bits cleared by rMask{0,1}, we know that their product is not going + // to overflow 64 bits, so we can ignore the high part of the products. + // + // This also means that the product doesn't have a fifth limb (t4). + if h2r0.hi != 0 { + panic("poly1305: unexpected overflow") + } + if h2r1.hi != 0 { + panic("poly1305: unexpected overflow") + } + + m0 := h0r0 + m1 := add128(h1r0, h0r1) // These two additions don't overflow thanks again + m2 := add128(h2r0, h1r1) // to the 4 masked bits at the top of r0 and r1. 
+ m3 := h2r1 + + t0 := m0.lo + t1, c := bitsAdd64(m1.lo, m0.hi, 0) + t2, c := bitsAdd64(m2.lo, m1.hi, c) + t3, _ := bitsAdd64(m3.lo, m2.hi, c) + + // Now we have the result as 4 64-bit limbs, and we need to reduce it + // modulo 2¹³⁰ - 5. The special shape of this Crandall prime lets us do + // a cheap partial reduction according to the reduction identity + // + // c * 2¹³⁰ + n = c * 5 + n mod 2¹³⁰ - 5 + // + // because 2¹³⁰ = 5 mod 2¹³⁰ - 5. Partial reduction since the result is + // likely to be larger than 2¹³⁰ - 5, but still small enough to fit the + // assumptions we make about h in the rest of the code. + // + // See also https://speakerdeck.com/gtank/engineering-prime-numbers?slide=23 + + // We split the final result at the 2¹³⁰ mark into h and cc, the carry. + // Note that the carry bits are effectively shifted left by 2, in other + // words, cc = c * 4 for the c in the reduction identity. + h0, h1, h2 = t0, t1, t2&maskLow2Bits + cc := uint128{t2 & maskNotLow2Bits, t3} + + // To add c * 5 to h, we first add cc = c * 4, and then add (cc >> 2) = c. + + h0, c = bitsAdd64(h0, cc.lo, 0) + h1, c = bitsAdd64(h1, cc.hi, c) + h2 += c + + cc = shiftRightBy2(cc) + + h0, c = bitsAdd64(h0, cc.lo, 0) + h1, c = bitsAdd64(h1, cc.hi, c) + h2 += c + + // h2 is at most 3 + 1 + 1 = 5, making the whole of h at most + // + // 5 * 2¹²⁸ + (2¹²⁸ - 1) = 6 * 2¹²⁸ - 1 + } + + state.h[0], state.h[1], state.h[2] = h0, h1, h2 +} + +const ( + maskLow2Bits uint64 = 0x0000000000000003 + maskNotLow2Bits uint64 = ^maskLow2Bits +) + +// select64 returns x if v == 1 and y if v == 0, in constant time. +func select64(v, x, y uint64) uint64 { return ^(v-1)&x | (v-1)&y } + +// [p0, p1, p2] is 2¹³⁰ - 5 in little endian order. +const ( + p0 = 0xFFFFFFFFFFFFFFFB + p1 = 0xFFFFFFFFFFFFFFFF + p2 = 0x0000000000000003 +) + +// finalize completes the modular reduction of h and computes +// +// out = h + s mod 2¹²⁸ +func finalize(out *[TagSize]byte, h *[3]uint64, s *[2]uint64) { + h0, h1, h2 := h[0], h[1], h[2] + + // After the partial reduction in updateGeneric, h might be more than + // 2¹³⁰ - 5, but will be less than 2 * (2¹³⁰ - 5). To complete the reduction + // in constant time, we compute t = h - (2¹³⁰ - 5), and select h as the + // result if the subtraction underflows, and t otherwise. + + hMinusP0, b := bitsSub64(h0, p0, 0) + hMinusP1, b := bitsSub64(h1, p1, b) + _, b = bitsSub64(h2, p2, b) + + // h = h if h < p else h - p + h0 = select64(b, h0, hMinusP0) + h1 = select64(b, h1, hMinusP1) + + // Finally, we compute the last Poly1305 step + // + // tag = h + s mod 2¹²⁸ + // + // by just doing a wide addition with the 128 low bits of h and discarding + // the overflow. + h0, c := bitsAdd64(h0, s[0], 0) + h1, _ = bitsAdd64(h1, s[1], c) + + binary.LittleEndian.PutUint64(out[0:8], h0) + binary.LittleEndian.PutUint64(out[8:16], h1) +} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.go temporal-1.22.5/src/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.go --- temporal-1.21.5-1/src/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,48 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build gc && !purego +// +build gc,!purego + +package poly1305 + +//go:noescape +func update(state *macState, msg []byte) + +// mac is a wrapper for macGeneric that redirects calls that would have gone to +// updateGeneric to update. +// +// Its Write and Sum methods are otherwise identical to the macGeneric ones, but +// using function pointers would carry a major performance cost. +type mac struct{ macGeneric } + +func (h *mac) Write(p []byte) (int, error) { + nn := len(p) + if h.offset > 0 { + n := copy(h.buffer[h.offset:], p) + if h.offset+n < TagSize { + h.offset += n + return nn, nil + } + p = p[n:] + h.offset = 0 + update(&h.macState, h.buffer[:]) + } + if n := len(p) - (len(p) % TagSize); n > 0 { + update(&h.macState, p[:n]) + p = p[n:] + } + if len(p) > 0 { + h.offset += copy(h.buffer[h.offset:], p) + } + return nn, nil +} + +func (h *mac) Sum(out *[16]byte) { + state := h.macState + if h.offset > 0 { + update(&state, h.buffer[:h.offset]) + } + finalize(out, &state.h, &state.s) +} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s temporal-1.22.5/src/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s --- temporal-1.21.5-1/src/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,182 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc && !purego +// +build gc,!purego + +#include "textflag.h" + +// This was ported from the amd64 implementation. + +#define POLY1305_ADD(msg, h0, h1, h2, t0, t1, t2) \ + MOVD (msg), t0; \ + MOVD 8(msg), t1; \ + MOVD $1, t2; \ + ADDC t0, h0, h0; \ + ADDE t1, h1, h1; \ + ADDE t2, h2; \ + ADD $16, msg + +#define POLY1305_MUL(h0, h1, h2, r0, r1, t0, t1, t2, t3, t4, t5) \ + MULLD r0, h0, t0; \ + MULLD r0, h1, t4; \ + MULHDU r0, h0, t1; \ + MULHDU r0, h1, t5; \ + ADDC t4, t1, t1; \ + MULLD r0, h2, t2; \ + ADDZE t5; \ + MULHDU r1, h0, t4; \ + MULLD r1, h0, h0; \ + ADD t5, t2, t2; \ + ADDC h0, t1, t1; \ + MULLD h2, r1, t3; \ + ADDZE t4, h0; \ + MULHDU r1, h1, t5; \ + MULLD r1, h1, t4; \ + ADDC t4, t2, t2; \ + ADDE t5, t3, t3; \ + ADDC h0, t2, t2; \ + MOVD $-4, t4; \ + MOVD t0, h0; \ + MOVD t1, h1; \ + ADDZE t3; \ + ANDCC $3, t2, h2; \ + AND t2, t4, t0; \ + ADDC t0, h0, h0; \ + ADDE t3, h1, h1; \ + SLD $62, t3, t4; \ + SRD $2, t2; \ + ADDZE h2; \ + OR t4, t2, t2; \ + SRD $2, t3; \ + ADDC t2, h0, h0; \ + ADDE t3, h1, h1; \ + ADDZE h2 + +DATA ·poly1305Mask<>+0x00(SB)/8, $0x0FFFFFFC0FFFFFFF +DATA ·poly1305Mask<>+0x08(SB)/8, $0x0FFFFFFC0FFFFFFC +GLOBL ·poly1305Mask<>(SB), RODATA, $16 + +// func update(state *[7]uint64, msg []byte) +TEXT ·update(SB), $0-32 + MOVD state+0(FP), R3 + MOVD msg_base+8(FP), R4 + MOVD msg_len+16(FP), R5 + + MOVD 0(R3), R8 // h0 + MOVD 8(R3), R9 // h1 + MOVD 16(R3), R10 // h2 + MOVD 24(R3), R11 // r0 + MOVD 32(R3), R12 // r1 + + CMP R5, $16 + BLT bytes_between_0_and_15 + +loop: + POLY1305_ADD(R4, R8, R9, R10, R20, R21, R22) + +multiply: + POLY1305_MUL(R8, R9, R10, R11, R12, R16, R17, R18, R14, R20, R21) + ADD $-16, R5 + CMP R5, $16 + BGE loop + +bytes_between_0_and_15: + CMP R5, $0 + BEQ done + MOVD $0, R16 // h0 + MOVD $0, R17 // h1 + +flush_buffer: + CMP R5, $8 + BLE just1 + + MOVD $8, R21 + SUB R21, R5, R21 + + // Greater than 8 -- load the rightmost remaining bytes in msg + // and put 
into R17 (h1) + MOVD (R4)(R21), R17 + MOVD $16, R22 + + // Find the offset to those bytes + SUB R5, R22, R22 + SLD $3, R22 + + // Shift to get only the bytes in msg + SRD R22, R17, R17 + + // Put 1 at high end + MOVD $1, R23 + SLD $3, R21 + SLD R21, R23, R23 + OR R23, R17, R17 + + // Remainder is 8 + MOVD $8, R5 + +just1: + CMP R5, $8 + BLT less8 + + // Exactly 8 + MOVD (R4), R16 + + CMP R17, $0 + + // Check if we've already set R17; if not + // set 1 to indicate end of msg. + BNE carry + MOVD $1, R17 + BR carry + +less8: + MOVD $0, R16 // h0 + MOVD $0, R22 // shift count + CMP R5, $4 + BLT less4 + MOVWZ (R4), R16 + ADD $4, R4 + ADD $-4, R5 + MOVD $32, R22 + +less4: + CMP R5, $2 + BLT less2 + MOVHZ (R4), R21 + SLD R22, R21, R21 + OR R16, R21, R16 + ADD $16, R22 + ADD $-2, R5 + ADD $2, R4 + +less2: + CMP R5, $0 + BEQ insert1 + MOVBZ (R4), R21 + SLD R22, R21, R21 + OR R16, R21, R16 + ADD $8, R22 + +insert1: + // Insert 1 at end of msg + MOVD $1, R21 + SLD R22, R21, R21 + OR R16, R21, R16 + +carry: + // Add new values to h0, h1, h2 + ADDC R16, R8 + ADDE R17, R9 + ADDZE R10, R10 + MOVD $16, R5 + ADD R5, R4 + BR multiply + +done: + // Save h0, h1, h2 in state + MOVD R8, 0(R3) + MOVD R9, 8(R3) + MOVD R10, 16(R3) + RET diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.go temporal-1.22.5/src/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.go --- temporal-1.21.5-1/src/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,77 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc && !purego +// +build gc,!purego + +package poly1305 + +import ( + "golang.org/x/sys/cpu" +) + +// updateVX is an assembly implementation of Poly1305 that uses vector +// instructions. It must only be called if the vector facility (vx) is +// available. +// +//go:noescape +func updateVX(state *macState, msg []byte) + +// mac is a replacement for macGeneric that uses a larger buffer and redirects +// calls that would have gone to updateGeneric to updateVX if the vector +// facility is installed. +// +// A larger buffer is required for good performance because the vector +// implementation has a higher fixed cost per call than the generic +// implementation. +type mac struct { + macState + + buffer [16 * TagSize]byte // size must be a multiple of block size (16) + offset int +} + +func (h *mac) Write(p []byte) (int, error) { + nn := len(p) + if h.offset > 0 { + n := copy(h.buffer[h.offset:], p) + if h.offset+n < len(h.buffer) { + h.offset += n + return nn, nil + } + p = p[n:] + h.offset = 0 + if cpu.S390X.HasVX { + updateVX(&h.macState, h.buffer[:]) + } else { + updateGeneric(&h.macState, h.buffer[:]) + } + } + + tail := len(p) % len(h.buffer) // number of bytes to copy into buffer + body := len(p) - tail // number of bytes to process now + if body > 0 { + if cpu.S390X.HasVX { + updateVX(&h.macState, p[:body]) + } else { + updateGeneric(&h.macState, p[:body]) + } + } + h.offset = copy(h.buffer[:], p[body:]) // copy tail bytes - can be 0 + return nn, nil +} + +func (h *mac) Sum(out *[TagSize]byte) { + state := h.macState + remainder := h.buffer[:h.offset] + + // Use the generic implementation if we have 2 or fewer blocks left + // to sum. The vector implementation has a higher startup time. 
+ if cpu.S390X.HasVX && len(remainder) > 2*TagSize { + updateVX(&state, remainder) + } else if len(remainder) > 0 { + updateGeneric(&state, remainder) + } + finalize(out, &state.h, &state.s) +} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.s temporal-1.22.5/src/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.s --- temporal-1.21.5-1/src/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.s 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.s 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,504 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc && !purego +// +build gc,!purego + +#include "textflag.h" + +// This implementation of Poly1305 uses the vector facility (vx) +// to process up to 2 blocks (32 bytes) per iteration using an +// algorithm based on the one described in: +// +// NEON crypto, Daniel J. Bernstein & Peter Schwabe +// https://cryptojedi.org/papers/neoncrypto-20120320.pdf +// +// This algorithm uses 5 26-bit limbs to represent a 130-bit +// value. These limbs are, for the most part, zero extended and +// placed into 64-bit vector register elements. Each vector +// register is 128-bits wide and so holds 2 of these elements. +// Using 26-bit limbs allows us plenty of headroom to accommodate +// accumulations before and after multiplication without +// overflowing either 32-bits (before multiplication) or 64-bits +// (after multiplication). +// +// In order to parallelise the operations required to calculate +// the sum we use two separate accumulators and then sum those +// in an extra final step. For compatibility with the generic +// implementation we perform this summation at the end of every +// updateVX call. +// +// To use two accumulators we must multiply the message blocks +// by r² rather than r. Only the final message block should be +// multiplied by r. +// +// Example: +// +// We want to calculate the sum (h) for a 64 byte message (m): +// +// h = m[0:16]r⁴ + m[16:32]r³ + m[32:48]r² + m[48:64]r +// +// To do this we split the calculation into the even indices +// and odd indices of the message. These form our SIMD 'lanes': +// +// h = m[ 0:16]r⁴ + m[32:48]r² + <- lane 0 +// m[16:32]r³ + m[48:64]r <- lane 1 +// +// To calculate this iteratively we refactor so that both lanes +// are written in terms of r² and r: +// +// h = (m[ 0:16]r² + m[32:48])r² + <- lane 0 +// (m[16:32]r² + m[48:64])r <- lane 1 +// ^ ^ +// | coefficients for second iteration +// coefficients for first iteration +// +// So in this case we would have two iterations. In the first +// both lanes are multiplied by r². In the second only the +// first lane is multiplied by r² and the second lane is +// instead multiplied by r. This gives use the odd and even +// powers of r that we need from the original equation. +// +// Notation: +// +// h - accumulator +// r - key +// m - message +// +// [a, b] - SIMD register holding two 64-bit values +// [a, b, c, d] - SIMD register holding four 32-bit values +// xᵢ[n] - limb n of variable x with bit width i +// +// Limbs are expressed in little endian order, so for 26-bit +// limbs x₂₆[4] will be the most significant limb and x₂₆[0] +// will be the least significant limb. 
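For reference, a minimal standalone sketch (not part of the vendored sources) that checks with math/big that the two-lane factorisation described in the comment above agrees with the sequential Horner form documented in sum_generic.go; the key r and block values m0..m3 below are arbitrary illustrative numbers, not test vectors from the library:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// p = 2¹³⁰ - 5, the Poly1305 prime.
	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 130), big.NewInt(5))

	r := big.NewInt(123456789) // illustrative key value
	m := []*big.Int{big.NewInt(11), big.NewInt(22), big.NewInt(33), big.NewInt(44)}

	mod := func(x *big.Int) *big.Int { return new(big.Int).Mod(x, p) }
	mul := func(a, b *big.Int) *big.Int { return mod(new(big.Int).Mul(a, b)) }
	add := func(a, b *big.Int) *big.Int { return mod(new(big.Int).Add(a, b)) }

	// Sequential form: h = m0·r⁴ + m1·r³ + m2·r² + m3·r, evaluated as Horner steps.
	h := big.NewInt(0)
	for _, mi := range m {
		h = mul(add(h, mi), r)
	}

	// Two-lane form from the comment: lane0 = (m0·r² + m2)·r², lane1 = (m1·r² + m3)·r.
	r2 := mul(r, r)
	lane0 := mul(add(mul(m[0], r2), m[2]), r2)
	lane1 := mul(add(mul(m[1], r2), m[3]), r)

	fmt.Println(h.Cmp(add(lane0, lane1)) == 0) // true: both forms give the same sum
}
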
+ +// masking constants +#define MOD24 V0 // [0x0000000000ffffff, 0x0000000000ffffff] - mask low 24-bits +#define MOD26 V1 // [0x0000000003ffffff, 0x0000000003ffffff] - mask low 26-bits + +// expansion constants (see EXPAND macro) +#define EX0 V2 +#define EX1 V3 +#define EX2 V4 + +// key (r², r or 1 depending on context) +#define R_0 V5 +#define R_1 V6 +#define R_2 V7 +#define R_3 V8 +#define R_4 V9 + +// precalculated coefficients (5r², 5r or 0 depending on context) +#define R5_1 V10 +#define R5_2 V11 +#define R5_3 V12 +#define R5_4 V13 + +// message block (m) +#define M_0 V14 +#define M_1 V15 +#define M_2 V16 +#define M_3 V17 +#define M_4 V18 + +// accumulator (h) +#define H_0 V19 +#define H_1 V20 +#define H_2 V21 +#define H_3 V22 +#define H_4 V23 + +// temporary registers (for short-lived values) +#define T_0 V24 +#define T_1 V25 +#define T_2 V26 +#define T_3 V27 +#define T_4 V28 + +GLOBL ·constants<>(SB), RODATA, $0x30 +// EX0 +DATA ·constants<>+0x00(SB)/8, $0x0006050403020100 +DATA ·constants<>+0x08(SB)/8, $0x1016151413121110 +// EX1 +DATA ·constants<>+0x10(SB)/8, $0x060c0b0a09080706 +DATA ·constants<>+0x18(SB)/8, $0x161c1b1a19181716 +// EX2 +DATA ·constants<>+0x20(SB)/8, $0x0d0d0d0d0d0f0e0d +DATA ·constants<>+0x28(SB)/8, $0x1d1d1d1d1d1f1e1d + +// MULTIPLY multiplies each lane of f and g, partially reduced +// modulo 2¹³⁰ - 5. The result, h, consists of partial products +// in each lane that need to be reduced further to produce the +// final result. +// +// h₁₃₀ = (f₁₃₀g₁₃₀) % 2¹³⁰ + (5f₁₃₀g₁₃₀) / 2¹³⁰ +// +// Note that the multiplication by 5 of the high bits is +// achieved by precalculating the multiplication of four of the +// g coefficients by 5. These are g51-g54. +#define MULTIPLY(f0, f1, f2, f3, f4, g0, g1, g2, g3, g4, g51, g52, g53, g54, h0, h1, h2, h3, h4) \ + VMLOF f0, g0, h0 \ + VMLOF f0, g3, h3 \ + VMLOF f0, g1, h1 \ + VMLOF f0, g4, h4 \ + VMLOF f0, g2, h2 \ + VMLOF f1, g54, T_0 \ + VMLOF f1, g2, T_3 \ + VMLOF f1, g0, T_1 \ + VMLOF f1, g3, T_4 \ + VMLOF f1, g1, T_2 \ + VMALOF f2, g53, h0, h0 \ + VMALOF f2, g1, h3, h3 \ + VMALOF f2, g54, h1, h1 \ + VMALOF f2, g2, h4, h4 \ + VMALOF f2, g0, h2, h2 \ + VMALOF f3, g52, T_0, T_0 \ + VMALOF f3, g0, T_3, T_3 \ + VMALOF f3, g53, T_1, T_1 \ + VMALOF f3, g1, T_4, T_4 \ + VMALOF f3, g54, T_2, T_2 \ + VMALOF f4, g51, h0, h0 \ + VMALOF f4, g54, h3, h3 \ + VMALOF f4, g52, h1, h1 \ + VMALOF f4, g0, h4, h4 \ + VMALOF f4, g53, h2, h2 \ + VAG T_0, h0, h0 \ + VAG T_3, h3, h3 \ + VAG T_1, h1, h1 \ + VAG T_4, h4, h4 \ + VAG T_2, h2, h2 + +// REDUCE performs the following carry operations in four +// stages, as specified in Bernstein & Schwabe: +// +// 1: h₂₆[0]->h₂₆[1] h₂₆[3]->h₂₆[4] +// 2: h₂₆[1]->h₂₆[2] h₂₆[4]->h₂₆[0] +// 3: h₂₆[0]->h₂₆[1] h₂₆[2]->h₂₆[3] +// 4: h₂₆[3]->h₂₆[4] +// +// The result is that all of the limbs are limited to 26-bits +// except for h₂₆[1] and h₂₆[4] which are limited to 27-bits. +// +// Note that although each limb is aligned at 26-bit intervals +// they may contain values that exceed 2²⁶ - 1, hence the need +// to carry the excess bits in each limb. 
+#define REDUCE(h0, h1, h2, h3, h4) \ + VESRLG $26, h0, T_0 \ + VESRLG $26, h3, T_1 \ + VN MOD26, h0, h0 \ + VN MOD26, h3, h3 \ + VAG T_0, h1, h1 \ + VAG T_1, h4, h4 \ + VESRLG $26, h1, T_2 \ + VESRLG $26, h4, T_3 \ + VN MOD26, h1, h1 \ + VN MOD26, h4, h4 \ + VESLG $2, T_3, T_4 \ + VAG T_3, T_4, T_4 \ + VAG T_2, h2, h2 \ + VAG T_4, h0, h0 \ + VESRLG $26, h2, T_0 \ + VESRLG $26, h0, T_1 \ + VN MOD26, h2, h2 \ + VN MOD26, h0, h0 \ + VAG T_0, h3, h3 \ + VAG T_1, h1, h1 \ + VESRLG $26, h3, T_2 \ + VN MOD26, h3, h3 \ + VAG T_2, h4, h4 + +// EXPAND splits the 128-bit little-endian values in0 and in1 +// into 26-bit big-endian limbs and places the results into +// the first and second lane of d₂₆[0:4] respectively. +// +// The EX0, EX1 and EX2 constants are arrays of byte indices +// for permutation. The permutation both reverses the bytes +// in the input and ensures the bytes are copied into the +// destination limb ready to be shifted into their final +// position. +#define EXPAND(in0, in1, d0, d1, d2, d3, d4) \ + VPERM in0, in1, EX0, d0 \ + VPERM in0, in1, EX1, d2 \ + VPERM in0, in1, EX2, d4 \ + VESRLG $26, d0, d1 \ + VESRLG $30, d2, d3 \ + VESRLG $4, d2, d2 \ + VN MOD26, d0, d0 \ // [in0₂₆[0], in1₂₆[0]] + VN MOD26, d3, d3 \ // [in0₂₆[3], in1₂₆[3]] + VN MOD26, d1, d1 \ // [in0₂₆[1], in1₂₆[1]] + VN MOD24, d4, d4 \ // [in0₂₆[4], in1₂₆[4]] + VN MOD26, d2, d2 // [in0₂₆[2], in1₂₆[2]] + +// func updateVX(state *macState, msg []byte) +TEXT ·updateVX(SB), NOSPLIT, $0 + MOVD state+0(FP), R1 + LMG msg+8(FP), R2, R3 // R2=msg_base, R3=msg_len + + // load EX0, EX1 and EX2 + MOVD $·constants<>(SB), R5 + VLM (R5), EX0, EX2 + + // generate masks + VGMG $(64-24), $63, MOD24 // [0x00ffffff, 0x00ffffff] + VGMG $(64-26), $63, MOD26 // [0x03ffffff, 0x03ffffff] + + // load h (accumulator) and r (key) from state + VZERO T_1 // [0, 0] + VL 0(R1), T_0 // [h₆₄[0], h₆₄[1]] + VLEG $0, 16(R1), T_1 // [h₆₄[2], 0] + VL 24(R1), T_2 // [r₆₄[0], r₆₄[1]] + VPDI $0, T_0, T_2, T_3 // [h₆₄[0], r₆₄[0]] + VPDI $5, T_0, T_2, T_4 // [h₆₄[1], r₆₄[1]] + + // unpack h and r into 26-bit limbs + // note: h₆₄[2] may have the low 3 bits set, so h₂₆[4] is a 27-bit value + VN MOD26, T_3, H_0 // [h₂₆[0], r₂₆[0]] + VZERO H_1 // [0, 0] + VZERO H_3 // [0, 0] + VGMG $(64-12-14), $(63-12), T_0 // [0x03fff000, 0x03fff000] - 26-bit mask with low 12 bits masked out + VESLG $24, T_1, T_1 // [h₆₄[2]<<24, 0] + VERIMG $-26&63, T_3, MOD26, H_1 // [h₂₆[1], r₂₆[1]] + VESRLG $+52&63, T_3, H_2 // [h₂₆[2], r₂₆[2]] - low 12 bits only + VERIMG $-14&63, T_4, MOD26, H_3 // [h₂₆[1], r₂₆[1]] + VESRLG $40, T_4, H_4 // [h₂₆[4], r₂₆[4]] - low 24 bits only + VERIMG $+12&63, T_4, T_0, H_2 // [h₂₆[2], r₂₆[2]] - complete + VO T_1, H_4, H_4 // [h₂₆[4], r₂₆[4]] - complete + + // replicate r across all 4 vector elements + VREPF $3, H_0, R_0 // [r₂₆[0], r₂₆[0], r₂₆[0], r₂₆[0]] + VREPF $3, H_1, R_1 // [r₂₆[1], r₂₆[1], r₂₆[1], r₂₆[1]] + VREPF $3, H_2, R_2 // [r₂₆[2], r₂₆[2], r₂₆[2], r₂₆[2]] + VREPF $3, H_3, R_3 // [r₂₆[3], r₂₆[3], r₂₆[3], r₂₆[3]] + VREPF $3, H_4, R_4 // [r₂₆[4], r₂₆[4], r₂₆[4], r₂₆[4]] + + // zero out lane 1 of h + VLEIG $1, $0, H_0 // [h₂₆[0], 0] + VLEIG $1, $0, H_1 // [h₂₆[1], 0] + VLEIG $1, $0, H_2 // [h₂₆[2], 0] + VLEIG $1, $0, H_3 // [h₂₆[3], 0] + VLEIG $1, $0, H_4 // [h₂₆[4], 0] + + // calculate 5r (ignore least significant limb) + VREPIF $5, T_0 + VMLF T_0, R_1, R5_1 // [5r₂₆[1], 5r₂₆[1], 5r₂₆[1], 5r₂₆[1]] + VMLF T_0, R_2, R5_2 // [5r₂₆[2], 5r₂₆[2], 5r₂₆[2], 5r₂₆[2]] + VMLF T_0, R_3, R5_3 // [5r₂₆[3], 5r₂₆[3], 5r₂₆[3], 5r₂₆[3]] + VMLF T_0, R_4, R5_4 // 
[5r₂₆[4], 5r₂₆[4], 5r₂₆[4], 5r₂₆[4]] + + // skip r² calculation if we are only calculating one block + CMPBLE R3, $16, skip + + // calculate r² + MULTIPLY(R_0, R_1, R_2, R_3, R_4, R_0, R_1, R_2, R_3, R_4, R5_1, R5_2, R5_3, R5_4, M_0, M_1, M_2, M_3, M_4) + REDUCE(M_0, M_1, M_2, M_3, M_4) + VGBM $0x0f0f, T_0 + VERIMG $0, M_0, T_0, R_0 // [r₂₆[0], r²₂₆[0], r₂₆[0], r²₂₆[0]] + VERIMG $0, M_1, T_0, R_1 // [r₂₆[1], r²₂₆[1], r₂₆[1], r²₂₆[1]] + VERIMG $0, M_2, T_0, R_2 // [r₂₆[2], r²₂₆[2], r₂₆[2], r²₂₆[2]] + VERIMG $0, M_3, T_0, R_3 // [r₂₆[3], r²₂₆[3], r₂₆[3], r²₂₆[3]] + VERIMG $0, M_4, T_0, R_4 // [r₂₆[4], r²₂₆[4], r₂₆[4], r²₂₆[4]] + + // calculate 5r² (ignore least significant limb) + VREPIF $5, T_0 + VMLF T_0, R_1, R5_1 // [5r₂₆[1], 5r²₂₆[1], 5r₂₆[1], 5r²₂₆[1]] + VMLF T_0, R_2, R5_2 // [5r₂₆[2], 5r²₂₆[2], 5r₂₆[2], 5r²₂₆[2]] + VMLF T_0, R_3, R5_3 // [5r₂₆[3], 5r²₂₆[3], 5r₂₆[3], 5r²₂₆[3]] + VMLF T_0, R_4, R5_4 // [5r₂₆[4], 5r²₂₆[4], 5r₂₆[4], 5r²₂₆[4]] + +loop: + CMPBLE R3, $32, b2 // 2 or fewer blocks remaining, need to change key coefficients + + // load next 2 blocks from message + VLM (R2), T_0, T_1 + + // update message slice + SUB $32, R3 + MOVD $32(R2), R2 + + // unpack message blocks into 26-bit big-endian limbs + EXPAND(T_0, T_1, M_0, M_1, M_2, M_3, M_4) + + // add 2¹²⁸ to each message block value + VLEIB $4, $1, M_4 + VLEIB $12, $1, M_4 + +multiply: + // accumulate the incoming message + VAG H_0, M_0, M_0 + VAG H_3, M_3, M_3 + VAG H_1, M_1, M_1 + VAG H_4, M_4, M_4 + VAG H_2, M_2, M_2 + + // multiply the accumulator by the key coefficient + MULTIPLY(M_0, M_1, M_2, M_3, M_4, R_0, R_1, R_2, R_3, R_4, R5_1, R5_2, R5_3, R5_4, H_0, H_1, H_2, H_3, H_4) + + // carry and partially reduce the partial products + REDUCE(H_0, H_1, H_2, H_3, H_4) + + CMPBNE R3, $0, loop + +finish: + // sum lane 0 and lane 1 and put the result in lane 1 + VZERO T_0 + VSUMQG H_0, T_0, H_0 + VSUMQG H_3, T_0, H_3 + VSUMQG H_1, T_0, H_1 + VSUMQG H_4, T_0, H_4 + VSUMQG H_2, T_0, H_2 + + // reduce again after summation + // TODO(mundaym): there might be a more efficient way to do this + // now that we only have 1 active lane. For example, we could + // simultaneously pack the values as we reduce them. + REDUCE(H_0, H_1, H_2, H_3, H_4) + + // carry h[1] through to h[4] so that only h[4] can exceed 2²⁶ - 1 + // TODO(mundaym): in testing this final carry was unnecessary. + // Needs a proof before it can be removed though. + VESRLG $26, H_1, T_1 + VN MOD26, H_1, H_1 + VAQ T_1, H_2, H_2 + VESRLG $26, H_2, T_2 + VN MOD26, H_2, H_2 + VAQ T_2, H_3, H_3 + VESRLG $26, H_3, T_3 + VN MOD26, H_3, H_3 + VAQ T_3, H_4, H_4 + + // h is now < 2(2¹³⁰ - 5) + // Pack each lane in h₂₆[0:4] into h₁₂₈[0:1]. + VESLG $26, H_1, H_1 + VESLG $26, H_3, H_3 + VO H_0, H_1, H_0 + VO H_2, H_3, H_2 + VESLG $4, H_2, H_2 + VLEIB $7, $48, H_1 + VSLB H_1, H_2, H_2 + VO H_0, H_2, H_0 + VLEIB $7, $104, H_1 + VSLB H_1, H_4, H_3 + VO H_3, H_0, H_0 + VLEIB $7, $24, H_1 + VSRLB H_1, H_4, H_1 + + // update state + VSTEG $1, H_0, 0(R1) + VSTEG $0, H_0, 8(R1) + VSTEG $1, H_1, 16(R1) + RET + +b2: // 2 or fewer blocks remaining + CMPBLE R3, $16, b1 + + // Load the 2 remaining blocks (17-32 bytes remaining). + MOVD $-17(R3), R0 // index of final byte to load modulo 16 + VL (R2), T_0 // load full 16 byte block + VLL R0, 16(R2), T_1 // load final (possibly partial) block and pad with zeros to 16 bytes + + // The Poly1305 algorithm requires that a 1 bit be appended to + // each message block. 
If the final block is less than 16 bytes + // long then it is easiest to insert the 1 before the message + // block is split into 26-bit limbs. If, on the other hand, the + // final message block is 16 bytes long then we append the 1 bit + // after expansion as normal. + MOVBZ $1, R0 + MOVD $-16(R3), R3 // index of byte in last block to insert 1 at (could be 16) + CMPBEQ R3, $16, 2(PC) // skip the insertion if the final block is 16 bytes long + VLVGB R3, R0, T_1 // insert 1 into the byte at index R3 + + // Split both blocks into 26-bit limbs in the appropriate lanes. + EXPAND(T_0, T_1, M_0, M_1, M_2, M_3, M_4) + + // Append a 1 byte to the end of the second to last block. + VLEIB $4, $1, M_4 + + // Append a 1 byte to the end of the last block only if it is a + // full 16 byte block. + CMPBNE R3, $16, 2(PC) + VLEIB $12, $1, M_4 + + // Finally, set up the coefficients for the final multiplication. + // We have previously saved r and 5r in the 32-bit even indexes + // of the R_[0-4] and R5_[1-4] coefficient registers. + // + // We want lane 0 to be multiplied by r² so that can be kept the + // same. We want lane 1 to be multiplied by r so we need to move + // the saved r value into the 32-bit odd index in lane 1 by + // rotating the 64-bit lane by 32. + VGBM $0x00ff, T_0 // [0, 0xffffffffffffffff] - mask lane 1 only + VERIMG $32, R_0, T_0, R_0 // [_, r²₂₆[0], _, r₂₆[0]] + VERIMG $32, R_1, T_0, R_1 // [_, r²₂₆[1], _, r₂₆[1]] + VERIMG $32, R_2, T_0, R_2 // [_, r²₂₆[2], _, r₂₆[2]] + VERIMG $32, R_3, T_0, R_3 // [_, r²₂₆[3], _, r₂₆[3]] + VERIMG $32, R_4, T_0, R_4 // [_, r²₂₆[4], _, r₂₆[4]] + VERIMG $32, R5_1, T_0, R5_1 // [_, 5r²₂₆[1], _, 5r₂₆[1]] + VERIMG $32, R5_2, T_0, R5_2 // [_, 5r²₂₆[2], _, 5r₂₆[2]] + VERIMG $32, R5_3, T_0, R5_3 // [_, 5r²₂₆[3], _, 5r₂₆[3]] + VERIMG $32, R5_4, T_0, R5_4 // [_, 5r²₂₆[4], _, 5r₂₆[4]] + + MOVD $0, R3 + BR multiply + +skip: + CMPBEQ R3, $0, finish + +b1: // 1 block remaining + + // Load the final block (1-16 bytes). This will be placed into + // lane 0. + MOVD $-1(R3), R0 + VLL R0, (R2), T_0 // pad to 16 bytes with zeros + + // The Poly1305 algorithm requires that a 1 bit be appended to + // each message block. If the final block is less than 16 bytes + // long then it is easiest to insert the 1 before the message + // block is split into 26-bit limbs. If, on the other hand, the + // final message block is 16 bytes long then we append the 1 bit + // after expansion as normal. + MOVBZ $1, R0 + CMPBEQ R3, $16, 2(PC) + VLVGB R3, R0, T_0 + + // Set the message block in lane 1 to the value 0 so that it + // can be accumulated without affecting the final result. + VZERO T_1 + + // Split the final message block into 26-bit limbs in lane 0. + // Lane 1 will be contain 0. + EXPAND(T_0, T_1, M_0, M_1, M_2, M_3, M_4) + + // Append a 1 byte to the end of the last block only if it is a + // full 16 byte block. + CMPBNE R3, $16, 2(PC) + VLEIB $4, $1, M_4 + + // We have previously saved r and 5r in the 32-bit even indexes + // of the R_[0-4] and R5_[1-4] coefficient registers. + // + // We want lane 0 to be multiplied by r so we need to move the + // saved r value into the 32-bit odd index in lane 0. We want + // lane 1 to be set to the value 1. This makes multiplication + // a no-op. We do this by setting lane 1 in every register to 0 + // and then just setting the 32-bit index 3 in R_0 to 1. 
+ VZERO T_0 + MOVD $0, R0 + MOVD $0x10111213, R12 + VLVGP R12, R0, T_1 // [_, 0x10111213, _, 0x00000000] + VPERM T_0, R_0, T_1, R_0 // [_, r₂₆[0], _, 0] + VPERM T_0, R_1, T_1, R_1 // [_, r₂₆[1], _, 0] + VPERM T_0, R_2, T_1, R_2 // [_, r₂₆[2], _, 0] + VPERM T_0, R_3, T_1, R_3 // [_, r₂₆[3], _, 0] + VPERM T_0, R_4, T_1, R_4 // [_, r₂₆[4], _, 0] + VPERM T_0, R5_1, T_1, R5_1 // [_, 5r₂₆[1], _, 0] + VPERM T_0, R5_2, T_1, R5_2 // [_, 5r₂₆[2], _, 0] + VPERM T_0, R5_3, T_1, R5_3 // [_, 5r₂₆[3], _, 0] + VPERM T_0, R5_4, T_1, R5_4 // [_, 5r₂₆[4], _, 0] + + // Set the value of lane 1 to be 1. + VLEIF $3, $1, R_0 // [_, r₂₆[0], _, 1] + + MOVD $0, R3 + BR multiply diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/exp/slices/slices.go temporal-1.22.5/src/vendor/golang.org/x/exp/slices/slices.go --- temporal-1.21.5-1/src/vendor/golang.org/x/exp/slices/slices.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/exp/slices/slices.go 2024-02-23 09:46:14.000000000 +0000 @@ -104,8 +104,8 @@ // Index returns the index of the first occurrence of v in s, // or -1 if not present. func Index[E comparable](s []E, v E) int { - for i, vs := range s { - if v == vs { + for i := range s { + if v == s[i] { return i } } @@ -115,8 +115,8 @@ // IndexFunc returns the first index i satisfying f(s[i]), // or -1 if none do. func IndexFunc[E any](s []E, f func(E) bool) int { - for i, v := range s { - if f(v) { + for i := range s { + if f(s[i]) { return i } } @@ -207,12 +207,12 @@ return s } i := 1 - last := s[0] - for _, v := range s[1:] { - if v != last { - s[i] = v + for k := 1; k < len(s); k++ { + if s[k] != s[k-1] { + if i != k { + s[i] = s[k] + } i++ - last = v } } return s[:i] @@ -224,12 +224,12 @@ return s } i := 1 - last := s[0] - for _, v := range s[1:] { - if !eq(v, last) { - s[i] = v + for k := 1; k < len(s); k++ { + if !eq(s[k], s[k-1]) { + if i != k { + s[i] = s[k] + } i++ - last = v } } return s[:i] diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/exp/slices/sort.go temporal-1.22.5/src/vendor/golang.org/x/exp/slices/sort.go --- temporal-1.21.5-1/src/vendor/golang.org/x/exp/slices/sort.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/exp/slices/sort.go 2024-02-23 09:46:14.000000000 +0000 @@ -81,10 +81,12 @@ } // BinarySearchFunc works like BinarySearch, but uses a custom comparison -// function. The slice must be sorted in increasing order, where "increasing" is -// defined by cmp. cmp(a, b) is expected to return an integer comparing the two -// parameters: 0 if a == b, a negative number if a < b and a positive number if -// a > b. +// function. The slice must be sorted in increasing order, where "increasing" +// is defined by cmp. cmp should return 0 if the slice element matches +// the target, a negative number if the slice element precedes the target, +// or a positive number if the slice element follows the target. +// cmp must implement the same ordering as the slice, such that if +// cmp(a, t) < 0 and cmp(b, t) >= 0, then a must precede b in the slice. func BinarySearchFunc[E, T any](x []E, target T, cmp func(E, T) int) (int, bool) { n := len(x) // Define cmp(x[-1], target) < 0 and cmp(x[n], target) >= 0 . 
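As a usage illustration of the cmp contract spelled out in the reworded BinarySearchFunc documentation above (a hedged sketch, not part of this diff; the user type and values are made up):

package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

type user struct {
	ID   int
	Name string
}

func main() {
	// The slice must already be sorted in the order implied by cmp (here: by ID).
	users := []user{{1, "ada"}, {3, "bob"}, {7, "cyd"}}

	// cmp(element, target) returns a negative number if the element precedes
	// the target, 0 if it matches, and a positive number if it follows.
	i, found := slices.BinarySearchFunc(users, 3, func(u user, id int) int {
		switch {
		case u.ID < id:
			return -1
		case u.ID > id:
			return 1
		}
		return 0
	})
	fmt.Println(i, found) // 1 true
}
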
diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/net/http2/Dockerfile temporal-1.22.5/src/vendor/golang.org/x/net/http2/Dockerfile --- temporal-1.21.5-1/src/vendor/golang.org/x/net/http2/Dockerfile 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/net/http2/Dockerfile 1970-01-01 00:00:00.000000000 +0000 @@ -1,51 +0,0 @@ -# -# This Dockerfile builds a recent curl with HTTP/2 client support, using -# a recent nghttp2 build. -# -# See the Makefile for how to tag it. If Docker and that image is found, the -# Go tests use this curl binary for integration tests. -# - -FROM ubuntu:trusty - -RUN apt-get update && \ - apt-get upgrade -y && \ - apt-get install -y git-core build-essential wget - -RUN apt-get install -y --no-install-recommends \ - autotools-dev libtool pkg-config zlib1g-dev \ - libcunit1-dev libssl-dev libxml2-dev libevent-dev \ - automake autoconf - -# The list of packages nghttp2 recommends for h2load: -RUN apt-get install -y --no-install-recommends make binutils \ - autoconf automake autotools-dev \ - libtool pkg-config zlib1g-dev libcunit1-dev libssl-dev libxml2-dev \ - libev-dev libevent-dev libjansson-dev libjemalloc-dev \ - cython python3.4-dev python-setuptools - -# Note: setting NGHTTP2_VER before the git clone, so an old git clone isn't cached: -ENV NGHTTP2_VER 895da9a -RUN cd /root && git clone https://github.com/tatsuhiro-t/nghttp2.git - -WORKDIR /root/nghttp2 -RUN git reset --hard $NGHTTP2_VER -RUN autoreconf -i -RUN automake -RUN autoconf -RUN ./configure -RUN make -RUN make install - -WORKDIR /root -RUN wget https://curl.se/download/curl-7.45.0.tar.gz -RUN tar -zxvf curl-7.45.0.tar.gz -WORKDIR /root/curl-7.45.0 -RUN ./configure --with-ssl --with-nghttp2=/usr/local -RUN make -RUN make install -RUN ldconfig - -CMD ["-h"] -ENTRYPOINT ["/usr/local/bin/curl"] - diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/net/http2/Makefile temporal-1.22.5/src/vendor/golang.org/x/net/http2/Makefile --- temporal-1.21.5-1/src/vendor/golang.org/x/net/http2/Makefile 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/net/http2/Makefile 1970-01-01 00:00:00.000000000 +0000 @@ -1,3 +0,0 @@ -curlimage: - docker build -t gohttp2/curl . - diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/net/http2/server.go temporal-1.22.5/src/vendor/golang.org/x/net/http2/server.go --- temporal-1.21.5-1/src/vendor/golang.org/x/net/http2/server.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/net/http2/server.go 2024-02-23 09:46:14.000000000 +0000 @@ -441,7 +441,7 @@ if s.NewWriteScheduler != nil { sc.writeSched = s.NewWriteScheduler() } else { - sc.writeSched = NewPriorityWriteScheduler(nil) + sc.writeSched = newRoundRobinWriteScheduler() } // These start at the RFC-specified defaults. 
If there is a higher @@ -581,9 +581,11 @@ advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised the client curClientStreams uint32 // number of open streams initiated by the client curPushedStreams uint32 // number of open streams initiated by server push + curHandlers uint32 // number of running handler goroutines maxClientStreamID uint32 // max ever seen from client (odd), or 0 if there have been no client requests maxPushPromiseID uint32 // ID of the last push promise (even), or 0 if there have been no pushes streams map[uint32]*stream + unstartedHandlers []unstartedHandler initialStreamSendWindowSize int32 maxFrameSize int32 peerMaxHeaderListSize uint32 // zero means unknown (default) @@ -981,6 +983,8 @@ return case gracefulShutdownMsg: sc.startGracefulShutdownInternal() + case handlerDoneMsg: + sc.handlerDone() default: panic("unknown timer") } @@ -1012,14 +1016,6 @@ } } -func (sc *serverConn) awaitGracefulShutdown(sharedCh <-chan struct{}, privateCh chan struct{}) { - select { - case <-sc.doneServing: - case <-sharedCh: - close(privateCh) - } -} - type serverMessage int // Message values sent to serveMsgCh. @@ -1028,6 +1024,7 @@ idleTimerMsg = new(serverMessage) shutdownTimerMsg = new(serverMessage) gracefulShutdownMsg = new(serverMessage) + handlerDoneMsg = new(serverMessage) ) func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) } @@ -1900,9 +1897,11 @@ // onReadTimeout is run on its own goroutine (from time.AfterFunc) // when the stream's ReadTimeout has fired. func (st *stream) onReadTimeout() { - // Wrap the ErrDeadlineExceeded to avoid callers depending on us - // returning the bare error. - st.body.CloseWithError(fmt.Errorf("%w", os.ErrDeadlineExceeded)) + if st.body != nil { + // Wrap the ErrDeadlineExceeded to avoid callers depending on us + // returning the bare error. + st.body.CloseWithError(fmt.Errorf("%w", os.ErrDeadlineExceeded)) + } } // onWriteTimeout is run on its own goroutine (from time.AfterFunc) @@ -2020,13 +2019,10 @@ // (in Go 1.8), though. That's a more sane option anyway. if sc.hs.ReadTimeout != 0 { sc.conn.SetReadDeadline(time.Time{}) - if st.body != nil { - st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout) - } + st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout) } - go sc.runHandler(rw, req, handler) - return nil + return sc.scheduleHandler(id, rw, req, handler) } func (sc *serverConn) upgradeRequest(req *http.Request) { @@ -2046,6 +2042,10 @@ sc.conn.SetReadDeadline(time.Time{}) } + // This is the first request on the connection, + // so start the handler directly rather than going + // through scheduleHandler. + sc.curHandlers++ go sc.runHandler(rw, req, sc.handler.ServeHTTP) } @@ -2286,8 +2286,62 @@ return &responseWriter{rws: rws} } +type unstartedHandler struct { + streamID uint32 + rw *responseWriter + req *http.Request + handler func(http.ResponseWriter, *http.Request) +} + +// scheduleHandler starts a handler goroutine, +// or schedules one to start as soon as an existing handler finishes. 
+func (sc *serverConn) scheduleHandler(streamID uint32, rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) error { + sc.serveG.check() + maxHandlers := sc.advMaxStreams + if sc.curHandlers < maxHandlers { + sc.curHandlers++ + go sc.runHandler(rw, req, handler) + return nil + } + if len(sc.unstartedHandlers) > int(4*sc.advMaxStreams) { + return sc.countError("too_many_early_resets", ConnectionError(ErrCodeEnhanceYourCalm)) + } + sc.unstartedHandlers = append(sc.unstartedHandlers, unstartedHandler{ + streamID: streamID, + rw: rw, + req: req, + handler: handler, + }) + return nil +} + +func (sc *serverConn) handlerDone() { + sc.serveG.check() + sc.curHandlers-- + i := 0 + maxHandlers := sc.advMaxStreams + for ; i < len(sc.unstartedHandlers); i++ { + u := sc.unstartedHandlers[i] + if sc.streams[u.streamID] == nil { + // This stream was reset before its goroutine had a chance to start. + continue + } + if sc.curHandlers >= maxHandlers { + break + } + sc.curHandlers++ + go sc.runHandler(u.rw, u.req, u.handler) + sc.unstartedHandlers[i] = unstartedHandler{} // don't retain references + } + sc.unstartedHandlers = sc.unstartedHandlers[i:] + if len(sc.unstartedHandlers) == 0 { + sc.unstartedHandlers = nil + } +} + // Run on its own goroutine. func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) { + defer sc.sendServeMsg(handlerDoneMsg) didPanic := true defer func() { rw.rws.stream.cancelCtx() @@ -2429,7 +2483,7 @@ conn *serverConn closeOnce sync.Once // for use by Close only sawEOF bool // for use by Read only - pipe *pipe // non-nil if we have a HTTP entity message body + pipe *pipe // non-nil if we have an HTTP entity message body needsContinue bool // need to send a 100-continue } @@ -2569,7 +2623,8 @@ clen = "" } } - if clen == "" && rws.handlerDone && bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) { + _, hasContentLength := rws.snapHeader["Content-Length"] + if !hasContentLength && clen == "" && rws.handlerDone && bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) { clen = strconv.Itoa(len(p)) } _, hasContentType := rws.snapHeader["Content-Type"] @@ -2774,7 +2829,7 @@ err = rws.bw.Flush() } else { // The bufio.Writer won't call chunkWriter.Write - // (writeChunk with zero bytes, so we have to do it + // (writeChunk with zero bytes), so we have to do it // ourselves to force the HTTP response header and/or // final DATA frame (with END_STREAM) to be sent. _, err = chunkWriter{rws}.Write(nil) diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/net/http2/transport.go temporal-1.22.5/src/vendor/golang.org/x/net/http2/transport.go --- temporal-1.21.5-1/src/vendor/golang.org/x/net/http2/transport.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/net/http2/transport.go 2024-02-23 09:46:14.000000000 +0000 @@ -19,6 +19,7 @@ "io/fs" "log" "math" + "math/bits" mathrand "math/rand" "net" "net/http" @@ -290,8 +291,7 @@ // HTTP/2 server. 
type ClientConn struct { t *Transport - tconn net.Conn // usually *tls.Conn, except specialized impls - tconnClosed bool + tconn net.Conn // usually *tls.Conn, except specialized impls tlsState *tls.ConnectionState // nil only for specialized impls reused uint32 // whether conn is being reused; atomic singleUse bool // whether being used for a single http.Request @@ -518,11 +518,14 @@ func authorityAddr(scheme string, authority string) (addr string) { host, port, err := net.SplitHostPort(authority) if err != nil { // authority didn't have a port + host = authority + port = "" + } + if port == "" { // authority's port was empty port = "443" if scheme == "http" { port = "80" } - host = authority } if a, err := idna.ToASCII(host); err == nil { host = a @@ -1268,21 +1271,23 @@ cancelRequest := func(cs *clientStream, err error) error { cs.cc.mu.Lock() - defer cs.cc.mu.Unlock() - cs.abortStreamLocked(err) - if cs.ID != 0 { - // This request may have failed because of a problem with the connection, - // or for some unrelated reason. (For example, the user might have canceled - // the request without waiting for a response.) Mark the connection as - // not reusable, since trying to reuse a dead connection is worse than - // unnecessarily creating a new one. - // - // If cs.ID is 0, then the request was never allocated a stream ID and - // whatever went wrong was unrelated to the connection. We might have - // timed out waiting for a stream slot when StrictMaxConcurrentStreams - // is set, for example, in which case retrying on a different connection - // will not help. - cs.cc.doNotReuse = true + bodyClosed := cs.reqBodyClosed + cs.cc.mu.Unlock() + // Wait for the request body to be closed. + // + // If nothing closed the body before now, abortStreamLocked + // will have started a goroutine to close it. + // + // Closing the body before returning avoids a race condition + // with net/http checking its readTrackingBody to see if the + // body was read from or closed. See golang/go#60041. + // + // The body is closed in a separate goroutine without the + // connection mutex held, but dropping the mutex before waiting + // will keep us from holding it indefinitely if the body + // close is slow for some reason. + if bodyClosed != nil { + <-bodyClosed } return err } @@ -1301,11 +1306,14 @@ return handleResponseHeaders() default: waitDone() - return nil, cancelRequest(cs, cs.abortErr) + return nil, cs.abortErr } case <-ctx.Done(): - return nil, cancelRequest(cs, ctx.Err()) + err := ctx.Err() + cs.abortStream(err) + return nil, cancelRequest(cs, err) case <-cs.reqCancel: + cs.abortStream(errRequestCanceled) return nil, cancelRequest(cs, errRequestCanceled) } } @@ -1672,7 +1680,27 @@ return int(n) // doesn't truncate; max is 512K } -var bufPool sync.Pool // of *[]byte +// Seven bufPools manage different frame sizes. This helps to avoid scenarios where long-running +// streaming requests using small frame sizes occupy large buffers initially allocated for prior +// requests needing big buffers. The size ranges are as follows: +// {0 KB, 16 KB], {16 KB, 32 KB], {32 KB, 64 KB], {64 KB, 128 KB], {128 KB, 256 KB], +// {256 KB, 512 KB], {512 KB, infinity} +// In practice, the maximum scratch buffer size should not exceed 512 KB due to +// frameScratchBufferLen(maxFrameSize), thus the "infinity pool" should never be used. +// It exists mainly as a safety measure, for potential future increases in max buffer size. 
+var bufPools [7]sync.Pool // of *[]byte +func bufPoolIndex(size int) int { + if size <= 16384 { + return 0 + } + size -= 1 + bits := bits.Len(uint(size)) + index := bits - 14 + if index >= len(bufPools) { + return len(bufPools) - 1 + } + return index +} func (cs *clientStream) writeRequestBody(req *http.Request) (err error) { cc := cs.cc @@ -1690,12 +1718,13 @@ // Scratch buffer for reading into & writing from. scratchLen := cs.frameScratchBufferLen(maxFrameSize) var buf []byte - if bp, ok := bufPool.Get().(*[]byte); ok && len(*bp) >= scratchLen { - defer bufPool.Put(bp) + index := bufPoolIndex(scratchLen) + if bp, ok := bufPools[index].Get().(*[]byte); ok && len(*bp) >= scratchLen { + defer bufPools[index].Put(bp) buf = *bp } else { buf = make([]byte, scratchLen) - defer bufPool.Put(&buf) + defer bufPools[index].Put(&buf) } var sawEOF bool @@ -1863,6 +1892,9 @@ if err != nil { return nil, err } + if !httpguts.ValidHostHeader(host) { + return nil, errors.New("http2: invalid Host header") + } var path string if req.Method != "CONNECT" { @@ -1899,7 +1931,7 @@ // 8.1.2.3 Request Pseudo-Header Fields // The :path pseudo-header field includes the path and query parts of the // target URI (the path-absolute production and optionally a '?' character - // followed by the query production (see Sections 3.3 and 3.4 of + // followed by the query production, see Sections 3.3 and 3.4 of // [RFC3986]). f(":authority", host) m := req.Method diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/net/http2/writesched.go temporal-1.22.5/src/vendor/golang.org/x/net/http2/writesched.go --- temporal-1.21.5-1/src/vendor/golang.org/x/net/http2/writesched.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/net/http2/writesched.go 2024-02-23 09:46:14.000000000 +0000 @@ -184,7 +184,8 @@ // writeQueue is used by implementations of WriteScheduler. type writeQueue struct { - s []FrameWriteRequest + s []FrameWriteRequest + prev, next *writeQueue } func (q *writeQueue) empty() bool { return len(q.s) == 0 } diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/net/http2/writesched_roundrobin.go temporal-1.22.5/src/vendor/golang.org/x/net/http2/writesched_roundrobin.go --- temporal-1.21.5-1/src/vendor/golang.org/x/net/http2/writesched_roundrobin.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/net/http2/writesched_roundrobin.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,119 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "fmt" + "math" +) + +type roundRobinWriteScheduler struct { + // control contains control frames (SETTINGS, PING, etc.). + control writeQueue + + // streams maps stream ID to a queue. + streams map[uint32]*writeQueue + + // stream queues are stored in a circular linked list. + // head is the next stream to write, or nil if there are no streams open. + head *writeQueue + + // pool of empty queues for reuse. + queuePool writeQueuePool +} + +// newRoundRobinWriteScheduler constructs a new write scheduler. +// The round robin scheduler priorizes control frames +// like SETTINGS and PING over DATA frames. +// When there are no control frames to send, it performs a round-robin +// selection from the ready streams. 
+func newRoundRobinWriteScheduler() WriteScheduler { + ws := &roundRobinWriteScheduler{ + streams: make(map[uint32]*writeQueue), + } + return ws +} + +func (ws *roundRobinWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) { + if ws.streams[streamID] != nil { + panic(fmt.Errorf("stream %d already opened", streamID)) + } + q := ws.queuePool.get() + ws.streams[streamID] = q + if ws.head == nil { + ws.head = q + q.next = q + q.prev = q + } else { + // Queues are stored in a ring. + // Insert the new stream before ws.head, putting it at the end of the list. + q.prev = ws.head.prev + q.next = ws.head + q.prev.next = q + q.next.prev = q + } +} + +func (ws *roundRobinWriteScheduler) CloseStream(streamID uint32) { + q := ws.streams[streamID] + if q == nil { + return + } + if q.next == q { + // This was the only open stream. + ws.head = nil + } else { + q.prev.next = q.next + q.next.prev = q.prev + if ws.head == q { + ws.head = q.next + } + } + delete(ws.streams, streamID) + ws.queuePool.put(q) +} + +func (ws *roundRobinWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) {} + +func (ws *roundRobinWriteScheduler) Push(wr FrameWriteRequest) { + if wr.isControl() { + ws.control.push(wr) + return + } + q := ws.streams[wr.StreamID()] + if q == nil { + // This is a closed stream. + // wr should not be a HEADERS or DATA frame. + // We push the request onto the control queue. + if wr.DataSize() > 0 { + panic("add DATA on non-open stream") + } + ws.control.push(wr) + return + } + q.push(wr) +} + +func (ws *roundRobinWriteScheduler) Pop() (FrameWriteRequest, bool) { + // Control and RST_STREAM frames first. + if !ws.control.empty() { + return ws.control.shift(), true + } + if ws.head == nil { + return FrameWriteRequest{}, false + } + q := ws.head + for { + if wr, ok := q.consume(math.MaxInt32); ok { + ws.head = q.next + return wr, true + } + q = q.next + if q == ws.head { + break + } + } + return FrameWriteRequest{}, false +} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/net/idna/idna9.0.0.go temporal-1.22.5/src/vendor/golang.org/x/net/idna/idna9.0.0.go --- temporal-1.21.5-1/src/vendor/golang.org/x/net/idna/idna9.0.0.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/net/idna/idna9.0.0.go 2024-02-23 09:46:14.000000000 +0000 @@ -121,7 +121,7 @@ } } -// StrictDomainName limits the set of permissable ASCII characters to those +// StrictDomainName limits the set of permissible ASCII characters to those // allowed in domain names as defined in RFC 1034 (A-Z, a-z, 0-9 and the // hyphen). This is set by default for MapForLookup and ValidateForRegistration, // but is only useful if ValidateLabels is set. diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/net/idna/tables13.0.0.go temporal-1.22.5/src/vendor/golang.org/x/net/idna/tables13.0.0.go --- temporal-1.21.5-1/src/vendor/golang.org/x/net/idna/tables13.0.0.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/net/idna/tables13.0.0.go 2024-02-23 09:46:14.000000000 +0000 @@ -1,151 +1,294 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. -//go:build go1.16 -// +build go1.16 +//go:build go1.16 && !go1.21 +// +build go1.16,!go1.21 package idna // UnicodeVersion is the Unicode version from which the tables in this package are derived. 
const UnicodeVersion = "13.0.0" -var mappings string = "" + // Size: 8188 bytes - "\x00\x01 \x03 ̈\x01a\x03 ̄\x012\x013\x03 ́\x03 ̧\x011\x01o\x051⁄4\x051⁄2" + - "\x053⁄4\x03i̇\x03l·\x03ʼn\x01s\x03dž\x03ⱥ\x03ⱦ\x01h\x01j\x01r\x01w\x01y" + - "\x03 ̆\x03 ̇\x03 ̊\x03 ̨\x03 ̃\x03 ̋\x01l\x01x\x04̈́\x03 ι\x01;\x05 ̈́" + - "\x04եւ\x04اٴ\x04وٴ\x04ۇٴ\x04يٴ\x06क़\x06ख़\x06ग़\x06ज़\x06ड़\x06ढ़\x06फ़" + - "\x06य़\x06ড়\x06ঢ়\x06য়\x06ਲ਼\x06ਸ਼\x06ਖ਼\x06ਗ਼\x06ਜ਼\x06ਫ਼\x06ଡ଼\x06ଢ଼" + - "\x06ํา\x06ໍາ\x06ຫນ\x06ຫມ\x06གྷ\x06ཌྷ\x06དྷ\x06བྷ\x06ཛྷ\x06ཀྵ\x06ཱི\x06ཱུ" + - "\x06ྲྀ\x09ྲཱྀ\x06ླྀ\x09ླཱྀ\x06ཱྀ\x06ྒྷ\x06ྜྷ\x06ྡྷ\x06ྦྷ\x06ྫྷ\x06ྐྵ\x02" + - "в\x02д\x02о\x02с\x02т\x02ъ\x02ѣ\x02æ\x01b\x01d\x01e\x02ǝ\x01g\x01i\x01k" + - "\x01m\x01n\x02ȣ\x01p\x01t\x01u\x02ɐ\x02ɑ\x02ə\x02ɛ\x02ɜ\x02ŋ\x02ɔ\x02ɯ" + - "\x01v\x02β\x02γ\x02δ\x02φ\x02χ\x02ρ\x02н\x02ɒ\x01c\x02ɕ\x02ð\x01f\x02ɟ" + - "\x02ɡ\x02ɥ\x02ɨ\x02ɩ\x02ɪ\x02ʝ\x02ɭ\x02ʟ\x02ɱ\x02ɰ\x02ɲ\x02ɳ\x02ɴ\x02ɵ" + - "\x02ɸ\x02ʂ\x02ʃ\x02ƫ\x02ʉ\x02ʊ\x02ʋ\x02ʌ\x01z\x02ʐ\x02ʑ\x02ʒ\x02θ\x02ss" + - "\x02ά\x02έ\x02ή\x02ί\x02ό\x02ύ\x02ώ\x05ἀι\x05ἁι\x05ἂι\x05ἃι\x05ἄι\x05ἅι" + - "\x05ἆι\x05ἇι\x05ἠι\x05ἡι\x05ἢι\x05ἣι\x05ἤι\x05ἥι\x05ἦι\x05ἧι\x05ὠι\x05ὡι" + - "\x05ὢι\x05ὣι\x05ὤι\x05ὥι\x05ὦι\x05ὧι\x05ὰι\x04αι\x04άι\x05ᾶι\x02ι\x05 ̈͂" + - "\x05ὴι\x04ηι\x04ήι\x05ῆι\x05 ̓̀\x05 ̓́\x05 ̓͂\x02ΐ\x05 ̔̀\x05 ̔́\x05 ̔͂" + - "\x02ΰ\x05 ̈̀\x01`\x05ὼι\x04ωι\x04ώι\x05ῶι\x06′′\x09′′′\x06‵‵\x09‵‵‵\x02!" + - "!\x02??\x02?!\x02!?\x0c′′′′\x010\x014\x015\x016\x017\x018\x019\x01+\x01=" + - "\x01(\x01)\x02rs\x02ħ\x02no\x01q\x02sm\x02tm\x02ω\x02å\x02א\x02ב\x02ג" + - "\x02ד\x02π\x051⁄7\x051⁄9\x061⁄10\x051⁄3\x052⁄3\x051⁄5\x052⁄5\x053⁄5\x054" + - "⁄5\x051⁄6\x055⁄6\x051⁄8\x053⁄8\x055⁄8\x057⁄8\x041⁄\x02ii\x02iv\x02vi" + - "\x04viii\x02ix\x02xi\x050⁄3\x06∫∫\x09∫∫∫\x06∮∮\x09∮∮∮\x0210\x0211\x0212" + - "\x0213\x0214\x0215\x0216\x0217\x0218\x0219\x0220\x04(10)\x04(11)\x04(12)" + - "\x04(13)\x04(14)\x04(15)\x04(16)\x04(17)\x04(18)\x04(19)\x04(20)\x0c∫∫∫∫" + - "\x02==\x05⫝̸\x02ɫ\x02ɽ\x02ȿ\x02ɀ\x01.\x04 ゙\x04 ゚\x06より\x06コト\x05(ᄀ)\x05" + - "(ᄂ)\x05(ᄃ)\x05(ᄅ)\x05(ᄆ)\x05(ᄇ)\x05(ᄉ)\x05(ᄋ)\x05(ᄌ)\x05(ᄎ)\x05(ᄏ)\x05(ᄐ" + - ")\x05(ᄑ)\x05(ᄒ)\x05(가)\x05(나)\x05(다)\x05(라)\x05(마)\x05(바)\x05(사)\x05(아)" + - "\x05(자)\x05(차)\x05(카)\x05(타)\x05(파)\x05(하)\x05(주)\x08(오전)\x08(오후)\x05(一)" + - "\x05(二)\x05(三)\x05(四)\x05(五)\x05(六)\x05(七)\x05(八)\x05(九)\x05(十)\x05(月)" + - "\x05(火)\x05(水)\x05(木)\x05(金)\x05(土)\x05(日)\x05(株)\x05(有)\x05(社)\x05(名)" + - "\x05(特)\x05(財)\x05(祝)\x05(労)\x05(代)\x05(呼)\x05(学)\x05(監)\x05(企)\x05(資)" + - "\x05(協)\x05(祭)\x05(休)\x05(自)\x05(至)\x0221\x0222\x0223\x0224\x0225\x0226" + - "\x0227\x0228\x0229\x0230\x0231\x0232\x0233\x0234\x0235\x06참고\x06주의\x0236" + - "\x0237\x0238\x0239\x0240\x0241\x0242\x0243\x0244\x0245\x0246\x0247\x0248" + - "\x0249\x0250\x041月\x042月\x043月\x044月\x045月\x046月\x047月\x048月\x049月\x0510" + - "月\x0511月\x0512月\x02hg\x02ev\x06令和\x0cアパート\x0cアルファ\x0cアンペア\x09アール\x0cイニ" + - "ング\x09インチ\x09ウォン\x0fエスクード\x0cエーカー\x09オンス\x09オーム\x09カイリ\x0cカラット\x0cカロリー" + - "\x09ガロン\x09ガンマ\x06ギガ\x09ギニー\x0cキュリー\x0cギルダー\x06キロ\x0fキログラム\x12キロメートル\x0f" + - "キロワット\x09グラム\x0fグラムトン\x0fクルゼイロ\x0cクローネ\x09ケース\x09コルナ\x09コーポ\x0cサイクル" + - "\x0fサンチーム\x0cシリング\x09センチ\x09セント\x09ダース\x06デシ\x06ドル\x06トン\x06ナノ\x09ノット" + - "\x09ハイツ\x0fパーセント\x09パーツ\x0cバーレル\x0fピアストル\x09ピクル\x06ピコ\x06ビル\x0fファラッド\x0c" + - "フィート\x0fブッシェル\x09フラン\x0fヘクタール\x06ペソ\x09ペニヒ\x09ヘルツ\x09ペンス\x09ページ\x09ベータ" + - "\x0cポイント\x09ボルト\x06ホン\x09ポンド\x09ホール\x09ホーン\x0cマイクロ\x09マイル\x09マッハ\x09マルク" + - "\x0fマンション\x0cミクロン\x06ミリ\x0fミリバール\x06メガ\x0cメガトン\x0cメートル\x09ヤード\x09ヤール\x09" + - 
"ユアン\x0cリットル\x06リラ\x09ルピー\x0cルーブル\x06レム\x0fレントゲン\x09ワット\x040点\x041点\x04" + - "2点\x043点\x044点\x045点\x046点\x047点\x048点\x049点\x0510点\x0511点\x0512点\x0513点" + - "\x0514点\x0515点\x0516点\x0517点\x0518点\x0519点\x0520点\x0521点\x0522点\x0523点" + - "\x0524点\x02da\x02au\x02ov\x02pc\x02dm\x02iu\x06平成\x06昭和\x06大正\x06明治\x0c株" + - "式会社\x02pa\x02na\x02ma\x02ka\x02kb\x02mb\x02gb\x04kcal\x02pf\x02nf\x02m" + - "g\x02kg\x02hz\x02ml\x02dl\x02kl\x02fm\x02nm\x02mm\x02cm\x02km\x02m2\x02m" + - "3\x05m∕s\x06m∕s2\x07rad∕s\x08rad∕s2\x02ps\x02ns\x02ms\x02pv\x02nv\x02mv" + - "\x02kv\x02pw\x02nw\x02mw\x02kw\x02bq\x02cc\x02cd\x06c∕kg\x02db\x02gy\x02" + - "ha\x02hp\x02in\x02kk\x02kt\x02lm\x02ln\x02lx\x02ph\x02pr\x02sr\x02sv\x02" + - "wb\x05v∕m\x05a∕m\x041日\x042日\x043日\x044日\x045日\x046日\x047日\x048日\x049日" + - "\x0510日\x0511日\x0512日\x0513日\x0514日\x0515日\x0516日\x0517日\x0518日\x0519日" + - "\x0520日\x0521日\x0522日\x0523日\x0524日\x0525日\x0526日\x0527日\x0528日\x0529日" + - "\x0530日\x0531日\x02ь\x02ɦ\x02ɬ\x02ʞ\x02ʇ\x02œ\x02ʍ\x04𤋮\x04𢡊\x04𢡄\x04𣏕" + - "\x04𥉉\x04𥳐\x04𧻓\x02ff\x02fi\x02fl\x02st\x04մն\x04մե\x04մի\x04վն\x04մխ" + - "\x04יִ\x04ײַ\x02ע\x02ה\x02כ\x02ל\x02ם\x02ר\x02ת\x04שׁ\x04שׂ\x06שּׁ\x06שּ" + - "ׂ\x04אַ\x04אָ\x04אּ\x04בּ\x04גּ\x04דּ\x04הּ\x04וּ\x04זּ\x04טּ\x04יּ\x04" + - "ךּ\x04כּ\x04לּ\x04מּ\x04נּ\x04סּ\x04ףּ\x04פּ\x04צּ\x04קּ\x04רּ\x04שּ" + - "\x04תּ\x04וֹ\x04בֿ\x04כֿ\x04פֿ\x04אל\x02ٱ\x02ٻ\x02پ\x02ڀ\x02ٺ\x02ٿ\x02ٹ" + - "\x02ڤ\x02ڦ\x02ڄ\x02ڃ\x02چ\x02ڇ\x02ڍ\x02ڌ\x02ڎ\x02ڈ\x02ژ\x02ڑ\x02ک\x02گ" + - "\x02ڳ\x02ڱ\x02ں\x02ڻ\x02ۀ\x02ہ\x02ھ\x02ے\x02ۓ\x02ڭ\x02ۇ\x02ۆ\x02ۈ\x02ۋ" + - "\x02ۅ\x02ۉ\x02ې\x02ى\x04ئا\x04ئە\x04ئو\x04ئۇ\x04ئۆ\x04ئۈ\x04ئې\x04ئى\x02" + - "ی\x04ئج\x04ئح\x04ئم\x04ئي\x04بج\x04بح\x04بخ\x04بم\x04بى\x04بي\x04تج\x04" + - "تح\x04تخ\x04تم\x04تى\x04تي\x04ثج\x04ثم\x04ثى\x04ثي\x04جح\x04جم\x04حج" + - "\x04حم\x04خج\x04خح\x04خم\x04سج\x04سح\x04سخ\x04سم\x04صح\x04صم\x04ضج\x04ضح" + - "\x04ضخ\x04ضم\x04طح\x04طم\x04ظم\x04عج\x04عم\x04غج\x04غم\x04فج\x04فح\x04فخ" + - "\x04فم\x04فى\x04في\x04قح\x04قم\x04قى\x04قي\x04كا\x04كج\x04كح\x04كخ\x04كل" + - "\x04كم\x04كى\x04كي\x04لج\x04لح\x04لخ\x04لم\x04لى\x04لي\x04مج\x04مح\x04مخ" + - "\x04مم\x04مى\x04مي\x04نج\x04نح\x04نخ\x04نم\x04نى\x04ني\x04هج\x04هم\x04هى" + - "\x04هي\x04يج\x04يح\x04يخ\x04يم\x04يى\x04يي\x04ذٰ\x04رٰ\x04ىٰ\x05 ٌّ\x05 " + - "ٍّ\x05 َّ\x05 ُّ\x05 ِّ\x05 ّٰ\x04ئر\x04ئز\x04ئن\x04بر\x04بز\x04بن\x04ت" + - "ر\x04تز\x04تن\x04ثر\x04ثز\x04ثن\x04ما\x04نر\x04نز\x04نن\x04ير\x04يز\x04" + - "ين\x04ئخ\x04ئه\x04به\x04ته\x04صخ\x04له\x04نه\x04هٰ\x04يه\x04ثه\x04سه" + - "\x04شم\x04شه\x06ـَّ\x06ـُّ\x06ـِّ\x04طى\x04طي\x04عى\x04عي\x04غى\x04غي" + - "\x04سى\x04سي\x04شى\x04شي\x04حى\x04حي\x04جى\x04جي\x04خى\x04خي\x04صى\x04صي" + - "\x04ضى\x04ضي\x04شج\x04شح\x04شخ\x04شر\x04سر\x04صر\x04ضر\x04اً\x06تجم\x06ت" + - "حج\x06تحم\x06تخم\x06تمج\x06تمح\x06تمخ\x06جمح\x06حمي\x06حمى\x06سحج\x06سج" + - "ح\x06سجى\x06سمح\x06سمج\x06سمم\x06صحح\x06صمم\x06شحم\x06شجي\x06شمخ\x06شمم" + - "\x06ضحى\x06ضخم\x06طمح\x06طمم\x06طمي\x06عجم\x06عمم\x06عمى\x06غمم\x06غمي" + - "\x06غمى\x06فخم\x06قمح\x06قمم\x06لحم\x06لحي\x06لحى\x06لجج\x06لخم\x06لمح" + - "\x06محج\x06محم\x06محي\x06مجح\x06مجم\x06مخج\x06مخم\x06مجخ\x06همج\x06همم" + - "\x06نحم\x06نحى\x06نجم\x06نجى\x06نمي\x06نمى\x06يمم\x06بخي\x06تجي\x06تجى" + - "\x06تخي\x06تخى\x06تمي\x06تمى\x06جمي\x06جحى\x06جمى\x06سخى\x06صحي\x06شحي" + - "\x06ضحي\x06لجي\x06لمي\x06يحي\x06يجي\x06يمي\x06ممي\x06قمي\x06نحي\x06عمي" + - "\x06كمي\x06نجح\x06مخي\x06لجم\x06كمم\x06جحي\x06حجي\x06مجي\x06فمي\x06بحي" + - "\x06سخي\x06نجي\x06صلے\x06قلے\x08الله\x08اكبر\x08محمد\x08صلعم\x08رسول\x08" + - "عليه\x08وسلم\x06صلى!صلى الله عليه وسلم\x0fجل 
جلاله\x08ریال\x01,\x01:" + - "\x01!\x01?\x01_\x01{\x01}\x01[\x01]\x01#\x01&\x01*\x01-\x01<\x01>\x01\\" + - "\x01$\x01%\x01@\x04ـً\x04ـَ\x04ـُ\x04ـِ\x04ـّ\x04ـْ\x02ء\x02آ\x02أ\x02ؤ" + - "\x02إ\x02ئ\x02ا\x02ب\x02ة\x02ت\x02ث\x02ج\x02ح\x02خ\x02د\x02ذ\x02ر\x02ز" + - "\x02س\x02ش\x02ص\x02ض\x02ط\x02ظ\x02ع\x02غ\x02ف\x02ق\x02ك\x02ل\x02م\x02ن" + - "\x02ه\x02و\x02ي\x04لآ\x04لأ\x04لإ\x04لا\x01\x22\x01'\x01/\x01^\x01|\x01~" + - "\x02¢\x02£\x02¬\x02¦\x02¥\x08𝅗𝅥\x08𝅘𝅥\x0c𝅘𝅥𝅮\x0c𝅘𝅥𝅯\x0c𝅘𝅥𝅰\x0c𝅘𝅥𝅱\x0c𝅘𝅥𝅲" + - "\x08𝆹𝅥\x08𝆺𝅥\x0c𝆹𝅥𝅮\x0c𝆺𝅥𝅮\x0c𝆹𝅥𝅯\x0c𝆺𝅥𝅯\x02ı\x02ȷ\x02α\x02ε\x02ζ\x02η" + - "\x02κ\x02λ\x02μ\x02ν\x02ξ\x02ο\x02σ\x02τ\x02υ\x02ψ\x03∇\x03∂\x02ϝ\x02ٮ" + - "\x02ڡ\x02ٯ\x020,\x021,\x022,\x023,\x024,\x025,\x026,\x027,\x028,\x029," + - "\x03(a)\x03(b)\x03(c)\x03(d)\x03(e)\x03(f)\x03(g)\x03(h)\x03(i)\x03(j)" + - "\x03(k)\x03(l)\x03(m)\x03(n)\x03(o)\x03(p)\x03(q)\x03(r)\x03(s)\x03(t)" + - "\x03(u)\x03(v)\x03(w)\x03(x)\x03(y)\x03(z)\x07〔s〕\x02wz\x02hv\x02sd\x03p" + - "pv\x02wc\x02mc\x02md\x02mr\x02dj\x06ほか\x06ココ\x03サ\x03手\x03字\x03双\x03デ" + - "\x03二\x03多\x03解\x03天\x03交\x03映\x03無\x03料\x03前\x03後\x03再\x03新\x03初\x03終" + - "\x03生\x03販\x03声\x03吹\x03演\x03投\x03捕\x03一\x03三\x03遊\x03左\x03中\x03右\x03指" + - "\x03走\x03打\x03禁\x03空\x03合\x03満\x03有\x03月\x03申\x03割\x03営\x03配\x09〔本〕\x09〔" + - "三〕\x09〔二〕\x09〔安〕\x09〔点〕\x09〔打〕\x09〔盗〕\x09〔勝〕\x09〔敗〕\x03得\x03可\x03丽\x03" + - "丸\x03乁\x03你\x03侮\x03侻\x03倂\x03偺\x03備\x03僧\x03像\x03㒞\x03免\x03兔\x03兤\x03" + - "具\x03㒹\x03內\x03冗\x03冤\x03仌\x03冬\x03况\x03凵\x03刃\x03㓟\x03刻\x03剆\x03剷\x03" + - "㔕\x03勇\x03勉\x03勤\x03勺\x03包\x03匆\x03北\x03卉\x03卑\x03博\x03即\x03卽\x03卿\x03" + - "灰\x03及\x03叟\x03叫\x03叱\x03吆\x03咞\x03吸\x03呈\x03周\x03咢\x03哶\x03唐\x03啓\x03" + - "啣\x03善\x03喙\x03喫\x03喳\x03嗂\x03圖\x03嘆\x03圗\x03噑\x03噴\x03切\x03壮\x03城\x03" + - "埴\x03堍\x03型\x03堲\x03報\x03墬\x03売\x03壷\x03夆\x03夢\x03奢\x03姬\x03娛\x03娧\x03" + - "姘\x03婦\x03㛮\x03嬈\x03嬾\x03寃\x03寘\x03寧\x03寳\x03寿\x03将\x03尢\x03㞁\x03屠\x03" + - "屮\x03峀\x03岍\x03嵃\x03嵮\x03嵫\x03嵼\x03巡\x03巢\x03㠯\x03巽\x03帨\x03帽\x03幩\x03" + - "㡢\x03㡼\x03庰\x03庳\x03庶\x03廊\x03廾\x03舁\x03弢\x03㣇\x03形\x03彫\x03㣣\x03徚\x03" + - "忍\x03志\x03忹\x03悁\x03㤺\x03㤜\x03悔\x03惇\x03慈\x03慌\x03慎\x03慺\x03憎\x03憲\x03" + - "憤\x03憯\x03懞\x03懲\x03懶\x03成\x03戛\x03扝\x03抱\x03拔\x03捐\x03挽\x03拼\x03捨\x03" + - "掃\x03揤\x03搢\x03揅\x03掩\x03㨮\x03摩\x03摾\x03撝\x03摷\x03㩬\x03敏\x03敬\x03旣\x03" + - "書\x03晉\x03㬙\x03暑\x03㬈\x03㫤\x03冒\x03冕\x03最\x03暜\x03肭\x03䏙\x03朗\x03望\x03" + - "朡\x03杞\x03杓\x03㭉\x03柺\x03枅\x03桒\x03梅\x03梎\x03栟\x03椔\x03㮝\x03楂\x03榣\x03" + - "槪\x03檨\x03櫛\x03㰘\x03次\x03歔\x03㱎\x03歲\x03殟\x03殺\x03殻\x03汎\x03沿\x03泍\x03" + - "汧\x03洖\x03派\x03海\x03流\x03浩\x03浸\x03涅\x03洴\x03港\x03湮\x03㴳\x03滋\x03滇\x03" + - "淹\x03潮\x03濆\x03瀹\x03瀞\x03瀛\x03㶖\x03灊\x03災\x03灷\x03炭\x03煅\x03熜\x03爨\x03" + - "爵\x03牐\x03犀\x03犕\x03獺\x03王\x03㺬\x03玥\x03㺸\x03瑇\x03瑜\x03瑱\x03璅\x03瓊\x03" + - "㼛\x03甤\x03甾\x03異\x03瘐\x03㿼\x03䀈\x03直\x03眞\x03真\x03睊\x03䀹\x03瞋\x03䁆\x03" + - "䂖\x03硎\x03碌\x03磌\x03䃣\x03祖\x03福\x03秫\x03䄯\x03穀\x03穊\x03穏\x03䈂\x03篆\x03" + - "築\x03䈧\x03糒\x03䊠\x03糨\x03糣\x03紀\x03絣\x03䌁\x03緇\x03縂\x03繅\x03䌴\x03䍙\x03" + - "罺\x03羕\x03翺\x03者\x03聠\x03聰\x03䏕\x03育\x03脃\x03䐋\x03脾\x03媵\x03舄\x03辞\x03" + - "䑫\x03芑\x03芋\x03芝\x03劳\x03花\x03芳\x03芽\x03苦\x03若\x03茝\x03荣\x03莭\x03茣\x03" + - "莽\x03菧\x03著\x03荓\x03菊\x03菌\x03菜\x03䔫\x03蓱\x03蓳\x03蔖\x03蕤\x03䕝\x03䕡\x03" + - "䕫\x03虐\x03虜\x03虧\x03虩\x03蚩\x03蚈\x03蜎\x03蛢\x03蝹\x03蜨\x03蝫\x03螆\x03蟡\x03" + - "蠁\x03䗹\x03衠\x03衣\x03裗\x03裞\x03䘵\x03裺\x03㒻\x03䚾\x03䛇\x03誠\x03諭\x03變\x03" + - "豕\x03貫\x03賁\x03贛\x03起\x03跋\x03趼\x03跰\x03軔\x03輸\x03邔\x03郱\x03鄑\x03鄛\x03" + - "鈸\x03鋗\x03鋘\x03鉼\x03鏹\x03鐕\x03開\x03䦕\x03閷\x03䧦\x03雃\x03嶲\x03霣\x03䩮\x03" + - 
"䩶\x03韠\x03䪲\x03頋\x03頩\x03飢\x03䬳\x03餩\x03馧\x03駂\x03駾\x03䯎\x03鬒\x03鱀\x03" + - "鳽\x03䳎\x03䳭\x03鵧\x03䳸\x03麻\x03䵖\x03黹\x03黾\x03鼅\x03鼏\x03鼖\x03鼻" +var mappings string = "" + // Size: 6539 bytes + " ̈a ̄23 ́ ̧1o1⁄41⁄23⁄4i̇l·ʼnsdžⱥⱦhjrwy ̆ ̇ ̊ ̨ ̃ ̋lẍ́ ι; ̈́եւاٴوٴۇٴيٴक" + + "़ख़ग़ज़ड़ढ़फ़य़ড়ঢ়য়ਲ਼ਸ਼ਖ਼ਗ਼ਜ਼ਫ਼ଡ଼ଢ଼ําໍາຫນຫມགྷཌྷདྷབྷཛྷཀྵཱཱིུྲྀྲཱྀླྀླཱ" + + "ཱྀྀྒྷྜྷྡྷྦྷྫྷྐྵвдостъѣæbdeǝgikmnȣptuɐɑəɛɜŋɔɯvβγδφχρнɒcɕðfɟɡɥɨɩɪʝɭʟɱɰɲɳ" + + "ɴɵɸʂʃƫʉʊʋʌzʐʑʒθssάέήίόύώἀιἁιἂιἃιἄιἅιἆιἇιἠιἡιἢιἣιἤιἥιἦιἧιὠιὡιὢιὣιὤιὥιὦιὧ" + + "ιὰιαιάιᾶιι ̈͂ὴιηιήιῆι ̓̀ ̓́ ̓͂ΐ ̔̀ ̔́ ̔͂ΰ ̈̀`ὼιωιώιῶι′′′′′‵‵‵‵‵!!???!!?" + + "′′′′0456789+=()rsħnoqsmtmωåאבגדπ1⁄71⁄91⁄101⁄32⁄31⁄52⁄53⁄54⁄51⁄65⁄61⁄83" + + "⁄85⁄87⁄81⁄iiivviviiiixxi0⁄3∫∫∫∫∫∮∮∮∮∮1011121314151617181920(10)(11)(12" + + ")(13)(14)(15)(16)(17)(18)(19)(20)∫∫∫∫==⫝̸ɫɽȿɀ. ゙ ゚よりコト(ᄀ)(ᄂ)(ᄃ)(ᄅ)(ᄆ)(ᄇ)" + + "(ᄉ)(ᄋ)(ᄌ)(ᄎ)(ᄏ)(ᄐ)(ᄑ)(ᄒ)(가)(나)(다)(라)(마)(바)(사)(아)(자)(차)(카)(타)(파)(하)(주)(오전" + + ")(오후)(一)(二)(三)(四)(五)(六)(七)(八)(九)(十)(月)(火)(水)(木)(金)(土)(日)(株)(有)(社)(名)(特)(" + + "財)(祝)(労)(代)(呼)(学)(監)(企)(資)(協)(祭)(休)(自)(至)21222324252627282930313233343" + + "5참고주의3637383940414243444546474849501月2月3月4月5月6月7月8月9月10月11月12月hgev令和アパート" + + "アルファアンペアアールイニングインチウォンエスクードエーカーオンスオームカイリカラットカロリーガロンガンマギガギニーキュリーギルダーキロキロ" + + "グラムキロメートルキロワットグラムグラムトンクルゼイロクローネケースコルナコーポサイクルサンチームシリングセンチセントダースデシドルトンナノ" + + "ノットハイツパーセントパーツバーレルピアストルピクルピコビルファラッドフィートブッシェルフランヘクタールペソペニヒヘルツペンスページベータポ" + + "イントボルトホンポンドホールホーンマイクロマイルマッハマルクマンションミクロンミリミリバールメガメガトンメートルヤードヤールユアンリットルリ" + + "ラルピールーブルレムレントゲンワット0点1点2点3点4点5点6点7点8点9点10点11点12点13点14点15点16点17点18点19点20" + + "点21点22点23点24点daauovpcdmiu平成昭和大正明治株式会社panamakakbmbgbkcalpfnfmgkghzmldlk" + + "lfmnmmmcmkmm2m3m∕sm∕s2rad∕srad∕s2psnsmspvnvmvkvpwnwmwkwbqcccdc∕kgdbgyhah" + + "pinkkktlmlnlxphprsrsvwbv∕ma∕m1日2日3日4日5日6日7日8日9日10日11日12日13日14日15日16日17日1" + + "8日19日20日21日22日23日24日25日26日27日28日29日30日31日ьɦɬʞʇœʍ𤋮𢡊𢡄𣏕𥉉𥳐𧻓fffiflstմնմեմիվնմ" + + "խיִײַעהכלםרתשׁשׂשּׁשּׂאַאָאּבּגּדּהּוּזּטּיּךּכּלּמּנּסּףּפּצּקּרּשּתּו" + + "ֹבֿכֿפֿאלٱٻپڀٺٿٹڤڦڄڃچڇڍڌڎڈژڑکگڳڱںڻۀہھےۓڭۇۆۈۋۅۉېىئائەئوئۇئۆئۈئېئىیئجئحئم" + + "ئيبجبحبخبمبىبيتجتحتختمتىتيثجثمثىثيجحجمحجحمخجخحخمسجسحسخسمصحصمضجضحضخضمطحط" + + "مظمعجعمغجغمفجفحفخفمفىفيقحقمقىقيكاكجكحكخكلكمكىكيلجلحلخلملىليمجمحمخمممىمي" + + "نجنحنخنمنىنيهجهمهىهييجيحيخيميىييذٰرٰىٰ ٌّ ٍّ َّ ُّ ِّ ّٰئرئزئنبربزبنترت" + + "زتنثرثزثنمانرنزننيريزينئخئهبهتهصخلهنههٰيهثهسهشمشهـَّـُّـِّطىطيعىعيغىغيس" + + "ىسيشىشيحىحيجىجيخىخيصىصيضىضيشجشحشخشرسرصرضراًتجمتحجتحمتخمتمجتمحتمخجمححميح" + + "مىسحجسجحسجىسمحسمجسممصححصممشحمشجيشمخشممضحىضخمطمحطممطميعجمعممعمىغممغميغمى" + + "فخمقمحقمملحملحيلحىلججلخملمحمحجمحممحيمجحمجممخجمخممجخهمجهممنحمنحىنجمنجىنم" + + "ينمىيممبخيتجيتجىتخيتخىتميتمىجميجحىجمىسخىصحيشحيضحيلجيلمييحييجييميمميقمين" + + "حيعميكمينجحمخيلجمكممجحيحجيمجيفميبحيسخينجيصلےقلےاللهاكبرمحمدصلعمرسولعليه" + + "وسلمصلىصلى الله عليه وسلمجل جلالهریال,:!?_{}[]#&*-<>\\$%@ـًـَـُـِـّـْءآ" + + "أؤإئابةتثجحخدذرزسشصضطظعغفقكلمنهويلآلألإلا\x22'/^|~¢£¬¦¥𝅗𝅥𝅘𝅥𝅘𝅥𝅮𝅘𝅥𝅯𝅘𝅥𝅰𝅘𝅥𝅱" + + "𝅘𝅥𝅲𝆹𝅥𝆺𝅥𝆹𝅥𝅮𝆺𝅥𝅮𝆹𝅥𝅯𝆺𝅥𝅯ıȷαεζηκλμνξοστυψ∇∂ϝٮڡٯ0,1,2,3,4,5,6,7,8,9,(a)(b)(c" + + ")(d)(e)(f)(g)(h)(i)(j)(k)(l)(m)(n)(o)(p)(q)(r)(s)(t)(u)(v)(w)(x)(y)(z)〔s" + + "〕wzhvsdppvwcmcmdmrdjほかココサ手字双デ二多解天交映無料前後再新初終生販声吹演投捕一三遊左中右指走打禁空合満有月申割営配〔" + + "本〕〔三〕〔二〕〔安〕〔点〕〔打〕〔盗〕〔勝〕〔敗〕得可丽丸乁你侮侻倂偺備僧像㒞免兔兤具㒹內冗冤仌冬况凵刃㓟刻剆剷㔕勇勉勤勺包匆北卉卑博即卽" + + "卿灰及叟叫叱吆咞吸呈周咢哶唐啓啣善喙喫喳嗂圖嘆圗噑噴切壮城埴堍型堲報墬売壷夆夢奢姬娛娧姘婦㛮嬈嬾寃寘寧寳寿将尢㞁屠屮峀岍嵃嵮嵫嵼巡巢㠯巽帨帽" + + "幩㡢㡼庰庳庶廊廾舁弢㣇形彫㣣徚忍志忹悁㤺㤜悔惇慈慌慎慺憎憲憤憯懞懲懶成戛扝抱拔捐挽拼捨掃揤搢揅掩㨮摩摾撝摷㩬敏敬旣書晉㬙暑㬈㫤冒冕最暜肭䏙朗" + + "望朡杞杓㭉柺枅桒梅梎栟椔㮝楂榣槪檨櫛㰘次歔㱎歲殟殺殻汎沿泍汧洖派海流浩浸涅洴港湮㴳滋滇淹潮濆瀹瀞瀛㶖灊災灷炭煅熜爨爵牐犀犕獺王㺬玥㺸瑇瑜瑱璅" + + "瓊㼛甤甾異瘐㿼䀈直眞真睊䀹瞋䁆䂖硎碌磌䃣祖福秫䄯穀穊穏䈂篆築䈧糒䊠糨糣紀絣䌁緇縂繅䌴䍙罺羕翺者聠聰䏕育脃䐋脾媵舄辞䑫芑芋芝劳花芳芽苦若茝荣莭" + + "茣莽菧著荓菊菌菜䔫蓱蓳蔖蕤䕝䕡䕫虐虜虧虩蚩蚈蜎蛢蝹蜨蝫螆蟡蠁䗹衠衣裗裞䘵裺㒻䚾䛇誠諭變豕貫賁贛起跋趼跰軔輸邔郱鄑鄛鈸鋗鋘鉼鏹鐕開䦕閷䧦雃嶲霣" + + 
"䩮䩶韠䪲頋頩飢䬳餩馧駂駾䯎鬒鱀鳽䳎䳭鵧䳸麻䵖黹黾鼅鼏鼖鼻" + +var mappingIndex = []uint16{ // 1650 elements + // Entry 0 - 3F + 0x0000, 0x0000, 0x0001, 0x0004, 0x0005, 0x0008, 0x0009, 0x000a, + 0x000d, 0x0010, 0x0011, 0x0012, 0x0017, 0x001c, 0x0021, 0x0024, + 0x0027, 0x002a, 0x002b, 0x002e, 0x0031, 0x0034, 0x0035, 0x0036, + 0x0037, 0x0038, 0x0039, 0x003c, 0x003f, 0x0042, 0x0045, 0x0048, + 0x004b, 0x004c, 0x004d, 0x0051, 0x0054, 0x0055, 0x005a, 0x005e, + 0x0062, 0x0066, 0x006a, 0x006e, 0x0074, 0x007a, 0x0080, 0x0086, + 0x008c, 0x0092, 0x0098, 0x009e, 0x00a4, 0x00aa, 0x00b0, 0x00b6, + 0x00bc, 0x00c2, 0x00c8, 0x00ce, 0x00d4, 0x00da, 0x00e0, 0x00e6, + // Entry 40 - 7F + 0x00ec, 0x00f2, 0x00f8, 0x00fe, 0x0104, 0x010a, 0x0110, 0x0116, + 0x011c, 0x0122, 0x0128, 0x012e, 0x0137, 0x013d, 0x0146, 0x014c, + 0x0152, 0x0158, 0x015e, 0x0164, 0x016a, 0x0170, 0x0172, 0x0174, + 0x0176, 0x0178, 0x017a, 0x017c, 0x017e, 0x0180, 0x0181, 0x0182, + 0x0183, 0x0185, 0x0186, 0x0187, 0x0188, 0x0189, 0x018a, 0x018c, + 0x018d, 0x018e, 0x018f, 0x0191, 0x0193, 0x0195, 0x0197, 0x0199, + 0x019b, 0x019d, 0x019f, 0x01a0, 0x01a2, 0x01a4, 0x01a6, 0x01a8, + 0x01aa, 0x01ac, 0x01ae, 0x01b0, 0x01b1, 0x01b3, 0x01b5, 0x01b6, + // Entry 80 - BF + 0x01b8, 0x01ba, 0x01bc, 0x01be, 0x01c0, 0x01c2, 0x01c4, 0x01c6, + 0x01c8, 0x01ca, 0x01cc, 0x01ce, 0x01d0, 0x01d2, 0x01d4, 0x01d6, + 0x01d8, 0x01da, 0x01dc, 0x01de, 0x01e0, 0x01e2, 0x01e4, 0x01e5, + 0x01e7, 0x01e9, 0x01eb, 0x01ed, 0x01ef, 0x01f1, 0x01f3, 0x01f5, + 0x01f7, 0x01f9, 0x01fb, 0x01fd, 0x0202, 0x0207, 0x020c, 0x0211, + 0x0216, 0x021b, 0x0220, 0x0225, 0x022a, 0x022f, 0x0234, 0x0239, + 0x023e, 0x0243, 0x0248, 0x024d, 0x0252, 0x0257, 0x025c, 0x0261, + 0x0266, 0x026b, 0x0270, 0x0275, 0x027a, 0x027e, 0x0282, 0x0287, + // Entry C0 - FF + 0x0289, 0x028e, 0x0293, 0x0297, 0x029b, 0x02a0, 0x02a5, 0x02aa, + 0x02af, 0x02b1, 0x02b6, 0x02bb, 0x02c0, 0x02c2, 0x02c7, 0x02c8, + 0x02cd, 0x02d1, 0x02d5, 0x02da, 0x02e0, 0x02e9, 0x02ef, 0x02f8, + 0x02fa, 0x02fc, 0x02fe, 0x0300, 0x030c, 0x030d, 0x030e, 0x030f, + 0x0310, 0x0311, 0x0312, 0x0313, 0x0314, 0x0315, 0x0316, 0x0317, + 0x0319, 0x031b, 0x031d, 0x031e, 0x0320, 0x0322, 0x0324, 0x0326, + 0x0328, 0x032a, 0x032c, 0x032e, 0x0330, 0x0335, 0x033a, 0x0340, + 0x0345, 0x034a, 0x034f, 0x0354, 0x0359, 0x035e, 0x0363, 0x0368, + // Entry 100 - 13F + 0x036d, 0x0372, 0x0377, 0x037c, 0x0380, 0x0382, 0x0384, 0x0386, + 0x038a, 0x038c, 0x038e, 0x0393, 0x0399, 0x03a2, 0x03a8, 0x03b1, + 0x03b3, 0x03b5, 0x03b7, 0x03b9, 0x03bb, 0x03bd, 0x03bf, 0x03c1, + 0x03c3, 0x03c5, 0x03c7, 0x03cb, 0x03cf, 0x03d3, 0x03d7, 0x03db, + 0x03df, 0x03e3, 0x03e7, 0x03eb, 0x03ef, 0x03f3, 0x03ff, 0x0401, + 0x0406, 0x0408, 0x040a, 0x040c, 0x040e, 0x040f, 0x0413, 0x0417, + 0x041d, 0x0423, 0x0428, 0x042d, 0x0432, 0x0437, 0x043c, 0x0441, + 0x0446, 0x044b, 0x0450, 0x0455, 0x045a, 0x045f, 0x0464, 0x0469, + // Entry 140 - 17F + 0x046e, 0x0473, 0x0478, 0x047d, 0x0482, 0x0487, 0x048c, 0x0491, + 0x0496, 0x049b, 0x04a0, 0x04a5, 0x04aa, 0x04af, 0x04b4, 0x04bc, + 0x04c4, 0x04c9, 0x04ce, 0x04d3, 0x04d8, 0x04dd, 0x04e2, 0x04e7, + 0x04ec, 0x04f1, 0x04f6, 0x04fb, 0x0500, 0x0505, 0x050a, 0x050f, + 0x0514, 0x0519, 0x051e, 0x0523, 0x0528, 0x052d, 0x0532, 0x0537, + 0x053c, 0x0541, 0x0546, 0x054b, 0x0550, 0x0555, 0x055a, 0x055f, + 0x0564, 0x0569, 0x056e, 0x0573, 0x0578, 0x057a, 0x057c, 0x057e, + 0x0580, 0x0582, 0x0584, 0x0586, 0x0588, 0x058a, 0x058c, 0x058e, + // Entry 180 - 1BF + 0x0590, 0x0592, 0x0594, 0x0596, 0x059c, 0x05a2, 0x05a4, 0x05a6, + 0x05a8, 0x05aa, 0x05ac, 0x05ae, 0x05b0, 0x05b2, 0x05b4, 0x05b6, + 0x05b8, 0x05ba, 0x05bc, 0x05be, 
0x05c0, 0x05c4, 0x05c8, 0x05cc, + 0x05d0, 0x05d4, 0x05d8, 0x05dc, 0x05e0, 0x05e4, 0x05e9, 0x05ee, + 0x05f3, 0x05f5, 0x05f7, 0x05fd, 0x0609, 0x0615, 0x0621, 0x062a, + 0x0636, 0x063f, 0x0648, 0x0657, 0x0663, 0x066c, 0x0675, 0x067e, + 0x068a, 0x0696, 0x069f, 0x06a8, 0x06ae, 0x06b7, 0x06c3, 0x06cf, + 0x06d5, 0x06e4, 0x06f6, 0x0705, 0x070e, 0x071d, 0x072c, 0x0738, + // Entry 1C0 - 1FF + 0x0741, 0x074a, 0x0753, 0x075f, 0x076e, 0x077a, 0x0783, 0x078c, + 0x0795, 0x079b, 0x07a1, 0x07a7, 0x07ad, 0x07b6, 0x07bf, 0x07ce, + 0x07d7, 0x07e3, 0x07f2, 0x07fb, 0x0801, 0x0807, 0x0816, 0x0822, + 0x0831, 0x083a, 0x0849, 0x084f, 0x0858, 0x0861, 0x086a, 0x0873, + 0x087c, 0x0888, 0x0891, 0x0897, 0x08a0, 0x08a9, 0x08b2, 0x08be, + 0x08c7, 0x08d0, 0x08d9, 0x08e8, 0x08f4, 0x08fa, 0x0909, 0x090f, + 0x091b, 0x0927, 0x0930, 0x0939, 0x0942, 0x094e, 0x0954, 0x095d, + 0x0969, 0x096f, 0x097e, 0x0987, 0x098b, 0x098f, 0x0993, 0x0997, + // Entry 200 - 23F + 0x099b, 0x099f, 0x09a3, 0x09a7, 0x09ab, 0x09af, 0x09b4, 0x09b9, + 0x09be, 0x09c3, 0x09c8, 0x09cd, 0x09d2, 0x09d7, 0x09dc, 0x09e1, + 0x09e6, 0x09eb, 0x09f0, 0x09f5, 0x09fa, 0x09fc, 0x09fe, 0x0a00, + 0x0a02, 0x0a04, 0x0a06, 0x0a0c, 0x0a12, 0x0a18, 0x0a1e, 0x0a2a, + 0x0a2c, 0x0a2e, 0x0a30, 0x0a32, 0x0a34, 0x0a36, 0x0a38, 0x0a3c, + 0x0a3e, 0x0a40, 0x0a42, 0x0a44, 0x0a46, 0x0a48, 0x0a4a, 0x0a4c, + 0x0a4e, 0x0a50, 0x0a52, 0x0a54, 0x0a56, 0x0a58, 0x0a5a, 0x0a5f, + 0x0a65, 0x0a6c, 0x0a74, 0x0a76, 0x0a78, 0x0a7a, 0x0a7c, 0x0a7e, + // Entry 240 - 27F + 0x0a80, 0x0a82, 0x0a84, 0x0a86, 0x0a88, 0x0a8a, 0x0a8c, 0x0a8e, + 0x0a90, 0x0a96, 0x0a98, 0x0a9a, 0x0a9c, 0x0a9e, 0x0aa0, 0x0aa2, + 0x0aa4, 0x0aa6, 0x0aa8, 0x0aaa, 0x0aac, 0x0aae, 0x0ab0, 0x0ab2, + 0x0ab4, 0x0ab9, 0x0abe, 0x0ac2, 0x0ac6, 0x0aca, 0x0ace, 0x0ad2, + 0x0ad6, 0x0ada, 0x0ade, 0x0ae2, 0x0ae7, 0x0aec, 0x0af1, 0x0af6, + 0x0afb, 0x0b00, 0x0b05, 0x0b0a, 0x0b0f, 0x0b14, 0x0b19, 0x0b1e, + 0x0b23, 0x0b28, 0x0b2d, 0x0b32, 0x0b37, 0x0b3c, 0x0b41, 0x0b46, + 0x0b4b, 0x0b50, 0x0b52, 0x0b54, 0x0b56, 0x0b58, 0x0b5a, 0x0b5c, + // Entry 280 - 2BF + 0x0b5e, 0x0b62, 0x0b66, 0x0b6a, 0x0b6e, 0x0b72, 0x0b76, 0x0b7a, + 0x0b7c, 0x0b7e, 0x0b80, 0x0b82, 0x0b86, 0x0b8a, 0x0b8e, 0x0b92, + 0x0b96, 0x0b9a, 0x0b9e, 0x0ba0, 0x0ba2, 0x0ba4, 0x0ba6, 0x0ba8, + 0x0baa, 0x0bac, 0x0bb0, 0x0bb4, 0x0bba, 0x0bc0, 0x0bc4, 0x0bc8, + 0x0bcc, 0x0bd0, 0x0bd4, 0x0bd8, 0x0bdc, 0x0be0, 0x0be4, 0x0be8, + 0x0bec, 0x0bf0, 0x0bf4, 0x0bf8, 0x0bfc, 0x0c00, 0x0c04, 0x0c08, + 0x0c0c, 0x0c10, 0x0c14, 0x0c18, 0x0c1c, 0x0c20, 0x0c24, 0x0c28, + 0x0c2c, 0x0c30, 0x0c34, 0x0c36, 0x0c38, 0x0c3a, 0x0c3c, 0x0c3e, + // Entry 2C0 - 2FF + 0x0c40, 0x0c42, 0x0c44, 0x0c46, 0x0c48, 0x0c4a, 0x0c4c, 0x0c4e, + 0x0c50, 0x0c52, 0x0c54, 0x0c56, 0x0c58, 0x0c5a, 0x0c5c, 0x0c5e, + 0x0c60, 0x0c62, 0x0c64, 0x0c66, 0x0c68, 0x0c6a, 0x0c6c, 0x0c6e, + 0x0c70, 0x0c72, 0x0c74, 0x0c76, 0x0c78, 0x0c7a, 0x0c7c, 0x0c7e, + 0x0c80, 0x0c82, 0x0c86, 0x0c8a, 0x0c8e, 0x0c92, 0x0c96, 0x0c9a, + 0x0c9e, 0x0ca2, 0x0ca4, 0x0ca8, 0x0cac, 0x0cb0, 0x0cb4, 0x0cb8, + 0x0cbc, 0x0cc0, 0x0cc4, 0x0cc8, 0x0ccc, 0x0cd0, 0x0cd4, 0x0cd8, + 0x0cdc, 0x0ce0, 0x0ce4, 0x0ce8, 0x0cec, 0x0cf0, 0x0cf4, 0x0cf8, + // Entry 300 - 33F + 0x0cfc, 0x0d00, 0x0d04, 0x0d08, 0x0d0c, 0x0d10, 0x0d14, 0x0d18, + 0x0d1c, 0x0d20, 0x0d24, 0x0d28, 0x0d2c, 0x0d30, 0x0d34, 0x0d38, + 0x0d3c, 0x0d40, 0x0d44, 0x0d48, 0x0d4c, 0x0d50, 0x0d54, 0x0d58, + 0x0d5c, 0x0d60, 0x0d64, 0x0d68, 0x0d6c, 0x0d70, 0x0d74, 0x0d78, + 0x0d7c, 0x0d80, 0x0d84, 0x0d88, 0x0d8c, 0x0d90, 0x0d94, 0x0d98, + 0x0d9c, 0x0da0, 0x0da4, 0x0da8, 0x0dac, 0x0db0, 0x0db4, 0x0db8, + 0x0dbc, 0x0dc0, 0x0dc4, 
0x0dc8, 0x0dcc, 0x0dd0, 0x0dd4, 0x0dd8, + 0x0ddc, 0x0de0, 0x0de4, 0x0de8, 0x0dec, 0x0df0, 0x0df4, 0x0df8, + // Entry 340 - 37F + 0x0dfc, 0x0e00, 0x0e04, 0x0e08, 0x0e0c, 0x0e10, 0x0e14, 0x0e18, + 0x0e1d, 0x0e22, 0x0e27, 0x0e2c, 0x0e31, 0x0e36, 0x0e3a, 0x0e3e, + 0x0e42, 0x0e46, 0x0e4a, 0x0e4e, 0x0e52, 0x0e56, 0x0e5a, 0x0e5e, + 0x0e62, 0x0e66, 0x0e6a, 0x0e6e, 0x0e72, 0x0e76, 0x0e7a, 0x0e7e, + 0x0e82, 0x0e86, 0x0e8a, 0x0e8e, 0x0e92, 0x0e96, 0x0e9a, 0x0e9e, + 0x0ea2, 0x0ea6, 0x0eaa, 0x0eae, 0x0eb2, 0x0eb6, 0x0ebc, 0x0ec2, + 0x0ec8, 0x0ecc, 0x0ed0, 0x0ed4, 0x0ed8, 0x0edc, 0x0ee0, 0x0ee4, + 0x0ee8, 0x0eec, 0x0ef0, 0x0ef4, 0x0ef8, 0x0efc, 0x0f00, 0x0f04, + // Entry 380 - 3BF + 0x0f08, 0x0f0c, 0x0f10, 0x0f14, 0x0f18, 0x0f1c, 0x0f20, 0x0f24, + 0x0f28, 0x0f2c, 0x0f30, 0x0f34, 0x0f38, 0x0f3e, 0x0f44, 0x0f4a, + 0x0f50, 0x0f56, 0x0f5c, 0x0f62, 0x0f68, 0x0f6e, 0x0f74, 0x0f7a, + 0x0f80, 0x0f86, 0x0f8c, 0x0f92, 0x0f98, 0x0f9e, 0x0fa4, 0x0faa, + 0x0fb0, 0x0fb6, 0x0fbc, 0x0fc2, 0x0fc8, 0x0fce, 0x0fd4, 0x0fda, + 0x0fe0, 0x0fe6, 0x0fec, 0x0ff2, 0x0ff8, 0x0ffe, 0x1004, 0x100a, + 0x1010, 0x1016, 0x101c, 0x1022, 0x1028, 0x102e, 0x1034, 0x103a, + 0x1040, 0x1046, 0x104c, 0x1052, 0x1058, 0x105e, 0x1064, 0x106a, + // Entry 3C0 - 3FF + 0x1070, 0x1076, 0x107c, 0x1082, 0x1088, 0x108e, 0x1094, 0x109a, + 0x10a0, 0x10a6, 0x10ac, 0x10b2, 0x10b8, 0x10be, 0x10c4, 0x10ca, + 0x10d0, 0x10d6, 0x10dc, 0x10e2, 0x10e8, 0x10ee, 0x10f4, 0x10fa, + 0x1100, 0x1106, 0x110c, 0x1112, 0x1118, 0x111e, 0x1124, 0x112a, + 0x1130, 0x1136, 0x113c, 0x1142, 0x1148, 0x114e, 0x1154, 0x115a, + 0x1160, 0x1166, 0x116c, 0x1172, 0x1178, 0x1180, 0x1188, 0x1190, + 0x1198, 0x11a0, 0x11a8, 0x11b0, 0x11b6, 0x11d7, 0x11e6, 0x11ee, + 0x11ef, 0x11f0, 0x11f1, 0x11f2, 0x11f3, 0x11f4, 0x11f5, 0x11f6, + // Entry 400 - 43F + 0x11f7, 0x11f8, 0x11f9, 0x11fa, 0x11fb, 0x11fc, 0x11fd, 0x11fe, + 0x11ff, 0x1200, 0x1201, 0x1205, 0x1209, 0x120d, 0x1211, 0x1215, + 0x1219, 0x121b, 0x121d, 0x121f, 0x1221, 0x1223, 0x1225, 0x1227, + 0x1229, 0x122b, 0x122d, 0x122f, 0x1231, 0x1233, 0x1235, 0x1237, + 0x1239, 0x123b, 0x123d, 0x123f, 0x1241, 0x1243, 0x1245, 0x1247, + 0x1249, 0x124b, 0x124d, 0x124f, 0x1251, 0x1253, 0x1255, 0x1257, + 0x1259, 0x125b, 0x125d, 0x125f, 0x1263, 0x1267, 0x126b, 0x126f, + 0x1270, 0x1271, 0x1272, 0x1273, 0x1274, 0x1275, 0x1277, 0x1279, + // Entry 440 - 47F + 0x127b, 0x127d, 0x127f, 0x1287, 0x128f, 0x129b, 0x12a7, 0x12b3, + 0x12bf, 0x12cb, 0x12d3, 0x12db, 0x12e7, 0x12f3, 0x12ff, 0x130b, + 0x130d, 0x130f, 0x1311, 0x1313, 0x1315, 0x1317, 0x1319, 0x131b, + 0x131d, 0x131f, 0x1321, 0x1323, 0x1325, 0x1327, 0x1329, 0x132b, + 0x132e, 0x1331, 0x1333, 0x1335, 0x1337, 0x1339, 0x133b, 0x133d, + 0x133f, 0x1341, 0x1343, 0x1345, 0x1347, 0x1349, 0x134b, 0x134d, + 0x1350, 0x1353, 0x1356, 0x1359, 0x135c, 0x135f, 0x1362, 0x1365, + 0x1368, 0x136b, 0x136e, 0x1371, 0x1374, 0x1377, 0x137a, 0x137d, + // Entry 480 - 4BF + 0x1380, 0x1383, 0x1386, 0x1389, 0x138c, 0x138f, 0x1392, 0x1395, + 0x1398, 0x139b, 0x13a2, 0x13a4, 0x13a6, 0x13a8, 0x13ab, 0x13ad, + 0x13af, 0x13b1, 0x13b3, 0x13b5, 0x13bb, 0x13c1, 0x13c4, 0x13c7, + 0x13ca, 0x13cd, 0x13d0, 0x13d3, 0x13d6, 0x13d9, 0x13dc, 0x13df, + 0x13e2, 0x13e5, 0x13e8, 0x13eb, 0x13ee, 0x13f1, 0x13f4, 0x13f7, + 0x13fa, 0x13fd, 0x1400, 0x1403, 0x1406, 0x1409, 0x140c, 0x140f, + 0x1412, 0x1415, 0x1418, 0x141b, 0x141e, 0x1421, 0x1424, 0x1427, + 0x142a, 0x142d, 0x1430, 0x1433, 0x1436, 0x1439, 0x143c, 0x143f, + // Entry 4C0 - 4FF + 0x1442, 0x1445, 0x1448, 0x1451, 0x145a, 0x1463, 0x146c, 0x1475, + 0x147e, 0x1487, 0x1490, 0x1499, 0x149c, 0x149f, 0x14a2, 0x14a5, + 
0x14a8, 0x14ab, 0x14ae, 0x14b1, 0x14b4, 0x14b7, 0x14ba, 0x14bd, + 0x14c0, 0x14c3, 0x14c6, 0x14c9, 0x14cc, 0x14cf, 0x14d2, 0x14d5, + 0x14d8, 0x14db, 0x14de, 0x14e1, 0x14e4, 0x14e7, 0x14ea, 0x14ed, + 0x14f0, 0x14f3, 0x14f6, 0x14f9, 0x14fc, 0x14ff, 0x1502, 0x1505, + 0x1508, 0x150b, 0x150e, 0x1511, 0x1514, 0x1517, 0x151a, 0x151d, + 0x1520, 0x1523, 0x1526, 0x1529, 0x152c, 0x152f, 0x1532, 0x1535, + // Entry 500 - 53F + 0x1538, 0x153b, 0x153e, 0x1541, 0x1544, 0x1547, 0x154a, 0x154d, + 0x1550, 0x1553, 0x1556, 0x1559, 0x155c, 0x155f, 0x1562, 0x1565, + 0x1568, 0x156b, 0x156e, 0x1571, 0x1574, 0x1577, 0x157a, 0x157d, + 0x1580, 0x1583, 0x1586, 0x1589, 0x158c, 0x158f, 0x1592, 0x1595, + 0x1598, 0x159b, 0x159e, 0x15a1, 0x15a4, 0x15a7, 0x15aa, 0x15ad, + 0x15b0, 0x15b3, 0x15b6, 0x15b9, 0x15bc, 0x15bf, 0x15c2, 0x15c5, + 0x15c8, 0x15cb, 0x15ce, 0x15d1, 0x15d4, 0x15d7, 0x15da, 0x15dd, + 0x15e0, 0x15e3, 0x15e6, 0x15e9, 0x15ec, 0x15ef, 0x15f2, 0x15f5, + // Entry 540 - 57F + 0x15f8, 0x15fb, 0x15fe, 0x1601, 0x1604, 0x1607, 0x160a, 0x160d, + 0x1610, 0x1613, 0x1616, 0x1619, 0x161c, 0x161f, 0x1622, 0x1625, + 0x1628, 0x162b, 0x162e, 0x1631, 0x1634, 0x1637, 0x163a, 0x163d, + 0x1640, 0x1643, 0x1646, 0x1649, 0x164c, 0x164f, 0x1652, 0x1655, + 0x1658, 0x165b, 0x165e, 0x1661, 0x1664, 0x1667, 0x166a, 0x166d, + 0x1670, 0x1673, 0x1676, 0x1679, 0x167c, 0x167f, 0x1682, 0x1685, + 0x1688, 0x168b, 0x168e, 0x1691, 0x1694, 0x1697, 0x169a, 0x169d, + 0x16a0, 0x16a3, 0x16a6, 0x16a9, 0x16ac, 0x16af, 0x16b2, 0x16b5, + // Entry 580 - 5BF + 0x16b8, 0x16bb, 0x16be, 0x16c1, 0x16c4, 0x16c7, 0x16ca, 0x16cd, + 0x16d0, 0x16d3, 0x16d6, 0x16d9, 0x16dc, 0x16df, 0x16e2, 0x16e5, + 0x16e8, 0x16eb, 0x16ee, 0x16f1, 0x16f4, 0x16f7, 0x16fa, 0x16fd, + 0x1700, 0x1703, 0x1706, 0x1709, 0x170c, 0x170f, 0x1712, 0x1715, + 0x1718, 0x171b, 0x171e, 0x1721, 0x1724, 0x1727, 0x172a, 0x172d, + 0x1730, 0x1733, 0x1736, 0x1739, 0x173c, 0x173f, 0x1742, 0x1745, + 0x1748, 0x174b, 0x174e, 0x1751, 0x1754, 0x1757, 0x175a, 0x175d, + 0x1760, 0x1763, 0x1766, 0x1769, 0x176c, 0x176f, 0x1772, 0x1775, + // Entry 5C0 - 5FF + 0x1778, 0x177b, 0x177e, 0x1781, 0x1784, 0x1787, 0x178a, 0x178d, + 0x1790, 0x1793, 0x1796, 0x1799, 0x179c, 0x179f, 0x17a2, 0x17a5, + 0x17a8, 0x17ab, 0x17ae, 0x17b1, 0x17b4, 0x17b7, 0x17ba, 0x17bd, + 0x17c0, 0x17c3, 0x17c6, 0x17c9, 0x17cc, 0x17cf, 0x17d2, 0x17d5, + 0x17d8, 0x17db, 0x17de, 0x17e1, 0x17e4, 0x17e7, 0x17ea, 0x17ed, + 0x17f0, 0x17f3, 0x17f6, 0x17f9, 0x17fc, 0x17ff, 0x1802, 0x1805, + 0x1808, 0x180b, 0x180e, 0x1811, 0x1814, 0x1817, 0x181a, 0x181d, + 0x1820, 0x1823, 0x1826, 0x1829, 0x182c, 0x182f, 0x1832, 0x1835, + // Entry 600 - 63F + 0x1838, 0x183b, 0x183e, 0x1841, 0x1844, 0x1847, 0x184a, 0x184d, + 0x1850, 0x1853, 0x1856, 0x1859, 0x185c, 0x185f, 0x1862, 0x1865, + 0x1868, 0x186b, 0x186e, 0x1871, 0x1874, 0x1877, 0x187a, 0x187d, + 0x1880, 0x1883, 0x1886, 0x1889, 0x188c, 0x188f, 0x1892, 0x1895, + 0x1898, 0x189b, 0x189e, 0x18a1, 0x18a4, 0x18a7, 0x18aa, 0x18ad, + 0x18b0, 0x18b3, 0x18b6, 0x18b9, 0x18bc, 0x18bf, 0x18c2, 0x18c5, + 0x18c8, 0x18cb, 0x18ce, 0x18d1, 0x18d4, 0x18d7, 0x18da, 0x18dd, + 0x18e0, 0x18e3, 0x18e6, 0x18e9, 0x18ec, 0x18ef, 0x18f2, 0x18f5, + // Entry 640 - 67F + 0x18f8, 0x18fb, 0x18fe, 0x1901, 0x1904, 0x1907, 0x190a, 0x190d, + 0x1910, 0x1913, 0x1916, 0x1919, 0x191c, 0x191f, 0x1922, 0x1925, + 0x1928, 0x192b, 0x192e, 0x1931, 0x1934, 0x1937, 0x193a, 0x193d, + 0x1940, 0x1943, 0x1946, 0x1949, 0x194c, 0x194f, 0x1952, 0x1955, + 0x1958, 0x195b, 0x195e, 0x1961, 0x1964, 0x1967, 0x196a, 0x196d, + 0x1970, 0x1973, 0x1976, 0x1979, 0x197c, 0x197f, 0x1982, 
0x1985, + 0x1988, 0x198b, +} // Size: 3324 bytes var xorData string = "" + // Size: 4862 bytes "\x02\x0c\x09\x02\xb0\xec\x02\xad\xd8\x02\xad\xd9\x02\x06\x07\x02\x0f\x12" + @@ -547,7 +690,7 @@ return 0 } -// idnaTrie. Total size: 30288 bytes (29.58 KiB). Checksum: c0cd84404a2f6f19. +// idnaTrie. Total size: 30196 bytes (29.49 KiB). Checksum: e2ae95a945f04016. type idnaTrie struct{} func newIdnaTrie(i int) *idnaTrie { @@ -600,11 +743,11 @@ 0xd2: 0x0040, 0xd3: 0x0040, 0xd4: 0x0040, 0xd5: 0x0040, 0xd6: 0x0040, 0xd7: 0x0040, 0xd8: 0x0040, 0xd9: 0x0040, 0xda: 0x0040, 0xdb: 0x0040, 0xdc: 0x0040, 0xdd: 0x0040, 0xde: 0x0040, 0xdf: 0x0040, 0xe0: 0x000a, 0xe1: 0x0018, 0xe2: 0x0018, 0xe3: 0x0018, - 0xe4: 0x0018, 0xe5: 0x0018, 0xe6: 0x0018, 0xe7: 0x0018, 0xe8: 0x001a, 0xe9: 0x0018, - 0xea: 0x0039, 0xeb: 0x0018, 0xec: 0x0018, 0xed: 0x03c0, 0xee: 0x0018, 0xef: 0x004a, - 0xf0: 0x0018, 0xf1: 0x0018, 0xf2: 0x0069, 0xf3: 0x0079, 0xf4: 0x008a, 0xf5: 0x0005, - 0xf6: 0x0018, 0xf7: 0x0008, 0xf8: 0x00aa, 0xf9: 0x00c9, 0xfa: 0x00d9, 0xfb: 0x0018, - 0xfc: 0x00e9, 0xfd: 0x0119, 0xfe: 0x0149, 0xff: 0x0018, + 0xe4: 0x0018, 0xe5: 0x0018, 0xe6: 0x0018, 0xe7: 0x0018, 0xe8: 0x0012, 0xe9: 0x0018, + 0xea: 0x0019, 0xeb: 0x0018, 0xec: 0x0018, 0xed: 0x03c0, 0xee: 0x0018, 0xef: 0x0022, + 0xf0: 0x0018, 0xf1: 0x0018, 0xf2: 0x0029, 0xf3: 0x0031, 0xf4: 0x003a, 0xf5: 0x0005, + 0xf6: 0x0018, 0xf7: 0x0008, 0xf8: 0x0042, 0xf9: 0x0049, 0xfa: 0x0051, 0xfb: 0x0018, + 0xfc: 0x0059, 0xfd: 0x0061, 0xfe: 0x0069, 0xff: 0x0018, // Block 0x4, offset 0x100 0x100: 0xe00d, 0x101: 0x0008, 0x102: 0xe00d, 0x103: 0x0008, 0x104: 0xe00d, 0x105: 0x0008, 0x106: 0xe00d, 0x107: 0x0008, 0x108: 0xe00d, 0x109: 0x0008, 0x10a: 0xe00d, 0x10b: 0x0008, @@ -614,12 +757,12 @@ 0x11e: 0xe00d, 0x11f: 0x0008, 0x120: 0xe00d, 0x121: 0x0008, 0x122: 0xe00d, 0x123: 0x0008, 0x124: 0xe00d, 0x125: 0x0008, 0x126: 0xe00d, 0x127: 0x0008, 0x128: 0xe00d, 0x129: 0x0008, 0x12a: 0xe00d, 0x12b: 0x0008, 0x12c: 0xe00d, 0x12d: 0x0008, 0x12e: 0xe00d, 0x12f: 0x0008, - 0x130: 0x0179, 0x131: 0x0008, 0x132: 0x0035, 0x133: 0x004d, 0x134: 0xe00d, 0x135: 0x0008, + 0x130: 0x0071, 0x131: 0x0008, 0x132: 0x0035, 0x133: 0x004d, 0x134: 0xe00d, 0x135: 0x0008, 0x136: 0xe00d, 0x137: 0x0008, 0x138: 0x0008, 0x139: 0xe01d, 0x13a: 0x0008, 0x13b: 0xe03d, - 0x13c: 0x0008, 0x13d: 0xe01d, 0x13e: 0x0008, 0x13f: 0x0199, + 0x13c: 0x0008, 0x13d: 0xe01d, 0x13e: 0x0008, 0x13f: 0x0079, // Block 0x5, offset 0x140 - 0x140: 0x0199, 0x141: 0xe01d, 0x142: 0x0008, 0x143: 0xe03d, 0x144: 0x0008, 0x145: 0xe01d, - 0x146: 0x0008, 0x147: 0xe07d, 0x148: 0x0008, 0x149: 0x01b9, 0x14a: 0xe00d, 0x14b: 0x0008, + 0x140: 0x0079, 0x141: 0xe01d, 0x142: 0x0008, 0x143: 0xe03d, 0x144: 0x0008, 0x145: 0xe01d, + 0x146: 0x0008, 0x147: 0xe07d, 0x148: 0x0008, 0x149: 0x0081, 0x14a: 0xe00d, 0x14b: 0x0008, 0x14c: 0xe00d, 0x14d: 0x0008, 0x14e: 0xe00d, 0x14f: 0x0008, 0x150: 0xe00d, 0x151: 0x0008, 0x152: 0xe00d, 0x153: 0x0008, 0x154: 0xe00d, 0x155: 0x0008, 0x156: 0xe00d, 0x157: 0x0008, 0x158: 0xe00d, 0x159: 0x0008, 0x15a: 0xe00d, 0x15b: 0x0008, 0x15c: 0xe00d, 0x15d: 0x0008, @@ -628,7 +771,7 @@ 0x16a: 0xe00d, 0x16b: 0x0008, 0x16c: 0xe00d, 0x16d: 0x0008, 0x16e: 0xe00d, 0x16f: 0x0008, 0x170: 0xe00d, 0x171: 0x0008, 0x172: 0xe00d, 0x173: 0x0008, 0x174: 0xe00d, 0x175: 0x0008, 0x176: 0xe00d, 0x177: 0x0008, 0x178: 0x0065, 0x179: 0xe01d, 0x17a: 0x0008, 0x17b: 0xe03d, - 0x17c: 0x0008, 0x17d: 0xe01d, 0x17e: 0x0008, 0x17f: 0x01d9, + 0x17c: 0x0008, 0x17d: 0xe01d, 0x17e: 0x0008, 0x17f: 0x0089, // Block 0x6, offset 0x180 0x180: 0x0008, 0x181: 0x007d, 0x182: 0xe00d, 0x183: 
0x0008, 0x184: 0xe00d, 0x185: 0x0008, 0x186: 0x007d, 0x187: 0xe07d, 0x188: 0x0008, 0x189: 0x0095, 0x18a: 0x00ad, 0x18b: 0xe03d, @@ -642,8 +785,8 @@ 0x1b6: 0x0008, 0x1b7: 0x01e5, 0x1b8: 0xe00d, 0x1b9: 0x0008, 0x1ba: 0x0008, 0x1bb: 0x0008, 0x1bc: 0xe00d, 0x1bd: 0x0008, 0x1be: 0x0008, 0x1bf: 0x0008, // Block 0x7, offset 0x1c0 - 0x1c0: 0x0008, 0x1c1: 0x0008, 0x1c2: 0x0008, 0x1c3: 0x0008, 0x1c4: 0x01e9, 0x1c5: 0x01e9, - 0x1c6: 0x01e9, 0x1c7: 0x01fd, 0x1c8: 0x0215, 0x1c9: 0x022d, 0x1ca: 0x0245, 0x1cb: 0x025d, + 0x1c0: 0x0008, 0x1c1: 0x0008, 0x1c2: 0x0008, 0x1c3: 0x0008, 0x1c4: 0x0091, 0x1c5: 0x0091, + 0x1c6: 0x0091, 0x1c7: 0x01fd, 0x1c8: 0x0215, 0x1c9: 0x022d, 0x1ca: 0x0245, 0x1cb: 0x025d, 0x1cc: 0x0275, 0x1cd: 0xe01d, 0x1ce: 0x0008, 0x1cf: 0xe0fd, 0x1d0: 0x0008, 0x1d1: 0xe01d, 0x1d2: 0x0008, 0x1d3: 0xe03d, 0x1d4: 0x0008, 0x1d5: 0xe01d, 0x1d6: 0x0008, 0x1d7: 0xe07d, 0x1d8: 0x0008, 0x1d9: 0xe01d, 0x1da: 0x0008, 0x1db: 0xe03d, 0x1dc: 0x0008, 0x1dd: 0x0008, @@ -663,22 +806,22 @@ 0x224: 0xe00d, 0x225: 0x0008, 0x226: 0xe00d, 0x227: 0x0008, 0x228: 0xe00d, 0x229: 0x0008, 0x22a: 0xe00d, 0x22b: 0x0008, 0x22c: 0xe00d, 0x22d: 0x0008, 0x22e: 0xe00d, 0x22f: 0x0008, 0x230: 0xe00d, 0x231: 0x0008, 0x232: 0xe00d, 0x233: 0x0008, 0x234: 0x0008, 0x235: 0x0008, - 0x236: 0x0008, 0x237: 0x0008, 0x238: 0x0008, 0x239: 0x0008, 0x23a: 0x0209, 0x23b: 0xe03d, - 0x23c: 0x0008, 0x23d: 0x031d, 0x23e: 0x0229, 0x23f: 0x0008, + 0x236: 0x0008, 0x237: 0x0008, 0x238: 0x0008, 0x239: 0x0008, 0x23a: 0x0099, 0x23b: 0xe03d, + 0x23c: 0x0008, 0x23d: 0x031d, 0x23e: 0x00a1, 0x23f: 0x0008, // Block 0x9, offset 0x240 0x240: 0x0008, 0x241: 0x0008, 0x242: 0x0018, 0x243: 0x0018, 0x244: 0x0018, 0x245: 0x0018, 0x246: 0x0008, 0x247: 0x0008, 0x248: 0x0008, 0x249: 0x0008, 0x24a: 0x0008, 0x24b: 0x0008, 0x24c: 0x0008, 0x24d: 0x0008, 0x24e: 0x0008, 0x24f: 0x0008, 0x250: 0x0008, 0x251: 0x0008, 0x252: 0x0018, 0x253: 0x0018, 0x254: 0x0018, 0x255: 0x0018, 0x256: 0x0018, 0x257: 0x0018, - 0x258: 0x029a, 0x259: 0x02ba, 0x25a: 0x02da, 0x25b: 0x02fa, 0x25c: 0x031a, 0x25d: 0x033a, - 0x25e: 0x0018, 0x25f: 0x0018, 0x260: 0x03ad, 0x261: 0x0359, 0x262: 0x01d9, 0x263: 0x0369, + 0x258: 0x00d2, 0x259: 0x00da, 0x25a: 0x00e2, 0x25b: 0x00ea, 0x25c: 0x00f2, 0x25d: 0x00fa, + 0x25e: 0x0018, 0x25f: 0x0018, 0x260: 0x03ad, 0x261: 0x0101, 0x262: 0x0089, 0x263: 0x0109, 0x264: 0x03c5, 0x265: 0x0018, 0x266: 0x0018, 0x267: 0x0018, 0x268: 0x0018, 0x269: 0x0018, 0x26a: 0x0018, 0x26b: 0x0018, 0x26c: 0x0008, 0x26d: 0x0018, 0x26e: 0x0008, 0x26f: 0x0018, 0x270: 0x0018, 0x271: 0x0018, 0x272: 0x0018, 0x273: 0x0018, 0x274: 0x0018, 0x275: 0x0018, 0x276: 0x0018, 0x277: 0x0018, 0x278: 0x0018, 0x279: 0x0018, 0x27a: 0x0018, 0x27b: 0x0018, 0x27c: 0x0018, 0x27d: 0x0018, 0x27e: 0x0018, 0x27f: 0x0018, // Block 0xa, offset 0x280 - 0x280: 0x03dd, 0x281: 0x03dd, 0x282: 0x3308, 0x283: 0x03f5, 0x284: 0x0379, 0x285: 0x040d, + 0x280: 0x03dd, 0x281: 0x03dd, 0x282: 0x3308, 0x283: 0x03f5, 0x284: 0x0111, 0x285: 0x040d, 0x286: 0x3308, 0x287: 0x3308, 0x288: 0x3308, 0x289: 0x3308, 0x28a: 0x3308, 0x28b: 0x3308, 0x28c: 0x3308, 0x28d: 0x3308, 0x28e: 0x3308, 0x28f: 0x33c0, 0x290: 0x3308, 0x291: 0x3308, 0x292: 0x3308, 0x293: 0x3308, 0x294: 0x3308, 0x295: 0x3308, 0x296: 0x3308, 0x297: 0x3308, @@ -687,10 +830,10 @@ 0x2a4: 0x3308, 0x2a5: 0x3308, 0x2a6: 0x3308, 0x2a7: 0x3308, 0x2a8: 0x3308, 0x2a9: 0x3308, 0x2aa: 0x3308, 0x2ab: 0x3308, 0x2ac: 0x3308, 0x2ad: 0x3308, 0x2ae: 0x3308, 0x2af: 0x3308, 0x2b0: 0xe00d, 0x2b1: 0x0008, 0x2b2: 0xe00d, 0x2b3: 0x0008, 0x2b4: 0x0425, 0x2b5: 0x0008, - 0x2b6: 0xe00d, 0x2b7: 0x0008, 0x2b8: 
0x0040, 0x2b9: 0x0040, 0x2ba: 0x03a2, 0x2bb: 0x0008, - 0x2bc: 0x0008, 0x2bd: 0x0008, 0x2be: 0x03c2, 0x2bf: 0x043d, + 0x2b6: 0xe00d, 0x2b7: 0x0008, 0x2b8: 0x0040, 0x2b9: 0x0040, 0x2ba: 0x011a, 0x2bb: 0x0008, + 0x2bc: 0x0008, 0x2bd: 0x0008, 0x2be: 0x0122, 0x2bf: 0x043d, // Block 0xb, offset 0x2c0 - 0x2c0: 0x0040, 0x2c1: 0x0040, 0x2c2: 0x0040, 0x2c3: 0x0040, 0x2c4: 0x008a, 0x2c5: 0x03d2, + 0x2c0: 0x0040, 0x2c1: 0x0040, 0x2c2: 0x0040, 0x2c3: 0x0040, 0x2c4: 0x003a, 0x2c5: 0x012a, 0x2c6: 0xe155, 0x2c7: 0x0455, 0x2c8: 0xe12d, 0x2c9: 0xe13d, 0x2ca: 0xe12d, 0x2cb: 0x0040, 0x2cc: 0x03dd, 0x2cd: 0x0040, 0x2ce: 0x046d, 0x2cf: 0x0485, 0x2d0: 0x0008, 0x2d1: 0xe105, 0x2d2: 0xe105, 0x2d3: 0xe105, 0x2d4: 0xe105, 0x2d5: 0xe105, 0x2d6: 0xe105, 0x2d7: 0xe105, @@ -782,8 +925,8 @@ 0x49e: 0x3308, 0x49f: 0x3308, 0x4a0: 0x0808, 0x4a1: 0x0808, 0x4a2: 0x0808, 0x4a3: 0x0808, 0x4a4: 0x0808, 0x4a5: 0x0808, 0x4a6: 0x0808, 0x4a7: 0x0808, 0x4a8: 0x0808, 0x4a9: 0x0808, 0x4aa: 0x0018, 0x4ab: 0x0818, 0x4ac: 0x0818, 0x4ad: 0x0818, 0x4ae: 0x0a08, 0x4af: 0x0a08, - 0x4b0: 0x3308, 0x4b1: 0x0c08, 0x4b2: 0x0c08, 0x4b3: 0x0c08, 0x4b4: 0x0808, 0x4b5: 0x0429, - 0x4b6: 0x0451, 0x4b7: 0x0479, 0x4b8: 0x04a1, 0x4b9: 0x0a08, 0x4ba: 0x0a08, 0x4bb: 0x0a08, + 0x4b0: 0x3308, 0x4b1: 0x0c08, 0x4b2: 0x0c08, 0x4b3: 0x0c08, 0x4b4: 0x0808, 0x4b5: 0x0139, + 0x4b6: 0x0141, 0x4b7: 0x0149, 0x4b8: 0x0151, 0x4b9: 0x0a08, 0x4ba: 0x0a08, 0x4bb: 0x0a08, 0x4bc: 0x0a08, 0x4bd: 0x0a08, 0x4be: 0x0a08, 0x4bf: 0x0a08, // Block 0x13, offset 0x4c0 0x4c0: 0x0c08, 0x4c1: 0x0a08, 0x4c2: 0x0a08, 0x4c3: 0x0c08, 0x4c4: 0x0c08, 0x4c5: 0x0c08, @@ -826,8 +969,8 @@ 0x586: 0x3308, 0x587: 0x3308, 0x588: 0x3308, 0x589: 0x3008, 0x58a: 0x3008, 0x58b: 0x3008, 0x58c: 0x3008, 0x58d: 0x3b08, 0x58e: 0x3008, 0x58f: 0x3008, 0x590: 0x0008, 0x591: 0x3308, 0x592: 0x3308, 0x593: 0x3308, 0x594: 0x3308, 0x595: 0x3308, 0x596: 0x3308, 0x597: 0x3308, - 0x598: 0x04c9, 0x599: 0x0501, 0x59a: 0x0539, 0x59b: 0x0571, 0x59c: 0x05a9, 0x59d: 0x05e1, - 0x59e: 0x0619, 0x59f: 0x0651, 0x5a0: 0x0008, 0x5a1: 0x0008, 0x5a2: 0x3308, 0x5a3: 0x3308, + 0x598: 0x0159, 0x599: 0x0161, 0x59a: 0x0169, 0x59b: 0x0171, 0x59c: 0x0179, 0x59d: 0x0181, + 0x59e: 0x0189, 0x59f: 0x0191, 0x5a0: 0x0008, 0x5a1: 0x0008, 0x5a2: 0x3308, 0x5a3: 0x3308, 0x5a4: 0x0018, 0x5a5: 0x0018, 0x5a6: 0x0008, 0x5a7: 0x0008, 0x5a8: 0x0008, 0x5a9: 0x0008, 0x5aa: 0x0008, 0x5ab: 0x0008, 0x5ac: 0x0008, 0x5ad: 0x0008, 0x5ae: 0x0008, 0x5af: 0x0008, 0x5b0: 0x0018, 0x5b1: 0x0008, 0x5b2: 0x0008, 0x5b3: 0x0008, 0x5b4: 0x0008, 0x5b5: 0x0008, @@ -850,8 +993,8 @@ 0x606: 0x0040, 0x607: 0x3008, 0x608: 0x3008, 0x609: 0x0040, 0x60a: 0x0040, 0x60b: 0x3008, 0x60c: 0x3008, 0x60d: 0x3b08, 0x60e: 0x0008, 0x60f: 0x0040, 0x610: 0x0040, 0x611: 0x0040, 0x612: 0x0040, 0x613: 0x0040, 0x614: 0x0040, 0x615: 0x0040, 0x616: 0x0040, 0x617: 0x3008, - 0x618: 0x0040, 0x619: 0x0040, 0x61a: 0x0040, 0x61b: 0x0040, 0x61c: 0x0689, 0x61d: 0x06c1, - 0x61e: 0x0040, 0x61f: 0x06f9, 0x620: 0x0008, 0x621: 0x0008, 0x622: 0x3308, 0x623: 0x3308, + 0x618: 0x0040, 0x619: 0x0040, 0x61a: 0x0040, 0x61b: 0x0040, 0x61c: 0x0199, 0x61d: 0x01a1, + 0x61e: 0x0040, 0x61f: 0x01a9, 0x620: 0x0008, 0x621: 0x0008, 0x622: 0x3308, 0x623: 0x3308, 0x624: 0x0040, 0x625: 0x0040, 0x626: 0x0008, 0x627: 0x0008, 0x628: 0x0008, 0x629: 0x0008, 0x62a: 0x0008, 0x62b: 0x0008, 0x62c: 0x0008, 0x62d: 0x0008, 0x62e: 0x0008, 0x62f: 0x0008, 0x630: 0x0008, 0x631: 0x0008, 0x632: 0x0018, 0x633: 0x0018, 0x634: 0x0018, 0x635: 0x0018, @@ -866,16 +1009,16 @@ 0x65e: 0x0008, 0x65f: 0x0008, 0x660: 0x0008, 0x661: 0x0008, 0x662: 0x0008, 0x663: 0x0008, 
0x664: 0x0008, 0x665: 0x0008, 0x666: 0x0008, 0x667: 0x0008, 0x668: 0x0008, 0x669: 0x0040, 0x66a: 0x0008, 0x66b: 0x0008, 0x66c: 0x0008, 0x66d: 0x0008, 0x66e: 0x0008, 0x66f: 0x0008, - 0x670: 0x0008, 0x671: 0x0040, 0x672: 0x0008, 0x673: 0x0731, 0x674: 0x0040, 0x675: 0x0008, - 0x676: 0x0769, 0x677: 0x0040, 0x678: 0x0008, 0x679: 0x0008, 0x67a: 0x0040, 0x67b: 0x0040, + 0x670: 0x0008, 0x671: 0x0040, 0x672: 0x0008, 0x673: 0x01b1, 0x674: 0x0040, 0x675: 0x0008, + 0x676: 0x01b9, 0x677: 0x0040, 0x678: 0x0008, 0x679: 0x0008, 0x67a: 0x0040, 0x67b: 0x0040, 0x67c: 0x3308, 0x67d: 0x0040, 0x67e: 0x3008, 0x67f: 0x3008, // Block 0x1a, offset 0x680 0x680: 0x3008, 0x681: 0x3308, 0x682: 0x3308, 0x683: 0x0040, 0x684: 0x0040, 0x685: 0x0040, 0x686: 0x0040, 0x687: 0x3308, 0x688: 0x3308, 0x689: 0x0040, 0x68a: 0x0040, 0x68b: 0x3308, 0x68c: 0x3308, 0x68d: 0x3b08, 0x68e: 0x0040, 0x68f: 0x0040, 0x690: 0x0040, 0x691: 0x3308, 0x692: 0x0040, 0x693: 0x0040, 0x694: 0x0040, 0x695: 0x0040, 0x696: 0x0040, 0x697: 0x0040, - 0x698: 0x0040, 0x699: 0x07a1, 0x69a: 0x07d9, 0x69b: 0x0811, 0x69c: 0x0008, 0x69d: 0x0040, - 0x69e: 0x0849, 0x69f: 0x0040, 0x6a0: 0x0040, 0x6a1: 0x0040, 0x6a2: 0x0040, 0x6a3: 0x0040, + 0x698: 0x0040, 0x699: 0x01c1, 0x69a: 0x01c9, 0x69b: 0x01d1, 0x69c: 0x0008, 0x69d: 0x0040, + 0x69e: 0x01d9, 0x69f: 0x0040, 0x6a0: 0x0040, 0x6a1: 0x0040, 0x6a2: 0x0040, 0x6a3: 0x0040, 0x6a4: 0x0040, 0x6a5: 0x0040, 0x6a6: 0x0008, 0x6a7: 0x0008, 0x6a8: 0x0008, 0x6a9: 0x0008, 0x6aa: 0x0008, 0x6ab: 0x0008, 0x6ac: 0x0008, 0x6ad: 0x0008, 0x6ae: 0x0008, 0x6af: 0x0008, 0x6b0: 0x3308, 0x6b1: 0x3308, 0x6b2: 0x0008, 0x6b3: 0x0008, 0x6b4: 0x0008, 0x6b5: 0x3308, @@ -922,7 +1065,7 @@ 0x786: 0x0040, 0x787: 0x3008, 0x788: 0x3008, 0x789: 0x0040, 0x78a: 0x0040, 0x78b: 0x3008, 0x78c: 0x3008, 0x78d: 0x3b08, 0x78e: 0x0040, 0x78f: 0x0040, 0x790: 0x0040, 0x791: 0x0040, 0x792: 0x0040, 0x793: 0x0040, 0x794: 0x0040, 0x795: 0x3308, 0x796: 0x3308, 0x797: 0x3008, - 0x798: 0x0040, 0x799: 0x0040, 0x79a: 0x0040, 0x79b: 0x0040, 0x79c: 0x0881, 0x79d: 0x08b9, + 0x798: 0x0040, 0x799: 0x0040, 0x79a: 0x0040, 0x79b: 0x0040, 0x79c: 0x01e1, 0x79d: 0x01e9, 0x79e: 0x0040, 0x79f: 0x0008, 0x7a0: 0x0008, 0x7a1: 0x0008, 0x7a2: 0x3308, 0x7a3: 0x3308, 0x7a4: 0x0040, 0x7a5: 0x0040, 0x7a6: 0x0008, 0x7a7: 0x0008, 0x7a8: 0x0008, 0x7a9: 0x0008, 0x7aa: 0x0008, 0x7ab: 0x0008, 0x7ac: 0x0008, 0x7ad: 0x0008, 0x7ae: 0x0008, 0x7af: 0x0008, @@ -998,32 +1141,32 @@ 0x91e: 0x0008, 0x91f: 0x0008, 0x920: 0x0008, 0x921: 0x0008, 0x922: 0x0008, 0x923: 0x0008, 0x924: 0x0040, 0x925: 0x0008, 0x926: 0x0040, 0x927: 0x0008, 0x928: 0x0008, 0x929: 0x0008, 0x92a: 0x0008, 0x92b: 0x0008, 0x92c: 0x0008, 0x92d: 0x0008, 0x92e: 0x0008, 0x92f: 0x0008, - 0x930: 0x0008, 0x931: 0x3308, 0x932: 0x0008, 0x933: 0x0929, 0x934: 0x3308, 0x935: 0x3308, + 0x930: 0x0008, 0x931: 0x3308, 0x932: 0x0008, 0x933: 0x01f9, 0x934: 0x3308, 0x935: 0x3308, 0x936: 0x3308, 0x937: 0x3308, 0x938: 0x3308, 0x939: 0x3308, 0x93a: 0x3b08, 0x93b: 0x3308, 0x93c: 0x3308, 0x93d: 0x0008, 0x93e: 0x0040, 0x93f: 0x0040, // Block 0x25, offset 0x940 - 0x940: 0x0008, 0x941: 0x0008, 0x942: 0x0008, 0x943: 0x09d1, 0x944: 0x0008, 0x945: 0x0008, + 0x940: 0x0008, 0x941: 0x0008, 0x942: 0x0008, 0x943: 0x0211, 0x944: 0x0008, 0x945: 0x0008, 0x946: 0x0008, 0x947: 0x0008, 0x948: 0x0040, 0x949: 0x0008, 0x94a: 0x0008, 0x94b: 0x0008, - 0x94c: 0x0008, 0x94d: 0x0a09, 0x94e: 0x0008, 0x94f: 0x0008, 0x950: 0x0008, 0x951: 0x0008, - 0x952: 0x0a41, 0x953: 0x0008, 0x954: 0x0008, 0x955: 0x0008, 0x956: 0x0008, 0x957: 0x0a79, - 0x958: 0x0008, 0x959: 0x0008, 0x95a: 0x0008, 0x95b: 0x0008, 
0x95c: 0x0ab1, 0x95d: 0x0008, + 0x94c: 0x0008, 0x94d: 0x0219, 0x94e: 0x0008, 0x94f: 0x0008, 0x950: 0x0008, 0x951: 0x0008, + 0x952: 0x0221, 0x953: 0x0008, 0x954: 0x0008, 0x955: 0x0008, 0x956: 0x0008, 0x957: 0x0229, + 0x958: 0x0008, 0x959: 0x0008, 0x95a: 0x0008, 0x95b: 0x0008, 0x95c: 0x0231, 0x95d: 0x0008, 0x95e: 0x0008, 0x95f: 0x0008, 0x960: 0x0008, 0x961: 0x0008, 0x962: 0x0008, 0x963: 0x0008, - 0x964: 0x0008, 0x965: 0x0008, 0x966: 0x0008, 0x967: 0x0008, 0x968: 0x0008, 0x969: 0x0ae9, + 0x964: 0x0008, 0x965: 0x0008, 0x966: 0x0008, 0x967: 0x0008, 0x968: 0x0008, 0x969: 0x0239, 0x96a: 0x0008, 0x96b: 0x0008, 0x96c: 0x0008, 0x96d: 0x0040, 0x96e: 0x0040, 0x96f: 0x0040, - 0x970: 0x0040, 0x971: 0x3308, 0x972: 0x3308, 0x973: 0x0b21, 0x974: 0x3308, 0x975: 0x0b59, - 0x976: 0x0b91, 0x977: 0x0bc9, 0x978: 0x0c19, 0x979: 0x0c51, 0x97a: 0x3308, 0x97b: 0x3308, + 0x970: 0x0040, 0x971: 0x3308, 0x972: 0x3308, 0x973: 0x0241, 0x974: 0x3308, 0x975: 0x0249, + 0x976: 0x0251, 0x977: 0x0259, 0x978: 0x0261, 0x979: 0x0269, 0x97a: 0x3308, 0x97b: 0x3308, 0x97c: 0x3308, 0x97d: 0x3308, 0x97e: 0x3308, 0x97f: 0x3008, // Block 0x26, offset 0x980 - 0x980: 0x3308, 0x981: 0x0ca1, 0x982: 0x3308, 0x983: 0x3308, 0x984: 0x3b08, 0x985: 0x0018, + 0x980: 0x3308, 0x981: 0x0271, 0x982: 0x3308, 0x983: 0x3308, 0x984: 0x3b08, 0x985: 0x0018, 0x986: 0x3308, 0x987: 0x3308, 0x988: 0x0008, 0x989: 0x0008, 0x98a: 0x0008, 0x98b: 0x0008, 0x98c: 0x0008, 0x98d: 0x3308, 0x98e: 0x3308, 0x98f: 0x3308, 0x990: 0x3308, 0x991: 0x3308, - 0x992: 0x3308, 0x993: 0x0cd9, 0x994: 0x3308, 0x995: 0x3308, 0x996: 0x3308, 0x997: 0x3308, - 0x998: 0x0040, 0x999: 0x3308, 0x99a: 0x3308, 0x99b: 0x3308, 0x99c: 0x3308, 0x99d: 0x0d11, - 0x99e: 0x3308, 0x99f: 0x3308, 0x9a0: 0x3308, 0x9a1: 0x3308, 0x9a2: 0x0d49, 0x9a3: 0x3308, - 0x9a4: 0x3308, 0x9a5: 0x3308, 0x9a6: 0x3308, 0x9a7: 0x0d81, 0x9a8: 0x3308, 0x9a9: 0x3308, - 0x9aa: 0x3308, 0x9ab: 0x3308, 0x9ac: 0x0db9, 0x9ad: 0x3308, 0x9ae: 0x3308, 0x9af: 0x3308, + 0x992: 0x3308, 0x993: 0x0279, 0x994: 0x3308, 0x995: 0x3308, 0x996: 0x3308, 0x997: 0x3308, + 0x998: 0x0040, 0x999: 0x3308, 0x99a: 0x3308, 0x99b: 0x3308, 0x99c: 0x3308, 0x99d: 0x0281, + 0x99e: 0x3308, 0x99f: 0x3308, 0x9a0: 0x3308, 0x9a1: 0x3308, 0x9a2: 0x0289, 0x9a3: 0x3308, + 0x9a4: 0x3308, 0x9a5: 0x3308, 0x9a6: 0x3308, 0x9a7: 0x0291, 0x9a8: 0x3308, 0x9a9: 0x3308, + 0x9aa: 0x3308, 0x9ab: 0x3308, 0x9ac: 0x0299, 0x9ad: 0x3308, 0x9ae: 0x3308, 0x9af: 0x3308, 0x9b0: 0x3308, 0x9b1: 0x3308, 0x9b2: 0x3308, 0x9b3: 0x3308, 0x9b4: 0x3308, 0x9b5: 0x3308, - 0x9b6: 0x3308, 0x9b7: 0x3308, 0x9b8: 0x3308, 0x9b9: 0x0df1, 0x9ba: 0x3308, 0x9bb: 0x3308, + 0x9b6: 0x3308, 0x9b7: 0x3308, 0x9b8: 0x3308, 0x9b9: 0x02a1, 0x9ba: 0x3308, 0x9bb: 0x3308, 0x9bc: 0x3308, 0x9bd: 0x0040, 0x9be: 0x0018, 0x9bf: 0x0018, // Block 0x27, offset 0x9c0 0x9c0: 0x0008, 0x9c1: 0x0008, 0x9c2: 0x0008, 0x9c3: 0x0008, 0x9c4: 0x0008, 0x9c5: 0x0008, @@ -1033,34 +1176,34 @@ 0x9d8: 0x0008, 0x9d9: 0x0008, 0x9da: 0x0008, 0x9db: 0x0008, 0x9dc: 0x0008, 0x9dd: 0x0008, 0x9de: 0x0008, 0x9df: 0x0008, 0x9e0: 0x0008, 0x9e1: 0x0008, 0x9e2: 0x0008, 0x9e3: 0x0008, 0x9e4: 0x0008, 0x9e5: 0x0008, 0x9e6: 0x0008, 0x9e7: 0x0008, 0x9e8: 0x0008, 0x9e9: 0x0008, - 0x9ea: 0x0008, 0x9eb: 0x0008, 0x9ec: 0x0039, 0x9ed: 0x0ed1, 0x9ee: 0x0ee9, 0x9ef: 0x0008, - 0x9f0: 0x0ef9, 0x9f1: 0x0f09, 0x9f2: 0x0f19, 0x9f3: 0x0f31, 0x9f4: 0x0249, 0x9f5: 0x0f41, - 0x9f6: 0x0259, 0x9f7: 0x0f51, 0x9f8: 0x0359, 0x9f9: 0x0f61, 0x9fa: 0x0f71, 0x9fb: 0x0008, - 0x9fc: 0x00d9, 0x9fd: 0x0f81, 0x9fe: 0x0f99, 0x9ff: 0x0269, + 0x9ea: 0x0008, 0x9eb: 0x0008, 0x9ec: 0x0019, 0x9ed: 
0x02e1, 0x9ee: 0x02e9, 0x9ef: 0x0008, + 0x9f0: 0x02f1, 0x9f1: 0x02f9, 0x9f2: 0x0301, 0x9f3: 0x0309, 0x9f4: 0x00a9, 0x9f5: 0x0311, + 0x9f6: 0x00b1, 0x9f7: 0x0319, 0x9f8: 0x0101, 0x9f9: 0x0321, 0x9fa: 0x0329, 0x9fb: 0x0008, + 0x9fc: 0x0051, 0x9fd: 0x0331, 0x9fe: 0x0339, 0x9ff: 0x00b9, // Block 0x28, offset 0xa00 - 0xa00: 0x0fa9, 0xa01: 0x0fb9, 0xa02: 0x0279, 0xa03: 0x0039, 0xa04: 0x0fc9, 0xa05: 0x0fe1, - 0xa06: 0x05b5, 0xa07: 0x0ee9, 0xa08: 0x0ef9, 0xa09: 0x0f09, 0xa0a: 0x0ff9, 0xa0b: 0x1011, - 0xa0c: 0x1029, 0xa0d: 0x0f31, 0xa0e: 0x0008, 0xa0f: 0x0f51, 0xa10: 0x0f61, 0xa11: 0x1041, - 0xa12: 0x00d9, 0xa13: 0x1059, 0xa14: 0x05cd, 0xa15: 0x05cd, 0xa16: 0x0f99, 0xa17: 0x0fa9, - 0xa18: 0x0fb9, 0xa19: 0x05b5, 0xa1a: 0x1071, 0xa1b: 0x1089, 0xa1c: 0x05e5, 0xa1d: 0x1099, - 0xa1e: 0x10b1, 0xa1f: 0x10c9, 0xa20: 0x10e1, 0xa21: 0x10f9, 0xa22: 0x0f41, 0xa23: 0x0269, - 0xa24: 0x0fb9, 0xa25: 0x1089, 0xa26: 0x1099, 0xa27: 0x10b1, 0xa28: 0x1111, 0xa29: 0x10e1, - 0xa2a: 0x10f9, 0xa2b: 0x0008, 0xa2c: 0x0008, 0xa2d: 0x0008, 0xa2e: 0x0008, 0xa2f: 0x0008, + 0xa00: 0x0341, 0xa01: 0x0349, 0xa02: 0x00c1, 0xa03: 0x0019, 0xa04: 0x0351, 0xa05: 0x0359, + 0xa06: 0x05b5, 0xa07: 0x02e9, 0xa08: 0x02f1, 0xa09: 0x02f9, 0xa0a: 0x0361, 0xa0b: 0x0369, + 0xa0c: 0x0371, 0xa0d: 0x0309, 0xa0e: 0x0008, 0xa0f: 0x0319, 0xa10: 0x0321, 0xa11: 0x0379, + 0xa12: 0x0051, 0xa13: 0x0381, 0xa14: 0x05cd, 0xa15: 0x05cd, 0xa16: 0x0339, 0xa17: 0x0341, + 0xa18: 0x0349, 0xa19: 0x05b5, 0xa1a: 0x0389, 0xa1b: 0x0391, 0xa1c: 0x05e5, 0xa1d: 0x0399, + 0xa1e: 0x03a1, 0xa1f: 0x03a9, 0xa20: 0x03b1, 0xa21: 0x03b9, 0xa22: 0x0311, 0xa23: 0x00b9, + 0xa24: 0x0349, 0xa25: 0x0391, 0xa26: 0x0399, 0xa27: 0x03a1, 0xa28: 0x03c1, 0xa29: 0x03b1, + 0xa2a: 0x03b9, 0xa2b: 0x0008, 0xa2c: 0x0008, 0xa2d: 0x0008, 0xa2e: 0x0008, 0xa2f: 0x0008, 0xa30: 0x0008, 0xa31: 0x0008, 0xa32: 0x0008, 0xa33: 0x0008, 0xa34: 0x0008, 0xa35: 0x0008, - 0xa36: 0x0008, 0xa37: 0x0008, 0xa38: 0x1129, 0xa39: 0x0008, 0xa3a: 0x0008, 0xa3b: 0x0008, + 0xa36: 0x0008, 0xa37: 0x0008, 0xa38: 0x03c9, 0xa39: 0x0008, 0xa3a: 0x0008, 0xa3b: 0x0008, 0xa3c: 0x0008, 0xa3d: 0x0008, 0xa3e: 0x0008, 0xa3f: 0x0008, // Block 0x29, offset 0xa40 0xa40: 0x0008, 0xa41: 0x0008, 0xa42: 0x0008, 0xa43: 0x0008, 0xa44: 0x0008, 0xa45: 0x0008, 0xa46: 0x0008, 0xa47: 0x0008, 0xa48: 0x0008, 0xa49: 0x0008, 0xa4a: 0x0008, 0xa4b: 0x0008, 0xa4c: 0x0008, 0xa4d: 0x0008, 0xa4e: 0x0008, 0xa4f: 0x0008, 0xa50: 0x0008, 0xa51: 0x0008, 0xa52: 0x0008, 0xa53: 0x0008, 0xa54: 0x0008, 0xa55: 0x0008, 0xa56: 0x0008, 0xa57: 0x0008, - 0xa58: 0x0008, 0xa59: 0x0008, 0xa5a: 0x0008, 0xa5b: 0x1141, 0xa5c: 0x1159, 0xa5d: 0x1169, - 0xa5e: 0x1181, 0xa5f: 0x1029, 0xa60: 0x1199, 0xa61: 0x11a9, 0xa62: 0x11c1, 0xa63: 0x11d9, - 0xa64: 0x11f1, 0xa65: 0x1209, 0xa66: 0x1221, 0xa67: 0x05fd, 0xa68: 0x1239, 0xa69: 0x1251, - 0xa6a: 0xe17d, 0xa6b: 0x1269, 0xa6c: 0x1281, 0xa6d: 0x1299, 0xa6e: 0x12b1, 0xa6f: 0x12c9, - 0xa70: 0x12e1, 0xa71: 0x12f9, 0xa72: 0x1311, 0xa73: 0x1329, 0xa74: 0x1341, 0xa75: 0x1359, - 0xa76: 0x1371, 0xa77: 0x1389, 0xa78: 0x0615, 0xa79: 0x13a1, 0xa7a: 0x13b9, 0xa7b: 0x13d1, - 0xa7c: 0x13e1, 0xa7d: 0x13f9, 0xa7e: 0x1411, 0xa7f: 0x1429, + 0xa58: 0x0008, 0xa59: 0x0008, 0xa5a: 0x0008, 0xa5b: 0x03d1, 0xa5c: 0x03d9, 0xa5d: 0x03e1, + 0xa5e: 0x03e9, 0xa5f: 0x0371, 0xa60: 0x03f1, 0xa61: 0x03f9, 0xa62: 0x0401, 0xa63: 0x0409, + 0xa64: 0x0411, 0xa65: 0x0419, 0xa66: 0x0421, 0xa67: 0x05fd, 0xa68: 0x0429, 0xa69: 0x0431, + 0xa6a: 0xe17d, 0xa6b: 0x0439, 0xa6c: 0x0441, 0xa6d: 0x0449, 0xa6e: 0x0451, 0xa6f: 0x0459, + 0xa70: 0x0461, 0xa71: 0x0469, 0xa72: 0x0471, 0xa73: 0x0479, 
0xa74: 0x0481, 0xa75: 0x0489, + 0xa76: 0x0491, 0xa77: 0x0499, 0xa78: 0x0615, 0xa79: 0x04a1, 0xa7a: 0x04a9, 0xa7b: 0x04b1, + 0xa7c: 0x04b9, 0xa7d: 0x04c1, 0xa7e: 0x04c9, 0xa7f: 0x04d1, // Block 0x2a, offset 0xa80 0xa80: 0xe00d, 0xa81: 0x0008, 0xa82: 0xe00d, 0xa83: 0x0008, 0xa84: 0xe00d, 0xa85: 0x0008, 0xa86: 0xe00d, 0xa87: 0x0008, 0xa88: 0xe00d, 0xa89: 0x0008, 0xa8a: 0xe00d, 0xa8b: 0x0008, @@ -1079,7 +1222,7 @@ 0xacc: 0xe00d, 0xacd: 0x0008, 0xace: 0xe00d, 0xacf: 0x0008, 0xad0: 0xe00d, 0xad1: 0x0008, 0xad2: 0xe00d, 0xad3: 0x0008, 0xad4: 0xe00d, 0xad5: 0x0008, 0xad6: 0x0008, 0xad7: 0x0008, 0xad8: 0x0008, 0xad9: 0x0008, 0xada: 0x062d, 0xadb: 0x064d, 0xadc: 0x0008, 0xadd: 0x0008, - 0xade: 0x1441, 0xadf: 0x0008, 0xae0: 0xe00d, 0xae1: 0x0008, 0xae2: 0xe00d, 0xae3: 0x0008, + 0xade: 0x04d9, 0xadf: 0x0008, 0xae0: 0xe00d, 0xae1: 0x0008, 0xae2: 0xe00d, 0xae3: 0x0008, 0xae4: 0xe00d, 0xae5: 0x0008, 0xae6: 0xe00d, 0xae7: 0x0008, 0xae8: 0xe00d, 0xae9: 0x0008, 0xaea: 0xe00d, 0xaeb: 0x0008, 0xaec: 0xe00d, 0xaed: 0x0008, 0xaee: 0xe00d, 0xaef: 0x0008, 0xaf0: 0xe00d, 0xaf1: 0x0008, 0xaf2: 0xe00d, 0xaf3: 0x0008, 0xaf4: 0xe00d, 0xaf5: 0x0008, @@ -1094,33 +1237,33 @@ 0xb1e: 0x0040, 0xb1f: 0xe045, 0xb20: 0x0008, 0xb21: 0x0008, 0xb22: 0x0008, 0xb23: 0x0008, 0xb24: 0x0008, 0xb25: 0x0008, 0xb26: 0x0008, 0xb27: 0x0008, 0xb28: 0xe045, 0xb29: 0xe045, 0xb2a: 0xe045, 0xb2b: 0xe045, 0xb2c: 0xe045, 0xb2d: 0xe045, 0xb2e: 0xe045, 0xb2f: 0xe045, - 0xb30: 0x0008, 0xb31: 0x1459, 0xb32: 0x0008, 0xb33: 0x1471, 0xb34: 0x0008, 0xb35: 0x1489, - 0xb36: 0x0008, 0xb37: 0x14a1, 0xb38: 0x0008, 0xb39: 0x14b9, 0xb3a: 0x0008, 0xb3b: 0x14d1, - 0xb3c: 0x0008, 0xb3d: 0x14e9, 0xb3e: 0x0040, 0xb3f: 0x0040, + 0xb30: 0x0008, 0xb31: 0x04e1, 0xb32: 0x0008, 0xb33: 0x04e9, 0xb34: 0x0008, 0xb35: 0x04f1, + 0xb36: 0x0008, 0xb37: 0x04f9, 0xb38: 0x0008, 0xb39: 0x0501, 0xb3a: 0x0008, 0xb3b: 0x0509, + 0xb3c: 0x0008, 0xb3d: 0x0511, 0xb3e: 0x0040, 0xb3f: 0x0040, // Block 0x2d, offset 0xb40 - 0xb40: 0x1501, 0xb41: 0x1531, 0xb42: 0x1561, 0xb43: 0x1591, 0xb44: 0x15c1, 0xb45: 0x15f1, - 0xb46: 0x1621, 0xb47: 0x1651, 0xb48: 0x1501, 0xb49: 0x1531, 0xb4a: 0x1561, 0xb4b: 0x1591, - 0xb4c: 0x15c1, 0xb4d: 0x15f1, 0xb4e: 0x1621, 0xb4f: 0x1651, 0xb50: 0x1681, 0xb51: 0x16b1, - 0xb52: 0x16e1, 0xb53: 0x1711, 0xb54: 0x1741, 0xb55: 0x1771, 0xb56: 0x17a1, 0xb57: 0x17d1, - 0xb58: 0x1681, 0xb59: 0x16b1, 0xb5a: 0x16e1, 0xb5b: 0x1711, 0xb5c: 0x1741, 0xb5d: 0x1771, - 0xb5e: 0x17a1, 0xb5f: 0x17d1, 0xb60: 0x1801, 0xb61: 0x1831, 0xb62: 0x1861, 0xb63: 0x1891, - 0xb64: 0x18c1, 0xb65: 0x18f1, 0xb66: 0x1921, 0xb67: 0x1951, 0xb68: 0x1801, 0xb69: 0x1831, - 0xb6a: 0x1861, 0xb6b: 0x1891, 0xb6c: 0x18c1, 0xb6d: 0x18f1, 0xb6e: 0x1921, 0xb6f: 0x1951, - 0xb70: 0x0008, 0xb71: 0x0008, 0xb72: 0x1981, 0xb73: 0x19b1, 0xb74: 0x19d9, 0xb75: 0x0040, - 0xb76: 0x0008, 0xb77: 0x1a01, 0xb78: 0xe045, 0xb79: 0xe045, 0xb7a: 0x0665, 0xb7b: 0x1459, - 0xb7c: 0x19b1, 0xb7d: 0x067e, 0xb7e: 0x1a31, 0xb7f: 0x069e, + 0xb40: 0x0519, 0xb41: 0x0521, 0xb42: 0x0529, 0xb43: 0x0531, 0xb44: 0x0539, 0xb45: 0x0541, + 0xb46: 0x0549, 0xb47: 0x0551, 0xb48: 0x0519, 0xb49: 0x0521, 0xb4a: 0x0529, 0xb4b: 0x0531, + 0xb4c: 0x0539, 0xb4d: 0x0541, 0xb4e: 0x0549, 0xb4f: 0x0551, 0xb50: 0x0559, 0xb51: 0x0561, + 0xb52: 0x0569, 0xb53: 0x0571, 0xb54: 0x0579, 0xb55: 0x0581, 0xb56: 0x0589, 0xb57: 0x0591, + 0xb58: 0x0559, 0xb59: 0x0561, 0xb5a: 0x0569, 0xb5b: 0x0571, 0xb5c: 0x0579, 0xb5d: 0x0581, + 0xb5e: 0x0589, 0xb5f: 0x0591, 0xb60: 0x0599, 0xb61: 0x05a1, 0xb62: 0x05a9, 0xb63: 0x05b1, + 0xb64: 0x05b9, 0xb65: 0x05c1, 0xb66: 0x05c9, 0xb67: 0x05d1, 0xb68: 
0x0599, 0xb69: 0x05a1, + 0xb6a: 0x05a9, 0xb6b: 0x05b1, 0xb6c: 0x05b9, 0xb6d: 0x05c1, 0xb6e: 0x05c9, 0xb6f: 0x05d1, + 0xb70: 0x0008, 0xb71: 0x0008, 0xb72: 0x05d9, 0xb73: 0x05e1, 0xb74: 0x05e9, 0xb75: 0x0040, + 0xb76: 0x0008, 0xb77: 0x05f1, 0xb78: 0xe045, 0xb79: 0xe045, 0xb7a: 0x0665, 0xb7b: 0x04e1, + 0xb7c: 0x05e1, 0xb7d: 0x067e, 0xb7e: 0x05f9, 0xb7f: 0x069e, // Block 0x2e, offset 0xb80 - 0xb80: 0x06be, 0xb81: 0x1a4a, 0xb82: 0x1a79, 0xb83: 0x1aa9, 0xb84: 0x1ad1, 0xb85: 0x0040, - 0xb86: 0x0008, 0xb87: 0x1af9, 0xb88: 0x06dd, 0xb89: 0x1471, 0xb8a: 0x06f5, 0xb8b: 0x1489, - 0xb8c: 0x1aa9, 0xb8d: 0x1b2a, 0xb8e: 0x1b5a, 0xb8f: 0x1b8a, 0xb90: 0x0008, 0xb91: 0x0008, - 0xb92: 0x0008, 0xb93: 0x1bb9, 0xb94: 0x0040, 0xb95: 0x0040, 0xb96: 0x0008, 0xb97: 0x0008, - 0xb98: 0xe045, 0xb99: 0xe045, 0xb9a: 0x070d, 0xb9b: 0x14a1, 0xb9c: 0x0040, 0xb9d: 0x1bd2, - 0xb9e: 0x1c02, 0xb9f: 0x1c32, 0xba0: 0x0008, 0xba1: 0x0008, 0xba2: 0x0008, 0xba3: 0x1c61, + 0xb80: 0x06be, 0xb81: 0x0602, 0xb82: 0x0609, 0xb83: 0x0611, 0xb84: 0x0619, 0xb85: 0x0040, + 0xb86: 0x0008, 0xb87: 0x0621, 0xb88: 0x06dd, 0xb89: 0x04e9, 0xb8a: 0x06f5, 0xb8b: 0x04f1, + 0xb8c: 0x0611, 0xb8d: 0x062a, 0xb8e: 0x0632, 0xb8f: 0x063a, 0xb90: 0x0008, 0xb91: 0x0008, + 0xb92: 0x0008, 0xb93: 0x0641, 0xb94: 0x0040, 0xb95: 0x0040, 0xb96: 0x0008, 0xb97: 0x0008, + 0xb98: 0xe045, 0xb99: 0xe045, 0xb9a: 0x070d, 0xb9b: 0x04f9, 0xb9c: 0x0040, 0xb9d: 0x064a, + 0xb9e: 0x0652, 0xb9f: 0x065a, 0xba0: 0x0008, 0xba1: 0x0008, 0xba2: 0x0008, 0xba3: 0x0661, 0xba4: 0x0008, 0xba5: 0x0008, 0xba6: 0x0008, 0xba7: 0x0008, 0xba8: 0xe045, 0xba9: 0xe045, - 0xbaa: 0x0725, 0xbab: 0x14d1, 0xbac: 0xe04d, 0xbad: 0x1c7a, 0xbae: 0x03d2, 0xbaf: 0x1caa, - 0xbb0: 0x0040, 0xbb1: 0x0040, 0xbb2: 0x1cb9, 0xbb3: 0x1ce9, 0xbb4: 0x1d11, 0xbb5: 0x0040, - 0xbb6: 0x0008, 0xbb7: 0x1d39, 0xbb8: 0x073d, 0xbb9: 0x14b9, 0xbba: 0x0515, 0xbbb: 0x14e9, - 0xbbc: 0x1ce9, 0xbbd: 0x0756, 0xbbe: 0x0776, 0xbbf: 0x0040, + 0xbaa: 0x0725, 0xbab: 0x0509, 0xbac: 0xe04d, 0xbad: 0x066a, 0xbae: 0x012a, 0xbaf: 0x0672, + 0xbb0: 0x0040, 0xbb1: 0x0040, 0xbb2: 0x0679, 0xbb3: 0x0681, 0xbb4: 0x0689, 0xbb5: 0x0040, + 0xbb6: 0x0008, 0xbb7: 0x0691, 0xbb8: 0x073d, 0xbb9: 0x0501, 0xbba: 0x0515, 0xbbb: 0x0511, + 0xbbc: 0x0681, 0xbbd: 0x0756, 0xbbe: 0x0776, 0xbbf: 0x0040, // Block 0x2f, offset 0xbc0 0xbc0: 0x000a, 0xbc1: 0x000a, 0xbc2: 0x000a, 0xbc3: 0x000a, 0xbc4: 0x000a, 0xbc5: 0x000a, 0xbc6: 0x000a, 0xbc7: 0x000a, 0xbc8: 0x000a, 0xbc9: 0x000a, 0xbca: 0x000a, 0xbcb: 0x03c0, @@ -1130,72 +1273,72 @@ 0xbde: 0x0018, 0xbdf: 0x0018, 0xbe0: 0x0018, 0xbe1: 0x0018, 0xbe2: 0x0018, 0xbe3: 0x0018, 0xbe4: 0x0040, 0xbe5: 0x0040, 0xbe6: 0x0040, 0xbe7: 0x0018, 0xbe8: 0x0040, 0xbe9: 0x0040, 0xbea: 0x0340, 0xbeb: 0x0340, 0xbec: 0x0340, 0xbed: 0x0340, 0xbee: 0x0340, 0xbef: 0x000a, - 0xbf0: 0x0018, 0xbf1: 0x0018, 0xbf2: 0x0018, 0xbf3: 0x1d69, 0xbf4: 0x1da1, 0xbf5: 0x0018, - 0xbf6: 0x1df1, 0xbf7: 0x1e29, 0xbf8: 0x0018, 0xbf9: 0x0018, 0xbfa: 0x0018, 0xbfb: 0x0018, - 0xbfc: 0x1e7a, 0xbfd: 0x0018, 0xbfe: 0x07b6, 0xbff: 0x0018, + 0xbf0: 0x0018, 0xbf1: 0x0018, 0xbf2: 0x0018, 0xbf3: 0x0699, 0xbf4: 0x06a1, 0xbf5: 0x0018, + 0xbf6: 0x06a9, 0xbf7: 0x06b1, 0xbf8: 0x0018, 0xbf9: 0x0018, 0xbfa: 0x0018, 0xbfb: 0x0018, + 0xbfc: 0x06ba, 0xbfd: 0x0018, 0xbfe: 0x07b6, 0xbff: 0x0018, // Block 0x30, offset 0xc00 0xc00: 0x0018, 0xc01: 0x0018, 0xc02: 0x0018, 0xc03: 0x0018, 0xc04: 0x0018, 0xc05: 0x0018, - 0xc06: 0x0018, 0xc07: 0x1e92, 0xc08: 0x1eaa, 0xc09: 0x1ec2, 0xc0a: 0x0018, 0xc0b: 0x0018, + 0xc06: 0x0018, 0xc07: 0x06c2, 0xc08: 0x06ca, 0xc09: 0x06d2, 0xc0a: 0x0018, 0xc0b: 0x0018, 
0xc0c: 0x0018, 0xc0d: 0x0018, 0xc0e: 0x0018, 0xc0f: 0x0018, 0xc10: 0x0018, 0xc11: 0x0018, - 0xc12: 0x0018, 0xc13: 0x0018, 0xc14: 0x0018, 0xc15: 0x0018, 0xc16: 0x0018, 0xc17: 0x1ed9, + 0xc12: 0x0018, 0xc13: 0x0018, 0xc14: 0x0018, 0xc15: 0x0018, 0xc16: 0x0018, 0xc17: 0x06d9, 0xc18: 0x0018, 0xc19: 0x0018, 0xc1a: 0x0018, 0xc1b: 0x0018, 0xc1c: 0x0018, 0xc1d: 0x0018, 0xc1e: 0x0018, 0xc1f: 0x000a, 0xc20: 0x03c0, 0xc21: 0x0340, 0xc22: 0x0340, 0xc23: 0x0340, 0xc24: 0x03c0, 0xc25: 0x0040, 0xc26: 0x0040, 0xc27: 0x0040, 0xc28: 0x0040, 0xc29: 0x0040, 0xc2a: 0x0340, 0xc2b: 0x0340, 0xc2c: 0x0340, 0xc2d: 0x0340, 0xc2e: 0x0340, 0xc2f: 0x0340, - 0xc30: 0x1f41, 0xc31: 0x0f41, 0xc32: 0x0040, 0xc33: 0x0040, 0xc34: 0x1f51, 0xc35: 0x1f61, - 0xc36: 0x1f71, 0xc37: 0x1f81, 0xc38: 0x1f91, 0xc39: 0x1fa1, 0xc3a: 0x1fb2, 0xc3b: 0x07d5, - 0xc3c: 0x1fc2, 0xc3d: 0x1fd2, 0xc3e: 0x1fe2, 0xc3f: 0x0f71, + 0xc30: 0x06e1, 0xc31: 0x0311, 0xc32: 0x0040, 0xc33: 0x0040, 0xc34: 0x06e9, 0xc35: 0x06f1, + 0xc36: 0x06f9, 0xc37: 0x0701, 0xc38: 0x0709, 0xc39: 0x0711, 0xc3a: 0x071a, 0xc3b: 0x07d5, + 0xc3c: 0x0722, 0xc3d: 0x072a, 0xc3e: 0x0732, 0xc3f: 0x0329, // Block 0x31, offset 0xc40 - 0xc40: 0x1f41, 0xc41: 0x00c9, 0xc42: 0x0069, 0xc43: 0x0079, 0xc44: 0x1f51, 0xc45: 0x1f61, - 0xc46: 0x1f71, 0xc47: 0x1f81, 0xc48: 0x1f91, 0xc49: 0x1fa1, 0xc4a: 0x1fb2, 0xc4b: 0x07ed, - 0xc4c: 0x1fc2, 0xc4d: 0x1fd2, 0xc4e: 0x1fe2, 0xc4f: 0x0040, 0xc50: 0x0039, 0xc51: 0x0f09, - 0xc52: 0x00d9, 0xc53: 0x0369, 0xc54: 0x0ff9, 0xc55: 0x0249, 0xc56: 0x0f51, 0xc57: 0x0359, - 0xc58: 0x0f61, 0xc59: 0x0f71, 0xc5a: 0x0f99, 0xc5b: 0x01d9, 0xc5c: 0x0fa9, 0xc5d: 0x0040, + 0xc40: 0x06e1, 0xc41: 0x0049, 0xc42: 0x0029, 0xc43: 0x0031, 0xc44: 0x06e9, 0xc45: 0x06f1, + 0xc46: 0x06f9, 0xc47: 0x0701, 0xc48: 0x0709, 0xc49: 0x0711, 0xc4a: 0x071a, 0xc4b: 0x07ed, + 0xc4c: 0x0722, 0xc4d: 0x072a, 0xc4e: 0x0732, 0xc4f: 0x0040, 0xc50: 0x0019, 0xc51: 0x02f9, + 0xc52: 0x0051, 0xc53: 0x0109, 0xc54: 0x0361, 0xc55: 0x00a9, 0xc56: 0x0319, 0xc57: 0x0101, + 0xc58: 0x0321, 0xc59: 0x0329, 0xc5a: 0x0339, 0xc5b: 0x0089, 0xc5c: 0x0341, 0xc5d: 0x0040, 0xc5e: 0x0040, 0xc5f: 0x0040, 0xc60: 0x0018, 0xc61: 0x0018, 0xc62: 0x0018, 0xc63: 0x0018, - 0xc64: 0x0018, 0xc65: 0x0018, 0xc66: 0x0018, 0xc67: 0x0018, 0xc68: 0x1ff1, 0xc69: 0x0018, + 0xc64: 0x0018, 0xc65: 0x0018, 0xc66: 0x0018, 0xc67: 0x0018, 0xc68: 0x0739, 0xc69: 0x0018, 0xc6a: 0x0018, 0xc6b: 0x0018, 0xc6c: 0x0018, 0xc6d: 0x0018, 0xc6e: 0x0018, 0xc6f: 0x0018, 0xc70: 0x0018, 0xc71: 0x0018, 0xc72: 0x0018, 0xc73: 0x0018, 0xc74: 0x0018, 0xc75: 0x0018, 0xc76: 0x0018, 0xc77: 0x0018, 0xc78: 0x0018, 0xc79: 0x0018, 0xc7a: 0x0018, 0xc7b: 0x0018, 0xc7c: 0x0018, 0xc7d: 0x0018, 0xc7e: 0x0018, 0xc7f: 0x0018, // Block 0x32, offset 0xc80 - 0xc80: 0x0806, 0xc81: 0x0826, 0xc82: 0x1159, 0xc83: 0x0845, 0xc84: 0x0018, 0xc85: 0x0866, - 0xc86: 0x0886, 0xc87: 0x1011, 0xc88: 0x0018, 0xc89: 0x08a5, 0xc8a: 0x0f31, 0xc8b: 0x0249, - 0xc8c: 0x0249, 0xc8d: 0x0249, 0xc8e: 0x0249, 0xc8f: 0x2009, 0xc90: 0x0f41, 0xc91: 0x0f41, - 0xc92: 0x0359, 0xc93: 0x0359, 0xc94: 0x0018, 0xc95: 0x0f71, 0xc96: 0x2021, 0xc97: 0x0018, - 0xc98: 0x0018, 0xc99: 0x0f99, 0xc9a: 0x2039, 0xc9b: 0x0269, 0xc9c: 0x0269, 0xc9d: 0x0269, - 0xc9e: 0x0018, 0xc9f: 0x0018, 0xca0: 0x2049, 0xca1: 0x08c5, 0xca2: 0x2061, 0xca3: 0x0018, - 0xca4: 0x13d1, 0xca5: 0x0018, 0xca6: 0x2079, 0xca7: 0x0018, 0xca8: 0x13d1, 0xca9: 0x0018, - 0xcaa: 0x0f51, 0xcab: 0x2091, 0xcac: 0x0ee9, 0xcad: 0x1159, 0xcae: 0x0018, 0xcaf: 0x0f09, - 0xcb0: 0x0f09, 0xcb1: 0x1199, 0xcb2: 0x0040, 0xcb3: 0x0f61, 0xcb4: 0x00d9, 0xcb5: 0x20a9, - 0xcb6: 0x20c1, 
0xcb7: 0x20d9, 0xcb8: 0x20f1, 0xcb9: 0x0f41, 0xcba: 0x0018, 0xcbb: 0x08e5, - 0xcbc: 0x2109, 0xcbd: 0x10b1, 0xcbe: 0x10b1, 0xcbf: 0x2109, + 0xc80: 0x0806, 0xc81: 0x0826, 0xc82: 0x03d9, 0xc83: 0x0845, 0xc84: 0x0018, 0xc85: 0x0866, + 0xc86: 0x0886, 0xc87: 0x0369, 0xc88: 0x0018, 0xc89: 0x08a5, 0xc8a: 0x0309, 0xc8b: 0x00a9, + 0xc8c: 0x00a9, 0xc8d: 0x00a9, 0xc8e: 0x00a9, 0xc8f: 0x0741, 0xc90: 0x0311, 0xc91: 0x0311, + 0xc92: 0x0101, 0xc93: 0x0101, 0xc94: 0x0018, 0xc95: 0x0329, 0xc96: 0x0749, 0xc97: 0x0018, + 0xc98: 0x0018, 0xc99: 0x0339, 0xc9a: 0x0751, 0xc9b: 0x00b9, 0xc9c: 0x00b9, 0xc9d: 0x00b9, + 0xc9e: 0x0018, 0xc9f: 0x0018, 0xca0: 0x0759, 0xca1: 0x08c5, 0xca2: 0x0761, 0xca3: 0x0018, + 0xca4: 0x04b1, 0xca5: 0x0018, 0xca6: 0x0769, 0xca7: 0x0018, 0xca8: 0x04b1, 0xca9: 0x0018, + 0xcaa: 0x0319, 0xcab: 0x0771, 0xcac: 0x02e9, 0xcad: 0x03d9, 0xcae: 0x0018, 0xcaf: 0x02f9, + 0xcb0: 0x02f9, 0xcb1: 0x03f1, 0xcb2: 0x0040, 0xcb3: 0x0321, 0xcb4: 0x0051, 0xcb5: 0x0779, + 0xcb6: 0x0781, 0xcb7: 0x0789, 0xcb8: 0x0791, 0xcb9: 0x0311, 0xcba: 0x0018, 0xcbb: 0x08e5, + 0xcbc: 0x0799, 0xcbd: 0x03a1, 0xcbe: 0x03a1, 0xcbf: 0x0799, // Block 0x33, offset 0xcc0 - 0xcc0: 0x0905, 0xcc1: 0x0018, 0xcc2: 0x0018, 0xcc3: 0x0018, 0xcc4: 0x0018, 0xcc5: 0x0ef9, - 0xcc6: 0x0ef9, 0xcc7: 0x0f09, 0xcc8: 0x0f41, 0xcc9: 0x0259, 0xcca: 0x0018, 0xccb: 0x0018, - 0xccc: 0x0018, 0xccd: 0x0018, 0xcce: 0x0008, 0xccf: 0x0018, 0xcd0: 0x2121, 0xcd1: 0x2151, - 0xcd2: 0x2181, 0xcd3: 0x21b9, 0xcd4: 0x21e9, 0xcd5: 0x2219, 0xcd6: 0x2249, 0xcd7: 0x2279, - 0xcd8: 0x22a9, 0xcd9: 0x22d9, 0xcda: 0x2309, 0xcdb: 0x2339, 0xcdc: 0x2369, 0xcdd: 0x2399, - 0xcde: 0x23c9, 0xcdf: 0x23f9, 0xce0: 0x0f41, 0xce1: 0x2421, 0xce2: 0x091d, 0xce3: 0x2439, - 0xce4: 0x1089, 0xce5: 0x2451, 0xce6: 0x093d, 0xce7: 0x2469, 0xce8: 0x2491, 0xce9: 0x0369, - 0xcea: 0x24a9, 0xceb: 0x095d, 0xcec: 0x0359, 0xced: 0x1159, 0xcee: 0x0ef9, 0xcef: 0x0f61, - 0xcf0: 0x0f41, 0xcf1: 0x2421, 0xcf2: 0x097d, 0xcf3: 0x2439, 0xcf4: 0x1089, 0xcf5: 0x2451, - 0xcf6: 0x099d, 0xcf7: 0x2469, 0xcf8: 0x2491, 0xcf9: 0x0369, 0xcfa: 0x24a9, 0xcfb: 0x09bd, - 0xcfc: 0x0359, 0xcfd: 0x1159, 0xcfe: 0x0ef9, 0xcff: 0x0f61, + 0xcc0: 0x0905, 0xcc1: 0x0018, 0xcc2: 0x0018, 0xcc3: 0x0018, 0xcc4: 0x0018, 0xcc5: 0x02f1, + 0xcc6: 0x02f1, 0xcc7: 0x02f9, 0xcc8: 0x0311, 0xcc9: 0x00b1, 0xcca: 0x0018, 0xccb: 0x0018, + 0xccc: 0x0018, 0xccd: 0x0018, 0xcce: 0x0008, 0xccf: 0x0018, 0xcd0: 0x07a1, 0xcd1: 0x07a9, + 0xcd2: 0x07b1, 0xcd3: 0x07b9, 0xcd4: 0x07c1, 0xcd5: 0x07c9, 0xcd6: 0x07d1, 0xcd7: 0x07d9, + 0xcd8: 0x07e1, 0xcd9: 0x07e9, 0xcda: 0x07f1, 0xcdb: 0x07f9, 0xcdc: 0x0801, 0xcdd: 0x0809, + 0xcde: 0x0811, 0xcdf: 0x0819, 0xce0: 0x0311, 0xce1: 0x0821, 0xce2: 0x091d, 0xce3: 0x0829, + 0xce4: 0x0391, 0xce5: 0x0831, 0xce6: 0x093d, 0xce7: 0x0839, 0xce8: 0x0841, 0xce9: 0x0109, + 0xcea: 0x0849, 0xceb: 0x095d, 0xcec: 0x0101, 0xced: 0x03d9, 0xcee: 0x02f1, 0xcef: 0x0321, + 0xcf0: 0x0311, 0xcf1: 0x0821, 0xcf2: 0x097d, 0xcf3: 0x0829, 0xcf4: 0x0391, 0xcf5: 0x0831, + 0xcf6: 0x099d, 0xcf7: 0x0839, 0xcf8: 0x0841, 0xcf9: 0x0109, 0xcfa: 0x0849, 0xcfb: 0x09bd, + 0xcfc: 0x0101, 0xcfd: 0x03d9, 0xcfe: 0x02f1, 0xcff: 0x0321, // Block 0x34, offset 0xd00 0xd00: 0x0018, 0xd01: 0x0018, 0xd02: 0x0018, 0xd03: 0x0018, 0xd04: 0x0018, 0xd05: 0x0018, 0xd06: 0x0018, 0xd07: 0x0018, 0xd08: 0x0018, 0xd09: 0x0018, 0xd0a: 0x0018, 0xd0b: 0x0040, 0xd0c: 0x0040, 0xd0d: 0x0040, 0xd0e: 0x0040, 0xd0f: 0x0040, 0xd10: 0x0040, 0xd11: 0x0040, 0xd12: 0x0040, 0xd13: 0x0040, 0xd14: 0x0040, 0xd15: 0x0040, 0xd16: 0x0040, 0xd17: 0x0040, 0xd18: 0x0040, 0xd19: 0x0040, 0xd1a: 0x0040, 0xd1b: 
0x0040, 0xd1c: 0x0040, 0xd1d: 0x0040, - 0xd1e: 0x0040, 0xd1f: 0x0040, 0xd20: 0x00c9, 0xd21: 0x0069, 0xd22: 0x0079, 0xd23: 0x1f51, - 0xd24: 0x1f61, 0xd25: 0x1f71, 0xd26: 0x1f81, 0xd27: 0x1f91, 0xd28: 0x1fa1, 0xd29: 0x2601, - 0xd2a: 0x2619, 0xd2b: 0x2631, 0xd2c: 0x2649, 0xd2d: 0x2661, 0xd2e: 0x2679, 0xd2f: 0x2691, - 0xd30: 0x26a9, 0xd31: 0x26c1, 0xd32: 0x26d9, 0xd33: 0x26f1, 0xd34: 0x0a1e, 0xd35: 0x0a3e, + 0xd1e: 0x0040, 0xd1f: 0x0040, 0xd20: 0x0049, 0xd21: 0x0029, 0xd22: 0x0031, 0xd23: 0x06e9, + 0xd24: 0x06f1, 0xd25: 0x06f9, 0xd26: 0x0701, 0xd27: 0x0709, 0xd28: 0x0711, 0xd29: 0x0879, + 0xd2a: 0x0881, 0xd2b: 0x0889, 0xd2c: 0x0891, 0xd2d: 0x0899, 0xd2e: 0x08a1, 0xd2f: 0x08a9, + 0xd30: 0x08b1, 0xd31: 0x08b9, 0xd32: 0x08c1, 0xd33: 0x08c9, 0xd34: 0x0a1e, 0xd35: 0x0a3e, 0xd36: 0x0a5e, 0xd37: 0x0a7e, 0xd38: 0x0a9e, 0xd39: 0x0abe, 0xd3a: 0x0ade, 0xd3b: 0x0afe, - 0xd3c: 0x0b1e, 0xd3d: 0x270a, 0xd3e: 0x2732, 0xd3f: 0x275a, + 0xd3c: 0x0b1e, 0xd3d: 0x08d2, 0xd3e: 0x08da, 0xd3f: 0x08e2, // Block 0x35, offset 0xd40 - 0xd40: 0x2782, 0xd41: 0x27aa, 0xd42: 0x27d2, 0xd43: 0x27fa, 0xd44: 0x2822, 0xd45: 0x284a, - 0xd46: 0x2872, 0xd47: 0x289a, 0xd48: 0x0040, 0xd49: 0x0040, 0xd4a: 0x0040, 0xd4b: 0x0040, + 0xd40: 0x08ea, 0xd41: 0x08f2, 0xd42: 0x08fa, 0xd43: 0x0902, 0xd44: 0x090a, 0xd45: 0x0912, + 0xd46: 0x091a, 0xd47: 0x0922, 0xd48: 0x0040, 0xd49: 0x0040, 0xd4a: 0x0040, 0xd4b: 0x0040, 0xd4c: 0x0040, 0xd4d: 0x0040, 0xd4e: 0x0040, 0xd4f: 0x0040, 0xd50: 0x0040, 0xd51: 0x0040, 0xd52: 0x0040, 0xd53: 0x0040, 0xd54: 0x0040, 0xd55: 0x0040, 0xd56: 0x0040, 0xd57: 0x0040, 0xd58: 0x0040, 0xd59: 0x0040, 0xd5a: 0x0040, 0xd5b: 0x0040, 0xd5c: 0x0b3e, 0xd5d: 0x0b5e, @@ -1203,17 +1346,17 @@ 0xd64: 0x0c3e, 0xd65: 0x0c5e, 0xd66: 0x0c7e, 0xd67: 0x0c9e, 0xd68: 0x0cbe, 0xd69: 0x0cde, 0xd6a: 0x0cfe, 0xd6b: 0x0d1e, 0xd6c: 0x0d3e, 0xd6d: 0x0d5e, 0xd6e: 0x0d7e, 0xd6f: 0x0d9e, 0xd70: 0x0dbe, 0xd71: 0x0dde, 0xd72: 0x0dfe, 0xd73: 0x0e1e, 0xd74: 0x0e3e, 0xd75: 0x0e5e, - 0xd76: 0x0039, 0xd77: 0x0ee9, 0xd78: 0x1159, 0xd79: 0x0ef9, 0xd7a: 0x0f09, 0xd7b: 0x1199, - 0xd7c: 0x0f31, 0xd7d: 0x0249, 0xd7e: 0x0f41, 0xd7f: 0x0259, + 0xd76: 0x0019, 0xd77: 0x02e9, 0xd78: 0x03d9, 0xd79: 0x02f1, 0xd7a: 0x02f9, 0xd7b: 0x03f1, + 0xd7c: 0x0309, 0xd7d: 0x00a9, 0xd7e: 0x0311, 0xd7f: 0x00b1, // Block 0x36, offset 0xd80 - 0xd80: 0x0f51, 0xd81: 0x0359, 0xd82: 0x0f61, 0xd83: 0x0f71, 0xd84: 0x00d9, 0xd85: 0x0f99, - 0xd86: 0x2039, 0xd87: 0x0269, 0xd88: 0x01d9, 0xd89: 0x0fa9, 0xd8a: 0x0fb9, 0xd8b: 0x1089, - 0xd8c: 0x0279, 0xd8d: 0x0369, 0xd8e: 0x0289, 0xd8f: 0x13d1, 0xd90: 0x0039, 0xd91: 0x0ee9, - 0xd92: 0x1159, 0xd93: 0x0ef9, 0xd94: 0x0f09, 0xd95: 0x1199, 0xd96: 0x0f31, 0xd97: 0x0249, - 0xd98: 0x0f41, 0xd99: 0x0259, 0xd9a: 0x0f51, 0xd9b: 0x0359, 0xd9c: 0x0f61, 0xd9d: 0x0f71, - 0xd9e: 0x00d9, 0xd9f: 0x0f99, 0xda0: 0x2039, 0xda1: 0x0269, 0xda2: 0x01d9, 0xda3: 0x0fa9, - 0xda4: 0x0fb9, 0xda5: 0x1089, 0xda6: 0x0279, 0xda7: 0x0369, 0xda8: 0x0289, 0xda9: 0x13d1, - 0xdaa: 0x1f41, 0xdab: 0x0018, 0xdac: 0x0018, 0xdad: 0x0018, 0xdae: 0x0018, 0xdaf: 0x0018, + 0xd80: 0x0319, 0xd81: 0x0101, 0xd82: 0x0321, 0xd83: 0x0329, 0xd84: 0x0051, 0xd85: 0x0339, + 0xd86: 0x0751, 0xd87: 0x00b9, 0xd88: 0x0089, 0xd89: 0x0341, 0xd8a: 0x0349, 0xd8b: 0x0391, + 0xd8c: 0x00c1, 0xd8d: 0x0109, 0xd8e: 0x00c9, 0xd8f: 0x04b1, 0xd90: 0x0019, 0xd91: 0x02e9, + 0xd92: 0x03d9, 0xd93: 0x02f1, 0xd94: 0x02f9, 0xd95: 0x03f1, 0xd96: 0x0309, 0xd97: 0x00a9, + 0xd98: 0x0311, 0xd99: 0x00b1, 0xd9a: 0x0319, 0xd9b: 0x0101, 0xd9c: 0x0321, 0xd9d: 0x0329, + 0xd9e: 0x0051, 0xd9f: 0x0339, 0xda0: 0x0751, 0xda1: 0x00b9, 0xda2: 
0x0089, 0xda3: 0x0341, + 0xda4: 0x0349, 0xda5: 0x0391, 0xda6: 0x00c1, 0xda7: 0x0109, 0xda8: 0x00c9, 0xda9: 0x04b1, + 0xdaa: 0x06e1, 0xdab: 0x0018, 0xdac: 0x0018, 0xdad: 0x0018, 0xdae: 0x0018, 0xdaf: 0x0018, 0xdb0: 0x0018, 0xdb1: 0x0018, 0xdb2: 0x0018, 0xdb3: 0x0018, 0xdb4: 0x0018, 0xdb5: 0x0018, 0xdb6: 0x0018, 0xdb7: 0x0018, 0xdb8: 0x0018, 0xdb9: 0x0018, 0xdba: 0x0018, 0xdbb: 0x0018, 0xdbc: 0x0018, 0xdbd: 0x0018, 0xdbe: 0x0018, 0xdbf: 0x0018, @@ -1223,12 +1366,12 @@ 0xdcc: 0x0008, 0xdcd: 0x0008, 0xdce: 0x0008, 0xdcf: 0x0008, 0xdd0: 0x0008, 0xdd1: 0x0008, 0xdd2: 0x0008, 0xdd3: 0x0008, 0xdd4: 0x0008, 0xdd5: 0x0008, 0xdd6: 0x0008, 0xdd7: 0x0008, 0xdd8: 0x0008, 0xdd9: 0x0008, 0xdda: 0x0008, 0xddb: 0x0008, 0xddc: 0x0008, 0xddd: 0x0008, - 0xdde: 0x0008, 0xddf: 0x0040, 0xde0: 0xe00d, 0xde1: 0x0008, 0xde2: 0x2971, 0xde3: 0x0ed5, - 0xde4: 0x2989, 0xde5: 0x0008, 0xde6: 0x0008, 0xde7: 0xe07d, 0xde8: 0x0008, 0xde9: 0xe01d, - 0xdea: 0x0008, 0xdeb: 0xe03d, 0xdec: 0x0008, 0xded: 0x0fe1, 0xdee: 0x1281, 0xdef: 0x0fc9, - 0xdf0: 0x1141, 0xdf1: 0x0008, 0xdf2: 0xe00d, 0xdf3: 0x0008, 0xdf4: 0x0008, 0xdf5: 0xe01d, + 0xdde: 0x0008, 0xddf: 0x0040, 0xde0: 0xe00d, 0xde1: 0x0008, 0xde2: 0x0941, 0xde3: 0x0ed5, + 0xde4: 0x0949, 0xde5: 0x0008, 0xde6: 0x0008, 0xde7: 0xe07d, 0xde8: 0x0008, 0xde9: 0xe01d, + 0xdea: 0x0008, 0xdeb: 0xe03d, 0xdec: 0x0008, 0xded: 0x0359, 0xdee: 0x0441, 0xdef: 0x0351, + 0xdf0: 0x03d1, 0xdf1: 0x0008, 0xdf2: 0xe00d, 0xdf3: 0x0008, 0xdf4: 0x0008, 0xdf5: 0xe01d, 0xdf6: 0x0008, 0xdf7: 0x0008, 0xdf8: 0x0008, 0xdf9: 0x0008, 0xdfa: 0x0008, 0xdfb: 0x0008, - 0xdfc: 0x0259, 0xdfd: 0x1089, 0xdfe: 0x29a1, 0xdff: 0x29b9, + 0xdfc: 0x00b1, 0xdfd: 0x0391, 0xdfe: 0x0951, 0xdff: 0x0959, // Block 0x38, offset 0xe00 0xe00: 0xe00d, 0xe01: 0x0008, 0xe02: 0xe00d, 0xe03: 0x0008, 0xe04: 0xe00d, 0xe05: 0x0008, 0xe06: 0xe00d, 0xe07: 0x0008, 0xe08: 0xe00d, 0xe09: 0x0008, 0xe0a: 0xe00d, 0xe0b: 0x0008, @@ -1254,7 +1397,7 @@ 0xe76: 0x0040, 0xe77: 0x0040, 0xe78: 0x0040, 0xe79: 0x0040, 0xe7a: 0x0040, 0xe7b: 0x0040, 0xe7c: 0x0040, 0xe7d: 0x0040, 0xe7e: 0x0040, 0xe7f: 0x0040, // Block 0x3a, offset 0xe80 - 0xe80: 0x000a, 0xe81: 0x0018, 0xe82: 0x29d1, 0xe83: 0x0018, 0xe84: 0x0018, 0xe85: 0x0008, + 0xe80: 0x000a, 0xe81: 0x0018, 0xe82: 0x0961, 0xe83: 0x0018, 0xe84: 0x0018, 0xe85: 0x0008, 0xe86: 0x0008, 0xe87: 0x0008, 0xe88: 0x0018, 0xe89: 0x0018, 0xe8a: 0x0018, 0xe8b: 0x0018, 0xe8c: 0x0018, 0xe8d: 0x0018, 0xe8e: 0x0018, 0xe8f: 0x0018, 0xe90: 0x0018, 0xe91: 0x0018, 0xe92: 0x0018, 0xe93: 0x0018, 0xe94: 0x0018, 0xe95: 0x0018, 0xe96: 0x0018, 0xe97: 0x0018, @@ -1290,17 +1433,17 @@ 0xf36: 0x0008, 0xf37: 0x0008, 0xf38: 0x0008, 0xf39: 0x0008, 0xf3a: 0x0008, 0xf3b: 0x0008, 0xf3c: 0x0008, 0xf3d: 0x0008, 0xf3e: 0x0008, 0xf3f: 0x0008, // Block 0x3d, offset 0xf40 - 0xf40: 0x36a2, 0xf41: 0x36d2, 0xf42: 0x3702, 0xf43: 0x3732, 0xf44: 0x32d5, 0xf45: 0x32f5, + 0xf40: 0x0b82, 0xf41: 0x0b8a, 0xf42: 0x0b92, 0xf43: 0x0b9a, 0xf44: 0x32d5, 0xf45: 0x32f5, 0xf46: 0x3315, 0xf47: 0x3335, 0xf48: 0x0018, 0xf49: 0x0018, 0xf4a: 0x0018, 0xf4b: 0x0018, - 0xf4c: 0x0018, 0xf4d: 0x0018, 0xf4e: 0x0018, 0xf4f: 0x0018, 0xf50: 0x3355, 0xf51: 0x3761, - 0xf52: 0x3779, 0xf53: 0x3791, 0xf54: 0x37a9, 0xf55: 0x37c1, 0xf56: 0x37d9, 0xf57: 0x37f1, - 0xf58: 0x3809, 0xf59: 0x3821, 0xf5a: 0x3839, 0xf5b: 0x3851, 0xf5c: 0x3869, 0xf5d: 0x3881, - 0xf5e: 0x3899, 0xf5f: 0x38b1, 0xf60: 0x3375, 0xf61: 0x3395, 0xf62: 0x33b5, 0xf63: 0x33d5, + 0xf4c: 0x0018, 0xf4d: 0x0018, 0xf4e: 0x0018, 0xf4f: 0x0018, 0xf50: 0x3355, 0xf51: 0x0ba1, + 0xf52: 0x0ba9, 0xf53: 0x0bb1, 0xf54: 0x0bb9, 0xf55: 0x0bc1, 
0xf56: 0x0bc9, 0xf57: 0x0bd1, + 0xf58: 0x0bd9, 0xf59: 0x0be1, 0xf5a: 0x0be9, 0xf5b: 0x0bf1, 0xf5c: 0x0bf9, 0xf5d: 0x0c01, + 0xf5e: 0x0c09, 0xf5f: 0x0c11, 0xf60: 0x3375, 0xf61: 0x3395, 0xf62: 0x33b5, 0xf63: 0x33d5, 0xf64: 0x33f5, 0xf65: 0x33f5, 0xf66: 0x3415, 0xf67: 0x3435, 0xf68: 0x3455, 0xf69: 0x3475, 0xf6a: 0x3495, 0xf6b: 0x34b5, 0xf6c: 0x34d5, 0xf6d: 0x34f5, 0xf6e: 0x3515, 0xf6f: 0x3535, 0xf70: 0x3555, 0xf71: 0x3575, 0xf72: 0x3595, 0xf73: 0x35b5, 0xf74: 0x35d5, 0xf75: 0x35f5, 0xf76: 0x3615, 0xf77: 0x3635, 0xf78: 0x3655, 0xf79: 0x3675, 0xf7a: 0x3695, 0xf7b: 0x36b5, - 0xf7c: 0x38c9, 0xf7d: 0x3901, 0xf7e: 0x36d5, 0xf7f: 0x0018, + 0xf7c: 0x0c19, 0xf7d: 0x0c21, 0xf7e: 0x36d5, 0xf7f: 0x0018, // Block 0x3e, offset 0xf80 0xf80: 0x36f5, 0xf81: 0x3715, 0xf82: 0x3735, 0xf83: 0x3755, 0xf84: 0x3775, 0xf85: 0x3795, 0xf86: 0x37b5, 0xf87: 0x37d5, 0xf88: 0x37f5, 0xf89: 0x3815, 0xf8a: 0x3835, 0xf8b: 0x3855, @@ -1310,13 +1453,13 @@ 0xf9e: 0x3ab5, 0xf9f: 0x3ad5, 0xfa0: 0x3af5, 0xfa1: 0x3b15, 0xfa2: 0x3b35, 0xfa3: 0x3b55, 0xfa4: 0x3b75, 0xfa5: 0x3b95, 0xfa6: 0x1295, 0xfa7: 0x3bb5, 0xfa8: 0x3bd5, 0xfa9: 0x3bf5, 0xfaa: 0x3c15, 0xfab: 0x3c35, 0xfac: 0x3c55, 0xfad: 0x3c75, 0xfae: 0x23b5, 0xfaf: 0x3c95, - 0xfb0: 0x3cb5, 0xfb1: 0x3939, 0xfb2: 0x3951, 0xfb3: 0x3969, 0xfb4: 0x3981, 0xfb5: 0x3999, - 0xfb6: 0x39b1, 0xfb7: 0x39c9, 0xfb8: 0x39e1, 0xfb9: 0x39f9, 0xfba: 0x3a11, 0xfbb: 0x3a29, - 0xfbc: 0x3a41, 0xfbd: 0x3a59, 0xfbe: 0x3a71, 0xfbf: 0x3a89, + 0xfb0: 0x3cb5, 0xfb1: 0x0c29, 0xfb2: 0x0c31, 0xfb3: 0x0c39, 0xfb4: 0x0c41, 0xfb5: 0x0c49, + 0xfb6: 0x0c51, 0xfb7: 0x0c59, 0xfb8: 0x0c61, 0xfb9: 0x0c69, 0xfba: 0x0c71, 0xfbb: 0x0c79, + 0xfbc: 0x0c81, 0xfbd: 0x0c89, 0xfbe: 0x0c91, 0xfbf: 0x0c99, // Block 0x3f, offset 0xfc0 - 0xfc0: 0x3aa1, 0xfc1: 0x3ac9, 0xfc2: 0x3af1, 0xfc3: 0x3b19, 0xfc4: 0x3b41, 0xfc5: 0x3b69, - 0xfc6: 0x3b91, 0xfc7: 0x3bb9, 0xfc8: 0x3be1, 0xfc9: 0x3c09, 0xfca: 0x3c39, 0xfcb: 0x3c69, - 0xfcc: 0x3c99, 0xfcd: 0x3cd5, 0xfce: 0x3cb1, 0xfcf: 0x3cf5, 0xfd0: 0x3d15, 0xfd1: 0x3d2d, + 0xfc0: 0x0ca1, 0xfc1: 0x0ca9, 0xfc2: 0x0cb1, 0xfc3: 0x0cb9, 0xfc4: 0x0cc1, 0xfc5: 0x0cc9, + 0xfc6: 0x0cd1, 0xfc7: 0x0cd9, 0xfc8: 0x0ce1, 0xfc9: 0x0ce9, 0xfca: 0x0cf1, 0xfcb: 0x0cf9, + 0xfcc: 0x0d01, 0xfcd: 0x3cd5, 0xfce: 0x0d09, 0xfcf: 0x3cf5, 0xfd0: 0x3d15, 0xfd1: 0x3d2d, 0xfd2: 0x3d45, 0xfd3: 0x3d5d, 0xfd4: 0x3d75, 0xfd5: 0x3d75, 0xfd6: 0x3d5d, 0xfd7: 0x3d8d, 0xfd8: 0x07d5, 0xfd9: 0x3da5, 0xfda: 0x3dbd, 0xfdb: 0x3dd5, 0xfdc: 0x3ded, 0xfdd: 0x3e05, 0xfde: 0x3e1d, 0xfdf: 0x3e35, 0xfe0: 0x3e4d, 0xfe1: 0x3e65, 0xfe2: 0x3e7d, 0xfe3: 0x3e95, @@ -1324,769 +1467,769 @@ 0xfea: 0x3ef5, 0xfeb: 0x3f0d, 0xfec: 0x3f25, 0xfed: 0x3f3d, 0xfee: 0x3f55, 0xfef: 0x3f55, 0xff0: 0x3f6d, 0xff1: 0x3f6d, 0xff2: 0x3f6d, 0xff3: 0x3f85, 0xff4: 0x3f9d, 0xff5: 0x3fb5, 0xff6: 0x3fcd, 0xff7: 0x3fb5, 0xff8: 0x3fe5, 0xff9: 0x3ffd, 0xffa: 0x3f85, 0xffb: 0x4015, - 0xffc: 0x402d, 0xffd: 0x402d, 0xffe: 0x402d, 0xfff: 0x3cc9, + 0xffc: 0x402d, 0xffd: 0x402d, 0xffe: 0x402d, 0xfff: 0x0d11, // Block 0x40, offset 0x1000 - 0x1000: 0x3d01, 0x1001: 0x3d69, 0x1002: 0x3dd1, 0x1003: 0x3e39, 0x1004: 0x3e89, 0x1005: 0x3ef1, - 0x1006: 0x3f41, 0x1007: 0x3f91, 0x1008: 0x4011, 0x1009: 0x4079, 0x100a: 0x40c9, 0x100b: 0x4119, - 0x100c: 0x4169, 0x100d: 0x41d1, 0x100e: 0x4239, 0x100f: 0x4289, 0x1010: 0x42d9, 0x1011: 0x4311, - 0x1012: 0x4361, 0x1013: 0x43c9, 0x1014: 0x4431, 0x1015: 0x4469, 0x1016: 0x44e9, 0x1017: 0x4581, - 0x1018: 0x4601, 0x1019: 0x4651, 0x101a: 0x46d1, 0x101b: 0x4751, 0x101c: 0x47b9, 0x101d: 0x4809, - 0x101e: 0x4859, 0x101f: 0x48a9, 0x1020: 0x4911, 0x1021: 0x4991, 0x1022: 
0x49f9, 0x1023: 0x4a49, - 0x1024: 0x4a99, 0x1025: 0x4ae9, 0x1026: 0x4b21, 0x1027: 0x4b59, 0x1028: 0x4b91, 0x1029: 0x4bc9, - 0x102a: 0x4c19, 0x102b: 0x4c69, 0x102c: 0x4ce9, 0x102d: 0x4d39, 0x102e: 0x4da1, 0x102f: 0x4e21, - 0x1030: 0x4e71, 0x1031: 0x4ea9, 0x1032: 0x4ee1, 0x1033: 0x4f61, 0x1034: 0x4fc9, 0x1035: 0x5049, - 0x1036: 0x5099, 0x1037: 0x5119, 0x1038: 0x5151, 0x1039: 0x51a1, 0x103a: 0x51f1, 0x103b: 0x5241, - 0x103c: 0x5291, 0x103d: 0x52e1, 0x103e: 0x5349, 0x103f: 0x5399, + 0x1000: 0x10f9, 0x1001: 0x1101, 0x1002: 0x40a5, 0x1003: 0x1109, 0x1004: 0x1111, 0x1005: 0x1119, + 0x1006: 0x1121, 0x1007: 0x1129, 0x1008: 0x40c5, 0x1009: 0x1131, 0x100a: 0x1139, 0x100b: 0x1141, + 0x100c: 0x40e5, 0x100d: 0x40e5, 0x100e: 0x1149, 0x100f: 0x1151, 0x1010: 0x1159, 0x1011: 0x4105, + 0x1012: 0x4125, 0x1013: 0x4145, 0x1014: 0x4165, 0x1015: 0x4185, 0x1016: 0x1161, 0x1017: 0x1169, + 0x1018: 0x1171, 0x1019: 0x1179, 0x101a: 0x1181, 0x101b: 0x41a5, 0x101c: 0x1189, 0x101d: 0x1191, + 0x101e: 0x1199, 0x101f: 0x41c5, 0x1020: 0x41e5, 0x1021: 0x11a1, 0x1022: 0x4205, 0x1023: 0x4225, + 0x1024: 0x4245, 0x1025: 0x11a9, 0x1026: 0x4265, 0x1027: 0x11b1, 0x1028: 0x11b9, 0x1029: 0x10f9, + 0x102a: 0x4285, 0x102b: 0x42a5, 0x102c: 0x42c5, 0x102d: 0x42e5, 0x102e: 0x11c1, 0x102f: 0x11c9, + 0x1030: 0x11d1, 0x1031: 0x11d9, 0x1032: 0x4305, 0x1033: 0x11e1, 0x1034: 0x11e9, 0x1035: 0x11f1, + 0x1036: 0x4325, 0x1037: 0x11f9, 0x1038: 0x1201, 0x1039: 0x11f9, 0x103a: 0x1209, 0x103b: 0x1211, + 0x103c: 0x4345, 0x103d: 0x1219, 0x103e: 0x1221, 0x103f: 0x1219, // Block 0x41, offset 0x1040 - 0x1040: 0x53d1, 0x1041: 0x5421, 0x1042: 0x5471, 0x1043: 0x54c1, 0x1044: 0x5529, 0x1045: 0x5579, - 0x1046: 0x55c9, 0x1047: 0x5619, 0x1048: 0x5699, 0x1049: 0x5701, 0x104a: 0x5739, 0x104b: 0x57b9, - 0x104c: 0x57f1, 0x104d: 0x5859, 0x104e: 0x58c1, 0x104f: 0x5911, 0x1050: 0x5961, 0x1051: 0x59b1, - 0x1052: 0x5a19, 0x1053: 0x5a51, 0x1054: 0x5aa1, 0x1055: 0x5b09, 0x1056: 0x5b41, 0x1057: 0x5bc1, - 0x1058: 0x5c11, 0x1059: 0x5c39, 0x105a: 0x5c61, 0x105b: 0x5c89, 0x105c: 0x5cb1, 0x105d: 0x5cd9, - 0x105e: 0x5d01, 0x105f: 0x5d29, 0x1060: 0x5d51, 0x1061: 0x5d79, 0x1062: 0x5da1, 0x1063: 0x5dd1, - 0x1064: 0x5e01, 0x1065: 0x5e31, 0x1066: 0x5e61, 0x1067: 0x5e91, 0x1068: 0x5ec1, 0x1069: 0x5ef1, - 0x106a: 0x5f21, 0x106b: 0x5f51, 0x106c: 0x5f81, 0x106d: 0x5fb1, 0x106e: 0x5fe1, 0x106f: 0x6011, - 0x1070: 0x6041, 0x1071: 0x4045, 0x1072: 0x6071, 0x1073: 0x6089, 0x1074: 0x4065, 0x1075: 0x60a1, - 0x1076: 0x60b9, 0x1077: 0x60d1, 0x1078: 0x4085, 0x1079: 0x4085, 0x107a: 0x60e9, 0x107b: 0x6101, - 0x107c: 0x6139, 0x107d: 0x6171, 0x107e: 0x61a9, 0x107f: 0x61e1, + 0x1040: 0x4365, 0x1041: 0x4385, 0x1042: 0x0040, 0x1043: 0x1229, 0x1044: 0x1231, 0x1045: 0x1239, + 0x1046: 0x1241, 0x1047: 0x0040, 0x1048: 0x1249, 0x1049: 0x1251, 0x104a: 0x1259, 0x104b: 0x1261, + 0x104c: 0x1269, 0x104d: 0x1271, 0x104e: 0x1199, 0x104f: 0x1279, 0x1050: 0x1281, 0x1051: 0x1289, + 0x1052: 0x43a5, 0x1053: 0x1291, 0x1054: 0x1121, 0x1055: 0x43c5, 0x1056: 0x43e5, 0x1057: 0x1299, + 0x1058: 0x0040, 0x1059: 0x4405, 0x105a: 0x12a1, 0x105b: 0x12a9, 0x105c: 0x12b1, 0x105d: 0x12b9, + 0x105e: 0x12c1, 0x105f: 0x12c9, 0x1060: 0x12d1, 0x1061: 0x12d9, 0x1062: 0x12e1, 0x1063: 0x12e9, + 0x1064: 0x12f1, 0x1065: 0x12f9, 0x1066: 0x1301, 0x1067: 0x1309, 0x1068: 0x1311, 0x1069: 0x1319, + 0x106a: 0x1321, 0x106b: 0x1329, 0x106c: 0x1331, 0x106d: 0x1339, 0x106e: 0x1341, 0x106f: 0x1349, + 0x1070: 0x1351, 0x1071: 0x1359, 0x1072: 0x1361, 0x1073: 0x1369, 0x1074: 0x1371, 0x1075: 0x1379, + 0x1076: 0x1381, 0x1077: 0x1389, 0x1078: 0x1391, 0x1079: 0x1399, 
0x107a: 0x13a1, 0x107b: 0x13a9, + 0x107c: 0x13b1, 0x107d: 0x13b9, 0x107e: 0x13c1, 0x107f: 0x4425, // Block 0x42, offset 0x1080 - 0x1080: 0x6249, 0x1081: 0x6261, 0x1082: 0x40a5, 0x1083: 0x6279, 0x1084: 0x6291, 0x1085: 0x62a9, - 0x1086: 0x62c1, 0x1087: 0x62d9, 0x1088: 0x40c5, 0x1089: 0x62f1, 0x108a: 0x6319, 0x108b: 0x6331, - 0x108c: 0x40e5, 0x108d: 0x40e5, 0x108e: 0x6349, 0x108f: 0x6361, 0x1090: 0x6379, 0x1091: 0x4105, - 0x1092: 0x4125, 0x1093: 0x4145, 0x1094: 0x4165, 0x1095: 0x4185, 0x1096: 0x6391, 0x1097: 0x63a9, - 0x1098: 0x63c1, 0x1099: 0x63d9, 0x109a: 0x63f1, 0x109b: 0x41a5, 0x109c: 0x6409, 0x109d: 0x6421, - 0x109e: 0x6439, 0x109f: 0x41c5, 0x10a0: 0x41e5, 0x10a1: 0x6451, 0x10a2: 0x4205, 0x10a3: 0x4225, - 0x10a4: 0x4245, 0x10a5: 0x6469, 0x10a6: 0x4265, 0x10a7: 0x6481, 0x10a8: 0x64b1, 0x10a9: 0x6249, - 0x10aa: 0x4285, 0x10ab: 0x42a5, 0x10ac: 0x42c5, 0x10ad: 0x42e5, 0x10ae: 0x64e9, 0x10af: 0x6529, - 0x10b0: 0x6571, 0x10b1: 0x6589, 0x10b2: 0x4305, 0x10b3: 0x65a1, 0x10b4: 0x65b9, 0x10b5: 0x65d1, - 0x10b6: 0x4325, 0x10b7: 0x65e9, 0x10b8: 0x6601, 0x10b9: 0x65e9, 0x10ba: 0x6619, 0x10bb: 0x6631, - 0x10bc: 0x4345, 0x10bd: 0x6649, 0x10be: 0x6661, 0x10bf: 0x6649, + 0x1080: 0xe00d, 0x1081: 0x0008, 0x1082: 0xe00d, 0x1083: 0x0008, 0x1084: 0xe00d, 0x1085: 0x0008, + 0x1086: 0xe00d, 0x1087: 0x0008, 0x1088: 0xe00d, 0x1089: 0x0008, 0x108a: 0xe00d, 0x108b: 0x0008, + 0x108c: 0xe00d, 0x108d: 0x0008, 0x108e: 0xe00d, 0x108f: 0x0008, 0x1090: 0xe00d, 0x1091: 0x0008, + 0x1092: 0xe00d, 0x1093: 0x0008, 0x1094: 0xe00d, 0x1095: 0x0008, 0x1096: 0xe00d, 0x1097: 0x0008, + 0x1098: 0xe00d, 0x1099: 0x0008, 0x109a: 0xe00d, 0x109b: 0x0008, 0x109c: 0xe00d, 0x109d: 0x0008, + 0x109e: 0xe00d, 0x109f: 0x0008, 0x10a0: 0xe00d, 0x10a1: 0x0008, 0x10a2: 0xe00d, 0x10a3: 0x0008, + 0x10a4: 0xe00d, 0x10a5: 0x0008, 0x10a6: 0xe00d, 0x10a7: 0x0008, 0x10a8: 0xe00d, 0x10a9: 0x0008, + 0x10aa: 0xe00d, 0x10ab: 0x0008, 0x10ac: 0xe00d, 0x10ad: 0x0008, 0x10ae: 0x0008, 0x10af: 0x3308, + 0x10b0: 0x3318, 0x10b1: 0x3318, 0x10b2: 0x3318, 0x10b3: 0x0018, 0x10b4: 0x3308, 0x10b5: 0x3308, + 0x10b6: 0x3308, 0x10b7: 0x3308, 0x10b8: 0x3308, 0x10b9: 0x3308, 0x10ba: 0x3308, 0x10bb: 0x3308, + 0x10bc: 0x3308, 0x10bd: 0x3308, 0x10be: 0x0018, 0x10bf: 0x0008, // Block 0x43, offset 0x10c0 - 0x10c0: 0x4365, 0x10c1: 0x4385, 0x10c2: 0x0040, 0x10c3: 0x6679, 0x10c4: 0x6691, 0x10c5: 0x66a9, - 0x10c6: 0x66c1, 0x10c7: 0x0040, 0x10c8: 0x66f9, 0x10c9: 0x6711, 0x10ca: 0x6729, 0x10cb: 0x6741, - 0x10cc: 0x6759, 0x10cd: 0x6771, 0x10ce: 0x6439, 0x10cf: 0x6789, 0x10d0: 0x67a1, 0x10d1: 0x67b9, - 0x10d2: 0x43a5, 0x10d3: 0x67d1, 0x10d4: 0x62c1, 0x10d5: 0x43c5, 0x10d6: 0x43e5, 0x10d7: 0x67e9, - 0x10d8: 0x0040, 0x10d9: 0x4405, 0x10da: 0x6801, 0x10db: 0x6819, 0x10dc: 0x6831, 0x10dd: 0x6849, - 0x10de: 0x6861, 0x10df: 0x6891, 0x10e0: 0x68c1, 0x10e1: 0x68e9, 0x10e2: 0x6911, 0x10e3: 0x6939, - 0x10e4: 0x6961, 0x10e5: 0x6989, 0x10e6: 0x69b1, 0x10e7: 0x69d9, 0x10e8: 0x6a01, 0x10e9: 0x6a29, - 0x10ea: 0x6a59, 0x10eb: 0x6a89, 0x10ec: 0x6ab9, 0x10ed: 0x6ae9, 0x10ee: 0x6b19, 0x10ef: 0x6b49, - 0x10f0: 0x6b79, 0x10f1: 0x6ba9, 0x10f2: 0x6bd9, 0x10f3: 0x6c09, 0x10f4: 0x6c39, 0x10f5: 0x6c69, - 0x10f6: 0x6c99, 0x10f7: 0x6cc9, 0x10f8: 0x6cf9, 0x10f9: 0x6d29, 0x10fa: 0x6d59, 0x10fb: 0x6d89, - 0x10fc: 0x6db9, 0x10fd: 0x6de9, 0x10fe: 0x6e19, 0x10ff: 0x4425, + 0x10c0: 0xe00d, 0x10c1: 0x0008, 0x10c2: 0xe00d, 0x10c3: 0x0008, 0x10c4: 0xe00d, 0x10c5: 0x0008, + 0x10c6: 0xe00d, 0x10c7: 0x0008, 0x10c8: 0xe00d, 0x10c9: 0x0008, 0x10ca: 0xe00d, 0x10cb: 0x0008, + 0x10cc: 0xe00d, 0x10cd: 0x0008, 0x10ce: 0xe00d, 0x10cf: 
0x0008, 0x10d0: 0xe00d, 0x10d1: 0x0008, + 0x10d2: 0xe00d, 0x10d3: 0x0008, 0x10d4: 0xe00d, 0x10d5: 0x0008, 0x10d6: 0xe00d, 0x10d7: 0x0008, + 0x10d8: 0xe00d, 0x10d9: 0x0008, 0x10da: 0xe00d, 0x10db: 0x0008, 0x10dc: 0x02d1, 0x10dd: 0x13c9, + 0x10de: 0x3308, 0x10df: 0x3308, 0x10e0: 0x0008, 0x10e1: 0x0008, 0x10e2: 0x0008, 0x10e3: 0x0008, + 0x10e4: 0x0008, 0x10e5: 0x0008, 0x10e6: 0x0008, 0x10e7: 0x0008, 0x10e8: 0x0008, 0x10e9: 0x0008, + 0x10ea: 0x0008, 0x10eb: 0x0008, 0x10ec: 0x0008, 0x10ed: 0x0008, 0x10ee: 0x0008, 0x10ef: 0x0008, + 0x10f0: 0x0008, 0x10f1: 0x0008, 0x10f2: 0x0008, 0x10f3: 0x0008, 0x10f4: 0x0008, 0x10f5: 0x0008, + 0x10f6: 0x0008, 0x10f7: 0x0008, 0x10f8: 0x0008, 0x10f9: 0x0008, 0x10fa: 0x0008, 0x10fb: 0x0008, + 0x10fc: 0x0008, 0x10fd: 0x0008, 0x10fe: 0x0008, 0x10ff: 0x0008, // Block 0x44, offset 0x1100 - 0x1100: 0xe00d, 0x1101: 0x0008, 0x1102: 0xe00d, 0x1103: 0x0008, 0x1104: 0xe00d, 0x1105: 0x0008, - 0x1106: 0xe00d, 0x1107: 0x0008, 0x1108: 0xe00d, 0x1109: 0x0008, 0x110a: 0xe00d, 0x110b: 0x0008, - 0x110c: 0xe00d, 0x110d: 0x0008, 0x110e: 0xe00d, 0x110f: 0x0008, 0x1110: 0xe00d, 0x1111: 0x0008, - 0x1112: 0xe00d, 0x1113: 0x0008, 0x1114: 0xe00d, 0x1115: 0x0008, 0x1116: 0xe00d, 0x1117: 0x0008, - 0x1118: 0xe00d, 0x1119: 0x0008, 0x111a: 0xe00d, 0x111b: 0x0008, 0x111c: 0xe00d, 0x111d: 0x0008, - 0x111e: 0xe00d, 0x111f: 0x0008, 0x1120: 0xe00d, 0x1121: 0x0008, 0x1122: 0xe00d, 0x1123: 0x0008, + 0x1100: 0x0018, 0x1101: 0x0018, 0x1102: 0x0018, 0x1103: 0x0018, 0x1104: 0x0018, 0x1105: 0x0018, + 0x1106: 0x0018, 0x1107: 0x0018, 0x1108: 0x0018, 0x1109: 0x0018, 0x110a: 0x0018, 0x110b: 0x0018, + 0x110c: 0x0018, 0x110d: 0x0018, 0x110e: 0x0018, 0x110f: 0x0018, 0x1110: 0x0018, 0x1111: 0x0018, + 0x1112: 0x0018, 0x1113: 0x0018, 0x1114: 0x0018, 0x1115: 0x0018, 0x1116: 0x0018, 0x1117: 0x0008, + 0x1118: 0x0008, 0x1119: 0x0008, 0x111a: 0x0008, 0x111b: 0x0008, 0x111c: 0x0008, 0x111d: 0x0008, + 0x111e: 0x0008, 0x111f: 0x0008, 0x1120: 0x0018, 0x1121: 0x0018, 0x1122: 0xe00d, 0x1123: 0x0008, 0x1124: 0xe00d, 0x1125: 0x0008, 0x1126: 0xe00d, 0x1127: 0x0008, 0x1128: 0xe00d, 0x1129: 0x0008, - 0x112a: 0xe00d, 0x112b: 0x0008, 0x112c: 0xe00d, 0x112d: 0x0008, 0x112e: 0x0008, 0x112f: 0x3308, - 0x1130: 0x3318, 0x1131: 0x3318, 0x1132: 0x3318, 0x1133: 0x0018, 0x1134: 0x3308, 0x1135: 0x3308, - 0x1136: 0x3308, 0x1137: 0x3308, 0x1138: 0x3308, 0x1139: 0x3308, 0x113a: 0x3308, 0x113b: 0x3308, - 0x113c: 0x3308, 0x113d: 0x3308, 0x113e: 0x0018, 0x113f: 0x0008, + 0x112a: 0xe00d, 0x112b: 0x0008, 0x112c: 0xe00d, 0x112d: 0x0008, 0x112e: 0xe00d, 0x112f: 0x0008, + 0x1130: 0x0008, 0x1131: 0x0008, 0x1132: 0xe00d, 0x1133: 0x0008, 0x1134: 0xe00d, 0x1135: 0x0008, + 0x1136: 0xe00d, 0x1137: 0x0008, 0x1138: 0xe00d, 0x1139: 0x0008, 0x113a: 0xe00d, 0x113b: 0x0008, + 0x113c: 0xe00d, 0x113d: 0x0008, 0x113e: 0xe00d, 0x113f: 0x0008, // Block 0x45, offset 0x1140 0x1140: 0xe00d, 0x1141: 0x0008, 0x1142: 0xe00d, 0x1143: 0x0008, 0x1144: 0xe00d, 0x1145: 0x0008, 0x1146: 0xe00d, 0x1147: 0x0008, 0x1148: 0xe00d, 0x1149: 0x0008, 0x114a: 0xe00d, 0x114b: 0x0008, 0x114c: 0xe00d, 0x114d: 0x0008, 0x114e: 0xe00d, 0x114f: 0x0008, 0x1150: 0xe00d, 0x1151: 0x0008, 0x1152: 0xe00d, 0x1153: 0x0008, 0x1154: 0xe00d, 0x1155: 0x0008, 0x1156: 0xe00d, 0x1157: 0x0008, - 0x1158: 0xe00d, 0x1159: 0x0008, 0x115a: 0xe00d, 0x115b: 0x0008, 0x115c: 0x0ea1, 0x115d: 0x6e49, - 0x115e: 0x3308, 0x115f: 0x3308, 0x1160: 0x0008, 0x1161: 0x0008, 0x1162: 0x0008, 0x1163: 0x0008, - 0x1164: 0x0008, 0x1165: 0x0008, 0x1166: 0x0008, 0x1167: 0x0008, 0x1168: 0x0008, 0x1169: 0x0008, - 0x116a: 0x0008, 0x116b: 0x0008, 
0x116c: 0x0008, 0x116d: 0x0008, 0x116e: 0x0008, 0x116f: 0x0008, - 0x1170: 0x0008, 0x1171: 0x0008, 0x1172: 0x0008, 0x1173: 0x0008, 0x1174: 0x0008, 0x1175: 0x0008, - 0x1176: 0x0008, 0x1177: 0x0008, 0x1178: 0x0008, 0x1179: 0x0008, 0x117a: 0x0008, 0x117b: 0x0008, - 0x117c: 0x0008, 0x117d: 0x0008, 0x117e: 0x0008, 0x117f: 0x0008, + 0x1158: 0xe00d, 0x1159: 0x0008, 0x115a: 0xe00d, 0x115b: 0x0008, 0x115c: 0xe00d, 0x115d: 0x0008, + 0x115e: 0xe00d, 0x115f: 0x0008, 0x1160: 0xe00d, 0x1161: 0x0008, 0x1162: 0xe00d, 0x1163: 0x0008, + 0x1164: 0xe00d, 0x1165: 0x0008, 0x1166: 0xe00d, 0x1167: 0x0008, 0x1168: 0xe00d, 0x1169: 0x0008, + 0x116a: 0xe00d, 0x116b: 0x0008, 0x116c: 0xe00d, 0x116d: 0x0008, 0x116e: 0xe00d, 0x116f: 0x0008, + 0x1170: 0xe0fd, 0x1171: 0x0008, 0x1172: 0x0008, 0x1173: 0x0008, 0x1174: 0x0008, 0x1175: 0x0008, + 0x1176: 0x0008, 0x1177: 0x0008, 0x1178: 0x0008, 0x1179: 0xe01d, 0x117a: 0x0008, 0x117b: 0xe03d, + 0x117c: 0x0008, 0x117d: 0x4445, 0x117e: 0xe00d, 0x117f: 0x0008, // Block 0x46, offset 0x1180 - 0x1180: 0x0018, 0x1181: 0x0018, 0x1182: 0x0018, 0x1183: 0x0018, 0x1184: 0x0018, 0x1185: 0x0018, - 0x1186: 0x0018, 0x1187: 0x0018, 0x1188: 0x0018, 0x1189: 0x0018, 0x118a: 0x0018, 0x118b: 0x0018, - 0x118c: 0x0018, 0x118d: 0x0018, 0x118e: 0x0018, 0x118f: 0x0018, 0x1190: 0x0018, 0x1191: 0x0018, - 0x1192: 0x0018, 0x1193: 0x0018, 0x1194: 0x0018, 0x1195: 0x0018, 0x1196: 0x0018, 0x1197: 0x0008, - 0x1198: 0x0008, 0x1199: 0x0008, 0x119a: 0x0008, 0x119b: 0x0008, 0x119c: 0x0008, 0x119d: 0x0008, - 0x119e: 0x0008, 0x119f: 0x0008, 0x11a0: 0x0018, 0x11a1: 0x0018, 0x11a2: 0xe00d, 0x11a3: 0x0008, + 0x1180: 0xe00d, 0x1181: 0x0008, 0x1182: 0xe00d, 0x1183: 0x0008, 0x1184: 0xe00d, 0x1185: 0x0008, + 0x1186: 0xe00d, 0x1187: 0x0008, 0x1188: 0x0008, 0x1189: 0x0018, 0x118a: 0x0018, 0x118b: 0xe03d, + 0x118c: 0x0008, 0x118d: 0x0409, 0x118e: 0x0008, 0x118f: 0x0008, 0x1190: 0xe00d, 0x1191: 0x0008, + 0x1192: 0xe00d, 0x1193: 0x0008, 0x1194: 0x0008, 0x1195: 0x0008, 0x1196: 0xe00d, 0x1197: 0x0008, + 0x1198: 0xe00d, 0x1199: 0x0008, 0x119a: 0xe00d, 0x119b: 0x0008, 0x119c: 0xe00d, 0x119d: 0x0008, + 0x119e: 0xe00d, 0x119f: 0x0008, 0x11a0: 0xe00d, 0x11a1: 0x0008, 0x11a2: 0xe00d, 0x11a3: 0x0008, 0x11a4: 0xe00d, 0x11a5: 0x0008, 0x11a6: 0xe00d, 0x11a7: 0x0008, 0x11a8: 0xe00d, 0x11a9: 0x0008, - 0x11aa: 0xe00d, 0x11ab: 0x0008, 0x11ac: 0xe00d, 0x11ad: 0x0008, 0x11ae: 0xe00d, 0x11af: 0x0008, - 0x11b0: 0x0008, 0x11b1: 0x0008, 0x11b2: 0xe00d, 0x11b3: 0x0008, 0x11b4: 0xe00d, 0x11b5: 0x0008, + 0x11aa: 0x13d1, 0x11ab: 0x0371, 0x11ac: 0x0401, 0x11ad: 0x13d9, 0x11ae: 0x0421, 0x11af: 0x0008, + 0x11b0: 0x13e1, 0x11b1: 0x13e9, 0x11b2: 0x0429, 0x11b3: 0x4465, 0x11b4: 0xe00d, 0x11b5: 0x0008, 0x11b6: 0xe00d, 0x11b7: 0x0008, 0x11b8: 0xe00d, 0x11b9: 0x0008, 0x11ba: 0xe00d, 0x11bb: 0x0008, 0x11bc: 0xe00d, 0x11bd: 0x0008, 0x11be: 0xe00d, 0x11bf: 0x0008, // Block 0x47, offset 0x11c0 - 0x11c0: 0xe00d, 0x11c1: 0x0008, 0x11c2: 0xe00d, 0x11c3: 0x0008, 0x11c4: 0xe00d, 0x11c5: 0x0008, - 0x11c6: 0xe00d, 0x11c7: 0x0008, 0x11c8: 0xe00d, 0x11c9: 0x0008, 0x11ca: 0xe00d, 0x11cb: 0x0008, - 0x11cc: 0xe00d, 0x11cd: 0x0008, 0x11ce: 0xe00d, 0x11cf: 0x0008, 0x11d0: 0xe00d, 0x11d1: 0x0008, - 0x11d2: 0xe00d, 0x11d3: 0x0008, 0x11d4: 0xe00d, 0x11d5: 0x0008, 0x11d6: 0xe00d, 0x11d7: 0x0008, - 0x11d8: 0xe00d, 0x11d9: 0x0008, 0x11da: 0xe00d, 0x11db: 0x0008, 0x11dc: 0xe00d, 0x11dd: 0x0008, - 0x11de: 0xe00d, 0x11df: 0x0008, 0x11e0: 0xe00d, 0x11e1: 0x0008, 0x11e2: 0xe00d, 0x11e3: 0x0008, - 0x11e4: 0xe00d, 0x11e5: 0x0008, 0x11e6: 0xe00d, 0x11e7: 0x0008, 0x11e8: 0xe00d, 0x11e9: 0x0008, - 
0x11ea: 0xe00d, 0x11eb: 0x0008, 0x11ec: 0xe00d, 0x11ed: 0x0008, 0x11ee: 0xe00d, 0x11ef: 0x0008, - 0x11f0: 0xe0fd, 0x11f1: 0x0008, 0x11f2: 0x0008, 0x11f3: 0x0008, 0x11f4: 0x0008, 0x11f5: 0x0008, - 0x11f6: 0x0008, 0x11f7: 0x0008, 0x11f8: 0x0008, 0x11f9: 0xe01d, 0x11fa: 0x0008, 0x11fb: 0xe03d, - 0x11fc: 0x0008, 0x11fd: 0x4445, 0x11fe: 0xe00d, 0x11ff: 0x0008, + 0x11c0: 0x650d, 0x11c1: 0x652d, 0x11c2: 0x654d, 0x11c3: 0x656d, 0x11c4: 0x658d, 0x11c5: 0x65ad, + 0x11c6: 0x65cd, 0x11c7: 0x65ed, 0x11c8: 0x660d, 0x11c9: 0x662d, 0x11ca: 0x664d, 0x11cb: 0x666d, + 0x11cc: 0x668d, 0x11cd: 0x66ad, 0x11ce: 0x0008, 0x11cf: 0x0008, 0x11d0: 0x66cd, 0x11d1: 0x0008, + 0x11d2: 0x66ed, 0x11d3: 0x0008, 0x11d4: 0x0008, 0x11d5: 0x670d, 0x11d6: 0x672d, 0x11d7: 0x674d, + 0x11d8: 0x676d, 0x11d9: 0x678d, 0x11da: 0x67ad, 0x11db: 0x67cd, 0x11dc: 0x67ed, 0x11dd: 0x680d, + 0x11de: 0x682d, 0x11df: 0x0008, 0x11e0: 0x684d, 0x11e1: 0x0008, 0x11e2: 0x686d, 0x11e3: 0x0008, + 0x11e4: 0x0008, 0x11e5: 0x688d, 0x11e6: 0x68ad, 0x11e7: 0x0008, 0x11e8: 0x0008, 0x11e9: 0x0008, + 0x11ea: 0x68cd, 0x11eb: 0x68ed, 0x11ec: 0x690d, 0x11ed: 0x692d, 0x11ee: 0x694d, 0x11ef: 0x696d, + 0x11f0: 0x698d, 0x11f1: 0x69ad, 0x11f2: 0x69cd, 0x11f3: 0x69ed, 0x11f4: 0x6a0d, 0x11f5: 0x6a2d, + 0x11f6: 0x6a4d, 0x11f7: 0x6a6d, 0x11f8: 0x6a8d, 0x11f9: 0x6aad, 0x11fa: 0x6acd, 0x11fb: 0x6aed, + 0x11fc: 0x6b0d, 0x11fd: 0x6b2d, 0x11fe: 0x6b4d, 0x11ff: 0x6b6d, // Block 0x48, offset 0x1200 - 0x1200: 0xe00d, 0x1201: 0x0008, 0x1202: 0xe00d, 0x1203: 0x0008, 0x1204: 0xe00d, 0x1205: 0x0008, - 0x1206: 0xe00d, 0x1207: 0x0008, 0x1208: 0x0008, 0x1209: 0x0018, 0x120a: 0x0018, 0x120b: 0xe03d, - 0x120c: 0x0008, 0x120d: 0x11d9, 0x120e: 0x0008, 0x120f: 0x0008, 0x1210: 0xe00d, 0x1211: 0x0008, - 0x1212: 0xe00d, 0x1213: 0x0008, 0x1214: 0x0008, 0x1215: 0x0008, 0x1216: 0xe00d, 0x1217: 0x0008, - 0x1218: 0xe00d, 0x1219: 0x0008, 0x121a: 0xe00d, 0x121b: 0x0008, 0x121c: 0xe00d, 0x121d: 0x0008, - 0x121e: 0xe00d, 0x121f: 0x0008, 0x1220: 0xe00d, 0x1221: 0x0008, 0x1222: 0xe00d, 0x1223: 0x0008, - 0x1224: 0xe00d, 0x1225: 0x0008, 0x1226: 0xe00d, 0x1227: 0x0008, 0x1228: 0xe00d, 0x1229: 0x0008, - 0x122a: 0x6e61, 0x122b: 0x1029, 0x122c: 0x11c1, 0x122d: 0x6e79, 0x122e: 0x1221, 0x122f: 0x0008, - 0x1230: 0x6e91, 0x1231: 0x6ea9, 0x1232: 0x1239, 0x1233: 0x4465, 0x1234: 0xe00d, 0x1235: 0x0008, - 0x1236: 0xe00d, 0x1237: 0x0008, 0x1238: 0xe00d, 0x1239: 0x0008, 0x123a: 0xe00d, 0x123b: 0x0008, - 0x123c: 0xe00d, 0x123d: 0x0008, 0x123e: 0xe00d, 0x123f: 0x0008, + 0x1200: 0x7acd, 0x1201: 0x7aed, 0x1202: 0x7b0d, 0x1203: 0x7b2d, 0x1204: 0x7b4d, 0x1205: 0x7b6d, + 0x1206: 0x7b8d, 0x1207: 0x7bad, 0x1208: 0x7bcd, 0x1209: 0x7bed, 0x120a: 0x7c0d, 0x120b: 0x7c2d, + 0x120c: 0x7c4d, 0x120d: 0x7c6d, 0x120e: 0x7c8d, 0x120f: 0x1409, 0x1210: 0x1411, 0x1211: 0x1419, + 0x1212: 0x7cad, 0x1213: 0x7ccd, 0x1214: 0x7ced, 0x1215: 0x1421, 0x1216: 0x1429, 0x1217: 0x1431, + 0x1218: 0x7d0d, 0x1219: 0x7d2d, 0x121a: 0x0040, 0x121b: 0x0040, 0x121c: 0x0040, 0x121d: 0x0040, + 0x121e: 0x0040, 0x121f: 0x0040, 0x1220: 0x0040, 0x1221: 0x0040, 0x1222: 0x0040, 0x1223: 0x0040, + 0x1224: 0x0040, 0x1225: 0x0040, 0x1226: 0x0040, 0x1227: 0x0040, 0x1228: 0x0040, 0x1229: 0x0040, + 0x122a: 0x0040, 0x122b: 0x0040, 0x122c: 0x0040, 0x122d: 0x0040, 0x122e: 0x0040, 0x122f: 0x0040, + 0x1230: 0x0040, 0x1231: 0x0040, 0x1232: 0x0040, 0x1233: 0x0040, 0x1234: 0x0040, 0x1235: 0x0040, + 0x1236: 0x0040, 0x1237: 0x0040, 0x1238: 0x0040, 0x1239: 0x0040, 0x123a: 0x0040, 0x123b: 0x0040, + 0x123c: 0x0040, 0x123d: 0x0040, 0x123e: 0x0040, 0x123f: 0x0040, // Block 0x49, offset 0x1240 
- 0x1240: 0x650d, 0x1241: 0x652d, 0x1242: 0x654d, 0x1243: 0x656d, 0x1244: 0x658d, 0x1245: 0x65ad, - 0x1246: 0x65cd, 0x1247: 0x65ed, 0x1248: 0x660d, 0x1249: 0x662d, 0x124a: 0x664d, 0x124b: 0x666d, - 0x124c: 0x668d, 0x124d: 0x66ad, 0x124e: 0x0008, 0x124f: 0x0008, 0x1250: 0x66cd, 0x1251: 0x0008, - 0x1252: 0x66ed, 0x1253: 0x0008, 0x1254: 0x0008, 0x1255: 0x670d, 0x1256: 0x672d, 0x1257: 0x674d, - 0x1258: 0x676d, 0x1259: 0x678d, 0x125a: 0x67ad, 0x125b: 0x67cd, 0x125c: 0x67ed, 0x125d: 0x680d, - 0x125e: 0x682d, 0x125f: 0x0008, 0x1260: 0x684d, 0x1261: 0x0008, 0x1262: 0x686d, 0x1263: 0x0008, - 0x1264: 0x0008, 0x1265: 0x688d, 0x1266: 0x68ad, 0x1267: 0x0008, 0x1268: 0x0008, 0x1269: 0x0008, - 0x126a: 0x68cd, 0x126b: 0x68ed, 0x126c: 0x690d, 0x126d: 0x692d, 0x126e: 0x694d, 0x126f: 0x696d, - 0x1270: 0x698d, 0x1271: 0x69ad, 0x1272: 0x69cd, 0x1273: 0x69ed, 0x1274: 0x6a0d, 0x1275: 0x6a2d, - 0x1276: 0x6a4d, 0x1277: 0x6a6d, 0x1278: 0x6a8d, 0x1279: 0x6aad, 0x127a: 0x6acd, 0x127b: 0x6aed, - 0x127c: 0x6b0d, 0x127d: 0x6b2d, 0x127e: 0x6b4d, 0x127f: 0x6b6d, + 0x1240: 0x1439, 0x1241: 0x1441, 0x1242: 0x1449, 0x1243: 0x7d4d, 0x1244: 0x7d6d, 0x1245: 0x1451, + 0x1246: 0x1451, 0x1247: 0x0040, 0x1248: 0x0040, 0x1249: 0x0040, 0x124a: 0x0040, 0x124b: 0x0040, + 0x124c: 0x0040, 0x124d: 0x0040, 0x124e: 0x0040, 0x124f: 0x0040, 0x1250: 0x0040, 0x1251: 0x0040, + 0x1252: 0x0040, 0x1253: 0x1459, 0x1254: 0x1461, 0x1255: 0x1469, 0x1256: 0x1471, 0x1257: 0x1479, + 0x1258: 0x0040, 0x1259: 0x0040, 0x125a: 0x0040, 0x125b: 0x0040, 0x125c: 0x0040, 0x125d: 0x1481, + 0x125e: 0x3308, 0x125f: 0x1489, 0x1260: 0x1491, 0x1261: 0x0779, 0x1262: 0x0791, 0x1263: 0x1499, + 0x1264: 0x14a1, 0x1265: 0x14a9, 0x1266: 0x14b1, 0x1267: 0x14b9, 0x1268: 0x14c1, 0x1269: 0x071a, + 0x126a: 0x14c9, 0x126b: 0x14d1, 0x126c: 0x14d9, 0x126d: 0x14e1, 0x126e: 0x14e9, 0x126f: 0x14f1, + 0x1270: 0x14f9, 0x1271: 0x1501, 0x1272: 0x1509, 0x1273: 0x1511, 0x1274: 0x1519, 0x1275: 0x1521, + 0x1276: 0x1529, 0x1277: 0x0040, 0x1278: 0x1531, 0x1279: 0x1539, 0x127a: 0x1541, 0x127b: 0x1549, + 0x127c: 0x1551, 0x127d: 0x0040, 0x127e: 0x1559, 0x127f: 0x0040, // Block 0x4a, offset 0x1280 - 0x1280: 0x7acd, 0x1281: 0x7aed, 0x1282: 0x7b0d, 0x1283: 0x7b2d, 0x1284: 0x7b4d, 0x1285: 0x7b6d, - 0x1286: 0x7b8d, 0x1287: 0x7bad, 0x1288: 0x7bcd, 0x1289: 0x7bed, 0x128a: 0x7c0d, 0x128b: 0x7c2d, - 0x128c: 0x7c4d, 0x128d: 0x7c6d, 0x128e: 0x7c8d, 0x128f: 0x6f19, 0x1290: 0x6f41, 0x1291: 0x6f69, - 0x1292: 0x7cad, 0x1293: 0x7ccd, 0x1294: 0x7ced, 0x1295: 0x6f91, 0x1296: 0x6fb9, 0x1297: 0x6fe1, - 0x1298: 0x7d0d, 0x1299: 0x7d2d, 0x129a: 0x0040, 0x129b: 0x0040, 0x129c: 0x0040, 0x129d: 0x0040, - 0x129e: 0x0040, 0x129f: 0x0040, 0x12a0: 0x0040, 0x12a1: 0x0040, 0x12a2: 0x0040, 0x12a3: 0x0040, - 0x12a4: 0x0040, 0x12a5: 0x0040, 0x12a6: 0x0040, 0x12a7: 0x0040, 0x12a8: 0x0040, 0x12a9: 0x0040, - 0x12aa: 0x0040, 0x12ab: 0x0040, 0x12ac: 0x0040, 0x12ad: 0x0040, 0x12ae: 0x0040, 0x12af: 0x0040, - 0x12b0: 0x0040, 0x12b1: 0x0040, 0x12b2: 0x0040, 0x12b3: 0x0040, 0x12b4: 0x0040, 0x12b5: 0x0040, - 0x12b6: 0x0040, 0x12b7: 0x0040, 0x12b8: 0x0040, 0x12b9: 0x0040, 0x12ba: 0x0040, 0x12bb: 0x0040, - 0x12bc: 0x0040, 0x12bd: 0x0040, 0x12be: 0x0040, 0x12bf: 0x0040, + 0x1280: 0x1561, 0x1281: 0x1569, 0x1282: 0x0040, 0x1283: 0x1571, 0x1284: 0x1579, 0x1285: 0x0040, + 0x1286: 0x1581, 0x1287: 0x1589, 0x1288: 0x1591, 0x1289: 0x1599, 0x128a: 0x15a1, 0x128b: 0x15a9, + 0x128c: 0x15b1, 0x128d: 0x15b9, 0x128e: 0x15c1, 0x128f: 0x15c9, 0x1290: 0x15d1, 0x1291: 0x15d1, + 0x1292: 0x15d9, 0x1293: 0x15d9, 0x1294: 0x15d9, 0x1295: 0x15d9, 0x1296: 0x15e1, 0x1297: 
0x15e1, + 0x1298: 0x15e1, 0x1299: 0x15e1, 0x129a: 0x15e9, 0x129b: 0x15e9, 0x129c: 0x15e9, 0x129d: 0x15e9, + 0x129e: 0x15f1, 0x129f: 0x15f1, 0x12a0: 0x15f1, 0x12a1: 0x15f1, 0x12a2: 0x15f9, 0x12a3: 0x15f9, + 0x12a4: 0x15f9, 0x12a5: 0x15f9, 0x12a6: 0x1601, 0x12a7: 0x1601, 0x12a8: 0x1601, 0x12a9: 0x1601, + 0x12aa: 0x1609, 0x12ab: 0x1609, 0x12ac: 0x1609, 0x12ad: 0x1609, 0x12ae: 0x1611, 0x12af: 0x1611, + 0x12b0: 0x1611, 0x12b1: 0x1611, 0x12b2: 0x1619, 0x12b3: 0x1619, 0x12b4: 0x1619, 0x12b5: 0x1619, + 0x12b6: 0x1621, 0x12b7: 0x1621, 0x12b8: 0x1621, 0x12b9: 0x1621, 0x12ba: 0x1629, 0x12bb: 0x1629, + 0x12bc: 0x1629, 0x12bd: 0x1629, 0x12be: 0x1631, 0x12bf: 0x1631, // Block 0x4b, offset 0x12c0 - 0x12c0: 0x7009, 0x12c1: 0x7021, 0x12c2: 0x7039, 0x12c3: 0x7d4d, 0x12c4: 0x7d6d, 0x12c5: 0x7051, - 0x12c6: 0x7051, 0x12c7: 0x0040, 0x12c8: 0x0040, 0x12c9: 0x0040, 0x12ca: 0x0040, 0x12cb: 0x0040, - 0x12cc: 0x0040, 0x12cd: 0x0040, 0x12ce: 0x0040, 0x12cf: 0x0040, 0x12d0: 0x0040, 0x12d1: 0x0040, - 0x12d2: 0x0040, 0x12d3: 0x7069, 0x12d4: 0x7091, 0x12d5: 0x70b9, 0x12d6: 0x70e1, 0x12d7: 0x7109, - 0x12d8: 0x0040, 0x12d9: 0x0040, 0x12da: 0x0040, 0x12db: 0x0040, 0x12dc: 0x0040, 0x12dd: 0x7131, - 0x12de: 0x3308, 0x12df: 0x7159, 0x12e0: 0x7181, 0x12e1: 0x20a9, 0x12e2: 0x20f1, 0x12e3: 0x7199, - 0x12e4: 0x71b1, 0x12e5: 0x71c9, 0x12e6: 0x71e1, 0x12e7: 0x71f9, 0x12e8: 0x7211, 0x12e9: 0x1fb2, - 0x12ea: 0x7229, 0x12eb: 0x7251, 0x12ec: 0x7279, 0x12ed: 0x72b1, 0x12ee: 0x72e9, 0x12ef: 0x7311, - 0x12f0: 0x7339, 0x12f1: 0x7361, 0x12f2: 0x7389, 0x12f3: 0x73b1, 0x12f4: 0x73d9, 0x12f5: 0x7401, - 0x12f6: 0x7429, 0x12f7: 0x0040, 0x12f8: 0x7451, 0x12f9: 0x7479, 0x12fa: 0x74a1, 0x12fb: 0x74c9, - 0x12fc: 0x74f1, 0x12fd: 0x0040, 0x12fe: 0x7519, 0x12ff: 0x0040, + 0x12c0: 0x1631, 0x12c1: 0x1631, 0x12c2: 0x1639, 0x12c3: 0x1639, 0x12c4: 0x1641, 0x12c5: 0x1641, + 0x12c6: 0x1649, 0x12c7: 0x1649, 0x12c8: 0x1651, 0x12c9: 0x1651, 0x12ca: 0x1659, 0x12cb: 0x1659, + 0x12cc: 0x1661, 0x12cd: 0x1661, 0x12ce: 0x1669, 0x12cf: 0x1669, 0x12d0: 0x1669, 0x12d1: 0x1669, + 0x12d2: 0x1671, 0x12d3: 0x1671, 0x12d4: 0x1671, 0x12d5: 0x1671, 0x12d6: 0x1679, 0x12d7: 0x1679, + 0x12d8: 0x1679, 0x12d9: 0x1679, 0x12da: 0x1681, 0x12db: 0x1681, 0x12dc: 0x1681, 0x12dd: 0x1681, + 0x12de: 0x1689, 0x12df: 0x1689, 0x12e0: 0x1691, 0x12e1: 0x1691, 0x12e2: 0x1691, 0x12e3: 0x1691, + 0x12e4: 0x1699, 0x12e5: 0x1699, 0x12e6: 0x16a1, 0x12e7: 0x16a1, 0x12e8: 0x16a1, 0x12e9: 0x16a1, + 0x12ea: 0x16a9, 0x12eb: 0x16a9, 0x12ec: 0x16a9, 0x12ed: 0x16a9, 0x12ee: 0x16b1, 0x12ef: 0x16b1, + 0x12f0: 0x16b9, 0x12f1: 0x16b9, 0x12f2: 0x0818, 0x12f3: 0x0818, 0x12f4: 0x0818, 0x12f5: 0x0818, + 0x12f6: 0x0818, 0x12f7: 0x0818, 0x12f8: 0x0818, 0x12f9: 0x0818, 0x12fa: 0x0818, 0x12fb: 0x0818, + 0x12fc: 0x0818, 0x12fd: 0x0818, 0x12fe: 0x0818, 0x12ff: 0x0818, // Block 0x4c, offset 0x1300 - 0x1300: 0x7541, 0x1301: 0x7569, 0x1302: 0x0040, 0x1303: 0x7591, 0x1304: 0x75b9, 0x1305: 0x0040, - 0x1306: 0x75e1, 0x1307: 0x7609, 0x1308: 0x7631, 0x1309: 0x7659, 0x130a: 0x7681, 0x130b: 0x76a9, - 0x130c: 0x76d1, 0x130d: 0x76f9, 0x130e: 0x7721, 0x130f: 0x7749, 0x1310: 0x7771, 0x1311: 0x7771, - 0x1312: 0x7789, 0x1313: 0x7789, 0x1314: 0x7789, 0x1315: 0x7789, 0x1316: 0x77a1, 0x1317: 0x77a1, - 0x1318: 0x77a1, 0x1319: 0x77a1, 0x131a: 0x77b9, 0x131b: 0x77b9, 0x131c: 0x77b9, 0x131d: 0x77b9, - 0x131e: 0x77d1, 0x131f: 0x77d1, 0x1320: 0x77d1, 0x1321: 0x77d1, 0x1322: 0x77e9, 0x1323: 0x77e9, - 0x1324: 0x77e9, 0x1325: 0x77e9, 0x1326: 0x7801, 0x1327: 0x7801, 0x1328: 0x7801, 0x1329: 0x7801, - 0x132a: 0x7819, 0x132b: 0x7819, 0x132c: 0x7819, 
0x132d: 0x7819, 0x132e: 0x7831, 0x132f: 0x7831, - 0x1330: 0x7831, 0x1331: 0x7831, 0x1332: 0x7849, 0x1333: 0x7849, 0x1334: 0x7849, 0x1335: 0x7849, - 0x1336: 0x7861, 0x1337: 0x7861, 0x1338: 0x7861, 0x1339: 0x7861, 0x133a: 0x7879, 0x133b: 0x7879, - 0x133c: 0x7879, 0x133d: 0x7879, 0x133e: 0x7891, 0x133f: 0x7891, + 0x1300: 0x0818, 0x1301: 0x0818, 0x1302: 0x0040, 0x1303: 0x0040, 0x1304: 0x0040, 0x1305: 0x0040, + 0x1306: 0x0040, 0x1307: 0x0040, 0x1308: 0x0040, 0x1309: 0x0040, 0x130a: 0x0040, 0x130b: 0x0040, + 0x130c: 0x0040, 0x130d: 0x0040, 0x130e: 0x0040, 0x130f: 0x0040, 0x1310: 0x0040, 0x1311: 0x0040, + 0x1312: 0x0040, 0x1313: 0x16c1, 0x1314: 0x16c1, 0x1315: 0x16c1, 0x1316: 0x16c1, 0x1317: 0x16c9, + 0x1318: 0x16c9, 0x1319: 0x16d1, 0x131a: 0x16d1, 0x131b: 0x16d9, 0x131c: 0x16d9, 0x131d: 0x0149, + 0x131e: 0x16e1, 0x131f: 0x16e1, 0x1320: 0x16e9, 0x1321: 0x16e9, 0x1322: 0x16f1, 0x1323: 0x16f1, + 0x1324: 0x16f9, 0x1325: 0x16f9, 0x1326: 0x16f9, 0x1327: 0x16f9, 0x1328: 0x1701, 0x1329: 0x1701, + 0x132a: 0x1709, 0x132b: 0x1709, 0x132c: 0x1711, 0x132d: 0x1711, 0x132e: 0x1719, 0x132f: 0x1719, + 0x1330: 0x1721, 0x1331: 0x1721, 0x1332: 0x1729, 0x1333: 0x1729, 0x1334: 0x1731, 0x1335: 0x1731, + 0x1336: 0x1739, 0x1337: 0x1739, 0x1338: 0x1739, 0x1339: 0x1741, 0x133a: 0x1741, 0x133b: 0x1741, + 0x133c: 0x1749, 0x133d: 0x1749, 0x133e: 0x1749, 0x133f: 0x1749, // Block 0x4d, offset 0x1340 - 0x1340: 0x7891, 0x1341: 0x7891, 0x1342: 0x78a9, 0x1343: 0x78a9, 0x1344: 0x78c1, 0x1345: 0x78c1, - 0x1346: 0x78d9, 0x1347: 0x78d9, 0x1348: 0x78f1, 0x1349: 0x78f1, 0x134a: 0x7909, 0x134b: 0x7909, - 0x134c: 0x7921, 0x134d: 0x7921, 0x134e: 0x7939, 0x134f: 0x7939, 0x1350: 0x7939, 0x1351: 0x7939, - 0x1352: 0x7951, 0x1353: 0x7951, 0x1354: 0x7951, 0x1355: 0x7951, 0x1356: 0x7969, 0x1357: 0x7969, - 0x1358: 0x7969, 0x1359: 0x7969, 0x135a: 0x7981, 0x135b: 0x7981, 0x135c: 0x7981, 0x135d: 0x7981, - 0x135e: 0x7999, 0x135f: 0x7999, 0x1360: 0x79b1, 0x1361: 0x79b1, 0x1362: 0x79b1, 0x1363: 0x79b1, - 0x1364: 0x79c9, 0x1365: 0x79c9, 0x1366: 0x79e1, 0x1367: 0x79e1, 0x1368: 0x79e1, 0x1369: 0x79e1, - 0x136a: 0x79f9, 0x136b: 0x79f9, 0x136c: 0x79f9, 0x136d: 0x79f9, 0x136e: 0x7a11, 0x136f: 0x7a11, - 0x1370: 0x7a29, 0x1371: 0x7a29, 0x1372: 0x0818, 0x1373: 0x0818, 0x1374: 0x0818, 0x1375: 0x0818, - 0x1376: 0x0818, 0x1377: 0x0818, 0x1378: 0x0818, 0x1379: 0x0818, 0x137a: 0x0818, 0x137b: 0x0818, - 0x137c: 0x0818, 0x137d: 0x0818, 0x137e: 0x0818, 0x137f: 0x0818, + 0x1340: 0x1949, 0x1341: 0x1951, 0x1342: 0x1959, 0x1343: 0x1961, 0x1344: 0x1969, 0x1345: 0x1971, + 0x1346: 0x1979, 0x1347: 0x1981, 0x1348: 0x1989, 0x1349: 0x1991, 0x134a: 0x1999, 0x134b: 0x19a1, + 0x134c: 0x19a9, 0x134d: 0x19b1, 0x134e: 0x19b9, 0x134f: 0x19c1, 0x1350: 0x19c9, 0x1351: 0x19d1, + 0x1352: 0x19d9, 0x1353: 0x19e1, 0x1354: 0x19e9, 0x1355: 0x19f1, 0x1356: 0x19f9, 0x1357: 0x1a01, + 0x1358: 0x1a09, 0x1359: 0x1a11, 0x135a: 0x1a19, 0x135b: 0x1a21, 0x135c: 0x1a29, 0x135d: 0x1a31, + 0x135e: 0x1a3a, 0x135f: 0x1a42, 0x1360: 0x1a4a, 0x1361: 0x1a52, 0x1362: 0x1a5a, 0x1363: 0x1a62, + 0x1364: 0x1a69, 0x1365: 0x1a71, 0x1366: 0x1761, 0x1367: 0x1a79, 0x1368: 0x1741, 0x1369: 0x1769, + 0x136a: 0x1a81, 0x136b: 0x1a89, 0x136c: 0x1789, 0x136d: 0x1a91, 0x136e: 0x1791, 0x136f: 0x1799, + 0x1370: 0x1a99, 0x1371: 0x1aa1, 0x1372: 0x17b9, 0x1373: 0x1aa9, 0x1374: 0x17c1, 0x1375: 0x17c9, + 0x1376: 0x1ab1, 0x1377: 0x1ab9, 0x1378: 0x17d9, 0x1379: 0x1ac1, 0x137a: 0x17e1, 0x137b: 0x17e9, + 0x137c: 0x18d1, 0x137d: 0x18d9, 0x137e: 0x18f1, 0x137f: 0x18f9, // Block 0x4e, offset 0x1380 - 0x1380: 0x0818, 0x1381: 0x0818, 0x1382: 
0x0040, 0x1383: 0x0040, 0x1384: 0x0040, 0x1385: 0x0040, - 0x1386: 0x0040, 0x1387: 0x0040, 0x1388: 0x0040, 0x1389: 0x0040, 0x138a: 0x0040, 0x138b: 0x0040, - 0x138c: 0x0040, 0x138d: 0x0040, 0x138e: 0x0040, 0x138f: 0x0040, 0x1390: 0x0040, 0x1391: 0x0040, - 0x1392: 0x0040, 0x1393: 0x7a41, 0x1394: 0x7a41, 0x1395: 0x7a41, 0x1396: 0x7a41, 0x1397: 0x7a59, - 0x1398: 0x7a59, 0x1399: 0x7a71, 0x139a: 0x7a71, 0x139b: 0x7a89, 0x139c: 0x7a89, 0x139d: 0x0479, - 0x139e: 0x7aa1, 0x139f: 0x7aa1, 0x13a0: 0x7ab9, 0x13a1: 0x7ab9, 0x13a2: 0x7ad1, 0x13a3: 0x7ad1, - 0x13a4: 0x7ae9, 0x13a5: 0x7ae9, 0x13a6: 0x7ae9, 0x13a7: 0x7ae9, 0x13a8: 0x7b01, 0x13a9: 0x7b01, - 0x13aa: 0x7b19, 0x13ab: 0x7b19, 0x13ac: 0x7b41, 0x13ad: 0x7b41, 0x13ae: 0x7b69, 0x13af: 0x7b69, - 0x13b0: 0x7b91, 0x13b1: 0x7b91, 0x13b2: 0x7bb9, 0x13b3: 0x7bb9, 0x13b4: 0x7be1, 0x13b5: 0x7be1, - 0x13b6: 0x7c09, 0x13b7: 0x7c09, 0x13b8: 0x7c09, 0x13b9: 0x7c31, 0x13ba: 0x7c31, 0x13bb: 0x7c31, - 0x13bc: 0x7c59, 0x13bd: 0x7c59, 0x13be: 0x7c59, 0x13bf: 0x7c59, + 0x1380: 0x1901, 0x1381: 0x1921, 0x1382: 0x1929, 0x1383: 0x1931, 0x1384: 0x1939, 0x1385: 0x1959, + 0x1386: 0x1961, 0x1387: 0x1969, 0x1388: 0x1ac9, 0x1389: 0x1989, 0x138a: 0x1ad1, 0x138b: 0x1ad9, + 0x138c: 0x19b9, 0x138d: 0x1ae1, 0x138e: 0x19c1, 0x138f: 0x19c9, 0x1390: 0x1a31, 0x1391: 0x1ae9, + 0x1392: 0x1af1, 0x1393: 0x1a09, 0x1394: 0x1af9, 0x1395: 0x1a11, 0x1396: 0x1a19, 0x1397: 0x1751, + 0x1398: 0x1759, 0x1399: 0x1b01, 0x139a: 0x1761, 0x139b: 0x1b09, 0x139c: 0x1771, 0x139d: 0x1779, + 0x139e: 0x1781, 0x139f: 0x1789, 0x13a0: 0x1b11, 0x13a1: 0x17a1, 0x13a2: 0x17a9, 0x13a3: 0x17b1, + 0x13a4: 0x17b9, 0x13a5: 0x1b19, 0x13a6: 0x17d9, 0x13a7: 0x17f1, 0x13a8: 0x17f9, 0x13a9: 0x1801, + 0x13aa: 0x1809, 0x13ab: 0x1811, 0x13ac: 0x1821, 0x13ad: 0x1829, 0x13ae: 0x1831, 0x13af: 0x1839, + 0x13b0: 0x1841, 0x13b1: 0x1849, 0x13b2: 0x1b21, 0x13b3: 0x1851, 0x13b4: 0x1859, 0x13b5: 0x1861, + 0x13b6: 0x1869, 0x13b7: 0x1871, 0x13b8: 0x1879, 0x13b9: 0x1889, 0x13ba: 0x1891, 0x13bb: 0x1899, + 0x13bc: 0x18a1, 0x13bd: 0x18a9, 0x13be: 0x18b1, 0x13bf: 0x18b9, // Block 0x4f, offset 0x13c0 - 0x13c0: 0x8649, 0x13c1: 0x8671, 0x13c2: 0x8699, 0x13c3: 0x86c1, 0x13c4: 0x86e9, 0x13c5: 0x8711, - 0x13c6: 0x8739, 0x13c7: 0x8761, 0x13c8: 0x8789, 0x13c9: 0x87b1, 0x13ca: 0x87d9, 0x13cb: 0x8801, - 0x13cc: 0x8829, 0x13cd: 0x8851, 0x13ce: 0x8879, 0x13cf: 0x88a1, 0x13d0: 0x88c9, 0x13d1: 0x88f1, - 0x13d2: 0x8919, 0x13d3: 0x8941, 0x13d4: 0x8969, 0x13d5: 0x8991, 0x13d6: 0x89b9, 0x13d7: 0x89e1, - 0x13d8: 0x8a09, 0x13d9: 0x8a31, 0x13da: 0x8a59, 0x13db: 0x8a81, 0x13dc: 0x8aa9, 0x13dd: 0x8ad1, - 0x13de: 0x8afa, 0x13df: 0x8b2a, 0x13e0: 0x8b5a, 0x13e1: 0x8b8a, 0x13e2: 0x8bba, 0x13e3: 0x8bea, - 0x13e4: 0x8c19, 0x13e5: 0x8c41, 0x13e6: 0x7cc1, 0x13e7: 0x8c69, 0x13e8: 0x7c31, 0x13e9: 0x7ce9, - 0x13ea: 0x8c91, 0x13eb: 0x8cb9, 0x13ec: 0x7d89, 0x13ed: 0x8ce1, 0x13ee: 0x7db1, 0x13ef: 0x7dd9, - 0x13f0: 0x8d09, 0x13f1: 0x8d31, 0x13f2: 0x7e79, 0x13f3: 0x8d59, 0x13f4: 0x7ea1, 0x13f5: 0x7ec9, - 0x13f6: 0x8d81, 0x13f7: 0x8da9, 0x13f8: 0x7f19, 0x13f9: 0x8dd1, 0x13fa: 0x7f41, 0x13fb: 0x7f69, - 0x13fc: 0x83f1, 0x13fd: 0x8419, 0x13fe: 0x8491, 0x13ff: 0x84b9, + 0x13c0: 0x18c1, 0x13c1: 0x18c9, 0x13c2: 0x18e1, 0x13c3: 0x18e9, 0x13c4: 0x1909, 0x13c5: 0x1911, + 0x13c6: 0x1919, 0x13c7: 0x1921, 0x13c8: 0x1929, 0x13c9: 0x1941, 0x13ca: 0x1949, 0x13cb: 0x1951, + 0x13cc: 0x1959, 0x13cd: 0x1b29, 0x13ce: 0x1971, 0x13cf: 0x1979, 0x13d0: 0x1981, 0x13d1: 0x1989, + 0x13d2: 0x19a1, 0x13d3: 0x19a9, 0x13d4: 0x19b1, 0x13d5: 0x19b9, 0x13d6: 0x1b31, 0x13d7: 0x19d1, + 0x13d8: 0x19d9, 0x13d9: 0x1b39, 
0x13da: 0x19f1, 0x13db: 0x19f9, 0x13dc: 0x1a01, 0x13dd: 0x1a09, + 0x13de: 0x1b41, 0x13df: 0x1761, 0x13e0: 0x1b09, 0x13e1: 0x1789, 0x13e2: 0x1b11, 0x13e3: 0x17b9, + 0x13e4: 0x1b19, 0x13e5: 0x17d9, 0x13e6: 0x1b49, 0x13e7: 0x1841, 0x13e8: 0x1b51, 0x13e9: 0x1b59, + 0x13ea: 0x1b61, 0x13eb: 0x1921, 0x13ec: 0x1929, 0x13ed: 0x1959, 0x13ee: 0x19b9, 0x13ef: 0x1b31, + 0x13f0: 0x1a09, 0x13f1: 0x1b41, 0x13f2: 0x1b69, 0x13f3: 0x1b71, 0x13f4: 0x1b79, 0x13f5: 0x1b81, + 0x13f6: 0x1b89, 0x13f7: 0x1b91, 0x13f8: 0x1b99, 0x13f9: 0x1ba1, 0x13fa: 0x1ba9, 0x13fb: 0x1bb1, + 0x13fc: 0x1bb9, 0x13fd: 0x1bc1, 0x13fe: 0x1bc9, 0x13ff: 0x1bd1, // Block 0x50, offset 0x1400 - 0x1400: 0x84e1, 0x1401: 0x8581, 0x1402: 0x85a9, 0x1403: 0x85d1, 0x1404: 0x85f9, 0x1405: 0x8699, - 0x1406: 0x86c1, 0x1407: 0x86e9, 0x1408: 0x8df9, 0x1409: 0x8789, 0x140a: 0x8e21, 0x140b: 0x8e49, - 0x140c: 0x8879, 0x140d: 0x8e71, 0x140e: 0x88a1, 0x140f: 0x88c9, 0x1410: 0x8ad1, 0x1411: 0x8e99, - 0x1412: 0x8ec1, 0x1413: 0x8a09, 0x1414: 0x8ee9, 0x1415: 0x8a31, 0x1416: 0x8a59, 0x1417: 0x7c71, - 0x1418: 0x7c99, 0x1419: 0x8f11, 0x141a: 0x7cc1, 0x141b: 0x8f39, 0x141c: 0x7d11, 0x141d: 0x7d39, - 0x141e: 0x7d61, 0x141f: 0x7d89, 0x1420: 0x8f61, 0x1421: 0x7e01, 0x1422: 0x7e29, 0x1423: 0x7e51, - 0x1424: 0x7e79, 0x1425: 0x8f89, 0x1426: 0x7f19, 0x1427: 0x7f91, 0x1428: 0x7fb9, 0x1429: 0x7fe1, - 0x142a: 0x8009, 0x142b: 0x8031, 0x142c: 0x8081, 0x142d: 0x80a9, 0x142e: 0x80d1, 0x142f: 0x80f9, - 0x1430: 0x8121, 0x1431: 0x8149, 0x1432: 0x8fb1, 0x1433: 0x8171, 0x1434: 0x8199, 0x1435: 0x81c1, - 0x1436: 0x81e9, 0x1437: 0x8211, 0x1438: 0x8239, 0x1439: 0x8289, 0x143a: 0x82b1, 0x143b: 0x82d9, - 0x143c: 0x8301, 0x143d: 0x8329, 0x143e: 0x8351, 0x143f: 0x8379, + 0x1400: 0x1bd9, 0x1401: 0x1be1, 0x1402: 0x1be9, 0x1403: 0x1bf1, 0x1404: 0x1bf9, 0x1405: 0x1c01, + 0x1406: 0x1c09, 0x1407: 0x1c11, 0x1408: 0x1c19, 0x1409: 0x1c21, 0x140a: 0x1c29, 0x140b: 0x1c31, + 0x140c: 0x1b59, 0x140d: 0x1c39, 0x140e: 0x1c41, 0x140f: 0x1c49, 0x1410: 0x1c51, 0x1411: 0x1b81, + 0x1412: 0x1b89, 0x1413: 0x1b91, 0x1414: 0x1b99, 0x1415: 0x1ba1, 0x1416: 0x1ba9, 0x1417: 0x1bb1, + 0x1418: 0x1bb9, 0x1419: 0x1bc1, 0x141a: 0x1bc9, 0x141b: 0x1bd1, 0x141c: 0x1bd9, 0x141d: 0x1be1, + 0x141e: 0x1be9, 0x141f: 0x1bf1, 0x1420: 0x1bf9, 0x1421: 0x1c01, 0x1422: 0x1c09, 0x1423: 0x1c11, + 0x1424: 0x1c19, 0x1425: 0x1c21, 0x1426: 0x1c29, 0x1427: 0x1c31, 0x1428: 0x1b59, 0x1429: 0x1c39, + 0x142a: 0x1c41, 0x142b: 0x1c49, 0x142c: 0x1c51, 0x142d: 0x1c21, 0x142e: 0x1c29, 0x142f: 0x1c31, + 0x1430: 0x1b59, 0x1431: 0x1b51, 0x1432: 0x1b61, 0x1433: 0x1881, 0x1434: 0x1829, 0x1435: 0x1831, + 0x1436: 0x1839, 0x1437: 0x1c21, 0x1438: 0x1c29, 0x1439: 0x1c31, 0x143a: 0x1881, 0x143b: 0x1889, + 0x143c: 0x1c59, 0x143d: 0x1c59, 0x143e: 0x0018, 0x143f: 0x0018, // Block 0x51, offset 0x1440 - 0x1440: 0x83a1, 0x1441: 0x83c9, 0x1442: 0x8441, 0x1443: 0x8469, 0x1444: 0x8509, 0x1445: 0x8531, - 0x1446: 0x8559, 0x1447: 0x8581, 0x1448: 0x85a9, 0x1449: 0x8621, 0x144a: 0x8649, 0x144b: 0x8671, - 0x144c: 0x8699, 0x144d: 0x8fd9, 0x144e: 0x8711, 0x144f: 0x8739, 0x1450: 0x8761, 0x1451: 0x8789, - 0x1452: 0x8801, 0x1453: 0x8829, 0x1454: 0x8851, 0x1455: 0x8879, 0x1456: 0x9001, 0x1457: 0x88f1, - 0x1458: 0x8919, 0x1459: 0x9029, 0x145a: 0x8991, 0x145b: 0x89b9, 0x145c: 0x89e1, 0x145d: 0x8a09, - 0x145e: 0x9051, 0x145f: 0x7cc1, 0x1460: 0x8f39, 0x1461: 0x7d89, 0x1462: 0x8f61, 0x1463: 0x7e79, - 0x1464: 0x8f89, 0x1465: 0x7f19, 0x1466: 0x9079, 0x1467: 0x8121, 0x1468: 0x90a1, 0x1469: 0x90c9, - 0x146a: 0x90f1, 0x146b: 0x8581, 0x146c: 0x85a9, 0x146d: 0x8699, 0x146e: 0x8879, 0x146f: 0x9001, 
- 0x1470: 0x8a09, 0x1471: 0x9051, 0x1472: 0x9119, 0x1473: 0x9151, 0x1474: 0x9189, 0x1475: 0x91c1, - 0x1476: 0x91e9, 0x1477: 0x9211, 0x1478: 0x9239, 0x1479: 0x9261, 0x147a: 0x9289, 0x147b: 0x92b1, - 0x147c: 0x92d9, 0x147d: 0x9301, 0x147e: 0x9329, 0x147f: 0x9351, + 0x1440: 0x0040, 0x1441: 0x0040, 0x1442: 0x0040, 0x1443: 0x0040, 0x1444: 0x0040, 0x1445: 0x0040, + 0x1446: 0x0040, 0x1447: 0x0040, 0x1448: 0x0040, 0x1449: 0x0040, 0x144a: 0x0040, 0x144b: 0x0040, + 0x144c: 0x0040, 0x144d: 0x0040, 0x144e: 0x0040, 0x144f: 0x0040, 0x1450: 0x1c61, 0x1451: 0x1c69, + 0x1452: 0x1c69, 0x1453: 0x1c71, 0x1454: 0x1c79, 0x1455: 0x1c81, 0x1456: 0x1c89, 0x1457: 0x1c91, + 0x1458: 0x1c99, 0x1459: 0x1c99, 0x145a: 0x1ca1, 0x145b: 0x1ca9, 0x145c: 0x1cb1, 0x145d: 0x1cb9, + 0x145e: 0x1cc1, 0x145f: 0x1cc9, 0x1460: 0x1cc9, 0x1461: 0x1cd1, 0x1462: 0x1cd9, 0x1463: 0x1cd9, + 0x1464: 0x1ce1, 0x1465: 0x1ce1, 0x1466: 0x1ce9, 0x1467: 0x1cf1, 0x1468: 0x1cf1, 0x1469: 0x1cf9, + 0x146a: 0x1d01, 0x146b: 0x1d01, 0x146c: 0x1d09, 0x146d: 0x1d09, 0x146e: 0x1d11, 0x146f: 0x1d19, + 0x1470: 0x1d19, 0x1471: 0x1d21, 0x1472: 0x1d21, 0x1473: 0x1d29, 0x1474: 0x1d31, 0x1475: 0x1d39, + 0x1476: 0x1d41, 0x1477: 0x1d41, 0x1478: 0x1d49, 0x1479: 0x1d51, 0x147a: 0x1d59, 0x147b: 0x1d61, + 0x147c: 0x1d69, 0x147d: 0x1d69, 0x147e: 0x1d71, 0x147f: 0x1d79, // Block 0x52, offset 0x1480 - 0x1480: 0x9379, 0x1481: 0x93a1, 0x1482: 0x93c9, 0x1483: 0x93f1, 0x1484: 0x9419, 0x1485: 0x9441, - 0x1486: 0x9469, 0x1487: 0x9491, 0x1488: 0x94b9, 0x1489: 0x94e1, 0x148a: 0x9509, 0x148b: 0x9531, - 0x148c: 0x90c9, 0x148d: 0x9559, 0x148e: 0x9581, 0x148f: 0x95a9, 0x1490: 0x95d1, 0x1491: 0x91c1, - 0x1492: 0x91e9, 0x1493: 0x9211, 0x1494: 0x9239, 0x1495: 0x9261, 0x1496: 0x9289, 0x1497: 0x92b1, - 0x1498: 0x92d9, 0x1499: 0x9301, 0x149a: 0x9329, 0x149b: 0x9351, 0x149c: 0x9379, 0x149d: 0x93a1, - 0x149e: 0x93c9, 0x149f: 0x93f1, 0x14a0: 0x9419, 0x14a1: 0x9441, 0x14a2: 0x9469, 0x14a3: 0x9491, - 0x14a4: 0x94b9, 0x14a5: 0x94e1, 0x14a6: 0x9509, 0x14a7: 0x9531, 0x14a8: 0x90c9, 0x14a9: 0x9559, - 0x14aa: 0x9581, 0x14ab: 0x95a9, 0x14ac: 0x95d1, 0x14ad: 0x94e1, 0x14ae: 0x9509, 0x14af: 0x9531, - 0x14b0: 0x90c9, 0x14b1: 0x90a1, 0x14b2: 0x90f1, 0x14b3: 0x8261, 0x14b4: 0x80a9, 0x14b5: 0x80d1, - 0x14b6: 0x80f9, 0x14b7: 0x94e1, 0x14b8: 0x9509, 0x14b9: 0x9531, 0x14ba: 0x8261, 0x14bb: 0x8289, - 0x14bc: 0x95f9, 0x14bd: 0x95f9, 0x14be: 0x0018, 0x14bf: 0x0018, + 0x1480: 0x1f29, 0x1481: 0x1f31, 0x1482: 0x1f39, 0x1483: 0x1f11, 0x1484: 0x1d39, 0x1485: 0x1ce9, + 0x1486: 0x1f41, 0x1487: 0x1f49, 0x1488: 0x0040, 0x1489: 0x0040, 0x148a: 0x0040, 0x148b: 0x0040, + 0x148c: 0x0040, 0x148d: 0x0040, 0x148e: 0x0040, 0x148f: 0x0040, 0x1490: 0x0040, 0x1491: 0x0040, + 0x1492: 0x0040, 0x1493: 0x0040, 0x1494: 0x0040, 0x1495: 0x0040, 0x1496: 0x0040, 0x1497: 0x0040, + 0x1498: 0x0040, 0x1499: 0x0040, 0x149a: 0x0040, 0x149b: 0x0040, 0x149c: 0x0040, 0x149d: 0x0040, + 0x149e: 0x0040, 0x149f: 0x0040, 0x14a0: 0x0040, 0x14a1: 0x0040, 0x14a2: 0x0040, 0x14a3: 0x0040, + 0x14a4: 0x0040, 0x14a5: 0x0040, 0x14a6: 0x0040, 0x14a7: 0x0040, 0x14a8: 0x0040, 0x14a9: 0x0040, + 0x14aa: 0x0040, 0x14ab: 0x0040, 0x14ac: 0x0040, 0x14ad: 0x0040, 0x14ae: 0x0040, 0x14af: 0x0040, + 0x14b0: 0x1f51, 0x14b1: 0x1f59, 0x14b2: 0x1f61, 0x14b3: 0x1f69, 0x14b4: 0x1f71, 0x14b5: 0x1f79, + 0x14b6: 0x1f81, 0x14b7: 0x1f89, 0x14b8: 0x1f91, 0x14b9: 0x1f99, 0x14ba: 0x1fa2, 0x14bb: 0x1faa, + 0x14bc: 0x1fb1, 0x14bd: 0x0018, 0x14be: 0x0040, 0x14bf: 0x0040, // Block 0x53, offset 0x14c0 - 0x14c0: 0x0040, 0x14c1: 0x0040, 0x14c2: 0x0040, 0x14c3: 0x0040, 0x14c4: 0x0040, 0x14c5: 
0x0040, - 0x14c6: 0x0040, 0x14c7: 0x0040, 0x14c8: 0x0040, 0x14c9: 0x0040, 0x14ca: 0x0040, 0x14cb: 0x0040, - 0x14cc: 0x0040, 0x14cd: 0x0040, 0x14ce: 0x0040, 0x14cf: 0x0040, 0x14d0: 0x9621, 0x14d1: 0x9659, - 0x14d2: 0x9659, 0x14d3: 0x9691, 0x14d4: 0x96c9, 0x14d5: 0x9701, 0x14d6: 0x9739, 0x14d7: 0x9771, - 0x14d8: 0x97a9, 0x14d9: 0x97a9, 0x14da: 0x97e1, 0x14db: 0x9819, 0x14dc: 0x9851, 0x14dd: 0x9889, - 0x14de: 0x98c1, 0x14df: 0x98f9, 0x14e0: 0x98f9, 0x14e1: 0x9931, 0x14e2: 0x9969, 0x14e3: 0x9969, - 0x14e4: 0x99a1, 0x14e5: 0x99a1, 0x14e6: 0x99d9, 0x14e7: 0x9a11, 0x14e8: 0x9a11, 0x14e9: 0x9a49, - 0x14ea: 0x9a81, 0x14eb: 0x9a81, 0x14ec: 0x9ab9, 0x14ed: 0x9ab9, 0x14ee: 0x9af1, 0x14ef: 0x9b29, - 0x14f0: 0x9b29, 0x14f1: 0x9b61, 0x14f2: 0x9b61, 0x14f3: 0x9b99, 0x14f4: 0x9bd1, 0x14f5: 0x9c09, - 0x14f6: 0x9c41, 0x14f7: 0x9c41, 0x14f8: 0x9c79, 0x14f9: 0x9cb1, 0x14fa: 0x9ce9, 0x14fb: 0x9d21, - 0x14fc: 0x9d59, 0x14fd: 0x9d59, 0x14fe: 0x9d91, 0x14ff: 0x9dc9, + 0x14c0: 0x33c0, 0x14c1: 0x33c0, 0x14c2: 0x33c0, 0x14c3: 0x33c0, 0x14c4: 0x33c0, 0x14c5: 0x33c0, + 0x14c6: 0x33c0, 0x14c7: 0x33c0, 0x14c8: 0x33c0, 0x14c9: 0x33c0, 0x14ca: 0x33c0, 0x14cb: 0x33c0, + 0x14cc: 0x33c0, 0x14cd: 0x33c0, 0x14ce: 0x33c0, 0x14cf: 0x33c0, 0x14d0: 0x1fba, 0x14d1: 0x7d8d, + 0x14d2: 0x0040, 0x14d3: 0x1fc2, 0x14d4: 0x0122, 0x14d5: 0x1fca, 0x14d6: 0x1fd2, 0x14d7: 0x7dad, + 0x14d8: 0x7dcd, 0x14d9: 0x0040, 0x14da: 0x0040, 0x14db: 0x0040, 0x14dc: 0x0040, 0x14dd: 0x0040, + 0x14de: 0x0040, 0x14df: 0x0040, 0x14e0: 0x3308, 0x14e1: 0x3308, 0x14e2: 0x3308, 0x14e3: 0x3308, + 0x14e4: 0x3308, 0x14e5: 0x3308, 0x14e6: 0x3308, 0x14e7: 0x3308, 0x14e8: 0x3308, 0x14e9: 0x3308, + 0x14ea: 0x3308, 0x14eb: 0x3308, 0x14ec: 0x3308, 0x14ed: 0x3308, 0x14ee: 0x3308, 0x14ef: 0x3308, + 0x14f0: 0x0040, 0x14f1: 0x7ded, 0x14f2: 0x7e0d, 0x14f3: 0x1fda, 0x14f4: 0x1fda, 0x14f5: 0x072a, + 0x14f6: 0x0732, 0x14f7: 0x1fe2, 0x14f8: 0x1fea, 0x14f9: 0x7e2d, 0x14fa: 0x7e4d, 0x14fb: 0x7e6d, + 0x14fc: 0x7e2d, 0x14fd: 0x7e8d, 0x14fe: 0x7ead, 0x14ff: 0x7e8d, // Block 0x54, offset 0x1500 - 0x1500: 0xa999, 0x1501: 0xa9d1, 0x1502: 0xaa09, 0x1503: 0xa8f1, 0x1504: 0x9c09, 0x1505: 0x99d9, - 0x1506: 0xaa41, 0x1507: 0xaa79, 0x1508: 0x0040, 0x1509: 0x0040, 0x150a: 0x0040, 0x150b: 0x0040, - 0x150c: 0x0040, 0x150d: 0x0040, 0x150e: 0x0040, 0x150f: 0x0040, 0x1510: 0x0040, 0x1511: 0x0040, - 0x1512: 0x0040, 0x1513: 0x0040, 0x1514: 0x0040, 0x1515: 0x0040, 0x1516: 0x0040, 0x1517: 0x0040, - 0x1518: 0x0040, 0x1519: 0x0040, 0x151a: 0x0040, 0x151b: 0x0040, 0x151c: 0x0040, 0x151d: 0x0040, - 0x151e: 0x0040, 0x151f: 0x0040, 0x1520: 0x0040, 0x1521: 0x0040, 0x1522: 0x0040, 0x1523: 0x0040, - 0x1524: 0x0040, 0x1525: 0x0040, 0x1526: 0x0040, 0x1527: 0x0040, 0x1528: 0x0040, 0x1529: 0x0040, - 0x152a: 0x0040, 0x152b: 0x0040, 0x152c: 0x0040, 0x152d: 0x0040, 0x152e: 0x0040, 0x152f: 0x0040, - 0x1530: 0xaab1, 0x1531: 0xaae9, 0x1532: 0xab21, 0x1533: 0xab69, 0x1534: 0xabb1, 0x1535: 0xabf9, - 0x1536: 0xac41, 0x1537: 0xac89, 0x1538: 0xacd1, 0x1539: 0xad19, 0x153a: 0xad52, 0x153b: 0xae62, - 0x153c: 0xaee1, 0x153d: 0x0018, 0x153e: 0x0040, 0x153f: 0x0040, + 0x1500: 0x7ecd, 0x1501: 0x7eed, 0x1502: 0x7f0d, 0x1503: 0x7eed, 0x1504: 0x7f2d, 0x1505: 0x0018, + 0x1506: 0x0018, 0x1507: 0x1ff2, 0x1508: 0x1ffa, 0x1509: 0x7f4e, 0x150a: 0x7f6e, 0x150b: 0x7f8e, + 0x150c: 0x7fae, 0x150d: 0x1fda, 0x150e: 0x1fda, 0x150f: 0x1fda, 0x1510: 0x1fba, 0x1511: 0x7fcd, + 0x1512: 0x0040, 0x1513: 0x0040, 0x1514: 0x0122, 0x1515: 0x1fc2, 0x1516: 0x1fd2, 0x1517: 0x1fca, + 0x1518: 0x7fed, 0x1519: 0x072a, 0x151a: 0x0732, 0x151b: 0x1fe2, 0x151c: 0x1fea, 
0x151d: 0x7ecd, + 0x151e: 0x7f2d, 0x151f: 0x2002, 0x1520: 0x200a, 0x1521: 0x2012, 0x1522: 0x071a, 0x1523: 0x2019, + 0x1524: 0x2022, 0x1525: 0x202a, 0x1526: 0x0722, 0x1527: 0x0040, 0x1528: 0x2032, 0x1529: 0x203a, + 0x152a: 0x2042, 0x152b: 0x204a, 0x152c: 0x0040, 0x152d: 0x0040, 0x152e: 0x0040, 0x152f: 0x0040, + 0x1530: 0x800e, 0x1531: 0x2051, 0x1532: 0x802e, 0x1533: 0x0808, 0x1534: 0x804e, 0x1535: 0x0040, + 0x1536: 0x806e, 0x1537: 0x2059, 0x1538: 0x808e, 0x1539: 0x2061, 0x153a: 0x80ae, 0x153b: 0x2069, + 0x153c: 0x80ce, 0x153d: 0x2071, 0x153e: 0x80ee, 0x153f: 0x2079, // Block 0x55, offset 0x1540 - 0x1540: 0x33c0, 0x1541: 0x33c0, 0x1542: 0x33c0, 0x1543: 0x33c0, 0x1544: 0x33c0, 0x1545: 0x33c0, - 0x1546: 0x33c0, 0x1547: 0x33c0, 0x1548: 0x33c0, 0x1549: 0x33c0, 0x154a: 0x33c0, 0x154b: 0x33c0, - 0x154c: 0x33c0, 0x154d: 0x33c0, 0x154e: 0x33c0, 0x154f: 0x33c0, 0x1550: 0xaf2a, 0x1551: 0x7d8d, - 0x1552: 0x0040, 0x1553: 0xaf3a, 0x1554: 0x03c2, 0x1555: 0xaf4a, 0x1556: 0xaf5a, 0x1557: 0x7dad, - 0x1558: 0x7dcd, 0x1559: 0x0040, 0x155a: 0x0040, 0x155b: 0x0040, 0x155c: 0x0040, 0x155d: 0x0040, - 0x155e: 0x0040, 0x155f: 0x0040, 0x1560: 0x3308, 0x1561: 0x3308, 0x1562: 0x3308, 0x1563: 0x3308, - 0x1564: 0x3308, 0x1565: 0x3308, 0x1566: 0x3308, 0x1567: 0x3308, 0x1568: 0x3308, 0x1569: 0x3308, - 0x156a: 0x3308, 0x156b: 0x3308, 0x156c: 0x3308, 0x156d: 0x3308, 0x156e: 0x3308, 0x156f: 0x3308, - 0x1570: 0x0040, 0x1571: 0x7ded, 0x1572: 0x7e0d, 0x1573: 0xaf6a, 0x1574: 0xaf6a, 0x1575: 0x1fd2, - 0x1576: 0x1fe2, 0x1577: 0xaf7a, 0x1578: 0xaf8a, 0x1579: 0x7e2d, 0x157a: 0x7e4d, 0x157b: 0x7e6d, - 0x157c: 0x7e2d, 0x157d: 0x7e8d, 0x157e: 0x7ead, 0x157f: 0x7e8d, + 0x1540: 0x2081, 0x1541: 0x2089, 0x1542: 0x2089, 0x1543: 0x2091, 0x1544: 0x2091, 0x1545: 0x2099, + 0x1546: 0x2099, 0x1547: 0x20a1, 0x1548: 0x20a1, 0x1549: 0x20a9, 0x154a: 0x20a9, 0x154b: 0x20a9, + 0x154c: 0x20a9, 0x154d: 0x20b1, 0x154e: 0x20b1, 0x154f: 0x20b9, 0x1550: 0x20b9, 0x1551: 0x20b9, + 0x1552: 0x20b9, 0x1553: 0x20c1, 0x1554: 0x20c1, 0x1555: 0x20c9, 0x1556: 0x20c9, 0x1557: 0x20c9, + 0x1558: 0x20c9, 0x1559: 0x20d1, 0x155a: 0x20d1, 0x155b: 0x20d1, 0x155c: 0x20d1, 0x155d: 0x20d9, + 0x155e: 0x20d9, 0x155f: 0x20d9, 0x1560: 0x20d9, 0x1561: 0x20e1, 0x1562: 0x20e1, 0x1563: 0x20e1, + 0x1564: 0x20e1, 0x1565: 0x20e9, 0x1566: 0x20e9, 0x1567: 0x20e9, 0x1568: 0x20e9, 0x1569: 0x20f1, + 0x156a: 0x20f1, 0x156b: 0x20f9, 0x156c: 0x20f9, 0x156d: 0x2101, 0x156e: 0x2101, 0x156f: 0x2109, + 0x1570: 0x2109, 0x1571: 0x2111, 0x1572: 0x2111, 0x1573: 0x2111, 0x1574: 0x2111, 0x1575: 0x2119, + 0x1576: 0x2119, 0x1577: 0x2119, 0x1578: 0x2119, 0x1579: 0x2121, 0x157a: 0x2121, 0x157b: 0x2121, + 0x157c: 0x2121, 0x157d: 0x2129, 0x157e: 0x2129, 0x157f: 0x2129, // Block 0x56, offset 0x1580 - 0x1580: 0x7ecd, 0x1581: 0x7eed, 0x1582: 0x7f0d, 0x1583: 0x7eed, 0x1584: 0x7f2d, 0x1585: 0x0018, - 0x1586: 0x0018, 0x1587: 0xaf9a, 0x1588: 0xafaa, 0x1589: 0x7f4e, 0x158a: 0x7f6e, 0x158b: 0x7f8e, - 0x158c: 0x7fae, 0x158d: 0xaf6a, 0x158e: 0xaf6a, 0x158f: 0xaf6a, 0x1590: 0xaf2a, 0x1591: 0x7fcd, - 0x1592: 0x0040, 0x1593: 0x0040, 0x1594: 0x03c2, 0x1595: 0xaf3a, 0x1596: 0xaf5a, 0x1597: 0xaf4a, - 0x1598: 0x7fed, 0x1599: 0x1fd2, 0x159a: 0x1fe2, 0x159b: 0xaf7a, 0x159c: 0xaf8a, 0x159d: 0x7ecd, - 0x159e: 0x7f2d, 0x159f: 0xafba, 0x15a0: 0xafca, 0x15a1: 0xafda, 0x15a2: 0x1fb2, 0x15a3: 0xafe9, - 0x15a4: 0xaffa, 0x15a5: 0xb00a, 0x15a6: 0x1fc2, 0x15a7: 0x0040, 0x15a8: 0xb01a, 0x15a9: 0xb02a, - 0x15aa: 0xb03a, 0x15ab: 0xb04a, 0x15ac: 0x0040, 0x15ad: 0x0040, 0x15ae: 0x0040, 0x15af: 0x0040, - 0x15b0: 0x800e, 0x15b1: 0xb059, 0x15b2: 
0x802e, 0x15b3: 0x0808, 0x15b4: 0x804e, 0x15b5: 0x0040, - 0x15b6: 0x806e, 0x15b7: 0xb081, 0x15b8: 0x808e, 0x15b9: 0xb0a9, 0x15ba: 0x80ae, 0x15bb: 0xb0d1, - 0x15bc: 0x80ce, 0x15bd: 0xb0f9, 0x15be: 0x80ee, 0x15bf: 0xb121, + 0x1580: 0x2129, 0x1581: 0x2131, 0x1582: 0x2131, 0x1583: 0x2131, 0x1584: 0x2131, 0x1585: 0x2139, + 0x1586: 0x2139, 0x1587: 0x2139, 0x1588: 0x2139, 0x1589: 0x2141, 0x158a: 0x2141, 0x158b: 0x2141, + 0x158c: 0x2141, 0x158d: 0x2149, 0x158e: 0x2149, 0x158f: 0x2149, 0x1590: 0x2149, 0x1591: 0x2151, + 0x1592: 0x2151, 0x1593: 0x2151, 0x1594: 0x2151, 0x1595: 0x2159, 0x1596: 0x2159, 0x1597: 0x2159, + 0x1598: 0x2159, 0x1599: 0x2161, 0x159a: 0x2161, 0x159b: 0x2161, 0x159c: 0x2161, 0x159d: 0x2169, + 0x159e: 0x2169, 0x159f: 0x2169, 0x15a0: 0x2169, 0x15a1: 0x2171, 0x15a2: 0x2171, 0x15a3: 0x2171, + 0x15a4: 0x2171, 0x15a5: 0x2179, 0x15a6: 0x2179, 0x15a7: 0x2179, 0x15a8: 0x2179, 0x15a9: 0x2181, + 0x15aa: 0x2181, 0x15ab: 0x2181, 0x15ac: 0x2181, 0x15ad: 0x2189, 0x15ae: 0x2189, 0x15af: 0x1701, + 0x15b0: 0x1701, 0x15b1: 0x2191, 0x15b2: 0x2191, 0x15b3: 0x2191, 0x15b4: 0x2191, 0x15b5: 0x2199, + 0x15b6: 0x2199, 0x15b7: 0x21a1, 0x15b8: 0x21a1, 0x15b9: 0x21a9, 0x15ba: 0x21a9, 0x15bb: 0x21b1, + 0x15bc: 0x21b1, 0x15bd: 0x0040, 0x15be: 0x0040, 0x15bf: 0x03c0, // Block 0x57, offset 0x15c0 - 0x15c0: 0xb149, 0x15c1: 0xb161, 0x15c2: 0xb161, 0x15c3: 0xb179, 0x15c4: 0xb179, 0x15c5: 0xb191, - 0x15c6: 0xb191, 0x15c7: 0xb1a9, 0x15c8: 0xb1a9, 0x15c9: 0xb1c1, 0x15ca: 0xb1c1, 0x15cb: 0xb1c1, - 0x15cc: 0xb1c1, 0x15cd: 0xb1d9, 0x15ce: 0xb1d9, 0x15cf: 0xb1f1, 0x15d0: 0xb1f1, 0x15d1: 0xb1f1, - 0x15d2: 0xb1f1, 0x15d3: 0xb209, 0x15d4: 0xb209, 0x15d5: 0xb221, 0x15d6: 0xb221, 0x15d7: 0xb221, - 0x15d8: 0xb221, 0x15d9: 0xb239, 0x15da: 0xb239, 0x15db: 0xb239, 0x15dc: 0xb239, 0x15dd: 0xb251, - 0x15de: 0xb251, 0x15df: 0xb251, 0x15e0: 0xb251, 0x15e1: 0xb269, 0x15e2: 0xb269, 0x15e3: 0xb269, - 0x15e4: 0xb269, 0x15e5: 0xb281, 0x15e6: 0xb281, 0x15e7: 0xb281, 0x15e8: 0xb281, 0x15e9: 0xb299, - 0x15ea: 0xb299, 0x15eb: 0xb2b1, 0x15ec: 0xb2b1, 0x15ed: 0xb2c9, 0x15ee: 0xb2c9, 0x15ef: 0xb2e1, - 0x15f0: 0xb2e1, 0x15f1: 0xb2f9, 0x15f2: 0xb2f9, 0x15f3: 0xb2f9, 0x15f4: 0xb2f9, 0x15f5: 0xb311, - 0x15f6: 0xb311, 0x15f7: 0xb311, 0x15f8: 0xb311, 0x15f9: 0xb329, 0x15fa: 0xb329, 0x15fb: 0xb329, - 0x15fc: 0xb329, 0x15fd: 0xb341, 0x15fe: 0xb341, 0x15ff: 0xb341, + 0x15c0: 0x0040, 0x15c1: 0x1fca, 0x15c2: 0x21ba, 0x15c3: 0x2002, 0x15c4: 0x203a, 0x15c5: 0x2042, + 0x15c6: 0x200a, 0x15c7: 0x21c2, 0x15c8: 0x072a, 0x15c9: 0x0732, 0x15ca: 0x2012, 0x15cb: 0x071a, + 0x15cc: 0x1fba, 0x15cd: 0x2019, 0x15ce: 0x0961, 0x15cf: 0x21ca, 0x15d0: 0x06e1, 0x15d1: 0x0049, + 0x15d2: 0x0029, 0x15d3: 0x0031, 0x15d4: 0x06e9, 0x15d5: 0x06f1, 0x15d6: 0x06f9, 0x15d7: 0x0701, + 0x15d8: 0x0709, 0x15d9: 0x0711, 0x15da: 0x1fc2, 0x15db: 0x0122, 0x15dc: 0x2022, 0x15dd: 0x0722, + 0x15de: 0x202a, 0x15df: 0x1fd2, 0x15e0: 0x204a, 0x15e1: 0x0019, 0x15e2: 0x02e9, 0x15e3: 0x03d9, + 0x15e4: 0x02f1, 0x15e5: 0x02f9, 0x15e6: 0x03f1, 0x15e7: 0x0309, 0x15e8: 0x00a9, 0x15e9: 0x0311, + 0x15ea: 0x00b1, 0x15eb: 0x0319, 0x15ec: 0x0101, 0x15ed: 0x0321, 0x15ee: 0x0329, 0x15ef: 0x0051, + 0x15f0: 0x0339, 0x15f1: 0x0751, 0x15f2: 0x00b9, 0x15f3: 0x0089, 0x15f4: 0x0341, 0x15f5: 0x0349, + 0x15f6: 0x0391, 0x15f7: 0x00c1, 0x15f8: 0x0109, 0x15f9: 0x00c9, 0x15fa: 0x04b1, 0x15fb: 0x1ff2, + 0x15fc: 0x2032, 0x15fd: 0x1ffa, 0x15fe: 0x21d2, 0x15ff: 0x1fda, // Block 0x58, offset 0x1600 - 0x1600: 0xb341, 0x1601: 0xb359, 0x1602: 0xb359, 0x1603: 0xb359, 0x1604: 0xb359, 0x1605: 0xb371, - 0x1606: 0xb371, 0x1607: 0xb371, 
0x1608: 0xb371, 0x1609: 0xb389, 0x160a: 0xb389, 0x160b: 0xb389, - 0x160c: 0xb389, 0x160d: 0xb3a1, 0x160e: 0xb3a1, 0x160f: 0xb3a1, 0x1610: 0xb3a1, 0x1611: 0xb3b9, - 0x1612: 0xb3b9, 0x1613: 0xb3b9, 0x1614: 0xb3b9, 0x1615: 0xb3d1, 0x1616: 0xb3d1, 0x1617: 0xb3d1, - 0x1618: 0xb3d1, 0x1619: 0xb3e9, 0x161a: 0xb3e9, 0x161b: 0xb3e9, 0x161c: 0xb3e9, 0x161d: 0xb401, - 0x161e: 0xb401, 0x161f: 0xb401, 0x1620: 0xb401, 0x1621: 0xb419, 0x1622: 0xb419, 0x1623: 0xb419, - 0x1624: 0xb419, 0x1625: 0xb431, 0x1626: 0xb431, 0x1627: 0xb431, 0x1628: 0xb431, 0x1629: 0xb449, - 0x162a: 0xb449, 0x162b: 0xb449, 0x162c: 0xb449, 0x162d: 0xb461, 0x162e: 0xb461, 0x162f: 0x7b01, - 0x1630: 0x7b01, 0x1631: 0xb479, 0x1632: 0xb479, 0x1633: 0xb479, 0x1634: 0xb479, 0x1635: 0xb491, - 0x1636: 0xb491, 0x1637: 0xb4b9, 0x1638: 0xb4b9, 0x1639: 0xb4e1, 0x163a: 0xb4e1, 0x163b: 0xb509, - 0x163c: 0xb509, 0x163d: 0x0040, 0x163e: 0x0040, 0x163f: 0x03c0, + 0x1600: 0x0672, 0x1601: 0x0019, 0x1602: 0x02e9, 0x1603: 0x03d9, 0x1604: 0x02f1, 0x1605: 0x02f9, + 0x1606: 0x03f1, 0x1607: 0x0309, 0x1608: 0x00a9, 0x1609: 0x0311, 0x160a: 0x00b1, 0x160b: 0x0319, + 0x160c: 0x0101, 0x160d: 0x0321, 0x160e: 0x0329, 0x160f: 0x0051, 0x1610: 0x0339, 0x1611: 0x0751, + 0x1612: 0x00b9, 0x1613: 0x0089, 0x1614: 0x0341, 0x1615: 0x0349, 0x1616: 0x0391, 0x1617: 0x00c1, + 0x1618: 0x0109, 0x1619: 0x00c9, 0x161a: 0x04b1, 0x161b: 0x1fe2, 0x161c: 0x21da, 0x161d: 0x1fea, + 0x161e: 0x21e2, 0x161f: 0x810d, 0x1620: 0x812d, 0x1621: 0x0961, 0x1622: 0x814d, 0x1623: 0x814d, + 0x1624: 0x816d, 0x1625: 0x818d, 0x1626: 0x81ad, 0x1627: 0x81cd, 0x1628: 0x81ed, 0x1629: 0x820d, + 0x162a: 0x822d, 0x162b: 0x824d, 0x162c: 0x826d, 0x162d: 0x828d, 0x162e: 0x82ad, 0x162f: 0x82cd, + 0x1630: 0x82ed, 0x1631: 0x830d, 0x1632: 0x832d, 0x1633: 0x834d, 0x1634: 0x836d, 0x1635: 0x838d, + 0x1636: 0x83ad, 0x1637: 0x83cd, 0x1638: 0x83ed, 0x1639: 0x840d, 0x163a: 0x842d, 0x163b: 0x844d, + 0x163c: 0x81ed, 0x163d: 0x846d, 0x163e: 0x848d, 0x163f: 0x824d, // Block 0x59, offset 0x1640 - 0x1640: 0x0040, 0x1641: 0xaf4a, 0x1642: 0xb532, 0x1643: 0xafba, 0x1644: 0xb02a, 0x1645: 0xb03a, - 0x1646: 0xafca, 0x1647: 0xb542, 0x1648: 0x1fd2, 0x1649: 0x1fe2, 0x164a: 0xafda, 0x164b: 0x1fb2, - 0x164c: 0xaf2a, 0x164d: 0xafe9, 0x164e: 0x29d1, 0x164f: 0xb552, 0x1650: 0x1f41, 0x1651: 0x00c9, - 0x1652: 0x0069, 0x1653: 0x0079, 0x1654: 0x1f51, 0x1655: 0x1f61, 0x1656: 0x1f71, 0x1657: 0x1f81, - 0x1658: 0x1f91, 0x1659: 0x1fa1, 0x165a: 0xaf3a, 0x165b: 0x03c2, 0x165c: 0xaffa, 0x165d: 0x1fc2, - 0x165e: 0xb00a, 0x165f: 0xaf5a, 0x1660: 0xb04a, 0x1661: 0x0039, 0x1662: 0x0ee9, 0x1663: 0x1159, - 0x1664: 0x0ef9, 0x1665: 0x0f09, 0x1666: 0x1199, 0x1667: 0x0f31, 0x1668: 0x0249, 0x1669: 0x0f41, - 0x166a: 0x0259, 0x166b: 0x0f51, 0x166c: 0x0359, 0x166d: 0x0f61, 0x166e: 0x0f71, 0x166f: 0x00d9, - 0x1670: 0x0f99, 0x1671: 0x2039, 0x1672: 0x0269, 0x1673: 0x01d9, 0x1674: 0x0fa9, 0x1675: 0x0fb9, - 0x1676: 0x1089, 0x1677: 0x0279, 0x1678: 0x0369, 0x1679: 0x0289, 0x167a: 0x13d1, 0x167b: 0xaf9a, - 0x167c: 0xb01a, 0x167d: 0xafaa, 0x167e: 0xb562, 0x167f: 0xaf6a, + 0x1640: 0x84ad, 0x1641: 0x84cd, 0x1642: 0x84ed, 0x1643: 0x850d, 0x1644: 0x852d, 0x1645: 0x854d, + 0x1646: 0x856d, 0x1647: 0x858d, 0x1648: 0x850d, 0x1649: 0x85ad, 0x164a: 0x850d, 0x164b: 0x85cd, + 0x164c: 0x85cd, 0x164d: 0x85ed, 0x164e: 0x85ed, 0x164f: 0x860d, 0x1650: 0x854d, 0x1651: 0x862d, + 0x1652: 0x864d, 0x1653: 0x862d, 0x1654: 0x866d, 0x1655: 0x864d, 0x1656: 0x868d, 0x1657: 0x868d, + 0x1658: 0x86ad, 0x1659: 0x86ad, 0x165a: 0x86cd, 0x165b: 0x86cd, 0x165c: 0x864d, 0x165d: 0x814d, + 0x165e: 0x86ed, 0x165f: 
0x870d, 0x1660: 0x0040, 0x1661: 0x872d, 0x1662: 0x874d, 0x1663: 0x876d, + 0x1664: 0x878d, 0x1665: 0x876d, 0x1666: 0x87ad, 0x1667: 0x87cd, 0x1668: 0x87ed, 0x1669: 0x87ed, + 0x166a: 0x880d, 0x166b: 0x880d, 0x166c: 0x882d, 0x166d: 0x882d, 0x166e: 0x880d, 0x166f: 0x880d, + 0x1670: 0x884d, 0x1671: 0x886d, 0x1672: 0x888d, 0x1673: 0x88ad, 0x1674: 0x88cd, 0x1675: 0x88ed, + 0x1676: 0x88ed, 0x1677: 0x88ed, 0x1678: 0x890d, 0x1679: 0x890d, 0x167a: 0x890d, 0x167b: 0x890d, + 0x167c: 0x87ed, 0x167d: 0x87ed, 0x167e: 0x87ed, 0x167f: 0x0040, // Block 0x5a, offset 0x1680 - 0x1680: 0x1caa, 0x1681: 0x0039, 0x1682: 0x0ee9, 0x1683: 0x1159, 0x1684: 0x0ef9, 0x1685: 0x0f09, - 0x1686: 0x1199, 0x1687: 0x0f31, 0x1688: 0x0249, 0x1689: 0x0f41, 0x168a: 0x0259, 0x168b: 0x0f51, - 0x168c: 0x0359, 0x168d: 0x0f61, 0x168e: 0x0f71, 0x168f: 0x00d9, 0x1690: 0x0f99, 0x1691: 0x2039, - 0x1692: 0x0269, 0x1693: 0x01d9, 0x1694: 0x0fa9, 0x1695: 0x0fb9, 0x1696: 0x1089, 0x1697: 0x0279, - 0x1698: 0x0369, 0x1699: 0x0289, 0x169a: 0x13d1, 0x169b: 0xaf7a, 0x169c: 0xb572, 0x169d: 0xaf8a, - 0x169e: 0xb582, 0x169f: 0x810d, 0x16a0: 0x812d, 0x16a1: 0x29d1, 0x16a2: 0x814d, 0x16a3: 0x814d, - 0x16a4: 0x816d, 0x16a5: 0x818d, 0x16a6: 0x81ad, 0x16a7: 0x81cd, 0x16a8: 0x81ed, 0x16a9: 0x820d, - 0x16aa: 0x822d, 0x16ab: 0x824d, 0x16ac: 0x826d, 0x16ad: 0x828d, 0x16ae: 0x82ad, 0x16af: 0x82cd, - 0x16b0: 0x82ed, 0x16b1: 0x830d, 0x16b2: 0x832d, 0x16b3: 0x834d, 0x16b4: 0x836d, 0x16b5: 0x838d, - 0x16b6: 0x83ad, 0x16b7: 0x83cd, 0x16b8: 0x83ed, 0x16b9: 0x840d, 0x16ba: 0x842d, 0x16bb: 0x844d, - 0x16bc: 0x81ed, 0x16bd: 0x846d, 0x16be: 0x848d, 0x16bf: 0x824d, + 0x1680: 0x0040, 0x1681: 0x0040, 0x1682: 0x874d, 0x1683: 0x872d, 0x1684: 0x892d, 0x1685: 0x872d, + 0x1686: 0x874d, 0x1687: 0x872d, 0x1688: 0x0040, 0x1689: 0x0040, 0x168a: 0x894d, 0x168b: 0x874d, + 0x168c: 0x896d, 0x168d: 0x892d, 0x168e: 0x896d, 0x168f: 0x874d, 0x1690: 0x0040, 0x1691: 0x0040, + 0x1692: 0x898d, 0x1693: 0x89ad, 0x1694: 0x88ad, 0x1695: 0x896d, 0x1696: 0x892d, 0x1697: 0x896d, + 0x1698: 0x0040, 0x1699: 0x0040, 0x169a: 0x89cd, 0x169b: 0x89ed, 0x169c: 0x89cd, 0x169d: 0x0040, + 0x169e: 0x0040, 0x169f: 0x0040, 0x16a0: 0x21e9, 0x16a1: 0x21f1, 0x16a2: 0x21f9, 0x16a3: 0x8a0e, + 0x16a4: 0x2201, 0x16a5: 0x2209, 0x16a6: 0x8a2d, 0x16a7: 0x0040, 0x16a8: 0x8a4d, 0x16a9: 0x8a6d, + 0x16aa: 0x8a8d, 0x16ab: 0x8a6d, 0x16ac: 0x8aad, 0x16ad: 0x8acd, 0x16ae: 0x8aed, 0x16af: 0x0040, + 0x16b0: 0x0040, 0x16b1: 0x0040, 0x16b2: 0x0040, 0x16b3: 0x0040, 0x16b4: 0x0040, 0x16b5: 0x0040, + 0x16b6: 0x0040, 0x16b7: 0x0040, 0x16b8: 0x0040, 0x16b9: 0x0340, 0x16ba: 0x0340, 0x16bb: 0x0340, + 0x16bc: 0x0040, 0x16bd: 0x0040, 0x16be: 0x0040, 0x16bf: 0x0040, // Block 0x5b, offset 0x16c0 - 0x16c0: 0x84ad, 0x16c1: 0x84cd, 0x16c2: 0x84ed, 0x16c3: 0x850d, 0x16c4: 0x852d, 0x16c5: 0x854d, - 0x16c6: 0x856d, 0x16c7: 0x858d, 0x16c8: 0x850d, 0x16c9: 0x85ad, 0x16ca: 0x850d, 0x16cb: 0x85cd, - 0x16cc: 0x85cd, 0x16cd: 0x85ed, 0x16ce: 0x85ed, 0x16cf: 0x860d, 0x16d0: 0x854d, 0x16d1: 0x862d, - 0x16d2: 0x864d, 0x16d3: 0x862d, 0x16d4: 0x866d, 0x16d5: 0x864d, 0x16d6: 0x868d, 0x16d7: 0x868d, - 0x16d8: 0x86ad, 0x16d9: 0x86ad, 0x16da: 0x86cd, 0x16db: 0x86cd, 0x16dc: 0x864d, 0x16dd: 0x814d, - 0x16de: 0x86ed, 0x16df: 0x870d, 0x16e0: 0x0040, 0x16e1: 0x872d, 0x16e2: 0x874d, 0x16e3: 0x876d, - 0x16e4: 0x878d, 0x16e5: 0x876d, 0x16e6: 0x87ad, 0x16e7: 0x87cd, 0x16e8: 0x87ed, 0x16e9: 0x87ed, - 0x16ea: 0x880d, 0x16eb: 0x880d, 0x16ec: 0x882d, 0x16ed: 0x882d, 0x16ee: 0x880d, 0x16ef: 0x880d, - 0x16f0: 0x884d, 0x16f1: 0x886d, 0x16f2: 0x888d, 0x16f3: 0x88ad, 0x16f4: 0x88cd, 0x16f5: 
0x88ed, - 0x16f6: 0x88ed, 0x16f7: 0x88ed, 0x16f8: 0x890d, 0x16f9: 0x890d, 0x16fa: 0x890d, 0x16fb: 0x890d, - 0x16fc: 0x87ed, 0x16fd: 0x87ed, 0x16fe: 0x87ed, 0x16ff: 0x0040, + 0x16c0: 0x0a08, 0x16c1: 0x0a08, 0x16c2: 0x0a08, 0x16c3: 0x0a08, 0x16c4: 0x0a08, 0x16c5: 0x0c08, + 0x16c6: 0x0808, 0x16c7: 0x0c08, 0x16c8: 0x0818, 0x16c9: 0x0c08, 0x16ca: 0x0c08, 0x16cb: 0x0808, + 0x16cc: 0x0808, 0x16cd: 0x0908, 0x16ce: 0x0c08, 0x16cf: 0x0c08, 0x16d0: 0x0c08, 0x16d1: 0x0c08, + 0x16d2: 0x0c08, 0x16d3: 0x0a08, 0x16d4: 0x0a08, 0x16d5: 0x0a08, 0x16d6: 0x0a08, 0x16d7: 0x0908, + 0x16d8: 0x0a08, 0x16d9: 0x0a08, 0x16da: 0x0a08, 0x16db: 0x0a08, 0x16dc: 0x0a08, 0x16dd: 0x0c08, + 0x16de: 0x0a08, 0x16df: 0x0a08, 0x16e0: 0x0a08, 0x16e1: 0x0c08, 0x16e2: 0x0808, 0x16e3: 0x0808, + 0x16e4: 0x0c08, 0x16e5: 0x3308, 0x16e6: 0x3308, 0x16e7: 0x0040, 0x16e8: 0x0040, 0x16e9: 0x0040, + 0x16ea: 0x0040, 0x16eb: 0x0a18, 0x16ec: 0x0a18, 0x16ed: 0x0a18, 0x16ee: 0x0a18, 0x16ef: 0x0c18, + 0x16f0: 0x0818, 0x16f1: 0x0818, 0x16f2: 0x0818, 0x16f3: 0x0818, 0x16f4: 0x0818, 0x16f5: 0x0818, + 0x16f6: 0x0818, 0x16f7: 0x0040, 0x16f8: 0x0040, 0x16f9: 0x0040, 0x16fa: 0x0040, 0x16fb: 0x0040, + 0x16fc: 0x0040, 0x16fd: 0x0040, 0x16fe: 0x0040, 0x16ff: 0x0040, // Block 0x5c, offset 0x1700 - 0x1700: 0x0040, 0x1701: 0x0040, 0x1702: 0x874d, 0x1703: 0x872d, 0x1704: 0x892d, 0x1705: 0x872d, - 0x1706: 0x874d, 0x1707: 0x872d, 0x1708: 0x0040, 0x1709: 0x0040, 0x170a: 0x894d, 0x170b: 0x874d, - 0x170c: 0x896d, 0x170d: 0x892d, 0x170e: 0x896d, 0x170f: 0x874d, 0x1710: 0x0040, 0x1711: 0x0040, - 0x1712: 0x898d, 0x1713: 0x89ad, 0x1714: 0x88ad, 0x1715: 0x896d, 0x1716: 0x892d, 0x1717: 0x896d, - 0x1718: 0x0040, 0x1719: 0x0040, 0x171a: 0x89cd, 0x171b: 0x89ed, 0x171c: 0x89cd, 0x171d: 0x0040, - 0x171e: 0x0040, 0x171f: 0x0040, 0x1720: 0xb591, 0x1721: 0xb5a9, 0x1722: 0xb5c1, 0x1723: 0x8a0e, - 0x1724: 0xb5d9, 0x1725: 0xb5f1, 0x1726: 0x8a2d, 0x1727: 0x0040, 0x1728: 0x8a4d, 0x1729: 0x8a6d, - 0x172a: 0x8a8d, 0x172b: 0x8a6d, 0x172c: 0x8aad, 0x172d: 0x8acd, 0x172e: 0x8aed, 0x172f: 0x0040, + 0x1700: 0x0a08, 0x1701: 0x0c08, 0x1702: 0x0a08, 0x1703: 0x0c08, 0x1704: 0x0c08, 0x1705: 0x0c08, + 0x1706: 0x0a08, 0x1707: 0x0a08, 0x1708: 0x0a08, 0x1709: 0x0c08, 0x170a: 0x0a08, 0x170b: 0x0a08, + 0x170c: 0x0c08, 0x170d: 0x0a08, 0x170e: 0x0c08, 0x170f: 0x0c08, 0x1710: 0x0a08, 0x1711: 0x0c08, + 0x1712: 0x0040, 0x1713: 0x0040, 0x1714: 0x0040, 0x1715: 0x0040, 0x1716: 0x0040, 0x1717: 0x0040, + 0x1718: 0x0040, 0x1719: 0x0818, 0x171a: 0x0818, 0x171b: 0x0818, 0x171c: 0x0818, 0x171d: 0x0040, + 0x171e: 0x0040, 0x171f: 0x0040, 0x1720: 0x0040, 0x1721: 0x0040, 0x1722: 0x0040, 0x1723: 0x0040, + 0x1724: 0x0040, 0x1725: 0x0040, 0x1726: 0x0040, 0x1727: 0x0040, 0x1728: 0x0040, 0x1729: 0x0c18, + 0x172a: 0x0c18, 0x172b: 0x0c18, 0x172c: 0x0c18, 0x172d: 0x0a18, 0x172e: 0x0a18, 0x172f: 0x0818, 0x1730: 0x0040, 0x1731: 0x0040, 0x1732: 0x0040, 0x1733: 0x0040, 0x1734: 0x0040, 0x1735: 0x0040, - 0x1736: 0x0040, 0x1737: 0x0040, 0x1738: 0x0040, 0x1739: 0x0340, 0x173a: 0x0340, 0x173b: 0x0340, + 0x1736: 0x0040, 0x1737: 0x0040, 0x1738: 0x0040, 0x1739: 0x0040, 0x173a: 0x0040, 0x173b: 0x0040, 0x173c: 0x0040, 0x173d: 0x0040, 0x173e: 0x0040, 0x173f: 0x0040, // Block 0x5d, offset 0x1740 - 0x1740: 0x0a08, 0x1741: 0x0a08, 0x1742: 0x0a08, 0x1743: 0x0a08, 0x1744: 0x0a08, 0x1745: 0x0c08, - 0x1746: 0x0808, 0x1747: 0x0c08, 0x1748: 0x0818, 0x1749: 0x0c08, 0x174a: 0x0c08, 0x174b: 0x0808, - 0x174c: 0x0808, 0x174d: 0x0908, 0x174e: 0x0c08, 0x174f: 0x0c08, 0x1750: 0x0c08, 0x1751: 0x0c08, - 0x1752: 0x0c08, 0x1753: 0x0a08, 0x1754: 0x0a08, 0x1755: 
0x0a08, 0x1756: 0x0a08, 0x1757: 0x0908, - 0x1758: 0x0a08, 0x1759: 0x0a08, 0x175a: 0x0a08, 0x175b: 0x0a08, 0x175c: 0x0a08, 0x175d: 0x0c08, - 0x175e: 0x0a08, 0x175f: 0x0a08, 0x1760: 0x0a08, 0x1761: 0x0c08, 0x1762: 0x0808, 0x1763: 0x0808, - 0x1764: 0x0c08, 0x1765: 0x3308, 0x1766: 0x3308, 0x1767: 0x0040, 0x1768: 0x0040, 0x1769: 0x0040, - 0x176a: 0x0040, 0x176b: 0x0a18, 0x176c: 0x0a18, 0x176d: 0x0a18, 0x176e: 0x0a18, 0x176f: 0x0c18, - 0x1770: 0x0818, 0x1771: 0x0818, 0x1772: 0x0818, 0x1773: 0x0818, 0x1774: 0x0818, 0x1775: 0x0818, - 0x1776: 0x0818, 0x1777: 0x0040, 0x1778: 0x0040, 0x1779: 0x0040, 0x177a: 0x0040, 0x177b: 0x0040, - 0x177c: 0x0040, 0x177d: 0x0040, 0x177e: 0x0040, 0x177f: 0x0040, + 0x1740: 0x3308, 0x1741: 0x3308, 0x1742: 0x3008, 0x1743: 0x3008, 0x1744: 0x0040, 0x1745: 0x0008, + 0x1746: 0x0008, 0x1747: 0x0008, 0x1748: 0x0008, 0x1749: 0x0008, 0x174a: 0x0008, 0x174b: 0x0008, + 0x174c: 0x0008, 0x174d: 0x0040, 0x174e: 0x0040, 0x174f: 0x0008, 0x1750: 0x0008, 0x1751: 0x0040, + 0x1752: 0x0040, 0x1753: 0x0008, 0x1754: 0x0008, 0x1755: 0x0008, 0x1756: 0x0008, 0x1757: 0x0008, + 0x1758: 0x0008, 0x1759: 0x0008, 0x175a: 0x0008, 0x175b: 0x0008, 0x175c: 0x0008, 0x175d: 0x0008, + 0x175e: 0x0008, 0x175f: 0x0008, 0x1760: 0x0008, 0x1761: 0x0008, 0x1762: 0x0008, 0x1763: 0x0008, + 0x1764: 0x0008, 0x1765: 0x0008, 0x1766: 0x0008, 0x1767: 0x0008, 0x1768: 0x0008, 0x1769: 0x0040, + 0x176a: 0x0008, 0x176b: 0x0008, 0x176c: 0x0008, 0x176d: 0x0008, 0x176e: 0x0008, 0x176f: 0x0008, + 0x1770: 0x0008, 0x1771: 0x0040, 0x1772: 0x0008, 0x1773: 0x0008, 0x1774: 0x0040, 0x1775: 0x0008, + 0x1776: 0x0008, 0x1777: 0x0008, 0x1778: 0x0008, 0x1779: 0x0008, 0x177a: 0x0040, 0x177b: 0x3308, + 0x177c: 0x3308, 0x177d: 0x0008, 0x177e: 0x3008, 0x177f: 0x3008, // Block 0x5e, offset 0x1780 - 0x1780: 0x0a08, 0x1781: 0x0c08, 0x1782: 0x0a08, 0x1783: 0x0c08, 0x1784: 0x0c08, 0x1785: 0x0c08, - 0x1786: 0x0a08, 0x1787: 0x0a08, 0x1788: 0x0a08, 0x1789: 0x0c08, 0x178a: 0x0a08, 0x178b: 0x0a08, - 0x178c: 0x0c08, 0x178d: 0x0a08, 0x178e: 0x0c08, 0x178f: 0x0c08, 0x1790: 0x0a08, 0x1791: 0x0c08, - 0x1792: 0x0040, 0x1793: 0x0040, 0x1794: 0x0040, 0x1795: 0x0040, 0x1796: 0x0040, 0x1797: 0x0040, - 0x1798: 0x0040, 0x1799: 0x0818, 0x179a: 0x0818, 0x179b: 0x0818, 0x179c: 0x0818, 0x179d: 0x0040, - 0x179e: 0x0040, 0x179f: 0x0040, 0x17a0: 0x0040, 0x17a1: 0x0040, 0x17a2: 0x0040, 0x17a3: 0x0040, - 0x17a4: 0x0040, 0x17a5: 0x0040, 0x17a6: 0x0040, 0x17a7: 0x0040, 0x17a8: 0x0040, 0x17a9: 0x0c18, - 0x17aa: 0x0c18, 0x17ab: 0x0c18, 0x17ac: 0x0c18, 0x17ad: 0x0a18, 0x17ae: 0x0a18, 0x17af: 0x0818, - 0x17b0: 0x0040, 0x17b1: 0x0040, 0x17b2: 0x0040, 0x17b3: 0x0040, 0x17b4: 0x0040, 0x17b5: 0x0040, + 0x1780: 0x3308, 0x1781: 0x3008, 0x1782: 0x3008, 0x1783: 0x3008, 0x1784: 0x3008, 0x1785: 0x0040, + 0x1786: 0x0040, 0x1787: 0x3008, 0x1788: 0x3008, 0x1789: 0x0040, 0x178a: 0x0040, 0x178b: 0x3008, + 0x178c: 0x3008, 0x178d: 0x3808, 0x178e: 0x0040, 0x178f: 0x0040, 0x1790: 0x0008, 0x1791: 0x0040, + 0x1792: 0x0040, 0x1793: 0x0040, 0x1794: 0x0040, 0x1795: 0x0040, 0x1796: 0x0040, 0x1797: 0x3008, + 0x1798: 0x0040, 0x1799: 0x0040, 0x179a: 0x0040, 0x179b: 0x0040, 0x179c: 0x0040, 0x179d: 0x0008, + 0x179e: 0x0008, 0x179f: 0x0008, 0x17a0: 0x0008, 0x17a1: 0x0008, 0x17a2: 0x3008, 0x17a3: 0x3008, + 0x17a4: 0x0040, 0x17a5: 0x0040, 0x17a6: 0x3308, 0x17a7: 0x3308, 0x17a8: 0x3308, 0x17a9: 0x3308, + 0x17aa: 0x3308, 0x17ab: 0x3308, 0x17ac: 0x3308, 0x17ad: 0x0040, 0x17ae: 0x0040, 0x17af: 0x0040, + 0x17b0: 0x3308, 0x17b1: 0x3308, 0x17b2: 0x3308, 0x17b3: 0x3308, 0x17b4: 0x3308, 0x17b5: 0x0040, 0x17b6: 0x0040, 
0x17b7: 0x0040, 0x17b8: 0x0040, 0x17b9: 0x0040, 0x17ba: 0x0040, 0x17bb: 0x0040, 0x17bc: 0x0040, 0x17bd: 0x0040, 0x17be: 0x0040, 0x17bf: 0x0040, // Block 0x5f, offset 0x17c0 - 0x17c0: 0x3308, 0x17c1: 0x3308, 0x17c2: 0x3008, 0x17c3: 0x3008, 0x17c4: 0x0040, 0x17c5: 0x0008, - 0x17c6: 0x0008, 0x17c7: 0x0008, 0x17c8: 0x0008, 0x17c9: 0x0008, 0x17ca: 0x0008, 0x17cb: 0x0008, - 0x17cc: 0x0008, 0x17cd: 0x0040, 0x17ce: 0x0040, 0x17cf: 0x0008, 0x17d0: 0x0008, 0x17d1: 0x0040, - 0x17d2: 0x0040, 0x17d3: 0x0008, 0x17d4: 0x0008, 0x17d5: 0x0008, 0x17d6: 0x0008, 0x17d7: 0x0008, + 0x17c0: 0x0008, 0x17c1: 0x0008, 0x17c2: 0x0008, 0x17c3: 0x0008, 0x17c4: 0x0008, 0x17c5: 0x0008, + 0x17c6: 0x0008, 0x17c7: 0x0040, 0x17c8: 0x0040, 0x17c9: 0x0008, 0x17ca: 0x0040, 0x17cb: 0x0040, + 0x17cc: 0x0008, 0x17cd: 0x0008, 0x17ce: 0x0008, 0x17cf: 0x0008, 0x17d0: 0x0008, 0x17d1: 0x0008, + 0x17d2: 0x0008, 0x17d3: 0x0008, 0x17d4: 0x0040, 0x17d5: 0x0008, 0x17d6: 0x0008, 0x17d7: 0x0040, 0x17d8: 0x0008, 0x17d9: 0x0008, 0x17da: 0x0008, 0x17db: 0x0008, 0x17dc: 0x0008, 0x17dd: 0x0008, 0x17de: 0x0008, 0x17df: 0x0008, 0x17e0: 0x0008, 0x17e1: 0x0008, 0x17e2: 0x0008, 0x17e3: 0x0008, - 0x17e4: 0x0008, 0x17e5: 0x0008, 0x17e6: 0x0008, 0x17e7: 0x0008, 0x17e8: 0x0008, 0x17e9: 0x0040, + 0x17e4: 0x0008, 0x17e5: 0x0008, 0x17e6: 0x0008, 0x17e7: 0x0008, 0x17e8: 0x0008, 0x17e9: 0x0008, 0x17ea: 0x0008, 0x17eb: 0x0008, 0x17ec: 0x0008, 0x17ed: 0x0008, 0x17ee: 0x0008, 0x17ef: 0x0008, - 0x17f0: 0x0008, 0x17f1: 0x0040, 0x17f2: 0x0008, 0x17f3: 0x0008, 0x17f4: 0x0040, 0x17f5: 0x0008, - 0x17f6: 0x0008, 0x17f7: 0x0008, 0x17f8: 0x0008, 0x17f9: 0x0008, 0x17fa: 0x0040, 0x17fb: 0x3308, - 0x17fc: 0x3308, 0x17fd: 0x0008, 0x17fe: 0x3008, 0x17ff: 0x3008, + 0x17f0: 0x3008, 0x17f1: 0x3008, 0x17f2: 0x3008, 0x17f3: 0x3008, 0x17f4: 0x3008, 0x17f5: 0x3008, + 0x17f6: 0x0040, 0x17f7: 0x3008, 0x17f8: 0x3008, 0x17f9: 0x0040, 0x17fa: 0x0040, 0x17fb: 0x3308, + 0x17fc: 0x3308, 0x17fd: 0x3808, 0x17fe: 0x3b08, 0x17ff: 0x0008, // Block 0x60, offset 0x1800 - 0x1800: 0x3308, 0x1801: 0x3008, 0x1802: 0x3008, 0x1803: 0x3008, 0x1804: 0x3008, 0x1805: 0x0040, - 0x1806: 0x0040, 0x1807: 0x3008, 0x1808: 0x3008, 0x1809: 0x0040, 0x180a: 0x0040, 0x180b: 0x3008, - 0x180c: 0x3008, 0x180d: 0x3808, 0x180e: 0x0040, 0x180f: 0x0040, 0x1810: 0x0008, 0x1811: 0x0040, - 0x1812: 0x0040, 0x1813: 0x0040, 0x1814: 0x0040, 0x1815: 0x0040, 0x1816: 0x0040, 0x1817: 0x3008, - 0x1818: 0x0040, 0x1819: 0x0040, 0x181a: 0x0040, 0x181b: 0x0040, 0x181c: 0x0040, 0x181d: 0x0008, - 0x181e: 0x0008, 0x181f: 0x0008, 0x1820: 0x0008, 0x1821: 0x0008, 0x1822: 0x3008, 0x1823: 0x3008, - 0x1824: 0x0040, 0x1825: 0x0040, 0x1826: 0x3308, 0x1827: 0x3308, 0x1828: 0x3308, 0x1829: 0x3308, - 0x182a: 0x3308, 0x182b: 0x3308, 0x182c: 0x3308, 0x182d: 0x0040, 0x182e: 0x0040, 0x182f: 0x0040, - 0x1830: 0x3308, 0x1831: 0x3308, 0x1832: 0x3308, 0x1833: 0x3308, 0x1834: 0x3308, 0x1835: 0x0040, - 0x1836: 0x0040, 0x1837: 0x0040, 0x1838: 0x0040, 0x1839: 0x0040, 0x183a: 0x0040, 0x183b: 0x0040, - 0x183c: 0x0040, 0x183d: 0x0040, 0x183e: 0x0040, 0x183f: 0x0040, + 0x1800: 0x0019, 0x1801: 0x02e9, 0x1802: 0x03d9, 0x1803: 0x02f1, 0x1804: 0x02f9, 0x1805: 0x03f1, + 0x1806: 0x0309, 0x1807: 0x00a9, 0x1808: 0x0311, 0x1809: 0x00b1, 0x180a: 0x0319, 0x180b: 0x0101, + 0x180c: 0x0321, 0x180d: 0x0329, 0x180e: 0x0051, 0x180f: 0x0339, 0x1810: 0x0751, 0x1811: 0x00b9, + 0x1812: 0x0089, 0x1813: 0x0341, 0x1814: 0x0349, 0x1815: 0x0391, 0x1816: 0x00c1, 0x1817: 0x0109, + 0x1818: 0x00c9, 0x1819: 0x04b1, 0x181a: 0x0019, 0x181b: 0x02e9, 0x181c: 0x03d9, 0x181d: 0x02f1, + 0x181e: 0x02f9, 
0x181f: 0x03f1, 0x1820: 0x0309, 0x1821: 0x00a9, 0x1822: 0x0311, 0x1823: 0x00b1, + 0x1824: 0x0319, 0x1825: 0x0101, 0x1826: 0x0321, 0x1827: 0x0329, 0x1828: 0x0051, 0x1829: 0x0339, + 0x182a: 0x0751, 0x182b: 0x00b9, 0x182c: 0x0089, 0x182d: 0x0341, 0x182e: 0x0349, 0x182f: 0x0391, + 0x1830: 0x00c1, 0x1831: 0x0109, 0x1832: 0x00c9, 0x1833: 0x04b1, 0x1834: 0x0019, 0x1835: 0x02e9, + 0x1836: 0x03d9, 0x1837: 0x02f1, 0x1838: 0x02f9, 0x1839: 0x03f1, 0x183a: 0x0309, 0x183b: 0x00a9, + 0x183c: 0x0311, 0x183d: 0x00b1, 0x183e: 0x0319, 0x183f: 0x0101, // Block 0x61, offset 0x1840 - 0x1840: 0x0008, 0x1841: 0x0008, 0x1842: 0x0008, 0x1843: 0x0008, 0x1844: 0x0008, 0x1845: 0x0008, - 0x1846: 0x0008, 0x1847: 0x0040, 0x1848: 0x0040, 0x1849: 0x0008, 0x184a: 0x0040, 0x184b: 0x0040, - 0x184c: 0x0008, 0x184d: 0x0008, 0x184e: 0x0008, 0x184f: 0x0008, 0x1850: 0x0008, 0x1851: 0x0008, - 0x1852: 0x0008, 0x1853: 0x0008, 0x1854: 0x0040, 0x1855: 0x0008, 0x1856: 0x0008, 0x1857: 0x0040, - 0x1858: 0x0008, 0x1859: 0x0008, 0x185a: 0x0008, 0x185b: 0x0008, 0x185c: 0x0008, 0x185d: 0x0008, - 0x185e: 0x0008, 0x185f: 0x0008, 0x1860: 0x0008, 0x1861: 0x0008, 0x1862: 0x0008, 0x1863: 0x0008, - 0x1864: 0x0008, 0x1865: 0x0008, 0x1866: 0x0008, 0x1867: 0x0008, 0x1868: 0x0008, 0x1869: 0x0008, - 0x186a: 0x0008, 0x186b: 0x0008, 0x186c: 0x0008, 0x186d: 0x0008, 0x186e: 0x0008, 0x186f: 0x0008, - 0x1870: 0x3008, 0x1871: 0x3008, 0x1872: 0x3008, 0x1873: 0x3008, 0x1874: 0x3008, 0x1875: 0x3008, - 0x1876: 0x0040, 0x1877: 0x3008, 0x1878: 0x3008, 0x1879: 0x0040, 0x187a: 0x0040, 0x187b: 0x3308, - 0x187c: 0x3308, 0x187d: 0x3808, 0x187e: 0x3b08, 0x187f: 0x0008, + 0x1840: 0x0321, 0x1841: 0x0329, 0x1842: 0x0051, 0x1843: 0x0339, 0x1844: 0x0751, 0x1845: 0x00b9, + 0x1846: 0x0089, 0x1847: 0x0341, 0x1848: 0x0349, 0x1849: 0x0391, 0x184a: 0x00c1, 0x184b: 0x0109, + 0x184c: 0x00c9, 0x184d: 0x04b1, 0x184e: 0x0019, 0x184f: 0x02e9, 0x1850: 0x03d9, 0x1851: 0x02f1, + 0x1852: 0x02f9, 0x1853: 0x03f1, 0x1854: 0x0309, 0x1855: 0x0040, 0x1856: 0x0311, 0x1857: 0x00b1, + 0x1858: 0x0319, 0x1859: 0x0101, 0x185a: 0x0321, 0x185b: 0x0329, 0x185c: 0x0051, 0x185d: 0x0339, + 0x185e: 0x0751, 0x185f: 0x00b9, 0x1860: 0x0089, 0x1861: 0x0341, 0x1862: 0x0349, 0x1863: 0x0391, + 0x1864: 0x00c1, 0x1865: 0x0109, 0x1866: 0x00c9, 0x1867: 0x04b1, 0x1868: 0x0019, 0x1869: 0x02e9, + 0x186a: 0x03d9, 0x186b: 0x02f1, 0x186c: 0x02f9, 0x186d: 0x03f1, 0x186e: 0x0309, 0x186f: 0x00a9, + 0x1870: 0x0311, 0x1871: 0x00b1, 0x1872: 0x0319, 0x1873: 0x0101, 0x1874: 0x0321, 0x1875: 0x0329, + 0x1876: 0x0051, 0x1877: 0x0339, 0x1878: 0x0751, 0x1879: 0x00b9, 0x187a: 0x0089, 0x187b: 0x0341, + 0x187c: 0x0349, 0x187d: 0x0391, 0x187e: 0x00c1, 0x187f: 0x0109, // Block 0x62, offset 0x1880 - 0x1880: 0x0039, 0x1881: 0x0ee9, 0x1882: 0x1159, 0x1883: 0x0ef9, 0x1884: 0x0f09, 0x1885: 0x1199, - 0x1886: 0x0f31, 0x1887: 0x0249, 0x1888: 0x0f41, 0x1889: 0x0259, 0x188a: 0x0f51, 0x188b: 0x0359, - 0x188c: 0x0f61, 0x188d: 0x0f71, 0x188e: 0x00d9, 0x188f: 0x0f99, 0x1890: 0x2039, 0x1891: 0x0269, - 0x1892: 0x01d9, 0x1893: 0x0fa9, 0x1894: 0x0fb9, 0x1895: 0x1089, 0x1896: 0x0279, 0x1897: 0x0369, - 0x1898: 0x0289, 0x1899: 0x13d1, 0x189a: 0x0039, 0x189b: 0x0ee9, 0x189c: 0x1159, 0x189d: 0x0ef9, - 0x189e: 0x0f09, 0x189f: 0x1199, 0x18a0: 0x0f31, 0x18a1: 0x0249, 0x18a2: 0x0f41, 0x18a3: 0x0259, - 0x18a4: 0x0f51, 0x18a5: 0x0359, 0x18a6: 0x0f61, 0x18a7: 0x0f71, 0x18a8: 0x00d9, 0x18a9: 0x0f99, - 0x18aa: 0x2039, 0x18ab: 0x0269, 0x18ac: 0x01d9, 0x18ad: 0x0fa9, 0x18ae: 0x0fb9, 0x18af: 0x1089, - 0x18b0: 0x0279, 0x18b1: 0x0369, 0x18b2: 0x0289, 0x18b3: 0x13d1, 0x18b4: 0x0039, 
0x18b5: 0x0ee9, - 0x18b6: 0x1159, 0x18b7: 0x0ef9, 0x18b8: 0x0f09, 0x18b9: 0x1199, 0x18ba: 0x0f31, 0x18bb: 0x0249, - 0x18bc: 0x0f41, 0x18bd: 0x0259, 0x18be: 0x0f51, 0x18bf: 0x0359, + 0x1880: 0x00c9, 0x1881: 0x04b1, 0x1882: 0x0019, 0x1883: 0x02e9, 0x1884: 0x03d9, 0x1885: 0x02f1, + 0x1886: 0x02f9, 0x1887: 0x03f1, 0x1888: 0x0309, 0x1889: 0x00a9, 0x188a: 0x0311, 0x188b: 0x00b1, + 0x188c: 0x0319, 0x188d: 0x0101, 0x188e: 0x0321, 0x188f: 0x0329, 0x1890: 0x0051, 0x1891: 0x0339, + 0x1892: 0x0751, 0x1893: 0x00b9, 0x1894: 0x0089, 0x1895: 0x0341, 0x1896: 0x0349, 0x1897: 0x0391, + 0x1898: 0x00c1, 0x1899: 0x0109, 0x189a: 0x00c9, 0x189b: 0x04b1, 0x189c: 0x0019, 0x189d: 0x0040, + 0x189e: 0x03d9, 0x189f: 0x02f1, 0x18a0: 0x0040, 0x18a1: 0x0040, 0x18a2: 0x0309, 0x18a3: 0x0040, + 0x18a4: 0x0040, 0x18a5: 0x00b1, 0x18a6: 0x0319, 0x18a7: 0x0040, 0x18a8: 0x0040, 0x18a9: 0x0329, + 0x18aa: 0x0051, 0x18ab: 0x0339, 0x18ac: 0x0751, 0x18ad: 0x0040, 0x18ae: 0x0089, 0x18af: 0x0341, + 0x18b0: 0x0349, 0x18b1: 0x0391, 0x18b2: 0x00c1, 0x18b3: 0x0109, 0x18b4: 0x00c9, 0x18b5: 0x04b1, + 0x18b6: 0x0019, 0x18b7: 0x02e9, 0x18b8: 0x03d9, 0x18b9: 0x02f1, 0x18ba: 0x0040, 0x18bb: 0x03f1, + 0x18bc: 0x0040, 0x18bd: 0x00a9, 0x18be: 0x0311, 0x18bf: 0x00b1, // Block 0x63, offset 0x18c0 - 0x18c0: 0x0f61, 0x18c1: 0x0f71, 0x18c2: 0x00d9, 0x18c3: 0x0f99, 0x18c4: 0x2039, 0x18c5: 0x0269, - 0x18c6: 0x01d9, 0x18c7: 0x0fa9, 0x18c8: 0x0fb9, 0x18c9: 0x1089, 0x18ca: 0x0279, 0x18cb: 0x0369, - 0x18cc: 0x0289, 0x18cd: 0x13d1, 0x18ce: 0x0039, 0x18cf: 0x0ee9, 0x18d0: 0x1159, 0x18d1: 0x0ef9, - 0x18d2: 0x0f09, 0x18d3: 0x1199, 0x18d4: 0x0f31, 0x18d5: 0x0040, 0x18d6: 0x0f41, 0x18d7: 0x0259, - 0x18d8: 0x0f51, 0x18d9: 0x0359, 0x18da: 0x0f61, 0x18db: 0x0f71, 0x18dc: 0x00d9, 0x18dd: 0x0f99, - 0x18de: 0x2039, 0x18df: 0x0269, 0x18e0: 0x01d9, 0x18e1: 0x0fa9, 0x18e2: 0x0fb9, 0x18e3: 0x1089, - 0x18e4: 0x0279, 0x18e5: 0x0369, 0x18e6: 0x0289, 0x18e7: 0x13d1, 0x18e8: 0x0039, 0x18e9: 0x0ee9, - 0x18ea: 0x1159, 0x18eb: 0x0ef9, 0x18ec: 0x0f09, 0x18ed: 0x1199, 0x18ee: 0x0f31, 0x18ef: 0x0249, - 0x18f0: 0x0f41, 0x18f1: 0x0259, 0x18f2: 0x0f51, 0x18f3: 0x0359, 0x18f4: 0x0f61, 0x18f5: 0x0f71, - 0x18f6: 0x00d9, 0x18f7: 0x0f99, 0x18f8: 0x2039, 0x18f9: 0x0269, 0x18fa: 0x01d9, 0x18fb: 0x0fa9, - 0x18fc: 0x0fb9, 0x18fd: 0x1089, 0x18fe: 0x0279, 0x18ff: 0x0369, + 0x18c0: 0x0319, 0x18c1: 0x0101, 0x18c2: 0x0321, 0x18c3: 0x0329, 0x18c4: 0x0040, 0x18c5: 0x0339, + 0x18c6: 0x0751, 0x18c7: 0x00b9, 0x18c8: 0x0089, 0x18c9: 0x0341, 0x18ca: 0x0349, 0x18cb: 0x0391, + 0x18cc: 0x00c1, 0x18cd: 0x0109, 0x18ce: 0x00c9, 0x18cf: 0x04b1, 0x18d0: 0x0019, 0x18d1: 0x02e9, + 0x18d2: 0x03d9, 0x18d3: 0x02f1, 0x18d4: 0x02f9, 0x18d5: 0x03f1, 0x18d6: 0x0309, 0x18d7: 0x00a9, + 0x18d8: 0x0311, 0x18d9: 0x00b1, 0x18da: 0x0319, 0x18db: 0x0101, 0x18dc: 0x0321, 0x18dd: 0x0329, + 0x18de: 0x0051, 0x18df: 0x0339, 0x18e0: 0x0751, 0x18e1: 0x00b9, 0x18e2: 0x0089, 0x18e3: 0x0341, + 0x18e4: 0x0349, 0x18e5: 0x0391, 0x18e6: 0x00c1, 0x18e7: 0x0109, 0x18e8: 0x00c9, 0x18e9: 0x04b1, + 0x18ea: 0x0019, 0x18eb: 0x02e9, 0x18ec: 0x03d9, 0x18ed: 0x02f1, 0x18ee: 0x02f9, 0x18ef: 0x03f1, + 0x18f0: 0x0309, 0x18f1: 0x00a9, 0x18f2: 0x0311, 0x18f3: 0x00b1, 0x18f4: 0x0319, 0x18f5: 0x0101, + 0x18f6: 0x0321, 0x18f7: 0x0329, 0x18f8: 0x0051, 0x18f9: 0x0339, 0x18fa: 0x0751, 0x18fb: 0x00b9, + 0x18fc: 0x0089, 0x18fd: 0x0341, 0x18fe: 0x0349, 0x18ff: 0x0391, // Block 0x64, offset 0x1900 - 0x1900: 0x0289, 0x1901: 0x13d1, 0x1902: 0x0039, 0x1903: 0x0ee9, 0x1904: 0x1159, 0x1905: 0x0ef9, - 0x1906: 0x0f09, 0x1907: 0x1199, 0x1908: 0x0f31, 0x1909: 0x0249, 0x190a: 
0x0f41, 0x190b: 0x0259, - 0x190c: 0x0f51, 0x190d: 0x0359, 0x190e: 0x0f61, 0x190f: 0x0f71, 0x1910: 0x00d9, 0x1911: 0x0f99, - 0x1912: 0x2039, 0x1913: 0x0269, 0x1914: 0x01d9, 0x1915: 0x0fa9, 0x1916: 0x0fb9, 0x1917: 0x1089, - 0x1918: 0x0279, 0x1919: 0x0369, 0x191a: 0x0289, 0x191b: 0x13d1, 0x191c: 0x0039, 0x191d: 0x0040, - 0x191e: 0x1159, 0x191f: 0x0ef9, 0x1920: 0x0040, 0x1921: 0x0040, 0x1922: 0x0f31, 0x1923: 0x0040, - 0x1924: 0x0040, 0x1925: 0x0259, 0x1926: 0x0f51, 0x1927: 0x0040, 0x1928: 0x0040, 0x1929: 0x0f71, - 0x192a: 0x00d9, 0x192b: 0x0f99, 0x192c: 0x2039, 0x192d: 0x0040, 0x192e: 0x01d9, 0x192f: 0x0fa9, - 0x1930: 0x0fb9, 0x1931: 0x1089, 0x1932: 0x0279, 0x1933: 0x0369, 0x1934: 0x0289, 0x1935: 0x13d1, - 0x1936: 0x0039, 0x1937: 0x0ee9, 0x1938: 0x1159, 0x1939: 0x0ef9, 0x193a: 0x0040, 0x193b: 0x1199, - 0x193c: 0x0040, 0x193d: 0x0249, 0x193e: 0x0f41, 0x193f: 0x0259, + 0x1900: 0x00c1, 0x1901: 0x0109, 0x1902: 0x00c9, 0x1903: 0x04b1, 0x1904: 0x0019, 0x1905: 0x02e9, + 0x1906: 0x0040, 0x1907: 0x02f1, 0x1908: 0x02f9, 0x1909: 0x03f1, 0x190a: 0x0309, 0x190b: 0x0040, + 0x190c: 0x0040, 0x190d: 0x00b1, 0x190e: 0x0319, 0x190f: 0x0101, 0x1910: 0x0321, 0x1911: 0x0329, + 0x1912: 0x0051, 0x1913: 0x0339, 0x1914: 0x0751, 0x1915: 0x0040, 0x1916: 0x0089, 0x1917: 0x0341, + 0x1918: 0x0349, 0x1919: 0x0391, 0x191a: 0x00c1, 0x191b: 0x0109, 0x191c: 0x00c9, 0x191d: 0x0040, + 0x191e: 0x0019, 0x191f: 0x02e9, 0x1920: 0x03d9, 0x1921: 0x02f1, 0x1922: 0x02f9, 0x1923: 0x03f1, + 0x1924: 0x0309, 0x1925: 0x00a9, 0x1926: 0x0311, 0x1927: 0x00b1, 0x1928: 0x0319, 0x1929: 0x0101, + 0x192a: 0x0321, 0x192b: 0x0329, 0x192c: 0x0051, 0x192d: 0x0339, 0x192e: 0x0751, 0x192f: 0x00b9, + 0x1930: 0x0089, 0x1931: 0x0341, 0x1932: 0x0349, 0x1933: 0x0391, 0x1934: 0x00c1, 0x1935: 0x0109, + 0x1936: 0x00c9, 0x1937: 0x04b1, 0x1938: 0x0019, 0x1939: 0x02e9, 0x193a: 0x0040, 0x193b: 0x02f1, + 0x193c: 0x02f9, 0x193d: 0x03f1, 0x193e: 0x0309, 0x193f: 0x0040, // Block 0x65, offset 0x1940 - 0x1940: 0x0f51, 0x1941: 0x0359, 0x1942: 0x0f61, 0x1943: 0x0f71, 0x1944: 0x0040, 0x1945: 0x0f99, - 0x1946: 0x2039, 0x1947: 0x0269, 0x1948: 0x01d9, 0x1949: 0x0fa9, 0x194a: 0x0fb9, 0x194b: 0x1089, - 0x194c: 0x0279, 0x194d: 0x0369, 0x194e: 0x0289, 0x194f: 0x13d1, 0x1950: 0x0039, 0x1951: 0x0ee9, - 0x1952: 0x1159, 0x1953: 0x0ef9, 0x1954: 0x0f09, 0x1955: 0x1199, 0x1956: 0x0f31, 0x1957: 0x0249, - 0x1958: 0x0f41, 0x1959: 0x0259, 0x195a: 0x0f51, 0x195b: 0x0359, 0x195c: 0x0f61, 0x195d: 0x0f71, - 0x195e: 0x00d9, 0x195f: 0x0f99, 0x1960: 0x2039, 0x1961: 0x0269, 0x1962: 0x01d9, 0x1963: 0x0fa9, - 0x1964: 0x0fb9, 0x1965: 0x1089, 0x1966: 0x0279, 0x1967: 0x0369, 0x1968: 0x0289, 0x1969: 0x13d1, - 0x196a: 0x0039, 0x196b: 0x0ee9, 0x196c: 0x1159, 0x196d: 0x0ef9, 0x196e: 0x0f09, 0x196f: 0x1199, - 0x1970: 0x0f31, 0x1971: 0x0249, 0x1972: 0x0f41, 0x1973: 0x0259, 0x1974: 0x0f51, 0x1975: 0x0359, - 0x1976: 0x0f61, 0x1977: 0x0f71, 0x1978: 0x00d9, 0x1979: 0x0f99, 0x197a: 0x2039, 0x197b: 0x0269, - 0x197c: 0x01d9, 0x197d: 0x0fa9, 0x197e: 0x0fb9, 0x197f: 0x1089, + 0x1940: 0x0311, 0x1941: 0x00b1, 0x1942: 0x0319, 0x1943: 0x0101, 0x1944: 0x0321, 0x1945: 0x0040, + 0x1946: 0x0051, 0x1947: 0x0040, 0x1948: 0x0040, 0x1949: 0x0040, 0x194a: 0x0089, 0x194b: 0x0341, + 0x194c: 0x0349, 0x194d: 0x0391, 0x194e: 0x00c1, 0x194f: 0x0109, 0x1950: 0x00c9, 0x1951: 0x0040, + 0x1952: 0x0019, 0x1953: 0x02e9, 0x1954: 0x03d9, 0x1955: 0x02f1, 0x1956: 0x02f9, 0x1957: 0x03f1, + 0x1958: 0x0309, 0x1959: 0x00a9, 0x195a: 0x0311, 0x195b: 0x00b1, 0x195c: 0x0319, 0x195d: 0x0101, + 0x195e: 0x0321, 0x195f: 0x0329, 0x1960: 0x0051, 0x1961: 0x0339, 
0x1962: 0x0751, 0x1963: 0x00b9, + 0x1964: 0x0089, 0x1965: 0x0341, 0x1966: 0x0349, 0x1967: 0x0391, 0x1968: 0x00c1, 0x1969: 0x0109, + 0x196a: 0x00c9, 0x196b: 0x04b1, 0x196c: 0x0019, 0x196d: 0x02e9, 0x196e: 0x03d9, 0x196f: 0x02f1, + 0x1970: 0x02f9, 0x1971: 0x03f1, 0x1972: 0x0309, 0x1973: 0x00a9, 0x1974: 0x0311, 0x1975: 0x00b1, + 0x1976: 0x0319, 0x1977: 0x0101, 0x1978: 0x0321, 0x1979: 0x0329, 0x197a: 0x0051, 0x197b: 0x0339, + 0x197c: 0x0751, 0x197d: 0x00b9, 0x197e: 0x0089, 0x197f: 0x0341, // Block 0x66, offset 0x1980 - 0x1980: 0x0279, 0x1981: 0x0369, 0x1982: 0x0289, 0x1983: 0x13d1, 0x1984: 0x0039, 0x1985: 0x0ee9, - 0x1986: 0x0040, 0x1987: 0x0ef9, 0x1988: 0x0f09, 0x1989: 0x1199, 0x198a: 0x0f31, 0x198b: 0x0040, - 0x198c: 0x0040, 0x198d: 0x0259, 0x198e: 0x0f51, 0x198f: 0x0359, 0x1990: 0x0f61, 0x1991: 0x0f71, - 0x1992: 0x00d9, 0x1993: 0x0f99, 0x1994: 0x2039, 0x1995: 0x0040, 0x1996: 0x01d9, 0x1997: 0x0fa9, - 0x1998: 0x0fb9, 0x1999: 0x1089, 0x199a: 0x0279, 0x199b: 0x0369, 0x199c: 0x0289, 0x199d: 0x0040, - 0x199e: 0x0039, 0x199f: 0x0ee9, 0x19a0: 0x1159, 0x19a1: 0x0ef9, 0x19a2: 0x0f09, 0x19a3: 0x1199, - 0x19a4: 0x0f31, 0x19a5: 0x0249, 0x19a6: 0x0f41, 0x19a7: 0x0259, 0x19a8: 0x0f51, 0x19a9: 0x0359, - 0x19aa: 0x0f61, 0x19ab: 0x0f71, 0x19ac: 0x00d9, 0x19ad: 0x0f99, 0x19ae: 0x2039, 0x19af: 0x0269, - 0x19b0: 0x01d9, 0x19b1: 0x0fa9, 0x19b2: 0x0fb9, 0x19b3: 0x1089, 0x19b4: 0x0279, 0x19b5: 0x0369, - 0x19b6: 0x0289, 0x19b7: 0x13d1, 0x19b8: 0x0039, 0x19b9: 0x0ee9, 0x19ba: 0x0040, 0x19bb: 0x0ef9, - 0x19bc: 0x0f09, 0x19bd: 0x1199, 0x19be: 0x0f31, 0x19bf: 0x0040, + 0x1980: 0x0349, 0x1981: 0x0391, 0x1982: 0x00c1, 0x1983: 0x0109, 0x1984: 0x00c9, 0x1985: 0x04b1, + 0x1986: 0x0019, 0x1987: 0x02e9, 0x1988: 0x03d9, 0x1989: 0x02f1, 0x198a: 0x02f9, 0x198b: 0x03f1, + 0x198c: 0x0309, 0x198d: 0x00a9, 0x198e: 0x0311, 0x198f: 0x00b1, 0x1990: 0x0319, 0x1991: 0x0101, + 0x1992: 0x0321, 0x1993: 0x0329, 0x1994: 0x0051, 0x1995: 0x0339, 0x1996: 0x0751, 0x1997: 0x00b9, + 0x1998: 0x0089, 0x1999: 0x0341, 0x199a: 0x0349, 0x199b: 0x0391, 0x199c: 0x00c1, 0x199d: 0x0109, + 0x199e: 0x00c9, 0x199f: 0x04b1, 0x19a0: 0x0019, 0x19a1: 0x02e9, 0x19a2: 0x03d9, 0x19a3: 0x02f1, + 0x19a4: 0x02f9, 0x19a5: 0x03f1, 0x19a6: 0x0309, 0x19a7: 0x00a9, 0x19a8: 0x0311, 0x19a9: 0x00b1, + 0x19aa: 0x0319, 0x19ab: 0x0101, 0x19ac: 0x0321, 0x19ad: 0x0329, 0x19ae: 0x0051, 0x19af: 0x0339, + 0x19b0: 0x0751, 0x19b1: 0x00b9, 0x19b2: 0x0089, 0x19b3: 0x0341, 0x19b4: 0x0349, 0x19b5: 0x0391, + 0x19b6: 0x00c1, 0x19b7: 0x0109, 0x19b8: 0x00c9, 0x19b9: 0x04b1, 0x19ba: 0x0019, 0x19bb: 0x02e9, + 0x19bc: 0x03d9, 0x19bd: 0x02f1, 0x19be: 0x02f9, 0x19bf: 0x03f1, // Block 0x67, offset 0x19c0 - 0x19c0: 0x0f41, 0x19c1: 0x0259, 0x19c2: 0x0f51, 0x19c3: 0x0359, 0x19c4: 0x0f61, 0x19c5: 0x0040, - 0x19c6: 0x00d9, 0x19c7: 0x0040, 0x19c8: 0x0040, 0x19c9: 0x0040, 0x19ca: 0x01d9, 0x19cb: 0x0fa9, - 0x19cc: 0x0fb9, 0x19cd: 0x1089, 0x19ce: 0x0279, 0x19cf: 0x0369, 0x19d0: 0x0289, 0x19d1: 0x0040, - 0x19d2: 0x0039, 0x19d3: 0x0ee9, 0x19d4: 0x1159, 0x19d5: 0x0ef9, 0x19d6: 0x0f09, 0x19d7: 0x1199, - 0x19d8: 0x0f31, 0x19d9: 0x0249, 0x19da: 0x0f41, 0x19db: 0x0259, 0x19dc: 0x0f51, 0x19dd: 0x0359, - 0x19de: 0x0f61, 0x19df: 0x0f71, 0x19e0: 0x00d9, 0x19e1: 0x0f99, 0x19e2: 0x2039, 0x19e3: 0x0269, - 0x19e4: 0x01d9, 0x19e5: 0x0fa9, 0x19e6: 0x0fb9, 0x19e7: 0x1089, 0x19e8: 0x0279, 0x19e9: 0x0369, - 0x19ea: 0x0289, 0x19eb: 0x13d1, 0x19ec: 0x0039, 0x19ed: 0x0ee9, 0x19ee: 0x1159, 0x19ef: 0x0ef9, - 0x19f0: 0x0f09, 0x19f1: 0x1199, 0x19f2: 0x0f31, 0x19f3: 0x0249, 0x19f4: 0x0f41, 0x19f5: 0x0259, - 0x19f6: 0x0f51, 0x19f7: 
0x0359, 0x19f8: 0x0f61, 0x19f9: 0x0f71, 0x19fa: 0x00d9, 0x19fb: 0x0f99, - 0x19fc: 0x2039, 0x19fd: 0x0269, 0x19fe: 0x01d9, 0x19ff: 0x0fa9, + 0x19c0: 0x0309, 0x19c1: 0x00a9, 0x19c2: 0x0311, 0x19c3: 0x00b1, 0x19c4: 0x0319, 0x19c5: 0x0101, + 0x19c6: 0x0321, 0x19c7: 0x0329, 0x19c8: 0x0051, 0x19c9: 0x0339, 0x19ca: 0x0751, 0x19cb: 0x00b9, + 0x19cc: 0x0089, 0x19cd: 0x0341, 0x19ce: 0x0349, 0x19cf: 0x0391, 0x19d0: 0x00c1, 0x19d1: 0x0109, + 0x19d2: 0x00c9, 0x19d3: 0x04b1, 0x19d4: 0x0019, 0x19d5: 0x02e9, 0x19d6: 0x03d9, 0x19d7: 0x02f1, + 0x19d8: 0x02f9, 0x19d9: 0x03f1, 0x19da: 0x0309, 0x19db: 0x00a9, 0x19dc: 0x0311, 0x19dd: 0x00b1, + 0x19de: 0x0319, 0x19df: 0x0101, 0x19e0: 0x0321, 0x19e1: 0x0329, 0x19e2: 0x0051, 0x19e3: 0x0339, + 0x19e4: 0x0751, 0x19e5: 0x00b9, 0x19e6: 0x0089, 0x19e7: 0x0341, 0x19e8: 0x0349, 0x19e9: 0x0391, + 0x19ea: 0x00c1, 0x19eb: 0x0109, 0x19ec: 0x00c9, 0x19ed: 0x04b1, 0x19ee: 0x0019, 0x19ef: 0x02e9, + 0x19f0: 0x03d9, 0x19f1: 0x02f1, 0x19f2: 0x02f9, 0x19f3: 0x03f1, 0x19f4: 0x0309, 0x19f5: 0x00a9, + 0x19f6: 0x0311, 0x19f7: 0x00b1, 0x19f8: 0x0319, 0x19f9: 0x0101, 0x19fa: 0x0321, 0x19fb: 0x0329, + 0x19fc: 0x0051, 0x19fd: 0x0339, 0x19fe: 0x0751, 0x19ff: 0x00b9, // Block 0x68, offset 0x1a00 - 0x1a00: 0x0fb9, 0x1a01: 0x1089, 0x1a02: 0x0279, 0x1a03: 0x0369, 0x1a04: 0x0289, 0x1a05: 0x13d1, - 0x1a06: 0x0039, 0x1a07: 0x0ee9, 0x1a08: 0x1159, 0x1a09: 0x0ef9, 0x1a0a: 0x0f09, 0x1a0b: 0x1199, - 0x1a0c: 0x0f31, 0x1a0d: 0x0249, 0x1a0e: 0x0f41, 0x1a0f: 0x0259, 0x1a10: 0x0f51, 0x1a11: 0x0359, - 0x1a12: 0x0f61, 0x1a13: 0x0f71, 0x1a14: 0x00d9, 0x1a15: 0x0f99, 0x1a16: 0x2039, 0x1a17: 0x0269, - 0x1a18: 0x01d9, 0x1a19: 0x0fa9, 0x1a1a: 0x0fb9, 0x1a1b: 0x1089, 0x1a1c: 0x0279, 0x1a1d: 0x0369, - 0x1a1e: 0x0289, 0x1a1f: 0x13d1, 0x1a20: 0x0039, 0x1a21: 0x0ee9, 0x1a22: 0x1159, 0x1a23: 0x0ef9, - 0x1a24: 0x0f09, 0x1a25: 0x1199, 0x1a26: 0x0f31, 0x1a27: 0x0249, 0x1a28: 0x0f41, 0x1a29: 0x0259, - 0x1a2a: 0x0f51, 0x1a2b: 0x0359, 0x1a2c: 0x0f61, 0x1a2d: 0x0f71, 0x1a2e: 0x00d9, 0x1a2f: 0x0f99, - 0x1a30: 0x2039, 0x1a31: 0x0269, 0x1a32: 0x01d9, 0x1a33: 0x0fa9, 0x1a34: 0x0fb9, 0x1a35: 0x1089, - 0x1a36: 0x0279, 0x1a37: 0x0369, 0x1a38: 0x0289, 0x1a39: 0x13d1, 0x1a3a: 0x0039, 0x1a3b: 0x0ee9, - 0x1a3c: 0x1159, 0x1a3d: 0x0ef9, 0x1a3e: 0x0f09, 0x1a3f: 0x1199, + 0x1a00: 0x0089, 0x1a01: 0x0341, 0x1a02: 0x0349, 0x1a03: 0x0391, 0x1a04: 0x00c1, 0x1a05: 0x0109, + 0x1a06: 0x00c9, 0x1a07: 0x04b1, 0x1a08: 0x0019, 0x1a09: 0x02e9, 0x1a0a: 0x03d9, 0x1a0b: 0x02f1, + 0x1a0c: 0x02f9, 0x1a0d: 0x03f1, 0x1a0e: 0x0309, 0x1a0f: 0x00a9, 0x1a10: 0x0311, 0x1a11: 0x00b1, + 0x1a12: 0x0319, 0x1a13: 0x0101, 0x1a14: 0x0321, 0x1a15: 0x0329, 0x1a16: 0x0051, 0x1a17: 0x0339, + 0x1a18: 0x0751, 0x1a19: 0x00b9, 0x1a1a: 0x0089, 0x1a1b: 0x0341, 0x1a1c: 0x0349, 0x1a1d: 0x0391, + 0x1a1e: 0x00c1, 0x1a1f: 0x0109, 0x1a20: 0x00c9, 0x1a21: 0x04b1, 0x1a22: 0x0019, 0x1a23: 0x02e9, + 0x1a24: 0x03d9, 0x1a25: 0x02f1, 0x1a26: 0x02f9, 0x1a27: 0x03f1, 0x1a28: 0x0309, 0x1a29: 0x00a9, + 0x1a2a: 0x0311, 0x1a2b: 0x00b1, 0x1a2c: 0x0319, 0x1a2d: 0x0101, 0x1a2e: 0x0321, 0x1a2f: 0x0329, + 0x1a30: 0x0051, 0x1a31: 0x0339, 0x1a32: 0x0751, 0x1a33: 0x00b9, 0x1a34: 0x0089, 0x1a35: 0x0341, + 0x1a36: 0x0349, 0x1a37: 0x0391, 0x1a38: 0x00c1, 0x1a39: 0x0109, 0x1a3a: 0x00c9, 0x1a3b: 0x04b1, + 0x1a3c: 0x0019, 0x1a3d: 0x02e9, 0x1a3e: 0x03d9, 0x1a3f: 0x02f1, // Block 0x69, offset 0x1a40 - 0x1a40: 0x0f31, 0x1a41: 0x0249, 0x1a42: 0x0f41, 0x1a43: 0x0259, 0x1a44: 0x0f51, 0x1a45: 0x0359, - 0x1a46: 0x0f61, 0x1a47: 0x0f71, 0x1a48: 0x00d9, 0x1a49: 0x0f99, 0x1a4a: 0x2039, 0x1a4b: 0x0269, - 0x1a4c: 0x01d9, 
0x1a4d: 0x0fa9, 0x1a4e: 0x0fb9, 0x1a4f: 0x1089, 0x1a50: 0x0279, 0x1a51: 0x0369, - 0x1a52: 0x0289, 0x1a53: 0x13d1, 0x1a54: 0x0039, 0x1a55: 0x0ee9, 0x1a56: 0x1159, 0x1a57: 0x0ef9, - 0x1a58: 0x0f09, 0x1a59: 0x1199, 0x1a5a: 0x0f31, 0x1a5b: 0x0249, 0x1a5c: 0x0f41, 0x1a5d: 0x0259, - 0x1a5e: 0x0f51, 0x1a5f: 0x0359, 0x1a60: 0x0f61, 0x1a61: 0x0f71, 0x1a62: 0x00d9, 0x1a63: 0x0f99, - 0x1a64: 0x2039, 0x1a65: 0x0269, 0x1a66: 0x01d9, 0x1a67: 0x0fa9, 0x1a68: 0x0fb9, 0x1a69: 0x1089, - 0x1a6a: 0x0279, 0x1a6b: 0x0369, 0x1a6c: 0x0289, 0x1a6d: 0x13d1, 0x1a6e: 0x0039, 0x1a6f: 0x0ee9, - 0x1a70: 0x1159, 0x1a71: 0x0ef9, 0x1a72: 0x0f09, 0x1a73: 0x1199, 0x1a74: 0x0f31, 0x1a75: 0x0249, - 0x1a76: 0x0f41, 0x1a77: 0x0259, 0x1a78: 0x0f51, 0x1a79: 0x0359, 0x1a7a: 0x0f61, 0x1a7b: 0x0f71, - 0x1a7c: 0x00d9, 0x1a7d: 0x0f99, 0x1a7e: 0x2039, 0x1a7f: 0x0269, + 0x1a40: 0x02f9, 0x1a41: 0x03f1, 0x1a42: 0x0309, 0x1a43: 0x00a9, 0x1a44: 0x0311, 0x1a45: 0x00b1, + 0x1a46: 0x0319, 0x1a47: 0x0101, 0x1a48: 0x0321, 0x1a49: 0x0329, 0x1a4a: 0x0051, 0x1a4b: 0x0339, + 0x1a4c: 0x0751, 0x1a4d: 0x00b9, 0x1a4e: 0x0089, 0x1a4f: 0x0341, 0x1a50: 0x0349, 0x1a51: 0x0391, + 0x1a52: 0x00c1, 0x1a53: 0x0109, 0x1a54: 0x00c9, 0x1a55: 0x04b1, 0x1a56: 0x0019, 0x1a57: 0x02e9, + 0x1a58: 0x03d9, 0x1a59: 0x02f1, 0x1a5a: 0x02f9, 0x1a5b: 0x03f1, 0x1a5c: 0x0309, 0x1a5d: 0x00a9, + 0x1a5e: 0x0311, 0x1a5f: 0x00b1, 0x1a60: 0x0319, 0x1a61: 0x0101, 0x1a62: 0x0321, 0x1a63: 0x0329, + 0x1a64: 0x0051, 0x1a65: 0x0339, 0x1a66: 0x0751, 0x1a67: 0x00b9, 0x1a68: 0x0089, 0x1a69: 0x0341, + 0x1a6a: 0x0349, 0x1a6b: 0x0391, 0x1a6c: 0x00c1, 0x1a6d: 0x0109, 0x1a6e: 0x00c9, 0x1a6f: 0x04b1, + 0x1a70: 0x0019, 0x1a71: 0x02e9, 0x1a72: 0x03d9, 0x1a73: 0x02f1, 0x1a74: 0x02f9, 0x1a75: 0x03f1, + 0x1a76: 0x0309, 0x1a77: 0x00a9, 0x1a78: 0x0311, 0x1a79: 0x00b1, 0x1a7a: 0x0319, 0x1a7b: 0x0101, + 0x1a7c: 0x0321, 0x1a7d: 0x0329, 0x1a7e: 0x0051, 0x1a7f: 0x0339, // Block 0x6a, offset 0x1a80 - 0x1a80: 0x01d9, 0x1a81: 0x0fa9, 0x1a82: 0x0fb9, 0x1a83: 0x1089, 0x1a84: 0x0279, 0x1a85: 0x0369, - 0x1a86: 0x0289, 0x1a87: 0x13d1, 0x1a88: 0x0039, 0x1a89: 0x0ee9, 0x1a8a: 0x1159, 0x1a8b: 0x0ef9, - 0x1a8c: 0x0f09, 0x1a8d: 0x1199, 0x1a8e: 0x0f31, 0x1a8f: 0x0249, 0x1a90: 0x0f41, 0x1a91: 0x0259, - 0x1a92: 0x0f51, 0x1a93: 0x0359, 0x1a94: 0x0f61, 0x1a95: 0x0f71, 0x1a96: 0x00d9, 0x1a97: 0x0f99, - 0x1a98: 0x2039, 0x1a99: 0x0269, 0x1a9a: 0x01d9, 0x1a9b: 0x0fa9, 0x1a9c: 0x0fb9, 0x1a9d: 0x1089, - 0x1a9e: 0x0279, 0x1a9f: 0x0369, 0x1aa0: 0x0289, 0x1aa1: 0x13d1, 0x1aa2: 0x0039, 0x1aa3: 0x0ee9, - 0x1aa4: 0x1159, 0x1aa5: 0x0ef9, 0x1aa6: 0x0f09, 0x1aa7: 0x1199, 0x1aa8: 0x0f31, 0x1aa9: 0x0249, - 0x1aaa: 0x0f41, 0x1aab: 0x0259, 0x1aac: 0x0f51, 0x1aad: 0x0359, 0x1aae: 0x0f61, 0x1aaf: 0x0f71, - 0x1ab0: 0x00d9, 0x1ab1: 0x0f99, 0x1ab2: 0x2039, 0x1ab3: 0x0269, 0x1ab4: 0x01d9, 0x1ab5: 0x0fa9, - 0x1ab6: 0x0fb9, 0x1ab7: 0x1089, 0x1ab8: 0x0279, 0x1ab9: 0x0369, 0x1aba: 0x0289, 0x1abb: 0x13d1, - 0x1abc: 0x0039, 0x1abd: 0x0ee9, 0x1abe: 0x1159, 0x1abf: 0x0ef9, + 0x1a80: 0x0751, 0x1a81: 0x00b9, 0x1a82: 0x0089, 0x1a83: 0x0341, 0x1a84: 0x0349, 0x1a85: 0x0391, + 0x1a86: 0x00c1, 0x1a87: 0x0109, 0x1a88: 0x00c9, 0x1a89: 0x04b1, 0x1a8a: 0x0019, 0x1a8b: 0x02e9, + 0x1a8c: 0x03d9, 0x1a8d: 0x02f1, 0x1a8e: 0x02f9, 0x1a8f: 0x03f1, 0x1a90: 0x0309, 0x1a91: 0x00a9, + 0x1a92: 0x0311, 0x1a93: 0x00b1, 0x1a94: 0x0319, 0x1a95: 0x0101, 0x1a96: 0x0321, 0x1a97: 0x0329, + 0x1a98: 0x0051, 0x1a99: 0x0339, 0x1a9a: 0x0751, 0x1a9b: 0x00b9, 0x1a9c: 0x0089, 0x1a9d: 0x0341, + 0x1a9e: 0x0349, 0x1a9f: 0x0391, 0x1aa0: 0x00c1, 0x1aa1: 0x0109, 0x1aa2: 0x00c9, 0x1aa3: 0x04b1, + 0x1aa4: 
0x2279, 0x1aa5: 0x2281, 0x1aa6: 0x0040, 0x1aa7: 0x0040, 0x1aa8: 0x2289, 0x1aa9: 0x0399, + 0x1aaa: 0x03a1, 0x1aab: 0x03a9, 0x1aac: 0x2291, 0x1aad: 0x2299, 0x1aae: 0x22a1, 0x1aaf: 0x04d1, + 0x1ab0: 0x05f9, 0x1ab1: 0x22a9, 0x1ab2: 0x22b1, 0x1ab3: 0x22b9, 0x1ab4: 0x22c1, 0x1ab5: 0x22c9, + 0x1ab6: 0x22d1, 0x1ab7: 0x0799, 0x1ab8: 0x03c1, 0x1ab9: 0x04d1, 0x1aba: 0x22d9, 0x1abb: 0x22e1, + 0x1abc: 0x22e9, 0x1abd: 0x03b1, 0x1abe: 0x03b9, 0x1abf: 0x22f1, // Block 0x6b, offset 0x1ac0 - 0x1ac0: 0x0f09, 0x1ac1: 0x1199, 0x1ac2: 0x0f31, 0x1ac3: 0x0249, 0x1ac4: 0x0f41, 0x1ac5: 0x0259, - 0x1ac6: 0x0f51, 0x1ac7: 0x0359, 0x1ac8: 0x0f61, 0x1ac9: 0x0f71, 0x1aca: 0x00d9, 0x1acb: 0x0f99, - 0x1acc: 0x2039, 0x1acd: 0x0269, 0x1ace: 0x01d9, 0x1acf: 0x0fa9, 0x1ad0: 0x0fb9, 0x1ad1: 0x1089, - 0x1ad2: 0x0279, 0x1ad3: 0x0369, 0x1ad4: 0x0289, 0x1ad5: 0x13d1, 0x1ad6: 0x0039, 0x1ad7: 0x0ee9, - 0x1ad8: 0x1159, 0x1ad9: 0x0ef9, 0x1ada: 0x0f09, 0x1adb: 0x1199, 0x1adc: 0x0f31, 0x1add: 0x0249, - 0x1ade: 0x0f41, 0x1adf: 0x0259, 0x1ae0: 0x0f51, 0x1ae1: 0x0359, 0x1ae2: 0x0f61, 0x1ae3: 0x0f71, - 0x1ae4: 0x00d9, 0x1ae5: 0x0f99, 0x1ae6: 0x2039, 0x1ae7: 0x0269, 0x1ae8: 0x01d9, 0x1ae9: 0x0fa9, - 0x1aea: 0x0fb9, 0x1aeb: 0x1089, 0x1aec: 0x0279, 0x1aed: 0x0369, 0x1aee: 0x0289, 0x1aef: 0x13d1, - 0x1af0: 0x0039, 0x1af1: 0x0ee9, 0x1af2: 0x1159, 0x1af3: 0x0ef9, 0x1af4: 0x0f09, 0x1af5: 0x1199, - 0x1af6: 0x0f31, 0x1af7: 0x0249, 0x1af8: 0x0f41, 0x1af9: 0x0259, 0x1afa: 0x0f51, 0x1afb: 0x0359, - 0x1afc: 0x0f61, 0x1afd: 0x0f71, 0x1afe: 0x00d9, 0x1aff: 0x0f99, + 0x1ac0: 0x0769, 0x1ac1: 0x22f9, 0x1ac2: 0x2289, 0x1ac3: 0x0399, 0x1ac4: 0x03a1, 0x1ac5: 0x03a9, + 0x1ac6: 0x2291, 0x1ac7: 0x2299, 0x1ac8: 0x22a1, 0x1ac9: 0x04d1, 0x1aca: 0x05f9, 0x1acb: 0x22a9, + 0x1acc: 0x22b1, 0x1acd: 0x22b9, 0x1ace: 0x22c1, 0x1acf: 0x22c9, 0x1ad0: 0x22d1, 0x1ad1: 0x0799, + 0x1ad2: 0x03c1, 0x1ad3: 0x22d9, 0x1ad4: 0x22d9, 0x1ad5: 0x22e1, 0x1ad6: 0x22e9, 0x1ad7: 0x03b1, + 0x1ad8: 0x03b9, 0x1ad9: 0x22f1, 0x1ada: 0x0769, 0x1adb: 0x2301, 0x1adc: 0x2291, 0x1add: 0x04d1, + 0x1ade: 0x22a9, 0x1adf: 0x03b1, 0x1ae0: 0x03c1, 0x1ae1: 0x0799, 0x1ae2: 0x2289, 0x1ae3: 0x0399, + 0x1ae4: 0x03a1, 0x1ae5: 0x03a9, 0x1ae6: 0x2291, 0x1ae7: 0x2299, 0x1ae8: 0x22a1, 0x1ae9: 0x04d1, + 0x1aea: 0x05f9, 0x1aeb: 0x22a9, 0x1aec: 0x22b1, 0x1aed: 0x22b9, 0x1aee: 0x22c1, 0x1aef: 0x22c9, + 0x1af0: 0x22d1, 0x1af1: 0x0799, 0x1af2: 0x03c1, 0x1af3: 0x04d1, 0x1af4: 0x22d9, 0x1af5: 0x22e1, + 0x1af6: 0x22e9, 0x1af7: 0x03b1, 0x1af8: 0x03b9, 0x1af9: 0x22f1, 0x1afa: 0x0769, 0x1afb: 0x22f9, + 0x1afc: 0x2289, 0x1afd: 0x0399, 0x1afe: 0x03a1, 0x1aff: 0x03a9, // Block 0x6c, offset 0x1b00 - 0x1b00: 0x2039, 0x1b01: 0x0269, 0x1b02: 0x01d9, 0x1b03: 0x0fa9, 0x1b04: 0x0fb9, 0x1b05: 0x1089, - 0x1b06: 0x0279, 0x1b07: 0x0369, 0x1b08: 0x0289, 0x1b09: 0x13d1, 0x1b0a: 0x0039, 0x1b0b: 0x0ee9, - 0x1b0c: 0x1159, 0x1b0d: 0x0ef9, 0x1b0e: 0x0f09, 0x1b0f: 0x1199, 0x1b10: 0x0f31, 0x1b11: 0x0249, - 0x1b12: 0x0f41, 0x1b13: 0x0259, 0x1b14: 0x0f51, 0x1b15: 0x0359, 0x1b16: 0x0f61, 0x1b17: 0x0f71, - 0x1b18: 0x00d9, 0x1b19: 0x0f99, 0x1b1a: 0x2039, 0x1b1b: 0x0269, 0x1b1c: 0x01d9, 0x1b1d: 0x0fa9, - 0x1b1e: 0x0fb9, 0x1b1f: 0x1089, 0x1b20: 0x0279, 0x1b21: 0x0369, 0x1b22: 0x0289, 0x1b23: 0x13d1, - 0x1b24: 0xbad1, 0x1b25: 0xbae9, 0x1b26: 0x0040, 0x1b27: 0x0040, 0x1b28: 0xbb01, 0x1b29: 0x1099, - 0x1b2a: 0x10b1, 0x1b2b: 0x10c9, 0x1b2c: 0xbb19, 0x1b2d: 0xbb31, 0x1b2e: 0xbb49, 0x1b2f: 0x1429, - 0x1b30: 0x1a31, 0x1b31: 0xbb61, 0x1b32: 0xbb79, 0x1b33: 0xbb91, 0x1b34: 0xbba9, 0x1b35: 0xbbc1, - 0x1b36: 0xbbd9, 0x1b37: 0x2109, 0x1b38: 0x1111, 0x1b39: 0x1429, 0x1b3a: 
0xbbf1, 0x1b3b: 0xbc09, - 0x1b3c: 0xbc21, 0x1b3d: 0x10e1, 0x1b3e: 0x10f9, 0x1b3f: 0xbc39, + 0x1b00: 0x2291, 0x1b01: 0x2299, 0x1b02: 0x22a1, 0x1b03: 0x04d1, 0x1b04: 0x05f9, 0x1b05: 0x22a9, + 0x1b06: 0x22b1, 0x1b07: 0x22b9, 0x1b08: 0x22c1, 0x1b09: 0x22c9, 0x1b0a: 0x22d1, 0x1b0b: 0x0799, + 0x1b0c: 0x03c1, 0x1b0d: 0x22d9, 0x1b0e: 0x22d9, 0x1b0f: 0x22e1, 0x1b10: 0x22e9, 0x1b11: 0x03b1, + 0x1b12: 0x03b9, 0x1b13: 0x22f1, 0x1b14: 0x0769, 0x1b15: 0x2301, 0x1b16: 0x2291, 0x1b17: 0x04d1, + 0x1b18: 0x22a9, 0x1b19: 0x03b1, 0x1b1a: 0x03c1, 0x1b1b: 0x0799, 0x1b1c: 0x2289, 0x1b1d: 0x0399, + 0x1b1e: 0x03a1, 0x1b1f: 0x03a9, 0x1b20: 0x2291, 0x1b21: 0x2299, 0x1b22: 0x22a1, 0x1b23: 0x04d1, + 0x1b24: 0x05f9, 0x1b25: 0x22a9, 0x1b26: 0x22b1, 0x1b27: 0x22b9, 0x1b28: 0x22c1, 0x1b29: 0x22c9, + 0x1b2a: 0x22d1, 0x1b2b: 0x0799, 0x1b2c: 0x03c1, 0x1b2d: 0x04d1, 0x1b2e: 0x22d9, 0x1b2f: 0x22e1, + 0x1b30: 0x22e9, 0x1b31: 0x03b1, 0x1b32: 0x03b9, 0x1b33: 0x22f1, 0x1b34: 0x0769, 0x1b35: 0x22f9, + 0x1b36: 0x2289, 0x1b37: 0x0399, 0x1b38: 0x03a1, 0x1b39: 0x03a9, 0x1b3a: 0x2291, 0x1b3b: 0x2299, + 0x1b3c: 0x22a1, 0x1b3d: 0x04d1, 0x1b3e: 0x05f9, 0x1b3f: 0x22a9, // Block 0x6d, offset 0x1b40 - 0x1b40: 0x2079, 0x1b41: 0xbc51, 0x1b42: 0xbb01, 0x1b43: 0x1099, 0x1b44: 0x10b1, 0x1b45: 0x10c9, - 0x1b46: 0xbb19, 0x1b47: 0xbb31, 0x1b48: 0xbb49, 0x1b49: 0x1429, 0x1b4a: 0x1a31, 0x1b4b: 0xbb61, - 0x1b4c: 0xbb79, 0x1b4d: 0xbb91, 0x1b4e: 0xbba9, 0x1b4f: 0xbbc1, 0x1b50: 0xbbd9, 0x1b51: 0x2109, - 0x1b52: 0x1111, 0x1b53: 0xbbf1, 0x1b54: 0xbbf1, 0x1b55: 0xbc09, 0x1b56: 0xbc21, 0x1b57: 0x10e1, - 0x1b58: 0x10f9, 0x1b59: 0xbc39, 0x1b5a: 0x2079, 0x1b5b: 0xbc71, 0x1b5c: 0xbb19, 0x1b5d: 0x1429, - 0x1b5e: 0xbb61, 0x1b5f: 0x10e1, 0x1b60: 0x1111, 0x1b61: 0x2109, 0x1b62: 0xbb01, 0x1b63: 0x1099, - 0x1b64: 0x10b1, 0x1b65: 0x10c9, 0x1b66: 0xbb19, 0x1b67: 0xbb31, 0x1b68: 0xbb49, 0x1b69: 0x1429, - 0x1b6a: 0x1a31, 0x1b6b: 0xbb61, 0x1b6c: 0xbb79, 0x1b6d: 0xbb91, 0x1b6e: 0xbba9, 0x1b6f: 0xbbc1, - 0x1b70: 0xbbd9, 0x1b71: 0x2109, 0x1b72: 0x1111, 0x1b73: 0x1429, 0x1b74: 0xbbf1, 0x1b75: 0xbc09, - 0x1b76: 0xbc21, 0x1b77: 0x10e1, 0x1b78: 0x10f9, 0x1b79: 0xbc39, 0x1b7a: 0x2079, 0x1b7b: 0xbc51, - 0x1b7c: 0xbb01, 0x1b7d: 0x1099, 0x1b7e: 0x10b1, 0x1b7f: 0x10c9, + 0x1b40: 0x22b1, 0x1b41: 0x22b9, 0x1b42: 0x22c1, 0x1b43: 0x22c9, 0x1b44: 0x22d1, 0x1b45: 0x0799, + 0x1b46: 0x03c1, 0x1b47: 0x22d9, 0x1b48: 0x22d9, 0x1b49: 0x22e1, 0x1b4a: 0x22e9, 0x1b4b: 0x03b1, + 0x1b4c: 0x03b9, 0x1b4d: 0x22f1, 0x1b4e: 0x0769, 0x1b4f: 0x2301, 0x1b50: 0x2291, 0x1b51: 0x04d1, + 0x1b52: 0x22a9, 0x1b53: 0x03b1, 0x1b54: 0x03c1, 0x1b55: 0x0799, 0x1b56: 0x2289, 0x1b57: 0x0399, + 0x1b58: 0x03a1, 0x1b59: 0x03a9, 0x1b5a: 0x2291, 0x1b5b: 0x2299, 0x1b5c: 0x22a1, 0x1b5d: 0x04d1, + 0x1b5e: 0x05f9, 0x1b5f: 0x22a9, 0x1b60: 0x22b1, 0x1b61: 0x22b9, 0x1b62: 0x22c1, 0x1b63: 0x22c9, + 0x1b64: 0x22d1, 0x1b65: 0x0799, 0x1b66: 0x03c1, 0x1b67: 0x04d1, 0x1b68: 0x22d9, 0x1b69: 0x22e1, + 0x1b6a: 0x22e9, 0x1b6b: 0x03b1, 0x1b6c: 0x03b9, 0x1b6d: 0x22f1, 0x1b6e: 0x0769, 0x1b6f: 0x22f9, + 0x1b70: 0x2289, 0x1b71: 0x0399, 0x1b72: 0x03a1, 0x1b73: 0x03a9, 0x1b74: 0x2291, 0x1b75: 0x2299, + 0x1b76: 0x22a1, 0x1b77: 0x04d1, 0x1b78: 0x05f9, 0x1b79: 0x22a9, 0x1b7a: 0x22b1, 0x1b7b: 0x22b9, + 0x1b7c: 0x22c1, 0x1b7d: 0x22c9, 0x1b7e: 0x22d1, 0x1b7f: 0x0799, // Block 0x6e, offset 0x1b80 - 0x1b80: 0xbb19, 0x1b81: 0xbb31, 0x1b82: 0xbb49, 0x1b83: 0x1429, 0x1b84: 0x1a31, 0x1b85: 0xbb61, - 0x1b86: 0xbb79, 0x1b87: 0xbb91, 0x1b88: 0xbba9, 0x1b89: 0xbbc1, 0x1b8a: 0xbbd9, 0x1b8b: 0x2109, - 0x1b8c: 0x1111, 0x1b8d: 0xbbf1, 0x1b8e: 0xbbf1, 0x1b8f: 0xbc09, 
0x1b90: 0xbc21, 0x1b91: 0x10e1, - 0x1b92: 0x10f9, 0x1b93: 0xbc39, 0x1b94: 0x2079, 0x1b95: 0xbc71, 0x1b96: 0xbb19, 0x1b97: 0x1429, - 0x1b98: 0xbb61, 0x1b99: 0x10e1, 0x1b9a: 0x1111, 0x1b9b: 0x2109, 0x1b9c: 0xbb01, 0x1b9d: 0x1099, - 0x1b9e: 0x10b1, 0x1b9f: 0x10c9, 0x1ba0: 0xbb19, 0x1ba1: 0xbb31, 0x1ba2: 0xbb49, 0x1ba3: 0x1429, - 0x1ba4: 0x1a31, 0x1ba5: 0xbb61, 0x1ba6: 0xbb79, 0x1ba7: 0xbb91, 0x1ba8: 0xbba9, 0x1ba9: 0xbbc1, - 0x1baa: 0xbbd9, 0x1bab: 0x2109, 0x1bac: 0x1111, 0x1bad: 0x1429, 0x1bae: 0xbbf1, 0x1baf: 0xbc09, - 0x1bb0: 0xbc21, 0x1bb1: 0x10e1, 0x1bb2: 0x10f9, 0x1bb3: 0xbc39, 0x1bb4: 0x2079, 0x1bb5: 0xbc51, - 0x1bb6: 0xbb01, 0x1bb7: 0x1099, 0x1bb8: 0x10b1, 0x1bb9: 0x10c9, 0x1bba: 0xbb19, 0x1bbb: 0xbb31, - 0x1bbc: 0xbb49, 0x1bbd: 0x1429, 0x1bbe: 0x1a31, 0x1bbf: 0xbb61, + 0x1b80: 0x03c1, 0x1b81: 0x22d9, 0x1b82: 0x22d9, 0x1b83: 0x22e1, 0x1b84: 0x22e9, 0x1b85: 0x03b1, + 0x1b86: 0x03b9, 0x1b87: 0x22f1, 0x1b88: 0x0769, 0x1b89: 0x2301, 0x1b8a: 0x2291, 0x1b8b: 0x04d1, + 0x1b8c: 0x22a9, 0x1b8d: 0x03b1, 0x1b8e: 0x03c1, 0x1b8f: 0x0799, 0x1b90: 0x2289, 0x1b91: 0x0399, + 0x1b92: 0x03a1, 0x1b93: 0x03a9, 0x1b94: 0x2291, 0x1b95: 0x2299, 0x1b96: 0x22a1, 0x1b97: 0x04d1, + 0x1b98: 0x05f9, 0x1b99: 0x22a9, 0x1b9a: 0x22b1, 0x1b9b: 0x22b9, 0x1b9c: 0x22c1, 0x1b9d: 0x22c9, + 0x1b9e: 0x22d1, 0x1b9f: 0x0799, 0x1ba0: 0x03c1, 0x1ba1: 0x04d1, 0x1ba2: 0x22d9, 0x1ba3: 0x22e1, + 0x1ba4: 0x22e9, 0x1ba5: 0x03b1, 0x1ba6: 0x03b9, 0x1ba7: 0x22f1, 0x1ba8: 0x0769, 0x1ba9: 0x22f9, + 0x1baa: 0x2289, 0x1bab: 0x0399, 0x1bac: 0x03a1, 0x1bad: 0x03a9, 0x1bae: 0x2291, 0x1baf: 0x2299, + 0x1bb0: 0x22a1, 0x1bb1: 0x04d1, 0x1bb2: 0x05f9, 0x1bb3: 0x22a9, 0x1bb4: 0x22b1, 0x1bb5: 0x22b9, + 0x1bb6: 0x22c1, 0x1bb7: 0x22c9, 0x1bb8: 0x22d1, 0x1bb9: 0x0799, 0x1bba: 0x03c1, 0x1bbb: 0x22d9, + 0x1bbc: 0x22d9, 0x1bbd: 0x22e1, 0x1bbe: 0x22e9, 0x1bbf: 0x03b1, // Block 0x6f, offset 0x1bc0 - 0x1bc0: 0xbb79, 0x1bc1: 0xbb91, 0x1bc2: 0xbba9, 0x1bc3: 0xbbc1, 0x1bc4: 0xbbd9, 0x1bc5: 0x2109, - 0x1bc6: 0x1111, 0x1bc7: 0xbbf1, 0x1bc8: 0xbbf1, 0x1bc9: 0xbc09, 0x1bca: 0xbc21, 0x1bcb: 0x10e1, - 0x1bcc: 0x10f9, 0x1bcd: 0xbc39, 0x1bce: 0x2079, 0x1bcf: 0xbc71, 0x1bd0: 0xbb19, 0x1bd1: 0x1429, - 0x1bd2: 0xbb61, 0x1bd3: 0x10e1, 0x1bd4: 0x1111, 0x1bd5: 0x2109, 0x1bd6: 0xbb01, 0x1bd7: 0x1099, - 0x1bd8: 0x10b1, 0x1bd9: 0x10c9, 0x1bda: 0xbb19, 0x1bdb: 0xbb31, 0x1bdc: 0xbb49, 0x1bdd: 0x1429, - 0x1bde: 0x1a31, 0x1bdf: 0xbb61, 0x1be0: 0xbb79, 0x1be1: 0xbb91, 0x1be2: 0xbba9, 0x1be3: 0xbbc1, - 0x1be4: 0xbbd9, 0x1be5: 0x2109, 0x1be6: 0x1111, 0x1be7: 0x1429, 0x1be8: 0xbbf1, 0x1be9: 0xbc09, - 0x1bea: 0xbc21, 0x1beb: 0x10e1, 0x1bec: 0x10f9, 0x1bed: 0xbc39, 0x1bee: 0x2079, 0x1bef: 0xbc51, - 0x1bf0: 0xbb01, 0x1bf1: 0x1099, 0x1bf2: 0x10b1, 0x1bf3: 0x10c9, 0x1bf4: 0xbb19, 0x1bf5: 0xbb31, - 0x1bf6: 0xbb49, 0x1bf7: 0x1429, 0x1bf8: 0x1a31, 0x1bf9: 0xbb61, 0x1bfa: 0xbb79, 0x1bfb: 0xbb91, - 0x1bfc: 0xbba9, 0x1bfd: 0xbbc1, 0x1bfe: 0xbbd9, 0x1bff: 0x2109, + 0x1bc0: 0x03b9, 0x1bc1: 0x22f1, 0x1bc2: 0x0769, 0x1bc3: 0x2301, 0x1bc4: 0x2291, 0x1bc5: 0x04d1, + 0x1bc6: 0x22a9, 0x1bc7: 0x03b1, 0x1bc8: 0x03c1, 0x1bc9: 0x0799, 0x1bca: 0x2309, 0x1bcb: 0x2309, + 0x1bcc: 0x0040, 0x1bcd: 0x0040, 0x1bce: 0x06e1, 0x1bcf: 0x0049, 0x1bd0: 0x0029, 0x1bd1: 0x0031, + 0x1bd2: 0x06e9, 0x1bd3: 0x06f1, 0x1bd4: 0x06f9, 0x1bd5: 0x0701, 0x1bd6: 0x0709, 0x1bd7: 0x0711, + 0x1bd8: 0x06e1, 0x1bd9: 0x0049, 0x1bda: 0x0029, 0x1bdb: 0x0031, 0x1bdc: 0x06e9, 0x1bdd: 0x06f1, + 0x1bde: 0x06f9, 0x1bdf: 0x0701, 0x1be0: 0x0709, 0x1be1: 0x0711, 0x1be2: 0x06e1, 0x1be3: 0x0049, + 0x1be4: 0x0029, 0x1be5: 0x0031, 0x1be6: 0x06e9, 0x1be7: 
0x06f1, 0x1be8: 0x06f9, 0x1be9: 0x0701, + 0x1bea: 0x0709, 0x1beb: 0x0711, 0x1bec: 0x06e1, 0x1bed: 0x0049, 0x1bee: 0x0029, 0x1bef: 0x0031, + 0x1bf0: 0x06e9, 0x1bf1: 0x06f1, 0x1bf2: 0x06f9, 0x1bf3: 0x0701, 0x1bf4: 0x0709, 0x1bf5: 0x0711, + 0x1bf6: 0x06e1, 0x1bf7: 0x0049, 0x1bf8: 0x0029, 0x1bf9: 0x0031, 0x1bfa: 0x06e9, 0x1bfb: 0x06f1, + 0x1bfc: 0x06f9, 0x1bfd: 0x0701, 0x1bfe: 0x0709, 0x1bff: 0x0711, // Block 0x70, offset 0x1c00 - 0x1c00: 0x1111, 0x1c01: 0xbbf1, 0x1c02: 0xbbf1, 0x1c03: 0xbc09, 0x1c04: 0xbc21, 0x1c05: 0x10e1, - 0x1c06: 0x10f9, 0x1c07: 0xbc39, 0x1c08: 0x2079, 0x1c09: 0xbc71, 0x1c0a: 0xbb19, 0x1c0b: 0x1429, - 0x1c0c: 0xbb61, 0x1c0d: 0x10e1, 0x1c0e: 0x1111, 0x1c0f: 0x2109, 0x1c10: 0xbb01, 0x1c11: 0x1099, - 0x1c12: 0x10b1, 0x1c13: 0x10c9, 0x1c14: 0xbb19, 0x1c15: 0xbb31, 0x1c16: 0xbb49, 0x1c17: 0x1429, - 0x1c18: 0x1a31, 0x1c19: 0xbb61, 0x1c1a: 0xbb79, 0x1c1b: 0xbb91, 0x1c1c: 0xbba9, 0x1c1d: 0xbbc1, - 0x1c1e: 0xbbd9, 0x1c1f: 0x2109, 0x1c20: 0x1111, 0x1c21: 0x1429, 0x1c22: 0xbbf1, 0x1c23: 0xbc09, - 0x1c24: 0xbc21, 0x1c25: 0x10e1, 0x1c26: 0x10f9, 0x1c27: 0xbc39, 0x1c28: 0x2079, 0x1c29: 0xbc51, - 0x1c2a: 0xbb01, 0x1c2b: 0x1099, 0x1c2c: 0x10b1, 0x1c2d: 0x10c9, 0x1c2e: 0xbb19, 0x1c2f: 0xbb31, - 0x1c30: 0xbb49, 0x1c31: 0x1429, 0x1c32: 0x1a31, 0x1c33: 0xbb61, 0x1c34: 0xbb79, 0x1c35: 0xbb91, - 0x1c36: 0xbba9, 0x1c37: 0xbbc1, 0x1c38: 0xbbd9, 0x1c39: 0x2109, 0x1c3a: 0x1111, 0x1c3b: 0xbbf1, - 0x1c3c: 0xbbf1, 0x1c3d: 0xbc09, 0x1c3e: 0xbc21, 0x1c3f: 0x10e1, + 0x1c00: 0xe115, 0x1c01: 0xe115, 0x1c02: 0xe135, 0x1c03: 0xe135, 0x1c04: 0xe115, 0x1c05: 0xe115, + 0x1c06: 0xe175, 0x1c07: 0xe175, 0x1c08: 0xe115, 0x1c09: 0xe115, 0x1c0a: 0xe135, 0x1c0b: 0xe135, + 0x1c0c: 0xe115, 0x1c0d: 0xe115, 0x1c0e: 0xe1f5, 0x1c0f: 0xe1f5, 0x1c10: 0xe115, 0x1c11: 0xe115, + 0x1c12: 0xe135, 0x1c13: 0xe135, 0x1c14: 0xe115, 0x1c15: 0xe115, 0x1c16: 0xe175, 0x1c17: 0xe175, + 0x1c18: 0xe115, 0x1c19: 0xe115, 0x1c1a: 0xe135, 0x1c1b: 0xe135, 0x1c1c: 0xe115, 0x1c1d: 0xe115, + 0x1c1e: 0x8b3d, 0x1c1f: 0x8b3d, 0x1c20: 0x04b5, 0x1c21: 0x04b5, 0x1c22: 0x0a08, 0x1c23: 0x0a08, + 0x1c24: 0x0a08, 0x1c25: 0x0a08, 0x1c26: 0x0a08, 0x1c27: 0x0a08, 0x1c28: 0x0a08, 0x1c29: 0x0a08, + 0x1c2a: 0x0a08, 0x1c2b: 0x0a08, 0x1c2c: 0x0a08, 0x1c2d: 0x0a08, 0x1c2e: 0x0a08, 0x1c2f: 0x0a08, + 0x1c30: 0x0a08, 0x1c31: 0x0a08, 0x1c32: 0x0a08, 0x1c33: 0x0a08, 0x1c34: 0x0a08, 0x1c35: 0x0a08, + 0x1c36: 0x0a08, 0x1c37: 0x0a08, 0x1c38: 0x0a08, 0x1c39: 0x0a08, 0x1c3a: 0x0a08, 0x1c3b: 0x0a08, + 0x1c3c: 0x0a08, 0x1c3d: 0x0a08, 0x1c3e: 0x0a08, 0x1c3f: 0x0a08, // Block 0x71, offset 0x1c40 - 0x1c40: 0x10f9, 0x1c41: 0xbc39, 0x1c42: 0x2079, 0x1c43: 0xbc71, 0x1c44: 0xbb19, 0x1c45: 0x1429, - 0x1c46: 0xbb61, 0x1c47: 0x10e1, 0x1c48: 0x1111, 0x1c49: 0x2109, 0x1c4a: 0xbc91, 0x1c4b: 0xbc91, - 0x1c4c: 0x0040, 0x1c4d: 0x0040, 0x1c4e: 0x1f41, 0x1c4f: 0x00c9, 0x1c50: 0x0069, 0x1c51: 0x0079, - 0x1c52: 0x1f51, 0x1c53: 0x1f61, 0x1c54: 0x1f71, 0x1c55: 0x1f81, 0x1c56: 0x1f91, 0x1c57: 0x1fa1, - 0x1c58: 0x1f41, 0x1c59: 0x00c9, 0x1c5a: 0x0069, 0x1c5b: 0x0079, 0x1c5c: 0x1f51, 0x1c5d: 0x1f61, - 0x1c5e: 0x1f71, 0x1c5f: 0x1f81, 0x1c60: 0x1f91, 0x1c61: 0x1fa1, 0x1c62: 0x1f41, 0x1c63: 0x00c9, - 0x1c64: 0x0069, 0x1c65: 0x0079, 0x1c66: 0x1f51, 0x1c67: 0x1f61, 0x1c68: 0x1f71, 0x1c69: 0x1f81, - 0x1c6a: 0x1f91, 0x1c6b: 0x1fa1, 0x1c6c: 0x1f41, 0x1c6d: 0x00c9, 0x1c6e: 0x0069, 0x1c6f: 0x0079, - 0x1c70: 0x1f51, 0x1c71: 0x1f61, 0x1c72: 0x1f71, 0x1c73: 0x1f81, 0x1c74: 0x1f91, 0x1c75: 0x1fa1, - 0x1c76: 0x1f41, 0x1c77: 0x00c9, 0x1c78: 0x0069, 0x1c79: 0x0079, 0x1c7a: 0x1f51, 0x1c7b: 0x1f61, - 0x1c7c: 0x1f71, 
0x1c7d: 0x1f81, 0x1c7e: 0x1f91, 0x1c7f: 0x1fa1, + 0x1c40: 0x20b1, 0x1c41: 0x20b9, 0x1c42: 0x20d9, 0x1c43: 0x20f1, 0x1c44: 0x0040, 0x1c45: 0x2189, + 0x1c46: 0x2109, 0x1c47: 0x20e1, 0x1c48: 0x2131, 0x1c49: 0x2191, 0x1c4a: 0x2161, 0x1c4b: 0x2169, + 0x1c4c: 0x2171, 0x1c4d: 0x2179, 0x1c4e: 0x2111, 0x1c4f: 0x2141, 0x1c50: 0x2151, 0x1c51: 0x2121, + 0x1c52: 0x2159, 0x1c53: 0x2101, 0x1c54: 0x2119, 0x1c55: 0x20c9, 0x1c56: 0x20d1, 0x1c57: 0x20e9, + 0x1c58: 0x20f9, 0x1c59: 0x2129, 0x1c5a: 0x2139, 0x1c5b: 0x2149, 0x1c5c: 0x2311, 0x1c5d: 0x1689, + 0x1c5e: 0x2319, 0x1c5f: 0x2321, 0x1c60: 0x0040, 0x1c61: 0x20b9, 0x1c62: 0x20d9, 0x1c63: 0x0040, + 0x1c64: 0x2181, 0x1c65: 0x0040, 0x1c66: 0x0040, 0x1c67: 0x20e1, 0x1c68: 0x0040, 0x1c69: 0x2191, + 0x1c6a: 0x2161, 0x1c6b: 0x2169, 0x1c6c: 0x2171, 0x1c6d: 0x2179, 0x1c6e: 0x2111, 0x1c6f: 0x2141, + 0x1c70: 0x2151, 0x1c71: 0x2121, 0x1c72: 0x2159, 0x1c73: 0x0040, 0x1c74: 0x2119, 0x1c75: 0x20c9, + 0x1c76: 0x20d1, 0x1c77: 0x20e9, 0x1c78: 0x0040, 0x1c79: 0x2129, 0x1c7a: 0x0040, 0x1c7b: 0x2149, + 0x1c7c: 0x0040, 0x1c7d: 0x0040, 0x1c7e: 0x0040, 0x1c7f: 0x0040, // Block 0x72, offset 0x1c80 - 0x1c80: 0xe115, 0x1c81: 0xe115, 0x1c82: 0xe135, 0x1c83: 0xe135, 0x1c84: 0xe115, 0x1c85: 0xe115, - 0x1c86: 0xe175, 0x1c87: 0xe175, 0x1c88: 0xe115, 0x1c89: 0xe115, 0x1c8a: 0xe135, 0x1c8b: 0xe135, - 0x1c8c: 0xe115, 0x1c8d: 0xe115, 0x1c8e: 0xe1f5, 0x1c8f: 0xe1f5, 0x1c90: 0xe115, 0x1c91: 0xe115, - 0x1c92: 0xe135, 0x1c93: 0xe135, 0x1c94: 0xe115, 0x1c95: 0xe115, 0x1c96: 0xe175, 0x1c97: 0xe175, - 0x1c98: 0xe115, 0x1c99: 0xe115, 0x1c9a: 0xe135, 0x1c9b: 0xe135, 0x1c9c: 0xe115, 0x1c9d: 0xe115, - 0x1c9e: 0x8b3d, 0x1c9f: 0x8b3d, 0x1ca0: 0x04b5, 0x1ca1: 0x04b5, 0x1ca2: 0x0a08, 0x1ca3: 0x0a08, - 0x1ca4: 0x0a08, 0x1ca5: 0x0a08, 0x1ca6: 0x0a08, 0x1ca7: 0x0a08, 0x1ca8: 0x0a08, 0x1ca9: 0x0a08, - 0x1caa: 0x0a08, 0x1cab: 0x0a08, 0x1cac: 0x0a08, 0x1cad: 0x0a08, 0x1cae: 0x0a08, 0x1caf: 0x0a08, - 0x1cb0: 0x0a08, 0x1cb1: 0x0a08, 0x1cb2: 0x0a08, 0x1cb3: 0x0a08, 0x1cb4: 0x0a08, 0x1cb5: 0x0a08, - 0x1cb6: 0x0a08, 0x1cb7: 0x0a08, 0x1cb8: 0x0a08, 0x1cb9: 0x0a08, 0x1cba: 0x0a08, 0x1cbb: 0x0a08, - 0x1cbc: 0x0a08, 0x1cbd: 0x0a08, 0x1cbe: 0x0a08, 0x1cbf: 0x0a08, + 0x1c80: 0x0040, 0x1c81: 0x0040, 0x1c82: 0x20d9, 0x1c83: 0x0040, 0x1c84: 0x0040, 0x1c85: 0x0040, + 0x1c86: 0x0040, 0x1c87: 0x20e1, 0x1c88: 0x0040, 0x1c89: 0x2191, 0x1c8a: 0x0040, 0x1c8b: 0x2169, + 0x1c8c: 0x0040, 0x1c8d: 0x2179, 0x1c8e: 0x2111, 0x1c8f: 0x2141, 0x1c90: 0x0040, 0x1c91: 0x2121, + 0x1c92: 0x2159, 0x1c93: 0x0040, 0x1c94: 0x2119, 0x1c95: 0x0040, 0x1c96: 0x0040, 0x1c97: 0x20e9, + 0x1c98: 0x0040, 0x1c99: 0x2129, 0x1c9a: 0x0040, 0x1c9b: 0x2149, 0x1c9c: 0x0040, 0x1c9d: 0x1689, + 0x1c9e: 0x0040, 0x1c9f: 0x2321, 0x1ca0: 0x0040, 0x1ca1: 0x20b9, 0x1ca2: 0x20d9, 0x1ca3: 0x0040, + 0x1ca4: 0x2181, 0x1ca5: 0x0040, 0x1ca6: 0x0040, 0x1ca7: 0x20e1, 0x1ca8: 0x2131, 0x1ca9: 0x2191, + 0x1caa: 0x2161, 0x1cab: 0x0040, 0x1cac: 0x2171, 0x1cad: 0x2179, 0x1cae: 0x2111, 0x1caf: 0x2141, + 0x1cb0: 0x2151, 0x1cb1: 0x2121, 0x1cb2: 0x2159, 0x1cb3: 0x0040, 0x1cb4: 0x2119, 0x1cb5: 0x20c9, + 0x1cb6: 0x20d1, 0x1cb7: 0x20e9, 0x1cb8: 0x0040, 0x1cb9: 0x2129, 0x1cba: 0x2139, 0x1cbb: 0x2149, + 0x1cbc: 0x2311, 0x1cbd: 0x0040, 0x1cbe: 0x2319, 0x1cbf: 0x0040, // Block 0x73, offset 0x1cc0 - 0x1cc0: 0xb1d9, 0x1cc1: 0xb1f1, 0x1cc2: 0xb251, 0x1cc3: 0xb299, 0x1cc4: 0x0040, 0x1cc5: 0xb461, - 0x1cc6: 0xb2e1, 0x1cc7: 0xb269, 0x1cc8: 0xb359, 0x1cc9: 0xb479, 0x1cca: 0xb3e9, 0x1ccb: 0xb401, - 0x1ccc: 0xb419, 0x1ccd: 0xb431, 0x1cce: 0xb2f9, 0x1ccf: 0xb389, 0x1cd0: 0xb3b9, 0x1cd1: 0xb329, - 0x1cd2: 
0xb3d1, 0x1cd3: 0xb2c9, 0x1cd4: 0xb311, 0x1cd5: 0xb221, 0x1cd6: 0xb239, 0x1cd7: 0xb281, - 0x1cd8: 0xb2b1, 0x1cd9: 0xb341, 0x1cda: 0xb371, 0x1cdb: 0xb3a1, 0x1cdc: 0xbca9, 0x1cdd: 0x7999, - 0x1cde: 0xbcc1, 0x1cdf: 0xbcd9, 0x1ce0: 0x0040, 0x1ce1: 0xb1f1, 0x1ce2: 0xb251, 0x1ce3: 0x0040, - 0x1ce4: 0xb449, 0x1ce5: 0x0040, 0x1ce6: 0x0040, 0x1ce7: 0xb269, 0x1ce8: 0x0040, 0x1ce9: 0xb479, - 0x1cea: 0xb3e9, 0x1ceb: 0xb401, 0x1cec: 0xb419, 0x1ced: 0xb431, 0x1cee: 0xb2f9, 0x1cef: 0xb389, - 0x1cf0: 0xb3b9, 0x1cf1: 0xb329, 0x1cf2: 0xb3d1, 0x1cf3: 0x0040, 0x1cf4: 0xb311, 0x1cf5: 0xb221, - 0x1cf6: 0xb239, 0x1cf7: 0xb281, 0x1cf8: 0x0040, 0x1cf9: 0xb341, 0x1cfa: 0x0040, 0x1cfb: 0xb3a1, + 0x1cc0: 0x20b1, 0x1cc1: 0x20b9, 0x1cc2: 0x20d9, 0x1cc3: 0x20f1, 0x1cc4: 0x2181, 0x1cc5: 0x2189, + 0x1cc6: 0x2109, 0x1cc7: 0x20e1, 0x1cc8: 0x2131, 0x1cc9: 0x2191, 0x1cca: 0x0040, 0x1ccb: 0x2169, + 0x1ccc: 0x2171, 0x1ccd: 0x2179, 0x1cce: 0x2111, 0x1ccf: 0x2141, 0x1cd0: 0x2151, 0x1cd1: 0x2121, + 0x1cd2: 0x2159, 0x1cd3: 0x2101, 0x1cd4: 0x2119, 0x1cd5: 0x20c9, 0x1cd6: 0x20d1, 0x1cd7: 0x20e9, + 0x1cd8: 0x20f9, 0x1cd9: 0x2129, 0x1cda: 0x2139, 0x1cdb: 0x2149, 0x1cdc: 0x0040, 0x1cdd: 0x0040, + 0x1cde: 0x0040, 0x1cdf: 0x0040, 0x1ce0: 0x0040, 0x1ce1: 0x20b9, 0x1ce2: 0x20d9, 0x1ce3: 0x20f1, + 0x1ce4: 0x0040, 0x1ce5: 0x2189, 0x1ce6: 0x2109, 0x1ce7: 0x20e1, 0x1ce8: 0x2131, 0x1ce9: 0x2191, + 0x1cea: 0x0040, 0x1ceb: 0x2169, 0x1cec: 0x2171, 0x1ced: 0x2179, 0x1cee: 0x2111, 0x1cef: 0x2141, + 0x1cf0: 0x2151, 0x1cf1: 0x2121, 0x1cf2: 0x2159, 0x1cf3: 0x2101, 0x1cf4: 0x2119, 0x1cf5: 0x20c9, + 0x1cf6: 0x20d1, 0x1cf7: 0x20e9, 0x1cf8: 0x20f9, 0x1cf9: 0x2129, 0x1cfa: 0x2139, 0x1cfb: 0x2149, 0x1cfc: 0x0040, 0x1cfd: 0x0040, 0x1cfe: 0x0040, 0x1cff: 0x0040, // Block 0x74, offset 0x1d00 - 0x1d00: 0x0040, 0x1d01: 0x0040, 0x1d02: 0xb251, 0x1d03: 0x0040, 0x1d04: 0x0040, 0x1d05: 0x0040, - 0x1d06: 0x0040, 0x1d07: 0xb269, 0x1d08: 0x0040, 0x1d09: 0xb479, 0x1d0a: 0x0040, 0x1d0b: 0xb401, - 0x1d0c: 0x0040, 0x1d0d: 0xb431, 0x1d0e: 0xb2f9, 0x1d0f: 0xb389, 0x1d10: 0x0040, 0x1d11: 0xb329, - 0x1d12: 0xb3d1, 0x1d13: 0x0040, 0x1d14: 0xb311, 0x1d15: 0x0040, 0x1d16: 0x0040, 0x1d17: 0xb281, - 0x1d18: 0x0040, 0x1d19: 0xb341, 0x1d1a: 0x0040, 0x1d1b: 0xb3a1, 0x1d1c: 0x0040, 0x1d1d: 0x7999, - 0x1d1e: 0x0040, 0x1d1f: 0xbcd9, 0x1d20: 0x0040, 0x1d21: 0xb1f1, 0x1d22: 0xb251, 0x1d23: 0x0040, - 0x1d24: 0xb449, 0x1d25: 0x0040, 0x1d26: 0x0040, 0x1d27: 0xb269, 0x1d28: 0xb359, 0x1d29: 0xb479, - 0x1d2a: 0xb3e9, 0x1d2b: 0x0040, 0x1d2c: 0xb419, 0x1d2d: 0xb431, 0x1d2e: 0xb2f9, 0x1d2f: 0xb389, - 0x1d30: 0xb3b9, 0x1d31: 0xb329, 0x1d32: 0xb3d1, 0x1d33: 0x0040, 0x1d34: 0xb311, 0x1d35: 0xb221, - 0x1d36: 0xb239, 0x1d37: 0xb281, 0x1d38: 0x0040, 0x1d39: 0xb341, 0x1d3a: 0xb371, 0x1d3b: 0xb3a1, - 0x1d3c: 0xbca9, 0x1d3d: 0x0040, 0x1d3e: 0xbcc1, 0x1d3f: 0x0040, + 0x1d00: 0x0040, 0x1d01: 0x232a, 0x1d02: 0x2332, 0x1d03: 0x233a, 0x1d04: 0x2342, 0x1d05: 0x234a, + 0x1d06: 0x2352, 0x1d07: 0x235a, 0x1d08: 0x2362, 0x1d09: 0x236a, 0x1d0a: 0x2372, 0x1d0b: 0x0018, + 0x1d0c: 0x0018, 0x1d0d: 0x0018, 0x1d0e: 0x0018, 0x1d0f: 0x0018, 0x1d10: 0x237a, 0x1d11: 0x2382, + 0x1d12: 0x238a, 0x1d13: 0x2392, 0x1d14: 0x239a, 0x1d15: 0x23a2, 0x1d16: 0x23aa, 0x1d17: 0x23b2, + 0x1d18: 0x23ba, 0x1d19: 0x23c2, 0x1d1a: 0x23ca, 0x1d1b: 0x23d2, 0x1d1c: 0x23da, 0x1d1d: 0x23e2, + 0x1d1e: 0x23ea, 0x1d1f: 0x23f2, 0x1d20: 0x23fa, 0x1d21: 0x2402, 0x1d22: 0x240a, 0x1d23: 0x2412, + 0x1d24: 0x241a, 0x1d25: 0x2422, 0x1d26: 0x242a, 0x1d27: 0x2432, 0x1d28: 0x243a, 0x1d29: 0x2442, + 0x1d2a: 0x2449, 0x1d2b: 0x03d9, 0x1d2c: 0x00b9, 0x1d2d: 0x1239, 0x1d2e: 
0x2451, 0x1d2f: 0x0018, + 0x1d30: 0x0019, 0x1d31: 0x02e9, 0x1d32: 0x03d9, 0x1d33: 0x02f1, 0x1d34: 0x02f9, 0x1d35: 0x03f1, + 0x1d36: 0x0309, 0x1d37: 0x00a9, 0x1d38: 0x0311, 0x1d39: 0x00b1, 0x1d3a: 0x0319, 0x1d3b: 0x0101, + 0x1d3c: 0x0321, 0x1d3d: 0x0329, 0x1d3e: 0x0051, 0x1d3f: 0x0339, // Block 0x75, offset 0x1d40 - 0x1d40: 0xb1d9, 0x1d41: 0xb1f1, 0x1d42: 0xb251, 0x1d43: 0xb299, 0x1d44: 0xb449, 0x1d45: 0xb461, - 0x1d46: 0xb2e1, 0x1d47: 0xb269, 0x1d48: 0xb359, 0x1d49: 0xb479, 0x1d4a: 0x0040, 0x1d4b: 0xb401, - 0x1d4c: 0xb419, 0x1d4d: 0xb431, 0x1d4e: 0xb2f9, 0x1d4f: 0xb389, 0x1d50: 0xb3b9, 0x1d51: 0xb329, - 0x1d52: 0xb3d1, 0x1d53: 0xb2c9, 0x1d54: 0xb311, 0x1d55: 0xb221, 0x1d56: 0xb239, 0x1d57: 0xb281, - 0x1d58: 0xb2b1, 0x1d59: 0xb341, 0x1d5a: 0xb371, 0x1d5b: 0xb3a1, 0x1d5c: 0x0040, 0x1d5d: 0x0040, - 0x1d5e: 0x0040, 0x1d5f: 0x0040, 0x1d60: 0x0040, 0x1d61: 0xb1f1, 0x1d62: 0xb251, 0x1d63: 0xb299, - 0x1d64: 0x0040, 0x1d65: 0xb461, 0x1d66: 0xb2e1, 0x1d67: 0xb269, 0x1d68: 0xb359, 0x1d69: 0xb479, - 0x1d6a: 0x0040, 0x1d6b: 0xb401, 0x1d6c: 0xb419, 0x1d6d: 0xb431, 0x1d6e: 0xb2f9, 0x1d6f: 0xb389, - 0x1d70: 0xb3b9, 0x1d71: 0xb329, 0x1d72: 0xb3d1, 0x1d73: 0xb2c9, 0x1d74: 0xb311, 0x1d75: 0xb221, - 0x1d76: 0xb239, 0x1d77: 0xb281, 0x1d78: 0xb2b1, 0x1d79: 0xb341, 0x1d7a: 0xb371, 0x1d7b: 0xb3a1, - 0x1d7c: 0x0040, 0x1d7d: 0x0040, 0x1d7e: 0x0040, 0x1d7f: 0x0040, + 0x1d40: 0x0751, 0x1d41: 0x00b9, 0x1d42: 0x0089, 0x1d43: 0x0341, 0x1d44: 0x0349, 0x1d45: 0x0391, + 0x1d46: 0x00c1, 0x1d47: 0x0109, 0x1d48: 0x00c9, 0x1d49: 0x04b1, 0x1d4a: 0x2459, 0x1d4b: 0x11f9, + 0x1d4c: 0x2461, 0x1d4d: 0x04d9, 0x1d4e: 0x2469, 0x1d4f: 0x2471, 0x1d50: 0x0018, 0x1d51: 0x0018, + 0x1d52: 0x0018, 0x1d53: 0x0018, 0x1d54: 0x0018, 0x1d55: 0x0018, 0x1d56: 0x0018, 0x1d57: 0x0018, + 0x1d58: 0x0018, 0x1d59: 0x0018, 0x1d5a: 0x0018, 0x1d5b: 0x0018, 0x1d5c: 0x0018, 0x1d5d: 0x0018, + 0x1d5e: 0x0018, 0x1d5f: 0x0018, 0x1d60: 0x0018, 0x1d61: 0x0018, 0x1d62: 0x0018, 0x1d63: 0x0018, + 0x1d64: 0x0018, 0x1d65: 0x0018, 0x1d66: 0x0018, 0x1d67: 0x0018, 0x1d68: 0x0018, 0x1d69: 0x0018, + 0x1d6a: 0x2479, 0x1d6b: 0x2481, 0x1d6c: 0x2489, 0x1d6d: 0x0018, 0x1d6e: 0x0018, 0x1d6f: 0x0018, + 0x1d70: 0x0018, 0x1d71: 0x0018, 0x1d72: 0x0018, 0x1d73: 0x0018, 0x1d74: 0x0018, 0x1d75: 0x0018, + 0x1d76: 0x0018, 0x1d77: 0x0018, 0x1d78: 0x0018, 0x1d79: 0x0018, 0x1d7a: 0x0018, 0x1d7b: 0x0018, + 0x1d7c: 0x0018, 0x1d7d: 0x0018, 0x1d7e: 0x0018, 0x1d7f: 0x0018, // Block 0x76, offset 0x1d80 - 0x1d80: 0x0040, 0x1d81: 0xbcf2, 0x1d82: 0xbd0a, 0x1d83: 0xbd22, 0x1d84: 0xbd3a, 0x1d85: 0xbd52, - 0x1d86: 0xbd6a, 0x1d87: 0xbd82, 0x1d88: 0xbd9a, 0x1d89: 0xbdb2, 0x1d8a: 0xbdca, 0x1d8b: 0x0018, - 0x1d8c: 0x0018, 0x1d8d: 0x0018, 0x1d8e: 0x0018, 0x1d8f: 0x0018, 0x1d90: 0xbde2, 0x1d91: 0xbe02, - 0x1d92: 0xbe22, 0x1d93: 0xbe42, 0x1d94: 0xbe62, 0x1d95: 0xbe82, 0x1d96: 0xbea2, 0x1d97: 0xbec2, - 0x1d98: 0xbee2, 0x1d99: 0xbf02, 0x1d9a: 0xbf22, 0x1d9b: 0xbf42, 0x1d9c: 0xbf62, 0x1d9d: 0xbf82, - 0x1d9e: 0xbfa2, 0x1d9f: 0xbfc2, 0x1da0: 0xbfe2, 0x1da1: 0xc002, 0x1da2: 0xc022, 0x1da3: 0xc042, - 0x1da4: 0xc062, 0x1da5: 0xc082, 0x1da6: 0xc0a2, 0x1da7: 0xc0c2, 0x1da8: 0xc0e2, 0x1da9: 0xc102, - 0x1daa: 0xc121, 0x1dab: 0x1159, 0x1dac: 0x0269, 0x1dad: 0x66a9, 0x1dae: 0xc161, 0x1daf: 0x0018, - 0x1db0: 0x0039, 0x1db1: 0x0ee9, 0x1db2: 0x1159, 0x1db3: 0x0ef9, 0x1db4: 0x0f09, 0x1db5: 0x1199, - 0x1db6: 0x0f31, 0x1db7: 0x0249, 0x1db8: 0x0f41, 0x1db9: 0x0259, 0x1dba: 0x0f51, 0x1dbb: 0x0359, - 0x1dbc: 0x0f61, 0x1dbd: 0x0f71, 0x1dbe: 0x00d9, 0x1dbf: 0x0f99, + 0x1d80: 0x2499, 0x1d81: 0x24a1, 0x1d82: 0x24a9, 0x1d83: 0x0040, 
0x1d84: 0x0040, 0x1d85: 0x0040, + 0x1d86: 0x0040, 0x1d87: 0x0040, 0x1d88: 0x0040, 0x1d89: 0x0040, 0x1d8a: 0x0040, 0x1d8b: 0x0040, + 0x1d8c: 0x0040, 0x1d8d: 0x0040, 0x1d8e: 0x0040, 0x1d8f: 0x0040, 0x1d90: 0x24b1, 0x1d91: 0x24b9, + 0x1d92: 0x24c1, 0x1d93: 0x24c9, 0x1d94: 0x24d1, 0x1d95: 0x24d9, 0x1d96: 0x24e1, 0x1d97: 0x24e9, + 0x1d98: 0x24f1, 0x1d99: 0x24f9, 0x1d9a: 0x2501, 0x1d9b: 0x2509, 0x1d9c: 0x2511, 0x1d9d: 0x2519, + 0x1d9e: 0x2521, 0x1d9f: 0x2529, 0x1da0: 0x2531, 0x1da1: 0x2539, 0x1da2: 0x2541, 0x1da3: 0x2549, + 0x1da4: 0x2551, 0x1da5: 0x2559, 0x1da6: 0x2561, 0x1da7: 0x2569, 0x1da8: 0x2571, 0x1da9: 0x2579, + 0x1daa: 0x2581, 0x1dab: 0x2589, 0x1dac: 0x2591, 0x1dad: 0x2599, 0x1dae: 0x25a1, 0x1daf: 0x25a9, + 0x1db0: 0x25b1, 0x1db1: 0x25b9, 0x1db2: 0x25c1, 0x1db3: 0x25c9, 0x1db4: 0x25d1, 0x1db5: 0x25d9, + 0x1db6: 0x25e1, 0x1db7: 0x25e9, 0x1db8: 0x25f1, 0x1db9: 0x25f9, 0x1dba: 0x2601, 0x1dbb: 0x2609, + 0x1dbc: 0x0040, 0x1dbd: 0x0040, 0x1dbe: 0x0040, 0x1dbf: 0x0040, // Block 0x77, offset 0x1dc0 - 0x1dc0: 0x2039, 0x1dc1: 0x0269, 0x1dc2: 0x01d9, 0x1dc3: 0x0fa9, 0x1dc4: 0x0fb9, 0x1dc5: 0x1089, - 0x1dc6: 0x0279, 0x1dc7: 0x0369, 0x1dc8: 0x0289, 0x1dc9: 0x13d1, 0x1dca: 0xc179, 0x1dcb: 0x65e9, - 0x1dcc: 0xc191, 0x1dcd: 0x1441, 0x1dce: 0xc1a9, 0x1dcf: 0xc1c9, 0x1dd0: 0x0018, 0x1dd1: 0x0018, - 0x1dd2: 0x0018, 0x1dd3: 0x0018, 0x1dd4: 0x0018, 0x1dd5: 0x0018, 0x1dd6: 0x0018, 0x1dd7: 0x0018, - 0x1dd8: 0x0018, 0x1dd9: 0x0018, 0x1dda: 0x0018, 0x1ddb: 0x0018, 0x1ddc: 0x0018, 0x1ddd: 0x0018, - 0x1dde: 0x0018, 0x1ddf: 0x0018, 0x1de0: 0x0018, 0x1de1: 0x0018, 0x1de2: 0x0018, 0x1de3: 0x0018, - 0x1de4: 0x0018, 0x1de5: 0x0018, 0x1de6: 0x0018, 0x1de7: 0x0018, 0x1de8: 0x0018, 0x1de9: 0x0018, - 0x1dea: 0xc1e1, 0x1deb: 0xc1f9, 0x1dec: 0xc211, 0x1ded: 0x0018, 0x1dee: 0x0018, 0x1def: 0x0018, - 0x1df0: 0x0018, 0x1df1: 0x0018, 0x1df2: 0x0018, 0x1df3: 0x0018, 0x1df4: 0x0018, 0x1df5: 0x0018, - 0x1df6: 0x0018, 0x1df7: 0x0018, 0x1df8: 0x0018, 0x1df9: 0x0018, 0x1dfa: 0x0018, 0x1dfb: 0x0018, - 0x1dfc: 0x0018, 0x1dfd: 0x0018, 0x1dfe: 0x0018, 0x1dff: 0x0018, + 0x1dc0: 0x2669, 0x1dc1: 0x2671, 0x1dc2: 0x2679, 0x1dc3: 0x8b55, 0x1dc4: 0x2681, 0x1dc5: 0x2689, + 0x1dc6: 0x2691, 0x1dc7: 0x2699, 0x1dc8: 0x26a1, 0x1dc9: 0x26a9, 0x1dca: 0x26b1, 0x1dcb: 0x26b9, + 0x1dcc: 0x26c1, 0x1dcd: 0x8b75, 0x1dce: 0x26c9, 0x1dcf: 0x26d1, 0x1dd0: 0x26d9, 0x1dd1: 0x26e1, + 0x1dd2: 0x8b95, 0x1dd3: 0x26e9, 0x1dd4: 0x26f1, 0x1dd5: 0x2521, 0x1dd6: 0x8bb5, 0x1dd7: 0x26f9, + 0x1dd8: 0x2701, 0x1dd9: 0x2709, 0x1dda: 0x2711, 0x1ddb: 0x2719, 0x1ddc: 0x8bd5, 0x1ddd: 0x2721, + 0x1dde: 0x2729, 0x1ddf: 0x2731, 0x1de0: 0x2739, 0x1de1: 0x2741, 0x1de2: 0x25f9, 0x1de3: 0x2749, + 0x1de4: 0x2751, 0x1de5: 0x2759, 0x1de6: 0x2761, 0x1de7: 0x2769, 0x1de8: 0x2771, 0x1de9: 0x2779, + 0x1dea: 0x2781, 0x1deb: 0x2789, 0x1dec: 0x2791, 0x1ded: 0x2799, 0x1dee: 0x27a1, 0x1def: 0x27a9, + 0x1df0: 0x27b1, 0x1df1: 0x27b9, 0x1df2: 0x27b9, 0x1df3: 0x27b9, 0x1df4: 0x8bf5, 0x1df5: 0x27c1, + 0x1df6: 0x27c9, 0x1df7: 0x27d1, 0x1df8: 0x8c15, 0x1df9: 0x27d9, 0x1dfa: 0x27e1, 0x1dfb: 0x27e9, + 0x1dfc: 0x27f1, 0x1dfd: 0x27f9, 0x1dfe: 0x2801, 0x1dff: 0x2809, // Block 0x78, offset 0x1e00 - 0x1e00: 0xc241, 0x1e01: 0xc279, 0x1e02: 0xc2b1, 0x1e03: 0x0040, 0x1e04: 0x0040, 0x1e05: 0x0040, - 0x1e06: 0x0040, 0x1e07: 0x0040, 0x1e08: 0x0040, 0x1e09: 0x0040, 0x1e0a: 0x0040, 0x1e0b: 0x0040, - 0x1e0c: 0x0040, 0x1e0d: 0x0040, 0x1e0e: 0x0040, 0x1e0f: 0x0040, 0x1e10: 0xc2d1, 0x1e11: 0xc2f1, - 0x1e12: 0xc311, 0x1e13: 0xc331, 0x1e14: 0xc351, 0x1e15: 0xc371, 0x1e16: 0xc391, 0x1e17: 0xc3b1, - 0x1e18: 0xc3d1, 0x1e19: 
0xc3f1, 0x1e1a: 0xc411, 0x1e1b: 0xc431, 0x1e1c: 0xc451, 0x1e1d: 0xc471, - 0x1e1e: 0xc491, 0x1e1f: 0xc4b1, 0x1e20: 0xc4d1, 0x1e21: 0xc4f1, 0x1e22: 0xc511, 0x1e23: 0xc531, - 0x1e24: 0xc551, 0x1e25: 0xc571, 0x1e26: 0xc591, 0x1e27: 0xc5b1, 0x1e28: 0xc5d1, 0x1e29: 0xc5f1, - 0x1e2a: 0xc611, 0x1e2b: 0xc631, 0x1e2c: 0xc651, 0x1e2d: 0xc671, 0x1e2e: 0xc691, 0x1e2f: 0xc6b1, - 0x1e30: 0xc6d1, 0x1e31: 0xc6f1, 0x1e32: 0xc711, 0x1e33: 0xc731, 0x1e34: 0xc751, 0x1e35: 0xc771, - 0x1e36: 0xc791, 0x1e37: 0xc7b1, 0x1e38: 0xc7d1, 0x1e39: 0xc7f1, 0x1e3a: 0xc811, 0x1e3b: 0xc831, - 0x1e3c: 0x0040, 0x1e3d: 0x0040, 0x1e3e: 0x0040, 0x1e3f: 0x0040, + 0x1e00: 0x2811, 0x1e01: 0x2819, 0x1e02: 0x2821, 0x1e03: 0x2829, 0x1e04: 0x2831, 0x1e05: 0x2839, + 0x1e06: 0x2839, 0x1e07: 0x2841, 0x1e08: 0x2849, 0x1e09: 0x2851, 0x1e0a: 0x2859, 0x1e0b: 0x2861, + 0x1e0c: 0x2869, 0x1e0d: 0x2871, 0x1e0e: 0x2879, 0x1e0f: 0x2881, 0x1e10: 0x2889, 0x1e11: 0x2891, + 0x1e12: 0x2899, 0x1e13: 0x28a1, 0x1e14: 0x28a9, 0x1e15: 0x28b1, 0x1e16: 0x28b9, 0x1e17: 0x28c1, + 0x1e18: 0x28c9, 0x1e19: 0x8c35, 0x1e1a: 0x28d1, 0x1e1b: 0x28d9, 0x1e1c: 0x28e1, 0x1e1d: 0x24d9, + 0x1e1e: 0x28e9, 0x1e1f: 0x28f1, 0x1e20: 0x8c55, 0x1e21: 0x8c75, 0x1e22: 0x28f9, 0x1e23: 0x2901, + 0x1e24: 0x2909, 0x1e25: 0x2911, 0x1e26: 0x2919, 0x1e27: 0x2921, 0x1e28: 0x2040, 0x1e29: 0x2929, + 0x1e2a: 0x2931, 0x1e2b: 0x2931, 0x1e2c: 0x8c95, 0x1e2d: 0x2939, 0x1e2e: 0x2941, 0x1e2f: 0x2949, + 0x1e30: 0x2951, 0x1e31: 0x8cb5, 0x1e32: 0x2959, 0x1e33: 0x2961, 0x1e34: 0x2040, 0x1e35: 0x2969, + 0x1e36: 0x2971, 0x1e37: 0x2979, 0x1e38: 0x2981, 0x1e39: 0x2989, 0x1e3a: 0x2991, 0x1e3b: 0x8cd5, + 0x1e3c: 0x2999, 0x1e3d: 0x8cf5, 0x1e3e: 0x29a1, 0x1e3f: 0x29a9, // Block 0x79, offset 0x1e40 - 0x1e40: 0xcb61, 0x1e41: 0xcb81, 0x1e42: 0xcba1, 0x1e43: 0x8b55, 0x1e44: 0xcbc1, 0x1e45: 0xcbe1, - 0x1e46: 0xcc01, 0x1e47: 0xcc21, 0x1e48: 0xcc41, 0x1e49: 0xcc61, 0x1e4a: 0xcc81, 0x1e4b: 0xcca1, - 0x1e4c: 0xccc1, 0x1e4d: 0x8b75, 0x1e4e: 0xcce1, 0x1e4f: 0xcd01, 0x1e50: 0xcd21, 0x1e51: 0xcd41, - 0x1e52: 0x8b95, 0x1e53: 0xcd61, 0x1e54: 0xcd81, 0x1e55: 0xc491, 0x1e56: 0x8bb5, 0x1e57: 0xcda1, - 0x1e58: 0xcdc1, 0x1e59: 0xcde1, 0x1e5a: 0xce01, 0x1e5b: 0xce21, 0x1e5c: 0x8bd5, 0x1e5d: 0xce41, - 0x1e5e: 0xce61, 0x1e5f: 0xce81, 0x1e60: 0xcea1, 0x1e61: 0xcec1, 0x1e62: 0xc7f1, 0x1e63: 0xcee1, - 0x1e64: 0xcf01, 0x1e65: 0xcf21, 0x1e66: 0xcf41, 0x1e67: 0xcf61, 0x1e68: 0xcf81, 0x1e69: 0xcfa1, - 0x1e6a: 0xcfc1, 0x1e6b: 0xcfe1, 0x1e6c: 0xd001, 0x1e6d: 0xd021, 0x1e6e: 0xd041, 0x1e6f: 0xd061, - 0x1e70: 0xd081, 0x1e71: 0xd0a1, 0x1e72: 0xd0a1, 0x1e73: 0xd0a1, 0x1e74: 0x8bf5, 0x1e75: 0xd0c1, - 0x1e76: 0xd0e1, 0x1e77: 0xd101, 0x1e78: 0x8c15, 0x1e79: 0xd121, 0x1e7a: 0xd141, 0x1e7b: 0xd161, - 0x1e7c: 0xd181, 0x1e7d: 0xd1a1, 0x1e7e: 0xd1c1, 0x1e7f: 0xd1e1, + 0x1e40: 0x29b1, 0x1e41: 0x29b9, 0x1e42: 0x29c1, 0x1e43: 0x29c9, 0x1e44: 0x29d1, 0x1e45: 0x29d9, + 0x1e46: 0x29e1, 0x1e47: 0x29e9, 0x1e48: 0x29f1, 0x1e49: 0x8d15, 0x1e4a: 0x29f9, 0x1e4b: 0x2a01, + 0x1e4c: 0x2a09, 0x1e4d: 0x2a11, 0x1e4e: 0x2a19, 0x1e4f: 0x8d35, 0x1e50: 0x2a21, 0x1e51: 0x8d55, + 0x1e52: 0x8d75, 0x1e53: 0x2a29, 0x1e54: 0x2a31, 0x1e55: 0x2a31, 0x1e56: 0x2a39, 0x1e57: 0x8d95, + 0x1e58: 0x8db5, 0x1e59: 0x2a41, 0x1e5a: 0x2a49, 0x1e5b: 0x2a51, 0x1e5c: 0x2a59, 0x1e5d: 0x2a61, + 0x1e5e: 0x2a69, 0x1e5f: 0x2a71, 0x1e60: 0x2a79, 0x1e61: 0x2a81, 0x1e62: 0x2a89, 0x1e63: 0x2a91, + 0x1e64: 0x8dd5, 0x1e65: 0x2a99, 0x1e66: 0x2aa1, 0x1e67: 0x2aa9, 0x1e68: 0x2ab1, 0x1e69: 0x2aa9, + 0x1e6a: 0x2ab9, 0x1e6b: 0x2ac1, 0x1e6c: 0x2ac9, 0x1e6d: 0x2ad1, 0x1e6e: 0x2ad9, 0x1e6f: 0x2ae1, + 0x1e70: 0x2ae9, 
0x1e71: 0x2af1, 0x1e72: 0x2af9, 0x1e73: 0x2b01, 0x1e74: 0x2b09, 0x1e75: 0x2b11, + 0x1e76: 0x2b19, 0x1e77: 0x2b21, 0x1e78: 0x8df5, 0x1e79: 0x2b29, 0x1e7a: 0x2b31, 0x1e7b: 0x2b39, + 0x1e7c: 0x2b41, 0x1e7d: 0x2b49, 0x1e7e: 0x8e15, 0x1e7f: 0x2b51, // Block 0x7a, offset 0x1e80 - 0x1e80: 0xd201, 0x1e81: 0xd221, 0x1e82: 0xd241, 0x1e83: 0xd261, 0x1e84: 0xd281, 0x1e85: 0xd2a1, - 0x1e86: 0xd2a1, 0x1e87: 0xd2c1, 0x1e88: 0xd2e1, 0x1e89: 0xd301, 0x1e8a: 0xd321, 0x1e8b: 0xd341, - 0x1e8c: 0xd361, 0x1e8d: 0xd381, 0x1e8e: 0xd3a1, 0x1e8f: 0xd3c1, 0x1e90: 0xd3e1, 0x1e91: 0xd401, - 0x1e92: 0xd421, 0x1e93: 0xd441, 0x1e94: 0xd461, 0x1e95: 0xd481, 0x1e96: 0xd4a1, 0x1e97: 0xd4c1, - 0x1e98: 0xd4e1, 0x1e99: 0x8c35, 0x1e9a: 0xd501, 0x1e9b: 0xd521, 0x1e9c: 0xd541, 0x1e9d: 0xc371, - 0x1e9e: 0xd561, 0x1e9f: 0xd581, 0x1ea0: 0x8c55, 0x1ea1: 0x8c75, 0x1ea2: 0xd5a1, 0x1ea3: 0xd5c1, - 0x1ea4: 0xd5e1, 0x1ea5: 0xd601, 0x1ea6: 0xd621, 0x1ea7: 0xd641, 0x1ea8: 0x2040, 0x1ea9: 0xd661, - 0x1eaa: 0xd681, 0x1eab: 0xd681, 0x1eac: 0x8c95, 0x1ead: 0xd6a1, 0x1eae: 0xd6c1, 0x1eaf: 0xd6e1, - 0x1eb0: 0xd701, 0x1eb1: 0x8cb5, 0x1eb2: 0xd721, 0x1eb3: 0xd741, 0x1eb4: 0x2040, 0x1eb5: 0xd761, - 0x1eb6: 0xd781, 0x1eb7: 0xd7a1, 0x1eb8: 0xd7c1, 0x1eb9: 0xd7e1, 0x1eba: 0xd801, 0x1ebb: 0x8cd5, - 0x1ebc: 0xd821, 0x1ebd: 0x8cf5, 0x1ebe: 0xd841, 0x1ebf: 0xd861, + 0x1e80: 0x2b59, 0x1e81: 0x2b61, 0x1e82: 0x2b69, 0x1e83: 0x2b71, 0x1e84: 0x2b79, 0x1e85: 0x2b81, + 0x1e86: 0x2b89, 0x1e87: 0x2b91, 0x1e88: 0x2b99, 0x1e89: 0x2ba1, 0x1e8a: 0x8e35, 0x1e8b: 0x2ba9, + 0x1e8c: 0x2bb1, 0x1e8d: 0x2bb9, 0x1e8e: 0x2bc1, 0x1e8f: 0x2bc9, 0x1e90: 0x2bd1, 0x1e91: 0x2bd9, + 0x1e92: 0x2be1, 0x1e93: 0x2be9, 0x1e94: 0x2bf1, 0x1e95: 0x2bf9, 0x1e96: 0x2c01, 0x1e97: 0x2c09, + 0x1e98: 0x2c11, 0x1e99: 0x2c19, 0x1e9a: 0x2c21, 0x1e9b: 0x2c29, 0x1e9c: 0x2c31, 0x1e9d: 0x8e55, + 0x1e9e: 0x2c39, 0x1e9f: 0x2c41, 0x1ea0: 0x2c49, 0x1ea1: 0x2c51, 0x1ea2: 0x2c59, 0x1ea3: 0x8e75, + 0x1ea4: 0x2c61, 0x1ea5: 0x2c69, 0x1ea6: 0x2c71, 0x1ea7: 0x2c79, 0x1ea8: 0x2c81, 0x1ea9: 0x2c89, + 0x1eaa: 0x2c91, 0x1eab: 0x2c99, 0x1eac: 0x7f0d, 0x1ead: 0x2ca1, 0x1eae: 0x2ca9, 0x1eaf: 0x2cb1, + 0x1eb0: 0x8e95, 0x1eb1: 0x2cb9, 0x1eb2: 0x2cc1, 0x1eb3: 0x2cc9, 0x1eb4: 0x2cd1, 0x1eb5: 0x2cd9, + 0x1eb6: 0x2ce1, 0x1eb7: 0x8eb5, 0x1eb8: 0x8ed5, 0x1eb9: 0x8ef5, 0x1eba: 0x2ce9, 0x1ebb: 0x8f15, + 0x1ebc: 0x2cf1, 0x1ebd: 0x2cf9, 0x1ebe: 0x2d01, 0x1ebf: 0x2d09, // Block 0x7b, offset 0x1ec0 - 0x1ec0: 0xd881, 0x1ec1: 0xd8a1, 0x1ec2: 0xd8c1, 0x1ec3: 0xd8e1, 0x1ec4: 0xd901, 0x1ec5: 0xd921, - 0x1ec6: 0xd941, 0x1ec7: 0xd961, 0x1ec8: 0xd981, 0x1ec9: 0x8d15, 0x1eca: 0xd9a1, 0x1ecb: 0xd9c1, - 0x1ecc: 0xd9e1, 0x1ecd: 0xda01, 0x1ece: 0xda21, 0x1ecf: 0x8d35, 0x1ed0: 0xda41, 0x1ed1: 0x8d55, - 0x1ed2: 0x8d75, 0x1ed3: 0xda61, 0x1ed4: 0xda81, 0x1ed5: 0xda81, 0x1ed6: 0xdaa1, 0x1ed7: 0x8d95, - 0x1ed8: 0x8db5, 0x1ed9: 0xdac1, 0x1eda: 0xdae1, 0x1edb: 0xdb01, 0x1edc: 0xdb21, 0x1edd: 0xdb41, - 0x1ede: 0xdb61, 0x1edf: 0xdb81, 0x1ee0: 0xdba1, 0x1ee1: 0xdbc1, 0x1ee2: 0xdbe1, 0x1ee3: 0xdc01, - 0x1ee4: 0x8dd5, 0x1ee5: 0xdc21, 0x1ee6: 0xdc41, 0x1ee7: 0xdc61, 0x1ee8: 0xdc81, 0x1ee9: 0xdc61, - 0x1eea: 0xdca1, 0x1eeb: 0xdcc1, 0x1eec: 0xdce1, 0x1eed: 0xdd01, 0x1eee: 0xdd21, 0x1eef: 0xdd41, - 0x1ef0: 0xdd61, 0x1ef1: 0xdd81, 0x1ef2: 0xdda1, 0x1ef3: 0xddc1, 0x1ef4: 0xdde1, 0x1ef5: 0xde01, - 0x1ef6: 0xde21, 0x1ef7: 0xde41, 0x1ef8: 0x8df5, 0x1ef9: 0xde61, 0x1efa: 0xde81, 0x1efb: 0xdea1, - 0x1efc: 0xdec1, 0x1efd: 0xdee1, 0x1efe: 0x8e15, 0x1eff: 0xdf01, + 0x1ec0: 0x2d11, 0x1ec1: 0x2d19, 0x1ec2: 0x2d21, 0x1ec3: 0x2d29, 0x1ec4: 0x2d31, 0x1ec5: 0x2d39, + 0x1ec6: 
0x8f35, 0x1ec7: 0x2d41, 0x1ec8: 0x2d49, 0x1ec9: 0x2d51, 0x1eca: 0x2d59, 0x1ecb: 0x2d61, + 0x1ecc: 0x2d69, 0x1ecd: 0x8f55, 0x1ece: 0x2d71, 0x1ecf: 0x2d79, 0x1ed0: 0x8f75, 0x1ed1: 0x8f95, + 0x1ed2: 0x2d81, 0x1ed3: 0x2d89, 0x1ed4: 0x2d91, 0x1ed5: 0x2d99, 0x1ed6: 0x2da1, 0x1ed7: 0x2da9, + 0x1ed8: 0x2db1, 0x1ed9: 0x2db9, 0x1eda: 0x2dc1, 0x1edb: 0x8fb5, 0x1edc: 0x2dc9, 0x1edd: 0x8fd5, + 0x1ede: 0x2dd1, 0x1edf: 0x2040, 0x1ee0: 0x2dd9, 0x1ee1: 0x2de1, 0x1ee2: 0x2de9, 0x1ee3: 0x8ff5, + 0x1ee4: 0x2df1, 0x1ee5: 0x2df9, 0x1ee6: 0x9015, 0x1ee7: 0x9035, 0x1ee8: 0x2e01, 0x1ee9: 0x2e09, + 0x1eea: 0x2e11, 0x1eeb: 0x2e19, 0x1eec: 0x2e21, 0x1eed: 0x2e21, 0x1eee: 0x2e29, 0x1eef: 0x2e31, + 0x1ef0: 0x2e39, 0x1ef1: 0x2e41, 0x1ef2: 0x2e49, 0x1ef3: 0x2e51, 0x1ef4: 0x2e59, 0x1ef5: 0x9055, + 0x1ef6: 0x2e61, 0x1ef7: 0x9075, 0x1ef8: 0x2e69, 0x1ef9: 0x9095, 0x1efa: 0x2e71, 0x1efb: 0x90b5, + 0x1efc: 0x90d5, 0x1efd: 0x90f5, 0x1efe: 0x2e79, 0x1eff: 0x2e81, // Block 0x7c, offset 0x1f00 - 0x1f00: 0xe601, 0x1f01: 0xe621, 0x1f02: 0xe641, 0x1f03: 0xe661, 0x1f04: 0xe681, 0x1f05: 0xe6a1, - 0x1f06: 0x8f35, 0x1f07: 0xe6c1, 0x1f08: 0xe6e1, 0x1f09: 0xe701, 0x1f0a: 0xe721, 0x1f0b: 0xe741, - 0x1f0c: 0xe761, 0x1f0d: 0x8f55, 0x1f0e: 0xe781, 0x1f0f: 0xe7a1, 0x1f10: 0x8f75, 0x1f11: 0x8f95, - 0x1f12: 0xe7c1, 0x1f13: 0xe7e1, 0x1f14: 0xe801, 0x1f15: 0xe821, 0x1f16: 0xe841, 0x1f17: 0xe861, - 0x1f18: 0xe881, 0x1f19: 0xe8a1, 0x1f1a: 0xe8c1, 0x1f1b: 0x8fb5, 0x1f1c: 0xe8e1, 0x1f1d: 0x8fd5, - 0x1f1e: 0xe901, 0x1f1f: 0x2040, 0x1f20: 0xe921, 0x1f21: 0xe941, 0x1f22: 0xe961, 0x1f23: 0x8ff5, - 0x1f24: 0xe981, 0x1f25: 0xe9a1, 0x1f26: 0x9015, 0x1f27: 0x9035, 0x1f28: 0xe9c1, 0x1f29: 0xe9e1, - 0x1f2a: 0xea01, 0x1f2b: 0xea21, 0x1f2c: 0xea41, 0x1f2d: 0xea41, 0x1f2e: 0xea61, 0x1f2f: 0xea81, - 0x1f30: 0xeaa1, 0x1f31: 0xeac1, 0x1f32: 0xeae1, 0x1f33: 0xeb01, 0x1f34: 0xeb21, 0x1f35: 0x9055, - 0x1f36: 0xeb41, 0x1f37: 0x9075, 0x1f38: 0xeb61, 0x1f39: 0x9095, 0x1f3a: 0xeb81, 0x1f3b: 0x90b5, - 0x1f3c: 0x90d5, 0x1f3d: 0x90f5, 0x1f3e: 0xeba1, 0x1f3f: 0xebc1, + 0x1f00: 0x2e89, 0x1f01: 0x9115, 0x1f02: 0x9135, 0x1f03: 0x9155, 0x1f04: 0x9175, 0x1f05: 0x2e91, + 0x1f06: 0x2e99, 0x1f07: 0x2e99, 0x1f08: 0x2ea1, 0x1f09: 0x2ea9, 0x1f0a: 0x2eb1, 0x1f0b: 0x2eb9, + 0x1f0c: 0x2ec1, 0x1f0d: 0x9195, 0x1f0e: 0x2ec9, 0x1f0f: 0x2ed1, 0x1f10: 0x2ed9, 0x1f11: 0x2ee1, + 0x1f12: 0x91b5, 0x1f13: 0x2ee9, 0x1f14: 0x91d5, 0x1f15: 0x91f5, 0x1f16: 0x2ef1, 0x1f17: 0x2ef9, + 0x1f18: 0x2f01, 0x1f19: 0x2f09, 0x1f1a: 0x2f11, 0x1f1b: 0x2f19, 0x1f1c: 0x9215, 0x1f1d: 0x9235, + 0x1f1e: 0x9255, 0x1f1f: 0x2040, 0x1f20: 0x2f21, 0x1f21: 0x9275, 0x1f22: 0x2f29, 0x1f23: 0x2f31, + 0x1f24: 0x2f39, 0x1f25: 0x9295, 0x1f26: 0x2f41, 0x1f27: 0x2f49, 0x1f28: 0x2f51, 0x1f29: 0x2f59, + 0x1f2a: 0x2f61, 0x1f2b: 0x92b5, 0x1f2c: 0x2f69, 0x1f2d: 0x2f71, 0x1f2e: 0x2f79, 0x1f2f: 0x2f81, + 0x1f30: 0x2f89, 0x1f31: 0x2f91, 0x1f32: 0x92d5, 0x1f33: 0x92f5, 0x1f34: 0x2f99, 0x1f35: 0x9315, + 0x1f36: 0x2fa1, 0x1f37: 0x9335, 0x1f38: 0x2fa9, 0x1f39: 0x2fb1, 0x1f3a: 0x2fb9, 0x1f3b: 0x9355, + 0x1f3c: 0x9375, 0x1f3d: 0x2fc1, 0x1f3e: 0x9395, 0x1f3f: 0x2fc9, // Block 0x7d, offset 0x1f40 - 0x1f40: 0xebe1, 0x1f41: 0x9115, 0x1f42: 0x9135, 0x1f43: 0x9155, 0x1f44: 0x9175, 0x1f45: 0xec01, - 0x1f46: 0xec21, 0x1f47: 0xec21, 0x1f48: 0xec41, 0x1f49: 0xec61, 0x1f4a: 0xec81, 0x1f4b: 0xeca1, - 0x1f4c: 0xecc1, 0x1f4d: 0x9195, 0x1f4e: 0xece1, 0x1f4f: 0xed01, 0x1f50: 0xed21, 0x1f51: 0xed41, - 0x1f52: 0x91b5, 0x1f53: 0xed61, 0x1f54: 0x91d5, 0x1f55: 0x91f5, 0x1f56: 0xed81, 0x1f57: 0xeda1, - 0x1f58: 0xedc1, 0x1f59: 0xede1, 0x1f5a: 0xee01, 0x1f5b: 0xee21, 0x1f5c: 
0x9215, 0x1f5d: 0x9235, - 0x1f5e: 0x9255, 0x1f5f: 0x2040, 0x1f60: 0xee41, 0x1f61: 0x9275, 0x1f62: 0xee61, 0x1f63: 0xee81, - 0x1f64: 0xeea1, 0x1f65: 0x9295, 0x1f66: 0xeec1, 0x1f67: 0xeee1, 0x1f68: 0xef01, 0x1f69: 0xef21, - 0x1f6a: 0xef41, 0x1f6b: 0x92b5, 0x1f6c: 0xef61, 0x1f6d: 0xef81, 0x1f6e: 0xefa1, 0x1f6f: 0xefc1, - 0x1f70: 0xefe1, 0x1f71: 0xf001, 0x1f72: 0x92d5, 0x1f73: 0x92f5, 0x1f74: 0xf021, 0x1f75: 0x9315, - 0x1f76: 0xf041, 0x1f77: 0x9335, 0x1f78: 0xf061, 0x1f79: 0xf081, 0x1f7a: 0xf0a1, 0x1f7b: 0x9355, - 0x1f7c: 0x9375, 0x1f7d: 0xf0c1, 0x1f7e: 0x9395, 0x1f7f: 0xf0e1, + 0x1f40: 0x93b5, 0x1f41: 0x2fd1, 0x1f42: 0x2fd9, 0x1f43: 0x2fe1, 0x1f44: 0x2fe9, 0x1f45: 0x2ff1, + 0x1f46: 0x2ff9, 0x1f47: 0x93d5, 0x1f48: 0x93f5, 0x1f49: 0x9415, 0x1f4a: 0x9435, 0x1f4b: 0x2a29, + 0x1f4c: 0x3001, 0x1f4d: 0x3009, 0x1f4e: 0x3011, 0x1f4f: 0x3019, 0x1f50: 0x3021, 0x1f51: 0x3029, + 0x1f52: 0x3031, 0x1f53: 0x3039, 0x1f54: 0x3041, 0x1f55: 0x3049, 0x1f56: 0x3051, 0x1f57: 0x9455, + 0x1f58: 0x3059, 0x1f59: 0x3061, 0x1f5a: 0x3069, 0x1f5b: 0x3071, 0x1f5c: 0x3079, 0x1f5d: 0x3081, + 0x1f5e: 0x3089, 0x1f5f: 0x3091, 0x1f60: 0x3099, 0x1f61: 0x30a1, 0x1f62: 0x30a9, 0x1f63: 0x30b1, + 0x1f64: 0x9475, 0x1f65: 0x9495, 0x1f66: 0x94b5, 0x1f67: 0x30b9, 0x1f68: 0x30c1, 0x1f69: 0x30c9, + 0x1f6a: 0x30d1, 0x1f6b: 0x94d5, 0x1f6c: 0x30d9, 0x1f6d: 0x94f5, 0x1f6e: 0x30e1, 0x1f6f: 0x30e9, + 0x1f70: 0x9515, 0x1f71: 0x9535, 0x1f72: 0x30f1, 0x1f73: 0x30f9, 0x1f74: 0x3101, 0x1f75: 0x3109, + 0x1f76: 0x3111, 0x1f77: 0x3119, 0x1f78: 0x3121, 0x1f79: 0x3129, 0x1f7a: 0x3131, 0x1f7b: 0x3139, + 0x1f7c: 0x3141, 0x1f7d: 0x3149, 0x1f7e: 0x3151, 0x1f7f: 0x2040, // Block 0x7e, offset 0x1f80 - 0x1f80: 0xf721, 0x1f81: 0xf741, 0x1f82: 0xf761, 0x1f83: 0xf781, 0x1f84: 0xf7a1, 0x1f85: 0x9555, - 0x1f86: 0xf7c1, 0x1f87: 0xf7e1, 0x1f88: 0xf801, 0x1f89: 0xf821, 0x1f8a: 0xf841, 0x1f8b: 0x9575, - 0x1f8c: 0x9595, 0x1f8d: 0xf861, 0x1f8e: 0xf881, 0x1f8f: 0xf8a1, 0x1f90: 0xf8c1, 0x1f91: 0xf8e1, - 0x1f92: 0xf901, 0x1f93: 0x95b5, 0x1f94: 0xf921, 0x1f95: 0xf941, 0x1f96: 0xf961, 0x1f97: 0xf981, - 0x1f98: 0x95d5, 0x1f99: 0x95f5, 0x1f9a: 0xf9a1, 0x1f9b: 0xf9c1, 0x1f9c: 0xf9e1, 0x1f9d: 0x9615, - 0x1f9e: 0xfa01, 0x1f9f: 0xfa21, 0x1fa0: 0x684d, 0x1fa1: 0x9635, 0x1fa2: 0xfa41, 0x1fa3: 0xfa61, - 0x1fa4: 0xfa81, 0x1fa5: 0x9655, 0x1fa6: 0xfaa1, 0x1fa7: 0xfac1, 0x1fa8: 0xfae1, 0x1fa9: 0xfb01, - 0x1faa: 0xfb21, 0x1fab: 0xfb41, 0x1fac: 0xfb61, 0x1fad: 0x9675, 0x1fae: 0xfb81, 0x1faf: 0xfba1, - 0x1fb0: 0xfbc1, 0x1fb1: 0x9695, 0x1fb2: 0xfbe1, 0x1fb3: 0xfc01, 0x1fb4: 0xfc21, 0x1fb5: 0xfc41, - 0x1fb6: 0x7b6d, 0x1fb7: 0x96b5, 0x1fb8: 0xfc61, 0x1fb9: 0xfc81, 0x1fba: 0xfca1, 0x1fbb: 0x96d5, - 0x1fbc: 0xfcc1, 0x1fbd: 0x96f5, 0x1fbe: 0xfce1, 0x1fbf: 0xfce1, + 0x1f80: 0x3159, 0x1f81: 0x3161, 0x1f82: 0x3169, 0x1f83: 0x3171, 0x1f84: 0x3179, 0x1f85: 0x9555, + 0x1f86: 0x3181, 0x1f87: 0x3189, 0x1f88: 0x3191, 0x1f89: 0x3199, 0x1f8a: 0x31a1, 0x1f8b: 0x9575, + 0x1f8c: 0x9595, 0x1f8d: 0x31a9, 0x1f8e: 0x31b1, 0x1f8f: 0x31b9, 0x1f90: 0x31c1, 0x1f91: 0x31c9, + 0x1f92: 0x31d1, 0x1f93: 0x95b5, 0x1f94: 0x31d9, 0x1f95: 0x31e1, 0x1f96: 0x31e9, 0x1f97: 0x31f1, + 0x1f98: 0x95d5, 0x1f99: 0x95f5, 0x1f9a: 0x31f9, 0x1f9b: 0x3201, 0x1f9c: 0x3209, 0x1f9d: 0x9615, + 0x1f9e: 0x3211, 0x1f9f: 0x3219, 0x1fa0: 0x684d, 0x1fa1: 0x9635, 0x1fa2: 0x3221, 0x1fa3: 0x3229, + 0x1fa4: 0x3231, 0x1fa5: 0x9655, 0x1fa6: 0x3239, 0x1fa7: 0x3241, 0x1fa8: 0x3249, 0x1fa9: 0x3251, + 0x1faa: 0x3259, 0x1fab: 0x3261, 0x1fac: 0x3269, 0x1fad: 0x9675, 0x1fae: 0x3271, 0x1faf: 0x3279, + 0x1fb0: 0x3281, 0x1fb1: 0x9695, 0x1fb2: 0x3289, 0x1fb3: 0x3291, 
0x1fb4: 0x3299, 0x1fb5: 0x32a1, + 0x1fb6: 0x7b6d, 0x1fb7: 0x96b5, 0x1fb8: 0x32a9, 0x1fb9: 0x32b1, 0x1fba: 0x32b9, 0x1fbb: 0x96d5, + 0x1fbc: 0x32c1, 0x1fbd: 0x96f5, 0x1fbe: 0x32c9, 0x1fbf: 0x32c9, // Block 0x7f, offset 0x1fc0 - 0x1fc0: 0xfd01, 0x1fc1: 0x9715, 0x1fc2: 0xfd21, 0x1fc3: 0xfd41, 0x1fc4: 0xfd61, 0x1fc5: 0xfd81, - 0x1fc6: 0xfda1, 0x1fc7: 0xfdc1, 0x1fc8: 0xfde1, 0x1fc9: 0x9735, 0x1fca: 0xfe01, 0x1fcb: 0xfe21, - 0x1fcc: 0xfe41, 0x1fcd: 0xfe61, 0x1fce: 0xfe81, 0x1fcf: 0xfea1, 0x1fd0: 0x9755, 0x1fd1: 0xfec1, - 0x1fd2: 0x9775, 0x1fd3: 0x9795, 0x1fd4: 0x97b5, 0x1fd5: 0xfee1, 0x1fd6: 0xff01, 0x1fd7: 0xff21, - 0x1fd8: 0xff41, 0x1fd9: 0xff61, 0x1fda: 0xff81, 0x1fdb: 0xffa1, 0x1fdc: 0xffc1, 0x1fdd: 0x97d5, + 0x1fc0: 0x32d1, 0x1fc1: 0x9715, 0x1fc2: 0x32d9, 0x1fc3: 0x32e1, 0x1fc4: 0x32e9, 0x1fc5: 0x32f1, + 0x1fc6: 0x32f9, 0x1fc7: 0x3301, 0x1fc8: 0x3309, 0x1fc9: 0x9735, 0x1fca: 0x3311, 0x1fcb: 0x3319, + 0x1fcc: 0x3321, 0x1fcd: 0x3329, 0x1fce: 0x3331, 0x1fcf: 0x3339, 0x1fd0: 0x9755, 0x1fd1: 0x3341, + 0x1fd2: 0x9775, 0x1fd3: 0x9795, 0x1fd4: 0x97b5, 0x1fd5: 0x3349, 0x1fd6: 0x3351, 0x1fd7: 0x3359, + 0x1fd8: 0x3361, 0x1fd9: 0x3369, 0x1fda: 0x3371, 0x1fdb: 0x3379, 0x1fdc: 0x3381, 0x1fdd: 0x97d5, 0x1fde: 0x0040, 0x1fdf: 0x0040, 0x1fe0: 0x0040, 0x1fe1: 0x0040, 0x1fe2: 0x0040, 0x1fe3: 0x0040, 0x1fe4: 0x0040, 0x1fe5: 0x0040, 0x1fe6: 0x0040, 0x1fe7: 0x0040, 0x1fe8: 0x0040, 0x1fe9: 0x0040, 0x1fea: 0x0040, 0x1feb: 0x0040, 0x1fec: 0x0040, 0x1fed: 0x0040, 0x1fee: 0x0040, 0x1fef: 0x0040, @@ -2134,7 +2277,7 @@ 0x1b8: 0xd6, 0x1b9: 0xd7, 0x1ba: 0xd8, 0x1bb: 0xd9, 0x1bc: 0xda, 0x1bd: 0xdb, 0x1be: 0xdc, 0x1bf: 0x37, // Block 0x7, offset 0x1c0 0x1c0: 0x38, 0x1c1: 0xdd, 0x1c2: 0xde, 0x1c3: 0xdf, 0x1c4: 0xe0, 0x1c5: 0x39, 0x1c6: 0x3a, 0x1c7: 0xe1, - 0x1c8: 0xe2, 0x1c9: 0x3b, 0x1ca: 0x3c, 0x1cb: 0x3d, 0x1cc: 0x3e, 0x1cd: 0x3f, 0x1ce: 0x40, 0x1cf: 0x41, + 0x1c8: 0xe2, 0x1c9: 0x3b, 0x1ca: 0x3c, 0x1cb: 0x3d, 0x1cc: 0xe3, 0x1cd: 0xe4, 0x1ce: 0x3e, 0x1cf: 0x3f, 0x1d0: 0xa0, 0x1d1: 0xa0, 0x1d2: 0xa0, 0x1d3: 0xa0, 0x1d4: 0xa0, 0x1d5: 0xa0, 0x1d6: 0xa0, 0x1d7: 0xa0, 0x1d8: 0xa0, 0x1d9: 0xa0, 0x1da: 0xa0, 0x1db: 0xa0, 0x1dc: 0xa0, 0x1dd: 0xa0, 0x1de: 0xa0, 0x1df: 0xa0, 0x1e0: 0xa0, 0x1e1: 0xa0, 0x1e2: 0xa0, 0x1e3: 0xa0, 0x1e4: 0xa0, 0x1e5: 0xa0, 0x1e6: 0xa0, 0x1e7: 0xa0, @@ -2167,143 +2310,143 @@ 0x2a0: 0xa0, 0x2a1: 0xa0, 0x2a2: 0xa0, 0x2a3: 0xa0, 0x2a4: 0xa0, 0x2a5: 0xa0, 0x2a6: 0xa0, 0x2a7: 0xa0, 0x2a8: 0xa0, 0x2a9: 0xa0, 0x2aa: 0xa0, 0x2ab: 0xa0, 0x2ac: 0xa0, 0x2ad: 0xa0, 0x2ae: 0xa0, 0x2af: 0xa0, 0x2b0: 0xa0, 0x2b1: 0xa0, 0x2b2: 0xa0, 0x2b3: 0xa0, 0x2b4: 0xa0, 0x2b5: 0xa0, 0x2b6: 0xa0, 0x2b7: 0xa0, - 0x2b8: 0xa0, 0x2b9: 0xa0, 0x2ba: 0xa0, 0x2bb: 0xa0, 0x2bc: 0xa0, 0x2bd: 0xa0, 0x2be: 0xa0, 0x2bf: 0xe3, + 0x2b8: 0xa0, 0x2b9: 0xa0, 0x2ba: 0xa0, 0x2bb: 0xa0, 0x2bc: 0xa0, 0x2bd: 0xa0, 0x2be: 0xa0, 0x2bf: 0xe5, // Block 0xb, offset 0x2c0 0x2c0: 0xa0, 0x2c1: 0xa0, 0x2c2: 0xa0, 0x2c3: 0xa0, 0x2c4: 0xa0, 0x2c5: 0xa0, 0x2c6: 0xa0, 0x2c7: 0xa0, 0x2c8: 0xa0, 0x2c9: 0xa0, 0x2ca: 0xa0, 0x2cb: 0xa0, 0x2cc: 0xa0, 0x2cd: 0xa0, 0x2ce: 0xa0, 0x2cf: 0xa0, - 0x2d0: 0xa0, 0x2d1: 0xa0, 0x2d2: 0xe4, 0x2d3: 0xe5, 0x2d4: 0xa0, 0x2d5: 0xa0, 0x2d6: 0xa0, 0x2d7: 0xa0, - 0x2d8: 0xe6, 0x2d9: 0x42, 0x2da: 0x43, 0x2db: 0xe7, 0x2dc: 0x44, 0x2dd: 0x45, 0x2de: 0x46, 0x2df: 0xe8, - 0x2e0: 0xe9, 0x2e1: 0xea, 0x2e2: 0xeb, 0x2e3: 0xec, 0x2e4: 0xed, 0x2e5: 0xee, 0x2e6: 0xef, 0x2e7: 0xf0, - 0x2e8: 0xf1, 0x2e9: 0xf2, 0x2ea: 0xf3, 0x2eb: 0xf4, 0x2ec: 0xf5, 0x2ed: 0xf6, 0x2ee: 0xf7, 0x2ef: 0xf8, + 0x2d0: 0xa0, 0x2d1: 0xa0, 0x2d2: 0xe6, 0x2d3: 0xe7, 0x2d4: 0xa0, 
0x2d5: 0xa0, 0x2d6: 0xa0, 0x2d7: 0xa0, + 0x2d8: 0xe8, 0x2d9: 0x40, 0x2da: 0x41, 0x2db: 0xe9, 0x2dc: 0x42, 0x2dd: 0x43, 0x2de: 0x44, 0x2df: 0xea, + 0x2e0: 0xeb, 0x2e1: 0xec, 0x2e2: 0xed, 0x2e3: 0xee, 0x2e4: 0xef, 0x2e5: 0xf0, 0x2e6: 0xf1, 0x2e7: 0xf2, + 0x2e8: 0xf3, 0x2e9: 0xf4, 0x2ea: 0xf5, 0x2eb: 0xf6, 0x2ec: 0xf7, 0x2ed: 0xf8, 0x2ee: 0xf9, 0x2ef: 0xfa, 0x2f0: 0xa0, 0x2f1: 0xa0, 0x2f2: 0xa0, 0x2f3: 0xa0, 0x2f4: 0xa0, 0x2f5: 0xa0, 0x2f6: 0xa0, 0x2f7: 0xa0, 0x2f8: 0xa0, 0x2f9: 0xa0, 0x2fa: 0xa0, 0x2fb: 0xa0, 0x2fc: 0xa0, 0x2fd: 0xa0, 0x2fe: 0xa0, 0x2ff: 0xa0, // Block 0xc, offset 0x300 0x300: 0xa0, 0x301: 0xa0, 0x302: 0xa0, 0x303: 0xa0, 0x304: 0xa0, 0x305: 0xa0, 0x306: 0xa0, 0x307: 0xa0, 0x308: 0xa0, 0x309: 0xa0, 0x30a: 0xa0, 0x30b: 0xa0, 0x30c: 0xa0, 0x30d: 0xa0, 0x30e: 0xa0, 0x30f: 0xa0, 0x310: 0xa0, 0x311: 0xa0, 0x312: 0xa0, 0x313: 0xa0, 0x314: 0xa0, 0x315: 0xa0, 0x316: 0xa0, 0x317: 0xa0, - 0x318: 0xa0, 0x319: 0xa0, 0x31a: 0xa0, 0x31b: 0xa0, 0x31c: 0xa0, 0x31d: 0xa0, 0x31e: 0xf9, 0x31f: 0xfa, + 0x318: 0xa0, 0x319: 0xa0, 0x31a: 0xa0, 0x31b: 0xa0, 0x31c: 0xa0, 0x31d: 0xa0, 0x31e: 0xfb, 0x31f: 0xfc, // Block 0xd, offset 0x340 - 0x340: 0xfb, 0x341: 0xfb, 0x342: 0xfb, 0x343: 0xfb, 0x344: 0xfb, 0x345: 0xfb, 0x346: 0xfb, 0x347: 0xfb, - 0x348: 0xfb, 0x349: 0xfb, 0x34a: 0xfb, 0x34b: 0xfb, 0x34c: 0xfb, 0x34d: 0xfb, 0x34e: 0xfb, 0x34f: 0xfb, - 0x350: 0xfb, 0x351: 0xfb, 0x352: 0xfb, 0x353: 0xfb, 0x354: 0xfb, 0x355: 0xfb, 0x356: 0xfb, 0x357: 0xfb, - 0x358: 0xfb, 0x359: 0xfb, 0x35a: 0xfb, 0x35b: 0xfb, 0x35c: 0xfb, 0x35d: 0xfb, 0x35e: 0xfb, 0x35f: 0xfb, - 0x360: 0xfb, 0x361: 0xfb, 0x362: 0xfb, 0x363: 0xfb, 0x364: 0xfb, 0x365: 0xfb, 0x366: 0xfb, 0x367: 0xfb, - 0x368: 0xfb, 0x369: 0xfb, 0x36a: 0xfb, 0x36b: 0xfb, 0x36c: 0xfb, 0x36d: 0xfb, 0x36e: 0xfb, 0x36f: 0xfb, - 0x370: 0xfb, 0x371: 0xfb, 0x372: 0xfb, 0x373: 0xfb, 0x374: 0xfb, 0x375: 0xfb, 0x376: 0xfb, 0x377: 0xfb, - 0x378: 0xfb, 0x379: 0xfb, 0x37a: 0xfb, 0x37b: 0xfb, 0x37c: 0xfb, 0x37d: 0xfb, 0x37e: 0xfb, 0x37f: 0xfb, + 0x340: 0xfd, 0x341: 0xfd, 0x342: 0xfd, 0x343: 0xfd, 0x344: 0xfd, 0x345: 0xfd, 0x346: 0xfd, 0x347: 0xfd, + 0x348: 0xfd, 0x349: 0xfd, 0x34a: 0xfd, 0x34b: 0xfd, 0x34c: 0xfd, 0x34d: 0xfd, 0x34e: 0xfd, 0x34f: 0xfd, + 0x350: 0xfd, 0x351: 0xfd, 0x352: 0xfd, 0x353: 0xfd, 0x354: 0xfd, 0x355: 0xfd, 0x356: 0xfd, 0x357: 0xfd, + 0x358: 0xfd, 0x359: 0xfd, 0x35a: 0xfd, 0x35b: 0xfd, 0x35c: 0xfd, 0x35d: 0xfd, 0x35e: 0xfd, 0x35f: 0xfd, + 0x360: 0xfd, 0x361: 0xfd, 0x362: 0xfd, 0x363: 0xfd, 0x364: 0xfd, 0x365: 0xfd, 0x366: 0xfd, 0x367: 0xfd, + 0x368: 0xfd, 0x369: 0xfd, 0x36a: 0xfd, 0x36b: 0xfd, 0x36c: 0xfd, 0x36d: 0xfd, 0x36e: 0xfd, 0x36f: 0xfd, + 0x370: 0xfd, 0x371: 0xfd, 0x372: 0xfd, 0x373: 0xfd, 0x374: 0xfd, 0x375: 0xfd, 0x376: 0xfd, 0x377: 0xfd, + 0x378: 0xfd, 0x379: 0xfd, 0x37a: 0xfd, 0x37b: 0xfd, 0x37c: 0xfd, 0x37d: 0xfd, 0x37e: 0xfd, 0x37f: 0xfd, // Block 0xe, offset 0x380 - 0x380: 0xfb, 0x381: 0xfb, 0x382: 0xfb, 0x383: 0xfb, 0x384: 0xfb, 0x385: 0xfb, 0x386: 0xfb, 0x387: 0xfb, - 0x388: 0xfb, 0x389: 0xfb, 0x38a: 0xfb, 0x38b: 0xfb, 0x38c: 0xfb, 0x38d: 0xfb, 0x38e: 0xfb, 0x38f: 0xfb, - 0x390: 0xfb, 0x391: 0xfb, 0x392: 0xfb, 0x393: 0xfb, 0x394: 0xfb, 0x395: 0xfb, 0x396: 0xfb, 0x397: 0xfb, - 0x398: 0xfb, 0x399: 0xfb, 0x39a: 0xfb, 0x39b: 0xfb, 0x39c: 0xfb, 0x39d: 0xfb, 0x39e: 0xfb, 0x39f: 0xfb, - 0x3a0: 0xfb, 0x3a1: 0xfb, 0x3a2: 0xfb, 0x3a3: 0xfb, 0x3a4: 0xfc, 0x3a5: 0xfd, 0x3a6: 0xfe, 0x3a7: 0xff, - 0x3a8: 0x47, 0x3a9: 0x100, 0x3aa: 0x101, 0x3ab: 0x48, 0x3ac: 0x49, 0x3ad: 0x4a, 0x3ae: 0x4b, 0x3af: 0x4c, - 0x3b0: 0x102, 0x3b1: 0x4d, 0x3b2: 0x4e, 0x3b3: 
0x4f, 0x3b4: 0x50, 0x3b5: 0x51, 0x3b6: 0x103, 0x3b7: 0x52, - 0x3b8: 0x53, 0x3b9: 0x54, 0x3ba: 0x55, 0x3bb: 0x56, 0x3bc: 0x57, 0x3bd: 0x58, 0x3be: 0x59, 0x3bf: 0x5a, + 0x380: 0xfd, 0x381: 0xfd, 0x382: 0xfd, 0x383: 0xfd, 0x384: 0xfd, 0x385: 0xfd, 0x386: 0xfd, 0x387: 0xfd, + 0x388: 0xfd, 0x389: 0xfd, 0x38a: 0xfd, 0x38b: 0xfd, 0x38c: 0xfd, 0x38d: 0xfd, 0x38e: 0xfd, 0x38f: 0xfd, + 0x390: 0xfd, 0x391: 0xfd, 0x392: 0xfd, 0x393: 0xfd, 0x394: 0xfd, 0x395: 0xfd, 0x396: 0xfd, 0x397: 0xfd, + 0x398: 0xfd, 0x399: 0xfd, 0x39a: 0xfd, 0x39b: 0xfd, 0x39c: 0xfd, 0x39d: 0xfd, 0x39e: 0xfd, 0x39f: 0xfd, + 0x3a0: 0xfd, 0x3a1: 0xfd, 0x3a2: 0xfd, 0x3a3: 0xfd, 0x3a4: 0xfe, 0x3a5: 0xff, 0x3a6: 0x100, 0x3a7: 0x101, + 0x3a8: 0x45, 0x3a9: 0x102, 0x3aa: 0x103, 0x3ab: 0x46, 0x3ac: 0x47, 0x3ad: 0x48, 0x3ae: 0x49, 0x3af: 0x4a, + 0x3b0: 0x104, 0x3b1: 0x4b, 0x3b2: 0x4c, 0x3b3: 0x4d, 0x3b4: 0x4e, 0x3b5: 0x4f, 0x3b6: 0x105, 0x3b7: 0x50, + 0x3b8: 0x51, 0x3b9: 0x52, 0x3ba: 0x53, 0x3bb: 0x54, 0x3bc: 0x55, 0x3bd: 0x56, 0x3be: 0x57, 0x3bf: 0x58, // Block 0xf, offset 0x3c0 - 0x3c0: 0x104, 0x3c1: 0x105, 0x3c2: 0xa0, 0x3c3: 0x106, 0x3c4: 0x107, 0x3c5: 0x9c, 0x3c6: 0x108, 0x3c7: 0x109, - 0x3c8: 0xfb, 0x3c9: 0xfb, 0x3ca: 0x10a, 0x3cb: 0x10b, 0x3cc: 0x10c, 0x3cd: 0x10d, 0x3ce: 0x10e, 0x3cf: 0x10f, - 0x3d0: 0x110, 0x3d1: 0xa0, 0x3d2: 0x111, 0x3d3: 0x112, 0x3d4: 0x113, 0x3d5: 0x114, 0x3d6: 0xfb, 0x3d7: 0xfb, - 0x3d8: 0xa0, 0x3d9: 0xa0, 0x3da: 0xa0, 0x3db: 0xa0, 0x3dc: 0x115, 0x3dd: 0x116, 0x3de: 0xfb, 0x3df: 0xfb, - 0x3e0: 0x117, 0x3e1: 0x118, 0x3e2: 0x119, 0x3e3: 0x11a, 0x3e4: 0x11b, 0x3e5: 0xfb, 0x3e6: 0x11c, 0x3e7: 0x11d, - 0x3e8: 0x11e, 0x3e9: 0x11f, 0x3ea: 0x120, 0x3eb: 0x5b, 0x3ec: 0x121, 0x3ed: 0x122, 0x3ee: 0x5c, 0x3ef: 0xfb, - 0x3f0: 0x123, 0x3f1: 0x124, 0x3f2: 0x125, 0x3f3: 0x126, 0x3f4: 0x127, 0x3f5: 0xfb, 0x3f6: 0xfb, 0x3f7: 0xfb, - 0x3f8: 0xfb, 0x3f9: 0x128, 0x3fa: 0x129, 0x3fb: 0xfb, 0x3fc: 0x12a, 0x3fd: 0x12b, 0x3fe: 0x12c, 0x3ff: 0x12d, + 0x3c0: 0x106, 0x3c1: 0x107, 0x3c2: 0xa0, 0x3c3: 0x108, 0x3c4: 0x109, 0x3c5: 0x9c, 0x3c6: 0x10a, 0x3c7: 0x10b, + 0x3c8: 0xfd, 0x3c9: 0xfd, 0x3ca: 0x10c, 0x3cb: 0x10d, 0x3cc: 0x10e, 0x3cd: 0x10f, 0x3ce: 0x110, 0x3cf: 0x111, + 0x3d0: 0x112, 0x3d1: 0xa0, 0x3d2: 0x113, 0x3d3: 0x114, 0x3d4: 0x115, 0x3d5: 0x116, 0x3d6: 0xfd, 0x3d7: 0xfd, + 0x3d8: 0xa0, 0x3d9: 0xa0, 0x3da: 0xa0, 0x3db: 0xa0, 0x3dc: 0x117, 0x3dd: 0x118, 0x3de: 0xfd, 0x3df: 0xfd, + 0x3e0: 0x119, 0x3e1: 0x11a, 0x3e2: 0x11b, 0x3e3: 0x11c, 0x3e4: 0x11d, 0x3e5: 0xfd, 0x3e6: 0x11e, 0x3e7: 0x11f, + 0x3e8: 0x120, 0x3e9: 0x121, 0x3ea: 0x122, 0x3eb: 0x59, 0x3ec: 0x123, 0x3ed: 0x124, 0x3ee: 0x5a, 0x3ef: 0xfd, + 0x3f0: 0x125, 0x3f1: 0x126, 0x3f2: 0x127, 0x3f3: 0x128, 0x3f4: 0x129, 0x3f5: 0xfd, 0x3f6: 0xfd, 0x3f7: 0xfd, + 0x3f8: 0xfd, 0x3f9: 0x12a, 0x3fa: 0x12b, 0x3fb: 0xfd, 0x3fc: 0x12c, 0x3fd: 0x12d, 0x3fe: 0x12e, 0x3ff: 0x12f, // Block 0x10, offset 0x400 - 0x400: 0x12e, 0x401: 0x12f, 0x402: 0x130, 0x403: 0x131, 0x404: 0x132, 0x405: 0x133, 0x406: 0x134, 0x407: 0x135, - 0x408: 0x136, 0x409: 0xfb, 0x40a: 0x137, 0x40b: 0x138, 0x40c: 0x5d, 0x40d: 0x5e, 0x40e: 0xfb, 0x40f: 0xfb, - 0x410: 0x139, 0x411: 0x13a, 0x412: 0x13b, 0x413: 0x13c, 0x414: 0xfb, 0x415: 0xfb, 0x416: 0x13d, 0x417: 0x13e, - 0x418: 0x13f, 0x419: 0x140, 0x41a: 0x141, 0x41b: 0x142, 0x41c: 0x143, 0x41d: 0xfb, 0x41e: 0xfb, 0x41f: 0xfb, - 0x420: 0x144, 0x421: 0xfb, 0x422: 0x145, 0x423: 0x146, 0x424: 0x5f, 0x425: 0x147, 0x426: 0x148, 0x427: 0x149, - 0x428: 0x14a, 0x429: 0x14b, 0x42a: 0x14c, 0x42b: 0x14d, 0x42c: 0xfb, 0x42d: 0xfb, 0x42e: 0xfb, 0x42f: 0xfb, - 0x430: 0x14e, 0x431: 0x14f, 
0x432: 0x150, 0x433: 0xfb, 0x434: 0x151, 0x435: 0x152, 0x436: 0x153, 0x437: 0xfb, - 0x438: 0xfb, 0x439: 0xfb, 0x43a: 0xfb, 0x43b: 0x154, 0x43c: 0xfb, 0x43d: 0xfb, 0x43e: 0x155, 0x43f: 0x156, + 0x400: 0x130, 0x401: 0x131, 0x402: 0x132, 0x403: 0x133, 0x404: 0x134, 0x405: 0x135, 0x406: 0x136, 0x407: 0x137, + 0x408: 0x138, 0x409: 0xfd, 0x40a: 0x139, 0x40b: 0x13a, 0x40c: 0x5b, 0x40d: 0x5c, 0x40e: 0xfd, 0x40f: 0xfd, + 0x410: 0x13b, 0x411: 0x13c, 0x412: 0x13d, 0x413: 0x13e, 0x414: 0xfd, 0x415: 0xfd, 0x416: 0x13f, 0x417: 0x140, + 0x418: 0x141, 0x419: 0x142, 0x41a: 0x143, 0x41b: 0x144, 0x41c: 0x145, 0x41d: 0xfd, 0x41e: 0xfd, 0x41f: 0xfd, + 0x420: 0x146, 0x421: 0xfd, 0x422: 0x147, 0x423: 0x148, 0x424: 0x5d, 0x425: 0x149, 0x426: 0x14a, 0x427: 0x14b, + 0x428: 0x14c, 0x429: 0x14d, 0x42a: 0x14e, 0x42b: 0x14f, 0x42c: 0xfd, 0x42d: 0xfd, 0x42e: 0xfd, 0x42f: 0xfd, + 0x430: 0x150, 0x431: 0x151, 0x432: 0x152, 0x433: 0xfd, 0x434: 0x153, 0x435: 0x154, 0x436: 0x155, 0x437: 0xfd, + 0x438: 0xfd, 0x439: 0xfd, 0x43a: 0xfd, 0x43b: 0x156, 0x43c: 0xfd, 0x43d: 0xfd, 0x43e: 0x157, 0x43f: 0x158, // Block 0x11, offset 0x440 0x440: 0xa0, 0x441: 0xa0, 0x442: 0xa0, 0x443: 0xa0, 0x444: 0xa0, 0x445: 0xa0, 0x446: 0xa0, 0x447: 0xa0, - 0x448: 0xa0, 0x449: 0xa0, 0x44a: 0xa0, 0x44b: 0xa0, 0x44c: 0xa0, 0x44d: 0xa0, 0x44e: 0x157, 0x44f: 0xfb, - 0x450: 0x9c, 0x451: 0x158, 0x452: 0xa0, 0x453: 0xa0, 0x454: 0xa0, 0x455: 0x159, 0x456: 0xfb, 0x457: 0xfb, - 0x458: 0xfb, 0x459: 0xfb, 0x45a: 0xfb, 0x45b: 0xfb, 0x45c: 0xfb, 0x45d: 0xfb, 0x45e: 0xfb, 0x45f: 0xfb, - 0x460: 0xfb, 0x461: 0xfb, 0x462: 0xfb, 0x463: 0xfb, 0x464: 0xfb, 0x465: 0xfb, 0x466: 0xfb, 0x467: 0xfb, - 0x468: 0xfb, 0x469: 0xfb, 0x46a: 0xfb, 0x46b: 0xfb, 0x46c: 0xfb, 0x46d: 0xfb, 0x46e: 0xfb, 0x46f: 0xfb, - 0x470: 0xfb, 0x471: 0xfb, 0x472: 0xfb, 0x473: 0xfb, 0x474: 0xfb, 0x475: 0xfb, 0x476: 0xfb, 0x477: 0xfb, - 0x478: 0xfb, 0x479: 0xfb, 0x47a: 0xfb, 0x47b: 0xfb, 0x47c: 0xfb, 0x47d: 0xfb, 0x47e: 0xfb, 0x47f: 0xfb, + 0x448: 0xa0, 0x449: 0xa0, 0x44a: 0xa0, 0x44b: 0xa0, 0x44c: 0xa0, 0x44d: 0xa0, 0x44e: 0x159, 0x44f: 0xfd, + 0x450: 0x9c, 0x451: 0x15a, 0x452: 0xa0, 0x453: 0xa0, 0x454: 0xa0, 0x455: 0x15b, 0x456: 0xfd, 0x457: 0xfd, + 0x458: 0xfd, 0x459: 0xfd, 0x45a: 0xfd, 0x45b: 0xfd, 0x45c: 0xfd, 0x45d: 0xfd, 0x45e: 0xfd, 0x45f: 0xfd, + 0x460: 0xfd, 0x461: 0xfd, 0x462: 0xfd, 0x463: 0xfd, 0x464: 0xfd, 0x465: 0xfd, 0x466: 0xfd, 0x467: 0xfd, + 0x468: 0xfd, 0x469: 0xfd, 0x46a: 0xfd, 0x46b: 0xfd, 0x46c: 0xfd, 0x46d: 0xfd, 0x46e: 0xfd, 0x46f: 0xfd, + 0x470: 0xfd, 0x471: 0xfd, 0x472: 0xfd, 0x473: 0xfd, 0x474: 0xfd, 0x475: 0xfd, 0x476: 0xfd, 0x477: 0xfd, + 0x478: 0xfd, 0x479: 0xfd, 0x47a: 0xfd, 0x47b: 0xfd, 0x47c: 0xfd, 0x47d: 0xfd, 0x47e: 0xfd, 0x47f: 0xfd, // Block 0x12, offset 0x480 0x480: 0xa0, 0x481: 0xa0, 0x482: 0xa0, 0x483: 0xa0, 0x484: 0xa0, 0x485: 0xa0, 0x486: 0xa0, 0x487: 0xa0, 0x488: 0xa0, 0x489: 0xa0, 0x48a: 0xa0, 0x48b: 0xa0, 0x48c: 0xa0, 0x48d: 0xa0, 0x48e: 0xa0, 0x48f: 0xa0, - 0x490: 0x15a, 0x491: 0xfb, 0x492: 0xfb, 0x493: 0xfb, 0x494: 0xfb, 0x495: 0xfb, 0x496: 0xfb, 0x497: 0xfb, - 0x498: 0xfb, 0x499: 0xfb, 0x49a: 0xfb, 0x49b: 0xfb, 0x49c: 0xfb, 0x49d: 0xfb, 0x49e: 0xfb, 0x49f: 0xfb, - 0x4a0: 0xfb, 0x4a1: 0xfb, 0x4a2: 0xfb, 0x4a3: 0xfb, 0x4a4: 0xfb, 0x4a5: 0xfb, 0x4a6: 0xfb, 0x4a7: 0xfb, - 0x4a8: 0xfb, 0x4a9: 0xfb, 0x4aa: 0xfb, 0x4ab: 0xfb, 0x4ac: 0xfb, 0x4ad: 0xfb, 0x4ae: 0xfb, 0x4af: 0xfb, - 0x4b0: 0xfb, 0x4b1: 0xfb, 0x4b2: 0xfb, 0x4b3: 0xfb, 0x4b4: 0xfb, 0x4b5: 0xfb, 0x4b6: 0xfb, 0x4b7: 0xfb, - 0x4b8: 0xfb, 0x4b9: 0xfb, 0x4ba: 0xfb, 0x4bb: 0xfb, 0x4bc: 0xfb, 0x4bd: 0xfb, 
0x4be: 0xfb, 0x4bf: 0xfb, + 0x490: 0x15c, 0x491: 0xfd, 0x492: 0xfd, 0x493: 0xfd, 0x494: 0xfd, 0x495: 0xfd, 0x496: 0xfd, 0x497: 0xfd, + 0x498: 0xfd, 0x499: 0xfd, 0x49a: 0xfd, 0x49b: 0xfd, 0x49c: 0xfd, 0x49d: 0xfd, 0x49e: 0xfd, 0x49f: 0xfd, + 0x4a0: 0xfd, 0x4a1: 0xfd, 0x4a2: 0xfd, 0x4a3: 0xfd, 0x4a4: 0xfd, 0x4a5: 0xfd, 0x4a6: 0xfd, 0x4a7: 0xfd, + 0x4a8: 0xfd, 0x4a9: 0xfd, 0x4aa: 0xfd, 0x4ab: 0xfd, 0x4ac: 0xfd, 0x4ad: 0xfd, 0x4ae: 0xfd, 0x4af: 0xfd, + 0x4b0: 0xfd, 0x4b1: 0xfd, 0x4b2: 0xfd, 0x4b3: 0xfd, 0x4b4: 0xfd, 0x4b5: 0xfd, 0x4b6: 0xfd, 0x4b7: 0xfd, + 0x4b8: 0xfd, 0x4b9: 0xfd, 0x4ba: 0xfd, 0x4bb: 0xfd, 0x4bc: 0xfd, 0x4bd: 0xfd, 0x4be: 0xfd, 0x4bf: 0xfd, // Block 0x13, offset 0x4c0 - 0x4c0: 0xfb, 0x4c1: 0xfb, 0x4c2: 0xfb, 0x4c3: 0xfb, 0x4c4: 0xfb, 0x4c5: 0xfb, 0x4c6: 0xfb, 0x4c7: 0xfb, - 0x4c8: 0xfb, 0x4c9: 0xfb, 0x4ca: 0xfb, 0x4cb: 0xfb, 0x4cc: 0xfb, 0x4cd: 0xfb, 0x4ce: 0xfb, 0x4cf: 0xfb, + 0x4c0: 0xfd, 0x4c1: 0xfd, 0x4c2: 0xfd, 0x4c3: 0xfd, 0x4c4: 0xfd, 0x4c5: 0xfd, 0x4c6: 0xfd, 0x4c7: 0xfd, + 0x4c8: 0xfd, 0x4c9: 0xfd, 0x4ca: 0xfd, 0x4cb: 0xfd, 0x4cc: 0xfd, 0x4cd: 0xfd, 0x4ce: 0xfd, 0x4cf: 0xfd, 0x4d0: 0xa0, 0x4d1: 0xa0, 0x4d2: 0xa0, 0x4d3: 0xa0, 0x4d4: 0xa0, 0x4d5: 0xa0, 0x4d6: 0xa0, 0x4d7: 0xa0, - 0x4d8: 0xa0, 0x4d9: 0x15b, 0x4da: 0xfb, 0x4db: 0xfb, 0x4dc: 0xfb, 0x4dd: 0xfb, 0x4de: 0xfb, 0x4df: 0xfb, - 0x4e0: 0xfb, 0x4e1: 0xfb, 0x4e2: 0xfb, 0x4e3: 0xfb, 0x4e4: 0xfb, 0x4e5: 0xfb, 0x4e6: 0xfb, 0x4e7: 0xfb, - 0x4e8: 0xfb, 0x4e9: 0xfb, 0x4ea: 0xfb, 0x4eb: 0xfb, 0x4ec: 0xfb, 0x4ed: 0xfb, 0x4ee: 0xfb, 0x4ef: 0xfb, - 0x4f0: 0xfb, 0x4f1: 0xfb, 0x4f2: 0xfb, 0x4f3: 0xfb, 0x4f4: 0xfb, 0x4f5: 0xfb, 0x4f6: 0xfb, 0x4f7: 0xfb, - 0x4f8: 0xfb, 0x4f9: 0xfb, 0x4fa: 0xfb, 0x4fb: 0xfb, 0x4fc: 0xfb, 0x4fd: 0xfb, 0x4fe: 0xfb, 0x4ff: 0xfb, + 0x4d8: 0xa0, 0x4d9: 0x15d, 0x4da: 0xfd, 0x4db: 0xfd, 0x4dc: 0xfd, 0x4dd: 0xfd, 0x4de: 0xfd, 0x4df: 0xfd, + 0x4e0: 0xfd, 0x4e1: 0xfd, 0x4e2: 0xfd, 0x4e3: 0xfd, 0x4e4: 0xfd, 0x4e5: 0xfd, 0x4e6: 0xfd, 0x4e7: 0xfd, + 0x4e8: 0xfd, 0x4e9: 0xfd, 0x4ea: 0xfd, 0x4eb: 0xfd, 0x4ec: 0xfd, 0x4ed: 0xfd, 0x4ee: 0xfd, 0x4ef: 0xfd, + 0x4f0: 0xfd, 0x4f1: 0xfd, 0x4f2: 0xfd, 0x4f3: 0xfd, 0x4f4: 0xfd, 0x4f5: 0xfd, 0x4f6: 0xfd, 0x4f7: 0xfd, + 0x4f8: 0xfd, 0x4f9: 0xfd, 0x4fa: 0xfd, 0x4fb: 0xfd, 0x4fc: 0xfd, 0x4fd: 0xfd, 0x4fe: 0xfd, 0x4ff: 0xfd, // Block 0x14, offset 0x500 - 0x500: 0xfb, 0x501: 0xfb, 0x502: 0xfb, 0x503: 0xfb, 0x504: 0xfb, 0x505: 0xfb, 0x506: 0xfb, 0x507: 0xfb, - 0x508: 0xfb, 0x509: 0xfb, 0x50a: 0xfb, 0x50b: 0xfb, 0x50c: 0xfb, 0x50d: 0xfb, 0x50e: 0xfb, 0x50f: 0xfb, - 0x510: 0xfb, 0x511: 0xfb, 0x512: 0xfb, 0x513: 0xfb, 0x514: 0xfb, 0x515: 0xfb, 0x516: 0xfb, 0x517: 0xfb, - 0x518: 0xfb, 0x519: 0xfb, 0x51a: 0xfb, 0x51b: 0xfb, 0x51c: 0xfb, 0x51d: 0xfb, 0x51e: 0xfb, 0x51f: 0xfb, + 0x500: 0xfd, 0x501: 0xfd, 0x502: 0xfd, 0x503: 0xfd, 0x504: 0xfd, 0x505: 0xfd, 0x506: 0xfd, 0x507: 0xfd, + 0x508: 0xfd, 0x509: 0xfd, 0x50a: 0xfd, 0x50b: 0xfd, 0x50c: 0xfd, 0x50d: 0xfd, 0x50e: 0xfd, 0x50f: 0xfd, + 0x510: 0xfd, 0x511: 0xfd, 0x512: 0xfd, 0x513: 0xfd, 0x514: 0xfd, 0x515: 0xfd, 0x516: 0xfd, 0x517: 0xfd, + 0x518: 0xfd, 0x519: 0xfd, 0x51a: 0xfd, 0x51b: 0xfd, 0x51c: 0xfd, 0x51d: 0xfd, 0x51e: 0xfd, 0x51f: 0xfd, 0x520: 0xa0, 0x521: 0xa0, 0x522: 0xa0, 0x523: 0xa0, 0x524: 0xa0, 0x525: 0xa0, 0x526: 0xa0, 0x527: 0xa0, - 0x528: 0x14d, 0x529: 0x15c, 0x52a: 0xfb, 0x52b: 0x15d, 0x52c: 0x15e, 0x52d: 0x15f, 0x52e: 0x160, 0x52f: 0xfb, - 0x530: 0xfb, 0x531: 0xfb, 0x532: 0xfb, 0x533: 0xfb, 0x534: 0xfb, 0x535: 0xfb, 0x536: 0xfb, 0x537: 0xfb, - 0x538: 0xfb, 0x539: 0x161, 0x53a: 0x162, 0x53b: 0xfb, 0x53c: 0xa0, 0x53d: 
0x163, 0x53e: 0x164, 0x53f: 0x165, + 0x528: 0x14f, 0x529: 0x15e, 0x52a: 0xfd, 0x52b: 0x15f, 0x52c: 0x160, 0x52d: 0x161, 0x52e: 0x162, 0x52f: 0xfd, + 0x530: 0xfd, 0x531: 0xfd, 0x532: 0xfd, 0x533: 0xfd, 0x534: 0xfd, 0x535: 0xfd, 0x536: 0xfd, 0x537: 0xfd, + 0x538: 0xfd, 0x539: 0x163, 0x53a: 0x164, 0x53b: 0xfd, 0x53c: 0xa0, 0x53d: 0x165, 0x53e: 0x166, 0x53f: 0x167, // Block 0x15, offset 0x540 0x540: 0xa0, 0x541: 0xa0, 0x542: 0xa0, 0x543: 0xa0, 0x544: 0xa0, 0x545: 0xa0, 0x546: 0xa0, 0x547: 0xa0, 0x548: 0xa0, 0x549: 0xa0, 0x54a: 0xa0, 0x54b: 0xa0, 0x54c: 0xa0, 0x54d: 0xa0, 0x54e: 0xa0, 0x54f: 0xa0, 0x550: 0xa0, 0x551: 0xa0, 0x552: 0xa0, 0x553: 0xa0, 0x554: 0xa0, 0x555: 0xa0, 0x556: 0xa0, 0x557: 0xa0, - 0x558: 0xa0, 0x559: 0xa0, 0x55a: 0xa0, 0x55b: 0xa0, 0x55c: 0xa0, 0x55d: 0xa0, 0x55e: 0xa0, 0x55f: 0x166, + 0x558: 0xa0, 0x559: 0xa0, 0x55a: 0xa0, 0x55b: 0xa0, 0x55c: 0xa0, 0x55d: 0xa0, 0x55e: 0xa0, 0x55f: 0x168, 0x560: 0xa0, 0x561: 0xa0, 0x562: 0xa0, 0x563: 0xa0, 0x564: 0xa0, 0x565: 0xa0, 0x566: 0xa0, 0x567: 0xa0, 0x568: 0xa0, 0x569: 0xa0, 0x56a: 0xa0, 0x56b: 0xa0, 0x56c: 0xa0, 0x56d: 0xa0, 0x56e: 0xa0, 0x56f: 0xa0, - 0x570: 0xa0, 0x571: 0xa0, 0x572: 0xa0, 0x573: 0x167, 0x574: 0x168, 0x575: 0xfb, 0x576: 0xfb, 0x577: 0xfb, - 0x578: 0xfb, 0x579: 0xfb, 0x57a: 0xfb, 0x57b: 0xfb, 0x57c: 0xfb, 0x57d: 0xfb, 0x57e: 0xfb, 0x57f: 0xfb, + 0x570: 0xa0, 0x571: 0xa0, 0x572: 0xa0, 0x573: 0x169, 0x574: 0x16a, 0x575: 0xfd, 0x576: 0xfd, 0x577: 0xfd, + 0x578: 0xfd, 0x579: 0xfd, 0x57a: 0xfd, 0x57b: 0xfd, 0x57c: 0xfd, 0x57d: 0xfd, 0x57e: 0xfd, 0x57f: 0xfd, // Block 0x16, offset 0x580 - 0x580: 0xa0, 0x581: 0xa0, 0x582: 0xa0, 0x583: 0xa0, 0x584: 0x169, 0x585: 0x16a, 0x586: 0xa0, 0x587: 0xa0, - 0x588: 0xa0, 0x589: 0xa0, 0x58a: 0xa0, 0x58b: 0x16b, 0x58c: 0xfb, 0x58d: 0xfb, 0x58e: 0xfb, 0x58f: 0xfb, - 0x590: 0xfb, 0x591: 0xfb, 0x592: 0xfb, 0x593: 0xfb, 0x594: 0xfb, 0x595: 0xfb, 0x596: 0xfb, 0x597: 0xfb, - 0x598: 0xfb, 0x599: 0xfb, 0x59a: 0xfb, 0x59b: 0xfb, 0x59c: 0xfb, 0x59d: 0xfb, 0x59e: 0xfb, 0x59f: 0xfb, - 0x5a0: 0xfb, 0x5a1: 0xfb, 0x5a2: 0xfb, 0x5a3: 0xfb, 0x5a4: 0xfb, 0x5a5: 0xfb, 0x5a6: 0xfb, 0x5a7: 0xfb, - 0x5a8: 0xfb, 0x5a9: 0xfb, 0x5aa: 0xfb, 0x5ab: 0xfb, 0x5ac: 0xfb, 0x5ad: 0xfb, 0x5ae: 0xfb, 0x5af: 0xfb, - 0x5b0: 0xa0, 0x5b1: 0x16c, 0x5b2: 0x16d, 0x5b3: 0xfb, 0x5b4: 0xfb, 0x5b5: 0xfb, 0x5b6: 0xfb, 0x5b7: 0xfb, - 0x5b8: 0xfb, 0x5b9: 0xfb, 0x5ba: 0xfb, 0x5bb: 0xfb, 0x5bc: 0xfb, 0x5bd: 0xfb, 0x5be: 0xfb, 0x5bf: 0xfb, + 0x580: 0xa0, 0x581: 0xa0, 0x582: 0xa0, 0x583: 0xa0, 0x584: 0x16b, 0x585: 0x16c, 0x586: 0xa0, 0x587: 0xa0, + 0x588: 0xa0, 0x589: 0xa0, 0x58a: 0xa0, 0x58b: 0x16d, 0x58c: 0xfd, 0x58d: 0xfd, 0x58e: 0xfd, 0x58f: 0xfd, + 0x590: 0xfd, 0x591: 0xfd, 0x592: 0xfd, 0x593: 0xfd, 0x594: 0xfd, 0x595: 0xfd, 0x596: 0xfd, 0x597: 0xfd, + 0x598: 0xfd, 0x599: 0xfd, 0x59a: 0xfd, 0x59b: 0xfd, 0x59c: 0xfd, 0x59d: 0xfd, 0x59e: 0xfd, 0x59f: 0xfd, + 0x5a0: 0xfd, 0x5a1: 0xfd, 0x5a2: 0xfd, 0x5a3: 0xfd, 0x5a4: 0xfd, 0x5a5: 0xfd, 0x5a6: 0xfd, 0x5a7: 0xfd, + 0x5a8: 0xfd, 0x5a9: 0xfd, 0x5aa: 0xfd, 0x5ab: 0xfd, 0x5ac: 0xfd, 0x5ad: 0xfd, 0x5ae: 0xfd, 0x5af: 0xfd, + 0x5b0: 0xa0, 0x5b1: 0x16e, 0x5b2: 0x16f, 0x5b3: 0xfd, 0x5b4: 0xfd, 0x5b5: 0xfd, 0x5b6: 0xfd, 0x5b7: 0xfd, + 0x5b8: 0xfd, 0x5b9: 0xfd, 0x5ba: 0xfd, 0x5bb: 0xfd, 0x5bc: 0xfd, 0x5bd: 0xfd, 0x5be: 0xfd, 0x5bf: 0xfd, // Block 0x17, offset 0x5c0 - 0x5c0: 0x9c, 0x5c1: 0x9c, 0x5c2: 0x9c, 0x5c3: 0x16e, 0x5c4: 0x16f, 0x5c5: 0x170, 0x5c6: 0x171, 0x5c7: 0x172, - 0x5c8: 0x9c, 0x5c9: 0x173, 0x5ca: 0xfb, 0x5cb: 0x174, 0x5cc: 0x9c, 0x5cd: 0x175, 0x5ce: 0xfb, 0x5cf: 0xfb, - 0x5d0: 0x60, 
0x5d1: 0x61, 0x5d2: 0x62, 0x5d3: 0x63, 0x5d4: 0x64, 0x5d5: 0x65, 0x5d6: 0x66, 0x5d7: 0x67, - 0x5d8: 0x68, 0x5d9: 0x69, 0x5da: 0x6a, 0x5db: 0x6b, 0x5dc: 0x6c, 0x5dd: 0x6d, 0x5de: 0x6e, 0x5df: 0x6f, + 0x5c0: 0x9c, 0x5c1: 0x9c, 0x5c2: 0x9c, 0x5c3: 0x170, 0x5c4: 0x171, 0x5c5: 0x172, 0x5c6: 0x173, 0x5c7: 0x174, + 0x5c8: 0x9c, 0x5c9: 0x175, 0x5ca: 0xfd, 0x5cb: 0x176, 0x5cc: 0x9c, 0x5cd: 0x177, 0x5ce: 0xfd, 0x5cf: 0xfd, + 0x5d0: 0x5e, 0x5d1: 0x5f, 0x5d2: 0x60, 0x5d3: 0x61, 0x5d4: 0x62, 0x5d5: 0x63, 0x5d6: 0x64, 0x5d7: 0x65, + 0x5d8: 0x66, 0x5d9: 0x67, 0x5da: 0x68, 0x5db: 0x69, 0x5dc: 0x6a, 0x5dd: 0x6b, 0x5de: 0x6c, 0x5df: 0x6d, 0x5e0: 0x9c, 0x5e1: 0x9c, 0x5e2: 0x9c, 0x5e3: 0x9c, 0x5e4: 0x9c, 0x5e5: 0x9c, 0x5e6: 0x9c, 0x5e7: 0x9c, - 0x5e8: 0x176, 0x5e9: 0x177, 0x5ea: 0x178, 0x5eb: 0xfb, 0x5ec: 0xfb, 0x5ed: 0xfb, 0x5ee: 0xfb, 0x5ef: 0xfb, - 0x5f0: 0xfb, 0x5f1: 0xfb, 0x5f2: 0xfb, 0x5f3: 0xfb, 0x5f4: 0xfb, 0x5f5: 0xfb, 0x5f6: 0xfb, 0x5f7: 0xfb, - 0x5f8: 0xfb, 0x5f9: 0xfb, 0x5fa: 0xfb, 0x5fb: 0xfb, 0x5fc: 0xfb, 0x5fd: 0xfb, 0x5fe: 0xfb, 0x5ff: 0xfb, + 0x5e8: 0x178, 0x5e9: 0x179, 0x5ea: 0x17a, 0x5eb: 0xfd, 0x5ec: 0xfd, 0x5ed: 0xfd, 0x5ee: 0xfd, 0x5ef: 0xfd, + 0x5f0: 0xfd, 0x5f1: 0xfd, 0x5f2: 0xfd, 0x5f3: 0xfd, 0x5f4: 0xfd, 0x5f5: 0xfd, 0x5f6: 0xfd, 0x5f7: 0xfd, + 0x5f8: 0xfd, 0x5f9: 0xfd, 0x5fa: 0xfd, 0x5fb: 0xfd, 0x5fc: 0xfd, 0x5fd: 0xfd, 0x5fe: 0xfd, 0x5ff: 0xfd, // Block 0x18, offset 0x600 - 0x600: 0x179, 0x601: 0xfb, 0x602: 0xfb, 0x603: 0xfb, 0x604: 0x17a, 0x605: 0x17b, 0x606: 0xfb, 0x607: 0xfb, - 0x608: 0xfb, 0x609: 0xfb, 0x60a: 0xfb, 0x60b: 0x17c, 0x60c: 0xfb, 0x60d: 0xfb, 0x60e: 0xfb, 0x60f: 0xfb, - 0x610: 0xfb, 0x611: 0xfb, 0x612: 0xfb, 0x613: 0xfb, 0x614: 0xfb, 0x615: 0xfb, 0x616: 0xfb, 0x617: 0xfb, - 0x618: 0xfb, 0x619: 0xfb, 0x61a: 0xfb, 0x61b: 0xfb, 0x61c: 0xfb, 0x61d: 0xfb, 0x61e: 0xfb, 0x61f: 0xfb, - 0x620: 0x123, 0x621: 0x123, 0x622: 0x123, 0x623: 0x17d, 0x624: 0x70, 0x625: 0x17e, 0x626: 0xfb, 0x627: 0xfb, - 0x628: 0xfb, 0x629: 0xfb, 0x62a: 0xfb, 0x62b: 0xfb, 0x62c: 0xfb, 0x62d: 0xfb, 0x62e: 0xfb, 0x62f: 0xfb, - 0x630: 0xfb, 0x631: 0x17f, 0x632: 0x180, 0x633: 0xfb, 0x634: 0x181, 0x635: 0xfb, 0x636: 0xfb, 0x637: 0xfb, - 0x638: 0x71, 0x639: 0x72, 0x63a: 0x73, 0x63b: 0x182, 0x63c: 0xfb, 0x63d: 0xfb, 0x63e: 0xfb, 0x63f: 0xfb, + 0x600: 0x17b, 0x601: 0xfd, 0x602: 0xfd, 0x603: 0xfd, 0x604: 0x17c, 0x605: 0x17d, 0x606: 0xfd, 0x607: 0xfd, + 0x608: 0xfd, 0x609: 0xfd, 0x60a: 0xfd, 0x60b: 0x17e, 0x60c: 0xfd, 0x60d: 0xfd, 0x60e: 0xfd, 0x60f: 0xfd, + 0x610: 0xfd, 0x611: 0xfd, 0x612: 0xfd, 0x613: 0xfd, 0x614: 0xfd, 0x615: 0xfd, 0x616: 0xfd, 0x617: 0xfd, + 0x618: 0xfd, 0x619: 0xfd, 0x61a: 0xfd, 0x61b: 0xfd, 0x61c: 0xfd, 0x61d: 0xfd, 0x61e: 0xfd, 0x61f: 0xfd, + 0x620: 0x125, 0x621: 0x125, 0x622: 0x125, 0x623: 0x17f, 0x624: 0x6e, 0x625: 0x180, 0x626: 0xfd, 0x627: 0xfd, + 0x628: 0xfd, 0x629: 0xfd, 0x62a: 0xfd, 0x62b: 0xfd, 0x62c: 0xfd, 0x62d: 0xfd, 0x62e: 0xfd, 0x62f: 0xfd, + 0x630: 0xfd, 0x631: 0x181, 0x632: 0x182, 0x633: 0xfd, 0x634: 0x183, 0x635: 0xfd, 0x636: 0xfd, 0x637: 0xfd, + 0x638: 0x6f, 0x639: 0x70, 0x63a: 0x71, 0x63b: 0x184, 0x63c: 0xfd, 0x63d: 0xfd, 0x63e: 0xfd, 0x63f: 0xfd, // Block 0x19, offset 0x640 - 0x640: 0x183, 0x641: 0x9c, 0x642: 0x184, 0x643: 0x185, 0x644: 0x74, 0x645: 0x75, 0x646: 0x186, 0x647: 0x187, - 0x648: 0x76, 0x649: 0x188, 0x64a: 0xfb, 0x64b: 0xfb, 0x64c: 0x9c, 0x64d: 0x9c, 0x64e: 0x9c, 0x64f: 0x9c, + 0x640: 0x185, 0x641: 0x9c, 0x642: 0x186, 0x643: 0x187, 0x644: 0x72, 0x645: 0x73, 0x646: 0x188, 0x647: 0x189, + 0x648: 0x74, 0x649: 0x18a, 0x64a: 0xfd, 0x64b: 0xfd, 0x64c: 0x9c, 
0x64d: 0x9c, 0x64e: 0x9c, 0x64f: 0x9c, 0x650: 0x9c, 0x651: 0x9c, 0x652: 0x9c, 0x653: 0x9c, 0x654: 0x9c, 0x655: 0x9c, 0x656: 0x9c, 0x657: 0x9c, - 0x658: 0x9c, 0x659: 0x9c, 0x65a: 0x9c, 0x65b: 0x189, 0x65c: 0x9c, 0x65d: 0x18a, 0x65e: 0x9c, 0x65f: 0x18b, - 0x660: 0x18c, 0x661: 0x18d, 0x662: 0x18e, 0x663: 0xfb, 0x664: 0x9c, 0x665: 0x18f, 0x666: 0x9c, 0x667: 0x190, - 0x668: 0x9c, 0x669: 0x191, 0x66a: 0x192, 0x66b: 0x193, 0x66c: 0x9c, 0x66d: 0x9c, 0x66e: 0x194, 0x66f: 0x195, - 0x670: 0xfb, 0x671: 0xfb, 0x672: 0xfb, 0x673: 0xfb, 0x674: 0xfb, 0x675: 0xfb, 0x676: 0xfb, 0x677: 0xfb, - 0x678: 0xfb, 0x679: 0xfb, 0x67a: 0xfb, 0x67b: 0xfb, 0x67c: 0xfb, 0x67d: 0xfb, 0x67e: 0xfb, 0x67f: 0xfb, + 0x658: 0x9c, 0x659: 0x9c, 0x65a: 0x9c, 0x65b: 0x18b, 0x65c: 0x9c, 0x65d: 0x18c, 0x65e: 0x9c, 0x65f: 0x18d, + 0x660: 0x18e, 0x661: 0x18f, 0x662: 0x190, 0x663: 0xfd, 0x664: 0x9c, 0x665: 0x191, 0x666: 0x9c, 0x667: 0x192, + 0x668: 0x9c, 0x669: 0x193, 0x66a: 0x194, 0x66b: 0x195, 0x66c: 0x9c, 0x66d: 0x9c, 0x66e: 0x196, 0x66f: 0x197, + 0x670: 0xfd, 0x671: 0xfd, 0x672: 0xfd, 0x673: 0xfd, 0x674: 0xfd, 0x675: 0xfd, 0x676: 0xfd, 0x677: 0xfd, + 0x678: 0xfd, 0x679: 0xfd, 0x67a: 0xfd, 0x67b: 0xfd, 0x67c: 0xfd, 0x67d: 0xfd, 0x67e: 0xfd, 0x67f: 0xfd, // Block 0x1a, offset 0x680 0x680: 0xa0, 0x681: 0xa0, 0x682: 0xa0, 0x683: 0xa0, 0x684: 0xa0, 0x685: 0xa0, 0x686: 0xa0, 0x687: 0xa0, 0x688: 0xa0, 0x689: 0xa0, 0x68a: 0xa0, 0x68b: 0xa0, 0x68c: 0xa0, 0x68d: 0xa0, 0x68e: 0xa0, 0x68f: 0xa0, 0x690: 0xa0, 0x691: 0xa0, 0x692: 0xa0, 0x693: 0xa0, 0x694: 0xa0, 0x695: 0xa0, 0x696: 0xa0, 0x697: 0xa0, - 0x698: 0xa0, 0x699: 0xa0, 0x69a: 0xa0, 0x69b: 0x196, 0x69c: 0xa0, 0x69d: 0xa0, 0x69e: 0xa0, 0x69f: 0xa0, + 0x698: 0xa0, 0x699: 0xa0, 0x69a: 0xa0, 0x69b: 0x198, 0x69c: 0xa0, 0x69d: 0xa0, 0x69e: 0xa0, 0x69f: 0xa0, 0x6a0: 0xa0, 0x6a1: 0xa0, 0x6a2: 0xa0, 0x6a3: 0xa0, 0x6a4: 0xa0, 0x6a5: 0xa0, 0x6a6: 0xa0, 0x6a7: 0xa0, 0x6a8: 0xa0, 0x6a9: 0xa0, 0x6aa: 0xa0, 0x6ab: 0xa0, 0x6ac: 0xa0, 0x6ad: 0xa0, 0x6ae: 0xa0, 0x6af: 0xa0, 0x6b0: 0xa0, 0x6b1: 0xa0, 0x6b2: 0xa0, 0x6b3: 0xa0, 0x6b4: 0xa0, 0x6b5: 0xa0, 0x6b6: 0xa0, 0x6b7: 0xa0, @@ -2312,8 +2455,8 @@ 0x6c0: 0xa0, 0x6c1: 0xa0, 0x6c2: 0xa0, 0x6c3: 0xa0, 0x6c4: 0xa0, 0x6c5: 0xa0, 0x6c6: 0xa0, 0x6c7: 0xa0, 0x6c8: 0xa0, 0x6c9: 0xa0, 0x6ca: 0xa0, 0x6cb: 0xa0, 0x6cc: 0xa0, 0x6cd: 0xa0, 0x6ce: 0xa0, 0x6cf: 0xa0, 0x6d0: 0xa0, 0x6d1: 0xa0, 0x6d2: 0xa0, 0x6d3: 0xa0, 0x6d4: 0xa0, 0x6d5: 0xa0, 0x6d6: 0xa0, 0x6d7: 0xa0, - 0x6d8: 0xa0, 0x6d9: 0xa0, 0x6da: 0xa0, 0x6db: 0xa0, 0x6dc: 0x197, 0x6dd: 0xa0, 0x6de: 0xa0, 0x6df: 0xa0, - 0x6e0: 0x198, 0x6e1: 0xa0, 0x6e2: 0xa0, 0x6e3: 0xa0, 0x6e4: 0xa0, 0x6e5: 0xa0, 0x6e6: 0xa0, 0x6e7: 0xa0, + 0x6d8: 0xa0, 0x6d9: 0xa0, 0x6da: 0xa0, 0x6db: 0xa0, 0x6dc: 0x199, 0x6dd: 0xa0, 0x6de: 0xa0, 0x6df: 0xa0, + 0x6e0: 0x19a, 0x6e1: 0xa0, 0x6e2: 0xa0, 0x6e3: 0xa0, 0x6e4: 0xa0, 0x6e5: 0xa0, 0x6e6: 0xa0, 0x6e7: 0xa0, 0x6e8: 0xa0, 0x6e9: 0xa0, 0x6ea: 0xa0, 0x6eb: 0xa0, 0x6ec: 0xa0, 0x6ed: 0xa0, 0x6ee: 0xa0, 0x6ef: 0xa0, 0x6f0: 0xa0, 0x6f1: 0xa0, 0x6f2: 0xa0, 0x6f3: 0xa0, 0x6f4: 0xa0, 0x6f5: 0xa0, 0x6f6: 0xa0, 0x6f7: 0xa0, 0x6f8: 0xa0, 0x6f9: 0xa0, 0x6fa: 0xa0, 0x6fb: 0xa0, 0x6fc: 0xa0, 0x6fd: 0xa0, 0x6fe: 0xa0, 0x6ff: 0xa0, @@ -2325,34 +2468,34 @@ 0x720: 0xa0, 0x721: 0xa0, 0x722: 0xa0, 0x723: 0xa0, 0x724: 0xa0, 0x725: 0xa0, 0x726: 0xa0, 0x727: 0xa0, 0x728: 0xa0, 0x729: 0xa0, 0x72a: 0xa0, 0x72b: 0xa0, 0x72c: 0xa0, 0x72d: 0xa0, 0x72e: 0xa0, 0x72f: 0xa0, 0x730: 0xa0, 0x731: 0xa0, 0x732: 0xa0, 0x733: 0xa0, 0x734: 0xa0, 0x735: 0xa0, 0x736: 0xa0, 0x737: 0xa0, - 0x738: 0xa0, 0x739: 0xa0, 0x73a: 0x199, 0x73b: 
0xa0, 0x73c: 0xa0, 0x73d: 0xa0, 0x73e: 0xa0, 0x73f: 0xa0, + 0x738: 0xa0, 0x739: 0xa0, 0x73a: 0x19b, 0x73b: 0xa0, 0x73c: 0xa0, 0x73d: 0xa0, 0x73e: 0xa0, 0x73f: 0xa0, // Block 0x1d, offset 0x740 0x740: 0xa0, 0x741: 0xa0, 0x742: 0xa0, 0x743: 0xa0, 0x744: 0xa0, 0x745: 0xa0, 0x746: 0xa0, 0x747: 0xa0, 0x748: 0xa0, 0x749: 0xa0, 0x74a: 0xa0, 0x74b: 0xa0, 0x74c: 0xa0, 0x74d: 0xa0, 0x74e: 0xa0, 0x74f: 0xa0, 0x750: 0xa0, 0x751: 0xa0, 0x752: 0xa0, 0x753: 0xa0, 0x754: 0xa0, 0x755: 0xa0, 0x756: 0xa0, 0x757: 0xa0, 0x758: 0xa0, 0x759: 0xa0, 0x75a: 0xa0, 0x75b: 0xa0, 0x75c: 0xa0, 0x75d: 0xa0, 0x75e: 0xa0, 0x75f: 0xa0, 0x760: 0xa0, 0x761: 0xa0, 0x762: 0xa0, 0x763: 0xa0, 0x764: 0xa0, 0x765: 0xa0, 0x766: 0xa0, 0x767: 0xa0, - 0x768: 0xa0, 0x769: 0xa0, 0x76a: 0xa0, 0x76b: 0xa0, 0x76c: 0xa0, 0x76d: 0xa0, 0x76e: 0xa0, 0x76f: 0x19a, - 0x770: 0xfb, 0x771: 0xfb, 0x772: 0xfb, 0x773: 0xfb, 0x774: 0xfb, 0x775: 0xfb, 0x776: 0xfb, 0x777: 0xfb, - 0x778: 0xfb, 0x779: 0xfb, 0x77a: 0xfb, 0x77b: 0xfb, 0x77c: 0xfb, 0x77d: 0xfb, 0x77e: 0xfb, 0x77f: 0xfb, + 0x768: 0xa0, 0x769: 0xa0, 0x76a: 0xa0, 0x76b: 0xa0, 0x76c: 0xa0, 0x76d: 0xa0, 0x76e: 0xa0, 0x76f: 0x19c, + 0x770: 0xfd, 0x771: 0xfd, 0x772: 0xfd, 0x773: 0xfd, 0x774: 0xfd, 0x775: 0xfd, 0x776: 0xfd, 0x777: 0xfd, + 0x778: 0xfd, 0x779: 0xfd, 0x77a: 0xfd, 0x77b: 0xfd, 0x77c: 0xfd, 0x77d: 0xfd, 0x77e: 0xfd, 0x77f: 0xfd, // Block 0x1e, offset 0x780 - 0x780: 0xfb, 0x781: 0xfb, 0x782: 0xfb, 0x783: 0xfb, 0x784: 0xfb, 0x785: 0xfb, 0x786: 0xfb, 0x787: 0xfb, - 0x788: 0xfb, 0x789: 0xfb, 0x78a: 0xfb, 0x78b: 0xfb, 0x78c: 0xfb, 0x78d: 0xfb, 0x78e: 0xfb, 0x78f: 0xfb, - 0x790: 0xfb, 0x791: 0xfb, 0x792: 0xfb, 0x793: 0xfb, 0x794: 0xfb, 0x795: 0xfb, 0x796: 0xfb, 0x797: 0xfb, - 0x798: 0xfb, 0x799: 0xfb, 0x79a: 0xfb, 0x79b: 0xfb, 0x79c: 0xfb, 0x79d: 0xfb, 0x79e: 0xfb, 0x79f: 0xfb, - 0x7a0: 0x77, 0x7a1: 0x78, 0x7a2: 0x79, 0x7a3: 0x19b, 0x7a4: 0x7a, 0x7a5: 0x7b, 0x7a6: 0x19c, 0x7a7: 0x7c, - 0x7a8: 0x7d, 0x7a9: 0xfb, 0x7aa: 0xfb, 0x7ab: 0xfb, 0x7ac: 0xfb, 0x7ad: 0xfb, 0x7ae: 0xfb, 0x7af: 0xfb, - 0x7b0: 0xfb, 0x7b1: 0xfb, 0x7b2: 0xfb, 0x7b3: 0xfb, 0x7b4: 0xfb, 0x7b5: 0xfb, 0x7b6: 0xfb, 0x7b7: 0xfb, - 0x7b8: 0xfb, 0x7b9: 0xfb, 0x7ba: 0xfb, 0x7bb: 0xfb, 0x7bc: 0xfb, 0x7bd: 0xfb, 0x7be: 0xfb, 0x7bf: 0xfb, + 0x780: 0xfd, 0x781: 0xfd, 0x782: 0xfd, 0x783: 0xfd, 0x784: 0xfd, 0x785: 0xfd, 0x786: 0xfd, 0x787: 0xfd, + 0x788: 0xfd, 0x789: 0xfd, 0x78a: 0xfd, 0x78b: 0xfd, 0x78c: 0xfd, 0x78d: 0xfd, 0x78e: 0xfd, 0x78f: 0xfd, + 0x790: 0xfd, 0x791: 0xfd, 0x792: 0xfd, 0x793: 0xfd, 0x794: 0xfd, 0x795: 0xfd, 0x796: 0xfd, 0x797: 0xfd, + 0x798: 0xfd, 0x799: 0xfd, 0x79a: 0xfd, 0x79b: 0xfd, 0x79c: 0xfd, 0x79d: 0xfd, 0x79e: 0xfd, 0x79f: 0xfd, + 0x7a0: 0x75, 0x7a1: 0x76, 0x7a2: 0x77, 0x7a3: 0x78, 0x7a4: 0x79, 0x7a5: 0x7a, 0x7a6: 0x7b, 0x7a7: 0x7c, + 0x7a8: 0x7d, 0x7a9: 0xfd, 0x7aa: 0xfd, 0x7ab: 0xfd, 0x7ac: 0xfd, 0x7ad: 0xfd, 0x7ae: 0xfd, 0x7af: 0xfd, + 0x7b0: 0xfd, 0x7b1: 0xfd, 0x7b2: 0xfd, 0x7b3: 0xfd, 0x7b4: 0xfd, 0x7b5: 0xfd, 0x7b6: 0xfd, 0x7b7: 0xfd, + 0x7b8: 0xfd, 0x7b9: 0xfd, 0x7ba: 0xfd, 0x7bb: 0xfd, 0x7bc: 0xfd, 0x7bd: 0xfd, 0x7be: 0xfd, 0x7bf: 0xfd, // Block 0x1f, offset 0x7c0 0x7c0: 0xa0, 0x7c1: 0xa0, 0x7c2: 0xa0, 0x7c3: 0xa0, 0x7c4: 0xa0, 0x7c5: 0xa0, 0x7c6: 0xa0, 0x7c7: 0xa0, - 0x7c8: 0xa0, 0x7c9: 0xa0, 0x7ca: 0xa0, 0x7cb: 0xa0, 0x7cc: 0xa0, 0x7cd: 0x19d, 0x7ce: 0xfb, 0x7cf: 0xfb, - 0x7d0: 0xfb, 0x7d1: 0xfb, 0x7d2: 0xfb, 0x7d3: 0xfb, 0x7d4: 0xfb, 0x7d5: 0xfb, 0x7d6: 0xfb, 0x7d7: 0xfb, - 0x7d8: 0xfb, 0x7d9: 0xfb, 0x7da: 0xfb, 0x7db: 0xfb, 0x7dc: 0xfb, 0x7dd: 0xfb, 0x7de: 0xfb, 0x7df: 0xfb, - 0x7e0: 0xfb, 0x7e1: 
0xfb, 0x7e2: 0xfb, 0x7e3: 0xfb, 0x7e4: 0xfb, 0x7e5: 0xfb, 0x7e6: 0xfb, 0x7e7: 0xfb, - 0x7e8: 0xfb, 0x7e9: 0xfb, 0x7ea: 0xfb, 0x7eb: 0xfb, 0x7ec: 0xfb, 0x7ed: 0xfb, 0x7ee: 0xfb, 0x7ef: 0xfb, - 0x7f0: 0xfb, 0x7f1: 0xfb, 0x7f2: 0xfb, 0x7f3: 0xfb, 0x7f4: 0xfb, 0x7f5: 0xfb, 0x7f6: 0xfb, 0x7f7: 0xfb, - 0x7f8: 0xfb, 0x7f9: 0xfb, 0x7fa: 0xfb, 0x7fb: 0xfb, 0x7fc: 0xfb, 0x7fd: 0xfb, 0x7fe: 0xfb, 0x7ff: 0xfb, + 0x7c8: 0xa0, 0x7c9: 0xa0, 0x7ca: 0xa0, 0x7cb: 0xa0, 0x7cc: 0xa0, 0x7cd: 0x19d, 0x7ce: 0xfd, 0x7cf: 0xfd, + 0x7d0: 0xfd, 0x7d1: 0xfd, 0x7d2: 0xfd, 0x7d3: 0xfd, 0x7d4: 0xfd, 0x7d5: 0xfd, 0x7d6: 0xfd, 0x7d7: 0xfd, + 0x7d8: 0xfd, 0x7d9: 0xfd, 0x7da: 0xfd, 0x7db: 0xfd, 0x7dc: 0xfd, 0x7dd: 0xfd, 0x7de: 0xfd, 0x7df: 0xfd, + 0x7e0: 0xfd, 0x7e1: 0xfd, 0x7e2: 0xfd, 0x7e3: 0xfd, 0x7e4: 0xfd, 0x7e5: 0xfd, 0x7e6: 0xfd, 0x7e7: 0xfd, + 0x7e8: 0xfd, 0x7e9: 0xfd, 0x7ea: 0xfd, 0x7eb: 0xfd, 0x7ec: 0xfd, 0x7ed: 0xfd, 0x7ee: 0xfd, 0x7ef: 0xfd, + 0x7f0: 0xfd, 0x7f1: 0xfd, 0x7f2: 0xfd, 0x7f3: 0xfd, 0x7f4: 0xfd, 0x7f5: 0xfd, 0x7f6: 0xfd, 0x7f7: 0xfd, + 0x7f8: 0xfd, 0x7f9: 0xfd, 0x7fa: 0xfd, 0x7fb: 0xfd, 0x7fc: 0xfd, 0x7fd: 0xfd, 0x7fe: 0xfd, 0x7ff: 0xfd, // Block 0x20, offset 0x800 0x810: 0x0d, 0x811: 0x0e, 0x812: 0x0f, 0x813: 0x10, 0x814: 0x11, 0x815: 0x0b, 0x816: 0x12, 0x817: 0x07, 0x818: 0x13, 0x819: 0x0b, 0x81a: 0x0b, 0x81b: 0x14, 0x81c: 0x0b, 0x81d: 0x15, 0x81e: 0x16, 0x81f: 0x17, @@ -2370,14 +2513,14 @@ 0x870: 0x0b, 0x871: 0x0b, 0x872: 0x0b, 0x873: 0x0b, 0x874: 0x0b, 0x875: 0x0b, 0x876: 0x0b, 0x877: 0x0b, 0x878: 0x0b, 0x879: 0x0b, 0x87a: 0x0b, 0x87b: 0x0b, 0x87c: 0x0b, 0x87d: 0x0b, 0x87e: 0x0b, 0x87f: 0x0b, // Block 0x22, offset 0x880 - 0x880: 0x19e, 0x881: 0x19f, 0x882: 0xfb, 0x883: 0xfb, 0x884: 0x1a0, 0x885: 0x1a0, 0x886: 0x1a0, 0x887: 0x1a1, - 0x888: 0xfb, 0x889: 0xfb, 0x88a: 0xfb, 0x88b: 0xfb, 0x88c: 0xfb, 0x88d: 0xfb, 0x88e: 0xfb, 0x88f: 0xfb, - 0x890: 0xfb, 0x891: 0xfb, 0x892: 0xfb, 0x893: 0xfb, 0x894: 0xfb, 0x895: 0xfb, 0x896: 0xfb, 0x897: 0xfb, - 0x898: 0xfb, 0x899: 0xfb, 0x89a: 0xfb, 0x89b: 0xfb, 0x89c: 0xfb, 0x89d: 0xfb, 0x89e: 0xfb, 0x89f: 0xfb, - 0x8a0: 0xfb, 0x8a1: 0xfb, 0x8a2: 0xfb, 0x8a3: 0xfb, 0x8a4: 0xfb, 0x8a5: 0xfb, 0x8a6: 0xfb, 0x8a7: 0xfb, - 0x8a8: 0xfb, 0x8a9: 0xfb, 0x8aa: 0xfb, 0x8ab: 0xfb, 0x8ac: 0xfb, 0x8ad: 0xfb, 0x8ae: 0xfb, 0x8af: 0xfb, - 0x8b0: 0xfb, 0x8b1: 0xfb, 0x8b2: 0xfb, 0x8b3: 0xfb, 0x8b4: 0xfb, 0x8b5: 0xfb, 0x8b6: 0xfb, 0x8b7: 0xfb, - 0x8b8: 0xfb, 0x8b9: 0xfb, 0x8ba: 0xfb, 0x8bb: 0xfb, 0x8bc: 0xfb, 0x8bd: 0xfb, 0x8be: 0xfb, 0x8bf: 0xfb, + 0x880: 0x19e, 0x881: 0x19f, 0x882: 0xfd, 0x883: 0xfd, 0x884: 0x1a0, 0x885: 0x1a0, 0x886: 0x1a0, 0x887: 0x1a1, + 0x888: 0xfd, 0x889: 0xfd, 0x88a: 0xfd, 0x88b: 0xfd, 0x88c: 0xfd, 0x88d: 0xfd, 0x88e: 0xfd, 0x88f: 0xfd, + 0x890: 0xfd, 0x891: 0xfd, 0x892: 0xfd, 0x893: 0xfd, 0x894: 0xfd, 0x895: 0xfd, 0x896: 0xfd, 0x897: 0xfd, + 0x898: 0xfd, 0x899: 0xfd, 0x89a: 0xfd, 0x89b: 0xfd, 0x89c: 0xfd, 0x89d: 0xfd, 0x89e: 0xfd, 0x89f: 0xfd, + 0x8a0: 0xfd, 0x8a1: 0xfd, 0x8a2: 0xfd, 0x8a3: 0xfd, 0x8a4: 0xfd, 0x8a5: 0xfd, 0x8a6: 0xfd, 0x8a7: 0xfd, + 0x8a8: 0xfd, 0x8a9: 0xfd, 0x8aa: 0xfd, 0x8ab: 0xfd, 0x8ac: 0xfd, 0x8ad: 0xfd, 0x8ae: 0xfd, 0x8af: 0xfd, + 0x8b0: 0xfd, 0x8b1: 0xfd, 0x8b2: 0xfd, 0x8b3: 0xfd, 0x8b4: 0xfd, 0x8b5: 0xfd, 0x8b6: 0xfd, 0x8b7: 0xfd, + 0x8b8: 0xfd, 0x8b9: 0xfd, 0x8ba: 0xfd, 0x8bb: 0xfd, 0x8bc: 0xfd, 0x8bd: 0xfd, 0x8be: 0xfd, 0x8bf: 0xfd, // Block 0x23, offset 0x8c0 0x8c0: 0x0b, 0x8c1: 0x0b, 0x8c2: 0x0b, 0x8c3: 0x0b, 0x8c4: 0x0b, 0x8c5: 0x0b, 0x8c6: 0x0b, 0x8c7: 0x0b, 0x8c8: 0x0b, 0x8c9: 0x0b, 0x8ca: 0x0b, 0x8cb: 0x0b, 0x8cc: 0x0b, 0x8cd: 
0x0b, 0x8ce: 0x0b, 0x8cf: 0x0b, @@ -2393,10 +2536,10 @@ } // idnaSparseOffset: 292 entries, 584 bytes -var idnaSparseOffset = []uint16{0x0, 0x8, 0x19, 0x25, 0x27, 0x2c, 0x33, 0x3e, 0x4a, 0x4e, 0x5d, 0x62, 0x6c, 0x78, 0x85, 0x8b, 0x94, 0xa4, 0xb2, 0xbd, 0xca, 0xdb, 0xe5, 0xec, 0xf9, 0x10a, 0x111, 0x11c, 0x12b, 0x139, 0x143, 0x145, 0x14a, 0x14d, 0x150, 0x152, 0x15e, 0x169, 0x171, 0x177, 0x17d, 0x182, 0x187, 0x18a, 0x18e, 0x194, 0x199, 0x1a5, 0x1af, 0x1b5, 0x1c6, 0x1d0, 0x1d3, 0x1db, 0x1de, 0x1eb, 0x1f3, 0x1f7, 0x1fe, 0x206, 0x216, 0x222, 0x225, 0x22f, 0x23b, 0x247, 0x253, 0x25b, 0x260, 0x26d, 0x27e, 0x282, 0x28d, 0x291, 0x29a, 0x2a2, 0x2a8, 0x2ad, 0x2b0, 0x2b4, 0x2ba, 0x2be, 0x2c2, 0x2c6, 0x2cc, 0x2d4, 0x2db, 0x2e6, 0x2f0, 0x2f4, 0x2f7, 0x2fd, 0x301, 0x303, 0x306, 0x308, 0x30b, 0x315, 0x318, 0x327, 0x32b, 0x330, 0x333, 0x337, 0x33c, 0x341, 0x347, 0x358, 0x368, 0x36e, 0x372, 0x381, 0x386, 0x38e, 0x398, 0x3a3, 0x3ab, 0x3bc, 0x3c5, 0x3d5, 0x3e2, 0x3ee, 0x3f3, 0x400, 0x404, 0x409, 0x40b, 0x40d, 0x411, 0x413, 0x417, 0x420, 0x426, 0x42a, 0x43a, 0x444, 0x449, 0x44c, 0x452, 0x459, 0x45e, 0x462, 0x468, 0x46d, 0x476, 0x47b, 0x481, 0x488, 0x48f, 0x496, 0x49a, 0x49f, 0x4a2, 0x4a7, 0x4b3, 0x4b9, 0x4be, 0x4c5, 0x4cd, 0x4d2, 0x4d6, 0x4e6, 0x4ed, 0x4f1, 0x4f5, 0x4fc, 0x4fe, 0x501, 0x504, 0x508, 0x511, 0x515, 0x51d, 0x525, 0x52d, 0x539, 0x545, 0x54b, 0x554, 0x560, 0x567, 0x570, 0x57b, 0x582, 0x591, 0x59e, 0x5ab, 0x5b4, 0x5b8, 0x5c7, 0x5cf, 0x5da, 0x5e3, 0x5e9, 0x5f1, 0x5fa, 0x605, 0x608, 0x614, 0x61d, 0x620, 0x625, 0x62e, 0x633, 0x640, 0x64b, 0x654, 0x65e, 0x661, 0x66b, 0x674, 0x680, 0x68d, 0x69a, 0x6a8, 0x6af, 0x6b3, 0x6b7, 0x6ba, 0x6bf, 0x6c2, 0x6c7, 0x6ca, 0x6d1, 0x6d8, 0x6dc, 0x6e7, 0x6ea, 0x6ed, 0x6f0, 0x6f6, 0x6fc, 0x705, 0x708, 0x70b, 0x70e, 0x711, 0x718, 0x71b, 0x720, 0x72a, 0x72d, 0x731, 0x740, 0x74c, 0x750, 0x755, 0x759, 0x75e, 0x762, 0x767, 0x770, 0x77b, 0x781, 0x787, 0x78d, 0x793, 0x79c, 0x79f, 0x7a2, 0x7a6, 0x7aa, 0x7ae, 0x7b4, 0x7ba, 0x7bf, 0x7c2, 0x7d2, 0x7d9, 0x7dc, 0x7e1, 0x7e5, 0x7eb, 0x7f2, 0x7f6, 0x7fa, 0x803, 0x80a, 0x80f, 0x813, 0x821, 0x824, 0x827, 0x82b, 0x82f, 0x832, 0x842, 0x853, 0x856, 0x85b, 0x85d, 0x85f} +var idnaSparseOffset = []uint16{0x0, 0x8, 0x19, 0x25, 0x27, 0x2c, 0x33, 0x3e, 0x4a, 0x4e, 0x5d, 0x62, 0x6c, 0x78, 0x85, 0x8b, 0x94, 0xa4, 0xb2, 0xbd, 0xca, 0xdb, 0xe5, 0xec, 0xf9, 0x10a, 0x111, 0x11c, 0x12b, 0x139, 0x143, 0x145, 0x14a, 0x14d, 0x150, 0x152, 0x15e, 0x169, 0x171, 0x177, 0x17d, 0x182, 0x187, 0x18a, 0x18e, 0x194, 0x199, 0x1a5, 0x1af, 0x1b5, 0x1c6, 0x1d0, 0x1d3, 0x1db, 0x1de, 0x1eb, 0x1f3, 0x1f7, 0x1fe, 0x206, 0x216, 0x222, 0x225, 0x22f, 0x23b, 0x247, 0x253, 0x25b, 0x260, 0x26d, 0x27e, 0x282, 0x28d, 0x291, 0x29a, 0x2a2, 0x2a8, 0x2ad, 0x2b0, 0x2b4, 0x2ba, 0x2be, 0x2c2, 0x2c6, 0x2cc, 0x2d4, 0x2db, 0x2e6, 0x2f0, 0x2f4, 0x2f7, 0x2fd, 0x301, 0x303, 0x306, 0x308, 0x30b, 0x315, 0x318, 0x327, 0x32b, 0x32f, 0x331, 0x33a, 0x33d, 0x341, 0x346, 0x34b, 0x351, 0x362, 0x372, 0x378, 0x37c, 0x38b, 0x390, 0x398, 0x3a2, 0x3ad, 0x3b5, 0x3c6, 0x3cf, 0x3df, 0x3ec, 0x3f8, 0x3fd, 0x40a, 0x40e, 0x413, 0x415, 0x417, 0x41b, 0x41d, 0x421, 0x42a, 0x430, 0x434, 0x444, 0x44e, 0x453, 0x456, 0x45c, 0x463, 0x468, 0x46c, 0x472, 0x477, 0x480, 0x485, 0x48b, 0x492, 0x499, 0x4a0, 0x4a4, 0x4a9, 0x4ac, 0x4b1, 0x4bd, 0x4c3, 0x4c8, 0x4cf, 0x4d7, 0x4dc, 0x4e0, 0x4f0, 0x4f7, 0x4fb, 0x4ff, 0x506, 0x508, 0x50b, 0x50e, 0x512, 0x51b, 0x51f, 0x527, 0x52f, 0x537, 0x543, 0x54f, 0x555, 0x55e, 0x56a, 0x571, 0x57a, 0x585, 0x58c, 0x59b, 0x5a8, 0x5b5, 0x5be, 0x5c2, 0x5d1, 0x5d9, 0x5e4, 0x5ed, 0x5f3, 0x5fb, 0x604, 0x60f, 
0x612, 0x61e, 0x627, 0x62a, 0x62f, 0x638, 0x63d, 0x64a, 0x655, 0x65e, 0x668, 0x66b, 0x675, 0x67e, 0x68a, 0x697, 0x6a4, 0x6b2, 0x6b9, 0x6bd, 0x6c1, 0x6c4, 0x6c9, 0x6cc, 0x6d1, 0x6d4, 0x6db, 0x6e2, 0x6e6, 0x6f1, 0x6f4, 0x6f7, 0x6fa, 0x700, 0x706, 0x70f, 0x712, 0x715, 0x718, 0x71b, 0x722, 0x725, 0x72a, 0x734, 0x737, 0x73b, 0x74a, 0x756, 0x75a, 0x75f, 0x763, 0x768, 0x76c, 0x771, 0x77a, 0x785, 0x78b, 0x791, 0x797, 0x79d, 0x7a6, 0x7a9, 0x7ac, 0x7b0, 0x7b4, 0x7b8, 0x7be, 0x7c4, 0x7c9, 0x7cc, 0x7dc, 0x7e3, 0x7e6, 0x7eb, 0x7ef, 0x7f5, 0x7fc, 0x800, 0x804, 0x80d, 0x814, 0x819, 0x81d, 0x82b, 0x82e, 0x831, 0x835, 0x839, 0x83c, 0x83f, 0x844, 0x846, 0x848} -// idnaSparseValues: 2146 entries, 8584 bytes -var idnaSparseValues = [2146]valueRange{ +// idnaSparseValues: 2123 entries, 8492 bytes +var idnaSparseValues = [2123]valueRange{ // Block 0x0, offset 0x0 {value: 0x0000, lo: 0x07}, {value: 0xe105, lo: 0x80, hi: 0x96}, @@ -2427,15 +2570,15 @@ // Block 0x2, offset 0x19 {value: 0x0000, lo: 0x0b}, {value: 0x0008, lo: 0x80, hi: 0xaf}, - {value: 0x0249, lo: 0xb0, hi: 0xb0}, + {value: 0x00a9, lo: 0xb0, hi: 0xb0}, {value: 0x037d, lo: 0xb1, hi: 0xb1}, - {value: 0x0259, lo: 0xb2, hi: 0xb2}, - {value: 0x0269, lo: 0xb3, hi: 0xb3}, + {value: 0x00b1, lo: 0xb2, hi: 0xb2}, + {value: 0x00b9, lo: 0xb3, hi: 0xb3}, {value: 0x034d, lo: 0xb4, hi: 0xb4}, {value: 0x0395, lo: 0xb5, hi: 0xb5}, {value: 0xe1bd, lo: 0xb6, hi: 0xb6}, - {value: 0x0279, lo: 0xb7, hi: 0xb7}, - {value: 0x0289, lo: 0xb8, hi: 0xb8}, + {value: 0x00c1, lo: 0xb7, hi: 0xb7}, + {value: 0x00c9, lo: 0xb8, hi: 0xb8}, {value: 0x0008, lo: 0xb9, hi: 0xbf}, // Block 0x3, offset 0x25 {value: 0x0000, lo: 0x01}, @@ -2457,7 +2600,7 @@ // Block 0x6, offset 0x33 {value: 0x0000, lo: 0x0a}, {value: 0x0008, lo: 0x80, hi: 0x86}, - {value: 0x0401, lo: 0x87, hi: 0x87}, + {value: 0x0131, lo: 0x87, hi: 0x87}, {value: 0x0008, lo: 0x88, hi: 0x88}, {value: 0x0018, lo: 0x89, hi: 0x8a}, {value: 0x0040, lo: 0x8b, hi: 0x8c}, @@ -2643,7 +2786,7 @@ {value: 0x0008, lo: 0x81, hi: 0xb0}, {value: 0x3308, lo: 0xb1, hi: 0xb1}, {value: 0x0008, lo: 0xb2, hi: 0xb2}, - {value: 0x08f1, lo: 0xb3, hi: 0xb3}, + {value: 0x01f1, lo: 0xb3, hi: 0xb3}, {value: 0x3308, lo: 0xb4, hi: 0xb9}, {value: 0x3b08, lo: 0xba, hi: 0xba}, {value: 0x0040, lo: 0xbb, hi: 0xbe}, @@ -2666,8 +2809,8 @@ {value: 0x0040, lo: 0x8e, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x99}, {value: 0x0040, lo: 0x9a, hi: 0x9b}, - {value: 0x0961, lo: 0x9c, hi: 0x9c}, - {value: 0x0999, lo: 0x9d, hi: 0x9d}, + {value: 0x0201, lo: 0x9c, hi: 0x9c}, + {value: 0x0209, lo: 0x9d, hi: 0x9d}, {value: 0x0008, lo: 0x9e, hi: 0x9f}, {value: 0x0040, lo: 0xa0, hi: 0xbf}, // Block 0x18, offset 0xf9 @@ -3075,13 +3218,13 @@ {value: 0x0018, lo: 0xbe, hi: 0xbf}, // Block 0x44, offset 0x260 {value: 0x0000, lo: 0x0c}, - {value: 0x0e29, lo: 0x80, hi: 0x80}, - {value: 0x0e41, lo: 0x81, hi: 0x81}, - {value: 0x0e59, lo: 0x82, hi: 0x82}, - {value: 0x0e71, lo: 0x83, hi: 0x83}, - {value: 0x0e89, lo: 0x84, hi: 0x85}, - {value: 0x0ea1, lo: 0x86, hi: 0x86}, - {value: 0x0eb9, lo: 0x87, hi: 0x87}, + {value: 0x02a9, lo: 0x80, hi: 0x80}, + {value: 0x02b1, lo: 0x81, hi: 0x81}, + {value: 0x02b9, lo: 0x82, hi: 0x82}, + {value: 0x02c1, lo: 0x83, hi: 0x83}, + {value: 0x02c9, lo: 0x84, hi: 0x85}, + {value: 0x02d1, lo: 0x86, hi: 0x86}, + {value: 0x02d9, lo: 0x87, hi: 0x87}, {value: 0x057d, lo: 0x88, hi: 0x88}, {value: 0x0040, lo: 0x89, hi: 0x8f}, {value: 0x059d, lo: 0x90, hi: 0xba}, @@ -3133,18 +3276,18 @@ {value: 0x0040, lo: 0x83, hi: 0x83}, {value: 0x0008, lo: 0x84, hi: 0x84}, {value: 
0x0018, lo: 0x85, hi: 0x88}, - {value: 0x24c1, lo: 0x89, hi: 0x89}, + {value: 0x0851, lo: 0x89, hi: 0x89}, {value: 0x0018, lo: 0x8a, hi: 0x8b}, {value: 0x0040, lo: 0x8c, hi: 0x8f}, {value: 0x0018, lo: 0x90, hi: 0xbf}, // Block 0x4a, offset 0x29a {value: 0x0000, lo: 0x07}, {value: 0x0018, lo: 0x80, hi: 0xab}, - {value: 0x24f1, lo: 0xac, hi: 0xac}, - {value: 0x2529, lo: 0xad, hi: 0xad}, + {value: 0x0859, lo: 0xac, hi: 0xac}, + {value: 0x0861, lo: 0xad, hi: 0xad}, {value: 0x0018, lo: 0xae, hi: 0xae}, - {value: 0x2579, lo: 0xaf, hi: 0xaf}, - {value: 0x25b1, lo: 0xb0, hi: 0xb0}, + {value: 0x0869, lo: 0xaf, hi: 0xaf}, + {value: 0x0871, lo: 0xb0, hi: 0xb0}, {value: 0x0018, lo: 0xb1, hi: 0xbf}, // Block 0x4b, offset 0x2a2 {value: 0x0000, lo: 0x05}, @@ -3166,19 +3309,19 @@ // Block 0x4e, offset 0x2b0 {value: 0x0000, lo: 0x03}, {value: 0x0018, lo: 0x80, hi: 0x8b}, - {value: 0x28c1, lo: 0x8c, hi: 0x8c}, + {value: 0x0929, lo: 0x8c, hi: 0x8c}, {value: 0x0018, lo: 0x8d, hi: 0xbf}, // Block 0x4f, offset 0x2b4 {value: 0x0000, lo: 0x05}, {value: 0x0018, lo: 0x80, hi: 0xb3}, {value: 0x0e7e, lo: 0xb4, hi: 0xb4}, - {value: 0x292a, lo: 0xb5, hi: 0xb5}, + {value: 0x0932, lo: 0xb5, hi: 0xb5}, {value: 0x0e9e, lo: 0xb6, hi: 0xb6}, {value: 0x0018, lo: 0xb7, hi: 0xbf}, // Block 0x50, offset 0x2ba {value: 0x0000, lo: 0x03}, {value: 0x0018, lo: 0x80, hi: 0x9b}, - {value: 0x2941, lo: 0x9c, hi: 0x9c}, + {value: 0x0939, lo: 0x9c, hi: 0x9c}, {value: 0x0018, lo: 0x9d, hi: 0xbf}, // Block 0x51, offset 0x2be {value: 0x0000, lo: 0x03}, @@ -3277,16 +3420,16 @@ {value: 0x0008, lo: 0x80, hi: 0x96}, {value: 0x0040, lo: 0x97, hi: 0x98}, {value: 0x3308, lo: 0x99, hi: 0x9a}, - {value: 0x29e2, lo: 0x9b, hi: 0x9b}, - {value: 0x2a0a, lo: 0x9c, hi: 0x9c}, + {value: 0x096a, lo: 0x9b, hi: 0x9b}, + {value: 0x0972, lo: 0x9c, hi: 0x9c}, {value: 0x0008, lo: 0x9d, hi: 0x9e}, - {value: 0x2a31, lo: 0x9f, hi: 0x9f}, + {value: 0x0979, lo: 0x9f, hi: 0x9f}, {value: 0x0018, lo: 0xa0, hi: 0xa0}, {value: 0x0008, lo: 0xa1, hi: 0xbf}, // Block 0x61, offset 0x315 {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0xbe}, - {value: 0x2a69, lo: 0xbf, hi: 0xbf}, + {value: 0x0981, lo: 0xbf, hi: 0xbf}, // Block 0x62, offset 0x318 {value: 0x0000, lo: 0x0e}, {value: 0x0040, lo: 0x80, hi: 0x84}, @@ -3309,46 +3452,58 @@ {value: 0x0040, lo: 0xa4, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xbf}, // Block 0x64, offset 0x32b - {value: 0x0030, lo: 0x04}, - {value: 0x2aa2, lo: 0x80, hi: 0x9d}, - {value: 0x305a, lo: 0x9e, hi: 0x9e}, + {value: 0x0008, lo: 0x03}, + {value: 0x098a, lo: 0x80, hi: 0x9e}, {value: 0x0040, lo: 0x9f, hi: 0x9f}, - {value: 0x30a2, lo: 0xa0, hi: 0xbf}, - // Block 0x65, offset 0x330 + {value: 0x0a82, lo: 0xa0, hi: 0xbf}, + // Block 0x65, offset 0x32f + {value: 0x0008, lo: 0x01}, + {value: 0x0d19, lo: 0x80, hi: 0xbf}, + // Block 0x66, offset 0x331 + {value: 0x0008, lo: 0x08}, + {value: 0x0f19, lo: 0x80, hi: 0xb0}, + {value: 0x4045, lo: 0xb1, hi: 0xb1}, + {value: 0x10a1, lo: 0xb2, hi: 0xb3}, + {value: 0x4065, lo: 0xb4, hi: 0xb4}, + {value: 0x10b1, lo: 0xb5, hi: 0xb7}, + {value: 0x4085, lo: 0xb8, hi: 0xb8}, + {value: 0x4085, lo: 0xb9, hi: 0xb9}, + {value: 0x10c9, lo: 0xba, hi: 0xbf}, + // Block 0x67, offset 0x33a {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0xbc}, {value: 0x0040, lo: 0xbd, hi: 0xbf}, - // Block 0x66, offset 0x333 + // Block 0x68, offset 0x33d {value: 0x0000, lo: 0x03}, {value: 0x0008, lo: 0x80, hi: 0x8c}, {value: 0x0040, lo: 0x8d, hi: 0x8f}, {value: 0x0018, lo: 0x90, hi: 0xbf}, - // Block 0x67, offset 0x337 + // Block 
0x69, offset 0x341 {value: 0x0000, lo: 0x04}, {value: 0x0018, lo: 0x80, hi: 0x86}, {value: 0x0040, lo: 0x87, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0xbd}, {value: 0x0018, lo: 0xbe, hi: 0xbf}, - // Block 0x68, offset 0x33c + // Block 0x6a, offset 0x346 {value: 0x0000, lo: 0x04}, {value: 0x0008, lo: 0x80, hi: 0x8c}, {value: 0x0018, lo: 0x8d, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0xab}, {value: 0x0040, lo: 0xac, hi: 0xbf}, - // Block 0x69, offset 0x341 + // Block 0x6b, offset 0x34b {value: 0x0000, lo: 0x05}, {value: 0x0008, lo: 0x80, hi: 0xa5}, {value: 0x0018, lo: 0xa6, hi: 0xaf}, {value: 0x3308, lo: 0xb0, hi: 0xb1}, {value: 0x0018, lo: 0xb2, hi: 0xb7}, {value: 0x0040, lo: 0xb8, hi: 0xbf}, - // Block 0x6a, offset 0x347 + // Block 0x6c, offset 0x351 {value: 0x0000, lo: 0x10}, {value: 0x0040, lo: 0x80, hi: 0x81}, {value: 0xe00d, lo: 0x82, hi: 0x82}, {value: 0x0008, lo: 0x83, hi: 0x83}, {value: 0x03f5, lo: 0x84, hi: 0x84}, - {value: 0x1329, lo: 0x85, hi: 0x85}, + {value: 0x0479, lo: 0x85, hi: 0x85}, {value: 0x447d, lo: 0x86, hi: 0x86}, {value: 0xe07d, lo: 0x87, hi: 0x87}, {value: 0x0008, lo: 0x88, hi: 0x88}, @@ -3357,10 +3512,10 @@ {value: 0x0040, lo: 0x8b, hi: 0xb4}, {value: 0xe01d, lo: 0xb5, hi: 0xb5}, {value: 0x0008, lo: 0xb6, hi: 0xb7}, - {value: 0x2009, lo: 0xb8, hi: 0xb8}, - {value: 0x6ec1, lo: 0xb9, hi: 0xb9}, + {value: 0x0741, lo: 0xb8, hi: 0xb8}, + {value: 0x13f1, lo: 0xb9, hi: 0xb9}, {value: 0x0008, lo: 0xba, hi: 0xbf}, - // Block 0x6b, offset 0x358 + // Block 0x6d, offset 0x362 {value: 0x0000, lo: 0x0f}, {value: 0x0008, lo: 0x80, hi: 0x81}, {value: 0x3308, lo: 0x82, hi: 0x82}, @@ -3377,19 +3532,19 @@ {value: 0x0040, lo: 0xad, hi: 0xaf}, {value: 0x0018, lo: 0xb0, hi: 0xb9}, {value: 0x0040, lo: 0xba, hi: 0xbf}, - // Block 0x6c, offset 0x368 + // Block 0x6e, offset 0x372 {value: 0x0000, lo: 0x05}, {value: 0x0208, lo: 0x80, hi: 0xb1}, {value: 0x0108, lo: 0xb2, hi: 0xb2}, {value: 0x0008, lo: 0xb3, hi: 0xb3}, {value: 0x0018, lo: 0xb4, hi: 0xb7}, {value: 0x0040, lo: 0xb8, hi: 0xbf}, - // Block 0x6d, offset 0x36e + // Block 0x6f, offset 0x378 {value: 0x0000, lo: 0x03}, {value: 0x3008, lo: 0x80, hi: 0x81}, {value: 0x0008, lo: 0x82, hi: 0xb3}, {value: 0x3008, lo: 0xb4, hi: 0xbf}, - // Block 0x6e, offset 0x372 + // Block 0x70, offset 0x37c {value: 0x0000, lo: 0x0e}, {value: 0x3008, lo: 0x80, hi: 0x83}, {value: 0x3b08, lo: 0x84, hi: 0x84}, @@ -3405,13 +3560,13 @@ {value: 0x0018, lo: 0xbc, hi: 0xbc}, {value: 0x0008, lo: 0xbd, hi: 0xbe}, {value: 0x3308, lo: 0xbf, hi: 0xbf}, - // Block 0x6f, offset 0x381 + // Block 0x71, offset 0x38b {value: 0x0000, lo: 0x04}, {value: 0x0008, lo: 0x80, hi: 0xa5}, {value: 0x3308, lo: 0xa6, hi: 0xad}, {value: 0x0018, lo: 0xae, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x70, offset 0x386 + // Block 0x72, offset 0x390 {value: 0x0000, lo: 0x07}, {value: 0x0008, lo: 0x80, hi: 0x86}, {value: 0x3308, lo: 0x87, hi: 0x91}, @@ -3420,7 +3575,7 @@ {value: 0x0040, lo: 0x94, hi: 0x9e}, {value: 0x0018, lo: 0x9f, hi: 0xbc}, {value: 0x0040, lo: 0xbd, hi: 0xbf}, - // Block 0x71, offset 0x38e + // Block 0x73, offset 0x398 {value: 0x0000, lo: 0x09}, {value: 0x3308, lo: 0x80, hi: 0x82}, {value: 0x3008, lo: 0x83, hi: 0x83}, @@ -3431,7 +3586,7 @@ {value: 0x3008, lo: 0xba, hi: 0xbb}, {value: 0x3308, lo: 0xbc, hi: 0xbd}, {value: 0x3008, lo: 0xbe, hi: 0xbf}, - // Block 0x72, offset 0x398 + // Block 0x74, offset 0x3a2 {value: 0x0000, lo: 0x0a}, {value: 0x3808, lo: 0x80, hi: 0x80}, {value: 0x0018, lo: 0x81, hi: 0x8d}, @@ -3443,7 +3598,7 @@ {value: 0x3308, lo: 0xa5, hi: 
0xa5}, {value: 0x0008, lo: 0xa6, hi: 0xbe}, {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0x73, offset 0x3a3 + // Block 0x75, offset 0x3ad {value: 0x0000, lo: 0x07}, {value: 0x0008, lo: 0x80, hi: 0xa8}, {value: 0x3308, lo: 0xa9, hi: 0xae}, @@ -3452,7 +3607,7 @@ {value: 0x3008, lo: 0xb3, hi: 0xb4}, {value: 0x3308, lo: 0xb5, hi: 0xb6}, {value: 0x0040, lo: 0xb7, hi: 0xbf}, - // Block 0x74, offset 0x3ab + // Block 0x76, offset 0x3b5 {value: 0x0000, lo: 0x10}, {value: 0x0008, lo: 0x80, hi: 0x82}, {value: 0x3308, lo: 0x83, hi: 0x83}, @@ -3470,7 +3625,7 @@ {value: 0x3308, lo: 0xbc, hi: 0xbc}, {value: 0x3008, lo: 0xbd, hi: 0xbd}, {value: 0x0008, lo: 0xbe, hi: 0xbf}, - // Block 0x75, offset 0x3bc + // Block 0x77, offset 0x3c6 {value: 0x0000, lo: 0x08}, {value: 0x0008, lo: 0x80, hi: 0xaf}, {value: 0x3308, lo: 0xb0, hi: 0xb0}, @@ -3480,7 +3635,7 @@ {value: 0x3308, lo: 0xb7, hi: 0xb8}, {value: 0x0008, lo: 0xb9, hi: 0xbd}, {value: 0x3308, lo: 0xbe, hi: 0xbf}, - // Block 0x76, offset 0x3c5 + // Block 0x78, offset 0x3cf {value: 0x0000, lo: 0x0f}, {value: 0x0008, lo: 0x80, hi: 0x80}, {value: 0x3308, lo: 0x81, hi: 0x81}, @@ -3497,7 +3652,7 @@ {value: 0x3008, lo: 0xb5, hi: 0xb5}, {value: 0x3b08, lo: 0xb6, hi: 0xb6}, {value: 0x0040, lo: 0xb7, hi: 0xbf}, - // Block 0x77, offset 0x3d5 + // Block 0x79, offset 0x3df {value: 0x0000, lo: 0x0c}, {value: 0x0040, lo: 0x80, hi: 0x80}, {value: 0x0008, lo: 0x81, hi: 0x86}, @@ -3511,26 +3666,26 @@ {value: 0x0008, lo: 0xa8, hi: 0xae}, {value: 0x0040, lo: 0xaf, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x78, offset 0x3e2 + // Block 0x7a, offset 0x3ec {value: 0x0000, lo: 0x0b}, {value: 0x0008, lo: 0x80, hi: 0x9a}, {value: 0x0018, lo: 0x9b, hi: 0x9b}, {value: 0x449d, lo: 0x9c, hi: 0x9c}, {value: 0x44b5, lo: 0x9d, hi: 0x9d}, - {value: 0x2971, lo: 0x9e, hi: 0x9e}, + {value: 0x0941, lo: 0x9e, hi: 0x9e}, {value: 0xe06d, lo: 0x9f, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xa8}, - {value: 0x6ed9, lo: 0xa9, hi: 0xa9}, + {value: 0x13f9, lo: 0xa9, hi: 0xa9}, {value: 0x0018, lo: 0xaa, hi: 0xab}, {value: 0x0040, lo: 0xac, hi: 0xaf}, {value: 0x44cd, lo: 0xb0, hi: 0xbf}, - // Block 0x79, offset 0x3ee + // Block 0x7b, offset 0x3f8 {value: 0x0000, lo: 0x04}, {value: 0x44ed, lo: 0x80, hi: 0x8f}, {value: 0x450d, lo: 0x90, hi: 0x9f}, {value: 0x452d, lo: 0xa0, hi: 0xaf}, {value: 0x450d, lo: 0xb0, hi: 0xbf}, - // Block 0x7a, offset 0x3f3 + // Block 0x7c, offset 0x3fd {value: 0x0000, lo: 0x0c}, {value: 0x0008, lo: 0x80, hi: 0xa2}, {value: 0x3008, lo: 0xa3, hi: 0xa4}, @@ -3544,76 +3699,76 @@ {value: 0x0040, lo: 0xae, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xb9}, {value: 0x0040, lo: 0xba, hi: 0xbf}, - // Block 0x7b, offset 0x400 + // Block 0x7d, offset 0x40a {value: 0x0000, lo: 0x03}, {value: 0x0008, lo: 0x80, hi: 0xa3}, {value: 0x0040, lo: 0xa4, hi: 0xaf}, {value: 0x0018, lo: 0xb0, hi: 0xbf}, - // Block 0x7c, offset 0x404 + // Block 0x7e, offset 0x40e {value: 0x0000, lo: 0x04}, {value: 0x0018, lo: 0x80, hi: 0x86}, {value: 0x0040, lo: 0x87, hi: 0x8a}, {value: 0x0018, lo: 0x8b, hi: 0xbb}, {value: 0x0040, lo: 0xbc, hi: 0xbf}, - // Block 0x7d, offset 0x409 + // Block 0x7f, offset 0x413 {value: 0x0000, lo: 0x01}, {value: 0x0040, lo: 0x80, hi: 0xbf}, - // Block 0x7e, offset 0x40b + // Block 0x80, offset 0x415 {value: 0x0020, lo: 0x01}, {value: 0x454d, lo: 0x80, hi: 0xbf}, - // Block 0x7f, offset 0x40d + // Block 0x81, offset 0x417 {value: 0x0020, lo: 0x03}, {value: 0x4d4d, lo: 0x80, hi: 0x94}, {value: 0x4b0d, lo: 0x95, hi: 0x95}, {value: 0x4fed, lo: 0x96, hi: 0xbf}, - // Block 
0x80, offset 0x411 + // Block 0x82, offset 0x41b {value: 0x0020, lo: 0x01}, {value: 0x552d, lo: 0x80, hi: 0xbf}, - // Block 0x81, offset 0x413 + // Block 0x83, offset 0x41d {value: 0x0020, lo: 0x03}, {value: 0x5d2d, lo: 0x80, hi: 0x84}, {value: 0x568d, lo: 0x85, hi: 0x85}, {value: 0x5dcd, lo: 0x86, hi: 0xbf}, - // Block 0x82, offset 0x417 + // Block 0x84, offset 0x421 {value: 0x0020, lo: 0x08}, {value: 0x6b8d, lo: 0x80, hi: 0x8f}, {value: 0x6d4d, lo: 0x90, hi: 0x90}, {value: 0x6d8d, lo: 0x91, hi: 0xab}, - {value: 0x6ef1, lo: 0xac, hi: 0xac}, + {value: 0x1401, lo: 0xac, hi: 0xac}, {value: 0x70ed, lo: 0xad, hi: 0xad}, {value: 0x0040, lo: 0xae, hi: 0xae}, {value: 0x0040, lo: 0xaf, hi: 0xaf}, {value: 0x710d, lo: 0xb0, hi: 0xbf}, - // Block 0x83, offset 0x420 + // Block 0x85, offset 0x42a {value: 0x0020, lo: 0x05}, {value: 0x730d, lo: 0x80, hi: 0xad}, {value: 0x656d, lo: 0xae, hi: 0xae}, {value: 0x78cd, lo: 0xaf, hi: 0xb5}, {value: 0x6f8d, lo: 0xb6, hi: 0xb6}, {value: 0x79ad, lo: 0xb7, hi: 0xbf}, - // Block 0x84, offset 0x426 - {value: 0x0028, lo: 0x03}, - {value: 0x7c71, lo: 0x80, hi: 0x82}, - {value: 0x7c31, lo: 0x83, hi: 0x83}, - {value: 0x7ce9, lo: 0x84, hi: 0xbf}, - // Block 0x85, offset 0x42a - {value: 0x0038, lo: 0x0f}, - {value: 0x9e01, lo: 0x80, hi: 0x83}, - {value: 0x9ea9, lo: 0x84, hi: 0x85}, - {value: 0x9ee1, lo: 0x86, hi: 0x87}, - {value: 0x9f19, lo: 0x88, hi: 0x8f}, + // Block 0x86, offset 0x430 + {value: 0x0008, lo: 0x03}, + {value: 0x1751, lo: 0x80, hi: 0x82}, + {value: 0x1741, lo: 0x83, hi: 0x83}, + {value: 0x1769, lo: 0x84, hi: 0xbf}, + // Block 0x87, offset 0x434 + {value: 0x0008, lo: 0x0f}, + {value: 0x1d81, lo: 0x80, hi: 0x83}, + {value: 0x1d99, lo: 0x84, hi: 0x85}, + {value: 0x1da1, lo: 0x86, hi: 0x87}, + {value: 0x1da9, lo: 0x88, hi: 0x8f}, {value: 0x0040, lo: 0x90, hi: 0x90}, {value: 0x0040, lo: 0x91, hi: 0x91}, - {value: 0xa0d9, lo: 0x92, hi: 0x97}, - {value: 0xa1f1, lo: 0x98, hi: 0x9c}, - {value: 0xa2d1, lo: 0x9d, hi: 0xb3}, - {value: 0x9d91, lo: 0xb4, hi: 0xb4}, - {value: 0x9e01, lo: 0xb5, hi: 0xb5}, - {value: 0xa7d9, lo: 0xb6, hi: 0xbb}, - {value: 0xa8b9, lo: 0xbc, hi: 0xbc}, - {value: 0xa849, lo: 0xbd, hi: 0xbd}, - {value: 0xa929, lo: 0xbe, hi: 0xbf}, - // Block 0x86, offset 0x43a + {value: 0x1de9, lo: 0x92, hi: 0x97}, + {value: 0x1e11, lo: 0x98, hi: 0x9c}, + {value: 0x1e31, lo: 0x9d, hi: 0xb3}, + {value: 0x1d71, lo: 0xb4, hi: 0xb4}, + {value: 0x1d81, lo: 0xb5, hi: 0xb5}, + {value: 0x1ee9, lo: 0xb6, hi: 0xbb}, + {value: 0x1f09, lo: 0xbc, hi: 0xbc}, + {value: 0x1ef9, lo: 0xbd, hi: 0xbd}, + {value: 0x1f19, lo: 0xbe, hi: 0xbf}, + // Block 0x88, offset 0x444 {value: 0x0000, lo: 0x09}, {value: 0x0008, lo: 0x80, hi: 0x8b}, {value: 0x0040, lo: 0x8c, hi: 0x8c}, @@ -3624,24 +3779,24 @@ {value: 0x0008, lo: 0xbc, hi: 0xbd}, {value: 0x0040, lo: 0xbe, hi: 0xbe}, {value: 0x0008, lo: 0xbf, hi: 0xbf}, - // Block 0x87, offset 0x444 + // Block 0x89, offset 0x44e {value: 0x0000, lo: 0x04}, {value: 0x0008, lo: 0x80, hi: 0x8d}, {value: 0x0040, lo: 0x8e, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x9d}, {value: 0x0040, lo: 0x9e, hi: 0xbf}, - // Block 0x88, offset 0x449 + // Block 0x8a, offset 0x453 {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0xba}, {value: 0x0040, lo: 0xbb, hi: 0xbf}, - // Block 0x89, offset 0x44c + // Block 0x8b, offset 0x456 {value: 0x0000, lo: 0x05}, {value: 0x0018, lo: 0x80, hi: 0x82}, {value: 0x0040, lo: 0x83, hi: 0x86}, {value: 0x0018, lo: 0x87, hi: 0xb3}, {value: 0x0040, lo: 0xb4, hi: 0xb6}, {value: 0x0018, lo: 0xb7, hi: 0xbf}, - // Block 0x8a, offset 
0x452 + // Block 0x8c, offset 0x45c {value: 0x0000, lo: 0x06}, {value: 0x0018, lo: 0x80, hi: 0x8e}, {value: 0x0040, lo: 0x8f, hi: 0x8f}, @@ -3649,31 +3804,31 @@ {value: 0x0040, lo: 0x9d, hi: 0x9f}, {value: 0x0018, lo: 0xa0, hi: 0xa0}, {value: 0x0040, lo: 0xa1, hi: 0xbf}, - // Block 0x8b, offset 0x459 + // Block 0x8d, offset 0x463 {value: 0x0000, lo: 0x04}, {value: 0x0040, lo: 0x80, hi: 0x8f}, {value: 0x0018, lo: 0x90, hi: 0xbc}, {value: 0x3308, lo: 0xbd, hi: 0xbd}, {value: 0x0040, lo: 0xbe, hi: 0xbf}, - // Block 0x8c, offset 0x45e + // Block 0x8e, offset 0x468 {value: 0x0000, lo: 0x03}, {value: 0x0008, lo: 0x80, hi: 0x9c}, {value: 0x0040, lo: 0x9d, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xbf}, - // Block 0x8d, offset 0x462 + // Block 0x8f, offset 0x46c {value: 0x0000, lo: 0x05}, {value: 0x0008, lo: 0x80, hi: 0x90}, {value: 0x0040, lo: 0x91, hi: 0x9f}, {value: 0x3308, lo: 0xa0, hi: 0xa0}, {value: 0x0018, lo: 0xa1, hi: 0xbb}, {value: 0x0040, lo: 0xbc, hi: 0xbf}, - // Block 0x8e, offset 0x468 + // Block 0x90, offset 0x472 {value: 0x0000, lo: 0x04}, {value: 0x0008, lo: 0x80, hi: 0x9f}, {value: 0x0018, lo: 0xa0, hi: 0xa3}, {value: 0x0040, lo: 0xa4, hi: 0xac}, {value: 0x0008, lo: 0xad, hi: 0xbf}, - // Block 0x8f, offset 0x46d + // Block 0x91, offset 0x477 {value: 0x0000, lo: 0x08}, {value: 0x0008, lo: 0x80, hi: 0x80}, {value: 0x0018, lo: 0x81, hi: 0x81}, @@ -3683,20 +3838,20 @@ {value: 0x0008, lo: 0x90, hi: 0xb5}, {value: 0x3308, lo: 0xb6, hi: 0xba}, {value: 0x0040, lo: 0xbb, hi: 0xbf}, - // Block 0x90, offset 0x476 + // Block 0x92, offset 0x480 {value: 0x0000, lo: 0x04}, {value: 0x0008, lo: 0x80, hi: 0x9d}, {value: 0x0040, lo: 0x9e, hi: 0x9e}, {value: 0x0018, lo: 0x9f, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xbf}, - // Block 0x91, offset 0x47b + // Block 0x93, offset 0x485 {value: 0x0000, lo: 0x05}, {value: 0x0008, lo: 0x80, hi: 0x83}, {value: 0x0040, lo: 0x84, hi: 0x87}, {value: 0x0008, lo: 0x88, hi: 0x8f}, {value: 0x0018, lo: 0x90, hi: 0x95}, {value: 0x0040, lo: 0x96, hi: 0xbf}, - // Block 0x92, offset 0x481 + // Block 0x94, offset 0x48b {value: 0x0000, lo: 0x06}, {value: 0xe145, lo: 0x80, hi: 0x87}, {value: 0xe1c5, lo: 0x88, hi: 0x8f}, @@ -3704,7 +3859,7 @@ {value: 0x8b0d, lo: 0x98, hi: 0x9f}, {value: 0x8b25, lo: 0xa0, hi: 0xa7}, {value: 0x0008, lo: 0xa8, hi: 0xbf}, - // Block 0x93, offset 0x488 + // Block 0x95, offset 0x492 {value: 0x0000, lo: 0x06}, {value: 0x0008, lo: 0x80, hi: 0x9d}, {value: 0x0040, lo: 0x9e, hi: 0x9f}, @@ -3712,7 +3867,7 @@ {value: 0x0040, lo: 0xaa, hi: 0xaf}, {value: 0x8b25, lo: 0xb0, hi: 0xb7}, {value: 0x8b0d, lo: 0xb8, hi: 0xbf}, - // Block 0x94, offset 0x48f + // Block 0x96, offset 0x499 {value: 0x0000, lo: 0x06}, {value: 0xe145, lo: 0x80, hi: 0x87}, {value: 0xe1c5, lo: 0x88, hi: 0x8f}, @@ -3720,28 +3875,28 @@ {value: 0x0040, lo: 0x94, hi: 0x97}, {value: 0x0008, lo: 0x98, hi: 0xbb}, {value: 0x0040, lo: 0xbc, hi: 0xbf}, - // Block 0x95, offset 0x496 + // Block 0x97, offset 0x4a0 {value: 0x0000, lo: 0x03}, {value: 0x0008, lo: 0x80, hi: 0xa7}, {value: 0x0040, lo: 0xa8, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x96, offset 0x49a + // Block 0x98, offset 0x4a4 {value: 0x0000, lo: 0x04}, {value: 0x0008, lo: 0x80, hi: 0xa3}, {value: 0x0040, lo: 0xa4, hi: 0xae}, {value: 0x0018, lo: 0xaf, hi: 0xaf}, {value: 0x0040, lo: 0xb0, hi: 0xbf}, - // Block 0x97, offset 0x49f + // Block 0x99, offset 0x4a9 {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0xb6}, {value: 0x0040, lo: 0xb7, hi: 0xbf}, - // Block 0x98, offset 0x4a2 + // Block 0x9a, offset 0x4ac 
{value: 0x0000, lo: 0x04}, {value: 0x0008, lo: 0x80, hi: 0x95}, {value: 0x0040, lo: 0x96, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xa7}, {value: 0x0040, lo: 0xa8, hi: 0xbf}, - // Block 0x99, offset 0x4a7 + // Block 0x9b, offset 0x4b1 {value: 0x0000, lo: 0x0b}, {value: 0x0808, lo: 0x80, hi: 0x85}, {value: 0x0040, lo: 0x86, hi: 0x87}, @@ -3754,20 +3909,20 @@ {value: 0x0808, lo: 0xbc, hi: 0xbc}, {value: 0x0040, lo: 0xbd, hi: 0xbe}, {value: 0x0808, lo: 0xbf, hi: 0xbf}, - // Block 0x9a, offset 0x4b3 + // Block 0x9c, offset 0x4bd {value: 0x0000, lo: 0x05}, {value: 0x0808, lo: 0x80, hi: 0x95}, {value: 0x0040, lo: 0x96, hi: 0x96}, {value: 0x0818, lo: 0x97, hi: 0x9f}, {value: 0x0808, lo: 0xa0, hi: 0xb6}, {value: 0x0818, lo: 0xb7, hi: 0xbf}, - // Block 0x9b, offset 0x4b9 + // Block 0x9d, offset 0x4c3 {value: 0x0000, lo: 0x04}, {value: 0x0808, lo: 0x80, hi: 0x9e}, {value: 0x0040, lo: 0x9f, hi: 0xa6}, {value: 0x0818, lo: 0xa7, hi: 0xaf}, {value: 0x0040, lo: 0xb0, hi: 0xbf}, - // Block 0x9c, offset 0x4be + // Block 0x9e, offset 0x4c8 {value: 0x0000, lo: 0x06}, {value: 0x0040, lo: 0x80, hi: 0x9f}, {value: 0x0808, lo: 0xa0, hi: 0xb2}, @@ -3775,7 +3930,7 @@ {value: 0x0808, lo: 0xb4, hi: 0xb5}, {value: 0x0040, lo: 0xb6, hi: 0xba}, {value: 0x0818, lo: 0xbb, hi: 0xbf}, - // Block 0x9d, offset 0x4c5 + // Block 0x9f, offset 0x4cf {value: 0x0000, lo: 0x07}, {value: 0x0808, lo: 0x80, hi: 0x95}, {value: 0x0818, lo: 0x96, hi: 0x9b}, @@ -3784,18 +3939,18 @@ {value: 0x0808, lo: 0xa0, hi: 0xb9}, {value: 0x0040, lo: 0xba, hi: 0xbe}, {value: 0x0818, lo: 0xbf, hi: 0xbf}, - // Block 0x9e, offset 0x4cd + // Block 0xa0, offset 0x4d7 {value: 0x0000, lo: 0x04}, {value: 0x0808, lo: 0x80, hi: 0xb7}, {value: 0x0040, lo: 0xb8, hi: 0xbb}, {value: 0x0818, lo: 0xbc, hi: 0xbd}, {value: 0x0808, lo: 0xbe, hi: 0xbf}, - // Block 0x9f, offset 0x4d2 + // Block 0xa1, offset 0x4dc {value: 0x0000, lo: 0x03}, {value: 0x0818, lo: 0x80, hi: 0x8f}, {value: 0x0040, lo: 0x90, hi: 0x91}, {value: 0x0818, lo: 0x92, hi: 0xbf}, - // Block 0xa0, offset 0x4d6 + // Block 0xa2, offset 0x4e0 {value: 0x0000, lo: 0x0f}, {value: 0x0808, lo: 0x80, hi: 0x80}, {value: 0x3308, lo: 0x81, hi: 0x83}, @@ -3812,7 +3967,7 @@ {value: 0x3308, lo: 0xb8, hi: 0xba}, {value: 0x0040, lo: 0xbb, hi: 0xbe}, {value: 0x3b08, lo: 0xbf, hi: 0xbf}, - // Block 0xa1, offset 0x4e6 + // Block 0xa3, offset 0x4f0 {value: 0x0000, lo: 0x06}, {value: 0x0818, lo: 0x80, hi: 0x88}, {value: 0x0040, lo: 0x89, hi: 0x8f}, @@ -3820,17 +3975,17 @@ {value: 0x0040, lo: 0x99, hi: 0x9f}, {value: 0x0808, lo: 0xa0, hi: 0xbc}, {value: 0x0818, lo: 0xbd, hi: 0xbf}, - // Block 0xa2, offset 0x4ed + // Block 0xa4, offset 0x4f7 {value: 0x0000, lo: 0x03}, {value: 0x0808, lo: 0x80, hi: 0x9c}, {value: 0x0818, lo: 0x9d, hi: 0x9f}, {value: 0x0040, lo: 0xa0, hi: 0xbf}, - // Block 0xa3, offset 0x4f1 + // Block 0xa5, offset 0x4fb {value: 0x0000, lo: 0x03}, {value: 0x0808, lo: 0x80, hi: 0xb5}, {value: 0x0040, lo: 0xb6, hi: 0xb8}, {value: 0x0018, lo: 0xb9, hi: 0xbf}, - // Block 0xa4, offset 0x4f5 + // Block 0xa6, offset 0x4ff {value: 0x0000, lo: 0x06}, {value: 0x0808, lo: 0x80, hi: 0x95}, {value: 0x0040, lo: 0x96, hi: 0x97}, @@ -3838,23 +3993,23 @@ {value: 0x0808, lo: 0xa0, hi: 0xb2}, {value: 0x0040, lo: 0xb3, hi: 0xb7}, {value: 0x0818, lo: 0xb8, hi: 0xbf}, - // Block 0xa5, offset 0x4fc + // Block 0xa7, offset 0x506 {value: 0x0000, lo: 0x01}, {value: 0x0808, lo: 0x80, hi: 0xbf}, - // Block 0xa6, offset 0x4fe + // Block 0xa8, offset 0x508 {value: 0x0000, lo: 0x02}, {value: 0x0808, lo: 0x80, hi: 0x88}, {value: 0x0040, lo: 
0x89, hi: 0xbf}, - // Block 0xa7, offset 0x501 + // Block 0xa9, offset 0x50b {value: 0x0000, lo: 0x02}, {value: 0x03dd, lo: 0x80, hi: 0xb2}, {value: 0x0040, lo: 0xb3, hi: 0xbf}, - // Block 0xa8, offset 0x504 + // Block 0xaa, offset 0x50e {value: 0x0000, lo: 0x03}, {value: 0x0808, lo: 0x80, hi: 0xb2}, {value: 0x0040, lo: 0xb3, hi: 0xb9}, {value: 0x0818, lo: 0xba, hi: 0xbf}, - // Block 0xa9, offset 0x508 + // Block 0xab, offset 0x512 {value: 0x0000, lo: 0x08}, {value: 0x0908, lo: 0x80, hi: 0x80}, {value: 0x0a08, lo: 0x81, hi: 0xa1}, @@ -3864,12 +4019,12 @@ {value: 0x0040, lo: 0xa8, hi: 0xaf}, {value: 0x0808, lo: 0xb0, hi: 0xb9}, {value: 0x0040, lo: 0xba, hi: 0xbf}, - // Block 0xaa, offset 0x511 + // Block 0xac, offset 0x51b {value: 0x0000, lo: 0x03}, {value: 0x0040, lo: 0x80, hi: 0x9f}, {value: 0x0818, lo: 0xa0, hi: 0xbe}, {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0xab, offset 0x515 + // Block 0xad, offset 0x51f {value: 0x0000, lo: 0x07}, {value: 0x0808, lo: 0x80, hi: 0xa9}, {value: 0x0040, lo: 0xaa, hi: 0xaa}, @@ -3878,7 +4033,7 @@ {value: 0x0040, lo: 0xae, hi: 0xaf}, {value: 0x0808, lo: 0xb0, hi: 0xb1}, {value: 0x0040, lo: 0xb2, hi: 0xbf}, - // Block 0xac, offset 0x51d + // Block 0xae, offset 0x527 {value: 0x0000, lo: 0x07}, {value: 0x0808, lo: 0x80, hi: 0x9c}, {value: 0x0818, lo: 0x9d, hi: 0xa6}, @@ -3887,7 +4042,7 @@ {value: 0x0a08, lo: 0xb0, hi: 0xb2}, {value: 0x0c08, lo: 0xb3, hi: 0xb3}, {value: 0x0a08, lo: 0xb4, hi: 0xbf}, - // Block 0xad, offset 0x525 + // Block 0xaf, offset 0x52f {value: 0x0000, lo: 0x07}, {value: 0x0a08, lo: 0x80, hi: 0x84}, {value: 0x0808, lo: 0x85, hi: 0x85}, @@ -3896,7 +4051,7 @@ {value: 0x0c18, lo: 0x94, hi: 0x94}, {value: 0x0818, lo: 0x95, hi: 0x99}, {value: 0x0040, lo: 0x9a, hi: 0xbf}, - // Block 0xae, offset 0x52d + // Block 0xb0, offset 0x537 {value: 0x0000, lo: 0x0b}, {value: 0x0040, lo: 0x80, hi: 0xaf}, {value: 0x0a08, lo: 0xb0, hi: 0xb0}, @@ -3909,7 +4064,7 @@ {value: 0x0a08, lo: 0xbb, hi: 0xbc}, {value: 0x0c08, lo: 0xbd, hi: 0xbd}, {value: 0x0a08, lo: 0xbe, hi: 0xbf}, - // Block 0xaf, offset 0x539 + // Block 0xb1, offset 0x543 {value: 0x0000, lo: 0x0b}, {value: 0x0808, lo: 0x80, hi: 0x80}, {value: 0x0a08, lo: 0x81, hi: 0x81}, @@ -3922,14 +4077,14 @@ {value: 0x0040, lo: 0x8c, hi: 0x9f}, {value: 0x0808, lo: 0xa0, hi: 0xb6}, {value: 0x0040, lo: 0xb7, hi: 0xbf}, - // Block 0xb0, offset 0x545 + // Block 0xb2, offset 0x54f {value: 0x0000, lo: 0x05}, {value: 0x3008, lo: 0x80, hi: 0x80}, {value: 0x3308, lo: 0x81, hi: 0x81}, {value: 0x3008, lo: 0x82, hi: 0x82}, {value: 0x0008, lo: 0x83, hi: 0xb7}, {value: 0x3308, lo: 0xb8, hi: 0xbf}, - // Block 0xb1, offset 0x54b + // Block 0xb3, offset 0x555 {value: 0x0000, lo: 0x08}, {value: 0x3308, lo: 0x80, hi: 0x85}, {value: 0x3b08, lo: 0x86, hi: 0x86}, @@ -3939,7 +4094,7 @@ {value: 0x0008, lo: 0xa6, hi: 0xaf}, {value: 0x0040, lo: 0xb0, hi: 0xbe}, {value: 0x3b08, lo: 0xbf, hi: 0xbf}, - // Block 0xb2, offset 0x554 + // Block 0xb4, offset 0x55e {value: 0x0000, lo: 0x0b}, {value: 0x3308, lo: 0x80, hi: 0x81}, {value: 0x3008, lo: 0x82, hi: 0x82}, @@ -3952,7 +4107,7 @@ {value: 0x0018, lo: 0xbb, hi: 0xbc}, {value: 0x0040, lo: 0xbd, hi: 0xbd}, {value: 0x0018, lo: 0xbe, hi: 0xbf}, - // Block 0xb3, offset 0x560 + // Block 0xb5, offset 0x56a {value: 0x0000, lo: 0x06}, {value: 0x0018, lo: 0x80, hi: 0x81}, {value: 0x0040, lo: 0x82, hi: 0x8f}, @@ -3960,7 +4115,7 @@ {value: 0x0040, lo: 0xa9, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xb9}, {value: 0x0040, lo: 0xba, hi: 0xbf}, - // Block 0xb4, offset 0x567 + // Block 0xb6, offset 
0x571 {value: 0x0000, lo: 0x08}, {value: 0x3308, lo: 0x80, hi: 0x82}, {value: 0x0008, lo: 0x83, hi: 0xa6}, @@ -3970,7 +4125,7 @@ {value: 0x3b08, lo: 0xb3, hi: 0xb4}, {value: 0x0040, lo: 0xb5, hi: 0xb5}, {value: 0x0008, lo: 0xb6, hi: 0xbf}, - // Block 0xb5, offset 0x570 + // Block 0xb7, offset 0x57a {value: 0x0000, lo: 0x0a}, {value: 0x0018, lo: 0x80, hi: 0x83}, {value: 0x0008, lo: 0x84, hi: 0x84}, @@ -3982,7 +4137,7 @@ {value: 0x0018, lo: 0xb4, hi: 0xb5}, {value: 0x0008, lo: 0xb6, hi: 0xb6}, {value: 0x0040, lo: 0xb7, hi: 0xbf}, - // Block 0xb6, offset 0x57b + // Block 0xb8, offset 0x585 {value: 0x0000, lo: 0x06}, {value: 0x3308, lo: 0x80, hi: 0x81}, {value: 0x3008, lo: 0x82, hi: 0x82}, @@ -3990,7 +4145,7 @@ {value: 0x3008, lo: 0xb3, hi: 0xb5}, {value: 0x3308, lo: 0xb6, hi: 0xbe}, {value: 0x3008, lo: 0xbf, hi: 0xbf}, - // Block 0xb7, offset 0x582 + // Block 0xb9, offset 0x58c {value: 0x0000, lo: 0x0e}, {value: 0x3808, lo: 0x80, hi: 0x80}, {value: 0x0008, lo: 0x81, hi: 0x84}, @@ -4006,7 +4161,7 @@ {value: 0x0040, lo: 0xa0, hi: 0xa0}, {value: 0x0018, lo: 0xa1, hi: 0xb4}, {value: 0x0040, lo: 0xb5, hi: 0xbf}, - // Block 0xb8, offset 0x591 + // Block 0xba, offset 0x59b {value: 0x0000, lo: 0x0c}, {value: 0x0008, lo: 0x80, hi: 0x91}, {value: 0x0040, lo: 0x92, hi: 0x92}, @@ -4020,7 +4175,7 @@ {value: 0x0018, lo: 0xb8, hi: 0xbd}, {value: 0x3308, lo: 0xbe, hi: 0xbe}, {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0xb9, offset 0x59e + // Block 0xbb, offset 0x5a8 {value: 0x0000, lo: 0x0c}, {value: 0x0008, lo: 0x80, hi: 0x86}, {value: 0x0040, lo: 0x87, hi: 0x87}, @@ -4034,7 +4189,7 @@ {value: 0x0018, lo: 0xa9, hi: 0xa9}, {value: 0x0040, lo: 0xaa, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0xba, offset 0x5ab + // Block 0xbc, offset 0x5b5 {value: 0x0000, lo: 0x08}, {value: 0x0008, lo: 0x80, hi: 0x9e}, {value: 0x3308, lo: 0x9f, hi: 0x9f}, @@ -4044,12 +4199,12 @@ {value: 0x0040, lo: 0xab, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xb9}, {value: 0x0040, lo: 0xba, hi: 0xbf}, - // Block 0xbb, offset 0x5b4 + // Block 0xbd, offset 0x5be {value: 0x0000, lo: 0x03}, {value: 0x0008, lo: 0x80, hi: 0xb4}, {value: 0x3008, lo: 0xb5, hi: 0xb7}, {value: 0x3308, lo: 0xb8, hi: 0xbf}, - // Block 0xbc, offset 0x5b8 + // Block 0xbe, offset 0x5c2 {value: 0x0000, lo: 0x0e}, {value: 0x3008, lo: 0x80, hi: 0x81}, {value: 0x3b08, lo: 0x82, hi: 0x82}, @@ -4065,7 +4220,7 @@ {value: 0x3308, lo: 0x9e, hi: 0x9e}, {value: 0x0008, lo: 0x9f, hi: 0xa1}, {value: 0x0040, lo: 0xa2, hi: 0xbf}, - // Block 0xbd, offset 0x5c7 + // Block 0xbf, offset 0x5d1 {value: 0x0000, lo: 0x07}, {value: 0x0008, lo: 0x80, hi: 0xaf}, {value: 0x3008, lo: 0xb0, hi: 0xb2}, @@ -4074,7 +4229,7 @@ {value: 0x3308, lo: 0xba, hi: 0xba}, {value: 0x3008, lo: 0xbb, hi: 0xbe}, {value: 0x3308, lo: 0xbf, hi: 0xbf}, - // Block 0xbe, offset 0x5cf + // Block 0xc0, offset 0x5d9 {value: 0x0000, lo: 0x0a}, {value: 0x3308, lo: 0x80, hi: 0x80}, {value: 0x3008, lo: 0x81, hi: 0x81}, @@ -4086,7 +4241,7 @@ {value: 0x0040, lo: 0x88, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x99}, {value: 0x0040, lo: 0x9a, hi: 0xbf}, - // Block 0xbf, offset 0x5da + // Block 0xc1, offset 0x5e4 {value: 0x0000, lo: 0x08}, {value: 0x0008, lo: 0x80, hi: 0xae}, {value: 0x3008, lo: 0xaf, hi: 0xb1}, @@ -4096,14 +4251,14 @@ {value: 0x3308, lo: 0xbc, hi: 0xbd}, {value: 0x3008, lo: 0xbe, hi: 0xbe}, {value: 0x3b08, lo: 0xbf, hi: 0xbf}, - // Block 0xc0, offset 0x5e3 + // Block 0xc2, offset 0x5ed {value: 0x0000, lo: 0x05}, {value: 0x3308, lo: 0x80, hi: 0x80}, {value: 0x0018, lo: 0x81, hi: 0x97}, {value: 
0x0008, lo: 0x98, hi: 0x9b}, {value: 0x3308, lo: 0x9c, hi: 0x9d}, {value: 0x0040, lo: 0x9e, hi: 0xbf}, - // Block 0xc1, offset 0x5e9 + // Block 0xc3, offset 0x5f3 {value: 0x0000, lo: 0x07}, {value: 0x0008, lo: 0x80, hi: 0xaf}, {value: 0x3008, lo: 0xb0, hi: 0xb2}, @@ -4112,7 +4267,7 @@ {value: 0x3308, lo: 0xbd, hi: 0xbd}, {value: 0x3008, lo: 0xbe, hi: 0xbe}, {value: 0x3b08, lo: 0xbf, hi: 0xbf}, - // Block 0xc2, offset 0x5f1 + // Block 0xc4, offset 0x5fb {value: 0x0000, lo: 0x08}, {value: 0x3308, lo: 0x80, hi: 0x80}, {value: 0x0018, lo: 0x81, hi: 0x83}, @@ -4122,7 +4277,7 @@ {value: 0x0040, lo: 0x9a, hi: 0x9f}, {value: 0x0018, lo: 0xa0, hi: 0xac}, {value: 0x0040, lo: 0xad, hi: 0xbf}, - // Block 0xc3, offset 0x5fa + // Block 0xc5, offset 0x604 {value: 0x0000, lo: 0x0a}, {value: 0x0008, lo: 0x80, hi: 0xaa}, {value: 0x3308, lo: 0xab, hi: 0xab}, @@ -4134,11 +4289,11 @@ {value: 0x3308, lo: 0xb7, hi: 0xb7}, {value: 0x0008, lo: 0xb8, hi: 0xb8}, {value: 0x0040, lo: 0xb9, hi: 0xbf}, - // Block 0xc4, offset 0x605 + // Block 0xc6, offset 0x60f {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0x89}, {value: 0x0040, lo: 0x8a, hi: 0xbf}, - // Block 0xc5, offset 0x608 + // Block 0xc7, offset 0x612 {value: 0x0000, lo: 0x0b}, {value: 0x0008, lo: 0x80, hi: 0x9a}, {value: 0x0040, lo: 0x9b, hi: 0x9c}, @@ -4151,7 +4306,7 @@ {value: 0x0040, lo: 0xac, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xb9}, {value: 0x0018, lo: 0xba, hi: 0xbf}, - // Block 0xc6, offset 0x614 + // Block 0xc8, offset 0x61e {value: 0x0000, lo: 0x08}, {value: 0x0008, lo: 0x80, hi: 0xab}, {value: 0x3008, lo: 0xac, hi: 0xae}, @@ -4161,17 +4316,17 @@ {value: 0x3308, lo: 0xba, hi: 0xba}, {value: 0x0018, lo: 0xbb, hi: 0xbb}, {value: 0x0040, lo: 0xbc, hi: 0xbf}, - // Block 0xc7, offset 0x61d + // Block 0xc9, offset 0x627 {value: 0x0000, lo: 0x02}, {value: 0x0040, lo: 0x80, hi: 0x9f}, {value: 0x049d, lo: 0xa0, hi: 0xbf}, - // Block 0xc8, offset 0x620 + // Block 0xca, offset 0x62a {value: 0x0000, lo: 0x04}, {value: 0x0008, lo: 0x80, hi: 0xa9}, {value: 0x0018, lo: 0xaa, hi: 0xb2}, {value: 0x0040, lo: 0xb3, hi: 0xbe}, {value: 0x0008, lo: 0xbf, hi: 0xbf}, - // Block 0xc9, offset 0x625 + // Block 0xcb, offset 0x62f {value: 0x0000, lo: 0x08}, {value: 0x3008, lo: 0x80, hi: 0x80}, {value: 0x0008, lo: 0x81, hi: 0x81}, @@ -4181,13 +4336,13 @@ {value: 0x0040, lo: 0x87, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x99}, {value: 0x0040, lo: 0x9a, hi: 0xbf}, - // Block 0xca, offset 0x62e + // Block 0xcc, offset 0x638 {value: 0x0000, lo: 0x04}, {value: 0x0040, lo: 0x80, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xa7}, {value: 0x0040, lo: 0xa8, hi: 0xa9}, {value: 0x0008, lo: 0xaa, hi: 0xbf}, - // Block 0xcb, offset 0x633 + // Block 0xcd, offset 0x63d {value: 0x0000, lo: 0x0c}, {value: 0x0008, lo: 0x80, hi: 0x90}, {value: 0x3008, lo: 0x91, hi: 0x93}, @@ -4201,7 +4356,7 @@ {value: 0x0008, lo: 0xa3, hi: 0xa3}, {value: 0x3008, lo: 0xa4, hi: 0xa4}, {value: 0x0040, lo: 0xa5, hi: 0xbf}, - // Block 0xcc, offset 0x640 + // Block 0xce, offset 0x64a {value: 0x0000, lo: 0x0a}, {value: 0x0008, lo: 0x80, hi: 0x80}, {value: 0x3308, lo: 0x81, hi: 0x8a}, @@ -4213,7 +4368,7 @@ {value: 0x0008, lo: 0xba, hi: 0xba}, {value: 0x3308, lo: 0xbb, hi: 0xbe}, {value: 0x0018, lo: 0xbf, hi: 0xbf}, - // Block 0xcd, offset 0x64b + // Block 0xcf, offset 0x655 {value: 0x0000, lo: 0x08}, {value: 0x0018, lo: 0x80, hi: 0x86}, {value: 0x3b08, lo: 0x87, hi: 0x87}, @@ -4223,7 +4378,7 @@ {value: 0x3008, lo: 0x97, hi: 0x98}, {value: 0x3308, lo: 0x99, hi: 0x9b}, {value: 0x0008, lo: 0x9c, hi: 0xbf}, - // 
Block 0xce, offset 0x654 + // Block 0xd0, offset 0x65e {value: 0x0000, lo: 0x09}, {value: 0x0008, lo: 0x80, hi: 0x89}, {value: 0x3308, lo: 0x8a, hi: 0x96}, @@ -4234,11 +4389,11 @@ {value: 0x0008, lo: 0x9d, hi: 0x9d}, {value: 0x0018, lo: 0x9e, hi: 0xa2}, {value: 0x0040, lo: 0xa3, hi: 0xbf}, - // Block 0xcf, offset 0x65e + // Block 0xd1, offset 0x668 {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0xb8}, {value: 0x0040, lo: 0xb9, hi: 0xbf}, - // Block 0xd0, offset 0x661 + // Block 0xd2, offset 0x66b {value: 0x0000, lo: 0x09}, {value: 0x0008, lo: 0x80, hi: 0x88}, {value: 0x0040, lo: 0x89, hi: 0x89}, @@ -4249,7 +4404,7 @@ {value: 0x3308, lo: 0xb8, hi: 0xbd}, {value: 0x3008, lo: 0xbe, hi: 0xbe}, {value: 0x3b08, lo: 0xbf, hi: 0xbf}, - // Block 0xd1, offset 0x66b + // Block 0xd3, offset 0x675 {value: 0x0000, lo: 0x08}, {value: 0x0008, lo: 0x80, hi: 0x80}, {value: 0x0018, lo: 0x81, hi: 0x85}, @@ -4259,7 +4414,7 @@ {value: 0x0040, lo: 0xad, hi: 0xaf}, {value: 0x0018, lo: 0xb0, hi: 0xb1}, {value: 0x0008, lo: 0xb2, hi: 0xbf}, - // Block 0xd2, offset 0x674 + // Block 0xd4, offset 0x67e {value: 0x0000, lo: 0x0b}, {value: 0x0008, lo: 0x80, hi: 0x8f}, {value: 0x0040, lo: 0x90, hi: 0x91}, @@ -4272,7 +4427,7 @@ {value: 0x3008, lo: 0xb4, hi: 0xb4}, {value: 0x3308, lo: 0xb5, hi: 0xb6}, {value: 0x0040, lo: 0xb7, hi: 0xbf}, - // Block 0xd3, offset 0x680 + // Block 0xd5, offset 0x68a {value: 0x0000, lo: 0x0c}, {value: 0x0008, lo: 0x80, hi: 0x86}, {value: 0x0040, lo: 0x87, hi: 0x87}, @@ -4286,7 +4441,7 @@ {value: 0x3308, lo: 0xbc, hi: 0xbd}, {value: 0x0040, lo: 0xbe, hi: 0xbe}, {value: 0x3308, lo: 0xbf, hi: 0xbf}, - // Block 0xd4, offset 0x68d + // Block 0xd6, offset 0x697 {value: 0x0000, lo: 0x0c}, {value: 0x3308, lo: 0x80, hi: 0x83}, {value: 0x3b08, lo: 0x84, hi: 0x85}, @@ -4300,7 +4455,7 @@ {value: 0x0008, lo: 0xa7, hi: 0xa8}, {value: 0x0040, lo: 0xa9, hi: 0xa9}, {value: 0x0008, lo: 0xaa, hi: 0xbf}, - // Block 0xd5, offset 0x69a + // Block 0xd7, offset 0x6a4 {value: 0x0000, lo: 0x0d}, {value: 0x0008, lo: 0x80, hi: 0x89}, {value: 0x3008, lo: 0x8a, hi: 0x8e}, @@ -4315,7 +4470,7 @@ {value: 0x0040, lo: 0x99, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xa9}, {value: 0x0040, lo: 0xaa, hi: 0xbf}, - // Block 0xd6, offset 0x6a8 + // Block 0xd8, offset 0x6b2 {value: 0x0000, lo: 0x06}, {value: 0x0040, lo: 0x80, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xb2}, @@ -4323,41 +4478,41 @@ {value: 0x3008, lo: 0xb5, hi: 0xb6}, {value: 0x0018, lo: 0xb7, hi: 0xb8}, {value: 0x0040, lo: 0xb9, hi: 0xbf}, - // Block 0xd7, offset 0x6af + // Block 0xd9, offset 0x6b9 {value: 0x0000, lo: 0x03}, {value: 0x0040, lo: 0x80, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xb0}, {value: 0x0040, lo: 0xb1, hi: 0xbf}, - // Block 0xd8, offset 0x6b3 + // Block 0xda, offset 0x6bd {value: 0x0000, lo: 0x03}, {value: 0x0018, lo: 0x80, hi: 0xb1}, {value: 0x0040, lo: 0xb2, hi: 0xbe}, {value: 0x0018, lo: 0xbf, hi: 0xbf}, - // Block 0xd9, offset 0x6b7 + // Block 0xdb, offset 0x6c1 {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0x99}, {value: 0x0040, lo: 0x9a, hi: 0xbf}, - // Block 0xda, offset 0x6ba + // Block 0xdc, offset 0x6c4 {value: 0x0000, lo: 0x04}, {value: 0x0018, lo: 0x80, hi: 0xae}, {value: 0x0040, lo: 0xaf, hi: 0xaf}, {value: 0x0018, lo: 0xb0, hi: 0xb4}, {value: 0x0040, lo: 0xb5, hi: 0xbf}, - // Block 0xdb, offset 0x6bf + // Block 0xdd, offset 0x6c9 {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0x83}, {value: 0x0040, lo: 0x84, hi: 0xbf}, - // Block 0xdc, offset 0x6c2 + // Block 0xde, offset 0x6cc {value: 0x0000, lo: 0x04}, 
{value: 0x0008, lo: 0x80, hi: 0xae}, {value: 0x0040, lo: 0xaf, hi: 0xaf}, {value: 0x0340, lo: 0xb0, hi: 0xb8}, {value: 0x0040, lo: 0xb9, hi: 0xbf}, - // Block 0xdd, offset 0x6c7 + // Block 0xdf, offset 0x6d1 {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0x86}, {value: 0x0040, lo: 0x87, hi: 0xbf}, - // Block 0xde, offset 0x6ca + // Block 0xe0, offset 0x6d4 {value: 0x0000, lo: 0x06}, {value: 0x0008, lo: 0x80, hi: 0x9e}, {value: 0x0040, lo: 0x9f, hi: 0x9f}, @@ -4365,7 +4520,7 @@ {value: 0x0040, lo: 0xaa, hi: 0xad}, {value: 0x0018, lo: 0xae, hi: 0xaf}, {value: 0x0040, lo: 0xb0, hi: 0xbf}, - // Block 0xdf, offset 0x6d1 + // Block 0xe1, offset 0x6db {value: 0x0000, lo: 0x06}, {value: 0x0040, lo: 0x80, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0xad}, @@ -4373,12 +4528,12 @@ {value: 0x3308, lo: 0xb0, hi: 0xb4}, {value: 0x0018, lo: 0xb5, hi: 0xb5}, {value: 0x0040, lo: 0xb6, hi: 0xbf}, - // Block 0xe0, offset 0x6d8 + // Block 0xe2, offset 0x6e2 {value: 0x0000, lo: 0x03}, {value: 0x0008, lo: 0x80, hi: 0xaf}, {value: 0x3308, lo: 0xb0, hi: 0xb6}, {value: 0x0018, lo: 0xb7, hi: 0xbf}, - // Block 0xe1, offset 0x6dc + // Block 0xe3, offset 0x6e6 {value: 0x0000, lo: 0x0a}, {value: 0x0008, lo: 0x80, hi: 0x83}, {value: 0x0018, lo: 0x84, hi: 0x85}, @@ -4390,33 +4545,33 @@ {value: 0x0008, lo: 0xa3, hi: 0xb7}, {value: 0x0040, lo: 0xb8, hi: 0xbc}, {value: 0x0008, lo: 0xbd, hi: 0xbf}, - // Block 0xe2, offset 0x6e7 + // Block 0xe4, offset 0x6f1 {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0x8f}, {value: 0x0040, lo: 0x90, hi: 0xbf}, - // Block 0xe3, offset 0x6ea + // Block 0xe5, offset 0x6f4 {value: 0x0000, lo: 0x02}, {value: 0xe105, lo: 0x80, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xbf}, - // Block 0xe4, offset 0x6ed + // Block 0xe6, offset 0x6f7 {value: 0x0000, lo: 0x02}, {value: 0x0018, lo: 0x80, hi: 0x9a}, {value: 0x0040, lo: 0x9b, hi: 0xbf}, - // Block 0xe5, offset 0x6f0 + // Block 0xe7, offset 0x6fa {value: 0x0000, lo: 0x05}, {value: 0x0008, lo: 0x80, hi: 0x8a}, {value: 0x0040, lo: 0x8b, hi: 0x8e}, {value: 0x3308, lo: 0x8f, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x90}, {value: 0x3008, lo: 0x91, hi: 0xbf}, - // Block 0xe6, offset 0x6f6 + // Block 0xe8, offset 0x700 {value: 0x0000, lo: 0x05}, {value: 0x3008, lo: 0x80, hi: 0x87}, {value: 0x0040, lo: 0x88, hi: 0x8e}, {value: 0x3308, lo: 0x8f, hi: 0x92}, {value: 0x0008, lo: 0x93, hi: 0x9f}, {value: 0x0040, lo: 0xa0, hi: 0xbf}, - // Block 0xe7, offset 0x6fc + // Block 0xe9, offset 0x706 {value: 0x0000, lo: 0x08}, {value: 0x0040, lo: 0x80, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xa1}, @@ -4426,23 +4581,23 @@ {value: 0x0040, lo: 0xa5, hi: 0xaf}, {value: 0x3008, lo: 0xb0, hi: 0xb1}, {value: 0x0040, lo: 0xb2, hi: 0xbf}, - // Block 0xe8, offset 0x705 + // Block 0xea, offset 0x70f {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0xb7}, {value: 0x0040, lo: 0xb8, hi: 0xbf}, - // Block 0xe9, offset 0x708 + // Block 0xeb, offset 0x712 {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0x95}, {value: 0x0040, lo: 0x96, hi: 0xbf}, - // Block 0xea, offset 0x70b + // Block 0xec, offset 0x715 {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0x88}, {value: 0x0040, lo: 0x89, hi: 0xbf}, - // Block 0xeb, offset 0x70e + // Block 0xed, offset 0x718 {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0x9e}, {value: 0x0040, lo: 0x9f, hi: 0xbf}, - // Block 0xec, offset 0x711 + // Block 0xee, offset 0x71b {value: 0x0000, lo: 0x06}, {value: 0x0040, lo: 0x80, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x92}, @@ -4450,17 +4605,17 @@ {value: 
0x0008, lo: 0xa4, hi: 0xa7}, {value: 0x0040, lo: 0xa8, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0xed, offset 0x718 + // Block 0xef, offset 0x722 {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0xbb}, {value: 0x0040, lo: 0xbc, hi: 0xbf}, - // Block 0xee, offset 0x71b + // Block 0xf0, offset 0x725 {value: 0x0000, lo: 0x04}, {value: 0x0008, lo: 0x80, hi: 0xaa}, {value: 0x0040, lo: 0xab, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xbc}, {value: 0x0040, lo: 0xbd, hi: 0xbf}, - // Block 0xef, offset 0x720 + // Block 0xf1, offset 0x72a {value: 0x0000, lo: 0x09}, {value: 0x0008, lo: 0x80, hi: 0x88}, {value: 0x0040, lo: 0x89, hi: 0x8f}, @@ -4471,32 +4626,32 @@ {value: 0x0018, lo: 0x9f, hi: 0x9f}, {value: 0x03c0, lo: 0xa0, hi: 0xa3}, {value: 0x0040, lo: 0xa4, hi: 0xbf}, - // Block 0xf0, offset 0x72a + // Block 0xf2, offset 0x734 {value: 0x0000, lo: 0x02}, {value: 0x0018, lo: 0x80, hi: 0xb5}, {value: 0x0040, lo: 0xb6, hi: 0xbf}, - // Block 0xf1, offset 0x72d + // Block 0xf3, offset 0x737 {value: 0x0000, lo: 0x03}, {value: 0x0018, lo: 0x80, hi: 0xa6}, {value: 0x0040, lo: 0xa7, hi: 0xa8}, {value: 0x0018, lo: 0xa9, hi: 0xbf}, - // Block 0xf2, offset 0x731 + // Block 0xf4, offset 0x73b {value: 0x0000, lo: 0x0e}, {value: 0x0018, lo: 0x80, hi: 0x9d}, - {value: 0xb609, lo: 0x9e, hi: 0x9e}, - {value: 0xb651, lo: 0x9f, hi: 0x9f}, - {value: 0xb699, lo: 0xa0, hi: 0xa0}, - {value: 0xb701, lo: 0xa1, hi: 0xa1}, - {value: 0xb769, lo: 0xa2, hi: 0xa2}, - {value: 0xb7d1, lo: 0xa3, hi: 0xa3}, - {value: 0xb839, lo: 0xa4, hi: 0xa4}, + {value: 0x2211, lo: 0x9e, hi: 0x9e}, + {value: 0x2219, lo: 0x9f, hi: 0x9f}, + {value: 0x2221, lo: 0xa0, hi: 0xa0}, + {value: 0x2229, lo: 0xa1, hi: 0xa1}, + {value: 0x2231, lo: 0xa2, hi: 0xa2}, + {value: 0x2239, lo: 0xa3, hi: 0xa3}, + {value: 0x2241, lo: 0xa4, hi: 0xa4}, {value: 0x3018, lo: 0xa5, hi: 0xa6}, {value: 0x3318, lo: 0xa7, hi: 0xa9}, {value: 0x0018, lo: 0xaa, hi: 0xac}, {value: 0x3018, lo: 0xad, hi: 0xb2}, {value: 0x0340, lo: 0xb3, hi: 0xba}, {value: 0x3318, lo: 0xbb, hi: 0xbf}, - // Block 0xf3, offset 0x740 + // Block 0xf5, offset 0x74a {value: 0x0000, lo: 0x0b}, {value: 0x3318, lo: 0x80, hi: 0x82}, {value: 0x0018, lo: 0x83, hi: 0x84}, @@ -4504,45 +4659,45 @@ {value: 0x0018, lo: 0x8c, hi: 0xa9}, {value: 0x3318, lo: 0xaa, hi: 0xad}, {value: 0x0018, lo: 0xae, hi: 0xba}, - {value: 0xb8a1, lo: 0xbb, hi: 0xbb}, - {value: 0xb8e9, lo: 0xbc, hi: 0xbc}, - {value: 0xb931, lo: 0xbd, hi: 0xbd}, - {value: 0xb999, lo: 0xbe, hi: 0xbe}, - {value: 0xba01, lo: 0xbf, hi: 0xbf}, - // Block 0xf4, offset 0x74c + {value: 0x2249, lo: 0xbb, hi: 0xbb}, + {value: 0x2251, lo: 0xbc, hi: 0xbc}, + {value: 0x2259, lo: 0xbd, hi: 0xbd}, + {value: 0x2261, lo: 0xbe, hi: 0xbe}, + {value: 0x2269, lo: 0xbf, hi: 0xbf}, + // Block 0xf6, offset 0x756 {value: 0x0000, lo: 0x03}, - {value: 0xba69, lo: 0x80, hi: 0x80}, + {value: 0x2271, lo: 0x80, hi: 0x80}, {value: 0x0018, lo: 0x81, hi: 0xa8}, {value: 0x0040, lo: 0xa9, hi: 0xbf}, - // Block 0xf5, offset 0x750 + // Block 0xf7, offset 0x75a {value: 0x0000, lo: 0x04}, {value: 0x0018, lo: 0x80, hi: 0x81}, {value: 0x3318, lo: 0x82, hi: 0x84}, {value: 0x0018, lo: 0x85, hi: 0x85}, {value: 0x0040, lo: 0x86, hi: 0xbf}, - // Block 0xf6, offset 0x755 + // Block 0xf8, offset 0x75f {value: 0x0000, lo: 0x03}, {value: 0x0040, lo: 0x80, hi: 0x9f}, {value: 0x0018, lo: 0xa0, hi: 0xb3}, {value: 0x0040, lo: 0xb4, hi: 0xbf}, - // Block 0xf7, offset 0x759 + // Block 0xf9, offset 0x763 {value: 0x0000, lo: 0x04}, {value: 0x0018, lo: 0x80, hi: 0x96}, {value: 0x0040, lo: 0x97, 
hi: 0x9f}, {value: 0x0018, lo: 0xa0, hi: 0xb8}, {value: 0x0040, lo: 0xb9, hi: 0xbf}, - // Block 0xf8, offset 0x75e + // Block 0xfa, offset 0x768 {value: 0x0000, lo: 0x03}, {value: 0x3308, lo: 0x80, hi: 0xb6}, {value: 0x0018, lo: 0xb7, hi: 0xba}, {value: 0x3308, lo: 0xbb, hi: 0xbf}, - // Block 0xf9, offset 0x762 + // Block 0xfb, offset 0x76c {value: 0x0000, lo: 0x04}, {value: 0x3308, lo: 0x80, hi: 0xac}, {value: 0x0018, lo: 0xad, hi: 0xb4}, {value: 0x3308, lo: 0xb5, hi: 0xb5}, {value: 0x0018, lo: 0xb6, hi: 0xbf}, - // Block 0xfa, offset 0x767 + // Block 0xfc, offset 0x771 {value: 0x0000, lo: 0x08}, {value: 0x0018, lo: 0x80, hi: 0x83}, {value: 0x3308, lo: 0x84, hi: 0x84}, @@ -4552,7 +4707,7 @@ {value: 0x0040, lo: 0xa0, hi: 0xa0}, {value: 0x3308, lo: 0xa1, hi: 0xaf}, {value: 0x0040, lo: 0xb0, hi: 0xbf}, - // Block 0xfb, offset 0x770 + // Block 0xfd, offset 0x77a {value: 0x0000, lo: 0x0a}, {value: 0x3308, lo: 0x80, hi: 0x86}, {value: 0x0040, lo: 0x87, hi: 0x87}, @@ -4564,35 +4719,35 @@ {value: 0x0040, lo: 0xa5, hi: 0xa5}, {value: 0x3308, lo: 0xa6, hi: 0xaa}, {value: 0x0040, lo: 0xab, hi: 0xbf}, - // Block 0xfc, offset 0x77b + // Block 0xfe, offset 0x785 {value: 0x0000, lo: 0x05}, {value: 0x0008, lo: 0x80, hi: 0xac}, {value: 0x0040, lo: 0xad, hi: 0xaf}, {value: 0x3308, lo: 0xb0, hi: 0xb6}, {value: 0x0008, lo: 0xb7, hi: 0xbd}, {value: 0x0040, lo: 0xbe, hi: 0xbf}, - // Block 0xfd, offset 0x781 + // Block 0xff, offset 0x78b {value: 0x0000, lo: 0x05}, {value: 0x0008, lo: 0x80, hi: 0x89}, {value: 0x0040, lo: 0x8a, hi: 0x8d}, {value: 0x0008, lo: 0x8e, hi: 0x8e}, {value: 0x0018, lo: 0x8f, hi: 0x8f}, {value: 0x0040, lo: 0x90, hi: 0xbf}, - // Block 0xfe, offset 0x787 + // Block 0x100, offset 0x791 {value: 0x0000, lo: 0x05}, {value: 0x0008, lo: 0x80, hi: 0xab}, {value: 0x3308, lo: 0xac, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xb9}, {value: 0x0040, lo: 0xba, hi: 0xbe}, {value: 0x0018, lo: 0xbf, hi: 0xbf}, - // Block 0xff, offset 0x78d + // Block 0x101, offset 0x797 {value: 0x0000, lo: 0x05}, {value: 0x0808, lo: 0x80, hi: 0x84}, {value: 0x0040, lo: 0x85, hi: 0x86}, {value: 0x0818, lo: 0x87, hi: 0x8f}, {value: 0x3308, lo: 0x90, hi: 0x96}, {value: 0x0040, lo: 0x97, hi: 0xbf}, - // Block 0x100, offset 0x793 + // Block 0x102, offset 0x79d {value: 0x0000, lo: 0x08}, {value: 0x0a08, lo: 0x80, hi: 0x83}, {value: 0x3308, lo: 0x84, hi: 0x8a}, @@ -4602,71 +4757,71 @@ {value: 0x0040, lo: 0x9a, hi: 0x9d}, {value: 0x0818, lo: 0x9e, hi: 0x9f}, {value: 0x0040, lo: 0xa0, hi: 0xbf}, - // Block 0x101, offset 0x79c + // Block 0x103, offset 0x7a6 {value: 0x0000, lo: 0x02}, {value: 0x0040, lo: 0x80, hi: 0xb0}, {value: 0x0818, lo: 0xb1, hi: 0xbf}, - // Block 0x102, offset 0x79f + // Block 0x104, offset 0x7a9 {value: 0x0000, lo: 0x02}, {value: 0x0818, lo: 0x80, hi: 0xb4}, {value: 0x0040, lo: 0xb5, hi: 0xbf}, - // Block 0x103, offset 0x7a2 + // Block 0x105, offset 0x7ac {value: 0x0000, lo: 0x03}, {value: 0x0040, lo: 0x80, hi: 0x80}, {value: 0x0818, lo: 0x81, hi: 0xbd}, {value: 0x0040, lo: 0xbe, hi: 0xbf}, - // Block 0x104, offset 0x7a6 + // Block 0x106, offset 0x7b0 {value: 0x0000, lo: 0x03}, {value: 0x0040, lo: 0x80, hi: 0xaf}, {value: 0x0018, lo: 0xb0, hi: 0xb1}, {value: 0x0040, lo: 0xb2, hi: 0xbf}, - // Block 0x105, offset 0x7aa + // Block 0x107, offset 0x7b4 {value: 0x0000, lo: 0x03}, {value: 0x0018, lo: 0x80, hi: 0xab}, {value: 0x0040, lo: 0xac, hi: 0xaf}, {value: 0x0018, lo: 0xb0, hi: 0xbf}, - // Block 0x106, offset 0x7ae + // Block 0x108, offset 0x7b8 {value: 0x0000, lo: 0x05}, {value: 0x0018, lo: 0x80, hi: 0x93}, 
{value: 0x0040, lo: 0x94, hi: 0x9f}, {value: 0x0018, lo: 0xa0, hi: 0xae}, {value: 0x0040, lo: 0xaf, hi: 0xb0}, {value: 0x0018, lo: 0xb1, hi: 0xbf}, - // Block 0x107, offset 0x7b4 + // Block 0x109, offset 0x7be {value: 0x0000, lo: 0x05}, {value: 0x0040, lo: 0x80, hi: 0x80}, {value: 0x0018, lo: 0x81, hi: 0x8f}, {value: 0x0040, lo: 0x90, hi: 0x90}, {value: 0x0018, lo: 0x91, hi: 0xb5}, {value: 0x0040, lo: 0xb6, hi: 0xbf}, - // Block 0x108, offset 0x7ba + // Block 0x10a, offset 0x7c4 {value: 0x0000, lo: 0x04}, {value: 0x0018, lo: 0x80, hi: 0x8f}, - {value: 0xc229, lo: 0x90, hi: 0x90}, + {value: 0x2491, lo: 0x90, hi: 0x90}, {value: 0x0018, lo: 0x91, hi: 0xad}, {value: 0x0040, lo: 0xae, hi: 0xbf}, - // Block 0x109, offset 0x7bf + // Block 0x10b, offset 0x7c9 {value: 0x0000, lo: 0x02}, {value: 0x0040, lo: 0x80, hi: 0xa5}, {value: 0x0018, lo: 0xa6, hi: 0xbf}, - // Block 0x10a, offset 0x7c2 + // Block 0x10c, offset 0x7cc {value: 0x0000, lo: 0x0f}, - {value: 0xc851, lo: 0x80, hi: 0x80}, - {value: 0xc8a1, lo: 0x81, hi: 0x81}, - {value: 0xc8f1, lo: 0x82, hi: 0x82}, - {value: 0xc941, lo: 0x83, hi: 0x83}, - {value: 0xc991, lo: 0x84, hi: 0x84}, - {value: 0xc9e1, lo: 0x85, hi: 0x85}, - {value: 0xca31, lo: 0x86, hi: 0x86}, - {value: 0xca81, lo: 0x87, hi: 0x87}, - {value: 0xcad1, lo: 0x88, hi: 0x88}, + {value: 0x2611, lo: 0x80, hi: 0x80}, + {value: 0x2619, lo: 0x81, hi: 0x81}, + {value: 0x2621, lo: 0x82, hi: 0x82}, + {value: 0x2629, lo: 0x83, hi: 0x83}, + {value: 0x2631, lo: 0x84, hi: 0x84}, + {value: 0x2639, lo: 0x85, hi: 0x85}, + {value: 0x2641, lo: 0x86, hi: 0x86}, + {value: 0x2649, lo: 0x87, hi: 0x87}, + {value: 0x2651, lo: 0x88, hi: 0x88}, {value: 0x0040, lo: 0x89, hi: 0x8f}, - {value: 0xcb21, lo: 0x90, hi: 0x90}, - {value: 0xcb41, lo: 0x91, hi: 0x91}, + {value: 0x2659, lo: 0x90, hi: 0x90}, + {value: 0x2661, lo: 0x91, hi: 0x91}, {value: 0x0040, lo: 0x92, hi: 0x9f}, {value: 0x0018, lo: 0xa0, hi: 0xa5}, {value: 0x0040, lo: 0xa6, hi: 0xbf}, - // Block 0x10b, offset 0x7d2 + // Block 0x10d, offset 0x7dc {value: 0x0000, lo: 0x06}, {value: 0x0018, lo: 0x80, hi: 0x97}, {value: 0x0040, lo: 0x98, hi: 0x9f}, @@ -4674,29 +4829,29 @@ {value: 0x0040, lo: 0xad, hi: 0xaf}, {value: 0x0018, lo: 0xb0, hi: 0xbc}, {value: 0x0040, lo: 0xbd, hi: 0xbf}, - // Block 0x10c, offset 0x7d9 + // Block 0x10e, offset 0x7e3 {value: 0x0000, lo: 0x02}, {value: 0x0018, lo: 0x80, hi: 0xb3}, {value: 0x0040, lo: 0xb4, hi: 0xbf}, - // Block 0x10d, offset 0x7dc + // Block 0x10f, offset 0x7e6 {value: 0x0000, lo: 0x04}, {value: 0x0018, lo: 0x80, hi: 0x98}, {value: 0x0040, lo: 0x99, hi: 0x9f}, {value: 0x0018, lo: 0xa0, hi: 0xab}, {value: 0x0040, lo: 0xac, hi: 0xbf}, - // Block 0x10e, offset 0x7e1 + // Block 0x110, offset 0x7eb {value: 0x0000, lo: 0x03}, {value: 0x0018, lo: 0x80, hi: 0x8b}, {value: 0x0040, lo: 0x8c, hi: 0x8f}, {value: 0x0018, lo: 0x90, hi: 0xbf}, - // Block 0x10f, offset 0x7e5 + // Block 0x111, offset 0x7ef {value: 0x0000, lo: 0x05}, {value: 0x0018, lo: 0x80, hi: 0x87}, {value: 0x0040, lo: 0x88, hi: 0x8f}, {value: 0x0018, lo: 0x90, hi: 0x99}, {value: 0x0040, lo: 0x9a, hi: 0x9f}, {value: 0x0018, lo: 0xa0, hi: 0xbf}, - // Block 0x110, offset 0x7eb + // Block 0x112, offset 0x7f5 {value: 0x0000, lo: 0x06}, {value: 0x0018, lo: 0x80, hi: 0x87}, {value: 0x0040, lo: 0x88, hi: 0x8f}, @@ -4704,17 +4859,17 @@ {value: 0x0040, lo: 0xae, hi: 0xaf}, {value: 0x0018, lo: 0xb0, hi: 0xb1}, {value: 0x0040, lo: 0xb2, hi: 0xbf}, - // Block 0x111, offset 0x7f2 + // Block 0x113, offset 0x7fc {value: 0x0000, lo: 0x03}, {value: 0x0018, lo: 0x80, hi: 0xb8}, 
{value: 0x0040, lo: 0xb9, hi: 0xb9}, {value: 0x0018, lo: 0xba, hi: 0xbf}, - // Block 0x112, offset 0x7f6 + // Block 0x114, offset 0x800 {value: 0x0000, lo: 0x03}, {value: 0x0018, lo: 0x80, hi: 0x8b}, {value: 0x0040, lo: 0x8c, hi: 0x8c}, {value: 0x0018, lo: 0x8d, hi: 0xbf}, - // Block 0x113, offset 0x7fa + // Block 0x115, offset 0x804 {value: 0x0000, lo: 0x08}, {value: 0x0018, lo: 0x80, hi: 0x93}, {value: 0x0040, lo: 0x94, hi: 0x9f}, @@ -4724,7 +4879,7 @@ {value: 0x0040, lo: 0xb5, hi: 0xb7}, {value: 0x0018, lo: 0xb8, hi: 0xba}, {value: 0x0040, lo: 0xbb, hi: 0xbf}, - // Block 0x114, offset 0x803 + // Block 0x116, offset 0x80d {value: 0x0000, lo: 0x06}, {value: 0x0018, lo: 0x80, hi: 0x86}, {value: 0x0040, lo: 0x87, hi: 0x8f}, @@ -4732,109 +4887,74 @@ {value: 0x0040, lo: 0xa9, hi: 0xaf}, {value: 0x0018, lo: 0xb0, hi: 0xb6}, {value: 0x0040, lo: 0xb7, hi: 0xbf}, - // Block 0x115, offset 0x80a + // Block 0x117, offset 0x814 {value: 0x0000, lo: 0x04}, {value: 0x0018, lo: 0x80, hi: 0x82}, {value: 0x0040, lo: 0x83, hi: 0x8f}, {value: 0x0018, lo: 0x90, hi: 0x96}, {value: 0x0040, lo: 0x97, hi: 0xbf}, - // Block 0x116, offset 0x80f + // Block 0x118, offset 0x819 {value: 0x0000, lo: 0x03}, {value: 0x0018, lo: 0x80, hi: 0x92}, {value: 0x0040, lo: 0x93, hi: 0x93}, {value: 0x0018, lo: 0x94, hi: 0xbf}, - // Block 0x117, offset 0x813 + // Block 0x119, offset 0x81d {value: 0x0000, lo: 0x0d}, {value: 0x0018, lo: 0x80, hi: 0x8a}, {value: 0x0040, lo: 0x8b, hi: 0xaf}, - {value: 0x1f41, lo: 0xb0, hi: 0xb0}, - {value: 0x00c9, lo: 0xb1, hi: 0xb1}, - {value: 0x0069, lo: 0xb2, hi: 0xb2}, - {value: 0x0079, lo: 0xb3, hi: 0xb3}, - {value: 0x1f51, lo: 0xb4, hi: 0xb4}, - {value: 0x1f61, lo: 0xb5, hi: 0xb5}, - {value: 0x1f71, lo: 0xb6, hi: 0xb6}, - {value: 0x1f81, lo: 0xb7, hi: 0xb7}, - {value: 0x1f91, lo: 0xb8, hi: 0xb8}, - {value: 0x1fa1, lo: 0xb9, hi: 0xb9}, + {value: 0x06e1, lo: 0xb0, hi: 0xb0}, + {value: 0x0049, lo: 0xb1, hi: 0xb1}, + {value: 0x0029, lo: 0xb2, hi: 0xb2}, + {value: 0x0031, lo: 0xb3, hi: 0xb3}, + {value: 0x06e9, lo: 0xb4, hi: 0xb4}, + {value: 0x06f1, lo: 0xb5, hi: 0xb5}, + {value: 0x06f9, lo: 0xb6, hi: 0xb6}, + {value: 0x0701, lo: 0xb7, hi: 0xb7}, + {value: 0x0709, lo: 0xb8, hi: 0xb8}, + {value: 0x0711, lo: 0xb9, hi: 0xb9}, {value: 0x0040, lo: 0xba, hi: 0xbf}, - // Block 0x118, offset 0x821 + // Block 0x11a, offset 0x82b {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0x9d}, {value: 0x0040, lo: 0x9e, hi: 0xbf}, - // Block 0x119, offset 0x824 + // Block 0x11b, offset 0x82e {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0xb4}, {value: 0x0040, lo: 0xb5, hi: 0xbf}, - // Block 0x11a, offset 0x827 + // Block 0x11c, offset 0x831 {value: 0x0000, lo: 0x03}, {value: 0x0008, lo: 0x80, hi: 0x9d}, {value: 0x0040, lo: 0x9e, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xbf}, - // Block 0x11b, offset 0x82b + // Block 0x11d, offset 0x835 {value: 0x0000, lo: 0x03}, {value: 0x0008, lo: 0x80, hi: 0xa1}, {value: 0x0040, lo: 0xa2, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x11c, offset 0x82f + // Block 0x11e, offset 0x839 {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0xa0}, {value: 0x0040, lo: 0xa1, hi: 0xbf}, - // Block 0x11d, offset 0x832 - {value: 0x0020, lo: 0x0f}, - {value: 0xdf21, lo: 0x80, hi: 0x89}, - {value: 0x8e35, lo: 0x8a, hi: 0x8a}, - {value: 0xe061, lo: 0x8b, hi: 0x9c}, - {value: 0x8e55, lo: 0x9d, hi: 0x9d}, - {value: 0xe2a1, lo: 0x9e, hi: 0xa2}, - {value: 0x8e75, lo: 0xa3, hi: 0xa3}, - {value: 0xe341, lo: 0xa4, hi: 0xab}, - {value: 0x7f0d, lo: 0xac, hi: 0xac}, - 
{value: 0xe441, lo: 0xad, hi: 0xaf}, - {value: 0x8e95, lo: 0xb0, hi: 0xb0}, - {value: 0xe4a1, lo: 0xb1, hi: 0xb6}, - {value: 0x8eb5, lo: 0xb7, hi: 0xb9}, - {value: 0xe561, lo: 0xba, hi: 0xba}, - {value: 0x8f15, lo: 0xbb, hi: 0xbb}, - {value: 0xe581, lo: 0xbc, hi: 0xbf}, - // Block 0x11e, offset 0x842 - {value: 0x0020, lo: 0x10}, - {value: 0x93b5, lo: 0x80, hi: 0x80}, - {value: 0xf101, lo: 0x81, hi: 0x86}, - {value: 0x93d5, lo: 0x87, hi: 0x8a}, - {value: 0xda61, lo: 0x8b, hi: 0x8b}, - {value: 0xf1c1, lo: 0x8c, hi: 0x96}, - {value: 0x9455, lo: 0x97, hi: 0x97}, - {value: 0xf321, lo: 0x98, hi: 0xa3}, - {value: 0x9475, lo: 0xa4, hi: 0xa6}, - {value: 0xf4a1, lo: 0xa7, hi: 0xaa}, - {value: 0x94d5, lo: 0xab, hi: 0xab}, - {value: 0xf521, lo: 0xac, hi: 0xac}, - {value: 0x94f5, lo: 0xad, hi: 0xad}, - {value: 0xf541, lo: 0xae, hi: 0xaf}, - {value: 0x9515, lo: 0xb0, hi: 0xb1}, - {value: 0xf581, lo: 0xb2, hi: 0xbe}, - {value: 0x2040, lo: 0xbf, hi: 0xbf}, - // Block 0x11f, offset 0x853 + // Block 0x11f, offset 0x83c {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0x8a}, {value: 0x0040, lo: 0x8b, hi: 0xbf}, - // Block 0x120, offset 0x856 + // Block 0x120, offset 0x83f {value: 0x0000, lo: 0x04}, {value: 0x0040, lo: 0x80, hi: 0x80}, {value: 0x0340, lo: 0x81, hi: 0x81}, {value: 0x0040, lo: 0x82, hi: 0x9f}, {value: 0x0340, lo: 0xa0, hi: 0xbf}, - // Block 0x121, offset 0x85b + // Block 0x121, offset 0x844 {value: 0x0000, lo: 0x01}, {value: 0x0340, lo: 0x80, hi: 0xbf}, - // Block 0x122, offset 0x85d + // Block 0x122, offset 0x846 {value: 0x0000, lo: 0x01}, {value: 0x33c0, lo: 0x80, hi: 0xbf}, - // Block 0x123, offset 0x85f + // Block 0x123, offset 0x848 {value: 0x0000, lo: 0x02}, {value: 0x33c0, lo: 0x80, hi: 0xaf}, {value: 0x0040, lo: 0xb0, hi: 0xbf}, } -// Total table size 43370 bytes (42KiB); checksum: EBD909C0 +// Total table size 44953 bytes (43KiB); checksum: D51909DD diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/net/idna/tables15.0.0.go temporal-1.22.5/src/vendor/golang.org/x/net/idna/tables15.0.0.go --- temporal-1.21.5-1/src/vendor/golang.org/x/net/idna/tables15.0.0.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/net/idna/tables15.0.0.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,5145 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +//go:build go1.21 +// +build go1.21 + +package idna + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. +const UnicodeVersion = "15.0.0" + +var mappings string = "" + // Size: 6704 bytes + " ̈a ̄23 ́ ̧1o1⁄41⁄23⁄4i̇l·ʼnsdžⱥⱦhjrwy ̆ ̇ ̊ ̨ ̃ ̋lẍ́ ι; ̈́եւاٴوٴۇٴيٴक" + + "़ख़ग़ज़ड़ढ़फ़य़ড়ঢ়য়ਲ਼ਸ਼ਖ਼ਗ਼ਜ਼ਫ਼ଡ଼ଢ଼ําໍາຫນຫມགྷཌྷདྷབྷཛྷཀྵཱཱིུྲྀྲཱྀླྀླཱ" + + "ཱྀྀྒྷྜྷྡྷྦྷྫྷྐྵвдостъѣæbdeǝgikmnȣptuɐɑəɛɜŋɔɯvβγδφχρнɒcɕðfɟɡɥɨɩɪʝɭʟɱɰɲɳ" + + "ɴɵɸʂʃƫʉʊʋʌzʐʑʒθssάέήίόύώἀιἁιἂιἃιἄιἅιἆιἇιἠιἡιἢιἣιἤιἥιἦιἧιὠιὡιὢιὣιὤιὥιὦιὧ" + + "ιὰιαιάιᾶιι ̈͂ὴιηιήιῆι ̓̀ ̓́ ̓͂ΐ ̔̀ ̔́ ̔͂ΰ ̈̀`ὼιωιώιῶι′′′′′‵‵‵‵‵!!???!!?" + + "′′′′0456789+=()rsħnoqsmtmωåאבגדπ1⁄71⁄91⁄101⁄32⁄31⁄52⁄53⁄54⁄51⁄65⁄61⁄83" + + "⁄85⁄87⁄81⁄iiivviviiiixxi0⁄3∫∫∫∫∫∮∮∮∮∮1011121314151617181920(10)(11)(12" + + ")(13)(14)(15)(16)(17)(18)(19)(20)∫∫∫∫==⫝̸ɫɽȿɀ. 
゙ ゚よりコト(ᄀ)(ᄂ)(ᄃ)(ᄅ)(ᄆ)(ᄇ)" + + "(ᄉ)(ᄋ)(ᄌ)(ᄎ)(ᄏ)(ᄐ)(ᄑ)(ᄒ)(가)(나)(다)(라)(마)(바)(사)(아)(자)(차)(카)(타)(파)(하)(주)(오전" + + ")(오후)(一)(二)(三)(四)(五)(六)(七)(八)(九)(十)(月)(火)(水)(木)(金)(土)(日)(株)(有)(社)(名)(特)(" + + "財)(祝)(労)(代)(呼)(学)(監)(企)(資)(協)(祭)(休)(自)(至)21222324252627282930313233343" + + "5참고주의3637383940414243444546474849501月2月3月4月5月6月7月8月9月10月11月12月hgev令和アパート" + + "アルファアンペアアールイニングインチウォンエスクードエーカーオンスオームカイリカラットカロリーガロンガンマギガギニーキュリーギルダーキロキロ" + + "グラムキロメートルキロワットグラムグラムトンクルゼイロクローネケースコルナコーポサイクルサンチームシリングセンチセントダースデシドルトンナノ" + + "ノットハイツパーセントパーツバーレルピアストルピクルピコビルファラッドフィートブッシェルフランヘクタールペソペニヒヘルツペンスページベータポ" + + "イントボルトホンポンドホールホーンマイクロマイルマッハマルクマンションミクロンミリミリバールメガメガトンメートルヤードヤールユアンリットルリ" + + "ラルピールーブルレムレントゲンワット0点1点2点3点4点5点6点7点8点9点10点11点12点13点14点15点16点17点18点19点20" + + "点21点22点23点24点daauovpcdmiu平成昭和大正明治株式会社panamakakbmbgbkcalpfnfmgkghzmldlk" + + "lfmnmmmcmkmm2m3m∕sm∕s2rad∕srad∕s2psnsmspvnvmvkvpwnwmwkwbqcccdc∕kgdbgyhah" + + "pinkkktlmlnlxphprsrsvwbv∕ma∕m1日2日3日4日5日6日7日8日9日10日11日12日13日14日15日16日17日1" + + "8日19日20日21日22日23日24日25日26日27日28日29日30日31日ьɦɬʞʇœʍ𤋮𢡊𢡄𣏕𥉉𥳐𧻓fffiflstմնմեմիվնմ" + + "խיִײַעהכלםרתשׁשׂשּׁשּׂאַאָאּבּגּדּהּוּזּטּיּךּכּלּמּנּסּףּפּצּקּרּשּתּו" + + "ֹבֿכֿפֿאלٱٻپڀٺٿٹڤڦڄڃچڇڍڌڎڈژڑکگڳڱںڻۀہھےۓڭۇۆۈۋۅۉېىئائەئوئۇئۆئۈئېئىیئجئحئم" + + "ئيبجبحبخبمبىبيتجتحتختمتىتيثجثمثىثيجحجمحجحمخجخحخمسجسحسخسمصحصمضجضحضخضمطحط" + + "مظمعجعمغجغمفجفحفخفمفىفيقحقمقىقيكاكجكحكخكلكمكىكيلجلحلخلملىليمجمحمخمممىمي" + + "نجنحنخنمنىنيهجهمهىهييجيحيخيميىييذٰرٰىٰ ٌّ ٍّ َّ ُّ ِّ ّٰئرئزئنبربزبنترت" + + "زتنثرثزثنمانرنزننيريزينئخئهبهتهصخلهنههٰيهثهسهشمشهـَّـُّـِّطىطيعىعيغىغيس" + + "ىسيشىشيحىحيجىجيخىخيصىصيضىضيشجشحشخشرسرصرضراًتجمتحجتحمتخمتمجتمحتمخجمححميح" + + "مىسحجسجحسجىسمحسمجسممصححصممشحمشجيشمخشممضحىضخمطمحطممطميعجمعممعمىغممغميغمى" + + "فخمقمحقمملحملحيلحىلججلخملمحمحجمحممحيمجحمجممخجمخممجخهمجهممنحمنحىنجمنجىنم" + + "ينمىيممبخيتجيتجىتخيتخىتميتمىجميجحىجمىسخىصحيشحيضحيلجيلمييحييجييميمميقمين" + + "حيعميكمينجحمخيلجمكممجحيحجيمجيفميبحيسخينجيصلےقلےاللهاكبرمحمدصلعمرسولعليه" + + "وسلمصلىصلى الله عليه وسلمجل جلالهریال,:!?_{}[]#&*-<>\\$%@ـًـَـُـِـّـْءآ" + + "أؤإئابةتثجحخدذرزسشصضطظعغفقكلمنهويلآلألإلا\x22'/^|~¢£¬¦¥ːˑʙɓʣꭦʥʤɖɗᶑɘɞʩɤɢ" + + "ɠʛʜɧʄʪʫꞎɮʎøɶɷɺɾʀʨʦꭧʧʈⱱʏʡʢʘǀǁǂ𝅗𝅥𝅘𝅥𝅘𝅥𝅮𝅘𝅥𝅯𝅘𝅥𝅰𝅘𝅥𝅱𝅘𝅥𝅲𝆹𝅥𝆺𝅥𝆹𝅥𝅮𝆺𝅥𝅮𝆹𝅥𝅯𝆺𝅥𝅯ıȷαεζηκ" + + "λμνξοστυψ∇∂ϝабгежзиклмпруфхцчшыэюꚉәіјөүӏґѕџҫꙑұٮڡٯ0,1,2,3,4,5,6,7,8,9,(a" + + ")(b)(c)(d)(e)(f)(g)(h)(i)(j)(k)(l)(m)(n)(o)(p)(q)(r)(s)(t)(u)(v)(w)(x)(y" + + ")(z)〔s〕wzhvsdppvwcmcmdmrdjほかココサ手字双デ二多解天交映無料前後再新初終生販声吹演投捕一三遊左中右指走打禁空合満有月申" + + "割営配〔本〕〔三〕〔二〕〔安〕〔点〕〔打〕〔盗〕〔勝〕〔敗〕得可丽丸乁你侮侻倂偺備僧像㒞免兔兤具㒹內冗冤仌冬况凵刃㓟刻剆剷㔕勇勉勤勺包匆北卉" + + "卑博即卽卿灰及叟叫叱吆咞吸呈周咢哶唐啓啣善喙喫喳嗂圖嘆圗噑噴切壮城埴堍型堲報墬売壷夆夢奢姬娛娧姘婦㛮嬈嬾寃寘寧寳寿将尢㞁屠屮峀岍嵃嵮嵫嵼巡巢" + + "㠯巽帨帽幩㡢㡼庰庳庶廊廾舁弢㣇形彫㣣徚忍志忹悁㤺㤜悔惇慈慌慎慺憎憲憤憯懞懲懶成戛扝抱拔捐挽拼捨掃揤搢揅掩㨮摩摾撝摷㩬敏敬旣書晉㬙暑㬈㫤冒冕最" + + "暜肭䏙朗望朡杞杓㭉柺枅桒梅梎栟椔㮝楂榣槪檨櫛㰘次歔㱎歲殟殺殻汎沿泍汧洖派海流浩浸涅洴港湮㴳滋滇淹潮濆瀹瀞瀛㶖灊災灷炭煅熜爨爵牐犀犕獺王㺬玥㺸" + + "瑇瑜瑱璅瓊㼛甤甾異瘐㿼䀈直眞真睊䀹瞋䁆䂖硎碌磌䃣祖福秫䄯穀穊穏䈂篆築䈧糒䊠糨糣紀絣䌁緇縂繅䌴䍙罺羕翺者聠聰䏕育脃䐋脾媵舄辞䑫芑芋芝劳花芳芽苦" + + "若茝荣莭茣莽菧著荓菊菌菜䔫蓱蓳蔖蕤䕝䕡䕫虐虜虧虩蚩蚈蜎蛢蝹蜨蝫螆蟡蠁䗹衠衣裗裞䘵裺㒻䚾䛇誠諭變豕貫賁贛起跋趼跰軔輸邔郱鄑鄛鈸鋗鋘鉼鏹鐕開䦕閷" + + "䧦雃嶲霣䩮䩶韠䪲頋頩飢䬳餩馧駂駾䯎鬒鱀鳽䳎䳭鵧䳸麻䵖黹黾鼅鼏鼖鼻" + +var mappingIndex = []uint16{ // 1729 elements + // Entry 0 - 3F + 0x0000, 0x0000, 0x0001, 0x0004, 0x0005, 0x0008, 0x0009, 0x000a, + 0x000d, 0x0010, 0x0011, 0x0012, 0x0017, 0x001c, 0x0021, 0x0024, + 0x0027, 0x002a, 0x002b, 0x002e, 0x0031, 0x0034, 0x0035, 0x0036, + 0x0037, 0x0038, 0x0039, 0x003c, 0x003f, 0x0042, 0x0045, 0x0048, + 0x004b, 0x004c, 0x004d, 0x0051, 0x0054, 0x0055, 0x005a, 0x005e, + 0x0062, 0x0066, 0x006a, 0x006e, 0x0074, 0x007a, 0x0080, 0x0086, + 0x008c, 0x0092, 0x0098, 0x009e, 0x00a4, 0x00aa, 0x00b0, 0x00b6, + 0x00bc, 0x00c2, 0x00c8, 0x00ce, 0x00d4, 0x00da, 0x00e0, 0x00e6, + // Entry 40 - 7F + 0x00ec, 0x00f2, 0x00f8, 0x00fe, 0x0104, 0x010a, 0x0110, 0x0116, + 
0x011c, 0x0122, 0x0128, 0x012e, 0x0137, 0x013d, 0x0146, 0x014c, + 0x0152, 0x0158, 0x015e, 0x0164, 0x016a, 0x0170, 0x0172, 0x0174, + 0x0176, 0x0178, 0x017a, 0x017c, 0x017e, 0x0180, 0x0181, 0x0182, + 0x0183, 0x0185, 0x0186, 0x0187, 0x0188, 0x0189, 0x018a, 0x018c, + 0x018d, 0x018e, 0x018f, 0x0191, 0x0193, 0x0195, 0x0197, 0x0199, + 0x019b, 0x019d, 0x019f, 0x01a0, 0x01a2, 0x01a4, 0x01a6, 0x01a8, + 0x01aa, 0x01ac, 0x01ae, 0x01b0, 0x01b1, 0x01b3, 0x01b5, 0x01b6, + // Entry 80 - BF + 0x01b8, 0x01ba, 0x01bc, 0x01be, 0x01c0, 0x01c2, 0x01c4, 0x01c6, + 0x01c8, 0x01ca, 0x01cc, 0x01ce, 0x01d0, 0x01d2, 0x01d4, 0x01d6, + 0x01d8, 0x01da, 0x01dc, 0x01de, 0x01e0, 0x01e2, 0x01e4, 0x01e5, + 0x01e7, 0x01e9, 0x01eb, 0x01ed, 0x01ef, 0x01f1, 0x01f3, 0x01f5, + 0x01f7, 0x01f9, 0x01fb, 0x01fd, 0x0202, 0x0207, 0x020c, 0x0211, + 0x0216, 0x021b, 0x0220, 0x0225, 0x022a, 0x022f, 0x0234, 0x0239, + 0x023e, 0x0243, 0x0248, 0x024d, 0x0252, 0x0257, 0x025c, 0x0261, + 0x0266, 0x026b, 0x0270, 0x0275, 0x027a, 0x027e, 0x0282, 0x0287, + // Entry C0 - FF + 0x0289, 0x028e, 0x0293, 0x0297, 0x029b, 0x02a0, 0x02a5, 0x02aa, + 0x02af, 0x02b1, 0x02b6, 0x02bb, 0x02c0, 0x02c2, 0x02c7, 0x02c8, + 0x02cd, 0x02d1, 0x02d5, 0x02da, 0x02e0, 0x02e9, 0x02ef, 0x02f8, + 0x02fa, 0x02fc, 0x02fe, 0x0300, 0x030c, 0x030d, 0x030e, 0x030f, + 0x0310, 0x0311, 0x0312, 0x0313, 0x0314, 0x0315, 0x0316, 0x0317, + 0x0319, 0x031b, 0x031d, 0x031e, 0x0320, 0x0322, 0x0324, 0x0326, + 0x0328, 0x032a, 0x032c, 0x032e, 0x0330, 0x0335, 0x033a, 0x0340, + 0x0345, 0x034a, 0x034f, 0x0354, 0x0359, 0x035e, 0x0363, 0x0368, + // Entry 100 - 13F + 0x036d, 0x0372, 0x0377, 0x037c, 0x0380, 0x0382, 0x0384, 0x0386, + 0x038a, 0x038c, 0x038e, 0x0393, 0x0399, 0x03a2, 0x03a8, 0x03b1, + 0x03b3, 0x03b5, 0x03b7, 0x03b9, 0x03bb, 0x03bd, 0x03bf, 0x03c1, + 0x03c3, 0x03c5, 0x03c7, 0x03cb, 0x03cf, 0x03d3, 0x03d7, 0x03db, + 0x03df, 0x03e3, 0x03e7, 0x03eb, 0x03ef, 0x03f3, 0x03ff, 0x0401, + 0x0406, 0x0408, 0x040a, 0x040c, 0x040e, 0x040f, 0x0413, 0x0417, + 0x041d, 0x0423, 0x0428, 0x042d, 0x0432, 0x0437, 0x043c, 0x0441, + 0x0446, 0x044b, 0x0450, 0x0455, 0x045a, 0x045f, 0x0464, 0x0469, + // Entry 140 - 17F + 0x046e, 0x0473, 0x0478, 0x047d, 0x0482, 0x0487, 0x048c, 0x0491, + 0x0496, 0x049b, 0x04a0, 0x04a5, 0x04aa, 0x04af, 0x04b4, 0x04bc, + 0x04c4, 0x04c9, 0x04ce, 0x04d3, 0x04d8, 0x04dd, 0x04e2, 0x04e7, + 0x04ec, 0x04f1, 0x04f6, 0x04fb, 0x0500, 0x0505, 0x050a, 0x050f, + 0x0514, 0x0519, 0x051e, 0x0523, 0x0528, 0x052d, 0x0532, 0x0537, + 0x053c, 0x0541, 0x0546, 0x054b, 0x0550, 0x0555, 0x055a, 0x055f, + 0x0564, 0x0569, 0x056e, 0x0573, 0x0578, 0x057a, 0x057c, 0x057e, + 0x0580, 0x0582, 0x0584, 0x0586, 0x0588, 0x058a, 0x058c, 0x058e, + // Entry 180 - 1BF + 0x0590, 0x0592, 0x0594, 0x0596, 0x059c, 0x05a2, 0x05a4, 0x05a6, + 0x05a8, 0x05aa, 0x05ac, 0x05ae, 0x05b0, 0x05b2, 0x05b4, 0x05b6, + 0x05b8, 0x05ba, 0x05bc, 0x05be, 0x05c0, 0x05c4, 0x05c8, 0x05cc, + 0x05d0, 0x05d4, 0x05d8, 0x05dc, 0x05e0, 0x05e4, 0x05e9, 0x05ee, + 0x05f3, 0x05f5, 0x05f7, 0x05fd, 0x0609, 0x0615, 0x0621, 0x062a, + 0x0636, 0x063f, 0x0648, 0x0657, 0x0663, 0x066c, 0x0675, 0x067e, + 0x068a, 0x0696, 0x069f, 0x06a8, 0x06ae, 0x06b7, 0x06c3, 0x06cf, + 0x06d5, 0x06e4, 0x06f6, 0x0705, 0x070e, 0x071d, 0x072c, 0x0738, + // Entry 1C0 - 1FF + 0x0741, 0x074a, 0x0753, 0x075f, 0x076e, 0x077a, 0x0783, 0x078c, + 0x0795, 0x079b, 0x07a1, 0x07a7, 0x07ad, 0x07b6, 0x07bf, 0x07ce, + 0x07d7, 0x07e3, 0x07f2, 0x07fb, 0x0801, 0x0807, 0x0816, 0x0822, + 0x0831, 0x083a, 0x0849, 0x084f, 0x0858, 0x0861, 0x086a, 0x0873, + 0x087c, 0x0888, 0x0891, 0x0897, 0x08a0, 0x08a9, 0x08b2, 0x08be, + 
0x08c7, 0x08d0, 0x08d9, 0x08e8, 0x08f4, 0x08fa, 0x0909, 0x090f, + 0x091b, 0x0927, 0x0930, 0x0939, 0x0942, 0x094e, 0x0954, 0x095d, + 0x0969, 0x096f, 0x097e, 0x0987, 0x098b, 0x098f, 0x0993, 0x0997, + // Entry 200 - 23F + 0x099b, 0x099f, 0x09a3, 0x09a7, 0x09ab, 0x09af, 0x09b4, 0x09b9, + 0x09be, 0x09c3, 0x09c8, 0x09cd, 0x09d2, 0x09d7, 0x09dc, 0x09e1, + 0x09e6, 0x09eb, 0x09f0, 0x09f5, 0x09fa, 0x09fc, 0x09fe, 0x0a00, + 0x0a02, 0x0a04, 0x0a06, 0x0a0c, 0x0a12, 0x0a18, 0x0a1e, 0x0a2a, + 0x0a2c, 0x0a2e, 0x0a30, 0x0a32, 0x0a34, 0x0a36, 0x0a38, 0x0a3c, + 0x0a3e, 0x0a40, 0x0a42, 0x0a44, 0x0a46, 0x0a48, 0x0a4a, 0x0a4c, + 0x0a4e, 0x0a50, 0x0a52, 0x0a54, 0x0a56, 0x0a58, 0x0a5a, 0x0a5f, + 0x0a65, 0x0a6c, 0x0a74, 0x0a76, 0x0a78, 0x0a7a, 0x0a7c, 0x0a7e, + // Entry 240 - 27F + 0x0a80, 0x0a82, 0x0a84, 0x0a86, 0x0a88, 0x0a8a, 0x0a8c, 0x0a8e, + 0x0a90, 0x0a96, 0x0a98, 0x0a9a, 0x0a9c, 0x0a9e, 0x0aa0, 0x0aa2, + 0x0aa4, 0x0aa6, 0x0aa8, 0x0aaa, 0x0aac, 0x0aae, 0x0ab0, 0x0ab2, + 0x0ab4, 0x0ab9, 0x0abe, 0x0ac2, 0x0ac6, 0x0aca, 0x0ace, 0x0ad2, + 0x0ad6, 0x0ada, 0x0ade, 0x0ae2, 0x0ae7, 0x0aec, 0x0af1, 0x0af6, + 0x0afb, 0x0b00, 0x0b05, 0x0b0a, 0x0b0f, 0x0b14, 0x0b19, 0x0b1e, + 0x0b23, 0x0b28, 0x0b2d, 0x0b32, 0x0b37, 0x0b3c, 0x0b41, 0x0b46, + 0x0b4b, 0x0b50, 0x0b52, 0x0b54, 0x0b56, 0x0b58, 0x0b5a, 0x0b5c, + // Entry 280 - 2BF + 0x0b5e, 0x0b62, 0x0b66, 0x0b6a, 0x0b6e, 0x0b72, 0x0b76, 0x0b7a, + 0x0b7c, 0x0b7e, 0x0b80, 0x0b82, 0x0b86, 0x0b8a, 0x0b8e, 0x0b92, + 0x0b96, 0x0b9a, 0x0b9e, 0x0ba0, 0x0ba2, 0x0ba4, 0x0ba6, 0x0ba8, + 0x0baa, 0x0bac, 0x0bb0, 0x0bb4, 0x0bba, 0x0bc0, 0x0bc4, 0x0bc8, + 0x0bcc, 0x0bd0, 0x0bd4, 0x0bd8, 0x0bdc, 0x0be0, 0x0be4, 0x0be8, + 0x0bec, 0x0bf0, 0x0bf4, 0x0bf8, 0x0bfc, 0x0c00, 0x0c04, 0x0c08, + 0x0c0c, 0x0c10, 0x0c14, 0x0c18, 0x0c1c, 0x0c20, 0x0c24, 0x0c28, + 0x0c2c, 0x0c30, 0x0c34, 0x0c36, 0x0c38, 0x0c3a, 0x0c3c, 0x0c3e, + // Entry 2C0 - 2FF + 0x0c40, 0x0c42, 0x0c44, 0x0c46, 0x0c48, 0x0c4a, 0x0c4c, 0x0c4e, + 0x0c50, 0x0c52, 0x0c54, 0x0c56, 0x0c58, 0x0c5a, 0x0c5c, 0x0c5e, + 0x0c60, 0x0c62, 0x0c64, 0x0c66, 0x0c68, 0x0c6a, 0x0c6c, 0x0c6e, + 0x0c70, 0x0c72, 0x0c74, 0x0c76, 0x0c78, 0x0c7a, 0x0c7c, 0x0c7e, + 0x0c80, 0x0c82, 0x0c86, 0x0c8a, 0x0c8e, 0x0c92, 0x0c96, 0x0c9a, + 0x0c9e, 0x0ca2, 0x0ca4, 0x0ca8, 0x0cac, 0x0cb0, 0x0cb4, 0x0cb8, + 0x0cbc, 0x0cc0, 0x0cc4, 0x0cc8, 0x0ccc, 0x0cd0, 0x0cd4, 0x0cd8, + 0x0cdc, 0x0ce0, 0x0ce4, 0x0ce8, 0x0cec, 0x0cf0, 0x0cf4, 0x0cf8, + // Entry 300 - 33F + 0x0cfc, 0x0d00, 0x0d04, 0x0d08, 0x0d0c, 0x0d10, 0x0d14, 0x0d18, + 0x0d1c, 0x0d20, 0x0d24, 0x0d28, 0x0d2c, 0x0d30, 0x0d34, 0x0d38, + 0x0d3c, 0x0d40, 0x0d44, 0x0d48, 0x0d4c, 0x0d50, 0x0d54, 0x0d58, + 0x0d5c, 0x0d60, 0x0d64, 0x0d68, 0x0d6c, 0x0d70, 0x0d74, 0x0d78, + 0x0d7c, 0x0d80, 0x0d84, 0x0d88, 0x0d8c, 0x0d90, 0x0d94, 0x0d98, + 0x0d9c, 0x0da0, 0x0da4, 0x0da8, 0x0dac, 0x0db0, 0x0db4, 0x0db8, + 0x0dbc, 0x0dc0, 0x0dc4, 0x0dc8, 0x0dcc, 0x0dd0, 0x0dd4, 0x0dd8, + 0x0ddc, 0x0de0, 0x0de4, 0x0de8, 0x0dec, 0x0df0, 0x0df4, 0x0df8, + // Entry 340 - 37F + 0x0dfc, 0x0e00, 0x0e04, 0x0e08, 0x0e0c, 0x0e10, 0x0e14, 0x0e18, + 0x0e1d, 0x0e22, 0x0e27, 0x0e2c, 0x0e31, 0x0e36, 0x0e3a, 0x0e3e, + 0x0e42, 0x0e46, 0x0e4a, 0x0e4e, 0x0e52, 0x0e56, 0x0e5a, 0x0e5e, + 0x0e62, 0x0e66, 0x0e6a, 0x0e6e, 0x0e72, 0x0e76, 0x0e7a, 0x0e7e, + 0x0e82, 0x0e86, 0x0e8a, 0x0e8e, 0x0e92, 0x0e96, 0x0e9a, 0x0e9e, + 0x0ea2, 0x0ea6, 0x0eaa, 0x0eae, 0x0eb2, 0x0eb6, 0x0ebc, 0x0ec2, + 0x0ec8, 0x0ecc, 0x0ed0, 0x0ed4, 0x0ed8, 0x0edc, 0x0ee0, 0x0ee4, + 0x0ee8, 0x0eec, 0x0ef0, 0x0ef4, 0x0ef8, 0x0efc, 0x0f00, 0x0f04, + // Entry 380 - 3BF + 0x0f08, 0x0f0c, 0x0f10, 0x0f14, 0x0f18, 
0x0f1c, 0x0f20, 0x0f24, + 0x0f28, 0x0f2c, 0x0f30, 0x0f34, 0x0f38, 0x0f3e, 0x0f44, 0x0f4a, + 0x0f50, 0x0f56, 0x0f5c, 0x0f62, 0x0f68, 0x0f6e, 0x0f74, 0x0f7a, + 0x0f80, 0x0f86, 0x0f8c, 0x0f92, 0x0f98, 0x0f9e, 0x0fa4, 0x0faa, + 0x0fb0, 0x0fb6, 0x0fbc, 0x0fc2, 0x0fc8, 0x0fce, 0x0fd4, 0x0fda, + 0x0fe0, 0x0fe6, 0x0fec, 0x0ff2, 0x0ff8, 0x0ffe, 0x1004, 0x100a, + 0x1010, 0x1016, 0x101c, 0x1022, 0x1028, 0x102e, 0x1034, 0x103a, + 0x1040, 0x1046, 0x104c, 0x1052, 0x1058, 0x105e, 0x1064, 0x106a, + // Entry 3C0 - 3FF + 0x1070, 0x1076, 0x107c, 0x1082, 0x1088, 0x108e, 0x1094, 0x109a, + 0x10a0, 0x10a6, 0x10ac, 0x10b2, 0x10b8, 0x10be, 0x10c4, 0x10ca, + 0x10d0, 0x10d6, 0x10dc, 0x10e2, 0x10e8, 0x10ee, 0x10f4, 0x10fa, + 0x1100, 0x1106, 0x110c, 0x1112, 0x1118, 0x111e, 0x1124, 0x112a, + 0x1130, 0x1136, 0x113c, 0x1142, 0x1148, 0x114e, 0x1154, 0x115a, + 0x1160, 0x1166, 0x116c, 0x1172, 0x1178, 0x1180, 0x1188, 0x1190, + 0x1198, 0x11a0, 0x11a8, 0x11b0, 0x11b6, 0x11d7, 0x11e6, 0x11ee, + 0x11ef, 0x11f0, 0x11f1, 0x11f2, 0x11f3, 0x11f4, 0x11f5, 0x11f6, + // Entry 400 - 43F + 0x11f7, 0x11f8, 0x11f9, 0x11fa, 0x11fb, 0x11fc, 0x11fd, 0x11fe, + 0x11ff, 0x1200, 0x1201, 0x1205, 0x1209, 0x120d, 0x1211, 0x1215, + 0x1219, 0x121b, 0x121d, 0x121f, 0x1221, 0x1223, 0x1225, 0x1227, + 0x1229, 0x122b, 0x122d, 0x122f, 0x1231, 0x1233, 0x1235, 0x1237, + 0x1239, 0x123b, 0x123d, 0x123f, 0x1241, 0x1243, 0x1245, 0x1247, + 0x1249, 0x124b, 0x124d, 0x124f, 0x1251, 0x1253, 0x1255, 0x1257, + 0x1259, 0x125b, 0x125d, 0x125f, 0x1263, 0x1267, 0x126b, 0x126f, + 0x1270, 0x1271, 0x1272, 0x1273, 0x1274, 0x1275, 0x1277, 0x1279, + // Entry 440 - 47F + 0x127b, 0x127d, 0x127f, 0x1281, 0x1283, 0x1285, 0x1287, 0x1289, + 0x128c, 0x128e, 0x1290, 0x1292, 0x1294, 0x1297, 0x1299, 0x129b, + 0x129d, 0x129f, 0x12a1, 0x12a3, 0x12a5, 0x12a7, 0x12a9, 0x12ab, + 0x12ad, 0x12af, 0x12b2, 0x12b4, 0x12b6, 0x12b8, 0x12ba, 0x12bc, + 0x12be, 0x12c0, 0x12c2, 0x12c4, 0x12c6, 0x12c9, 0x12cb, 0x12cd, + 0x12d0, 0x12d2, 0x12d4, 0x12d6, 0x12d8, 0x12da, 0x12dc, 0x12de, + 0x12e6, 0x12ee, 0x12fa, 0x1306, 0x1312, 0x131e, 0x132a, 0x1332, + 0x133a, 0x1346, 0x1352, 0x135e, 0x136a, 0x136c, 0x136e, 0x1370, + // Entry 480 - 4BF + 0x1372, 0x1374, 0x1376, 0x1378, 0x137a, 0x137c, 0x137e, 0x1380, + 0x1382, 0x1384, 0x1386, 0x1388, 0x138a, 0x138d, 0x1390, 0x1392, + 0x1394, 0x1396, 0x1398, 0x139a, 0x139c, 0x139e, 0x13a0, 0x13a2, + 0x13a4, 0x13a6, 0x13a8, 0x13aa, 0x13ac, 0x13ae, 0x13b0, 0x13b2, + 0x13b4, 0x13b6, 0x13b8, 0x13ba, 0x13bc, 0x13bf, 0x13c1, 0x13c3, + 0x13c5, 0x13c7, 0x13c9, 0x13cb, 0x13cd, 0x13cf, 0x13d1, 0x13d3, + 0x13d6, 0x13d8, 0x13da, 0x13dc, 0x13de, 0x13e0, 0x13e2, 0x13e4, + 0x13e6, 0x13e8, 0x13ea, 0x13ec, 0x13ee, 0x13f0, 0x13f2, 0x13f5, + // Entry 4C0 - 4FF + 0x13f8, 0x13fb, 0x13fe, 0x1401, 0x1404, 0x1407, 0x140a, 0x140d, + 0x1410, 0x1413, 0x1416, 0x1419, 0x141c, 0x141f, 0x1422, 0x1425, + 0x1428, 0x142b, 0x142e, 0x1431, 0x1434, 0x1437, 0x143a, 0x143d, + 0x1440, 0x1447, 0x1449, 0x144b, 0x144d, 0x1450, 0x1452, 0x1454, + 0x1456, 0x1458, 0x145a, 0x1460, 0x1466, 0x1469, 0x146c, 0x146f, + 0x1472, 0x1475, 0x1478, 0x147b, 0x147e, 0x1481, 0x1484, 0x1487, + 0x148a, 0x148d, 0x1490, 0x1493, 0x1496, 0x1499, 0x149c, 0x149f, + 0x14a2, 0x14a5, 0x14a8, 0x14ab, 0x14ae, 0x14b1, 0x14b4, 0x14b7, + // Entry 500 - 53F + 0x14ba, 0x14bd, 0x14c0, 0x14c3, 0x14c6, 0x14c9, 0x14cc, 0x14cf, + 0x14d2, 0x14d5, 0x14d8, 0x14db, 0x14de, 0x14e1, 0x14e4, 0x14e7, + 0x14ea, 0x14ed, 0x14f6, 0x14ff, 0x1508, 0x1511, 0x151a, 0x1523, + 0x152c, 0x1535, 0x153e, 0x1541, 0x1544, 0x1547, 0x154a, 0x154d, + 0x1550, 0x1553, 0x1556, 0x1559, 
0x155c, 0x155f, 0x1562, 0x1565, + 0x1568, 0x156b, 0x156e, 0x1571, 0x1574, 0x1577, 0x157a, 0x157d, + 0x1580, 0x1583, 0x1586, 0x1589, 0x158c, 0x158f, 0x1592, 0x1595, + 0x1598, 0x159b, 0x159e, 0x15a1, 0x15a4, 0x15a7, 0x15aa, 0x15ad, + // Entry 540 - 57F + 0x15b0, 0x15b3, 0x15b6, 0x15b9, 0x15bc, 0x15bf, 0x15c2, 0x15c5, + 0x15c8, 0x15cb, 0x15ce, 0x15d1, 0x15d4, 0x15d7, 0x15da, 0x15dd, + 0x15e0, 0x15e3, 0x15e6, 0x15e9, 0x15ec, 0x15ef, 0x15f2, 0x15f5, + 0x15f8, 0x15fb, 0x15fe, 0x1601, 0x1604, 0x1607, 0x160a, 0x160d, + 0x1610, 0x1613, 0x1616, 0x1619, 0x161c, 0x161f, 0x1622, 0x1625, + 0x1628, 0x162b, 0x162e, 0x1631, 0x1634, 0x1637, 0x163a, 0x163d, + 0x1640, 0x1643, 0x1646, 0x1649, 0x164c, 0x164f, 0x1652, 0x1655, + 0x1658, 0x165b, 0x165e, 0x1661, 0x1664, 0x1667, 0x166a, 0x166d, + // Entry 580 - 5BF + 0x1670, 0x1673, 0x1676, 0x1679, 0x167c, 0x167f, 0x1682, 0x1685, + 0x1688, 0x168b, 0x168e, 0x1691, 0x1694, 0x1697, 0x169a, 0x169d, + 0x16a0, 0x16a3, 0x16a6, 0x16a9, 0x16ac, 0x16af, 0x16b2, 0x16b5, + 0x16b8, 0x16bb, 0x16be, 0x16c1, 0x16c4, 0x16c7, 0x16ca, 0x16cd, + 0x16d0, 0x16d3, 0x16d6, 0x16d9, 0x16dc, 0x16df, 0x16e2, 0x16e5, + 0x16e8, 0x16eb, 0x16ee, 0x16f1, 0x16f4, 0x16f7, 0x16fa, 0x16fd, + 0x1700, 0x1703, 0x1706, 0x1709, 0x170c, 0x170f, 0x1712, 0x1715, + 0x1718, 0x171b, 0x171e, 0x1721, 0x1724, 0x1727, 0x172a, 0x172d, + // Entry 5C0 - 5FF + 0x1730, 0x1733, 0x1736, 0x1739, 0x173c, 0x173f, 0x1742, 0x1745, + 0x1748, 0x174b, 0x174e, 0x1751, 0x1754, 0x1757, 0x175a, 0x175d, + 0x1760, 0x1763, 0x1766, 0x1769, 0x176c, 0x176f, 0x1772, 0x1775, + 0x1778, 0x177b, 0x177e, 0x1781, 0x1784, 0x1787, 0x178a, 0x178d, + 0x1790, 0x1793, 0x1796, 0x1799, 0x179c, 0x179f, 0x17a2, 0x17a5, + 0x17a8, 0x17ab, 0x17ae, 0x17b1, 0x17b4, 0x17b7, 0x17ba, 0x17bd, + 0x17c0, 0x17c3, 0x17c6, 0x17c9, 0x17cc, 0x17cf, 0x17d2, 0x17d5, + 0x17d8, 0x17db, 0x17de, 0x17e1, 0x17e4, 0x17e7, 0x17ea, 0x17ed, + // Entry 600 - 63F + 0x17f0, 0x17f3, 0x17f6, 0x17f9, 0x17fc, 0x17ff, 0x1802, 0x1805, + 0x1808, 0x180b, 0x180e, 0x1811, 0x1814, 0x1817, 0x181a, 0x181d, + 0x1820, 0x1823, 0x1826, 0x1829, 0x182c, 0x182f, 0x1832, 0x1835, + 0x1838, 0x183b, 0x183e, 0x1841, 0x1844, 0x1847, 0x184a, 0x184d, + 0x1850, 0x1853, 0x1856, 0x1859, 0x185c, 0x185f, 0x1862, 0x1865, + 0x1868, 0x186b, 0x186e, 0x1871, 0x1874, 0x1877, 0x187a, 0x187d, + 0x1880, 0x1883, 0x1886, 0x1889, 0x188c, 0x188f, 0x1892, 0x1895, + 0x1898, 0x189b, 0x189e, 0x18a1, 0x18a4, 0x18a7, 0x18aa, 0x18ad, + // Entry 640 - 67F + 0x18b0, 0x18b3, 0x18b6, 0x18b9, 0x18bc, 0x18bf, 0x18c2, 0x18c5, + 0x18c8, 0x18cb, 0x18ce, 0x18d1, 0x18d4, 0x18d7, 0x18da, 0x18dd, + 0x18e0, 0x18e3, 0x18e6, 0x18e9, 0x18ec, 0x18ef, 0x18f2, 0x18f5, + 0x18f8, 0x18fb, 0x18fe, 0x1901, 0x1904, 0x1907, 0x190a, 0x190d, + 0x1910, 0x1913, 0x1916, 0x1919, 0x191c, 0x191f, 0x1922, 0x1925, + 0x1928, 0x192b, 0x192e, 0x1931, 0x1934, 0x1937, 0x193a, 0x193d, + 0x1940, 0x1943, 0x1946, 0x1949, 0x194c, 0x194f, 0x1952, 0x1955, + 0x1958, 0x195b, 0x195e, 0x1961, 0x1964, 0x1967, 0x196a, 0x196d, + // Entry 680 - 6BF + 0x1970, 0x1973, 0x1976, 0x1979, 0x197c, 0x197f, 0x1982, 0x1985, + 0x1988, 0x198b, 0x198e, 0x1991, 0x1994, 0x1997, 0x199a, 0x199d, + 0x19a0, 0x19a3, 0x19a6, 0x19a9, 0x19ac, 0x19af, 0x19b2, 0x19b5, + 0x19b8, 0x19bb, 0x19be, 0x19c1, 0x19c4, 0x19c7, 0x19ca, 0x19cd, + 0x19d0, 0x19d3, 0x19d6, 0x19d9, 0x19dc, 0x19df, 0x19e2, 0x19e5, + 0x19e8, 0x19eb, 0x19ee, 0x19f1, 0x19f4, 0x19f7, 0x19fa, 0x19fd, + 0x1a00, 0x1a03, 0x1a06, 0x1a09, 0x1a0c, 0x1a0f, 0x1a12, 0x1a15, + 0x1a18, 0x1a1b, 0x1a1e, 0x1a21, 0x1a24, 0x1a27, 0x1a2a, 0x1a2d, + // Entry 6C0 - 6FF + 0x1a30, 
+} // Size: 3482 bytes + +var xorData string = "" + // Size: 4907 bytes + "\x02\x0c\x09\x02\xb0\xec\x02\xad\xd8\x02\xad\xd9\x02\x06\x07\x02\x0f\x12" + + "\x02\x0f\x1f\x02\x0f\x1d\x02\x01\x13\x02\x0f\x16\x02\x0f\x0b\x02\x0f3" + + "\x02\x0f7\x02\x0f?\x02\x0f/\x02\x0f*\x02\x0c&\x02\x0c*\x02\x0c;\x02\x0c9" + + "\x02\x0c%\x02\xab\xed\x02\xab\xe2\x02\xab\xe3\x02\xa9\xe0\x02\xa9\xe1" + + "\x02\xa9\xe6\x02\xa3\xcb\x02\xa3\xc8\x02\xa3\xc9\x02\x01#\x02\x01\x08" + + "\x02\x0e>\x02\x0e'\x02\x0f\x03\x02\x03\x0d\x02\x03\x09\x02\x03\x17\x02" + + "\x03\x0e\x02\x02\x03\x02\x011\x02\x01\x00\x02\x01\x10\x02\x03<\x02\x07" + + "\x0d\x02\x02\x0c\x02\x0c0\x02\x01\x03\x02\x01\x01\x02\x01 \x02\x01\x22" + + "\x02\x01)\x02\x01\x0a\x02\x01\x0c\x02\x02\x06\x02\x02\x02\x02\x03\x10" + + "\x03\x037 \x03\x0b+\x03\x021\x00\x02\x01\x04\x02\x01\x02\x02\x019\x02" + + "\x03\x1c\x02\x02$\x03\x80p$\x02\x03:\x02\x03\x0a\x03\xc1r.\x03\xc1r,\x03" + + "\xc1r\x02\x02\x02:\x02\x02>\x02\x02,\x02\x02\x10\x02\x02\x00\x03\xc1s<" + + "\x03\xc1s*\x03\xc2L$\x03\xc2L;\x02\x09)\x02\x0a\x19\x03\x83\xab\xe3\x03" + + "\x83\xab\xf2\x03 4\xe0\x03\x81\xab\xea\x03\x81\xab\xf3\x03 4\xef\x03\x96" + + "\xe1\xcd\x03\x84\xe5\xc3\x02\x0d\x11\x03\x8b\xec\xcb\x03\x94\xec\xcf\x03" + + "\x9a\xec\xc2\x03\x8b\xec\xdb\x03\x94\xec\xdf\x03\x9a\xec\xd2\x03\x01\x0c" + + "!\x03\x01\x0c#\x03ʠ\x9d\x03ʣ\x9c\x03ʢ\x9f\x03ʥ\x9e\x03ʤ\x91\x03ʧ\x90\x03" + + "ʦ\x93\x03ʩ\x92\x03ʨ\x95\x03\xca\xf3\xb5\x03\xca\xf0\xb4\x03\xca\xf1\xb7" + + "\x03\xca\xf6\xb6\x03\xca\xf7\x89\x03\xca\xf4\x88\x03\xca\xf5\x8b\x03\xca" + + "\xfa\x8a\x03\xca\xfb\x8d\x03\xca\xf8\x8c\x03\xca\xf9\x8f\x03\xca\xfe\x8e" + + "\x03\xca\xff\x81\x03\xca\xfc\x80\x03\xca\xfd\x83\x03\xca\xe2\x82\x03\xca" + + "\xe3\x85\x03\xca\xe0\x84\x03\xca\xe1\x87\x03\xca\xe6\x86\x03\xca\xe7\x99" + + "\x03\xca\xe4\x98\x03\xca\xe5\x9b\x03\xca\xea\x9a\x03\xca\xeb\x9d\x03\xca" + + "\xe8\x9c\x03ؓ\x89\x03ߔ\x8b\x02\x010\x03\x03\x04\x1e\x03\x04\x15\x12\x03" + + "\x0b\x05,\x03\x06\x04\x00\x03\x06\x04)\x03\x06\x044\x03\x06\x04<\x03\x06" + + "\x05\x1d\x03\x06\x06\x00\x03\x06\x06\x0a\x03\x06\x06'\x03\x06\x062\x03" + + "\x0786\x03\x079/\x03\x079 \x03\x07:\x0e\x03\x07:\x1b\x03\x07:%\x03\x07;/" + + "\x03\x07;%\x03\x074\x11\x03\x076\x09\x03\x077*\x03\x070\x01\x03\x070\x0f" + + "\x03\x070.\x03\x071\x16\x03\x071\x04\x03\x0710\x03\x072\x18\x03\x072-" + + "\x03\x073\x14\x03\x073>\x03\x07'\x09\x03\x07 \x00\x03\x07\x1f\x0b\x03" + + "\x07\x18#\x03\x07\x18(\x03\x07\x186\x03\x07\x18\x03\x03\x07\x19\x16\x03" + + "\x07\x116\x03\x07\x12'\x03\x07\x13\x10\x03\x07\x0c&\x03\x07\x0c\x08\x03" + + "\x07\x0c\x13\x03\x07\x0d\x02\x03\x07\x0d\x1c\x03\x07\x0b5\x03\x07\x0b" + + "\x0a\x03\x07\x0b\x01\x03\x07\x0b\x0f\x03\x07\x05\x00\x03\x07\x05\x09\x03" + + "\x07\x05\x0b\x03\x07\x07\x01\x03\x07\x07\x08\x03\x07\x00<\x03\x07\x00+" + + "\x03\x07\x01)\x03\x07\x01\x1b\x03\x07\x01\x08\x03\x07\x03?\x03\x0445\x03" + + "\x044\x08\x03\x0454\x03\x04)/\x03\x04)5\x03\x04+\x05\x03\x04+\x14\x03" + + "\x04+ \x03\x04+<\x03\x04*&\x03\x04*\x22\x03\x04&8\x03\x04!\x01\x03\x04!" 
+ + "\x22\x03\x04\x11+\x03\x04\x10.\x03\x04\x104\x03\x04\x13=\x03\x04\x12\x04" + + "\x03\x04\x12\x0a\x03\x04\x0d\x1d\x03\x04\x0d\x07\x03\x04\x0d \x03\x05<>" + + "\x03\x055<\x03\x055!\x03\x055#\x03\x055&\x03\x054\x1d\x03\x054\x02\x03" + + "\x054\x07\x03\x0571\x03\x053\x1a\x03\x053\x16\x03\x05.<\x03\x05.\x07\x03" + + "\x05):\x03\x05)<\x03\x05)\x0c\x03\x05)\x15\x03\x05+-\x03\x05+5\x03\x05$" + + "\x1e\x03\x05$\x14\x03\x05'\x04\x03\x05'\x14\x03\x05&\x02\x03\x05\x226" + + "\x03\x05\x22\x0c\x03\x05\x22\x1c\x03\x05\x19\x0a\x03\x05\x1b\x09\x03\x05" + + "\x1b\x0c\x03\x05\x14\x07\x03\x05\x16?\x03\x05\x16\x0c\x03\x05\x0c\x05" + + "\x03\x05\x0e\x0f\x03\x05\x01\x0e\x03\x05\x00(\x03\x05\x030\x03\x05\x03" + + "\x06\x03\x0a==\x03\x0a=1\x03\x0a=,\x03\x0a=\x0c\x03\x0a??\x03\x0a<\x08" + + "\x03\x0a9!\x03\x0a9)\x03\x0a97\x03\x0a99\x03\x0a6\x0a\x03\x0a6\x1c\x03" + + "\x0a6\x17\x03\x0a7'\x03\x0a78\x03\x0a73\x03\x0a'\x01\x03\x0a'&\x03\x0a" + + "\x1f\x0e\x03\x0a\x1f\x03\x03\x0a\x1f3\x03\x0a\x1b/\x03\x0a\x18\x19\x03" + + "\x0a\x19\x01\x03\x0a\x16\x14\x03\x0a\x0e\x22\x03\x0a\x0f\x10\x03\x0a\x0f" + + "\x02\x03\x0a\x0f \x03\x0a\x0c\x04\x03\x0a\x0b>\x03\x0a\x0b+\x03\x0a\x08/" + + "\x03\x0a\x046\x03\x0a\x05\x14\x03\x0a\x00\x04\x03\x0a\x00\x10\x03\x0a" + + "\x00\x14\x03\x0b<3\x03\x0b;*\x03\x0b9\x22\x03\x0b9)\x03\x0b97\x03\x0b+" + + "\x10\x03\x0b((\x03\x0b&5\x03\x0b$\x1c\x03\x0b$\x12\x03\x0b%\x04\x03\x0b#" + + "<\x03\x0b#0\x03\x0b#\x0d\x03\x0b#\x19\x03\x0b!:\x03\x0b!\x1f\x03\x0b!" + + "\x00\x03\x0b\x1e5\x03\x0b\x1c\x1d\x03\x0b\x1d-\x03\x0b\x1d(\x03\x0b\x18." + + "\x03\x0b\x18 \x03\x0b\x18\x16\x03\x0b\x14\x13\x03\x0b\x15$\x03\x0b\x15" + + "\x22\x03\x0b\x12\x1b\x03\x0b\x12\x10\x03\x0b\x132\x03\x0b\x13=\x03\x0b" + + "\x12\x18\x03\x0b\x0c&\x03\x0b\x061\x03\x0b\x06:\x03\x0b\x05#\x03\x0b\x05" + + "<\x03\x0b\x04\x0b\x03\x0b\x04\x04\x03\x0b\x04\x1b\x03\x0b\x042\x03\x0b" + + "\x041\x03\x0b\x03\x03\x03\x0b\x03\x1d\x03\x0b\x03/\x03\x0b\x03+\x03\x0b" + + "\x02\x1b\x03\x0b\x02\x00\x03\x0b\x01\x1e\x03\x0b\x01\x08\x03\x0b\x015" + + "\x03\x06\x0d9\x03\x06\x0d=\x03\x06\x0d?\x03\x02\x001\x03\x02\x003\x03" + + "\x02\x02\x19\x03\x02\x006\x03\x02\x02\x1b\x03\x02\x004\x03\x02\x00<\x03" + + "\x02\x02\x0a\x03\x02\x02\x0e\x03\x02\x01\x1a\x03\x02\x01\x07\x03\x02\x01" + + "\x05\x03\x02\x01\x0b\x03\x02\x01%\x03\x02\x01\x0c\x03\x02\x01\x04\x03" + + "\x02\x01\x1c\x03\x02\x00.\x03\x02\x002\x03\x02\x00>\x03\x02\x00\x12\x03" + + "\x02\x00\x16\x03\x02\x011\x03\x02\x013\x03\x02\x02 \x03\x02\x02%\x03\x02" + + "\x02$\x03\x02\x028\x03\x02\x02;\x03\x02\x024\x03\x02\x012\x03\x02\x022" + + "\x03\x02\x02/\x03\x02\x01,\x03\x02\x01\x13\x03\x02\x01\x16\x03\x02\x01" + + "\x11\x03\x02\x01\x1e\x03\x02\x01\x15\x03\x02\x01\x17\x03\x02\x01\x0f\x03" + + "\x02\x01\x08\x03\x02\x00?\x03\x02\x03\x07\x03\x02\x03\x0d\x03\x02\x03" + + "\x13\x03\x02\x03\x1d\x03\x02\x03\x1f\x03\x02\x00\x03\x03\x02\x00\x0d\x03" + + "\x02\x00\x01\x03\x02\x00\x1b\x03\x02\x00\x19\x03\x02\x00\x18\x03\x02\x00" + + "\x13\x03\x02\x00/\x03\x07>\x12\x03\x07<\x1f\x03\x07>\x1d\x03\x06\x1d\x0e" + + "\x03\x07>\x1c\x03\x07>:\x03\x07>\x13\x03\x04\x12+\x03\x07?\x03\x03\x07>" + + "\x02\x03\x06\x224\x03\x06\x1a.\x03\x07<%\x03\x06\x1c\x0b\x03\x0609\x03" + + "\x05\x1f\x01\x03\x04'\x08\x03\x93\xfd\xf5\x03\x02\x0d \x03\x02\x0d#\x03" + + "\x02\x0d!\x03\x02\x0d&\x03\x02\x0d\x22\x03\x02\x0d/\x03\x02\x0d,\x03\x02" + + "\x0d$\x03\x02\x0d'\x03\x02\x0d%\x03\x02\x0d;\x03\x02\x0d=\x03\x02\x0d?" 
+ + "\x03\x099.\x03\x08\x0b7\x03\x08\x02\x14\x03\x08\x14\x0d\x03\x08.:\x03" + + "\x089'\x03\x0f\x0b\x18\x03\x0f\x1c1\x03\x0f\x17&\x03\x0f9\x1f\x03\x0f0" + + "\x0c\x03\x0e\x0a9\x03\x0e\x056\x03\x0e\x1c#\x03\x0f\x13\x0e\x03\x072\x00" + + "\x03\x070\x0d\x03\x072\x0b\x03\x06\x11\x18\x03\x070\x10\x03\x06\x0f(\x03" + + "\x072\x05\x03\x06\x0f,\x03\x073\x15\x03\x06\x07\x08\x03\x05\x16\x02\x03" + + "\x04\x0b \x03\x05:8\x03\x05\x16%\x03\x0a\x0d\x1f\x03\x06\x16\x10\x03\x05" + + "\x1d5\x03\x05*;\x03\x05\x16\x1b\x03\x04.-\x03\x06\x1a\x19\x03\x04\x03," + + "\x03\x0b87\x03\x04/\x0a\x03\x06\x00,\x03\x04-\x01\x03\x04\x1e-\x03\x06/(" + + "\x03\x0a\x0b5\x03\x06\x0e7\x03\x06\x07.\x03\x0597\x03\x0a*%\x03\x0760" + + "\x03\x06\x0c;\x03\x05'\x00\x03\x072.\x03\x072\x08\x03\x06=\x01\x03\x06" + + "\x05\x1b\x03\x06\x06\x12\x03\x06$=\x03\x06'\x0d\x03\x04\x11\x0f\x03\x076" + + ",\x03\x06\x07;\x03\x06.,\x03\x86\xf9\xea\x03\x8f\xff\xeb\x02\x092\x02" + + "\x095\x02\x094\x02\x09;\x02\x09>\x02\x098\x02\x09*\x02\x09/\x02\x09,\x02" + + "\x09%\x02\x09&\x02\x09#\x02\x09 \x02\x08!\x02\x08%\x02\x08$\x02\x08+\x02" + + "\x08.\x02\x08*\x02\x08&\x02\x088\x02\x08>\x02\x084\x02\x086\x02\x080\x02" + + "\x08\x10\x02\x08\x17\x02\x08\x12\x02\x08\x1d\x02\x08\x1f\x02\x08\x13\x02" + + "\x08\x15\x02\x08\x14\x02\x08\x0c\x03\x8b\xfd\xd0\x03\x81\xec\xc6\x03\x87" + + "\xe0\x8a\x03-2\xe3\x03\x80\xef\xe4\x03-2\xea\x03\x88\xe6\xeb\x03\x8e\xe6" + + "\xe8\x03\x84\xe6\xe9\x03\x97\xe6\xee\x03-2\xf9\x03-2\xf6\x03\x8e\xe3\xad" + + "\x03\x80\xe3\x92\x03\x88\xe3\x90\x03\x8e\xe3\x90\x03\x80\xe3\x97\x03\x88" + + "\xe3\x95\x03\x88\xfe\xcb\x03\x8e\xfe\xca\x03\x84\xfe\xcd\x03\x91\xef\xc9" + + "\x03-2\xc1\x03-2\xc0\x03-2\xcb\x03\x88@\x09\x03\x8e@\x08\x03\x8f\xe0\xf5" + + "\x03\x8e\xe6\xf9\x03\x8e\xe0\xfa\x03\x93\xff\xf4\x03\x84\xee\xd3\x03\x0b" + + "(\x04\x023 \x03\x0b)\x08\x021;\x02\x01*\x03\x0b#\x10\x03\x0b 0\x03\x0b!" 
+ + "\x10\x03\x0b!0\x03\x07\x15\x08\x03\x09?5\x03\x07\x1f\x08\x03\x07\x17\x0b" + + "\x03\x09\x1f\x15\x03\x0b\x1c7\x03\x0a+#\x03\x06\x1a\x1b\x03\x06\x1a\x14" + + "\x03\x0a\x01\x18\x03\x06#\x1b\x03\x0a2\x0c\x03\x0a\x01\x04\x03\x09#;\x03" + + "\x08='\x03\x08\x1a\x0a\x03\x07\x03\x0a\x111\x03\x09\x1b\x09\x03\x073.\x03\x07" + + "\x01\x00\x03\x09/,\x03\x07#>\x03\x07\x048\x03\x0a\x1f\x22\x03\x098>\x03" + + "\x09\x11\x00\x03\x08/\x17\x03\x06'\x22\x03\x0b\x1a+\x03\x0a\x22\x19\x03" + + "\x0a/1\x03\x0974\x03\x09\x0f\x22\x03\x08,\x22\x03\x08?\x14\x03\x07$5\x03" + + "\x07<3\x03\x07=*\x03\x07\x13\x18\x03\x068\x0a\x03\x06\x09\x16\x03\x06" + + "\x13\x00\x03\x08\x067\x03\x08\x01\x03\x03\x08\x12\x1d\x03\x07+7\x03\x06(" + + ";\x03\x06\x1c?\x03\x07\x0e\x17\x03\x0a\x06\x1d\x03\x0a\x19\x07\x03\x08" + + "\x14$\x03\x07$;\x03\x08,$\x03\x08\x06\x0d\x03\x07\x16\x0a\x03\x06>>\x03" + + "\x0a\x06\x12\x03\x0a\x14)\x03\x09\x0d\x1f\x03\x09\x12\x17\x03\x09\x19" + + "\x01\x03\x08\x11 \x03\x08\x1d'\x03\x06<\x1a\x03\x0a.\x00\x03\x07'\x18" + + "\x03\x0a\x22\x08\x03\x08\x0d\x0a\x03\x08\x13)\x03\x07*)\x03\x06<,\x03" + + "\x07\x0b\x1a\x03\x09.\x14\x03\x09\x0d\x1e\x03\x07\x0e#\x03\x0b\x1d'\x03" + + "\x0a\x0a8\x03\x09%2\x03\x08+&\x03\x080\x12\x03\x0a)4\x03\x08\x06\x1f\x03" + + "\x0b\x1b\x1a\x03\x0a\x1b\x0f\x03\x0b\x1d*\x03\x09\x16$\x03\x090\x11\x03" + + "\x08\x11\x08\x03\x0a*(\x03\x0a\x042\x03\x089,\x03\x074'\x03\x07\x0f\x05" + + "\x03\x09\x0b\x0a\x03\x07\x1b\x01\x03\x09\x17:\x03\x09.\x0d\x03\x07.\x11" + + "\x03\x09+\x15\x03\x080\x13\x03\x0b\x1f\x19\x03\x0a \x11\x03\x0a\x220\x03" + + "\x09\x07;\x03\x08\x16\x1c\x03\x07,\x13\x03\x07\x0e/\x03\x06\x221\x03\x0a" + + ".\x0a\x03\x0a7\x02\x03\x0a\x032\x03\x0a\x1d.\x03\x091\x06\x03\x09\x19:" + + "\x03\x08\x02/\x03\x060+\x03\x06\x0f-\x03\x06\x1c\x1f\x03\x06\x1d\x07\x03" + + "\x0a,\x11\x03\x09=\x0d\x03\x09\x0b;\x03\x07\x1b/\x03\x0a\x1f:\x03\x09 " + + "\x1f\x03\x09.\x10\x03\x094\x0b\x03\x09\x1a1\x03\x08#\x1a\x03\x084\x1d" + + "\x03\x08\x01\x1f\x03\x08\x11\x22\x03\x07'8\x03\x07\x1a>\x03\x0757\x03" + + "\x06&9\x03\x06+\x11\x03\x0a.\x0b\x03\x0a,>\x03\x0a4#\x03\x08%\x17\x03" + + "\x07\x05\x22\x03\x07\x0c\x0b\x03\x0a\x1d+\x03\x0a\x19\x16\x03\x09+\x1f" + + "\x03\x09\x08\x0b\x03\x08\x16\x18\x03\x08+\x12\x03\x0b\x1d\x0c\x03\x0a=" + + "\x10\x03\x0a\x09\x0d\x03\x0a\x10\x11\x03\x09&0\x03\x08(\x1f\x03\x087\x07" + + "\x03\x08\x185\x03\x07'6\x03\x06.\x05\x03\x06=\x04\x03\x06;;\x03\x06\x06," + + "\x03\x0b\x18>\x03\x08\x00\x18\x03\x06 \x03\x03\x06<\x00\x03\x09%\x18\x03" + + "\x0b\x1c<\x03\x0a%!\x03\x0a\x09\x12\x03\x0a\x16\x02\x03\x090'\x03\x09" + + "\x0e=\x03\x08 \x0e\x03\x08>\x03\x03\x074>\x03\x06&?\x03\x06\x19\x09\x03" + + "\x06?(\x03\x0a-\x0e\x03\x09:3\x03\x098:\x03\x09\x12\x0b\x03\x09\x1d\x17" + + "\x03\x087\x05\x03\x082\x14\x03\x08\x06%\x03\x08\x13\x1f\x03\x06\x06\x0e" + + "\x03\x0a\x22<\x03\x09/<\x03\x06>+\x03\x0a'?\x03\x0a\x13\x0c\x03\x09\x10<" + + "\x03\x07\x1b=\x03\x0a\x19\x13\x03\x09\x22\x1d\x03\x09\x07\x0d\x03\x08)" + + "\x1c\x03\x06=\x1a\x03\x0a/4\x03\x0a7\x11\x03\x0a\x16:\x03\x09?3\x03\x09:" + + "/\x03\x09\x05\x0a\x03\x09\x14\x06\x03\x087\x22\x03\x080\x07\x03\x08\x1a" + + "\x1f\x03\x07\x04(\x03\x07\x04\x09\x03\x06 %\x03\x06<\x08\x03\x0a+\x14" + + "\x03\x09\x1d\x16\x03\x0a70\x03\x08 >\x03\x0857\x03\x070\x0a\x03\x06=\x12" + + "\x03\x06\x16%\x03\x06\x1d,\x03\x099#\x03\x09\x10>\x03\x07 \x1e\x03\x08" + + "\x0c<\x03\x08\x0b\x18\x03\x08\x15+\x03\x08,:\x03\x08%\x22\x03\x07\x0a$" + + "\x03\x0b\x1c=\x03\x07+\x08\x03\x0a/\x05\x03\x0a \x07\x03\x0a\x12'\x03" + + 
"\x09#\x11\x03\x08\x1b\x15\x03\x0a\x06\x01\x03\x09\x1c\x1b\x03\x0922\x03" + + "\x07\x14<\x03\x07\x09\x04\x03\x061\x04\x03\x07\x0e\x01\x03\x0a\x13\x18" + + "\x03\x0a-\x0c\x03\x0a?\x0d\x03\x0a\x09\x0a\x03\x091&\x03\x0a/\x0b\x03" + + "\x08$<\x03\x083\x1d\x03\x08\x0c$\x03\x08\x0d\x07\x03\x08\x0d?\x03\x08" + + "\x0e\x14\x03\x065\x0a\x03\x08\x1a#\x03\x08\x16#\x03\x0702\x03\x07\x03" + + "\x1a\x03\x06(\x1d\x03\x06+\x1b\x03\x06\x0b\x05\x03\x06\x0b\x17\x03\x06" + + "\x0c\x04\x03\x06\x1e\x19\x03\x06+0\x03\x062\x18\x03\x0b\x16\x1e\x03\x0a+" + + "\x16\x03\x0a-?\x03\x0a#:\x03\x0a#\x10\x03\x0a%$\x03\x0a>+\x03\x0a01\x03" + + "\x0a1\x10\x03\x0a\x099\x03\x0a\x0a\x12\x03\x0a\x19\x1f\x03\x0a\x19\x12" + + "\x03\x09*)\x03\x09-\x16\x03\x09.1\x03\x09.2\x03\x09<\x0e\x03\x09> \x03" + + "\x093\x12\x03\x09\x0b\x01\x03\x09\x1c2\x03\x09\x11\x1c\x03\x09\x15%\x03" + + "\x08,&\x03\x08!\x22\x03\x089(\x03\x08\x0b\x1a\x03\x08\x0d2\x03\x08\x0c" + + "\x04\x03\x08\x0c\x06\x03\x08\x0c\x1f\x03\x08\x0c\x0c\x03\x08\x0f\x1f\x03" + + "\x08\x0f\x1d\x03\x08\x00\x14\x03\x08\x03\x14\x03\x08\x06\x16\x03\x08\x1e" + + "#\x03\x08\x11\x11\x03\x08\x10\x18\x03\x08\x14(\x03\x07)\x1e\x03\x07.1" + + "\x03\x07 $\x03\x07 '\x03\x078\x08\x03\x07\x0d0\x03\x07\x0f7\x03\x07\x05#" + + "\x03\x07\x05\x1a\x03\x07\x1a7\x03\x07\x1d-\x03\x07\x17\x10\x03\x06)\x1f" + + "\x03\x062\x0b\x03\x066\x16\x03\x06\x09\x11\x03\x09(\x1e\x03\x07!5\x03" + + "\x0b\x11\x16\x03\x0a/\x04\x03\x0a,\x1a\x03\x0b\x173\x03\x0a,1\x03\x0a/5" + + "\x03\x0a\x221\x03\x0a\x22\x0d\x03\x0a?%\x03\x0a<,\x03\x0a?#\x03\x0a>\x19" + + "\x03\x0a\x08&\x03\x0a\x0b\x0e\x03\x0a\x0c:\x03\x0a\x0c+\x03\x0a\x03\x22" + + "\x03\x0a\x06)\x03\x0a\x11\x10\x03\x0a\x11\x1a\x03\x0a\x17-\x03\x0a\x14(" + + "\x03\x09)\x1e\x03\x09/\x09\x03\x09.\x00\x03\x09,\x07\x03\x09/*\x03\x09-9" + + "\x03\x09\x228\x03\x09%\x09\x03\x09:\x12\x03\x09;\x1d\x03\x09?\x06\x03" + + "\x093%\x03\x096\x05\x03\x096\x08\x03\x097\x02\x03\x09\x07,\x03\x09\x04," + + "\x03\x09\x1f\x16\x03\x09\x11\x03\x03\x09\x11\x12\x03\x09\x168\x03\x08*" + + "\x05\x03\x08/2\x03\x084:\x03\x08\x22+\x03\x08 0\x03\x08&\x0a\x03\x08;" + + "\x10\x03\x08>$\x03\x08>\x18\x03\x0829\x03\x082:\x03\x081,\x03\x081<\x03" + + "\x081\x1c\x03\x087#\x03\x087*\x03\x08\x09'\x03\x08\x00\x1d\x03\x08\x05-" + + "\x03\x08\x1f4\x03\x08\x1d\x04\x03\x08\x16\x0f\x03\x07*7\x03\x07'!\x03" + + "\x07%\x1b\x03\x077\x0c\x03\x07\x0c1\x03\x07\x0c.\x03\x07\x00\x06\x03\x07" + + "\x01\x02\x03\x07\x010\x03\x07\x06=\x03\x07\x01\x03\x03\x07\x01\x13\x03" + + "\x07\x06\x06\x03\x07\x05\x0a\x03\x07\x1f\x09\x03\x07\x17:\x03\x06*1\x03" + + "\x06-\x1d\x03\x06\x223\x03\x062:\x03\x060$\x03\x066\x1e\x03\x064\x12\x03" + + "\x0645\x03\x06\x0b\x00\x03\x06\x0b7\x03\x06\x07\x1f\x03\x06\x15\x12\x03" + + "\x0c\x05\x0f\x03\x0b+\x0b\x03\x0b+-\x03\x06\x16\x1b\x03\x06\x15\x17\x03" + + "\x89\xca\xea\x03\x89\xca\xe8\x03\x0c8\x10\x03\x0c8\x01\x03\x0c8\x0f\x03" + + "\x0d8%\x03\x0d8!\x03\x0c8-\x03\x0c8/\x03\x0c8+\x03\x0c87\x03\x0c85\x03" + + "\x0c9\x09\x03\x0c9\x0d\x03\x0c9\x0f\x03\x0c9\x0b\x03\xcfu\x0c\x03\xcfu" + + "\x0f\x03\xcfu\x0e\x03\xcfu\x09\x03\x0c9\x10\x03\x0d9\x0c\x03\xcf`;\x03" + + "\xcf`>\x03\xcf`9\x03\xcf`8\x03\xcf`7\x03\xcf`*\x03\xcf`-\x03\xcf`,\x03" + + "\x0d\x1b\x1a\x03\x0d\x1b&\x03\x0c=.\x03\x0c=%\x03\x0c>\x1e\x03\x0c>\x14" + + "\x03\x0c?\x06\x03\x0c?\x0b\x03\x0c?\x0c\x03\x0c?\x0d\x03\x0c?\x02\x03" + + "\x0c>\x0f\x03\x0c>\x08\x03\x0c>\x09\x03\x0c>,\x03\x0c>\x0c\x03\x0c?\x13" + + "\x03\x0c?\x16\x03\x0c?\x15\x03\x0c?\x1c\x03\x0c?\x1f\x03\x0c?\x1d\x03" + + 
"\x0c?\x1a\x03\x0c?\x17\x03\x0c?\x08\x03\x0c?\x09\x03\x0c?\x0e\x03\x0c?" + + "\x04\x03\x0c?\x05\x03\x0c" + + "\x03\x0c=2\x03\x0c=6\x03\x0c<\x07\x03\x0c<\x05\x03\x0e:!\x03\x0e:#\x03" + + "\x0e8\x09\x03\x0e:&\x03\x0e8\x0b\x03\x0e:$\x03\x0e:,\x03\x0e8\x1a\x03" + + "\x0e8\x1e\x03\x0e:*\x03\x0e:7\x03\x0e:5\x03\x0e:;\x03\x0e:\x15\x03\x0e:<" + + "\x03\x0e:4\x03\x0e:'\x03\x0e:-\x03\x0e:%\x03\x0e:?\x03\x0e:=\x03\x0e:)" + + "\x03\x0e:/\x03\xcfs'\x03\x0d=\x0f\x03\x0d+*\x03\x0d99\x03\x0d9;\x03\x0d9" + + "?\x03\x0d)\x0d\x03\x0d(%\x02\x01\x18\x02\x01(\x02\x03'\x02\x03)\x02\x03+" + + "\x02\x03/\x02\x03\x19\x02\x03\x1b\x02\x03\x1f\x03\x0d\x22\x18\x03\x0d" + + "\x22\x1a\x03\x0d\x22'\x03\x0d\x22/\x03\x0d\x223\x03\x0d\x22$\x02\x01\x1e" + + "\x03\x0f$!\x03\x0f87\x03\x0f4\x0e\x03\x0f5\x1d\x03\x06'\x03\x03\x0f\x08" + + "\x18\x03\x0f\x0d\x1b\x03\x0e2=\x03\x0e;\x08\x03\x0e:\x0b\x03\x0e\x06$" + + "\x03\x0e\x0d)\x03\x0e\x16\x1f\x03\x0e\x16\x1b\x03\x0d$\x0a\x03\x05,\x1d" + + "\x03\x0d. \x03\x0d.#\x03\x0c(/\x03\x09%\x02\x03\x0d90\x03\x0d\x0e4\x03" + + "\x0d\x0d\x0f\x03\x0c#\x00\x03\x0c,\x1e\x03\x0c2\x0e\x03\x0c\x01\x17\x03" + + "\x0c\x09:\x03\x0e\x173\x03\x0c\x08\x03\x03\x0c\x11\x07\x03\x0c\x10\x18" + + "\x03\x0c\x1f\x1c\x03\x0c\x19\x0e\x03\x0c\x1a\x1f\x03\x0f0>\x03\x0b->\x03" + + "\x0b<+\x03\x0b8\x13\x03\x0b\x043\x03\x0b\x14\x03\x03\x0b\x16%\x03\x0d" + + "\x22&\x03\x0b\x1a\x1a\x03\x0b\x1a\x04\x03\x0a%9\x03\x0a&2\x03\x0a&0\x03" + + "\x0a!\x1a\x03\x0a!7\x03\x0a5\x10\x03\x0a=4\x03\x0a?\x0e\x03\x0a>\x10\x03" + + "\x0a\x00 \x03\x0a\x0f:\x03\x0a\x0f9\x03\x0a\x0b\x0a\x03\x0a\x17%\x03\x0a" + + "\x1b-\x03\x09-\x1a\x03\x09,4\x03\x09.,\x03\x09)\x09\x03\x096!\x03\x091" + + "\x1f\x03\x093\x16\x03\x0c+\x1f\x03\x098 \x03\x098=\x03\x0c(\x1a\x03\x0c(" + + "\x16\x03\x09\x0a+\x03\x09\x16\x12\x03\x09\x13\x0e\x03\x09\x153\x03\x08)!" + + "\x03\x09\x1a\x01\x03\x09\x18\x01\x03\x08%#\x03\x08>\x22\x03\x08\x05%\x03" + + "\x08\x02*\x03\x08\x15;\x03\x08\x1b7\x03\x0f\x07\x1d\x03\x0f\x04\x03\x03" + + "\x070\x0c\x03\x07;\x0b\x03\x07\x08\x17\x03\x07\x12\x06\x03\x06/-\x03\x06" + + "71\x03\x065+\x03\x06>7\x03\x06\x049\x03\x05+\x1e\x03\x05,\x17\x03\x05 " + + "\x1d\x03\x05\x22\x05\x03\x050\x1d" + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *idnaTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return idnaValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. 
+ } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = idnaIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *idnaTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return idnaValues[c0] + } + i := idnaIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *idnaTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return idnaValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = idnaIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *idnaTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return idnaValues[c0] + } + i := idnaIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// idnaTrie. Total size: 31598 bytes (30.86 KiB). Checksum: d3118eda0d6b5360. +type idnaTrie struct{} + +func newIdnaTrie(i int) *idnaTrie { + return &idnaTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. 
+func (t *idnaTrie) lookupValue(n uint32, b byte) uint16 { + switch { + case n < 133: + return uint16(idnaValues[n<<6+uint32(b)]) + default: + n -= 133 + return uint16(idnaSparse.lookup(n, b)) + } +} + +// idnaValues: 135 blocks, 8640 entries, 17280 bytes +// The third block is the zero block. +var idnaValues = [8640]uint16{ + // Block 0x0, offset 0x0 + 0x00: 0x0080, 0x01: 0x0080, 0x02: 0x0080, 0x03: 0x0080, 0x04: 0x0080, 0x05: 0x0080, + 0x06: 0x0080, 0x07: 0x0080, 0x08: 0x0080, 0x09: 0x0080, 0x0a: 0x0080, 0x0b: 0x0080, + 0x0c: 0x0080, 0x0d: 0x0080, 0x0e: 0x0080, 0x0f: 0x0080, 0x10: 0x0080, 0x11: 0x0080, + 0x12: 0x0080, 0x13: 0x0080, 0x14: 0x0080, 0x15: 0x0080, 0x16: 0x0080, 0x17: 0x0080, + 0x18: 0x0080, 0x19: 0x0080, 0x1a: 0x0080, 0x1b: 0x0080, 0x1c: 0x0080, 0x1d: 0x0080, + 0x1e: 0x0080, 0x1f: 0x0080, 0x20: 0x0080, 0x21: 0x0080, 0x22: 0x0080, 0x23: 0x0080, + 0x24: 0x0080, 0x25: 0x0080, 0x26: 0x0080, 0x27: 0x0080, 0x28: 0x0080, 0x29: 0x0080, + 0x2a: 0x0080, 0x2b: 0x0080, 0x2c: 0x0080, 0x2d: 0x0008, 0x2e: 0x0008, 0x2f: 0x0080, + 0x30: 0x0008, 0x31: 0x0008, 0x32: 0x0008, 0x33: 0x0008, 0x34: 0x0008, 0x35: 0x0008, + 0x36: 0x0008, 0x37: 0x0008, 0x38: 0x0008, 0x39: 0x0008, 0x3a: 0x0080, 0x3b: 0x0080, + 0x3c: 0x0080, 0x3d: 0x0080, 0x3e: 0x0080, 0x3f: 0x0080, + // Block 0x1, offset 0x40 + 0x40: 0x0080, 0x41: 0xe105, 0x42: 0xe105, 0x43: 0xe105, 0x44: 0xe105, 0x45: 0xe105, + 0x46: 0xe105, 0x47: 0xe105, 0x48: 0xe105, 0x49: 0xe105, 0x4a: 0xe105, 0x4b: 0xe105, + 0x4c: 0xe105, 0x4d: 0xe105, 0x4e: 0xe105, 0x4f: 0xe105, 0x50: 0xe105, 0x51: 0xe105, + 0x52: 0xe105, 0x53: 0xe105, 0x54: 0xe105, 0x55: 0xe105, 0x56: 0xe105, 0x57: 0xe105, + 0x58: 0xe105, 0x59: 0xe105, 0x5a: 0xe105, 0x5b: 0x0080, 0x5c: 0x0080, 0x5d: 0x0080, + 0x5e: 0x0080, 0x5f: 0x0080, 0x60: 0x0080, 0x61: 0x0008, 0x62: 0x0008, 0x63: 0x0008, + 0x64: 0x0008, 0x65: 0x0008, 0x66: 0x0008, 0x67: 0x0008, 0x68: 0x0008, 0x69: 0x0008, + 0x6a: 0x0008, 0x6b: 0x0008, 0x6c: 0x0008, 0x6d: 0x0008, 0x6e: 0x0008, 0x6f: 0x0008, + 0x70: 0x0008, 0x71: 0x0008, 0x72: 0x0008, 0x73: 0x0008, 0x74: 0x0008, 0x75: 0x0008, + 0x76: 0x0008, 0x77: 0x0008, 0x78: 0x0008, 0x79: 0x0008, 0x7a: 0x0008, 0x7b: 0x0080, + 0x7c: 0x0080, 0x7d: 0x0080, 0x7e: 0x0080, 0x7f: 0x0080, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x0040, 0xc1: 0x0040, 0xc2: 0x0040, 0xc3: 0x0040, 0xc4: 0x0040, 0xc5: 0x0040, + 0xc6: 0x0040, 0xc7: 0x0040, 0xc8: 0x0040, 0xc9: 0x0040, 0xca: 0x0040, 0xcb: 0x0040, + 0xcc: 0x0040, 0xcd: 0x0040, 0xce: 0x0040, 0xcf: 0x0040, 0xd0: 0x0040, 0xd1: 0x0040, + 0xd2: 0x0040, 0xd3: 0x0040, 0xd4: 0x0040, 0xd5: 0x0040, 0xd6: 0x0040, 0xd7: 0x0040, + 0xd8: 0x0040, 0xd9: 0x0040, 0xda: 0x0040, 0xdb: 0x0040, 0xdc: 0x0040, 0xdd: 0x0040, + 0xde: 0x0040, 0xdf: 0x0040, 0xe0: 0x000a, 0xe1: 0x0018, 0xe2: 0x0018, 0xe3: 0x0018, + 0xe4: 0x0018, 0xe5: 0x0018, 0xe6: 0x0018, 0xe7: 0x0018, 0xe8: 0x0012, 0xe9: 0x0018, + 0xea: 0x0019, 0xeb: 0x0018, 0xec: 0x0018, 0xed: 0x03c0, 0xee: 0x0018, 0xef: 0x0022, + 0xf0: 0x0018, 0xf1: 0x0018, 0xf2: 0x0029, 0xf3: 0x0031, 0xf4: 0x003a, 0xf5: 0x0005, + 0xf6: 0x0018, 0xf7: 0x0008, 0xf8: 0x0042, 0xf9: 0x0049, 0xfa: 0x0051, 0xfb: 0x0018, + 0xfc: 0x0059, 0xfd: 0x0061, 0xfe: 0x0069, 0xff: 0x0018, + // Block 0x4, offset 0x100 + 0x100: 0xe00d, 0x101: 0x0008, 0x102: 0xe00d, 0x103: 0x0008, 0x104: 0xe00d, 0x105: 0x0008, + 0x106: 0xe00d, 0x107: 0x0008, 0x108: 0xe00d, 0x109: 0x0008, 0x10a: 0xe00d, 0x10b: 0x0008, + 0x10c: 0xe00d, 0x10d: 0x0008, 0x10e: 0xe00d, 0x10f: 0x0008, 0x110: 0xe00d, 0x111: 0x0008, + 0x112: 0xe00d, 0x113: 0x0008, 0x114: 0xe00d, 0x115: 
0x0008, 0x116: 0xe00d, 0x117: 0x0008, + 0x118: 0xe00d, 0x119: 0x0008, 0x11a: 0xe00d, 0x11b: 0x0008, 0x11c: 0xe00d, 0x11d: 0x0008, + 0x11e: 0xe00d, 0x11f: 0x0008, 0x120: 0xe00d, 0x121: 0x0008, 0x122: 0xe00d, 0x123: 0x0008, + 0x124: 0xe00d, 0x125: 0x0008, 0x126: 0xe00d, 0x127: 0x0008, 0x128: 0xe00d, 0x129: 0x0008, + 0x12a: 0xe00d, 0x12b: 0x0008, 0x12c: 0xe00d, 0x12d: 0x0008, 0x12e: 0xe00d, 0x12f: 0x0008, + 0x130: 0x0071, 0x131: 0x0008, 0x132: 0x0035, 0x133: 0x004d, 0x134: 0xe00d, 0x135: 0x0008, + 0x136: 0xe00d, 0x137: 0x0008, 0x138: 0x0008, 0x139: 0xe01d, 0x13a: 0x0008, 0x13b: 0xe03d, + 0x13c: 0x0008, 0x13d: 0xe01d, 0x13e: 0x0008, 0x13f: 0x0079, + // Block 0x5, offset 0x140 + 0x140: 0x0079, 0x141: 0xe01d, 0x142: 0x0008, 0x143: 0xe03d, 0x144: 0x0008, 0x145: 0xe01d, + 0x146: 0x0008, 0x147: 0xe07d, 0x148: 0x0008, 0x149: 0x0081, 0x14a: 0xe00d, 0x14b: 0x0008, + 0x14c: 0xe00d, 0x14d: 0x0008, 0x14e: 0xe00d, 0x14f: 0x0008, 0x150: 0xe00d, 0x151: 0x0008, + 0x152: 0xe00d, 0x153: 0x0008, 0x154: 0xe00d, 0x155: 0x0008, 0x156: 0xe00d, 0x157: 0x0008, + 0x158: 0xe00d, 0x159: 0x0008, 0x15a: 0xe00d, 0x15b: 0x0008, 0x15c: 0xe00d, 0x15d: 0x0008, + 0x15e: 0xe00d, 0x15f: 0x0008, 0x160: 0xe00d, 0x161: 0x0008, 0x162: 0xe00d, 0x163: 0x0008, + 0x164: 0xe00d, 0x165: 0x0008, 0x166: 0xe00d, 0x167: 0x0008, 0x168: 0xe00d, 0x169: 0x0008, + 0x16a: 0xe00d, 0x16b: 0x0008, 0x16c: 0xe00d, 0x16d: 0x0008, 0x16e: 0xe00d, 0x16f: 0x0008, + 0x170: 0xe00d, 0x171: 0x0008, 0x172: 0xe00d, 0x173: 0x0008, 0x174: 0xe00d, 0x175: 0x0008, + 0x176: 0xe00d, 0x177: 0x0008, 0x178: 0x0065, 0x179: 0xe01d, 0x17a: 0x0008, 0x17b: 0xe03d, + 0x17c: 0x0008, 0x17d: 0xe01d, 0x17e: 0x0008, 0x17f: 0x0089, + // Block 0x6, offset 0x180 + 0x180: 0x0008, 0x181: 0x007d, 0x182: 0xe00d, 0x183: 0x0008, 0x184: 0xe00d, 0x185: 0x0008, + 0x186: 0x007d, 0x187: 0xe07d, 0x188: 0x0008, 0x189: 0x0095, 0x18a: 0x00ad, 0x18b: 0xe03d, + 0x18c: 0x0008, 0x18d: 0x0008, 0x18e: 0x00c5, 0x18f: 0x00dd, 0x190: 0x00f5, 0x191: 0xe01d, + 0x192: 0x0008, 0x193: 0x010d, 0x194: 0x0125, 0x195: 0x0008, 0x196: 0x013d, 0x197: 0x013d, + 0x198: 0xe00d, 0x199: 0x0008, 0x19a: 0x0008, 0x19b: 0x0008, 0x19c: 0x010d, 0x19d: 0x0155, + 0x19e: 0x0008, 0x19f: 0x016d, 0x1a0: 0xe00d, 0x1a1: 0x0008, 0x1a2: 0xe00d, 0x1a3: 0x0008, + 0x1a4: 0xe00d, 0x1a5: 0x0008, 0x1a6: 0x0185, 0x1a7: 0xe07d, 0x1a8: 0x0008, 0x1a9: 0x019d, + 0x1aa: 0x0008, 0x1ab: 0x0008, 0x1ac: 0xe00d, 0x1ad: 0x0008, 0x1ae: 0x0185, 0x1af: 0xe0fd, + 0x1b0: 0x0008, 0x1b1: 0x01b5, 0x1b2: 0x01cd, 0x1b3: 0xe03d, 0x1b4: 0x0008, 0x1b5: 0xe01d, + 0x1b6: 0x0008, 0x1b7: 0x01e5, 0x1b8: 0xe00d, 0x1b9: 0x0008, 0x1ba: 0x0008, 0x1bb: 0x0008, + 0x1bc: 0xe00d, 0x1bd: 0x0008, 0x1be: 0x0008, 0x1bf: 0x0008, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x0008, 0x1c1: 0x0008, 0x1c2: 0x0008, 0x1c3: 0x0008, 0x1c4: 0x0091, 0x1c5: 0x0091, + 0x1c6: 0x0091, 0x1c7: 0x01fd, 0x1c8: 0x0215, 0x1c9: 0x022d, 0x1ca: 0x0245, 0x1cb: 0x025d, + 0x1cc: 0x0275, 0x1cd: 0xe01d, 0x1ce: 0x0008, 0x1cf: 0xe0fd, 0x1d0: 0x0008, 0x1d1: 0xe01d, + 0x1d2: 0x0008, 0x1d3: 0xe03d, 0x1d4: 0x0008, 0x1d5: 0xe01d, 0x1d6: 0x0008, 0x1d7: 0xe07d, + 0x1d8: 0x0008, 0x1d9: 0xe01d, 0x1da: 0x0008, 0x1db: 0xe03d, 0x1dc: 0x0008, 0x1dd: 0x0008, + 0x1de: 0xe00d, 0x1df: 0x0008, 0x1e0: 0xe00d, 0x1e1: 0x0008, 0x1e2: 0xe00d, 0x1e3: 0x0008, + 0x1e4: 0xe00d, 0x1e5: 0x0008, 0x1e6: 0xe00d, 0x1e7: 0x0008, 0x1e8: 0xe00d, 0x1e9: 0x0008, + 0x1ea: 0xe00d, 0x1eb: 0x0008, 0x1ec: 0xe00d, 0x1ed: 0x0008, 0x1ee: 0xe00d, 0x1ef: 0x0008, + 0x1f0: 0x0008, 0x1f1: 0x028d, 0x1f2: 0x02a5, 0x1f3: 0x02bd, 0x1f4: 0xe00d, 0x1f5: 0x0008, + 0x1f6: 0x02d5, 0x1f7: 
0x02ed, 0x1f8: 0xe00d, 0x1f9: 0x0008, 0x1fa: 0xe00d, 0x1fb: 0x0008, + 0x1fc: 0xe00d, 0x1fd: 0x0008, 0x1fe: 0xe00d, 0x1ff: 0x0008, + // Block 0x8, offset 0x200 + 0x200: 0xe00d, 0x201: 0x0008, 0x202: 0xe00d, 0x203: 0x0008, 0x204: 0xe00d, 0x205: 0x0008, + 0x206: 0xe00d, 0x207: 0x0008, 0x208: 0xe00d, 0x209: 0x0008, 0x20a: 0xe00d, 0x20b: 0x0008, + 0x20c: 0xe00d, 0x20d: 0x0008, 0x20e: 0xe00d, 0x20f: 0x0008, 0x210: 0xe00d, 0x211: 0x0008, + 0x212: 0xe00d, 0x213: 0x0008, 0x214: 0xe00d, 0x215: 0x0008, 0x216: 0xe00d, 0x217: 0x0008, + 0x218: 0xe00d, 0x219: 0x0008, 0x21a: 0xe00d, 0x21b: 0x0008, 0x21c: 0xe00d, 0x21d: 0x0008, + 0x21e: 0xe00d, 0x21f: 0x0008, 0x220: 0x0305, 0x221: 0x0008, 0x222: 0xe00d, 0x223: 0x0008, + 0x224: 0xe00d, 0x225: 0x0008, 0x226: 0xe00d, 0x227: 0x0008, 0x228: 0xe00d, 0x229: 0x0008, + 0x22a: 0xe00d, 0x22b: 0x0008, 0x22c: 0xe00d, 0x22d: 0x0008, 0x22e: 0xe00d, 0x22f: 0x0008, + 0x230: 0xe00d, 0x231: 0x0008, 0x232: 0xe00d, 0x233: 0x0008, 0x234: 0x0008, 0x235: 0x0008, + 0x236: 0x0008, 0x237: 0x0008, 0x238: 0x0008, 0x239: 0x0008, 0x23a: 0x0099, 0x23b: 0xe03d, + 0x23c: 0x0008, 0x23d: 0x031d, 0x23e: 0x00a1, 0x23f: 0x0008, + // Block 0x9, offset 0x240 + 0x240: 0x0008, 0x241: 0x0008, 0x242: 0x0018, 0x243: 0x0018, 0x244: 0x0018, 0x245: 0x0018, + 0x246: 0x0008, 0x247: 0x0008, 0x248: 0x0008, 0x249: 0x0008, 0x24a: 0x0008, 0x24b: 0x0008, + 0x24c: 0x0008, 0x24d: 0x0008, 0x24e: 0x0008, 0x24f: 0x0008, 0x250: 0x0008, 0x251: 0x0008, + 0x252: 0x0018, 0x253: 0x0018, 0x254: 0x0018, 0x255: 0x0018, 0x256: 0x0018, 0x257: 0x0018, + 0x258: 0x00d2, 0x259: 0x00da, 0x25a: 0x00e2, 0x25b: 0x00ea, 0x25c: 0x00f2, 0x25d: 0x00fa, + 0x25e: 0x0018, 0x25f: 0x0018, 0x260: 0x03ad, 0x261: 0x0101, 0x262: 0x0089, 0x263: 0x0109, + 0x264: 0x03c5, 0x265: 0x0018, 0x266: 0x0018, 0x267: 0x0018, 0x268: 0x0018, 0x269: 0x0018, + 0x26a: 0x0018, 0x26b: 0x0018, 0x26c: 0x0008, 0x26d: 0x0018, 0x26e: 0x0008, 0x26f: 0x0018, + 0x270: 0x0018, 0x271: 0x0018, 0x272: 0x0018, 0x273: 0x0018, 0x274: 0x0018, 0x275: 0x0018, + 0x276: 0x0018, 0x277: 0x0018, 0x278: 0x0018, 0x279: 0x0018, 0x27a: 0x0018, 0x27b: 0x0018, + 0x27c: 0x0018, 0x27d: 0x0018, 0x27e: 0x0018, 0x27f: 0x0018, + // Block 0xa, offset 0x280 + 0x280: 0x03dd, 0x281: 0x03dd, 0x282: 0x3308, 0x283: 0x03f5, 0x284: 0x0111, 0x285: 0x040d, + 0x286: 0x3308, 0x287: 0x3308, 0x288: 0x3308, 0x289: 0x3308, 0x28a: 0x3308, 0x28b: 0x3308, + 0x28c: 0x3308, 0x28d: 0x3308, 0x28e: 0x3308, 0x28f: 0x33c0, 0x290: 0x3308, 0x291: 0x3308, + 0x292: 0x3308, 0x293: 0x3308, 0x294: 0x3308, 0x295: 0x3308, 0x296: 0x3308, 0x297: 0x3308, + 0x298: 0x3308, 0x299: 0x3308, 0x29a: 0x3308, 0x29b: 0x3308, 0x29c: 0x3308, 0x29d: 0x3308, + 0x29e: 0x3308, 0x29f: 0x3308, 0x2a0: 0x3308, 0x2a1: 0x3308, 0x2a2: 0x3308, 0x2a3: 0x3308, + 0x2a4: 0x3308, 0x2a5: 0x3308, 0x2a6: 0x3308, 0x2a7: 0x3308, 0x2a8: 0x3308, 0x2a9: 0x3308, + 0x2aa: 0x3308, 0x2ab: 0x3308, 0x2ac: 0x3308, 0x2ad: 0x3308, 0x2ae: 0x3308, 0x2af: 0x3308, + 0x2b0: 0xe00d, 0x2b1: 0x0008, 0x2b2: 0xe00d, 0x2b3: 0x0008, 0x2b4: 0x0425, 0x2b5: 0x0008, + 0x2b6: 0xe00d, 0x2b7: 0x0008, 0x2b8: 0x0040, 0x2b9: 0x0040, 0x2ba: 0x011a, 0x2bb: 0x0008, + 0x2bc: 0x0008, 0x2bd: 0x0008, 0x2be: 0x0122, 0x2bf: 0x043d, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x0040, 0x2c1: 0x0040, 0x2c2: 0x0040, 0x2c3: 0x0040, 0x2c4: 0x003a, 0x2c5: 0x012a, + 0x2c6: 0xe155, 0x2c7: 0x0455, 0x2c8: 0xe12d, 0x2c9: 0xe13d, 0x2ca: 0xe12d, 0x2cb: 0x0040, + 0x2cc: 0x03dd, 0x2cd: 0x0040, 0x2ce: 0x046d, 0x2cf: 0x0485, 0x2d0: 0x0008, 0x2d1: 0xe105, + 0x2d2: 0xe105, 0x2d3: 0xe105, 0x2d4: 0xe105, 0x2d5: 0xe105, 0x2d6: 0xe105, 0x2d7: 
0xe105, + 0x2d8: 0xe105, 0x2d9: 0xe105, 0x2da: 0xe105, 0x2db: 0xe105, 0x2dc: 0xe105, 0x2dd: 0xe105, + 0x2de: 0xe105, 0x2df: 0xe105, 0x2e0: 0x049d, 0x2e1: 0x049d, 0x2e2: 0x0040, 0x2e3: 0x049d, + 0x2e4: 0x049d, 0x2e5: 0x049d, 0x2e6: 0x049d, 0x2e7: 0x049d, 0x2e8: 0x049d, 0x2e9: 0x049d, + 0x2ea: 0x049d, 0x2eb: 0x049d, 0x2ec: 0x0008, 0x2ed: 0x0008, 0x2ee: 0x0008, 0x2ef: 0x0008, + 0x2f0: 0x0008, 0x2f1: 0x0008, 0x2f2: 0x0008, 0x2f3: 0x0008, 0x2f4: 0x0008, 0x2f5: 0x0008, + 0x2f6: 0x0008, 0x2f7: 0x0008, 0x2f8: 0x0008, 0x2f9: 0x0008, 0x2fa: 0x0008, 0x2fb: 0x0008, + 0x2fc: 0x0008, 0x2fd: 0x0008, 0x2fe: 0x0008, 0x2ff: 0x0008, + // Block 0xc, offset 0x300 + 0x300: 0x0008, 0x301: 0x0008, 0x302: 0xe00f, 0x303: 0x0008, 0x304: 0x0008, 0x305: 0x0008, + 0x306: 0x0008, 0x307: 0x0008, 0x308: 0x0008, 0x309: 0x0008, 0x30a: 0x0008, 0x30b: 0x0008, + 0x30c: 0x0008, 0x30d: 0x0008, 0x30e: 0x0008, 0x30f: 0xe0c5, 0x310: 0x04b5, 0x311: 0x04cd, + 0x312: 0xe0bd, 0x313: 0xe0f5, 0x314: 0xe0fd, 0x315: 0xe09d, 0x316: 0xe0b5, 0x317: 0x0008, + 0x318: 0xe00d, 0x319: 0x0008, 0x31a: 0xe00d, 0x31b: 0x0008, 0x31c: 0xe00d, 0x31d: 0x0008, + 0x31e: 0xe00d, 0x31f: 0x0008, 0x320: 0xe00d, 0x321: 0x0008, 0x322: 0xe00d, 0x323: 0x0008, + 0x324: 0xe00d, 0x325: 0x0008, 0x326: 0xe00d, 0x327: 0x0008, 0x328: 0xe00d, 0x329: 0x0008, + 0x32a: 0xe00d, 0x32b: 0x0008, 0x32c: 0xe00d, 0x32d: 0x0008, 0x32e: 0xe00d, 0x32f: 0x0008, + 0x330: 0x04e5, 0x331: 0xe185, 0x332: 0xe18d, 0x333: 0x0008, 0x334: 0x04fd, 0x335: 0x03dd, + 0x336: 0x0018, 0x337: 0xe07d, 0x338: 0x0008, 0x339: 0xe1d5, 0x33a: 0xe00d, 0x33b: 0x0008, + 0x33c: 0x0008, 0x33d: 0x0515, 0x33e: 0x052d, 0x33f: 0x052d, + // Block 0xd, offset 0x340 + 0x340: 0x0008, 0x341: 0x0008, 0x342: 0x0008, 0x343: 0x0008, 0x344: 0x0008, 0x345: 0x0008, + 0x346: 0x0008, 0x347: 0x0008, 0x348: 0x0008, 0x349: 0x0008, 0x34a: 0x0008, 0x34b: 0x0008, + 0x34c: 0x0008, 0x34d: 0x0008, 0x34e: 0x0008, 0x34f: 0x0008, 0x350: 0x0008, 0x351: 0x0008, + 0x352: 0x0008, 0x353: 0x0008, 0x354: 0x0008, 0x355: 0x0008, 0x356: 0x0008, 0x357: 0x0008, + 0x358: 0x0008, 0x359: 0x0008, 0x35a: 0x0008, 0x35b: 0x0008, 0x35c: 0x0008, 0x35d: 0x0008, + 0x35e: 0x0008, 0x35f: 0x0008, 0x360: 0xe00d, 0x361: 0x0008, 0x362: 0xe00d, 0x363: 0x0008, + 0x364: 0xe00d, 0x365: 0x0008, 0x366: 0xe00d, 0x367: 0x0008, 0x368: 0xe00d, 0x369: 0x0008, + 0x36a: 0xe00d, 0x36b: 0x0008, 0x36c: 0xe00d, 0x36d: 0x0008, 0x36e: 0xe00d, 0x36f: 0x0008, + 0x370: 0xe00d, 0x371: 0x0008, 0x372: 0xe00d, 0x373: 0x0008, 0x374: 0xe00d, 0x375: 0x0008, + 0x376: 0xe00d, 0x377: 0x0008, 0x378: 0xe00d, 0x379: 0x0008, 0x37a: 0xe00d, 0x37b: 0x0008, + 0x37c: 0xe00d, 0x37d: 0x0008, 0x37e: 0xe00d, 0x37f: 0x0008, + // Block 0xe, offset 0x380 + 0x380: 0xe00d, 0x381: 0x0008, 0x382: 0x0018, 0x383: 0x3308, 0x384: 0x3308, 0x385: 0x3308, + 0x386: 0x3308, 0x387: 0x3308, 0x388: 0x3318, 0x389: 0x3318, 0x38a: 0xe00d, 0x38b: 0x0008, + 0x38c: 0xe00d, 0x38d: 0x0008, 0x38e: 0xe00d, 0x38f: 0x0008, 0x390: 0xe00d, 0x391: 0x0008, + 0x392: 0xe00d, 0x393: 0x0008, 0x394: 0xe00d, 0x395: 0x0008, 0x396: 0xe00d, 0x397: 0x0008, + 0x398: 0xe00d, 0x399: 0x0008, 0x39a: 0xe00d, 0x39b: 0x0008, 0x39c: 0xe00d, 0x39d: 0x0008, + 0x39e: 0xe00d, 0x39f: 0x0008, 0x3a0: 0xe00d, 0x3a1: 0x0008, 0x3a2: 0xe00d, 0x3a3: 0x0008, + 0x3a4: 0xe00d, 0x3a5: 0x0008, 0x3a6: 0xe00d, 0x3a7: 0x0008, 0x3a8: 0xe00d, 0x3a9: 0x0008, + 0x3aa: 0xe00d, 0x3ab: 0x0008, 0x3ac: 0xe00d, 0x3ad: 0x0008, 0x3ae: 0xe00d, 0x3af: 0x0008, + 0x3b0: 0xe00d, 0x3b1: 0x0008, 0x3b2: 0xe00d, 0x3b3: 0x0008, 0x3b4: 0xe00d, 0x3b5: 0x0008, + 0x3b6: 0xe00d, 0x3b7: 0x0008, 0x3b8: 0xe00d, 0x3b9: 
0x0008, 0x3ba: 0xe00d, 0x3bb: 0x0008, + 0x3bc: 0xe00d, 0x3bd: 0x0008, 0x3be: 0xe00d, 0x3bf: 0x0008, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x0040, 0x3c1: 0xe01d, 0x3c2: 0x0008, 0x3c3: 0xe03d, 0x3c4: 0x0008, 0x3c5: 0xe01d, + 0x3c6: 0x0008, 0x3c7: 0xe07d, 0x3c8: 0x0008, 0x3c9: 0xe01d, 0x3ca: 0x0008, 0x3cb: 0xe03d, + 0x3cc: 0x0008, 0x3cd: 0xe01d, 0x3ce: 0x0008, 0x3cf: 0x0008, 0x3d0: 0xe00d, 0x3d1: 0x0008, + 0x3d2: 0xe00d, 0x3d3: 0x0008, 0x3d4: 0xe00d, 0x3d5: 0x0008, 0x3d6: 0xe00d, 0x3d7: 0x0008, + 0x3d8: 0xe00d, 0x3d9: 0x0008, 0x3da: 0xe00d, 0x3db: 0x0008, 0x3dc: 0xe00d, 0x3dd: 0x0008, + 0x3de: 0xe00d, 0x3df: 0x0008, 0x3e0: 0xe00d, 0x3e1: 0x0008, 0x3e2: 0xe00d, 0x3e3: 0x0008, + 0x3e4: 0xe00d, 0x3e5: 0x0008, 0x3e6: 0xe00d, 0x3e7: 0x0008, 0x3e8: 0xe00d, 0x3e9: 0x0008, + 0x3ea: 0xe00d, 0x3eb: 0x0008, 0x3ec: 0xe00d, 0x3ed: 0x0008, 0x3ee: 0xe00d, 0x3ef: 0x0008, + 0x3f0: 0xe00d, 0x3f1: 0x0008, 0x3f2: 0xe00d, 0x3f3: 0x0008, 0x3f4: 0xe00d, 0x3f5: 0x0008, + 0x3f6: 0xe00d, 0x3f7: 0x0008, 0x3f8: 0xe00d, 0x3f9: 0x0008, 0x3fa: 0xe00d, 0x3fb: 0x0008, + 0x3fc: 0xe00d, 0x3fd: 0x0008, 0x3fe: 0xe00d, 0x3ff: 0x0008, + // Block 0x10, offset 0x400 + 0x400: 0xe00d, 0x401: 0x0008, 0x402: 0xe00d, 0x403: 0x0008, 0x404: 0xe00d, 0x405: 0x0008, + 0x406: 0xe00d, 0x407: 0x0008, 0x408: 0xe00d, 0x409: 0x0008, 0x40a: 0xe00d, 0x40b: 0x0008, + 0x40c: 0xe00d, 0x40d: 0x0008, 0x40e: 0xe00d, 0x40f: 0x0008, 0x410: 0xe00d, 0x411: 0x0008, + 0x412: 0xe00d, 0x413: 0x0008, 0x414: 0xe00d, 0x415: 0x0008, 0x416: 0xe00d, 0x417: 0x0008, + 0x418: 0xe00d, 0x419: 0x0008, 0x41a: 0xe00d, 0x41b: 0x0008, 0x41c: 0xe00d, 0x41d: 0x0008, + 0x41e: 0xe00d, 0x41f: 0x0008, 0x420: 0xe00d, 0x421: 0x0008, 0x422: 0xe00d, 0x423: 0x0008, + 0x424: 0xe00d, 0x425: 0x0008, 0x426: 0xe00d, 0x427: 0x0008, 0x428: 0xe00d, 0x429: 0x0008, + 0x42a: 0xe00d, 0x42b: 0x0008, 0x42c: 0xe00d, 0x42d: 0x0008, 0x42e: 0xe00d, 0x42f: 0x0008, + 0x430: 0x0040, 0x431: 0x03f5, 0x432: 0x03f5, 0x433: 0x03f5, 0x434: 0x03f5, 0x435: 0x03f5, + 0x436: 0x03f5, 0x437: 0x03f5, 0x438: 0x03f5, 0x439: 0x03f5, 0x43a: 0x03f5, 0x43b: 0x03f5, + 0x43c: 0x03f5, 0x43d: 0x03f5, 0x43e: 0x03f5, 0x43f: 0x03f5, + // Block 0x11, offset 0x440 + 0x440: 0x0840, 0x441: 0x0840, 0x442: 0x0840, 0x443: 0x0840, 0x444: 0x0840, 0x445: 0x0840, + 0x446: 0x0018, 0x447: 0x0018, 0x448: 0x0818, 0x449: 0x0018, 0x44a: 0x0018, 0x44b: 0x0818, + 0x44c: 0x0018, 0x44d: 0x0818, 0x44e: 0x0018, 0x44f: 0x0018, 0x450: 0x3308, 0x451: 0x3308, + 0x452: 0x3308, 0x453: 0x3308, 0x454: 0x3308, 0x455: 0x3308, 0x456: 0x3308, 0x457: 0x3308, + 0x458: 0x3308, 0x459: 0x3308, 0x45a: 0x3308, 0x45b: 0x0818, 0x45c: 0x0b40, 0x45d: 0x0818, + 0x45e: 0x0818, 0x45f: 0x0818, 0x460: 0x0a08, 0x461: 0x0808, 0x462: 0x0c08, 0x463: 0x0c08, + 0x464: 0x0c08, 0x465: 0x0c08, 0x466: 0x0a08, 0x467: 0x0c08, 0x468: 0x0a08, 0x469: 0x0c08, + 0x46a: 0x0a08, 0x46b: 0x0a08, 0x46c: 0x0a08, 0x46d: 0x0a08, 0x46e: 0x0a08, 0x46f: 0x0c08, + 0x470: 0x0c08, 0x471: 0x0c08, 0x472: 0x0c08, 0x473: 0x0a08, 0x474: 0x0a08, 0x475: 0x0a08, + 0x476: 0x0a08, 0x477: 0x0a08, 0x478: 0x0a08, 0x479: 0x0a08, 0x47a: 0x0a08, 0x47b: 0x0a08, + 0x47c: 0x0a08, 0x47d: 0x0a08, 0x47e: 0x0a08, 0x47f: 0x0a08, + // Block 0x12, offset 0x480 + 0x480: 0x0818, 0x481: 0x0a08, 0x482: 0x0a08, 0x483: 0x0a08, 0x484: 0x0a08, 0x485: 0x0a08, + 0x486: 0x0a08, 0x487: 0x0a08, 0x488: 0x0c08, 0x489: 0x0a08, 0x48a: 0x0a08, 0x48b: 0x3308, + 0x48c: 0x3308, 0x48d: 0x3308, 0x48e: 0x3308, 0x48f: 0x3308, 0x490: 0x3308, 0x491: 0x3308, + 0x492: 0x3308, 0x493: 0x3308, 0x494: 0x3308, 0x495: 0x3308, 0x496: 0x3308, 0x497: 0x3308, + 0x498: 0x3308, 
0x499: 0x3308, 0x49a: 0x3308, 0x49b: 0x3308, 0x49c: 0x3308, 0x49d: 0x3308, + 0x49e: 0x3308, 0x49f: 0x3308, 0x4a0: 0x0808, 0x4a1: 0x0808, 0x4a2: 0x0808, 0x4a3: 0x0808, + 0x4a4: 0x0808, 0x4a5: 0x0808, 0x4a6: 0x0808, 0x4a7: 0x0808, 0x4a8: 0x0808, 0x4a9: 0x0808, + 0x4aa: 0x0018, 0x4ab: 0x0818, 0x4ac: 0x0818, 0x4ad: 0x0818, 0x4ae: 0x0a08, 0x4af: 0x0a08, + 0x4b0: 0x3308, 0x4b1: 0x0c08, 0x4b2: 0x0c08, 0x4b3: 0x0c08, 0x4b4: 0x0808, 0x4b5: 0x0139, + 0x4b6: 0x0141, 0x4b7: 0x0149, 0x4b8: 0x0151, 0x4b9: 0x0a08, 0x4ba: 0x0a08, 0x4bb: 0x0a08, + 0x4bc: 0x0a08, 0x4bd: 0x0a08, 0x4be: 0x0a08, 0x4bf: 0x0a08, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x0c08, 0x4c1: 0x0a08, 0x4c2: 0x0a08, 0x4c3: 0x0c08, 0x4c4: 0x0c08, 0x4c5: 0x0c08, + 0x4c6: 0x0c08, 0x4c7: 0x0c08, 0x4c8: 0x0c08, 0x4c9: 0x0c08, 0x4ca: 0x0c08, 0x4cb: 0x0c08, + 0x4cc: 0x0a08, 0x4cd: 0x0c08, 0x4ce: 0x0a08, 0x4cf: 0x0c08, 0x4d0: 0x0a08, 0x4d1: 0x0a08, + 0x4d2: 0x0c08, 0x4d3: 0x0c08, 0x4d4: 0x0818, 0x4d5: 0x0c08, 0x4d6: 0x3308, 0x4d7: 0x3308, + 0x4d8: 0x3308, 0x4d9: 0x3308, 0x4da: 0x3308, 0x4db: 0x3308, 0x4dc: 0x3308, 0x4dd: 0x0840, + 0x4de: 0x0018, 0x4df: 0x3308, 0x4e0: 0x3308, 0x4e1: 0x3308, 0x4e2: 0x3308, 0x4e3: 0x3308, + 0x4e4: 0x3308, 0x4e5: 0x0808, 0x4e6: 0x0808, 0x4e7: 0x3308, 0x4e8: 0x3308, 0x4e9: 0x0018, + 0x4ea: 0x3308, 0x4eb: 0x3308, 0x4ec: 0x3308, 0x4ed: 0x3308, 0x4ee: 0x0c08, 0x4ef: 0x0c08, + 0x4f0: 0x0008, 0x4f1: 0x0008, 0x4f2: 0x0008, 0x4f3: 0x0008, 0x4f4: 0x0008, 0x4f5: 0x0008, + 0x4f6: 0x0008, 0x4f7: 0x0008, 0x4f8: 0x0008, 0x4f9: 0x0008, 0x4fa: 0x0a08, 0x4fb: 0x0a08, + 0x4fc: 0x0a08, 0x4fd: 0x0808, 0x4fe: 0x0808, 0x4ff: 0x0a08, + // Block 0x14, offset 0x500 + 0x500: 0x0818, 0x501: 0x0818, 0x502: 0x0818, 0x503: 0x0818, 0x504: 0x0818, 0x505: 0x0818, + 0x506: 0x0818, 0x507: 0x0818, 0x508: 0x0818, 0x509: 0x0818, 0x50a: 0x0818, 0x50b: 0x0818, + 0x50c: 0x0818, 0x50d: 0x0818, 0x50e: 0x0040, 0x50f: 0x0b40, 0x510: 0x0c08, 0x511: 0x3308, + 0x512: 0x0a08, 0x513: 0x0a08, 0x514: 0x0a08, 0x515: 0x0c08, 0x516: 0x0c08, 0x517: 0x0c08, + 0x518: 0x0c08, 0x519: 0x0c08, 0x51a: 0x0a08, 0x51b: 0x0a08, 0x51c: 0x0a08, 0x51d: 0x0a08, + 0x51e: 0x0c08, 0x51f: 0x0a08, 0x520: 0x0a08, 0x521: 0x0a08, 0x522: 0x0a08, 0x523: 0x0a08, + 0x524: 0x0a08, 0x525: 0x0a08, 0x526: 0x0a08, 0x527: 0x0a08, 0x528: 0x0c08, 0x529: 0x0a08, + 0x52a: 0x0c08, 0x52b: 0x0a08, 0x52c: 0x0c08, 0x52d: 0x0a08, 0x52e: 0x0a08, 0x52f: 0x0c08, + 0x530: 0x3308, 0x531: 0x3308, 0x532: 0x3308, 0x533: 0x3308, 0x534: 0x3308, 0x535: 0x3308, + 0x536: 0x3308, 0x537: 0x3308, 0x538: 0x3308, 0x539: 0x3308, 0x53a: 0x3308, 0x53b: 0x3308, + 0x53c: 0x3308, 0x53d: 0x3308, 0x53e: 0x3308, 0x53f: 0x3308, + // Block 0x15, offset 0x540 + 0x540: 0x0c08, 0x541: 0x0a08, 0x542: 0x0a08, 0x543: 0x0a08, 0x544: 0x0a08, 0x545: 0x0a08, + 0x546: 0x0c08, 0x547: 0x0c08, 0x548: 0x0a08, 0x549: 0x0c08, 0x54a: 0x0a08, 0x54b: 0x0a08, + 0x54c: 0x0a08, 0x54d: 0x0a08, 0x54e: 0x0a08, 0x54f: 0x0a08, 0x550: 0x0a08, 0x551: 0x0a08, + 0x552: 0x0a08, 0x553: 0x0a08, 0x554: 0x0c08, 0x555: 0x0a08, 0x556: 0x0c08, 0x557: 0x0c08, + 0x558: 0x0c08, 0x559: 0x3308, 0x55a: 0x3308, 0x55b: 0x3308, 0x55c: 0x0040, 0x55d: 0x0040, + 0x55e: 0x0818, 0x55f: 0x0040, 0x560: 0x0a08, 0x561: 0x0808, 0x562: 0x0a08, 0x563: 0x0a08, + 0x564: 0x0a08, 0x565: 0x0a08, 0x566: 0x0808, 0x567: 0x0c08, 0x568: 0x0a08, 0x569: 0x0c08, + 0x56a: 0x0c08, 0x56b: 0x0040, 0x56c: 0x0040, 0x56d: 0x0040, 0x56e: 0x0040, 0x56f: 0x0040, + 0x570: 0x0c08, 0x571: 0x0c08, 0x572: 0x0c08, 0x573: 0x0c08, 0x574: 0x0c08, 0x575: 0x0c08, + 0x576: 0x0c08, 0x577: 0x0c08, 0x578: 0x0c08, 0x579: 0x0c08, 0x57a: 
0x0c08, 0x57b: 0x0c08, + 0x57c: 0x0c08, 0x57d: 0x0c08, 0x57e: 0x0c08, 0x57f: 0x0c08, + // Block 0x16, offset 0x580 + 0x580: 0x0c08, 0x581: 0x0c08, 0x582: 0x0c08, 0x583: 0x0808, 0x584: 0x0808, 0x585: 0x0808, + 0x586: 0x0a08, 0x587: 0x0808, 0x588: 0x0818, 0x589: 0x0a08, 0x58a: 0x0a08, 0x58b: 0x0a08, + 0x58c: 0x0a08, 0x58d: 0x0a08, 0x58e: 0x0c08, 0x58f: 0x0040, 0x590: 0x0840, 0x591: 0x0840, + 0x592: 0x0040, 0x593: 0x0040, 0x594: 0x0040, 0x595: 0x0040, 0x596: 0x0040, 0x597: 0x0040, + 0x598: 0x3308, 0x599: 0x3308, 0x59a: 0x3308, 0x59b: 0x3308, 0x59c: 0x3308, 0x59d: 0x3308, + 0x59e: 0x3308, 0x59f: 0x3308, 0x5a0: 0x0a08, 0x5a1: 0x0a08, 0x5a2: 0x0a08, 0x5a3: 0x0a08, + 0x5a4: 0x0a08, 0x5a5: 0x0a08, 0x5a6: 0x0a08, 0x5a7: 0x0a08, 0x5a8: 0x0a08, 0x5a9: 0x0a08, + 0x5aa: 0x0c08, 0x5ab: 0x0c08, 0x5ac: 0x0c08, 0x5ad: 0x0808, 0x5ae: 0x0c08, 0x5af: 0x0a08, + 0x5b0: 0x0a08, 0x5b1: 0x0c08, 0x5b2: 0x0c08, 0x5b3: 0x0a08, 0x5b4: 0x0a08, 0x5b5: 0x0a08, + 0x5b6: 0x0a08, 0x5b7: 0x0a08, 0x5b8: 0x0a08, 0x5b9: 0x0c08, 0x5ba: 0x0a08, 0x5bb: 0x0a08, + 0x5bc: 0x0a08, 0x5bd: 0x0a08, 0x5be: 0x0a08, 0x5bf: 0x0a08, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x3008, 0x5c1: 0x3308, 0x5c2: 0x3308, 0x5c3: 0x3308, 0x5c4: 0x3308, 0x5c5: 0x3308, + 0x5c6: 0x3308, 0x5c7: 0x3308, 0x5c8: 0x3308, 0x5c9: 0x3008, 0x5ca: 0x3008, 0x5cb: 0x3008, + 0x5cc: 0x3008, 0x5cd: 0x3b08, 0x5ce: 0x3008, 0x5cf: 0x3008, 0x5d0: 0x0008, 0x5d1: 0x3308, + 0x5d2: 0x3308, 0x5d3: 0x3308, 0x5d4: 0x3308, 0x5d5: 0x3308, 0x5d6: 0x3308, 0x5d7: 0x3308, + 0x5d8: 0x0159, 0x5d9: 0x0161, 0x5da: 0x0169, 0x5db: 0x0171, 0x5dc: 0x0179, 0x5dd: 0x0181, + 0x5de: 0x0189, 0x5df: 0x0191, 0x5e0: 0x0008, 0x5e1: 0x0008, 0x5e2: 0x3308, 0x5e3: 0x3308, + 0x5e4: 0x0018, 0x5e5: 0x0018, 0x5e6: 0x0008, 0x5e7: 0x0008, 0x5e8: 0x0008, 0x5e9: 0x0008, + 0x5ea: 0x0008, 0x5eb: 0x0008, 0x5ec: 0x0008, 0x5ed: 0x0008, 0x5ee: 0x0008, 0x5ef: 0x0008, + 0x5f0: 0x0018, 0x5f1: 0x0008, 0x5f2: 0x0008, 0x5f3: 0x0008, 0x5f4: 0x0008, 0x5f5: 0x0008, + 0x5f6: 0x0008, 0x5f7: 0x0008, 0x5f8: 0x0008, 0x5f9: 0x0008, 0x5fa: 0x0008, 0x5fb: 0x0008, + 0x5fc: 0x0008, 0x5fd: 0x0008, 0x5fe: 0x0008, 0x5ff: 0x0008, + // Block 0x18, offset 0x600 + 0x600: 0x0008, 0x601: 0x3308, 0x602: 0x3008, 0x603: 0x3008, 0x604: 0x0040, 0x605: 0x0008, + 0x606: 0x0008, 0x607: 0x0008, 0x608: 0x0008, 0x609: 0x0008, 0x60a: 0x0008, 0x60b: 0x0008, + 0x60c: 0x0008, 0x60d: 0x0040, 0x60e: 0x0040, 0x60f: 0x0008, 0x610: 0x0008, 0x611: 0x0040, + 0x612: 0x0040, 0x613: 0x0008, 0x614: 0x0008, 0x615: 0x0008, 0x616: 0x0008, 0x617: 0x0008, + 0x618: 0x0008, 0x619: 0x0008, 0x61a: 0x0008, 0x61b: 0x0008, 0x61c: 0x0008, 0x61d: 0x0008, + 0x61e: 0x0008, 0x61f: 0x0008, 0x620: 0x0008, 0x621: 0x0008, 0x622: 0x0008, 0x623: 0x0008, + 0x624: 0x0008, 0x625: 0x0008, 0x626: 0x0008, 0x627: 0x0008, 0x628: 0x0008, 0x629: 0x0040, + 0x62a: 0x0008, 0x62b: 0x0008, 0x62c: 0x0008, 0x62d: 0x0008, 0x62e: 0x0008, 0x62f: 0x0008, + 0x630: 0x0008, 0x631: 0x0040, 0x632: 0x0008, 0x633: 0x0040, 0x634: 0x0040, 0x635: 0x0040, + 0x636: 0x0008, 0x637: 0x0008, 0x638: 0x0008, 0x639: 0x0008, 0x63a: 0x0040, 0x63b: 0x0040, + 0x63c: 0x3308, 0x63d: 0x0008, 0x63e: 0x3008, 0x63f: 0x3008, + // Block 0x19, offset 0x640 + 0x640: 0x3008, 0x641: 0x3308, 0x642: 0x3308, 0x643: 0x3308, 0x644: 0x3308, 0x645: 0x0040, + 0x646: 0x0040, 0x647: 0x3008, 0x648: 0x3008, 0x649: 0x0040, 0x64a: 0x0040, 0x64b: 0x3008, + 0x64c: 0x3008, 0x64d: 0x3b08, 0x64e: 0x0008, 0x64f: 0x0040, 0x650: 0x0040, 0x651: 0x0040, + 0x652: 0x0040, 0x653: 0x0040, 0x654: 0x0040, 0x655: 0x0040, 0x656: 0x0040, 0x657: 0x3008, + 0x658: 0x0040, 0x659: 0x0040, 
0x65a: 0x0040, 0x65b: 0x0040, 0x65c: 0x0199, 0x65d: 0x01a1, + 0x65e: 0x0040, 0x65f: 0x01a9, 0x660: 0x0008, 0x661: 0x0008, 0x662: 0x3308, 0x663: 0x3308, + 0x664: 0x0040, 0x665: 0x0040, 0x666: 0x0008, 0x667: 0x0008, 0x668: 0x0008, 0x669: 0x0008, + 0x66a: 0x0008, 0x66b: 0x0008, 0x66c: 0x0008, 0x66d: 0x0008, 0x66e: 0x0008, 0x66f: 0x0008, + 0x670: 0x0008, 0x671: 0x0008, 0x672: 0x0018, 0x673: 0x0018, 0x674: 0x0018, 0x675: 0x0018, + 0x676: 0x0018, 0x677: 0x0018, 0x678: 0x0018, 0x679: 0x0018, 0x67a: 0x0018, 0x67b: 0x0018, + 0x67c: 0x0008, 0x67d: 0x0018, 0x67e: 0x3308, 0x67f: 0x0040, + // Block 0x1a, offset 0x680 + 0x680: 0x0040, 0x681: 0x3308, 0x682: 0x3308, 0x683: 0x3008, 0x684: 0x0040, 0x685: 0x0008, + 0x686: 0x0008, 0x687: 0x0008, 0x688: 0x0008, 0x689: 0x0008, 0x68a: 0x0008, 0x68b: 0x0040, + 0x68c: 0x0040, 0x68d: 0x0040, 0x68e: 0x0040, 0x68f: 0x0008, 0x690: 0x0008, 0x691: 0x0040, + 0x692: 0x0040, 0x693: 0x0008, 0x694: 0x0008, 0x695: 0x0008, 0x696: 0x0008, 0x697: 0x0008, + 0x698: 0x0008, 0x699: 0x0008, 0x69a: 0x0008, 0x69b: 0x0008, 0x69c: 0x0008, 0x69d: 0x0008, + 0x69e: 0x0008, 0x69f: 0x0008, 0x6a0: 0x0008, 0x6a1: 0x0008, 0x6a2: 0x0008, 0x6a3: 0x0008, + 0x6a4: 0x0008, 0x6a5: 0x0008, 0x6a6: 0x0008, 0x6a7: 0x0008, 0x6a8: 0x0008, 0x6a9: 0x0040, + 0x6aa: 0x0008, 0x6ab: 0x0008, 0x6ac: 0x0008, 0x6ad: 0x0008, 0x6ae: 0x0008, 0x6af: 0x0008, + 0x6b0: 0x0008, 0x6b1: 0x0040, 0x6b2: 0x0008, 0x6b3: 0x01b1, 0x6b4: 0x0040, 0x6b5: 0x0008, + 0x6b6: 0x01b9, 0x6b7: 0x0040, 0x6b8: 0x0008, 0x6b9: 0x0008, 0x6ba: 0x0040, 0x6bb: 0x0040, + 0x6bc: 0x3308, 0x6bd: 0x0040, 0x6be: 0x3008, 0x6bf: 0x3008, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x3008, 0x6c1: 0x3308, 0x6c2: 0x3308, 0x6c3: 0x0040, 0x6c4: 0x0040, 0x6c5: 0x0040, + 0x6c6: 0x0040, 0x6c7: 0x3308, 0x6c8: 0x3308, 0x6c9: 0x0040, 0x6ca: 0x0040, 0x6cb: 0x3308, + 0x6cc: 0x3308, 0x6cd: 0x3b08, 0x6ce: 0x0040, 0x6cf: 0x0040, 0x6d0: 0x0040, 0x6d1: 0x3308, + 0x6d2: 0x0040, 0x6d3: 0x0040, 0x6d4: 0x0040, 0x6d5: 0x0040, 0x6d6: 0x0040, 0x6d7: 0x0040, + 0x6d8: 0x0040, 0x6d9: 0x01c1, 0x6da: 0x01c9, 0x6db: 0x01d1, 0x6dc: 0x0008, 0x6dd: 0x0040, + 0x6de: 0x01d9, 0x6df: 0x0040, 0x6e0: 0x0040, 0x6e1: 0x0040, 0x6e2: 0x0040, 0x6e3: 0x0040, + 0x6e4: 0x0040, 0x6e5: 0x0040, 0x6e6: 0x0008, 0x6e7: 0x0008, 0x6e8: 0x0008, 0x6e9: 0x0008, + 0x6ea: 0x0008, 0x6eb: 0x0008, 0x6ec: 0x0008, 0x6ed: 0x0008, 0x6ee: 0x0008, 0x6ef: 0x0008, + 0x6f0: 0x3308, 0x6f1: 0x3308, 0x6f2: 0x0008, 0x6f3: 0x0008, 0x6f4: 0x0008, 0x6f5: 0x3308, + 0x6f6: 0x0018, 0x6f7: 0x0040, 0x6f8: 0x0040, 0x6f9: 0x0040, 0x6fa: 0x0040, 0x6fb: 0x0040, + 0x6fc: 0x0040, 0x6fd: 0x0040, 0x6fe: 0x0040, 0x6ff: 0x0040, + // Block 0x1c, offset 0x700 + 0x700: 0x0040, 0x701: 0x3308, 0x702: 0x3308, 0x703: 0x3008, 0x704: 0x0040, 0x705: 0x0008, + 0x706: 0x0008, 0x707: 0x0008, 0x708: 0x0008, 0x709: 0x0008, 0x70a: 0x0008, 0x70b: 0x0008, + 0x70c: 0x0008, 0x70d: 0x0008, 0x70e: 0x0040, 0x70f: 0x0008, 0x710: 0x0008, 0x711: 0x0008, + 0x712: 0x0040, 0x713: 0x0008, 0x714: 0x0008, 0x715: 0x0008, 0x716: 0x0008, 0x717: 0x0008, + 0x718: 0x0008, 0x719: 0x0008, 0x71a: 0x0008, 0x71b: 0x0008, 0x71c: 0x0008, 0x71d: 0x0008, + 0x71e: 0x0008, 0x71f: 0x0008, 0x720: 0x0008, 0x721: 0x0008, 0x722: 0x0008, 0x723: 0x0008, + 0x724: 0x0008, 0x725: 0x0008, 0x726: 0x0008, 0x727: 0x0008, 0x728: 0x0008, 0x729: 0x0040, + 0x72a: 0x0008, 0x72b: 0x0008, 0x72c: 0x0008, 0x72d: 0x0008, 0x72e: 0x0008, 0x72f: 0x0008, + 0x730: 0x0008, 0x731: 0x0040, 0x732: 0x0008, 0x733: 0x0008, 0x734: 0x0040, 0x735: 0x0008, + 0x736: 0x0008, 0x737: 0x0008, 0x738: 0x0008, 0x739: 0x0008, 0x73a: 0x0040, 0x73b: 
0x0040, + 0x73c: 0x3308, 0x73d: 0x0008, 0x73e: 0x3008, 0x73f: 0x3008, + // Block 0x1d, offset 0x740 + 0x740: 0x3008, 0x741: 0x3308, 0x742: 0x3308, 0x743: 0x3308, 0x744: 0x3308, 0x745: 0x3308, + 0x746: 0x0040, 0x747: 0x3308, 0x748: 0x3308, 0x749: 0x3008, 0x74a: 0x0040, 0x74b: 0x3008, + 0x74c: 0x3008, 0x74d: 0x3b08, 0x74e: 0x0040, 0x74f: 0x0040, 0x750: 0x0008, 0x751: 0x0040, + 0x752: 0x0040, 0x753: 0x0040, 0x754: 0x0040, 0x755: 0x0040, 0x756: 0x0040, 0x757: 0x0040, + 0x758: 0x0040, 0x759: 0x0040, 0x75a: 0x0040, 0x75b: 0x0040, 0x75c: 0x0040, 0x75d: 0x0040, + 0x75e: 0x0040, 0x75f: 0x0040, 0x760: 0x0008, 0x761: 0x0008, 0x762: 0x3308, 0x763: 0x3308, + 0x764: 0x0040, 0x765: 0x0040, 0x766: 0x0008, 0x767: 0x0008, 0x768: 0x0008, 0x769: 0x0008, + 0x76a: 0x0008, 0x76b: 0x0008, 0x76c: 0x0008, 0x76d: 0x0008, 0x76e: 0x0008, 0x76f: 0x0008, + 0x770: 0x0018, 0x771: 0x0018, 0x772: 0x0040, 0x773: 0x0040, 0x774: 0x0040, 0x775: 0x0040, + 0x776: 0x0040, 0x777: 0x0040, 0x778: 0x0040, 0x779: 0x0008, 0x77a: 0x3308, 0x77b: 0x3308, + 0x77c: 0x3308, 0x77d: 0x3308, 0x77e: 0x3308, 0x77f: 0x3308, + // Block 0x1e, offset 0x780 + 0x780: 0x0040, 0x781: 0x3308, 0x782: 0x3008, 0x783: 0x3008, 0x784: 0x0040, 0x785: 0x0008, + 0x786: 0x0008, 0x787: 0x0008, 0x788: 0x0008, 0x789: 0x0008, 0x78a: 0x0008, 0x78b: 0x0008, + 0x78c: 0x0008, 0x78d: 0x0040, 0x78e: 0x0040, 0x78f: 0x0008, 0x790: 0x0008, 0x791: 0x0040, + 0x792: 0x0040, 0x793: 0x0008, 0x794: 0x0008, 0x795: 0x0008, 0x796: 0x0008, 0x797: 0x0008, + 0x798: 0x0008, 0x799: 0x0008, 0x79a: 0x0008, 0x79b: 0x0008, 0x79c: 0x0008, 0x79d: 0x0008, + 0x79e: 0x0008, 0x79f: 0x0008, 0x7a0: 0x0008, 0x7a1: 0x0008, 0x7a2: 0x0008, 0x7a3: 0x0008, + 0x7a4: 0x0008, 0x7a5: 0x0008, 0x7a6: 0x0008, 0x7a7: 0x0008, 0x7a8: 0x0008, 0x7a9: 0x0040, + 0x7aa: 0x0008, 0x7ab: 0x0008, 0x7ac: 0x0008, 0x7ad: 0x0008, 0x7ae: 0x0008, 0x7af: 0x0008, + 0x7b0: 0x0008, 0x7b1: 0x0040, 0x7b2: 0x0008, 0x7b3: 0x0008, 0x7b4: 0x0040, 0x7b5: 0x0008, + 0x7b6: 0x0008, 0x7b7: 0x0008, 0x7b8: 0x0008, 0x7b9: 0x0008, 0x7ba: 0x0040, 0x7bb: 0x0040, + 0x7bc: 0x3308, 0x7bd: 0x0008, 0x7be: 0x3008, 0x7bf: 0x3308, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0x3008, 0x7c1: 0x3308, 0x7c2: 0x3308, 0x7c3: 0x3308, 0x7c4: 0x3308, 0x7c5: 0x0040, + 0x7c6: 0x0040, 0x7c7: 0x3008, 0x7c8: 0x3008, 0x7c9: 0x0040, 0x7ca: 0x0040, 0x7cb: 0x3008, + 0x7cc: 0x3008, 0x7cd: 0x3b08, 0x7ce: 0x0040, 0x7cf: 0x0040, 0x7d0: 0x0040, 0x7d1: 0x0040, + 0x7d2: 0x0040, 0x7d3: 0x0040, 0x7d4: 0x0040, 0x7d5: 0x3308, 0x7d6: 0x3308, 0x7d7: 0x3008, + 0x7d8: 0x0040, 0x7d9: 0x0040, 0x7da: 0x0040, 0x7db: 0x0040, 0x7dc: 0x01e1, 0x7dd: 0x01e9, + 0x7de: 0x0040, 0x7df: 0x0008, 0x7e0: 0x0008, 0x7e1: 0x0008, 0x7e2: 0x3308, 0x7e3: 0x3308, + 0x7e4: 0x0040, 0x7e5: 0x0040, 0x7e6: 0x0008, 0x7e7: 0x0008, 0x7e8: 0x0008, 0x7e9: 0x0008, + 0x7ea: 0x0008, 0x7eb: 0x0008, 0x7ec: 0x0008, 0x7ed: 0x0008, 0x7ee: 0x0008, 0x7ef: 0x0008, + 0x7f0: 0x0018, 0x7f1: 0x0008, 0x7f2: 0x0018, 0x7f3: 0x0018, 0x7f4: 0x0018, 0x7f5: 0x0018, + 0x7f6: 0x0018, 0x7f7: 0x0018, 0x7f8: 0x0040, 0x7f9: 0x0040, 0x7fa: 0x0040, 0x7fb: 0x0040, + 0x7fc: 0x0040, 0x7fd: 0x0040, 0x7fe: 0x0040, 0x7ff: 0x0040, + // Block 0x20, offset 0x800 + 0x800: 0x0040, 0x801: 0x0040, 0x802: 0x3308, 0x803: 0x0008, 0x804: 0x0040, 0x805: 0x0008, + 0x806: 0x0008, 0x807: 0x0008, 0x808: 0x0008, 0x809: 0x0008, 0x80a: 0x0008, 0x80b: 0x0040, + 0x80c: 0x0040, 0x80d: 0x0040, 0x80e: 0x0008, 0x80f: 0x0008, 0x810: 0x0008, 0x811: 0x0040, + 0x812: 0x0008, 0x813: 0x0008, 0x814: 0x0008, 0x815: 0x0008, 0x816: 0x0040, 0x817: 0x0040, + 0x818: 0x0040, 0x819: 0x0008, 0x81a: 0x0008, 
0x81b: 0x0040, 0x81c: 0x0008, 0x81d: 0x0040, + 0x81e: 0x0008, 0x81f: 0x0008, 0x820: 0x0040, 0x821: 0x0040, 0x822: 0x0040, 0x823: 0x0008, + 0x824: 0x0008, 0x825: 0x0040, 0x826: 0x0040, 0x827: 0x0040, 0x828: 0x0008, 0x829: 0x0008, + 0x82a: 0x0008, 0x82b: 0x0040, 0x82c: 0x0040, 0x82d: 0x0040, 0x82e: 0x0008, 0x82f: 0x0008, + 0x830: 0x0008, 0x831: 0x0008, 0x832: 0x0008, 0x833: 0x0008, 0x834: 0x0008, 0x835: 0x0008, + 0x836: 0x0008, 0x837: 0x0008, 0x838: 0x0008, 0x839: 0x0008, 0x83a: 0x0040, 0x83b: 0x0040, + 0x83c: 0x0040, 0x83d: 0x0040, 0x83e: 0x3008, 0x83f: 0x3008, + // Block 0x21, offset 0x840 + 0x840: 0x3308, 0x841: 0x3008, 0x842: 0x3008, 0x843: 0x3008, 0x844: 0x3008, 0x845: 0x0040, + 0x846: 0x3308, 0x847: 0x3308, 0x848: 0x3308, 0x849: 0x0040, 0x84a: 0x3308, 0x84b: 0x3308, + 0x84c: 0x3308, 0x84d: 0x3b08, 0x84e: 0x0040, 0x84f: 0x0040, 0x850: 0x0040, 0x851: 0x0040, + 0x852: 0x0040, 0x853: 0x0040, 0x854: 0x0040, 0x855: 0x3308, 0x856: 0x3308, 0x857: 0x0040, + 0x858: 0x0008, 0x859: 0x0008, 0x85a: 0x0008, 0x85b: 0x0040, 0x85c: 0x0040, 0x85d: 0x0008, + 0x85e: 0x0040, 0x85f: 0x0040, 0x860: 0x0008, 0x861: 0x0008, 0x862: 0x3308, 0x863: 0x3308, + 0x864: 0x0040, 0x865: 0x0040, 0x866: 0x0008, 0x867: 0x0008, 0x868: 0x0008, 0x869: 0x0008, + 0x86a: 0x0008, 0x86b: 0x0008, 0x86c: 0x0008, 0x86d: 0x0008, 0x86e: 0x0008, 0x86f: 0x0008, + 0x870: 0x0040, 0x871: 0x0040, 0x872: 0x0040, 0x873: 0x0040, 0x874: 0x0040, 0x875: 0x0040, + 0x876: 0x0040, 0x877: 0x0018, 0x878: 0x0018, 0x879: 0x0018, 0x87a: 0x0018, 0x87b: 0x0018, + 0x87c: 0x0018, 0x87d: 0x0018, 0x87e: 0x0018, 0x87f: 0x0018, + // Block 0x22, offset 0x880 + 0x880: 0x0008, 0x881: 0x3308, 0x882: 0x3008, 0x883: 0x3008, 0x884: 0x0018, 0x885: 0x0008, + 0x886: 0x0008, 0x887: 0x0008, 0x888: 0x0008, 0x889: 0x0008, 0x88a: 0x0008, 0x88b: 0x0008, + 0x88c: 0x0008, 0x88d: 0x0040, 0x88e: 0x0008, 0x88f: 0x0008, 0x890: 0x0008, 0x891: 0x0040, + 0x892: 0x0008, 0x893: 0x0008, 0x894: 0x0008, 0x895: 0x0008, 0x896: 0x0008, 0x897: 0x0008, + 0x898: 0x0008, 0x899: 0x0008, 0x89a: 0x0008, 0x89b: 0x0008, 0x89c: 0x0008, 0x89d: 0x0008, + 0x89e: 0x0008, 0x89f: 0x0008, 0x8a0: 0x0008, 0x8a1: 0x0008, 0x8a2: 0x0008, 0x8a3: 0x0008, + 0x8a4: 0x0008, 0x8a5: 0x0008, 0x8a6: 0x0008, 0x8a7: 0x0008, 0x8a8: 0x0008, 0x8a9: 0x0040, + 0x8aa: 0x0008, 0x8ab: 0x0008, 0x8ac: 0x0008, 0x8ad: 0x0008, 0x8ae: 0x0008, 0x8af: 0x0008, + 0x8b0: 0x0008, 0x8b1: 0x0008, 0x8b2: 0x0008, 0x8b3: 0x0008, 0x8b4: 0x0040, 0x8b5: 0x0008, + 0x8b6: 0x0008, 0x8b7: 0x0008, 0x8b8: 0x0008, 0x8b9: 0x0008, 0x8ba: 0x0040, 0x8bb: 0x0040, + 0x8bc: 0x3308, 0x8bd: 0x0008, 0x8be: 0x3008, 0x8bf: 0x3308, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x3008, 0x8c1: 0x3008, 0x8c2: 0x3008, 0x8c3: 0x3008, 0x8c4: 0x3008, 0x8c5: 0x0040, + 0x8c6: 0x3308, 0x8c7: 0x3008, 0x8c8: 0x3008, 0x8c9: 0x0040, 0x8ca: 0x3008, 0x8cb: 0x3008, + 0x8cc: 0x3308, 0x8cd: 0x3b08, 0x8ce: 0x0040, 0x8cf: 0x0040, 0x8d0: 0x0040, 0x8d1: 0x0040, + 0x8d2: 0x0040, 0x8d3: 0x0040, 0x8d4: 0x0040, 0x8d5: 0x3008, 0x8d6: 0x3008, 0x8d7: 0x0040, + 0x8d8: 0x0040, 0x8d9: 0x0040, 0x8da: 0x0040, 0x8db: 0x0040, 0x8dc: 0x0040, 0x8dd: 0x0008, + 0x8de: 0x0008, 0x8df: 0x0040, 0x8e0: 0x0008, 0x8e1: 0x0008, 0x8e2: 0x3308, 0x8e3: 0x3308, + 0x8e4: 0x0040, 0x8e5: 0x0040, 0x8e6: 0x0008, 0x8e7: 0x0008, 0x8e8: 0x0008, 0x8e9: 0x0008, + 0x8ea: 0x0008, 0x8eb: 0x0008, 0x8ec: 0x0008, 0x8ed: 0x0008, 0x8ee: 0x0008, 0x8ef: 0x0008, + 0x8f0: 0x0040, 0x8f1: 0x0008, 0x8f2: 0x0008, 0x8f3: 0x3008, 0x8f4: 0x0040, 0x8f5: 0x0040, + 0x8f6: 0x0040, 0x8f7: 0x0040, 0x8f8: 0x0040, 0x8f9: 0x0040, 0x8fa: 0x0040, 0x8fb: 0x0040, + 0x8fc: 
0x0040, 0x8fd: 0x0040, 0x8fe: 0x0040, 0x8ff: 0x0040, + // Block 0x24, offset 0x900 + 0x900: 0x3008, 0x901: 0x3308, 0x902: 0x3308, 0x903: 0x3308, 0x904: 0x3308, 0x905: 0x0040, + 0x906: 0x3008, 0x907: 0x3008, 0x908: 0x3008, 0x909: 0x0040, 0x90a: 0x3008, 0x90b: 0x3008, + 0x90c: 0x3008, 0x90d: 0x3b08, 0x90e: 0x0008, 0x90f: 0x0018, 0x910: 0x0040, 0x911: 0x0040, + 0x912: 0x0040, 0x913: 0x0040, 0x914: 0x0008, 0x915: 0x0008, 0x916: 0x0008, 0x917: 0x3008, + 0x918: 0x0018, 0x919: 0x0018, 0x91a: 0x0018, 0x91b: 0x0018, 0x91c: 0x0018, 0x91d: 0x0018, + 0x91e: 0x0018, 0x91f: 0x0008, 0x920: 0x0008, 0x921: 0x0008, 0x922: 0x3308, 0x923: 0x3308, + 0x924: 0x0040, 0x925: 0x0040, 0x926: 0x0008, 0x927: 0x0008, 0x928: 0x0008, 0x929: 0x0008, + 0x92a: 0x0008, 0x92b: 0x0008, 0x92c: 0x0008, 0x92d: 0x0008, 0x92e: 0x0008, 0x92f: 0x0008, + 0x930: 0x0018, 0x931: 0x0018, 0x932: 0x0018, 0x933: 0x0018, 0x934: 0x0018, 0x935: 0x0018, + 0x936: 0x0018, 0x937: 0x0018, 0x938: 0x0018, 0x939: 0x0018, 0x93a: 0x0008, 0x93b: 0x0008, + 0x93c: 0x0008, 0x93d: 0x0008, 0x93e: 0x0008, 0x93f: 0x0008, + // Block 0x25, offset 0x940 + 0x940: 0x0040, 0x941: 0x0008, 0x942: 0x0008, 0x943: 0x0040, 0x944: 0x0008, 0x945: 0x0040, + 0x946: 0x0008, 0x947: 0x0008, 0x948: 0x0008, 0x949: 0x0008, 0x94a: 0x0008, 0x94b: 0x0040, + 0x94c: 0x0008, 0x94d: 0x0008, 0x94e: 0x0008, 0x94f: 0x0008, 0x950: 0x0008, 0x951: 0x0008, + 0x952: 0x0008, 0x953: 0x0008, 0x954: 0x0008, 0x955: 0x0008, 0x956: 0x0008, 0x957: 0x0008, + 0x958: 0x0008, 0x959: 0x0008, 0x95a: 0x0008, 0x95b: 0x0008, 0x95c: 0x0008, 0x95d: 0x0008, + 0x95e: 0x0008, 0x95f: 0x0008, 0x960: 0x0008, 0x961: 0x0008, 0x962: 0x0008, 0x963: 0x0008, + 0x964: 0x0040, 0x965: 0x0008, 0x966: 0x0040, 0x967: 0x0008, 0x968: 0x0008, 0x969: 0x0008, + 0x96a: 0x0008, 0x96b: 0x0008, 0x96c: 0x0008, 0x96d: 0x0008, 0x96e: 0x0008, 0x96f: 0x0008, + 0x970: 0x0008, 0x971: 0x3308, 0x972: 0x0008, 0x973: 0x01f9, 0x974: 0x3308, 0x975: 0x3308, + 0x976: 0x3308, 0x977: 0x3308, 0x978: 0x3308, 0x979: 0x3308, 0x97a: 0x3b08, 0x97b: 0x3308, + 0x97c: 0x3308, 0x97d: 0x0008, 0x97e: 0x0040, 0x97f: 0x0040, + // Block 0x26, offset 0x980 + 0x980: 0x0008, 0x981: 0x0008, 0x982: 0x0008, 0x983: 0x0211, 0x984: 0x0008, 0x985: 0x0008, + 0x986: 0x0008, 0x987: 0x0008, 0x988: 0x0040, 0x989: 0x0008, 0x98a: 0x0008, 0x98b: 0x0008, + 0x98c: 0x0008, 0x98d: 0x0219, 0x98e: 0x0008, 0x98f: 0x0008, 0x990: 0x0008, 0x991: 0x0008, + 0x992: 0x0221, 0x993: 0x0008, 0x994: 0x0008, 0x995: 0x0008, 0x996: 0x0008, 0x997: 0x0229, + 0x998: 0x0008, 0x999: 0x0008, 0x99a: 0x0008, 0x99b: 0x0008, 0x99c: 0x0231, 0x99d: 0x0008, + 0x99e: 0x0008, 0x99f: 0x0008, 0x9a0: 0x0008, 0x9a1: 0x0008, 0x9a2: 0x0008, 0x9a3: 0x0008, + 0x9a4: 0x0008, 0x9a5: 0x0008, 0x9a6: 0x0008, 0x9a7: 0x0008, 0x9a8: 0x0008, 0x9a9: 0x0239, + 0x9aa: 0x0008, 0x9ab: 0x0008, 0x9ac: 0x0008, 0x9ad: 0x0040, 0x9ae: 0x0040, 0x9af: 0x0040, + 0x9b0: 0x0040, 0x9b1: 0x3308, 0x9b2: 0x3308, 0x9b3: 0x0241, 0x9b4: 0x3308, 0x9b5: 0x0249, + 0x9b6: 0x0251, 0x9b7: 0x0259, 0x9b8: 0x0261, 0x9b9: 0x0269, 0x9ba: 0x3308, 0x9bb: 0x3308, + 0x9bc: 0x3308, 0x9bd: 0x3308, 0x9be: 0x3308, 0x9bf: 0x3008, + // Block 0x27, offset 0x9c0 + 0x9c0: 0x3308, 0x9c1: 0x0271, 0x9c2: 0x3308, 0x9c3: 0x3308, 0x9c4: 0x3b08, 0x9c5: 0x0018, + 0x9c6: 0x3308, 0x9c7: 0x3308, 0x9c8: 0x0008, 0x9c9: 0x0008, 0x9ca: 0x0008, 0x9cb: 0x0008, + 0x9cc: 0x0008, 0x9cd: 0x3308, 0x9ce: 0x3308, 0x9cf: 0x3308, 0x9d0: 0x3308, 0x9d1: 0x3308, + 0x9d2: 0x3308, 0x9d3: 0x0279, 0x9d4: 0x3308, 0x9d5: 0x3308, 0x9d6: 0x3308, 0x9d7: 0x3308, + 0x9d8: 0x0040, 0x9d9: 0x3308, 0x9da: 0x3308, 0x9db: 0x3308, 
0x9dc: 0x3308, 0x9dd: 0x0281, + 0x9de: 0x3308, 0x9df: 0x3308, 0x9e0: 0x3308, 0x9e1: 0x3308, 0x9e2: 0x0289, 0x9e3: 0x3308, + 0x9e4: 0x3308, 0x9e5: 0x3308, 0x9e6: 0x3308, 0x9e7: 0x0291, 0x9e8: 0x3308, 0x9e9: 0x3308, + 0x9ea: 0x3308, 0x9eb: 0x3308, 0x9ec: 0x0299, 0x9ed: 0x3308, 0x9ee: 0x3308, 0x9ef: 0x3308, + 0x9f0: 0x3308, 0x9f1: 0x3308, 0x9f2: 0x3308, 0x9f3: 0x3308, 0x9f4: 0x3308, 0x9f5: 0x3308, + 0x9f6: 0x3308, 0x9f7: 0x3308, 0x9f8: 0x3308, 0x9f9: 0x02a1, 0x9fa: 0x3308, 0x9fb: 0x3308, + 0x9fc: 0x3308, 0x9fd: 0x0040, 0x9fe: 0x0018, 0x9ff: 0x0018, + // Block 0x28, offset 0xa00 + 0xa00: 0x0008, 0xa01: 0x0008, 0xa02: 0x0008, 0xa03: 0x0008, 0xa04: 0x0008, 0xa05: 0x0008, + 0xa06: 0x0008, 0xa07: 0x0008, 0xa08: 0x0008, 0xa09: 0x0008, 0xa0a: 0x0008, 0xa0b: 0x0008, + 0xa0c: 0x0008, 0xa0d: 0x0008, 0xa0e: 0x0008, 0xa0f: 0x0008, 0xa10: 0x0008, 0xa11: 0x0008, + 0xa12: 0x0008, 0xa13: 0x0008, 0xa14: 0x0008, 0xa15: 0x0008, 0xa16: 0x0008, 0xa17: 0x0008, + 0xa18: 0x0008, 0xa19: 0x0008, 0xa1a: 0x0008, 0xa1b: 0x0008, 0xa1c: 0x0008, 0xa1d: 0x0008, + 0xa1e: 0x0008, 0xa1f: 0x0008, 0xa20: 0x0008, 0xa21: 0x0008, 0xa22: 0x0008, 0xa23: 0x0008, + 0xa24: 0x0008, 0xa25: 0x0008, 0xa26: 0x0008, 0xa27: 0x0008, 0xa28: 0x0008, 0xa29: 0x0008, + 0xa2a: 0x0008, 0xa2b: 0x0008, 0xa2c: 0x0019, 0xa2d: 0x02e1, 0xa2e: 0x02e9, 0xa2f: 0x0008, + 0xa30: 0x02f1, 0xa31: 0x02f9, 0xa32: 0x0301, 0xa33: 0x0309, 0xa34: 0x00a9, 0xa35: 0x0311, + 0xa36: 0x00b1, 0xa37: 0x0319, 0xa38: 0x0101, 0xa39: 0x0321, 0xa3a: 0x0329, 0xa3b: 0x0008, + 0xa3c: 0x0051, 0xa3d: 0x0331, 0xa3e: 0x0339, 0xa3f: 0x00b9, + // Block 0x29, offset 0xa40 + 0xa40: 0x0341, 0xa41: 0x0349, 0xa42: 0x00c1, 0xa43: 0x0019, 0xa44: 0x0351, 0xa45: 0x0359, + 0xa46: 0x05b5, 0xa47: 0x02e9, 0xa48: 0x02f1, 0xa49: 0x02f9, 0xa4a: 0x0361, 0xa4b: 0x0369, + 0xa4c: 0x0371, 0xa4d: 0x0309, 0xa4e: 0x0008, 0xa4f: 0x0319, 0xa50: 0x0321, 0xa51: 0x0379, + 0xa52: 0x0051, 0xa53: 0x0381, 0xa54: 0x05cd, 0xa55: 0x05cd, 0xa56: 0x0339, 0xa57: 0x0341, + 0xa58: 0x0349, 0xa59: 0x05b5, 0xa5a: 0x0389, 0xa5b: 0x0391, 0xa5c: 0x05e5, 0xa5d: 0x0399, + 0xa5e: 0x03a1, 0xa5f: 0x03a9, 0xa60: 0x03b1, 0xa61: 0x03b9, 0xa62: 0x0311, 0xa63: 0x00b9, + 0xa64: 0x0349, 0xa65: 0x0391, 0xa66: 0x0399, 0xa67: 0x03a1, 0xa68: 0x03c1, 0xa69: 0x03b1, + 0xa6a: 0x03b9, 0xa6b: 0x0008, 0xa6c: 0x0008, 0xa6d: 0x0008, 0xa6e: 0x0008, 0xa6f: 0x0008, + 0xa70: 0x0008, 0xa71: 0x0008, 0xa72: 0x0008, 0xa73: 0x0008, 0xa74: 0x0008, 0xa75: 0x0008, + 0xa76: 0x0008, 0xa77: 0x0008, 0xa78: 0x03c9, 0xa79: 0x0008, 0xa7a: 0x0008, 0xa7b: 0x0008, + 0xa7c: 0x0008, 0xa7d: 0x0008, 0xa7e: 0x0008, 0xa7f: 0x0008, + // Block 0x2a, offset 0xa80 + 0xa80: 0x0008, 0xa81: 0x0008, 0xa82: 0x0008, 0xa83: 0x0008, 0xa84: 0x0008, 0xa85: 0x0008, + 0xa86: 0x0008, 0xa87: 0x0008, 0xa88: 0x0008, 0xa89: 0x0008, 0xa8a: 0x0008, 0xa8b: 0x0008, + 0xa8c: 0x0008, 0xa8d: 0x0008, 0xa8e: 0x0008, 0xa8f: 0x0008, 0xa90: 0x0008, 0xa91: 0x0008, + 0xa92: 0x0008, 0xa93: 0x0008, 0xa94: 0x0008, 0xa95: 0x0008, 0xa96: 0x0008, 0xa97: 0x0008, + 0xa98: 0x0008, 0xa99: 0x0008, 0xa9a: 0x0008, 0xa9b: 0x03d1, 0xa9c: 0x03d9, 0xa9d: 0x03e1, + 0xa9e: 0x03e9, 0xa9f: 0x0371, 0xaa0: 0x03f1, 0xaa1: 0x03f9, 0xaa2: 0x0401, 0xaa3: 0x0409, + 0xaa4: 0x0411, 0xaa5: 0x0419, 0xaa6: 0x0421, 0xaa7: 0x05fd, 0xaa8: 0x0429, 0xaa9: 0x0431, + 0xaaa: 0xe17d, 0xaab: 0x0439, 0xaac: 0x0441, 0xaad: 0x0449, 0xaae: 0x0451, 0xaaf: 0x0459, + 0xab0: 0x0461, 0xab1: 0x0469, 0xab2: 0x0471, 0xab3: 0x0479, 0xab4: 0x0481, 0xab5: 0x0489, + 0xab6: 0x0491, 0xab7: 0x0499, 0xab8: 0x0615, 0xab9: 0x04a1, 0xaba: 0x04a9, 0xabb: 0x04b1, + 0xabc: 0x04b9, 0xabd: 
0x04c1, 0xabe: 0x04c9, 0xabf: 0x04d1, + // Block 0x2b, offset 0xac0 + 0xac0: 0xe00d, 0xac1: 0x0008, 0xac2: 0xe00d, 0xac3: 0x0008, 0xac4: 0xe00d, 0xac5: 0x0008, + 0xac6: 0xe00d, 0xac7: 0x0008, 0xac8: 0xe00d, 0xac9: 0x0008, 0xaca: 0xe00d, 0xacb: 0x0008, + 0xacc: 0xe00d, 0xacd: 0x0008, 0xace: 0xe00d, 0xacf: 0x0008, 0xad0: 0xe00d, 0xad1: 0x0008, + 0xad2: 0xe00d, 0xad3: 0x0008, 0xad4: 0xe00d, 0xad5: 0x0008, 0xad6: 0xe00d, 0xad7: 0x0008, + 0xad8: 0xe00d, 0xad9: 0x0008, 0xada: 0xe00d, 0xadb: 0x0008, 0xadc: 0xe00d, 0xadd: 0x0008, + 0xade: 0xe00d, 0xadf: 0x0008, 0xae0: 0xe00d, 0xae1: 0x0008, 0xae2: 0xe00d, 0xae3: 0x0008, + 0xae4: 0xe00d, 0xae5: 0x0008, 0xae6: 0xe00d, 0xae7: 0x0008, 0xae8: 0xe00d, 0xae9: 0x0008, + 0xaea: 0xe00d, 0xaeb: 0x0008, 0xaec: 0xe00d, 0xaed: 0x0008, 0xaee: 0xe00d, 0xaef: 0x0008, + 0xaf0: 0xe00d, 0xaf1: 0x0008, 0xaf2: 0xe00d, 0xaf3: 0x0008, 0xaf4: 0xe00d, 0xaf5: 0x0008, + 0xaf6: 0xe00d, 0xaf7: 0x0008, 0xaf8: 0xe00d, 0xaf9: 0x0008, 0xafa: 0xe00d, 0xafb: 0x0008, + 0xafc: 0xe00d, 0xafd: 0x0008, 0xafe: 0xe00d, 0xaff: 0x0008, + // Block 0x2c, offset 0xb00 + 0xb00: 0xe00d, 0xb01: 0x0008, 0xb02: 0xe00d, 0xb03: 0x0008, 0xb04: 0xe00d, 0xb05: 0x0008, + 0xb06: 0xe00d, 0xb07: 0x0008, 0xb08: 0xe00d, 0xb09: 0x0008, 0xb0a: 0xe00d, 0xb0b: 0x0008, + 0xb0c: 0xe00d, 0xb0d: 0x0008, 0xb0e: 0xe00d, 0xb0f: 0x0008, 0xb10: 0xe00d, 0xb11: 0x0008, + 0xb12: 0xe00d, 0xb13: 0x0008, 0xb14: 0xe00d, 0xb15: 0x0008, 0xb16: 0x0008, 0xb17: 0x0008, + 0xb18: 0x0008, 0xb19: 0x0008, 0xb1a: 0x062d, 0xb1b: 0x064d, 0xb1c: 0x0008, 0xb1d: 0x0008, + 0xb1e: 0x04d9, 0xb1f: 0x0008, 0xb20: 0xe00d, 0xb21: 0x0008, 0xb22: 0xe00d, 0xb23: 0x0008, + 0xb24: 0xe00d, 0xb25: 0x0008, 0xb26: 0xe00d, 0xb27: 0x0008, 0xb28: 0xe00d, 0xb29: 0x0008, + 0xb2a: 0xe00d, 0xb2b: 0x0008, 0xb2c: 0xe00d, 0xb2d: 0x0008, 0xb2e: 0xe00d, 0xb2f: 0x0008, + 0xb30: 0xe00d, 0xb31: 0x0008, 0xb32: 0xe00d, 0xb33: 0x0008, 0xb34: 0xe00d, 0xb35: 0x0008, + 0xb36: 0xe00d, 0xb37: 0x0008, 0xb38: 0xe00d, 0xb39: 0x0008, 0xb3a: 0xe00d, 0xb3b: 0x0008, + 0xb3c: 0xe00d, 0xb3d: 0x0008, 0xb3e: 0xe00d, 0xb3f: 0x0008, + // Block 0x2d, offset 0xb40 + 0xb40: 0x0008, 0xb41: 0x0008, 0xb42: 0x0008, 0xb43: 0x0008, 0xb44: 0x0008, 0xb45: 0x0008, + 0xb46: 0x0040, 0xb47: 0x0040, 0xb48: 0xe045, 0xb49: 0xe045, 0xb4a: 0xe045, 0xb4b: 0xe045, + 0xb4c: 0xe045, 0xb4d: 0xe045, 0xb4e: 0x0040, 0xb4f: 0x0040, 0xb50: 0x0008, 0xb51: 0x0008, + 0xb52: 0x0008, 0xb53: 0x0008, 0xb54: 0x0008, 0xb55: 0x0008, 0xb56: 0x0008, 0xb57: 0x0008, + 0xb58: 0x0040, 0xb59: 0xe045, 0xb5a: 0x0040, 0xb5b: 0xe045, 0xb5c: 0x0040, 0xb5d: 0xe045, + 0xb5e: 0x0040, 0xb5f: 0xe045, 0xb60: 0x0008, 0xb61: 0x0008, 0xb62: 0x0008, 0xb63: 0x0008, + 0xb64: 0x0008, 0xb65: 0x0008, 0xb66: 0x0008, 0xb67: 0x0008, 0xb68: 0xe045, 0xb69: 0xe045, + 0xb6a: 0xe045, 0xb6b: 0xe045, 0xb6c: 0xe045, 0xb6d: 0xe045, 0xb6e: 0xe045, 0xb6f: 0xe045, + 0xb70: 0x0008, 0xb71: 0x04e1, 0xb72: 0x0008, 0xb73: 0x04e9, 0xb74: 0x0008, 0xb75: 0x04f1, + 0xb76: 0x0008, 0xb77: 0x04f9, 0xb78: 0x0008, 0xb79: 0x0501, 0xb7a: 0x0008, 0xb7b: 0x0509, + 0xb7c: 0x0008, 0xb7d: 0x0511, 0xb7e: 0x0040, 0xb7f: 0x0040, + // Block 0x2e, offset 0xb80 + 0xb80: 0x0519, 0xb81: 0x0521, 0xb82: 0x0529, 0xb83: 0x0531, 0xb84: 0x0539, 0xb85: 0x0541, + 0xb86: 0x0549, 0xb87: 0x0551, 0xb88: 0x0519, 0xb89: 0x0521, 0xb8a: 0x0529, 0xb8b: 0x0531, + 0xb8c: 0x0539, 0xb8d: 0x0541, 0xb8e: 0x0549, 0xb8f: 0x0551, 0xb90: 0x0559, 0xb91: 0x0561, + 0xb92: 0x0569, 0xb93: 0x0571, 0xb94: 0x0579, 0xb95: 0x0581, 0xb96: 0x0589, 0xb97: 0x0591, + 0xb98: 0x0559, 0xb99: 0x0561, 0xb9a: 0x0569, 0xb9b: 0x0571, 0xb9c: 0x0579, 
0xb9d: 0x0581, + 0xb9e: 0x0589, 0xb9f: 0x0591, 0xba0: 0x0599, 0xba1: 0x05a1, 0xba2: 0x05a9, 0xba3: 0x05b1, + 0xba4: 0x05b9, 0xba5: 0x05c1, 0xba6: 0x05c9, 0xba7: 0x05d1, 0xba8: 0x0599, 0xba9: 0x05a1, + 0xbaa: 0x05a9, 0xbab: 0x05b1, 0xbac: 0x05b9, 0xbad: 0x05c1, 0xbae: 0x05c9, 0xbaf: 0x05d1, + 0xbb0: 0x0008, 0xbb1: 0x0008, 0xbb2: 0x05d9, 0xbb3: 0x05e1, 0xbb4: 0x05e9, 0xbb5: 0x0040, + 0xbb6: 0x0008, 0xbb7: 0x05f1, 0xbb8: 0xe045, 0xbb9: 0xe045, 0xbba: 0x0665, 0xbbb: 0x04e1, + 0xbbc: 0x05e1, 0xbbd: 0x067e, 0xbbe: 0x05f9, 0xbbf: 0x069e, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x06be, 0xbc1: 0x0602, 0xbc2: 0x0609, 0xbc3: 0x0611, 0xbc4: 0x0619, 0xbc5: 0x0040, + 0xbc6: 0x0008, 0xbc7: 0x0621, 0xbc8: 0x06dd, 0xbc9: 0x04e9, 0xbca: 0x06f5, 0xbcb: 0x04f1, + 0xbcc: 0x0611, 0xbcd: 0x062a, 0xbce: 0x0632, 0xbcf: 0x063a, 0xbd0: 0x0008, 0xbd1: 0x0008, + 0xbd2: 0x0008, 0xbd3: 0x0641, 0xbd4: 0x0040, 0xbd5: 0x0040, 0xbd6: 0x0008, 0xbd7: 0x0008, + 0xbd8: 0xe045, 0xbd9: 0xe045, 0xbda: 0x070d, 0xbdb: 0x04f9, 0xbdc: 0x0040, 0xbdd: 0x064a, + 0xbde: 0x0652, 0xbdf: 0x065a, 0xbe0: 0x0008, 0xbe1: 0x0008, 0xbe2: 0x0008, 0xbe3: 0x0661, + 0xbe4: 0x0008, 0xbe5: 0x0008, 0xbe6: 0x0008, 0xbe7: 0x0008, 0xbe8: 0xe045, 0xbe9: 0xe045, + 0xbea: 0x0725, 0xbeb: 0x0509, 0xbec: 0xe04d, 0xbed: 0x066a, 0xbee: 0x012a, 0xbef: 0x0672, + 0xbf0: 0x0040, 0xbf1: 0x0040, 0xbf2: 0x0679, 0xbf3: 0x0681, 0xbf4: 0x0689, 0xbf5: 0x0040, + 0xbf6: 0x0008, 0xbf7: 0x0691, 0xbf8: 0x073d, 0xbf9: 0x0501, 0xbfa: 0x0515, 0xbfb: 0x0511, + 0xbfc: 0x0681, 0xbfd: 0x0756, 0xbfe: 0x0776, 0xbff: 0x0040, + // Block 0x30, offset 0xc00 + 0xc00: 0x000a, 0xc01: 0x000a, 0xc02: 0x000a, 0xc03: 0x000a, 0xc04: 0x000a, 0xc05: 0x000a, + 0xc06: 0x000a, 0xc07: 0x000a, 0xc08: 0x000a, 0xc09: 0x000a, 0xc0a: 0x000a, 0xc0b: 0x03c0, + 0xc0c: 0x0003, 0xc0d: 0x0003, 0xc0e: 0x0340, 0xc0f: 0x0b40, 0xc10: 0x0018, 0xc11: 0xe00d, + 0xc12: 0x0018, 0xc13: 0x0018, 0xc14: 0x0018, 0xc15: 0x0018, 0xc16: 0x0018, 0xc17: 0x0796, + 0xc18: 0x0018, 0xc19: 0x0018, 0xc1a: 0x0018, 0xc1b: 0x0018, 0xc1c: 0x0018, 0xc1d: 0x0018, + 0xc1e: 0x0018, 0xc1f: 0x0018, 0xc20: 0x0018, 0xc21: 0x0018, 0xc22: 0x0018, 0xc23: 0x0018, + 0xc24: 0x0040, 0xc25: 0x0040, 0xc26: 0x0040, 0xc27: 0x0018, 0xc28: 0x0040, 0xc29: 0x0040, + 0xc2a: 0x0340, 0xc2b: 0x0340, 0xc2c: 0x0340, 0xc2d: 0x0340, 0xc2e: 0x0340, 0xc2f: 0x000a, + 0xc30: 0x0018, 0xc31: 0x0018, 0xc32: 0x0018, 0xc33: 0x0699, 0xc34: 0x06a1, 0xc35: 0x0018, + 0xc36: 0x06a9, 0xc37: 0x06b1, 0xc38: 0x0018, 0xc39: 0x0018, 0xc3a: 0x0018, 0xc3b: 0x0018, + 0xc3c: 0x06ba, 0xc3d: 0x0018, 0xc3e: 0x07b6, 0xc3f: 0x0018, + // Block 0x31, offset 0xc40 + 0xc40: 0x0018, 0xc41: 0x0018, 0xc42: 0x0018, 0xc43: 0x0018, 0xc44: 0x0018, 0xc45: 0x0018, + 0xc46: 0x0018, 0xc47: 0x06c2, 0xc48: 0x06ca, 0xc49: 0x06d2, 0xc4a: 0x0018, 0xc4b: 0x0018, + 0xc4c: 0x0018, 0xc4d: 0x0018, 0xc4e: 0x0018, 0xc4f: 0x0018, 0xc50: 0x0018, 0xc51: 0x0018, + 0xc52: 0x0018, 0xc53: 0x0018, 0xc54: 0x0018, 0xc55: 0x0018, 0xc56: 0x0018, 0xc57: 0x06d9, + 0xc58: 0x0018, 0xc59: 0x0018, 0xc5a: 0x0018, 0xc5b: 0x0018, 0xc5c: 0x0018, 0xc5d: 0x0018, + 0xc5e: 0x0018, 0xc5f: 0x000a, 0xc60: 0x03c0, 0xc61: 0x0340, 0xc62: 0x0340, 0xc63: 0x0340, + 0xc64: 0x03c0, 0xc65: 0x0040, 0xc66: 0x0040, 0xc67: 0x0040, 0xc68: 0x0040, 0xc69: 0x0040, + 0xc6a: 0x0340, 0xc6b: 0x0340, 0xc6c: 0x0340, 0xc6d: 0x0340, 0xc6e: 0x0340, 0xc6f: 0x0340, + 0xc70: 0x06e1, 0xc71: 0x0311, 0xc72: 0x0040, 0xc73: 0x0040, 0xc74: 0x06e9, 0xc75: 0x06f1, + 0xc76: 0x06f9, 0xc77: 0x0701, 0xc78: 0x0709, 0xc79: 0x0711, 0xc7a: 0x071a, 0xc7b: 0x07d5, + 0xc7c: 0x0722, 0xc7d: 0x072a, 0xc7e: 
0x0732, 0xc7f: 0x0329, + // Block 0x32, offset 0xc80 + 0xc80: 0x06e1, 0xc81: 0x0049, 0xc82: 0x0029, 0xc83: 0x0031, 0xc84: 0x06e9, 0xc85: 0x06f1, + 0xc86: 0x06f9, 0xc87: 0x0701, 0xc88: 0x0709, 0xc89: 0x0711, 0xc8a: 0x071a, 0xc8b: 0x07ed, + 0xc8c: 0x0722, 0xc8d: 0x072a, 0xc8e: 0x0732, 0xc8f: 0x0040, 0xc90: 0x0019, 0xc91: 0x02f9, + 0xc92: 0x0051, 0xc93: 0x0109, 0xc94: 0x0361, 0xc95: 0x00a9, 0xc96: 0x0319, 0xc97: 0x0101, + 0xc98: 0x0321, 0xc99: 0x0329, 0xc9a: 0x0339, 0xc9b: 0x0089, 0xc9c: 0x0341, 0xc9d: 0x0040, + 0xc9e: 0x0040, 0xc9f: 0x0040, 0xca0: 0x0018, 0xca1: 0x0018, 0xca2: 0x0018, 0xca3: 0x0018, + 0xca4: 0x0018, 0xca5: 0x0018, 0xca6: 0x0018, 0xca7: 0x0018, 0xca8: 0x0739, 0xca9: 0x0018, + 0xcaa: 0x0018, 0xcab: 0x0018, 0xcac: 0x0018, 0xcad: 0x0018, 0xcae: 0x0018, 0xcaf: 0x0018, + 0xcb0: 0x0018, 0xcb1: 0x0018, 0xcb2: 0x0018, 0xcb3: 0x0018, 0xcb4: 0x0018, 0xcb5: 0x0018, + 0xcb6: 0x0018, 0xcb7: 0x0018, 0xcb8: 0x0018, 0xcb9: 0x0018, 0xcba: 0x0018, 0xcbb: 0x0018, + 0xcbc: 0x0018, 0xcbd: 0x0018, 0xcbe: 0x0018, 0xcbf: 0x0018, + // Block 0x33, offset 0xcc0 + 0xcc0: 0x0806, 0xcc1: 0x0826, 0xcc2: 0x03d9, 0xcc3: 0x0845, 0xcc4: 0x0018, 0xcc5: 0x0866, + 0xcc6: 0x0886, 0xcc7: 0x0369, 0xcc8: 0x0018, 0xcc9: 0x08a5, 0xcca: 0x0309, 0xccb: 0x00a9, + 0xccc: 0x00a9, 0xccd: 0x00a9, 0xcce: 0x00a9, 0xccf: 0x0741, 0xcd0: 0x0311, 0xcd1: 0x0311, + 0xcd2: 0x0101, 0xcd3: 0x0101, 0xcd4: 0x0018, 0xcd5: 0x0329, 0xcd6: 0x0749, 0xcd7: 0x0018, + 0xcd8: 0x0018, 0xcd9: 0x0339, 0xcda: 0x0751, 0xcdb: 0x00b9, 0xcdc: 0x00b9, 0xcdd: 0x00b9, + 0xcde: 0x0018, 0xcdf: 0x0018, 0xce0: 0x0759, 0xce1: 0x08c5, 0xce2: 0x0761, 0xce3: 0x0018, + 0xce4: 0x04b1, 0xce5: 0x0018, 0xce6: 0x0769, 0xce7: 0x0018, 0xce8: 0x04b1, 0xce9: 0x0018, + 0xcea: 0x0319, 0xceb: 0x0771, 0xcec: 0x02e9, 0xced: 0x03d9, 0xcee: 0x0018, 0xcef: 0x02f9, + 0xcf0: 0x02f9, 0xcf1: 0x03f1, 0xcf2: 0x0040, 0xcf3: 0x0321, 0xcf4: 0x0051, 0xcf5: 0x0779, + 0xcf6: 0x0781, 0xcf7: 0x0789, 0xcf8: 0x0791, 0xcf9: 0x0311, 0xcfa: 0x0018, 0xcfb: 0x08e5, + 0xcfc: 0x0799, 0xcfd: 0x03a1, 0xcfe: 0x03a1, 0xcff: 0x0799, + // Block 0x34, offset 0xd00 + 0xd00: 0x0905, 0xd01: 0x0018, 0xd02: 0x0018, 0xd03: 0x0018, 0xd04: 0x0018, 0xd05: 0x02f1, + 0xd06: 0x02f1, 0xd07: 0x02f9, 0xd08: 0x0311, 0xd09: 0x00b1, 0xd0a: 0x0018, 0xd0b: 0x0018, + 0xd0c: 0x0018, 0xd0d: 0x0018, 0xd0e: 0x0008, 0xd0f: 0x0018, 0xd10: 0x07a1, 0xd11: 0x07a9, + 0xd12: 0x07b1, 0xd13: 0x07b9, 0xd14: 0x07c1, 0xd15: 0x07c9, 0xd16: 0x07d1, 0xd17: 0x07d9, + 0xd18: 0x07e1, 0xd19: 0x07e9, 0xd1a: 0x07f1, 0xd1b: 0x07f9, 0xd1c: 0x0801, 0xd1d: 0x0809, + 0xd1e: 0x0811, 0xd1f: 0x0819, 0xd20: 0x0311, 0xd21: 0x0821, 0xd22: 0x091d, 0xd23: 0x0829, + 0xd24: 0x0391, 0xd25: 0x0831, 0xd26: 0x093d, 0xd27: 0x0839, 0xd28: 0x0841, 0xd29: 0x0109, + 0xd2a: 0x0849, 0xd2b: 0x095d, 0xd2c: 0x0101, 0xd2d: 0x03d9, 0xd2e: 0x02f1, 0xd2f: 0x0321, + 0xd30: 0x0311, 0xd31: 0x0821, 0xd32: 0x097d, 0xd33: 0x0829, 0xd34: 0x0391, 0xd35: 0x0831, + 0xd36: 0x099d, 0xd37: 0x0839, 0xd38: 0x0841, 0xd39: 0x0109, 0xd3a: 0x0849, 0xd3b: 0x09bd, + 0xd3c: 0x0101, 0xd3d: 0x03d9, 0xd3e: 0x02f1, 0xd3f: 0x0321, + // Block 0x35, offset 0xd40 + 0xd40: 0x0018, 0xd41: 0x0018, 0xd42: 0x0018, 0xd43: 0x0018, 0xd44: 0x0018, 0xd45: 0x0018, + 0xd46: 0x0018, 0xd47: 0x0018, 0xd48: 0x0018, 0xd49: 0x0018, 0xd4a: 0x0018, 0xd4b: 0x0040, + 0xd4c: 0x0040, 0xd4d: 0x0040, 0xd4e: 0x0040, 0xd4f: 0x0040, 0xd50: 0x0040, 0xd51: 0x0040, + 0xd52: 0x0040, 0xd53: 0x0040, 0xd54: 0x0040, 0xd55: 0x0040, 0xd56: 0x0040, 0xd57: 0x0040, + 0xd58: 0x0040, 0xd59: 0x0040, 0xd5a: 0x0040, 0xd5b: 0x0040, 0xd5c: 0x0040, 0xd5d: 0x0040, + 
0xd5e: 0x0040, 0xd5f: 0x0040, 0xd60: 0x0049, 0xd61: 0x0029, 0xd62: 0x0031, 0xd63: 0x06e9, + 0xd64: 0x06f1, 0xd65: 0x06f9, 0xd66: 0x0701, 0xd67: 0x0709, 0xd68: 0x0711, 0xd69: 0x0879, + 0xd6a: 0x0881, 0xd6b: 0x0889, 0xd6c: 0x0891, 0xd6d: 0x0899, 0xd6e: 0x08a1, 0xd6f: 0x08a9, + 0xd70: 0x08b1, 0xd71: 0x08b9, 0xd72: 0x08c1, 0xd73: 0x08c9, 0xd74: 0x0a1e, 0xd75: 0x0a3e, + 0xd76: 0x0a5e, 0xd77: 0x0a7e, 0xd78: 0x0a9e, 0xd79: 0x0abe, 0xd7a: 0x0ade, 0xd7b: 0x0afe, + 0xd7c: 0x0b1e, 0xd7d: 0x08d2, 0xd7e: 0x08da, 0xd7f: 0x08e2, + // Block 0x36, offset 0xd80 + 0xd80: 0x08ea, 0xd81: 0x08f2, 0xd82: 0x08fa, 0xd83: 0x0902, 0xd84: 0x090a, 0xd85: 0x0912, + 0xd86: 0x091a, 0xd87: 0x0922, 0xd88: 0x0040, 0xd89: 0x0040, 0xd8a: 0x0040, 0xd8b: 0x0040, + 0xd8c: 0x0040, 0xd8d: 0x0040, 0xd8e: 0x0040, 0xd8f: 0x0040, 0xd90: 0x0040, 0xd91: 0x0040, + 0xd92: 0x0040, 0xd93: 0x0040, 0xd94: 0x0040, 0xd95: 0x0040, 0xd96: 0x0040, 0xd97: 0x0040, + 0xd98: 0x0040, 0xd99: 0x0040, 0xd9a: 0x0040, 0xd9b: 0x0040, 0xd9c: 0x0b3e, 0xd9d: 0x0b5e, + 0xd9e: 0x0b7e, 0xd9f: 0x0b9e, 0xda0: 0x0bbe, 0xda1: 0x0bde, 0xda2: 0x0bfe, 0xda3: 0x0c1e, + 0xda4: 0x0c3e, 0xda5: 0x0c5e, 0xda6: 0x0c7e, 0xda7: 0x0c9e, 0xda8: 0x0cbe, 0xda9: 0x0cde, + 0xdaa: 0x0cfe, 0xdab: 0x0d1e, 0xdac: 0x0d3e, 0xdad: 0x0d5e, 0xdae: 0x0d7e, 0xdaf: 0x0d9e, + 0xdb0: 0x0dbe, 0xdb1: 0x0dde, 0xdb2: 0x0dfe, 0xdb3: 0x0e1e, 0xdb4: 0x0e3e, 0xdb5: 0x0e5e, + 0xdb6: 0x0019, 0xdb7: 0x02e9, 0xdb8: 0x03d9, 0xdb9: 0x02f1, 0xdba: 0x02f9, 0xdbb: 0x03f1, + 0xdbc: 0x0309, 0xdbd: 0x00a9, 0xdbe: 0x0311, 0xdbf: 0x00b1, + // Block 0x37, offset 0xdc0 + 0xdc0: 0x0319, 0xdc1: 0x0101, 0xdc2: 0x0321, 0xdc3: 0x0329, 0xdc4: 0x0051, 0xdc5: 0x0339, + 0xdc6: 0x0751, 0xdc7: 0x00b9, 0xdc8: 0x0089, 0xdc9: 0x0341, 0xdca: 0x0349, 0xdcb: 0x0391, + 0xdcc: 0x00c1, 0xdcd: 0x0109, 0xdce: 0x00c9, 0xdcf: 0x04b1, 0xdd0: 0x0019, 0xdd1: 0x02e9, + 0xdd2: 0x03d9, 0xdd3: 0x02f1, 0xdd4: 0x02f9, 0xdd5: 0x03f1, 0xdd6: 0x0309, 0xdd7: 0x00a9, + 0xdd8: 0x0311, 0xdd9: 0x00b1, 0xdda: 0x0319, 0xddb: 0x0101, 0xddc: 0x0321, 0xddd: 0x0329, + 0xdde: 0x0051, 0xddf: 0x0339, 0xde0: 0x0751, 0xde1: 0x00b9, 0xde2: 0x0089, 0xde3: 0x0341, + 0xde4: 0x0349, 0xde5: 0x0391, 0xde6: 0x00c1, 0xde7: 0x0109, 0xde8: 0x00c9, 0xde9: 0x04b1, + 0xdea: 0x06e1, 0xdeb: 0x0018, 0xdec: 0x0018, 0xded: 0x0018, 0xdee: 0x0018, 0xdef: 0x0018, + 0xdf0: 0x0018, 0xdf1: 0x0018, 0xdf2: 0x0018, 0xdf3: 0x0018, 0xdf4: 0x0018, 0xdf5: 0x0018, + 0xdf6: 0x0018, 0xdf7: 0x0018, 0xdf8: 0x0018, 0xdf9: 0x0018, 0xdfa: 0x0018, 0xdfb: 0x0018, + 0xdfc: 0x0018, 0xdfd: 0x0018, 0xdfe: 0x0018, 0xdff: 0x0018, + // Block 0x38, offset 0xe00 + 0xe00: 0x0008, 0xe01: 0x0008, 0xe02: 0x0008, 0xe03: 0x0008, 0xe04: 0x0008, 0xe05: 0x0008, + 0xe06: 0x0008, 0xe07: 0x0008, 0xe08: 0x0008, 0xe09: 0x0008, 0xe0a: 0x0008, 0xe0b: 0x0008, + 0xe0c: 0x0008, 0xe0d: 0x0008, 0xe0e: 0x0008, 0xe0f: 0x0008, 0xe10: 0x0008, 0xe11: 0x0008, + 0xe12: 0x0008, 0xe13: 0x0008, 0xe14: 0x0008, 0xe15: 0x0008, 0xe16: 0x0008, 0xe17: 0x0008, + 0xe18: 0x0008, 0xe19: 0x0008, 0xe1a: 0x0008, 0xe1b: 0x0008, 0xe1c: 0x0008, 0xe1d: 0x0008, + 0xe1e: 0x0008, 0xe1f: 0x0008, 0xe20: 0xe00d, 0xe21: 0x0008, 0xe22: 0x0941, 0xe23: 0x0ed5, + 0xe24: 0x0949, 0xe25: 0x0008, 0xe26: 0x0008, 0xe27: 0xe07d, 0xe28: 0x0008, 0xe29: 0xe01d, + 0xe2a: 0x0008, 0xe2b: 0xe03d, 0xe2c: 0x0008, 0xe2d: 0x0359, 0xe2e: 0x0441, 0xe2f: 0x0351, + 0xe30: 0x03d1, 0xe31: 0x0008, 0xe32: 0xe00d, 0xe33: 0x0008, 0xe34: 0x0008, 0xe35: 0xe01d, + 0xe36: 0x0008, 0xe37: 0x0008, 0xe38: 0x0008, 0xe39: 0x0008, 0xe3a: 0x0008, 0xe3b: 0x0008, + 0xe3c: 0x00b1, 0xe3d: 0x0391, 0xe3e: 0x0951, 0xe3f: 
0x0959, + // Block 0x39, offset 0xe40 + 0xe40: 0xe00d, 0xe41: 0x0008, 0xe42: 0xe00d, 0xe43: 0x0008, 0xe44: 0xe00d, 0xe45: 0x0008, + 0xe46: 0xe00d, 0xe47: 0x0008, 0xe48: 0xe00d, 0xe49: 0x0008, 0xe4a: 0xe00d, 0xe4b: 0x0008, + 0xe4c: 0xe00d, 0xe4d: 0x0008, 0xe4e: 0xe00d, 0xe4f: 0x0008, 0xe50: 0xe00d, 0xe51: 0x0008, + 0xe52: 0xe00d, 0xe53: 0x0008, 0xe54: 0xe00d, 0xe55: 0x0008, 0xe56: 0xe00d, 0xe57: 0x0008, + 0xe58: 0xe00d, 0xe59: 0x0008, 0xe5a: 0xe00d, 0xe5b: 0x0008, 0xe5c: 0xe00d, 0xe5d: 0x0008, + 0xe5e: 0xe00d, 0xe5f: 0x0008, 0xe60: 0xe00d, 0xe61: 0x0008, 0xe62: 0xe00d, 0xe63: 0x0008, + 0xe64: 0x0008, 0xe65: 0x0018, 0xe66: 0x0018, 0xe67: 0x0018, 0xe68: 0x0018, 0xe69: 0x0018, + 0xe6a: 0x0018, 0xe6b: 0xe03d, 0xe6c: 0x0008, 0xe6d: 0xe01d, 0xe6e: 0x0008, 0xe6f: 0x3308, + 0xe70: 0x3308, 0xe71: 0x3308, 0xe72: 0xe00d, 0xe73: 0x0008, 0xe74: 0x0040, 0xe75: 0x0040, + 0xe76: 0x0040, 0xe77: 0x0040, 0xe78: 0x0040, 0xe79: 0x0018, 0xe7a: 0x0018, 0xe7b: 0x0018, + 0xe7c: 0x0018, 0xe7d: 0x0018, 0xe7e: 0x0018, 0xe7f: 0x0018, + // Block 0x3a, offset 0xe80 + 0xe80: 0x2715, 0xe81: 0x2735, 0xe82: 0x2755, 0xe83: 0x2775, 0xe84: 0x2795, 0xe85: 0x27b5, + 0xe86: 0x27d5, 0xe87: 0x27f5, 0xe88: 0x2815, 0xe89: 0x2835, 0xe8a: 0x2855, 0xe8b: 0x2875, + 0xe8c: 0x2895, 0xe8d: 0x28b5, 0xe8e: 0x28d5, 0xe8f: 0x28f5, 0xe90: 0x2915, 0xe91: 0x2935, + 0xe92: 0x2955, 0xe93: 0x2975, 0xe94: 0x2995, 0xe95: 0x29b5, 0xe96: 0x0040, 0xe97: 0x0040, + 0xe98: 0x0040, 0xe99: 0x0040, 0xe9a: 0x0040, 0xe9b: 0x0040, 0xe9c: 0x0040, 0xe9d: 0x0040, + 0xe9e: 0x0040, 0xe9f: 0x0040, 0xea0: 0x0040, 0xea1: 0x0040, 0xea2: 0x0040, 0xea3: 0x0040, + 0xea4: 0x0040, 0xea5: 0x0040, 0xea6: 0x0040, 0xea7: 0x0040, 0xea8: 0x0040, 0xea9: 0x0040, + 0xeaa: 0x0040, 0xeab: 0x0040, 0xeac: 0x0040, 0xead: 0x0040, 0xeae: 0x0040, 0xeaf: 0x0040, + 0xeb0: 0x0040, 0xeb1: 0x0040, 0xeb2: 0x0040, 0xeb3: 0x0040, 0xeb4: 0x0040, 0xeb5: 0x0040, + 0xeb6: 0x0040, 0xeb7: 0x0040, 0xeb8: 0x0040, 0xeb9: 0x0040, 0xeba: 0x0040, 0xebb: 0x0040, + 0xebc: 0x0040, 0xebd: 0x0040, 0xebe: 0x0040, 0xebf: 0x0040, + // Block 0x3b, offset 0xec0 + 0xec0: 0x000a, 0xec1: 0x0018, 0xec2: 0x0961, 0xec3: 0x0018, 0xec4: 0x0018, 0xec5: 0x0008, + 0xec6: 0x0008, 0xec7: 0x0008, 0xec8: 0x0018, 0xec9: 0x0018, 0xeca: 0x0018, 0xecb: 0x0018, + 0xecc: 0x0018, 0xecd: 0x0018, 0xece: 0x0018, 0xecf: 0x0018, 0xed0: 0x0018, 0xed1: 0x0018, + 0xed2: 0x0018, 0xed3: 0x0018, 0xed4: 0x0018, 0xed5: 0x0018, 0xed6: 0x0018, 0xed7: 0x0018, + 0xed8: 0x0018, 0xed9: 0x0018, 0xeda: 0x0018, 0xedb: 0x0018, 0xedc: 0x0018, 0xedd: 0x0018, + 0xede: 0x0018, 0xedf: 0x0018, 0xee0: 0x0018, 0xee1: 0x0018, 0xee2: 0x0018, 0xee3: 0x0018, + 0xee4: 0x0018, 0xee5: 0x0018, 0xee6: 0x0018, 0xee7: 0x0018, 0xee8: 0x0018, 0xee9: 0x0018, + 0xeea: 0x3308, 0xeeb: 0x3308, 0xeec: 0x3308, 0xeed: 0x3308, 0xeee: 0x3018, 0xeef: 0x3018, + 0xef0: 0x0018, 0xef1: 0x0018, 0xef2: 0x0018, 0xef3: 0x0018, 0xef4: 0x0018, 0xef5: 0x0018, + 0xef6: 0xe125, 0xef7: 0x0018, 0xef8: 0x29d5, 0xef9: 0x29f5, 0xefa: 0x2a15, 0xefb: 0x0018, + 0xefc: 0x0008, 0xefd: 0x0018, 0xefe: 0x0018, 0xeff: 0x0018, + // Block 0x3c, offset 0xf00 + 0xf00: 0x2b55, 0xf01: 0x2b75, 0xf02: 0x2b95, 0xf03: 0x2bb5, 0xf04: 0x2bd5, 0xf05: 0x2bf5, + 0xf06: 0x2bf5, 0xf07: 0x2bf5, 0xf08: 0x2c15, 0xf09: 0x2c15, 0xf0a: 0x2c15, 0xf0b: 0x2c15, + 0xf0c: 0x2c35, 0xf0d: 0x2c35, 0xf0e: 0x2c35, 0xf0f: 0x2c55, 0xf10: 0x2c75, 0xf11: 0x2c75, + 0xf12: 0x2a95, 0xf13: 0x2a95, 0xf14: 0x2c75, 0xf15: 0x2c75, 0xf16: 0x2c95, 0xf17: 0x2c95, + 0xf18: 0x2c75, 0xf19: 0x2c75, 0xf1a: 0x2a95, 0xf1b: 0x2a95, 0xf1c: 0x2c75, 0xf1d: 0x2c75, + 0xf1e: 0x2c55, 
0xf1f: 0x2c55, 0xf20: 0x2cb5, 0xf21: 0x2cb5, 0xf22: 0x2cd5, 0xf23: 0x2cd5, + 0xf24: 0x0040, 0xf25: 0x2cf5, 0xf26: 0x2d15, 0xf27: 0x2d35, 0xf28: 0x2d35, 0xf29: 0x2d55, + 0xf2a: 0x2d75, 0xf2b: 0x2d95, 0xf2c: 0x2db5, 0xf2d: 0x2dd5, 0xf2e: 0x2df5, 0xf2f: 0x2e15, + 0xf30: 0x2e35, 0xf31: 0x2e55, 0xf32: 0x2e55, 0xf33: 0x2e75, 0xf34: 0x2e95, 0xf35: 0x2e95, + 0xf36: 0x2eb5, 0xf37: 0x2ed5, 0xf38: 0x2e75, 0xf39: 0x2ef5, 0xf3a: 0x2f15, 0xf3b: 0x2ef5, + 0xf3c: 0x2e75, 0xf3d: 0x2f35, 0xf3e: 0x2f55, 0xf3f: 0x2f75, + // Block 0x3d, offset 0xf40 + 0xf40: 0x2f95, 0xf41: 0x2fb5, 0xf42: 0x2d15, 0xf43: 0x2cf5, 0xf44: 0x2fd5, 0xf45: 0x2ff5, + 0xf46: 0x3015, 0xf47: 0x3035, 0xf48: 0x3055, 0xf49: 0x3075, 0xf4a: 0x3095, 0xf4b: 0x30b5, + 0xf4c: 0x30d5, 0xf4d: 0x30f5, 0xf4e: 0x3115, 0xf4f: 0x0040, 0xf50: 0x0018, 0xf51: 0x0018, + 0xf52: 0x3135, 0xf53: 0x3155, 0xf54: 0x3175, 0xf55: 0x3195, 0xf56: 0x31b5, 0xf57: 0x31d5, + 0xf58: 0x31f5, 0xf59: 0x3215, 0xf5a: 0x3235, 0xf5b: 0x3255, 0xf5c: 0x3175, 0xf5d: 0x3275, + 0xf5e: 0x3295, 0xf5f: 0x32b5, 0xf60: 0x0008, 0xf61: 0x0008, 0xf62: 0x0008, 0xf63: 0x0008, + 0xf64: 0x0008, 0xf65: 0x0008, 0xf66: 0x0008, 0xf67: 0x0008, 0xf68: 0x0008, 0xf69: 0x0008, + 0xf6a: 0x0008, 0xf6b: 0x0008, 0xf6c: 0x0008, 0xf6d: 0x0008, 0xf6e: 0x0008, 0xf6f: 0x0008, + 0xf70: 0x0008, 0xf71: 0x0008, 0xf72: 0x0008, 0xf73: 0x0008, 0xf74: 0x0008, 0xf75: 0x0008, + 0xf76: 0x0008, 0xf77: 0x0008, 0xf78: 0x0008, 0xf79: 0x0008, 0xf7a: 0x0008, 0xf7b: 0x0008, + 0xf7c: 0x0008, 0xf7d: 0x0008, 0xf7e: 0x0008, 0xf7f: 0x0008, + // Block 0x3e, offset 0xf80 + 0xf80: 0x0b82, 0xf81: 0x0b8a, 0xf82: 0x0b92, 0xf83: 0x0b9a, 0xf84: 0x32d5, 0xf85: 0x32f5, + 0xf86: 0x3315, 0xf87: 0x3335, 0xf88: 0x0018, 0xf89: 0x0018, 0xf8a: 0x0018, 0xf8b: 0x0018, + 0xf8c: 0x0018, 0xf8d: 0x0018, 0xf8e: 0x0018, 0xf8f: 0x0018, 0xf90: 0x3355, 0xf91: 0x0ba1, + 0xf92: 0x0ba9, 0xf93: 0x0bb1, 0xf94: 0x0bb9, 0xf95: 0x0bc1, 0xf96: 0x0bc9, 0xf97: 0x0bd1, + 0xf98: 0x0bd9, 0xf99: 0x0be1, 0xf9a: 0x0be9, 0xf9b: 0x0bf1, 0xf9c: 0x0bf9, 0xf9d: 0x0c01, + 0xf9e: 0x0c09, 0xf9f: 0x0c11, 0xfa0: 0x3375, 0xfa1: 0x3395, 0xfa2: 0x33b5, 0xfa3: 0x33d5, + 0xfa4: 0x33f5, 0xfa5: 0x33f5, 0xfa6: 0x3415, 0xfa7: 0x3435, 0xfa8: 0x3455, 0xfa9: 0x3475, + 0xfaa: 0x3495, 0xfab: 0x34b5, 0xfac: 0x34d5, 0xfad: 0x34f5, 0xfae: 0x3515, 0xfaf: 0x3535, + 0xfb0: 0x3555, 0xfb1: 0x3575, 0xfb2: 0x3595, 0xfb3: 0x35b5, 0xfb4: 0x35d5, 0xfb5: 0x35f5, + 0xfb6: 0x3615, 0xfb7: 0x3635, 0xfb8: 0x3655, 0xfb9: 0x3675, 0xfba: 0x3695, 0xfbb: 0x36b5, + 0xfbc: 0x0c19, 0xfbd: 0x0c21, 0xfbe: 0x36d5, 0xfbf: 0x0018, + // Block 0x3f, offset 0xfc0 + 0xfc0: 0x36f5, 0xfc1: 0x3715, 0xfc2: 0x3735, 0xfc3: 0x3755, 0xfc4: 0x3775, 0xfc5: 0x3795, + 0xfc6: 0x37b5, 0xfc7: 0x37d5, 0xfc8: 0x37f5, 0xfc9: 0x3815, 0xfca: 0x3835, 0xfcb: 0x3855, + 0xfcc: 0x3875, 0xfcd: 0x3895, 0xfce: 0x38b5, 0xfcf: 0x38d5, 0xfd0: 0x38f5, 0xfd1: 0x3915, + 0xfd2: 0x3935, 0xfd3: 0x3955, 0xfd4: 0x3975, 0xfd5: 0x3995, 0xfd6: 0x39b5, 0xfd7: 0x39d5, + 0xfd8: 0x39f5, 0xfd9: 0x3a15, 0xfda: 0x3a35, 0xfdb: 0x3a55, 0xfdc: 0x3a75, 0xfdd: 0x3a95, + 0xfde: 0x3ab5, 0xfdf: 0x3ad5, 0xfe0: 0x3af5, 0xfe1: 0x3b15, 0xfe2: 0x3b35, 0xfe3: 0x3b55, + 0xfe4: 0x3b75, 0xfe5: 0x3b95, 0xfe6: 0x1295, 0xfe7: 0x3bb5, 0xfe8: 0x3bd5, 0xfe9: 0x3bf5, + 0xfea: 0x3c15, 0xfeb: 0x3c35, 0xfec: 0x3c55, 0xfed: 0x3c75, 0xfee: 0x23b5, 0xfef: 0x3c95, + 0xff0: 0x3cb5, 0xff1: 0x0c29, 0xff2: 0x0c31, 0xff3: 0x0c39, 0xff4: 0x0c41, 0xff5: 0x0c49, + 0xff6: 0x0c51, 0xff7: 0x0c59, 0xff8: 0x0c61, 0xff9: 0x0c69, 0xffa: 0x0c71, 0xffb: 0x0c79, + 0xffc: 0x0c81, 0xffd: 0x0c89, 0xffe: 0x0c91, 0xfff: 0x0c99, + // Block 
0x40, offset 0x1000 + 0x1000: 0x0ca1, 0x1001: 0x0ca9, 0x1002: 0x0cb1, 0x1003: 0x0cb9, 0x1004: 0x0cc1, 0x1005: 0x0cc9, + 0x1006: 0x0cd1, 0x1007: 0x0cd9, 0x1008: 0x0ce1, 0x1009: 0x0ce9, 0x100a: 0x0cf1, 0x100b: 0x0cf9, + 0x100c: 0x0d01, 0x100d: 0x3cd5, 0x100e: 0x0d09, 0x100f: 0x3cf5, 0x1010: 0x3d15, 0x1011: 0x3d2d, + 0x1012: 0x3d45, 0x1013: 0x3d5d, 0x1014: 0x3d75, 0x1015: 0x3d75, 0x1016: 0x3d5d, 0x1017: 0x3d8d, + 0x1018: 0x07d5, 0x1019: 0x3da5, 0x101a: 0x3dbd, 0x101b: 0x3dd5, 0x101c: 0x3ded, 0x101d: 0x3e05, + 0x101e: 0x3e1d, 0x101f: 0x3e35, 0x1020: 0x3e4d, 0x1021: 0x3e65, 0x1022: 0x3e7d, 0x1023: 0x3e95, + 0x1024: 0x3ead, 0x1025: 0x3ead, 0x1026: 0x3ec5, 0x1027: 0x3ec5, 0x1028: 0x3edd, 0x1029: 0x3edd, + 0x102a: 0x3ef5, 0x102b: 0x3f0d, 0x102c: 0x3f25, 0x102d: 0x3f3d, 0x102e: 0x3f55, 0x102f: 0x3f55, + 0x1030: 0x3f6d, 0x1031: 0x3f6d, 0x1032: 0x3f6d, 0x1033: 0x3f85, 0x1034: 0x3f9d, 0x1035: 0x3fb5, + 0x1036: 0x3fcd, 0x1037: 0x3fb5, 0x1038: 0x3fe5, 0x1039: 0x3ffd, 0x103a: 0x3f85, 0x103b: 0x4015, + 0x103c: 0x402d, 0x103d: 0x402d, 0x103e: 0x402d, 0x103f: 0x0d11, + // Block 0x41, offset 0x1040 + 0x1040: 0x10f9, 0x1041: 0x1101, 0x1042: 0x40a5, 0x1043: 0x1109, 0x1044: 0x1111, 0x1045: 0x1119, + 0x1046: 0x1121, 0x1047: 0x1129, 0x1048: 0x40c5, 0x1049: 0x1131, 0x104a: 0x1139, 0x104b: 0x1141, + 0x104c: 0x40e5, 0x104d: 0x40e5, 0x104e: 0x1149, 0x104f: 0x1151, 0x1050: 0x1159, 0x1051: 0x4105, + 0x1052: 0x4125, 0x1053: 0x4145, 0x1054: 0x4165, 0x1055: 0x4185, 0x1056: 0x1161, 0x1057: 0x1169, + 0x1058: 0x1171, 0x1059: 0x1179, 0x105a: 0x1181, 0x105b: 0x41a5, 0x105c: 0x1189, 0x105d: 0x1191, + 0x105e: 0x1199, 0x105f: 0x41c5, 0x1060: 0x41e5, 0x1061: 0x11a1, 0x1062: 0x4205, 0x1063: 0x4225, + 0x1064: 0x4245, 0x1065: 0x11a9, 0x1066: 0x4265, 0x1067: 0x11b1, 0x1068: 0x11b9, 0x1069: 0x10f9, + 0x106a: 0x4285, 0x106b: 0x42a5, 0x106c: 0x42c5, 0x106d: 0x42e5, 0x106e: 0x11c1, 0x106f: 0x11c9, + 0x1070: 0x11d1, 0x1071: 0x11d9, 0x1072: 0x4305, 0x1073: 0x11e1, 0x1074: 0x11e9, 0x1075: 0x11f1, + 0x1076: 0x4325, 0x1077: 0x11f9, 0x1078: 0x1201, 0x1079: 0x11f9, 0x107a: 0x1209, 0x107b: 0x1211, + 0x107c: 0x4345, 0x107d: 0x1219, 0x107e: 0x1221, 0x107f: 0x1219, + // Block 0x42, offset 0x1080 + 0x1080: 0x4365, 0x1081: 0x4385, 0x1082: 0x0040, 0x1083: 0x1229, 0x1084: 0x1231, 0x1085: 0x1239, + 0x1086: 0x1241, 0x1087: 0x0040, 0x1088: 0x1249, 0x1089: 0x1251, 0x108a: 0x1259, 0x108b: 0x1261, + 0x108c: 0x1269, 0x108d: 0x1271, 0x108e: 0x1199, 0x108f: 0x1279, 0x1090: 0x1281, 0x1091: 0x1289, + 0x1092: 0x43a5, 0x1093: 0x1291, 0x1094: 0x1121, 0x1095: 0x43c5, 0x1096: 0x43e5, 0x1097: 0x1299, + 0x1098: 0x0040, 0x1099: 0x4405, 0x109a: 0x12a1, 0x109b: 0x12a9, 0x109c: 0x12b1, 0x109d: 0x12b9, + 0x109e: 0x12c1, 0x109f: 0x12c9, 0x10a0: 0x12d1, 0x10a1: 0x12d9, 0x10a2: 0x12e1, 0x10a3: 0x12e9, + 0x10a4: 0x12f1, 0x10a5: 0x12f9, 0x10a6: 0x1301, 0x10a7: 0x1309, 0x10a8: 0x1311, 0x10a9: 0x1319, + 0x10aa: 0x1321, 0x10ab: 0x1329, 0x10ac: 0x1331, 0x10ad: 0x1339, 0x10ae: 0x1341, 0x10af: 0x1349, + 0x10b0: 0x1351, 0x10b1: 0x1359, 0x10b2: 0x1361, 0x10b3: 0x1369, 0x10b4: 0x1371, 0x10b5: 0x1379, + 0x10b6: 0x1381, 0x10b7: 0x1389, 0x10b8: 0x1391, 0x10b9: 0x1399, 0x10ba: 0x13a1, 0x10bb: 0x13a9, + 0x10bc: 0x13b1, 0x10bd: 0x13b9, 0x10be: 0x13c1, 0x10bf: 0x4425, + // Block 0x43, offset 0x10c0 + 0x10c0: 0xe00d, 0x10c1: 0x0008, 0x10c2: 0xe00d, 0x10c3: 0x0008, 0x10c4: 0xe00d, 0x10c5: 0x0008, + 0x10c6: 0xe00d, 0x10c7: 0x0008, 0x10c8: 0xe00d, 0x10c9: 0x0008, 0x10ca: 0xe00d, 0x10cb: 0x0008, + 0x10cc: 0xe00d, 0x10cd: 0x0008, 0x10ce: 0xe00d, 0x10cf: 0x0008, 0x10d0: 0xe00d, 0x10d1: 0x0008, + 0x10d2: 
0xe00d, 0x10d3: 0x0008, 0x10d4: 0xe00d, 0x10d5: 0x0008, 0x10d6: 0xe00d, 0x10d7: 0x0008, + 0x10d8: 0xe00d, 0x10d9: 0x0008, 0x10da: 0xe00d, 0x10db: 0x0008, 0x10dc: 0xe00d, 0x10dd: 0x0008, + 0x10de: 0xe00d, 0x10df: 0x0008, 0x10e0: 0xe00d, 0x10e1: 0x0008, 0x10e2: 0xe00d, 0x10e3: 0x0008, + 0x10e4: 0xe00d, 0x10e5: 0x0008, 0x10e6: 0xe00d, 0x10e7: 0x0008, 0x10e8: 0xe00d, 0x10e9: 0x0008, + 0x10ea: 0xe00d, 0x10eb: 0x0008, 0x10ec: 0xe00d, 0x10ed: 0x0008, 0x10ee: 0x0008, 0x10ef: 0x3308, + 0x10f0: 0x3318, 0x10f1: 0x3318, 0x10f2: 0x3318, 0x10f3: 0x0018, 0x10f4: 0x3308, 0x10f5: 0x3308, + 0x10f6: 0x3308, 0x10f7: 0x3308, 0x10f8: 0x3308, 0x10f9: 0x3308, 0x10fa: 0x3308, 0x10fb: 0x3308, + 0x10fc: 0x3308, 0x10fd: 0x3308, 0x10fe: 0x0018, 0x10ff: 0x0008, + // Block 0x44, offset 0x1100 + 0x1100: 0xe00d, 0x1101: 0x0008, 0x1102: 0xe00d, 0x1103: 0x0008, 0x1104: 0xe00d, 0x1105: 0x0008, + 0x1106: 0xe00d, 0x1107: 0x0008, 0x1108: 0xe00d, 0x1109: 0x0008, 0x110a: 0xe00d, 0x110b: 0x0008, + 0x110c: 0xe00d, 0x110d: 0x0008, 0x110e: 0xe00d, 0x110f: 0x0008, 0x1110: 0xe00d, 0x1111: 0x0008, + 0x1112: 0xe00d, 0x1113: 0x0008, 0x1114: 0xe00d, 0x1115: 0x0008, 0x1116: 0xe00d, 0x1117: 0x0008, + 0x1118: 0xe00d, 0x1119: 0x0008, 0x111a: 0xe00d, 0x111b: 0x0008, 0x111c: 0x02d1, 0x111d: 0x13c9, + 0x111e: 0x3308, 0x111f: 0x3308, 0x1120: 0x0008, 0x1121: 0x0008, 0x1122: 0x0008, 0x1123: 0x0008, + 0x1124: 0x0008, 0x1125: 0x0008, 0x1126: 0x0008, 0x1127: 0x0008, 0x1128: 0x0008, 0x1129: 0x0008, + 0x112a: 0x0008, 0x112b: 0x0008, 0x112c: 0x0008, 0x112d: 0x0008, 0x112e: 0x0008, 0x112f: 0x0008, + 0x1130: 0x0008, 0x1131: 0x0008, 0x1132: 0x0008, 0x1133: 0x0008, 0x1134: 0x0008, 0x1135: 0x0008, + 0x1136: 0x0008, 0x1137: 0x0008, 0x1138: 0x0008, 0x1139: 0x0008, 0x113a: 0x0008, 0x113b: 0x0008, + 0x113c: 0x0008, 0x113d: 0x0008, 0x113e: 0x0008, 0x113f: 0x0008, + // Block 0x45, offset 0x1140 + 0x1140: 0x0018, 0x1141: 0x0018, 0x1142: 0x0018, 0x1143: 0x0018, 0x1144: 0x0018, 0x1145: 0x0018, + 0x1146: 0x0018, 0x1147: 0x0018, 0x1148: 0x0018, 0x1149: 0x0018, 0x114a: 0x0018, 0x114b: 0x0018, + 0x114c: 0x0018, 0x114d: 0x0018, 0x114e: 0x0018, 0x114f: 0x0018, 0x1150: 0x0018, 0x1151: 0x0018, + 0x1152: 0x0018, 0x1153: 0x0018, 0x1154: 0x0018, 0x1155: 0x0018, 0x1156: 0x0018, 0x1157: 0x0008, + 0x1158: 0x0008, 0x1159: 0x0008, 0x115a: 0x0008, 0x115b: 0x0008, 0x115c: 0x0008, 0x115d: 0x0008, + 0x115e: 0x0008, 0x115f: 0x0008, 0x1160: 0x0018, 0x1161: 0x0018, 0x1162: 0xe00d, 0x1163: 0x0008, + 0x1164: 0xe00d, 0x1165: 0x0008, 0x1166: 0xe00d, 0x1167: 0x0008, 0x1168: 0xe00d, 0x1169: 0x0008, + 0x116a: 0xe00d, 0x116b: 0x0008, 0x116c: 0xe00d, 0x116d: 0x0008, 0x116e: 0xe00d, 0x116f: 0x0008, + 0x1170: 0x0008, 0x1171: 0x0008, 0x1172: 0xe00d, 0x1173: 0x0008, 0x1174: 0xe00d, 0x1175: 0x0008, + 0x1176: 0xe00d, 0x1177: 0x0008, 0x1178: 0xe00d, 0x1179: 0x0008, 0x117a: 0xe00d, 0x117b: 0x0008, + 0x117c: 0xe00d, 0x117d: 0x0008, 0x117e: 0xe00d, 0x117f: 0x0008, + // Block 0x46, offset 0x1180 + 0x1180: 0xe00d, 0x1181: 0x0008, 0x1182: 0xe00d, 0x1183: 0x0008, 0x1184: 0xe00d, 0x1185: 0x0008, + 0x1186: 0xe00d, 0x1187: 0x0008, 0x1188: 0xe00d, 0x1189: 0x0008, 0x118a: 0xe00d, 0x118b: 0x0008, + 0x118c: 0xe00d, 0x118d: 0x0008, 0x118e: 0xe00d, 0x118f: 0x0008, 0x1190: 0xe00d, 0x1191: 0x0008, + 0x1192: 0xe00d, 0x1193: 0x0008, 0x1194: 0xe00d, 0x1195: 0x0008, 0x1196: 0xe00d, 0x1197: 0x0008, + 0x1198: 0xe00d, 0x1199: 0x0008, 0x119a: 0xe00d, 0x119b: 0x0008, 0x119c: 0xe00d, 0x119d: 0x0008, + 0x119e: 0xe00d, 0x119f: 0x0008, 0x11a0: 0xe00d, 0x11a1: 0x0008, 0x11a2: 0xe00d, 0x11a3: 0x0008, + 0x11a4: 0xe00d, 0x11a5: 0x0008, 
0x11a6: 0xe00d, 0x11a7: 0x0008, 0x11a8: 0xe00d, 0x11a9: 0x0008, + 0x11aa: 0xe00d, 0x11ab: 0x0008, 0x11ac: 0xe00d, 0x11ad: 0x0008, 0x11ae: 0xe00d, 0x11af: 0x0008, + 0x11b0: 0xe0fd, 0x11b1: 0x0008, 0x11b2: 0x0008, 0x11b3: 0x0008, 0x11b4: 0x0008, 0x11b5: 0x0008, + 0x11b6: 0x0008, 0x11b7: 0x0008, 0x11b8: 0x0008, 0x11b9: 0xe01d, 0x11ba: 0x0008, 0x11bb: 0xe03d, + 0x11bc: 0x0008, 0x11bd: 0x4445, 0x11be: 0xe00d, 0x11bf: 0x0008, + // Block 0x47, offset 0x11c0 + 0x11c0: 0xe00d, 0x11c1: 0x0008, 0x11c2: 0xe00d, 0x11c3: 0x0008, 0x11c4: 0xe00d, 0x11c5: 0x0008, + 0x11c6: 0xe00d, 0x11c7: 0x0008, 0x11c8: 0x0008, 0x11c9: 0x0018, 0x11ca: 0x0018, 0x11cb: 0xe03d, + 0x11cc: 0x0008, 0x11cd: 0x0409, 0x11ce: 0x0008, 0x11cf: 0x0008, 0x11d0: 0xe00d, 0x11d1: 0x0008, + 0x11d2: 0xe00d, 0x11d3: 0x0008, 0x11d4: 0x0008, 0x11d5: 0x0008, 0x11d6: 0xe00d, 0x11d7: 0x0008, + 0x11d8: 0xe00d, 0x11d9: 0x0008, 0x11da: 0xe00d, 0x11db: 0x0008, 0x11dc: 0xe00d, 0x11dd: 0x0008, + 0x11de: 0xe00d, 0x11df: 0x0008, 0x11e0: 0xe00d, 0x11e1: 0x0008, 0x11e2: 0xe00d, 0x11e3: 0x0008, + 0x11e4: 0xe00d, 0x11e5: 0x0008, 0x11e6: 0xe00d, 0x11e7: 0x0008, 0x11e8: 0xe00d, 0x11e9: 0x0008, + 0x11ea: 0x13d1, 0x11eb: 0x0371, 0x11ec: 0x0401, 0x11ed: 0x13d9, 0x11ee: 0x0421, 0x11ef: 0x0008, + 0x11f0: 0x13e1, 0x11f1: 0x13e9, 0x11f2: 0x0429, 0x11f3: 0x4465, 0x11f4: 0xe00d, 0x11f5: 0x0008, + 0x11f6: 0xe00d, 0x11f7: 0x0008, 0x11f8: 0xe00d, 0x11f9: 0x0008, 0x11fa: 0xe00d, 0x11fb: 0x0008, + 0x11fc: 0xe00d, 0x11fd: 0x0008, 0x11fe: 0xe00d, 0x11ff: 0x0008, + // Block 0x48, offset 0x1200 + 0x1200: 0xe00d, 0x1201: 0x0008, 0x1202: 0xe00d, 0x1203: 0x0008, 0x1204: 0x03f5, 0x1205: 0x0479, + 0x1206: 0x447d, 0x1207: 0xe07d, 0x1208: 0x0008, 0x1209: 0xe01d, 0x120a: 0x0008, 0x120b: 0x0040, + 0x120c: 0x0040, 0x120d: 0x0040, 0x120e: 0x0040, 0x120f: 0x0040, 0x1210: 0xe00d, 0x1211: 0x0008, + 0x1212: 0x0040, 0x1213: 0x0008, 0x1214: 0x0040, 0x1215: 0x0008, 0x1216: 0xe00d, 0x1217: 0x0008, + 0x1218: 0xe00d, 0x1219: 0x0008, 0x121a: 0x0040, 0x121b: 0x0040, 0x121c: 0x0040, 0x121d: 0x0040, + 0x121e: 0x0040, 0x121f: 0x0040, 0x1220: 0x0040, 0x1221: 0x0040, 0x1222: 0x0040, 0x1223: 0x0040, + 0x1224: 0x0040, 0x1225: 0x0040, 0x1226: 0x0040, 0x1227: 0x0040, 0x1228: 0x0040, 0x1229: 0x0040, + 0x122a: 0x0040, 0x122b: 0x0040, 0x122c: 0x0040, 0x122d: 0x0040, 0x122e: 0x0040, 0x122f: 0x0040, + 0x1230: 0x0040, 0x1231: 0x0040, 0x1232: 0x03d9, 0x1233: 0x03f1, 0x1234: 0x0751, 0x1235: 0xe01d, + 0x1236: 0x0008, 0x1237: 0x0008, 0x1238: 0x0741, 0x1239: 0x13f1, 0x123a: 0x0008, 0x123b: 0x0008, + 0x123c: 0x0008, 0x123d: 0x0008, 0x123e: 0x0008, 0x123f: 0x0008, + // Block 0x49, offset 0x1240 + 0x1240: 0x650d, 0x1241: 0x652d, 0x1242: 0x654d, 0x1243: 0x656d, 0x1244: 0x658d, 0x1245: 0x65ad, + 0x1246: 0x65cd, 0x1247: 0x65ed, 0x1248: 0x660d, 0x1249: 0x662d, 0x124a: 0x664d, 0x124b: 0x666d, + 0x124c: 0x668d, 0x124d: 0x66ad, 0x124e: 0x0008, 0x124f: 0x0008, 0x1250: 0x66cd, 0x1251: 0x0008, + 0x1252: 0x66ed, 0x1253: 0x0008, 0x1254: 0x0008, 0x1255: 0x670d, 0x1256: 0x672d, 0x1257: 0x674d, + 0x1258: 0x676d, 0x1259: 0x678d, 0x125a: 0x67ad, 0x125b: 0x67cd, 0x125c: 0x67ed, 0x125d: 0x680d, + 0x125e: 0x682d, 0x125f: 0x0008, 0x1260: 0x684d, 0x1261: 0x0008, 0x1262: 0x686d, 0x1263: 0x0008, + 0x1264: 0x0008, 0x1265: 0x688d, 0x1266: 0x68ad, 0x1267: 0x0008, 0x1268: 0x0008, 0x1269: 0x0008, + 0x126a: 0x68cd, 0x126b: 0x68ed, 0x126c: 0x690d, 0x126d: 0x692d, 0x126e: 0x694d, 0x126f: 0x696d, + 0x1270: 0x698d, 0x1271: 0x69ad, 0x1272: 0x69cd, 0x1273: 0x69ed, 0x1274: 0x6a0d, 0x1275: 0x6a2d, + 0x1276: 0x6a4d, 0x1277: 0x6a6d, 0x1278: 0x6a8d, 0x1279: 
0x6aad, 0x127a: 0x6acd, 0x127b: 0x6aed, + 0x127c: 0x6b0d, 0x127d: 0x6b2d, 0x127e: 0x6b4d, 0x127f: 0x6b6d, + // Block 0x4a, offset 0x1280 + 0x1280: 0x7acd, 0x1281: 0x7aed, 0x1282: 0x7b0d, 0x1283: 0x7b2d, 0x1284: 0x7b4d, 0x1285: 0x7b6d, + 0x1286: 0x7b8d, 0x1287: 0x7bad, 0x1288: 0x7bcd, 0x1289: 0x7bed, 0x128a: 0x7c0d, 0x128b: 0x7c2d, + 0x128c: 0x7c4d, 0x128d: 0x7c6d, 0x128e: 0x7c8d, 0x128f: 0x1409, 0x1290: 0x1411, 0x1291: 0x1419, + 0x1292: 0x7cad, 0x1293: 0x7ccd, 0x1294: 0x7ced, 0x1295: 0x1421, 0x1296: 0x1429, 0x1297: 0x1431, + 0x1298: 0x7d0d, 0x1299: 0x7d2d, 0x129a: 0x0040, 0x129b: 0x0040, 0x129c: 0x0040, 0x129d: 0x0040, + 0x129e: 0x0040, 0x129f: 0x0040, 0x12a0: 0x0040, 0x12a1: 0x0040, 0x12a2: 0x0040, 0x12a3: 0x0040, + 0x12a4: 0x0040, 0x12a5: 0x0040, 0x12a6: 0x0040, 0x12a7: 0x0040, 0x12a8: 0x0040, 0x12a9: 0x0040, + 0x12aa: 0x0040, 0x12ab: 0x0040, 0x12ac: 0x0040, 0x12ad: 0x0040, 0x12ae: 0x0040, 0x12af: 0x0040, + 0x12b0: 0x0040, 0x12b1: 0x0040, 0x12b2: 0x0040, 0x12b3: 0x0040, 0x12b4: 0x0040, 0x12b5: 0x0040, + 0x12b6: 0x0040, 0x12b7: 0x0040, 0x12b8: 0x0040, 0x12b9: 0x0040, 0x12ba: 0x0040, 0x12bb: 0x0040, + 0x12bc: 0x0040, 0x12bd: 0x0040, 0x12be: 0x0040, 0x12bf: 0x0040, + // Block 0x4b, offset 0x12c0 + 0x12c0: 0x1439, 0x12c1: 0x1441, 0x12c2: 0x1449, 0x12c3: 0x7d4d, 0x12c4: 0x7d6d, 0x12c5: 0x1451, + 0x12c6: 0x1451, 0x12c7: 0x0040, 0x12c8: 0x0040, 0x12c9: 0x0040, 0x12ca: 0x0040, 0x12cb: 0x0040, + 0x12cc: 0x0040, 0x12cd: 0x0040, 0x12ce: 0x0040, 0x12cf: 0x0040, 0x12d0: 0x0040, 0x12d1: 0x0040, + 0x12d2: 0x0040, 0x12d3: 0x1459, 0x12d4: 0x1461, 0x12d5: 0x1469, 0x12d6: 0x1471, 0x12d7: 0x1479, + 0x12d8: 0x0040, 0x12d9: 0x0040, 0x12da: 0x0040, 0x12db: 0x0040, 0x12dc: 0x0040, 0x12dd: 0x1481, + 0x12de: 0x3308, 0x12df: 0x1489, 0x12e0: 0x1491, 0x12e1: 0x0779, 0x12e2: 0x0791, 0x12e3: 0x1499, + 0x12e4: 0x14a1, 0x12e5: 0x14a9, 0x12e6: 0x14b1, 0x12e7: 0x14b9, 0x12e8: 0x14c1, 0x12e9: 0x071a, + 0x12ea: 0x14c9, 0x12eb: 0x14d1, 0x12ec: 0x14d9, 0x12ed: 0x14e1, 0x12ee: 0x14e9, 0x12ef: 0x14f1, + 0x12f0: 0x14f9, 0x12f1: 0x1501, 0x12f2: 0x1509, 0x12f3: 0x1511, 0x12f4: 0x1519, 0x12f5: 0x1521, + 0x12f6: 0x1529, 0x12f7: 0x0040, 0x12f8: 0x1531, 0x12f9: 0x1539, 0x12fa: 0x1541, 0x12fb: 0x1549, + 0x12fc: 0x1551, 0x12fd: 0x0040, 0x12fe: 0x1559, 0x12ff: 0x0040, + // Block 0x4c, offset 0x1300 + 0x1300: 0x1561, 0x1301: 0x1569, 0x1302: 0x0040, 0x1303: 0x1571, 0x1304: 0x1579, 0x1305: 0x0040, + 0x1306: 0x1581, 0x1307: 0x1589, 0x1308: 0x1591, 0x1309: 0x1599, 0x130a: 0x15a1, 0x130b: 0x15a9, + 0x130c: 0x15b1, 0x130d: 0x15b9, 0x130e: 0x15c1, 0x130f: 0x15c9, 0x1310: 0x15d1, 0x1311: 0x15d1, + 0x1312: 0x15d9, 0x1313: 0x15d9, 0x1314: 0x15d9, 0x1315: 0x15d9, 0x1316: 0x15e1, 0x1317: 0x15e1, + 0x1318: 0x15e1, 0x1319: 0x15e1, 0x131a: 0x15e9, 0x131b: 0x15e9, 0x131c: 0x15e9, 0x131d: 0x15e9, + 0x131e: 0x15f1, 0x131f: 0x15f1, 0x1320: 0x15f1, 0x1321: 0x15f1, 0x1322: 0x15f9, 0x1323: 0x15f9, + 0x1324: 0x15f9, 0x1325: 0x15f9, 0x1326: 0x1601, 0x1327: 0x1601, 0x1328: 0x1601, 0x1329: 0x1601, + 0x132a: 0x1609, 0x132b: 0x1609, 0x132c: 0x1609, 0x132d: 0x1609, 0x132e: 0x1611, 0x132f: 0x1611, + 0x1330: 0x1611, 0x1331: 0x1611, 0x1332: 0x1619, 0x1333: 0x1619, 0x1334: 0x1619, 0x1335: 0x1619, + 0x1336: 0x1621, 0x1337: 0x1621, 0x1338: 0x1621, 0x1339: 0x1621, 0x133a: 0x1629, 0x133b: 0x1629, + 0x133c: 0x1629, 0x133d: 0x1629, 0x133e: 0x1631, 0x133f: 0x1631, + // Block 0x4d, offset 0x1340 + 0x1340: 0x1631, 0x1341: 0x1631, 0x1342: 0x1639, 0x1343: 0x1639, 0x1344: 0x1641, 0x1345: 0x1641, + 0x1346: 0x1649, 0x1347: 0x1649, 0x1348: 0x1651, 0x1349: 0x1651, 0x134a: 0x1659, 
0x134b: 0x1659, + 0x134c: 0x1661, 0x134d: 0x1661, 0x134e: 0x1669, 0x134f: 0x1669, 0x1350: 0x1669, 0x1351: 0x1669, + 0x1352: 0x1671, 0x1353: 0x1671, 0x1354: 0x1671, 0x1355: 0x1671, 0x1356: 0x1679, 0x1357: 0x1679, + 0x1358: 0x1679, 0x1359: 0x1679, 0x135a: 0x1681, 0x135b: 0x1681, 0x135c: 0x1681, 0x135d: 0x1681, + 0x135e: 0x1689, 0x135f: 0x1689, 0x1360: 0x1691, 0x1361: 0x1691, 0x1362: 0x1691, 0x1363: 0x1691, + 0x1364: 0x1699, 0x1365: 0x1699, 0x1366: 0x16a1, 0x1367: 0x16a1, 0x1368: 0x16a1, 0x1369: 0x16a1, + 0x136a: 0x16a9, 0x136b: 0x16a9, 0x136c: 0x16a9, 0x136d: 0x16a9, 0x136e: 0x16b1, 0x136f: 0x16b1, + 0x1370: 0x16b9, 0x1371: 0x16b9, 0x1372: 0x0818, 0x1373: 0x0818, 0x1374: 0x0818, 0x1375: 0x0818, + 0x1376: 0x0818, 0x1377: 0x0818, 0x1378: 0x0818, 0x1379: 0x0818, 0x137a: 0x0818, 0x137b: 0x0818, + 0x137c: 0x0818, 0x137d: 0x0818, 0x137e: 0x0818, 0x137f: 0x0818, + // Block 0x4e, offset 0x1380 + 0x1380: 0x0818, 0x1381: 0x0818, 0x1382: 0x0818, 0x1383: 0x0040, 0x1384: 0x0040, 0x1385: 0x0040, + 0x1386: 0x0040, 0x1387: 0x0040, 0x1388: 0x0040, 0x1389: 0x0040, 0x138a: 0x0040, 0x138b: 0x0040, + 0x138c: 0x0040, 0x138d: 0x0040, 0x138e: 0x0040, 0x138f: 0x0040, 0x1390: 0x0040, 0x1391: 0x0040, + 0x1392: 0x0040, 0x1393: 0x16c1, 0x1394: 0x16c1, 0x1395: 0x16c1, 0x1396: 0x16c1, 0x1397: 0x16c9, + 0x1398: 0x16c9, 0x1399: 0x16d1, 0x139a: 0x16d1, 0x139b: 0x16d9, 0x139c: 0x16d9, 0x139d: 0x0149, + 0x139e: 0x16e1, 0x139f: 0x16e1, 0x13a0: 0x16e9, 0x13a1: 0x16e9, 0x13a2: 0x16f1, 0x13a3: 0x16f1, + 0x13a4: 0x16f9, 0x13a5: 0x16f9, 0x13a6: 0x16f9, 0x13a7: 0x16f9, 0x13a8: 0x1701, 0x13a9: 0x1701, + 0x13aa: 0x1709, 0x13ab: 0x1709, 0x13ac: 0x1711, 0x13ad: 0x1711, 0x13ae: 0x1719, 0x13af: 0x1719, + 0x13b0: 0x1721, 0x13b1: 0x1721, 0x13b2: 0x1729, 0x13b3: 0x1729, 0x13b4: 0x1731, 0x13b5: 0x1731, + 0x13b6: 0x1739, 0x13b7: 0x1739, 0x13b8: 0x1739, 0x13b9: 0x1741, 0x13ba: 0x1741, 0x13bb: 0x1741, + 0x13bc: 0x1749, 0x13bd: 0x1749, 0x13be: 0x1749, 0x13bf: 0x1749, + // Block 0x4f, offset 0x13c0 + 0x13c0: 0x1949, 0x13c1: 0x1951, 0x13c2: 0x1959, 0x13c3: 0x1961, 0x13c4: 0x1969, 0x13c5: 0x1971, + 0x13c6: 0x1979, 0x13c7: 0x1981, 0x13c8: 0x1989, 0x13c9: 0x1991, 0x13ca: 0x1999, 0x13cb: 0x19a1, + 0x13cc: 0x19a9, 0x13cd: 0x19b1, 0x13ce: 0x19b9, 0x13cf: 0x19c1, 0x13d0: 0x19c9, 0x13d1: 0x19d1, + 0x13d2: 0x19d9, 0x13d3: 0x19e1, 0x13d4: 0x19e9, 0x13d5: 0x19f1, 0x13d6: 0x19f9, 0x13d7: 0x1a01, + 0x13d8: 0x1a09, 0x13d9: 0x1a11, 0x13da: 0x1a19, 0x13db: 0x1a21, 0x13dc: 0x1a29, 0x13dd: 0x1a31, + 0x13de: 0x1a3a, 0x13df: 0x1a42, 0x13e0: 0x1a4a, 0x13e1: 0x1a52, 0x13e2: 0x1a5a, 0x13e3: 0x1a62, + 0x13e4: 0x1a69, 0x13e5: 0x1a71, 0x13e6: 0x1761, 0x13e7: 0x1a79, 0x13e8: 0x1741, 0x13e9: 0x1769, + 0x13ea: 0x1a81, 0x13eb: 0x1a89, 0x13ec: 0x1789, 0x13ed: 0x1a91, 0x13ee: 0x1791, 0x13ef: 0x1799, + 0x13f0: 0x1a99, 0x13f1: 0x1aa1, 0x13f2: 0x17b9, 0x13f3: 0x1aa9, 0x13f4: 0x17c1, 0x13f5: 0x17c9, + 0x13f6: 0x1ab1, 0x13f7: 0x1ab9, 0x13f8: 0x17d9, 0x13f9: 0x1ac1, 0x13fa: 0x17e1, 0x13fb: 0x17e9, + 0x13fc: 0x18d1, 0x13fd: 0x18d9, 0x13fe: 0x18f1, 0x13ff: 0x18f9, + // Block 0x50, offset 0x1400 + 0x1400: 0x1901, 0x1401: 0x1921, 0x1402: 0x1929, 0x1403: 0x1931, 0x1404: 0x1939, 0x1405: 0x1959, + 0x1406: 0x1961, 0x1407: 0x1969, 0x1408: 0x1ac9, 0x1409: 0x1989, 0x140a: 0x1ad1, 0x140b: 0x1ad9, + 0x140c: 0x19b9, 0x140d: 0x1ae1, 0x140e: 0x19c1, 0x140f: 0x19c9, 0x1410: 0x1a31, 0x1411: 0x1ae9, + 0x1412: 0x1af1, 0x1413: 0x1a09, 0x1414: 0x1af9, 0x1415: 0x1a11, 0x1416: 0x1a19, 0x1417: 0x1751, + 0x1418: 0x1759, 0x1419: 0x1b01, 0x141a: 0x1761, 0x141b: 0x1b09, 0x141c: 0x1771, 0x141d: 0x1779, + 0x141e: 
0x1781, 0x141f: 0x1789, 0x1420: 0x1b11, 0x1421: 0x17a1, 0x1422: 0x17a9, 0x1423: 0x17b1, + 0x1424: 0x17b9, 0x1425: 0x1b19, 0x1426: 0x17d9, 0x1427: 0x17f1, 0x1428: 0x17f9, 0x1429: 0x1801, + 0x142a: 0x1809, 0x142b: 0x1811, 0x142c: 0x1821, 0x142d: 0x1829, 0x142e: 0x1831, 0x142f: 0x1839, + 0x1430: 0x1841, 0x1431: 0x1849, 0x1432: 0x1b21, 0x1433: 0x1851, 0x1434: 0x1859, 0x1435: 0x1861, + 0x1436: 0x1869, 0x1437: 0x1871, 0x1438: 0x1879, 0x1439: 0x1889, 0x143a: 0x1891, 0x143b: 0x1899, + 0x143c: 0x18a1, 0x143d: 0x18a9, 0x143e: 0x18b1, 0x143f: 0x18b9, + // Block 0x51, offset 0x1440 + 0x1440: 0x18c1, 0x1441: 0x18c9, 0x1442: 0x18e1, 0x1443: 0x18e9, 0x1444: 0x1909, 0x1445: 0x1911, + 0x1446: 0x1919, 0x1447: 0x1921, 0x1448: 0x1929, 0x1449: 0x1941, 0x144a: 0x1949, 0x144b: 0x1951, + 0x144c: 0x1959, 0x144d: 0x1b29, 0x144e: 0x1971, 0x144f: 0x1979, 0x1450: 0x1981, 0x1451: 0x1989, + 0x1452: 0x19a1, 0x1453: 0x19a9, 0x1454: 0x19b1, 0x1455: 0x19b9, 0x1456: 0x1b31, 0x1457: 0x19d1, + 0x1458: 0x19d9, 0x1459: 0x1b39, 0x145a: 0x19f1, 0x145b: 0x19f9, 0x145c: 0x1a01, 0x145d: 0x1a09, + 0x145e: 0x1b41, 0x145f: 0x1761, 0x1460: 0x1b09, 0x1461: 0x1789, 0x1462: 0x1b11, 0x1463: 0x17b9, + 0x1464: 0x1b19, 0x1465: 0x17d9, 0x1466: 0x1b49, 0x1467: 0x1841, 0x1468: 0x1b51, 0x1469: 0x1b59, + 0x146a: 0x1b61, 0x146b: 0x1921, 0x146c: 0x1929, 0x146d: 0x1959, 0x146e: 0x19b9, 0x146f: 0x1b31, + 0x1470: 0x1a09, 0x1471: 0x1b41, 0x1472: 0x1b69, 0x1473: 0x1b71, 0x1474: 0x1b79, 0x1475: 0x1b81, + 0x1476: 0x1b89, 0x1477: 0x1b91, 0x1478: 0x1b99, 0x1479: 0x1ba1, 0x147a: 0x1ba9, 0x147b: 0x1bb1, + 0x147c: 0x1bb9, 0x147d: 0x1bc1, 0x147e: 0x1bc9, 0x147f: 0x1bd1, + // Block 0x52, offset 0x1480 + 0x1480: 0x1bd9, 0x1481: 0x1be1, 0x1482: 0x1be9, 0x1483: 0x1bf1, 0x1484: 0x1bf9, 0x1485: 0x1c01, + 0x1486: 0x1c09, 0x1487: 0x1c11, 0x1488: 0x1c19, 0x1489: 0x1c21, 0x148a: 0x1c29, 0x148b: 0x1c31, + 0x148c: 0x1b59, 0x148d: 0x1c39, 0x148e: 0x1c41, 0x148f: 0x1c49, 0x1490: 0x1c51, 0x1491: 0x1b81, + 0x1492: 0x1b89, 0x1493: 0x1b91, 0x1494: 0x1b99, 0x1495: 0x1ba1, 0x1496: 0x1ba9, 0x1497: 0x1bb1, + 0x1498: 0x1bb9, 0x1499: 0x1bc1, 0x149a: 0x1bc9, 0x149b: 0x1bd1, 0x149c: 0x1bd9, 0x149d: 0x1be1, + 0x149e: 0x1be9, 0x149f: 0x1bf1, 0x14a0: 0x1bf9, 0x14a1: 0x1c01, 0x14a2: 0x1c09, 0x14a3: 0x1c11, + 0x14a4: 0x1c19, 0x14a5: 0x1c21, 0x14a6: 0x1c29, 0x14a7: 0x1c31, 0x14a8: 0x1b59, 0x14a9: 0x1c39, + 0x14aa: 0x1c41, 0x14ab: 0x1c49, 0x14ac: 0x1c51, 0x14ad: 0x1c21, 0x14ae: 0x1c29, 0x14af: 0x1c31, + 0x14b0: 0x1b59, 0x14b1: 0x1b51, 0x14b2: 0x1b61, 0x14b3: 0x1881, 0x14b4: 0x1829, 0x14b5: 0x1831, + 0x14b6: 0x1839, 0x14b7: 0x1c21, 0x14b8: 0x1c29, 0x14b9: 0x1c31, 0x14ba: 0x1881, 0x14bb: 0x1889, + 0x14bc: 0x1c59, 0x14bd: 0x1c59, 0x14be: 0x0018, 0x14bf: 0x0018, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x0018, 0x14c1: 0x0018, 0x14c2: 0x0018, 0x14c3: 0x0018, 0x14c4: 0x0018, 0x14c5: 0x0018, + 0x14c6: 0x0018, 0x14c7: 0x0018, 0x14c8: 0x0018, 0x14c9: 0x0018, 0x14ca: 0x0018, 0x14cb: 0x0018, + 0x14cc: 0x0018, 0x14cd: 0x0018, 0x14ce: 0x0018, 0x14cf: 0x0018, 0x14d0: 0x1c61, 0x14d1: 0x1c69, + 0x14d2: 0x1c69, 0x14d3: 0x1c71, 0x14d4: 0x1c79, 0x14d5: 0x1c81, 0x14d6: 0x1c89, 0x14d7: 0x1c91, + 0x14d8: 0x1c99, 0x14d9: 0x1c99, 0x14da: 0x1ca1, 0x14db: 0x1ca9, 0x14dc: 0x1cb1, 0x14dd: 0x1cb9, + 0x14de: 0x1cc1, 0x14df: 0x1cc9, 0x14e0: 0x1cc9, 0x14e1: 0x1cd1, 0x14e2: 0x1cd9, 0x14e3: 0x1cd9, + 0x14e4: 0x1ce1, 0x14e5: 0x1ce1, 0x14e6: 0x1ce9, 0x14e7: 0x1cf1, 0x14e8: 0x1cf1, 0x14e9: 0x1cf9, + 0x14ea: 0x1d01, 0x14eb: 0x1d01, 0x14ec: 0x1d09, 0x14ed: 0x1d09, 0x14ee: 0x1d11, 0x14ef: 0x1d19, + 0x14f0: 0x1d19, 0x14f1: 0x1d21, 
0x14f2: 0x1d21, 0x14f3: 0x1d29, 0x14f4: 0x1d31, 0x14f5: 0x1d39, + 0x14f6: 0x1d41, 0x14f7: 0x1d41, 0x14f8: 0x1d49, 0x14f9: 0x1d51, 0x14fa: 0x1d59, 0x14fb: 0x1d61, + 0x14fc: 0x1d69, 0x14fd: 0x1d69, 0x14fe: 0x1d71, 0x14ff: 0x1d79, + // Block 0x54, offset 0x1500 + 0x1500: 0x1f29, 0x1501: 0x1f31, 0x1502: 0x1f39, 0x1503: 0x1f11, 0x1504: 0x1d39, 0x1505: 0x1ce9, + 0x1506: 0x1f41, 0x1507: 0x1f49, 0x1508: 0x0040, 0x1509: 0x0040, 0x150a: 0x0040, 0x150b: 0x0040, + 0x150c: 0x0040, 0x150d: 0x0040, 0x150e: 0x0040, 0x150f: 0x0018, 0x1510: 0x0040, 0x1511: 0x0040, + 0x1512: 0x0040, 0x1513: 0x0040, 0x1514: 0x0040, 0x1515: 0x0040, 0x1516: 0x0040, 0x1517: 0x0040, + 0x1518: 0x0040, 0x1519: 0x0040, 0x151a: 0x0040, 0x151b: 0x0040, 0x151c: 0x0040, 0x151d: 0x0040, + 0x151e: 0x0040, 0x151f: 0x0040, 0x1520: 0x0040, 0x1521: 0x0040, 0x1522: 0x0040, 0x1523: 0x0040, + 0x1524: 0x0040, 0x1525: 0x0040, 0x1526: 0x0040, 0x1527: 0x0040, 0x1528: 0x0040, 0x1529: 0x0040, + 0x152a: 0x0040, 0x152b: 0x0040, 0x152c: 0x0040, 0x152d: 0x0040, 0x152e: 0x0040, 0x152f: 0x0040, + 0x1530: 0x1f51, 0x1531: 0x1f59, 0x1532: 0x1f61, 0x1533: 0x1f69, 0x1534: 0x1f71, 0x1535: 0x1f79, + 0x1536: 0x1f81, 0x1537: 0x1f89, 0x1538: 0x1f91, 0x1539: 0x1f99, 0x153a: 0x1fa2, 0x153b: 0x1faa, + 0x153c: 0x1fb1, 0x153d: 0x0018, 0x153e: 0x0018, 0x153f: 0x0018, + // Block 0x55, offset 0x1540 + 0x1540: 0x33c0, 0x1541: 0x33c0, 0x1542: 0x33c0, 0x1543: 0x33c0, 0x1544: 0x33c0, 0x1545: 0x33c0, + 0x1546: 0x33c0, 0x1547: 0x33c0, 0x1548: 0x33c0, 0x1549: 0x33c0, 0x154a: 0x33c0, 0x154b: 0x33c0, + 0x154c: 0x33c0, 0x154d: 0x33c0, 0x154e: 0x33c0, 0x154f: 0x33c0, 0x1550: 0x1fba, 0x1551: 0x7d8d, + 0x1552: 0x0040, 0x1553: 0x1fc2, 0x1554: 0x0122, 0x1555: 0x1fca, 0x1556: 0x1fd2, 0x1557: 0x7dad, + 0x1558: 0x7dcd, 0x1559: 0x0040, 0x155a: 0x0040, 0x155b: 0x0040, 0x155c: 0x0040, 0x155d: 0x0040, + 0x155e: 0x0040, 0x155f: 0x0040, 0x1560: 0x3308, 0x1561: 0x3308, 0x1562: 0x3308, 0x1563: 0x3308, + 0x1564: 0x3308, 0x1565: 0x3308, 0x1566: 0x3308, 0x1567: 0x3308, 0x1568: 0x3308, 0x1569: 0x3308, + 0x156a: 0x3308, 0x156b: 0x3308, 0x156c: 0x3308, 0x156d: 0x3308, 0x156e: 0x3308, 0x156f: 0x3308, + 0x1570: 0x0040, 0x1571: 0x7ded, 0x1572: 0x7e0d, 0x1573: 0x1fda, 0x1574: 0x1fda, 0x1575: 0x072a, + 0x1576: 0x0732, 0x1577: 0x1fe2, 0x1578: 0x1fea, 0x1579: 0x7e2d, 0x157a: 0x7e4d, 0x157b: 0x7e6d, + 0x157c: 0x7e2d, 0x157d: 0x7e8d, 0x157e: 0x7ead, 0x157f: 0x7e8d, + // Block 0x56, offset 0x1580 + 0x1580: 0x7ecd, 0x1581: 0x7eed, 0x1582: 0x7f0d, 0x1583: 0x7eed, 0x1584: 0x7f2d, 0x1585: 0x0018, + 0x1586: 0x0018, 0x1587: 0x1ff2, 0x1588: 0x1ffa, 0x1589: 0x7f4e, 0x158a: 0x7f6e, 0x158b: 0x7f8e, + 0x158c: 0x7fae, 0x158d: 0x1fda, 0x158e: 0x1fda, 0x158f: 0x1fda, 0x1590: 0x1fba, 0x1591: 0x7fcd, + 0x1592: 0x0040, 0x1593: 0x0040, 0x1594: 0x0122, 0x1595: 0x1fc2, 0x1596: 0x1fd2, 0x1597: 0x1fca, + 0x1598: 0x7fed, 0x1599: 0x072a, 0x159a: 0x0732, 0x159b: 0x1fe2, 0x159c: 0x1fea, 0x159d: 0x7ecd, + 0x159e: 0x7f2d, 0x159f: 0x2002, 0x15a0: 0x200a, 0x15a1: 0x2012, 0x15a2: 0x071a, 0x15a3: 0x2019, + 0x15a4: 0x2022, 0x15a5: 0x202a, 0x15a6: 0x0722, 0x15a7: 0x0040, 0x15a8: 0x2032, 0x15a9: 0x203a, + 0x15aa: 0x2042, 0x15ab: 0x204a, 0x15ac: 0x0040, 0x15ad: 0x0040, 0x15ae: 0x0040, 0x15af: 0x0040, + 0x15b0: 0x800e, 0x15b1: 0x2051, 0x15b2: 0x802e, 0x15b3: 0x0808, 0x15b4: 0x804e, 0x15b5: 0x0040, + 0x15b6: 0x806e, 0x15b7: 0x2059, 0x15b8: 0x808e, 0x15b9: 0x2061, 0x15ba: 0x80ae, 0x15bb: 0x2069, + 0x15bc: 0x80ce, 0x15bd: 0x2071, 0x15be: 0x80ee, 0x15bf: 0x2079, + // Block 0x57, offset 0x15c0 + 0x15c0: 0x2081, 0x15c1: 0x2089, 0x15c2: 0x2089, 0x15c3: 
0x2091, 0x15c4: 0x2091, 0x15c5: 0x2099, + 0x15c6: 0x2099, 0x15c7: 0x20a1, 0x15c8: 0x20a1, 0x15c9: 0x20a9, 0x15ca: 0x20a9, 0x15cb: 0x20a9, + 0x15cc: 0x20a9, 0x15cd: 0x20b1, 0x15ce: 0x20b1, 0x15cf: 0x20b9, 0x15d0: 0x20b9, 0x15d1: 0x20b9, + 0x15d2: 0x20b9, 0x15d3: 0x20c1, 0x15d4: 0x20c1, 0x15d5: 0x20c9, 0x15d6: 0x20c9, 0x15d7: 0x20c9, + 0x15d8: 0x20c9, 0x15d9: 0x20d1, 0x15da: 0x20d1, 0x15db: 0x20d1, 0x15dc: 0x20d1, 0x15dd: 0x20d9, + 0x15de: 0x20d9, 0x15df: 0x20d9, 0x15e0: 0x20d9, 0x15e1: 0x20e1, 0x15e2: 0x20e1, 0x15e3: 0x20e1, + 0x15e4: 0x20e1, 0x15e5: 0x20e9, 0x15e6: 0x20e9, 0x15e7: 0x20e9, 0x15e8: 0x20e9, 0x15e9: 0x20f1, + 0x15ea: 0x20f1, 0x15eb: 0x20f9, 0x15ec: 0x20f9, 0x15ed: 0x2101, 0x15ee: 0x2101, 0x15ef: 0x2109, + 0x15f0: 0x2109, 0x15f1: 0x2111, 0x15f2: 0x2111, 0x15f3: 0x2111, 0x15f4: 0x2111, 0x15f5: 0x2119, + 0x15f6: 0x2119, 0x15f7: 0x2119, 0x15f8: 0x2119, 0x15f9: 0x2121, 0x15fa: 0x2121, 0x15fb: 0x2121, + 0x15fc: 0x2121, 0x15fd: 0x2129, 0x15fe: 0x2129, 0x15ff: 0x2129, + // Block 0x58, offset 0x1600 + 0x1600: 0x2129, 0x1601: 0x2131, 0x1602: 0x2131, 0x1603: 0x2131, 0x1604: 0x2131, 0x1605: 0x2139, + 0x1606: 0x2139, 0x1607: 0x2139, 0x1608: 0x2139, 0x1609: 0x2141, 0x160a: 0x2141, 0x160b: 0x2141, + 0x160c: 0x2141, 0x160d: 0x2149, 0x160e: 0x2149, 0x160f: 0x2149, 0x1610: 0x2149, 0x1611: 0x2151, + 0x1612: 0x2151, 0x1613: 0x2151, 0x1614: 0x2151, 0x1615: 0x2159, 0x1616: 0x2159, 0x1617: 0x2159, + 0x1618: 0x2159, 0x1619: 0x2161, 0x161a: 0x2161, 0x161b: 0x2161, 0x161c: 0x2161, 0x161d: 0x2169, + 0x161e: 0x2169, 0x161f: 0x2169, 0x1620: 0x2169, 0x1621: 0x2171, 0x1622: 0x2171, 0x1623: 0x2171, + 0x1624: 0x2171, 0x1625: 0x2179, 0x1626: 0x2179, 0x1627: 0x2179, 0x1628: 0x2179, 0x1629: 0x2181, + 0x162a: 0x2181, 0x162b: 0x2181, 0x162c: 0x2181, 0x162d: 0x2189, 0x162e: 0x2189, 0x162f: 0x1701, + 0x1630: 0x1701, 0x1631: 0x2191, 0x1632: 0x2191, 0x1633: 0x2191, 0x1634: 0x2191, 0x1635: 0x2199, + 0x1636: 0x2199, 0x1637: 0x21a1, 0x1638: 0x21a1, 0x1639: 0x21a9, 0x163a: 0x21a9, 0x163b: 0x21b1, + 0x163c: 0x21b1, 0x163d: 0x0040, 0x163e: 0x0040, 0x163f: 0x03c0, + // Block 0x59, offset 0x1640 + 0x1640: 0x0040, 0x1641: 0x1fca, 0x1642: 0x21ba, 0x1643: 0x2002, 0x1644: 0x203a, 0x1645: 0x2042, + 0x1646: 0x200a, 0x1647: 0x21c2, 0x1648: 0x072a, 0x1649: 0x0732, 0x164a: 0x2012, 0x164b: 0x071a, + 0x164c: 0x1fba, 0x164d: 0x2019, 0x164e: 0x0961, 0x164f: 0x21ca, 0x1650: 0x06e1, 0x1651: 0x0049, + 0x1652: 0x0029, 0x1653: 0x0031, 0x1654: 0x06e9, 0x1655: 0x06f1, 0x1656: 0x06f9, 0x1657: 0x0701, + 0x1658: 0x0709, 0x1659: 0x0711, 0x165a: 0x1fc2, 0x165b: 0x0122, 0x165c: 0x2022, 0x165d: 0x0722, + 0x165e: 0x202a, 0x165f: 0x1fd2, 0x1660: 0x204a, 0x1661: 0x0019, 0x1662: 0x02e9, 0x1663: 0x03d9, + 0x1664: 0x02f1, 0x1665: 0x02f9, 0x1666: 0x03f1, 0x1667: 0x0309, 0x1668: 0x00a9, 0x1669: 0x0311, + 0x166a: 0x00b1, 0x166b: 0x0319, 0x166c: 0x0101, 0x166d: 0x0321, 0x166e: 0x0329, 0x166f: 0x0051, + 0x1670: 0x0339, 0x1671: 0x0751, 0x1672: 0x00b9, 0x1673: 0x0089, 0x1674: 0x0341, 0x1675: 0x0349, + 0x1676: 0x0391, 0x1677: 0x00c1, 0x1678: 0x0109, 0x1679: 0x00c9, 0x167a: 0x04b1, 0x167b: 0x1ff2, + 0x167c: 0x2032, 0x167d: 0x1ffa, 0x167e: 0x21d2, 0x167f: 0x1fda, + // Block 0x5a, offset 0x1680 + 0x1680: 0x0672, 0x1681: 0x0019, 0x1682: 0x02e9, 0x1683: 0x03d9, 0x1684: 0x02f1, 0x1685: 0x02f9, + 0x1686: 0x03f1, 0x1687: 0x0309, 0x1688: 0x00a9, 0x1689: 0x0311, 0x168a: 0x00b1, 0x168b: 0x0319, + 0x168c: 0x0101, 0x168d: 0x0321, 0x168e: 0x0329, 0x168f: 0x0051, 0x1690: 0x0339, 0x1691: 0x0751, + 0x1692: 0x00b9, 0x1693: 0x0089, 0x1694: 0x0341, 0x1695: 0x0349, 0x1696: 0x0391, 
0x1697: 0x00c1, + 0x1698: 0x0109, 0x1699: 0x00c9, 0x169a: 0x04b1, 0x169b: 0x1fe2, 0x169c: 0x21da, 0x169d: 0x1fea, + 0x169e: 0x21e2, 0x169f: 0x810d, 0x16a0: 0x812d, 0x16a1: 0x0961, 0x16a2: 0x814d, 0x16a3: 0x814d, + 0x16a4: 0x816d, 0x16a5: 0x818d, 0x16a6: 0x81ad, 0x16a7: 0x81cd, 0x16a8: 0x81ed, 0x16a9: 0x820d, + 0x16aa: 0x822d, 0x16ab: 0x824d, 0x16ac: 0x826d, 0x16ad: 0x828d, 0x16ae: 0x82ad, 0x16af: 0x82cd, + 0x16b0: 0x82ed, 0x16b1: 0x830d, 0x16b2: 0x832d, 0x16b3: 0x834d, 0x16b4: 0x836d, 0x16b5: 0x838d, + 0x16b6: 0x83ad, 0x16b7: 0x83cd, 0x16b8: 0x83ed, 0x16b9: 0x840d, 0x16ba: 0x842d, 0x16bb: 0x844d, + 0x16bc: 0x81ed, 0x16bd: 0x846d, 0x16be: 0x848d, 0x16bf: 0x824d, + // Block 0x5b, offset 0x16c0 + 0x16c0: 0x84ad, 0x16c1: 0x84cd, 0x16c2: 0x84ed, 0x16c3: 0x850d, 0x16c4: 0x852d, 0x16c5: 0x854d, + 0x16c6: 0x856d, 0x16c7: 0x858d, 0x16c8: 0x850d, 0x16c9: 0x85ad, 0x16ca: 0x850d, 0x16cb: 0x85cd, + 0x16cc: 0x85cd, 0x16cd: 0x85ed, 0x16ce: 0x85ed, 0x16cf: 0x860d, 0x16d0: 0x854d, 0x16d1: 0x862d, + 0x16d2: 0x864d, 0x16d3: 0x862d, 0x16d4: 0x866d, 0x16d5: 0x864d, 0x16d6: 0x868d, 0x16d7: 0x868d, + 0x16d8: 0x86ad, 0x16d9: 0x86ad, 0x16da: 0x86cd, 0x16db: 0x86cd, 0x16dc: 0x864d, 0x16dd: 0x814d, + 0x16de: 0x86ed, 0x16df: 0x870d, 0x16e0: 0x0040, 0x16e1: 0x872d, 0x16e2: 0x874d, 0x16e3: 0x876d, + 0x16e4: 0x878d, 0x16e5: 0x876d, 0x16e6: 0x87ad, 0x16e7: 0x87cd, 0x16e8: 0x87ed, 0x16e9: 0x87ed, + 0x16ea: 0x880d, 0x16eb: 0x880d, 0x16ec: 0x882d, 0x16ed: 0x882d, 0x16ee: 0x880d, 0x16ef: 0x880d, + 0x16f0: 0x884d, 0x16f1: 0x886d, 0x16f2: 0x888d, 0x16f3: 0x88ad, 0x16f4: 0x88cd, 0x16f5: 0x88ed, + 0x16f6: 0x88ed, 0x16f7: 0x88ed, 0x16f8: 0x890d, 0x16f9: 0x890d, 0x16fa: 0x890d, 0x16fb: 0x890d, + 0x16fc: 0x87ed, 0x16fd: 0x87ed, 0x16fe: 0x87ed, 0x16ff: 0x0040, + // Block 0x5c, offset 0x1700 + 0x1700: 0x0040, 0x1701: 0x0040, 0x1702: 0x874d, 0x1703: 0x872d, 0x1704: 0x892d, 0x1705: 0x872d, + 0x1706: 0x874d, 0x1707: 0x872d, 0x1708: 0x0040, 0x1709: 0x0040, 0x170a: 0x894d, 0x170b: 0x874d, + 0x170c: 0x896d, 0x170d: 0x892d, 0x170e: 0x896d, 0x170f: 0x874d, 0x1710: 0x0040, 0x1711: 0x0040, + 0x1712: 0x898d, 0x1713: 0x89ad, 0x1714: 0x88ad, 0x1715: 0x896d, 0x1716: 0x892d, 0x1717: 0x896d, + 0x1718: 0x0040, 0x1719: 0x0040, 0x171a: 0x89cd, 0x171b: 0x89ed, 0x171c: 0x89cd, 0x171d: 0x0040, + 0x171e: 0x0040, 0x171f: 0x0040, 0x1720: 0x21e9, 0x1721: 0x21f1, 0x1722: 0x21f9, 0x1723: 0x8a0e, + 0x1724: 0x2201, 0x1725: 0x2209, 0x1726: 0x8a2d, 0x1727: 0x0040, 0x1728: 0x8a4d, 0x1729: 0x8a6d, + 0x172a: 0x8a8d, 0x172b: 0x8a6d, 0x172c: 0x8aad, 0x172d: 0x8acd, 0x172e: 0x8aed, 0x172f: 0x0040, + 0x1730: 0x0040, 0x1731: 0x0040, 0x1732: 0x0040, 0x1733: 0x0040, 0x1734: 0x0040, 0x1735: 0x0040, + 0x1736: 0x0040, 0x1737: 0x0040, 0x1738: 0x0040, 0x1739: 0x0340, 0x173a: 0x0340, 0x173b: 0x0340, + 0x173c: 0x0040, 0x173d: 0x0040, 0x173e: 0x0040, 0x173f: 0x0040, + // Block 0x5d, offset 0x1740 + 0x1740: 0x0008, 0x1741: 0x0008, 0x1742: 0x0008, 0x1743: 0x0008, 0x1744: 0x0008, 0x1745: 0x0008, + 0x1746: 0x0008, 0x1747: 0x0008, 0x1748: 0x0008, 0x1749: 0x0008, 0x174a: 0x0008, 0x174b: 0x0008, + 0x174c: 0x0008, 0x174d: 0x0008, 0x174e: 0x0008, 0x174f: 0x0008, 0x1750: 0x0008, 0x1751: 0x0008, + 0x1752: 0x0008, 0x1753: 0x0008, 0x1754: 0x0008, 0x1755: 0x0008, 0x1756: 0x0008, 0x1757: 0x0008, + 0x1758: 0x0008, 0x1759: 0x0008, 0x175a: 0x0008, 0x175b: 0x0008, 0x175c: 0x0008, 0x175d: 0x0008, + 0x175e: 0x0008, 0x175f: 0x0008, 0x1760: 0x0008, 0x1761: 0x0008, 0x1762: 0x0008, 0x1763: 0x0008, + 0x1764: 0x0040, 0x1765: 0x0040, 0x1766: 0x0040, 0x1767: 0x0040, 0x1768: 0x0040, 0x1769: 0x0040, + 0x176a: 
0x0040, 0x176b: 0x0040, 0x176c: 0x0040, 0x176d: 0x0040, 0x176e: 0x0040, 0x176f: 0x0018, + 0x1770: 0x8b3d, 0x1771: 0x8b55, 0x1772: 0x8b6d, 0x1773: 0x8b55, 0x1774: 0x8b85, 0x1775: 0x8b55, + 0x1776: 0x8b6d, 0x1777: 0x8b55, 0x1778: 0x8b3d, 0x1779: 0x8b9d, 0x177a: 0x8bb5, 0x177b: 0x0040, + 0x177c: 0x8bcd, 0x177d: 0x8b9d, 0x177e: 0x8bb5, 0x177f: 0x8b9d, + // Block 0x5e, offset 0x1780 + 0x1780: 0xe13d, 0x1781: 0xe14d, 0x1782: 0xe15d, 0x1783: 0xe14d, 0x1784: 0xe17d, 0x1785: 0xe14d, + 0x1786: 0xe15d, 0x1787: 0xe14d, 0x1788: 0xe13d, 0x1789: 0xe1cd, 0x178a: 0xe1dd, 0x178b: 0x0040, + 0x178c: 0xe1fd, 0x178d: 0xe1cd, 0x178e: 0xe1dd, 0x178f: 0xe1cd, 0x1790: 0xe13d, 0x1791: 0xe14d, + 0x1792: 0xe15d, 0x1793: 0x0040, 0x1794: 0xe17d, 0x1795: 0xe14d, 0x1796: 0x0040, 0x1797: 0x0008, + 0x1798: 0x0008, 0x1799: 0x0008, 0x179a: 0x0008, 0x179b: 0x0008, 0x179c: 0x0008, 0x179d: 0x0008, + 0x179e: 0x0008, 0x179f: 0x0008, 0x17a0: 0x0008, 0x17a1: 0x0008, 0x17a2: 0x0040, 0x17a3: 0x0008, + 0x17a4: 0x0008, 0x17a5: 0x0008, 0x17a6: 0x0008, 0x17a7: 0x0008, 0x17a8: 0x0008, 0x17a9: 0x0008, + 0x17aa: 0x0008, 0x17ab: 0x0008, 0x17ac: 0x0008, 0x17ad: 0x0008, 0x17ae: 0x0008, 0x17af: 0x0008, + 0x17b0: 0x0008, 0x17b1: 0x0008, 0x17b2: 0x0040, 0x17b3: 0x0008, 0x17b4: 0x0008, 0x17b5: 0x0008, + 0x17b6: 0x0008, 0x17b7: 0x0008, 0x17b8: 0x0008, 0x17b9: 0x0008, 0x17ba: 0x0040, 0x17bb: 0x0008, + 0x17bc: 0x0008, 0x17bd: 0x0040, 0x17be: 0x0040, 0x17bf: 0x0040, + // Block 0x5f, offset 0x17c0 + 0x17c0: 0x0008, 0x17c1: 0x2211, 0x17c2: 0x2219, 0x17c3: 0x02e1, 0x17c4: 0x2221, 0x17c5: 0x2229, + 0x17c6: 0x0040, 0x17c7: 0x2231, 0x17c8: 0x2239, 0x17c9: 0x2241, 0x17ca: 0x2249, 0x17cb: 0x2251, + 0x17cc: 0x2259, 0x17cd: 0x2261, 0x17ce: 0x2269, 0x17cf: 0x2271, 0x17d0: 0x2279, 0x17d1: 0x2281, + 0x17d2: 0x2289, 0x17d3: 0x2291, 0x17d4: 0x2299, 0x17d5: 0x0741, 0x17d6: 0x22a1, 0x17d7: 0x22a9, + 0x17d8: 0x22b1, 0x17d9: 0x22b9, 0x17da: 0x22c1, 0x17db: 0x13d9, 0x17dc: 0x8be5, 0x17dd: 0x22c9, + 0x17de: 0x22d1, 0x17df: 0x8c05, 0x17e0: 0x22d9, 0x17e1: 0x8c25, 0x17e2: 0x22e1, 0x17e3: 0x22e9, + 0x17e4: 0x22f1, 0x17e5: 0x0751, 0x17e6: 0x22f9, 0x17e7: 0x8c45, 0x17e8: 0x0949, 0x17e9: 0x2301, + 0x17ea: 0x2309, 0x17eb: 0x2311, 0x17ec: 0x2319, 0x17ed: 0x2321, 0x17ee: 0x2329, 0x17ef: 0x2331, + 0x17f0: 0x2339, 0x17f1: 0x0040, 0x17f2: 0x2341, 0x17f3: 0x2349, 0x17f4: 0x2351, 0x17f5: 0x2359, + 0x17f6: 0x2361, 0x17f7: 0x2369, 0x17f8: 0x2371, 0x17f9: 0x8c65, 0x17fa: 0x8c85, 0x17fb: 0x0040, + 0x17fc: 0x0040, 0x17fd: 0x0040, 0x17fe: 0x0040, 0x17ff: 0x0040, + // Block 0x60, offset 0x1800 + 0x1800: 0x0a08, 0x1801: 0x0a08, 0x1802: 0x0a08, 0x1803: 0x0a08, 0x1804: 0x0a08, 0x1805: 0x0c08, + 0x1806: 0x0808, 0x1807: 0x0c08, 0x1808: 0x0818, 0x1809: 0x0c08, 0x180a: 0x0c08, 0x180b: 0x0808, + 0x180c: 0x0808, 0x180d: 0x0908, 0x180e: 0x0c08, 0x180f: 0x0c08, 0x1810: 0x0c08, 0x1811: 0x0c08, + 0x1812: 0x0c08, 0x1813: 0x0a08, 0x1814: 0x0a08, 0x1815: 0x0a08, 0x1816: 0x0a08, 0x1817: 0x0908, + 0x1818: 0x0a08, 0x1819: 0x0a08, 0x181a: 0x0a08, 0x181b: 0x0a08, 0x181c: 0x0a08, 0x181d: 0x0c08, + 0x181e: 0x0a08, 0x181f: 0x0a08, 0x1820: 0x0a08, 0x1821: 0x0c08, 0x1822: 0x0808, 0x1823: 0x0808, + 0x1824: 0x0c08, 0x1825: 0x3308, 0x1826: 0x3308, 0x1827: 0x0040, 0x1828: 0x0040, 0x1829: 0x0040, + 0x182a: 0x0040, 0x182b: 0x0a18, 0x182c: 0x0a18, 0x182d: 0x0a18, 0x182e: 0x0a18, 0x182f: 0x0c18, + 0x1830: 0x0818, 0x1831: 0x0818, 0x1832: 0x0818, 0x1833: 0x0818, 0x1834: 0x0818, 0x1835: 0x0818, + 0x1836: 0x0818, 0x1837: 0x0040, 0x1838: 0x0040, 0x1839: 0x0040, 0x183a: 0x0040, 0x183b: 0x0040, + 0x183c: 0x0040, 0x183d: 0x0040, 
0x183e: 0x0040, 0x183f: 0x0040, + // Block 0x61, offset 0x1840 + 0x1840: 0x0a08, 0x1841: 0x0c08, 0x1842: 0x0a08, 0x1843: 0x0c08, 0x1844: 0x0c08, 0x1845: 0x0c08, + 0x1846: 0x0a08, 0x1847: 0x0a08, 0x1848: 0x0a08, 0x1849: 0x0c08, 0x184a: 0x0a08, 0x184b: 0x0a08, + 0x184c: 0x0c08, 0x184d: 0x0a08, 0x184e: 0x0c08, 0x184f: 0x0c08, 0x1850: 0x0a08, 0x1851: 0x0c08, + 0x1852: 0x0040, 0x1853: 0x0040, 0x1854: 0x0040, 0x1855: 0x0040, 0x1856: 0x0040, 0x1857: 0x0040, + 0x1858: 0x0040, 0x1859: 0x0818, 0x185a: 0x0818, 0x185b: 0x0818, 0x185c: 0x0818, 0x185d: 0x0040, + 0x185e: 0x0040, 0x185f: 0x0040, 0x1860: 0x0040, 0x1861: 0x0040, 0x1862: 0x0040, 0x1863: 0x0040, + 0x1864: 0x0040, 0x1865: 0x0040, 0x1866: 0x0040, 0x1867: 0x0040, 0x1868: 0x0040, 0x1869: 0x0c18, + 0x186a: 0x0c18, 0x186b: 0x0c18, 0x186c: 0x0c18, 0x186d: 0x0a18, 0x186e: 0x0a18, 0x186f: 0x0818, + 0x1870: 0x0040, 0x1871: 0x0040, 0x1872: 0x0040, 0x1873: 0x0040, 0x1874: 0x0040, 0x1875: 0x0040, + 0x1876: 0x0040, 0x1877: 0x0040, 0x1878: 0x0040, 0x1879: 0x0040, 0x187a: 0x0040, 0x187b: 0x0040, + 0x187c: 0x0040, 0x187d: 0x0040, 0x187e: 0x0040, 0x187f: 0x0040, + // Block 0x62, offset 0x1880 + 0x1880: 0x3308, 0x1881: 0x3308, 0x1882: 0x3008, 0x1883: 0x3008, 0x1884: 0x0040, 0x1885: 0x0008, + 0x1886: 0x0008, 0x1887: 0x0008, 0x1888: 0x0008, 0x1889: 0x0008, 0x188a: 0x0008, 0x188b: 0x0008, + 0x188c: 0x0008, 0x188d: 0x0040, 0x188e: 0x0040, 0x188f: 0x0008, 0x1890: 0x0008, 0x1891: 0x0040, + 0x1892: 0x0040, 0x1893: 0x0008, 0x1894: 0x0008, 0x1895: 0x0008, 0x1896: 0x0008, 0x1897: 0x0008, + 0x1898: 0x0008, 0x1899: 0x0008, 0x189a: 0x0008, 0x189b: 0x0008, 0x189c: 0x0008, 0x189d: 0x0008, + 0x189e: 0x0008, 0x189f: 0x0008, 0x18a0: 0x0008, 0x18a1: 0x0008, 0x18a2: 0x0008, 0x18a3: 0x0008, + 0x18a4: 0x0008, 0x18a5: 0x0008, 0x18a6: 0x0008, 0x18a7: 0x0008, 0x18a8: 0x0008, 0x18a9: 0x0040, + 0x18aa: 0x0008, 0x18ab: 0x0008, 0x18ac: 0x0008, 0x18ad: 0x0008, 0x18ae: 0x0008, 0x18af: 0x0008, + 0x18b0: 0x0008, 0x18b1: 0x0040, 0x18b2: 0x0008, 0x18b3: 0x0008, 0x18b4: 0x0040, 0x18b5: 0x0008, + 0x18b6: 0x0008, 0x18b7: 0x0008, 0x18b8: 0x0008, 0x18b9: 0x0008, 0x18ba: 0x0040, 0x18bb: 0x3308, + 0x18bc: 0x3308, 0x18bd: 0x0008, 0x18be: 0x3008, 0x18bf: 0x3008, + // Block 0x63, offset 0x18c0 + 0x18c0: 0x3308, 0x18c1: 0x3008, 0x18c2: 0x3008, 0x18c3: 0x3008, 0x18c4: 0x3008, 0x18c5: 0x0040, + 0x18c6: 0x0040, 0x18c7: 0x3008, 0x18c8: 0x3008, 0x18c9: 0x0040, 0x18ca: 0x0040, 0x18cb: 0x3008, + 0x18cc: 0x3008, 0x18cd: 0x3808, 0x18ce: 0x0040, 0x18cf: 0x0040, 0x18d0: 0x0008, 0x18d1: 0x0040, + 0x18d2: 0x0040, 0x18d3: 0x0040, 0x18d4: 0x0040, 0x18d5: 0x0040, 0x18d6: 0x0040, 0x18d7: 0x3008, + 0x18d8: 0x0040, 0x18d9: 0x0040, 0x18da: 0x0040, 0x18db: 0x0040, 0x18dc: 0x0040, 0x18dd: 0x0008, + 0x18de: 0x0008, 0x18df: 0x0008, 0x18e0: 0x0008, 0x18e1: 0x0008, 0x18e2: 0x3008, 0x18e3: 0x3008, + 0x18e4: 0x0040, 0x18e5: 0x0040, 0x18e6: 0x3308, 0x18e7: 0x3308, 0x18e8: 0x3308, 0x18e9: 0x3308, + 0x18ea: 0x3308, 0x18eb: 0x3308, 0x18ec: 0x3308, 0x18ed: 0x0040, 0x18ee: 0x0040, 0x18ef: 0x0040, + 0x18f0: 0x3308, 0x18f1: 0x3308, 0x18f2: 0x3308, 0x18f3: 0x3308, 0x18f4: 0x3308, 0x18f5: 0x0040, + 0x18f6: 0x0040, 0x18f7: 0x0040, 0x18f8: 0x0040, 0x18f9: 0x0040, 0x18fa: 0x0040, 0x18fb: 0x0040, + 0x18fc: 0x0040, 0x18fd: 0x0040, 0x18fe: 0x0040, 0x18ff: 0x0040, + // Block 0x64, offset 0x1900 + 0x1900: 0x0008, 0x1901: 0x0008, 0x1902: 0x0008, 0x1903: 0x0008, 0x1904: 0x0008, 0x1905: 0x0008, + 0x1906: 0x0008, 0x1907: 0x0040, 0x1908: 0x0040, 0x1909: 0x0008, 0x190a: 0x0040, 0x190b: 0x0040, + 0x190c: 0x0008, 0x190d: 0x0008, 0x190e: 0x0008, 0x190f: 
0x0008, 0x1910: 0x0008, 0x1911: 0x0008, + 0x1912: 0x0008, 0x1913: 0x0008, 0x1914: 0x0040, 0x1915: 0x0008, 0x1916: 0x0008, 0x1917: 0x0040, + 0x1918: 0x0008, 0x1919: 0x0008, 0x191a: 0x0008, 0x191b: 0x0008, 0x191c: 0x0008, 0x191d: 0x0008, + 0x191e: 0x0008, 0x191f: 0x0008, 0x1920: 0x0008, 0x1921: 0x0008, 0x1922: 0x0008, 0x1923: 0x0008, + 0x1924: 0x0008, 0x1925: 0x0008, 0x1926: 0x0008, 0x1927: 0x0008, 0x1928: 0x0008, 0x1929: 0x0008, + 0x192a: 0x0008, 0x192b: 0x0008, 0x192c: 0x0008, 0x192d: 0x0008, 0x192e: 0x0008, 0x192f: 0x0008, + 0x1930: 0x3008, 0x1931: 0x3008, 0x1932: 0x3008, 0x1933: 0x3008, 0x1934: 0x3008, 0x1935: 0x3008, + 0x1936: 0x0040, 0x1937: 0x3008, 0x1938: 0x3008, 0x1939: 0x0040, 0x193a: 0x0040, 0x193b: 0x3308, + 0x193c: 0x3308, 0x193d: 0x3808, 0x193e: 0x3b08, 0x193f: 0x0008, + // Block 0x65, offset 0x1940 + 0x1940: 0x0019, 0x1941: 0x02e9, 0x1942: 0x03d9, 0x1943: 0x02f1, 0x1944: 0x02f9, 0x1945: 0x03f1, + 0x1946: 0x0309, 0x1947: 0x00a9, 0x1948: 0x0311, 0x1949: 0x00b1, 0x194a: 0x0319, 0x194b: 0x0101, + 0x194c: 0x0321, 0x194d: 0x0329, 0x194e: 0x0051, 0x194f: 0x0339, 0x1950: 0x0751, 0x1951: 0x00b9, + 0x1952: 0x0089, 0x1953: 0x0341, 0x1954: 0x0349, 0x1955: 0x0391, 0x1956: 0x00c1, 0x1957: 0x0109, + 0x1958: 0x00c9, 0x1959: 0x04b1, 0x195a: 0x0019, 0x195b: 0x02e9, 0x195c: 0x03d9, 0x195d: 0x02f1, + 0x195e: 0x02f9, 0x195f: 0x03f1, 0x1960: 0x0309, 0x1961: 0x00a9, 0x1962: 0x0311, 0x1963: 0x00b1, + 0x1964: 0x0319, 0x1965: 0x0101, 0x1966: 0x0321, 0x1967: 0x0329, 0x1968: 0x0051, 0x1969: 0x0339, + 0x196a: 0x0751, 0x196b: 0x00b9, 0x196c: 0x0089, 0x196d: 0x0341, 0x196e: 0x0349, 0x196f: 0x0391, + 0x1970: 0x00c1, 0x1971: 0x0109, 0x1972: 0x00c9, 0x1973: 0x04b1, 0x1974: 0x0019, 0x1975: 0x02e9, + 0x1976: 0x03d9, 0x1977: 0x02f1, 0x1978: 0x02f9, 0x1979: 0x03f1, 0x197a: 0x0309, 0x197b: 0x00a9, + 0x197c: 0x0311, 0x197d: 0x00b1, 0x197e: 0x0319, 0x197f: 0x0101, + // Block 0x66, offset 0x1980 + 0x1980: 0x0321, 0x1981: 0x0329, 0x1982: 0x0051, 0x1983: 0x0339, 0x1984: 0x0751, 0x1985: 0x00b9, + 0x1986: 0x0089, 0x1987: 0x0341, 0x1988: 0x0349, 0x1989: 0x0391, 0x198a: 0x00c1, 0x198b: 0x0109, + 0x198c: 0x00c9, 0x198d: 0x04b1, 0x198e: 0x0019, 0x198f: 0x02e9, 0x1990: 0x03d9, 0x1991: 0x02f1, + 0x1992: 0x02f9, 0x1993: 0x03f1, 0x1994: 0x0309, 0x1995: 0x0040, 0x1996: 0x0311, 0x1997: 0x00b1, + 0x1998: 0x0319, 0x1999: 0x0101, 0x199a: 0x0321, 0x199b: 0x0329, 0x199c: 0x0051, 0x199d: 0x0339, + 0x199e: 0x0751, 0x199f: 0x00b9, 0x19a0: 0x0089, 0x19a1: 0x0341, 0x19a2: 0x0349, 0x19a3: 0x0391, + 0x19a4: 0x00c1, 0x19a5: 0x0109, 0x19a6: 0x00c9, 0x19a7: 0x04b1, 0x19a8: 0x0019, 0x19a9: 0x02e9, + 0x19aa: 0x03d9, 0x19ab: 0x02f1, 0x19ac: 0x02f9, 0x19ad: 0x03f1, 0x19ae: 0x0309, 0x19af: 0x00a9, + 0x19b0: 0x0311, 0x19b1: 0x00b1, 0x19b2: 0x0319, 0x19b3: 0x0101, 0x19b4: 0x0321, 0x19b5: 0x0329, + 0x19b6: 0x0051, 0x19b7: 0x0339, 0x19b8: 0x0751, 0x19b9: 0x00b9, 0x19ba: 0x0089, 0x19bb: 0x0341, + 0x19bc: 0x0349, 0x19bd: 0x0391, 0x19be: 0x00c1, 0x19bf: 0x0109, + // Block 0x67, offset 0x19c0 + 0x19c0: 0x00c9, 0x19c1: 0x04b1, 0x19c2: 0x0019, 0x19c3: 0x02e9, 0x19c4: 0x03d9, 0x19c5: 0x02f1, + 0x19c6: 0x02f9, 0x19c7: 0x03f1, 0x19c8: 0x0309, 0x19c9: 0x00a9, 0x19ca: 0x0311, 0x19cb: 0x00b1, + 0x19cc: 0x0319, 0x19cd: 0x0101, 0x19ce: 0x0321, 0x19cf: 0x0329, 0x19d0: 0x0051, 0x19d1: 0x0339, + 0x19d2: 0x0751, 0x19d3: 0x00b9, 0x19d4: 0x0089, 0x19d5: 0x0341, 0x19d6: 0x0349, 0x19d7: 0x0391, + 0x19d8: 0x00c1, 0x19d9: 0x0109, 0x19da: 0x00c9, 0x19db: 0x04b1, 0x19dc: 0x0019, 0x19dd: 0x0040, + 0x19de: 0x03d9, 0x19df: 0x02f1, 0x19e0: 0x0040, 0x19e1: 0x0040, 0x19e2: 0x0309, 
0x19e3: 0x0040, + 0x19e4: 0x0040, 0x19e5: 0x00b1, 0x19e6: 0x0319, 0x19e7: 0x0040, 0x19e8: 0x0040, 0x19e9: 0x0329, + 0x19ea: 0x0051, 0x19eb: 0x0339, 0x19ec: 0x0751, 0x19ed: 0x0040, 0x19ee: 0x0089, 0x19ef: 0x0341, + 0x19f0: 0x0349, 0x19f1: 0x0391, 0x19f2: 0x00c1, 0x19f3: 0x0109, 0x19f4: 0x00c9, 0x19f5: 0x04b1, + 0x19f6: 0x0019, 0x19f7: 0x02e9, 0x19f8: 0x03d9, 0x19f9: 0x02f1, 0x19fa: 0x0040, 0x19fb: 0x03f1, + 0x19fc: 0x0040, 0x19fd: 0x00a9, 0x19fe: 0x0311, 0x19ff: 0x00b1, + // Block 0x68, offset 0x1a00 + 0x1a00: 0x0319, 0x1a01: 0x0101, 0x1a02: 0x0321, 0x1a03: 0x0329, 0x1a04: 0x0040, 0x1a05: 0x0339, + 0x1a06: 0x0751, 0x1a07: 0x00b9, 0x1a08: 0x0089, 0x1a09: 0x0341, 0x1a0a: 0x0349, 0x1a0b: 0x0391, + 0x1a0c: 0x00c1, 0x1a0d: 0x0109, 0x1a0e: 0x00c9, 0x1a0f: 0x04b1, 0x1a10: 0x0019, 0x1a11: 0x02e9, + 0x1a12: 0x03d9, 0x1a13: 0x02f1, 0x1a14: 0x02f9, 0x1a15: 0x03f1, 0x1a16: 0x0309, 0x1a17: 0x00a9, + 0x1a18: 0x0311, 0x1a19: 0x00b1, 0x1a1a: 0x0319, 0x1a1b: 0x0101, 0x1a1c: 0x0321, 0x1a1d: 0x0329, + 0x1a1e: 0x0051, 0x1a1f: 0x0339, 0x1a20: 0x0751, 0x1a21: 0x00b9, 0x1a22: 0x0089, 0x1a23: 0x0341, + 0x1a24: 0x0349, 0x1a25: 0x0391, 0x1a26: 0x00c1, 0x1a27: 0x0109, 0x1a28: 0x00c9, 0x1a29: 0x04b1, + 0x1a2a: 0x0019, 0x1a2b: 0x02e9, 0x1a2c: 0x03d9, 0x1a2d: 0x02f1, 0x1a2e: 0x02f9, 0x1a2f: 0x03f1, + 0x1a30: 0x0309, 0x1a31: 0x00a9, 0x1a32: 0x0311, 0x1a33: 0x00b1, 0x1a34: 0x0319, 0x1a35: 0x0101, + 0x1a36: 0x0321, 0x1a37: 0x0329, 0x1a38: 0x0051, 0x1a39: 0x0339, 0x1a3a: 0x0751, 0x1a3b: 0x00b9, + 0x1a3c: 0x0089, 0x1a3d: 0x0341, 0x1a3e: 0x0349, 0x1a3f: 0x0391, + // Block 0x69, offset 0x1a40 + 0x1a40: 0x00c1, 0x1a41: 0x0109, 0x1a42: 0x00c9, 0x1a43: 0x04b1, 0x1a44: 0x0019, 0x1a45: 0x02e9, + 0x1a46: 0x0040, 0x1a47: 0x02f1, 0x1a48: 0x02f9, 0x1a49: 0x03f1, 0x1a4a: 0x0309, 0x1a4b: 0x0040, + 0x1a4c: 0x0040, 0x1a4d: 0x00b1, 0x1a4e: 0x0319, 0x1a4f: 0x0101, 0x1a50: 0x0321, 0x1a51: 0x0329, + 0x1a52: 0x0051, 0x1a53: 0x0339, 0x1a54: 0x0751, 0x1a55: 0x0040, 0x1a56: 0x0089, 0x1a57: 0x0341, + 0x1a58: 0x0349, 0x1a59: 0x0391, 0x1a5a: 0x00c1, 0x1a5b: 0x0109, 0x1a5c: 0x00c9, 0x1a5d: 0x0040, + 0x1a5e: 0x0019, 0x1a5f: 0x02e9, 0x1a60: 0x03d9, 0x1a61: 0x02f1, 0x1a62: 0x02f9, 0x1a63: 0x03f1, + 0x1a64: 0x0309, 0x1a65: 0x00a9, 0x1a66: 0x0311, 0x1a67: 0x00b1, 0x1a68: 0x0319, 0x1a69: 0x0101, + 0x1a6a: 0x0321, 0x1a6b: 0x0329, 0x1a6c: 0x0051, 0x1a6d: 0x0339, 0x1a6e: 0x0751, 0x1a6f: 0x00b9, + 0x1a70: 0x0089, 0x1a71: 0x0341, 0x1a72: 0x0349, 0x1a73: 0x0391, 0x1a74: 0x00c1, 0x1a75: 0x0109, + 0x1a76: 0x00c9, 0x1a77: 0x04b1, 0x1a78: 0x0019, 0x1a79: 0x02e9, 0x1a7a: 0x0040, 0x1a7b: 0x02f1, + 0x1a7c: 0x02f9, 0x1a7d: 0x03f1, 0x1a7e: 0x0309, 0x1a7f: 0x0040, + // Block 0x6a, offset 0x1a80 + 0x1a80: 0x0311, 0x1a81: 0x00b1, 0x1a82: 0x0319, 0x1a83: 0x0101, 0x1a84: 0x0321, 0x1a85: 0x0040, + 0x1a86: 0x0051, 0x1a87: 0x0040, 0x1a88: 0x0040, 0x1a89: 0x0040, 0x1a8a: 0x0089, 0x1a8b: 0x0341, + 0x1a8c: 0x0349, 0x1a8d: 0x0391, 0x1a8e: 0x00c1, 0x1a8f: 0x0109, 0x1a90: 0x00c9, 0x1a91: 0x0040, + 0x1a92: 0x0019, 0x1a93: 0x02e9, 0x1a94: 0x03d9, 0x1a95: 0x02f1, 0x1a96: 0x02f9, 0x1a97: 0x03f1, + 0x1a98: 0x0309, 0x1a99: 0x00a9, 0x1a9a: 0x0311, 0x1a9b: 0x00b1, 0x1a9c: 0x0319, 0x1a9d: 0x0101, + 0x1a9e: 0x0321, 0x1a9f: 0x0329, 0x1aa0: 0x0051, 0x1aa1: 0x0339, 0x1aa2: 0x0751, 0x1aa3: 0x00b9, + 0x1aa4: 0x0089, 0x1aa5: 0x0341, 0x1aa6: 0x0349, 0x1aa7: 0x0391, 0x1aa8: 0x00c1, 0x1aa9: 0x0109, + 0x1aaa: 0x00c9, 0x1aab: 0x04b1, 0x1aac: 0x0019, 0x1aad: 0x02e9, 0x1aae: 0x03d9, 0x1aaf: 0x02f1, + 0x1ab0: 0x02f9, 0x1ab1: 0x03f1, 0x1ab2: 0x0309, 0x1ab3: 0x00a9, 0x1ab4: 0x0311, 0x1ab5: 0x00b1, + 0x1ab6: 
0x0319, 0x1ab7: 0x0101, 0x1ab8: 0x0321, 0x1ab9: 0x0329, 0x1aba: 0x0051, 0x1abb: 0x0339, + 0x1abc: 0x0751, 0x1abd: 0x00b9, 0x1abe: 0x0089, 0x1abf: 0x0341, + // Block 0x6b, offset 0x1ac0 + 0x1ac0: 0x0349, 0x1ac1: 0x0391, 0x1ac2: 0x00c1, 0x1ac3: 0x0109, 0x1ac4: 0x00c9, 0x1ac5: 0x04b1, + 0x1ac6: 0x0019, 0x1ac7: 0x02e9, 0x1ac8: 0x03d9, 0x1ac9: 0x02f1, 0x1aca: 0x02f9, 0x1acb: 0x03f1, + 0x1acc: 0x0309, 0x1acd: 0x00a9, 0x1ace: 0x0311, 0x1acf: 0x00b1, 0x1ad0: 0x0319, 0x1ad1: 0x0101, + 0x1ad2: 0x0321, 0x1ad3: 0x0329, 0x1ad4: 0x0051, 0x1ad5: 0x0339, 0x1ad6: 0x0751, 0x1ad7: 0x00b9, + 0x1ad8: 0x0089, 0x1ad9: 0x0341, 0x1ada: 0x0349, 0x1adb: 0x0391, 0x1adc: 0x00c1, 0x1add: 0x0109, + 0x1ade: 0x00c9, 0x1adf: 0x04b1, 0x1ae0: 0x0019, 0x1ae1: 0x02e9, 0x1ae2: 0x03d9, 0x1ae3: 0x02f1, + 0x1ae4: 0x02f9, 0x1ae5: 0x03f1, 0x1ae6: 0x0309, 0x1ae7: 0x00a9, 0x1ae8: 0x0311, 0x1ae9: 0x00b1, + 0x1aea: 0x0319, 0x1aeb: 0x0101, 0x1aec: 0x0321, 0x1aed: 0x0329, 0x1aee: 0x0051, 0x1aef: 0x0339, + 0x1af0: 0x0751, 0x1af1: 0x00b9, 0x1af2: 0x0089, 0x1af3: 0x0341, 0x1af4: 0x0349, 0x1af5: 0x0391, + 0x1af6: 0x00c1, 0x1af7: 0x0109, 0x1af8: 0x00c9, 0x1af9: 0x04b1, 0x1afa: 0x0019, 0x1afb: 0x02e9, + 0x1afc: 0x03d9, 0x1afd: 0x02f1, 0x1afe: 0x02f9, 0x1aff: 0x03f1, + // Block 0x6c, offset 0x1b00 + 0x1b00: 0x0309, 0x1b01: 0x00a9, 0x1b02: 0x0311, 0x1b03: 0x00b1, 0x1b04: 0x0319, 0x1b05: 0x0101, + 0x1b06: 0x0321, 0x1b07: 0x0329, 0x1b08: 0x0051, 0x1b09: 0x0339, 0x1b0a: 0x0751, 0x1b0b: 0x00b9, + 0x1b0c: 0x0089, 0x1b0d: 0x0341, 0x1b0e: 0x0349, 0x1b0f: 0x0391, 0x1b10: 0x00c1, 0x1b11: 0x0109, + 0x1b12: 0x00c9, 0x1b13: 0x04b1, 0x1b14: 0x0019, 0x1b15: 0x02e9, 0x1b16: 0x03d9, 0x1b17: 0x02f1, + 0x1b18: 0x02f9, 0x1b19: 0x03f1, 0x1b1a: 0x0309, 0x1b1b: 0x00a9, 0x1b1c: 0x0311, 0x1b1d: 0x00b1, + 0x1b1e: 0x0319, 0x1b1f: 0x0101, 0x1b20: 0x0321, 0x1b21: 0x0329, 0x1b22: 0x0051, 0x1b23: 0x0339, + 0x1b24: 0x0751, 0x1b25: 0x00b9, 0x1b26: 0x0089, 0x1b27: 0x0341, 0x1b28: 0x0349, 0x1b29: 0x0391, + 0x1b2a: 0x00c1, 0x1b2b: 0x0109, 0x1b2c: 0x00c9, 0x1b2d: 0x04b1, 0x1b2e: 0x0019, 0x1b2f: 0x02e9, + 0x1b30: 0x03d9, 0x1b31: 0x02f1, 0x1b32: 0x02f9, 0x1b33: 0x03f1, 0x1b34: 0x0309, 0x1b35: 0x00a9, + 0x1b36: 0x0311, 0x1b37: 0x00b1, 0x1b38: 0x0319, 0x1b39: 0x0101, 0x1b3a: 0x0321, 0x1b3b: 0x0329, + 0x1b3c: 0x0051, 0x1b3d: 0x0339, 0x1b3e: 0x0751, 0x1b3f: 0x00b9, + // Block 0x6d, offset 0x1b40 + 0x1b40: 0x0089, 0x1b41: 0x0341, 0x1b42: 0x0349, 0x1b43: 0x0391, 0x1b44: 0x00c1, 0x1b45: 0x0109, + 0x1b46: 0x00c9, 0x1b47: 0x04b1, 0x1b48: 0x0019, 0x1b49: 0x02e9, 0x1b4a: 0x03d9, 0x1b4b: 0x02f1, + 0x1b4c: 0x02f9, 0x1b4d: 0x03f1, 0x1b4e: 0x0309, 0x1b4f: 0x00a9, 0x1b50: 0x0311, 0x1b51: 0x00b1, + 0x1b52: 0x0319, 0x1b53: 0x0101, 0x1b54: 0x0321, 0x1b55: 0x0329, 0x1b56: 0x0051, 0x1b57: 0x0339, + 0x1b58: 0x0751, 0x1b59: 0x00b9, 0x1b5a: 0x0089, 0x1b5b: 0x0341, 0x1b5c: 0x0349, 0x1b5d: 0x0391, + 0x1b5e: 0x00c1, 0x1b5f: 0x0109, 0x1b60: 0x00c9, 0x1b61: 0x04b1, 0x1b62: 0x0019, 0x1b63: 0x02e9, + 0x1b64: 0x03d9, 0x1b65: 0x02f1, 0x1b66: 0x02f9, 0x1b67: 0x03f1, 0x1b68: 0x0309, 0x1b69: 0x00a9, + 0x1b6a: 0x0311, 0x1b6b: 0x00b1, 0x1b6c: 0x0319, 0x1b6d: 0x0101, 0x1b6e: 0x0321, 0x1b6f: 0x0329, + 0x1b70: 0x0051, 0x1b71: 0x0339, 0x1b72: 0x0751, 0x1b73: 0x00b9, 0x1b74: 0x0089, 0x1b75: 0x0341, + 0x1b76: 0x0349, 0x1b77: 0x0391, 0x1b78: 0x00c1, 0x1b79: 0x0109, 0x1b7a: 0x00c9, 0x1b7b: 0x04b1, + 0x1b7c: 0x0019, 0x1b7d: 0x02e9, 0x1b7e: 0x03d9, 0x1b7f: 0x02f1, + // Block 0x6e, offset 0x1b80 + 0x1b80: 0x02f9, 0x1b81: 0x03f1, 0x1b82: 0x0309, 0x1b83: 0x00a9, 0x1b84: 0x0311, 0x1b85: 0x00b1, + 0x1b86: 0x0319, 0x1b87: 0x0101, 
0x1b88: 0x0321, 0x1b89: 0x0329, 0x1b8a: 0x0051, 0x1b8b: 0x0339, + 0x1b8c: 0x0751, 0x1b8d: 0x00b9, 0x1b8e: 0x0089, 0x1b8f: 0x0341, 0x1b90: 0x0349, 0x1b91: 0x0391, + 0x1b92: 0x00c1, 0x1b93: 0x0109, 0x1b94: 0x00c9, 0x1b95: 0x04b1, 0x1b96: 0x0019, 0x1b97: 0x02e9, + 0x1b98: 0x03d9, 0x1b99: 0x02f1, 0x1b9a: 0x02f9, 0x1b9b: 0x03f1, 0x1b9c: 0x0309, 0x1b9d: 0x00a9, + 0x1b9e: 0x0311, 0x1b9f: 0x00b1, 0x1ba0: 0x0319, 0x1ba1: 0x0101, 0x1ba2: 0x0321, 0x1ba3: 0x0329, + 0x1ba4: 0x0051, 0x1ba5: 0x0339, 0x1ba6: 0x0751, 0x1ba7: 0x00b9, 0x1ba8: 0x0089, 0x1ba9: 0x0341, + 0x1baa: 0x0349, 0x1bab: 0x0391, 0x1bac: 0x00c1, 0x1bad: 0x0109, 0x1bae: 0x00c9, 0x1baf: 0x04b1, + 0x1bb0: 0x0019, 0x1bb1: 0x02e9, 0x1bb2: 0x03d9, 0x1bb3: 0x02f1, 0x1bb4: 0x02f9, 0x1bb5: 0x03f1, + 0x1bb6: 0x0309, 0x1bb7: 0x00a9, 0x1bb8: 0x0311, 0x1bb9: 0x00b1, 0x1bba: 0x0319, 0x1bbb: 0x0101, + 0x1bbc: 0x0321, 0x1bbd: 0x0329, 0x1bbe: 0x0051, 0x1bbf: 0x0339, + // Block 0x6f, offset 0x1bc0 + 0x1bc0: 0x0751, 0x1bc1: 0x00b9, 0x1bc2: 0x0089, 0x1bc3: 0x0341, 0x1bc4: 0x0349, 0x1bc5: 0x0391, + 0x1bc6: 0x00c1, 0x1bc7: 0x0109, 0x1bc8: 0x00c9, 0x1bc9: 0x04b1, 0x1bca: 0x0019, 0x1bcb: 0x02e9, + 0x1bcc: 0x03d9, 0x1bcd: 0x02f1, 0x1bce: 0x02f9, 0x1bcf: 0x03f1, 0x1bd0: 0x0309, 0x1bd1: 0x00a9, + 0x1bd2: 0x0311, 0x1bd3: 0x00b1, 0x1bd4: 0x0319, 0x1bd5: 0x0101, 0x1bd6: 0x0321, 0x1bd7: 0x0329, + 0x1bd8: 0x0051, 0x1bd9: 0x0339, 0x1bda: 0x0751, 0x1bdb: 0x00b9, 0x1bdc: 0x0089, 0x1bdd: 0x0341, + 0x1bde: 0x0349, 0x1bdf: 0x0391, 0x1be0: 0x00c1, 0x1be1: 0x0109, 0x1be2: 0x00c9, 0x1be3: 0x04b1, + 0x1be4: 0x23e1, 0x1be5: 0x23e9, 0x1be6: 0x0040, 0x1be7: 0x0040, 0x1be8: 0x23f1, 0x1be9: 0x0399, + 0x1bea: 0x03a1, 0x1beb: 0x03a9, 0x1bec: 0x23f9, 0x1bed: 0x2401, 0x1bee: 0x2409, 0x1bef: 0x04d1, + 0x1bf0: 0x05f9, 0x1bf1: 0x2411, 0x1bf2: 0x2419, 0x1bf3: 0x2421, 0x1bf4: 0x2429, 0x1bf5: 0x2431, + 0x1bf6: 0x2439, 0x1bf7: 0x0799, 0x1bf8: 0x03c1, 0x1bf9: 0x04d1, 0x1bfa: 0x2441, 0x1bfb: 0x2449, + 0x1bfc: 0x2451, 0x1bfd: 0x03b1, 0x1bfe: 0x03b9, 0x1bff: 0x2459, + // Block 0x70, offset 0x1c00 + 0x1c00: 0x0769, 0x1c01: 0x2461, 0x1c02: 0x23f1, 0x1c03: 0x0399, 0x1c04: 0x03a1, 0x1c05: 0x03a9, + 0x1c06: 0x23f9, 0x1c07: 0x2401, 0x1c08: 0x2409, 0x1c09: 0x04d1, 0x1c0a: 0x05f9, 0x1c0b: 0x2411, + 0x1c0c: 0x2419, 0x1c0d: 0x2421, 0x1c0e: 0x2429, 0x1c0f: 0x2431, 0x1c10: 0x2439, 0x1c11: 0x0799, + 0x1c12: 0x03c1, 0x1c13: 0x2441, 0x1c14: 0x2441, 0x1c15: 0x2449, 0x1c16: 0x2451, 0x1c17: 0x03b1, + 0x1c18: 0x03b9, 0x1c19: 0x2459, 0x1c1a: 0x0769, 0x1c1b: 0x2469, 0x1c1c: 0x23f9, 0x1c1d: 0x04d1, + 0x1c1e: 0x2411, 0x1c1f: 0x03b1, 0x1c20: 0x03c1, 0x1c21: 0x0799, 0x1c22: 0x23f1, 0x1c23: 0x0399, + 0x1c24: 0x03a1, 0x1c25: 0x03a9, 0x1c26: 0x23f9, 0x1c27: 0x2401, 0x1c28: 0x2409, 0x1c29: 0x04d1, + 0x1c2a: 0x05f9, 0x1c2b: 0x2411, 0x1c2c: 0x2419, 0x1c2d: 0x2421, 0x1c2e: 0x2429, 0x1c2f: 0x2431, + 0x1c30: 0x2439, 0x1c31: 0x0799, 0x1c32: 0x03c1, 0x1c33: 0x04d1, 0x1c34: 0x2441, 0x1c35: 0x2449, + 0x1c36: 0x2451, 0x1c37: 0x03b1, 0x1c38: 0x03b9, 0x1c39: 0x2459, 0x1c3a: 0x0769, 0x1c3b: 0x2461, + 0x1c3c: 0x23f1, 0x1c3d: 0x0399, 0x1c3e: 0x03a1, 0x1c3f: 0x03a9, + // Block 0x71, offset 0x1c40 + 0x1c40: 0x23f9, 0x1c41: 0x2401, 0x1c42: 0x2409, 0x1c43: 0x04d1, 0x1c44: 0x05f9, 0x1c45: 0x2411, + 0x1c46: 0x2419, 0x1c47: 0x2421, 0x1c48: 0x2429, 0x1c49: 0x2431, 0x1c4a: 0x2439, 0x1c4b: 0x0799, + 0x1c4c: 0x03c1, 0x1c4d: 0x2441, 0x1c4e: 0x2441, 0x1c4f: 0x2449, 0x1c50: 0x2451, 0x1c51: 0x03b1, + 0x1c52: 0x03b9, 0x1c53: 0x2459, 0x1c54: 0x0769, 0x1c55: 0x2469, 0x1c56: 0x23f9, 0x1c57: 0x04d1, + 0x1c58: 0x2411, 0x1c59: 0x03b1, 0x1c5a: 0x03c1, 0x1c5b: 
0x0799, 0x1c5c: 0x23f1, 0x1c5d: 0x0399, + 0x1c5e: 0x03a1, 0x1c5f: 0x03a9, 0x1c60: 0x23f9, 0x1c61: 0x2401, 0x1c62: 0x2409, 0x1c63: 0x04d1, + 0x1c64: 0x05f9, 0x1c65: 0x2411, 0x1c66: 0x2419, 0x1c67: 0x2421, 0x1c68: 0x2429, 0x1c69: 0x2431, + 0x1c6a: 0x2439, 0x1c6b: 0x0799, 0x1c6c: 0x03c1, 0x1c6d: 0x04d1, 0x1c6e: 0x2441, 0x1c6f: 0x2449, + 0x1c70: 0x2451, 0x1c71: 0x03b1, 0x1c72: 0x03b9, 0x1c73: 0x2459, 0x1c74: 0x0769, 0x1c75: 0x2461, + 0x1c76: 0x23f1, 0x1c77: 0x0399, 0x1c78: 0x03a1, 0x1c79: 0x03a9, 0x1c7a: 0x23f9, 0x1c7b: 0x2401, + 0x1c7c: 0x2409, 0x1c7d: 0x04d1, 0x1c7e: 0x05f9, 0x1c7f: 0x2411, + // Block 0x72, offset 0x1c80 + 0x1c80: 0x2419, 0x1c81: 0x2421, 0x1c82: 0x2429, 0x1c83: 0x2431, 0x1c84: 0x2439, 0x1c85: 0x0799, + 0x1c86: 0x03c1, 0x1c87: 0x2441, 0x1c88: 0x2441, 0x1c89: 0x2449, 0x1c8a: 0x2451, 0x1c8b: 0x03b1, + 0x1c8c: 0x03b9, 0x1c8d: 0x2459, 0x1c8e: 0x0769, 0x1c8f: 0x2469, 0x1c90: 0x23f9, 0x1c91: 0x04d1, + 0x1c92: 0x2411, 0x1c93: 0x03b1, 0x1c94: 0x03c1, 0x1c95: 0x0799, 0x1c96: 0x23f1, 0x1c97: 0x0399, + 0x1c98: 0x03a1, 0x1c99: 0x03a9, 0x1c9a: 0x23f9, 0x1c9b: 0x2401, 0x1c9c: 0x2409, 0x1c9d: 0x04d1, + 0x1c9e: 0x05f9, 0x1c9f: 0x2411, 0x1ca0: 0x2419, 0x1ca1: 0x2421, 0x1ca2: 0x2429, 0x1ca3: 0x2431, + 0x1ca4: 0x2439, 0x1ca5: 0x0799, 0x1ca6: 0x03c1, 0x1ca7: 0x04d1, 0x1ca8: 0x2441, 0x1ca9: 0x2449, + 0x1caa: 0x2451, 0x1cab: 0x03b1, 0x1cac: 0x03b9, 0x1cad: 0x2459, 0x1cae: 0x0769, 0x1caf: 0x2461, + 0x1cb0: 0x23f1, 0x1cb1: 0x0399, 0x1cb2: 0x03a1, 0x1cb3: 0x03a9, 0x1cb4: 0x23f9, 0x1cb5: 0x2401, + 0x1cb6: 0x2409, 0x1cb7: 0x04d1, 0x1cb8: 0x05f9, 0x1cb9: 0x2411, 0x1cba: 0x2419, 0x1cbb: 0x2421, + 0x1cbc: 0x2429, 0x1cbd: 0x2431, 0x1cbe: 0x2439, 0x1cbf: 0x0799, + // Block 0x73, offset 0x1cc0 + 0x1cc0: 0x03c1, 0x1cc1: 0x2441, 0x1cc2: 0x2441, 0x1cc3: 0x2449, 0x1cc4: 0x2451, 0x1cc5: 0x03b1, + 0x1cc6: 0x03b9, 0x1cc7: 0x2459, 0x1cc8: 0x0769, 0x1cc9: 0x2469, 0x1cca: 0x23f9, 0x1ccb: 0x04d1, + 0x1ccc: 0x2411, 0x1ccd: 0x03b1, 0x1cce: 0x03c1, 0x1ccf: 0x0799, 0x1cd0: 0x23f1, 0x1cd1: 0x0399, + 0x1cd2: 0x03a1, 0x1cd3: 0x03a9, 0x1cd4: 0x23f9, 0x1cd5: 0x2401, 0x1cd6: 0x2409, 0x1cd7: 0x04d1, + 0x1cd8: 0x05f9, 0x1cd9: 0x2411, 0x1cda: 0x2419, 0x1cdb: 0x2421, 0x1cdc: 0x2429, 0x1cdd: 0x2431, + 0x1cde: 0x2439, 0x1cdf: 0x0799, 0x1ce0: 0x03c1, 0x1ce1: 0x04d1, 0x1ce2: 0x2441, 0x1ce3: 0x2449, + 0x1ce4: 0x2451, 0x1ce5: 0x03b1, 0x1ce6: 0x03b9, 0x1ce7: 0x2459, 0x1ce8: 0x0769, 0x1ce9: 0x2461, + 0x1cea: 0x23f1, 0x1ceb: 0x0399, 0x1cec: 0x03a1, 0x1ced: 0x03a9, 0x1cee: 0x23f9, 0x1cef: 0x2401, + 0x1cf0: 0x2409, 0x1cf1: 0x04d1, 0x1cf2: 0x05f9, 0x1cf3: 0x2411, 0x1cf4: 0x2419, 0x1cf5: 0x2421, + 0x1cf6: 0x2429, 0x1cf7: 0x2431, 0x1cf8: 0x2439, 0x1cf9: 0x0799, 0x1cfa: 0x03c1, 0x1cfb: 0x2441, + 0x1cfc: 0x2441, 0x1cfd: 0x2449, 0x1cfe: 0x2451, 0x1cff: 0x03b1, + // Block 0x74, offset 0x1d00 + 0x1d00: 0x03b9, 0x1d01: 0x2459, 0x1d02: 0x0769, 0x1d03: 0x2469, 0x1d04: 0x23f9, 0x1d05: 0x04d1, + 0x1d06: 0x2411, 0x1d07: 0x03b1, 0x1d08: 0x03c1, 0x1d09: 0x0799, 0x1d0a: 0x2471, 0x1d0b: 0x2471, + 0x1d0c: 0x0040, 0x1d0d: 0x0040, 0x1d0e: 0x06e1, 0x1d0f: 0x0049, 0x1d10: 0x0029, 0x1d11: 0x0031, + 0x1d12: 0x06e9, 0x1d13: 0x06f1, 0x1d14: 0x06f9, 0x1d15: 0x0701, 0x1d16: 0x0709, 0x1d17: 0x0711, + 0x1d18: 0x06e1, 0x1d19: 0x0049, 0x1d1a: 0x0029, 0x1d1b: 0x0031, 0x1d1c: 0x06e9, 0x1d1d: 0x06f1, + 0x1d1e: 0x06f9, 0x1d1f: 0x0701, 0x1d20: 0x0709, 0x1d21: 0x0711, 0x1d22: 0x06e1, 0x1d23: 0x0049, + 0x1d24: 0x0029, 0x1d25: 0x0031, 0x1d26: 0x06e9, 0x1d27: 0x06f1, 0x1d28: 0x06f9, 0x1d29: 0x0701, + 0x1d2a: 0x0709, 0x1d2b: 0x0711, 0x1d2c: 0x06e1, 0x1d2d: 0x0049, 0x1d2e: 0x0029, 
0x1d2f: 0x0031, + 0x1d30: 0x06e9, 0x1d31: 0x06f1, 0x1d32: 0x06f9, 0x1d33: 0x0701, 0x1d34: 0x0709, 0x1d35: 0x0711, + 0x1d36: 0x06e1, 0x1d37: 0x0049, 0x1d38: 0x0029, 0x1d39: 0x0031, 0x1d3a: 0x06e9, 0x1d3b: 0x06f1, + 0x1d3c: 0x06f9, 0x1d3d: 0x0701, 0x1d3e: 0x0709, 0x1d3f: 0x0711, + // Block 0x75, offset 0x1d40 + 0x1d40: 0x3308, 0x1d41: 0x3308, 0x1d42: 0x3308, 0x1d43: 0x3308, 0x1d44: 0x3308, 0x1d45: 0x3308, + 0x1d46: 0x3308, 0x1d47: 0x0040, 0x1d48: 0x3308, 0x1d49: 0x3308, 0x1d4a: 0x3308, 0x1d4b: 0x3308, + 0x1d4c: 0x3308, 0x1d4d: 0x3308, 0x1d4e: 0x3308, 0x1d4f: 0x3308, 0x1d50: 0x3308, 0x1d51: 0x3308, + 0x1d52: 0x3308, 0x1d53: 0x3308, 0x1d54: 0x3308, 0x1d55: 0x3308, 0x1d56: 0x3308, 0x1d57: 0x3308, + 0x1d58: 0x3308, 0x1d59: 0x0040, 0x1d5a: 0x0040, 0x1d5b: 0x3308, 0x1d5c: 0x3308, 0x1d5d: 0x3308, + 0x1d5e: 0x3308, 0x1d5f: 0x3308, 0x1d60: 0x3308, 0x1d61: 0x3308, 0x1d62: 0x0040, 0x1d63: 0x3308, + 0x1d64: 0x3308, 0x1d65: 0x0040, 0x1d66: 0x3308, 0x1d67: 0x3308, 0x1d68: 0x3308, 0x1d69: 0x3308, + 0x1d6a: 0x3308, 0x1d6b: 0x0040, 0x1d6c: 0x0040, 0x1d6d: 0x0040, 0x1d6e: 0x0040, 0x1d6f: 0x0040, + 0x1d70: 0x2479, 0x1d71: 0x2481, 0x1d72: 0x02a9, 0x1d73: 0x2489, 0x1d74: 0x02b1, 0x1d75: 0x2491, + 0x1d76: 0x2499, 0x1d77: 0x24a1, 0x1d78: 0x24a9, 0x1d79: 0x24b1, 0x1d7a: 0x24b9, 0x1d7b: 0x24c1, + 0x1d7c: 0x02b9, 0x1d7d: 0x24c9, 0x1d7e: 0x24d1, 0x1d7f: 0x02c1, + // Block 0x76, offset 0x1d80 + 0x1d80: 0x02c9, 0x1d81: 0x24d9, 0x1d82: 0x24e1, 0x1d83: 0x24e9, 0x1d84: 0x24f1, 0x1d85: 0x24f9, + 0x1d86: 0x2501, 0x1d87: 0x2509, 0x1d88: 0x2511, 0x1d89: 0x2519, 0x1d8a: 0x2521, 0x1d8b: 0x2529, + 0x1d8c: 0x2531, 0x1d8d: 0x2539, 0x1d8e: 0x2541, 0x1d8f: 0x2549, 0x1d90: 0x2551, 0x1d91: 0x2479, + 0x1d92: 0x2481, 0x1d93: 0x02a9, 0x1d94: 0x2489, 0x1d95: 0x02b1, 0x1d96: 0x2491, 0x1d97: 0x2499, + 0x1d98: 0x24a1, 0x1d99: 0x24a9, 0x1d9a: 0x24b1, 0x1d9b: 0x24b9, 0x1d9c: 0x02b9, 0x1d9d: 0x24c9, + 0x1d9e: 0x02c1, 0x1d9f: 0x24d9, 0x1da0: 0x24e1, 0x1da1: 0x24e9, 0x1da2: 0x24f1, 0x1da3: 0x24f9, + 0x1da4: 0x2501, 0x1da5: 0x02d1, 0x1da6: 0x2509, 0x1da7: 0x2559, 0x1da8: 0x2531, 0x1da9: 0x2561, + 0x1daa: 0x2569, 0x1dab: 0x2571, 0x1dac: 0x2579, 0x1dad: 0x2581, 0x1dae: 0x0040, 0x1daf: 0x0040, + 0x1db0: 0x0040, 0x1db1: 0x0040, 0x1db2: 0x0040, 0x1db3: 0x0040, 0x1db4: 0x0040, 0x1db5: 0x0040, + 0x1db6: 0x0040, 0x1db7: 0x0040, 0x1db8: 0x0040, 0x1db9: 0x0040, 0x1dba: 0x0040, 0x1dbb: 0x0040, + 0x1dbc: 0x0040, 0x1dbd: 0x0040, 0x1dbe: 0x0040, 0x1dbf: 0x0040, + // Block 0x77, offset 0x1dc0 + 0x1dc0: 0xe115, 0x1dc1: 0xe115, 0x1dc2: 0xe135, 0x1dc3: 0xe135, 0x1dc4: 0xe115, 0x1dc5: 0xe115, + 0x1dc6: 0xe175, 0x1dc7: 0xe175, 0x1dc8: 0xe115, 0x1dc9: 0xe115, 0x1dca: 0xe135, 0x1dcb: 0xe135, + 0x1dcc: 0xe115, 0x1dcd: 0xe115, 0x1dce: 0xe1f5, 0x1dcf: 0xe1f5, 0x1dd0: 0xe115, 0x1dd1: 0xe115, + 0x1dd2: 0xe135, 0x1dd3: 0xe135, 0x1dd4: 0xe115, 0x1dd5: 0xe115, 0x1dd6: 0xe175, 0x1dd7: 0xe175, + 0x1dd8: 0xe115, 0x1dd9: 0xe115, 0x1dda: 0xe135, 0x1ddb: 0xe135, 0x1ddc: 0xe115, 0x1ddd: 0xe115, + 0x1dde: 0x8ca5, 0x1ddf: 0x8ca5, 0x1de0: 0x04b5, 0x1de1: 0x04b5, 0x1de2: 0x0a08, 0x1de3: 0x0a08, + 0x1de4: 0x0a08, 0x1de5: 0x0a08, 0x1de6: 0x0a08, 0x1de7: 0x0a08, 0x1de8: 0x0a08, 0x1de9: 0x0a08, + 0x1dea: 0x0a08, 0x1deb: 0x0a08, 0x1dec: 0x0a08, 0x1ded: 0x0a08, 0x1dee: 0x0a08, 0x1def: 0x0a08, + 0x1df0: 0x0a08, 0x1df1: 0x0a08, 0x1df2: 0x0a08, 0x1df3: 0x0a08, 0x1df4: 0x0a08, 0x1df5: 0x0a08, + 0x1df6: 0x0a08, 0x1df7: 0x0a08, 0x1df8: 0x0a08, 0x1df9: 0x0a08, 0x1dfa: 0x0a08, 0x1dfb: 0x0a08, + 0x1dfc: 0x0a08, 0x1dfd: 0x0a08, 0x1dfe: 0x0a08, 0x1dff: 0x0a08, + // Block 0x78, offset 0x1e00 + 0x1e00: 
0x20b1, 0x1e01: 0x20b9, 0x1e02: 0x20d9, 0x1e03: 0x20f1, 0x1e04: 0x0040, 0x1e05: 0x2189, + 0x1e06: 0x2109, 0x1e07: 0x20e1, 0x1e08: 0x2131, 0x1e09: 0x2191, 0x1e0a: 0x2161, 0x1e0b: 0x2169, + 0x1e0c: 0x2171, 0x1e0d: 0x2179, 0x1e0e: 0x2111, 0x1e0f: 0x2141, 0x1e10: 0x2151, 0x1e11: 0x2121, + 0x1e12: 0x2159, 0x1e13: 0x2101, 0x1e14: 0x2119, 0x1e15: 0x20c9, 0x1e16: 0x20d1, 0x1e17: 0x20e9, + 0x1e18: 0x20f9, 0x1e19: 0x2129, 0x1e1a: 0x2139, 0x1e1b: 0x2149, 0x1e1c: 0x2589, 0x1e1d: 0x1689, + 0x1e1e: 0x2591, 0x1e1f: 0x2599, 0x1e20: 0x0040, 0x1e21: 0x20b9, 0x1e22: 0x20d9, 0x1e23: 0x0040, + 0x1e24: 0x2181, 0x1e25: 0x0040, 0x1e26: 0x0040, 0x1e27: 0x20e1, 0x1e28: 0x0040, 0x1e29: 0x2191, + 0x1e2a: 0x2161, 0x1e2b: 0x2169, 0x1e2c: 0x2171, 0x1e2d: 0x2179, 0x1e2e: 0x2111, 0x1e2f: 0x2141, + 0x1e30: 0x2151, 0x1e31: 0x2121, 0x1e32: 0x2159, 0x1e33: 0x0040, 0x1e34: 0x2119, 0x1e35: 0x20c9, + 0x1e36: 0x20d1, 0x1e37: 0x20e9, 0x1e38: 0x0040, 0x1e39: 0x2129, 0x1e3a: 0x0040, 0x1e3b: 0x2149, + 0x1e3c: 0x0040, 0x1e3d: 0x0040, 0x1e3e: 0x0040, 0x1e3f: 0x0040, + // Block 0x79, offset 0x1e40 + 0x1e40: 0x0040, 0x1e41: 0x0040, 0x1e42: 0x20d9, 0x1e43: 0x0040, 0x1e44: 0x0040, 0x1e45: 0x0040, + 0x1e46: 0x0040, 0x1e47: 0x20e1, 0x1e48: 0x0040, 0x1e49: 0x2191, 0x1e4a: 0x0040, 0x1e4b: 0x2169, + 0x1e4c: 0x0040, 0x1e4d: 0x2179, 0x1e4e: 0x2111, 0x1e4f: 0x2141, 0x1e50: 0x0040, 0x1e51: 0x2121, + 0x1e52: 0x2159, 0x1e53: 0x0040, 0x1e54: 0x2119, 0x1e55: 0x0040, 0x1e56: 0x0040, 0x1e57: 0x20e9, + 0x1e58: 0x0040, 0x1e59: 0x2129, 0x1e5a: 0x0040, 0x1e5b: 0x2149, 0x1e5c: 0x0040, 0x1e5d: 0x1689, + 0x1e5e: 0x0040, 0x1e5f: 0x2599, 0x1e60: 0x0040, 0x1e61: 0x20b9, 0x1e62: 0x20d9, 0x1e63: 0x0040, + 0x1e64: 0x2181, 0x1e65: 0x0040, 0x1e66: 0x0040, 0x1e67: 0x20e1, 0x1e68: 0x2131, 0x1e69: 0x2191, + 0x1e6a: 0x2161, 0x1e6b: 0x0040, 0x1e6c: 0x2171, 0x1e6d: 0x2179, 0x1e6e: 0x2111, 0x1e6f: 0x2141, + 0x1e70: 0x2151, 0x1e71: 0x2121, 0x1e72: 0x2159, 0x1e73: 0x0040, 0x1e74: 0x2119, 0x1e75: 0x20c9, + 0x1e76: 0x20d1, 0x1e77: 0x20e9, 0x1e78: 0x0040, 0x1e79: 0x2129, 0x1e7a: 0x2139, 0x1e7b: 0x2149, + 0x1e7c: 0x2589, 0x1e7d: 0x0040, 0x1e7e: 0x2591, 0x1e7f: 0x0040, + // Block 0x7a, offset 0x1e80 + 0x1e80: 0x20b1, 0x1e81: 0x20b9, 0x1e82: 0x20d9, 0x1e83: 0x20f1, 0x1e84: 0x2181, 0x1e85: 0x2189, + 0x1e86: 0x2109, 0x1e87: 0x20e1, 0x1e88: 0x2131, 0x1e89: 0x2191, 0x1e8a: 0x0040, 0x1e8b: 0x2169, + 0x1e8c: 0x2171, 0x1e8d: 0x2179, 0x1e8e: 0x2111, 0x1e8f: 0x2141, 0x1e90: 0x2151, 0x1e91: 0x2121, + 0x1e92: 0x2159, 0x1e93: 0x2101, 0x1e94: 0x2119, 0x1e95: 0x20c9, 0x1e96: 0x20d1, 0x1e97: 0x20e9, + 0x1e98: 0x20f9, 0x1e99: 0x2129, 0x1e9a: 0x2139, 0x1e9b: 0x2149, 0x1e9c: 0x0040, 0x1e9d: 0x0040, + 0x1e9e: 0x0040, 0x1e9f: 0x0040, 0x1ea0: 0x0040, 0x1ea1: 0x20b9, 0x1ea2: 0x20d9, 0x1ea3: 0x20f1, + 0x1ea4: 0x0040, 0x1ea5: 0x2189, 0x1ea6: 0x2109, 0x1ea7: 0x20e1, 0x1ea8: 0x2131, 0x1ea9: 0x2191, + 0x1eaa: 0x0040, 0x1eab: 0x2169, 0x1eac: 0x2171, 0x1ead: 0x2179, 0x1eae: 0x2111, 0x1eaf: 0x2141, + 0x1eb0: 0x2151, 0x1eb1: 0x2121, 0x1eb2: 0x2159, 0x1eb3: 0x2101, 0x1eb4: 0x2119, 0x1eb5: 0x20c9, + 0x1eb6: 0x20d1, 0x1eb7: 0x20e9, 0x1eb8: 0x20f9, 0x1eb9: 0x2129, 0x1eba: 0x2139, 0x1ebb: 0x2149, + 0x1ebc: 0x0040, 0x1ebd: 0x0040, 0x1ebe: 0x0040, 0x1ebf: 0x0040, + // Block 0x7b, offset 0x1ec0 + 0x1ec0: 0x0040, 0x1ec1: 0x25a2, 0x1ec2: 0x25aa, 0x1ec3: 0x25b2, 0x1ec4: 0x25ba, 0x1ec5: 0x25c2, + 0x1ec6: 0x25ca, 0x1ec7: 0x25d2, 0x1ec8: 0x25da, 0x1ec9: 0x25e2, 0x1eca: 0x25ea, 0x1ecb: 0x0018, + 0x1ecc: 0x0018, 0x1ecd: 0x0018, 0x1ece: 0x0018, 0x1ecf: 0x0018, 0x1ed0: 0x25f2, 0x1ed1: 0x25fa, + 0x1ed2: 0x2602, 0x1ed3: 0x260a, 
0x1ed4: 0x2612, 0x1ed5: 0x261a, 0x1ed6: 0x2622, 0x1ed7: 0x262a, + 0x1ed8: 0x2632, 0x1ed9: 0x263a, 0x1eda: 0x2642, 0x1edb: 0x264a, 0x1edc: 0x2652, 0x1edd: 0x265a, + 0x1ede: 0x2662, 0x1edf: 0x266a, 0x1ee0: 0x2672, 0x1ee1: 0x267a, 0x1ee2: 0x2682, 0x1ee3: 0x268a, + 0x1ee4: 0x2692, 0x1ee5: 0x269a, 0x1ee6: 0x26a2, 0x1ee7: 0x26aa, 0x1ee8: 0x26b2, 0x1ee9: 0x26ba, + 0x1eea: 0x26c1, 0x1eeb: 0x03d9, 0x1eec: 0x00b9, 0x1eed: 0x1239, 0x1eee: 0x26c9, 0x1eef: 0x0018, + 0x1ef0: 0x0019, 0x1ef1: 0x02e9, 0x1ef2: 0x03d9, 0x1ef3: 0x02f1, 0x1ef4: 0x02f9, 0x1ef5: 0x03f1, + 0x1ef6: 0x0309, 0x1ef7: 0x00a9, 0x1ef8: 0x0311, 0x1ef9: 0x00b1, 0x1efa: 0x0319, 0x1efb: 0x0101, + 0x1efc: 0x0321, 0x1efd: 0x0329, 0x1efe: 0x0051, 0x1eff: 0x0339, + // Block 0x7c, offset 0x1f00 + 0x1f00: 0x0751, 0x1f01: 0x00b9, 0x1f02: 0x0089, 0x1f03: 0x0341, 0x1f04: 0x0349, 0x1f05: 0x0391, + 0x1f06: 0x00c1, 0x1f07: 0x0109, 0x1f08: 0x00c9, 0x1f09: 0x04b1, 0x1f0a: 0x26d1, 0x1f0b: 0x11f9, + 0x1f0c: 0x26d9, 0x1f0d: 0x04d9, 0x1f0e: 0x26e1, 0x1f0f: 0x26e9, 0x1f10: 0x0018, 0x1f11: 0x0018, + 0x1f12: 0x0018, 0x1f13: 0x0018, 0x1f14: 0x0018, 0x1f15: 0x0018, 0x1f16: 0x0018, 0x1f17: 0x0018, + 0x1f18: 0x0018, 0x1f19: 0x0018, 0x1f1a: 0x0018, 0x1f1b: 0x0018, 0x1f1c: 0x0018, 0x1f1d: 0x0018, + 0x1f1e: 0x0018, 0x1f1f: 0x0018, 0x1f20: 0x0018, 0x1f21: 0x0018, 0x1f22: 0x0018, 0x1f23: 0x0018, + 0x1f24: 0x0018, 0x1f25: 0x0018, 0x1f26: 0x0018, 0x1f27: 0x0018, 0x1f28: 0x0018, 0x1f29: 0x0018, + 0x1f2a: 0x26f1, 0x1f2b: 0x26f9, 0x1f2c: 0x2701, 0x1f2d: 0x0018, 0x1f2e: 0x0018, 0x1f2f: 0x0018, + 0x1f30: 0x0018, 0x1f31: 0x0018, 0x1f32: 0x0018, 0x1f33: 0x0018, 0x1f34: 0x0018, 0x1f35: 0x0018, + 0x1f36: 0x0018, 0x1f37: 0x0018, 0x1f38: 0x0018, 0x1f39: 0x0018, 0x1f3a: 0x0018, 0x1f3b: 0x0018, + 0x1f3c: 0x0018, 0x1f3d: 0x0018, 0x1f3e: 0x0018, 0x1f3f: 0x0018, + // Block 0x7d, offset 0x1f40 + 0x1f40: 0x2711, 0x1f41: 0x2719, 0x1f42: 0x2721, 0x1f43: 0x0040, 0x1f44: 0x0040, 0x1f45: 0x0040, + 0x1f46: 0x0040, 0x1f47: 0x0040, 0x1f48: 0x0040, 0x1f49: 0x0040, 0x1f4a: 0x0040, 0x1f4b: 0x0040, + 0x1f4c: 0x0040, 0x1f4d: 0x0040, 0x1f4e: 0x0040, 0x1f4f: 0x0040, 0x1f50: 0x2729, 0x1f51: 0x2731, + 0x1f52: 0x2739, 0x1f53: 0x2741, 0x1f54: 0x2749, 0x1f55: 0x2751, 0x1f56: 0x2759, 0x1f57: 0x2761, + 0x1f58: 0x2769, 0x1f59: 0x2771, 0x1f5a: 0x2779, 0x1f5b: 0x2781, 0x1f5c: 0x2789, 0x1f5d: 0x2791, + 0x1f5e: 0x2799, 0x1f5f: 0x27a1, 0x1f60: 0x27a9, 0x1f61: 0x27b1, 0x1f62: 0x27b9, 0x1f63: 0x27c1, + 0x1f64: 0x27c9, 0x1f65: 0x27d1, 0x1f66: 0x27d9, 0x1f67: 0x27e1, 0x1f68: 0x27e9, 0x1f69: 0x27f1, + 0x1f6a: 0x27f9, 0x1f6b: 0x2801, 0x1f6c: 0x2809, 0x1f6d: 0x2811, 0x1f6e: 0x2819, 0x1f6f: 0x2821, + 0x1f70: 0x2829, 0x1f71: 0x2831, 0x1f72: 0x2839, 0x1f73: 0x2841, 0x1f74: 0x2849, 0x1f75: 0x2851, + 0x1f76: 0x2859, 0x1f77: 0x2861, 0x1f78: 0x2869, 0x1f79: 0x2871, 0x1f7a: 0x2879, 0x1f7b: 0x2881, + 0x1f7c: 0x0040, 0x1f7d: 0x0040, 0x1f7e: 0x0040, 0x1f7f: 0x0040, + // Block 0x7e, offset 0x1f80 + 0x1f80: 0x28e1, 0x1f81: 0x28e9, 0x1f82: 0x28f1, 0x1f83: 0x8cbd, 0x1f84: 0x28f9, 0x1f85: 0x2901, + 0x1f86: 0x2909, 0x1f87: 0x2911, 0x1f88: 0x2919, 0x1f89: 0x2921, 0x1f8a: 0x2929, 0x1f8b: 0x2931, + 0x1f8c: 0x2939, 0x1f8d: 0x8cdd, 0x1f8e: 0x2941, 0x1f8f: 0x2949, 0x1f90: 0x2951, 0x1f91: 0x2959, + 0x1f92: 0x8cfd, 0x1f93: 0x2961, 0x1f94: 0x2969, 0x1f95: 0x2799, 0x1f96: 0x8d1d, 0x1f97: 0x2971, + 0x1f98: 0x2979, 0x1f99: 0x2981, 0x1f9a: 0x2989, 0x1f9b: 0x2991, 0x1f9c: 0x8d3d, 0x1f9d: 0x2999, + 0x1f9e: 0x29a1, 0x1f9f: 0x29a9, 0x1fa0: 0x29b1, 0x1fa1: 0x29b9, 0x1fa2: 0x2871, 0x1fa3: 0x29c1, + 0x1fa4: 0x29c9, 0x1fa5: 0x29d1, 0x1fa6: 0x29d9, 0x1fa7: 
0x29e1, 0x1fa8: 0x29e9, 0x1fa9: 0x29f1, + 0x1faa: 0x29f9, 0x1fab: 0x2a01, 0x1fac: 0x2a09, 0x1fad: 0x2a11, 0x1fae: 0x2a19, 0x1faf: 0x2a21, + 0x1fb0: 0x2a29, 0x1fb1: 0x2a31, 0x1fb2: 0x2a31, 0x1fb3: 0x2a31, 0x1fb4: 0x8d5d, 0x1fb5: 0x2a39, + 0x1fb6: 0x2a41, 0x1fb7: 0x2a49, 0x1fb8: 0x8d7d, 0x1fb9: 0x2a51, 0x1fba: 0x2a59, 0x1fbb: 0x2a61, + 0x1fbc: 0x2a69, 0x1fbd: 0x2a71, 0x1fbe: 0x2a79, 0x1fbf: 0x2a81, + // Block 0x7f, offset 0x1fc0 + 0x1fc0: 0x2a89, 0x1fc1: 0x2a91, 0x1fc2: 0x2a99, 0x1fc3: 0x2aa1, 0x1fc4: 0x2aa9, 0x1fc5: 0x2ab1, + 0x1fc6: 0x2ab1, 0x1fc7: 0x2ab9, 0x1fc8: 0x2ac1, 0x1fc9: 0x2ac9, 0x1fca: 0x2ad1, 0x1fcb: 0x2ad9, + 0x1fcc: 0x2ae1, 0x1fcd: 0x2ae9, 0x1fce: 0x2af1, 0x1fcf: 0x2af9, 0x1fd0: 0x2b01, 0x1fd1: 0x2b09, + 0x1fd2: 0x2b11, 0x1fd3: 0x2b19, 0x1fd4: 0x2b21, 0x1fd5: 0x2b29, 0x1fd6: 0x2b31, 0x1fd7: 0x2b39, + 0x1fd8: 0x2b41, 0x1fd9: 0x8d9d, 0x1fda: 0x2b49, 0x1fdb: 0x2b51, 0x1fdc: 0x2b59, 0x1fdd: 0x2751, + 0x1fde: 0x2b61, 0x1fdf: 0x2b69, 0x1fe0: 0x8dbd, 0x1fe1: 0x8ddd, 0x1fe2: 0x2b71, 0x1fe3: 0x2b79, + 0x1fe4: 0x2b81, 0x1fe5: 0x2b89, 0x1fe6: 0x2b91, 0x1fe7: 0x2b99, 0x1fe8: 0x2040, 0x1fe9: 0x2ba1, + 0x1fea: 0x2ba9, 0x1feb: 0x2ba9, 0x1fec: 0x8dfd, 0x1fed: 0x2bb1, 0x1fee: 0x2bb9, 0x1fef: 0x2bc1, + 0x1ff0: 0x2bc9, 0x1ff1: 0x8e1d, 0x1ff2: 0x2bd1, 0x1ff3: 0x2bd9, 0x1ff4: 0x2040, 0x1ff5: 0x2be1, + 0x1ff6: 0x2be9, 0x1ff7: 0x2bf1, 0x1ff8: 0x2bf9, 0x1ff9: 0x2c01, 0x1ffa: 0x2c09, 0x1ffb: 0x8e3d, + 0x1ffc: 0x2c11, 0x1ffd: 0x8e5d, 0x1ffe: 0x2c19, 0x1fff: 0x2c21, + // Block 0x80, offset 0x2000 + 0x2000: 0x2c29, 0x2001: 0x2c31, 0x2002: 0x2c39, 0x2003: 0x2c41, 0x2004: 0x2c49, 0x2005: 0x2c51, + 0x2006: 0x2c59, 0x2007: 0x2c61, 0x2008: 0x2c69, 0x2009: 0x8e7d, 0x200a: 0x2c71, 0x200b: 0x2c79, + 0x200c: 0x2c81, 0x200d: 0x2c89, 0x200e: 0x2c91, 0x200f: 0x8e9d, 0x2010: 0x2c99, 0x2011: 0x8ebd, + 0x2012: 0x8edd, 0x2013: 0x2ca1, 0x2014: 0x2ca9, 0x2015: 0x2ca9, 0x2016: 0x2cb1, 0x2017: 0x8efd, + 0x2018: 0x8f1d, 0x2019: 0x2cb9, 0x201a: 0x2cc1, 0x201b: 0x2cc9, 0x201c: 0x2cd1, 0x201d: 0x2cd9, + 0x201e: 0x2ce1, 0x201f: 0x2ce9, 0x2020: 0x2cf1, 0x2021: 0x2cf9, 0x2022: 0x2d01, 0x2023: 0x2d09, + 0x2024: 0x8f3d, 0x2025: 0x2d11, 0x2026: 0x2d19, 0x2027: 0x2d21, 0x2028: 0x2d29, 0x2029: 0x2d21, + 0x202a: 0x2d31, 0x202b: 0x2d39, 0x202c: 0x2d41, 0x202d: 0x2d49, 0x202e: 0x2d51, 0x202f: 0x2d59, + 0x2030: 0x2d61, 0x2031: 0x2d69, 0x2032: 0x2d71, 0x2033: 0x2d79, 0x2034: 0x2d81, 0x2035: 0x2d89, + 0x2036: 0x2d91, 0x2037: 0x2d99, 0x2038: 0x8f5d, 0x2039: 0x2da1, 0x203a: 0x2da9, 0x203b: 0x2db1, + 0x203c: 0x2db9, 0x203d: 0x2dc1, 0x203e: 0x8f7d, 0x203f: 0x2dc9, + // Block 0x81, offset 0x2040 + 0x2040: 0x2dd1, 0x2041: 0x2dd9, 0x2042: 0x2de1, 0x2043: 0x2de9, 0x2044: 0x2df1, 0x2045: 0x2df9, + 0x2046: 0x2e01, 0x2047: 0x2e09, 0x2048: 0x2e11, 0x2049: 0x2e19, 0x204a: 0x8f9d, 0x204b: 0x2e21, + 0x204c: 0x2e29, 0x204d: 0x2e31, 0x204e: 0x2e39, 0x204f: 0x2e41, 0x2050: 0x2e49, 0x2051: 0x2e51, + 0x2052: 0x2e59, 0x2053: 0x2e61, 0x2054: 0x2e69, 0x2055: 0x2e71, 0x2056: 0x2e79, 0x2057: 0x2e81, + 0x2058: 0x2e89, 0x2059: 0x2e91, 0x205a: 0x2e99, 0x205b: 0x2ea1, 0x205c: 0x2ea9, 0x205d: 0x8fbd, + 0x205e: 0x2eb1, 0x205f: 0x2eb9, 0x2060: 0x2ec1, 0x2061: 0x2ec9, 0x2062: 0x2ed1, 0x2063: 0x8fdd, + 0x2064: 0x2ed9, 0x2065: 0x2ee1, 0x2066: 0x2ee9, 0x2067: 0x2ef1, 0x2068: 0x2ef9, 0x2069: 0x2f01, + 0x206a: 0x2f09, 0x206b: 0x2f11, 0x206c: 0x7f0d, 0x206d: 0x2f19, 0x206e: 0x2f21, 0x206f: 0x2f29, + 0x2070: 0x8ffd, 0x2071: 0x2f31, 0x2072: 0x2f39, 0x2073: 0x2f41, 0x2074: 0x2f49, 0x2075: 0x2f51, + 0x2076: 0x2f59, 0x2077: 0x901d, 0x2078: 0x903d, 0x2079: 0x905d, 0x207a: 0x2f61, 
0x207b: 0x907d, + 0x207c: 0x2f69, 0x207d: 0x2f71, 0x207e: 0x2f79, 0x207f: 0x2f81, + // Block 0x82, offset 0x2080 + 0x2080: 0x2f89, 0x2081: 0x2f91, 0x2082: 0x2f99, 0x2083: 0x2fa1, 0x2084: 0x2fa9, 0x2085: 0x2fb1, + 0x2086: 0x909d, 0x2087: 0x2fb9, 0x2088: 0x2fc1, 0x2089: 0x2fc9, 0x208a: 0x2fd1, 0x208b: 0x2fd9, + 0x208c: 0x2fe1, 0x208d: 0x90bd, 0x208e: 0x2fe9, 0x208f: 0x2ff1, 0x2090: 0x90dd, 0x2091: 0x90fd, + 0x2092: 0x2ff9, 0x2093: 0x3001, 0x2094: 0x3009, 0x2095: 0x3011, 0x2096: 0x3019, 0x2097: 0x3021, + 0x2098: 0x3029, 0x2099: 0x3031, 0x209a: 0x3039, 0x209b: 0x911d, 0x209c: 0x3041, 0x209d: 0x913d, + 0x209e: 0x3049, 0x209f: 0x2040, 0x20a0: 0x3051, 0x20a1: 0x3059, 0x20a2: 0x3061, 0x20a3: 0x915d, + 0x20a4: 0x3069, 0x20a5: 0x3071, 0x20a6: 0x917d, 0x20a7: 0x919d, 0x20a8: 0x3079, 0x20a9: 0x3081, + 0x20aa: 0x3089, 0x20ab: 0x3091, 0x20ac: 0x3099, 0x20ad: 0x3099, 0x20ae: 0x30a1, 0x20af: 0x30a9, + 0x20b0: 0x30b1, 0x20b1: 0x30b9, 0x20b2: 0x30c1, 0x20b3: 0x30c9, 0x20b4: 0x30d1, 0x20b5: 0x91bd, + 0x20b6: 0x30d9, 0x20b7: 0x91dd, 0x20b8: 0x30e1, 0x20b9: 0x91fd, 0x20ba: 0x30e9, 0x20bb: 0x921d, + 0x20bc: 0x923d, 0x20bd: 0x925d, 0x20be: 0x30f1, 0x20bf: 0x30f9, + // Block 0x83, offset 0x20c0 + 0x20c0: 0x3101, 0x20c1: 0x927d, 0x20c2: 0x929d, 0x20c3: 0x92bd, 0x20c4: 0x92dd, 0x20c5: 0x3109, + 0x20c6: 0x3111, 0x20c7: 0x3111, 0x20c8: 0x3119, 0x20c9: 0x3121, 0x20ca: 0x3129, 0x20cb: 0x3131, + 0x20cc: 0x3139, 0x20cd: 0x92fd, 0x20ce: 0x3141, 0x20cf: 0x3149, 0x20d0: 0x3151, 0x20d1: 0x3159, + 0x20d2: 0x931d, 0x20d3: 0x3161, 0x20d4: 0x933d, 0x20d5: 0x935d, 0x20d6: 0x3169, 0x20d7: 0x3171, + 0x20d8: 0x3179, 0x20d9: 0x3181, 0x20da: 0x3189, 0x20db: 0x3191, 0x20dc: 0x937d, 0x20dd: 0x939d, + 0x20de: 0x93bd, 0x20df: 0x2040, 0x20e0: 0x3199, 0x20e1: 0x93dd, 0x20e2: 0x31a1, 0x20e3: 0x31a9, + 0x20e4: 0x31b1, 0x20e5: 0x93fd, 0x20e6: 0x31b9, 0x20e7: 0x31c1, 0x20e8: 0x31c9, 0x20e9: 0x31d1, + 0x20ea: 0x31d9, 0x20eb: 0x941d, 0x20ec: 0x31e1, 0x20ed: 0x31e9, 0x20ee: 0x31f1, 0x20ef: 0x31f9, + 0x20f0: 0x3201, 0x20f1: 0x3209, 0x20f2: 0x943d, 0x20f3: 0x945d, 0x20f4: 0x3211, 0x20f5: 0x947d, + 0x20f6: 0x3219, 0x20f7: 0x949d, 0x20f8: 0x3221, 0x20f9: 0x3229, 0x20fa: 0x3231, 0x20fb: 0x94bd, + 0x20fc: 0x94dd, 0x20fd: 0x3239, 0x20fe: 0x94fd, 0x20ff: 0x3241, + // Block 0x84, offset 0x2100 + 0x2100: 0x951d, 0x2101: 0x3249, 0x2102: 0x3251, 0x2103: 0x3259, 0x2104: 0x3261, 0x2105: 0x3269, + 0x2106: 0x3271, 0x2107: 0x953d, 0x2108: 0x955d, 0x2109: 0x957d, 0x210a: 0x959d, 0x210b: 0x2ca1, + 0x210c: 0x3279, 0x210d: 0x3281, 0x210e: 0x3289, 0x210f: 0x3291, 0x2110: 0x3299, 0x2111: 0x32a1, + 0x2112: 0x32a9, 0x2113: 0x32b1, 0x2114: 0x32b9, 0x2115: 0x32c1, 0x2116: 0x32c9, 0x2117: 0x95bd, + 0x2118: 0x32d1, 0x2119: 0x32d9, 0x211a: 0x32e1, 0x211b: 0x32e9, 0x211c: 0x32f1, 0x211d: 0x32f9, + 0x211e: 0x3301, 0x211f: 0x3309, 0x2120: 0x3311, 0x2121: 0x3319, 0x2122: 0x3321, 0x2123: 0x3329, + 0x2124: 0x95dd, 0x2125: 0x95fd, 0x2126: 0x961d, 0x2127: 0x3331, 0x2128: 0x3339, 0x2129: 0x3341, + 0x212a: 0x3349, 0x212b: 0x963d, 0x212c: 0x3351, 0x212d: 0x965d, 0x212e: 0x3359, 0x212f: 0x3361, + 0x2130: 0x967d, 0x2131: 0x969d, 0x2132: 0x3369, 0x2133: 0x3371, 0x2134: 0x3379, 0x2135: 0x3381, + 0x2136: 0x3389, 0x2137: 0x3391, 0x2138: 0x3399, 0x2139: 0x33a1, 0x213a: 0x33a9, 0x213b: 0x33b1, + 0x213c: 0x33b9, 0x213d: 0x33c1, 0x213e: 0x33c9, 0x213f: 0x2040, + // Block 0x85, offset 0x2140 + 0x2140: 0x33d1, 0x2141: 0x33d9, 0x2142: 0x33e1, 0x2143: 0x33e9, 0x2144: 0x33f1, 0x2145: 0x96bd, + 0x2146: 0x33f9, 0x2147: 0x3401, 0x2148: 0x3409, 0x2149: 0x3411, 0x214a: 0x3419, 0x214b: 0x96dd, + 0x214c: 
0x96fd, 0x214d: 0x3421, 0x214e: 0x3429, 0x214f: 0x3431, 0x2150: 0x3439, 0x2151: 0x3441, + 0x2152: 0x3449, 0x2153: 0x971d, 0x2154: 0x3451, 0x2155: 0x3459, 0x2156: 0x3461, 0x2157: 0x3469, + 0x2158: 0x973d, 0x2159: 0x975d, 0x215a: 0x3471, 0x215b: 0x3479, 0x215c: 0x3481, 0x215d: 0x977d, + 0x215e: 0x3489, 0x215f: 0x3491, 0x2160: 0x684d, 0x2161: 0x979d, 0x2162: 0x3499, 0x2163: 0x34a1, + 0x2164: 0x34a9, 0x2165: 0x97bd, 0x2166: 0x34b1, 0x2167: 0x34b9, 0x2168: 0x34c1, 0x2169: 0x34c9, + 0x216a: 0x34d1, 0x216b: 0x34d9, 0x216c: 0x34e1, 0x216d: 0x97dd, 0x216e: 0x34e9, 0x216f: 0x34f1, + 0x2170: 0x34f9, 0x2171: 0x97fd, 0x2172: 0x3501, 0x2173: 0x3509, 0x2174: 0x3511, 0x2175: 0x3519, + 0x2176: 0x7b6d, 0x2177: 0x981d, 0x2178: 0x3521, 0x2179: 0x3529, 0x217a: 0x3531, 0x217b: 0x983d, + 0x217c: 0x3539, 0x217d: 0x985d, 0x217e: 0x3541, 0x217f: 0x3541, + // Block 0x86, offset 0x2180 + 0x2180: 0x3549, 0x2181: 0x987d, 0x2182: 0x3551, 0x2183: 0x3559, 0x2184: 0x3561, 0x2185: 0x3569, + 0x2186: 0x3571, 0x2187: 0x3579, 0x2188: 0x3581, 0x2189: 0x989d, 0x218a: 0x3589, 0x218b: 0x3591, + 0x218c: 0x3599, 0x218d: 0x35a1, 0x218e: 0x35a9, 0x218f: 0x35b1, 0x2190: 0x98bd, 0x2191: 0x35b9, + 0x2192: 0x98dd, 0x2193: 0x98fd, 0x2194: 0x991d, 0x2195: 0x35c1, 0x2196: 0x35c9, 0x2197: 0x35d1, + 0x2198: 0x35d9, 0x2199: 0x35e1, 0x219a: 0x35e9, 0x219b: 0x35f1, 0x219c: 0x35f9, 0x219d: 0x993d, + 0x219e: 0x0040, 0x219f: 0x0040, 0x21a0: 0x0040, 0x21a1: 0x0040, 0x21a2: 0x0040, 0x21a3: 0x0040, + 0x21a4: 0x0040, 0x21a5: 0x0040, 0x21a6: 0x0040, 0x21a7: 0x0040, 0x21a8: 0x0040, 0x21a9: 0x0040, + 0x21aa: 0x0040, 0x21ab: 0x0040, 0x21ac: 0x0040, 0x21ad: 0x0040, 0x21ae: 0x0040, 0x21af: 0x0040, + 0x21b0: 0x0040, 0x21b1: 0x0040, 0x21b2: 0x0040, 0x21b3: 0x0040, 0x21b4: 0x0040, 0x21b5: 0x0040, + 0x21b6: 0x0040, 0x21b7: 0x0040, 0x21b8: 0x0040, 0x21b9: 0x0040, 0x21ba: 0x0040, 0x21bb: 0x0040, + 0x21bc: 0x0040, 0x21bd: 0x0040, 0x21be: 0x0040, 0x21bf: 0x0040, +} + +// idnaIndex: 39 blocks, 2496 entries, 4992 bytes +// Block 0 is the zero block. 
+var idnaIndex = [2496]uint16{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x01, 0xc3: 0x85, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x04, 0xc7: 0x05, + 0xc8: 0x06, 0xc9: 0x86, 0xca: 0x87, 0xcb: 0x07, 0xcc: 0x88, 0xcd: 0x08, 0xce: 0x09, 0xcf: 0x0a, + 0xd0: 0x89, 0xd1: 0x0b, 0xd2: 0x0c, 0xd3: 0x0d, 0xd4: 0x0e, 0xd5: 0x8a, 0xd6: 0x8b, 0xd7: 0x8c, + 0xd8: 0x0f, 0xd9: 0x10, 0xda: 0x8d, 0xdb: 0x11, 0xdc: 0x12, 0xdd: 0x8e, 0xde: 0x8f, 0xdf: 0x90, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, 0xe4: 0x06, 0xe5: 0x07, 0xe6: 0x07, 0xe7: 0x07, + 0xe8: 0x07, 0xe9: 0x07, 0xea: 0x08, 0xeb: 0x07, 0xec: 0x07, 0xed: 0x09, 0xee: 0x0a, 0xef: 0x0b, + 0xf0: 0x20, 0xf1: 0x21, 0xf2: 0x21, 0xf3: 0x23, 0xf4: 0x24, + // Block 0x4, offset 0x100 + 0x120: 0x91, 0x121: 0x13, 0x122: 0x14, 0x123: 0x92, 0x124: 0x93, 0x125: 0x15, 0x126: 0x16, 0x127: 0x17, + 0x128: 0x18, 0x129: 0x19, 0x12a: 0x1a, 0x12b: 0x1b, 0x12c: 0x1c, 0x12d: 0x1d, 0x12e: 0x1e, 0x12f: 0x94, + 0x130: 0x95, 0x131: 0x1f, 0x132: 0x20, 0x133: 0x21, 0x134: 0x96, 0x135: 0x22, 0x136: 0x97, 0x137: 0x98, + 0x138: 0x99, 0x139: 0x9a, 0x13a: 0x23, 0x13b: 0x9b, 0x13c: 0x9c, 0x13d: 0x24, 0x13e: 0x25, 0x13f: 0x9d, + // Block 0x5, offset 0x140 + 0x140: 0x9e, 0x141: 0x9f, 0x142: 0xa0, 0x143: 0xa1, 0x144: 0xa2, 0x145: 0xa3, 0x146: 0xa4, 0x147: 0xa5, + 0x148: 0xa6, 0x149: 0xa7, 0x14a: 0xa8, 0x14b: 0xa9, 0x14c: 0xaa, 0x14d: 0xab, 0x14e: 0xac, 0x14f: 0xad, + 0x150: 0xae, 0x151: 0xa6, 0x152: 0xa6, 0x153: 0xa6, 0x154: 0xa6, 0x155: 0xa6, 0x156: 0xa6, 0x157: 0xa6, + 0x158: 0xa6, 0x159: 0xaf, 0x15a: 0xb0, 0x15b: 0xb1, 0x15c: 0xb2, 0x15d: 0xb3, 0x15e: 0xb4, 0x15f: 0xb5, + 0x160: 0xb6, 0x161: 0xb7, 0x162: 0xb8, 0x163: 0xb9, 0x164: 0xba, 0x165: 0xbb, 0x166: 0xbc, 0x167: 0xbd, + 0x168: 0xbe, 0x169: 0xbf, 0x16a: 0xc0, 0x16b: 0xc1, 0x16c: 0xc2, 0x16d: 0xc3, 0x16e: 0xc4, 0x16f: 0xc5, + 0x170: 0xc6, 0x171: 0xc7, 0x172: 0xc8, 0x173: 0xc9, 0x174: 0x26, 0x175: 0x27, 0x176: 0x28, 0x177: 0x88, + 0x178: 0x29, 0x179: 0x29, 0x17a: 0x2a, 0x17b: 0x29, 0x17c: 0xca, 0x17d: 0x2b, 0x17e: 0x2c, 0x17f: 0x2d, + // Block 0x6, offset 0x180 + 0x180: 0x2e, 0x181: 0x2f, 0x182: 0x30, 0x183: 0xcb, 0x184: 0x31, 0x185: 0x32, 0x186: 0xcc, 0x187: 0xa2, + 0x188: 0xcd, 0x189: 0xce, 0x18a: 0xa2, 0x18b: 0xa2, 0x18c: 0xcf, 0x18d: 0xa2, 0x18e: 0xa2, 0x18f: 0xa2, + 0x190: 0xd0, 0x191: 0x33, 0x192: 0x34, 0x193: 0x35, 0x194: 0xa2, 0x195: 0xa2, 0x196: 0xa2, 0x197: 0xa2, + 0x198: 0xa2, 0x199: 0xa2, 0x19a: 0xa2, 0x19b: 0xa2, 0x19c: 0xa2, 0x19d: 0xa2, 0x19e: 0xa2, 0x19f: 0xa2, + 0x1a0: 0xa2, 0x1a1: 0xa2, 0x1a2: 0xa2, 0x1a3: 0xa2, 0x1a4: 0xa2, 0x1a5: 0xa2, 0x1a6: 0xa2, 0x1a7: 0xa2, + 0x1a8: 0xd1, 0x1a9: 0xd2, 0x1aa: 0xa2, 0x1ab: 0xd3, 0x1ac: 0xa2, 0x1ad: 0xd4, 0x1ae: 0xd5, 0x1af: 0xa2, + 0x1b0: 0xd6, 0x1b1: 0x36, 0x1b2: 0x29, 0x1b3: 0x37, 0x1b4: 0xd7, 0x1b5: 0xd8, 0x1b6: 0xd9, 0x1b7: 0xda, + 0x1b8: 0xdb, 0x1b9: 0xdc, 0x1ba: 0xdd, 0x1bb: 0xde, 0x1bc: 0xdf, 0x1bd: 0xe0, 0x1be: 0xe1, 0x1bf: 0x38, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x39, 0x1c1: 0xe2, 0x1c2: 0xe3, 0x1c3: 0xe4, 0x1c4: 0xe5, 0x1c5: 0x3a, 0x1c6: 0x3b, 0x1c7: 0xe6, + 0x1c8: 0xe7, 0x1c9: 0x3c, 0x1ca: 0x3d, 0x1cb: 0x3e, 0x1cc: 0xe8, 0x1cd: 0xe9, 0x1ce: 0x3f, 0x1cf: 0x40, + 0x1d0: 0xa6, 0x1d1: 0xa6, 0x1d2: 0xa6, 0x1d3: 0xa6, 0x1d4: 0xa6, 0x1d5: 0xa6, 0x1d6: 0xa6, 0x1d7: 0xa6, + 0x1d8: 0xa6, 0x1d9: 0xa6, 0x1da: 0xa6, 0x1db: 0xa6, 0x1dc: 0xa6, 0x1dd: 0xa6, 0x1de: 0xa6, 0x1df: 0xa6, + 0x1e0: 0xa6, 0x1e1: 0xa6, 0x1e2: 0xa6, 0x1e3: 0xa6, 0x1e4: 0xa6, 0x1e5: 0xa6, 0x1e6: 0xa6, 0x1e7: 0xa6, + 0x1e8: 0xa6, 
0x1e9: 0xa6, 0x1ea: 0xa6, 0x1eb: 0xa6, 0x1ec: 0xa6, 0x1ed: 0xa6, 0x1ee: 0xa6, 0x1ef: 0xa6, + 0x1f0: 0xa6, 0x1f1: 0xa6, 0x1f2: 0xa6, 0x1f3: 0xa6, 0x1f4: 0xa6, 0x1f5: 0xa6, 0x1f6: 0xa6, 0x1f7: 0xa6, + 0x1f8: 0xa6, 0x1f9: 0xa6, 0x1fa: 0xa6, 0x1fb: 0xa6, 0x1fc: 0xa6, 0x1fd: 0xa6, 0x1fe: 0xa6, 0x1ff: 0xa6, + // Block 0x8, offset 0x200 + 0x200: 0xa6, 0x201: 0xa6, 0x202: 0xa6, 0x203: 0xa6, 0x204: 0xa6, 0x205: 0xa6, 0x206: 0xa6, 0x207: 0xa6, + 0x208: 0xa6, 0x209: 0xa6, 0x20a: 0xa6, 0x20b: 0xa6, 0x20c: 0xa6, 0x20d: 0xa6, 0x20e: 0xa6, 0x20f: 0xa6, + 0x210: 0xa6, 0x211: 0xa6, 0x212: 0xa6, 0x213: 0xa6, 0x214: 0xa6, 0x215: 0xa6, 0x216: 0xa6, 0x217: 0xa6, + 0x218: 0xa6, 0x219: 0xa6, 0x21a: 0xa6, 0x21b: 0xa6, 0x21c: 0xa6, 0x21d: 0xa6, 0x21e: 0xa6, 0x21f: 0xa6, + 0x220: 0xa6, 0x221: 0xa6, 0x222: 0xa6, 0x223: 0xa6, 0x224: 0xa6, 0x225: 0xa6, 0x226: 0xa6, 0x227: 0xa6, + 0x228: 0xa6, 0x229: 0xa6, 0x22a: 0xa6, 0x22b: 0xa6, 0x22c: 0xa6, 0x22d: 0xa6, 0x22e: 0xa6, 0x22f: 0xa6, + 0x230: 0xa6, 0x231: 0xa6, 0x232: 0xa6, 0x233: 0xa6, 0x234: 0xa6, 0x235: 0xa6, 0x236: 0xa6, 0x237: 0xa2, + 0x238: 0xa6, 0x239: 0xa6, 0x23a: 0xa6, 0x23b: 0xa6, 0x23c: 0xa6, 0x23d: 0xa6, 0x23e: 0xa6, 0x23f: 0xa6, + // Block 0x9, offset 0x240 + 0x240: 0xa6, 0x241: 0xa6, 0x242: 0xa6, 0x243: 0xa6, 0x244: 0xa6, 0x245: 0xa6, 0x246: 0xa6, 0x247: 0xa6, + 0x248: 0xa6, 0x249: 0xa6, 0x24a: 0xa6, 0x24b: 0xa6, 0x24c: 0xa6, 0x24d: 0xa6, 0x24e: 0xa6, 0x24f: 0xa6, + 0x250: 0xa6, 0x251: 0xa6, 0x252: 0xa6, 0x253: 0xa6, 0x254: 0xa6, 0x255: 0xa6, 0x256: 0xa6, 0x257: 0xa6, + 0x258: 0xa6, 0x259: 0xa6, 0x25a: 0xa6, 0x25b: 0xa6, 0x25c: 0xa6, 0x25d: 0xa6, 0x25e: 0xa6, 0x25f: 0xa6, + 0x260: 0xa6, 0x261: 0xa6, 0x262: 0xa6, 0x263: 0xa6, 0x264: 0xa6, 0x265: 0xa6, 0x266: 0xa6, 0x267: 0xa6, + 0x268: 0xa6, 0x269: 0xa6, 0x26a: 0xa6, 0x26b: 0xa6, 0x26c: 0xa6, 0x26d: 0xa6, 0x26e: 0xa6, 0x26f: 0xa6, + 0x270: 0xa6, 0x271: 0xa6, 0x272: 0xa6, 0x273: 0xa6, 0x274: 0xa6, 0x275: 0xa6, 0x276: 0xa6, 0x277: 0xa6, + 0x278: 0xa6, 0x279: 0xa6, 0x27a: 0xa6, 0x27b: 0xa6, 0x27c: 0xa6, 0x27d: 0xa6, 0x27e: 0xa6, 0x27f: 0xa6, + // Block 0xa, offset 0x280 + 0x280: 0xa6, 0x281: 0xa6, 0x282: 0xa6, 0x283: 0xa6, 0x284: 0xa6, 0x285: 0xa6, 0x286: 0xa6, 0x287: 0xa6, + 0x288: 0xa6, 0x289: 0xa6, 0x28a: 0xa6, 0x28b: 0xa6, 0x28c: 0xa6, 0x28d: 0xa6, 0x28e: 0xa6, 0x28f: 0xa6, + 0x290: 0xa6, 0x291: 0xa6, 0x292: 0xea, 0x293: 0xeb, 0x294: 0xa6, 0x295: 0xa6, 0x296: 0xa6, 0x297: 0xa6, + 0x298: 0xec, 0x299: 0x41, 0x29a: 0x42, 0x29b: 0xed, 0x29c: 0x43, 0x29d: 0x44, 0x29e: 0x45, 0x29f: 0x46, + 0x2a0: 0xee, 0x2a1: 0xef, 0x2a2: 0xf0, 0x2a3: 0xf1, 0x2a4: 0xf2, 0x2a5: 0xf3, 0x2a6: 0xf4, 0x2a7: 0xf5, + 0x2a8: 0xf6, 0x2a9: 0xf7, 0x2aa: 0xf8, 0x2ab: 0xf9, 0x2ac: 0xfa, 0x2ad: 0xfb, 0x2ae: 0xfc, 0x2af: 0xfd, + 0x2b0: 0xa6, 0x2b1: 0xa6, 0x2b2: 0xa6, 0x2b3: 0xa6, 0x2b4: 0xa6, 0x2b5: 0xa6, 0x2b6: 0xa6, 0x2b7: 0xa6, + 0x2b8: 0xa6, 0x2b9: 0xa6, 0x2ba: 0xa6, 0x2bb: 0xa6, 0x2bc: 0xa6, 0x2bd: 0xa6, 0x2be: 0xa6, 0x2bf: 0xa6, + // Block 0xb, offset 0x2c0 + 0x2c0: 0xa6, 0x2c1: 0xa6, 0x2c2: 0xa6, 0x2c3: 0xa6, 0x2c4: 0xa6, 0x2c5: 0xa6, 0x2c6: 0xa6, 0x2c7: 0xa6, + 0x2c8: 0xa6, 0x2c9: 0xa6, 0x2ca: 0xa6, 0x2cb: 0xa6, 0x2cc: 0xa6, 0x2cd: 0xa6, 0x2ce: 0xa6, 0x2cf: 0xa6, + 0x2d0: 0xa6, 0x2d1: 0xa6, 0x2d2: 0xa6, 0x2d3: 0xa6, 0x2d4: 0xa6, 0x2d5: 0xa6, 0x2d6: 0xa6, 0x2d7: 0xa6, + 0x2d8: 0xa6, 0x2d9: 0xa6, 0x2da: 0xa6, 0x2db: 0xa6, 0x2dc: 0xa6, 0x2dd: 0xa6, 0x2de: 0xfe, 0x2df: 0xff, + // Block 0xc, offset 0x300 + 0x300: 0x100, 0x301: 0x100, 0x302: 0x100, 0x303: 0x100, 0x304: 0x100, 0x305: 0x100, 0x306: 0x100, 0x307: 0x100, + 0x308: 0x100, 0x309: 
0x100, 0x30a: 0x100, 0x30b: 0x100, 0x30c: 0x100, 0x30d: 0x100, 0x30e: 0x100, 0x30f: 0x100, + 0x310: 0x100, 0x311: 0x100, 0x312: 0x100, 0x313: 0x100, 0x314: 0x100, 0x315: 0x100, 0x316: 0x100, 0x317: 0x100, + 0x318: 0x100, 0x319: 0x100, 0x31a: 0x100, 0x31b: 0x100, 0x31c: 0x100, 0x31d: 0x100, 0x31e: 0x100, 0x31f: 0x100, + 0x320: 0x100, 0x321: 0x100, 0x322: 0x100, 0x323: 0x100, 0x324: 0x100, 0x325: 0x100, 0x326: 0x100, 0x327: 0x100, + 0x328: 0x100, 0x329: 0x100, 0x32a: 0x100, 0x32b: 0x100, 0x32c: 0x100, 0x32d: 0x100, 0x32e: 0x100, 0x32f: 0x100, + 0x330: 0x100, 0x331: 0x100, 0x332: 0x100, 0x333: 0x100, 0x334: 0x100, 0x335: 0x100, 0x336: 0x100, 0x337: 0x100, + 0x338: 0x100, 0x339: 0x100, 0x33a: 0x100, 0x33b: 0x100, 0x33c: 0x100, 0x33d: 0x100, 0x33e: 0x100, 0x33f: 0x100, + // Block 0xd, offset 0x340 + 0x340: 0x100, 0x341: 0x100, 0x342: 0x100, 0x343: 0x100, 0x344: 0x100, 0x345: 0x100, 0x346: 0x100, 0x347: 0x100, + 0x348: 0x100, 0x349: 0x100, 0x34a: 0x100, 0x34b: 0x100, 0x34c: 0x100, 0x34d: 0x100, 0x34e: 0x100, 0x34f: 0x100, + 0x350: 0x100, 0x351: 0x100, 0x352: 0x100, 0x353: 0x100, 0x354: 0x100, 0x355: 0x100, 0x356: 0x100, 0x357: 0x100, + 0x358: 0x100, 0x359: 0x100, 0x35a: 0x100, 0x35b: 0x100, 0x35c: 0x100, 0x35d: 0x100, 0x35e: 0x100, 0x35f: 0x100, + 0x360: 0x100, 0x361: 0x100, 0x362: 0x100, 0x363: 0x100, 0x364: 0x101, 0x365: 0x102, 0x366: 0x103, 0x367: 0x104, + 0x368: 0x47, 0x369: 0x105, 0x36a: 0x106, 0x36b: 0x48, 0x36c: 0x49, 0x36d: 0x4a, 0x36e: 0x4b, 0x36f: 0x4c, + 0x370: 0x107, 0x371: 0x4d, 0x372: 0x4e, 0x373: 0x4f, 0x374: 0x50, 0x375: 0x51, 0x376: 0x108, 0x377: 0x52, + 0x378: 0x53, 0x379: 0x54, 0x37a: 0x55, 0x37b: 0x56, 0x37c: 0x57, 0x37d: 0x58, 0x37e: 0x59, 0x37f: 0x5a, + // Block 0xe, offset 0x380 + 0x380: 0x109, 0x381: 0x10a, 0x382: 0xa6, 0x383: 0x10b, 0x384: 0x10c, 0x385: 0xa2, 0x386: 0x10d, 0x387: 0x10e, + 0x388: 0x100, 0x389: 0x100, 0x38a: 0x10f, 0x38b: 0x110, 0x38c: 0x111, 0x38d: 0x112, 0x38e: 0x113, 0x38f: 0x114, + 0x390: 0x115, 0x391: 0xa6, 0x392: 0x116, 0x393: 0x117, 0x394: 0x118, 0x395: 0x5b, 0x396: 0x5c, 0x397: 0x100, + 0x398: 0xa6, 0x399: 0xa6, 0x39a: 0xa6, 0x39b: 0xa6, 0x39c: 0x119, 0x39d: 0x11a, 0x39e: 0x5d, 0x39f: 0x100, + 0x3a0: 0x11b, 0x3a1: 0x11c, 0x3a2: 0x11d, 0x3a3: 0x11e, 0x3a4: 0x11f, 0x3a5: 0x100, 0x3a6: 0x120, 0x3a7: 0x121, + 0x3a8: 0x122, 0x3a9: 0x123, 0x3aa: 0x124, 0x3ab: 0x5e, 0x3ac: 0x125, 0x3ad: 0x126, 0x3ae: 0x5f, 0x3af: 0x100, + 0x3b0: 0x127, 0x3b1: 0x128, 0x3b2: 0x129, 0x3b3: 0x12a, 0x3b4: 0x12b, 0x3b5: 0x100, 0x3b6: 0x100, 0x3b7: 0x100, + 0x3b8: 0x100, 0x3b9: 0x12c, 0x3ba: 0x12d, 0x3bb: 0x12e, 0x3bc: 0x12f, 0x3bd: 0x130, 0x3be: 0x131, 0x3bf: 0x132, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x133, 0x3c1: 0x134, 0x3c2: 0x135, 0x3c3: 0x136, 0x3c4: 0x137, 0x3c5: 0x138, 0x3c6: 0x139, 0x3c7: 0x13a, + 0x3c8: 0x13b, 0x3c9: 0x13c, 0x3ca: 0x13d, 0x3cb: 0x13e, 0x3cc: 0x60, 0x3cd: 0x61, 0x3ce: 0x100, 0x3cf: 0x100, + 0x3d0: 0x13f, 0x3d1: 0x140, 0x3d2: 0x141, 0x3d3: 0x142, 0x3d4: 0x100, 0x3d5: 0x100, 0x3d6: 0x143, 0x3d7: 0x144, + 0x3d8: 0x145, 0x3d9: 0x146, 0x3da: 0x147, 0x3db: 0x148, 0x3dc: 0x149, 0x3dd: 0x14a, 0x3de: 0x100, 0x3df: 0x100, + 0x3e0: 0x14b, 0x3e1: 0x100, 0x3e2: 0x14c, 0x3e3: 0x14d, 0x3e4: 0x62, 0x3e5: 0x14e, 0x3e6: 0x14f, 0x3e7: 0x150, + 0x3e8: 0x151, 0x3e9: 0x152, 0x3ea: 0x153, 0x3eb: 0x154, 0x3ec: 0x155, 0x3ed: 0x100, 0x3ee: 0x100, 0x3ef: 0x100, + 0x3f0: 0x156, 0x3f1: 0x157, 0x3f2: 0x158, 0x3f3: 0x100, 0x3f4: 0x159, 0x3f5: 0x15a, 0x3f6: 0x15b, 0x3f7: 0x100, + 0x3f8: 0x100, 0x3f9: 0x100, 0x3fa: 0x100, 0x3fb: 0x15c, 0x3fc: 0x15d, 0x3fd: 0x15e, 0x3fe: 0x15f, 
0x3ff: 0x160, + // Block 0x10, offset 0x400 + 0x400: 0xa6, 0x401: 0xa6, 0x402: 0xa6, 0x403: 0xa6, 0x404: 0xa6, 0x405: 0xa6, 0x406: 0xa6, 0x407: 0xa6, + 0x408: 0xa6, 0x409: 0xa6, 0x40a: 0xa6, 0x40b: 0xa6, 0x40c: 0xa6, 0x40d: 0xa6, 0x40e: 0x161, 0x40f: 0x100, + 0x410: 0xa2, 0x411: 0x162, 0x412: 0xa6, 0x413: 0xa6, 0x414: 0xa6, 0x415: 0x163, 0x416: 0x100, 0x417: 0x100, + 0x418: 0x100, 0x419: 0x100, 0x41a: 0x100, 0x41b: 0x100, 0x41c: 0x100, 0x41d: 0x100, 0x41e: 0x100, 0x41f: 0x100, + 0x420: 0x100, 0x421: 0x100, 0x422: 0x100, 0x423: 0x100, 0x424: 0x100, 0x425: 0x100, 0x426: 0x100, 0x427: 0x100, + 0x428: 0x100, 0x429: 0x100, 0x42a: 0x100, 0x42b: 0x100, 0x42c: 0x100, 0x42d: 0x100, 0x42e: 0x100, 0x42f: 0x100, + 0x430: 0x100, 0x431: 0x100, 0x432: 0x100, 0x433: 0x100, 0x434: 0x100, 0x435: 0x100, 0x436: 0x100, 0x437: 0x100, + 0x438: 0x100, 0x439: 0x100, 0x43a: 0x100, 0x43b: 0x100, 0x43c: 0x100, 0x43d: 0x100, 0x43e: 0x164, 0x43f: 0x165, + // Block 0x11, offset 0x440 + 0x440: 0xa6, 0x441: 0xa6, 0x442: 0xa6, 0x443: 0xa6, 0x444: 0xa6, 0x445: 0xa6, 0x446: 0xa6, 0x447: 0xa6, + 0x448: 0xa6, 0x449: 0xa6, 0x44a: 0xa6, 0x44b: 0xa6, 0x44c: 0xa6, 0x44d: 0xa6, 0x44e: 0xa6, 0x44f: 0xa6, + 0x450: 0x166, 0x451: 0x167, 0x452: 0x100, 0x453: 0x100, 0x454: 0x100, 0x455: 0x100, 0x456: 0x100, 0x457: 0x100, + 0x458: 0x100, 0x459: 0x100, 0x45a: 0x100, 0x45b: 0x100, 0x45c: 0x100, 0x45d: 0x100, 0x45e: 0x100, 0x45f: 0x100, + 0x460: 0x100, 0x461: 0x100, 0x462: 0x100, 0x463: 0x100, 0x464: 0x100, 0x465: 0x100, 0x466: 0x100, 0x467: 0x100, + 0x468: 0x100, 0x469: 0x100, 0x46a: 0x100, 0x46b: 0x100, 0x46c: 0x100, 0x46d: 0x100, 0x46e: 0x100, 0x46f: 0x100, + 0x470: 0x100, 0x471: 0x100, 0x472: 0x100, 0x473: 0x100, 0x474: 0x100, 0x475: 0x100, 0x476: 0x100, 0x477: 0x100, + 0x478: 0x100, 0x479: 0x100, 0x47a: 0x100, 0x47b: 0x100, 0x47c: 0x100, 0x47d: 0x100, 0x47e: 0x100, 0x47f: 0x100, + // Block 0x12, offset 0x480 + 0x480: 0x100, 0x481: 0x100, 0x482: 0x100, 0x483: 0x100, 0x484: 0x100, 0x485: 0x100, 0x486: 0x100, 0x487: 0x100, + 0x488: 0x100, 0x489: 0x100, 0x48a: 0x100, 0x48b: 0x100, 0x48c: 0x100, 0x48d: 0x100, 0x48e: 0x100, 0x48f: 0x100, + 0x490: 0xa6, 0x491: 0xa6, 0x492: 0xa6, 0x493: 0xa6, 0x494: 0xa6, 0x495: 0xa6, 0x496: 0xa6, 0x497: 0xa6, + 0x498: 0xa6, 0x499: 0x14a, 0x49a: 0x100, 0x49b: 0x100, 0x49c: 0x100, 0x49d: 0x100, 0x49e: 0x100, 0x49f: 0x100, + 0x4a0: 0x100, 0x4a1: 0x100, 0x4a2: 0x100, 0x4a3: 0x100, 0x4a4: 0x100, 0x4a5: 0x100, 0x4a6: 0x100, 0x4a7: 0x100, + 0x4a8: 0x100, 0x4a9: 0x100, 0x4aa: 0x100, 0x4ab: 0x100, 0x4ac: 0x100, 0x4ad: 0x100, 0x4ae: 0x100, 0x4af: 0x100, + 0x4b0: 0x100, 0x4b1: 0x100, 0x4b2: 0x100, 0x4b3: 0x100, 0x4b4: 0x100, 0x4b5: 0x100, 0x4b6: 0x100, 0x4b7: 0x100, + 0x4b8: 0x100, 0x4b9: 0x100, 0x4ba: 0x100, 0x4bb: 0x100, 0x4bc: 0x100, 0x4bd: 0x100, 0x4be: 0x100, 0x4bf: 0x100, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x100, 0x4c1: 0x100, 0x4c2: 0x100, 0x4c3: 0x100, 0x4c4: 0x100, 0x4c5: 0x100, 0x4c6: 0x100, 0x4c7: 0x100, + 0x4c8: 0x100, 0x4c9: 0x100, 0x4ca: 0x100, 0x4cb: 0x100, 0x4cc: 0x100, 0x4cd: 0x100, 0x4ce: 0x100, 0x4cf: 0x100, + 0x4d0: 0x100, 0x4d1: 0x100, 0x4d2: 0x100, 0x4d3: 0x100, 0x4d4: 0x100, 0x4d5: 0x100, 0x4d6: 0x100, 0x4d7: 0x100, + 0x4d8: 0x100, 0x4d9: 0x100, 0x4da: 0x100, 0x4db: 0x100, 0x4dc: 0x100, 0x4dd: 0x100, 0x4de: 0x100, 0x4df: 0x100, + 0x4e0: 0xa6, 0x4e1: 0xa6, 0x4e2: 0xa6, 0x4e3: 0xa6, 0x4e4: 0xa6, 0x4e5: 0xa6, 0x4e6: 0xa6, 0x4e7: 0xa6, + 0x4e8: 0x154, 0x4e9: 0x168, 0x4ea: 0x169, 0x4eb: 0x16a, 0x4ec: 0x16b, 0x4ed: 0x16c, 0x4ee: 0x16d, 0x4ef: 0x100, + 0x4f0: 0x100, 0x4f1: 0x100, 0x4f2: 0x100, 0x4f3: 
0x100, 0x4f4: 0x100, 0x4f5: 0x100, 0x4f6: 0x100, 0x4f7: 0x100, + 0x4f8: 0x100, 0x4f9: 0x16e, 0x4fa: 0x16f, 0x4fb: 0x100, 0x4fc: 0xa6, 0x4fd: 0x170, 0x4fe: 0x171, 0x4ff: 0x172, + // Block 0x14, offset 0x500 + 0x500: 0xa6, 0x501: 0xa6, 0x502: 0xa6, 0x503: 0xa6, 0x504: 0xa6, 0x505: 0xa6, 0x506: 0xa6, 0x507: 0xa6, + 0x508: 0xa6, 0x509: 0xa6, 0x50a: 0xa6, 0x50b: 0xa6, 0x50c: 0xa6, 0x50d: 0xa6, 0x50e: 0xa6, 0x50f: 0xa6, + 0x510: 0xa6, 0x511: 0xa6, 0x512: 0xa6, 0x513: 0xa6, 0x514: 0xa6, 0x515: 0xa6, 0x516: 0xa6, 0x517: 0xa6, + 0x518: 0xa6, 0x519: 0xa6, 0x51a: 0xa6, 0x51b: 0xa6, 0x51c: 0xa6, 0x51d: 0xa6, 0x51e: 0xa6, 0x51f: 0x173, + 0x520: 0xa6, 0x521: 0xa6, 0x522: 0xa6, 0x523: 0xa6, 0x524: 0xa6, 0x525: 0xa6, 0x526: 0xa6, 0x527: 0xa6, + 0x528: 0xa6, 0x529: 0xa6, 0x52a: 0xa6, 0x52b: 0xa6, 0x52c: 0xa6, 0x52d: 0xa6, 0x52e: 0xa6, 0x52f: 0xa6, + 0x530: 0xa6, 0x531: 0xa6, 0x532: 0xa6, 0x533: 0x174, 0x534: 0x175, 0x535: 0x100, 0x536: 0x100, 0x537: 0x100, + 0x538: 0x100, 0x539: 0x100, 0x53a: 0x100, 0x53b: 0x100, 0x53c: 0x100, 0x53d: 0x100, 0x53e: 0x100, 0x53f: 0x100, + // Block 0x15, offset 0x540 + 0x540: 0x100, 0x541: 0x100, 0x542: 0x100, 0x543: 0x100, 0x544: 0x100, 0x545: 0x100, 0x546: 0x100, 0x547: 0x100, + 0x548: 0x100, 0x549: 0x100, 0x54a: 0x100, 0x54b: 0x100, 0x54c: 0x100, 0x54d: 0x100, 0x54e: 0x100, 0x54f: 0x100, + 0x550: 0x100, 0x551: 0x100, 0x552: 0x100, 0x553: 0x100, 0x554: 0x100, 0x555: 0x100, 0x556: 0x100, 0x557: 0x100, + 0x558: 0x100, 0x559: 0x100, 0x55a: 0x100, 0x55b: 0x100, 0x55c: 0x100, 0x55d: 0x100, 0x55e: 0x100, 0x55f: 0x100, + 0x560: 0x100, 0x561: 0x100, 0x562: 0x100, 0x563: 0x100, 0x564: 0x100, 0x565: 0x100, 0x566: 0x100, 0x567: 0x100, + 0x568: 0x100, 0x569: 0x100, 0x56a: 0x100, 0x56b: 0x100, 0x56c: 0x100, 0x56d: 0x100, 0x56e: 0x100, 0x56f: 0x100, + 0x570: 0x100, 0x571: 0x100, 0x572: 0x100, 0x573: 0x100, 0x574: 0x100, 0x575: 0x100, 0x576: 0x100, 0x577: 0x100, + 0x578: 0x100, 0x579: 0x100, 0x57a: 0x100, 0x57b: 0x100, 0x57c: 0x100, 0x57d: 0x100, 0x57e: 0x100, 0x57f: 0x176, + // Block 0x16, offset 0x580 + 0x580: 0xa6, 0x581: 0xa6, 0x582: 0xa6, 0x583: 0xa6, 0x584: 0x177, 0x585: 0x178, 0x586: 0xa6, 0x587: 0xa6, + 0x588: 0xa6, 0x589: 0xa6, 0x58a: 0xa6, 0x58b: 0x179, 0x58c: 0x100, 0x58d: 0x100, 0x58e: 0x100, 0x58f: 0x100, + 0x590: 0x100, 0x591: 0x100, 0x592: 0x100, 0x593: 0x100, 0x594: 0x100, 0x595: 0x100, 0x596: 0x100, 0x597: 0x100, + 0x598: 0x100, 0x599: 0x100, 0x59a: 0x100, 0x59b: 0x100, 0x59c: 0x100, 0x59d: 0x100, 0x59e: 0x100, 0x59f: 0x100, + 0x5a0: 0x100, 0x5a1: 0x100, 0x5a2: 0x100, 0x5a3: 0x100, 0x5a4: 0x100, 0x5a5: 0x100, 0x5a6: 0x100, 0x5a7: 0x100, + 0x5a8: 0x100, 0x5a9: 0x100, 0x5aa: 0x100, 0x5ab: 0x100, 0x5ac: 0x100, 0x5ad: 0x100, 0x5ae: 0x100, 0x5af: 0x100, + 0x5b0: 0xa6, 0x5b1: 0x17a, 0x5b2: 0x17b, 0x5b3: 0x100, 0x5b4: 0x100, 0x5b5: 0x100, 0x5b6: 0x100, 0x5b7: 0x100, + 0x5b8: 0x100, 0x5b9: 0x100, 0x5ba: 0x100, 0x5bb: 0x100, 0x5bc: 0x100, 0x5bd: 0x100, 0x5be: 0x100, 0x5bf: 0x100, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x100, 0x5c1: 0x100, 0x5c2: 0x100, 0x5c3: 0x100, 0x5c4: 0x100, 0x5c5: 0x100, 0x5c6: 0x100, 0x5c7: 0x100, + 0x5c8: 0x100, 0x5c9: 0x100, 0x5ca: 0x100, 0x5cb: 0x100, 0x5cc: 0x100, 0x5cd: 0x100, 0x5ce: 0x100, 0x5cf: 0x100, + 0x5d0: 0x100, 0x5d1: 0x100, 0x5d2: 0x100, 0x5d3: 0x100, 0x5d4: 0x100, 0x5d5: 0x100, 0x5d6: 0x100, 0x5d7: 0x100, + 0x5d8: 0x100, 0x5d9: 0x100, 0x5da: 0x100, 0x5db: 0x100, 0x5dc: 0x100, 0x5dd: 0x100, 0x5de: 0x100, 0x5df: 0x100, + 0x5e0: 0x100, 0x5e1: 0x100, 0x5e2: 0x100, 0x5e3: 0x100, 0x5e4: 0x100, 0x5e5: 0x100, 0x5e6: 0x100, 0x5e7: 0x100, + 0x5e8: 
0x100, 0x5e9: 0x100, 0x5ea: 0x100, 0x5eb: 0x100, 0x5ec: 0x100, 0x5ed: 0x100, 0x5ee: 0x100, 0x5ef: 0x100, + 0x5f0: 0x100, 0x5f1: 0x100, 0x5f2: 0x100, 0x5f3: 0x100, 0x5f4: 0x100, 0x5f5: 0x100, 0x5f6: 0x100, 0x5f7: 0x100, + 0x5f8: 0x100, 0x5f9: 0x100, 0x5fa: 0x100, 0x5fb: 0x100, 0x5fc: 0x17c, 0x5fd: 0x17d, 0x5fe: 0xa2, 0x5ff: 0x17e, + // Block 0x18, offset 0x600 + 0x600: 0xa2, 0x601: 0xa2, 0x602: 0xa2, 0x603: 0x17f, 0x604: 0x180, 0x605: 0x181, 0x606: 0x182, 0x607: 0x183, + 0x608: 0xa2, 0x609: 0x184, 0x60a: 0x100, 0x60b: 0x185, 0x60c: 0xa2, 0x60d: 0x186, 0x60e: 0x100, 0x60f: 0x100, + 0x610: 0x63, 0x611: 0x64, 0x612: 0x65, 0x613: 0x66, 0x614: 0x67, 0x615: 0x68, 0x616: 0x69, 0x617: 0x6a, + 0x618: 0x6b, 0x619: 0x6c, 0x61a: 0x6d, 0x61b: 0x6e, 0x61c: 0x6f, 0x61d: 0x70, 0x61e: 0x71, 0x61f: 0x72, + 0x620: 0xa2, 0x621: 0xa2, 0x622: 0xa2, 0x623: 0xa2, 0x624: 0xa2, 0x625: 0xa2, 0x626: 0xa2, 0x627: 0xa2, + 0x628: 0x187, 0x629: 0x188, 0x62a: 0x189, 0x62b: 0x100, 0x62c: 0x100, 0x62d: 0x100, 0x62e: 0x100, 0x62f: 0x100, + 0x630: 0x100, 0x631: 0x100, 0x632: 0x100, 0x633: 0x100, 0x634: 0x100, 0x635: 0x100, 0x636: 0x100, 0x637: 0x100, + 0x638: 0x100, 0x639: 0x100, 0x63a: 0x100, 0x63b: 0x100, 0x63c: 0x18a, 0x63d: 0x100, 0x63e: 0x100, 0x63f: 0x100, + // Block 0x19, offset 0x640 + 0x640: 0x73, 0x641: 0x74, 0x642: 0x18b, 0x643: 0x100, 0x644: 0x18c, 0x645: 0x18d, 0x646: 0x100, 0x647: 0x100, + 0x648: 0x100, 0x649: 0x100, 0x64a: 0x18e, 0x64b: 0x18f, 0x64c: 0x100, 0x64d: 0x100, 0x64e: 0x100, 0x64f: 0x100, + 0x650: 0x100, 0x651: 0x100, 0x652: 0x100, 0x653: 0x190, 0x654: 0x100, 0x655: 0x100, 0x656: 0x100, 0x657: 0x100, + 0x658: 0x100, 0x659: 0x100, 0x65a: 0x100, 0x65b: 0x100, 0x65c: 0x100, 0x65d: 0x100, 0x65e: 0x100, 0x65f: 0x191, + 0x660: 0x127, 0x661: 0x127, 0x662: 0x127, 0x663: 0x192, 0x664: 0x75, 0x665: 0x193, 0x666: 0x100, 0x667: 0x100, + 0x668: 0x100, 0x669: 0x100, 0x66a: 0x100, 0x66b: 0x100, 0x66c: 0x100, 0x66d: 0x100, 0x66e: 0x100, 0x66f: 0x100, + 0x670: 0x100, 0x671: 0x194, 0x672: 0x195, 0x673: 0x100, 0x674: 0x196, 0x675: 0x100, 0x676: 0x100, 0x677: 0x100, + 0x678: 0x76, 0x679: 0x77, 0x67a: 0x78, 0x67b: 0x197, 0x67c: 0x100, 0x67d: 0x100, 0x67e: 0x100, 0x67f: 0x100, + // Block 0x1a, offset 0x680 + 0x680: 0x198, 0x681: 0xa2, 0x682: 0x199, 0x683: 0x19a, 0x684: 0x79, 0x685: 0x7a, 0x686: 0x19b, 0x687: 0x19c, + 0x688: 0x7b, 0x689: 0x19d, 0x68a: 0x100, 0x68b: 0x100, 0x68c: 0xa2, 0x68d: 0xa2, 0x68e: 0xa2, 0x68f: 0xa2, + 0x690: 0xa2, 0x691: 0xa2, 0x692: 0xa2, 0x693: 0xa2, 0x694: 0xa2, 0x695: 0xa2, 0x696: 0xa2, 0x697: 0xa2, + 0x698: 0xa2, 0x699: 0xa2, 0x69a: 0xa2, 0x69b: 0x19e, 0x69c: 0xa2, 0x69d: 0x19f, 0x69e: 0xa2, 0x69f: 0x1a0, + 0x6a0: 0x1a1, 0x6a1: 0x1a2, 0x6a2: 0x1a3, 0x6a3: 0x100, 0x6a4: 0xa2, 0x6a5: 0xa2, 0x6a6: 0xa2, 0x6a7: 0xa2, + 0x6a8: 0xa2, 0x6a9: 0x1a4, 0x6aa: 0x1a5, 0x6ab: 0x1a6, 0x6ac: 0xa2, 0x6ad: 0xa2, 0x6ae: 0x1a7, 0x6af: 0x1a8, + 0x6b0: 0x100, 0x6b1: 0x100, 0x6b2: 0x100, 0x6b3: 0x100, 0x6b4: 0x100, 0x6b5: 0x100, 0x6b6: 0x100, 0x6b7: 0x100, + 0x6b8: 0x100, 0x6b9: 0x100, 0x6ba: 0x100, 0x6bb: 0x100, 0x6bc: 0x100, 0x6bd: 0x100, 0x6be: 0x100, 0x6bf: 0x100, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0xa6, 0x6c1: 0xa6, 0x6c2: 0xa6, 0x6c3: 0xa6, 0x6c4: 0xa6, 0x6c5: 0xa6, 0x6c6: 0xa6, 0x6c7: 0xa6, + 0x6c8: 0xa6, 0x6c9: 0xa6, 0x6ca: 0xa6, 0x6cb: 0xa6, 0x6cc: 0xa6, 0x6cd: 0xa6, 0x6ce: 0xa6, 0x6cf: 0xa6, + 0x6d0: 0xa6, 0x6d1: 0xa6, 0x6d2: 0xa6, 0x6d3: 0xa6, 0x6d4: 0xa6, 0x6d5: 0xa6, 0x6d6: 0xa6, 0x6d7: 0xa6, + 0x6d8: 0xa6, 0x6d9: 0xa6, 0x6da: 0xa6, 0x6db: 0x1a9, 0x6dc: 0xa6, 0x6dd: 0xa6, 0x6de: 0xa6, 0x6df: 0xa6, + 
0x6e0: 0xa6, 0x6e1: 0xa6, 0x6e2: 0xa6, 0x6e3: 0xa6, 0x6e4: 0xa6, 0x6e5: 0xa6, 0x6e6: 0xa6, 0x6e7: 0xa6, + 0x6e8: 0xa6, 0x6e9: 0xa6, 0x6ea: 0xa6, 0x6eb: 0xa6, 0x6ec: 0xa6, 0x6ed: 0xa6, 0x6ee: 0xa6, 0x6ef: 0xa6, + 0x6f0: 0xa6, 0x6f1: 0xa6, 0x6f2: 0xa6, 0x6f3: 0xa6, 0x6f4: 0xa6, 0x6f5: 0xa6, 0x6f6: 0xa6, 0x6f7: 0xa6, + 0x6f8: 0xa6, 0x6f9: 0xa6, 0x6fa: 0xa6, 0x6fb: 0xa6, 0x6fc: 0xa6, 0x6fd: 0xa6, 0x6fe: 0xa6, 0x6ff: 0xa6, + // Block 0x1c, offset 0x700 + 0x700: 0xa6, 0x701: 0xa6, 0x702: 0xa6, 0x703: 0xa6, 0x704: 0xa6, 0x705: 0xa6, 0x706: 0xa6, 0x707: 0xa6, + 0x708: 0xa6, 0x709: 0xa6, 0x70a: 0xa6, 0x70b: 0xa6, 0x70c: 0xa6, 0x70d: 0xa6, 0x70e: 0xa6, 0x70f: 0xa6, + 0x710: 0xa6, 0x711: 0xa6, 0x712: 0xa6, 0x713: 0xa6, 0x714: 0xa6, 0x715: 0xa6, 0x716: 0xa6, 0x717: 0xa6, + 0x718: 0xa6, 0x719: 0xa6, 0x71a: 0xa6, 0x71b: 0xa6, 0x71c: 0x1aa, 0x71d: 0xa6, 0x71e: 0xa6, 0x71f: 0xa6, + 0x720: 0x1ab, 0x721: 0xa6, 0x722: 0xa6, 0x723: 0xa6, 0x724: 0xa6, 0x725: 0xa6, 0x726: 0xa6, 0x727: 0xa6, + 0x728: 0xa6, 0x729: 0xa6, 0x72a: 0xa6, 0x72b: 0xa6, 0x72c: 0xa6, 0x72d: 0xa6, 0x72e: 0xa6, 0x72f: 0xa6, + 0x730: 0xa6, 0x731: 0xa6, 0x732: 0xa6, 0x733: 0xa6, 0x734: 0xa6, 0x735: 0xa6, 0x736: 0xa6, 0x737: 0xa6, + 0x738: 0xa6, 0x739: 0xa6, 0x73a: 0xa6, 0x73b: 0xa6, 0x73c: 0xa6, 0x73d: 0xa6, 0x73e: 0xa6, 0x73f: 0xa6, + // Block 0x1d, offset 0x740 + 0x740: 0xa6, 0x741: 0xa6, 0x742: 0xa6, 0x743: 0xa6, 0x744: 0xa6, 0x745: 0xa6, 0x746: 0xa6, 0x747: 0xa6, + 0x748: 0xa6, 0x749: 0xa6, 0x74a: 0xa6, 0x74b: 0xa6, 0x74c: 0xa6, 0x74d: 0xa6, 0x74e: 0xa6, 0x74f: 0xa6, + 0x750: 0xa6, 0x751: 0xa6, 0x752: 0xa6, 0x753: 0xa6, 0x754: 0xa6, 0x755: 0xa6, 0x756: 0xa6, 0x757: 0xa6, + 0x758: 0xa6, 0x759: 0xa6, 0x75a: 0xa6, 0x75b: 0xa6, 0x75c: 0xa6, 0x75d: 0xa6, 0x75e: 0xa6, 0x75f: 0xa6, + 0x760: 0xa6, 0x761: 0xa6, 0x762: 0xa6, 0x763: 0xa6, 0x764: 0xa6, 0x765: 0xa6, 0x766: 0xa6, 0x767: 0xa6, + 0x768: 0xa6, 0x769: 0xa6, 0x76a: 0xa6, 0x76b: 0xa6, 0x76c: 0xa6, 0x76d: 0xa6, 0x76e: 0xa6, 0x76f: 0xa6, + 0x770: 0xa6, 0x771: 0xa6, 0x772: 0xa6, 0x773: 0xa6, 0x774: 0xa6, 0x775: 0xa6, 0x776: 0xa6, 0x777: 0xa6, + 0x778: 0xa6, 0x779: 0xa6, 0x77a: 0x1ac, 0x77b: 0xa6, 0x77c: 0xa6, 0x77d: 0xa6, 0x77e: 0xa6, 0x77f: 0xa6, + // Block 0x1e, offset 0x780 + 0x780: 0xa6, 0x781: 0xa6, 0x782: 0xa6, 0x783: 0xa6, 0x784: 0xa6, 0x785: 0xa6, 0x786: 0xa6, 0x787: 0xa6, + 0x788: 0xa6, 0x789: 0xa6, 0x78a: 0xa6, 0x78b: 0xa6, 0x78c: 0xa6, 0x78d: 0xa6, 0x78e: 0xa6, 0x78f: 0xa6, + 0x790: 0xa6, 0x791: 0xa6, 0x792: 0xa6, 0x793: 0xa6, 0x794: 0xa6, 0x795: 0xa6, 0x796: 0xa6, 0x797: 0xa6, + 0x798: 0xa6, 0x799: 0xa6, 0x79a: 0xa6, 0x79b: 0xa6, 0x79c: 0xa6, 0x79d: 0xa6, 0x79e: 0xa6, 0x79f: 0xa6, + 0x7a0: 0xa6, 0x7a1: 0xa6, 0x7a2: 0xa6, 0x7a3: 0xa6, 0x7a4: 0xa6, 0x7a5: 0xa6, 0x7a6: 0xa6, 0x7a7: 0xa6, + 0x7a8: 0xa6, 0x7a9: 0xa6, 0x7aa: 0xa6, 0x7ab: 0xa6, 0x7ac: 0xa6, 0x7ad: 0xa6, 0x7ae: 0xa6, 0x7af: 0x1ad, + 0x7b0: 0x100, 0x7b1: 0x100, 0x7b2: 0x100, 0x7b3: 0x100, 0x7b4: 0x100, 0x7b5: 0x100, 0x7b6: 0x100, 0x7b7: 0x100, + 0x7b8: 0x100, 0x7b9: 0x100, 0x7ba: 0x100, 0x7bb: 0x100, 0x7bc: 0x100, 0x7bd: 0x100, 0x7be: 0x100, 0x7bf: 0x100, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0x100, 0x7c1: 0x100, 0x7c2: 0x100, 0x7c3: 0x100, 0x7c4: 0x100, 0x7c5: 0x100, 0x7c6: 0x100, 0x7c7: 0x100, + 0x7c8: 0x100, 0x7c9: 0x100, 0x7ca: 0x100, 0x7cb: 0x100, 0x7cc: 0x100, 0x7cd: 0x100, 0x7ce: 0x100, 0x7cf: 0x100, + 0x7d0: 0x100, 0x7d1: 0x100, 0x7d2: 0x100, 0x7d3: 0x100, 0x7d4: 0x100, 0x7d5: 0x100, 0x7d6: 0x100, 0x7d7: 0x100, + 0x7d8: 0x100, 0x7d9: 0x100, 0x7da: 0x100, 0x7db: 0x100, 0x7dc: 0x100, 0x7dd: 0x100, 0x7de: 0x100, 0x7df: 
0x100, + 0x7e0: 0x7c, 0x7e1: 0x7d, 0x7e2: 0x7e, 0x7e3: 0x7f, 0x7e4: 0x80, 0x7e5: 0x81, 0x7e6: 0x82, 0x7e7: 0x83, + 0x7e8: 0x84, 0x7e9: 0x100, 0x7ea: 0x100, 0x7eb: 0x100, 0x7ec: 0x100, 0x7ed: 0x100, 0x7ee: 0x100, 0x7ef: 0x100, + 0x7f0: 0x100, 0x7f1: 0x100, 0x7f2: 0x100, 0x7f3: 0x100, 0x7f4: 0x100, 0x7f5: 0x100, 0x7f6: 0x100, 0x7f7: 0x100, + 0x7f8: 0x100, 0x7f9: 0x100, 0x7fa: 0x100, 0x7fb: 0x100, 0x7fc: 0x100, 0x7fd: 0x100, 0x7fe: 0x100, 0x7ff: 0x100, + // Block 0x20, offset 0x800 + 0x800: 0xa6, 0x801: 0xa6, 0x802: 0xa6, 0x803: 0xa6, 0x804: 0xa6, 0x805: 0xa6, 0x806: 0xa6, 0x807: 0xa6, + 0x808: 0xa6, 0x809: 0xa6, 0x80a: 0xa6, 0x80b: 0xa6, 0x80c: 0xa6, 0x80d: 0x1ae, 0x80e: 0xa6, 0x80f: 0xa6, + 0x810: 0xa6, 0x811: 0xa6, 0x812: 0xa6, 0x813: 0xa6, 0x814: 0xa6, 0x815: 0xa6, 0x816: 0xa6, 0x817: 0xa6, + 0x818: 0xa6, 0x819: 0xa6, 0x81a: 0xa6, 0x81b: 0xa6, 0x81c: 0xa6, 0x81d: 0xa6, 0x81e: 0xa6, 0x81f: 0xa6, + 0x820: 0xa6, 0x821: 0xa6, 0x822: 0xa6, 0x823: 0xa6, 0x824: 0xa6, 0x825: 0xa6, 0x826: 0xa6, 0x827: 0xa6, + 0x828: 0xa6, 0x829: 0xa6, 0x82a: 0xa6, 0x82b: 0xa6, 0x82c: 0xa6, 0x82d: 0xa6, 0x82e: 0xa6, 0x82f: 0xa6, + 0x830: 0xa6, 0x831: 0xa6, 0x832: 0xa6, 0x833: 0xa6, 0x834: 0xa6, 0x835: 0xa6, 0x836: 0xa6, 0x837: 0xa6, + 0x838: 0xa6, 0x839: 0xa6, 0x83a: 0xa6, 0x83b: 0xa6, 0x83c: 0xa6, 0x83d: 0xa6, 0x83e: 0xa6, 0x83f: 0xa6, + // Block 0x21, offset 0x840 + 0x840: 0xa6, 0x841: 0xa6, 0x842: 0xa6, 0x843: 0xa6, 0x844: 0xa6, 0x845: 0xa6, 0x846: 0xa6, 0x847: 0xa6, + 0x848: 0xa6, 0x849: 0xa6, 0x84a: 0xa6, 0x84b: 0xa6, 0x84c: 0xa6, 0x84d: 0xa6, 0x84e: 0x1af, 0x84f: 0x100, + 0x850: 0x100, 0x851: 0x100, 0x852: 0x100, 0x853: 0x100, 0x854: 0x100, 0x855: 0x100, 0x856: 0x100, 0x857: 0x100, + 0x858: 0x100, 0x859: 0x100, 0x85a: 0x100, 0x85b: 0x100, 0x85c: 0x100, 0x85d: 0x100, 0x85e: 0x100, 0x85f: 0x100, + 0x860: 0x100, 0x861: 0x100, 0x862: 0x100, 0x863: 0x100, 0x864: 0x100, 0x865: 0x100, 0x866: 0x100, 0x867: 0x100, + 0x868: 0x100, 0x869: 0x100, 0x86a: 0x100, 0x86b: 0x100, 0x86c: 0x100, 0x86d: 0x100, 0x86e: 0x100, 0x86f: 0x100, + 0x870: 0x100, 0x871: 0x100, 0x872: 0x100, 0x873: 0x100, 0x874: 0x100, 0x875: 0x100, 0x876: 0x100, 0x877: 0x100, + 0x878: 0x100, 0x879: 0x100, 0x87a: 0x100, 0x87b: 0x100, 0x87c: 0x100, 0x87d: 0x100, 0x87e: 0x100, 0x87f: 0x100, + // Block 0x22, offset 0x880 + 0x890: 0x0c, 0x891: 0x0d, 0x892: 0x0e, 0x893: 0x0f, 0x894: 0x10, 0x895: 0x0a, 0x896: 0x11, 0x897: 0x07, + 0x898: 0x12, 0x899: 0x0a, 0x89a: 0x13, 0x89b: 0x14, 0x89c: 0x15, 0x89d: 0x16, 0x89e: 0x17, 0x89f: 0x18, + 0x8a0: 0x07, 0x8a1: 0x07, 0x8a2: 0x07, 0x8a3: 0x07, 0x8a4: 0x07, 0x8a5: 0x07, 0x8a6: 0x07, 0x8a7: 0x07, + 0x8a8: 0x07, 0x8a9: 0x07, 0x8aa: 0x19, 0x8ab: 0x1a, 0x8ac: 0x1b, 0x8ad: 0x07, 0x8ae: 0x1c, 0x8af: 0x1d, + 0x8b0: 0x07, 0x8b1: 0x1e, 0x8b2: 0x1f, 0x8b3: 0x0a, 0x8b4: 0x0a, 0x8b5: 0x0a, 0x8b6: 0x0a, 0x8b7: 0x0a, + 0x8b8: 0x0a, 0x8b9: 0x0a, 0x8ba: 0x0a, 0x8bb: 0x0a, 0x8bc: 0x0a, 0x8bd: 0x0a, 0x8be: 0x0a, 0x8bf: 0x0a, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x0a, 0x8c1: 0x0a, 0x8c2: 0x0a, 0x8c3: 0x0a, 0x8c4: 0x0a, 0x8c5: 0x0a, 0x8c6: 0x0a, 0x8c7: 0x0a, + 0x8c8: 0x0a, 0x8c9: 0x0a, 0x8ca: 0x0a, 0x8cb: 0x0a, 0x8cc: 0x0a, 0x8cd: 0x0a, 0x8ce: 0x0a, 0x8cf: 0x0a, + 0x8d0: 0x0a, 0x8d1: 0x0a, 0x8d2: 0x0a, 0x8d3: 0x0a, 0x8d4: 0x0a, 0x8d5: 0x0a, 0x8d6: 0x0a, 0x8d7: 0x0a, + 0x8d8: 0x0a, 0x8d9: 0x0a, 0x8da: 0x0a, 0x8db: 0x0a, 0x8dc: 0x0a, 0x8dd: 0x0a, 0x8de: 0x0a, 0x8df: 0x0a, + 0x8e0: 0x0a, 0x8e1: 0x0a, 0x8e2: 0x0a, 0x8e3: 0x0a, 0x8e4: 0x0a, 0x8e5: 0x0a, 0x8e6: 0x0a, 0x8e7: 0x0a, + 0x8e8: 0x0a, 0x8e9: 0x0a, 0x8ea: 0x0a, 0x8eb: 0x0a, 0x8ec: 0x0a, 
0x8ed: 0x0a, 0x8ee: 0x0a, 0x8ef: 0x0a, + 0x8f0: 0x0a, 0x8f1: 0x0a, 0x8f2: 0x0a, 0x8f3: 0x0a, 0x8f4: 0x0a, 0x8f5: 0x0a, 0x8f6: 0x0a, 0x8f7: 0x0a, + 0x8f8: 0x0a, 0x8f9: 0x0a, 0x8fa: 0x0a, 0x8fb: 0x0a, 0x8fc: 0x0a, 0x8fd: 0x0a, 0x8fe: 0x0a, 0x8ff: 0x0a, + // Block 0x24, offset 0x900 + 0x900: 0x1b0, 0x901: 0x1b1, 0x902: 0x100, 0x903: 0x100, 0x904: 0x1b2, 0x905: 0x1b2, 0x906: 0x1b2, 0x907: 0x1b3, + 0x908: 0x100, 0x909: 0x100, 0x90a: 0x100, 0x90b: 0x100, 0x90c: 0x100, 0x90d: 0x100, 0x90e: 0x100, 0x90f: 0x100, + 0x910: 0x100, 0x911: 0x100, 0x912: 0x100, 0x913: 0x100, 0x914: 0x100, 0x915: 0x100, 0x916: 0x100, 0x917: 0x100, + 0x918: 0x100, 0x919: 0x100, 0x91a: 0x100, 0x91b: 0x100, 0x91c: 0x100, 0x91d: 0x100, 0x91e: 0x100, 0x91f: 0x100, + 0x920: 0x100, 0x921: 0x100, 0x922: 0x100, 0x923: 0x100, 0x924: 0x100, 0x925: 0x100, 0x926: 0x100, 0x927: 0x100, + 0x928: 0x100, 0x929: 0x100, 0x92a: 0x100, 0x92b: 0x100, 0x92c: 0x100, 0x92d: 0x100, 0x92e: 0x100, 0x92f: 0x100, + 0x930: 0x100, 0x931: 0x100, 0x932: 0x100, 0x933: 0x100, 0x934: 0x100, 0x935: 0x100, 0x936: 0x100, 0x937: 0x100, + 0x938: 0x100, 0x939: 0x100, 0x93a: 0x100, 0x93b: 0x100, 0x93c: 0x100, 0x93d: 0x100, 0x93e: 0x100, 0x93f: 0x100, + // Block 0x25, offset 0x940 + 0x940: 0x0a, 0x941: 0x0a, 0x942: 0x0a, 0x943: 0x0a, 0x944: 0x0a, 0x945: 0x0a, 0x946: 0x0a, 0x947: 0x0a, + 0x948: 0x0a, 0x949: 0x0a, 0x94a: 0x0a, 0x94b: 0x0a, 0x94c: 0x0a, 0x94d: 0x0a, 0x94e: 0x0a, 0x94f: 0x0a, + 0x950: 0x0a, 0x951: 0x0a, 0x952: 0x0a, 0x953: 0x0a, 0x954: 0x0a, 0x955: 0x0a, 0x956: 0x0a, 0x957: 0x0a, + 0x958: 0x0a, 0x959: 0x0a, 0x95a: 0x0a, 0x95b: 0x0a, 0x95c: 0x0a, 0x95d: 0x0a, 0x95e: 0x0a, 0x95f: 0x0a, + 0x960: 0x22, 0x961: 0x0a, 0x962: 0x0a, 0x963: 0x0a, 0x964: 0x0a, 0x965: 0x0a, 0x966: 0x0a, 0x967: 0x0a, + 0x968: 0x0a, 0x969: 0x0a, 0x96a: 0x0a, 0x96b: 0x0a, 0x96c: 0x0a, 0x96d: 0x0a, 0x96e: 0x0a, 0x96f: 0x0a, + 0x970: 0x0a, 0x971: 0x0a, 0x972: 0x0a, 0x973: 0x0a, 0x974: 0x0a, 0x975: 0x0a, 0x976: 0x0a, 0x977: 0x0a, + 0x978: 0x0a, 0x979: 0x0a, 0x97a: 0x0a, 0x97b: 0x0a, 0x97c: 0x0a, 0x97d: 0x0a, 0x97e: 0x0a, 0x97f: 0x0a, + // Block 0x26, offset 0x980 + 0x980: 0x0a, 0x981: 0x0a, 0x982: 0x0a, 0x983: 0x0a, 0x984: 0x0a, 0x985: 0x0a, 0x986: 0x0a, 0x987: 0x0a, + 0x988: 0x0a, 0x989: 0x0a, 0x98a: 0x0a, 0x98b: 0x0a, 0x98c: 0x0a, 0x98d: 0x0a, 0x98e: 0x0a, 0x98f: 0x0a, +} + +// idnaSparseOffset: 303 entries, 606 bytes +var idnaSparseOffset = []uint16{0x0, 0x8, 0x19, 0x25, 0x27, 0x2c, 0x33, 0x3e, 0x4a, 0x4e, 0x5d, 0x62, 0x6c, 0x78, 0x7e, 0x87, 0x97, 0xa6, 0xb1, 0xbe, 0xcf, 0xd9, 0xe0, 0xed, 0xfe, 0x105, 0x110, 0x11f, 0x12d, 0x137, 0x139, 0x13e, 0x141, 0x144, 0x146, 0x152, 0x15d, 0x165, 0x16b, 0x171, 0x176, 0x17b, 0x17e, 0x182, 0x188, 0x18d, 0x198, 0x1a2, 0x1a8, 0x1b9, 0x1c4, 0x1c7, 0x1cf, 0x1d2, 0x1df, 0x1e7, 0x1eb, 0x1f2, 0x1fa, 0x20a, 0x216, 0x219, 0x223, 0x22f, 0x23b, 0x247, 0x24f, 0x254, 0x261, 0x272, 0x27d, 0x282, 0x28b, 0x293, 0x299, 0x29e, 0x2a1, 0x2a5, 0x2ab, 0x2af, 0x2b3, 0x2b7, 0x2bc, 0x2c4, 0x2cb, 0x2d6, 0x2e0, 0x2e4, 0x2e7, 0x2ed, 0x2f1, 0x2f3, 0x2f6, 0x2f8, 0x2fb, 0x305, 0x308, 0x317, 0x31b, 0x31f, 0x321, 0x32a, 0x32e, 0x333, 0x338, 0x33e, 0x34e, 0x354, 0x358, 0x367, 0x36c, 0x374, 0x37e, 0x389, 0x391, 0x3a2, 0x3ab, 0x3bb, 0x3c8, 0x3d4, 0x3d9, 0x3e6, 0x3ea, 0x3ef, 0x3f1, 0x3f3, 0x3f7, 0x3f9, 0x3fd, 0x406, 0x40c, 0x410, 0x420, 0x42a, 0x42f, 0x432, 0x438, 0x43f, 0x444, 0x448, 0x44e, 0x453, 0x45c, 0x461, 0x467, 0x46e, 0x475, 0x47c, 0x480, 0x483, 0x488, 0x494, 0x49a, 0x49f, 0x4a6, 0x4ae, 0x4b3, 0x4b7, 0x4c7, 0x4ce, 0x4d2, 0x4d6, 0x4dd, 0x4df, 0x4e2, 0x4e5, 0x4e9, 0x4f2, 0x4f6, 
0x4fe, 0x501, 0x509, 0x514, 0x523, 0x52f, 0x535, 0x542, 0x54e, 0x556, 0x55f, 0x56a, 0x571, 0x580, 0x58d, 0x591, 0x59e, 0x5a7, 0x5ab, 0x5ba, 0x5c2, 0x5cd, 0x5d6, 0x5dc, 0x5e4, 0x5ed, 0x5f9, 0x5fc, 0x608, 0x60b, 0x614, 0x617, 0x61c, 0x625, 0x62a, 0x637, 0x642, 0x64b, 0x656, 0x659, 0x65c, 0x666, 0x66f, 0x67b, 0x688, 0x695, 0x6a3, 0x6aa, 0x6b5, 0x6bc, 0x6c0, 0x6c4, 0x6c7, 0x6cc, 0x6cf, 0x6d2, 0x6d6, 0x6d9, 0x6de, 0x6e5, 0x6e8, 0x6f0, 0x6f4, 0x6ff, 0x702, 0x705, 0x708, 0x70e, 0x714, 0x71d, 0x720, 0x723, 0x726, 0x72e, 0x733, 0x73c, 0x73f, 0x744, 0x74e, 0x752, 0x756, 0x759, 0x75c, 0x760, 0x76f, 0x77b, 0x77f, 0x784, 0x789, 0x78e, 0x792, 0x797, 0x7a0, 0x7a5, 0x7a9, 0x7af, 0x7b5, 0x7ba, 0x7c0, 0x7c6, 0x7d0, 0x7d6, 0x7df, 0x7e2, 0x7e5, 0x7e9, 0x7ed, 0x7f1, 0x7f7, 0x7fd, 0x802, 0x805, 0x815, 0x81c, 0x820, 0x827, 0x82b, 0x831, 0x838, 0x83f, 0x845, 0x84e, 0x852, 0x860, 0x863, 0x866, 0x86a, 0x86e, 0x871, 0x875, 0x878, 0x87d, 0x87f, 0x881} + +// idnaSparseValues: 2180 entries, 8720 bytes +var idnaSparseValues = [2180]valueRange{ + // Block 0x0, offset 0x0 + {value: 0x0000, lo: 0x07}, + {value: 0xe105, lo: 0x80, hi: 0x96}, + {value: 0x0018, lo: 0x97, hi: 0x97}, + {value: 0xe105, lo: 0x98, hi: 0x9e}, + {value: 0x001f, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xbf}, + // Block 0x1, offset 0x8 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0xe01d, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x82}, + {value: 0x0335, lo: 0x83, hi: 0x83}, + {value: 0x034d, lo: 0x84, hi: 0x84}, + {value: 0x0365, lo: 0x85, hi: 0x85}, + {value: 0xe00d, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x87}, + {value: 0xe00d, lo: 0x88, hi: 0x88}, + {value: 0x0008, lo: 0x89, hi: 0x89}, + {value: 0xe00d, lo: 0x8a, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0x8b}, + {value: 0xe00d, lo: 0x8c, hi: 0x8c}, + {value: 0x0008, lo: 0x8d, hi: 0x8d}, + {value: 0xe00d, lo: 0x8e, hi: 0x8e}, + {value: 0x0008, lo: 0x8f, hi: 0xbf}, + // Block 0x2, offset 0x19 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x00a9, lo: 0xb0, hi: 0xb0}, + {value: 0x037d, lo: 0xb1, hi: 0xb1}, + {value: 0x00b1, lo: 0xb2, hi: 0xb2}, + {value: 0x00b9, lo: 0xb3, hi: 0xb3}, + {value: 0x034d, lo: 0xb4, hi: 0xb4}, + {value: 0x0395, lo: 0xb5, hi: 0xb5}, + {value: 0xe1bd, lo: 0xb6, hi: 0xb6}, + {value: 0x00c1, lo: 0xb7, hi: 0xb7}, + {value: 0x00c9, lo: 0xb8, hi: 0xb8}, + {value: 0x0008, lo: 0xb9, hi: 0xbf}, + // Block 0x3, offset 0x25 + {value: 0x0000, lo: 0x01}, + {value: 0x3308, lo: 0x80, hi: 0xbf}, + // Block 0x4, offset 0x27 + {value: 0x0000, lo: 0x04}, + {value: 0x03f5, lo: 0x80, hi: 0x8f}, + {value: 0xe105, lo: 0x90, hi: 0x9f}, + {value: 0x049d, lo: 0xa0, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x5, offset 0x2c + {value: 0x0000, lo: 0x06}, + {value: 0xe185, lo: 0x80, hi: 0x8f}, + {value: 0x0545, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x98}, + {value: 0x0008, lo: 0x99, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x6, offset 0x33 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0131, lo: 0x87, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x88}, + {value: 0x0018, lo: 0x89, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x90}, + {value: 0x3308, lo: 0x91, hi: 0xbd}, + {value: 0x0818, lo: 0xbe, hi: 0xbe}, + {value: 0x3308, lo: 
0xbf, hi: 0xbf}, + // Block 0x7, offset 0x3e + {value: 0x0000, lo: 0x0b}, + {value: 0x0818, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x82}, + {value: 0x0818, lo: 0x83, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x85}, + {value: 0x0818, lo: 0x86, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0808, lo: 0x90, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xae}, + {value: 0x0808, lo: 0xaf, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x8, offset 0x4a + {value: 0x0000, lo: 0x03}, + {value: 0x0a08, lo: 0x80, hi: 0x87}, + {value: 0x0c08, lo: 0x88, hi: 0x99}, + {value: 0x0a08, lo: 0x9a, hi: 0xbf}, + // Block 0x9, offset 0x4e + {value: 0x0000, lo: 0x0e}, + {value: 0x3308, lo: 0x80, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8c}, + {value: 0x0c08, lo: 0x8d, hi: 0x8d}, + {value: 0x0a08, lo: 0x8e, hi: 0x98}, + {value: 0x0c08, lo: 0x99, hi: 0x9b}, + {value: 0x0a08, lo: 0x9c, hi: 0xaa}, + {value: 0x0c08, lo: 0xab, hi: 0xac}, + {value: 0x0a08, lo: 0xad, hi: 0xb0}, + {value: 0x0c08, lo: 0xb1, hi: 0xb1}, + {value: 0x0a08, lo: 0xb2, hi: 0xb2}, + {value: 0x0c08, lo: 0xb3, hi: 0xb4}, + {value: 0x0a08, lo: 0xb5, hi: 0xb7}, + {value: 0x0c08, lo: 0xb8, hi: 0xb9}, + {value: 0x0a08, lo: 0xba, hi: 0xbf}, + // Block 0xa, offset 0x5d + {value: 0x0000, lo: 0x04}, + {value: 0x0808, lo: 0x80, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xb0}, + {value: 0x0808, lo: 0xb1, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0xb, offset 0x62 + {value: 0x0000, lo: 0x09}, + {value: 0x0808, lo: 0x80, hi: 0x89}, + {value: 0x0a08, lo: 0x8a, hi: 0xaa}, + {value: 0x3308, lo: 0xab, hi: 0xb3}, + {value: 0x0808, lo: 0xb4, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xb9}, + {value: 0x0818, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbd}, + {value: 0x0818, lo: 0xbe, hi: 0xbf}, + // Block 0xc, offset 0x6c + {value: 0x0000, lo: 0x0b}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x3308, lo: 0x96, hi: 0x99}, + {value: 0x0808, lo: 0x9a, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0xa3}, + {value: 0x0808, lo: 0xa4, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa7}, + {value: 0x0808, lo: 0xa8, hi: 0xa8}, + {value: 0x3308, lo: 0xa9, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0818, lo: 0xb0, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xd, offset 0x78 + {value: 0x0000, lo: 0x05}, + {value: 0x0a08, lo: 0x80, hi: 0x88}, + {value: 0x0808, lo: 0x89, hi: 0x89}, + {value: 0x3308, lo: 0x8a, hi: 0xa1}, + {value: 0x0840, lo: 0xa2, hi: 0xa2}, + {value: 0x3308, lo: 0xa3, hi: 0xbf}, + // Block 0xe, offset 0x7e + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0xf, offset 0x87 + {value: 0x0000, lo: 0x0f}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x3008, lo: 0x81, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x85}, + {value: 0x3008, lo: 0x86, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x3008, lo: 0x8a, hi: 0x8c}, + {value: 0x3b08, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x97}, + {value: 0x0040, lo: 0x98, hi: 0xa5}, + {value: 0x0008, lo: 
0xa6, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0x10, offset 0x97 + {value: 0x0000, lo: 0x0e}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x3008, lo: 0x81, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x0008, lo: 0x92, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xa9}, + {value: 0x0008, lo: 0xaa, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x3308, lo: 0xbe, hi: 0xbf}, + // Block 0x11, offset 0xa6 + {value: 0x0000, lo: 0x0a}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x0008, lo: 0x92, hi: 0xba}, + {value: 0x3b08, lo: 0xbb, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x12, offset 0xb1 + {value: 0x0000, lo: 0x0c}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x99}, + {value: 0x0008, lo: 0x9a, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xb2}, + {value: 0x0008, lo: 0xb3, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x13, offset 0xbe + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x89}, + {value: 0x3b08, lo: 0x8a, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8e}, + {value: 0x3008, lo: 0x8f, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0x95}, + {value: 0x3308, lo: 0x96, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x3008, lo: 0x98, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xb1}, + {value: 0x3008, lo: 0xb2, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x14, offset 0xcf + {value: 0x0000, lo: 0x09}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb2}, + {value: 0x01f1, lo: 0xb3, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb9}, + {value: 0x3b08, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbe}, + {value: 0x0018, lo: 0xbf, hi: 0xbf}, + // Block 0x15, offset 0xd9 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x8e}, + {value: 0x0018, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0xbf}, + // Block 0x16, offset 0xe0 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x3308, lo: 0x88, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9b}, + {value: 0x0201, lo: 0x9c, hi: 0x9c}, + {value: 0x0209, lo: 0x9d, hi: 0x9d}, + {value: 0x0008, lo: 0x9e, hi: 0x9f}, + {value: 
0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0x17, offset 0xed + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0x8b}, + {value: 0xe03d, lo: 0x8c, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xb8}, + {value: 0x3308, lo: 0xb9, hi: 0xb9}, + {value: 0x0018, lo: 0xba, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x18, offset 0xfe + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x86}, + {value: 0x0018, lo: 0x87, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0018, lo: 0x8e, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0xbf}, + // Block 0x19, offset 0x105 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x3008, lo: 0xab, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xb0}, + {value: 0x3008, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb7}, + {value: 0x3008, lo: 0xb8, hi: 0xb8}, + {value: 0x3b08, lo: 0xb9, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0x1a, offset 0x110 + {value: 0x0000, lo: 0x0e}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x95}, + {value: 0x3008, lo: 0x96, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x99}, + {value: 0x0008, lo: 0x9a, hi: 0x9d}, + {value: 0x3308, lo: 0x9e, hi: 0xa0}, + {value: 0x0008, lo: 0xa1, hi: 0xa1}, + {value: 0x3008, lo: 0xa2, hi: 0xa4}, + {value: 0x0008, lo: 0xa5, hi: 0xa6}, + {value: 0x3008, lo: 0xa7, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xbf}, + // Block 0x1b, offset 0x11f + {value: 0x0000, lo: 0x0d}, + {value: 0x0008, lo: 0x80, hi: 0x81}, + {value: 0x3308, lo: 0x82, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x86}, + {value: 0x3008, lo: 0x87, hi: 0x8c}, + {value: 0x3308, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x8e}, + {value: 0x3008, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x3008, lo: 0x9a, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0x1c, offset 0x12d + {value: 0x0000, lo: 0x09}, + {value: 0x0040, lo: 0x80, hi: 0x86}, + {value: 0x055d, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8c}, + {value: 0x055d, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbb}, + {value: 0xe105, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbf}, + // Block 0x1d, offset 0x137 + {value: 0x0000, lo: 0x01}, + {value: 0x0018, lo: 0x80, hi: 0xbf}, + // Block 0x1e, offset 0x139 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xa0}, + {value: 0x2018, lo: 0xa1, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xbf}, + // Block 0x1f, offset 0x13e + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xa7}, + {value: 0x2018, lo: 0xa8, hi: 0xbf}, + // Block 0x20, offset 0x141 + {value: 0x0000, lo: 0x02}, + {value: 
0x2018, lo: 0x80, hi: 0x82}, + {value: 0x0018, lo: 0x83, hi: 0xbf}, + // Block 0x21, offset 0x144 + {value: 0x0000, lo: 0x01}, + {value: 0x0008, lo: 0x80, hi: 0xbf}, + // Block 0x22, offset 0x146 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x99}, + {value: 0x0008, lo: 0x9a, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x23, offset 0x152 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x24, offset 0x15d + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0040, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0xbf}, + // Block 0x25, offset 0x165 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x0008, lo: 0x92, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0xbf}, + // Block 0x26, offset 0x16b + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x27, offset 0x171 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x28, offset 0x176 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0xe045, lo: 0xb8, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x29, offset 0x17b + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0xbf}, + // Block 0x2a, offset 0x17e + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xac}, + {value: 0x0018, lo: 0xad, hi: 0xae}, + {value: 0x0008, lo: 0xaf, hi: 0xbf}, + // Block 0x2b, offset 0x182 + {value: 0x0000, lo: 0x05}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9c}, + {value: 0x0040, lo: 0x9d, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x2c, offset 0x188 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x0018, lo: 0xab, hi: 0xb0}, + {value: 0x0008, lo: 0xb1, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0x2d, offset 0x18d + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0x93}, + {value: 0x3b08, lo: 0x94, hi: 0x94}, + {value: 0x3808, lo: 0x95, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x9e}, + {value: 0x0008, lo: 0x9f, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb3}, + {value: 0x3808, lo: 0xb4, hi: 0xb4}, + {value: 0x0018, lo: 0xb5, 
hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x2e, offset 0x198 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0x2f, offset 0x1a2 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0xb3}, + {value: 0x3340, lo: 0xb4, hi: 0xb5}, + {value: 0x3008, lo: 0xb6, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x30, offset 0x1a8 + {value: 0x0000, lo: 0x10}, + {value: 0x3008, lo: 0x80, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x86}, + {value: 0x3008, lo: 0x87, hi: 0x88}, + {value: 0x3308, lo: 0x89, hi: 0x91}, + {value: 0x3b08, lo: 0x92, hi: 0x92}, + {value: 0x3308, lo: 0x93, hi: 0x93}, + {value: 0x0018, lo: 0x94, hi: 0x96}, + {value: 0x0008, lo: 0x97, hi: 0x97}, + {value: 0x0018, lo: 0x98, hi: 0x9b}, + {value: 0x0008, lo: 0x9c, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x31, offset 0x1b9 + {value: 0x0000, lo: 0x0a}, + {value: 0x0018, lo: 0x80, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x86}, + {value: 0x0218, lo: 0x87, hi: 0x87}, + {value: 0x0018, lo: 0x88, hi: 0x8a}, + {value: 0x33c0, lo: 0x8b, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8e}, + {value: 0x33c0, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0208, lo: 0xa0, hi: 0xbf}, + // Block 0x32, offset 0x1c4 + {value: 0x0000, lo: 0x02}, + {value: 0x0208, lo: 0x80, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0x33, offset 0x1c7 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x86}, + {value: 0x0208, lo: 0x87, hi: 0xa8}, + {value: 0x3308, lo: 0xa9, hi: 0xa9}, + {value: 0x0208, lo: 0xaa, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x34, offset 0x1cf + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0x35, offset 0x1d2 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa6}, + {value: 0x3308, lo: 0xa7, hi: 0xa8}, + {value: 0x3008, lo: 0xa9, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb2}, + {value: 0x3008, lo: 0xb3, hi: 0xb8}, + {value: 0x3308, lo: 0xb9, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x36, offset 0x1df + {value: 0x0000, lo: 0x07}, + {value: 0x0018, lo: 0x80, hi: 0x80}, + {value: 0x0040, lo: 0x81, hi: 0x83}, + {value: 0x0018, lo: 0x84, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x37, offset 0x1e7 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x38, offset 0x1eb + {value: 0x0000, lo: 0x06}, + 
{value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0028, lo: 0x9a, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0xbf}, + // Block 0x39, offset 0x1f2 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x3308, lo: 0x97, hi: 0x98}, + {value: 0x3008, lo: 0x99, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x3a, offset 0x1fa + {value: 0x0000, lo: 0x0f}, + {value: 0x0008, lo: 0x80, hi: 0x94}, + {value: 0x3008, lo: 0x95, hi: 0x95}, + {value: 0x3308, lo: 0x96, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x3b08, lo: 0xa0, hi: 0xa0}, + {value: 0x3008, lo: 0xa1, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xac}, + {value: 0x3008, lo: 0xad, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0x3b, offset 0x20a + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa6}, + {value: 0x0008, lo: 0xa7, hi: 0xa7}, + {value: 0x0018, lo: 0xa8, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xbd}, + {value: 0x3318, lo: 0xbe, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0x3c, offset 0x216 + {value: 0x0000, lo: 0x02}, + {value: 0x3308, lo: 0x80, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0xbf}, + // Block 0x3d, offset 0x219 + {value: 0x0000, lo: 0x09}, + {value: 0x3308, lo: 0x80, hi: 0x83}, + {value: 0x3008, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x3008, lo: 0xbd, hi: 0xbf}, + // Block 0x3e, offset 0x223 + {value: 0x0000, lo: 0x0b}, + {value: 0x3008, lo: 0x80, hi: 0x81}, + {value: 0x3308, lo: 0x82, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x83}, + {value: 0x3808, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0xaa}, + {value: 0x3308, lo: 0xab, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x3f, offset 0x22f + {value: 0x0000, lo: 0x0b}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xa0}, + {value: 0x3008, lo: 0xa1, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa5}, + {value: 0x3008, lo: 0xa6, hi: 0xa7}, + {value: 0x3308, lo: 0xa8, hi: 0xa9}, + {value: 0x3808, lo: 0xaa, hi: 0xaa}, + {value: 0x3b08, lo: 0xab, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xbf}, + // Block 0x40, offset 0x23b + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xa6}, + {value: 0x3008, lo: 0xa7, hi: 0xa7}, + {value: 0x3308, lo: 0xa8, hi: 0xa9}, + {value: 0x3008, lo: 0xaa, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xad}, + {value: 0x3008, lo: 0xae, hi: 0xae}, + 
{value: 0x3308, lo: 0xaf, hi: 0xb1}, + {value: 0x3808, lo: 0xb2, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbb}, + {value: 0x0018, lo: 0xbc, hi: 0xbf}, + // Block 0x41, offset 0x247 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xa3}, + {value: 0x3008, lo: 0xa4, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbf}, + // Block 0x42, offset 0x24f + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8c}, + {value: 0x0008, lo: 0x8d, hi: 0xbd}, + {value: 0x0018, lo: 0xbe, hi: 0xbf}, + // Block 0x43, offset 0x254 + {value: 0x0000, lo: 0x0c}, + {value: 0x02a9, lo: 0x80, hi: 0x80}, + {value: 0x02b1, lo: 0x81, hi: 0x81}, + {value: 0x02b9, lo: 0x82, hi: 0x82}, + {value: 0x02c1, lo: 0x83, hi: 0x83}, + {value: 0x02c9, lo: 0x84, hi: 0x85}, + {value: 0x02d1, lo: 0x86, hi: 0x86}, + {value: 0x02d9, lo: 0x87, hi: 0x87}, + {value: 0x057d, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x8f}, + {value: 0x059d, lo: 0x90, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbc}, + {value: 0x059d, lo: 0xbd, hi: 0xbf}, + // Block 0x44, offset 0x261 + {value: 0x0000, lo: 0x10}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x3308, lo: 0x90, hi: 0x92}, + {value: 0x0018, lo: 0x93, hi: 0x93}, + {value: 0x3308, lo: 0x94, hi: 0xa0}, + {value: 0x3008, lo: 0xa1, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa8}, + {value: 0x0008, lo: 0xa9, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xb6}, + {value: 0x3008, lo: 0xb7, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xb9}, + {value: 0x0008, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0x45, offset 0x272 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x87}, + {value: 0xe045, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0xe045, lo: 0x98, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa7}, + {value: 0xe045, lo: 0xa8, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb7}, + {value: 0xe045, lo: 0xb8, hi: 0xbf}, + // Block 0x46, offset 0x27d + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x80}, + {value: 0x0040, lo: 0x81, hi: 0x8f}, + {value: 0x3318, lo: 0x90, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xbf}, + // Block 0x47, offset 0x282 + {value: 0x0000, lo: 0x08}, + {value: 0x0018, lo: 0x80, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x88}, + {value: 0x0851, lo: 0x89, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // Block 0x48, offset 0x28b + {value: 0x0000, lo: 0x07}, + {value: 0x0018, lo: 0x80, hi: 0xab}, + {value: 0x0859, lo: 0xac, hi: 0xac}, + {value: 0x0861, lo: 0xad, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xae}, + {value: 0x0869, lo: 0xaf, hi: 0xaf}, + {value: 0x0871, lo: 0xb0, hi: 0xb0}, + {value: 0x0018, lo: 0xb1, hi: 0xbf}, + // Block 0x49, offset 0x293 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x9f}, + {value: 0x0080, lo: 0xa0, hi: 0xa0}, + {value: 0x0018, lo: 0xa1, hi: 0xad}, + {value: 0x0080, lo: 0xae, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x4a, 
offset 0x299 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0xa8}, + {value: 0x09dd, lo: 0xa9, hi: 0xa9}, + {value: 0x09fd, lo: 0xaa, hi: 0xaa}, + {value: 0x0018, lo: 0xab, hi: 0xbf}, + // Block 0x4b, offset 0x29e + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xbf}, + // Block 0x4c, offset 0x2a1 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x8b}, + {value: 0x0929, lo: 0x8c, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0xbf}, + // Block 0x4d, offset 0x2a5 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0xb3}, + {value: 0x0e7e, lo: 0xb4, hi: 0xb4}, + {value: 0x0932, lo: 0xb5, hi: 0xb5}, + {value: 0x0e9e, lo: 0xb6, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xbf}, + // Block 0x4e, offset 0x2ab + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x9b}, + {value: 0x0939, lo: 0x9c, hi: 0x9c}, + {value: 0x0018, lo: 0x9d, hi: 0xbf}, + // Block 0x4f, offset 0x2af + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xbf}, + // Block 0x50, offset 0x2b3 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x96}, + {value: 0x0018, lo: 0x97, hi: 0xbf}, + // Block 0x51, offset 0x2b7 + {value: 0x0000, lo: 0x04}, + {value: 0xe185, lo: 0x80, hi: 0x8f}, + {value: 0x03f5, lo: 0x90, hi: 0x9f}, + {value: 0x0ebd, lo: 0xa0, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x52, offset 0x2bc + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x0040, lo: 0xa6, hi: 0xa6}, + {value: 0x0008, lo: 0xa7, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xac}, + {value: 0x0008, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x53, offset 0x2c4 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xae}, + {value: 0xe075, lo: 0xaf, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0x54, offset 0x2cb + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x55, offset 0x2d6 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xbf}, + // Block 0x56, offset 0x2e0 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xae}, + {value: 0x0008, lo: 0xaf, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x57, offset 0x2e4 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0xbf}, + // Block 0x58, offset 0x2e7 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9e}, + {value: 0x0ef5, lo: 0x9f, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 
0xbf}, + // Block 0x59, offset 0x2ed + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xb2}, + {value: 0x0f15, lo: 0xb3, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0x5a, offset 0x2f1 + {value: 0x0020, lo: 0x01}, + {value: 0x0f35, lo: 0x80, hi: 0xbf}, + // Block 0x5b, offset 0x2f3 + {value: 0x0020, lo: 0x02}, + {value: 0x1735, lo: 0x80, hi: 0x8f}, + {value: 0x1915, lo: 0x90, hi: 0xbf}, + // Block 0x5c, offset 0x2f6 + {value: 0x0020, lo: 0x01}, + {value: 0x1f15, lo: 0x80, hi: 0xbf}, + // Block 0x5d, offset 0x2f8 + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0xbf}, + // Block 0x5e, offset 0x2fb + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x98}, + {value: 0x3308, lo: 0x99, hi: 0x9a}, + {value: 0x096a, lo: 0x9b, hi: 0x9b}, + {value: 0x0972, lo: 0x9c, hi: 0x9c}, + {value: 0x0008, lo: 0x9d, hi: 0x9e}, + {value: 0x0979, lo: 0x9f, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa0}, + {value: 0x0008, lo: 0xa1, hi: 0xbf}, + // Block 0x5f, offset 0x305 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xbe}, + {value: 0x0981, lo: 0xbf, hi: 0xbf}, + // Block 0x60, offset 0x308 + {value: 0x0000, lo: 0x0e}, + {value: 0x0040, lo: 0x80, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xb0}, + {value: 0x2a35, lo: 0xb1, hi: 0xb1}, + {value: 0x2a55, lo: 0xb2, hi: 0xb2}, + {value: 0x2a75, lo: 0xb3, hi: 0xb3}, + {value: 0x2a95, lo: 0xb4, hi: 0xb4}, + {value: 0x2a75, lo: 0xb5, hi: 0xb5}, + {value: 0x2ab5, lo: 0xb6, hi: 0xb6}, + {value: 0x2ad5, lo: 0xb7, hi: 0xb7}, + {value: 0x2af5, lo: 0xb8, hi: 0xb9}, + {value: 0x2b15, lo: 0xba, hi: 0xbb}, + {value: 0x2b35, lo: 0xbc, hi: 0xbd}, + {value: 0x2b15, lo: 0xbe, hi: 0xbf}, + // Block 0x61, offset 0x317 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x62, offset 0x31b + {value: 0x0008, lo: 0x03}, + {value: 0x098a, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x0a82, lo: 0xa0, hi: 0xbf}, + // Block 0x63, offset 0x31f + {value: 0x0008, lo: 0x01}, + {value: 0x0d19, lo: 0x80, hi: 0xbf}, + // Block 0x64, offset 0x321 + {value: 0x0008, lo: 0x08}, + {value: 0x0f19, lo: 0x80, hi: 0xb0}, + {value: 0x4045, lo: 0xb1, hi: 0xb1}, + {value: 0x10a1, lo: 0xb2, hi: 0xb3}, + {value: 0x4065, lo: 0xb4, hi: 0xb4}, + {value: 0x10b1, lo: 0xb5, hi: 0xb7}, + {value: 0x4085, lo: 0xb8, hi: 0xb8}, + {value: 0x4085, lo: 0xb9, hi: 0xb9}, + {value: 0x10c9, lo: 0xba, hi: 0xbf}, + // Block 0x65, offset 0x32a + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // Block 0x66, offset 0x32e + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xbd}, + {value: 0x0018, lo: 0xbe, hi: 0xbf}, + // Block 0x67, offset 0x333 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xbf}, + // Block 0x68, offset 0x338 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x0018, lo: 0xa6, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb1}, + {value: 0x0018, lo: 0xb2, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0x69, offset 0x33e + {value: 0x0000, lo: 0x0f}, + {value: 
0x0008, lo: 0x80, hi: 0x81}, + {value: 0x3308, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0x85}, + {value: 0x3b08, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x8a}, + {value: 0x3308, lo: 0x8b, hi: 0x8b}, + {value: 0x0008, lo: 0x8c, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa6}, + {value: 0x3008, lo: 0xa7, hi: 0xa7}, + {value: 0x0018, lo: 0xa8, hi: 0xab}, + {value: 0x3b08, lo: 0xac, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x6a, offset 0x34e + {value: 0x0000, lo: 0x05}, + {value: 0x0208, lo: 0x80, hi: 0xb1}, + {value: 0x0108, lo: 0xb2, hi: 0xb2}, + {value: 0x0008, lo: 0xb3, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0x6b, offset 0x354 + {value: 0x0000, lo: 0x03}, + {value: 0x3008, lo: 0x80, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xbf}, + // Block 0x6c, offset 0x358 + {value: 0x0000, lo: 0x0e}, + {value: 0x3008, lo: 0x80, hi: 0x83}, + {value: 0x3b08, lo: 0x84, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x8d}, + {value: 0x0018, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xba}, + {value: 0x0008, lo: 0xbb, hi: 0xbb}, + {value: 0x0018, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0x6d, offset 0x367 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x6e, offset 0x36c + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x91}, + {value: 0x3008, lo: 0x92, hi: 0x92}, + {value: 0x3808, lo: 0x93, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x6f, offset 0x374 + {value: 0x0000, lo: 0x09}, + {value: 0x3308, lo: 0x80, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xb9}, + {value: 0x3008, lo: 0xba, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x70, offset 0x37e + {value: 0x0000, lo: 0x0a}, + {value: 0x3808, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8e}, + {value: 0x0008, lo: 0x8f, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x71, offset 0x389 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xa8}, + {value: 0x3308, lo: 0xa9, hi: 0xae}, + {value: 0x3008, lo: 0xaf, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb2}, + {value: 0x3008, lo: 0xb3, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x72, offset 0x391 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x8b}, + {value: 0x3308, lo: 0x8c, 
hi: 0x8c}, + {value: 0x3008, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9b}, + {value: 0x0018, lo: 0x9c, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xb9}, + {value: 0x0008, lo: 0xba, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x3008, lo: 0xbd, hi: 0xbd}, + {value: 0x0008, lo: 0xbe, hi: 0xbf}, + // Block 0x73, offset 0x3a2 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb0}, + {value: 0x0008, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xb8}, + {value: 0x0008, lo: 0xb9, hi: 0xbd}, + {value: 0x3308, lo: 0xbe, hi: 0xbf}, + // Block 0x74, offset 0x3ab + {value: 0x0000, lo: 0x0f}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x9a}, + {value: 0x0008, lo: 0x9b, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xaa}, + {value: 0x3008, lo: 0xab, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xad}, + {value: 0x3008, lo: 0xae, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb5}, + {value: 0x3b08, lo: 0xb6, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x75, offset 0x3bb + {value: 0x0000, lo: 0x0c}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x88}, + {value: 0x0008, lo: 0x89, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x90}, + {value: 0x0008, lo: 0x91, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x76, offset 0x3c8 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9b}, + {value: 0x449d, lo: 0x9c, hi: 0x9c}, + {value: 0x44b5, lo: 0x9d, hi: 0x9d}, + {value: 0x0941, lo: 0x9e, hi: 0x9e}, + {value: 0xe06d, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa8}, + {value: 0x13f9, lo: 0xa9, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x44cd, lo: 0xb0, hi: 0xbf}, + // Block 0x77, offset 0x3d4 + {value: 0x0000, lo: 0x04}, + {value: 0x44ed, lo: 0x80, hi: 0x8f}, + {value: 0x450d, lo: 0x90, hi: 0x9f}, + {value: 0x452d, lo: 0xa0, hi: 0xaf}, + {value: 0x450d, lo: 0xb0, hi: 0xbf}, + // Block 0x78, offset 0x3d9 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa5}, + {value: 0x3008, lo: 0xa6, hi: 0xa7}, + {value: 0x3308, lo: 0xa8, hi: 0xa8}, + {value: 0x3008, lo: 0xa9, hi: 0xaa}, + {value: 0x0018, lo: 0xab, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xac}, + {value: 0x3b08, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x79, offset 0x3e6 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x7a, offset 0x3ea + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, 
hi: 0x8a}, + {value: 0x0018, lo: 0x8b, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x7b, offset 0x3ef + {value: 0x0000, lo: 0x01}, + {value: 0x0040, lo: 0x80, hi: 0xbf}, + // Block 0x7c, offset 0x3f1 + {value: 0x0020, lo: 0x01}, + {value: 0x454d, lo: 0x80, hi: 0xbf}, + // Block 0x7d, offset 0x3f3 + {value: 0x0020, lo: 0x03}, + {value: 0x4d4d, lo: 0x80, hi: 0x94}, + {value: 0x4b0d, lo: 0x95, hi: 0x95}, + {value: 0x4fed, lo: 0x96, hi: 0xbf}, + // Block 0x7e, offset 0x3f7 + {value: 0x0020, lo: 0x01}, + {value: 0x552d, lo: 0x80, hi: 0xbf}, + // Block 0x7f, offset 0x3f9 + {value: 0x0020, lo: 0x03}, + {value: 0x5d2d, lo: 0x80, hi: 0x84}, + {value: 0x568d, lo: 0x85, hi: 0x85}, + {value: 0x5dcd, lo: 0x86, hi: 0xbf}, + // Block 0x80, offset 0x3fd + {value: 0x0020, lo: 0x08}, + {value: 0x6b8d, lo: 0x80, hi: 0x8f}, + {value: 0x6d4d, lo: 0x90, hi: 0x90}, + {value: 0x6d8d, lo: 0x91, hi: 0xab}, + {value: 0x1401, lo: 0xac, hi: 0xac}, + {value: 0x70ed, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x710d, lo: 0xb0, hi: 0xbf}, + // Block 0x81, offset 0x406 + {value: 0x0020, lo: 0x05}, + {value: 0x730d, lo: 0x80, hi: 0xad}, + {value: 0x656d, lo: 0xae, hi: 0xae}, + {value: 0x78cd, lo: 0xaf, hi: 0xb5}, + {value: 0x6f8d, lo: 0xb6, hi: 0xb6}, + {value: 0x79ad, lo: 0xb7, hi: 0xbf}, + // Block 0x82, offset 0x40c + {value: 0x0008, lo: 0x03}, + {value: 0x1751, lo: 0x80, hi: 0x82}, + {value: 0x1741, lo: 0x83, hi: 0x83}, + {value: 0x1769, lo: 0x84, hi: 0xbf}, + // Block 0x83, offset 0x410 + {value: 0x0008, lo: 0x0f}, + {value: 0x1d81, lo: 0x80, hi: 0x83}, + {value: 0x1d99, lo: 0x84, hi: 0x85}, + {value: 0x1da1, lo: 0x86, hi: 0x87}, + {value: 0x1da9, lo: 0x88, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x1de9, lo: 0x92, hi: 0x97}, + {value: 0x1e11, lo: 0x98, hi: 0x9c}, + {value: 0x1e31, lo: 0x9d, hi: 0xb3}, + {value: 0x1d71, lo: 0xb4, hi: 0xb4}, + {value: 0x1d81, lo: 0xb5, hi: 0xb5}, + {value: 0x1ee9, lo: 0xb6, hi: 0xbb}, + {value: 0x1f09, lo: 0xbc, hi: 0xbc}, + {value: 0x1ef9, lo: 0xbd, hi: 0xbd}, + {value: 0x1f19, lo: 0xbe, hi: 0xbf}, + // Block 0x84, offset 0x420 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8c}, + {value: 0x0008, lo: 0x8d, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbb}, + {value: 0x0008, lo: 0xbc, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0x85, offset 0x42a + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0xbf}, + // Block 0x86, offset 0x42f + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0x87, offset 0x432 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x86}, + {value: 0x0018, lo: 0x87, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xbf}, + // Block 0x88, offset 0x438 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x9c}, + {value: 0x0040, lo: 0x9d, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa0}, + {value: 0x0040, lo: 0xa1, hi: 0xbf}, + // Block 0x89, offset 0x43f + {value: 0x0000, lo: 0x04}, + 
{value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x8a, offset 0x444 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x9c}, + {value: 0x0040, lo: 0x9d, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x8b, offset 0x448 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xa0}, + {value: 0x0018, lo: 0xa1, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x8c, offset 0x44e + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xac}, + {value: 0x0008, lo: 0xad, hi: 0xbf}, + // Block 0x8d, offset 0x453 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0x8e, offset 0x45c + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x8f, offset 0x461 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0xbf}, + // Block 0x90, offset 0x467 + {value: 0x0000, lo: 0x06}, + {value: 0xe145, lo: 0x80, hi: 0x87}, + {value: 0xe1c5, lo: 0x88, hi: 0x8f}, + {value: 0xe145, lo: 0x90, hi: 0x97}, + {value: 0x8b0d, lo: 0x98, hi: 0x9f}, + {value: 0x8b25, lo: 0xa0, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xbf}, + // Block 0x91, offset 0x46e + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaf}, + {value: 0x8b25, lo: 0xb0, hi: 0xb7}, + {value: 0x8b0d, lo: 0xb8, hi: 0xbf}, + // Block 0x92, offset 0x475 + {value: 0x0000, lo: 0x06}, + {value: 0xe145, lo: 0x80, hi: 0x87}, + {value: 0xe1c5, lo: 0x88, hi: 0x8f}, + {value: 0xe145, lo: 0x90, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x93, offset 0x47c + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x94, offset 0x480 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x95, offset 0x483 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xbf}, + // Block 0x96, offset 0x488 + {value: 0x0000, lo: 0x0b}, + {value: 0x0808, lo: 0x80, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x87}, + {value: 0x0808, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0808, lo: 0x8a, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb6}, + {value: 0x0808, lo: 0xb7, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbb}, + {value: 0x0808, lo: 0xbc, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbe}, + {value: 0x0808, lo: 0xbf, hi: 0xbf}, + // Block 0x97, 
offset 0x494 + {value: 0x0000, lo: 0x05}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x96}, + {value: 0x0818, lo: 0x97, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb6}, + {value: 0x0818, lo: 0xb7, hi: 0xbf}, + // Block 0x98, offset 0x49a + {value: 0x0000, lo: 0x04}, + {value: 0x0808, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xa6}, + {value: 0x0818, lo: 0xa7, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0x99, offset 0x49f + {value: 0x0000, lo: 0x06}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xb3}, + {value: 0x0808, lo: 0xb4, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xba}, + {value: 0x0818, lo: 0xbb, hi: 0xbf}, + // Block 0x9a, offset 0x4a6 + {value: 0x0000, lo: 0x07}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x0818, lo: 0x96, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbe}, + {value: 0x0818, lo: 0xbf, hi: 0xbf}, + // Block 0x9b, offset 0x4ae + {value: 0x0000, lo: 0x04}, + {value: 0x0808, lo: 0x80, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbb}, + {value: 0x0818, lo: 0xbc, hi: 0xbd}, + {value: 0x0808, lo: 0xbe, hi: 0xbf}, + // Block 0x9c, offset 0x4b3 + {value: 0x0000, lo: 0x03}, + {value: 0x0818, lo: 0x80, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x91}, + {value: 0x0818, lo: 0x92, hi: 0xbf}, + // Block 0x9d, offset 0x4b7 + {value: 0x0000, lo: 0x0f}, + {value: 0x0808, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8b}, + {value: 0x3308, lo: 0x8c, hi: 0x8f}, + {value: 0x0808, lo: 0x90, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x94}, + {value: 0x0808, lo: 0x95, hi: 0x97}, + {value: 0x0040, lo: 0x98, hi: 0x98}, + {value: 0x0808, lo: 0x99, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0x9e, offset 0x4c7 + {value: 0x0000, lo: 0x06}, + {value: 0x0818, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x8f}, + {value: 0x0818, lo: 0x90, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xbc}, + {value: 0x0818, lo: 0xbd, hi: 0xbf}, + // Block 0x9f, offset 0x4ce + {value: 0x0000, lo: 0x03}, + {value: 0x0808, lo: 0x80, hi: 0x9c}, + {value: 0x0818, lo: 0x9d, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0xa0, offset 0x4d2 + {value: 0x0000, lo: 0x03}, + {value: 0x0808, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb8}, + {value: 0x0018, lo: 0xb9, hi: 0xbf}, + // Block 0xa1, offset 0x4d6 + {value: 0x0000, lo: 0x06}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0x0818, lo: 0x98, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xb7}, + {value: 0x0818, lo: 0xb8, hi: 0xbf}, + // Block 0xa2, offset 0x4dd + {value: 0x0000, lo: 0x01}, + {value: 0x0808, lo: 0x80, hi: 0xbf}, + // Block 0xa3, offset 0x4df + {value: 0x0000, lo: 0x02}, + {value: 0x0808, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0xbf}, + // Block 0xa4, offset 0x4e2 + {value: 0x0000, lo: 0x02}, + {value: 0x03dd, lo: 0x80, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xbf}, + // Block 0xa5, offset 0x4e5 + {value: 0x0000, lo: 0x03}, + {value: 0x0808, lo: 0x80, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 
0xb9}, + {value: 0x0818, lo: 0xba, hi: 0xbf}, + // Block 0xa6, offset 0x4e9 + {value: 0x0000, lo: 0x08}, + {value: 0x0908, lo: 0x80, hi: 0x80}, + {value: 0x0a08, lo: 0x81, hi: 0xa1}, + {value: 0x0c08, lo: 0xa2, hi: 0xa2}, + {value: 0x0a08, lo: 0xa3, hi: 0xa3}, + {value: 0x3308, lo: 0xa4, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xaf}, + {value: 0x0808, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0xa7, offset 0x4f2 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0818, lo: 0xa0, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xa8, offset 0x4f6 + {value: 0x0000, lo: 0x07}, + {value: 0x0808, lo: 0x80, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaa}, + {value: 0x3308, lo: 0xab, hi: 0xac}, + {value: 0x0818, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0808, lo: 0xb0, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0xa9, offset 0x4fe + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbf}, + // Block 0xaa, offset 0x501 + {value: 0x0000, lo: 0x07}, + {value: 0x0808, lo: 0x80, hi: 0x9c}, + {value: 0x0818, lo: 0x9d, hi: 0xa6}, + {value: 0x0808, lo: 0xa7, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xaf}, + {value: 0x0a08, lo: 0xb0, hi: 0xb2}, + {value: 0x0c08, lo: 0xb3, hi: 0xb3}, + {value: 0x0a08, lo: 0xb4, hi: 0xbf}, + // Block 0xab, offset 0x509 + {value: 0x0000, lo: 0x0a}, + {value: 0x0a08, lo: 0x80, hi: 0x84}, + {value: 0x0808, lo: 0x85, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x90}, + {value: 0x0a18, lo: 0x91, hi: 0x93}, + {value: 0x0c18, lo: 0x94, hi: 0x94}, + {value: 0x0818, lo: 0x95, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xaf}, + {value: 0x0a08, lo: 0xb0, hi: 0xb3}, + {value: 0x0c08, lo: 0xb4, hi: 0xb5}, + {value: 0x0a08, lo: 0xb6, hi: 0xbf}, + // Block 0xac, offset 0x514 + {value: 0x0000, lo: 0x0e}, + {value: 0x0a08, lo: 0x80, hi: 0x81}, + {value: 0x3308, lo: 0x82, hi: 0x85}, + {value: 0x0818, lo: 0x86, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0xaf}, + {value: 0x0a08, lo: 0xb0, hi: 0xb0}, + {value: 0x0808, lo: 0xb1, hi: 0xb1}, + {value: 0x0a08, lo: 0xb2, hi: 0xb3}, + {value: 0x0c08, lo: 0xb4, hi: 0xb6}, + {value: 0x0808, lo: 0xb7, hi: 0xb7}, + {value: 0x0a08, lo: 0xb8, hi: 0xb8}, + {value: 0x0c08, lo: 0xb9, hi: 0xba}, + {value: 0x0a08, lo: 0xbb, hi: 0xbc}, + {value: 0x0c08, lo: 0xbd, hi: 0xbd}, + {value: 0x0a08, lo: 0xbe, hi: 0xbf}, + // Block 0xad, offset 0x523 + {value: 0x0000, lo: 0x0b}, + {value: 0x0808, lo: 0x80, hi: 0x80}, + {value: 0x0a08, lo: 0x81, hi: 0x81}, + {value: 0x0c08, lo: 0x82, hi: 0x83}, + {value: 0x0a08, lo: 0x84, hi: 0x84}, + {value: 0x0818, lo: 0x85, hi: 0x88}, + {value: 0x0c18, lo: 0x89, hi: 0x89}, + {value: 0x0a18, lo: 0x8a, hi: 0x8a}, + {value: 0x0918, lo: 0x8b, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0xae, offset 0x52f + {value: 0x0000, lo: 0x05}, + {value: 0x3008, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xbf}, + // Block 0xaf, offset 0x535 + {value: 0x0000, lo: 0x0c}, + {value: 0x3308, lo: 0x80, hi: 0x85}, + {value: 0x3b08, lo: 0x86, hi: 0x86}, + {value: 0x0018, lo: 0x87, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x91}, + {value: 0x0018, lo: 0x92, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xaf}, + {value: 0x3b08, lo: 0xb0, hi: 0xb0}, + {value: 0x0008, lo: 0xb1, hi: 
0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xb0, offset 0x542 + {value: 0x0000, lo: 0x0b}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb6}, + {value: 0x3008, lo: 0xb7, hi: 0xb8}, + {value: 0x3b08, lo: 0xb9, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbd}, + {value: 0x0018, lo: 0xbe, hi: 0xbf}, + // Block 0xb1, offset 0x54e + {value: 0x0000, lo: 0x07}, + {value: 0x0018, lo: 0x80, hi: 0x81}, + {value: 0x3308, lo: 0x82, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0xb2, offset 0x556 + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xa6}, + {value: 0x3308, lo: 0xa7, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xb2}, + {value: 0x3b08, lo: 0xb3, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xb5}, + {value: 0x0008, lo: 0xb6, hi: 0xbf}, + // Block 0xb3, offset 0x55f + {value: 0x0000, lo: 0x0a}, + {value: 0x0018, lo: 0x80, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x84}, + {value: 0x3008, lo: 0x85, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xb5}, + {value: 0x0008, lo: 0xb6, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0xb4, offset 0x56a + {value: 0x0000, lo: 0x06}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xb2}, + {value: 0x3008, lo: 0xb3, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xbe}, + {value: 0x3008, lo: 0xbf, hi: 0xbf}, + // Block 0xb5, offset 0x571 + {value: 0x0000, lo: 0x0e}, + {value: 0x3808, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x88}, + {value: 0x3308, lo: 0x89, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x8d}, + {value: 0x3008, lo: 0x8e, hi: 0x8e}, + {value: 0x3308, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9b}, + {value: 0x0008, lo: 0x9c, hi: 0x9c}, + {value: 0x0018, lo: 0x9d, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa0}, + {value: 0x0018, lo: 0xa1, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0xb6, offset 0x580 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x91}, + {value: 0x0040, lo: 0x92, hi: 0x92}, + {value: 0x0008, lo: 0x93, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xae}, + {value: 0x3308, lo: 0xaf, hi: 0xb1}, + {value: 0x3008, lo: 0xb2, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb4}, + {value: 0x3808, lo: 0xb5, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xbd}, + {value: 0x3308, lo: 0xbe, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0xb7, offset 0x58d + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x81}, + {value: 0x0040, lo: 0x82, hi: 0xbf}, + // Block 0xb8, offset 0x591 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 
0x0008, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8e}, + {value: 0x0008, lo: 0x8f, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9e}, + {value: 0x0008, lo: 0x9f, hi: 0xa8}, + {value: 0x0018, lo: 0xa9, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0xb9, offset 0x59e + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x3308, lo: 0x9f, hi: 0x9f}, + {value: 0x3008, lo: 0xa0, hi: 0xa2}, + {value: 0x3308, lo: 0xa3, hi: 0xa9}, + {value: 0x3b08, lo: 0xaa, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0xba, offset 0x5a7 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xbf}, + // Block 0xbb, offset 0x5ab + {value: 0x0000, lo: 0x0e}, + {value: 0x3008, lo: 0x80, hi: 0x81}, + {value: 0x3b08, lo: 0x82, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x84}, + {value: 0x3008, lo: 0x85, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x8a}, + {value: 0x0018, lo: 0x8b, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9c}, + {value: 0x0018, lo: 0x9d, hi: 0x9d}, + {value: 0x3308, lo: 0x9e, hi: 0x9e}, + {value: 0x0008, lo: 0x9f, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xbf}, + // Block 0xbc, offset 0x5ba + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb8}, + {value: 0x3008, lo: 0xb9, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0xbd, offset 0x5c2 + {value: 0x0000, lo: 0x0a}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x3008, lo: 0x81, hi: 0x81}, + {value: 0x3b08, lo: 0x82, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x85}, + {value: 0x0018, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xbf}, + // Block 0xbe, offset 0x5cd + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0xae}, + {value: 0x3008, lo: 0xaf, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0x3008, lo: 0xb8, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xbf, offset 0x5d6 + {value: 0x0000, lo: 0x05}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x9b}, + {value: 0x3308, lo: 0x9c, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0xbf}, + // Block 0xc0, offset 0x5dc + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xc1, offset 0x5e4 + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, 
hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xbf}, + // Block 0xc2, offset 0x5ed + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x3308, lo: 0xab, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xad}, + {value: 0x3008, lo: 0xae, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb5}, + {value: 0x3808, lo: 0xb6, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xb8}, + {value: 0x0018, lo: 0xb9, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0xc3, offset 0x5f9 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0xbf}, + // Block 0xc4, offset 0x5fc + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9f}, + {value: 0x3008, lo: 0xa0, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa5}, + {value: 0x3008, lo: 0xa6, hi: 0xa6}, + {value: 0x3308, lo: 0xa7, hi: 0xaa}, + {value: 0x3b08, lo: 0xab, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0018, lo: 0xba, hi: 0xbf}, + // Block 0xc5, offset 0x608 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0xbf}, + // Block 0xc6, offset 0x60b + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xae}, + {value: 0x3308, lo: 0xaf, hi: 0xb7}, + {value: 0x3008, lo: 0xb8, hi: 0xb8}, + {value: 0x3b08, lo: 0xb9, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0xc7, offset 0x614 + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x049d, lo: 0xa0, hi: 0xbf}, + // Block 0xc8, offset 0x617 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0xc9, offset 0x61c + {value: 0x0000, lo: 0x08}, + {value: 0x3008, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x83}, + {value: 0x0018, lo: 0x84, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xbf}, + // Block 0xca, offset 0x625 + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xa9}, + {value: 0x0008, lo: 0xaa, hi: 0xbf}, + // Block 0xcb, offset 0x62a + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x90}, + {value: 0x3008, lo: 0x91, hi: 0x93}, + {value: 0x3308, lo: 0x94, hi: 0x97}, + {value: 0x0040, lo: 0x98, hi: 0x99}, + {value: 0x3308, lo: 0x9a, hi: 0x9b}, + {value: 0x3008, lo: 0x9c, hi: 0x9f}, + {value: 0x3b08, lo: 0xa0, hi: 0xa0}, + {value: 0x0008, lo: 0xa1, hi: 0xa1}, + {value: 0x0018, lo: 0xa2, hi: 0xa2}, + {value: 0x0008, lo: 0xa3, hi: 0xa3}, + {value: 0x3008, lo: 0xa4, hi: 0xa4}, + {value: 0x0040, lo: 0xa5, hi: 0xbf}, + // Block 0xcc, offset 0x637 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb3}, + {value: 0x3b08, lo: 0xb4, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb8}, + {value: 0x3008, lo: 0xb9, hi: 0xb9}, + {value: 0x0008, lo: 0xba, hi: 0xba}, + 
{value: 0x3308, lo: 0xbb, hi: 0xbe}, + {value: 0x0018, lo: 0xbf, hi: 0xbf}, + // Block 0xcd, offset 0x642 + {value: 0x0000, lo: 0x08}, + {value: 0x0018, lo: 0x80, hi: 0x86}, + {value: 0x3b08, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x90}, + {value: 0x3308, lo: 0x91, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x98}, + {value: 0x3308, lo: 0x99, hi: 0x9b}, + {value: 0x0008, lo: 0x9c, hi: 0xbf}, + // Block 0xce, offset 0x64b + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x3308, lo: 0x8a, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x98}, + {value: 0x3b08, lo: 0x99, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9c}, + {value: 0x0008, lo: 0x9d, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0xa2}, + {value: 0x0040, lo: 0xa3, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0xcf, offset 0x656 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0xd0, offset 0x659 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0xbf}, + // Block 0xd1, offset 0x65c + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0xae}, + {value: 0x3008, lo: 0xaf, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xd2, offset 0x666 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xbf}, + // Block 0xd3, offset 0x66f + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xa8}, + {value: 0x3008, lo: 0xa9, hi: 0xa9}, + {value: 0x3308, lo: 0xaa, hi: 0xb0}, + {value: 0x3008, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0xd4, offset 0x67b + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0xd5, offset 0x688 + {value: 0x0000, lo: 0x0c}, + {value: 0x3308, lo: 0x80, hi: 0x83}, + {value: 0x3b08, lo: 0x84, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa5}, + {value: 0x0040, lo: 0xa6, hi: 0xa6}, + {value: 0x0008, lo: 0xa7, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xa9}, + {value: 0x0008, lo: 0xaa, hi: 0xbf}, + // Block 0xd6, offset 0x695 + {value: 0x0000, lo: 0x0d}, + 
{value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x3008, lo: 0x8a, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x8f}, + {value: 0x3308, lo: 0x90, hi: 0x91}, + {value: 0x0040, lo: 0x92, hi: 0x92}, + {value: 0x3008, lo: 0x93, hi: 0x94}, + {value: 0x3308, lo: 0x95, hi: 0x95}, + {value: 0x3008, lo: 0x96, hi: 0x96}, + {value: 0x3b08, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xbf}, + // Block 0xd7, offset 0x6a3 + {value: 0x0000, lo: 0x06}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0xd8, offset 0x6aa + {value: 0x0000, lo: 0x0a}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x0008, lo: 0x92, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0xd9, offset 0x6b5 + {value: 0x0000, lo: 0x06}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x3808, lo: 0x81, hi: 0x81}, + {value: 0x3b08, lo: 0x82, hi: 0x82}, + {value: 0x0018, lo: 0x83, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xbf}, + // Block 0xda, offset 0x6bc + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xbf}, + // Block 0xdb, offset 0x6c0 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbe}, + {value: 0x0018, lo: 0xbf, hi: 0xbf}, + // Block 0xdc, offset 0x6c4 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xbf}, + // Block 0xdd, offset 0x6c7 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0xde, offset 0x6cc + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0xbf}, + // Block 0xdf, offset 0x6cf + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xbf}, + // Block 0xe0, offset 0x6d2 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xb0}, + {value: 0x0018, lo: 0xb1, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xbf}, + // Block 0xe1, offset 0x6d6 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x0340, lo: 0xb0, hi: 0xbf}, + // Block 0xe2, offset 0x6d9 + {value: 0x0000, lo: 0x04}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0xbf}, + // Block 0xe3, offset 0x6de + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0xe4, offset 0x6e5 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xe5, offset 0x6e8 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, 
lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb4}, + {value: 0x0018, lo: 0xb5, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0xe6, offset 0x6f0 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xbf}, + // Block 0xe7, offset 0x6f4 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0018, lo: 0x84, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xa2}, + {value: 0x0008, lo: 0xa3, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbf}, + // Block 0xe8, offset 0x6ff + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0xbf}, + // Block 0xe9, offset 0x702 + {value: 0x0000, lo: 0x02}, + {value: 0xe105, lo: 0x80, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0xea, offset 0x705 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0xbf}, + // Block 0xeb, offset 0x708 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8e}, + {value: 0x3308, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x90}, + {value: 0x3008, lo: 0x91, hi: 0xbf}, + // Block 0xec, offset 0x70e + {value: 0x0000, lo: 0x05}, + {value: 0x3008, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8e}, + {value: 0x3308, lo: 0x8f, hi: 0x92}, + {value: 0x0008, lo: 0x93, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0xed, offset 0x714 + {value: 0x0000, lo: 0x08}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa1}, + {value: 0x0018, lo: 0xa2, hi: 0xa2}, + {value: 0x0008, lo: 0xa3, hi: 0xa3}, + {value: 0x3308, lo: 0xa4, hi: 0xa4}, + {value: 0x0040, lo: 0xa5, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0xee, offset 0x71d + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0xef, offset 0x720 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0xbf}, + // Block 0xf0, offset 0x723 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0xbf}, + // Block 0xf1, offset 0x726 + {value: 0x0000, lo: 0x07}, + {value: 0x0040, lo: 0x80, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xf2, offset 0x72e + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xa2}, + {value: 0x0040, lo: 0xa3, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xbf}, + // Block 0xf3, offset 0x733 + {value: 0x0000, lo: 0x08}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x92}, + {value: 0x0040, lo: 0x93, hi: 0x94}, + {value: 0x0008, lo: 0x95, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0xa3}, + {value: 0x0008, lo: 0xa4, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0xf4, offset 0x73c + {value: 0x0000, lo: 
0x02}, + {value: 0x0008, lo: 0x80, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0xf5, offset 0x73f + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0xf6, offset 0x744 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9b}, + {value: 0x0018, lo: 0x9c, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0x9f}, + {value: 0x03c0, lo: 0xa0, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xbf}, + // Block 0xf7, offset 0x74e + {value: 0x0000, lo: 0x03}, + {value: 0x3308, lo: 0x80, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xbf}, + // Block 0xf8, offset 0x752 + {value: 0x0000, lo: 0x03}, + {value: 0x3308, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // Block 0xf9, offset 0x756 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0xbf}, + // Block 0xfa, offset 0x759 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0xfb, offset 0x75c + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa8}, + {value: 0x0018, lo: 0xa9, hi: 0xbf}, + // Block 0xfc, offset 0x760 + {value: 0x0000, lo: 0x0e}, + {value: 0x0018, lo: 0x80, hi: 0x9d}, + {value: 0x2379, lo: 0x9e, hi: 0x9e}, + {value: 0x2381, lo: 0x9f, hi: 0x9f}, + {value: 0x2389, lo: 0xa0, hi: 0xa0}, + {value: 0x2391, lo: 0xa1, hi: 0xa1}, + {value: 0x2399, lo: 0xa2, hi: 0xa2}, + {value: 0x23a1, lo: 0xa3, hi: 0xa3}, + {value: 0x23a9, lo: 0xa4, hi: 0xa4}, + {value: 0x3018, lo: 0xa5, hi: 0xa6}, + {value: 0x3318, lo: 0xa7, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xac}, + {value: 0x3018, lo: 0xad, hi: 0xb2}, + {value: 0x0340, lo: 0xb3, hi: 0xba}, + {value: 0x3318, lo: 0xbb, hi: 0xbf}, + // Block 0xfd, offset 0x76f + {value: 0x0000, lo: 0x0b}, + {value: 0x3318, lo: 0x80, hi: 0x82}, + {value: 0x0018, lo: 0x83, hi: 0x84}, + {value: 0x3318, lo: 0x85, hi: 0x8b}, + {value: 0x0018, lo: 0x8c, hi: 0xa9}, + {value: 0x3318, lo: 0xaa, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xba}, + {value: 0x23b1, lo: 0xbb, hi: 0xbb}, + {value: 0x23b9, lo: 0xbc, hi: 0xbc}, + {value: 0x23c1, lo: 0xbd, hi: 0xbd}, + {value: 0x23c9, lo: 0xbe, hi: 0xbe}, + {value: 0x23d1, lo: 0xbf, hi: 0xbf}, + // Block 0xfe, offset 0x77b + {value: 0x0000, lo: 0x03}, + {value: 0x23d9, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xbf}, + // Block 0xff, offset 0x77f + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x81}, + {value: 0x3318, lo: 0x82, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0xbf}, + // Block 0x100, offset 0x784 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0x101, offset 0x789 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0x102, offset 0x78e + {value: 0x0000, lo: 0x03}, + {value: 0x3308, lo: 0x80, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, 
hi: 0xba}, + {value: 0x3308, lo: 0xbb, hi: 0xbf}, + // Block 0x103, offset 0x792 + {value: 0x0000, lo: 0x04}, + {value: 0x3308, lo: 0x80, hi: 0xac}, + {value: 0x0018, lo: 0xad, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xbf}, + // Block 0x104, offset 0x797 + {value: 0x0000, lo: 0x08}, + {value: 0x0018, lo: 0x80, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa0}, + {value: 0x3308, lo: 0xa1, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0x105, offset 0x7a0 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xa4}, + {value: 0x0008, lo: 0xa5, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xbf}, + // Block 0x106, offset 0x7a5 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x8e}, + {value: 0x3308, lo: 0x8f, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0xbf}, + // Block 0x107, offset 0x7a9 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb6}, + {value: 0x0008, lo: 0xb7, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x108, offset 0x7af + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x8e}, + {value: 0x0018, lo: 0x8f, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0xbf}, + // Block 0x109, offset 0x7b5 + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xad}, + {value: 0x3308, lo: 0xae, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xbf}, + // Block 0x10a, offset 0x7ba + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbe}, + {value: 0x0018, lo: 0xbf, hi: 0xbf}, + // Block 0x10b, offset 0x7c0 + {value: 0x0000, lo: 0x05}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x10c, offset 0x7c6 + {value: 0x0000, lo: 0x09}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xac}, + {value: 0x0008, lo: 0xad, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x10d, offset 0x7d0 + {value: 0x0000, lo: 0x05}, + {value: 0x0808, lo: 0x80, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x86}, + {value: 0x0818, lo: 0x87, hi: 0x8f}, + {value: 0x3308, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0xbf}, + // Block 0x10e, offset 0x7d6 + {value: 0x0000, lo: 0x08}, + {value: 0x0a08, lo: 0x80, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x8a}, + {value: 0x0b08, lo: 0x8b, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0808, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9d}, + {value: 0x0818, lo: 0x9e, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0x10f, offset 0x7df + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0xb0}, + {value: 0x0818, lo: 0xb1, hi: 0xbf}, + // Block 0x110, offset 0x7e2 + {value: 0x0000, lo: 0x02}, + {value: 0x0818, lo: 0x80, hi: 0xb4}, + {value: 
0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x111, offset 0x7e5 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0818, lo: 0x81, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x112, offset 0x7e9 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0x113, offset 0x7ed + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x114, offset 0x7f1 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xb0}, + {value: 0x0018, lo: 0xb1, hi: 0xbf}, + // Block 0x115, offset 0x7f7 + {value: 0x0000, lo: 0x05}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x90}, + {value: 0x0018, lo: 0x91, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0x116, offset 0x7fd + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x8f}, + {value: 0x2709, lo: 0x90, hi: 0x90}, + {value: 0x0018, lo: 0x91, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xbf}, + // Block 0x117, offset 0x802 + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0xa5}, + {value: 0x0018, lo: 0xa6, hi: 0xbf}, + // Block 0x118, offset 0x805 + {value: 0x0000, lo: 0x0f}, + {value: 0x2889, lo: 0x80, hi: 0x80}, + {value: 0x2891, lo: 0x81, hi: 0x81}, + {value: 0x2899, lo: 0x82, hi: 0x82}, + {value: 0x28a1, lo: 0x83, hi: 0x83}, + {value: 0x28a9, lo: 0x84, hi: 0x84}, + {value: 0x28b1, lo: 0x85, hi: 0x85}, + {value: 0x28b9, lo: 0x86, hi: 0x86}, + {value: 0x28c1, lo: 0x87, hi: 0x87}, + {value: 0x28c9, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x8f}, + {value: 0x28d1, lo: 0x90, hi: 0x90}, + {value: 0x28d9, lo: 0x91, hi: 0x91}, + {value: 0x0040, lo: 0x92, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa5}, + {value: 0x0040, lo: 0xa6, hi: 0xbf}, + // Block 0x119, offset 0x815 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x97}, + {value: 0x0040, lo: 0x98, hi: 0x9b}, + {value: 0x0018, lo: 0x9c, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x11a, offset 0x81c + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbf}, + // Block 0x11b, offset 0x820 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xbf}, + // Block 0x11c, offset 0x827 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // Block 0x11d, offset 0x82b + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xbf}, + // Block 0x11e, offset 0x831 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 
0xbf}, + // Block 0x11f, offset 0x838 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x120, offset 0x83f + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbe}, + {value: 0x0018, lo: 0xbf, hi: 0xbf}, + // Block 0x121, offset 0x845 + {value: 0x0000, lo: 0x08}, + {value: 0x0018, lo: 0x80, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x8d}, + {value: 0x0018, lo: 0x8e, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0x122, offset 0x84e + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x92}, + {value: 0x0040, lo: 0x93, hi: 0x93}, + {value: 0x0018, lo: 0x94, hi: 0xbf}, + // Block 0x123, offset 0x852 + {value: 0x0000, lo: 0x0d}, + {value: 0x0018, lo: 0x80, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0xaf}, + {value: 0x06e1, lo: 0xb0, hi: 0xb0}, + {value: 0x0049, lo: 0xb1, hi: 0xb1}, + {value: 0x0029, lo: 0xb2, hi: 0xb2}, + {value: 0x0031, lo: 0xb3, hi: 0xb3}, + {value: 0x06e9, lo: 0xb4, hi: 0xb4}, + {value: 0x06f1, lo: 0xb5, hi: 0xb5}, + {value: 0x06f9, lo: 0xb6, hi: 0xb6}, + {value: 0x0701, lo: 0xb7, hi: 0xb7}, + {value: 0x0709, lo: 0xb8, hi: 0xb8}, + {value: 0x0711, lo: 0xb9, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x124, offset 0x860 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0x125, offset 0x863 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x126, offset 0x866 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x127, offset 0x86a + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x128, offset 0x86e + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xa0}, + {value: 0x0040, lo: 0xa1, hi: 0xbf}, + // Block 0x129, offset 0x871 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xbf}, + // Block 0x12a, offset 0x875 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0x12b, offset 0x878 + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0340, lo: 0x81, hi: 0x81}, + {value: 0x0040, lo: 0x82, hi: 0x9f}, + {value: 0x0340, lo: 0xa0, hi: 0xbf}, + // Block 0x12c, offset 0x87d + {value: 0x0000, lo: 0x01}, + {value: 0x0340, lo: 0x80, hi: 0xbf}, + // Block 0x12d, offset 0x87f + {value: 0x0000, lo: 0x01}, + {value: 0x33c0, lo: 0x80, hi: 0xbf}, + // Block 0x12e, offset 0x881 + {value: 0x0000, lo: 0x02}, + {value: 0x33c0, lo: 0x80, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, +} + +// Total table size 46723 bytes (45KiB); checksum: 4CF3143A diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/net/idna/trie.go temporal-1.22.5/src/vendor/golang.org/x/net/idna/trie.go --- 
temporal-1.21.5-1/src/vendor/golang.org/x/net/idna/trie.go 2023-09-29 14:03:33.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/net/idna/trie.go 2024-02-23 09:46:14.000000000 +0000 @@ -6,27 +6,6 @@ package idna -// appendMapping appends the mapping for the respective rune. isMapped must be -// true. A mapping is a categorization of a rune as defined in UTS #46. -func (c info) appendMapping(b []byte, s string) []byte { - index := int(c >> indexShift) - if c&xorBit == 0 { - s := mappings[index:] - return append(b, s[1:s[0]+1]...) - } - b = append(b, s...) - if c&inlineXOR == inlineXOR { - // TODO: support and handle two-byte inline masks - b[len(b)-1] ^= byte(index) - } else { - for p := len(b) - int(xorData[index]); p < len(b); p++ { - index++ - b[p] ^= xorData[index] - } - } - return b -} - // Sparse block handling code. type valueRange struct { diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/net/idna/trie12.0.0.go temporal-1.22.5/src/vendor/golang.org/x/net/idna/trie12.0.0.go --- temporal-1.21.5-1/src/vendor/golang.org/x/net/idna/trie12.0.0.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/net/idna/trie12.0.0.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,31 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.16 +// +build !go1.16 + +package idna + +// appendMapping appends the mapping for the respective rune. isMapped must be +// true. A mapping is a categorization of a rune as defined in UTS #46. +func (c info) appendMapping(b []byte, s string) []byte { + index := int(c >> indexShift) + if c&xorBit == 0 { + s := mappings[index:] + return append(b, s[1:s[0]+1]...) + } + b = append(b, s...) + if c&inlineXOR == inlineXOR { + // TODO: support and handle two-byte inline masks + b[len(b)-1] ^= byte(index) + } else { + for p := len(b) - int(xorData[index]); p < len(b); p++ { + index++ + b[p] ^= xorData[index] + } + } + return b +} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/net/idna/trie13.0.0.go temporal-1.22.5/src/vendor/golang.org/x/net/idna/trie13.0.0.go --- temporal-1.21.5-1/src/vendor/golang.org/x/net/idna/trie13.0.0.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/net/idna/trie13.0.0.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,31 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.16 +// +build go1.16 + +package idna + +// appendMapping appends the mapping for the respective rune. isMapped must be +// true. A mapping is a categorization of a rune as defined in UTS #46. +func (c info) appendMapping(b []byte, s string) []byte { + index := int(c >> indexShift) + if c&xorBit == 0 { + p := index + return append(b, mappings[mappingIndex[p]:mappingIndex[p+1]]...) + } + b = append(b, s...) 
+ if c&inlineXOR == inlineXOR { + // TODO: support and handle two-byte inline masks + b[len(b)-1] ^= byte(index) + } else { + for p := len(b) - int(xorData[index]); p < len(b); p++ { + index++ + b[p] ^= xorData[index] + } + } + return b +} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,18 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc +// +build gc + +#include "textflag.h" + +// +// System calls for ppc64, AIX are implemented in runtime/syscall_aix.go +// + +TEXT ·syscall6(SB),NOSPLIT,$0-88 + JMP syscall·syscall6(SB) + +TEXT ·rawSyscall6(SB),NOSPLIT,$0-88 + JMP syscall·rawSyscall6(SB) diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/byteorder.go temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/byteorder.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/byteorder.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/byteorder.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,66 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import ( + "runtime" +) + +// byteOrder is a subset of encoding/binary.ByteOrder. +type byteOrder interface { + Uint32([]byte) uint32 + Uint64([]byte) uint64 +} + +type littleEndian struct{} +type bigEndian struct{} + +func (littleEndian) Uint32(b []byte) uint32 { + _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808 + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func (littleEndian) Uint64(b []byte) uint64 { + _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808 + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +func (bigEndian) Uint32(b []byte) uint32 { + _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808 + return uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24 +} + +func (bigEndian) Uint64(b []byte) uint64 { + _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808 + return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 | + uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56 +} + +// hostByteOrder returns littleEndian on little-endian machines and +// bigEndian on big-endian machines. 
+func hostByteOrder() byteOrder { + switch runtime.GOARCH { + case "386", "amd64", "amd64p32", + "alpha", + "arm", "arm64", + "loong64", + "mipsle", "mips64le", "mips64p32le", + "nios2", + "ppc64le", + "riscv", "riscv64", + "sh": + return littleEndian{} + case "armbe", "arm64be", + "m68k", + "mips", "mips64", "mips64p32", + "ppc", "ppc64", + "s390", "s390x", + "shbe", + "sparc", "sparc64": + return bigEndian{} + } + panic("unknown architecture") +} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/cpu.go temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/cpu.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/cpu.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/cpu.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,290 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cpu implements processor feature detection for +// various CPU architectures. +package cpu + +import ( + "os" + "strings" +) + +// Initialized reports whether the CPU features were initialized. +// +// For some GOOS/GOARCH combinations initialization of the CPU features depends +// on reading an operating specific file, e.g. /proc/self/auxv on linux/arm +// Initialized will report false if reading the file fails. +var Initialized bool + +// CacheLinePad is used to pad structs to avoid false sharing. +type CacheLinePad struct{ _ [cacheLineSize]byte } + +// X86 contains the supported CPU features of the +// current X86/AMD64 platform. If the current platform +// is not X86/AMD64 then all feature flags are false. +// +// X86 is padded to avoid false sharing. Further the HasAVX +// and HasAVX2 are only set if the OS supports XMM and YMM +// registers in addition to the CPUID feature bit being set. 
+var X86 struct { + _ CacheLinePad + HasAES bool // AES hardware implementation (AES NI) + HasADX bool // Multi-precision add-carry instruction extensions + HasAVX bool // Advanced vector extension + HasAVX2 bool // Advanced vector extension 2 + HasAVX512 bool // Advanced vector extension 512 + HasAVX512F bool // Advanced vector extension 512 Foundation Instructions + HasAVX512CD bool // Advanced vector extension 512 Conflict Detection Instructions + HasAVX512ER bool // Advanced vector extension 512 Exponential and Reciprocal Instructions + HasAVX512PF bool // Advanced vector extension 512 Prefetch Instructions + HasAVX512VL bool // Advanced vector extension 512 Vector Length Extensions + HasAVX512BW bool // Advanced vector extension 512 Byte and Word Instructions + HasAVX512DQ bool // Advanced vector extension 512 Doubleword and Quadword Instructions + HasAVX512IFMA bool // Advanced vector extension 512 Integer Fused Multiply Add + HasAVX512VBMI bool // Advanced vector extension 512 Vector Byte Manipulation Instructions + HasAVX5124VNNIW bool // Advanced vector extension 512 Vector Neural Network Instructions Word variable precision + HasAVX5124FMAPS bool // Advanced vector extension 512 Fused Multiply Accumulation Packed Single precision + HasAVX512VPOPCNTDQ bool // Advanced vector extension 512 Double and quad word population count instructions + HasAVX512VPCLMULQDQ bool // Advanced vector extension 512 Vector carry-less multiply operations + HasAVX512VNNI bool // Advanced vector extension 512 Vector Neural Network Instructions + HasAVX512GFNI bool // Advanced vector extension 512 Galois field New Instructions + HasAVX512VAES bool // Advanced vector extension 512 Vector AES instructions + HasAVX512VBMI2 bool // Advanced vector extension 512 Vector Byte Manipulation Instructions 2 + HasAVX512BITALG bool // Advanced vector extension 512 Bit Algorithms + HasAVX512BF16 bool // Advanced vector extension 512 BFloat16 Instructions + HasAMXTile bool // Advanced Matrix Extension Tile instructions + HasAMXInt8 bool // Advanced Matrix Extension Int8 instructions + HasAMXBF16 bool // Advanced Matrix Extension BFloat16 instructions + HasBMI1 bool // Bit manipulation instruction set 1 + HasBMI2 bool // Bit manipulation instruction set 2 + HasCX16 bool // Compare and exchange 16 Bytes + HasERMS bool // Enhanced REP for MOVSB and STOSB + HasFMA bool // Fused-multiply-add instructions + HasOSXSAVE bool // OS supports XSAVE/XRESTOR for saving/restoring XMM registers. + HasPCLMULQDQ bool // PCLMULQDQ instruction - most often used for AES-GCM + HasPOPCNT bool // Hamming weight instruction POPCNT. + HasRDRAND bool // RDRAND instruction (on-chip random number generator) + HasRDSEED bool // RDSEED instruction (on-chip random number generator) + HasSSE2 bool // Streaming SIMD extension 2 (always available on amd64) + HasSSE3 bool // Streaming SIMD extension 3 + HasSSSE3 bool // Supplemental streaming SIMD extension 3 + HasSSE41 bool // Streaming SIMD extension 4 and 4.1 + HasSSE42 bool // Streaming SIMD extension 4 and 4.2 + _ CacheLinePad +} + +// ARM64 contains the supported CPU features of the +// current ARMv8(aarch64) platform. If the current platform +// is not arm64 then all feature flags are false. 
+var ARM64 struct { + _ CacheLinePad + HasFP bool // Floating-point instruction set (always available) + HasASIMD bool // Advanced SIMD (always available) + HasEVTSTRM bool // Event stream support + HasAES bool // AES hardware implementation + HasPMULL bool // Polynomial multiplication instruction set + HasSHA1 bool // SHA1 hardware implementation + HasSHA2 bool // SHA2 hardware implementation + HasCRC32 bool // CRC32 hardware implementation + HasATOMICS bool // Atomic memory operation instruction set + HasFPHP bool // Half precision floating-point instruction set + HasASIMDHP bool // Advanced SIMD half precision instruction set + HasCPUID bool // CPUID identification scheme registers + HasASIMDRDM bool // Rounding double multiply add/subtract instruction set + HasJSCVT bool // Javascript conversion from floating-point to integer + HasFCMA bool // Floating-point multiplication and addition of complex numbers + HasLRCPC bool // Release Consistent processor consistent support + HasDCPOP bool // Persistent memory support + HasSHA3 bool // SHA3 hardware implementation + HasSM3 bool // SM3 hardware implementation + HasSM4 bool // SM4 hardware implementation + HasASIMDDP bool // Advanced SIMD double precision instruction set + HasSHA512 bool // SHA512 hardware implementation + HasSVE bool // Scalable Vector Extensions + HasASIMDFHM bool // Advanced SIMD multiplication FP16 to FP32 + _ CacheLinePad +} + +// ARM contains the supported CPU features of the current ARM (32-bit) platform. +// All feature flags are false if: +// 1. the current platform is not arm, or +// 2. the current operating system is not Linux. +var ARM struct { + _ CacheLinePad + HasSWP bool // SWP instruction support + HasHALF bool // Half-word load and store support + HasTHUMB bool // ARM Thumb instruction set + Has26BIT bool // Address space limited to 26-bits + HasFASTMUL bool // 32-bit operand, 64-bit result multiplication support + HasFPA bool // Floating point arithmetic support + HasVFP bool // Vector floating point support + HasEDSP bool // DSP Extensions support + HasJAVA bool // Java instruction set + HasIWMMXT bool // Intel Wireless MMX technology support + HasCRUNCH bool // MaverickCrunch context switching and handling + HasTHUMBEE bool // Thumb EE instruction set + HasNEON bool // NEON instruction set + HasVFPv3 bool // Vector floating point version 3 support + HasVFPv3D16 bool // Vector floating point version 3 D8-D15 + HasTLS bool // Thread local storage support + HasVFPv4 bool // Vector floating point version 4 support + HasIDIVA bool // Integer divide instruction support in ARM mode + HasIDIVT bool // Integer divide instruction support in Thumb mode + HasVFPD32 bool // Vector floating point version 3 D15-D31 + HasLPAE bool // Large Physical Address Extensions + HasEVTSTRM bool // Event stream support + HasAES bool // AES hardware implementation + HasPMULL bool // Polynomial multiplication instruction set + HasSHA1 bool // SHA1 hardware implementation + HasSHA2 bool // SHA2 hardware implementation + HasCRC32 bool // CRC32 hardware implementation + _ CacheLinePad +} + +// MIPS64X contains the supported CPU features of the current mips64/mips64le +// platforms. If the current platform is not mips64/mips64le or the current +// operating system is not Linux then all feature flags are false. +var MIPS64X struct { + _ CacheLinePad + HasMSA bool // MIPS SIMD architecture + _ CacheLinePad +} + +// PPC64 contains the supported CPU features of the current ppc64/ppc64le platforms. 
+// If the current platform is not ppc64/ppc64le then all feature flags are false. +// +// For ppc64/ppc64le, it is safe to check only for ISA level starting on ISA v3.00, +// since there are no optional categories. There are some exceptions that also +// require kernel support to work (DARN, SCV), so there are feature bits for +// those as well. The struct is padded to avoid false sharing. +var PPC64 struct { + _ CacheLinePad + HasDARN bool // Hardware random number generator (requires kernel enablement) + HasSCV bool // Syscall vectored (requires kernel enablement) + IsPOWER8 bool // ISA v2.07 (POWER8) + IsPOWER9 bool // ISA v3.00 (POWER9), implies IsPOWER8 + _ CacheLinePad +} + +// S390X contains the supported CPU features of the current IBM Z +// (s390x) platform. If the current platform is not IBM Z then all +// feature flags are false. +// +// S390X is padded to avoid false sharing. Further HasVX is only set +// if the OS supports vector registers in addition to the STFLE +// feature bit being set. +var S390X struct { + _ CacheLinePad + HasZARCH bool // z/Architecture mode is active [mandatory] + HasSTFLE bool // store facility list extended + HasLDISP bool // long (20-bit) displacements + HasEIMM bool // 32-bit immediates + HasDFP bool // decimal floating point + HasETF3EH bool // ETF-3 enhanced + HasMSA bool // message security assist (CPACF) + HasAES bool // KM-AES{128,192,256} functions + HasAESCBC bool // KMC-AES{128,192,256} functions + HasAESCTR bool // KMCTR-AES{128,192,256} functions + HasAESGCM bool // KMA-GCM-AES{128,192,256} functions + HasGHASH bool // KIMD-GHASH function + HasSHA1 bool // K{I,L}MD-SHA-1 functions + HasSHA256 bool // K{I,L}MD-SHA-256 functions + HasSHA512 bool // K{I,L}MD-SHA-512 functions + HasSHA3 bool // K{I,L}MD-SHA3-{224,256,384,512} and K{I,L}MD-SHAKE-{128,256} functions + HasVX bool // vector facility + HasVXE bool // vector-enhancements facility 1 + _ CacheLinePad +} + +func init() { + archInit() + initOptions() + processOptions() +} + +// options contains the cpu debug options that can be used in GODEBUG. +// Options are arch dependent and are added by the arch specific initOptions functions. +// Features that are mandatory for the specific GOARCH should have the Required field set +// (e.g. SSE2 on amd64). +var options []option + +// Option names should be lower case. e.g. avx instead of AVX. +type option struct { + Name string + Feature *bool + Specified bool // whether feature value was specified in GODEBUG + Enable bool // whether feature should be enabled + Required bool // whether feature is mandatory and can not be disabled +} + +func processOptions() { + env := os.Getenv("GODEBUG") +field: + for env != "" { + field := "" + i := strings.IndexByte(env, ',') + if i < 0 { + field, env = env, "" + } else { + field, env = env[:i], env[i+1:] + } + if len(field) < 4 || field[:4] != "cpu." { + continue + } + i = strings.IndexByte(field, '=') + if i < 0 { + print("GODEBUG sys/cpu: no value specified for \"", field, "\"\n") + continue + } + key, value := field[4:i], field[i+1:] // e.g. 
"SSE2", "on" + + var enable bool + switch value { + case "on": + enable = true + case "off": + enable = false + default: + print("GODEBUG sys/cpu: value \"", value, "\" not supported for cpu option \"", key, "\"\n") + continue field + } + + if key == "all" { + for i := range options { + options[i].Specified = true + options[i].Enable = enable || options[i].Required + } + continue field + } + + for i := range options { + if options[i].Name == key { + options[i].Specified = true + options[i].Enable = enable + continue field + } + } + + print("GODEBUG sys/cpu: unknown cpu feature \"", key, "\"\n") + } + + for _, o := range options { + if !o.Specified { + continue + } + + if o.Enable && !*o.Feature { + print("GODEBUG sys/cpu: can not enable \"", o.Name, "\", missing CPU support\n") + continue + } + + if !o.Enable && o.Required { + print("GODEBUG sys/cpu: can not disable \"", o.Name, "\", required CPU feature\n") + continue + } + + *o.Feature = o.Enable + } +} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/cpu_aix.go temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/cpu_aix.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/cpu_aix.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/cpu_aix.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,34 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build aix +// +build aix + +package cpu + +const ( + // getsystemcfg constants + _SC_IMPL = 2 + _IMPL_POWER8 = 0x10000 + _IMPL_POWER9 = 0x20000 +) + +func archInit() { + impl := getsystemcfg(_SC_IMPL) + if impl&_IMPL_POWER8 != 0 { + PPC64.IsPOWER8 = true + } + if impl&_IMPL_POWER9 != 0 { + PPC64.IsPOWER8 = true + PPC64.IsPOWER9 = true + } + + Initialized = true +} + +func getsystemcfg(label int) (n uint64) { + r0, _ := callgetsystemcfg(label) + n = uint64(r0) + return +} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/cpu_arm.go temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/cpu_arm.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/cpu_arm.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/cpu_arm.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,73 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +const cacheLineSize = 32 + +// HWCAP/HWCAP2 bits. +// These are specific to Linux. 
+const ( + hwcap_SWP = 1 << 0 + hwcap_HALF = 1 << 1 + hwcap_THUMB = 1 << 2 + hwcap_26BIT = 1 << 3 + hwcap_FAST_MULT = 1 << 4 + hwcap_FPA = 1 << 5 + hwcap_VFP = 1 << 6 + hwcap_EDSP = 1 << 7 + hwcap_JAVA = 1 << 8 + hwcap_IWMMXT = 1 << 9 + hwcap_CRUNCH = 1 << 10 + hwcap_THUMBEE = 1 << 11 + hwcap_NEON = 1 << 12 + hwcap_VFPv3 = 1 << 13 + hwcap_VFPv3D16 = 1 << 14 + hwcap_TLS = 1 << 15 + hwcap_VFPv4 = 1 << 16 + hwcap_IDIVA = 1 << 17 + hwcap_IDIVT = 1 << 18 + hwcap_VFPD32 = 1 << 19 + hwcap_LPAE = 1 << 20 + hwcap_EVTSTRM = 1 << 21 + + hwcap2_AES = 1 << 0 + hwcap2_PMULL = 1 << 1 + hwcap2_SHA1 = 1 << 2 + hwcap2_SHA2 = 1 << 3 + hwcap2_CRC32 = 1 << 4 +) + +func initOptions() { + options = []option{ + {Name: "pmull", Feature: &ARM.HasPMULL}, + {Name: "sha1", Feature: &ARM.HasSHA1}, + {Name: "sha2", Feature: &ARM.HasSHA2}, + {Name: "swp", Feature: &ARM.HasSWP}, + {Name: "thumb", Feature: &ARM.HasTHUMB}, + {Name: "thumbee", Feature: &ARM.HasTHUMBEE}, + {Name: "tls", Feature: &ARM.HasTLS}, + {Name: "vfp", Feature: &ARM.HasVFP}, + {Name: "vfpd32", Feature: &ARM.HasVFPD32}, + {Name: "vfpv3", Feature: &ARM.HasVFPv3}, + {Name: "vfpv3d16", Feature: &ARM.HasVFPv3D16}, + {Name: "vfpv4", Feature: &ARM.HasVFPv4}, + {Name: "half", Feature: &ARM.HasHALF}, + {Name: "26bit", Feature: &ARM.Has26BIT}, + {Name: "fastmul", Feature: &ARM.HasFASTMUL}, + {Name: "fpa", Feature: &ARM.HasFPA}, + {Name: "edsp", Feature: &ARM.HasEDSP}, + {Name: "java", Feature: &ARM.HasJAVA}, + {Name: "iwmmxt", Feature: &ARM.HasIWMMXT}, + {Name: "crunch", Feature: &ARM.HasCRUNCH}, + {Name: "neon", Feature: &ARM.HasNEON}, + {Name: "idivt", Feature: &ARM.HasIDIVT}, + {Name: "idiva", Feature: &ARM.HasIDIVA}, + {Name: "lpae", Feature: &ARM.HasLPAE}, + {Name: "evtstrm", Feature: &ARM.HasEVTSTRM}, + {Name: "aes", Feature: &ARM.HasAES}, + {Name: "crc32", Feature: &ARM.HasCRC32}, + } + +} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/cpu_arm64.go temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/cpu_arm64.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/cpu_arm64.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/cpu_arm64.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,172 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import "runtime" + +// cacheLineSize is used to prevent false sharing of cache lines. +// We choose 128 because Apple Silicon, a.k.a. M1, has 128-byte cache line size. +// It doesn't cost much and is much more future-proof. 
+const cacheLineSize = 128 + +func initOptions() { + options = []option{ + {Name: "fp", Feature: &ARM64.HasFP}, + {Name: "asimd", Feature: &ARM64.HasASIMD}, + {Name: "evstrm", Feature: &ARM64.HasEVTSTRM}, + {Name: "aes", Feature: &ARM64.HasAES}, + {Name: "fphp", Feature: &ARM64.HasFPHP}, + {Name: "jscvt", Feature: &ARM64.HasJSCVT}, + {Name: "lrcpc", Feature: &ARM64.HasLRCPC}, + {Name: "pmull", Feature: &ARM64.HasPMULL}, + {Name: "sha1", Feature: &ARM64.HasSHA1}, + {Name: "sha2", Feature: &ARM64.HasSHA2}, + {Name: "sha3", Feature: &ARM64.HasSHA3}, + {Name: "sha512", Feature: &ARM64.HasSHA512}, + {Name: "sm3", Feature: &ARM64.HasSM3}, + {Name: "sm4", Feature: &ARM64.HasSM4}, + {Name: "sve", Feature: &ARM64.HasSVE}, + {Name: "crc32", Feature: &ARM64.HasCRC32}, + {Name: "atomics", Feature: &ARM64.HasATOMICS}, + {Name: "asimdhp", Feature: &ARM64.HasASIMDHP}, + {Name: "cpuid", Feature: &ARM64.HasCPUID}, + {Name: "asimrdm", Feature: &ARM64.HasASIMDRDM}, + {Name: "fcma", Feature: &ARM64.HasFCMA}, + {Name: "dcpop", Feature: &ARM64.HasDCPOP}, + {Name: "asimddp", Feature: &ARM64.HasASIMDDP}, + {Name: "asimdfhm", Feature: &ARM64.HasASIMDFHM}, + } +} + +func archInit() { + switch runtime.GOOS { + case "freebsd": + readARM64Registers() + case "linux", "netbsd", "openbsd": + doinit() + default: + // Many platforms don't seem to allow reading these registers. + setMinimalFeatures() + } +} + +// setMinimalFeatures fakes the minimal ARM64 features expected by +// TestARM64minimalFeatures. +func setMinimalFeatures() { + ARM64.HasASIMD = true + ARM64.HasFP = true +} + +func readARM64Registers() { + Initialized = true + + parseARM64SystemRegisters(getisar0(), getisar1(), getpfr0()) +} + +func parseARM64SystemRegisters(isar0, isar1, pfr0 uint64) { + // ID_AA64ISAR0_EL1 + switch extractBits(isar0, 4, 7) { + case 1: + ARM64.HasAES = true + case 2: + ARM64.HasAES = true + ARM64.HasPMULL = true + } + + switch extractBits(isar0, 8, 11) { + case 1: + ARM64.HasSHA1 = true + } + + switch extractBits(isar0, 12, 15) { + case 1: + ARM64.HasSHA2 = true + case 2: + ARM64.HasSHA2 = true + ARM64.HasSHA512 = true + } + + switch extractBits(isar0, 16, 19) { + case 1: + ARM64.HasCRC32 = true + } + + switch extractBits(isar0, 20, 23) { + case 2: + ARM64.HasATOMICS = true + } + + switch extractBits(isar0, 28, 31) { + case 1: + ARM64.HasASIMDRDM = true + } + + switch extractBits(isar0, 32, 35) { + case 1: + ARM64.HasSHA3 = true + } + + switch extractBits(isar0, 36, 39) { + case 1: + ARM64.HasSM3 = true + } + + switch extractBits(isar0, 40, 43) { + case 1: + ARM64.HasSM4 = true + } + + switch extractBits(isar0, 44, 47) { + case 1: + ARM64.HasASIMDDP = true + } + + // ID_AA64ISAR1_EL1 + switch extractBits(isar1, 0, 3) { + case 1: + ARM64.HasDCPOP = true + } + + switch extractBits(isar1, 12, 15) { + case 1: + ARM64.HasJSCVT = true + } + + switch extractBits(isar1, 16, 19) { + case 1: + ARM64.HasFCMA = true + } + + switch extractBits(isar1, 20, 23) { + case 1: + ARM64.HasLRCPC = true + } + + // ID_AA64PFR0_EL1 + switch extractBits(pfr0, 16, 19) { + case 0: + ARM64.HasFP = true + case 1: + ARM64.HasFP = true + ARM64.HasFPHP = true + } + + switch extractBits(pfr0, 20, 23) { + case 0: + ARM64.HasASIMD = true + case 1: + ARM64.HasASIMD = true + ARM64.HasASIMDHP = true + } + + switch extractBits(pfr0, 32, 35) { + case 1: + ARM64.HasSVE = true + } +} + +func extractBits(data uint64, start, end uint) uint { + return (uint)(data>>start) & ((1 << (end - start + 1)) - 1) +} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/cpu_arm64.s 
temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/cpu_arm64.s --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/cpu_arm64.s 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/cpu_arm64.s 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,32 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc +// +build gc + +#include "textflag.h" + +// func getisar0() uint64 +TEXT ·getisar0(SB),NOSPLIT,$0-8 + // get Instruction Set Attributes 0 into x0 + // mrs x0, ID_AA64ISAR0_EL1 = d5380600 + WORD $0xd5380600 + MOVD R0, ret+0(FP) + RET + +// func getisar1() uint64 +TEXT ·getisar1(SB),NOSPLIT,$0-8 + // get Instruction Set Attributes 1 into x0 + // mrs x0, ID_AA64ISAR1_EL1 = d5380620 + WORD $0xd5380620 + MOVD R0, ret+0(FP) + RET + +// func getpfr0() uint64 +TEXT ·getpfr0(SB),NOSPLIT,$0-8 + // get Processor Feature Register 0 into x0 + // mrs x0, ID_AA64PFR0_EL1 = d5380400 + WORD $0xd5380400 + MOVD R0, ret+0(FP) + RET diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,12 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc +// +build gc + +package cpu + +func getisar0() uint64 +func getisar1() uint64 +func getpfr0() uint64 diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,22 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc +// +build gc + +package cpu + +// haveAsmFunctions reports whether the other functions in this file can +// be safely called. +func haveAsmFunctions() bool { return true } + +// The following feature detection functions are defined in cpu_s390x.s. +// They are likely to be expensive to call so the results should be cached. +func stfle() facilityList +func kmQuery() queryResult +func kmcQuery() queryResult +func kmctrQuery() queryResult +func kmaQuery() queryResult +func kimdQuery() queryResult +func klmdQuery() queryResult diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,17 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (386 || amd64 || amd64p32) && gc +// +build 386 amd64 amd64p32 +// +build gc + +package cpu + +// cpuid is implemented in cpu_x86.s for gc compiler +// and in cpu_gccgo.c for gccgo. 
+func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) + +// xgetbv with ecx = 0 is implemented in cpu_x86.s for gc compiler +// and in cpu_gccgo.c for gccgo. +func xgetbv() (eax, edx uint32) diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,12 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gccgo +// +build gccgo + +package cpu + +func getisar0() uint64 { return 0 } +func getisar1() uint64 { return 0 } +func getpfr0() uint64 { return 0 } diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,23 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gccgo +// +build gccgo + +package cpu + +// haveAsmFunctions reports whether the other functions in this file can +// be safely called. +func haveAsmFunctions() bool { return false } + +// TODO(mundaym): the following feature detection functions are currently +// stubs. See https://golang.org/cl/162887 for how to fix this. +// They are likely to be expensive to call so the results should be cached. +func stfle() facilityList { panic("not implemented for gccgo") } +func kmQuery() queryResult { panic("not implemented for gccgo") } +func kmcQuery() queryResult { panic("not implemented for gccgo") } +func kmctrQuery() queryResult { panic("not implemented for gccgo") } +func kmaQuery() queryResult { panic("not implemented for gccgo") } +func kimdQuery() queryResult { panic("not implemented for gccgo") } +func klmdQuery() queryResult { panic("not implemented for gccgo") } diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,39 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (386 || amd64 || amd64p32) && gccgo +// +build 386 amd64 amd64p32 +// +build gccgo + +#include <cpuid.h> +#include <stdint.h> +#include <x86intrin.h> + +// Need to wrap __get_cpuid_count because it's declared as static. 
+int +gccgoGetCpuidCount(uint32_t leaf, uint32_t subleaf, + uint32_t *eax, uint32_t *ebx, + uint32_t *ecx, uint32_t *edx) +{ + return __get_cpuid_count(leaf, subleaf, eax, ebx, ecx, edx); +} + +#pragma GCC diagnostic ignored "-Wunknown-pragmas" +#pragma GCC push_options +#pragma GCC target("xsave") +#pragma clang attribute push (__attribute__((target("xsave"))), apply_to=function) + +// xgetbv reads the contents of an XCR (Extended Control Register) +// specified in the ECX register into registers EDX:EAX. +// Currently, the only supported value for XCR is 0. +void +gccgoXgetbv(uint32_t *eax, uint32_t *edx) +{ + uint64_t v = _xgetbv(0); + *eax = v & 0xffffffff; + *edx = v >> 32; +} + +#pragma clang attribute pop +#pragma GCC pop_options diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,33 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (386 || amd64 || amd64p32) && gccgo +// +build 386 amd64 amd64p32 +// +build gccgo + +package cpu + +//extern gccgoGetCpuidCount +func gccgoGetCpuidCount(eaxArg, ecxArg uint32, eax, ebx, ecx, edx *uint32) + +func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) { + var a, b, c, d uint32 + gccgoGetCpuidCount(eaxArg, ecxArg, &a, &b, &c, &d) + return a, b, c, d +} + +//extern gccgoXgetbv +func gccgoXgetbv(eax, edx *uint32) + +func xgetbv() (eax, edx uint32) { + var a, d uint32 + gccgoXgetbv(&a, &d) + return a, d +} + +// gccgo doesn't build on Darwin, per: +// https://github.com/Homebrew/homebrew-core/blob/HEAD/Formula/gcc.rb#L76 +func darwinSupportsAVX512() bool { + return false +} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/cpu_linux.go temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/cpu_linux.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/cpu_linux.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/cpu_linux.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,16 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !386 && !amd64 && !amd64p32 && !arm64 +// +build !386,!amd64,!amd64p32,!arm64 + +package cpu + +func archInit() { + if err := readHWCAP(); err != nil { + return + } + doinit() + Initialized = true +} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/cpu_linux_arm.go temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/cpu_linux_arm.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/cpu_linux_arm.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/cpu_linux_arm.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,39 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package cpu + +func doinit() { + ARM.HasSWP = isSet(hwCap, hwcap_SWP) + ARM.HasHALF = isSet(hwCap, hwcap_HALF) + ARM.HasTHUMB = isSet(hwCap, hwcap_THUMB) + ARM.Has26BIT = isSet(hwCap, hwcap_26BIT) + ARM.HasFASTMUL = isSet(hwCap, hwcap_FAST_MULT) + ARM.HasFPA = isSet(hwCap, hwcap_FPA) + ARM.HasVFP = isSet(hwCap, hwcap_VFP) + ARM.HasEDSP = isSet(hwCap, hwcap_EDSP) + ARM.HasJAVA = isSet(hwCap, hwcap_JAVA) + ARM.HasIWMMXT = isSet(hwCap, hwcap_IWMMXT) + ARM.HasCRUNCH = isSet(hwCap, hwcap_CRUNCH) + ARM.HasTHUMBEE = isSet(hwCap, hwcap_THUMBEE) + ARM.HasNEON = isSet(hwCap, hwcap_NEON) + ARM.HasVFPv3 = isSet(hwCap, hwcap_VFPv3) + ARM.HasVFPv3D16 = isSet(hwCap, hwcap_VFPv3D16) + ARM.HasTLS = isSet(hwCap, hwcap_TLS) + ARM.HasVFPv4 = isSet(hwCap, hwcap_VFPv4) + ARM.HasIDIVA = isSet(hwCap, hwcap_IDIVA) + ARM.HasIDIVT = isSet(hwCap, hwcap_IDIVT) + ARM.HasVFPD32 = isSet(hwCap, hwcap_VFPD32) + ARM.HasLPAE = isSet(hwCap, hwcap_LPAE) + ARM.HasEVTSTRM = isSet(hwCap, hwcap_EVTSTRM) + ARM.HasAES = isSet(hwCap2, hwcap2_AES) + ARM.HasPMULL = isSet(hwCap2, hwcap2_PMULL) + ARM.HasSHA1 = isSet(hwCap2, hwcap2_SHA1) + ARM.HasSHA2 = isSet(hwCap2, hwcap2_SHA2) + ARM.HasCRC32 = isSet(hwCap2, hwcap2_CRC32) +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,111 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import ( + "strings" + "syscall" +) + +// HWCAP/HWCAP2 bits. These are exposed by Linux. +const ( + hwcap_FP = 1 << 0 + hwcap_ASIMD = 1 << 1 + hwcap_EVTSTRM = 1 << 2 + hwcap_AES = 1 << 3 + hwcap_PMULL = 1 << 4 + hwcap_SHA1 = 1 << 5 + hwcap_SHA2 = 1 << 6 + hwcap_CRC32 = 1 << 7 + hwcap_ATOMICS = 1 << 8 + hwcap_FPHP = 1 << 9 + hwcap_ASIMDHP = 1 << 10 + hwcap_CPUID = 1 << 11 + hwcap_ASIMDRDM = 1 << 12 + hwcap_JSCVT = 1 << 13 + hwcap_FCMA = 1 << 14 + hwcap_LRCPC = 1 << 15 + hwcap_DCPOP = 1 << 16 + hwcap_SHA3 = 1 << 17 + hwcap_SM3 = 1 << 18 + hwcap_SM4 = 1 << 19 + hwcap_ASIMDDP = 1 << 20 + hwcap_SHA512 = 1 << 21 + hwcap_SVE = 1 << 22 + hwcap_ASIMDFHM = 1 << 23 +) + +// linuxKernelCanEmulateCPUID reports whether we're running +// on Linux 4.11+. Ideally we'd like to ask the question about +// whether the current kernel contains +// https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=77c97b4ee21290f5f083173d957843b615abbff2 +// but the version number will have to do. +func linuxKernelCanEmulateCPUID() bool { + var un syscall.Utsname + syscall.Uname(&un) + var sb strings.Builder + for _, b := range un.Release[:] { + if b == 0 { + break + } + sb.WriteByte(byte(b)) + } + major, minor, _, ok := parseRelease(sb.String()) + return ok && (major > 4 || major == 4 && minor >= 11) +} + +func doinit() { + if err := readHWCAP(); err != nil { + // We failed to read /proc/self/auxv. This can happen if the binary has + // been given extra capabilities(7) with /bin/setcap. + // + // When this happens, we have two options. If the Linux kernel is new + // enough (4.11+), we can read the arm64 registers directly which'll + // trap into the kernel and then return back to userspace. 
+ // + // But on older kernels, such as Linux 4.4.180 as used on many Synology + // devices, calling readARM64Registers (specifically getisar0) will + // cause a SIGILL and we'll die. So for older kernels, parse /proc/cpuinfo + // instead. + // + // See golang/go#57336. + if linuxKernelCanEmulateCPUID() { + readARM64Registers() + } else { + readLinuxProcCPUInfo() + } + return + } + + // HWCAP feature bits + ARM64.HasFP = isSet(hwCap, hwcap_FP) + ARM64.HasASIMD = isSet(hwCap, hwcap_ASIMD) + ARM64.HasEVTSTRM = isSet(hwCap, hwcap_EVTSTRM) + ARM64.HasAES = isSet(hwCap, hwcap_AES) + ARM64.HasPMULL = isSet(hwCap, hwcap_PMULL) + ARM64.HasSHA1 = isSet(hwCap, hwcap_SHA1) + ARM64.HasSHA2 = isSet(hwCap, hwcap_SHA2) + ARM64.HasCRC32 = isSet(hwCap, hwcap_CRC32) + ARM64.HasATOMICS = isSet(hwCap, hwcap_ATOMICS) + ARM64.HasFPHP = isSet(hwCap, hwcap_FPHP) + ARM64.HasASIMDHP = isSet(hwCap, hwcap_ASIMDHP) + ARM64.HasCPUID = isSet(hwCap, hwcap_CPUID) + ARM64.HasASIMDRDM = isSet(hwCap, hwcap_ASIMDRDM) + ARM64.HasJSCVT = isSet(hwCap, hwcap_JSCVT) + ARM64.HasFCMA = isSet(hwCap, hwcap_FCMA) + ARM64.HasLRCPC = isSet(hwCap, hwcap_LRCPC) + ARM64.HasDCPOP = isSet(hwCap, hwcap_DCPOP) + ARM64.HasSHA3 = isSet(hwCap, hwcap_SHA3) + ARM64.HasSM3 = isSet(hwCap, hwcap_SM3) + ARM64.HasSM4 = isSet(hwCap, hwcap_SM4) + ARM64.HasASIMDDP = isSet(hwCap, hwcap_ASIMDDP) + ARM64.HasSHA512 = isSet(hwCap, hwcap_SHA512) + ARM64.HasSVE = isSet(hwCap, hwcap_SVE) + ARM64.HasASIMDFHM = isSet(hwCap, hwcap_ASIMDFHM) +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,24 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build linux && (mips64 || mips64le) +// +build linux +// +build mips64 mips64le + +package cpu + +// HWCAP bits. These are exposed by the Linux kernel 5.4. +const ( + // CPU features + hwcap_MIPS_MSA = 1 << 1 +) + +func doinit() { + // HWCAP feature bits + MIPS64X.HasMSA = isSet(hwCap, hwcap_MIPS_MSA) +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,10 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build linux && !arm && !arm64 && !mips64 && !mips64le && !ppc64 && !ppc64le && !s390x +// +build linux,!arm,!arm64,!mips64,!mips64le,!ppc64,!ppc64le,!s390x + +package cpu + +func doinit() {} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,32 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build linux && (ppc64 || ppc64le) +// +build linux +// +build ppc64 ppc64le + +package cpu + +// HWCAP/HWCAP2 bits. These are exposed by the kernel. +const ( + // ISA Level + _PPC_FEATURE2_ARCH_2_07 = 0x80000000 + _PPC_FEATURE2_ARCH_3_00 = 0x00800000 + + // CPU features + _PPC_FEATURE2_DARN = 0x00200000 + _PPC_FEATURE2_SCV = 0x00100000 +) + +func doinit() { + // HWCAP2 feature bits + PPC64.IsPOWER8 = isSet(hwCap2, _PPC_FEATURE2_ARCH_2_07) + PPC64.IsPOWER9 = isSet(hwCap2, _PPC_FEATURE2_ARCH_3_00) + PPC64.HasDARN = isSet(hwCap2, _PPC_FEATURE2_DARN) + PPC64.HasSCV = isSet(hwCap2, _PPC_FEATURE2_SCV) +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,40 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +const ( + // bit mask values from /usr/include/bits/hwcap.h + hwcap_ZARCH = 2 + hwcap_STFLE = 4 + hwcap_MSA = 8 + hwcap_LDISP = 16 + hwcap_EIMM = 32 + hwcap_DFP = 64 + hwcap_ETF3EH = 256 + hwcap_VX = 2048 + hwcap_VXE = 8192 +) + +func initS390Xbase() { + // test HWCAP bit vector + has := func(featureMask uint) bool { + return hwCap&featureMask == featureMask + } + + // mandatory + S390X.HasZARCH = has(hwcap_ZARCH) + + // optional + S390X.HasSTFLE = has(hwcap_STFLE) + S390X.HasLDISP = has(hwcap_LDISP) + S390X.HasEIMM = has(hwcap_EIMM) + S390X.HasETF3EH = has(hwcap_ETF3EH) + S390X.HasDFP = has(hwcap_DFP) + S390X.HasMSA = has(hwcap_MSA) + S390X.HasVX = has(hwcap_VX) + if S390X.HasVX { + S390X.HasVXE = has(hwcap_VXE) + } +} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/cpu_loong64.go temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/cpu_loong64.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/cpu_loong64.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/cpu_loong64.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,13 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build loong64 +// +build loong64 + +package cpu + +const cacheLineSize = 64 + +func initOptions() { +} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/cpu_mips64x.go temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/cpu_mips64x.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/cpu_mips64x.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/cpu_mips64x.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,16 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build mips64 || mips64le +// +build mips64 mips64le + +package cpu + +const cacheLineSize = 32 + +func initOptions() { + options = []option{ + {Name: "msa", Feature: &MIPS64X.HasMSA}, + } +} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/cpu_mipsx.go temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/cpu_mipsx.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/cpu_mipsx.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/cpu_mipsx.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,12 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build mips || mipsle +// +build mips mipsle + +package cpu + +const cacheLineSize = 32 + +func initOptions() {} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,173 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import ( + "syscall" + "unsafe" +) + +// Minimal copy of functionality from x/sys/unix so the cpu package can call +// sysctl without depending on x/sys/unix. + +const ( + _CTL_QUERY = -2 + + _SYSCTL_VERS_1 = 0x1000000 +) + +var _zero uintptr + +func sysctl(mib []int32, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, errno := syscall.Syscall6( + syscall.SYS___SYSCTL, + uintptr(_p0), + uintptr(len(mib)), + uintptr(unsafe.Pointer(old)), + uintptr(unsafe.Pointer(oldlen)), + uintptr(unsafe.Pointer(new)), + uintptr(newlen)) + if errno != 0 { + return errno + } + return nil +} + +type sysctlNode struct { + Flags uint32 + Num int32 + Name [32]int8 + Ver uint32 + __rsvd uint32 + Un [16]byte + _sysctl_size [8]byte + _sysctl_func [8]byte + _sysctl_parent [8]byte + _sysctl_desc [8]byte +} + +func sysctlNodes(mib []int32) ([]sysctlNode, error) { + var olen uintptr + + // Get a list of all sysctl nodes below the given MIB by performing + // a sysctl for the given MIB with CTL_QUERY appended. + mib = append(mib, _CTL_QUERY) + qnode := sysctlNode{Flags: _SYSCTL_VERS_1} + qp := (*byte)(unsafe.Pointer(&qnode)) + sz := unsafe.Sizeof(qnode) + if err := sysctl(mib, nil, &olen, qp, sz); err != nil { + return nil, err + } + + // Now that we know the size, get the actual nodes. 
+ nodes := make([]sysctlNode, olen/sz) + np := (*byte)(unsafe.Pointer(&nodes[0])) + if err := sysctl(mib, np, &olen, qp, sz); err != nil { + return nil, err + } + + return nodes, nil +} + +func nametomib(name string) ([]int32, error) { + // Split name into components. + var parts []string + last := 0 + for i := 0; i < len(name); i++ { + if name[i] == '.' { + parts = append(parts, name[last:i]) + last = i + 1 + } + } + parts = append(parts, name[last:]) + + mib := []int32{} + // Discover the nodes and construct the MIB OID. + for partno, part := range parts { + nodes, err := sysctlNodes(mib) + if err != nil { + return nil, err + } + for _, node := range nodes { + n := make([]byte, 0) + for i := range node.Name { + if node.Name[i] != 0 { + n = append(n, byte(node.Name[i])) + } + } + if string(n) == part { + mib = append(mib, int32(node.Num)) + break + } + } + if len(mib) != partno+1 { + return nil, err + } + } + + return mib, nil +} + +// aarch64SysctlCPUID is struct aarch64_sysctl_cpu_id from NetBSD's +type aarch64SysctlCPUID struct { + midr uint64 /* Main ID Register */ + revidr uint64 /* Revision ID Register */ + mpidr uint64 /* Multiprocessor Affinity Register */ + aa64dfr0 uint64 /* A64 Debug Feature Register 0 */ + aa64dfr1 uint64 /* A64 Debug Feature Register 1 */ + aa64isar0 uint64 /* A64 Instruction Set Attribute Register 0 */ + aa64isar1 uint64 /* A64 Instruction Set Attribute Register 1 */ + aa64mmfr0 uint64 /* A64 Memory Model Feature Register 0 */ + aa64mmfr1 uint64 /* A64 Memory Model Feature Register 1 */ + aa64mmfr2 uint64 /* A64 Memory Model Feature Register 2 */ + aa64pfr0 uint64 /* A64 Processor Feature Register 0 */ + aa64pfr1 uint64 /* A64 Processor Feature Register 1 */ + aa64zfr0 uint64 /* A64 SVE Feature ID Register 0 */ + mvfr0 uint32 /* Media and VFP Feature Register 0 */ + mvfr1 uint32 /* Media and VFP Feature Register 1 */ + mvfr2 uint32 /* Media and VFP Feature Register 2 */ + pad uint32 + clidr uint64 /* Cache Level ID Register */ + ctr uint64 /* Cache Type Register */ +} + +func sysctlCPUID(name string) (*aarch64SysctlCPUID, error) { + mib, err := nametomib(name) + if err != nil { + return nil, err + } + + out := aarch64SysctlCPUID{} + n := unsafe.Sizeof(out) + _, _, errno := syscall.Syscall6( + syscall.SYS___SYSCTL, + uintptr(unsafe.Pointer(&mib[0])), + uintptr(len(mib)), + uintptr(unsafe.Pointer(&out)), + uintptr(unsafe.Pointer(&n)), + uintptr(0), + uintptr(0)) + if errno != 0 { + return nil, errno + } + return &out, nil +} + +func doinit() { + cpuid, err := sysctlCPUID("machdep.cpu0.cpu_id") + if err != nil { + setMinimalFeatures() + return + } + parseARM64SystemRegisters(cpuid.aa64isar0, cpuid.aa64isar1, cpuid.aa64pfr0) + + Initialized = true +} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,65 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import ( + "syscall" + "unsafe" +) + +// Minimal copy of functionality from x/sys/unix so the cpu package can call +// sysctl without depending on x/sys/unix. + +const ( + // From OpenBSD's sys/sysctl.h. + _CTL_MACHDEP = 7 + + // From OpenBSD's machine/cpu.h. 
+ _CPU_ID_AA64ISAR0 = 2 + _CPU_ID_AA64ISAR1 = 3 +) + +// Implemented in the runtime package (runtime/sys_openbsd3.go) +func syscall_syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno) + +//go:linkname syscall_syscall6 syscall.syscall6 + +func sysctl(mib []uint32, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + _, _, errno := syscall_syscall6(libc_sysctl_trampoline_addr, uintptr(unsafe.Pointer(&mib[0])), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if errno != 0 { + return errno + } + return nil +} + +var libc_sysctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sysctl sysctl "libc.so" + +func sysctlUint64(mib []uint32) (uint64, bool) { + var out uint64 + nout := unsafe.Sizeof(out) + if err := sysctl(mib, (*byte)(unsafe.Pointer(&out)), &nout, nil, 0); err != nil { + return 0, false + } + return out, true +} + +func doinit() { + setMinimalFeatures() + + // Get ID_AA64ISAR0 and ID_AA64ISAR1 from sysctl. + isar0, ok := sysctlUint64([]uint32{_CTL_MACHDEP, _CPU_ID_AA64ISAR0}) + if !ok { + return + } + isar1, ok := sysctlUint64([]uint32{_CTL_MACHDEP, _CPU_ID_AA64ISAR1}) + if !ok { + return + } + parseARM64SystemRegisters(isar0, isar1, 0) + + Initialized = true +} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.s temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.s --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.s 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.s 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,11 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sysctl(SB) + +GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/cpu_other_arm.go temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/cpu_other_arm.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/cpu_other_arm.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/cpu_other_arm.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,10 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !linux && arm +// +build !linux,arm + +package cpu + +func archInit() {} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,10 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build !linux && !netbsd && !openbsd && arm64 +// +build !linux,!netbsd,!openbsd,arm64 + +package cpu + +func doinit() {} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,13 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !linux && (mips64 || mips64le) +// +build !linux +// +build mips64 mips64le + +package cpu + +func archInit() { + Initialized = true +} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,15 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !aix && !linux && (ppc64 || ppc64le) +// +build !aix +// +build !linux +// +build ppc64 ppc64le + +package cpu + +func archInit() { + PPC64.IsPOWER8 = true + Initialized = true +} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,12 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !linux && riscv64 +// +build !linux,riscv64 + +package cpu + +func archInit() { + Initialized = true +} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,17 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ppc64 || ppc64le +// +build ppc64 ppc64le + +package cpu + +const cacheLineSize = 128 + +func initOptions() { + options = []option{ + {Name: "darn", Feature: &PPC64.HasDARN}, + {Name: "scv", Feature: &PPC64.HasSCV}, + } +} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/cpu_riscv64.go temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/cpu_riscv64.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/cpu_riscv64.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/cpu_riscv64.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,12 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
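
The NetBSD arm64 path added above resolves the dotted sysctl name "machdep.cpu0.cpu_id" into a numeric MIB OID one component at a time. Below is a simplified, self-contained sketch of that lookup; the node table is invented for illustration (only CTL_MACHDEP = 7 is a real number), whereas the vendored code discovers the nodes at runtime with CTL_QUERY sysctls.

package main

import (
	"fmt"
	"strings"
)

// Fake namespace: parent OID (rendered as a dotted string) -> name -> node number.
// Only "machdep" = 7 matches the real kernel; the rest is made up.
var nodes = map[string]map[string]int32{
	"":    {"machdep": 7},
	"7":   {"cpu0": 1},
	"7.1": {"cpu_id": 3},
}

func nametomib(name string) ([]int32, error) {
	mib := []int32{}
	key := ""
	for _, part := range strings.Split(name, ".") {
		num, ok := nodes[key][part]
		if !ok {
			return nil, fmt.Errorf("no node %q under %q", part, key)
		}
		mib = append(mib, num)
		if key == "" {
			key = fmt.Sprint(num)
		} else {
			key = key + "." + fmt.Sprint(num)
		}
	}
	return mib, nil
}

func main() {
	mib, err := nametomib("machdep.cpu0.cpu_id")
	fmt.Println(mib, err) // [7 1 3] <nil>
}
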
+ +//go:build riscv64 +// +build riscv64 + +package cpu + +const cacheLineSize = 64 + +func initOptions() {} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/cpu_s390x.go temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/cpu_s390x.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/cpu_s390x.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/cpu_s390x.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,172 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +const cacheLineSize = 256 + +func initOptions() { + options = []option{ + {Name: "zarch", Feature: &S390X.HasZARCH, Required: true}, + {Name: "stfle", Feature: &S390X.HasSTFLE, Required: true}, + {Name: "ldisp", Feature: &S390X.HasLDISP, Required: true}, + {Name: "eimm", Feature: &S390X.HasEIMM, Required: true}, + {Name: "dfp", Feature: &S390X.HasDFP}, + {Name: "etf3eh", Feature: &S390X.HasETF3EH}, + {Name: "msa", Feature: &S390X.HasMSA}, + {Name: "aes", Feature: &S390X.HasAES}, + {Name: "aescbc", Feature: &S390X.HasAESCBC}, + {Name: "aesctr", Feature: &S390X.HasAESCTR}, + {Name: "aesgcm", Feature: &S390X.HasAESGCM}, + {Name: "ghash", Feature: &S390X.HasGHASH}, + {Name: "sha1", Feature: &S390X.HasSHA1}, + {Name: "sha256", Feature: &S390X.HasSHA256}, + {Name: "sha3", Feature: &S390X.HasSHA3}, + {Name: "sha512", Feature: &S390X.HasSHA512}, + {Name: "vx", Feature: &S390X.HasVX}, + {Name: "vxe", Feature: &S390X.HasVXE}, + } +} + +// bitIsSet reports whether the bit at index is set. The bit index +// is in big endian order, so bit index 0 is the leftmost bit. +func bitIsSet(bits []uint64, index uint) bool { + return bits[index/64]&((1<<63)>>(index%64)) != 0 +} + +// facility is a bit index for the named facility. +type facility uint8 + +const ( + // mandatory facilities + zarch facility = 1 // z architecture mode is active + stflef facility = 7 // store-facility-list-extended + ldisp facility = 18 // long-displacement + eimm facility = 21 // extended-immediate + + // miscellaneous facilities + dfp facility = 42 // decimal-floating-point + etf3eh facility = 30 // extended-translation 3 enhancement + + // cryptography facilities + msa facility = 17 // message-security-assist + msa3 facility = 76 // message-security-assist extension 3 + msa4 facility = 77 // message-security-assist extension 4 + msa5 facility = 57 // message-security-assist extension 5 + msa8 facility = 146 // message-security-assist extension 8 + msa9 facility = 155 // message-security-assist extension 9 + + // vector facilities + vx facility = 129 // vector facility + vxe facility = 135 // vector-enhancements 1 + vxe2 facility = 148 // vector-enhancements 2 +) + +// facilityList contains the result of an STFLE call. +// Bits are numbered in big endian order so the +// leftmost bit (the MSB) is at index 0. +type facilityList struct { + bits [4]uint64 +} + +// Has reports whether the given facilities are present. +func (s *facilityList) Has(fs ...facility) bool { + if len(fs) == 0 { + panic("no facility bits provided") + } + for _, f := range fs { + if !bitIsSet(s.bits[:], uint(f)) { + return false + } + } + return true +} + +// function is the code for the named cryptographic function. 
+type function uint8 + +const ( + // KM{,A,C,CTR} function codes + aes128 function = 18 // AES-128 + aes192 function = 19 // AES-192 + aes256 function = 20 // AES-256 + + // K{I,L}MD function codes + sha1 function = 1 // SHA-1 + sha256 function = 2 // SHA-256 + sha512 function = 3 // SHA-512 + sha3_224 function = 32 // SHA3-224 + sha3_256 function = 33 // SHA3-256 + sha3_384 function = 34 // SHA3-384 + sha3_512 function = 35 // SHA3-512 + shake128 function = 36 // SHAKE-128 + shake256 function = 37 // SHAKE-256 + + // KLMD function codes + ghash function = 65 // GHASH +) + +// queryResult contains the result of a Query function +// call. Bits are numbered in big endian order so the +// leftmost bit (the MSB) is at index 0. +type queryResult struct { + bits [2]uint64 +} + +// Has reports whether the given functions are present. +func (q *queryResult) Has(fns ...function) bool { + if len(fns) == 0 { + panic("no function codes provided") + } + for _, f := range fns { + if !bitIsSet(q.bits[:], uint(f)) { + return false + } + } + return true +} + +func doinit() { + initS390Xbase() + + // We need implementations of stfle, km and so on + // to detect cryptographic features. + if !haveAsmFunctions() { + return + } + + // optional cryptographic functions + if S390X.HasMSA { + aes := []function{aes128, aes192, aes256} + + // cipher message + km, kmc := kmQuery(), kmcQuery() + S390X.HasAES = km.Has(aes...) + S390X.HasAESCBC = kmc.Has(aes...) + if S390X.HasSTFLE { + facilities := stfle() + if facilities.Has(msa4) { + kmctr := kmctrQuery() + S390X.HasAESCTR = kmctr.Has(aes...) + } + if facilities.Has(msa8) { + kma := kmaQuery() + S390X.HasAESGCM = kma.Has(aes...) + } + } + + // compute message digest + kimd := kimdQuery() // intermediate (no padding) + klmd := klmdQuery() // last (padding) + S390X.HasSHA1 = kimd.Has(sha1) && klmd.Has(sha1) + S390X.HasSHA256 = kimd.Has(sha256) && klmd.Has(sha256) + S390X.HasSHA512 = kimd.Has(sha512) && klmd.Has(sha512) + S390X.HasGHASH = kimd.Has(ghash) // KLMD-GHASH does not exist + sha3 := []function{ + sha3_224, sha3_256, sha3_384, sha3_512, + shake128, shake256, + } + S390X.HasSHA3 = kimd.Has(sha3...) && klmd.Has(sha3...) + } +} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/cpu_s390x.s temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/cpu_s390x.s --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/cpu_s390x.s 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/cpu_s390x.s 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,58 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build gc +// +build gc + +#include "textflag.h" + +// func stfle() facilityList +TEXT ·stfle(SB), NOSPLIT|NOFRAME, $0-32 + MOVD $ret+0(FP), R1 + MOVD $3, R0 // last doubleword index to store + XC $32, (R1), (R1) // clear 4 doublewords (32 bytes) + WORD $0xb2b01000 // store facility list extended (STFLE) + RET + +// func kmQuery() queryResult +TEXT ·kmQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KM-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB92E0024 // cipher message (KM) + RET + +// func kmcQuery() queryResult +TEXT ·kmcQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KMC-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB92F0024 // cipher message with chaining (KMC) + RET + +// func kmctrQuery() queryResult +TEXT ·kmctrQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KMCTR-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB92D4024 // cipher message with counter (KMCTR) + RET + +// func kmaQuery() queryResult +TEXT ·kmaQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KMA-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xb9296024 // cipher message with authentication (KMA) + RET + +// func kimdQuery() queryResult +TEXT ·kimdQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KIMD-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB93E0024 // compute intermediate message digest (KIMD) + RET + +// func klmdQuery() queryResult +TEXT ·klmdQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KLMD-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB93F0024 // compute last message digest (KLMD) + RET diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/cpu_wasm.go temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/cpu_wasm.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/cpu_wasm.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/cpu_wasm.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,18 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build wasm +// +build wasm + +package cpu + +// We're compiling the cpu package for an unknown (software-abstracted) CPU. +// Make CacheLinePad an empty struct and hope that the usual struct alignment +// rules are good enough. + +const cacheLineSize = 0 + +func initOptions() {} + +func archInit() {} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/cpu_x86.go temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/cpu_x86.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/cpu_x86.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/cpu_x86.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,152 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
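
The s390x facility and query bitmaps above are numbered big-endian: bit index 0 is the most significant bit of the first doubleword. A small standalone check of that convention, with bitIsSet copied verbatim from the cpu_s390x.go hunk:

package main

import "fmt"

// bitIsSet mirrors the function above: index 0 selects the leftmost
// (most significant) bit of bits[0].
func bitIsSet(bits []uint64, index uint) bool {
	return bits[index/64]&((1<<63)>>(index%64)) != 0
}

func main() {
	bits := []uint64{1 << 63, 0, 0, 0} // only facility/bit index 0 is set
	fmt.Println(bitIsSet(bits, 0))  // true
	fmt.Println(bitIsSet(bits, 1))  // false
	fmt.Println(bitIsSet(bits, 64)) // false: first bit of the second doubleword
}
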
+ +//go:build 386 || amd64 || amd64p32 +// +build 386 amd64 amd64p32 + +package cpu + +import "runtime" + +const cacheLineSize = 64 + +func initOptions() { + options = []option{ + {Name: "adx", Feature: &X86.HasADX}, + {Name: "aes", Feature: &X86.HasAES}, + {Name: "avx", Feature: &X86.HasAVX}, + {Name: "avx2", Feature: &X86.HasAVX2}, + {Name: "avx512", Feature: &X86.HasAVX512}, + {Name: "avx512f", Feature: &X86.HasAVX512F}, + {Name: "avx512cd", Feature: &X86.HasAVX512CD}, + {Name: "avx512er", Feature: &X86.HasAVX512ER}, + {Name: "avx512pf", Feature: &X86.HasAVX512PF}, + {Name: "avx512vl", Feature: &X86.HasAVX512VL}, + {Name: "avx512bw", Feature: &X86.HasAVX512BW}, + {Name: "avx512dq", Feature: &X86.HasAVX512DQ}, + {Name: "avx512ifma", Feature: &X86.HasAVX512IFMA}, + {Name: "avx512vbmi", Feature: &X86.HasAVX512VBMI}, + {Name: "avx512vnniw", Feature: &X86.HasAVX5124VNNIW}, + {Name: "avx5124fmaps", Feature: &X86.HasAVX5124FMAPS}, + {Name: "avx512vpopcntdq", Feature: &X86.HasAVX512VPOPCNTDQ}, + {Name: "avx512vpclmulqdq", Feature: &X86.HasAVX512VPCLMULQDQ}, + {Name: "avx512vnni", Feature: &X86.HasAVX512VNNI}, + {Name: "avx512gfni", Feature: &X86.HasAVX512GFNI}, + {Name: "avx512vaes", Feature: &X86.HasAVX512VAES}, + {Name: "avx512vbmi2", Feature: &X86.HasAVX512VBMI2}, + {Name: "avx512bitalg", Feature: &X86.HasAVX512BITALG}, + {Name: "avx512bf16", Feature: &X86.HasAVX512BF16}, + {Name: "amxtile", Feature: &X86.HasAMXTile}, + {Name: "amxint8", Feature: &X86.HasAMXInt8}, + {Name: "amxbf16", Feature: &X86.HasAMXBF16}, + {Name: "bmi1", Feature: &X86.HasBMI1}, + {Name: "bmi2", Feature: &X86.HasBMI2}, + {Name: "cx16", Feature: &X86.HasCX16}, + {Name: "erms", Feature: &X86.HasERMS}, + {Name: "fma", Feature: &X86.HasFMA}, + {Name: "osxsave", Feature: &X86.HasOSXSAVE}, + {Name: "pclmulqdq", Feature: &X86.HasPCLMULQDQ}, + {Name: "popcnt", Feature: &X86.HasPOPCNT}, + {Name: "rdrand", Feature: &X86.HasRDRAND}, + {Name: "rdseed", Feature: &X86.HasRDSEED}, + {Name: "sse3", Feature: &X86.HasSSE3}, + {Name: "sse41", Feature: &X86.HasSSE41}, + {Name: "sse42", Feature: &X86.HasSSE42}, + {Name: "ssse3", Feature: &X86.HasSSSE3}, + + // These capabilities should always be enabled on amd64: + {Name: "sse2", Feature: &X86.HasSSE2, Required: runtime.GOARCH == "amd64"}, + } +} + +func archInit() { + + Initialized = true + + maxID, _, _, _ := cpuid(0, 0) + + if maxID < 1 { + return + } + + _, _, ecx1, edx1 := cpuid(1, 0) + X86.HasSSE2 = isSet(26, edx1) + + X86.HasSSE3 = isSet(0, ecx1) + X86.HasPCLMULQDQ = isSet(1, ecx1) + X86.HasSSSE3 = isSet(9, ecx1) + X86.HasFMA = isSet(12, ecx1) + X86.HasCX16 = isSet(13, ecx1) + X86.HasSSE41 = isSet(19, ecx1) + X86.HasSSE42 = isSet(20, ecx1) + X86.HasPOPCNT = isSet(23, ecx1) + X86.HasAES = isSet(25, ecx1) + X86.HasOSXSAVE = isSet(27, ecx1) + X86.HasRDRAND = isSet(30, ecx1) + + var osSupportsAVX, osSupportsAVX512 bool + // For XGETBV, OSXSAVE bit is required and sufficient. + if X86.HasOSXSAVE { + eax, _ := xgetbv() + // Check if XMM and YMM registers have OS support. + osSupportsAVX = isSet(1, eax) && isSet(2, eax) + + if runtime.GOOS == "darwin" { + // Darwin doesn't save/restore AVX-512 mask registers correctly across signal handlers. + // Since users can't rely on mask register contents, let's not advertise AVX-512 support. + // See issue 49233. + osSupportsAVX512 = false + } else { + // Check if OPMASK and ZMM registers have OS support. 
+ osSupportsAVX512 = osSupportsAVX && isSet(5, eax) && isSet(6, eax) && isSet(7, eax) + } + } + + X86.HasAVX = isSet(28, ecx1) && osSupportsAVX + + if maxID < 7 { + return + } + + _, ebx7, ecx7, edx7 := cpuid(7, 0) + X86.HasBMI1 = isSet(3, ebx7) + X86.HasAVX2 = isSet(5, ebx7) && osSupportsAVX + X86.HasBMI2 = isSet(8, ebx7) + X86.HasERMS = isSet(9, ebx7) + X86.HasRDSEED = isSet(18, ebx7) + X86.HasADX = isSet(19, ebx7) + + X86.HasAVX512 = isSet(16, ebx7) && osSupportsAVX512 // Because avx-512 foundation is the core required extension + if X86.HasAVX512 { + X86.HasAVX512F = true + X86.HasAVX512CD = isSet(28, ebx7) + X86.HasAVX512ER = isSet(27, ebx7) + X86.HasAVX512PF = isSet(26, ebx7) + X86.HasAVX512VL = isSet(31, ebx7) + X86.HasAVX512BW = isSet(30, ebx7) + X86.HasAVX512DQ = isSet(17, ebx7) + X86.HasAVX512IFMA = isSet(21, ebx7) + X86.HasAVX512VBMI = isSet(1, ecx7) + X86.HasAVX5124VNNIW = isSet(2, edx7) + X86.HasAVX5124FMAPS = isSet(3, edx7) + X86.HasAVX512VPOPCNTDQ = isSet(14, ecx7) + X86.HasAVX512VPCLMULQDQ = isSet(10, ecx7) + X86.HasAVX512VNNI = isSet(11, ecx7) + X86.HasAVX512GFNI = isSet(8, ecx7) + X86.HasAVX512VAES = isSet(9, ecx7) + X86.HasAVX512VBMI2 = isSet(6, ecx7) + X86.HasAVX512BITALG = isSet(12, ecx7) + + eax71, _, _, _ := cpuid(7, 1) + X86.HasAVX512BF16 = isSet(5, eax71) + } + + X86.HasAMXTile = isSet(24, edx7) + X86.HasAMXInt8 = isSet(25, edx7) + X86.HasAMXBF16 = isSet(22, edx7) +} + +func isSet(bitpos uint, value uint32) bool { + return value&(1<> 63)) +) + +// For those platforms don't have a 'cpuid' equivalent we use HWCAP/HWCAP2 +// These are initialized in cpu_$GOARCH.go +// and should not be changed after they are initialized. +var hwCap uint +var hwCap2 uint + +func readHWCAP() error { + // For Go 1.21+, get auxv from the Go runtime. + if a := getAuxv(); len(a) > 0 { + for len(a) >= 2 { + tag, val := a[0], uint(a[1]) + a = a[2:] + switch tag { + case _AT_HWCAP: + hwCap = val + case _AT_HWCAP2: + hwCap2 = val + } + } + return nil + } + + buf, err := os.ReadFile(procAuxv) + if err != nil { + // e.g. on android /proc/self/auxv is not accessible, so silently + // ignore the error and leave Initialized = false. On some + // architectures (e.g. arm64) doinit() implements a fallback + // readout and will set Initialized = true again. + return err + } + bo := hostByteOrder() + for len(buf) >= 2*(uintSize/8) { + var tag, val uint + switch uintSize { + case 32: + tag = uint(bo.Uint32(buf[0:])) + val = uint(bo.Uint32(buf[4:])) + buf = buf[8:] + case 64: + tag = uint(bo.Uint64(buf[0:])) + val = uint(bo.Uint64(buf[8:])) + buf = buf[16:] + } + switch tag { + case _AT_HWCAP: + hwCap = val + case _AT_HWCAP2: + hwCap2 = val + } + } + return nil +} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/parse.go temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/parse.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/parse.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/parse.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,43 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import "strconv" + +// parseRelease parses a dot-separated version number. It follows the semver +// syntax, but allows the minor and patch versions to be elided. +// +// This is a copy of the Go runtime's parseRelease from +// https://golang.org/cl/209597. 
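
On the consumer side, the x86 archInit above decodes CPUID leaves 1 and 7 (plus the XGETBV check for OS register-state support) into the package's exported X86 struct. A minimal sketch of reading those flags; the printed subset is an arbitrary choice:

package main

import (
	"fmt"

	"golang.org/x/sys/cpu"
)

func main() {
	// These fields are populated at package init by the archInit shown above.
	fmt.Println("sse4.2:", cpu.X86.HasSSE42)
	fmt.Println("avx2:  ", cpu.X86.HasAVX2) // also requires OS support for YMM state
	// Per the darwin special case above, this is always reported false on macOS.
	fmt.Println("avx512:", cpu.X86.HasAVX512)
}
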
+func parseRelease(rel string) (major, minor, patch int, ok bool) { + // Strip anything after a dash or plus. + for i := 0; i < len(rel); i++ { + if rel[i] == '-' || rel[i] == '+' { + rel = rel[:i] + break + } + } + + next := func() (int, bool) { + for i := 0; i < len(rel); i++ { + if rel[i] == '.' { + ver, err := strconv.Atoi(rel[:i]) + rel = rel[i+1:] + return ver, err == nil + } + } + ver, err := strconv.Atoi(rel) + rel = "" + return ver, err == nil + } + if major, ok = next(); !ok || rel == "" { + return + } + if minor, ok = next(); !ok || rel == "" { + return + } + patch, ok = next() + return +} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,54 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build linux && arm64 +// +build linux,arm64 + +package cpu + +import ( + "errors" + "io" + "os" + "strings" +) + +func readLinuxProcCPUInfo() error { + f, err := os.Open("/proc/cpuinfo") + if err != nil { + return err + } + defer f.Close() + + var buf [1 << 10]byte // enough for first CPU + n, err := io.ReadFull(f, buf[:]) + if err != nil && err != io.ErrUnexpectedEOF { + return err + } + in := string(buf[:n]) + const features = "\nFeatures : " + i := strings.Index(in, features) + if i == -1 { + return errors.New("no CPU features found") + } + in = in[i+len(features):] + if i := strings.Index(in, "\n"); i != -1 { + in = in[:i] + } + m := map[string]*bool{} + + initOptions() // need it early here; it's harmless to call twice + for _, o := range options { + m[o.Name] = o.Feature + } + // The EVTSTRM field has alias "evstrm" in Go, but Linux calls it "evtstrm". + m["evtstrm"] = &ARM64.HasEVTSTRM + + for _, f := range strings.Fields(in) { + if p, ok := m[f]; ok { + *p = true + } + } + return nil +} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/runtime_auxv.go temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/runtime_auxv.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/runtime_auxv.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/runtime_auxv.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,16 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +// getAuxvFn is non-nil on Go 1.21+ (via runtime_auxv_go121.go init) +// on platforms that use auxv. +var getAuxvFn func() []uintptr + +func getAuxv() []uintptr { + if getAuxvFn == nil { + return nil + } + return getAuxvFn() +} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,19 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build go1.21 +// +build go1.21 + +package cpu + +import ( + _ "unsafe" // for linkname +) + +//go:linkname runtime_getAuxv runtime.getAuxv +func runtime_getAuxv() []uintptr + +func init() { + getAuxvFn = runtime_getAuxv +} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,27 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Recreate a getsystemcfg syscall handler instead of +// using the one provided by x/sys/unix to avoid having +// the dependency between them. (See golang.org/issue/32102) +// Moreover, this file will be used during the building of +// gccgo's libgo and thus must not used a CGo method. + +//go:build aix && gccgo +// +build aix,gccgo + +package cpu + +import ( + "syscall" +) + +//extern getsystemcfg +func gccgoGetsystemcfg(label uint32) (r uint64) + +func callgetsystemcfg(label int) (r1 uintptr, e1 syscall.Errno) { + r1 = uintptr(gccgoGetsystemcfg(uint32(label))) + e1 = syscall.GetErrno() + return +} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,36 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Minimal copy of x/sys/unix so the cpu package can make a +// system call on AIX without depending on x/sys/unix. +// (See golang.org/issue/32102) + +//go:build aix && ppc64 && gc +// +build aix,ppc64,gc + +package cpu + +import ( + "syscall" + "unsafe" +) + +//go:cgo_import_dynamic libc_getsystemcfg getsystemcfg "libc.a/shr_64.o" + +//go:linkname libc_getsystemcfg libc_getsystemcfg + +type syscallFunc uintptr + +var libc_getsystemcfg syscallFunc + +type errno = syscall.Errno + +// Implemented in runtime/syscall_aix.go. +func rawSyscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err errno) +func syscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err errno) + +func callgetsystemcfg(label int) (r1 uintptr, e1 errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_getsystemcfg)), 1, uintptr(label), 0, 0, 0, 0, 0) + return +} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/internal/unsafeheader/unsafeheader.go temporal-1.22.5/src/vendor/golang.org/x/sys/internal/unsafeheader/unsafeheader.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/internal/unsafeheader/unsafeheader.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/internal/unsafeheader/unsafeheader.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,30 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package unsafeheader contains header declarations for the Go runtime's -// slice and string implementations. 
-// -// This package allows x/sys to use types equivalent to -// reflect.SliceHeader and reflect.StringHeader without introducing -// a dependency on the (relatively heavy) "reflect" package. -package unsafeheader - -import ( - "unsafe" -) - -// Slice is the runtime representation of a slice. -// It cannot be used safely or portably and its representation may change in a later release. -type Slice struct { - Data unsafe.Pointer - Len int - Cap int -} - -// String is the runtime representation of a string. -// It cannot be used safely or portably and its representation may change in a later release. -type String struct { - Data unsafe.Pointer - Len int -} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/mkall.sh temporal-1.22.5/src/vendor/golang.org/x/sys/unix/mkall.sh --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/mkall.sh 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/mkall.sh 2024-02-23 09:46:14.000000000 +0000 @@ -50,7 +50,7 @@ # Use the Docker-based build system # Files generated through docker (use $cmd so you can Ctl-C the build or run) $cmd docker build --tag generate:$GOOS $GOOS - $cmd docker run --interactive --tty --volume $(cd -- "$(dirname -- "$0")/.." && /bin/pwd):/build generate:$GOOS + $cmd docker run --interactive --tty --volume $(cd -- "$(dirname -- "$0")/.." && pwd):/build generate:$GOOS exit fi diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/mkerrors.sh temporal-1.22.5/src/vendor/golang.org/x/sys/unix/mkerrors.sh --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/mkerrors.sh 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/mkerrors.sh 2024-02-23 09:46:14.000000000 +0000 @@ -519,7 +519,7 @@ $2 ~ /^LOCK_(SH|EX|NB|UN)$/ || $2 ~ /^LO_(KEY|NAME)_SIZE$/ || $2 ~ /^LOOP_(CLR|CTL|GET|SET)_/ || - $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|TCP|MCAST|EVFILT|NOTE|SHUT|PROT|MAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR|LOCAL|TCPOPT|UDP)_/ || + $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|TCP|MCAST|EVFILT|NOTE|SHUT|PROT|MAP|MREMAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR|LOCAL|TCPOPT|UDP)_/ || $2 ~ /^NFC_(GENL|PROTO|COMM|RF|SE|DIRECTION|LLCP|SOCKPROTO)_/ || $2 ~ /^NFC_.*_(MAX)?SIZE$/ || $2 ~ /^RAW_PAYLOAD_/ || @@ -583,6 +583,7 @@ $2 ~ /^PERF_/ || $2 ~ /^SECCOMP_MODE_/ || $2 ~ /^SEEK_/ || + $2 ~ /^SCHED_/ || $2 ~ /^SPLICE_/ || $2 ~ /^SYNC_FILE_RANGE_/ || $2 !~ /IOC_MAGIC/ && @@ -624,7 +625,7 @@ $2 ~ /^MEM/ || $2 ~ /^WG/ || $2 ~ /^FIB_RULE_/ || - $2 ~ /^BLK[A-Z]*(GET$|SET$|BUF$|PART$|SIZE)/ {printf("\t%s = C.%s\n", $2, $2)} + $2 ~ /^BLK[A-Z]*(GET$|SET$|BUF$|PART$|SIZE|IOMIN$|IOOPT$|ALIGNOFF$|DISCARD|ROTATIONAL$|ZEROOUT$|GETDISKSEQ$)/ {printf("\t%s = C.%s\n", $2, $2)} $2 ~ /^__WCOREFLAG$/ {next} $2 ~ /^__W[A-Z0-9]+$/ {printf("\t%s = C.%s\n", substr($2,3), $2)} @@ -741,7 +742,8 @@ e = errors[i].num; if(i > 0 && errors[i-1].num == e) continue; - strcpy(buf, strerror(e)); + strncpy(buf, strerror(e), sizeof(buf) - 1); + buf[sizeof(buf) - 1] = '\0'; // lowercase first letter: Bad -> bad, but STREAM -> STREAM. if(A <= buf[0] && buf[0] <= Z && a <= buf[1] && buf[1] <= z) buf[0] += a - A; @@ -760,7 +762,8 @@ e = signals[i].num; if(i > 0 && signals[i-1].num == e) continue; - strcpy(buf, strsignal(e)); + strncpy(buf, strsignal(e), sizeof(buf) - 1); + buf[sizeof(buf) - 1] = '\0'; // lowercase first letter: Bad -> bad, but STREAM -> STREAM. 
if(A <= buf[0] && buf[0] <= Z && a <= buf[1] && buf[1] <= z) buf[0] += a - A; diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/mmap_nomremap.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/mmap_nomremap.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/mmap_nomremap.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/mmap_nomremap.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,14 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build aix || darwin || dragonfly || freebsd || openbsd || solaris +// +build aix darwin dragonfly freebsd openbsd solaris + +package unix + +var mapper = &mmapper{ + active: make(map[*byte][]byte), + mmap: mmap, + munmap: munmap, +} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/mremap.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/mremap.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/mremap.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/mremap.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,53 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build linux || netbsd +// +build linux netbsd + +package unix + +import "unsafe" + +type mremapMmapper struct { + mmapper + mremap func(oldaddr uintptr, oldlength uintptr, newlength uintptr, flags int, newaddr uintptr) (xaddr uintptr, err error) +} + +var mapper = &mremapMmapper{ + mmapper: mmapper{ + active: make(map[*byte][]byte), + mmap: mmap, + munmap: munmap, + }, + mremap: mremap, +} + +func (m *mremapMmapper) Mremap(oldData []byte, newLength int, flags int) (data []byte, err error) { + if newLength <= 0 || len(oldData) == 0 || len(oldData) != cap(oldData) || flags&mremapFixed != 0 { + return nil, EINVAL + } + + pOld := &oldData[cap(oldData)-1] + m.Lock() + defer m.Unlock() + bOld := m.active[pOld] + if bOld == nil || &bOld[0] != &oldData[0] { + return nil, EINVAL + } + newAddr, errno := m.mremap(uintptr(unsafe.Pointer(&bOld[0])), uintptr(len(bOld)), uintptr(newLength), flags, 0) + if errno != nil { + return nil, errno + } + bNew := unsafe.Slice((*byte)(unsafe.Pointer(newAddr)), newLength) + pNew := &bNew[cap(bNew)-1] + if flags&mremapDontunmap == 0 { + delete(m.active, pOld) + } + m.active[pNew] = bNew + return bNew, nil +} + +func Mremap(oldData []byte, newLength int, flags int) (data []byte, err error) { + return mapper.Mremap(oldData, newLength, flags) +} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/ptrace_darwin.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/ptrace_darwin.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/ptrace_darwin.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/ptrace_darwin.go 2024-02-23 09:46:14.000000000 +0000 @@ -7,12 +7,6 @@ package unix -import "unsafe" - func ptrace(request int, pid int, addr uintptr, data uintptr) error { return ptrace1(request, pid, addr, data) } - -func ptracePtr(request int, pid int, addr uintptr, data unsafe.Pointer) error { - return ptrace1Ptr(request, pid, addr, data) -} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/ptrace_ios.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/ptrace_ios.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/ptrace_ios.go 2023-09-29 14:03:34.000000000 +0000 
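
The new mremap.go above exposes a Mremap wrapper on Linux and NetBSD. A rough Linux-only usage sketch follows; the one-page-to-two-pages resize and the MREMAP_MAYMOVE flag are illustrative choices, not part of the patch:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	page := unix.Getpagesize()

	// Map one anonymous page; Mmap returns a slice with len == cap, which is
	// what the Mremap wrapper above requires of oldData.
	b, err := unix.Mmap(-1, 0, page, unix.PROT_READ|unix.PROT_WRITE,
		unix.MAP_ANON|unix.MAP_PRIVATE)
	if err != nil {
		panic(err)
	}

	// Grow the mapping to two pages, letting the kernel move it if necessary.
	b, err = unix.Mremap(b, 2*page, unix.MREMAP_MAYMOVE)
	if err != nil {
		panic(err)
	}
	fmt.Println("resized to", len(b), "bytes")

	_ = unix.Munmap(b)
}
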
+++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/ptrace_ios.go 2024-02-23 09:46:14.000000000 +0000 @@ -7,12 +7,6 @@ package unix -import "unsafe" - func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { return ENOTSUP } - -func ptracePtr(request int, pid int, addr uintptr, data unsafe.Pointer) (err error) { - return ENOTSUP -} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/syscall_aix.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/syscall_aix.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/syscall_aix.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/syscall_aix.go 2024-02-23 09:46:14.000000000 +0000 @@ -487,8 +487,6 @@ //sys Unlinkat(dirfd int, path string, flags int) (err error) //sys Ustat(dev int, ubuf *Ustat_t) (err error) //sys write(fd int, p []byte) (n int, err error) -//sys readlen(fd int, p *byte, np int) (n int, err error) = read -//sys writelen(fd int, p *byte, np int) (n int, err error) = write //sys Dup2(oldfd int, newfd int) (err error) //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = posix_fadvise64 @@ -535,21 +533,6 @@ //sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) = nsendmsg //sys munmap(addr uintptr, length uintptr) (err error) - -var mapper = &mmapper{ - active: make(map[*byte][]byte), - mmap: mmap, - munmap: munmap, -} - -func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { - return mapper.Mmap(fd, offset, length, prot, flags) -} - -func Munmap(b []byte) (err error) { - return mapper.Munmap(b) -} - //sys Madvise(b []byte, advice int) (err error) //sys Mprotect(b []byte, prot int) (err error) //sys Mlock(b []byte) (err error) diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/syscall_bsd.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/syscall_bsd.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/syscall_bsd.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/syscall_bsd.go 2024-02-23 09:46:14.000000000 +0000 @@ -601,20 +601,6 @@ // Gethostuuid(uuid *byte, timeout *Timespec) (err error) // Ptrace(req int, pid int, addr uintptr, data int) (ret uintptr, err error) -var mapper = &mmapper{ - active: make(map[*byte][]byte), - mmap: mmap, - munmap: munmap, -} - -func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { - return mapper.Mmap(fd, offset, length, prot, flags) -} - -func Munmap(b []byte) (err error) { - return mapper.Munmap(b) -} - //sys Madvise(b []byte, behav int) (err error) //sys Mlock(b []byte) (err error) //sys Mlockall(flags int) (err error) diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/syscall_darwin.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/syscall_darwin.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/syscall_darwin.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/syscall_darwin.go 2024-02-23 09:46:14.000000000 +0000 @@ -510,30 +510,36 @@ return nil, err } - // Find size. - n := uintptr(0) - if err := sysctl(mib, nil, &n, nil, 0); err != nil { - return nil, err - } - if n == 0 { - return nil, nil - } - if n%SizeofKinfoProc != 0 { - return nil, fmt.Errorf("sysctl() returned a size of %d, which is not a multiple of %d", n, SizeofKinfoProc) - } - - // Read into buffer of that size. 
- buf := make([]KinfoProc, n/SizeofKinfoProc) - if err := sysctl(mib, (*byte)(unsafe.Pointer(&buf[0])), &n, nil, 0); err != nil { - return nil, err + for { + // Find size. + n := uintptr(0) + if err := sysctl(mib, nil, &n, nil, 0); err != nil { + return nil, err + } + if n == 0 { + return nil, nil + } + if n%SizeofKinfoProc != 0 { + return nil, fmt.Errorf("sysctl() returned a size of %d, which is not a multiple of %d", n, SizeofKinfoProc) + } + + // Read into buffer of that size. + buf := make([]KinfoProc, n/SizeofKinfoProc) + if err := sysctl(mib, (*byte)(unsafe.Pointer(&buf[0])), &n, nil, 0); err != nil { + if err == ENOMEM { + // Process table grew. Try again. + continue + } + return nil, err + } + if n%SizeofKinfoProc != 0 { + return nil, fmt.Errorf("sysctl() returned a size of %d, which is not a multiple of %d", n, SizeofKinfoProc) + } + + // The actual call may return less than the original reported required + // size so ensure we deal with that. + return buf[:n/SizeofKinfoProc], nil } - if n%SizeofKinfoProc != 0 { - return nil, fmt.Errorf("sysctl() returned a size of %d, which is not a multiple of %d", n, SizeofKinfoProc) - } - - // The actual call may return less than the original reported required - // size so ensure we deal with that. - return buf[:n/SizeofKinfoProc], nil } //sys sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) @@ -638,189 +644,3 @@ //sys write(fd int, p []byte) (n int, err error) //sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) //sys munmap(addr uintptr, length uintptr) (err error) -//sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ -//sys writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE - -/* - * Unimplemented - */ -// Profil -// Sigaction -// Sigprocmask -// Getlogin -// Sigpending -// Sigaltstack -// Ioctl -// Reboot -// Execve -// Vfork -// Sbrk -// Sstk -// Ovadvise -// Mincore -// Setitimer -// Swapon -// Select -// Sigsuspend -// Readv -// Writev -// Nfssvc -// Getfh -// Quotactl -// Csops -// Waitid -// Add_profil -// Kdebug_trace -// Sigreturn -// Atsocket -// Kqueue_from_portset_np -// Kqueue_portset -// Getattrlist -// Getdirentriesattr -// Searchfs -// Delete -// Copyfile -// Watchevent -// Waitevent -// Modwatch -// Fsctl -// Initgroups -// Posix_spawn -// Nfsclnt -// Fhopen -// Minherit -// Semsys -// Msgsys -// Shmsys -// Semctl -// Semget -// Semop -// Msgctl -// Msgget -// Msgsnd -// Msgrcv -// Shm_open -// Shm_unlink -// Sem_open -// Sem_close -// Sem_unlink -// Sem_wait -// Sem_trywait -// Sem_post -// Sem_getvalue -// Sem_init -// Sem_destroy -// Open_extended -// Umask_extended -// Stat_extended -// Lstat_extended -// Fstat_extended -// Chmod_extended -// Fchmod_extended -// Access_extended -// Settid -// Gettid -// Setsgroups -// Getsgroups -// Setwgroups -// Getwgroups -// Mkfifo_extended -// Mkdir_extended -// Identitysvc -// Shared_region_check_np -// Shared_region_map_np -// __pthread_mutex_destroy -// __pthread_mutex_init -// __pthread_mutex_lock -// __pthread_mutex_trylock -// __pthread_mutex_unlock -// __pthread_cond_init -// __pthread_cond_destroy -// __pthread_cond_broadcast -// __pthread_cond_signal -// Setsid_with_pid -// __pthread_cond_timedwait -// Aio_fsync -// Aio_return -// Aio_suspend -// Aio_cancel -// Aio_error -// Aio_read -// Aio_write -// Lio_listio -// __pthread_cond_wait -// Iopolicysys -// __pthread_kill -// __pthread_sigmask -// __sigwait -// __disable_threadsignal -// 
__pthread_markcancel -// __pthread_canceled -// __semwait_signal -// Proc_info -// sendfile -// Stat64_extended -// Lstat64_extended -// Fstat64_extended -// __pthread_chdir -// __pthread_fchdir -// Audit -// Auditon -// Getauid -// Setauid -// Getaudit -// Setaudit -// Getaudit_addr -// Setaudit_addr -// Auditctl -// Bsdthread_create -// Bsdthread_terminate -// Stack_snapshot -// Bsdthread_register -// Workq_open -// Workq_ops -// __mac_execve -// __mac_syscall -// __mac_get_file -// __mac_set_file -// __mac_get_link -// __mac_set_link -// __mac_get_proc -// __mac_set_proc -// __mac_get_fd -// __mac_set_fd -// __mac_get_pid -// __mac_get_lcid -// __mac_get_lctx -// __mac_set_lctx -// Setlcid -// Read_nocancel -// Write_nocancel -// Open_nocancel -// Close_nocancel -// Wait4_nocancel -// Recvmsg_nocancel -// Sendmsg_nocancel -// Recvfrom_nocancel -// Accept_nocancel -// Fcntl_nocancel -// Select_nocancel -// Fsync_nocancel -// Connect_nocancel -// Sigsuspend_nocancel -// Readv_nocancel -// Writev_nocancel -// Sendto_nocancel -// Pread_nocancel -// Pwrite_nocancel -// Waitid_nocancel -// Poll_nocancel -// Msgsnd_nocancel -// Msgrcv_nocancel -// Sem_wait_nocancel -// Aio_suspend_nocancel -// __sigwait_nocancel -// __semwait_signal_nocancel -// __mac_mount -// __mac_get_mount -// __mac_getfsstat diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go 2024-02-23 09:46:14.000000000 +0000 @@ -47,6 +47,5 @@ //sys getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) = SYS_GETFSSTAT64 //sys Lstat(path string, stat *Stat_t) (err error) = SYS_LSTAT64 //sys ptrace1(request int, pid int, addr uintptr, data uintptr) (err error) = SYS_ptrace -//sys ptrace1Ptr(request int, pid int, addr unsafe.Pointer, data uintptr) (err error) = SYS_ptrace //sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64 //sys Statfs(path string, stat *Statfs_t) (err error) = SYS_STATFS64 diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go 2024-02-23 09:46:14.000000000 +0000 @@ -47,6 +47,5 @@ //sys getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) = SYS_GETFSSTAT //sys Lstat(path string, stat *Stat_t) (err error) //sys ptrace1(request int, pid int, addr uintptr, data uintptr) (err error) = SYS_ptrace -//sys ptrace1Ptr(request int, pid int, addr unsafe.Pointer, data uintptr) (err error) = SYS_ptrace //sys Stat(path string, stat *Stat_t) (err error) //sys Statfs(path string, stat *Statfs_t) (err error) diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/syscall_dragonfly.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/syscall_dragonfly.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/syscall_dragonfly.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/syscall_dragonfly.go 2024-02-23 09:46:14.000000000 +0000 @@ -343,203 +343,5 @@ //sys write(fd int, p []byte) (n int, err error) //sys mmap(addr uintptr, length uintptr, prot int, flag int, fd 
int, pos int64) (ret uintptr, err error) //sys munmap(addr uintptr, length uintptr) (err error) -//sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ -//sys writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE //sys accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) //sys utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) - -/* - * Unimplemented - * TODO(jsing): Update this list for DragonFly. - */ -// Profil -// Sigaction -// Sigprocmask -// Getlogin -// Sigpending -// Sigaltstack -// Reboot -// Execve -// Vfork -// Sbrk -// Sstk -// Ovadvise -// Mincore -// Setitimer -// Swapon -// Select -// Sigsuspend -// Readv -// Writev -// Nfssvc -// Getfh -// Quotactl -// Mount -// Csops -// Waitid -// Add_profil -// Kdebug_trace -// Sigreturn -// Atsocket -// Kqueue_from_portset_np -// Kqueue_portset -// Getattrlist -// Setattrlist -// Getdirentriesattr -// Searchfs -// Delete -// Copyfile -// Watchevent -// Waitevent -// Modwatch -// Getxattr -// Fgetxattr -// Setxattr -// Fsetxattr -// Removexattr -// Fremovexattr -// Listxattr -// Flistxattr -// Fsctl -// Initgroups -// Posix_spawn -// Nfsclnt -// Fhopen -// Minherit -// Semsys -// Msgsys -// Shmsys -// Semctl -// Semget -// Semop -// Msgctl -// Msgget -// Msgsnd -// Msgrcv -// Shmat -// Shmctl -// Shmdt -// Shmget -// Shm_open -// Shm_unlink -// Sem_open -// Sem_close -// Sem_unlink -// Sem_wait -// Sem_trywait -// Sem_post -// Sem_getvalue -// Sem_init -// Sem_destroy -// Open_extended -// Umask_extended -// Stat_extended -// Lstat_extended -// Fstat_extended -// Chmod_extended -// Fchmod_extended -// Access_extended -// Settid -// Gettid -// Setsgroups -// Getsgroups -// Setwgroups -// Getwgroups -// Mkfifo_extended -// Mkdir_extended -// Identitysvc -// Shared_region_check_np -// Shared_region_map_np -// __pthread_mutex_destroy -// __pthread_mutex_init -// __pthread_mutex_lock -// __pthread_mutex_trylock -// __pthread_mutex_unlock -// __pthread_cond_init -// __pthread_cond_destroy -// __pthread_cond_broadcast -// __pthread_cond_signal -// Setsid_with_pid -// __pthread_cond_timedwait -// Aio_fsync -// Aio_return -// Aio_suspend -// Aio_cancel -// Aio_error -// Aio_read -// Aio_write -// Lio_listio -// __pthread_cond_wait -// Iopolicysys -// __pthread_kill -// __pthread_sigmask -// __sigwait -// __disable_threadsignal -// __pthread_markcancel -// __pthread_canceled -// __semwait_signal -// Proc_info -// Stat64_extended -// Lstat64_extended -// Fstat64_extended -// __pthread_chdir -// __pthread_fchdir -// Audit -// Auditon -// Getauid -// Setauid -// Getaudit -// Setaudit -// Getaudit_addr -// Setaudit_addr -// Auditctl -// Bsdthread_create -// Bsdthread_terminate -// Stack_snapshot -// Bsdthread_register -// Workq_open -// Workq_ops -// __mac_execve -// __mac_syscall -// __mac_get_file -// __mac_set_file -// __mac_get_link -// __mac_set_link -// __mac_get_proc -// __mac_set_proc -// __mac_get_fd -// __mac_set_fd -// __mac_get_pid -// __mac_get_lcid -// __mac_get_lctx -// __mac_set_lctx -// Setlcid -// Read_nocancel -// Write_nocancel -// Open_nocancel -// Close_nocancel -// Wait4_nocancel -// Recvmsg_nocancel -// Sendmsg_nocancel -// Recvfrom_nocancel -// Accept_nocancel -// Fcntl_nocancel -// Select_nocancel -// Fsync_nocancel -// Connect_nocancel -// Sigsuspend_nocancel -// Readv_nocancel -// Writev_nocancel -// Sendto_nocancel -// Pread_nocancel -// Pwrite_nocancel -// Waitid_nocancel -// Msgsnd_nocancel -// Msgrcv_nocancel -// 
Sem_wait_nocancel -// Aio_suspend_nocancel -// __sigwait_nocancel -// __semwait_signal_nocancel -// __mac_mount -// __mac_get_mount -// __mac_getfsstat diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/syscall_freebsd.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/syscall_freebsd.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/syscall_freebsd.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/syscall_freebsd.go 2024-02-23 09:46:14.000000000 +0000 @@ -449,197 +449,5 @@ //sys write(fd int, p []byte) (n int, err error) //sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) //sys munmap(addr uintptr, length uintptr) (err error) -//sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ -//sys writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE //sys accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) //sys utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) - -/* - * Unimplemented - */ -// Profil -// Sigaction -// Sigprocmask -// Getlogin -// Sigpending -// Sigaltstack -// Ioctl -// Reboot -// Execve -// Vfork -// Sbrk -// Sstk -// Ovadvise -// Mincore -// Setitimer -// Swapon -// Select -// Sigsuspend -// Readv -// Writev -// Nfssvc -// Getfh -// Quotactl -// Mount -// Csops -// Waitid -// Add_profil -// Kdebug_trace -// Sigreturn -// Atsocket -// Kqueue_from_portset_np -// Kqueue_portset -// Getattrlist -// Setattrlist -// Getdents -// Getdirentriesattr -// Searchfs -// Delete -// Copyfile -// Watchevent -// Waitevent -// Modwatch -// Fsctl -// Initgroups -// Posix_spawn -// Nfsclnt -// Fhopen -// Minherit -// Semsys -// Msgsys -// Shmsys -// Semctl -// Semget -// Semop -// Msgctl -// Msgget -// Msgsnd -// Msgrcv -// Shmat -// Shmctl -// Shmdt -// Shmget -// Shm_open -// Shm_unlink -// Sem_open -// Sem_close -// Sem_unlink -// Sem_wait -// Sem_trywait -// Sem_post -// Sem_getvalue -// Sem_init -// Sem_destroy -// Open_extended -// Umask_extended -// Stat_extended -// Lstat_extended -// Fstat_extended -// Chmod_extended -// Fchmod_extended -// Access_extended -// Settid -// Gettid -// Setsgroups -// Getsgroups -// Setwgroups -// Getwgroups -// Mkfifo_extended -// Mkdir_extended -// Identitysvc -// Shared_region_check_np -// Shared_region_map_np -// __pthread_mutex_destroy -// __pthread_mutex_init -// __pthread_mutex_lock -// __pthread_mutex_trylock -// __pthread_mutex_unlock -// __pthread_cond_init -// __pthread_cond_destroy -// __pthread_cond_broadcast -// __pthread_cond_signal -// Setsid_with_pid -// __pthread_cond_timedwait -// Aio_fsync -// Aio_return -// Aio_suspend -// Aio_cancel -// Aio_error -// Aio_read -// Aio_write -// Lio_listio -// __pthread_cond_wait -// Iopolicysys -// __pthread_kill -// __pthread_sigmask -// __sigwait -// __disable_threadsignal -// __pthread_markcancel -// __pthread_canceled -// __semwait_signal -// Proc_info -// Stat64_extended -// Lstat64_extended -// Fstat64_extended -// __pthread_chdir -// __pthread_fchdir -// Audit -// Auditon -// Getauid -// Setauid -// Getaudit -// Setaudit -// Getaudit_addr -// Setaudit_addr -// Auditctl -// Bsdthread_create -// Bsdthread_terminate -// Stack_snapshot -// Bsdthread_register -// Workq_open -// Workq_ops -// __mac_execve -// __mac_syscall -// __mac_get_file -// __mac_set_file -// __mac_get_link -// __mac_set_link -// __mac_get_proc -// __mac_set_proc -// __mac_get_fd -// __mac_set_fd -// __mac_get_pid -// 
__mac_get_lcid -// __mac_get_lctx -// __mac_set_lctx -// Setlcid -// Read_nocancel -// Write_nocancel -// Open_nocancel -// Close_nocancel -// Wait4_nocancel -// Recvmsg_nocancel -// Sendmsg_nocancel -// Recvfrom_nocancel -// Accept_nocancel -// Fcntl_nocancel -// Select_nocancel -// Fsync_nocancel -// Connect_nocancel -// Sigsuspend_nocancel -// Readv_nocancel -// Writev_nocancel -// Sendto_nocancel -// Pread_nocancel -// Pwrite_nocancel -// Waitid_nocancel -// Poll_nocancel -// Msgsnd_nocancel -// Msgrcv_nocancel -// Sem_wait_nocancel -// Aio_suspend_nocancel -// __sigwait_nocancel -// __semwait_signal_nocancel -// __mac_mount -// __mac_get_mount -// __mac_getfsstat diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/syscall_linux.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/syscall_linux.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/syscall_linux.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/syscall_linux.go 2024-02-23 09:46:14.000000000 +0000 @@ -693,10 +693,10 @@ func (sa *SockaddrALG) sockaddr() (unsafe.Pointer, _Socklen, error) { // Leave room for NUL byte terminator. - if len(sa.Type) > 13 { + if len(sa.Type) > len(sa.raw.Type)-1 { return nil, 0, EINVAL } - if len(sa.Name) > 63 { + if len(sa.Name) > len(sa.raw.Name)-1 { return nil, 0, EINVAL } @@ -704,17 +704,8 @@ sa.raw.Feat = sa.Feature sa.raw.Mask = sa.Mask - typ, err := ByteSliceFromString(sa.Type) - if err != nil { - return nil, 0, err - } - name, err := ByteSliceFromString(sa.Name) - if err != nil { - return nil, 0, err - } - - copy(sa.raw.Type[:], typ) - copy(sa.raw.Name[:], name) + copy(sa.raw.Type[:], sa.Type) + copy(sa.raw.Name[:], sa.Name) return unsafe.Pointer(&sa.raw), SizeofSockaddrALG, nil } @@ -1699,12 +1690,23 @@ return ptracePoke(PTRACE_POKEUSR, PTRACE_PEEKUSR, pid, addr, data) } +// elfNT_PRSTATUS is a copy of the debug/elf.NT_PRSTATUS constant so +// x/sys/unix doesn't need to depend on debug/elf and thus +// compress/zlib, debug/dwarf, and other packages. 
+const elfNT_PRSTATUS = 1 + func PtraceGetRegs(pid int, regsout *PtraceRegs) (err error) { - return ptracePtr(PTRACE_GETREGS, pid, 0, unsafe.Pointer(regsout)) + var iov Iovec + iov.Base = (*byte)(unsafe.Pointer(regsout)) + iov.SetLen(int(unsafe.Sizeof(*regsout))) + return ptracePtr(PTRACE_GETREGSET, pid, uintptr(elfNT_PRSTATUS), unsafe.Pointer(&iov)) } func PtraceSetRegs(pid int, regs *PtraceRegs) (err error) { - return ptracePtr(PTRACE_SETREGS, pid, 0, unsafe.Pointer(regs)) + var iov Iovec + iov.Base = (*byte)(unsafe.Pointer(regs)) + iov.SetLen(int(unsafe.Sizeof(*regs))) + return ptracePtr(PTRACE_SETREGSET, pid, uintptr(elfNT_PRSTATUS), unsafe.Pointer(&iov)) } func PtraceSetOptions(pid int, options int) (err error) { @@ -1874,7 +1876,7 @@ //sys PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) //sys PivotRoot(newroot string, putold string) (err error) = SYS_PIVOT_ROOT //sys Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) -//sys Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) = SYS_PSELECT6 +//sys pselect6(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *sigset_argpack) (n int, err error) //sys read(fd int, p []byte) (n int, err error) //sys Removexattr(path string, attr string) (err error) //sys Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) @@ -1977,8 +1979,6 @@ //sys Unshare(flags int) (err error) //sys write(fd int, p []byte) (n int, err error) //sys exitThread(code int) (err error) = SYS_EXIT -//sys readlen(fd int, p *byte, np int) (n int, err error) = SYS_READ -//sys writelen(fd int, p *byte, np int) (n int, err error) = SYS_WRITE //sys readv(fd int, iovs []Iovec) (n int, err error) = SYS_READV //sys writev(fd int, iovs []Iovec) (n int, err error) = SYS_WRITEV //sys preadv(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr) (n int, err error) = SYS_PREADV @@ -2113,21 +2113,7 @@ // mmap varies by architecture; see syscall_linux_*.go. //sys munmap(addr uintptr, length uintptr) (err error) - -var mapper = &mmapper{ - active: make(map[*byte][]byte), - mmap: mmap, - munmap: munmap, -} - -func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { - return mapper.Mmap(fd, offset, length, prot, flags) -} - -func Munmap(b []byte) (err error) { - return mapper.Munmap(b) -} - +//sys mremap(oldaddr uintptr, oldlength uintptr, newlength uintptr, flags int, newaddr uintptr) (xaddr uintptr, err error) //sys Madvise(b []byte, advice int) (err error) //sys Mprotect(b []byte, prot int) (err error) //sys Mlock(b []byte) (err error) @@ -2136,6 +2122,12 @@ //sys Munlock(b []byte) (err error) //sys Munlockall() (err error) +const ( + mremapFixed = MREMAP_FIXED + mremapDontunmap = MREMAP_DONTUNMAP + mremapMaymove = MREMAP_MAYMOVE +) + // Vmsplice splices user pages from a slice of Iovecs into a pipe specified by fd, // using the specified flags. 
func Vmsplice(fd int, iovs []Iovec, flags int) (int, error) { @@ -2420,99 +2412,73 @@ return rtSigprocmask(how, set, oldset, _C__NSIG/8) } -/* - * Unimplemented - */ -// AfsSyscall -// ArchPrctl -// Brk -// ClockNanosleep -// ClockSettime -// Clone -// EpollCtlOld -// EpollPwait -// EpollWaitOld -// Execve -// Fork -// Futex -// GetKernelSyms -// GetMempolicy -// GetRobustList -// GetThreadArea -// Getpmsg -// IoCancel -// IoDestroy -// IoGetevents -// IoSetup -// IoSubmit -// IoprioGet -// IoprioSet -// KexecLoad -// LookupDcookie -// Mbind -// MigratePages -// Mincore -// ModifyLdt -// Mount -// MovePages -// MqGetsetattr -// MqNotify -// MqOpen -// MqTimedreceive -// MqTimedsend -// MqUnlink -// Mremap -// Msgctl -// Msgget -// Msgrcv -// Msgsnd -// Nfsservctl -// Personality -// Pselect6 -// Ptrace -// Putpmsg -// Quotactl -// Readahead -// Readv -// RemapFilePages -// RestartSyscall -// RtSigaction -// RtSigpending -// RtSigqueueinfo -// RtSigreturn -// RtSigsuspend -// RtSigtimedwait -// SchedGetPriorityMax -// SchedGetPriorityMin -// SchedGetparam -// SchedGetscheduler -// SchedRrGetInterval -// SchedSetparam -// SchedYield -// Security -// Semctl -// Semget -// Semop -// Semtimedop -// SetMempolicy -// SetRobustList -// SetThreadArea -// SetTidAddress -// Sigaltstack -// Swapoff -// Swapon -// Sysfs -// TimerCreate -// TimerDelete -// TimerGetoverrun -// TimerGettime -// TimerSettime -// Tkill (obsolete) -// Tuxcall -// Umount2 -// Uselib -// Utimensat -// Vfork -// Vhangup -// Vserver -// _Sysctl +//sysnb getresuid(ruid *_C_int, euid *_C_int, suid *_C_int) +//sysnb getresgid(rgid *_C_int, egid *_C_int, sgid *_C_int) + +func Getresuid() (ruid, euid, suid int) { + var r, e, s _C_int + getresuid(&r, &e, &s) + return int(r), int(e), int(s) +} + +func Getresgid() (rgid, egid, sgid int) { + var r, e, s _C_int + getresgid(&r, &e, &s) + return int(r), int(e), int(s) +} + +// Pselect is a wrapper around the Linux pselect6 system call. +// This version does not modify the timeout argument. +func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + // Per https://man7.org/linux/man-pages/man2/select.2.html#NOTES, + // The Linux pselect6() system call modifies its timeout argument. + // [Not modifying the argument] is the behavior required by POSIX.1-2001. + var mutableTimeout *Timespec + if timeout != nil { + mutableTimeout = new(Timespec) + *mutableTimeout = *timeout + } + + // The final argument of the pselect6() system call is not a + // sigset_t * pointer, but is instead a structure + var kernelMask *sigset_argpack + if sigmask != nil { + wordBits := 32 << (^uintptr(0) >> 63) // see math.intSize + + // A sigset stores one bit per signal, + // offset by 1 (because signal 0 does not exist). + // So the number of words needed is ⌈__C_NSIG - 1 / wordBits⌉. + sigsetWords := (_C__NSIG - 1 + wordBits - 1) / (wordBits) + + sigsetBytes := uintptr(sigsetWords * (wordBits / 8)) + kernelMask = &sigset_argpack{ + ss: sigmask, + ssLen: sigsetBytes, + } + } + + return pselect6(nfd, r, w, e, mutableTimeout, kernelMask) +} + +//sys schedSetattr(pid int, attr *SchedAttr, flags uint) (err error) +//sys schedGetattr(pid int, attr *SchedAttr, size uint, flags uint) (err error) + +// SchedSetAttr is a wrapper for sched_setattr(2) syscall. 
+// https://man7.org/linux/man-pages/man2/sched_setattr.2.html +func SchedSetAttr(pid int, attr *SchedAttr, flags uint) error { + if attr == nil { + return EINVAL + } + attr.Size = SizeofSchedAttr + return schedSetattr(pid, attr, flags) +} + +// SchedGetAttr is a wrapper for sched_getattr(2) syscall. +// https://man7.org/linux/man-pages/man2/sched_getattr.2.html +func SchedGetAttr(pid int, flags uint) (*SchedAttr, error) { + attr := &SchedAttr{} + if err := schedGetattr(pid, attr, SizeofSchedAttr, flags); err != nil { + return nil, err + } + return attr, nil +} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go 2024-02-23 09:46:14.000000000 +0000 @@ -40,7 +40,7 @@ if timeout != nil { ts = &Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000} } - return Pselect(nfd, r, w, e, ts, nil) + return pselect6(nfd, r, w, e, ts, nil) } //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go 2024-02-23 09:46:14.000000000 +0000 @@ -33,7 +33,7 @@ if timeout != nil { ts = &Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000} } - return Pselect(nfd, r, w, e, ts, nil) + return pselect6(nfd, r, w, e, ts, nil) } //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go 2024-02-23 09:46:14.000000000 +0000 @@ -28,7 +28,7 @@ if timeout != nil { ts = &Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000} } - return Pselect(nfd, r, w, e, ts, nil) + return pselect6(nfd, r, w, e, ts, nil) } //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go 2024-02-23 09:46:14.000000000 +0000 @@ -31,7 +31,7 @@ if timeout != nil { ts = &Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000} } - return Pselect(nfd, r, w, e, ts, nil) + return pselect6(nfd, r, w, e, ts, nil) } //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go 
2024-02-23 09:46:14.000000000 +0000 @@ -32,7 +32,7 @@ if timeout != nil { ts = &Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000} } - return Pselect(nfd, r, w, e, ts, nil) + return pselect6(nfd, r, w, e, ts, nil) } //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) @@ -177,3 +177,14 @@ } return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags) } + +//sys riscvHWProbe(pairs []RISCVHWProbePairs, cpuCount uintptr, cpus *CPUSet, flags uint) (err error) + +func RISCVHWProbe(pairs []RISCVHWProbePairs, set *CPUSet, flags uint) (err error) { + var setSize uintptr + + if set != nil { + setSize = uintptr(unsafe.Sizeof(*set)) + } + return riscvHWProbe(pairs, setSize, set, flags) +} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/syscall_netbsd.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/syscall_netbsd.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/syscall_netbsd.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/syscall_netbsd.go 2024-02-23 09:46:14.000000000 +0000 @@ -356,266 +356,16 @@ //sys write(fd int, p []byte) (n int, err error) //sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) //sys munmap(addr uintptr, length uintptr) (err error) -//sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ -//sys writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE //sys utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) -/* - * Unimplemented - */ -// ____semctl13 -// __clone -// __fhopen40 -// __fhstat40 -// __fhstatvfs140 -// __fstat30 -// __getcwd -// __getfh30 -// __getlogin -// __lstat30 -// __mount50 -// __msgctl13 -// __msync13 -// __ntp_gettime30 -// __posix_chown -// __posix_fchown -// __posix_lchown -// __posix_rename -// __setlogin -// __shmctl13 -// __sigaction_sigtramp -// __sigaltstack14 -// __sigpending14 -// __sigprocmask14 -// __sigsuspend14 -// __sigtimedwait -// __stat30 -// __syscall -// __vfork14 -// _ksem_close -// _ksem_destroy -// _ksem_getvalue -// _ksem_init -// _ksem_open -// _ksem_post -// _ksem_trywait -// _ksem_unlink -// _ksem_wait -// _lwp_continue -// _lwp_create -// _lwp_ctl -// _lwp_detach -// _lwp_exit -// _lwp_getname -// _lwp_getprivate -// _lwp_kill -// _lwp_park -// _lwp_self -// _lwp_setname -// _lwp_setprivate -// _lwp_suspend -// _lwp_unpark -// _lwp_unpark_all -// _lwp_wait -// _lwp_wakeup -// _pset_bind -// _sched_getaffinity -// _sched_getparam -// _sched_setaffinity -// _sched_setparam -// acct -// aio_cancel -// aio_error -// aio_fsync -// aio_read -// aio_return -// aio_suspend -// aio_write -// break -// clock_getres -// clock_gettime -// clock_settime -// compat_09_ogetdomainname -// compat_09_osetdomainname -// compat_09_ouname -// compat_10_omsgsys -// compat_10_osemsys -// compat_10_oshmsys -// compat_12_fstat12 -// compat_12_getdirentries -// compat_12_lstat12 -// compat_12_msync -// compat_12_oreboot -// compat_12_oswapon -// compat_12_stat12 -// compat_13_sigaction13 -// compat_13_sigaltstack13 -// compat_13_sigpending13 -// compat_13_sigprocmask13 -// compat_13_sigreturn13 -// compat_13_sigsuspend13 -// compat_14___semctl -// compat_14_msgctl -// compat_14_shmctl -// compat_16___sigaction14 -// compat_16___sigreturn14 -// compat_20_fhstatfs -// compat_20_fstatfs -// compat_20_getfsstat -// compat_20_statfs -// compat_30___fhstat30 -// compat_30___fstat13 -// compat_30___lstat13 -// compat_30___stat13 -// compat_30_fhopen 
-// compat_30_fhstat -// compat_30_fhstatvfs1 -// compat_30_getdents -// compat_30_getfh -// compat_30_ntp_gettime -// compat_30_socket -// compat_40_mount -// compat_43_fstat43 -// compat_43_lstat43 -// compat_43_oaccept -// compat_43_ocreat -// compat_43_oftruncate -// compat_43_ogetdirentries -// compat_43_ogetdtablesize -// compat_43_ogethostid -// compat_43_ogethostname -// compat_43_ogetkerninfo -// compat_43_ogetpagesize -// compat_43_ogetpeername -// compat_43_ogetrlimit -// compat_43_ogetsockname -// compat_43_okillpg -// compat_43_olseek -// compat_43_ommap -// compat_43_oquota -// compat_43_orecv -// compat_43_orecvfrom -// compat_43_orecvmsg -// compat_43_osend -// compat_43_osendmsg -// compat_43_osethostid -// compat_43_osethostname -// compat_43_osigblock -// compat_43_osigsetmask -// compat_43_osigstack -// compat_43_osigvec -// compat_43_otruncate -// compat_43_owait -// compat_43_stat43 -// execve -// extattr_delete_fd -// extattr_delete_file -// extattr_delete_link -// extattr_get_fd -// extattr_get_file -// extattr_get_link -// extattr_list_fd -// extattr_list_file -// extattr_list_link -// extattr_set_fd -// extattr_set_file -// extattr_set_link -// extattrctl -// fchroot -// fdatasync -// fgetxattr -// fktrace -// flistxattr -// fork -// fremovexattr -// fsetxattr -// fstatvfs1 -// fsync_range -// getcontext -// getitimer -// getvfsstat -// getxattr -// ktrace -// lchflags -// lchmod -// lfs_bmapv -// lfs_markv -// lfs_segclean -// lfs_segwait -// lgetxattr -// lio_listio -// listxattr -// llistxattr -// lremovexattr -// lseek -// lsetxattr -// lutimes -// madvise -// mincore -// minherit -// modctl -// mq_close -// mq_getattr -// mq_notify -// mq_open -// mq_receive -// mq_send -// mq_setattr -// mq_timedreceive -// mq_timedsend -// mq_unlink -// mremap -// msgget -// msgrcv -// msgsnd -// nfssvc -// ntp_adjtime -// pmc_control -// pmc_get_info -// pollts -// preadv -// profil -// pselect -// pset_assign -// pset_create -// pset_destroy -// ptrace -// pwritev -// quotactl -// rasctl -// readv -// reboot -// removexattr -// sa_enable -// sa_preempt -// sa_register -// sa_setconcurrency -// sa_stacks -// sa_yield -// sbrk -// sched_yield -// semconfig -// semget -// semop -// setcontext -// setitimer -// setxattr -// shmat -// shmdt -// shmget -// sstk -// statvfs1 -// swapctl -// sysarch -// syscall -// timer_create -// timer_delete -// timer_getoverrun -// timer_gettime -// timer_settime -// undelete -// utrace -// uuidgen -// vadvise -// vfork -// writev +const ( + mremapFixed = MAP_FIXED + mremapDontunmap = 0 + mremapMaymove = 0 +) + +//sys mremapNetBSD(oldp uintptr, oldsize uintptr, newp uintptr, newsize uintptr, flags int) (xaddr uintptr, err error) = SYS_MREMAP + +func mremap(oldaddr uintptr, oldlength uintptr, newlength uintptr, flags int, newaddr uintptr) (uintptr, error) { + return mremapNetBSD(oldaddr, oldlength, newaddr, newlength, flags) +} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/syscall_openbsd.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/syscall_openbsd.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/syscall_openbsd.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/syscall_openbsd.go 2024-02-23 09:46:14.000000000 +0000 @@ -151,6 +151,21 @@ return } +//sysnb getresuid(ruid *_C_int, euid *_C_int, suid *_C_int) +//sysnb getresgid(rgid *_C_int, egid *_C_int, sgid *_C_int) + +func Getresuid() (ruid, euid, suid int) { + var r, e, s _C_int + getresuid(&r, &e, &s) + return int(r), 
int(e), int(s) +} + +func Getresgid() (rgid, egid, sgid int) { + var r, e, s _C_int + getresgid(&r, &e, &s) + return int(r), int(e), int(s) +} + //sys ioctl(fd int, req uint, arg uintptr) (err error) //sys ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) = SYS_IOCTL @@ -311,80 +326,4 @@ //sys write(fd int, p []byte) (n int, err error) //sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) //sys munmap(addr uintptr, length uintptr) (err error) -//sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ -//sys writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE //sys utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) - -/* - * Unimplemented - */ -// __getcwd -// __semctl -// __syscall -// __sysctl -// adjfreq -// break -// clock_getres -// clock_gettime -// clock_settime -// closefrom -// execve -// fhopen -// fhstat -// fhstatfs -// fork -// futimens -// getfh -// getgid -// getitimer -// getlogin -// getresgid -// getresuid -// getthrid -// ktrace -// lfs_bmapv -// lfs_markv -// lfs_segclean -// lfs_segwait -// mincore -// minherit -// mount -// mquery -// msgctl -// msgget -// msgrcv -// msgsnd -// nfssvc -// nnpfspioctl -// preadv -// profil -// pwritev -// quotactl -// readv -// reboot -// renameat -// rfork -// sched_yield -// semget -// semop -// setgroups -// setitimer -// setsockopt -// shmat -// shmctl -// shmdt -// shmget -// sigaction -// sigaltstack -// sigpending -// sigprocmask -// sigreturn -// sigsuspend -// sysarch -// syscall -// threxit -// thrsigdivert -// thrsleep -// thrwakeup -// vfork -// writev diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/syscall_solaris.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/syscall_solaris.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/syscall_solaris.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/syscall_solaris.go 2024-02-23 09:46:14.000000000 +0000 @@ -698,38 +698,6 @@ //sys setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) = libsocket.setsockopt //sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) = libsocket.recvfrom -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procread)), 3, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf), 0, 0, 0) - n = int(r0) - if e1 != 0 { - err = e1 - } - return -} - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procwrite)), 3, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf), 0, 0, 0) - n = int(r0) - if e1 != 0 { - err = e1 - } - return -} - -var mapper = &mmapper{ - active: make(map[*byte][]byte), - mmap: mmap, - munmap: munmap, -} - -func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { - return mapper.Mmap(fd, offset, length, prot, flags) -} - -func Munmap(b []byte) (err error) { - return mapper.Munmap(b) -} - // Event Ports type fileObjCookie struct { diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/syscall_unix.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/syscall_unix.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/syscall_unix.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/syscall_unix.go 2024-02-23 09:46:14.000000000 +0000 @@ -147,6 +147,14 @@ return nil } +func 
Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { + return mapper.Mmap(fd, offset, length, prot, flags) +} + +func Munmap(b []byte) (err error) { + return mapper.Munmap(b) +} + func Read(fd int, p []byte) (n int, err error) { n, err = read(fd, p) if raceenabled { @@ -541,6 +549,9 @@ if err != nil { return err } + if (flag&O_NONBLOCK != 0) == nonblocking { + return nil + } if nonblocking { flag |= O_NONBLOCK } else { diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go 2024-02-23 09:46:14.000000000 +0000 @@ -192,7 +192,6 @@ //sys fcntl(fd int, cmd int, arg int) (val int, err error) //sys read(fd int, p []byte) (n int, err error) -//sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ //sys write(fd int, p []byte) (n int, err error) //sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) = SYS___ACCEPT_A @@ -285,25 +284,11 @@ return } -var mapper = &mmapper{ - active: make(map[*byte][]byte), - mmap: mmap, - munmap: munmap, -} - // Dummy function: there are no semantics for Madvise on z/OS func Madvise(b []byte, advice int) (err error) { return } -func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { - return mapper.Mmap(fd, offset, length, prot, flags) -} - -func Munmap(b []byte) (err error) { - return mapper.Munmap(b) -} - //sys Gethostname(buf []byte) (err error) = SYS___GETHOSTNAME_A //sysnb Getegid() (egid int) //sysnb Geteuid() (uid int) diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zerrors_linux.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zerrors_linux.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zerrors_linux.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zerrors_linux.go 2024-02-23 09:46:14.000000000 +0000 @@ -493,6 +493,7 @@ BPF_F_TEST_RUN_ON_CPU = 0x1 BPF_F_TEST_STATE_FREQ = 0x8 BPF_F_TEST_XDP_LIVE_FRAMES = 0x2 + BPF_F_XDP_DEV_BOUND_ONLY = 0x40 BPF_F_XDP_HAS_FRAGS = 0x20 BPF_H = 0x8 BPF_IMM = 0x0 @@ -826,9 +827,9 @@ DM_UUID_FLAG = 0x4000 DM_UUID_LEN = 0x81 DM_VERSION = 0xc138fd00 - DM_VERSION_EXTRA = "-ioctl (2022-07-28)" + DM_VERSION_EXTRA = "-ioctl (2023-03-01)" DM_VERSION_MAJOR = 0x4 - DM_VERSION_MINOR = 0x2f + DM_VERSION_MINOR = 0x30 DM_VERSION_PATCHLEVEL = 0x0 DT_BLK = 0x6 DT_CHR = 0x2 @@ -1197,6 +1198,7 @@ FAN_EVENT_METADATA_LEN = 0x18 FAN_EVENT_ON_CHILD = 0x8000000 FAN_FS_ERROR = 0x8000 + FAN_INFO = 0x20 FAN_MARK_ADD = 0x1 FAN_MARK_DONT_FOLLOW = 0x4 FAN_MARK_EVICTABLE = 0x200 @@ -1233,6 +1235,8 @@ FAN_REPORT_PIDFD = 0x80 FAN_REPORT_TARGET_FID = 0x1000 FAN_REPORT_TID = 0x100 + FAN_RESPONSE_INFO_AUDIT_RULE = 0x1 + FAN_RESPONSE_INFO_NONE = 0x0 FAN_UNLIMITED_MARKS = 0x20 FAN_UNLIMITED_QUEUE = 0x10 FD_CLOEXEC = 0x1 @@ -1860,6 +1864,7 @@ MEMWRITEOOB64 = 0xc0184d15 MFD_ALLOW_SEALING = 0x2 MFD_CLOEXEC = 0x1 + MFD_EXEC = 0x10 MFD_HUGETLB = 0x4 MFD_HUGE_16GB = 0x88000000 MFD_HUGE_16MB = 0x60000000 @@ -1875,6 +1880,7 @@ MFD_HUGE_8MB = 0x5c000000 MFD_HUGE_MASK = 0x3f MFD_HUGE_SHIFT = 0x1a + MFD_NOEXEC_SEAL = 0x8 MINIX2_SUPER_MAGIC = 0x2468 MINIX2_SUPER_MAGIC2 = 0x2478 MINIX3_SUPER_MAGIC = 0x4d5a @@ -1898,6 +1904,9 @@ MOUNT_ATTR_SIZE_VER0 = 0x20 MOUNT_ATTR_STRICTATIME = 0x20 MOUNT_ATTR__ATIME = 0x70 + 
MREMAP_DONTUNMAP = 0x4 + MREMAP_FIXED = 0x2 + MREMAP_MAYMOVE = 0x1 MSDOS_SUPER_MAGIC = 0x4d44 MSG_BATCH = 0x40000 MSG_CMSG_CLOEXEC = 0x40000000 @@ -2204,6 +2213,7 @@ PACKET_USER = 0x6 PACKET_VERSION = 0xa PACKET_VNET_HDR = 0xf + PACKET_VNET_HDR_SZ = 0x18 PARITY_CRC16_PR0 = 0x2 PARITY_CRC16_PR0_CCITT = 0x4 PARITY_CRC16_PR1 = 0x3 @@ -2221,6 +2231,7 @@ PERF_ATTR_SIZE_VER5 = 0x70 PERF_ATTR_SIZE_VER6 = 0x78 PERF_ATTR_SIZE_VER7 = 0x80 + PERF_ATTR_SIZE_VER8 = 0x88 PERF_AUX_FLAG_COLLISION = 0x8 PERF_AUX_FLAG_CORESIGHT_FORMAT_CORESIGHT = 0x0 PERF_AUX_FLAG_CORESIGHT_FORMAT_RAW = 0x100 @@ -2361,6 +2372,7 @@ PR_FP_EXC_UND = 0x40000 PR_FP_MODE_FR = 0x1 PR_FP_MODE_FRE = 0x2 + PR_GET_AUXV = 0x41555856 PR_GET_CHILD_SUBREAPER = 0x25 PR_GET_DUMPABLE = 0x3 PR_GET_ENDIAN = 0x13 @@ -2369,6 +2381,8 @@ PR_GET_FP_MODE = 0x2e PR_GET_IO_FLUSHER = 0x3a PR_GET_KEEPCAPS = 0x7 + PR_GET_MDWE = 0x42 + PR_GET_MEMORY_MERGE = 0x44 PR_GET_NAME = 0x10 PR_GET_NO_NEW_PRIVS = 0x27 PR_GET_PDEATHSIG = 0x2 @@ -2389,6 +2403,7 @@ PR_MCE_KILL_GET = 0x22 PR_MCE_KILL_LATE = 0x0 PR_MCE_KILL_SET = 0x1 + PR_MDWE_REFUSE_EXEC_GAIN = 0x1 PR_MPX_DISABLE_MANAGEMENT = 0x2c PR_MPX_ENABLE_MANAGEMENT = 0x2b PR_MTE_TAG_MASK = 0x7fff8 @@ -2406,6 +2421,15 @@ PR_PAC_GET_ENABLED_KEYS = 0x3d PR_PAC_RESET_KEYS = 0x36 PR_PAC_SET_ENABLED_KEYS = 0x3c + PR_RISCV_V_GET_CONTROL = 0x46 + PR_RISCV_V_SET_CONTROL = 0x45 + PR_RISCV_V_VSTATE_CTRL_CUR_MASK = 0x3 + PR_RISCV_V_VSTATE_CTRL_DEFAULT = 0x0 + PR_RISCV_V_VSTATE_CTRL_INHERIT = 0x10 + PR_RISCV_V_VSTATE_CTRL_MASK = 0x1f + PR_RISCV_V_VSTATE_CTRL_NEXT_MASK = 0xc + PR_RISCV_V_VSTATE_CTRL_OFF = 0x1 + PR_RISCV_V_VSTATE_CTRL_ON = 0x2 PR_SCHED_CORE = 0x3e PR_SCHED_CORE_CREATE = 0x1 PR_SCHED_CORE_GET = 0x0 @@ -2423,6 +2447,8 @@ PR_SET_FP_MODE = 0x2d PR_SET_IO_FLUSHER = 0x39 PR_SET_KEEPCAPS = 0x8 + PR_SET_MDWE = 0x41 + PR_SET_MEMORY_MERGE = 0x43 PR_SET_MM = 0x23 PR_SET_MM_ARG_END = 0x9 PR_SET_MM_ARG_START = 0x8 @@ -2506,6 +2532,7 @@ PTRACE_GETSIGMASK = 0x420a PTRACE_GET_RSEQ_CONFIGURATION = 0x420f PTRACE_GET_SYSCALL_INFO = 0x420e + PTRACE_GET_SYSCALL_USER_DISPATCH_CONFIG = 0x4211 PTRACE_INTERRUPT = 0x4207 PTRACE_KILL = 0x8 PTRACE_LISTEN = 0x4208 @@ -2536,6 +2563,7 @@ PTRACE_SETREGSET = 0x4205 PTRACE_SETSIGINFO = 0x4203 PTRACE_SETSIGMASK = 0x420b + PTRACE_SET_SYSCALL_USER_DISPATCH_CONFIG = 0x4210 PTRACE_SINGLESTEP = 0x9 PTRACE_SYSCALL = 0x18 PTRACE_SYSCALL_INFO_ENTRY = 0x1 @@ -2802,6 +2830,23 @@ RWF_SUPPORTED = 0x1f RWF_SYNC = 0x4 RWF_WRITE_LIFE_NOT_SET = 0x0 + SCHED_BATCH = 0x3 + SCHED_DEADLINE = 0x6 + SCHED_FIFO = 0x1 + SCHED_FLAG_ALL = 0x7f + SCHED_FLAG_DL_OVERRUN = 0x4 + SCHED_FLAG_KEEP_ALL = 0x18 + SCHED_FLAG_KEEP_PARAMS = 0x10 + SCHED_FLAG_KEEP_POLICY = 0x8 + SCHED_FLAG_RECLAIM = 0x2 + SCHED_FLAG_RESET_ON_FORK = 0x1 + SCHED_FLAG_UTIL_CLAMP = 0x60 + SCHED_FLAG_UTIL_CLAMP_MAX = 0x40 + SCHED_FLAG_UTIL_CLAMP_MIN = 0x20 + SCHED_IDLE = 0x5 + SCHED_NORMAL = 0x0 + SCHED_RESET_ON_FORK = 0x40000000 + SCHED_RR = 0x2 SCM_CREDENTIALS = 0x2 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x1d @@ -3072,7 +3117,7 @@ TASKSTATS_GENL_NAME = "TASKSTATS" TASKSTATS_GENL_VERSION = 0x1 TASKSTATS_TYPE_MAX = 0x6 - TASKSTATS_VERSION = 0xd + TASKSTATS_VERSION = 0xe TCIFLUSH = 0x0 TCIOFF = 0x2 TCIOFLUSH = 0x2 @@ -3238,6 +3283,7 @@ TP_STATUS_COPY = 0x2 TP_STATUS_CSUMNOTREADY = 0x8 TP_STATUS_CSUM_VALID = 0x80 + TP_STATUS_GSO_TCP = 0x100 TP_STATUS_KERNEL = 0x0 TP_STATUS_LOSING = 0x4 TP_STATUS_SENDING = 0x2 diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zerrors_linux_386.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zerrors_linux_386.go --- 
temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zerrors_linux_386.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zerrors_linux_386.go 2024-02-23 09:46:14.000000000 +0000 @@ -27,22 +27,31 @@ B57600 = 0x1001 B576000 = 0x1006 B921600 = 0x1007 + BLKALIGNOFF = 0x127a BLKBSZGET = 0x80041270 BLKBSZSET = 0x40041271 + BLKDISCARD = 0x1277 + BLKDISCARDZEROES = 0x127c BLKFLSBUF = 0x1261 BLKFRAGET = 0x1265 BLKFRASET = 0x1264 + BLKGETDISKSEQ = 0x80081280 BLKGETSIZE = 0x1260 BLKGETSIZE64 = 0x80041272 + BLKIOMIN = 0x1278 + BLKIOOPT = 0x1279 BLKPBSZGET = 0x127b BLKRAGET = 0x1263 BLKRASET = 0x1262 BLKROGET = 0x125e BLKROSET = 0x125d + BLKROTATIONAL = 0x127e BLKRRPART = 0x125f + BLKSECDISCARD = 0x127d BLKSECTGET = 0x1267 BLKSECTSET = 0x1266 BLKSSZGET = 0x1268 + BLKZEROOUT = 0x127f BOTHER = 0x1000 BS1 = 0x2000 BSDLY = 0x2000 @@ -317,10 +326,12 @@ SO_NOFCS = 0x2b SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 + SO_PASSPIDFD = 0x4c SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 SO_PEERGROUPS = 0x3b + SO_PEERPIDFD = 0x4d SO_PEERSEC = 0x1f SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x26 diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go 2024-02-23 09:46:14.000000000 +0000 @@ -27,22 +27,31 @@ B57600 = 0x1001 B576000 = 0x1006 B921600 = 0x1007 + BLKALIGNOFF = 0x127a BLKBSZGET = 0x80081270 BLKBSZSET = 0x40081271 + BLKDISCARD = 0x1277 + BLKDISCARDZEROES = 0x127c BLKFLSBUF = 0x1261 BLKFRAGET = 0x1265 BLKFRASET = 0x1264 + BLKGETDISKSEQ = 0x80081280 BLKGETSIZE = 0x1260 BLKGETSIZE64 = 0x80081272 + BLKIOMIN = 0x1278 + BLKIOOPT = 0x1279 BLKPBSZGET = 0x127b BLKRAGET = 0x1263 BLKRASET = 0x1262 BLKROGET = 0x125e BLKROSET = 0x125d + BLKROTATIONAL = 0x127e BLKRRPART = 0x125f + BLKSECDISCARD = 0x127d BLKSECTGET = 0x1267 BLKSECTSET = 0x1266 BLKSSZGET = 0x1268 + BLKZEROOUT = 0x127f BOTHER = 0x1000 BS1 = 0x2000 BSDLY = 0x2000 @@ -318,10 +327,12 @@ SO_NOFCS = 0x2b SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 + SO_PASSPIDFD = 0x4c SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 SO_PEERGROUPS = 0x3b + SO_PEERPIDFD = 0x4d SO_PEERSEC = 0x1f SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x26 diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go 2024-02-23 09:46:14.000000000 +0000 @@ -27,22 +27,31 @@ B57600 = 0x1001 B576000 = 0x1006 B921600 = 0x1007 + BLKALIGNOFF = 0x127a BLKBSZGET = 0x80041270 BLKBSZSET = 0x40041271 + BLKDISCARD = 0x1277 + BLKDISCARDZEROES = 0x127c BLKFLSBUF = 0x1261 BLKFRAGET = 0x1265 BLKFRASET = 0x1264 + BLKGETDISKSEQ = 0x80081280 BLKGETSIZE = 0x1260 BLKGETSIZE64 = 0x80041272 + BLKIOMIN = 0x1278 + BLKIOOPT = 0x1279 BLKPBSZGET = 0x127b BLKRAGET = 0x1263 BLKRASET = 0x1262 BLKROGET = 0x125e BLKROSET = 0x125d + BLKROTATIONAL = 0x127e BLKRRPART = 0x125f + BLKSECDISCARD = 0x127d BLKSECTGET = 0x1267 BLKSECTSET = 0x1266 BLKSSZGET = 0x1268 + BLKZEROOUT = 0x127f BOTHER = 0x1000 BS1 = 0x2000 BSDLY = 0x2000 @@ -324,10 +333,12 @@ SO_NOFCS = 0x2b SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 + SO_PASSPIDFD = 0x4c SO_PASSSEC = 0x22 
SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 SO_PEERGROUPS = 0x3b + SO_PEERPIDFD = 0x4d SO_PEERSEC = 0x1f SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x26 diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go 2024-02-23 09:46:14.000000000 +0000 @@ -27,22 +27,31 @@ B57600 = 0x1001 B576000 = 0x1006 B921600 = 0x1007 + BLKALIGNOFF = 0x127a BLKBSZGET = 0x80081270 BLKBSZSET = 0x40081271 + BLKDISCARD = 0x1277 + BLKDISCARDZEROES = 0x127c BLKFLSBUF = 0x1261 BLKFRAGET = 0x1265 BLKFRASET = 0x1264 + BLKGETDISKSEQ = 0x80081280 BLKGETSIZE = 0x1260 BLKGETSIZE64 = 0x80081272 + BLKIOMIN = 0x1278 + BLKIOOPT = 0x1279 BLKPBSZGET = 0x127b BLKRAGET = 0x1263 BLKRASET = 0x1262 BLKROGET = 0x125e BLKROSET = 0x125d + BLKROTATIONAL = 0x127e BLKRRPART = 0x125f + BLKSECDISCARD = 0x127d BLKSECTGET = 0x1267 BLKSECTSET = 0x1266 BLKSSZGET = 0x1268 + BLKZEROOUT = 0x127f BOTHER = 0x1000 BS1 = 0x2000 BSDLY = 0x2000 @@ -314,10 +323,12 @@ SO_NOFCS = 0x2b SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 + SO_PASSPIDFD = 0x4c SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 SO_PEERGROUPS = 0x3b + SO_PEERPIDFD = 0x4d SO_PEERSEC = 0x1f SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x26 @@ -443,6 +454,7 @@ TIOCSWINSZ = 0x5414 TIOCVHANGUP = 0x5437 TOSTOP = 0x100 + TPIDR2_MAGIC = 0x54504902 TUNATTACHFILTER = 0x401054d5 TUNDETACHFILTER = 0x401054d6 TUNGETDEVNETNS = 0x54e3 @@ -515,6 +527,7 @@ XCASE = 0x4 XTABS = 0x1800 ZA_MAGIC = 0x54366345 + ZT_MAGIC = 0x5a544e01 _HIDIOCGRAWNAME = 0x80804804 _HIDIOCGRAWPHYS = 0x80404805 _HIDIOCGRAWUNIQ = 0x80404808 diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go 2024-02-23 09:46:14.000000000 +0000 @@ -27,22 +27,31 @@ B57600 = 0x1001 B576000 = 0x1006 B921600 = 0x1007 + BLKALIGNOFF = 0x127a BLKBSZGET = 0x80081270 BLKBSZSET = 0x40081271 + BLKDISCARD = 0x1277 + BLKDISCARDZEROES = 0x127c BLKFLSBUF = 0x1261 BLKFRAGET = 0x1265 BLKFRASET = 0x1264 + BLKGETDISKSEQ = 0x80081280 BLKGETSIZE = 0x1260 BLKGETSIZE64 = 0x80081272 + BLKIOMIN = 0x1278 + BLKIOOPT = 0x1279 BLKPBSZGET = 0x127b BLKRAGET = 0x1263 BLKRASET = 0x1262 BLKROGET = 0x125e BLKROSET = 0x125d + BLKROTATIONAL = 0x127e BLKRRPART = 0x125f + BLKSECDISCARD = 0x127d BLKSECTGET = 0x1267 BLKSECTSET = 0x1266 BLKSSZGET = 0x1268 + BLKZEROOUT = 0x127f BOTHER = 0x1000 BS1 = 0x2000 BSDLY = 0x2000 @@ -109,6 +118,8 @@ IUCLC = 0x200 IXOFF = 0x1000 IXON = 0x400 + LASX_CTX_MAGIC = 0x41535801 + LSX_CTX_MAGIC = 0x53580001 MAP_ANON = 0x20 MAP_ANONYMOUS = 0x20 MAP_DENYWRITE = 0x800 @@ -308,10 +319,12 @@ SO_NOFCS = 0x2b SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 + SO_PASSPIDFD = 0x4c SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 SO_PEERGROUPS = 0x3b + SO_PEERPIDFD = 0x4d SO_PEERSEC = 0x1f SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x26 diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go 2023-09-29 14:03:34.000000000 +0000 +++ 
temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go 2024-02-23 09:46:14.000000000 +0000 @@ -27,22 +27,31 @@ B57600 = 0x1001 B576000 = 0x1006 B921600 = 0x1007 + BLKALIGNOFF = 0x2000127a BLKBSZGET = 0x40041270 BLKBSZSET = 0x80041271 + BLKDISCARD = 0x20001277 + BLKDISCARDZEROES = 0x2000127c BLKFLSBUF = 0x20001261 BLKFRAGET = 0x20001265 BLKFRASET = 0x20001264 + BLKGETDISKSEQ = 0x40081280 BLKGETSIZE = 0x20001260 BLKGETSIZE64 = 0x40041272 + BLKIOMIN = 0x20001278 + BLKIOOPT = 0x20001279 BLKPBSZGET = 0x2000127b BLKRAGET = 0x20001263 BLKRASET = 0x20001262 BLKROGET = 0x2000125e BLKROSET = 0x2000125d + BLKROTATIONAL = 0x2000127e BLKRRPART = 0x2000125f + BLKSECDISCARD = 0x2000127d BLKSECTGET = 0x20001267 BLKSECTSET = 0x20001266 BLKSSZGET = 0x20001268 + BLKZEROOUT = 0x2000127f BOTHER = 0x1000 BS1 = 0x2000 BSDLY = 0x2000 @@ -317,10 +326,12 @@ SO_NOFCS = 0x2b SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 + SO_PASSPIDFD = 0x4c SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 SO_PEERGROUPS = 0x3b + SO_PEERPIDFD = 0x4d SO_PEERSEC = 0x1e SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x1028 diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go 2024-02-23 09:46:14.000000000 +0000 @@ -27,22 +27,31 @@ B57600 = 0x1001 B576000 = 0x1006 B921600 = 0x1007 + BLKALIGNOFF = 0x2000127a BLKBSZGET = 0x40081270 BLKBSZSET = 0x80081271 + BLKDISCARD = 0x20001277 + BLKDISCARDZEROES = 0x2000127c BLKFLSBUF = 0x20001261 BLKFRAGET = 0x20001265 BLKFRASET = 0x20001264 + BLKGETDISKSEQ = 0x40081280 BLKGETSIZE = 0x20001260 BLKGETSIZE64 = 0x40081272 + BLKIOMIN = 0x20001278 + BLKIOOPT = 0x20001279 BLKPBSZGET = 0x2000127b BLKRAGET = 0x20001263 BLKRASET = 0x20001262 BLKROGET = 0x2000125e BLKROSET = 0x2000125d + BLKROTATIONAL = 0x2000127e BLKRRPART = 0x2000125f + BLKSECDISCARD = 0x2000127d BLKSECTGET = 0x20001267 BLKSECTSET = 0x20001266 BLKSSZGET = 0x20001268 + BLKZEROOUT = 0x2000127f BOTHER = 0x1000 BS1 = 0x2000 BSDLY = 0x2000 @@ -317,10 +326,12 @@ SO_NOFCS = 0x2b SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 + SO_PASSPIDFD = 0x4c SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 SO_PEERGROUPS = 0x3b + SO_PEERPIDFD = 0x4d SO_PEERSEC = 0x1e SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x1028 diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go 2024-02-23 09:46:14.000000000 +0000 @@ -27,22 +27,31 @@ B57600 = 0x1001 B576000 = 0x1006 B921600 = 0x1007 + BLKALIGNOFF = 0x2000127a BLKBSZGET = 0x40081270 BLKBSZSET = 0x80081271 + BLKDISCARD = 0x20001277 + BLKDISCARDZEROES = 0x2000127c BLKFLSBUF = 0x20001261 BLKFRAGET = 0x20001265 BLKFRASET = 0x20001264 + BLKGETDISKSEQ = 0x40081280 BLKGETSIZE = 0x20001260 BLKGETSIZE64 = 0x40081272 + BLKIOMIN = 0x20001278 + BLKIOOPT = 0x20001279 BLKPBSZGET = 0x2000127b BLKRAGET = 0x20001263 BLKRASET = 0x20001262 BLKROGET = 0x2000125e BLKROSET = 0x2000125d + BLKROTATIONAL = 0x2000127e BLKRRPART = 0x2000125f + BLKSECDISCARD = 0x2000127d BLKSECTGET = 0x20001267 BLKSECTSET = 0x20001266 BLKSSZGET = 0x20001268 + BLKZEROOUT = 
0x2000127f BOTHER = 0x1000 BS1 = 0x2000 BSDLY = 0x2000 @@ -317,10 +326,12 @@ SO_NOFCS = 0x2b SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 + SO_PASSPIDFD = 0x4c SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 SO_PEERGROUPS = 0x3b + SO_PEERPIDFD = 0x4d SO_PEERSEC = 0x1e SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x1028 diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go 2024-02-23 09:46:14.000000000 +0000 @@ -27,22 +27,31 @@ B57600 = 0x1001 B576000 = 0x1006 B921600 = 0x1007 + BLKALIGNOFF = 0x2000127a BLKBSZGET = 0x40041270 BLKBSZSET = 0x80041271 + BLKDISCARD = 0x20001277 + BLKDISCARDZEROES = 0x2000127c BLKFLSBUF = 0x20001261 BLKFRAGET = 0x20001265 BLKFRASET = 0x20001264 + BLKGETDISKSEQ = 0x40081280 BLKGETSIZE = 0x20001260 BLKGETSIZE64 = 0x40041272 + BLKIOMIN = 0x20001278 + BLKIOOPT = 0x20001279 BLKPBSZGET = 0x2000127b BLKRAGET = 0x20001263 BLKRASET = 0x20001262 BLKROGET = 0x2000125e BLKROSET = 0x2000125d + BLKROTATIONAL = 0x2000127e BLKRRPART = 0x2000125f + BLKSECDISCARD = 0x2000127d BLKSECTGET = 0x20001267 BLKSECTSET = 0x20001266 BLKSSZGET = 0x20001268 + BLKZEROOUT = 0x2000127f BOTHER = 0x1000 BS1 = 0x2000 BSDLY = 0x2000 @@ -317,10 +326,12 @@ SO_NOFCS = 0x2b SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 + SO_PASSPIDFD = 0x4c SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 SO_PEERGROUPS = 0x3b + SO_PEERPIDFD = 0x4d SO_PEERSEC = 0x1e SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x1028 diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go 2024-02-23 09:46:14.000000000 +0000 @@ -27,22 +27,31 @@ B57600 = 0x10 B576000 = 0x15 B921600 = 0x16 + BLKALIGNOFF = 0x2000127a BLKBSZGET = 0x40041270 BLKBSZSET = 0x80041271 + BLKDISCARD = 0x20001277 + BLKDISCARDZEROES = 0x2000127c BLKFLSBUF = 0x20001261 BLKFRAGET = 0x20001265 BLKFRASET = 0x20001264 + BLKGETDISKSEQ = 0x40081280 BLKGETSIZE = 0x20001260 BLKGETSIZE64 = 0x40041272 + BLKIOMIN = 0x20001278 + BLKIOOPT = 0x20001279 BLKPBSZGET = 0x2000127b BLKRAGET = 0x20001263 BLKRASET = 0x20001262 BLKROGET = 0x2000125e BLKROSET = 0x2000125d + BLKROTATIONAL = 0x2000127e BLKRRPART = 0x2000125f + BLKSECDISCARD = 0x2000127d BLKSECTGET = 0x20001267 BLKSECTSET = 0x20001266 BLKSSZGET = 0x20001268 + BLKZEROOUT = 0x2000127f BOTHER = 0x1f BS1 = 0x8000 BSDLY = 0x8000 @@ -372,10 +381,12 @@ SO_NOFCS = 0x2b SO_OOBINLINE = 0xa SO_PASSCRED = 0x14 + SO_PASSPIDFD = 0x4c SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x15 SO_PEERGROUPS = 0x3b + SO_PEERPIDFD = 0x4d SO_PEERSEC = 0x1f SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x26 diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go 2024-02-23 09:46:14.000000000 +0000 @@ -27,22 +27,31 @@ B57600 = 0x10 B576000 = 0x15 B921600 = 0x16 + BLKALIGNOFF = 0x2000127a BLKBSZGET = 
0x40081270 BLKBSZSET = 0x80081271 + BLKDISCARD = 0x20001277 + BLKDISCARDZEROES = 0x2000127c BLKFLSBUF = 0x20001261 BLKFRAGET = 0x20001265 BLKFRASET = 0x20001264 + BLKGETDISKSEQ = 0x40081280 BLKGETSIZE = 0x20001260 BLKGETSIZE64 = 0x40081272 + BLKIOMIN = 0x20001278 + BLKIOOPT = 0x20001279 BLKPBSZGET = 0x2000127b BLKRAGET = 0x20001263 BLKRASET = 0x20001262 BLKROGET = 0x2000125e BLKROSET = 0x2000125d + BLKROTATIONAL = 0x2000127e BLKRRPART = 0x2000125f + BLKSECDISCARD = 0x2000127d BLKSECTGET = 0x20001267 BLKSECTSET = 0x20001266 BLKSSZGET = 0x20001268 + BLKZEROOUT = 0x2000127f BOTHER = 0x1f BS1 = 0x8000 BSDLY = 0x8000 @@ -376,10 +385,12 @@ SO_NOFCS = 0x2b SO_OOBINLINE = 0xa SO_PASSCRED = 0x14 + SO_PASSPIDFD = 0x4c SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x15 SO_PEERGROUPS = 0x3b + SO_PEERPIDFD = 0x4d SO_PEERSEC = 0x1f SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x26 diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go 2024-02-23 09:46:14.000000000 +0000 @@ -27,22 +27,31 @@ B57600 = 0x10 B576000 = 0x15 B921600 = 0x16 + BLKALIGNOFF = 0x2000127a BLKBSZGET = 0x40081270 BLKBSZSET = 0x80081271 + BLKDISCARD = 0x20001277 + BLKDISCARDZEROES = 0x2000127c BLKFLSBUF = 0x20001261 BLKFRAGET = 0x20001265 BLKFRASET = 0x20001264 + BLKGETDISKSEQ = 0x40081280 BLKGETSIZE = 0x20001260 BLKGETSIZE64 = 0x40081272 + BLKIOMIN = 0x20001278 + BLKIOOPT = 0x20001279 BLKPBSZGET = 0x2000127b BLKRAGET = 0x20001263 BLKRASET = 0x20001262 BLKROGET = 0x2000125e BLKROSET = 0x2000125d + BLKROTATIONAL = 0x2000127e BLKRRPART = 0x2000125f + BLKSECDISCARD = 0x2000127d BLKSECTGET = 0x20001267 BLKSECTSET = 0x20001266 BLKSSZGET = 0x20001268 + BLKZEROOUT = 0x2000127f BOTHER = 0x1f BS1 = 0x8000 BSDLY = 0x8000 @@ -376,10 +385,12 @@ SO_NOFCS = 0x2b SO_OOBINLINE = 0xa SO_PASSCRED = 0x14 + SO_PASSPIDFD = 0x4c SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x15 SO_PEERGROUPS = 0x3b + SO_PEERPIDFD = 0x4d SO_PEERSEC = 0x1f SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x26 diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go 2024-02-23 09:46:14.000000000 +0000 @@ -27,22 +27,31 @@ B57600 = 0x1001 B576000 = 0x1006 B921600 = 0x1007 + BLKALIGNOFF = 0x127a BLKBSZGET = 0x80081270 BLKBSZSET = 0x40081271 + BLKDISCARD = 0x1277 + BLKDISCARDZEROES = 0x127c BLKFLSBUF = 0x1261 BLKFRAGET = 0x1265 BLKFRASET = 0x1264 + BLKGETDISKSEQ = 0x80081280 BLKGETSIZE = 0x1260 BLKGETSIZE64 = 0x80081272 + BLKIOMIN = 0x1278 + BLKIOOPT = 0x1279 BLKPBSZGET = 0x127b BLKRAGET = 0x1263 BLKRASET = 0x1262 BLKROGET = 0x125e BLKROSET = 0x125d + BLKROTATIONAL = 0x127e BLKRRPART = 0x125f + BLKSECDISCARD = 0x127d BLKSECTGET = 0x1267 BLKSECTSET = 0x1266 BLKSSZGET = 0x1268 + BLKZEROOUT = 0x127f BOTHER = 0x1000 BS1 = 0x2000 BSDLY = 0x2000 @@ -305,10 +314,12 @@ SO_NOFCS = 0x2b SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 + SO_PASSPIDFD = 0x4c SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 SO_PEERGROUPS = 0x3b + SO_PEERPIDFD = 0x4d SO_PEERSEC = 0x1f SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x26 
diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go 2024-02-23 09:46:14.000000000 +0000 @@ -27,22 +27,31 @@ B57600 = 0x1001 B576000 = 0x1006 B921600 = 0x1007 + BLKALIGNOFF = 0x127a BLKBSZGET = 0x80081270 BLKBSZSET = 0x40081271 + BLKDISCARD = 0x1277 + BLKDISCARDZEROES = 0x127c BLKFLSBUF = 0x1261 BLKFRAGET = 0x1265 BLKFRASET = 0x1264 + BLKGETDISKSEQ = 0x80081280 BLKGETSIZE = 0x1260 BLKGETSIZE64 = 0x80081272 + BLKIOMIN = 0x1278 + BLKIOOPT = 0x1279 BLKPBSZGET = 0x127b BLKRAGET = 0x1263 BLKRASET = 0x1262 BLKROGET = 0x125e BLKROSET = 0x125d + BLKROTATIONAL = 0x127e BLKRRPART = 0x125f + BLKSECDISCARD = 0x127d BLKSECTGET = 0x1267 BLKSECTSET = 0x1266 BLKSSZGET = 0x1268 + BLKZEROOUT = 0x127f BOTHER = 0x1000 BS1 = 0x2000 BSDLY = 0x2000 @@ -380,10 +389,12 @@ SO_NOFCS = 0x2b SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 + SO_PASSPIDFD = 0x4c SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 SO_PEERGROUPS = 0x3b + SO_PEERPIDFD = 0x4d SO_PEERSEC = 0x1f SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x26 diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go 2024-02-23 09:46:14.000000000 +0000 @@ -30,22 +30,31 @@ B57600 = 0x1001 B576000 = 0x1006 B921600 = 0x1007 + BLKALIGNOFF = 0x2000127a BLKBSZGET = 0x40081270 BLKBSZSET = 0x80081271 + BLKDISCARD = 0x20001277 + BLKDISCARDZEROES = 0x2000127c BLKFLSBUF = 0x20001261 BLKFRAGET = 0x20001265 BLKFRASET = 0x20001264 + BLKGETDISKSEQ = 0x40081280 BLKGETSIZE = 0x20001260 BLKGETSIZE64 = 0x40081272 + BLKIOMIN = 0x20001278 + BLKIOOPT = 0x20001279 BLKPBSZGET = 0x2000127b BLKRAGET = 0x20001263 BLKRASET = 0x20001262 BLKROGET = 0x2000125e BLKROSET = 0x2000125d + BLKROTATIONAL = 0x2000127e BLKRRPART = 0x2000125f + BLKSECDISCARD = 0x2000127d BLKSECTGET = 0x20001267 BLKSECTSET = 0x20001266 BLKSSZGET = 0x20001268 + BLKZEROOUT = 0x2000127f BOTHER = 0x1000 BS1 = 0x2000 BSDLY = 0x2000 @@ -329,6 +338,54 @@ SCM_WIFI_STATUS = 0x25 SFD_CLOEXEC = 0x400000 SFD_NONBLOCK = 0x4000 + SF_FP = 0x38 + SF_I0 = 0x20 + SF_I1 = 0x24 + SF_I2 = 0x28 + SF_I3 = 0x2c + SF_I4 = 0x30 + SF_I5 = 0x34 + SF_L0 = 0x0 + SF_L1 = 0x4 + SF_L2 = 0x8 + SF_L3 = 0xc + SF_L4 = 0x10 + SF_L5 = 0x14 + SF_L6 = 0x18 + SF_L7 = 0x1c + SF_PC = 0x3c + SF_RETP = 0x40 + SF_V9_FP = 0x70 + SF_V9_I0 = 0x40 + SF_V9_I1 = 0x48 + SF_V9_I2 = 0x50 + SF_V9_I3 = 0x58 + SF_V9_I4 = 0x60 + SF_V9_I5 = 0x68 + SF_V9_L0 = 0x0 + SF_V9_L1 = 0x8 + SF_V9_L2 = 0x10 + SF_V9_L3 = 0x18 + SF_V9_L4 = 0x20 + SF_V9_L5 = 0x28 + SF_V9_L6 = 0x30 + SF_V9_L7 = 0x38 + SF_V9_PC = 0x78 + SF_V9_RETP = 0x80 + SF_V9_XARG0 = 0x88 + SF_V9_XARG1 = 0x90 + SF_V9_XARG2 = 0x98 + SF_V9_XARG3 = 0xa0 + SF_V9_XARG4 = 0xa8 + SF_V9_XARG5 = 0xb0 + SF_V9_XXARG = 0xb8 + SF_XARG0 = 0x44 + SF_XARG1 = 0x48 + SF_XARG2 = 0x4c + SF_XARG3 = 0x50 + SF_XARG4 = 0x54 + SF_XARG5 = 0x58 + SF_XXARG = 0x5c SIOCATMARK = 0x8905 SIOCGPGRP = 0x8904 SIOCGSTAMPNS_NEW = 0x40108907 @@ -371,10 +428,12 @@ SO_NOFCS = 0x27 SO_OOBINLINE = 0x100 SO_PASSCRED = 0x2 + SO_PASSPIDFD = 0x55 SO_PASSSEC = 0x1f SO_PEEK_OFF = 0x26 SO_PEERCRED = 
0x40 SO_PEERGROUPS = 0x3d + SO_PEERPIDFD = 0x56 SO_PEERSEC = 0x1e SO_PREFER_BUSY_POLL = 0x48 SO_PROTOCOL = 0x1028 diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go 2024-02-23 09:46:14.000000000 +0000 @@ -817,28 +817,6 @@ // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, p *byte, np int) (n int, err error) { - r0, er := C.read(C.int(fd), C.uintptr_t(uintptr(unsafe.Pointer(p))), C.size_t(np)) - n = int(r0) - if r0 == -1 && er != nil { - err = er - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, p *byte, np int) (n int, err error) { - r0, er := C.write(C.int(fd), C.uintptr_t(uintptr(unsafe.Pointer(p))), C.size_t(np)) - n = int(r0) - if r0 == -1 && er != nil { - err = er - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Dup2(oldfd int, newfd int) (err error) { r0, er := C.dup2(C.int(oldfd), C.int(newfd)) if r0 == -1 && er != nil { diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go 2024-02-23 09:46:14.000000000 +0000 @@ -762,28 +762,6 @@ // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, p *byte, np int) (n int, err error) { - r0, e1 := callread(fd, uintptr(unsafe.Pointer(p)), np) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, p *byte, np int) (n int, err error) { - r0, e1 := callwrite(fd, uintptr(unsafe.Pointer(p)), np) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Dup2(oldfd int, newfd int) (err error) { _, e1 := calldup2(oldfd, newfd) if e1 != 0 { diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go 2024-02-23 09:46:14.000000000 +0000 @@ -725,6 +725,12 @@ return } +var libc_ioctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ioctl ioctl "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -733,10 +739,6 @@ return } -var libc_ioctl_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_ioctl ioctl "/usr/lib/libSystem.B.dylib" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { @@ -2410,28 +2412,6 @@ // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err 
error) { - r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := syscall_syscall(libc_fstat64_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { @@ -2518,14 +2498,6 @@ if e1 != 0 { err = errnoErr(e1) } - return -} - -func ptrace1Ptr(request int, pid int, addr uintptr, data unsafe.Pointer) (err error) { - _, _, e1 := syscall_syscall6(libc_ptrace_trampoline_addr, uintptr(request), uintptr(pid), addr, uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } return } diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s 2024-02-23 09:46:14.000000000 +0000 @@ -5,703 +5,586 @@ TEXT libc_fdopendir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fdopendir(SB) - GLOBL ·libc_fdopendir_trampoline_addr(SB), RODATA, $8 DATA ·libc_fdopendir_trampoline_addr(SB)/8, $libc_fdopendir_trampoline<>(SB) TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getgroups(SB) - GLOBL ·libc_getgroups_trampoline_addr(SB), RODATA, $8 DATA ·libc_getgroups_trampoline_addr(SB)/8, $libc_getgroups_trampoline<>(SB) TEXT libc_setgroups_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setgroups(SB) - GLOBL ·libc_setgroups_trampoline_addr(SB), RODATA, $8 DATA ·libc_setgroups_trampoline_addr(SB)/8, $libc_setgroups_trampoline<>(SB) TEXT libc_wait4_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_wait4(SB) - GLOBL ·libc_wait4_trampoline_addr(SB), RODATA, $8 DATA ·libc_wait4_trampoline_addr(SB)/8, $libc_wait4_trampoline<>(SB) TEXT libc_accept_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_accept(SB) - GLOBL ·libc_accept_trampoline_addr(SB), RODATA, $8 DATA ·libc_accept_trampoline_addr(SB)/8, $libc_accept_trampoline<>(SB) TEXT libc_bind_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_bind(SB) - GLOBL ·libc_bind_trampoline_addr(SB), RODATA, $8 DATA ·libc_bind_trampoline_addr(SB)/8, $libc_bind_trampoline<>(SB) TEXT libc_connect_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_connect(SB) - GLOBL ·libc_connect_trampoline_addr(SB), RODATA, $8 DATA ·libc_connect_trampoline_addr(SB)/8, $libc_connect_trampoline<>(SB) TEXT libc_socket_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_socket(SB) - GLOBL ·libc_socket_trampoline_addr(SB), RODATA, $8 DATA ·libc_socket_trampoline_addr(SB)/8, $libc_socket_trampoline<>(SB) TEXT libc_getsockopt_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getsockopt(SB) - GLOBL ·libc_getsockopt_trampoline_addr(SB), RODATA, $8 DATA ·libc_getsockopt_trampoline_addr(SB)/8, $libc_getsockopt_trampoline<>(SB) TEXT libc_setsockopt_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setsockopt(SB) - GLOBL ·libc_setsockopt_trampoline_addr(SB), RODATA, $8 DATA ·libc_setsockopt_trampoline_addr(SB)/8, $libc_setsockopt_trampoline<>(SB) TEXT libc_getpeername_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpeername(SB) - GLOBL 
·libc_getpeername_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpeername_trampoline_addr(SB)/8, $libc_getpeername_trampoline<>(SB) TEXT libc_getsockname_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getsockname(SB) - GLOBL ·libc_getsockname_trampoline_addr(SB), RODATA, $8 DATA ·libc_getsockname_trampoline_addr(SB)/8, $libc_getsockname_trampoline<>(SB) TEXT libc_shutdown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_shutdown(SB) - GLOBL ·libc_shutdown_trampoline_addr(SB), RODATA, $8 DATA ·libc_shutdown_trampoline_addr(SB)/8, $libc_shutdown_trampoline<>(SB) TEXT libc_socketpair_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_socketpair(SB) - GLOBL ·libc_socketpair_trampoline_addr(SB), RODATA, $8 DATA ·libc_socketpair_trampoline_addr(SB)/8, $libc_socketpair_trampoline<>(SB) TEXT libc_recvfrom_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_recvfrom(SB) - GLOBL ·libc_recvfrom_trampoline_addr(SB), RODATA, $8 DATA ·libc_recvfrom_trampoline_addr(SB)/8, $libc_recvfrom_trampoline<>(SB) TEXT libc_sendto_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendto(SB) - GLOBL ·libc_sendto_trampoline_addr(SB), RODATA, $8 DATA ·libc_sendto_trampoline_addr(SB)/8, $libc_sendto_trampoline<>(SB) TEXT libc_recvmsg_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_recvmsg(SB) - GLOBL ·libc_recvmsg_trampoline_addr(SB), RODATA, $8 DATA ·libc_recvmsg_trampoline_addr(SB)/8, $libc_recvmsg_trampoline<>(SB) TEXT libc_sendmsg_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendmsg(SB) - GLOBL ·libc_sendmsg_trampoline_addr(SB), RODATA, $8 DATA ·libc_sendmsg_trampoline_addr(SB)/8, $libc_sendmsg_trampoline<>(SB) TEXT libc_kevent_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_kevent(SB) - GLOBL ·libc_kevent_trampoline_addr(SB), RODATA, $8 DATA ·libc_kevent_trampoline_addr(SB)/8, $libc_kevent_trampoline<>(SB) TEXT libc_utimes_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimes(SB) - GLOBL ·libc_utimes_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimes_trampoline_addr(SB)/8, $libc_utimes_trampoline<>(SB) TEXT libc_futimes_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_futimes(SB) - GLOBL ·libc_futimes_trampoline_addr(SB), RODATA, $8 DATA ·libc_futimes_trampoline_addr(SB)/8, $libc_futimes_trampoline<>(SB) TEXT libc_poll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_poll(SB) - GLOBL ·libc_poll_trampoline_addr(SB), RODATA, $8 DATA ·libc_poll_trampoline_addr(SB)/8, $libc_poll_trampoline<>(SB) TEXT libc_madvise_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_madvise(SB) - GLOBL ·libc_madvise_trampoline_addr(SB), RODATA, $8 DATA ·libc_madvise_trampoline_addr(SB)/8, $libc_madvise_trampoline<>(SB) TEXT libc_mlock_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mlock(SB) - GLOBL ·libc_mlock_trampoline_addr(SB), RODATA, $8 DATA ·libc_mlock_trampoline_addr(SB)/8, $libc_mlock_trampoline<>(SB) TEXT libc_mlockall_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mlockall(SB) - GLOBL ·libc_mlockall_trampoline_addr(SB), RODATA, $8 DATA ·libc_mlockall_trampoline_addr(SB)/8, $libc_mlockall_trampoline<>(SB) TEXT libc_mprotect_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mprotect(SB) - GLOBL ·libc_mprotect_trampoline_addr(SB), RODATA, $8 DATA ·libc_mprotect_trampoline_addr(SB)/8, $libc_mprotect_trampoline<>(SB) TEXT libc_msync_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_msync(SB) - GLOBL ·libc_msync_trampoline_addr(SB), RODATA, $8 DATA ·libc_msync_trampoline_addr(SB)/8, $libc_msync_trampoline<>(SB) TEXT libc_munlock_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_munlock(SB) - GLOBL ·libc_munlock_trampoline_addr(SB), RODATA, $8 DATA ·libc_munlock_trampoline_addr(SB)/8, $libc_munlock_trampoline<>(SB) TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_munlockall(SB) - GLOBL 
·libc_munlockall_trampoline_addr(SB), RODATA, $8 DATA ·libc_munlockall_trampoline_addr(SB)/8, $libc_munlockall_trampoline<>(SB) TEXT libc_closedir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_closedir(SB) - GLOBL ·libc_closedir_trampoline_addr(SB), RODATA, $8 DATA ·libc_closedir_trampoline_addr(SB)/8, $libc_closedir_trampoline<>(SB) TEXT libc_readdir_r_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_readdir_r(SB) - GLOBL ·libc_readdir_r_trampoline_addr(SB), RODATA, $8 DATA ·libc_readdir_r_trampoline_addr(SB)/8, $libc_readdir_r_trampoline<>(SB) TEXT libc_pipe_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pipe(SB) - GLOBL ·libc_pipe_trampoline_addr(SB), RODATA, $8 DATA ·libc_pipe_trampoline_addr(SB)/8, $libc_pipe_trampoline<>(SB) TEXT libc_getxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getxattr(SB) - GLOBL ·libc_getxattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_getxattr_trampoline_addr(SB)/8, $libc_getxattr_trampoline<>(SB) TEXT libc_fgetxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fgetxattr(SB) - GLOBL ·libc_fgetxattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_fgetxattr_trampoline_addr(SB)/8, $libc_fgetxattr_trampoline<>(SB) TEXT libc_setxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setxattr(SB) - GLOBL ·libc_setxattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_setxattr_trampoline_addr(SB)/8, $libc_setxattr_trampoline<>(SB) TEXT libc_fsetxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fsetxattr(SB) - GLOBL ·libc_fsetxattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_fsetxattr_trampoline_addr(SB)/8, $libc_fsetxattr_trampoline<>(SB) TEXT libc_removexattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_removexattr(SB) - GLOBL ·libc_removexattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_removexattr_trampoline_addr(SB)/8, $libc_removexattr_trampoline<>(SB) TEXT libc_fremovexattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fremovexattr(SB) - GLOBL ·libc_fremovexattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_fremovexattr_trampoline_addr(SB)/8, $libc_fremovexattr_trampoline<>(SB) TEXT libc_listxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_listxattr(SB) - GLOBL ·libc_listxattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_listxattr_trampoline_addr(SB)/8, $libc_listxattr_trampoline<>(SB) TEXT libc_flistxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_flistxattr(SB) - GLOBL ·libc_flistxattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_flistxattr_trampoline_addr(SB)/8, $libc_flistxattr_trampoline<>(SB) TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) - GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fcntl(SB) - GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $8 DATA ·libc_fcntl_trampoline_addr(SB)/8, $libc_fcntl_trampoline<>(SB) TEXT libc_kill_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_kill(SB) - GLOBL ·libc_kill_trampoline_addr(SB), RODATA, $8 DATA ·libc_kill_trampoline_addr(SB)/8, $libc_kill_trampoline<>(SB) TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ioctl(SB) - GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_ioctl_trampoline_addr(SB)/8, $libc_ioctl_trampoline<>(SB) TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sysctl(SB) - GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendfile(SB) - GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8 DATA ·libc_sendfile_trampoline_addr(SB)/8, $libc_sendfile_trampoline<>(SB) TEXT 
libc_shmat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_shmat(SB) - GLOBL ·libc_shmat_trampoline_addr(SB), RODATA, $8 DATA ·libc_shmat_trampoline_addr(SB)/8, $libc_shmat_trampoline<>(SB) TEXT libc_shmctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_shmctl(SB) - GLOBL ·libc_shmctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_shmctl_trampoline_addr(SB)/8, $libc_shmctl_trampoline<>(SB) TEXT libc_shmdt_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_shmdt(SB) - GLOBL ·libc_shmdt_trampoline_addr(SB), RODATA, $8 DATA ·libc_shmdt_trampoline_addr(SB)/8, $libc_shmdt_trampoline<>(SB) TEXT libc_shmget_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_shmget(SB) - GLOBL ·libc_shmget_trampoline_addr(SB), RODATA, $8 DATA ·libc_shmget_trampoline_addr(SB)/8, $libc_shmget_trampoline<>(SB) TEXT libc_access_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_access(SB) - GLOBL ·libc_access_trampoline_addr(SB), RODATA, $8 DATA ·libc_access_trampoline_addr(SB)/8, $libc_access_trampoline<>(SB) TEXT libc_adjtime_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_adjtime(SB) - GLOBL ·libc_adjtime_trampoline_addr(SB), RODATA, $8 DATA ·libc_adjtime_trampoline_addr(SB)/8, $libc_adjtime_trampoline<>(SB) TEXT libc_chdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chdir(SB) - GLOBL ·libc_chdir_trampoline_addr(SB), RODATA, $8 DATA ·libc_chdir_trampoline_addr(SB)/8, $libc_chdir_trampoline<>(SB) TEXT libc_chflags_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chflags(SB) - GLOBL ·libc_chflags_trampoline_addr(SB), RODATA, $8 DATA ·libc_chflags_trampoline_addr(SB)/8, $libc_chflags_trampoline<>(SB) TEXT libc_chmod_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chmod(SB) - GLOBL ·libc_chmod_trampoline_addr(SB), RODATA, $8 DATA ·libc_chmod_trampoline_addr(SB)/8, $libc_chmod_trampoline<>(SB) TEXT libc_chown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chown(SB) - GLOBL ·libc_chown_trampoline_addr(SB), RODATA, $8 DATA ·libc_chown_trampoline_addr(SB)/8, $libc_chown_trampoline<>(SB) TEXT libc_chroot_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chroot(SB) - GLOBL ·libc_chroot_trampoline_addr(SB), RODATA, $8 DATA ·libc_chroot_trampoline_addr(SB)/8, $libc_chroot_trampoline<>(SB) TEXT libc_clock_gettime_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_clock_gettime(SB) - GLOBL ·libc_clock_gettime_trampoline_addr(SB), RODATA, $8 DATA ·libc_clock_gettime_trampoline_addr(SB)/8, $libc_clock_gettime_trampoline<>(SB) TEXT libc_close_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_close(SB) - GLOBL ·libc_close_trampoline_addr(SB), RODATA, $8 DATA ·libc_close_trampoline_addr(SB)/8, $libc_close_trampoline<>(SB) TEXT libc_clonefile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_clonefile(SB) - GLOBL ·libc_clonefile_trampoline_addr(SB), RODATA, $8 DATA ·libc_clonefile_trampoline_addr(SB)/8, $libc_clonefile_trampoline<>(SB) TEXT libc_clonefileat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_clonefileat(SB) - GLOBL ·libc_clonefileat_trampoline_addr(SB), RODATA, $8 DATA ·libc_clonefileat_trampoline_addr(SB)/8, $libc_clonefileat_trampoline<>(SB) TEXT libc_dup_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_dup(SB) - GLOBL ·libc_dup_trampoline_addr(SB), RODATA, $8 DATA ·libc_dup_trampoline_addr(SB)/8, $libc_dup_trampoline<>(SB) TEXT libc_dup2_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_dup2(SB) - GLOBL ·libc_dup2_trampoline_addr(SB), RODATA, $8 DATA ·libc_dup2_trampoline_addr(SB)/8, $libc_dup2_trampoline<>(SB) TEXT libc_exchangedata_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_exchangedata(SB) - GLOBL ·libc_exchangedata_trampoline_addr(SB), RODATA, $8 DATA ·libc_exchangedata_trampoline_addr(SB)/8, $libc_exchangedata_trampoline<>(SB) TEXT libc_exit_trampoline<>(SB),NOSPLIT,$0-0 JMP 
libc_exit(SB) - GLOBL ·libc_exit_trampoline_addr(SB), RODATA, $8 DATA ·libc_exit_trampoline_addr(SB)/8, $libc_exit_trampoline<>(SB) TEXT libc_faccessat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_faccessat(SB) - GLOBL ·libc_faccessat_trampoline_addr(SB), RODATA, $8 DATA ·libc_faccessat_trampoline_addr(SB)/8, $libc_faccessat_trampoline<>(SB) TEXT libc_fchdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchdir(SB) - GLOBL ·libc_fchdir_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchdir_trampoline_addr(SB)/8, $libc_fchdir_trampoline<>(SB) TEXT libc_fchflags_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchflags(SB) - GLOBL ·libc_fchflags_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchflags_trampoline_addr(SB)/8, $libc_fchflags_trampoline<>(SB) TEXT libc_fchmod_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchmod(SB) - GLOBL ·libc_fchmod_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchmod_trampoline_addr(SB)/8, $libc_fchmod_trampoline<>(SB) TEXT libc_fchmodat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchmodat(SB) - GLOBL ·libc_fchmodat_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchmodat_trampoline_addr(SB)/8, $libc_fchmodat_trampoline<>(SB) TEXT libc_fchown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchown(SB) - GLOBL ·libc_fchown_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchown_trampoline_addr(SB)/8, $libc_fchown_trampoline<>(SB) TEXT libc_fchownat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchownat(SB) - GLOBL ·libc_fchownat_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchownat_trampoline_addr(SB)/8, $libc_fchownat_trampoline<>(SB) TEXT libc_fclonefileat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fclonefileat(SB) - GLOBL ·libc_fclonefileat_trampoline_addr(SB), RODATA, $8 DATA ·libc_fclonefileat_trampoline_addr(SB)/8, $libc_fclonefileat_trampoline<>(SB) TEXT libc_flock_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_flock(SB) - GLOBL ·libc_flock_trampoline_addr(SB), RODATA, $8 DATA ·libc_flock_trampoline_addr(SB)/8, $libc_flock_trampoline<>(SB) TEXT libc_fpathconf_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fpathconf(SB) - GLOBL ·libc_fpathconf_trampoline_addr(SB), RODATA, $8 DATA ·libc_fpathconf_trampoline_addr(SB)/8, $libc_fpathconf_trampoline<>(SB) TEXT libc_fsync_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fsync(SB) - GLOBL ·libc_fsync_trampoline_addr(SB), RODATA, $8 DATA ·libc_fsync_trampoline_addr(SB)/8, $libc_fsync_trampoline<>(SB) TEXT libc_ftruncate_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ftruncate(SB) - GLOBL ·libc_ftruncate_trampoline_addr(SB), RODATA, $8 DATA ·libc_ftruncate_trampoline_addr(SB)/8, $libc_ftruncate_trampoline<>(SB) TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getcwd(SB) - GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $8 DATA ·libc_getcwd_trampoline_addr(SB)/8, $libc_getcwd_trampoline<>(SB) TEXT libc_getdtablesize_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getdtablesize(SB) - GLOBL ·libc_getdtablesize_trampoline_addr(SB), RODATA, $8 DATA ·libc_getdtablesize_trampoline_addr(SB)/8, $libc_getdtablesize_trampoline<>(SB) TEXT libc_getegid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getegid(SB) - GLOBL ·libc_getegid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getegid_trampoline_addr(SB)/8, $libc_getegid_trampoline<>(SB) TEXT libc_geteuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_geteuid(SB) - GLOBL ·libc_geteuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_geteuid_trampoline_addr(SB)/8, $libc_geteuid_trampoline<>(SB) TEXT libc_getgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getgid(SB) - GLOBL ·libc_getgid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getgid_trampoline_addr(SB)/8, $libc_getgid_trampoline<>(SB) TEXT 
libc_getpgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpgid(SB) - GLOBL ·libc_getpgid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpgid_trampoline_addr(SB)/8, $libc_getpgid_trampoline<>(SB) TEXT libc_getpgrp_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpgrp(SB) - GLOBL ·libc_getpgrp_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpgrp_trampoline_addr(SB)/8, $libc_getpgrp_trampoline<>(SB) TEXT libc_getpid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpid(SB) - GLOBL ·libc_getpid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpid_trampoline_addr(SB)/8, $libc_getpid_trampoline<>(SB) TEXT libc_getppid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getppid(SB) - GLOBL ·libc_getppid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getppid_trampoline_addr(SB)/8, $libc_getppid_trampoline<>(SB) TEXT libc_getpriority_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpriority(SB) - GLOBL ·libc_getpriority_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpriority_trampoline_addr(SB)/8, $libc_getpriority_trampoline<>(SB) TEXT libc_getrlimit_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getrlimit(SB) - GLOBL ·libc_getrlimit_trampoline_addr(SB), RODATA, $8 DATA ·libc_getrlimit_trampoline_addr(SB)/8, $libc_getrlimit_trampoline<>(SB) TEXT libc_getrusage_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getrusage(SB) - GLOBL ·libc_getrusage_trampoline_addr(SB), RODATA, $8 DATA ·libc_getrusage_trampoline_addr(SB)/8, $libc_getrusage_trampoline<>(SB) TEXT libc_getsid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getsid(SB) - GLOBL ·libc_getsid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getsid_trampoline_addr(SB)/8, $libc_getsid_trampoline<>(SB) TEXT libc_gettimeofday_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_gettimeofday(SB) - GLOBL ·libc_gettimeofday_trampoline_addr(SB), RODATA, $8 DATA ·libc_gettimeofday_trampoline_addr(SB)/8, $libc_gettimeofday_trampoline<>(SB) TEXT libc_getuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getuid(SB) - GLOBL ·libc_getuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getuid_trampoline_addr(SB)/8, $libc_getuid_trampoline<>(SB) TEXT libc_issetugid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_issetugid(SB) - GLOBL ·libc_issetugid_trampoline_addr(SB), RODATA, $8 DATA ·libc_issetugid_trampoline_addr(SB)/8, $libc_issetugid_trampoline<>(SB) TEXT libc_kqueue_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_kqueue(SB) - GLOBL ·libc_kqueue_trampoline_addr(SB), RODATA, $8 DATA ·libc_kqueue_trampoline_addr(SB)/8, $libc_kqueue_trampoline<>(SB) TEXT libc_lchown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_lchown(SB) - GLOBL ·libc_lchown_trampoline_addr(SB), RODATA, $8 DATA ·libc_lchown_trampoline_addr(SB)/8, $libc_lchown_trampoline<>(SB) TEXT libc_link_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_link(SB) - GLOBL ·libc_link_trampoline_addr(SB), RODATA, $8 DATA ·libc_link_trampoline_addr(SB)/8, $libc_link_trampoline<>(SB) TEXT libc_linkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_linkat(SB) - GLOBL ·libc_linkat_trampoline_addr(SB), RODATA, $8 DATA ·libc_linkat_trampoline_addr(SB)/8, $libc_linkat_trampoline<>(SB) TEXT libc_listen_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_listen(SB) - GLOBL ·libc_listen_trampoline_addr(SB), RODATA, $8 DATA ·libc_listen_trampoline_addr(SB)/8, $libc_listen_trampoline<>(SB) TEXT libc_mkdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkdir(SB) - GLOBL ·libc_mkdir_trampoline_addr(SB), RODATA, $8 DATA ·libc_mkdir_trampoline_addr(SB)/8, $libc_mkdir_trampoline<>(SB) TEXT libc_mkdirat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkdirat(SB) - GLOBL ·libc_mkdirat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mkdirat_trampoline_addr(SB)/8, $libc_mkdirat_trampoline<>(SB) TEXT 
libc_mkfifo_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkfifo(SB) - GLOBL ·libc_mkfifo_trampoline_addr(SB), RODATA, $8 DATA ·libc_mkfifo_trampoline_addr(SB)/8, $libc_mkfifo_trampoline<>(SB) TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mknod(SB) - GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknod_trampoline_addr(SB)/8, $libc_mknod_trampoline<>(SB) TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mount(SB) - GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8 DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB) TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_open(SB) - GLOBL ·libc_open_trampoline_addr(SB), RODATA, $8 DATA ·libc_open_trampoline_addr(SB)/8, $libc_open_trampoline<>(SB) TEXT libc_openat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_openat(SB) - GLOBL ·libc_openat_trampoline_addr(SB), RODATA, $8 DATA ·libc_openat_trampoline_addr(SB)/8, $libc_openat_trampoline<>(SB) TEXT libc_pathconf_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pathconf(SB) - GLOBL ·libc_pathconf_trampoline_addr(SB), RODATA, $8 DATA ·libc_pathconf_trampoline_addr(SB)/8, $libc_pathconf_trampoline<>(SB) TEXT libc_pread_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pread(SB) - GLOBL ·libc_pread_trampoline_addr(SB), RODATA, $8 DATA ·libc_pread_trampoline_addr(SB)/8, $libc_pread_trampoline<>(SB) TEXT libc_pwrite_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pwrite(SB) - GLOBL ·libc_pwrite_trampoline_addr(SB), RODATA, $8 DATA ·libc_pwrite_trampoline_addr(SB)/8, $libc_pwrite_trampoline<>(SB) TEXT libc_read_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_read(SB) - GLOBL ·libc_read_trampoline_addr(SB), RODATA, $8 DATA ·libc_read_trampoline_addr(SB)/8, $libc_read_trampoline<>(SB) TEXT libc_readlink_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_readlink(SB) - GLOBL ·libc_readlink_trampoline_addr(SB), RODATA, $8 DATA ·libc_readlink_trampoline_addr(SB)/8, $libc_readlink_trampoline<>(SB) TEXT libc_readlinkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_readlinkat(SB) - GLOBL ·libc_readlinkat_trampoline_addr(SB), RODATA, $8 DATA ·libc_readlinkat_trampoline_addr(SB)/8, $libc_readlinkat_trampoline<>(SB) TEXT libc_rename_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_rename(SB) - GLOBL ·libc_rename_trampoline_addr(SB), RODATA, $8 DATA ·libc_rename_trampoline_addr(SB)/8, $libc_rename_trampoline<>(SB) TEXT libc_renameat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_renameat(SB) - GLOBL ·libc_renameat_trampoline_addr(SB), RODATA, $8 DATA ·libc_renameat_trampoline_addr(SB)/8, $libc_renameat_trampoline<>(SB) TEXT libc_revoke_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_revoke(SB) - GLOBL ·libc_revoke_trampoline_addr(SB), RODATA, $8 DATA ·libc_revoke_trampoline_addr(SB)/8, $libc_revoke_trampoline<>(SB) TEXT libc_rmdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_rmdir(SB) - GLOBL ·libc_rmdir_trampoline_addr(SB), RODATA, $8 DATA ·libc_rmdir_trampoline_addr(SB)/8, $libc_rmdir_trampoline<>(SB) TEXT libc_lseek_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_lseek(SB) - GLOBL ·libc_lseek_trampoline_addr(SB), RODATA, $8 DATA ·libc_lseek_trampoline_addr(SB)/8, $libc_lseek_trampoline<>(SB) TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_select(SB) - GLOBL ·libc_select_trampoline_addr(SB), RODATA, $8 DATA ·libc_select_trampoline_addr(SB)/8, $libc_select_trampoline<>(SB) @@ -712,192 +595,160 @@ TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setegid(SB) - GLOBL ·libc_setegid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setegid_trampoline_addr(SB)/8, $libc_setegid_trampoline<>(SB) TEXT libc_seteuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_seteuid(SB) - GLOBL 
·libc_seteuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_seteuid_trampoline_addr(SB)/8, $libc_seteuid_trampoline<>(SB) TEXT libc_setgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setgid(SB) - GLOBL ·libc_setgid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setgid_trampoline_addr(SB)/8, $libc_setgid_trampoline<>(SB) TEXT libc_setlogin_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setlogin(SB) - GLOBL ·libc_setlogin_trampoline_addr(SB), RODATA, $8 DATA ·libc_setlogin_trampoline_addr(SB)/8, $libc_setlogin_trampoline<>(SB) TEXT libc_setpgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setpgid(SB) - GLOBL ·libc_setpgid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setpgid_trampoline_addr(SB)/8, $libc_setpgid_trampoline<>(SB) TEXT libc_setpriority_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setpriority(SB) - GLOBL ·libc_setpriority_trampoline_addr(SB), RODATA, $8 DATA ·libc_setpriority_trampoline_addr(SB)/8, $libc_setpriority_trampoline<>(SB) TEXT libc_setprivexec_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setprivexec(SB) - GLOBL ·libc_setprivexec_trampoline_addr(SB), RODATA, $8 DATA ·libc_setprivexec_trampoline_addr(SB)/8, $libc_setprivexec_trampoline<>(SB) TEXT libc_setregid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setregid(SB) - GLOBL ·libc_setregid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setregid_trampoline_addr(SB)/8, $libc_setregid_trampoline<>(SB) TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setreuid(SB) - GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setreuid_trampoline_addr(SB)/8, $libc_setreuid_trampoline<>(SB) TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setsid(SB) - GLOBL ·libc_setsid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setsid_trampoline_addr(SB)/8, $libc_setsid_trampoline<>(SB) TEXT libc_settimeofday_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_settimeofday(SB) - GLOBL ·libc_settimeofday_trampoline_addr(SB), RODATA, $8 DATA ·libc_settimeofday_trampoline_addr(SB)/8, $libc_settimeofday_trampoline<>(SB) TEXT libc_setuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setuid(SB) - GLOBL ·libc_setuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setuid_trampoline_addr(SB)/8, $libc_setuid_trampoline<>(SB) TEXT libc_symlink_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_symlink(SB) - GLOBL ·libc_symlink_trampoline_addr(SB), RODATA, $8 DATA ·libc_symlink_trampoline_addr(SB)/8, $libc_symlink_trampoline<>(SB) TEXT libc_symlinkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_symlinkat(SB) - GLOBL ·libc_symlinkat_trampoline_addr(SB), RODATA, $8 DATA ·libc_symlinkat_trampoline_addr(SB)/8, $libc_symlinkat_trampoline<>(SB) TEXT libc_sync_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sync(SB) - GLOBL ·libc_sync_trampoline_addr(SB), RODATA, $8 DATA ·libc_sync_trampoline_addr(SB)/8, $libc_sync_trampoline<>(SB) TEXT libc_truncate_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_truncate(SB) - GLOBL ·libc_truncate_trampoline_addr(SB), RODATA, $8 DATA ·libc_truncate_trampoline_addr(SB)/8, $libc_truncate_trampoline<>(SB) TEXT libc_umask_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_umask(SB) - GLOBL ·libc_umask_trampoline_addr(SB), RODATA, $8 DATA ·libc_umask_trampoline_addr(SB)/8, $libc_umask_trampoline<>(SB) TEXT libc_undelete_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_undelete(SB) - GLOBL ·libc_undelete_trampoline_addr(SB), RODATA, $8 DATA ·libc_undelete_trampoline_addr(SB)/8, $libc_undelete_trampoline<>(SB) TEXT libc_unlink_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_unlink(SB) - GLOBL ·libc_unlink_trampoline_addr(SB), RODATA, $8 DATA ·libc_unlink_trampoline_addr(SB)/8, $libc_unlink_trampoline<>(SB) TEXT libc_unlinkat_trampoline<>(SB),NOSPLIT,$0-0 
JMP libc_unlinkat(SB) - GLOBL ·libc_unlinkat_trampoline_addr(SB), RODATA, $8 DATA ·libc_unlinkat_trampoline_addr(SB)/8, $libc_unlinkat_trampoline<>(SB) TEXT libc_unmount_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_unmount(SB) - GLOBL ·libc_unmount_trampoline_addr(SB), RODATA, $8 DATA ·libc_unmount_trampoline_addr(SB)/8, $libc_unmount_trampoline<>(SB) TEXT libc_write_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_write(SB) - GLOBL ·libc_write_trampoline_addr(SB), RODATA, $8 DATA ·libc_write_trampoline_addr(SB)/8, $libc_write_trampoline<>(SB) TEXT libc_mmap_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mmap(SB) - GLOBL ·libc_mmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_mmap_trampoline_addr(SB)/8, $libc_mmap_trampoline<>(SB) TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_munmap(SB) - GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) TEXT libc_fstat64_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstat64(SB) - GLOBL ·libc_fstat64_trampoline_addr(SB), RODATA, $8 DATA ·libc_fstat64_trampoline_addr(SB)/8, $libc_fstat64_trampoline<>(SB) TEXT libc_fstatat64_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstatat64(SB) - GLOBL ·libc_fstatat64_trampoline_addr(SB), RODATA, $8 DATA ·libc_fstatat64_trampoline_addr(SB)/8, $libc_fstatat64_trampoline<>(SB) TEXT libc_fstatfs64_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstatfs64(SB) - GLOBL ·libc_fstatfs64_trampoline_addr(SB), RODATA, $8 DATA ·libc_fstatfs64_trampoline_addr(SB)/8, $libc_fstatfs64_trampoline<>(SB) TEXT libc_getfsstat64_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getfsstat64(SB) - GLOBL ·libc_getfsstat64_trampoline_addr(SB), RODATA, $8 DATA ·libc_getfsstat64_trampoline_addr(SB)/8, $libc_getfsstat64_trampoline<>(SB) TEXT libc_lstat64_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_lstat64(SB) - GLOBL ·libc_lstat64_trampoline_addr(SB), RODATA, $8 DATA ·libc_lstat64_trampoline_addr(SB)/8, $libc_lstat64_trampoline<>(SB) TEXT libc_ptrace_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ptrace(SB) - GLOBL ·libc_ptrace_trampoline_addr(SB), RODATA, $8 DATA ·libc_ptrace_trampoline_addr(SB)/8, $libc_ptrace_trampoline<>(SB) TEXT libc_stat64_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_stat64(SB) - GLOBL ·libc_stat64_trampoline_addr(SB), RODATA, $8 DATA ·libc_stat64_trampoline_addr(SB)/8, $libc_stat64_trampoline<>(SB) TEXT libc_statfs64_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_statfs64(SB) - GLOBL ·libc_statfs64_trampoline_addr(SB), RODATA, $8 DATA ·libc_statfs64_trampoline_addr(SB)/8, $libc_statfs64_trampoline<>(SB) diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go 2024-02-23 09:46:14.000000000 +0000 @@ -725,6 +725,12 @@ return } +var libc_ioctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ioctl ioctl "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -733,10 +739,6 @@ return } -var libc_ioctl_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_ioctl ioctl "/usr/lib/libSystem.B.dylib" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sysctl(mib []_C_int, old *byte, oldlen *uintptr, 
new *byte, newlen uintptr) (err error) { @@ -2410,28 +2412,6 @@ // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := syscall_syscall(libc_fstat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { @@ -2518,14 +2498,6 @@ if e1 != 0 { err = errnoErr(e1) } - return -} - -func ptrace1Ptr(request int, pid int, addr uintptr, data unsafe.Pointer) (err error) { - _, _, e1 := syscall_syscall6(libc_ptrace_trampoline_addr, uintptr(request), uintptr(pid), addr, uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } return } diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s 2024-02-23 09:46:14.000000000 +0000 @@ -5,703 +5,586 @@ TEXT libc_fdopendir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fdopendir(SB) - GLOBL ·libc_fdopendir_trampoline_addr(SB), RODATA, $8 DATA ·libc_fdopendir_trampoline_addr(SB)/8, $libc_fdopendir_trampoline<>(SB) TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getgroups(SB) - GLOBL ·libc_getgroups_trampoline_addr(SB), RODATA, $8 DATA ·libc_getgroups_trampoline_addr(SB)/8, $libc_getgroups_trampoline<>(SB) TEXT libc_setgroups_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setgroups(SB) - GLOBL ·libc_setgroups_trampoline_addr(SB), RODATA, $8 DATA ·libc_setgroups_trampoline_addr(SB)/8, $libc_setgroups_trampoline<>(SB) TEXT libc_wait4_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_wait4(SB) - GLOBL ·libc_wait4_trampoline_addr(SB), RODATA, $8 DATA ·libc_wait4_trampoline_addr(SB)/8, $libc_wait4_trampoline<>(SB) TEXT libc_accept_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_accept(SB) - GLOBL ·libc_accept_trampoline_addr(SB), RODATA, $8 DATA ·libc_accept_trampoline_addr(SB)/8, $libc_accept_trampoline<>(SB) TEXT libc_bind_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_bind(SB) - GLOBL ·libc_bind_trampoline_addr(SB), RODATA, $8 DATA ·libc_bind_trampoline_addr(SB)/8, $libc_bind_trampoline<>(SB) TEXT libc_connect_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_connect(SB) - GLOBL ·libc_connect_trampoline_addr(SB), RODATA, $8 DATA ·libc_connect_trampoline_addr(SB)/8, $libc_connect_trampoline<>(SB) TEXT libc_socket_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_socket(SB) - GLOBL ·libc_socket_trampoline_addr(SB), RODATA, $8 DATA ·libc_socket_trampoline_addr(SB)/8, $libc_socket_trampoline<>(SB) TEXT libc_getsockopt_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getsockopt(SB) - GLOBL ·libc_getsockopt_trampoline_addr(SB), RODATA, $8 DATA ·libc_getsockopt_trampoline_addr(SB)/8, $libc_getsockopt_trampoline<>(SB) TEXT libc_setsockopt_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setsockopt(SB) - GLOBL ·libc_setsockopt_trampoline_addr(SB), 
RODATA, $8 DATA ·libc_setsockopt_trampoline_addr(SB)/8, $libc_setsockopt_trampoline<>(SB) TEXT libc_getpeername_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpeername(SB) - GLOBL ·libc_getpeername_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpeername_trampoline_addr(SB)/8, $libc_getpeername_trampoline<>(SB) TEXT libc_getsockname_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getsockname(SB) - GLOBL ·libc_getsockname_trampoline_addr(SB), RODATA, $8 DATA ·libc_getsockname_trampoline_addr(SB)/8, $libc_getsockname_trampoline<>(SB) TEXT libc_shutdown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_shutdown(SB) - GLOBL ·libc_shutdown_trampoline_addr(SB), RODATA, $8 DATA ·libc_shutdown_trampoline_addr(SB)/8, $libc_shutdown_trampoline<>(SB) TEXT libc_socketpair_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_socketpair(SB) - GLOBL ·libc_socketpair_trampoline_addr(SB), RODATA, $8 DATA ·libc_socketpair_trampoline_addr(SB)/8, $libc_socketpair_trampoline<>(SB) TEXT libc_recvfrom_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_recvfrom(SB) - GLOBL ·libc_recvfrom_trampoline_addr(SB), RODATA, $8 DATA ·libc_recvfrom_trampoline_addr(SB)/8, $libc_recvfrom_trampoline<>(SB) TEXT libc_sendto_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendto(SB) - GLOBL ·libc_sendto_trampoline_addr(SB), RODATA, $8 DATA ·libc_sendto_trampoline_addr(SB)/8, $libc_sendto_trampoline<>(SB) TEXT libc_recvmsg_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_recvmsg(SB) - GLOBL ·libc_recvmsg_trampoline_addr(SB), RODATA, $8 DATA ·libc_recvmsg_trampoline_addr(SB)/8, $libc_recvmsg_trampoline<>(SB) TEXT libc_sendmsg_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendmsg(SB) - GLOBL ·libc_sendmsg_trampoline_addr(SB), RODATA, $8 DATA ·libc_sendmsg_trampoline_addr(SB)/8, $libc_sendmsg_trampoline<>(SB) TEXT libc_kevent_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_kevent(SB) - GLOBL ·libc_kevent_trampoline_addr(SB), RODATA, $8 DATA ·libc_kevent_trampoline_addr(SB)/8, $libc_kevent_trampoline<>(SB) TEXT libc_utimes_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimes(SB) - GLOBL ·libc_utimes_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimes_trampoline_addr(SB)/8, $libc_utimes_trampoline<>(SB) TEXT libc_futimes_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_futimes(SB) - GLOBL ·libc_futimes_trampoline_addr(SB), RODATA, $8 DATA ·libc_futimes_trampoline_addr(SB)/8, $libc_futimes_trampoline<>(SB) TEXT libc_poll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_poll(SB) - GLOBL ·libc_poll_trampoline_addr(SB), RODATA, $8 DATA ·libc_poll_trampoline_addr(SB)/8, $libc_poll_trampoline<>(SB) TEXT libc_madvise_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_madvise(SB) - GLOBL ·libc_madvise_trampoline_addr(SB), RODATA, $8 DATA ·libc_madvise_trampoline_addr(SB)/8, $libc_madvise_trampoline<>(SB) TEXT libc_mlock_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mlock(SB) - GLOBL ·libc_mlock_trampoline_addr(SB), RODATA, $8 DATA ·libc_mlock_trampoline_addr(SB)/8, $libc_mlock_trampoline<>(SB) TEXT libc_mlockall_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mlockall(SB) - GLOBL ·libc_mlockall_trampoline_addr(SB), RODATA, $8 DATA ·libc_mlockall_trampoline_addr(SB)/8, $libc_mlockall_trampoline<>(SB) TEXT libc_mprotect_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mprotect(SB) - GLOBL ·libc_mprotect_trampoline_addr(SB), RODATA, $8 DATA ·libc_mprotect_trampoline_addr(SB)/8, $libc_mprotect_trampoline<>(SB) TEXT libc_msync_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_msync(SB) - GLOBL ·libc_msync_trampoline_addr(SB), RODATA, $8 DATA ·libc_msync_trampoline_addr(SB)/8, $libc_msync_trampoline<>(SB) TEXT libc_munlock_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_munlock(SB) - GLOBL 
·libc_munlock_trampoline_addr(SB), RODATA, $8 DATA ·libc_munlock_trampoline_addr(SB)/8, $libc_munlock_trampoline<>(SB) TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_munlockall(SB) - GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $8 DATA ·libc_munlockall_trampoline_addr(SB)/8, $libc_munlockall_trampoline<>(SB) TEXT libc_closedir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_closedir(SB) - GLOBL ·libc_closedir_trampoline_addr(SB), RODATA, $8 DATA ·libc_closedir_trampoline_addr(SB)/8, $libc_closedir_trampoline<>(SB) TEXT libc_readdir_r_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_readdir_r(SB) - GLOBL ·libc_readdir_r_trampoline_addr(SB), RODATA, $8 DATA ·libc_readdir_r_trampoline_addr(SB)/8, $libc_readdir_r_trampoline<>(SB) TEXT libc_pipe_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pipe(SB) - GLOBL ·libc_pipe_trampoline_addr(SB), RODATA, $8 DATA ·libc_pipe_trampoline_addr(SB)/8, $libc_pipe_trampoline<>(SB) TEXT libc_getxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getxattr(SB) - GLOBL ·libc_getxattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_getxattr_trampoline_addr(SB)/8, $libc_getxattr_trampoline<>(SB) TEXT libc_fgetxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fgetxattr(SB) - GLOBL ·libc_fgetxattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_fgetxattr_trampoline_addr(SB)/8, $libc_fgetxattr_trampoline<>(SB) TEXT libc_setxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setxattr(SB) - GLOBL ·libc_setxattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_setxattr_trampoline_addr(SB)/8, $libc_setxattr_trampoline<>(SB) TEXT libc_fsetxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fsetxattr(SB) - GLOBL ·libc_fsetxattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_fsetxattr_trampoline_addr(SB)/8, $libc_fsetxattr_trampoline<>(SB) TEXT libc_removexattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_removexattr(SB) - GLOBL ·libc_removexattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_removexattr_trampoline_addr(SB)/8, $libc_removexattr_trampoline<>(SB) TEXT libc_fremovexattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fremovexattr(SB) - GLOBL ·libc_fremovexattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_fremovexattr_trampoline_addr(SB)/8, $libc_fremovexattr_trampoline<>(SB) TEXT libc_listxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_listxattr(SB) - GLOBL ·libc_listxattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_listxattr_trampoline_addr(SB)/8, $libc_listxattr_trampoline<>(SB) TEXT libc_flistxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_flistxattr(SB) - GLOBL ·libc_flistxattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_flistxattr_trampoline_addr(SB)/8, $libc_flistxattr_trampoline<>(SB) TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) - GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fcntl(SB) - GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $8 DATA ·libc_fcntl_trampoline_addr(SB)/8, $libc_fcntl_trampoline<>(SB) TEXT libc_kill_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_kill(SB) - GLOBL ·libc_kill_trampoline_addr(SB), RODATA, $8 DATA ·libc_kill_trampoline_addr(SB)/8, $libc_kill_trampoline<>(SB) TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ioctl(SB) - GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_ioctl_trampoline_addr(SB)/8, $libc_ioctl_trampoline<>(SB) TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sysctl(SB) - GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) TEXT 
libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendfile(SB) - GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8 DATA ·libc_sendfile_trampoline_addr(SB)/8, $libc_sendfile_trampoline<>(SB) TEXT libc_shmat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_shmat(SB) - GLOBL ·libc_shmat_trampoline_addr(SB), RODATA, $8 DATA ·libc_shmat_trampoline_addr(SB)/8, $libc_shmat_trampoline<>(SB) TEXT libc_shmctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_shmctl(SB) - GLOBL ·libc_shmctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_shmctl_trampoline_addr(SB)/8, $libc_shmctl_trampoline<>(SB) TEXT libc_shmdt_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_shmdt(SB) - GLOBL ·libc_shmdt_trampoline_addr(SB), RODATA, $8 DATA ·libc_shmdt_trampoline_addr(SB)/8, $libc_shmdt_trampoline<>(SB) TEXT libc_shmget_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_shmget(SB) - GLOBL ·libc_shmget_trampoline_addr(SB), RODATA, $8 DATA ·libc_shmget_trampoline_addr(SB)/8, $libc_shmget_trampoline<>(SB) TEXT libc_access_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_access(SB) - GLOBL ·libc_access_trampoline_addr(SB), RODATA, $8 DATA ·libc_access_trampoline_addr(SB)/8, $libc_access_trampoline<>(SB) TEXT libc_adjtime_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_adjtime(SB) - GLOBL ·libc_adjtime_trampoline_addr(SB), RODATA, $8 DATA ·libc_adjtime_trampoline_addr(SB)/8, $libc_adjtime_trampoline<>(SB) TEXT libc_chdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chdir(SB) - GLOBL ·libc_chdir_trampoline_addr(SB), RODATA, $8 DATA ·libc_chdir_trampoline_addr(SB)/8, $libc_chdir_trampoline<>(SB) TEXT libc_chflags_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chflags(SB) - GLOBL ·libc_chflags_trampoline_addr(SB), RODATA, $8 DATA ·libc_chflags_trampoline_addr(SB)/8, $libc_chflags_trampoline<>(SB) TEXT libc_chmod_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chmod(SB) - GLOBL ·libc_chmod_trampoline_addr(SB), RODATA, $8 DATA ·libc_chmod_trampoline_addr(SB)/8, $libc_chmod_trampoline<>(SB) TEXT libc_chown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chown(SB) - GLOBL ·libc_chown_trampoline_addr(SB), RODATA, $8 DATA ·libc_chown_trampoline_addr(SB)/8, $libc_chown_trampoline<>(SB) TEXT libc_chroot_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chroot(SB) - GLOBL ·libc_chroot_trampoline_addr(SB), RODATA, $8 DATA ·libc_chroot_trampoline_addr(SB)/8, $libc_chroot_trampoline<>(SB) TEXT libc_clock_gettime_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_clock_gettime(SB) - GLOBL ·libc_clock_gettime_trampoline_addr(SB), RODATA, $8 DATA ·libc_clock_gettime_trampoline_addr(SB)/8, $libc_clock_gettime_trampoline<>(SB) TEXT libc_close_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_close(SB) - GLOBL ·libc_close_trampoline_addr(SB), RODATA, $8 DATA ·libc_close_trampoline_addr(SB)/8, $libc_close_trampoline<>(SB) TEXT libc_clonefile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_clonefile(SB) - GLOBL ·libc_clonefile_trampoline_addr(SB), RODATA, $8 DATA ·libc_clonefile_trampoline_addr(SB)/8, $libc_clonefile_trampoline<>(SB) TEXT libc_clonefileat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_clonefileat(SB) - GLOBL ·libc_clonefileat_trampoline_addr(SB), RODATA, $8 DATA ·libc_clonefileat_trampoline_addr(SB)/8, $libc_clonefileat_trampoline<>(SB) TEXT libc_dup_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_dup(SB) - GLOBL ·libc_dup_trampoline_addr(SB), RODATA, $8 DATA ·libc_dup_trampoline_addr(SB)/8, $libc_dup_trampoline<>(SB) TEXT libc_dup2_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_dup2(SB) - GLOBL ·libc_dup2_trampoline_addr(SB), RODATA, $8 DATA ·libc_dup2_trampoline_addr(SB)/8, $libc_dup2_trampoline<>(SB) TEXT libc_exchangedata_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_exchangedata(SB) - 
GLOBL ·libc_exchangedata_trampoline_addr(SB), RODATA, $8 DATA ·libc_exchangedata_trampoline_addr(SB)/8, $libc_exchangedata_trampoline<>(SB) TEXT libc_exit_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_exit(SB) - GLOBL ·libc_exit_trampoline_addr(SB), RODATA, $8 DATA ·libc_exit_trampoline_addr(SB)/8, $libc_exit_trampoline<>(SB) TEXT libc_faccessat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_faccessat(SB) - GLOBL ·libc_faccessat_trampoline_addr(SB), RODATA, $8 DATA ·libc_faccessat_trampoline_addr(SB)/8, $libc_faccessat_trampoline<>(SB) TEXT libc_fchdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchdir(SB) - GLOBL ·libc_fchdir_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchdir_trampoline_addr(SB)/8, $libc_fchdir_trampoline<>(SB) TEXT libc_fchflags_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchflags(SB) - GLOBL ·libc_fchflags_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchflags_trampoline_addr(SB)/8, $libc_fchflags_trampoline<>(SB) TEXT libc_fchmod_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchmod(SB) - GLOBL ·libc_fchmod_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchmod_trampoline_addr(SB)/8, $libc_fchmod_trampoline<>(SB) TEXT libc_fchmodat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchmodat(SB) - GLOBL ·libc_fchmodat_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchmodat_trampoline_addr(SB)/8, $libc_fchmodat_trampoline<>(SB) TEXT libc_fchown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchown(SB) - GLOBL ·libc_fchown_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchown_trampoline_addr(SB)/8, $libc_fchown_trampoline<>(SB) TEXT libc_fchownat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchownat(SB) - GLOBL ·libc_fchownat_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchownat_trampoline_addr(SB)/8, $libc_fchownat_trampoline<>(SB) TEXT libc_fclonefileat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fclonefileat(SB) - GLOBL ·libc_fclonefileat_trampoline_addr(SB), RODATA, $8 DATA ·libc_fclonefileat_trampoline_addr(SB)/8, $libc_fclonefileat_trampoline<>(SB) TEXT libc_flock_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_flock(SB) - GLOBL ·libc_flock_trampoline_addr(SB), RODATA, $8 DATA ·libc_flock_trampoline_addr(SB)/8, $libc_flock_trampoline<>(SB) TEXT libc_fpathconf_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fpathconf(SB) - GLOBL ·libc_fpathconf_trampoline_addr(SB), RODATA, $8 DATA ·libc_fpathconf_trampoline_addr(SB)/8, $libc_fpathconf_trampoline<>(SB) TEXT libc_fsync_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fsync(SB) - GLOBL ·libc_fsync_trampoline_addr(SB), RODATA, $8 DATA ·libc_fsync_trampoline_addr(SB)/8, $libc_fsync_trampoline<>(SB) TEXT libc_ftruncate_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ftruncate(SB) - GLOBL ·libc_ftruncate_trampoline_addr(SB), RODATA, $8 DATA ·libc_ftruncate_trampoline_addr(SB)/8, $libc_ftruncate_trampoline<>(SB) TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getcwd(SB) - GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $8 DATA ·libc_getcwd_trampoline_addr(SB)/8, $libc_getcwd_trampoline<>(SB) TEXT libc_getdtablesize_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getdtablesize(SB) - GLOBL ·libc_getdtablesize_trampoline_addr(SB), RODATA, $8 DATA ·libc_getdtablesize_trampoline_addr(SB)/8, $libc_getdtablesize_trampoline<>(SB) TEXT libc_getegid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getegid(SB) - GLOBL ·libc_getegid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getegid_trampoline_addr(SB)/8, $libc_getegid_trampoline<>(SB) TEXT libc_geteuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_geteuid(SB) - GLOBL ·libc_geteuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_geteuid_trampoline_addr(SB)/8, $libc_geteuid_trampoline<>(SB) TEXT 
libc_getgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getgid(SB) - GLOBL ·libc_getgid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getgid_trampoline_addr(SB)/8, $libc_getgid_trampoline<>(SB) TEXT libc_getpgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpgid(SB) - GLOBL ·libc_getpgid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpgid_trampoline_addr(SB)/8, $libc_getpgid_trampoline<>(SB) TEXT libc_getpgrp_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpgrp(SB) - GLOBL ·libc_getpgrp_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpgrp_trampoline_addr(SB)/8, $libc_getpgrp_trampoline<>(SB) TEXT libc_getpid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpid(SB) - GLOBL ·libc_getpid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpid_trampoline_addr(SB)/8, $libc_getpid_trampoline<>(SB) TEXT libc_getppid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getppid(SB) - GLOBL ·libc_getppid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getppid_trampoline_addr(SB)/8, $libc_getppid_trampoline<>(SB) TEXT libc_getpriority_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpriority(SB) - GLOBL ·libc_getpriority_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpriority_trampoline_addr(SB)/8, $libc_getpriority_trampoline<>(SB) TEXT libc_getrlimit_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getrlimit(SB) - GLOBL ·libc_getrlimit_trampoline_addr(SB), RODATA, $8 DATA ·libc_getrlimit_trampoline_addr(SB)/8, $libc_getrlimit_trampoline<>(SB) TEXT libc_getrusage_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getrusage(SB) - GLOBL ·libc_getrusage_trampoline_addr(SB), RODATA, $8 DATA ·libc_getrusage_trampoline_addr(SB)/8, $libc_getrusage_trampoline<>(SB) TEXT libc_getsid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getsid(SB) - GLOBL ·libc_getsid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getsid_trampoline_addr(SB)/8, $libc_getsid_trampoline<>(SB) TEXT libc_gettimeofday_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_gettimeofday(SB) - GLOBL ·libc_gettimeofday_trampoline_addr(SB), RODATA, $8 DATA ·libc_gettimeofday_trampoline_addr(SB)/8, $libc_gettimeofday_trampoline<>(SB) TEXT libc_getuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getuid(SB) - GLOBL ·libc_getuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getuid_trampoline_addr(SB)/8, $libc_getuid_trampoline<>(SB) TEXT libc_issetugid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_issetugid(SB) - GLOBL ·libc_issetugid_trampoline_addr(SB), RODATA, $8 DATA ·libc_issetugid_trampoline_addr(SB)/8, $libc_issetugid_trampoline<>(SB) TEXT libc_kqueue_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_kqueue(SB) - GLOBL ·libc_kqueue_trampoline_addr(SB), RODATA, $8 DATA ·libc_kqueue_trampoline_addr(SB)/8, $libc_kqueue_trampoline<>(SB) TEXT libc_lchown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_lchown(SB) - GLOBL ·libc_lchown_trampoline_addr(SB), RODATA, $8 DATA ·libc_lchown_trampoline_addr(SB)/8, $libc_lchown_trampoline<>(SB) TEXT libc_link_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_link(SB) - GLOBL ·libc_link_trampoline_addr(SB), RODATA, $8 DATA ·libc_link_trampoline_addr(SB)/8, $libc_link_trampoline<>(SB) TEXT libc_linkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_linkat(SB) - GLOBL ·libc_linkat_trampoline_addr(SB), RODATA, $8 DATA ·libc_linkat_trampoline_addr(SB)/8, $libc_linkat_trampoline<>(SB) TEXT libc_listen_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_listen(SB) - GLOBL ·libc_listen_trampoline_addr(SB), RODATA, $8 DATA ·libc_listen_trampoline_addr(SB)/8, $libc_listen_trampoline<>(SB) TEXT libc_mkdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkdir(SB) - GLOBL ·libc_mkdir_trampoline_addr(SB), RODATA, $8 DATA ·libc_mkdir_trampoline_addr(SB)/8, $libc_mkdir_trampoline<>(SB) TEXT 
libc_mkdirat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkdirat(SB) - GLOBL ·libc_mkdirat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mkdirat_trampoline_addr(SB)/8, $libc_mkdirat_trampoline<>(SB) TEXT libc_mkfifo_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkfifo(SB) - GLOBL ·libc_mkfifo_trampoline_addr(SB), RODATA, $8 DATA ·libc_mkfifo_trampoline_addr(SB)/8, $libc_mkfifo_trampoline<>(SB) TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mknod(SB) - GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknod_trampoline_addr(SB)/8, $libc_mknod_trampoline<>(SB) TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mount(SB) - GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8 DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB) TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_open(SB) - GLOBL ·libc_open_trampoline_addr(SB), RODATA, $8 DATA ·libc_open_trampoline_addr(SB)/8, $libc_open_trampoline<>(SB) TEXT libc_openat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_openat(SB) - GLOBL ·libc_openat_trampoline_addr(SB), RODATA, $8 DATA ·libc_openat_trampoline_addr(SB)/8, $libc_openat_trampoline<>(SB) TEXT libc_pathconf_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pathconf(SB) - GLOBL ·libc_pathconf_trampoline_addr(SB), RODATA, $8 DATA ·libc_pathconf_trampoline_addr(SB)/8, $libc_pathconf_trampoline<>(SB) TEXT libc_pread_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pread(SB) - GLOBL ·libc_pread_trampoline_addr(SB), RODATA, $8 DATA ·libc_pread_trampoline_addr(SB)/8, $libc_pread_trampoline<>(SB) TEXT libc_pwrite_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pwrite(SB) - GLOBL ·libc_pwrite_trampoline_addr(SB), RODATA, $8 DATA ·libc_pwrite_trampoline_addr(SB)/8, $libc_pwrite_trampoline<>(SB) TEXT libc_read_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_read(SB) - GLOBL ·libc_read_trampoline_addr(SB), RODATA, $8 DATA ·libc_read_trampoline_addr(SB)/8, $libc_read_trampoline<>(SB) TEXT libc_readlink_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_readlink(SB) - GLOBL ·libc_readlink_trampoline_addr(SB), RODATA, $8 DATA ·libc_readlink_trampoline_addr(SB)/8, $libc_readlink_trampoline<>(SB) TEXT libc_readlinkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_readlinkat(SB) - GLOBL ·libc_readlinkat_trampoline_addr(SB), RODATA, $8 DATA ·libc_readlinkat_trampoline_addr(SB)/8, $libc_readlinkat_trampoline<>(SB) TEXT libc_rename_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_rename(SB) - GLOBL ·libc_rename_trampoline_addr(SB), RODATA, $8 DATA ·libc_rename_trampoline_addr(SB)/8, $libc_rename_trampoline<>(SB) TEXT libc_renameat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_renameat(SB) - GLOBL ·libc_renameat_trampoline_addr(SB), RODATA, $8 DATA ·libc_renameat_trampoline_addr(SB)/8, $libc_renameat_trampoline<>(SB) TEXT libc_revoke_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_revoke(SB) - GLOBL ·libc_revoke_trampoline_addr(SB), RODATA, $8 DATA ·libc_revoke_trampoline_addr(SB)/8, $libc_revoke_trampoline<>(SB) TEXT libc_rmdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_rmdir(SB) - GLOBL ·libc_rmdir_trampoline_addr(SB), RODATA, $8 DATA ·libc_rmdir_trampoline_addr(SB)/8, $libc_rmdir_trampoline<>(SB) TEXT libc_lseek_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_lseek(SB) - GLOBL ·libc_lseek_trampoline_addr(SB), RODATA, $8 DATA ·libc_lseek_trampoline_addr(SB)/8, $libc_lseek_trampoline<>(SB) TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_select(SB) - GLOBL ·libc_select_trampoline_addr(SB), RODATA, $8 DATA ·libc_select_trampoline_addr(SB)/8, $libc_select_trampoline<>(SB) @@ -712,192 +595,160 @@ TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setegid(SB) - GLOBL 
·libc_setegid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setegid_trampoline_addr(SB)/8, $libc_setegid_trampoline<>(SB) TEXT libc_seteuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_seteuid(SB) - GLOBL ·libc_seteuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_seteuid_trampoline_addr(SB)/8, $libc_seteuid_trampoline<>(SB) TEXT libc_setgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setgid(SB) - GLOBL ·libc_setgid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setgid_trampoline_addr(SB)/8, $libc_setgid_trampoline<>(SB) TEXT libc_setlogin_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setlogin(SB) - GLOBL ·libc_setlogin_trampoline_addr(SB), RODATA, $8 DATA ·libc_setlogin_trampoline_addr(SB)/8, $libc_setlogin_trampoline<>(SB) TEXT libc_setpgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setpgid(SB) - GLOBL ·libc_setpgid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setpgid_trampoline_addr(SB)/8, $libc_setpgid_trampoline<>(SB) TEXT libc_setpriority_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setpriority(SB) - GLOBL ·libc_setpriority_trampoline_addr(SB), RODATA, $8 DATA ·libc_setpriority_trampoline_addr(SB)/8, $libc_setpriority_trampoline<>(SB) TEXT libc_setprivexec_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setprivexec(SB) - GLOBL ·libc_setprivexec_trampoline_addr(SB), RODATA, $8 DATA ·libc_setprivexec_trampoline_addr(SB)/8, $libc_setprivexec_trampoline<>(SB) TEXT libc_setregid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setregid(SB) - GLOBL ·libc_setregid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setregid_trampoline_addr(SB)/8, $libc_setregid_trampoline<>(SB) TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setreuid(SB) - GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setreuid_trampoline_addr(SB)/8, $libc_setreuid_trampoline<>(SB) TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setsid(SB) - GLOBL ·libc_setsid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setsid_trampoline_addr(SB)/8, $libc_setsid_trampoline<>(SB) TEXT libc_settimeofday_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_settimeofday(SB) - GLOBL ·libc_settimeofday_trampoline_addr(SB), RODATA, $8 DATA ·libc_settimeofday_trampoline_addr(SB)/8, $libc_settimeofday_trampoline<>(SB) TEXT libc_setuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setuid(SB) - GLOBL ·libc_setuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setuid_trampoline_addr(SB)/8, $libc_setuid_trampoline<>(SB) TEXT libc_symlink_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_symlink(SB) - GLOBL ·libc_symlink_trampoline_addr(SB), RODATA, $8 DATA ·libc_symlink_trampoline_addr(SB)/8, $libc_symlink_trampoline<>(SB) TEXT libc_symlinkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_symlinkat(SB) - GLOBL ·libc_symlinkat_trampoline_addr(SB), RODATA, $8 DATA ·libc_symlinkat_trampoline_addr(SB)/8, $libc_symlinkat_trampoline<>(SB) TEXT libc_sync_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sync(SB) - GLOBL ·libc_sync_trampoline_addr(SB), RODATA, $8 DATA ·libc_sync_trampoline_addr(SB)/8, $libc_sync_trampoline<>(SB) TEXT libc_truncate_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_truncate(SB) - GLOBL ·libc_truncate_trampoline_addr(SB), RODATA, $8 DATA ·libc_truncate_trampoline_addr(SB)/8, $libc_truncate_trampoline<>(SB) TEXT libc_umask_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_umask(SB) - GLOBL ·libc_umask_trampoline_addr(SB), RODATA, $8 DATA ·libc_umask_trampoline_addr(SB)/8, $libc_umask_trampoline<>(SB) TEXT libc_undelete_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_undelete(SB) - GLOBL ·libc_undelete_trampoline_addr(SB), RODATA, $8 DATA ·libc_undelete_trampoline_addr(SB)/8, $libc_undelete_trampoline<>(SB) TEXT 
libc_unlink_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_unlink(SB) - GLOBL ·libc_unlink_trampoline_addr(SB), RODATA, $8 DATA ·libc_unlink_trampoline_addr(SB)/8, $libc_unlink_trampoline<>(SB) TEXT libc_unlinkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_unlinkat(SB) - GLOBL ·libc_unlinkat_trampoline_addr(SB), RODATA, $8 DATA ·libc_unlinkat_trampoline_addr(SB)/8, $libc_unlinkat_trampoline<>(SB) TEXT libc_unmount_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_unmount(SB) - GLOBL ·libc_unmount_trampoline_addr(SB), RODATA, $8 DATA ·libc_unmount_trampoline_addr(SB)/8, $libc_unmount_trampoline<>(SB) TEXT libc_write_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_write(SB) - GLOBL ·libc_write_trampoline_addr(SB), RODATA, $8 DATA ·libc_write_trampoline_addr(SB)/8, $libc_write_trampoline<>(SB) TEXT libc_mmap_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mmap(SB) - GLOBL ·libc_mmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_mmap_trampoline_addr(SB)/8, $libc_mmap_trampoline<>(SB) TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_munmap(SB) - GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) TEXT libc_fstat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstat(SB) - GLOBL ·libc_fstat_trampoline_addr(SB), RODATA, $8 DATA ·libc_fstat_trampoline_addr(SB)/8, $libc_fstat_trampoline<>(SB) TEXT libc_fstatat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstatat(SB) - GLOBL ·libc_fstatat_trampoline_addr(SB), RODATA, $8 DATA ·libc_fstatat_trampoline_addr(SB)/8, $libc_fstatat_trampoline<>(SB) TEXT libc_fstatfs_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstatfs(SB) - GLOBL ·libc_fstatfs_trampoline_addr(SB), RODATA, $8 DATA ·libc_fstatfs_trampoline_addr(SB)/8, $libc_fstatfs_trampoline<>(SB) TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getfsstat(SB) - GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $8 DATA ·libc_getfsstat_trampoline_addr(SB)/8, $libc_getfsstat_trampoline<>(SB) TEXT libc_lstat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_lstat(SB) - GLOBL ·libc_lstat_trampoline_addr(SB), RODATA, $8 DATA ·libc_lstat_trampoline_addr(SB)/8, $libc_lstat_trampoline<>(SB) TEXT libc_ptrace_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ptrace(SB) - GLOBL ·libc_ptrace_trampoline_addr(SB), RODATA, $8 DATA ·libc_ptrace_trampoline_addr(SB)/8, $libc_ptrace_trampoline<>(SB) TEXT libc_stat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_stat(SB) - GLOBL ·libc_stat_trampoline_addr(SB), RODATA, $8 DATA ·libc_stat_trampoline_addr(SB)/8, $libc_stat_trampoline<>(SB) TEXT libc_statfs_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_statfs(SB) - GLOBL ·libc_statfs_trampoline_addr(SB), RODATA, $8 DATA ·libc_statfs_trampoline_addr(SB)/8, $libc_statfs_trampoline<>(SB) diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go 2024-02-23 09:46:14.000000000 +0000 @@ -1642,28 +1642,6 @@ // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, 
uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) { r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) nfd = int(r0) diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go 2024-02-23 09:46:14.000000000 +0000 @@ -1862,28 +1862,6 @@ // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) { r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) nfd = int(r0) diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go 2024-02-23 09:46:14.000000000 +0000 @@ -1862,28 +1862,6 @@ // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) { r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) nfd = int(r0) diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go 2024-02-23 09:46:14.000000000 +0000 @@ -1862,28 +1862,6 @@ // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, 
nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) { r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) nfd = int(r0) diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go 2024-02-23 09:46:14.000000000 +0000 @@ -1862,28 +1862,6 @@ // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) { r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) nfd = int(r0) diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go 2024-02-23 09:46:14.000000000 +0000 @@ -1862,28 +1862,6 @@ // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) { r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) nfd = int(r0) diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go 
temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go 2024-02-23 09:46:14.000000000 +0000 @@ -40,7 +40,7 @@ r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procreadv)), 3, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(iovs)), 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -55,7 +55,7 @@ r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procpreadv)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(iovs)), uintptr(off), 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -70,7 +70,7 @@ r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procwritev)), 3, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(iovs)), 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -85,7 +85,7 @@ r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procpwritev)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(iovs)), uintptr(off), 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -96,7 +96,7 @@ r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procaccept4)), 4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) fd = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsyscall_linux.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsyscall_linux.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsyscall_linux.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsyscall_linux.go 2024-02-23 09:46:14.000000000 +0000 @@ -1356,7 +1356,7 @@ // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { +func pselect6(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *sigset_argpack) (n int, err error) { r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) n = int(r0) if e1 != 0 { @@ -1734,28 +1734,6 @@ // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, p *byte, np int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, p *byte, np int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func readv(fd int, iovs []Iovec) (n int, err error) { var _p0 unsafe.Pointer if len(iovs) > 0 { @@ -1868,6 +1846,17 @@ // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func mremap(oldaddr uintptr, oldlength uintptr, newlength uintptr, flags int, newaddr uintptr) (xaddr uintptr, err error) { + r0, _, e1 := Syscall6(SYS_MREMAP, uintptr(oldaddr), uintptr(oldlength), uintptr(newlength), uintptr(flags), uintptr(newaddr), 0) + xaddr = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// 
THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Madvise(b []byte, advice int) (err error) { var _p0 unsafe.Pointer if len(b) > 0 { @@ -2170,5 +2159,39 @@ if e1 != 0 { err = errnoErr(e1) } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getresuid(ruid *_C_int, euid *_C_int, suid *_C_int) { + RawSyscallNoError(SYS_GETRESUID, uintptr(unsafe.Pointer(ruid)), uintptr(unsafe.Pointer(euid)), uintptr(unsafe.Pointer(suid))) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getresgid(rgid *_C_int, egid *_C_int, sgid *_C_int) { + RawSyscallNoError(SYS_GETRESGID, uintptr(unsafe.Pointer(rgid)), uintptr(unsafe.Pointer(egid)), uintptr(unsafe.Pointer(sgid))) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func schedSetattr(pid int, attr *SchedAttr, flags uint) (err error) { + _, _, e1 := Syscall(SYS_SCHED_SETATTR, uintptr(pid), uintptr(unsafe.Pointer(attr)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func schedGetattr(pid int, attr *SchedAttr, size uint, flags uint) (err error) { + _, _, e1 := Syscall6(SYS_SCHED_GETATTR, uintptr(pid), uintptr(unsafe.Pointer(attr)), uintptr(size), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } return } diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go 2024-02-23 09:46:14.000000000 +0000 @@ -531,3 +531,19 @@ } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func riscvHWProbe(pairs []RISCVHWProbePairs, cpuCount uintptr, cpus *CPUSet, flags uint) (err error) { + var _p0 unsafe.Pointer + if len(pairs) > 0 { + _p0 = unsafe.Pointer(&pairs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_RISCV_HWPROBE, uintptr(_p0), uintptr(len(pairs)), uintptr(cpuCount), uintptr(unsafe.Pointer(cpus)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go 2024-02-23 09:46:14.000000000 +0000 @@ -1824,20 +1824,13 @@ // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) +func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) + _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) if e1 != 0 { 
err = errnoErr(e1) } @@ -1846,13 +1839,9 @@ // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) +func mremapNetBSD(oldp uintptr, oldsize uintptr, newp uintptr, newsize uintptr, flags int) (xaddr uintptr, err error) { + r0, _, e1 := Syscall6(SYS_MREMAP, uintptr(oldp), uintptr(oldsize), uintptr(newp), uintptr(newsize), uintptr(flags), 0) + xaddr = uintptr(r0) if e1 != 0 { err = errnoErr(e1) } diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go 2024-02-23 09:46:14.000000000 +0000 @@ -1824,20 +1824,13 @@ // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) +func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) + _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -1846,13 +1839,9 @@ // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) +func mremapNetBSD(oldp uintptr, oldsize uintptr, newp uintptr, newsize uintptr, flags int) (xaddr uintptr, err error) { + r0, _, e1 := Syscall6(SYS_MREMAP, uintptr(oldp), uintptr(oldsize), uintptr(newp), uintptr(newsize), uintptr(flags), 0) + xaddr = uintptr(r0) if e1 != 0 { err = errnoErr(e1) } diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go 2024-02-23 09:46:14.000000000 +0000 @@ -1824,20 +1824,13 @@ // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) +func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return } - return -} - -// THIS FILE 
IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) + _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -1846,13 +1839,9 @@ // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) +func mremapNetBSD(oldp uintptr, oldsize uintptr, newp uintptr, newsize uintptr, flags int) (xaddr uintptr, err error) { + r0, _, e1 := Syscall6(SYS_MREMAP, uintptr(oldp), uintptr(oldsize), uintptr(newp), uintptr(newsize), uintptr(flags), 0) + xaddr = uintptr(r0) if e1 != 0 { err = errnoErr(e1) } diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go 2024-02-23 09:46:14.000000000 +0000 @@ -1824,20 +1824,13 @@ // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) +func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) + _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -1846,13 +1839,9 @@ // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) +func mremapNetBSD(oldp uintptr, oldsize uintptr, newp uintptr, newsize uintptr, flags int) (xaddr uintptr, err error) { + r0, _, e1 := Syscall6(SYS_MREMAP, uintptr(oldp), uintptr(oldsize), uintptr(newp), uintptr(newsize), uintptr(flags), 0) + xaddr = uintptr(r0) if e1 != 0 { err = errnoErr(e1) } diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go 2024-02-23 09:46:14.000000000 +0000 @@ -519,6 +519,28 @@ // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getresuid(ruid 
*_C_int, euid *_C_int, suid *_C_int) { + syscall_rawSyscall(libc_getresuid_trampoline_addr, uintptr(unsafe.Pointer(ruid)), uintptr(unsafe.Pointer(euid)), uintptr(unsafe.Pointer(suid))) + return +} + +var libc_getresuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getresuid getresuid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getresgid(rgid *_C_int, egid *_C_int, sgid *_C_int) { + syscall_rawSyscall(libc_getresgid_trampoline_addr, uintptr(unsafe.Pointer(rgid)), uintptr(unsafe.Pointer(egid)), uintptr(unsafe.Pointer(sgid))) + return +} + +var libc_getresgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getresgid getresgid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctl(fd int, req uint, arg uintptr) (err error) { _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -527,6 +549,12 @@ return } +var libc_ioctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -535,10 +563,6 @@ return } -var libc_ioctl_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { @@ -2189,28 +2213,6 @@ // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s 2024-02-23 09:46:14.000000000 +0000 @@ -158,6 +158,16 @@ GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $4 DATA ·libc_getcwd_trampoline_addr(SB)/4, $libc_getcwd_trampoline<>(SB) +TEXT libc_getresuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getresuid(SB) +GLOBL ·libc_getresuid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getresuid_trampoline_addr(SB)/4, $libc_getresuid_trampoline<>(SB) + +TEXT libc_getresgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getresgid(SB) +GLOBL ·libc_getresgid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getresgid_trampoline_addr(SB)/4, $libc_getresgid_trampoline<>(SB) + TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ioctl(SB) GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $4 diff -Nru 
temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go 2024-02-23 09:46:14.000000000 +0000 @@ -519,6 +519,28 @@ // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getresuid(ruid *_C_int, euid *_C_int, suid *_C_int) { + syscall_rawSyscall(libc_getresuid_trampoline_addr, uintptr(unsafe.Pointer(ruid)), uintptr(unsafe.Pointer(euid)), uintptr(unsafe.Pointer(suid))) + return +} + +var libc_getresuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getresuid getresuid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getresgid(rgid *_C_int, egid *_C_int, sgid *_C_int) { + syscall_rawSyscall(libc_getresgid_trampoline_addr, uintptr(unsafe.Pointer(rgid)), uintptr(unsafe.Pointer(egid)), uintptr(unsafe.Pointer(sgid))) + return +} + +var libc_getresgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getresgid getresgid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctl(fd int, req uint, arg uintptr) (err error) { _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -527,6 +549,12 @@ return } +var libc_ioctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -535,10 +563,6 @@ return } -var libc_ioctl_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { @@ -2189,28 +2213,6 @@ // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s 2024-02-23 09:46:14.000000000 +0000 @@ -158,6 +158,16 @@ GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $8 DATA ·libc_getcwd_trampoline_addr(SB)/8, $libc_getcwd_trampoline<>(SB) +TEXT libc_getresuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getresuid(SB) +GLOBL 
·libc_getresuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getresuid_trampoline_addr(SB)/8, $libc_getresuid_trampoline<>(SB) + +TEXT libc_getresgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getresgid(SB) +GLOBL ·libc_getresgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getresgid_trampoline_addr(SB)/8, $libc_getresgid_trampoline<>(SB) + TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ioctl(SB) GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go 2024-02-23 09:46:14.000000000 +0000 @@ -519,6 +519,28 @@ // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getresuid(ruid *_C_int, euid *_C_int, suid *_C_int) { + syscall_rawSyscall(libc_getresuid_trampoline_addr, uintptr(unsafe.Pointer(ruid)), uintptr(unsafe.Pointer(euid)), uintptr(unsafe.Pointer(suid))) + return +} + +var libc_getresuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getresuid getresuid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getresgid(rgid *_C_int, egid *_C_int, sgid *_C_int) { + syscall_rawSyscall(libc_getresgid_trampoline_addr, uintptr(unsafe.Pointer(rgid)), uintptr(unsafe.Pointer(egid)), uintptr(unsafe.Pointer(sgid))) + return +} + +var libc_getresgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getresgid getresgid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctl(fd int, req uint, arg uintptr) (err error) { _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -527,6 +549,12 @@ return } +var libc_ioctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -535,10 +563,6 @@ return } -var libc_ioctl_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { @@ -2189,28 +2213,6 @@ // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s --- 
temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s 2024-02-23 09:46:14.000000000 +0000 @@ -158,6 +158,16 @@ GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $4 DATA ·libc_getcwd_trampoline_addr(SB)/4, $libc_getcwd_trampoline<>(SB) +TEXT libc_getresuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getresuid(SB) +GLOBL ·libc_getresuid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getresuid_trampoline_addr(SB)/4, $libc_getresuid_trampoline<>(SB) + +TEXT libc_getresgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getresgid(SB) +GLOBL ·libc_getresgid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getresgid_trampoline_addr(SB)/4, $libc_getresgid_trampoline<>(SB) + TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ioctl(SB) GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $4 diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go 2024-02-23 09:46:14.000000000 +0000 @@ -519,6 +519,28 @@ // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getresuid(ruid *_C_int, euid *_C_int, suid *_C_int) { + syscall_rawSyscall(libc_getresuid_trampoline_addr, uintptr(unsafe.Pointer(ruid)), uintptr(unsafe.Pointer(euid)), uintptr(unsafe.Pointer(suid))) + return +} + +var libc_getresuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getresuid getresuid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getresgid(rgid *_C_int, egid *_C_int, sgid *_C_int) { + syscall_rawSyscall(libc_getresgid_trampoline_addr, uintptr(unsafe.Pointer(rgid)), uintptr(unsafe.Pointer(egid)), uintptr(unsafe.Pointer(sgid))) + return +} + +var libc_getresgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getresgid getresgid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctl(fd int, req uint, arg uintptr) (err error) { _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -527,6 +549,12 @@ return } +var libc_ioctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -535,10 +563,6 @@ return } -var libc_ioctl_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { @@ -2189,28 +2213,6 @@ // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), 
uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s 2024-02-23 09:46:14.000000000 +0000 @@ -158,6 +158,16 @@ GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $8 DATA ·libc_getcwd_trampoline_addr(SB)/8, $libc_getcwd_trampoline<>(SB) +TEXT libc_getresuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getresuid(SB) +GLOBL ·libc_getresuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getresuid_trampoline_addr(SB)/8, $libc_getresuid_trampoline<>(SB) + +TEXT libc_getresgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getresgid(SB) +GLOBL ·libc_getresgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getresgid_trampoline_addr(SB)/8, $libc_getresgid_trampoline<>(SB) + TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ioctl(SB) GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go 2024-02-23 09:46:14.000000000 +0000 @@ -519,6 +519,28 @@ // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getresuid(ruid *_C_int, euid *_C_int, suid *_C_int) { + syscall_rawSyscall(libc_getresuid_trampoline_addr, uintptr(unsafe.Pointer(ruid)), uintptr(unsafe.Pointer(euid)), uintptr(unsafe.Pointer(suid))) + return +} + +var libc_getresuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getresuid getresuid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getresgid(rgid *_C_int, egid *_C_int, sgid *_C_int) { + syscall_rawSyscall(libc_getresgid_trampoline_addr, uintptr(unsafe.Pointer(rgid)), uintptr(unsafe.Pointer(egid)), uintptr(unsafe.Pointer(sgid))) + return +} + +var libc_getresgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getresgid getresgid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctl(fd int, req uint, arg uintptr) (err error) { _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -527,6 +549,12 @@ return } +var libc_ioctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -535,10 +563,6 @@ return } -var libc_ioctl_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { @@ -2189,28 +2213,6 @@ // THIS FILE IS GENERATED BY THE COMMAND AT THE 
TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s 2024-02-23 09:46:14.000000000 +0000 @@ -158,6 +158,16 @@ GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $8 DATA ·libc_getcwd_trampoline_addr(SB)/8, $libc_getcwd_trampoline<>(SB) +TEXT libc_getresuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getresuid(SB) +GLOBL ·libc_getresuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getresuid_trampoline_addr(SB)/8, $libc_getresuid_trampoline<>(SB) + +TEXT libc_getresgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getresgid(SB) +GLOBL ·libc_getresgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getresgid_trampoline_addr(SB)/8, $libc_getresgid_trampoline<>(SB) + TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ioctl(SB) GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go 2024-02-23 09:46:14.000000000 +0000 @@ -519,6 +519,28 @@ // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getresuid(ruid *_C_int, euid *_C_int, suid *_C_int) { + syscall_rawSyscall(libc_getresuid_trampoline_addr, uintptr(unsafe.Pointer(ruid)), uintptr(unsafe.Pointer(euid)), uintptr(unsafe.Pointer(suid))) + return +} + +var libc_getresuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getresuid getresuid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getresgid(rgid *_C_int, egid *_C_int, sgid *_C_int) { + syscall_rawSyscall(libc_getresgid_trampoline_addr, uintptr(unsafe.Pointer(rgid)), uintptr(unsafe.Pointer(egid)), uintptr(unsafe.Pointer(sgid))) + return +} + +var libc_getresgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getresgid getresgid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctl(fd int, req uint, arg uintptr) (err error) { _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -527,6 +549,12 @@ return } +var libc_ioctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) 
{ _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -535,10 +563,6 @@ return } -var libc_ioctl_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { @@ -2189,28 +2213,6 @@ // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s 2024-02-23 09:46:14.000000000 +0000 @@ -189,6 +189,18 @@ GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $8 DATA ·libc_getcwd_trampoline_addr(SB)/8, $libc_getcwd_trampoline<>(SB) +TEXT libc_getresuid_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_getresuid(SB) + RET +GLOBL ·libc_getresuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getresuid_trampoline_addr(SB)/8, $libc_getresuid_trampoline<>(SB) + +TEXT libc_getresgid_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_getresgid(SB) + RET +GLOBL ·libc_getresgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getresgid_trampoline_addr(SB)/8, $libc_getresgid_trampoline<>(SB) + TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 CALL libc_ioctl(SB) RET diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go 2024-02-23 09:46:14.000000000 +0000 @@ -519,6 +519,28 @@ // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getresuid(ruid *_C_int, euid *_C_int, suid *_C_int) { + syscall_rawSyscall(libc_getresuid_trampoline_addr, uintptr(unsafe.Pointer(ruid)), uintptr(unsafe.Pointer(euid)), uintptr(unsafe.Pointer(suid))) + return +} + +var libc_getresuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getresuid getresuid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getresgid(rgid *_C_int, egid *_C_int, sgid *_C_int) { + syscall_rawSyscall(libc_getresgid_trampoline_addr, uintptr(unsafe.Pointer(rgid)), uintptr(unsafe.Pointer(egid)), uintptr(unsafe.Pointer(sgid))) + return +} + +var libc_getresgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getresgid getresgid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE 
TOP; DO NOT EDIT + func ioctl(fd int, req uint, arg uintptr) (err error) { _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -527,6 +549,12 @@ return } +var libc_ioctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -535,10 +563,6 @@ return } -var libc_ioctl_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { @@ -2189,28 +2213,6 @@ // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s 2024-02-23 09:46:14.000000000 +0000 @@ -158,6 +158,16 @@ GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $8 DATA ·libc_getcwd_trampoline_addr(SB)/8, $libc_getcwd_trampoline<>(SB) +TEXT libc_getresuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getresuid(SB) +GLOBL ·libc_getresuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getresuid_trampoline_addr(SB)/8, $libc_getresuid_trampoline<>(SB) + +TEXT libc_getresgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getresgid(SB) +GLOBL ·libc_getresgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getresgid_trampoline_addr(SB)/8, $libc_getresgid_trampoline<>(SB) + TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ioctl(SB) GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go 2024-02-23 09:46:14.000000000 +0000 @@ -436,7 +436,7 @@ r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procpipe)), 1, uintptr(unsafe.Pointer(p)), 0, 0, 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -446,7 +446,7 @@ func pipe2(p *[2]_C_int, flags int) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procpipe2)), 2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0, 0, 0, 
0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -456,7 +456,7 @@ func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procgetsockname)), 3, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -471,7 +471,7 @@ r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procGetcwd)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), 0, 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -482,7 +482,7 @@ r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procgetgroups)), 2, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0, 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -492,7 +492,7 @@ func setgroups(ngid int, gid *_Gid_t) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procsetgroups)), 2, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -503,7 +503,7 @@ r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procwait4)), 4, uintptr(pid), uintptr(unsafe.Pointer(statusp)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) wpid = int32(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -518,7 +518,7 @@ r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procgethostname)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), 0, 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -533,7 +533,7 @@ } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procutimes)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -548,7 +548,7 @@ } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procutimensat)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flag), 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -559,7 +559,7 @@ r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procfcntl)), 3, uintptr(fd), uintptr(cmd), uintptr(arg), 0, 0, 0) val = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -569,7 +569,7 @@ func futimesat(fildes int, path *byte, times *[2]Timeval) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procfutimesat)), 3, uintptr(fildes), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(times)), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -580,7 +580,7 @@ r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procaccept)), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0) fd = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -591,7 +591,7 @@ r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_recvmsg)), 3, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags), 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -602,7 +602,7 @@ r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_sendmsg)), 3, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags), 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -612,7 +612,7 @@ func acct(path *byte) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procacct)), 1, uintptr(unsafe.Pointer(path)), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -647,7 +647,7 @@ r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procioctl)), 3, uintptr(fd), uintptr(req), uintptr(arg), 0, 0, 0) ret = 
int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -658,7 +658,7 @@ r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procioctl)), 3, uintptr(fd), uintptr(req), uintptr(arg), 0, 0, 0) ret = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -669,7 +669,7 @@ r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procpoll)), 3, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout), 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -684,7 +684,7 @@ } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procAccess)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -694,7 +694,7 @@ func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procAdjtime)), 2, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -709,7 +709,7 @@ } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procChdir)), 1, uintptr(unsafe.Pointer(_p0)), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -724,7 +724,7 @@ } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procChmod)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -739,7 +739,7 @@ } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procChown)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -754,7 +754,7 @@ } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procChroot)), 1, uintptr(unsafe.Pointer(_p0)), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -764,7 +764,7 @@ func ClockGettime(clockid int32, time *Timespec) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procClockGettime)), 2, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -774,7 +774,7 @@ func Close(fd int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procClose)), 1, uintptr(fd), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -790,7 +790,7 @@ r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procCreat)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, 0, 0, 0) fd = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -801,7 +801,7 @@ r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procDup)), 1, uintptr(fd), 0, 0, 0, 0, 0) nfd = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -811,7 +811,7 @@ func Dup2(oldfd int, newfd int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procDup2)), 2, uintptr(oldfd), uintptr(newfd), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -833,7 +833,7 @@ } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFaccessat)), 4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -843,7 +843,7 @@ func Fchdir(fd int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFchdir)), 1, uintptr(fd), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -853,7 +853,7 @@ func Fchmod(fd int, mode uint32) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFchmod)), 2, uintptr(fd), uintptr(mode), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -868,7 +868,7 @@ } _, _, e1 := 
sysvicall6(uintptr(unsafe.Pointer(&procFchmodat)), 4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -878,7 +878,7 @@ func Fchown(fd int, uid int, gid int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFchown)), 3, uintptr(fd), uintptr(uid), uintptr(gid), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -893,7 +893,7 @@ } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFchownat)), 5, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -903,7 +903,7 @@ func Fdatasync(fd int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFdatasync)), 1, uintptr(fd), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -913,7 +913,7 @@ func Flock(fd int, how int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFlock)), 2, uintptr(fd), uintptr(how), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -924,7 +924,7 @@ r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFpathconf)), 2, uintptr(fd), uintptr(name), 0, 0, 0, 0) val = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -934,7 +934,7 @@ func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFstat)), 2, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -949,7 +949,7 @@ } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFstatat)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -959,7 +959,7 @@ func Fstatvfs(fd int, vfsstat *Statvfs_t) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFstatvfs)), 2, uintptr(fd), uintptr(unsafe.Pointer(vfsstat)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -974,7 +974,7 @@ r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procGetdents)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1001,7 +1001,7 @@ r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGetpgid)), 1, uintptr(pid), 0, 0, 0, 0, 0) pgid = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1012,7 +1012,7 @@ r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGetpgrp)), 0, 0, 0, 0, 0, 0, 0) pgid = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1047,7 +1047,7 @@ r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procGetpriority)), 2, uintptr(which), uintptr(who), 0, 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1057,7 +1057,7 @@ func Getrlimit(which int, lim *Rlimit) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGetrlimit)), 2, uintptr(which), uintptr(unsafe.Pointer(lim)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1067,7 +1067,7 @@ func Getrusage(who int, rusage *Rusage) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGetrusage)), 2, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1078,7 +1078,7 @@ r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGetsid)), 1, uintptr(pid), 0, 0, 0, 0, 0) sid = int(r0) if e1 != 0 { - err = e1 + err = 
errnoErr(e1) } return } @@ -1088,7 +1088,7 @@ func Gettimeofday(tv *Timeval) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGettimeofday)), 1, uintptr(unsafe.Pointer(tv)), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1106,7 +1106,7 @@ func Kill(pid int, signum syscall.Signal) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procKill)), 2, uintptr(pid), uintptr(signum), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1121,7 +1121,7 @@ } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procLchown)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1141,7 +1141,7 @@ } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procLink)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1151,7 +1151,7 @@ func Listen(s int, backlog int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_llisten)), 2, uintptr(s), uintptr(backlog), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1166,7 +1166,7 @@ } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procLstat)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1180,7 +1180,7 @@ } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMadvise)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(advice), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1195,7 +1195,7 @@ } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMkdir)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1210,7 +1210,7 @@ } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMkdirat)), 3, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1225,7 +1225,7 @@ } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMkfifo)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1240,7 +1240,7 @@ } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMkfifoat)), 3, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1255,7 +1255,7 @@ } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMknod)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1270,7 +1270,7 @@ } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMknodat)), 4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1284,7 +1284,7 @@ } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMlock)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1294,7 +1294,7 @@ func Mlockall(flags int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMlockall)), 1, uintptr(flags), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1308,7 +1308,7 @@ } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMprotect)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(prot), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1322,7 +1322,7 @@ } _, _, e1 := 
sysvicall6(uintptr(unsafe.Pointer(&procMsync)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(flags), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1336,7 +1336,7 @@ } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMunlock)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1346,7 +1346,7 @@ func Munlockall() (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMunlockall)), 0, 0, 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1356,7 +1356,7 @@ func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procNanosleep)), 2, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1372,7 +1372,7 @@ r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procOpen)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0, 0) fd = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1388,7 +1388,7 @@ r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procOpenat)), 4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0) fd = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1404,7 +1404,7 @@ r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procPathconf)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0, 0, 0, 0) val = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1414,7 +1414,7 @@ func Pause() (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procPause)), 0, 0, 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1429,7 +1429,7 @@ r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procpread)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(p)), uintptr(offset), 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1444,7 +1444,7 @@ r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procpwrite)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(p)), uintptr(offset), 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1459,7 +1459,7 @@ r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procread)), 3, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(p)), 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1479,7 +1479,7 @@ r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procReadlink)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(len(buf)), 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1499,7 +1499,7 @@ } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procRename)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1519,7 +1519,7 @@ } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procRenameat)), 4, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1534,7 +1534,7 @@ } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procRmdir)), 1, uintptr(unsafe.Pointer(_p0)), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1545,7 +1545,7 @@ r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proclseek)), 3, uintptr(fd), uintptr(offset), uintptr(whence), 0, 0, 0) newoffset = int64(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ 
-1556,7 +1556,7 @@ r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procSelect)), 5, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1566,7 +1566,7 @@ func Setegid(egid int) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetegid)), 1, uintptr(egid), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1576,7 +1576,7 @@ func Seteuid(euid int) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSeteuid)), 1, uintptr(euid), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1586,7 +1586,7 @@ func Setgid(gid int) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetgid)), 1, uintptr(gid), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1600,7 +1600,7 @@ } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procSethostname)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(len(p)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1610,7 +1610,7 @@ func Setpgid(pid int, pgid int) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetpgid)), 2, uintptr(pid), uintptr(pgid), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1620,7 +1620,7 @@ func Setpriority(which int, who int, prio int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procSetpriority)), 3, uintptr(which), uintptr(who), uintptr(prio), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1630,7 +1630,7 @@ func Setregid(rgid int, egid int) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetregid)), 2, uintptr(rgid), uintptr(egid), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1640,7 +1640,7 @@ func Setreuid(ruid int, euid int) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetreuid)), 2, uintptr(ruid), uintptr(euid), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1651,7 +1651,7 @@ r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetsid)), 0, 0, 0, 0, 0, 0, 0) pid = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1661,7 +1661,7 @@ func Setuid(uid int) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetuid)), 1, uintptr(uid), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1671,7 +1671,7 @@ func Shutdown(s int, how int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procshutdown)), 2, uintptr(s), uintptr(how), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1686,7 +1686,7 @@ } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procStat)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1701,7 +1701,7 @@ } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procStatvfs)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(vfsstat)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1721,7 +1721,7 @@ } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procSymlink)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1731,7 +1731,7 @@ func Sync() (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procSync)), 0, 0, 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ 
-1742,7 +1742,7 @@ r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procSysconf)), 1, uintptr(which), 0, 0, 0, 0, 0) n = int64(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1753,7 +1753,7 @@ r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procTimes)), 1, uintptr(unsafe.Pointer(tms)), 0, 0, 0, 0, 0) ticks = uintptr(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1768,7 +1768,7 @@ } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procTruncate)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1778,7 +1778,7 @@ func Fsync(fd int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFsync)), 1, uintptr(fd), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1788,7 +1788,7 @@ func Ftruncate(fd int, length int64) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFtruncate)), 2, uintptr(fd), uintptr(length), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1806,7 +1806,7 @@ func Uname(buf *Utsname) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procUname)), 1, uintptr(unsafe.Pointer(buf)), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1821,7 +1821,7 @@ } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procumount)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1836,7 +1836,7 @@ } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procUnlink)), 1, uintptr(unsafe.Pointer(_p0)), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1851,7 +1851,7 @@ } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procUnlinkat)), 3, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1861,7 +1861,7 @@ func Ustat(dev int, ubuf *Ustat_t) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procUstat)), 2, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1876,7 +1876,7 @@ } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procUtime)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1886,7 +1886,7 @@ func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_bind)), 3, uintptr(s), uintptr(addr), uintptr(addrlen), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1896,7 +1896,7 @@ func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_connect)), 3, uintptr(s), uintptr(addr), uintptr(addrlen), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1907,7 +1907,7 @@ r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procmmap)), 6, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) ret = uintptr(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1917,7 +1917,7 @@ func munmap(addr uintptr, length uintptr) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procmunmap)), 2, uintptr(addr), uintptr(length), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1928,7 +1928,7 @@ r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procsendfile)), 4, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0) 
written = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1942,7 +1942,7 @@ } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_sendto)), 6, uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1953,7 +1953,7 @@ r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_socket)), 3, uintptr(domain), uintptr(typ), uintptr(proto), 0, 0, 0) fd = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1963,7 +1963,7 @@ func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&proc__xnet_socketpair)), 4, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1978,7 +1978,7 @@ r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procwrite)), 3, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(p)), 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1988,7 +1988,7 @@ func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_getsockopt)), 5, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1998,7 +1998,7 @@ func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procgetpeername)), 3, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -2008,7 +2008,7 @@ func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procsetsockopt)), 5, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -2023,7 +2023,7 @@ r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procrecvfrom)), 6, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -2034,7 +2034,7 @@ r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procport_create)), 0, 0, 0, 0, 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -2045,7 +2045,7 @@ r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procport_associate)), 5, uintptr(port), uintptr(source), uintptr(object), uintptr(events), uintptr(unsafe.Pointer(user)), 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -2056,7 +2056,7 @@ r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procport_dissociate)), 3, uintptr(port), uintptr(source), uintptr(object), 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -2067,7 +2067,7 @@ r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procport_get)), 3, uintptr(port), uintptr(unsafe.Pointer(pe)), uintptr(unsafe.Pointer(timeout)), 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -2078,7 +2078,7 @@ r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procport_getn)), 5, uintptr(port), uintptr(unsafe.Pointer(pe)), uintptr(max), uintptr(unsafe.Pointer(nget)), uintptr(unsafe.Pointer(timeout)), 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ 
-2088,7 +2088,7 @@ func putmsg(fd int, clptr *strbuf, dataptr *strbuf, flags int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procputmsg)), 4, uintptr(fd), uintptr(unsafe.Pointer(clptr)), uintptr(unsafe.Pointer(dataptr)), uintptr(flags), 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -2098,7 +2098,7 @@ func getmsg(fd int, clptr *strbuf, dataptr *strbuf, flags *int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procgetmsg)), 4, uintptr(fd), uintptr(unsafe.Pointer(clptr)), uintptr(unsafe.Pointer(dataptr)), uintptr(unsafe.Pointer(flags)), 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go 2024-02-23 09:46:14.000000000 +0000 @@ -40,17 +40,6 @@ // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func write(fd int, p []byte) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go 2024-02-23 09:46:14.000000000 +0000 @@ -447,4 +447,5 @@ SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 + SYS_CACHESTAT = 451 ) diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go 2024-02-23 09:46:14.000000000 +0000 @@ -369,4 +369,5 @@ SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 + SYS_CACHESTAT = 451 ) diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go 2024-02-23 09:46:14.000000000 +0000 @@ -411,4 +411,5 @@ SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 + SYS_CACHESTAT = 451 ) diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go 2024-02-23 09:46:14.000000000 +0000 @@ -314,4 +314,5 @@ SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 + SYS_CACHESTAT = 451 ) diff -Nru 
temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go 2024-02-23 09:46:14.000000000 +0000 @@ -308,4 +308,5 @@ SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 + SYS_CACHESTAT = 451 ) diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go 2024-02-23 09:46:14.000000000 +0000 @@ -431,4 +431,5 @@ SYS_PROCESS_MRELEASE = 4448 SYS_FUTEX_WAITV = 4449 SYS_SET_MEMPOLICY_HOME_NODE = 4450 + SYS_CACHESTAT = 4451 ) diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go 2024-02-23 09:46:14.000000000 +0000 @@ -361,4 +361,5 @@ SYS_PROCESS_MRELEASE = 5448 SYS_FUTEX_WAITV = 5449 SYS_SET_MEMPOLICY_HOME_NODE = 5450 + SYS_CACHESTAT = 5451 ) diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go 2024-02-23 09:46:14.000000000 +0000 @@ -361,4 +361,5 @@ SYS_PROCESS_MRELEASE = 5448 SYS_FUTEX_WAITV = 5449 SYS_SET_MEMPOLICY_HOME_NODE = 5450 + SYS_CACHESTAT = 5451 ) diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go 2024-02-23 09:46:14.000000000 +0000 @@ -431,4 +431,5 @@ SYS_PROCESS_MRELEASE = 4448 SYS_FUTEX_WAITV = 4449 SYS_SET_MEMPOLICY_HOME_NODE = 4450 + SYS_CACHESTAT = 4451 ) diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go 2024-02-23 09:46:14.000000000 +0000 @@ -438,4 +438,5 @@ SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 + SYS_CACHESTAT = 451 ) diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go 2024-02-23 09:46:14.000000000 +0000 @@ -410,4 +410,5 @@ SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE 
= 450 + SYS_CACHESTAT = 451 ) diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go 2024-02-23 09:46:14.000000000 +0000 @@ -410,4 +410,5 @@ SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 + SYS_CACHESTAT = 451 ) diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go 2024-02-23 09:46:14.000000000 +0000 @@ -251,6 +251,8 @@ SYS_ACCEPT4 = 242 SYS_RECVMMSG = 243 SYS_ARCH_SPECIFIC_SYSCALL = 244 + SYS_RISCV_HWPROBE = 258 + SYS_RISCV_FLUSH_ICACHE = 259 SYS_WAIT4 = 260 SYS_PRLIMIT64 = 261 SYS_FANOTIFY_INIT = 262 @@ -313,4 +315,5 @@ SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 + SYS_CACHESTAT = 451 ) diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go 2024-02-23 09:46:14.000000000 +0000 @@ -372,7 +372,9 @@ SYS_LANDLOCK_CREATE_RULESET = 444 SYS_LANDLOCK_ADD_RULE = 445 SYS_LANDLOCK_RESTRICT_SELF = 446 + SYS_MEMFD_SECRET = 447 SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 + SYS_CACHESTAT = 451 ) diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go 2024-02-23 09:46:14.000000000 +0000 @@ -389,4 +389,5 @@ SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 + SYS_CACHESTAT = 451 ) diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/ztypes_linux.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/ztypes_linux.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/ztypes_linux.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/ztypes_linux.go 2024-02-23 09:46:14.000000000 +0000 @@ -866,6 +866,11 @@ POLLNVAL = 0x20 ) +type sigset_argpack struct { + ss *Sigset_t + ssLen uintptr +} + type SignalfdSiginfo struct { Signo uint32 Errno int32 @@ -1538,6 +1543,10 @@ IFLA_GRO_MAX_SIZE = 0x3a IFLA_TSO_MAX_SIZE = 0x3b IFLA_TSO_MAX_SEGS = 0x3c + IFLA_ALLMULTI = 0x3d + IFLA_DEVLINK_PORT = 0x3e + IFLA_GSO_IPV4_MAX_SIZE = 0x3f + IFLA_GRO_IPV4_MAX_SIZE = 0x40 IFLA_PROTO_DOWN_REASON_UNSPEC = 0x0 IFLA_PROTO_DOWN_REASON_MASK = 0x1 IFLA_PROTO_DOWN_REASON_VALUE = 0x2 @@ -1968,7 +1977,7 @@ NFT_MSG_GETFLOWTABLE = 0x17 NFT_MSG_DELFLOWTABLE = 0x18 NFT_MSG_GETRULE_RESET = 0x19 - NFT_MSG_MAX = 0x1a + NFT_MSG_MAX = 0x22 NFTA_LIST_UNSPEC = 0x0 NFTA_LIST_ELEM = 0x1 NFTA_HOOK_UNSPEC = 0x0 @@ -2555,6 +2564,11 @@ BPF_REG_8 = 0x8 BPF_REG_9 = 0x9 BPF_REG_10 = 0xa + 
BPF_CGROUP_ITER_ORDER_UNSPEC = 0x0 + BPF_CGROUP_ITER_SELF_ONLY = 0x1 + BPF_CGROUP_ITER_DESCENDANTS_PRE = 0x2 + BPF_CGROUP_ITER_DESCENDANTS_POST = 0x3 + BPF_CGROUP_ITER_ANCESTORS_UP = 0x4 BPF_MAP_CREATE = 0x0 BPF_MAP_LOOKUP_ELEM = 0x1 BPF_MAP_UPDATE_ELEM = 0x2 @@ -2566,6 +2580,7 @@ BPF_PROG_ATTACH = 0x8 BPF_PROG_DETACH = 0x9 BPF_PROG_TEST_RUN = 0xa + BPF_PROG_RUN = 0xa BPF_PROG_GET_NEXT_ID = 0xb BPF_MAP_GET_NEXT_ID = 0xc BPF_PROG_GET_FD_BY_ID = 0xd @@ -2610,6 +2625,7 @@ BPF_MAP_TYPE_CPUMAP = 0x10 BPF_MAP_TYPE_XSKMAP = 0x11 BPF_MAP_TYPE_SOCKHASH = 0x12 + BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED = 0x13 BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 @@ -2620,6 +2636,10 @@ BPF_MAP_TYPE_STRUCT_OPS = 0x1a BPF_MAP_TYPE_RINGBUF = 0x1b BPF_MAP_TYPE_INODE_STORAGE = 0x1c + BPF_MAP_TYPE_TASK_STORAGE = 0x1d + BPF_MAP_TYPE_BLOOM_FILTER = 0x1e + BPF_MAP_TYPE_USER_RINGBUF = 0x1f + BPF_MAP_TYPE_CGRP_STORAGE = 0x20 BPF_PROG_TYPE_UNSPEC = 0x0 BPF_PROG_TYPE_SOCKET_FILTER = 0x1 BPF_PROG_TYPE_KPROBE = 0x2 @@ -2651,6 +2671,7 @@ BPF_PROG_TYPE_EXT = 0x1c BPF_PROG_TYPE_LSM = 0x1d BPF_PROG_TYPE_SK_LOOKUP = 0x1e + BPF_PROG_TYPE_SYSCALL = 0x1f BPF_CGROUP_INET_INGRESS = 0x0 BPF_CGROUP_INET_EGRESS = 0x1 BPF_CGROUP_INET_SOCK_CREATE = 0x2 @@ -2689,6 +2710,12 @@ BPF_XDP_CPUMAP = 0x23 BPF_SK_LOOKUP = 0x24 BPF_XDP = 0x25 + BPF_SK_SKB_VERDICT = 0x26 + BPF_SK_REUSEPORT_SELECT = 0x27 + BPF_SK_REUSEPORT_SELECT_OR_MIGRATE = 0x28 + BPF_PERF_EVENT = 0x29 + BPF_TRACE_KPROBE_MULTI = 0x2a + BPF_LSM_CGROUP = 0x2b BPF_LINK_TYPE_UNSPEC = 0x0 BPF_LINK_TYPE_RAW_TRACEPOINT = 0x1 BPF_LINK_TYPE_TRACING = 0x2 @@ -2696,6 +2723,9 @@ BPF_LINK_TYPE_ITER = 0x4 BPF_LINK_TYPE_NETNS = 0x5 BPF_LINK_TYPE_XDP = 0x6 + BPF_LINK_TYPE_PERF_EVENT = 0x7 + BPF_LINK_TYPE_KPROBE_MULTI = 0x8 + BPF_LINK_TYPE_STRUCT_OPS = 0x9 BPF_ANY = 0x0 BPF_NOEXIST = 0x1 BPF_EXIST = 0x2 @@ -2733,6 +2763,7 @@ BPF_F_ZERO_CSUM_TX = 0x2 BPF_F_DONT_FRAGMENT = 0x4 BPF_F_SEQ_NUMBER = 0x8 + BPF_F_TUNINFO_FLAGS = 0x10 BPF_F_INDEX_MASK = 0xffffffff BPF_F_CURRENT_CPU = 0xffffffff BPF_F_CTXLEN_MASK = 0xfffff00000000 @@ -2747,6 +2778,7 @@ BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 BPF_F_ADJ_ROOM_NO_CSUM_RESET = 0x20 + BPF_F_ADJ_ROOM_ENCAP_L2_ETH = 0x40 BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 BPF_F_SYSCTL_BASE_NAME = 0x1 @@ -2771,10 +2803,16 @@ BPF_LWT_ENCAP_SEG6 = 0x0 BPF_LWT_ENCAP_SEG6_INLINE = 0x1 BPF_LWT_ENCAP_IP = 0x2 + BPF_F_BPRM_SECUREEXEC = 0x1 + BPF_F_BROADCAST = 0x8 + BPF_F_EXCLUDE_INGRESS = 0x10 + BPF_SKB_TSTAMP_UNSPEC = 0x0 + BPF_SKB_TSTAMP_DELIVERY_MONO = 0x1 BPF_OK = 0x0 BPF_DROP = 0x2 BPF_REDIRECT = 0x7 BPF_LWT_REROUTE = 0x80 + BPF_FLOW_DISSECTOR_CONTINUE = 0x81 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 @@ -2838,6 +2876,10 @@ BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 + BPF_MTU_CHK_SEGS = 0x1 + BPF_MTU_CHK_RET_SUCCESS = 0x0 + BPF_MTU_CHK_RET_FRAG_NEEDED = 0x1 + BPF_MTU_CHK_RET_SEGS_TOOBIG = 0x2 BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 BPF_FD_TYPE_TRACEPOINT = 0x1 BPF_FD_TYPE_KPROBE = 0x2 @@ -2847,6 +2889,19 @@ BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG = 0x1 BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL = 0x2 BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP = 0x4 + BPF_CORE_FIELD_BYTE_OFFSET = 0x0 + BPF_CORE_FIELD_BYTE_SIZE = 0x1 + BPF_CORE_FIELD_EXISTS = 0x2 + BPF_CORE_FIELD_SIGNED = 0x3 + BPF_CORE_FIELD_LSHIFT_U64 = 0x4 + BPF_CORE_FIELD_RSHIFT_U64 = 0x5 + BPF_CORE_TYPE_ID_LOCAL 
= 0x6 + BPF_CORE_TYPE_ID_TARGET = 0x7 + BPF_CORE_TYPE_EXISTS = 0x8 + BPF_CORE_TYPE_SIZE = 0x9 + BPF_CORE_ENUMVAL_EXISTS = 0xa + BPF_CORE_ENUMVAL_VALUE = 0xb + BPF_CORE_TYPE_MATCHES = 0xc ) const ( @@ -3605,7 +3660,7 @@ ETHTOOL_MSG_PSE_GET = 0x24 ETHTOOL_MSG_PSE_SET = 0x25 ETHTOOL_MSG_RSS_GET = 0x26 - ETHTOOL_MSG_USER_MAX = 0x26 + ETHTOOL_MSG_USER_MAX = 0x2b ETHTOOL_MSG_KERNEL_NONE = 0x0 ETHTOOL_MSG_STRSET_GET_REPLY = 0x1 ETHTOOL_MSG_LINKINFO_GET_REPLY = 0x2 @@ -3645,7 +3700,7 @@ ETHTOOL_MSG_MODULE_NTF = 0x24 ETHTOOL_MSG_PSE_GET_REPLY = 0x25 ETHTOOL_MSG_RSS_GET_REPLY = 0x26 - ETHTOOL_MSG_KERNEL_MAX = 0x26 + ETHTOOL_MSG_KERNEL_MAX = 0x2b ETHTOOL_A_HEADER_UNSPEC = 0x0 ETHTOOL_A_HEADER_DEV_INDEX = 0x1 ETHTOOL_A_HEADER_DEV_NAME = 0x2 @@ -3749,7 +3804,7 @@ ETHTOOL_A_RINGS_TCP_DATA_SPLIT = 0xb ETHTOOL_A_RINGS_CQE_SIZE = 0xc ETHTOOL_A_RINGS_TX_PUSH = 0xd - ETHTOOL_A_RINGS_MAX = 0xd + ETHTOOL_A_RINGS_MAX = 0x10 ETHTOOL_A_CHANNELS_UNSPEC = 0x0 ETHTOOL_A_CHANNELS_HEADER = 0x1 ETHTOOL_A_CHANNELS_RX_MAX = 0x2 @@ -3787,14 +3842,14 @@ ETHTOOL_A_COALESCE_RATE_SAMPLE_INTERVAL = 0x17 ETHTOOL_A_COALESCE_USE_CQE_MODE_TX = 0x18 ETHTOOL_A_COALESCE_USE_CQE_MODE_RX = 0x19 - ETHTOOL_A_COALESCE_MAX = 0x19 + ETHTOOL_A_COALESCE_MAX = 0x1c ETHTOOL_A_PAUSE_UNSPEC = 0x0 ETHTOOL_A_PAUSE_HEADER = 0x1 ETHTOOL_A_PAUSE_AUTONEG = 0x2 ETHTOOL_A_PAUSE_RX = 0x3 ETHTOOL_A_PAUSE_TX = 0x4 ETHTOOL_A_PAUSE_STATS = 0x5 - ETHTOOL_A_PAUSE_MAX = 0x5 + ETHTOOL_A_PAUSE_MAX = 0x6 ETHTOOL_A_PAUSE_STAT_UNSPEC = 0x0 ETHTOOL_A_PAUSE_STAT_PAD = 0x1 ETHTOOL_A_PAUSE_STAT_TX_FRAMES = 0x2 @@ -4444,7 +4499,7 @@ NL80211_ATTR_MAC_HINT = 0xc8 NL80211_ATTR_MAC_MASK = 0xd7 NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca - NL80211_ATTR_MAX = 0x141 + NL80211_ATTR_MAX = 0x146 NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4 NL80211_ATTR_MAX_CSA_COUNTERS = 0xce NL80211_ATTR_MAX_MATCH_SETS = 0x85 @@ -4673,7 +4728,7 @@ NL80211_BAND_ATTR_HT_CAPA = 0x4 NL80211_BAND_ATTR_HT_MCS_SET = 0x3 NL80211_BAND_ATTR_IFTYPE_DATA = 0x9 - NL80211_BAND_ATTR_MAX = 0xb + NL80211_BAND_ATTR_MAX = 0xd NL80211_BAND_ATTR_RATES = 0x2 NL80211_BAND_ATTR_VHT_CAPA = 0x8 NL80211_BAND_ATTR_VHT_MCS_SET = 0x7 @@ -4814,7 +4869,7 @@ NL80211_CMD_LEAVE_IBSS = 0x2c NL80211_CMD_LEAVE_MESH = 0x45 NL80211_CMD_LEAVE_OCB = 0x6d - NL80211_CMD_MAX = 0x98 + NL80211_CMD_MAX = 0x9a NL80211_CMD_MICHAEL_MIC_FAILURE = 0x29 NL80211_CMD_MODIFY_LINK_STA = 0x97 NL80211_CMD_NAN_MATCH = 0x78 @@ -5448,7 +5503,7 @@ NL80211_RATE_INFO_HE_RU_ALLOC_52 = 0x1 NL80211_RATE_INFO_HE_RU_ALLOC_996 = 0x5 NL80211_RATE_INFO_HE_RU_ALLOC = 0x11 - NL80211_RATE_INFO_MAX = 0x16 + NL80211_RATE_INFO_MAX = 0x1d NL80211_RATE_INFO_MCS = 0x2 NL80211_RATE_INFO_SHORT_GI = 0x4 NL80211_RATE_INFO_VHT_MCS = 0x6 @@ -5795,6 +5850,8 @@ TUN_F_TSO6 = 0x4 TUN_F_TSO_ECN = 0x8 TUN_F_UFO = 0x10 + TUN_F_USO4 = 0x20 + TUN_F_USO6 = 0x40 ) const ( @@ -5804,9 +5861,25 @@ ) const ( - VIRTIO_NET_HDR_GSO_NONE = 0x0 - VIRTIO_NET_HDR_GSO_TCPV4 = 0x1 - VIRTIO_NET_HDR_GSO_UDP = 0x3 - VIRTIO_NET_HDR_GSO_TCPV6 = 0x4 - VIRTIO_NET_HDR_GSO_ECN = 0x80 + VIRTIO_NET_HDR_GSO_NONE = 0x0 + VIRTIO_NET_HDR_GSO_TCPV4 = 0x1 + VIRTIO_NET_HDR_GSO_UDP = 0x3 + VIRTIO_NET_HDR_GSO_TCPV6 = 0x4 + VIRTIO_NET_HDR_GSO_UDP_L4 = 0x5 + VIRTIO_NET_HDR_GSO_ECN = 0x80 ) + +type SchedAttr struct { + Size uint32 + Policy uint32 + Flags uint64 + Nice int32 + Priority uint32 + Runtime uint64 + Deadline uint64 + Period uint64 + Util_min uint32 + Util_max uint32 +} + +const SizeofSchedAttr = 0x38 diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/ztypes_linux_386.go 
temporal-1.22.5/src/vendor/golang.org/x/sys/unix/ztypes_linux_386.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/ztypes_linux_386.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/ztypes_linux_386.go 2024-02-23 09:46:14.000000000 +0000 @@ -337,6 +337,8 @@ Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 + Irq_count uint64 + Irq_delay_total uint64 } type cpuMask uint32 diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go 2024-02-23 09:46:14.000000000 +0000 @@ -350,6 +350,8 @@ Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 + Irq_count uint64 + Irq_delay_total uint64 } type cpuMask uint64 diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go 2024-02-23 09:46:14.000000000 +0000 @@ -328,6 +328,8 @@ Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 + Irq_count uint64 + Irq_delay_total uint64 } type cpuMask uint32 diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go 2024-02-23 09:46:14.000000000 +0000 @@ -329,6 +329,8 @@ Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 + Irq_count uint64 + Irq_delay_total uint64 } type cpuMask uint64 diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go 2024-02-23 09:46:14.000000000 +0000 @@ -330,6 +330,8 @@ Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 + Irq_count uint64 + Irq_delay_total uint64 } type cpuMask uint64 diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go 2024-02-23 09:46:14.000000000 +0000 @@ -333,6 +333,8 @@ Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 + Irq_count uint64 + Irq_delay_total uint64 } type cpuMask uint32 diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go 2024-02-23 09:46:14.000000000 +0000 @@ -332,6 +332,8 @@ Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 
+ Irq_count uint64 + Irq_delay_total uint64 } type cpuMask uint64 diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go 2024-02-23 09:46:14.000000000 +0000 @@ -332,6 +332,8 @@ Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 + Irq_count uint64 + Irq_delay_total uint64 } type cpuMask uint64 diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go 2024-02-23 09:46:14.000000000 +0000 @@ -333,6 +333,8 @@ Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 + Irq_count uint64 + Irq_delay_total uint64 } type cpuMask uint32 diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go 2024-02-23 09:46:14.000000000 +0000 @@ -340,6 +340,8 @@ Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 + Irq_count uint64 + Irq_delay_total uint64 } type cpuMask uint32 diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go 2024-02-23 09:46:14.000000000 +0000 @@ -339,6 +339,8 @@ Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 + Irq_count uint64 + Irq_delay_total uint64 } type cpuMask uint64 diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go 2024-02-23 09:46:14.000000000 +0000 @@ -339,6 +339,8 @@ Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 + Irq_count uint64 + Irq_delay_total uint64 } type cpuMask uint64 diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go 2024-02-23 09:46:14.000000000 +0000 @@ -357,6 +357,8 @@ Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 + Irq_count uint64 + Irq_delay_total uint64 } type cpuMask uint64 @@ -716,3 +718,30 @@ _ uint64 _ uint64 } + +type RISCVHWProbePairs struct { + Key int64 + Value uint64 +} + +const ( + RISCV_HWPROBE_KEY_MVENDORID = 0x0 + RISCV_HWPROBE_KEY_MARCHID = 0x1 + RISCV_HWPROBE_KEY_MIMPID = 0x2 + RISCV_HWPROBE_KEY_BASE_BEHAVIOR = 0x3 + 
RISCV_HWPROBE_BASE_BEHAVIOR_IMA = 0x1 + RISCV_HWPROBE_KEY_IMA_EXT_0 = 0x4 + RISCV_HWPROBE_IMA_FD = 0x1 + RISCV_HWPROBE_IMA_C = 0x2 + RISCV_HWPROBE_IMA_V = 0x4 + RISCV_HWPROBE_EXT_ZBA = 0x8 + RISCV_HWPROBE_EXT_ZBB = 0x10 + RISCV_HWPROBE_EXT_ZBS = 0x20 + RISCV_HWPROBE_KEY_CPUPERF_0 = 0x5 + RISCV_HWPROBE_MISALIGNED_UNKNOWN = 0x0 + RISCV_HWPROBE_MISALIGNED_EMULATED = 0x1 + RISCV_HWPROBE_MISALIGNED_SLOW = 0x2 + RISCV_HWPROBE_MISALIGNED_FAST = 0x3 + RISCV_HWPROBE_MISALIGNED_UNSUPPORTED = 0x4 + RISCV_HWPROBE_MISALIGNED_MASK = 0x7 +) diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go 2024-02-23 09:46:14.000000000 +0000 @@ -352,6 +352,8 @@ Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 + Irq_count uint64 + Irq_delay_total uint64 } type cpuMask uint64 diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go temporal-1.22.5/src/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go 2024-02-23 09:46:14.000000000 +0000 @@ -334,6 +334,8 @@ Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 + Irq_count uint64 + Irq_delay_total uint64 } type cpuMask uint64 diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/windows/exec_windows.go temporal-1.22.5/src/vendor/golang.org/x/sys/windows/exec_windows.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/windows/exec_windows.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/windows/exec_windows.go 2024-02-23 09:46:14.000000000 +0000 @@ -22,7 +22,7 @@ // but only if there is space or tab inside s. func EscapeArg(s string) string { if len(s) == 0 { - return "\"\"" + return `""` } n := len(s) hasSpace := false @@ -35,7 +35,7 @@ } } if hasSpace { - n += 2 + n += 2 // Reserve space for quotes. } if n == len(s) { return s @@ -82,20 +82,68 @@ // in CreateProcess's CommandLine argument, CreateService/ChangeServiceConfig's BinaryPathName argument, // or any program that uses CommandLineToArgv. func ComposeCommandLine(args []string) string { - var commandLine string - for i := range args { - if i > 0 { - commandLine += " " + if len(args) == 0 { + return "" + } + + // Per https://learn.microsoft.com/en-us/windows/win32/api/shellapi/nf-shellapi-commandlinetoargvw: + // “This function accepts command lines that contain a program name; the + // program name can be enclosed in quotation marks or not.” + // + // Unfortunately, it provides no means of escaping interior quotation marks + // within that program name, and we have no way to report them here. + prog := args[0] + mustQuote := len(prog) == 0 + for i := 0; i < len(prog); i++ { + c := prog[i] + if c <= ' ' || (c == '"' && i == 0) { + // Force quotes for not only the ASCII space and tab as described in the + // MSDN article, but also ASCII control characters. + // The documentation for CommandLineToArgvW doesn't say what happens when + // the first argument is not a valid program name, but it empirically + // seems to drop unquoted control characters. 
+ mustQuote = true + break + } + } + var commandLine []byte + if mustQuote { + commandLine = make([]byte, 0, len(prog)+2) + commandLine = append(commandLine, '"') + for i := 0; i < len(prog); i++ { + c := prog[i] + if c == '"' { + // This quote would interfere with our surrounding quotes. + // We have no way to report an error, so just strip out + // the offending character instead. + continue + } + commandLine = append(commandLine, c) + } + commandLine = append(commandLine, '"') + } else { + if len(args) == 1 { + // args[0] is a valid command line representing itself. + // No need to allocate a new slice or string for it. + return prog } - commandLine += EscapeArg(args[i]) + commandLine = []byte(prog) } - return commandLine + + for _, arg := range args[1:] { + commandLine = append(commandLine, ' ') + // TODO(bcmills): since we're already appending to a slice, it would be nice + // to avoid the intermediate allocations of EscapeArg. + // Perhaps we can factor out an appendEscapedArg function. + commandLine = append(commandLine, EscapeArg(arg)...) + } + return string(commandLine) } // DecomposeCommandLine breaks apart its argument command line into unescaped parts using CommandLineToArgv, // as gathered from GetCommandLine, QUERY_SERVICE_CONFIG's BinaryPathName argument, or elsewhere that // command lines are passed around. -// DecomposeCommandLine returns error if commandLine contains NUL. +// DecomposeCommandLine returns an error if commandLine contains NUL. func DecomposeCommandLine(commandLine string) ([]string, error) { if len(commandLine) == 0 { return []string{}, nil @@ -105,18 +153,35 @@ return nil, errorspkg.New("string with NUL passed to DecomposeCommandLine") } var argc int32 - argv, err := CommandLineToArgv(&utf16CommandLine[0], &argc) + argv, err := commandLineToArgv(&utf16CommandLine[0], &argc) if err != nil { return nil, err } defer LocalFree(Handle(unsafe.Pointer(argv))) + var args []string - for _, v := range (*argv)[:argc] { - args = append(args, UTF16ToString((*v)[:])) + for _, p := range unsafe.Slice(argv, argc) { + args = append(args, UTF16PtrToString(p)) } return args, nil } +// CommandLineToArgv parses a Unicode command line string and sets +// argc to the number of parsed arguments. +// +// The returned memory should be freed using a single call to LocalFree. +// +// Note that although the return type of CommandLineToArgv indicates 8192 +// entries of up to 8192 characters each, the actual count of parsed arguments +// may exceed 8192, and the documentation for CommandLineToArgvW does not mention +// any bound on the lengths of the individual argument strings. +// (See https://go.dev/issue/63236.) 
+func CommandLineToArgv(cmd *uint16, argc *int32) (argv *[8192]*[8192]uint16, err error) { + argp, err := commandLineToArgv(cmd, argc) + argv = (*[8192]*[8192]uint16)(unsafe.Pointer(argp)) + return argv, err +} + func CloseOnExec(fd Handle) { SetHandleInformation(Handle(fd), HANDLE_FLAG_INHERIT, 0) } diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/windows/security_windows.go temporal-1.22.5/src/vendor/golang.org/x/sys/windows/security_windows.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/windows/security_windows.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/windows/security_windows.go 2024-02-23 09:46:14.000000000 +0000 @@ -7,8 +7,6 @@ import ( "syscall" "unsafe" - - "golang.org/x/sys/internal/unsafeheader" ) const ( @@ -1341,21 +1339,14 @@ sdLen = min } - var src []byte - h := (*unsafeheader.Slice)(unsafe.Pointer(&src)) - h.Data = unsafe.Pointer(selfRelativeSD) - h.Len = sdLen - h.Cap = sdLen - + src := unsafe.Slice((*byte)(unsafe.Pointer(selfRelativeSD)), sdLen) + // SECURITY_DESCRIPTOR has pointers in it, which means checkptr expects for it to + // be aligned properly. When we're copying a Windows-allocated struct to a + // Go-allocated one, make sure that the Go allocation is aligned to the + // pointer size. const psize = int(unsafe.Sizeof(uintptr(0))) - - var dst []byte - h = (*unsafeheader.Slice)(unsafe.Pointer(&dst)) alloc := make([]uintptr, (sdLen+psize-1)/psize) - h.Data = (*unsafeheader.Slice)(unsafe.Pointer(&alloc)).Data - h.Len = sdLen - h.Cap = sdLen - + dst := unsafe.Slice((*byte)(unsafe.Pointer(&alloc[0])), sdLen) copy(dst, src) return (*SECURITY_DESCRIPTOR)(unsafe.Pointer(&dst[0])) } diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/windows/service.go temporal-1.22.5/src/vendor/golang.org/x/sys/windows/service.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/windows/service.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/windows/service.go 2024-02-23 09:46:14.000000000 +0000 @@ -218,6 +218,10 @@ Actions *SC_ACTION } +type SERVICE_FAILURE_ACTIONS_FLAG struct { + FailureActionsOnNonCrashFailures int32 +} + type SC_ACTION struct { Type uint32 Delay uint32 diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/windows/syscall_windows.go temporal-1.22.5/src/vendor/golang.org/x/sys/windows/syscall_windows.go --- temporal-1.21.5-1/src/vendor/golang.org/x/sys/windows/syscall_windows.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/sys/windows/syscall_windows.go 2024-02-23 09:46:14.000000000 +0000 @@ -15,8 +15,6 @@ "time" "unicode/utf16" "unsafe" - - "golang.org/x/sys/internal/unsafeheader" ) type Handle uintptr @@ -135,14 +133,14 @@ // NewCallback converts a Go function to a function pointer conforming to the stdcall calling convention. // This is useful when interoperating with Windows code requiring callbacks. -// The argument is expected to be a function with with one uintptr-sized result. The function must not have arguments with size larger than the size of uintptr. +// The argument is expected to be a function with one uintptr-sized result. The function must not have arguments with size larger than the size of uintptr. func NewCallback(fn interface{}) uintptr { return syscall.NewCallback(fn) } // NewCallbackCDecl converts a Go function to a function pointer conforming to the cdecl calling convention. // This is useful when interoperating with Windows code requiring callbacks. 
-// The argument is expected to be a function with with one uintptr-sized result. The function must not have arguments with size larger than the size of uintptr. +// The argument is expected to be a function with one uintptr-sized result. The function must not have arguments with size larger than the size of uintptr. func NewCallbackCDecl(fn interface{}) uintptr { return syscall.NewCallbackCDecl(fn) } @@ -216,7 +214,7 @@ //sys shGetKnownFolderPath(id *KNOWNFOLDERID, flags uint32, token Token, path **uint16) (ret error) = shell32.SHGetKnownFolderPath //sys TerminateProcess(handle Handle, exitcode uint32) (err error) //sys GetExitCodeProcess(handle Handle, exitcode *uint32) (err error) -//sys GetStartupInfo(startupInfo *StartupInfo) (err error) = GetStartupInfoW +//sys getStartupInfo(startupInfo *StartupInfo) = GetStartupInfoW //sys GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime, kernelTime *Filetime, userTime *Filetime) (err error) //sys DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetProcessHandle Handle, lpTargetHandle *Handle, dwDesiredAccess uint32, bInheritHandle bool, dwOptions uint32) (err error) //sys WaitForSingleObject(handle Handle, waitMilliseconds uint32) (event uint32, err error) [failretval==0xffffffff] @@ -240,7 +238,7 @@ //sys SetFileAttributes(name *uint16, attrs uint32) (err error) = kernel32.SetFileAttributesW //sys GetFileAttributesEx(name *uint16, level uint32, info *byte) (err error) = kernel32.GetFileAttributesExW //sys GetCommandLine() (cmd *uint16) = kernel32.GetCommandLineW -//sys CommandLineToArgv(cmd *uint16, argc *int32) (argv *[8192]*[8192]uint16, err error) [failretval==nil] = shell32.CommandLineToArgvW +//sys commandLineToArgv(cmd *uint16, argc *int32) (argv **uint16, err error) [failretval==nil] = shell32.CommandLineToArgvW //sys LocalFree(hmem Handle) (handle Handle, err error) [failretval!=0] //sys LocalAlloc(flags uint32, length uint32) (ptr uintptr, err error) //sys SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error) @@ -299,12 +297,15 @@ //sys RegNotifyChangeKeyValue(key Handle, watchSubtree bool, notifyFilter uint32, event Handle, asynchronous bool) (regerrno error) = advapi32.RegNotifyChangeKeyValue //sys GetCurrentProcessId() (pid uint32) = kernel32.GetCurrentProcessId //sys ProcessIdToSessionId(pid uint32, sessionid *uint32) (err error) = kernel32.ProcessIdToSessionId +//sys ClosePseudoConsole(console Handle) = kernel32.ClosePseudoConsole +//sys createPseudoConsole(size uint32, in Handle, out Handle, flags uint32, pconsole *Handle) (hr error) = kernel32.CreatePseudoConsole //sys GetConsoleMode(console Handle, mode *uint32) (err error) = kernel32.GetConsoleMode //sys SetConsoleMode(console Handle, mode uint32) (err error) = kernel32.SetConsoleMode //sys GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) = kernel32.GetConsoleScreenBufferInfo //sys setConsoleCursorPosition(console Handle, position uint32) (err error) = kernel32.SetConsoleCursorPosition //sys WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) = kernel32.WriteConsoleW //sys ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) = kernel32.ReadConsoleW +//sys resizePseudoConsole(pconsole Handle, size uint32) (hr error) = kernel32.ResizePseudoConsole //sys CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) [failretval==InvalidHandle] = 
kernel32.CreateToolhelp32Snapshot //sys Module32First(snapshot Handle, moduleEntry *ModuleEntry32) (err error) = kernel32.Module32FirstW //sys Module32Next(snapshot Handle, moduleEntry *ModuleEntry32) (err error) = kernel32.Module32NextW @@ -405,7 +406,7 @@ //sys VerQueryValue(block unsafe.Pointer, subBlock string, pointerToBufferPointer unsafe.Pointer, bufSize *uint32) (err error) = version.VerQueryValueW // Process Status API (PSAPI) -//sys EnumProcesses(processIds []uint32, bytesReturned *uint32) (err error) = psapi.EnumProcesses +//sys enumProcesses(processIds *uint32, nSize uint32, bytesReturned *uint32) (err error) = psapi.EnumProcesses //sys EnumProcessModules(process Handle, module *Handle, cb uint32, cbNeeded *uint32) (err error) = psapi.EnumProcessModules //sys EnumProcessModulesEx(process Handle, module *Handle, cb uint32, cbNeeded *uint32, filterFlag uint32) (err error) = psapi.EnumProcessModulesEx //sys GetModuleInformation(process Handle, module Handle, modinfo *ModuleInfo, cb uint32) (err error) = psapi.GetModuleInformation @@ -437,6 +438,10 @@ //sys DwmGetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) = dwmapi.DwmGetWindowAttribute //sys DwmSetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) = dwmapi.DwmSetWindowAttribute +// Windows Multimedia API +//sys TimeBeginPeriod (period uint32) (err error) [failretval != 0] = winmm.timeBeginPeriod +//sys TimeEndPeriod (period uint32) (err error) [failretval != 0] = winmm.timeEndPeriod + // syscall interface implementation for other packages // GetCurrentProcess returns the handle for the current process. @@ -1354,6 +1359,17 @@ return syscall.EWINDOWS } +func EnumProcesses(processIds []uint32, bytesReturned *uint32) error { + // EnumProcesses syscall expects the size parameter to be in bytes, but the code generated with mksyscall uses + // the length of the processIds slice instead. Hence, this wrapper function is added to fix the discrepancy. + var p *uint32 + if len(processIds) > 0 { + p = &processIds[0] + } + size := uint32(len(processIds) * 4) + return enumProcesses(p, size, bytesReturned) +} + func Getpid() (pid int) { return int(GetCurrentProcessId()) } func FindFirstFile(name *uint16, data *Win32finddata) (handle Handle, err error) { @@ -1613,6 +1629,11 @@ return setConsoleCursorPosition(console, *((*uint32)(unsafe.Pointer(&position)))) } +func GetStartupInfo(startupInfo *StartupInfo) error { + getStartupInfo(startupInfo) + return nil +} + func (s NTStatus) Errno() syscall.Errno { return rtlNtStatusToDosErrorNoTeb(s) } @@ -1647,12 +1668,8 @@ // Slice returns a uint16 slice that aliases the data in the NTUnicodeString. func (s *NTUnicodeString) Slice() []uint16 { - var slice []uint16 - hdr := (*unsafeheader.Slice)(unsafe.Pointer(&slice)) - hdr.Data = unsafe.Pointer(s.Buffer) - hdr.Len = int(s.Length) - hdr.Cap = int(s.MaximumLength) - return slice + slice := unsafe.Slice(s.Buffer, s.MaximumLength) + return slice[:s.Length] } func (s *NTUnicodeString) String() string { @@ -1675,12 +1692,8 @@ // Slice returns a byte slice that aliases the data in the NTString. 
 func (s *NTString) Slice() []byte {
-	var slice []byte
-	hdr := (*unsafeheader.Slice)(unsafe.Pointer(&slice))
-	hdr.Data = unsafe.Pointer(s.Buffer)
-	hdr.Len = int(s.Length)
-	hdr.Cap = int(s.MaximumLength)
-	return slice
+	slice := unsafe.Slice(s.Buffer, s.MaximumLength)
+	return slice[:s.Length]
 }
 
 func (s *NTString) String() string {
@@ -1732,10 +1745,7 @@
 	if err != nil {
 		return
 	}
-	h := (*unsafeheader.Slice)(unsafe.Pointer(&data))
-	h.Data = unsafe.Pointer(ptr)
-	h.Len = int(size)
-	h.Cap = int(size)
+	data = unsafe.Slice((*byte)(unsafe.Pointer(ptr)), size)
 	return
 }
 
@@ -1806,3 +1816,17 @@
 	// A PSAPI_WORKING_SET_EX_BLOCK union that indicates the attributes of the page at VirtualAddress.
 	VirtualAttributes PSAPI_WORKING_SET_EX_BLOCK
 }
+
+// CreatePseudoConsole creates a windows pseudo console.
+func CreatePseudoConsole(size Coord, in Handle, out Handle, flags uint32, pconsole *Handle) error {
+	// We need this wrapper to manually cast Coord to uint32. The autogenerated wrappers only
+	// accept arguments that can be casted to uintptr, and Coord can't.
+	return createPseudoConsole(*((*uint32)(unsafe.Pointer(&size))), in, out, flags, pconsole)
+}
+
+// ResizePseudoConsole resizes the internal buffers of the pseudo console to the width and height specified in `size`.
+func ResizePseudoConsole(pconsole Handle, size Coord) error {
+	// We need this wrapper to manually cast Coord to uint32. The autogenerated wrappers only
+	// accept arguments that can be casted to uintptr, and Coord can't.
+	return resizePseudoConsole(pconsole, *((*uint32)(unsafe.Pointer(&size))))
+}
diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/windows/types_windows.go temporal-1.22.5/src/vendor/golang.org/x/sys/windows/types_windows.go
--- temporal-1.21.5-1/src/vendor/golang.org/x/sys/windows/types_windows.go	2023-09-29 14:03:34.000000000 +0000
+++ temporal-1.22.5/src/vendor/golang.org/x/sys/windows/types_windows.go	2024-02-23 09:46:14.000000000 +0000
@@ -247,6 +247,7 @@
 	PROC_THREAD_ATTRIBUTE_MITIGATION_POLICY = 0x00020007
 	PROC_THREAD_ATTRIBUTE_UMS_THREAD = 0x00030006
 	PROC_THREAD_ATTRIBUTE_PROTECTION_LEVEL = 0x0002000b
+	PROC_THREAD_ATTRIBUTE_PSEUDOCONSOLE = 0x00020016
 )
 
 const (
@@ -2139,6 +2140,12 @@
 	ENABLE_LVB_GRID_WORLDWIDE = 0x10
 )
 
+// Pseudo console related constants used for the flags parameter to
+// CreatePseudoConsole. See: https://learn.microsoft.com/en-us/windows/console/createpseudoconsole
+const (
+	PSEUDOCONSOLE_INHERIT_CURSOR = 0x1
+)
+
 type Coord struct {
 	X int16
 	Y int16
diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/sys/windows/zsyscall_windows.go temporal-1.22.5/src/vendor/golang.org/x/sys/windows/zsyscall_windows.go
--- temporal-1.21.5-1/src/vendor/golang.org/x/sys/windows/zsyscall_windows.go	2023-09-29 14:03:34.000000000 +0000
+++ temporal-1.22.5/src/vendor/golang.org/x/sys/windows/zsyscall_windows.go	2024-02-23 09:46:14.000000000 +0000
@@ -55,6 +55,7 @@
 	moduser32 = NewLazySystemDLL("user32.dll")
 	moduserenv = NewLazySystemDLL("userenv.dll")
 	modversion = NewLazySystemDLL("version.dll")
+	modwinmm = NewLazySystemDLL("winmm.dll")
 	modwintrust = NewLazySystemDLL("wintrust.dll")
 	modws2_32 = NewLazySystemDLL("ws2_32.dll")
 	modwtsapi32 = NewLazySystemDLL("wtsapi32.dll")
@@ -187,6 +188,7 @@
 	procCancelIo = modkernel32.NewProc("CancelIo")
 	procCancelIoEx = modkernel32.NewProc("CancelIoEx")
 	procCloseHandle = modkernel32.NewProc("CloseHandle")
+	procClosePseudoConsole = modkernel32.NewProc("ClosePseudoConsole")
 	procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe")
 	procCreateDirectoryW = modkernel32.NewProc("CreateDirectoryW")
 	procCreateEventExW = modkernel32.NewProc("CreateEventExW")
@@ -201,6 +203,7 @@
 	procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW")
 	procCreatePipe = modkernel32.NewProc("CreatePipe")
 	procCreateProcessW = modkernel32.NewProc("CreateProcessW")
+	procCreatePseudoConsole = modkernel32.NewProc("CreatePseudoConsole")
 	procCreateSymbolicLinkW = modkernel32.NewProc("CreateSymbolicLinkW")
 	procCreateToolhelp32Snapshot = modkernel32.NewProc("CreateToolhelp32Snapshot")
 	procDefineDosDeviceW = modkernel32.NewProc("DefineDosDeviceW")
@@ -327,6 +330,7 @@
 	procReleaseMutex = modkernel32.NewProc("ReleaseMutex")
 	procRemoveDirectoryW = modkernel32.NewProc("RemoveDirectoryW")
 	procResetEvent = modkernel32.NewProc("ResetEvent")
+	procResizePseudoConsole = modkernel32.NewProc("ResizePseudoConsole")
 	procResumeThread = modkernel32.NewProc("ResumeThread")
 	procSetCommTimeouts = modkernel32.NewProc("SetCommTimeouts")
 	procSetConsoleCursorPosition = modkernel32.NewProc("SetConsoleCursorPosition")
@@ -468,6 +472,8 @@
 	procGetFileVersionInfoSizeW = modversion.NewProc("GetFileVersionInfoSizeW")
 	procGetFileVersionInfoW = modversion.NewProc("GetFileVersionInfoW")
 	procVerQueryValueW = modversion.NewProc("VerQueryValueW")
+	proctimeBeginPeriod = modwinmm.NewProc("timeBeginPeriod")
+	proctimeEndPeriod = modwinmm.NewProc("timeEndPeriod")
 	procWinVerifyTrustEx = modwintrust.NewProc("WinVerifyTrustEx")
 	procFreeAddrInfoW = modws2_32.NewProc("FreeAddrInfoW")
 	procGetAddrInfoW = modws2_32.NewProc("GetAddrInfoW")
@@ -1630,6 +1636,11 @@
 	return
 }
 
+func ClosePseudoConsole(console Handle) {
+	syscall.Syscall(procClosePseudoConsole.Addr(), 1, uintptr(console), 0, 0)
+	return
+}
+
 func ConnectNamedPipe(pipe Handle, overlapped *Overlapped) (err error) {
 	r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(overlapped)), 0)
 	if r1 == 0 {
@@ -1759,6 +1770,14 @@
 	return
 }
 
+func createPseudoConsole(size uint32, in Handle, out Handle, flags uint32, pconsole *Handle) (hr error) {
+	r0, _, _ := syscall.Syscall6(procCreatePseudoConsole.Addr(), 5, uintptr(size), uintptr(in), uintptr(out), uintptr(flags), uintptr(unsafe.Pointer(pconsole)), 0)
+	if r0 != 0 {
+		hr = syscall.Errno(r0)
+	}
+	return
+}
+
 func CreateSymbolicLink(symlinkfilename *uint16, targetfilename *uint16, flags uint32) (err error) {
 	r1, _, e1 := syscall.Syscall(procCreateSymbolicLinkW.Addr(), 3, uintptr(unsafe.Pointer(symlinkfilename)), uintptr(unsafe.Pointer(targetfilename)), uintptr(flags))
 	if r1&0xff == 0 {
@@ -2367,11 +2386,8 @@
 	return
 }
 
-func GetStartupInfo(startupInfo *StartupInfo) (err error) {
-	r1, _, e1 := syscall.Syscall(procGetStartupInfoW.Addr(), 1, uintptr(unsafe.Pointer(startupInfo)), 0, 0)
-	if r1 == 0 {
-		err = errnoErr(e1)
-	}
+func getStartupInfo(startupInfo *StartupInfo) {
+	syscall.Syscall(procGetStartupInfoW.Addr(), 1, uintptr(unsafe.Pointer(startupInfo)), 0, 0)
 	return
 }
 
@@ -2862,6 +2878,14 @@
 	return
 }
 
+func resizePseudoConsole(pconsole Handle, size uint32) (hr error) {
+	r0, _, _ := syscall.Syscall(procResizePseudoConsole.Addr(), 2, uintptr(pconsole), uintptr(size), 0)
+	if r0 != 0 {
+		hr = syscall.Errno(r0)
+	}
+	return
+}
+
 func ResumeThread(thread Handle) (ret uint32, err error) {
 	r0, _, e1 := syscall.Syscall(procResumeThread.Addr(), 1, uintptr(thread), 0, 0)
 	ret = uint32(r0)
@@ -3516,12 +3540,8 @@
 	return
 }
 
-func EnumProcesses(processIds []uint32, bytesReturned *uint32) (err error) {
-	var _p0 *uint32
-	if len(processIds) > 0 {
-		_p0 = &processIds[0]
-	}
-	r1, _, e1 := syscall.Syscall(procEnumProcesses.Addr(), 3, uintptr(unsafe.Pointer(_p0)), uintptr(len(processIds)), uintptr(unsafe.Pointer(bytesReturned)))
+func enumProcesses(processIds *uint32, nSize uint32, bytesReturned *uint32) (err error) {
+	r1, _, e1 := syscall.Syscall(procEnumProcesses.Addr(), 3, uintptr(unsafe.Pointer(processIds)), uintptr(nSize), uintptr(unsafe.Pointer(bytesReturned)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3824,9 +3844,9 @@
 	return
 }
 
-func CommandLineToArgv(cmd *uint16, argc *int32) (argv *[8192]*[8192]uint16, err error) {
+func commandLineToArgv(cmd *uint16, argc *int32) (argv **uint16, err error) {
 	r0, _, e1 := syscall.Syscall(procCommandLineToArgvW.Addr(), 2, uintptr(unsafe.Pointer(cmd)), uintptr(unsafe.Pointer(argc)), 0)
-	argv = (*[8192]*[8192]uint16)(unsafe.Pointer(r0))
+	argv = (**uint16)(unsafe.Pointer(r0))
 	if argv == nil {
 		err = errnoErr(e1)
 	}
@@ -4019,6 +4039,22 @@
 		err = errnoErr(e1)
 	}
 	return
+}
+
+func TimeBeginPeriod(period uint32) (err error) {
+	r1, _, e1 := syscall.Syscall(proctimeBeginPeriod.Addr(), 1, uintptr(period), 0, 0)
+	if r1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func TimeEndPeriod(period uint32) (err error) {
+	r1, _, e1 := syscall.Syscall(proctimeEndPeriod.Addr(), 1, uintptr(period), 0, 0)
+	if r1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
 }
 
 func WinVerifyTrustEx(hwnd HWND, actionId *GUID, data *WinTrustData) (ret error) {
diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go temporal-1.22.5/src/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go
--- temporal-1.21.5-1/src/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go	2023-09-29 14:03:34.000000000 +0000
+++ temporal-1.22.5/src/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go	2024-02-23 09:46:14.000000000 +0000
@@ -1,7 +1,7 @@
 // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
-//go:build go1.16 -// +build go1.16 +//go:build go1.16 && !go1.21 +// +build go1.16,!go1.21 package bidi diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/text/unicode/bidi/tables15.0.0.go temporal-1.22.5/src/vendor/golang.org/x/text/unicode/bidi/tables15.0.0.go --- temporal-1.21.5-1/src/vendor/golang.org/x/text/unicode/bidi/tables15.0.0.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/text/unicode/bidi/tables15.0.0.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,2043 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +//go:build go1.21 +// +build go1.21 + +package bidi + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. +const UnicodeVersion = "15.0.0" + +// xorMasks contains masks to be xor-ed with brackets to get the reverse +// version. +var xorMasks = []int32{ // 8 elements + 0, 1, 6, 7, 3, 15, 29, 63, +} // Size: 56 bytes + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *bidiTrie) lookup(s []byte) (v uint8, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return bidiValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = bidiIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *bidiTrie) lookupUnsafe(s []byte) uint8 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return bidiValues[c0] + } + i := bidiIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = bidiIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = bidiIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. 
+func (t *bidiTrie) lookupString(s string) (v uint8, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return bidiValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = bidiIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *bidiTrie) lookupStringUnsafe(s string) uint8 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return bidiValues[c0] + } + i := bidiIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = bidiIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = bidiIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// bidiTrie. Total size: 19904 bytes (19.44 KiB). Checksum: b1f201ed2debb6c8. +type bidiTrie struct{} + +func newBidiTrie(i int) *bidiTrie { + return &bidiTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *bidiTrie) lookupValue(n uint32, b byte) uint8 { + switch { + default: + return uint8(bidiValues[n<<6+uint32(b)]) + } +} + +// bidiValues: 259 blocks, 16576 entries, 16576 bytes +// The third block is the zero block. 
+var bidiValues = [16576]uint8{ + // Block 0x0, offset 0x0 + 0x00: 0x000b, 0x01: 0x000b, 0x02: 0x000b, 0x03: 0x000b, 0x04: 0x000b, 0x05: 0x000b, + 0x06: 0x000b, 0x07: 0x000b, 0x08: 0x000b, 0x09: 0x0008, 0x0a: 0x0007, 0x0b: 0x0008, + 0x0c: 0x0009, 0x0d: 0x0007, 0x0e: 0x000b, 0x0f: 0x000b, 0x10: 0x000b, 0x11: 0x000b, + 0x12: 0x000b, 0x13: 0x000b, 0x14: 0x000b, 0x15: 0x000b, 0x16: 0x000b, 0x17: 0x000b, + 0x18: 0x000b, 0x19: 0x000b, 0x1a: 0x000b, 0x1b: 0x000b, 0x1c: 0x0007, 0x1d: 0x0007, + 0x1e: 0x0007, 0x1f: 0x0008, 0x20: 0x0009, 0x21: 0x000a, 0x22: 0x000a, 0x23: 0x0004, + 0x24: 0x0004, 0x25: 0x0004, 0x26: 0x000a, 0x27: 0x000a, 0x28: 0x003a, 0x29: 0x002a, + 0x2a: 0x000a, 0x2b: 0x0003, 0x2c: 0x0006, 0x2d: 0x0003, 0x2e: 0x0006, 0x2f: 0x0006, + 0x30: 0x0002, 0x31: 0x0002, 0x32: 0x0002, 0x33: 0x0002, 0x34: 0x0002, 0x35: 0x0002, + 0x36: 0x0002, 0x37: 0x0002, 0x38: 0x0002, 0x39: 0x0002, 0x3a: 0x0006, 0x3b: 0x000a, + 0x3c: 0x000a, 0x3d: 0x000a, 0x3e: 0x000a, 0x3f: 0x000a, + // Block 0x1, offset 0x40 + 0x40: 0x000a, + 0x5b: 0x005a, 0x5c: 0x000a, 0x5d: 0x004a, + 0x5e: 0x000a, 0x5f: 0x000a, 0x60: 0x000a, + 0x7b: 0x005a, + 0x7c: 0x000a, 0x7d: 0x004a, 0x7e: 0x000a, 0x7f: 0x000b, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x000b, 0xc1: 0x000b, 0xc2: 0x000b, 0xc3: 0x000b, 0xc4: 0x000b, 0xc5: 0x0007, + 0xc6: 0x000b, 0xc7: 0x000b, 0xc8: 0x000b, 0xc9: 0x000b, 0xca: 0x000b, 0xcb: 0x000b, + 0xcc: 0x000b, 0xcd: 0x000b, 0xce: 0x000b, 0xcf: 0x000b, 0xd0: 0x000b, 0xd1: 0x000b, + 0xd2: 0x000b, 0xd3: 0x000b, 0xd4: 0x000b, 0xd5: 0x000b, 0xd6: 0x000b, 0xd7: 0x000b, + 0xd8: 0x000b, 0xd9: 0x000b, 0xda: 0x000b, 0xdb: 0x000b, 0xdc: 0x000b, 0xdd: 0x000b, + 0xde: 0x000b, 0xdf: 0x000b, 0xe0: 0x0006, 0xe1: 0x000a, 0xe2: 0x0004, 0xe3: 0x0004, + 0xe4: 0x0004, 0xe5: 0x0004, 0xe6: 0x000a, 0xe7: 0x000a, 0xe8: 0x000a, 0xe9: 0x000a, + 0xeb: 0x000a, 0xec: 0x000a, 0xed: 0x000b, 0xee: 0x000a, 0xef: 0x000a, + 0xf0: 0x0004, 0xf1: 0x0004, 0xf2: 0x0002, 0xf3: 0x0002, 0xf4: 0x000a, + 0xf6: 0x000a, 0xf7: 0x000a, 0xf8: 0x000a, 0xf9: 0x0002, 0xfb: 0x000a, + 0xfc: 0x000a, 0xfd: 0x000a, 0xfe: 0x000a, 0xff: 0x000a, + // Block 0x4, offset 0x100 + 0x117: 0x000a, + 0x137: 0x000a, + // Block 0x5, offset 0x140 + 0x179: 0x000a, 0x17a: 0x000a, + // Block 0x6, offset 0x180 + 0x182: 0x000a, 0x183: 0x000a, 0x184: 0x000a, 0x185: 0x000a, + 0x186: 0x000a, 0x187: 0x000a, 0x188: 0x000a, 0x189: 0x000a, 0x18a: 0x000a, 0x18b: 0x000a, + 0x18c: 0x000a, 0x18d: 0x000a, 0x18e: 0x000a, 0x18f: 0x000a, + 0x192: 0x000a, 0x193: 0x000a, 0x194: 0x000a, 0x195: 0x000a, 0x196: 0x000a, 0x197: 0x000a, + 0x198: 0x000a, 0x199: 0x000a, 0x19a: 0x000a, 0x19b: 0x000a, 0x19c: 0x000a, 0x19d: 0x000a, + 0x19e: 0x000a, 0x19f: 0x000a, + 0x1a5: 0x000a, 0x1a6: 0x000a, 0x1a7: 0x000a, 0x1a8: 0x000a, 0x1a9: 0x000a, + 0x1aa: 0x000a, 0x1ab: 0x000a, 0x1ac: 0x000a, 0x1ad: 0x000a, 0x1af: 0x000a, + 0x1b0: 0x000a, 0x1b1: 0x000a, 0x1b2: 0x000a, 0x1b3: 0x000a, 0x1b4: 0x000a, 0x1b5: 0x000a, + 0x1b6: 0x000a, 0x1b7: 0x000a, 0x1b8: 0x000a, 0x1b9: 0x000a, 0x1ba: 0x000a, 0x1bb: 0x000a, + 0x1bc: 0x000a, 0x1bd: 0x000a, 0x1be: 0x000a, 0x1bf: 0x000a, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x000c, 0x1c1: 0x000c, 0x1c2: 0x000c, 0x1c3: 0x000c, 0x1c4: 0x000c, 0x1c5: 0x000c, + 0x1c6: 0x000c, 0x1c7: 0x000c, 0x1c8: 0x000c, 0x1c9: 0x000c, 0x1ca: 0x000c, 0x1cb: 0x000c, + 0x1cc: 0x000c, 0x1cd: 0x000c, 0x1ce: 0x000c, 0x1cf: 0x000c, 0x1d0: 0x000c, 0x1d1: 0x000c, + 0x1d2: 0x000c, 0x1d3: 0x000c, 0x1d4: 0x000c, 0x1d5: 0x000c, 0x1d6: 0x000c, 0x1d7: 0x000c, + 0x1d8: 0x000c, 0x1d9: 0x000c, 0x1da: 0x000c, 0x1db: 
0x000c, 0x1dc: 0x000c, 0x1dd: 0x000c, + 0x1de: 0x000c, 0x1df: 0x000c, 0x1e0: 0x000c, 0x1e1: 0x000c, 0x1e2: 0x000c, 0x1e3: 0x000c, + 0x1e4: 0x000c, 0x1e5: 0x000c, 0x1e6: 0x000c, 0x1e7: 0x000c, 0x1e8: 0x000c, 0x1e9: 0x000c, + 0x1ea: 0x000c, 0x1eb: 0x000c, 0x1ec: 0x000c, 0x1ed: 0x000c, 0x1ee: 0x000c, 0x1ef: 0x000c, + 0x1f0: 0x000c, 0x1f1: 0x000c, 0x1f2: 0x000c, 0x1f3: 0x000c, 0x1f4: 0x000c, 0x1f5: 0x000c, + 0x1f6: 0x000c, 0x1f7: 0x000c, 0x1f8: 0x000c, 0x1f9: 0x000c, 0x1fa: 0x000c, 0x1fb: 0x000c, + 0x1fc: 0x000c, 0x1fd: 0x000c, 0x1fe: 0x000c, 0x1ff: 0x000c, + // Block 0x8, offset 0x200 + 0x200: 0x000c, 0x201: 0x000c, 0x202: 0x000c, 0x203: 0x000c, 0x204: 0x000c, 0x205: 0x000c, + 0x206: 0x000c, 0x207: 0x000c, 0x208: 0x000c, 0x209: 0x000c, 0x20a: 0x000c, 0x20b: 0x000c, + 0x20c: 0x000c, 0x20d: 0x000c, 0x20e: 0x000c, 0x20f: 0x000c, 0x210: 0x000c, 0x211: 0x000c, + 0x212: 0x000c, 0x213: 0x000c, 0x214: 0x000c, 0x215: 0x000c, 0x216: 0x000c, 0x217: 0x000c, + 0x218: 0x000c, 0x219: 0x000c, 0x21a: 0x000c, 0x21b: 0x000c, 0x21c: 0x000c, 0x21d: 0x000c, + 0x21e: 0x000c, 0x21f: 0x000c, 0x220: 0x000c, 0x221: 0x000c, 0x222: 0x000c, 0x223: 0x000c, + 0x224: 0x000c, 0x225: 0x000c, 0x226: 0x000c, 0x227: 0x000c, 0x228: 0x000c, 0x229: 0x000c, + 0x22a: 0x000c, 0x22b: 0x000c, 0x22c: 0x000c, 0x22d: 0x000c, 0x22e: 0x000c, 0x22f: 0x000c, + 0x234: 0x000a, 0x235: 0x000a, + 0x23e: 0x000a, + // Block 0x9, offset 0x240 + 0x244: 0x000a, 0x245: 0x000a, + 0x247: 0x000a, + // Block 0xa, offset 0x280 + 0x2b6: 0x000a, + // Block 0xb, offset 0x2c0 + 0x2c3: 0x000c, 0x2c4: 0x000c, 0x2c5: 0x000c, + 0x2c6: 0x000c, 0x2c7: 0x000c, 0x2c8: 0x000c, 0x2c9: 0x000c, + // Block 0xc, offset 0x300 + 0x30a: 0x000a, + 0x30d: 0x000a, 0x30e: 0x000a, 0x30f: 0x0004, 0x310: 0x0001, 0x311: 0x000c, + 0x312: 0x000c, 0x313: 0x000c, 0x314: 0x000c, 0x315: 0x000c, 0x316: 0x000c, 0x317: 0x000c, + 0x318: 0x000c, 0x319: 0x000c, 0x31a: 0x000c, 0x31b: 0x000c, 0x31c: 0x000c, 0x31d: 0x000c, + 0x31e: 0x000c, 0x31f: 0x000c, 0x320: 0x000c, 0x321: 0x000c, 0x322: 0x000c, 0x323: 0x000c, + 0x324: 0x000c, 0x325: 0x000c, 0x326: 0x000c, 0x327: 0x000c, 0x328: 0x000c, 0x329: 0x000c, + 0x32a: 0x000c, 0x32b: 0x000c, 0x32c: 0x000c, 0x32d: 0x000c, 0x32e: 0x000c, 0x32f: 0x000c, + 0x330: 0x000c, 0x331: 0x000c, 0x332: 0x000c, 0x333: 0x000c, 0x334: 0x000c, 0x335: 0x000c, + 0x336: 0x000c, 0x337: 0x000c, 0x338: 0x000c, 0x339: 0x000c, 0x33a: 0x000c, 0x33b: 0x000c, + 0x33c: 0x000c, 0x33d: 0x000c, 0x33e: 0x0001, 0x33f: 0x000c, + // Block 0xd, offset 0x340 + 0x340: 0x0001, 0x341: 0x000c, 0x342: 0x000c, 0x343: 0x0001, 0x344: 0x000c, 0x345: 0x000c, + 0x346: 0x0001, 0x347: 0x000c, 0x348: 0x0001, 0x349: 0x0001, 0x34a: 0x0001, 0x34b: 0x0001, + 0x34c: 0x0001, 0x34d: 0x0001, 0x34e: 0x0001, 0x34f: 0x0001, 0x350: 0x0001, 0x351: 0x0001, + 0x352: 0x0001, 0x353: 0x0001, 0x354: 0x0001, 0x355: 0x0001, 0x356: 0x0001, 0x357: 0x0001, + 0x358: 0x0001, 0x359: 0x0001, 0x35a: 0x0001, 0x35b: 0x0001, 0x35c: 0x0001, 0x35d: 0x0001, + 0x35e: 0x0001, 0x35f: 0x0001, 0x360: 0x0001, 0x361: 0x0001, 0x362: 0x0001, 0x363: 0x0001, + 0x364: 0x0001, 0x365: 0x0001, 0x366: 0x0001, 0x367: 0x0001, 0x368: 0x0001, 0x369: 0x0001, + 0x36a: 0x0001, 0x36b: 0x0001, 0x36c: 0x0001, 0x36d: 0x0001, 0x36e: 0x0001, 0x36f: 0x0001, + 0x370: 0x0001, 0x371: 0x0001, 0x372: 0x0001, 0x373: 0x0001, 0x374: 0x0001, 0x375: 0x0001, + 0x376: 0x0001, 0x377: 0x0001, 0x378: 0x0001, 0x379: 0x0001, 0x37a: 0x0001, 0x37b: 0x0001, + 0x37c: 0x0001, 0x37d: 0x0001, 0x37e: 0x0001, 0x37f: 0x0001, + // Block 0xe, offset 0x380 + 0x380: 0x0005, 0x381: 0x0005, 0x382: 0x0005, 
0x383: 0x0005, 0x384: 0x0005, 0x385: 0x0005, + 0x386: 0x000a, 0x387: 0x000a, 0x388: 0x000d, 0x389: 0x0004, 0x38a: 0x0004, 0x38b: 0x000d, + 0x38c: 0x0006, 0x38d: 0x000d, 0x38e: 0x000a, 0x38f: 0x000a, 0x390: 0x000c, 0x391: 0x000c, + 0x392: 0x000c, 0x393: 0x000c, 0x394: 0x000c, 0x395: 0x000c, 0x396: 0x000c, 0x397: 0x000c, + 0x398: 0x000c, 0x399: 0x000c, 0x39a: 0x000c, 0x39b: 0x000d, 0x39c: 0x000d, 0x39d: 0x000d, + 0x39e: 0x000d, 0x39f: 0x000d, 0x3a0: 0x000d, 0x3a1: 0x000d, 0x3a2: 0x000d, 0x3a3: 0x000d, + 0x3a4: 0x000d, 0x3a5: 0x000d, 0x3a6: 0x000d, 0x3a7: 0x000d, 0x3a8: 0x000d, 0x3a9: 0x000d, + 0x3aa: 0x000d, 0x3ab: 0x000d, 0x3ac: 0x000d, 0x3ad: 0x000d, 0x3ae: 0x000d, 0x3af: 0x000d, + 0x3b0: 0x000d, 0x3b1: 0x000d, 0x3b2: 0x000d, 0x3b3: 0x000d, 0x3b4: 0x000d, 0x3b5: 0x000d, + 0x3b6: 0x000d, 0x3b7: 0x000d, 0x3b8: 0x000d, 0x3b9: 0x000d, 0x3ba: 0x000d, 0x3bb: 0x000d, + 0x3bc: 0x000d, 0x3bd: 0x000d, 0x3be: 0x000d, 0x3bf: 0x000d, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x000d, 0x3c1: 0x000d, 0x3c2: 0x000d, 0x3c3: 0x000d, 0x3c4: 0x000d, 0x3c5: 0x000d, + 0x3c6: 0x000d, 0x3c7: 0x000d, 0x3c8: 0x000d, 0x3c9: 0x000d, 0x3ca: 0x000d, 0x3cb: 0x000c, + 0x3cc: 0x000c, 0x3cd: 0x000c, 0x3ce: 0x000c, 0x3cf: 0x000c, 0x3d0: 0x000c, 0x3d1: 0x000c, + 0x3d2: 0x000c, 0x3d3: 0x000c, 0x3d4: 0x000c, 0x3d5: 0x000c, 0x3d6: 0x000c, 0x3d7: 0x000c, + 0x3d8: 0x000c, 0x3d9: 0x000c, 0x3da: 0x000c, 0x3db: 0x000c, 0x3dc: 0x000c, 0x3dd: 0x000c, + 0x3de: 0x000c, 0x3df: 0x000c, 0x3e0: 0x0005, 0x3e1: 0x0005, 0x3e2: 0x0005, 0x3e3: 0x0005, + 0x3e4: 0x0005, 0x3e5: 0x0005, 0x3e6: 0x0005, 0x3e7: 0x0005, 0x3e8: 0x0005, 0x3e9: 0x0005, + 0x3ea: 0x0004, 0x3eb: 0x0005, 0x3ec: 0x0005, 0x3ed: 0x000d, 0x3ee: 0x000d, 0x3ef: 0x000d, + 0x3f0: 0x000c, 0x3f1: 0x000d, 0x3f2: 0x000d, 0x3f3: 0x000d, 0x3f4: 0x000d, 0x3f5: 0x000d, + 0x3f6: 0x000d, 0x3f7: 0x000d, 0x3f8: 0x000d, 0x3f9: 0x000d, 0x3fa: 0x000d, 0x3fb: 0x000d, + 0x3fc: 0x000d, 0x3fd: 0x000d, 0x3fe: 0x000d, 0x3ff: 0x000d, + // Block 0x10, offset 0x400 + 0x400: 0x000d, 0x401: 0x000d, 0x402: 0x000d, 0x403: 0x000d, 0x404: 0x000d, 0x405: 0x000d, + 0x406: 0x000d, 0x407: 0x000d, 0x408: 0x000d, 0x409: 0x000d, 0x40a: 0x000d, 0x40b: 0x000d, + 0x40c: 0x000d, 0x40d: 0x000d, 0x40e: 0x000d, 0x40f: 0x000d, 0x410: 0x000d, 0x411: 0x000d, + 0x412: 0x000d, 0x413: 0x000d, 0x414: 0x000d, 0x415: 0x000d, 0x416: 0x000d, 0x417: 0x000d, + 0x418: 0x000d, 0x419: 0x000d, 0x41a: 0x000d, 0x41b: 0x000d, 0x41c: 0x000d, 0x41d: 0x000d, + 0x41e: 0x000d, 0x41f: 0x000d, 0x420: 0x000d, 0x421: 0x000d, 0x422: 0x000d, 0x423: 0x000d, + 0x424: 0x000d, 0x425: 0x000d, 0x426: 0x000d, 0x427: 0x000d, 0x428: 0x000d, 0x429: 0x000d, + 0x42a: 0x000d, 0x42b: 0x000d, 0x42c: 0x000d, 0x42d: 0x000d, 0x42e: 0x000d, 0x42f: 0x000d, + 0x430: 0x000d, 0x431: 0x000d, 0x432: 0x000d, 0x433: 0x000d, 0x434: 0x000d, 0x435: 0x000d, + 0x436: 0x000d, 0x437: 0x000d, 0x438: 0x000d, 0x439: 0x000d, 0x43a: 0x000d, 0x43b: 0x000d, + 0x43c: 0x000d, 0x43d: 0x000d, 0x43e: 0x000d, 0x43f: 0x000d, + // Block 0x11, offset 0x440 + 0x440: 0x000d, 0x441: 0x000d, 0x442: 0x000d, 0x443: 0x000d, 0x444: 0x000d, 0x445: 0x000d, + 0x446: 0x000d, 0x447: 0x000d, 0x448: 0x000d, 0x449: 0x000d, 0x44a: 0x000d, 0x44b: 0x000d, + 0x44c: 0x000d, 0x44d: 0x000d, 0x44e: 0x000d, 0x44f: 0x000d, 0x450: 0x000d, 0x451: 0x000d, + 0x452: 0x000d, 0x453: 0x000d, 0x454: 0x000d, 0x455: 0x000d, 0x456: 0x000c, 0x457: 0x000c, + 0x458: 0x000c, 0x459: 0x000c, 0x45a: 0x000c, 0x45b: 0x000c, 0x45c: 0x000c, 0x45d: 0x0005, + 0x45e: 0x000a, 0x45f: 0x000c, 0x460: 0x000c, 0x461: 0x000c, 0x462: 0x000c, 0x463: 0x000c, + 0x464: 
0x000c, 0x465: 0x000d, 0x466: 0x000d, 0x467: 0x000c, 0x468: 0x000c, 0x469: 0x000a, + 0x46a: 0x000c, 0x46b: 0x000c, 0x46c: 0x000c, 0x46d: 0x000c, 0x46e: 0x000d, 0x46f: 0x000d, + 0x470: 0x0002, 0x471: 0x0002, 0x472: 0x0002, 0x473: 0x0002, 0x474: 0x0002, 0x475: 0x0002, + 0x476: 0x0002, 0x477: 0x0002, 0x478: 0x0002, 0x479: 0x0002, 0x47a: 0x000d, 0x47b: 0x000d, + 0x47c: 0x000d, 0x47d: 0x000d, 0x47e: 0x000d, 0x47f: 0x000d, + // Block 0x12, offset 0x480 + 0x480: 0x000d, 0x481: 0x000d, 0x482: 0x000d, 0x483: 0x000d, 0x484: 0x000d, 0x485: 0x000d, + 0x486: 0x000d, 0x487: 0x000d, 0x488: 0x000d, 0x489: 0x000d, 0x48a: 0x000d, 0x48b: 0x000d, + 0x48c: 0x000d, 0x48d: 0x000d, 0x48e: 0x000d, 0x48f: 0x000d, 0x490: 0x000d, 0x491: 0x000c, + 0x492: 0x000d, 0x493: 0x000d, 0x494: 0x000d, 0x495: 0x000d, 0x496: 0x000d, 0x497: 0x000d, + 0x498: 0x000d, 0x499: 0x000d, 0x49a: 0x000d, 0x49b: 0x000d, 0x49c: 0x000d, 0x49d: 0x000d, + 0x49e: 0x000d, 0x49f: 0x000d, 0x4a0: 0x000d, 0x4a1: 0x000d, 0x4a2: 0x000d, 0x4a3: 0x000d, + 0x4a4: 0x000d, 0x4a5: 0x000d, 0x4a6: 0x000d, 0x4a7: 0x000d, 0x4a8: 0x000d, 0x4a9: 0x000d, + 0x4aa: 0x000d, 0x4ab: 0x000d, 0x4ac: 0x000d, 0x4ad: 0x000d, 0x4ae: 0x000d, 0x4af: 0x000d, + 0x4b0: 0x000c, 0x4b1: 0x000c, 0x4b2: 0x000c, 0x4b3: 0x000c, 0x4b4: 0x000c, 0x4b5: 0x000c, + 0x4b6: 0x000c, 0x4b7: 0x000c, 0x4b8: 0x000c, 0x4b9: 0x000c, 0x4ba: 0x000c, 0x4bb: 0x000c, + 0x4bc: 0x000c, 0x4bd: 0x000c, 0x4be: 0x000c, 0x4bf: 0x000c, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x000c, 0x4c1: 0x000c, 0x4c2: 0x000c, 0x4c3: 0x000c, 0x4c4: 0x000c, 0x4c5: 0x000c, + 0x4c6: 0x000c, 0x4c7: 0x000c, 0x4c8: 0x000c, 0x4c9: 0x000c, 0x4ca: 0x000c, 0x4cb: 0x000d, + 0x4cc: 0x000d, 0x4cd: 0x000d, 0x4ce: 0x000d, 0x4cf: 0x000d, 0x4d0: 0x000d, 0x4d1: 0x000d, + 0x4d2: 0x000d, 0x4d3: 0x000d, 0x4d4: 0x000d, 0x4d5: 0x000d, 0x4d6: 0x000d, 0x4d7: 0x000d, + 0x4d8: 0x000d, 0x4d9: 0x000d, 0x4da: 0x000d, 0x4db: 0x000d, 0x4dc: 0x000d, 0x4dd: 0x000d, + 0x4de: 0x000d, 0x4df: 0x000d, 0x4e0: 0x000d, 0x4e1: 0x000d, 0x4e2: 0x000d, 0x4e3: 0x000d, + 0x4e4: 0x000d, 0x4e5: 0x000d, 0x4e6: 0x000d, 0x4e7: 0x000d, 0x4e8: 0x000d, 0x4e9: 0x000d, + 0x4ea: 0x000d, 0x4eb: 0x000d, 0x4ec: 0x000d, 0x4ed: 0x000d, 0x4ee: 0x000d, 0x4ef: 0x000d, + 0x4f0: 0x000d, 0x4f1: 0x000d, 0x4f2: 0x000d, 0x4f3: 0x000d, 0x4f4: 0x000d, 0x4f5: 0x000d, + 0x4f6: 0x000d, 0x4f7: 0x000d, 0x4f8: 0x000d, 0x4f9: 0x000d, 0x4fa: 0x000d, 0x4fb: 0x000d, + 0x4fc: 0x000d, 0x4fd: 0x000d, 0x4fe: 0x000d, 0x4ff: 0x000d, + // Block 0x14, offset 0x500 + 0x500: 0x000d, 0x501: 0x000d, 0x502: 0x000d, 0x503: 0x000d, 0x504: 0x000d, 0x505: 0x000d, + 0x506: 0x000d, 0x507: 0x000d, 0x508: 0x000d, 0x509: 0x000d, 0x50a: 0x000d, 0x50b: 0x000d, + 0x50c: 0x000d, 0x50d: 0x000d, 0x50e: 0x000d, 0x50f: 0x000d, 0x510: 0x000d, 0x511: 0x000d, + 0x512: 0x000d, 0x513: 0x000d, 0x514: 0x000d, 0x515: 0x000d, 0x516: 0x000d, 0x517: 0x000d, + 0x518: 0x000d, 0x519: 0x000d, 0x51a: 0x000d, 0x51b: 0x000d, 0x51c: 0x000d, 0x51d: 0x000d, + 0x51e: 0x000d, 0x51f: 0x000d, 0x520: 0x000d, 0x521: 0x000d, 0x522: 0x000d, 0x523: 0x000d, + 0x524: 0x000d, 0x525: 0x000d, 0x526: 0x000c, 0x527: 0x000c, 0x528: 0x000c, 0x529: 0x000c, + 0x52a: 0x000c, 0x52b: 0x000c, 0x52c: 0x000c, 0x52d: 0x000c, 0x52e: 0x000c, 0x52f: 0x000c, + 0x530: 0x000c, 0x531: 0x000d, 0x532: 0x000d, 0x533: 0x000d, 0x534: 0x000d, 0x535: 0x000d, + 0x536: 0x000d, 0x537: 0x000d, 0x538: 0x000d, 0x539: 0x000d, 0x53a: 0x000d, 0x53b: 0x000d, + 0x53c: 0x000d, 0x53d: 0x000d, 0x53e: 0x000d, 0x53f: 0x000d, + // Block 0x15, offset 0x540 + 0x540: 0x0001, 0x541: 0x0001, 0x542: 0x0001, 0x543: 0x0001, 
0x544: 0x0001, 0x545: 0x0001, + 0x546: 0x0001, 0x547: 0x0001, 0x548: 0x0001, 0x549: 0x0001, 0x54a: 0x0001, 0x54b: 0x0001, + 0x54c: 0x0001, 0x54d: 0x0001, 0x54e: 0x0001, 0x54f: 0x0001, 0x550: 0x0001, 0x551: 0x0001, + 0x552: 0x0001, 0x553: 0x0001, 0x554: 0x0001, 0x555: 0x0001, 0x556: 0x0001, 0x557: 0x0001, + 0x558: 0x0001, 0x559: 0x0001, 0x55a: 0x0001, 0x55b: 0x0001, 0x55c: 0x0001, 0x55d: 0x0001, + 0x55e: 0x0001, 0x55f: 0x0001, 0x560: 0x0001, 0x561: 0x0001, 0x562: 0x0001, 0x563: 0x0001, + 0x564: 0x0001, 0x565: 0x0001, 0x566: 0x0001, 0x567: 0x0001, 0x568: 0x0001, 0x569: 0x0001, + 0x56a: 0x0001, 0x56b: 0x000c, 0x56c: 0x000c, 0x56d: 0x000c, 0x56e: 0x000c, 0x56f: 0x000c, + 0x570: 0x000c, 0x571: 0x000c, 0x572: 0x000c, 0x573: 0x000c, 0x574: 0x0001, 0x575: 0x0001, + 0x576: 0x000a, 0x577: 0x000a, 0x578: 0x000a, 0x579: 0x000a, 0x57a: 0x0001, 0x57b: 0x0001, + 0x57c: 0x0001, 0x57d: 0x000c, 0x57e: 0x0001, 0x57f: 0x0001, + // Block 0x16, offset 0x580 + 0x580: 0x0001, 0x581: 0x0001, 0x582: 0x0001, 0x583: 0x0001, 0x584: 0x0001, 0x585: 0x0001, + 0x586: 0x0001, 0x587: 0x0001, 0x588: 0x0001, 0x589: 0x0001, 0x58a: 0x0001, 0x58b: 0x0001, + 0x58c: 0x0001, 0x58d: 0x0001, 0x58e: 0x0001, 0x58f: 0x0001, 0x590: 0x0001, 0x591: 0x0001, + 0x592: 0x0001, 0x593: 0x0001, 0x594: 0x0001, 0x595: 0x0001, 0x596: 0x000c, 0x597: 0x000c, + 0x598: 0x000c, 0x599: 0x000c, 0x59a: 0x0001, 0x59b: 0x000c, 0x59c: 0x000c, 0x59d: 0x000c, + 0x59e: 0x000c, 0x59f: 0x000c, 0x5a0: 0x000c, 0x5a1: 0x000c, 0x5a2: 0x000c, 0x5a3: 0x000c, + 0x5a4: 0x0001, 0x5a5: 0x000c, 0x5a6: 0x000c, 0x5a7: 0x000c, 0x5a8: 0x0001, 0x5a9: 0x000c, + 0x5aa: 0x000c, 0x5ab: 0x000c, 0x5ac: 0x000c, 0x5ad: 0x000c, 0x5ae: 0x0001, 0x5af: 0x0001, + 0x5b0: 0x0001, 0x5b1: 0x0001, 0x5b2: 0x0001, 0x5b3: 0x0001, 0x5b4: 0x0001, 0x5b5: 0x0001, + 0x5b6: 0x0001, 0x5b7: 0x0001, 0x5b8: 0x0001, 0x5b9: 0x0001, 0x5ba: 0x0001, 0x5bb: 0x0001, + 0x5bc: 0x0001, 0x5bd: 0x0001, 0x5be: 0x0001, 0x5bf: 0x0001, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x0001, 0x5c1: 0x0001, 0x5c2: 0x0001, 0x5c3: 0x0001, 0x5c4: 0x0001, 0x5c5: 0x0001, + 0x5c6: 0x0001, 0x5c7: 0x0001, 0x5c8: 0x0001, 0x5c9: 0x0001, 0x5ca: 0x0001, 0x5cb: 0x0001, + 0x5cc: 0x0001, 0x5cd: 0x0001, 0x5ce: 0x0001, 0x5cf: 0x0001, 0x5d0: 0x0001, 0x5d1: 0x0001, + 0x5d2: 0x0001, 0x5d3: 0x0001, 0x5d4: 0x0001, 0x5d5: 0x0001, 0x5d6: 0x0001, 0x5d7: 0x0001, + 0x5d8: 0x0001, 0x5d9: 0x000c, 0x5da: 0x000c, 0x5db: 0x000c, 0x5dc: 0x0001, 0x5dd: 0x0001, + 0x5de: 0x0001, 0x5df: 0x0001, 0x5e0: 0x000d, 0x5e1: 0x000d, 0x5e2: 0x000d, 0x5e3: 0x000d, + 0x5e4: 0x000d, 0x5e5: 0x000d, 0x5e6: 0x000d, 0x5e7: 0x000d, 0x5e8: 0x000d, 0x5e9: 0x000d, + 0x5ea: 0x000d, 0x5eb: 0x0001, 0x5ec: 0x0001, 0x5ed: 0x0001, 0x5ee: 0x0001, 0x5ef: 0x0001, + 0x5f0: 0x000d, 0x5f1: 0x000d, 0x5f2: 0x000d, 0x5f3: 0x000d, 0x5f4: 0x000d, 0x5f5: 0x000d, + 0x5f6: 0x000d, 0x5f7: 0x000d, 0x5f8: 0x000d, 0x5f9: 0x000d, 0x5fa: 0x000d, 0x5fb: 0x000d, + 0x5fc: 0x000d, 0x5fd: 0x000d, 0x5fe: 0x000d, 0x5ff: 0x000d, + // Block 0x18, offset 0x600 + 0x600: 0x000d, 0x601: 0x000d, 0x602: 0x000d, 0x603: 0x000d, 0x604: 0x000d, 0x605: 0x000d, + 0x606: 0x000d, 0x607: 0x000d, 0x608: 0x000d, 0x609: 0x000d, 0x60a: 0x000d, 0x60b: 0x000d, + 0x60c: 0x000d, 0x60d: 0x000d, 0x60e: 0x000d, 0x60f: 0x0001, 0x610: 0x0005, 0x611: 0x0005, + 0x612: 0x0001, 0x613: 0x0001, 0x614: 0x0001, 0x615: 0x0001, 0x616: 0x0001, 0x617: 0x0001, + 0x618: 0x000c, 0x619: 0x000c, 0x61a: 0x000c, 0x61b: 0x000c, 0x61c: 0x000c, 0x61d: 0x000c, + 0x61e: 0x000c, 0x61f: 0x000c, 0x620: 0x000d, 0x621: 0x000d, 0x622: 0x000d, 0x623: 0x000d, + 0x624: 0x000d, 0x625: 
0x000d, 0x626: 0x000d, 0x627: 0x000d, 0x628: 0x000d, 0x629: 0x000d, + 0x62a: 0x000d, 0x62b: 0x000d, 0x62c: 0x000d, 0x62d: 0x000d, 0x62e: 0x000d, 0x62f: 0x000d, + 0x630: 0x000d, 0x631: 0x000d, 0x632: 0x000d, 0x633: 0x000d, 0x634: 0x000d, 0x635: 0x000d, + 0x636: 0x000d, 0x637: 0x000d, 0x638: 0x000d, 0x639: 0x000d, 0x63a: 0x000d, 0x63b: 0x000d, + 0x63c: 0x000d, 0x63d: 0x000d, 0x63e: 0x000d, 0x63f: 0x000d, + // Block 0x19, offset 0x640 + 0x640: 0x000d, 0x641: 0x000d, 0x642: 0x000d, 0x643: 0x000d, 0x644: 0x000d, 0x645: 0x000d, + 0x646: 0x000d, 0x647: 0x000d, 0x648: 0x000d, 0x649: 0x000d, 0x64a: 0x000c, 0x64b: 0x000c, + 0x64c: 0x000c, 0x64d: 0x000c, 0x64e: 0x000c, 0x64f: 0x000c, 0x650: 0x000c, 0x651: 0x000c, + 0x652: 0x000c, 0x653: 0x000c, 0x654: 0x000c, 0x655: 0x000c, 0x656: 0x000c, 0x657: 0x000c, + 0x658: 0x000c, 0x659: 0x000c, 0x65a: 0x000c, 0x65b: 0x000c, 0x65c: 0x000c, 0x65d: 0x000c, + 0x65e: 0x000c, 0x65f: 0x000c, 0x660: 0x000c, 0x661: 0x000c, 0x662: 0x0005, 0x663: 0x000c, + 0x664: 0x000c, 0x665: 0x000c, 0x666: 0x000c, 0x667: 0x000c, 0x668: 0x000c, 0x669: 0x000c, + 0x66a: 0x000c, 0x66b: 0x000c, 0x66c: 0x000c, 0x66d: 0x000c, 0x66e: 0x000c, 0x66f: 0x000c, + 0x670: 0x000c, 0x671: 0x000c, 0x672: 0x000c, 0x673: 0x000c, 0x674: 0x000c, 0x675: 0x000c, + 0x676: 0x000c, 0x677: 0x000c, 0x678: 0x000c, 0x679: 0x000c, 0x67a: 0x000c, 0x67b: 0x000c, + 0x67c: 0x000c, 0x67d: 0x000c, 0x67e: 0x000c, 0x67f: 0x000c, + // Block 0x1a, offset 0x680 + 0x680: 0x000c, 0x681: 0x000c, 0x682: 0x000c, + 0x6ba: 0x000c, + 0x6bc: 0x000c, + // Block 0x1b, offset 0x6c0 + 0x6c1: 0x000c, 0x6c2: 0x000c, 0x6c3: 0x000c, 0x6c4: 0x000c, 0x6c5: 0x000c, + 0x6c6: 0x000c, 0x6c7: 0x000c, 0x6c8: 0x000c, + 0x6cd: 0x000c, 0x6d1: 0x000c, + 0x6d2: 0x000c, 0x6d3: 0x000c, 0x6d4: 0x000c, 0x6d5: 0x000c, 0x6d6: 0x000c, 0x6d7: 0x000c, + 0x6e2: 0x000c, 0x6e3: 0x000c, + // Block 0x1c, offset 0x700 + 0x701: 0x000c, + 0x73c: 0x000c, + // Block 0x1d, offset 0x740 + 0x741: 0x000c, 0x742: 0x000c, 0x743: 0x000c, 0x744: 0x000c, + 0x74d: 0x000c, + 0x762: 0x000c, 0x763: 0x000c, + 0x772: 0x0004, 0x773: 0x0004, + 0x77b: 0x0004, + 0x77e: 0x000c, + // Block 0x1e, offset 0x780 + 0x781: 0x000c, 0x782: 0x000c, + 0x7bc: 0x000c, + // Block 0x1f, offset 0x7c0 + 0x7c1: 0x000c, 0x7c2: 0x000c, + 0x7c7: 0x000c, 0x7c8: 0x000c, 0x7cb: 0x000c, + 0x7cc: 0x000c, 0x7cd: 0x000c, 0x7d1: 0x000c, + 0x7f0: 0x000c, 0x7f1: 0x000c, 0x7f5: 0x000c, + // Block 0x20, offset 0x800 + 0x801: 0x000c, 0x802: 0x000c, 0x803: 0x000c, 0x804: 0x000c, 0x805: 0x000c, + 0x807: 0x000c, 0x808: 0x000c, + 0x80d: 0x000c, + 0x822: 0x000c, 0x823: 0x000c, + 0x831: 0x0004, + 0x83a: 0x000c, 0x83b: 0x000c, + 0x83c: 0x000c, 0x83d: 0x000c, 0x83e: 0x000c, 0x83f: 0x000c, + // Block 0x21, offset 0x840 + 0x841: 0x000c, + 0x87c: 0x000c, 0x87f: 0x000c, + // Block 0x22, offset 0x880 + 0x881: 0x000c, 0x882: 0x000c, 0x883: 0x000c, 0x884: 0x000c, + 0x88d: 0x000c, + 0x895: 0x000c, 0x896: 0x000c, + 0x8a2: 0x000c, 0x8a3: 0x000c, + // Block 0x23, offset 0x8c0 + 0x8c2: 0x000c, + // Block 0x24, offset 0x900 + 0x900: 0x000c, + 0x90d: 0x000c, + 0x933: 0x000a, 0x934: 0x000a, 0x935: 0x000a, + 0x936: 0x000a, 0x937: 0x000a, 0x938: 0x000a, 0x939: 0x0004, 0x93a: 0x000a, + // Block 0x25, offset 0x940 + 0x940: 0x000c, 0x944: 0x000c, + 0x97c: 0x000c, 0x97e: 0x000c, 0x97f: 0x000c, + // Block 0x26, offset 0x980 + 0x980: 0x000c, + 0x986: 0x000c, 0x987: 0x000c, 0x988: 0x000c, 0x98a: 0x000c, 0x98b: 0x000c, + 0x98c: 0x000c, 0x98d: 0x000c, + 0x995: 0x000c, 0x996: 0x000c, + 0x9a2: 0x000c, 0x9a3: 0x000c, + 0x9b8: 0x000a, 0x9b9: 0x000a, 0x9ba: 0x000a, 
0x9bb: 0x000a, + 0x9bc: 0x000a, 0x9bd: 0x000a, 0x9be: 0x000a, + // Block 0x27, offset 0x9c0 + 0x9cc: 0x000c, 0x9cd: 0x000c, + 0x9e2: 0x000c, 0x9e3: 0x000c, + // Block 0x28, offset 0xa00 + 0xa00: 0x000c, 0xa01: 0x000c, + 0xa3b: 0x000c, + 0xa3c: 0x000c, + // Block 0x29, offset 0xa40 + 0xa41: 0x000c, 0xa42: 0x000c, 0xa43: 0x000c, 0xa44: 0x000c, + 0xa4d: 0x000c, + 0xa62: 0x000c, 0xa63: 0x000c, + // Block 0x2a, offset 0xa80 + 0xa81: 0x000c, + // Block 0x2b, offset 0xac0 + 0xaca: 0x000c, + 0xad2: 0x000c, 0xad3: 0x000c, 0xad4: 0x000c, 0xad6: 0x000c, + // Block 0x2c, offset 0xb00 + 0xb31: 0x000c, 0xb34: 0x000c, 0xb35: 0x000c, + 0xb36: 0x000c, 0xb37: 0x000c, 0xb38: 0x000c, 0xb39: 0x000c, 0xb3a: 0x000c, + 0xb3f: 0x0004, + // Block 0x2d, offset 0xb40 + 0xb47: 0x000c, 0xb48: 0x000c, 0xb49: 0x000c, 0xb4a: 0x000c, 0xb4b: 0x000c, + 0xb4c: 0x000c, 0xb4d: 0x000c, 0xb4e: 0x000c, + // Block 0x2e, offset 0xb80 + 0xbb1: 0x000c, 0xbb4: 0x000c, 0xbb5: 0x000c, + 0xbb6: 0x000c, 0xbb7: 0x000c, 0xbb8: 0x000c, 0xbb9: 0x000c, 0xbba: 0x000c, 0xbbb: 0x000c, + 0xbbc: 0x000c, + // Block 0x2f, offset 0xbc0 + 0xbc8: 0x000c, 0xbc9: 0x000c, 0xbca: 0x000c, 0xbcb: 0x000c, + 0xbcc: 0x000c, 0xbcd: 0x000c, 0xbce: 0x000c, + // Block 0x30, offset 0xc00 + 0xc18: 0x000c, 0xc19: 0x000c, + 0xc35: 0x000c, + 0xc37: 0x000c, 0xc39: 0x000c, 0xc3a: 0x003a, 0xc3b: 0x002a, + 0xc3c: 0x003a, 0xc3d: 0x002a, + // Block 0x31, offset 0xc40 + 0xc71: 0x000c, 0xc72: 0x000c, 0xc73: 0x000c, 0xc74: 0x000c, 0xc75: 0x000c, + 0xc76: 0x000c, 0xc77: 0x000c, 0xc78: 0x000c, 0xc79: 0x000c, 0xc7a: 0x000c, 0xc7b: 0x000c, + 0xc7c: 0x000c, 0xc7d: 0x000c, 0xc7e: 0x000c, + // Block 0x32, offset 0xc80 + 0xc80: 0x000c, 0xc81: 0x000c, 0xc82: 0x000c, 0xc83: 0x000c, 0xc84: 0x000c, + 0xc86: 0x000c, 0xc87: 0x000c, + 0xc8d: 0x000c, 0xc8e: 0x000c, 0xc8f: 0x000c, 0xc90: 0x000c, 0xc91: 0x000c, + 0xc92: 0x000c, 0xc93: 0x000c, 0xc94: 0x000c, 0xc95: 0x000c, 0xc96: 0x000c, 0xc97: 0x000c, + 0xc99: 0x000c, 0xc9a: 0x000c, 0xc9b: 0x000c, 0xc9c: 0x000c, 0xc9d: 0x000c, + 0xc9e: 0x000c, 0xc9f: 0x000c, 0xca0: 0x000c, 0xca1: 0x000c, 0xca2: 0x000c, 0xca3: 0x000c, + 0xca4: 0x000c, 0xca5: 0x000c, 0xca6: 0x000c, 0xca7: 0x000c, 0xca8: 0x000c, 0xca9: 0x000c, + 0xcaa: 0x000c, 0xcab: 0x000c, 0xcac: 0x000c, 0xcad: 0x000c, 0xcae: 0x000c, 0xcaf: 0x000c, + 0xcb0: 0x000c, 0xcb1: 0x000c, 0xcb2: 0x000c, 0xcb3: 0x000c, 0xcb4: 0x000c, 0xcb5: 0x000c, + 0xcb6: 0x000c, 0xcb7: 0x000c, 0xcb8: 0x000c, 0xcb9: 0x000c, 0xcba: 0x000c, 0xcbb: 0x000c, + 0xcbc: 0x000c, + // Block 0x33, offset 0xcc0 + 0xcc6: 0x000c, + // Block 0x34, offset 0xd00 + 0xd2d: 0x000c, 0xd2e: 0x000c, 0xd2f: 0x000c, + 0xd30: 0x000c, 0xd32: 0x000c, 0xd33: 0x000c, 0xd34: 0x000c, 0xd35: 0x000c, + 0xd36: 0x000c, 0xd37: 0x000c, 0xd39: 0x000c, 0xd3a: 0x000c, + 0xd3d: 0x000c, 0xd3e: 0x000c, + // Block 0x35, offset 0xd40 + 0xd58: 0x000c, 0xd59: 0x000c, + 0xd5e: 0x000c, 0xd5f: 0x000c, 0xd60: 0x000c, + 0xd71: 0x000c, 0xd72: 0x000c, 0xd73: 0x000c, 0xd74: 0x000c, + // Block 0x36, offset 0xd80 + 0xd82: 0x000c, 0xd85: 0x000c, + 0xd86: 0x000c, + 0xd8d: 0x000c, + 0xd9d: 0x000c, + // Block 0x37, offset 0xdc0 + 0xddd: 0x000c, + 0xdde: 0x000c, 0xddf: 0x000c, + // Block 0x38, offset 0xe00 + 0xe10: 0x000a, 0xe11: 0x000a, + 0xe12: 0x000a, 0xe13: 0x000a, 0xe14: 0x000a, 0xe15: 0x000a, 0xe16: 0x000a, 0xe17: 0x000a, + 0xe18: 0x000a, 0xe19: 0x000a, + // Block 0x39, offset 0xe40 + 0xe40: 0x000a, + // Block 0x3a, offset 0xe80 + 0xe80: 0x0009, + 0xe9b: 0x007a, 0xe9c: 0x006a, + // Block 0x3b, offset 0xec0 + 0xed2: 0x000c, 0xed3: 0x000c, 0xed4: 0x000c, + 0xef2: 0x000c, 0xef3: 
0x000c, + // Block 0x3c, offset 0xf00 + 0xf12: 0x000c, 0xf13: 0x000c, + 0xf32: 0x000c, 0xf33: 0x000c, + // Block 0x3d, offset 0xf40 + 0xf74: 0x000c, 0xf75: 0x000c, + 0xf77: 0x000c, 0xf78: 0x000c, 0xf79: 0x000c, 0xf7a: 0x000c, 0xf7b: 0x000c, + 0xf7c: 0x000c, 0xf7d: 0x000c, + // Block 0x3e, offset 0xf80 + 0xf86: 0x000c, 0xf89: 0x000c, 0xf8a: 0x000c, 0xf8b: 0x000c, + 0xf8c: 0x000c, 0xf8d: 0x000c, 0xf8e: 0x000c, 0xf8f: 0x000c, 0xf90: 0x000c, 0xf91: 0x000c, + 0xf92: 0x000c, 0xf93: 0x000c, + 0xf9b: 0x0004, 0xf9d: 0x000c, + 0xfb0: 0x000a, 0xfb1: 0x000a, 0xfb2: 0x000a, 0xfb3: 0x000a, 0xfb4: 0x000a, 0xfb5: 0x000a, + 0xfb6: 0x000a, 0xfb7: 0x000a, 0xfb8: 0x000a, 0xfb9: 0x000a, + // Block 0x3f, offset 0xfc0 + 0xfc0: 0x000a, 0xfc1: 0x000a, 0xfc2: 0x000a, 0xfc3: 0x000a, 0xfc4: 0x000a, 0xfc5: 0x000a, + 0xfc6: 0x000a, 0xfc7: 0x000a, 0xfc8: 0x000a, 0xfc9: 0x000a, 0xfca: 0x000a, 0xfcb: 0x000c, + 0xfcc: 0x000c, 0xfcd: 0x000c, 0xfce: 0x000b, 0xfcf: 0x000c, + // Block 0x40, offset 0x1000 + 0x1005: 0x000c, + 0x1006: 0x000c, + 0x1029: 0x000c, + // Block 0x41, offset 0x1040 + 0x1060: 0x000c, 0x1061: 0x000c, 0x1062: 0x000c, + 0x1067: 0x000c, 0x1068: 0x000c, + 0x1072: 0x000c, + 0x1079: 0x000c, 0x107a: 0x000c, 0x107b: 0x000c, + // Block 0x42, offset 0x1080 + 0x1080: 0x000a, 0x1084: 0x000a, 0x1085: 0x000a, + // Block 0x43, offset 0x10c0 + 0x10de: 0x000a, 0x10df: 0x000a, 0x10e0: 0x000a, 0x10e1: 0x000a, 0x10e2: 0x000a, 0x10e3: 0x000a, + 0x10e4: 0x000a, 0x10e5: 0x000a, 0x10e6: 0x000a, 0x10e7: 0x000a, 0x10e8: 0x000a, 0x10e9: 0x000a, + 0x10ea: 0x000a, 0x10eb: 0x000a, 0x10ec: 0x000a, 0x10ed: 0x000a, 0x10ee: 0x000a, 0x10ef: 0x000a, + 0x10f0: 0x000a, 0x10f1: 0x000a, 0x10f2: 0x000a, 0x10f3: 0x000a, 0x10f4: 0x000a, 0x10f5: 0x000a, + 0x10f6: 0x000a, 0x10f7: 0x000a, 0x10f8: 0x000a, 0x10f9: 0x000a, 0x10fa: 0x000a, 0x10fb: 0x000a, + 0x10fc: 0x000a, 0x10fd: 0x000a, 0x10fe: 0x000a, 0x10ff: 0x000a, + // Block 0x44, offset 0x1100 + 0x1117: 0x000c, + 0x1118: 0x000c, 0x111b: 0x000c, + // Block 0x45, offset 0x1140 + 0x1156: 0x000c, + 0x1158: 0x000c, 0x1159: 0x000c, 0x115a: 0x000c, 0x115b: 0x000c, 0x115c: 0x000c, 0x115d: 0x000c, + 0x115e: 0x000c, 0x1160: 0x000c, 0x1162: 0x000c, + 0x1165: 0x000c, 0x1166: 0x000c, 0x1167: 0x000c, 0x1168: 0x000c, 0x1169: 0x000c, + 0x116a: 0x000c, 0x116b: 0x000c, 0x116c: 0x000c, + 0x1173: 0x000c, 0x1174: 0x000c, 0x1175: 0x000c, + 0x1176: 0x000c, 0x1177: 0x000c, 0x1178: 0x000c, 0x1179: 0x000c, 0x117a: 0x000c, 0x117b: 0x000c, + 0x117c: 0x000c, 0x117f: 0x000c, + // Block 0x46, offset 0x1180 + 0x11b0: 0x000c, 0x11b1: 0x000c, 0x11b2: 0x000c, 0x11b3: 0x000c, 0x11b4: 0x000c, 0x11b5: 0x000c, + 0x11b6: 0x000c, 0x11b7: 0x000c, 0x11b8: 0x000c, 0x11b9: 0x000c, 0x11ba: 0x000c, 0x11bb: 0x000c, + 0x11bc: 0x000c, 0x11bd: 0x000c, 0x11be: 0x000c, 0x11bf: 0x000c, + // Block 0x47, offset 0x11c0 + 0x11c0: 0x000c, 0x11c1: 0x000c, 0x11c2: 0x000c, 0x11c3: 0x000c, 0x11c4: 0x000c, 0x11c5: 0x000c, + 0x11c6: 0x000c, 0x11c7: 0x000c, 0x11c8: 0x000c, 0x11c9: 0x000c, 0x11ca: 0x000c, 0x11cb: 0x000c, + 0x11cc: 0x000c, 0x11cd: 0x000c, 0x11ce: 0x000c, + // Block 0x48, offset 0x1200 + 0x1200: 0x000c, 0x1201: 0x000c, 0x1202: 0x000c, 0x1203: 0x000c, + 0x1234: 0x000c, + 0x1236: 0x000c, 0x1237: 0x000c, 0x1238: 0x000c, 0x1239: 0x000c, 0x123a: 0x000c, + 0x123c: 0x000c, + // Block 0x49, offset 0x1240 + 0x1242: 0x000c, + 0x126b: 0x000c, 0x126c: 0x000c, 0x126d: 0x000c, 0x126e: 0x000c, 0x126f: 0x000c, + 0x1270: 0x000c, 0x1271: 0x000c, 0x1272: 0x000c, 0x1273: 0x000c, + // Block 0x4a, offset 0x1280 + 0x1280: 0x000c, 0x1281: 0x000c, + 0x12a2: 0x000c, 0x12a3: 
0x000c, + 0x12a4: 0x000c, 0x12a5: 0x000c, 0x12a8: 0x000c, 0x12a9: 0x000c, + 0x12ab: 0x000c, 0x12ac: 0x000c, 0x12ad: 0x000c, + // Block 0x4b, offset 0x12c0 + 0x12e6: 0x000c, 0x12e8: 0x000c, 0x12e9: 0x000c, + 0x12ed: 0x000c, 0x12ef: 0x000c, + 0x12f0: 0x000c, 0x12f1: 0x000c, + // Block 0x4c, offset 0x1300 + 0x132c: 0x000c, 0x132d: 0x000c, 0x132e: 0x000c, 0x132f: 0x000c, + 0x1330: 0x000c, 0x1331: 0x000c, 0x1332: 0x000c, 0x1333: 0x000c, + 0x1336: 0x000c, 0x1337: 0x000c, + // Block 0x4d, offset 0x1340 + 0x1350: 0x000c, 0x1351: 0x000c, + 0x1352: 0x000c, 0x1354: 0x000c, 0x1355: 0x000c, 0x1356: 0x000c, 0x1357: 0x000c, + 0x1358: 0x000c, 0x1359: 0x000c, 0x135a: 0x000c, 0x135b: 0x000c, 0x135c: 0x000c, 0x135d: 0x000c, + 0x135e: 0x000c, 0x135f: 0x000c, 0x1360: 0x000c, 0x1362: 0x000c, 0x1363: 0x000c, + 0x1364: 0x000c, 0x1365: 0x000c, 0x1366: 0x000c, 0x1367: 0x000c, 0x1368: 0x000c, + 0x136d: 0x000c, + 0x1374: 0x000c, + 0x1378: 0x000c, 0x1379: 0x000c, + // Block 0x4e, offset 0x1380 + 0x13bd: 0x000a, 0x13bf: 0x000a, + // Block 0x4f, offset 0x13c0 + 0x13c0: 0x000a, 0x13c1: 0x000a, + 0x13cd: 0x000a, 0x13ce: 0x000a, 0x13cf: 0x000a, + 0x13dd: 0x000a, + 0x13de: 0x000a, 0x13df: 0x000a, + 0x13ed: 0x000a, 0x13ee: 0x000a, 0x13ef: 0x000a, + 0x13fd: 0x000a, 0x13fe: 0x000a, + // Block 0x50, offset 0x1400 + 0x1400: 0x0009, 0x1401: 0x0009, 0x1402: 0x0009, 0x1403: 0x0009, 0x1404: 0x0009, 0x1405: 0x0009, + 0x1406: 0x0009, 0x1407: 0x0009, 0x1408: 0x0009, 0x1409: 0x0009, 0x140a: 0x0009, 0x140b: 0x000b, + 0x140c: 0x000b, 0x140d: 0x000b, 0x140f: 0x0001, 0x1410: 0x000a, 0x1411: 0x000a, + 0x1412: 0x000a, 0x1413: 0x000a, 0x1414: 0x000a, 0x1415: 0x000a, 0x1416: 0x000a, 0x1417: 0x000a, + 0x1418: 0x000a, 0x1419: 0x000a, 0x141a: 0x000a, 0x141b: 0x000a, 0x141c: 0x000a, 0x141d: 0x000a, + 0x141e: 0x000a, 0x141f: 0x000a, 0x1420: 0x000a, 0x1421: 0x000a, 0x1422: 0x000a, 0x1423: 0x000a, + 0x1424: 0x000a, 0x1425: 0x000a, 0x1426: 0x000a, 0x1427: 0x000a, 0x1428: 0x0009, 0x1429: 0x0007, + 0x142a: 0x000e, 0x142b: 0x000e, 0x142c: 0x000e, 0x142d: 0x000e, 0x142e: 0x000e, 0x142f: 0x0006, + 0x1430: 0x0004, 0x1431: 0x0004, 0x1432: 0x0004, 0x1433: 0x0004, 0x1434: 0x0004, 0x1435: 0x000a, + 0x1436: 0x000a, 0x1437: 0x000a, 0x1438: 0x000a, 0x1439: 0x000a, 0x143a: 0x000a, 0x143b: 0x000a, + 0x143c: 0x000a, 0x143d: 0x000a, 0x143e: 0x000a, 0x143f: 0x000a, + // Block 0x51, offset 0x1440 + 0x1440: 0x000a, 0x1441: 0x000a, 0x1442: 0x000a, 0x1443: 0x000a, 0x1444: 0x0006, 0x1445: 0x009a, + 0x1446: 0x008a, 0x1447: 0x000a, 0x1448: 0x000a, 0x1449: 0x000a, 0x144a: 0x000a, 0x144b: 0x000a, + 0x144c: 0x000a, 0x144d: 0x000a, 0x144e: 0x000a, 0x144f: 0x000a, 0x1450: 0x000a, 0x1451: 0x000a, + 0x1452: 0x000a, 0x1453: 0x000a, 0x1454: 0x000a, 0x1455: 0x000a, 0x1456: 0x000a, 0x1457: 0x000a, + 0x1458: 0x000a, 0x1459: 0x000a, 0x145a: 0x000a, 0x145b: 0x000a, 0x145c: 0x000a, 0x145d: 0x000a, + 0x145e: 0x000a, 0x145f: 0x0009, 0x1460: 0x000b, 0x1461: 0x000b, 0x1462: 0x000b, 0x1463: 0x000b, + 0x1464: 0x000b, 0x1465: 0x000b, 0x1466: 0x000e, 0x1467: 0x000e, 0x1468: 0x000e, 0x1469: 0x000e, + 0x146a: 0x000b, 0x146b: 0x000b, 0x146c: 0x000b, 0x146d: 0x000b, 0x146e: 0x000b, 0x146f: 0x000b, + 0x1470: 0x0002, 0x1474: 0x0002, 0x1475: 0x0002, + 0x1476: 0x0002, 0x1477: 0x0002, 0x1478: 0x0002, 0x1479: 0x0002, 0x147a: 0x0003, 0x147b: 0x0003, + 0x147c: 0x000a, 0x147d: 0x009a, 0x147e: 0x008a, + // Block 0x52, offset 0x1480 + 0x1480: 0x0002, 0x1481: 0x0002, 0x1482: 0x0002, 0x1483: 0x0002, 0x1484: 0x0002, 0x1485: 0x0002, + 0x1486: 0x0002, 0x1487: 0x0002, 0x1488: 0x0002, 0x1489: 0x0002, 0x148a: 0x0003, 
0x148b: 0x0003, + 0x148c: 0x000a, 0x148d: 0x009a, 0x148e: 0x008a, + 0x14a0: 0x0004, 0x14a1: 0x0004, 0x14a2: 0x0004, 0x14a3: 0x0004, + 0x14a4: 0x0004, 0x14a5: 0x0004, 0x14a6: 0x0004, 0x14a7: 0x0004, 0x14a8: 0x0004, 0x14a9: 0x0004, + 0x14aa: 0x0004, 0x14ab: 0x0004, 0x14ac: 0x0004, 0x14ad: 0x0004, 0x14ae: 0x0004, 0x14af: 0x0004, + 0x14b0: 0x0004, 0x14b1: 0x0004, 0x14b2: 0x0004, 0x14b3: 0x0004, 0x14b4: 0x0004, 0x14b5: 0x0004, + 0x14b6: 0x0004, 0x14b7: 0x0004, 0x14b8: 0x0004, 0x14b9: 0x0004, 0x14ba: 0x0004, 0x14bb: 0x0004, + 0x14bc: 0x0004, 0x14bd: 0x0004, 0x14be: 0x0004, 0x14bf: 0x0004, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x0004, 0x14c1: 0x0004, 0x14c2: 0x0004, 0x14c3: 0x0004, 0x14c4: 0x0004, 0x14c5: 0x0004, + 0x14c6: 0x0004, 0x14c7: 0x0004, 0x14c8: 0x0004, 0x14c9: 0x0004, 0x14ca: 0x0004, 0x14cb: 0x0004, + 0x14cc: 0x0004, 0x14cd: 0x0004, 0x14ce: 0x0004, 0x14cf: 0x0004, 0x14d0: 0x000c, 0x14d1: 0x000c, + 0x14d2: 0x000c, 0x14d3: 0x000c, 0x14d4: 0x000c, 0x14d5: 0x000c, 0x14d6: 0x000c, 0x14d7: 0x000c, + 0x14d8: 0x000c, 0x14d9: 0x000c, 0x14da: 0x000c, 0x14db: 0x000c, 0x14dc: 0x000c, 0x14dd: 0x000c, + 0x14de: 0x000c, 0x14df: 0x000c, 0x14e0: 0x000c, 0x14e1: 0x000c, 0x14e2: 0x000c, 0x14e3: 0x000c, + 0x14e4: 0x000c, 0x14e5: 0x000c, 0x14e6: 0x000c, 0x14e7: 0x000c, 0x14e8: 0x000c, 0x14e9: 0x000c, + 0x14ea: 0x000c, 0x14eb: 0x000c, 0x14ec: 0x000c, 0x14ed: 0x000c, 0x14ee: 0x000c, 0x14ef: 0x000c, + 0x14f0: 0x000c, + // Block 0x54, offset 0x1500 + 0x1500: 0x000a, 0x1501: 0x000a, 0x1503: 0x000a, 0x1504: 0x000a, 0x1505: 0x000a, + 0x1506: 0x000a, 0x1508: 0x000a, 0x1509: 0x000a, + 0x1514: 0x000a, 0x1516: 0x000a, 0x1517: 0x000a, + 0x1518: 0x000a, + 0x151e: 0x000a, 0x151f: 0x000a, 0x1520: 0x000a, 0x1521: 0x000a, 0x1522: 0x000a, 0x1523: 0x000a, + 0x1525: 0x000a, 0x1527: 0x000a, 0x1529: 0x000a, + 0x152e: 0x0004, + 0x153a: 0x000a, 0x153b: 0x000a, + // Block 0x55, offset 0x1540 + 0x1540: 0x000a, 0x1541: 0x000a, 0x1542: 0x000a, 0x1543: 0x000a, 0x1544: 0x000a, + 0x154a: 0x000a, 0x154b: 0x000a, + 0x154c: 0x000a, 0x154d: 0x000a, 0x1550: 0x000a, 0x1551: 0x000a, + 0x1552: 0x000a, 0x1553: 0x000a, 0x1554: 0x000a, 0x1555: 0x000a, 0x1556: 0x000a, 0x1557: 0x000a, + 0x1558: 0x000a, 0x1559: 0x000a, 0x155a: 0x000a, 0x155b: 0x000a, 0x155c: 0x000a, 0x155d: 0x000a, + 0x155e: 0x000a, 0x155f: 0x000a, + // Block 0x56, offset 0x1580 + 0x1589: 0x000a, 0x158a: 0x000a, 0x158b: 0x000a, + 0x1590: 0x000a, 0x1591: 0x000a, + 0x1592: 0x000a, 0x1593: 0x000a, 0x1594: 0x000a, 0x1595: 0x000a, 0x1596: 0x000a, 0x1597: 0x000a, + 0x1598: 0x000a, 0x1599: 0x000a, 0x159a: 0x000a, 0x159b: 0x000a, 0x159c: 0x000a, 0x159d: 0x000a, + 0x159e: 0x000a, 0x159f: 0x000a, 0x15a0: 0x000a, 0x15a1: 0x000a, 0x15a2: 0x000a, 0x15a3: 0x000a, + 0x15a4: 0x000a, 0x15a5: 0x000a, 0x15a6: 0x000a, 0x15a7: 0x000a, 0x15a8: 0x000a, 0x15a9: 0x000a, + 0x15aa: 0x000a, 0x15ab: 0x000a, 0x15ac: 0x000a, 0x15ad: 0x000a, 0x15ae: 0x000a, 0x15af: 0x000a, + 0x15b0: 0x000a, 0x15b1: 0x000a, 0x15b2: 0x000a, 0x15b3: 0x000a, 0x15b4: 0x000a, 0x15b5: 0x000a, + 0x15b6: 0x000a, 0x15b7: 0x000a, 0x15b8: 0x000a, 0x15b9: 0x000a, 0x15ba: 0x000a, 0x15bb: 0x000a, + 0x15bc: 0x000a, 0x15bd: 0x000a, 0x15be: 0x000a, 0x15bf: 0x000a, + // Block 0x57, offset 0x15c0 + 0x15c0: 0x000a, 0x15c1: 0x000a, 0x15c2: 0x000a, 0x15c3: 0x000a, 0x15c4: 0x000a, 0x15c5: 0x000a, + 0x15c6: 0x000a, 0x15c7: 0x000a, 0x15c8: 0x000a, 0x15c9: 0x000a, 0x15ca: 0x000a, 0x15cb: 0x000a, + 0x15cc: 0x000a, 0x15cd: 0x000a, 0x15ce: 0x000a, 0x15cf: 0x000a, 0x15d0: 0x000a, 0x15d1: 0x000a, + 0x15d2: 0x000a, 0x15d3: 0x000a, 0x15d4: 0x000a, 0x15d5: 0x000a, 
0x15d6: 0x000a, 0x15d7: 0x000a, + 0x15d8: 0x000a, 0x15d9: 0x000a, 0x15da: 0x000a, 0x15db: 0x000a, 0x15dc: 0x000a, 0x15dd: 0x000a, + 0x15de: 0x000a, 0x15df: 0x000a, 0x15e0: 0x000a, 0x15e1: 0x000a, 0x15e2: 0x000a, 0x15e3: 0x000a, + 0x15e4: 0x000a, 0x15e5: 0x000a, 0x15e6: 0x000a, 0x15e7: 0x000a, 0x15e8: 0x000a, 0x15e9: 0x000a, + 0x15ea: 0x000a, 0x15eb: 0x000a, 0x15ec: 0x000a, 0x15ed: 0x000a, 0x15ee: 0x000a, 0x15ef: 0x000a, + 0x15f0: 0x000a, 0x15f1: 0x000a, 0x15f2: 0x000a, 0x15f3: 0x000a, 0x15f4: 0x000a, 0x15f5: 0x000a, + 0x15f6: 0x000a, 0x15f7: 0x000a, 0x15f8: 0x000a, 0x15f9: 0x000a, 0x15fa: 0x000a, 0x15fb: 0x000a, + 0x15fc: 0x000a, 0x15fd: 0x000a, 0x15fe: 0x000a, 0x15ff: 0x000a, + // Block 0x58, offset 0x1600 + 0x1600: 0x000a, 0x1601: 0x000a, 0x1602: 0x000a, 0x1603: 0x000a, 0x1604: 0x000a, 0x1605: 0x000a, + 0x1606: 0x000a, 0x1607: 0x000a, 0x1608: 0x000a, 0x1609: 0x000a, 0x160a: 0x000a, 0x160b: 0x000a, + 0x160c: 0x000a, 0x160d: 0x000a, 0x160e: 0x000a, 0x160f: 0x000a, 0x1610: 0x000a, 0x1611: 0x000a, + 0x1612: 0x0003, 0x1613: 0x0004, 0x1614: 0x000a, 0x1615: 0x000a, 0x1616: 0x000a, 0x1617: 0x000a, + 0x1618: 0x000a, 0x1619: 0x000a, 0x161a: 0x000a, 0x161b: 0x000a, 0x161c: 0x000a, 0x161d: 0x000a, + 0x161e: 0x000a, 0x161f: 0x000a, 0x1620: 0x000a, 0x1621: 0x000a, 0x1622: 0x000a, 0x1623: 0x000a, + 0x1624: 0x000a, 0x1625: 0x000a, 0x1626: 0x000a, 0x1627: 0x000a, 0x1628: 0x000a, 0x1629: 0x000a, + 0x162a: 0x000a, 0x162b: 0x000a, 0x162c: 0x000a, 0x162d: 0x000a, 0x162e: 0x000a, 0x162f: 0x000a, + 0x1630: 0x000a, 0x1631: 0x000a, 0x1632: 0x000a, 0x1633: 0x000a, 0x1634: 0x000a, 0x1635: 0x000a, + 0x1636: 0x000a, 0x1637: 0x000a, 0x1638: 0x000a, 0x1639: 0x000a, 0x163a: 0x000a, 0x163b: 0x000a, + 0x163c: 0x000a, 0x163d: 0x000a, 0x163e: 0x000a, 0x163f: 0x000a, + // Block 0x59, offset 0x1640 + 0x1640: 0x000a, 0x1641: 0x000a, 0x1642: 0x000a, 0x1643: 0x000a, 0x1644: 0x000a, 0x1645: 0x000a, + 0x1646: 0x000a, 0x1647: 0x000a, 0x1648: 0x003a, 0x1649: 0x002a, 0x164a: 0x003a, 0x164b: 0x002a, + 0x164c: 0x000a, 0x164d: 0x000a, 0x164e: 0x000a, 0x164f: 0x000a, 0x1650: 0x000a, 0x1651: 0x000a, + 0x1652: 0x000a, 0x1653: 0x000a, 0x1654: 0x000a, 0x1655: 0x000a, 0x1656: 0x000a, 0x1657: 0x000a, + 0x1658: 0x000a, 0x1659: 0x000a, 0x165a: 0x000a, 0x165b: 0x000a, 0x165c: 0x000a, 0x165d: 0x000a, + 0x165e: 0x000a, 0x165f: 0x000a, 0x1660: 0x000a, 0x1661: 0x000a, 0x1662: 0x000a, 0x1663: 0x000a, + 0x1664: 0x000a, 0x1665: 0x000a, 0x1666: 0x000a, 0x1667: 0x000a, 0x1668: 0x000a, 0x1669: 0x009a, + 0x166a: 0x008a, 0x166b: 0x000a, 0x166c: 0x000a, 0x166d: 0x000a, 0x166e: 0x000a, 0x166f: 0x000a, + 0x1670: 0x000a, 0x1671: 0x000a, 0x1672: 0x000a, 0x1673: 0x000a, 0x1674: 0x000a, 0x1675: 0x000a, + // Block 0x5a, offset 0x1680 + 0x16bb: 0x000a, + 0x16bc: 0x000a, 0x16bd: 0x000a, 0x16be: 0x000a, 0x16bf: 0x000a, + // Block 0x5b, offset 0x16c0 + 0x16c0: 0x000a, 0x16c1: 0x000a, 0x16c2: 0x000a, 0x16c3: 0x000a, 0x16c4: 0x000a, 0x16c5: 0x000a, + 0x16c6: 0x000a, 0x16c7: 0x000a, 0x16c8: 0x000a, 0x16c9: 0x000a, 0x16ca: 0x000a, 0x16cb: 0x000a, + 0x16cc: 0x000a, 0x16cd: 0x000a, 0x16ce: 0x000a, 0x16cf: 0x000a, 0x16d0: 0x000a, 0x16d1: 0x000a, + 0x16d2: 0x000a, 0x16d3: 0x000a, 0x16d4: 0x000a, 0x16d6: 0x000a, 0x16d7: 0x000a, + 0x16d8: 0x000a, 0x16d9: 0x000a, 0x16da: 0x000a, 0x16db: 0x000a, 0x16dc: 0x000a, 0x16dd: 0x000a, + 0x16de: 0x000a, 0x16df: 0x000a, 0x16e0: 0x000a, 0x16e1: 0x000a, 0x16e2: 0x000a, 0x16e3: 0x000a, + 0x16e4: 0x000a, 0x16e5: 0x000a, 0x16e6: 0x000a, 0x16e7: 0x000a, 0x16e8: 0x000a, 0x16e9: 0x000a, + 0x16ea: 0x000a, 0x16eb: 0x000a, 0x16ec: 0x000a, 0x16ed: 
0x000a, 0x16ee: 0x000a, 0x16ef: 0x000a, + 0x16f0: 0x000a, 0x16f1: 0x000a, 0x16f2: 0x000a, 0x16f3: 0x000a, 0x16f4: 0x000a, 0x16f5: 0x000a, + 0x16f6: 0x000a, 0x16f7: 0x000a, 0x16f8: 0x000a, 0x16f9: 0x000a, 0x16fa: 0x000a, 0x16fb: 0x000a, + 0x16fc: 0x000a, 0x16fd: 0x000a, 0x16fe: 0x000a, 0x16ff: 0x000a, + // Block 0x5c, offset 0x1700 + 0x1700: 0x000a, 0x1701: 0x000a, 0x1702: 0x000a, 0x1703: 0x000a, 0x1704: 0x000a, 0x1705: 0x000a, + 0x1706: 0x000a, 0x1707: 0x000a, 0x1708: 0x000a, 0x1709: 0x000a, 0x170a: 0x000a, 0x170b: 0x000a, + 0x170c: 0x000a, 0x170d: 0x000a, 0x170e: 0x000a, 0x170f: 0x000a, 0x1710: 0x000a, 0x1711: 0x000a, + 0x1712: 0x000a, 0x1713: 0x000a, 0x1714: 0x000a, 0x1715: 0x000a, 0x1716: 0x000a, 0x1717: 0x000a, + 0x1718: 0x000a, 0x1719: 0x000a, 0x171a: 0x000a, 0x171b: 0x000a, 0x171c: 0x000a, 0x171d: 0x000a, + 0x171e: 0x000a, 0x171f: 0x000a, 0x1720: 0x000a, 0x1721: 0x000a, 0x1722: 0x000a, 0x1723: 0x000a, + 0x1724: 0x000a, 0x1725: 0x000a, 0x1726: 0x000a, + // Block 0x5d, offset 0x1740 + 0x1740: 0x000a, 0x1741: 0x000a, 0x1742: 0x000a, 0x1743: 0x000a, 0x1744: 0x000a, 0x1745: 0x000a, + 0x1746: 0x000a, 0x1747: 0x000a, 0x1748: 0x000a, 0x1749: 0x000a, 0x174a: 0x000a, + 0x1760: 0x000a, 0x1761: 0x000a, 0x1762: 0x000a, 0x1763: 0x000a, + 0x1764: 0x000a, 0x1765: 0x000a, 0x1766: 0x000a, 0x1767: 0x000a, 0x1768: 0x000a, 0x1769: 0x000a, + 0x176a: 0x000a, 0x176b: 0x000a, 0x176c: 0x000a, 0x176d: 0x000a, 0x176e: 0x000a, 0x176f: 0x000a, + 0x1770: 0x000a, 0x1771: 0x000a, 0x1772: 0x000a, 0x1773: 0x000a, 0x1774: 0x000a, 0x1775: 0x000a, + 0x1776: 0x000a, 0x1777: 0x000a, 0x1778: 0x000a, 0x1779: 0x000a, 0x177a: 0x000a, 0x177b: 0x000a, + 0x177c: 0x000a, 0x177d: 0x000a, 0x177e: 0x000a, 0x177f: 0x000a, + // Block 0x5e, offset 0x1780 + 0x1780: 0x000a, 0x1781: 0x000a, 0x1782: 0x000a, 0x1783: 0x000a, 0x1784: 0x000a, 0x1785: 0x000a, + 0x1786: 0x000a, 0x1787: 0x000a, 0x1788: 0x0002, 0x1789: 0x0002, 0x178a: 0x0002, 0x178b: 0x0002, + 0x178c: 0x0002, 0x178d: 0x0002, 0x178e: 0x0002, 0x178f: 0x0002, 0x1790: 0x0002, 0x1791: 0x0002, + 0x1792: 0x0002, 0x1793: 0x0002, 0x1794: 0x0002, 0x1795: 0x0002, 0x1796: 0x0002, 0x1797: 0x0002, + 0x1798: 0x0002, 0x1799: 0x0002, 0x179a: 0x0002, 0x179b: 0x0002, + // Block 0x5f, offset 0x17c0 + 0x17ea: 0x000a, 0x17eb: 0x000a, 0x17ec: 0x000a, 0x17ed: 0x000a, 0x17ee: 0x000a, 0x17ef: 0x000a, + 0x17f0: 0x000a, 0x17f1: 0x000a, 0x17f2: 0x000a, 0x17f3: 0x000a, 0x17f4: 0x000a, 0x17f5: 0x000a, + 0x17f6: 0x000a, 0x17f7: 0x000a, 0x17f8: 0x000a, 0x17f9: 0x000a, 0x17fa: 0x000a, 0x17fb: 0x000a, + 0x17fc: 0x000a, 0x17fd: 0x000a, 0x17fe: 0x000a, 0x17ff: 0x000a, + // Block 0x60, offset 0x1800 + 0x1800: 0x000a, 0x1801: 0x000a, 0x1802: 0x000a, 0x1803: 0x000a, 0x1804: 0x000a, 0x1805: 0x000a, + 0x1806: 0x000a, 0x1807: 0x000a, 0x1808: 0x000a, 0x1809: 0x000a, 0x180a: 0x000a, 0x180b: 0x000a, + 0x180c: 0x000a, 0x180d: 0x000a, 0x180e: 0x000a, 0x180f: 0x000a, 0x1810: 0x000a, 0x1811: 0x000a, + 0x1812: 0x000a, 0x1813: 0x000a, 0x1814: 0x000a, 0x1815: 0x000a, 0x1816: 0x000a, 0x1817: 0x000a, + 0x1818: 0x000a, 0x1819: 0x000a, 0x181a: 0x000a, 0x181b: 0x000a, 0x181c: 0x000a, 0x181d: 0x000a, + 0x181e: 0x000a, 0x181f: 0x000a, 0x1820: 0x000a, 0x1821: 0x000a, 0x1822: 0x000a, 0x1823: 0x000a, + 0x1824: 0x000a, 0x1825: 0x000a, 0x1826: 0x000a, 0x1827: 0x000a, 0x1828: 0x000a, 0x1829: 0x000a, + 0x182a: 0x000a, 0x182b: 0x000a, 0x182d: 0x000a, 0x182e: 0x000a, 0x182f: 0x000a, + 0x1830: 0x000a, 0x1831: 0x000a, 0x1832: 0x000a, 0x1833: 0x000a, 0x1834: 0x000a, 0x1835: 0x000a, + 0x1836: 0x000a, 0x1837: 0x000a, 0x1838: 0x000a, 0x1839: 0x000a, 
0x183a: 0x000a, 0x183b: 0x000a, + 0x183c: 0x000a, 0x183d: 0x000a, 0x183e: 0x000a, 0x183f: 0x000a, + // Block 0x61, offset 0x1840 + 0x1840: 0x000a, 0x1841: 0x000a, 0x1842: 0x000a, 0x1843: 0x000a, 0x1844: 0x000a, 0x1845: 0x000a, + 0x1846: 0x000a, 0x1847: 0x000a, 0x1848: 0x000a, 0x1849: 0x000a, 0x184a: 0x000a, 0x184b: 0x000a, + 0x184c: 0x000a, 0x184d: 0x000a, 0x184e: 0x000a, 0x184f: 0x000a, 0x1850: 0x000a, 0x1851: 0x000a, + 0x1852: 0x000a, 0x1853: 0x000a, 0x1854: 0x000a, 0x1855: 0x000a, 0x1856: 0x000a, 0x1857: 0x000a, + 0x1858: 0x000a, 0x1859: 0x000a, 0x185a: 0x000a, 0x185b: 0x000a, 0x185c: 0x000a, 0x185d: 0x000a, + 0x185e: 0x000a, 0x185f: 0x000a, 0x1860: 0x000a, 0x1861: 0x000a, 0x1862: 0x000a, 0x1863: 0x000a, + 0x1864: 0x000a, 0x1865: 0x000a, 0x1866: 0x000a, 0x1867: 0x000a, 0x1868: 0x003a, 0x1869: 0x002a, + 0x186a: 0x003a, 0x186b: 0x002a, 0x186c: 0x003a, 0x186d: 0x002a, 0x186e: 0x003a, 0x186f: 0x002a, + 0x1870: 0x003a, 0x1871: 0x002a, 0x1872: 0x003a, 0x1873: 0x002a, 0x1874: 0x003a, 0x1875: 0x002a, + 0x1876: 0x000a, 0x1877: 0x000a, 0x1878: 0x000a, 0x1879: 0x000a, 0x187a: 0x000a, 0x187b: 0x000a, + 0x187c: 0x000a, 0x187d: 0x000a, 0x187e: 0x000a, 0x187f: 0x000a, + // Block 0x62, offset 0x1880 + 0x1880: 0x000a, 0x1881: 0x000a, 0x1882: 0x000a, 0x1883: 0x000a, 0x1884: 0x000a, 0x1885: 0x009a, + 0x1886: 0x008a, 0x1887: 0x000a, 0x1888: 0x000a, 0x1889: 0x000a, 0x188a: 0x000a, 0x188b: 0x000a, + 0x188c: 0x000a, 0x188d: 0x000a, 0x188e: 0x000a, 0x188f: 0x000a, 0x1890: 0x000a, 0x1891: 0x000a, + 0x1892: 0x000a, 0x1893: 0x000a, 0x1894: 0x000a, 0x1895: 0x000a, 0x1896: 0x000a, 0x1897: 0x000a, + 0x1898: 0x000a, 0x1899: 0x000a, 0x189a: 0x000a, 0x189b: 0x000a, 0x189c: 0x000a, 0x189d: 0x000a, + 0x189e: 0x000a, 0x189f: 0x000a, 0x18a0: 0x000a, 0x18a1: 0x000a, 0x18a2: 0x000a, 0x18a3: 0x000a, + 0x18a4: 0x000a, 0x18a5: 0x000a, 0x18a6: 0x003a, 0x18a7: 0x002a, 0x18a8: 0x003a, 0x18a9: 0x002a, + 0x18aa: 0x003a, 0x18ab: 0x002a, 0x18ac: 0x003a, 0x18ad: 0x002a, 0x18ae: 0x003a, 0x18af: 0x002a, + 0x18b0: 0x000a, 0x18b1: 0x000a, 0x18b2: 0x000a, 0x18b3: 0x000a, 0x18b4: 0x000a, 0x18b5: 0x000a, + 0x18b6: 0x000a, 0x18b7: 0x000a, 0x18b8: 0x000a, 0x18b9: 0x000a, 0x18ba: 0x000a, 0x18bb: 0x000a, + 0x18bc: 0x000a, 0x18bd: 0x000a, 0x18be: 0x000a, 0x18bf: 0x000a, + // Block 0x63, offset 0x18c0 + 0x18c0: 0x000a, 0x18c1: 0x000a, 0x18c2: 0x000a, 0x18c3: 0x007a, 0x18c4: 0x006a, 0x18c5: 0x009a, + 0x18c6: 0x008a, 0x18c7: 0x00ba, 0x18c8: 0x00aa, 0x18c9: 0x009a, 0x18ca: 0x008a, 0x18cb: 0x007a, + 0x18cc: 0x006a, 0x18cd: 0x00da, 0x18ce: 0x002a, 0x18cf: 0x003a, 0x18d0: 0x00ca, 0x18d1: 0x009a, + 0x18d2: 0x008a, 0x18d3: 0x007a, 0x18d4: 0x006a, 0x18d5: 0x009a, 0x18d6: 0x008a, 0x18d7: 0x00ba, + 0x18d8: 0x00aa, 0x18d9: 0x000a, 0x18da: 0x000a, 0x18db: 0x000a, 0x18dc: 0x000a, 0x18dd: 0x000a, + 0x18de: 0x000a, 0x18df: 0x000a, 0x18e0: 0x000a, 0x18e1: 0x000a, 0x18e2: 0x000a, 0x18e3: 0x000a, + 0x18e4: 0x000a, 0x18e5: 0x000a, 0x18e6: 0x000a, 0x18e7: 0x000a, 0x18e8: 0x000a, 0x18e9: 0x000a, + 0x18ea: 0x000a, 0x18eb: 0x000a, 0x18ec: 0x000a, 0x18ed: 0x000a, 0x18ee: 0x000a, 0x18ef: 0x000a, + 0x18f0: 0x000a, 0x18f1: 0x000a, 0x18f2: 0x000a, 0x18f3: 0x000a, 0x18f4: 0x000a, 0x18f5: 0x000a, + 0x18f6: 0x000a, 0x18f7: 0x000a, 0x18f8: 0x000a, 0x18f9: 0x000a, 0x18fa: 0x000a, 0x18fb: 0x000a, + 0x18fc: 0x000a, 0x18fd: 0x000a, 0x18fe: 0x000a, 0x18ff: 0x000a, + // Block 0x64, offset 0x1900 + 0x1900: 0x000a, 0x1901: 0x000a, 0x1902: 0x000a, 0x1903: 0x000a, 0x1904: 0x000a, 0x1905: 0x000a, + 0x1906: 0x000a, 0x1907: 0x000a, 0x1908: 0x000a, 0x1909: 0x000a, 0x190a: 0x000a, 0x190b: 
0x000a, + 0x190c: 0x000a, 0x190d: 0x000a, 0x190e: 0x000a, 0x190f: 0x000a, 0x1910: 0x000a, 0x1911: 0x000a, + 0x1912: 0x000a, 0x1913: 0x000a, 0x1914: 0x000a, 0x1915: 0x000a, 0x1916: 0x000a, 0x1917: 0x000a, + 0x1918: 0x003a, 0x1919: 0x002a, 0x191a: 0x003a, 0x191b: 0x002a, 0x191c: 0x000a, 0x191d: 0x000a, + 0x191e: 0x000a, 0x191f: 0x000a, 0x1920: 0x000a, 0x1921: 0x000a, 0x1922: 0x000a, 0x1923: 0x000a, + 0x1924: 0x000a, 0x1925: 0x000a, 0x1926: 0x000a, 0x1927: 0x000a, 0x1928: 0x000a, 0x1929: 0x000a, + 0x192a: 0x000a, 0x192b: 0x000a, 0x192c: 0x000a, 0x192d: 0x000a, 0x192e: 0x000a, 0x192f: 0x000a, + 0x1930: 0x000a, 0x1931: 0x000a, 0x1932: 0x000a, 0x1933: 0x000a, 0x1934: 0x000a, 0x1935: 0x000a, + 0x1936: 0x000a, 0x1937: 0x000a, 0x1938: 0x000a, 0x1939: 0x000a, 0x193a: 0x000a, 0x193b: 0x000a, + 0x193c: 0x003a, 0x193d: 0x002a, 0x193e: 0x000a, 0x193f: 0x000a, + // Block 0x65, offset 0x1940 + 0x1940: 0x000a, 0x1941: 0x000a, 0x1942: 0x000a, 0x1943: 0x000a, 0x1944: 0x000a, 0x1945: 0x000a, + 0x1946: 0x000a, 0x1947: 0x000a, 0x1948: 0x000a, 0x1949: 0x000a, 0x194a: 0x000a, 0x194b: 0x000a, + 0x194c: 0x000a, 0x194d: 0x000a, 0x194e: 0x000a, 0x194f: 0x000a, 0x1950: 0x000a, 0x1951: 0x000a, + 0x1952: 0x000a, 0x1953: 0x000a, 0x1954: 0x000a, 0x1955: 0x000a, 0x1956: 0x000a, 0x1957: 0x000a, + 0x1958: 0x000a, 0x1959: 0x000a, 0x195a: 0x000a, 0x195b: 0x000a, 0x195c: 0x000a, 0x195d: 0x000a, + 0x195e: 0x000a, 0x195f: 0x000a, 0x1960: 0x000a, 0x1961: 0x000a, 0x1962: 0x000a, 0x1963: 0x000a, + 0x1964: 0x000a, 0x1965: 0x000a, 0x1966: 0x000a, 0x1967: 0x000a, 0x1968: 0x000a, 0x1969: 0x000a, + 0x196a: 0x000a, 0x196b: 0x000a, 0x196c: 0x000a, 0x196d: 0x000a, 0x196e: 0x000a, 0x196f: 0x000a, + 0x1970: 0x000a, 0x1971: 0x000a, 0x1972: 0x000a, 0x1973: 0x000a, + 0x1976: 0x000a, 0x1977: 0x000a, 0x1978: 0x000a, 0x1979: 0x000a, 0x197a: 0x000a, 0x197b: 0x000a, + 0x197c: 0x000a, 0x197d: 0x000a, 0x197e: 0x000a, 0x197f: 0x000a, + // Block 0x66, offset 0x1980 + 0x1980: 0x000a, 0x1981: 0x000a, 0x1982: 0x000a, 0x1983: 0x000a, 0x1984: 0x000a, 0x1985: 0x000a, + 0x1986: 0x000a, 0x1987: 0x000a, 0x1988: 0x000a, 0x1989: 0x000a, 0x198a: 0x000a, 0x198b: 0x000a, + 0x198c: 0x000a, 0x198d: 0x000a, 0x198e: 0x000a, 0x198f: 0x000a, 0x1990: 0x000a, 0x1991: 0x000a, + 0x1992: 0x000a, 0x1993: 0x000a, 0x1994: 0x000a, 0x1995: 0x000a, 0x1997: 0x000a, + 0x1998: 0x000a, 0x1999: 0x000a, 0x199a: 0x000a, 0x199b: 0x000a, 0x199c: 0x000a, 0x199d: 0x000a, + 0x199e: 0x000a, 0x199f: 0x000a, 0x19a0: 0x000a, 0x19a1: 0x000a, 0x19a2: 0x000a, 0x19a3: 0x000a, + 0x19a4: 0x000a, 0x19a5: 0x000a, 0x19a6: 0x000a, 0x19a7: 0x000a, 0x19a8: 0x000a, 0x19a9: 0x000a, + 0x19aa: 0x000a, 0x19ab: 0x000a, 0x19ac: 0x000a, 0x19ad: 0x000a, 0x19ae: 0x000a, 0x19af: 0x000a, + 0x19b0: 0x000a, 0x19b1: 0x000a, 0x19b2: 0x000a, 0x19b3: 0x000a, 0x19b4: 0x000a, 0x19b5: 0x000a, + 0x19b6: 0x000a, 0x19b7: 0x000a, 0x19b8: 0x000a, 0x19b9: 0x000a, 0x19ba: 0x000a, 0x19bb: 0x000a, + 0x19bc: 0x000a, 0x19bd: 0x000a, 0x19be: 0x000a, 0x19bf: 0x000a, + // Block 0x67, offset 0x19c0 + 0x19e5: 0x000a, 0x19e6: 0x000a, 0x19e7: 0x000a, 0x19e8: 0x000a, 0x19e9: 0x000a, + 0x19ea: 0x000a, 0x19ef: 0x000c, + 0x19f0: 0x000c, 0x19f1: 0x000c, + 0x19f9: 0x000a, 0x19fa: 0x000a, 0x19fb: 0x000a, + 0x19fc: 0x000a, 0x19fd: 0x000a, 0x19fe: 0x000a, 0x19ff: 0x000a, + // Block 0x68, offset 0x1a00 + 0x1a3f: 0x000c, + // Block 0x69, offset 0x1a40 + 0x1a60: 0x000c, 0x1a61: 0x000c, 0x1a62: 0x000c, 0x1a63: 0x000c, + 0x1a64: 0x000c, 0x1a65: 0x000c, 0x1a66: 0x000c, 0x1a67: 0x000c, 0x1a68: 0x000c, 0x1a69: 0x000c, + 0x1a6a: 0x000c, 0x1a6b: 0x000c, 0x1a6c: 0x000c, 
0x1a6d: 0x000c, 0x1a6e: 0x000c, 0x1a6f: 0x000c, + 0x1a70: 0x000c, 0x1a71: 0x000c, 0x1a72: 0x000c, 0x1a73: 0x000c, 0x1a74: 0x000c, 0x1a75: 0x000c, + 0x1a76: 0x000c, 0x1a77: 0x000c, 0x1a78: 0x000c, 0x1a79: 0x000c, 0x1a7a: 0x000c, 0x1a7b: 0x000c, + 0x1a7c: 0x000c, 0x1a7d: 0x000c, 0x1a7e: 0x000c, 0x1a7f: 0x000c, + // Block 0x6a, offset 0x1a80 + 0x1a80: 0x000a, 0x1a81: 0x000a, 0x1a82: 0x000a, 0x1a83: 0x000a, 0x1a84: 0x000a, 0x1a85: 0x000a, + 0x1a86: 0x000a, 0x1a87: 0x000a, 0x1a88: 0x000a, 0x1a89: 0x000a, 0x1a8a: 0x000a, 0x1a8b: 0x000a, + 0x1a8c: 0x000a, 0x1a8d: 0x000a, 0x1a8e: 0x000a, 0x1a8f: 0x000a, 0x1a90: 0x000a, 0x1a91: 0x000a, + 0x1a92: 0x000a, 0x1a93: 0x000a, 0x1a94: 0x000a, 0x1a95: 0x000a, 0x1a96: 0x000a, 0x1a97: 0x000a, + 0x1a98: 0x000a, 0x1a99: 0x000a, 0x1a9a: 0x000a, 0x1a9b: 0x000a, 0x1a9c: 0x000a, 0x1a9d: 0x000a, + 0x1a9e: 0x000a, 0x1a9f: 0x000a, 0x1aa0: 0x000a, 0x1aa1: 0x000a, 0x1aa2: 0x003a, 0x1aa3: 0x002a, + 0x1aa4: 0x003a, 0x1aa5: 0x002a, 0x1aa6: 0x003a, 0x1aa7: 0x002a, 0x1aa8: 0x003a, 0x1aa9: 0x002a, + 0x1aaa: 0x000a, 0x1aab: 0x000a, 0x1aac: 0x000a, 0x1aad: 0x000a, 0x1aae: 0x000a, 0x1aaf: 0x000a, + 0x1ab0: 0x000a, 0x1ab1: 0x000a, 0x1ab2: 0x000a, 0x1ab3: 0x000a, 0x1ab4: 0x000a, 0x1ab5: 0x000a, + 0x1ab6: 0x000a, 0x1ab7: 0x000a, 0x1ab8: 0x000a, 0x1ab9: 0x000a, 0x1aba: 0x000a, 0x1abb: 0x000a, + 0x1abc: 0x000a, 0x1abd: 0x000a, 0x1abe: 0x000a, 0x1abf: 0x000a, + // Block 0x6b, offset 0x1ac0 + 0x1ac0: 0x000a, 0x1ac1: 0x000a, 0x1ac2: 0x000a, 0x1ac3: 0x000a, 0x1ac4: 0x000a, 0x1ac5: 0x000a, + 0x1ac6: 0x000a, 0x1ac7: 0x000a, 0x1ac8: 0x000a, 0x1ac9: 0x000a, 0x1aca: 0x000a, 0x1acb: 0x000a, + 0x1acc: 0x000a, 0x1acd: 0x000a, 0x1ace: 0x000a, 0x1acf: 0x000a, 0x1ad0: 0x000a, 0x1ad1: 0x000a, + 0x1ad2: 0x000a, 0x1ad3: 0x000a, 0x1ad4: 0x000a, 0x1ad5: 0x009a, 0x1ad6: 0x008a, 0x1ad7: 0x00ba, + 0x1ad8: 0x00aa, 0x1ad9: 0x009a, 0x1ada: 0x008a, 0x1adb: 0x007a, 0x1adc: 0x006a, 0x1add: 0x000a, + // Block 0x6c, offset 0x1b00 + 0x1b00: 0x000a, 0x1b01: 0x000a, 0x1b02: 0x000a, 0x1b03: 0x000a, 0x1b04: 0x000a, 0x1b05: 0x000a, + 0x1b06: 0x000a, 0x1b07: 0x000a, 0x1b08: 0x000a, 0x1b09: 0x000a, 0x1b0a: 0x000a, 0x1b0b: 0x000a, + 0x1b0c: 0x000a, 0x1b0d: 0x000a, 0x1b0e: 0x000a, 0x1b0f: 0x000a, 0x1b10: 0x000a, 0x1b11: 0x000a, + 0x1b12: 0x000a, 0x1b13: 0x000a, 0x1b14: 0x000a, 0x1b15: 0x000a, 0x1b16: 0x000a, 0x1b17: 0x000a, + 0x1b18: 0x000a, 0x1b19: 0x000a, 0x1b1b: 0x000a, 0x1b1c: 0x000a, 0x1b1d: 0x000a, + 0x1b1e: 0x000a, 0x1b1f: 0x000a, 0x1b20: 0x000a, 0x1b21: 0x000a, 0x1b22: 0x000a, 0x1b23: 0x000a, + 0x1b24: 0x000a, 0x1b25: 0x000a, 0x1b26: 0x000a, 0x1b27: 0x000a, 0x1b28: 0x000a, 0x1b29: 0x000a, + 0x1b2a: 0x000a, 0x1b2b: 0x000a, 0x1b2c: 0x000a, 0x1b2d: 0x000a, 0x1b2e: 0x000a, 0x1b2f: 0x000a, + 0x1b30: 0x000a, 0x1b31: 0x000a, 0x1b32: 0x000a, 0x1b33: 0x000a, 0x1b34: 0x000a, 0x1b35: 0x000a, + 0x1b36: 0x000a, 0x1b37: 0x000a, 0x1b38: 0x000a, 0x1b39: 0x000a, 0x1b3a: 0x000a, 0x1b3b: 0x000a, + 0x1b3c: 0x000a, 0x1b3d: 0x000a, 0x1b3e: 0x000a, 0x1b3f: 0x000a, + // Block 0x6d, offset 0x1b40 + 0x1b40: 0x000a, 0x1b41: 0x000a, 0x1b42: 0x000a, 0x1b43: 0x000a, 0x1b44: 0x000a, 0x1b45: 0x000a, + 0x1b46: 0x000a, 0x1b47: 0x000a, 0x1b48: 0x000a, 0x1b49: 0x000a, 0x1b4a: 0x000a, 0x1b4b: 0x000a, + 0x1b4c: 0x000a, 0x1b4d: 0x000a, 0x1b4e: 0x000a, 0x1b4f: 0x000a, 0x1b50: 0x000a, 0x1b51: 0x000a, + 0x1b52: 0x000a, 0x1b53: 0x000a, 0x1b54: 0x000a, 0x1b55: 0x000a, 0x1b56: 0x000a, 0x1b57: 0x000a, + 0x1b58: 0x000a, 0x1b59: 0x000a, 0x1b5a: 0x000a, 0x1b5b: 0x000a, 0x1b5c: 0x000a, 0x1b5d: 0x000a, + 0x1b5e: 0x000a, 0x1b5f: 0x000a, 0x1b60: 0x000a, 0x1b61: 
0x000a, 0x1b62: 0x000a, 0x1b63: 0x000a, + 0x1b64: 0x000a, 0x1b65: 0x000a, 0x1b66: 0x000a, 0x1b67: 0x000a, 0x1b68: 0x000a, 0x1b69: 0x000a, + 0x1b6a: 0x000a, 0x1b6b: 0x000a, 0x1b6c: 0x000a, 0x1b6d: 0x000a, 0x1b6e: 0x000a, 0x1b6f: 0x000a, + 0x1b70: 0x000a, 0x1b71: 0x000a, 0x1b72: 0x000a, 0x1b73: 0x000a, + // Block 0x6e, offset 0x1b80 + 0x1b80: 0x000a, 0x1b81: 0x000a, 0x1b82: 0x000a, 0x1b83: 0x000a, 0x1b84: 0x000a, 0x1b85: 0x000a, + 0x1b86: 0x000a, 0x1b87: 0x000a, 0x1b88: 0x000a, 0x1b89: 0x000a, 0x1b8a: 0x000a, 0x1b8b: 0x000a, + 0x1b8c: 0x000a, 0x1b8d: 0x000a, 0x1b8e: 0x000a, 0x1b8f: 0x000a, 0x1b90: 0x000a, 0x1b91: 0x000a, + 0x1b92: 0x000a, 0x1b93: 0x000a, 0x1b94: 0x000a, 0x1b95: 0x000a, + 0x1bb0: 0x000a, 0x1bb1: 0x000a, 0x1bb2: 0x000a, 0x1bb3: 0x000a, 0x1bb4: 0x000a, 0x1bb5: 0x000a, + 0x1bb6: 0x000a, 0x1bb7: 0x000a, 0x1bb8: 0x000a, 0x1bb9: 0x000a, 0x1bba: 0x000a, 0x1bbb: 0x000a, + // Block 0x6f, offset 0x1bc0 + 0x1bc0: 0x0009, 0x1bc1: 0x000a, 0x1bc2: 0x000a, 0x1bc3: 0x000a, 0x1bc4: 0x000a, + 0x1bc8: 0x003a, 0x1bc9: 0x002a, 0x1bca: 0x003a, 0x1bcb: 0x002a, + 0x1bcc: 0x003a, 0x1bcd: 0x002a, 0x1bce: 0x003a, 0x1bcf: 0x002a, 0x1bd0: 0x003a, 0x1bd1: 0x002a, + 0x1bd2: 0x000a, 0x1bd3: 0x000a, 0x1bd4: 0x003a, 0x1bd5: 0x002a, 0x1bd6: 0x003a, 0x1bd7: 0x002a, + 0x1bd8: 0x003a, 0x1bd9: 0x002a, 0x1bda: 0x003a, 0x1bdb: 0x002a, 0x1bdc: 0x000a, 0x1bdd: 0x000a, + 0x1bde: 0x000a, 0x1bdf: 0x000a, 0x1be0: 0x000a, + 0x1bea: 0x000c, 0x1beb: 0x000c, 0x1bec: 0x000c, 0x1bed: 0x000c, + 0x1bf0: 0x000a, + 0x1bf6: 0x000a, 0x1bf7: 0x000a, + 0x1bfd: 0x000a, 0x1bfe: 0x000a, 0x1bff: 0x000a, + // Block 0x70, offset 0x1c00 + 0x1c19: 0x000c, 0x1c1a: 0x000c, 0x1c1b: 0x000a, 0x1c1c: 0x000a, + 0x1c20: 0x000a, + // Block 0x71, offset 0x1c40 + 0x1c7b: 0x000a, + // Block 0x72, offset 0x1c80 + 0x1c80: 0x000a, 0x1c81: 0x000a, 0x1c82: 0x000a, 0x1c83: 0x000a, 0x1c84: 0x000a, 0x1c85: 0x000a, + 0x1c86: 0x000a, 0x1c87: 0x000a, 0x1c88: 0x000a, 0x1c89: 0x000a, 0x1c8a: 0x000a, 0x1c8b: 0x000a, + 0x1c8c: 0x000a, 0x1c8d: 0x000a, 0x1c8e: 0x000a, 0x1c8f: 0x000a, 0x1c90: 0x000a, 0x1c91: 0x000a, + 0x1c92: 0x000a, 0x1c93: 0x000a, 0x1c94: 0x000a, 0x1c95: 0x000a, 0x1c96: 0x000a, 0x1c97: 0x000a, + 0x1c98: 0x000a, 0x1c99: 0x000a, 0x1c9a: 0x000a, 0x1c9b: 0x000a, 0x1c9c: 0x000a, 0x1c9d: 0x000a, + 0x1c9e: 0x000a, 0x1c9f: 0x000a, 0x1ca0: 0x000a, 0x1ca1: 0x000a, 0x1ca2: 0x000a, 0x1ca3: 0x000a, + // Block 0x73, offset 0x1cc0 + 0x1cdd: 0x000a, + 0x1cde: 0x000a, + // Block 0x74, offset 0x1d00 + 0x1d10: 0x000a, 0x1d11: 0x000a, + 0x1d12: 0x000a, 0x1d13: 0x000a, 0x1d14: 0x000a, 0x1d15: 0x000a, 0x1d16: 0x000a, 0x1d17: 0x000a, + 0x1d18: 0x000a, 0x1d19: 0x000a, 0x1d1a: 0x000a, 0x1d1b: 0x000a, 0x1d1c: 0x000a, 0x1d1d: 0x000a, + 0x1d1e: 0x000a, 0x1d1f: 0x000a, + 0x1d3c: 0x000a, 0x1d3d: 0x000a, 0x1d3e: 0x000a, + // Block 0x75, offset 0x1d40 + 0x1d71: 0x000a, 0x1d72: 0x000a, 0x1d73: 0x000a, 0x1d74: 0x000a, 0x1d75: 0x000a, + 0x1d76: 0x000a, 0x1d77: 0x000a, 0x1d78: 0x000a, 0x1d79: 0x000a, 0x1d7a: 0x000a, 0x1d7b: 0x000a, + 0x1d7c: 0x000a, 0x1d7d: 0x000a, 0x1d7e: 0x000a, 0x1d7f: 0x000a, + // Block 0x76, offset 0x1d80 + 0x1d8c: 0x000a, 0x1d8d: 0x000a, 0x1d8e: 0x000a, 0x1d8f: 0x000a, + // Block 0x77, offset 0x1dc0 + 0x1df7: 0x000a, 0x1df8: 0x000a, 0x1df9: 0x000a, 0x1dfa: 0x000a, + // Block 0x78, offset 0x1e00 + 0x1e1e: 0x000a, 0x1e1f: 0x000a, + 0x1e3f: 0x000a, + // Block 0x79, offset 0x1e40 + 0x1e50: 0x000a, 0x1e51: 0x000a, + 0x1e52: 0x000a, 0x1e53: 0x000a, 0x1e54: 0x000a, 0x1e55: 0x000a, 0x1e56: 0x000a, 0x1e57: 0x000a, + 0x1e58: 0x000a, 0x1e59: 0x000a, 0x1e5a: 0x000a, 0x1e5b: 
0x000a, 0x1e5c: 0x000a, 0x1e5d: 0x000a, + 0x1e5e: 0x000a, 0x1e5f: 0x000a, 0x1e60: 0x000a, 0x1e61: 0x000a, 0x1e62: 0x000a, 0x1e63: 0x000a, + 0x1e64: 0x000a, 0x1e65: 0x000a, 0x1e66: 0x000a, 0x1e67: 0x000a, 0x1e68: 0x000a, 0x1e69: 0x000a, + 0x1e6a: 0x000a, 0x1e6b: 0x000a, 0x1e6c: 0x000a, 0x1e6d: 0x000a, 0x1e6e: 0x000a, 0x1e6f: 0x000a, + 0x1e70: 0x000a, 0x1e71: 0x000a, 0x1e72: 0x000a, 0x1e73: 0x000a, 0x1e74: 0x000a, 0x1e75: 0x000a, + 0x1e76: 0x000a, 0x1e77: 0x000a, 0x1e78: 0x000a, 0x1e79: 0x000a, 0x1e7a: 0x000a, 0x1e7b: 0x000a, + 0x1e7c: 0x000a, 0x1e7d: 0x000a, 0x1e7e: 0x000a, 0x1e7f: 0x000a, + // Block 0x7a, offset 0x1e80 + 0x1e80: 0x000a, 0x1e81: 0x000a, 0x1e82: 0x000a, 0x1e83: 0x000a, 0x1e84: 0x000a, 0x1e85: 0x000a, + 0x1e86: 0x000a, + // Block 0x7b, offset 0x1ec0 + 0x1ecd: 0x000a, 0x1ece: 0x000a, 0x1ecf: 0x000a, + // Block 0x7c, offset 0x1f00 + 0x1f2f: 0x000c, + 0x1f30: 0x000c, 0x1f31: 0x000c, 0x1f32: 0x000c, 0x1f33: 0x000a, 0x1f34: 0x000c, 0x1f35: 0x000c, + 0x1f36: 0x000c, 0x1f37: 0x000c, 0x1f38: 0x000c, 0x1f39: 0x000c, 0x1f3a: 0x000c, 0x1f3b: 0x000c, + 0x1f3c: 0x000c, 0x1f3d: 0x000c, 0x1f3e: 0x000a, 0x1f3f: 0x000a, + // Block 0x7d, offset 0x1f40 + 0x1f5e: 0x000c, 0x1f5f: 0x000c, + // Block 0x7e, offset 0x1f80 + 0x1fb0: 0x000c, 0x1fb1: 0x000c, + // Block 0x7f, offset 0x1fc0 + 0x1fc0: 0x000a, 0x1fc1: 0x000a, 0x1fc2: 0x000a, 0x1fc3: 0x000a, 0x1fc4: 0x000a, 0x1fc5: 0x000a, + 0x1fc6: 0x000a, 0x1fc7: 0x000a, 0x1fc8: 0x000a, 0x1fc9: 0x000a, 0x1fca: 0x000a, 0x1fcb: 0x000a, + 0x1fcc: 0x000a, 0x1fcd: 0x000a, 0x1fce: 0x000a, 0x1fcf: 0x000a, 0x1fd0: 0x000a, 0x1fd1: 0x000a, + 0x1fd2: 0x000a, 0x1fd3: 0x000a, 0x1fd4: 0x000a, 0x1fd5: 0x000a, 0x1fd6: 0x000a, 0x1fd7: 0x000a, + 0x1fd8: 0x000a, 0x1fd9: 0x000a, 0x1fda: 0x000a, 0x1fdb: 0x000a, 0x1fdc: 0x000a, 0x1fdd: 0x000a, + 0x1fde: 0x000a, 0x1fdf: 0x000a, 0x1fe0: 0x000a, 0x1fe1: 0x000a, + // Block 0x80, offset 0x2000 + 0x2008: 0x000a, + // Block 0x81, offset 0x2040 + 0x2042: 0x000c, + 0x2046: 0x000c, 0x204b: 0x000c, + 0x2065: 0x000c, 0x2066: 0x000c, 0x2068: 0x000a, 0x2069: 0x000a, + 0x206a: 0x000a, 0x206b: 0x000a, 0x206c: 0x000c, + 0x2078: 0x0004, 0x2079: 0x0004, + // Block 0x82, offset 0x2080 + 0x20b4: 0x000a, 0x20b5: 0x000a, + 0x20b6: 0x000a, 0x20b7: 0x000a, + // Block 0x83, offset 0x20c0 + 0x20c4: 0x000c, 0x20c5: 0x000c, + 0x20e0: 0x000c, 0x20e1: 0x000c, 0x20e2: 0x000c, 0x20e3: 0x000c, + 0x20e4: 0x000c, 0x20e5: 0x000c, 0x20e6: 0x000c, 0x20e7: 0x000c, 0x20e8: 0x000c, 0x20e9: 0x000c, + 0x20ea: 0x000c, 0x20eb: 0x000c, 0x20ec: 0x000c, 0x20ed: 0x000c, 0x20ee: 0x000c, 0x20ef: 0x000c, + 0x20f0: 0x000c, 0x20f1: 0x000c, + 0x20ff: 0x000c, + // Block 0x84, offset 0x2100 + 0x2126: 0x000c, 0x2127: 0x000c, 0x2128: 0x000c, 0x2129: 0x000c, + 0x212a: 0x000c, 0x212b: 0x000c, 0x212c: 0x000c, 0x212d: 0x000c, + // Block 0x85, offset 0x2140 + 0x2147: 0x000c, 0x2148: 0x000c, 0x2149: 0x000c, 0x214a: 0x000c, 0x214b: 0x000c, + 0x214c: 0x000c, 0x214d: 0x000c, 0x214e: 0x000c, 0x214f: 0x000c, 0x2150: 0x000c, 0x2151: 0x000c, + // Block 0x86, offset 0x2180 + 0x2180: 0x000c, 0x2181: 0x000c, 0x2182: 0x000c, + 0x21b3: 0x000c, + 0x21b6: 0x000c, 0x21b7: 0x000c, 0x21b8: 0x000c, 0x21b9: 0x000c, + 0x21bc: 0x000c, 0x21bd: 0x000c, + // Block 0x87, offset 0x21c0 + 0x21e5: 0x000c, + // Block 0x88, offset 0x2200 + 0x2229: 0x000c, + 0x222a: 0x000c, 0x222b: 0x000c, 0x222c: 0x000c, 0x222d: 0x000c, 0x222e: 0x000c, + 0x2231: 0x000c, 0x2232: 0x000c, 0x2235: 0x000c, + 0x2236: 0x000c, + // Block 0x89, offset 0x2240 + 0x2243: 0x000c, + 0x224c: 0x000c, + 0x227c: 0x000c, + // Block 0x8a, offset 0x2280 + 
0x22b0: 0x000c, 0x22b2: 0x000c, 0x22b3: 0x000c, 0x22b4: 0x000c, + 0x22b7: 0x000c, 0x22b8: 0x000c, + 0x22be: 0x000c, 0x22bf: 0x000c, + // Block 0x8b, offset 0x22c0 + 0x22c1: 0x000c, + 0x22ec: 0x000c, 0x22ed: 0x000c, + 0x22f6: 0x000c, + // Block 0x8c, offset 0x2300 + 0x232a: 0x000a, 0x232b: 0x000a, + // Block 0x8d, offset 0x2340 + 0x2365: 0x000c, 0x2368: 0x000c, + 0x236d: 0x000c, + // Block 0x8e, offset 0x2380 + 0x239d: 0x0001, + 0x239e: 0x000c, 0x239f: 0x0001, 0x23a0: 0x0001, 0x23a1: 0x0001, 0x23a2: 0x0001, 0x23a3: 0x0001, + 0x23a4: 0x0001, 0x23a5: 0x0001, 0x23a6: 0x0001, 0x23a7: 0x0001, 0x23a8: 0x0001, 0x23a9: 0x0003, + 0x23aa: 0x0001, 0x23ab: 0x0001, 0x23ac: 0x0001, 0x23ad: 0x0001, 0x23ae: 0x0001, 0x23af: 0x0001, + 0x23b0: 0x0001, 0x23b1: 0x0001, 0x23b2: 0x0001, 0x23b3: 0x0001, 0x23b4: 0x0001, 0x23b5: 0x0001, + 0x23b6: 0x0001, 0x23b7: 0x0001, 0x23b8: 0x0001, 0x23b9: 0x0001, 0x23ba: 0x0001, 0x23bb: 0x0001, + 0x23bc: 0x0001, 0x23bd: 0x0001, 0x23be: 0x0001, 0x23bf: 0x0001, + // Block 0x8f, offset 0x23c0 + 0x23c0: 0x0001, 0x23c1: 0x0001, 0x23c2: 0x0001, 0x23c3: 0x0001, 0x23c4: 0x0001, 0x23c5: 0x0001, + 0x23c6: 0x0001, 0x23c7: 0x0001, 0x23c8: 0x0001, 0x23c9: 0x0001, 0x23ca: 0x0001, 0x23cb: 0x0001, + 0x23cc: 0x0001, 0x23cd: 0x0001, 0x23ce: 0x0001, 0x23cf: 0x0001, 0x23d0: 0x000d, 0x23d1: 0x000d, + 0x23d2: 0x000d, 0x23d3: 0x000d, 0x23d4: 0x000d, 0x23d5: 0x000d, 0x23d6: 0x000d, 0x23d7: 0x000d, + 0x23d8: 0x000d, 0x23d9: 0x000d, 0x23da: 0x000d, 0x23db: 0x000d, 0x23dc: 0x000d, 0x23dd: 0x000d, + 0x23de: 0x000d, 0x23df: 0x000d, 0x23e0: 0x000d, 0x23e1: 0x000d, 0x23e2: 0x000d, 0x23e3: 0x000d, + 0x23e4: 0x000d, 0x23e5: 0x000d, 0x23e6: 0x000d, 0x23e7: 0x000d, 0x23e8: 0x000d, 0x23e9: 0x000d, + 0x23ea: 0x000d, 0x23eb: 0x000d, 0x23ec: 0x000d, 0x23ed: 0x000d, 0x23ee: 0x000d, 0x23ef: 0x000d, + 0x23f0: 0x000d, 0x23f1: 0x000d, 0x23f2: 0x000d, 0x23f3: 0x000d, 0x23f4: 0x000d, 0x23f5: 0x000d, + 0x23f6: 0x000d, 0x23f7: 0x000d, 0x23f8: 0x000d, 0x23f9: 0x000d, 0x23fa: 0x000d, 0x23fb: 0x000d, + 0x23fc: 0x000d, 0x23fd: 0x000d, 0x23fe: 0x000d, 0x23ff: 0x000d, + // Block 0x90, offset 0x2400 + 0x2400: 0x000d, 0x2401: 0x000d, 0x2402: 0x000d, 0x2403: 0x000d, 0x2404: 0x000d, 0x2405: 0x000d, + 0x2406: 0x000d, 0x2407: 0x000d, 0x2408: 0x000d, 0x2409: 0x000d, 0x240a: 0x000d, 0x240b: 0x000d, + 0x240c: 0x000d, 0x240d: 0x000d, 0x240e: 0x000d, 0x240f: 0x000d, 0x2410: 0x000d, 0x2411: 0x000d, + 0x2412: 0x000d, 0x2413: 0x000d, 0x2414: 0x000d, 0x2415: 0x000d, 0x2416: 0x000d, 0x2417: 0x000d, + 0x2418: 0x000d, 0x2419: 0x000d, 0x241a: 0x000d, 0x241b: 0x000d, 0x241c: 0x000d, 0x241d: 0x000d, + 0x241e: 0x000d, 0x241f: 0x000d, 0x2420: 0x000d, 0x2421: 0x000d, 0x2422: 0x000d, 0x2423: 0x000d, + 0x2424: 0x000d, 0x2425: 0x000d, 0x2426: 0x000d, 0x2427: 0x000d, 0x2428: 0x000d, 0x2429: 0x000d, + 0x242a: 0x000d, 0x242b: 0x000d, 0x242c: 0x000d, 0x242d: 0x000d, 0x242e: 0x000d, 0x242f: 0x000d, + 0x2430: 0x000d, 0x2431: 0x000d, 0x2432: 0x000d, 0x2433: 0x000d, 0x2434: 0x000d, 0x2435: 0x000d, + 0x2436: 0x000d, 0x2437: 0x000d, 0x2438: 0x000d, 0x2439: 0x000d, 0x243a: 0x000d, 0x243b: 0x000d, + 0x243c: 0x000d, 0x243d: 0x000d, 0x243e: 0x000a, 0x243f: 0x000a, + // Block 0x91, offset 0x2440 + 0x2440: 0x000a, 0x2441: 0x000a, 0x2442: 0x000a, 0x2443: 0x000a, 0x2444: 0x000a, 0x2445: 0x000a, + 0x2446: 0x000a, 0x2447: 0x000a, 0x2448: 0x000a, 0x2449: 0x000a, 0x244a: 0x000a, 0x244b: 0x000a, + 0x244c: 0x000a, 0x244d: 0x000a, 0x244e: 0x000a, 0x244f: 0x000a, 0x2450: 0x000d, 0x2451: 0x000d, + 0x2452: 0x000d, 0x2453: 0x000d, 0x2454: 0x000d, 0x2455: 0x000d, 0x2456: 0x000d, 0x2457: 
0x000d, + 0x2458: 0x000d, 0x2459: 0x000d, 0x245a: 0x000d, 0x245b: 0x000d, 0x245c: 0x000d, 0x245d: 0x000d, + 0x245e: 0x000d, 0x245f: 0x000d, 0x2460: 0x000d, 0x2461: 0x000d, 0x2462: 0x000d, 0x2463: 0x000d, + 0x2464: 0x000d, 0x2465: 0x000d, 0x2466: 0x000d, 0x2467: 0x000d, 0x2468: 0x000d, 0x2469: 0x000d, + 0x246a: 0x000d, 0x246b: 0x000d, 0x246c: 0x000d, 0x246d: 0x000d, 0x246e: 0x000d, 0x246f: 0x000d, + 0x2470: 0x000d, 0x2471: 0x000d, 0x2472: 0x000d, 0x2473: 0x000d, 0x2474: 0x000d, 0x2475: 0x000d, + 0x2476: 0x000d, 0x2477: 0x000d, 0x2478: 0x000d, 0x2479: 0x000d, 0x247a: 0x000d, 0x247b: 0x000d, + 0x247c: 0x000d, 0x247d: 0x000d, 0x247e: 0x000d, 0x247f: 0x000d, + // Block 0x92, offset 0x2480 + 0x2480: 0x000d, 0x2481: 0x000d, 0x2482: 0x000d, 0x2483: 0x000d, 0x2484: 0x000d, 0x2485: 0x000d, + 0x2486: 0x000d, 0x2487: 0x000d, 0x2488: 0x000d, 0x2489: 0x000d, 0x248a: 0x000d, 0x248b: 0x000d, + 0x248c: 0x000d, 0x248d: 0x000d, 0x248e: 0x000d, 0x248f: 0x000a, 0x2490: 0x000b, 0x2491: 0x000b, + 0x2492: 0x000b, 0x2493: 0x000b, 0x2494: 0x000b, 0x2495: 0x000b, 0x2496: 0x000b, 0x2497: 0x000b, + 0x2498: 0x000b, 0x2499: 0x000b, 0x249a: 0x000b, 0x249b: 0x000b, 0x249c: 0x000b, 0x249d: 0x000b, + 0x249e: 0x000b, 0x249f: 0x000b, 0x24a0: 0x000b, 0x24a1: 0x000b, 0x24a2: 0x000b, 0x24a3: 0x000b, + 0x24a4: 0x000b, 0x24a5: 0x000b, 0x24a6: 0x000b, 0x24a7: 0x000b, 0x24a8: 0x000b, 0x24a9: 0x000b, + 0x24aa: 0x000b, 0x24ab: 0x000b, 0x24ac: 0x000b, 0x24ad: 0x000b, 0x24ae: 0x000b, 0x24af: 0x000b, + 0x24b0: 0x000d, 0x24b1: 0x000d, 0x24b2: 0x000d, 0x24b3: 0x000d, 0x24b4: 0x000d, 0x24b5: 0x000d, + 0x24b6: 0x000d, 0x24b7: 0x000d, 0x24b8: 0x000d, 0x24b9: 0x000d, 0x24ba: 0x000d, 0x24bb: 0x000d, + 0x24bc: 0x000d, 0x24bd: 0x000a, 0x24be: 0x000a, 0x24bf: 0x000a, + // Block 0x93, offset 0x24c0 + 0x24c0: 0x000c, 0x24c1: 0x000c, 0x24c2: 0x000c, 0x24c3: 0x000c, 0x24c4: 0x000c, 0x24c5: 0x000c, + 0x24c6: 0x000c, 0x24c7: 0x000c, 0x24c8: 0x000c, 0x24c9: 0x000c, 0x24ca: 0x000c, 0x24cb: 0x000c, + 0x24cc: 0x000c, 0x24cd: 0x000c, 0x24ce: 0x000c, 0x24cf: 0x000c, 0x24d0: 0x000a, 0x24d1: 0x000a, + 0x24d2: 0x000a, 0x24d3: 0x000a, 0x24d4: 0x000a, 0x24d5: 0x000a, 0x24d6: 0x000a, 0x24d7: 0x000a, + 0x24d8: 0x000a, 0x24d9: 0x000a, + 0x24e0: 0x000c, 0x24e1: 0x000c, 0x24e2: 0x000c, 0x24e3: 0x000c, + 0x24e4: 0x000c, 0x24e5: 0x000c, 0x24e6: 0x000c, 0x24e7: 0x000c, 0x24e8: 0x000c, 0x24e9: 0x000c, + 0x24ea: 0x000c, 0x24eb: 0x000c, 0x24ec: 0x000c, 0x24ed: 0x000c, 0x24ee: 0x000c, 0x24ef: 0x000c, + 0x24f0: 0x000a, 0x24f1: 0x000a, 0x24f2: 0x000a, 0x24f3: 0x000a, 0x24f4: 0x000a, 0x24f5: 0x000a, + 0x24f6: 0x000a, 0x24f7: 0x000a, 0x24f8: 0x000a, 0x24f9: 0x000a, 0x24fa: 0x000a, 0x24fb: 0x000a, + 0x24fc: 0x000a, 0x24fd: 0x000a, 0x24fe: 0x000a, 0x24ff: 0x000a, + // Block 0x94, offset 0x2500 + 0x2500: 0x000a, 0x2501: 0x000a, 0x2502: 0x000a, 0x2503: 0x000a, 0x2504: 0x000a, 0x2505: 0x000a, + 0x2506: 0x000a, 0x2507: 0x000a, 0x2508: 0x000a, 0x2509: 0x000a, 0x250a: 0x000a, 0x250b: 0x000a, + 0x250c: 0x000a, 0x250d: 0x000a, 0x250e: 0x000a, 0x250f: 0x000a, 0x2510: 0x0006, 0x2511: 0x000a, + 0x2512: 0x0006, 0x2514: 0x000a, 0x2515: 0x0006, 0x2516: 0x000a, 0x2517: 0x000a, + 0x2518: 0x000a, 0x2519: 0x009a, 0x251a: 0x008a, 0x251b: 0x007a, 0x251c: 0x006a, 0x251d: 0x009a, + 0x251e: 0x008a, 0x251f: 0x0004, 0x2520: 0x000a, 0x2521: 0x000a, 0x2522: 0x0003, 0x2523: 0x0003, + 0x2524: 0x000a, 0x2525: 0x000a, 0x2526: 0x000a, 0x2528: 0x000a, 0x2529: 0x0004, + 0x252a: 0x0004, 0x252b: 0x000a, + 0x2530: 0x000d, 0x2531: 0x000d, 0x2532: 0x000d, 0x2533: 0x000d, 0x2534: 0x000d, 0x2535: 0x000d, + 0x2536: 0x000d, 
0x2537: 0x000d, 0x2538: 0x000d, 0x2539: 0x000d, 0x253a: 0x000d, 0x253b: 0x000d, + 0x253c: 0x000d, 0x253d: 0x000d, 0x253e: 0x000d, 0x253f: 0x000d, + // Block 0x95, offset 0x2540 + 0x2540: 0x000d, 0x2541: 0x000d, 0x2542: 0x000d, 0x2543: 0x000d, 0x2544: 0x000d, 0x2545: 0x000d, + 0x2546: 0x000d, 0x2547: 0x000d, 0x2548: 0x000d, 0x2549: 0x000d, 0x254a: 0x000d, 0x254b: 0x000d, + 0x254c: 0x000d, 0x254d: 0x000d, 0x254e: 0x000d, 0x254f: 0x000d, 0x2550: 0x000d, 0x2551: 0x000d, + 0x2552: 0x000d, 0x2553: 0x000d, 0x2554: 0x000d, 0x2555: 0x000d, 0x2556: 0x000d, 0x2557: 0x000d, + 0x2558: 0x000d, 0x2559: 0x000d, 0x255a: 0x000d, 0x255b: 0x000d, 0x255c: 0x000d, 0x255d: 0x000d, + 0x255e: 0x000d, 0x255f: 0x000d, 0x2560: 0x000d, 0x2561: 0x000d, 0x2562: 0x000d, 0x2563: 0x000d, + 0x2564: 0x000d, 0x2565: 0x000d, 0x2566: 0x000d, 0x2567: 0x000d, 0x2568: 0x000d, 0x2569: 0x000d, + 0x256a: 0x000d, 0x256b: 0x000d, 0x256c: 0x000d, 0x256d: 0x000d, 0x256e: 0x000d, 0x256f: 0x000d, + 0x2570: 0x000d, 0x2571: 0x000d, 0x2572: 0x000d, 0x2573: 0x000d, 0x2574: 0x000d, 0x2575: 0x000d, + 0x2576: 0x000d, 0x2577: 0x000d, 0x2578: 0x000d, 0x2579: 0x000d, 0x257a: 0x000d, 0x257b: 0x000d, + 0x257c: 0x000d, 0x257d: 0x000d, 0x257e: 0x000d, 0x257f: 0x000b, + // Block 0x96, offset 0x2580 + 0x2581: 0x000a, 0x2582: 0x000a, 0x2583: 0x0004, 0x2584: 0x0004, 0x2585: 0x0004, + 0x2586: 0x000a, 0x2587: 0x000a, 0x2588: 0x003a, 0x2589: 0x002a, 0x258a: 0x000a, 0x258b: 0x0003, + 0x258c: 0x0006, 0x258d: 0x0003, 0x258e: 0x0006, 0x258f: 0x0006, 0x2590: 0x0002, 0x2591: 0x0002, + 0x2592: 0x0002, 0x2593: 0x0002, 0x2594: 0x0002, 0x2595: 0x0002, 0x2596: 0x0002, 0x2597: 0x0002, + 0x2598: 0x0002, 0x2599: 0x0002, 0x259a: 0x0006, 0x259b: 0x000a, 0x259c: 0x000a, 0x259d: 0x000a, + 0x259e: 0x000a, 0x259f: 0x000a, 0x25a0: 0x000a, + 0x25bb: 0x005a, + 0x25bc: 0x000a, 0x25bd: 0x004a, 0x25be: 0x000a, 0x25bf: 0x000a, + // Block 0x97, offset 0x25c0 + 0x25c0: 0x000a, + 0x25db: 0x005a, 0x25dc: 0x000a, 0x25dd: 0x004a, + 0x25de: 0x000a, 0x25df: 0x00fa, 0x25e0: 0x00ea, 0x25e1: 0x000a, 0x25e2: 0x003a, 0x25e3: 0x002a, + 0x25e4: 0x000a, 0x25e5: 0x000a, + // Block 0x98, offset 0x2600 + 0x2620: 0x0004, 0x2621: 0x0004, 0x2622: 0x000a, 0x2623: 0x000a, + 0x2624: 0x000a, 0x2625: 0x0004, 0x2626: 0x0004, 0x2628: 0x000a, 0x2629: 0x000a, + 0x262a: 0x000a, 0x262b: 0x000a, 0x262c: 0x000a, 0x262d: 0x000a, 0x262e: 0x000a, + 0x2630: 0x000b, 0x2631: 0x000b, 0x2632: 0x000b, 0x2633: 0x000b, 0x2634: 0x000b, 0x2635: 0x000b, + 0x2636: 0x000b, 0x2637: 0x000b, 0x2638: 0x000b, 0x2639: 0x000a, 0x263a: 0x000a, 0x263b: 0x000a, + 0x263c: 0x000a, 0x263d: 0x000a, 0x263e: 0x000b, 0x263f: 0x000b, + // Block 0x99, offset 0x2640 + 0x2641: 0x000a, + // Block 0x9a, offset 0x2680 + 0x2680: 0x000a, 0x2681: 0x000a, 0x2682: 0x000a, 0x2683: 0x000a, 0x2684: 0x000a, 0x2685: 0x000a, + 0x2686: 0x000a, 0x2687: 0x000a, 0x2688: 0x000a, 0x2689: 0x000a, 0x268a: 0x000a, 0x268b: 0x000a, + 0x268c: 0x000a, 0x2690: 0x000a, 0x2691: 0x000a, + 0x2692: 0x000a, 0x2693: 0x000a, 0x2694: 0x000a, 0x2695: 0x000a, 0x2696: 0x000a, 0x2697: 0x000a, + 0x2698: 0x000a, 0x2699: 0x000a, 0x269a: 0x000a, 0x269b: 0x000a, 0x269c: 0x000a, + 0x26a0: 0x000a, + // Block 0x9b, offset 0x26c0 + 0x26fd: 0x000c, + // Block 0x9c, offset 0x2700 + 0x2720: 0x000c, 0x2721: 0x0002, 0x2722: 0x0002, 0x2723: 0x0002, + 0x2724: 0x0002, 0x2725: 0x0002, 0x2726: 0x0002, 0x2727: 0x0002, 0x2728: 0x0002, 0x2729: 0x0002, + 0x272a: 0x0002, 0x272b: 0x0002, 0x272c: 0x0002, 0x272d: 0x0002, 0x272e: 0x0002, 0x272f: 0x0002, + 0x2730: 0x0002, 0x2731: 0x0002, 0x2732: 0x0002, 0x2733: 0x0002, 
0x2734: 0x0002, 0x2735: 0x0002, + 0x2736: 0x0002, 0x2737: 0x0002, 0x2738: 0x0002, 0x2739: 0x0002, 0x273a: 0x0002, 0x273b: 0x0002, + // Block 0x9d, offset 0x2740 + 0x2776: 0x000c, 0x2777: 0x000c, 0x2778: 0x000c, 0x2779: 0x000c, 0x277a: 0x000c, + // Block 0x9e, offset 0x2780 + 0x2780: 0x0001, 0x2781: 0x0001, 0x2782: 0x0001, 0x2783: 0x0001, 0x2784: 0x0001, 0x2785: 0x0001, + 0x2786: 0x0001, 0x2787: 0x0001, 0x2788: 0x0001, 0x2789: 0x0001, 0x278a: 0x0001, 0x278b: 0x0001, + 0x278c: 0x0001, 0x278d: 0x0001, 0x278e: 0x0001, 0x278f: 0x0001, 0x2790: 0x0001, 0x2791: 0x0001, + 0x2792: 0x0001, 0x2793: 0x0001, 0x2794: 0x0001, 0x2795: 0x0001, 0x2796: 0x0001, 0x2797: 0x0001, + 0x2798: 0x0001, 0x2799: 0x0001, 0x279a: 0x0001, 0x279b: 0x0001, 0x279c: 0x0001, 0x279d: 0x0001, + 0x279e: 0x0001, 0x279f: 0x0001, 0x27a0: 0x0001, 0x27a1: 0x0001, 0x27a2: 0x0001, 0x27a3: 0x0001, + 0x27a4: 0x0001, 0x27a5: 0x0001, 0x27a6: 0x0001, 0x27a7: 0x0001, 0x27a8: 0x0001, 0x27a9: 0x0001, + 0x27aa: 0x0001, 0x27ab: 0x0001, 0x27ac: 0x0001, 0x27ad: 0x0001, 0x27ae: 0x0001, 0x27af: 0x0001, + 0x27b0: 0x0001, 0x27b1: 0x0001, 0x27b2: 0x0001, 0x27b3: 0x0001, 0x27b4: 0x0001, 0x27b5: 0x0001, + 0x27b6: 0x0001, 0x27b7: 0x0001, 0x27b8: 0x0001, 0x27b9: 0x0001, 0x27ba: 0x0001, 0x27bb: 0x0001, + 0x27bc: 0x0001, 0x27bd: 0x0001, 0x27be: 0x0001, 0x27bf: 0x0001, + // Block 0x9f, offset 0x27c0 + 0x27c0: 0x0001, 0x27c1: 0x0001, 0x27c2: 0x0001, 0x27c3: 0x0001, 0x27c4: 0x0001, 0x27c5: 0x0001, + 0x27c6: 0x0001, 0x27c7: 0x0001, 0x27c8: 0x0001, 0x27c9: 0x0001, 0x27ca: 0x0001, 0x27cb: 0x0001, + 0x27cc: 0x0001, 0x27cd: 0x0001, 0x27ce: 0x0001, 0x27cf: 0x0001, 0x27d0: 0x0001, 0x27d1: 0x0001, + 0x27d2: 0x0001, 0x27d3: 0x0001, 0x27d4: 0x0001, 0x27d5: 0x0001, 0x27d6: 0x0001, 0x27d7: 0x0001, + 0x27d8: 0x0001, 0x27d9: 0x0001, 0x27da: 0x0001, 0x27db: 0x0001, 0x27dc: 0x0001, 0x27dd: 0x0001, + 0x27de: 0x0001, 0x27df: 0x000a, 0x27e0: 0x0001, 0x27e1: 0x0001, 0x27e2: 0x0001, 0x27e3: 0x0001, + 0x27e4: 0x0001, 0x27e5: 0x0001, 0x27e6: 0x0001, 0x27e7: 0x0001, 0x27e8: 0x0001, 0x27e9: 0x0001, + 0x27ea: 0x0001, 0x27eb: 0x0001, 0x27ec: 0x0001, 0x27ed: 0x0001, 0x27ee: 0x0001, 0x27ef: 0x0001, + 0x27f0: 0x0001, 0x27f1: 0x0001, 0x27f2: 0x0001, 0x27f3: 0x0001, 0x27f4: 0x0001, 0x27f5: 0x0001, + 0x27f6: 0x0001, 0x27f7: 0x0001, 0x27f8: 0x0001, 0x27f9: 0x0001, 0x27fa: 0x0001, 0x27fb: 0x0001, + 0x27fc: 0x0001, 0x27fd: 0x0001, 0x27fe: 0x0001, 0x27ff: 0x0001, + // Block 0xa0, offset 0x2800 + 0x2800: 0x0001, 0x2801: 0x000c, 0x2802: 0x000c, 0x2803: 0x000c, 0x2804: 0x0001, 0x2805: 0x000c, + 0x2806: 0x000c, 0x2807: 0x0001, 0x2808: 0x0001, 0x2809: 0x0001, 0x280a: 0x0001, 0x280b: 0x0001, + 0x280c: 0x000c, 0x280d: 0x000c, 0x280e: 0x000c, 0x280f: 0x000c, 0x2810: 0x0001, 0x2811: 0x0001, + 0x2812: 0x0001, 0x2813: 0x0001, 0x2814: 0x0001, 0x2815: 0x0001, 0x2816: 0x0001, 0x2817: 0x0001, + 0x2818: 0x0001, 0x2819: 0x0001, 0x281a: 0x0001, 0x281b: 0x0001, 0x281c: 0x0001, 0x281d: 0x0001, + 0x281e: 0x0001, 0x281f: 0x0001, 0x2820: 0x0001, 0x2821: 0x0001, 0x2822: 0x0001, 0x2823: 0x0001, + 0x2824: 0x0001, 0x2825: 0x0001, 0x2826: 0x0001, 0x2827: 0x0001, 0x2828: 0x0001, 0x2829: 0x0001, + 0x282a: 0x0001, 0x282b: 0x0001, 0x282c: 0x0001, 0x282d: 0x0001, 0x282e: 0x0001, 0x282f: 0x0001, + 0x2830: 0x0001, 0x2831: 0x0001, 0x2832: 0x0001, 0x2833: 0x0001, 0x2834: 0x0001, 0x2835: 0x0001, + 0x2836: 0x0001, 0x2837: 0x0001, 0x2838: 0x000c, 0x2839: 0x000c, 0x283a: 0x000c, 0x283b: 0x0001, + 0x283c: 0x0001, 0x283d: 0x0001, 0x283e: 0x0001, 0x283f: 0x000c, + // Block 0xa1, offset 0x2840 + 0x2840: 0x0001, 0x2841: 0x0001, 0x2842: 0x0001, 
0x2843: 0x0001, 0x2844: 0x0001, 0x2845: 0x0001, + 0x2846: 0x0001, 0x2847: 0x0001, 0x2848: 0x0001, 0x2849: 0x0001, 0x284a: 0x0001, 0x284b: 0x0001, + 0x284c: 0x0001, 0x284d: 0x0001, 0x284e: 0x0001, 0x284f: 0x0001, 0x2850: 0x0001, 0x2851: 0x0001, + 0x2852: 0x0001, 0x2853: 0x0001, 0x2854: 0x0001, 0x2855: 0x0001, 0x2856: 0x0001, 0x2857: 0x0001, + 0x2858: 0x0001, 0x2859: 0x0001, 0x285a: 0x0001, 0x285b: 0x0001, 0x285c: 0x0001, 0x285d: 0x0001, + 0x285e: 0x0001, 0x285f: 0x0001, 0x2860: 0x0001, 0x2861: 0x0001, 0x2862: 0x0001, 0x2863: 0x0001, + 0x2864: 0x0001, 0x2865: 0x000c, 0x2866: 0x000c, 0x2867: 0x0001, 0x2868: 0x0001, 0x2869: 0x0001, + 0x286a: 0x0001, 0x286b: 0x0001, 0x286c: 0x0001, 0x286d: 0x0001, 0x286e: 0x0001, 0x286f: 0x0001, + 0x2870: 0x0001, 0x2871: 0x0001, 0x2872: 0x0001, 0x2873: 0x0001, 0x2874: 0x0001, 0x2875: 0x0001, + 0x2876: 0x0001, 0x2877: 0x0001, 0x2878: 0x0001, 0x2879: 0x0001, 0x287a: 0x0001, 0x287b: 0x0001, + 0x287c: 0x0001, 0x287d: 0x0001, 0x287e: 0x0001, 0x287f: 0x0001, + // Block 0xa2, offset 0x2880 + 0x2880: 0x0001, 0x2881: 0x0001, 0x2882: 0x0001, 0x2883: 0x0001, 0x2884: 0x0001, 0x2885: 0x0001, + 0x2886: 0x0001, 0x2887: 0x0001, 0x2888: 0x0001, 0x2889: 0x0001, 0x288a: 0x0001, 0x288b: 0x0001, + 0x288c: 0x0001, 0x288d: 0x0001, 0x288e: 0x0001, 0x288f: 0x0001, 0x2890: 0x0001, 0x2891: 0x0001, + 0x2892: 0x0001, 0x2893: 0x0001, 0x2894: 0x0001, 0x2895: 0x0001, 0x2896: 0x0001, 0x2897: 0x0001, + 0x2898: 0x0001, 0x2899: 0x0001, 0x289a: 0x0001, 0x289b: 0x0001, 0x289c: 0x0001, 0x289d: 0x0001, + 0x289e: 0x0001, 0x289f: 0x0001, 0x28a0: 0x0001, 0x28a1: 0x0001, 0x28a2: 0x0001, 0x28a3: 0x0001, + 0x28a4: 0x0001, 0x28a5: 0x0001, 0x28a6: 0x0001, 0x28a7: 0x0001, 0x28a8: 0x0001, 0x28a9: 0x0001, + 0x28aa: 0x0001, 0x28ab: 0x0001, 0x28ac: 0x0001, 0x28ad: 0x0001, 0x28ae: 0x0001, 0x28af: 0x0001, + 0x28b0: 0x0001, 0x28b1: 0x0001, 0x28b2: 0x0001, 0x28b3: 0x0001, 0x28b4: 0x0001, 0x28b5: 0x0001, + 0x28b6: 0x0001, 0x28b7: 0x0001, 0x28b8: 0x0001, 0x28b9: 0x000a, 0x28ba: 0x000a, 0x28bb: 0x000a, + 0x28bc: 0x000a, 0x28bd: 0x000a, 0x28be: 0x000a, 0x28bf: 0x000a, + // Block 0xa3, offset 0x28c0 + 0x28c0: 0x000d, 0x28c1: 0x000d, 0x28c2: 0x000d, 0x28c3: 0x000d, 0x28c4: 0x000d, 0x28c5: 0x000d, + 0x28c6: 0x000d, 0x28c7: 0x000d, 0x28c8: 0x000d, 0x28c9: 0x000d, 0x28ca: 0x000d, 0x28cb: 0x000d, + 0x28cc: 0x000d, 0x28cd: 0x000d, 0x28ce: 0x000d, 0x28cf: 0x000d, 0x28d0: 0x000d, 0x28d1: 0x000d, + 0x28d2: 0x000d, 0x28d3: 0x000d, 0x28d4: 0x000d, 0x28d5: 0x000d, 0x28d6: 0x000d, 0x28d7: 0x000d, + 0x28d8: 0x000d, 0x28d9: 0x000d, 0x28da: 0x000d, 0x28db: 0x000d, 0x28dc: 0x000d, 0x28dd: 0x000d, + 0x28de: 0x000d, 0x28df: 0x000d, 0x28e0: 0x000d, 0x28e1: 0x000d, 0x28e2: 0x000d, 0x28e3: 0x000d, + 0x28e4: 0x000c, 0x28e5: 0x000c, 0x28e6: 0x000c, 0x28e7: 0x000c, 0x28e8: 0x0001, 0x28e9: 0x0001, + 0x28ea: 0x0001, 0x28eb: 0x0001, 0x28ec: 0x0001, 0x28ed: 0x0001, 0x28ee: 0x0001, 0x28ef: 0x0001, + 0x28f0: 0x0005, 0x28f1: 0x0005, 0x28f2: 0x0005, 0x28f3: 0x0005, 0x28f4: 0x0005, 0x28f5: 0x0005, + 0x28f6: 0x0005, 0x28f7: 0x0005, 0x28f8: 0x0005, 0x28f9: 0x0005, 0x28fa: 0x0001, 0x28fb: 0x0001, + 0x28fc: 0x0001, 0x28fd: 0x0001, 0x28fe: 0x0001, 0x28ff: 0x0001, + // Block 0xa4, offset 0x2900 + 0x2900: 0x0001, 0x2901: 0x0001, 0x2902: 0x0001, 0x2903: 0x0001, 0x2904: 0x0001, 0x2905: 0x0001, + 0x2906: 0x0001, 0x2907: 0x0001, 0x2908: 0x0001, 0x2909: 0x0001, 0x290a: 0x0001, 0x290b: 0x0001, + 0x290c: 0x0001, 0x290d: 0x0001, 0x290e: 0x0001, 0x290f: 0x0001, 0x2910: 0x0001, 0x2911: 0x0001, + 0x2912: 0x0001, 0x2913: 0x0001, 0x2914: 0x0001, 0x2915: 0x0001, 0x2916: 
0x0001, 0x2917: 0x0001, + 0x2918: 0x0001, 0x2919: 0x0001, 0x291a: 0x0001, 0x291b: 0x0001, 0x291c: 0x0001, 0x291d: 0x0001, + 0x291e: 0x0001, 0x291f: 0x0001, 0x2920: 0x0005, 0x2921: 0x0005, 0x2922: 0x0005, 0x2923: 0x0005, + 0x2924: 0x0005, 0x2925: 0x0005, 0x2926: 0x0005, 0x2927: 0x0005, 0x2928: 0x0005, 0x2929: 0x0005, + 0x292a: 0x0005, 0x292b: 0x0005, 0x292c: 0x0005, 0x292d: 0x0005, 0x292e: 0x0005, 0x292f: 0x0005, + 0x2930: 0x0005, 0x2931: 0x0005, 0x2932: 0x0005, 0x2933: 0x0005, 0x2934: 0x0005, 0x2935: 0x0005, + 0x2936: 0x0005, 0x2937: 0x0005, 0x2938: 0x0005, 0x2939: 0x0005, 0x293a: 0x0005, 0x293b: 0x0005, + 0x293c: 0x0005, 0x293d: 0x0005, 0x293e: 0x0005, 0x293f: 0x0001, + // Block 0xa5, offset 0x2940 + 0x2940: 0x0001, 0x2941: 0x0001, 0x2942: 0x0001, 0x2943: 0x0001, 0x2944: 0x0001, 0x2945: 0x0001, + 0x2946: 0x0001, 0x2947: 0x0001, 0x2948: 0x0001, 0x2949: 0x0001, 0x294a: 0x0001, 0x294b: 0x0001, + 0x294c: 0x0001, 0x294d: 0x0001, 0x294e: 0x0001, 0x294f: 0x0001, 0x2950: 0x0001, 0x2951: 0x0001, + 0x2952: 0x0001, 0x2953: 0x0001, 0x2954: 0x0001, 0x2955: 0x0001, 0x2956: 0x0001, 0x2957: 0x0001, + 0x2958: 0x0001, 0x2959: 0x0001, 0x295a: 0x0001, 0x295b: 0x0001, 0x295c: 0x0001, 0x295d: 0x0001, + 0x295e: 0x0001, 0x295f: 0x0001, 0x2960: 0x0001, 0x2961: 0x0001, 0x2962: 0x0001, 0x2963: 0x0001, + 0x2964: 0x0001, 0x2965: 0x0001, 0x2966: 0x0001, 0x2967: 0x0001, 0x2968: 0x0001, 0x2969: 0x0001, + 0x296a: 0x0001, 0x296b: 0x000c, 0x296c: 0x000c, 0x296d: 0x0001, 0x296e: 0x0001, 0x296f: 0x0001, + 0x2970: 0x0001, 0x2971: 0x0001, 0x2972: 0x0001, 0x2973: 0x0001, 0x2974: 0x0001, 0x2975: 0x0001, + 0x2976: 0x0001, 0x2977: 0x0001, 0x2978: 0x0001, 0x2979: 0x0001, 0x297a: 0x0001, 0x297b: 0x0001, + 0x297c: 0x0001, 0x297d: 0x0001, 0x297e: 0x0001, 0x297f: 0x0001, + // Block 0xa6, offset 0x2980 + 0x2980: 0x0001, 0x2981: 0x0001, 0x2982: 0x0001, 0x2983: 0x0001, 0x2984: 0x0001, 0x2985: 0x0001, + 0x2986: 0x0001, 0x2987: 0x0001, 0x2988: 0x0001, 0x2989: 0x0001, 0x298a: 0x0001, 0x298b: 0x0001, + 0x298c: 0x0001, 0x298d: 0x0001, 0x298e: 0x0001, 0x298f: 0x0001, 0x2990: 0x0001, 0x2991: 0x0001, + 0x2992: 0x0001, 0x2993: 0x0001, 0x2994: 0x0001, 0x2995: 0x0001, 0x2996: 0x0001, 0x2997: 0x0001, + 0x2998: 0x0001, 0x2999: 0x0001, 0x299a: 0x0001, 0x299b: 0x0001, 0x299c: 0x0001, 0x299d: 0x0001, + 0x299e: 0x0001, 0x299f: 0x0001, 0x29a0: 0x0001, 0x29a1: 0x0001, 0x29a2: 0x0001, 0x29a3: 0x0001, + 0x29a4: 0x0001, 0x29a5: 0x0001, 0x29a6: 0x0001, 0x29a7: 0x0001, 0x29a8: 0x0001, 0x29a9: 0x0001, + 0x29aa: 0x0001, 0x29ab: 0x0001, 0x29ac: 0x0001, 0x29ad: 0x0001, 0x29ae: 0x0001, 0x29af: 0x0001, + 0x29b0: 0x0001, 0x29b1: 0x0001, 0x29b2: 0x0001, 0x29b3: 0x0001, 0x29b4: 0x0001, 0x29b5: 0x0001, + 0x29b6: 0x0001, 0x29b7: 0x0001, 0x29b8: 0x0001, 0x29b9: 0x0001, 0x29ba: 0x0001, 0x29bb: 0x0001, + 0x29bc: 0x0001, 0x29bd: 0x000c, 0x29be: 0x000c, 0x29bf: 0x000c, + // Block 0xa7, offset 0x29c0 + 0x29c0: 0x0001, 0x29c1: 0x0001, 0x29c2: 0x0001, 0x29c3: 0x0001, 0x29c4: 0x0001, 0x29c5: 0x0001, + 0x29c6: 0x0001, 0x29c7: 0x0001, 0x29c8: 0x0001, 0x29c9: 0x0001, 0x29ca: 0x0001, 0x29cb: 0x0001, + 0x29cc: 0x0001, 0x29cd: 0x0001, 0x29ce: 0x0001, 0x29cf: 0x0001, 0x29d0: 0x0001, 0x29d1: 0x0001, + 0x29d2: 0x0001, 0x29d3: 0x0001, 0x29d4: 0x0001, 0x29d5: 0x0001, 0x29d6: 0x0001, 0x29d7: 0x0001, + 0x29d8: 0x0001, 0x29d9: 0x0001, 0x29da: 0x0001, 0x29db: 0x0001, 0x29dc: 0x0001, 0x29dd: 0x0001, + 0x29de: 0x0001, 0x29df: 0x0001, 0x29e0: 0x0001, 0x29e1: 0x0001, 0x29e2: 0x0001, 0x29e3: 0x0001, + 0x29e4: 0x0001, 0x29e5: 0x0001, 0x29e6: 0x0001, 0x29e7: 0x0001, 0x29e8: 0x0001, 0x29e9: 0x0001, + 
0x29ea: 0x0001, 0x29eb: 0x0001, 0x29ec: 0x0001, 0x29ed: 0x0001, 0x29ee: 0x0001, 0x29ef: 0x0001, + 0x29f0: 0x000d, 0x29f1: 0x000d, 0x29f2: 0x000d, 0x29f3: 0x000d, 0x29f4: 0x000d, 0x29f5: 0x000d, + 0x29f6: 0x000d, 0x29f7: 0x000d, 0x29f8: 0x000d, 0x29f9: 0x000d, 0x29fa: 0x000d, 0x29fb: 0x000d, + 0x29fc: 0x000d, 0x29fd: 0x000d, 0x29fe: 0x000d, 0x29ff: 0x000d, + // Block 0xa8, offset 0x2a00 + 0x2a00: 0x000d, 0x2a01: 0x000d, 0x2a02: 0x000d, 0x2a03: 0x000d, 0x2a04: 0x000d, 0x2a05: 0x000d, + 0x2a06: 0x000c, 0x2a07: 0x000c, 0x2a08: 0x000c, 0x2a09: 0x000c, 0x2a0a: 0x000c, 0x2a0b: 0x000c, + 0x2a0c: 0x000c, 0x2a0d: 0x000c, 0x2a0e: 0x000c, 0x2a0f: 0x000c, 0x2a10: 0x000c, 0x2a11: 0x000d, + 0x2a12: 0x000d, 0x2a13: 0x000d, 0x2a14: 0x000d, 0x2a15: 0x000d, 0x2a16: 0x000d, 0x2a17: 0x000d, + 0x2a18: 0x000d, 0x2a19: 0x000d, 0x2a1a: 0x0001, 0x2a1b: 0x0001, 0x2a1c: 0x0001, 0x2a1d: 0x0001, + 0x2a1e: 0x0001, 0x2a1f: 0x0001, 0x2a20: 0x0001, 0x2a21: 0x0001, 0x2a22: 0x0001, 0x2a23: 0x0001, + 0x2a24: 0x0001, 0x2a25: 0x0001, 0x2a26: 0x0001, 0x2a27: 0x0001, 0x2a28: 0x0001, 0x2a29: 0x0001, + 0x2a2a: 0x0001, 0x2a2b: 0x0001, 0x2a2c: 0x0001, 0x2a2d: 0x0001, 0x2a2e: 0x0001, 0x2a2f: 0x0001, + 0x2a30: 0x0001, 0x2a31: 0x0001, 0x2a32: 0x0001, 0x2a33: 0x0001, 0x2a34: 0x0001, 0x2a35: 0x0001, + 0x2a36: 0x0001, 0x2a37: 0x0001, 0x2a38: 0x0001, 0x2a39: 0x0001, 0x2a3a: 0x0001, 0x2a3b: 0x0001, + 0x2a3c: 0x0001, 0x2a3d: 0x0001, 0x2a3e: 0x0001, 0x2a3f: 0x0001, + // Block 0xa9, offset 0x2a40 + 0x2a40: 0x0001, 0x2a41: 0x0001, 0x2a42: 0x000c, 0x2a43: 0x000c, 0x2a44: 0x000c, 0x2a45: 0x000c, + 0x2a46: 0x0001, 0x2a47: 0x0001, 0x2a48: 0x0001, 0x2a49: 0x0001, 0x2a4a: 0x0001, 0x2a4b: 0x0001, + 0x2a4c: 0x0001, 0x2a4d: 0x0001, 0x2a4e: 0x0001, 0x2a4f: 0x0001, 0x2a50: 0x0001, 0x2a51: 0x0001, + 0x2a52: 0x0001, 0x2a53: 0x0001, 0x2a54: 0x0001, 0x2a55: 0x0001, 0x2a56: 0x0001, 0x2a57: 0x0001, + 0x2a58: 0x0001, 0x2a59: 0x0001, 0x2a5a: 0x0001, 0x2a5b: 0x0001, 0x2a5c: 0x0001, 0x2a5d: 0x0001, + 0x2a5e: 0x0001, 0x2a5f: 0x0001, 0x2a60: 0x0001, 0x2a61: 0x0001, 0x2a62: 0x0001, 0x2a63: 0x0001, + 0x2a64: 0x0001, 0x2a65: 0x0001, 0x2a66: 0x0001, 0x2a67: 0x0001, 0x2a68: 0x0001, 0x2a69: 0x0001, + 0x2a6a: 0x0001, 0x2a6b: 0x0001, 0x2a6c: 0x0001, 0x2a6d: 0x0001, 0x2a6e: 0x0001, 0x2a6f: 0x0001, + 0x2a70: 0x0001, 0x2a71: 0x0001, 0x2a72: 0x0001, 0x2a73: 0x0001, 0x2a74: 0x0001, 0x2a75: 0x0001, + 0x2a76: 0x0001, 0x2a77: 0x0001, 0x2a78: 0x0001, 0x2a79: 0x0001, 0x2a7a: 0x0001, 0x2a7b: 0x0001, + 0x2a7c: 0x0001, 0x2a7d: 0x0001, 0x2a7e: 0x0001, 0x2a7f: 0x0001, + // Block 0xaa, offset 0x2a80 + 0x2a81: 0x000c, + 0x2ab8: 0x000c, 0x2ab9: 0x000c, 0x2aba: 0x000c, 0x2abb: 0x000c, + 0x2abc: 0x000c, 0x2abd: 0x000c, 0x2abe: 0x000c, 0x2abf: 0x000c, + // Block 0xab, offset 0x2ac0 + 0x2ac0: 0x000c, 0x2ac1: 0x000c, 0x2ac2: 0x000c, 0x2ac3: 0x000c, 0x2ac4: 0x000c, 0x2ac5: 0x000c, + 0x2ac6: 0x000c, + 0x2ad2: 0x000a, 0x2ad3: 0x000a, 0x2ad4: 0x000a, 0x2ad5: 0x000a, 0x2ad6: 0x000a, 0x2ad7: 0x000a, + 0x2ad8: 0x000a, 0x2ad9: 0x000a, 0x2ada: 0x000a, 0x2adb: 0x000a, 0x2adc: 0x000a, 0x2add: 0x000a, + 0x2ade: 0x000a, 0x2adf: 0x000a, 0x2ae0: 0x000a, 0x2ae1: 0x000a, 0x2ae2: 0x000a, 0x2ae3: 0x000a, + 0x2ae4: 0x000a, 0x2ae5: 0x000a, + 0x2af0: 0x000c, 0x2af3: 0x000c, 0x2af4: 0x000c, + 0x2aff: 0x000c, + // Block 0xac, offset 0x2b00 + 0x2b00: 0x000c, 0x2b01: 0x000c, + 0x2b33: 0x000c, 0x2b34: 0x000c, 0x2b35: 0x000c, + 0x2b36: 0x000c, 0x2b39: 0x000c, 0x2b3a: 0x000c, + // Block 0xad, offset 0x2b40 + 0x2b40: 0x000c, 0x2b41: 0x000c, 0x2b42: 0x000c, + 0x2b67: 0x000c, 0x2b68: 0x000c, 0x2b69: 0x000c, + 0x2b6a: 0x000c, 
0x2b6b: 0x000c, 0x2b6d: 0x000c, 0x2b6e: 0x000c, 0x2b6f: 0x000c, + 0x2b70: 0x000c, 0x2b71: 0x000c, 0x2b72: 0x000c, 0x2b73: 0x000c, 0x2b74: 0x000c, + // Block 0xae, offset 0x2b80 + 0x2bb3: 0x000c, + // Block 0xaf, offset 0x2bc0 + 0x2bc0: 0x000c, 0x2bc1: 0x000c, + 0x2bf6: 0x000c, 0x2bf7: 0x000c, 0x2bf8: 0x000c, 0x2bf9: 0x000c, 0x2bfa: 0x000c, 0x2bfb: 0x000c, + 0x2bfc: 0x000c, 0x2bfd: 0x000c, 0x2bfe: 0x000c, + // Block 0xb0, offset 0x2c00 + 0x2c09: 0x000c, 0x2c0a: 0x000c, 0x2c0b: 0x000c, + 0x2c0c: 0x000c, 0x2c0f: 0x000c, + // Block 0xb1, offset 0x2c40 + 0x2c6f: 0x000c, + 0x2c70: 0x000c, 0x2c71: 0x000c, 0x2c74: 0x000c, + 0x2c76: 0x000c, 0x2c77: 0x000c, + 0x2c7e: 0x000c, + // Block 0xb2, offset 0x2c80 + 0x2c9f: 0x000c, 0x2ca3: 0x000c, + 0x2ca4: 0x000c, 0x2ca5: 0x000c, 0x2ca6: 0x000c, 0x2ca7: 0x000c, 0x2ca8: 0x000c, 0x2ca9: 0x000c, + 0x2caa: 0x000c, + // Block 0xb3, offset 0x2cc0 + 0x2cc0: 0x000c, + 0x2ce6: 0x000c, 0x2ce7: 0x000c, 0x2ce8: 0x000c, 0x2ce9: 0x000c, + 0x2cea: 0x000c, 0x2ceb: 0x000c, 0x2cec: 0x000c, + 0x2cf0: 0x000c, 0x2cf1: 0x000c, 0x2cf2: 0x000c, 0x2cf3: 0x000c, 0x2cf4: 0x000c, + // Block 0xb4, offset 0x2d00 + 0x2d38: 0x000c, 0x2d39: 0x000c, 0x2d3a: 0x000c, 0x2d3b: 0x000c, + 0x2d3c: 0x000c, 0x2d3d: 0x000c, 0x2d3e: 0x000c, 0x2d3f: 0x000c, + // Block 0xb5, offset 0x2d40 + 0x2d42: 0x000c, 0x2d43: 0x000c, 0x2d44: 0x000c, + 0x2d46: 0x000c, + 0x2d5e: 0x000c, + // Block 0xb6, offset 0x2d80 + 0x2db3: 0x000c, 0x2db4: 0x000c, 0x2db5: 0x000c, + 0x2db6: 0x000c, 0x2db7: 0x000c, 0x2db8: 0x000c, 0x2dba: 0x000c, + 0x2dbf: 0x000c, + // Block 0xb7, offset 0x2dc0 + 0x2dc0: 0x000c, 0x2dc2: 0x000c, 0x2dc3: 0x000c, + // Block 0xb8, offset 0x2e00 + 0x2e32: 0x000c, 0x2e33: 0x000c, 0x2e34: 0x000c, 0x2e35: 0x000c, + 0x2e3c: 0x000c, 0x2e3d: 0x000c, 0x2e3f: 0x000c, + // Block 0xb9, offset 0x2e40 + 0x2e40: 0x000c, + 0x2e5c: 0x000c, 0x2e5d: 0x000c, + // Block 0xba, offset 0x2e80 + 0x2eb3: 0x000c, 0x2eb4: 0x000c, 0x2eb5: 0x000c, + 0x2eb6: 0x000c, 0x2eb7: 0x000c, 0x2eb8: 0x000c, 0x2eb9: 0x000c, 0x2eba: 0x000c, + 0x2ebd: 0x000c, 0x2ebf: 0x000c, + // Block 0xbb, offset 0x2ec0 + 0x2ec0: 0x000c, + 0x2ee0: 0x000a, 0x2ee1: 0x000a, 0x2ee2: 0x000a, 0x2ee3: 0x000a, + 0x2ee4: 0x000a, 0x2ee5: 0x000a, 0x2ee6: 0x000a, 0x2ee7: 0x000a, 0x2ee8: 0x000a, 0x2ee9: 0x000a, + 0x2eea: 0x000a, 0x2eeb: 0x000a, 0x2eec: 0x000a, + // Block 0xbc, offset 0x2f00 + 0x2f2b: 0x000c, 0x2f2d: 0x000c, + 0x2f30: 0x000c, 0x2f31: 0x000c, 0x2f32: 0x000c, 0x2f33: 0x000c, 0x2f34: 0x000c, 0x2f35: 0x000c, + 0x2f37: 0x000c, + // Block 0xbd, offset 0x2f40 + 0x2f5d: 0x000c, + 0x2f5e: 0x000c, 0x2f5f: 0x000c, 0x2f62: 0x000c, 0x2f63: 0x000c, + 0x2f64: 0x000c, 0x2f65: 0x000c, 0x2f67: 0x000c, 0x2f68: 0x000c, 0x2f69: 0x000c, + 0x2f6a: 0x000c, 0x2f6b: 0x000c, + // Block 0xbe, offset 0x2f80 + 0x2faf: 0x000c, + 0x2fb0: 0x000c, 0x2fb1: 0x000c, 0x2fb2: 0x000c, 0x2fb3: 0x000c, 0x2fb4: 0x000c, 0x2fb5: 0x000c, + 0x2fb6: 0x000c, 0x2fb7: 0x000c, 0x2fb9: 0x000c, 0x2fba: 0x000c, + // Block 0xbf, offset 0x2fc0 + 0x2ffb: 0x000c, + 0x2ffc: 0x000c, 0x2ffe: 0x000c, + // Block 0xc0, offset 0x3000 + 0x3003: 0x000c, + // Block 0xc1, offset 0x3040 + 0x3054: 0x000c, 0x3055: 0x000c, 0x3056: 0x000c, 0x3057: 0x000c, + 0x305a: 0x000c, 0x305b: 0x000c, + 0x3060: 0x000c, + // Block 0xc2, offset 0x3080 + 0x3081: 0x000c, 0x3082: 0x000c, 0x3083: 0x000c, 0x3084: 0x000c, 0x3085: 0x000c, + 0x3086: 0x000c, 0x3089: 0x000c, 0x308a: 0x000c, + 0x30b3: 0x000c, 0x30b4: 0x000c, 0x30b5: 0x000c, + 0x30b6: 0x000c, 0x30b7: 0x000c, 0x30b8: 0x000c, 0x30bb: 0x000c, + 0x30bc: 0x000c, 0x30bd: 0x000c, 0x30be: 0x000c, + 
// Block 0xc3, offset 0x30c0 + 0x30c7: 0x000c, + 0x30d1: 0x000c, + 0x30d2: 0x000c, 0x30d3: 0x000c, 0x30d4: 0x000c, 0x30d5: 0x000c, 0x30d6: 0x000c, + 0x30d9: 0x000c, 0x30da: 0x000c, 0x30db: 0x000c, + // Block 0xc4, offset 0x3100 + 0x310a: 0x000c, 0x310b: 0x000c, + 0x310c: 0x000c, 0x310d: 0x000c, 0x310e: 0x000c, 0x310f: 0x000c, 0x3110: 0x000c, 0x3111: 0x000c, + 0x3112: 0x000c, 0x3113: 0x000c, 0x3114: 0x000c, 0x3115: 0x000c, 0x3116: 0x000c, + 0x3118: 0x000c, 0x3119: 0x000c, + // Block 0xc5, offset 0x3140 + 0x3170: 0x000c, 0x3171: 0x000c, 0x3172: 0x000c, 0x3173: 0x000c, 0x3174: 0x000c, 0x3175: 0x000c, + 0x3176: 0x000c, 0x3178: 0x000c, 0x3179: 0x000c, 0x317a: 0x000c, 0x317b: 0x000c, + 0x317c: 0x000c, 0x317d: 0x000c, + // Block 0xc6, offset 0x3180 + 0x3192: 0x000c, 0x3193: 0x000c, 0x3194: 0x000c, 0x3195: 0x000c, 0x3196: 0x000c, 0x3197: 0x000c, + 0x3198: 0x000c, 0x3199: 0x000c, 0x319a: 0x000c, 0x319b: 0x000c, 0x319c: 0x000c, 0x319d: 0x000c, + 0x319e: 0x000c, 0x319f: 0x000c, 0x31a0: 0x000c, 0x31a1: 0x000c, 0x31a2: 0x000c, 0x31a3: 0x000c, + 0x31a4: 0x000c, 0x31a5: 0x000c, 0x31a6: 0x000c, 0x31a7: 0x000c, + 0x31aa: 0x000c, 0x31ab: 0x000c, 0x31ac: 0x000c, 0x31ad: 0x000c, 0x31ae: 0x000c, 0x31af: 0x000c, + 0x31b0: 0x000c, 0x31b2: 0x000c, 0x31b3: 0x000c, 0x31b5: 0x000c, + 0x31b6: 0x000c, + // Block 0xc7, offset 0x31c0 + 0x31f1: 0x000c, 0x31f2: 0x000c, 0x31f3: 0x000c, 0x31f4: 0x000c, 0x31f5: 0x000c, + 0x31f6: 0x000c, 0x31fa: 0x000c, + 0x31fc: 0x000c, 0x31fd: 0x000c, 0x31ff: 0x000c, + // Block 0xc8, offset 0x3200 + 0x3200: 0x000c, 0x3201: 0x000c, 0x3202: 0x000c, 0x3203: 0x000c, 0x3204: 0x000c, 0x3205: 0x000c, + 0x3207: 0x000c, + // Block 0xc9, offset 0x3240 + 0x3250: 0x000c, 0x3251: 0x000c, + 0x3255: 0x000c, 0x3257: 0x000c, + // Block 0xca, offset 0x3280 + 0x32b3: 0x000c, 0x32b4: 0x000c, + // Block 0xcb, offset 0x32c0 + 0x32c0: 0x000c, 0x32c1: 0x000c, + 0x32f6: 0x000c, 0x32f7: 0x000c, 0x32f8: 0x000c, 0x32f9: 0x000c, 0x32fa: 0x000c, + // Block 0xcc, offset 0x3300 + 0x3300: 0x000c, 0x3302: 0x000c, + // Block 0xcd, offset 0x3340 + 0x3355: 0x000a, 0x3356: 0x000a, 0x3357: 0x000a, + 0x3358: 0x000a, 0x3359: 0x000a, 0x335a: 0x000a, 0x335b: 0x000a, 0x335c: 0x000a, 0x335d: 0x0004, + 0x335e: 0x0004, 0x335f: 0x0004, 0x3360: 0x0004, 0x3361: 0x000a, 0x3362: 0x000a, 0x3363: 0x000a, + 0x3364: 0x000a, 0x3365: 0x000a, 0x3366: 0x000a, 0x3367: 0x000a, 0x3368: 0x000a, 0x3369: 0x000a, + 0x336a: 0x000a, 0x336b: 0x000a, 0x336c: 0x000a, 0x336d: 0x000a, 0x336e: 0x000a, 0x336f: 0x000a, + 0x3370: 0x000a, 0x3371: 0x000a, + // Block 0xce, offset 0x3380 + 0x3380: 0x000c, + 0x3387: 0x000c, 0x3388: 0x000c, 0x3389: 0x000c, 0x338a: 0x000c, 0x338b: 0x000c, + 0x338c: 0x000c, 0x338d: 0x000c, 0x338e: 0x000c, 0x338f: 0x000c, 0x3390: 0x000c, 0x3391: 0x000c, + 0x3392: 0x000c, 0x3393: 0x000c, 0x3394: 0x000c, 0x3395: 0x000c, + // Block 0xcf, offset 0x33c0 + 0x33f0: 0x000c, 0x33f1: 0x000c, 0x33f2: 0x000c, 0x33f3: 0x000c, 0x33f4: 0x000c, + // Block 0xd0, offset 0x3400 + 0x3430: 0x000c, 0x3431: 0x000c, 0x3432: 0x000c, 0x3433: 0x000c, 0x3434: 0x000c, 0x3435: 0x000c, + 0x3436: 0x000c, + // Block 0xd1, offset 0x3440 + 0x344f: 0x000c, + // Block 0xd2, offset 0x3480 + 0x348f: 0x000c, 0x3490: 0x000c, 0x3491: 0x000c, + 0x3492: 0x000c, + // Block 0xd3, offset 0x34c0 + 0x34e2: 0x000a, + 0x34e4: 0x000c, + // Block 0xd4, offset 0x3500 + 0x351d: 0x000c, + 0x351e: 0x000c, 0x3520: 0x000b, 0x3521: 0x000b, 0x3522: 0x000b, 0x3523: 0x000b, + // Block 0xd5, offset 0x3540 + 0x3540: 0x000c, 0x3541: 0x000c, 0x3542: 0x000c, 0x3543: 0x000c, 0x3544: 0x000c, 0x3545: 0x000c, + 
0x3546: 0x000c, 0x3547: 0x000c, 0x3548: 0x000c, 0x3549: 0x000c, 0x354a: 0x000c, 0x354b: 0x000c, + 0x354c: 0x000c, 0x354d: 0x000c, 0x354e: 0x000c, 0x354f: 0x000c, 0x3550: 0x000c, 0x3551: 0x000c, + 0x3552: 0x000c, 0x3553: 0x000c, 0x3554: 0x000c, 0x3555: 0x000c, 0x3556: 0x000c, 0x3557: 0x000c, + 0x3558: 0x000c, 0x3559: 0x000c, 0x355a: 0x000c, 0x355b: 0x000c, 0x355c: 0x000c, 0x355d: 0x000c, + 0x355e: 0x000c, 0x355f: 0x000c, 0x3560: 0x000c, 0x3561: 0x000c, 0x3562: 0x000c, 0x3563: 0x000c, + 0x3564: 0x000c, 0x3565: 0x000c, 0x3566: 0x000c, 0x3567: 0x000c, 0x3568: 0x000c, 0x3569: 0x000c, + 0x356a: 0x000c, 0x356b: 0x000c, 0x356c: 0x000c, 0x356d: 0x000c, + 0x3570: 0x000c, 0x3571: 0x000c, 0x3572: 0x000c, 0x3573: 0x000c, 0x3574: 0x000c, 0x3575: 0x000c, + 0x3576: 0x000c, 0x3577: 0x000c, 0x3578: 0x000c, 0x3579: 0x000c, 0x357a: 0x000c, 0x357b: 0x000c, + 0x357c: 0x000c, 0x357d: 0x000c, 0x357e: 0x000c, 0x357f: 0x000c, + // Block 0xd6, offset 0x3580 + 0x3580: 0x000c, 0x3581: 0x000c, 0x3582: 0x000c, 0x3583: 0x000c, 0x3584: 0x000c, 0x3585: 0x000c, + 0x3586: 0x000c, + // Block 0xd7, offset 0x35c0 + 0x35e7: 0x000c, 0x35e8: 0x000c, 0x35e9: 0x000c, + 0x35f3: 0x000b, 0x35f4: 0x000b, 0x35f5: 0x000b, + 0x35f6: 0x000b, 0x35f7: 0x000b, 0x35f8: 0x000b, 0x35f9: 0x000b, 0x35fa: 0x000b, 0x35fb: 0x000c, + 0x35fc: 0x000c, 0x35fd: 0x000c, 0x35fe: 0x000c, 0x35ff: 0x000c, + // Block 0xd8, offset 0x3600 + 0x3600: 0x000c, 0x3601: 0x000c, 0x3602: 0x000c, 0x3605: 0x000c, + 0x3606: 0x000c, 0x3607: 0x000c, 0x3608: 0x000c, 0x3609: 0x000c, 0x360a: 0x000c, 0x360b: 0x000c, + 0x362a: 0x000c, 0x362b: 0x000c, 0x362c: 0x000c, 0x362d: 0x000c, + // Block 0xd9, offset 0x3640 + 0x3669: 0x000a, + 0x366a: 0x000a, + // Block 0xda, offset 0x3680 + 0x3680: 0x000a, 0x3681: 0x000a, 0x3682: 0x000c, 0x3683: 0x000c, 0x3684: 0x000c, 0x3685: 0x000a, + // Block 0xdb, offset 0x36c0 + 0x36c0: 0x000a, 0x36c1: 0x000a, 0x36c2: 0x000a, 0x36c3: 0x000a, 0x36c4: 0x000a, 0x36c5: 0x000a, + 0x36c6: 0x000a, 0x36c7: 0x000a, 0x36c8: 0x000a, 0x36c9: 0x000a, 0x36ca: 0x000a, 0x36cb: 0x000a, + 0x36cc: 0x000a, 0x36cd: 0x000a, 0x36ce: 0x000a, 0x36cf: 0x000a, 0x36d0: 0x000a, 0x36d1: 0x000a, + 0x36d2: 0x000a, 0x36d3: 0x000a, 0x36d4: 0x000a, 0x36d5: 0x000a, 0x36d6: 0x000a, + // Block 0xdc, offset 0x3700 + 0x371b: 0x000a, + // Block 0xdd, offset 0x3740 + 0x3755: 0x000a, + // Block 0xde, offset 0x3780 + 0x378f: 0x000a, + // Block 0xdf, offset 0x37c0 + 0x37c9: 0x000a, + // Block 0xe0, offset 0x3800 + 0x3803: 0x000a, + 0x380e: 0x0002, 0x380f: 0x0002, 0x3810: 0x0002, 0x3811: 0x0002, + 0x3812: 0x0002, 0x3813: 0x0002, 0x3814: 0x0002, 0x3815: 0x0002, 0x3816: 0x0002, 0x3817: 0x0002, + 0x3818: 0x0002, 0x3819: 0x0002, 0x381a: 0x0002, 0x381b: 0x0002, 0x381c: 0x0002, 0x381d: 0x0002, + 0x381e: 0x0002, 0x381f: 0x0002, 0x3820: 0x0002, 0x3821: 0x0002, 0x3822: 0x0002, 0x3823: 0x0002, + 0x3824: 0x0002, 0x3825: 0x0002, 0x3826: 0x0002, 0x3827: 0x0002, 0x3828: 0x0002, 0x3829: 0x0002, + 0x382a: 0x0002, 0x382b: 0x0002, 0x382c: 0x0002, 0x382d: 0x0002, 0x382e: 0x0002, 0x382f: 0x0002, + 0x3830: 0x0002, 0x3831: 0x0002, 0x3832: 0x0002, 0x3833: 0x0002, 0x3834: 0x0002, 0x3835: 0x0002, + 0x3836: 0x0002, 0x3837: 0x0002, 0x3838: 0x0002, 0x3839: 0x0002, 0x383a: 0x0002, 0x383b: 0x0002, + 0x383c: 0x0002, 0x383d: 0x0002, 0x383e: 0x0002, 0x383f: 0x0002, + // Block 0xe1, offset 0x3840 + 0x3840: 0x000c, 0x3841: 0x000c, 0x3842: 0x000c, 0x3843: 0x000c, 0x3844: 0x000c, 0x3845: 0x000c, + 0x3846: 0x000c, 0x3847: 0x000c, 0x3848: 0x000c, 0x3849: 0x000c, 0x384a: 0x000c, 0x384b: 0x000c, + 0x384c: 0x000c, 0x384d: 0x000c, 0x384e: 
0x000c, 0x384f: 0x000c, 0x3850: 0x000c, 0x3851: 0x000c, + 0x3852: 0x000c, 0x3853: 0x000c, 0x3854: 0x000c, 0x3855: 0x000c, 0x3856: 0x000c, 0x3857: 0x000c, + 0x3858: 0x000c, 0x3859: 0x000c, 0x385a: 0x000c, 0x385b: 0x000c, 0x385c: 0x000c, 0x385d: 0x000c, + 0x385e: 0x000c, 0x385f: 0x000c, 0x3860: 0x000c, 0x3861: 0x000c, 0x3862: 0x000c, 0x3863: 0x000c, + 0x3864: 0x000c, 0x3865: 0x000c, 0x3866: 0x000c, 0x3867: 0x000c, 0x3868: 0x000c, 0x3869: 0x000c, + 0x386a: 0x000c, 0x386b: 0x000c, 0x386c: 0x000c, 0x386d: 0x000c, 0x386e: 0x000c, 0x386f: 0x000c, + 0x3870: 0x000c, 0x3871: 0x000c, 0x3872: 0x000c, 0x3873: 0x000c, 0x3874: 0x000c, 0x3875: 0x000c, + 0x3876: 0x000c, 0x387b: 0x000c, + 0x387c: 0x000c, 0x387d: 0x000c, 0x387e: 0x000c, 0x387f: 0x000c, + // Block 0xe2, offset 0x3880 + 0x3880: 0x000c, 0x3881: 0x000c, 0x3882: 0x000c, 0x3883: 0x000c, 0x3884: 0x000c, 0x3885: 0x000c, + 0x3886: 0x000c, 0x3887: 0x000c, 0x3888: 0x000c, 0x3889: 0x000c, 0x388a: 0x000c, 0x388b: 0x000c, + 0x388c: 0x000c, 0x388d: 0x000c, 0x388e: 0x000c, 0x388f: 0x000c, 0x3890: 0x000c, 0x3891: 0x000c, + 0x3892: 0x000c, 0x3893: 0x000c, 0x3894: 0x000c, 0x3895: 0x000c, 0x3896: 0x000c, 0x3897: 0x000c, + 0x3898: 0x000c, 0x3899: 0x000c, 0x389a: 0x000c, 0x389b: 0x000c, 0x389c: 0x000c, 0x389d: 0x000c, + 0x389e: 0x000c, 0x389f: 0x000c, 0x38a0: 0x000c, 0x38a1: 0x000c, 0x38a2: 0x000c, 0x38a3: 0x000c, + 0x38a4: 0x000c, 0x38a5: 0x000c, 0x38a6: 0x000c, 0x38a7: 0x000c, 0x38a8: 0x000c, 0x38a9: 0x000c, + 0x38aa: 0x000c, 0x38ab: 0x000c, 0x38ac: 0x000c, + 0x38b5: 0x000c, + // Block 0xe3, offset 0x38c0 + 0x38c4: 0x000c, + 0x38db: 0x000c, 0x38dc: 0x000c, 0x38dd: 0x000c, + 0x38de: 0x000c, 0x38df: 0x000c, 0x38e1: 0x000c, 0x38e2: 0x000c, 0x38e3: 0x000c, + 0x38e4: 0x000c, 0x38e5: 0x000c, 0x38e6: 0x000c, 0x38e7: 0x000c, 0x38e8: 0x000c, 0x38e9: 0x000c, + 0x38ea: 0x000c, 0x38eb: 0x000c, 0x38ec: 0x000c, 0x38ed: 0x000c, 0x38ee: 0x000c, 0x38ef: 0x000c, + // Block 0xe4, offset 0x3900 + 0x3900: 0x000c, 0x3901: 0x000c, 0x3902: 0x000c, 0x3903: 0x000c, 0x3904: 0x000c, 0x3905: 0x000c, + 0x3906: 0x000c, 0x3908: 0x000c, 0x3909: 0x000c, 0x390a: 0x000c, 0x390b: 0x000c, + 0x390c: 0x000c, 0x390d: 0x000c, 0x390e: 0x000c, 0x390f: 0x000c, 0x3910: 0x000c, 0x3911: 0x000c, + 0x3912: 0x000c, 0x3913: 0x000c, 0x3914: 0x000c, 0x3915: 0x000c, 0x3916: 0x000c, 0x3917: 0x000c, + 0x3918: 0x000c, 0x391b: 0x000c, 0x391c: 0x000c, 0x391d: 0x000c, + 0x391e: 0x000c, 0x391f: 0x000c, 0x3920: 0x000c, 0x3921: 0x000c, 0x3923: 0x000c, + 0x3924: 0x000c, 0x3926: 0x000c, 0x3927: 0x000c, 0x3928: 0x000c, 0x3929: 0x000c, + 0x392a: 0x000c, + // Block 0xe5, offset 0x3940 + 0x396e: 0x000c, + // Block 0xe6, offset 0x3980 + 0x39ac: 0x000c, 0x39ad: 0x000c, 0x39ae: 0x000c, 0x39af: 0x000c, + 0x39bf: 0x0004, + // Block 0xe7, offset 0x39c0 + 0x39ec: 0x000c, 0x39ed: 0x000c, 0x39ee: 0x000c, 0x39ef: 0x000c, + // Block 0xe8, offset 0x3a00 + 0x3a00: 0x0001, 0x3a01: 0x0001, 0x3a02: 0x0001, 0x3a03: 0x0001, 0x3a04: 0x0001, 0x3a05: 0x0001, + 0x3a06: 0x0001, 0x3a07: 0x0001, 0x3a08: 0x0001, 0x3a09: 0x0001, 0x3a0a: 0x0001, 0x3a0b: 0x0001, + 0x3a0c: 0x0001, 0x3a0d: 0x0001, 0x3a0e: 0x0001, 0x3a0f: 0x0001, 0x3a10: 0x000c, 0x3a11: 0x000c, + 0x3a12: 0x000c, 0x3a13: 0x000c, 0x3a14: 0x000c, 0x3a15: 0x000c, 0x3a16: 0x000c, 0x3a17: 0x0001, + 0x3a18: 0x0001, 0x3a19: 0x0001, 0x3a1a: 0x0001, 0x3a1b: 0x0001, 0x3a1c: 0x0001, 0x3a1d: 0x0001, + 0x3a1e: 0x0001, 0x3a1f: 0x0001, 0x3a20: 0x0001, 0x3a21: 0x0001, 0x3a22: 0x0001, 0x3a23: 0x0001, + 0x3a24: 0x0001, 0x3a25: 0x0001, 0x3a26: 0x0001, 0x3a27: 0x0001, 0x3a28: 0x0001, 0x3a29: 0x0001, + 0x3a2a: 
0x0001, 0x3a2b: 0x0001, 0x3a2c: 0x0001, 0x3a2d: 0x0001, 0x3a2e: 0x0001, 0x3a2f: 0x0001, + 0x3a30: 0x0001, 0x3a31: 0x0001, 0x3a32: 0x0001, 0x3a33: 0x0001, 0x3a34: 0x0001, 0x3a35: 0x0001, + 0x3a36: 0x0001, 0x3a37: 0x0001, 0x3a38: 0x0001, 0x3a39: 0x0001, 0x3a3a: 0x0001, 0x3a3b: 0x0001, + 0x3a3c: 0x0001, 0x3a3d: 0x0001, 0x3a3e: 0x0001, 0x3a3f: 0x0001, + // Block 0xe9, offset 0x3a40 + 0x3a40: 0x0001, 0x3a41: 0x0001, 0x3a42: 0x0001, 0x3a43: 0x0001, 0x3a44: 0x000c, 0x3a45: 0x000c, + 0x3a46: 0x000c, 0x3a47: 0x000c, 0x3a48: 0x000c, 0x3a49: 0x000c, 0x3a4a: 0x000c, 0x3a4b: 0x0001, + 0x3a4c: 0x0001, 0x3a4d: 0x0001, 0x3a4e: 0x0001, 0x3a4f: 0x0001, 0x3a50: 0x0001, 0x3a51: 0x0001, + 0x3a52: 0x0001, 0x3a53: 0x0001, 0x3a54: 0x0001, 0x3a55: 0x0001, 0x3a56: 0x0001, 0x3a57: 0x0001, + 0x3a58: 0x0001, 0x3a59: 0x0001, 0x3a5a: 0x0001, 0x3a5b: 0x0001, 0x3a5c: 0x0001, 0x3a5d: 0x0001, + 0x3a5e: 0x0001, 0x3a5f: 0x0001, 0x3a60: 0x0001, 0x3a61: 0x0001, 0x3a62: 0x0001, 0x3a63: 0x0001, + 0x3a64: 0x0001, 0x3a65: 0x0001, 0x3a66: 0x0001, 0x3a67: 0x0001, 0x3a68: 0x0001, 0x3a69: 0x0001, + 0x3a6a: 0x0001, 0x3a6b: 0x0001, 0x3a6c: 0x0001, 0x3a6d: 0x0001, 0x3a6e: 0x0001, 0x3a6f: 0x0001, + 0x3a70: 0x0001, 0x3a71: 0x0001, 0x3a72: 0x0001, 0x3a73: 0x0001, 0x3a74: 0x0001, 0x3a75: 0x0001, + 0x3a76: 0x0001, 0x3a77: 0x0001, 0x3a78: 0x0001, 0x3a79: 0x0001, 0x3a7a: 0x0001, 0x3a7b: 0x0001, + 0x3a7c: 0x0001, 0x3a7d: 0x0001, 0x3a7e: 0x0001, 0x3a7f: 0x0001, + // Block 0xea, offset 0x3a80 + 0x3a80: 0x0001, 0x3a81: 0x0001, 0x3a82: 0x0001, 0x3a83: 0x0001, 0x3a84: 0x0001, 0x3a85: 0x0001, + 0x3a86: 0x0001, 0x3a87: 0x0001, 0x3a88: 0x0001, 0x3a89: 0x0001, 0x3a8a: 0x0001, 0x3a8b: 0x0001, + 0x3a8c: 0x0001, 0x3a8d: 0x0001, 0x3a8e: 0x0001, 0x3a8f: 0x0001, 0x3a90: 0x0001, 0x3a91: 0x0001, + 0x3a92: 0x0001, 0x3a93: 0x0001, 0x3a94: 0x0001, 0x3a95: 0x0001, 0x3a96: 0x0001, 0x3a97: 0x0001, + 0x3a98: 0x0001, 0x3a99: 0x0001, 0x3a9a: 0x0001, 0x3a9b: 0x0001, 0x3a9c: 0x0001, 0x3a9d: 0x0001, + 0x3a9e: 0x0001, 0x3a9f: 0x0001, 0x3aa0: 0x0001, 0x3aa1: 0x0001, 0x3aa2: 0x0001, 0x3aa3: 0x0001, + 0x3aa4: 0x0001, 0x3aa5: 0x0001, 0x3aa6: 0x0001, 0x3aa7: 0x0001, 0x3aa8: 0x0001, 0x3aa9: 0x0001, + 0x3aaa: 0x0001, 0x3aab: 0x0001, 0x3aac: 0x0001, 0x3aad: 0x0001, 0x3aae: 0x0001, 0x3aaf: 0x0001, + 0x3ab0: 0x0001, 0x3ab1: 0x000d, 0x3ab2: 0x000d, 0x3ab3: 0x000d, 0x3ab4: 0x000d, 0x3ab5: 0x000d, + 0x3ab6: 0x000d, 0x3ab7: 0x000d, 0x3ab8: 0x000d, 0x3ab9: 0x000d, 0x3aba: 0x000d, 0x3abb: 0x000d, + 0x3abc: 0x000d, 0x3abd: 0x000d, 0x3abe: 0x000d, 0x3abf: 0x000d, + // Block 0xeb, offset 0x3ac0 + 0x3ac0: 0x000d, 0x3ac1: 0x000d, 0x3ac2: 0x000d, 0x3ac3: 0x000d, 0x3ac4: 0x000d, 0x3ac5: 0x000d, + 0x3ac6: 0x000d, 0x3ac7: 0x000d, 0x3ac8: 0x000d, 0x3ac9: 0x000d, 0x3aca: 0x000d, 0x3acb: 0x000d, + 0x3acc: 0x000d, 0x3acd: 0x000d, 0x3ace: 0x000d, 0x3acf: 0x000d, 0x3ad0: 0x000d, 0x3ad1: 0x000d, + 0x3ad2: 0x000d, 0x3ad3: 0x000d, 0x3ad4: 0x000d, 0x3ad5: 0x000d, 0x3ad6: 0x000d, 0x3ad7: 0x000d, + 0x3ad8: 0x000d, 0x3ad9: 0x000d, 0x3ada: 0x000d, 0x3adb: 0x000d, 0x3adc: 0x000d, 0x3add: 0x000d, + 0x3ade: 0x000d, 0x3adf: 0x000d, 0x3ae0: 0x000d, 0x3ae1: 0x000d, 0x3ae2: 0x000d, 0x3ae3: 0x000d, + 0x3ae4: 0x000d, 0x3ae5: 0x000d, 0x3ae6: 0x000d, 0x3ae7: 0x000d, 0x3ae8: 0x000d, 0x3ae9: 0x000d, + 0x3aea: 0x000d, 0x3aeb: 0x000d, 0x3aec: 0x000d, 0x3aed: 0x000d, 0x3aee: 0x000d, 0x3aef: 0x000d, + 0x3af0: 0x000d, 0x3af1: 0x000d, 0x3af2: 0x000d, 0x3af3: 0x000d, 0x3af4: 0x000d, 0x3af5: 0x0001, + 0x3af6: 0x0001, 0x3af7: 0x0001, 0x3af8: 0x0001, 0x3af9: 0x0001, 0x3afa: 0x0001, 0x3afb: 0x0001, + 0x3afc: 0x0001, 0x3afd: 0x0001, 
0x3afe: 0x0001, 0x3aff: 0x0001, + // Block 0xec, offset 0x3b00 + 0x3b00: 0x0001, 0x3b01: 0x000d, 0x3b02: 0x000d, 0x3b03: 0x000d, 0x3b04: 0x000d, 0x3b05: 0x000d, + 0x3b06: 0x000d, 0x3b07: 0x000d, 0x3b08: 0x000d, 0x3b09: 0x000d, 0x3b0a: 0x000d, 0x3b0b: 0x000d, + 0x3b0c: 0x000d, 0x3b0d: 0x000d, 0x3b0e: 0x000d, 0x3b0f: 0x000d, 0x3b10: 0x000d, 0x3b11: 0x000d, + 0x3b12: 0x000d, 0x3b13: 0x000d, 0x3b14: 0x000d, 0x3b15: 0x000d, 0x3b16: 0x000d, 0x3b17: 0x000d, + 0x3b18: 0x000d, 0x3b19: 0x000d, 0x3b1a: 0x000d, 0x3b1b: 0x000d, 0x3b1c: 0x000d, 0x3b1d: 0x000d, + 0x3b1e: 0x000d, 0x3b1f: 0x000d, 0x3b20: 0x000d, 0x3b21: 0x000d, 0x3b22: 0x000d, 0x3b23: 0x000d, + 0x3b24: 0x000d, 0x3b25: 0x000d, 0x3b26: 0x000d, 0x3b27: 0x000d, 0x3b28: 0x000d, 0x3b29: 0x000d, + 0x3b2a: 0x000d, 0x3b2b: 0x000d, 0x3b2c: 0x000d, 0x3b2d: 0x000d, 0x3b2e: 0x000d, 0x3b2f: 0x000d, + 0x3b30: 0x000d, 0x3b31: 0x000d, 0x3b32: 0x000d, 0x3b33: 0x000d, 0x3b34: 0x000d, 0x3b35: 0x000d, + 0x3b36: 0x000d, 0x3b37: 0x000d, 0x3b38: 0x000d, 0x3b39: 0x000d, 0x3b3a: 0x000d, 0x3b3b: 0x000d, + 0x3b3c: 0x000d, 0x3b3d: 0x000d, 0x3b3e: 0x0001, 0x3b3f: 0x0001, + // Block 0xed, offset 0x3b40 + 0x3b40: 0x000d, 0x3b41: 0x000d, 0x3b42: 0x000d, 0x3b43: 0x000d, 0x3b44: 0x000d, 0x3b45: 0x000d, + 0x3b46: 0x000d, 0x3b47: 0x000d, 0x3b48: 0x000d, 0x3b49: 0x000d, 0x3b4a: 0x000d, 0x3b4b: 0x000d, + 0x3b4c: 0x000d, 0x3b4d: 0x000d, 0x3b4e: 0x000d, 0x3b4f: 0x000d, 0x3b50: 0x000d, 0x3b51: 0x000d, + 0x3b52: 0x000d, 0x3b53: 0x000d, 0x3b54: 0x000d, 0x3b55: 0x000d, 0x3b56: 0x000d, 0x3b57: 0x000d, + 0x3b58: 0x000d, 0x3b59: 0x000d, 0x3b5a: 0x000d, 0x3b5b: 0x000d, 0x3b5c: 0x000d, 0x3b5d: 0x000d, + 0x3b5e: 0x000d, 0x3b5f: 0x000d, 0x3b60: 0x000d, 0x3b61: 0x000d, 0x3b62: 0x000d, 0x3b63: 0x000d, + 0x3b64: 0x000d, 0x3b65: 0x000d, 0x3b66: 0x000d, 0x3b67: 0x000d, 0x3b68: 0x000d, 0x3b69: 0x000d, + 0x3b6a: 0x000d, 0x3b6b: 0x000d, 0x3b6c: 0x000d, 0x3b6d: 0x000d, 0x3b6e: 0x000d, 0x3b6f: 0x000d, + 0x3b70: 0x000a, 0x3b71: 0x000a, 0x3b72: 0x000d, 0x3b73: 0x000d, 0x3b74: 0x000d, 0x3b75: 0x000d, + 0x3b76: 0x000d, 0x3b77: 0x000d, 0x3b78: 0x000d, 0x3b79: 0x000d, 0x3b7a: 0x000d, 0x3b7b: 0x000d, + 0x3b7c: 0x000d, 0x3b7d: 0x000d, 0x3b7e: 0x000d, 0x3b7f: 0x000d, + // Block 0xee, offset 0x3b80 + 0x3b80: 0x000a, 0x3b81: 0x000a, 0x3b82: 0x000a, 0x3b83: 0x000a, 0x3b84: 0x000a, 0x3b85: 0x000a, + 0x3b86: 0x000a, 0x3b87: 0x000a, 0x3b88: 0x000a, 0x3b89: 0x000a, 0x3b8a: 0x000a, 0x3b8b: 0x000a, + 0x3b8c: 0x000a, 0x3b8d: 0x000a, 0x3b8e: 0x000a, 0x3b8f: 0x000a, 0x3b90: 0x000a, 0x3b91: 0x000a, + 0x3b92: 0x000a, 0x3b93: 0x000a, 0x3b94: 0x000a, 0x3b95: 0x000a, 0x3b96: 0x000a, 0x3b97: 0x000a, + 0x3b98: 0x000a, 0x3b99: 0x000a, 0x3b9a: 0x000a, 0x3b9b: 0x000a, 0x3b9c: 0x000a, 0x3b9d: 0x000a, + 0x3b9e: 0x000a, 0x3b9f: 0x000a, 0x3ba0: 0x000a, 0x3ba1: 0x000a, 0x3ba2: 0x000a, 0x3ba3: 0x000a, + 0x3ba4: 0x000a, 0x3ba5: 0x000a, 0x3ba6: 0x000a, 0x3ba7: 0x000a, 0x3ba8: 0x000a, 0x3ba9: 0x000a, + 0x3baa: 0x000a, 0x3bab: 0x000a, + 0x3bb0: 0x000a, 0x3bb1: 0x000a, 0x3bb2: 0x000a, 0x3bb3: 0x000a, 0x3bb4: 0x000a, 0x3bb5: 0x000a, + 0x3bb6: 0x000a, 0x3bb7: 0x000a, 0x3bb8: 0x000a, 0x3bb9: 0x000a, 0x3bba: 0x000a, 0x3bbb: 0x000a, + 0x3bbc: 0x000a, 0x3bbd: 0x000a, 0x3bbe: 0x000a, 0x3bbf: 0x000a, + // Block 0xef, offset 0x3bc0 + 0x3bc0: 0x000a, 0x3bc1: 0x000a, 0x3bc2: 0x000a, 0x3bc3: 0x000a, 0x3bc4: 0x000a, 0x3bc5: 0x000a, + 0x3bc6: 0x000a, 0x3bc7: 0x000a, 0x3bc8: 0x000a, 0x3bc9: 0x000a, 0x3bca: 0x000a, 0x3bcb: 0x000a, + 0x3bcc: 0x000a, 0x3bcd: 0x000a, 0x3bce: 0x000a, 0x3bcf: 0x000a, 0x3bd0: 0x000a, 0x3bd1: 0x000a, + 0x3bd2: 0x000a, 0x3bd3: 
0x000a, + 0x3be0: 0x000a, 0x3be1: 0x000a, 0x3be2: 0x000a, 0x3be3: 0x000a, + 0x3be4: 0x000a, 0x3be5: 0x000a, 0x3be6: 0x000a, 0x3be7: 0x000a, 0x3be8: 0x000a, 0x3be9: 0x000a, + 0x3bea: 0x000a, 0x3beb: 0x000a, 0x3bec: 0x000a, 0x3bed: 0x000a, 0x3bee: 0x000a, + 0x3bf1: 0x000a, 0x3bf2: 0x000a, 0x3bf3: 0x000a, 0x3bf4: 0x000a, 0x3bf5: 0x000a, + 0x3bf6: 0x000a, 0x3bf7: 0x000a, 0x3bf8: 0x000a, 0x3bf9: 0x000a, 0x3bfa: 0x000a, 0x3bfb: 0x000a, + 0x3bfc: 0x000a, 0x3bfd: 0x000a, 0x3bfe: 0x000a, 0x3bff: 0x000a, + // Block 0xf0, offset 0x3c00 + 0x3c01: 0x000a, 0x3c02: 0x000a, 0x3c03: 0x000a, 0x3c04: 0x000a, 0x3c05: 0x000a, + 0x3c06: 0x000a, 0x3c07: 0x000a, 0x3c08: 0x000a, 0x3c09: 0x000a, 0x3c0a: 0x000a, 0x3c0b: 0x000a, + 0x3c0c: 0x000a, 0x3c0d: 0x000a, 0x3c0e: 0x000a, 0x3c0f: 0x000a, 0x3c11: 0x000a, + 0x3c12: 0x000a, 0x3c13: 0x000a, 0x3c14: 0x000a, 0x3c15: 0x000a, 0x3c16: 0x000a, 0x3c17: 0x000a, + 0x3c18: 0x000a, 0x3c19: 0x000a, 0x3c1a: 0x000a, 0x3c1b: 0x000a, 0x3c1c: 0x000a, 0x3c1d: 0x000a, + 0x3c1e: 0x000a, 0x3c1f: 0x000a, 0x3c20: 0x000a, 0x3c21: 0x000a, 0x3c22: 0x000a, 0x3c23: 0x000a, + 0x3c24: 0x000a, 0x3c25: 0x000a, 0x3c26: 0x000a, 0x3c27: 0x000a, 0x3c28: 0x000a, 0x3c29: 0x000a, + 0x3c2a: 0x000a, 0x3c2b: 0x000a, 0x3c2c: 0x000a, 0x3c2d: 0x000a, 0x3c2e: 0x000a, 0x3c2f: 0x000a, + 0x3c30: 0x000a, 0x3c31: 0x000a, 0x3c32: 0x000a, 0x3c33: 0x000a, 0x3c34: 0x000a, 0x3c35: 0x000a, + // Block 0xf1, offset 0x3c40 + 0x3c40: 0x0002, 0x3c41: 0x0002, 0x3c42: 0x0002, 0x3c43: 0x0002, 0x3c44: 0x0002, 0x3c45: 0x0002, + 0x3c46: 0x0002, 0x3c47: 0x0002, 0x3c48: 0x0002, 0x3c49: 0x0002, 0x3c4a: 0x0002, 0x3c4b: 0x000a, + 0x3c4c: 0x000a, 0x3c4d: 0x000a, 0x3c4e: 0x000a, 0x3c4f: 0x000a, + 0x3c6f: 0x000a, + // Block 0xf2, offset 0x3c80 + 0x3caa: 0x000a, 0x3cab: 0x000a, 0x3cac: 0x000a, 0x3cad: 0x000a, 0x3cae: 0x000a, 0x3caf: 0x000a, + // Block 0xf3, offset 0x3cc0 + 0x3ced: 0x000a, + // Block 0xf4, offset 0x3d00 + 0x3d20: 0x000a, 0x3d21: 0x000a, 0x3d22: 0x000a, 0x3d23: 0x000a, + 0x3d24: 0x000a, 0x3d25: 0x000a, + // Block 0xf5, offset 0x3d40 + 0x3d40: 0x000a, 0x3d41: 0x000a, 0x3d42: 0x000a, 0x3d43: 0x000a, 0x3d44: 0x000a, 0x3d45: 0x000a, + 0x3d46: 0x000a, 0x3d47: 0x000a, 0x3d48: 0x000a, 0x3d49: 0x000a, 0x3d4a: 0x000a, 0x3d4b: 0x000a, + 0x3d4c: 0x000a, 0x3d4d: 0x000a, 0x3d4e: 0x000a, 0x3d4f: 0x000a, 0x3d50: 0x000a, 0x3d51: 0x000a, + 0x3d52: 0x000a, 0x3d53: 0x000a, 0x3d54: 0x000a, 0x3d55: 0x000a, 0x3d56: 0x000a, 0x3d57: 0x000a, + 0x3d5c: 0x000a, 0x3d5d: 0x000a, + 0x3d5e: 0x000a, 0x3d5f: 0x000a, 0x3d60: 0x000a, 0x3d61: 0x000a, 0x3d62: 0x000a, 0x3d63: 0x000a, + 0x3d64: 0x000a, 0x3d65: 0x000a, 0x3d66: 0x000a, 0x3d67: 0x000a, 0x3d68: 0x000a, 0x3d69: 0x000a, + 0x3d6a: 0x000a, 0x3d6b: 0x000a, 0x3d6c: 0x000a, + 0x3d70: 0x000a, 0x3d71: 0x000a, 0x3d72: 0x000a, 0x3d73: 0x000a, 0x3d74: 0x000a, 0x3d75: 0x000a, + 0x3d76: 0x000a, 0x3d77: 0x000a, 0x3d78: 0x000a, 0x3d79: 0x000a, 0x3d7a: 0x000a, 0x3d7b: 0x000a, + 0x3d7c: 0x000a, + // Block 0xf6, offset 0x3d80 + 0x3d80: 0x000a, 0x3d81: 0x000a, 0x3d82: 0x000a, 0x3d83: 0x000a, 0x3d84: 0x000a, 0x3d85: 0x000a, + 0x3d86: 0x000a, 0x3d87: 0x000a, 0x3d88: 0x000a, 0x3d89: 0x000a, 0x3d8a: 0x000a, 0x3d8b: 0x000a, + 0x3d8c: 0x000a, 0x3d8d: 0x000a, 0x3d8e: 0x000a, 0x3d8f: 0x000a, 0x3d90: 0x000a, 0x3d91: 0x000a, + 0x3d92: 0x000a, 0x3d93: 0x000a, 0x3d94: 0x000a, 0x3d95: 0x000a, 0x3d96: 0x000a, 0x3d97: 0x000a, + 0x3d98: 0x000a, 0x3d99: 0x000a, 0x3d9a: 0x000a, 0x3d9b: 0x000a, 0x3d9c: 0x000a, 0x3d9d: 0x000a, + 0x3d9e: 0x000a, 0x3d9f: 0x000a, 0x3da0: 0x000a, 0x3da1: 0x000a, 0x3da2: 0x000a, 0x3da3: 0x000a, + 0x3da4: 0x000a, 
0x3da5: 0x000a, 0x3da6: 0x000a, 0x3da7: 0x000a, 0x3da8: 0x000a, 0x3da9: 0x000a, + 0x3daa: 0x000a, 0x3dab: 0x000a, 0x3dac: 0x000a, 0x3dad: 0x000a, 0x3dae: 0x000a, 0x3daf: 0x000a, + 0x3db0: 0x000a, 0x3db1: 0x000a, 0x3db2: 0x000a, 0x3db3: 0x000a, 0x3db4: 0x000a, 0x3db5: 0x000a, + 0x3db6: 0x000a, 0x3dbb: 0x000a, + 0x3dbc: 0x000a, 0x3dbd: 0x000a, 0x3dbe: 0x000a, 0x3dbf: 0x000a, + // Block 0xf7, offset 0x3dc0 + 0x3dc0: 0x000a, 0x3dc1: 0x000a, 0x3dc2: 0x000a, 0x3dc3: 0x000a, 0x3dc4: 0x000a, 0x3dc5: 0x000a, + 0x3dc6: 0x000a, 0x3dc7: 0x000a, 0x3dc8: 0x000a, 0x3dc9: 0x000a, 0x3dca: 0x000a, 0x3dcb: 0x000a, + 0x3dcc: 0x000a, 0x3dcd: 0x000a, 0x3dce: 0x000a, 0x3dcf: 0x000a, 0x3dd0: 0x000a, 0x3dd1: 0x000a, + 0x3dd2: 0x000a, 0x3dd3: 0x000a, 0x3dd4: 0x000a, 0x3dd5: 0x000a, 0x3dd6: 0x000a, 0x3dd7: 0x000a, + 0x3dd8: 0x000a, 0x3dd9: 0x000a, + 0x3de0: 0x000a, 0x3de1: 0x000a, 0x3de2: 0x000a, 0x3de3: 0x000a, + 0x3de4: 0x000a, 0x3de5: 0x000a, 0x3de6: 0x000a, 0x3de7: 0x000a, 0x3de8: 0x000a, 0x3de9: 0x000a, + 0x3dea: 0x000a, 0x3deb: 0x000a, + 0x3df0: 0x000a, + // Block 0xf8, offset 0x3e00 + 0x3e00: 0x000a, 0x3e01: 0x000a, 0x3e02: 0x000a, 0x3e03: 0x000a, 0x3e04: 0x000a, 0x3e05: 0x000a, + 0x3e06: 0x000a, 0x3e07: 0x000a, 0x3e08: 0x000a, 0x3e09: 0x000a, 0x3e0a: 0x000a, 0x3e0b: 0x000a, + 0x3e10: 0x000a, 0x3e11: 0x000a, + 0x3e12: 0x000a, 0x3e13: 0x000a, 0x3e14: 0x000a, 0x3e15: 0x000a, 0x3e16: 0x000a, 0x3e17: 0x000a, + 0x3e18: 0x000a, 0x3e19: 0x000a, 0x3e1a: 0x000a, 0x3e1b: 0x000a, 0x3e1c: 0x000a, 0x3e1d: 0x000a, + 0x3e1e: 0x000a, 0x3e1f: 0x000a, 0x3e20: 0x000a, 0x3e21: 0x000a, 0x3e22: 0x000a, 0x3e23: 0x000a, + 0x3e24: 0x000a, 0x3e25: 0x000a, 0x3e26: 0x000a, 0x3e27: 0x000a, 0x3e28: 0x000a, 0x3e29: 0x000a, + 0x3e2a: 0x000a, 0x3e2b: 0x000a, 0x3e2c: 0x000a, 0x3e2d: 0x000a, 0x3e2e: 0x000a, 0x3e2f: 0x000a, + 0x3e30: 0x000a, 0x3e31: 0x000a, 0x3e32: 0x000a, 0x3e33: 0x000a, 0x3e34: 0x000a, 0x3e35: 0x000a, + 0x3e36: 0x000a, 0x3e37: 0x000a, 0x3e38: 0x000a, 0x3e39: 0x000a, 0x3e3a: 0x000a, 0x3e3b: 0x000a, + 0x3e3c: 0x000a, 0x3e3d: 0x000a, 0x3e3e: 0x000a, 0x3e3f: 0x000a, + // Block 0xf9, offset 0x3e40 + 0x3e40: 0x000a, 0x3e41: 0x000a, 0x3e42: 0x000a, 0x3e43: 0x000a, 0x3e44: 0x000a, 0x3e45: 0x000a, + 0x3e46: 0x000a, 0x3e47: 0x000a, + 0x3e50: 0x000a, 0x3e51: 0x000a, + 0x3e52: 0x000a, 0x3e53: 0x000a, 0x3e54: 0x000a, 0x3e55: 0x000a, 0x3e56: 0x000a, 0x3e57: 0x000a, + 0x3e58: 0x000a, 0x3e59: 0x000a, + 0x3e60: 0x000a, 0x3e61: 0x000a, 0x3e62: 0x000a, 0x3e63: 0x000a, + 0x3e64: 0x000a, 0x3e65: 0x000a, 0x3e66: 0x000a, 0x3e67: 0x000a, 0x3e68: 0x000a, 0x3e69: 0x000a, + 0x3e6a: 0x000a, 0x3e6b: 0x000a, 0x3e6c: 0x000a, 0x3e6d: 0x000a, 0x3e6e: 0x000a, 0x3e6f: 0x000a, + 0x3e70: 0x000a, 0x3e71: 0x000a, 0x3e72: 0x000a, 0x3e73: 0x000a, 0x3e74: 0x000a, 0x3e75: 0x000a, + 0x3e76: 0x000a, 0x3e77: 0x000a, 0x3e78: 0x000a, 0x3e79: 0x000a, 0x3e7a: 0x000a, 0x3e7b: 0x000a, + 0x3e7c: 0x000a, 0x3e7d: 0x000a, 0x3e7e: 0x000a, 0x3e7f: 0x000a, + // Block 0xfa, offset 0x3e80 + 0x3e80: 0x000a, 0x3e81: 0x000a, 0x3e82: 0x000a, 0x3e83: 0x000a, 0x3e84: 0x000a, 0x3e85: 0x000a, + 0x3e86: 0x000a, 0x3e87: 0x000a, + 0x3e90: 0x000a, 0x3e91: 0x000a, + 0x3e92: 0x000a, 0x3e93: 0x000a, 0x3e94: 0x000a, 0x3e95: 0x000a, 0x3e96: 0x000a, 0x3e97: 0x000a, + 0x3e98: 0x000a, 0x3e99: 0x000a, 0x3e9a: 0x000a, 0x3e9b: 0x000a, 0x3e9c: 0x000a, 0x3e9d: 0x000a, + 0x3e9e: 0x000a, 0x3e9f: 0x000a, 0x3ea0: 0x000a, 0x3ea1: 0x000a, 0x3ea2: 0x000a, 0x3ea3: 0x000a, + 0x3ea4: 0x000a, 0x3ea5: 0x000a, 0x3ea6: 0x000a, 0x3ea7: 0x000a, 0x3ea8: 0x000a, 0x3ea9: 0x000a, + 0x3eaa: 0x000a, 0x3eab: 0x000a, 0x3eac: 0x000a, 
0x3ead: 0x000a, + 0x3eb0: 0x000a, 0x3eb1: 0x000a, + // Block 0xfb, offset 0x3ec0 + 0x3ec0: 0x000a, 0x3ec1: 0x000a, 0x3ec2: 0x000a, 0x3ec3: 0x000a, 0x3ec4: 0x000a, 0x3ec5: 0x000a, + 0x3ec6: 0x000a, 0x3ec7: 0x000a, 0x3ec8: 0x000a, 0x3ec9: 0x000a, 0x3eca: 0x000a, 0x3ecb: 0x000a, + 0x3ecc: 0x000a, 0x3ecd: 0x000a, 0x3ece: 0x000a, 0x3ecf: 0x000a, 0x3ed0: 0x000a, 0x3ed1: 0x000a, + 0x3ed2: 0x000a, 0x3ed3: 0x000a, + 0x3ee0: 0x000a, 0x3ee1: 0x000a, 0x3ee2: 0x000a, 0x3ee3: 0x000a, + 0x3ee4: 0x000a, 0x3ee5: 0x000a, 0x3ee6: 0x000a, 0x3ee7: 0x000a, 0x3ee8: 0x000a, 0x3ee9: 0x000a, + 0x3eea: 0x000a, 0x3eeb: 0x000a, 0x3eec: 0x000a, 0x3eed: 0x000a, + 0x3ef0: 0x000a, 0x3ef1: 0x000a, 0x3ef2: 0x000a, 0x3ef3: 0x000a, 0x3ef4: 0x000a, 0x3ef5: 0x000a, + 0x3ef6: 0x000a, 0x3ef7: 0x000a, 0x3ef8: 0x000a, 0x3ef9: 0x000a, 0x3efa: 0x000a, 0x3efb: 0x000a, + 0x3efc: 0x000a, + // Block 0xfc, offset 0x3f00 + 0x3f00: 0x000a, 0x3f01: 0x000a, 0x3f02: 0x000a, 0x3f03: 0x000a, 0x3f04: 0x000a, 0x3f05: 0x000a, + 0x3f06: 0x000a, 0x3f07: 0x000a, 0x3f08: 0x000a, + 0x3f10: 0x000a, 0x3f11: 0x000a, + 0x3f12: 0x000a, 0x3f13: 0x000a, 0x3f14: 0x000a, 0x3f15: 0x000a, 0x3f16: 0x000a, 0x3f17: 0x000a, + 0x3f18: 0x000a, 0x3f19: 0x000a, 0x3f1a: 0x000a, 0x3f1b: 0x000a, 0x3f1c: 0x000a, 0x3f1d: 0x000a, + 0x3f1e: 0x000a, 0x3f1f: 0x000a, 0x3f20: 0x000a, 0x3f21: 0x000a, 0x3f22: 0x000a, 0x3f23: 0x000a, + 0x3f24: 0x000a, 0x3f25: 0x000a, 0x3f26: 0x000a, 0x3f27: 0x000a, 0x3f28: 0x000a, 0x3f29: 0x000a, + 0x3f2a: 0x000a, 0x3f2b: 0x000a, 0x3f2c: 0x000a, 0x3f2d: 0x000a, 0x3f2e: 0x000a, 0x3f2f: 0x000a, + 0x3f30: 0x000a, 0x3f31: 0x000a, 0x3f32: 0x000a, 0x3f33: 0x000a, 0x3f34: 0x000a, 0x3f35: 0x000a, + 0x3f36: 0x000a, 0x3f37: 0x000a, 0x3f38: 0x000a, 0x3f39: 0x000a, 0x3f3a: 0x000a, 0x3f3b: 0x000a, + 0x3f3c: 0x000a, 0x3f3d: 0x000a, 0x3f3f: 0x000a, + // Block 0xfd, offset 0x3f40 + 0x3f40: 0x000a, 0x3f41: 0x000a, 0x3f42: 0x000a, 0x3f43: 0x000a, 0x3f44: 0x000a, 0x3f45: 0x000a, + 0x3f4e: 0x000a, 0x3f4f: 0x000a, 0x3f50: 0x000a, 0x3f51: 0x000a, + 0x3f52: 0x000a, 0x3f53: 0x000a, 0x3f54: 0x000a, 0x3f55: 0x000a, 0x3f56: 0x000a, 0x3f57: 0x000a, + 0x3f58: 0x000a, 0x3f59: 0x000a, 0x3f5a: 0x000a, 0x3f5b: 0x000a, + 0x3f60: 0x000a, 0x3f61: 0x000a, 0x3f62: 0x000a, 0x3f63: 0x000a, + 0x3f64: 0x000a, 0x3f65: 0x000a, 0x3f66: 0x000a, 0x3f67: 0x000a, 0x3f68: 0x000a, + 0x3f70: 0x000a, 0x3f71: 0x000a, 0x3f72: 0x000a, 0x3f73: 0x000a, 0x3f74: 0x000a, 0x3f75: 0x000a, + 0x3f76: 0x000a, 0x3f77: 0x000a, 0x3f78: 0x000a, + // Block 0xfe, offset 0x3f80 + 0x3f80: 0x000a, 0x3f81: 0x000a, 0x3f82: 0x000a, 0x3f83: 0x000a, 0x3f84: 0x000a, 0x3f85: 0x000a, + 0x3f86: 0x000a, 0x3f87: 0x000a, 0x3f88: 0x000a, 0x3f89: 0x000a, 0x3f8a: 0x000a, 0x3f8b: 0x000a, + 0x3f8c: 0x000a, 0x3f8d: 0x000a, 0x3f8e: 0x000a, 0x3f8f: 0x000a, 0x3f90: 0x000a, 0x3f91: 0x000a, + 0x3f92: 0x000a, 0x3f94: 0x000a, 0x3f95: 0x000a, 0x3f96: 0x000a, 0x3f97: 0x000a, + 0x3f98: 0x000a, 0x3f99: 0x000a, 0x3f9a: 0x000a, 0x3f9b: 0x000a, 0x3f9c: 0x000a, 0x3f9d: 0x000a, + 0x3f9e: 0x000a, 0x3f9f: 0x000a, 0x3fa0: 0x000a, 0x3fa1: 0x000a, 0x3fa2: 0x000a, 0x3fa3: 0x000a, + 0x3fa4: 0x000a, 0x3fa5: 0x000a, 0x3fa6: 0x000a, 0x3fa7: 0x000a, 0x3fa8: 0x000a, 0x3fa9: 0x000a, + 0x3faa: 0x000a, 0x3fab: 0x000a, 0x3fac: 0x000a, 0x3fad: 0x000a, 0x3fae: 0x000a, 0x3faf: 0x000a, + 0x3fb0: 0x000a, 0x3fb1: 0x000a, 0x3fb2: 0x000a, 0x3fb3: 0x000a, 0x3fb4: 0x000a, 0x3fb5: 0x000a, + 0x3fb6: 0x000a, 0x3fb7: 0x000a, 0x3fb8: 0x000a, 0x3fb9: 0x000a, 0x3fba: 0x000a, 0x3fbb: 0x000a, + 0x3fbc: 0x000a, 0x3fbd: 0x000a, 0x3fbe: 0x000a, 0x3fbf: 0x000a, + // Block 0xff, offset 0x3fc0 + 
0x3fc0: 0x000a, 0x3fc1: 0x000a, 0x3fc2: 0x000a, 0x3fc3: 0x000a, 0x3fc4: 0x000a, 0x3fc5: 0x000a, + 0x3fc6: 0x000a, 0x3fc7: 0x000a, 0x3fc8: 0x000a, 0x3fc9: 0x000a, 0x3fca: 0x000a, + 0x3ff0: 0x0002, 0x3ff1: 0x0002, 0x3ff2: 0x0002, 0x3ff3: 0x0002, 0x3ff4: 0x0002, 0x3ff5: 0x0002, + 0x3ff6: 0x0002, 0x3ff7: 0x0002, 0x3ff8: 0x0002, 0x3ff9: 0x0002, + // Block 0x100, offset 0x4000 + 0x403e: 0x000b, 0x403f: 0x000b, + // Block 0x101, offset 0x4040 + 0x4040: 0x000b, 0x4041: 0x000b, 0x4042: 0x000b, 0x4043: 0x000b, 0x4044: 0x000b, 0x4045: 0x000b, + 0x4046: 0x000b, 0x4047: 0x000b, 0x4048: 0x000b, 0x4049: 0x000b, 0x404a: 0x000b, 0x404b: 0x000b, + 0x404c: 0x000b, 0x404d: 0x000b, 0x404e: 0x000b, 0x404f: 0x000b, 0x4050: 0x000b, 0x4051: 0x000b, + 0x4052: 0x000b, 0x4053: 0x000b, 0x4054: 0x000b, 0x4055: 0x000b, 0x4056: 0x000b, 0x4057: 0x000b, + 0x4058: 0x000b, 0x4059: 0x000b, 0x405a: 0x000b, 0x405b: 0x000b, 0x405c: 0x000b, 0x405d: 0x000b, + 0x405e: 0x000b, 0x405f: 0x000b, 0x4060: 0x000b, 0x4061: 0x000b, 0x4062: 0x000b, 0x4063: 0x000b, + 0x4064: 0x000b, 0x4065: 0x000b, 0x4066: 0x000b, 0x4067: 0x000b, 0x4068: 0x000b, 0x4069: 0x000b, + 0x406a: 0x000b, 0x406b: 0x000b, 0x406c: 0x000b, 0x406d: 0x000b, 0x406e: 0x000b, 0x406f: 0x000b, + 0x4070: 0x000b, 0x4071: 0x000b, 0x4072: 0x000b, 0x4073: 0x000b, 0x4074: 0x000b, 0x4075: 0x000b, + 0x4076: 0x000b, 0x4077: 0x000b, 0x4078: 0x000b, 0x4079: 0x000b, 0x407a: 0x000b, 0x407b: 0x000b, + 0x407c: 0x000b, 0x407d: 0x000b, 0x407e: 0x000b, 0x407f: 0x000b, + // Block 0x102, offset 0x4080 + 0x4080: 0x000c, 0x4081: 0x000c, 0x4082: 0x000c, 0x4083: 0x000c, 0x4084: 0x000c, 0x4085: 0x000c, + 0x4086: 0x000c, 0x4087: 0x000c, 0x4088: 0x000c, 0x4089: 0x000c, 0x408a: 0x000c, 0x408b: 0x000c, + 0x408c: 0x000c, 0x408d: 0x000c, 0x408e: 0x000c, 0x408f: 0x000c, 0x4090: 0x000c, 0x4091: 0x000c, + 0x4092: 0x000c, 0x4093: 0x000c, 0x4094: 0x000c, 0x4095: 0x000c, 0x4096: 0x000c, 0x4097: 0x000c, + 0x4098: 0x000c, 0x4099: 0x000c, 0x409a: 0x000c, 0x409b: 0x000c, 0x409c: 0x000c, 0x409d: 0x000c, + 0x409e: 0x000c, 0x409f: 0x000c, 0x40a0: 0x000c, 0x40a1: 0x000c, 0x40a2: 0x000c, 0x40a3: 0x000c, + 0x40a4: 0x000c, 0x40a5: 0x000c, 0x40a6: 0x000c, 0x40a7: 0x000c, 0x40a8: 0x000c, 0x40a9: 0x000c, + 0x40aa: 0x000c, 0x40ab: 0x000c, 0x40ac: 0x000c, 0x40ad: 0x000c, 0x40ae: 0x000c, 0x40af: 0x000c, + 0x40b0: 0x000b, 0x40b1: 0x000b, 0x40b2: 0x000b, 0x40b3: 0x000b, 0x40b4: 0x000b, 0x40b5: 0x000b, + 0x40b6: 0x000b, 0x40b7: 0x000b, 0x40b8: 0x000b, 0x40b9: 0x000b, 0x40ba: 0x000b, 0x40bb: 0x000b, + 0x40bc: 0x000b, 0x40bd: 0x000b, 0x40be: 0x000b, 0x40bf: 0x000b, +} + +// bidiIndex: 26 blocks, 1664 entries, 3328 bytes +// Block 0 is the zero block. 
+var bidiIndex = [1664]uint16{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x01, 0xc3: 0x02, + 0xca: 0x03, 0xcb: 0x04, 0xcc: 0x05, 0xcd: 0x06, 0xce: 0x07, 0xcf: 0x08, + 0xd2: 0x09, 0xd6: 0x0a, 0xd7: 0x0b, + 0xd8: 0x0c, 0xd9: 0x0d, 0xda: 0x0e, 0xdb: 0x0f, 0xdc: 0x10, 0xdd: 0x11, 0xde: 0x12, 0xdf: 0x13, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, 0xe4: 0x06, + 0xea: 0x07, 0xef: 0x08, + 0xf0: 0x13, 0xf1: 0x14, 0xf2: 0x14, 0xf3: 0x16, 0xf4: 0x17, + // Block 0x4, offset 0x100 + 0x120: 0x14, 0x121: 0x15, 0x122: 0x16, 0x123: 0x17, 0x124: 0x18, 0x125: 0x19, 0x126: 0x1a, 0x127: 0x1b, + 0x128: 0x1c, 0x129: 0x1d, 0x12a: 0x1c, 0x12b: 0x1e, 0x12c: 0x1f, 0x12d: 0x20, 0x12e: 0x21, 0x12f: 0x22, + 0x130: 0x23, 0x131: 0x24, 0x132: 0x1a, 0x133: 0x25, 0x134: 0x26, 0x135: 0x27, 0x136: 0x28, 0x137: 0x29, + 0x138: 0x2a, 0x139: 0x2b, 0x13a: 0x2c, 0x13b: 0x2d, 0x13c: 0x2e, 0x13d: 0x2f, 0x13e: 0x30, 0x13f: 0x31, + // Block 0x5, offset 0x140 + 0x140: 0x32, 0x141: 0x33, 0x142: 0x34, + 0x14d: 0x35, 0x14e: 0x36, + 0x150: 0x37, + 0x15a: 0x38, 0x15c: 0x39, 0x15d: 0x3a, 0x15e: 0x3b, 0x15f: 0x3c, + 0x160: 0x3d, 0x162: 0x3e, 0x164: 0x3f, 0x165: 0x40, 0x167: 0x41, + 0x168: 0x42, 0x169: 0x43, 0x16a: 0x44, 0x16b: 0x45, 0x16c: 0x46, 0x16d: 0x47, 0x16e: 0x48, 0x16f: 0x49, + 0x170: 0x4a, 0x173: 0x4b, 0x177: 0x05, + 0x17e: 0x4c, 0x17f: 0x4d, + // Block 0x6, offset 0x180 + 0x180: 0x4e, 0x181: 0x4f, 0x182: 0x50, 0x183: 0x51, 0x184: 0x52, 0x185: 0x53, 0x186: 0x54, 0x187: 0x55, + 0x188: 0x56, 0x189: 0x55, 0x18a: 0x55, 0x18b: 0x55, 0x18c: 0x57, 0x18d: 0x58, 0x18e: 0x59, 0x18f: 0x55, + 0x190: 0x5a, 0x191: 0x5b, 0x192: 0x5c, 0x193: 0x5d, 0x194: 0x55, 0x195: 0x55, 0x196: 0x55, 0x197: 0x55, + 0x198: 0x55, 0x199: 0x55, 0x19a: 0x5e, 0x19b: 0x55, 0x19c: 0x55, 0x19d: 0x5f, 0x19e: 0x55, 0x19f: 0x60, + 0x1a4: 0x55, 0x1a5: 0x55, 0x1a6: 0x61, 0x1a7: 0x62, + 0x1a8: 0x55, 0x1a9: 0x55, 0x1aa: 0x55, 0x1ab: 0x55, 0x1ac: 0x55, 0x1ad: 0x63, 0x1ae: 0x64, 0x1af: 0x55, + 0x1b3: 0x65, 0x1b5: 0x66, 0x1b7: 0x67, + 0x1b8: 0x68, 0x1b9: 0x69, 0x1ba: 0x6a, 0x1bb: 0x6b, 0x1bc: 0x55, 0x1bd: 0x55, 0x1be: 0x55, 0x1bf: 0x6c, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x6d, 0x1c2: 0x6e, 0x1c3: 0x6f, 0x1c7: 0x70, + 0x1c8: 0x71, 0x1c9: 0x72, 0x1ca: 0x73, 0x1cb: 0x74, 0x1cd: 0x75, 0x1cf: 0x76, + // Block 0x8, offset 0x200 + 0x237: 0x55, + // Block 0x9, offset 0x240 + 0x252: 0x77, 0x253: 0x78, + 0x258: 0x79, 0x259: 0x7a, 0x25a: 0x7b, 0x25b: 0x7c, 0x25c: 0x7d, 0x25e: 0x7e, + 0x260: 0x7f, 0x261: 0x80, 0x263: 0x81, 0x264: 0x82, 0x265: 0x83, 0x266: 0x84, 0x267: 0x85, + 0x268: 0x86, 0x269: 0x87, 0x26a: 0x88, 0x26b: 0x89, 0x26d: 0x8a, 0x26f: 0x8b, + // Block 0xa, offset 0x280 + 0x2ac: 0x8c, 0x2ad: 0x8d, 0x2ae: 0x0e, 0x2af: 0x0e, + 0x2b0: 0x0e, 0x2b1: 0x0e, 0x2b2: 0x0e, 0x2b3: 0x0e, 0x2b4: 0x8e, 0x2b5: 0x8f, 0x2b6: 0x0e, 0x2b7: 0x90, + 0x2b8: 0x91, 0x2b9: 0x92, 0x2ba: 0x0e, 0x2bb: 0x93, 0x2bc: 0x94, 0x2bd: 0x95, 0x2bf: 0x96, + // Block 0xb, offset 0x2c0 + 0x2c4: 0x97, 0x2c5: 0x55, 0x2c6: 0x98, 0x2c7: 0x99, + 0x2cb: 0x9a, 0x2cd: 0x9b, + 0x2e0: 0x9c, 0x2e1: 0x9c, 0x2e2: 0x9c, 0x2e3: 0x9c, 0x2e4: 0x9d, 0x2e5: 0x9c, 0x2e6: 0x9c, 0x2e7: 0x9c, + 0x2e8: 0x9e, 0x2e9: 0x9c, 0x2ea: 0x9c, 0x2eb: 0x9f, 0x2ec: 0xa0, 0x2ed: 0x9c, 0x2ee: 0x9c, 0x2ef: 0x9c, + 0x2f0: 0x9c, 0x2f1: 0x9c, 0x2f2: 0x9c, 0x2f3: 0x9c, 0x2f4: 0xa1, 0x2f5: 0x9c, 0x2f6: 0x9c, 0x2f7: 0x9c, + 0x2f8: 0x9c, 0x2f9: 0xa2, 0x2fa: 0xa3, 0x2fb: 0xa4, 0x2fc: 0xa5, 0x2fd: 0xa6, 0x2fe: 0xa7, 0x2ff: 0x9c, + // Block 0xc, offset 0x300 + 0x300: 0xa8, 0x301: 0xa9, 
0x302: 0xaa, 0x303: 0x21, 0x304: 0xab, 0x305: 0xac, 0x306: 0xad, 0x307: 0xae, + 0x308: 0xaf, 0x309: 0x28, 0x30b: 0xb0, 0x30c: 0x26, 0x30d: 0xb1, + 0x310: 0xb2, 0x311: 0xb3, 0x312: 0xb4, 0x313: 0xb5, 0x316: 0xb6, 0x317: 0xb7, + 0x318: 0xb8, 0x319: 0xb9, 0x31a: 0xba, 0x31c: 0xbb, + 0x320: 0xbc, 0x324: 0xbd, 0x325: 0xbe, 0x327: 0xbf, + 0x328: 0xc0, 0x329: 0xc1, 0x32a: 0xc2, + 0x330: 0xc3, 0x332: 0xc4, 0x334: 0xc5, 0x335: 0xc6, 0x336: 0xc7, + 0x33b: 0xc8, 0x33c: 0xc9, 0x33d: 0xca, 0x33f: 0xcb, + // Block 0xd, offset 0x340 + 0x351: 0xcc, + // Block 0xe, offset 0x380 + 0x3ab: 0xcd, 0x3ac: 0xce, + 0x3bd: 0xcf, 0x3be: 0xd0, 0x3bf: 0xd1, + // Block 0xf, offset 0x3c0 + 0x3f2: 0xd2, + // Block 0x10, offset 0x400 + 0x43c: 0xd3, 0x43d: 0xd4, + // Block 0x11, offset 0x440 + 0x445: 0xd5, 0x446: 0xd6, 0x447: 0xd7, + 0x448: 0x55, 0x449: 0xd8, 0x44c: 0x55, 0x44d: 0xd9, + 0x45b: 0xda, 0x45c: 0xdb, 0x45d: 0xdc, 0x45e: 0xdd, 0x45f: 0xde, + 0x468: 0xdf, 0x469: 0xe0, 0x46a: 0xe1, + // Block 0x12, offset 0x480 + 0x480: 0xe2, 0x482: 0xcf, 0x484: 0xce, + 0x48a: 0xe3, 0x48b: 0xe4, + 0x493: 0xe5, + 0x4a0: 0x9c, 0x4a1: 0x9c, 0x4a2: 0x9c, 0x4a3: 0xe6, 0x4a4: 0x9c, 0x4a5: 0xe7, 0x4a6: 0x9c, 0x4a7: 0x9c, + 0x4a8: 0x9c, 0x4a9: 0x9c, 0x4aa: 0x9c, 0x4ab: 0x9c, 0x4ac: 0x9c, 0x4ad: 0x9c, 0x4ae: 0x9c, 0x4af: 0x9c, + 0x4b0: 0x9c, 0x4b1: 0xe8, 0x4b2: 0xe9, 0x4b3: 0x9c, 0x4b4: 0xea, 0x4b5: 0x9c, 0x4b6: 0x9c, 0x4b7: 0x9c, + 0x4b8: 0x0e, 0x4b9: 0x0e, 0x4ba: 0x0e, 0x4bb: 0xeb, 0x4bc: 0x9c, 0x4bd: 0x9c, 0x4be: 0x9c, 0x4bf: 0x9c, + // Block 0x13, offset 0x4c0 + 0x4c0: 0xec, 0x4c1: 0x55, 0x4c2: 0xed, 0x4c3: 0xee, 0x4c4: 0xef, 0x4c5: 0xf0, 0x4c6: 0xf1, + 0x4c9: 0xf2, 0x4cc: 0x55, 0x4cd: 0x55, 0x4ce: 0x55, 0x4cf: 0x55, + 0x4d0: 0x55, 0x4d1: 0x55, 0x4d2: 0x55, 0x4d3: 0x55, 0x4d4: 0x55, 0x4d5: 0x55, 0x4d6: 0x55, 0x4d7: 0x55, + 0x4d8: 0x55, 0x4d9: 0x55, 0x4da: 0x55, 0x4db: 0xf3, 0x4dc: 0x55, 0x4dd: 0xf4, 0x4de: 0x55, 0x4df: 0xf5, + 0x4e0: 0xf6, 0x4e1: 0xf7, 0x4e2: 0xf8, 0x4e4: 0x55, 0x4e5: 0x55, 0x4e6: 0x55, 0x4e7: 0x55, + 0x4e8: 0x55, 0x4e9: 0xf9, 0x4ea: 0xfa, 0x4eb: 0xfb, 0x4ec: 0x55, 0x4ed: 0x55, 0x4ee: 0xfc, 0x4ef: 0xfd, + 0x4ff: 0xfe, + // Block 0x14, offset 0x500 + 0x53f: 0xfe, + // Block 0x15, offset 0x540 + 0x550: 0x09, 0x551: 0x0a, 0x553: 0x0b, 0x556: 0x0c, + 0x55b: 0x0d, 0x55c: 0x0e, 0x55d: 0x0f, 0x55e: 0x10, 0x55f: 0x11, + 0x56f: 0x12, + 0x57f: 0x12, + // Block 0x16, offset 0x580 + 0x58f: 0x12, + 0x59f: 0x12, + 0x5af: 0x12, + 0x5bf: 0x12, + // Block 0x17, offset 0x5c0 + 0x5c0: 0xff, 0x5c1: 0xff, 0x5c2: 0xff, 0x5c3: 0xff, 0x5c4: 0x05, 0x5c5: 0x05, 0x5c6: 0x05, 0x5c7: 0x100, + 0x5c8: 0xff, 0x5c9: 0xff, 0x5ca: 0xff, 0x5cb: 0xff, 0x5cc: 0xff, 0x5cd: 0xff, 0x5ce: 0xff, 0x5cf: 0xff, + 0x5d0: 0xff, 0x5d1: 0xff, 0x5d2: 0xff, 0x5d3: 0xff, 0x5d4: 0xff, 0x5d5: 0xff, 0x5d6: 0xff, 0x5d7: 0xff, + 0x5d8: 0xff, 0x5d9: 0xff, 0x5da: 0xff, 0x5db: 0xff, 0x5dc: 0xff, 0x5dd: 0xff, 0x5de: 0xff, 0x5df: 0xff, + 0x5e0: 0xff, 0x5e1: 0xff, 0x5e2: 0xff, 0x5e3: 0xff, 0x5e4: 0xff, 0x5e5: 0xff, 0x5e6: 0xff, 0x5e7: 0xff, + 0x5e8: 0xff, 0x5e9: 0xff, 0x5ea: 0xff, 0x5eb: 0xff, 0x5ec: 0xff, 0x5ed: 0xff, 0x5ee: 0xff, 0x5ef: 0xff, + 0x5f0: 0xff, 0x5f1: 0xff, 0x5f2: 0xff, 0x5f3: 0xff, 0x5f4: 0xff, 0x5f5: 0xff, 0x5f6: 0xff, 0x5f7: 0xff, + 0x5f8: 0xff, 0x5f9: 0xff, 0x5fa: 0xff, 0x5fb: 0xff, 0x5fc: 0xff, 0x5fd: 0xff, 0x5fe: 0xff, 0x5ff: 0xff, + // Block 0x18, offset 0x600 + 0x60f: 0x12, + 0x61f: 0x12, + 0x620: 0x15, + 0x62f: 0x12, + 0x63f: 0x12, + // Block 0x19, offset 0x640 + 0x64f: 0x12, +} + +// Total table size 19960 bytes (19KiB); checksum: F50EF68C diff -Nru 
temporal-1.21.5-1/src/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go temporal-1.22.5/src/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go --- temporal-1.21.5-1/src/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go 2024-02-23 09:46:14.000000000 +0000 @@ -1,7 +1,7 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. -//go:build go1.16 -// +build go1.16 +//go:build go1.16 && !go1.21 +// +build go1.16,!go1.21 package norm diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/text/unicode/norm/tables15.0.0.go temporal-1.22.5/src/vendor/golang.org/x/text/unicode/norm/tables15.0.0.go --- temporal-1.21.5-1/src/vendor/golang.org/x/text/unicode/norm/tables15.0.0.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/text/unicode/norm/tables15.0.0.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,7908 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +//go:build go1.21 +// +build go1.21 + +package norm + +import "sync" + +const ( + // Version is the Unicode edition from which the tables are derived. + Version = "15.0.0" + + // MaxTransformChunkSize indicates the maximum number of bytes that Transform + // may need to write atomically for any Form. Making a destination buffer at + // least this size ensures that Transform can always make progress and that + // the user does not need to grow the buffer on an ErrShortDst. + MaxTransformChunkSize = 35 + maxNonStarters*4 +) + +var ccc = [56]uint8{ + 0, 1, 6, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, + 28, 29, 30, 31, 32, 33, 34, 35, + 36, 84, 91, 103, 107, 118, 122, 129, + 130, 132, 202, 214, 216, 218, 220, 222, + 224, 226, 228, 230, 232, 233, 234, 240, +} + +const ( + firstMulti = 0x199A + firstCCC = 0x2DD5 + endMulti = 0x30A1 + firstLeadingCCC = 0x4AEF + firstCCCZeroExcept = 0x4BB9 + firstStarterWithNLead = 0x4BE0 + lastDecomp = 0x4BE2 + maxDecomp = 0x8000 +) + +// decomps: 19426 bytes +var decomps = [...]byte{ + // Bytes 0 - 3f + 0x00, 0x41, 0x20, 0x41, 0x21, 0x41, 0x22, 0x41, + 0x23, 0x41, 0x24, 0x41, 0x25, 0x41, 0x26, 0x41, + 0x27, 0x41, 0x28, 0x41, 0x29, 0x41, 0x2A, 0x41, + 0x2B, 0x41, 0x2C, 0x41, 0x2D, 0x41, 0x2E, 0x41, + 0x2F, 0x41, 0x30, 0x41, 0x31, 0x41, 0x32, 0x41, + 0x33, 0x41, 0x34, 0x41, 0x35, 0x41, 0x36, 0x41, + 0x37, 0x41, 0x38, 0x41, 0x39, 0x41, 0x3A, 0x41, + 0x3B, 0x41, 0x3C, 0x41, 0x3D, 0x41, 0x3E, 0x41, + // Bytes 40 - 7f + 0x3F, 0x41, 0x40, 0x41, 0x41, 0x41, 0x42, 0x41, + 0x43, 0x41, 0x44, 0x41, 0x45, 0x41, 0x46, 0x41, + 0x47, 0x41, 0x48, 0x41, 0x49, 0x41, 0x4A, 0x41, + 0x4B, 0x41, 0x4C, 0x41, 0x4D, 0x41, 0x4E, 0x41, + 0x4F, 0x41, 0x50, 0x41, 0x51, 0x41, 0x52, 0x41, + 0x53, 0x41, 0x54, 0x41, 0x55, 0x41, 0x56, 0x41, + 0x57, 0x41, 0x58, 0x41, 0x59, 0x41, 0x5A, 0x41, + 0x5B, 0x41, 0x5C, 0x41, 0x5D, 0x41, 0x5E, 0x41, + // Bytes 80 - bf + 0x5F, 0x41, 0x60, 0x41, 0x61, 0x41, 0x62, 0x41, + 0x63, 0x41, 0x64, 0x41, 0x65, 0x41, 0x66, 0x41, + 0x67, 0x41, 0x68, 0x41, 0x69, 0x41, 0x6A, 0x41, + 0x6B, 0x41, 0x6C, 0x41, 0x6D, 0x41, 0x6E, 0x41, + 0x6F, 0x41, 0x70, 0x41, 0x71, 0x41, 0x72, 0x41, + 0x73, 0x41, 0x74, 0x41, 0x75, 0x41, 0x76, 0x41, + 0x77, 0x41, 0x78, 0x41, 0x79, 0x41, 0x7A, 0x41, + 0x7B, 0x41, 0x7C, 0x41, 0x7D, 0x41, 0x7E, 0x42, + // Bytes c0 - ff + 0xC2, 0xA2, 0x42, 0xC2, 0xA3, 0x42, 0xC2, 0xA5, + 0x42, 0xC2, 0xA6, 0x42, 0xC2, 0xAC, 0x42, 0xC2, + 0xB7, 0x42, 0xC3, 0x86, 0x42, 0xC3, 
0xA6, 0x42, + 0xC3, 0xB0, 0x42, 0xC3, 0xB8, 0x42, 0xC4, 0xA6, + 0x42, 0xC4, 0xA7, 0x42, 0xC4, 0xB1, 0x42, 0xC5, + 0x8B, 0x42, 0xC5, 0x93, 0x42, 0xC6, 0x8E, 0x42, + 0xC6, 0x90, 0x42, 0xC6, 0xAB, 0x42, 0xC7, 0x80, + 0x42, 0xC7, 0x81, 0x42, 0xC7, 0x82, 0x42, 0xC8, + // Bytes 100 - 13f + 0xA2, 0x42, 0xC8, 0xB7, 0x42, 0xC9, 0x90, 0x42, + 0xC9, 0x91, 0x42, 0xC9, 0x92, 0x42, 0xC9, 0x93, + 0x42, 0xC9, 0x94, 0x42, 0xC9, 0x95, 0x42, 0xC9, + 0x96, 0x42, 0xC9, 0x97, 0x42, 0xC9, 0x98, 0x42, + 0xC9, 0x99, 0x42, 0xC9, 0x9B, 0x42, 0xC9, 0x9C, + 0x42, 0xC9, 0x9E, 0x42, 0xC9, 0x9F, 0x42, 0xC9, + 0xA0, 0x42, 0xC9, 0xA1, 0x42, 0xC9, 0xA2, 0x42, + 0xC9, 0xA3, 0x42, 0xC9, 0xA4, 0x42, 0xC9, 0xA5, + // Bytes 140 - 17f + 0x42, 0xC9, 0xA6, 0x42, 0xC9, 0xA7, 0x42, 0xC9, + 0xA8, 0x42, 0xC9, 0xA9, 0x42, 0xC9, 0xAA, 0x42, + 0xC9, 0xAB, 0x42, 0xC9, 0xAC, 0x42, 0xC9, 0xAD, + 0x42, 0xC9, 0xAE, 0x42, 0xC9, 0xAF, 0x42, 0xC9, + 0xB0, 0x42, 0xC9, 0xB1, 0x42, 0xC9, 0xB2, 0x42, + 0xC9, 0xB3, 0x42, 0xC9, 0xB4, 0x42, 0xC9, 0xB5, + 0x42, 0xC9, 0xB6, 0x42, 0xC9, 0xB7, 0x42, 0xC9, + 0xB8, 0x42, 0xC9, 0xB9, 0x42, 0xC9, 0xBA, 0x42, + // Bytes 180 - 1bf + 0xC9, 0xBB, 0x42, 0xC9, 0xBD, 0x42, 0xC9, 0xBE, + 0x42, 0xCA, 0x80, 0x42, 0xCA, 0x81, 0x42, 0xCA, + 0x82, 0x42, 0xCA, 0x83, 0x42, 0xCA, 0x84, 0x42, + 0xCA, 0x88, 0x42, 0xCA, 0x89, 0x42, 0xCA, 0x8A, + 0x42, 0xCA, 0x8B, 0x42, 0xCA, 0x8C, 0x42, 0xCA, + 0x8D, 0x42, 0xCA, 0x8E, 0x42, 0xCA, 0x8F, 0x42, + 0xCA, 0x90, 0x42, 0xCA, 0x91, 0x42, 0xCA, 0x92, + 0x42, 0xCA, 0x95, 0x42, 0xCA, 0x98, 0x42, 0xCA, + // Bytes 1c0 - 1ff + 0x99, 0x42, 0xCA, 0x9B, 0x42, 0xCA, 0x9C, 0x42, + 0xCA, 0x9D, 0x42, 0xCA, 0x9F, 0x42, 0xCA, 0xA1, + 0x42, 0xCA, 0xA2, 0x42, 0xCA, 0xA3, 0x42, 0xCA, + 0xA4, 0x42, 0xCA, 0xA5, 0x42, 0xCA, 0xA6, 0x42, + 0xCA, 0xA7, 0x42, 0xCA, 0xA8, 0x42, 0xCA, 0xA9, + 0x42, 0xCA, 0xAA, 0x42, 0xCA, 0xAB, 0x42, 0xCA, + 0xB9, 0x42, 0xCB, 0x90, 0x42, 0xCB, 0x91, 0x42, + 0xCE, 0x91, 0x42, 0xCE, 0x92, 0x42, 0xCE, 0x93, + // Bytes 200 - 23f + 0x42, 0xCE, 0x94, 0x42, 0xCE, 0x95, 0x42, 0xCE, + 0x96, 0x42, 0xCE, 0x97, 0x42, 0xCE, 0x98, 0x42, + 0xCE, 0x99, 0x42, 0xCE, 0x9A, 0x42, 0xCE, 0x9B, + 0x42, 0xCE, 0x9C, 0x42, 0xCE, 0x9D, 0x42, 0xCE, + 0x9E, 0x42, 0xCE, 0x9F, 0x42, 0xCE, 0xA0, 0x42, + 0xCE, 0xA1, 0x42, 0xCE, 0xA3, 0x42, 0xCE, 0xA4, + 0x42, 0xCE, 0xA5, 0x42, 0xCE, 0xA6, 0x42, 0xCE, + 0xA7, 0x42, 0xCE, 0xA8, 0x42, 0xCE, 0xA9, 0x42, + // Bytes 240 - 27f + 0xCE, 0xB1, 0x42, 0xCE, 0xB2, 0x42, 0xCE, 0xB3, + 0x42, 0xCE, 0xB4, 0x42, 0xCE, 0xB5, 0x42, 0xCE, + 0xB6, 0x42, 0xCE, 0xB7, 0x42, 0xCE, 0xB8, 0x42, + 0xCE, 0xB9, 0x42, 0xCE, 0xBA, 0x42, 0xCE, 0xBB, + 0x42, 0xCE, 0xBC, 0x42, 0xCE, 0xBD, 0x42, 0xCE, + 0xBE, 0x42, 0xCE, 0xBF, 0x42, 0xCF, 0x80, 0x42, + 0xCF, 0x81, 0x42, 0xCF, 0x82, 0x42, 0xCF, 0x83, + 0x42, 0xCF, 0x84, 0x42, 0xCF, 0x85, 0x42, 0xCF, + // Bytes 280 - 2bf + 0x86, 0x42, 0xCF, 0x87, 0x42, 0xCF, 0x88, 0x42, + 0xCF, 0x89, 0x42, 0xCF, 0x9C, 0x42, 0xCF, 0x9D, + 0x42, 0xD0, 0xB0, 0x42, 0xD0, 0xB1, 0x42, 0xD0, + 0xB2, 0x42, 0xD0, 0xB3, 0x42, 0xD0, 0xB4, 0x42, + 0xD0, 0xB5, 0x42, 0xD0, 0xB6, 0x42, 0xD0, 0xB7, + 0x42, 0xD0, 0xB8, 0x42, 0xD0, 0xBA, 0x42, 0xD0, + 0xBB, 0x42, 0xD0, 0xBC, 0x42, 0xD0, 0xBD, 0x42, + 0xD0, 0xBE, 0x42, 0xD0, 0xBF, 0x42, 0xD1, 0x80, + // Bytes 2c0 - 2ff + 0x42, 0xD1, 0x81, 0x42, 0xD1, 0x82, 0x42, 0xD1, + 0x83, 0x42, 0xD1, 0x84, 0x42, 0xD1, 0x85, 0x42, + 0xD1, 0x86, 0x42, 0xD1, 0x87, 0x42, 0xD1, 0x88, + 0x42, 0xD1, 0x8A, 0x42, 0xD1, 0x8B, 0x42, 0xD1, + 0x8C, 0x42, 0xD1, 0x8D, 0x42, 0xD1, 0x8E, 0x42, + 0xD1, 0x95, 0x42, 0xD1, 0x96, 0x42, 0xD1, 0x98, + 0x42, 0xD1, 0x9F, 
0x42, 0xD2, 0x91, 0x42, 0xD2, + 0xAB, 0x42, 0xD2, 0xAF, 0x42, 0xD2, 0xB1, 0x42, + // Bytes 300 - 33f + 0xD3, 0x8F, 0x42, 0xD3, 0x99, 0x42, 0xD3, 0xA9, + 0x42, 0xD7, 0x90, 0x42, 0xD7, 0x91, 0x42, 0xD7, + 0x92, 0x42, 0xD7, 0x93, 0x42, 0xD7, 0x94, 0x42, + 0xD7, 0x9B, 0x42, 0xD7, 0x9C, 0x42, 0xD7, 0x9D, + 0x42, 0xD7, 0xA2, 0x42, 0xD7, 0xA8, 0x42, 0xD7, + 0xAA, 0x42, 0xD8, 0xA1, 0x42, 0xD8, 0xA7, 0x42, + 0xD8, 0xA8, 0x42, 0xD8, 0xA9, 0x42, 0xD8, 0xAA, + 0x42, 0xD8, 0xAB, 0x42, 0xD8, 0xAC, 0x42, 0xD8, + // Bytes 340 - 37f + 0xAD, 0x42, 0xD8, 0xAE, 0x42, 0xD8, 0xAF, 0x42, + 0xD8, 0xB0, 0x42, 0xD8, 0xB1, 0x42, 0xD8, 0xB2, + 0x42, 0xD8, 0xB3, 0x42, 0xD8, 0xB4, 0x42, 0xD8, + 0xB5, 0x42, 0xD8, 0xB6, 0x42, 0xD8, 0xB7, 0x42, + 0xD8, 0xB8, 0x42, 0xD8, 0xB9, 0x42, 0xD8, 0xBA, + 0x42, 0xD9, 0x81, 0x42, 0xD9, 0x82, 0x42, 0xD9, + 0x83, 0x42, 0xD9, 0x84, 0x42, 0xD9, 0x85, 0x42, + 0xD9, 0x86, 0x42, 0xD9, 0x87, 0x42, 0xD9, 0x88, + // Bytes 380 - 3bf + 0x42, 0xD9, 0x89, 0x42, 0xD9, 0x8A, 0x42, 0xD9, + 0xAE, 0x42, 0xD9, 0xAF, 0x42, 0xD9, 0xB1, 0x42, + 0xD9, 0xB9, 0x42, 0xD9, 0xBA, 0x42, 0xD9, 0xBB, + 0x42, 0xD9, 0xBE, 0x42, 0xD9, 0xBF, 0x42, 0xDA, + 0x80, 0x42, 0xDA, 0x83, 0x42, 0xDA, 0x84, 0x42, + 0xDA, 0x86, 0x42, 0xDA, 0x87, 0x42, 0xDA, 0x88, + 0x42, 0xDA, 0x8C, 0x42, 0xDA, 0x8D, 0x42, 0xDA, + 0x8E, 0x42, 0xDA, 0x91, 0x42, 0xDA, 0x98, 0x42, + // Bytes 3c0 - 3ff + 0xDA, 0xA1, 0x42, 0xDA, 0xA4, 0x42, 0xDA, 0xA6, + 0x42, 0xDA, 0xA9, 0x42, 0xDA, 0xAD, 0x42, 0xDA, + 0xAF, 0x42, 0xDA, 0xB1, 0x42, 0xDA, 0xB3, 0x42, + 0xDA, 0xBA, 0x42, 0xDA, 0xBB, 0x42, 0xDA, 0xBE, + 0x42, 0xDB, 0x81, 0x42, 0xDB, 0x85, 0x42, 0xDB, + 0x86, 0x42, 0xDB, 0x87, 0x42, 0xDB, 0x88, 0x42, + 0xDB, 0x89, 0x42, 0xDB, 0x8B, 0x42, 0xDB, 0x8C, + 0x42, 0xDB, 0x90, 0x42, 0xDB, 0x92, 0x43, 0xE0, + // Bytes 400 - 43f + 0xBC, 0x8B, 0x43, 0xE1, 0x83, 0x9C, 0x43, 0xE1, + 0x84, 0x80, 0x43, 0xE1, 0x84, 0x81, 0x43, 0xE1, + 0x84, 0x82, 0x43, 0xE1, 0x84, 0x83, 0x43, 0xE1, + 0x84, 0x84, 0x43, 0xE1, 0x84, 0x85, 0x43, 0xE1, + 0x84, 0x86, 0x43, 0xE1, 0x84, 0x87, 0x43, 0xE1, + 0x84, 0x88, 0x43, 0xE1, 0x84, 0x89, 0x43, 0xE1, + 0x84, 0x8A, 0x43, 0xE1, 0x84, 0x8B, 0x43, 0xE1, + 0x84, 0x8C, 0x43, 0xE1, 0x84, 0x8D, 0x43, 0xE1, + // Bytes 440 - 47f + 0x84, 0x8E, 0x43, 0xE1, 0x84, 0x8F, 0x43, 0xE1, + 0x84, 0x90, 0x43, 0xE1, 0x84, 0x91, 0x43, 0xE1, + 0x84, 0x92, 0x43, 0xE1, 0x84, 0x94, 0x43, 0xE1, + 0x84, 0x95, 0x43, 0xE1, 0x84, 0x9A, 0x43, 0xE1, + 0x84, 0x9C, 0x43, 0xE1, 0x84, 0x9D, 0x43, 0xE1, + 0x84, 0x9E, 0x43, 0xE1, 0x84, 0xA0, 0x43, 0xE1, + 0x84, 0xA1, 0x43, 0xE1, 0x84, 0xA2, 0x43, 0xE1, + 0x84, 0xA3, 0x43, 0xE1, 0x84, 0xA7, 0x43, 0xE1, + // Bytes 480 - 4bf + 0x84, 0xA9, 0x43, 0xE1, 0x84, 0xAB, 0x43, 0xE1, + 0x84, 0xAC, 0x43, 0xE1, 0x84, 0xAD, 0x43, 0xE1, + 0x84, 0xAE, 0x43, 0xE1, 0x84, 0xAF, 0x43, 0xE1, + 0x84, 0xB2, 0x43, 0xE1, 0x84, 0xB6, 0x43, 0xE1, + 0x85, 0x80, 0x43, 0xE1, 0x85, 0x87, 0x43, 0xE1, + 0x85, 0x8C, 0x43, 0xE1, 0x85, 0x97, 0x43, 0xE1, + 0x85, 0x98, 0x43, 0xE1, 0x85, 0x99, 0x43, 0xE1, + 0x85, 0xA0, 0x43, 0xE1, 0x86, 0x84, 0x43, 0xE1, + // Bytes 4c0 - 4ff + 0x86, 0x85, 0x43, 0xE1, 0x86, 0x88, 0x43, 0xE1, + 0x86, 0x91, 0x43, 0xE1, 0x86, 0x92, 0x43, 0xE1, + 0x86, 0x94, 0x43, 0xE1, 0x86, 0x9E, 0x43, 0xE1, + 0x86, 0xA1, 0x43, 0xE1, 0x87, 0x87, 0x43, 0xE1, + 0x87, 0x88, 0x43, 0xE1, 0x87, 0x8C, 0x43, 0xE1, + 0x87, 0x8E, 0x43, 0xE1, 0x87, 0x93, 0x43, 0xE1, + 0x87, 0x97, 0x43, 0xE1, 0x87, 0x99, 0x43, 0xE1, + 0x87, 0x9D, 0x43, 0xE1, 0x87, 0x9F, 0x43, 0xE1, + // Bytes 500 - 53f + 0x87, 0xB1, 0x43, 0xE1, 0x87, 0xB2, 0x43, 0xE1, + 0xB4, 0x82, 0x43, 0xE1, 0xB4, 
0x96, 0x43, 0xE1, + 0xB4, 0x97, 0x43, 0xE1, 0xB4, 0x9C, 0x43, 0xE1, + 0xB4, 0x9D, 0x43, 0xE1, 0xB4, 0xA5, 0x43, 0xE1, + 0xB5, 0xBB, 0x43, 0xE1, 0xB6, 0x85, 0x43, 0xE1, + 0xB6, 0x91, 0x43, 0xE2, 0x80, 0x82, 0x43, 0xE2, + 0x80, 0x83, 0x43, 0xE2, 0x80, 0x90, 0x43, 0xE2, + 0x80, 0x93, 0x43, 0xE2, 0x80, 0x94, 0x43, 0xE2, + // Bytes 540 - 57f + 0x82, 0xA9, 0x43, 0xE2, 0x86, 0x90, 0x43, 0xE2, + 0x86, 0x91, 0x43, 0xE2, 0x86, 0x92, 0x43, 0xE2, + 0x86, 0x93, 0x43, 0xE2, 0x88, 0x82, 0x43, 0xE2, + 0x88, 0x87, 0x43, 0xE2, 0x88, 0x91, 0x43, 0xE2, + 0x88, 0x92, 0x43, 0xE2, 0x94, 0x82, 0x43, 0xE2, + 0x96, 0xA0, 0x43, 0xE2, 0x97, 0x8B, 0x43, 0xE2, + 0xA6, 0x85, 0x43, 0xE2, 0xA6, 0x86, 0x43, 0xE2, + 0xB1, 0xB1, 0x43, 0xE2, 0xB5, 0xA1, 0x43, 0xE3, + // Bytes 580 - 5bf + 0x80, 0x81, 0x43, 0xE3, 0x80, 0x82, 0x43, 0xE3, + 0x80, 0x88, 0x43, 0xE3, 0x80, 0x89, 0x43, 0xE3, + 0x80, 0x8A, 0x43, 0xE3, 0x80, 0x8B, 0x43, 0xE3, + 0x80, 0x8C, 0x43, 0xE3, 0x80, 0x8D, 0x43, 0xE3, + 0x80, 0x8E, 0x43, 0xE3, 0x80, 0x8F, 0x43, 0xE3, + 0x80, 0x90, 0x43, 0xE3, 0x80, 0x91, 0x43, 0xE3, + 0x80, 0x92, 0x43, 0xE3, 0x80, 0x94, 0x43, 0xE3, + 0x80, 0x95, 0x43, 0xE3, 0x80, 0x96, 0x43, 0xE3, + // Bytes 5c0 - 5ff + 0x80, 0x97, 0x43, 0xE3, 0x82, 0xA1, 0x43, 0xE3, + 0x82, 0xA2, 0x43, 0xE3, 0x82, 0xA3, 0x43, 0xE3, + 0x82, 0xA4, 0x43, 0xE3, 0x82, 0xA5, 0x43, 0xE3, + 0x82, 0xA6, 0x43, 0xE3, 0x82, 0xA7, 0x43, 0xE3, + 0x82, 0xA8, 0x43, 0xE3, 0x82, 0xA9, 0x43, 0xE3, + 0x82, 0xAA, 0x43, 0xE3, 0x82, 0xAB, 0x43, 0xE3, + 0x82, 0xAD, 0x43, 0xE3, 0x82, 0xAF, 0x43, 0xE3, + 0x82, 0xB1, 0x43, 0xE3, 0x82, 0xB3, 0x43, 0xE3, + // Bytes 600 - 63f + 0x82, 0xB5, 0x43, 0xE3, 0x82, 0xB7, 0x43, 0xE3, + 0x82, 0xB9, 0x43, 0xE3, 0x82, 0xBB, 0x43, 0xE3, + 0x82, 0xBD, 0x43, 0xE3, 0x82, 0xBF, 0x43, 0xE3, + 0x83, 0x81, 0x43, 0xE3, 0x83, 0x83, 0x43, 0xE3, + 0x83, 0x84, 0x43, 0xE3, 0x83, 0x86, 0x43, 0xE3, + 0x83, 0x88, 0x43, 0xE3, 0x83, 0x8A, 0x43, 0xE3, + 0x83, 0x8B, 0x43, 0xE3, 0x83, 0x8C, 0x43, 0xE3, + 0x83, 0x8D, 0x43, 0xE3, 0x83, 0x8E, 0x43, 0xE3, + // Bytes 640 - 67f + 0x83, 0x8F, 0x43, 0xE3, 0x83, 0x92, 0x43, 0xE3, + 0x83, 0x95, 0x43, 0xE3, 0x83, 0x98, 0x43, 0xE3, + 0x83, 0x9B, 0x43, 0xE3, 0x83, 0x9E, 0x43, 0xE3, + 0x83, 0x9F, 0x43, 0xE3, 0x83, 0xA0, 0x43, 0xE3, + 0x83, 0xA1, 0x43, 0xE3, 0x83, 0xA2, 0x43, 0xE3, + 0x83, 0xA3, 0x43, 0xE3, 0x83, 0xA4, 0x43, 0xE3, + 0x83, 0xA5, 0x43, 0xE3, 0x83, 0xA6, 0x43, 0xE3, + 0x83, 0xA7, 0x43, 0xE3, 0x83, 0xA8, 0x43, 0xE3, + // Bytes 680 - 6bf + 0x83, 0xA9, 0x43, 0xE3, 0x83, 0xAA, 0x43, 0xE3, + 0x83, 0xAB, 0x43, 0xE3, 0x83, 0xAC, 0x43, 0xE3, + 0x83, 0xAD, 0x43, 0xE3, 0x83, 0xAF, 0x43, 0xE3, + 0x83, 0xB0, 0x43, 0xE3, 0x83, 0xB1, 0x43, 0xE3, + 0x83, 0xB2, 0x43, 0xE3, 0x83, 0xB3, 0x43, 0xE3, + 0x83, 0xBB, 0x43, 0xE3, 0x83, 0xBC, 0x43, 0xE3, + 0x92, 0x9E, 0x43, 0xE3, 0x92, 0xB9, 0x43, 0xE3, + 0x92, 0xBB, 0x43, 0xE3, 0x93, 0x9F, 0x43, 0xE3, + // Bytes 6c0 - 6ff + 0x94, 0x95, 0x43, 0xE3, 0x9B, 0xAE, 0x43, 0xE3, + 0x9B, 0xBC, 0x43, 0xE3, 0x9E, 0x81, 0x43, 0xE3, + 0xA0, 0xAF, 0x43, 0xE3, 0xA1, 0xA2, 0x43, 0xE3, + 0xA1, 0xBC, 0x43, 0xE3, 0xA3, 0x87, 0x43, 0xE3, + 0xA3, 0xA3, 0x43, 0xE3, 0xA4, 0x9C, 0x43, 0xE3, + 0xA4, 0xBA, 0x43, 0xE3, 0xA8, 0xAE, 0x43, 0xE3, + 0xA9, 0xAC, 0x43, 0xE3, 0xAB, 0xA4, 0x43, 0xE3, + 0xAC, 0x88, 0x43, 0xE3, 0xAC, 0x99, 0x43, 0xE3, + // Bytes 700 - 73f + 0xAD, 0x89, 0x43, 0xE3, 0xAE, 0x9D, 0x43, 0xE3, + 0xB0, 0x98, 0x43, 0xE3, 0xB1, 0x8E, 0x43, 0xE3, + 0xB4, 0xB3, 0x43, 0xE3, 0xB6, 0x96, 0x43, 0xE3, + 0xBA, 0xAC, 0x43, 0xE3, 0xBA, 0xB8, 0x43, 0xE3, + 0xBC, 0x9B, 0x43, 0xE3, 0xBF, 0xBC, 0x43, 0xE4, + 0x80, 0x88, 
0x43, 0xE4, 0x80, 0x98, 0x43, 0xE4, + 0x80, 0xB9, 0x43, 0xE4, 0x81, 0x86, 0x43, 0xE4, + 0x82, 0x96, 0x43, 0xE4, 0x83, 0xA3, 0x43, 0xE4, + // Bytes 740 - 77f + 0x84, 0xAF, 0x43, 0xE4, 0x88, 0x82, 0x43, 0xE4, + 0x88, 0xA7, 0x43, 0xE4, 0x8A, 0xA0, 0x43, 0xE4, + 0x8C, 0x81, 0x43, 0xE4, 0x8C, 0xB4, 0x43, 0xE4, + 0x8D, 0x99, 0x43, 0xE4, 0x8F, 0x95, 0x43, 0xE4, + 0x8F, 0x99, 0x43, 0xE4, 0x90, 0x8B, 0x43, 0xE4, + 0x91, 0xAB, 0x43, 0xE4, 0x94, 0xAB, 0x43, 0xE4, + 0x95, 0x9D, 0x43, 0xE4, 0x95, 0xA1, 0x43, 0xE4, + 0x95, 0xAB, 0x43, 0xE4, 0x97, 0x97, 0x43, 0xE4, + // Bytes 780 - 7bf + 0x97, 0xB9, 0x43, 0xE4, 0x98, 0xB5, 0x43, 0xE4, + 0x9A, 0xBE, 0x43, 0xE4, 0x9B, 0x87, 0x43, 0xE4, + 0xA6, 0x95, 0x43, 0xE4, 0xA7, 0xA6, 0x43, 0xE4, + 0xA9, 0xAE, 0x43, 0xE4, 0xA9, 0xB6, 0x43, 0xE4, + 0xAA, 0xB2, 0x43, 0xE4, 0xAC, 0xB3, 0x43, 0xE4, + 0xAF, 0x8E, 0x43, 0xE4, 0xB3, 0x8E, 0x43, 0xE4, + 0xB3, 0xAD, 0x43, 0xE4, 0xB3, 0xB8, 0x43, 0xE4, + 0xB5, 0x96, 0x43, 0xE4, 0xB8, 0x80, 0x43, 0xE4, + // Bytes 7c0 - 7ff + 0xB8, 0x81, 0x43, 0xE4, 0xB8, 0x83, 0x43, 0xE4, + 0xB8, 0x89, 0x43, 0xE4, 0xB8, 0x8A, 0x43, 0xE4, + 0xB8, 0x8B, 0x43, 0xE4, 0xB8, 0x8D, 0x43, 0xE4, + 0xB8, 0x99, 0x43, 0xE4, 0xB8, 0xA6, 0x43, 0xE4, + 0xB8, 0xA8, 0x43, 0xE4, 0xB8, 0xAD, 0x43, 0xE4, + 0xB8, 0xB2, 0x43, 0xE4, 0xB8, 0xB6, 0x43, 0xE4, + 0xB8, 0xB8, 0x43, 0xE4, 0xB8, 0xB9, 0x43, 0xE4, + 0xB8, 0xBD, 0x43, 0xE4, 0xB8, 0xBF, 0x43, 0xE4, + // Bytes 800 - 83f + 0xB9, 0x81, 0x43, 0xE4, 0xB9, 0x99, 0x43, 0xE4, + 0xB9, 0x9D, 0x43, 0xE4, 0xBA, 0x82, 0x43, 0xE4, + 0xBA, 0x85, 0x43, 0xE4, 0xBA, 0x86, 0x43, 0xE4, + 0xBA, 0x8C, 0x43, 0xE4, 0xBA, 0x94, 0x43, 0xE4, + 0xBA, 0xA0, 0x43, 0xE4, 0xBA, 0xA4, 0x43, 0xE4, + 0xBA, 0xAE, 0x43, 0xE4, 0xBA, 0xBA, 0x43, 0xE4, + 0xBB, 0x80, 0x43, 0xE4, 0xBB, 0x8C, 0x43, 0xE4, + 0xBB, 0xA4, 0x43, 0xE4, 0xBC, 0x81, 0x43, 0xE4, + // Bytes 840 - 87f + 0xBC, 0x91, 0x43, 0xE4, 0xBD, 0xA0, 0x43, 0xE4, + 0xBE, 0x80, 0x43, 0xE4, 0xBE, 0x86, 0x43, 0xE4, + 0xBE, 0x8B, 0x43, 0xE4, 0xBE, 0xAE, 0x43, 0xE4, + 0xBE, 0xBB, 0x43, 0xE4, 0xBE, 0xBF, 0x43, 0xE5, + 0x80, 0x82, 0x43, 0xE5, 0x80, 0xAB, 0x43, 0xE5, + 0x81, 0xBA, 0x43, 0xE5, 0x82, 0x99, 0x43, 0xE5, + 0x83, 0x8F, 0x43, 0xE5, 0x83, 0x9A, 0x43, 0xE5, + 0x83, 0xA7, 0x43, 0xE5, 0x84, 0xAA, 0x43, 0xE5, + // Bytes 880 - 8bf + 0x84, 0xBF, 0x43, 0xE5, 0x85, 0x80, 0x43, 0xE5, + 0x85, 0x85, 0x43, 0xE5, 0x85, 0x8D, 0x43, 0xE5, + 0x85, 0x94, 0x43, 0xE5, 0x85, 0xA4, 0x43, 0xE5, + 0x85, 0xA5, 0x43, 0xE5, 0x85, 0xA7, 0x43, 0xE5, + 0x85, 0xA8, 0x43, 0xE5, 0x85, 0xA9, 0x43, 0xE5, + 0x85, 0xAB, 0x43, 0xE5, 0x85, 0xAD, 0x43, 0xE5, + 0x85, 0xB7, 0x43, 0xE5, 0x86, 0x80, 0x43, 0xE5, + 0x86, 0x82, 0x43, 0xE5, 0x86, 0x8D, 0x43, 0xE5, + // Bytes 8c0 - 8ff + 0x86, 0x92, 0x43, 0xE5, 0x86, 0x95, 0x43, 0xE5, + 0x86, 0x96, 0x43, 0xE5, 0x86, 0x97, 0x43, 0xE5, + 0x86, 0x99, 0x43, 0xE5, 0x86, 0xA4, 0x43, 0xE5, + 0x86, 0xAB, 0x43, 0xE5, 0x86, 0xAC, 0x43, 0xE5, + 0x86, 0xB5, 0x43, 0xE5, 0x86, 0xB7, 0x43, 0xE5, + 0x87, 0x89, 0x43, 0xE5, 0x87, 0x8C, 0x43, 0xE5, + 0x87, 0x9C, 0x43, 0xE5, 0x87, 0x9E, 0x43, 0xE5, + 0x87, 0xA0, 0x43, 0xE5, 0x87, 0xB5, 0x43, 0xE5, + // Bytes 900 - 93f + 0x88, 0x80, 0x43, 0xE5, 0x88, 0x83, 0x43, 0xE5, + 0x88, 0x87, 0x43, 0xE5, 0x88, 0x97, 0x43, 0xE5, + 0x88, 0x9D, 0x43, 0xE5, 0x88, 0xA9, 0x43, 0xE5, + 0x88, 0xBA, 0x43, 0xE5, 0x88, 0xBB, 0x43, 0xE5, + 0x89, 0x86, 0x43, 0xE5, 0x89, 0x8D, 0x43, 0xE5, + 0x89, 0xB2, 0x43, 0xE5, 0x89, 0xB7, 0x43, 0xE5, + 0x8A, 0x89, 0x43, 0xE5, 0x8A, 0x9B, 0x43, 0xE5, + 0x8A, 0xA3, 0x43, 0xE5, 0x8A, 0xB3, 0x43, 0xE5, + // Bytes 940 - 97f + 0x8A, 0xB4, 0x43, 0xE5, 
0x8B, 0x87, 0x43, 0xE5, + 0x8B, 0x89, 0x43, 0xE5, 0x8B, 0x92, 0x43, 0xE5, + 0x8B, 0x9E, 0x43, 0xE5, 0x8B, 0xA4, 0x43, 0xE5, + 0x8B, 0xB5, 0x43, 0xE5, 0x8B, 0xB9, 0x43, 0xE5, + 0x8B, 0xBA, 0x43, 0xE5, 0x8C, 0x85, 0x43, 0xE5, + 0x8C, 0x86, 0x43, 0xE5, 0x8C, 0x95, 0x43, 0xE5, + 0x8C, 0x97, 0x43, 0xE5, 0x8C, 0x9A, 0x43, 0xE5, + 0x8C, 0xB8, 0x43, 0xE5, 0x8C, 0xBB, 0x43, 0xE5, + // Bytes 980 - 9bf + 0x8C, 0xBF, 0x43, 0xE5, 0x8D, 0x81, 0x43, 0xE5, + 0x8D, 0x84, 0x43, 0xE5, 0x8D, 0x85, 0x43, 0xE5, + 0x8D, 0x89, 0x43, 0xE5, 0x8D, 0x91, 0x43, 0xE5, + 0x8D, 0x94, 0x43, 0xE5, 0x8D, 0x9A, 0x43, 0xE5, + 0x8D, 0x9C, 0x43, 0xE5, 0x8D, 0xA9, 0x43, 0xE5, + 0x8D, 0xB0, 0x43, 0xE5, 0x8D, 0xB3, 0x43, 0xE5, + 0x8D, 0xB5, 0x43, 0xE5, 0x8D, 0xBD, 0x43, 0xE5, + 0x8D, 0xBF, 0x43, 0xE5, 0x8E, 0x82, 0x43, 0xE5, + // Bytes 9c0 - 9ff + 0x8E, 0xB6, 0x43, 0xE5, 0x8F, 0x83, 0x43, 0xE5, + 0x8F, 0x88, 0x43, 0xE5, 0x8F, 0x8A, 0x43, 0xE5, + 0x8F, 0x8C, 0x43, 0xE5, 0x8F, 0x9F, 0x43, 0xE5, + 0x8F, 0xA3, 0x43, 0xE5, 0x8F, 0xA5, 0x43, 0xE5, + 0x8F, 0xAB, 0x43, 0xE5, 0x8F, 0xAF, 0x43, 0xE5, + 0x8F, 0xB1, 0x43, 0xE5, 0x8F, 0xB3, 0x43, 0xE5, + 0x90, 0x86, 0x43, 0xE5, 0x90, 0x88, 0x43, 0xE5, + 0x90, 0x8D, 0x43, 0xE5, 0x90, 0x8F, 0x43, 0xE5, + // Bytes a00 - a3f + 0x90, 0x9D, 0x43, 0xE5, 0x90, 0xB8, 0x43, 0xE5, + 0x90, 0xB9, 0x43, 0xE5, 0x91, 0x82, 0x43, 0xE5, + 0x91, 0x88, 0x43, 0xE5, 0x91, 0xA8, 0x43, 0xE5, + 0x92, 0x9E, 0x43, 0xE5, 0x92, 0xA2, 0x43, 0xE5, + 0x92, 0xBD, 0x43, 0xE5, 0x93, 0xB6, 0x43, 0xE5, + 0x94, 0x90, 0x43, 0xE5, 0x95, 0x8F, 0x43, 0xE5, + 0x95, 0x93, 0x43, 0xE5, 0x95, 0x95, 0x43, 0xE5, + 0x95, 0xA3, 0x43, 0xE5, 0x96, 0x84, 0x43, 0xE5, + // Bytes a40 - a7f + 0x96, 0x87, 0x43, 0xE5, 0x96, 0x99, 0x43, 0xE5, + 0x96, 0x9D, 0x43, 0xE5, 0x96, 0xAB, 0x43, 0xE5, + 0x96, 0xB3, 0x43, 0xE5, 0x96, 0xB6, 0x43, 0xE5, + 0x97, 0x80, 0x43, 0xE5, 0x97, 0x82, 0x43, 0xE5, + 0x97, 0xA2, 0x43, 0xE5, 0x98, 0x86, 0x43, 0xE5, + 0x99, 0x91, 0x43, 0xE5, 0x99, 0xA8, 0x43, 0xE5, + 0x99, 0xB4, 0x43, 0xE5, 0x9B, 0x97, 0x43, 0xE5, + 0x9B, 0x9B, 0x43, 0xE5, 0x9B, 0xB9, 0x43, 0xE5, + // Bytes a80 - abf + 0x9C, 0x96, 0x43, 0xE5, 0x9C, 0x97, 0x43, 0xE5, + 0x9C, 0x9F, 0x43, 0xE5, 0x9C, 0xB0, 0x43, 0xE5, + 0x9E, 0x8B, 0x43, 0xE5, 0x9F, 0x8E, 0x43, 0xE5, + 0x9F, 0xB4, 0x43, 0xE5, 0xA0, 0x8D, 0x43, 0xE5, + 0xA0, 0xB1, 0x43, 0xE5, 0xA0, 0xB2, 0x43, 0xE5, + 0xA1, 0x80, 0x43, 0xE5, 0xA1, 0x9A, 0x43, 0xE5, + 0xA1, 0x9E, 0x43, 0xE5, 0xA2, 0xA8, 0x43, 0xE5, + 0xA2, 0xAC, 0x43, 0xE5, 0xA2, 0xB3, 0x43, 0xE5, + // Bytes ac0 - aff + 0xA3, 0x98, 0x43, 0xE5, 0xA3, 0x9F, 0x43, 0xE5, + 0xA3, 0xAB, 0x43, 0xE5, 0xA3, 0xAE, 0x43, 0xE5, + 0xA3, 0xB0, 0x43, 0xE5, 0xA3, 0xB2, 0x43, 0xE5, + 0xA3, 0xB7, 0x43, 0xE5, 0xA4, 0x82, 0x43, 0xE5, + 0xA4, 0x86, 0x43, 0xE5, 0xA4, 0x8A, 0x43, 0xE5, + 0xA4, 0x95, 0x43, 0xE5, 0xA4, 0x9A, 0x43, 0xE5, + 0xA4, 0x9C, 0x43, 0xE5, 0xA4, 0xA2, 0x43, 0xE5, + 0xA4, 0xA7, 0x43, 0xE5, 0xA4, 0xA9, 0x43, 0xE5, + // Bytes b00 - b3f + 0xA5, 0x84, 0x43, 0xE5, 0xA5, 0x88, 0x43, 0xE5, + 0xA5, 0x91, 0x43, 0xE5, 0xA5, 0x94, 0x43, 0xE5, + 0xA5, 0xA2, 0x43, 0xE5, 0xA5, 0xB3, 0x43, 0xE5, + 0xA7, 0x98, 0x43, 0xE5, 0xA7, 0xAC, 0x43, 0xE5, + 0xA8, 0x9B, 0x43, 0xE5, 0xA8, 0xA7, 0x43, 0xE5, + 0xA9, 0xA2, 0x43, 0xE5, 0xA9, 0xA6, 0x43, 0xE5, + 0xAA, 0xB5, 0x43, 0xE5, 0xAC, 0x88, 0x43, 0xE5, + 0xAC, 0xA8, 0x43, 0xE5, 0xAC, 0xBE, 0x43, 0xE5, + // Bytes b40 - b7f + 0xAD, 0x90, 0x43, 0xE5, 0xAD, 0x97, 0x43, 0xE5, + 0xAD, 0xA6, 0x43, 0xE5, 0xAE, 0x80, 0x43, 0xE5, + 0xAE, 0x85, 0x43, 0xE5, 0xAE, 0x97, 0x43, 0xE5, + 0xAF, 0x83, 0x43, 0xE5, 0xAF, 0x98, 0x43, 0xE5, + 0xAF, 
0xA7, 0x43, 0xE5, 0xAF, 0xAE, 0x43, 0xE5, + 0xAF, 0xB3, 0x43, 0xE5, 0xAF, 0xB8, 0x43, 0xE5, + 0xAF, 0xBF, 0x43, 0xE5, 0xB0, 0x86, 0x43, 0xE5, + 0xB0, 0x8F, 0x43, 0xE5, 0xB0, 0xA2, 0x43, 0xE5, + // Bytes b80 - bbf + 0xB0, 0xB8, 0x43, 0xE5, 0xB0, 0xBF, 0x43, 0xE5, + 0xB1, 0xA0, 0x43, 0xE5, 0xB1, 0xA2, 0x43, 0xE5, + 0xB1, 0xA4, 0x43, 0xE5, 0xB1, 0xA5, 0x43, 0xE5, + 0xB1, 0xAE, 0x43, 0xE5, 0xB1, 0xB1, 0x43, 0xE5, + 0xB2, 0x8D, 0x43, 0xE5, 0xB3, 0x80, 0x43, 0xE5, + 0xB4, 0x99, 0x43, 0xE5, 0xB5, 0x83, 0x43, 0xE5, + 0xB5, 0x90, 0x43, 0xE5, 0xB5, 0xAB, 0x43, 0xE5, + 0xB5, 0xAE, 0x43, 0xE5, 0xB5, 0xBC, 0x43, 0xE5, + // Bytes bc0 - bff + 0xB6, 0xB2, 0x43, 0xE5, 0xB6, 0xBA, 0x43, 0xE5, + 0xB7, 0x9B, 0x43, 0xE5, 0xB7, 0xA1, 0x43, 0xE5, + 0xB7, 0xA2, 0x43, 0xE5, 0xB7, 0xA5, 0x43, 0xE5, + 0xB7, 0xA6, 0x43, 0xE5, 0xB7, 0xB1, 0x43, 0xE5, + 0xB7, 0xBD, 0x43, 0xE5, 0xB7, 0xBE, 0x43, 0xE5, + 0xB8, 0xA8, 0x43, 0xE5, 0xB8, 0xBD, 0x43, 0xE5, + 0xB9, 0xA9, 0x43, 0xE5, 0xB9, 0xB2, 0x43, 0xE5, + 0xB9, 0xB4, 0x43, 0xE5, 0xB9, 0xBA, 0x43, 0xE5, + // Bytes c00 - c3f + 0xB9, 0xBC, 0x43, 0xE5, 0xB9, 0xBF, 0x43, 0xE5, + 0xBA, 0xA6, 0x43, 0xE5, 0xBA, 0xB0, 0x43, 0xE5, + 0xBA, 0xB3, 0x43, 0xE5, 0xBA, 0xB6, 0x43, 0xE5, + 0xBB, 0x89, 0x43, 0xE5, 0xBB, 0x8A, 0x43, 0xE5, + 0xBB, 0x92, 0x43, 0xE5, 0xBB, 0x93, 0x43, 0xE5, + 0xBB, 0x99, 0x43, 0xE5, 0xBB, 0xAC, 0x43, 0xE5, + 0xBB, 0xB4, 0x43, 0xE5, 0xBB, 0xBE, 0x43, 0xE5, + 0xBC, 0x84, 0x43, 0xE5, 0xBC, 0x8B, 0x43, 0xE5, + // Bytes c40 - c7f + 0xBC, 0x93, 0x43, 0xE5, 0xBC, 0xA2, 0x43, 0xE5, + 0xBD, 0x90, 0x43, 0xE5, 0xBD, 0x93, 0x43, 0xE5, + 0xBD, 0xA1, 0x43, 0xE5, 0xBD, 0xA2, 0x43, 0xE5, + 0xBD, 0xA9, 0x43, 0xE5, 0xBD, 0xAB, 0x43, 0xE5, + 0xBD, 0xB3, 0x43, 0xE5, 0xBE, 0x8B, 0x43, 0xE5, + 0xBE, 0x8C, 0x43, 0xE5, 0xBE, 0x97, 0x43, 0xE5, + 0xBE, 0x9A, 0x43, 0xE5, 0xBE, 0xA9, 0x43, 0xE5, + 0xBE, 0xAD, 0x43, 0xE5, 0xBF, 0x83, 0x43, 0xE5, + // Bytes c80 - cbf + 0xBF, 0x8D, 0x43, 0xE5, 0xBF, 0x97, 0x43, 0xE5, + 0xBF, 0xB5, 0x43, 0xE5, 0xBF, 0xB9, 0x43, 0xE6, + 0x80, 0x92, 0x43, 0xE6, 0x80, 0x9C, 0x43, 0xE6, + 0x81, 0xB5, 0x43, 0xE6, 0x82, 0x81, 0x43, 0xE6, + 0x82, 0x94, 0x43, 0xE6, 0x83, 0x87, 0x43, 0xE6, + 0x83, 0x98, 0x43, 0xE6, 0x83, 0xA1, 0x43, 0xE6, + 0x84, 0x88, 0x43, 0xE6, 0x85, 0x84, 0x43, 0xE6, + 0x85, 0x88, 0x43, 0xE6, 0x85, 0x8C, 0x43, 0xE6, + // Bytes cc0 - cff + 0x85, 0x8E, 0x43, 0xE6, 0x85, 0xA0, 0x43, 0xE6, + 0x85, 0xA8, 0x43, 0xE6, 0x85, 0xBA, 0x43, 0xE6, + 0x86, 0x8E, 0x43, 0xE6, 0x86, 0x90, 0x43, 0xE6, + 0x86, 0xA4, 0x43, 0xE6, 0x86, 0xAF, 0x43, 0xE6, + 0x86, 0xB2, 0x43, 0xE6, 0x87, 0x9E, 0x43, 0xE6, + 0x87, 0xB2, 0x43, 0xE6, 0x87, 0xB6, 0x43, 0xE6, + 0x88, 0x80, 0x43, 0xE6, 0x88, 0x88, 0x43, 0xE6, + 0x88, 0x90, 0x43, 0xE6, 0x88, 0x9B, 0x43, 0xE6, + // Bytes d00 - d3f + 0x88, 0xAE, 0x43, 0xE6, 0x88, 0xB4, 0x43, 0xE6, + 0x88, 0xB6, 0x43, 0xE6, 0x89, 0x8B, 0x43, 0xE6, + 0x89, 0x93, 0x43, 0xE6, 0x89, 0x9D, 0x43, 0xE6, + 0x8A, 0x95, 0x43, 0xE6, 0x8A, 0xB1, 0x43, 0xE6, + 0x8B, 0x89, 0x43, 0xE6, 0x8B, 0x8F, 0x43, 0xE6, + 0x8B, 0x93, 0x43, 0xE6, 0x8B, 0x94, 0x43, 0xE6, + 0x8B, 0xBC, 0x43, 0xE6, 0x8B, 0xBE, 0x43, 0xE6, + 0x8C, 0x87, 0x43, 0xE6, 0x8C, 0xBD, 0x43, 0xE6, + // Bytes d40 - d7f + 0x8D, 0x90, 0x43, 0xE6, 0x8D, 0x95, 0x43, 0xE6, + 0x8D, 0xA8, 0x43, 0xE6, 0x8D, 0xBB, 0x43, 0xE6, + 0x8E, 0x83, 0x43, 0xE6, 0x8E, 0xA0, 0x43, 0xE6, + 0x8E, 0xA9, 0x43, 0xE6, 0x8F, 0x84, 0x43, 0xE6, + 0x8F, 0x85, 0x43, 0xE6, 0x8F, 0xA4, 0x43, 0xE6, + 0x90, 0x9C, 0x43, 0xE6, 0x90, 0xA2, 0x43, 0xE6, + 0x91, 0x92, 0x43, 0xE6, 0x91, 0xA9, 0x43, 0xE6, + 0x91, 0xB7, 0x43, 0xE6, 0x91, 0xBE, 0x43, 
0xE6, + // Bytes d80 - dbf + 0x92, 0x9A, 0x43, 0xE6, 0x92, 0x9D, 0x43, 0xE6, + 0x93, 0x84, 0x43, 0xE6, 0x94, 0xAF, 0x43, 0xE6, + 0x94, 0xB4, 0x43, 0xE6, 0x95, 0x8F, 0x43, 0xE6, + 0x95, 0x96, 0x43, 0xE6, 0x95, 0xAC, 0x43, 0xE6, + 0x95, 0xB8, 0x43, 0xE6, 0x96, 0x87, 0x43, 0xE6, + 0x96, 0x97, 0x43, 0xE6, 0x96, 0x99, 0x43, 0xE6, + 0x96, 0xA4, 0x43, 0xE6, 0x96, 0xB0, 0x43, 0xE6, + 0x96, 0xB9, 0x43, 0xE6, 0x97, 0x85, 0x43, 0xE6, + // Bytes dc0 - dff + 0x97, 0xA0, 0x43, 0xE6, 0x97, 0xA2, 0x43, 0xE6, + 0x97, 0xA3, 0x43, 0xE6, 0x97, 0xA5, 0x43, 0xE6, + 0x98, 0x93, 0x43, 0xE6, 0x98, 0xA0, 0x43, 0xE6, + 0x99, 0x89, 0x43, 0xE6, 0x99, 0xB4, 0x43, 0xE6, + 0x9A, 0x88, 0x43, 0xE6, 0x9A, 0x91, 0x43, 0xE6, + 0x9A, 0x9C, 0x43, 0xE6, 0x9A, 0xB4, 0x43, 0xE6, + 0x9B, 0x86, 0x43, 0xE6, 0x9B, 0xB0, 0x43, 0xE6, + 0x9B, 0xB4, 0x43, 0xE6, 0x9B, 0xB8, 0x43, 0xE6, + // Bytes e00 - e3f + 0x9C, 0x80, 0x43, 0xE6, 0x9C, 0x88, 0x43, 0xE6, + 0x9C, 0x89, 0x43, 0xE6, 0x9C, 0x97, 0x43, 0xE6, + 0x9C, 0x9B, 0x43, 0xE6, 0x9C, 0xA1, 0x43, 0xE6, + 0x9C, 0xA8, 0x43, 0xE6, 0x9D, 0x8E, 0x43, 0xE6, + 0x9D, 0x93, 0x43, 0xE6, 0x9D, 0x96, 0x43, 0xE6, + 0x9D, 0x9E, 0x43, 0xE6, 0x9D, 0xBB, 0x43, 0xE6, + 0x9E, 0x85, 0x43, 0xE6, 0x9E, 0x97, 0x43, 0xE6, + 0x9F, 0xB3, 0x43, 0xE6, 0x9F, 0xBA, 0x43, 0xE6, + // Bytes e40 - e7f + 0xA0, 0x97, 0x43, 0xE6, 0xA0, 0x9F, 0x43, 0xE6, + 0xA0, 0xAA, 0x43, 0xE6, 0xA1, 0x92, 0x43, 0xE6, + 0xA2, 0x81, 0x43, 0xE6, 0xA2, 0x85, 0x43, 0xE6, + 0xA2, 0x8E, 0x43, 0xE6, 0xA2, 0xA8, 0x43, 0xE6, + 0xA4, 0x94, 0x43, 0xE6, 0xA5, 0x82, 0x43, 0xE6, + 0xA6, 0xA3, 0x43, 0xE6, 0xA7, 0xAA, 0x43, 0xE6, + 0xA8, 0x82, 0x43, 0xE6, 0xA8, 0x93, 0x43, 0xE6, + 0xAA, 0xA8, 0x43, 0xE6, 0xAB, 0x93, 0x43, 0xE6, + // Bytes e80 - ebf + 0xAB, 0x9B, 0x43, 0xE6, 0xAC, 0x84, 0x43, 0xE6, + 0xAC, 0xA0, 0x43, 0xE6, 0xAC, 0xA1, 0x43, 0xE6, + 0xAD, 0x94, 0x43, 0xE6, 0xAD, 0xA2, 0x43, 0xE6, + 0xAD, 0xA3, 0x43, 0xE6, 0xAD, 0xB2, 0x43, 0xE6, + 0xAD, 0xB7, 0x43, 0xE6, 0xAD, 0xB9, 0x43, 0xE6, + 0xAE, 0x9F, 0x43, 0xE6, 0xAE, 0xAE, 0x43, 0xE6, + 0xAE, 0xB3, 0x43, 0xE6, 0xAE, 0xBA, 0x43, 0xE6, + 0xAE, 0xBB, 0x43, 0xE6, 0xAF, 0x8B, 0x43, 0xE6, + // Bytes ec0 - eff + 0xAF, 0x8D, 0x43, 0xE6, 0xAF, 0x94, 0x43, 0xE6, + 0xAF, 0x9B, 0x43, 0xE6, 0xB0, 0x8F, 0x43, 0xE6, + 0xB0, 0x94, 0x43, 0xE6, 0xB0, 0xB4, 0x43, 0xE6, + 0xB1, 0x8E, 0x43, 0xE6, 0xB1, 0xA7, 0x43, 0xE6, + 0xB2, 0x88, 0x43, 0xE6, 0xB2, 0xBF, 0x43, 0xE6, + 0xB3, 0x8C, 0x43, 0xE6, 0xB3, 0x8D, 0x43, 0xE6, + 0xB3, 0xA5, 0x43, 0xE6, 0xB3, 0xA8, 0x43, 0xE6, + 0xB4, 0x96, 0x43, 0xE6, 0xB4, 0x9B, 0x43, 0xE6, + // Bytes f00 - f3f + 0xB4, 0x9E, 0x43, 0xE6, 0xB4, 0xB4, 0x43, 0xE6, + 0xB4, 0xBE, 0x43, 0xE6, 0xB5, 0x81, 0x43, 0xE6, + 0xB5, 0xA9, 0x43, 0xE6, 0xB5, 0xAA, 0x43, 0xE6, + 0xB5, 0xB7, 0x43, 0xE6, 0xB5, 0xB8, 0x43, 0xE6, + 0xB6, 0x85, 0x43, 0xE6, 0xB7, 0x8B, 0x43, 0xE6, + 0xB7, 0x9A, 0x43, 0xE6, 0xB7, 0xAA, 0x43, 0xE6, + 0xB7, 0xB9, 0x43, 0xE6, 0xB8, 0x9A, 0x43, 0xE6, + 0xB8, 0xAF, 0x43, 0xE6, 0xB9, 0xAE, 0x43, 0xE6, + // Bytes f40 - f7f + 0xBA, 0x80, 0x43, 0xE6, 0xBA, 0x9C, 0x43, 0xE6, + 0xBA, 0xBA, 0x43, 0xE6, 0xBB, 0x87, 0x43, 0xE6, + 0xBB, 0x8B, 0x43, 0xE6, 0xBB, 0x91, 0x43, 0xE6, + 0xBB, 0x9B, 0x43, 0xE6, 0xBC, 0x8F, 0x43, 0xE6, + 0xBC, 0x94, 0x43, 0xE6, 0xBC, 0xA2, 0x43, 0xE6, + 0xBC, 0xA3, 0x43, 0xE6, 0xBD, 0xAE, 0x43, 0xE6, + 0xBF, 0x86, 0x43, 0xE6, 0xBF, 0xAB, 0x43, 0xE6, + 0xBF, 0xBE, 0x43, 0xE7, 0x80, 0x9B, 0x43, 0xE7, + // Bytes f80 - fbf + 0x80, 0x9E, 0x43, 0xE7, 0x80, 0xB9, 0x43, 0xE7, + 0x81, 0x8A, 0x43, 0xE7, 0x81, 0xAB, 0x43, 0xE7, + 0x81, 0xB0, 0x43, 0xE7, 0x81, 0xB7, 0x43, 0xE7, + 0x81, 
0xBD, 0x43, 0xE7, 0x82, 0x99, 0x43, 0xE7, + 0x82, 0xAD, 0x43, 0xE7, 0x83, 0x88, 0x43, 0xE7, + 0x83, 0x99, 0x43, 0xE7, 0x84, 0xA1, 0x43, 0xE7, + 0x85, 0x85, 0x43, 0xE7, 0x85, 0x89, 0x43, 0xE7, + 0x85, 0xAE, 0x43, 0xE7, 0x86, 0x9C, 0x43, 0xE7, + // Bytes fc0 - fff + 0x87, 0x8E, 0x43, 0xE7, 0x87, 0x90, 0x43, 0xE7, + 0x88, 0x90, 0x43, 0xE7, 0x88, 0x9B, 0x43, 0xE7, + 0x88, 0xA8, 0x43, 0xE7, 0x88, 0xAA, 0x43, 0xE7, + 0x88, 0xAB, 0x43, 0xE7, 0x88, 0xB5, 0x43, 0xE7, + 0x88, 0xB6, 0x43, 0xE7, 0x88, 0xBB, 0x43, 0xE7, + 0x88, 0xBF, 0x43, 0xE7, 0x89, 0x87, 0x43, 0xE7, + 0x89, 0x90, 0x43, 0xE7, 0x89, 0x99, 0x43, 0xE7, + 0x89, 0x9B, 0x43, 0xE7, 0x89, 0xA2, 0x43, 0xE7, + // Bytes 1000 - 103f + 0x89, 0xB9, 0x43, 0xE7, 0x8A, 0x80, 0x43, 0xE7, + 0x8A, 0x95, 0x43, 0xE7, 0x8A, 0xAC, 0x43, 0xE7, + 0x8A, 0xAF, 0x43, 0xE7, 0x8B, 0x80, 0x43, 0xE7, + 0x8B, 0xBC, 0x43, 0xE7, 0x8C, 0xAA, 0x43, 0xE7, + 0x8D, 0xB5, 0x43, 0xE7, 0x8D, 0xBA, 0x43, 0xE7, + 0x8E, 0x84, 0x43, 0xE7, 0x8E, 0x87, 0x43, 0xE7, + 0x8E, 0x89, 0x43, 0xE7, 0x8E, 0x8B, 0x43, 0xE7, + 0x8E, 0xA5, 0x43, 0xE7, 0x8E, 0xB2, 0x43, 0xE7, + // Bytes 1040 - 107f + 0x8F, 0x9E, 0x43, 0xE7, 0x90, 0x86, 0x43, 0xE7, + 0x90, 0x89, 0x43, 0xE7, 0x90, 0xA2, 0x43, 0xE7, + 0x91, 0x87, 0x43, 0xE7, 0x91, 0x9C, 0x43, 0xE7, + 0x91, 0xA9, 0x43, 0xE7, 0x91, 0xB1, 0x43, 0xE7, + 0x92, 0x85, 0x43, 0xE7, 0x92, 0x89, 0x43, 0xE7, + 0x92, 0x98, 0x43, 0xE7, 0x93, 0x8A, 0x43, 0xE7, + 0x93, 0x9C, 0x43, 0xE7, 0x93, 0xA6, 0x43, 0xE7, + 0x94, 0x86, 0x43, 0xE7, 0x94, 0x98, 0x43, 0xE7, + // Bytes 1080 - 10bf + 0x94, 0x9F, 0x43, 0xE7, 0x94, 0xA4, 0x43, 0xE7, + 0x94, 0xA8, 0x43, 0xE7, 0x94, 0xB0, 0x43, 0xE7, + 0x94, 0xB2, 0x43, 0xE7, 0x94, 0xB3, 0x43, 0xE7, + 0x94, 0xB7, 0x43, 0xE7, 0x94, 0xBB, 0x43, 0xE7, + 0x94, 0xBE, 0x43, 0xE7, 0x95, 0x99, 0x43, 0xE7, + 0x95, 0xA5, 0x43, 0xE7, 0x95, 0xB0, 0x43, 0xE7, + 0x96, 0x8B, 0x43, 0xE7, 0x96, 0x92, 0x43, 0xE7, + 0x97, 0xA2, 0x43, 0xE7, 0x98, 0x90, 0x43, 0xE7, + // Bytes 10c0 - 10ff + 0x98, 0x9D, 0x43, 0xE7, 0x98, 0x9F, 0x43, 0xE7, + 0x99, 0x82, 0x43, 0xE7, 0x99, 0xA9, 0x43, 0xE7, + 0x99, 0xB6, 0x43, 0xE7, 0x99, 0xBD, 0x43, 0xE7, + 0x9A, 0xAE, 0x43, 0xE7, 0x9A, 0xBF, 0x43, 0xE7, + 0x9B, 0x8A, 0x43, 0xE7, 0x9B, 0x9B, 0x43, 0xE7, + 0x9B, 0xA3, 0x43, 0xE7, 0x9B, 0xA7, 0x43, 0xE7, + 0x9B, 0xAE, 0x43, 0xE7, 0x9B, 0xB4, 0x43, 0xE7, + 0x9C, 0x81, 0x43, 0xE7, 0x9C, 0x9E, 0x43, 0xE7, + // Bytes 1100 - 113f + 0x9C, 0x9F, 0x43, 0xE7, 0x9D, 0x80, 0x43, 0xE7, + 0x9D, 0x8A, 0x43, 0xE7, 0x9E, 0x8B, 0x43, 0xE7, + 0x9E, 0xA7, 0x43, 0xE7, 0x9F, 0x9B, 0x43, 0xE7, + 0x9F, 0xA2, 0x43, 0xE7, 0x9F, 0xB3, 0x43, 0xE7, + 0xA1, 0x8E, 0x43, 0xE7, 0xA1, 0xAB, 0x43, 0xE7, + 0xA2, 0x8C, 0x43, 0xE7, 0xA2, 0x91, 0x43, 0xE7, + 0xA3, 0x8A, 0x43, 0xE7, 0xA3, 0x8C, 0x43, 0xE7, + 0xA3, 0xBB, 0x43, 0xE7, 0xA4, 0xAA, 0x43, 0xE7, + // Bytes 1140 - 117f + 0xA4, 0xBA, 0x43, 0xE7, 0xA4, 0xBC, 0x43, 0xE7, + 0xA4, 0xBE, 0x43, 0xE7, 0xA5, 0x88, 0x43, 0xE7, + 0xA5, 0x89, 0x43, 0xE7, 0xA5, 0x90, 0x43, 0xE7, + 0xA5, 0x96, 0x43, 0xE7, 0xA5, 0x9D, 0x43, 0xE7, + 0xA5, 0x9E, 0x43, 0xE7, 0xA5, 0xA5, 0x43, 0xE7, + 0xA5, 0xBF, 0x43, 0xE7, 0xA6, 0x81, 0x43, 0xE7, + 0xA6, 0x8D, 0x43, 0xE7, 0xA6, 0x8E, 0x43, 0xE7, + 0xA6, 0x8F, 0x43, 0xE7, 0xA6, 0xAE, 0x43, 0xE7, + // Bytes 1180 - 11bf + 0xA6, 0xB8, 0x43, 0xE7, 0xA6, 0xBE, 0x43, 0xE7, + 0xA7, 0x8A, 0x43, 0xE7, 0xA7, 0x98, 0x43, 0xE7, + 0xA7, 0xAB, 0x43, 0xE7, 0xA8, 0x9C, 0x43, 0xE7, + 0xA9, 0x80, 0x43, 0xE7, 0xA9, 0x8A, 0x43, 0xE7, + 0xA9, 0x8F, 0x43, 0xE7, 0xA9, 0xB4, 0x43, 0xE7, + 0xA9, 0xBA, 0x43, 0xE7, 0xAA, 0x81, 0x43, 0xE7, + 0xAA, 0xB1, 0x43, 0xE7, 
0xAB, 0x8B, 0x43, 0xE7, + 0xAB, 0xAE, 0x43, 0xE7, 0xAB, 0xB9, 0x43, 0xE7, + // Bytes 11c0 - 11ff + 0xAC, 0xA0, 0x43, 0xE7, 0xAE, 0x8F, 0x43, 0xE7, + 0xAF, 0x80, 0x43, 0xE7, 0xAF, 0x86, 0x43, 0xE7, + 0xAF, 0x89, 0x43, 0xE7, 0xB0, 0xBE, 0x43, 0xE7, + 0xB1, 0xA0, 0x43, 0xE7, 0xB1, 0xB3, 0x43, 0xE7, + 0xB1, 0xBB, 0x43, 0xE7, 0xB2, 0x92, 0x43, 0xE7, + 0xB2, 0xBE, 0x43, 0xE7, 0xB3, 0x92, 0x43, 0xE7, + 0xB3, 0x96, 0x43, 0xE7, 0xB3, 0xA3, 0x43, 0xE7, + 0xB3, 0xA7, 0x43, 0xE7, 0xB3, 0xA8, 0x43, 0xE7, + // Bytes 1200 - 123f + 0xB3, 0xB8, 0x43, 0xE7, 0xB4, 0x80, 0x43, 0xE7, + 0xB4, 0x90, 0x43, 0xE7, 0xB4, 0xA2, 0x43, 0xE7, + 0xB4, 0xAF, 0x43, 0xE7, 0xB5, 0x82, 0x43, 0xE7, + 0xB5, 0x9B, 0x43, 0xE7, 0xB5, 0xA3, 0x43, 0xE7, + 0xB6, 0xA0, 0x43, 0xE7, 0xB6, 0xBE, 0x43, 0xE7, + 0xB7, 0x87, 0x43, 0xE7, 0xB7, 0xB4, 0x43, 0xE7, + 0xB8, 0x82, 0x43, 0xE7, 0xB8, 0x89, 0x43, 0xE7, + 0xB8, 0xB7, 0x43, 0xE7, 0xB9, 0x81, 0x43, 0xE7, + // Bytes 1240 - 127f + 0xB9, 0x85, 0x43, 0xE7, 0xBC, 0xB6, 0x43, 0xE7, + 0xBC, 0xBE, 0x43, 0xE7, 0xBD, 0x91, 0x43, 0xE7, + 0xBD, 0xB2, 0x43, 0xE7, 0xBD, 0xB9, 0x43, 0xE7, + 0xBD, 0xBA, 0x43, 0xE7, 0xBE, 0x85, 0x43, 0xE7, + 0xBE, 0x8A, 0x43, 0xE7, 0xBE, 0x95, 0x43, 0xE7, + 0xBE, 0x9A, 0x43, 0xE7, 0xBE, 0xBD, 0x43, 0xE7, + 0xBF, 0xBA, 0x43, 0xE8, 0x80, 0x81, 0x43, 0xE8, + 0x80, 0x85, 0x43, 0xE8, 0x80, 0x8C, 0x43, 0xE8, + // Bytes 1280 - 12bf + 0x80, 0x92, 0x43, 0xE8, 0x80, 0xB3, 0x43, 0xE8, + 0x81, 0x86, 0x43, 0xE8, 0x81, 0xA0, 0x43, 0xE8, + 0x81, 0xAF, 0x43, 0xE8, 0x81, 0xB0, 0x43, 0xE8, + 0x81, 0xBE, 0x43, 0xE8, 0x81, 0xBF, 0x43, 0xE8, + 0x82, 0x89, 0x43, 0xE8, 0x82, 0x8B, 0x43, 0xE8, + 0x82, 0xAD, 0x43, 0xE8, 0x82, 0xB2, 0x43, 0xE8, + 0x84, 0x83, 0x43, 0xE8, 0x84, 0xBE, 0x43, 0xE8, + 0x87, 0x98, 0x43, 0xE8, 0x87, 0xA3, 0x43, 0xE8, + // Bytes 12c0 - 12ff + 0x87, 0xA8, 0x43, 0xE8, 0x87, 0xAA, 0x43, 0xE8, + 0x87, 0xAD, 0x43, 0xE8, 0x87, 0xB3, 0x43, 0xE8, + 0x87, 0xBC, 0x43, 0xE8, 0x88, 0x81, 0x43, 0xE8, + 0x88, 0x84, 0x43, 0xE8, 0x88, 0x8C, 0x43, 0xE8, + 0x88, 0x98, 0x43, 0xE8, 0x88, 0x9B, 0x43, 0xE8, + 0x88, 0x9F, 0x43, 0xE8, 0x89, 0xAE, 0x43, 0xE8, + 0x89, 0xAF, 0x43, 0xE8, 0x89, 0xB2, 0x43, 0xE8, + 0x89, 0xB8, 0x43, 0xE8, 0x89, 0xB9, 0x43, 0xE8, + // Bytes 1300 - 133f + 0x8A, 0x8B, 0x43, 0xE8, 0x8A, 0x91, 0x43, 0xE8, + 0x8A, 0x9D, 0x43, 0xE8, 0x8A, 0xB1, 0x43, 0xE8, + 0x8A, 0xB3, 0x43, 0xE8, 0x8A, 0xBD, 0x43, 0xE8, + 0x8B, 0xA5, 0x43, 0xE8, 0x8B, 0xA6, 0x43, 0xE8, + 0x8C, 0x9D, 0x43, 0xE8, 0x8C, 0xA3, 0x43, 0xE8, + 0x8C, 0xB6, 0x43, 0xE8, 0x8D, 0x92, 0x43, 0xE8, + 0x8D, 0x93, 0x43, 0xE8, 0x8D, 0xA3, 0x43, 0xE8, + 0x8E, 0xAD, 0x43, 0xE8, 0x8E, 0xBD, 0x43, 0xE8, + // Bytes 1340 - 137f + 0x8F, 0x89, 0x43, 0xE8, 0x8F, 0x8A, 0x43, 0xE8, + 0x8F, 0x8C, 0x43, 0xE8, 0x8F, 0x9C, 0x43, 0xE8, + 0x8F, 0xA7, 0x43, 0xE8, 0x8F, 0xAF, 0x43, 0xE8, + 0x8F, 0xB1, 0x43, 0xE8, 0x90, 0xBD, 0x43, 0xE8, + 0x91, 0x89, 0x43, 0xE8, 0x91, 0x97, 0x43, 0xE8, + 0x93, 0xAE, 0x43, 0xE8, 0x93, 0xB1, 0x43, 0xE8, + 0x93, 0xB3, 0x43, 0xE8, 0x93, 0xBC, 0x43, 0xE8, + 0x94, 0x96, 0x43, 0xE8, 0x95, 0xA4, 0x43, 0xE8, + // Bytes 1380 - 13bf + 0x97, 0x8D, 0x43, 0xE8, 0x97, 0xBA, 0x43, 0xE8, + 0x98, 0x86, 0x43, 0xE8, 0x98, 0x92, 0x43, 0xE8, + 0x98, 0xAD, 0x43, 0xE8, 0x98, 0xBF, 0x43, 0xE8, + 0x99, 0x8D, 0x43, 0xE8, 0x99, 0x90, 0x43, 0xE8, + 0x99, 0x9C, 0x43, 0xE8, 0x99, 0xA7, 0x43, 0xE8, + 0x99, 0xA9, 0x43, 0xE8, 0x99, 0xAB, 0x43, 0xE8, + 0x9A, 0x88, 0x43, 0xE8, 0x9A, 0xA9, 0x43, 0xE8, + 0x9B, 0xA2, 0x43, 0xE8, 0x9C, 0x8E, 0x43, 0xE8, + // Bytes 13c0 - 13ff + 0x9C, 0xA8, 0x43, 0xE8, 0x9D, 0xAB, 0x43, 0xE8, + 0x9D, 0xB9, 0x43, 
0xE8, 0x9E, 0x86, 0x43, 0xE8, + 0x9E, 0xBA, 0x43, 0xE8, 0x9F, 0xA1, 0x43, 0xE8, + 0xA0, 0x81, 0x43, 0xE8, 0xA0, 0x9F, 0x43, 0xE8, + 0xA1, 0x80, 0x43, 0xE8, 0xA1, 0x8C, 0x43, 0xE8, + 0xA1, 0xA0, 0x43, 0xE8, 0xA1, 0xA3, 0x43, 0xE8, + 0xA3, 0x82, 0x43, 0xE8, 0xA3, 0x8F, 0x43, 0xE8, + 0xA3, 0x97, 0x43, 0xE8, 0xA3, 0x9E, 0x43, 0xE8, + // Bytes 1400 - 143f + 0xA3, 0xA1, 0x43, 0xE8, 0xA3, 0xB8, 0x43, 0xE8, + 0xA3, 0xBA, 0x43, 0xE8, 0xA4, 0x90, 0x43, 0xE8, + 0xA5, 0x81, 0x43, 0xE8, 0xA5, 0xA4, 0x43, 0xE8, + 0xA5, 0xBE, 0x43, 0xE8, 0xA6, 0x86, 0x43, 0xE8, + 0xA6, 0x8B, 0x43, 0xE8, 0xA6, 0x96, 0x43, 0xE8, + 0xA7, 0x92, 0x43, 0xE8, 0xA7, 0xA3, 0x43, 0xE8, + 0xA8, 0x80, 0x43, 0xE8, 0xAA, 0xA0, 0x43, 0xE8, + 0xAA, 0xAA, 0x43, 0xE8, 0xAA, 0xBF, 0x43, 0xE8, + // Bytes 1440 - 147f + 0xAB, 0x8B, 0x43, 0xE8, 0xAB, 0x92, 0x43, 0xE8, + 0xAB, 0x96, 0x43, 0xE8, 0xAB, 0xAD, 0x43, 0xE8, + 0xAB, 0xB8, 0x43, 0xE8, 0xAB, 0xBE, 0x43, 0xE8, + 0xAC, 0x81, 0x43, 0xE8, 0xAC, 0xB9, 0x43, 0xE8, + 0xAD, 0x98, 0x43, 0xE8, 0xAE, 0x80, 0x43, 0xE8, + 0xAE, 0x8A, 0x43, 0xE8, 0xB0, 0xB7, 0x43, 0xE8, + 0xB1, 0x86, 0x43, 0xE8, 0xB1, 0x88, 0x43, 0xE8, + 0xB1, 0x95, 0x43, 0xE8, 0xB1, 0xB8, 0x43, 0xE8, + // Bytes 1480 - 14bf + 0xB2, 0x9D, 0x43, 0xE8, 0xB2, 0xA1, 0x43, 0xE8, + 0xB2, 0xA9, 0x43, 0xE8, 0xB2, 0xAB, 0x43, 0xE8, + 0xB3, 0x81, 0x43, 0xE8, 0xB3, 0x82, 0x43, 0xE8, + 0xB3, 0x87, 0x43, 0xE8, 0xB3, 0x88, 0x43, 0xE8, + 0xB3, 0x93, 0x43, 0xE8, 0xB4, 0x88, 0x43, 0xE8, + 0xB4, 0x9B, 0x43, 0xE8, 0xB5, 0xA4, 0x43, 0xE8, + 0xB5, 0xB0, 0x43, 0xE8, 0xB5, 0xB7, 0x43, 0xE8, + 0xB6, 0xB3, 0x43, 0xE8, 0xB6, 0xBC, 0x43, 0xE8, + // Bytes 14c0 - 14ff + 0xB7, 0x8B, 0x43, 0xE8, 0xB7, 0xAF, 0x43, 0xE8, + 0xB7, 0xB0, 0x43, 0xE8, 0xBA, 0xAB, 0x43, 0xE8, + 0xBB, 0x8A, 0x43, 0xE8, 0xBB, 0x94, 0x43, 0xE8, + 0xBC, 0xA6, 0x43, 0xE8, 0xBC, 0xAA, 0x43, 0xE8, + 0xBC, 0xB8, 0x43, 0xE8, 0xBC, 0xBB, 0x43, 0xE8, + 0xBD, 0xA2, 0x43, 0xE8, 0xBE, 0x9B, 0x43, 0xE8, + 0xBE, 0x9E, 0x43, 0xE8, 0xBE, 0xB0, 0x43, 0xE8, + 0xBE, 0xB5, 0x43, 0xE8, 0xBE, 0xB6, 0x43, 0xE9, + // Bytes 1500 - 153f + 0x80, 0xA3, 0x43, 0xE9, 0x80, 0xB8, 0x43, 0xE9, + 0x81, 0x8A, 0x43, 0xE9, 0x81, 0xA9, 0x43, 0xE9, + 0x81, 0xB2, 0x43, 0xE9, 0x81, 0xBC, 0x43, 0xE9, + 0x82, 0x8F, 0x43, 0xE9, 0x82, 0x91, 0x43, 0xE9, + 0x82, 0x94, 0x43, 0xE9, 0x83, 0x8E, 0x43, 0xE9, + 0x83, 0x9E, 0x43, 0xE9, 0x83, 0xB1, 0x43, 0xE9, + 0x83, 0xBD, 0x43, 0xE9, 0x84, 0x91, 0x43, 0xE9, + 0x84, 0x9B, 0x43, 0xE9, 0x85, 0x89, 0x43, 0xE9, + // Bytes 1540 - 157f + 0x85, 0x8D, 0x43, 0xE9, 0x85, 0xAA, 0x43, 0xE9, + 0x86, 0x99, 0x43, 0xE9, 0x86, 0xB4, 0x43, 0xE9, + 0x87, 0x86, 0x43, 0xE9, 0x87, 0x8C, 0x43, 0xE9, + 0x87, 0x8F, 0x43, 0xE9, 0x87, 0x91, 0x43, 0xE9, + 0x88, 0xB4, 0x43, 0xE9, 0x88, 0xB8, 0x43, 0xE9, + 0x89, 0xB6, 0x43, 0xE9, 0x89, 0xBC, 0x43, 0xE9, + 0x8B, 0x97, 0x43, 0xE9, 0x8B, 0x98, 0x43, 0xE9, + 0x8C, 0x84, 0x43, 0xE9, 0x8D, 0x8A, 0x43, 0xE9, + // Bytes 1580 - 15bf + 0x8F, 0xB9, 0x43, 0xE9, 0x90, 0x95, 0x43, 0xE9, + 0x95, 0xB7, 0x43, 0xE9, 0x96, 0x80, 0x43, 0xE9, + 0x96, 0x8B, 0x43, 0xE9, 0x96, 0xAD, 0x43, 0xE9, + 0x96, 0xB7, 0x43, 0xE9, 0x98, 0x9C, 0x43, 0xE9, + 0x98, 0xAE, 0x43, 0xE9, 0x99, 0x8B, 0x43, 0xE9, + 0x99, 0x8D, 0x43, 0xE9, 0x99, 0xB5, 0x43, 0xE9, + 0x99, 0xB8, 0x43, 0xE9, 0x99, 0xBC, 0x43, 0xE9, + 0x9A, 0x86, 0x43, 0xE9, 0x9A, 0xA3, 0x43, 0xE9, + // Bytes 15c0 - 15ff + 0x9A, 0xB6, 0x43, 0xE9, 0x9A, 0xB7, 0x43, 0xE9, + 0x9A, 0xB8, 0x43, 0xE9, 0x9A, 0xB9, 0x43, 0xE9, + 0x9B, 0x83, 0x43, 0xE9, 0x9B, 0xA2, 0x43, 0xE9, + 0x9B, 0xA3, 0x43, 0xE9, 0x9B, 0xA8, 0x43, 0xE9, + 0x9B, 0xB6, 0x43, 0xE9, 0x9B, 0xB7, 
0x43, 0xE9, + 0x9C, 0xA3, 0x43, 0xE9, 0x9C, 0xB2, 0x43, 0xE9, + 0x9D, 0x88, 0x43, 0xE9, 0x9D, 0x91, 0x43, 0xE9, + 0x9D, 0x96, 0x43, 0xE9, 0x9D, 0x9E, 0x43, 0xE9, + // Bytes 1600 - 163f + 0x9D, 0xA2, 0x43, 0xE9, 0x9D, 0xA9, 0x43, 0xE9, + 0x9F, 0x8B, 0x43, 0xE9, 0x9F, 0x9B, 0x43, 0xE9, + 0x9F, 0xA0, 0x43, 0xE9, 0x9F, 0xAD, 0x43, 0xE9, + 0x9F, 0xB3, 0x43, 0xE9, 0x9F, 0xBF, 0x43, 0xE9, + 0xA0, 0x81, 0x43, 0xE9, 0xA0, 0x85, 0x43, 0xE9, + 0xA0, 0x8B, 0x43, 0xE9, 0xA0, 0x98, 0x43, 0xE9, + 0xA0, 0xA9, 0x43, 0xE9, 0xA0, 0xBB, 0x43, 0xE9, + 0xA1, 0x9E, 0x43, 0xE9, 0xA2, 0xA8, 0x43, 0xE9, + // Bytes 1640 - 167f + 0xA3, 0x9B, 0x43, 0xE9, 0xA3, 0x9F, 0x43, 0xE9, + 0xA3, 0xA2, 0x43, 0xE9, 0xA3, 0xAF, 0x43, 0xE9, + 0xA3, 0xBC, 0x43, 0xE9, 0xA4, 0xA8, 0x43, 0xE9, + 0xA4, 0xA9, 0x43, 0xE9, 0xA6, 0x96, 0x43, 0xE9, + 0xA6, 0x99, 0x43, 0xE9, 0xA6, 0xA7, 0x43, 0xE9, + 0xA6, 0xAC, 0x43, 0xE9, 0xA7, 0x82, 0x43, 0xE9, + 0xA7, 0xB1, 0x43, 0xE9, 0xA7, 0xBE, 0x43, 0xE9, + 0xA9, 0xAA, 0x43, 0xE9, 0xAA, 0xA8, 0x43, 0xE9, + // Bytes 1680 - 16bf + 0xAB, 0x98, 0x43, 0xE9, 0xAB, 0x9F, 0x43, 0xE9, + 0xAC, 0x92, 0x43, 0xE9, 0xAC, 0xA5, 0x43, 0xE9, + 0xAC, 0xAF, 0x43, 0xE9, 0xAC, 0xB2, 0x43, 0xE9, + 0xAC, 0xBC, 0x43, 0xE9, 0xAD, 0x9A, 0x43, 0xE9, + 0xAD, 0xAF, 0x43, 0xE9, 0xB1, 0x80, 0x43, 0xE9, + 0xB1, 0x97, 0x43, 0xE9, 0xB3, 0xA5, 0x43, 0xE9, + 0xB3, 0xBD, 0x43, 0xE9, 0xB5, 0xA7, 0x43, 0xE9, + 0xB6, 0xB4, 0x43, 0xE9, 0xB7, 0xBA, 0x43, 0xE9, + // Bytes 16c0 - 16ff + 0xB8, 0x9E, 0x43, 0xE9, 0xB9, 0xB5, 0x43, 0xE9, + 0xB9, 0xBF, 0x43, 0xE9, 0xBA, 0x97, 0x43, 0xE9, + 0xBA, 0x9F, 0x43, 0xE9, 0xBA, 0xA5, 0x43, 0xE9, + 0xBA, 0xBB, 0x43, 0xE9, 0xBB, 0x83, 0x43, 0xE9, + 0xBB, 0x8D, 0x43, 0xE9, 0xBB, 0x8E, 0x43, 0xE9, + 0xBB, 0x91, 0x43, 0xE9, 0xBB, 0xB9, 0x43, 0xE9, + 0xBB, 0xBD, 0x43, 0xE9, 0xBB, 0xBE, 0x43, 0xE9, + 0xBC, 0x85, 0x43, 0xE9, 0xBC, 0x8E, 0x43, 0xE9, + // Bytes 1700 - 173f + 0xBC, 0x8F, 0x43, 0xE9, 0xBC, 0x93, 0x43, 0xE9, + 0xBC, 0x96, 0x43, 0xE9, 0xBC, 0xA0, 0x43, 0xE9, + 0xBC, 0xBB, 0x43, 0xE9, 0xBD, 0x83, 0x43, 0xE9, + 0xBD, 0x8A, 0x43, 0xE9, 0xBD, 0x92, 0x43, 0xE9, + 0xBE, 0x8D, 0x43, 0xE9, 0xBE, 0x8E, 0x43, 0xE9, + 0xBE, 0x9C, 0x43, 0xE9, 0xBE, 0x9F, 0x43, 0xE9, + 0xBE, 0xA0, 0x43, 0xEA, 0x99, 0x91, 0x43, 0xEA, + 0x9A, 0x89, 0x43, 0xEA, 0x9C, 0xA7, 0x43, 0xEA, + // Bytes 1740 - 177f + 0x9D, 0xAF, 0x43, 0xEA, 0x9E, 0x8E, 0x43, 0xEA, + 0xAC, 0xB7, 0x43, 0xEA, 0xAD, 0x92, 0x43, 0xEA, + 0xAD, 0xA6, 0x43, 0xEA, 0xAD, 0xA7, 0x44, 0xF0, + 0x9D, 0xBC, 0x84, 0x44, 0xF0, 0x9D, 0xBC, 0x85, + 0x44, 0xF0, 0x9D, 0xBC, 0x86, 0x44, 0xF0, 0x9D, + 0xBC, 0x88, 0x44, 0xF0, 0x9D, 0xBC, 0x8A, 0x44, + 0xF0, 0x9D, 0xBC, 0x9E, 0x44, 0xF0, 0xA0, 0x84, + 0xA2, 0x44, 0xF0, 0xA0, 0x94, 0x9C, 0x44, 0xF0, + // Bytes 1780 - 17bf + 0xA0, 0x94, 0xA5, 0x44, 0xF0, 0xA0, 0x95, 0x8B, + 0x44, 0xF0, 0xA0, 0x98, 0xBA, 0x44, 0xF0, 0xA0, + 0xA0, 0x84, 0x44, 0xF0, 0xA0, 0xA3, 0x9E, 0x44, + 0xF0, 0xA0, 0xA8, 0xAC, 0x44, 0xF0, 0xA0, 0xAD, + 0xA3, 0x44, 0xF0, 0xA1, 0x93, 0xA4, 0x44, 0xF0, + 0xA1, 0x9A, 0xA8, 0x44, 0xF0, 0xA1, 0x9B, 0xAA, + 0x44, 0xF0, 0xA1, 0xA7, 0x88, 0x44, 0xF0, 0xA1, + 0xAC, 0x98, 0x44, 0xF0, 0xA1, 0xB4, 0x8B, 0x44, + // Bytes 17c0 - 17ff + 0xF0, 0xA1, 0xB7, 0xA4, 0x44, 0xF0, 0xA1, 0xB7, + 0xA6, 0x44, 0xF0, 0xA2, 0x86, 0x83, 0x44, 0xF0, + 0xA2, 0x86, 0x9F, 0x44, 0xF0, 0xA2, 0x8C, 0xB1, + 0x44, 0xF0, 0xA2, 0x9B, 0x94, 0x44, 0xF0, 0xA2, + 0xA1, 0x84, 0x44, 0xF0, 0xA2, 0xA1, 0x8A, 0x44, + 0xF0, 0xA2, 0xAC, 0x8C, 0x44, 0xF0, 0xA2, 0xAF, + 0xB1, 0x44, 0xF0, 0xA3, 0x80, 0x8A, 0x44, 0xF0, + 0xA3, 0x8A, 0xB8, 0x44, 0xF0, 0xA3, 0x8D, 0x9F, + // 
Bytes 1800 - 183f + 0x44, 0xF0, 0xA3, 0x8E, 0x93, 0x44, 0xF0, 0xA3, + 0x8E, 0x9C, 0x44, 0xF0, 0xA3, 0x8F, 0x83, 0x44, + 0xF0, 0xA3, 0x8F, 0x95, 0x44, 0xF0, 0xA3, 0x91, + 0xAD, 0x44, 0xF0, 0xA3, 0x9A, 0xA3, 0x44, 0xF0, + 0xA3, 0xA2, 0xA7, 0x44, 0xF0, 0xA3, 0xAA, 0x8D, + 0x44, 0xF0, 0xA3, 0xAB, 0xBA, 0x44, 0xF0, 0xA3, + 0xB2, 0xBC, 0x44, 0xF0, 0xA3, 0xB4, 0x9E, 0x44, + 0xF0, 0xA3, 0xBB, 0x91, 0x44, 0xF0, 0xA3, 0xBD, + // Bytes 1840 - 187f + 0x9E, 0x44, 0xF0, 0xA3, 0xBE, 0x8E, 0x44, 0xF0, + 0xA4, 0x89, 0xA3, 0x44, 0xF0, 0xA4, 0x8B, 0xAE, + 0x44, 0xF0, 0xA4, 0x8E, 0xAB, 0x44, 0xF0, 0xA4, + 0x98, 0x88, 0x44, 0xF0, 0xA4, 0x9C, 0xB5, 0x44, + 0xF0, 0xA4, 0xA0, 0x94, 0x44, 0xF0, 0xA4, 0xB0, + 0xB6, 0x44, 0xF0, 0xA4, 0xB2, 0x92, 0x44, 0xF0, + 0xA4, 0xBE, 0xA1, 0x44, 0xF0, 0xA4, 0xBE, 0xB8, + 0x44, 0xF0, 0xA5, 0x81, 0x84, 0x44, 0xF0, 0xA5, + // Bytes 1880 - 18bf + 0x83, 0xB2, 0x44, 0xF0, 0xA5, 0x83, 0xB3, 0x44, + 0xF0, 0xA5, 0x84, 0x99, 0x44, 0xF0, 0xA5, 0x84, + 0xB3, 0x44, 0xF0, 0xA5, 0x89, 0x89, 0x44, 0xF0, + 0xA5, 0x90, 0x9D, 0x44, 0xF0, 0xA5, 0x98, 0xA6, + 0x44, 0xF0, 0xA5, 0x9A, 0x9A, 0x44, 0xF0, 0xA5, + 0x9B, 0x85, 0x44, 0xF0, 0xA5, 0xA5, 0xBC, 0x44, + 0xF0, 0xA5, 0xAA, 0xA7, 0x44, 0xF0, 0xA5, 0xAE, + 0xAB, 0x44, 0xF0, 0xA5, 0xB2, 0x80, 0x44, 0xF0, + // Bytes 18c0 - 18ff + 0xA5, 0xB3, 0x90, 0x44, 0xF0, 0xA5, 0xBE, 0x86, + 0x44, 0xF0, 0xA6, 0x87, 0x9A, 0x44, 0xF0, 0xA6, + 0x88, 0xA8, 0x44, 0xF0, 0xA6, 0x89, 0x87, 0x44, + 0xF0, 0xA6, 0x8B, 0x99, 0x44, 0xF0, 0xA6, 0x8C, + 0xBE, 0x44, 0xF0, 0xA6, 0x93, 0x9A, 0x44, 0xF0, + 0xA6, 0x94, 0xA3, 0x44, 0xF0, 0xA6, 0x96, 0xA8, + 0x44, 0xF0, 0xA6, 0x9E, 0xA7, 0x44, 0xF0, 0xA6, + 0x9E, 0xB5, 0x44, 0xF0, 0xA6, 0xAC, 0xBC, 0x44, + // Bytes 1900 - 193f + 0xF0, 0xA6, 0xB0, 0xB6, 0x44, 0xF0, 0xA6, 0xB3, + 0x95, 0x44, 0xF0, 0xA6, 0xB5, 0xAB, 0x44, 0xF0, + 0xA6, 0xBC, 0xAC, 0x44, 0xF0, 0xA6, 0xBE, 0xB1, + 0x44, 0xF0, 0xA7, 0x83, 0x92, 0x44, 0xF0, 0xA7, + 0x8F, 0x8A, 0x44, 0xF0, 0xA7, 0x99, 0xA7, 0x44, + 0xF0, 0xA7, 0xA2, 0xAE, 0x44, 0xF0, 0xA7, 0xA5, + 0xA6, 0x44, 0xF0, 0xA7, 0xB2, 0xA8, 0x44, 0xF0, + 0xA7, 0xBB, 0x93, 0x44, 0xF0, 0xA7, 0xBC, 0xAF, + // Bytes 1940 - 197f + 0x44, 0xF0, 0xA8, 0x97, 0x92, 0x44, 0xF0, 0xA8, + 0x97, 0xAD, 0x44, 0xF0, 0xA8, 0x9C, 0xAE, 0x44, + 0xF0, 0xA8, 0xAF, 0xBA, 0x44, 0xF0, 0xA8, 0xB5, + 0xB7, 0x44, 0xF0, 0xA9, 0x85, 0x85, 0x44, 0xF0, + 0xA9, 0x87, 0x9F, 0x44, 0xF0, 0xA9, 0x88, 0x9A, + 0x44, 0xF0, 0xA9, 0x90, 0x8A, 0x44, 0xF0, 0xA9, + 0x92, 0x96, 0x44, 0xF0, 0xA9, 0x96, 0xB6, 0x44, + 0xF0, 0xA9, 0xAC, 0xB0, 0x44, 0xF0, 0xAA, 0x83, + // Bytes 1980 - 19bf + 0x8E, 0x44, 0xF0, 0xAA, 0x84, 0x85, 0x44, 0xF0, + 0xAA, 0x88, 0x8E, 0x44, 0xF0, 0xAA, 0x8A, 0x91, + 0x44, 0xF0, 0xAA, 0x8E, 0x92, 0x44, 0xF0, 0xAA, + 0x98, 0x80, 0x42, 0x21, 0x21, 0x42, 0x21, 0x3F, + 0x42, 0x2E, 0x2E, 0x42, 0x30, 0x2C, 0x42, 0x30, + 0x2E, 0x42, 0x31, 0x2C, 0x42, 0x31, 0x2E, 0x42, + 0x31, 0x30, 0x42, 0x31, 0x31, 0x42, 0x31, 0x32, + 0x42, 0x31, 0x33, 0x42, 0x31, 0x34, 0x42, 0x31, + // Bytes 19c0 - 19ff + 0x35, 0x42, 0x31, 0x36, 0x42, 0x31, 0x37, 0x42, + 0x31, 0x38, 0x42, 0x31, 0x39, 0x42, 0x32, 0x2C, + 0x42, 0x32, 0x2E, 0x42, 0x32, 0x30, 0x42, 0x32, + 0x31, 0x42, 0x32, 0x32, 0x42, 0x32, 0x33, 0x42, + 0x32, 0x34, 0x42, 0x32, 0x35, 0x42, 0x32, 0x36, + 0x42, 0x32, 0x37, 0x42, 0x32, 0x38, 0x42, 0x32, + 0x39, 0x42, 0x33, 0x2C, 0x42, 0x33, 0x2E, 0x42, + 0x33, 0x30, 0x42, 0x33, 0x31, 0x42, 0x33, 0x32, + // Bytes 1a00 - 1a3f + 0x42, 0x33, 0x33, 0x42, 0x33, 0x34, 0x42, 0x33, + 0x35, 0x42, 0x33, 0x36, 0x42, 0x33, 0x37, 0x42, + 0x33, 0x38, 0x42, 0x33, 0x39, 0x42, 0x34, 0x2C, + 
0x42, 0x34, 0x2E, 0x42, 0x34, 0x30, 0x42, 0x34, + 0x31, 0x42, 0x34, 0x32, 0x42, 0x34, 0x33, 0x42, + 0x34, 0x34, 0x42, 0x34, 0x35, 0x42, 0x34, 0x36, + 0x42, 0x34, 0x37, 0x42, 0x34, 0x38, 0x42, 0x34, + 0x39, 0x42, 0x35, 0x2C, 0x42, 0x35, 0x2E, 0x42, + // Bytes 1a40 - 1a7f + 0x35, 0x30, 0x42, 0x36, 0x2C, 0x42, 0x36, 0x2E, + 0x42, 0x37, 0x2C, 0x42, 0x37, 0x2E, 0x42, 0x38, + 0x2C, 0x42, 0x38, 0x2E, 0x42, 0x39, 0x2C, 0x42, + 0x39, 0x2E, 0x42, 0x3D, 0x3D, 0x42, 0x3F, 0x21, + 0x42, 0x3F, 0x3F, 0x42, 0x41, 0x55, 0x42, 0x42, + 0x71, 0x42, 0x43, 0x44, 0x42, 0x44, 0x4A, 0x42, + 0x44, 0x5A, 0x42, 0x44, 0x7A, 0x42, 0x47, 0x42, + 0x42, 0x47, 0x79, 0x42, 0x48, 0x50, 0x42, 0x48, + // Bytes 1a80 - 1abf + 0x56, 0x42, 0x48, 0x67, 0x42, 0x48, 0x7A, 0x42, + 0x49, 0x49, 0x42, 0x49, 0x4A, 0x42, 0x49, 0x55, + 0x42, 0x49, 0x56, 0x42, 0x49, 0x58, 0x42, 0x4B, + 0x42, 0x42, 0x4B, 0x4B, 0x42, 0x4B, 0x4D, 0x42, + 0x4C, 0x4A, 0x42, 0x4C, 0x6A, 0x42, 0x4D, 0x42, + 0x42, 0x4D, 0x43, 0x42, 0x4D, 0x44, 0x42, 0x4D, + 0x52, 0x42, 0x4D, 0x56, 0x42, 0x4D, 0x57, 0x42, + 0x4E, 0x4A, 0x42, 0x4E, 0x6A, 0x42, 0x4E, 0x6F, + // Bytes 1ac0 - 1aff + 0x42, 0x50, 0x48, 0x42, 0x50, 0x52, 0x42, 0x50, + 0x61, 0x42, 0x52, 0x73, 0x42, 0x53, 0x44, 0x42, + 0x53, 0x4D, 0x42, 0x53, 0x53, 0x42, 0x53, 0x76, + 0x42, 0x54, 0x4D, 0x42, 0x56, 0x49, 0x42, 0x57, + 0x43, 0x42, 0x57, 0x5A, 0x42, 0x57, 0x62, 0x42, + 0x58, 0x49, 0x42, 0x63, 0x63, 0x42, 0x63, 0x64, + 0x42, 0x63, 0x6D, 0x42, 0x64, 0x42, 0x42, 0x64, + 0x61, 0x42, 0x64, 0x6C, 0x42, 0x64, 0x6D, 0x42, + // Bytes 1b00 - 1b3f + 0x64, 0x7A, 0x42, 0x65, 0x56, 0x42, 0x66, 0x66, + 0x42, 0x66, 0x69, 0x42, 0x66, 0x6C, 0x42, 0x66, + 0x6D, 0x42, 0x68, 0x61, 0x42, 0x69, 0x69, 0x42, + 0x69, 0x6A, 0x42, 0x69, 0x6E, 0x42, 0x69, 0x76, + 0x42, 0x69, 0x78, 0x42, 0x6B, 0x41, 0x42, 0x6B, + 0x56, 0x42, 0x6B, 0x57, 0x42, 0x6B, 0x67, 0x42, + 0x6B, 0x6C, 0x42, 0x6B, 0x6D, 0x42, 0x6B, 0x74, + 0x42, 0x6C, 0x6A, 0x42, 0x6C, 0x6D, 0x42, 0x6C, + // Bytes 1b40 - 1b7f + 0x6E, 0x42, 0x6C, 0x78, 0x42, 0x6D, 0x32, 0x42, + 0x6D, 0x33, 0x42, 0x6D, 0x41, 0x42, 0x6D, 0x56, + 0x42, 0x6D, 0x57, 0x42, 0x6D, 0x62, 0x42, 0x6D, + 0x67, 0x42, 0x6D, 0x6C, 0x42, 0x6D, 0x6D, 0x42, + 0x6D, 0x73, 0x42, 0x6E, 0x41, 0x42, 0x6E, 0x46, + 0x42, 0x6E, 0x56, 0x42, 0x6E, 0x57, 0x42, 0x6E, + 0x6A, 0x42, 0x6E, 0x6D, 0x42, 0x6E, 0x73, 0x42, + 0x6F, 0x56, 0x42, 0x70, 0x41, 0x42, 0x70, 0x46, + // Bytes 1b80 - 1bbf + 0x42, 0x70, 0x56, 0x42, 0x70, 0x57, 0x42, 0x70, + 0x63, 0x42, 0x70, 0x73, 0x42, 0x73, 0x72, 0x42, + 0x73, 0x74, 0x42, 0x76, 0x69, 0x42, 0x78, 0x69, + 0x43, 0x28, 0x31, 0x29, 0x43, 0x28, 0x32, 0x29, + 0x43, 0x28, 0x33, 0x29, 0x43, 0x28, 0x34, 0x29, + 0x43, 0x28, 0x35, 0x29, 0x43, 0x28, 0x36, 0x29, + 0x43, 0x28, 0x37, 0x29, 0x43, 0x28, 0x38, 0x29, + 0x43, 0x28, 0x39, 0x29, 0x43, 0x28, 0x41, 0x29, + // Bytes 1bc0 - 1bff + 0x43, 0x28, 0x42, 0x29, 0x43, 0x28, 0x43, 0x29, + 0x43, 0x28, 0x44, 0x29, 0x43, 0x28, 0x45, 0x29, + 0x43, 0x28, 0x46, 0x29, 0x43, 0x28, 0x47, 0x29, + 0x43, 0x28, 0x48, 0x29, 0x43, 0x28, 0x49, 0x29, + 0x43, 0x28, 0x4A, 0x29, 0x43, 0x28, 0x4B, 0x29, + 0x43, 0x28, 0x4C, 0x29, 0x43, 0x28, 0x4D, 0x29, + 0x43, 0x28, 0x4E, 0x29, 0x43, 0x28, 0x4F, 0x29, + 0x43, 0x28, 0x50, 0x29, 0x43, 0x28, 0x51, 0x29, + // Bytes 1c00 - 1c3f + 0x43, 0x28, 0x52, 0x29, 0x43, 0x28, 0x53, 0x29, + 0x43, 0x28, 0x54, 0x29, 0x43, 0x28, 0x55, 0x29, + 0x43, 0x28, 0x56, 0x29, 0x43, 0x28, 0x57, 0x29, + 0x43, 0x28, 0x58, 0x29, 0x43, 0x28, 0x59, 0x29, + 0x43, 0x28, 0x5A, 0x29, 0x43, 0x28, 0x61, 0x29, + 0x43, 0x28, 0x62, 0x29, 0x43, 0x28, 0x63, 0x29, + 0x43, 0x28, 0x64, 
0x29, 0x43, 0x28, 0x65, 0x29, + 0x43, 0x28, 0x66, 0x29, 0x43, 0x28, 0x67, 0x29, + // Bytes 1c40 - 1c7f + 0x43, 0x28, 0x68, 0x29, 0x43, 0x28, 0x69, 0x29, + 0x43, 0x28, 0x6A, 0x29, 0x43, 0x28, 0x6B, 0x29, + 0x43, 0x28, 0x6C, 0x29, 0x43, 0x28, 0x6D, 0x29, + 0x43, 0x28, 0x6E, 0x29, 0x43, 0x28, 0x6F, 0x29, + 0x43, 0x28, 0x70, 0x29, 0x43, 0x28, 0x71, 0x29, + 0x43, 0x28, 0x72, 0x29, 0x43, 0x28, 0x73, 0x29, + 0x43, 0x28, 0x74, 0x29, 0x43, 0x28, 0x75, 0x29, + 0x43, 0x28, 0x76, 0x29, 0x43, 0x28, 0x77, 0x29, + // Bytes 1c80 - 1cbf + 0x43, 0x28, 0x78, 0x29, 0x43, 0x28, 0x79, 0x29, + 0x43, 0x28, 0x7A, 0x29, 0x43, 0x2E, 0x2E, 0x2E, + 0x43, 0x31, 0x30, 0x2E, 0x43, 0x31, 0x31, 0x2E, + 0x43, 0x31, 0x32, 0x2E, 0x43, 0x31, 0x33, 0x2E, + 0x43, 0x31, 0x34, 0x2E, 0x43, 0x31, 0x35, 0x2E, + 0x43, 0x31, 0x36, 0x2E, 0x43, 0x31, 0x37, 0x2E, + 0x43, 0x31, 0x38, 0x2E, 0x43, 0x31, 0x39, 0x2E, + 0x43, 0x32, 0x30, 0x2E, 0x43, 0x3A, 0x3A, 0x3D, + // Bytes 1cc0 - 1cff + 0x43, 0x3D, 0x3D, 0x3D, 0x43, 0x43, 0x6F, 0x2E, + 0x43, 0x46, 0x41, 0x58, 0x43, 0x47, 0x48, 0x7A, + 0x43, 0x47, 0x50, 0x61, 0x43, 0x49, 0x49, 0x49, + 0x43, 0x4C, 0x54, 0x44, 0x43, 0x4C, 0xC2, 0xB7, + 0x43, 0x4D, 0x48, 0x7A, 0x43, 0x4D, 0x50, 0x61, + 0x43, 0x4D, 0xCE, 0xA9, 0x43, 0x50, 0x50, 0x4D, + 0x43, 0x50, 0x50, 0x56, 0x43, 0x50, 0x54, 0x45, + 0x43, 0x54, 0x45, 0x4C, 0x43, 0x54, 0x48, 0x7A, + // Bytes 1d00 - 1d3f + 0x43, 0x56, 0x49, 0x49, 0x43, 0x58, 0x49, 0x49, + 0x43, 0x61, 0x2F, 0x63, 0x43, 0x61, 0x2F, 0x73, + 0x43, 0x61, 0xCA, 0xBE, 0x43, 0x62, 0x61, 0x72, + 0x43, 0x63, 0x2F, 0x6F, 0x43, 0x63, 0x2F, 0x75, + 0x43, 0x63, 0x61, 0x6C, 0x43, 0x63, 0x6D, 0x32, + 0x43, 0x63, 0x6D, 0x33, 0x43, 0x64, 0x6D, 0x32, + 0x43, 0x64, 0x6D, 0x33, 0x43, 0x65, 0x72, 0x67, + 0x43, 0x66, 0x66, 0x69, 0x43, 0x66, 0x66, 0x6C, + // Bytes 1d40 - 1d7f + 0x43, 0x67, 0x61, 0x6C, 0x43, 0x68, 0x50, 0x61, + 0x43, 0x69, 0x69, 0x69, 0x43, 0x6B, 0x48, 0x7A, + 0x43, 0x6B, 0x50, 0x61, 0x43, 0x6B, 0x6D, 0x32, + 0x43, 0x6B, 0x6D, 0x33, 0x43, 0x6B, 0xCE, 0xA9, + 0x43, 0x6C, 0x6F, 0x67, 0x43, 0x6C, 0xC2, 0xB7, + 0x43, 0x6D, 0x69, 0x6C, 0x43, 0x6D, 0x6D, 0x32, + 0x43, 0x6D, 0x6D, 0x33, 0x43, 0x6D, 0x6F, 0x6C, + 0x43, 0x72, 0x61, 0x64, 0x43, 0x76, 0x69, 0x69, + // Bytes 1d80 - 1dbf + 0x43, 0x78, 0x69, 0x69, 0x43, 0xC2, 0xB0, 0x43, + 0x43, 0xC2, 0xB0, 0x46, 0x43, 0xCA, 0xBC, 0x6E, + 0x43, 0xCE, 0xBC, 0x41, 0x43, 0xCE, 0xBC, 0x46, + 0x43, 0xCE, 0xBC, 0x56, 0x43, 0xCE, 0xBC, 0x57, + 0x43, 0xCE, 0xBC, 0x67, 0x43, 0xCE, 0xBC, 0x6C, + 0x43, 0xCE, 0xBC, 0x6D, 0x43, 0xCE, 0xBC, 0x73, + 0x44, 0x28, 0x31, 0x30, 0x29, 0x44, 0x28, 0x31, + 0x31, 0x29, 0x44, 0x28, 0x31, 0x32, 0x29, 0x44, + // Bytes 1dc0 - 1dff + 0x28, 0x31, 0x33, 0x29, 0x44, 0x28, 0x31, 0x34, + 0x29, 0x44, 0x28, 0x31, 0x35, 0x29, 0x44, 0x28, + 0x31, 0x36, 0x29, 0x44, 0x28, 0x31, 0x37, 0x29, + 0x44, 0x28, 0x31, 0x38, 0x29, 0x44, 0x28, 0x31, + 0x39, 0x29, 0x44, 0x28, 0x32, 0x30, 0x29, 0x44, + 0x30, 0xE7, 0x82, 0xB9, 0x44, 0x31, 0xE2, 0x81, + 0x84, 0x44, 0x31, 0xE6, 0x97, 0xA5, 0x44, 0x31, + 0xE6, 0x9C, 0x88, 0x44, 0x31, 0xE7, 0x82, 0xB9, + // Bytes 1e00 - 1e3f + 0x44, 0x32, 0xE6, 0x97, 0xA5, 0x44, 0x32, 0xE6, + 0x9C, 0x88, 0x44, 0x32, 0xE7, 0x82, 0xB9, 0x44, + 0x33, 0xE6, 0x97, 0xA5, 0x44, 0x33, 0xE6, 0x9C, + 0x88, 0x44, 0x33, 0xE7, 0x82, 0xB9, 0x44, 0x34, + 0xE6, 0x97, 0xA5, 0x44, 0x34, 0xE6, 0x9C, 0x88, + 0x44, 0x34, 0xE7, 0x82, 0xB9, 0x44, 0x35, 0xE6, + 0x97, 0xA5, 0x44, 0x35, 0xE6, 0x9C, 0x88, 0x44, + 0x35, 0xE7, 0x82, 0xB9, 0x44, 0x36, 0xE6, 0x97, + // Bytes 1e40 - 1e7f + 0xA5, 0x44, 0x36, 0xE6, 0x9C, 0x88, 0x44, 0x36, + 0xE7, 0x82, 
0xB9, 0x44, 0x37, 0xE6, 0x97, 0xA5, + 0x44, 0x37, 0xE6, 0x9C, 0x88, 0x44, 0x37, 0xE7, + 0x82, 0xB9, 0x44, 0x38, 0xE6, 0x97, 0xA5, 0x44, + 0x38, 0xE6, 0x9C, 0x88, 0x44, 0x38, 0xE7, 0x82, + 0xB9, 0x44, 0x39, 0xE6, 0x97, 0xA5, 0x44, 0x39, + 0xE6, 0x9C, 0x88, 0x44, 0x39, 0xE7, 0x82, 0xB9, + 0x44, 0x56, 0x49, 0x49, 0x49, 0x44, 0x61, 0x2E, + // Bytes 1e80 - 1ebf + 0x6D, 0x2E, 0x44, 0x6B, 0x63, 0x61, 0x6C, 0x44, + 0x70, 0x2E, 0x6D, 0x2E, 0x44, 0x76, 0x69, 0x69, + 0x69, 0x44, 0xD5, 0xA5, 0xD6, 0x82, 0x44, 0xD5, + 0xB4, 0xD5, 0xA5, 0x44, 0xD5, 0xB4, 0xD5, 0xAB, + 0x44, 0xD5, 0xB4, 0xD5, 0xAD, 0x44, 0xD5, 0xB4, + 0xD5, 0xB6, 0x44, 0xD5, 0xBE, 0xD5, 0xB6, 0x44, + 0xD7, 0x90, 0xD7, 0x9C, 0x44, 0xD8, 0xA7, 0xD9, + 0xB4, 0x44, 0xD8, 0xA8, 0xD8, 0xAC, 0x44, 0xD8, + // Bytes 1ec0 - 1eff + 0xA8, 0xD8, 0xAD, 0x44, 0xD8, 0xA8, 0xD8, 0xAE, + 0x44, 0xD8, 0xA8, 0xD8, 0xB1, 0x44, 0xD8, 0xA8, + 0xD8, 0xB2, 0x44, 0xD8, 0xA8, 0xD9, 0x85, 0x44, + 0xD8, 0xA8, 0xD9, 0x86, 0x44, 0xD8, 0xA8, 0xD9, + 0x87, 0x44, 0xD8, 0xA8, 0xD9, 0x89, 0x44, 0xD8, + 0xA8, 0xD9, 0x8A, 0x44, 0xD8, 0xAA, 0xD8, 0xAC, + 0x44, 0xD8, 0xAA, 0xD8, 0xAD, 0x44, 0xD8, 0xAA, + 0xD8, 0xAE, 0x44, 0xD8, 0xAA, 0xD8, 0xB1, 0x44, + // Bytes 1f00 - 1f3f + 0xD8, 0xAA, 0xD8, 0xB2, 0x44, 0xD8, 0xAA, 0xD9, + 0x85, 0x44, 0xD8, 0xAA, 0xD9, 0x86, 0x44, 0xD8, + 0xAA, 0xD9, 0x87, 0x44, 0xD8, 0xAA, 0xD9, 0x89, + 0x44, 0xD8, 0xAA, 0xD9, 0x8A, 0x44, 0xD8, 0xAB, + 0xD8, 0xAC, 0x44, 0xD8, 0xAB, 0xD8, 0xB1, 0x44, + 0xD8, 0xAB, 0xD8, 0xB2, 0x44, 0xD8, 0xAB, 0xD9, + 0x85, 0x44, 0xD8, 0xAB, 0xD9, 0x86, 0x44, 0xD8, + 0xAB, 0xD9, 0x87, 0x44, 0xD8, 0xAB, 0xD9, 0x89, + // Bytes 1f40 - 1f7f + 0x44, 0xD8, 0xAB, 0xD9, 0x8A, 0x44, 0xD8, 0xAC, + 0xD8, 0xAD, 0x44, 0xD8, 0xAC, 0xD9, 0x85, 0x44, + 0xD8, 0xAC, 0xD9, 0x89, 0x44, 0xD8, 0xAC, 0xD9, + 0x8A, 0x44, 0xD8, 0xAD, 0xD8, 0xAC, 0x44, 0xD8, + 0xAD, 0xD9, 0x85, 0x44, 0xD8, 0xAD, 0xD9, 0x89, + 0x44, 0xD8, 0xAD, 0xD9, 0x8A, 0x44, 0xD8, 0xAE, + 0xD8, 0xAC, 0x44, 0xD8, 0xAE, 0xD8, 0xAD, 0x44, + 0xD8, 0xAE, 0xD9, 0x85, 0x44, 0xD8, 0xAE, 0xD9, + // Bytes 1f80 - 1fbf + 0x89, 0x44, 0xD8, 0xAE, 0xD9, 0x8A, 0x44, 0xD8, + 0xB3, 0xD8, 0xAC, 0x44, 0xD8, 0xB3, 0xD8, 0xAD, + 0x44, 0xD8, 0xB3, 0xD8, 0xAE, 0x44, 0xD8, 0xB3, + 0xD8, 0xB1, 0x44, 0xD8, 0xB3, 0xD9, 0x85, 0x44, + 0xD8, 0xB3, 0xD9, 0x87, 0x44, 0xD8, 0xB3, 0xD9, + 0x89, 0x44, 0xD8, 0xB3, 0xD9, 0x8A, 0x44, 0xD8, + 0xB4, 0xD8, 0xAC, 0x44, 0xD8, 0xB4, 0xD8, 0xAD, + 0x44, 0xD8, 0xB4, 0xD8, 0xAE, 0x44, 0xD8, 0xB4, + // Bytes 1fc0 - 1fff + 0xD8, 0xB1, 0x44, 0xD8, 0xB4, 0xD9, 0x85, 0x44, + 0xD8, 0xB4, 0xD9, 0x87, 0x44, 0xD8, 0xB4, 0xD9, + 0x89, 0x44, 0xD8, 0xB4, 0xD9, 0x8A, 0x44, 0xD8, + 0xB5, 0xD8, 0xAD, 0x44, 0xD8, 0xB5, 0xD8, 0xAE, + 0x44, 0xD8, 0xB5, 0xD8, 0xB1, 0x44, 0xD8, 0xB5, + 0xD9, 0x85, 0x44, 0xD8, 0xB5, 0xD9, 0x89, 0x44, + 0xD8, 0xB5, 0xD9, 0x8A, 0x44, 0xD8, 0xB6, 0xD8, + 0xAC, 0x44, 0xD8, 0xB6, 0xD8, 0xAD, 0x44, 0xD8, + // Bytes 2000 - 203f + 0xB6, 0xD8, 0xAE, 0x44, 0xD8, 0xB6, 0xD8, 0xB1, + 0x44, 0xD8, 0xB6, 0xD9, 0x85, 0x44, 0xD8, 0xB6, + 0xD9, 0x89, 0x44, 0xD8, 0xB6, 0xD9, 0x8A, 0x44, + 0xD8, 0xB7, 0xD8, 0xAD, 0x44, 0xD8, 0xB7, 0xD9, + 0x85, 0x44, 0xD8, 0xB7, 0xD9, 0x89, 0x44, 0xD8, + 0xB7, 0xD9, 0x8A, 0x44, 0xD8, 0xB8, 0xD9, 0x85, + 0x44, 0xD8, 0xB9, 0xD8, 0xAC, 0x44, 0xD8, 0xB9, + 0xD9, 0x85, 0x44, 0xD8, 0xB9, 0xD9, 0x89, 0x44, + // Bytes 2040 - 207f + 0xD8, 0xB9, 0xD9, 0x8A, 0x44, 0xD8, 0xBA, 0xD8, + 0xAC, 0x44, 0xD8, 0xBA, 0xD9, 0x85, 0x44, 0xD8, + 0xBA, 0xD9, 0x89, 0x44, 0xD8, 0xBA, 0xD9, 0x8A, + 0x44, 0xD9, 0x81, 0xD8, 0xAC, 0x44, 0xD9, 0x81, + 0xD8, 0xAD, 0x44, 0xD9, 0x81, 
0xD8, 0xAE, 0x44, + 0xD9, 0x81, 0xD9, 0x85, 0x44, 0xD9, 0x81, 0xD9, + 0x89, 0x44, 0xD9, 0x81, 0xD9, 0x8A, 0x44, 0xD9, + 0x82, 0xD8, 0xAD, 0x44, 0xD9, 0x82, 0xD9, 0x85, + // Bytes 2080 - 20bf + 0x44, 0xD9, 0x82, 0xD9, 0x89, 0x44, 0xD9, 0x82, + 0xD9, 0x8A, 0x44, 0xD9, 0x83, 0xD8, 0xA7, 0x44, + 0xD9, 0x83, 0xD8, 0xAC, 0x44, 0xD9, 0x83, 0xD8, + 0xAD, 0x44, 0xD9, 0x83, 0xD8, 0xAE, 0x44, 0xD9, + 0x83, 0xD9, 0x84, 0x44, 0xD9, 0x83, 0xD9, 0x85, + 0x44, 0xD9, 0x83, 0xD9, 0x89, 0x44, 0xD9, 0x83, + 0xD9, 0x8A, 0x44, 0xD9, 0x84, 0xD8, 0xA7, 0x44, + 0xD9, 0x84, 0xD8, 0xAC, 0x44, 0xD9, 0x84, 0xD8, + // Bytes 20c0 - 20ff + 0xAD, 0x44, 0xD9, 0x84, 0xD8, 0xAE, 0x44, 0xD9, + 0x84, 0xD9, 0x85, 0x44, 0xD9, 0x84, 0xD9, 0x87, + 0x44, 0xD9, 0x84, 0xD9, 0x89, 0x44, 0xD9, 0x84, + 0xD9, 0x8A, 0x44, 0xD9, 0x85, 0xD8, 0xA7, 0x44, + 0xD9, 0x85, 0xD8, 0xAC, 0x44, 0xD9, 0x85, 0xD8, + 0xAD, 0x44, 0xD9, 0x85, 0xD8, 0xAE, 0x44, 0xD9, + 0x85, 0xD9, 0x85, 0x44, 0xD9, 0x85, 0xD9, 0x89, + 0x44, 0xD9, 0x85, 0xD9, 0x8A, 0x44, 0xD9, 0x86, + // Bytes 2100 - 213f + 0xD8, 0xAC, 0x44, 0xD9, 0x86, 0xD8, 0xAD, 0x44, + 0xD9, 0x86, 0xD8, 0xAE, 0x44, 0xD9, 0x86, 0xD8, + 0xB1, 0x44, 0xD9, 0x86, 0xD8, 0xB2, 0x44, 0xD9, + 0x86, 0xD9, 0x85, 0x44, 0xD9, 0x86, 0xD9, 0x86, + 0x44, 0xD9, 0x86, 0xD9, 0x87, 0x44, 0xD9, 0x86, + 0xD9, 0x89, 0x44, 0xD9, 0x86, 0xD9, 0x8A, 0x44, + 0xD9, 0x87, 0xD8, 0xAC, 0x44, 0xD9, 0x87, 0xD9, + 0x85, 0x44, 0xD9, 0x87, 0xD9, 0x89, 0x44, 0xD9, + // Bytes 2140 - 217f + 0x87, 0xD9, 0x8A, 0x44, 0xD9, 0x88, 0xD9, 0xB4, + 0x44, 0xD9, 0x8A, 0xD8, 0xAC, 0x44, 0xD9, 0x8A, + 0xD8, 0xAD, 0x44, 0xD9, 0x8A, 0xD8, 0xAE, 0x44, + 0xD9, 0x8A, 0xD8, 0xB1, 0x44, 0xD9, 0x8A, 0xD8, + 0xB2, 0x44, 0xD9, 0x8A, 0xD9, 0x85, 0x44, 0xD9, + 0x8A, 0xD9, 0x86, 0x44, 0xD9, 0x8A, 0xD9, 0x87, + 0x44, 0xD9, 0x8A, 0xD9, 0x89, 0x44, 0xD9, 0x8A, + 0xD9, 0x8A, 0x44, 0xD9, 0x8A, 0xD9, 0xB4, 0x44, + // Bytes 2180 - 21bf + 0xDB, 0x87, 0xD9, 0xB4, 0x45, 0x28, 0xE1, 0x84, + 0x80, 0x29, 0x45, 0x28, 0xE1, 0x84, 0x82, 0x29, + 0x45, 0x28, 0xE1, 0x84, 0x83, 0x29, 0x45, 0x28, + 0xE1, 0x84, 0x85, 0x29, 0x45, 0x28, 0xE1, 0x84, + 0x86, 0x29, 0x45, 0x28, 0xE1, 0x84, 0x87, 0x29, + 0x45, 0x28, 0xE1, 0x84, 0x89, 0x29, 0x45, 0x28, + 0xE1, 0x84, 0x8B, 0x29, 0x45, 0x28, 0xE1, 0x84, + 0x8C, 0x29, 0x45, 0x28, 0xE1, 0x84, 0x8E, 0x29, + // Bytes 21c0 - 21ff + 0x45, 0x28, 0xE1, 0x84, 0x8F, 0x29, 0x45, 0x28, + 0xE1, 0x84, 0x90, 0x29, 0x45, 0x28, 0xE1, 0x84, + 0x91, 0x29, 0x45, 0x28, 0xE1, 0x84, 0x92, 0x29, + 0x45, 0x28, 0xE4, 0xB8, 0x80, 0x29, 0x45, 0x28, + 0xE4, 0xB8, 0x83, 0x29, 0x45, 0x28, 0xE4, 0xB8, + 0x89, 0x29, 0x45, 0x28, 0xE4, 0xB9, 0x9D, 0x29, + 0x45, 0x28, 0xE4, 0xBA, 0x8C, 0x29, 0x45, 0x28, + 0xE4, 0xBA, 0x94, 0x29, 0x45, 0x28, 0xE4, 0xBB, + // Bytes 2200 - 223f + 0xA3, 0x29, 0x45, 0x28, 0xE4, 0xBC, 0x81, 0x29, + 0x45, 0x28, 0xE4, 0xBC, 0x91, 0x29, 0x45, 0x28, + 0xE5, 0x85, 0xAB, 0x29, 0x45, 0x28, 0xE5, 0x85, + 0xAD, 0x29, 0x45, 0x28, 0xE5, 0x8A, 0xB4, 0x29, + 0x45, 0x28, 0xE5, 0x8D, 0x81, 0x29, 0x45, 0x28, + 0xE5, 0x8D, 0x94, 0x29, 0x45, 0x28, 0xE5, 0x90, + 0x8D, 0x29, 0x45, 0x28, 0xE5, 0x91, 0xBC, 0x29, + 0x45, 0x28, 0xE5, 0x9B, 0x9B, 0x29, 0x45, 0x28, + // Bytes 2240 - 227f + 0xE5, 0x9C, 0x9F, 0x29, 0x45, 0x28, 0xE5, 0xAD, + 0xA6, 0x29, 0x45, 0x28, 0xE6, 0x97, 0xA5, 0x29, + 0x45, 0x28, 0xE6, 0x9C, 0x88, 0x29, 0x45, 0x28, + 0xE6, 0x9C, 0x89, 0x29, 0x45, 0x28, 0xE6, 0x9C, + 0xA8, 0x29, 0x45, 0x28, 0xE6, 0xA0, 0xAA, 0x29, + 0x45, 0x28, 0xE6, 0xB0, 0xB4, 0x29, 0x45, 0x28, + 0xE7, 0x81, 0xAB, 0x29, 0x45, 0x28, 0xE7, 0x89, + 0xB9, 0x29, 0x45, 0x28, 0xE7, 0x9B, 0xA3, 0x29, + 
// Bytes 2280 - 22bf + 0x45, 0x28, 0xE7, 0xA4, 0xBE, 0x29, 0x45, 0x28, + 0xE7, 0xA5, 0x9D, 0x29, 0x45, 0x28, 0xE7, 0xA5, + 0xAD, 0x29, 0x45, 0x28, 0xE8, 0x87, 0xAA, 0x29, + 0x45, 0x28, 0xE8, 0x87, 0xB3, 0x29, 0x45, 0x28, + 0xE8, 0xB2, 0xA1, 0x29, 0x45, 0x28, 0xE8, 0xB3, + 0x87, 0x29, 0x45, 0x28, 0xE9, 0x87, 0x91, 0x29, + 0x45, 0x30, 0xE2, 0x81, 0x84, 0x33, 0x45, 0x31, + 0x30, 0xE6, 0x97, 0xA5, 0x45, 0x31, 0x30, 0xE6, + // Bytes 22c0 - 22ff + 0x9C, 0x88, 0x45, 0x31, 0x30, 0xE7, 0x82, 0xB9, + 0x45, 0x31, 0x31, 0xE6, 0x97, 0xA5, 0x45, 0x31, + 0x31, 0xE6, 0x9C, 0x88, 0x45, 0x31, 0x31, 0xE7, + 0x82, 0xB9, 0x45, 0x31, 0x32, 0xE6, 0x97, 0xA5, + 0x45, 0x31, 0x32, 0xE6, 0x9C, 0x88, 0x45, 0x31, + 0x32, 0xE7, 0x82, 0xB9, 0x45, 0x31, 0x33, 0xE6, + 0x97, 0xA5, 0x45, 0x31, 0x33, 0xE7, 0x82, 0xB9, + 0x45, 0x31, 0x34, 0xE6, 0x97, 0xA5, 0x45, 0x31, + // Bytes 2300 - 233f + 0x34, 0xE7, 0x82, 0xB9, 0x45, 0x31, 0x35, 0xE6, + 0x97, 0xA5, 0x45, 0x31, 0x35, 0xE7, 0x82, 0xB9, + 0x45, 0x31, 0x36, 0xE6, 0x97, 0xA5, 0x45, 0x31, + 0x36, 0xE7, 0x82, 0xB9, 0x45, 0x31, 0x37, 0xE6, + 0x97, 0xA5, 0x45, 0x31, 0x37, 0xE7, 0x82, 0xB9, + 0x45, 0x31, 0x38, 0xE6, 0x97, 0xA5, 0x45, 0x31, + 0x38, 0xE7, 0x82, 0xB9, 0x45, 0x31, 0x39, 0xE6, + 0x97, 0xA5, 0x45, 0x31, 0x39, 0xE7, 0x82, 0xB9, + // Bytes 2340 - 237f + 0x45, 0x31, 0xE2, 0x81, 0x84, 0x32, 0x45, 0x31, + 0xE2, 0x81, 0x84, 0x33, 0x45, 0x31, 0xE2, 0x81, + 0x84, 0x34, 0x45, 0x31, 0xE2, 0x81, 0x84, 0x35, + 0x45, 0x31, 0xE2, 0x81, 0x84, 0x36, 0x45, 0x31, + 0xE2, 0x81, 0x84, 0x37, 0x45, 0x31, 0xE2, 0x81, + 0x84, 0x38, 0x45, 0x31, 0xE2, 0x81, 0x84, 0x39, + 0x45, 0x32, 0x30, 0xE6, 0x97, 0xA5, 0x45, 0x32, + 0x30, 0xE7, 0x82, 0xB9, 0x45, 0x32, 0x31, 0xE6, + // Bytes 2380 - 23bf + 0x97, 0xA5, 0x45, 0x32, 0x31, 0xE7, 0x82, 0xB9, + 0x45, 0x32, 0x32, 0xE6, 0x97, 0xA5, 0x45, 0x32, + 0x32, 0xE7, 0x82, 0xB9, 0x45, 0x32, 0x33, 0xE6, + 0x97, 0xA5, 0x45, 0x32, 0x33, 0xE7, 0x82, 0xB9, + 0x45, 0x32, 0x34, 0xE6, 0x97, 0xA5, 0x45, 0x32, + 0x34, 0xE7, 0x82, 0xB9, 0x45, 0x32, 0x35, 0xE6, + 0x97, 0xA5, 0x45, 0x32, 0x36, 0xE6, 0x97, 0xA5, + 0x45, 0x32, 0x37, 0xE6, 0x97, 0xA5, 0x45, 0x32, + // Bytes 23c0 - 23ff + 0x38, 0xE6, 0x97, 0xA5, 0x45, 0x32, 0x39, 0xE6, + 0x97, 0xA5, 0x45, 0x32, 0xE2, 0x81, 0x84, 0x33, + 0x45, 0x32, 0xE2, 0x81, 0x84, 0x35, 0x45, 0x33, + 0x30, 0xE6, 0x97, 0xA5, 0x45, 0x33, 0x31, 0xE6, + 0x97, 0xA5, 0x45, 0x33, 0xE2, 0x81, 0x84, 0x34, + 0x45, 0x33, 0xE2, 0x81, 0x84, 0x35, 0x45, 0x33, + 0xE2, 0x81, 0x84, 0x38, 0x45, 0x34, 0xE2, 0x81, + 0x84, 0x35, 0x45, 0x35, 0xE2, 0x81, 0x84, 0x36, + // Bytes 2400 - 243f + 0x45, 0x35, 0xE2, 0x81, 0x84, 0x38, 0x45, 0x37, + 0xE2, 0x81, 0x84, 0x38, 0x45, 0x41, 0xE2, 0x88, + 0x95, 0x6D, 0x45, 0x56, 0xE2, 0x88, 0x95, 0x6D, + 0x45, 0x6D, 0xE2, 0x88, 0x95, 0x73, 0x46, 0x31, + 0xE2, 0x81, 0x84, 0x31, 0x30, 0x46, 0x43, 0xE2, + 0x88, 0x95, 0x6B, 0x67, 0x46, 0x6D, 0xE2, 0x88, + 0x95, 0x73, 0x32, 0x46, 0xD8, 0xA8, 0xD8, 0xAD, + 0xD9, 0x8A, 0x46, 0xD8, 0xA8, 0xD8, 0xAE, 0xD9, + // Bytes 2440 - 247f + 0x8A, 0x46, 0xD8, 0xAA, 0xD8, 0xAC, 0xD9, 0x85, + 0x46, 0xD8, 0xAA, 0xD8, 0xAC, 0xD9, 0x89, 0x46, + 0xD8, 0xAA, 0xD8, 0xAC, 0xD9, 0x8A, 0x46, 0xD8, + 0xAA, 0xD8, 0xAD, 0xD8, 0xAC, 0x46, 0xD8, 0xAA, + 0xD8, 0xAD, 0xD9, 0x85, 0x46, 0xD8, 0xAA, 0xD8, + 0xAE, 0xD9, 0x85, 0x46, 0xD8, 0xAA, 0xD8, 0xAE, + 0xD9, 0x89, 0x46, 0xD8, 0xAA, 0xD8, 0xAE, 0xD9, + 0x8A, 0x46, 0xD8, 0xAA, 0xD9, 0x85, 0xD8, 0xAC, + // Bytes 2480 - 24bf + 0x46, 0xD8, 0xAA, 0xD9, 0x85, 0xD8, 0xAD, 0x46, + 0xD8, 0xAA, 0xD9, 0x85, 0xD8, 0xAE, 0x46, 0xD8, + 0xAA, 0xD9, 0x85, 0xD9, 0x89, 0x46, 0xD8, 0xAA, 
+ 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD8, 0xAC, 0xD8, + 0xAD, 0xD9, 0x89, 0x46, 0xD8, 0xAC, 0xD8, 0xAD, + 0xD9, 0x8A, 0x46, 0xD8, 0xAC, 0xD9, 0x85, 0xD8, + 0xAD, 0x46, 0xD8, 0xAC, 0xD9, 0x85, 0xD9, 0x89, + 0x46, 0xD8, 0xAC, 0xD9, 0x85, 0xD9, 0x8A, 0x46, + // Bytes 24c0 - 24ff + 0xD8, 0xAD, 0xD8, 0xAC, 0xD9, 0x8A, 0x46, 0xD8, + 0xAD, 0xD9, 0x85, 0xD9, 0x89, 0x46, 0xD8, 0xAD, + 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD8, 0xB3, 0xD8, + 0xAC, 0xD8, 0xAD, 0x46, 0xD8, 0xB3, 0xD8, 0xAC, + 0xD9, 0x89, 0x46, 0xD8, 0xB3, 0xD8, 0xAD, 0xD8, + 0xAC, 0x46, 0xD8, 0xB3, 0xD8, 0xAE, 0xD9, 0x89, + 0x46, 0xD8, 0xB3, 0xD8, 0xAE, 0xD9, 0x8A, 0x46, + 0xD8, 0xB3, 0xD9, 0x85, 0xD8, 0xAC, 0x46, 0xD8, + // Bytes 2500 - 253f + 0xB3, 0xD9, 0x85, 0xD8, 0xAD, 0x46, 0xD8, 0xB3, + 0xD9, 0x85, 0xD9, 0x85, 0x46, 0xD8, 0xB4, 0xD8, + 0xAC, 0xD9, 0x8A, 0x46, 0xD8, 0xB4, 0xD8, 0xAD, + 0xD9, 0x85, 0x46, 0xD8, 0xB4, 0xD8, 0xAD, 0xD9, + 0x8A, 0x46, 0xD8, 0xB4, 0xD9, 0x85, 0xD8, 0xAE, + 0x46, 0xD8, 0xB4, 0xD9, 0x85, 0xD9, 0x85, 0x46, + 0xD8, 0xB5, 0xD8, 0xAD, 0xD8, 0xAD, 0x46, 0xD8, + 0xB5, 0xD8, 0xAD, 0xD9, 0x8A, 0x46, 0xD8, 0xB5, + // Bytes 2540 - 257f + 0xD9, 0x84, 0xD9, 0x89, 0x46, 0xD8, 0xB5, 0xD9, + 0x84, 0xDB, 0x92, 0x46, 0xD8, 0xB5, 0xD9, 0x85, + 0xD9, 0x85, 0x46, 0xD8, 0xB6, 0xD8, 0xAD, 0xD9, + 0x89, 0x46, 0xD8, 0xB6, 0xD8, 0xAD, 0xD9, 0x8A, + 0x46, 0xD8, 0xB6, 0xD8, 0xAE, 0xD9, 0x85, 0x46, + 0xD8, 0xB7, 0xD9, 0x85, 0xD8, 0xAD, 0x46, 0xD8, + 0xB7, 0xD9, 0x85, 0xD9, 0x85, 0x46, 0xD8, 0xB7, + 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD8, 0xB9, 0xD8, + // Bytes 2580 - 25bf + 0xAC, 0xD9, 0x85, 0x46, 0xD8, 0xB9, 0xD9, 0x85, + 0xD9, 0x85, 0x46, 0xD8, 0xB9, 0xD9, 0x85, 0xD9, + 0x89, 0x46, 0xD8, 0xB9, 0xD9, 0x85, 0xD9, 0x8A, + 0x46, 0xD8, 0xBA, 0xD9, 0x85, 0xD9, 0x85, 0x46, + 0xD8, 0xBA, 0xD9, 0x85, 0xD9, 0x89, 0x46, 0xD8, + 0xBA, 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD9, 0x81, + 0xD8, 0xAE, 0xD9, 0x85, 0x46, 0xD9, 0x81, 0xD9, + 0x85, 0xD9, 0x8A, 0x46, 0xD9, 0x82, 0xD9, 0x84, + // Bytes 25c0 - 25ff + 0xDB, 0x92, 0x46, 0xD9, 0x82, 0xD9, 0x85, 0xD8, + 0xAD, 0x46, 0xD9, 0x82, 0xD9, 0x85, 0xD9, 0x85, + 0x46, 0xD9, 0x82, 0xD9, 0x85, 0xD9, 0x8A, 0x46, + 0xD9, 0x83, 0xD9, 0x85, 0xD9, 0x85, 0x46, 0xD9, + 0x83, 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD9, 0x84, + 0xD8, 0xAC, 0xD8, 0xAC, 0x46, 0xD9, 0x84, 0xD8, + 0xAC, 0xD9, 0x85, 0x46, 0xD9, 0x84, 0xD8, 0xAC, + 0xD9, 0x8A, 0x46, 0xD9, 0x84, 0xD8, 0xAD, 0xD9, + // Bytes 2600 - 263f + 0x85, 0x46, 0xD9, 0x84, 0xD8, 0xAD, 0xD9, 0x89, + 0x46, 0xD9, 0x84, 0xD8, 0xAD, 0xD9, 0x8A, 0x46, + 0xD9, 0x84, 0xD8, 0xAE, 0xD9, 0x85, 0x46, 0xD9, + 0x84, 0xD9, 0x85, 0xD8, 0xAD, 0x46, 0xD9, 0x84, + 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD9, 0x85, 0xD8, + 0xAC, 0xD8, 0xAD, 0x46, 0xD9, 0x85, 0xD8, 0xAC, + 0xD8, 0xAE, 0x46, 0xD9, 0x85, 0xD8, 0xAC, 0xD9, + 0x85, 0x46, 0xD9, 0x85, 0xD8, 0xAC, 0xD9, 0x8A, + // Bytes 2640 - 267f + 0x46, 0xD9, 0x85, 0xD8, 0xAD, 0xD8, 0xAC, 0x46, + 0xD9, 0x85, 0xD8, 0xAD, 0xD9, 0x85, 0x46, 0xD9, + 0x85, 0xD8, 0xAD, 0xD9, 0x8A, 0x46, 0xD9, 0x85, + 0xD8, 0xAE, 0xD8, 0xAC, 0x46, 0xD9, 0x85, 0xD8, + 0xAE, 0xD9, 0x85, 0x46, 0xD9, 0x85, 0xD8, 0xAE, + 0xD9, 0x8A, 0x46, 0xD9, 0x85, 0xD9, 0x85, 0xD9, + 0x8A, 0x46, 0xD9, 0x86, 0xD8, 0xAC, 0xD8, 0xAD, + 0x46, 0xD9, 0x86, 0xD8, 0xAC, 0xD9, 0x85, 0x46, + // Bytes 2680 - 26bf + 0xD9, 0x86, 0xD8, 0xAC, 0xD9, 0x89, 0x46, 0xD9, + 0x86, 0xD8, 0xAC, 0xD9, 0x8A, 0x46, 0xD9, 0x86, + 0xD8, 0xAD, 0xD9, 0x85, 0x46, 0xD9, 0x86, 0xD8, + 0xAD, 0xD9, 0x89, 0x46, 0xD9, 0x86, 0xD8, 0xAD, + 0xD9, 0x8A, 0x46, 0xD9, 0x86, 0xD9, 0x85, 0xD9, + 0x89, 0x46, 0xD9, 0x86, 0xD9, 0x85, 0xD9, 0x8A, + 0x46, 0xD9, 0x87, 
0xD9, 0x85, 0xD8, 0xAC, 0x46, + 0xD9, 0x87, 0xD9, 0x85, 0xD9, 0x85, 0x46, 0xD9, + // Bytes 26c0 - 26ff + 0x8A, 0xD8, 0xAC, 0xD9, 0x8A, 0x46, 0xD9, 0x8A, + 0xD8, 0xAD, 0xD9, 0x8A, 0x46, 0xD9, 0x8A, 0xD9, + 0x85, 0xD9, 0x85, 0x46, 0xD9, 0x8A, 0xD9, 0x85, + 0xD9, 0x8A, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD8, + 0xA7, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD8, 0xAC, + 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD8, 0xAD, 0x46, + 0xD9, 0x8A, 0xD9, 0x94, 0xD8, 0xAE, 0x46, 0xD9, + 0x8A, 0xD9, 0x94, 0xD8, 0xB1, 0x46, 0xD9, 0x8A, + // Bytes 2700 - 273f + 0xD9, 0x94, 0xD8, 0xB2, 0x46, 0xD9, 0x8A, 0xD9, + 0x94, 0xD9, 0x85, 0x46, 0xD9, 0x8A, 0xD9, 0x94, + 0xD9, 0x86, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD9, + 0x87, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD9, 0x88, + 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD9, 0x89, 0x46, + 0xD9, 0x8A, 0xD9, 0x94, 0xD9, 0x8A, 0x46, 0xD9, + 0x8A, 0xD9, 0x94, 0xDB, 0x86, 0x46, 0xD9, 0x8A, + 0xD9, 0x94, 0xDB, 0x87, 0x46, 0xD9, 0x8A, 0xD9, + // Bytes 2740 - 277f + 0x94, 0xDB, 0x88, 0x46, 0xD9, 0x8A, 0xD9, 0x94, + 0xDB, 0x90, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xDB, + 0x95, 0x46, 0xE0, 0xB9, 0x8D, 0xE0, 0xB8, 0xB2, + 0x46, 0xE0, 0xBA, 0xAB, 0xE0, 0xBA, 0x99, 0x46, + 0xE0, 0xBA, 0xAB, 0xE0, 0xBA, 0xA1, 0x46, 0xE0, + 0xBB, 0x8D, 0xE0, 0xBA, 0xB2, 0x46, 0xE0, 0xBD, + 0x80, 0xE0, 0xBE, 0xB5, 0x46, 0xE0, 0xBD, 0x82, + 0xE0, 0xBE, 0xB7, 0x46, 0xE0, 0xBD, 0x8C, 0xE0, + // Bytes 2780 - 27bf + 0xBE, 0xB7, 0x46, 0xE0, 0xBD, 0x91, 0xE0, 0xBE, + 0xB7, 0x46, 0xE0, 0xBD, 0x96, 0xE0, 0xBE, 0xB7, + 0x46, 0xE0, 0xBD, 0x9B, 0xE0, 0xBE, 0xB7, 0x46, + 0xE0, 0xBE, 0x90, 0xE0, 0xBE, 0xB5, 0x46, 0xE0, + 0xBE, 0x92, 0xE0, 0xBE, 0xB7, 0x46, 0xE0, 0xBE, + 0x9C, 0xE0, 0xBE, 0xB7, 0x46, 0xE0, 0xBE, 0xA1, + 0xE0, 0xBE, 0xB7, 0x46, 0xE0, 0xBE, 0xA6, 0xE0, + 0xBE, 0xB7, 0x46, 0xE0, 0xBE, 0xAB, 0xE0, 0xBE, + // Bytes 27c0 - 27ff + 0xB7, 0x46, 0xE2, 0x80, 0xB2, 0xE2, 0x80, 0xB2, + 0x46, 0xE2, 0x80, 0xB5, 0xE2, 0x80, 0xB5, 0x46, + 0xE2, 0x88, 0xAB, 0xE2, 0x88, 0xAB, 0x46, 0xE2, + 0x88, 0xAE, 0xE2, 0x88, 0xAE, 0x46, 0xE3, 0x81, + 0xBB, 0xE3, 0x81, 0x8B, 0x46, 0xE3, 0x82, 0x88, + 0xE3, 0x82, 0x8A, 0x46, 0xE3, 0x82, 0xAD, 0xE3, + 0x83, 0xAD, 0x46, 0xE3, 0x82, 0xB3, 0xE3, 0x82, + 0xB3, 0x46, 0xE3, 0x82, 0xB3, 0xE3, 0x83, 0x88, + // Bytes 2800 - 283f + 0x46, 0xE3, 0x83, 0x88, 0xE3, 0x83, 0xB3, 0x46, + 0xE3, 0x83, 0x8A, 0xE3, 0x83, 0x8E, 0x46, 0xE3, + 0x83, 0x9B, 0xE3, 0x83, 0xB3, 0x46, 0xE3, 0x83, + 0x9F, 0xE3, 0x83, 0xAA, 0x46, 0xE3, 0x83, 0xAA, + 0xE3, 0x83, 0xA9, 0x46, 0xE3, 0x83, 0xAC, 0xE3, + 0x83, 0xA0, 0x46, 0xE4, 0xBB, 0xA4, 0xE5, 0x92, + 0x8C, 0x46, 0xE5, 0xA4, 0xA7, 0xE6, 0xAD, 0xA3, + 0x46, 0xE5, 0xB9, 0xB3, 0xE6, 0x88, 0x90, 0x46, + // Bytes 2840 - 287f + 0xE6, 0x98, 0x8E, 0xE6, 0xB2, 0xBB, 0x46, 0xE6, + 0x98, 0xAD, 0xE5, 0x92, 0x8C, 0x47, 0x72, 0x61, + 0x64, 0xE2, 0x88, 0x95, 0x73, 0x47, 0xE3, 0x80, + 0x94, 0x53, 0xE3, 0x80, 0x95, 0x48, 0x28, 0xE1, + 0x84, 0x80, 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28, + 0xE1, 0x84, 0x82, 0xE1, 0x85, 0xA1, 0x29, 0x48, + 0x28, 0xE1, 0x84, 0x83, 0xE1, 0x85, 0xA1, 0x29, + 0x48, 0x28, 0xE1, 0x84, 0x85, 0xE1, 0x85, 0xA1, + // Bytes 2880 - 28bf + 0x29, 0x48, 0x28, 0xE1, 0x84, 0x86, 0xE1, 0x85, + 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, 0x87, 0xE1, + 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, 0x89, + 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, + 0x8B, 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, + 0x84, 0x8C, 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28, + 0xE1, 0x84, 0x8C, 0xE1, 0x85, 0xAE, 0x29, 0x48, + 0x28, 0xE1, 0x84, 0x8E, 0xE1, 0x85, 0xA1, 0x29, + // Bytes 28c0 - 28ff + 0x48, 0x28, 0xE1, 0x84, 0x8F, 0xE1, 0x85, 0xA1, + 0x29, 0x48, 
0x28, 0xE1, 0x84, 0x90, 0xE1, 0x85, + 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, 0x91, 0xE1, + 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, 0x92, + 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x72, 0x61, 0x64, + 0xE2, 0x88, 0x95, 0x73, 0x32, 0x48, 0xD8, 0xA7, + 0xD9, 0x83, 0xD8, 0xA8, 0xD8, 0xB1, 0x48, 0xD8, + 0xA7, 0xD9, 0x84, 0xD9, 0x84, 0xD9, 0x87, 0x48, + // Bytes 2900 - 293f + 0xD8, 0xB1, 0xD8, 0xB3, 0xD9, 0x88, 0xD9, 0x84, + 0x48, 0xD8, 0xB1, 0xDB, 0x8C, 0xD8, 0xA7, 0xD9, + 0x84, 0x48, 0xD8, 0xB5, 0xD9, 0x84, 0xD8, 0xB9, + 0xD9, 0x85, 0x48, 0xD8, 0xB9, 0xD9, 0x84, 0xD9, + 0x8A, 0xD9, 0x87, 0x48, 0xD9, 0x85, 0xD8, 0xAD, + 0xD9, 0x85, 0xD8, 0xAF, 0x48, 0xD9, 0x88, 0xD8, + 0xB3, 0xD9, 0x84, 0xD9, 0x85, 0x49, 0xE2, 0x80, + 0xB2, 0xE2, 0x80, 0xB2, 0xE2, 0x80, 0xB2, 0x49, + // Bytes 2940 - 297f + 0xE2, 0x80, 0xB5, 0xE2, 0x80, 0xB5, 0xE2, 0x80, + 0xB5, 0x49, 0xE2, 0x88, 0xAB, 0xE2, 0x88, 0xAB, + 0xE2, 0x88, 0xAB, 0x49, 0xE2, 0x88, 0xAE, 0xE2, + 0x88, 0xAE, 0xE2, 0x88, 0xAE, 0x49, 0xE3, 0x80, + 0x94, 0xE4, 0xB8, 0x89, 0xE3, 0x80, 0x95, 0x49, + 0xE3, 0x80, 0x94, 0xE4, 0xBA, 0x8C, 0xE3, 0x80, + 0x95, 0x49, 0xE3, 0x80, 0x94, 0xE5, 0x8B, 0x9D, + 0xE3, 0x80, 0x95, 0x49, 0xE3, 0x80, 0x94, 0xE5, + // Bytes 2980 - 29bf + 0xAE, 0x89, 0xE3, 0x80, 0x95, 0x49, 0xE3, 0x80, + 0x94, 0xE6, 0x89, 0x93, 0xE3, 0x80, 0x95, 0x49, + 0xE3, 0x80, 0x94, 0xE6, 0x95, 0x97, 0xE3, 0x80, + 0x95, 0x49, 0xE3, 0x80, 0x94, 0xE6, 0x9C, 0xAC, + 0xE3, 0x80, 0x95, 0x49, 0xE3, 0x80, 0x94, 0xE7, + 0x82, 0xB9, 0xE3, 0x80, 0x95, 0x49, 0xE3, 0x80, + 0x94, 0xE7, 0x9B, 0x97, 0xE3, 0x80, 0x95, 0x49, + 0xE3, 0x82, 0xA2, 0xE3, 0x83, 0xBC, 0xE3, 0x83, + // Bytes 29c0 - 29ff + 0xAB, 0x49, 0xE3, 0x82, 0xA4, 0xE3, 0x83, 0xB3, + 0xE3, 0x83, 0x81, 0x49, 0xE3, 0x82, 0xA6, 0xE3, + 0x82, 0xA9, 0xE3, 0x83, 0xB3, 0x49, 0xE3, 0x82, + 0xAA, 0xE3, 0x83, 0xB3, 0xE3, 0x82, 0xB9, 0x49, + 0xE3, 0x82, 0xAA, 0xE3, 0x83, 0xBC, 0xE3, 0x83, + 0xA0, 0x49, 0xE3, 0x82, 0xAB, 0xE3, 0x82, 0xA4, + 0xE3, 0x83, 0xAA, 0x49, 0xE3, 0x82, 0xB1, 0xE3, + 0x83, 0xBC, 0xE3, 0x82, 0xB9, 0x49, 0xE3, 0x82, + // Bytes 2a00 - 2a3f + 0xB3, 0xE3, 0x83, 0xAB, 0xE3, 0x83, 0x8A, 0x49, + 0xE3, 0x82, 0xBB, 0xE3, 0x83, 0xB3, 0xE3, 0x83, + 0x81, 0x49, 0xE3, 0x82, 0xBB, 0xE3, 0x83, 0xB3, + 0xE3, 0x83, 0x88, 0x49, 0xE3, 0x83, 0x86, 0xE3, + 0x82, 0x99, 0xE3, 0x82, 0xB7, 0x49, 0xE3, 0x83, + 0x88, 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xAB, 0x49, + 0xE3, 0x83, 0x8E, 0xE3, 0x83, 0x83, 0xE3, 0x83, + 0x88, 0x49, 0xE3, 0x83, 0x8F, 0xE3, 0x82, 0xA4, + // Bytes 2a40 - 2a7f + 0xE3, 0x83, 0x84, 0x49, 0xE3, 0x83, 0x92, 0xE3, + 0x82, 0x99, 0xE3, 0x83, 0xAB, 0x49, 0xE3, 0x83, + 0x92, 0xE3, 0x82, 0x9A, 0xE3, 0x82, 0xB3, 0x49, + 0xE3, 0x83, 0x95, 0xE3, 0x83, 0xA9, 0xE3, 0x83, + 0xB3, 0x49, 0xE3, 0x83, 0x98, 0xE3, 0x82, 0x9A, + 0xE3, 0x82, 0xBD, 0x49, 0xE3, 0x83, 0x98, 0xE3, + 0x83, 0xAB, 0xE3, 0x83, 0x84, 0x49, 0xE3, 0x83, + 0x9B, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0xAB, 0x49, + // Bytes 2a80 - 2abf + 0xE3, 0x83, 0x9B, 0xE3, 0x83, 0xBC, 0xE3, 0x83, + 0xB3, 0x49, 0xE3, 0x83, 0x9E, 0xE3, 0x82, 0xA4, + 0xE3, 0x83, 0xAB, 0x49, 0xE3, 0x83, 0x9E, 0xE3, + 0x83, 0x83, 0xE3, 0x83, 0x8F, 0x49, 0xE3, 0x83, + 0x9E, 0xE3, 0x83, 0xAB, 0xE3, 0x82, 0xAF, 0x49, + 0xE3, 0x83, 0xA4, 0xE3, 0x83, 0xBC, 0xE3, 0x83, + 0xAB, 0x49, 0xE3, 0x83, 0xA6, 0xE3, 0x82, 0xA2, + 0xE3, 0x83, 0xB3, 0x49, 0xE3, 0x83, 0xAF, 0xE3, + // Bytes 2ac0 - 2aff + 0x83, 0x83, 0xE3, 0x83, 0x88, 0x4C, 0xE2, 0x80, + 0xB2, 0xE2, 0x80, 0xB2, 0xE2, 0x80, 0xB2, 0xE2, + 0x80, 0xB2, 0x4C, 0xE2, 0x88, 0xAB, 0xE2, 0x88, + 0xAB, 0xE2, 0x88, 0xAB, 0xE2, 0x88, 0xAB, 0x4C, + 0xE3, 0x82, 0xA2, 0xE3, 0x83, 
0xAB, 0xE3, 0x83, + 0x95, 0xE3, 0x82, 0xA1, 0x4C, 0xE3, 0x82, 0xA8, + 0xE3, 0x83, 0xBC, 0xE3, 0x82, 0xAB, 0xE3, 0x83, + 0xBC, 0x4C, 0xE3, 0x82, 0xAB, 0xE3, 0x82, 0x99, + // Bytes 2b00 - 2b3f + 0xE3, 0x83, 0xAD, 0xE3, 0x83, 0xB3, 0x4C, 0xE3, + 0x82, 0xAB, 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xB3, + 0xE3, 0x83, 0x9E, 0x4C, 0xE3, 0x82, 0xAB, 0xE3, + 0x83, 0xA9, 0xE3, 0x83, 0x83, 0xE3, 0x83, 0x88, + 0x4C, 0xE3, 0x82, 0xAB, 0xE3, 0x83, 0xAD, 0xE3, + 0x83, 0xAA, 0xE3, 0x83, 0xBC, 0x4C, 0xE3, 0x82, + 0xAD, 0xE3, 0x82, 0x99, 0xE3, 0x83, 0x8B, 0xE3, + 0x83, 0xBC, 0x4C, 0xE3, 0x82, 0xAD, 0xE3, 0x83, + // Bytes 2b40 - 2b7f + 0xA5, 0xE3, 0x83, 0xAA, 0xE3, 0x83, 0xBC, 0x4C, + 0xE3, 0x82, 0xAF, 0xE3, 0x82, 0x99, 0xE3, 0x83, + 0xA9, 0xE3, 0x83, 0xA0, 0x4C, 0xE3, 0x82, 0xAF, + 0xE3, 0x83, 0xAD, 0xE3, 0x83, 0xBC, 0xE3, 0x83, + 0x8D, 0x4C, 0xE3, 0x82, 0xB5, 0xE3, 0x82, 0xA4, + 0xE3, 0x82, 0xAF, 0xE3, 0x83, 0xAB, 0x4C, 0xE3, + 0x82, 0xBF, 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xBC, + 0xE3, 0x82, 0xB9, 0x4C, 0xE3, 0x83, 0x8F, 0xE3, + // Bytes 2b80 - 2bbf + 0x82, 0x9A, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0x84, + 0x4C, 0xE3, 0x83, 0x92, 0xE3, 0x82, 0x9A, 0xE3, + 0x82, 0xAF, 0xE3, 0x83, 0xAB, 0x4C, 0xE3, 0x83, + 0x95, 0xE3, 0x82, 0xA3, 0xE3, 0x83, 0xBC, 0xE3, + 0x83, 0x88, 0x4C, 0xE3, 0x83, 0x98, 0xE3, 0x82, + 0x99, 0xE3, 0x83, 0xBC, 0xE3, 0x82, 0xBF, 0x4C, + 0xE3, 0x83, 0x98, 0xE3, 0x82, 0x9A, 0xE3, 0x83, + 0x8B, 0xE3, 0x83, 0x92, 0x4C, 0xE3, 0x83, 0x98, + // Bytes 2bc0 - 2bff + 0xE3, 0x82, 0x9A, 0xE3, 0x83, 0xB3, 0xE3, 0x82, + 0xB9, 0x4C, 0xE3, 0x83, 0x9B, 0xE3, 0x82, 0x99, + 0xE3, 0x83, 0xAB, 0xE3, 0x83, 0x88, 0x4C, 0xE3, + 0x83, 0x9E, 0xE3, 0x82, 0xA4, 0xE3, 0x82, 0xAF, + 0xE3, 0x83, 0xAD, 0x4C, 0xE3, 0x83, 0x9F, 0xE3, + 0x82, 0xAF, 0xE3, 0x83, 0xAD, 0xE3, 0x83, 0xB3, + 0x4C, 0xE3, 0x83, 0xA1, 0xE3, 0x83, 0xBC, 0xE3, + 0x83, 0x88, 0xE3, 0x83, 0xAB, 0x4C, 0xE3, 0x83, + // Bytes 2c00 - 2c3f + 0xAA, 0xE3, 0x83, 0x83, 0xE3, 0x83, 0x88, 0xE3, + 0x83, 0xAB, 0x4C, 0xE3, 0x83, 0xAB, 0xE3, 0x83, + 0x92, 0xE3, 0x82, 0x9A, 0xE3, 0x83, 0xBC, 0x4C, + 0xE6, 0xA0, 0xAA, 0xE5, 0xBC, 0x8F, 0xE4, 0xBC, + 0x9A, 0xE7, 0xA4, 0xBE, 0x4E, 0x28, 0xE1, 0x84, + 0x8B, 0xE1, 0x85, 0xA9, 0xE1, 0x84, 0x92, 0xE1, + 0x85, 0xAE, 0x29, 0x4F, 0xD8, 0xAC, 0xD9, 0x84, + 0x20, 0xD8, 0xAC, 0xD9, 0x84, 0xD8, 0xA7, 0xD9, + // Bytes 2c40 - 2c7f + 0x84, 0xD9, 0x87, 0x4F, 0xE3, 0x82, 0xA2, 0xE3, + 0x83, 0x8F, 0xE3, 0x82, 0x9A, 0xE3, 0x83, 0xBC, + 0xE3, 0x83, 0x88, 0x4F, 0xE3, 0x82, 0xA2, 0xE3, + 0x83, 0xB3, 0xE3, 0x83, 0x98, 0xE3, 0x82, 0x9A, + 0xE3, 0x82, 0xA2, 0x4F, 0xE3, 0x82, 0xAD, 0xE3, + 0x83, 0xAD, 0xE3, 0x83, 0xAF, 0xE3, 0x83, 0x83, + 0xE3, 0x83, 0x88, 0x4F, 0xE3, 0x82, 0xB5, 0xE3, + 0x83, 0xB3, 0xE3, 0x83, 0x81, 0xE3, 0x83, 0xBC, + // Bytes 2c80 - 2cbf + 0xE3, 0x83, 0xA0, 0x4F, 0xE3, 0x83, 0x8F, 0xE3, + 0x82, 0x99, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0xAC, + 0xE3, 0x83, 0xAB, 0x4F, 0xE3, 0x83, 0x98, 0xE3, + 0x82, 0xAF, 0xE3, 0x82, 0xBF, 0xE3, 0x83, 0xBC, + 0xE3, 0x83, 0xAB, 0x4F, 0xE3, 0x83, 0x9B, 0xE3, + 0x82, 0x9A, 0xE3, 0x82, 0xA4, 0xE3, 0x83, 0xB3, + 0xE3, 0x83, 0x88, 0x4F, 0xE3, 0x83, 0x9E, 0xE3, + 0x83, 0xB3, 0xE3, 0x82, 0xB7, 0xE3, 0x83, 0xA7, + // Bytes 2cc0 - 2cff + 0xE3, 0x83, 0xB3, 0x4F, 0xE3, 0x83, 0xA1, 0xE3, + 0x82, 0xAB, 0xE3, 0x82, 0x99, 0xE3, 0x83, 0x88, + 0xE3, 0x83, 0xB3, 0x4F, 0xE3, 0x83, 0xAB, 0xE3, + 0x83, 0xBC, 0xE3, 0x83, 0x95, 0xE3, 0x82, 0x99, + 0xE3, 0x83, 0xAB, 0x51, 0x28, 0xE1, 0x84, 0x8B, + 0xE1, 0x85, 0xA9, 0xE1, 0x84, 0x8C, 0xE1, 0x85, + 0xA5, 0xE1, 0x86, 0xAB, 0x29, 0x52, 0xE3, 0x82, + 0xAD, 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xAB, 0xE3, + 
// Bytes 2d00 - 2d3f + 0x82, 0xBF, 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xBC, + 0x52, 0xE3, 0x82, 0xAD, 0xE3, 0x83, 0xAD, 0xE3, + 0x82, 0xAF, 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xA9, + 0xE3, 0x83, 0xA0, 0x52, 0xE3, 0x82, 0xAD, 0xE3, + 0x83, 0xAD, 0xE3, 0x83, 0xA1, 0xE3, 0x83, 0xBC, + 0xE3, 0x83, 0x88, 0xE3, 0x83, 0xAB, 0x52, 0xE3, + 0x82, 0xAF, 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xA9, + 0xE3, 0x83, 0xA0, 0xE3, 0x83, 0x88, 0xE3, 0x83, + // Bytes 2d40 - 2d7f + 0xB3, 0x52, 0xE3, 0x82, 0xAF, 0xE3, 0x83, 0xAB, + 0xE3, 0x82, 0xBB, 0xE3, 0x82, 0x99, 0xE3, 0x82, + 0xA4, 0xE3, 0x83, 0xAD, 0x52, 0xE3, 0x83, 0x8F, + 0xE3, 0x82, 0x9A, 0xE3, 0x83, 0xBC, 0xE3, 0x82, + 0xBB, 0xE3, 0x83, 0xB3, 0xE3, 0x83, 0x88, 0x52, + 0xE3, 0x83, 0x92, 0xE3, 0x82, 0x9A, 0xE3, 0x82, + 0xA2, 0xE3, 0x82, 0xB9, 0xE3, 0x83, 0x88, 0xE3, + 0x83, 0xAB, 0x52, 0xE3, 0x83, 0x95, 0xE3, 0x82, + // Bytes 2d80 - 2dbf + 0x99, 0xE3, 0x83, 0x83, 0xE3, 0x82, 0xB7, 0xE3, + 0x82, 0xA7, 0xE3, 0x83, 0xAB, 0x52, 0xE3, 0x83, + 0x9F, 0xE3, 0x83, 0xAA, 0xE3, 0x83, 0x8F, 0xE3, + 0x82, 0x99, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0xAB, + 0x52, 0xE3, 0x83, 0xAC, 0xE3, 0x83, 0xB3, 0xE3, + 0x83, 0x88, 0xE3, 0x82, 0xB1, 0xE3, 0x82, 0x99, + 0xE3, 0x83, 0xB3, 0x61, 0xD8, 0xB5, 0xD9, 0x84, + 0xD9, 0x89, 0x20, 0xD8, 0xA7, 0xD9, 0x84, 0xD9, + // Bytes 2dc0 - 2dff + 0x84, 0xD9, 0x87, 0x20, 0xD8, 0xB9, 0xD9, 0x84, + 0xD9, 0x8A, 0xD9, 0x87, 0x20, 0xD9, 0x88, 0xD8, + 0xB3, 0xD9, 0x84, 0xD9, 0x85, 0x06, 0xE0, 0xA7, + 0x87, 0xE0, 0xA6, 0xBE, 0x01, 0x06, 0xE0, 0xA7, + 0x87, 0xE0, 0xA7, 0x97, 0x01, 0x06, 0xE0, 0xAD, + 0x87, 0xE0, 0xAC, 0xBE, 0x01, 0x06, 0xE0, 0xAD, + 0x87, 0xE0, 0xAD, 0x96, 0x01, 0x06, 0xE0, 0xAD, + 0x87, 0xE0, 0xAD, 0x97, 0x01, 0x06, 0xE0, 0xAE, + // Bytes 2e00 - 2e3f + 0x92, 0xE0, 0xAF, 0x97, 0x01, 0x06, 0xE0, 0xAF, + 0x86, 0xE0, 0xAE, 0xBE, 0x01, 0x06, 0xE0, 0xAF, + 0x86, 0xE0, 0xAF, 0x97, 0x01, 0x06, 0xE0, 0xAF, + 0x87, 0xE0, 0xAE, 0xBE, 0x01, 0x06, 0xE0, 0xB2, + 0xBF, 0xE0, 0xB3, 0x95, 0x01, 0x06, 0xE0, 0xB3, + 0x86, 0xE0, 0xB3, 0x95, 0x01, 0x06, 0xE0, 0xB3, + 0x86, 0xE0, 0xB3, 0x96, 0x01, 0x06, 0xE0, 0xB5, + 0x86, 0xE0, 0xB4, 0xBE, 0x01, 0x06, 0xE0, 0xB5, + // Bytes 2e40 - 2e7f + 0x86, 0xE0, 0xB5, 0x97, 0x01, 0x06, 0xE0, 0xB5, + 0x87, 0xE0, 0xB4, 0xBE, 0x01, 0x06, 0xE0, 0xB7, + 0x99, 0xE0, 0xB7, 0x9F, 0x01, 0x06, 0xE1, 0x80, + 0xA5, 0xE1, 0x80, 0xAE, 0x01, 0x06, 0xE1, 0xAC, + 0x85, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, 0xAC, + 0x87, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, 0xAC, + 0x89, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, 0xAC, + 0x8B, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, 0xAC, + // Bytes 2e80 - 2ebf + 0x8D, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, 0xAC, + 0x91, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, 0xAC, + 0xBA, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, 0xAC, + 0xBC, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, 0xAC, + 0xBE, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, 0xAC, + 0xBF, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, 0xAD, + 0x82, 0xE1, 0xAC, 0xB5, 0x01, 0x08, 0xF0, 0x91, + 0x84, 0xB1, 0xF0, 0x91, 0x84, 0xA7, 0x01, 0x08, + // Bytes 2ec0 - 2eff + 0xF0, 0x91, 0x84, 0xB2, 0xF0, 0x91, 0x84, 0xA7, + 0x01, 0x08, 0xF0, 0x91, 0x8D, 0x87, 0xF0, 0x91, + 0x8C, 0xBE, 0x01, 0x08, 0xF0, 0x91, 0x8D, 0x87, + 0xF0, 0x91, 0x8D, 0x97, 0x01, 0x08, 0xF0, 0x91, + 0x92, 0xB9, 0xF0, 0x91, 0x92, 0xB0, 0x01, 0x08, + 0xF0, 0x91, 0x92, 0xB9, 0xF0, 0x91, 0x92, 0xBA, + 0x01, 0x08, 0xF0, 0x91, 0x92, 0xB9, 0xF0, 0x91, + 0x92, 0xBD, 0x01, 0x08, 0xF0, 0x91, 0x96, 0xB8, + // Bytes 2f00 - 2f3f + 0xF0, 0x91, 0x96, 0xAF, 0x01, 0x08, 0xF0, 0x91, + 0x96, 0xB9, 0xF0, 0x91, 0x96, 0xAF, 0x01, 0x08, + 0xF0, 0x91, 0xA4, 0xB5, 0xF0, 0x91, 0xA4, 0xB0, 
+ 0x01, 0x09, 0xE0, 0xB3, 0x86, 0xE0, 0xB3, 0x82, + 0xE0, 0xB3, 0x95, 0x02, 0x09, 0xE0, 0xB7, 0x99, + 0xE0, 0xB7, 0x8F, 0xE0, 0xB7, 0x8A, 0x16, 0x44, + 0x44, 0x5A, 0xCC, 0x8C, 0xCD, 0x44, 0x44, 0x7A, + 0xCC, 0x8C, 0xCD, 0x44, 0x64, 0x7A, 0xCC, 0x8C, + // Bytes 2f40 - 2f7f + 0xCD, 0x46, 0xD9, 0x84, 0xD8, 0xA7, 0xD9, 0x93, + 0xCD, 0x46, 0xD9, 0x84, 0xD8, 0xA7, 0xD9, 0x94, + 0xCD, 0x46, 0xD9, 0x84, 0xD8, 0xA7, 0xD9, 0x95, + 0xB9, 0x46, 0xE1, 0x84, 0x80, 0xE1, 0x85, 0xA1, + 0x01, 0x46, 0xE1, 0x84, 0x82, 0xE1, 0x85, 0xA1, + 0x01, 0x46, 0xE1, 0x84, 0x83, 0xE1, 0x85, 0xA1, + 0x01, 0x46, 0xE1, 0x84, 0x85, 0xE1, 0x85, 0xA1, + 0x01, 0x46, 0xE1, 0x84, 0x86, 0xE1, 0x85, 0xA1, + // Bytes 2f80 - 2fbf + 0x01, 0x46, 0xE1, 0x84, 0x87, 0xE1, 0x85, 0xA1, + 0x01, 0x46, 0xE1, 0x84, 0x89, 0xE1, 0x85, 0xA1, + 0x01, 0x46, 0xE1, 0x84, 0x8B, 0xE1, 0x85, 0xA1, + 0x01, 0x46, 0xE1, 0x84, 0x8B, 0xE1, 0x85, 0xAE, + 0x01, 0x46, 0xE1, 0x84, 0x8C, 0xE1, 0x85, 0xA1, + 0x01, 0x46, 0xE1, 0x84, 0x8E, 0xE1, 0x85, 0xA1, + 0x01, 0x46, 0xE1, 0x84, 0x8F, 0xE1, 0x85, 0xA1, + 0x01, 0x46, 0xE1, 0x84, 0x90, 0xE1, 0x85, 0xA1, + // Bytes 2fc0 - 2fff + 0x01, 0x46, 0xE1, 0x84, 0x91, 0xE1, 0x85, 0xA1, + 0x01, 0x46, 0xE1, 0x84, 0x92, 0xE1, 0x85, 0xA1, + 0x01, 0x49, 0xE3, 0x83, 0xA1, 0xE3, 0x82, 0xAB, + 0xE3, 0x82, 0x99, 0x11, 0x4C, 0xE1, 0x84, 0x8C, + 0xE1, 0x85, 0xAE, 0xE1, 0x84, 0x8B, 0xE1, 0x85, + 0xB4, 0x01, 0x4C, 0xE3, 0x82, 0xAD, 0xE3, 0x82, + 0x99, 0xE3, 0x82, 0xAB, 0xE3, 0x82, 0x99, 0x11, + 0x4C, 0xE3, 0x82, 0xB3, 0xE3, 0x83, 0xBC, 0xE3, + // Bytes 3000 - 303f + 0x83, 0x9B, 0xE3, 0x82, 0x9A, 0x11, 0x4C, 0xE3, + 0x83, 0xA4, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0x88, + 0xE3, 0x82, 0x99, 0x11, 0x4F, 0xE1, 0x84, 0x8E, + 0xE1, 0x85, 0xA1, 0xE1, 0x86, 0xB7, 0xE1, 0x84, + 0x80, 0xE1, 0x85, 0xA9, 0x01, 0x4F, 0xE3, 0x82, + 0xA4, 0xE3, 0x83, 0x8B, 0xE3, 0x83, 0xB3, 0xE3, + 0x82, 0xAF, 0xE3, 0x82, 0x99, 0x11, 0x4F, 0xE3, + 0x82, 0xB7, 0xE3, 0x83, 0xAA, 0xE3, 0x83, 0xB3, + // Bytes 3040 - 307f + 0xE3, 0x82, 0xAF, 0xE3, 0x82, 0x99, 0x11, 0x4F, + 0xE3, 0x83, 0x98, 0xE3, 0x82, 0x9A, 0xE3, 0x83, + 0xBC, 0xE3, 0x82, 0xB7, 0xE3, 0x82, 0x99, 0x11, + 0x4F, 0xE3, 0x83, 0x9B, 0xE3, 0x82, 0x9A, 0xE3, + 0x83, 0xB3, 0xE3, 0x83, 0x88, 0xE3, 0x82, 0x99, + 0x11, 0x52, 0xE3, 0x82, 0xA8, 0xE3, 0x82, 0xB9, + 0xE3, 0x82, 0xAF, 0xE3, 0x83, 0xBC, 0xE3, 0x83, + 0x88, 0xE3, 0x82, 0x99, 0x11, 0x52, 0xE3, 0x83, + // Bytes 3080 - 30bf + 0x95, 0xE3, 0x82, 0xA1, 0xE3, 0x83, 0xA9, 0xE3, + 0x83, 0x83, 0xE3, 0x83, 0x88, 0xE3, 0x82, 0x99, + 0x11, 0x86, 0xE0, 0xB3, 0x86, 0xE0, 0xB3, 0x82, + 0x01, 0x86, 0xE0, 0xB7, 0x99, 0xE0, 0xB7, 0x8F, + 0x01, 0x03, 0x3C, 0xCC, 0xB8, 0x05, 0x03, 0x3D, + 0xCC, 0xB8, 0x05, 0x03, 0x3E, 0xCC, 0xB8, 0x05, + 0x03, 0x41, 0xCC, 0x80, 0xCD, 0x03, 0x41, 0xCC, + 0x81, 0xCD, 0x03, 0x41, 0xCC, 0x83, 0xCD, 0x03, + // Bytes 30c0 - 30ff + 0x41, 0xCC, 0x84, 0xCD, 0x03, 0x41, 0xCC, 0x89, + 0xCD, 0x03, 0x41, 0xCC, 0x8C, 0xCD, 0x03, 0x41, + 0xCC, 0x8F, 0xCD, 0x03, 0x41, 0xCC, 0x91, 0xCD, + 0x03, 0x41, 0xCC, 0xA5, 0xB9, 0x03, 0x41, 0xCC, + 0xA8, 0xA9, 0x03, 0x42, 0xCC, 0x87, 0xCD, 0x03, + 0x42, 0xCC, 0xA3, 0xB9, 0x03, 0x42, 0xCC, 0xB1, + 0xB9, 0x03, 0x43, 0xCC, 0x81, 0xCD, 0x03, 0x43, + 0xCC, 0x82, 0xCD, 0x03, 0x43, 0xCC, 0x87, 0xCD, + // Bytes 3100 - 313f + 0x03, 0x43, 0xCC, 0x8C, 0xCD, 0x03, 0x44, 0xCC, + 0x87, 0xCD, 0x03, 0x44, 0xCC, 0x8C, 0xCD, 0x03, + 0x44, 0xCC, 0xA3, 0xB9, 0x03, 0x44, 0xCC, 0xA7, + 0xA9, 0x03, 0x44, 0xCC, 0xAD, 0xB9, 0x03, 0x44, + 0xCC, 0xB1, 0xB9, 0x03, 0x45, 0xCC, 0x80, 0xCD, + 0x03, 0x45, 0xCC, 0x81, 0xCD, 0x03, 0x45, 0xCC, + 0x83, 0xCD, 0x03, 
0x45, 0xCC, 0x86, 0xCD, 0x03, + 0x45, 0xCC, 0x87, 0xCD, 0x03, 0x45, 0xCC, 0x88, + // Bytes 3140 - 317f + 0xCD, 0x03, 0x45, 0xCC, 0x89, 0xCD, 0x03, 0x45, + 0xCC, 0x8C, 0xCD, 0x03, 0x45, 0xCC, 0x8F, 0xCD, + 0x03, 0x45, 0xCC, 0x91, 0xCD, 0x03, 0x45, 0xCC, + 0xA8, 0xA9, 0x03, 0x45, 0xCC, 0xAD, 0xB9, 0x03, + 0x45, 0xCC, 0xB0, 0xB9, 0x03, 0x46, 0xCC, 0x87, + 0xCD, 0x03, 0x47, 0xCC, 0x81, 0xCD, 0x03, 0x47, + 0xCC, 0x82, 0xCD, 0x03, 0x47, 0xCC, 0x84, 0xCD, + 0x03, 0x47, 0xCC, 0x86, 0xCD, 0x03, 0x47, 0xCC, + // Bytes 3180 - 31bf + 0x87, 0xCD, 0x03, 0x47, 0xCC, 0x8C, 0xCD, 0x03, + 0x47, 0xCC, 0xA7, 0xA9, 0x03, 0x48, 0xCC, 0x82, + 0xCD, 0x03, 0x48, 0xCC, 0x87, 0xCD, 0x03, 0x48, + 0xCC, 0x88, 0xCD, 0x03, 0x48, 0xCC, 0x8C, 0xCD, + 0x03, 0x48, 0xCC, 0xA3, 0xB9, 0x03, 0x48, 0xCC, + 0xA7, 0xA9, 0x03, 0x48, 0xCC, 0xAE, 0xB9, 0x03, + 0x49, 0xCC, 0x80, 0xCD, 0x03, 0x49, 0xCC, 0x81, + 0xCD, 0x03, 0x49, 0xCC, 0x82, 0xCD, 0x03, 0x49, + // Bytes 31c0 - 31ff + 0xCC, 0x83, 0xCD, 0x03, 0x49, 0xCC, 0x84, 0xCD, + 0x03, 0x49, 0xCC, 0x86, 0xCD, 0x03, 0x49, 0xCC, + 0x87, 0xCD, 0x03, 0x49, 0xCC, 0x89, 0xCD, 0x03, + 0x49, 0xCC, 0x8C, 0xCD, 0x03, 0x49, 0xCC, 0x8F, + 0xCD, 0x03, 0x49, 0xCC, 0x91, 0xCD, 0x03, 0x49, + 0xCC, 0xA3, 0xB9, 0x03, 0x49, 0xCC, 0xA8, 0xA9, + 0x03, 0x49, 0xCC, 0xB0, 0xB9, 0x03, 0x4A, 0xCC, + 0x82, 0xCD, 0x03, 0x4B, 0xCC, 0x81, 0xCD, 0x03, + // Bytes 3200 - 323f + 0x4B, 0xCC, 0x8C, 0xCD, 0x03, 0x4B, 0xCC, 0xA3, + 0xB9, 0x03, 0x4B, 0xCC, 0xA7, 0xA9, 0x03, 0x4B, + 0xCC, 0xB1, 0xB9, 0x03, 0x4C, 0xCC, 0x81, 0xCD, + 0x03, 0x4C, 0xCC, 0x8C, 0xCD, 0x03, 0x4C, 0xCC, + 0xA7, 0xA9, 0x03, 0x4C, 0xCC, 0xAD, 0xB9, 0x03, + 0x4C, 0xCC, 0xB1, 0xB9, 0x03, 0x4D, 0xCC, 0x81, + 0xCD, 0x03, 0x4D, 0xCC, 0x87, 0xCD, 0x03, 0x4D, + 0xCC, 0xA3, 0xB9, 0x03, 0x4E, 0xCC, 0x80, 0xCD, + // Bytes 3240 - 327f + 0x03, 0x4E, 0xCC, 0x81, 0xCD, 0x03, 0x4E, 0xCC, + 0x83, 0xCD, 0x03, 0x4E, 0xCC, 0x87, 0xCD, 0x03, + 0x4E, 0xCC, 0x8C, 0xCD, 0x03, 0x4E, 0xCC, 0xA3, + 0xB9, 0x03, 0x4E, 0xCC, 0xA7, 0xA9, 0x03, 0x4E, + 0xCC, 0xAD, 0xB9, 0x03, 0x4E, 0xCC, 0xB1, 0xB9, + 0x03, 0x4F, 0xCC, 0x80, 0xCD, 0x03, 0x4F, 0xCC, + 0x81, 0xCD, 0x03, 0x4F, 0xCC, 0x86, 0xCD, 0x03, + 0x4F, 0xCC, 0x89, 0xCD, 0x03, 0x4F, 0xCC, 0x8B, + // Bytes 3280 - 32bf + 0xCD, 0x03, 0x4F, 0xCC, 0x8C, 0xCD, 0x03, 0x4F, + 0xCC, 0x8F, 0xCD, 0x03, 0x4F, 0xCC, 0x91, 0xCD, + 0x03, 0x50, 0xCC, 0x81, 0xCD, 0x03, 0x50, 0xCC, + 0x87, 0xCD, 0x03, 0x52, 0xCC, 0x81, 0xCD, 0x03, + 0x52, 0xCC, 0x87, 0xCD, 0x03, 0x52, 0xCC, 0x8C, + 0xCD, 0x03, 0x52, 0xCC, 0x8F, 0xCD, 0x03, 0x52, + 0xCC, 0x91, 0xCD, 0x03, 0x52, 0xCC, 0xA7, 0xA9, + 0x03, 0x52, 0xCC, 0xB1, 0xB9, 0x03, 0x53, 0xCC, + // Bytes 32c0 - 32ff + 0x82, 0xCD, 0x03, 0x53, 0xCC, 0x87, 0xCD, 0x03, + 0x53, 0xCC, 0xA6, 0xB9, 0x03, 0x53, 0xCC, 0xA7, + 0xA9, 0x03, 0x54, 0xCC, 0x87, 0xCD, 0x03, 0x54, + 0xCC, 0x8C, 0xCD, 0x03, 0x54, 0xCC, 0xA3, 0xB9, + 0x03, 0x54, 0xCC, 0xA6, 0xB9, 0x03, 0x54, 0xCC, + 0xA7, 0xA9, 0x03, 0x54, 0xCC, 0xAD, 0xB9, 0x03, + 0x54, 0xCC, 0xB1, 0xB9, 0x03, 0x55, 0xCC, 0x80, + 0xCD, 0x03, 0x55, 0xCC, 0x81, 0xCD, 0x03, 0x55, + // Bytes 3300 - 333f + 0xCC, 0x82, 0xCD, 0x03, 0x55, 0xCC, 0x86, 0xCD, + 0x03, 0x55, 0xCC, 0x89, 0xCD, 0x03, 0x55, 0xCC, + 0x8A, 0xCD, 0x03, 0x55, 0xCC, 0x8B, 0xCD, 0x03, + 0x55, 0xCC, 0x8C, 0xCD, 0x03, 0x55, 0xCC, 0x8F, + 0xCD, 0x03, 0x55, 0xCC, 0x91, 0xCD, 0x03, 0x55, + 0xCC, 0xA3, 0xB9, 0x03, 0x55, 0xCC, 0xA4, 0xB9, + 0x03, 0x55, 0xCC, 0xA8, 0xA9, 0x03, 0x55, 0xCC, + 0xAD, 0xB9, 0x03, 0x55, 0xCC, 0xB0, 0xB9, 0x03, + // Bytes 3340 - 337f + 0x56, 0xCC, 0x83, 0xCD, 0x03, 0x56, 0xCC, 0xA3, + 0xB9, 0x03, 
0x57, 0xCC, 0x80, 0xCD, 0x03, 0x57, + 0xCC, 0x81, 0xCD, 0x03, 0x57, 0xCC, 0x82, 0xCD, + 0x03, 0x57, 0xCC, 0x87, 0xCD, 0x03, 0x57, 0xCC, + 0x88, 0xCD, 0x03, 0x57, 0xCC, 0xA3, 0xB9, 0x03, + 0x58, 0xCC, 0x87, 0xCD, 0x03, 0x58, 0xCC, 0x88, + 0xCD, 0x03, 0x59, 0xCC, 0x80, 0xCD, 0x03, 0x59, + 0xCC, 0x81, 0xCD, 0x03, 0x59, 0xCC, 0x82, 0xCD, + // Bytes 3380 - 33bf + 0x03, 0x59, 0xCC, 0x83, 0xCD, 0x03, 0x59, 0xCC, + 0x84, 0xCD, 0x03, 0x59, 0xCC, 0x87, 0xCD, 0x03, + 0x59, 0xCC, 0x88, 0xCD, 0x03, 0x59, 0xCC, 0x89, + 0xCD, 0x03, 0x59, 0xCC, 0xA3, 0xB9, 0x03, 0x5A, + 0xCC, 0x81, 0xCD, 0x03, 0x5A, 0xCC, 0x82, 0xCD, + 0x03, 0x5A, 0xCC, 0x87, 0xCD, 0x03, 0x5A, 0xCC, + 0x8C, 0xCD, 0x03, 0x5A, 0xCC, 0xA3, 0xB9, 0x03, + 0x5A, 0xCC, 0xB1, 0xB9, 0x03, 0x61, 0xCC, 0x80, + // Bytes 33c0 - 33ff + 0xCD, 0x03, 0x61, 0xCC, 0x81, 0xCD, 0x03, 0x61, + 0xCC, 0x83, 0xCD, 0x03, 0x61, 0xCC, 0x84, 0xCD, + 0x03, 0x61, 0xCC, 0x89, 0xCD, 0x03, 0x61, 0xCC, + 0x8C, 0xCD, 0x03, 0x61, 0xCC, 0x8F, 0xCD, 0x03, + 0x61, 0xCC, 0x91, 0xCD, 0x03, 0x61, 0xCC, 0xA5, + 0xB9, 0x03, 0x61, 0xCC, 0xA8, 0xA9, 0x03, 0x62, + 0xCC, 0x87, 0xCD, 0x03, 0x62, 0xCC, 0xA3, 0xB9, + 0x03, 0x62, 0xCC, 0xB1, 0xB9, 0x03, 0x63, 0xCC, + // Bytes 3400 - 343f + 0x81, 0xCD, 0x03, 0x63, 0xCC, 0x82, 0xCD, 0x03, + 0x63, 0xCC, 0x87, 0xCD, 0x03, 0x63, 0xCC, 0x8C, + 0xCD, 0x03, 0x64, 0xCC, 0x87, 0xCD, 0x03, 0x64, + 0xCC, 0x8C, 0xCD, 0x03, 0x64, 0xCC, 0xA3, 0xB9, + 0x03, 0x64, 0xCC, 0xA7, 0xA9, 0x03, 0x64, 0xCC, + 0xAD, 0xB9, 0x03, 0x64, 0xCC, 0xB1, 0xB9, 0x03, + 0x65, 0xCC, 0x80, 0xCD, 0x03, 0x65, 0xCC, 0x81, + 0xCD, 0x03, 0x65, 0xCC, 0x83, 0xCD, 0x03, 0x65, + // Bytes 3440 - 347f + 0xCC, 0x86, 0xCD, 0x03, 0x65, 0xCC, 0x87, 0xCD, + 0x03, 0x65, 0xCC, 0x88, 0xCD, 0x03, 0x65, 0xCC, + 0x89, 0xCD, 0x03, 0x65, 0xCC, 0x8C, 0xCD, 0x03, + 0x65, 0xCC, 0x8F, 0xCD, 0x03, 0x65, 0xCC, 0x91, + 0xCD, 0x03, 0x65, 0xCC, 0xA8, 0xA9, 0x03, 0x65, + 0xCC, 0xAD, 0xB9, 0x03, 0x65, 0xCC, 0xB0, 0xB9, + 0x03, 0x66, 0xCC, 0x87, 0xCD, 0x03, 0x67, 0xCC, + 0x81, 0xCD, 0x03, 0x67, 0xCC, 0x82, 0xCD, 0x03, + // Bytes 3480 - 34bf + 0x67, 0xCC, 0x84, 0xCD, 0x03, 0x67, 0xCC, 0x86, + 0xCD, 0x03, 0x67, 0xCC, 0x87, 0xCD, 0x03, 0x67, + 0xCC, 0x8C, 0xCD, 0x03, 0x67, 0xCC, 0xA7, 0xA9, + 0x03, 0x68, 0xCC, 0x82, 0xCD, 0x03, 0x68, 0xCC, + 0x87, 0xCD, 0x03, 0x68, 0xCC, 0x88, 0xCD, 0x03, + 0x68, 0xCC, 0x8C, 0xCD, 0x03, 0x68, 0xCC, 0xA3, + 0xB9, 0x03, 0x68, 0xCC, 0xA7, 0xA9, 0x03, 0x68, + 0xCC, 0xAE, 0xB9, 0x03, 0x68, 0xCC, 0xB1, 0xB9, + // Bytes 34c0 - 34ff + 0x03, 0x69, 0xCC, 0x80, 0xCD, 0x03, 0x69, 0xCC, + 0x81, 0xCD, 0x03, 0x69, 0xCC, 0x82, 0xCD, 0x03, + 0x69, 0xCC, 0x83, 0xCD, 0x03, 0x69, 0xCC, 0x84, + 0xCD, 0x03, 0x69, 0xCC, 0x86, 0xCD, 0x03, 0x69, + 0xCC, 0x89, 0xCD, 0x03, 0x69, 0xCC, 0x8C, 0xCD, + 0x03, 0x69, 0xCC, 0x8F, 0xCD, 0x03, 0x69, 0xCC, + 0x91, 0xCD, 0x03, 0x69, 0xCC, 0xA3, 0xB9, 0x03, + 0x69, 0xCC, 0xA8, 0xA9, 0x03, 0x69, 0xCC, 0xB0, + // Bytes 3500 - 353f + 0xB9, 0x03, 0x6A, 0xCC, 0x82, 0xCD, 0x03, 0x6A, + 0xCC, 0x8C, 0xCD, 0x03, 0x6B, 0xCC, 0x81, 0xCD, + 0x03, 0x6B, 0xCC, 0x8C, 0xCD, 0x03, 0x6B, 0xCC, + 0xA3, 0xB9, 0x03, 0x6B, 0xCC, 0xA7, 0xA9, 0x03, + 0x6B, 0xCC, 0xB1, 0xB9, 0x03, 0x6C, 0xCC, 0x81, + 0xCD, 0x03, 0x6C, 0xCC, 0x8C, 0xCD, 0x03, 0x6C, + 0xCC, 0xA7, 0xA9, 0x03, 0x6C, 0xCC, 0xAD, 0xB9, + 0x03, 0x6C, 0xCC, 0xB1, 0xB9, 0x03, 0x6D, 0xCC, + // Bytes 3540 - 357f + 0x81, 0xCD, 0x03, 0x6D, 0xCC, 0x87, 0xCD, 0x03, + 0x6D, 0xCC, 0xA3, 0xB9, 0x03, 0x6E, 0xCC, 0x80, + 0xCD, 0x03, 0x6E, 0xCC, 0x81, 0xCD, 0x03, 0x6E, + 0xCC, 0x83, 0xCD, 0x03, 0x6E, 0xCC, 0x87, 0xCD, + 0x03, 0x6E, 0xCC, 0x8C, 0xCD, 
0x03, 0x6E, 0xCC, + 0xA3, 0xB9, 0x03, 0x6E, 0xCC, 0xA7, 0xA9, 0x03, + 0x6E, 0xCC, 0xAD, 0xB9, 0x03, 0x6E, 0xCC, 0xB1, + 0xB9, 0x03, 0x6F, 0xCC, 0x80, 0xCD, 0x03, 0x6F, + // Bytes 3580 - 35bf + 0xCC, 0x81, 0xCD, 0x03, 0x6F, 0xCC, 0x86, 0xCD, + 0x03, 0x6F, 0xCC, 0x89, 0xCD, 0x03, 0x6F, 0xCC, + 0x8B, 0xCD, 0x03, 0x6F, 0xCC, 0x8C, 0xCD, 0x03, + 0x6F, 0xCC, 0x8F, 0xCD, 0x03, 0x6F, 0xCC, 0x91, + 0xCD, 0x03, 0x70, 0xCC, 0x81, 0xCD, 0x03, 0x70, + 0xCC, 0x87, 0xCD, 0x03, 0x72, 0xCC, 0x81, 0xCD, + 0x03, 0x72, 0xCC, 0x87, 0xCD, 0x03, 0x72, 0xCC, + 0x8C, 0xCD, 0x03, 0x72, 0xCC, 0x8F, 0xCD, 0x03, + // Bytes 35c0 - 35ff + 0x72, 0xCC, 0x91, 0xCD, 0x03, 0x72, 0xCC, 0xA7, + 0xA9, 0x03, 0x72, 0xCC, 0xB1, 0xB9, 0x03, 0x73, + 0xCC, 0x82, 0xCD, 0x03, 0x73, 0xCC, 0x87, 0xCD, + 0x03, 0x73, 0xCC, 0xA6, 0xB9, 0x03, 0x73, 0xCC, + 0xA7, 0xA9, 0x03, 0x74, 0xCC, 0x87, 0xCD, 0x03, + 0x74, 0xCC, 0x88, 0xCD, 0x03, 0x74, 0xCC, 0x8C, + 0xCD, 0x03, 0x74, 0xCC, 0xA3, 0xB9, 0x03, 0x74, + 0xCC, 0xA6, 0xB9, 0x03, 0x74, 0xCC, 0xA7, 0xA9, + // Bytes 3600 - 363f + 0x03, 0x74, 0xCC, 0xAD, 0xB9, 0x03, 0x74, 0xCC, + 0xB1, 0xB9, 0x03, 0x75, 0xCC, 0x80, 0xCD, 0x03, + 0x75, 0xCC, 0x81, 0xCD, 0x03, 0x75, 0xCC, 0x82, + 0xCD, 0x03, 0x75, 0xCC, 0x86, 0xCD, 0x03, 0x75, + 0xCC, 0x89, 0xCD, 0x03, 0x75, 0xCC, 0x8A, 0xCD, + 0x03, 0x75, 0xCC, 0x8B, 0xCD, 0x03, 0x75, 0xCC, + 0x8C, 0xCD, 0x03, 0x75, 0xCC, 0x8F, 0xCD, 0x03, + 0x75, 0xCC, 0x91, 0xCD, 0x03, 0x75, 0xCC, 0xA3, + // Bytes 3640 - 367f + 0xB9, 0x03, 0x75, 0xCC, 0xA4, 0xB9, 0x03, 0x75, + 0xCC, 0xA8, 0xA9, 0x03, 0x75, 0xCC, 0xAD, 0xB9, + 0x03, 0x75, 0xCC, 0xB0, 0xB9, 0x03, 0x76, 0xCC, + 0x83, 0xCD, 0x03, 0x76, 0xCC, 0xA3, 0xB9, 0x03, + 0x77, 0xCC, 0x80, 0xCD, 0x03, 0x77, 0xCC, 0x81, + 0xCD, 0x03, 0x77, 0xCC, 0x82, 0xCD, 0x03, 0x77, + 0xCC, 0x87, 0xCD, 0x03, 0x77, 0xCC, 0x88, 0xCD, + 0x03, 0x77, 0xCC, 0x8A, 0xCD, 0x03, 0x77, 0xCC, + // Bytes 3680 - 36bf + 0xA3, 0xB9, 0x03, 0x78, 0xCC, 0x87, 0xCD, 0x03, + 0x78, 0xCC, 0x88, 0xCD, 0x03, 0x79, 0xCC, 0x80, + 0xCD, 0x03, 0x79, 0xCC, 0x81, 0xCD, 0x03, 0x79, + 0xCC, 0x82, 0xCD, 0x03, 0x79, 0xCC, 0x83, 0xCD, + 0x03, 0x79, 0xCC, 0x84, 0xCD, 0x03, 0x79, 0xCC, + 0x87, 0xCD, 0x03, 0x79, 0xCC, 0x88, 0xCD, 0x03, + 0x79, 0xCC, 0x89, 0xCD, 0x03, 0x79, 0xCC, 0x8A, + 0xCD, 0x03, 0x79, 0xCC, 0xA3, 0xB9, 0x03, 0x7A, + // Bytes 36c0 - 36ff + 0xCC, 0x81, 0xCD, 0x03, 0x7A, 0xCC, 0x82, 0xCD, + 0x03, 0x7A, 0xCC, 0x87, 0xCD, 0x03, 0x7A, 0xCC, + 0x8C, 0xCD, 0x03, 0x7A, 0xCC, 0xA3, 0xB9, 0x03, + 0x7A, 0xCC, 0xB1, 0xB9, 0x04, 0xC2, 0xA8, 0xCC, + 0x80, 0xCE, 0x04, 0xC2, 0xA8, 0xCC, 0x81, 0xCE, + 0x04, 0xC2, 0xA8, 0xCD, 0x82, 0xCE, 0x04, 0xC3, + 0x86, 0xCC, 0x81, 0xCD, 0x04, 0xC3, 0x86, 0xCC, + 0x84, 0xCD, 0x04, 0xC3, 0x98, 0xCC, 0x81, 0xCD, + // Bytes 3700 - 373f + 0x04, 0xC3, 0xA6, 0xCC, 0x81, 0xCD, 0x04, 0xC3, + 0xA6, 0xCC, 0x84, 0xCD, 0x04, 0xC3, 0xB8, 0xCC, + 0x81, 0xCD, 0x04, 0xC5, 0xBF, 0xCC, 0x87, 0xCD, + 0x04, 0xC6, 0xB7, 0xCC, 0x8C, 0xCD, 0x04, 0xCA, + 0x92, 0xCC, 0x8C, 0xCD, 0x04, 0xCE, 0x91, 0xCC, + 0x80, 0xCD, 0x04, 0xCE, 0x91, 0xCC, 0x81, 0xCD, + 0x04, 0xCE, 0x91, 0xCC, 0x84, 0xCD, 0x04, 0xCE, + 0x91, 0xCC, 0x86, 0xCD, 0x04, 0xCE, 0x91, 0xCD, + // Bytes 3740 - 377f + 0x85, 0xDD, 0x04, 0xCE, 0x95, 0xCC, 0x80, 0xCD, + 0x04, 0xCE, 0x95, 0xCC, 0x81, 0xCD, 0x04, 0xCE, + 0x97, 0xCC, 0x80, 0xCD, 0x04, 0xCE, 0x97, 0xCC, + 0x81, 0xCD, 0x04, 0xCE, 0x97, 0xCD, 0x85, 0xDD, + 0x04, 0xCE, 0x99, 0xCC, 0x80, 0xCD, 0x04, 0xCE, + 0x99, 0xCC, 0x81, 0xCD, 0x04, 0xCE, 0x99, 0xCC, + 0x84, 0xCD, 0x04, 0xCE, 0x99, 0xCC, 0x86, 0xCD, + 0x04, 0xCE, 0x99, 0xCC, 0x88, 0xCD, 0x04, 0xCE, + 
// Bytes 3780 - 37bf + 0x9F, 0xCC, 0x80, 0xCD, 0x04, 0xCE, 0x9F, 0xCC, + 0x81, 0xCD, 0x04, 0xCE, 0xA1, 0xCC, 0x94, 0xCD, + 0x04, 0xCE, 0xA5, 0xCC, 0x80, 0xCD, 0x04, 0xCE, + 0xA5, 0xCC, 0x81, 0xCD, 0x04, 0xCE, 0xA5, 0xCC, + 0x84, 0xCD, 0x04, 0xCE, 0xA5, 0xCC, 0x86, 0xCD, + 0x04, 0xCE, 0xA5, 0xCC, 0x88, 0xCD, 0x04, 0xCE, + 0xA9, 0xCC, 0x80, 0xCD, 0x04, 0xCE, 0xA9, 0xCC, + 0x81, 0xCD, 0x04, 0xCE, 0xA9, 0xCD, 0x85, 0xDD, + // Bytes 37c0 - 37ff + 0x04, 0xCE, 0xB1, 0xCC, 0x84, 0xCD, 0x04, 0xCE, + 0xB1, 0xCC, 0x86, 0xCD, 0x04, 0xCE, 0xB1, 0xCD, + 0x85, 0xDD, 0x04, 0xCE, 0xB5, 0xCC, 0x80, 0xCD, + 0x04, 0xCE, 0xB5, 0xCC, 0x81, 0xCD, 0x04, 0xCE, + 0xB7, 0xCD, 0x85, 0xDD, 0x04, 0xCE, 0xB9, 0xCC, + 0x80, 0xCD, 0x04, 0xCE, 0xB9, 0xCC, 0x81, 0xCD, + 0x04, 0xCE, 0xB9, 0xCC, 0x84, 0xCD, 0x04, 0xCE, + 0xB9, 0xCC, 0x86, 0xCD, 0x04, 0xCE, 0xB9, 0xCD, + // Bytes 3800 - 383f + 0x82, 0xCD, 0x04, 0xCE, 0xBF, 0xCC, 0x80, 0xCD, + 0x04, 0xCE, 0xBF, 0xCC, 0x81, 0xCD, 0x04, 0xCF, + 0x81, 0xCC, 0x93, 0xCD, 0x04, 0xCF, 0x81, 0xCC, + 0x94, 0xCD, 0x04, 0xCF, 0x85, 0xCC, 0x80, 0xCD, + 0x04, 0xCF, 0x85, 0xCC, 0x81, 0xCD, 0x04, 0xCF, + 0x85, 0xCC, 0x84, 0xCD, 0x04, 0xCF, 0x85, 0xCC, + 0x86, 0xCD, 0x04, 0xCF, 0x85, 0xCD, 0x82, 0xCD, + 0x04, 0xCF, 0x89, 0xCD, 0x85, 0xDD, 0x04, 0xCF, + // Bytes 3840 - 387f + 0x92, 0xCC, 0x81, 0xCD, 0x04, 0xCF, 0x92, 0xCC, + 0x88, 0xCD, 0x04, 0xD0, 0x86, 0xCC, 0x88, 0xCD, + 0x04, 0xD0, 0x90, 0xCC, 0x86, 0xCD, 0x04, 0xD0, + 0x90, 0xCC, 0x88, 0xCD, 0x04, 0xD0, 0x93, 0xCC, + 0x81, 0xCD, 0x04, 0xD0, 0x95, 0xCC, 0x80, 0xCD, + 0x04, 0xD0, 0x95, 0xCC, 0x86, 0xCD, 0x04, 0xD0, + 0x95, 0xCC, 0x88, 0xCD, 0x04, 0xD0, 0x96, 0xCC, + 0x86, 0xCD, 0x04, 0xD0, 0x96, 0xCC, 0x88, 0xCD, + // Bytes 3880 - 38bf + 0x04, 0xD0, 0x97, 0xCC, 0x88, 0xCD, 0x04, 0xD0, + 0x98, 0xCC, 0x80, 0xCD, 0x04, 0xD0, 0x98, 0xCC, + 0x84, 0xCD, 0x04, 0xD0, 0x98, 0xCC, 0x86, 0xCD, + 0x04, 0xD0, 0x98, 0xCC, 0x88, 0xCD, 0x04, 0xD0, + 0x9A, 0xCC, 0x81, 0xCD, 0x04, 0xD0, 0x9E, 0xCC, + 0x88, 0xCD, 0x04, 0xD0, 0xA3, 0xCC, 0x84, 0xCD, + 0x04, 0xD0, 0xA3, 0xCC, 0x86, 0xCD, 0x04, 0xD0, + 0xA3, 0xCC, 0x88, 0xCD, 0x04, 0xD0, 0xA3, 0xCC, + // Bytes 38c0 - 38ff + 0x8B, 0xCD, 0x04, 0xD0, 0xA7, 0xCC, 0x88, 0xCD, + 0x04, 0xD0, 0xAB, 0xCC, 0x88, 0xCD, 0x04, 0xD0, + 0xAD, 0xCC, 0x88, 0xCD, 0x04, 0xD0, 0xB0, 0xCC, + 0x86, 0xCD, 0x04, 0xD0, 0xB0, 0xCC, 0x88, 0xCD, + 0x04, 0xD0, 0xB3, 0xCC, 0x81, 0xCD, 0x04, 0xD0, + 0xB5, 0xCC, 0x80, 0xCD, 0x04, 0xD0, 0xB5, 0xCC, + 0x86, 0xCD, 0x04, 0xD0, 0xB5, 0xCC, 0x88, 0xCD, + 0x04, 0xD0, 0xB6, 0xCC, 0x86, 0xCD, 0x04, 0xD0, + // Bytes 3900 - 393f + 0xB6, 0xCC, 0x88, 0xCD, 0x04, 0xD0, 0xB7, 0xCC, + 0x88, 0xCD, 0x04, 0xD0, 0xB8, 0xCC, 0x80, 0xCD, + 0x04, 0xD0, 0xB8, 0xCC, 0x84, 0xCD, 0x04, 0xD0, + 0xB8, 0xCC, 0x86, 0xCD, 0x04, 0xD0, 0xB8, 0xCC, + 0x88, 0xCD, 0x04, 0xD0, 0xBA, 0xCC, 0x81, 0xCD, + 0x04, 0xD0, 0xBE, 0xCC, 0x88, 0xCD, 0x04, 0xD1, + 0x83, 0xCC, 0x84, 0xCD, 0x04, 0xD1, 0x83, 0xCC, + 0x86, 0xCD, 0x04, 0xD1, 0x83, 0xCC, 0x88, 0xCD, + // Bytes 3940 - 397f + 0x04, 0xD1, 0x83, 0xCC, 0x8B, 0xCD, 0x04, 0xD1, + 0x87, 0xCC, 0x88, 0xCD, 0x04, 0xD1, 0x8B, 0xCC, + 0x88, 0xCD, 0x04, 0xD1, 0x8D, 0xCC, 0x88, 0xCD, + 0x04, 0xD1, 0x96, 0xCC, 0x88, 0xCD, 0x04, 0xD1, + 0xB4, 0xCC, 0x8F, 0xCD, 0x04, 0xD1, 0xB5, 0xCC, + 0x8F, 0xCD, 0x04, 0xD3, 0x98, 0xCC, 0x88, 0xCD, + 0x04, 0xD3, 0x99, 0xCC, 0x88, 0xCD, 0x04, 0xD3, + 0xA8, 0xCC, 0x88, 0xCD, 0x04, 0xD3, 0xA9, 0xCC, + // Bytes 3980 - 39bf + 0x88, 0xCD, 0x04, 0xD8, 0xA7, 0xD9, 0x93, 0xCD, + 0x04, 0xD8, 0xA7, 0xD9, 0x94, 0xCD, 0x04, 0xD8, + 0xA7, 0xD9, 0x95, 0xB9, 0x04, 0xD9, 0x88, 0xD9, 
+ 0x94, 0xCD, 0x04, 0xD9, 0x8A, 0xD9, 0x94, 0xCD, + 0x04, 0xDB, 0x81, 0xD9, 0x94, 0xCD, 0x04, 0xDB, + 0x92, 0xD9, 0x94, 0xCD, 0x04, 0xDB, 0x95, 0xD9, + 0x94, 0xCD, 0x05, 0x41, 0xCC, 0x82, 0xCC, 0x80, + 0xCE, 0x05, 0x41, 0xCC, 0x82, 0xCC, 0x81, 0xCE, + // Bytes 39c0 - 39ff + 0x05, 0x41, 0xCC, 0x82, 0xCC, 0x83, 0xCE, 0x05, + 0x41, 0xCC, 0x82, 0xCC, 0x89, 0xCE, 0x05, 0x41, + 0xCC, 0x86, 0xCC, 0x80, 0xCE, 0x05, 0x41, 0xCC, + 0x86, 0xCC, 0x81, 0xCE, 0x05, 0x41, 0xCC, 0x86, + 0xCC, 0x83, 0xCE, 0x05, 0x41, 0xCC, 0x86, 0xCC, + 0x89, 0xCE, 0x05, 0x41, 0xCC, 0x87, 0xCC, 0x84, + 0xCE, 0x05, 0x41, 0xCC, 0x88, 0xCC, 0x84, 0xCE, + 0x05, 0x41, 0xCC, 0x8A, 0xCC, 0x81, 0xCE, 0x05, + // Bytes 3a00 - 3a3f + 0x41, 0xCC, 0xA3, 0xCC, 0x82, 0xCE, 0x05, 0x41, + 0xCC, 0xA3, 0xCC, 0x86, 0xCE, 0x05, 0x43, 0xCC, + 0xA7, 0xCC, 0x81, 0xCE, 0x05, 0x45, 0xCC, 0x82, + 0xCC, 0x80, 0xCE, 0x05, 0x45, 0xCC, 0x82, 0xCC, + 0x81, 0xCE, 0x05, 0x45, 0xCC, 0x82, 0xCC, 0x83, + 0xCE, 0x05, 0x45, 0xCC, 0x82, 0xCC, 0x89, 0xCE, + 0x05, 0x45, 0xCC, 0x84, 0xCC, 0x80, 0xCE, 0x05, + 0x45, 0xCC, 0x84, 0xCC, 0x81, 0xCE, 0x05, 0x45, + // Bytes 3a40 - 3a7f + 0xCC, 0xA3, 0xCC, 0x82, 0xCE, 0x05, 0x45, 0xCC, + 0xA7, 0xCC, 0x86, 0xCE, 0x05, 0x49, 0xCC, 0x88, + 0xCC, 0x81, 0xCE, 0x05, 0x4C, 0xCC, 0xA3, 0xCC, + 0x84, 0xCE, 0x05, 0x4F, 0xCC, 0x82, 0xCC, 0x80, + 0xCE, 0x05, 0x4F, 0xCC, 0x82, 0xCC, 0x81, 0xCE, + 0x05, 0x4F, 0xCC, 0x82, 0xCC, 0x83, 0xCE, 0x05, + 0x4F, 0xCC, 0x82, 0xCC, 0x89, 0xCE, 0x05, 0x4F, + 0xCC, 0x83, 0xCC, 0x81, 0xCE, 0x05, 0x4F, 0xCC, + // Bytes 3a80 - 3abf + 0x83, 0xCC, 0x84, 0xCE, 0x05, 0x4F, 0xCC, 0x83, + 0xCC, 0x88, 0xCE, 0x05, 0x4F, 0xCC, 0x84, 0xCC, + 0x80, 0xCE, 0x05, 0x4F, 0xCC, 0x84, 0xCC, 0x81, + 0xCE, 0x05, 0x4F, 0xCC, 0x87, 0xCC, 0x84, 0xCE, + 0x05, 0x4F, 0xCC, 0x88, 0xCC, 0x84, 0xCE, 0x05, + 0x4F, 0xCC, 0x9B, 0xCC, 0x80, 0xCE, 0x05, 0x4F, + 0xCC, 0x9B, 0xCC, 0x81, 0xCE, 0x05, 0x4F, 0xCC, + 0x9B, 0xCC, 0x83, 0xCE, 0x05, 0x4F, 0xCC, 0x9B, + // Bytes 3ac0 - 3aff + 0xCC, 0x89, 0xCE, 0x05, 0x4F, 0xCC, 0x9B, 0xCC, + 0xA3, 0xBA, 0x05, 0x4F, 0xCC, 0xA3, 0xCC, 0x82, + 0xCE, 0x05, 0x4F, 0xCC, 0xA8, 0xCC, 0x84, 0xCE, + 0x05, 0x52, 0xCC, 0xA3, 0xCC, 0x84, 0xCE, 0x05, + 0x53, 0xCC, 0x81, 0xCC, 0x87, 0xCE, 0x05, 0x53, + 0xCC, 0x8C, 0xCC, 0x87, 0xCE, 0x05, 0x53, 0xCC, + 0xA3, 0xCC, 0x87, 0xCE, 0x05, 0x55, 0xCC, 0x83, + 0xCC, 0x81, 0xCE, 0x05, 0x55, 0xCC, 0x84, 0xCC, + // Bytes 3b00 - 3b3f + 0x88, 0xCE, 0x05, 0x55, 0xCC, 0x88, 0xCC, 0x80, + 0xCE, 0x05, 0x55, 0xCC, 0x88, 0xCC, 0x81, 0xCE, + 0x05, 0x55, 0xCC, 0x88, 0xCC, 0x84, 0xCE, 0x05, + 0x55, 0xCC, 0x88, 0xCC, 0x8C, 0xCE, 0x05, 0x55, + 0xCC, 0x9B, 0xCC, 0x80, 0xCE, 0x05, 0x55, 0xCC, + 0x9B, 0xCC, 0x81, 0xCE, 0x05, 0x55, 0xCC, 0x9B, + 0xCC, 0x83, 0xCE, 0x05, 0x55, 0xCC, 0x9B, 0xCC, + 0x89, 0xCE, 0x05, 0x55, 0xCC, 0x9B, 0xCC, 0xA3, + // Bytes 3b40 - 3b7f + 0xBA, 0x05, 0x61, 0xCC, 0x82, 0xCC, 0x80, 0xCE, + 0x05, 0x61, 0xCC, 0x82, 0xCC, 0x81, 0xCE, 0x05, + 0x61, 0xCC, 0x82, 0xCC, 0x83, 0xCE, 0x05, 0x61, + 0xCC, 0x82, 0xCC, 0x89, 0xCE, 0x05, 0x61, 0xCC, + 0x86, 0xCC, 0x80, 0xCE, 0x05, 0x61, 0xCC, 0x86, + 0xCC, 0x81, 0xCE, 0x05, 0x61, 0xCC, 0x86, 0xCC, + 0x83, 0xCE, 0x05, 0x61, 0xCC, 0x86, 0xCC, 0x89, + 0xCE, 0x05, 0x61, 0xCC, 0x87, 0xCC, 0x84, 0xCE, + // Bytes 3b80 - 3bbf + 0x05, 0x61, 0xCC, 0x88, 0xCC, 0x84, 0xCE, 0x05, + 0x61, 0xCC, 0x8A, 0xCC, 0x81, 0xCE, 0x05, 0x61, + 0xCC, 0xA3, 0xCC, 0x82, 0xCE, 0x05, 0x61, 0xCC, + 0xA3, 0xCC, 0x86, 0xCE, 0x05, 0x63, 0xCC, 0xA7, + 0xCC, 0x81, 0xCE, 0x05, 0x65, 0xCC, 0x82, 0xCC, + 0x80, 0xCE, 0x05, 0x65, 0xCC, 0x82, 0xCC, 0x81, + 0xCE, 0x05, 0x65, 
0xCC, 0x82, 0xCC, 0x83, 0xCE, + 0x05, 0x65, 0xCC, 0x82, 0xCC, 0x89, 0xCE, 0x05, + // Bytes 3bc0 - 3bff + 0x65, 0xCC, 0x84, 0xCC, 0x80, 0xCE, 0x05, 0x65, + 0xCC, 0x84, 0xCC, 0x81, 0xCE, 0x05, 0x65, 0xCC, + 0xA3, 0xCC, 0x82, 0xCE, 0x05, 0x65, 0xCC, 0xA7, + 0xCC, 0x86, 0xCE, 0x05, 0x69, 0xCC, 0x88, 0xCC, + 0x81, 0xCE, 0x05, 0x6C, 0xCC, 0xA3, 0xCC, 0x84, + 0xCE, 0x05, 0x6F, 0xCC, 0x82, 0xCC, 0x80, 0xCE, + 0x05, 0x6F, 0xCC, 0x82, 0xCC, 0x81, 0xCE, 0x05, + 0x6F, 0xCC, 0x82, 0xCC, 0x83, 0xCE, 0x05, 0x6F, + // Bytes 3c00 - 3c3f + 0xCC, 0x82, 0xCC, 0x89, 0xCE, 0x05, 0x6F, 0xCC, + 0x83, 0xCC, 0x81, 0xCE, 0x05, 0x6F, 0xCC, 0x83, + 0xCC, 0x84, 0xCE, 0x05, 0x6F, 0xCC, 0x83, 0xCC, + 0x88, 0xCE, 0x05, 0x6F, 0xCC, 0x84, 0xCC, 0x80, + 0xCE, 0x05, 0x6F, 0xCC, 0x84, 0xCC, 0x81, 0xCE, + 0x05, 0x6F, 0xCC, 0x87, 0xCC, 0x84, 0xCE, 0x05, + 0x6F, 0xCC, 0x88, 0xCC, 0x84, 0xCE, 0x05, 0x6F, + 0xCC, 0x9B, 0xCC, 0x80, 0xCE, 0x05, 0x6F, 0xCC, + // Bytes 3c40 - 3c7f + 0x9B, 0xCC, 0x81, 0xCE, 0x05, 0x6F, 0xCC, 0x9B, + 0xCC, 0x83, 0xCE, 0x05, 0x6F, 0xCC, 0x9B, 0xCC, + 0x89, 0xCE, 0x05, 0x6F, 0xCC, 0x9B, 0xCC, 0xA3, + 0xBA, 0x05, 0x6F, 0xCC, 0xA3, 0xCC, 0x82, 0xCE, + 0x05, 0x6F, 0xCC, 0xA8, 0xCC, 0x84, 0xCE, 0x05, + 0x72, 0xCC, 0xA3, 0xCC, 0x84, 0xCE, 0x05, 0x73, + 0xCC, 0x81, 0xCC, 0x87, 0xCE, 0x05, 0x73, 0xCC, + 0x8C, 0xCC, 0x87, 0xCE, 0x05, 0x73, 0xCC, 0xA3, + // Bytes 3c80 - 3cbf + 0xCC, 0x87, 0xCE, 0x05, 0x75, 0xCC, 0x83, 0xCC, + 0x81, 0xCE, 0x05, 0x75, 0xCC, 0x84, 0xCC, 0x88, + 0xCE, 0x05, 0x75, 0xCC, 0x88, 0xCC, 0x80, 0xCE, + 0x05, 0x75, 0xCC, 0x88, 0xCC, 0x81, 0xCE, 0x05, + 0x75, 0xCC, 0x88, 0xCC, 0x84, 0xCE, 0x05, 0x75, + 0xCC, 0x88, 0xCC, 0x8C, 0xCE, 0x05, 0x75, 0xCC, + 0x9B, 0xCC, 0x80, 0xCE, 0x05, 0x75, 0xCC, 0x9B, + 0xCC, 0x81, 0xCE, 0x05, 0x75, 0xCC, 0x9B, 0xCC, + // Bytes 3cc0 - 3cff + 0x83, 0xCE, 0x05, 0x75, 0xCC, 0x9B, 0xCC, 0x89, + 0xCE, 0x05, 0x75, 0xCC, 0x9B, 0xCC, 0xA3, 0xBA, + 0x05, 0xE1, 0xBE, 0xBF, 0xCC, 0x80, 0xCE, 0x05, + 0xE1, 0xBE, 0xBF, 0xCC, 0x81, 0xCE, 0x05, 0xE1, + 0xBE, 0xBF, 0xCD, 0x82, 0xCE, 0x05, 0xE1, 0xBF, + 0xBE, 0xCC, 0x80, 0xCE, 0x05, 0xE1, 0xBF, 0xBE, + 0xCC, 0x81, 0xCE, 0x05, 0xE1, 0xBF, 0xBE, 0xCD, + 0x82, 0xCE, 0x05, 0xE2, 0x86, 0x90, 0xCC, 0xB8, + // Bytes 3d00 - 3d3f + 0x05, 0x05, 0xE2, 0x86, 0x92, 0xCC, 0xB8, 0x05, + 0x05, 0xE2, 0x86, 0x94, 0xCC, 0xB8, 0x05, 0x05, + 0xE2, 0x87, 0x90, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + 0x87, 0x92, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x87, + 0x94, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x88, 0x83, + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x88, 0x88, 0xCC, + 0xB8, 0x05, 0x05, 0xE2, 0x88, 0x8B, 0xCC, 0xB8, + 0x05, 0x05, 0xE2, 0x88, 0xA3, 0xCC, 0xB8, 0x05, + // Bytes 3d40 - 3d7f + 0x05, 0xE2, 0x88, 0xA5, 0xCC, 0xB8, 0x05, 0x05, + 0xE2, 0x88, 0xBC, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + 0x89, 0x83, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, + 0x85, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, 0x88, + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, 0x8D, 0xCC, + 0xB8, 0x05, 0x05, 0xE2, 0x89, 0xA1, 0xCC, 0xB8, + 0x05, 0x05, 0xE2, 0x89, 0xA4, 0xCC, 0xB8, 0x05, + 0x05, 0xE2, 0x89, 0xA5, 0xCC, 0xB8, 0x05, 0x05, + // Bytes 3d80 - 3dbf + 0xE2, 0x89, 0xB2, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + 0x89, 0xB3, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, + 0xB6, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, 0xB7, + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, 0xBA, 0xCC, + 0xB8, 0x05, 0x05, 0xE2, 0x89, 0xBB, 0xCC, 0xB8, + 0x05, 0x05, 0xE2, 0x89, 0xBC, 0xCC, 0xB8, 0x05, + 0x05, 0xE2, 0x89, 0xBD, 0xCC, 0xB8, 0x05, 0x05, + 0xE2, 0x8A, 0x82, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + // Bytes 3dc0 - 3dff + 0x8A, 0x83, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, + 0x86, 0xCC, 
0xB8, 0x05, 0x05, 0xE2, 0x8A, 0x87, + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0x91, 0xCC, + 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0x92, 0xCC, 0xB8, + 0x05, 0x05, 0xE2, 0x8A, 0xA2, 0xCC, 0xB8, 0x05, + 0x05, 0xE2, 0x8A, 0xA8, 0xCC, 0xB8, 0x05, 0x05, + 0xE2, 0x8A, 0xA9, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + 0x8A, 0xAB, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, + // Bytes 3e00 - 3e3f + 0xB2, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0xB3, + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0xB4, 0xCC, + 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0xB5, 0xCC, 0xB8, + 0x05, 0x06, 0xCE, 0x91, 0xCC, 0x93, 0xCD, 0x85, + 0xDE, 0x06, 0xCE, 0x91, 0xCC, 0x94, 0xCD, 0x85, + 0xDE, 0x06, 0xCE, 0x95, 0xCC, 0x93, 0xCC, 0x80, + 0xCE, 0x06, 0xCE, 0x95, 0xCC, 0x93, 0xCC, 0x81, + 0xCE, 0x06, 0xCE, 0x95, 0xCC, 0x94, 0xCC, 0x80, + // Bytes 3e40 - 3e7f + 0xCE, 0x06, 0xCE, 0x95, 0xCC, 0x94, 0xCC, 0x81, + 0xCE, 0x06, 0xCE, 0x97, 0xCC, 0x93, 0xCD, 0x85, + 0xDE, 0x06, 0xCE, 0x97, 0xCC, 0x94, 0xCD, 0x85, + 0xDE, 0x06, 0xCE, 0x99, 0xCC, 0x93, 0xCC, 0x80, + 0xCE, 0x06, 0xCE, 0x99, 0xCC, 0x93, 0xCC, 0x81, + 0xCE, 0x06, 0xCE, 0x99, 0xCC, 0x93, 0xCD, 0x82, + 0xCE, 0x06, 0xCE, 0x99, 0xCC, 0x94, 0xCC, 0x80, + 0xCE, 0x06, 0xCE, 0x99, 0xCC, 0x94, 0xCC, 0x81, + // Bytes 3e80 - 3ebf + 0xCE, 0x06, 0xCE, 0x99, 0xCC, 0x94, 0xCD, 0x82, + 0xCE, 0x06, 0xCE, 0x9F, 0xCC, 0x93, 0xCC, 0x80, + 0xCE, 0x06, 0xCE, 0x9F, 0xCC, 0x93, 0xCC, 0x81, + 0xCE, 0x06, 0xCE, 0x9F, 0xCC, 0x94, 0xCC, 0x80, + 0xCE, 0x06, 0xCE, 0x9F, 0xCC, 0x94, 0xCC, 0x81, + 0xCE, 0x06, 0xCE, 0xA5, 0xCC, 0x94, 0xCC, 0x80, + 0xCE, 0x06, 0xCE, 0xA5, 0xCC, 0x94, 0xCC, 0x81, + 0xCE, 0x06, 0xCE, 0xA5, 0xCC, 0x94, 0xCD, 0x82, + // Bytes 3ec0 - 3eff + 0xCE, 0x06, 0xCE, 0xA9, 0xCC, 0x93, 0xCD, 0x85, + 0xDE, 0x06, 0xCE, 0xA9, 0xCC, 0x94, 0xCD, 0x85, + 0xDE, 0x06, 0xCE, 0xB1, 0xCC, 0x80, 0xCD, 0x85, + 0xDE, 0x06, 0xCE, 0xB1, 0xCC, 0x81, 0xCD, 0x85, + 0xDE, 0x06, 0xCE, 0xB1, 0xCC, 0x93, 0xCD, 0x85, + 0xDE, 0x06, 0xCE, 0xB1, 0xCC, 0x94, 0xCD, 0x85, + 0xDE, 0x06, 0xCE, 0xB1, 0xCD, 0x82, 0xCD, 0x85, + 0xDE, 0x06, 0xCE, 0xB5, 0xCC, 0x93, 0xCC, 0x80, + // Bytes 3f00 - 3f3f + 0xCE, 0x06, 0xCE, 0xB5, 0xCC, 0x93, 0xCC, 0x81, + 0xCE, 0x06, 0xCE, 0xB5, 0xCC, 0x94, 0xCC, 0x80, + 0xCE, 0x06, 0xCE, 0xB5, 0xCC, 0x94, 0xCC, 0x81, + 0xCE, 0x06, 0xCE, 0xB7, 0xCC, 0x80, 0xCD, 0x85, + 0xDE, 0x06, 0xCE, 0xB7, 0xCC, 0x81, 0xCD, 0x85, + 0xDE, 0x06, 0xCE, 0xB7, 0xCC, 0x93, 0xCD, 0x85, + 0xDE, 0x06, 0xCE, 0xB7, 0xCC, 0x94, 0xCD, 0x85, + 0xDE, 0x06, 0xCE, 0xB7, 0xCD, 0x82, 0xCD, 0x85, + // Bytes 3f40 - 3f7f + 0xDE, 0x06, 0xCE, 0xB9, 0xCC, 0x88, 0xCC, 0x80, + 0xCE, 0x06, 0xCE, 0xB9, 0xCC, 0x88, 0xCC, 0x81, + 0xCE, 0x06, 0xCE, 0xB9, 0xCC, 0x88, 0xCD, 0x82, + 0xCE, 0x06, 0xCE, 0xB9, 0xCC, 0x93, 0xCC, 0x80, + 0xCE, 0x06, 0xCE, 0xB9, 0xCC, 0x93, 0xCC, 0x81, + 0xCE, 0x06, 0xCE, 0xB9, 0xCC, 0x93, 0xCD, 0x82, + 0xCE, 0x06, 0xCE, 0xB9, 0xCC, 0x94, 0xCC, 0x80, + 0xCE, 0x06, 0xCE, 0xB9, 0xCC, 0x94, 0xCC, 0x81, + // Bytes 3f80 - 3fbf + 0xCE, 0x06, 0xCE, 0xB9, 0xCC, 0x94, 0xCD, 0x82, + 0xCE, 0x06, 0xCE, 0xBF, 0xCC, 0x93, 0xCC, 0x80, + 0xCE, 0x06, 0xCE, 0xBF, 0xCC, 0x93, 0xCC, 0x81, + 0xCE, 0x06, 0xCE, 0xBF, 0xCC, 0x94, 0xCC, 0x80, + 0xCE, 0x06, 0xCE, 0xBF, 0xCC, 0x94, 0xCC, 0x81, + 0xCE, 0x06, 0xCF, 0x85, 0xCC, 0x88, 0xCC, 0x80, + 0xCE, 0x06, 0xCF, 0x85, 0xCC, 0x88, 0xCC, 0x81, + 0xCE, 0x06, 0xCF, 0x85, 0xCC, 0x88, 0xCD, 0x82, + // Bytes 3fc0 - 3fff + 0xCE, 0x06, 0xCF, 0x85, 0xCC, 0x93, 0xCC, 0x80, + 0xCE, 0x06, 0xCF, 0x85, 0xCC, 0x93, 0xCC, 0x81, + 0xCE, 0x06, 0xCF, 0x85, 0xCC, 0x93, 0xCD, 0x82, + 0xCE, 0x06, 0xCF, 0x85, 0xCC, 0x94, 0xCC, 0x80, + 0xCE, 0x06, 0xCF, 0x85, 0xCC, 
0x94, 0xCC, 0x81, + 0xCE, 0x06, 0xCF, 0x85, 0xCC, 0x94, 0xCD, 0x82, + 0xCE, 0x06, 0xCF, 0x89, 0xCC, 0x80, 0xCD, 0x85, + 0xDE, 0x06, 0xCF, 0x89, 0xCC, 0x81, 0xCD, 0x85, + // Bytes 4000 - 403f + 0xDE, 0x06, 0xCF, 0x89, 0xCC, 0x93, 0xCD, 0x85, + 0xDE, 0x06, 0xCF, 0x89, 0xCC, 0x94, 0xCD, 0x85, + 0xDE, 0x06, 0xCF, 0x89, 0xCD, 0x82, 0xCD, 0x85, + 0xDE, 0x06, 0xE0, 0xA4, 0xA8, 0xE0, 0xA4, 0xBC, + 0x0D, 0x06, 0xE0, 0xA4, 0xB0, 0xE0, 0xA4, 0xBC, + 0x0D, 0x06, 0xE0, 0xA4, 0xB3, 0xE0, 0xA4, 0xBC, + 0x0D, 0x06, 0xE0, 0xB1, 0x86, 0xE0, 0xB1, 0x96, + 0x89, 0x06, 0xE0, 0xB7, 0x99, 0xE0, 0xB7, 0x8A, + // Bytes 4040 - 407f + 0x15, 0x06, 0xE3, 0x81, 0x86, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x81, 0x8B, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x81, 0x8D, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x81, 0x8F, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x81, 0x91, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x81, 0x93, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x81, 0x95, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x81, 0x97, 0xE3, 0x82, 0x99, + // Bytes 4080 - 40bf + 0x11, 0x06, 0xE3, 0x81, 0x99, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x81, 0x9B, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x81, 0x9D, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x81, 0x9F, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x81, 0xA1, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x81, 0xA4, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x81, 0xA6, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x81, 0xA8, 0xE3, 0x82, 0x99, + // Bytes 40c0 - 40ff + 0x11, 0x06, 0xE3, 0x81, 0xAF, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x81, 0xAF, 0xE3, 0x82, 0x9A, + 0x11, 0x06, 0xE3, 0x81, 0xB2, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x81, 0xB2, 0xE3, 0x82, 0x9A, + 0x11, 0x06, 0xE3, 0x81, 0xB5, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x81, 0xB5, 0xE3, 0x82, 0x9A, + 0x11, 0x06, 0xE3, 0x81, 0xB8, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x81, 0xB8, 0xE3, 0x82, 0x9A, + // Bytes 4100 - 413f + 0x11, 0x06, 0xE3, 0x81, 0xBB, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x81, 0xBB, 0xE3, 0x82, 0x9A, + 0x11, 0x06, 0xE3, 0x82, 0x9D, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x82, 0xA6, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x82, 0xAB, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x82, 0xAD, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x82, 0xAF, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x82, 0xB1, 0xE3, 0x82, 0x99, + // Bytes 4140 - 417f + 0x11, 0x06, 0xE3, 0x82, 0xB3, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x82, 0xB5, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x82, 0xB7, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x82, 0xB9, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x82, 0xBB, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x82, 0xBD, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x82, 0xBF, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x83, 0x81, 0xE3, 0x82, 0x99, + // Bytes 4180 - 41bf + 0x11, 0x06, 0xE3, 0x83, 0x84, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x83, 0x86, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x83, 0x88, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x83, 0x8F, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x83, 0x8F, 0xE3, 0x82, 0x9A, + 0x11, 0x06, 0xE3, 0x83, 0x92, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x83, 0x92, 0xE3, 0x82, 0x9A, + 0x11, 0x06, 0xE3, 0x83, 0x95, 0xE3, 0x82, 0x99, + // Bytes 41c0 - 41ff + 0x11, 0x06, 0xE3, 0x83, 0x95, 0xE3, 0x82, 0x9A, + 0x11, 0x06, 0xE3, 0x83, 0x98, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x83, 0x98, 0xE3, 0x82, 0x9A, + 0x11, 0x06, 0xE3, 0x83, 0x9B, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x83, 0x9B, 0xE3, 0x82, 0x9A, + 0x11, 0x06, 0xE3, 0x83, 0xAF, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x83, 0xB0, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x83, 0xB1, 0xE3, 0x82, 0x99, + 
// Bytes 4200 - 423f + 0x11, 0x06, 0xE3, 0x83, 0xB2, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x83, 0xBD, 0xE3, 0x82, 0x99, + 0x11, 0x08, 0xCE, 0x91, 0xCC, 0x93, 0xCC, 0x80, + 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0x91, 0xCC, 0x93, + 0xCC, 0x81, 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0x91, + 0xCC, 0x93, 0xCD, 0x82, 0xCD, 0x85, 0xDF, 0x08, + 0xCE, 0x91, 0xCC, 0x94, 0xCC, 0x80, 0xCD, 0x85, + 0xDF, 0x08, 0xCE, 0x91, 0xCC, 0x94, 0xCC, 0x81, + // Bytes 4240 - 427f + 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0x91, 0xCC, 0x94, + 0xCD, 0x82, 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0x97, + 0xCC, 0x93, 0xCC, 0x80, 0xCD, 0x85, 0xDF, 0x08, + 0xCE, 0x97, 0xCC, 0x93, 0xCC, 0x81, 0xCD, 0x85, + 0xDF, 0x08, 0xCE, 0x97, 0xCC, 0x93, 0xCD, 0x82, + 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0x97, 0xCC, 0x94, + 0xCC, 0x80, 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0x97, + 0xCC, 0x94, 0xCC, 0x81, 0xCD, 0x85, 0xDF, 0x08, + // Bytes 4280 - 42bf + 0xCE, 0x97, 0xCC, 0x94, 0xCD, 0x82, 0xCD, 0x85, + 0xDF, 0x08, 0xCE, 0xA9, 0xCC, 0x93, 0xCC, 0x80, + 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0xA9, 0xCC, 0x93, + 0xCC, 0x81, 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0xA9, + 0xCC, 0x93, 0xCD, 0x82, 0xCD, 0x85, 0xDF, 0x08, + 0xCE, 0xA9, 0xCC, 0x94, 0xCC, 0x80, 0xCD, 0x85, + 0xDF, 0x08, 0xCE, 0xA9, 0xCC, 0x94, 0xCC, 0x81, + 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0xA9, 0xCC, 0x94, + // Bytes 42c0 - 42ff + 0xCD, 0x82, 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0xB1, + 0xCC, 0x93, 0xCC, 0x80, 0xCD, 0x85, 0xDF, 0x08, + 0xCE, 0xB1, 0xCC, 0x93, 0xCC, 0x81, 0xCD, 0x85, + 0xDF, 0x08, 0xCE, 0xB1, 0xCC, 0x93, 0xCD, 0x82, + 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0xB1, 0xCC, 0x94, + 0xCC, 0x80, 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0xB1, + 0xCC, 0x94, 0xCC, 0x81, 0xCD, 0x85, 0xDF, 0x08, + 0xCE, 0xB1, 0xCC, 0x94, 0xCD, 0x82, 0xCD, 0x85, + // Bytes 4300 - 433f + 0xDF, 0x08, 0xCE, 0xB7, 0xCC, 0x93, 0xCC, 0x80, + 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0xB7, 0xCC, 0x93, + 0xCC, 0x81, 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0xB7, + 0xCC, 0x93, 0xCD, 0x82, 0xCD, 0x85, 0xDF, 0x08, + 0xCE, 0xB7, 0xCC, 0x94, 0xCC, 0x80, 0xCD, 0x85, + 0xDF, 0x08, 0xCE, 0xB7, 0xCC, 0x94, 0xCC, 0x81, + 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0xB7, 0xCC, 0x94, + 0xCD, 0x82, 0xCD, 0x85, 0xDF, 0x08, 0xCF, 0x89, + // Bytes 4340 - 437f + 0xCC, 0x93, 0xCC, 0x80, 0xCD, 0x85, 0xDF, 0x08, + 0xCF, 0x89, 0xCC, 0x93, 0xCC, 0x81, 0xCD, 0x85, + 0xDF, 0x08, 0xCF, 0x89, 0xCC, 0x93, 0xCD, 0x82, + 0xCD, 0x85, 0xDF, 0x08, 0xCF, 0x89, 0xCC, 0x94, + 0xCC, 0x80, 0xCD, 0x85, 0xDF, 0x08, 0xCF, 0x89, + 0xCC, 0x94, 0xCC, 0x81, 0xCD, 0x85, 0xDF, 0x08, + 0xCF, 0x89, 0xCC, 0x94, 0xCD, 0x82, 0xCD, 0x85, + 0xDF, 0x08, 0xF0, 0x91, 0x82, 0x99, 0xF0, 0x91, + // Bytes 4380 - 43bf + 0x82, 0xBA, 0x0D, 0x08, 0xF0, 0x91, 0x82, 0x9B, + 0xF0, 0x91, 0x82, 0xBA, 0x0D, 0x08, 0xF0, 0x91, + 0x82, 0xA5, 0xF0, 0x91, 0x82, 0xBA, 0x0D, 0x42, + 0xC2, 0xB4, 0x01, 0x43, 0x20, 0xCC, 0x81, 0xCD, + 0x43, 0x20, 0xCC, 0x83, 0xCD, 0x43, 0x20, 0xCC, + 0x84, 0xCD, 0x43, 0x20, 0xCC, 0x85, 0xCD, 0x43, + 0x20, 0xCC, 0x86, 0xCD, 0x43, 0x20, 0xCC, 0x87, + 0xCD, 0x43, 0x20, 0xCC, 0x88, 0xCD, 0x43, 0x20, + // Bytes 43c0 - 43ff + 0xCC, 0x8A, 0xCD, 0x43, 0x20, 0xCC, 0x8B, 0xCD, + 0x43, 0x20, 0xCC, 0x93, 0xCD, 0x43, 0x20, 0xCC, + 0x94, 0xCD, 0x43, 0x20, 0xCC, 0xA7, 0xA9, 0x43, + 0x20, 0xCC, 0xA8, 0xA9, 0x43, 0x20, 0xCC, 0xB3, + 0xB9, 0x43, 0x20, 0xCD, 0x82, 0xCD, 0x43, 0x20, + 0xCD, 0x85, 0xDD, 0x43, 0x20, 0xD9, 0x8B, 0x5D, + 0x43, 0x20, 0xD9, 0x8C, 0x61, 0x43, 0x20, 0xD9, + 0x8D, 0x65, 0x43, 0x20, 0xD9, 0x8E, 0x69, 0x43, + // Bytes 4400 - 443f + 0x20, 0xD9, 0x8F, 0x6D, 0x43, 0x20, 0xD9, 0x90, + 0x71, 0x43, 0x20, 0xD9, 0x91, 0x75, 0x43, 0x20, + 0xD9, 0x92, 0x79, 0x43, 0x41, 0xCC, 0x8A, 0xCD, 
+ 0x43, 0x73, 0xCC, 0x87, 0xCD, 0x44, 0x20, 0xE3, + 0x82, 0x99, 0x11, 0x44, 0x20, 0xE3, 0x82, 0x9A, + 0x11, 0x44, 0xC2, 0xA8, 0xCC, 0x81, 0xCE, 0x44, + 0xCE, 0x91, 0xCC, 0x81, 0xCD, 0x44, 0xCE, 0x95, + 0xCC, 0x81, 0xCD, 0x44, 0xCE, 0x97, 0xCC, 0x81, + // Bytes 4440 - 447f + 0xCD, 0x44, 0xCE, 0x99, 0xCC, 0x81, 0xCD, 0x44, + 0xCE, 0x9F, 0xCC, 0x81, 0xCD, 0x44, 0xCE, 0xA5, + 0xCC, 0x81, 0xCD, 0x44, 0xCE, 0xA5, 0xCC, 0x88, + 0xCD, 0x44, 0xCE, 0xA9, 0xCC, 0x81, 0xCD, 0x44, + 0xCE, 0xB1, 0xCC, 0x81, 0xCD, 0x44, 0xCE, 0xB5, + 0xCC, 0x81, 0xCD, 0x44, 0xCE, 0xB7, 0xCC, 0x81, + 0xCD, 0x44, 0xCE, 0xB9, 0xCC, 0x81, 0xCD, 0x44, + 0xCE, 0xBF, 0xCC, 0x81, 0xCD, 0x44, 0xCF, 0x85, + // Bytes 4480 - 44bf + 0xCC, 0x81, 0xCD, 0x44, 0xCF, 0x89, 0xCC, 0x81, + 0xCD, 0x44, 0xD7, 0x90, 0xD6, 0xB7, 0x35, 0x44, + 0xD7, 0x90, 0xD6, 0xB8, 0x39, 0x44, 0xD7, 0x90, + 0xD6, 0xBC, 0x45, 0x44, 0xD7, 0x91, 0xD6, 0xBC, + 0x45, 0x44, 0xD7, 0x91, 0xD6, 0xBF, 0x4D, 0x44, + 0xD7, 0x92, 0xD6, 0xBC, 0x45, 0x44, 0xD7, 0x93, + 0xD6, 0xBC, 0x45, 0x44, 0xD7, 0x94, 0xD6, 0xBC, + 0x45, 0x44, 0xD7, 0x95, 0xD6, 0xB9, 0x3D, 0x44, + // Bytes 44c0 - 44ff + 0xD7, 0x95, 0xD6, 0xBC, 0x45, 0x44, 0xD7, 0x96, + 0xD6, 0xBC, 0x45, 0x44, 0xD7, 0x98, 0xD6, 0xBC, + 0x45, 0x44, 0xD7, 0x99, 0xD6, 0xB4, 0x29, 0x44, + 0xD7, 0x99, 0xD6, 0xBC, 0x45, 0x44, 0xD7, 0x9A, + 0xD6, 0xBC, 0x45, 0x44, 0xD7, 0x9B, 0xD6, 0xBC, + 0x45, 0x44, 0xD7, 0x9B, 0xD6, 0xBF, 0x4D, 0x44, + 0xD7, 0x9C, 0xD6, 0xBC, 0x45, 0x44, 0xD7, 0x9E, + 0xD6, 0xBC, 0x45, 0x44, 0xD7, 0xA0, 0xD6, 0xBC, + // Bytes 4500 - 453f + 0x45, 0x44, 0xD7, 0xA1, 0xD6, 0xBC, 0x45, 0x44, + 0xD7, 0xA3, 0xD6, 0xBC, 0x45, 0x44, 0xD7, 0xA4, + 0xD6, 0xBC, 0x45, 0x44, 0xD7, 0xA4, 0xD6, 0xBF, + 0x4D, 0x44, 0xD7, 0xA6, 0xD6, 0xBC, 0x45, 0x44, + 0xD7, 0xA7, 0xD6, 0xBC, 0x45, 0x44, 0xD7, 0xA8, + 0xD6, 0xBC, 0x45, 0x44, 0xD7, 0xA9, 0xD6, 0xBC, + 0x45, 0x44, 0xD7, 0xA9, 0xD7, 0x81, 0x51, 0x44, + 0xD7, 0xA9, 0xD7, 0x82, 0x55, 0x44, 0xD7, 0xAA, + // Bytes 4540 - 457f + 0xD6, 0xBC, 0x45, 0x44, 0xD7, 0xB2, 0xD6, 0xB7, + 0x35, 0x44, 0xD8, 0xA7, 0xD9, 0x8B, 0x5D, 0x44, + 0xD8, 0xA7, 0xD9, 0x93, 0xCD, 0x44, 0xD8, 0xA7, + 0xD9, 0x94, 0xCD, 0x44, 0xD8, 0xA7, 0xD9, 0x95, + 0xB9, 0x44, 0xD8, 0xB0, 0xD9, 0xB0, 0x7D, 0x44, + 0xD8, 0xB1, 0xD9, 0xB0, 0x7D, 0x44, 0xD9, 0x80, + 0xD9, 0x8B, 0x5D, 0x44, 0xD9, 0x80, 0xD9, 0x8E, + 0x69, 0x44, 0xD9, 0x80, 0xD9, 0x8F, 0x6D, 0x44, + // Bytes 4580 - 45bf + 0xD9, 0x80, 0xD9, 0x90, 0x71, 0x44, 0xD9, 0x80, + 0xD9, 0x91, 0x75, 0x44, 0xD9, 0x80, 0xD9, 0x92, + 0x79, 0x44, 0xD9, 0x87, 0xD9, 0xB0, 0x7D, 0x44, + 0xD9, 0x88, 0xD9, 0x94, 0xCD, 0x44, 0xD9, 0x89, + 0xD9, 0xB0, 0x7D, 0x44, 0xD9, 0x8A, 0xD9, 0x94, + 0xCD, 0x44, 0xDB, 0x92, 0xD9, 0x94, 0xCD, 0x44, + 0xDB, 0x95, 0xD9, 0x94, 0xCD, 0x45, 0x20, 0xCC, + 0x88, 0xCC, 0x80, 0xCE, 0x45, 0x20, 0xCC, 0x88, + // Bytes 45c0 - 45ff + 0xCC, 0x81, 0xCE, 0x45, 0x20, 0xCC, 0x88, 0xCD, + 0x82, 0xCE, 0x45, 0x20, 0xCC, 0x93, 0xCC, 0x80, + 0xCE, 0x45, 0x20, 0xCC, 0x93, 0xCC, 0x81, 0xCE, + 0x45, 0x20, 0xCC, 0x93, 0xCD, 0x82, 0xCE, 0x45, + 0x20, 0xCC, 0x94, 0xCC, 0x80, 0xCE, 0x45, 0x20, + 0xCC, 0x94, 0xCC, 0x81, 0xCE, 0x45, 0x20, 0xCC, + 0x94, 0xCD, 0x82, 0xCE, 0x45, 0x20, 0xD9, 0x8C, + 0xD9, 0x91, 0x76, 0x45, 0x20, 0xD9, 0x8D, 0xD9, + // Bytes 4600 - 463f + 0x91, 0x76, 0x45, 0x20, 0xD9, 0x8E, 0xD9, 0x91, + 0x76, 0x45, 0x20, 0xD9, 0x8F, 0xD9, 0x91, 0x76, + 0x45, 0x20, 0xD9, 0x90, 0xD9, 0x91, 0x76, 0x45, + 0x20, 0xD9, 0x91, 0xD9, 0xB0, 0x7E, 0x45, 0xE2, + 0xAB, 0x9D, 0xCC, 0xB8, 0x05, 0x46, 0xCE, 0xB9, + 0xCC, 0x88, 0xCC, 0x81, 0xCE, 0x46, 0xCF, 0x85, + 0xCC, 0x88, 0xCC, 
0x81, 0xCE, 0x46, 0xD7, 0xA9, + 0xD6, 0xBC, 0xD7, 0x81, 0x52, 0x46, 0xD7, 0xA9, + // Bytes 4640 - 467f + 0xD6, 0xBC, 0xD7, 0x82, 0x56, 0x46, 0xD9, 0x80, + 0xD9, 0x8E, 0xD9, 0x91, 0x76, 0x46, 0xD9, 0x80, + 0xD9, 0x8F, 0xD9, 0x91, 0x76, 0x46, 0xD9, 0x80, + 0xD9, 0x90, 0xD9, 0x91, 0x76, 0x46, 0xE0, 0xA4, + 0x95, 0xE0, 0xA4, 0xBC, 0x0D, 0x46, 0xE0, 0xA4, + 0x96, 0xE0, 0xA4, 0xBC, 0x0D, 0x46, 0xE0, 0xA4, + 0x97, 0xE0, 0xA4, 0xBC, 0x0D, 0x46, 0xE0, 0xA4, + 0x9C, 0xE0, 0xA4, 0xBC, 0x0D, 0x46, 0xE0, 0xA4, + // Bytes 4680 - 46bf + 0xA1, 0xE0, 0xA4, 0xBC, 0x0D, 0x46, 0xE0, 0xA4, + 0xA2, 0xE0, 0xA4, 0xBC, 0x0D, 0x46, 0xE0, 0xA4, + 0xAB, 0xE0, 0xA4, 0xBC, 0x0D, 0x46, 0xE0, 0xA4, + 0xAF, 0xE0, 0xA4, 0xBC, 0x0D, 0x46, 0xE0, 0xA6, + 0xA1, 0xE0, 0xA6, 0xBC, 0x0D, 0x46, 0xE0, 0xA6, + 0xA2, 0xE0, 0xA6, 0xBC, 0x0D, 0x46, 0xE0, 0xA6, + 0xAF, 0xE0, 0xA6, 0xBC, 0x0D, 0x46, 0xE0, 0xA8, + 0x96, 0xE0, 0xA8, 0xBC, 0x0D, 0x46, 0xE0, 0xA8, + // Bytes 46c0 - 46ff + 0x97, 0xE0, 0xA8, 0xBC, 0x0D, 0x46, 0xE0, 0xA8, + 0x9C, 0xE0, 0xA8, 0xBC, 0x0D, 0x46, 0xE0, 0xA8, + 0xAB, 0xE0, 0xA8, 0xBC, 0x0D, 0x46, 0xE0, 0xA8, + 0xB2, 0xE0, 0xA8, 0xBC, 0x0D, 0x46, 0xE0, 0xA8, + 0xB8, 0xE0, 0xA8, 0xBC, 0x0D, 0x46, 0xE0, 0xAC, + 0xA1, 0xE0, 0xAC, 0xBC, 0x0D, 0x46, 0xE0, 0xAC, + 0xA2, 0xE0, 0xAC, 0xBC, 0x0D, 0x46, 0xE0, 0xBE, + 0xB2, 0xE0, 0xBE, 0x80, 0xA1, 0x46, 0xE0, 0xBE, + // Bytes 4700 - 473f + 0xB3, 0xE0, 0xBE, 0x80, 0xA1, 0x46, 0xE3, 0x83, + 0x86, 0xE3, 0x82, 0x99, 0x11, 0x48, 0xF0, 0x9D, + 0x85, 0x97, 0xF0, 0x9D, 0x85, 0xA5, 0xB1, 0x48, + 0xF0, 0x9D, 0x85, 0x98, 0xF0, 0x9D, 0x85, 0xA5, + 0xB1, 0x48, 0xF0, 0x9D, 0x86, 0xB9, 0xF0, 0x9D, + 0x85, 0xA5, 0xB1, 0x48, 0xF0, 0x9D, 0x86, 0xBA, + 0xF0, 0x9D, 0x85, 0xA5, 0xB1, 0x49, 0xE0, 0xBE, + 0xB2, 0xE0, 0xBD, 0xB1, 0xE0, 0xBE, 0x80, 0xA2, + // Bytes 4740 - 477f + 0x49, 0xE0, 0xBE, 0xB3, 0xE0, 0xBD, 0xB1, 0xE0, + 0xBE, 0x80, 0xA2, 0x4C, 0xF0, 0x9D, 0x85, 0x98, + 0xF0, 0x9D, 0x85, 0xA5, 0xF0, 0x9D, 0x85, 0xAE, + 0xB2, 0x4C, 0xF0, 0x9D, 0x85, 0x98, 0xF0, 0x9D, + 0x85, 0xA5, 0xF0, 0x9D, 0x85, 0xAF, 0xB2, 0x4C, + 0xF0, 0x9D, 0x85, 0x98, 0xF0, 0x9D, 0x85, 0xA5, + 0xF0, 0x9D, 0x85, 0xB0, 0xB2, 0x4C, 0xF0, 0x9D, + 0x85, 0x98, 0xF0, 0x9D, 0x85, 0xA5, 0xF0, 0x9D, + // Bytes 4780 - 47bf + 0x85, 0xB1, 0xB2, 0x4C, 0xF0, 0x9D, 0x85, 0x98, + 0xF0, 0x9D, 0x85, 0xA5, 0xF0, 0x9D, 0x85, 0xB2, + 0xB2, 0x4C, 0xF0, 0x9D, 0x86, 0xB9, 0xF0, 0x9D, + 0x85, 0xA5, 0xF0, 0x9D, 0x85, 0xAE, 0xB2, 0x4C, + 0xF0, 0x9D, 0x86, 0xB9, 0xF0, 0x9D, 0x85, 0xA5, + 0xF0, 0x9D, 0x85, 0xAF, 0xB2, 0x4C, 0xF0, 0x9D, + 0x86, 0xBA, 0xF0, 0x9D, 0x85, 0xA5, 0xF0, 0x9D, + 0x85, 0xAE, 0xB2, 0x4C, 0xF0, 0x9D, 0x86, 0xBA, + // Bytes 47c0 - 47ff + 0xF0, 0x9D, 0x85, 0xA5, 0xF0, 0x9D, 0x85, 0xAF, + 0xB2, 0x83, 0x41, 0xCC, 0x82, 0xCD, 0x83, 0x41, + 0xCC, 0x86, 0xCD, 0x83, 0x41, 0xCC, 0x87, 0xCD, + 0x83, 0x41, 0xCC, 0x88, 0xCD, 0x83, 0x41, 0xCC, + 0x8A, 0xCD, 0x83, 0x41, 0xCC, 0xA3, 0xB9, 0x83, + 0x43, 0xCC, 0xA7, 0xA9, 0x83, 0x45, 0xCC, 0x82, + 0xCD, 0x83, 0x45, 0xCC, 0x84, 0xCD, 0x83, 0x45, + 0xCC, 0xA3, 0xB9, 0x83, 0x45, 0xCC, 0xA7, 0xA9, + // Bytes 4800 - 483f + 0x83, 0x49, 0xCC, 0x88, 0xCD, 0x83, 0x4C, 0xCC, + 0xA3, 0xB9, 0x83, 0x4F, 0xCC, 0x82, 0xCD, 0x83, + 0x4F, 0xCC, 0x83, 0xCD, 0x83, 0x4F, 0xCC, 0x84, + 0xCD, 0x83, 0x4F, 0xCC, 0x87, 0xCD, 0x83, 0x4F, + 0xCC, 0x88, 0xCD, 0x83, 0x4F, 0xCC, 0x9B, 0xB1, + 0x83, 0x4F, 0xCC, 0xA3, 0xB9, 0x83, 0x4F, 0xCC, + 0xA8, 0xA9, 0x83, 0x52, 0xCC, 0xA3, 0xB9, 0x83, + 0x53, 0xCC, 0x81, 0xCD, 0x83, 0x53, 0xCC, 0x8C, + // Bytes 4840 - 487f + 0xCD, 0x83, 0x53, 0xCC, 0xA3, 0xB9, 0x83, 0x55, + 0xCC, 0x83, 
0xCD, 0x83, 0x55, 0xCC, 0x84, 0xCD, + 0x83, 0x55, 0xCC, 0x88, 0xCD, 0x83, 0x55, 0xCC, + 0x9B, 0xB1, 0x83, 0x61, 0xCC, 0x82, 0xCD, 0x83, + 0x61, 0xCC, 0x86, 0xCD, 0x83, 0x61, 0xCC, 0x87, + 0xCD, 0x83, 0x61, 0xCC, 0x88, 0xCD, 0x83, 0x61, + 0xCC, 0x8A, 0xCD, 0x83, 0x61, 0xCC, 0xA3, 0xB9, + 0x83, 0x63, 0xCC, 0xA7, 0xA9, 0x83, 0x65, 0xCC, + // Bytes 4880 - 48bf + 0x82, 0xCD, 0x83, 0x65, 0xCC, 0x84, 0xCD, 0x83, + 0x65, 0xCC, 0xA3, 0xB9, 0x83, 0x65, 0xCC, 0xA7, + 0xA9, 0x83, 0x69, 0xCC, 0x88, 0xCD, 0x83, 0x6C, + 0xCC, 0xA3, 0xB9, 0x83, 0x6F, 0xCC, 0x82, 0xCD, + 0x83, 0x6F, 0xCC, 0x83, 0xCD, 0x83, 0x6F, 0xCC, + 0x84, 0xCD, 0x83, 0x6F, 0xCC, 0x87, 0xCD, 0x83, + 0x6F, 0xCC, 0x88, 0xCD, 0x83, 0x6F, 0xCC, 0x9B, + 0xB1, 0x83, 0x6F, 0xCC, 0xA3, 0xB9, 0x83, 0x6F, + // Bytes 48c0 - 48ff + 0xCC, 0xA8, 0xA9, 0x83, 0x72, 0xCC, 0xA3, 0xB9, + 0x83, 0x73, 0xCC, 0x81, 0xCD, 0x83, 0x73, 0xCC, + 0x8C, 0xCD, 0x83, 0x73, 0xCC, 0xA3, 0xB9, 0x83, + 0x75, 0xCC, 0x83, 0xCD, 0x83, 0x75, 0xCC, 0x84, + 0xCD, 0x83, 0x75, 0xCC, 0x88, 0xCD, 0x83, 0x75, + 0xCC, 0x9B, 0xB1, 0x84, 0xCE, 0x91, 0xCC, 0x93, + 0xCD, 0x84, 0xCE, 0x91, 0xCC, 0x94, 0xCD, 0x84, + 0xCE, 0x95, 0xCC, 0x93, 0xCD, 0x84, 0xCE, 0x95, + // Bytes 4900 - 493f + 0xCC, 0x94, 0xCD, 0x84, 0xCE, 0x97, 0xCC, 0x93, + 0xCD, 0x84, 0xCE, 0x97, 0xCC, 0x94, 0xCD, 0x84, + 0xCE, 0x99, 0xCC, 0x93, 0xCD, 0x84, 0xCE, 0x99, + 0xCC, 0x94, 0xCD, 0x84, 0xCE, 0x9F, 0xCC, 0x93, + 0xCD, 0x84, 0xCE, 0x9F, 0xCC, 0x94, 0xCD, 0x84, + 0xCE, 0xA5, 0xCC, 0x94, 0xCD, 0x84, 0xCE, 0xA9, + 0xCC, 0x93, 0xCD, 0x84, 0xCE, 0xA9, 0xCC, 0x94, + 0xCD, 0x84, 0xCE, 0xB1, 0xCC, 0x80, 0xCD, 0x84, + // Bytes 4940 - 497f + 0xCE, 0xB1, 0xCC, 0x81, 0xCD, 0x84, 0xCE, 0xB1, + 0xCC, 0x93, 0xCD, 0x84, 0xCE, 0xB1, 0xCC, 0x94, + 0xCD, 0x84, 0xCE, 0xB1, 0xCD, 0x82, 0xCD, 0x84, + 0xCE, 0xB5, 0xCC, 0x93, 0xCD, 0x84, 0xCE, 0xB5, + 0xCC, 0x94, 0xCD, 0x84, 0xCE, 0xB7, 0xCC, 0x80, + 0xCD, 0x84, 0xCE, 0xB7, 0xCC, 0x81, 0xCD, 0x84, + 0xCE, 0xB7, 0xCC, 0x93, 0xCD, 0x84, 0xCE, 0xB7, + 0xCC, 0x94, 0xCD, 0x84, 0xCE, 0xB7, 0xCD, 0x82, + // Bytes 4980 - 49bf + 0xCD, 0x84, 0xCE, 0xB9, 0xCC, 0x88, 0xCD, 0x84, + 0xCE, 0xB9, 0xCC, 0x93, 0xCD, 0x84, 0xCE, 0xB9, + 0xCC, 0x94, 0xCD, 0x84, 0xCE, 0xBF, 0xCC, 0x93, + 0xCD, 0x84, 0xCE, 0xBF, 0xCC, 0x94, 0xCD, 0x84, + 0xCF, 0x85, 0xCC, 0x88, 0xCD, 0x84, 0xCF, 0x85, + 0xCC, 0x93, 0xCD, 0x84, 0xCF, 0x85, 0xCC, 0x94, + 0xCD, 0x84, 0xCF, 0x89, 0xCC, 0x80, 0xCD, 0x84, + 0xCF, 0x89, 0xCC, 0x81, 0xCD, 0x84, 0xCF, 0x89, + // Bytes 49c0 - 49ff + 0xCC, 0x93, 0xCD, 0x84, 0xCF, 0x89, 0xCC, 0x94, + 0xCD, 0x84, 0xCF, 0x89, 0xCD, 0x82, 0xCD, 0x86, + 0xCE, 0x91, 0xCC, 0x93, 0xCC, 0x80, 0xCE, 0x86, + 0xCE, 0x91, 0xCC, 0x93, 0xCC, 0x81, 0xCE, 0x86, + 0xCE, 0x91, 0xCC, 0x93, 0xCD, 0x82, 0xCE, 0x86, + 0xCE, 0x91, 0xCC, 0x94, 0xCC, 0x80, 0xCE, 0x86, + 0xCE, 0x91, 0xCC, 0x94, 0xCC, 0x81, 0xCE, 0x86, + 0xCE, 0x91, 0xCC, 0x94, 0xCD, 0x82, 0xCE, 0x86, + // Bytes 4a00 - 4a3f + 0xCE, 0x97, 0xCC, 0x93, 0xCC, 0x80, 0xCE, 0x86, + 0xCE, 0x97, 0xCC, 0x93, 0xCC, 0x81, 0xCE, 0x86, + 0xCE, 0x97, 0xCC, 0x93, 0xCD, 0x82, 0xCE, 0x86, + 0xCE, 0x97, 0xCC, 0x94, 0xCC, 0x80, 0xCE, 0x86, + 0xCE, 0x97, 0xCC, 0x94, 0xCC, 0x81, 0xCE, 0x86, + 0xCE, 0x97, 0xCC, 0x94, 0xCD, 0x82, 0xCE, 0x86, + 0xCE, 0xA9, 0xCC, 0x93, 0xCC, 0x80, 0xCE, 0x86, + 0xCE, 0xA9, 0xCC, 0x93, 0xCC, 0x81, 0xCE, 0x86, + // Bytes 4a40 - 4a7f + 0xCE, 0xA9, 0xCC, 0x93, 0xCD, 0x82, 0xCE, 0x86, + 0xCE, 0xA9, 0xCC, 0x94, 0xCC, 0x80, 0xCE, 0x86, + 0xCE, 0xA9, 0xCC, 0x94, 0xCC, 0x81, 0xCE, 0x86, + 0xCE, 0xA9, 0xCC, 0x94, 0xCD, 0x82, 0xCE, 0x86, + 0xCE, 0xB1, 0xCC, 0x93, 0xCC, 
0x80, 0xCE, 0x86, + 0xCE, 0xB1, 0xCC, 0x93, 0xCC, 0x81, 0xCE, 0x86, + 0xCE, 0xB1, 0xCC, 0x93, 0xCD, 0x82, 0xCE, 0x86, + 0xCE, 0xB1, 0xCC, 0x94, 0xCC, 0x80, 0xCE, 0x86, + // Bytes 4a80 - 4abf + 0xCE, 0xB1, 0xCC, 0x94, 0xCC, 0x81, 0xCE, 0x86, + 0xCE, 0xB1, 0xCC, 0x94, 0xCD, 0x82, 0xCE, 0x86, + 0xCE, 0xB7, 0xCC, 0x93, 0xCC, 0x80, 0xCE, 0x86, + 0xCE, 0xB7, 0xCC, 0x93, 0xCC, 0x81, 0xCE, 0x86, + 0xCE, 0xB7, 0xCC, 0x93, 0xCD, 0x82, 0xCE, 0x86, + 0xCE, 0xB7, 0xCC, 0x94, 0xCC, 0x80, 0xCE, 0x86, + 0xCE, 0xB7, 0xCC, 0x94, 0xCC, 0x81, 0xCE, 0x86, + 0xCE, 0xB7, 0xCC, 0x94, 0xCD, 0x82, 0xCE, 0x86, + // Bytes 4ac0 - 4aff + 0xCF, 0x89, 0xCC, 0x93, 0xCC, 0x80, 0xCE, 0x86, + 0xCF, 0x89, 0xCC, 0x93, 0xCC, 0x81, 0xCE, 0x86, + 0xCF, 0x89, 0xCC, 0x93, 0xCD, 0x82, 0xCE, 0x86, + 0xCF, 0x89, 0xCC, 0x94, 0xCC, 0x80, 0xCE, 0x86, + 0xCF, 0x89, 0xCC, 0x94, 0xCC, 0x81, 0xCE, 0x86, + 0xCF, 0x89, 0xCC, 0x94, 0xCD, 0x82, 0xCE, 0x42, + 0xCC, 0x80, 0xCD, 0x33, 0x42, 0xCC, 0x81, 0xCD, + 0x33, 0x42, 0xCC, 0x93, 0xCD, 0x33, 0x43, 0xE1, + // Bytes 4b00 - 4b3f + 0x85, 0xA1, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xA2, + 0x01, 0x00, 0x43, 0xE1, 0x85, 0xA3, 0x01, 0x00, + 0x43, 0xE1, 0x85, 0xA4, 0x01, 0x00, 0x43, 0xE1, + 0x85, 0xA5, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xA6, + 0x01, 0x00, 0x43, 0xE1, 0x85, 0xA7, 0x01, 0x00, + 0x43, 0xE1, 0x85, 0xA8, 0x01, 0x00, 0x43, 0xE1, + 0x85, 0xA9, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xAA, + 0x01, 0x00, 0x43, 0xE1, 0x85, 0xAB, 0x01, 0x00, + // Bytes 4b40 - 4b7f + 0x43, 0xE1, 0x85, 0xAC, 0x01, 0x00, 0x43, 0xE1, + 0x85, 0xAD, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xAE, + 0x01, 0x00, 0x43, 0xE1, 0x85, 0xAF, 0x01, 0x00, + 0x43, 0xE1, 0x85, 0xB0, 0x01, 0x00, 0x43, 0xE1, + 0x85, 0xB1, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xB2, + 0x01, 0x00, 0x43, 0xE1, 0x85, 0xB3, 0x01, 0x00, + 0x43, 0xE1, 0x85, 0xB4, 0x01, 0x00, 0x43, 0xE1, + 0x85, 0xB5, 0x01, 0x00, 0x43, 0xE1, 0x86, 0xAA, + // Bytes 4b80 - 4bbf + 0x01, 0x00, 0x43, 0xE1, 0x86, 0xAC, 0x01, 0x00, + 0x43, 0xE1, 0x86, 0xAD, 0x01, 0x00, 0x43, 0xE1, + 0x86, 0xB0, 0x01, 0x00, 0x43, 0xE1, 0x86, 0xB1, + 0x01, 0x00, 0x43, 0xE1, 0x86, 0xB2, 0x01, 0x00, + 0x43, 0xE1, 0x86, 0xB3, 0x01, 0x00, 0x43, 0xE1, + 0x86, 0xB4, 0x01, 0x00, 0x43, 0xE1, 0x86, 0xB5, + 0x01, 0x00, 0x44, 0xCC, 0x88, 0xCC, 0x81, 0xCE, + 0x33, 0x43, 0xE3, 0x82, 0x99, 0x11, 0x04, 0x43, + // Bytes 4bc0 - 4bff + 0xE3, 0x82, 0x9A, 0x11, 0x04, 0x46, 0xE0, 0xBD, + 0xB1, 0xE0, 0xBD, 0xB2, 0xA2, 0x27, 0x46, 0xE0, + 0xBD, 0xB1, 0xE0, 0xBD, 0xB4, 0xA6, 0x27, 0x46, + 0xE0, 0xBD, 0xB1, 0xE0, 0xBE, 0x80, 0xA2, 0x27, + 0x00, 0x01, +} + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *nfcTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return nfcValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. 
+ } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = nfcIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *nfcTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return nfcValues[c0] + } + i := nfcIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = nfcIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = nfcIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *nfcTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return nfcValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = nfcIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *nfcTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return nfcValues[c0] + } + i := nfcIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = nfcIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = nfcIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// nfcTrie. 
Total size: 10798 bytes (10.54 KiB). Checksum: b5981cc85e3bd14. +type nfcTrie struct{} + +func newNfcTrie(i int) *nfcTrie { + return &nfcTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *nfcTrie) lookupValue(n uint32, b byte) uint16 { + switch { + case n < 46: + return uint16(nfcValues[n<<6+uint32(b)]) + default: + n -= 46 + return uint16(nfcSparse.lookup(n, b)) + } +} + +// nfcValues: 48 blocks, 3072 entries, 6144 bytes +// The third block is the zero block. +var nfcValues = [3072]uint16{ + // Block 0x0, offset 0x0 + 0x3c: 0xa000, 0x3d: 0xa000, 0x3e: 0xa000, + // Block 0x1, offset 0x40 + 0x41: 0xa000, 0x42: 0xa000, 0x43: 0xa000, 0x44: 0xa000, 0x45: 0xa000, + 0x46: 0xa000, 0x47: 0xa000, 0x48: 0xa000, 0x49: 0xa000, 0x4a: 0xa000, 0x4b: 0xa000, + 0x4c: 0xa000, 0x4d: 0xa000, 0x4e: 0xa000, 0x4f: 0xa000, 0x50: 0xa000, + 0x52: 0xa000, 0x53: 0xa000, 0x54: 0xa000, 0x55: 0xa000, 0x56: 0xa000, 0x57: 0xa000, + 0x58: 0xa000, 0x59: 0xa000, 0x5a: 0xa000, + 0x61: 0xa000, 0x62: 0xa000, 0x63: 0xa000, + 0x64: 0xa000, 0x65: 0xa000, 0x66: 0xa000, 0x67: 0xa000, 0x68: 0xa000, 0x69: 0xa000, + 0x6a: 0xa000, 0x6b: 0xa000, 0x6c: 0xa000, 0x6d: 0xa000, 0x6e: 0xa000, 0x6f: 0xa000, + 0x70: 0xa000, 0x72: 0xa000, 0x73: 0xa000, 0x74: 0xa000, 0x75: 0xa000, + 0x76: 0xa000, 0x77: 0xa000, 0x78: 0xa000, 0x79: 0xa000, 0x7a: 0xa000, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x30b0, 0xc1: 0x30b5, 0xc2: 0x47c9, 0xc3: 0x30ba, 0xc4: 0x47d8, 0xc5: 0x47dd, + 0xc6: 0xa000, 0xc7: 0x47e7, 0xc8: 0x3123, 0xc9: 0x3128, 0xca: 0x47ec, 0xcb: 0x313c, + 0xcc: 0x31af, 0xcd: 0x31b4, 0xce: 0x31b9, 0xcf: 0x4800, 0xd1: 0x3245, + 0xd2: 0x3268, 0xd3: 0x326d, 0xd4: 0x480a, 0xd5: 0x480f, 0xd6: 0x481e, + 0xd8: 0xa000, 0xd9: 0x32f4, 0xda: 0x32f9, 0xdb: 0x32fe, 0xdc: 0x4850, 0xdd: 0x3376, + 0xe0: 0x33bc, 0xe1: 0x33c1, 0xe2: 0x485a, 0xe3: 0x33c6, + 0xe4: 0x4869, 0xe5: 0x486e, 0xe6: 0xa000, 0xe7: 0x4878, 0xe8: 0x342f, 0xe9: 0x3434, + 0xea: 0x487d, 0xeb: 0x3448, 0xec: 0x34c0, 0xed: 0x34c5, 0xee: 0x34ca, 0xef: 0x4891, + 0xf1: 0x3556, 0xf2: 0x3579, 0xf3: 0x357e, 0xf4: 0x489b, 0xf5: 0x48a0, + 0xf6: 0x48af, 0xf8: 0xa000, 0xf9: 0x360a, 0xfa: 0x360f, 0xfb: 0x3614, + 0xfc: 0x48e1, 0xfd: 0x3691, 0xff: 0x36aa, + // Block 0x4, offset 0x100 + 0x100: 0x30bf, 0x101: 0x33cb, 0x102: 0x47ce, 0x103: 0x485f, 0x104: 0x30dd, 0x105: 0x33e9, + 0x106: 0x30f1, 0x107: 0x33fd, 0x108: 0x30f6, 0x109: 0x3402, 0x10a: 0x30fb, 0x10b: 0x3407, + 0x10c: 0x3100, 0x10d: 0x340c, 0x10e: 0x310a, 0x10f: 0x3416, + 0x112: 0x47f1, 0x113: 0x4882, 0x114: 0x3132, 0x115: 0x343e, 0x116: 0x3137, 0x117: 0x3443, + 0x118: 0x3155, 0x119: 0x3461, 0x11a: 0x3146, 0x11b: 0x3452, 0x11c: 0x316e, 0x11d: 0x347a, + 0x11e: 0x3178, 0x11f: 0x3484, 0x120: 0x317d, 0x121: 0x3489, 0x122: 0x3187, 0x123: 0x3493, + 0x124: 0x318c, 0x125: 0x3498, 0x128: 0x31be, 0x129: 0x34cf, + 0x12a: 0x31c3, 0x12b: 0x34d4, 0x12c: 0x31c8, 0x12d: 0x34d9, 0x12e: 0x31eb, 0x12f: 0x34f7, + 0x130: 0x31cd, 0x134: 0x31f5, 0x135: 0x3501, + 0x136: 0x3209, 0x137: 0x351a, 0x139: 0x3213, 0x13a: 0x3524, 0x13b: 0x321d, + 0x13c: 0x352e, 0x13d: 0x3218, 0x13e: 0x3529, + // Block 0x5, offset 0x140 + 0x143: 0x3240, 0x144: 0x3551, 0x145: 0x3259, + 0x146: 0x356a, 0x147: 0x324f, 0x148: 0x3560, + 0x14c: 0x4814, 0x14d: 0x48a5, 0x14e: 0x3272, 0x14f: 0x3583, 0x150: 0x327c, 0x151: 0x358d, + 0x154: 0x329a, 0x155: 0x35ab, 0x156: 0x32b3, 0x157: 0x35c4, + 0x158: 0x32a4, 0x159: 0x35b5, 0x15a: 0x4837, 0x15b: 0x48c8, 0x15c: 0x32bd, 0x15d: 0x35ce, + 0x15e: 0x32cc, 0x15f: 0x35dd, 0x160: 0x483c, 0x161: 0x48cd, 0x162: 
0x32e5, 0x163: 0x35fb, + 0x164: 0x32d6, 0x165: 0x35ec, 0x168: 0x4846, 0x169: 0x48d7, + 0x16a: 0x484b, 0x16b: 0x48dc, 0x16c: 0x3303, 0x16d: 0x3619, 0x16e: 0x330d, 0x16f: 0x3623, + 0x170: 0x3312, 0x171: 0x3628, 0x172: 0x3330, 0x173: 0x3646, 0x174: 0x3353, 0x175: 0x3669, + 0x176: 0x337b, 0x177: 0x3696, 0x178: 0x338f, 0x179: 0x339e, 0x17a: 0x36be, 0x17b: 0x33a8, + 0x17c: 0x36c8, 0x17d: 0x33ad, 0x17e: 0x36cd, 0x17f: 0xa000, + // Block 0x6, offset 0x180 + 0x184: 0x8100, 0x185: 0x8100, + 0x186: 0x8100, + 0x18d: 0x30c9, 0x18e: 0x33d5, 0x18f: 0x31d7, 0x190: 0x34e3, 0x191: 0x3281, + 0x192: 0x3592, 0x193: 0x3317, 0x194: 0x362d, 0x195: 0x3b10, 0x196: 0x3c9f, 0x197: 0x3b09, + 0x198: 0x3c98, 0x199: 0x3b17, 0x19a: 0x3ca6, 0x19b: 0x3b02, 0x19c: 0x3c91, + 0x19e: 0x39f1, 0x19f: 0x3b80, 0x1a0: 0x39ea, 0x1a1: 0x3b79, 0x1a2: 0x36f4, 0x1a3: 0x3706, + 0x1a6: 0x3182, 0x1a7: 0x348e, 0x1a8: 0x31ff, 0x1a9: 0x3510, + 0x1aa: 0x482d, 0x1ab: 0x48be, 0x1ac: 0x3ad1, 0x1ad: 0x3c60, 0x1ae: 0x3718, 0x1af: 0x371e, + 0x1b0: 0x3506, 0x1b4: 0x3169, 0x1b5: 0x3475, + 0x1b8: 0x323b, 0x1b9: 0x354c, 0x1ba: 0x39f8, 0x1bb: 0x3b87, + 0x1bc: 0x36ee, 0x1bd: 0x3700, 0x1be: 0x36fa, 0x1bf: 0x370c, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x30ce, 0x1c1: 0x33da, 0x1c2: 0x30d3, 0x1c3: 0x33df, 0x1c4: 0x314b, 0x1c5: 0x3457, + 0x1c6: 0x3150, 0x1c7: 0x345c, 0x1c8: 0x31dc, 0x1c9: 0x34e8, 0x1ca: 0x31e1, 0x1cb: 0x34ed, + 0x1cc: 0x3286, 0x1cd: 0x3597, 0x1ce: 0x328b, 0x1cf: 0x359c, 0x1d0: 0x32a9, 0x1d1: 0x35ba, + 0x1d2: 0x32ae, 0x1d3: 0x35bf, 0x1d4: 0x331c, 0x1d5: 0x3632, 0x1d6: 0x3321, 0x1d7: 0x3637, + 0x1d8: 0x32c7, 0x1d9: 0x35d8, 0x1da: 0x32e0, 0x1db: 0x35f6, + 0x1de: 0x319b, 0x1df: 0x34a7, + 0x1e6: 0x47d3, 0x1e7: 0x4864, 0x1e8: 0x47fb, 0x1e9: 0x488c, + 0x1ea: 0x3aa0, 0x1eb: 0x3c2f, 0x1ec: 0x3a7d, 0x1ed: 0x3c0c, 0x1ee: 0x4819, 0x1ef: 0x48aa, + 0x1f0: 0x3a99, 0x1f1: 0x3c28, 0x1f2: 0x3385, 0x1f3: 0x36a0, + // Block 0x8, offset 0x200 + 0x200: 0x9933, 0x201: 0x9933, 0x202: 0x9933, 0x203: 0x9933, 0x204: 0x9933, 0x205: 0x8133, + 0x206: 0x9933, 0x207: 0x9933, 0x208: 0x9933, 0x209: 0x9933, 0x20a: 0x9933, 0x20b: 0x9933, + 0x20c: 0x9933, 0x20d: 0x8133, 0x20e: 0x8133, 0x20f: 0x9933, 0x210: 0x8133, 0x211: 0x9933, + 0x212: 0x8133, 0x213: 0x9933, 0x214: 0x9933, 0x215: 0x8134, 0x216: 0x812e, 0x217: 0x812e, + 0x218: 0x812e, 0x219: 0x812e, 0x21a: 0x8134, 0x21b: 0x992c, 0x21c: 0x812e, 0x21d: 0x812e, + 0x21e: 0x812e, 0x21f: 0x812e, 0x220: 0x812e, 0x221: 0x812a, 0x222: 0x812a, 0x223: 0x992e, + 0x224: 0x992e, 0x225: 0x992e, 0x226: 0x992e, 0x227: 0x992a, 0x228: 0x992a, 0x229: 0x812e, + 0x22a: 0x812e, 0x22b: 0x812e, 0x22c: 0x812e, 0x22d: 0x992e, 0x22e: 0x992e, 0x22f: 0x812e, + 0x230: 0x992e, 0x231: 0x992e, 0x232: 0x812e, 0x233: 0x812e, 0x234: 0x8101, 0x235: 0x8101, + 0x236: 0x8101, 0x237: 0x8101, 0x238: 0x9901, 0x239: 0x812e, 0x23a: 0x812e, 0x23b: 0x812e, + 0x23c: 0x812e, 0x23d: 0x8133, 0x23e: 0x8133, 0x23f: 0x8133, + // Block 0x9, offset 0x240 + 0x240: 0x4aef, 0x241: 0x4af4, 0x242: 0x9933, 0x243: 0x4af9, 0x244: 0x4bb2, 0x245: 0x9937, + 0x246: 0x8133, 0x247: 0x812e, 0x248: 0x812e, 0x249: 0x812e, 0x24a: 0x8133, 0x24b: 0x8133, + 0x24c: 0x8133, 0x24d: 0x812e, 0x24e: 0x812e, 0x250: 0x8133, 0x251: 0x8133, + 0x252: 0x8133, 0x253: 0x812e, 0x254: 0x812e, 0x255: 0x812e, 0x256: 0x812e, 0x257: 0x8133, + 0x258: 0x8134, 0x259: 0x812e, 0x25a: 0x812e, 0x25b: 0x8133, 0x25c: 0x8135, 0x25d: 0x8136, + 0x25e: 0x8136, 0x25f: 0x8135, 0x260: 0x8136, 0x261: 0x8136, 0x262: 0x8135, 0x263: 0x8133, + 0x264: 0x8133, 0x265: 0x8133, 0x266: 0x8133, 0x267: 0x8133, 0x268: 0x8133, 0x269: 0x8133, + 0x26a: 
0x8133, 0x26b: 0x8133, 0x26c: 0x8133, 0x26d: 0x8133, 0x26e: 0x8133, 0x26f: 0x8133, + 0x274: 0x01ee, + 0x27a: 0x8100, + 0x27e: 0x0037, + // Block 0xa, offset 0x280 + 0x284: 0x8100, 0x285: 0x36e2, + 0x286: 0x372a, 0x287: 0x00ce, 0x288: 0x3748, 0x289: 0x3754, 0x28a: 0x3766, + 0x28c: 0x3784, 0x28e: 0x3796, 0x28f: 0x37b4, 0x290: 0x3f49, 0x291: 0xa000, + 0x295: 0xa000, 0x297: 0xa000, + 0x299: 0xa000, + 0x29f: 0xa000, 0x2a1: 0xa000, + 0x2a5: 0xa000, 0x2a9: 0xa000, + 0x2aa: 0x3778, 0x2ab: 0x37a8, 0x2ac: 0x493f, 0x2ad: 0x37d8, 0x2ae: 0x4969, 0x2af: 0x37ea, + 0x2b0: 0x3fb1, 0x2b1: 0xa000, 0x2b5: 0xa000, + 0x2b7: 0xa000, 0x2b9: 0xa000, + 0x2bf: 0xa000, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x3862, 0x2c1: 0x386e, 0x2c3: 0x385c, + 0x2c6: 0xa000, 0x2c7: 0x384a, + 0x2cc: 0x389e, 0x2cd: 0x3886, 0x2ce: 0x38b0, 0x2d0: 0xa000, + 0x2d3: 0xa000, 0x2d5: 0xa000, 0x2d6: 0xa000, 0x2d7: 0xa000, + 0x2d8: 0xa000, 0x2d9: 0x3892, 0x2da: 0xa000, + 0x2de: 0xa000, 0x2e3: 0xa000, + 0x2e7: 0xa000, + 0x2eb: 0xa000, 0x2ed: 0xa000, + 0x2f0: 0xa000, 0x2f3: 0xa000, 0x2f5: 0xa000, + 0x2f6: 0xa000, 0x2f7: 0xa000, 0x2f8: 0xa000, 0x2f9: 0x3916, 0x2fa: 0xa000, + 0x2fe: 0xa000, + // Block 0xc, offset 0x300 + 0x301: 0x3874, 0x302: 0x38f8, + 0x310: 0x3850, 0x311: 0x38d4, + 0x312: 0x3856, 0x313: 0x38da, 0x316: 0x3868, 0x317: 0x38ec, + 0x318: 0xa000, 0x319: 0xa000, 0x31a: 0x396a, 0x31b: 0x3970, 0x31c: 0x387a, 0x31d: 0x38fe, + 0x31e: 0x3880, 0x31f: 0x3904, 0x322: 0x388c, 0x323: 0x3910, + 0x324: 0x3898, 0x325: 0x391c, 0x326: 0x38a4, 0x327: 0x3928, 0x328: 0xa000, 0x329: 0xa000, + 0x32a: 0x3976, 0x32b: 0x397c, 0x32c: 0x38ce, 0x32d: 0x3952, 0x32e: 0x38aa, 0x32f: 0x392e, + 0x330: 0x38b6, 0x331: 0x393a, 0x332: 0x38bc, 0x333: 0x3940, 0x334: 0x38c2, 0x335: 0x3946, + 0x338: 0x38c8, 0x339: 0x394c, + // Block 0xd, offset 0x340 + 0x351: 0x812e, + 0x352: 0x8133, 0x353: 0x8133, 0x354: 0x8133, 0x355: 0x8133, 0x356: 0x812e, 0x357: 0x8133, + 0x358: 0x8133, 0x359: 0x8133, 0x35a: 0x812f, 0x35b: 0x812e, 0x35c: 0x8133, 0x35d: 0x8133, + 0x35e: 0x8133, 0x35f: 0x8133, 0x360: 0x8133, 0x361: 0x8133, 0x362: 0x812e, 0x363: 0x812e, + 0x364: 0x812e, 0x365: 0x812e, 0x366: 0x812e, 0x367: 0x812e, 0x368: 0x8133, 0x369: 0x8133, + 0x36a: 0x812e, 0x36b: 0x8133, 0x36c: 0x8133, 0x36d: 0x812f, 0x36e: 0x8132, 0x36f: 0x8133, + 0x370: 0x8106, 0x371: 0x8107, 0x372: 0x8108, 0x373: 0x8109, 0x374: 0x810a, 0x375: 0x810b, + 0x376: 0x810c, 0x377: 0x810d, 0x378: 0x810e, 0x379: 0x810f, 0x37a: 0x810f, 0x37b: 0x8110, + 0x37c: 0x8111, 0x37d: 0x8112, 0x37f: 0x8113, + // Block 0xe, offset 0x380 + 0x388: 0xa000, 0x38a: 0xa000, 0x38b: 0x8117, + 0x38c: 0x8118, 0x38d: 0x8119, 0x38e: 0x811a, 0x38f: 0x811b, 0x390: 0x811c, 0x391: 0x811d, + 0x392: 0x811e, 0x393: 0x9933, 0x394: 0x9933, 0x395: 0x992e, 0x396: 0x812e, 0x397: 0x8133, + 0x398: 0x8133, 0x399: 0x8133, 0x39a: 0x8133, 0x39b: 0x8133, 0x39c: 0x812e, 0x39d: 0x8133, + 0x39e: 0x8133, 0x39f: 0x812e, + 0x3b0: 0x811f, + // Block 0xf, offset 0x3c0 + 0x3ca: 0x8133, 0x3cb: 0x8133, + 0x3cc: 0x8133, 0x3cd: 0x8133, 0x3ce: 0x8133, 0x3cf: 0x812e, 0x3d0: 0x812e, 0x3d1: 0x812e, + 0x3d2: 0x812e, 0x3d3: 0x812e, 0x3d4: 0x8133, 0x3d5: 0x8133, 0x3d6: 0x8133, 0x3d7: 0x8133, + 0x3d8: 0x8133, 0x3d9: 0x8133, 0x3da: 0x8133, 0x3db: 0x8133, 0x3dc: 0x8133, 0x3dd: 0x8133, + 0x3de: 0x8133, 0x3df: 0x8133, 0x3e0: 0x8133, 0x3e1: 0x8133, 0x3e3: 0x812e, + 0x3e4: 0x8133, 0x3e5: 0x8133, 0x3e6: 0x812e, 0x3e7: 0x8133, 0x3e8: 0x8133, 0x3e9: 0x812e, + 0x3ea: 0x8133, 0x3eb: 0x8133, 0x3ec: 0x8133, 0x3ed: 0x812e, 0x3ee: 0x812e, 0x3ef: 0x812e, + 0x3f0: 0x8117, 0x3f1: 0x8118, 0x3f2: 0x8119, 
0x3f3: 0x8133, 0x3f4: 0x8133, 0x3f5: 0x8133, + 0x3f6: 0x812e, 0x3f7: 0x8133, 0x3f8: 0x8133, 0x3f9: 0x812e, 0x3fa: 0x812e, 0x3fb: 0x8133, + 0x3fc: 0x8133, 0x3fd: 0x8133, 0x3fe: 0x8133, 0x3ff: 0x8133, + // Block 0x10, offset 0x400 + 0x405: 0xa000, + 0x406: 0x2e5d, 0x407: 0xa000, 0x408: 0x2e65, 0x409: 0xa000, 0x40a: 0x2e6d, 0x40b: 0xa000, + 0x40c: 0x2e75, 0x40d: 0xa000, 0x40e: 0x2e7d, 0x411: 0xa000, + 0x412: 0x2e85, + 0x434: 0x8103, 0x435: 0x9900, + 0x43a: 0xa000, 0x43b: 0x2e8d, + 0x43c: 0xa000, 0x43d: 0x2e95, 0x43e: 0xa000, 0x43f: 0xa000, + // Block 0x11, offset 0x440 + 0x440: 0x8133, 0x441: 0x8133, 0x442: 0x812e, 0x443: 0x8133, 0x444: 0x8133, 0x445: 0x8133, + 0x446: 0x8133, 0x447: 0x8133, 0x448: 0x8133, 0x449: 0x8133, 0x44a: 0x812e, 0x44b: 0x8133, + 0x44c: 0x8133, 0x44d: 0x8136, 0x44e: 0x812b, 0x44f: 0x812e, 0x450: 0x812a, 0x451: 0x8133, + 0x452: 0x8133, 0x453: 0x8133, 0x454: 0x8133, 0x455: 0x8133, 0x456: 0x8133, 0x457: 0x8133, + 0x458: 0x8133, 0x459: 0x8133, 0x45a: 0x8133, 0x45b: 0x8133, 0x45c: 0x8133, 0x45d: 0x8133, + 0x45e: 0x8133, 0x45f: 0x8133, 0x460: 0x8133, 0x461: 0x8133, 0x462: 0x8133, 0x463: 0x8133, + 0x464: 0x8133, 0x465: 0x8133, 0x466: 0x8133, 0x467: 0x8133, 0x468: 0x8133, 0x469: 0x8133, + 0x46a: 0x8133, 0x46b: 0x8133, 0x46c: 0x8133, 0x46d: 0x8133, 0x46e: 0x8133, 0x46f: 0x8133, + 0x470: 0x8133, 0x471: 0x8133, 0x472: 0x8133, 0x473: 0x8133, 0x474: 0x8133, 0x475: 0x8133, + 0x476: 0x8134, 0x477: 0x8132, 0x478: 0x8132, 0x479: 0x812e, 0x47a: 0x812d, 0x47b: 0x8133, + 0x47c: 0x8135, 0x47d: 0x812e, 0x47e: 0x8133, 0x47f: 0x812e, + // Block 0x12, offset 0x480 + 0x480: 0x30d8, 0x481: 0x33e4, 0x482: 0x30e2, 0x483: 0x33ee, 0x484: 0x30e7, 0x485: 0x33f3, + 0x486: 0x30ec, 0x487: 0x33f8, 0x488: 0x3a0d, 0x489: 0x3b9c, 0x48a: 0x3105, 0x48b: 0x3411, + 0x48c: 0x310f, 0x48d: 0x341b, 0x48e: 0x311e, 0x48f: 0x342a, 0x490: 0x3114, 0x491: 0x3420, + 0x492: 0x3119, 0x493: 0x3425, 0x494: 0x3a30, 0x495: 0x3bbf, 0x496: 0x3a37, 0x497: 0x3bc6, + 0x498: 0x315a, 0x499: 0x3466, 0x49a: 0x315f, 0x49b: 0x346b, 0x49c: 0x3a45, 0x49d: 0x3bd4, + 0x49e: 0x3164, 0x49f: 0x3470, 0x4a0: 0x3173, 0x4a1: 0x347f, 0x4a2: 0x3191, 0x4a3: 0x349d, + 0x4a4: 0x31a0, 0x4a5: 0x34ac, 0x4a6: 0x3196, 0x4a7: 0x34a2, 0x4a8: 0x31a5, 0x4a9: 0x34b1, + 0x4aa: 0x31aa, 0x4ab: 0x34b6, 0x4ac: 0x31f0, 0x4ad: 0x34fc, 0x4ae: 0x3a4c, 0x4af: 0x3bdb, + 0x4b0: 0x31fa, 0x4b1: 0x350b, 0x4b2: 0x3204, 0x4b3: 0x3515, 0x4b4: 0x320e, 0x4b5: 0x351f, + 0x4b6: 0x4805, 0x4b7: 0x4896, 0x4b8: 0x3a53, 0x4b9: 0x3be2, 0x4ba: 0x3227, 0x4bb: 0x3538, + 0x4bc: 0x3222, 0x4bd: 0x3533, 0x4be: 0x322c, 0x4bf: 0x353d, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x3231, 0x4c1: 0x3542, 0x4c2: 0x3236, 0x4c3: 0x3547, 0x4c4: 0x324a, 0x4c5: 0x355b, + 0x4c6: 0x3254, 0x4c7: 0x3565, 0x4c8: 0x3263, 0x4c9: 0x3574, 0x4ca: 0x325e, 0x4cb: 0x356f, + 0x4cc: 0x3a76, 0x4cd: 0x3c05, 0x4ce: 0x3a84, 0x4cf: 0x3c13, 0x4d0: 0x3a8b, 0x4d1: 0x3c1a, + 0x4d2: 0x3a92, 0x4d3: 0x3c21, 0x4d4: 0x3290, 0x4d5: 0x35a1, 0x4d6: 0x3295, 0x4d7: 0x35a6, + 0x4d8: 0x329f, 0x4d9: 0x35b0, 0x4da: 0x4832, 0x4db: 0x48c3, 0x4dc: 0x3ad8, 0x4dd: 0x3c67, + 0x4de: 0x32b8, 0x4df: 0x35c9, 0x4e0: 0x32c2, 0x4e1: 0x35d3, 0x4e2: 0x4841, 0x4e3: 0x48d2, + 0x4e4: 0x3adf, 0x4e5: 0x3c6e, 0x4e6: 0x3ae6, 0x4e7: 0x3c75, 0x4e8: 0x3aed, 0x4e9: 0x3c7c, + 0x4ea: 0x32d1, 0x4eb: 0x35e2, 0x4ec: 0x32db, 0x4ed: 0x35f1, 0x4ee: 0x32ef, 0x4ef: 0x3605, + 0x4f0: 0x32ea, 0x4f1: 0x3600, 0x4f2: 0x332b, 0x4f3: 0x3641, 0x4f4: 0x333a, 0x4f5: 0x3650, + 0x4f6: 0x3335, 0x4f7: 0x364b, 0x4f8: 0x3af4, 0x4f9: 0x3c83, 0x4fa: 0x3afb, 0x4fb: 0x3c8a, + 0x4fc: 0x333f, 0x4fd: 0x3655, 
0x4fe: 0x3344, 0x4ff: 0x365a, + // Block 0x14, offset 0x500 + 0x500: 0x3349, 0x501: 0x365f, 0x502: 0x334e, 0x503: 0x3664, 0x504: 0x335d, 0x505: 0x3673, + 0x506: 0x3358, 0x507: 0x366e, 0x508: 0x3362, 0x509: 0x367d, 0x50a: 0x3367, 0x50b: 0x3682, + 0x50c: 0x336c, 0x50d: 0x3687, 0x50e: 0x338a, 0x50f: 0x36a5, 0x510: 0x33a3, 0x511: 0x36c3, + 0x512: 0x33b2, 0x513: 0x36d2, 0x514: 0x33b7, 0x515: 0x36d7, 0x516: 0x34bb, 0x517: 0x35e7, + 0x518: 0x3678, 0x519: 0x36b4, 0x51b: 0x3712, + 0x520: 0x47e2, 0x521: 0x4873, 0x522: 0x30c4, 0x523: 0x33d0, + 0x524: 0x39b9, 0x525: 0x3b48, 0x526: 0x39b2, 0x527: 0x3b41, 0x528: 0x39c7, 0x529: 0x3b56, + 0x52a: 0x39c0, 0x52b: 0x3b4f, 0x52c: 0x39ff, 0x52d: 0x3b8e, 0x52e: 0x39d5, 0x52f: 0x3b64, + 0x530: 0x39ce, 0x531: 0x3b5d, 0x532: 0x39e3, 0x533: 0x3b72, 0x534: 0x39dc, 0x535: 0x3b6b, + 0x536: 0x3a06, 0x537: 0x3b95, 0x538: 0x47f6, 0x539: 0x4887, 0x53a: 0x3141, 0x53b: 0x344d, + 0x53c: 0x312d, 0x53d: 0x3439, 0x53e: 0x3a1b, 0x53f: 0x3baa, + // Block 0x15, offset 0x540 + 0x540: 0x3a14, 0x541: 0x3ba3, 0x542: 0x3a29, 0x543: 0x3bb8, 0x544: 0x3a22, 0x545: 0x3bb1, + 0x546: 0x3a3e, 0x547: 0x3bcd, 0x548: 0x31d2, 0x549: 0x34de, 0x54a: 0x31e6, 0x54b: 0x34f2, + 0x54c: 0x4828, 0x54d: 0x48b9, 0x54e: 0x3277, 0x54f: 0x3588, 0x550: 0x3a61, 0x551: 0x3bf0, + 0x552: 0x3a5a, 0x553: 0x3be9, 0x554: 0x3a6f, 0x555: 0x3bfe, 0x556: 0x3a68, 0x557: 0x3bf7, + 0x558: 0x3aca, 0x559: 0x3c59, 0x55a: 0x3aae, 0x55b: 0x3c3d, 0x55c: 0x3aa7, 0x55d: 0x3c36, + 0x55e: 0x3abc, 0x55f: 0x3c4b, 0x560: 0x3ab5, 0x561: 0x3c44, 0x562: 0x3ac3, 0x563: 0x3c52, + 0x564: 0x3326, 0x565: 0x363c, 0x566: 0x3308, 0x567: 0x361e, 0x568: 0x3b25, 0x569: 0x3cb4, + 0x56a: 0x3b1e, 0x56b: 0x3cad, 0x56c: 0x3b33, 0x56d: 0x3cc2, 0x56e: 0x3b2c, 0x56f: 0x3cbb, + 0x570: 0x3b3a, 0x571: 0x3cc9, 0x572: 0x3371, 0x573: 0x368c, 0x574: 0x3399, 0x575: 0x36b9, + 0x576: 0x3394, 0x577: 0x36af, 0x578: 0x3380, 0x579: 0x369b, + // Block 0x16, offset 0x580 + 0x580: 0x4945, 0x581: 0x494b, 0x582: 0x4a5f, 0x583: 0x4a77, 0x584: 0x4a67, 0x585: 0x4a7f, + 0x586: 0x4a6f, 0x587: 0x4a87, 0x588: 0x48eb, 0x589: 0x48f1, 0x58a: 0x49cf, 0x58b: 0x49e7, + 0x58c: 0x49d7, 0x58d: 0x49ef, 0x58e: 0x49df, 0x58f: 0x49f7, 0x590: 0x4957, 0x591: 0x495d, + 0x592: 0x3ef9, 0x593: 0x3f09, 0x594: 0x3f01, 0x595: 0x3f11, + 0x598: 0x48f7, 0x599: 0x48fd, 0x59a: 0x3e29, 0x59b: 0x3e39, 0x59c: 0x3e31, 0x59d: 0x3e41, + 0x5a0: 0x496f, 0x5a1: 0x4975, 0x5a2: 0x4a8f, 0x5a3: 0x4aa7, + 0x5a4: 0x4a97, 0x5a5: 0x4aaf, 0x5a6: 0x4a9f, 0x5a7: 0x4ab7, 0x5a8: 0x4903, 0x5a9: 0x4909, + 0x5aa: 0x49ff, 0x5ab: 0x4a17, 0x5ac: 0x4a07, 0x5ad: 0x4a1f, 0x5ae: 0x4a0f, 0x5af: 0x4a27, + 0x5b0: 0x4987, 0x5b1: 0x498d, 0x5b2: 0x3f59, 0x5b3: 0x3f71, 0x5b4: 0x3f61, 0x5b5: 0x3f79, + 0x5b6: 0x3f69, 0x5b7: 0x3f81, 0x5b8: 0x490f, 0x5b9: 0x4915, 0x5ba: 0x3e59, 0x5bb: 0x3e71, + 0x5bc: 0x3e61, 0x5bd: 0x3e79, 0x5be: 0x3e69, 0x5bf: 0x3e81, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x4993, 0x5c1: 0x4999, 0x5c2: 0x3f89, 0x5c3: 0x3f99, 0x5c4: 0x3f91, 0x5c5: 0x3fa1, + 0x5c8: 0x491b, 0x5c9: 0x4921, 0x5ca: 0x3e89, 0x5cb: 0x3e99, + 0x5cc: 0x3e91, 0x5cd: 0x3ea1, 0x5d0: 0x49a5, 0x5d1: 0x49ab, + 0x5d2: 0x3fc1, 0x5d3: 0x3fd9, 0x5d4: 0x3fc9, 0x5d5: 0x3fe1, 0x5d6: 0x3fd1, 0x5d7: 0x3fe9, + 0x5d9: 0x4927, 0x5db: 0x3ea9, 0x5dd: 0x3eb1, + 0x5df: 0x3eb9, 0x5e0: 0x49bd, 0x5e1: 0x49c3, 0x5e2: 0x4abf, 0x5e3: 0x4ad7, + 0x5e4: 0x4ac7, 0x5e5: 0x4adf, 0x5e6: 0x4acf, 0x5e7: 0x4ae7, 0x5e8: 0x492d, 0x5e9: 0x4933, + 0x5ea: 0x4a2f, 0x5eb: 0x4a47, 0x5ec: 0x4a37, 0x5ed: 0x4a4f, 0x5ee: 0x4a3f, 0x5ef: 0x4a57, + 0x5f0: 0x4939, 0x5f1: 0x445f, 0x5f2: 0x37d2, 0x5f3: 0x4465, 0x5f4: 
0x4963, 0x5f5: 0x446b, + 0x5f6: 0x37e4, 0x5f7: 0x4471, 0x5f8: 0x3802, 0x5f9: 0x4477, 0x5fa: 0x381a, 0x5fb: 0x447d, + 0x5fc: 0x49b1, 0x5fd: 0x4483, + // Block 0x18, offset 0x600 + 0x600: 0x3ee1, 0x601: 0x3ee9, 0x602: 0x42c5, 0x603: 0x42e3, 0x604: 0x42cf, 0x605: 0x42ed, + 0x606: 0x42d9, 0x607: 0x42f7, 0x608: 0x3e19, 0x609: 0x3e21, 0x60a: 0x4211, 0x60b: 0x422f, + 0x60c: 0x421b, 0x60d: 0x4239, 0x60e: 0x4225, 0x60f: 0x4243, 0x610: 0x3f29, 0x611: 0x3f31, + 0x612: 0x4301, 0x613: 0x431f, 0x614: 0x430b, 0x615: 0x4329, 0x616: 0x4315, 0x617: 0x4333, + 0x618: 0x3e49, 0x619: 0x3e51, 0x61a: 0x424d, 0x61b: 0x426b, 0x61c: 0x4257, 0x61d: 0x4275, + 0x61e: 0x4261, 0x61f: 0x427f, 0x620: 0x4001, 0x621: 0x4009, 0x622: 0x433d, 0x623: 0x435b, + 0x624: 0x4347, 0x625: 0x4365, 0x626: 0x4351, 0x627: 0x436f, 0x628: 0x3ec1, 0x629: 0x3ec9, + 0x62a: 0x4289, 0x62b: 0x42a7, 0x62c: 0x4293, 0x62d: 0x42b1, 0x62e: 0x429d, 0x62f: 0x42bb, + 0x630: 0x37c6, 0x631: 0x37c0, 0x632: 0x3ed1, 0x633: 0x37cc, 0x634: 0x3ed9, + 0x636: 0x4951, 0x637: 0x3ef1, 0x638: 0x3736, 0x639: 0x3730, 0x63a: 0x3724, 0x63b: 0x442f, + 0x63c: 0x373c, 0x63d: 0x8100, 0x63e: 0x0257, 0x63f: 0xa100, + // Block 0x19, offset 0x640 + 0x640: 0x8100, 0x641: 0x36e8, 0x642: 0x3f19, 0x643: 0x37de, 0x644: 0x3f21, + 0x646: 0x497b, 0x647: 0x3f39, 0x648: 0x3742, 0x649: 0x4435, 0x64a: 0x374e, 0x64b: 0x443b, + 0x64c: 0x375a, 0x64d: 0x3cd0, 0x64e: 0x3cd7, 0x64f: 0x3cde, 0x650: 0x37f6, 0x651: 0x37f0, + 0x652: 0x3f41, 0x653: 0x4625, 0x656: 0x37fc, 0x657: 0x3f51, + 0x658: 0x3772, 0x659: 0x376c, 0x65a: 0x3760, 0x65b: 0x4441, 0x65d: 0x3ce5, + 0x65e: 0x3cec, 0x65f: 0x3cf3, 0x660: 0x382c, 0x661: 0x3826, 0x662: 0x3fa9, 0x663: 0x462d, + 0x664: 0x380e, 0x665: 0x3814, 0x666: 0x3832, 0x667: 0x3fb9, 0x668: 0x37a2, 0x669: 0x379c, + 0x66a: 0x3790, 0x66b: 0x444d, 0x66c: 0x378a, 0x66d: 0x36dc, 0x66e: 0x4429, 0x66f: 0x0081, + 0x672: 0x3ff1, 0x673: 0x3838, 0x674: 0x3ff9, + 0x676: 0x49c9, 0x677: 0x4011, 0x678: 0x377e, 0x679: 0x4447, 0x67a: 0x37ae, 0x67b: 0x4459, + 0x67c: 0x37ba, 0x67d: 0x4397, 0x67e: 0xa100, + // Block 0x1a, offset 0x680 + 0x681: 0x3d47, 0x683: 0xa000, 0x684: 0x3d4e, 0x685: 0xa000, + 0x687: 0x3d55, 0x688: 0xa000, 0x689: 0x3d5c, + 0x68d: 0xa000, + 0x6a0: 0x30a6, 0x6a1: 0xa000, 0x6a2: 0x3d6a, + 0x6a4: 0xa000, 0x6a5: 0xa000, + 0x6ad: 0x3d63, 0x6ae: 0x30a1, 0x6af: 0x30ab, + 0x6b0: 0x3d71, 0x6b1: 0x3d78, 0x6b2: 0xa000, 0x6b3: 0xa000, 0x6b4: 0x3d7f, 0x6b5: 0x3d86, + 0x6b6: 0xa000, 0x6b7: 0xa000, 0x6b8: 0x3d8d, 0x6b9: 0x3d94, 0x6ba: 0xa000, 0x6bb: 0xa000, + 0x6bc: 0xa000, 0x6bd: 0xa000, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x3d9b, 0x6c1: 0x3da2, 0x6c2: 0xa000, 0x6c3: 0xa000, 0x6c4: 0x3db7, 0x6c5: 0x3dbe, + 0x6c6: 0xa000, 0x6c7: 0xa000, 0x6c8: 0x3dc5, 0x6c9: 0x3dcc, + 0x6d1: 0xa000, + 0x6d2: 0xa000, + 0x6e2: 0xa000, + 0x6e8: 0xa000, 0x6e9: 0xa000, + 0x6eb: 0xa000, 0x6ec: 0x3de1, 0x6ed: 0x3de8, 0x6ee: 0x3def, 0x6ef: 0x3df6, + 0x6f2: 0xa000, 0x6f3: 0xa000, 0x6f4: 0xa000, 0x6f5: 0xa000, + // Block 0x1c, offset 0x700 + 0x706: 0xa000, 0x70b: 0xa000, + 0x70c: 0x4049, 0x70d: 0xa000, 0x70e: 0x4051, 0x70f: 0xa000, 0x710: 0x4059, 0x711: 0xa000, + 0x712: 0x4061, 0x713: 0xa000, 0x714: 0x4069, 0x715: 0xa000, 0x716: 0x4071, 0x717: 0xa000, + 0x718: 0x4079, 0x719: 0xa000, 0x71a: 0x4081, 0x71b: 0xa000, 0x71c: 0x4089, 0x71d: 0xa000, + 0x71e: 0x4091, 0x71f: 0xa000, 0x720: 0x4099, 0x721: 0xa000, 0x722: 0x40a1, + 0x724: 0xa000, 0x725: 0x40a9, 0x726: 0xa000, 0x727: 0x40b1, 0x728: 0xa000, 0x729: 0x40b9, + 0x72f: 0xa000, + 0x730: 0x40c1, 0x731: 0x40c9, 0x732: 0xa000, 0x733: 0x40d1, 0x734: 0x40d9, 0x735: 
0xa000, + 0x736: 0x40e1, 0x737: 0x40e9, 0x738: 0xa000, 0x739: 0x40f1, 0x73a: 0x40f9, 0x73b: 0xa000, + 0x73c: 0x4101, 0x73d: 0x4109, + // Block 0x1d, offset 0x740 + 0x754: 0x4041, + 0x759: 0x9904, 0x75a: 0x9904, 0x75b: 0x8100, 0x75c: 0x8100, 0x75d: 0xa000, + 0x75e: 0x4111, + 0x766: 0xa000, + 0x76b: 0xa000, 0x76c: 0x4121, 0x76d: 0xa000, 0x76e: 0x4129, 0x76f: 0xa000, + 0x770: 0x4131, 0x771: 0xa000, 0x772: 0x4139, 0x773: 0xa000, 0x774: 0x4141, 0x775: 0xa000, + 0x776: 0x4149, 0x777: 0xa000, 0x778: 0x4151, 0x779: 0xa000, 0x77a: 0x4159, 0x77b: 0xa000, + 0x77c: 0x4161, 0x77d: 0xa000, 0x77e: 0x4169, 0x77f: 0xa000, + // Block 0x1e, offset 0x780 + 0x780: 0x4171, 0x781: 0xa000, 0x782: 0x4179, 0x784: 0xa000, 0x785: 0x4181, + 0x786: 0xa000, 0x787: 0x4189, 0x788: 0xa000, 0x789: 0x4191, + 0x78f: 0xa000, 0x790: 0x4199, 0x791: 0x41a1, + 0x792: 0xa000, 0x793: 0x41a9, 0x794: 0x41b1, 0x795: 0xa000, 0x796: 0x41b9, 0x797: 0x41c1, + 0x798: 0xa000, 0x799: 0x41c9, 0x79a: 0x41d1, 0x79b: 0xa000, 0x79c: 0x41d9, 0x79d: 0x41e1, + 0x7af: 0xa000, + 0x7b0: 0xa000, 0x7b1: 0xa000, 0x7b2: 0xa000, 0x7b4: 0x4119, + 0x7b7: 0x41e9, 0x7b8: 0x41f1, 0x7b9: 0x41f9, 0x7ba: 0x4201, + 0x7bd: 0xa000, 0x7be: 0x4209, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0x1472, 0x7c1: 0x0df6, 0x7c2: 0x14ce, 0x7c3: 0x149a, 0x7c4: 0x0f52, 0x7c5: 0x07e6, + 0x7c6: 0x09da, 0x7c7: 0x1726, 0x7c8: 0x1726, 0x7c9: 0x0b06, 0x7ca: 0x155a, 0x7cb: 0x0a3e, + 0x7cc: 0x0b02, 0x7cd: 0x0cea, 0x7ce: 0x10ca, 0x7cf: 0x125a, 0x7d0: 0x1392, 0x7d1: 0x13ce, + 0x7d2: 0x1402, 0x7d3: 0x1516, 0x7d4: 0x0e6e, 0x7d5: 0x0efa, 0x7d6: 0x0fa6, 0x7d7: 0x103e, + 0x7d8: 0x135a, 0x7d9: 0x1542, 0x7da: 0x166e, 0x7db: 0x080a, 0x7dc: 0x09ae, 0x7dd: 0x0e82, + 0x7de: 0x0fca, 0x7df: 0x138e, 0x7e0: 0x16be, 0x7e1: 0x0bae, 0x7e2: 0x0f72, 0x7e3: 0x137e, + 0x7e4: 0x1412, 0x7e5: 0x0d1e, 0x7e6: 0x12b6, 0x7e7: 0x13da, 0x7e8: 0x0c1a, 0x7e9: 0x0e0a, + 0x7ea: 0x0f12, 0x7eb: 0x1016, 0x7ec: 0x1522, 0x7ed: 0x084a, 0x7ee: 0x08e2, 0x7ef: 0x094e, + 0x7f0: 0x0d86, 0x7f1: 0x0e7a, 0x7f2: 0x0fc6, 0x7f3: 0x10ea, 0x7f4: 0x1272, 0x7f5: 0x1386, + 0x7f6: 0x139e, 0x7f7: 0x14c2, 0x7f8: 0x15ea, 0x7f9: 0x169e, 0x7fa: 0x16ba, 0x7fb: 0x1126, + 0x7fc: 0x1166, 0x7fd: 0x121e, 0x7fe: 0x133e, 0x7ff: 0x1576, + // Block 0x20, offset 0x800 + 0x800: 0x16c6, 0x801: 0x1446, 0x802: 0x0ac2, 0x803: 0x0c36, 0x804: 0x11d6, 0x805: 0x1296, + 0x806: 0x0ffa, 0x807: 0x112e, 0x808: 0x1492, 0x809: 0x15e2, 0x80a: 0x0abe, 0x80b: 0x0b8a, + 0x80c: 0x0e72, 0x80d: 0x0f26, 0x80e: 0x0f5a, 0x80f: 0x120e, 0x810: 0x1236, 0x811: 0x15a2, + 0x812: 0x094a, 0x813: 0x12a2, 0x814: 0x08ee, 0x815: 0x08ea, 0x816: 0x1192, 0x817: 0x1222, + 0x818: 0x1356, 0x819: 0x15aa, 0x81a: 0x1462, 0x81b: 0x0d22, 0x81c: 0x0e6e, 0x81d: 0x1452, + 0x81e: 0x07f2, 0x81f: 0x0b5e, 0x820: 0x0c8e, 0x821: 0x102a, 0x822: 0x10aa, 0x823: 0x096e, + 0x824: 0x1136, 0x825: 0x085a, 0x826: 0x0c72, 0x827: 0x07d2, 0x828: 0x0ee6, 0x829: 0x0d9e, + 0x82a: 0x120a, 0x82b: 0x09c2, 0x82c: 0x0aae, 0x82d: 0x10f6, 0x82e: 0x135e, 0x82f: 0x1436, + 0x830: 0x0eb2, 0x831: 0x14f2, 0x832: 0x0ede, 0x833: 0x0d32, 0x834: 0x1316, 0x835: 0x0d52, + 0x836: 0x10a6, 0x837: 0x0826, 0x838: 0x08a2, 0x839: 0x08e6, 0x83a: 0x0e4e, 0x83b: 0x11f6, + 0x83c: 0x12ee, 0x83d: 0x1442, 0x83e: 0x1556, 0x83f: 0x0956, + // Block 0x21, offset 0x840 + 0x840: 0x0a0a, 0x841: 0x0b12, 0x842: 0x0c2a, 0x843: 0x0dba, 0x844: 0x0f76, 0x845: 0x113a, + 0x846: 0x1592, 0x847: 0x1676, 0x848: 0x16ca, 0x849: 0x16e2, 0x84a: 0x0932, 0x84b: 0x0dee, + 0x84c: 0x0e9e, 0x84d: 0x14e6, 0x84e: 0x0bf6, 0x84f: 0x0cd2, 0x850: 0x0cee, 0x851: 0x0d7e, + 0x852: 0x0f66, 0x853: 0x0fb2, 0x854: 
0x1062, 0x855: 0x1186, 0x856: 0x122a, 0x857: 0x128e, + 0x858: 0x14d6, 0x859: 0x1366, 0x85a: 0x14fe, 0x85b: 0x157a, 0x85c: 0x090a, 0x85d: 0x0936, + 0x85e: 0x0a1e, 0x85f: 0x0fa2, 0x860: 0x13ee, 0x861: 0x1436, 0x862: 0x0c16, 0x863: 0x0c86, + 0x864: 0x0d4a, 0x865: 0x0eaa, 0x866: 0x11d2, 0x867: 0x101e, 0x868: 0x0836, 0x869: 0x0a7a, + 0x86a: 0x0b5e, 0x86b: 0x0bc2, 0x86c: 0x0c92, 0x86d: 0x103a, 0x86e: 0x1056, 0x86f: 0x1266, + 0x870: 0x1286, 0x871: 0x155e, 0x872: 0x15de, 0x873: 0x15ee, 0x874: 0x162a, 0x875: 0x084e, + 0x876: 0x117a, 0x877: 0x154a, 0x878: 0x15c6, 0x879: 0x0caa, 0x87a: 0x0812, 0x87b: 0x0872, + 0x87c: 0x0b62, 0x87d: 0x0b82, 0x87e: 0x0daa, 0x87f: 0x0e6e, + // Block 0x22, offset 0x880 + 0x880: 0x0fbe, 0x881: 0x10c6, 0x882: 0x1372, 0x883: 0x1512, 0x884: 0x171e, 0x885: 0x0dde, + 0x886: 0x159e, 0x887: 0x092e, 0x888: 0x0e2a, 0x889: 0x0e36, 0x88a: 0x0f0a, 0x88b: 0x0f42, + 0x88c: 0x1046, 0x88d: 0x10a2, 0x88e: 0x1122, 0x88f: 0x1206, 0x890: 0x1636, 0x891: 0x08aa, + 0x892: 0x0cfe, 0x893: 0x15ae, 0x894: 0x0862, 0x895: 0x0ba6, 0x896: 0x0f2a, 0x897: 0x14da, + 0x898: 0x0c62, 0x899: 0x0cb2, 0x89a: 0x0e3e, 0x89b: 0x102a, 0x89c: 0x15b6, 0x89d: 0x0912, + 0x89e: 0x09fa, 0x89f: 0x0b92, 0x8a0: 0x0dce, 0x8a1: 0x0e1a, 0x8a2: 0x0e5a, 0x8a3: 0x0eee, + 0x8a4: 0x1042, 0x8a5: 0x10b6, 0x8a6: 0x1252, 0x8a7: 0x13f2, 0x8a8: 0x13fe, 0x8a9: 0x1552, + 0x8aa: 0x15d2, 0x8ab: 0x097e, 0x8ac: 0x0f46, 0x8ad: 0x09fe, 0x8ae: 0x0fc2, 0x8af: 0x1066, + 0x8b0: 0x1382, 0x8b1: 0x15ba, 0x8b2: 0x16a6, 0x8b3: 0x16ce, 0x8b4: 0x0e32, 0x8b5: 0x0f22, + 0x8b6: 0x12be, 0x8b7: 0x11b2, 0x8b8: 0x11be, 0x8b9: 0x11e2, 0x8ba: 0x1012, 0x8bb: 0x0f9a, + 0x8bc: 0x145e, 0x8bd: 0x082e, 0x8be: 0x1326, 0x8bf: 0x0916, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x0906, 0x8c1: 0x0c06, 0x8c2: 0x0d26, 0x8c3: 0x11ee, 0x8c4: 0x0b4e, 0x8c5: 0x0efe, + 0x8c6: 0x0dea, 0x8c7: 0x14e2, 0x8c8: 0x13e2, 0x8c9: 0x15a6, 0x8ca: 0x141e, 0x8cb: 0x0c22, + 0x8cc: 0x0882, 0x8cd: 0x0a56, 0x8d0: 0x0aaa, + 0x8d2: 0x0dda, 0x8d5: 0x08f2, 0x8d6: 0x101a, 0x8d7: 0x10de, + 0x8d8: 0x1142, 0x8d9: 0x115e, 0x8da: 0x1162, 0x8db: 0x1176, 0x8dc: 0x15f6, 0x8dd: 0x11e6, + 0x8de: 0x126a, 0x8e0: 0x138a, 0x8e2: 0x144e, + 0x8e5: 0x1502, 0x8e6: 0x152e, + 0x8ea: 0x164a, 0x8eb: 0x164e, 0x8ec: 0x1652, 0x8ed: 0x16b6, 0x8ee: 0x1526, 0x8ef: 0x15c2, + 0x8f0: 0x0852, 0x8f1: 0x0876, 0x8f2: 0x088a, 0x8f3: 0x0946, 0x8f4: 0x0952, 0x8f5: 0x0992, + 0x8f6: 0x0a46, 0x8f7: 0x0a62, 0x8f8: 0x0a6a, 0x8f9: 0x0aa6, 0x8fa: 0x0ab2, 0x8fb: 0x0b8e, + 0x8fc: 0x0b96, 0x8fd: 0x0c9e, 0x8fe: 0x0cc6, 0x8ff: 0x0cce, + // Block 0x24, offset 0x900 + 0x900: 0x0ce6, 0x901: 0x0d92, 0x902: 0x0dc2, 0x903: 0x0de2, 0x904: 0x0e52, 0x905: 0x0f16, + 0x906: 0x0f32, 0x907: 0x0f62, 0x908: 0x0fb6, 0x909: 0x0fd6, 0x90a: 0x104a, 0x90b: 0x112a, + 0x90c: 0x1146, 0x90d: 0x114e, 0x90e: 0x114a, 0x90f: 0x1152, 0x910: 0x1156, 0x911: 0x115a, + 0x912: 0x116e, 0x913: 0x1172, 0x914: 0x1196, 0x915: 0x11aa, 0x916: 0x11c6, 0x917: 0x122a, + 0x918: 0x1232, 0x919: 0x123a, 0x91a: 0x124e, 0x91b: 0x1276, 0x91c: 0x12c6, 0x91d: 0x12fa, + 0x91e: 0x12fa, 0x91f: 0x1362, 0x920: 0x140a, 0x921: 0x1422, 0x922: 0x1456, 0x923: 0x145a, + 0x924: 0x149e, 0x925: 0x14a2, 0x926: 0x14fa, 0x927: 0x1502, 0x928: 0x15d6, 0x929: 0x161a, + 0x92a: 0x1632, 0x92b: 0x0c96, 0x92c: 0x184b, 0x92d: 0x12de, + 0x930: 0x07da, 0x931: 0x08de, 0x932: 0x089e, 0x933: 0x0846, 0x934: 0x0886, 0x935: 0x08b2, + 0x936: 0x0942, 0x937: 0x095e, 0x938: 0x0a46, 0x939: 0x0a32, 0x93a: 0x0a42, 0x93b: 0x0a5e, + 0x93c: 0x0aaa, 0x93d: 0x0aba, 0x93e: 0x0afe, 0x93f: 0x0b0a, + // Block 0x25, offset 0x940 + 0x940: 0x0b26, 0x941: 0x0b36, 
0x942: 0x0c1e, 0x943: 0x0c26, 0x944: 0x0c56, 0x945: 0x0c76, + 0x946: 0x0ca6, 0x947: 0x0cbe, 0x948: 0x0cae, 0x949: 0x0cce, 0x94a: 0x0cc2, 0x94b: 0x0ce6, + 0x94c: 0x0d02, 0x94d: 0x0d5a, 0x94e: 0x0d66, 0x94f: 0x0d6e, 0x950: 0x0d96, 0x951: 0x0dda, + 0x952: 0x0e0a, 0x953: 0x0e0e, 0x954: 0x0e22, 0x955: 0x0ea2, 0x956: 0x0eb2, 0x957: 0x0f0a, + 0x958: 0x0f56, 0x959: 0x0f4e, 0x95a: 0x0f62, 0x95b: 0x0f7e, 0x95c: 0x0fb6, 0x95d: 0x110e, + 0x95e: 0x0fda, 0x95f: 0x100e, 0x960: 0x101a, 0x961: 0x105a, 0x962: 0x1076, 0x963: 0x109a, + 0x964: 0x10be, 0x965: 0x10c2, 0x966: 0x10de, 0x967: 0x10e2, 0x968: 0x10f2, 0x969: 0x1106, + 0x96a: 0x1102, 0x96b: 0x1132, 0x96c: 0x11ae, 0x96d: 0x11c6, 0x96e: 0x11de, 0x96f: 0x1216, + 0x970: 0x122a, 0x971: 0x1246, 0x972: 0x1276, 0x973: 0x132a, 0x974: 0x1352, 0x975: 0x13c6, + 0x976: 0x140e, 0x977: 0x141a, 0x978: 0x1422, 0x979: 0x143a, 0x97a: 0x144e, 0x97b: 0x143e, + 0x97c: 0x1456, 0x97d: 0x1452, 0x97e: 0x144a, 0x97f: 0x145a, + // Block 0x26, offset 0x980 + 0x980: 0x1466, 0x981: 0x14a2, 0x982: 0x14de, 0x983: 0x150e, 0x984: 0x1546, 0x985: 0x1566, + 0x986: 0x15b2, 0x987: 0x15d6, 0x988: 0x15f6, 0x989: 0x160a, 0x98a: 0x161a, 0x98b: 0x1626, + 0x98c: 0x1632, 0x98d: 0x1686, 0x98e: 0x1726, 0x98f: 0x17e2, 0x990: 0x17dd, 0x991: 0x180f, + 0x992: 0x0702, 0x993: 0x072a, 0x994: 0x072e, 0x995: 0x1891, 0x996: 0x18be, 0x997: 0x1936, + 0x998: 0x1712, 0x999: 0x1722, + // Block 0x27, offset 0x9c0 + 0x9c0: 0x07f6, 0x9c1: 0x07ee, 0x9c2: 0x07fe, 0x9c3: 0x1774, 0x9c4: 0x0842, 0x9c5: 0x0852, + 0x9c6: 0x0856, 0x9c7: 0x085e, 0x9c8: 0x0866, 0x9c9: 0x086a, 0x9ca: 0x0876, 0x9cb: 0x086e, + 0x9cc: 0x06ae, 0x9cd: 0x1788, 0x9ce: 0x088a, 0x9cf: 0x088e, 0x9d0: 0x0892, 0x9d1: 0x08ae, + 0x9d2: 0x1779, 0x9d3: 0x06b2, 0x9d4: 0x089a, 0x9d5: 0x08ba, 0x9d6: 0x1783, 0x9d7: 0x08ca, + 0x9d8: 0x08d2, 0x9d9: 0x0832, 0x9da: 0x08da, 0x9db: 0x08de, 0x9dc: 0x195e, 0x9dd: 0x08fa, + 0x9de: 0x0902, 0x9df: 0x06ba, 0x9e0: 0x091a, 0x9e1: 0x091e, 0x9e2: 0x0926, 0x9e3: 0x092a, + 0x9e4: 0x06be, 0x9e5: 0x0942, 0x9e6: 0x0946, 0x9e7: 0x0952, 0x9e8: 0x095e, 0x9e9: 0x0962, + 0x9ea: 0x0966, 0x9eb: 0x096e, 0x9ec: 0x098e, 0x9ed: 0x0992, 0x9ee: 0x099a, 0x9ef: 0x09aa, + 0x9f0: 0x09b2, 0x9f1: 0x09b6, 0x9f2: 0x09b6, 0x9f3: 0x09b6, 0x9f4: 0x1797, 0x9f5: 0x0f8e, + 0x9f6: 0x09ca, 0x9f7: 0x09d2, 0x9f8: 0x179c, 0x9f9: 0x09de, 0x9fa: 0x09e6, 0x9fb: 0x09ee, + 0x9fc: 0x0a16, 0x9fd: 0x0a02, 0x9fe: 0x0a0e, 0x9ff: 0x0a12, + // Block 0x28, offset 0xa00 + 0xa00: 0x0a1a, 0xa01: 0x0a22, 0xa02: 0x0a26, 0xa03: 0x0a2e, 0xa04: 0x0a36, 0xa05: 0x0a3a, + 0xa06: 0x0a3a, 0xa07: 0x0a42, 0xa08: 0x0a4a, 0xa09: 0x0a4e, 0xa0a: 0x0a5a, 0xa0b: 0x0a7e, + 0xa0c: 0x0a62, 0xa0d: 0x0a82, 0xa0e: 0x0a66, 0xa0f: 0x0a6e, 0xa10: 0x0906, 0xa11: 0x0aca, + 0xa12: 0x0a92, 0xa13: 0x0a96, 0xa14: 0x0a9a, 0xa15: 0x0a8e, 0xa16: 0x0aa2, 0xa17: 0x0a9e, + 0xa18: 0x0ab6, 0xa19: 0x17a1, 0xa1a: 0x0ad2, 0xa1b: 0x0ad6, 0xa1c: 0x0ade, 0xa1d: 0x0aea, + 0xa1e: 0x0af2, 0xa1f: 0x0b0e, 0xa20: 0x17a6, 0xa21: 0x17ab, 0xa22: 0x0b1a, 0xa23: 0x0b1e, + 0xa24: 0x0b22, 0xa25: 0x0b16, 0xa26: 0x0b2a, 0xa27: 0x06c2, 0xa28: 0x06c6, 0xa29: 0x0b32, + 0xa2a: 0x0b3a, 0xa2b: 0x0b3a, 0xa2c: 0x17b0, 0xa2d: 0x0b56, 0xa2e: 0x0b5a, 0xa2f: 0x0b5e, + 0xa30: 0x0b66, 0xa31: 0x17b5, 0xa32: 0x0b6e, 0xa33: 0x0b72, 0xa34: 0x0c4a, 0xa35: 0x0b7a, + 0xa36: 0x06ca, 0xa37: 0x0b86, 0xa38: 0x0b96, 0xa39: 0x0ba2, 0xa3a: 0x0b9e, 0xa3b: 0x17bf, + 0xa3c: 0x0baa, 0xa3d: 0x17c4, 0xa3e: 0x0bb6, 0xa3f: 0x0bb2, + // Block 0x29, offset 0xa40 + 0xa40: 0x0bba, 0xa41: 0x0bca, 0xa42: 0x0bce, 0xa43: 0x06ce, 0xa44: 0x0bde, 0xa45: 0x0be6, + 0xa46: 0x0bea, 0xa47: 
0x0bee, 0xa48: 0x06d2, 0xa49: 0x17c9, 0xa4a: 0x06d6, 0xa4b: 0x0c0a, + 0xa4c: 0x0c0e, 0xa4d: 0x0c12, 0xa4e: 0x0c1a, 0xa4f: 0x1990, 0xa50: 0x0c32, 0xa51: 0x17d3, + 0xa52: 0x17d3, 0xa53: 0x12d2, 0xa54: 0x0c42, 0xa55: 0x0c42, 0xa56: 0x06da, 0xa57: 0x17f6, + 0xa58: 0x18c8, 0xa59: 0x0c52, 0xa5a: 0x0c5a, 0xa5b: 0x06de, 0xa5c: 0x0c6e, 0xa5d: 0x0c7e, + 0xa5e: 0x0c82, 0xa5f: 0x0c8a, 0xa60: 0x0c9a, 0xa61: 0x06e6, 0xa62: 0x06e2, 0xa63: 0x0c9e, + 0xa64: 0x17d8, 0xa65: 0x0ca2, 0xa66: 0x0cb6, 0xa67: 0x0cba, 0xa68: 0x0cbe, 0xa69: 0x0cba, + 0xa6a: 0x0cca, 0xa6b: 0x0cce, 0xa6c: 0x0cde, 0xa6d: 0x0cd6, 0xa6e: 0x0cda, 0xa6f: 0x0ce2, + 0xa70: 0x0ce6, 0xa71: 0x0cea, 0xa72: 0x0cf6, 0xa73: 0x0cfa, 0xa74: 0x0d12, 0xa75: 0x0d1a, + 0xa76: 0x0d2a, 0xa77: 0x0d3e, 0xa78: 0x17e7, 0xa79: 0x0d3a, 0xa7a: 0x0d2e, 0xa7b: 0x0d46, + 0xa7c: 0x0d4e, 0xa7d: 0x0d62, 0xa7e: 0x17ec, 0xa7f: 0x0d6a, + // Block 0x2a, offset 0xa80 + 0xa80: 0x0d5e, 0xa81: 0x0d56, 0xa82: 0x06ea, 0xa83: 0x0d72, 0xa84: 0x0d7a, 0xa85: 0x0d82, + 0xa86: 0x0d76, 0xa87: 0x06ee, 0xa88: 0x0d92, 0xa89: 0x0d9a, 0xa8a: 0x17f1, 0xa8b: 0x0dc6, + 0xa8c: 0x0dfa, 0xa8d: 0x0dd6, 0xa8e: 0x06fa, 0xa8f: 0x0de2, 0xa90: 0x06f6, 0xa91: 0x06f2, + 0xa92: 0x08be, 0xa93: 0x08c2, 0xa94: 0x0dfe, 0xa95: 0x0de6, 0xa96: 0x12a6, 0xa97: 0x075e, + 0xa98: 0x0e0a, 0xa99: 0x0e0e, 0xa9a: 0x0e12, 0xa9b: 0x0e26, 0xa9c: 0x0e1e, 0xa9d: 0x180a, + 0xa9e: 0x06fe, 0xa9f: 0x0e3a, 0xaa0: 0x0e2e, 0xaa1: 0x0e4a, 0xaa2: 0x0e52, 0xaa3: 0x1814, + 0xaa4: 0x0e56, 0xaa5: 0x0e42, 0xaa6: 0x0e5e, 0xaa7: 0x0702, 0xaa8: 0x0e62, 0xaa9: 0x0e66, + 0xaaa: 0x0e6a, 0xaab: 0x0e76, 0xaac: 0x1819, 0xaad: 0x0e7e, 0xaae: 0x0706, 0xaaf: 0x0e8a, + 0xab0: 0x181e, 0xab1: 0x0e8e, 0xab2: 0x070a, 0xab3: 0x0e9a, 0xab4: 0x0ea6, 0xab5: 0x0eb2, + 0xab6: 0x0eb6, 0xab7: 0x1823, 0xab8: 0x17ba, 0xab9: 0x1828, 0xaba: 0x0ed6, 0xabb: 0x182d, + 0xabc: 0x0ee2, 0xabd: 0x0eea, 0xabe: 0x0eda, 0xabf: 0x0ef6, + // Block 0x2b, offset 0xac0 + 0xac0: 0x0f06, 0xac1: 0x0f16, 0xac2: 0x0f0a, 0xac3: 0x0f0e, 0xac4: 0x0f1a, 0xac5: 0x0f1e, + 0xac6: 0x1832, 0xac7: 0x0f02, 0xac8: 0x0f36, 0xac9: 0x0f3a, 0xaca: 0x070e, 0xacb: 0x0f4e, + 0xacc: 0x0f4a, 0xacd: 0x1837, 0xace: 0x0f2e, 0xacf: 0x0f6a, 0xad0: 0x183c, 0xad1: 0x1841, + 0xad2: 0x0f6e, 0xad3: 0x0f82, 0xad4: 0x0f7e, 0xad5: 0x0f7a, 0xad6: 0x0712, 0xad7: 0x0f86, + 0xad8: 0x0f96, 0xad9: 0x0f92, 0xada: 0x0f9e, 0xadb: 0x177e, 0xadc: 0x0fae, 0xadd: 0x1846, + 0xade: 0x0fba, 0xadf: 0x1850, 0xae0: 0x0fce, 0xae1: 0x0fda, 0xae2: 0x0fee, 0xae3: 0x1855, + 0xae4: 0x1002, 0xae5: 0x1006, 0xae6: 0x185a, 0xae7: 0x185f, 0xae8: 0x1022, 0xae9: 0x1032, + 0xaea: 0x0716, 0xaeb: 0x1036, 0xaec: 0x071a, 0xaed: 0x071a, 0xaee: 0x104e, 0xaef: 0x1052, + 0xaf0: 0x105a, 0xaf1: 0x105e, 0xaf2: 0x106a, 0xaf3: 0x071e, 0xaf4: 0x1082, 0xaf5: 0x1864, + 0xaf6: 0x109e, 0xaf7: 0x1869, 0xaf8: 0x10aa, 0xaf9: 0x17ce, 0xafa: 0x10ba, 0xafb: 0x186e, + 0xafc: 0x1873, 0xafd: 0x1878, 0xafe: 0x0722, 0xaff: 0x0726, + // Block 0x2c, offset 0xb00 + 0xb00: 0x10f2, 0xb01: 0x1882, 0xb02: 0x187d, 0xb03: 0x1887, 0xb04: 0x188c, 0xb05: 0x10fa, + 0xb06: 0x10fe, 0xb07: 0x10fe, 0xb08: 0x1106, 0xb09: 0x072e, 0xb0a: 0x110a, 0xb0b: 0x0732, + 0xb0c: 0x0736, 0xb0d: 0x1896, 0xb0e: 0x111e, 0xb0f: 0x1126, 0xb10: 0x1132, 0xb11: 0x073a, + 0xb12: 0x189b, 0xb13: 0x1156, 0xb14: 0x18a0, 0xb15: 0x18a5, 0xb16: 0x1176, 0xb17: 0x118e, + 0xb18: 0x073e, 0xb19: 0x1196, 0xb1a: 0x119a, 0xb1b: 0x119e, 0xb1c: 0x18aa, 0xb1d: 0x18af, + 0xb1e: 0x18af, 0xb1f: 0x11b6, 0xb20: 0x0742, 0xb21: 0x18b4, 0xb22: 0x11ca, 0xb23: 0x11ce, + 0xb24: 0x0746, 0xb25: 0x18b9, 0xb26: 0x11ea, 0xb27: 0x074a, 0xb28: 0x11fa, 
0xb29: 0x11f2, + 0xb2a: 0x1202, 0xb2b: 0x18c3, 0xb2c: 0x121a, 0xb2d: 0x074e, 0xb2e: 0x1226, 0xb2f: 0x122e, + 0xb30: 0x123e, 0xb31: 0x0752, 0xb32: 0x18cd, 0xb33: 0x18d2, 0xb34: 0x0756, 0xb35: 0x18d7, + 0xb36: 0x1256, 0xb37: 0x18dc, 0xb38: 0x1262, 0xb39: 0x126e, 0xb3a: 0x1276, 0xb3b: 0x18e1, + 0xb3c: 0x18e6, 0xb3d: 0x128a, 0xb3e: 0x18eb, 0xb3f: 0x1292, + // Block 0x2d, offset 0xb40 + 0xb40: 0x17fb, 0xb41: 0x075a, 0xb42: 0x12aa, 0xb43: 0x12ae, 0xb44: 0x0762, 0xb45: 0x12b2, + 0xb46: 0x0b2e, 0xb47: 0x18f0, 0xb48: 0x18f5, 0xb49: 0x1800, 0xb4a: 0x1805, 0xb4b: 0x12d2, + 0xb4c: 0x12d6, 0xb4d: 0x14ee, 0xb4e: 0x0766, 0xb4f: 0x1302, 0xb50: 0x12fe, 0xb51: 0x1306, + 0xb52: 0x093a, 0xb53: 0x130a, 0xb54: 0x130e, 0xb55: 0x1312, 0xb56: 0x131a, 0xb57: 0x18fa, + 0xb58: 0x1316, 0xb59: 0x131e, 0xb5a: 0x1332, 0xb5b: 0x1336, 0xb5c: 0x1322, 0xb5d: 0x133a, + 0xb5e: 0x134e, 0xb5f: 0x1362, 0xb60: 0x132e, 0xb61: 0x1342, 0xb62: 0x1346, 0xb63: 0x134a, + 0xb64: 0x18ff, 0xb65: 0x1909, 0xb66: 0x1904, 0xb67: 0x076a, 0xb68: 0x136a, 0xb69: 0x136e, + 0xb6a: 0x1376, 0xb6b: 0x191d, 0xb6c: 0x137a, 0xb6d: 0x190e, 0xb6e: 0x076e, 0xb6f: 0x0772, + 0xb70: 0x1913, 0xb71: 0x1918, 0xb72: 0x0776, 0xb73: 0x139a, 0xb74: 0x139e, 0xb75: 0x13a2, + 0xb76: 0x13a6, 0xb77: 0x13b2, 0xb78: 0x13ae, 0xb79: 0x13ba, 0xb7a: 0x13b6, 0xb7b: 0x13c6, + 0xb7c: 0x13be, 0xb7d: 0x13c2, 0xb7e: 0x13ca, 0xb7f: 0x077a, + // Block 0x2e, offset 0xb80 + 0xb80: 0x13d2, 0xb81: 0x13d6, 0xb82: 0x077e, 0xb83: 0x13e6, 0xb84: 0x13ea, 0xb85: 0x1922, + 0xb86: 0x13f6, 0xb87: 0x13fa, 0xb88: 0x0782, 0xb89: 0x1406, 0xb8a: 0x06b6, 0xb8b: 0x1927, + 0xb8c: 0x192c, 0xb8d: 0x0786, 0xb8e: 0x078a, 0xb8f: 0x1432, 0xb90: 0x144a, 0xb91: 0x1466, + 0xb92: 0x1476, 0xb93: 0x1931, 0xb94: 0x148a, 0xb95: 0x148e, 0xb96: 0x14a6, 0xb97: 0x14b2, + 0xb98: 0x193b, 0xb99: 0x178d, 0xb9a: 0x14be, 0xb9b: 0x14ba, 0xb9c: 0x14c6, 0xb9d: 0x1792, + 0xb9e: 0x14d2, 0xb9f: 0x14de, 0xba0: 0x1940, 0xba1: 0x1945, 0xba2: 0x151e, 0xba3: 0x152a, + 0xba4: 0x1532, 0xba5: 0x194a, 0xba6: 0x1536, 0xba7: 0x1562, 0xba8: 0x156e, 0xba9: 0x1572, + 0xbaa: 0x156a, 0xbab: 0x157e, 0xbac: 0x1582, 0xbad: 0x194f, 0xbae: 0x158e, 0xbaf: 0x078e, + 0xbb0: 0x1596, 0xbb1: 0x1954, 0xbb2: 0x0792, 0xbb3: 0x15ce, 0xbb4: 0x0bbe, 0xbb5: 0x15e6, + 0xbb6: 0x1959, 0xbb7: 0x1963, 0xbb8: 0x0796, 0xbb9: 0x079a, 0xbba: 0x160e, 0xbbb: 0x1968, + 0xbbc: 0x079e, 0xbbd: 0x196d, 0xbbe: 0x1626, 0xbbf: 0x1626, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x162e, 0xbc1: 0x1972, 0xbc2: 0x1646, 0xbc3: 0x07a2, 0xbc4: 0x1656, 0xbc5: 0x1662, + 0xbc6: 0x166a, 0xbc7: 0x1672, 0xbc8: 0x07a6, 0xbc9: 0x1977, 0xbca: 0x1686, 0xbcb: 0x16a2, + 0xbcc: 0x16ae, 0xbcd: 0x07aa, 0xbce: 0x07ae, 0xbcf: 0x16b2, 0xbd0: 0x197c, 0xbd1: 0x07b2, + 0xbd2: 0x1981, 0xbd3: 0x1986, 0xbd4: 0x198b, 0xbd5: 0x16d6, 0xbd6: 0x07b6, 0xbd7: 0x16ea, + 0xbd8: 0x16f2, 0xbd9: 0x16f6, 0xbda: 0x16fe, 0xbdb: 0x1706, 0xbdc: 0x170e, 0xbdd: 0x1995, +} + +// nfcIndex: 22 blocks, 1408 entries, 1408 bytes +// Block 0 is the zero block. 
+var nfcIndex = [1408]uint8{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x2e, 0xc3: 0x01, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x2f, 0xc7: 0x04, + 0xc8: 0x05, 0xca: 0x30, 0xcb: 0x31, 0xcc: 0x06, 0xcd: 0x07, 0xce: 0x08, 0xcf: 0x32, + 0xd0: 0x09, 0xd1: 0x33, 0xd2: 0x34, 0xd3: 0x0a, 0xd6: 0x0b, 0xd7: 0x35, + 0xd8: 0x36, 0xd9: 0x0c, 0xdb: 0x37, 0xdc: 0x38, 0xdd: 0x39, 0xdf: 0x3a, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, + 0xea: 0x06, 0xeb: 0x07, 0xec: 0x08, 0xed: 0x09, 0xef: 0x0a, + 0xf0: 0x13, + // Block 0x4, offset 0x100 + 0x120: 0x3b, 0x121: 0x3c, 0x122: 0x3d, 0x123: 0x0d, 0x124: 0x3e, 0x125: 0x3f, 0x126: 0x40, 0x127: 0x41, + 0x128: 0x42, 0x129: 0x43, 0x12a: 0x44, 0x12b: 0x45, 0x12c: 0x40, 0x12d: 0x46, 0x12e: 0x47, 0x12f: 0x48, + 0x130: 0x44, 0x131: 0x49, 0x132: 0x4a, 0x133: 0x4b, 0x134: 0x4c, 0x135: 0x4d, 0x137: 0x4e, + 0x138: 0x4f, 0x139: 0x50, 0x13a: 0x51, 0x13b: 0x52, 0x13c: 0x53, 0x13d: 0x54, 0x13e: 0x55, 0x13f: 0x56, + // Block 0x5, offset 0x140 + 0x140: 0x57, 0x142: 0x58, 0x144: 0x59, 0x145: 0x5a, 0x146: 0x5b, 0x147: 0x5c, + 0x14d: 0x5d, + 0x15c: 0x5e, 0x15f: 0x5f, + 0x162: 0x60, 0x164: 0x61, + 0x168: 0x62, 0x169: 0x63, 0x16a: 0x64, 0x16b: 0x65, 0x16c: 0x0e, 0x16d: 0x66, 0x16e: 0x67, 0x16f: 0x68, + 0x170: 0x69, 0x173: 0x6a, 0x177: 0x0f, + 0x178: 0x10, 0x179: 0x11, 0x17a: 0x12, 0x17b: 0x13, 0x17c: 0x14, 0x17d: 0x15, 0x17e: 0x16, 0x17f: 0x17, + // Block 0x6, offset 0x180 + 0x180: 0x6b, 0x183: 0x6c, 0x184: 0x6d, 0x186: 0x6e, 0x187: 0x6f, + 0x188: 0x70, 0x189: 0x18, 0x18a: 0x19, 0x18b: 0x71, 0x18c: 0x72, + 0x1ab: 0x73, + 0x1b3: 0x74, 0x1b5: 0x75, 0x1b7: 0x76, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x77, 0x1c1: 0x1a, 0x1c2: 0x1b, 0x1c3: 0x1c, 0x1c4: 0x78, 0x1c5: 0x79, + 0x1c9: 0x7a, 0x1cc: 0x7b, 0x1cd: 0x7c, + // Block 0x8, offset 0x200 + 0x219: 0x7d, 0x21a: 0x7e, 0x21b: 0x7f, + 0x220: 0x80, 0x223: 0x81, 0x224: 0x82, 0x225: 0x83, 0x226: 0x84, 0x227: 0x85, + 0x22a: 0x86, 0x22b: 0x87, 0x22f: 0x88, + 0x230: 0x89, 0x231: 0x8a, 0x232: 0x8b, 0x233: 0x8c, 0x234: 0x8d, 0x235: 0x8e, 0x236: 0x8f, 0x237: 0x89, + 0x238: 0x8a, 0x239: 0x8b, 0x23a: 0x8c, 0x23b: 0x8d, 0x23c: 0x8e, 0x23d: 0x8f, 0x23e: 0x89, 0x23f: 0x8a, + // Block 0x9, offset 0x240 + 0x240: 0x8b, 0x241: 0x8c, 0x242: 0x8d, 0x243: 0x8e, 0x244: 0x8f, 0x245: 0x89, 0x246: 0x8a, 0x247: 0x8b, + 0x248: 0x8c, 0x249: 0x8d, 0x24a: 0x8e, 0x24b: 0x8f, 0x24c: 0x89, 0x24d: 0x8a, 0x24e: 0x8b, 0x24f: 0x8c, + 0x250: 0x8d, 0x251: 0x8e, 0x252: 0x8f, 0x253: 0x89, 0x254: 0x8a, 0x255: 0x8b, 0x256: 0x8c, 0x257: 0x8d, + 0x258: 0x8e, 0x259: 0x8f, 0x25a: 0x89, 0x25b: 0x8a, 0x25c: 0x8b, 0x25d: 0x8c, 0x25e: 0x8d, 0x25f: 0x8e, + 0x260: 0x8f, 0x261: 0x89, 0x262: 0x8a, 0x263: 0x8b, 0x264: 0x8c, 0x265: 0x8d, 0x266: 0x8e, 0x267: 0x8f, + 0x268: 0x89, 0x269: 0x8a, 0x26a: 0x8b, 0x26b: 0x8c, 0x26c: 0x8d, 0x26d: 0x8e, 0x26e: 0x8f, 0x26f: 0x89, + 0x270: 0x8a, 0x271: 0x8b, 0x272: 0x8c, 0x273: 0x8d, 0x274: 0x8e, 0x275: 0x8f, 0x276: 0x89, 0x277: 0x8a, + 0x278: 0x8b, 0x279: 0x8c, 0x27a: 0x8d, 0x27b: 0x8e, 0x27c: 0x8f, 0x27d: 0x89, 0x27e: 0x8a, 0x27f: 0x8b, + // Block 0xa, offset 0x280 + 0x280: 0x8c, 0x281: 0x8d, 0x282: 0x8e, 0x283: 0x8f, 0x284: 0x89, 0x285: 0x8a, 0x286: 0x8b, 0x287: 0x8c, + 0x288: 0x8d, 0x289: 0x8e, 0x28a: 0x8f, 0x28b: 0x89, 0x28c: 0x8a, 0x28d: 0x8b, 0x28e: 0x8c, 0x28f: 0x8d, + 0x290: 0x8e, 0x291: 0x8f, 0x292: 0x89, 0x293: 0x8a, 0x294: 0x8b, 0x295: 0x8c, 0x296: 0x8d, 0x297: 0x8e, + 0x298: 0x8f, 0x299: 0x89, 0x29a: 0x8a, 0x29b: 0x8b, 0x29c: 0x8c, 0x29d: 0x8d, 0x29e: 0x8e, 0x29f: 0x8f, + 
0x2a0: 0x89, 0x2a1: 0x8a, 0x2a2: 0x8b, 0x2a3: 0x8c, 0x2a4: 0x8d, 0x2a5: 0x8e, 0x2a6: 0x8f, 0x2a7: 0x89, + 0x2a8: 0x8a, 0x2a9: 0x8b, 0x2aa: 0x8c, 0x2ab: 0x8d, 0x2ac: 0x8e, 0x2ad: 0x8f, 0x2ae: 0x89, 0x2af: 0x8a, + 0x2b0: 0x8b, 0x2b1: 0x8c, 0x2b2: 0x8d, 0x2b3: 0x8e, 0x2b4: 0x8f, 0x2b5: 0x89, 0x2b6: 0x8a, 0x2b7: 0x8b, + 0x2b8: 0x8c, 0x2b9: 0x8d, 0x2ba: 0x8e, 0x2bb: 0x8f, 0x2bc: 0x89, 0x2bd: 0x8a, 0x2be: 0x8b, 0x2bf: 0x8c, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x8d, 0x2c1: 0x8e, 0x2c2: 0x8f, 0x2c3: 0x89, 0x2c4: 0x8a, 0x2c5: 0x8b, 0x2c6: 0x8c, 0x2c7: 0x8d, + 0x2c8: 0x8e, 0x2c9: 0x8f, 0x2ca: 0x89, 0x2cb: 0x8a, 0x2cc: 0x8b, 0x2cd: 0x8c, 0x2ce: 0x8d, 0x2cf: 0x8e, + 0x2d0: 0x8f, 0x2d1: 0x89, 0x2d2: 0x8a, 0x2d3: 0x8b, 0x2d4: 0x8c, 0x2d5: 0x8d, 0x2d6: 0x8e, 0x2d7: 0x8f, + 0x2d8: 0x89, 0x2d9: 0x8a, 0x2da: 0x8b, 0x2db: 0x8c, 0x2dc: 0x8d, 0x2dd: 0x8e, 0x2de: 0x90, + // Block 0xc, offset 0x300 + 0x324: 0x1d, 0x325: 0x1e, 0x326: 0x1f, 0x327: 0x20, + 0x328: 0x21, 0x329: 0x22, 0x32a: 0x23, 0x32b: 0x24, 0x32c: 0x91, 0x32d: 0x92, 0x32e: 0x93, + 0x331: 0x94, 0x332: 0x95, 0x333: 0x96, 0x334: 0x97, + 0x338: 0x98, 0x339: 0x99, 0x33a: 0x9a, 0x33b: 0x9b, 0x33e: 0x9c, 0x33f: 0x9d, + // Block 0xd, offset 0x340 + 0x347: 0x9e, + 0x34b: 0x9f, 0x34d: 0xa0, + 0x368: 0xa1, 0x36b: 0xa2, + 0x374: 0xa3, + 0x37a: 0xa4, 0x37b: 0xa5, 0x37d: 0xa6, 0x37e: 0xa7, + // Block 0xe, offset 0x380 + 0x381: 0xa8, 0x382: 0xa9, 0x384: 0xaa, 0x385: 0x84, 0x387: 0xab, + 0x388: 0xac, 0x38b: 0xad, 0x38c: 0xae, 0x38d: 0xaf, + 0x391: 0xb0, 0x392: 0xb1, 0x393: 0xb2, 0x396: 0xb3, 0x397: 0xb4, + 0x398: 0x75, 0x39a: 0xb5, 0x39c: 0xb6, + 0x3a0: 0xb7, 0x3a4: 0xb8, 0x3a5: 0xb9, 0x3a7: 0xba, + 0x3a8: 0xbb, 0x3a9: 0xbc, 0x3aa: 0xbd, + 0x3b0: 0x75, 0x3b5: 0xbe, 0x3b6: 0xbf, + 0x3bd: 0xc0, + // Block 0xf, offset 0x3c0 + 0x3eb: 0xc1, 0x3ec: 0xc2, + 0x3ff: 0xc3, + // Block 0x10, offset 0x400 + 0x432: 0xc4, + // Block 0x11, offset 0x440 + 0x445: 0xc5, 0x446: 0xc6, 0x447: 0xc7, + 0x449: 0xc8, + // Block 0x12, offset 0x480 + 0x480: 0xc9, 0x482: 0xca, 0x484: 0xc2, + 0x48a: 0xcb, 0x48b: 0xcc, + 0x493: 0xcd, + 0x4a3: 0xce, 0x4a5: 0xcf, + // Block 0x13, offset 0x4c0 + 0x4c8: 0xd0, + // Block 0x14, offset 0x500 + 0x520: 0x25, 0x521: 0x26, 0x522: 0x27, 0x523: 0x28, 0x524: 0x29, 0x525: 0x2a, 0x526: 0x2b, 0x527: 0x2c, + 0x528: 0x2d, + // Block 0x15, offset 0x540 + 0x550: 0x0b, 0x551: 0x0c, 0x556: 0x0d, + 0x55b: 0x0e, 0x55d: 0x0f, 0x55e: 0x10, 0x55f: 0x11, + 0x56f: 0x12, +} + +// nfcSparseOffset: 163 entries, 326 bytes +var nfcSparseOffset = []uint16{0x0, 0x5, 0x9, 0xb, 0xd, 0x18, 0x28, 0x2a, 0x2f, 0x3a, 0x49, 0x56, 0x5e, 0x63, 0x68, 0x6a, 0x6e, 0x76, 0x7d, 0x80, 0x88, 0x8c, 0x90, 0x92, 0x94, 0x9d, 0xa1, 0xa8, 0xad, 0xb0, 0xba, 0xbd, 0xc4, 0xcc, 0xcf, 0xd1, 0xd4, 0xd6, 0xdb, 0xec, 0xf8, 0xfa, 0x100, 0x102, 0x104, 0x106, 0x108, 0x10a, 0x10c, 0x10f, 0x112, 0x114, 0x117, 0x11a, 0x11e, 0x124, 0x12b, 0x134, 0x136, 0x139, 0x13b, 0x146, 0x14a, 0x158, 0x15b, 0x161, 0x167, 0x172, 0x176, 0x178, 0x17a, 0x17c, 0x17e, 0x180, 0x186, 0x18a, 0x18c, 0x18e, 0x196, 0x19a, 0x19d, 0x19f, 0x1a1, 0x1a4, 0x1a7, 0x1a9, 0x1ab, 0x1ad, 0x1af, 0x1b5, 0x1b8, 0x1ba, 0x1c1, 0x1c7, 0x1cd, 0x1d5, 0x1db, 0x1e1, 0x1e7, 0x1eb, 0x1f9, 0x202, 0x205, 0x208, 0x20a, 0x20d, 0x20f, 0x213, 0x218, 0x21a, 0x21c, 0x221, 0x227, 0x229, 0x22b, 0x22d, 0x233, 0x236, 0x238, 0x23a, 0x23c, 0x242, 0x246, 0x24a, 0x252, 0x259, 0x25c, 0x25f, 0x261, 0x264, 0x26c, 0x270, 0x277, 0x27a, 0x280, 0x282, 0x285, 0x287, 0x28a, 0x28f, 0x291, 0x293, 0x295, 0x297, 0x299, 0x29c, 0x29e, 0x2a0, 0x2a2, 0x2a4, 0x2a6, 0x2a8, 0x2b5, 0x2bf, 0x2c1, 
0x2c3, 0x2c9, 0x2cb, 0x2cd, 0x2cf, 0x2d3, 0x2d5, 0x2d8} + +// nfcSparseValues: 730 entries, 2920 bytes +var nfcSparseValues = [730]valueRange{ + // Block 0x0, offset 0x0 + {value: 0x0000, lo: 0x04}, + {value: 0xa100, lo: 0xa8, hi: 0xa8}, + {value: 0x8100, lo: 0xaf, hi: 0xaf}, + {value: 0x8100, lo: 0xb4, hi: 0xb4}, + {value: 0x8100, lo: 0xb8, hi: 0xb8}, + // Block 0x1, offset 0x5 + {value: 0x0091, lo: 0x03}, + {value: 0x4823, lo: 0xa0, hi: 0xa1}, + {value: 0x4855, lo: 0xaf, hi: 0xb0}, + {value: 0xa000, lo: 0xb7, hi: 0xb7}, + // Block 0x2, offset 0x9 + {value: 0x0000, lo: 0x01}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + // Block 0x3, offset 0xb + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0x98, hi: 0x9d}, + // Block 0x4, offset 0xd + {value: 0x0006, lo: 0x0a}, + {value: 0xa000, lo: 0x81, hi: 0x81}, + {value: 0xa000, lo: 0x85, hi: 0x85}, + {value: 0xa000, lo: 0x89, hi: 0x89}, + {value: 0x4981, lo: 0x8a, hi: 0x8a}, + {value: 0x499f, lo: 0x8b, hi: 0x8b}, + {value: 0x3808, lo: 0x8c, hi: 0x8c}, + {value: 0x3820, lo: 0x8d, hi: 0x8d}, + {value: 0x49b7, lo: 0x8e, hi: 0x8e}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x383e, lo: 0x93, hi: 0x94}, + // Block 0x5, offset 0x18 + {value: 0x0000, lo: 0x0f}, + {value: 0xa000, lo: 0x83, hi: 0x83}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0xa000, lo: 0x8b, hi: 0x8b}, + {value: 0xa000, lo: 0x8d, hi: 0x8d}, + {value: 0x38e6, lo: 0x90, hi: 0x90}, + {value: 0x38f2, lo: 0x91, hi: 0x91}, + {value: 0x38e0, lo: 0x93, hi: 0x93}, + {value: 0xa000, lo: 0x96, hi: 0x96}, + {value: 0x3958, lo: 0x97, hi: 0x97}, + {value: 0x3922, lo: 0x9c, hi: 0x9c}, + {value: 0x390a, lo: 0x9d, hi: 0x9d}, + {value: 0x3934, lo: 0x9e, hi: 0x9e}, + {value: 0xa000, lo: 0xb4, hi: 0xb5}, + {value: 0x395e, lo: 0xb6, hi: 0xb6}, + {value: 0x3964, lo: 0xb7, hi: 0xb7}, + // Block 0x6, offset 0x28 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0x83, hi: 0x87}, + // Block 0x7, offset 0x2a + {value: 0x0001, lo: 0x04}, + {value: 0x8114, lo: 0x81, hi: 0x82}, + {value: 0x8133, lo: 0x84, hi: 0x84}, + {value: 0x812e, lo: 0x85, hi: 0x85}, + {value: 0x810e, lo: 0x87, hi: 0x87}, + // Block 0x8, offset 0x2f + {value: 0x0000, lo: 0x0a}, + {value: 0x8133, lo: 0x90, hi: 0x97}, + {value: 0x811a, lo: 0x98, hi: 0x98}, + {value: 0x811b, lo: 0x99, hi: 0x99}, + {value: 0x811c, lo: 0x9a, hi: 0x9a}, + {value: 0x3982, lo: 0xa2, hi: 0xa2}, + {value: 0x3988, lo: 0xa3, hi: 0xa3}, + {value: 0x3994, lo: 0xa4, hi: 0xa4}, + {value: 0x398e, lo: 0xa5, hi: 0xa5}, + {value: 0x399a, lo: 0xa6, hi: 0xa6}, + {value: 0xa000, lo: 0xa7, hi: 0xa7}, + // Block 0x9, offset 0x3a + {value: 0x0000, lo: 0x0e}, + {value: 0x39ac, lo: 0x80, hi: 0x80}, + {value: 0xa000, lo: 0x81, hi: 0x81}, + {value: 0x39a0, lo: 0x82, hi: 0x82}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x39a6, lo: 0x93, hi: 0x93}, + {value: 0xa000, lo: 0x95, hi: 0x95}, + {value: 0x8133, lo: 0x96, hi: 0x9c}, + {value: 0x8133, lo: 0x9f, hi: 0xa2}, + {value: 0x812e, lo: 0xa3, hi: 0xa3}, + {value: 0x8133, lo: 0xa4, hi: 0xa4}, + {value: 0x8133, lo: 0xa7, hi: 0xa8}, + {value: 0x812e, lo: 0xaa, hi: 0xaa}, + {value: 0x8133, lo: 0xab, hi: 0xac}, + {value: 0x812e, lo: 0xad, hi: 0xad}, + // Block 0xa, offset 0x49 + {value: 0x0000, lo: 0x0c}, + {value: 0x8120, lo: 0x91, hi: 0x91}, + {value: 0x8133, lo: 0xb0, hi: 0xb0}, + {value: 0x812e, lo: 0xb1, hi: 0xb1}, + {value: 0x8133, lo: 0xb2, hi: 0xb3}, + {value: 0x812e, lo: 0xb4, hi: 0xb4}, + {value: 0x8133, lo: 0xb5, hi: 0xb6}, + {value: 0x812e, lo: 0xb7, hi: 0xb9}, + {value: 0x8133, lo: 0xba, hi: 0xba}, + {value: 0x812e, 
lo: 0xbb, hi: 0xbc}, + {value: 0x8133, lo: 0xbd, hi: 0xbd}, + {value: 0x812e, lo: 0xbe, hi: 0xbe}, + {value: 0x8133, lo: 0xbf, hi: 0xbf}, + // Block 0xb, offset 0x56 + {value: 0x0005, lo: 0x07}, + {value: 0x8133, lo: 0x80, hi: 0x80}, + {value: 0x8133, lo: 0x81, hi: 0x81}, + {value: 0x812e, lo: 0x82, hi: 0x83}, + {value: 0x812e, lo: 0x84, hi: 0x85}, + {value: 0x812e, lo: 0x86, hi: 0x87}, + {value: 0x812e, lo: 0x88, hi: 0x89}, + {value: 0x8133, lo: 0x8a, hi: 0x8a}, + // Block 0xc, offset 0x5e + {value: 0x0000, lo: 0x04}, + {value: 0x8133, lo: 0xab, hi: 0xb1}, + {value: 0x812e, lo: 0xb2, hi: 0xb2}, + {value: 0x8133, lo: 0xb3, hi: 0xb3}, + {value: 0x812e, lo: 0xbd, hi: 0xbd}, + // Block 0xd, offset 0x63 + {value: 0x0000, lo: 0x04}, + {value: 0x8133, lo: 0x96, hi: 0x99}, + {value: 0x8133, lo: 0x9b, hi: 0xa3}, + {value: 0x8133, lo: 0xa5, hi: 0xa7}, + {value: 0x8133, lo: 0xa9, hi: 0xad}, + // Block 0xe, offset 0x68 + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0x99, hi: 0x9b}, + // Block 0xf, offset 0x6a + {value: 0x0000, lo: 0x03}, + {value: 0x8133, lo: 0x98, hi: 0x98}, + {value: 0x812e, lo: 0x99, hi: 0x9b}, + {value: 0x8133, lo: 0x9c, hi: 0x9f}, + // Block 0x10, offset 0x6e + {value: 0x0000, lo: 0x07}, + {value: 0xa000, lo: 0xa8, hi: 0xa8}, + {value: 0x4019, lo: 0xa9, hi: 0xa9}, + {value: 0xa000, lo: 0xb0, hi: 0xb0}, + {value: 0x4021, lo: 0xb1, hi: 0xb1}, + {value: 0xa000, lo: 0xb3, hi: 0xb3}, + {value: 0x4029, lo: 0xb4, hi: 0xb4}, + {value: 0x9903, lo: 0xbc, hi: 0xbc}, + // Block 0x11, offset 0x76 + {value: 0x0008, lo: 0x06}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x8133, lo: 0x91, hi: 0x91}, + {value: 0x812e, lo: 0x92, hi: 0x92}, + {value: 0x8133, lo: 0x93, hi: 0x93}, + {value: 0x8133, lo: 0x94, hi: 0x94}, + {value: 0x465d, lo: 0x98, hi: 0x9f}, + // Block 0x12, offset 0x7d + {value: 0x0000, lo: 0x02}, + {value: 0x8103, lo: 0xbc, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x13, offset 0x80 + {value: 0x0008, lo: 0x07}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2dd5, lo: 0x8b, hi: 0x8c}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + {value: 0x469d, lo: 0x9c, hi: 0x9d}, + {value: 0x46ad, lo: 0x9f, hi: 0x9f}, + {value: 0x8133, lo: 0xbe, hi: 0xbe}, + // Block 0x14, offset 0x88 + {value: 0x0000, lo: 0x03}, + {value: 0x46d5, lo: 0xb3, hi: 0xb3}, + {value: 0x46dd, lo: 0xb6, hi: 0xb6}, + {value: 0x8103, lo: 0xbc, hi: 0xbc}, + // Block 0x15, offset 0x8c + {value: 0x0008, lo: 0x03}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x46b5, lo: 0x99, hi: 0x9b}, + {value: 0x46cd, lo: 0x9e, hi: 0x9e}, + // Block 0x16, offset 0x90 + {value: 0x0000, lo: 0x01}, + {value: 0x8103, lo: 0xbc, hi: 0xbc}, + // Block 0x17, offset 0x92 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + // Block 0x18, offset 0x94 + {value: 0x0000, lo: 0x08}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2ded, lo: 0x88, hi: 0x88}, + {value: 0x2de5, lo: 0x8b, hi: 0x8b}, + {value: 0x2df5, lo: 0x8c, hi: 0x8c}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x96, hi: 0x97}, + {value: 0x46e5, lo: 0x9c, hi: 0x9c}, + {value: 0x46ed, lo: 0x9d, hi: 0x9d}, + // Block 0x19, offset 0x9d + {value: 0x0000, lo: 0x03}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x2dfd, lo: 0x94, hi: 0x94}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x1a, offset 0xa1 + {value: 0x0000, lo: 0x06}, + {value: 0xa000, lo: 0x86, hi: 0x87}, + {value: 0x2e05, lo: 0x8a, hi: 0x8a}, + {value: 0x2e15, lo: 0x8b, hi: 0x8b}, + {value: 0x2e0d, lo: 0x8c, hi: 
0x8c}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + // Block 0x1b, offset 0xa8 + {value: 0x1801, lo: 0x04}, + {value: 0xa000, lo: 0x86, hi: 0x86}, + {value: 0x4031, lo: 0x88, hi: 0x88}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x8121, lo: 0x95, hi: 0x96}, + // Block 0x1c, offset 0xad + {value: 0x0000, lo: 0x02}, + {value: 0x8103, lo: 0xbc, hi: 0xbc}, + {value: 0xa000, lo: 0xbf, hi: 0xbf}, + // Block 0x1d, offset 0xb0 + {value: 0x0000, lo: 0x09}, + {value: 0x2e1d, lo: 0x80, hi: 0x80}, + {value: 0x9900, lo: 0x82, hi: 0x82}, + {value: 0xa000, lo: 0x86, hi: 0x86}, + {value: 0x2e25, lo: 0x87, hi: 0x87}, + {value: 0x2e2d, lo: 0x88, hi: 0x88}, + {value: 0x3091, lo: 0x8a, hi: 0x8a}, + {value: 0x2f19, lo: 0x8b, hi: 0x8b}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x95, hi: 0x96}, + // Block 0x1e, offset 0xba + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0xbb, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x1f, offset 0xbd + {value: 0x0000, lo: 0x06}, + {value: 0xa000, lo: 0x86, hi: 0x87}, + {value: 0x2e35, lo: 0x8a, hi: 0x8a}, + {value: 0x2e45, lo: 0x8b, hi: 0x8b}, + {value: 0x2e3d, lo: 0x8c, hi: 0x8c}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + // Block 0x20, offset 0xc4 + {value: 0x6ab3, lo: 0x07}, + {value: 0x9905, lo: 0x8a, hi: 0x8a}, + {value: 0x9900, lo: 0x8f, hi: 0x8f}, + {value: 0xa000, lo: 0x99, hi: 0x99}, + {value: 0x4039, lo: 0x9a, hi: 0x9a}, + {value: 0x3099, lo: 0x9c, hi: 0x9c}, + {value: 0x2f24, lo: 0x9d, hi: 0x9d}, + {value: 0x2e4d, lo: 0x9e, hi: 0x9f}, + // Block 0x21, offset 0xcc + {value: 0x0000, lo: 0x02}, + {value: 0x8123, lo: 0xb8, hi: 0xb9}, + {value: 0x8105, lo: 0xba, hi: 0xba}, + // Block 0x22, offset 0xcf + {value: 0x0000, lo: 0x01}, + {value: 0x8124, lo: 0x88, hi: 0x8b}, + // Block 0x23, offset 0xd1 + {value: 0x0000, lo: 0x02}, + {value: 0x8125, lo: 0xb8, hi: 0xb9}, + {value: 0x8105, lo: 0xba, hi: 0xba}, + // Block 0x24, offset 0xd4 + {value: 0x0000, lo: 0x01}, + {value: 0x8126, lo: 0x88, hi: 0x8b}, + // Block 0x25, offset 0xd6 + {value: 0x0000, lo: 0x04}, + {value: 0x812e, lo: 0x98, hi: 0x99}, + {value: 0x812e, lo: 0xb5, hi: 0xb5}, + {value: 0x812e, lo: 0xb7, hi: 0xb7}, + {value: 0x812c, lo: 0xb9, hi: 0xb9}, + // Block 0x26, offset 0xdb + {value: 0x0000, lo: 0x10}, + {value: 0x2774, lo: 0x83, hi: 0x83}, + {value: 0x277b, lo: 0x8d, hi: 0x8d}, + {value: 0x2782, lo: 0x92, hi: 0x92}, + {value: 0x2789, lo: 0x97, hi: 0x97}, + {value: 0x2790, lo: 0x9c, hi: 0x9c}, + {value: 0x276d, lo: 0xa9, hi: 0xa9}, + {value: 0x8127, lo: 0xb1, hi: 0xb1}, + {value: 0x8128, lo: 0xb2, hi: 0xb2}, + {value: 0x4bc5, lo: 0xb3, hi: 0xb3}, + {value: 0x8129, lo: 0xb4, hi: 0xb4}, + {value: 0x4bce, lo: 0xb5, hi: 0xb5}, + {value: 0x46f5, lo: 0xb6, hi: 0xb6}, + {value: 0x8200, lo: 0xb7, hi: 0xb7}, + {value: 0x46fd, lo: 0xb8, hi: 0xb8}, + {value: 0x8200, lo: 0xb9, hi: 0xb9}, + {value: 0x8128, lo: 0xba, hi: 0xbd}, + // Block 0x27, offset 0xec + {value: 0x0000, lo: 0x0b}, + {value: 0x8128, lo: 0x80, hi: 0x80}, + {value: 0x4bd7, lo: 0x81, hi: 0x81}, + {value: 0x8133, lo: 0x82, hi: 0x83}, + {value: 0x8105, lo: 0x84, hi: 0x84}, + {value: 0x8133, lo: 0x86, hi: 0x87}, + {value: 0x279e, lo: 0x93, hi: 0x93}, + {value: 0x27a5, lo: 0x9d, hi: 0x9d}, + {value: 0x27ac, lo: 0xa2, hi: 0xa2}, + {value: 0x27b3, lo: 0xa7, hi: 0xa7}, + {value: 0x27ba, lo: 0xac, hi: 0xac}, + {value: 0x2797, lo: 0xb9, hi: 0xb9}, + // Block 0x28, offset 0xf8 + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0x86, hi: 0x86}, + // 
Block 0x29, offset 0xfa + {value: 0x0000, lo: 0x05}, + {value: 0xa000, lo: 0xa5, hi: 0xa5}, + {value: 0x2e55, lo: 0xa6, hi: 0xa6}, + {value: 0x9900, lo: 0xae, hi: 0xae}, + {value: 0x8103, lo: 0xb7, hi: 0xb7}, + {value: 0x8105, lo: 0xb9, hi: 0xba}, + // Block 0x2a, offset 0x100 + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0x8d, hi: 0x8d}, + // Block 0x2b, offset 0x102 + {value: 0x0000, lo: 0x01}, + {value: 0xa000, lo: 0x80, hi: 0x92}, + // Block 0x2c, offset 0x104 + {value: 0x0000, lo: 0x01}, + {value: 0xb900, lo: 0xa1, hi: 0xb5}, + // Block 0x2d, offset 0x106 + {value: 0x0000, lo: 0x01}, + {value: 0x9900, lo: 0xa8, hi: 0xbf}, + // Block 0x2e, offset 0x108 + {value: 0x0000, lo: 0x01}, + {value: 0x9900, lo: 0x80, hi: 0x82}, + // Block 0x2f, offset 0x10a + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0x9d, hi: 0x9f}, + // Block 0x30, offset 0x10c + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x94, hi: 0x95}, + {value: 0x8105, lo: 0xb4, hi: 0xb4}, + // Block 0x31, offset 0x10f + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x92, hi: 0x92}, + {value: 0x8133, lo: 0x9d, hi: 0x9d}, + // Block 0x32, offset 0x112 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xa9, hi: 0xa9}, + // Block 0x33, offset 0x114 + {value: 0x0004, lo: 0x02}, + {value: 0x812f, lo: 0xb9, hi: 0xba}, + {value: 0x812e, lo: 0xbb, hi: 0xbb}, + // Block 0x34, offset 0x117 + {value: 0x0000, lo: 0x02}, + {value: 0x8133, lo: 0x97, hi: 0x97}, + {value: 0x812e, lo: 0x98, hi: 0x98}, + // Block 0x35, offset 0x11a + {value: 0x0000, lo: 0x03}, + {value: 0x8105, lo: 0xa0, hi: 0xa0}, + {value: 0x8133, lo: 0xb5, hi: 0xbc}, + {value: 0x812e, lo: 0xbf, hi: 0xbf}, + // Block 0x36, offset 0x11e + {value: 0x0000, lo: 0x05}, + {value: 0x8133, lo: 0xb0, hi: 0xb4}, + {value: 0x812e, lo: 0xb5, hi: 0xba}, + {value: 0x8133, lo: 0xbb, hi: 0xbc}, + {value: 0x812e, lo: 0xbd, hi: 0xbd}, + {value: 0x812e, lo: 0xbf, hi: 0xbf}, + // Block 0x37, offset 0x124 + {value: 0x0000, lo: 0x06}, + {value: 0x812e, lo: 0x80, hi: 0x80}, + {value: 0x8133, lo: 0x81, hi: 0x82}, + {value: 0x812e, lo: 0x83, hi: 0x84}, + {value: 0x8133, lo: 0x85, hi: 0x89}, + {value: 0x812e, lo: 0x8a, hi: 0x8a}, + {value: 0x8133, lo: 0x8b, hi: 0x8e}, + // Block 0x38, offset 0x12b + {value: 0x0000, lo: 0x08}, + {value: 0x2e9d, lo: 0x80, hi: 0x80}, + {value: 0x2ea5, lo: 0x81, hi: 0x81}, + {value: 0xa000, lo: 0x82, hi: 0x82}, + {value: 0x2ead, lo: 0x83, hi: 0x83}, + {value: 0x8105, lo: 0x84, hi: 0x84}, + {value: 0x8133, lo: 0xab, hi: 0xab}, + {value: 0x812e, lo: 0xac, hi: 0xac}, + {value: 0x8133, lo: 0xad, hi: 0xb3}, + // Block 0x39, offset 0x134 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0xaa, hi: 0xab}, + // Block 0x3a, offset 0x136 + {value: 0x0000, lo: 0x02}, + {value: 0x8103, lo: 0xa6, hi: 0xa6}, + {value: 0x8105, lo: 0xb2, hi: 0xb3}, + // Block 0x3b, offset 0x139 + {value: 0x0000, lo: 0x01}, + {value: 0x8103, lo: 0xb7, hi: 0xb7}, + // Block 0x3c, offset 0x13b + {value: 0x0000, lo: 0x0a}, + {value: 0x8133, lo: 0x90, hi: 0x92}, + {value: 0x8101, lo: 0x94, hi: 0x94}, + {value: 0x812e, lo: 0x95, hi: 0x99}, + {value: 0x8133, lo: 0x9a, hi: 0x9b}, + {value: 0x812e, lo: 0x9c, hi: 0x9f}, + {value: 0x8133, lo: 0xa0, hi: 0xa0}, + {value: 0x8101, lo: 0xa2, hi: 0xa8}, + {value: 0x812e, lo: 0xad, hi: 0xad}, + {value: 0x8133, lo: 0xb4, hi: 0xb4}, + {value: 0x8133, lo: 0xb8, hi: 0xb9}, + // Block 0x3d, offset 0x146 + {value: 0x0004, lo: 0x03}, + {value: 0x052a, lo: 0x80, hi: 0x81}, + {value: 0x8100, lo: 0x97, hi: 0x97}, + {value: 0x8100, lo: 0xbe, hi: 0xbe}, + // Block 0x3e, 
offset 0x14a + {value: 0x0000, lo: 0x0d}, + {value: 0x8133, lo: 0x90, hi: 0x91}, + {value: 0x8101, lo: 0x92, hi: 0x93}, + {value: 0x8133, lo: 0x94, hi: 0x97}, + {value: 0x8101, lo: 0x98, hi: 0x9a}, + {value: 0x8133, lo: 0x9b, hi: 0x9c}, + {value: 0x8133, lo: 0xa1, hi: 0xa1}, + {value: 0x8101, lo: 0xa5, hi: 0xa6}, + {value: 0x8133, lo: 0xa7, hi: 0xa7}, + {value: 0x812e, lo: 0xa8, hi: 0xa8}, + {value: 0x8133, lo: 0xa9, hi: 0xa9}, + {value: 0x8101, lo: 0xaa, hi: 0xab}, + {value: 0x812e, lo: 0xac, hi: 0xaf}, + {value: 0x8133, lo: 0xb0, hi: 0xb0}, + // Block 0x3f, offset 0x158 + {value: 0x43bc, lo: 0x02}, + {value: 0x023c, lo: 0xa6, hi: 0xa6}, + {value: 0x0057, lo: 0xaa, hi: 0xab}, + // Block 0x40, offset 0x15b + {value: 0x0007, lo: 0x05}, + {value: 0xa000, lo: 0x90, hi: 0x90}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0xa000, lo: 0x94, hi: 0x94}, + {value: 0x3cfa, lo: 0x9a, hi: 0x9b}, + {value: 0x3d08, lo: 0xae, hi: 0xae}, + // Block 0x41, offset 0x161 + {value: 0x000e, lo: 0x05}, + {value: 0x3d0f, lo: 0x8d, hi: 0x8e}, + {value: 0x3d16, lo: 0x8f, hi: 0x8f}, + {value: 0xa000, lo: 0x90, hi: 0x90}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0xa000, lo: 0x94, hi: 0x94}, + // Block 0x42, offset 0x167 + {value: 0x62c7, lo: 0x0a}, + {value: 0xa000, lo: 0x83, hi: 0x83}, + {value: 0x3d24, lo: 0x84, hi: 0x84}, + {value: 0xa000, lo: 0x88, hi: 0x88}, + {value: 0x3d2b, lo: 0x89, hi: 0x89}, + {value: 0xa000, lo: 0x8b, hi: 0x8b}, + {value: 0x3d32, lo: 0x8c, hi: 0x8c}, + {value: 0xa000, lo: 0xa3, hi: 0xa3}, + {value: 0x3d39, lo: 0xa4, hi: 0xa5}, + {value: 0x3d40, lo: 0xa6, hi: 0xa6}, + {value: 0xa000, lo: 0xbc, hi: 0xbc}, + // Block 0x43, offset 0x172 + {value: 0x0007, lo: 0x03}, + {value: 0x3da9, lo: 0xa0, hi: 0xa1}, + {value: 0x3dd3, lo: 0xa2, hi: 0xa3}, + {value: 0x3dfd, lo: 0xaa, hi: 0xad}, + // Block 0x44, offset 0x176 + {value: 0x0004, lo: 0x01}, + {value: 0x0586, lo: 0xa9, hi: 0xaa}, + // Block 0x45, offset 0x178 + {value: 0x0000, lo: 0x01}, + {value: 0x461e, lo: 0x9c, hi: 0x9c}, + // Block 0x46, offset 0x17a + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xaf, hi: 0xb1}, + // Block 0x47, offset 0x17c + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0xbf, hi: 0xbf}, + // Block 0x48, offset 0x17e + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xa0, hi: 0xbf}, + // Block 0x49, offset 0x180 + {value: 0x0000, lo: 0x05}, + {value: 0x812d, lo: 0xaa, hi: 0xaa}, + {value: 0x8132, lo: 0xab, hi: 0xab}, + {value: 0x8134, lo: 0xac, hi: 0xac}, + {value: 0x812f, lo: 0xad, hi: 0xad}, + {value: 0x8130, lo: 0xae, hi: 0xaf}, + // Block 0x4a, offset 0x186 + {value: 0x0000, lo: 0x03}, + {value: 0x4be0, lo: 0xb3, hi: 0xb3}, + {value: 0x4be0, lo: 0xb5, hi: 0xb6}, + {value: 0x4be0, lo: 0xba, hi: 0xbf}, + // Block 0x4b, offset 0x18a + {value: 0x0000, lo: 0x01}, + {value: 0x4be0, lo: 0x8f, hi: 0xa3}, + // Block 0x4c, offset 0x18c + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0xae, hi: 0xbe}, + // Block 0x4d, offset 0x18e + {value: 0x0000, lo: 0x07}, + {value: 0x8100, lo: 0x84, hi: 0x84}, + {value: 0x8100, lo: 0x87, hi: 0x87}, + {value: 0x8100, lo: 0x90, hi: 0x90}, + {value: 0x8100, lo: 0x9e, hi: 0x9e}, + {value: 0x8100, lo: 0xa1, hi: 0xa1}, + {value: 0x8100, lo: 0xb2, hi: 0xb2}, + {value: 0x8100, lo: 0xbb, hi: 0xbb}, + // Block 0x4e, offset 0x196 + {value: 0x0000, lo: 0x03}, + {value: 0x8100, lo: 0x80, hi: 0x80}, + {value: 0x8100, lo: 0x8b, hi: 0x8b}, + {value: 0x8100, lo: 0x8e, hi: 0x8e}, + // Block 0x4f, offset 0x19a + {value: 0x0000, lo: 0x02}, + {value: 0x8133, lo: 0xaf, hi: 0xaf}, + {value: 
0x8133, lo: 0xb4, hi: 0xbd}, + // Block 0x50, offset 0x19d + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0x9e, hi: 0x9f}, + // Block 0x51, offset 0x19f + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xb0, hi: 0xb1}, + // Block 0x52, offset 0x1a1 + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x86, hi: 0x86}, + {value: 0x8105, lo: 0xac, hi: 0xac}, + // Block 0x53, offset 0x1a4 + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x84, hi: 0x84}, + {value: 0x8133, lo: 0xa0, hi: 0xb1}, + // Block 0x54, offset 0x1a7 + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0xab, hi: 0xad}, + // Block 0x55, offset 0x1a9 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x93, hi: 0x93}, + // Block 0x56, offset 0x1ab + {value: 0x0000, lo: 0x01}, + {value: 0x8103, lo: 0xb3, hi: 0xb3}, + // Block 0x57, offset 0x1ad + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x80, hi: 0x80}, + // Block 0x58, offset 0x1af + {value: 0x0000, lo: 0x05}, + {value: 0x8133, lo: 0xb0, hi: 0xb0}, + {value: 0x8133, lo: 0xb2, hi: 0xb3}, + {value: 0x812e, lo: 0xb4, hi: 0xb4}, + {value: 0x8133, lo: 0xb7, hi: 0xb8}, + {value: 0x8133, lo: 0xbe, hi: 0xbf}, + // Block 0x59, offset 0x1b5 + {value: 0x0000, lo: 0x02}, + {value: 0x8133, lo: 0x81, hi: 0x81}, + {value: 0x8105, lo: 0xb6, hi: 0xb6}, + // Block 0x5a, offset 0x1b8 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0xad, hi: 0xad}, + // Block 0x5b, offset 0x1ba + {value: 0x0000, lo: 0x06}, + {value: 0xe500, lo: 0x80, hi: 0x80}, + {value: 0xc600, lo: 0x81, hi: 0x9b}, + {value: 0xe500, lo: 0x9c, hi: 0x9c}, + {value: 0xc600, lo: 0x9d, hi: 0xb7}, + {value: 0xe500, lo: 0xb8, hi: 0xb8}, + {value: 0xc600, lo: 0xb9, hi: 0xbf}, + // Block 0x5c, offset 0x1c1 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x93}, + {value: 0xe500, lo: 0x94, hi: 0x94}, + {value: 0xc600, lo: 0x95, hi: 0xaf}, + {value: 0xe500, lo: 0xb0, hi: 0xb0}, + {value: 0xc600, lo: 0xb1, hi: 0xbf}, + // Block 0x5d, offset 0x1c7 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x8b}, + {value: 0xe500, lo: 0x8c, hi: 0x8c}, + {value: 0xc600, lo: 0x8d, hi: 0xa7}, + {value: 0xe500, lo: 0xa8, hi: 0xa8}, + {value: 0xc600, lo: 0xa9, hi: 0xbf}, + // Block 0x5e, offset 0x1cd + {value: 0x0000, lo: 0x07}, + {value: 0xc600, lo: 0x80, hi: 0x83}, + {value: 0xe500, lo: 0x84, hi: 0x84}, + {value: 0xc600, lo: 0x85, hi: 0x9f}, + {value: 0xe500, lo: 0xa0, hi: 0xa0}, + {value: 0xc600, lo: 0xa1, hi: 0xbb}, + {value: 0xe500, lo: 0xbc, hi: 0xbc}, + {value: 0xc600, lo: 0xbd, hi: 0xbf}, + // Block 0x5f, offset 0x1d5 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x97}, + {value: 0xe500, lo: 0x98, hi: 0x98}, + {value: 0xc600, lo: 0x99, hi: 0xb3}, + {value: 0xe500, lo: 0xb4, hi: 0xb4}, + {value: 0xc600, lo: 0xb5, hi: 0xbf}, + // Block 0x60, offset 0x1db + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x8f}, + {value: 0xe500, lo: 0x90, hi: 0x90}, + {value: 0xc600, lo: 0x91, hi: 0xab}, + {value: 0xe500, lo: 0xac, hi: 0xac}, + {value: 0xc600, lo: 0xad, hi: 0xbf}, + // Block 0x61, offset 0x1e1 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x87}, + {value: 0xe500, lo: 0x88, hi: 0x88}, + {value: 0xc600, lo: 0x89, hi: 0xa3}, + {value: 0xe500, lo: 0xa4, hi: 0xa4}, + {value: 0xc600, lo: 0xa5, hi: 0xbf}, + // Block 0x62, offset 0x1e7 + {value: 0x0000, lo: 0x03}, + {value: 0xc600, lo: 0x80, hi: 0x87}, + {value: 0xe500, lo: 0x88, hi: 0x88}, + {value: 0xc600, lo: 0x89, hi: 0xa3}, + // Block 0x63, offset 0x1eb + {value: 0x0006, lo: 0x0d}, + {value: 0x44d1, lo: 0x9d, hi: 0x9d}, + 
{value: 0x8116, lo: 0x9e, hi: 0x9e}, + {value: 0x4543, lo: 0x9f, hi: 0x9f}, + {value: 0x4531, lo: 0xaa, hi: 0xab}, + {value: 0x4635, lo: 0xac, hi: 0xac}, + {value: 0x463d, lo: 0xad, hi: 0xad}, + {value: 0x4489, lo: 0xae, hi: 0xb1}, + {value: 0x44a7, lo: 0xb2, hi: 0xb4}, + {value: 0x44bf, lo: 0xb5, hi: 0xb6}, + {value: 0x44cb, lo: 0xb8, hi: 0xb8}, + {value: 0x44d7, lo: 0xb9, hi: 0xbb}, + {value: 0x44ef, lo: 0xbc, hi: 0xbc}, + {value: 0x44f5, lo: 0xbe, hi: 0xbe}, + // Block 0x64, offset 0x1f9 + {value: 0x0006, lo: 0x08}, + {value: 0x44fb, lo: 0x80, hi: 0x81}, + {value: 0x4507, lo: 0x83, hi: 0x84}, + {value: 0x4519, lo: 0x86, hi: 0x89}, + {value: 0x453d, lo: 0x8a, hi: 0x8a}, + {value: 0x44b9, lo: 0x8b, hi: 0x8b}, + {value: 0x44a1, lo: 0x8c, hi: 0x8c}, + {value: 0x44e9, lo: 0x8d, hi: 0x8d}, + {value: 0x4513, lo: 0x8e, hi: 0x8e}, + // Block 0x65, offset 0x202 + {value: 0x0000, lo: 0x02}, + {value: 0x8100, lo: 0xa4, hi: 0xa5}, + {value: 0x8100, lo: 0xb0, hi: 0xb1}, + // Block 0x66, offset 0x205 + {value: 0x0000, lo: 0x02}, + {value: 0x8100, lo: 0x9b, hi: 0x9d}, + {value: 0x8200, lo: 0x9e, hi: 0xa3}, + // Block 0x67, offset 0x208 + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0x90, hi: 0x90}, + // Block 0x68, offset 0x20a + {value: 0x0000, lo: 0x02}, + {value: 0x8100, lo: 0x99, hi: 0x99}, + {value: 0x8200, lo: 0xb2, hi: 0xb4}, + // Block 0x69, offset 0x20d + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0xbc, hi: 0xbd}, + // Block 0x6a, offset 0x20f + {value: 0x0000, lo: 0x03}, + {value: 0x8133, lo: 0xa0, hi: 0xa6}, + {value: 0x812e, lo: 0xa7, hi: 0xad}, + {value: 0x8133, lo: 0xae, hi: 0xaf}, + // Block 0x6b, offset 0x213 + {value: 0x0000, lo: 0x04}, + {value: 0x8100, lo: 0x89, hi: 0x8c}, + {value: 0x8100, lo: 0xb0, hi: 0xb2}, + {value: 0x8100, lo: 0xb4, hi: 0xb4}, + {value: 0x8100, lo: 0xb6, hi: 0xbf}, + // Block 0x6c, offset 0x218 + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0x81, hi: 0x8c}, + // Block 0x6d, offset 0x21a + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0xb5, hi: 0xba}, + // Block 0x6e, offset 0x21c + {value: 0x0000, lo: 0x04}, + {value: 0x4be0, lo: 0x9e, hi: 0x9f}, + {value: 0x4be0, lo: 0xa3, hi: 0xa3}, + {value: 0x4be0, lo: 0xa5, hi: 0xa6}, + {value: 0x4be0, lo: 0xaa, hi: 0xaf}, + // Block 0x6f, offset 0x221 + {value: 0x0000, lo: 0x05}, + {value: 0x4be0, lo: 0x82, hi: 0x87}, + {value: 0x4be0, lo: 0x8a, hi: 0x8f}, + {value: 0x4be0, lo: 0x92, hi: 0x97}, + {value: 0x4be0, lo: 0x9a, hi: 0x9c}, + {value: 0x8100, lo: 0xa3, hi: 0xa3}, + // Block 0x70, offset 0x227 + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0xbd, hi: 0xbd}, + // Block 0x71, offset 0x229 + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0xa0, hi: 0xa0}, + // Block 0x72, offset 0x22b + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xb6, hi: 0xba}, + // Block 0x73, offset 0x22d + {value: 0x002d, lo: 0x05}, + {value: 0x812e, lo: 0x8d, hi: 0x8d}, + {value: 0x8133, lo: 0x8f, hi: 0x8f}, + {value: 0x8133, lo: 0xb8, hi: 0xb8}, + {value: 0x8101, lo: 0xb9, hi: 0xba}, + {value: 0x8105, lo: 0xbf, hi: 0xbf}, + // Block 0x74, offset 0x233 + {value: 0x0000, lo: 0x02}, + {value: 0x8133, lo: 0xa5, hi: 0xa5}, + {value: 0x812e, lo: 0xa6, hi: 0xa6}, + // Block 0x75, offset 0x236 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xa4, hi: 0xa7}, + // Block 0x76, offset 0x238 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xab, hi: 0xac}, + // Block 0x77, offset 0x23a + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0xbd, hi: 0xbf}, + // Block 0x78, offset 0x23c + {value: 0x0000, lo: 0x05}, + {value: 
0x812e, lo: 0x86, hi: 0x87}, + {value: 0x8133, lo: 0x88, hi: 0x8a}, + {value: 0x812e, lo: 0x8b, hi: 0x8b}, + {value: 0x8133, lo: 0x8c, hi: 0x8c}, + {value: 0x812e, lo: 0x8d, hi: 0x90}, + // Block 0x79, offset 0x242 + {value: 0x0005, lo: 0x03}, + {value: 0x8133, lo: 0x82, hi: 0x82}, + {value: 0x812e, lo: 0x83, hi: 0x84}, + {value: 0x812e, lo: 0x85, hi: 0x85}, + // Block 0x7a, offset 0x246 + {value: 0x0000, lo: 0x03}, + {value: 0x8105, lo: 0x86, hi: 0x86}, + {value: 0x8105, lo: 0xb0, hi: 0xb0}, + {value: 0x8105, lo: 0xbf, hi: 0xbf}, + // Block 0x7b, offset 0x24a + {value: 0x17fe, lo: 0x07}, + {value: 0xa000, lo: 0x99, hi: 0x99}, + {value: 0x4379, lo: 0x9a, hi: 0x9a}, + {value: 0xa000, lo: 0x9b, hi: 0x9b}, + {value: 0x4383, lo: 0x9c, hi: 0x9c}, + {value: 0xa000, lo: 0xa5, hi: 0xa5}, + {value: 0x438d, lo: 0xab, hi: 0xab}, + {value: 0x8105, lo: 0xb9, hi: 0xba}, + // Block 0x7c, offset 0x252 + {value: 0x0000, lo: 0x06}, + {value: 0x8133, lo: 0x80, hi: 0x82}, + {value: 0x9900, lo: 0xa7, hi: 0xa7}, + {value: 0x2eb5, lo: 0xae, hi: 0xae}, + {value: 0x2ebf, lo: 0xaf, hi: 0xaf}, + {value: 0xa000, lo: 0xb1, hi: 0xb2}, + {value: 0x8105, lo: 0xb3, hi: 0xb4}, + // Block 0x7d, offset 0x259 + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x80, hi: 0x80}, + {value: 0x8103, lo: 0x8a, hi: 0x8a}, + // Block 0x7e, offset 0x25c + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0xb5, hi: 0xb5}, + {value: 0x8103, lo: 0xb6, hi: 0xb6}, + // Block 0x7f, offset 0x25f + {value: 0x0002, lo: 0x01}, + {value: 0x8103, lo: 0xa9, hi: 0xaa}, + // Block 0x80, offset 0x261 + {value: 0x0000, lo: 0x02}, + {value: 0x8103, lo: 0xbb, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x81, offset 0x264 + {value: 0x0000, lo: 0x07}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2ec9, lo: 0x8b, hi: 0x8b}, + {value: 0x2ed3, lo: 0x8c, hi: 0x8c}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + {value: 0x8133, lo: 0xa6, hi: 0xac}, + {value: 0x8133, lo: 0xb0, hi: 0xb4}, + // Block 0x82, offset 0x26c + {value: 0x0000, lo: 0x03}, + {value: 0x8105, lo: 0x82, hi: 0x82}, + {value: 0x8103, lo: 0x86, hi: 0x86}, + {value: 0x8133, lo: 0x9e, hi: 0x9e}, + // Block 0x83, offset 0x270 + {value: 0x6a23, lo: 0x06}, + {value: 0x9900, lo: 0xb0, hi: 0xb0}, + {value: 0xa000, lo: 0xb9, hi: 0xb9}, + {value: 0x9900, lo: 0xba, hi: 0xba}, + {value: 0x2ee7, lo: 0xbb, hi: 0xbb}, + {value: 0x2edd, lo: 0xbc, hi: 0xbd}, + {value: 0x2ef1, lo: 0xbe, hi: 0xbe}, + // Block 0x84, offset 0x277 + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x82, hi: 0x82}, + {value: 0x8103, lo: 0x83, hi: 0x83}, + // Block 0x85, offset 0x27a + {value: 0x0000, lo: 0x05}, + {value: 0x9900, lo: 0xaf, hi: 0xaf}, + {value: 0xa000, lo: 0xb8, hi: 0xb9}, + {value: 0x2efb, lo: 0xba, hi: 0xba}, + {value: 0x2f05, lo: 0xbb, hi: 0xbb}, + {value: 0x8105, lo: 0xbf, hi: 0xbf}, + // Block 0x86, offset 0x280 + {value: 0x0000, lo: 0x01}, + {value: 0x8103, lo: 0x80, hi: 0x80}, + // Block 0x87, offset 0x282 + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0xb6, hi: 0xb6}, + {value: 0x8103, lo: 0xb7, hi: 0xb7}, + // Block 0x88, offset 0x285 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0xab, hi: 0xab}, + // Block 0x89, offset 0x287 + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0xb9, hi: 0xb9}, + {value: 0x8103, lo: 0xba, hi: 0xba}, + // Block 0x8a, offset 0x28a + {value: 0x0000, lo: 0x04}, + {value: 0x9900, lo: 0xb0, hi: 0xb0}, + {value: 0xa000, lo: 0xb5, hi: 0xb5}, + {value: 0x2f0f, lo: 0xb8, hi: 0xb8}, + {value: 0x8105, lo: 0xbd, hi: 0xbe}, + // 
Block 0x8b, offset 0x28f + {value: 0x0000, lo: 0x01}, + {value: 0x8103, lo: 0x83, hi: 0x83}, + // Block 0x8c, offset 0x291 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0xa0, hi: 0xa0}, + // Block 0x8d, offset 0x293 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0xb4, hi: 0xb4}, + // Block 0x8e, offset 0x295 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x87, hi: 0x87}, + // Block 0x8f, offset 0x297 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x99, hi: 0x99}, + // Block 0x90, offset 0x299 + {value: 0x0000, lo: 0x02}, + {value: 0x8103, lo: 0x82, hi: 0x82}, + {value: 0x8105, lo: 0x84, hi: 0x85}, + // Block 0x91, offset 0x29c + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x97, hi: 0x97}, + // Block 0x92, offset 0x29e + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x81, hi: 0x82}, + // Block 0x93, offset 0x2a0 + {value: 0x0000, lo: 0x01}, + {value: 0x8101, lo: 0xb0, hi: 0xb4}, + // Block 0x94, offset 0x2a2 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xb0, hi: 0xb6}, + // Block 0x95, offset 0x2a4 + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0xb0, hi: 0xb1}, + // Block 0x96, offset 0x2a6 + {value: 0x0000, lo: 0x01}, + {value: 0x8101, lo: 0x9e, hi: 0x9e}, + // Block 0x97, offset 0x2a8 + {value: 0x0000, lo: 0x0c}, + {value: 0x470d, lo: 0x9e, hi: 0x9e}, + {value: 0x4717, lo: 0x9f, hi: 0x9f}, + {value: 0x474b, lo: 0xa0, hi: 0xa0}, + {value: 0x4759, lo: 0xa1, hi: 0xa1}, + {value: 0x4767, lo: 0xa2, hi: 0xa2}, + {value: 0x4775, lo: 0xa3, hi: 0xa3}, + {value: 0x4783, lo: 0xa4, hi: 0xa4}, + {value: 0x812c, lo: 0xa5, hi: 0xa6}, + {value: 0x8101, lo: 0xa7, hi: 0xa9}, + {value: 0x8131, lo: 0xad, hi: 0xad}, + {value: 0x812c, lo: 0xae, hi: 0xb2}, + {value: 0x812e, lo: 0xbb, hi: 0xbf}, + // Block 0x98, offset 0x2b5 + {value: 0x0000, lo: 0x09}, + {value: 0x812e, lo: 0x80, hi: 0x82}, + {value: 0x8133, lo: 0x85, hi: 0x89}, + {value: 0x812e, lo: 0x8a, hi: 0x8b}, + {value: 0x8133, lo: 0xaa, hi: 0xad}, + {value: 0x4721, lo: 0xbb, hi: 0xbb}, + {value: 0x472b, lo: 0xbc, hi: 0xbc}, + {value: 0x4791, lo: 0xbd, hi: 0xbd}, + {value: 0x47ad, lo: 0xbe, hi: 0xbe}, + {value: 0x479f, lo: 0xbf, hi: 0xbf}, + // Block 0x99, offset 0x2bf + {value: 0x0000, lo: 0x01}, + {value: 0x47bb, lo: 0x80, hi: 0x80}, + // Block 0x9a, offset 0x2c1 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0x82, hi: 0x84}, + // Block 0x9b, offset 0x2c3 + {value: 0x0000, lo: 0x05}, + {value: 0x8133, lo: 0x80, hi: 0x86}, + {value: 0x8133, lo: 0x88, hi: 0x98}, + {value: 0x8133, lo: 0x9b, hi: 0xa1}, + {value: 0x8133, lo: 0xa3, hi: 0xa4}, + {value: 0x8133, lo: 0xa6, hi: 0xaa}, + // Block 0x9c, offset 0x2c9 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0x8f, hi: 0x8f}, + // Block 0x9d, offset 0x2cb + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xae, hi: 0xae}, + // Block 0x9e, offset 0x2cd + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xac, hi: 0xaf}, + // Block 0x9f, offset 0x2cf + {value: 0x0000, lo: 0x03}, + {value: 0x8134, lo: 0xac, hi: 0xad}, + {value: 0x812e, lo: 0xae, hi: 0xae}, + {value: 0x8133, lo: 0xaf, hi: 0xaf}, + // Block 0xa0, offset 0x2d3 + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0x90, hi: 0x96}, + // Block 0xa1, offset 0x2d5 + {value: 0x0000, lo: 0x02}, + {value: 0x8133, lo: 0x84, hi: 0x89}, + {value: 0x8103, lo: 0x8a, hi: 0x8a}, + // Block 0xa2, offset 0x2d8 + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0x93, hi: 0x93}, +} + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. 
The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *nfkcTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return nfkcValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := nfkcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := nfkcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfkcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := nfkcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfkcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = nfkcIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *nfkcTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return nfkcValues[c0] + } + i := nfkcIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = nfkcIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = nfkcIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *nfkcTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return nfkcValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := nfkcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := nfkcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfkcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := nfkcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. 
+ } + o := uint32(i)<<6 + uint32(c1) + i = nfkcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = nfkcIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *nfkcTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return nfkcValues[c0] + } + i := nfkcIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = nfkcIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = nfkcIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// nfkcTrie. Total size: 19260 bytes (18.81 KiB). Checksum: 1a0bbc4c8c24da49. +type nfkcTrie struct{} + +func newNfkcTrie(i int) *nfkcTrie { + return &nfkcTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *nfkcTrie) lookupValue(n uint32, b byte) uint16 { + switch { + case n < 95: + return uint16(nfkcValues[n<<6+uint32(b)]) + default: + n -= 95 + return uint16(nfkcSparse.lookup(n, b)) + } +} + +// nfkcValues: 97 blocks, 6208 entries, 12416 bytes +// The third block is the zero block. +var nfkcValues = [6208]uint16{ + // Block 0x0, offset 0x0 + 0x3c: 0xa000, 0x3d: 0xa000, 0x3e: 0xa000, + // Block 0x1, offset 0x40 + 0x41: 0xa000, 0x42: 0xa000, 0x43: 0xa000, 0x44: 0xa000, 0x45: 0xa000, + 0x46: 0xa000, 0x47: 0xa000, 0x48: 0xa000, 0x49: 0xa000, 0x4a: 0xa000, 0x4b: 0xa000, + 0x4c: 0xa000, 0x4d: 0xa000, 0x4e: 0xa000, 0x4f: 0xa000, 0x50: 0xa000, + 0x52: 0xa000, 0x53: 0xa000, 0x54: 0xa000, 0x55: 0xa000, 0x56: 0xa000, 0x57: 0xa000, + 0x58: 0xa000, 0x59: 0xa000, 0x5a: 0xa000, + 0x61: 0xa000, 0x62: 0xa000, 0x63: 0xa000, + 0x64: 0xa000, 0x65: 0xa000, 0x66: 0xa000, 0x67: 0xa000, 0x68: 0xa000, 0x69: 0xa000, + 0x6a: 0xa000, 0x6b: 0xa000, 0x6c: 0xa000, 0x6d: 0xa000, 0x6e: 0xa000, 0x6f: 0xa000, + 0x70: 0xa000, 0x72: 0xa000, 0x73: 0xa000, 0x74: 0xa000, 0x75: 0xa000, + 0x76: 0xa000, 0x77: 0xa000, 0x78: 0xa000, 0x79: 0xa000, 0x7a: 0xa000, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x30b0, 0xc1: 0x30b5, 0xc2: 0x47c9, 0xc3: 0x30ba, 0xc4: 0x47d8, 0xc5: 0x47dd, + 0xc6: 0xa000, 0xc7: 0x47e7, 0xc8: 0x3123, 0xc9: 0x3128, 0xca: 0x47ec, 0xcb: 0x313c, + 0xcc: 0x31af, 0xcd: 0x31b4, 0xce: 0x31b9, 0xcf: 0x4800, 0xd1: 0x3245, + 0xd2: 0x3268, 0xd3: 0x326d, 0xd4: 0x480a, 0xd5: 0x480f, 0xd6: 0x481e, + 0xd8: 0xa000, 0xd9: 0x32f4, 0xda: 0x32f9, 0xdb: 0x32fe, 0xdc: 0x4850, 0xdd: 0x3376, + 0xe0: 0x33bc, 0xe1: 0x33c1, 0xe2: 0x485a, 0xe3: 0x33c6, + 0xe4: 0x4869, 0xe5: 0x486e, 0xe6: 0xa000, 0xe7: 0x4878, 0xe8: 0x342f, 0xe9: 0x3434, + 0xea: 0x487d, 0xeb: 0x3448, 0xec: 0x34c0, 0xed: 0x34c5, 0xee: 0x34ca, 0xef: 0x4891, + 0xf1: 0x3556, 0xf2: 0x3579, 0xf3: 0x357e, 0xf4: 0x489b, 0xf5: 0x48a0, + 0xf6: 0x48af, 0xf8: 0xa000, 0xf9: 0x360a, 0xfa: 0x360f, 0xfb: 0x3614, + 0xfc: 0x48e1, 0xfd: 0x3691, 0xff: 0x36aa, + // Block 0x4, offset 0x100 + 0x100: 0x30bf, 0x101: 0x33cb, 0x102: 0x47ce, 0x103: 0x485f, 0x104: 0x30dd, 0x105: 0x33e9, + 0x106: 0x30f1, 0x107: 0x33fd, 0x108: 0x30f6, 0x109: 0x3402, 0x10a: 0x30fb, 0x10b: 0x3407, + 0x10c: 0x3100, 0x10d: 0x340c, 0x10e: 0x310a, 0x10f: 
0x3416, + 0x112: 0x47f1, 0x113: 0x4882, 0x114: 0x3132, 0x115: 0x343e, 0x116: 0x3137, 0x117: 0x3443, + 0x118: 0x3155, 0x119: 0x3461, 0x11a: 0x3146, 0x11b: 0x3452, 0x11c: 0x316e, 0x11d: 0x347a, + 0x11e: 0x3178, 0x11f: 0x3484, 0x120: 0x317d, 0x121: 0x3489, 0x122: 0x3187, 0x123: 0x3493, + 0x124: 0x318c, 0x125: 0x3498, 0x128: 0x31be, 0x129: 0x34cf, + 0x12a: 0x31c3, 0x12b: 0x34d4, 0x12c: 0x31c8, 0x12d: 0x34d9, 0x12e: 0x31eb, 0x12f: 0x34f7, + 0x130: 0x31cd, 0x132: 0x1a8a, 0x133: 0x1b17, 0x134: 0x31f5, 0x135: 0x3501, + 0x136: 0x3209, 0x137: 0x351a, 0x139: 0x3213, 0x13a: 0x3524, 0x13b: 0x321d, + 0x13c: 0x352e, 0x13d: 0x3218, 0x13e: 0x3529, 0x13f: 0x1cdc, + // Block 0x5, offset 0x140 + 0x140: 0x1d64, 0x143: 0x3240, 0x144: 0x3551, 0x145: 0x3259, + 0x146: 0x356a, 0x147: 0x324f, 0x148: 0x3560, 0x149: 0x1d8c, + 0x14c: 0x4814, 0x14d: 0x48a5, 0x14e: 0x3272, 0x14f: 0x3583, 0x150: 0x327c, 0x151: 0x358d, + 0x154: 0x329a, 0x155: 0x35ab, 0x156: 0x32b3, 0x157: 0x35c4, + 0x158: 0x32a4, 0x159: 0x35b5, 0x15a: 0x4837, 0x15b: 0x48c8, 0x15c: 0x32bd, 0x15d: 0x35ce, + 0x15e: 0x32cc, 0x15f: 0x35dd, 0x160: 0x483c, 0x161: 0x48cd, 0x162: 0x32e5, 0x163: 0x35fb, + 0x164: 0x32d6, 0x165: 0x35ec, 0x168: 0x4846, 0x169: 0x48d7, + 0x16a: 0x484b, 0x16b: 0x48dc, 0x16c: 0x3303, 0x16d: 0x3619, 0x16e: 0x330d, 0x16f: 0x3623, + 0x170: 0x3312, 0x171: 0x3628, 0x172: 0x3330, 0x173: 0x3646, 0x174: 0x3353, 0x175: 0x3669, + 0x176: 0x337b, 0x177: 0x3696, 0x178: 0x338f, 0x179: 0x339e, 0x17a: 0x36be, 0x17b: 0x33a8, + 0x17c: 0x36c8, 0x17d: 0x33ad, 0x17e: 0x36cd, 0x17f: 0x00a7, + // Block 0x6, offset 0x180 + 0x184: 0x2f2f, 0x185: 0x2f35, + 0x186: 0x2f3b, 0x187: 0x1a9f, 0x188: 0x1aa2, 0x189: 0x1b38, 0x18a: 0x1ab7, 0x18b: 0x1aba, + 0x18c: 0x1b6e, 0x18d: 0x30c9, 0x18e: 0x33d5, 0x18f: 0x31d7, 0x190: 0x34e3, 0x191: 0x3281, + 0x192: 0x3592, 0x193: 0x3317, 0x194: 0x362d, 0x195: 0x3b10, 0x196: 0x3c9f, 0x197: 0x3b09, + 0x198: 0x3c98, 0x199: 0x3b17, 0x19a: 0x3ca6, 0x19b: 0x3b02, 0x19c: 0x3c91, + 0x19e: 0x39f1, 0x19f: 0x3b80, 0x1a0: 0x39ea, 0x1a1: 0x3b79, 0x1a2: 0x36f4, 0x1a3: 0x3706, + 0x1a6: 0x3182, 0x1a7: 0x348e, 0x1a8: 0x31ff, 0x1a9: 0x3510, + 0x1aa: 0x482d, 0x1ab: 0x48be, 0x1ac: 0x3ad1, 0x1ad: 0x3c60, 0x1ae: 0x3718, 0x1af: 0x371e, + 0x1b0: 0x3506, 0x1b1: 0x1a6f, 0x1b2: 0x1a72, 0x1b3: 0x1aff, 0x1b4: 0x3169, 0x1b5: 0x3475, + 0x1b8: 0x323b, 0x1b9: 0x354c, 0x1ba: 0x39f8, 0x1bb: 0x3b87, + 0x1bc: 0x36ee, 0x1bd: 0x3700, 0x1be: 0x36fa, 0x1bf: 0x370c, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x30ce, 0x1c1: 0x33da, 0x1c2: 0x30d3, 0x1c3: 0x33df, 0x1c4: 0x314b, 0x1c5: 0x3457, + 0x1c6: 0x3150, 0x1c7: 0x345c, 0x1c8: 0x31dc, 0x1c9: 0x34e8, 0x1ca: 0x31e1, 0x1cb: 0x34ed, + 0x1cc: 0x3286, 0x1cd: 0x3597, 0x1ce: 0x328b, 0x1cf: 0x359c, 0x1d0: 0x32a9, 0x1d1: 0x35ba, + 0x1d2: 0x32ae, 0x1d3: 0x35bf, 0x1d4: 0x331c, 0x1d5: 0x3632, 0x1d6: 0x3321, 0x1d7: 0x3637, + 0x1d8: 0x32c7, 0x1d9: 0x35d8, 0x1da: 0x32e0, 0x1db: 0x35f6, + 0x1de: 0x319b, 0x1df: 0x34a7, + 0x1e6: 0x47d3, 0x1e7: 0x4864, 0x1e8: 0x47fb, 0x1e9: 0x488c, + 0x1ea: 0x3aa0, 0x1eb: 0x3c2f, 0x1ec: 0x3a7d, 0x1ed: 0x3c0c, 0x1ee: 0x4819, 0x1ef: 0x48aa, + 0x1f0: 0x3a99, 0x1f1: 0x3c28, 0x1f2: 0x3385, 0x1f3: 0x36a0, + // Block 0x8, offset 0x200 + 0x200: 0x9933, 0x201: 0x9933, 0x202: 0x9933, 0x203: 0x9933, 0x204: 0x9933, 0x205: 0x8133, + 0x206: 0x9933, 0x207: 0x9933, 0x208: 0x9933, 0x209: 0x9933, 0x20a: 0x9933, 0x20b: 0x9933, + 0x20c: 0x9933, 0x20d: 0x8133, 0x20e: 0x8133, 0x20f: 0x9933, 0x210: 0x8133, 0x211: 0x9933, + 0x212: 0x8133, 0x213: 0x9933, 0x214: 0x9933, 0x215: 0x8134, 0x216: 0x812e, 0x217: 0x812e, + 0x218: 0x812e, 0x219: 
0x812e, 0x21a: 0x8134, 0x21b: 0x992c, 0x21c: 0x812e, 0x21d: 0x812e, + 0x21e: 0x812e, 0x21f: 0x812e, 0x220: 0x812e, 0x221: 0x812a, 0x222: 0x812a, 0x223: 0x992e, + 0x224: 0x992e, 0x225: 0x992e, 0x226: 0x992e, 0x227: 0x992a, 0x228: 0x992a, 0x229: 0x812e, + 0x22a: 0x812e, 0x22b: 0x812e, 0x22c: 0x812e, 0x22d: 0x992e, 0x22e: 0x992e, 0x22f: 0x812e, + 0x230: 0x992e, 0x231: 0x992e, 0x232: 0x812e, 0x233: 0x812e, 0x234: 0x8101, 0x235: 0x8101, + 0x236: 0x8101, 0x237: 0x8101, 0x238: 0x9901, 0x239: 0x812e, 0x23a: 0x812e, 0x23b: 0x812e, + 0x23c: 0x812e, 0x23d: 0x8133, 0x23e: 0x8133, 0x23f: 0x8133, + // Block 0x9, offset 0x240 + 0x240: 0x4aef, 0x241: 0x4af4, 0x242: 0x9933, 0x243: 0x4af9, 0x244: 0x4bb2, 0x245: 0x9937, + 0x246: 0x8133, 0x247: 0x812e, 0x248: 0x812e, 0x249: 0x812e, 0x24a: 0x8133, 0x24b: 0x8133, + 0x24c: 0x8133, 0x24d: 0x812e, 0x24e: 0x812e, 0x250: 0x8133, 0x251: 0x8133, + 0x252: 0x8133, 0x253: 0x812e, 0x254: 0x812e, 0x255: 0x812e, 0x256: 0x812e, 0x257: 0x8133, + 0x258: 0x8134, 0x259: 0x812e, 0x25a: 0x812e, 0x25b: 0x8133, 0x25c: 0x8135, 0x25d: 0x8136, + 0x25e: 0x8136, 0x25f: 0x8135, 0x260: 0x8136, 0x261: 0x8136, 0x262: 0x8135, 0x263: 0x8133, + 0x264: 0x8133, 0x265: 0x8133, 0x266: 0x8133, 0x267: 0x8133, 0x268: 0x8133, 0x269: 0x8133, + 0x26a: 0x8133, 0x26b: 0x8133, 0x26c: 0x8133, 0x26d: 0x8133, 0x26e: 0x8133, 0x26f: 0x8133, + 0x274: 0x01ee, + 0x27a: 0x43e6, + 0x27e: 0x0037, + // Block 0xa, offset 0x280 + 0x284: 0x439b, 0x285: 0x45bc, + 0x286: 0x372a, 0x287: 0x00ce, 0x288: 0x3748, 0x289: 0x3754, 0x28a: 0x3766, + 0x28c: 0x3784, 0x28e: 0x3796, 0x28f: 0x37b4, 0x290: 0x3f49, 0x291: 0xa000, + 0x295: 0xa000, 0x297: 0xa000, + 0x299: 0xa000, + 0x29f: 0xa000, 0x2a1: 0xa000, + 0x2a5: 0xa000, 0x2a9: 0xa000, + 0x2aa: 0x3778, 0x2ab: 0x37a8, 0x2ac: 0x493f, 0x2ad: 0x37d8, 0x2ae: 0x4969, 0x2af: 0x37ea, + 0x2b0: 0x3fb1, 0x2b1: 0xa000, 0x2b5: 0xa000, + 0x2b7: 0xa000, 0x2b9: 0xa000, + 0x2bf: 0xa000, + // Block 0xb, offset 0x2c0 + 0x2c1: 0xa000, 0x2c5: 0xa000, + 0x2c9: 0xa000, 0x2ca: 0x4981, 0x2cb: 0x499f, + 0x2cc: 0x3808, 0x2cd: 0x3820, 0x2ce: 0x49b7, 0x2d0: 0x0242, 0x2d1: 0x0254, + 0x2d2: 0x0230, 0x2d3: 0x444d, 0x2d4: 0x4453, 0x2d5: 0x027e, 0x2d6: 0x026c, + 0x2f0: 0x025a, 0x2f1: 0x026f, 0x2f2: 0x0272, 0x2f4: 0x020c, 0x2f5: 0x024b, + 0x2f9: 0x022a, + // Block 0xc, offset 0x300 + 0x300: 0x3862, 0x301: 0x386e, 0x303: 0x385c, + 0x306: 0xa000, 0x307: 0x384a, + 0x30c: 0x389e, 0x30d: 0x3886, 0x30e: 0x38b0, 0x310: 0xa000, + 0x313: 0xa000, 0x315: 0xa000, 0x316: 0xa000, 0x317: 0xa000, + 0x318: 0xa000, 0x319: 0x3892, 0x31a: 0xa000, + 0x31e: 0xa000, 0x323: 0xa000, + 0x327: 0xa000, + 0x32b: 0xa000, 0x32d: 0xa000, + 0x330: 0xa000, 0x333: 0xa000, 0x335: 0xa000, + 0x336: 0xa000, 0x337: 0xa000, 0x338: 0xa000, 0x339: 0x3916, 0x33a: 0xa000, + 0x33e: 0xa000, + // Block 0xd, offset 0x340 + 0x341: 0x3874, 0x342: 0x38f8, + 0x350: 0x3850, 0x351: 0x38d4, + 0x352: 0x3856, 0x353: 0x38da, 0x356: 0x3868, 0x357: 0x38ec, + 0x358: 0xa000, 0x359: 0xa000, 0x35a: 0x396a, 0x35b: 0x3970, 0x35c: 0x387a, 0x35d: 0x38fe, + 0x35e: 0x3880, 0x35f: 0x3904, 0x362: 0x388c, 0x363: 0x3910, + 0x364: 0x3898, 0x365: 0x391c, 0x366: 0x38a4, 0x367: 0x3928, 0x368: 0xa000, 0x369: 0xa000, + 0x36a: 0x3976, 0x36b: 0x397c, 0x36c: 0x38ce, 0x36d: 0x3952, 0x36e: 0x38aa, 0x36f: 0x392e, + 0x370: 0x38b6, 0x371: 0x393a, 0x372: 0x38bc, 0x373: 0x3940, 0x374: 0x38c2, 0x375: 0x3946, + 0x378: 0x38c8, 0x379: 0x394c, + // Block 0xe, offset 0x380 + 0x387: 0x1e91, + 0x391: 0x812e, + 0x392: 0x8133, 0x393: 0x8133, 0x394: 0x8133, 0x395: 0x8133, 0x396: 0x812e, 0x397: 0x8133, + 0x398: 0x8133, 
0x399: 0x8133, 0x39a: 0x812f, 0x39b: 0x812e, 0x39c: 0x8133, 0x39d: 0x8133, + 0x39e: 0x8133, 0x39f: 0x8133, 0x3a0: 0x8133, 0x3a1: 0x8133, 0x3a2: 0x812e, 0x3a3: 0x812e, + 0x3a4: 0x812e, 0x3a5: 0x812e, 0x3a6: 0x812e, 0x3a7: 0x812e, 0x3a8: 0x8133, 0x3a9: 0x8133, + 0x3aa: 0x812e, 0x3ab: 0x8133, 0x3ac: 0x8133, 0x3ad: 0x812f, 0x3ae: 0x8132, 0x3af: 0x8133, + 0x3b0: 0x8106, 0x3b1: 0x8107, 0x3b2: 0x8108, 0x3b3: 0x8109, 0x3b4: 0x810a, 0x3b5: 0x810b, + 0x3b6: 0x810c, 0x3b7: 0x810d, 0x3b8: 0x810e, 0x3b9: 0x810f, 0x3ba: 0x810f, 0x3bb: 0x8110, + 0x3bc: 0x8111, 0x3bd: 0x8112, 0x3bf: 0x8113, + // Block 0xf, offset 0x3c0 + 0x3c8: 0xa000, 0x3ca: 0xa000, 0x3cb: 0x8117, + 0x3cc: 0x8118, 0x3cd: 0x8119, 0x3ce: 0x811a, 0x3cf: 0x811b, 0x3d0: 0x811c, 0x3d1: 0x811d, + 0x3d2: 0x811e, 0x3d3: 0x9933, 0x3d4: 0x9933, 0x3d5: 0x992e, 0x3d6: 0x812e, 0x3d7: 0x8133, + 0x3d8: 0x8133, 0x3d9: 0x8133, 0x3da: 0x8133, 0x3db: 0x8133, 0x3dc: 0x812e, 0x3dd: 0x8133, + 0x3de: 0x8133, 0x3df: 0x812e, + 0x3f0: 0x811f, 0x3f5: 0x1eb4, + 0x3f6: 0x2143, 0x3f7: 0x217f, 0x3f8: 0x217a, + // Block 0x10, offset 0x400 + 0x40a: 0x8133, 0x40b: 0x8133, + 0x40c: 0x8133, 0x40d: 0x8133, 0x40e: 0x8133, 0x40f: 0x812e, 0x410: 0x812e, 0x411: 0x812e, + 0x412: 0x812e, 0x413: 0x812e, 0x414: 0x8133, 0x415: 0x8133, 0x416: 0x8133, 0x417: 0x8133, + 0x418: 0x8133, 0x419: 0x8133, 0x41a: 0x8133, 0x41b: 0x8133, 0x41c: 0x8133, 0x41d: 0x8133, + 0x41e: 0x8133, 0x41f: 0x8133, 0x420: 0x8133, 0x421: 0x8133, 0x423: 0x812e, + 0x424: 0x8133, 0x425: 0x8133, 0x426: 0x812e, 0x427: 0x8133, 0x428: 0x8133, 0x429: 0x812e, + 0x42a: 0x8133, 0x42b: 0x8133, 0x42c: 0x8133, 0x42d: 0x812e, 0x42e: 0x812e, 0x42f: 0x812e, + 0x430: 0x8117, 0x431: 0x8118, 0x432: 0x8119, 0x433: 0x8133, 0x434: 0x8133, 0x435: 0x8133, + 0x436: 0x812e, 0x437: 0x8133, 0x438: 0x8133, 0x439: 0x812e, 0x43a: 0x812e, 0x43b: 0x8133, + 0x43c: 0x8133, 0x43d: 0x8133, 0x43e: 0x8133, 0x43f: 0x8133, + // Block 0x11, offset 0x440 + 0x445: 0xa000, + 0x446: 0x2e5d, 0x447: 0xa000, 0x448: 0x2e65, 0x449: 0xa000, 0x44a: 0x2e6d, 0x44b: 0xa000, + 0x44c: 0x2e75, 0x44d: 0xa000, 0x44e: 0x2e7d, 0x451: 0xa000, + 0x452: 0x2e85, + 0x474: 0x8103, 0x475: 0x9900, + 0x47a: 0xa000, 0x47b: 0x2e8d, + 0x47c: 0xa000, 0x47d: 0x2e95, 0x47e: 0xa000, 0x47f: 0xa000, + // Block 0x12, offset 0x480 + 0x480: 0x0069, 0x481: 0x006b, 0x482: 0x006f, 0x483: 0x0083, 0x484: 0x0104, 0x485: 0x0107, + 0x486: 0x0506, 0x487: 0x0085, 0x488: 0x0089, 0x489: 0x008b, 0x48a: 0x011f, 0x48b: 0x0122, + 0x48c: 0x0125, 0x48d: 0x008f, 0x48f: 0x0097, 0x490: 0x009b, 0x491: 0x00e6, + 0x492: 0x009f, 0x493: 0x0110, 0x494: 0x050a, 0x495: 0x050e, 0x496: 0x00a1, 0x497: 0x00a9, + 0x498: 0x00ab, 0x499: 0x0516, 0x49a: 0x015b, 0x49b: 0x00ad, 0x49c: 0x051a, 0x49d: 0x0242, + 0x49e: 0x0245, 0x49f: 0x0248, 0x4a0: 0x027e, 0x4a1: 0x0281, 0x4a2: 0x0093, 0x4a3: 0x00a5, + 0x4a4: 0x00ab, 0x4a5: 0x00ad, 0x4a6: 0x0242, 0x4a7: 0x0245, 0x4a8: 0x026f, 0x4a9: 0x027e, + 0x4aa: 0x0281, + 0x4b8: 0x02b4, + // Block 0x13, offset 0x4c0 + 0x4db: 0x010a, 0x4dc: 0x0087, 0x4dd: 0x0113, + 0x4de: 0x00d7, 0x4df: 0x0125, 0x4e0: 0x008d, 0x4e1: 0x012b, 0x4e2: 0x0131, 0x4e3: 0x013d, + 0x4e4: 0x0146, 0x4e5: 0x0149, 0x4e6: 0x014c, 0x4e7: 0x051e, 0x4e8: 0x01c7, 0x4e9: 0x0155, + 0x4ea: 0x0522, 0x4eb: 0x01ca, 0x4ec: 0x0161, 0x4ed: 0x015e, 0x4ee: 0x0164, 0x4ef: 0x0167, + 0x4f0: 0x016a, 0x4f1: 0x016d, 0x4f2: 0x0176, 0x4f3: 0x018e, 0x4f4: 0x0191, 0x4f5: 0x00f2, + 0x4f6: 0x019a, 0x4f7: 0x019d, 0x4f8: 0x0512, 0x4f9: 0x01a0, 0x4fa: 0x01a3, 0x4fb: 0x00b5, + 0x4fc: 0x01af, 0x4fd: 0x01b2, 0x4fe: 0x01b5, 0x4ff: 0x0254, + // Block 0x14, offset 
0x500 + 0x500: 0x8133, 0x501: 0x8133, 0x502: 0x812e, 0x503: 0x8133, 0x504: 0x8133, 0x505: 0x8133, + 0x506: 0x8133, 0x507: 0x8133, 0x508: 0x8133, 0x509: 0x8133, 0x50a: 0x812e, 0x50b: 0x8133, + 0x50c: 0x8133, 0x50d: 0x8136, 0x50e: 0x812b, 0x50f: 0x812e, 0x510: 0x812a, 0x511: 0x8133, + 0x512: 0x8133, 0x513: 0x8133, 0x514: 0x8133, 0x515: 0x8133, 0x516: 0x8133, 0x517: 0x8133, + 0x518: 0x8133, 0x519: 0x8133, 0x51a: 0x8133, 0x51b: 0x8133, 0x51c: 0x8133, 0x51d: 0x8133, + 0x51e: 0x8133, 0x51f: 0x8133, 0x520: 0x8133, 0x521: 0x8133, 0x522: 0x8133, 0x523: 0x8133, + 0x524: 0x8133, 0x525: 0x8133, 0x526: 0x8133, 0x527: 0x8133, 0x528: 0x8133, 0x529: 0x8133, + 0x52a: 0x8133, 0x52b: 0x8133, 0x52c: 0x8133, 0x52d: 0x8133, 0x52e: 0x8133, 0x52f: 0x8133, + 0x530: 0x8133, 0x531: 0x8133, 0x532: 0x8133, 0x533: 0x8133, 0x534: 0x8133, 0x535: 0x8133, + 0x536: 0x8134, 0x537: 0x8132, 0x538: 0x8132, 0x539: 0x812e, 0x53a: 0x812d, 0x53b: 0x8133, + 0x53c: 0x8135, 0x53d: 0x812e, 0x53e: 0x8133, 0x53f: 0x812e, + // Block 0x15, offset 0x540 + 0x540: 0x30d8, 0x541: 0x33e4, 0x542: 0x30e2, 0x543: 0x33ee, 0x544: 0x30e7, 0x545: 0x33f3, + 0x546: 0x30ec, 0x547: 0x33f8, 0x548: 0x3a0d, 0x549: 0x3b9c, 0x54a: 0x3105, 0x54b: 0x3411, + 0x54c: 0x310f, 0x54d: 0x341b, 0x54e: 0x311e, 0x54f: 0x342a, 0x550: 0x3114, 0x551: 0x3420, + 0x552: 0x3119, 0x553: 0x3425, 0x554: 0x3a30, 0x555: 0x3bbf, 0x556: 0x3a37, 0x557: 0x3bc6, + 0x558: 0x315a, 0x559: 0x3466, 0x55a: 0x315f, 0x55b: 0x346b, 0x55c: 0x3a45, 0x55d: 0x3bd4, + 0x55e: 0x3164, 0x55f: 0x3470, 0x560: 0x3173, 0x561: 0x347f, 0x562: 0x3191, 0x563: 0x349d, + 0x564: 0x31a0, 0x565: 0x34ac, 0x566: 0x3196, 0x567: 0x34a2, 0x568: 0x31a5, 0x569: 0x34b1, + 0x56a: 0x31aa, 0x56b: 0x34b6, 0x56c: 0x31f0, 0x56d: 0x34fc, 0x56e: 0x3a4c, 0x56f: 0x3bdb, + 0x570: 0x31fa, 0x571: 0x350b, 0x572: 0x3204, 0x573: 0x3515, 0x574: 0x320e, 0x575: 0x351f, + 0x576: 0x4805, 0x577: 0x4896, 0x578: 0x3a53, 0x579: 0x3be2, 0x57a: 0x3227, 0x57b: 0x3538, + 0x57c: 0x3222, 0x57d: 0x3533, 0x57e: 0x322c, 0x57f: 0x353d, + // Block 0x16, offset 0x580 + 0x580: 0x3231, 0x581: 0x3542, 0x582: 0x3236, 0x583: 0x3547, 0x584: 0x324a, 0x585: 0x355b, + 0x586: 0x3254, 0x587: 0x3565, 0x588: 0x3263, 0x589: 0x3574, 0x58a: 0x325e, 0x58b: 0x356f, + 0x58c: 0x3a76, 0x58d: 0x3c05, 0x58e: 0x3a84, 0x58f: 0x3c13, 0x590: 0x3a8b, 0x591: 0x3c1a, + 0x592: 0x3a92, 0x593: 0x3c21, 0x594: 0x3290, 0x595: 0x35a1, 0x596: 0x3295, 0x597: 0x35a6, + 0x598: 0x329f, 0x599: 0x35b0, 0x59a: 0x4832, 0x59b: 0x48c3, 0x59c: 0x3ad8, 0x59d: 0x3c67, + 0x59e: 0x32b8, 0x59f: 0x35c9, 0x5a0: 0x32c2, 0x5a1: 0x35d3, 0x5a2: 0x4841, 0x5a3: 0x48d2, + 0x5a4: 0x3adf, 0x5a5: 0x3c6e, 0x5a6: 0x3ae6, 0x5a7: 0x3c75, 0x5a8: 0x3aed, 0x5a9: 0x3c7c, + 0x5aa: 0x32d1, 0x5ab: 0x35e2, 0x5ac: 0x32db, 0x5ad: 0x35f1, 0x5ae: 0x32ef, 0x5af: 0x3605, + 0x5b0: 0x32ea, 0x5b1: 0x3600, 0x5b2: 0x332b, 0x5b3: 0x3641, 0x5b4: 0x333a, 0x5b5: 0x3650, + 0x5b6: 0x3335, 0x5b7: 0x364b, 0x5b8: 0x3af4, 0x5b9: 0x3c83, 0x5ba: 0x3afb, 0x5bb: 0x3c8a, + 0x5bc: 0x333f, 0x5bd: 0x3655, 0x5be: 0x3344, 0x5bf: 0x365a, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x3349, 0x5c1: 0x365f, 0x5c2: 0x334e, 0x5c3: 0x3664, 0x5c4: 0x335d, 0x5c5: 0x3673, + 0x5c6: 0x3358, 0x5c7: 0x366e, 0x5c8: 0x3362, 0x5c9: 0x367d, 0x5ca: 0x3367, 0x5cb: 0x3682, + 0x5cc: 0x336c, 0x5cd: 0x3687, 0x5ce: 0x338a, 0x5cf: 0x36a5, 0x5d0: 0x33a3, 0x5d1: 0x36c3, + 0x5d2: 0x33b2, 0x5d3: 0x36d2, 0x5d4: 0x33b7, 0x5d5: 0x36d7, 0x5d6: 0x34bb, 0x5d7: 0x35e7, + 0x5d8: 0x3678, 0x5d9: 0x36b4, 0x5da: 0x1d10, 0x5db: 0x4418, + 0x5e0: 0x47e2, 0x5e1: 0x4873, 0x5e2: 0x30c4, 0x5e3: 0x33d0, + 0x5e4: 0x39b9, 
0x5e5: 0x3b48, 0x5e6: 0x39b2, 0x5e7: 0x3b41, 0x5e8: 0x39c7, 0x5e9: 0x3b56, + 0x5ea: 0x39c0, 0x5eb: 0x3b4f, 0x5ec: 0x39ff, 0x5ed: 0x3b8e, 0x5ee: 0x39d5, 0x5ef: 0x3b64, + 0x5f0: 0x39ce, 0x5f1: 0x3b5d, 0x5f2: 0x39e3, 0x5f3: 0x3b72, 0x5f4: 0x39dc, 0x5f5: 0x3b6b, + 0x5f6: 0x3a06, 0x5f7: 0x3b95, 0x5f8: 0x47f6, 0x5f9: 0x4887, 0x5fa: 0x3141, 0x5fb: 0x344d, + 0x5fc: 0x312d, 0x5fd: 0x3439, 0x5fe: 0x3a1b, 0x5ff: 0x3baa, + // Block 0x18, offset 0x600 + 0x600: 0x3a14, 0x601: 0x3ba3, 0x602: 0x3a29, 0x603: 0x3bb8, 0x604: 0x3a22, 0x605: 0x3bb1, + 0x606: 0x3a3e, 0x607: 0x3bcd, 0x608: 0x31d2, 0x609: 0x34de, 0x60a: 0x31e6, 0x60b: 0x34f2, + 0x60c: 0x4828, 0x60d: 0x48b9, 0x60e: 0x3277, 0x60f: 0x3588, 0x610: 0x3a61, 0x611: 0x3bf0, + 0x612: 0x3a5a, 0x613: 0x3be9, 0x614: 0x3a6f, 0x615: 0x3bfe, 0x616: 0x3a68, 0x617: 0x3bf7, + 0x618: 0x3aca, 0x619: 0x3c59, 0x61a: 0x3aae, 0x61b: 0x3c3d, 0x61c: 0x3aa7, 0x61d: 0x3c36, + 0x61e: 0x3abc, 0x61f: 0x3c4b, 0x620: 0x3ab5, 0x621: 0x3c44, 0x622: 0x3ac3, 0x623: 0x3c52, + 0x624: 0x3326, 0x625: 0x363c, 0x626: 0x3308, 0x627: 0x361e, 0x628: 0x3b25, 0x629: 0x3cb4, + 0x62a: 0x3b1e, 0x62b: 0x3cad, 0x62c: 0x3b33, 0x62d: 0x3cc2, 0x62e: 0x3b2c, 0x62f: 0x3cbb, + 0x630: 0x3b3a, 0x631: 0x3cc9, 0x632: 0x3371, 0x633: 0x368c, 0x634: 0x3399, 0x635: 0x36b9, + 0x636: 0x3394, 0x637: 0x36af, 0x638: 0x3380, 0x639: 0x369b, + // Block 0x19, offset 0x640 + 0x640: 0x4945, 0x641: 0x494b, 0x642: 0x4a5f, 0x643: 0x4a77, 0x644: 0x4a67, 0x645: 0x4a7f, + 0x646: 0x4a6f, 0x647: 0x4a87, 0x648: 0x48eb, 0x649: 0x48f1, 0x64a: 0x49cf, 0x64b: 0x49e7, + 0x64c: 0x49d7, 0x64d: 0x49ef, 0x64e: 0x49df, 0x64f: 0x49f7, 0x650: 0x4957, 0x651: 0x495d, + 0x652: 0x3ef9, 0x653: 0x3f09, 0x654: 0x3f01, 0x655: 0x3f11, + 0x658: 0x48f7, 0x659: 0x48fd, 0x65a: 0x3e29, 0x65b: 0x3e39, 0x65c: 0x3e31, 0x65d: 0x3e41, + 0x660: 0x496f, 0x661: 0x4975, 0x662: 0x4a8f, 0x663: 0x4aa7, + 0x664: 0x4a97, 0x665: 0x4aaf, 0x666: 0x4a9f, 0x667: 0x4ab7, 0x668: 0x4903, 0x669: 0x4909, + 0x66a: 0x49ff, 0x66b: 0x4a17, 0x66c: 0x4a07, 0x66d: 0x4a1f, 0x66e: 0x4a0f, 0x66f: 0x4a27, + 0x670: 0x4987, 0x671: 0x498d, 0x672: 0x3f59, 0x673: 0x3f71, 0x674: 0x3f61, 0x675: 0x3f79, + 0x676: 0x3f69, 0x677: 0x3f81, 0x678: 0x490f, 0x679: 0x4915, 0x67a: 0x3e59, 0x67b: 0x3e71, + 0x67c: 0x3e61, 0x67d: 0x3e79, 0x67e: 0x3e69, 0x67f: 0x3e81, + // Block 0x1a, offset 0x680 + 0x680: 0x4993, 0x681: 0x4999, 0x682: 0x3f89, 0x683: 0x3f99, 0x684: 0x3f91, 0x685: 0x3fa1, + 0x688: 0x491b, 0x689: 0x4921, 0x68a: 0x3e89, 0x68b: 0x3e99, + 0x68c: 0x3e91, 0x68d: 0x3ea1, 0x690: 0x49a5, 0x691: 0x49ab, + 0x692: 0x3fc1, 0x693: 0x3fd9, 0x694: 0x3fc9, 0x695: 0x3fe1, 0x696: 0x3fd1, 0x697: 0x3fe9, + 0x699: 0x4927, 0x69b: 0x3ea9, 0x69d: 0x3eb1, + 0x69f: 0x3eb9, 0x6a0: 0x49bd, 0x6a1: 0x49c3, 0x6a2: 0x4abf, 0x6a3: 0x4ad7, + 0x6a4: 0x4ac7, 0x6a5: 0x4adf, 0x6a6: 0x4acf, 0x6a7: 0x4ae7, 0x6a8: 0x492d, 0x6a9: 0x4933, + 0x6aa: 0x4a2f, 0x6ab: 0x4a47, 0x6ac: 0x4a37, 0x6ad: 0x4a4f, 0x6ae: 0x4a3f, 0x6af: 0x4a57, + 0x6b0: 0x4939, 0x6b1: 0x445f, 0x6b2: 0x37d2, 0x6b3: 0x4465, 0x6b4: 0x4963, 0x6b5: 0x446b, + 0x6b6: 0x37e4, 0x6b7: 0x4471, 0x6b8: 0x3802, 0x6b9: 0x4477, 0x6ba: 0x381a, 0x6bb: 0x447d, + 0x6bc: 0x49b1, 0x6bd: 0x4483, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x3ee1, 0x6c1: 0x3ee9, 0x6c2: 0x42c5, 0x6c3: 0x42e3, 0x6c4: 0x42cf, 0x6c5: 0x42ed, + 0x6c6: 0x42d9, 0x6c7: 0x42f7, 0x6c8: 0x3e19, 0x6c9: 0x3e21, 0x6ca: 0x4211, 0x6cb: 0x422f, + 0x6cc: 0x421b, 0x6cd: 0x4239, 0x6ce: 0x4225, 0x6cf: 0x4243, 0x6d0: 0x3f29, 0x6d1: 0x3f31, + 0x6d2: 0x4301, 0x6d3: 0x431f, 0x6d4: 0x430b, 0x6d5: 0x4329, 0x6d6: 0x4315, 0x6d7: 0x4333, + 0x6d8: 
0x3e49, 0x6d9: 0x3e51, 0x6da: 0x424d, 0x6db: 0x426b, 0x6dc: 0x4257, 0x6dd: 0x4275, + 0x6de: 0x4261, 0x6df: 0x427f, 0x6e0: 0x4001, 0x6e1: 0x4009, 0x6e2: 0x433d, 0x6e3: 0x435b, + 0x6e4: 0x4347, 0x6e5: 0x4365, 0x6e6: 0x4351, 0x6e7: 0x436f, 0x6e8: 0x3ec1, 0x6e9: 0x3ec9, + 0x6ea: 0x4289, 0x6eb: 0x42a7, 0x6ec: 0x4293, 0x6ed: 0x42b1, 0x6ee: 0x429d, 0x6ef: 0x42bb, + 0x6f0: 0x37c6, 0x6f1: 0x37c0, 0x6f2: 0x3ed1, 0x6f3: 0x37cc, 0x6f4: 0x3ed9, + 0x6f6: 0x4951, 0x6f7: 0x3ef1, 0x6f8: 0x3736, 0x6f9: 0x3730, 0x6fa: 0x3724, 0x6fb: 0x442f, + 0x6fc: 0x373c, 0x6fd: 0x43c8, 0x6fe: 0x0257, 0x6ff: 0x43c8, + // Block 0x1c, offset 0x700 + 0x700: 0x43e1, 0x701: 0x45c3, 0x702: 0x3f19, 0x703: 0x37de, 0x704: 0x3f21, + 0x706: 0x497b, 0x707: 0x3f39, 0x708: 0x3742, 0x709: 0x4435, 0x70a: 0x374e, 0x70b: 0x443b, + 0x70c: 0x375a, 0x70d: 0x45ca, 0x70e: 0x45d1, 0x70f: 0x45d8, 0x710: 0x37f6, 0x711: 0x37f0, + 0x712: 0x3f41, 0x713: 0x4625, 0x716: 0x37fc, 0x717: 0x3f51, + 0x718: 0x3772, 0x719: 0x376c, 0x71a: 0x3760, 0x71b: 0x4441, 0x71d: 0x45df, + 0x71e: 0x45e6, 0x71f: 0x45ed, 0x720: 0x382c, 0x721: 0x3826, 0x722: 0x3fa9, 0x723: 0x462d, + 0x724: 0x380e, 0x725: 0x3814, 0x726: 0x3832, 0x727: 0x3fb9, 0x728: 0x37a2, 0x729: 0x379c, + 0x72a: 0x3790, 0x72b: 0x444d, 0x72c: 0x378a, 0x72d: 0x45b5, 0x72e: 0x45bc, 0x72f: 0x0081, + 0x732: 0x3ff1, 0x733: 0x3838, 0x734: 0x3ff9, + 0x736: 0x49c9, 0x737: 0x4011, 0x738: 0x377e, 0x739: 0x4447, 0x73a: 0x37ae, 0x73b: 0x4459, + 0x73c: 0x37ba, 0x73d: 0x439b, 0x73e: 0x43cd, + // Block 0x1d, offset 0x740 + 0x740: 0x1d08, 0x741: 0x1d0c, 0x742: 0x0047, 0x743: 0x1d84, 0x745: 0x1d18, + 0x746: 0x1d1c, 0x747: 0x00ef, 0x749: 0x1d88, 0x74a: 0x008f, 0x74b: 0x0051, + 0x74c: 0x0051, 0x74d: 0x0051, 0x74e: 0x0091, 0x74f: 0x00e0, 0x750: 0x0053, 0x751: 0x0053, + 0x752: 0x0059, 0x753: 0x0099, 0x755: 0x005d, 0x756: 0x1abd, + 0x759: 0x0061, 0x75a: 0x0063, 0x75b: 0x0065, 0x75c: 0x0065, 0x75d: 0x0065, + 0x760: 0x1acf, 0x761: 0x1cf8, 0x762: 0x1ad8, + 0x764: 0x0075, 0x766: 0x023c, 0x768: 0x0075, + 0x76a: 0x0057, 0x76b: 0x4413, 0x76c: 0x0045, 0x76d: 0x0047, 0x76f: 0x008b, + 0x770: 0x004b, 0x771: 0x004d, 0x773: 0x005b, 0x774: 0x009f, 0x775: 0x0308, + 0x776: 0x030b, 0x777: 0x030e, 0x778: 0x0311, 0x779: 0x0093, 0x77b: 0x1cc8, + 0x77c: 0x026c, 0x77d: 0x0245, 0x77e: 0x01fd, 0x77f: 0x0224, + // Block 0x1e, offset 0x780 + 0x780: 0x055a, 0x785: 0x0049, + 0x786: 0x0089, 0x787: 0x008b, 0x788: 0x0093, 0x789: 0x0095, + 0x790: 0x235e, 0x791: 0x236a, + 0x792: 0x241e, 0x793: 0x2346, 0x794: 0x23ca, 0x795: 0x2352, 0x796: 0x23d0, 0x797: 0x23e8, + 0x798: 0x23f4, 0x799: 0x2358, 0x79a: 0x23fa, 0x79b: 0x2364, 0x79c: 0x23ee, 0x79d: 0x2400, + 0x79e: 0x2406, 0x79f: 0x1dec, 0x7a0: 0x0053, 0x7a1: 0x1a87, 0x7a2: 0x1cd4, 0x7a3: 0x1a90, + 0x7a4: 0x006d, 0x7a5: 0x1adb, 0x7a6: 0x1d00, 0x7a7: 0x1e78, 0x7a8: 0x1a93, 0x7a9: 0x0071, + 0x7aa: 0x1ae7, 0x7ab: 0x1d04, 0x7ac: 0x0059, 0x7ad: 0x0047, 0x7ae: 0x0049, 0x7af: 0x005b, + 0x7b0: 0x0093, 0x7b1: 0x1b14, 0x7b2: 0x1d48, 0x7b3: 0x1b1d, 0x7b4: 0x00ad, 0x7b5: 0x1b92, + 0x7b6: 0x1d7c, 0x7b7: 0x1e8c, 0x7b8: 0x1b20, 0x7b9: 0x00b1, 0x7ba: 0x1b95, 0x7bb: 0x1d80, + 0x7bc: 0x0099, 0x7bd: 0x0087, 0x7be: 0x0089, 0x7bf: 0x009b, + // Block 0x1f, offset 0x7c0 + 0x7c1: 0x3d47, 0x7c3: 0xa000, 0x7c4: 0x3d4e, 0x7c5: 0xa000, + 0x7c7: 0x3d55, 0x7c8: 0xa000, 0x7c9: 0x3d5c, + 0x7cd: 0xa000, + 0x7e0: 0x30a6, 0x7e1: 0xa000, 0x7e2: 0x3d6a, + 0x7e4: 0xa000, 0x7e5: 0xa000, + 0x7ed: 0x3d63, 0x7ee: 0x30a1, 0x7ef: 0x30ab, + 0x7f0: 0x3d71, 0x7f1: 0x3d78, 0x7f2: 0xa000, 0x7f3: 0xa000, 0x7f4: 0x3d7f, 0x7f5: 0x3d86, + 0x7f6: 0xa000, 0x7f7: 0xa000, 
0x7f8: 0x3d8d, 0x7f9: 0x3d94, 0x7fa: 0xa000, 0x7fb: 0xa000, + 0x7fc: 0xa000, 0x7fd: 0xa000, + // Block 0x20, offset 0x800 + 0x800: 0x3d9b, 0x801: 0x3da2, 0x802: 0xa000, 0x803: 0xa000, 0x804: 0x3db7, 0x805: 0x3dbe, + 0x806: 0xa000, 0x807: 0xa000, 0x808: 0x3dc5, 0x809: 0x3dcc, + 0x811: 0xa000, + 0x812: 0xa000, + 0x822: 0xa000, + 0x828: 0xa000, 0x829: 0xa000, + 0x82b: 0xa000, 0x82c: 0x3de1, 0x82d: 0x3de8, 0x82e: 0x3def, 0x82f: 0x3df6, + 0x832: 0xa000, 0x833: 0xa000, 0x834: 0xa000, 0x835: 0xa000, + // Block 0x21, offset 0x840 + 0x860: 0x0023, 0x861: 0x0025, 0x862: 0x0027, 0x863: 0x0029, + 0x864: 0x002b, 0x865: 0x002d, 0x866: 0x002f, 0x867: 0x0031, 0x868: 0x0033, 0x869: 0x19af, + 0x86a: 0x19b2, 0x86b: 0x19b5, 0x86c: 0x19b8, 0x86d: 0x19bb, 0x86e: 0x19be, 0x86f: 0x19c1, + 0x870: 0x19c4, 0x871: 0x19c7, 0x872: 0x19ca, 0x873: 0x19d3, 0x874: 0x1b98, 0x875: 0x1b9c, + 0x876: 0x1ba0, 0x877: 0x1ba4, 0x878: 0x1ba8, 0x879: 0x1bac, 0x87a: 0x1bb0, 0x87b: 0x1bb4, + 0x87c: 0x1bb8, 0x87d: 0x1db0, 0x87e: 0x1db5, 0x87f: 0x1dba, + // Block 0x22, offset 0x880 + 0x880: 0x1dbf, 0x881: 0x1dc4, 0x882: 0x1dc9, 0x883: 0x1dce, 0x884: 0x1dd3, 0x885: 0x1dd8, + 0x886: 0x1ddd, 0x887: 0x1de2, 0x888: 0x19ac, 0x889: 0x19d0, 0x88a: 0x19f4, 0x88b: 0x1a18, + 0x88c: 0x1a3c, 0x88d: 0x1a45, 0x88e: 0x1a4b, 0x88f: 0x1a51, 0x890: 0x1a57, 0x891: 0x1c90, + 0x892: 0x1c94, 0x893: 0x1c98, 0x894: 0x1c9c, 0x895: 0x1ca0, 0x896: 0x1ca4, 0x897: 0x1ca8, + 0x898: 0x1cac, 0x899: 0x1cb0, 0x89a: 0x1cb4, 0x89b: 0x1cb8, 0x89c: 0x1c24, 0x89d: 0x1c28, + 0x89e: 0x1c2c, 0x89f: 0x1c30, 0x8a0: 0x1c34, 0x8a1: 0x1c38, 0x8a2: 0x1c3c, 0x8a3: 0x1c40, + 0x8a4: 0x1c44, 0x8a5: 0x1c48, 0x8a6: 0x1c4c, 0x8a7: 0x1c50, 0x8a8: 0x1c54, 0x8a9: 0x1c58, + 0x8aa: 0x1c5c, 0x8ab: 0x1c60, 0x8ac: 0x1c64, 0x8ad: 0x1c68, 0x8ae: 0x1c6c, 0x8af: 0x1c70, + 0x8b0: 0x1c74, 0x8b1: 0x1c78, 0x8b2: 0x1c7c, 0x8b3: 0x1c80, 0x8b4: 0x1c84, 0x8b5: 0x1c88, + 0x8b6: 0x0043, 0x8b7: 0x0045, 0x8b8: 0x0047, 0x8b9: 0x0049, 0x8ba: 0x004b, 0x8bb: 0x004d, + 0x8bc: 0x004f, 0x8bd: 0x0051, 0x8be: 0x0053, 0x8bf: 0x0055, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x07ba, 0x8c1: 0x07de, 0x8c2: 0x07ea, 0x8c3: 0x07fa, 0x8c4: 0x0802, 0x8c5: 0x080e, + 0x8c6: 0x0816, 0x8c7: 0x081e, 0x8c8: 0x082a, 0x8c9: 0x087e, 0x8ca: 0x0896, 0x8cb: 0x08a6, + 0x8cc: 0x08b6, 0x8cd: 0x08c6, 0x8ce: 0x08d6, 0x8cf: 0x08f6, 0x8d0: 0x08fa, 0x8d1: 0x08fe, + 0x8d2: 0x0932, 0x8d3: 0x095a, 0x8d4: 0x096a, 0x8d5: 0x0972, 0x8d6: 0x0976, 0x8d7: 0x0982, + 0x8d8: 0x099e, 0x8d9: 0x09a2, 0x8da: 0x09ba, 0x8db: 0x09be, 0x8dc: 0x09c6, 0x8dd: 0x09d6, + 0x8de: 0x0a72, 0x8df: 0x0a86, 0x8e0: 0x0ac6, 0x8e1: 0x0ada, 0x8e2: 0x0ae2, 0x8e3: 0x0ae6, + 0x8e4: 0x0af6, 0x8e5: 0x0b12, 0x8e6: 0x0b3e, 0x8e7: 0x0b4a, 0x8e8: 0x0b6a, 0x8e9: 0x0b76, + 0x8ea: 0x0b7a, 0x8eb: 0x0b7e, 0x8ec: 0x0b96, 0x8ed: 0x0b9a, 0x8ee: 0x0bc6, 0x8ef: 0x0bd2, + 0x8f0: 0x0bda, 0x8f1: 0x0be2, 0x8f2: 0x0bf2, 0x8f3: 0x0bfa, 0x8f4: 0x0c02, 0x8f5: 0x0c2e, + 0x8f6: 0x0c32, 0x8f7: 0x0c3a, 0x8f8: 0x0c3e, 0x8f9: 0x0c46, 0x8fa: 0x0c4e, 0x8fb: 0x0c5e, + 0x8fc: 0x0c7a, 0x8fd: 0x0cf2, 0x8fe: 0x0d06, 0x8ff: 0x0d0a, + // Block 0x24, offset 0x900 + 0x900: 0x0d8a, 0x901: 0x0d8e, 0x902: 0x0da2, 0x903: 0x0da6, 0x904: 0x0dae, 0x905: 0x0db6, + 0x906: 0x0dbe, 0x907: 0x0dca, 0x908: 0x0df2, 0x909: 0x0e02, 0x90a: 0x0e16, 0x90b: 0x0e86, + 0x90c: 0x0e92, 0x90d: 0x0ea2, 0x90e: 0x0eae, 0x90f: 0x0eba, 0x910: 0x0ec2, 0x911: 0x0ec6, + 0x912: 0x0eca, 0x913: 0x0ece, 0x914: 0x0ed2, 0x915: 0x0f8a, 0x916: 0x0fd2, 0x917: 0x0fde, + 0x918: 0x0fe2, 0x919: 0x0fe6, 0x91a: 0x0fea, 0x91b: 0x0ff2, 0x91c: 0x0ff6, 0x91d: 0x100a, + 0x91e: 0x1026, 
0x91f: 0x102e, 0x920: 0x106e, 0x921: 0x1072, 0x922: 0x107a, 0x923: 0x107e, + 0x924: 0x1086, 0x925: 0x108a, 0x926: 0x10ae, 0x927: 0x10b2, 0x928: 0x10ce, 0x929: 0x10d2, + 0x92a: 0x10d6, 0x92b: 0x10da, 0x92c: 0x10ee, 0x92d: 0x1112, 0x92e: 0x1116, 0x92f: 0x111a, + 0x930: 0x113e, 0x931: 0x117e, 0x932: 0x1182, 0x933: 0x11a2, 0x934: 0x11b2, 0x935: 0x11ba, + 0x936: 0x11da, 0x937: 0x11fe, 0x938: 0x1242, 0x939: 0x124a, 0x93a: 0x125e, 0x93b: 0x126a, + 0x93c: 0x1272, 0x93d: 0x127a, 0x93e: 0x127e, 0x93f: 0x1282, + // Block 0x25, offset 0x940 + 0x940: 0x129a, 0x941: 0x129e, 0x942: 0x12ba, 0x943: 0x12c2, 0x944: 0x12ca, 0x945: 0x12ce, + 0x946: 0x12da, 0x947: 0x12e2, 0x948: 0x12e6, 0x949: 0x12ea, 0x94a: 0x12f2, 0x94b: 0x12f6, + 0x94c: 0x1396, 0x94d: 0x13aa, 0x94e: 0x13de, 0x94f: 0x13e2, 0x950: 0x13ea, 0x951: 0x1416, + 0x952: 0x141e, 0x953: 0x1426, 0x954: 0x142e, 0x955: 0x146a, 0x956: 0x146e, 0x957: 0x1476, + 0x958: 0x147a, 0x959: 0x147e, 0x95a: 0x14aa, 0x95b: 0x14ae, 0x95c: 0x14b6, 0x95d: 0x14ca, + 0x95e: 0x14ce, 0x95f: 0x14ea, 0x960: 0x14f2, 0x961: 0x14f6, 0x962: 0x151a, 0x963: 0x153a, + 0x964: 0x154e, 0x965: 0x1552, 0x966: 0x155a, 0x967: 0x1586, 0x968: 0x158a, 0x969: 0x159a, + 0x96a: 0x15be, 0x96b: 0x15ca, 0x96c: 0x15da, 0x96d: 0x15f2, 0x96e: 0x15fa, 0x96f: 0x15fe, + 0x970: 0x1602, 0x971: 0x1606, 0x972: 0x1612, 0x973: 0x1616, 0x974: 0x161e, 0x975: 0x163a, + 0x976: 0x163e, 0x977: 0x1642, 0x978: 0x165a, 0x979: 0x165e, 0x97a: 0x1666, 0x97b: 0x167a, + 0x97c: 0x167e, 0x97d: 0x1682, 0x97e: 0x168a, 0x97f: 0x168e, + // Block 0x26, offset 0x980 + 0x986: 0xa000, 0x98b: 0xa000, + 0x98c: 0x4049, 0x98d: 0xa000, 0x98e: 0x4051, 0x98f: 0xa000, 0x990: 0x4059, 0x991: 0xa000, + 0x992: 0x4061, 0x993: 0xa000, 0x994: 0x4069, 0x995: 0xa000, 0x996: 0x4071, 0x997: 0xa000, + 0x998: 0x4079, 0x999: 0xa000, 0x99a: 0x4081, 0x99b: 0xa000, 0x99c: 0x4089, 0x99d: 0xa000, + 0x99e: 0x4091, 0x99f: 0xa000, 0x9a0: 0x4099, 0x9a1: 0xa000, 0x9a2: 0x40a1, + 0x9a4: 0xa000, 0x9a5: 0x40a9, 0x9a6: 0xa000, 0x9a7: 0x40b1, 0x9a8: 0xa000, 0x9a9: 0x40b9, + 0x9af: 0xa000, + 0x9b0: 0x40c1, 0x9b1: 0x40c9, 0x9b2: 0xa000, 0x9b3: 0x40d1, 0x9b4: 0x40d9, 0x9b5: 0xa000, + 0x9b6: 0x40e1, 0x9b7: 0x40e9, 0x9b8: 0xa000, 0x9b9: 0x40f1, 0x9ba: 0x40f9, 0x9bb: 0xa000, + 0x9bc: 0x4101, 0x9bd: 0x4109, + // Block 0x27, offset 0x9c0 + 0x9d4: 0x4041, + 0x9d9: 0x9904, 0x9da: 0x9904, 0x9db: 0x441d, 0x9dc: 0x4423, 0x9dd: 0xa000, + 0x9de: 0x4111, 0x9df: 0x27e4, + 0x9e6: 0xa000, + 0x9eb: 0xa000, 0x9ec: 0x4121, 0x9ed: 0xa000, 0x9ee: 0x4129, 0x9ef: 0xa000, + 0x9f0: 0x4131, 0x9f1: 0xa000, 0x9f2: 0x4139, 0x9f3: 0xa000, 0x9f4: 0x4141, 0x9f5: 0xa000, + 0x9f6: 0x4149, 0x9f7: 0xa000, 0x9f8: 0x4151, 0x9f9: 0xa000, 0x9fa: 0x4159, 0x9fb: 0xa000, + 0x9fc: 0x4161, 0x9fd: 0xa000, 0x9fe: 0x4169, 0x9ff: 0xa000, + // Block 0x28, offset 0xa00 + 0xa00: 0x4171, 0xa01: 0xa000, 0xa02: 0x4179, 0xa04: 0xa000, 0xa05: 0x4181, + 0xa06: 0xa000, 0xa07: 0x4189, 0xa08: 0xa000, 0xa09: 0x4191, + 0xa0f: 0xa000, 0xa10: 0x4199, 0xa11: 0x41a1, + 0xa12: 0xa000, 0xa13: 0x41a9, 0xa14: 0x41b1, 0xa15: 0xa000, 0xa16: 0x41b9, 0xa17: 0x41c1, + 0xa18: 0xa000, 0xa19: 0x41c9, 0xa1a: 0x41d1, 0xa1b: 0xa000, 0xa1c: 0x41d9, 0xa1d: 0x41e1, + 0xa2f: 0xa000, + 0xa30: 0xa000, 0xa31: 0xa000, 0xa32: 0xa000, 0xa34: 0x4119, + 0xa37: 0x41e9, 0xa38: 0x41f1, 0xa39: 0x41f9, 0xa3a: 0x4201, + 0xa3d: 0xa000, 0xa3e: 0x4209, 0xa3f: 0x27f9, + // Block 0x29, offset 0xa40 + 0xa40: 0x045a, 0xa41: 0x041e, 0xa42: 0x0422, 0xa43: 0x0426, 0xa44: 0x046e, 0xa45: 0x042a, + 0xa46: 0x042e, 0xa47: 0x0432, 0xa48: 0x0436, 0xa49: 0x043a, 0xa4a: 0x043e, 0xa4b: 0x0442, 
+ 0xa4c: 0x0446, 0xa4d: 0x044a, 0xa4e: 0x044e, 0xa4f: 0x4afe, 0xa50: 0x4b04, 0xa51: 0x4b0a, + 0xa52: 0x4b10, 0xa53: 0x4b16, 0xa54: 0x4b1c, 0xa55: 0x4b22, 0xa56: 0x4b28, 0xa57: 0x4b2e, + 0xa58: 0x4b34, 0xa59: 0x4b3a, 0xa5a: 0x4b40, 0xa5b: 0x4b46, 0xa5c: 0x4b4c, 0xa5d: 0x4b52, + 0xa5e: 0x4b58, 0xa5f: 0x4b5e, 0xa60: 0x4b64, 0xa61: 0x4b6a, 0xa62: 0x4b70, 0xa63: 0x4b76, + 0xa64: 0x04b6, 0xa65: 0x0452, 0xa66: 0x0456, 0xa67: 0x04da, 0xa68: 0x04de, 0xa69: 0x04e2, + 0xa6a: 0x04e6, 0xa6b: 0x04ea, 0xa6c: 0x04ee, 0xa6d: 0x04f2, 0xa6e: 0x045e, 0xa6f: 0x04f6, + 0xa70: 0x04fa, 0xa71: 0x0462, 0xa72: 0x0466, 0xa73: 0x046a, 0xa74: 0x0472, 0xa75: 0x0476, + 0xa76: 0x047a, 0xa77: 0x047e, 0xa78: 0x0482, 0xa79: 0x0486, 0xa7a: 0x048a, 0xa7b: 0x048e, + 0xa7c: 0x0492, 0xa7d: 0x0496, 0xa7e: 0x049a, 0xa7f: 0x049e, + // Block 0x2a, offset 0xa80 + 0xa80: 0x04a2, 0xa81: 0x04a6, 0xa82: 0x04fe, 0xa83: 0x0502, 0xa84: 0x04aa, 0xa85: 0x04ae, + 0xa86: 0x04b2, 0xa87: 0x04ba, 0xa88: 0x04be, 0xa89: 0x04c2, 0xa8a: 0x04c6, 0xa8b: 0x04ca, + 0xa8c: 0x04ce, 0xa8d: 0x04d2, 0xa8e: 0x04d6, + 0xa92: 0x07ba, 0xa93: 0x0816, 0xa94: 0x07c6, 0xa95: 0x0a76, 0xa96: 0x07ca, 0xa97: 0x07e2, + 0xa98: 0x07ce, 0xa99: 0x108e, 0xa9a: 0x0802, 0xa9b: 0x07d6, 0xa9c: 0x07be, 0xa9d: 0x0afa, + 0xa9e: 0x0a8a, 0xa9f: 0x082a, + // Block 0x2b, offset 0xac0 + 0xac0: 0x2184, 0xac1: 0x218a, 0xac2: 0x2190, 0xac3: 0x2196, 0xac4: 0x219c, 0xac5: 0x21a2, + 0xac6: 0x21a8, 0xac7: 0x21ae, 0xac8: 0x21b4, 0xac9: 0x21ba, 0xaca: 0x21c0, 0xacb: 0x21c6, + 0xacc: 0x21cc, 0xacd: 0x21d2, 0xace: 0x285d, 0xacf: 0x2866, 0xad0: 0x286f, 0xad1: 0x2878, + 0xad2: 0x2881, 0xad3: 0x288a, 0xad4: 0x2893, 0xad5: 0x289c, 0xad6: 0x28a5, 0xad7: 0x28b7, + 0xad8: 0x28c0, 0xad9: 0x28c9, 0xada: 0x28d2, 0xadb: 0x28db, 0xadc: 0x28ae, 0xadd: 0x2ce3, + 0xade: 0x2c24, 0xae0: 0x21d8, 0xae1: 0x21f0, 0xae2: 0x21e4, 0xae3: 0x2238, + 0xae4: 0x21f6, 0xae5: 0x2214, 0xae6: 0x21de, 0xae7: 0x220e, 0xae8: 0x21ea, 0xae9: 0x2220, + 0xaea: 0x2250, 0xaeb: 0x226e, 0xaec: 0x2268, 0xaed: 0x225c, 0xaee: 0x22aa, 0xaef: 0x223e, + 0xaf0: 0x224a, 0xaf1: 0x2262, 0xaf2: 0x2256, 0xaf3: 0x2280, 0xaf4: 0x222c, 0xaf5: 0x2274, + 0xaf6: 0x229e, 0xaf7: 0x2286, 0xaf8: 0x221a, 0xaf9: 0x21fc, 0xafa: 0x2232, 0xafb: 0x2244, + 0xafc: 0x227a, 0xafd: 0x2202, 0xafe: 0x22a4, 0xaff: 0x2226, + // Block 0x2c, offset 0xb00 + 0xb00: 0x228c, 0xb01: 0x2208, 0xb02: 0x2292, 0xb03: 0x2298, 0xb04: 0x0a2a, 0xb05: 0x0bfe, + 0xb06: 0x0da2, 0xb07: 0x11c2, + 0xb10: 0x1cf4, 0xb11: 0x19d6, + 0xb12: 0x19d9, 0xb13: 0x19dc, 0xb14: 0x19df, 0xb15: 0x19e2, 0xb16: 0x19e5, 0xb17: 0x19e8, + 0xb18: 0x19eb, 0xb19: 0x19ee, 0xb1a: 0x19f7, 0xb1b: 0x19fa, 0xb1c: 0x19fd, 0xb1d: 0x1a00, + 0xb1e: 0x1a03, 0xb1f: 0x1a06, 0xb20: 0x0406, 0xb21: 0x040e, 0xb22: 0x0412, 0xb23: 0x041a, + 0xb24: 0x041e, 0xb25: 0x0422, 0xb26: 0x042a, 0xb27: 0x0432, 0xb28: 0x0436, 0xb29: 0x043e, + 0xb2a: 0x0442, 0xb2b: 0x0446, 0xb2c: 0x044a, 0xb2d: 0x044e, 0xb2e: 0x2f59, 0xb2f: 0x2f61, + 0xb30: 0x2f69, 0xb31: 0x2f71, 0xb32: 0x2f79, 0xb33: 0x2f81, 0xb34: 0x2f89, 0xb35: 0x2f91, + 0xb36: 0x2fa1, 0xb37: 0x2fa9, 0xb38: 0x2fb1, 0xb39: 0x2fb9, 0xb3a: 0x2fc1, 0xb3b: 0x2fc9, + 0xb3c: 0x3014, 0xb3d: 0x2fdc, 0xb3e: 0x2f99, + // Block 0x2d, offset 0xb40 + 0xb40: 0x07ba, 0xb41: 0x0816, 0xb42: 0x07c6, 0xb43: 0x0a76, 0xb44: 0x081a, 0xb45: 0x08aa, + 0xb46: 0x07c2, 0xb47: 0x08a6, 0xb48: 0x0806, 0xb49: 0x0982, 0xb4a: 0x0e02, 0xb4b: 0x0f8a, + 0xb4c: 0x0ed2, 0xb4d: 0x0e16, 0xb4e: 0x155a, 0xb4f: 0x0a86, 0xb50: 0x0dca, 0xb51: 0x0e46, + 0xb52: 0x0e06, 0xb53: 0x1146, 0xb54: 0x09f6, 0xb55: 0x0ffe, 0xb56: 0x1482, 0xb57: 0x115a, + 
0xb58: 0x093e, 0xb59: 0x118a, 0xb5a: 0x1096, 0xb5b: 0x0b12, 0xb5c: 0x150a, 0xb5d: 0x087a, + 0xb5e: 0x09a6, 0xb5f: 0x0ef2, 0xb60: 0x1622, 0xb61: 0x083e, 0xb62: 0x08ce, 0xb63: 0x0e96, + 0xb64: 0x07ca, 0xb65: 0x07e2, 0xb66: 0x07ce, 0xb67: 0x0bd6, 0xb68: 0x09ea, 0xb69: 0x097a, + 0xb6a: 0x0b52, 0xb6b: 0x0b46, 0xb6c: 0x10e6, 0xb6d: 0x083a, 0xb6e: 0x1496, 0xb6f: 0x0996, + 0xb70: 0x0aee, 0xb71: 0x1a09, 0xb72: 0x1a0c, 0xb73: 0x1a0f, 0xb74: 0x1a12, 0xb75: 0x1a1b, + 0xb76: 0x1a1e, 0xb77: 0x1a21, 0xb78: 0x1a24, 0xb79: 0x1a27, 0xb7a: 0x1a2a, 0xb7b: 0x1a2d, + 0xb7c: 0x1a30, 0xb7d: 0x1a33, 0xb7e: 0x1a36, 0xb7f: 0x1a3f, + // Block 0x2e, offset 0xb80 + 0xb80: 0x1df6, 0xb81: 0x1e05, 0xb82: 0x1e14, 0xb83: 0x1e23, 0xb84: 0x1e32, 0xb85: 0x1e41, + 0xb86: 0x1e50, 0xb87: 0x1e5f, 0xb88: 0x1e6e, 0xb89: 0x22bc, 0xb8a: 0x22ce, 0xb8b: 0x22e0, + 0xb8c: 0x1a81, 0xb8d: 0x1d34, 0xb8e: 0x1b02, 0xb8f: 0x1cd8, 0xb90: 0x05c6, 0xb91: 0x05ce, + 0xb92: 0x05d6, 0xb93: 0x05de, 0xb94: 0x05e6, 0xb95: 0x05ea, 0xb96: 0x05ee, 0xb97: 0x05f2, + 0xb98: 0x05f6, 0xb99: 0x05fa, 0xb9a: 0x05fe, 0xb9b: 0x0602, 0xb9c: 0x0606, 0xb9d: 0x060a, + 0xb9e: 0x060e, 0xb9f: 0x0612, 0xba0: 0x0616, 0xba1: 0x061e, 0xba2: 0x0622, 0xba3: 0x0626, + 0xba4: 0x062a, 0xba5: 0x062e, 0xba6: 0x0632, 0xba7: 0x0636, 0xba8: 0x063a, 0xba9: 0x063e, + 0xbaa: 0x0642, 0xbab: 0x0646, 0xbac: 0x064a, 0xbad: 0x064e, 0xbae: 0x0652, 0xbaf: 0x0656, + 0xbb0: 0x065a, 0xbb1: 0x065e, 0xbb2: 0x0662, 0xbb3: 0x066a, 0xbb4: 0x0672, 0xbb5: 0x067a, + 0xbb6: 0x067e, 0xbb7: 0x0682, 0xbb8: 0x0686, 0xbb9: 0x068a, 0xbba: 0x068e, 0xbbb: 0x0692, + 0xbbc: 0x0696, 0xbbd: 0x069a, 0xbbe: 0x069e, 0xbbf: 0x282a, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x2c43, 0xbc1: 0x2adf, 0xbc2: 0x2c53, 0xbc3: 0x29b7, 0xbc4: 0x3025, 0xbc5: 0x29c1, + 0xbc6: 0x29cb, 0xbc7: 0x3069, 0xbc8: 0x2aec, 0xbc9: 0x29d5, 0xbca: 0x29df, 0xbcb: 0x29e9, + 0xbcc: 0x2b13, 0xbcd: 0x2b20, 0xbce: 0x2af9, 0xbcf: 0x2b06, 0xbd0: 0x2fea, 0xbd1: 0x2b2d, + 0xbd2: 0x2b3a, 0xbd3: 0x2cf5, 0xbd4: 0x27eb, 0xbd5: 0x2d08, 0xbd6: 0x2d1b, 0xbd7: 0x2c63, + 0xbd8: 0x2b47, 0xbd9: 0x2d2e, 0xbda: 0x2d41, 0xbdb: 0x2b54, 0xbdc: 0x29f3, 0xbdd: 0x29fd, + 0xbde: 0x2ff8, 0xbdf: 0x2b61, 0xbe0: 0x2c73, 0xbe1: 0x3036, 0xbe2: 0x2a07, 0xbe3: 0x2a11, + 0xbe4: 0x2b6e, 0xbe5: 0x2a1b, 0xbe6: 0x2a25, 0xbe7: 0x2800, 0xbe8: 0x2807, 0xbe9: 0x2a2f, + 0xbea: 0x2a39, 0xbeb: 0x2d54, 0xbec: 0x2b7b, 0xbed: 0x2c83, 0xbee: 0x2d67, 0xbef: 0x2b88, + 0xbf0: 0x2a4d, 0xbf1: 0x2a43, 0xbf2: 0x307d, 0xbf3: 0x2b95, 0xbf4: 0x2d7a, 0xbf5: 0x2a57, + 0xbf6: 0x2c93, 0xbf7: 0x2a61, 0xbf8: 0x2baf, 0xbf9: 0x2a6b, 0xbfa: 0x2bbc, 0xbfb: 0x3047, + 0xbfc: 0x2ba2, 0xbfd: 0x2ca3, 0xbfe: 0x2bc9, 0xbff: 0x280e, + // Block 0x30, offset 0xc00 + 0xc00: 0x3058, 0xc01: 0x2a75, 0xc02: 0x2a7f, 0xc03: 0x2bd6, 0xc04: 0x2a89, 0xc05: 0x2a93, + 0xc06: 0x2a9d, 0xc07: 0x2cb3, 0xc08: 0x2be3, 0xc09: 0x2815, 0xc0a: 0x2d8d, 0xc0b: 0x2fd1, + 0xc0c: 0x2cc3, 0xc0d: 0x2bf0, 0xc0e: 0x3006, 0xc0f: 0x2aa7, 0xc10: 0x2ab1, 0xc11: 0x2bfd, + 0xc12: 0x281c, 0xc13: 0x2c0a, 0xc14: 0x2cd3, 0xc15: 0x2823, 0xc16: 0x2da0, 0xc17: 0x2abb, + 0xc18: 0x1de7, 0xc19: 0x1dfb, 0xc1a: 0x1e0a, 0xc1b: 0x1e19, 0xc1c: 0x1e28, 0xc1d: 0x1e37, + 0xc1e: 0x1e46, 0xc1f: 0x1e55, 0xc20: 0x1e64, 0xc21: 0x1e73, 0xc22: 0x22c2, 0xc23: 0x22d4, + 0xc24: 0x22e6, 0xc25: 0x22f2, 0xc26: 0x22fe, 0xc27: 0x230a, 0xc28: 0x2316, 0xc29: 0x2322, + 0xc2a: 0x232e, 0xc2b: 0x233a, 0xc2c: 0x2376, 0xc2d: 0x2382, 0xc2e: 0x238e, 0xc2f: 0x239a, + 0xc30: 0x23a6, 0xc31: 0x1d44, 0xc32: 0x1af6, 0xc33: 0x1a63, 0xc34: 0x1d14, 0xc35: 0x1b77, + 0xc36: 0x1b86, 0xc37: 0x1afc, 0xc38: 0x1d2c, 0xc39: 
0x1d30, 0xc3a: 0x1a8d, 0xc3b: 0x2838, + 0xc3c: 0x2846, 0xc3d: 0x2831, 0xc3e: 0x283f, 0xc3f: 0x2c17, + // Block 0x31, offset 0xc40 + 0xc40: 0x1b7a, 0xc41: 0x1b62, 0xc42: 0x1d90, 0xc43: 0x1b4a, 0xc44: 0x1b23, 0xc45: 0x1a96, + 0xc46: 0x1aa5, 0xc47: 0x1a75, 0xc48: 0x1d20, 0xc49: 0x1e82, 0xc4a: 0x1b7d, 0xc4b: 0x1b65, + 0xc4c: 0x1d94, 0xc4d: 0x1da0, 0xc4e: 0x1b56, 0xc4f: 0x1b2c, 0xc50: 0x1a84, 0xc51: 0x1d4c, + 0xc52: 0x1ce0, 0xc53: 0x1ccc, 0xc54: 0x1cfc, 0xc55: 0x1da4, 0xc56: 0x1b59, 0xc57: 0x1af9, + 0xc58: 0x1b2f, 0xc59: 0x1b0e, 0xc5a: 0x1b71, 0xc5b: 0x1da8, 0xc5c: 0x1b5c, 0xc5d: 0x1af0, + 0xc5e: 0x1b32, 0xc5f: 0x1d6c, 0xc60: 0x1d24, 0xc61: 0x1b44, 0xc62: 0x1d54, 0xc63: 0x1d70, + 0xc64: 0x1d28, 0xc65: 0x1b47, 0xc66: 0x1d58, 0xc67: 0x2418, 0xc68: 0x242c, 0xc69: 0x1ac6, + 0xc6a: 0x1d50, 0xc6b: 0x1ce4, 0xc6c: 0x1cd0, 0xc6d: 0x1d78, 0xc6e: 0x284d, 0xc6f: 0x28e4, + 0xc70: 0x1b89, 0xc71: 0x1b74, 0xc72: 0x1dac, 0xc73: 0x1b5f, 0xc74: 0x1b80, 0xc75: 0x1b68, + 0xc76: 0x1d98, 0xc77: 0x1b4d, 0xc78: 0x1b26, 0xc79: 0x1ab1, 0xc7a: 0x1b83, 0xc7b: 0x1b6b, + 0xc7c: 0x1d9c, 0xc7d: 0x1b50, 0xc7e: 0x1b29, 0xc7f: 0x1ab4, + // Block 0x32, offset 0xc80 + 0xc80: 0x1d5c, 0xc81: 0x1ce8, 0xc82: 0x1e7d, 0xc83: 0x1a66, 0xc84: 0x1aea, 0xc85: 0x1aed, + 0xc86: 0x2425, 0xc87: 0x1cc4, 0xc88: 0x1af3, 0xc89: 0x1a78, 0xc8a: 0x1b11, 0xc8b: 0x1a7b, + 0xc8c: 0x1b1a, 0xc8d: 0x1a99, 0xc8e: 0x1a9c, 0xc8f: 0x1b35, 0xc90: 0x1b3b, 0xc91: 0x1b3e, + 0xc92: 0x1d60, 0xc93: 0x1b41, 0xc94: 0x1b53, 0xc95: 0x1d68, 0xc96: 0x1d74, 0xc97: 0x1ac0, + 0xc98: 0x1e87, 0xc99: 0x1cec, 0xc9a: 0x1ac3, 0xc9b: 0x1b8c, 0xc9c: 0x1ad5, 0xc9d: 0x1ae4, + 0xc9e: 0x2412, 0xc9f: 0x240c, 0xca0: 0x1df1, 0xca1: 0x1e00, 0xca2: 0x1e0f, 0xca3: 0x1e1e, + 0xca4: 0x1e2d, 0xca5: 0x1e3c, 0xca6: 0x1e4b, 0xca7: 0x1e5a, 0xca8: 0x1e69, 0xca9: 0x22b6, + 0xcaa: 0x22c8, 0xcab: 0x22da, 0xcac: 0x22ec, 0xcad: 0x22f8, 0xcae: 0x2304, 0xcaf: 0x2310, + 0xcb0: 0x231c, 0xcb1: 0x2328, 0xcb2: 0x2334, 0xcb3: 0x2370, 0xcb4: 0x237c, 0xcb5: 0x2388, + 0xcb6: 0x2394, 0xcb7: 0x23a0, 0xcb8: 0x23ac, 0xcb9: 0x23b2, 0xcba: 0x23b8, 0xcbb: 0x23be, + 0xcbc: 0x23c4, 0xcbd: 0x23d6, 0xcbe: 0x23dc, 0xcbf: 0x1d40, + // Block 0x33, offset 0xcc0 + 0xcc0: 0x1472, 0xcc1: 0x0df6, 0xcc2: 0x14ce, 0xcc3: 0x149a, 0xcc4: 0x0f52, 0xcc5: 0x07e6, + 0xcc6: 0x09da, 0xcc7: 0x1726, 0xcc8: 0x1726, 0xcc9: 0x0b06, 0xcca: 0x155a, 0xccb: 0x0a3e, + 0xccc: 0x0b02, 0xccd: 0x0cea, 0xcce: 0x10ca, 0xccf: 0x125a, 0xcd0: 0x1392, 0xcd1: 0x13ce, + 0xcd2: 0x1402, 0xcd3: 0x1516, 0xcd4: 0x0e6e, 0xcd5: 0x0efa, 0xcd6: 0x0fa6, 0xcd7: 0x103e, + 0xcd8: 0x135a, 0xcd9: 0x1542, 0xcda: 0x166e, 0xcdb: 0x080a, 0xcdc: 0x09ae, 0xcdd: 0x0e82, + 0xcde: 0x0fca, 0xcdf: 0x138e, 0xce0: 0x16be, 0xce1: 0x0bae, 0xce2: 0x0f72, 0xce3: 0x137e, + 0xce4: 0x1412, 0xce5: 0x0d1e, 0xce6: 0x12b6, 0xce7: 0x13da, 0xce8: 0x0c1a, 0xce9: 0x0e0a, + 0xcea: 0x0f12, 0xceb: 0x1016, 0xcec: 0x1522, 0xced: 0x084a, 0xcee: 0x08e2, 0xcef: 0x094e, + 0xcf0: 0x0d86, 0xcf1: 0x0e7a, 0xcf2: 0x0fc6, 0xcf3: 0x10ea, 0xcf4: 0x1272, 0xcf5: 0x1386, + 0xcf6: 0x139e, 0xcf7: 0x14c2, 0xcf8: 0x15ea, 0xcf9: 0x169e, 0xcfa: 0x16ba, 0xcfb: 0x1126, + 0xcfc: 0x1166, 0xcfd: 0x121e, 0xcfe: 0x133e, 0xcff: 0x1576, + // Block 0x34, offset 0xd00 + 0xd00: 0x16c6, 0xd01: 0x1446, 0xd02: 0x0ac2, 0xd03: 0x0c36, 0xd04: 0x11d6, 0xd05: 0x1296, + 0xd06: 0x0ffa, 0xd07: 0x112e, 0xd08: 0x1492, 0xd09: 0x15e2, 0xd0a: 0x0abe, 0xd0b: 0x0b8a, + 0xd0c: 0x0e72, 0xd0d: 0x0f26, 0xd0e: 0x0f5a, 0xd0f: 0x120e, 0xd10: 0x1236, 0xd11: 0x15a2, + 0xd12: 0x094a, 0xd13: 0x12a2, 0xd14: 0x08ee, 0xd15: 0x08ea, 0xd16: 0x1192, 0xd17: 0x1222, + 0xd18: 0x1356, 
0xd19: 0x15aa, 0xd1a: 0x1462, 0xd1b: 0x0d22, 0xd1c: 0x0e6e, 0xd1d: 0x1452, + 0xd1e: 0x07f2, 0xd1f: 0x0b5e, 0xd20: 0x0c8e, 0xd21: 0x102a, 0xd22: 0x10aa, 0xd23: 0x096e, + 0xd24: 0x1136, 0xd25: 0x085a, 0xd26: 0x0c72, 0xd27: 0x07d2, 0xd28: 0x0ee6, 0xd29: 0x0d9e, + 0xd2a: 0x120a, 0xd2b: 0x09c2, 0xd2c: 0x0aae, 0xd2d: 0x10f6, 0xd2e: 0x135e, 0xd2f: 0x1436, + 0xd30: 0x0eb2, 0xd31: 0x14f2, 0xd32: 0x0ede, 0xd33: 0x0d32, 0xd34: 0x1316, 0xd35: 0x0d52, + 0xd36: 0x10a6, 0xd37: 0x0826, 0xd38: 0x08a2, 0xd39: 0x08e6, 0xd3a: 0x0e4e, 0xd3b: 0x11f6, + 0xd3c: 0x12ee, 0xd3d: 0x1442, 0xd3e: 0x1556, 0xd3f: 0x0956, + // Block 0x35, offset 0xd40 + 0xd40: 0x0a0a, 0xd41: 0x0b12, 0xd42: 0x0c2a, 0xd43: 0x0dba, 0xd44: 0x0f76, 0xd45: 0x113a, + 0xd46: 0x1592, 0xd47: 0x1676, 0xd48: 0x16ca, 0xd49: 0x16e2, 0xd4a: 0x0932, 0xd4b: 0x0dee, + 0xd4c: 0x0e9e, 0xd4d: 0x14e6, 0xd4e: 0x0bf6, 0xd4f: 0x0cd2, 0xd50: 0x0cee, 0xd51: 0x0d7e, + 0xd52: 0x0f66, 0xd53: 0x0fb2, 0xd54: 0x1062, 0xd55: 0x1186, 0xd56: 0x122a, 0xd57: 0x128e, + 0xd58: 0x14d6, 0xd59: 0x1366, 0xd5a: 0x14fe, 0xd5b: 0x157a, 0xd5c: 0x090a, 0xd5d: 0x0936, + 0xd5e: 0x0a1e, 0xd5f: 0x0fa2, 0xd60: 0x13ee, 0xd61: 0x1436, 0xd62: 0x0c16, 0xd63: 0x0c86, + 0xd64: 0x0d4a, 0xd65: 0x0eaa, 0xd66: 0x11d2, 0xd67: 0x101e, 0xd68: 0x0836, 0xd69: 0x0a7a, + 0xd6a: 0x0b5e, 0xd6b: 0x0bc2, 0xd6c: 0x0c92, 0xd6d: 0x103a, 0xd6e: 0x1056, 0xd6f: 0x1266, + 0xd70: 0x1286, 0xd71: 0x155e, 0xd72: 0x15de, 0xd73: 0x15ee, 0xd74: 0x162a, 0xd75: 0x084e, + 0xd76: 0x117a, 0xd77: 0x154a, 0xd78: 0x15c6, 0xd79: 0x0caa, 0xd7a: 0x0812, 0xd7b: 0x0872, + 0xd7c: 0x0b62, 0xd7d: 0x0b82, 0xd7e: 0x0daa, 0xd7f: 0x0e6e, + // Block 0x36, offset 0xd80 + 0xd80: 0x0fbe, 0xd81: 0x10c6, 0xd82: 0x1372, 0xd83: 0x1512, 0xd84: 0x171e, 0xd85: 0x0dde, + 0xd86: 0x159e, 0xd87: 0x092e, 0xd88: 0x0e2a, 0xd89: 0x0e36, 0xd8a: 0x0f0a, 0xd8b: 0x0f42, + 0xd8c: 0x1046, 0xd8d: 0x10a2, 0xd8e: 0x1122, 0xd8f: 0x1206, 0xd90: 0x1636, 0xd91: 0x08aa, + 0xd92: 0x0cfe, 0xd93: 0x15ae, 0xd94: 0x0862, 0xd95: 0x0ba6, 0xd96: 0x0f2a, 0xd97: 0x14da, + 0xd98: 0x0c62, 0xd99: 0x0cb2, 0xd9a: 0x0e3e, 0xd9b: 0x102a, 0xd9c: 0x15b6, 0xd9d: 0x0912, + 0xd9e: 0x09fa, 0xd9f: 0x0b92, 0xda0: 0x0dce, 0xda1: 0x0e1a, 0xda2: 0x0e5a, 0xda3: 0x0eee, + 0xda4: 0x1042, 0xda5: 0x10b6, 0xda6: 0x1252, 0xda7: 0x13f2, 0xda8: 0x13fe, 0xda9: 0x1552, + 0xdaa: 0x15d2, 0xdab: 0x097e, 0xdac: 0x0f46, 0xdad: 0x09fe, 0xdae: 0x0fc2, 0xdaf: 0x1066, + 0xdb0: 0x1382, 0xdb1: 0x15ba, 0xdb2: 0x16a6, 0xdb3: 0x16ce, 0xdb4: 0x0e32, 0xdb5: 0x0f22, + 0xdb6: 0x12be, 0xdb7: 0x11b2, 0xdb8: 0x11be, 0xdb9: 0x11e2, 0xdba: 0x1012, 0xdbb: 0x0f9a, + 0xdbc: 0x145e, 0xdbd: 0x082e, 0xdbe: 0x1326, 0xdbf: 0x0916, + // Block 0x37, offset 0xdc0 + 0xdc0: 0x0906, 0xdc1: 0x0c06, 0xdc2: 0x0d26, 0xdc3: 0x11ee, 0xdc4: 0x0b4e, 0xdc5: 0x0efe, + 0xdc6: 0x0dea, 0xdc7: 0x14e2, 0xdc8: 0x13e2, 0xdc9: 0x15a6, 0xdca: 0x141e, 0xdcb: 0x0c22, + 0xdcc: 0x0882, 0xdcd: 0x0a56, 0xdd0: 0x0aaa, + 0xdd2: 0x0dda, 0xdd5: 0x08f2, 0xdd6: 0x101a, 0xdd7: 0x10de, + 0xdd8: 0x1142, 0xdd9: 0x115e, 0xdda: 0x1162, 0xddb: 0x1176, 0xddc: 0x15f6, 0xddd: 0x11e6, + 0xdde: 0x126a, 0xde0: 0x138a, 0xde2: 0x144e, + 0xde5: 0x1502, 0xde6: 0x152e, + 0xdea: 0x164a, 0xdeb: 0x164e, 0xdec: 0x1652, 0xded: 0x16b6, 0xdee: 0x1526, 0xdef: 0x15c2, + 0xdf0: 0x0852, 0xdf1: 0x0876, 0xdf2: 0x088a, 0xdf3: 0x0946, 0xdf4: 0x0952, 0xdf5: 0x0992, + 0xdf6: 0x0a46, 0xdf7: 0x0a62, 0xdf8: 0x0a6a, 0xdf9: 0x0aa6, 0xdfa: 0x0ab2, 0xdfb: 0x0b8e, + 0xdfc: 0x0b96, 0xdfd: 0x0c9e, 0xdfe: 0x0cc6, 0xdff: 0x0cce, + // Block 0x38, offset 0xe00 + 0xe00: 0x0ce6, 0xe01: 0x0d92, 0xe02: 0x0dc2, 0xe03: 0x0de2, 0xe04: 
0x0e52, 0xe05: 0x0f16, + 0xe06: 0x0f32, 0xe07: 0x0f62, 0xe08: 0x0fb6, 0xe09: 0x0fd6, 0xe0a: 0x104a, 0xe0b: 0x112a, + 0xe0c: 0x1146, 0xe0d: 0x114e, 0xe0e: 0x114a, 0xe0f: 0x1152, 0xe10: 0x1156, 0xe11: 0x115a, + 0xe12: 0x116e, 0xe13: 0x1172, 0xe14: 0x1196, 0xe15: 0x11aa, 0xe16: 0x11c6, 0xe17: 0x122a, + 0xe18: 0x1232, 0xe19: 0x123a, 0xe1a: 0x124e, 0xe1b: 0x1276, 0xe1c: 0x12c6, 0xe1d: 0x12fa, + 0xe1e: 0x12fa, 0xe1f: 0x1362, 0xe20: 0x140a, 0xe21: 0x1422, 0xe22: 0x1456, 0xe23: 0x145a, + 0xe24: 0x149e, 0xe25: 0x14a2, 0xe26: 0x14fa, 0xe27: 0x1502, 0xe28: 0x15d6, 0xe29: 0x161a, + 0xe2a: 0x1632, 0xe2b: 0x0c96, 0xe2c: 0x184b, 0xe2d: 0x12de, + 0xe30: 0x07da, 0xe31: 0x08de, 0xe32: 0x089e, 0xe33: 0x0846, 0xe34: 0x0886, 0xe35: 0x08b2, + 0xe36: 0x0942, 0xe37: 0x095e, 0xe38: 0x0a46, 0xe39: 0x0a32, 0xe3a: 0x0a42, 0xe3b: 0x0a5e, + 0xe3c: 0x0aaa, 0xe3d: 0x0aba, 0xe3e: 0x0afe, 0xe3f: 0x0b0a, + // Block 0x39, offset 0xe40 + 0xe40: 0x0b26, 0xe41: 0x0b36, 0xe42: 0x0c1e, 0xe43: 0x0c26, 0xe44: 0x0c56, 0xe45: 0x0c76, + 0xe46: 0x0ca6, 0xe47: 0x0cbe, 0xe48: 0x0cae, 0xe49: 0x0cce, 0xe4a: 0x0cc2, 0xe4b: 0x0ce6, + 0xe4c: 0x0d02, 0xe4d: 0x0d5a, 0xe4e: 0x0d66, 0xe4f: 0x0d6e, 0xe50: 0x0d96, 0xe51: 0x0dda, + 0xe52: 0x0e0a, 0xe53: 0x0e0e, 0xe54: 0x0e22, 0xe55: 0x0ea2, 0xe56: 0x0eb2, 0xe57: 0x0f0a, + 0xe58: 0x0f56, 0xe59: 0x0f4e, 0xe5a: 0x0f62, 0xe5b: 0x0f7e, 0xe5c: 0x0fb6, 0xe5d: 0x110e, + 0xe5e: 0x0fda, 0xe5f: 0x100e, 0xe60: 0x101a, 0xe61: 0x105a, 0xe62: 0x1076, 0xe63: 0x109a, + 0xe64: 0x10be, 0xe65: 0x10c2, 0xe66: 0x10de, 0xe67: 0x10e2, 0xe68: 0x10f2, 0xe69: 0x1106, + 0xe6a: 0x1102, 0xe6b: 0x1132, 0xe6c: 0x11ae, 0xe6d: 0x11c6, 0xe6e: 0x11de, 0xe6f: 0x1216, + 0xe70: 0x122a, 0xe71: 0x1246, 0xe72: 0x1276, 0xe73: 0x132a, 0xe74: 0x1352, 0xe75: 0x13c6, + 0xe76: 0x140e, 0xe77: 0x141a, 0xe78: 0x1422, 0xe79: 0x143a, 0xe7a: 0x144e, 0xe7b: 0x143e, + 0xe7c: 0x1456, 0xe7d: 0x1452, 0xe7e: 0x144a, 0xe7f: 0x145a, + // Block 0x3a, offset 0xe80 + 0xe80: 0x1466, 0xe81: 0x14a2, 0xe82: 0x14de, 0xe83: 0x150e, 0xe84: 0x1546, 0xe85: 0x1566, + 0xe86: 0x15b2, 0xe87: 0x15d6, 0xe88: 0x15f6, 0xe89: 0x160a, 0xe8a: 0x161a, 0xe8b: 0x1626, + 0xe8c: 0x1632, 0xe8d: 0x1686, 0xe8e: 0x1726, 0xe8f: 0x17e2, 0xe90: 0x17dd, 0xe91: 0x180f, + 0xe92: 0x0702, 0xe93: 0x072a, 0xe94: 0x072e, 0xe95: 0x1891, 0xe96: 0x18be, 0xe97: 0x1936, + 0xe98: 0x1712, 0xe99: 0x1722, + // Block 0x3b, offset 0xec0 + 0xec0: 0x1b05, 0xec1: 0x1b08, 0xec2: 0x1b0b, 0xec3: 0x1d38, 0xec4: 0x1d3c, 0xec5: 0x1b8f, + 0xec6: 0x1b8f, + 0xed3: 0x1ea5, 0xed4: 0x1e96, 0xed5: 0x1e9b, 0xed6: 0x1eaa, 0xed7: 0x1ea0, + 0xedd: 0x44d1, + 0xede: 0x8116, 0xedf: 0x4543, 0xee0: 0x0320, 0xee1: 0x0308, 0xee2: 0x0311, 0xee3: 0x0314, + 0xee4: 0x0317, 0xee5: 0x031a, 0xee6: 0x031d, 0xee7: 0x0323, 0xee8: 0x0326, 0xee9: 0x0017, + 0xeea: 0x4531, 0xeeb: 0x4537, 0xeec: 0x4635, 0xeed: 0x463d, 0xeee: 0x4489, 0xeef: 0x448f, + 0xef0: 0x4495, 0xef1: 0x449b, 0xef2: 0x44a7, 0xef3: 0x44ad, 0xef4: 0x44b3, 0xef5: 0x44bf, + 0xef6: 0x44c5, 0xef8: 0x44cb, 0xef9: 0x44d7, 0xefa: 0x44dd, 0xefb: 0x44e3, + 0xefc: 0x44ef, 0xefe: 0x44f5, + // Block 0x3c, offset 0xf00 + 0xf00: 0x44fb, 0xf01: 0x4501, 0xf03: 0x4507, 0xf04: 0x450d, + 0xf06: 0x4519, 0xf07: 0x451f, 0xf08: 0x4525, 0xf09: 0x452b, 0xf0a: 0x453d, 0xf0b: 0x44b9, + 0xf0c: 0x44a1, 0xf0d: 0x44e9, 0xf0e: 0x4513, 0xf0f: 0x1eaf, 0xf10: 0x038c, 0xf11: 0x038c, + 0xf12: 0x0395, 0xf13: 0x0395, 0xf14: 0x0395, 0xf15: 0x0395, 0xf16: 0x0398, 0xf17: 0x0398, + 0xf18: 0x0398, 0xf19: 0x0398, 0xf1a: 0x039e, 0xf1b: 0x039e, 0xf1c: 0x039e, 0xf1d: 0x039e, + 0xf1e: 0x0392, 0xf1f: 0x0392, 0xf20: 0x0392, 0xf21: 
0x0392, 0xf22: 0x039b, 0xf23: 0x039b, + 0xf24: 0x039b, 0xf25: 0x039b, 0xf26: 0x038f, 0xf27: 0x038f, 0xf28: 0x038f, 0xf29: 0x038f, + 0xf2a: 0x03c2, 0xf2b: 0x03c2, 0xf2c: 0x03c2, 0xf2d: 0x03c2, 0xf2e: 0x03c5, 0xf2f: 0x03c5, + 0xf30: 0x03c5, 0xf31: 0x03c5, 0xf32: 0x03a4, 0xf33: 0x03a4, 0xf34: 0x03a4, 0xf35: 0x03a4, + 0xf36: 0x03a1, 0xf37: 0x03a1, 0xf38: 0x03a1, 0xf39: 0x03a1, 0xf3a: 0x03a7, 0xf3b: 0x03a7, + 0xf3c: 0x03a7, 0xf3d: 0x03a7, 0xf3e: 0x03aa, 0xf3f: 0x03aa, + // Block 0x3d, offset 0xf40 + 0xf40: 0x03aa, 0xf41: 0x03aa, 0xf42: 0x03b3, 0xf43: 0x03b3, 0xf44: 0x03b0, 0xf45: 0x03b0, + 0xf46: 0x03b6, 0xf47: 0x03b6, 0xf48: 0x03ad, 0xf49: 0x03ad, 0xf4a: 0x03bc, 0xf4b: 0x03bc, + 0xf4c: 0x03b9, 0xf4d: 0x03b9, 0xf4e: 0x03c8, 0xf4f: 0x03c8, 0xf50: 0x03c8, 0xf51: 0x03c8, + 0xf52: 0x03ce, 0xf53: 0x03ce, 0xf54: 0x03ce, 0xf55: 0x03ce, 0xf56: 0x03d4, 0xf57: 0x03d4, + 0xf58: 0x03d4, 0xf59: 0x03d4, 0xf5a: 0x03d1, 0xf5b: 0x03d1, 0xf5c: 0x03d1, 0xf5d: 0x03d1, + 0xf5e: 0x03d7, 0xf5f: 0x03d7, 0xf60: 0x03da, 0xf61: 0x03da, 0xf62: 0x03da, 0xf63: 0x03da, + 0xf64: 0x45af, 0xf65: 0x45af, 0xf66: 0x03e0, 0xf67: 0x03e0, 0xf68: 0x03e0, 0xf69: 0x03e0, + 0xf6a: 0x03dd, 0xf6b: 0x03dd, 0xf6c: 0x03dd, 0xf6d: 0x03dd, 0xf6e: 0x03fb, 0xf6f: 0x03fb, + 0xf70: 0x45a9, 0xf71: 0x45a9, + // Block 0x3e, offset 0xf80 + 0xf93: 0x03cb, 0xf94: 0x03cb, 0xf95: 0x03cb, 0xf96: 0x03cb, 0xf97: 0x03e9, + 0xf98: 0x03e9, 0xf99: 0x03e6, 0xf9a: 0x03e6, 0xf9b: 0x03ec, 0xf9c: 0x03ec, 0xf9d: 0x217f, + 0xf9e: 0x03f2, 0xf9f: 0x03f2, 0xfa0: 0x03e3, 0xfa1: 0x03e3, 0xfa2: 0x03ef, 0xfa3: 0x03ef, + 0xfa4: 0x03f8, 0xfa5: 0x03f8, 0xfa6: 0x03f8, 0xfa7: 0x03f8, 0xfa8: 0x0380, 0xfa9: 0x0380, + 0xfaa: 0x26da, 0xfab: 0x26da, 0xfac: 0x274a, 0xfad: 0x274a, 0xfae: 0x2719, 0xfaf: 0x2719, + 0xfb0: 0x2735, 0xfb1: 0x2735, 0xfb2: 0x272e, 0xfb3: 0x272e, 0xfb4: 0x273c, 0xfb5: 0x273c, + 0xfb6: 0x2743, 0xfb7: 0x2743, 0xfb8: 0x2743, 0xfb9: 0x2720, 0xfba: 0x2720, 0xfbb: 0x2720, + 0xfbc: 0x03f5, 0xfbd: 0x03f5, 0xfbe: 0x03f5, 0xfbf: 0x03f5, + // Block 0x3f, offset 0xfc0 + 0xfc0: 0x26e1, 0xfc1: 0x26e8, 0xfc2: 0x2704, 0xfc3: 0x2720, 0xfc4: 0x2727, 0xfc5: 0x1eb9, + 0xfc6: 0x1ebe, 0xfc7: 0x1ec3, 0xfc8: 0x1ed2, 0xfc9: 0x1ee1, 0xfca: 0x1ee6, 0xfcb: 0x1eeb, + 0xfcc: 0x1ef0, 0xfcd: 0x1ef5, 0xfce: 0x1f04, 0xfcf: 0x1f13, 0xfd0: 0x1f18, 0xfd1: 0x1f1d, + 0xfd2: 0x1f2c, 0xfd3: 0x1f3b, 0xfd4: 0x1f40, 0xfd5: 0x1f45, 0xfd6: 0x1f4a, 0xfd7: 0x1f59, + 0xfd8: 0x1f5e, 0xfd9: 0x1f6d, 0xfda: 0x1f72, 0xfdb: 0x1f77, 0xfdc: 0x1f86, 0xfdd: 0x1f8b, + 0xfde: 0x1f90, 0xfdf: 0x1f9a, 0xfe0: 0x1fd6, 0xfe1: 0x1fe5, 0xfe2: 0x1ff4, 0xfe3: 0x1ff9, + 0xfe4: 0x1ffe, 0xfe5: 0x2008, 0xfe6: 0x2017, 0xfe7: 0x201c, 0xfe8: 0x202b, 0xfe9: 0x2030, + 0xfea: 0x2035, 0xfeb: 0x2044, 0xfec: 0x2049, 0xfed: 0x2058, 0xfee: 0x205d, 0xfef: 0x2062, + 0xff0: 0x2067, 0xff1: 0x206c, 0xff2: 0x2071, 0xff3: 0x2076, 0xff4: 0x207b, 0xff5: 0x2080, + 0xff6: 0x2085, 0xff7: 0x208a, 0xff8: 0x208f, 0xff9: 0x2094, 0xffa: 0x2099, 0xffb: 0x209e, + 0xffc: 0x20a3, 0xffd: 0x20a8, 0xffe: 0x20ad, 0xfff: 0x20b7, + // Block 0x40, offset 0x1000 + 0x1000: 0x20bc, 0x1001: 0x20c1, 0x1002: 0x20c6, 0x1003: 0x20d0, 0x1004: 0x20d5, 0x1005: 0x20df, + 0x1006: 0x20e4, 0x1007: 0x20e9, 0x1008: 0x20ee, 0x1009: 0x20f3, 0x100a: 0x20f8, 0x100b: 0x20fd, + 0x100c: 0x2102, 0x100d: 0x2107, 0x100e: 0x2116, 0x100f: 0x2125, 0x1010: 0x212a, 0x1011: 0x212f, + 0x1012: 0x2134, 0x1013: 0x2139, 0x1014: 0x213e, 0x1015: 0x2148, 0x1016: 0x214d, 0x1017: 0x2152, + 0x1018: 0x2161, 0x1019: 0x2170, 0x101a: 0x2175, 0x101b: 0x4561, 0x101c: 0x4567, 0x101d: 0x459d, + 0x101e: 0x45f4, 0x101f: 0x45fb, 
0x1020: 0x4602, 0x1021: 0x4609, 0x1022: 0x4610, 0x1023: 0x4617, + 0x1024: 0x26f6, 0x1025: 0x26fd, 0x1026: 0x2704, 0x1027: 0x270b, 0x1028: 0x2720, 0x1029: 0x2727, + 0x102a: 0x1ec8, 0x102b: 0x1ecd, 0x102c: 0x1ed2, 0x102d: 0x1ed7, 0x102e: 0x1ee1, 0x102f: 0x1ee6, + 0x1030: 0x1efa, 0x1031: 0x1eff, 0x1032: 0x1f04, 0x1033: 0x1f09, 0x1034: 0x1f13, 0x1035: 0x1f18, + 0x1036: 0x1f22, 0x1037: 0x1f27, 0x1038: 0x1f2c, 0x1039: 0x1f31, 0x103a: 0x1f3b, 0x103b: 0x1f40, + 0x103c: 0x206c, 0x103d: 0x2071, 0x103e: 0x2080, 0x103f: 0x2085, + // Block 0x41, offset 0x1040 + 0x1040: 0x208a, 0x1041: 0x209e, 0x1042: 0x20a3, 0x1043: 0x20a8, 0x1044: 0x20ad, 0x1045: 0x20c6, + 0x1046: 0x20d0, 0x1047: 0x20d5, 0x1048: 0x20da, 0x1049: 0x20ee, 0x104a: 0x210c, 0x104b: 0x2111, + 0x104c: 0x2116, 0x104d: 0x211b, 0x104e: 0x2125, 0x104f: 0x212a, 0x1050: 0x459d, 0x1051: 0x2157, + 0x1052: 0x215c, 0x1053: 0x2161, 0x1054: 0x2166, 0x1055: 0x2170, 0x1056: 0x2175, 0x1057: 0x26e1, + 0x1058: 0x26e8, 0x1059: 0x26ef, 0x105a: 0x2704, 0x105b: 0x2712, 0x105c: 0x1eb9, 0x105d: 0x1ebe, + 0x105e: 0x1ec3, 0x105f: 0x1ed2, 0x1060: 0x1edc, 0x1061: 0x1eeb, 0x1062: 0x1ef0, 0x1063: 0x1ef5, + 0x1064: 0x1f04, 0x1065: 0x1f0e, 0x1066: 0x1f2c, 0x1067: 0x1f45, 0x1068: 0x1f4a, 0x1069: 0x1f59, + 0x106a: 0x1f5e, 0x106b: 0x1f6d, 0x106c: 0x1f77, 0x106d: 0x1f86, 0x106e: 0x1f8b, 0x106f: 0x1f90, + 0x1070: 0x1f9a, 0x1071: 0x1fd6, 0x1072: 0x1fdb, 0x1073: 0x1fe5, 0x1074: 0x1ff4, 0x1075: 0x1ff9, + 0x1076: 0x1ffe, 0x1077: 0x2008, 0x1078: 0x2017, 0x1079: 0x202b, 0x107a: 0x2030, 0x107b: 0x2035, + 0x107c: 0x2044, 0x107d: 0x2049, 0x107e: 0x2058, 0x107f: 0x205d, + // Block 0x42, offset 0x1080 + 0x1080: 0x2062, 0x1081: 0x2067, 0x1082: 0x2076, 0x1083: 0x207b, 0x1084: 0x208f, 0x1085: 0x2094, + 0x1086: 0x2099, 0x1087: 0x209e, 0x1088: 0x20a3, 0x1089: 0x20b7, 0x108a: 0x20bc, 0x108b: 0x20c1, + 0x108c: 0x20c6, 0x108d: 0x20cb, 0x108e: 0x20df, 0x108f: 0x20e4, 0x1090: 0x20e9, 0x1091: 0x20ee, + 0x1092: 0x20fd, 0x1093: 0x2102, 0x1094: 0x2107, 0x1095: 0x2116, 0x1096: 0x2120, 0x1097: 0x212f, + 0x1098: 0x2134, 0x1099: 0x4591, 0x109a: 0x2148, 0x109b: 0x214d, 0x109c: 0x2152, 0x109d: 0x2161, + 0x109e: 0x216b, 0x109f: 0x2704, 0x10a0: 0x2712, 0x10a1: 0x1ed2, 0x10a2: 0x1edc, 0x10a3: 0x1f04, + 0x10a4: 0x1f0e, 0x10a5: 0x1f2c, 0x10a6: 0x1f36, 0x10a7: 0x1f9a, 0x10a8: 0x1f9f, 0x10a9: 0x1fc2, + 0x10aa: 0x1fc7, 0x10ab: 0x209e, 0x10ac: 0x20a3, 0x10ad: 0x20c6, 0x10ae: 0x2116, 0x10af: 0x2120, + 0x10b0: 0x2161, 0x10b1: 0x216b, 0x10b2: 0x4645, 0x10b3: 0x464d, 0x10b4: 0x4655, 0x10b5: 0x2021, + 0x10b6: 0x2026, 0x10b7: 0x203a, 0x10b8: 0x203f, 0x10b9: 0x204e, 0x10ba: 0x2053, 0x10bb: 0x1fa4, + 0x10bc: 0x1fa9, 0x10bd: 0x1fcc, 0x10be: 0x1fd1, 0x10bf: 0x1f63, + // Block 0x43, offset 0x10c0 + 0x10c0: 0x1f68, 0x10c1: 0x1f4f, 0x10c2: 0x1f54, 0x10c3: 0x1f7c, 0x10c4: 0x1f81, 0x10c5: 0x1fea, + 0x10c6: 0x1fef, 0x10c7: 0x200d, 0x10c8: 0x2012, 0x10c9: 0x1fae, 0x10ca: 0x1fb3, 0x10cb: 0x1fb8, + 0x10cc: 0x1fc2, 0x10cd: 0x1fbd, 0x10ce: 0x1f95, 0x10cf: 0x1fe0, 0x10d0: 0x2003, 0x10d1: 0x2021, + 0x10d2: 0x2026, 0x10d3: 0x203a, 0x10d4: 0x203f, 0x10d5: 0x204e, 0x10d6: 0x2053, 0x10d7: 0x1fa4, + 0x10d8: 0x1fa9, 0x10d9: 0x1fcc, 0x10da: 0x1fd1, 0x10db: 0x1f63, 0x10dc: 0x1f68, 0x10dd: 0x1f4f, + 0x10de: 0x1f54, 0x10df: 0x1f7c, 0x10e0: 0x1f81, 0x10e1: 0x1fea, 0x10e2: 0x1fef, 0x10e3: 0x200d, + 0x10e4: 0x2012, 0x10e5: 0x1fae, 0x10e6: 0x1fb3, 0x10e7: 0x1fb8, 0x10e8: 0x1fc2, 0x10e9: 0x1fbd, + 0x10ea: 0x1f95, 0x10eb: 0x1fe0, 0x10ec: 0x2003, 0x10ed: 0x1fae, 0x10ee: 0x1fb3, 0x10ef: 0x1fb8, + 0x10f0: 0x1fc2, 0x10f1: 0x1f9f, 0x10f2: 0x1fc7, 0x10f3: 
0x201c, 0x10f4: 0x1f86, 0x10f5: 0x1f8b, + 0x10f6: 0x1f90, 0x10f7: 0x1fae, 0x10f8: 0x1fb3, 0x10f9: 0x1fb8, 0x10fa: 0x201c, 0x10fb: 0x202b, + 0x10fc: 0x4549, 0x10fd: 0x4549, + // Block 0x44, offset 0x1100 + 0x1110: 0x2441, 0x1111: 0x2456, + 0x1112: 0x2456, 0x1113: 0x245d, 0x1114: 0x2464, 0x1115: 0x2479, 0x1116: 0x2480, 0x1117: 0x2487, + 0x1118: 0x24aa, 0x1119: 0x24aa, 0x111a: 0x24cd, 0x111b: 0x24c6, 0x111c: 0x24e2, 0x111d: 0x24d4, + 0x111e: 0x24db, 0x111f: 0x24fe, 0x1120: 0x24fe, 0x1121: 0x24f7, 0x1122: 0x2505, 0x1123: 0x2505, + 0x1124: 0x252f, 0x1125: 0x252f, 0x1126: 0x254b, 0x1127: 0x2513, 0x1128: 0x2513, 0x1129: 0x250c, + 0x112a: 0x2521, 0x112b: 0x2521, 0x112c: 0x2528, 0x112d: 0x2528, 0x112e: 0x2552, 0x112f: 0x2560, + 0x1130: 0x2560, 0x1131: 0x2567, 0x1132: 0x2567, 0x1133: 0x256e, 0x1134: 0x2575, 0x1135: 0x257c, + 0x1136: 0x2583, 0x1137: 0x2583, 0x1138: 0x258a, 0x1139: 0x2598, 0x113a: 0x25a6, 0x113b: 0x259f, + 0x113c: 0x25ad, 0x113d: 0x25ad, 0x113e: 0x25c2, 0x113f: 0x25c9, + // Block 0x45, offset 0x1140 + 0x1140: 0x25fa, 0x1141: 0x2608, 0x1142: 0x2601, 0x1143: 0x25e5, 0x1144: 0x25e5, 0x1145: 0x260f, + 0x1146: 0x260f, 0x1147: 0x2616, 0x1148: 0x2616, 0x1149: 0x2640, 0x114a: 0x2647, 0x114b: 0x264e, + 0x114c: 0x2624, 0x114d: 0x2632, 0x114e: 0x2655, 0x114f: 0x265c, + 0x1152: 0x262b, 0x1153: 0x26b0, 0x1154: 0x26b7, 0x1155: 0x268d, 0x1156: 0x2694, 0x1157: 0x2678, + 0x1158: 0x2678, 0x1159: 0x267f, 0x115a: 0x26a9, 0x115b: 0x26a2, 0x115c: 0x26cc, 0x115d: 0x26cc, + 0x115e: 0x243a, 0x115f: 0x244f, 0x1160: 0x2448, 0x1161: 0x2472, 0x1162: 0x246b, 0x1163: 0x2495, + 0x1164: 0x248e, 0x1165: 0x24b8, 0x1166: 0x249c, 0x1167: 0x24b1, 0x1168: 0x24e9, 0x1169: 0x2536, + 0x116a: 0x251a, 0x116b: 0x2559, 0x116c: 0x25f3, 0x116d: 0x261d, 0x116e: 0x26c5, 0x116f: 0x26be, + 0x1170: 0x26d3, 0x1171: 0x266a, 0x1172: 0x25d0, 0x1173: 0x269b, 0x1174: 0x25c2, 0x1175: 0x25fa, + 0x1176: 0x2591, 0x1177: 0x25de, 0x1178: 0x2671, 0x1179: 0x2663, 0x117a: 0x25ec, 0x117b: 0x25d7, + 0x117c: 0x25ec, 0x117d: 0x2671, 0x117e: 0x24a3, 0x117f: 0x24bf, + // Block 0x46, offset 0x1180 + 0x1180: 0x2639, 0x1181: 0x25b4, 0x1182: 0x2433, 0x1183: 0x25d7, 0x1184: 0x257c, 0x1185: 0x254b, + 0x1186: 0x24f0, 0x1187: 0x2686, + 0x11b0: 0x2544, 0x11b1: 0x25bb, 0x11b2: 0x28f6, 0x11b3: 0x28ed, 0x11b4: 0x2923, 0x11b5: 0x2911, + 0x11b6: 0x28ff, 0x11b7: 0x291a, 0x11b8: 0x292c, 0x11b9: 0x253d, 0x11ba: 0x2db3, 0x11bb: 0x2c33, + 0x11bc: 0x2908, + // Block 0x47, offset 0x11c0 + 0x11d0: 0x0019, 0x11d1: 0x057e, + 0x11d2: 0x0582, 0x11d3: 0x0035, 0x11d4: 0x0037, 0x11d5: 0x0003, 0x11d6: 0x003f, 0x11d7: 0x05ba, + 0x11d8: 0x05be, 0x11d9: 0x1c8c, + 0x11e0: 0x8133, 0x11e1: 0x8133, 0x11e2: 0x8133, 0x11e3: 0x8133, + 0x11e4: 0x8133, 0x11e5: 0x8133, 0x11e6: 0x8133, 0x11e7: 0x812e, 0x11e8: 0x812e, 0x11e9: 0x812e, + 0x11ea: 0x812e, 0x11eb: 0x812e, 0x11ec: 0x812e, 0x11ed: 0x812e, 0x11ee: 0x8133, 0x11ef: 0x8133, + 0x11f0: 0x19a0, 0x11f1: 0x053a, 0x11f2: 0x0536, 0x11f3: 0x007f, 0x11f4: 0x007f, 0x11f5: 0x0011, + 0x11f6: 0x0013, 0x11f7: 0x00b7, 0x11f8: 0x00bb, 0x11f9: 0x05b2, 0x11fa: 0x05b6, 0x11fb: 0x05a6, + 0x11fc: 0x05aa, 0x11fd: 0x058e, 0x11fe: 0x0592, 0x11ff: 0x0586, + // Block 0x48, offset 0x1200 + 0x1200: 0x058a, 0x1201: 0x0596, 0x1202: 0x059a, 0x1203: 0x059e, 0x1204: 0x05a2, + 0x1207: 0x0077, 0x1208: 0x007b, 0x1209: 0x43aa, 0x120a: 0x43aa, 0x120b: 0x43aa, + 0x120c: 0x43aa, 0x120d: 0x007f, 0x120e: 0x007f, 0x120f: 0x007f, 0x1210: 0x0019, 0x1211: 0x057e, + 0x1212: 0x001d, 0x1214: 0x0037, 0x1215: 0x0035, 0x1216: 0x003f, 0x1217: 0x0003, + 0x1218: 0x053a, 0x1219: 0x0011, 0x121a: 
0x0013, 0x121b: 0x00b7, 0x121c: 0x00bb, 0x121d: 0x05b2, + 0x121e: 0x05b6, 0x121f: 0x0007, 0x1220: 0x000d, 0x1221: 0x0015, 0x1222: 0x0017, 0x1223: 0x001b, + 0x1224: 0x0039, 0x1225: 0x003d, 0x1226: 0x003b, 0x1228: 0x0079, 0x1229: 0x0009, + 0x122a: 0x000b, 0x122b: 0x0041, + 0x1230: 0x43eb, 0x1231: 0x456d, 0x1232: 0x43f0, 0x1234: 0x43f5, + 0x1236: 0x43fa, 0x1237: 0x4573, 0x1238: 0x43ff, 0x1239: 0x4579, 0x123a: 0x4404, 0x123b: 0x457f, + 0x123c: 0x4409, 0x123d: 0x4585, 0x123e: 0x440e, 0x123f: 0x458b, + // Block 0x49, offset 0x1240 + 0x1240: 0x0329, 0x1241: 0x454f, 0x1242: 0x454f, 0x1243: 0x4555, 0x1244: 0x4555, 0x1245: 0x4597, + 0x1246: 0x4597, 0x1247: 0x455b, 0x1248: 0x455b, 0x1249: 0x45a3, 0x124a: 0x45a3, 0x124b: 0x45a3, + 0x124c: 0x45a3, 0x124d: 0x032c, 0x124e: 0x032c, 0x124f: 0x032f, 0x1250: 0x032f, 0x1251: 0x032f, + 0x1252: 0x032f, 0x1253: 0x0332, 0x1254: 0x0332, 0x1255: 0x0335, 0x1256: 0x0335, 0x1257: 0x0335, + 0x1258: 0x0335, 0x1259: 0x0338, 0x125a: 0x0338, 0x125b: 0x0338, 0x125c: 0x0338, 0x125d: 0x033b, + 0x125e: 0x033b, 0x125f: 0x033b, 0x1260: 0x033b, 0x1261: 0x033e, 0x1262: 0x033e, 0x1263: 0x033e, + 0x1264: 0x033e, 0x1265: 0x0341, 0x1266: 0x0341, 0x1267: 0x0341, 0x1268: 0x0341, 0x1269: 0x0344, + 0x126a: 0x0344, 0x126b: 0x0347, 0x126c: 0x0347, 0x126d: 0x034a, 0x126e: 0x034a, 0x126f: 0x034d, + 0x1270: 0x034d, 0x1271: 0x0350, 0x1272: 0x0350, 0x1273: 0x0350, 0x1274: 0x0350, 0x1275: 0x0353, + 0x1276: 0x0353, 0x1277: 0x0353, 0x1278: 0x0353, 0x1279: 0x0356, 0x127a: 0x0356, 0x127b: 0x0356, + 0x127c: 0x0356, 0x127d: 0x0359, 0x127e: 0x0359, 0x127f: 0x0359, + // Block 0x4a, offset 0x1280 + 0x1280: 0x0359, 0x1281: 0x035c, 0x1282: 0x035c, 0x1283: 0x035c, 0x1284: 0x035c, 0x1285: 0x035f, + 0x1286: 0x035f, 0x1287: 0x035f, 0x1288: 0x035f, 0x1289: 0x0362, 0x128a: 0x0362, 0x128b: 0x0362, + 0x128c: 0x0362, 0x128d: 0x0365, 0x128e: 0x0365, 0x128f: 0x0365, 0x1290: 0x0365, 0x1291: 0x0368, + 0x1292: 0x0368, 0x1293: 0x0368, 0x1294: 0x0368, 0x1295: 0x036b, 0x1296: 0x036b, 0x1297: 0x036b, + 0x1298: 0x036b, 0x1299: 0x036e, 0x129a: 0x036e, 0x129b: 0x036e, 0x129c: 0x036e, 0x129d: 0x0371, + 0x129e: 0x0371, 0x129f: 0x0371, 0x12a0: 0x0371, 0x12a1: 0x0374, 0x12a2: 0x0374, 0x12a3: 0x0374, + 0x12a4: 0x0374, 0x12a5: 0x0377, 0x12a6: 0x0377, 0x12a7: 0x0377, 0x12a8: 0x0377, 0x12a9: 0x037a, + 0x12aa: 0x037a, 0x12ab: 0x037a, 0x12ac: 0x037a, 0x12ad: 0x037d, 0x12ae: 0x037d, 0x12af: 0x0380, + 0x12b0: 0x0380, 0x12b1: 0x0383, 0x12b2: 0x0383, 0x12b3: 0x0383, 0x12b4: 0x0383, 0x12b5: 0x2f41, + 0x12b6: 0x2f41, 0x12b7: 0x2f49, 0x12b8: 0x2f49, 0x12b9: 0x2f51, 0x12ba: 0x2f51, 0x12bb: 0x20b2, + 0x12bc: 0x20b2, + // Block 0x4b, offset 0x12c0 + 0x12c0: 0x0081, 0x12c1: 0x0083, 0x12c2: 0x0085, 0x12c3: 0x0087, 0x12c4: 0x0089, 0x12c5: 0x008b, + 0x12c6: 0x008d, 0x12c7: 0x008f, 0x12c8: 0x0091, 0x12c9: 0x0093, 0x12ca: 0x0095, 0x12cb: 0x0097, + 0x12cc: 0x0099, 0x12cd: 0x009b, 0x12ce: 0x009d, 0x12cf: 0x009f, 0x12d0: 0x00a1, 0x12d1: 0x00a3, + 0x12d2: 0x00a5, 0x12d3: 0x00a7, 0x12d4: 0x00a9, 0x12d5: 0x00ab, 0x12d6: 0x00ad, 0x12d7: 0x00af, + 0x12d8: 0x00b1, 0x12d9: 0x00b3, 0x12da: 0x00b5, 0x12db: 0x00b7, 0x12dc: 0x00b9, 0x12dd: 0x00bb, + 0x12de: 0x00bd, 0x12df: 0x056e, 0x12e0: 0x0572, 0x12e1: 0x0582, 0x12e2: 0x0596, 0x12e3: 0x059a, + 0x12e4: 0x057e, 0x12e5: 0x06a6, 0x12e6: 0x069e, 0x12e7: 0x05c2, 0x12e8: 0x05ca, 0x12e9: 0x05d2, + 0x12ea: 0x05da, 0x12eb: 0x05e2, 0x12ec: 0x0666, 0x12ed: 0x066e, 0x12ee: 0x0676, 0x12ef: 0x061a, + 0x12f0: 0x06aa, 0x12f1: 0x05c6, 0x12f2: 0x05ce, 0x12f3: 0x05d6, 0x12f4: 0x05de, 0x12f5: 0x05e6, + 0x12f6: 0x05ea, 0x12f7: 0x05ee, 
0x12f8: 0x05f2, 0x12f9: 0x05f6, 0x12fa: 0x05fa, 0x12fb: 0x05fe, + 0x12fc: 0x0602, 0x12fd: 0x0606, 0x12fe: 0x060a, 0x12ff: 0x060e, + // Block 0x4c, offset 0x1300 + 0x1300: 0x0612, 0x1301: 0x0616, 0x1302: 0x061e, 0x1303: 0x0622, 0x1304: 0x0626, 0x1305: 0x062a, + 0x1306: 0x062e, 0x1307: 0x0632, 0x1308: 0x0636, 0x1309: 0x063a, 0x130a: 0x063e, 0x130b: 0x0642, + 0x130c: 0x0646, 0x130d: 0x064a, 0x130e: 0x064e, 0x130f: 0x0652, 0x1310: 0x0656, 0x1311: 0x065a, + 0x1312: 0x065e, 0x1313: 0x0662, 0x1314: 0x066a, 0x1315: 0x0672, 0x1316: 0x067a, 0x1317: 0x067e, + 0x1318: 0x0682, 0x1319: 0x0686, 0x131a: 0x068a, 0x131b: 0x068e, 0x131c: 0x0692, 0x131d: 0x06a2, + 0x131e: 0x4bb9, 0x131f: 0x4bbf, 0x1320: 0x04b6, 0x1321: 0x0406, 0x1322: 0x040a, 0x1323: 0x4b7c, + 0x1324: 0x040e, 0x1325: 0x4b82, 0x1326: 0x4b88, 0x1327: 0x0412, 0x1328: 0x0416, 0x1329: 0x041a, + 0x132a: 0x4b8e, 0x132b: 0x4b94, 0x132c: 0x4b9a, 0x132d: 0x4ba0, 0x132e: 0x4ba6, 0x132f: 0x4bac, + 0x1330: 0x045a, 0x1331: 0x041e, 0x1332: 0x0422, 0x1333: 0x0426, 0x1334: 0x046e, 0x1335: 0x042a, + 0x1336: 0x042e, 0x1337: 0x0432, 0x1338: 0x0436, 0x1339: 0x043a, 0x133a: 0x043e, 0x133b: 0x0442, + 0x133c: 0x0446, 0x133d: 0x044a, 0x133e: 0x044e, + // Block 0x4d, offset 0x1340 + 0x1342: 0x4afe, 0x1343: 0x4b04, 0x1344: 0x4b0a, 0x1345: 0x4b10, + 0x1346: 0x4b16, 0x1347: 0x4b1c, 0x134a: 0x4b22, 0x134b: 0x4b28, + 0x134c: 0x4b2e, 0x134d: 0x4b34, 0x134e: 0x4b3a, 0x134f: 0x4b40, + 0x1352: 0x4b46, 0x1353: 0x4b4c, 0x1354: 0x4b52, 0x1355: 0x4b58, 0x1356: 0x4b5e, 0x1357: 0x4b64, + 0x135a: 0x4b6a, 0x135b: 0x4b70, 0x135c: 0x4b76, + 0x1360: 0x00bf, 0x1361: 0x00c2, 0x1362: 0x00cb, 0x1363: 0x43a5, + 0x1364: 0x00c8, 0x1365: 0x00c5, 0x1366: 0x053e, 0x1368: 0x0562, 0x1369: 0x0542, + 0x136a: 0x0546, 0x136b: 0x054a, 0x136c: 0x054e, 0x136d: 0x0566, 0x136e: 0x056a, + // Block 0x4e, offset 0x1380 + 0x1381: 0x01f1, 0x1382: 0x01f4, 0x1383: 0x00d4, 0x1384: 0x01be, 0x1385: 0x010d, + 0x1387: 0x01d3, 0x1388: 0x174e, 0x1389: 0x01d9, 0x138a: 0x01d6, 0x138b: 0x0116, + 0x138c: 0x0119, 0x138d: 0x0526, 0x138e: 0x011c, 0x138f: 0x0128, 0x1390: 0x01e5, 0x1391: 0x013a, + 0x1392: 0x0134, 0x1393: 0x012e, 0x1394: 0x01c1, 0x1395: 0x00e0, 0x1396: 0x01c4, 0x1397: 0x0143, + 0x1398: 0x0194, 0x1399: 0x01e8, 0x139a: 0x01eb, 0x139b: 0x0152, 0x139c: 0x1756, 0x139d: 0x1742, + 0x139e: 0x0158, 0x139f: 0x175b, 0x13a0: 0x01a9, 0x13a1: 0x1760, 0x13a2: 0x00da, 0x13a3: 0x0170, + 0x13a4: 0x0173, 0x13a5: 0x00a3, 0x13a6: 0x017c, 0x13a7: 0x1765, 0x13a8: 0x0182, 0x13a9: 0x0185, + 0x13aa: 0x0188, 0x13ab: 0x01e2, 0x13ac: 0x01dc, 0x13ad: 0x1752, 0x13ae: 0x01df, 0x13af: 0x0197, + 0x13b0: 0x0576, 0x13b2: 0x01ac, 0x13b3: 0x01cd, 0x13b4: 0x01d0, 0x13b5: 0x01bb, + 0x13b6: 0x00f5, 0x13b7: 0x00f8, 0x13b8: 0x00fb, 0x13b9: 0x176a, 0x13ba: 0x176f, + // Block 0x4f, offset 0x13c0 + 0x13c0: 0x0063, 0x13c1: 0x0065, 0x13c2: 0x0067, 0x13c3: 0x0069, 0x13c4: 0x006b, 0x13c5: 0x006d, + 0x13c6: 0x006f, 0x13c7: 0x0071, 0x13c8: 0x0073, 0x13c9: 0x0075, 0x13ca: 0x0083, 0x13cb: 0x0085, + 0x13cc: 0x0087, 0x13cd: 0x0089, 0x13ce: 0x008b, 0x13cf: 0x008d, 0x13d0: 0x008f, 0x13d1: 0x0091, + 0x13d2: 0x0093, 0x13d3: 0x0095, 0x13d4: 0x0097, 0x13d5: 0x0099, 0x13d6: 0x009b, 0x13d7: 0x009d, + 0x13d8: 0x009f, 0x13d9: 0x00a1, 0x13da: 0x00a3, 0x13db: 0x00a5, 0x13dc: 0x00a7, 0x13dd: 0x00a9, + 0x13de: 0x00ab, 0x13df: 0x00ad, 0x13e0: 0x00af, 0x13e1: 0x00b1, 0x13e2: 0x00b3, 0x13e3: 0x00b5, + 0x13e4: 0x00e3, 0x13e5: 0x0101, 0x13e8: 0x01f7, 0x13e9: 0x01fa, + 0x13ea: 0x01fd, 0x13eb: 0x0200, 0x13ec: 0x0203, 0x13ed: 0x0206, 0x13ee: 0x0209, 0x13ef: 0x020c, + 0x13f0: 0x020f, 0x13f1: 
0x0212, 0x13f2: 0x0215, 0x13f3: 0x0218, 0x13f4: 0x021b, 0x13f5: 0x021e, + 0x13f6: 0x0221, 0x13f7: 0x0224, 0x13f8: 0x0227, 0x13f9: 0x020c, 0x13fa: 0x022a, 0x13fb: 0x022d, + 0x13fc: 0x0230, 0x13fd: 0x0233, 0x13fe: 0x0236, 0x13ff: 0x0239, + // Block 0x50, offset 0x1400 + 0x1400: 0x0281, 0x1401: 0x0284, 0x1402: 0x0287, 0x1403: 0x0552, 0x1404: 0x024b, 0x1405: 0x0254, + 0x1406: 0x025a, 0x1407: 0x027e, 0x1408: 0x026f, 0x1409: 0x026c, 0x140a: 0x028a, 0x140b: 0x028d, + 0x140e: 0x0021, 0x140f: 0x0023, 0x1410: 0x0025, 0x1411: 0x0027, + 0x1412: 0x0029, 0x1413: 0x002b, 0x1414: 0x002d, 0x1415: 0x002f, 0x1416: 0x0031, 0x1417: 0x0033, + 0x1418: 0x0021, 0x1419: 0x0023, 0x141a: 0x0025, 0x141b: 0x0027, 0x141c: 0x0029, 0x141d: 0x002b, + 0x141e: 0x002d, 0x141f: 0x002f, 0x1420: 0x0031, 0x1421: 0x0033, 0x1422: 0x0021, 0x1423: 0x0023, + 0x1424: 0x0025, 0x1425: 0x0027, 0x1426: 0x0029, 0x1427: 0x002b, 0x1428: 0x002d, 0x1429: 0x002f, + 0x142a: 0x0031, 0x142b: 0x0033, 0x142c: 0x0021, 0x142d: 0x0023, 0x142e: 0x0025, 0x142f: 0x0027, + 0x1430: 0x0029, 0x1431: 0x002b, 0x1432: 0x002d, 0x1433: 0x002f, 0x1434: 0x0031, 0x1435: 0x0033, + 0x1436: 0x0021, 0x1437: 0x0023, 0x1438: 0x0025, 0x1439: 0x0027, 0x143a: 0x0029, 0x143b: 0x002b, + 0x143c: 0x002d, 0x143d: 0x002f, 0x143e: 0x0031, 0x143f: 0x0033, + // Block 0x51, offset 0x1440 + 0x1440: 0x8133, 0x1441: 0x8133, 0x1442: 0x8133, 0x1443: 0x8133, 0x1444: 0x8133, 0x1445: 0x8133, + 0x1446: 0x8133, 0x1448: 0x8133, 0x1449: 0x8133, 0x144a: 0x8133, 0x144b: 0x8133, + 0x144c: 0x8133, 0x144d: 0x8133, 0x144e: 0x8133, 0x144f: 0x8133, 0x1450: 0x8133, 0x1451: 0x8133, + 0x1452: 0x8133, 0x1453: 0x8133, 0x1454: 0x8133, 0x1455: 0x8133, 0x1456: 0x8133, 0x1457: 0x8133, + 0x1458: 0x8133, 0x145b: 0x8133, 0x145c: 0x8133, 0x145d: 0x8133, + 0x145e: 0x8133, 0x145f: 0x8133, 0x1460: 0x8133, 0x1461: 0x8133, 0x1463: 0x8133, + 0x1464: 0x8133, 0x1466: 0x8133, 0x1467: 0x8133, 0x1468: 0x8133, 0x1469: 0x8133, + 0x146a: 0x8133, + 0x1470: 0x0290, 0x1471: 0x0293, 0x1472: 0x0296, 0x1473: 0x0299, 0x1474: 0x029c, 0x1475: 0x029f, + 0x1476: 0x02a2, 0x1477: 0x02a5, 0x1478: 0x02a8, 0x1479: 0x02ab, 0x147a: 0x02ae, 0x147b: 0x02b1, + 0x147c: 0x02b7, 0x147d: 0x02ba, 0x147e: 0x02bd, 0x147f: 0x02c0, + // Block 0x52, offset 0x1480 + 0x1480: 0x02c3, 0x1481: 0x02c6, 0x1482: 0x02c9, 0x1483: 0x02cc, 0x1484: 0x02cf, 0x1485: 0x02d2, + 0x1486: 0x02d5, 0x1487: 0x02db, 0x1488: 0x02e1, 0x1489: 0x02e4, 0x148a: 0x1736, 0x148b: 0x0302, + 0x148c: 0x02ea, 0x148d: 0x02ed, 0x148e: 0x0305, 0x148f: 0x02f9, 0x1490: 0x02ff, 0x1491: 0x0290, + 0x1492: 0x0293, 0x1493: 0x0296, 0x1494: 0x0299, 0x1495: 0x029c, 0x1496: 0x029f, 0x1497: 0x02a2, + 0x1498: 0x02a5, 0x1499: 0x02a8, 0x149a: 0x02ab, 0x149b: 0x02ae, 0x149c: 0x02b7, 0x149d: 0x02ba, + 0x149e: 0x02c0, 0x149f: 0x02c6, 0x14a0: 0x02c9, 0x14a1: 0x02cc, 0x14a2: 0x02cf, 0x14a3: 0x02d2, + 0x14a4: 0x02d5, 0x14a5: 0x02d8, 0x14a6: 0x02db, 0x14a7: 0x02f3, 0x14a8: 0x02ea, 0x14a9: 0x02e7, + 0x14aa: 0x02f0, 0x14ab: 0x02f6, 0x14ac: 0x1732, 0x14ad: 0x02fc, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x032c, 0x14c1: 0x032f, 0x14c2: 0x033b, 0x14c3: 0x0344, 0x14c5: 0x037d, + 0x14c6: 0x034d, 0x14c7: 0x033e, 0x14c8: 0x035c, 0x14c9: 0x0383, 0x14ca: 0x036e, 0x14cb: 0x0371, + 0x14cc: 0x0374, 0x14cd: 0x0377, 0x14ce: 0x0350, 0x14cf: 0x0362, 0x14d0: 0x0368, 0x14d1: 0x0356, + 0x14d2: 0x036b, 0x14d3: 0x034a, 0x14d4: 0x0353, 0x14d5: 0x0335, 0x14d6: 0x0338, 0x14d7: 0x0341, + 0x14d8: 0x0347, 0x14d9: 0x0359, 0x14da: 0x035f, 0x14db: 0x0365, 0x14dc: 0x0386, 0x14dd: 0x03d7, + 0x14de: 0x03bf, 0x14df: 0x0389, 0x14e1: 0x032f, 0x14e2: 0x033b, + 
0x14e4: 0x037a, 0x14e7: 0x033e, 0x14e9: 0x0383, + 0x14ea: 0x036e, 0x14eb: 0x0371, 0x14ec: 0x0374, 0x14ed: 0x0377, 0x14ee: 0x0350, 0x14ef: 0x0362, + 0x14f0: 0x0368, 0x14f1: 0x0356, 0x14f2: 0x036b, 0x14f4: 0x0353, 0x14f5: 0x0335, + 0x14f6: 0x0338, 0x14f7: 0x0341, 0x14f9: 0x0359, 0x14fb: 0x0365, + // Block 0x54, offset 0x1500 + 0x1502: 0x033b, + 0x1507: 0x033e, 0x1509: 0x0383, 0x150b: 0x0371, + 0x150d: 0x0377, 0x150e: 0x0350, 0x150f: 0x0362, 0x1511: 0x0356, + 0x1512: 0x036b, 0x1514: 0x0353, 0x1517: 0x0341, + 0x1519: 0x0359, 0x151b: 0x0365, 0x151d: 0x03d7, + 0x151f: 0x0389, 0x1521: 0x032f, 0x1522: 0x033b, + 0x1524: 0x037a, 0x1527: 0x033e, 0x1528: 0x035c, 0x1529: 0x0383, + 0x152a: 0x036e, 0x152c: 0x0374, 0x152d: 0x0377, 0x152e: 0x0350, 0x152f: 0x0362, + 0x1530: 0x0368, 0x1531: 0x0356, 0x1532: 0x036b, 0x1534: 0x0353, 0x1535: 0x0335, + 0x1536: 0x0338, 0x1537: 0x0341, 0x1539: 0x0359, 0x153a: 0x035f, 0x153b: 0x0365, + 0x153c: 0x0386, 0x153e: 0x03bf, + // Block 0x55, offset 0x1540 + 0x1540: 0x032c, 0x1541: 0x032f, 0x1542: 0x033b, 0x1543: 0x0344, 0x1544: 0x037a, 0x1545: 0x037d, + 0x1546: 0x034d, 0x1547: 0x033e, 0x1548: 0x035c, 0x1549: 0x0383, 0x154b: 0x0371, + 0x154c: 0x0374, 0x154d: 0x0377, 0x154e: 0x0350, 0x154f: 0x0362, 0x1550: 0x0368, 0x1551: 0x0356, + 0x1552: 0x036b, 0x1553: 0x034a, 0x1554: 0x0353, 0x1555: 0x0335, 0x1556: 0x0338, 0x1557: 0x0341, + 0x1558: 0x0347, 0x1559: 0x0359, 0x155a: 0x035f, 0x155b: 0x0365, + 0x1561: 0x032f, 0x1562: 0x033b, 0x1563: 0x0344, + 0x1565: 0x037d, 0x1566: 0x034d, 0x1567: 0x033e, 0x1568: 0x035c, 0x1569: 0x0383, + 0x156b: 0x0371, 0x156c: 0x0374, 0x156d: 0x0377, 0x156e: 0x0350, 0x156f: 0x0362, + 0x1570: 0x0368, 0x1571: 0x0356, 0x1572: 0x036b, 0x1573: 0x034a, 0x1574: 0x0353, 0x1575: 0x0335, + 0x1576: 0x0338, 0x1577: 0x0341, 0x1578: 0x0347, 0x1579: 0x0359, 0x157a: 0x035f, 0x157b: 0x0365, + // Block 0x56, offset 0x1580 + 0x1580: 0x19a6, 0x1581: 0x19a3, 0x1582: 0x19a9, 0x1583: 0x19cd, 0x1584: 0x19f1, 0x1585: 0x1a15, + 0x1586: 0x1a39, 0x1587: 0x1a42, 0x1588: 0x1a48, 0x1589: 0x1a4e, 0x158a: 0x1a54, + 0x1590: 0x1bbc, 0x1591: 0x1bc0, + 0x1592: 0x1bc4, 0x1593: 0x1bc8, 0x1594: 0x1bcc, 0x1595: 0x1bd0, 0x1596: 0x1bd4, 0x1597: 0x1bd8, + 0x1598: 0x1bdc, 0x1599: 0x1be0, 0x159a: 0x1be4, 0x159b: 0x1be8, 0x159c: 0x1bec, 0x159d: 0x1bf0, + 0x159e: 0x1bf4, 0x159f: 0x1bf8, 0x15a0: 0x1bfc, 0x15a1: 0x1c00, 0x15a2: 0x1c04, 0x15a3: 0x1c08, + 0x15a4: 0x1c0c, 0x15a5: 0x1c10, 0x15a6: 0x1c14, 0x15a7: 0x1c18, 0x15a8: 0x1c1c, 0x15a9: 0x1c20, + 0x15aa: 0x2855, 0x15ab: 0x0047, 0x15ac: 0x0065, 0x15ad: 0x1a69, 0x15ae: 0x1ae1, + 0x15b0: 0x0043, 0x15b1: 0x0045, 0x15b2: 0x0047, 0x15b3: 0x0049, 0x15b4: 0x004b, 0x15b5: 0x004d, + 0x15b6: 0x004f, 0x15b7: 0x0051, 0x15b8: 0x0053, 0x15b9: 0x0055, 0x15ba: 0x0057, 0x15bb: 0x0059, + 0x15bc: 0x005b, 0x15bd: 0x005d, 0x15be: 0x005f, 0x15bf: 0x0061, + // Block 0x57, offset 0x15c0 + 0x15c0: 0x27dd, 0x15c1: 0x27f2, 0x15c2: 0x05fe, + 0x15d0: 0x0d0a, 0x15d1: 0x0b42, + 0x15d2: 0x09ce, 0x15d3: 0x4705, 0x15d4: 0x0816, 0x15d5: 0x0aea, 0x15d6: 0x142a, 0x15d7: 0x0afa, + 0x15d8: 0x0822, 0x15d9: 0x0dd2, 0x15da: 0x0faa, 0x15db: 0x0daa, 0x15dc: 0x0922, 0x15dd: 0x0c66, + 0x15de: 0x08ba, 0x15df: 0x0db2, 0x15e0: 0x090e, 0x15e1: 0x1212, 0x15e2: 0x107e, 0x15e3: 0x1486, + 0x15e4: 0x0ace, 0x15e5: 0x0a06, 0x15e6: 0x0f5e, 0x15e7: 0x0d16, 0x15e8: 0x0d42, 0x15e9: 0x07ba, + 0x15ea: 0x07c6, 0x15eb: 0x1506, 0x15ec: 0x0bd6, 0x15ed: 0x07e2, 0x15ee: 0x09ea, 0x15ef: 0x0d36, + 0x15f0: 0x14ae, 0x15f1: 0x0d0e, 0x15f2: 0x116a, 0x15f3: 0x11a6, 0x15f4: 0x09f2, 0x15f5: 0x0f3e, + 0x15f6: 0x0e06, 0x15f7: 
0x0e02, 0x15f8: 0x1092, 0x15f9: 0x0926, 0x15fa: 0x0a52, 0x15fb: 0x153e, + // Block 0x58, offset 0x1600 + 0x1600: 0x07f6, 0x1601: 0x07ee, 0x1602: 0x07fe, 0x1603: 0x1774, 0x1604: 0x0842, 0x1605: 0x0852, + 0x1606: 0x0856, 0x1607: 0x085e, 0x1608: 0x0866, 0x1609: 0x086a, 0x160a: 0x0876, 0x160b: 0x086e, + 0x160c: 0x06ae, 0x160d: 0x1788, 0x160e: 0x088a, 0x160f: 0x088e, 0x1610: 0x0892, 0x1611: 0x08ae, + 0x1612: 0x1779, 0x1613: 0x06b2, 0x1614: 0x089a, 0x1615: 0x08ba, 0x1616: 0x1783, 0x1617: 0x08ca, + 0x1618: 0x08d2, 0x1619: 0x0832, 0x161a: 0x08da, 0x161b: 0x08de, 0x161c: 0x195e, 0x161d: 0x08fa, + 0x161e: 0x0902, 0x161f: 0x06ba, 0x1620: 0x091a, 0x1621: 0x091e, 0x1622: 0x0926, 0x1623: 0x092a, + 0x1624: 0x06be, 0x1625: 0x0942, 0x1626: 0x0946, 0x1627: 0x0952, 0x1628: 0x095e, 0x1629: 0x0962, + 0x162a: 0x0966, 0x162b: 0x096e, 0x162c: 0x098e, 0x162d: 0x0992, 0x162e: 0x099a, 0x162f: 0x09aa, + 0x1630: 0x09b2, 0x1631: 0x09b6, 0x1632: 0x09b6, 0x1633: 0x09b6, 0x1634: 0x1797, 0x1635: 0x0f8e, + 0x1636: 0x09ca, 0x1637: 0x09d2, 0x1638: 0x179c, 0x1639: 0x09de, 0x163a: 0x09e6, 0x163b: 0x09ee, + 0x163c: 0x0a16, 0x163d: 0x0a02, 0x163e: 0x0a0e, 0x163f: 0x0a12, + // Block 0x59, offset 0x1640 + 0x1640: 0x0a1a, 0x1641: 0x0a22, 0x1642: 0x0a26, 0x1643: 0x0a2e, 0x1644: 0x0a36, 0x1645: 0x0a3a, + 0x1646: 0x0a3a, 0x1647: 0x0a42, 0x1648: 0x0a4a, 0x1649: 0x0a4e, 0x164a: 0x0a5a, 0x164b: 0x0a7e, + 0x164c: 0x0a62, 0x164d: 0x0a82, 0x164e: 0x0a66, 0x164f: 0x0a6e, 0x1650: 0x0906, 0x1651: 0x0aca, + 0x1652: 0x0a92, 0x1653: 0x0a96, 0x1654: 0x0a9a, 0x1655: 0x0a8e, 0x1656: 0x0aa2, 0x1657: 0x0a9e, + 0x1658: 0x0ab6, 0x1659: 0x17a1, 0x165a: 0x0ad2, 0x165b: 0x0ad6, 0x165c: 0x0ade, 0x165d: 0x0aea, + 0x165e: 0x0af2, 0x165f: 0x0b0e, 0x1660: 0x17a6, 0x1661: 0x17ab, 0x1662: 0x0b1a, 0x1663: 0x0b1e, + 0x1664: 0x0b22, 0x1665: 0x0b16, 0x1666: 0x0b2a, 0x1667: 0x06c2, 0x1668: 0x06c6, 0x1669: 0x0b32, + 0x166a: 0x0b3a, 0x166b: 0x0b3a, 0x166c: 0x17b0, 0x166d: 0x0b56, 0x166e: 0x0b5a, 0x166f: 0x0b5e, + 0x1670: 0x0b66, 0x1671: 0x17b5, 0x1672: 0x0b6e, 0x1673: 0x0b72, 0x1674: 0x0c4a, 0x1675: 0x0b7a, + 0x1676: 0x06ca, 0x1677: 0x0b86, 0x1678: 0x0b96, 0x1679: 0x0ba2, 0x167a: 0x0b9e, 0x167b: 0x17bf, + 0x167c: 0x0baa, 0x167d: 0x17c4, 0x167e: 0x0bb6, 0x167f: 0x0bb2, + // Block 0x5a, offset 0x1680 + 0x1680: 0x0bba, 0x1681: 0x0bca, 0x1682: 0x0bce, 0x1683: 0x06ce, 0x1684: 0x0bde, 0x1685: 0x0be6, + 0x1686: 0x0bea, 0x1687: 0x0bee, 0x1688: 0x06d2, 0x1689: 0x17c9, 0x168a: 0x06d6, 0x168b: 0x0c0a, + 0x168c: 0x0c0e, 0x168d: 0x0c12, 0x168e: 0x0c1a, 0x168f: 0x1990, 0x1690: 0x0c32, 0x1691: 0x17d3, + 0x1692: 0x17d3, 0x1693: 0x12d2, 0x1694: 0x0c42, 0x1695: 0x0c42, 0x1696: 0x06da, 0x1697: 0x17f6, + 0x1698: 0x18c8, 0x1699: 0x0c52, 0x169a: 0x0c5a, 0x169b: 0x06de, 0x169c: 0x0c6e, 0x169d: 0x0c7e, + 0x169e: 0x0c82, 0x169f: 0x0c8a, 0x16a0: 0x0c9a, 0x16a1: 0x06e6, 0x16a2: 0x06e2, 0x16a3: 0x0c9e, + 0x16a4: 0x17d8, 0x16a5: 0x0ca2, 0x16a6: 0x0cb6, 0x16a7: 0x0cba, 0x16a8: 0x0cbe, 0x16a9: 0x0cba, + 0x16aa: 0x0cca, 0x16ab: 0x0cce, 0x16ac: 0x0cde, 0x16ad: 0x0cd6, 0x16ae: 0x0cda, 0x16af: 0x0ce2, + 0x16b0: 0x0ce6, 0x16b1: 0x0cea, 0x16b2: 0x0cf6, 0x16b3: 0x0cfa, 0x16b4: 0x0d12, 0x16b5: 0x0d1a, + 0x16b6: 0x0d2a, 0x16b7: 0x0d3e, 0x16b8: 0x17e7, 0x16b9: 0x0d3a, 0x16ba: 0x0d2e, 0x16bb: 0x0d46, + 0x16bc: 0x0d4e, 0x16bd: 0x0d62, 0x16be: 0x17ec, 0x16bf: 0x0d6a, + // Block 0x5b, offset 0x16c0 + 0x16c0: 0x0d5e, 0x16c1: 0x0d56, 0x16c2: 0x06ea, 0x16c3: 0x0d72, 0x16c4: 0x0d7a, 0x16c5: 0x0d82, + 0x16c6: 0x0d76, 0x16c7: 0x06ee, 0x16c8: 0x0d92, 0x16c9: 0x0d9a, 0x16ca: 0x17f1, 0x16cb: 0x0dc6, + 0x16cc: 0x0dfa, 
0x16cd: 0x0dd6, 0x16ce: 0x06fa, 0x16cf: 0x0de2, 0x16d0: 0x06f6, 0x16d1: 0x06f2, + 0x16d2: 0x08be, 0x16d3: 0x08c2, 0x16d4: 0x0dfe, 0x16d5: 0x0de6, 0x16d6: 0x12a6, 0x16d7: 0x075e, + 0x16d8: 0x0e0a, 0x16d9: 0x0e0e, 0x16da: 0x0e12, 0x16db: 0x0e26, 0x16dc: 0x0e1e, 0x16dd: 0x180a, + 0x16de: 0x06fe, 0x16df: 0x0e3a, 0x16e0: 0x0e2e, 0x16e1: 0x0e4a, 0x16e2: 0x0e52, 0x16e3: 0x1814, + 0x16e4: 0x0e56, 0x16e5: 0x0e42, 0x16e6: 0x0e5e, 0x16e7: 0x0702, 0x16e8: 0x0e62, 0x16e9: 0x0e66, + 0x16ea: 0x0e6a, 0x16eb: 0x0e76, 0x16ec: 0x1819, 0x16ed: 0x0e7e, 0x16ee: 0x0706, 0x16ef: 0x0e8a, + 0x16f0: 0x181e, 0x16f1: 0x0e8e, 0x16f2: 0x070a, 0x16f3: 0x0e9a, 0x16f4: 0x0ea6, 0x16f5: 0x0eb2, + 0x16f6: 0x0eb6, 0x16f7: 0x1823, 0x16f8: 0x17ba, 0x16f9: 0x1828, 0x16fa: 0x0ed6, 0x16fb: 0x182d, + 0x16fc: 0x0ee2, 0x16fd: 0x0eea, 0x16fe: 0x0eda, 0x16ff: 0x0ef6, + // Block 0x5c, offset 0x1700 + 0x1700: 0x0f06, 0x1701: 0x0f16, 0x1702: 0x0f0a, 0x1703: 0x0f0e, 0x1704: 0x0f1a, 0x1705: 0x0f1e, + 0x1706: 0x1832, 0x1707: 0x0f02, 0x1708: 0x0f36, 0x1709: 0x0f3a, 0x170a: 0x070e, 0x170b: 0x0f4e, + 0x170c: 0x0f4a, 0x170d: 0x1837, 0x170e: 0x0f2e, 0x170f: 0x0f6a, 0x1710: 0x183c, 0x1711: 0x1841, + 0x1712: 0x0f6e, 0x1713: 0x0f82, 0x1714: 0x0f7e, 0x1715: 0x0f7a, 0x1716: 0x0712, 0x1717: 0x0f86, + 0x1718: 0x0f96, 0x1719: 0x0f92, 0x171a: 0x0f9e, 0x171b: 0x177e, 0x171c: 0x0fae, 0x171d: 0x1846, + 0x171e: 0x0fba, 0x171f: 0x1850, 0x1720: 0x0fce, 0x1721: 0x0fda, 0x1722: 0x0fee, 0x1723: 0x1855, + 0x1724: 0x1002, 0x1725: 0x1006, 0x1726: 0x185a, 0x1727: 0x185f, 0x1728: 0x1022, 0x1729: 0x1032, + 0x172a: 0x0716, 0x172b: 0x1036, 0x172c: 0x071a, 0x172d: 0x071a, 0x172e: 0x104e, 0x172f: 0x1052, + 0x1730: 0x105a, 0x1731: 0x105e, 0x1732: 0x106a, 0x1733: 0x071e, 0x1734: 0x1082, 0x1735: 0x1864, + 0x1736: 0x109e, 0x1737: 0x1869, 0x1738: 0x10aa, 0x1739: 0x17ce, 0x173a: 0x10ba, 0x173b: 0x186e, + 0x173c: 0x1873, 0x173d: 0x1878, 0x173e: 0x0722, 0x173f: 0x0726, + // Block 0x5d, offset 0x1740 + 0x1740: 0x10f2, 0x1741: 0x1882, 0x1742: 0x187d, 0x1743: 0x1887, 0x1744: 0x188c, 0x1745: 0x10fa, + 0x1746: 0x10fe, 0x1747: 0x10fe, 0x1748: 0x1106, 0x1749: 0x072e, 0x174a: 0x110a, 0x174b: 0x0732, + 0x174c: 0x0736, 0x174d: 0x1896, 0x174e: 0x111e, 0x174f: 0x1126, 0x1750: 0x1132, 0x1751: 0x073a, + 0x1752: 0x189b, 0x1753: 0x1156, 0x1754: 0x18a0, 0x1755: 0x18a5, 0x1756: 0x1176, 0x1757: 0x118e, + 0x1758: 0x073e, 0x1759: 0x1196, 0x175a: 0x119a, 0x175b: 0x119e, 0x175c: 0x18aa, 0x175d: 0x18af, + 0x175e: 0x18af, 0x175f: 0x11b6, 0x1760: 0x0742, 0x1761: 0x18b4, 0x1762: 0x11ca, 0x1763: 0x11ce, + 0x1764: 0x0746, 0x1765: 0x18b9, 0x1766: 0x11ea, 0x1767: 0x074a, 0x1768: 0x11fa, 0x1769: 0x11f2, + 0x176a: 0x1202, 0x176b: 0x18c3, 0x176c: 0x121a, 0x176d: 0x074e, 0x176e: 0x1226, 0x176f: 0x122e, + 0x1770: 0x123e, 0x1771: 0x0752, 0x1772: 0x18cd, 0x1773: 0x18d2, 0x1774: 0x0756, 0x1775: 0x18d7, + 0x1776: 0x1256, 0x1777: 0x18dc, 0x1778: 0x1262, 0x1779: 0x126e, 0x177a: 0x1276, 0x177b: 0x18e1, + 0x177c: 0x18e6, 0x177d: 0x128a, 0x177e: 0x18eb, 0x177f: 0x1292, + // Block 0x5e, offset 0x1780 + 0x1780: 0x17fb, 0x1781: 0x075a, 0x1782: 0x12aa, 0x1783: 0x12ae, 0x1784: 0x0762, 0x1785: 0x12b2, + 0x1786: 0x0b2e, 0x1787: 0x18f0, 0x1788: 0x18f5, 0x1789: 0x1800, 0x178a: 0x1805, 0x178b: 0x12d2, + 0x178c: 0x12d6, 0x178d: 0x14ee, 0x178e: 0x0766, 0x178f: 0x1302, 0x1790: 0x12fe, 0x1791: 0x1306, + 0x1792: 0x093a, 0x1793: 0x130a, 0x1794: 0x130e, 0x1795: 0x1312, 0x1796: 0x131a, 0x1797: 0x18fa, + 0x1798: 0x1316, 0x1799: 0x131e, 0x179a: 0x1332, 0x179b: 0x1336, 0x179c: 0x1322, 0x179d: 0x133a, + 0x179e: 0x134e, 0x179f: 0x1362, 0x17a0: 
0x132e, 0x17a1: 0x1342, 0x17a2: 0x1346, 0x17a3: 0x134a, + 0x17a4: 0x18ff, 0x17a5: 0x1909, 0x17a6: 0x1904, 0x17a7: 0x076a, 0x17a8: 0x136a, 0x17a9: 0x136e, + 0x17aa: 0x1376, 0x17ab: 0x191d, 0x17ac: 0x137a, 0x17ad: 0x190e, 0x17ae: 0x076e, 0x17af: 0x0772, + 0x17b0: 0x1913, 0x17b1: 0x1918, 0x17b2: 0x0776, 0x17b3: 0x139a, 0x17b4: 0x139e, 0x17b5: 0x13a2, + 0x17b6: 0x13a6, 0x17b7: 0x13b2, 0x17b8: 0x13ae, 0x17b9: 0x13ba, 0x17ba: 0x13b6, 0x17bb: 0x13c6, + 0x17bc: 0x13be, 0x17bd: 0x13c2, 0x17be: 0x13ca, 0x17bf: 0x077a, + // Block 0x5f, offset 0x17c0 + 0x17c0: 0x13d2, 0x17c1: 0x13d6, 0x17c2: 0x077e, 0x17c3: 0x13e6, 0x17c4: 0x13ea, 0x17c5: 0x1922, + 0x17c6: 0x13f6, 0x17c7: 0x13fa, 0x17c8: 0x0782, 0x17c9: 0x1406, 0x17ca: 0x06b6, 0x17cb: 0x1927, + 0x17cc: 0x192c, 0x17cd: 0x0786, 0x17ce: 0x078a, 0x17cf: 0x1432, 0x17d0: 0x144a, 0x17d1: 0x1466, + 0x17d2: 0x1476, 0x17d3: 0x1931, 0x17d4: 0x148a, 0x17d5: 0x148e, 0x17d6: 0x14a6, 0x17d7: 0x14b2, + 0x17d8: 0x193b, 0x17d9: 0x178d, 0x17da: 0x14be, 0x17db: 0x14ba, 0x17dc: 0x14c6, 0x17dd: 0x1792, + 0x17de: 0x14d2, 0x17df: 0x14de, 0x17e0: 0x1940, 0x17e1: 0x1945, 0x17e2: 0x151e, 0x17e3: 0x152a, + 0x17e4: 0x1532, 0x17e5: 0x194a, 0x17e6: 0x1536, 0x17e7: 0x1562, 0x17e8: 0x156e, 0x17e9: 0x1572, + 0x17ea: 0x156a, 0x17eb: 0x157e, 0x17ec: 0x1582, 0x17ed: 0x194f, 0x17ee: 0x158e, 0x17ef: 0x078e, + 0x17f0: 0x1596, 0x17f1: 0x1954, 0x17f2: 0x0792, 0x17f3: 0x15ce, 0x17f4: 0x0bbe, 0x17f5: 0x15e6, + 0x17f6: 0x1959, 0x17f7: 0x1963, 0x17f8: 0x0796, 0x17f9: 0x079a, 0x17fa: 0x160e, 0x17fb: 0x1968, + 0x17fc: 0x079e, 0x17fd: 0x196d, 0x17fe: 0x1626, 0x17ff: 0x1626, + // Block 0x60, offset 0x1800 + 0x1800: 0x162e, 0x1801: 0x1972, 0x1802: 0x1646, 0x1803: 0x07a2, 0x1804: 0x1656, 0x1805: 0x1662, + 0x1806: 0x166a, 0x1807: 0x1672, 0x1808: 0x07a6, 0x1809: 0x1977, 0x180a: 0x1686, 0x180b: 0x16a2, + 0x180c: 0x16ae, 0x180d: 0x07aa, 0x180e: 0x07ae, 0x180f: 0x16b2, 0x1810: 0x197c, 0x1811: 0x07b2, + 0x1812: 0x1981, 0x1813: 0x1986, 0x1814: 0x198b, 0x1815: 0x16d6, 0x1816: 0x07b6, 0x1817: 0x16ea, + 0x1818: 0x16f2, 0x1819: 0x16f6, 0x181a: 0x16fe, 0x181b: 0x1706, 0x181c: 0x170e, 0x181d: 0x1995, +} + +// nfkcIndex: 22 blocks, 1408 entries, 2816 bytes +// Block 0 is the zero block. 
+var nfkcIndex = [1408]uint16{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x5f, 0xc3: 0x01, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x60, 0xc7: 0x04, + 0xc8: 0x05, 0xca: 0x61, 0xcb: 0x62, 0xcc: 0x06, 0xcd: 0x07, 0xce: 0x08, 0xcf: 0x09, + 0xd0: 0x0a, 0xd1: 0x63, 0xd2: 0x64, 0xd3: 0x0b, 0xd6: 0x0c, 0xd7: 0x65, + 0xd8: 0x66, 0xd9: 0x0d, 0xdb: 0x67, 0xdc: 0x68, 0xdd: 0x69, 0xdf: 0x6a, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, + 0xea: 0x06, 0xeb: 0x07, 0xec: 0x08, 0xed: 0x09, 0xef: 0x0a, + 0xf0: 0x13, + // Block 0x4, offset 0x100 + 0x120: 0x6b, 0x121: 0x6c, 0x122: 0x6d, 0x123: 0x0e, 0x124: 0x6e, 0x125: 0x6f, 0x126: 0x70, 0x127: 0x71, + 0x128: 0x72, 0x129: 0x73, 0x12a: 0x74, 0x12b: 0x75, 0x12c: 0x70, 0x12d: 0x76, 0x12e: 0x77, 0x12f: 0x78, + 0x130: 0x74, 0x131: 0x79, 0x132: 0x7a, 0x133: 0x7b, 0x134: 0x7c, 0x135: 0x7d, 0x137: 0x7e, + 0x138: 0x7f, 0x139: 0x80, 0x13a: 0x81, 0x13b: 0x82, 0x13c: 0x83, 0x13d: 0x84, 0x13e: 0x85, 0x13f: 0x86, + // Block 0x5, offset 0x140 + 0x140: 0x87, 0x142: 0x88, 0x143: 0x89, 0x144: 0x8a, 0x145: 0x8b, 0x146: 0x8c, 0x147: 0x8d, + 0x14d: 0x8e, + 0x15c: 0x8f, 0x15f: 0x90, + 0x162: 0x91, 0x164: 0x92, + 0x168: 0x93, 0x169: 0x94, 0x16a: 0x95, 0x16b: 0x96, 0x16c: 0x0f, 0x16d: 0x97, 0x16e: 0x98, 0x16f: 0x99, + 0x170: 0x9a, 0x173: 0x9b, 0x174: 0x9c, 0x175: 0x10, 0x176: 0x11, 0x177: 0x12, + 0x178: 0x13, 0x179: 0x14, 0x17a: 0x15, 0x17b: 0x16, 0x17c: 0x17, 0x17d: 0x18, 0x17e: 0x19, 0x17f: 0x1a, + // Block 0x6, offset 0x180 + 0x180: 0x9d, 0x181: 0x9e, 0x182: 0x9f, 0x183: 0xa0, 0x184: 0x1b, 0x185: 0x1c, 0x186: 0xa1, 0x187: 0xa2, + 0x188: 0xa3, 0x189: 0x1d, 0x18a: 0x1e, 0x18b: 0xa4, 0x18c: 0xa5, + 0x191: 0x1f, 0x192: 0x20, 0x193: 0xa6, + 0x1a8: 0xa7, 0x1a9: 0xa8, 0x1ab: 0xa9, + 0x1b1: 0xaa, 0x1b3: 0xab, 0x1b5: 0xac, 0x1b7: 0xad, + 0x1ba: 0xae, 0x1bb: 0xaf, 0x1bc: 0x21, 0x1bd: 0x22, 0x1be: 0x23, 0x1bf: 0xb0, + // Block 0x7, offset 0x1c0 + 0x1c0: 0xb1, 0x1c1: 0x24, 0x1c2: 0x25, 0x1c3: 0x26, 0x1c4: 0xb2, 0x1c5: 0x27, 0x1c6: 0x28, + 0x1c8: 0x29, 0x1c9: 0x2a, 0x1ca: 0x2b, 0x1cb: 0x2c, 0x1cc: 0x2d, 0x1cd: 0x2e, 0x1ce: 0x2f, 0x1cf: 0x30, + // Block 0x8, offset 0x200 + 0x219: 0xb3, 0x21a: 0xb4, 0x21b: 0xb5, 0x21d: 0xb6, 0x21f: 0xb7, + 0x220: 0xb8, 0x223: 0xb9, 0x224: 0xba, 0x225: 0xbb, 0x226: 0xbc, 0x227: 0xbd, + 0x22a: 0xbe, 0x22b: 0xbf, 0x22d: 0xc0, 0x22f: 0xc1, + 0x230: 0xc2, 0x231: 0xc3, 0x232: 0xc4, 0x233: 0xc5, 0x234: 0xc6, 0x235: 0xc7, 0x236: 0xc8, 0x237: 0xc2, + 0x238: 0xc3, 0x239: 0xc4, 0x23a: 0xc5, 0x23b: 0xc6, 0x23c: 0xc7, 0x23d: 0xc8, 0x23e: 0xc2, 0x23f: 0xc3, + // Block 0x9, offset 0x240 + 0x240: 0xc4, 0x241: 0xc5, 0x242: 0xc6, 0x243: 0xc7, 0x244: 0xc8, 0x245: 0xc2, 0x246: 0xc3, 0x247: 0xc4, + 0x248: 0xc5, 0x249: 0xc6, 0x24a: 0xc7, 0x24b: 0xc8, 0x24c: 0xc2, 0x24d: 0xc3, 0x24e: 0xc4, 0x24f: 0xc5, + 0x250: 0xc6, 0x251: 0xc7, 0x252: 0xc8, 0x253: 0xc2, 0x254: 0xc3, 0x255: 0xc4, 0x256: 0xc5, 0x257: 0xc6, + 0x258: 0xc7, 0x259: 0xc8, 0x25a: 0xc2, 0x25b: 0xc3, 0x25c: 0xc4, 0x25d: 0xc5, 0x25e: 0xc6, 0x25f: 0xc7, + 0x260: 0xc8, 0x261: 0xc2, 0x262: 0xc3, 0x263: 0xc4, 0x264: 0xc5, 0x265: 0xc6, 0x266: 0xc7, 0x267: 0xc8, + 0x268: 0xc2, 0x269: 0xc3, 0x26a: 0xc4, 0x26b: 0xc5, 0x26c: 0xc6, 0x26d: 0xc7, 0x26e: 0xc8, 0x26f: 0xc2, + 0x270: 0xc3, 0x271: 0xc4, 0x272: 0xc5, 0x273: 0xc6, 0x274: 0xc7, 0x275: 0xc8, 0x276: 0xc2, 0x277: 0xc3, + 0x278: 0xc4, 0x279: 0xc5, 0x27a: 0xc6, 0x27b: 0xc7, 0x27c: 0xc8, 0x27d: 0xc2, 0x27e: 0xc3, 0x27f: 0xc4, + // Block 0xa, offset 0x280 + 0x280: 0xc5, 0x281: 0xc6, 0x282: 0xc7, 0x283: 0xc8, 
0x284: 0xc2, 0x285: 0xc3, 0x286: 0xc4, 0x287: 0xc5, + 0x288: 0xc6, 0x289: 0xc7, 0x28a: 0xc8, 0x28b: 0xc2, 0x28c: 0xc3, 0x28d: 0xc4, 0x28e: 0xc5, 0x28f: 0xc6, + 0x290: 0xc7, 0x291: 0xc8, 0x292: 0xc2, 0x293: 0xc3, 0x294: 0xc4, 0x295: 0xc5, 0x296: 0xc6, 0x297: 0xc7, + 0x298: 0xc8, 0x299: 0xc2, 0x29a: 0xc3, 0x29b: 0xc4, 0x29c: 0xc5, 0x29d: 0xc6, 0x29e: 0xc7, 0x29f: 0xc8, + 0x2a0: 0xc2, 0x2a1: 0xc3, 0x2a2: 0xc4, 0x2a3: 0xc5, 0x2a4: 0xc6, 0x2a5: 0xc7, 0x2a6: 0xc8, 0x2a7: 0xc2, + 0x2a8: 0xc3, 0x2a9: 0xc4, 0x2aa: 0xc5, 0x2ab: 0xc6, 0x2ac: 0xc7, 0x2ad: 0xc8, 0x2ae: 0xc2, 0x2af: 0xc3, + 0x2b0: 0xc4, 0x2b1: 0xc5, 0x2b2: 0xc6, 0x2b3: 0xc7, 0x2b4: 0xc8, 0x2b5: 0xc2, 0x2b6: 0xc3, 0x2b7: 0xc4, + 0x2b8: 0xc5, 0x2b9: 0xc6, 0x2ba: 0xc7, 0x2bb: 0xc8, 0x2bc: 0xc2, 0x2bd: 0xc3, 0x2be: 0xc4, 0x2bf: 0xc5, + // Block 0xb, offset 0x2c0 + 0x2c0: 0xc6, 0x2c1: 0xc7, 0x2c2: 0xc8, 0x2c3: 0xc2, 0x2c4: 0xc3, 0x2c5: 0xc4, 0x2c6: 0xc5, 0x2c7: 0xc6, + 0x2c8: 0xc7, 0x2c9: 0xc8, 0x2ca: 0xc2, 0x2cb: 0xc3, 0x2cc: 0xc4, 0x2cd: 0xc5, 0x2ce: 0xc6, 0x2cf: 0xc7, + 0x2d0: 0xc8, 0x2d1: 0xc2, 0x2d2: 0xc3, 0x2d3: 0xc4, 0x2d4: 0xc5, 0x2d5: 0xc6, 0x2d6: 0xc7, 0x2d7: 0xc8, + 0x2d8: 0xc2, 0x2d9: 0xc3, 0x2da: 0xc4, 0x2db: 0xc5, 0x2dc: 0xc6, 0x2dd: 0xc7, 0x2de: 0xc9, + // Block 0xc, offset 0x300 + 0x324: 0x31, 0x325: 0x32, 0x326: 0x33, 0x327: 0x34, + 0x328: 0x35, 0x329: 0x36, 0x32a: 0x37, 0x32b: 0x38, 0x32c: 0x39, 0x32d: 0x3a, 0x32e: 0x3b, 0x32f: 0x3c, + 0x330: 0x3d, 0x331: 0x3e, 0x332: 0x3f, 0x333: 0x40, 0x334: 0x41, 0x335: 0x42, 0x336: 0x43, 0x337: 0x44, + 0x338: 0x45, 0x339: 0x46, 0x33a: 0x47, 0x33b: 0x48, 0x33c: 0xca, 0x33d: 0x49, 0x33e: 0x4a, 0x33f: 0x4b, + // Block 0xd, offset 0x340 + 0x347: 0xcb, + 0x34b: 0xcc, 0x34d: 0xcd, + 0x35e: 0x4c, + 0x368: 0xce, 0x36b: 0xcf, + 0x374: 0xd0, + 0x37a: 0xd1, 0x37b: 0xd2, 0x37d: 0xd3, 0x37e: 0xd4, + // Block 0xe, offset 0x380 + 0x381: 0xd5, 0x382: 0xd6, 0x384: 0xd7, 0x385: 0xbc, 0x387: 0xd8, + 0x388: 0xd9, 0x38b: 0xda, 0x38c: 0xdb, 0x38d: 0xdc, + 0x391: 0xdd, 0x392: 0xde, 0x393: 0xdf, 0x396: 0xe0, 0x397: 0xe1, + 0x398: 0xe2, 0x39a: 0xe3, 0x39c: 0xe4, + 0x3a0: 0xe5, 0x3a4: 0xe6, 0x3a5: 0xe7, 0x3a7: 0xe8, + 0x3a8: 0xe9, 0x3a9: 0xea, 0x3aa: 0xeb, + 0x3b0: 0xe2, 0x3b5: 0xec, 0x3b6: 0xed, + 0x3bd: 0xee, + // Block 0xf, offset 0x3c0 + 0x3eb: 0xef, 0x3ec: 0xf0, + 0x3ff: 0xf1, + // Block 0x10, offset 0x400 + 0x432: 0xf2, + // Block 0x11, offset 0x440 + 0x445: 0xf3, 0x446: 0xf4, 0x447: 0xf5, + 0x449: 0xf6, + 0x450: 0xf7, 0x451: 0xf8, 0x452: 0xf9, 0x453: 0xfa, 0x454: 0xfb, 0x455: 0xfc, 0x456: 0xfd, 0x457: 0xfe, + 0x458: 0xff, 0x459: 0x100, 0x45a: 0x4d, 0x45b: 0x101, 0x45c: 0x102, 0x45d: 0x103, 0x45e: 0x104, 0x45f: 0x4e, + // Block 0x12, offset 0x480 + 0x480: 0x4f, 0x481: 0x50, 0x482: 0x105, 0x484: 0xf0, + 0x48a: 0x106, 0x48b: 0x107, + 0x493: 0x108, + 0x4a3: 0x109, 0x4a5: 0x10a, + 0x4b8: 0x51, 0x4b9: 0x52, 0x4ba: 0x53, + // Block 0x13, offset 0x4c0 + 0x4c4: 0x54, 0x4c5: 0x10b, 0x4c6: 0x10c, + 0x4c8: 0x55, 0x4c9: 0x10d, + 0x4ef: 0x10e, + // Block 0x14, offset 0x500 + 0x520: 0x56, 0x521: 0x57, 0x522: 0x58, 0x523: 0x59, 0x524: 0x5a, 0x525: 0x5b, 0x526: 0x5c, 0x527: 0x5d, + 0x528: 0x5e, + // Block 0x15, offset 0x540 + 0x550: 0x0b, 0x551: 0x0c, 0x556: 0x0d, + 0x55b: 0x0e, 0x55d: 0x0f, 0x55e: 0x10, 0x55f: 0x11, + 0x56f: 0x12, +} + +// nfkcSparseOffset: 176 entries, 352 bytes +var nfkcSparseOffset = []uint16{0x0, 0xe, 0x12, 0x1c, 0x26, 0x36, 0x38, 0x3d, 0x48, 0x57, 0x64, 0x6c, 0x71, 0x76, 0x78, 0x7c, 0x84, 0x8b, 0x8e, 0x96, 0x9a, 0x9e, 0xa0, 0xa2, 0xab, 0xaf, 0xb6, 0xbb, 0xbe, 0xc8, 0xcb, 0xd2, 0xda, 0xde, 0xe0, 
0xe4, 0xe8, 0xee, 0xff, 0x10b, 0x10d, 0x113, 0x115, 0x117, 0x119, 0x11b, 0x11d, 0x11f, 0x121, 0x124, 0x127, 0x129, 0x12c, 0x12f, 0x133, 0x139, 0x140, 0x149, 0x14b, 0x14e, 0x150, 0x15b, 0x166, 0x174, 0x182, 0x192, 0x1a0, 0x1a7, 0x1ad, 0x1bc, 0x1c0, 0x1c2, 0x1c6, 0x1c8, 0x1cb, 0x1cd, 0x1d0, 0x1d2, 0x1d5, 0x1d7, 0x1d9, 0x1db, 0x1e7, 0x1f1, 0x1fb, 0x1fe, 0x202, 0x204, 0x206, 0x20b, 0x20e, 0x211, 0x213, 0x215, 0x217, 0x219, 0x21f, 0x222, 0x227, 0x229, 0x230, 0x236, 0x23c, 0x244, 0x24a, 0x250, 0x256, 0x25a, 0x25c, 0x25e, 0x260, 0x262, 0x268, 0x26b, 0x26d, 0x26f, 0x271, 0x277, 0x27b, 0x27f, 0x287, 0x28e, 0x291, 0x294, 0x296, 0x299, 0x2a1, 0x2a5, 0x2ac, 0x2af, 0x2b5, 0x2b7, 0x2b9, 0x2bc, 0x2be, 0x2c1, 0x2c6, 0x2c8, 0x2ca, 0x2cc, 0x2ce, 0x2d0, 0x2d3, 0x2d5, 0x2d7, 0x2d9, 0x2db, 0x2dd, 0x2df, 0x2ec, 0x2f6, 0x2f8, 0x2fa, 0x2fe, 0x303, 0x30f, 0x314, 0x31d, 0x323, 0x328, 0x32c, 0x331, 0x335, 0x345, 0x353, 0x361, 0x36f, 0x371, 0x373, 0x375, 0x379, 0x37b, 0x37e, 0x389, 0x38b, 0x395} + +// nfkcSparseValues: 919 entries, 3676 bytes +var nfkcSparseValues = [919]valueRange{ + // Block 0x0, offset 0x0 + {value: 0x0002, lo: 0x0d}, + {value: 0x0001, lo: 0xa0, hi: 0xa0}, + {value: 0x43b9, lo: 0xa8, hi: 0xa8}, + {value: 0x0083, lo: 0xaa, hi: 0xaa}, + {value: 0x43a5, lo: 0xaf, hi: 0xaf}, + {value: 0x0025, lo: 0xb2, hi: 0xb3}, + {value: 0x439b, lo: 0xb4, hi: 0xb4}, + {value: 0x0260, lo: 0xb5, hi: 0xb5}, + {value: 0x43d2, lo: 0xb8, hi: 0xb8}, + {value: 0x0023, lo: 0xb9, hi: 0xb9}, + {value: 0x009f, lo: 0xba, hi: 0xba}, + {value: 0x234c, lo: 0xbc, hi: 0xbc}, + {value: 0x2340, lo: 0xbd, hi: 0xbd}, + {value: 0x23e2, lo: 0xbe, hi: 0xbe}, + // Block 0x1, offset 0xe + {value: 0x0091, lo: 0x03}, + {value: 0x4823, lo: 0xa0, hi: 0xa1}, + {value: 0x4855, lo: 0xaf, hi: 0xb0}, + {value: 0xa000, lo: 0xb7, hi: 0xb7}, + // Block 0x2, offset 0x12 + {value: 0x0004, lo: 0x09}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x0091, lo: 0xb0, hi: 0xb0}, + {value: 0x0140, lo: 0xb1, hi: 0xb1}, + {value: 0x0095, lo: 0xb2, hi: 0xb2}, + {value: 0x00a5, lo: 0xb3, hi: 0xb3}, + {value: 0x0179, lo: 0xb4, hi: 0xb4}, + {value: 0x017f, lo: 0xb5, hi: 0xb5}, + {value: 0x018b, lo: 0xb6, hi: 0xb6}, + {value: 0x00af, lo: 0xb7, hi: 0xb8}, + // Block 0x3, offset 0x1c + {value: 0x000a, lo: 0x09}, + {value: 0x43af, lo: 0x98, hi: 0x98}, + {value: 0x43b4, lo: 0x99, hi: 0x9a}, + {value: 0x43d7, lo: 0x9b, hi: 0x9b}, + {value: 0x43a0, lo: 0x9c, hi: 0x9c}, + {value: 0x43c3, lo: 0x9d, hi: 0x9d}, + {value: 0x0137, lo: 0xa0, hi: 0xa0}, + {value: 0x0099, lo: 0xa1, hi: 0xa1}, + {value: 0x00a7, lo: 0xa2, hi: 0xa3}, + {value: 0x01b8, lo: 0xa4, hi: 0xa4}, + // Block 0x4, offset 0x26 + {value: 0x0000, lo: 0x0f}, + {value: 0xa000, lo: 0x83, hi: 0x83}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0xa000, lo: 0x8b, hi: 0x8b}, + {value: 0xa000, lo: 0x8d, hi: 0x8d}, + {value: 0x38e6, lo: 0x90, hi: 0x90}, + {value: 0x38f2, lo: 0x91, hi: 0x91}, + {value: 0x38e0, lo: 0x93, hi: 0x93}, + {value: 0xa000, lo: 0x96, hi: 0x96}, + {value: 0x3958, lo: 0x97, hi: 0x97}, + {value: 0x3922, lo: 0x9c, hi: 0x9c}, + {value: 0x390a, lo: 0x9d, hi: 0x9d}, + {value: 0x3934, lo: 0x9e, hi: 0x9e}, + {value: 0xa000, lo: 0xb4, hi: 0xb5}, + {value: 0x395e, lo: 0xb6, hi: 0xb6}, + {value: 0x3964, lo: 0xb7, hi: 0xb7}, + // Block 0x5, offset 0x36 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0x83, hi: 0x87}, + // Block 0x6, offset 0x38 + {value: 0x0001, lo: 0x04}, + {value: 0x8114, lo: 0x81, hi: 0x82}, + {value: 0x8133, lo: 0x84, hi: 0x84}, + {value: 0x812e, lo: 0x85, hi: 0x85}, + {value: 0x810e, 
lo: 0x87, hi: 0x87}, + // Block 0x7, offset 0x3d + {value: 0x0000, lo: 0x0a}, + {value: 0x8133, lo: 0x90, hi: 0x97}, + {value: 0x811a, lo: 0x98, hi: 0x98}, + {value: 0x811b, lo: 0x99, hi: 0x99}, + {value: 0x811c, lo: 0x9a, hi: 0x9a}, + {value: 0x3982, lo: 0xa2, hi: 0xa2}, + {value: 0x3988, lo: 0xa3, hi: 0xa3}, + {value: 0x3994, lo: 0xa4, hi: 0xa4}, + {value: 0x398e, lo: 0xa5, hi: 0xa5}, + {value: 0x399a, lo: 0xa6, hi: 0xa6}, + {value: 0xa000, lo: 0xa7, hi: 0xa7}, + // Block 0x8, offset 0x48 + {value: 0x0000, lo: 0x0e}, + {value: 0x39ac, lo: 0x80, hi: 0x80}, + {value: 0xa000, lo: 0x81, hi: 0x81}, + {value: 0x39a0, lo: 0x82, hi: 0x82}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x39a6, lo: 0x93, hi: 0x93}, + {value: 0xa000, lo: 0x95, hi: 0x95}, + {value: 0x8133, lo: 0x96, hi: 0x9c}, + {value: 0x8133, lo: 0x9f, hi: 0xa2}, + {value: 0x812e, lo: 0xa3, hi: 0xa3}, + {value: 0x8133, lo: 0xa4, hi: 0xa4}, + {value: 0x8133, lo: 0xa7, hi: 0xa8}, + {value: 0x812e, lo: 0xaa, hi: 0xaa}, + {value: 0x8133, lo: 0xab, hi: 0xac}, + {value: 0x812e, lo: 0xad, hi: 0xad}, + // Block 0x9, offset 0x57 + {value: 0x0000, lo: 0x0c}, + {value: 0x8120, lo: 0x91, hi: 0x91}, + {value: 0x8133, lo: 0xb0, hi: 0xb0}, + {value: 0x812e, lo: 0xb1, hi: 0xb1}, + {value: 0x8133, lo: 0xb2, hi: 0xb3}, + {value: 0x812e, lo: 0xb4, hi: 0xb4}, + {value: 0x8133, lo: 0xb5, hi: 0xb6}, + {value: 0x812e, lo: 0xb7, hi: 0xb9}, + {value: 0x8133, lo: 0xba, hi: 0xba}, + {value: 0x812e, lo: 0xbb, hi: 0xbc}, + {value: 0x8133, lo: 0xbd, hi: 0xbd}, + {value: 0x812e, lo: 0xbe, hi: 0xbe}, + {value: 0x8133, lo: 0xbf, hi: 0xbf}, + // Block 0xa, offset 0x64 + {value: 0x0005, lo: 0x07}, + {value: 0x8133, lo: 0x80, hi: 0x80}, + {value: 0x8133, lo: 0x81, hi: 0x81}, + {value: 0x812e, lo: 0x82, hi: 0x83}, + {value: 0x812e, lo: 0x84, hi: 0x85}, + {value: 0x812e, lo: 0x86, hi: 0x87}, + {value: 0x812e, lo: 0x88, hi: 0x89}, + {value: 0x8133, lo: 0x8a, hi: 0x8a}, + // Block 0xb, offset 0x6c + {value: 0x0000, lo: 0x04}, + {value: 0x8133, lo: 0xab, hi: 0xb1}, + {value: 0x812e, lo: 0xb2, hi: 0xb2}, + {value: 0x8133, lo: 0xb3, hi: 0xb3}, + {value: 0x812e, lo: 0xbd, hi: 0xbd}, + // Block 0xc, offset 0x71 + {value: 0x0000, lo: 0x04}, + {value: 0x8133, lo: 0x96, hi: 0x99}, + {value: 0x8133, lo: 0x9b, hi: 0xa3}, + {value: 0x8133, lo: 0xa5, hi: 0xa7}, + {value: 0x8133, lo: 0xa9, hi: 0xad}, + // Block 0xd, offset 0x76 + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0x99, hi: 0x9b}, + // Block 0xe, offset 0x78 + {value: 0x0000, lo: 0x03}, + {value: 0x8133, lo: 0x98, hi: 0x98}, + {value: 0x812e, lo: 0x99, hi: 0x9b}, + {value: 0x8133, lo: 0x9c, hi: 0x9f}, + // Block 0xf, offset 0x7c + {value: 0x0000, lo: 0x07}, + {value: 0xa000, lo: 0xa8, hi: 0xa8}, + {value: 0x4019, lo: 0xa9, hi: 0xa9}, + {value: 0xa000, lo: 0xb0, hi: 0xb0}, + {value: 0x4021, lo: 0xb1, hi: 0xb1}, + {value: 0xa000, lo: 0xb3, hi: 0xb3}, + {value: 0x4029, lo: 0xb4, hi: 0xb4}, + {value: 0x9903, lo: 0xbc, hi: 0xbc}, + // Block 0x10, offset 0x84 + {value: 0x0008, lo: 0x06}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x8133, lo: 0x91, hi: 0x91}, + {value: 0x812e, lo: 0x92, hi: 0x92}, + {value: 0x8133, lo: 0x93, hi: 0x93}, + {value: 0x8133, lo: 0x94, hi: 0x94}, + {value: 0x465d, lo: 0x98, hi: 0x9f}, + // Block 0x11, offset 0x8b + {value: 0x0000, lo: 0x02}, + {value: 0x8103, lo: 0xbc, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x12, offset 0x8e + {value: 0x0008, lo: 0x07}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2dd5, lo: 0x8b, hi: 0x8c}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, 
+ {value: 0x9900, lo: 0x97, hi: 0x97}, + {value: 0x469d, lo: 0x9c, hi: 0x9d}, + {value: 0x46ad, lo: 0x9f, hi: 0x9f}, + {value: 0x8133, lo: 0xbe, hi: 0xbe}, + // Block 0x13, offset 0x96 + {value: 0x0000, lo: 0x03}, + {value: 0x46d5, lo: 0xb3, hi: 0xb3}, + {value: 0x46dd, lo: 0xb6, hi: 0xb6}, + {value: 0x8103, lo: 0xbc, hi: 0xbc}, + // Block 0x14, offset 0x9a + {value: 0x0008, lo: 0x03}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x46b5, lo: 0x99, hi: 0x9b}, + {value: 0x46cd, lo: 0x9e, hi: 0x9e}, + // Block 0x15, offset 0x9e + {value: 0x0000, lo: 0x01}, + {value: 0x8103, lo: 0xbc, hi: 0xbc}, + // Block 0x16, offset 0xa0 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + // Block 0x17, offset 0xa2 + {value: 0x0000, lo: 0x08}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2ded, lo: 0x88, hi: 0x88}, + {value: 0x2de5, lo: 0x8b, hi: 0x8b}, + {value: 0x2df5, lo: 0x8c, hi: 0x8c}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x96, hi: 0x97}, + {value: 0x46e5, lo: 0x9c, hi: 0x9c}, + {value: 0x46ed, lo: 0x9d, hi: 0x9d}, + // Block 0x18, offset 0xab + {value: 0x0000, lo: 0x03}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x2dfd, lo: 0x94, hi: 0x94}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x19, offset 0xaf + {value: 0x0000, lo: 0x06}, + {value: 0xa000, lo: 0x86, hi: 0x87}, + {value: 0x2e05, lo: 0x8a, hi: 0x8a}, + {value: 0x2e15, lo: 0x8b, hi: 0x8b}, + {value: 0x2e0d, lo: 0x8c, hi: 0x8c}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + // Block 0x1a, offset 0xb6 + {value: 0x1801, lo: 0x04}, + {value: 0xa000, lo: 0x86, hi: 0x86}, + {value: 0x4031, lo: 0x88, hi: 0x88}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x8121, lo: 0x95, hi: 0x96}, + // Block 0x1b, offset 0xbb + {value: 0x0000, lo: 0x02}, + {value: 0x8103, lo: 0xbc, hi: 0xbc}, + {value: 0xa000, lo: 0xbf, hi: 0xbf}, + // Block 0x1c, offset 0xbe + {value: 0x0000, lo: 0x09}, + {value: 0x2e1d, lo: 0x80, hi: 0x80}, + {value: 0x9900, lo: 0x82, hi: 0x82}, + {value: 0xa000, lo: 0x86, hi: 0x86}, + {value: 0x2e25, lo: 0x87, hi: 0x87}, + {value: 0x2e2d, lo: 0x88, hi: 0x88}, + {value: 0x3091, lo: 0x8a, hi: 0x8a}, + {value: 0x2f19, lo: 0x8b, hi: 0x8b}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x95, hi: 0x96}, + // Block 0x1d, offset 0xc8 + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0xbb, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x1e, offset 0xcb + {value: 0x0000, lo: 0x06}, + {value: 0xa000, lo: 0x86, hi: 0x87}, + {value: 0x2e35, lo: 0x8a, hi: 0x8a}, + {value: 0x2e45, lo: 0x8b, hi: 0x8b}, + {value: 0x2e3d, lo: 0x8c, hi: 0x8c}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + // Block 0x1f, offset 0xd2 + {value: 0x6ab3, lo: 0x07}, + {value: 0x9905, lo: 0x8a, hi: 0x8a}, + {value: 0x9900, lo: 0x8f, hi: 0x8f}, + {value: 0xa000, lo: 0x99, hi: 0x99}, + {value: 0x4039, lo: 0x9a, hi: 0x9a}, + {value: 0x3099, lo: 0x9c, hi: 0x9c}, + {value: 0x2f24, lo: 0x9d, hi: 0x9d}, + {value: 0x2e4d, lo: 0x9e, hi: 0x9f}, + // Block 0x20, offset 0xda + {value: 0x0000, lo: 0x03}, + {value: 0x2751, lo: 0xb3, hi: 0xb3}, + {value: 0x8123, lo: 0xb8, hi: 0xb9}, + {value: 0x8105, lo: 0xba, hi: 0xba}, + // Block 0x21, offset 0xde + {value: 0x0000, lo: 0x01}, + {value: 0x8124, lo: 0x88, hi: 0x8b}, + // Block 0x22, offset 0xe0 + {value: 0x0000, lo: 0x03}, + {value: 0x2766, lo: 0xb3, hi: 0xb3}, + {value: 0x8125, lo: 0xb8, hi: 0xb9}, + {value: 0x8105, lo: 0xba, hi: 0xba}, + // Block 0x23, offset 0xe4 + {value: 0x0000, lo: 
0x03}, + {value: 0x8126, lo: 0x88, hi: 0x8b}, + {value: 0x2758, lo: 0x9c, hi: 0x9c}, + {value: 0x275f, lo: 0x9d, hi: 0x9d}, + // Block 0x24, offset 0xe8 + {value: 0x0000, lo: 0x05}, + {value: 0x03fe, lo: 0x8c, hi: 0x8c}, + {value: 0x812e, lo: 0x98, hi: 0x99}, + {value: 0x812e, lo: 0xb5, hi: 0xb5}, + {value: 0x812e, lo: 0xb7, hi: 0xb7}, + {value: 0x812c, lo: 0xb9, hi: 0xb9}, + // Block 0x25, offset 0xee + {value: 0x0000, lo: 0x10}, + {value: 0x2774, lo: 0x83, hi: 0x83}, + {value: 0x277b, lo: 0x8d, hi: 0x8d}, + {value: 0x2782, lo: 0x92, hi: 0x92}, + {value: 0x2789, lo: 0x97, hi: 0x97}, + {value: 0x2790, lo: 0x9c, hi: 0x9c}, + {value: 0x276d, lo: 0xa9, hi: 0xa9}, + {value: 0x8127, lo: 0xb1, hi: 0xb1}, + {value: 0x8128, lo: 0xb2, hi: 0xb2}, + {value: 0x4bc5, lo: 0xb3, hi: 0xb3}, + {value: 0x8129, lo: 0xb4, hi: 0xb4}, + {value: 0x4bce, lo: 0xb5, hi: 0xb5}, + {value: 0x46f5, lo: 0xb6, hi: 0xb6}, + {value: 0x4735, lo: 0xb7, hi: 0xb7}, + {value: 0x46fd, lo: 0xb8, hi: 0xb8}, + {value: 0x4740, lo: 0xb9, hi: 0xb9}, + {value: 0x8128, lo: 0xba, hi: 0xbd}, + // Block 0x26, offset 0xff + {value: 0x0000, lo: 0x0b}, + {value: 0x8128, lo: 0x80, hi: 0x80}, + {value: 0x4bd7, lo: 0x81, hi: 0x81}, + {value: 0x8133, lo: 0x82, hi: 0x83}, + {value: 0x8105, lo: 0x84, hi: 0x84}, + {value: 0x8133, lo: 0x86, hi: 0x87}, + {value: 0x279e, lo: 0x93, hi: 0x93}, + {value: 0x27a5, lo: 0x9d, hi: 0x9d}, + {value: 0x27ac, lo: 0xa2, hi: 0xa2}, + {value: 0x27b3, lo: 0xa7, hi: 0xa7}, + {value: 0x27ba, lo: 0xac, hi: 0xac}, + {value: 0x2797, lo: 0xb9, hi: 0xb9}, + // Block 0x27, offset 0x10b + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0x86, hi: 0x86}, + // Block 0x28, offset 0x10d + {value: 0x0000, lo: 0x05}, + {value: 0xa000, lo: 0xa5, hi: 0xa5}, + {value: 0x2e55, lo: 0xa6, hi: 0xa6}, + {value: 0x9900, lo: 0xae, hi: 0xae}, + {value: 0x8103, lo: 0xb7, hi: 0xb7}, + {value: 0x8105, lo: 0xb9, hi: 0xba}, + // Block 0x29, offset 0x113 + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0x8d, hi: 0x8d}, + // Block 0x2a, offset 0x115 + {value: 0x0000, lo: 0x01}, + {value: 0x0402, lo: 0xbc, hi: 0xbc}, + // Block 0x2b, offset 0x117 + {value: 0x0000, lo: 0x01}, + {value: 0xa000, lo: 0x80, hi: 0x92}, + // Block 0x2c, offset 0x119 + {value: 0x0000, lo: 0x01}, + {value: 0xb900, lo: 0xa1, hi: 0xb5}, + // Block 0x2d, offset 0x11b + {value: 0x0000, lo: 0x01}, + {value: 0x9900, lo: 0xa8, hi: 0xbf}, + // Block 0x2e, offset 0x11d + {value: 0x0000, lo: 0x01}, + {value: 0x9900, lo: 0x80, hi: 0x82}, + // Block 0x2f, offset 0x11f + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0x9d, hi: 0x9f}, + // Block 0x30, offset 0x121 + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x94, hi: 0x95}, + {value: 0x8105, lo: 0xb4, hi: 0xb4}, + // Block 0x31, offset 0x124 + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x92, hi: 0x92}, + {value: 0x8133, lo: 0x9d, hi: 0x9d}, + // Block 0x32, offset 0x127 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xa9, hi: 0xa9}, + // Block 0x33, offset 0x129 + {value: 0x0004, lo: 0x02}, + {value: 0x812f, lo: 0xb9, hi: 0xba}, + {value: 0x812e, lo: 0xbb, hi: 0xbb}, + // Block 0x34, offset 0x12c + {value: 0x0000, lo: 0x02}, + {value: 0x8133, lo: 0x97, hi: 0x97}, + {value: 0x812e, lo: 0x98, hi: 0x98}, + // Block 0x35, offset 0x12f + {value: 0x0000, lo: 0x03}, + {value: 0x8105, lo: 0xa0, hi: 0xa0}, + {value: 0x8133, lo: 0xb5, hi: 0xbc}, + {value: 0x812e, lo: 0xbf, hi: 0xbf}, + // Block 0x36, offset 0x133 + {value: 0x0000, lo: 0x05}, + {value: 0x8133, lo: 0xb0, hi: 0xb4}, + {value: 0x812e, lo: 0xb5, hi: 0xba}, + {value: 
0x8133, lo: 0xbb, hi: 0xbc}, + {value: 0x812e, lo: 0xbd, hi: 0xbd}, + {value: 0x812e, lo: 0xbf, hi: 0xbf}, + // Block 0x37, offset 0x139 + {value: 0x0000, lo: 0x06}, + {value: 0x812e, lo: 0x80, hi: 0x80}, + {value: 0x8133, lo: 0x81, hi: 0x82}, + {value: 0x812e, lo: 0x83, hi: 0x84}, + {value: 0x8133, lo: 0x85, hi: 0x89}, + {value: 0x812e, lo: 0x8a, hi: 0x8a}, + {value: 0x8133, lo: 0x8b, hi: 0x8e}, + // Block 0x38, offset 0x140 + {value: 0x0000, lo: 0x08}, + {value: 0x2e9d, lo: 0x80, hi: 0x80}, + {value: 0x2ea5, lo: 0x81, hi: 0x81}, + {value: 0xa000, lo: 0x82, hi: 0x82}, + {value: 0x2ead, lo: 0x83, hi: 0x83}, + {value: 0x8105, lo: 0x84, hi: 0x84}, + {value: 0x8133, lo: 0xab, hi: 0xab}, + {value: 0x812e, lo: 0xac, hi: 0xac}, + {value: 0x8133, lo: 0xad, hi: 0xb3}, + // Block 0x39, offset 0x149 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0xaa, hi: 0xab}, + // Block 0x3a, offset 0x14b + {value: 0x0000, lo: 0x02}, + {value: 0x8103, lo: 0xa6, hi: 0xa6}, + {value: 0x8105, lo: 0xb2, hi: 0xb3}, + // Block 0x3b, offset 0x14e + {value: 0x0000, lo: 0x01}, + {value: 0x8103, lo: 0xb7, hi: 0xb7}, + // Block 0x3c, offset 0x150 + {value: 0x0000, lo: 0x0a}, + {value: 0x8133, lo: 0x90, hi: 0x92}, + {value: 0x8101, lo: 0x94, hi: 0x94}, + {value: 0x812e, lo: 0x95, hi: 0x99}, + {value: 0x8133, lo: 0x9a, hi: 0x9b}, + {value: 0x812e, lo: 0x9c, hi: 0x9f}, + {value: 0x8133, lo: 0xa0, hi: 0xa0}, + {value: 0x8101, lo: 0xa2, hi: 0xa8}, + {value: 0x812e, lo: 0xad, hi: 0xad}, + {value: 0x8133, lo: 0xb4, hi: 0xb4}, + {value: 0x8133, lo: 0xb8, hi: 0xb9}, + // Block 0x3d, offset 0x15b + {value: 0x0002, lo: 0x0a}, + {value: 0x0043, lo: 0xac, hi: 0xac}, + {value: 0x00d1, lo: 0xad, hi: 0xad}, + {value: 0x0045, lo: 0xae, hi: 0xae}, + {value: 0x0049, lo: 0xb0, hi: 0xb1}, + {value: 0x00ec, lo: 0xb2, hi: 0xb2}, + {value: 0x004f, lo: 0xb3, hi: 0xba}, + {value: 0x005f, lo: 0xbc, hi: 0xbc}, + {value: 0x00fe, lo: 0xbd, hi: 0xbd}, + {value: 0x0061, lo: 0xbe, hi: 0xbe}, + {value: 0x0065, lo: 0xbf, hi: 0xbf}, + // Block 0x3e, offset 0x166 + {value: 0x0000, lo: 0x0d}, + {value: 0x0001, lo: 0x80, hi: 0x8a}, + {value: 0x0532, lo: 0x91, hi: 0x91}, + {value: 0x43dc, lo: 0x97, hi: 0x97}, + {value: 0x001d, lo: 0xa4, hi: 0xa4}, + {value: 0x19a0, lo: 0xa5, hi: 0xa5}, + {value: 0x1c8c, lo: 0xa6, hi: 0xa6}, + {value: 0x0001, lo: 0xaf, hi: 0xaf}, + {value: 0x27c1, lo: 0xb3, hi: 0xb3}, + {value: 0x2935, lo: 0xb4, hi: 0xb4}, + {value: 0x27c8, lo: 0xb6, hi: 0xb6}, + {value: 0x293f, lo: 0xb7, hi: 0xb7}, + {value: 0x199a, lo: 0xbc, hi: 0xbc}, + {value: 0x43aa, lo: 0xbe, hi: 0xbe}, + // Block 0x3f, offset 0x174 + {value: 0x0002, lo: 0x0d}, + {value: 0x1a60, lo: 0x87, hi: 0x87}, + {value: 0x1a5d, lo: 0x88, hi: 0x88}, + {value: 0x199d, lo: 0x89, hi: 0x89}, + {value: 0x2ac5, lo: 0x97, hi: 0x97}, + {value: 0x0001, lo: 0x9f, hi: 0x9f}, + {value: 0x0021, lo: 0xb0, hi: 0xb0}, + {value: 0x0093, lo: 0xb1, hi: 0xb1}, + {value: 0x0029, lo: 0xb4, hi: 0xb9}, + {value: 0x0017, lo: 0xba, hi: 0xba}, + {value: 0x055e, lo: 0xbb, hi: 0xbb}, + {value: 0x003b, lo: 0xbc, hi: 0xbc}, + {value: 0x0011, lo: 0xbd, hi: 0xbe}, + {value: 0x009d, lo: 0xbf, hi: 0xbf}, + // Block 0x40, offset 0x182 + {value: 0x0002, lo: 0x0f}, + {value: 0x0021, lo: 0x80, hi: 0x89}, + {value: 0x0017, lo: 0x8a, hi: 0x8a}, + {value: 0x055e, lo: 0x8b, hi: 0x8b}, + {value: 0x003b, lo: 0x8c, hi: 0x8c}, + {value: 0x0011, lo: 0x8d, hi: 0x8e}, + {value: 0x0083, lo: 0x90, hi: 0x90}, + {value: 0x008b, lo: 0x91, hi: 0x91}, + {value: 0x009f, lo: 0x92, hi: 0x92}, + {value: 0x00b1, lo: 0x93, hi: 0x93}, + {value: 
0x011f, lo: 0x94, hi: 0x94}, + {value: 0x0091, lo: 0x95, hi: 0x95}, + {value: 0x0097, lo: 0x96, hi: 0x99}, + {value: 0x00a1, lo: 0x9a, hi: 0x9a}, + {value: 0x00a7, lo: 0x9b, hi: 0x9c}, + {value: 0x1ac9, lo: 0xa8, hi: 0xa8}, + // Block 0x41, offset 0x192 + {value: 0x0000, lo: 0x0d}, + {value: 0x8133, lo: 0x90, hi: 0x91}, + {value: 0x8101, lo: 0x92, hi: 0x93}, + {value: 0x8133, lo: 0x94, hi: 0x97}, + {value: 0x8101, lo: 0x98, hi: 0x9a}, + {value: 0x8133, lo: 0x9b, hi: 0x9c}, + {value: 0x8133, lo: 0xa1, hi: 0xa1}, + {value: 0x8101, lo: 0xa5, hi: 0xa6}, + {value: 0x8133, lo: 0xa7, hi: 0xa7}, + {value: 0x812e, lo: 0xa8, hi: 0xa8}, + {value: 0x8133, lo: 0xa9, hi: 0xa9}, + {value: 0x8101, lo: 0xaa, hi: 0xab}, + {value: 0x812e, lo: 0xac, hi: 0xaf}, + {value: 0x8133, lo: 0xb0, hi: 0xb0}, + // Block 0x42, offset 0x1a0 + {value: 0x0007, lo: 0x06}, + {value: 0x22b0, lo: 0x89, hi: 0x89}, + {value: 0xa000, lo: 0x90, hi: 0x90}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0xa000, lo: 0x94, hi: 0x94}, + {value: 0x3cfa, lo: 0x9a, hi: 0x9b}, + {value: 0x3d08, lo: 0xae, hi: 0xae}, + // Block 0x43, offset 0x1a7 + {value: 0x000e, lo: 0x05}, + {value: 0x3d0f, lo: 0x8d, hi: 0x8e}, + {value: 0x3d16, lo: 0x8f, hi: 0x8f}, + {value: 0xa000, lo: 0x90, hi: 0x90}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0xa000, lo: 0x94, hi: 0x94}, + // Block 0x44, offset 0x1ad + {value: 0x017a, lo: 0x0e}, + {value: 0xa000, lo: 0x83, hi: 0x83}, + {value: 0x3d24, lo: 0x84, hi: 0x84}, + {value: 0xa000, lo: 0x88, hi: 0x88}, + {value: 0x3d2b, lo: 0x89, hi: 0x89}, + {value: 0xa000, lo: 0x8b, hi: 0x8b}, + {value: 0x3d32, lo: 0x8c, hi: 0x8c}, + {value: 0xa000, lo: 0xa3, hi: 0xa3}, + {value: 0x3d39, lo: 0xa4, hi: 0xa4}, + {value: 0xa000, lo: 0xa5, hi: 0xa5}, + {value: 0x3d40, lo: 0xa6, hi: 0xa6}, + {value: 0x27cf, lo: 0xac, hi: 0xad}, + {value: 0x27d6, lo: 0xaf, hi: 0xaf}, + {value: 0x2953, lo: 0xb0, hi: 0xb0}, + {value: 0xa000, lo: 0xbc, hi: 0xbc}, + // Block 0x45, offset 0x1bc + {value: 0x0007, lo: 0x03}, + {value: 0x3da9, lo: 0xa0, hi: 0xa1}, + {value: 0x3dd3, lo: 0xa2, hi: 0xa3}, + {value: 0x3dfd, lo: 0xaa, hi: 0xad}, + // Block 0x46, offset 0x1c0 + {value: 0x0004, lo: 0x01}, + {value: 0x0586, lo: 0xa9, hi: 0xaa}, + // Block 0x47, offset 0x1c2 + {value: 0x0002, lo: 0x03}, + {value: 0x0057, lo: 0x80, hi: 0x8f}, + {value: 0x0083, lo: 0x90, hi: 0xa9}, + {value: 0x0021, lo: 0xaa, hi: 0xaa}, + // Block 0x48, offset 0x1c6 + {value: 0x0000, lo: 0x01}, + {value: 0x2ad2, lo: 0x8c, hi: 0x8c}, + // Block 0x49, offset 0x1c8 + {value: 0x0266, lo: 0x02}, + {value: 0x1cbc, lo: 0xb4, hi: 0xb4}, + {value: 0x1a5a, lo: 0xb5, hi: 0xb6}, + // Block 0x4a, offset 0x1cb + {value: 0x0000, lo: 0x01}, + {value: 0x461e, lo: 0x9c, hi: 0x9c}, + // Block 0x4b, offset 0x1cd + {value: 0x0000, lo: 0x02}, + {value: 0x0095, lo: 0xbc, hi: 0xbc}, + {value: 0x006d, lo: 0xbd, hi: 0xbd}, + // Block 0x4c, offset 0x1d0 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xaf, hi: 0xb1}, + // Block 0x4d, offset 0x1d2 + {value: 0x0000, lo: 0x02}, + {value: 0x057a, lo: 0xaf, hi: 0xaf}, + {value: 0x8105, lo: 0xbf, hi: 0xbf}, + // Block 0x4e, offset 0x1d5 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xa0, hi: 0xbf}, + // Block 0x4f, offset 0x1d7 + {value: 0x0000, lo: 0x01}, + {value: 0x0ebe, lo: 0x9f, hi: 0x9f}, + // Block 0x50, offset 0x1d9 + {value: 0x0000, lo: 0x01}, + {value: 0x172a, lo: 0xb3, hi: 0xb3}, + // Block 0x51, offset 0x1db + {value: 0x0004, lo: 0x0b}, + {value: 0x1692, lo: 0x80, hi: 0x82}, + {value: 0x16aa, lo: 0x83, hi: 0x83}, + {value: 0x16c2, lo: 
0x84, hi: 0x85}, + {value: 0x16d2, lo: 0x86, hi: 0x89}, + {value: 0x16e6, lo: 0x8a, hi: 0x8c}, + {value: 0x16fa, lo: 0x8d, hi: 0x8d}, + {value: 0x1702, lo: 0x8e, hi: 0x8e}, + {value: 0x170a, lo: 0x8f, hi: 0x90}, + {value: 0x1716, lo: 0x91, hi: 0x93}, + {value: 0x1726, lo: 0x94, hi: 0x94}, + {value: 0x172e, lo: 0x95, hi: 0x95}, + // Block 0x52, offset 0x1e7 + {value: 0x0004, lo: 0x09}, + {value: 0x0001, lo: 0x80, hi: 0x80}, + {value: 0x812d, lo: 0xaa, hi: 0xaa}, + {value: 0x8132, lo: 0xab, hi: 0xab}, + {value: 0x8134, lo: 0xac, hi: 0xac}, + {value: 0x812f, lo: 0xad, hi: 0xad}, + {value: 0x8130, lo: 0xae, hi: 0xae}, + {value: 0x8130, lo: 0xaf, hi: 0xaf}, + {value: 0x05ae, lo: 0xb6, hi: 0xb6}, + {value: 0x0982, lo: 0xb8, hi: 0xba}, + // Block 0x53, offset 0x1f1 + {value: 0x0006, lo: 0x09}, + {value: 0x0406, lo: 0xb1, hi: 0xb1}, + {value: 0x040a, lo: 0xb2, hi: 0xb2}, + {value: 0x4b7c, lo: 0xb3, hi: 0xb3}, + {value: 0x040e, lo: 0xb4, hi: 0xb4}, + {value: 0x4b82, lo: 0xb5, hi: 0xb6}, + {value: 0x0412, lo: 0xb7, hi: 0xb7}, + {value: 0x0416, lo: 0xb8, hi: 0xb8}, + {value: 0x041a, lo: 0xb9, hi: 0xb9}, + {value: 0x4b8e, lo: 0xba, hi: 0xbf}, + // Block 0x54, offset 0x1fb + {value: 0x0000, lo: 0x02}, + {value: 0x8133, lo: 0xaf, hi: 0xaf}, + {value: 0x8133, lo: 0xb4, hi: 0xbd}, + // Block 0x55, offset 0x1fe + {value: 0x0000, lo: 0x03}, + {value: 0x02d8, lo: 0x9c, hi: 0x9c}, + {value: 0x02de, lo: 0x9d, hi: 0x9d}, + {value: 0x8133, lo: 0x9e, hi: 0x9f}, + // Block 0x56, offset 0x202 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xb0, hi: 0xb1}, + // Block 0x57, offset 0x204 + {value: 0x0000, lo: 0x01}, + {value: 0x173e, lo: 0xb0, hi: 0xb0}, + // Block 0x58, offset 0x206 + {value: 0x0006, lo: 0x04}, + {value: 0x0047, lo: 0xb2, hi: 0xb3}, + {value: 0x0063, lo: 0xb4, hi: 0xb4}, + {value: 0x00dd, lo: 0xb8, hi: 0xb8}, + {value: 0x00e9, lo: 0xb9, hi: 0xb9}, + // Block 0x59, offset 0x20b + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x86, hi: 0x86}, + {value: 0x8105, lo: 0xac, hi: 0xac}, + // Block 0x5a, offset 0x20e + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x84, hi: 0x84}, + {value: 0x8133, lo: 0xa0, hi: 0xb1}, + // Block 0x5b, offset 0x211 + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0xab, hi: 0xad}, + // Block 0x5c, offset 0x213 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x93, hi: 0x93}, + // Block 0x5d, offset 0x215 + {value: 0x0000, lo: 0x01}, + {value: 0x8103, lo: 0xb3, hi: 0xb3}, + // Block 0x5e, offset 0x217 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x80, hi: 0x80}, + // Block 0x5f, offset 0x219 + {value: 0x0000, lo: 0x05}, + {value: 0x8133, lo: 0xb0, hi: 0xb0}, + {value: 0x8133, lo: 0xb2, hi: 0xb3}, + {value: 0x812e, lo: 0xb4, hi: 0xb4}, + {value: 0x8133, lo: 0xb7, hi: 0xb8}, + {value: 0x8133, lo: 0xbe, hi: 0xbf}, + // Block 0x60, offset 0x21f + {value: 0x0000, lo: 0x02}, + {value: 0x8133, lo: 0x81, hi: 0x81}, + {value: 0x8105, lo: 0xb6, hi: 0xb6}, + // Block 0x61, offset 0x222 + {value: 0x000c, lo: 0x04}, + {value: 0x173a, lo: 0x9c, hi: 0x9d}, + {value: 0x014f, lo: 0x9e, hi: 0x9e}, + {value: 0x174a, lo: 0x9f, hi: 0x9f}, + {value: 0x01a6, lo: 0xa9, hi: 0xa9}, + // Block 0x62, offset 0x227 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0xad, hi: 0xad}, + // Block 0x63, offset 0x229 + {value: 0x0000, lo: 0x06}, + {value: 0xe500, lo: 0x80, hi: 0x80}, + {value: 0xc600, lo: 0x81, hi: 0x9b}, + {value: 0xe500, lo: 0x9c, hi: 0x9c}, + {value: 0xc600, lo: 0x9d, hi: 0xb7}, + {value: 0xe500, lo: 0xb8, hi: 0xb8}, + {value: 0xc600, lo: 0xb9, hi: 0xbf}, + // Block 0x64, 
offset 0x230 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x93}, + {value: 0xe500, lo: 0x94, hi: 0x94}, + {value: 0xc600, lo: 0x95, hi: 0xaf}, + {value: 0xe500, lo: 0xb0, hi: 0xb0}, + {value: 0xc600, lo: 0xb1, hi: 0xbf}, + // Block 0x65, offset 0x236 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x8b}, + {value: 0xe500, lo: 0x8c, hi: 0x8c}, + {value: 0xc600, lo: 0x8d, hi: 0xa7}, + {value: 0xe500, lo: 0xa8, hi: 0xa8}, + {value: 0xc600, lo: 0xa9, hi: 0xbf}, + // Block 0x66, offset 0x23c + {value: 0x0000, lo: 0x07}, + {value: 0xc600, lo: 0x80, hi: 0x83}, + {value: 0xe500, lo: 0x84, hi: 0x84}, + {value: 0xc600, lo: 0x85, hi: 0x9f}, + {value: 0xe500, lo: 0xa0, hi: 0xa0}, + {value: 0xc600, lo: 0xa1, hi: 0xbb}, + {value: 0xe500, lo: 0xbc, hi: 0xbc}, + {value: 0xc600, lo: 0xbd, hi: 0xbf}, + // Block 0x67, offset 0x244 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x97}, + {value: 0xe500, lo: 0x98, hi: 0x98}, + {value: 0xc600, lo: 0x99, hi: 0xb3}, + {value: 0xe500, lo: 0xb4, hi: 0xb4}, + {value: 0xc600, lo: 0xb5, hi: 0xbf}, + // Block 0x68, offset 0x24a + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x8f}, + {value: 0xe500, lo: 0x90, hi: 0x90}, + {value: 0xc600, lo: 0x91, hi: 0xab}, + {value: 0xe500, lo: 0xac, hi: 0xac}, + {value: 0xc600, lo: 0xad, hi: 0xbf}, + // Block 0x69, offset 0x250 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x87}, + {value: 0xe500, lo: 0x88, hi: 0x88}, + {value: 0xc600, lo: 0x89, hi: 0xa3}, + {value: 0xe500, lo: 0xa4, hi: 0xa4}, + {value: 0xc600, lo: 0xa5, hi: 0xbf}, + // Block 0x6a, offset 0x256 + {value: 0x0000, lo: 0x03}, + {value: 0xc600, lo: 0x80, hi: 0x87}, + {value: 0xe500, lo: 0x88, hi: 0x88}, + {value: 0xc600, lo: 0x89, hi: 0xa3}, + // Block 0x6b, offset 0x25a + {value: 0x0002, lo: 0x01}, + {value: 0x0003, lo: 0x81, hi: 0xbf}, + // Block 0x6c, offset 0x25c + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0xbd, hi: 0xbd}, + // Block 0x6d, offset 0x25e + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0xa0, hi: 0xa0}, + // Block 0x6e, offset 0x260 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xb6, hi: 0xba}, + // Block 0x6f, offset 0x262 + {value: 0x002d, lo: 0x05}, + {value: 0x812e, lo: 0x8d, hi: 0x8d}, + {value: 0x8133, lo: 0x8f, hi: 0x8f}, + {value: 0x8133, lo: 0xb8, hi: 0xb8}, + {value: 0x8101, lo: 0xb9, hi: 0xba}, + {value: 0x8105, lo: 0xbf, hi: 0xbf}, + // Block 0x70, offset 0x268 + {value: 0x0000, lo: 0x02}, + {value: 0x8133, lo: 0xa5, hi: 0xa5}, + {value: 0x812e, lo: 0xa6, hi: 0xa6}, + // Block 0x71, offset 0x26b + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xa4, hi: 0xa7}, + // Block 0x72, offset 0x26d + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xab, hi: 0xac}, + // Block 0x73, offset 0x26f + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0xbd, hi: 0xbf}, + // Block 0x74, offset 0x271 + {value: 0x0000, lo: 0x05}, + {value: 0x812e, lo: 0x86, hi: 0x87}, + {value: 0x8133, lo: 0x88, hi: 0x8a}, + {value: 0x812e, lo: 0x8b, hi: 0x8b}, + {value: 0x8133, lo: 0x8c, hi: 0x8c}, + {value: 0x812e, lo: 0x8d, hi: 0x90}, + // Block 0x75, offset 0x277 + {value: 0x0005, lo: 0x03}, + {value: 0x8133, lo: 0x82, hi: 0x82}, + {value: 0x812e, lo: 0x83, hi: 0x84}, + {value: 0x812e, lo: 0x85, hi: 0x85}, + // Block 0x76, offset 0x27b + {value: 0x0000, lo: 0x03}, + {value: 0x8105, lo: 0x86, hi: 0x86}, + {value: 0x8105, lo: 0xb0, hi: 0xb0}, + {value: 0x8105, lo: 0xbf, hi: 0xbf}, + // Block 0x77, offset 0x27f + {value: 0x17fe, lo: 0x07}, + {value: 0xa000, lo: 0x99, hi: 0x99}, + {value: 
0x4379, lo: 0x9a, hi: 0x9a}, + {value: 0xa000, lo: 0x9b, hi: 0x9b}, + {value: 0x4383, lo: 0x9c, hi: 0x9c}, + {value: 0xa000, lo: 0xa5, hi: 0xa5}, + {value: 0x438d, lo: 0xab, hi: 0xab}, + {value: 0x8105, lo: 0xb9, hi: 0xba}, + // Block 0x78, offset 0x287 + {value: 0x0000, lo: 0x06}, + {value: 0x8133, lo: 0x80, hi: 0x82}, + {value: 0x9900, lo: 0xa7, hi: 0xa7}, + {value: 0x2eb5, lo: 0xae, hi: 0xae}, + {value: 0x2ebf, lo: 0xaf, hi: 0xaf}, + {value: 0xa000, lo: 0xb1, hi: 0xb2}, + {value: 0x8105, lo: 0xb3, hi: 0xb4}, + // Block 0x79, offset 0x28e + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x80, hi: 0x80}, + {value: 0x8103, lo: 0x8a, hi: 0x8a}, + // Block 0x7a, offset 0x291 + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0xb5, hi: 0xb5}, + {value: 0x8103, lo: 0xb6, hi: 0xb6}, + // Block 0x7b, offset 0x294 + {value: 0x0002, lo: 0x01}, + {value: 0x8103, lo: 0xa9, hi: 0xaa}, + // Block 0x7c, offset 0x296 + {value: 0x0000, lo: 0x02}, + {value: 0x8103, lo: 0xbb, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x7d, offset 0x299 + {value: 0x0000, lo: 0x07}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2ec9, lo: 0x8b, hi: 0x8b}, + {value: 0x2ed3, lo: 0x8c, hi: 0x8c}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + {value: 0x8133, lo: 0xa6, hi: 0xac}, + {value: 0x8133, lo: 0xb0, hi: 0xb4}, + // Block 0x7e, offset 0x2a1 + {value: 0x0000, lo: 0x03}, + {value: 0x8105, lo: 0x82, hi: 0x82}, + {value: 0x8103, lo: 0x86, hi: 0x86}, + {value: 0x8133, lo: 0x9e, hi: 0x9e}, + // Block 0x7f, offset 0x2a5 + {value: 0x6a23, lo: 0x06}, + {value: 0x9900, lo: 0xb0, hi: 0xb0}, + {value: 0xa000, lo: 0xb9, hi: 0xb9}, + {value: 0x9900, lo: 0xba, hi: 0xba}, + {value: 0x2ee7, lo: 0xbb, hi: 0xbb}, + {value: 0x2edd, lo: 0xbc, hi: 0xbd}, + {value: 0x2ef1, lo: 0xbe, hi: 0xbe}, + // Block 0x80, offset 0x2ac + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x82, hi: 0x82}, + {value: 0x8103, lo: 0x83, hi: 0x83}, + // Block 0x81, offset 0x2af + {value: 0x0000, lo: 0x05}, + {value: 0x9900, lo: 0xaf, hi: 0xaf}, + {value: 0xa000, lo: 0xb8, hi: 0xb9}, + {value: 0x2efb, lo: 0xba, hi: 0xba}, + {value: 0x2f05, lo: 0xbb, hi: 0xbb}, + {value: 0x8105, lo: 0xbf, hi: 0xbf}, + // Block 0x82, offset 0x2b5 + {value: 0x0000, lo: 0x01}, + {value: 0x8103, lo: 0x80, hi: 0x80}, + // Block 0x83, offset 0x2b7 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0xbf, hi: 0xbf}, + // Block 0x84, offset 0x2b9 + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0xb6, hi: 0xb6}, + {value: 0x8103, lo: 0xb7, hi: 0xb7}, + // Block 0x85, offset 0x2bc + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0xab, hi: 0xab}, + // Block 0x86, offset 0x2be + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0xb9, hi: 0xb9}, + {value: 0x8103, lo: 0xba, hi: 0xba}, + // Block 0x87, offset 0x2c1 + {value: 0x0000, lo: 0x04}, + {value: 0x9900, lo: 0xb0, hi: 0xb0}, + {value: 0xa000, lo: 0xb5, hi: 0xb5}, + {value: 0x2f0f, lo: 0xb8, hi: 0xb8}, + {value: 0x8105, lo: 0xbd, hi: 0xbe}, + // Block 0x88, offset 0x2c6 + {value: 0x0000, lo: 0x01}, + {value: 0x8103, lo: 0x83, hi: 0x83}, + // Block 0x89, offset 0x2c8 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0xa0, hi: 0xa0}, + // Block 0x8a, offset 0x2ca + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0xb4, hi: 0xb4}, + // Block 0x8b, offset 0x2cc + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x87, hi: 0x87}, + // Block 0x8c, offset 0x2ce + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x99, hi: 0x99}, + // Block 0x8d, offset 0x2d0 + {value: 0x0000, lo: 0x02}, + 
{value: 0x8103, lo: 0x82, hi: 0x82}, + {value: 0x8105, lo: 0x84, hi: 0x85}, + // Block 0x8e, offset 0x2d3 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x97, hi: 0x97}, + // Block 0x8f, offset 0x2d5 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x81, hi: 0x82}, + // Block 0x90, offset 0x2d7 + {value: 0x0000, lo: 0x01}, + {value: 0x8101, lo: 0xb0, hi: 0xb4}, + // Block 0x91, offset 0x2d9 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xb0, hi: 0xb6}, + // Block 0x92, offset 0x2db + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0xb0, hi: 0xb1}, + // Block 0x93, offset 0x2dd + {value: 0x0000, lo: 0x01}, + {value: 0x8101, lo: 0x9e, hi: 0x9e}, + // Block 0x94, offset 0x2df + {value: 0x0000, lo: 0x0c}, + {value: 0x470d, lo: 0x9e, hi: 0x9e}, + {value: 0x4717, lo: 0x9f, hi: 0x9f}, + {value: 0x474b, lo: 0xa0, hi: 0xa0}, + {value: 0x4759, lo: 0xa1, hi: 0xa1}, + {value: 0x4767, lo: 0xa2, hi: 0xa2}, + {value: 0x4775, lo: 0xa3, hi: 0xa3}, + {value: 0x4783, lo: 0xa4, hi: 0xa4}, + {value: 0x812c, lo: 0xa5, hi: 0xa6}, + {value: 0x8101, lo: 0xa7, hi: 0xa9}, + {value: 0x8131, lo: 0xad, hi: 0xad}, + {value: 0x812c, lo: 0xae, hi: 0xb2}, + {value: 0x812e, lo: 0xbb, hi: 0xbf}, + // Block 0x95, offset 0x2ec + {value: 0x0000, lo: 0x09}, + {value: 0x812e, lo: 0x80, hi: 0x82}, + {value: 0x8133, lo: 0x85, hi: 0x89}, + {value: 0x812e, lo: 0x8a, hi: 0x8b}, + {value: 0x8133, lo: 0xaa, hi: 0xad}, + {value: 0x4721, lo: 0xbb, hi: 0xbb}, + {value: 0x472b, lo: 0xbc, hi: 0xbc}, + {value: 0x4791, lo: 0xbd, hi: 0xbd}, + {value: 0x47ad, lo: 0xbe, hi: 0xbe}, + {value: 0x479f, lo: 0xbf, hi: 0xbf}, + // Block 0x96, offset 0x2f6 + {value: 0x0000, lo: 0x01}, + {value: 0x47bb, lo: 0x80, hi: 0x80}, + // Block 0x97, offset 0x2f8 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0x82, hi: 0x84}, + // Block 0x98, offset 0x2fa + {value: 0x0002, lo: 0x03}, + {value: 0x0043, lo: 0x80, hi: 0x99}, + {value: 0x0083, lo: 0x9a, hi: 0xb3}, + {value: 0x0043, lo: 0xb4, hi: 0xbf}, + // Block 0x99, offset 0x2fe + {value: 0x0002, lo: 0x04}, + {value: 0x005b, lo: 0x80, hi: 0x8d}, + {value: 0x0083, lo: 0x8e, hi: 0x94}, + {value: 0x0093, lo: 0x96, hi: 0xa7}, + {value: 0x0043, lo: 0xa8, hi: 0xbf}, + // Block 0x9a, offset 0x303 + {value: 0x0002, lo: 0x0b}, + {value: 0x0073, lo: 0x80, hi: 0x81}, + {value: 0x0083, lo: 0x82, hi: 0x9b}, + {value: 0x0043, lo: 0x9c, hi: 0x9c}, + {value: 0x0047, lo: 0x9e, hi: 0x9f}, + {value: 0x004f, lo: 0xa2, hi: 0xa2}, + {value: 0x0055, lo: 0xa5, hi: 0xa6}, + {value: 0x005d, lo: 0xa9, hi: 0xac}, + {value: 0x0067, lo: 0xae, hi: 0xb5}, + {value: 0x0083, lo: 0xb6, hi: 0xb9}, + {value: 0x008d, lo: 0xbb, hi: 0xbb}, + {value: 0x0091, lo: 0xbd, hi: 0xbf}, + // Block 0x9b, offset 0x30f + {value: 0x0002, lo: 0x04}, + {value: 0x0097, lo: 0x80, hi: 0x83}, + {value: 0x00a1, lo: 0x85, hi: 0x8f}, + {value: 0x0043, lo: 0x90, hi: 0xa9}, + {value: 0x0083, lo: 0xaa, hi: 0xbf}, + // Block 0x9c, offset 0x314 + {value: 0x0002, lo: 0x08}, + {value: 0x00af, lo: 0x80, hi: 0x83}, + {value: 0x0043, lo: 0x84, hi: 0x85}, + {value: 0x0049, lo: 0x87, hi: 0x8a}, + {value: 0x0055, lo: 0x8d, hi: 0x94}, + {value: 0x0067, lo: 0x96, hi: 0x9c}, + {value: 0x0083, lo: 0x9e, hi: 0xb7}, + {value: 0x0043, lo: 0xb8, hi: 0xb9}, + {value: 0x0049, lo: 0xbb, hi: 0xbe}, + // Block 0x9d, offset 0x31d + {value: 0x0002, lo: 0x05}, + {value: 0x0053, lo: 0x80, hi: 0x84}, + {value: 0x005f, lo: 0x86, hi: 0x86}, + {value: 0x0067, lo: 0x8a, hi: 0x90}, + {value: 0x0083, lo: 0x92, hi: 0xab}, + {value: 0x0043, lo: 0xac, hi: 0xbf}, + // Block 0x9e, offset 0x323 + 
{value: 0x0002, lo: 0x04}, + {value: 0x006b, lo: 0x80, hi: 0x85}, + {value: 0x0083, lo: 0x86, hi: 0x9f}, + {value: 0x0043, lo: 0xa0, hi: 0xb9}, + {value: 0x0083, lo: 0xba, hi: 0xbf}, + // Block 0x9f, offset 0x328 + {value: 0x0002, lo: 0x03}, + {value: 0x008f, lo: 0x80, hi: 0x93}, + {value: 0x0043, lo: 0x94, hi: 0xad}, + {value: 0x0083, lo: 0xae, hi: 0xbf}, + // Block 0xa0, offset 0x32c + {value: 0x0002, lo: 0x04}, + {value: 0x00a7, lo: 0x80, hi: 0x87}, + {value: 0x0043, lo: 0x88, hi: 0xa1}, + {value: 0x0083, lo: 0xa2, hi: 0xbb}, + {value: 0x0043, lo: 0xbc, hi: 0xbf}, + // Block 0xa1, offset 0x331 + {value: 0x0002, lo: 0x03}, + {value: 0x004b, lo: 0x80, hi: 0x95}, + {value: 0x0083, lo: 0x96, hi: 0xaf}, + {value: 0x0043, lo: 0xb0, hi: 0xbf}, + // Block 0xa2, offset 0x335 + {value: 0x0003, lo: 0x0f}, + {value: 0x023c, lo: 0x80, hi: 0x80}, + {value: 0x0556, lo: 0x81, hi: 0x81}, + {value: 0x023f, lo: 0x82, hi: 0x9a}, + {value: 0x0552, lo: 0x9b, hi: 0x9b}, + {value: 0x024b, lo: 0x9c, hi: 0x9c}, + {value: 0x0254, lo: 0x9d, hi: 0x9d}, + {value: 0x025a, lo: 0x9e, hi: 0x9e}, + {value: 0x027e, lo: 0x9f, hi: 0x9f}, + {value: 0x026f, lo: 0xa0, hi: 0xa0}, + {value: 0x026c, lo: 0xa1, hi: 0xa1}, + {value: 0x01f7, lo: 0xa2, hi: 0xb2}, + {value: 0x020c, lo: 0xb3, hi: 0xb3}, + {value: 0x022a, lo: 0xb4, hi: 0xba}, + {value: 0x0556, lo: 0xbb, hi: 0xbb}, + {value: 0x023f, lo: 0xbc, hi: 0xbf}, + // Block 0xa3, offset 0x345 + {value: 0x0003, lo: 0x0d}, + {value: 0x024b, lo: 0x80, hi: 0x94}, + {value: 0x0552, lo: 0x95, hi: 0x95}, + {value: 0x024b, lo: 0x96, hi: 0x96}, + {value: 0x0254, lo: 0x97, hi: 0x97}, + {value: 0x025a, lo: 0x98, hi: 0x98}, + {value: 0x027e, lo: 0x99, hi: 0x99}, + {value: 0x026f, lo: 0x9a, hi: 0x9a}, + {value: 0x026c, lo: 0x9b, hi: 0x9b}, + {value: 0x01f7, lo: 0x9c, hi: 0xac}, + {value: 0x020c, lo: 0xad, hi: 0xad}, + {value: 0x022a, lo: 0xae, hi: 0xb4}, + {value: 0x0556, lo: 0xb5, hi: 0xb5}, + {value: 0x023f, lo: 0xb6, hi: 0xbf}, + // Block 0xa4, offset 0x353 + {value: 0x0003, lo: 0x0d}, + {value: 0x025d, lo: 0x80, hi: 0x8e}, + {value: 0x0552, lo: 0x8f, hi: 0x8f}, + {value: 0x024b, lo: 0x90, hi: 0x90}, + {value: 0x0254, lo: 0x91, hi: 0x91}, + {value: 0x025a, lo: 0x92, hi: 0x92}, + {value: 0x027e, lo: 0x93, hi: 0x93}, + {value: 0x026f, lo: 0x94, hi: 0x94}, + {value: 0x026c, lo: 0x95, hi: 0x95}, + {value: 0x01f7, lo: 0x96, hi: 0xa6}, + {value: 0x020c, lo: 0xa7, hi: 0xa7}, + {value: 0x022a, lo: 0xa8, hi: 0xae}, + {value: 0x0556, lo: 0xaf, hi: 0xaf}, + {value: 0x023f, lo: 0xb0, hi: 0xbf}, + // Block 0xa5, offset 0x361 + {value: 0x0003, lo: 0x0d}, + {value: 0x026f, lo: 0x80, hi: 0x88}, + {value: 0x0552, lo: 0x89, hi: 0x89}, + {value: 0x024b, lo: 0x8a, hi: 0x8a}, + {value: 0x0254, lo: 0x8b, hi: 0x8b}, + {value: 0x025a, lo: 0x8c, hi: 0x8c}, + {value: 0x027e, lo: 0x8d, hi: 0x8d}, + {value: 0x026f, lo: 0x8e, hi: 0x8e}, + {value: 0x026c, lo: 0x8f, hi: 0x8f}, + {value: 0x01f7, lo: 0x90, hi: 0xa0}, + {value: 0x020c, lo: 0xa1, hi: 0xa1}, + {value: 0x022a, lo: 0xa2, hi: 0xa8}, + {value: 0x0556, lo: 0xa9, hi: 0xa9}, + {value: 0x023f, lo: 0xaa, hi: 0xbf}, + // Block 0xa6, offset 0x36f + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0x8f, hi: 0x8f}, + // Block 0xa7, offset 0x371 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xae, hi: 0xae}, + // Block 0xa8, offset 0x373 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xac, hi: 0xaf}, + // Block 0xa9, offset 0x375 + {value: 0x0000, lo: 0x03}, + {value: 0x8134, lo: 0xac, hi: 0xad}, + {value: 0x812e, lo: 0xae, hi: 0xae}, + {value: 0x8133, lo: 0xaf, hi: 
0xaf}, + // Block 0xaa, offset 0x379 + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0x90, hi: 0x96}, + // Block 0xab, offset 0x37b + {value: 0x0000, lo: 0x02}, + {value: 0x8133, lo: 0x84, hi: 0x89}, + {value: 0x8103, lo: 0x8a, hi: 0x8a}, + // Block 0xac, offset 0x37e + {value: 0x0002, lo: 0x0a}, + {value: 0x0063, lo: 0x80, hi: 0x89}, + {value: 0x1a7e, lo: 0x8a, hi: 0x8a}, + {value: 0x1ab1, lo: 0x8b, hi: 0x8b}, + {value: 0x1acc, lo: 0x8c, hi: 0x8c}, + {value: 0x1ad2, lo: 0x8d, hi: 0x8d}, + {value: 0x1cf0, lo: 0x8e, hi: 0x8e}, + {value: 0x1ade, lo: 0x8f, hi: 0x8f}, + {value: 0x1aa8, lo: 0xaa, hi: 0xaa}, + {value: 0x1aab, lo: 0xab, hi: 0xab}, + {value: 0x1aae, lo: 0xac, hi: 0xac}, + // Block 0xad, offset 0x389 + {value: 0x0000, lo: 0x01}, + {value: 0x1a6c, lo: 0x90, hi: 0x90}, + // Block 0xae, offset 0x38b + {value: 0x0028, lo: 0x09}, + {value: 0x2999, lo: 0x80, hi: 0x80}, + {value: 0x295d, lo: 0x81, hi: 0x81}, + {value: 0x2967, lo: 0x82, hi: 0x82}, + {value: 0x297b, lo: 0x83, hi: 0x84}, + {value: 0x2985, lo: 0x85, hi: 0x86}, + {value: 0x2971, lo: 0x87, hi: 0x87}, + {value: 0x298f, lo: 0x88, hi: 0x88}, + {value: 0x0c6a, lo: 0x90, hi: 0x90}, + {value: 0x09e2, lo: 0x91, hi: 0x91}, + // Block 0xaf, offset 0x395 + {value: 0x0002, lo: 0x01}, + {value: 0x0021, lo: 0xb0, hi: 0xb9}, +} + +// recompMap: 7528 bytes (entries only) +var recompMap map[uint32]rune +var recompMapOnce sync.Once + +const recompMapPacked = "" + + "\x00A\x03\x00\x00\x00\x00\xc0" + // 0x00410300: 0x000000C0 + "\x00A\x03\x01\x00\x00\x00\xc1" + // 0x00410301: 0x000000C1 + "\x00A\x03\x02\x00\x00\x00\xc2" + // 0x00410302: 0x000000C2 + "\x00A\x03\x03\x00\x00\x00\xc3" + // 0x00410303: 0x000000C3 + "\x00A\x03\b\x00\x00\x00\xc4" + // 0x00410308: 0x000000C4 + "\x00A\x03\n\x00\x00\x00\xc5" + // 0x0041030A: 0x000000C5 + "\x00C\x03'\x00\x00\x00\xc7" + // 0x00430327: 0x000000C7 + "\x00E\x03\x00\x00\x00\x00\xc8" + // 0x00450300: 0x000000C8 + "\x00E\x03\x01\x00\x00\x00\xc9" + // 0x00450301: 0x000000C9 + "\x00E\x03\x02\x00\x00\x00\xca" + // 0x00450302: 0x000000CA + "\x00E\x03\b\x00\x00\x00\xcb" + // 0x00450308: 0x000000CB + "\x00I\x03\x00\x00\x00\x00\xcc" + // 0x00490300: 0x000000CC + "\x00I\x03\x01\x00\x00\x00\xcd" + // 0x00490301: 0x000000CD + "\x00I\x03\x02\x00\x00\x00\xce" + // 0x00490302: 0x000000CE + "\x00I\x03\b\x00\x00\x00\xcf" + // 0x00490308: 0x000000CF + "\x00N\x03\x03\x00\x00\x00\xd1" + // 0x004E0303: 0x000000D1 + "\x00O\x03\x00\x00\x00\x00\xd2" + // 0x004F0300: 0x000000D2 + "\x00O\x03\x01\x00\x00\x00\xd3" + // 0x004F0301: 0x000000D3 + "\x00O\x03\x02\x00\x00\x00\xd4" + // 0x004F0302: 0x000000D4 + "\x00O\x03\x03\x00\x00\x00\xd5" + // 0x004F0303: 0x000000D5 + "\x00O\x03\b\x00\x00\x00\xd6" + // 0x004F0308: 0x000000D6 + "\x00U\x03\x00\x00\x00\x00\xd9" + // 0x00550300: 0x000000D9 + "\x00U\x03\x01\x00\x00\x00\xda" + // 0x00550301: 0x000000DA + "\x00U\x03\x02\x00\x00\x00\xdb" + // 0x00550302: 0x000000DB + "\x00U\x03\b\x00\x00\x00\xdc" + // 0x00550308: 0x000000DC + "\x00Y\x03\x01\x00\x00\x00\xdd" + // 0x00590301: 0x000000DD + "\x00a\x03\x00\x00\x00\x00\xe0" + // 0x00610300: 0x000000E0 + "\x00a\x03\x01\x00\x00\x00\xe1" + // 0x00610301: 0x000000E1 + "\x00a\x03\x02\x00\x00\x00\xe2" + // 0x00610302: 0x000000E2 + "\x00a\x03\x03\x00\x00\x00\xe3" + // 0x00610303: 0x000000E3 + "\x00a\x03\b\x00\x00\x00\xe4" + // 0x00610308: 0x000000E4 + "\x00a\x03\n\x00\x00\x00\xe5" + // 0x0061030A: 0x000000E5 + "\x00c\x03'\x00\x00\x00\xe7" + // 0x00630327: 0x000000E7 + "\x00e\x03\x00\x00\x00\x00\xe8" + // 0x00650300: 0x000000E8 + "\x00e\x03\x01\x00\x00\x00\xe9" 
+ // 0x00650301: 0x000000E9 + "\x00e\x03\x02\x00\x00\x00\xea" + // 0x00650302: 0x000000EA + "\x00e\x03\b\x00\x00\x00\xeb" + // 0x00650308: 0x000000EB + "\x00i\x03\x00\x00\x00\x00\xec" + // 0x00690300: 0x000000EC + "\x00i\x03\x01\x00\x00\x00\xed" + // 0x00690301: 0x000000ED + "\x00i\x03\x02\x00\x00\x00\xee" + // 0x00690302: 0x000000EE + "\x00i\x03\b\x00\x00\x00\xef" + // 0x00690308: 0x000000EF + "\x00n\x03\x03\x00\x00\x00\xf1" + // 0x006E0303: 0x000000F1 + "\x00o\x03\x00\x00\x00\x00\xf2" + // 0x006F0300: 0x000000F2 + "\x00o\x03\x01\x00\x00\x00\xf3" + // 0x006F0301: 0x000000F3 + "\x00o\x03\x02\x00\x00\x00\xf4" + // 0x006F0302: 0x000000F4 + "\x00o\x03\x03\x00\x00\x00\xf5" + // 0x006F0303: 0x000000F5 + "\x00o\x03\b\x00\x00\x00\xf6" + // 0x006F0308: 0x000000F6 + "\x00u\x03\x00\x00\x00\x00\xf9" + // 0x00750300: 0x000000F9 + "\x00u\x03\x01\x00\x00\x00\xfa" + // 0x00750301: 0x000000FA + "\x00u\x03\x02\x00\x00\x00\xfb" + // 0x00750302: 0x000000FB + "\x00u\x03\b\x00\x00\x00\xfc" + // 0x00750308: 0x000000FC + "\x00y\x03\x01\x00\x00\x00\xfd" + // 0x00790301: 0x000000FD + "\x00y\x03\b\x00\x00\x00\xff" + // 0x00790308: 0x000000FF + "\x00A\x03\x04\x00\x00\x01\x00" + // 0x00410304: 0x00000100 + "\x00a\x03\x04\x00\x00\x01\x01" + // 0x00610304: 0x00000101 + "\x00A\x03\x06\x00\x00\x01\x02" + // 0x00410306: 0x00000102 + "\x00a\x03\x06\x00\x00\x01\x03" + // 0x00610306: 0x00000103 + "\x00A\x03(\x00\x00\x01\x04" + // 0x00410328: 0x00000104 + "\x00a\x03(\x00\x00\x01\x05" + // 0x00610328: 0x00000105 + "\x00C\x03\x01\x00\x00\x01\x06" + // 0x00430301: 0x00000106 + "\x00c\x03\x01\x00\x00\x01\a" + // 0x00630301: 0x00000107 + "\x00C\x03\x02\x00\x00\x01\b" + // 0x00430302: 0x00000108 + "\x00c\x03\x02\x00\x00\x01\t" + // 0x00630302: 0x00000109 + "\x00C\x03\a\x00\x00\x01\n" + // 0x00430307: 0x0000010A + "\x00c\x03\a\x00\x00\x01\v" + // 0x00630307: 0x0000010B + "\x00C\x03\f\x00\x00\x01\f" + // 0x0043030C: 0x0000010C + "\x00c\x03\f\x00\x00\x01\r" + // 0x0063030C: 0x0000010D + "\x00D\x03\f\x00\x00\x01\x0e" + // 0x0044030C: 0x0000010E + "\x00d\x03\f\x00\x00\x01\x0f" + // 0x0064030C: 0x0000010F + "\x00E\x03\x04\x00\x00\x01\x12" + // 0x00450304: 0x00000112 + "\x00e\x03\x04\x00\x00\x01\x13" + // 0x00650304: 0x00000113 + "\x00E\x03\x06\x00\x00\x01\x14" + // 0x00450306: 0x00000114 + "\x00e\x03\x06\x00\x00\x01\x15" + // 0x00650306: 0x00000115 + "\x00E\x03\a\x00\x00\x01\x16" + // 0x00450307: 0x00000116 + "\x00e\x03\a\x00\x00\x01\x17" + // 0x00650307: 0x00000117 + "\x00E\x03(\x00\x00\x01\x18" + // 0x00450328: 0x00000118 + "\x00e\x03(\x00\x00\x01\x19" + // 0x00650328: 0x00000119 + "\x00E\x03\f\x00\x00\x01\x1a" + // 0x0045030C: 0x0000011A + "\x00e\x03\f\x00\x00\x01\x1b" + // 0x0065030C: 0x0000011B + "\x00G\x03\x02\x00\x00\x01\x1c" + // 0x00470302: 0x0000011C + "\x00g\x03\x02\x00\x00\x01\x1d" + // 0x00670302: 0x0000011D + "\x00G\x03\x06\x00\x00\x01\x1e" + // 0x00470306: 0x0000011E + "\x00g\x03\x06\x00\x00\x01\x1f" + // 0x00670306: 0x0000011F + "\x00G\x03\a\x00\x00\x01 " + // 0x00470307: 0x00000120 + "\x00g\x03\a\x00\x00\x01!" 
+ // 0x00670307: 0x00000121 + "\x00G\x03'\x00\x00\x01\"" + // 0x00470327: 0x00000122 + "\x00g\x03'\x00\x00\x01#" + // 0x00670327: 0x00000123 + "\x00H\x03\x02\x00\x00\x01$" + // 0x00480302: 0x00000124 + "\x00h\x03\x02\x00\x00\x01%" + // 0x00680302: 0x00000125 + "\x00I\x03\x03\x00\x00\x01(" + // 0x00490303: 0x00000128 + "\x00i\x03\x03\x00\x00\x01)" + // 0x00690303: 0x00000129 + "\x00I\x03\x04\x00\x00\x01*" + // 0x00490304: 0x0000012A + "\x00i\x03\x04\x00\x00\x01+" + // 0x00690304: 0x0000012B + "\x00I\x03\x06\x00\x00\x01," + // 0x00490306: 0x0000012C + "\x00i\x03\x06\x00\x00\x01-" + // 0x00690306: 0x0000012D + "\x00I\x03(\x00\x00\x01." + // 0x00490328: 0x0000012E + "\x00i\x03(\x00\x00\x01/" + // 0x00690328: 0x0000012F + "\x00I\x03\a\x00\x00\x010" + // 0x00490307: 0x00000130 + "\x00J\x03\x02\x00\x00\x014" + // 0x004A0302: 0x00000134 + "\x00j\x03\x02\x00\x00\x015" + // 0x006A0302: 0x00000135 + "\x00K\x03'\x00\x00\x016" + // 0x004B0327: 0x00000136 + "\x00k\x03'\x00\x00\x017" + // 0x006B0327: 0x00000137 + "\x00L\x03\x01\x00\x00\x019" + // 0x004C0301: 0x00000139 + "\x00l\x03\x01\x00\x00\x01:" + // 0x006C0301: 0x0000013A + "\x00L\x03'\x00\x00\x01;" + // 0x004C0327: 0x0000013B + "\x00l\x03'\x00\x00\x01<" + // 0x006C0327: 0x0000013C + "\x00L\x03\f\x00\x00\x01=" + // 0x004C030C: 0x0000013D + "\x00l\x03\f\x00\x00\x01>" + // 0x006C030C: 0x0000013E + "\x00N\x03\x01\x00\x00\x01C" + // 0x004E0301: 0x00000143 + "\x00n\x03\x01\x00\x00\x01D" + // 0x006E0301: 0x00000144 + "\x00N\x03'\x00\x00\x01E" + // 0x004E0327: 0x00000145 + "\x00n\x03'\x00\x00\x01F" + // 0x006E0327: 0x00000146 + "\x00N\x03\f\x00\x00\x01G" + // 0x004E030C: 0x00000147 + "\x00n\x03\f\x00\x00\x01H" + // 0x006E030C: 0x00000148 + "\x00O\x03\x04\x00\x00\x01L" + // 0x004F0304: 0x0000014C + "\x00o\x03\x04\x00\x00\x01M" + // 0x006F0304: 0x0000014D + "\x00O\x03\x06\x00\x00\x01N" + // 0x004F0306: 0x0000014E + "\x00o\x03\x06\x00\x00\x01O" + // 0x006F0306: 0x0000014F + "\x00O\x03\v\x00\x00\x01P" + // 0x004F030B: 0x00000150 + "\x00o\x03\v\x00\x00\x01Q" + // 0x006F030B: 0x00000151 + "\x00R\x03\x01\x00\x00\x01T" + // 0x00520301: 0x00000154 + "\x00r\x03\x01\x00\x00\x01U" + // 0x00720301: 0x00000155 + "\x00R\x03'\x00\x00\x01V" + // 0x00520327: 0x00000156 + "\x00r\x03'\x00\x00\x01W" + // 0x00720327: 0x00000157 + "\x00R\x03\f\x00\x00\x01X" + // 0x0052030C: 0x00000158 + "\x00r\x03\f\x00\x00\x01Y" + // 0x0072030C: 0x00000159 + "\x00S\x03\x01\x00\x00\x01Z" + // 0x00530301: 0x0000015A + "\x00s\x03\x01\x00\x00\x01[" + // 0x00730301: 0x0000015B + "\x00S\x03\x02\x00\x00\x01\\" + // 0x00530302: 0x0000015C + "\x00s\x03\x02\x00\x00\x01]" + // 0x00730302: 0x0000015D + "\x00S\x03'\x00\x00\x01^" + // 0x00530327: 0x0000015E + "\x00s\x03'\x00\x00\x01_" + // 0x00730327: 0x0000015F + "\x00S\x03\f\x00\x00\x01`" + // 0x0053030C: 0x00000160 + "\x00s\x03\f\x00\x00\x01a" + // 0x0073030C: 0x00000161 + "\x00T\x03'\x00\x00\x01b" + // 0x00540327: 0x00000162 + "\x00t\x03'\x00\x00\x01c" + // 0x00740327: 0x00000163 + "\x00T\x03\f\x00\x00\x01d" + // 0x0054030C: 0x00000164 + "\x00t\x03\f\x00\x00\x01e" + // 0x0074030C: 0x00000165 + "\x00U\x03\x03\x00\x00\x01h" + // 0x00550303: 0x00000168 + "\x00u\x03\x03\x00\x00\x01i" + // 0x00750303: 0x00000169 + "\x00U\x03\x04\x00\x00\x01j" + // 0x00550304: 0x0000016A + "\x00u\x03\x04\x00\x00\x01k" + // 0x00750304: 0x0000016B + "\x00U\x03\x06\x00\x00\x01l" + // 0x00550306: 0x0000016C + "\x00u\x03\x06\x00\x00\x01m" + // 0x00750306: 0x0000016D + "\x00U\x03\n\x00\x00\x01n" + // 0x0055030A: 0x0000016E + "\x00u\x03\n\x00\x00\x01o" + // 0x0075030A: 0x0000016F + 
"\x00U\x03\v\x00\x00\x01p" + // 0x0055030B: 0x00000170 + "\x00u\x03\v\x00\x00\x01q" + // 0x0075030B: 0x00000171 + "\x00U\x03(\x00\x00\x01r" + // 0x00550328: 0x00000172 + "\x00u\x03(\x00\x00\x01s" + // 0x00750328: 0x00000173 + "\x00W\x03\x02\x00\x00\x01t" + // 0x00570302: 0x00000174 + "\x00w\x03\x02\x00\x00\x01u" + // 0x00770302: 0x00000175 + "\x00Y\x03\x02\x00\x00\x01v" + // 0x00590302: 0x00000176 + "\x00y\x03\x02\x00\x00\x01w" + // 0x00790302: 0x00000177 + "\x00Y\x03\b\x00\x00\x01x" + // 0x00590308: 0x00000178 + "\x00Z\x03\x01\x00\x00\x01y" + // 0x005A0301: 0x00000179 + "\x00z\x03\x01\x00\x00\x01z" + // 0x007A0301: 0x0000017A + "\x00Z\x03\a\x00\x00\x01{" + // 0x005A0307: 0x0000017B + "\x00z\x03\a\x00\x00\x01|" + // 0x007A0307: 0x0000017C + "\x00Z\x03\f\x00\x00\x01}" + // 0x005A030C: 0x0000017D + "\x00z\x03\f\x00\x00\x01~" + // 0x007A030C: 0x0000017E + "\x00O\x03\x1b\x00\x00\x01\xa0" + // 0x004F031B: 0x000001A0 + "\x00o\x03\x1b\x00\x00\x01\xa1" + // 0x006F031B: 0x000001A1 + "\x00U\x03\x1b\x00\x00\x01\xaf" + // 0x0055031B: 0x000001AF + "\x00u\x03\x1b\x00\x00\x01\xb0" + // 0x0075031B: 0x000001B0 + "\x00A\x03\f\x00\x00\x01\xcd" + // 0x0041030C: 0x000001CD + "\x00a\x03\f\x00\x00\x01\xce" + // 0x0061030C: 0x000001CE + "\x00I\x03\f\x00\x00\x01\xcf" + // 0x0049030C: 0x000001CF + "\x00i\x03\f\x00\x00\x01\xd0" + // 0x0069030C: 0x000001D0 + "\x00O\x03\f\x00\x00\x01\xd1" + // 0x004F030C: 0x000001D1 + "\x00o\x03\f\x00\x00\x01\xd2" + // 0x006F030C: 0x000001D2 + "\x00U\x03\f\x00\x00\x01\xd3" + // 0x0055030C: 0x000001D3 + "\x00u\x03\f\x00\x00\x01\xd4" + // 0x0075030C: 0x000001D4 + "\x00\xdc\x03\x04\x00\x00\x01\xd5" + // 0x00DC0304: 0x000001D5 + "\x00\xfc\x03\x04\x00\x00\x01\xd6" + // 0x00FC0304: 0x000001D6 + "\x00\xdc\x03\x01\x00\x00\x01\xd7" + // 0x00DC0301: 0x000001D7 + "\x00\xfc\x03\x01\x00\x00\x01\xd8" + // 0x00FC0301: 0x000001D8 + "\x00\xdc\x03\f\x00\x00\x01\xd9" + // 0x00DC030C: 0x000001D9 + "\x00\xfc\x03\f\x00\x00\x01\xda" + // 0x00FC030C: 0x000001DA + "\x00\xdc\x03\x00\x00\x00\x01\xdb" + // 0x00DC0300: 0x000001DB + "\x00\xfc\x03\x00\x00\x00\x01\xdc" + // 0x00FC0300: 0x000001DC + "\x00\xc4\x03\x04\x00\x00\x01\xde" + // 0x00C40304: 0x000001DE + "\x00\xe4\x03\x04\x00\x00\x01\xdf" + // 0x00E40304: 0x000001DF + "\x02&\x03\x04\x00\x00\x01\xe0" + // 0x02260304: 0x000001E0 + "\x02'\x03\x04\x00\x00\x01\xe1" + // 0x02270304: 0x000001E1 + "\x00\xc6\x03\x04\x00\x00\x01\xe2" + // 0x00C60304: 0x000001E2 + "\x00\xe6\x03\x04\x00\x00\x01\xe3" + // 0x00E60304: 0x000001E3 + "\x00G\x03\f\x00\x00\x01\xe6" + // 0x0047030C: 0x000001E6 + "\x00g\x03\f\x00\x00\x01\xe7" + // 0x0067030C: 0x000001E7 + "\x00K\x03\f\x00\x00\x01\xe8" + // 0x004B030C: 0x000001E8 + "\x00k\x03\f\x00\x00\x01\xe9" + // 0x006B030C: 0x000001E9 + "\x00O\x03(\x00\x00\x01\xea" + // 0x004F0328: 0x000001EA + "\x00o\x03(\x00\x00\x01\xeb" + // 0x006F0328: 0x000001EB + "\x01\xea\x03\x04\x00\x00\x01\xec" + // 0x01EA0304: 0x000001EC + "\x01\xeb\x03\x04\x00\x00\x01\xed" + // 0x01EB0304: 0x000001ED + "\x01\xb7\x03\f\x00\x00\x01\xee" + // 0x01B7030C: 0x000001EE + "\x02\x92\x03\f\x00\x00\x01\xef" + // 0x0292030C: 0x000001EF + "\x00j\x03\f\x00\x00\x01\xf0" + // 0x006A030C: 0x000001F0 + "\x00G\x03\x01\x00\x00\x01\xf4" + // 0x00470301: 0x000001F4 + "\x00g\x03\x01\x00\x00\x01\xf5" + // 0x00670301: 0x000001F5 + "\x00N\x03\x00\x00\x00\x01\xf8" + // 0x004E0300: 0x000001F8 + "\x00n\x03\x00\x00\x00\x01\xf9" + // 0x006E0300: 0x000001F9 + "\x00\xc5\x03\x01\x00\x00\x01\xfa" + // 0x00C50301: 0x000001FA + "\x00\xe5\x03\x01\x00\x00\x01\xfb" + // 0x00E50301: 0x000001FB + 
"\x00\xc6\x03\x01\x00\x00\x01\xfc" + // 0x00C60301: 0x000001FC + "\x00\xe6\x03\x01\x00\x00\x01\xfd" + // 0x00E60301: 0x000001FD + "\x00\xd8\x03\x01\x00\x00\x01\xfe" + // 0x00D80301: 0x000001FE + "\x00\xf8\x03\x01\x00\x00\x01\xff" + // 0x00F80301: 0x000001FF + "\x00A\x03\x0f\x00\x00\x02\x00" + // 0x0041030F: 0x00000200 + "\x00a\x03\x0f\x00\x00\x02\x01" + // 0x0061030F: 0x00000201 + "\x00A\x03\x11\x00\x00\x02\x02" + // 0x00410311: 0x00000202 + "\x00a\x03\x11\x00\x00\x02\x03" + // 0x00610311: 0x00000203 + "\x00E\x03\x0f\x00\x00\x02\x04" + // 0x0045030F: 0x00000204 + "\x00e\x03\x0f\x00\x00\x02\x05" + // 0x0065030F: 0x00000205 + "\x00E\x03\x11\x00\x00\x02\x06" + // 0x00450311: 0x00000206 + "\x00e\x03\x11\x00\x00\x02\a" + // 0x00650311: 0x00000207 + "\x00I\x03\x0f\x00\x00\x02\b" + // 0x0049030F: 0x00000208 + "\x00i\x03\x0f\x00\x00\x02\t" + // 0x0069030F: 0x00000209 + "\x00I\x03\x11\x00\x00\x02\n" + // 0x00490311: 0x0000020A + "\x00i\x03\x11\x00\x00\x02\v" + // 0x00690311: 0x0000020B + "\x00O\x03\x0f\x00\x00\x02\f" + // 0x004F030F: 0x0000020C + "\x00o\x03\x0f\x00\x00\x02\r" + // 0x006F030F: 0x0000020D + "\x00O\x03\x11\x00\x00\x02\x0e" + // 0x004F0311: 0x0000020E + "\x00o\x03\x11\x00\x00\x02\x0f" + // 0x006F0311: 0x0000020F + "\x00R\x03\x0f\x00\x00\x02\x10" + // 0x0052030F: 0x00000210 + "\x00r\x03\x0f\x00\x00\x02\x11" + // 0x0072030F: 0x00000211 + "\x00R\x03\x11\x00\x00\x02\x12" + // 0x00520311: 0x00000212 + "\x00r\x03\x11\x00\x00\x02\x13" + // 0x00720311: 0x00000213 + "\x00U\x03\x0f\x00\x00\x02\x14" + // 0x0055030F: 0x00000214 + "\x00u\x03\x0f\x00\x00\x02\x15" + // 0x0075030F: 0x00000215 + "\x00U\x03\x11\x00\x00\x02\x16" + // 0x00550311: 0x00000216 + "\x00u\x03\x11\x00\x00\x02\x17" + // 0x00750311: 0x00000217 + "\x00S\x03&\x00\x00\x02\x18" + // 0x00530326: 0x00000218 + "\x00s\x03&\x00\x00\x02\x19" + // 0x00730326: 0x00000219 + "\x00T\x03&\x00\x00\x02\x1a" + // 0x00540326: 0x0000021A + "\x00t\x03&\x00\x00\x02\x1b" + // 0x00740326: 0x0000021B + "\x00H\x03\f\x00\x00\x02\x1e" + // 0x0048030C: 0x0000021E + "\x00h\x03\f\x00\x00\x02\x1f" + // 0x0068030C: 0x0000021F + "\x00A\x03\a\x00\x00\x02&" + // 0x00410307: 0x00000226 + "\x00a\x03\a\x00\x00\x02'" + // 0x00610307: 0x00000227 + "\x00E\x03'\x00\x00\x02(" + // 0x00450327: 0x00000228 + "\x00e\x03'\x00\x00\x02)" + // 0x00650327: 0x00000229 + "\x00\xd6\x03\x04\x00\x00\x02*" + // 0x00D60304: 0x0000022A + "\x00\xf6\x03\x04\x00\x00\x02+" + // 0x00F60304: 0x0000022B + "\x00\xd5\x03\x04\x00\x00\x02," + // 0x00D50304: 0x0000022C + "\x00\xf5\x03\x04\x00\x00\x02-" + // 0x00F50304: 0x0000022D + "\x00O\x03\a\x00\x00\x02." 
+ // 0x004F0307: 0x0000022E + "\x00o\x03\a\x00\x00\x02/" + // 0x006F0307: 0x0000022F + "\x02.\x03\x04\x00\x00\x020" + // 0x022E0304: 0x00000230 + "\x02/\x03\x04\x00\x00\x021" + // 0x022F0304: 0x00000231 + "\x00Y\x03\x04\x00\x00\x022" + // 0x00590304: 0x00000232 + "\x00y\x03\x04\x00\x00\x023" + // 0x00790304: 0x00000233 + "\x00\xa8\x03\x01\x00\x00\x03\x85" + // 0x00A80301: 0x00000385 + "\x03\x91\x03\x01\x00\x00\x03\x86" + // 0x03910301: 0x00000386 + "\x03\x95\x03\x01\x00\x00\x03\x88" + // 0x03950301: 0x00000388 + "\x03\x97\x03\x01\x00\x00\x03\x89" + // 0x03970301: 0x00000389 + "\x03\x99\x03\x01\x00\x00\x03\x8a" + // 0x03990301: 0x0000038A + "\x03\x9f\x03\x01\x00\x00\x03\x8c" + // 0x039F0301: 0x0000038C + "\x03\xa5\x03\x01\x00\x00\x03\x8e" + // 0x03A50301: 0x0000038E + "\x03\xa9\x03\x01\x00\x00\x03\x8f" + // 0x03A90301: 0x0000038F + "\x03\xca\x03\x01\x00\x00\x03\x90" + // 0x03CA0301: 0x00000390 + "\x03\x99\x03\b\x00\x00\x03\xaa" + // 0x03990308: 0x000003AA + "\x03\xa5\x03\b\x00\x00\x03\xab" + // 0x03A50308: 0x000003AB + "\x03\xb1\x03\x01\x00\x00\x03\xac" + // 0x03B10301: 0x000003AC + "\x03\xb5\x03\x01\x00\x00\x03\xad" + // 0x03B50301: 0x000003AD + "\x03\xb7\x03\x01\x00\x00\x03\xae" + // 0x03B70301: 0x000003AE + "\x03\xb9\x03\x01\x00\x00\x03\xaf" + // 0x03B90301: 0x000003AF + "\x03\xcb\x03\x01\x00\x00\x03\xb0" + // 0x03CB0301: 0x000003B0 + "\x03\xb9\x03\b\x00\x00\x03\xca" + // 0x03B90308: 0x000003CA + "\x03\xc5\x03\b\x00\x00\x03\xcb" + // 0x03C50308: 0x000003CB + "\x03\xbf\x03\x01\x00\x00\x03\xcc" + // 0x03BF0301: 0x000003CC + "\x03\xc5\x03\x01\x00\x00\x03\xcd" + // 0x03C50301: 0x000003CD + "\x03\xc9\x03\x01\x00\x00\x03\xce" + // 0x03C90301: 0x000003CE + "\x03\xd2\x03\x01\x00\x00\x03\xd3" + // 0x03D20301: 0x000003D3 + "\x03\xd2\x03\b\x00\x00\x03\xd4" + // 0x03D20308: 0x000003D4 + "\x04\x15\x03\x00\x00\x00\x04\x00" + // 0x04150300: 0x00000400 + "\x04\x15\x03\b\x00\x00\x04\x01" + // 0x04150308: 0x00000401 + "\x04\x13\x03\x01\x00\x00\x04\x03" + // 0x04130301: 0x00000403 + "\x04\x06\x03\b\x00\x00\x04\a" + // 0x04060308: 0x00000407 + "\x04\x1a\x03\x01\x00\x00\x04\f" + // 0x041A0301: 0x0000040C + "\x04\x18\x03\x00\x00\x00\x04\r" + // 0x04180300: 0x0000040D + "\x04#\x03\x06\x00\x00\x04\x0e" + // 0x04230306: 0x0000040E + "\x04\x18\x03\x06\x00\x00\x04\x19" + // 0x04180306: 0x00000419 + "\x048\x03\x06\x00\x00\x049" + // 0x04380306: 0x00000439 + "\x045\x03\x00\x00\x00\x04P" + // 0x04350300: 0x00000450 + "\x045\x03\b\x00\x00\x04Q" + // 0x04350308: 0x00000451 + "\x043\x03\x01\x00\x00\x04S" + // 0x04330301: 0x00000453 + "\x04V\x03\b\x00\x00\x04W" + // 0x04560308: 0x00000457 + "\x04:\x03\x01\x00\x00\x04\\" + // 0x043A0301: 0x0000045C + "\x048\x03\x00\x00\x00\x04]" + // 0x04380300: 0x0000045D + "\x04C\x03\x06\x00\x00\x04^" + // 0x04430306: 0x0000045E + "\x04t\x03\x0f\x00\x00\x04v" + // 0x0474030F: 0x00000476 + "\x04u\x03\x0f\x00\x00\x04w" + // 0x0475030F: 0x00000477 + "\x04\x16\x03\x06\x00\x00\x04\xc1" + // 0x04160306: 0x000004C1 + "\x046\x03\x06\x00\x00\x04\xc2" + // 0x04360306: 0x000004C2 + "\x04\x10\x03\x06\x00\x00\x04\xd0" + // 0x04100306: 0x000004D0 + "\x040\x03\x06\x00\x00\x04\xd1" + // 0x04300306: 0x000004D1 + "\x04\x10\x03\b\x00\x00\x04\xd2" + // 0x04100308: 0x000004D2 + "\x040\x03\b\x00\x00\x04\xd3" + // 0x04300308: 0x000004D3 + "\x04\x15\x03\x06\x00\x00\x04\xd6" + // 0x04150306: 0x000004D6 + "\x045\x03\x06\x00\x00\x04\xd7" + // 0x04350306: 0x000004D7 + "\x04\xd8\x03\b\x00\x00\x04\xda" + // 0x04D80308: 0x000004DA + "\x04\xd9\x03\b\x00\x00\x04\xdb" + // 0x04D90308: 0x000004DB + 
"\x04\x16\x03\b\x00\x00\x04\xdc" + // 0x04160308: 0x000004DC + "\x046\x03\b\x00\x00\x04\xdd" + // 0x04360308: 0x000004DD + "\x04\x17\x03\b\x00\x00\x04\xde" + // 0x04170308: 0x000004DE + "\x047\x03\b\x00\x00\x04\xdf" + // 0x04370308: 0x000004DF + "\x04\x18\x03\x04\x00\x00\x04\xe2" + // 0x04180304: 0x000004E2 + "\x048\x03\x04\x00\x00\x04\xe3" + // 0x04380304: 0x000004E3 + "\x04\x18\x03\b\x00\x00\x04\xe4" + // 0x04180308: 0x000004E4 + "\x048\x03\b\x00\x00\x04\xe5" + // 0x04380308: 0x000004E5 + "\x04\x1e\x03\b\x00\x00\x04\xe6" + // 0x041E0308: 0x000004E6 + "\x04>\x03\b\x00\x00\x04\xe7" + // 0x043E0308: 0x000004E7 + "\x04\xe8\x03\b\x00\x00\x04\xea" + // 0x04E80308: 0x000004EA + "\x04\xe9\x03\b\x00\x00\x04\xeb" + // 0x04E90308: 0x000004EB + "\x04-\x03\b\x00\x00\x04\xec" + // 0x042D0308: 0x000004EC + "\x04M\x03\b\x00\x00\x04\xed" + // 0x044D0308: 0x000004ED + "\x04#\x03\x04\x00\x00\x04\xee" + // 0x04230304: 0x000004EE + "\x04C\x03\x04\x00\x00\x04\xef" + // 0x04430304: 0x000004EF + "\x04#\x03\b\x00\x00\x04\xf0" + // 0x04230308: 0x000004F0 + "\x04C\x03\b\x00\x00\x04\xf1" + // 0x04430308: 0x000004F1 + "\x04#\x03\v\x00\x00\x04\xf2" + // 0x0423030B: 0x000004F2 + "\x04C\x03\v\x00\x00\x04\xf3" + // 0x0443030B: 0x000004F3 + "\x04'\x03\b\x00\x00\x04\xf4" + // 0x04270308: 0x000004F4 + "\x04G\x03\b\x00\x00\x04\xf5" + // 0x04470308: 0x000004F5 + "\x04+\x03\b\x00\x00\x04\xf8" + // 0x042B0308: 0x000004F8 + "\x04K\x03\b\x00\x00\x04\xf9" + // 0x044B0308: 0x000004F9 + "\x06'\x06S\x00\x00\x06\"" + // 0x06270653: 0x00000622 + "\x06'\x06T\x00\x00\x06#" + // 0x06270654: 0x00000623 + "\x06H\x06T\x00\x00\x06$" + // 0x06480654: 0x00000624 + "\x06'\x06U\x00\x00\x06%" + // 0x06270655: 0x00000625 + "\x06J\x06T\x00\x00\x06&" + // 0x064A0654: 0x00000626 + "\x06\xd5\x06T\x00\x00\x06\xc0" + // 0x06D50654: 0x000006C0 + "\x06\xc1\x06T\x00\x00\x06\xc2" + // 0x06C10654: 0x000006C2 + "\x06\xd2\x06T\x00\x00\x06\xd3" + // 0x06D20654: 0x000006D3 + "\t(\t<\x00\x00\t)" + // 0x0928093C: 0x00000929 + "\t0\t<\x00\x00\t1" + // 0x0930093C: 0x00000931 + "\t3\t<\x00\x00\t4" + // 0x0933093C: 0x00000934 + "\t\xc7\t\xbe\x00\x00\t\xcb" + // 0x09C709BE: 0x000009CB + "\t\xc7\t\xd7\x00\x00\t\xcc" + // 0x09C709D7: 0x000009CC + "\vG\vV\x00\x00\vH" + // 0x0B470B56: 0x00000B48 + "\vG\v>\x00\x00\vK" + // 0x0B470B3E: 0x00000B4B + "\vG\vW\x00\x00\vL" + // 0x0B470B57: 0x00000B4C + "\v\x92\v\xd7\x00\x00\v\x94" + // 0x0B920BD7: 0x00000B94 + "\v\xc6\v\xbe\x00\x00\v\xca" + // 0x0BC60BBE: 0x00000BCA + "\v\xc7\v\xbe\x00\x00\v\xcb" + // 0x0BC70BBE: 0x00000BCB + "\v\xc6\v\xd7\x00\x00\v\xcc" + // 0x0BC60BD7: 0x00000BCC + "\fF\fV\x00\x00\fH" + // 0x0C460C56: 0x00000C48 + "\f\xbf\f\xd5\x00\x00\f\xc0" + // 0x0CBF0CD5: 0x00000CC0 + "\f\xc6\f\xd5\x00\x00\f\xc7" + // 0x0CC60CD5: 0x00000CC7 + "\f\xc6\f\xd6\x00\x00\f\xc8" + // 0x0CC60CD6: 0x00000CC8 + "\f\xc6\f\xc2\x00\x00\f\xca" + // 0x0CC60CC2: 0x00000CCA + "\f\xca\f\xd5\x00\x00\f\xcb" + // 0x0CCA0CD5: 0x00000CCB + "\rF\r>\x00\x00\rJ" + // 0x0D460D3E: 0x00000D4A + "\rG\r>\x00\x00\rK" + // 0x0D470D3E: 0x00000D4B + "\rF\rW\x00\x00\rL" + // 0x0D460D57: 0x00000D4C + "\r\xd9\r\xca\x00\x00\r\xda" + // 0x0DD90DCA: 0x00000DDA + "\r\xd9\r\xcf\x00\x00\r\xdc" + // 0x0DD90DCF: 0x00000DDC + "\r\xdc\r\xca\x00\x00\r\xdd" + // 0x0DDC0DCA: 0x00000DDD + "\r\xd9\r\xdf\x00\x00\r\xde" + // 0x0DD90DDF: 0x00000DDE + "\x10%\x10.\x00\x00\x10&" + // 0x1025102E: 0x00001026 + "\x1b\x05\x1b5\x00\x00\x1b\x06" + // 0x1B051B35: 0x00001B06 + "\x1b\a\x1b5\x00\x00\x1b\b" + // 0x1B071B35: 0x00001B08 + "\x1b\t\x1b5\x00\x00\x1b\n" + // 0x1B091B35: 0x00001B0A + 
"\x1b\v\x1b5\x00\x00\x1b\f" + // 0x1B0B1B35: 0x00001B0C + "\x1b\r\x1b5\x00\x00\x1b\x0e" + // 0x1B0D1B35: 0x00001B0E + "\x1b\x11\x1b5\x00\x00\x1b\x12" + // 0x1B111B35: 0x00001B12 + "\x1b:\x1b5\x00\x00\x1b;" + // 0x1B3A1B35: 0x00001B3B + "\x1b<\x1b5\x00\x00\x1b=" + // 0x1B3C1B35: 0x00001B3D + "\x1b>\x1b5\x00\x00\x1b@" + // 0x1B3E1B35: 0x00001B40 + "\x1b?\x1b5\x00\x00\x1bA" + // 0x1B3F1B35: 0x00001B41 + "\x1bB\x1b5\x00\x00\x1bC" + // 0x1B421B35: 0x00001B43 + "\x00A\x03%\x00\x00\x1e\x00" + // 0x00410325: 0x00001E00 + "\x00a\x03%\x00\x00\x1e\x01" + // 0x00610325: 0x00001E01 + "\x00B\x03\a\x00\x00\x1e\x02" + // 0x00420307: 0x00001E02 + "\x00b\x03\a\x00\x00\x1e\x03" + // 0x00620307: 0x00001E03 + "\x00B\x03#\x00\x00\x1e\x04" + // 0x00420323: 0x00001E04 + "\x00b\x03#\x00\x00\x1e\x05" + // 0x00620323: 0x00001E05 + "\x00B\x031\x00\x00\x1e\x06" + // 0x00420331: 0x00001E06 + "\x00b\x031\x00\x00\x1e\a" + // 0x00620331: 0x00001E07 + "\x00\xc7\x03\x01\x00\x00\x1e\b" + // 0x00C70301: 0x00001E08 + "\x00\xe7\x03\x01\x00\x00\x1e\t" + // 0x00E70301: 0x00001E09 + "\x00D\x03\a\x00\x00\x1e\n" + // 0x00440307: 0x00001E0A + "\x00d\x03\a\x00\x00\x1e\v" + // 0x00640307: 0x00001E0B + "\x00D\x03#\x00\x00\x1e\f" + // 0x00440323: 0x00001E0C + "\x00d\x03#\x00\x00\x1e\r" + // 0x00640323: 0x00001E0D + "\x00D\x031\x00\x00\x1e\x0e" + // 0x00440331: 0x00001E0E + "\x00d\x031\x00\x00\x1e\x0f" + // 0x00640331: 0x00001E0F + "\x00D\x03'\x00\x00\x1e\x10" + // 0x00440327: 0x00001E10 + "\x00d\x03'\x00\x00\x1e\x11" + // 0x00640327: 0x00001E11 + "\x00D\x03-\x00\x00\x1e\x12" + // 0x0044032D: 0x00001E12 + "\x00d\x03-\x00\x00\x1e\x13" + // 0x0064032D: 0x00001E13 + "\x01\x12\x03\x00\x00\x00\x1e\x14" + // 0x01120300: 0x00001E14 + "\x01\x13\x03\x00\x00\x00\x1e\x15" + // 0x01130300: 0x00001E15 + "\x01\x12\x03\x01\x00\x00\x1e\x16" + // 0x01120301: 0x00001E16 + "\x01\x13\x03\x01\x00\x00\x1e\x17" + // 0x01130301: 0x00001E17 + "\x00E\x03-\x00\x00\x1e\x18" + // 0x0045032D: 0x00001E18 + "\x00e\x03-\x00\x00\x1e\x19" + // 0x0065032D: 0x00001E19 + "\x00E\x030\x00\x00\x1e\x1a" + // 0x00450330: 0x00001E1A + "\x00e\x030\x00\x00\x1e\x1b" + // 0x00650330: 0x00001E1B + "\x02(\x03\x06\x00\x00\x1e\x1c" + // 0x02280306: 0x00001E1C + "\x02)\x03\x06\x00\x00\x1e\x1d" + // 0x02290306: 0x00001E1D + "\x00F\x03\a\x00\x00\x1e\x1e" + // 0x00460307: 0x00001E1E + "\x00f\x03\a\x00\x00\x1e\x1f" + // 0x00660307: 0x00001E1F + "\x00G\x03\x04\x00\x00\x1e " + // 0x00470304: 0x00001E20 + "\x00g\x03\x04\x00\x00\x1e!" + // 0x00670304: 0x00001E21 + "\x00H\x03\a\x00\x00\x1e\"" + // 0x00480307: 0x00001E22 + "\x00h\x03\a\x00\x00\x1e#" + // 0x00680307: 0x00001E23 + "\x00H\x03#\x00\x00\x1e$" + // 0x00480323: 0x00001E24 + "\x00h\x03#\x00\x00\x1e%" + // 0x00680323: 0x00001E25 + "\x00H\x03\b\x00\x00\x1e&" + // 0x00480308: 0x00001E26 + "\x00h\x03\b\x00\x00\x1e'" + // 0x00680308: 0x00001E27 + "\x00H\x03'\x00\x00\x1e(" + // 0x00480327: 0x00001E28 + "\x00h\x03'\x00\x00\x1e)" + // 0x00680327: 0x00001E29 + "\x00H\x03.\x00\x00\x1e*" + // 0x0048032E: 0x00001E2A + "\x00h\x03.\x00\x00\x1e+" + // 0x0068032E: 0x00001E2B + "\x00I\x030\x00\x00\x1e," + // 0x00490330: 0x00001E2C + "\x00i\x030\x00\x00\x1e-" + // 0x00690330: 0x00001E2D + "\x00\xcf\x03\x01\x00\x00\x1e." 
+ // 0x00CF0301: 0x00001E2E + "\x00\xef\x03\x01\x00\x00\x1e/" + // 0x00EF0301: 0x00001E2F + "\x00K\x03\x01\x00\x00\x1e0" + // 0x004B0301: 0x00001E30 + "\x00k\x03\x01\x00\x00\x1e1" + // 0x006B0301: 0x00001E31 + "\x00K\x03#\x00\x00\x1e2" + // 0x004B0323: 0x00001E32 + "\x00k\x03#\x00\x00\x1e3" + // 0x006B0323: 0x00001E33 + "\x00K\x031\x00\x00\x1e4" + // 0x004B0331: 0x00001E34 + "\x00k\x031\x00\x00\x1e5" + // 0x006B0331: 0x00001E35 + "\x00L\x03#\x00\x00\x1e6" + // 0x004C0323: 0x00001E36 + "\x00l\x03#\x00\x00\x1e7" + // 0x006C0323: 0x00001E37 + "\x1e6\x03\x04\x00\x00\x1e8" + // 0x1E360304: 0x00001E38 + "\x1e7\x03\x04\x00\x00\x1e9" + // 0x1E370304: 0x00001E39 + "\x00L\x031\x00\x00\x1e:" + // 0x004C0331: 0x00001E3A + "\x00l\x031\x00\x00\x1e;" + // 0x006C0331: 0x00001E3B + "\x00L\x03-\x00\x00\x1e<" + // 0x004C032D: 0x00001E3C + "\x00l\x03-\x00\x00\x1e=" + // 0x006C032D: 0x00001E3D + "\x00M\x03\x01\x00\x00\x1e>" + // 0x004D0301: 0x00001E3E + "\x00m\x03\x01\x00\x00\x1e?" + // 0x006D0301: 0x00001E3F + "\x00M\x03\a\x00\x00\x1e@" + // 0x004D0307: 0x00001E40 + "\x00m\x03\a\x00\x00\x1eA" + // 0x006D0307: 0x00001E41 + "\x00M\x03#\x00\x00\x1eB" + // 0x004D0323: 0x00001E42 + "\x00m\x03#\x00\x00\x1eC" + // 0x006D0323: 0x00001E43 + "\x00N\x03\a\x00\x00\x1eD" + // 0x004E0307: 0x00001E44 + "\x00n\x03\a\x00\x00\x1eE" + // 0x006E0307: 0x00001E45 + "\x00N\x03#\x00\x00\x1eF" + // 0x004E0323: 0x00001E46 + "\x00n\x03#\x00\x00\x1eG" + // 0x006E0323: 0x00001E47 + "\x00N\x031\x00\x00\x1eH" + // 0x004E0331: 0x00001E48 + "\x00n\x031\x00\x00\x1eI" + // 0x006E0331: 0x00001E49 + "\x00N\x03-\x00\x00\x1eJ" + // 0x004E032D: 0x00001E4A + "\x00n\x03-\x00\x00\x1eK" + // 0x006E032D: 0x00001E4B + "\x00\xd5\x03\x01\x00\x00\x1eL" + // 0x00D50301: 0x00001E4C + "\x00\xf5\x03\x01\x00\x00\x1eM" + // 0x00F50301: 0x00001E4D + "\x00\xd5\x03\b\x00\x00\x1eN" + // 0x00D50308: 0x00001E4E + "\x00\xf5\x03\b\x00\x00\x1eO" + // 0x00F50308: 0x00001E4F + "\x01L\x03\x00\x00\x00\x1eP" + // 0x014C0300: 0x00001E50 + "\x01M\x03\x00\x00\x00\x1eQ" + // 0x014D0300: 0x00001E51 + "\x01L\x03\x01\x00\x00\x1eR" + // 0x014C0301: 0x00001E52 + "\x01M\x03\x01\x00\x00\x1eS" + // 0x014D0301: 0x00001E53 + "\x00P\x03\x01\x00\x00\x1eT" + // 0x00500301: 0x00001E54 + "\x00p\x03\x01\x00\x00\x1eU" + // 0x00700301: 0x00001E55 + "\x00P\x03\a\x00\x00\x1eV" + // 0x00500307: 0x00001E56 + "\x00p\x03\a\x00\x00\x1eW" + // 0x00700307: 0x00001E57 + "\x00R\x03\a\x00\x00\x1eX" + // 0x00520307: 0x00001E58 + "\x00r\x03\a\x00\x00\x1eY" + // 0x00720307: 0x00001E59 + "\x00R\x03#\x00\x00\x1eZ" + // 0x00520323: 0x00001E5A + "\x00r\x03#\x00\x00\x1e[" + // 0x00720323: 0x00001E5B + "\x1eZ\x03\x04\x00\x00\x1e\\" + // 0x1E5A0304: 0x00001E5C + "\x1e[\x03\x04\x00\x00\x1e]" + // 0x1E5B0304: 0x00001E5D + "\x00R\x031\x00\x00\x1e^" + // 0x00520331: 0x00001E5E + "\x00r\x031\x00\x00\x1e_" + // 0x00720331: 0x00001E5F + "\x00S\x03\a\x00\x00\x1e`" + // 0x00530307: 0x00001E60 + "\x00s\x03\a\x00\x00\x1ea" + // 0x00730307: 0x00001E61 + "\x00S\x03#\x00\x00\x1eb" + // 0x00530323: 0x00001E62 + "\x00s\x03#\x00\x00\x1ec" + // 0x00730323: 0x00001E63 + "\x01Z\x03\a\x00\x00\x1ed" + // 0x015A0307: 0x00001E64 + "\x01[\x03\a\x00\x00\x1ee" + // 0x015B0307: 0x00001E65 + "\x01`\x03\a\x00\x00\x1ef" + // 0x01600307: 0x00001E66 + "\x01a\x03\a\x00\x00\x1eg" + // 0x01610307: 0x00001E67 + "\x1eb\x03\a\x00\x00\x1eh" + // 0x1E620307: 0x00001E68 + "\x1ec\x03\a\x00\x00\x1ei" + // 0x1E630307: 0x00001E69 + "\x00T\x03\a\x00\x00\x1ej" + // 0x00540307: 0x00001E6A + "\x00t\x03\a\x00\x00\x1ek" + // 0x00740307: 0x00001E6B + 
"\x00T\x03#\x00\x00\x1el" + // 0x00540323: 0x00001E6C + "\x00t\x03#\x00\x00\x1em" + // 0x00740323: 0x00001E6D + "\x00T\x031\x00\x00\x1en" + // 0x00540331: 0x00001E6E + "\x00t\x031\x00\x00\x1eo" + // 0x00740331: 0x00001E6F + "\x00T\x03-\x00\x00\x1ep" + // 0x0054032D: 0x00001E70 + "\x00t\x03-\x00\x00\x1eq" + // 0x0074032D: 0x00001E71 + "\x00U\x03$\x00\x00\x1er" + // 0x00550324: 0x00001E72 + "\x00u\x03$\x00\x00\x1es" + // 0x00750324: 0x00001E73 + "\x00U\x030\x00\x00\x1et" + // 0x00550330: 0x00001E74 + "\x00u\x030\x00\x00\x1eu" + // 0x00750330: 0x00001E75 + "\x00U\x03-\x00\x00\x1ev" + // 0x0055032D: 0x00001E76 + "\x00u\x03-\x00\x00\x1ew" + // 0x0075032D: 0x00001E77 + "\x01h\x03\x01\x00\x00\x1ex" + // 0x01680301: 0x00001E78 + "\x01i\x03\x01\x00\x00\x1ey" + // 0x01690301: 0x00001E79 + "\x01j\x03\b\x00\x00\x1ez" + // 0x016A0308: 0x00001E7A + "\x01k\x03\b\x00\x00\x1e{" + // 0x016B0308: 0x00001E7B + "\x00V\x03\x03\x00\x00\x1e|" + // 0x00560303: 0x00001E7C + "\x00v\x03\x03\x00\x00\x1e}" + // 0x00760303: 0x00001E7D + "\x00V\x03#\x00\x00\x1e~" + // 0x00560323: 0x00001E7E + "\x00v\x03#\x00\x00\x1e\x7f" + // 0x00760323: 0x00001E7F + "\x00W\x03\x00\x00\x00\x1e\x80" + // 0x00570300: 0x00001E80 + "\x00w\x03\x00\x00\x00\x1e\x81" + // 0x00770300: 0x00001E81 + "\x00W\x03\x01\x00\x00\x1e\x82" + // 0x00570301: 0x00001E82 + "\x00w\x03\x01\x00\x00\x1e\x83" + // 0x00770301: 0x00001E83 + "\x00W\x03\b\x00\x00\x1e\x84" + // 0x00570308: 0x00001E84 + "\x00w\x03\b\x00\x00\x1e\x85" + // 0x00770308: 0x00001E85 + "\x00W\x03\a\x00\x00\x1e\x86" + // 0x00570307: 0x00001E86 + "\x00w\x03\a\x00\x00\x1e\x87" + // 0x00770307: 0x00001E87 + "\x00W\x03#\x00\x00\x1e\x88" + // 0x00570323: 0x00001E88 + "\x00w\x03#\x00\x00\x1e\x89" + // 0x00770323: 0x00001E89 + "\x00X\x03\a\x00\x00\x1e\x8a" + // 0x00580307: 0x00001E8A + "\x00x\x03\a\x00\x00\x1e\x8b" + // 0x00780307: 0x00001E8B + "\x00X\x03\b\x00\x00\x1e\x8c" + // 0x00580308: 0x00001E8C + "\x00x\x03\b\x00\x00\x1e\x8d" + // 0x00780308: 0x00001E8D + "\x00Y\x03\a\x00\x00\x1e\x8e" + // 0x00590307: 0x00001E8E + "\x00y\x03\a\x00\x00\x1e\x8f" + // 0x00790307: 0x00001E8F + "\x00Z\x03\x02\x00\x00\x1e\x90" + // 0x005A0302: 0x00001E90 + "\x00z\x03\x02\x00\x00\x1e\x91" + // 0x007A0302: 0x00001E91 + "\x00Z\x03#\x00\x00\x1e\x92" + // 0x005A0323: 0x00001E92 + "\x00z\x03#\x00\x00\x1e\x93" + // 0x007A0323: 0x00001E93 + "\x00Z\x031\x00\x00\x1e\x94" + // 0x005A0331: 0x00001E94 + "\x00z\x031\x00\x00\x1e\x95" + // 0x007A0331: 0x00001E95 + "\x00h\x031\x00\x00\x1e\x96" + // 0x00680331: 0x00001E96 + "\x00t\x03\b\x00\x00\x1e\x97" + // 0x00740308: 0x00001E97 + "\x00w\x03\n\x00\x00\x1e\x98" + // 0x0077030A: 0x00001E98 + "\x00y\x03\n\x00\x00\x1e\x99" + // 0x0079030A: 0x00001E99 + "\x01\x7f\x03\a\x00\x00\x1e\x9b" + // 0x017F0307: 0x00001E9B + "\x00A\x03#\x00\x00\x1e\xa0" + // 0x00410323: 0x00001EA0 + "\x00a\x03#\x00\x00\x1e\xa1" + // 0x00610323: 0x00001EA1 + "\x00A\x03\t\x00\x00\x1e\xa2" + // 0x00410309: 0x00001EA2 + "\x00a\x03\t\x00\x00\x1e\xa3" + // 0x00610309: 0x00001EA3 + "\x00\xc2\x03\x01\x00\x00\x1e\xa4" + // 0x00C20301: 0x00001EA4 + "\x00\xe2\x03\x01\x00\x00\x1e\xa5" + // 0x00E20301: 0x00001EA5 + "\x00\xc2\x03\x00\x00\x00\x1e\xa6" + // 0x00C20300: 0x00001EA6 + "\x00\xe2\x03\x00\x00\x00\x1e\xa7" + // 0x00E20300: 0x00001EA7 + "\x00\xc2\x03\t\x00\x00\x1e\xa8" + // 0x00C20309: 0x00001EA8 + "\x00\xe2\x03\t\x00\x00\x1e\xa9" + // 0x00E20309: 0x00001EA9 + "\x00\xc2\x03\x03\x00\x00\x1e\xaa" + // 0x00C20303: 0x00001EAA + "\x00\xe2\x03\x03\x00\x00\x1e\xab" + // 0x00E20303: 0x00001EAB + "\x1e\xa0\x03\x02\x00\x00\x1e\xac" 
+ // 0x1EA00302: 0x00001EAC + "\x1e\xa1\x03\x02\x00\x00\x1e\xad" + // 0x1EA10302: 0x00001EAD + "\x01\x02\x03\x01\x00\x00\x1e\xae" + // 0x01020301: 0x00001EAE + "\x01\x03\x03\x01\x00\x00\x1e\xaf" + // 0x01030301: 0x00001EAF + "\x01\x02\x03\x00\x00\x00\x1e\xb0" + // 0x01020300: 0x00001EB0 + "\x01\x03\x03\x00\x00\x00\x1e\xb1" + // 0x01030300: 0x00001EB1 + "\x01\x02\x03\t\x00\x00\x1e\xb2" + // 0x01020309: 0x00001EB2 + "\x01\x03\x03\t\x00\x00\x1e\xb3" + // 0x01030309: 0x00001EB3 + "\x01\x02\x03\x03\x00\x00\x1e\xb4" + // 0x01020303: 0x00001EB4 + "\x01\x03\x03\x03\x00\x00\x1e\xb5" + // 0x01030303: 0x00001EB5 + "\x1e\xa0\x03\x06\x00\x00\x1e\xb6" + // 0x1EA00306: 0x00001EB6 + "\x1e\xa1\x03\x06\x00\x00\x1e\xb7" + // 0x1EA10306: 0x00001EB7 + "\x00E\x03#\x00\x00\x1e\xb8" + // 0x00450323: 0x00001EB8 + "\x00e\x03#\x00\x00\x1e\xb9" + // 0x00650323: 0x00001EB9 + "\x00E\x03\t\x00\x00\x1e\xba" + // 0x00450309: 0x00001EBA + "\x00e\x03\t\x00\x00\x1e\xbb" + // 0x00650309: 0x00001EBB + "\x00E\x03\x03\x00\x00\x1e\xbc" + // 0x00450303: 0x00001EBC + "\x00e\x03\x03\x00\x00\x1e\xbd" + // 0x00650303: 0x00001EBD + "\x00\xca\x03\x01\x00\x00\x1e\xbe" + // 0x00CA0301: 0x00001EBE + "\x00\xea\x03\x01\x00\x00\x1e\xbf" + // 0x00EA0301: 0x00001EBF + "\x00\xca\x03\x00\x00\x00\x1e\xc0" + // 0x00CA0300: 0x00001EC0 + "\x00\xea\x03\x00\x00\x00\x1e\xc1" + // 0x00EA0300: 0x00001EC1 + "\x00\xca\x03\t\x00\x00\x1e\xc2" + // 0x00CA0309: 0x00001EC2 + "\x00\xea\x03\t\x00\x00\x1e\xc3" + // 0x00EA0309: 0x00001EC3 + "\x00\xca\x03\x03\x00\x00\x1e\xc4" + // 0x00CA0303: 0x00001EC4 + "\x00\xea\x03\x03\x00\x00\x1e\xc5" + // 0x00EA0303: 0x00001EC5 + "\x1e\xb8\x03\x02\x00\x00\x1e\xc6" + // 0x1EB80302: 0x00001EC6 + "\x1e\xb9\x03\x02\x00\x00\x1e\xc7" + // 0x1EB90302: 0x00001EC7 + "\x00I\x03\t\x00\x00\x1e\xc8" + // 0x00490309: 0x00001EC8 + "\x00i\x03\t\x00\x00\x1e\xc9" + // 0x00690309: 0x00001EC9 + "\x00I\x03#\x00\x00\x1e\xca" + // 0x00490323: 0x00001ECA + "\x00i\x03#\x00\x00\x1e\xcb" + // 0x00690323: 0x00001ECB + "\x00O\x03#\x00\x00\x1e\xcc" + // 0x004F0323: 0x00001ECC + "\x00o\x03#\x00\x00\x1e\xcd" + // 0x006F0323: 0x00001ECD + "\x00O\x03\t\x00\x00\x1e\xce" + // 0x004F0309: 0x00001ECE + "\x00o\x03\t\x00\x00\x1e\xcf" + // 0x006F0309: 0x00001ECF + "\x00\xd4\x03\x01\x00\x00\x1e\xd0" + // 0x00D40301: 0x00001ED0 + "\x00\xf4\x03\x01\x00\x00\x1e\xd1" + // 0x00F40301: 0x00001ED1 + "\x00\xd4\x03\x00\x00\x00\x1e\xd2" + // 0x00D40300: 0x00001ED2 + "\x00\xf4\x03\x00\x00\x00\x1e\xd3" + // 0x00F40300: 0x00001ED3 + "\x00\xd4\x03\t\x00\x00\x1e\xd4" + // 0x00D40309: 0x00001ED4 + "\x00\xf4\x03\t\x00\x00\x1e\xd5" + // 0x00F40309: 0x00001ED5 + "\x00\xd4\x03\x03\x00\x00\x1e\xd6" + // 0x00D40303: 0x00001ED6 + "\x00\xf4\x03\x03\x00\x00\x1e\xd7" + // 0x00F40303: 0x00001ED7 + "\x1e\xcc\x03\x02\x00\x00\x1e\xd8" + // 0x1ECC0302: 0x00001ED8 + "\x1e\xcd\x03\x02\x00\x00\x1e\xd9" + // 0x1ECD0302: 0x00001ED9 + "\x01\xa0\x03\x01\x00\x00\x1e\xda" + // 0x01A00301: 0x00001EDA + "\x01\xa1\x03\x01\x00\x00\x1e\xdb" + // 0x01A10301: 0x00001EDB + "\x01\xa0\x03\x00\x00\x00\x1e\xdc" + // 0x01A00300: 0x00001EDC + "\x01\xa1\x03\x00\x00\x00\x1e\xdd" + // 0x01A10300: 0x00001EDD + "\x01\xa0\x03\t\x00\x00\x1e\xde" + // 0x01A00309: 0x00001EDE + "\x01\xa1\x03\t\x00\x00\x1e\xdf" + // 0x01A10309: 0x00001EDF + "\x01\xa0\x03\x03\x00\x00\x1e\xe0" + // 0x01A00303: 0x00001EE0 + "\x01\xa1\x03\x03\x00\x00\x1e\xe1" + // 0x01A10303: 0x00001EE1 + "\x01\xa0\x03#\x00\x00\x1e\xe2" + // 0x01A00323: 0x00001EE2 + "\x01\xa1\x03#\x00\x00\x1e\xe3" + // 0x01A10323: 0x00001EE3 + "\x00U\x03#\x00\x00\x1e\xe4" + // 
0x00550323: 0x00001EE4 + "\x00u\x03#\x00\x00\x1e\xe5" + // 0x00750323: 0x00001EE5 + "\x00U\x03\t\x00\x00\x1e\xe6" + // 0x00550309: 0x00001EE6 + "\x00u\x03\t\x00\x00\x1e\xe7" + // 0x00750309: 0x00001EE7 + "\x01\xaf\x03\x01\x00\x00\x1e\xe8" + // 0x01AF0301: 0x00001EE8 + "\x01\xb0\x03\x01\x00\x00\x1e\xe9" + // 0x01B00301: 0x00001EE9 + "\x01\xaf\x03\x00\x00\x00\x1e\xea" + // 0x01AF0300: 0x00001EEA + "\x01\xb0\x03\x00\x00\x00\x1e\xeb" + // 0x01B00300: 0x00001EEB + "\x01\xaf\x03\t\x00\x00\x1e\xec" + // 0x01AF0309: 0x00001EEC + "\x01\xb0\x03\t\x00\x00\x1e\xed" + // 0x01B00309: 0x00001EED + "\x01\xaf\x03\x03\x00\x00\x1e\xee" + // 0x01AF0303: 0x00001EEE + "\x01\xb0\x03\x03\x00\x00\x1e\xef" + // 0x01B00303: 0x00001EEF + "\x01\xaf\x03#\x00\x00\x1e\xf0" + // 0x01AF0323: 0x00001EF0 + "\x01\xb0\x03#\x00\x00\x1e\xf1" + // 0x01B00323: 0x00001EF1 + "\x00Y\x03\x00\x00\x00\x1e\xf2" + // 0x00590300: 0x00001EF2 + "\x00y\x03\x00\x00\x00\x1e\xf3" + // 0x00790300: 0x00001EF3 + "\x00Y\x03#\x00\x00\x1e\xf4" + // 0x00590323: 0x00001EF4 + "\x00y\x03#\x00\x00\x1e\xf5" + // 0x00790323: 0x00001EF5 + "\x00Y\x03\t\x00\x00\x1e\xf6" + // 0x00590309: 0x00001EF6 + "\x00y\x03\t\x00\x00\x1e\xf7" + // 0x00790309: 0x00001EF7 + "\x00Y\x03\x03\x00\x00\x1e\xf8" + // 0x00590303: 0x00001EF8 + "\x00y\x03\x03\x00\x00\x1e\xf9" + // 0x00790303: 0x00001EF9 + "\x03\xb1\x03\x13\x00\x00\x1f\x00" + // 0x03B10313: 0x00001F00 + "\x03\xb1\x03\x14\x00\x00\x1f\x01" + // 0x03B10314: 0x00001F01 + "\x1f\x00\x03\x00\x00\x00\x1f\x02" + // 0x1F000300: 0x00001F02 + "\x1f\x01\x03\x00\x00\x00\x1f\x03" + // 0x1F010300: 0x00001F03 + "\x1f\x00\x03\x01\x00\x00\x1f\x04" + // 0x1F000301: 0x00001F04 + "\x1f\x01\x03\x01\x00\x00\x1f\x05" + // 0x1F010301: 0x00001F05 + "\x1f\x00\x03B\x00\x00\x1f\x06" + // 0x1F000342: 0x00001F06 + "\x1f\x01\x03B\x00\x00\x1f\a" + // 0x1F010342: 0x00001F07 + "\x03\x91\x03\x13\x00\x00\x1f\b" + // 0x03910313: 0x00001F08 + "\x03\x91\x03\x14\x00\x00\x1f\t" + // 0x03910314: 0x00001F09 + "\x1f\b\x03\x00\x00\x00\x1f\n" + // 0x1F080300: 0x00001F0A + "\x1f\t\x03\x00\x00\x00\x1f\v" + // 0x1F090300: 0x00001F0B + "\x1f\b\x03\x01\x00\x00\x1f\f" + // 0x1F080301: 0x00001F0C + "\x1f\t\x03\x01\x00\x00\x1f\r" + // 0x1F090301: 0x00001F0D + "\x1f\b\x03B\x00\x00\x1f\x0e" + // 0x1F080342: 0x00001F0E + "\x1f\t\x03B\x00\x00\x1f\x0f" + // 0x1F090342: 0x00001F0F + "\x03\xb5\x03\x13\x00\x00\x1f\x10" + // 0x03B50313: 0x00001F10 + "\x03\xb5\x03\x14\x00\x00\x1f\x11" + // 0x03B50314: 0x00001F11 + "\x1f\x10\x03\x00\x00\x00\x1f\x12" + // 0x1F100300: 0x00001F12 + "\x1f\x11\x03\x00\x00\x00\x1f\x13" + // 0x1F110300: 0x00001F13 + "\x1f\x10\x03\x01\x00\x00\x1f\x14" + // 0x1F100301: 0x00001F14 + "\x1f\x11\x03\x01\x00\x00\x1f\x15" + // 0x1F110301: 0x00001F15 + "\x03\x95\x03\x13\x00\x00\x1f\x18" + // 0x03950313: 0x00001F18 + "\x03\x95\x03\x14\x00\x00\x1f\x19" + // 0x03950314: 0x00001F19 + "\x1f\x18\x03\x00\x00\x00\x1f\x1a" + // 0x1F180300: 0x00001F1A + "\x1f\x19\x03\x00\x00\x00\x1f\x1b" + // 0x1F190300: 0x00001F1B + "\x1f\x18\x03\x01\x00\x00\x1f\x1c" + // 0x1F180301: 0x00001F1C + "\x1f\x19\x03\x01\x00\x00\x1f\x1d" + // 0x1F190301: 0x00001F1D + "\x03\xb7\x03\x13\x00\x00\x1f " + // 0x03B70313: 0x00001F20 + "\x03\xb7\x03\x14\x00\x00\x1f!" 
+ // 0x03B70314: 0x00001F21 + "\x1f \x03\x00\x00\x00\x1f\"" + // 0x1F200300: 0x00001F22 + "\x1f!\x03\x00\x00\x00\x1f#" + // 0x1F210300: 0x00001F23 + "\x1f \x03\x01\x00\x00\x1f$" + // 0x1F200301: 0x00001F24 + "\x1f!\x03\x01\x00\x00\x1f%" + // 0x1F210301: 0x00001F25 + "\x1f \x03B\x00\x00\x1f&" + // 0x1F200342: 0x00001F26 + "\x1f!\x03B\x00\x00\x1f'" + // 0x1F210342: 0x00001F27 + "\x03\x97\x03\x13\x00\x00\x1f(" + // 0x03970313: 0x00001F28 + "\x03\x97\x03\x14\x00\x00\x1f)" + // 0x03970314: 0x00001F29 + "\x1f(\x03\x00\x00\x00\x1f*" + // 0x1F280300: 0x00001F2A + "\x1f)\x03\x00\x00\x00\x1f+" + // 0x1F290300: 0x00001F2B + "\x1f(\x03\x01\x00\x00\x1f," + // 0x1F280301: 0x00001F2C + "\x1f)\x03\x01\x00\x00\x1f-" + // 0x1F290301: 0x00001F2D + "\x1f(\x03B\x00\x00\x1f." + // 0x1F280342: 0x00001F2E + "\x1f)\x03B\x00\x00\x1f/" + // 0x1F290342: 0x00001F2F + "\x03\xb9\x03\x13\x00\x00\x1f0" + // 0x03B90313: 0x00001F30 + "\x03\xb9\x03\x14\x00\x00\x1f1" + // 0x03B90314: 0x00001F31 + "\x1f0\x03\x00\x00\x00\x1f2" + // 0x1F300300: 0x00001F32 + "\x1f1\x03\x00\x00\x00\x1f3" + // 0x1F310300: 0x00001F33 + "\x1f0\x03\x01\x00\x00\x1f4" + // 0x1F300301: 0x00001F34 + "\x1f1\x03\x01\x00\x00\x1f5" + // 0x1F310301: 0x00001F35 + "\x1f0\x03B\x00\x00\x1f6" + // 0x1F300342: 0x00001F36 + "\x1f1\x03B\x00\x00\x1f7" + // 0x1F310342: 0x00001F37 + "\x03\x99\x03\x13\x00\x00\x1f8" + // 0x03990313: 0x00001F38 + "\x03\x99\x03\x14\x00\x00\x1f9" + // 0x03990314: 0x00001F39 + "\x1f8\x03\x00\x00\x00\x1f:" + // 0x1F380300: 0x00001F3A + "\x1f9\x03\x00\x00\x00\x1f;" + // 0x1F390300: 0x00001F3B + "\x1f8\x03\x01\x00\x00\x1f<" + // 0x1F380301: 0x00001F3C + "\x1f9\x03\x01\x00\x00\x1f=" + // 0x1F390301: 0x00001F3D + "\x1f8\x03B\x00\x00\x1f>" + // 0x1F380342: 0x00001F3E + "\x1f9\x03B\x00\x00\x1f?" + // 0x1F390342: 0x00001F3F + "\x03\xbf\x03\x13\x00\x00\x1f@" + // 0x03BF0313: 0x00001F40 + "\x03\xbf\x03\x14\x00\x00\x1fA" + // 0x03BF0314: 0x00001F41 + "\x1f@\x03\x00\x00\x00\x1fB" + // 0x1F400300: 0x00001F42 + "\x1fA\x03\x00\x00\x00\x1fC" + // 0x1F410300: 0x00001F43 + "\x1f@\x03\x01\x00\x00\x1fD" + // 0x1F400301: 0x00001F44 + "\x1fA\x03\x01\x00\x00\x1fE" + // 0x1F410301: 0x00001F45 + "\x03\x9f\x03\x13\x00\x00\x1fH" + // 0x039F0313: 0x00001F48 + "\x03\x9f\x03\x14\x00\x00\x1fI" + // 0x039F0314: 0x00001F49 + "\x1fH\x03\x00\x00\x00\x1fJ" + // 0x1F480300: 0x00001F4A + "\x1fI\x03\x00\x00\x00\x1fK" + // 0x1F490300: 0x00001F4B + "\x1fH\x03\x01\x00\x00\x1fL" + // 0x1F480301: 0x00001F4C + "\x1fI\x03\x01\x00\x00\x1fM" + // 0x1F490301: 0x00001F4D + "\x03\xc5\x03\x13\x00\x00\x1fP" + // 0x03C50313: 0x00001F50 + "\x03\xc5\x03\x14\x00\x00\x1fQ" + // 0x03C50314: 0x00001F51 + "\x1fP\x03\x00\x00\x00\x1fR" + // 0x1F500300: 0x00001F52 + "\x1fQ\x03\x00\x00\x00\x1fS" + // 0x1F510300: 0x00001F53 + "\x1fP\x03\x01\x00\x00\x1fT" + // 0x1F500301: 0x00001F54 + "\x1fQ\x03\x01\x00\x00\x1fU" + // 0x1F510301: 0x00001F55 + "\x1fP\x03B\x00\x00\x1fV" + // 0x1F500342: 0x00001F56 + "\x1fQ\x03B\x00\x00\x1fW" + // 0x1F510342: 0x00001F57 + "\x03\xa5\x03\x14\x00\x00\x1fY" + // 0x03A50314: 0x00001F59 + "\x1fY\x03\x00\x00\x00\x1f[" + // 0x1F590300: 0x00001F5B + "\x1fY\x03\x01\x00\x00\x1f]" + // 0x1F590301: 0x00001F5D + "\x1fY\x03B\x00\x00\x1f_" + // 0x1F590342: 0x00001F5F + "\x03\xc9\x03\x13\x00\x00\x1f`" + // 0x03C90313: 0x00001F60 + "\x03\xc9\x03\x14\x00\x00\x1fa" + // 0x03C90314: 0x00001F61 + "\x1f`\x03\x00\x00\x00\x1fb" + // 0x1F600300: 0x00001F62 + "\x1fa\x03\x00\x00\x00\x1fc" + // 0x1F610300: 0x00001F63 + "\x1f`\x03\x01\x00\x00\x1fd" + // 0x1F600301: 0x00001F64 + "\x1fa\x03\x01\x00\x00\x1fe" + 
// 0x1F610301: 0x00001F65 + "\x1f`\x03B\x00\x00\x1ff" + // 0x1F600342: 0x00001F66 + "\x1fa\x03B\x00\x00\x1fg" + // 0x1F610342: 0x00001F67 + "\x03\xa9\x03\x13\x00\x00\x1fh" + // 0x03A90313: 0x00001F68 + "\x03\xa9\x03\x14\x00\x00\x1fi" + // 0x03A90314: 0x00001F69 + "\x1fh\x03\x00\x00\x00\x1fj" + // 0x1F680300: 0x00001F6A + "\x1fi\x03\x00\x00\x00\x1fk" + // 0x1F690300: 0x00001F6B + "\x1fh\x03\x01\x00\x00\x1fl" + // 0x1F680301: 0x00001F6C + "\x1fi\x03\x01\x00\x00\x1fm" + // 0x1F690301: 0x00001F6D + "\x1fh\x03B\x00\x00\x1fn" + // 0x1F680342: 0x00001F6E + "\x1fi\x03B\x00\x00\x1fo" + // 0x1F690342: 0x00001F6F + "\x03\xb1\x03\x00\x00\x00\x1fp" + // 0x03B10300: 0x00001F70 + "\x03\xb5\x03\x00\x00\x00\x1fr" + // 0x03B50300: 0x00001F72 + "\x03\xb7\x03\x00\x00\x00\x1ft" + // 0x03B70300: 0x00001F74 + "\x03\xb9\x03\x00\x00\x00\x1fv" + // 0x03B90300: 0x00001F76 + "\x03\xbf\x03\x00\x00\x00\x1fx" + // 0x03BF0300: 0x00001F78 + "\x03\xc5\x03\x00\x00\x00\x1fz" + // 0x03C50300: 0x00001F7A + "\x03\xc9\x03\x00\x00\x00\x1f|" + // 0x03C90300: 0x00001F7C + "\x1f\x00\x03E\x00\x00\x1f\x80" + // 0x1F000345: 0x00001F80 + "\x1f\x01\x03E\x00\x00\x1f\x81" + // 0x1F010345: 0x00001F81 + "\x1f\x02\x03E\x00\x00\x1f\x82" + // 0x1F020345: 0x00001F82 + "\x1f\x03\x03E\x00\x00\x1f\x83" + // 0x1F030345: 0x00001F83 + "\x1f\x04\x03E\x00\x00\x1f\x84" + // 0x1F040345: 0x00001F84 + "\x1f\x05\x03E\x00\x00\x1f\x85" + // 0x1F050345: 0x00001F85 + "\x1f\x06\x03E\x00\x00\x1f\x86" + // 0x1F060345: 0x00001F86 + "\x1f\a\x03E\x00\x00\x1f\x87" + // 0x1F070345: 0x00001F87 + "\x1f\b\x03E\x00\x00\x1f\x88" + // 0x1F080345: 0x00001F88 + "\x1f\t\x03E\x00\x00\x1f\x89" + // 0x1F090345: 0x00001F89 + "\x1f\n\x03E\x00\x00\x1f\x8a" + // 0x1F0A0345: 0x00001F8A + "\x1f\v\x03E\x00\x00\x1f\x8b" + // 0x1F0B0345: 0x00001F8B + "\x1f\f\x03E\x00\x00\x1f\x8c" + // 0x1F0C0345: 0x00001F8C + "\x1f\r\x03E\x00\x00\x1f\x8d" + // 0x1F0D0345: 0x00001F8D + "\x1f\x0e\x03E\x00\x00\x1f\x8e" + // 0x1F0E0345: 0x00001F8E + "\x1f\x0f\x03E\x00\x00\x1f\x8f" + // 0x1F0F0345: 0x00001F8F + "\x1f \x03E\x00\x00\x1f\x90" + // 0x1F200345: 0x00001F90 + "\x1f!\x03E\x00\x00\x1f\x91" + // 0x1F210345: 0x00001F91 + "\x1f\"\x03E\x00\x00\x1f\x92" + // 0x1F220345: 0x00001F92 + "\x1f#\x03E\x00\x00\x1f\x93" + // 0x1F230345: 0x00001F93 + "\x1f$\x03E\x00\x00\x1f\x94" + // 0x1F240345: 0x00001F94 + "\x1f%\x03E\x00\x00\x1f\x95" + // 0x1F250345: 0x00001F95 + "\x1f&\x03E\x00\x00\x1f\x96" + // 0x1F260345: 0x00001F96 + "\x1f'\x03E\x00\x00\x1f\x97" + // 0x1F270345: 0x00001F97 + "\x1f(\x03E\x00\x00\x1f\x98" + // 0x1F280345: 0x00001F98 + "\x1f)\x03E\x00\x00\x1f\x99" + // 0x1F290345: 0x00001F99 + "\x1f*\x03E\x00\x00\x1f\x9a" + // 0x1F2A0345: 0x00001F9A + "\x1f+\x03E\x00\x00\x1f\x9b" + // 0x1F2B0345: 0x00001F9B + "\x1f,\x03E\x00\x00\x1f\x9c" + // 0x1F2C0345: 0x00001F9C + "\x1f-\x03E\x00\x00\x1f\x9d" + // 0x1F2D0345: 0x00001F9D + "\x1f.\x03E\x00\x00\x1f\x9e" + // 0x1F2E0345: 0x00001F9E + "\x1f/\x03E\x00\x00\x1f\x9f" + // 0x1F2F0345: 0x00001F9F + "\x1f`\x03E\x00\x00\x1f\xa0" + // 0x1F600345: 0x00001FA0 + "\x1fa\x03E\x00\x00\x1f\xa1" + // 0x1F610345: 0x00001FA1 + "\x1fb\x03E\x00\x00\x1f\xa2" + // 0x1F620345: 0x00001FA2 + "\x1fc\x03E\x00\x00\x1f\xa3" + // 0x1F630345: 0x00001FA3 + "\x1fd\x03E\x00\x00\x1f\xa4" + // 0x1F640345: 0x00001FA4 + "\x1fe\x03E\x00\x00\x1f\xa5" + // 0x1F650345: 0x00001FA5 + "\x1ff\x03E\x00\x00\x1f\xa6" + // 0x1F660345: 0x00001FA6 + "\x1fg\x03E\x00\x00\x1f\xa7" + // 0x1F670345: 0x00001FA7 + "\x1fh\x03E\x00\x00\x1f\xa8" + // 0x1F680345: 0x00001FA8 + "\x1fi\x03E\x00\x00\x1f\xa9" + // 0x1F690345: 
0x00001FA9 + "\x1fj\x03E\x00\x00\x1f\xaa" + // 0x1F6A0345: 0x00001FAA + "\x1fk\x03E\x00\x00\x1f\xab" + // 0x1F6B0345: 0x00001FAB + "\x1fl\x03E\x00\x00\x1f\xac" + // 0x1F6C0345: 0x00001FAC + "\x1fm\x03E\x00\x00\x1f\xad" + // 0x1F6D0345: 0x00001FAD + "\x1fn\x03E\x00\x00\x1f\xae" + // 0x1F6E0345: 0x00001FAE + "\x1fo\x03E\x00\x00\x1f\xaf" + // 0x1F6F0345: 0x00001FAF + "\x03\xb1\x03\x06\x00\x00\x1f\xb0" + // 0x03B10306: 0x00001FB0 + "\x03\xb1\x03\x04\x00\x00\x1f\xb1" + // 0x03B10304: 0x00001FB1 + "\x1fp\x03E\x00\x00\x1f\xb2" + // 0x1F700345: 0x00001FB2 + "\x03\xb1\x03E\x00\x00\x1f\xb3" + // 0x03B10345: 0x00001FB3 + "\x03\xac\x03E\x00\x00\x1f\xb4" + // 0x03AC0345: 0x00001FB4 + "\x03\xb1\x03B\x00\x00\x1f\xb6" + // 0x03B10342: 0x00001FB6 + "\x1f\xb6\x03E\x00\x00\x1f\xb7" + // 0x1FB60345: 0x00001FB7 + "\x03\x91\x03\x06\x00\x00\x1f\xb8" + // 0x03910306: 0x00001FB8 + "\x03\x91\x03\x04\x00\x00\x1f\xb9" + // 0x03910304: 0x00001FB9 + "\x03\x91\x03\x00\x00\x00\x1f\xba" + // 0x03910300: 0x00001FBA + "\x03\x91\x03E\x00\x00\x1f\xbc" + // 0x03910345: 0x00001FBC + "\x00\xa8\x03B\x00\x00\x1f\xc1" + // 0x00A80342: 0x00001FC1 + "\x1ft\x03E\x00\x00\x1f\xc2" + // 0x1F740345: 0x00001FC2 + "\x03\xb7\x03E\x00\x00\x1f\xc3" + // 0x03B70345: 0x00001FC3 + "\x03\xae\x03E\x00\x00\x1f\xc4" + // 0x03AE0345: 0x00001FC4 + "\x03\xb7\x03B\x00\x00\x1f\xc6" + // 0x03B70342: 0x00001FC6 + "\x1f\xc6\x03E\x00\x00\x1f\xc7" + // 0x1FC60345: 0x00001FC7 + "\x03\x95\x03\x00\x00\x00\x1f\xc8" + // 0x03950300: 0x00001FC8 + "\x03\x97\x03\x00\x00\x00\x1f\xca" + // 0x03970300: 0x00001FCA + "\x03\x97\x03E\x00\x00\x1f\xcc" + // 0x03970345: 0x00001FCC + "\x1f\xbf\x03\x00\x00\x00\x1f\xcd" + // 0x1FBF0300: 0x00001FCD + "\x1f\xbf\x03\x01\x00\x00\x1f\xce" + // 0x1FBF0301: 0x00001FCE + "\x1f\xbf\x03B\x00\x00\x1f\xcf" + // 0x1FBF0342: 0x00001FCF + "\x03\xb9\x03\x06\x00\x00\x1f\xd0" + // 0x03B90306: 0x00001FD0 + "\x03\xb9\x03\x04\x00\x00\x1f\xd1" + // 0x03B90304: 0x00001FD1 + "\x03\xca\x03\x00\x00\x00\x1f\xd2" + // 0x03CA0300: 0x00001FD2 + "\x03\xb9\x03B\x00\x00\x1f\xd6" + // 0x03B90342: 0x00001FD6 + "\x03\xca\x03B\x00\x00\x1f\xd7" + // 0x03CA0342: 0x00001FD7 + "\x03\x99\x03\x06\x00\x00\x1f\xd8" + // 0x03990306: 0x00001FD8 + "\x03\x99\x03\x04\x00\x00\x1f\xd9" + // 0x03990304: 0x00001FD9 + "\x03\x99\x03\x00\x00\x00\x1f\xda" + // 0x03990300: 0x00001FDA + "\x1f\xfe\x03\x00\x00\x00\x1f\xdd" + // 0x1FFE0300: 0x00001FDD + "\x1f\xfe\x03\x01\x00\x00\x1f\xde" + // 0x1FFE0301: 0x00001FDE + "\x1f\xfe\x03B\x00\x00\x1f\xdf" + // 0x1FFE0342: 0x00001FDF + "\x03\xc5\x03\x06\x00\x00\x1f\xe0" + // 0x03C50306: 0x00001FE0 + "\x03\xc5\x03\x04\x00\x00\x1f\xe1" + // 0x03C50304: 0x00001FE1 + "\x03\xcb\x03\x00\x00\x00\x1f\xe2" + // 0x03CB0300: 0x00001FE2 + "\x03\xc1\x03\x13\x00\x00\x1f\xe4" + // 0x03C10313: 0x00001FE4 + "\x03\xc1\x03\x14\x00\x00\x1f\xe5" + // 0x03C10314: 0x00001FE5 + "\x03\xc5\x03B\x00\x00\x1f\xe6" + // 0x03C50342: 0x00001FE6 + "\x03\xcb\x03B\x00\x00\x1f\xe7" + // 0x03CB0342: 0x00001FE7 + "\x03\xa5\x03\x06\x00\x00\x1f\xe8" + // 0x03A50306: 0x00001FE8 + "\x03\xa5\x03\x04\x00\x00\x1f\xe9" + // 0x03A50304: 0x00001FE9 + "\x03\xa5\x03\x00\x00\x00\x1f\xea" + // 0x03A50300: 0x00001FEA + "\x03\xa1\x03\x14\x00\x00\x1f\xec" + // 0x03A10314: 0x00001FEC + "\x00\xa8\x03\x00\x00\x00\x1f\xed" + // 0x00A80300: 0x00001FED + "\x1f|\x03E\x00\x00\x1f\xf2" + // 0x1F7C0345: 0x00001FF2 + "\x03\xc9\x03E\x00\x00\x1f\xf3" + // 0x03C90345: 0x00001FF3 + "\x03\xce\x03E\x00\x00\x1f\xf4" + // 0x03CE0345: 0x00001FF4 + "\x03\xc9\x03B\x00\x00\x1f\xf6" + // 0x03C90342: 0x00001FF6 + 
"\x1f\xf6\x03E\x00\x00\x1f\xf7" + // 0x1FF60345: 0x00001FF7 + "\x03\x9f\x03\x00\x00\x00\x1f\xf8" + // 0x039F0300: 0x00001FF8 + "\x03\xa9\x03\x00\x00\x00\x1f\xfa" + // 0x03A90300: 0x00001FFA + "\x03\xa9\x03E\x00\x00\x1f\xfc" + // 0x03A90345: 0x00001FFC + "!\x90\x038\x00\x00!\x9a" + // 0x21900338: 0x0000219A + "!\x92\x038\x00\x00!\x9b" + // 0x21920338: 0x0000219B + "!\x94\x038\x00\x00!\xae" + // 0x21940338: 0x000021AE + "!\xd0\x038\x00\x00!\xcd" + // 0x21D00338: 0x000021CD + "!\xd4\x038\x00\x00!\xce" + // 0x21D40338: 0x000021CE + "!\xd2\x038\x00\x00!\xcf" + // 0x21D20338: 0x000021CF + "\"\x03\x038\x00\x00\"\x04" + // 0x22030338: 0x00002204 + "\"\b\x038\x00\x00\"\t" + // 0x22080338: 0x00002209 + "\"\v\x038\x00\x00\"\f" + // 0x220B0338: 0x0000220C + "\"#\x038\x00\x00\"$" + // 0x22230338: 0x00002224 + "\"%\x038\x00\x00\"&" + // 0x22250338: 0x00002226 + "\"<\x038\x00\x00\"A" + // 0x223C0338: 0x00002241 + "\"C\x038\x00\x00\"D" + // 0x22430338: 0x00002244 + "\"E\x038\x00\x00\"G" + // 0x22450338: 0x00002247 + "\"H\x038\x00\x00\"I" + // 0x22480338: 0x00002249 + "\x00=\x038\x00\x00\"`" + // 0x003D0338: 0x00002260 + "\"a\x038\x00\x00\"b" + // 0x22610338: 0x00002262 + "\"M\x038\x00\x00\"m" + // 0x224D0338: 0x0000226D + "\x00<\x038\x00\x00\"n" + // 0x003C0338: 0x0000226E + "\x00>\x038\x00\x00\"o" + // 0x003E0338: 0x0000226F + "\"d\x038\x00\x00\"p" + // 0x22640338: 0x00002270 + "\"e\x038\x00\x00\"q" + // 0x22650338: 0x00002271 + "\"r\x038\x00\x00\"t" + // 0x22720338: 0x00002274 + "\"s\x038\x00\x00\"u" + // 0x22730338: 0x00002275 + "\"v\x038\x00\x00\"x" + // 0x22760338: 0x00002278 + "\"w\x038\x00\x00\"y" + // 0x22770338: 0x00002279 + "\"z\x038\x00\x00\"\x80" + // 0x227A0338: 0x00002280 + "\"{\x038\x00\x00\"\x81" + // 0x227B0338: 0x00002281 + "\"\x82\x038\x00\x00\"\x84" + // 0x22820338: 0x00002284 + "\"\x83\x038\x00\x00\"\x85" + // 0x22830338: 0x00002285 + "\"\x86\x038\x00\x00\"\x88" + // 0x22860338: 0x00002288 + "\"\x87\x038\x00\x00\"\x89" + // 0x22870338: 0x00002289 + "\"\xa2\x038\x00\x00\"\xac" + // 0x22A20338: 0x000022AC + "\"\xa8\x038\x00\x00\"\xad" + // 0x22A80338: 0x000022AD + "\"\xa9\x038\x00\x00\"\xae" + // 0x22A90338: 0x000022AE + "\"\xab\x038\x00\x00\"\xaf" + // 0x22AB0338: 0x000022AF + "\"|\x038\x00\x00\"\xe0" + // 0x227C0338: 0x000022E0 + "\"}\x038\x00\x00\"\xe1" + // 0x227D0338: 0x000022E1 + "\"\x91\x038\x00\x00\"\xe2" + // 0x22910338: 0x000022E2 + "\"\x92\x038\x00\x00\"\xe3" + // 0x22920338: 0x000022E3 + "\"\xb2\x038\x00\x00\"\xea" + // 0x22B20338: 0x000022EA + "\"\xb3\x038\x00\x00\"\xeb" + // 0x22B30338: 0x000022EB + "\"\xb4\x038\x00\x00\"\xec" + // 0x22B40338: 0x000022EC + "\"\xb5\x038\x00\x00\"\xed" + // 0x22B50338: 0x000022ED + "0K0\x99\x00\x000L" + // 0x304B3099: 0x0000304C + "0M0\x99\x00\x000N" + // 0x304D3099: 0x0000304E + "0O0\x99\x00\x000P" + // 0x304F3099: 0x00003050 + "0Q0\x99\x00\x000R" + // 0x30513099: 0x00003052 + "0S0\x99\x00\x000T" + // 0x30533099: 0x00003054 + "0U0\x99\x00\x000V" + // 0x30553099: 0x00003056 + "0W0\x99\x00\x000X" + // 0x30573099: 0x00003058 + "0Y0\x99\x00\x000Z" + // 0x30593099: 0x0000305A + "0[0\x99\x00\x000\\" + // 0x305B3099: 0x0000305C + "0]0\x99\x00\x000^" + // 0x305D3099: 0x0000305E + "0_0\x99\x00\x000`" + // 0x305F3099: 0x00003060 + "0a0\x99\x00\x000b" + // 0x30613099: 0x00003062 + "0d0\x99\x00\x000e" + // 0x30643099: 0x00003065 + "0f0\x99\x00\x000g" + // 0x30663099: 0x00003067 + "0h0\x99\x00\x000i" + // 0x30683099: 0x00003069 + "0o0\x99\x00\x000p" + // 0x306F3099: 0x00003070 + "0o0\x9a\x00\x000q" + // 0x306F309A: 0x00003071 + "0r0\x99\x00\x000s" + // 
0x30723099: 0x00003073 + "0r0\x9a\x00\x000t" + // 0x3072309A: 0x00003074 + "0u0\x99\x00\x000v" + // 0x30753099: 0x00003076 + "0u0\x9a\x00\x000w" + // 0x3075309A: 0x00003077 + "0x0\x99\x00\x000y" + // 0x30783099: 0x00003079 + "0x0\x9a\x00\x000z" + // 0x3078309A: 0x0000307A + "0{0\x99\x00\x000|" + // 0x307B3099: 0x0000307C + "0{0\x9a\x00\x000}" + // 0x307B309A: 0x0000307D + "0F0\x99\x00\x000\x94" + // 0x30463099: 0x00003094 + "0\x9d0\x99\x00\x000\x9e" + // 0x309D3099: 0x0000309E + "0\xab0\x99\x00\x000\xac" + // 0x30AB3099: 0x000030AC + "0\xad0\x99\x00\x000\xae" + // 0x30AD3099: 0x000030AE + "0\xaf0\x99\x00\x000\xb0" + // 0x30AF3099: 0x000030B0 + "0\xb10\x99\x00\x000\xb2" + // 0x30B13099: 0x000030B2 + "0\xb30\x99\x00\x000\xb4" + // 0x30B33099: 0x000030B4 + "0\xb50\x99\x00\x000\xb6" + // 0x30B53099: 0x000030B6 + "0\xb70\x99\x00\x000\xb8" + // 0x30B73099: 0x000030B8 + "0\xb90\x99\x00\x000\xba" + // 0x30B93099: 0x000030BA + "0\xbb0\x99\x00\x000\xbc" + // 0x30BB3099: 0x000030BC + "0\xbd0\x99\x00\x000\xbe" + // 0x30BD3099: 0x000030BE + "0\xbf0\x99\x00\x000\xc0" + // 0x30BF3099: 0x000030C0 + "0\xc10\x99\x00\x000\xc2" + // 0x30C13099: 0x000030C2 + "0\xc40\x99\x00\x000\xc5" + // 0x30C43099: 0x000030C5 + "0\xc60\x99\x00\x000\xc7" + // 0x30C63099: 0x000030C7 + "0\xc80\x99\x00\x000\xc9" + // 0x30C83099: 0x000030C9 + "0\xcf0\x99\x00\x000\xd0" + // 0x30CF3099: 0x000030D0 + "0\xcf0\x9a\x00\x000\xd1" + // 0x30CF309A: 0x000030D1 + "0\xd20\x99\x00\x000\xd3" + // 0x30D23099: 0x000030D3 + "0\xd20\x9a\x00\x000\xd4" + // 0x30D2309A: 0x000030D4 + "0\xd50\x99\x00\x000\xd6" + // 0x30D53099: 0x000030D6 + "0\xd50\x9a\x00\x000\xd7" + // 0x30D5309A: 0x000030D7 + "0\xd80\x99\x00\x000\xd9" + // 0x30D83099: 0x000030D9 + "0\xd80\x9a\x00\x000\xda" + // 0x30D8309A: 0x000030DA + "0\xdb0\x99\x00\x000\xdc" + // 0x30DB3099: 0x000030DC + "0\xdb0\x9a\x00\x000\xdd" + // 0x30DB309A: 0x000030DD + "0\xa60\x99\x00\x000\xf4" + // 0x30A63099: 0x000030F4 + "0\xef0\x99\x00\x000\xf7" + // 0x30EF3099: 0x000030F7 + "0\xf00\x99\x00\x000\xf8" + // 0x30F03099: 0x000030F8 + "0\xf10\x99\x00\x000\xf9" + // 0x30F13099: 0x000030F9 + "0\xf20\x99\x00\x000\xfa" + // 0x30F23099: 0x000030FA + "0\xfd0\x99\x00\x000\xfe" + // 0x30FD3099: 0x000030FE + "\x10\x99\x10\xba\x00\x01\x10\x9a" + // 0x109910BA: 0x0001109A + "\x10\x9b\x10\xba\x00\x01\x10\x9c" + // 0x109B10BA: 0x0001109C + "\x10\xa5\x10\xba\x00\x01\x10\xab" + // 0x10A510BA: 0x000110AB + "\x111\x11'\x00\x01\x11." + // 0x11311127: 0x0001112E + "\x112\x11'\x00\x01\x11/" + // 0x11321127: 0x0001112F + "\x13G\x13>\x00\x01\x13K" + // 0x1347133E: 0x0001134B + "\x13G\x13W\x00\x01\x13L" + // 0x13471357: 0x0001134C + "\x14\xb9\x14\xba\x00\x01\x14\xbb" + // 0x14B914BA: 0x000114BB + "\x14\xb9\x14\xb0\x00\x01\x14\xbc" + // 0x14B914B0: 0x000114BC + "\x14\xb9\x14\xbd\x00\x01\x14\xbe" + // 0x14B914BD: 0x000114BE + "\x15\xb8\x15\xaf\x00\x01\x15\xba" + // 0x15B815AF: 0x000115BA + "\x15\xb9\x15\xaf\x00\x01\x15\xbb" + // 0x15B915AF: 0x000115BB + "\x195\x190\x00\x01\x198" + // 0x19351930: 0x00011938 + "" + // Total size of tables: 56KB (57068 bytes) diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/text/unicode/norm/trie.go temporal-1.22.5/src/vendor/golang.org/x/text/unicode/norm/trie.go --- temporal-1.21.5-1/src/vendor/golang.org/x/text/unicode/norm/trie.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/text/unicode/norm/trie.go 2024-02-23 09:46:14.000000000 +0000 @@ -29,7 +29,7 @@ nfkcData = newNfkcTrie(0) ) -// lookupValue determines the type of block n and looks up the value for b. 
+// lookup determines the type of block n and looks up the value for b. // For n < t.cutoff, the block is a simple lookup table. Otherwise, the block // is a list of ranges with an accompanying value. Given a matching range r, // the value for b is by r.value + (b - r.lo) * stride. diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go temporal-1.22.5/src/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go --- temporal-1.21.5-1/src/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go 2024-02-23 09:46:14.000000000 +0000 @@ -128,15 +128,14 @@ // (from "version"). Select appropriate importer. if len(data) > 0 { switch data[0] { - case 'i': - _, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path) - return pkg, err + case 'v', 'c', 'd': // binary, till go1.10 + return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0]) - case 'v', 'c', 'd': - _, pkg, err := gcimporter.BImportData(fset, imports, data, path) + case 'i': // indexed, till go1.19 + _, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path) return pkg, err - case 'u': + case 'u': // unified, from go1.20 _, pkg, err := gcimporter.UImportData(fset, imports, data[1:], path) return pkg, err diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/tools/go/packages/golist.go temporal-1.22.5/src/vendor/golang.org/x/tools/go/packages/golist.go --- temporal-1.21.5-1/src/vendor/golang.org/x/tools/go/packages/golist.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/tools/go/packages/golist.go 2024-02-23 09:46:14.000000000 +0000 @@ -625,7 +625,12 @@ } if pkg.PkgPath == "unsafe" { - pkg.GoFiles = nil // ignore fake unsafe.go file + pkg.CompiledGoFiles = nil // ignore fake unsafe.go file (#59929) + } else if len(pkg.CompiledGoFiles) == 0 { + // Work around for pre-go.1.11 versions of go list. + // TODO(matloob): they should be handled by the fallback. + // Can we delete this? + pkg.CompiledGoFiles = pkg.GoFiles } // Assume go list emits only absolute paths for Dir. @@ -663,13 +668,6 @@ response.Roots = append(response.Roots, pkg.ID) } - // Work around for pre-go.1.11 versions of go list. - // TODO(matloob): they should be handled by the fallback. - // Can we delete this? - if len(pkg.CompiledGoFiles) == 0 { - pkg.CompiledGoFiles = pkg.GoFiles - } - // Temporary work-around for golang/go#39986. Parse filenames out of // error messages. This happens if there are unrecoverable syntax // errors in the source, so we can't match on a specific error message. @@ -891,6 +889,15 @@ // probably because you'd just get the TestMain. fmt.Sprintf("-find=%t", !cfg.Tests && cfg.Mode&findFlags == 0 && !usesExportData(cfg)), } + + // golang/go#60456: with go1.21 and later, go list serves pgo variants, which + // can be costly to compute and may result in redundant processing for the + // caller. Disable these variants. If someone wants to add e.g. a NeedPGO + // mode flag, that should be a separate proposal. + if goVersion >= 21 { + fullargs = append(fullargs, "-pgo=off") + } + fullargs = append(fullargs, cfg.BuildFlags...) fullargs = append(fullargs, "--") fullargs = append(fullargs, words...) 
diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/tools/go/packages/packages.go temporal-1.22.5/src/vendor/golang.org/x/tools/go/packages/packages.go --- temporal-1.21.5-1/src/vendor/golang.org/x/tools/go/packages/packages.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/tools/go/packages/packages.go 2024-02-23 09:46:14.000000000 +0000 @@ -308,6 +308,9 @@ TypeErrors []types.Error // GoFiles lists the absolute file paths of the package's Go source files. + // It may include files that should not be compiled, for example because + // they contain non-matching build tags, are documentary pseudo-files such as + // unsafe/unsafe.go or builtin/builtin.go, or are subject to cgo preprocessing. GoFiles []string // CompiledGoFiles lists the absolute file paths of the package's source diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go temporal-1.22.5/src/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go --- temporal-1.21.5-1/src/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,762 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package objectpath defines a naming scheme for types.Objects -// (that is, named entities in Go programs) relative to their enclosing -// package. -// -// Type-checker objects are canonical, so they are usually identified by -// their address in memory (a pointer), but a pointer has meaning only -// within one address space. By contrast, objectpath names allow the -// identity of an object to be sent from one program to another, -// establishing a correspondence between types.Object variables that are -// distinct but logically equivalent. -// -// A single object may have multiple paths. In this example, -// -// type A struct{ X int } -// type B A -// -// the field X has two paths due to its membership of both A and B. -// The For(obj) function always returns one of these paths, arbitrarily -// but consistently. -package objectpath - -import ( - "fmt" - "go/types" - "sort" - "strconv" - "strings" - - "golang.org/x/tools/internal/typeparams" - - _ "unsafe" // for go:linkname -) - -// A Path is an opaque name that identifies a types.Object -// relative to its package. Conceptually, the name consists of a -// sequence of destructuring operations applied to the package scope -// to obtain the original object. -// The name does not include the package itself. -type Path string - -// Encoding -// -// An object path is a textual and (with training) human-readable encoding -// of a sequence of destructuring operators, starting from a types.Package. -// The sequences represent a path through the package/object/type graph. -// We classify these operators by their type: -// -// PO package->object Package.Scope.Lookup -// OT object->type Object.Type -// TT type->type Type.{Elem,Key,Params,Results,Underlying} [EKPRU] -// TO type->object Type.{At,Field,Method,Obj} [AFMO] -// -// All valid paths start with a package and end at an object -// and thus may be defined by the regular language: -// -// objectpath = PO (OT TT* TO)* -// -// The concrete encoding follows directly: -// - The only PO operator is Package.Scope.Lookup, which requires an identifier. 
-// - The only OT operator is Object.Type, -// which we encode as '.' because dot cannot appear in an identifier. -// - The TT operators are encoded as [EKPRUTC]; -// one of these (TypeParam) requires an integer operand, -// which is encoded as a string of decimal digits. -// - The TO operators are encoded as [AFMO]; -// three of these (At,Field,Method) require an integer operand, -// which is encoded as a string of decimal digits. -// These indices are stable across different representations -// of the same package, even source and export data. -// The indices used are implementation specific and may not correspond to -// the argument to the go/types function. -// -// In the example below, -// -// package p -// -// type T interface { -// f() (a string, b struct{ X int }) -// } -// -// field X has the path "T.UM0.RA1.F0", -// representing the following sequence of operations: -// -// p.Lookup("T") T -// .Type().Underlying().Method(0). f -// .Type().Results().At(1) b -// .Type().Field(0) X -// -// The encoding is not maximally compact---every R or P is -// followed by an A, for example---but this simplifies the -// encoder and decoder. -const ( - // object->type operators - opType = '.' // .Type() (Object) - - // type->type operators - opElem = 'E' // .Elem() (Pointer, Slice, Array, Chan, Map) - opKey = 'K' // .Key() (Map) - opParams = 'P' // .Params() (Signature) - opResults = 'R' // .Results() (Signature) - opUnderlying = 'U' // .Underlying() (Named) - opTypeParam = 'T' // .TypeParams.At(i) (Named, Signature) - opConstraint = 'C' // .Constraint() (TypeParam) - - // type->object operators - opAt = 'A' // .At(i) (Tuple) - opField = 'F' // .Field(i) (Struct) - opMethod = 'M' // .Method(i) (Named or Interface; not Struct: "promoted" names are ignored) - opObj = 'O' // .Obj() (Named, TypeParam) -) - -// For returns the path to an object relative to its package, -// or an error if the object is not accessible from the package's Scope. -// -// The For function guarantees to return a path only for the following objects: -// - package-level types -// - exported package-level non-types -// - methods -// - parameter and result variables -// - struct fields -// These objects are sufficient to define the API of their package. -// The objects described by a package's export data are drawn from this set. -// -// For does not return a path for predeclared names, imported package -// names, local names, and unexported package-level names (except -// types). -// -// Example: given this definition, -// -// package p -// -// type T interface { -// f() (a string, b struct{ X int }) -// } -// -// For(X) would return a path that denotes the following sequence of operations: -// -// p.Scope().Lookup("T") (TypeName T) -// .Type().Underlying().Method(0). (method Func f) -// .Type().Results().At(1) (field Var b) -// .Type().Field(0) (field Var X) -// -// where p is the package (*types.Package) to which X belongs. -func For(obj types.Object) (Path, error) { - return newEncoderFor()(obj) -} - -// An encoder amortizes the cost of encoding the paths of multiple objects. -// Nonexported pending approval of proposal 58668. -type encoder struct { - scopeNamesMemo map[*types.Scope][]string // memoization of Scope.Names() - namedMethodsMemo map[*types.Named][]*types.Func // memoization of namedMethods() -} - -// Exposed to gopls via golang.org/x/tools/internal/typesinternal -// pending approval of proposal 58668. 
-// -//go:linkname newEncoderFor -func newEncoderFor() func(types.Object) (Path, error) { return new(encoder).For } - -func (enc *encoder) For(obj types.Object) (Path, error) { - pkg := obj.Pkg() - - // This table lists the cases of interest. - // - // Object Action - // ------ ------ - // nil reject - // builtin reject - // pkgname reject - // label reject - // var - // package-level accept - // func param/result accept - // local reject - // struct field accept - // const - // package-level accept - // local reject - // func - // package-level accept - // init functions reject - // concrete method accept - // interface method accept - // type - // package-level accept - // local reject - // - // The only accessible package-level objects are members of pkg itself. - // - // The cases are handled in four steps: - // - // 1. reject nil and builtin - // 2. accept package-level objects - // 3. reject obviously invalid objects - // 4. search the API for the path to the param/result/field/method. - - // 1. reference to nil or builtin? - if pkg == nil { - return "", fmt.Errorf("predeclared %s has no path", obj) - } - scope := pkg.Scope() - - // 2. package-level object? - if scope.Lookup(obj.Name()) == obj { - // Only exported objects (and non-exported types) have a path. - // Non-exported types may be referenced by other objects. - if _, ok := obj.(*types.TypeName); !ok && !obj.Exported() { - return "", fmt.Errorf("no path for non-exported %v", obj) - } - return Path(obj.Name()), nil - } - - // 3. Not a package-level object. - // Reject obviously non-viable cases. - switch obj := obj.(type) { - case *types.TypeName: - if _, ok := obj.Type().(*typeparams.TypeParam); !ok { - // With the exception of type parameters, only package-level type names - // have a path. - return "", fmt.Errorf("no path for %v", obj) - } - case *types.Const, // Only package-level constants have a path. - *types.Label, // Labels are function-local. - *types.PkgName: // PkgNames are file-local. - return "", fmt.Errorf("no path for %v", obj) - - case *types.Var: - // Could be: - // - a field (obj.IsField()) - // - a func parameter or result - // - a local var. - // Sadly there is no way to distinguish - // a param/result from a local - // so we must proceed to the find. - - case *types.Func: - // A func, if not package-level, must be a method. - if recv := obj.Type().(*types.Signature).Recv(); recv == nil { - return "", fmt.Errorf("func is not a method: %v", obj) - } - - if path, ok := enc.concreteMethod(obj); ok { - // Fast path for concrete methods that avoids looping over scope. - return path, nil - } - - default: - panic(obj) - } - - // 4. Search the API for the path to the var (field/param/result) or method. - - // First inspect package-level named types. - // In the presence of path aliases, these give - // the best paths because non-types may - // refer to types, but not the reverse. - empty := make([]byte, 0, 48) // initial space - names := enc.scopeNames(scope) - for _, name := range names { - o := scope.Lookup(name) - tname, ok := o.(*types.TypeName) - if !ok { - continue // handle non-types in second pass - } - - path := append(empty, name...) 
- path = append(path, opType) - - T := o.Type() - - if tname.IsAlias() { - // type alias - if r := find(obj, T, path, nil); r != nil { - return Path(r), nil - } - } else { - if named, _ := T.(*types.Named); named != nil { - if r := findTypeParam(obj, typeparams.ForNamed(named), path, nil); r != nil { - // generic named type - return Path(r), nil - } - } - // defined (named) type - if r := find(obj, T.Underlying(), append(path, opUnderlying), nil); r != nil { - return Path(r), nil - } - } - } - - // Then inspect everything else: - // non-types, and declared methods of defined types. - for _, name := range names { - o := scope.Lookup(name) - path := append(empty, name...) - if _, ok := o.(*types.TypeName); !ok { - if o.Exported() { - // exported non-type (const, var, func) - if r := find(obj, o.Type(), append(path, opType), nil); r != nil { - return Path(r), nil - } - } - continue - } - - // Inspect declared methods of defined types. - if T, ok := o.Type().(*types.Named); ok { - path = append(path, opType) - // Note that method index here is always with respect - // to canonical ordering of methods, regardless of how - // they appear in the underlying type. - for i, m := range enc.namedMethods(T) { - path2 := appendOpArg(path, opMethod, i) - if m == obj { - return Path(path2), nil // found declared method - } - if r := find(obj, m.Type(), append(path2, opType), nil); r != nil { - return Path(r), nil - } - } - } - } - - return "", fmt.Errorf("can't find path for %v in %s", obj, pkg.Path()) -} - -func appendOpArg(path []byte, op byte, arg int) []byte { - path = append(path, op) - path = strconv.AppendInt(path, int64(arg), 10) - return path -} - -// concreteMethod returns the path for meth, which must have a non-nil receiver. -// The second return value indicates success and may be false if the method is -// an interface method or if it is an instantiated method. -// -// This function is just an optimization that avoids the general scope walking -// approach. You are expected to fall back to the general approach if this -// function fails. -func (enc *encoder) concreteMethod(meth *types.Func) (Path, bool) { - // Concrete methods can only be declared on package-scoped named types. For - // that reason we can skip the expensive walk over the package scope: the - // path will always be package -> named type -> method. We can trivially get - // the type name from the receiver, and only have to look over the type's - // methods to find the method index. - // - // Methods on generic types require special consideration, however. Consider - // the following package: - // - // L1: type S[T any] struct{} - // L2: func (recv S[A]) Foo() { recv.Bar() } - // L3: func (recv S[B]) Bar() { } - // L4: type Alias = S[int] - // L5: func _[T any]() { var s S[int]; s.Foo() } - // - // The receivers of methods on generic types are instantiations. L2 and L3 - // instantiate S with the type-parameters A and B, which are scoped to the - // respective methods. L4 and L5 each instantiate S with int. Each of these - // instantiations has its own method set, full of methods (and thus objects) - // with receivers whose types are the respective instantiations. In other - // words, we have - // - // S[A].Foo, S[A].Bar - // S[B].Foo, S[B].Bar - // S[int].Foo, S[int].Bar - // - // We may thus be trying to produce object paths for any of these objects. - // - // S[A].Foo and S[B].Bar are the origin methods, and their paths are S.Foo - // and S.Bar, which are the paths that this function naturally produces. 
- // - // S[A].Bar, S[B].Foo, and both methods on S[int] are instantiations that - // don't correspond to the origin methods. For S[int], this is significant. - // The most precise object path for S[int].Foo, for example, is Alias.Foo, - // not S.Foo. Our function, however, would produce S.Foo, which would - // resolve to a different object. - // - // For S[A].Bar and S[B].Foo it could be argued that S.Bar and S.Foo are - // still the correct paths, since only the origin methods have meaningful - // paths. But this is likely only true for trivial cases and has edge cases. - // Since this function is only an optimization, we err on the side of giving - // up, deferring to the slower but definitely correct algorithm. Most users - // of objectpath will only be giving us origin methods, anyway, as referring - // to instantiated methods is usually not useful. - - if typeparams.OriginMethod(meth) != meth { - return "", false - } - - recvT := meth.Type().(*types.Signature).Recv().Type() - if ptr, ok := recvT.(*types.Pointer); ok { - recvT = ptr.Elem() - } - - named, ok := recvT.(*types.Named) - if !ok { - return "", false - } - - if types.IsInterface(named) { - // Named interfaces don't have to be package-scoped - // - // TODO(dominikh): opt: if scope.Lookup(name) == named, then we can apply this optimization to interface - // methods, too, I think. - return "", false - } - - // Preallocate space for the name, opType, opMethod, and some digits. - name := named.Obj().Name() - path := make([]byte, 0, len(name)+8) - path = append(path, name...) - path = append(path, opType) - for i, m := range enc.namedMethods(named) { - if m == meth { - path = appendOpArg(path, opMethod, i) - return Path(path), true - } - } - - panic(fmt.Sprintf("couldn't find method %s on type %s", meth, named)) -} - -// find finds obj within type T, returning the path to it, or nil if not found. -// -// The seen map is used to short circuit cycles through type parameters. If -// nil, it will be allocated as necessary. -func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName]bool) []byte { - switch T := T.(type) { - case *types.Basic, *types.Named: - // Named types belonging to pkg were handled already, - // so T must belong to another package. No path. 
- return nil - case *types.Pointer: - return find(obj, T.Elem(), append(path, opElem), seen) - case *types.Slice: - return find(obj, T.Elem(), append(path, opElem), seen) - case *types.Array: - return find(obj, T.Elem(), append(path, opElem), seen) - case *types.Chan: - return find(obj, T.Elem(), append(path, opElem), seen) - case *types.Map: - if r := find(obj, T.Key(), append(path, opKey), seen); r != nil { - return r - } - return find(obj, T.Elem(), append(path, opElem), seen) - case *types.Signature: - if r := findTypeParam(obj, typeparams.ForSignature(T), path, seen); r != nil { - return r - } - if r := find(obj, T.Params(), append(path, opParams), seen); r != nil { - return r - } - return find(obj, T.Results(), append(path, opResults), seen) - case *types.Struct: - for i := 0; i < T.NumFields(); i++ { - fld := T.Field(i) - path2 := appendOpArg(path, opField, i) - if fld == obj { - return path2 // found field var - } - if r := find(obj, fld.Type(), append(path2, opType), seen); r != nil { - return r - } - } - return nil - case *types.Tuple: - for i := 0; i < T.Len(); i++ { - v := T.At(i) - path2 := appendOpArg(path, opAt, i) - if v == obj { - return path2 // found param/result var - } - if r := find(obj, v.Type(), append(path2, opType), seen); r != nil { - return r - } - } - return nil - case *types.Interface: - for i := 0; i < T.NumMethods(); i++ { - m := T.Method(i) - path2 := appendOpArg(path, opMethod, i) - if m == obj { - return path2 // found interface method - } - if r := find(obj, m.Type(), append(path2, opType), seen); r != nil { - return r - } - } - return nil - case *typeparams.TypeParam: - name := T.Obj() - if name == obj { - return append(path, opObj) - } - if seen[name] { - return nil - } - if seen == nil { - seen = make(map[*types.TypeName]bool) - } - seen[name] = true - if r := find(obj, T.Constraint(), append(path, opConstraint), seen); r != nil { - return r - } - return nil - } - panic(T) -} - -func findTypeParam(obj types.Object, list *typeparams.TypeParamList, path []byte, seen map[*types.TypeName]bool) []byte { - for i := 0; i < list.Len(); i++ { - tparam := list.At(i) - path2 := appendOpArg(path, opTypeParam, i) - if r := find(obj, tparam, path2, seen); r != nil { - return r - } - } - return nil -} - -// Object returns the object denoted by path p within the package pkg. -func Object(pkg *types.Package, p Path) (types.Object, error) { - if p == "" { - return nil, fmt.Errorf("empty path") - } - - pathstr := string(p) - var pkgobj, suffix string - if dot := strings.IndexByte(pathstr, opType); dot < 0 { - pkgobj = pathstr - } else { - pkgobj = pathstr[:dot] - suffix = pathstr[dot:] // suffix starts with "." - } - - obj := pkg.Scope().Lookup(pkgobj) - if obj == nil { - return nil, fmt.Errorf("package %s does not contain %q", pkg.Path(), pkgobj) - } - - // abstraction of *types.{Pointer,Slice,Array,Chan,Map} - type hasElem interface { - Elem() types.Type - } - // abstraction of *types.{Named,Signature} - type hasTypeParams interface { - TypeParams() *typeparams.TypeParamList - } - // abstraction of *types.{Named,TypeParam} - type hasObj interface { - Obj() *types.TypeName - } - - // The loop state is the pair (t, obj), - // exactly one of which is non-nil, initially obj. - // All suffixes start with '.' (the only object->type operation), - // followed by optional type->type operations, - // then a type->object operation. - // The cycle then repeats. 
- var t types.Type - for suffix != "" { - code := suffix[0] - suffix = suffix[1:] - - // Codes [AFM] have an integer operand. - var index int - switch code { - case opAt, opField, opMethod, opTypeParam: - rest := strings.TrimLeft(suffix, "0123456789") - numerals := suffix[:len(suffix)-len(rest)] - suffix = rest - i, err := strconv.Atoi(numerals) - if err != nil { - return nil, fmt.Errorf("invalid path: bad numeric operand %q for code %q", numerals, code) - } - index = int(i) - case opObj: - // no operand - default: - // The suffix must end with a type->object operation. - if suffix == "" { - return nil, fmt.Errorf("invalid path: ends with %q, want [AFMO]", code) - } - } - - if code == opType { - if t != nil { - return nil, fmt.Errorf("invalid path: unexpected %q in type context", opType) - } - t = obj.Type() - obj = nil - continue - } - - if t == nil { - return nil, fmt.Errorf("invalid path: code %q in object context", code) - } - - // Inv: t != nil, obj == nil - - switch code { - case opElem: - hasElem, ok := t.(hasElem) // Pointer, Slice, Array, Chan, Map - if !ok { - return nil, fmt.Errorf("cannot apply %q to %s (got %T, want pointer, slice, array, chan or map)", code, t, t) - } - t = hasElem.Elem() - - case opKey: - mapType, ok := t.(*types.Map) - if !ok { - return nil, fmt.Errorf("cannot apply %q to %s (got %T, want map)", code, t, t) - } - t = mapType.Key() - - case opParams: - sig, ok := t.(*types.Signature) - if !ok { - return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t) - } - t = sig.Params() - - case opResults: - sig, ok := t.(*types.Signature) - if !ok { - return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t) - } - t = sig.Results() - - case opUnderlying: - named, ok := t.(*types.Named) - if !ok { - return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named)", code, t, t) - } - t = named.Underlying() - - case opTypeParam: - hasTypeParams, ok := t.(hasTypeParams) // Named, Signature - if !ok { - return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named or signature)", code, t, t) - } - tparams := hasTypeParams.TypeParams() - if n := tparams.Len(); index >= n { - return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n) - } - t = tparams.At(index) - - case opConstraint: - tparam, ok := t.(*typeparams.TypeParam) - if !ok { - return nil, fmt.Errorf("cannot apply %q to %s (got %T, want type parameter)", code, t, t) - } - t = tparam.Constraint() - - case opAt: - tuple, ok := t.(*types.Tuple) - if !ok { - return nil, fmt.Errorf("cannot apply %q to %s (got %T, want tuple)", code, t, t) - } - if n := tuple.Len(); index >= n { - return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n) - } - obj = tuple.At(index) - t = nil - - case opField: - structType, ok := t.(*types.Struct) - if !ok { - return nil, fmt.Errorf("cannot apply %q to %s (got %T, want struct)", code, t, t) - } - if n := structType.NumFields(); index >= n { - return nil, fmt.Errorf("field index %d out of range [0-%d)", index, n) - } - obj = structType.Field(index) - t = nil - - case opMethod: - switch t := t.(type) { - case *types.Interface: - if index >= t.NumMethods() { - return nil, fmt.Errorf("method index %d out of range [0-%d)", index, t.NumMethods()) - } - obj = t.Method(index) // Id-ordered - - case *types.Named: - methods := namedMethods(t) // (unmemoized) - if index >= len(methods) { - return nil, fmt.Errorf("method index %d out of range [0-%d)", index, len(methods)) - } - obj = methods[index] // Id-ordered - - 
default: - return nil, fmt.Errorf("cannot apply %q to %s (got %T, want interface or named)", code, t, t) - } - t = nil - - case opObj: - hasObj, ok := t.(hasObj) - if !ok { - return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named or type param)", code, t, t) - } - obj = hasObj.Obj() - t = nil - - default: - return nil, fmt.Errorf("invalid path: unknown code %q", code) - } - } - - if obj.Pkg() != pkg { - return nil, fmt.Errorf("path denotes %s, which belongs to a different package", obj) - } - - return obj, nil // success -} - -// namedMethods returns the methods of a Named type in ascending Id order. -func namedMethods(named *types.Named) []*types.Func { - methods := make([]*types.Func, named.NumMethods()) - for i := range methods { - methods[i] = named.Method(i) - } - sort.Slice(methods, func(i, j int) bool { - return methods[i].Id() < methods[j].Id() - }) - return methods -} - -// scopeNames is a memoization of scope.Names. Callers must not modify the result. -func (enc *encoder) scopeNames(scope *types.Scope) []string { - m := enc.scopeNamesMemo - if m == nil { - m = make(map[*types.Scope][]string) - enc.scopeNamesMemo = m - } - names, ok := m[scope] - if !ok { - names = scope.Names() // allocates and sorts - m[scope] = names - } - return names -} - -// namedMethods is a memoization of the namedMethods function. Callers must not modify the result. -func (enc *encoder) namedMethods(named *types.Named) []*types.Func { - m := enc.namedMethodsMemo - if m == nil { - m = make(map[*types.Named][]*types.Func) - enc.namedMethodsMemo = m - } - methods, ok := m[named] - if !ok { - methods = namedMethods(named) // allocates and sorts - m[named] = methods - } - return methods - -} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/tools/internal/event/tag/tag.go temporal-1.22.5/src/vendor/golang.org/x/tools/internal/event/tag/tag.go --- temporal-1.21.5-1/src/vendor/golang.org/x/tools/internal/event/tag/tag.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/tools/internal/event/tag/tag.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,59 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package tag provides the labels used for telemetry throughout gopls. 
+package tag + +import ( + "golang.org/x/tools/internal/event/keys" +) + +var ( + // create the label keys we use + Method = keys.NewString("method", "") + StatusCode = keys.NewString("status.code", "") + StatusMessage = keys.NewString("status.message", "") + RPCID = keys.NewString("id", "") + RPCDirection = keys.NewString("direction", "") + File = keys.NewString("file", "") + Directory = keys.New("directory", "") + URI = keys.New("URI", "") + Package = keys.NewString("package", "") // Package ID + PackagePath = keys.NewString("package_path", "") + Query = keys.New("query", "") + Snapshot = keys.NewUInt64("snapshot", "") + Operation = keys.NewString("operation", "") + + Position = keys.New("position", "") + Category = keys.NewString("category", "") + PackageCount = keys.NewInt("packages", "") + Files = keys.New("files", "") + Port = keys.NewInt("port", "") + Type = keys.New("type", "") + HoverKind = keys.NewString("hoverkind", "") + + NewServer = keys.NewString("new_server", "A new server was added") + EndServer = keys.NewString("end_server", "A server was shut down") + + ServerID = keys.NewString("server", "The server ID an event is related to") + Logfile = keys.NewString("logfile", "") + DebugAddress = keys.NewString("debug_address", "") + GoplsPath = keys.NewString("gopls_path", "") + ClientID = keys.NewString("client_id", "") + + Level = keys.NewInt("level", "The logging level") +) + +var ( + // create the stats we measure + Started = keys.NewInt64("started", "Count of started RPCs.") + ReceivedBytes = keys.NewInt64("received_bytes", "Bytes received.") //, unit.Bytes) + SentBytes = keys.NewInt64("sent_bytes", "Bytes sent.") //, unit.Bytes) + Latency = keys.NewFloat64("latency_ms", "Elapsed time in milliseconds") //, unit.Milliseconds) +) + +const ( + Inbound = "in" + Outbound = "out" +) diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/tools/internal/gcimporter/bexport.go temporal-1.22.5/src/vendor/golang.org/x/tools/internal/gcimporter/bexport.go --- temporal-1.21.5-1/src/vendor/golang.org/x/tools/internal/gcimporter/bexport.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/tools/internal/gcimporter/bexport.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,852 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Binary package export. -// This file was derived from $GOROOT/src/cmd/compile/internal/gc/bexport.go; -// see that file for specification of the format. - -package gcimporter - -import ( - "bytes" - "encoding/binary" - "fmt" - "go/constant" - "go/token" - "go/types" - "math" - "math/big" - "sort" - "strings" -) - -// If debugFormat is set, each integer and string value is preceded by a marker -// and position information in the encoding. This mechanism permits an importer -// to recognize immediately when it is out of sync. The importer recognizes this -// mode automatically (i.e., it can import export data produced with debugging -// support even if debugFormat is not set at the time of import). This mode will -// lead to massively larger export data (by a factor of 2 to 3) and should only -// be enabled during development and debugging. -// -// NOTE: This flag is the first flag to enable if importing dies because of -// (suspected) format errors, and whenever a change is made to the format. -const debugFormat = false // default: false - -// Current export format version. Increase with each format change. 
-// -// Note: The latest binary (non-indexed) export format is at version 6. -// This exporter is still at level 4, but it doesn't matter since -// the binary importer can handle older versions just fine. -// -// 6: package height (CL 105038) -- NOT IMPLEMENTED HERE -// 5: improved position encoding efficiency (issue 20080, CL 41619) -- NOT IMPLEMENTED HERE -// 4: type name objects support type aliases, uses aliasTag -// 3: Go1.8 encoding (same as version 2, aliasTag defined but never used) -// 2: removed unused bool in ODCL export (compiler only) -// 1: header format change (more regular), export package for _ struct fields -// 0: Go1.7 encoding -const exportVersion = 4 - -// trackAllTypes enables cycle tracking for all types, not just named -// types. The existing compiler invariants assume that unnamed types -// that are not completely set up are not used, or else there are spurious -// errors. -// If disabled, only named types are tracked, possibly leading to slightly -// less efficient encoding in rare cases. It also prevents the export of -// some corner-case type declarations (but those are not handled correctly -// with with the textual export format either). -// TODO(gri) enable and remove once issues caused by it are fixed -const trackAllTypes = false - -type exporter struct { - fset *token.FileSet - out bytes.Buffer - - // object -> index maps, indexed in order of serialization - strIndex map[string]int - pkgIndex map[*types.Package]int - typIndex map[types.Type]int - - // position encoding - posInfoFormat bool - prevFile string - prevLine int - - // debugging support - written int // bytes written - indent int // for trace -} - -// internalError represents an error generated inside this package. -type internalError string - -func (e internalError) Error() string { return "gcimporter: " + string(e) } - -func internalErrorf(format string, args ...interface{}) error { - return internalError(fmt.Sprintf(format, args...)) -} - -// BExportData returns binary export data for pkg. -// If no file set is provided, position info will be missing. -func BExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error) { - if !debug { - defer func() { - if e := recover(); e != nil { - if ierr, ok := e.(internalError); ok { - err = ierr - return - } - // Not an internal error; panic again. - panic(e) - } - }() - } - - p := exporter{ - fset: fset, - strIndex: map[string]int{"": 0}, // empty string is mapped to 0 - pkgIndex: make(map[*types.Package]int), - typIndex: make(map[types.Type]int), - posInfoFormat: true, // TODO(gri) might become a flag, eventually - } - - // write version info - // The version string must start with "version %d" where %d is the version - // number. Additional debugging information may follow after a blank; that - // text is ignored by the importer. 
- p.rawStringln(fmt.Sprintf("version %d", exportVersion)) - var debug string - if debugFormat { - debug = "debug" - } - p.rawStringln(debug) // cannot use p.bool since it's affected by debugFormat; also want to see this clearly - p.bool(trackAllTypes) - p.bool(p.posInfoFormat) - - // --- generic export data --- - - // populate type map with predeclared "known" types - for index, typ := range predeclared() { - p.typIndex[typ] = index - } - if len(p.typIndex) != len(predeclared()) { - return nil, internalError("duplicate entries in type map?") - } - - // write package data - p.pkg(pkg, true) - if trace { - p.tracef("\n") - } - - // write objects - objcount := 0 - scope := pkg.Scope() - for _, name := range scope.Names() { - if !token.IsExported(name) { - continue - } - if trace { - p.tracef("\n") - } - p.obj(scope.Lookup(name)) - objcount++ - } - - // indicate end of list - if trace { - p.tracef("\n") - } - p.tag(endTag) - - // for self-verification only (redundant) - p.int(objcount) - - if trace { - p.tracef("\n") - } - - // --- end of export data --- - - return p.out.Bytes(), nil -} - -func (p *exporter) pkg(pkg *types.Package, emptypath bool) { - if pkg == nil { - panic(internalError("unexpected nil pkg")) - } - - // if we saw the package before, write its index (>= 0) - if i, ok := p.pkgIndex[pkg]; ok { - p.index('P', i) - return - } - - // otherwise, remember the package, write the package tag (< 0) and package data - if trace { - p.tracef("P%d = { ", len(p.pkgIndex)) - defer p.tracef("} ") - } - p.pkgIndex[pkg] = len(p.pkgIndex) - - p.tag(packageTag) - p.string(pkg.Name()) - if emptypath { - p.string("") - } else { - p.string(pkg.Path()) - } -} - -func (p *exporter) obj(obj types.Object) { - switch obj := obj.(type) { - case *types.Const: - p.tag(constTag) - p.pos(obj) - p.qualifiedName(obj) - p.typ(obj.Type()) - p.value(obj.Val()) - - case *types.TypeName: - if obj.IsAlias() { - p.tag(aliasTag) - p.pos(obj) - p.qualifiedName(obj) - } else { - p.tag(typeTag) - } - p.typ(obj.Type()) - - case *types.Var: - p.tag(varTag) - p.pos(obj) - p.qualifiedName(obj) - p.typ(obj.Type()) - - case *types.Func: - p.tag(funcTag) - p.pos(obj) - p.qualifiedName(obj) - sig := obj.Type().(*types.Signature) - p.paramList(sig.Params(), sig.Variadic()) - p.paramList(sig.Results(), false) - - default: - panic(internalErrorf("unexpected object %v (%T)", obj, obj)) - } -} - -func (p *exporter) pos(obj types.Object) { - if !p.posInfoFormat { - return - } - - file, line := p.fileLine(obj) - if file == p.prevFile { - // common case: write line delta - // delta == 0 means different file or no line change - delta := line - p.prevLine - p.int(delta) - if delta == 0 { - p.int(-1) // -1 means no file change - } - } else { - // different file - p.int(0) - // Encode filename as length of common prefix with previous - // filename, followed by (possibly empty) suffix. Filenames - // frequently share path prefixes, so this can save a lot - // of space and make export data size less dependent on file - // path length. The suffix is unlikely to be empty because - // file names tend to end in ".go". 
- n := commonPrefixLen(p.prevFile, file) - p.int(n) // n >= 0 - p.string(file[n:]) // write suffix only - p.prevFile = file - p.int(line) - } - p.prevLine = line -} - -func (p *exporter) fileLine(obj types.Object) (file string, line int) { - if p.fset != nil { - pos := p.fset.Position(obj.Pos()) - file = pos.Filename - line = pos.Line - } - return -} - -func commonPrefixLen(a, b string) int { - if len(a) > len(b) { - a, b = b, a - } - // len(a) <= len(b) - i := 0 - for i < len(a) && a[i] == b[i] { - i++ - } - return i -} - -func (p *exporter) qualifiedName(obj types.Object) { - p.string(obj.Name()) - p.pkg(obj.Pkg(), false) -} - -func (p *exporter) typ(t types.Type) { - if t == nil { - panic(internalError("nil type")) - } - - // Possible optimization: Anonymous pointer types *T where - // T is a named type are common. We could canonicalize all - // such types *T to a single type PT = *T. This would lead - // to at most one *T entry in typIndex, and all future *T's - // would be encoded as the respective index directly. Would - // save 1 byte (pointerTag) per *T and reduce the typIndex - // size (at the cost of a canonicalization map). We can do - // this later, without encoding format change. - - // if we saw the type before, write its index (>= 0) - if i, ok := p.typIndex[t]; ok { - p.index('T', i) - return - } - - // otherwise, remember the type, write the type tag (< 0) and type data - if trackAllTypes { - if trace { - p.tracef("T%d = {>\n", len(p.typIndex)) - defer p.tracef("<\n} ") - } - p.typIndex[t] = len(p.typIndex) - } - - switch t := t.(type) { - case *types.Named: - if !trackAllTypes { - // if we don't track all types, track named types now - p.typIndex[t] = len(p.typIndex) - } - - p.tag(namedTag) - p.pos(t.Obj()) - p.qualifiedName(t.Obj()) - p.typ(t.Underlying()) - if !types.IsInterface(t) { - p.assocMethods(t) - } - - case *types.Array: - p.tag(arrayTag) - p.int64(t.Len()) - p.typ(t.Elem()) - - case *types.Slice: - p.tag(sliceTag) - p.typ(t.Elem()) - - case *dddSlice: - p.tag(dddTag) - p.typ(t.elem) - - case *types.Struct: - p.tag(structTag) - p.fieldList(t) - - case *types.Pointer: - p.tag(pointerTag) - p.typ(t.Elem()) - - case *types.Signature: - p.tag(signatureTag) - p.paramList(t.Params(), t.Variadic()) - p.paramList(t.Results(), false) - - case *types.Interface: - p.tag(interfaceTag) - p.iface(t) - - case *types.Map: - p.tag(mapTag) - p.typ(t.Key()) - p.typ(t.Elem()) - - case *types.Chan: - p.tag(chanTag) - p.int(int(3 - t.Dir())) // hack - p.typ(t.Elem()) - - default: - panic(internalErrorf("unexpected type %T: %s", t, t)) - } -} - -func (p *exporter) assocMethods(named *types.Named) { - // Sort methods (for determinism). 
- var methods []*types.Func - for i := 0; i < named.NumMethods(); i++ { - methods = append(methods, named.Method(i)) - } - sort.Sort(methodsByName(methods)) - - p.int(len(methods)) - - if trace && methods != nil { - p.tracef("associated methods {>\n") - } - - for i, m := range methods { - if trace && i > 0 { - p.tracef("\n") - } - - p.pos(m) - name := m.Name() - p.string(name) - if !exported(name) { - p.pkg(m.Pkg(), false) - } - - sig := m.Type().(*types.Signature) - p.paramList(types.NewTuple(sig.Recv()), false) - p.paramList(sig.Params(), sig.Variadic()) - p.paramList(sig.Results(), false) - p.int(0) // dummy value for go:nointerface pragma - ignored by importer - } - - if trace && methods != nil { - p.tracef("<\n} ") - } -} - -type methodsByName []*types.Func - -func (x methodsByName) Len() int { return len(x) } -func (x methodsByName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } -func (x methodsByName) Less(i, j int) bool { return x[i].Name() < x[j].Name() } - -func (p *exporter) fieldList(t *types.Struct) { - if trace && t.NumFields() > 0 { - p.tracef("fields {>\n") - defer p.tracef("<\n} ") - } - - p.int(t.NumFields()) - for i := 0; i < t.NumFields(); i++ { - if trace && i > 0 { - p.tracef("\n") - } - p.field(t.Field(i)) - p.string(t.Tag(i)) - } -} - -func (p *exporter) field(f *types.Var) { - if !f.IsField() { - panic(internalError("field expected")) - } - - p.pos(f) - p.fieldName(f) - p.typ(f.Type()) -} - -func (p *exporter) iface(t *types.Interface) { - // TODO(gri): enable importer to load embedded interfaces, - // then emit Embeddeds and ExplicitMethods separately here. - p.int(0) - - n := t.NumMethods() - if trace && n > 0 { - p.tracef("methods {>\n") - defer p.tracef("<\n} ") - } - p.int(n) - for i := 0; i < n; i++ { - if trace && i > 0 { - p.tracef("\n") - } - p.method(t.Method(i)) - } -} - -func (p *exporter) method(m *types.Func) { - sig := m.Type().(*types.Signature) - if sig.Recv() == nil { - panic(internalError("method expected")) - } - - p.pos(m) - p.string(m.Name()) - if m.Name() != "_" && !token.IsExported(m.Name()) { - p.pkg(m.Pkg(), false) - } - - // interface method; no need to encode receiver. - p.paramList(sig.Params(), sig.Variadic()) - p.paramList(sig.Results(), false) -} - -func (p *exporter) fieldName(f *types.Var) { - name := f.Name() - - if f.Anonymous() { - // anonymous field - we distinguish between 3 cases: - // 1) field name matches base type name and is exported - // 2) field name matches base type name and is not exported - // 3) field name doesn't match base type name (alias name) - bname := basetypeName(f.Type()) - if name == bname { - if token.IsExported(name) { - name = "" // 1) we don't need to know the field name or package - } else { - name = "?" // 2) use unexported name "?" 
to force package export - } - } else { - // 3) indicate alias and export name as is - // (this requires an extra "@" but this is a rare case) - p.string("@") - } - } - - p.string(name) - if name != "" && !token.IsExported(name) { - p.pkg(f.Pkg(), false) - } -} - -func basetypeName(typ types.Type) string { - switch typ := deref(typ).(type) { - case *types.Basic: - return typ.Name() - case *types.Named: - return typ.Obj().Name() - default: - return "" // unnamed type - } -} - -func (p *exporter) paramList(params *types.Tuple, variadic bool) { - // use negative length to indicate unnamed parameters - // (look at the first parameter only since either all - // names are present or all are absent) - n := params.Len() - if n > 0 && params.At(0).Name() == "" { - n = -n - } - p.int(n) - for i := 0; i < params.Len(); i++ { - q := params.At(i) - t := q.Type() - if variadic && i == params.Len()-1 { - t = &dddSlice{t.(*types.Slice).Elem()} - } - p.typ(t) - if n > 0 { - name := q.Name() - p.string(name) - if name != "_" { - p.pkg(q.Pkg(), false) - } - } - p.string("") // no compiler-specific info - } -} - -func (p *exporter) value(x constant.Value) { - if trace { - p.tracef("= ") - } - - switch x.Kind() { - case constant.Bool: - tag := falseTag - if constant.BoolVal(x) { - tag = trueTag - } - p.tag(tag) - - case constant.Int: - if v, exact := constant.Int64Val(x); exact { - // common case: x fits into an int64 - use compact encoding - p.tag(int64Tag) - p.int64(v) - return - } - // uncommon case: large x - use float encoding - // (powers of 2 will be encoded efficiently with exponent) - p.tag(floatTag) - p.float(constant.ToFloat(x)) - - case constant.Float: - p.tag(floatTag) - p.float(x) - - case constant.Complex: - p.tag(complexTag) - p.float(constant.Real(x)) - p.float(constant.Imag(x)) - - case constant.String: - p.tag(stringTag) - p.string(constant.StringVal(x)) - - case constant.Unknown: - // package contains type errors - p.tag(unknownTag) - - default: - panic(internalErrorf("unexpected value %v (%T)", x, x)) - } -} - -func (p *exporter) float(x constant.Value) { - if x.Kind() != constant.Float { - panic(internalErrorf("unexpected constant %v, want float", x)) - } - // extract sign (there is no -0) - sign := constant.Sign(x) - if sign == 0 { - // x == 0 - p.int(0) - return - } - // x != 0 - - var f big.Float - if v, exact := constant.Float64Val(x); exact { - // float64 - f.SetFloat64(v) - } else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int { - // TODO(gri): add big.Rat accessor to constant.Value. - r := valueToRat(num) - f.SetRat(r.Quo(r, valueToRat(denom))) - } else { - // Value too large to represent as a fraction => inaccessible. - // TODO(gri): add big.Float accessor to constant.Value. - f.SetFloat64(math.MaxFloat64) // FIXME - } - - // extract exponent such that 0.5 <= m < 1.0 - var m big.Float - exp := f.MantExp(&m) - - // extract mantissa as *big.Int - // - set exponent large enough so mant satisfies mant.IsInt() - // - get *big.Int from mant - m.SetMantExp(&m, int(m.MinPrec())) - mant, acc := m.Int(nil) - if acc != big.Exact { - panic(internalError("internal error")) - } - - p.int(sign) - p.int(exp) - p.string(string(mant.Bytes())) -} - -func valueToRat(x constant.Value) *big.Rat { - // Convert little-endian to big-endian. - // I can't believe this is necessary. 
- bytes := constant.Bytes(x) - for i := 0; i < len(bytes)/2; i++ { - bytes[i], bytes[len(bytes)-1-i] = bytes[len(bytes)-1-i], bytes[i] - } - return new(big.Rat).SetInt(new(big.Int).SetBytes(bytes)) -} - -func (p *exporter) bool(b bool) bool { - if trace { - p.tracef("[") - defer p.tracef("= %v] ", b) - } - - x := 0 - if b { - x = 1 - } - p.int(x) - return b -} - -// ---------------------------------------------------------------------------- -// Low-level encoders - -func (p *exporter) index(marker byte, index int) { - if index < 0 { - panic(internalError("invalid index < 0")) - } - if debugFormat { - p.marker('t') - } - if trace { - p.tracef("%c%d ", marker, index) - } - p.rawInt64(int64(index)) -} - -func (p *exporter) tag(tag int) { - if tag >= 0 { - panic(internalError("invalid tag >= 0")) - } - if debugFormat { - p.marker('t') - } - if trace { - p.tracef("%s ", tagString[-tag]) - } - p.rawInt64(int64(tag)) -} - -func (p *exporter) int(x int) { - p.int64(int64(x)) -} - -func (p *exporter) int64(x int64) { - if debugFormat { - p.marker('i') - } - if trace { - p.tracef("%d ", x) - } - p.rawInt64(x) -} - -func (p *exporter) string(s string) { - if debugFormat { - p.marker('s') - } - if trace { - p.tracef("%q ", s) - } - // if we saw the string before, write its index (>= 0) - // (the empty string is mapped to 0) - if i, ok := p.strIndex[s]; ok { - p.rawInt64(int64(i)) - return - } - // otherwise, remember string and write its negative length and bytes - p.strIndex[s] = len(p.strIndex) - p.rawInt64(-int64(len(s))) - for i := 0; i < len(s); i++ { - p.rawByte(s[i]) - } -} - -// marker emits a marker byte and position information which makes -// it easy for a reader to detect if it is "out of sync". Used for -// debugFormat format only. -func (p *exporter) marker(m byte) { - p.rawByte(m) - // Enable this for help tracking down the location - // of an incorrect marker when running in debugFormat. - if false && trace { - p.tracef("#%d ", p.written) - } - p.rawInt64(int64(p.written)) -} - -// rawInt64 should only be used by low-level encoders. -func (p *exporter) rawInt64(x int64) { - var tmp [binary.MaxVarintLen64]byte - n := binary.PutVarint(tmp[:], x) - for i := 0; i < n; i++ { - p.rawByte(tmp[i]) - } -} - -// rawStringln should only be used to emit the initial version string. -func (p *exporter) rawStringln(s string) { - for i := 0; i < len(s); i++ { - p.rawByte(s[i]) - } - p.rawByte('\n') -} - -// rawByte is the bottleneck interface to write to p.out. -// rawByte escapes b as follows (any encoding does that -// hides '$'): -// -// '$' => '|' 'S' -// '|' => '|' '|' -// -// Necessary so other tools can find the end of the -// export data by searching for "$$". -// rawByte should only be used by low-level encoders. -func (p *exporter) rawByte(b byte) { - switch b { - case '$': - // write '$' as '|' 'S' - b = 'S' - fallthrough - case '|': - // write '|' as '|' '|' - p.out.WriteByte('|') - p.written++ - } - p.out.WriteByte(b) - p.written++ -} - -// tracef is like fmt.Printf but it rewrites the format string -// to take care of indentation. -func (p *exporter) tracef(format string, args ...interface{}) { - if strings.ContainsAny(format, "<>\n") { - var buf bytes.Buffer - for i := 0; i < len(format); i++ { - // no need to deal with runes - ch := format[i] - switch ch { - case '>': - p.indent++ - continue - case '<': - p.indent-- - continue - } - buf.WriteByte(ch) - if ch == '\n' { - for j := p.indent; j > 0; j-- { - buf.WriteString(". 
") - } - } - } - format = buf.String() - } - fmt.Printf(format, args...) -} - -// Debugging support. -// (tagString is only used when tracing is enabled) -var tagString = [...]string{ - // Packages - -packageTag: "package", - - // Types - -namedTag: "named type", - -arrayTag: "array", - -sliceTag: "slice", - -dddTag: "ddd", - -structTag: "struct", - -pointerTag: "pointer", - -signatureTag: "signature", - -interfaceTag: "interface", - -mapTag: "map", - -chanTag: "chan", - - // Values - -falseTag: "false", - -trueTag: "true", - -int64Tag: "int64", - -floatTag: "float", - -fractionTag: "fraction", - -complexTag: "complex", - -stringTag: "string", - -unknownTag: "unknown", - - // Type aliases - -aliasTag: "alias", -} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/tools/internal/gcimporter/bimport.go temporal-1.22.5/src/vendor/golang.org/x/tools/internal/gcimporter/bimport.go --- temporal-1.21.5-1/src/vendor/golang.org/x/tools/internal/gcimporter/bimport.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/tools/internal/gcimporter/bimport.go 2024-02-23 09:46:14.000000000 +0000 @@ -2,340 +2,24 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// This file is a copy of $GOROOT/src/go/internal/gcimporter/bimport.go. +// This file contains the remaining vestiges of +// $GOROOT/src/go/internal/gcimporter/bimport.go. package gcimporter import ( - "encoding/binary" "fmt" - "go/constant" "go/token" "go/types" - "sort" - "strconv" - "strings" "sync" - "unicode" - "unicode/utf8" ) -type importer struct { - imports map[string]*types.Package - data []byte - importpath string - buf []byte // for reading strings - version int // export format version - - // object lists - strList []string // in order of appearance - pathList []string // in order of appearance - pkgList []*types.Package // in order of appearance - typList []types.Type // in order of appearance - interfaceList []*types.Interface // for delayed completion only - trackAllTypes bool - - // position encoding - posInfoFormat bool - prevFile string - prevLine int - fake fakeFileSet - - // debugging support - debugFormat bool - read int // bytes read -} - -// BImportData imports a package from the serialized package data -// and returns the number of bytes consumed and a reference to the package. -// If the export data version is not recognized or the format is otherwise -// compromised, an error is returned. -func BImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) { - // catch panics and return them as errors - const currentVersion = 6 - version := -1 // unknown version - defer func() { - if e := recover(); e != nil { - // Return a (possibly nil or incomplete) package unchanged (see #16088). 
- if version > currentVersion { - err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e) - } else { - err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e) - } - } - }() - - p := importer{ - imports: imports, - data: data, - importpath: path, - version: version, - strList: []string{""}, // empty string is mapped to 0 - pathList: []string{""}, // empty string is mapped to 0 - fake: fakeFileSet{ - fset: fset, - files: make(map[string]*fileInfo), - }, - } - defer p.fake.setLines() // set lines for files in fset - - // read version info - var versionstr string - if b := p.rawByte(); b == 'c' || b == 'd' { - // Go1.7 encoding; first byte encodes low-level - // encoding format (compact vs debug). - // For backward-compatibility only (avoid problems with - // old installed packages). Newly compiled packages use - // the extensible format string. - // TODO(gri) Remove this support eventually; after Go1.8. - if b == 'd' { - p.debugFormat = true - } - p.trackAllTypes = p.rawByte() == 'a' - p.posInfoFormat = p.int() != 0 - versionstr = p.string() - if versionstr == "v1" { - version = 0 - } - } else { - // Go1.8 extensible encoding - // read version string and extract version number (ignore anything after the version number) - versionstr = p.rawStringln(b) - if s := strings.SplitN(versionstr, " ", 3); len(s) >= 2 && s[0] == "version" { - if v, err := strconv.Atoi(s[1]); err == nil && v > 0 { - version = v - } - } - } - p.version = version - - // read version specific flags - extend as necessary - switch p.version { - // case currentVersion: - // ... - // fallthrough - case currentVersion, 5, 4, 3, 2, 1: - p.debugFormat = p.rawStringln(p.rawByte()) == "debug" - p.trackAllTypes = p.int() != 0 - p.posInfoFormat = p.int() != 0 - case 0: - // Go1.7 encoding format - nothing to do here - default: - errorf("unknown bexport format version %d (%q)", p.version, versionstr) - } - - // --- generic export data --- - - // populate typList with predeclared "known" types - p.typList = append(p.typList, predeclared()...) - - // read package data - pkg = p.pkg() - - // read objects of phase 1 only (see cmd/compile/internal/gc/bexport.go) - objcount := 0 - for { - tag := p.tagOrIndex() - if tag == endTag { - break - } - p.obj(tag) - objcount++ - } - - // self-verification - if count := p.int(); count != objcount { - errorf("got %d objects; want %d", objcount, count) - } - - // ignore compiler-specific import data - - // complete interfaces - // TODO(gri) re-investigate if we still need to do this in a delayed fashion - for _, typ := range p.interfaceList { - typ.Complete() - } - - // record all referenced packages as imports - list := append(([]*types.Package)(nil), p.pkgList[1:]...) 
- sort.Sort(byPath(list)) - pkg.SetImports(list) - - // package was imported completely and without errors - pkg.MarkComplete() - - return p.read, pkg, nil -} - func errorf(format string, args ...interface{}) { panic(fmt.Sprintf(format, args...)) } -func (p *importer) pkg() *types.Package { - // if the package was seen before, i is its index (>= 0) - i := p.tagOrIndex() - if i >= 0 { - return p.pkgList[i] - } - - // otherwise, i is the package tag (< 0) - if i != packageTag { - errorf("unexpected package tag %d version %d", i, p.version) - } - - // read package data - name := p.string() - var path string - if p.version >= 5 { - path = p.path() - } else { - path = p.string() - } - if p.version >= 6 { - p.int() // package height; unused by go/types - } - - // we should never see an empty package name - if name == "" { - errorf("empty package name in import") - } - - // an empty path denotes the package we are currently importing; - // it must be the first package we see - if (path == "") != (len(p.pkgList) == 0) { - errorf("package path %q for pkg index %d", path, len(p.pkgList)) - } - - // if the package was imported before, use that one; otherwise create a new one - if path == "" { - path = p.importpath - } - pkg := p.imports[path] - if pkg == nil { - pkg = types.NewPackage(path, name) - p.imports[path] = pkg - } else if pkg.Name() != name { - errorf("conflicting names %s and %s for package %q", pkg.Name(), name, path) - } - p.pkgList = append(p.pkgList, pkg) - - return pkg -} - -// objTag returns the tag value for each object kind. -func objTag(obj types.Object) int { - switch obj.(type) { - case *types.Const: - return constTag - case *types.TypeName: - return typeTag - case *types.Var: - return varTag - case *types.Func: - return funcTag - default: - errorf("unexpected object: %v (%T)", obj, obj) // panics - panic("unreachable") - } -} - -func sameObj(a, b types.Object) bool { - // Because unnamed types are not canonicalized, we cannot simply compare types for - // (pointer) identity. - // Ideally we'd check equality of constant values as well, but this is good enough. - return objTag(a) == objTag(b) && types.Identical(a.Type(), b.Type()) -} - -func (p *importer) declare(obj types.Object) { - pkg := obj.Pkg() - if alt := pkg.Scope().Insert(obj); alt != nil { - // This can only trigger if we import a (non-type) object a second time. - // Excluding type aliases, this cannot happen because 1) we only import a package - // once; and b) we ignore compiler-specific export data which may contain - // functions whose inlined function bodies refer to other functions that - // were already imported. - // However, type aliases require reexporting the original type, so we need - // to allow it (see also the comment in cmd/compile/internal/gc/bimport.go, - // method importer.obj, switch case importing functions). - // TODO(gri) review/update this comment once the gc compiler handles type aliases. 
- if !sameObj(obj, alt) { - errorf("inconsistent import:\n\t%v\npreviously imported as:\n\t%v\n", obj, alt) - } - } -} - -func (p *importer) obj(tag int) { - switch tag { - case constTag: - pos := p.pos() - pkg, name := p.qualifiedName() - typ := p.typ(nil, nil) - val := p.value() - p.declare(types.NewConst(pos, pkg, name, typ, val)) - - case aliasTag: - // TODO(gri) verify type alias hookup is correct - pos := p.pos() - pkg, name := p.qualifiedName() - typ := p.typ(nil, nil) - p.declare(types.NewTypeName(pos, pkg, name, typ)) - - case typeTag: - p.typ(nil, nil) - - case varTag: - pos := p.pos() - pkg, name := p.qualifiedName() - typ := p.typ(nil, nil) - p.declare(types.NewVar(pos, pkg, name, typ)) - - case funcTag: - pos := p.pos() - pkg, name := p.qualifiedName() - params, isddd := p.paramList() - result, _ := p.paramList() - sig := types.NewSignature(nil, params, result, isddd) - p.declare(types.NewFunc(pos, pkg, name, sig)) - - default: - errorf("unexpected object tag %d", tag) - } -} - const deltaNewFile = -64 // see cmd/compile/internal/gc/bexport.go -func (p *importer) pos() token.Pos { - if !p.posInfoFormat { - return token.NoPos - } - - file := p.prevFile - line := p.prevLine - delta := p.int() - line += delta - if p.version >= 5 { - if delta == deltaNewFile { - if n := p.int(); n >= 0 { - // file changed - file = p.path() - line = n - } - } - } else { - if delta == 0 { - if n := p.int(); n >= 0 { - // file changed - file = p.prevFile[:n] + p.string() - line = p.int() - } - } - } - p.prevFile = file - p.prevLine = line - - return p.fake.pos(file, line, 0) -} - // Synthesize a token.Pos type fakeFileSet struct { fset *token.FileSet @@ -389,205 +73,6 @@ fakeLinesOnce sync.Once ) -func (p *importer) qualifiedName() (pkg *types.Package, name string) { - name = p.string() - pkg = p.pkg() - return -} - -func (p *importer) record(t types.Type) { - p.typList = append(p.typList, t) -} - -// A dddSlice is a types.Type representing ...T parameters. -// It only appears for parameter types and does not escape -// the importer. -type dddSlice struct { - elem types.Type -} - -func (t *dddSlice) Underlying() types.Type { return t } -func (t *dddSlice) String() string { return "..." + t.elem.String() } - -// parent is the package which declared the type; parent == nil means -// the package currently imported. The parent package is needed for -// exported struct fields and interface methods which don't contain -// explicit package information in the export data. -// -// A non-nil tname is used as the "owner" of the result type; i.e., -// the result type is the underlying type of tname. tname is used -// to give interface methods a named receiver type where possible. 
-func (p *importer) typ(parent *types.Package, tname *types.Named) types.Type { - // if the type was seen before, i is its index (>= 0) - i := p.tagOrIndex() - if i >= 0 { - return p.typList[i] - } - - // otherwise, i is the type tag (< 0) - switch i { - case namedTag: - // read type object - pos := p.pos() - parent, name := p.qualifiedName() - scope := parent.Scope() - obj := scope.Lookup(name) - - // if the object doesn't exist yet, create and insert it - if obj == nil { - obj = types.NewTypeName(pos, parent, name, nil) - scope.Insert(obj) - } - - if _, ok := obj.(*types.TypeName); !ok { - errorf("pkg = %s, name = %s => %s", parent, name, obj) - } - - // associate new named type with obj if it doesn't exist yet - t0 := types.NewNamed(obj.(*types.TypeName), nil, nil) - - // but record the existing type, if any - tname := obj.Type().(*types.Named) // tname is either t0 or the existing type - p.record(tname) - - // read underlying type - t0.SetUnderlying(p.typ(parent, t0)) - - // interfaces don't have associated methods - if types.IsInterface(t0) { - return tname - } - - // read associated methods - for i := p.int(); i > 0; i-- { - // TODO(gri) replace this with something closer to fieldName - pos := p.pos() - name := p.string() - if !exported(name) { - p.pkg() - } - - recv, _ := p.paramList() // TODO(gri) do we need a full param list for the receiver? - params, isddd := p.paramList() - result, _ := p.paramList() - p.int() // go:nointerface pragma - discarded - - sig := types.NewSignature(recv.At(0), params, result, isddd) - t0.AddMethod(types.NewFunc(pos, parent, name, sig)) - } - - return tname - - case arrayTag: - t := new(types.Array) - if p.trackAllTypes { - p.record(t) - } - - n := p.int64() - *t = *types.NewArray(p.typ(parent, nil), n) - return t - - case sliceTag: - t := new(types.Slice) - if p.trackAllTypes { - p.record(t) - } - - *t = *types.NewSlice(p.typ(parent, nil)) - return t - - case dddTag: - t := new(dddSlice) - if p.trackAllTypes { - p.record(t) - } - - t.elem = p.typ(parent, nil) - return t - - case structTag: - t := new(types.Struct) - if p.trackAllTypes { - p.record(t) - } - - *t = *types.NewStruct(p.fieldList(parent)) - return t - - case pointerTag: - t := new(types.Pointer) - if p.trackAllTypes { - p.record(t) - } - - *t = *types.NewPointer(p.typ(parent, nil)) - return t - - case signatureTag: - t := new(types.Signature) - if p.trackAllTypes { - p.record(t) - } - - params, isddd := p.paramList() - result, _ := p.paramList() - *t = *types.NewSignature(nil, params, result, isddd) - return t - - case interfaceTag: - // Create a dummy entry in the type list. This is safe because we - // cannot expect the interface type to appear in a cycle, as any - // such cycle must contain a named type which would have been - // first defined earlier. - // TODO(gri) Is this still true now that we have type aliases? - // See issue #23225. 
- n := len(p.typList) - if p.trackAllTypes { - p.record(nil) - } - - var embeddeds []types.Type - for n := p.int(); n > 0; n-- { - p.pos() - embeddeds = append(embeddeds, p.typ(parent, nil)) - } - - t := newInterface(p.methodList(parent, tname), embeddeds) - p.interfaceList = append(p.interfaceList, t) - if p.trackAllTypes { - p.typList[n] = t - } - return t - - case mapTag: - t := new(types.Map) - if p.trackAllTypes { - p.record(t) - } - - key := p.typ(parent, nil) - val := p.typ(parent, nil) - *t = *types.NewMap(key, val) - return t - - case chanTag: - t := new(types.Chan) - if p.trackAllTypes { - p.record(t) - } - - dir := chanDir(p.int()) - val := p.typ(parent, nil) - *t = *types.NewChan(dir, val) - return t - - default: - errorf("unexpected type tag %d", i) // panics - panic("unreachable") - } -} - func chanDir(d int) types.ChanDir { // tag values must match the constants in cmd/compile/internal/gc/go.go switch d { @@ -603,394 +88,6 @@ } } -func (p *importer) fieldList(parent *types.Package) (fields []*types.Var, tags []string) { - if n := p.int(); n > 0 { - fields = make([]*types.Var, n) - tags = make([]string, n) - for i := range fields { - fields[i], tags[i] = p.field(parent) - } - } - return -} - -func (p *importer) field(parent *types.Package) (*types.Var, string) { - pos := p.pos() - pkg, name, alias := p.fieldName(parent) - typ := p.typ(parent, nil) - tag := p.string() - - anonymous := false - if name == "" { - // anonymous field - typ must be T or *T and T must be a type name - switch typ := deref(typ).(type) { - case *types.Basic: // basic types are named types - pkg = nil // // objects defined in Universe scope have no package - name = typ.Name() - case *types.Named: - name = typ.Obj().Name() - default: - errorf("named base type expected") - } - anonymous = true - } else if alias { - // anonymous field: we have an explicit name because it's an alias - anonymous = true - } - - return types.NewField(pos, pkg, name, typ, anonymous), tag -} - -func (p *importer) methodList(parent *types.Package, baseType *types.Named) (methods []*types.Func) { - if n := p.int(); n > 0 { - methods = make([]*types.Func, n) - for i := range methods { - methods[i] = p.method(parent, baseType) - } - } - return -} - -func (p *importer) method(parent *types.Package, baseType *types.Named) *types.Func { - pos := p.pos() - pkg, name, _ := p.fieldName(parent) - // If we don't have a baseType, use a nil receiver. - // A receiver using the actual interface type (which - // we don't know yet) will be filled in when we call - // types.Interface.Complete. 
- var recv *types.Var - if baseType != nil { - recv = types.NewVar(token.NoPos, parent, "", baseType) - } - params, isddd := p.paramList() - result, _ := p.paramList() - sig := types.NewSignature(recv, params, result, isddd) - return types.NewFunc(pos, pkg, name, sig) -} - -func (p *importer) fieldName(parent *types.Package) (pkg *types.Package, name string, alias bool) { - name = p.string() - pkg = parent - if pkg == nil { - // use the imported package instead - pkg = p.pkgList[0] - } - if p.version == 0 && name == "_" { - // version 0 didn't export a package for _ fields - return - } - switch name { - case "": - // 1) field name matches base type name and is exported: nothing to do - case "?": - // 2) field name matches base type name and is not exported: need package - name = "" - pkg = p.pkg() - case "@": - // 3) field name doesn't match type name (alias) - name = p.string() - alias = true - fallthrough - default: - if !exported(name) { - pkg = p.pkg() - } - } - return -} - -func (p *importer) paramList() (*types.Tuple, bool) { - n := p.int() - if n == 0 { - return nil, false - } - // negative length indicates unnamed parameters - named := true - if n < 0 { - n = -n - named = false - } - // n > 0 - params := make([]*types.Var, n) - isddd := false - for i := range params { - params[i], isddd = p.param(named) - } - return types.NewTuple(params...), isddd -} - -func (p *importer) param(named bool) (*types.Var, bool) { - t := p.typ(nil, nil) - td, isddd := t.(*dddSlice) - if isddd { - t = types.NewSlice(td.elem) - } - - var pkg *types.Package - var name string - if named { - name = p.string() - if name == "" { - errorf("expected named parameter") - } - if name != "_" { - pkg = p.pkg() - } - if i := strings.Index(name, "·"); i > 0 { - name = name[:i] // cut off gc-specific parameter numbering - } - } - - // read and discard compiler-specific info - p.string() - - return types.NewVar(token.NoPos, pkg, name, t), isddd -} - -func exported(name string) bool { - ch, _ := utf8.DecodeRuneInString(name) - return unicode.IsUpper(ch) -} - -func (p *importer) value() constant.Value { - switch tag := p.tagOrIndex(); tag { - case falseTag: - return constant.MakeBool(false) - case trueTag: - return constant.MakeBool(true) - case int64Tag: - return constant.MakeInt64(p.int64()) - case floatTag: - return p.float() - case complexTag: - re := p.float() - im := p.float() - return constant.BinaryOp(re, token.ADD, constant.MakeImag(im)) - case stringTag: - return constant.MakeString(p.string()) - case unknownTag: - return constant.MakeUnknown() - default: - errorf("unexpected value tag %d", tag) // panics - panic("unreachable") - } -} - -func (p *importer) float() constant.Value { - sign := p.int() - if sign == 0 { - return constant.MakeInt64(0) - } - - exp := p.int() - mant := []byte(p.string()) // big endian - - // remove leading 0's if any - for len(mant) > 0 && mant[0] == 0 { - mant = mant[1:] - } - - // convert to little endian - // TODO(gri) go/constant should have a more direct conversion function - // (e.g., once it supports a big.Float based implementation) - for i, j := 0, len(mant)-1; i < j; i, j = i+1, j-1 { - mant[i], mant[j] = mant[j], mant[i] - } - - // adjust exponent (constant.MakeFromBytes creates an integer value, - // but mant represents the mantissa bits such that 0.5 <= mant < 1.0) - exp -= len(mant) << 3 - if len(mant) > 0 { - for msd := mant[len(mant)-1]; msd&0x80 == 0; msd <<= 1 { - exp++ - } - } - - x := constant.MakeFromBytes(mant) - switch { - case exp < 0: - d := 
constant.Shift(constant.MakeInt64(1), token.SHL, uint(-exp)) - x = constant.BinaryOp(x, token.QUO, d) - case exp > 0: - x = constant.Shift(x, token.SHL, uint(exp)) - } - - if sign < 0 { - x = constant.UnaryOp(token.SUB, x, 0) - } - return x -} - -// ---------------------------------------------------------------------------- -// Low-level decoders - -func (p *importer) tagOrIndex() int { - if p.debugFormat { - p.marker('t') - } - - return int(p.rawInt64()) -} - -func (p *importer) int() int { - x := p.int64() - if int64(int(x)) != x { - errorf("exported integer too large") - } - return int(x) -} - -func (p *importer) int64() int64 { - if p.debugFormat { - p.marker('i') - } - - return p.rawInt64() -} - -func (p *importer) path() string { - if p.debugFormat { - p.marker('p') - } - // if the path was seen before, i is its index (>= 0) - // (the empty string is at index 0) - i := p.rawInt64() - if i >= 0 { - return p.pathList[i] - } - // otherwise, i is the negative path length (< 0) - a := make([]string, -i) - for n := range a { - a[n] = p.string() - } - s := strings.Join(a, "/") - p.pathList = append(p.pathList, s) - return s -} - -func (p *importer) string() string { - if p.debugFormat { - p.marker('s') - } - // if the string was seen before, i is its index (>= 0) - // (the empty string is at index 0) - i := p.rawInt64() - if i >= 0 { - return p.strList[i] - } - // otherwise, i is the negative string length (< 0) - if n := int(-i); n <= cap(p.buf) { - p.buf = p.buf[:n] - } else { - p.buf = make([]byte, n) - } - for i := range p.buf { - p.buf[i] = p.rawByte() - } - s := string(p.buf) - p.strList = append(p.strList, s) - return s -} - -func (p *importer) marker(want byte) { - if got := p.rawByte(); got != want { - errorf("incorrect marker: got %c; want %c (pos = %d)", got, want, p.read) - } - - pos := p.read - if n := int(p.rawInt64()); n != pos { - errorf("incorrect position: got %d; want %d", n, pos) - } -} - -// rawInt64 should only be used by low-level decoders. -func (p *importer) rawInt64() int64 { - i, err := binary.ReadVarint(p) - if err != nil { - errorf("read error: %v", err) - } - return i -} - -// rawStringln should only be used to read the initial version string. -func (p *importer) rawStringln(b byte) string { - p.buf = p.buf[:0] - for b != '\n' { - p.buf = append(p.buf, b) - b = p.rawByte() - } - return string(p.buf) -} - -// needed for binary.ReadVarint in rawInt64 -func (p *importer) ReadByte() (byte, error) { - return p.rawByte(), nil -} - -// byte is the bottleneck interface for reading p.data. -// It unescapes '|' 'S' to '$' and '|' '|' to '|'. -// rawByte should only be used by low-level decoders. -func (p *importer) rawByte() byte { - b := p.data[0] - r := 1 - if b == '|' { - b = p.data[1] - r = 2 - switch b { - case 'S': - b = '$' - case '|': - // nothing to do - default: - errorf("unexpected escape sequence in export data") - } - } - p.data = p.data[r:] - p.read += r - return b - -} - -// ---------------------------------------------------------------------------- -// Export format - -// Tags. Must be < 0. 
-const ( - // Objects - packageTag = -(iota + 1) - constTag - typeTag - varTag - funcTag - endTag - - // Types - namedTag - arrayTag - sliceTag - dddTag - structTag - pointerTag - signatureTag - interfaceTag - mapTag - chanTag - - // Values - falseTag - trueTag - int64Tag - floatTag - fractionTag // not used by gc - complexTag - stringTag - nilTag // only used by gc (appears in exported inlined function bodies) - unknownTag // not used by gc (only appears in packages with errors) - - // Type aliases - aliasTag -) - var predeclOnce sync.Once var predecl []types.Type // initialized lazily diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go temporal-1.22.5/src/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go --- temporal-1.21.5-1/src/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go 2024-02-23 09:46:14.000000000 +0000 @@ -230,20 +230,17 @@ // Or, define a new standard go/types/gcexportdata package. fset := token.NewFileSet() - // The indexed export format starts with an 'i'; the older - // binary export format starts with a 'c', 'd', or 'v' - // (from "version"). Select appropriate importer. + // Select appropriate importer. if len(data) > 0 { switch data[0] { - case 'i': - _, pkg, err := IImportData(fset, packages, data[1:], id) - return pkg, err + case 'v', 'c', 'd': // binary, till go1.10 + return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0]) - case 'v', 'c', 'd': - _, pkg, err := BImportData(fset, packages, data, id) + case 'i': // indexed, till go1.19 + _, pkg, err := IImportData(fset, packages, data[1:], id) return pkg, err - case 'u': + case 'u': // unified, from go1.20 _, pkg, err := UImportData(fset, packages, data[1:size], id) return pkg, err diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/tools/internal/gcimporter/iexport.go temporal-1.22.5/src/vendor/golang.org/x/tools/internal/gcimporter/iexport.go --- temporal-1.21.5-1/src/vendor/golang.org/x/tools/internal/gcimporter/iexport.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/tools/internal/gcimporter/iexport.go 2024-02-23 09:46:14.000000000 +0000 @@ -44,12 +44,12 @@ return out.Bytes(), err } -// IImportShallow decodes "shallow" types.Package data encoded by IExportShallow -// in the same executable. This function cannot import data from +// IImportShallow decodes "shallow" types.Package data encoded by +// IExportShallow in the same executable. This function cannot import data from // cmd/compile or gcexportdata.Write. -func IImportShallow(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string, insert InsertType) (*types.Package, error) { +func IImportShallow(fset *token.FileSet, getPackage GetPackageFunc, data []byte, path string, insert InsertType) (*types.Package, error) { const bundle = false - pkgs, err := iimportCommon(fset, imports, data, bundle, path, insert) + pkgs, err := iimportCommon(fset, getPackage, data, bundle, path, insert) if err != nil { return nil, err } @@ -913,6 +913,17 @@ w.int64(int64(v.Kind())) } + if v.Kind() == constant.Unknown { + // golang/go#60605: treat unknown constant values as if they have invalid type + // + // This loses some fidelity over the package type-checked from source, but that + // is acceptable. 
+ // + // TODO(rfindley): we should switch on the recorded constant kind rather + // than the constant type + return + } + switch b := typ.Underlying().(*types.Basic); b.Info() & types.IsConstType { case types.IsBoolean: w.bool(constant.BoolVal(v)) @@ -969,6 +980,16 @@ return &f } +func valueToRat(x constant.Value) *big.Rat { + // Convert little-endian to big-endian. + // I can't believe this is necessary. + bytes := constant.Bytes(x) + for i := 0; i < len(bytes)/2; i++ { + bytes[i], bytes[len(bytes)-1-i] = bytes[len(bytes)-1-i], bytes[i] + } + return new(big.Rat).SetInt(new(big.Int).SetBytes(bytes)) +} + // mpint exports a multi-precision integer. // // For unsigned types, small values are written out as a single @@ -1178,3 +1199,12 @@ q.head++ return obj } + +// internalError represents an error generated inside this package. +type internalError string + +func (e internalError) Error() string { return "gcimporter: " + string(e) } + +func internalErrorf(format string, args ...interface{}) error { + return internalError(fmt.Sprintf(format, args...)) +} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/tools/internal/gcimporter/iimport.go temporal-1.22.5/src/vendor/golang.org/x/tools/internal/gcimporter/iimport.go --- temporal-1.21.5-1/src/vendor/golang.org/x/tools/internal/gcimporter/iimport.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/tools/internal/gcimporter/iimport.go 2024-02-23 09:46:14.000000000 +0000 @@ -85,7 +85,7 @@ // If the export data version is not recognized or the format is otherwise // compromised, an error is returned. func IImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (int, *types.Package, error) { - pkgs, err := iimportCommon(fset, imports, data, false, path, nil) + pkgs, err := iimportCommon(fset, GetPackageFromMap(imports), data, false, path, nil) if err != nil { return 0, nil, err } @@ -94,10 +94,33 @@ // IImportBundle imports a set of packages from the serialized package bundle. func IImportBundle(fset *token.FileSet, imports map[string]*types.Package, data []byte) ([]*types.Package, error) { - return iimportCommon(fset, imports, data, true, "", nil) + return iimportCommon(fset, GetPackageFromMap(imports), data, true, "", nil) } -func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data []byte, bundle bool, path string, insert InsertType) (pkgs []*types.Package, err error) { +// A GetPackageFunc is a function that gets the package with the given path +// from the importer state, creating it (with the specified name) if necessary. +// It is an abstraction of the map historically used to memoize package creation. +// +// Two calls with the same path must return the same package. +// +// If the given getPackage func returns nil, the import will fail. +type GetPackageFunc = func(path, name string) *types.Package + +// GetPackageFromMap returns a GetPackageFunc that retrieves packages from the +// given map of package path -> package. +// +// The resulting func may mutate m: if a requested package is not found, a new +// package will be inserted into m. 
+func GetPackageFromMap(m map[string]*types.Package) GetPackageFunc { + return func(path, name string) *types.Package { + if _, ok := m[path]; !ok { + m[path] = types.NewPackage(path, name) + } + return m[path] + } +} + +func iimportCommon(fset *token.FileSet, getPackage GetPackageFunc, data []byte, bundle bool, path string, insert InsertType) (pkgs []*types.Package, err error) { const currentVersion = iexportVersionCurrent version := int64(-1) if !debug { @@ -108,7 +131,7 @@ } else if version > currentVersion { err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e) } else { - err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e) + err = fmt.Errorf("internal error while importing %q (%v); please report an issue", path, e) } } }() @@ -117,11 +140,8 @@ r := &intReader{bytes.NewReader(data), path} if bundle { - bundleVersion := r.uint64() - switch bundleVersion { - case bundleVersion: - default: - errorf("unknown bundle format version %d", bundleVersion) + if v := r.uint64(); v != bundleVersion { + errorf("unknown bundle format version %d", v) } } @@ -195,10 +215,9 @@ if pkgPath == "" { pkgPath = path } - pkg := imports[pkgPath] + pkg := getPackage(pkgPath, pkgName) if pkg == nil { - pkg = types.NewPackage(pkgPath, pkgName) - imports[pkgPath] = pkg + errorf("internal error: getPackage returned nil package for %s", pkgPath) } else if pkg.Name() != pkgName { errorf("conflicting names %s and %s for package %q", pkg.Name(), pkgName, path) } diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go temporal-1.22.5/src/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go --- temporal-1.21.5-1/src/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go 2024-02-23 09:46:14.000000000 +0000 @@ -10,6 +10,7 @@ package gcimporter import ( + "fmt" "go/token" "go/types" "sort" @@ -63,6 +64,14 @@ } func UImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) { + if !debug { + defer func() { + if x := recover(); x != nil { + err = fmt.Errorf("internal error in importing %q (%v); please report an issue", path, x) + } + }() + } + s := string(data) s = s[:strings.LastIndex(s, "\n$$\n")] input := pkgbits.NewPkgDecoder(path, s) diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/tools/internal/gocommand/invoke.go temporal-1.22.5/src/vendor/golang.org/x/tools/internal/gocommand/invoke.go --- temporal-1.21.5-1/src/vendor/golang.org/x/tools/internal/gocommand/invoke.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/tools/internal/gocommand/invoke.go 2024-02-23 09:46:14.000000000 +0000 @@ -8,10 +8,12 @@ import ( "bytes" "context" + "errors" "fmt" "io" "log" "os" + "reflect" "regexp" "runtime" "strconv" @@ -22,6 +24,9 @@ exec "golang.org/x/sys/execabs" "golang.org/x/tools/internal/event" + "golang.org/x/tools/internal/event/keys" + "golang.org/x/tools/internal/event/label" + "golang.org/x/tools/internal/event/tag" ) // An Runner will run go command invocations and serialize @@ -51,9 +56,19 @@ // 1.14: go: updating go.mod: existing contents have changed since last read var modConcurrencyError = regexp.MustCompile(`go:.*go.mod.*contents have changed`) +// verb is an event label for the go command verb. 
+var verb = keys.NewString("verb", "go command verb") + +func invLabels(inv Invocation) []label.Label { + return []label.Label{verb.Of(inv.Verb), tag.Directory.Of(inv.WorkingDir)} +} + // Run is a convenience wrapper around RunRaw. // It returns only stdout and a "friendly" error. func (runner *Runner) Run(ctx context.Context, inv Invocation) (*bytes.Buffer, error) { + ctx, done := event.Start(ctx, "gocommand.Runner.Run", invLabels(inv)...) + defer done() + stdout, _, friendly, _ := runner.RunRaw(ctx, inv) return stdout, friendly } @@ -61,6 +76,9 @@ // RunPiped runs the invocation serially, always waiting for any concurrent // invocations to complete first. func (runner *Runner) RunPiped(ctx context.Context, inv Invocation, stdout, stderr io.Writer) error { + ctx, done := event.Start(ctx, "gocommand.Runner.RunPiped", invLabels(inv)...) + defer done() + _, err := runner.runPiped(ctx, inv, stdout, stderr) return err } @@ -68,6 +86,8 @@ // RunRaw runs the invocation, serializing requests only if they fight over // go.mod changes. func (runner *Runner) RunRaw(ctx context.Context, inv Invocation) (*bytes.Buffer, *bytes.Buffer, error, error) { + ctx, done := event.Start(ctx, "gocommand.Runner.RunRaw", invLabels(inv)...) + defer done() // Make sure the runner is always initialized. runner.initialize() @@ -215,6 +235,18 @@ cmd := exec.Command("go", goArgs...) cmd.Stdout = stdout cmd.Stderr = stderr + + // cmd.WaitDelay was added only in go1.20 (see #50436). + if waitDelay := reflect.ValueOf(cmd).Elem().FieldByName("WaitDelay"); waitDelay.IsValid() { + // https://go.dev/issue/59541: don't wait forever copying stderr + // after the command has exited. + // After CL 484741 we copy stdout manually, so we we'll stop reading that as + // soon as ctx is done. However, we also don't want to wait around forever + // for stderr. Give a much-longer-than-reasonable delay and then assume that + // something has wedged in the kernel or runtime. + waitDelay.Set(reflect.ValueOf(30 * time.Second)) + } + // On darwin the cwd gets resolved to the real path, which breaks anything that // expects the working directory to keep the original path, including the // go command when dealing with modules. @@ -229,6 +261,7 @@ cmd.Env = append(cmd.Env, "PWD="+i.WorkingDir) cmd.Dir = i.WorkingDir } + defer func(start time.Time) { log("%s for %v", time.Since(start), cmdDebugStr(cmd)) }(time.Now()) return runCmdContext(ctx, cmd) @@ -242,10 +275,85 @@ // runCmdContext is like exec.CommandContext except it sends os.Interrupt // before os.Kill. -func runCmdContext(ctx context.Context, cmd *exec.Cmd) error { - if err := cmd.Start(); err != nil { +func runCmdContext(ctx context.Context, cmd *exec.Cmd) (err error) { + // If cmd.Stdout is not an *os.File, the exec package will create a pipe and + // copy it to the Writer in a goroutine until the process has finished and + // either the pipe reaches EOF or command's WaitDelay expires. + // + // However, the output from 'go list' can be quite large, and we don't want to + // keep reading (and allocating buffers) if we've already decided we don't + // care about the output. We don't want to wait for the process to finish, and + // we don't wait to wait for the WaitDelay to expire either. + // + // Instead, if cmd.Stdout requires a copying goroutine we explicitly replace + // it with a pipe (which is an *os.File), which we can close in order to stop + // copying output as soon as we realize we don't care about it. 
+ var stdoutW *os.File + if cmd.Stdout != nil { + if _, ok := cmd.Stdout.(*os.File); !ok { + var stdoutR *os.File + stdoutR, stdoutW, err = os.Pipe() + if err != nil { + return err + } + prevStdout := cmd.Stdout + cmd.Stdout = stdoutW + + stdoutErr := make(chan error, 1) + go func() { + _, err := io.Copy(prevStdout, stdoutR) + if err != nil { + err = fmt.Errorf("copying stdout: %w", err) + } + stdoutErr <- err + }() + defer func() { + // We started a goroutine to copy a stdout pipe. + // Wait for it to finish, or terminate it if need be. + var err2 error + select { + case err2 = <-stdoutErr: + stdoutR.Close() + case <-ctx.Done(): + stdoutR.Close() + // Per https://pkg.go.dev/os#File.Close, the call to stdoutR.Close + // should cause the Read call in io.Copy to unblock and return + // immediately, but we still need to receive from stdoutErr to confirm + // that that has happened. + <-stdoutErr + err2 = ctx.Err() + } + if err == nil { + err = err2 + } + }() + + // Per https://pkg.go.dev/os/exec#Cmd, “If Stdout and Stderr are the + // same writer, and have a type that can be compared with ==, at most + // one goroutine at a time will call Write.” + // + // Since we're starting a goroutine that writes to cmd.Stdout, we must + // also update cmd.Stderr so that that still holds. + func() { + defer func() { recover() }() + if cmd.Stderr == prevStdout { + cmd.Stderr = cmd.Stdout + } + }() + } + } + + err = cmd.Start() + if stdoutW != nil { + // The child process has inherited the pipe file, + // so close the copy held in this process. + stdoutW.Close() + stdoutW = nil + } + if err != nil { return err } + resChan := make(chan error, 1) go func() { resChan <- cmd.Wait() @@ -253,11 +361,14 @@ // If we're interested in debugging hanging Go commands, stop waiting after a // minute and panic with interesting information. - if DebugHangingGoCommands { + debug := DebugHangingGoCommands + if debug { + timer := time.NewTimer(1 * time.Minute) + defer timer.Stop() select { case err := <-resChan: return err - case <-time.After(1 * time.Minute): + case <-timer.C: HandleHangingGoCommand(cmd.Process) case <-ctx.Done(): } @@ -270,30 +381,25 @@ } // Cancelled. Interrupt and see if it ends voluntarily. - cmd.Process.Signal(os.Interrupt) - select { - case err := <-resChan: - return err - case <-time.After(time.Second): + if err := cmd.Process.Signal(os.Interrupt); err == nil { + // (We used to wait only 1s but this proved + // fragile on loaded builder machines.) + timer := time.NewTimer(5 * time.Second) + defer timer.Stop() + select { + case err := <-resChan: + return err + case <-timer.C: + } } // Didn't shut down in response to interrupt. Kill it hard. // TODO(rfindley): per advice from bcmills@, it may be better to send SIGQUIT // on certain platforms, such as unix. - if err := cmd.Process.Kill(); err != nil && DebugHangingGoCommands { - // Don't panic here as this reliably fails on windows with EINVAL. + if err := cmd.Process.Kill(); err != nil && !errors.Is(err, os.ErrProcessDone) && debug { log.Printf("error killing the Go command: %v", err) } - // See above: don't wait indefinitely if we're debugging hanging Go commands. 
- if DebugHangingGoCommands { - select { - case err := <-resChan: - return err - case <-time.After(10 * time.Second): // a shorter wait as resChan should return quickly following Kill - HandleHangingGoCommand(cmd.Process) - } - } return <-resChan } diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/tools/internal/gocommand/version.go temporal-1.22.5/src/vendor/golang.org/x/tools/internal/gocommand/version.go --- temporal-1.21.5-1/src/vendor/golang.org/x/tools/internal/gocommand/version.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/tools/internal/gocommand/version.go 2024-02-23 09:46:14.000000000 +0000 @@ -23,21 +23,11 @@ func GoVersion(ctx context.Context, inv Invocation, r *Runner) (int, error) { inv.Verb = "list" inv.Args = []string{"-e", "-f", `{{context.ReleaseTags}}`, `--`, `unsafe`} - inv.Env = append(append([]string{}, inv.Env...), "GO111MODULE=off") - // Unset any unneeded flags, and remove them from BuildFlags, if they're - // present. - inv.ModFile = "" + inv.BuildFlags = nil // This is not a build command. inv.ModFlag = "" - var buildFlags []string - for _, flag := range inv.BuildFlags { - // Flags can be prefixed by one or two dashes. - f := strings.TrimPrefix(strings.TrimPrefix(flag, "-"), "-") - if strings.HasPrefix(f, "mod=") || strings.HasPrefix(f, "modfile=") { - continue - } - buildFlags = append(buildFlags, flag) - } - inv.BuildFlags = buildFlags + inv.ModFile = "" + inv.Env = append(inv.Env[:len(inv.Env):len(inv.Env)], "GO111MODULE=off") + stdoutBytes, err := r.Run(ctx, inv) if err != nil { return 0, err diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go temporal-1.22.5/src/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go --- temporal-1.21.5-1/src/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go 2024-02-23 09:46:14.000000000 +0000 @@ -7,7 +7,9 @@ package tokeninternal import ( + "fmt" "go/token" + "sort" "sync" "unsafe" ) @@ -57,3 +59,93 @@ panic("unexpected token.File size") } } + +// AddExistingFiles adds the specified files to the FileSet if they +// are not already present. It panics if any pair of files in the +// resulting FileSet would overlap. +func AddExistingFiles(fset *token.FileSet, files []*token.File) { + // Punch through the FileSet encapsulation. + type tokenFileSet struct { + // This type remained essentially consistent from go1.16 to go1.21. + mutex sync.RWMutex + base int + files []*token.File + _ *token.File // changed to atomic.Pointer[token.File] in go1.19 + } + + // If the size of token.FileSet changes, this will fail to compile. + const delta = int64(unsafe.Sizeof(tokenFileSet{})) - int64(unsafe.Sizeof(token.FileSet{})) + var _ [-delta * delta]int + + type uP = unsafe.Pointer + var ptr *tokenFileSet + *(*uP)(uP(&ptr)) = uP(fset) + ptr.mutex.Lock() + defer ptr.mutex.Unlock() + + // Merge and sort. + newFiles := append(ptr.files, files...) + sort.Slice(newFiles, func(i, j int) bool { + return newFiles[i].Base() < newFiles[j].Base() + }) + + // Reject overlapping files. + // Discard adjacent identical files. 
+ out := newFiles[:0] + for i, file := range newFiles { + if i > 0 { + prev := newFiles[i-1] + if file == prev { + continue + } + if prev.Base()+prev.Size()+1 > file.Base() { + panic(fmt.Sprintf("file %s (%d-%d) overlaps with file %s (%d-%d)", + prev.Name(), prev.Base(), prev.Base()+prev.Size(), + file.Name(), file.Base(), file.Base()+file.Size())) + } + } + out = append(out, file) + } + newFiles = out + + ptr.files = newFiles + + // Advance FileSet.Base(). + if len(newFiles) > 0 { + last := newFiles[len(newFiles)-1] + newBase := last.Base() + last.Size() + 1 + if ptr.base < newBase { + ptr.base = newBase + } + } +} + +// FileSetFor returns a new FileSet containing a sequence of new Files with +// the same base, size, and line as the input files, for use in APIs that +// require a FileSet. +// +// Precondition: the input files must be non-overlapping, and sorted in order +// of their Base. +func FileSetFor(files ...*token.File) *token.FileSet { + fset := token.NewFileSet() + for _, f := range files { + f2 := fset.AddFile(f.Name(), f.Base(), f.Size()) + lines := GetLines(f) + f2.SetLines(lines) + } + return fset +} + +// CloneFileSet creates a new FileSet holding all files in fset. It does not +// create copies of the token.Files in fset: they are added to the resulting +// FileSet unmodified. +func CloneFileSet(fset *token.FileSet) *token.FileSet { + var files []*token.File + fset.Iterate(func(f *token.File) bool { + files = append(files, f) + return true + }) + newFileSet := token.NewFileSet() + AddExistingFiles(newFileSet, files) + return newFileSet +} diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/tools/internal/typeparams/common.go temporal-1.22.5/src/vendor/golang.org/x/tools/internal/typeparams/common.go --- temporal-1.21.5-1/src/vendor/golang.org/x/tools/internal/typeparams/common.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/tools/internal/typeparams/common.go 2024-02-23 09:46:14.000000000 +0000 @@ -105,6 +105,26 @@ } orig := NamedTypeOrigin(named) gfn, _, _ := types.LookupFieldOrMethod(orig, true, fn.Pkg(), fn.Name()) + + // This is a fix for a gopls crash (#60628) due to a go/types bug (#60634). In: + // package p + // type T *int + // func (*T) f() {} + // LookupFieldOrMethod(T, true, p, f)=nil, but NewMethodSet(*T)={(*T).f}. + // Here we make them consistent by force. + // (The go/types bug is general, but this workaround is reached only + // for generic T thanks to the early return above.) + if gfn == nil { + mset := types.NewMethodSet(types.NewPointer(orig)) + for i := 0; i < mset.Len(); i++ { + m := mset.At(i) + if m.Obj().Id() == fn.Id() { + gfn = m.Obj() + break + } + } + } + return gfn.(*types.Func) } diff -Nru temporal-1.21.5-1/src/vendor/golang.org/x/tools/internal/typesinternal/types.go temporal-1.22.5/src/vendor/golang.org/x/tools/internal/typesinternal/types.go --- temporal-1.21.5-1/src/vendor/golang.org/x/tools/internal/typesinternal/types.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/golang.org/x/tools/internal/typesinternal/types.go 2024-02-23 09:46:14.000000000 +0000 @@ -11,8 +11,6 @@ "go/types" "reflect" "unsafe" - - "golang.org/x/tools/go/types/objectpath" ) func SetUsesCgo(conf *types.Config) bool { @@ -52,10 +50,3 @@ } var SetGoVersion = func(conf *types.Config, version string) bool { return false } - -// NewObjectpathEncoder returns a function closure equivalent to -// objectpath.For but amortized for multiple (sequential) calls. 
-// It is a temporary workaround, pending the approval of proposal 58668. -// -//go:linkname NewObjectpathFunc golang.org/x/tools/go/types/objectpath.newEncoderFor -func NewObjectpathFunc() func(types.Object) (objectpath.Path, error) diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/api/googleapi/googleapi.go temporal-1.22.5/src/vendor/google.golang.org/api/googleapi/googleapi.go --- temporal-1.21.5-1/src/vendor/google.golang.org/api/googleapi/googleapi.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/api/googleapi/googleapi.go 2024-02-23 09:46:14.000000000 +0000 @@ -11,7 +11,6 @@ "encoding/json" "fmt" "io" - "io/ioutil" "net/http" "net/url" "strings" @@ -144,7 +143,7 @@ if res.StatusCode >= 200 && res.StatusCode <= 299 { return nil } - slurp, err := ioutil.ReadAll(res.Body) + slurp, err := io.ReadAll(res.Body) if err == nil { jerr := new(errorReply) err = json.Unmarshal(slurp, jerr) @@ -184,7 +183,7 @@ if res.StatusCode >= 200 && res.StatusCode <= 299 { return nil } - slurp, _ := ioutil.ReadAll(io.LimitReader(res.Body, 1<<20)) + slurp, _ := io.ReadAll(io.LimitReader(res.Body, 1<<20)) return &Error{ Code: res.StatusCode, Body: string(slurp), diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go temporal-1.22.5/src/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go --- temporal-1.21.5-1/src/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go 2024-02-23 09:46:14.000000000 +0000 @@ -71,6 +71,7 @@ var _ = strings.Replace var _ = context.Canceled var _ = internaloption.WithDefaultEndpoint +var _ = internal.Version const apiId = "iamcredentials:v1" const apiName = "iamcredentials" diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/api/internal/cba.go temporal-1.22.5/src/vendor/google.golang.org/api/internal/cba.go --- temporal-1.21.5-1/src/vendor/google.golang.org/api/internal/cba.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/api/internal/cba.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,282 @@ +// Copyright 2020 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// cba.go (certificate-based access) contains utils for implementing Device Certificate +// Authentication according to https://google.aip.dev/auth/4114 and Default Credentials +// for Google Cloud Virtual Environments according to https://google.aip.dev/auth/4115. +// +// The overall logic for DCA is as follows: +// 1. If both endpoint override and client certificate are specified, use them as is. +// 2. If user does not specify client certificate, we will attempt to use default +// client certificate. +// 3. If user does not specify endpoint override, we will use defaultMtlsEndpoint if +// client certificate is available and defaultEndpoint otherwise. +// +// Implications of the above logic: +// 1. If the user specifies a non-mTLS endpoint override but client certificate is +// available, we will pass along the cert anyway and let the server decide what to do. +// 2. If the user specifies an mTLS endpoint override but client certificate is not +// available, we will not fail-fast, but let backend throw error when connecting. 
+// +// If running within Google's cloud environment, and client certificate is not specified +// and not available through DCA, we will try mTLS with credentials held by +// the Secure Session Agent, which is part of Google's cloud infrastructure. +// +// We would like to avoid introducing client-side logic that parses whether the +// endpoint override is an mTLS url, since the url pattern may change at anytime. +// +// This package is not intended for use by end developers. Use the +// google.golang.org/api/option package to configure API clients. + +// Package internal supports the options and transport packages. +package internal + +import ( + "context" + "crypto/tls" + "net" + "net/url" + "os" + "strings" + + "github.com/google/s2a-go" + "github.com/google/s2a-go/fallback" + "google.golang.org/api/internal/cert" + "google.golang.org/grpc/credentials" +) + +const ( + mTLSModeAlways = "always" + mTLSModeNever = "never" + mTLSModeAuto = "auto" + + // Experimental: if true, the code will try MTLS with S2A as the default for transport security. Default value is false. + googleAPIUseS2AEnv = "EXPERIMENTAL_GOOGLE_API_USE_S2A" +) + +// getClientCertificateSourceAndEndpoint is a convenience function that invokes +// getClientCertificateSource and getEndpoint sequentially and returns the client +// cert source and endpoint as a tuple. +func getClientCertificateSourceAndEndpoint(settings *DialSettings) (cert.Source, string, error) { + clientCertSource, err := getClientCertificateSource(settings) + if err != nil { + return nil, "", err + } + endpoint, err := getEndpoint(settings, clientCertSource) + if err != nil { + return nil, "", err + } + return clientCertSource, endpoint, nil +} + +type transportConfig struct { + clientCertSource cert.Source // The client certificate source. + endpoint string // The corresponding endpoint to use based on client certificate source. + s2aAddress string // The S2A address if it can be used, otherwise an empty string. + s2aMTLSEndpoint string // The MTLS endpoint to use with S2A. +} + +func getTransportConfig(settings *DialSettings) (*transportConfig, error) { + clientCertSource, endpoint, err := getClientCertificateSourceAndEndpoint(settings) + if err != nil { + return &transportConfig{ + clientCertSource: nil, endpoint: "", s2aAddress: "", s2aMTLSEndpoint: "", + }, err + } + defaultTransportConfig := transportConfig{ + clientCertSource: clientCertSource, + endpoint: endpoint, + s2aAddress: "", + s2aMTLSEndpoint: "", + } + + // Check the env to determine whether to use S2A. + if !isGoogleS2AEnabled() { + return &defaultTransportConfig, nil + } + + // If client cert is found, use that over S2A. + // If MTLS is not enabled for the endpoint, skip S2A. + if clientCertSource != nil || !mtlsEndpointEnabledForS2A() { + return &defaultTransportConfig, nil + } + s2aMTLSEndpoint := settings.DefaultMTLSEndpoint + // If there is endpoint override, honor it. + if settings.Endpoint != "" { + s2aMTLSEndpoint = endpoint + } + s2aAddress := GetS2AAddress() + if s2aAddress == "" { + return &defaultTransportConfig, nil + } + return &transportConfig{ + clientCertSource: clientCertSource, + endpoint: endpoint, + s2aAddress: s2aAddress, + s2aMTLSEndpoint: s2aMTLSEndpoint, + }, nil +} + +func isGoogleS2AEnabled() bool { + return strings.ToLower(os.Getenv(googleAPIUseS2AEnv)) == "true" +} + +// getClientCertificateSource returns a default client certificate source, if +// not provided by the user. +// +// A nil default source can be returned if the source does not exist. 
Any exceptions +// encountered while initializing the default source will be reported as client +// error (ex. corrupt metadata file). +// +// Important Note: For now, the environment variable GOOGLE_API_USE_CLIENT_CERTIFICATE +// must be set to "true" to allow certificate to be used (including user provided +// certificates). For details, see AIP-4114. +func getClientCertificateSource(settings *DialSettings) (cert.Source, error) { + if !isClientCertificateEnabled() { + return nil, nil + } else if settings.ClientCertSource != nil { + return settings.ClientCertSource, nil + } else { + return cert.DefaultSource() + } +} + +func isClientCertificateEnabled() bool { + useClientCert := os.Getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE") + // TODO(andyrzhao): Update default to return "true" after DCA feature is fully released. + return strings.ToLower(useClientCert) == "true" +} + +// getEndpoint returns the endpoint for the service, taking into account the +// user-provided endpoint override "settings.Endpoint". +// +// If no endpoint override is specified, we will either return the default endpoint or +// the default mTLS endpoint if a client certificate is available. +// +// You can override the default endpoint choice (mtls vs. regular) by setting the +// GOOGLE_API_USE_MTLS_ENDPOINT environment variable. +// +// If the endpoint override is an address (host:port) rather than full base +// URL (ex. https://...), then the user-provided address will be merged into +// the default endpoint. For example, WithEndpoint("myhost:8000") and +// WithDefaultEndpoint("https://foo.com/bar/baz") will return "https://myhost:8080/bar/baz" +func getEndpoint(settings *DialSettings, clientCertSource cert.Source) (string, error) { + if settings.Endpoint == "" { + mtlsMode := getMTLSMode() + if mtlsMode == mTLSModeAlways || (clientCertSource != nil && mtlsMode == mTLSModeAuto) { + return settings.DefaultMTLSEndpoint, nil + } + return settings.DefaultEndpoint, nil + } + if strings.Contains(settings.Endpoint, "://") { + // User passed in a full URL path, use it verbatim. + return settings.Endpoint, nil + } + if settings.DefaultEndpoint == "" { + // If DefaultEndpoint is not configured, use the user provided endpoint verbatim. + // This allows a naked "host[:port]" URL to be used with GRPC Direct Path. + return settings.Endpoint, nil + } + + // Assume user-provided endpoint is host[:port], merge it with the default endpoint. + return mergeEndpoints(settings.DefaultEndpoint, settings.Endpoint) +} + +func getMTLSMode() string { + mode := os.Getenv("GOOGLE_API_USE_MTLS_ENDPOINT") + if mode == "" { + mode = os.Getenv("GOOGLE_API_USE_MTLS") // Deprecated. + } + if mode == "" { + return mTLSModeAuto + } + return strings.ToLower(mode) +} + +func mergeEndpoints(baseURL, newHost string) (string, error) { + u, err := url.Parse(fixScheme(baseURL)) + if err != nil { + return "", err + } + return strings.Replace(baseURL, u.Host, newHost, 1), nil +} + +func fixScheme(baseURL string) string { + if !strings.Contains(baseURL, "://") { + return "https://" + baseURL + } + return baseURL +} + +// GetGRPCTransportConfigAndEndpoint returns an instance of credentials.TransportCredentials, and the +// corresponding endpoint to use for GRPC client. 
+func GetGRPCTransportConfigAndEndpoint(settings *DialSettings) (credentials.TransportCredentials, string, error) { + config, err := getTransportConfig(settings) + if err != nil { + return nil, "", err + } + + defaultTransportCreds := credentials.NewTLS(&tls.Config{ + GetClientCertificate: config.clientCertSource, + }) + if config.s2aAddress == "" { + return defaultTransportCreds, config.endpoint, nil + } + + var fallbackOpts *s2a.FallbackOptions + // In case of S2A failure, fall back to the endpoint that would've been used without S2A. + if fallbackHandshake, err := fallback.DefaultFallbackClientHandshakeFunc(config.endpoint); err == nil { + fallbackOpts = &s2a.FallbackOptions{ + FallbackClientHandshakeFunc: fallbackHandshake, + } + } + + s2aTransportCreds, err := s2a.NewClientCreds(&s2a.ClientOptions{ + S2AAddress: config.s2aAddress, + FallbackOpts: fallbackOpts, + }) + if err != nil { + // Use default if we cannot initialize S2A client transport credentials. + return defaultTransportCreds, config.endpoint, nil + } + return s2aTransportCreds, config.s2aMTLSEndpoint, nil +} + +// GetHTTPTransportConfigAndEndpoint returns a client certificate source, a function for dialing MTLS with S2A, +// and the endpoint to use for HTTP client. +func GetHTTPTransportConfigAndEndpoint(settings *DialSettings) (cert.Source, func(context.Context, string, string) (net.Conn, error), string, error) { + config, err := getTransportConfig(settings) + if err != nil { + return nil, nil, "", err + } + + if config.s2aAddress == "" { + return config.clientCertSource, nil, config.endpoint, nil + } + + var fallbackOpts *s2a.FallbackOptions + // In case of S2A failure, fall back to the endpoint that would've been used without S2A. + if fallbackURL, err := url.Parse(config.endpoint); err == nil { + if fallbackDialer, fallbackServerAddr, err := fallback.DefaultFallbackDialerAndAddress(fallbackURL.Hostname()); err == nil { + fallbackOpts = &s2a.FallbackOptions{ + FallbackDialer: &s2a.FallbackDialer{ + Dialer: fallbackDialer, + ServerAddr: fallbackServerAddr, + }, + } + } + } + + dialTLSContextFunc := s2a.NewS2ADialTLSContextFunc(&s2a.ClientOptions{ + S2AAddress: config.s2aAddress, + FallbackOpts: fallbackOpts, + }) + return nil, dialTLSContextFunc, config.s2aMTLSEndpoint, nil +} + +// mtlsEndpointEnabledForS2A checks if the endpoint is indeed MTLS-enabled, so that we can use S2A for MTLS connection. +var mtlsEndpointEnabledForS2A = func() bool { + // TODO(xmenxk): determine this via discovery config. + return true +} diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/api/internal/cert/secureconnect_cert.go temporal-1.22.5/src/vendor/google.golang.org/api/internal/cert/secureconnect_cert.go --- temporal-1.21.5-1/src/vendor/google.golang.org/api/internal/cert/secureconnect_cert.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/api/internal/cert/secureconnect_cert.go 2024-02-23 09:46:14.000000000 +0000 @@ -18,7 +18,6 @@ "encoding/json" "errors" "fmt" - "io/ioutil" "os" "os/exec" "os/user" @@ -59,7 +58,7 @@ configFilePath = filepath.Join(user.HomeDir, metadataPath, metadataFile) } - file, err := ioutil.ReadFile(configFilePath) + file, err := os.ReadFile(configFilePath) if err != nil { if errors.Is(err, os.ErrNotExist) { // Config file missing means Secure Connect is not supported. 
diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/api/internal/creds.go temporal-1.22.5/src/vendor/google.golang.org/api/internal/creds.go --- temporal-1.21.5-1/src/vendor/google.golang.org/api/internal/creds.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/api/internal/creds.go 2024-02-23 09:46:14.000000000 +0000 @@ -10,7 +10,6 @@ "encoding/json" "errors" "fmt" - "io/ioutil" "net" "net/http" "os" @@ -48,7 +47,7 @@ return credentialsFromJSON(ctx, ds.CredentialsJSON, ds) } if ds.CredentialsFile != "" { - data, err := ioutil.ReadFile(ds.CredentialsFile) + data, err := os.ReadFile(ds.CredentialsFile) if err != nil { return nil, fmt.Errorf("cannot read credentials file: %v", err) } @@ -92,7 +91,7 @@ // Determine configurations for the OAuth2 transport, which is separate from the API transport. // The OAuth2 transport and endpoint will be configured for mTLS if applicable. - clientCertSource, oauth2Endpoint, err := GetClientCertificateSourceAndEndpoint(oauth2DialSettings(ds)) + clientCertSource, oauth2Endpoint, err := getClientCertificateSourceAndEndpoint(oauth2DialSettings(ds)) if err != nil { return nil, err } diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/api/internal/dca.go temporal-1.22.5/src/vendor/google.golang.org/api/internal/dca.go --- temporal-1.21.5-1/src/vendor/google.golang.org/api/internal/dca.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/api/internal/dca.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,144 +0,0 @@ -// Copyright 2020 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package dca contains utils for implementing Device Certificate -// Authentication according to https://google.aip.dev/auth/4114 -// -// The overall logic for DCA is as follows: -// 1. If both endpoint override and client certificate are specified, use them as is. -// 2. If user does not specify client certificate, we will attempt to use default -// client certificate. -// 3. If user does not specify endpoint override, we will use defaultMtlsEndpoint if -// client certificate is available and defaultEndpoint otherwise. -// -// Implications of the above logic: -// 1. If the user specifies a non-mTLS endpoint override but client certificate is -// available, we will pass along the cert anyway and let the server decide what to do. -// 2. If the user specifies an mTLS endpoint override but client certificate is not -// available, we will not fail-fast, but let backend throw error when connecting. -// -// We would like to avoid introducing client-side logic that parses whether the -// endpoint override is an mTLS url, since the url pattern may change at anytime. -// -// This package is not intended for use by end developers. Use the -// google.golang.org/api/option package to configure API clients. - -// Package internal supports the options and transport packages. -package internal - -import ( - "net/url" - "os" - "strings" - - "google.golang.org/api/internal/cert" -) - -const ( - mTLSModeAlways = "always" - mTLSModeNever = "never" - mTLSModeAuto = "auto" -) - -// GetClientCertificateSourceAndEndpoint is a convenience function that invokes -// getClientCertificateSource and getEndpoint sequentially and returns the client -// cert source and endpoint as a tuple. 
-func GetClientCertificateSourceAndEndpoint(settings *DialSettings) (cert.Source, string, error) { - clientCertSource, err := getClientCertificateSource(settings) - if err != nil { - return nil, "", err - } - endpoint, err := getEndpoint(settings, clientCertSource) - if err != nil { - return nil, "", err - } - return clientCertSource, endpoint, nil -} - -// getClientCertificateSource returns a default client certificate source, if -// not provided by the user. -// -// A nil default source can be returned if the source does not exist. Any exceptions -// encountered while initializing the default source will be reported as client -// error (ex. corrupt metadata file). -// -// Important Note: For now, the environment variable GOOGLE_API_USE_CLIENT_CERTIFICATE -// must be set to "true" to allow certificate to be used (including user provided -// certificates). For details, see AIP-4114. -func getClientCertificateSource(settings *DialSettings) (cert.Source, error) { - if !isClientCertificateEnabled() { - return nil, nil - } else if settings.ClientCertSource != nil { - return settings.ClientCertSource, nil - } else { - return cert.DefaultSource() - } -} - -func isClientCertificateEnabled() bool { - useClientCert := os.Getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE") - // TODO(andyrzhao): Update default to return "true" after DCA feature is fully released. - return strings.ToLower(useClientCert) == "true" -} - -// getEndpoint returns the endpoint for the service, taking into account the -// user-provided endpoint override "settings.Endpoint". -// -// If no endpoint override is specified, we will either return the default endpoint or -// the default mTLS endpoint if a client certificate is available. -// -// You can override the default endpoint choice (mtls vs. regular) by setting the -// GOOGLE_API_USE_MTLS_ENDPOINT environment variable. -// -// If the endpoint override is an address (host:port) rather than full base -// URL (ex. https://...), then the user-provided address will be merged into -// the default endpoint. For example, WithEndpoint("myhost:8000") and -// WithDefaultEndpoint("https://foo.com/bar/baz") will return "https://myhost:8080/bar/baz" -func getEndpoint(settings *DialSettings, clientCertSource cert.Source) (string, error) { - if settings.Endpoint == "" { - mtlsMode := getMTLSMode() - if mtlsMode == mTLSModeAlways || (clientCertSource != nil && mtlsMode == mTLSModeAuto) { - return settings.DefaultMTLSEndpoint, nil - } - return settings.DefaultEndpoint, nil - } - if strings.Contains(settings.Endpoint, "://") { - // User passed in a full URL path, use it verbatim. - return settings.Endpoint, nil - } - if settings.DefaultEndpoint == "" { - // If DefaultEndpoint is not configured, use the user provided endpoint verbatim. - // This allows a naked "host[:port]" URL to be used with GRPC Direct Path. - return settings.Endpoint, nil - } - - // Assume user-provided endpoint is host[:port], merge it with the default endpoint. - return mergeEndpoints(settings.DefaultEndpoint, settings.Endpoint) -} - -func getMTLSMode() string { - mode := os.Getenv("GOOGLE_API_USE_MTLS_ENDPOINT") - if mode == "" { - mode = os.Getenv("GOOGLE_API_USE_MTLS") // Deprecated. 
- } - if mode == "" { - return mTLSModeAuto - } - return strings.ToLower(mode) -} - -func mergeEndpoints(baseURL, newHost string) (string, error) { - u, err := url.Parse(fixScheme(baseURL)) - if err != nil { - return "", err - } - return strings.Replace(baseURL, u.Host, newHost, 1), nil -} - -func fixScheme(baseURL string) string { - if !strings.Contains(baseURL, "://") { - return "https://" + baseURL - } - return baseURL -} diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/api/internal/gensupport/media.go temporal-1.22.5/src/vendor/google.golang.org/api/internal/gensupport/media.go --- temporal-1.21.5-1/src/vendor/google.golang.org/api/internal/gensupport/media.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/api/internal/gensupport/media.go 2024-02-23 09:46:14.000000000 +0000 @@ -8,7 +8,6 @@ "bytes" "fmt" "io" - "io/ioutil" "mime" "mime/multipart" "net/http" @@ -222,8 +221,8 @@ toCleanup = append(toCleanup, combined) if fb != nil && fm != nil { getBody = func() (io.ReadCloser, error) { - rb := ioutil.NopCloser(fb()) - rm := ioutil.NopCloser(fm()) + rb := io.NopCloser(fb()) + rm := io.NopCloser(fm()) var mimeBoundary string if _, params, err := mime.ParseMediaType(ctype); err == nil { mimeBoundary = params["boundary"] @@ -243,7 +242,7 @@ fb := readerFunc(body) if fb != nil { getBody = func() (io.ReadCloser, error) { - rb := ioutil.NopCloser(fb()) + rb := io.NopCloser(fb()) toCleanup = append(toCleanup, rb) return rb, nil } diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/api/internal/gensupport/resumable.go temporal-1.22.5/src/vendor/google.golang.org/api/internal/gensupport/resumable.go --- temporal-1.21.5-1/src/vendor/google.golang.org/api/internal/gensupport/resumable.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/api/internal/gensupport/resumable.go 2024-02-23 09:46:14.000000000 +0000 @@ -43,8 +43,8 @@ // retries should happen. ChunkRetryDeadline time.Duration - // Track current request invocation ID and attempt count for retry metric - // headers. + // Track current request invocation ID and attempt count for retry metrics + // and idempotency headers. invocationID string attempts int } @@ -81,10 +81,15 @@ req.Header.Set("Content-Type", rx.MediaType) req.Header.Set("User-Agent", rx.UserAgent) + // TODO(b/274504690): Consider dropping gccl-invocation-id key since it + // duplicates the X-Goog-Gcs-Idempotency-Token header (added in v0.115.0). baseXGoogHeader := "gl-go/" + GoVersion() + " gdcl/" + internal.Version invocationHeader := fmt.Sprintf("gccl-invocation-id/%s gccl-attempt-count/%d", rx.invocationID, rx.attempts) req.Header.Set("X-Goog-Api-Client", strings.Join([]string{baseXGoogHeader, invocationHeader}, " ")) + // Set idempotency token header which is used by GCS uploads. + req.Header.Set("X-Goog-Gcs-Idempotency-Token", rx.invocationID) + // Google's upload endpoint uses status code 308 for a // different purpose than the "308 Permanent Redirect" // since-standardized in RFC 7238. 
Because of the conflict in diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/api/internal/gensupport/send.go temporal-1.22.5/src/vendor/google.golang.org/api/internal/gensupport/send.go --- temporal-1.21.5-1/src/vendor/google.golang.org/api/internal/gensupport/send.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/api/internal/gensupport/send.go 2024-02-23 09:46:14.000000000 +0000 @@ -138,9 +138,14 @@ } return resp, ctx.Err() } + + // Set retry metrics and idempotency headers for GCS. + // TODO(b/274504690): Consider dropping gccl-invocation-id key since it + // duplicates the X-Goog-Gcs-Idempotency-Token header (added in v0.115.0). invocationHeader := fmt.Sprintf("gccl-invocation-id/%s gccl-attempt-count/%d", invocationID, attempts) xGoogHeader := strings.Join([]string{invocationHeader, baseXGoogHeader}, " ") req.Header.Set("X-Goog-Api-Client", xGoogHeader) + req.Header.Set("X-Goog-Gcs-Idempotency-Token", invocationID) resp, err = client.Do(req.WithContext(ctx)) diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/api/internal/impersonate/impersonate.go temporal-1.22.5/src/vendor/google.golang.org/api/internal/impersonate/impersonate.go --- temporal-1.21.5-1/src/vendor/google.golang.org/api/internal/impersonate/impersonate.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/api/internal/impersonate/impersonate.go 2024-02-23 09:46:14.000000000 +0000 @@ -11,7 +11,6 @@ "encoding/json" "fmt" "io" - "io/ioutil" "net/http" "time" @@ -105,7 +104,7 @@ return nil, fmt.Errorf("impersonate: unable to generate access token: %v", err) } defer resp.Body.Close() - body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + body, err := io.ReadAll(io.LimitReader(resp.Body, 1<<20)) if err != nil { return nil, fmt.Errorf("impersonate: unable to read body: %v", err) } diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/api/internal/s2a.go temporal-1.22.5/src/vendor/google.golang.org/api/internal/s2a.go --- temporal-1.21.5-1/src/vendor/google.golang.org/api/internal/s2a.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/api/internal/s2a.go 2024-02-23 09:46:14.000000000 +0000 @@ -0,0 +1,136 @@ +// Copyright 2023 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package internal + +import ( + "encoding/json" + "log" + "sync" + "time" + + "cloud.google.com/go/compute/metadata" +) + +const configEndpointSuffix = "googleAutoMtlsConfiguration" + +// The period an MTLS config can be reused before needing refresh. +var configExpiry = time.Hour + +// GetS2AAddress returns the S2A address to be reached via plaintext connection. +func GetS2AAddress() string { + c, err := getMetadataMTLSAutoConfig().Config() + if err != nil { + return "" + } + if !c.Valid() { + return "" + } + return c.S2A.PlaintextAddress +} + +type mtlsConfigSource interface { + Config() (*mtlsConfig, error) +} + +// mdsMTLSAutoConfigSource is an instance of reuseMTLSConfigSource, with metadataMTLSAutoConfig as its config source. +var ( + mdsMTLSAutoConfigSource mtlsConfigSource + once sync.Once +) + +// getMetadataMTLSAutoConfig returns mdsMTLSAutoConfigSource, which is backed by config from MDS with auto-refresh. 
+func getMetadataMTLSAutoConfig() mtlsConfigSource { + once.Do(func() { + mdsMTLSAutoConfigSource = &reuseMTLSConfigSource{ + src: &metadataMTLSAutoConfig{}, + } + }) + return mdsMTLSAutoConfigSource +} + +// reuseMTLSConfigSource caches a valid version of mtlsConfig, and uses `src` to refresh upon config expiry. +// It implements the mtlsConfigSource interface, so calling Config() on it returns an mtlsConfig. +type reuseMTLSConfigSource struct { + src mtlsConfigSource // src.Config() is called when config is expired + mu sync.Mutex // mutex guards config + config *mtlsConfig // cached config +} + +func (cs *reuseMTLSConfigSource) Config() (*mtlsConfig, error) { + cs.mu.Lock() + defer cs.mu.Unlock() + + if cs.config.Valid() { + return cs.config, nil + } + c, err := cs.src.Config() + if err != nil { + return nil, err + } + cs.config = c + return c, nil +} + +// metadataMTLSAutoConfig is an implementation of the interface mtlsConfigSource +// It has the logic to query MDS and return an mtlsConfig +type metadataMTLSAutoConfig struct{} + +var httpGetMetadataMTLSConfig = func() (string, error) { + return metadata.Get(configEndpointSuffix) +} + +func (cs *metadataMTLSAutoConfig) Config() (*mtlsConfig, error) { + resp, err := httpGetMetadataMTLSConfig() + if err != nil { + log.Printf("querying MTLS config from MDS endpoint failed: %v", err) + return defaultMTLSConfig(), nil + } + var config mtlsConfig + err = json.Unmarshal([]byte(resp), &config) + if err != nil { + log.Printf("unmarshalling MTLS config from MDS endpoint failed: %v", err) + return defaultMTLSConfig(), nil + } + + if config.S2A == nil { + log.Printf("returned MTLS config from MDS endpoint is invalid: %v", config) + return defaultMTLSConfig(), nil + } + + // set new expiry + config.Expiry = time.Now().Add(configExpiry) + return &config, nil +} + +func defaultMTLSConfig() *mtlsConfig { + return &mtlsConfig{ + S2A: &s2aAddresses{ + PlaintextAddress: "", + MTLSAddress: "", + }, + Expiry: time.Now().Add(configExpiry), + } +} + +// s2aAddresses contains the plaintext and/or MTLS S2A addresses. +type s2aAddresses struct { + // PlaintextAddress is the plaintext address to reach S2A + PlaintextAddress string `json:"plaintext_address"` + // MTLSAddress is the MTLS address to reach S2A + MTLSAddress string `json:"mtls_address"` +} + +// mtlsConfig contains the configuration for establishing MTLS connections with Google APIs. +type mtlsConfig struct { + S2A *s2aAddresses `json:"s2a"` + Expiry time.Time +} + +func (c *mtlsConfig) Valid() bool { + return c != nil && c.S2A != nil && !c.expired() +} +func (c *mtlsConfig) expired() bool { + return c.Expiry.Before(time.Now()) +} diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/api/internal/settings.go temporal-1.22.5/src/vendor/google.golang.org/api/internal/settings.go --- temporal-1.21.5-1/src/vendor/google.golang.org/api/internal/settings.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/api/internal/settings.go 2024-02-23 09:46:14.000000000 +0000 @@ -46,6 +46,7 @@ SkipValidation bool ImpersonationConfig *impersonate.Config EnableDirectPath bool + EnableDirectPathXds bool AllowNonDefaultServiceAccount bool // Google API system parameters. 
For more information please read: diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/api/internal/version.go temporal-1.22.5/src/vendor/google.golang.org/api/internal/version.go --- temporal-1.21.5-1/src/vendor/google.golang.org/api/internal/version.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/api/internal/version.go 2024-02-23 09:46:14.000000000 +0000 @@ -5,4 +5,4 @@ package internal // Version is the current tagged release of the library. -const Version = "0.114.0" +const Version = "0.128.0" diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/api/option/internaloption/internaloption.go temporal-1.22.5/src/vendor/google.golang.org/api/option/internaloption/internaloption.go --- temporal-1.21.5-1/src/vendor/google.golang.org/api/option/internaloption/internaloption.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/api/option/internaloption/internaloption.go 2024-02-23 09:46:14.000000000 +0000 @@ -67,6 +67,21 @@ o.EnableDirectPath = bool(e) } +// EnableDirectPathXds returns a ClientOption that overrides the default +// DirectPath type. It is only valid when DirectPath is enabled. +// +// It should only be used internally by generated clients. +// This is an EXPERIMENTAL API and may be changed or removed in the future. +func EnableDirectPathXds() option.ClientOption { + return enableDirectPathXds(true) +} + +type enableDirectPathXds bool + +func (x enableDirectPathXds) Apply(o *internal.DialSettings) { + o.EnableDirectPathXds = bool(x) +} + // AllowNonDefaultServiceAccount returns a ClientOption that overrides the default // requirement for using the default service account for DirectPath. // diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/api/storage/v1/storage-gen.go temporal-1.22.5/src/vendor/google.golang.org/api/storage/v1/storage-gen.go --- temporal-1.21.5-1/src/vendor/google.golang.org/api/storage/v1/storage-gen.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/api/storage/v1/storage-gen.go 2024-02-23 09:46:14.000000000 +0000 @@ -78,6 +78,7 @@ var _ = strings.Replace var _ = context.Canceled var _ = internaloption.WithDefaultEndpoint +var _ = internal.Version const apiId = "storage:v1" const apiName = "storage" diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/api/transport/grpc/dial.go temporal-1.22.5/src/vendor/google.golang.org/api/transport/grpc/dial.go --- temporal-1.21.5-1/src/vendor/google.golang.org/api/transport/grpc/dial.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/api/transport/grpc/dial.go 2024-02-23 09:46:14.000000000 +0000 @@ -9,7 +9,6 @@ import ( "context" - "crypto/tls" "errors" "log" "net" @@ -22,7 +21,6 @@ "google.golang.org/api/internal" "google.golang.org/api/option" "google.golang.org/grpc" - "google.golang.org/grpc/credentials" grpcgoogle "google.golang.org/grpc/credentials/google" grpcinsecure "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/credentials/oauth" @@ -122,18 +120,13 @@ if o.GRPCConn != nil { return o.GRPCConn, nil } - clientCertSource, endpoint, err := internal.GetClientCertificateSourceAndEndpoint(o) + transportCreds, endpoint, err := internal.GetGRPCTransportConfigAndEndpoint(o) if err != nil { return nil, err } - var transportCreds credentials.TransportCredentials if insecure { transportCreds = grpcinsecure.NewCredentials() - } else { - transportCreds = credentials.NewTLS(&tls.Config{ - GetClientCertificate: clientCertSource, - }) } // 
Initialize gRPC dial options with transport-level security options. @@ -171,7 +164,7 @@ grpcOpts = append(grpcOpts, timeoutDialerOption) } // Check if google-c2p resolver is enabled for DirectPath - if strings.EqualFold(os.Getenv(enableDirectPathXds), "true") { + if isDirectPathXdsUsed(o) { // google-c2p resolver target must not have a port number if addr, _, err := net.SplitHostPort(endpoint); err == nil { endpoint = "google-c2p:///" + addr @@ -258,6 +251,19 @@ return true } +func isDirectPathXdsUsed(o *internal.DialSettings) bool { + // Method 1: Enable DirectPath xDS by env; + if strings.EqualFold(os.Getenv(enableDirectPathXds), "true") { + return true + } + // Method 2: Enable DirectPath xDS by option; + if o.EnableDirectPathXds { + return true + } + return false + +} + func isTokenSourceDirectPathCompatible(ts oauth2.TokenSource, o *internal.DialSettings) bool { if ts == nil { return false diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/api/transport/http/dial.go temporal-1.22.5/src/vendor/google.golang.org/api/transport/http/dial.go --- temporal-1.21.5-1/src/vendor/google.golang.org/api/transport/http/dial.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/api/transport/http/dial.go 2024-02-23 09:46:14.000000000 +0000 @@ -33,7 +33,7 @@ if err != nil { return nil, "", err } - clientCertSource, endpoint, err := internal.GetClientCertificateSourceAndEndpoint(settings) + clientCertSource, dialTLSContext, endpoint, err := internal.GetHTTPTransportConfigAndEndpoint(settings) if err != nil { return nil, "", err } @@ -41,7 +41,8 @@ if settings.HTTPClient != nil { return settings.HTTPClient, endpoint, nil } - trans, err := newTransport(ctx, defaultBaseTransport(ctx, clientCertSource), settings) + + trans, err := newTransport(ctx, defaultBaseTransport(ctx, clientCertSource, dialTLSContext), settings) if err != nil { return nil, "", err } @@ -152,7 +153,7 @@ // Otherwise, use a default transport, taking most defaults from // http.DefaultTransport. // If TLSCertificate is available, set TLSClientConfig as well. -func defaultBaseTransport(ctx context.Context, clientCertSource cert.Source) http.RoundTripper { +func defaultBaseTransport(ctx context.Context, clientCertSource cert.Source, dialTLSContext func(context.Context, string, string) (net.Conn, error)) http.RoundTripper { if appengineUrlfetchHook != nil { return appengineUrlfetchHook(ctx) } @@ -171,6 +172,10 @@ GetClientCertificate: clientCertSource, } } + if dialTLSContext != nil { + // If DialTLSContext is set, TLSClientConfig wil be ignored + trans.DialTLSContext = dialTLSContext + } configureHTTP2(trans) diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go temporal-1.22.5/src/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go --- temporal-1.21.5-1/src/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go 2024-02-23 09:46:15.000000000 +0000 @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.21.9 +// protoc v3.21.12 // source: google/api/field_behavior.proto package annotations @@ -78,6 +78,19 @@ // a non-empty value will be returned. The user will not be aware of what // non-empty value to expect. 
FieldBehavior_NON_EMPTY_DEFAULT FieldBehavior = 7 + // Denotes that the field in a resource (a message annotated with + // google.api.resource) is used in the resource name to uniquely identify the + // resource. For AIP-compliant APIs, this should only be applied to the + // `name` field on the resource. + // + // This behavior should not be applied to references to other resources within + // the message. + // + // The identifier field of resources often have different field behavior + // depending on the request it is embedded in (e.g. for Create methods name + // is optional and unused, while for Update methods it is required). Instead + // of method-specific annotations, only `IDENTIFIER` is required. + FieldBehavior_IDENTIFIER FieldBehavior = 8 ) // Enum value maps for FieldBehavior. @@ -91,6 +104,7 @@ 5: "IMMUTABLE", 6: "UNORDERED_LIST", 7: "NON_EMPTY_DEFAULT", + 8: "IDENTIFIER", } FieldBehavior_value = map[string]int32{ "FIELD_BEHAVIOR_UNSPECIFIED": 0, @@ -101,6 +115,7 @@ "IMMUTABLE": 5, "UNORDERED_LIST": 6, "NON_EMPTY_DEFAULT": 7, + "IDENTIFIER": 8, } ) @@ -169,7 +184,7 @@ 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2a, - 0xa6, 0x01, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, + 0xb6, 0x01, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x12, 0x1e, 0x0a, 0x1a, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x42, 0x45, 0x48, 0x41, 0x56, 0x49, 0x4f, 0x52, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, @@ -179,7 +194,8 @@ 0x0a, 0x09, 0x49, 0x4d, 0x4d, 0x55, 0x54, 0x41, 0x42, 0x4c, 0x45, 0x10, 0x05, 0x12, 0x12, 0x0a, 0x0e, 0x55, 0x4e, 0x4f, 0x52, 0x44, 0x45, 0x52, 0x45, 0x44, 0x5f, 0x4c, 0x49, 0x53, 0x54, 0x10, 0x06, 0x12, 0x15, 0x0a, 0x11, 0x4e, 0x4f, 0x4e, 0x5f, 0x45, 0x4d, 0x50, 0x54, 0x59, 0x5f, 0x44, - 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x07, 0x3a, 0x60, 0x0a, 0x0e, 0x66, 0x69, 0x65, 0x6c, + 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x07, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4e, + 0x54, 0x49, 0x46, 0x49, 0x45, 0x52, 0x10, 0x08, 0x3a, 0x60, 0x0a, 0x0e, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9c, 0x08, 0x20, 0x03, 0x28, 0x0e, diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go temporal-1.22.5/src/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go --- temporal-1.21.5-1/src/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go 2024-02-23 09:46:15.000000000 +0000 @@ -0,0 +1,295 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v3.21.12 +// source: google/api/field_info.proto + +package annotations + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The standard format of a field value. The supported formats are all backed +// by either an RFC defined by the IETF or a Google-defined AIP. +type FieldInfo_Format int32 + +const ( + // Default, unspecified value. + FieldInfo_FORMAT_UNSPECIFIED FieldInfo_Format = 0 + // Universally Unique Identifier, version 4, value as defined by + // https://datatracker.ietf.org/doc/html/rfc4122. The value may be + // normalized to entirely lowercase letters. For example, the value + // `F47AC10B-58CC-0372-8567-0E02B2C3D479` would be normalized to + // `f47ac10b-58cc-0372-8567-0e02b2c3d479`. + FieldInfo_UUID4 FieldInfo_Format = 1 + // Internet Protocol v4 value as defined by [RFC + // 791](https://datatracker.ietf.org/doc/html/rfc791). The value may be + // condensed, with leading zeros in each octet stripped. For example, + // `001.022.233.040` would be condensed to `1.22.233.40`. + FieldInfo_IPV4 FieldInfo_Format = 2 + // Internet Protocol v6 value as defined by [RFC + // 2460](https://datatracker.ietf.org/doc/html/rfc2460). The value may be + // normalized to entirely lowercase letters, and zero-padded partial and + // empty octets. For example, the value `2001:DB8::` would be normalized to + // `2001:0db8:0:0`. + FieldInfo_IPV6 FieldInfo_Format = 3 + // An IP address in either v4 or v6 format as described by the individual + // values defined herein. See the comments on the IPV4 and IPV6 types for + // allowed normalizations of each. + FieldInfo_IPV4_OR_IPV6 FieldInfo_Format = 4 +) + +// Enum value maps for FieldInfo_Format. 
+var ( + FieldInfo_Format_name = map[int32]string{ + 0: "FORMAT_UNSPECIFIED", + 1: "UUID4", + 2: "IPV4", + 3: "IPV6", + 4: "IPV4_OR_IPV6", + } + FieldInfo_Format_value = map[string]int32{ + "FORMAT_UNSPECIFIED": 0, + "UUID4": 1, + "IPV4": 2, + "IPV6": 3, + "IPV4_OR_IPV6": 4, + } +) + +func (x FieldInfo_Format) Enum() *FieldInfo_Format { + p := new(FieldInfo_Format) + *p = x + return p +} + +func (x FieldInfo_Format) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FieldInfo_Format) Descriptor() protoreflect.EnumDescriptor { + return file_google_api_field_info_proto_enumTypes[0].Descriptor() +} + +func (FieldInfo_Format) Type() protoreflect.EnumType { + return &file_google_api_field_info_proto_enumTypes[0] +} + +func (x FieldInfo_Format) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use FieldInfo_Format.Descriptor instead. +func (FieldInfo_Format) EnumDescriptor() ([]byte, []int) { + return file_google_api_field_info_proto_rawDescGZIP(), []int{0, 0} +} + +// Rich semantic information of an API field beyond basic typing. +type FieldInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The standard format of a field value. This does not explicitly configure + // any API consumer, just documents the API's format for the field it is + // applied to. + Format FieldInfo_Format `protobuf:"varint,1,opt,name=format,proto3,enum=google.api.FieldInfo_Format" json:"format,omitempty"` +} + +func (x *FieldInfo) Reset() { + *x = FieldInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_field_info_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FieldInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FieldInfo) ProtoMessage() {} + +func (x *FieldInfo) ProtoReflect() protoreflect.Message { + mi := &file_google_api_field_info_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FieldInfo.ProtoReflect.Descriptor instead. +func (*FieldInfo) Descriptor() ([]byte, []int) { + return file_google_api_field_info_proto_rawDescGZIP(), []int{0} +} + +func (x *FieldInfo) GetFormat() FieldInfo_Format { + if x != nil { + return x.Format + } + return FieldInfo_FORMAT_UNSPECIFIED +} + +var file_google_api_field_info_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.FieldOptions)(nil), + ExtensionType: (*FieldInfo)(nil), + Field: 291403980, + Name: "google.api.field_info", + Tag: "bytes,291403980,opt,name=field_info", + Filename: "google/api/field_info.proto", + }, +} + +// Extension fields to descriptorpb.FieldOptions. +var ( + // Rich semantic descriptor of an API field beyond the basic typing. 
+ // + // Examples: + // + // string request_id = 1 [(google.api.field_info).format = UUID4]; + // string old_ip_address = 2 [(google.api.field_info).format = IPV4]; + // string new_ip_address = 3 [(google.api.field_info).format = IPV6]; + // string actual_ip_address = 4 [ + // (google.api.field_info).format = IPV4_OR_IPV6 + // ]; + // + // optional google.api.FieldInfo field_info = 291403980; + E_FieldInfo = &file_google_api_field_info_proto_extTypes[0] +) + +var File_google_api_field_info_proto protoreflect.FileDescriptor + +var file_google_api_field_info_proto_rawDesc = []byte{ + 0x0a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x94, 0x01, 0x0a, 0x09, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x34, 0x0a, 0x06, 0x66, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, + 0x2e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, + 0x51, 0x0a, 0x06, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x16, 0x0a, 0x12, 0x46, 0x4f, 0x52, + 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, + 0x00, 0x12, 0x09, 0x0a, 0x05, 0x55, 0x55, 0x49, 0x44, 0x34, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, + 0x49, 0x50, 0x56, 0x34, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x49, 0x50, 0x56, 0x36, 0x10, 0x03, + 0x12, 0x10, 0x0a, 0x0c, 0x49, 0x50, 0x56, 0x34, 0x5f, 0x4f, 0x52, 0x5f, 0x49, 0x50, 0x56, 0x36, + 0x10, 0x04, 0x3a, 0x57, 0x0a, 0x0a, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x69, 0x6e, 0x66, 0x6f, + 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0xcc, 0xf1, 0xf9, 0x8a, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, + 0x52, 0x09, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x42, 0x6c, 0x0a, 0x0e, 0x63, + 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0e, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, + 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var ( + file_google_api_field_info_proto_rawDescOnce sync.Once + file_google_api_field_info_proto_rawDescData = file_google_api_field_info_proto_rawDesc +) + +func file_google_api_field_info_proto_rawDescGZIP() []byte { + file_google_api_field_info_proto_rawDescOnce.Do(func() { + 
file_google_api_field_info_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_field_info_proto_rawDescData) + }) + return file_google_api_field_info_proto_rawDescData +} + +var file_google_api_field_info_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_google_api_field_info_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_api_field_info_proto_goTypes = []interface{}{ + (FieldInfo_Format)(0), // 0: google.api.FieldInfo.Format + (*FieldInfo)(nil), // 1: google.api.FieldInfo + (*descriptorpb.FieldOptions)(nil), // 2: google.protobuf.FieldOptions +} +var file_google_api_field_info_proto_depIdxs = []int32{ + 0, // 0: google.api.FieldInfo.format:type_name -> google.api.FieldInfo.Format + 2, // 1: google.api.field_info:extendee -> google.protobuf.FieldOptions + 1, // 2: google.api.field_info:type_name -> google.api.FieldInfo + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 2, // [2:3] is the sub-list for extension type_name + 1, // [1:2] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_google_api_field_info_proto_init() } +func file_google_api_field_info_proto_init() { + if File_google_api_field_info_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_api_field_info_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FieldInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_api_field_info_proto_rawDesc, + NumEnums: 1, + NumMessages: 1, + NumExtensions: 1, + NumServices: 0, + }, + GoTypes: file_google_api_field_info_proto_goTypes, + DependencyIndexes: file_google_api_field_info_proto_depIdxs, + EnumInfos: file_google_api_field_info_proto_enumTypes, + MessageInfos: file_google_api_field_info_proto_msgTypes, + ExtensionInfos: file_google_api_field_info_proto_extTypes, + }.Build() + File_google_api_field_info_proto = out.File + file_google_api_field_info_proto_rawDesc = nil + file_google_api_field_info_proto_goTypes = nil + file_google_api_field_info_proto_depIdxs = nil +} diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/genproto/googleapis/iam/v1/alias.go temporal-1.22.5/src/vendor/google.golang.org/genproto/googleapis/iam/v1/alias.go --- temporal-1.21.5-1/src/vendor/google.golang.org/genproto/googleapis/iam/v1/alias.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/genproto/googleapis/iam/v1/alias.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,208 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by aliasgen. DO NOT EDIT. - -// Package iam aliases all exported identifiers in package -// "cloud.google.com/go/iam/apiv1/iampb". 
-// -// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb. -// Please read https://github.com/googleapis/google-cloud-go/blob/main/migration.md -// for more details. -package iam - -import ( - src "cloud.google.com/go/iam/apiv1/iampb" - grpc "google.golang.org/grpc" -) - -// Deprecated: Please use consts in: cloud.google.com/go/iam/apiv1/iampb -const ( - AuditConfigDelta_ACTION_UNSPECIFIED = src.AuditConfigDelta_ACTION_UNSPECIFIED - AuditConfigDelta_ADD = src.AuditConfigDelta_ADD - AuditConfigDelta_REMOVE = src.AuditConfigDelta_REMOVE - AuditLogConfig_ADMIN_READ = src.AuditLogConfig_ADMIN_READ - AuditLogConfig_DATA_READ = src.AuditLogConfig_DATA_READ - AuditLogConfig_DATA_WRITE = src.AuditLogConfig_DATA_WRITE - AuditLogConfig_LOG_TYPE_UNSPECIFIED = src.AuditLogConfig_LOG_TYPE_UNSPECIFIED - BindingDelta_ACTION_UNSPECIFIED = src.BindingDelta_ACTION_UNSPECIFIED - BindingDelta_ADD = src.BindingDelta_ADD - BindingDelta_REMOVE = src.BindingDelta_REMOVE -) - -// Deprecated: Please use vars in: cloud.google.com/go/iam/apiv1/iampb -var ( - AuditConfigDelta_Action_name = src.AuditConfigDelta_Action_name - AuditConfigDelta_Action_value = src.AuditConfigDelta_Action_value - AuditLogConfig_LogType_name = src.AuditLogConfig_LogType_name - AuditLogConfig_LogType_value = src.AuditLogConfig_LogType_value - BindingDelta_Action_name = src.BindingDelta_Action_name - BindingDelta_Action_value = src.BindingDelta_Action_value - File_google_iam_v1_iam_policy_proto = src.File_google_iam_v1_iam_policy_proto - File_google_iam_v1_options_proto = src.File_google_iam_v1_options_proto - File_google_iam_v1_policy_proto = src.File_google_iam_v1_policy_proto -) - -// Specifies the audit configuration for a service. The configuration -// determines which permission types are logged, and what identities, if any, -// are exempted from logging. An AuditConfig must have one or more -// AuditLogConfigs. If there are AuditConfigs for both `allServices` and a -// specific service, the union of the two AuditConfigs is used for that -// service: the log_types specified in each AuditConfig are enabled, and the -// exempted_members in each AuditLogConfig are exempted. Example Policy with -// multiple AuditConfigs: { "audit_configs": [ { "service": "allServices", -// "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [ -// "user:jose@example.com" ] }, { "log_type": "DATA_WRITE" }, { "log_type": -// "ADMIN_READ" } ] }, { "service": "sampleservice.googleapis.com", -// "audit_log_configs": [ { "log_type": "DATA_READ" }, { "log_type": -// "DATA_WRITE", "exempted_members": [ "user:aliya@example.com" ] } ] } ] } For -// sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ -// logging. It also exempts jose@example.com from DATA_READ logging, and -// aliya@example.com from DATA_WRITE logging. -// -// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb -type AuditConfig = src.AuditConfig - -// One delta entry for AuditConfig. Each individual change (only one -// exempted_member in each entry) to a AuditConfig will be a separate entry. -// -// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb -type AuditConfigDelta = src.AuditConfigDelta - -// The type of action performed on an audit configuration in a policy. -// -// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb -type AuditConfigDelta_Action = src.AuditConfigDelta_Action - -// Provides the configuration for logging a type of permissions. 
Example: { -// "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [ -// "user:jose@example.com" ] }, { "log_type": "DATA_WRITE" } ] } This enables -// 'DATA_READ' and 'DATA_WRITE' logging, while exempting jose@example.com from -// DATA_READ logging. -// -// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb -type AuditLogConfig = src.AuditLogConfig - -// The list of valid permission types for which logging can be configured. -// Admin writes are always logged, and are not configurable. -// -// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb -type AuditLogConfig_LogType = src.AuditLogConfig_LogType - -// Associates `members`, or principals, with a `role`. -// -// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb -type Binding = src.Binding - -// One delta entry for Binding. Each individual change (only one member in -// each entry) to a binding will be a separate entry. -// -// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb -type BindingDelta = src.BindingDelta - -// The type of action performed on a Binding in a policy. -// -// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb -type BindingDelta_Action = src.BindingDelta_Action - -// Request message for `GetIamPolicy` method. -// -// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb -type GetIamPolicyRequest = src.GetIamPolicyRequest - -// Encapsulates settings provided to GetIamPolicy. -// -// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb -type GetPolicyOptions = src.GetPolicyOptions - -// IAMPolicyClient is the client API for IAMPolicy service. For semantics -// around ctx use and closing/ending streaming RPCs, please refer to -// https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -// -// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb -type IAMPolicyClient = src.IAMPolicyClient - -// IAMPolicyServer is the server API for IAMPolicy service. -// -// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb -type IAMPolicyServer = src.IAMPolicyServer - -// An Identity and Access Management (IAM) policy, which specifies access -// controls for Google Cloud resources. A `Policy` is a collection of -// `bindings`. A `binding` binds one or more `members`, or principals, to a -// single `role`. Principals can be user accounts, service accounts, Google -// groups, and domains (such as G Suite). A `role` is a named list of -// permissions; each `role` can be an IAM predefined role or a user-created -// custom role. For some types of Google Cloud resources, a `binding` can also -// specify a `condition`, which is a logical expression that allows access to a -// resource only if the expression evaluates to `true`. A condition can add -// constraints based on attributes of the request, the resource, or both. To -// learn which resources support conditions in their IAM policies, see the [IAM -// documentation](https://cloud.google.com/iam/help/conditions/resource-policies). 
-// **JSON example:** { "bindings": [ { "role": -// "roles/resourcemanager.organizationAdmin", "members": [ -// "user:mike@example.com", "group:admins@example.com", "domain:google.com", -// "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": -// "roles/resourcemanager.organizationViewer", "members": [ -// "user:eve@example.com" ], "condition": { "title": "expirable access", -// "description": "Does not grant access after Sep 2020", "expression": -// "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": -// "BwWWja0YfJA=", "version": 3 } **YAML example:** bindings: - members: - -// user:mike@example.com - group:admins@example.com - domain:google.com - -// serviceAccount:my-project-id@appspot.gserviceaccount.com role: -// roles/resourcemanager.organizationAdmin - members: - user:eve@example.com -// role: roles/resourcemanager.organizationViewer condition: title: expirable -// access description: Does not grant access after Sep 2020 expression: -// request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= -// version: 3 For a description of IAM and its features, see the [IAM -// documentation](https://cloud.google.com/iam/docs/). -// -// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb -type Policy = src.Policy - -// The difference delta between two policies. -// -// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb -type PolicyDelta = src.PolicyDelta - -// Request message for `SetIamPolicy` method. -// -// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb -type SetIamPolicyRequest = src.SetIamPolicyRequest - -// Request message for `TestIamPermissions` method. -// -// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb -type TestIamPermissionsRequest = src.TestIamPermissionsRequest - -// Response message for `TestIamPermissions` method. -// -// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb -type TestIamPermissionsResponse = src.TestIamPermissionsResponse - -// UnimplementedIAMPolicyServer can be embedded to have forward compatible -// implementations. -// -// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb -type UnimplementedIAMPolicyServer = src.UnimplementedIAMPolicyServer - -// Deprecated: Please use funcs in: cloud.google.com/go/iam/apiv1/iampb -func NewIAMPolicyClient(cc grpc.ClientConnInterface) IAMPolicyClient { - return src.NewIAMPolicyClient(cc) -} - -// Deprecated: Please use funcs in: cloud.google.com/go/iam/apiv1/iampb -func RegisterIAMPolicyServer(s *grpc.Server, srv IAMPolicyServer) { - src.RegisterIAMPolicyServer(s, srv) -} diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/README.md temporal-1.22.5/src/vendor/google.golang.org/grpc/README.md --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/README.md 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/README.md 2024-02-23 09:46:15.000000000 +0000 @@ -14,21 +14,14 @@ ## Installation -With [Go module][] support (Go 1.11+), simply add the following import +Simply add the following import to your code, and then `go [build|run|test]` +will automatically fetch the necessary dependencies: + ```go import "google.golang.org/grpc" ``` -to your code, and then `go [build|run|test]` will automatically fetch the -necessary dependencies. 
- -Otherwise, to install the `grpc-go` package, run the following command: - -```console -$ go get -u google.golang.org/grpc -``` - > **Note:** If you are trying to access `grpc-go` from **China**, see the > [FAQ](#FAQ) below. @@ -56,15 +49,6 @@ - Set up a VPN and access google.golang.org through that. -- Without Go module support: `git clone` the repo manually: - - ```sh - git clone https://github.com/grpc/grpc-go.git $GOPATH/src/google.golang.org/grpc - ``` - - You will need to do the same for all of grpc's dependencies in `golang.org`, - e.g. `golang.org/x/net`. - - With Go module support: it is possible to use the `replace` feature of `go mod` to create aliases for golang.org packages. In your project's directory: @@ -76,33 +60,13 @@ ``` Again, this will need to be done for all transitive dependencies hosted on - golang.org as well. For details, refer to [golang/go issue #28652](https://github.com/golang/go/issues/28652). + golang.org as well. For details, refer to [golang/go issue + #28652](https://github.com/golang/go/issues/28652). ### Compiling error, undefined: grpc.SupportPackageIsVersion -#### If you are using Go modules: - -Ensure your gRPC-Go version is `require`d at the appropriate version in -the same module containing the generated `.pb.go` files. For example, -`SupportPackageIsVersion6` needs `v1.27.0`, so in your `go.mod` file: - -```go -module - -require ( - google.golang.org/grpc v1.27.0 -) -``` - -#### If you are *not* using Go modules: - -Update the `proto` package, gRPC package, and rebuild the `.proto` files: - -```sh -go get -u github.com/golang/protobuf/{proto,protoc-gen-go} -go get -u google.golang.org/grpc -protoc --go_out=plugins=grpc:. *.proto -``` +Please update to the latest version of gRPC-Go using +`go get google.golang.org/grpc`. ### How to turn on logging @@ -121,9 +85,11 @@ 1. mis-configured transport credentials, connection failed on handshaking 1. bytes disrupted, possibly by a proxy in between 1. server shutdown - 1. Keepalive parameters caused connection shutdown, for example if you have configured - your server to terminate connections regularly to [trigger DNS lookups](https://github.com/grpc/grpc-go/issues/3170#issuecomment-552517779). - If this is the case, you may want to increase your [MaxConnectionAgeGrace](https://pkg.go.dev/google.golang.org/grpc/keepalive?tab=doc#ServerParameters), + 1. Keepalive parameters caused connection shutdown, for example if you have + configured your server to terminate connections regularly to [trigger DNS + lookups](https://github.com/grpc/grpc-go/issues/3170#issuecomment-552517779). + If this is the case, you may want to increase your + [MaxConnectionAgeGrace](https://pkg.go.dev/google.golang.org/grpc/keepalive?tab=doc#ServerParameters), to allow longer RPC calls to finish. It can be tricky to debug this because the error happens on the client side but diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/attributes/attributes.go temporal-1.22.5/src/vendor/google.golang.org/grpc/attributes/attributes.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/attributes/attributes.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/attributes/attributes.go 2024-02-23 09:46:15.000000000 +0000 @@ -25,30 +25,35 @@ // later release. package attributes +import ( + "fmt" + "strings" +) + // Attributes is an immutable struct for storing and retrieving generic // key/value pairs. Keys must be hashable, and users should define their own // types for keys. 
Values should not be modified after they are added to an // Attributes or if they were received from one. If values implement 'Equal(o -// interface{}) bool', it will be called by (*Attributes).Equal to determine -// whether two values with the same key should be considered equal. +// any) bool', it will be called by (*Attributes).Equal to determine whether +// two values with the same key should be considered equal. type Attributes struct { - m map[interface{}]interface{} + m map[any]any } // New returns a new Attributes containing the key/value pair. -func New(key, value interface{}) *Attributes { - return &Attributes{m: map[interface{}]interface{}{key: value}} +func New(key, value any) *Attributes { + return &Attributes{m: map[any]any{key: value}} } // WithValue returns a new Attributes containing the previous keys and values // and the new key/value pair. If the same key appears multiple times, the // last value overwrites all previous values for that key. To remove an // existing key, use a nil value. value should not be modified later. -func (a *Attributes) WithValue(key, value interface{}) *Attributes { +func (a *Attributes) WithValue(key, value any) *Attributes { if a == nil { return New(key, value) } - n := &Attributes{m: make(map[interface{}]interface{}, len(a.m)+1)} + n := &Attributes{m: make(map[any]any, len(a.m)+1)} for k, v := range a.m { n.m[k] = v } @@ -58,20 +63,19 @@ // Value returns the value associated with these attributes for key, or nil if // no value is associated with key. The returned value should not be modified. -func (a *Attributes) Value(key interface{}) interface{} { +func (a *Attributes) Value(key any) any { if a == nil { return nil } return a.m[key] } -// Equal returns whether a and o are equivalent. If 'Equal(o interface{}) -// bool' is implemented for a value in the attributes, it is called to -// determine if the value matches the one stored in the other attributes. If -// Equal is not implemented, standard equality is used to determine if the two -// values are equal. Note that some types (e.g. maps) aren't comparable by -// default, so they must be wrapped in a struct, or in an alias type, with Equal -// defined. +// Equal returns whether a and o are equivalent. If 'Equal(o any) bool' is +// implemented for a value in the attributes, it is called to determine if the +// value matches the one stored in the other attributes. If Equal is not +// implemented, standard equality is used to determine if the two values are +// equal. Note that some types (e.g. maps) aren't comparable by default, so +// they must be wrapped in a struct, or in an alias type, with Equal defined. func (a *Attributes) Equal(o *Attributes) bool { if a == nil && o == nil { return true @@ -88,7 +92,7 @@ // o missing element of a return false } - if eq, ok := v.(interface{ Equal(o interface{}) bool }); ok { + if eq, ok := v.(interface{ Equal(o any) bool }); ok { if !eq.Equal(ov) { return false } @@ -99,3 +103,39 @@ } return true } + +// String prints the attribute map. If any key or values throughout the map +// implement fmt.Stringer, it calls that method and appends. 
+func (a *Attributes) String() string { + var sb strings.Builder + sb.WriteString("{") + first := true + for k, v := range a.m { + if !first { + sb.WriteString(", ") + } + sb.WriteString(fmt.Sprintf("%q: %q ", str(k), str(v))) + first = false + } + sb.WriteString("}") + return sb.String() +} + +func str(x any) string { + if v, ok := x.(fmt.Stringer); ok { + return v.String() + } else if v, ok := x.(string); ok { + return v + } + return fmt.Sprintf("<%p>", x) +} + +// MarshalJSON helps implement the json.Marshaler interface, thereby rendering +// the Attributes correctly when printing (via pretty.JSON) structs containing +// Attributes as fields. +// +// Is it impossible to unmarshal attributes from a JSON representation and this +// method is meant only for debugging purposes. +func (a *Attributes) MarshalJSON() ([]byte, error) { + return []byte(a.String()), nil +} diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/balancer/balancer.go temporal-1.22.5/src/vendor/google.golang.org/grpc/balancer/balancer.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/balancer/balancer.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/balancer/balancer.go 2024-02-23 09:46:15.000000000 +0000 @@ -105,8 +105,8 @@ // // This will trigger a state transition for the SubConn. // - // Deprecated: This method is now part of the ClientConn interface and will - // eventually be removed from here. + // Deprecated: this method will be removed. Create new SubConns for new + // addresses instead. UpdateAddresses([]resolver.Address) // Connect starts the connecting for this SubConn. Connect() @@ -115,6 +115,13 @@ // creates a new one and returns it. Returns a close function which must // be called when the Producer is no longer needed. GetOrBuildProducer(ProducerBuilder) (p Producer, close func()) + // Shutdown shuts down the SubConn gracefully. Any started RPCs will be + // allowed to complete. No future calls should be made on the SubConn. + // One final state update will be delivered to the StateListener (or + // UpdateSubConnState; deprecated) with ConnectivityState of Shutdown to + // indicate the shutdown operation. This may be delivered before + // in-progress RPCs are complete and the actual connection is closed. + Shutdown() } // NewSubConnOptions contains options to create new SubConn. @@ -129,6 +136,11 @@ // HealthCheckEnabled indicates whether health check service should be // enabled on this SubConn HealthCheckEnabled bool + // StateListener is called when the state of the subconn changes. If nil, + // Balancer.UpdateSubConnState will be called instead. Will never be + // invoked until after Connect() is called on the SubConn created with + // these options. + StateListener func(SubConnState) } // State contains the balancer's state relevant to the gRPC ClientConn. @@ -150,16 +162,24 @@ // NewSubConn is called by balancer to create a new SubConn. // It doesn't block and wait for the connections to be established. // Behaviors of the SubConn can be controlled by options. + // + // Deprecated: please be aware that in a future version, SubConns will only + // support one address per SubConn. NewSubConn([]resolver.Address, NewSubConnOptions) (SubConn, error) // RemoveSubConn removes the SubConn from ClientConn. // The SubConn will be shutdown. + // + // Deprecated: use SubConn.Shutdown instead. RemoveSubConn(SubConn) // UpdateAddresses updates the addresses used in the passed in SubConn. 
// gRPC checks if the currently connected address is still in the new list. // If so, the connection will be kept. Else, the connection will be // gracefully closed, and a new connection will be created. // - // This will trigger a state transition for the SubConn. + // This may trigger a state transition for the SubConn. + // + // Deprecated: this method will be removed. Create new SubConns for new + // addresses instead. UpdateAddresses(SubConn, []resolver.Address) // UpdateState notifies gRPC that the balancer's internal state has @@ -250,7 +270,7 @@ // trailing metadata. // // The only supported type now is *orca_v3.LoadReport. - ServerLoad interface{} + ServerLoad any } var ( @@ -286,7 +306,7 @@ // // LB policies with child policies are responsible for propagating metadata // injected by their children to the ClientConn, as part of Pick(). - Metatada metadata.MD + Metadata metadata.MD } // TransientFailureError returns e. It exists for backward compatibility and @@ -343,9 +363,13 @@ ResolverError(error) // UpdateSubConnState is called by gRPC when the state of a SubConn // changes. + // + // Deprecated: Use NewSubConnOptions.StateListener when creating the + // SubConn instead. UpdateSubConnState(SubConn, SubConnState) - // Close closes the balancer. The balancer is not required to call - // ClientConn.RemoveSubConn for its existing SubConns. + // Close closes the balancer. The balancer is not currently required to + // call SubConn.Shutdown for its existing SubConns; however, this will be + // required in a future release, so it is recommended. Close() } @@ -390,15 +414,14 @@ type ProducerBuilder interface { // Build creates a Producer. The first parameter is always a // grpc.ClientConnInterface (a type to allow creating RPCs/streams on the - // associated SubConn), but is declared as interface{} to avoid a - // dependency cycle. Should also return a close function that will be - // called when all references to the Producer have been given up. - Build(grpcClientConnInterface interface{}) (p Producer, close func()) + // associated SubConn), but is declared as `any` to avoid a dependency + // cycle. Should also return a close function that will be called when all + // references to the Producer have been given up. + Build(grpcClientConnInterface any) (p Producer, close func()) } // A Producer is a type shared among potentially many consumers. It is // associated with a SubConn, and an implementation will typically contain // other methods to provide additional functionality, e.g. configuration or // subscription registration. -type Producer interface { -} +type Producer any diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/balancer/base/balancer.go temporal-1.22.5/src/vendor/google.golang.org/grpc/balancer/base/balancer.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/balancer/base/balancer.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/balancer/base/balancer.go 2024-02-23 09:46:15.000000000 +0000 @@ -105,7 +105,12 @@ addrsSet.Set(a, nil) if _, ok := b.subConns.Get(a); !ok { // a is a new address (not existing in b.subConns). 
- sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{HealthCheckEnabled: b.config.HealthCheck}) + var sc balancer.SubConn + opts := balancer.NewSubConnOptions{ + HealthCheckEnabled: b.config.HealthCheck, + StateListener: func(scs balancer.SubConnState) { b.updateSubConnState(sc, scs) }, + } + sc, err := b.cc.NewSubConn([]resolver.Address{a}, opts) if err != nil { logger.Warningf("base.baseBalancer: failed to create new SubConn: %v", err) continue @@ -121,10 +126,10 @@ sc := sci.(balancer.SubConn) // a was removed by resolver. if _, ok := addrsSet.Get(a); !ok { - b.cc.RemoveSubConn(sc) + sc.Shutdown() b.subConns.Delete(a) // Keep the state of this sc in b.scStates until sc's state becomes Shutdown. - // The entry will be deleted in UpdateSubConnState. + // The entry will be deleted in updateSubConnState. } } // If resolver state contains no addresses, return an error so ClientConn @@ -177,7 +182,12 @@ b.picker = b.pickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs}) } +// UpdateSubConnState is a nop because a StateListener is always set in NewSubConn. func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + logger.Errorf("base.baseBalancer: UpdateSubConnState(%v, %+v) called unexpectedly", sc, state) +} + +func (b *baseBalancer) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { s := state.ConnectivityState if logger.V(2) { logger.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s) @@ -204,8 +214,8 @@ case connectivity.Idle: sc.Connect() case connectivity.Shutdown: - // When an address was removed by resolver, b called RemoveSubConn but - // kept the sc's state in scStates. Remove state for this sc here. + // When an address was removed by resolver, b called Shutdown but kept + // the sc's state in scStates. Remove state for this sc here. delete(b.scStates, sc) case connectivity.TransientFailure: // Save error to be reported via picker. @@ -226,7 +236,7 @@ } // Close is a nop because base balancer doesn't have internal state to clean up, -// and it doesn't need to call RemoveSubConn for the SubConns. +// and it doesn't need to call Shutdown for the SubConns. func (b *baseBalancer) Close() { } diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go temporal-1.22.5/src/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go 2024-02-23 09:46:15.000000000 +0000 @@ -19,7 +19,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.31.0 // protoc v4.22.0 // source: grpc/lb/v1/load_balancer.proto diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go temporal-1.22.5/src/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go 2024-02-23 09:46:15.000000000 +0000 @@ -213,7 +213,7 @@ backendAddrsWithoutMetadata []resolver.Address // Roundrobin functionalities. state connectivity.State - subConns map[resolver.Address]balancer.SubConn // Used to new/remove SubConn. 
+ subConns map[resolver.Address]balancer.SubConn // Used to new/shutdown SubConn. scStates map[balancer.SubConn]connectivity.State // Used to filter READY SubConns. picker balancer.Picker // Support fallback to resolved backend addresses if there's no response @@ -290,7 +290,7 @@ // aggregateSubConnStats calculate the aggregated state of SubConns in // lb.SubConns. These SubConns are subconns in use (when switching between // fallback and grpclb). lb.scState contains states for all SubConns, including -// those in cache (SubConns are cached for 10 seconds after remove). +// those in cache (SubConns are cached for 10 seconds after shutdown). // // The aggregated state is: // - If at least one SubConn in Ready, the aggregated state is Ready; @@ -319,7 +319,13 @@ return connectivity.TransientFailure } +// UpdateSubConnState is unused; NewSubConn's options always specifies +// updateSubConnState as the listener. func (lb *lbBalancer) UpdateSubConnState(sc balancer.SubConn, scs balancer.SubConnState) { + logger.Errorf("grpclb: UpdateSubConnState(%v, %+v) called unexpectedly", sc, scs) +} + +func (lb *lbBalancer) updateSubConnState(sc balancer.SubConn, scs balancer.SubConnState) { s := scs.ConnectivityState if logger.V(2) { logger.Infof("lbBalancer: handle SubConn state change: %p, %v", sc, s) @@ -339,8 +345,8 @@ case connectivity.Idle: sc.Connect() case connectivity.Shutdown: - // When an address was removed by resolver, b called RemoveSubConn but - // kept the sc's state in scStates. Remove state for this sc here. + // When an address was removed by resolver, b called Shutdown but kept + // the sc's state in scStates. Remove state for this sc here. delete(lb.scStates, sc) case connectivity.TransientFailure: lb.connErr = scs.ConnectionError @@ -373,8 +379,13 @@ if forceRegeneratePicker || (lb.state != oldAggrState) { lb.regeneratePicker(resetDrop) } + var cc balancer.ClientConn = lb.cc + if lb.usePickFirst { + // Bypass the caching layer that would wrap the picker. + cc = lb.cc.ClientConn + } - lb.cc.UpdateState(balancer.State{ConnectivityState: lb.state, Picker: lb.picker}) + cc.UpdateState(balancer.State{ConnectivityState: lb.state, Picker: lb.picker}) } // fallbackToBackendsAfter blocks for fallbackTimeout and falls back to use @@ -448,17 +459,9 @@ gc, _ := ccs.BalancerConfig.(*grpclbServiceConfig) lb.handleServiceConfig(gc) - addrs := ccs.ResolverState.Addresses + backendAddrs := ccs.ResolverState.Addresses - var remoteBalancerAddrs, backendAddrs []resolver.Address - for _, a := range addrs { - if a.Type == resolver.GRPCLB { - a.Type = resolver.Backend - remoteBalancerAddrs = append(remoteBalancerAddrs, a) - } else { - backendAddrs = append(backendAddrs, a) - } - } + var remoteBalancerAddrs []resolver.Address if sd := grpclbstate.Get(ccs.ResolverState); sd != nil { // Override any balancer addresses provided via // ccs.ResolverState.Addresses. 
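With the resolver.GRPCLB address-type filtering removed above, remote balancer addresses only reach the grpclb policy when a resolver attaches them via the grpclb state package. A sketch of how a resolver would do that, assuming the Set/State API of google.golang.org/grpc/balancer/grpclb/state; the address slices are placeholders:

package lbexample

import (
	grpclbstate "google.golang.org/grpc/balancer/grpclb/state"
	"google.golang.org/grpc/resolver"
)

// withBalancerAddrs returns a resolver.State carrying backend addresses as
// usual, plus remote balancer addresses in the grpclb-specific attribute.
func withBalancerAddrs(backends, balancers []resolver.Address) resolver.State {
	s := resolver.State{Addresses: backends}
	return grpclbstate.Set(s, &grpclbstate.State{BalancerAddresses: balancers})
}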
diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go temporal-1.22.5/src/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go 2024-02-23 09:46:15.000000000 +0000 @@ -113,7 +113,6 @@ } balancingPolicyChanged := lb.usePickFirst != pickFirst - oldUsePickFirst := lb.usePickFirst lb.usePickFirst = pickFirst if fallbackModeChanged || balancingPolicyChanged { @@ -123,13 +122,7 @@ // For fallback mode switching with pickfirst, we want to recreate the // SubConn because the creds could be different. for a, sc := range lb.subConns { - if oldUsePickFirst { - // If old SubConn were created for pickfirst, bypass cache and - // remove directly. - lb.cc.cc.RemoveSubConn(sc) - } else { - lb.cc.RemoveSubConn(sc) - } + sc.Shutdown() delete(lb.subConns, a) } } @@ -144,16 +137,17 @@ } if sc != nil { if len(backendAddrs) == 0 { - lb.cc.cc.RemoveSubConn(sc) + sc.Shutdown() delete(lb.subConns, scKey) return } - lb.cc.cc.UpdateAddresses(sc, backendAddrs) + lb.cc.ClientConn.UpdateAddresses(sc, backendAddrs) sc.Connect() return } + opts.StateListener = func(scs balancer.SubConnState) { lb.updateSubConnState(sc, scs) } // This bypasses the cc wrapper with SubConn cache. - sc, err := lb.cc.cc.NewSubConn(backendAddrs, opts) + sc, err := lb.cc.ClientConn.NewSubConn(backendAddrs, opts) if err != nil { logger.Warningf("grpclb: failed to create new SubConn: %v", err) return @@ -176,6 +170,8 @@ if _, ok := lb.subConns[addrWithoutAttrs]; !ok { // Use addrWithMD to create the SubConn. + var sc balancer.SubConn + opts.StateListener = func(scs balancer.SubConnState) { lb.updateSubConnState(sc, scs) } sc, err := lb.cc.NewSubConn([]resolver.Address{addr}, opts) if err != nil { logger.Warningf("grpclb: failed to create new SubConn: %v", err) @@ -194,7 +190,7 @@ for a, sc := range lb.subConns { // a was removed by resolver. if _, ok := addrsSet[a]; !ok { - lb.cc.RemoveSubConn(sc) + sc.Shutdown() delete(lb.subConns, a) // Keep the state of this sc in b.scStates until sc's state becomes Shutdown. // The entry will be deleted in UpdateSubConnState. @@ -419,7 +415,7 @@ } } // Trigger a re-resolve when the stream errors. - ccw.lb.cc.cc.ResolveNow(resolver.ResolveNowOptions{}) + ccw.lb.cc.ClientConn.ResolveNow(resolver.ResolveNowOptions{}) ccw.lb.mu.Lock() ccw.lb.remoteBalancerConnected = false diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_util.go temporal-1.22.5/src/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_util.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_util.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_util.go 2024-02-23 09:46:15.000000000 +0000 @@ -91,11 +91,12 @@ const subConnCacheTime = time.Second * 10 // lbCacheClientConn is a wrapper balancer.ClientConn with a SubConn cache. -// SubConns will be kept in cache for subConnCacheTime before being removed. +// SubConns will be kept in cache for subConnCacheTime before being shut down. // -// Its new and remove methods are updated to do cache first. +// Its NewSubconn and SubConn.Shutdown methods are updated to do cache first. 
type lbCacheClientConn struct { - cc balancer.ClientConn + balancer.ClientConn + timeout time.Duration mu sync.Mutex @@ -113,7 +114,7 @@ func newLBCacheClientConn(cc balancer.ClientConn) *lbCacheClientConn { return &lbCacheClientConn{ - cc: cc, + ClientConn: cc, timeout: subConnCacheTime, subConnCache: make(map[resolver.Address]*subConnCacheEntry), subConnToAddr: make(map[balancer.SubConn]resolver.Address), @@ -137,16 +138,27 @@ return entry.sc, nil } - scNew, err := ccc.cc.NewSubConn(addrs, opts) + scNew, err := ccc.ClientConn.NewSubConn(addrs, opts) if err != nil { return nil, err } + scNew = &lbCacheSubConn{SubConn: scNew, ccc: ccc} ccc.subConnToAddr[scNew] = addrWithoutAttrs return scNew, nil } func (ccc *lbCacheClientConn) RemoveSubConn(sc balancer.SubConn) { + logger.Errorf("RemoveSubConn(%v) called unexpectedly", sc) +} + +type lbCacheSubConn struct { + balancer.SubConn + ccc *lbCacheClientConn +} + +func (sc *lbCacheSubConn) Shutdown() { + ccc := sc.ccc ccc.mu.Lock() defer ccc.mu.Unlock() addr, ok := ccc.subConnToAddr[sc] @@ -156,11 +168,11 @@ if entry, ok := ccc.subConnCache[addr]; ok { if entry.sc != sc { - // This could happen if NewSubConn was called multiple times for the - // same address, and those SubConns are all removed. We remove sc - // immediately here. + // This could happen if NewSubConn was called multiple times for + // the same address, and those SubConns are all shut down. We + // remove sc immediately here. delete(ccc.subConnToAddr, sc) - ccc.cc.RemoveSubConn(sc) + sc.SubConn.Shutdown() } return } @@ -176,7 +188,7 @@ if entry.abortDeleting { return } - ccc.cc.RemoveSubConn(sc) + sc.SubConn.Shutdown() delete(ccc.subConnToAddr, sc) delete(ccc.subConnCache, addr) }) @@ -195,14 +207,28 @@ } func (ccc *lbCacheClientConn) UpdateState(s balancer.State) { - ccc.cc.UpdateState(s) + s.Picker = &lbCachePicker{Picker: s.Picker} + ccc.ClientConn.UpdateState(s) } func (ccc *lbCacheClientConn) close() { ccc.mu.Lock() - // Only cancel all existing timers. There's no need to remove SubConns. + defer ccc.mu.Unlock() + // Only cancel all existing timers. There's no need to shut down SubConns. for _, entry := range ccc.subConnCache { entry.cancel() } - ccc.mu.Unlock() +} + +type lbCachePicker struct { + balancer.Picker +} + +func (cp *lbCachePicker) Pick(i balancer.PickInfo) (balancer.PickResult, error) { + res, err := cp.Picker.Pick(i) + if err != nil { + return res, err + } + res.SubConn = res.SubConn.(*lbCacheSubConn).SubConn + return res, nil } diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/balancer_conn_wrappers.go temporal-1.22.5/src/vendor/google.golang.org/grpc/balancer_conn_wrappers.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/balancer_conn_wrappers.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/balancer_conn_wrappers.go 2024-02-23 09:46:15.000000000 +0000 @@ -25,14 +25,20 @@ "sync" "google.golang.org/grpc/balancer" - "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal/balancer/gracefulswitch" - "google.golang.org/grpc/internal/buffer" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/resolver" - "google.golang.org/grpc/status" +) + +type ccbMode int + +const ( + ccbModeActive = iota + ccbModeIdle + ccbModeClosed + ccbModeExitingIdle ) // ccBalancerWrapper sits between the ClientConn and the Balancer. 
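The caching wrapper above now embeds balancer.ClientConn, and lbCacheSubConn embeds balancer.SubConn, so every method is forwarded automatically and only the intercepted ones are overridden. A generic illustration of that embed-and-override pattern, with names of our own choosing (not part of the gRPC API):

package lbexample

import "google.golang.org/grpc/balancer"

type cachingSubConn struct {
	balancer.SubConn // every other method is forwarded to the real SubConn
	release          func(balancer.SubConn)
}

// Shutdown intercepts the call; the embedded SubConn's Shutdown is only
// invoked by whatever release decides (immediately, or after a cache TTL).
func (c *cachingSubConn) Shutdown() {
	c.release(c.SubConn)
}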
@@ -49,192 +55,89 @@ // It uses the gracefulswitch.Balancer internally to ensure that balancer // switches happen in a graceful manner. type ccBalancerWrapper struct { - cc *ClientConn - - // Since these fields are accessed only from handleXxx() methods which are - // synchronized by the watcher goroutine, we do not need a mutex to protect - // these fields. + // The following fields are initialized when the wrapper is created and are + // read-only afterwards, and therefore can be accessed without a mutex. + cc *ClientConn + opts balancer.BuildOptions + + // Outgoing (gRPC --> balancer) calls are guaranteed to execute in a + // mutually exclusive manner as they are scheduled in the serializer. Fields + // accessed *only* in these serializer callbacks, can therefore be accessed + // without a mutex. balancer *gracefulswitch.Balancer curBalancerName string - updateCh *buffer.Unbounded // Updates written on this channel are processed by watcher(). - resultCh *buffer.Unbounded // Results of calls to UpdateClientConnState() are pushed here. - closed *grpcsync.Event // Indicates if close has been called. - done *grpcsync.Event // Indicates if close has completed its work. + // mu guards access to the below fields. Access to the serializer and its + // cancel function needs to be mutex protected because they are overwritten + // when the wrapper exits idle mode. + mu sync.Mutex + serializer *grpcsync.CallbackSerializer // To serialize all outoing calls. + serializerCancel context.CancelFunc // To close the seralizer at close/enterIdle time. + mode ccbMode // Tracks the current mode of the wrapper. } // newCCBalancerWrapper creates a new balancer wrapper. The underlying balancer // is not created until the switchTo() method is invoked. func newCCBalancerWrapper(cc *ClientConn, bopts balancer.BuildOptions) *ccBalancerWrapper { + ctx, cancel := context.WithCancel(context.Background()) ccb := &ccBalancerWrapper{ - cc: cc, - updateCh: buffer.NewUnbounded(), - resultCh: buffer.NewUnbounded(), - closed: grpcsync.NewEvent(), - done: grpcsync.NewEvent(), + cc: cc, + opts: bopts, + serializer: grpcsync.NewCallbackSerializer(ctx), + serializerCancel: cancel, } - go ccb.watcher() ccb.balancer = gracefulswitch.NewBalancer(ccb, bopts) return ccb } -// The following xxxUpdate structs wrap the arguments received as part of the -// corresponding update. The watcher goroutine uses the 'type' of the update to -// invoke the appropriate handler routine to handle the update. - -type ccStateUpdate struct { - ccs *balancer.ClientConnState -} - -type scStateUpdate struct { - sc balancer.SubConn - state connectivity.State - err error -} - -type exitIdleUpdate struct{} - -type resolverErrorUpdate struct { - err error -} - -type switchToUpdate struct { - name string -} - -type subConnUpdate struct { - acbw *acBalancerWrapper -} - -// watcher is a long-running goroutine which reads updates from a channel and -// invokes corresponding methods on the underlying balancer. It ensures that -// these methods are invoked in a synchronous fashion. It also ensures that -// these methods are invoked in the order in which the updates were received. 
-func (ccb *ccBalancerWrapper) watcher() { - for { - select { - case u := <-ccb.updateCh.Get(): - ccb.updateCh.Load() - if ccb.closed.HasFired() { - break - } - switch update := u.(type) { - case *ccStateUpdate: - ccb.handleClientConnStateChange(update.ccs) - case *scStateUpdate: - ccb.handleSubConnStateChange(update) - case *exitIdleUpdate: - ccb.handleExitIdle() - case *resolverErrorUpdate: - ccb.handleResolverError(update.err) - case *switchToUpdate: - ccb.handleSwitchTo(update.name) - case *subConnUpdate: - ccb.handleRemoveSubConn(update.acbw) - default: - logger.Errorf("ccBalancerWrapper.watcher: unknown update %+v, type %T", update, update) - } - case <-ccb.closed.Done(): - } - - if ccb.closed.HasFired() { - ccb.handleClose() - return - } - } -} - // updateClientConnState is invoked by grpc to push a ClientConnState update to // the underlying balancer. -// -// Unlike other methods invoked by grpc to push updates to the underlying -// balancer, this method cannot simply push the update onto the update channel -// and return. It needs to return the error returned by the underlying balancer -// back to grpc which propagates that to the resolver. func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { - ccb.updateCh.Put(&ccStateUpdate{ccs: ccs}) - - var res interface{} - select { - case res = <-ccb.resultCh.Get(): - ccb.resultCh.Load() - case <-ccb.closed.Done(): - // Return early if the balancer wrapper is closed while we are waiting for - // the underlying balancer to process a ClientConnState update. - return nil - } - // If the returned error is nil, attempting to type assert to error leads to - // panic. So, this needs to handled separately. - if res == nil { - return nil - } - return res.(error) -} - -// handleClientConnStateChange handles a ClientConnState update from the update -// channel and invokes the appropriate method on the underlying balancer. -// -// If the addresses specified in the update contain addresses of type "grpclb" -// and the selected LB policy is not "grpclb", these addresses will be filtered -// out and ccs will be modified with the updated address list. -func (ccb *ccBalancerWrapper) handleClientConnStateChange(ccs *balancer.ClientConnState) { - if ccb.curBalancerName != grpclbName { - // Filter any grpclb addresses since we don't have the grpclb balancer. - var addrs []resolver.Address - for _, addr := range ccs.ResolverState.Addresses { - if addr.Type == resolver.GRPCLB { - continue - } - addrs = append(addrs, addr) - } - ccs.ResolverState.Addresses = addrs + ccb.mu.Lock() + errCh := make(chan error, 1) + // Here and everywhere else where Schedule() is called, it is done with the + // lock held. But the lock guards only the scheduling part. The actual + // callback is called asynchronously without the lock being held. + ok := ccb.serializer.Schedule(func(_ context.Context) { + errCh <- ccb.balancer.UpdateClientConnState(*ccs) + }) + if !ok { + // If we are unable to schedule a function with the serializer, it + // indicates that it has been closed. A serializer is only closed when + // the wrapper is closed or is in idle. + ccb.mu.Unlock() + return fmt.Errorf("grpc: cannot send state update to a closed or idle balancer") + } + ccb.mu.Unlock() + + // We get here only if the above call to Schedule succeeds, in which case it + // is guaranteed that the scheduled function will run. Therefore it is safe + // to block on this channel. 
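The watcher goroutine and its unbounded update channels are replaced above by grpcsync.CallbackSerializer. That type is internal and not importable; purely as a conceptual sketch of the idea, callbacks are queued and run one at a time, in order, on a single goroutine, so balancer methods never race with each other (the buffered channel here is a simplification of the real unbounded queue):

package lbexample

import "context"

type serializer struct {
	ctx       context.Context
	callbacks chan func(context.Context)
}

func newSerializer(ctx context.Context) *serializer {
	s := &serializer{ctx: ctx, callbacks: make(chan func(context.Context), 64)}
	go func() {
		for {
			select {
			case cb := <-s.callbacks:
				cb(ctx) // callbacks run strictly one after another
			case <-ctx.Done():
				return
			}
		}
	}()
	return s
}

// schedule queues cb for execution; it reports false once the serializer's
// context has been cancelled, mirroring how a failed Schedule() is treated
// in the wrapper above.
func (s *serializer) schedule(cb func(context.Context)) bool {
	select {
	case <-s.ctx.Done():
		return false
	case s.callbacks <- cb:
		return true
	}
}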
+ err := <-errCh + if logger.V(2) && err != nil { + logger.Infof("error from balancer.UpdateClientConnState: %v", err) } - ccb.resultCh.Put(ccb.balancer.UpdateClientConnState(*ccs)) + return err } // updateSubConnState is invoked by grpc to push a subConn state update to the // underlying balancer. func (ccb *ccBalancerWrapper) updateSubConnState(sc balancer.SubConn, s connectivity.State, err error) { - // When updating addresses for a SubConn, if the address in use is not in - // the new addresses, the old ac will be tearDown() and a new ac will be - // created. tearDown() generates a state change with Shutdown state, we - // don't want the balancer to receive this state change. So before - // tearDown() on the old ac, ac.acbw (acWrapper) will be set to nil, and - // this function will be called with (nil, Shutdown). We don't need to call - // balancer method in this case. - if sc == nil { - return - } - ccb.updateCh.Put(&scStateUpdate{ - sc: sc, - state: s, - err: err, + ccb.mu.Lock() + ccb.serializer.Schedule(func(_ context.Context) { + // Even though it is optional for balancers, gracefulswitch ensures + // opts.StateListener is set, so this cannot ever be nil. + sc.(*acBalancerWrapper).stateListener(balancer.SubConnState{ConnectivityState: s, ConnectionError: err}) }) -} - -// handleSubConnStateChange handles a SubConnState update from the update -// channel and invokes the appropriate method on the underlying balancer. -func (ccb *ccBalancerWrapper) handleSubConnStateChange(update *scStateUpdate) { - ccb.balancer.UpdateSubConnState(update.sc, balancer.SubConnState{ConnectivityState: update.state, ConnectionError: update.err}) -} - -func (ccb *ccBalancerWrapper) exitIdle() { - ccb.updateCh.Put(&exitIdleUpdate{}) -} - -func (ccb *ccBalancerWrapper) handleExitIdle() { - if ccb.cc.GetState() != connectivity.Idle { - return - } - ccb.balancer.ExitIdle() + ccb.mu.Unlock() } func (ccb *ccBalancerWrapper) resolverError(err error) { - ccb.updateCh.Put(&resolverErrorUpdate{err: err}) -} - -func (ccb *ccBalancerWrapper) handleResolverError(err error) { - ccb.balancer.ResolverError(err) + ccb.mu.Lock() + ccb.serializer.Schedule(func(_ context.Context) { + ccb.balancer.ResolverError(err) + }) + ccb.mu.Unlock() } // switchTo is invoked by grpc to instruct the balancer wrapper to switch to the @@ -248,24 +151,27 @@ // the ccBalancerWrapper keeps track of the current LB policy name, and skips // the graceful balancer switching process if the name does not change. func (ccb *ccBalancerWrapper) switchTo(name string) { - ccb.updateCh.Put(&switchToUpdate{name: name}) + ccb.mu.Lock() + ccb.serializer.Schedule(func(_ context.Context) { + // TODO: Other languages use case-sensitive balancer registries. We should + // switch as well. See: https://github.com/grpc/grpc-go/issues/5288. + if strings.EqualFold(ccb.curBalancerName, name) { + return + } + ccb.buildLoadBalancingPolicy(name) + }) + ccb.mu.Unlock() } -// handleSwitchTo handles a balancer switch update from the update channel. It -// calls the SwitchTo() method on the gracefulswitch.Balancer with a -// balancer.Builder corresponding to name. If no balancer.Builder is registered -// for the given name, it uses the default LB policy which is "pick_first". -func (ccb *ccBalancerWrapper) handleSwitchTo(name string) { - // TODO: Other languages use case-insensitive balancer registries. We should - // switch as well. See: https://github.com/grpc/grpc-go/issues/5288. 
- if strings.EqualFold(ccb.curBalancerName, name) { - return - } - - // TODO: Ensure that name is a registered LB policy when we get here. - // We currently only validate the `loadBalancingConfig` field. We need to do - // the same for the `loadBalancingPolicy` field and reject the service config - // if the specified policy is not registered. +// buildLoadBalancingPolicy performs the following: +// - retrieve a balancer builder for the given name. Use the default LB +// policy, pick_first, if no LB policy with name is found in the registry. +// - instruct the gracefulswitch balancer to switch to the above builder. This +// will actually build the new balancer. +// - update the `curBalancerName` field +// +// Must be called from a serializer callback. +func (ccb *ccBalancerWrapper) buildLoadBalancingPolicy(name string) { builder := balancer.Get(name) if builder == nil { channelz.Warningf(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q, since the specified LB policy %q was not registered", PickFirstBalancerName, name) @@ -281,26 +187,112 @@ ccb.curBalancerName = builder.Name() } -// handleRemoveSucConn handles a request from the underlying balancer to remove -// a subConn. -// -// See comments in RemoveSubConn() for more details. -func (ccb *ccBalancerWrapper) handleRemoveSubConn(acbw *acBalancerWrapper) { - ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) +func (ccb *ccBalancerWrapper) close() { + channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: closing") + ccb.closeBalancer(ccbModeClosed) } -func (ccb *ccBalancerWrapper) close() { - ccb.closed.Fire() - <-ccb.done.Done() +// enterIdleMode is invoked by grpc when the channel enters idle mode upon +// expiry of idle_timeout. This call blocks until the balancer is closed. +func (ccb *ccBalancerWrapper) enterIdleMode() { + channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: entering idle mode") + ccb.closeBalancer(ccbModeIdle) +} + +// closeBalancer is invoked when the channel is being closed or when it enters +// idle mode upon expiry of idle_timeout. +func (ccb *ccBalancerWrapper) closeBalancer(m ccbMode) { + ccb.mu.Lock() + if ccb.mode == ccbModeClosed || ccb.mode == ccbModeIdle { + ccb.mu.Unlock() + return + } + + ccb.mode = m + done := ccb.serializer.Done() + b := ccb.balancer + ok := ccb.serializer.Schedule(func(_ context.Context) { + // Close the serializer to ensure that no more calls from gRPC are sent + // to the balancer. + ccb.serializerCancel() + // Empty the current balancer name because we don't have a balancer + // anymore and also so that we act on the next call to switchTo by + // creating a new balancer specified by the new resolver. + ccb.curBalancerName = "" + }) + if !ok { + ccb.mu.Unlock() + return + } + ccb.mu.Unlock() + + // Give enqueued callbacks a chance to finish before closing the balancer. + <-done + b.Close() } -func (ccb *ccBalancerWrapper) handleClose() { - ccb.balancer.Close() - ccb.done.Fire() +// exitIdleMode is invoked by grpc when the channel exits idle mode either +// because of an RPC or because of an invocation of the Connect() API. This +// recreates the balancer that was closed previously when entering idle mode. +// +// If the channel is not in idle mode, we know for a fact that we are here as a +// result of the user calling the Connect() method on the ClientConn. In this +// case, we can simply forward the call to the underlying balancer, instructing +// it to reconnect to the backends. 
+func (ccb *ccBalancerWrapper) exitIdleMode() { + ccb.mu.Lock() + if ccb.mode == ccbModeClosed { + // Request to exit idle is a no-op when wrapper is already closed. + ccb.mu.Unlock() + return + } + + if ccb.mode == ccbModeIdle { + // Recreate the serializer which was closed when we entered idle. + ctx, cancel := context.WithCancel(context.Background()) + ccb.serializer = grpcsync.NewCallbackSerializer(ctx) + ccb.serializerCancel = cancel + } + + // The ClientConn guarantees that mutual exclusion between close() and + // exitIdleMode(), and since we just created a new serializer, we can be + // sure that the below function will be scheduled. + done := make(chan struct{}) + ccb.serializer.Schedule(func(_ context.Context) { + defer close(done) + + ccb.mu.Lock() + defer ccb.mu.Unlock() + + if ccb.mode != ccbModeIdle { + ccb.balancer.ExitIdle() + return + } + + // Gracefulswitch balancer does not support a switchTo operation after + // being closed. Hence we need to create a new one here. + ccb.balancer = gracefulswitch.NewBalancer(ccb, ccb.opts) + ccb.mode = ccbModeActive + channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: exiting idle mode") + + }) + ccb.mu.Unlock() + + <-done +} + +func (ccb *ccBalancerWrapper) isIdleOrClosed() bool { + ccb.mu.Lock() + defer ccb.mu.Unlock() + return ccb.mode == ccbModeIdle || ccb.mode == ccbModeClosed } func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { - if len(addrs) <= 0 { + if ccb.isIdleOrClosed() { + return nil, fmt.Errorf("grpc: cannot create SubConn when balancer is closed or idle") + } + + if len(addrs) == 0 { return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list") } ac, err := ccb.cc.newAddrConn(addrs, opts) @@ -308,32 +300,26 @@ channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err) return nil, err } - acbw := &acBalancerWrapper{ac: ac, producers: make(map[balancer.ProducerBuilder]*refCountedProducer)} - acbw.ac.mu.Lock() + acbw := &acBalancerWrapper{ + ccb: ccb, + ac: ac, + producers: make(map[balancer.ProducerBuilder]*refCountedProducer), + stateListener: opts.StateListener, + } ac.acbw = acbw - acbw.ac.mu.Unlock() return acbw, nil } func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { - // Before we switched the ccBalancerWrapper to use gracefulswitch.Balancer, it - // was required to handle the RemoveSubConn() method asynchronously by pushing - // the update onto the update channel. This was done to avoid a deadlock as - // switchBalancer() was holding cc.mu when calling Close() on the old - // balancer, which would in turn call RemoveSubConn(). - // - // With the use of gracefulswitch.Balancer in ccBalancerWrapper, handling this - // asynchronously is probably not required anymore since the switchTo() method - // handles the balancer switch by pushing the update onto the channel. - // TODO(easwars): Handle this inline. - acbw, ok := sc.(*acBalancerWrapper) - if !ok { - return - } - ccb.updateCh.Put(&subConnUpdate{acbw: acbw}) + // The graceful switch balancer will never call this. 
+ logger.Errorf("ccb RemoveSubConn(%v) called unexpectedly, sc") } func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { + if ccb.isIdleOrClosed() { + return + } + acbw, ok := sc.(*acBalancerWrapper) if !ok { return @@ -342,6 +328,10 @@ } func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { + if ccb.isIdleOrClosed() { + return + } + // Update picker before updating state. Even though the ordering here does // not matter, it can lead to multiple calls of Pick in the common start-up // case where we wait for ready and then perform an RPC. If the picker is @@ -352,6 +342,10 @@ } func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOptions) { + if ccb.isIdleOrClosed() { + return + } + ccb.cc.resolveNow(o) } @@ -362,78 +356,57 @@ // acBalancerWrapper is a wrapper on top of ac for balancers. // It implements balancer.SubConn interface. type acBalancerWrapper struct { + ac *addrConn // read-only + ccb *ccBalancerWrapper // read-only + stateListener func(balancer.SubConnState) + mu sync.Mutex - ac *addrConn producers map[balancer.ProducerBuilder]*refCountedProducer } -func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { - acbw.mu.Lock() - defer acbw.mu.Unlock() - if len(addrs) <= 0 { - acbw.ac.cc.removeAddrConn(acbw.ac, errConnDrain) - return - } - if !acbw.ac.tryUpdateAddrs(addrs) { - cc := acbw.ac.cc - opts := acbw.ac.scopts - acbw.ac.mu.Lock() - // Set old ac.acbw to nil so the Shutdown state update will be ignored - // by balancer. - // - // TODO(bar) the state transition could be wrong when tearDown() old ac - // and creating new ac, fix the transition. - acbw.ac.acbw = nil - acbw.ac.mu.Unlock() - acState := acbw.ac.getState() - acbw.ac.cc.removeAddrConn(acbw.ac, errConnDrain) - - if acState == connectivity.Shutdown { - return - } +func (acbw *acBalancerWrapper) String() string { + return fmt.Sprintf("SubConn(id:%d)", acbw.ac.channelzID.Int()) +} - newAC, err := cc.newAddrConn(addrs, opts) - if err != nil { - channelz.Warningf(logger, acbw.ac.channelzID, "acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err) - return - } - acbw.ac = newAC - newAC.mu.Lock() - newAC.acbw = acbw - newAC.mu.Unlock() - if acState != connectivity.Idle { - go newAC.connect() - } - } +func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { + acbw.ac.updateAddrs(addrs) } func (acbw *acBalancerWrapper) Connect() { - acbw.mu.Lock() - defer acbw.mu.Unlock() go acbw.ac.connect() } -func (acbw *acBalancerWrapper) getAddrConn() *addrConn { - acbw.mu.Lock() - defer acbw.mu.Unlock() - return acbw.ac -} +func (acbw *acBalancerWrapper) Shutdown() { + ccb := acbw.ccb + if ccb.isIdleOrClosed() { + // It it safe to ignore this call when the balancer is closed or in idle + // because the ClientConn takes care of closing the connections. + // + // Not returning early from here when the balancer is closed or in idle + // leads to a deadlock though, because of the following sequence of + // calls when holding cc.mu: + // cc.exitIdleMode --> ccb.enterIdleMode --> gsw.Close --> + // ccb.RemoveAddrConn --> cc.removeAddrConn + return + } -var errSubConnNotReady = status.Error(codes.Unavailable, "SubConn not currently connected") + ccb.cc.removeAddrConn(acbw.ac, errConnDrain) +} // NewStream begins a streaming RPC on the addrConn. If the addrConn is not -// ready, returns errSubConnNotReady. +// ready, blocks until it is or ctx expires. Returns an error when the context +// expires or the addrConn is shut down. 
func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { - transport := acbw.ac.getReadyTransport() - if transport == nil { - return nil, errSubConnNotReady + transport, err := acbw.ac.getTransport(ctx) + if err != nil { + return nil, err } return newNonRetryClientStream(ctx, desc, method, transport, acbw.ac, opts...) } // Invoke performs a unary RPC. If the addrConn is not ready, returns // errSubConnNotReady. -func (acbw *acBalancerWrapper) Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...CallOption) error { +func (acbw *acBalancerWrapper) Invoke(ctx context.Context, method string, args any, reply any, opts ...CallOption) error { cs, err := acbw.NewStream(ctx, unaryStreamDesc, method, opts...) if err != nil { return err diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go temporal-1.22.5/src/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go 2024-02-23 09:46:15.000000000 +0000 @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.31.0 // protoc v4.22.0 // source: grpc/binlog/v1/binarylog.proto diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/call.go temporal-1.22.5/src/vendor/google.golang.org/grpc/call.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/call.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/call.go 2024-02-23 09:46:15.000000000 +0000 @@ -26,7 +26,7 @@ // received. This is typically called by generated code. // // All errors returned by Invoke are compatible with the status package. -func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...CallOption) error { +func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply any, opts ...CallOption) error { // allow interceptor to see all applicable call options, which means those // configured as defaults from dial option as well as per-call options opts = combine(cc.dopts.callOptions, opts) @@ -56,13 +56,13 @@ // received. This is typically called by generated code. // // DEPRECATED: Use ClientConn.Invoke instead. -func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) error { +func Invoke(ctx context.Context, method string, args, reply any, cc *ClientConn, opts ...CallOption) error { return cc.Invoke(ctx, method, args, reply, opts...) } var unaryStreamDesc = &StreamDesc{ServerStreams: false, ClientStreams: false} -func invoke(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error { +func invoke(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error { cs, err := newClientStream(ctx, unaryStreamDesc, cc, method, opts...) 
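The interface{} to any churn in these hunks is purely mechanical: any has been an alias for interface{} since Go 1.18, so existing callers keep compiling. A minimal client interceptor written against the updated signatures; the logging is illustrative:

package clientexample

import (
	"context"
	"log"

	"google.golang.org/grpc"
)

// logUnary matches grpc.UnaryClientInterceptor with the any-based parameters.
func logUnary(ctx context.Context, method string, req, reply any,
	cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
	err := invoker(ctx, method, req, reply, cc, opts...)
	log.Printf("rpc %s err=%v", method, err)
	return err
}

// Registered as usual, e.g.:
//   conn, err := grpc.Dial(target, grpc.WithUnaryInterceptor(logUnary), ...)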
if err != nil { return err diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/clientconn.go temporal-1.22.5/src/vendor/google.golang.org/grpc/clientconn.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/clientconn.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/clientconn.go 2024-02-23 09:46:15.000000000 +0000 @@ -24,7 +24,6 @@ "fmt" "math" "net/url" - "reflect" "strings" "sync" "sync/atomic" @@ -35,9 +34,12 @@ "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/idle" + "google.golang.org/grpc/internal/pretty" iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" @@ -54,8 +56,6 @@ const ( // minimum time to give a connection to complete minConnectTimeout = 20 * time.Second - // must match grpclbName in grpclb/grpclb.go - grpclbName = "grpclb" ) var ( @@ -69,6 +69,9 @@ errConnDrain = errors.New("grpc: the connection is drained") // errConnClosing indicates that the connection is closing. errConnClosing = errors.New("grpc: the connection is closing") + // errConnIdling indicates the the connection is being closed as the channel + // is moving to an idle mode due to inactivity. + errConnIdling = errors.New("grpc: the connection is closing due to channel idleness") // invalidDefaultServiceConfigErrPrefix is used to prefix the json parsing error for the default // service config. invalidDefaultServiceConfigErrPrefix = "grpc: the provided default service config is invalid" @@ -134,17 +137,28 @@ // e.g. to use dns resolver, a "dns:///" prefix should be applied to the target. func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { cc := &ClientConn{ - target: target, - csMgr: &connectivityStateManager{}, - conns: make(map[*addrConn]struct{}), - dopts: defaultDialOptions(), - blockingpicker: newPickerWrapper(), - czData: new(channelzData), - firstResolveEvent: grpcsync.NewEvent(), + target: target, + conns: make(map[*addrConn]struct{}), + dopts: defaultDialOptions(), + czData: new(channelzData), } + + // We start the channel off in idle mode, but kick it out of idle at the end + // of this method, instead of waiting for the first RPC. Other gRPC + // implementations do wait for the first RPC to kick the channel out of + // idle. But doing so would be a major behavior change for our users who are + // used to seeing the channel active after Dial. + // + // Taking this approach of kicking it out of idle at the end of this method + // allows us to share the code between channel creation and exiting idle + // mode. This will also make it easy for us to switch to starting the + // channel off in idle, if at all we ever get to do that. 
+ cc.idlenessState = ccIdlenessStateIdle + cc.retryThrottler.Store((*retryThrottler)(nil)) cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) cc.ctx, cc.cancel = context.WithCancel(context.Background()) + cc.exitIdleCond = sync.NewCond(&cc.mu) disableGlobalOpts := false for _, opt := range opts { @@ -173,40 +187,13 @@ } }() - pid := cc.dopts.channelzParentID - cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, pid, target) - ted := &channelz.TraceEventDesc{ - Desc: "Channel created", - Severity: channelz.CtInfo, - } - if cc.dopts.channelzParentID != nil { - ted.Parent = &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID.Int()), - Severity: channelz.CtInfo, - } - } - channelz.AddTraceEvent(logger, cc.channelzID, 1, ted) - cc.csMgr.channelzID = cc.channelzID + // Register ClientConn with channelz. + cc.channelzRegistration(target) - if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil { - return nil, errNoTransportSecurity - } - if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil { - return nil, errTransportCredsAndBundle - } - if cc.dopts.copts.CredsBundle != nil && cc.dopts.copts.CredsBundle.TransportCredentials() == nil { - return nil, errNoTransportCredsInBundle - } - transportCreds := cc.dopts.copts.TransportCredentials - if transportCreds == nil { - transportCreds = cc.dopts.copts.CredsBundle.TransportCredentials() - } - if transportCreds.Info().SecurityProtocol == "insecure" { - for _, cd := range cc.dopts.copts.PerRPCCredentials { - if cd.RequireTransportSecurity() { - return nil, errTransportCredentialsMissing - } - } + cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelzID) + + if err := cc.validateTransportCredentials(); err != nil { + return nil, err } if cc.dopts.defaultServiceConfigRawJSON != nil { @@ -249,15 +236,12 @@ } // Determine the resolver to use. - resolverBuilder, err := cc.parseTargetAndFindResolver() - if err != nil { + if err := cc.parseTargetAndFindResolver(); err != nil { return nil, err } - cc.authority, err = determineAuthority(cc.parsedTarget.Endpoint(), cc.target, cc.dopts) - if err != nil { + if err = cc.determineAuthority(); err != nil { return nil, err } - channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority) if cc.dopts.scChan != nil { // Blocking wait for the initial service config. @@ -275,57 +259,234 @@ go cc.scWatcher() } + // This creates the name resolver, load balancer, blocking picker etc. + if err := cc.exitIdleMode(); err != nil { + return nil, err + } + + // Configure idleness support with configured idle timeout or default idle + // timeout duration. Idleness can be explicitly disabled by the user, by + // setting the dial option to 0. + cc.idlenessMgr = idle.NewManager(idle.ManagerOptions{Enforcer: (*idler)(cc), Timeout: cc.dopts.idleTimeout, Logger: logger}) + + // Return early for non-blocking dials. + if !cc.dopts.block { + return cc, nil + } + + // A blocking dial blocks until the clientConn is ready. + for { + s := cc.GetState() + if s == connectivity.Idle { + cc.Connect() + } + if s == connectivity.Ready { + return cc, nil + } else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure { + if err = cc.connectionError(); err != nil { + terr, ok := err.(interface { + Temporary() bool + }) + if ok && !terr.Temporary() { + return nil, err + } + } + } + if !cc.WaitForStateChange(ctx, s) { + // ctx got timeout or canceled. 
+ if err = cc.connectionError(); err != nil && cc.dopts.returnLastError { + return nil, err + } + return nil, ctx.Err() + } + } +} + +// addTraceEvent is a helper method to add a trace event on the channel. If the +// channel is a nested one, the same event is also added on the parent channel. +func (cc *ClientConn) addTraceEvent(msg string) { + ted := &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Channel %s", msg), + Severity: channelz.CtInfo, + } + if cc.dopts.channelzParentID != nil { + ted.Parent = &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Nested channel(id:%d) %s", cc.channelzID.Int(), msg), + Severity: channelz.CtInfo, + } + } + channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) +} + +type idler ClientConn + +func (i *idler) EnterIdleMode() error { + return (*ClientConn)(i).enterIdleMode() +} + +func (i *idler) ExitIdleMode() error { + return (*ClientConn)(i).exitIdleMode() +} + +// exitIdleMode moves the channel out of idle mode by recreating the name +// resolver and load balancer. +func (cc *ClientConn) exitIdleMode() error { + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return errConnClosing + } + if cc.idlenessState != ccIdlenessStateIdle { + cc.mu.Unlock() + channelz.Infof(logger, cc.channelzID, "ClientConn asked to exit idle mode, current mode is %v", cc.idlenessState) + return nil + } + + defer func() { + // When Close() and exitIdleMode() race against each other, one of the + // following two can happen: + // - Close() wins the race and runs first. exitIdleMode() runs after, and + // sees that the ClientConn is already closed and hence returns early. + // - exitIdleMode() wins the race and runs first and recreates the balancer + // and releases the lock before recreating the resolver. If Close() runs + // in this window, it will wait for exitIdleMode to complete. + // + // We achieve this synchronization using the below condition variable. + cc.mu.Lock() + cc.idlenessState = ccIdlenessStateActive + cc.exitIdleCond.Signal() + cc.mu.Unlock() + }() + + cc.idlenessState = ccIdlenessStateExitingIdle + exitedIdle := false + if cc.blockingpicker == nil { + cc.blockingpicker = newPickerWrapper(cc.dopts.copts.StatsHandlers) + } else { + cc.blockingpicker.exitIdleMode() + exitedIdle = true + } + var credsClone credentials.TransportCredentials if creds := cc.dopts.copts.TransportCredentials; creds != nil { credsClone = creds.Clone() } - cc.balancerWrapper = newCCBalancerWrapper(cc, balancer.BuildOptions{ - DialCreds: credsClone, - CredsBundle: cc.dopts.copts.CredsBundle, - Dialer: cc.dopts.copts.Dialer, - Authority: cc.authority, - CustomUserAgent: cc.dopts.copts.UserAgent, - ChannelzParentID: cc.channelzID, - Target: cc.parsedTarget, - }) + if cc.balancerWrapper == nil { + cc.balancerWrapper = newCCBalancerWrapper(cc, balancer.BuildOptions{ + DialCreds: credsClone, + CredsBundle: cc.dopts.copts.CredsBundle, + Dialer: cc.dopts.copts.Dialer, + Authority: cc.authority, + CustomUserAgent: cc.dopts.copts.UserAgent, + ChannelzParentID: cc.channelzID, + Target: cc.parsedTarget, + }) + } else { + cc.balancerWrapper.exitIdleMode() + } + cc.firstResolveEvent = grpcsync.NewEvent() + cc.mu.Unlock() - // Build the resolver. - rWrapper, err := newCCResolverWrapper(cc, resolverBuilder) - if err != nil { - return nil, fmt.Errorf("failed to build resolver: %v", err) + // This needs to be called without cc.mu because this builds a new resolver + // which might update state or report error inline which needs to be handled + // by cc.updateResolverState() which also grabs cc.mu. 
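The idleness support wired up above is driven by an idle-timeout dial option, and a value of 0 disables idle mode entirely, per the comment in the hunk. A dial sketch, assuming the experimental WithIdleTimeout option of this gRPC vintage; the target and durations are placeholders:

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	conn, err := grpc.DialContext(ctx, "localhost:7233",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithIdleTimeout(5*time.Minute), // 0 would disable idle mode
		grpc.WithBlock(),                    // block until READY, as in the loop above
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}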
+ if err := cc.initResolverWrapper(credsClone); err != nil { + return err } + + if exitedIdle { + cc.addTraceEvent("exiting idle mode") + } + return nil +} + +// enterIdleMode puts the channel in idle mode, and as part of it shuts down the +// name resolver, load balancer and any subchannels. +func (cc *ClientConn) enterIdleMode() error { cc.mu.Lock() - cc.resolverWrapper = rWrapper + if cc.conns == nil { + cc.mu.Unlock() + return ErrClientConnClosing + } + if cc.idlenessState != ccIdlenessStateActive { + channelz.Errorf(logger, cc.channelzID, "ClientConn asked to enter idle mode, current mode is %v", cc.idlenessState) + cc.mu.Unlock() + return nil + } + + // cc.conns == nil is a proxy for the ClientConn being closed. So, instead + // of setting it to nil here, we recreate the map. This also means that we + // don't have to do this when exiting idle mode. + conns := cc.conns + cc.conns = make(map[*addrConn]struct{}) + + // TODO: Currently, we close the resolver wrapper upon entering idle mode + // and create a new one upon exiting idle mode. This means that the + // `cc.resolverWrapper` field would be overwritten everytime we exit idle + // mode. While this means that we need to hold `cc.mu` when accessing + // `cc.resolverWrapper`, it makes the code simpler in the wrapper. We should + // try to do the same for the balancer and picker wrappers too. + cc.resolverWrapper.close() + cc.blockingpicker.enterIdleMode() + cc.balancerWrapper.enterIdleMode() + cc.csMgr.updateState(connectivity.Idle) + cc.idlenessState = ccIdlenessStateIdle cc.mu.Unlock() - // A blocking dial blocks until the clientConn is ready. - if cc.dopts.block { - for { - cc.Connect() - s := cc.GetState() - if s == connectivity.Ready { - break - } else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure { - if err = cc.connectionError(); err != nil { - terr, ok := err.(interface { - Temporary() bool - }) - if ok && !terr.Temporary() { - return nil, err - } - } - } - if !cc.WaitForStateChange(ctx, s) { - // ctx got timeout or canceled. - if err = cc.connectionError(); err != nil && cc.dopts.returnLastError { - return nil, err - } - return nil, ctx.Err() + go func() { + cc.addTraceEvent("entering idle mode") + for ac := range conns { + ac.tearDown(errConnIdling) + } + }() + return nil +} + +// validateTransportCredentials performs a series of checks on the configured +// transport credentials. It returns a non-nil error if any of these conditions +// are met: +// - no transport creds and no creds bundle is configured +// - both transport creds and creds bundle are configured +// - creds bundle is configured, but it lacks a transport credentials +// - insecure transport creds configured alongside call creds that require +// transport level security +// +// If none of the above conditions are met, the configured credentials are +// deemed valid and a nil error is returned. 
+func (cc *ClientConn) validateTransportCredentials() error { + if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil { + return errNoTransportSecurity + } + if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil { + return errTransportCredsAndBundle + } + if cc.dopts.copts.CredsBundle != nil && cc.dopts.copts.CredsBundle.TransportCredentials() == nil { + return errNoTransportCredsInBundle + } + transportCreds := cc.dopts.copts.TransportCredentials + if transportCreds == nil { + transportCreds = cc.dopts.copts.CredsBundle.TransportCredentials() + } + if transportCreds.Info().SecurityProtocol == "insecure" { + for _, cd := range cc.dopts.copts.PerRPCCredentials { + if cd.RequireTransportSecurity() { + return errTransportCredentialsMissing } } } + return nil +} - return cc, nil +// channelzRegistration registers the newly created ClientConn with channelz and +// stores the returned identifier in `cc.channelzID` and `cc.csMgr.channelzID`. +// A channelz trace event is emitted for ClientConn creation. If the newly +// created ClientConn is a nested one, i.e a valid parent ClientConn ID is +// specified via a dial option, the trace event is also added to the parent. +// +// Doesn't grab cc.mu as this method is expected to be called only at Dial time. +func (cc *ClientConn) channelzRegistration(target string) { + cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target) + cc.addTraceEvent("created") } // chainUnaryClientInterceptors chains all unary client interceptors into one. @@ -342,7 +503,7 @@ } else if len(interceptors) == 1 { chainedInt = interceptors[0] } else { - chainedInt = func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error { + chainedInt = func(ctx context.Context, method string, req, reply any, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error { return interceptors[0](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, 0, invoker), opts...) } } @@ -354,7 +515,7 @@ if curr == len(interceptors)-1 { return finalInvoker } - return func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error { + return func(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error { return interceptors[curr+1](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, curr+1, finalInvoker), opts...) } } @@ -390,13 +551,27 @@ } } +// newConnectivityStateManager creates an connectivityStateManager with +// the specified id. +func newConnectivityStateManager(ctx context.Context, id *channelz.Identifier) *connectivityStateManager { + return &connectivityStateManager{ + channelzID: id, + pubSub: grpcsync.NewPubSub(ctx), + } +} + // connectivityStateManager keeps the connectivity.State of ClientConn. // This struct will eventually be exported so the balancers can access it. +// +// TODO: If possible, get rid of the `connectivityStateManager` type, and +// provide this functionality using the `PubSub`, to avoid keeping track of +// the connectivity state at two places. type connectivityStateManager struct { mu sync.Mutex state connectivity.State notifyChan chan struct{} channelzID *channelz.Identifier + pubSub *grpcsync.PubSub } // updateState updates the connectivity.State of ClientConn. 
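Among the conditions validateTransportCredentials rejects at Dial time is insecure transport credentials combined with per-RPC credentials that demand transport security. A sketch of a credential that trips that check; the token handling is illustrative:

package clientexample

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

type bearerToken struct{ token string }

func (b bearerToken) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
	return map[string]string{"authorization": "Bearer " + b.token}, nil
}

// RequireTransportSecurity returning true is what makes the combination
// with insecure credentials below invalid.
func (b bearerToken) RequireTransportSecurity() bool { return true }

func dialRejected(target string) error {
	// Fails during Dial (errTransportCredentialsMissing) before any connection is made.
	_, err := grpc.Dial(target,
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithPerRPCCredentials(bearerToken{token: "secret"}),
	)
	return err
}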
@@ -412,6 +587,8 @@ return } csm.state = state + csm.pubSub.Publish(state) + channelz.Infof(logger, csm.channelzID, "Channel Connectivity change to %v", state) if csm.notifyChan != nil { // There are other goroutines waiting on this channel. @@ -441,7 +618,7 @@ type ClientConnInterface interface { // Invoke performs a unary RPC and returns after the response is received // into reply. - Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...CallOption) error + Invoke(ctx context.Context, method string, args any, reply any, opts ...CallOption) error // NewStream begins a streaming RPC. NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) } @@ -471,7 +648,9 @@ authority string // See determineAuthority(). dopts dialOptions // Default and user specified dial options. channelzID *channelz.Identifier // Channelz identifier for the channel. + resolverBuilder resolver.Builder // See parseTargetAndFindResolver(). balancerWrapper *ccBalancerWrapper // Uses gracefulswitch.balancer underneath. + idlenessMgr idle.Manager // The following provide their own synchronization, and therefore don't // require cc.mu to be held to access them. @@ -492,11 +671,44 @@ sc *ServiceConfig // Latest service config received from the resolver. conns map[*addrConn]struct{} // Set to nil on close. mkp keepalive.ClientParameters // May be updated upon receipt of a GoAway. + idlenessState ccIdlenessState // Tracks idleness state of the channel. + exitIdleCond *sync.Cond // Signalled when channel exits idle. lceMu sync.Mutex // protects lastConnectionError lastConnectionError error } +// ccIdlenessState tracks the idleness state of the channel. +// +// Channels start off in `active` and move to `idle` after a period of +// inactivity. When moving back to `active` upon an incoming RPC, they +// transition through `exiting_idle`. This state is useful for synchronization +// with Close(). +// +// This state tracking is mostly for self-protection. The idlenessManager is +// expected to keep track of the state as well, and is expected not to call into +// the ClientConn unnecessarily. +type ccIdlenessState int8 + +const ( + ccIdlenessStateActive ccIdlenessState = iota + ccIdlenessStateIdle + ccIdlenessStateExitingIdle +) + +func (s ccIdlenessState) String() string { + switch s { + case ccIdlenessStateActive: + return "active" + case ccIdlenessStateIdle: + return "idle" + case ccIdlenessStateExitingIdle: + return "exitingIdle" + default: + return "unknown" + } +} + // WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or // ctx expires. A true value is returned in former case and false in latter. // @@ -536,7 +748,10 @@ // Notice: This API is EXPERIMENTAL and may be changed or removed in a later // release. func (cc *ClientConn) Connect() { - cc.balancerWrapper.exitIdle() + cc.exitIdleMode() + // If the ClientConn was not in idle mode, we need to call ExitIdle on the + // LB policy so that connections can be created. 
+ cc.balancerWrapper.exitIdleMode() } func (cc *ClientConn) scWatcher() { @@ -585,6 +800,10 @@ panic(fmt.Sprintf("impossible error parsing empty service config: %v", cfg.Err)) } emptyServiceConfig = cfg.Config.(*ServiceConfig) + + internal.SubscribeToConnectivityStateChanges = func(cc *ClientConn, s grpcsync.Subscriber) func() { + return cc.csMgr.pubSub.Subscribe(s) + } } func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) { @@ -693,6 +912,20 @@ cc.balancerWrapper.updateSubConnState(sc, s, err) } +// Makes a copy of the input addresses slice and clears out the balancer +// attributes field. Addresses are passed during subconn creation and address +// update operations. In both cases, we will clear the balancer attributes by +// calling this function, and therefore we will be able to use the Equal method +// provided by the resolver.Address type for comparison. +func copyAddressesWithoutBalancerAttributes(in []resolver.Address) []resolver.Address { + out := make([]resolver.Address, len(in)) + for i := range in { + out[i] = in[i] + out[i].BalancerAttributes = nil + } + return out +} + // newAddrConn creates an addrConn for addrs and adds it to cc.conns. // // Caller needs to make sure len(addrs) > 0. @@ -700,11 +933,12 @@ ac := &addrConn{ state: connectivity.Idle, cc: cc, - addrs: addrs, + addrs: copyAddressesWithoutBalancerAttributes(addrs), scopts: opts, dopts: cc.dopts, czData: new(channelzData), resetBackoff: make(chan struct{}), + stateChan: make(chan struct{}), } ac.ctx, ac.cancel = context.WithCancel(cc.ctx) // Track ac in cc. This needs to be done before any getTransport(...) is called. @@ -798,9 +1032,6 @@ ac.mu.Unlock() return nil } - // Update connectivity state within the lock to prevent subsequent or - // concurrent calls from resetting the transport more than once. - ac.updateConnectivityState(connectivity.Connecting, nil) ac.mu.Unlock() ac.resetTransport() @@ -819,58 +1050,63 @@ return true } -// tryUpdateAddrs tries to update ac.addrs with the new addresses list. -// -// If ac is TransientFailure, it updates ac.addrs and returns true. The updated -// addresses will be picked up by retry in the next iteration after backoff. -// -// If ac is Shutdown or Idle, it updates ac.addrs and returns true. -// -// If the addresses is the same as the old list, it does nothing and returns -// true. -// -// If ac is Connecting, it returns false. The caller should tear down the ac and -// create a new one. Note that the backoff will be reset when this happens. -// -// If ac is Ready, it checks whether current connected address of ac is in the -// new addrs list. -// - If true, it updates ac.addrs and returns true. The ac will keep using -// the existing connection. -// - If false, it does nothing and returns false. -func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { +// updateAddrs updates ac.addrs with the new addresses list and handles active +// connections or connection attempts. 
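Externally, connectivity transitions (now also published on the internal PubSub above) are still observed through the experimental GetState and WaitForStateChange APIs, and Connect kicks an idle channel as in the updated method. A small watcher sketch using only those public calls:

package clientexample

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/connectivity"
)

// watchConn logs every state transition until ctx ends or the conn shuts down.
func watchConn(ctx context.Context, conn *grpc.ClientConn) {
	conn.Connect() // no-op if already active; exits idle mode otherwise
	for {
		s := conn.GetState()
		log.Printf("connectivity: %v", s)
		if s == connectivity.Shutdown || !conn.WaitForStateChange(ctx, s) {
			return
		}
	}
}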
+func (ac *addrConn) updateAddrs(addrs []resolver.Address) { ac.mu.Lock() - defer ac.mu.Unlock() - channelz.Infof(logger, ac.channelzID, "addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs) + channelz.Infof(logger, ac.channelzID, "addrConn: updateAddrs curAddr: %v, addrs: %v", pretty.ToJSON(ac.curAddr), pretty.ToJSON(addrs)) + + addrs = copyAddressesWithoutBalancerAttributes(addrs) + if equalAddresses(ac.addrs, addrs) { + ac.mu.Unlock() + return + } + + ac.addrs = addrs + if ac.state == connectivity.Shutdown || ac.state == connectivity.TransientFailure || ac.state == connectivity.Idle { - ac.addrs = addrs - return true + // We were not connecting, so do nothing but update the addresses. + ac.mu.Unlock() + return } - if equalAddresses(ac.addrs, addrs) { - return true + if ac.state == connectivity.Ready { + // Try to find the connected address. + for _, a := range addrs { + a.ServerName = ac.cc.getServerName(a) + if a.Equal(ac.curAddr) { + // We are connected to a valid address, so do nothing but + // update the addresses. + ac.mu.Unlock() + return + } + } } - if ac.state == connectivity.Connecting { - return false - } + // We are either connected to the wrong address or currently connecting. + // Stop the current iteration and restart. - // ac.state is Ready, try to find the connected address. - var curAddrFound bool - for _, a := range addrs { - a.ServerName = ac.cc.getServerName(a) - if reflect.DeepEqual(ac.curAddr, a) { - curAddrFound = true - break - } + ac.cancel() + ac.ctx, ac.cancel = context.WithCancel(ac.cc.ctx) + + // We have to defer here because GracefulClose => onClose, which requires + // locking ac.mu. + if ac.transport != nil { + defer ac.transport.GracefulClose() + ac.transport = nil } - channelz.Infof(logger, ac.channelzID, "addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound) - if curAddrFound { - ac.addrs = addrs + + if len(addrs) == 0 { + ac.updateConnectivityState(connectivity.Idle, nil) } - return curAddrFound + ac.mu.Unlock() + + // Since we were connecting/connected, we should start a new connection + // attempt. + go ac.resetTransport() } // getServerName determines the serverName to be used in the connection @@ -961,23 +1197,13 @@ } var newBalancerName string - if cc.sc != nil && cc.sc.lbConfig != nil { + if cc.sc == nil || (cc.sc.lbConfig == nil && cc.sc.LB == nil) { + // No service config or no LB policy specified in config. + newBalancerName = PickFirstBalancerName + } else if cc.sc.lbConfig != nil { newBalancerName = cc.sc.lbConfig.name - } else { - var isGRPCLB bool - for _, a := range addrs { - if a.Type == resolver.GRPCLB { - isGRPCLB = true - break - } - } - if isGRPCLB { - newBalancerName = grpclbName - } else if cc.sc != nil && cc.sc.LB != nil { - newBalancerName = *cc.sc.LB - } else { - newBalancerName = PickFirstBalancerName - } + } else { // cc.sc.LB != nil + newBalancerName = *cc.sc.LB } cc.balancerWrapper.switchTo(newBalancerName) } @@ -1016,46 +1242,50 @@ // Close tears down the ClientConn and all underlying connections. 
func (cc *ClientConn) Close() error { - defer cc.cancel() + defer func() { + cc.cancel() + <-cc.csMgr.pubSub.Done() + }() cc.mu.Lock() if cc.conns == nil { cc.mu.Unlock() return ErrClientConnClosing } + + for cc.idlenessState == ccIdlenessStateExitingIdle { + cc.exitIdleCond.Wait() + } + conns := cc.conns cc.conns = nil cc.csMgr.updateState(connectivity.Shutdown) + pWrapper := cc.blockingpicker rWrapper := cc.resolverWrapper - cc.resolverWrapper = nil bWrapper := cc.balancerWrapper + idlenessMgr := cc.idlenessMgr cc.mu.Unlock() // The order of closing matters here since the balancer wrapper assumes the // picker is closed before it is closed. - cc.blockingpicker.close() + if pWrapper != nil { + pWrapper.close() + } if bWrapper != nil { bWrapper.close() } if rWrapper != nil { rWrapper.close() } + if idlenessMgr != nil { + idlenessMgr.Close() + } for ac := range conns { ac.tearDown(ErrClientConnClosing) } - ted := &channelz.TraceEventDesc{ - Desc: "Channel deleted", - Severity: channelz.CtInfo, - } - if cc.dopts.channelzParentID != nil { - ted.Parent = &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID.Int()), - Severity: channelz.CtInfo, - } - } - channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) + cc.addTraceEvent("deleted") // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add // trace reference to the entity being deleted, and thus prevent it from being // deleted right away. @@ -1085,7 +1315,8 @@ addrs []resolver.Address // All addresses that the resolver resolved to. // Use updateConnectivityState for updating addrConn's connectivity state. - state connectivity.State + state connectivity.State + stateChan chan struct{} // closed and recreated on every state change. backoffIdx int // Needs to be stateful for resetConnectBackoff. resetBackoff chan struct{} @@ -1099,6 +1330,9 @@ if ac.state == s { return } + // When changing states, reset the state change channel. + close(ac.stateChan) + ac.stateChan = make(chan struct{}) ac.state = s if lastErr == nil { channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v", s) @@ -1124,7 +1358,8 @@ func (ac *addrConn) resetTransport() { ac.mu.Lock() - if ac.state == connectivity.Shutdown { + acCtx := ac.ctx + if acCtx.Err() != nil { ac.mu.Unlock() return } @@ -1152,15 +1387,16 @@ ac.updateConnectivityState(connectivity.Connecting, nil) ac.mu.Unlock() - if err := ac.tryAllAddrs(addrs, connectDeadline); err != nil { + if err := ac.tryAllAddrs(acCtx, addrs, connectDeadline); err != nil { ac.cc.resolveNow(resolver.ResolveNowOptions{}) - // After exhausting all addresses, the addrConn enters - // TRANSIENT_FAILURE. ac.mu.Lock() - if ac.state == connectivity.Shutdown { + if acCtx.Err() != nil { + // addrConn was torn down. ac.mu.Unlock() return } + // After exhausting all addresses, the addrConn enters + // TRANSIENT_FAILURE. ac.updateConnectivityState(connectivity.TransientFailure, err) // Backoff. @@ -1175,13 +1411,13 @@ ac.mu.Unlock() case <-b: timer.Stop() - case <-ac.ctx.Done(): + case <-acCtx.Done(): timer.Stop() return } ac.mu.Lock() - if ac.state != connectivity.Shutdown { + if acCtx.Err() == nil { ac.updateConnectivityState(connectivity.Idle, err) } ac.mu.Unlock() @@ -1196,14 +1432,13 @@ // tryAllAddrs tries to creates a connection to the addresses, and stop when at // the first successful one. It returns an error if no address was successfully // connected, or updates ac appropriately with the new transport. 
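The wait loop added to Close() above (for ... ExitingIdle { cc.exitIdleCond.Wait() }) follows the standard sync.Cond recipe: hold the lock and re-check the predicate after every wakeup. A self-contained sketch of that recipe, with illustrative names:

package main

import (
	"fmt"
	"sync"
	"time"
)

// gate mimics the "wait until the channel has finished exiting idle" pattern.
type gate struct {
	mu   sync.Mutex
	cond *sync.Cond
	busy bool
}

func newGate() *gate {
	g := &gate{busy: true}
	g.cond = sync.NewCond(&g.mu)
	return g
}

func (g *gate) waitUntilFree() {
	g.mu.Lock()
	for g.busy { // predicate re-checked after each Wait
		g.cond.Wait()
	}
	g.mu.Unlock()
}

func (g *gate) free() {
	g.mu.Lock()
	g.busy = false
	g.mu.Unlock()
	g.cond.Broadcast() // wake all waiters
}

func main() {
	g := newGate()
	go func() { time.Sleep(10 * time.Millisecond); g.free() }()
	g.waitUntilFree()
	fmt.Println("proceeding with shutdown")
}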
-func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.Time) error { +func (ac *addrConn) tryAllAddrs(ctx context.Context, addrs []resolver.Address, connectDeadline time.Time) error { var firstConnErr error for _, addr := range addrs { - ac.mu.Lock() - if ac.state == connectivity.Shutdown { - ac.mu.Unlock() + if ctx.Err() != nil { return errConnClosing } + ac.mu.Lock() ac.cc.mu.RLock() ac.dopts.copts.KeepaliveParams = ac.cc.mkp @@ -1217,7 +1452,7 @@ channelz.Infof(logger, ac.channelzID, "Subchannel picks a new address %q to connect", addr.Addr) - err := ac.createTransport(addr, copts, connectDeadline) + err := ac.createTransport(ctx, addr, copts, connectDeadline) if err == nil { return nil } @@ -1234,19 +1469,20 @@ // createTransport creates a connection to addr. It returns an error if the // address was not successfully connected, or updates ac appropriately with the // new transport. -func (ac *addrConn) createTransport(addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) error { +func (ac *addrConn) createTransport(ctx context.Context, addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) error { addr.ServerName = ac.cc.getServerName(addr) - hctx, hcancel := context.WithCancel(ac.ctx) + hctx, hcancel := context.WithCancel(ctx) onClose := func(r transport.GoAwayReason) { ac.mu.Lock() defer ac.mu.Unlock() // adjust params based on GoAwayReason ac.adjustParams(r) - if ac.state == connectivity.Shutdown { - // Already shut down. tearDown() already cleared the transport and - // canceled hctx via ac.ctx, and we expected this connection to be - // closed, so do nothing here. + if ctx.Err() != nil { + // Already shut down or connection attempt canceled. tearDown() or + // updateAddrs() already cleared the transport and canceled hctx + // via ac.ctx, and we expected this connection to be closed, so do + // nothing here. return } hcancel() @@ -1265,7 +1501,7 @@ ac.updateConnectivityState(connectivity.Idle, nil) } - connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline) + connectCtx, cancel := context.WithDeadline(ctx, connectDeadline) defer cancel() copts.ChannelzParentID = ac.channelzID @@ -1282,7 +1518,7 @@ ac.mu.Lock() defer ac.mu.Unlock() - if ac.state == connectivity.Shutdown { + if ctx.Err() != nil { // This can happen if the subConn was removed while in `Connecting` // state. tearDown() would have set the state to `Shutdown`, but // would not have closed the transport since ac.transport would not @@ -1294,6 +1530,9 @@ // The error we pass to Close() is immaterial since there are no open // streams at this point, so no trailers with error details will be sent // out. We just need to pass a non-nil error. + // + // This can also happen when updateAddrs is called during a connection + // attempt. go newTr.Close(transport.ErrConnClosing) return nil } @@ -1353,7 +1592,7 @@ // Set up the health check helper functions. currentTr := ac.transport - newStream := func(method string) (interface{}, error) { + newStream := func(method string) (any, error) { ac.mu.Lock() if ac.transport != currentTr { ac.mu.Unlock() @@ -1401,6 +1640,29 @@ return nil } +// getTransport waits until the addrconn is ready and returns the transport. +// If the context expires first, returns an appropriate status. If the +// addrConn is stopped first, returns an Unavailable status error. 
+func (ac *addrConn) getTransport(ctx context.Context) (transport.ClientTransport, error) { + for ctx.Err() == nil { + ac.mu.Lock() + t, state, sc := ac.transport, ac.state, ac.stateChan + ac.mu.Unlock() + if state == connectivity.Ready { + return t, nil + } + if state == connectivity.Shutdown { + return nil, status.Errorf(codes.Unavailable, "SubConn shutting down") + } + + select { + case <-ctx.Done(): + case <-sc: + } + } + return nil, status.FromContextError(ctx.Err()).Err() +} + // tearDown starts to tear down the addrConn. // // Note that tearDown doesn't remove ac from ac.cc.conns, so the addrConn struct @@ -1418,16 +1680,7 @@ ac.updateConnectivityState(connectivity.Shutdown, nil) ac.cancel() ac.curAddr = resolver.Address{} - if err == errConnDrain && curTr != nil { - // GracefulClose(...) may be executed multiple times when - // i) receiving multiple GoAway frames from the server; or - // ii) there are concurrent name resolver/Balancer triggered - // address removal and GoAway. - // We have to unlock and re-lock here because GracefulClose => Close => onClose, which requires locking ac.mu. - ac.mu.Unlock() - curTr.GracefulClose() - ac.mu.Lock() - } + channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ Desc: "Subchannel deleted", Severity: channelz.CtInfo, @@ -1441,6 +1694,29 @@ // being deleted right away. channelz.RemoveEntry(ac.channelzID) ac.mu.Unlock() + + // We have to release the lock before the call to GracefulClose/Close here + // because both of them call onClose(), which requires locking ac.mu. + if curTr != nil { + if err == errConnDrain { + // Close the transport gracefully when the subConn is being shutdown. + // + // GracefulClose() may be executed multiple times if: + // - multiple GoAway frames are received from the server + // - there are concurrent name resolver or balancer triggered + // address removal and GoAway + curTr.GracefulClose() + } else { + // Hard close the transport when the channel is entering idle or is + // being shutdown. In the case where the channel is being shutdown, + // closing of transports is also taken care of by cancelation of cc.ctx. + // But in the case where the channel is entering idle, we need to + // explicitly close the transports here. Instead of distinguishing + // between these two cases, it is simpler to close the transport + // unconditionally here. + curTr.Close(err) + } + } } func (ac *addrConn) getState() connectivity.State { @@ -1552,7 +1828,14 @@ return cc.lastConnectionError } -func (cc *ClientConn) parseTargetAndFindResolver() (resolver.Builder, error) { +// parseTargetAndFindResolver parses the user's dial target and stores the +// parsed target in `cc.parsedTarget`. +// +// The resolver to use is determined based on the scheme in the parsed target +// and the same is stored in `cc.resolverBuilder`. +// +// Doesn't grab cc.mu as this method is expected to be called only at Dial time. 
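The new stateChan field ("closed and recreated on every state change") and the getTransport loop above use a common Go broadcast idiom: waiters snapshot the current channel under the lock and block until it is closed. A generic sketch of the idiom, independent of gRPC:

package main

import (
	"fmt"
	"sync"
	"time"
)

// watched holds a value plus a channel that is closed (and replaced)
// every time the value changes, so any number of waiters are woken up.
type watched struct {
	mu    sync.Mutex
	state string
	ch    chan struct{}
}

func newWatched(s string) *watched { return &watched{state: s, ch: make(chan struct{})} }

func (w *watched) set(s string) {
	w.mu.Lock()
	defer w.mu.Unlock()
	if w.state == s {
		return
	}
	w.state = s
	close(w.ch)                // wake every current waiter
	w.ch = make(chan struct{}) // future waiters wait on a fresh channel
}

func (w *watched) get() (string, <-chan struct{}) {
	w.mu.Lock()
	defer w.mu.Unlock()
	return w.state, w.ch
}

func main() {
	w := newWatched("CONNECTING")
	go func() { time.Sleep(10 * time.Millisecond); w.set("READY") }()
	for {
		s, ch := w.get()
		if s == "READY" {
			fmt.Println("ready")
			return
		}
		<-ch // blocks until the next state change
	}
}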
+func (cc *ClientConn) parseTargetAndFindResolver() error { channelz.Infof(logger, cc.channelzID, "original dial target is: %q", cc.target) var rb resolver.Builder @@ -1564,7 +1847,8 @@ rb = cc.getResolver(parsedTarget.URL.Scheme) if rb != nil { cc.parsedTarget = parsedTarget - return rb, nil + cc.resolverBuilder = rb + return nil } } @@ -1579,38 +1863,98 @@ parsedTarget, err = parseTarget(canonicalTarget) if err != nil { channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", canonicalTarget, err) - return nil, err + return err } channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget) rb = cc.getResolver(parsedTarget.URL.Scheme) if rb == nil { - return nil, fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.URL.Scheme) + return fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.URL.Scheme) } cc.parsedTarget = parsedTarget - return rb, nil + cc.resolverBuilder = rb + return nil } // parseTarget uses RFC 3986 semantics to parse the given target into a -// resolver.Target struct containing scheme, authority and url. Query -// params are stripped from the endpoint. +// resolver.Target struct containing url. Query params are stripped from the +// endpoint. func parseTarget(target string) (resolver.Target, error) { u, err := url.Parse(target) if err != nil { return resolver.Target{}, err } - return resolver.Target{ - Scheme: u.Scheme, - Authority: u.Host, - URL: *u, - }, nil + return resolver.Target{URL: *u}, nil +} + +func encodeAuthority(authority string) string { + const upperhex = "0123456789ABCDEF" + + // Return for characters that must be escaped as per + // Valid chars are mentioned here: + // https://datatracker.ietf.org/doc/html/rfc3986#section-3.2 + shouldEscape := func(c byte) bool { + // Alphanum are always allowed. + if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' { + return false + } + switch c { + case '-', '_', '.', '~': // Unreserved characters + return false + case '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // Subdelim characters + return false + case ':', '[', ']', '@': // Authority related delimeters + return false + } + // Everything else must be escaped. + return true + } + + hexCount := 0 + for i := 0; i < len(authority); i++ { + c := authority[i] + if shouldEscape(c) { + hexCount++ + } + } + + if hexCount == 0 { + return authority + } + + required := len(authority) + 2*hexCount + t := make([]byte, required) + + j := 0 + // This logic is a barebones version of escape in the go net/url library. + for i := 0; i < len(authority); i++ { + switch c := authority[i]; { + case shouldEscape(c): + t[j] = '%' + t[j+1] = upperhex[c>>4] + t[j+2] = upperhex[c&15] + j += 3 + default: + t[j] = authority[i] + j++ + } + } + return string(t) } // Determine channel authority. The order of precedence is as follows: // - user specified authority override using `WithAuthority` dial option // - creds' notion of server name for the authentication handshake // - endpoint from dial target of the form "scheme://[authority]/endpoint" -func determineAuthority(endpoint, target string, dopts dialOptions) (string, error) { +// +// Stores the determined authority in `cc.authority`. +// +// Returns a non-nil error if the authority returned by the transport +// credentials do not match the authority configured through the dial option. +// +// Doesn't grab cc.mu as this method is expected to be called only at Dial time. 
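encodeAuthority above percent-escapes every byte outside the RFC 3986 unreserved, sub-delimiter and authority-delimiter sets, so a path-like endpoint such as a/b/c yields a valid :authority value. A rough restatement of that rule for illustration (this is not the vendored function):

package main

import (
	"fmt"
	"strings"
)

// escapeAuthority keeps RFC 3986 unreserved characters, sub-delims and
// authority delimiters, and percent-encodes everything else in uppercase hex.
func escapeAuthority(s string) string {
	const upperhex = "0123456789ABCDEF"
	keep := func(c byte) bool {
		if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' {
			return true
		}
		return strings.IndexByte("-_.~!$&'()*+,;=:[]@", c) >= 0
	}
	var b strings.Builder
	for i := 0; i < len(s); i++ {
		c := s[i]
		if keep(c) {
			b.WriteByte(c)
			continue
		}
		b.WriteByte('%')
		b.WriteByte(upperhex[c>>4])
		b.WriteByte(upperhex[c&15])
	}
	return b.String()
}

func main() {
	// A slash is in none of the allowed sets, so a path-like endpoint is escaped.
	fmt.Println(escapeAuthority("a/b/c")) // a%2Fb%2Fc
}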
+func (cc *ClientConn) determineAuthority() error { + dopts := cc.dopts // Historically, we had two options for users to specify the serverName or // authority for a channel. One was through the transport credentials // (either in its constructor, or through the OverrideServerName() method). @@ -1627,25 +1971,62 @@ } authorityFromDialOption := dopts.authority if (authorityFromCreds != "" && authorityFromDialOption != "") && authorityFromCreds != authorityFromDialOption { - return "", fmt.Errorf("ClientConn's authority from transport creds %q and dial option %q don't match", authorityFromCreds, authorityFromDialOption) + return fmt.Errorf("ClientConn's authority from transport creds %q and dial option %q don't match", authorityFromCreds, authorityFromDialOption) } + endpoint := cc.parsedTarget.Endpoint() + target := cc.target switch { case authorityFromDialOption != "": - return authorityFromDialOption, nil + cc.authority = authorityFromDialOption case authorityFromCreds != "": - return authorityFromCreds, nil + cc.authority = authorityFromCreds case strings.HasPrefix(target, "unix:") || strings.HasPrefix(target, "unix-abstract:"): // TODO: remove when the unix resolver implements optional interface to // return channel authority. - return "localhost", nil + cc.authority = "localhost" case strings.HasPrefix(endpoint, ":"): - return "localhost" + endpoint, nil + cc.authority = "localhost" + endpoint default: // TODO: Define an optional interface on the resolver builder to return // the channel authority given the user's dial target. For resolvers // which don't implement this interface, we will use the endpoint from // "scheme://authority/endpoint" as the default authority. - return endpoint, nil + // Escape the endpoint to handle use cases where the endpoint + // might not be a valid authority by default. + // For example an endpoint which has multiple paths like + // 'a/b/c', which is not a valid authority by default. + cc.authority = encodeAuthority(endpoint) } + channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority) + return nil +} + +// initResolverWrapper creates a ccResolverWrapper, which builds the name +// resolver. This method grabs the lock to assign the newly built resolver +// wrapper to the cc.resolverWrapper field. +func (cc *ClientConn) initResolverWrapper(creds credentials.TransportCredentials) error { + rw, err := newCCResolverWrapper(cc, ccResolverWrapperOpts{ + target: cc.parsedTarget, + builder: cc.resolverBuilder, + bOpts: resolver.BuildOptions{ + DisableServiceConfig: cc.dopts.disableServiceConfig, + DialCreds: creds, + CredsBundle: cc.dopts.copts.CredsBundle, + Dialer: cc.dopts.copts.Dialer, + }, + channelzID: cc.channelzID, + }) + if err != nil { + return fmt.Errorf("failed to build resolver: %v", err) + } + // Resolver implementations may report state update or error inline when + // built (or right after), and this is handled in cc.updateResolverState. + // Also, an error from the resolver might lead to a re-resolution request + // from the balancer, which is handled in resolveNow() where + // `cc.resolverWrapper` is accessed. Hence, we need to hold the lock here. 
+ cc.mu.Lock() + cc.resolverWrapper = rw + cc.mu.Unlock() + return nil } diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/codec.go temporal-1.22.5/src/vendor/google.golang.org/grpc/codec.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/codec.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/codec.go 2024-02-23 09:46:15.000000000 +0000 @@ -27,8 +27,8 @@ // omits the name/string, which vary between the two and are not needed for // anything besides the registry in the encoding package. type baseCodec interface { - Marshal(v interface{}) ([]byte, error) - Unmarshal(data []byte, v interface{}) error + Marshal(v any) ([]byte, error) + Unmarshal(data []byte, v any) error } var _ baseCodec = Codec(nil) @@ -41,9 +41,9 @@ // Deprecated: use encoding.Codec instead. type Codec interface { // Marshal returns the wire format of v. - Marshal(v interface{}) ([]byte, error) + Marshal(v any) ([]byte, error) // Unmarshal parses the wire format into v. - Unmarshal(data []byte, v interface{}) error + Unmarshal(data []byte, v any) error // String returns the name of the Codec implementation. This is unused by // gRPC. String() string diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go temporal-1.22.5/src/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go 2024-02-23 09:46:15.000000000 +0000 @@ -25,8 +25,8 @@ "fmt" "io" "net" - "sync" + "golang.org/x/sync/semaphore" grpc "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" @@ -35,15 +35,13 @@ "google.golang.org/grpc/credentials/alts/internal/conn" altsgrpc "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp" altspb "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp" + "google.golang.org/grpc/internal/envconfig" ) const ( // The maximum byte size of receive frames. frameLimit = 64 * 1024 // 64 KB rekeyRecordProtocolName = "ALTSRP_GCM_AES128_REKEY" - // maxPendingHandshakes represents the maximum number of concurrent - // handshakes. - maxPendingHandshakes = 100 ) var ( @@ -59,9 +57,9 @@ return conn.NewAES128GCMRekey(s, keyData) }, } - // control number of concurrent created (but not closed) handshakers. - mu sync.Mutex - concurrentHandshakes = int64(0) + // control number of concurrent created (but not closed) handshakes. + clientHandshakes = semaphore.NewWeighted(int64(envconfig.ALTSMaxConcurrentHandshakes)) + serverHandshakes = semaphore.NewWeighted(int64(envconfig.ALTSMaxConcurrentHandshakes)) // errDropped occurs when maxPendingHandshakes is reached. errDropped = errors.New("maximum number of concurrent ALTS handshakes is reached") // errOutOfBound occurs when the handshake service returns a consumed @@ -77,30 +75,6 @@ } } -func acquire() bool { - mu.Lock() - // If we need n to be configurable, we can pass it as an argument. - n := int64(1) - success := maxPendingHandshakes-concurrentHandshakes >= n - if success { - concurrentHandshakes += n - } - mu.Unlock() - return success -} - -func release() { - mu.Lock() - // If we need n to be configurable, we can pass it as an argument. 
- n := int64(1) - concurrentHandshakes -= n - if concurrentHandshakes < 0 { - mu.Unlock() - panic("bad release") - } - mu.Unlock() -} - // ClientHandshakerOptions contains the client handshaker options that can // provided by the caller. type ClientHandshakerOptions struct { @@ -134,10 +108,6 @@ return &ServerHandshakerOptions{} } -// TODO: add support for future local and remote endpoint in both client options -// and server options (server options struct does not exist now. When -// caller can provide endpoints, it should be created. - // altsHandshaker is used to complete an ALTS handshake between client and // server. This handshaker talks to the ALTS handshaker service in the metadata // server. @@ -185,10 +155,10 @@ // ClientHandshake starts and completes a client ALTS handshake for GCP. Once // done, ClientHandshake returns a secure connection. func (h *altsHandshaker) ClientHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) { - if !acquire() { + if !clientHandshakes.TryAcquire(1) { return nil, nil, errDropped } - defer release() + defer clientHandshakes.Release(1) if h.side != core.ClientSide { return nil, nil, errors.New("only handshakers created using NewClientHandshaker can perform a client handshaker") @@ -238,10 +208,10 @@ // ServerHandshake starts and completes a server ALTS handshake for GCP. Once // done, ServerHandshake returns a secure connection. func (h *altsHandshaker) ServerHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) { - if !acquire() { + if !serverHandshakes.TryAcquire(1) { return nil, nil, errDropped } - defer release() + defer serverHandshakes.Release(1) if h.side != core.ServerSide { return nil, nil, errors.New("only handshakers created using NewServerHandshaker can perform a server handshaker") @@ -264,8 +234,6 @@ } // Prepare server parameters. - // TODO: currently only ALTS parameters are provided. Might need to use - // more options in the future. params := make(map[int32]*altspb.ServerHandshakeParameters) params[int32(altspb.HandshakeProtocol_ALTS)] = &altspb.ServerHandshakeParameters{ RecordProtocols: recordProtocols, @@ -391,3 +359,10 @@ h.stream.CloseSend() } } + +// ResetConcurrentHandshakeSemaphoreForTesting resets the handshake semaphores +// to allow numberOfAllowedHandshakes concurrent handshakes each. +func ResetConcurrentHandshakeSemaphoreForTesting(numberOfAllowedHandshakes int64) { + clientHandshakes = semaphore.NewWeighted(numberOfAllowedHandshakes) + serverHandshakes = semaphore.NewWeighted(numberOfAllowedHandshakes) +} diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go temporal-1.22.5/src/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go 2024-02-23 09:46:15.000000000 +0000 @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
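The hand-rolled mutex-and-counter limiter above is replaced by golang.org/x/sync/semaphore, with separate client and server weights sized from envconfig.ALTSMaxConcurrentHandshakes. A standalone sketch of the non-blocking admission pattern; the limit of 100 is only an example value:

package main

import (
	"errors"
	"fmt"

	"golang.org/x/sync/semaphore"
)

var errDropped = errors.New("maximum number of concurrent handshakes reached")

// handshakes admits at most 100 concurrent handshakes without blocking:
// TryAcquire fails immediately when the weight is exhausted, shedding load
// instead of queueing, which matches the previous errDropped behaviour.
var handshakes = semaphore.NewWeighted(100)

func doHandshake() error {
	if !handshakes.TryAcquire(1) {
		return errDropped
	}
	defer handshakes.Release(1)
	// ... perform the actual handshake here ...
	return nil
}

func main() {
	fmt.Println(doHandshake()) // <nil>
}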
// versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.31.0 // protoc v4.22.0 // source: grpc/gcp/altscontext.proto diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go temporal-1.22.5/src/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go 2024-02-23 09:46:15.000000000 +0000 @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.31.0 // protoc v4.22.0 // source: grpc/gcp/handshaker.proto diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go temporal-1.22.5/src/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go 2024-02-23 09:46:15.000000000 +0000 @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.31.0 // protoc v4.22.0 // source: grpc/gcp/transport_security_common.proto diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/dialoptions.go temporal-1.22.5/src/vendor/google.golang.org/grpc/dialoptions.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/dialoptions.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/dialoptions.go 2024-02-23 09:46:15.000000000 +0000 @@ -77,6 +77,8 @@ defaultServiceConfig *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON. defaultServiceConfigRawJSON *string resolvers []resolver.Builder + idleTimeout time.Duration + recvBufferPool SharedBufferPool } // DialOption configures how we set up the connection. @@ -137,6 +139,20 @@ return &joinDialOption{opts: opts} } +// WithSharedWriteBuffer allows reusing per-connection transport write buffer. +// If this option is set to true every connection will release the buffer after +// flushing the data on the wire. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithSharedWriteBuffer(val bool) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.SharedWriteBuffer = val + }) +} + // WithWriteBufferSize determines how much data can be batched before doing a // write on the wire. The corresponding memory allocation for this buffer will // be twice the size to keep syscalls low. The default value for this buffer is @@ -627,6 +643,7 @@ ReadBufferSize: defaultReadBufSize, UseProxy: true, }, + recvBufferPool: nopBufferPool{}, } } @@ -655,3 +672,44 @@ o.resolvers = append(o.resolvers, rs...) }) } + +// WithIdleTimeout returns a DialOption that configures an idle timeout for the +// channel. If the channel is idle for the configured timeout, i.e there are no +// ongoing RPCs and no new RPCs are initiated, the channel will enter idle mode +// and as a result the name resolver and load balancer will be shut down. 
The +// channel will exit idle mode when the Connect() method is called or when an +// RPC is initiated. +// +// By default this feature is disabled, which can also be explicitly configured +// by passing zero to this function. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithIdleTimeout(d time.Duration) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.idleTimeout = d + }) +} + +// WithRecvBufferPool returns a DialOption that configures the ClientConn +// to use the provided shared buffer pool for parsing incoming messages. Depending +// on the application's workload, this could result in reduced memory allocation. +// +// If you are unsure about how to implement a memory pool but want to utilize one, +// begin with grpc.NewSharedBufferPool. +// +// Note: The shared buffer pool feature will not be active if any of the following +// options are used: WithStatsHandler, EnableTracing, or binary logging. In such +// cases, the shared buffer pool will be ignored. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithRecvBufferPool(bufferPool SharedBufferPool) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.recvBufferPool = bufferPool + }) +} diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/encoding/encoding.go temporal-1.22.5/src/vendor/google.golang.org/grpc/encoding/encoding.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/encoding/encoding.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/encoding/encoding.go 2024-02-23 09:46:15.000000000 +0000 @@ -90,9 +90,9 @@ // methods can be called from concurrent goroutines. type Codec interface { // Marshal returns the wire format of v. - Marshal(v interface{}) ([]byte, error) + Marshal(v any) ([]byte, error) // Unmarshal parses the wire format into v. - Unmarshal(data []byte, v interface{}) error + Unmarshal(data []byte, v any) error // Name returns the name of the Codec implementation. The returned string // will be used as part of content type in transmission. The result must be // static; the result cannot change between calls. 
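The new WithIdleTimeout and WithRecvBufferPool dial options above are opt-in and marked experimental. A hedged usage sketch; the target address is a placeholder and plaintext credentials are assumed:

package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Both options below are experimental in this gRPC release and may change.
	conn, err := grpc.Dial("localhost:7233",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithIdleTimeout(30*time.Minute),                // enter idle mode after 30m without RPCs
		grpc.WithRecvBufferPool(grpc.NewSharedBufferPool()), // reuse receive buffers across RPCs
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}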
diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/encoding/gzip/gzip.go temporal-1.22.5/src/vendor/google.golang.org/grpc/encoding/gzip/gzip.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/encoding/gzip/gzip.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/encoding/gzip/gzip.go 2024-02-23 09:46:15.000000000 +0000 @@ -40,7 +40,7 @@ func init() { c := &compressor{} - c.poolCompressor.New = func() interface{} { + c.poolCompressor.New = func() any { return &writer{Writer: gzip.NewWriter(io.Discard), pool: &c.poolCompressor} } encoding.RegisterCompressor(c) @@ -61,7 +61,7 @@ return fmt.Errorf("grpc: invalid gzip compression level: %d", level) } c := encoding.GetCompressor(Name).(*compressor) - c.poolCompressor.New = func() interface{} { + c.poolCompressor.New = func() any { w, err := gzip.NewWriterLevel(io.Discard, level) if err != nil { panic(err) diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/encoding/proto/proto.go temporal-1.22.5/src/vendor/google.golang.org/grpc/encoding/proto/proto.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/encoding/proto/proto.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/encoding/proto/proto.go 2024-02-23 09:46:15.000000000 +0000 @@ -37,7 +37,7 @@ // codec is a Codec implementation with protobuf. It is the default codec for gRPC. type codec struct{} -func (codec) Marshal(v interface{}) ([]byte, error) { +func (codec) Marshal(v any) ([]byte, error) { vv, ok := v.(proto.Message) if !ok { return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v) @@ -45,7 +45,7 @@ return proto.Marshal(vv) } -func (codec) Unmarshal(data []byte, v interface{}) error { +func (codec) Unmarshal(data []byte, v any) error { vv, ok := v.(proto.Message) if !ok { return fmt.Errorf("failed to unmarshal, message is %T, want proto.Message", v) diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/examples/helloworld/helloworld/helloworld.pb.go temporal-1.22.5/src/vendor/google.golang.org/grpc/examples/helloworld/helloworld/helloworld.pb.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/examples/helloworld/helloworld/helloworld.pb.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/examples/helloworld/helloworld/helloworld.pb.go 2024-02-23 09:46:15.000000000 +0000 @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 -// protoc v3.14.0 +// protoc-gen-go v1.30.0 +// protoc v4.22.0 // source: examples/helloworld/helloworld/helloworld.proto package helloworld diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/examples/helloworld/helloworld/helloworld_grpc.pb.go temporal-1.22.5/src/vendor/google.golang.org/grpc/examples/helloworld/helloworld/helloworld_grpc.pb.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/examples/helloworld/helloworld/helloworld_grpc.pb.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/examples/helloworld/helloworld/helloworld_grpc.pb.go 2024-02-23 09:46:15.000000000 +0000 @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
// versions: -// - protoc-gen-go-grpc v1.2.0 -// - protoc v3.14.0 +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.22.0 // source: examples/helloworld/helloworld/helloworld.proto package helloworld diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/grpclog/component.go temporal-1.22.5/src/vendor/google.golang.org/grpc/grpclog/component.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/grpclog/component.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/grpclog/component.go 2024-02-23 09:46:15.000000000 +0000 @@ -31,71 +31,71 @@ var cache = map[string]*componentData{} -func (c *componentData) InfoDepth(depth int, args ...interface{}) { - args = append([]interface{}{"[" + string(c.name) + "]"}, args...) +func (c *componentData) InfoDepth(depth int, args ...any) { + args = append([]any{"[" + string(c.name) + "]"}, args...) grpclog.InfoDepth(depth+1, args...) } -func (c *componentData) WarningDepth(depth int, args ...interface{}) { - args = append([]interface{}{"[" + string(c.name) + "]"}, args...) +func (c *componentData) WarningDepth(depth int, args ...any) { + args = append([]any{"[" + string(c.name) + "]"}, args...) grpclog.WarningDepth(depth+1, args...) } -func (c *componentData) ErrorDepth(depth int, args ...interface{}) { - args = append([]interface{}{"[" + string(c.name) + "]"}, args...) +func (c *componentData) ErrorDepth(depth int, args ...any) { + args = append([]any{"[" + string(c.name) + "]"}, args...) grpclog.ErrorDepth(depth+1, args...) } -func (c *componentData) FatalDepth(depth int, args ...interface{}) { - args = append([]interface{}{"[" + string(c.name) + "]"}, args...) +func (c *componentData) FatalDepth(depth int, args ...any) { + args = append([]any{"[" + string(c.name) + "]"}, args...) grpclog.FatalDepth(depth+1, args...) } -func (c *componentData) Info(args ...interface{}) { +func (c *componentData) Info(args ...any) { c.InfoDepth(1, args...) } -func (c *componentData) Warning(args ...interface{}) { +func (c *componentData) Warning(args ...any) { c.WarningDepth(1, args...) } -func (c *componentData) Error(args ...interface{}) { +func (c *componentData) Error(args ...any) { c.ErrorDepth(1, args...) } -func (c *componentData) Fatal(args ...interface{}) { +func (c *componentData) Fatal(args ...any) { c.FatalDepth(1, args...) } -func (c *componentData) Infof(format string, args ...interface{}) { +func (c *componentData) Infof(format string, args ...any) { c.InfoDepth(1, fmt.Sprintf(format, args...)) } -func (c *componentData) Warningf(format string, args ...interface{}) { +func (c *componentData) Warningf(format string, args ...any) { c.WarningDepth(1, fmt.Sprintf(format, args...)) } -func (c *componentData) Errorf(format string, args ...interface{}) { +func (c *componentData) Errorf(format string, args ...any) { c.ErrorDepth(1, fmt.Sprintf(format, args...)) } -func (c *componentData) Fatalf(format string, args ...interface{}) { +func (c *componentData) Fatalf(format string, args ...any) { c.FatalDepth(1, fmt.Sprintf(format, args...)) } -func (c *componentData) Infoln(args ...interface{}) { +func (c *componentData) Infoln(args ...any) { c.InfoDepth(1, args...) } -func (c *componentData) Warningln(args ...interface{}) { +func (c *componentData) Warningln(args ...any) { c.WarningDepth(1, args...) } -func (c *componentData) Errorln(args ...interface{}) { +func (c *componentData) Errorln(args ...any) { c.ErrorDepth(1, args...) 
} -func (c *componentData) Fatalln(args ...interface{}) { +func (c *componentData) Fatalln(args ...any) { c.FatalDepth(1, args...) } diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/grpclog/grpclog.go temporal-1.22.5/src/vendor/google.golang.org/grpc/grpclog/grpclog.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/grpclog/grpclog.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/grpclog/grpclog.go 2024-02-23 09:46:15.000000000 +0000 @@ -42,53 +42,53 @@ } // Info logs to the INFO log. -func Info(args ...interface{}) { +func Info(args ...any) { grpclog.Logger.Info(args...) } // Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf. -func Infof(format string, args ...interface{}) { +func Infof(format string, args ...any) { grpclog.Logger.Infof(format, args...) } // Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println. -func Infoln(args ...interface{}) { +func Infoln(args ...any) { grpclog.Logger.Infoln(args...) } // Warning logs to the WARNING log. -func Warning(args ...interface{}) { +func Warning(args ...any) { grpclog.Logger.Warning(args...) } // Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf. -func Warningf(format string, args ...interface{}) { +func Warningf(format string, args ...any) { grpclog.Logger.Warningf(format, args...) } // Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println. -func Warningln(args ...interface{}) { +func Warningln(args ...any) { grpclog.Logger.Warningln(args...) } // Error logs to the ERROR log. -func Error(args ...interface{}) { +func Error(args ...any) { grpclog.Logger.Error(args...) } // Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf. -func Errorf(format string, args ...interface{}) { +func Errorf(format string, args ...any) { grpclog.Logger.Errorf(format, args...) } // Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println. -func Errorln(args ...interface{}) { +func Errorln(args ...any) { grpclog.Logger.Errorln(args...) } // Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print. // It calls os.Exit() with exit code 1. -func Fatal(args ...interface{}) { +func Fatal(args ...any) { grpclog.Logger.Fatal(args...) // Make sure fatal logs will exit. os.Exit(1) @@ -96,7 +96,7 @@ // Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf. // It calls os.Exit() with exit code 1. -func Fatalf(format string, args ...interface{}) { +func Fatalf(format string, args ...any) { grpclog.Logger.Fatalf(format, args...) // Make sure fatal logs will exit. os.Exit(1) @@ -104,7 +104,7 @@ // Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println. // It calle os.Exit()) with exit code 1. -func Fatalln(args ...interface{}) { +func Fatalln(args ...any) { grpclog.Logger.Fatalln(args...) // Make sure fatal logs will exit. os.Exit(1) @@ -113,20 +113,20 @@ // Print prints to the logger. Arguments are handled in the manner of fmt.Print. // // Deprecated: use Info. -func Print(args ...interface{}) { +func Print(args ...any) { grpclog.Logger.Info(args...) } // Printf prints to the logger. Arguments are handled in the manner of fmt.Printf. // // Deprecated: use Infof. -func Printf(format string, args ...interface{}) { +func Printf(format string, args ...any) { grpclog.Logger.Infof(format, args...) } // Println prints to the logger. 
Arguments are handled in the manner of fmt.Println. // // Deprecated: use Infoln. -func Println(args ...interface{}) { +func Println(args ...any) { grpclog.Logger.Infoln(args...) } diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/grpclog/logger.go temporal-1.22.5/src/vendor/google.golang.org/grpc/grpclog/logger.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/grpclog/logger.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/grpclog/logger.go 2024-02-23 09:46:15.000000000 +0000 @@ -24,12 +24,12 @@ // // Deprecated: use LoggerV2. type Logger interface { - Fatal(args ...interface{}) - Fatalf(format string, args ...interface{}) - Fatalln(args ...interface{}) - Print(args ...interface{}) - Printf(format string, args ...interface{}) - Println(args ...interface{}) + Fatal(args ...any) + Fatalf(format string, args ...any) + Fatalln(args ...any) + Print(args ...any) + Printf(format string, args ...any) + Println(args ...any) } // SetLogger sets the logger that is used in grpc. Call only from @@ -45,39 +45,39 @@ Logger } -func (g *loggerWrapper) Info(args ...interface{}) { +func (g *loggerWrapper) Info(args ...any) { g.Logger.Print(args...) } -func (g *loggerWrapper) Infoln(args ...interface{}) { +func (g *loggerWrapper) Infoln(args ...any) { g.Logger.Println(args...) } -func (g *loggerWrapper) Infof(format string, args ...interface{}) { +func (g *loggerWrapper) Infof(format string, args ...any) { g.Logger.Printf(format, args...) } -func (g *loggerWrapper) Warning(args ...interface{}) { +func (g *loggerWrapper) Warning(args ...any) { g.Logger.Print(args...) } -func (g *loggerWrapper) Warningln(args ...interface{}) { +func (g *loggerWrapper) Warningln(args ...any) { g.Logger.Println(args...) } -func (g *loggerWrapper) Warningf(format string, args ...interface{}) { +func (g *loggerWrapper) Warningf(format string, args ...any) { g.Logger.Printf(format, args...) } -func (g *loggerWrapper) Error(args ...interface{}) { +func (g *loggerWrapper) Error(args ...any) { g.Logger.Print(args...) } -func (g *loggerWrapper) Errorln(args ...interface{}) { +func (g *loggerWrapper) Errorln(args ...any) { g.Logger.Println(args...) } -func (g *loggerWrapper) Errorf(format string, args ...interface{}) { +func (g *loggerWrapper) Errorf(format string, args ...any) { g.Logger.Printf(format, args...) } diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/grpclog/loggerv2.go temporal-1.22.5/src/vendor/google.golang.org/grpc/grpclog/loggerv2.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/grpclog/loggerv2.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/grpclog/loggerv2.go 2024-02-23 09:46:15.000000000 +0000 @@ -33,35 +33,35 @@ // LoggerV2 does underlying logging work for grpclog. type LoggerV2 interface { // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. - Info(args ...interface{}) + Info(args ...any) // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. - Infoln(args ...interface{}) + Infoln(args ...any) // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. - Infof(format string, args ...interface{}) + Infof(format string, args ...any) // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. - Warning(args ...interface{}) + Warning(args ...any) // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. 
- Warningln(args ...interface{}) + Warningln(args ...any) // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. - Warningf(format string, args ...interface{}) + Warningf(format string, args ...any) // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. - Error(args ...interface{}) + Error(args ...any) // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. - Errorln(args ...interface{}) + Errorln(args ...any) // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. - Errorf(format string, args ...interface{}) + Errorf(format string, args ...any) // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. - Fatal(args ...interface{}) + Fatal(args ...any) // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. - Fatalln(args ...interface{}) + Fatalln(args ...any) // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. - Fatalf(format string, args ...interface{}) + Fatalf(format string, args ...any) // V reports whether verbosity level l is at least the requested verbose level. V(l int) bool } @@ -182,53 +182,53 @@ g.m[severity].Output(2, string(b)) } -func (g *loggerT) Info(args ...interface{}) { +func (g *loggerT) Info(args ...any) { g.output(infoLog, fmt.Sprint(args...)) } -func (g *loggerT) Infoln(args ...interface{}) { +func (g *loggerT) Infoln(args ...any) { g.output(infoLog, fmt.Sprintln(args...)) } -func (g *loggerT) Infof(format string, args ...interface{}) { +func (g *loggerT) Infof(format string, args ...any) { g.output(infoLog, fmt.Sprintf(format, args...)) } -func (g *loggerT) Warning(args ...interface{}) { +func (g *loggerT) Warning(args ...any) { g.output(warningLog, fmt.Sprint(args...)) } -func (g *loggerT) Warningln(args ...interface{}) { +func (g *loggerT) Warningln(args ...any) { g.output(warningLog, fmt.Sprintln(args...)) } -func (g *loggerT) Warningf(format string, args ...interface{}) { +func (g *loggerT) Warningf(format string, args ...any) { g.output(warningLog, fmt.Sprintf(format, args...)) } -func (g *loggerT) Error(args ...interface{}) { +func (g *loggerT) Error(args ...any) { g.output(errorLog, fmt.Sprint(args...)) } -func (g *loggerT) Errorln(args ...interface{}) { +func (g *loggerT) Errorln(args ...any) { g.output(errorLog, fmt.Sprintln(args...)) } -func (g *loggerT) Errorf(format string, args ...interface{}) { +func (g *loggerT) Errorf(format string, args ...any) { g.output(errorLog, fmt.Sprintf(format, args...)) } -func (g *loggerT) Fatal(args ...interface{}) { +func (g *loggerT) Fatal(args ...any) { g.output(fatalLog, fmt.Sprint(args...)) os.Exit(1) } -func (g *loggerT) Fatalln(args ...interface{}) { +func (g *loggerT) Fatalln(args ...any) { g.output(fatalLog, fmt.Sprintln(args...)) os.Exit(1) } -func (g *loggerT) Fatalf(format string, args ...interface{}) { +func (g *loggerT) Fatalf(format string, args ...any) { g.output(fatalLog, fmt.Sprintf(format, args...)) os.Exit(1) } @@ -248,11 +248,11 @@ type DepthLoggerV2 interface { LoggerV2 // InfoDepth logs to INFO log at the specified depth. 
Arguments are handled in the manner of fmt.Println. - InfoDepth(depth int, args ...interface{}) + InfoDepth(depth int, args ...any) // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. - WarningDepth(depth int, args ...interface{}) + WarningDepth(depth int, args ...any) // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. - ErrorDepth(depth int, args ...interface{}) + ErrorDepth(depth int, args ...any) // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. - FatalDepth(depth int, args ...interface{}) + FatalDepth(depth int, args ...any) } diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/health/client.go temporal-1.22.5/src/vendor/google.golang.org/grpc/health/client.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/health/client.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/health/client.go 2024-02-23 09:46:15.000000000 +0000 @@ -56,7 +56,7 @@ // This function implements the protocol defined at: // https://github.com/grpc/grpc/blob/master/doc/health-checking.md -func clientHealthCheck(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State, error), service string) error { +func clientHealthCheck(ctx context.Context, newStream func(string) (any, error), setConnectivityState func(connectivity.State, error), service string) error { tryCnt := 0 retryConnection: diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go temporal-1.22.5/src/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go 2024-02-23 09:46:15.000000000 +0000 @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.31.0 // protoc v4.22.0 // source: grpc/health/v1/health.proto diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/interceptor.go temporal-1.22.5/src/vendor/google.golang.org/grpc/interceptor.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/interceptor.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/interceptor.go 2024-02-23 09:46:15.000000000 +0000 @@ -23,7 +23,7 @@ ) // UnaryInvoker is called by UnaryClientInterceptor to complete RPCs. -type UnaryInvoker func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error +type UnaryInvoker func(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error // UnaryClientInterceptor intercepts the execution of a unary RPC on the client. // Unary interceptors can be specified as a DialOption, using @@ -40,7 +40,7 @@ // defaults from the ClientConn as well as per-call options. // // The returned error must be compatible with the status package. -type UnaryClientInterceptor func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error +type UnaryClientInterceptor func(ctx context.Context, method string, req, reply any, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error // Streamer is called by StreamClientInterceptor to create a ClientStream. 
type Streamer func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) @@ -66,7 +66,7 @@ // server side. All per-rpc information may be mutated by the interceptor. type UnaryServerInfo struct { // Server is the service implementation the user provides. This is read-only. - Server interface{} + Server any // FullMethod is the full RPC method string, i.e., /package.service/method. FullMethod string } @@ -78,13 +78,13 @@ // status package, or be one of the context errors. Otherwise, gRPC will use // codes.Unknown as the status code and err.Error() as the status message of the // RPC. -type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error) +type UnaryHandler func(ctx context.Context, req any) (any, error) // UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info // contains all the information of this RPC the interceptor can operate on. And handler is the wrapper // of the service method implementation. It is the responsibility of the interceptor to invoke handler // to complete the RPC. -type UnaryServerInterceptor func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (resp interface{}, err error) +type UnaryServerInterceptor func(ctx context.Context, req any, info *UnaryServerInfo, handler UnaryHandler) (resp any, err error) // StreamServerInfo consists of various information about a streaming RPC on // server side. All per-rpc information may be mutated by the interceptor. @@ -101,4 +101,4 @@ // info contains all the information of this RPC the interceptor can operate on. And handler is the // service method implementation. It is the responsibility of the interceptor to invoke handler to // complete the RPC. -type StreamServerInterceptor func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error +type StreamServerInterceptor func(srv any, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go temporal-1.22.5/src/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go 2024-02-23 09:46:15.000000000 +0000 @@ -200,8 +200,8 @@ } } -// UpdateSubConnState forwards the update to the appropriate child. -func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { +// updateSubConnState forwards the update to the appropriate child. +func (gsb *Balancer) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState, cb func(balancer.SubConnState)) { gsb.currentMu.Lock() defer gsb.currentMu.Unlock() gsb.mu.Lock() @@ -214,13 +214,26 @@ } else if gsb.balancerPending != nil && gsb.balancerPending.subconns[sc] { balToUpdate = gsb.balancerPending } - gsb.mu.Unlock() if balToUpdate == nil { // SubConn belonged to a stale lb policy that has not yet fully closed, // or the balancer was already closed. 
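The interceptor types above now use any as well. A minimal server-side interceptor written against those signatures; the logging behaviour and server wiring are illustrative only:

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
)

// loggingUnary logs the full method name of every unary call and then
// delegates to the real handler.
func loggingUnary(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
	log.Printf("unary call: %s", info.FullMethod)
	return handler(ctx, req)
}

func main() {
	s := grpc.NewServer(grpc.ChainUnaryInterceptor(loggingUnary))
	_ = s // register services and call s.Serve(listener) in a real server
}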
+ gsb.mu.Unlock() return } - balToUpdate.UpdateSubConnState(sc, state) + if state.ConnectivityState == connectivity.Shutdown { + delete(balToUpdate.subconns, sc) + } + gsb.mu.Unlock() + if cb != nil { + cb(state) + } else { + balToUpdate.UpdateSubConnState(sc, state) + } +} + +// UpdateSubConnState forwards the update to the appropriate child. +func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + gsb.updateSubConnState(sc, state, nil) } // Close closes any active child balancers. @@ -242,7 +255,7 @@ // // It implements the balancer.ClientConn interface and is passed down in that // capacity to the wrapped balancer. It maintains a set of subConns created by -// the wrapped balancer and calls from the latter to create/update/remove +// the wrapped balancer and calls from the latter to create/update/shutdown // SubConns update this set before being forwarded to the parent ClientConn. // State updates from the wrapped balancer can result in invocation of the // graceful switch logic. @@ -254,21 +267,10 @@ subconns map[balancer.SubConn]bool // subconns created by this balancer } -func (bw *balancerWrapper) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { - if state.ConnectivityState == connectivity.Shutdown { - bw.gsb.mu.Lock() - delete(bw.subconns, sc) - bw.gsb.mu.Unlock() - } - // There is no need to protect this read with a mutex, as the write to the - // Balancer field happens in SwitchTo, which completes before this can be - // called. - bw.Balancer.UpdateSubConnState(sc, state) -} - -// Close closes the underlying LB policy and removes the subconns it created. bw -// must not be referenced via balancerCurrent or balancerPending in gsb when -// called. gsb.mu must not be held. Does not panic with a nil receiver. +// Close closes the underlying LB policy and shuts down the subconns it +// created. bw must not be referenced via balancerCurrent or balancerPending in +// gsb when called. gsb.mu must not be held. Does not panic with a nil +// receiver. func (bw *balancerWrapper) Close() { // before Close is called. if bw == nil { @@ -281,7 +283,7 @@ bw.Balancer.Close() bw.gsb.mu.Lock() for sc := range bw.subconns { - bw.gsb.cc.RemoveSubConn(sc) + sc.Shutdown() } bw.gsb.mu.Unlock() } @@ -335,13 +337,16 @@ } bw.gsb.mu.Unlock() + var sc balancer.SubConn + oldListener := opts.StateListener + opts.StateListener = func(state balancer.SubConnState) { bw.gsb.updateSubConnState(sc, state, oldListener) } sc, err := bw.gsb.cc.NewSubConn(addrs, opts) if err != nil { return nil, err } bw.gsb.mu.Lock() if !bw.gsb.balancerCurrentOrPending(bw) { // balancer was closed during this call - bw.gsb.cc.RemoveSubConn(sc) + sc.Shutdown() bw.gsb.mu.Unlock() return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw) } @@ -360,13 +365,9 @@ } func (bw *balancerWrapper) RemoveSubConn(sc balancer.SubConn) { - bw.gsb.mu.Lock() - if !bw.gsb.balancerCurrentOrPending(bw) { - bw.gsb.mu.Unlock() - return - } - bw.gsb.mu.Unlock() - bw.gsb.cc.RemoveSubConn(sc) + // Note: existing third party balancers may call this, so it must remain + // until RemoveSubConn is fully removed. 
+ sc.Shutdown() } func (bw *balancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/internal/balancerload/load.go temporal-1.22.5/src/vendor/google.golang.org/grpc/internal/balancerload/load.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/internal/balancerload/load.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/internal/balancerload/load.go 2024-02-23 09:46:15.000000000 +0000 @@ -25,7 +25,7 @@ // Parser converts loads from metadata into a concrete type. type Parser interface { // Parse parses loads from metadata. - Parse(md metadata.MD) interface{} + Parse(md metadata.MD) any } var parser Parser @@ -38,7 +38,7 @@ } // Parse calls parser.Read(). -func Parse(md metadata.MD) interface{} { +func Parse(md metadata.MD) any { if parser == nil { return nil } diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go temporal-1.22.5/src/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go 2024-02-23 09:46:15.000000000 +0000 @@ -32,6 +32,9 @@ // Logger specifies MethodLoggers for method names with a Log call that // takes a context. +// +// This is used in the 1.0 release of gcp/observability, and thus must not be +// deleted or changed. type Logger interface { GetMethodLogger(methodName string) MethodLogger } diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go temporal-1.22.5/src/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go 2024-02-23 09:46:15.000000000 +0000 @@ -49,6 +49,9 @@ var idGen callIDGenerator // MethodLogger is the sub-logger for each method. +// +// This is used in the 1.0 release of gcp/observability, and thus must not be +// deleted or changed. type MethodLogger interface { Log(context.Context, LogEntryConfig) } @@ -65,6 +68,9 @@ } // NewTruncatingMethodLogger returns a new truncating method logger. +// +// This is used in the 1.0 release of gcp/observability, and thus must not be +// deleted or changed. func NewTruncatingMethodLogger(h, m uint64) *TruncatingMethodLogger { return &TruncatingMethodLogger{ headerMaxLen: h, @@ -145,6 +151,9 @@ } // LogEntryConfig represents the configuration for binary log entry. +// +// This is used in the 1.0 release of gcp/observability, and thus must not be +// deleted or changed. type LogEntryConfig interface { toProto() *binlogpb.GrpcLogEntry } @@ -221,7 +230,7 @@ OnClientSide bool // Message can be a proto.Message or []byte. Other messages formats are not // supported. - Message interface{} + Message any } func (c *ClientMessage) toProto() *binlogpb.GrpcLogEntry { @@ -261,7 +270,7 @@ OnClientSide bool // Message can be a proto.Message or []byte. Other messages formats are not // supported. 
- Message interface{} + Message any } func (c *ServerMessage) toProto() *binlogpb.GrpcLogEntry { diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/internal/buffer/unbounded.go temporal-1.22.5/src/vendor/google.golang.org/grpc/internal/buffer/unbounded.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/internal/buffer/unbounded.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/internal/buffer/unbounded.go 2024-02-23 09:46:15.000000000 +0000 @@ -28,35 +28,38 @@ // the underlying mutex used for synchronization. // // Unbounded supports values of any type to be stored in it by using a channel -// of `interface{}`. This means that a call to Put() incurs an extra memory -// allocation, and also that users need a type assertion while reading. For -// performance critical code paths, using Unbounded is strongly discouraged and -// defining a new type specific implementation of this buffer is preferred. See +// of `any`. This means that a call to Put() incurs an extra memory allocation, +// and also that users need a type assertion while reading. For performance +// critical code paths, using Unbounded is strongly discouraged and defining a +// new type specific implementation of this buffer is preferred. See // internal/transport/transport.go for an example of this. type Unbounded struct { - c chan interface{} + c chan any + closed bool mu sync.Mutex - backlog []interface{} + backlog []any } // NewUnbounded returns a new instance of Unbounded. func NewUnbounded() *Unbounded { - return &Unbounded{c: make(chan interface{}, 1)} + return &Unbounded{c: make(chan any, 1)} } // Put adds t to the unbounded buffer. -func (b *Unbounded) Put(t interface{}) { +func (b *Unbounded) Put(t any) { b.mu.Lock() + defer b.mu.Unlock() + if b.closed { + return + } if len(b.backlog) == 0 { select { case b.c <- t: - b.mu.Unlock() return default: } } b.backlog = append(b.backlog, t) - b.mu.Unlock() } // Load sends the earliest buffered data, if any, onto the read channel @@ -64,6 +67,10 @@ // value from the read channel. func (b *Unbounded) Load() { b.mu.Lock() + defer b.mu.Unlock() + if b.closed { + return + } if len(b.backlog) > 0 { select { case b.c <- b.backlog[0]: @@ -72,7 +79,6 @@ default: } } - b.mu.Unlock() } // Get returns a read channel on which values added to the buffer, via Put(), @@ -80,6 +86,20 @@ // // Upon reading a value from this channel, users are expected to call Load() to // send the next buffered value onto the channel if there is any. -func (b *Unbounded) Get() <-chan interface{} { +// +// If the unbounded buffer is closed, the read channel returned by this method +// is closed. +func (b *Unbounded) Get() <-chan any { return b.c } + +// Close closes the unbounded buffer. +func (b *Unbounded) Close() { + b.mu.Lock() + defer b.mu.Unlock() + if b.closed { + return + } + b.closed = true + close(b.c) +} diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/internal/channelz/funcs.go temporal-1.22.5/src/vendor/google.golang.org/grpc/internal/channelz/funcs.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/internal/channelz/funcs.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/internal/channelz/funcs.go 2024-02-23 09:46:15.000000000 +0000 @@ -24,9 +24,7 @@ package channelz import ( - "context" "errors" - "fmt" "sort" "sync" "sync/atomic" @@ -40,8 +38,11 @@ ) var ( - db dbWrapper - idGen idGenerator + // IDGen is the global channelz entity ID generator. 
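Editor's note on the buffer.Unbounded hunk above: the 1.22.5 vendor drop adds a Close method and makes Put/Load no-ops once the buffer is closed, while keeping the documented Put/Get/Load calling pattern. A minimal sketch of that pattern follows; it assumes Go 1.18+ for `any`, and since internal/buffer is a grpc-internal package the import path is shown for illustration only and cannot be used from external code.

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/internal/buffer" // grpc-internal; import shown for illustration only
)

func main() {
	buf := buffer.NewUnbounded()

	// Put never blocks: the first value lands on the read channel, later ones go to the backlog.
	for i := 0; i < 3; i++ {
		buf.Put(i) // each Put costs an extra allocation; values come back as `any`
	}

	for i := 0; i < 3; i++ {
		v := <-buf.Get()
		fmt.Println(v.(int)) // readers must type-assert
		buf.Load()           // move the next backlog entry, if any, onto the read channel
	}

	buf.Close() // after Close, Put/Load are no-ops and Get()'s channel is closed
}
```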
It should not be used + // outside this package except by tests. + IDGen IDGenerator + + db dbWrapper // EntryPerPage defines the number of channelz entries to be shown on a web page. EntryPerPage = int64(50) curState int32 @@ -52,14 +53,14 @@ func TurnOn() { if !IsOn() { db.set(newChannelMap()) - idGen.reset() + IDGen.Reset() atomic.StoreInt32(&curState, 1) } } // IsOn returns whether channelz data collection is on. func IsOn() bool { - return atomic.CompareAndSwapInt32(&curState, 1, 1) + return atomic.LoadInt32(&curState) == 1 } // SetMaxTraceEntry sets maximum number of trace entry per entity (i.e. channel/subchannel). @@ -97,43 +98,6 @@ return d.DB } -// NewChannelzStorageForTesting initializes channelz data storage and id -// generator for testing purposes. -// -// Returns a cleanup function to be invoked by the test, which waits for up to -// 10s for all channelz state to be reset by the grpc goroutines when those -// entities get closed. This cleanup function helps with ensuring that tests -// don't mess up each other. -func NewChannelzStorageForTesting() (cleanup func() error) { - db.set(newChannelMap()) - idGen.reset() - - return func() error { - cm := db.get() - if cm == nil { - return nil - } - - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - ticker := time.NewTicker(10 * time.Millisecond) - defer ticker.Stop() - for { - cm.mu.RLock() - topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets := len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets) - cm.mu.RUnlock() - - if err := ctx.Err(); err != nil { - return fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets) - } - if topLevelChannels == 0 && servers == 0 && channels == 0 && subChannels == 0 && listenSockets == 0 && normalSockets == 0 { - return nil - } - <-ticker.C - } - } -} - // GetTopChannels returns a slice of top channel's ChannelMetric, along with a // boolean indicating whether there's more top channels to be queried for. // @@ -193,7 +157,7 @@ // // If channelz is not turned ON, the channelz database is not mutated. func RegisterChannel(c Channel, pid *Identifier, ref string) *Identifier { - id := idGen.genID() + id := IDGen.genID() var parent int64 isTopChannel := true if pid != nil { @@ -229,7 +193,7 @@ if pid == nil { return nil, errors.New("a SubChannel's parent id cannot be nil") } - id := idGen.genID() + id := IDGen.genID() if !IsOn() { return newIdentifer(RefSubChannel, id, pid), nil } @@ -251,7 +215,7 @@ // // If channelz is not turned ON, the channelz database is not mutated. 
func RegisterServer(s Server, ref string) *Identifier { - id := idGen.genID() + id := IDGen.genID() if !IsOn() { return newIdentifer(RefServer, id, nil) } @@ -277,7 +241,7 @@ if pid == nil { return nil, errors.New("a ListenSocket's parent id cannot be 0") } - id := idGen.genID() + id := IDGen.genID() if !IsOn() { return newIdentifer(RefListenSocket, id, pid), nil } @@ -297,7 +261,7 @@ if pid == nil { return nil, errors.New("a NormalSocket's parent id cannot be 0") } - id := idGen.genID() + id := IDGen.genID() if !IsOn() { return newIdentifer(RefNormalSocket, id, pid), nil } @@ -776,14 +740,17 @@ return sm } -type idGenerator struct { +// IDGenerator is an incrementing atomic that tracks IDs for channelz entities. +type IDGenerator struct { id int64 } -func (i *idGenerator) reset() { +// Reset resets the generated ID back to zero. Should only be used at +// initialization or by tests sensitive to the ID number. +func (i *IDGenerator) Reset() { atomic.StoreInt64(&i.id, 0) } -func (i *idGenerator) genID() int64 { +func (i *IDGenerator) genID() int64 { return atomic.AddInt64(&i.id, 1) } diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/internal/channelz/logging.go temporal-1.22.5/src/vendor/google.golang.org/grpc/internal/channelz/logging.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/internal/channelz/logging.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/internal/channelz/logging.go 2024-02-23 09:46:15.000000000 +0000 @@ -31,7 +31,7 @@ } // Info logs and adds a trace event if channelz is on. -func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { +func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...any) { AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: fmt.Sprint(args...), Severity: CtInfo, @@ -39,7 +39,7 @@ } // Infof logs and adds a trace event if channelz is on. -func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { +func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) { AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: fmt.Sprintf(format, args...), Severity: CtInfo, @@ -47,7 +47,7 @@ } // Warning logs and adds a trace event if channelz is on. -func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { +func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...any) { AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: fmt.Sprint(args...), Severity: CtWarning, @@ -55,7 +55,7 @@ } // Warningf logs and adds a trace event if channelz is on. -func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { +func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) { AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: fmt.Sprintf(format, args...), Severity: CtWarning, @@ -63,7 +63,7 @@ } // Error logs and adds a trace event if channelz is on. -func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { +func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...any) { AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: fmt.Sprint(args...), Severity: CtError, @@ -71,7 +71,7 @@ } // Errorf logs and adds a trace event if channelz is on. 
-func Errorf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { +func Errorf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) { AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: fmt.Sprintf(format, args...), Severity: CtError, diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/internal/channelz/types.go temporal-1.22.5/src/vendor/google.golang.org/grpc/internal/channelz/types.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/internal/channelz/types.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/internal/channelz/types.go 2024-02-23 09:46:15.000000000 +0000 @@ -628,6 +628,7 @@ type channelTrace struct { cm *channelMap + clearCalled bool createdTime time.Time eventCount int64 mu sync.Mutex @@ -656,6 +657,10 @@ } func (c *channelTrace) clear() { + if c.clearCalled { + return + } + c.clearCalled = true c.mu.Lock() for _, e := range c.events { if e.RefID != 0 { diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/internal/channelz/util_linux.go temporal-1.22.5/src/vendor/google.golang.org/grpc/internal/channelz/util_linux.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/internal/channelz/util_linux.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/internal/channelz/util_linux.go 2024-02-23 09:46:15.000000000 +0000 @@ -23,7 +23,7 @@ ) // GetSocketOption gets the socket option info of the conn. -func GetSocketOption(socket interface{}) *SocketOptionData { +func GetSocketOption(socket any) *SocketOptionData { c, ok := socket.(syscall.Conn) if !ok { return nil diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go temporal-1.22.5/src/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go 2024-02-23 09:46:15.000000000 +0000 @@ -22,6 +22,6 @@ package channelz // GetSocketOption gets the socket option info of the conn. -func GetSocketOption(c interface{}) *SocketOptionData { +func GetSocketOption(c any) *SocketOptionData { return nil } diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/internal/credentials/credentials.go temporal-1.22.5/src/vendor/google.golang.org/grpc/internal/credentials/credentials.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/internal/credentials/credentials.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/internal/credentials/credentials.go 2024-02-23 09:46:15.000000000 +0000 @@ -25,12 +25,12 @@ type requestInfoKey struct{} // NewRequestInfoContext creates a context with ri. -func NewRequestInfoContext(ctx context.Context, ri interface{}) context.Context { +func NewRequestInfoContext(ctx context.Context, ri any) context.Context { return context.WithValue(ctx, requestInfoKey{}, ri) } // RequestInfoFromContext extracts the RequestInfo from ctx. -func RequestInfoFromContext(ctx context.Context) interface{} { +func RequestInfoFromContext(ctx context.Context) any { return ctx.Value(requestInfoKey{}) } @@ -39,11 +39,11 @@ type clientHandshakeInfoKey struct{} // ClientHandshakeInfoFromContext extracts the ClientHandshakeInfo from ctx. 
-func ClientHandshakeInfoFromContext(ctx context.Context) interface{} { +func ClientHandshakeInfoFromContext(ctx context.Context) any { return ctx.Value(clientHandshakeInfoKey{}) } // NewClientHandshakeInfoContext creates a context with chi. -func NewClientHandshakeInfoContext(ctx context.Context, chi interface{}) context.Context { +func NewClientHandshakeInfoContext(ctx context.Context, chi any) context.Context { return context.WithValue(ctx, clientHandshakeInfoKey{}, chi) } diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go temporal-1.22.5/src/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go 2024-02-23 09:46:15.000000000 +0000 @@ -36,6 +36,16 @@ // "GRPC_RING_HASH_CAP". This does not override the default bounds // checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M). RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024) + // PickFirstLBConfig is set if we should support configuration of the + // pick_first LB policy. + PickFirstLBConfig = boolFromEnv("GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG", true) + // LeastRequestLB is set if we should support the least_request_experimental + // LB policy, which can be enabled by setting the environment variable + // "GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST" to "true". + LeastRequestLB = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST", false) + // ALTSMaxConcurrentHandshakes is the maximum number of concurrent ALTS + // handshakes that can be performed. + ALTSMaxConcurrentHandshakes = uint64FromEnv("GRPC_ALTS_MAX_CONCURRENT_HANDSHAKES", 100, 1, 100) ) func boolFromEnv(envVar string, def bool) bool { diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/internal/envconfig/observability.go temporal-1.22.5/src/vendor/google.golang.org/grpc/internal/envconfig/observability.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/internal/envconfig/observability.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/internal/envconfig/observability.go 2024-02-23 09:46:15.000000000 +0000 @@ -28,9 +28,15 @@ var ( // ObservabilityConfig is the json configuration for the gcp/observability // package specified directly in the envObservabilityConfig env var. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. ObservabilityConfig = os.Getenv(envObservabilityConfig) // ObservabilityConfigFile is the json configuration for the // gcp/observability specified in a file with the location specified in // envObservabilityConfigFile env var. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. ObservabilityConfigFile = os.Getenv(envObservabilityConfigFile) ) diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/internal/envconfig/xds.go temporal-1.22.5/src/vendor/google.golang.org/grpc/internal/envconfig/xds.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/internal/envconfig/xds.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/internal/envconfig/xds.go 2024-02-23 09:46:15.000000000 +0000 @@ -61,11 +61,10 @@ // have a brand new API on the server-side and users explicitly need to use // the new API to get security integration on the server. 
XDSClientSideSecurity = boolFromEnv("GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT", true) - // XDSAggregateAndDNS indicates whether processing of aggregated cluster - // and DNS cluster is enabled, which can be enabled by setting the - // environment variable - // "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" to - // "true". + // XDSAggregateAndDNS indicates whether processing of aggregated cluster and + // DNS cluster is enabled, which can be disabled by setting the environment + // variable "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" + // to "false". XDSAggregateAndDNS = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER", true) // XDSRBAC indicates whether xDS configured RBAC HTTP Filter is enabled, @@ -82,11 +81,15 @@ XDSFederation = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FEDERATION", true) // XDSRLS indicates whether processing of Cluster Specifier plugins and - // support for the RLS CLuster Specifier is enabled, which can be enabled by + // support for the RLS CLuster Specifier is enabled, which can be disabled by // setting the environment variable "GRPC_EXPERIMENTAL_XDS_RLS_LB" to - // "true". - XDSRLS = boolFromEnv("GRPC_EXPERIMENTAL_XDS_RLS_LB", false) + // "false". + XDSRLS = boolFromEnv("GRPC_EXPERIMENTAL_XDS_RLS_LB", true) // C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing. C2PResolverTestOnlyTrafficDirectorURI = os.Getenv("GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI") + // XDSCustomLBPolicy indicates whether Custom LB Policies are enabled, which + // can be disabled by setting the environment variable + // "GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG" to "false". + XDSCustomLBPolicy = boolFromEnv("GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG", true) ) diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go temporal-1.22.5/src/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go 2024-02-23 09:46:15.000000000 +0000 @@ -30,7 +30,7 @@ var DepthLogger DepthLoggerV2 // InfoDepth logs to the INFO log at the specified depth. -func InfoDepth(depth int, args ...interface{}) { +func InfoDepth(depth int, args ...any) { if DepthLogger != nil { DepthLogger.InfoDepth(depth, args...) } else { @@ -39,7 +39,7 @@ } // WarningDepth logs to the WARNING log at the specified depth. -func WarningDepth(depth int, args ...interface{}) { +func WarningDepth(depth int, args ...any) { if DepthLogger != nil { DepthLogger.WarningDepth(depth, args...) } else { @@ -48,7 +48,7 @@ } // ErrorDepth logs to the ERROR log at the specified depth. -func ErrorDepth(depth int, args ...interface{}) { +func ErrorDepth(depth int, args ...any) { if DepthLogger != nil { DepthLogger.ErrorDepth(depth, args...) } else { @@ -57,7 +57,7 @@ } // FatalDepth logs to the FATAL log at the specified depth. -func FatalDepth(depth int, args ...interface{}) { +func FatalDepth(depth int, args ...any) { if DepthLogger != nil { DepthLogger.FatalDepth(depth, args...) } else { @@ -71,35 +71,35 @@ // is defined here to avoid a circular dependency. type LoggerV2 interface { // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. - Info(args ...interface{}) + Info(args ...any) // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. 
- Infoln(args ...interface{}) + Infoln(args ...any) // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. - Infof(format string, args ...interface{}) + Infof(format string, args ...any) // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. - Warning(args ...interface{}) + Warning(args ...any) // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. - Warningln(args ...interface{}) + Warningln(args ...any) // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. - Warningf(format string, args ...interface{}) + Warningf(format string, args ...any) // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. - Error(args ...interface{}) + Error(args ...any) // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. - Errorln(args ...interface{}) + Errorln(args ...any) // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. - Errorf(format string, args ...interface{}) + Errorf(format string, args ...any) // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. - Fatal(args ...interface{}) + Fatal(args ...any) // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. - Fatalln(args ...interface{}) + Fatalln(args ...any) // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. - Fatalf(format string, args ...interface{}) + Fatalf(format string, args ...any) // V reports whether verbosity level l is at least the requested verbose level. V(l int) bool } @@ -116,11 +116,11 @@ // later release. type DepthLoggerV2 interface { // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. - InfoDepth(depth int, args ...interface{}) + InfoDepth(depth int, args ...any) // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. - WarningDepth(depth int, args ...interface{}) + WarningDepth(depth int, args ...any) // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. - ErrorDepth(depth int, args ...interface{}) + ErrorDepth(depth int, args ...any) // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. - FatalDepth(depth int, args ...interface{}) + FatalDepth(depth int, args ...any) } diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go temporal-1.22.5/src/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go 2024-02-23 09:46:15.000000000 +0000 @@ -31,7 +31,7 @@ } // Infof does info logging. -func (pl *PrefixLogger) Infof(format string, args ...interface{}) { +func (pl *PrefixLogger) Infof(format string, args ...any) { if pl != nil { // Handle nil, so the tests can pass in a nil logger. 
format = pl.prefix + format @@ -42,7 +42,7 @@ } // Warningf does warning logging. -func (pl *PrefixLogger) Warningf(format string, args ...interface{}) { +func (pl *PrefixLogger) Warningf(format string, args ...any) { if pl != nil { format = pl.prefix + format pl.logger.WarningDepth(1, fmt.Sprintf(format, args...)) @@ -52,7 +52,7 @@ } // Errorf does error logging. -func (pl *PrefixLogger) Errorf(format string, args ...interface{}) { +func (pl *PrefixLogger) Errorf(format string, args ...any) { if pl != nil { format = pl.prefix + format pl.logger.ErrorDepth(1, fmt.Sprintf(format, args...)) @@ -62,7 +62,7 @@ } // Debugf does info logging at verbose level 2. -func (pl *PrefixLogger) Debugf(format string, args ...interface{}) { +func (pl *PrefixLogger) Debugf(format string, args ...any) { // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe // rewrite PrefixLogger a little to ensure that we don't use the global // `Logger` here, and instead use the `logger` field. diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go temporal-1.22.5/src/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go 2024-02-23 09:46:15.000000000 +0000 @@ -72,3 +72,24 @@ defer mu.Unlock() return r.Uint64() } + +// Uint32 implements rand.Uint32 on the grpcrand global source. +func Uint32() uint32 { + mu.Lock() + defer mu.Unlock() + return r.Uint32() +} + +// ExpFloat64 implements rand.ExpFloat64 on the grpcrand global source. +func ExpFloat64() float64 { + mu.Lock() + defer mu.Unlock() + return r.ExpFloat64() +} + +// Shuffle implements rand.Shuffle on the grpcrand global source. +var Shuffle = func(n int, f func(int, int)) { + mu.Lock() + defer mu.Unlock() + r.Shuffle(n, f) +} diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go temporal-1.22.5/src/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go 2024-02-23 09:46:15.000000000 +0000 @@ -20,6 +20,7 @@ import ( "context" + "sync" "google.golang.org/grpc/internal/buffer" ) @@ -31,35 +32,94 @@ // // This type is safe for concurrent access. type CallbackSerializer struct { + // done is closed once the serializer is shut down completely, i.e all + // scheduled callbacks are executed and the serializer has deallocated all + // its resources. + done chan struct{} + callbacks *buffer.Unbounded + closedMu sync.Mutex + closed bool } // NewCallbackSerializer returns a new CallbackSerializer instance. The provided // context will be passed to the scheduled callbacks. Users should cancel the // provided context to shutdown the CallbackSerializer. It is guaranteed that no -// callbacks will be executed once this context is canceled. +// callbacks will be added once this context is canceled, and any pending un-run +// callbacks will be executed before the serializer is shut down. 
func NewCallbackSerializer(ctx context.Context) *CallbackSerializer { - t := &CallbackSerializer{callbacks: buffer.NewUnbounded()} - go t.run(ctx) - return t + cs := &CallbackSerializer{ + done: make(chan struct{}), + callbacks: buffer.NewUnbounded(), + } + go cs.run(ctx) + return cs } // Schedule adds a callback to be scheduled after existing callbacks are run. // // Callbacks are expected to honor the context when performing any blocking // operations, and should return early when the context is canceled. -func (t *CallbackSerializer) Schedule(f func(ctx context.Context)) { - t.callbacks.Put(f) +// +// Return value indicates if the callback was successfully added to the list of +// callbacks to be executed by the serializer. It is not possible to add +// callbacks once the context passed to NewCallbackSerializer is cancelled. +func (cs *CallbackSerializer) Schedule(f func(ctx context.Context)) bool { + cs.closedMu.Lock() + defer cs.closedMu.Unlock() + + if cs.closed { + return false + } + cs.callbacks.Put(f) + return true } -func (t *CallbackSerializer) run(ctx context.Context) { +func (cs *CallbackSerializer) run(ctx context.Context) { + var backlog []func(context.Context) + + defer close(cs.done) for ctx.Err() == nil { select { case <-ctx.Done(): - return - case callback := <-t.callbacks.Get(): - t.callbacks.Load() + // Do nothing here. Next iteration of the for loop will not happen, + // since ctx.Err() would be non-nil. + case callback, ok := <-cs.callbacks.Get(): + if !ok { + return + } + cs.callbacks.Load() callback.(func(ctx context.Context))(ctx) } } + + // Fetch pending callbacks if any, and execute them before returning from + // this method and closing cs.done. + cs.closedMu.Lock() + cs.closed = true + backlog = cs.fetchPendingCallbacks() + cs.callbacks.Close() + cs.closedMu.Unlock() + for _, b := range backlog { + b(ctx) + } +} + +func (cs *CallbackSerializer) fetchPendingCallbacks() []func(context.Context) { + var backlog []func(context.Context) + for { + select { + case b := <-cs.callbacks.Get(): + backlog = append(backlog, b.(func(context.Context))) + cs.callbacks.Load() + default: + return backlog + } + } +} + +// Done returns a channel that is closed after the context passed to +// NewCallbackSerializer is canceled and all callbacks have been executed. +func (cs *CallbackSerializer) Done() <-chan struct{} { + return cs.done } diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go temporal-1.22.5/src/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go 2024-02-23 09:46:15.000000000 +0000 @@ -0,0 +1,121 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
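Editor's note on the CallbackSerializer changes above: Schedule now reports whether the callback was accepted, a Done channel signals completion, and callbacks already scheduled when the context is cancelled are drained before shutdown. A hedged usage sketch under those assumptions (grpcsync is grpc-internal, so the import is illustrative only):

```go
package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/internal/grpcsync" // grpc-internal; import shown for illustration only
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cs := grpcsync.NewCallbackSerializer(ctx)

	// Callbacks run one at a time, in scheduling order, on a single goroutine.
	for i := 0; i < 3; i++ {
		i := i
		accepted := cs.Schedule(func(ctx context.Context) {
			fmt.Println("callback", i)
		})
		fmt.Println("accepted:", accepted) // becomes false once the serializer has shut down
	}

	cancel()    // no new callbacks can be scheduled after this takes effect ...
	<-cs.Done() // ... but already-scheduled ones are drained before Done() closes
}
```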
+ * + */ + +package grpcsync + +import ( + "context" + "sync" +) + +// Subscriber represents an entity that is subscribed to messages published on +// a PubSub. It wraps the callback to be invoked by the PubSub when a new +// message is published. +type Subscriber interface { + // OnMessage is invoked when a new message is published. Implementations + // must not block in this method. + OnMessage(msg any) +} + +// PubSub is a simple one-to-many publish-subscribe system that supports +// messages of arbitrary type. It guarantees that messages are delivered in +// the same order in which they were published. +// +// Publisher invokes the Publish() method to publish new messages, while +// subscribers interested in receiving these messages register a callback +// via the Subscribe() method. +// +// Once a PubSub is stopped, no more messages can be published, but any pending +// published messages will be delivered to the subscribers. Done may be used +// to determine when all published messages have been delivered. +type PubSub struct { + cs *CallbackSerializer + + // Access to the below fields are guarded by this mutex. + mu sync.Mutex + msg any + subscribers map[Subscriber]bool +} + +// NewPubSub returns a new PubSub instance. Users should cancel the +// provided context to shutdown the PubSub. +func NewPubSub(ctx context.Context) *PubSub { + return &PubSub{ + cs: NewCallbackSerializer(ctx), + subscribers: map[Subscriber]bool{}, + } +} + +// Subscribe registers the provided Subscriber to the PubSub. +// +// If the PubSub contains a previously published message, the Subscriber's +// OnMessage() callback will be invoked asynchronously with the existing +// message to begin with, and subsequently for every newly published message. +// +// The caller is responsible for invoking the returned cancel function to +// unsubscribe itself from the PubSub. +func (ps *PubSub) Subscribe(sub Subscriber) (cancel func()) { + ps.mu.Lock() + defer ps.mu.Unlock() + + ps.subscribers[sub] = true + + if ps.msg != nil { + msg := ps.msg + ps.cs.Schedule(func(context.Context) { + ps.mu.Lock() + defer ps.mu.Unlock() + if !ps.subscribers[sub] { + return + } + sub.OnMessage(msg) + }) + } + + return func() { + ps.mu.Lock() + defer ps.mu.Unlock() + delete(ps.subscribers, sub) + } +} + +// Publish publishes the provided message to the PubSub, and invokes +// callbacks registered by subscribers asynchronously. +func (ps *PubSub) Publish(msg any) { + ps.mu.Lock() + defer ps.mu.Unlock() + + ps.msg = msg + for sub := range ps.subscribers { + s := sub + ps.cs.Schedule(func(context.Context) { + ps.mu.Lock() + defer ps.mu.Unlock() + if !ps.subscribers[s] { + return + } + s.OnMessage(msg) + }) + } +} + +// Done returns a channel that is closed after the context passed to NewPubSub +// is canceled and all updates have been sent to subscribers. +func (ps *PubSub) Done() <-chan struct{} { + return ps.cs.Done() +} diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/internal/idle/idle.go temporal-1.22.5/src/vendor/google.golang.org/grpc/internal/idle/idle.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/internal/idle/idle.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/internal/idle/idle.go 2024-02-23 09:46:15.000000000 +0000 @@ -0,0 +1,301 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
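Editor's note on the new grpcsync/pubsub.go file above: it is a small ordered publish-subscribe helper built on the CallbackSerializer, where subscribers receive messages asynchronously and Done reports when delivery has finished after the context is cancelled. A sketch of the calling pattern; printSubscriber is a hypothetical type, and the grpc-internal import is shown for illustration only.

```go
package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/internal/grpcsync" // grpc-internal; import shown for illustration only
)

// printSubscriber is a hypothetical Subscriber that just prints each message.
type printSubscriber struct{}

func (printSubscriber) OnMessage(msg any) { fmt.Println("got:", msg) }

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	ps := grpcsync.NewPubSub(ctx)

	// Subscribe before publishing; a subscriber registered later would first be
	// replayed the most recently published message.
	unsubscribe := ps.Subscribe(printSubscriber{})
	defer unsubscribe()

	ps.Publish("connecting") // delivered asynchronously, in publish order
	ps.Publish("ready")

	cancel()    // stop the PubSub ...
	<-ps.Done() // ... and wait until everything published so far has been delivered
}
```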
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package idle contains a component for managing idleness (entering and exiting) +// based on RPC activity. +package idle + +import ( + "fmt" + "math" + "sync" + "sync/atomic" + "time" + + "google.golang.org/grpc/grpclog" +) + +// For overriding in unit tests. +var timeAfterFunc = func(d time.Duration, f func()) *time.Timer { + return time.AfterFunc(d, f) +} + +// Enforcer is the functionality provided by grpc.ClientConn to enter +// and exit from idle mode. +type Enforcer interface { + ExitIdleMode() error + EnterIdleMode() error +} + +// Manager defines the functionality required to track RPC activity on a +// channel. +type Manager interface { + OnCallBegin() error + OnCallEnd() + Close() +} + +type noopManager struct{} + +func (noopManager) OnCallBegin() error { return nil } +func (noopManager) OnCallEnd() {} +func (noopManager) Close() {} + +// manager implements the Manager interface. It uses atomic operations to +// synchronize access to shared state and a mutex to guarantee mutual exclusion +// in a critical section. +type manager struct { + // State accessed atomically. + lastCallEndTime int64 // Unix timestamp in nanos; time when the most recent RPC completed. + activeCallsCount int32 // Count of active RPCs; -math.MaxInt32 means channel is idle or is trying to get there. + activeSinceLastTimerCheck int32 // Boolean; True if there was an RPC since the last timer callback. + closed int32 // Boolean; True when the manager is closed. + + // Can be accessed without atomics or mutex since these are set at creation + // time and read-only after that. + enforcer Enforcer // Functionality provided by grpc.ClientConn. + timeout int64 // Idle timeout duration nanos stored as an int64. + logger grpclog.LoggerV2 + + // idleMu is used to guarantee mutual exclusion in two scenarios: + // - Opposing intentions: + // - a: Idle timeout has fired and handleIdleTimeout() is trying to put + // the channel in idle mode because the channel has been inactive. + // - b: At the same time an RPC is made on the channel, and OnCallBegin() + // is trying to prevent the channel from going idle. + // - Competing intentions: + // - The channel is in idle mode and there are multiple RPCs starting at + // the same time, all trying to move the channel out of idle. Only one + // of them should succeed in doing so, while the other RPCs should + // piggyback on the first one and be successfully handled. + idleMu sync.RWMutex + actuallyIdle bool + timer *time.Timer +} + +// ManagerOptions is a collection of options used by +// NewManager. +type ManagerOptions struct { + Enforcer Enforcer + Timeout time.Duration + Logger grpclog.LoggerV2 +} + +// NewManager creates a new idleness manager implementation for the +// given idle timeout. +func NewManager(opts ManagerOptions) Manager { + if opts.Timeout == 0 { + return noopManager{} + } + + m := &manager{ + enforcer: opts.Enforcer, + timeout: int64(opts.Timeout), + logger: opts.Logger, + } + m.timer = timeAfterFunc(opts.Timeout, m.handleIdleTimeout) + return m +} + +// resetIdleTimer resets the idle timer to the given duration. 
This method +// should only be called from the timer callback. +func (m *manager) resetIdleTimer(d time.Duration) { + m.idleMu.Lock() + defer m.idleMu.Unlock() + + if m.timer == nil { + // Only close sets timer to nil. We are done. + return + } + + // It is safe to ignore the return value from Reset() because this method is + // only ever called from the timer callback, which means the timer has + // already fired. + m.timer.Reset(d) +} + +// handleIdleTimeout is the timer callback that is invoked upon expiry of the +// configured idle timeout. The channel is considered inactive if there are no +// ongoing calls and no RPC activity since the last time the timer fired. +func (m *manager) handleIdleTimeout() { + if m.isClosed() { + return + } + + if atomic.LoadInt32(&m.activeCallsCount) > 0 { + m.resetIdleTimer(time.Duration(m.timeout)) + return + } + + // There has been activity on the channel since we last got here. Reset the + // timer and return. + if atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 { + // Set the timer to fire after a duration of idle timeout, calculated + // from the time the most recent RPC completed. + atomic.StoreInt32(&m.activeSinceLastTimerCheck, 0) + m.resetIdleTimer(time.Duration(atomic.LoadInt64(&m.lastCallEndTime) + m.timeout - time.Now().UnixNano())) + return + } + + // This CAS operation is extremely likely to succeed given that there has + // been no activity since the last time we were here. Setting the + // activeCallsCount to -math.MaxInt32 indicates to OnCallBegin() that the + // channel is either in idle mode or is trying to get there. + if !atomic.CompareAndSwapInt32(&m.activeCallsCount, 0, -math.MaxInt32) { + // This CAS operation can fail if an RPC started after we checked for + // activity at the top of this method, or one was ongoing from before + // the last time we were here. In both case, reset the timer and return. + m.resetIdleTimer(time.Duration(m.timeout)) + return + } + + // Now that we've set the active calls count to -math.MaxInt32, it's time to + // actually move to idle mode. + if m.tryEnterIdleMode() { + // Successfully entered idle mode. No timer needed until we exit idle. + return + } + + // Failed to enter idle mode due to a concurrent RPC that kept the channel + // active, or because of an error from the channel. Undo the attempt to + // enter idle, and reset the timer to try again later. + atomic.AddInt32(&m.activeCallsCount, math.MaxInt32) + m.resetIdleTimer(time.Duration(m.timeout)) +} + +// tryEnterIdleMode instructs the channel to enter idle mode. But before +// that, it performs a last minute check to ensure that no new RPC has come in, +// making the channel active. +// +// Return value indicates whether or not the channel moved to idle mode. +// +// Holds idleMu which ensures mutual exclusion with exitIdleMode. +func (m *manager) tryEnterIdleMode() bool { + m.idleMu.Lock() + defer m.idleMu.Unlock() + + if atomic.LoadInt32(&m.activeCallsCount) != -math.MaxInt32 { + // We raced and lost to a new RPC. Very rare, but stop entering idle. + return false + } + if atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 { + // An very short RPC could have come in (and also finished) after we + // checked for calls count and activity in handleIdleTimeout(), but + // before the CAS operation. So, we need to check for activity again. + return false + } + + // No new RPCs have come in since we last set the active calls count value + // -math.MaxInt32 in the timer callback. 
And since we have the lock, it is + // safe to enter idle mode now. + if err := m.enforcer.EnterIdleMode(); err != nil { + m.logger.Errorf("Failed to enter idle mode: %v", err) + return false + } + + // Successfully entered idle mode. + m.actuallyIdle = true + return true +} + +// OnCallBegin is invoked at the start of every RPC. +func (m *manager) OnCallBegin() error { + if m.isClosed() { + return nil + } + + if atomic.AddInt32(&m.activeCallsCount, 1) > 0 { + // Channel is not idle now. Set the activity bit and allow the call. + atomic.StoreInt32(&m.activeSinceLastTimerCheck, 1) + return nil + } + + // Channel is either in idle mode or is in the process of moving to idle + // mode. Attempt to exit idle mode to allow this RPC. + if err := m.exitIdleMode(); err != nil { + // Undo the increment to calls count, and return an error causing the + // RPC to fail. + atomic.AddInt32(&m.activeCallsCount, -1) + return err + } + + atomic.StoreInt32(&m.activeSinceLastTimerCheck, 1) + return nil +} + +// exitIdleMode instructs the channel to exit idle mode. +// +// Holds idleMu which ensures mutual exclusion with tryEnterIdleMode. +func (m *manager) exitIdleMode() error { + m.idleMu.Lock() + defer m.idleMu.Unlock() + + if !m.actuallyIdle { + // This can happen in two scenarios: + // - handleIdleTimeout() set the calls count to -math.MaxInt32 and called + // tryEnterIdleMode(). But before the latter could grab the lock, an RPC + // came in and OnCallBegin() noticed that the calls count is negative. + // - Channel is in idle mode, and multiple new RPCs come in at the same + // time, all of them notice a negative calls count in OnCallBegin and get + // here. The first one to get the lock would got the channel to exit idle. + // + // Either way, nothing to do here. + return nil + } + + if err := m.enforcer.ExitIdleMode(); err != nil { + return fmt.Errorf("channel failed to exit idle mode: %v", err) + } + + // Undo the idle entry process. This also respects any new RPC attempts. + atomic.AddInt32(&m.activeCallsCount, math.MaxInt32) + m.actuallyIdle = false + + // Start a new timer to fire after the configured idle timeout. + m.timer = timeAfterFunc(time.Duration(m.timeout), m.handleIdleTimeout) + return nil +} + +// OnCallEnd is invoked at the end of every RPC. +func (m *manager) OnCallEnd() { + if m.isClosed() { + return + } + + // Record the time at which the most recent call finished. + atomic.StoreInt64(&m.lastCallEndTime, time.Now().UnixNano()) + + // Decrement the active calls count. This count can temporarily go negative + // when the timer callback is in the process of moving the channel to idle + // mode, but one or more RPCs come in and complete before the timer callback + // can get done with the process of moving to idle mode. 
+ atomic.AddInt32(&m.activeCallsCount, -1) +} + +func (m *manager) isClosed() bool { + return atomic.LoadInt32(&m.closed) == 1 +} + +func (m *manager) Close() { + atomic.StoreInt32(&m.closed, 1) + + m.idleMu.Lock() + m.timer.Stop() + m.timer = nil + m.idleMu.Unlock() +} diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/internal/internal.go temporal-1.22.5/src/vendor/google.golang.org/grpc/internal/internal.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/internal/internal.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/internal/internal.go 2024-02-23 09:46:15.000000000 +0000 @@ -30,7 +30,7 @@ var ( // WithHealthCheckFunc is set by dialoptions.go - WithHealthCheckFunc interface{} // func (HealthChecker) DialOption + WithHealthCheckFunc any // func (HealthChecker) DialOption // HealthCheckFunc is used to provide client-side LB channel health checking HealthCheckFunc HealthChecker // BalancerUnregister is exported by package balancer to unregister a balancer. @@ -38,8 +38,12 @@ // KeepaliveMinPingTime is the minimum ping interval. This must be 10s by // default, but tests may wish to set it lower for convenience. KeepaliveMinPingTime = 10 * time.Second + // KeepaliveMinServerPingTime is the minimum ping interval for servers. + // This must be 1s by default, but tests may wish to set it lower for + // convenience. + KeepaliveMinServerPingTime = time.Second // ParseServiceConfig parses a JSON representation of the service config. - ParseServiceConfig interface{} // func(string) *serviceconfig.ParseResult + ParseServiceConfig any // func(string) *serviceconfig.ParseResult // EqualServiceConfigForTesting is for testing service config generation and // parsing. Both a and b should be returned by ParseServiceConfig. // This function compares the config without rawJSON stripped, in case the @@ -49,54 +53,81 @@ // given name. This is set by package certprovider for use from xDS // bootstrap code while parsing certificate provider configs in the // bootstrap file. - GetCertificateProviderBuilder interface{} // func(string) certprovider.Builder + GetCertificateProviderBuilder any // func(string) certprovider.Builder // GetXDSHandshakeInfoForTesting returns a pointer to the xds.HandshakeInfo // stored in the passed in attributes. This is set by // credentials/xds/xds.go. - GetXDSHandshakeInfoForTesting interface{} // func (*attributes.Attributes) *xds.HandshakeInfo + GetXDSHandshakeInfoForTesting any // func (*attributes.Attributes) *xds.HandshakeInfo // GetServerCredentials returns the transport credentials configured on a // gRPC server. An xDS-enabled server needs to know what type of credentials // is configured on the underlying gRPC server. This is set by server.go. - GetServerCredentials interface{} // func (*grpc.Server) credentials.TransportCredentials + GetServerCredentials any // func (*grpc.Server) credentials.TransportCredentials // CanonicalString returns the canonical string of the code defined here: // https://github.com/grpc/grpc/blob/master/doc/statuscodes.md. - CanonicalString interface{} // func (codes.Code) string + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + CanonicalString any // func (codes.Code) string // DrainServerTransports initiates a graceful close of existing connections // on a gRPC server accepted on the provided listener address. 
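Editor's note on the new internal/idle package added a few hunks above: an idle.Manager tracks RPC activity via OnCallBegin/OnCallEnd and asks its Enforcer (in gRPC, the ClientConn) to enter or exit idle mode around the configured timeout. A minimal sketch, assuming a stand-in Enforcer and that grpclog.Component is acceptable as the Logger; the grpc-internal import is illustrative only.

```go
package main

import (
	"fmt"
	"time"

	"google.golang.org/grpc/grpclog"
	"google.golang.org/grpc/internal/idle" // grpc-internal; import shown for illustration only
)

// fakeEnforcer is a hypothetical Enforcer standing in for the grpc.ClientConn.
type fakeEnforcer struct{}

func (fakeEnforcer) EnterIdleMode() error { fmt.Println("entering idle mode"); return nil }
func (fakeEnforcer) ExitIdleMode() error  { fmt.Println("exiting idle mode"); return nil }

func main() {
	m := idle.NewManager(idle.ManagerOptions{
		Enforcer: fakeEnforcer{},
		Timeout:  time.Second, // a zero Timeout returns the no-op manager
		Logger:   grpclog.Component("idle-demo"),
	})
	defer m.Close()

	// Each RPC brackets its lifetime with OnCallBegin/OnCallEnd; the timer
	// callback only enters idle mode after a full timeout with no activity.
	if err := m.OnCallBegin(); err == nil {
		m.OnCallEnd()
	}

	time.Sleep(1500 * time.Millisecond) // long enough for the idle timer to fire
}
```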
An // xDS-enabled server invokes this method on a grpc.Server when a particular // listener moves to "not-serving" mode. - DrainServerTransports interface{} // func(*grpc.Server, string) + DrainServerTransports any // func(*grpc.Server, string) // AddGlobalServerOptions adds an array of ServerOption that will be // effective globally for newly created servers. The priority will be: 1. // user-provided; 2. this method; 3. default values. - AddGlobalServerOptions interface{} // func(opt ...ServerOption) + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + AddGlobalServerOptions any // func(opt ...ServerOption) // ClearGlobalServerOptions clears the array of extra ServerOption. This // method is useful in testing and benchmarking. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. ClearGlobalServerOptions func() // AddGlobalDialOptions adds an array of DialOption that will be effective // globally for newly created client channels. The priority will be: 1. // user-provided; 2. this method; 3. default values. - AddGlobalDialOptions interface{} // func(opt ...DialOption) + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + AddGlobalDialOptions any // func(opt ...DialOption) // DisableGlobalDialOptions returns a DialOption that prevents the // ClientConn from applying the global DialOptions (set via // AddGlobalDialOptions). - DisableGlobalDialOptions interface{} // func() grpc.DialOption + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + DisableGlobalDialOptions any // func() grpc.DialOption // ClearGlobalDialOptions clears the array of extra DialOption. This // method is useful in testing and benchmarking. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. ClearGlobalDialOptions func() // JoinDialOptions combines the dial options passed as arguments into a // single dial option. - JoinDialOptions interface{} // func(...grpc.DialOption) grpc.DialOption + JoinDialOptions any // func(...grpc.DialOption) grpc.DialOption // JoinServerOptions combines the server options passed as arguments into a // single server option. - JoinServerOptions interface{} // func(...grpc.ServerOption) grpc.ServerOption + JoinServerOptions any // func(...grpc.ServerOption) grpc.ServerOption // WithBinaryLogger returns a DialOption that specifies the binary logger // for a ClientConn. - WithBinaryLogger interface{} // func(binarylog.Logger) grpc.DialOption + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + WithBinaryLogger any // func(binarylog.Logger) grpc.DialOption // BinaryLogger returns a ServerOption that can set the binary logger for a // server. - BinaryLogger interface{} // func(binarylog.Logger) grpc.ServerOption + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. 
+ BinaryLogger any // func(binarylog.Logger) grpc.ServerOption + + // SubscribeToConnectivityStateChanges adds a grpcsync.Subscriber to a provided grpc.ClientConn + SubscribeToConnectivityStateChanges any // func(*grpc.ClientConn, grpcsync.Subscriber) // NewXDSResolverWithConfigForTesting creates a new xds resolver builder using // the provided xds bootstrap config instead of the global configuration from @@ -107,7 +138,7 @@ // // This function should ONLY be used for testing and may not work with some // other features, including the CSDS service. - NewXDSResolverWithConfigForTesting interface{} // func([]byte) (resolver.Builder, error) + NewXDSResolverWithConfigForTesting any // func([]byte) (resolver.Builder, error) // RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster // Specifier Plugin for testing purposes, regardless of the XDSRLS environment @@ -139,7 +170,11 @@ UnregisterRBACHTTPFilterForTesting func() // ORCAAllowAnyMinReportingInterval is for examples/orca use ONLY. - ORCAAllowAnyMinReportingInterval interface{} // func(so *orca.ServiceOptions) + ORCAAllowAnyMinReportingInterval any // func(so *orca.ServiceOptions) + + // GRPCResolverSchemeExtraMetadata determines when gRPC will add extra + // metadata to RPCs. + GRPCResolverSchemeExtraMetadata string = "xds" ) // HealthChecker defines the signature of the client-side LB channel health checking function. @@ -150,7 +185,7 @@ // // The health checking protocol is defined at: // https://github.com/grpc/grpc/blob/master/doc/health-checking.md -type HealthChecker func(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State, error), serviceName string) error +type HealthChecker func(ctx context.Context, newStream func(string) (any, error), setConnectivityState func(connectivity.State, error), serviceName string) error const ( // CredsBundleModeFallback switches GoogleDefaultCreds to fallback mode. diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/internal/metadata/metadata.go temporal-1.22.5/src/vendor/google.golang.org/grpc/internal/metadata/metadata.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/internal/metadata/metadata.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/internal/metadata/metadata.go 2024-02-23 09:46:15.000000000 +0000 @@ -35,7 +35,7 @@ type mdValue metadata.MD -func (m mdValue) Equal(o interface{}) bool { +func (m mdValue) Equal(o any) bool { om, ok := o.(mdValue) if !ok { return false diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/internal/pretty/pretty.go temporal-1.22.5/src/vendor/google.golang.org/grpc/internal/pretty/pretty.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/internal/pretty/pretty.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/internal/pretty/pretty.go 2024-02-23 09:46:15.000000000 +0000 @@ -35,7 +35,7 @@ // ToJSON marshals the input into a json string. // // If marshal fails, it falls back to fmt.Sprintf("%+v"). 
-func ToJSON(e interface{}) string { +func ToJSON(e any) string { switch ee := e.(type) { case protov1.Message: mm := jsonpb.Marshaler{Indent: jsonIndent} diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/internal/resolver/config_selector.go temporal-1.22.5/src/vendor/google.golang.org/grpc/internal/resolver/config_selector.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/internal/resolver/config_selector.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/internal/resolver/config_selector.go 2024-02-23 09:46:15.000000000 +0000 @@ -92,7 +92,7 @@ // calling RecvMsg on the same stream at the same time, but it is not safe // to call SendMsg on the same stream in different goroutines. It is also // not safe to call CloseSend concurrently with SendMsg. - SendMsg(m interface{}) error + SendMsg(m any) error // RecvMsg blocks until it receives a message into m or the stream is // done. It returns io.EOF when the stream completes successfully. On // any other error, the stream is aborted and the error contains the RPC @@ -101,7 +101,7 @@ // It is safe to have a goroutine calling SendMsg and another goroutine // calling RecvMsg on the same stream at the same time, but it is not // safe to call RecvMsg on the same stream in different goroutines. - RecvMsg(m interface{}) error + RecvMsg(m any) error } // ClientInterceptor is an interceptor for gRPC client streams. diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go temporal-1.22.5/src/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go 2024-02-23 09:46:15.000000000 +0000 @@ -62,7 +62,8 @@ defaultPort = "443" defaultDNSSvrPort = "53" golang = "GO" - // txtPrefix is the prefix string to be prepended to the host name for txt record lookup. + // txtPrefix is the prefix string to be prepended to the host name for txt + // record lookup. txtPrefix = "_grpc_config." // In DNS, service config is encoded in a TXT record via the mechanism // described in RFC-1464 using the attribute name grpc_config. @@ -86,14 +87,14 @@ minDNSResRate = 30 * time.Second ) -var customAuthorityDialler = func(authority string) func(ctx context.Context, network, address string) (net.Conn, error) { - return func(ctx context.Context, network, address string) (net.Conn, error) { +var addressDialer = func(address string) func(context.Context, string, string) (net.Conn, error) { + return func(ctx context.Context, network, _ string) (net.Conn, error) { var dialer net.Dialer - return dialer.DialContext(ctx, network, authority) + return dialer.DialContext(ctx, network, address) } } -var customAuthorityResolver = func(authority string) (netResolver, error) { +var newNetResolver = func(authority string) (netResolver, error) { host, port, err := parseTarget(authority, defaultDNSSvrPort) if err != nil { return nil, err @@ -103,7 +104,7 @@ return &net.Resolver{ PreferGo: true, - Dial: customAuthorityDialler(authorityWithPort), + Dial: addressDialer(authorityWithPort), }, nil } @@ -114,7 +115,8 @@ type dnsBuilder struct{} -// Build creates and starts a DNS resolver that watches the name resolution of the target. +// Build creates and starts a DNS resolver that watches the name resolution of +// the target. 
func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { host, port, err := parseTarget(target.Endpoint(), defaultPort) if err != nil { @@ -143,7 +145,7 @@ if target.URL.Host == "" { d.resolver = defaultResolver } else { - d.resolver, err = customAuthorityResolver(target.URL.Host) + d.resolver, err = newNetResolver(target.URL.Host) if err != nil { return nil, err } @@ -180,19 +182,22 @@ ctx context.Context cancel context.CancelFunc cc resolver.ClientConn - // rn channel is used by ResolveNow() to force an immediate resolution of the target. + // rn channel is used by ResolveNow() to force an immediate resolution of the + // target. rn chan struct{} - // wg is used to enforce Close() to return after the watcher() goroutine has finished. - // Otherwise, data race will be possible. [Race Example] in dns_resolver_test we - // replace the real lookup functions with mocked ones to facilitate testing. - // If Close() doesn't wait for watcher() goroutine finishes, race detector sometimes - // will warns lookup (READ the lookup function pointers) inside watcher() goroutine - // has data race with replaceNetFunc (WRITE the lookup function pointers). + // wg is used to enforce Close() to return after the watcher() goroutine has + // finished. Otherwise, data race will be possible. [Race Example] in + // dns_resolver_test we replace the real lookup functions with mocked ones to + // facilitate testing. If Close() doesn't wait for watcher() goroutine + // finishes, race detector sometimes will warns lookup (READ the lookup + // function pointers) inside watcher() goroutine has data race with + // replaceNetFunc (WRITE the lookup function pointers). wg sync.WaitGroup disableServiceConfig bool } -// ResolveNow invoke an immediate resolution of the target that this dnsResolver watches. +// ResolveNow invoke an immediate resolution of the target that this +// dnsResolver watches. func (d *dnsResolver) ResolveNow(resolver.ResolveNowOptions) { select { case d.rn <- struct{}{}: @@ -220,8 +225,8 @@ var timer *time.Timer if err == nil { - // Success resolving, wait for the next ResolveNow. However, also wait 30 seconds at the very least - // to prevent constantly re-resolving. + // Success resolving, wait for the next ResolveNow. However, also wait 30 + // seconds at the very least to prevent constantly re-resolving. backoffIndex = 1 timer = newTimerDNSResRate(minDNSResRate) select { @@ -231,7 +236,8 @@ case <-d.rn: } } else { - // Poll on an error found in DNS Resolver or an error received from ClientConn. + // Poll on an error found in DNS Resolver or an error received from + // ClientConn. timer = newTimer(backoff.DefaultExponential.Backoff(backoffIndex)) backoffIndex++ } @@ -278,7 +284,8 @@ } func handleDNSError(err error, lookupType string) error { - if dnsErr, ok := err.(*net.DNSError); ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary { + dnsErr, ok := err.(*net.DNSError) + if ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary { // Timeouts and temporary errors should be communicated to gRPC to // attempt another DNS query (with backoff). Other errors should be // suppressed (they may represent the absence of a TXT record). @@ -307,10 +314,12 @@ res += s } - // TXT record must have "grpc_config=" attribute in order to be used as service config. + // TXT record must have "grpc_config=" attribute in order to be used as + // service config. 
if !strings.HasPrefix(res, txtAttribute) { logger.Warningf("dns: TXT record %v missing %v attribute", res, txtAttribute) - // This is not an error; it is the equivalent of not having a service config. + // This is not an error; it is the equivalent of not having a service + // config. return nil } sc := canaryingSC(strings.TrimPrefix(res, txtAttribute)) @@ -352,9 +361,10 @@ return &state, nil } -// formatIP returns ok = false if addr is not a valid textual representation of an IP address. -// If addr is an IPv4 address, return the addr and ok = true. -// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true. +// formatIP returns ok = false if addr is not a valid textual representation of +// an IP address. If addr is an IPv4 address, return the addr and ok = true. +// If addr is an IPv6 address, return the addr enclosed in square brackets and +// ok = true. func formatIP(addr string) (addrIP string, ok bool) { ip := net.ParseIP(addr) if ip == nil { @@ -366,10 +376,10 @@ return "[" + addr + "]", true } -// parseTarget takes the user input target string and default port, returns formatted host and port info. -// If target doesn't specify a port, set the port to be the defaultPort. -// If target is in IPv6 format and host-name is enclosed in square brackets, brackets -// are stripped when setting the host. +// parseTarget takes the user input target string and default port, returns +// formatted host and port info. If target doesn't specify a port, set the port +// to be the defaultPort. If target is in IPv6 format and host-name is enclosed +// in square brackets, brackets are stripped when setting the host. // examples: // target: "www.google.com" defaultPort: "443" returns host: "www.google.com", port: "443" // target: "ipv4-host:80" defaultPort: "443" returns host: "ipv4-host", port: "80" @@ -385,12 +395,14 @@ } if host, port, err = net.SplitHostPort(target); err == nil { if port == "" { - // If the port field is empty (target ends with colon), e.g. "[::1]:", this is an error. + // If the port field is empty (target ends with colon), e.g. "[::1]:", + // this is an error. return "", "", errEndsWithColon } // target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port if host == "" { - // Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed. + // Keep consistent with net.Dial(): If the host is empty, as in ":80", + // the local system is assumed. host = "localhost" } return host, port, nil diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go temporal-1.22.5/src/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go 2024-02-23 09:46:15.000000000 +0000 @@ -0,0 +1,130 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package serviceconfig + +import ( + "encoding/json" + "fmt" + "math" + "strconv" + "strings" + "time" +) + +// Duration defines JSON marshal and unmarshal methods to conform to the +// protobuf JSON spec defined [here]. +// +// [here]: https://protobuf.dev/reference/protobuf/google.protobuf/#duration +type Duration time.Duration + +func (d Duration) String() string { + return fmt.Sprint(time.Duration(d)) +} + +// MarshalJSON converts from d to a JSON string output. +func (d Duration) MarshalJSON() ([]byte, error) { + ns := time.Duration(d).Nanoseconds() + sec := ns / int64(time.Second) + ns = ns % int64(time.Second) + + var sign string + if sec < 0 || ns < 0 { + sign, sec, ns = "-", -1*sec, -1*ns + } + + // Generated output always contains 0, 3, 6, or 9 fractional digits, + // depending on required precision. + str := fmt.Sprintf("%s%d.%09d", sign, sec, ns) + str = strings.TrimSuffix(str, "000") + str = strings.TrimSuffix(str, "000") + str = strings.TrimSuffix(str, ".000") + return []byte(fmt.Sprintf("\"%ss\"", str)), nil +} + +// UnmarshalJSON unmarshals b as a duration JSON string into d. +func (d *Duration) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + if !strings.HasSuffix(s, "s") { + return fmt.Errorf("malformed duration %q: missing seconds unit", s) + } + neg := false + if s[0] == '-' { + neg = true + s = s[1:] + } + ss := strings.SplitN(s[:len(s)-1], ".", 3) + if len(ss) > 2 { + return fmt.Errorf("malformed duration %q: too many decimals", s) + } + // hasDigits is set if either the whole or fractional part of the number is + // present, since both are optional but one is required. + hasDigits := false + var sec, ns int64 + if len(ss[0]) > 0 { + var err error + if sec, err = strconv.ParseInt(ss[0], 10, 64); err != nil { + return fmt.Errorf("malformed duration %q: %v", s, err) + } + // Maximum seconds value per the durationpb spec. + const maxProtoSeconds = 315_576_000_000 + if sec > maxProtoSeconds { + return fmt.Errorf("out of range: %q", s) + } + hasDigits = true + } + if len(ss) == 2 && len(ss[1]) > 0 { + if len(ss[1]) > 9 { + return fmt.Errorf("malformed duration %q: too many digits after decimal", s) + } + var err error + if ns, err = strconv.ParseInt(ss[1], 10, 64); err != nil { + return fmt.Errorf("malformed duration %q: %v", s, err) + } + for i := 9; i > len(ss[1]); i-- { + ns *= 10 + } + hasDigits = true + } + if !hasDigits { + return fmt.Errorf("malformed duration %q: contains no numbers", s) + } + + if neg { + sec *= -1 + ns *= -1 + } + + // Maximum/minimum seconds/nanoseconds representable by Go's time.Duration. 
+ const maxSeconds = math.MaxInt64 / int64(time.Second) + const maxNanosAtMaxSeconds = math.MaxInt64 % int64(time.Second) + const minSeconds = math.MinInt64 / int64(time.Second) + const minNanosAtMinSeconds = math.MinInt64 % int64(time.Second) + + if sec > maxSeconds || (sec == maxSeconds && ns >= maxNanosAtMaxSeconds) { + *d = Duration(math.MaxInt64) + } else if sec < minSeconds || (sec == minSeconds && ns <= minNanosAtMinSeconds) { + *d = Duration(math.MinInt64) + } else { + *d = Duration(sec*int64(time.Second) + ns) + } + return nil +} diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/internal/status/status.go temporal-1.22.5/src/vendor/google.golang.org/grpc/internal/status/status.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/internal/status/status.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/internal/status/status.go 2024-02-23 09:46:15.000000000 +0000 @@ -49,7 +49,7 @@ } // Newf returns New(c, fmt.Sprintf(format, a...)). -func Newf(c codes.Code, format string, a ...interface{}) *Status { +func Newf(c codes.Code, format string, a ...any) *Status { return New(c, fmt.Sprintf(format, a...)) } @@ -64,7 +64,7 @@ } // Errorf returns Error(c, fmt.Sprintf(format, a...)). -func Errorf(c codes.Code, format string, a ...interface{}) error { +func Errorf(c codes.Code, format string, a ...any) error { return Err(c, fmt.Sprintf(format, a...)) } @@ -120,11 +120,11 @@ // Details returns a slice of details messages attached to the status. // If a detail cannot be decoded, the error is returned in place of the detail. -func (s *Status) Details() []interface{} { +func (s *Status) Details() []any { if s == nil || s.s == nil { return nil } - details := make([]interface{}, 0, len(s.s.Details)) + details := make([]any, 0, len(s.s.Details)) for _, any := range s.s.Details { detail := &ptypes.DynamicAny{} if err := ptypes.UnmarshalAny(any, detail); err != nil { diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/internal/transport/controlbuf.go temporal-1.22.5/src/vendor/google.golang.org/grpc/internal/transport/controlbuf.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/internal/transport/controlbuf.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/internal/transport/controlbuf.go 2024-02-23 09:46:15.000000000 +0000 @@ -40,7 +40,7 @@ } type itemNode struct { - it interface{} + it any next *itemNode } @@ -49,7 +49,7 @@ tail *itemNode } -func (il *itemList) enqueue(i interface{}) { +func (il *itemList) enqueue(i any) { n := &itemNode{it: i} if il.tail == nil { il.head, il.tail = n, n @@ -61,11 +61,11 @@ // peek returns the first item in the list without removing it from the // list. -func (il *itemList) peek() interface{} { +func (il *itemList) peek() any { return il.head.it } -func (il *itemList) dequeue() interface{} { +func (il *itemList) dequeue() any { if il.head == nil { return nil } @@ -336,7 +336,7 @@ return err } -func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it cbItem) (bool, error) { +func (c *controlBuffer) executeAndPut(f func(it any) bool, it cbItem) (bool, error) { var wakeUp bool c.mu.Lock() if c.err != nil { @@ -373,7 +373,7 @@ } // Note argument f should never be nil. 
-func (c *controlBuffer) execute(f func(it interface{}) bool, it interface{}) (bool, error) { +func (c *controlBuffer) execute(f func(it any) bool, it any) (bool, error) { c.mu.Lock() if c.err != nil { c.mu.Unlock() @@ -387,7 +387,7 @@ return true, nil } -func (c *controlBuffer) get(block bool) (interface{}, error) { +func (c *controlBuffer) get(block bool) (any, error) { for { c.mu.Lock() if c.err != nil { @@ -830,7 +830,7 @@ return nil } -func (l *loopyWriter) handle(i interface{}) error { +func (l *loopyWriter) handle(i any) error { switch i := i.(type) { case *incomingWindowUpdate: l.incomingWindowUpdateHandler(i) diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/internal/transport/handler_server.go temporal-1.22.5/src/vendor/google.golang.org/grpc/internal/transport/handler_server.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/internal/transport/handler_server.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/internal/transport/handler_server.go 2024-02-23 09:46:15.000000000 +0000 @@ -453,7 +453,7 @@ func (ht *serverHandlerTransport) IncrMsgRecv() {} -func (ht *serverHandlerTransport) Drain() { +func (ht *serverHandlerTransport) Drain(debugData string) { panic("Drain() is not implemented") } diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/internal/transport/http2_client.go temporal-1.22.5/src/vendor/google.golang.org/grpc/internal/transport/http2_client.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/internal/transport/http2_client.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/internal/transport/http2_client.go 2024-02-23 09:46:15.000000000 +0000 @@ -330,7 +330,7 @@ readerDone: make(chan struct{}), writerDone: make(chan struct{}), goAway: make(chan struct{}), - framer: newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize), + framer: newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize), fc: &trInFlow{limit: uint32(icwz)}, scheme: scheme, activeStreams: make(map[uint32]*Stream), @@ -762,7 +762,7 @@ firstTry := true var ch chan struct{} transportDrainRequired := false - checkForStreamQuota := func(it interface{}) bool { + checkForStreamQuota := func(it any) bool { if t.streamQuota <= 0 { // Can go negative if server decreases it. if firstTry { t.waitingStreams++ @@ -800,7 +800,7 @@ return true } var hdrListSizeErr error - checkForHeaderListSize := func(it interface{}) bool { + checkForHeaderListSize := func(it any) bool { if t.maxSendHeaderListSize == nil { return true } @@ -815,7 +815,7 @@ return true } for { - success, err := t.controlBuf.executeAndPut(func(it interface{}) bool { + success, err := t.controlBuf.executeAndPut(func(it any) bool { return checkForHeaderListSize(it) && checkForStreamQuota(it) }, hdr) if err != nil { @@ -927,7 +927,7 @@ rst: rst, rstCode: rstCode, } - addBackStreamQuota := func(interface{}) bool { + addBackStreamQuota := func(any) bool { t.streamQuota++ if t.streamQuota > 0 && t.waitingStreams > 0 { select { @@ -1080,7 +1080,7 @@ // for the transport and the stream based on the current bdp // estimation. 
func (t *http2Client) updateFlowControl(n uint32) { - updateIWS := func(interface{}) bool { + updateIWS := func(any) bool { t.initialWindowSize = int32(n) t.mu.Lock() for _, s := range t.activeStreams { @@ -1233,7 +1233,7 @@ } updateFuncs = append(updateFuncs, updateStreamQuota) } - t.controlBuf.executeAndPut(func(interface{}) bool { + t.controlBuf.executeAndPut(func(any) bool { for _, f := range updateFuncs { f() } @@ -1337,7 +1337,7 @@ // setGoAwayReason sets the value of t.goAwayReason based // on the GoAway frame received. -// It expects a lock on transport's mutext to be held by +// It expects a lock on transport's mutex to be held by // the caller. func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) { t.goAwayReason = GoAwayNoReason @@ -1505,14 +1505,15 @@ return } - isHeader := false - - // If headerChan hasn't been closed yet - if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { - s.headerValid = true - if !endStream { - // HEADERS frame block carries a Response-Headers. - isHeader = true + // For headers, set them in s.header and close headerChan. For trailers or + // trailers-only, closeStream will set the trailers and close headerChan as + // needed. + if !endStream { + // If headerChan hasn't been closed yet (expected, given we checked it + // above, but something else could have potentially closed the whole + // stream). + if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { + s.headerValid = true // These values can be set without any synchronization because // stream goroutine will read it only after seeing a closed // headerChan which we'll close after setting this. @@ -1520,15 +1521,12 @@ if len(mdata) > 0 { s.header = mdata } - } else { - // HEADERS frame block carries a Trailers-Only. - s.noHeaders = true + close(s.headerChan) } - close(s.headerChan) } for _, sh := range t.statsHandlers { - if isHeader { + if !endStream { inHeader := &stats.InHeader{ Client: true, WireLength: int(frame.Header().Length), @@ -1554,9 +1552,10 @@ statusGen = status.New(rawStatusCode, grpcMessage) } - // if client received END_STREAM from server while stream was still active, send RST_STREAM - rst := s.getState() == streamActive - t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, statusGen, mdata, true) + // If client received END_STREAM from server while stream was still active, + // send RST_STREAM. + rstStream := s.getState() == streamActive + t.closeStream(s, io.EOF, rstStream, http2.ErrCodeNo, statusGen, mdata, true) } // readServerPreface reads and handles the initial settings frame from the diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/internal/transport/http2_server.go temporal-1.22.5/src/vendor/google.golang.org/grpc/internal/transport/http2_server.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/internal/transport/http2_server.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/internal/transport/http2_server.go 2024-02-23 09:46:15.000000000 +0000 @@ -165,7 +165,7 @@ if config.MaxHeaderListSize != nil { maxHeaderListSize = *config.MaxHeaderListSize } - framer := newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize) + framer := newFramer(conn, writeBufSize, readBufSize, config.SharedWriteBuffer, maxHeaderListSize) // Send initial settings as connection preface to client. 
isettings := []http2.Setting{{ ID: http2.SettingMaxFrameSize, @@ -238,7 +238,7 @@ kp.Timeout = defaultServerKeepaliveTimeout } if kp.Time != infinity { - if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil { + if err = syscall.SetTCPUserTimeout(rawConn, kp.Timeout); err != nil { return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err) } } @@ -855,7 +855,7 @@ } return nil }) - t.controlBuf.executeAndPut(func(interface{}) bool { + t.controlBuf.executeAndPut(func(any) bool { for _, f := range updateFuncs { f() } @@ -939,7 +939,7 @@ return headerFields } -func (t *http2Server) checkForHeaderListSize(it interface{}) bool { +func (t *http2Server) checkForHeaderListSize(it any) bool { if t.maxSendHeaderListSize == nil { return true } @@ -1166,12 +1166,12 @@ if val <= 0 { // The connection has been idle for a duration of keepalive.MaxConnectionIdle or more. // Gracefully close the connection. - t.Drain() + t.Drain("max_idle") return } idleTimer.Reset(val) case <-ageTimer.C: - t.Drain() + t.Drain("max_age") ageTimer.Reset(t.kp.MaxConnectionAgeGrace) select { case <-ageTimer.C: @@ -1318,14 +1318,14 @@ return t.remoteAddr } -func (t *http2Server) Drain() { +func (t *http2Server) Drain(debugData string) { t.mu.Lock() defer t.mu.Unlock() if t.drainEvent != nil { return } t.drainEvent = grpcsync.NewEvent() - t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte{}, headsUp: true}) + t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte(debugData), headsUp: true}) } var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}} @@ -1367,7 +1367,7 @@ // originated before the GoAway reaches the client. // After getting the ack or timer expiration send out another GoAway this // time with an ID of the max stream server intends to process. 
- if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, []byte{}); err != nil { + if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, g.debugData); err != nil { return false, err } if err := t.framer.fr.WritePing(false, goAwayPing.data); err != nil { diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/internal/transport/http_util.go temporal-1.22.5/src/vendor/google.golang.org/grpc/internal/transport/http_util.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/internal/transport/http_util.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/internal/transport/http_util.go 2024-02-23 09:46:15.000000000 +0000 @@ -30,6 +30,7 @@ "net/url" "strconv" "strings" + "sync" "time" "unicode/utf8" @@ -309,6 +310,7 @@ } type bufWriter struct { + pool *sync.Pool buf []byte offset int batchSize int @@ -316,12 +318,17 @@ err error } -func newBufWriter(conn net.Conn, batchSize int) *bufWriter { - return &bufWriter{ - buf: make([]byte, batchSize*2), +func newBufWriter(conn net.Conn, batchSize int, pool *sync.Pool) *bufWriter { + w := &bufWriter{ batchSize: batchSize, conn: conn, + pool: pool, } + // this indicates that we should use non shared buf + if pool == nil { + w.buf = make([]byte, batchSize) + } + return w } func (w *bufWriter) Write(b []byte) (n int, err error) { @@ -332,19 +339,34 @@ n, err = w.conn.Write(b) return n, toIOError(err) } + if w.buf == nil { + b := w.pool.Get().(*[]byte) + w.buf = *b + } for len(b) > 0 { nn := copy(w.buf[w.offset:], b) b = b[nn:] w.offset += nn n += nn if w.offset >= w.batchSize { - err = w.Flush() + err = w.flushKeepBuffer() } } return n, err } func (w *bufWriter) Flush() error { + err := w.flushKeepBuffer() + // Only release the buffer if we are in a "shared" mode + if w.buf != nil && w.pool != nil { + b := w.buf + w.pool.Put(&b) + w.buf = nil + } + return err +} + +func (w *bufWriter) flushKeepBuffer() error { if w.err != nil { return w.err } @@ -381,7 +403,10 @@ fr *http2.Framer } -func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderListSize uint32) *framer { +var writeBufferPoolMap map[int]*sync.Pool = make(map[int]*sync.Pool) +var writeBufferMutex sync.Mutex + +func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBuffer bool, maxHeaderListSize uint32) *framer { if writeBufferSize < 0 { writeBufferSize = 0 } @@ -389,7 +414,11 @@ if readBufferSize > 0 { r = bufio.NewReaderSize(r, readBufferSize) } - w := newBufWriter(conn, writeBufferSize) + var pool *sync.Pool + if sharedWriteBuffer { + pool = getWriteBufferPool(writeBufferSize) + } + w := newBufWriter(conn, writeBufferSize, pool) f := &framer{ writer: w, fr: http2.NewFramer(w, r), @@ -403,6 +432,24 @@ return f } +func getWriteBufferPool(writeBufferSize int) *sync.Pool { + writeBufferMutex.Lock() + defer writeBufferMutex.Unlock() + size := writeBufferSize * 2 + pool, ok := writeBufferPoolMap[size] + if ok { + return pool + } + pool = &sync.Pool{ + New: func() any { + b := make([]byte, size) + return &b + }, + } + writeBufferPoolMap[size] = pool + return pool +} + // parseDialTarget returns the network and address to pass to dialer. 
func parseDialTarget(target string) (string, string) { net := "tcp" diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/internal/transport/transport.go temporal-1.22.5/src/vendor/google.golang.org/grpc/internal/transport/transport.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/internal/transport/transport.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/internal/transport/transport.go 2024-02-23 09:46:15.000000000 +0000 @@ -43,10 +43,6 @@ "google.golang.org/grpc/tap" ) -// ErrNoHeaders is used as a signal that a trailers only response was received, -// and is not a real error. -var ErrNoHeaders = errors.New("stream has no headers") - const logLevel = 2 type bufferPool struct { @@ -56,7 +52,7 @@ func newBufferPool() *bufferPool { return &bufferPool{ pool: sync.Pool{ - New: func() interface{} { + New: func() any { return new(bytes.Buffer) }, }, @@ -390,14 +386,10 @@ } s.waitOnHeader() - if !s.headerValid { + if !s.headerValid || s.noHeaders { return nil, s.status.Err() } - if s.noHeaders { - return nil, ErrNoHeaders - } - return s.header.Copy(), nil } @@ -559,6 +551,7 @@ InitialConnWindowSize int32 WriteBufferSize int ReadBufferSize int + SharedWriteBuffer bool ChannelzParentID *channelz.Identifier MaxHeaderListSize *uint32 HeaderTableSize *uint32 @@ -592,6 +585,8 @@ WriteBufferSize int // ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall. ReadBufferSize int + // SharedWriteBuffer indicates whether connections should reuse write buffer + SharedWriteBuffer bool // ChannelzParentID sets the addrConn id which initiate the creation of this client transport. ChannelzParentID *channelz.Identifier // MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received. @@ -726,7 +721,7 @@ RemoteAddr() net.Addr // Drain notifies the client this ServerTransport stops accepting new RPCs. - Drain() + Drain(debugData string) // IncrMsgSent increments the number of message sent through this transport. IncrMsgSent() @@ -736,7 +731,7 @@ } // connectionErrorf creates an ConnectionError with the specified error description. -func connectionErrorf(temp bool, e error, format string, a ...interface{}) ConnectionError { +func connectionErrorf(temp bool, e error, format string, a ...any) ConnectionError { return ConnectionError{ Desc: fmt.Sprintf(format, a...), temp: temp, diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/picker_wrapper.go temporal-1.22.5/src/vendor/google.golang.org/grpc/picker_wrapper.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/picker_wrapper.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/picker_wrapper.go 2024-02-23 09:46:15.000000000 +0000 @@ -28,26 +28,36 @@ "google.golang.org/grpc/internal/channelz" istatus "google.golang.org/grpc/internal/status" "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/stats" "google.golang.org/grpc/status" ) // pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick // actions and unblock when there's a picker update. 
type pickerWrapper struct { - mu sync.Mutex - done bool - blockingCh chan struct{} - picker balancer.Picker + mu sync.Mutex + done bool + idle bool + blockingCh chan struct{} + picker balancer.Picker + statsHandlers []stats.Handler // to record blocking picker calls } -func newPickerWrapper() *pickerWrapper { - return &pickerWrapper{blockingCh: make(chan struct{})} +func newPickerWrapper(statsHandlers []stats.Handler) *pickerWrapper { + return &pickerWrapper{ + blockingCh: make(chan struct{}), + statsHandlers: statsHandlers, + } } // updatePicker is called by UpdateBalancerState. It unblocks all blocked pick. func (pw *pickerWrapper) updatePicker(p balancer.Picker) { pw.mu.Lock() - if pw.done { + if pw.done || pw.idle { + // There is a small window where a picker update from the LB policy can + // race with the channel going to idle mode. If the picker is idle here, + // it is because the channel asked it to do so, and therefore it is sage + // to ignore the update from the LB policy. pw.mu.Unlock() return } @@ -63,10 +73,8 @@ // - wraps the done function in the passed in result to increment the calls // failed or calls succeeded channelz counter before invoking the actual // done function. -func doneChannelzWrapper(acw *acBalancerWrapper, result *balancer.PickResult) { - acw.mu.Lock() - ac := acw.ac - acw.mu.Unlock() +func doneChannelzWrapper(acbw *acBalancerWrapper, result *balancer.PickResult) { + ac := acbw.ac ac.incrCallsStarted() done := result.Done result.Done = func(b balancer.DoneInfo) { @@ -92,6 +100,7 @@ var ch chan struct{} var lastPickErr error + for { pw.mu.Lock() if pw.done { @@ -126,6 +135,20 @@ continue } + // If the channel is set, it means that the pick call had to wait for a + // new picker at some point. Either it's the first iteration and this + // function received the first picker, or a picker errored with + // ErrNoSubConnAvailable or errored with failfast set to false, which + // will trigger a continue to the next iteration. In the first case this + // conditional will hit if this call had to block (the channel is set). + // In the second case, the only way it will get to this conditional is + // if there is a new picker. + if ch != nil { + for _, sh := range pw.statsHandlers { + sh.HandleRPC(ctx, &stats.PickerUpdated{}) + } + } + ch = pw.blockingCh p := pw.picker pw.mu.Unlock() @@ -152,14 +175,14 @@ return nil, balancer.PickResult{}, status.Error(codes.Unavailable, err.Error()) } - acw, ok := pickResult.SubConn.(*acBalancerWrapper) + acbw, ok := pickResult.SubConn.(*acBalancerWrapper) if !ok { logger.Errorf("subconn returned from pick is type %T, not *acBalancerWrapper", pickResult.SubConn) continue } - if t := acw.getAddrConn().getReadyTransport(); t != nil { + if t := acbw.ac.getReadyTransport(); t != nil { if channelz.IsOn() { - doneChannelzWrapper(acw, &pickResult) + doneChannelzWrapper(acbw, &pickResult) return t, pickResult, nil } return t, pickResult, nil @@ -187,6 +210,25 @@ close(pw.blockingCh) } +func (pw *pickerWrapper) enterIdleMode() { + pw.mu.Lock() + defer pw.mu.Unlock() + if pw.done { + return + } + pw.idle = true +} + +func (pw *pickerWrapper) exitIdleMode() { + pw.mu.Lock() + defer pw.mu.Unlock() + if pw.done { + return + } + pw.blockingCh = make(chan struct{}) + pw.idle = false +} + // dropError is a wrapper error that indicates the LB policy wishes to drop the // RPC and not retry it. 
type dropError struct { diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/pickfirst.go temporal-1.22.5/src/vendor/google.golang.org/grpc/pickfirst.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/pickfirst.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/pickfirst.go 2024-02-23 09:46:15.000000000 +0000 @@ -19,15 +19,25 @@ package grpc import ( + "encoding/json" "errors" "fmt" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/envconfig" + internalgrpclog "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" ) -// PickFirstBalancerName is the name of the pick_first balancer. -const PickFirstBalancerName = "pick_first" +const ( + // PickFirstBalancerName is the name of the pick_first balancer. + PickFirstBalancerName = "pick_first" + logPrefix = "[pick-first-lb %p] " +) func newPickfirstBuilder() balancer.Builder { return &pickfirstBuilder{} @@ -36,22 +46,55 @@ type pickfirstBuilder struct{} func (*pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { - return &pickfirstBalancer{cc: cc} + b := &pickfirstBalancer{cc: cc} + b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b)) + return b } func (*pickfirstBuilder) Name() string { return PickFirstBalancerName } +type pfConfig struct { + serviceconfig.LoadBalancingConfig `json:"-"` + + // If set to true, instructs the LB policy to shuffle the order of the list + // of addresses received from the name resolver before attempting to + // connect to them. + ShuffleAddressList bool `json:"shuffleAddressList"` +} + +func (*pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + if !envconfig.PickFirstLBConfig { + // Prior to supporting loadbalancing configuration, the pick_first LB + // policy did not implement the balancer.ConfigParser interface. This + // meant that if a non-empty configuration was passed to it, the service + // config unmarshaling code would throw a warning log, but would + // continue using the pick_first LB policy. The code below ensures the + // same behavior is retained if the env var is not set. + if string(js) != "{}" { + logger.Warningf("Ignoring non-empty balancer configuration %q for the pick_first LB policy", string(js)) + } + return nil, nil + } + + var cfg pfConfig + if err := json.Unmarshal(js, &cfg); err != nil { + return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err) + } + return cfg, nil +} + type pickfirstBalancer struct { + logger *internalgrpclog.PrefixLogger state connectivity.State cc balancer.ClientConn subConn balancer.SubConn } func (b *pickfirstBalancer) ResolverError(err error) { - if logger.V(2) { - logger.Infof("pickfirstBalancer: ResolverError called with error: %v", err) + if b.logger.V(2) { + b.logger.Infof("Received error from the name resolver: %v", err) } if b.subConn == nil { b.state = connectivity.TransientFailure @@ -69,28 +112,49 @@ } func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { - if len(state.ResolverState.Addresses) == 0 { + addrs := state.ResolverState.Addresses + if len(addrs) == 0 { // The resolver reported an empty address list. Treat it like an error by // calling b.ResolverError. 
if b.subConn != nil { - // Remove the old subConn. All addresses were removed, so it is no longer - // valid. - b.cc.RemoveSubConn(b.subConn) + // Shut down the old subConn. All addresses were removed, so it is + // no longer valid. + b.subConn.Shutdown() b.subConn = nil } b.ResolverError(errors.New("produced zero addresses")) return balancer.ErrBadResolverState } + // We don't have to guard this block with the env var because ParseConfig + // already does so. + cfg, ok := state.BalancerConfig.(pfConfig) + if state.BalancerConfig != nil && !ok { + return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v", state.BalancerConfig, state.BalancerConfig) + } + if cfg.ShuffleAddressList { + addrs = append([]resolver.Address{}, addrs...) + grpcrand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] }) + } + + if b.logger.V(2) { + b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState)) + } + if b.subConn != nil { - b.cc.UpdateAddresses(b.subConn, state.ResolverState.Addresses) + b.cc.UpdateAddresses(b.subConn, addrs) return nil } - subConn, err := b.cc.NewSubConn(state.ResolverState.Addresses, balancer.NewSubConnOptions{}) + var subConn balancer.SubConn + subConn, err := b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{ + StateListener: func(state balancer.SubConnState) { + b.updateSubConnState(subConn, state) + }, + }) if err != nil { - if logger.V(2) { - logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) + if b.logger.V(2) { + b.logger.Infof("Failed to create new SubConn: %v", err) } b.state = connectivity.TransientFailure b.cc.UpdateState(balancer.State{ @@ -109,17 +173,22 @@ return nil } +// UpdateSubConnState is unused as a StateListener is always registered when +// creating SubConns. func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { - if logger.V(2) { - logger.Infof("pickfirstBalancer: UpdateSubConnState: %p, %v", subConn, state) + b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", subConn, state) +} + +func (b *pickfirstBalancer) updateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { + if b.logger.V(2) { + b.logger.Infof("Received SubConn state update: %p, %+v", subConn, state) } if b.subConn != subConn { - if logger.V(2) { - logger.Infof("pickfirstBalancer: ignored state change because subConn is not recognized") + if b.logger.V(2) { + b.logger.Infof("Ignored state change because subConn is not recognized") } return } - b.state = state.ConnectivityState if state.ConnectivityState == connectivity.Shutdown { b.subConn = nil return @@ -132,11 +201,21 @@ Picker: &picker{result: balancer.PickResult{SubConn: subConn}}, }) case connectivity.Connecting: + if b.state == connectivity.TransientFailure { + // We stay in TransientFailure until we are Ready. See A62. + return + } b.cc.UpdateState(balancer.State{ ConnectivityState: state.ConnectivityState, Picker: &picker{err: balancer.ErrNoSubConnAvailable}, }) case connectivity.Idle: + if b.state == connectivity.TransientFailure { + // We stay in TransientFailure until we are Ready. Also kick the + // subConn out of Idle into Connecting. See A62. 
+ b.subConn.Connect() + return + } b.cc.UpdateState(balancer.State{ ConnectivityState: state.ConnectivityState, Picker: &idlePicker{subConn: subConn}, @@ -147,6 +226,7 @@ Picker: &picker{err: state.ConnectionError}, }) } + b.state = state.ConnectivityState } func (b *pickfirstBalancer) Close() { diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/preloader.go temporal-1.22.5/src/vendor/google.golang.org/grpc/preloader.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/preloader.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/preloader.go 2024-02-23 09:46:15.000000000 +0000 @@ -37,7 +37,7 @@ } // Encode marshalls and compresses the message using the codec and compressor for the stream. -func (p *PreparedMsg) Encode(s Stream, msg interface{}) error { +func (p *PreparedMsg) Encode(s Stream, msg any) error { ctx := s.Context() rpcInfo, ok := rpcInfoFromContext(ctx) if !ok { diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/reflection/adapt.go temporal-1.22.5/src/vendor/google.golang.org/grpc/reflection/adapt.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/reflection/adapt.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/reflection/adapt.go 2024-02-23 09:46:15.000000000 +0000 @@ -0,0 +1,187 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package reflection + +import ( + v1reflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1" + v1reflectionpb "google.golang.org/grpc/reflection/grpc_reflection_v1" + v1alphareflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" + v1alphareflectionpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" +) + +// asV1Alpha returns an implementation of the v1alpha version of the reflection +// interface that delegates all calls to the given v1 version. 
+func asV1Alpha(svr v1reflectiongrpc.ServerReflectionServer) v1alphareflectiongrpc.ServerReflectionServer { + return v1AlphaServerImpl{svr: svr} +} + +type v1AlphaServerImpl struct { + svr v1reflectiongrpc.ServerReflectionServer +} + +func (s v1AlphaServerImpl) ServerReflectionInfo(stream v1alphareflectiongrpc.ServerReflection_ServerReflectionInfoServer) error { + return s.svr.ServerReflectionInfo(v1AlphaServerStreamAdapter{stream}) +} + +type v1AlphaServerStreamAdapter struct { + v1alphareflectiongrpc.ServerReflection_ServerReflectionInfoServer +} + +func (s v1AlphaServerStreamAdapter) Send(response *v1reflectionpb.ServerReflectionResponse) error { + return s.ServerReflection_ServerReflectionInfoServer.Send(v1ToV1AlphaResponse(response)) +} + +func (s v1AlphaServerStreamAdapter) Recv() (*v1reflectionpb.ServerReflectionRequest, error) { + resp, err := s.ServerReflection_ServerReflectionInfoServer.Recv() + if err != nil { + return nil, err + } + return v1AlphaToV1Request(resp), nil +} + +func v1ToV1AlphaResponse(v1 *v1reflectionpb.ServerReflectionResponse) *v1alphareflectionpb.ServerReflectionResponse { + var v1alpha v1alphareflectionpb.ServerReflectionResponse + v1alpha.ValidHost = v1.ValidHost + if v1.OriginalRequest != nil { + v1alpha.OriginalRequest = v1ToV1AlphaRequest(v1.OriginalRequest) + } + switch mr := v1.MessageResponse.(type) { + case *v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse: + if mr != nil { + v1alpha.MessageResponse = &v1alphareflectionpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &v1alphareflectionpb.FileDescriptorResponse{ + FileDescriptorProto: mr.FileDescriptorResponse.GetFileDescriptorProto(), + }, + } + } + case *v1reflectionpb.ServerReflectionResponse_AllExtensionNumbersResponse: + if mr != nil { + v1alpha.MessageResponse = &v1alphareflectionpb.ServerReflectionResponse_AllExtensionNumbersResponse{ + AllExtensionNumbersResponse: &v1alphareflectionpb.ExtensionNumberResponse{ + BaseTypeName: mr.AllExtensionNumbersResponse.GetBaseTypeName(), + ExtensionNumber: mr.AllExtensionNumbersResponse.GetExtensionNumber(), + }, + } + } + case *v1reflectionpb.ServerReflectionResponse_ListServicesResponse: + if mr != nil { + svcs := make([]*v1alphareflectionpb.ServiceResponse, len(mr.ListServicesResponse.GetService())) + for i, svc := range mr.ListServicesResponse.GetService() { + svcs[i] = &v1alphareflectionpb.ServiceResponse{ + Name: svc.GetName(), + } + } + v1alpha.MessageResponse = &v1alphareflectionpb.ServerReflectionResponse_ListServicesResponse{ + ListServicesResponse: &v1alphareflectionpb.ListServiceResponse{ + Service: svcs, + }, + } + } + case *v1reflectionpb.ServerReflectionResponse_ErrorResponse: + if mr != nil { + v1alpha.MessageResponse = &v1alphareflectionpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1alphareflectionpb.ErrorResponse{ + ErrorCode: mr.ErrorResponse.GetErrorCode(), + ErrorMessage: mr.ErrorResponse.GetErrorMessage(), + }, + } + } + default: + // no value set + } + return &v1alpha +} + +func v1AlphaToV1Request(v1alpha *v1alphareflectionpb.ServerReflectionRequest) *v1reflectionpb.ServerReflectionRequest { + var v1 v1reflectionpb.ServerReflectionRequest + v1.Host = v1alpha.Host + switch mr := v1alpha.MessageRequest.(type) { + case *v1alphareflectionpb.ServerReflectionRequest_FileByFilename: + v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_FileByFilename{ + FileByFilename: mr.FileByFilename, + } + case *v1alphareflectionpb.ServerReflectionRequest_FileContainingSymbol: + 
v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_FileContainingSymbol{ + FileContainingSymbol: mr.FileContainingSymbol, + } + case *v1alphareflectionpb.ServerReflectionRequest_FileContainingExtension: + if mr.FileContainingExtension != nil { + v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_FileContainingExtension{ + FileContainingExtension: &v1reflectionpb.ExtensionRequest{ + ContainingType: mr.FileContainingExtension.GetContainingType(), + ExtensionNumber: mr.FileContainingExtension.GetExtensionNumber(), + }, + } + } + case *v1alphareflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType: + v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType{ + AllExtensionNumbersOfType: mr.AllExtensionNumbersOfType, + } + case *v1alphareflectionpb.ServerReflectionRequest_ListServices: + v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_ListServices{ + ListServices: mr.ListServices, + } + default: + // no value set + } + return &v1 +} + +func v1ToV1AlphaRequest(v1 *v1reflectionpb.ServerReflectionRequest) *v1alphareflectionpb.ServerReflectionRequest { + var v1alpha v1alphareflectionpb.ServerReflectionRequest + v1alpha.Host = v1.Host + switch mr := v1.MessageRequest.(type) { + case *v1reflectionpb.ServerReflectionRequest_FileByFilename: + if mr != nil { + v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_FileByFilename{ + FileByFilename: mr.FileByFilename, + } + } + case *v1reflectionpb.ServerReflectionRequest_FileContainingSymbol: + if mr != nil { + v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_FileContainingSymbol{ + FileContainingSymbol: mr.FileContainingSymbol, + } + } + case *v1reflectionpb.ServerReflectionRequest_FileContainingExtension: + if mr != nil { + v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_FileContainingExtension{ + FileContainingExtension: &v1alphareflectionpb.ExtensionRequest{ + ContainingType: mr.FileContainingExtension.GetContainingType(), + ExtensionNumber: mr.FileContainingExtension.GetExtensionNumber(), + }, + } + } + case *v1reflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType: + if mr != nil { + v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType{ + AllExtensionNumbersOfType: mr.AllExtensionNumbersOfType, + } + } + case *v1reflectionpb.ServerReflectionRequest_ListServices: + if mr != nil { + v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_ListServices{ + ListServices: mr.ListServices, + } + } + default: + // no value set + } + return &v1alpha +} diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go temporal-1.22.5/src/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go 2024-02-23 09:46:15.000000000 +0000 @@ -0,0 +1,953 @@ +// Copyright 2016 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Service exported by server reflection. A more complete description of how +// server reflection works can be found at +// https://github.com/grpc/grpc/blob/master/doc/server-reflection.md +// +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/reflection/v1/reflection.proto + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.31.0 +// protoc v4.22.0 +// source: grpc/reflection/v1/reflection.proto + +package grpc_reflection_v1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The message sent by the client when calling ServerReflectionInfo method. +type ServerReflectionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"` + // To use reflection service, the client should set one of the following + // fields in message_request. The server distinguishes requests by their + // defined field and then handles them using corresponding methods. + // + // Types that are assignable to MessageRequest: + // + // *ServerReflectionRequest_FileByFilename + // *ServerReflectionRequest_FileContainingSymbol + // *ServerReflectionRequest_FileContainingExtension + // *ServerReflectionRequest_AllExtensionNumbersOfType + // *ServerReflectionRequest_ListServices + MessageRequest isServerReflectionRequest_MessageRequest `protobuf_oneof:"message_request"` +} + +func (x *ServerReflectionRequest) Reset() { + *x = ServerReflectionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServerReflectionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServerReflectionRequest) ProtoMessage() {} + +func (x *ServerReflectionRequest) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServerReflectionRequest.ProtoReflect.Descriptor instead. 
+func (*ServerReflectionRequest) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{0} +} + +func (x *ServerReflectionRequest) GetHost() string { + if x != nil { + return x.Host + } + return "" +} + +func (m *ServerReflectionRequest) GetMessageRequest() isServerReflectionRequest_MessageRequest { + if m != nil { + return m.MessageRequest + } + return nil +} + +func (x *ServerReflectionRequest) GetFileByFilename() string { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileByFilename); ok { + return x.FileByFilename + } + return "" +} + +func (x *ServerReflectionRequest) GetFileContainingSymbol() string { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileContainingSymbol); ok { + return x.FileContainingSymbol + } + return "" +} + +func (x *ServerReflectionRequest) GetFileContainingExtension() *ExtensionRequest { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileContainingExtension); ok { + return x.FileContainingExtension + } + return nil +} + +func (x *ServerReflectionRequest) GetAllExtensionNumbersOfType() string { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_AllExtensionNumbersOfType); ok { + return x.AllExtensionNumbersOfType + } + return "" +} + +func (x *ServerReflectionRequest) GetListServices() string { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_ListServices); ok { + return x.ListServices + } + return "" +} + +type isServerReflectionRequest_MessageRequest interface { + isServerReflectionRequest_MessageRequest() +} + +type ServerReflectionRequest_FileByFilename struct { + // Find a proto file by the file name. + FileByFilename string `protobuf:"bytes,3,opt,name=file_by_filename,json=fileByFilename,proto3,oneof"` +} + +type ServerReflectionRequest_FileContainingSymbol struct { + // Find the proto file that declares the given fully-qualified symbol name. + // This field should be a fully-qualified symbol name + // (e.g. .[.] or .). + FileContainingSymbol string `protobuf:"bytes,4,opt,name=file_containing_symbol,json=fileContainingSymbol,proto3,oneof"` +} + +type ServerReflectionRequest_FileContainingExtension struct { + // Find the proto file which defines an extension extending the given + // message type with the given field number. + FileContainingExtension *ExtensionRequest `protobuf:"bytes,5,opt,name=file_containing_extension,json=fileContainingExtension,proto3,oneof"` +} + +type ServerReflectionRequest_AllExtensionNumbersOfType struct { + // Finds the tag numbers used by all known extensions of the given message + // type, and appends them to ExtensionNumberResponse in an undefined order. + // Its corresponding method is best-effort: it's not guaranteed that the + // reflection service will implement this method, and it's not guaranteed + // that this method will provide all extensions. Returns + // StatusCode::UNIMPLEMENTED if it's not implemented. + // This field should be a fully-qualified type name. The format is + // . + AllExtensionNumbersOfType string `protobuf:"bytes,6,opt,name=all_extension_numbers_of_type,json=allExtensionNumbersOfType,proto3,oneof"` +} + +type ServerReflectionRequest_ListServices struct { + // List the full names of registered services. The content will not be + // checked. 
+ ListServices string `protobuf:"bytes,7,opt,name=list_services,json=listServices,proto3,oneof"` +} + +func (*ServerReflectionRequest_FileByFilename) isServerReflectionRequest_MessageRequest() {} + +func (*ServerReflectionRequest_FileContainingSymbol) isServerReflectionRequest_MessageRequest() {} + +func (*ServerReflectionRequest_FileContainingExtension) isServerReflectionRequest_MessageRequest() {} + +func (*ServerReflectionRequest_AllExtensionNumbersOfType) isServerReflectionRequest_MessageRequest() { +} + +func (*ServerReflectionRequest_ListServices) isServerReflectionRequest_MessageRequest() {} + +// The type name and extension number sent by the client when requesting +// file_containing_extension. +type ExtensionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Fully-qualified type name. The format should be . + ContainingType string `protobuf:"bytes,1,opt,name=containing_type,json=containingType,proto3" json:"containing_type,omitempty"` + ExtensionNumber int32 `protobuf:"varint,2,opt,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"` +} + +func (x *ExtensionRequest) Reset() { + *x = ExtensionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExtensionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExtensionRequest) ProtoMessage() {} + +func (x *ExtensionRequest) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExtensionRequest.ProtoReflect.Descriptor instead. +func (*ExtensionRequest) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{1} +} + +func (x *ExtensionRequest) GetContainingType() string { + if x != nil { + return x.ContainingType + } + return "" +} + +func (x *ExtensionRequest) GetExtensionNumber() int32 { + if x != nil { + return x.ExtensionNumber + } + return 0 +} + +// The message sent by the server to answer ServerReflectionInfo method. +type ServerReflectionResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ValidHost string `protobuf:"bytes,1,opt,name=valid_host,json=validHost,proto3" json:"valid_host,omitempty"` + OriginalRequest *ServerReflectionRequest `protobuf:"bytes,2,opt,name=original_request,json=originalRequest,proto3" json:"original_request,omitempty"` + // The server sets one of the following fields according to the message_request + // in the request. 
+ // + // Types that are assignable to MessageResponse: + // + // *ServerReflectionResponse_FileDescriptorResponse + // *ServerReflectionResponse_AllExtensionNumbersResponse + // *ServerReflectionResponse_ListServicesResponse + // *ServerReflectionResponse_ErrorResponse + MessageResponse isServerReflectionResponse_MessageResponse `protobuf_oneof:"message_response"` +} + +func (x *ServerReflectionResponse) Reset() { + *x = ServerReflectionResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServerReflectionResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServerReflectionResponse) ProtoMessage() {} + +func (x *ServerReflectionResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServerReflectionResponse.ProtoReflect.Descriptor instead. +func (*ServerReflectionResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{2} +} + +func (x *ServerReflectionResponse) GetValidHost() string { + if x != nil { + return x.ValidHost + } + return "" +} + +func (x *ServerReflectionResponse) GetOriginalRequest() *ServerReflectionRequest { + if x != nil { + return x.OriginalRequest + } + return nil +} + +func (m *ServerReflectionResponse) GetMessageResponse() isServerReflectionResponse_MessageResponse { + if m != nil { + return m.MessageResponse + } + return nil +} + +func (x *ServerReflectionResponse) GetFileDescriptorResponse() *FileDescriptorResponse { + if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_FileDescriptorResponse); ok { + return x.FileDescriptorResponse + } + return nil +} + +func (x *ServerReflectionResponse) GetAllExtensionNumbersResponse() *ExtensionNumberResponse { + if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_AllExtensionNumbersResponse); ok { + return x.AllExtensionNumbersResponse + } + return nil +} + +func (x *ServerReflectionResponse) GetListServicesResponse() *ListServiceResponse { + if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_ListServicesResponse); ok { + return x.ListServicesResponse + } + return nil +} + +func (x *ServerReflectionResponse) GetErrorResponse() *ErrorResponse { + if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_ErrorResponse); ok { + return x.ErrorResponse + } + return nil +} + +type isServerReflectionResponse_MessageResponse interface { + isServerReflectionResponse_MessageResponse() +} + +type ServerReflectionResponse_FileDescriptorResponse struct { + // This message is used to answer file_by_filename, file_containing_symbol, + // file_containing_extension requests with transitive dependencies. + // As the repeated label is not allowed in oneof fields, we use a + // FileDescriptorResponse message to encapsulate the repeated fields. + // The reflection service is allowed to avoid sending FileDescriptorProtos + // that were previously sent in response to earlier requests in the stream. 
+ FileDescriptorResponse *FileDescriptorResponse `protobuf:"bytes,4,opt,name=file_descriptor_response,json=fileDescriptorResponse,proto3,oneof"` +} + +type ServerReflectionResponse_AllExtensionNumbersResponse struct { + // This message is used to answer all_extension_numbers_of_type requests. + AllExtensionNumbersResponse *ExtensionNumberResponse `protobuf:"bytes,5,opt,name=all_extension_numbers_response,json=allExtensionNumbersResponse,proto3,oneof"` +} + +type ServerReflectionResponse_ListServicesResponse struct { + // This message is used to answer list_services requests. + ListServicesResponse *ListServiceResponse `protobuf:"bytes,6,opt,name=list_services_response,json=listServicesResponse,proto3,oneof"` +} + +type ServerReflectionResponse_ErrorResponse struct { + // This message is used when an error occurs. + ErrorResponse *ErrorResponse `protobuf:"bytes,7,opt,name=error_response,json=errorResponse,proto3,oneof"` +} + +func (*ServerReflectionResponse_FileDescriptorResponse) isServerReflectionResponse_MessageResponse() { +} + +func (*ServerReflectionResponse_AllExtensionNumbersResponse) isServerReflectionResponse_MessageResponse() { +} + +func (*ServerReflectionResponse_ListServicesResponse) isServerReflectionResponse_MessageResponse() {} + +func (*ServerReflectionResponse_ErrorResponse) isServerReflectionResponse_MessageResponse() {} + +// Serialized FileDescriptorProto messages sent by the server answering +// a file_by_filename, file_containing_symbol, or file_containing_extension +// request. +type FileDescriptorResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Serialized FileDescriptorProto messages. We avoid taking a dependency on + // descriptor.proto, which uses proto2 only features, by making them opaque + // bytes instead. + FileDescriptorProto [][]byte `protobuf:"bytes,1,rep,name=file_descriptor_proto,json=fileDescriptorProto,proto3" json:"file_descriptor_proto,omitempty"` +} + +func (x *FileDescriptorResponse) Reset() { + *x = FileDescriptorResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FileDescriptorResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FileDescriptorResponse) ProtoMessage() {} + +func (x *FileDescriptorResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FileDescriptorResponse.ProtoReflect.Descriptor instead. +func (*FileDescriptorResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{3} +} + +func (x *FileDescriptorResponse) GetFileDescriptorProto() [][]byte { + if x != nil { + return x.FileDescriptorProto + } + return nil +} + +// A list of extension numbers sent by the server answering +// all_extension_numbers_of_type request. +type ExtensionNumberResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Full name of the base type, including the package name. The format + // is . 
+ BaseTypeName string `protobuf:"bytes,1,opt,name=base_type_name,json=baseTypeName,proto3" json:"base_type_name,omitempty"` + ExtensionNumber []int32 `protobuf:"varint,2,rep,packed,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"` +} + +func (x *ExtensionNumberResponse) Reset() { + *x = ExtensionNumberResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExtensionNumberResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExtensionNumberResponse) ProtoMessage() {} + +func (x *ExtensionNumberResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExtensionNumberResponse.ProtoReflect.Descriptor instead. +func (*ExtensionNumberResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{4} +} + +func (x *ExtensionNumberResponse) GetBaseTypeName() string { + if x != nil { + return x.BaseTypeName + } + return "" +} + +func (x *ExtensionNumberResponse) GetExtensionNumber() []int32 { + if x != nil { + return x.ExtensionNumber + } + return nil +} + +// A list of ServiceResponse sent by the server answering list_services request. +type ListServiceResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The information of each service may be expanded in the future, so we use + // ServiceResponse message to encapsulate it. + Service []*ServiceResponse `protobuf:"bytes,1,rep,name=service,proto3" json:"service,omitempty"` +} + +func (x *ListServiceResponse) Reset() { + *x = ListServiceResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListServiceResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListServiceResponse) ProtoMessage() {} + +func (x *ListServiceResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListServiceResponse.ProtoReflect.Descriptor instead. +func (*ListServiceResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{5} +} + +func (x *ListServiceResponse) GetService() []*ServiceResponse { + if x != nil { + return x.Service + } + return nil +} + +// The information of a single service used by ListServiceResponse to answer +// list_services request. +type ServiceResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Full name of a registered service, including its package name. The format + // is . 
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *ServiceResponse) Reset() { + *x = ServiceResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServiceResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServiceResponse) ProtoMessage() {} + +func (x *ServiceResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServiceResponse.ProtoReflect.Descriptor instead. +func (*ServiceResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{6} +} + +func (x *ServiceResponse) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// The error code and error message sent by the server when an error occurs. +type ErrorResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This field uses the error codes defined in grpc::StatusCode. + ErrorCode int32 `protobuf:"varint,1,opt,name=error_code,json=errorCode,proto3" json:"error_code,omitempty"` + ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` +} + +func (x *ErrorResponse) Reset() { + *x = ErrorResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ErrorResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ErrorResponse) ProtoMessage() {} + +func (x *ErrorResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ErrorResponse.ProtoReflect.Descriptor instead. 
+func (*ErrorResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{7} +} + +func (x *ErrorResponse) GetErrorCode() int32 { + if x != nil { + return x.ErrorCode + } + return 0 +} + +func (x *ErrorResponse) GetErrorMessage() string { + if x != nil { + return x.ErrorMessage + } + return "" +} + +var File_grpc_reflection_v1_reflection_proto protoreflect.FileDescriptor + +var file_grpc_reflection_v1_reflection_proto_rawDesc = []byte{ + 0x0a, 0x23, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x22, 0xf3, 0x02, 0x0a, 0x17, 0x53, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x2a, 0x0a, 0x10, 0x66, 0x69, 0x6c, + 0x65, 0x5f, 0x62, 0x79, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0e, 0x66, 0x69, 0x6c, 0x65, 0x42, 0x79, 0x46, 0x69, 0x6c, + 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x36, 0x0a, 0x16, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x14, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x53, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x12, 0x62, 0x0a, + 0x19, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, + 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x24, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x17, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x42, 0x0a, 0x1d, 0x61, 0x6c, 0x6c, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x5f, 0x6f, 0x66, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x19, 0x61, 0x6c, 0x6c, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x4f, + 0x66, 0x54, 0x79, 0x70, 0x65, 0x12, 0x25, 0x0a, 0x0d, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, + 0x6c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x42, 0x11, 0x0a, 0x0f, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, + 0x66, 0x0a, 0x10, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, + 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x10, + 0x65, 0x78, 0x74, 0x65, 
0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0xae, 0x04, 0x0a, 0x18, 0x53, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x68, 0x6f, + 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x48, + 0x6f, 0x73, 0x74, 0x12, 0x56, 0x0a, 0x10, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x5f, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0f, 0x6f, 0x72, 0x69, 0x67, + 0x69, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x66, 0x0a, 0x18, 0x66, + 0x69, 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x72, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x76, 0x31, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x16, 0x66, 0x69, 0x6c, + 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x72, 0x0a, 0x1e, 0x61, 0x6c, 0x6c, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x5f, 0x72, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, + 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x1b, 0x61, 0x6c, 0x6c, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5f, 0x0a, 0x16, 0x6c, 0x69, 0x73, 0x74, 0x5f, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, + 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, + 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x48, 0x00, 0x52, 0x14, 0x6c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x0e, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x12, 0x0a, 0x10, 
0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, + 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x4c, 0x0a, 0x16, 0x46, 0x69, 0x6c, 0x65, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x32, 0x0a, 0x15, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0c, 0x52, 0x13, 0x66, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x6a, 0x0a, 0x17, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x62, 0x61, 0x73, 0x65, 0x54, + 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x05, 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, + 0x65, 0x72, 0x22, 0x54, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3d, 0x0a, 0x07, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, + 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x22, 0x25, 0x0a, 0x0f, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, + 0x53, 0x0a, 0x0d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x12, + 0x23, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x32, 0x89, 0x01, 0x0a, 0x10, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, + 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x75, 0x0a, 0x14, 0x53, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, + 0x6f, 0x12, 0x2b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x28, 0x01, 0x30, 0x01, + 0x42, 0x66, 0x0a, 0x15, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x42, 0x15, 0x53, 
0x65, 0x72, 0x76, 0x65, + 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x50, 0x01, 0x5a, 0x34, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, + 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x72, 0x65, 0x66, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_grpc_reflection_v1_reflection_proto_rawDescOnce sync.Once + file_grpc_reflection_v1_reflection_proto_rawDescData = file_grpc_reflection_v1_reflection_proto_rawDesc +) + +func file_grpc_reflection_v1_reflection_proto_rawDescGZIP() []byte { + file_grpc_reflection_v1_reflection_proto_rawDescOnce.Do(func() { + file_grpc_reflection_v1_reflection_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_reflection_v1_reflection_proto_rawDescData) + }) + return file_grpc_reflection_v1_reflection_proto_rawDescData +} + +var file_grpc_reflection_v1_reflection_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_grpc_reflection_v1_reflection_proto_goTypes = []interface{}{ + (*ServerReflectionRequest)(nil), // 0: grpc.reflection.v1.ServerReflectionRequest + (*ExtensionRequest)(nil), // 1: grpc.reflection.v1.ExtensionRequest + (*ServerReflectionResponse)(nil), // 2: grpc.reflection.v1.ServerReflectionResponse + (*FileDescriptorResponse)(nil), // 3: grpc.reflection.v1.FileDescriptorResponse + (*ExtensionNumberResponse)(nil), // 4: grpc.reflection.v1.ExtensionNumberResponse + (*ListServiceResponse)(nil), // 5: grpc.reflection.v1.ListServiceResponse + (*ServiceResponse)(nil), // 6: grpc.reflection.v1.ServiceResponse + (*ErrorResponse)(nil), // 7: grpc.reflection.v1.ErrorResponse +} +var file_grpc_reflection_v1_reflection_proto_depIdxs = []int32{ + 1, // 0: grpc.reflection.v1.ServerReflectionRequest.file_containing_extension:type_name -> grpc.reflection.v1.ExtensionRequest + 0, // 1: grpc.reflection.v1.ServerReflectionResponse.original_request:type_name -> grpc.reflection.v1.ServerReflectionRequest + 3, // 2: grpc.reflection.v1.ServerReflectionResponse.file_descriptor_response:type_name -> grpc.reflection.v1.FileDescriptorResponse + 4, // 3: grpc.reflection.v1.ServerReflectionResponse.all_extension_numbers_response:type_name -> grpc.reflection.v1.ExtensionNumberResponse + 5, // 4: grpc.reflection.v1.ServerReflectionResponse.list_services_response:type_name -> grpc.reflection.v1.ListServiceResponse + 7, // 5: grpc.reflection.v1.ServerReflectionResponse.error_response:type_name -> grpc.reflection.v1.ErrorResponse + 6, // 6: grpc.reflection.v1.ListServiceResponse.service:type_name -> grpc.reflection.v1.ServiceResponse + 0, // 7: grpc.reflection.v1.ServerReflection.ServerReflectionInfo:input_type -> grpc.reflection.v1.ServerReflectionRequest + 2, // 8: grpc.reflection.v1.ServerReflection.ServerReflectionInfo:output_type -> grpc.reflection.v1.ServerReflectionResponse + 8, // [8:9] is the sub-list for method output_type + 7, // [7:8] is the sub-list for method input_type + 7, // [7:7] is the sub-list for extension type_name + 7, // [7:7] is the sub-list for extension extendee + 0, // [0:7] is the sub-list for field type_name +} + +func init() { file_grpc_reflection_v1_reflection_proto_init() } +func file_grpc_reflection_v1_reflection_proto_init() { + if File_grpc_reflection_v1_reflection_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + 
file_grpc_reflection_v1_reflection_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServerReflectionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExtensionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServerReflectionResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FileDescriptorResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExtensionNumberResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListServiceResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServiceResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ErrorResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*ServerReflectionRequest_FileByFilename)(nil), + (*ServerReflectionRequest_FileContainingSymbol)(nil), + (*ServerReflectionRequest_FileContainingExtension)(nil), + (*ServerReflectionRequest_AllExtensionNumbersOfType)(nil), + (*ServerReflectionRequest_ListServices)(nil), + } + file_grpc_reflection_v1_reflection_proto_msgTypes[2].OneofWrappers = []interface{}{ + (*ServerReflectionResponse_FileDescriptorResponse)(nil), + (*ServerReflectionResponse_AllExtensionNumbersResponse)(nil), + (*ServerReflectionResponse_ListServicesResponse)(nil), + (*ServerReflectionResponse_ErrorResponse)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_grpc_reflection_v1_reflection_proto_rawDesc, + NumEnums: 0, + NumMessages: 8, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_grpc_reflection_v1_reflection_proto_goTypes, + DependencyIndexes: file_grpc_reflection_v1_reflection_proto_depIdxs, + MessageInfos: file_grpc_reflection_v1_reflection_proto_msgTypes, + }.Build() + File_grpc_reflection_v1_reflection_proto = out.File + file_grpc_reflection_v1_reflection_proto_rawDesc = nil + 
file_grpc_reflection_v1_reflection_proto_goTypes = nil + file_grpc_reflection_v1_reflection_proto_depIdxs = nil +} diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection_grpc.pb.go temporal-1.22.5/src/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection_grpc.pb.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection_grpc.pb.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection_grpc.pb.go 2024-02-23 09:46:15.000000000 +0000 @@ -0,0 +1,164 @@ +// Copyright 2016 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Service exported by server reflection. A more complete description of how +// server reflection works can be found at +// https://github.com/grpc/grpc/blob/master/doc/server-reflection.md +// +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/reflection/v1/reflection.proto + +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.22.0 +// source: grpc/reflection/v1/reflection.proto + +package grpc_reflection_v1 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + ServerReflection_ServerReflectionInfo_FullMethodName = "/grpc.reflection.v1.ServerReflection/ServerReflectionInfo" +) + +// ServerReflectionClient is the client API for ServerReflection service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type ServerReflectionClient interface { + // The reflection service is structured as a bidirectional stream, ensuring + // all related requests go to a single server. + ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) +} + +type serverReflectionClient struct { + cc grpc.ClientConnInterface +} + +func NewServerReflectionClient(cc grpc.ClientConnInterface) ServerReflectionClient { + return &serverReflectionClient{cc} +} + +func (c *serverReflectionClient) ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) { + stream, err := c.cc.NewStream(ctx, &ServerReflection_ServiceDesc.Streams[0], ServerReflection_ServerReflectionInfo_FullMethodName, opts...) 
+ if err != nil { + return nil, err + } + x := &serverReflectionServerReflectionInfoClient{stream} + return x, nil +} + +type ServerReflection_ServerReflectionInfoClient interface { + Send(*ServerReflectionRequest) error + Recv() (*ServerReflectionResponse, error) + grpc.ClientStream +} + +type serverReflectionServerReflectionInfoClient struct { + grpc.ClientStream +} + +func (x *serverReflectionServerReflectionInfoClient) Send(m *ServerReflectionRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *serverReflectionServerReflectionInfoClient) Recv() (*ServerReflectionResponse, error) { + m := new(ServerReflectionResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// ServerReflectionServer is the server API for ServerReflection service. +// All implementations should embed UnimplementedServerReflectionServer +// for forward compatibility +type ServerReflectionServer interface { + // The reflection service is structured as a bidirectional stream, ensuring + // all related requests go to a single server. + ServerReflectionInfo(ServerReflection_ServerReflectionInfoServer) error +} + +// UnimplementedServerReflectionServer should be embedded to have forward compatible implementations. +type UnimplementedServerReflectionServer struct { +} + +func (UnimplementedServerReflectionServer) ServerReflectionInfo(ServerReflection_ServerReflectionInfoServer) error { + return status.Errorf(codes.Unimplemented, "method ServerReflectionInfo not implemented") +} + +// UnsafeServerReflectionServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to ServerReflectionServer will +// result in compilation errors. +type UnsafeServerReflectionServer interface { + mustEmbedUnimplementedServerReflectionServer() +} + +func RegisterServerReflectionServer(s grpc.ServiceRegistrar, srv ServerReflectionServer) { + s.RegisterService(&ServerReflection_ServiceDesc, srv) +} + +func _ServerReflection_ServerReflectionInfo_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(ServerReflectionServer).ServerReflectionInfo(&serverReflectionServerReflectionInfoServer{stream}) +} + +type ServerReflection_ServerReflectionInfoServer interface { + Send(*ServerReflectionResponse) error + Recv() (*ServerReflectionRequest, error) + grpc.ServerStream +} + +type serverReflectionServerReflectionInfoServer struct { + grpc.ServerStream +} + +func (x *serverReflectionServerReflectionInfoServer) Send(m *ServerReflectionResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *serverReflectionServerReflectionInfoServer) Recv() (*ServerReflectionRequest, error) { + m := new(ServerReflectionRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// ServerReflection_ServiceDesc is the grpc.ServiceDesc for ServerReflection service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var ServerReflection_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.reflection.v1.ServerReflection", + HandlerType: (*ServerReflectionServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "ServerReflectionInfo", + Handler: _ServerReflection_ServerReflectionInfo_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "grpc/reflection/v1/reflection.proto", +} diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go temporal-1.22.5/src/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go 2024-02-23 09:46:15.000000000 +0000 @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.31.0 // protoc v4.22.0 // grpc/reflection/v1alpha/reflection.proto is a deprecated file. diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/reflection/serverreflection.go temporal-1.22.5/src/vendor/google.golang.org/grpc/reflection/serverreflection.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/reflection/serverreflection.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/reflection/serverreflection.go 2024-02-23 09:46:15.000000000 +0000 @@ -48,8 +48,9 @@ "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/reflect/protoregistry" - v1alphagrpc "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" - v1alphapb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" + v1reflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1" + v1reflectionpb "google.golang.org/grpc/reflection/grpc_reflection_v1" + v1alphareflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" ) // GRPCServer is the interface provided by a gRPC server. It is implemented by @@ -63,9 +64,19 @@ var _ GRPCServer = (*grpc.Server)(nil) // Register registers the server reflection service on the given gRPC server. +// Both the v1 and v1alpha versions are registered. func Register(s GRPCServer) { - svr := NewServer(ServerOptions{Services: s}) - v1alphagrpc.RegisterServerReflectionServer(s, svr) + svr := NewServerV1(ServerOptions{Services: s}) + v1alphareflectiongrpc.RegisterServerReflectionServer(s, asV1Alpha(svr)) + v1reflectiongrpc.RegisterServerReflectionServer(s, svr) +} + +// RegisterV1 registers only the v1 version of the server reflection service +// on the given gRPC server. Many clients may only support v1alpha so most +// users should use Register instead, at least until clients have upgraded. +func RegisterV1(s GRPCServer) { + svr := NewServerV1(ServerOptions{Services: s}) + v1reflectiongrpc.RegisterServerReflectionServer(s, svr) } // ServiceInfoProvider is an interface used to retrieve metadata about the @@ -120,13 +131,27 @@ // NewServer returns a reflection server implementation using the given options. // This can be used to customize behavior of the reflection service. Most usages +// should prefer to use Register instead. For backwards compatibility reasons, +// this returns the v1alpha version of the reflection server. 
For a v1 version +// of the reflection server, see NewServerV1. +// +// # Experimental +// +// Notice: This function is EXPERIMENTAL and may be changed or removed in a +// later release. +func NewServer(opts ServerOptions) v1alphareflectiongrpc.ServerReflectionServer { + return asV1Alpha(NewServerV1(opts)) +} + +// NewServerV1 returns a reflection server implementation using the given options. +// This can be used to customize behavior of the reflection service. Most usages // should prefer to use Register instead. // // # Experimental // // Notice: This function is EXPERIMENTAL and may be changed or removed in a // later release. -func NewServer(opts ServerOptions) v1alphagrpc.ServerReflectionServer { +func NewServerV1(opts ServerOptions) v1reflectiongrpc.ServerReflectionServer { if opts.DescriptorResolver == nil { opts.DescriptorResolver = protoregistry.GlobalFiles } @@ -141,7 +166,7 @@ } type serverReflectionServer struct { - v1alphagrpc.UnimplementedServerReflectionServer + v1alphareflectiongrpc.UnimplementedServerReflectionServer s ServiceInfoProvider descResolver protodesc.Resolver extResolver ExtensionResolver @@ -215,11 +240,11 @@ } // listServices returns the names of services this server exposes. -func (s *serverReflectionServer) listServices() []*v1alphapb.ServiceResponse { +func (s *serverReflectionServer) listServices() []*v1reflectionpb.ServiceResponse { serviceInfo := s.s.GetServiceInfo() - resp := make([]*v1alphapb.ServiceResponse, 0, len(serviceInfo)) + resp := make([]*v1reflectionpb.ServiceResponse, 0, len(serviceInfo)) for svc := range serviceInfo { - resp = append(resp, &v1alphapb.ServiceResponse{Name: svc}) + resp = append(resp, &v1reflectionpb.ServiceResponse{Name: svc}) } sort.Slice(resp, func(i, j int) bool { return resp[i].Name < resp[j].Name @@ -228,7 +253,7 @@ } // ServerReflectionInfo is the reflection service handler. 
-func (s *serverReflectionServer) ServerReflectionInfo(stream v1alphagrpc.ServerReflection_ServerReflectionInfoServer) error { +func (s *serverReflectionServer) ServerReflectionInfo(stream v1reflectiongrpc.ServerReflection_ServerReflectionInfoServer) error { sentFileDescriptors := make(map[string]bool) for { in, err := stream.Recv() @@ -239,79 +264,79 @@ return err } - out := &v1alphapb.ServerReflectionResponse{ + out := &v1reflectionpb.ServerReflectionResponse{ ValidHost: in.Host, OriginalRequest: in, } switch req := in.MessageRequest.(type) { - case *v1alphapb.ServerReflectionRequest_FileByFilename: + case *v1reflectionpb.ServerReflectionRequest_FileByFilename: var b [][]byte fd, err := s.descResolver.FindFileByPath(req.FileByFilename) if err == nil { b, err = s.fileDescWithDependencies(fd, sentFileDescriptors) } if err != nil { - out.MessageResponse = &v1alphapb.ServerReflectionResponse_ErrorResponse{ - ErrorResponse: &v1alphapb.ErrorResponse{ + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1reflectionpb.ErrorResponse{ ErrorCode: int32(codes.NotFound), ErrorMessage: err.Error(), }, } } else { - out.MessageResponse = &v1alphapb.ServerReflectionResponse_FileDescriptorResponse{ - FileDescriptorResponse: &v1alphapb.FileDescriptorResponse{FileDescriptorProto: b}, + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &v1reflectionpb.FileDescriptorResponse{FileDescriptorProto: b}, } } - case *v1alphapb.ServerReflectionRequest_FileContainingSymbol: + case *v1reflectionpb.ServerReflectionRequest_FileContainingSymbol: b, err := s.fileDescEncodingContainingSymbol(req.FileContainingSymbol, sentFileDescriptors) if err != nil { - out.MessageResponse = &v1alphapb.ServerReflectionResponse_ErrorResponse{ - ErrorResponse: &v1alphapb.ErrorResponse{ + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1reflectionpb.ErrorResponse{ ErrorCode: int32(codes.NotFound), ErrorMessage: err.Error(), }, } } else { - out.MessageResponse = &v1alphapb.ServerReflectionResponse_FileDescriptorResponse{ - FileDescriptorResponse: &v1alphapb.FileDescriptorResponse{FileDescriptorProto: b}, + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &v1reflectionpb.FileDescriptorResponse{FileDescriptorProto: b}, } } - case *v1alphapb.ServerReflectionRequest_FileContainingExtension: + case *v1reflectionpb.ServerReflectionRequest_FileContainingExtension: typeName := req.FileContainingExtension.ContainingType extNum := req.FileContainingExtension.ExtensionNumber b, err := s.fileDescEncodingContainingExtension(typeName, extNum, sentFileDescriptors) if err != nil { - out.MessageResponse = &v1alphapb.ServerReflectionResponse_ErrorResponse{ - ErrorResponse: &v1alphapb.ErrorResponse{ + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1reflectionpb.ErrorResponse{ ErrorCode: int32(codes.NotFound), ErrorMessage: err.Error(), }, } } else { - out.MessageResponse = &v1alphapb.ServerReflectionResponse_FileDescriptorResponse{ - FileDescriptorResponse: &v1alphapb.FileDescriptorResponse{FileDescriptorProto: b}, + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &v1reflectionpb.FileDescriptorResponse{FileDescriptorProto: b}, } } - case *v1alphapb.ServerReflectionRequest_AllExtensionNumbersOfType: + case 
*v1reflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType: extNums, err := s.allExtensionNumbersForTypeName(req.AllExtensionNumbersOfType) if err != nil { - out.MessageResponse = &v1alphapb.ServerReflectionResponse_ErrorResponse{ - ErrorResponse: &v1alphapb.ErrorResponse{ + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1reflectionpb.ErrorResponse{ ErrorCode: int32(codes.NotFound), ErrorMessage: err.Error(), }, } } else { - out.MessageResponse = &v1alphapb.ServerReflectionResponse_AllExtensionNumbersResponse{ - AllExtensionNumbersResponse: &v1alphapb.ExtensionNumberResponse{ + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_AllExtensionNumbersResponse{ + AllExtensionNumbersResponse: &v1reflectionpb.ExtensionNumberResponse{ BaseTypeName: req.AllExtensionNumbersOfType, ExtensionNumber: extNums, }, } } - case *v1alphapb.ServerReflectionRequest_ListServices: - out.MessageResponse = &v1alphapb.ServerReflectionResponse_ListServicesResponse{ - ListServicesResponse: &v1alphapb.ListServiceResponse{ + case *v1reflectionpb.ServerReflectionRequest_ListServices: + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ListServicesResponse{ + ListServicesResponse: &v1reflectionpb.ListServiceResponse{ Service: s.listServices(), }, } diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/resolver/map.go temporal-1.22.5/src/vendor/google.golang.org/grpc/resolver/map.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/resolver/map.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/resolver/map.go 2024-02-23 09:46:15.000000000 +0000 @@ -20,7 +20,7 @@ type addressMapEntry struct { addr Address - value interface{} + value any } // AddressMap is a map of addresses to arbitrary values taking into account @@ -69,7 +69,7 @@ } // Get returns the value for the address in the map, if present. -func (a *AddressMap) Get(addr Address) (value interface{}, ok bool) { +func (a *AddressMap) Get(addr Address) (value any, ok bool) { addrKey := toMapKey(&addr) entryList := a.m[addrKey] if entry := entryList.find(addr); entry != -1 { @@ -79,7 +79,7 @@ } // Set updates or adds the value to the address in the map. -func (a *AddressMap) Set(addr Address, value interface{}) { +func (a *AddressMap) Set(addr Address, value any) { addrKey := toMapKey(&addr) entryList := a.m[addrKey] if entry := entryList.find(addr); entry != -1 { @@ -127,8 +127,8 @@ } // Values returns a slice of all current map values. -func (a *AddressMap) Values() []interface{} { - ret := make([]interface{}, 0, a.Len()) +func (a *AddressMap) Values() []any { + ret := make([]any, 0, a.Len()) for _, entryList := range a.m { for _, entry := range entryList { ret = append(ret, entry.value) diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/resolver/resolver.go temporal-1.22.5/src/vendor/google.golang.org/grpc/resolver/resolver.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/resolver/resolver.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/resolver/resolver.go 2024-02-23 09:46:15.000000000 +0000 @@ -22,13 +22,13 @@ import ( "context" + "fmt" "net" "net/url" "strings" "google.golang.org/grpc/attributes" "google.golang.org/grpc/credentials" - "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/serviceconfig" ) @@ -77,25 +77,6 @@ return defaultScheme } -// AddressType indicates the address type returned by name resolution. 
-// -// Deprecated: use Attributes in Address instead. -type AddressType uint8 - -const ( - // Backend indicates the address is for a backend server. - // - // Deprecated: use Attributes in Address instead. - Backend AddressType = iota - // GRPCLB indicates the address is for a grpclb load balancer. - // - // Deprecated: to select the GRPCLB load balancing policy, use a service - // config with a corresponding loadBalancingConfig. To supply balancer - // addresses to the GRPCLB load balancing policy, set State.Attributes - // using balancer/grpclb/state.Set. - GRPCLB -) - // Address represents a server the client connects to. // // # Experimental @@ -111,9 +92,6 @@ // the address, instead of the hostname from the Dial target string. In most cases, // this should not be set. // - // If Type is GRPCLB, ServerName should be the name of the remote load - // balancer, not the name of the backend. - // // WARNING: ServerName must only be populated with trusted values. It // is insecure to populate it with data from untrusted inputs since untrusted // values could be used to bypass the authority checks performed by TLS. @@ -124,34 +102,46 @@ Attributes *attributes.Attributes // BalancerAttributes contains arbitrary data about this address intended - // for consumption by the LB policy. These attribes do not affect SubConn + // for consumption by the LB policy. These attributes do not affect SubConn // creation, connection establishment, handshaking, etc. - BalancerAttributes *attributes.Attributes - - // Type is the type of this address. // - // Deprecated: use Attributes instead. - Type AddressType + // Deprecated: when an Address is inside an Endpoint, this field should not + // be used, and it will eventually be removed entirely. + BalancerAttributes *attributes.Attributes // Metadata is the information associated with Addr, which may be used // to make load balancing decision. // // Deprecated: use Attributes instead. - Metadata interface{} + Metadata any } // Equal returns whether a and o are identical. Metadata is compared directly, // not with any recursive introspection. +// +// This method compares all fields of the address. When used to tell apart +// addresses during subchannel creation or connection establishment, it might be +// more appropriate for the caller to implement custom equality logic. func (a Address) Equal(o Address) bool { return a.Addr == o.Addr && a.ServerName == o.ServerName && a.Attributes.Equal(o.Attributes) && a.BalancerAttributes.Equal(o.BalancerAttributes) && - a.Type == o.Type && a.Metadata == o.Metadata + a.Metadata == o.Metadata } // String returns JSON formatted string representation of the address. func (a Address) String() string { - return pretty.ToJSON(a) + var sb strings.Builder + sb.WriteString(fmt.Sprintf("{Addr: %q, ", a.Addr)) + sb.WriteString(fmt.Sprintf("ServerName: %q, ", a.ServerName)) + if a.Attributes != nil { + sb.WriteString(fmt.Sprintf("Attributes: %v, ", a.Attributes.String())) + } + if a.BalancerAttributes != nil { + sb.WriteString(fmt.Sprintf("BalancerAttributes: %v", a.BalancerAttributes.String())) + } + sb.WriteString("}") + return sb.String() } // BuildOptions includes additional information for the builder to create @@ -180,11 +170,37 @@ Dialer func(context.Context, string) (net.Conn, error) } +// An Endpoint is one network endpoint, or server, which may have multiple +// addresses with which it can be accessed. +type Endpoint struct { + // Addresses contains a list of addresses used to access this endpoint. 
+ Addresses []Address + + // Attributes contains arbitrary data about this endpoint intended for + // consumption by the LB policy. + Attributes *attributes.Attributes +} + // State contains the current Resolver state relevant to the ClientConn. type State struct { // Addresses is the latest set of resolved addresses for the target. + // + // If a resolver sets Addresses but does not set Endpoints, one Endpoint + // will be created for each Address before the State is passed to the LB + // policy. The BalancerAttributes of each entry in Addresses will be set + // in Endpoints.Attributes, and be cleared in the Endpoint's Address's + // BalancerAttributes. + // + // Soon, Addresses will be deprecated and replaced fully by Endpoints. Addresses []Address + // Endpoints is the latest set of resolved endpoints for the target. + // + // If a resolver produces a State containing Endpoints but not Addresses, + // it must take care to ensure the LB policies it selects will support + // Endpoints. + Endpoints []Endpoint + // ServiceConfig contains the result from parsing the latest service // config. If it is nil, it indicates no service config is present or the // resolver does not provide service configs. @@ -244,20 +260,7 @@ // target does not contain a scheme or if the parsed scheme is not registered // (i.e. no corresponding resolver available to resolve the endpoint), we will // apply the default scheme, and will attempt to reparse it. -// -// Examples: -// -// - "dns://some_authority/foo.bar" -// Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"} -// - "foo.bar" -// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"} -// - "unknown_scheme://authority/endpoint" -// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"} type Target struct { - // Deprecated: use URL.Scheme instead. - Scheme string - // Deprecated: use URL.Host instead. - Authority string // URL contains the parsed dial target with an optional default scheme added // to it if the original dial target contained no scheme or contained an // unregistered scheme. Any query params specified in the original dial @@ -311,10 +314,3 @@ // Close closes the resolver. Close() } - -// UnregisterForTesting removes the resolver builder with the given scheme from the -// resolver map. -// This function is for testing only. -func UnregisterForTesting(scheme string) { - delete(m, scheme) -} diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/resolver_conn_wrapper.go temporal-1.22.5/src/vendor/google.golang.org/grpc/resolver_conn_wrapper.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/resolver_conn_wrapper.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/resolver_conn_wrapper.go 2024-02-23 09:46:15.000000000 +0000 @@ -19,11 +19,11 @@ package grpc import ( + "context" "strings" "sync" "google.golang.org/grpc/balancer" - "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/pretty" @@ -31,129 +31,200 @@ "google.golang.org/grpc/serviceconfig" ) +// resolverStateUpdater wraps the single method used by ccResolverWrapper to +// report a state update from the actual resolver implementation. +type resolverStateUpdater interface { + updateResolverState(s resolver.State, err error) error +} + // ccResolverWrapper is a wrapper on top of cc for resolvers. // It implements resolver.ClientConn interface. 
type ccResolverWrapper struct { - cc *ClientConn - resolverMu sync.Mutex - resolver resolver.Resolver - done *grpcsync.Event - curState resolver.State - - incomingMu sync.Mutex // Synchronizes all the incoming calls. + // The following fields are initialized when the wrapper is created and are + // read-only afterwards, and therefore can be accessed without a mutex. + cc resolverStateUpdater + channelzID *channelz.Identifier + ignoreServiceConfig bool + opts ccResolverWrapperOpts + serializer *grpcsync.CallbackSerializer // To serialize all incoming calls. + serializerCancel context.CancelFunc // To close the serializer, accessed only from close(). + + // All incoming (resolver --> gRPC) calls are guaranteed to execute in a + // mutually exclusive manner as they are scheduled on the serializer. + // Fields accessed *only* in these serializer callbacks, can therefore be + // accessed without a mutex. + curState resolver.State + + // mu guards access to the below fields. + mu sync.Mutex + closed bool + resolver resolver.Resolver // Accessed only from outgoing calls. +} + +// ccResolverWrapperOpts wraps the arguments to be passed when creating a new +// ccResolverWrapper. +type ccResolverWrapperOpts struct { + target resolver.Target // User specified dial target to resolve. + builder resolver.Builder // Resolver builder to use. + bOpts resolver.BuildOptions // Resolver build options to use. + channelzID *channelz.Identifier // Channelz identifier for the channel. } // newCCResolverWrapper uses the resolver.Builder to build a Resolver and // returns a ccResolverWrapper object which wraps the newly built resolver. -func newCCResolverWrapper(cc *ClientConn, rb resolver.Builder) (*ccResolverWrapper, error) { +func newCCResolverWrapper(cc resolverStateUpdater, opts ccResolverWrapperOpts) (*ccResolverWrapper, error) { + ctx, cancel := context.WithCancel(context.Background()) ccr := &ccResolverWrapper{ - cc: cc, - done: grpcsync.NewEvent(), + cc: cc, + channelzID: opts.channelzID, + ignoreServiceConfig: opts.bOpts.DisableServiceConfig, + opts: opts, + serializer: grpcsync.NewCallbackSerializer(ctx), + serializerCancel: cancel, } - var credsClone credentials.TransportCredentials - if creds := cc.dopts.copts.TransportCredentials; creds != nil { - credsClone = creds.Clone() - } - rbo := resolver.BuildOptions{ - DisableServiceConfig: cc.dopts.disableServiceConfig, - DialCreds: credsClone, - CredsBundle: cc.dopts.copts.CredsBundle, - Dialer: cc.dopts.copts.Dialer, - } - - var err error - // We need to hold the lock here while we assign to the ccr.resolver field - // to guard against a data race caused by the following code path, - // rb.Build-->ccr.ReportError-->ccr.poll-->ccr.resolveNow, would end up - // accessing ccr.resolver which is being assigned here. - ccr.resolverMu.Lock() - defer ccr.resolverMu.Unlock() - ccr.resolver, err = rb.Build(cc.parsedTarget, ccr, rbo) + // Cannot hold the lock at build time because the resolver can send an + // update or error inline and these incoming calls grab the lock to schedule + // a callback in the serializer. + r, err := opts.builder.Build(opts.target, ccr, opts.bOpts) if err != nil { + cancel() return nil, err } + + // Any error reported by the resolver at build time that leads to a + // re-resolution request from the balancer is dropped by grpc until we + // return from this function. So, we don't have to handle pending resolveNow + // requests here. 
+ ccr.mu.Lock() + ccr.resolver = r + ccr.mu.Unlock() + return ccr, nil } func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) { - ccr.resolverMu.Lock() - if !ccr.done.HasFired() { - ccr.resolver.ResolveNow(o) + ccr.mu.Lock() + defer ccr.mu.Unlock() + + // ccr.resolver field is set only after the call to Build() returns. But in + // the process of building, the resolver may send an error update which when + // propagated to the balancer may result in a re-resolution request. + if ccr.closed || ccr.resolver == nil { + return } - ccr.resolverMu.Unlock() + ccr.resolver.ResolveNow(o) } func (ccr *ccResolverWrapper) close() { - ccr.resolverMu.Lock() - ccr.resolver.Close() - ccr.done.Fire() - ccr.resolverMu.Unlock() + ccr.mu.Lock() + if ccr.closed { + ccr.mu.Unlock() + return + } + + channelz.Info(logger, ccr.channelzID, "Closing the name resolver") + + // Close the serializer to ensure that no more calls from the resolver are + // handled, before actually closing the resolver. + ccr.serializerCancel() + ccr.closed = true + r := ccr.resolver + ccr.mu.Unlock() + + // Give enqueued callbacks a chance to finish. + <-ccr.serializer.Done() + + // Spawn a goroutine to close the resolver (since it may block trying to + // cleanup all allocated resources) and return early. + go r.Close() } +// serializerScheduleLocked is a convenience method to schedule a function to be +// run on the serializer while holding ccr.mu. +func (ccr *ccResolverWrapper) serializerScheduleLocked(f func(context.Context)) { + ccr.mu.Lock() + ccr.serializer.Schedule(f) + ccr.mu.Unlock() +} + +// UpdateState is called by resolver implementations to report new state to gRPC +// which includes addresses and service config. func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { - ccr.incomingMu.Lock() - defer ccr.incomingMu.Unlock() - if ccr.done.HasFired() { + errCh := make(chan error, 1) + if s.Endpoints == nil { + s.Endpoints = make([]resolver.Endpoint, 0, len(s.Addresses)) + for _, a := range s.Addresses { + ep := resolver.Endpoint{Addresses: []resolver.Address{a}, Attributes: a.BalancerAttributes} + ep.Addresses[0].BalancerAttributes = nil + s.Endpoints = append(s.Endpoints, ep) + } + } + ok := ccr.serializer.Schedule(func(context.Context) { + ccr.addChannelzTraceEvent(s) + ccr.curState = s + if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState { + errCh <- balancer.ErrBadResolverState + return + } + errCh <- nil + }) + if !ok { + // The only time when Schedule() fail to add the callback to the + // serializer is when the serializer is closed, and this happens only + // when the resolver wrapper is closed. return nil } - ccr.addChannelzTraceEvent(s) - ccr.curState = s - if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState { - return balancer.ErrBadResolverState - } - return nil + return <-errCh } +// ReportError is called by resolver implementations to report errors +// encountered during name resolution to gRPC. 
func (ccr *ccResolverWrapper) ReportError(err error) { - ccr.incomingMu.Lock() - defer ccr.incomingMu.Unlock() - if ccr.done.HasFired() { - return - } - channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) - ccr.cc.updateResolverState(resolver.State{}, err) + ccr.serializerScheduleLocked(func(_ context.Context) { + channelz.Warningf(logger, ccr.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) + ccr.cc.updateResolverState(resolver.State{}, err) + }) } -// NewAddress is called by the resolver implementation to send addresses to gRPC. +// NewAddress is called by the resolver implementation to send addresses to +// gRPC. func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { - ccr.incomingMu.Lock() - defer ccr.incomingMu.Unlock() - if ccr.done.HasFired() { - return - } - ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) - ccr.curState.Addresses = addrs - ccr.cc.updateResolverState(ccr.curState, nil) + ccr.serializerScheduleLocked(func(_ context.Context) { + ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) + ccr.curState.Addresses = addrs + ccr.cc.updateResolverState(ccr.curState, nil) + }) } // NewServiceConfig is called by the resolver implementation to send service // configs to gRPC. func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { - ccr.incomingMu.Lock() - defer ccr.incomingMu.Unlock() - if ccr.done.HasFired() { - return - } - channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: got new service config: %s", sc) - if ccr.cc.dopts.disableServiceConfig { - channelz.Info(logger, ccr.cc.channelzID, "Service config lookups disabled; ignoring config") - return - } - scpr := parseServiceConfig(sc) - if scpr.Err != nil { - channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err) - return - } - ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr}) - ccr.curState.ServiceConfig = scpr - ccr.cc.updateResolverState(ccr.curState, nil) + ccr.serializerScheduleLocked(func(_ context.Context) { + channelz.Infof(logger, ccr.channelzID, "ccResolverWrapper: got new service config: %s", sc) + if ccr.ignoreServiceConfig { + channelz.Info(logger, ccr.channelzID, "Service config lookups disabled; ignoring config") + return + } + scpr := parseServiceConfig(sc) + if scpr.Err != nil { + channelz.Warningf(logger, ccr.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err) + return + } + ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr}) + ccr.curState.ServiceConfig = scpr + ccr.cc.updateResolverState(ccr.curState, nil) + }) } +// ParseServiceConfig is called by resolver implementations to parse a JSON +// representation of the service config. func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult { return parseServiceConfig(scJSON) } +// addChannelzTraceEvent adds a channelz trace event containing the new +// state received from resolver implementations. 
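The rewritten ccResolverWrapper above funnels every incoming resolver call through a grpcsync.CallbackSerializer, which is why curState can be read and written without a mutex. A minimal, self-contained sketch of that pattern follows; miniSerializer is an invented illustration of the idea, not the vendored grpcsync implementation:

package main

import (
	"context"
	"fmt"
	"sync"
)

// miniSerializer mimics the idea behind grpcsync.CallbackSerializer:
// callbacks run one at a time on a single goroutine, so state touched only
// from scheduled callbacks needs no extra locking.
type miniSerializer struct {
	mu     sync.Mutex
	closed bool
	queue  chan func(context.Context)
	done   chan struct{}
}

func newMiniSerializer(ctx context.Context) *miniSerializer {
	s := &miniSerializer{
		queue: make(chan func(context.Context), 64),
		done:  make(chan struct{}),
	}
	go func() {
		defer close(s.done)
		for f := range s.queue {
			f(ctx)
		}
	}()
	return s
}

// Schedule reports false once the serializer is closed, which is exactly how
// UpdateState above detects that the wrapper has shut down.
func (s *miniSerializer) Schedule(f func(context.Context)) bool {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.closed {
		return false
	}
	s.queue <- f
	return true
}

// Close stops accepting callbacks; Done is closed once the already enqueued
// callbacks have run, like <-ccr.serializer.Done() in close() above.
func (s *miniSerializer) Close() {
	s.mu.Lock()
	defer s.mu.Unlock()
	if !s.closed {
		s.closed = true
		close(s.queue)
	}
}

func (s *miniSerializer) Done() <-chan struct{} { return s.done }

func main() {
	s := newMiniSerializer(context.Background())
	for i := 0; i < 3; i++ {
		i := i
		s.Schedule(func(context.Context) { fmt.Println("resolver callback", i) })
	}
	s.Close()
	<-s.Done()
	fmt.Println("scheduled after close:", s.Schedule(func(context.Context) {}))
}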
func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { var updates []string var oldSC, newSC *ServiceConfig @@ -172,5 +243,5 @@ } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { updates = append(updates, "resolver returned new addresses") } - channelz.Infof(logger, ccr.cc.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) + channelz.Infof(logger, ccr.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) } diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/rpc_util.go temporal-1.22.5/src/vendor/google.golang.org/grpc/rpc_util.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/rpc_util.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/rpc_util.go 2024-02-23 09:46:15.000000000 +0000 @@ -75,7 +75,7 @@ } return &gzipCompressor{ pool: sync.Pool{ - New: func() interface{} { + New: func() any { w, err := gzip.NewWriterLevel(io.Discard, level) if err != nil { panic(err) @@ -577,6 +577,9 @@ // The header of a gRPC message. Find more detail at // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md header [5]byte + + // recvBufferPool is the pool of shared receive buffers. + recvBufferPool SharedBufferPool } // recvMsg reads a complete gRPC message from the stream. @@ -610,9 +613,7 @@ if int(length) > maxReceiveMessageSize { return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize) } - // TODO(bradfitz,zhaoq): garbage. reuse buffer after proto decoding instead - // of making it for each message: - msg = make([]byte, int(length)) + msg = p.recvBufferPool.Get(int(length)) if _, err := p.r.Read(msg); err != nil { if err == io.EOF { err = io.ErrUnexpectedEOF @@ -625,7 +626,7 @@ // encode serializes msg and returns a buffer containing the message, or an // error if it is too large to be transmitted by grpc. If msg is nil, it // generates an empty message. -func encode(c baseCodec, msg interface{}) ([]byte, error) { +func encode(c baseCodec, msg any) ([]byte, error) { if msg == nil { // NOTE: typed nils will not be caught by this check return nil, nil } @@ -692,7 +693,7 @@ return hdr, data } -func outPayload(client bool, msg interface{}, data, payload []byte, t time.Time) *stats.OutPayload { +func outPayload(client bool, msg any, data, payload []byte, t time.Time) *stats.OutPayload { return &stats.OutPayload{ Client: client, Payload: msg, @@ -726,12 +727,12 @@ } func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) ([]byte, error) { - pf, d, err := p.recvMsg(maxReceiveMessageSize) + pf, buf, err := p.recvMsg(maxReceiveMessageSize) if err != nil { return nil, err } if payInfo != nil { - payInfo.compressedLength = len(d) + payInfo.compressedLength = len(buf) } if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil { @@ -743,10 +744,10 @@ // To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor, // use this decompressor as the default. 
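Much of the churn in rpc_util.go and the files below is the mechanical interface{} -> any rename, enforced by the new vet.sh rule further down. Since Go 1.18, any is a predeclared alias for interface{}, so the rename changes no behavior; a trivial confirmation:

package main

import "fmt"

// Both parameters have the identical type: any is an alias, not a new type.
func printOld(v interface{}) { fmt.Println(v) }
func printNew(v any)         { fmt.Println(v) }

func main() {
	printOld("same")
	printNew("type")
}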
if dc != nil { - d, err = dc.Do(bytes.NewReader(d)) - size = len(d) + buf, err = dc.Do(bytes.NewReader(buf)) + size = len(buf) } else { - d, size, err = decompress(compressor, d, maxReceiveMessageSize) + buf, size, err = decompress(compressor, buf, maxReceiveMessageSize) } if err != nil { return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err) @@ -757,7 +758,7 @@ return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize) } } - return d, nil + return buf, nil } // Using compressor, decompress d, returning data and size. @@ -791,16 +792,18 @@ // For the two compressor parameters, both should not be set, but if they are, // dc takes precedence over compressor. // TODO(dfawley): wrap the old compressor/decompressor using the new API? -func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error { - d, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor) +func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error { + buf, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor) if err != nil { return err } - if err := c.Unmarshal(d, m); err != nil { + if err := c.Unmarshal(buf, m); err != nil { return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message: %v", err) } if payInfo != nil { - payInfo.uncompressedBytes = d + payInfo.uncompressedBytes = buf + } else { + p.recvBufferPool.Put(&buf) } return nil } @@ -860,19 +863,22 @@ // Errorf returns nil if c is OK. // // Deprecated: use status.Errorf instead. -func Errorf(c codes.Code, format string, a ...interface{}) error { +func Errorf(c codes.Code, format string, a ...any) error { return status.Errorf(c, format, a...) } +var errContextCanceled = status.Error(codes.Canceled, context.Canceled.Error()) +var errContextDeadline = status.Error(codes.DeadlineExceeded, context.DeadlineExceeded.Error()) + // toRPCErr converts an error into an error from the status package. func toRPCErr(err error) error { switch err { case nil, io.EOF: return err case context.DeadlineExceeded: - return status.Error(codes.DeadlineExceeded, err.Error()) + return errContextDeadline case context.Canceled: - return status.Error(codes.Canceled, err.Error()) + return errContextCanceled case io.ErrUnexpectedEOF: return status.Error(codes.Internal, err.Error()) } diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/server.go temporal-1.22.5/src/vendor/google.golang.org/grpc/server.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/server.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/server.go 2024-02-23 09:46:15.000000000 +0000 @@ -86,7 +86,7 @@ var statusOK = status.New(codes.OK, "") var logger = grpclog.Component("core") -type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error) +type methodHandler func(srv any, ctx context.Context, dec func(any) error, interceptor UnaryServerInterceptor) (any, error) // MethodDesc represents an RPC service's method specification. type MethodDesc struct { @@ -99,20 +99,20 @@ ServiceName string // The pointer to the service interface. 
Used to check whether the user // provided implementation satisfies the interface requirements. - HandlerType interface{} + HandlerType any Methods []MethodDesc Streams []StreamDesc - Metadata interface{} + Metadata any } // serviceInfo wraps information about a service. It is very similar to // ServiceDesc and is constructed from it for internal purposes. type serviceInfo struct { // Contains the implementation for the methods in this service. - serviceImpl interface{} + serviceImpl any methods map[string]*MethodDesc streams map[string]*StreamDesc - mdata interface{} + mdata any } type serverWorkerData struct { @@ -170,10 +170,12 @@ initialConnWindowSize int32 writeBufferSize int readBufferSize int + sharedWriteBuffer bool connectionTimeout time.Duration maxHeaderListSize *uint32 headerTableSize *uint32 numServerWorkers uint32 + recvBufferPool SharedBufferPool } var defaultServerOptions = serverOptions{ @@ -182,6 +184,7 @@ connectionTimeout: 120 * time.Second, writeBufferSize: defaultWriteBufSize, readBufferSize: defaultReadBufSize, + recvBufferPool: nopBufferPool{}, } var globalServerOptions []ServerOption @@ -233,6 +236,20 @@ return &joinServerOption{opts: opts} } +// SharedWriteBuffer allows reusing per-connection transport write buffer. +// If this option is set to true every connection will release the buffer after +// flushing the data on the wire. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func SharedWriteBuffer(val bool) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.sharedWriteBuffer = val + }) +} + // WriteBufferSize determines how much data can be batched before doing a write // on the wire. The corresponding memory allocation for this buffer will be // twice the size to keep syscalls low. The default value for this buffer is @@ -273,9 +290,9 @@ // KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server. func KeepaliveParams(kp keepalive.ServerParameters) ServerOption { - if kp.Time > 0 && kp.Time < time.Second { + if kp.Time > 0 && kp.Time < internal.KeepaliveMinServerPingTime { logger.Warning("Adjusting keepalive ping interval to minimum period of 1s") - kp.Time = time.Second + kp.Time = internal.KeepaliveMinServerPingTime } return newFuncServerOption(func(o *serverOptions) { @@ -552,6 +569,27 @@ }) } +// RecvBufferPool returns a ServerOption that configures the server +// to use the provided shared buffer pool for parsing incoming messages. Depending +// on the application's workload, this could result in reduced memory allocation. +// +// If you are unsure about how to implement a memory pool but want to utilize one, +// begin with grpc.NewSharedBufferPool. +// +// Note: The shared buffer pool feature will not be active if any of the following +// options are used: StatsHandler, EnableTracing, or binary logging. In such +// cases, the shared buffer pool will be ignored. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func RecvBufferPool(bufferPool SharedBufferPool) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.recvBufferPool = bufferPool + }) +} + // serverWorkerResetThreshold defines how often the stack must be reset. Every // N requests, by spawning a new goroutine in its place, a worker can reset its // stack so that large stacks don't live in memory forever. 
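RecvBufferPool and SharedWriteBuffer are the two new server options introduced above; both are marked Experimental in the vendored code. A usage sketch (no services are registered, and the listen address is a placeholder):

package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
)

func main() {
	lis, err := net.Listen("tcp", "127.0.0.1:50051")
	if err != nil {
		log.Fatalf("listen: %v", err)
	}

	s := grpc.NewServer(
		// Parse incoming messages out of one shared buffer pool. Per the
		// option's doc above, it is ignored when StatsHandler, EnableTracing
		// or binary logging is in use.
		grpc.RecvBufferPool(grpc.NewSharedBufferPool()),
		// Release each connection's transport write buffer after flushing
		// instead of keeping one allocated per connection.
		grpc.SharedWriteBuffer(true),
	)

	// Register services here as usual, then serve.
	if err := s.Serve(lis); err != nil {
		log.Fatalf("serve: %v", err)
	}
}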
2^16 should allow @@ -632,7 +670,7 @@ // printf records an event in s's event log, unless s has been stopped. // REQUIRES s.mu is held. -func (s *Server) printf(format string, a ...interface{}) { +func (s *Server) printf(format string, a ...any) { if s.events != nil { s.events.Printf(format, a...) } @@ -640,7 +678,7 @@ // errorf records an error in s's event log, unless s has been stopped. // REQUIRES s.mu is held. -func (s *Server) errorf(format string, a ...interface{}) { +func (s *Server) errorf(format string, a ...any) { if s.events != nil { s.events.Errorf(format, a...) } @@ -655,14 +693,14 @@ // once the server has started serving. // desc describes the service and its methods and handlers. impl is the // service implementation which is passed to the method handlers. - RegisterService(desc *ServiceDesc, impl interface{}) + RegisterService(desc *ServiceDesc, impl any) } // RegisterService registers a service and its implementation to the gRPC // server. It is called from the IDL generated code. This must be called before // invoking Serve. If ss is non-nil (for legacy code), its type is checked to // ensure it implements sd.HandlerType. -func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) { +func (s *Server) RegisterService(sd *ServiceDesc, ss any) { if ss != nil { ht := reflect.TypeOf(sd.HandlerType).Elem() st := reflect.TypeOf(ss) @@ -673,7 +711,7 @@ s.register(sd, ss) } -func (s *Server) register(sd *ServiceDesc, ss interface{}) { +func (s *Server) register(sd *ServiceDesc, ss any) { s.mu.Lock() defer s.mu.Unlock() s.printf("RegisterService(%q)", sd.ServiceName) @@ -714,7 +752,7 @@ type ServiceInfo struct { Methods []MethodInfo // Metadata is the metadata specified in ServiceDesc when registering service. - Metadata interface{} + Metadata any } // GetServiceInfo returns a map from service names to ServiceInfo. @@ -895,7 +933,7 @@ s.mu.Lock() conns := s.conns[addr] for st := range conns { - st.Drain() + st.Drain("") } s.mu.Unlock() } @@ -915,6 +953,7 @@ InitialConnWindowSize: s.opts.initialConnWindowSize, WriteBufferSize: s.opts.writeBufferSize, ReadBufferSize: s.opts.readBufferSize, + SharedWriteBuffer: s.opts.sharedWriteBuffer, ChannelzParentID: s.channelzID, MaxHeaderListSize: s.opts.maxHeaderListSize, HeaderTableSize: s.opts.headerTableSize, @@ -1046,7 +1085,7 @@ if s.drain { // Transport added after we drained our existing conns: drain it // immediately. 
- st.Drain() + st.Drain("") } if s.conns[addr] == nil { @@ -1096,7 +1135,7 @@ atomic.AddInt64(&s.czData.callsFailed, 1) } -func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { +func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg any, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { data, err := encode(s.getCodec(stream.ContentSubtype()), msg) if err != nil { channelz.Error(logger, s.channelzID, "grpc: server failed to encode response: ", err) @@ -1143,7 +1182,7 @@ } func chainUnaryInterceptors(interceptors []UnaryServerInterceptor) UnaryServerInterceptor { - return func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) { + return func(ctx context.Context, req any, info *UnaryServerInfo, handler UnaryHandler) (any, error) { return interceptors[0](ctx, req, info, getChainUnaryHandler(interceptors, 0, info, handler)) } } @@ -1152,7 +1191,7 @@ if curr == len(interceptors)-1 { return finalHandler } - return func(ctx context.Context, req interface{}) (interface{}, error) { + return func(ctx context.Context, req any) (any, error) { return interceptors[curr+1](ctx, req, info, getChainUnaryHandler(interceptors, curr+1, info, finalHandler)) } } @@ -1189,7 +1228,7 @@ defer func() { if trInfo != nil { if err != nil && err != io.EOF { - trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) trInfo.tr.SetError() } trInfo.tr.Finish() @@ -1296,7 +1335,7 @@ if len(shs) != 0 || len(binlogs) != 0 { payInfo = &payloadInfo{} } - d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) + d, err := recvAndDecompress(&parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) if err != nil { if e := t.WriteStatus(stream, status.Convert(err)); e != nil { channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) @@ -1306,7 +1345,7 @@ if channelz.IsOn() { t.IncrMsgRecv() } - df := func(v interface{}) error { + df := func(v any) error { if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) } @@ -1470,7 +1509,7 @@ } func chainStreamInterceptors(interceptors []StreamServerInterceptor) StreamServerInterceptor { - return func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { + return func(srv any, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { return interceptors[0](srv, ss, info, getChainStreamHandler(interceptors, 0, info, handler)) } } @@ -1479,7 +1518,7 @@ if curr == len(interceptors)-1 { return finalHandler } - return func(srv interface{}, stream ServerStream) error { + return func(srv any, stream ServerStream) error { return interceptors[curr+1](srv, stream, info, getChainStreamHandler(interceptors, curr+1, info, finalHandler)) } } @@ -1506,7 +1545,7 @@ ctx: ctx, t: t, s: stream, - p: &parser{r: stream}, + p: &parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, codec: s.getCodec(stream.ContentSubtype()), maxReceiveMessageSize: s.opts.maxReceiveMessageSize, maxSendMessageSize: s.opts.maxSendMessageSize, @@ -1520,7 +1559,7 @@ if trInfo != nil { ss.mu.Lock() if err != nil && err != io.EOF { - 
ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) ss.trInfo.tr.SetError() } ss.trInfo.tr.Finish() @@ -1623,7 +1662,7 @@ trInfo.tr.LazyLog(&trInfo.firstLine, false) } var appErr error - var server interface{} + var server any if info != nil { server = info.serviceImpl } @@ -1689,13 +1728,13 @@ pos := strings.LastIndex(sm, "/") if pos == -1 { if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"Malformed method name %q", []interface{}{sm}}, true) + trInfo.tr.LazyLog(&fmtStringer{"Malformed method name %q", []any{sm}}, true) trInfo.tr.SetError() } errDesc := fmt.Sprintf("malformed method name: %q", stream.Method()) if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) trInfo.tr.SetError() } channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) @@ -1736,7 +1775,7 @@ } if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) trInfo.tr.SetError() } channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) @@ -1856,7 +1895,7 @@ if !s.drain { for _, conns := range s.conns { for st := range conns { - st.Drain() + st.Drain("graceful_stop") } } s.drain = true diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/service_config.go temporal-1.22.5/src/vendor/google.golang.org/grpc/service_config.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/service_config.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/service_config.go 2024-02-23 09:46:15.000000000 +0000 @@ -23,8 +23,6 @@ "errors" "fmt" "reflect" - "strconv" - "strings" "time" "google.golang.org/grpc/codes" @@ -106,8 +104,8 @@ type jsonRetryPolicy struct { MaxAttempts int - InitialBackoff string - MaxBackoff string + InitialBackoff internalserviceconfig.Duration + MaxBackoff internalserviceconfig.Duration BackoffMultiplier float64 RetryableStatusCodes []codes.Code } @@ -129,50 +127,6 @@ TokenRatio float64 } -func parseDuration(s *string) (*time.Duration, error) { - if s == nil { - return nil, nil - } - if !strings.HasSuffix(*s, "s") { - return nil, fmt.Errorf("malformed duration %q", *s) - } - ss := strings.SplitN((*s)[:len(*s)-1], ".", 3) - if len(ss) > 2 { - return nil, fmt.Errorf("malformed duration %q", *s) - } - // hasDigits is set if either the whole or fractional part of the number is - // present, since both are optional but one is required. 
- hasDigits := false - var d time.Duration - if len(ss[0]) > 0 { - i, err := strconv.ParseInt(ss[0], 10, 32) - if err != nil { - return nil, fmt.Errorf("malformed duration %q: %v", *s, err) - } - d = time.Duration(i) * time.Second - hasDigits = true - } - if len(ss) == 2 && len(ss[1]) > 0 { - if len(ss[1]) > 9 { - return nil, fmt.Errorf("malformed duration %q", *s) - } - f, err := strconv.ParseInt(ss[1], 10, 64) - if err != nil { - return nil, fmt.Errorf("malformed duration %q: %v", *s, err) - } - for i := 9; i > len(ss[1]); i-- { - f *= 10 - } - d += time.Duration(f) - hasDigits = true - } - if !hasDigits { - return nil, fmt.Errorf("malformed duration %q", *s) - } - - return &d, nil -} - type jsonName struct { Service string Method string @@ -201,7 +155,7 @@ type jsonMC struct { Name *[]jsonName WaitForReady *bool - Timeout *string + Timeout *internalserviceconfig.Duration MaxRequestMessageBytes *int64 MaxResponseMessageBytes *int64 RetryPolicy *jsonRetryPolicy @@ -252,15 +206,10 @@ if m.Name == nil { continue } - d, err := parseDuration(m.Timeout) - if err != nil { - logger.Warningf("grpc: unmarshaling service config %s: %v", js, err) - return &serviceconfig.ParseResult{Err: err} - } mc := MethodConfig{ WaitForReady: m.WaitForReady, - Timeout: d, + Timeout: (*time.Duration)(m.Timeout), } if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil { logger.Warningf("grpc: unmarshaling service config %s: %v", js, err) @@ -312,18 +261,10 @@ if jrp == nil { return nil, nil } - ib, err := parseDuration(&jrp.InitialBackoff) - if err != nil { - return nil, err - } - mb, err := parseDuration(&jrp.MaxBackoff) - if err != nil { - return nil, err - } if jrp.MaxAttempts <= 1 || - *ib <= 0 || - *mb <= 0 || + jrp.InitialBackoff <= 0 || + jrp.MaxBackoff <= 0 || jrp.BackoffMultiplier <= 0 || len(jrp.RetryableStatusCodes) == 0 { logger.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp) @@ -332,8 +273,8 @@ rp := &internalserviceconfig.RetryPolicy{ MaxAttempts: jrp.MaxAttempts, - InitialBackoff: *ib, - MaxBackoff: *mb, + InitialBackoff: time.Duration(jrp.InitialBackoff), + MaxBackoff: time.Duration(jrp.MaxBackoff), BackoffMultiplier: jrp.BackoffMultiplier, RetryableStatusCodes: make(map[codes.Code]bool), } diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/shared_buffer_pool.go temporal-1.22.5/src/vendor/google.golang.org/grpc/shared_buffer_pool.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/shared_buffer_pool.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/shared_buffer_pool.go 2024-02-23 09:46:15.000000000 +0000 @@ -0,0 +1,154 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import "sync" + +// SharedBufferPool is a pool of buffers that can be shared, resulting in +// decreased memory allocation. Currently, in gRPC-go, it is only utilized +// for parsing incoming messages. 
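The removed parseDuration helper is replaced by unmarshalling straight into internalserviceconfig.Duration (see the jsonRetryPolicy and jsonMC fields above). The Duration type below only illustrates that pattern, a time.Duration that parses proto3-style "1.5s" strings during json.Unmarshal; it is not the vendored implementation, which does its own stricter parsing:

package main

import (
	"encoding/json"
	"fmt"
	"strings"
	"time"
)

// Duration is an illustrative stand-in for internalserviceconfig.Duration.
type Duration time.Duration

func (d *Duration) UnmarshalJSON(b []byte) error {
	var s string
	if err := json.Unmarshal(b, &s); err != nil {
		return err
	}
	if !strings.HasSuffix(s, "s") {
		return fmt.Errorf("malformed duration %q", s)
	}
	// time.ParseDuration happens to accept the "10s" / "0.5s" shapes used in
	// service configs; good enough for a sketch.
	td, err := time.ParseDuration(s)
	if err != nil {
		return fmt.Errorf("malformed duration %q: %v", s, err)
	}
	*d = Duration(td)
	return nil
}

func main() {
	var mc struct {
		Timeout *Duration
	}
	if err := json.Unmarshal([]byte(`{"Timeout": "1.5s"}`), &mc); err != nil {
		panic(err)
	}
	fmt.Println(time.Duration(*mc.Timeout)) // prints 1.5s
}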
+// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +type SharedBufferPool interface { + // Get returns a buffer with specified length from the pool. + // + // The returned byte slice may be not zero initialized. + Get(length int) []byte + + // Put returns a buffer to the pool. + Put(*[]byte) +} + +// NewSharedBufferPool creates a simple SharedBufferPool with buckets +// of different sizes to optimize memory usage. This prevents the pool from +// wasting large amounts of memory, even when handling messages of varying sizes. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func NewSharedBufferPool() SharedBufferPool { + return &simpleSharedBufferPool{ + pools: [poolArraySize]simpleSharedBufferChildPool{ + newBytesPool(level0PoolMaxSize), + newBytesPool(level1PoolMaxSize), + newBytesPool(level2PoolMaxSize), + newBytesPool(level3PoolMaxSize), + newBytesPool(level4PoolMaxSize), + newBytesPool(0), + }, + } +} + +// simpleSharedBufferPool is a simple implementation of SharedBufferPool. +type simpleSharedBufferPool struct { + pools [poolArraySize]simpleSharedBufferChildPool +} + +func (p *simpleSharedBufferPool) Get(size int) []byte { + return p.pools[p.poolIdx(size)].Get(size) +} + +func (p *simpleSharedBufferPool) Put(bs *[]byte) { + p.pools[p.poolIdx(cap(*bs))].Put(bs) +} + +func (p *simpleSharedBufferPool) poolIdx(size int) int { + switch { + case size <= level0PoolMaxSize: + return level0PoolIdx + case size <= level1PoolMaxSize: + return level1PoolIdx + case size <= level2PoolMaxSize: + return level2PoolIdx + case size <= level3PoolMaxSize: + return level3PoolIdx + case size <= level4PoolMaxSize: + return level4PoolIdx + default: + return levelMaxPoolIdx + } +} + +const ( + level0PoolMaxSize = 16 // 16 B + level1PoolMaxSize = level0PoolMaxSize * 16 // 256 B + level2PoolMaxSize = level1PoolMaxSize * 16 // 4 KB + level3PoolMaxSize = level2PoolMaxSize * 16 // 64 KB + level4PoolMaxSize = level3PoolMaxSize * 16 // 1 MB +) + +const ( + level0PoolIdx = iota + level1PoolIdx + level2PoolIdx + level3PoolIdx + level4PoolIdx + levelMaxPoolIdx + poolArraySize +) + +type simpleSharedBufferChildPool interface { + Get(size int) []byte + Put(any) +} + +type bufferPool struct { + sync.Pool + + defaultSize int +} + +func (p *bufferPool) Get(size int) []byte { + bs := p.Pool.Get().(*[]byte) + + if cap(*bs) < size { + p.Pool.Put(bs) + + return make([]byte, size) + } + + return (*bs)[:size] +} + +func newBytesPool(size int) simpleSharedBufferChildPool { + return &bufferPool{ + Pool: sync.Pool{ + New: func() any { + bs := make([]byte, size) + return &bs + }, + }, + defaultSize: size, + } +} + +// nopBufferPool is a buffer pool just makes new buffer without pooling. +type nopBufferPool struct { +} + +func (nopBufferPool) Get(length int) []byte { + return make([]byte, length) +} + +func (nopBufferPool) Put(*[]byte) { +} diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/stats/stats.go temporal-1.22.5/src/vendor/google.golang.org/grpc/stats/stats.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/stats/stats.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/stats/stats.go 2024-02-23 09:46:15.000000000 +0000 @@ -59,12 +59,22 @@ func (s *Begin) isRPCStats() {} +// PickerUpdated indicates that the LB policy provided a new picker while the +// RPC was waiting for one. 
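shared_buffer_pool.go above buckets buffers at 16 B, 256 B, 4 KB, 64 KB and 1 MB, with an unsized fallback pool for anything larger. A small borrow-and-return example against the exported constructor (the sizes in the comments come straight from the constants above):

package main

import (
	"fmt"

	"google.golang.org/grpc"
)

func main() {
	pool := grpc.NewSharedBufferPool()

	// 300 bytes is above the 256 B bucket and below the 4 KB one, so the
	// slice comes back with len 300 and (typically) a 4 KB capacity.
	buf := pool.Get(300)
	fmt.Println(len(buf), cap(buf) >= 300)

	copy(buf, "payload bytes go here")

	// Return it once nothing references it. Put takes *[]byte rather than
	// []byte, which avoids boxing the slice header into an interface value
	// on every return.
	pool.Put(&buf)

	// Requests beyond the largest bucket fall through to the unsized pool.
	big := pool.Get(2 << 20)
	pool.Put(&big)
}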
+type PickerUpdated struct{} + +// IsClient indicates if the stats information is from client side. Only Client +// Side interfaces with a Picker, thus always returns true. +func (*PickerUpdated) IsClient() bool { return true } + +func (*PickerUpdated) isRPCStats() {} + // InPayload contains the information for an incoming payload. type InPayload struct { // Client is true if this InPayload is from client side. Client bool // Payload is the payload with original type. - Payload interface{} + Payload any // Data is the serialized message payload. Data []byte @@ -134,7 +144,7 @@ // Client is true if this OutPayload is from client side. Client bool // Payload is the payload with original type. - Payload interface{} + Payload any // Data is the serialized message payload. Data []byte // Length is the size of the uncompressed payload data. Does not include any diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/status/status.go temporal-1.22.5/src/vendor/google.golang.org/grpc/status/status.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/status/status.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/status/status.go 2024-02-23 09:46:15.000000000 +0000 @@ -50,7 +50,7 @@ } // Newf returns New(c, fmt.Sprintf(format, a...)). -func Newf(c codes.Code, format string, a ...interface{}) *Status { +func Newf(c codes.Code, format string, a ...any) *Status { return New(c, fmt.Sprintf(format, a...)) } @@ -60,7 +60,7 @@ } // Errorf returns Error(c, fmt.Sprintf(format, a...)). -func Errorf(c codes.Code, format string, a ...interface{}) error { +func Errorf(c codes.Code, format string, a ...any) error { return Error(c, fmt.Sprintf(format, a...)) } @@ -77,11 +77,18 @@ // FromError returns a Status representation of err. // // - If err was produced by this package or implements the method `GRPCStatus() -// *Status`, or if err wraps a type satisfying this, the appropriate Status is -// returned. For wrapped errors, the message returned contains the entire -// err.Error() text and not just the wrapped status. +// *Status` and `GRPCStatus()` does not return nil, or if err wraps a type +// satisfying this, the Status from `GRPCStatus()` is returned. For wrapped +// errors, the message returned contains the entire err.Error() text and not +// just the wrapped status. In that case, ok is true. // -// - If err is nil, a Status is returned with codes.OK and no message. +// - If err is nil, a Status is returned with codes.OK and no message, and ok +// is true. +// +// - If err implements the method `GRPCStatus() *Status` and `GRPCStatus()` +// returns nil (which maps to Codes.OK), or if err wraps a type +// satisfying this, a Status is returned with codes.Unknown and err's +// Error() message, and ok is false. // // - Otherwise, err is an error not compatible with this package. In this // case, a Status is returned with codes.Unknown and err's Error() message, @@ -92,11 +99,27 @@ } type grpcstatus interface{ GRPCStatus() *Status } if gs, ok := err.(grpcstatus); ok { - return gs.GRPCStatus(), true + grpcStatus := gs.GRPCStatus() + if grpcStatus == nil { + // Error has status nil, which maps to codes.OK. There + // is no sensible behavior for this, so we turn it into + // an error with codes.Unknown and discard the existing + // status. 
+ return New(codes.Unknown, err.Error()), false + } + return grpcStatus, true } var gs grpcstatus if errors.As(err, &gs) { - p := gs.GRPCStatus().Proto() + grpcStatus := gs.GRPCStatus() + if grpcStatus == nil { + // Error wraps an error that has status nil, which maps + // to codes.OK. There is no sensible behavior for this, + // so we turn it into an error with codes.Unknown and + // discard the existing status. + return New(codes.Unknown, err.Error()), false + } + p := grpcStatus.Proto() p.Message = err.Error() return status.FromProto(p), true } diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/stream.go temporal-1.22.5/src/vendor/google.golang.org/grpc/stream.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/stream.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/stream.go 2024-02-23 09:46:15.000000000 +0000 @@ -31,6 +31,7 @@ "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" "google.golang.org/grpc/encoding" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/balancerload" "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/channelz" @@ -54,7 +55,7 @@ // status package, or be one of the context errors. Otherwise, gRPC will use // codes.Unknown as the status code and err.Error() as the status message of the // RPC. -type StreamHandler func(srv interface{}, stream ServerStream) error +type StreamHandler func(srv any, stream ServerStream) error // StreamDesc represents a streaming RPC service's method specification. Used // on the server when registering services and on the client when initiating @@ -79,9 +80,9 @@ // Deprecated: See ClientStream and ServerStream documentation instead. Context() context.Context // Deprecated: See ClientStream and ServerStream documentation instead. - SendMsg(m interface{}) error + SendMsg(m any) error // Deprecated: See ClientStream and ServerStream documentation instead. - RecvMsg(m interface{}) error + RecvMsg(m any) error } // ClientStream defines the client-side behavior of a streaming RPC. @@ -90,7 +91,9 @@ // status package. type ClientStream interface { // Header returns the header metadata received from the server if there - // is any. It blocks if the metadata is not ready to read. + // is any. It blocks if the metadata is not ready to read. If the metadata + // is nil and the error is also nil, then the stream was terminated without + // headers, and the status can be discovered by calling RecvMsg. Header() (metadata.MD, error) // Trailer returns the trailer metadata from the server, if there is any. // It must only be called after stream.CloseAndRecv has returned, or @@ -123,7 +126,10 @@ // calling RecvMsg on the same stream at the same time, but it is not safe // to call SendMsg on the same stream in different goroutines. It is also // not safe to call CloseSend concurrently with SendMsg. - SendMsg(m interface{}) error + // + // It is not safe to modify the message after calling SendMsg. Tracing + // libraries and stats handlers may use the message lazily. + SendMsg(m any) error // RecvMsg blocks until it receives a message into m or the stream is // done. It returns io.EOF when the stream completes successfully. On // any other error, the stream is aborted and the error contains the RPC @@ -132,7 +138,7 @@ // It is safe to have a goroutine calling SendMsg and another goroutine // calling RecvMsg on the same stream at the same time, but it is not // safe to call RecvMsg on the same stream in different goroutines. 
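The FromError change above gives errors whose GRPCStatus() returns nil a defined meaning: codes.Unknown with ok == false, instead of silently mapping to codes.OK. A small demonstration with a contrived error type:

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// nilStatusError is a hypothetical error whose GRPCStatus method returns nil,
// the corner case the new code handles explicitly.
type nilStatusError struct{}

func (nilStatusError) Error() string              { return "broken status carrier" }
func (nilStatusError) GRPCStatus() *status.Status { return nil }

func main() {
	s, ok := status.FromError(nilStatusError{})
	// A nil *Status would have meant codes.OK, which makes no sense for an
	// error, so FromError now reports codes.Unknown and ok == false.
	fmt.Println(s.Code() == codes.Unknown, s.Message(), ok)
}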
- RecvMsg(m interface{}) error + RecvMsg(m any) error } // NewStream creates a new Stream for the client side. This is typically @@ -168,6 +174,16 @@ } func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { + // Start tracking the RPC for idleness purposes. This is where a stream is + // created for both streaming and unary RPCs, and hence is a good place to + // track active RPC count. + if err := cc.idlenessMgr.OnCallBegin(); err != nil { + return nil, err + } + // Add a calloption, to decrement the active call count, that gets executed + // when the RPC completes. + opts = append([]CallOption{OnFinish(func(error) { cc.idlenessMgr.OnCallEnd() })}, opts...) + if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok { // validate md if err := imetadata.Validate(md); err != nil { @@ -425,7 +441,7 @@ ctx = trace.NewContext(ctx, trInfo.tr) } - if cs.cc.parsedTarget.URL.Scheme == "xds" { + if cs.cc.parsedTarget.URL.Scheme == internal.GRPCResolverSchemeExtraMetadata { // Add extra metadata (metadata that will be added by transport) to context // so the balancer can see them. ctx = grpcutil.WithExtraMetadata(ctx, metadata.Pairs( @@ -469,7 +485,7 @@ // It is safe to overwrite the csAttempt's context here, since all state // maintained in it are local to the attempt. When the attempt has to be // retried, a new instance of csAttempt will be created. - if a.pickResult.Metatada != nil { + if a.pickResult.Metadata != nil { // We currently do not have a function it the metadata package which // merges given metadata with existing metadata in a context. Existing // function `AppendToOutgoingContext()` takes a variadic argument of key @@ -479,7 +495,7 @@ // in a form passable to AppendToOutgoingContext(), or create a version // of AppendToOutgoingContext() that accepts a metadata.MD. md, _ := metadata.FromOutgoingContext(a.ctx) - md = metadata.Join(md, a.pickResult.Metatada) + md = metadata.Join(md, a.pickResult.Metadata) a.ctx = metadata.NewOutgoingContext(a.ctx, md) } @@ -499,7 +515,7 @@ return toRPCErr(nse.Err) } a.s = s - a.p = &parser{r: s} + a.p = &parser{r: s, recvBufferPool: a.cs.cc.dopts.recvBufferPool} return nil } @@ -780,23 +796,24 @@ func (cs *clientStream) Header() (metadata.MD, error) { var m metadata.MD - noHeader := false err := cs.withRetry(func(a *csAttempt) error { var err error m, err = a.s.Header() - if err == transport.ErrNoHeaders { - noHeader = true - return nil - } return toRPCErr(err) }, cs.commitAttemptLocked) + if m == nil && err == nil { + // The stream ended with success. Finish the clientStream. + err = io.EOF + } + if err != nil { cs.finish(err) - return nil, err + // Do not return the error. The user should get it by calling Recv(). + return nil, nil } - if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged && !noHeader { + if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged && m != nil { // Only log if binary log is on and header has not been logged, and // there is actually headers to log. 
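newClientStream above now wires idleness tracking through the OnFinish call option, so every RPC decrements the active-call count when it completes. The same option is available to callers; a hypothetical client-side use, with placeholder method and message types:

package rpcutil

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/protobuf/types/known/emptypb"
)

// callWithCleanup invokes a placeholder unary method and runs a cleanup
// function exactly once when the RPC finishes, successfully or not.
func callWithCleanup(ctx context.Context, cc *grpc.ClientConn) error {
	in, out := &emptypb.Empty{}, &emptypb.Empty{}
	return cc.Invoke(ctx, "/example.Service/Ping", in, out,
		grpc.OnFinish(func(err error) {
			log.Printf("rpc finished, final error: %v", err)
		}))
}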
logEntry := &binarylog.ServerHeader{ @@ -812,6 +829,7 @@ binlog.Log(cs.ctx, logEntry) } } + return m, nil } @@ -852,7 +870,7 @@ cs.buffer = append(cs.buffer, op) } -func (cs *clientStream) SendMsg(m interface{}) (err error) { +func (cs *clientStream) SendMsg(m any) (err error) { defer func() { if err != nil && err != io.EOF { // Call finish on the client stream for errors generated by this SendMsg @@ -896,7 +914,7 @@ return err } -func (cs *clientStream) RecvMsg(m interface{}) error { +func (cs *clientStream) RecvMsg(m any) error { if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged { // Call Header() to binary log header if it's not already logged. cs.Header() @@ -920,24 +938,6 @@ if err != nil || !cs.desc.ServerStreams { // err != nil or non-server-streaming indicates end of stream. cs.finish(err) - - if len(cs.binlogs) != 0 { - // finish will not log Trailer. Log Trailer here. - logEntry := &binarylog.ServerTrailer{ - OnClientSide: true, - Trailer: cs.Trailer(), - Err: err, - } - if logEntry.Err == io.EOF { - logEntry.Err = nil - } - if peer, ok := peer.FromContext(cs.Context()); ok { - logEntry.PeerAddr = peer.Addr - } - for _, binlog := range cs.binlogs { - binlog.Log(cs.ctx, logEntry) - } - } } return err } @@ -993,18 +993,30 @@ } } } + cs.mu.Unlock() - // For binary logging. only log cancel in finish (could be caused by RPC ctx - // canceled or ClientConn closed). Trailer will be logged in RecvMsg. - // - // Only one of cancel or trailer needs to be logged. In the cases where - // users don't call RecvMsg, users must have already canceled the RPC. - if len(cs.binlogs) != 0 && status.Code(err) == codes.Canceled { - c := &binarylog.Cancel{ - OnClientSide: true, - } - for _, binlog := range cs.binlogs { - binlog.Log(cs.ctx, c) + // Only one of cancel or trailer needs to be logged. + if len(cs.binlogs) != 0 { + switch err { + case errContextCanceled, errContextDeadline, ErrClientConnClosing: + c := &binarylog.Cancel{ + OnClientSide: true, + } + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, c) + } + default: + logEntry := &binarylog.ServerTrailer{ + OnClientSide: true, + Trailer: cs.Trailer(), + Err: err, + } + if peer, ok := peer.FromContext(cs.Context()); ok { + logEntry.PeerAddr = peer.Addr + } + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, logEntry) + } } } if err == nil { @@ -1020,7 +1032,7 @@ cs.cancel() } -func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error { +func (a *csAttempt) sendMsg(m any, hdr, payld, data []byte) error { cs := a.cs if a.trInfo != nil { a.mu.Lock() @@ -1047,7 +1059,7 @@ return nil } -func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { +func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { cs := a.cs if len(a.statsHandlers) != 0 && payInfo == nil { payInfo = &payloadInfo{} @@ -1262,17 +1274,22 @@ return nil, err } as.s = s - as.p = &parser{r: s} + as.p = &parser{r: s, recvBufferPool: ac.dopts.recvBufferPool} ac.incrCallsStarted() if desc != unaryStreamDesc { - // Listen on cc and stream contexts to cleanup when the user closes the - // ClientConn or cancels the stream context. In all other cases, an error - // should already be injected into the recv buffer by the transport, which - // the client will eventually receive, and then we will cancel the stream's - // context in clientStream.finish. + // Listen on stream context to cleanup when the stream context is + // canceled. 
Also listen for the addrConn's context in case the + // addrConn is closed or reconnects to a different address. In all + // other cases, an error should already be injected into the recv + // buffer by the transport, which the client will eventually receive, + // and then we will cancel the stream's context in + // addrConnStream.finish. go func() { + ac.mu.Lock() + acCtx := ac.ctx + ac.mu.Unlock() select { - case <-ac.ctx.Done(): + case <-acCtx.Done(): as.finish(status.Error(codes.Canceled, "grpc: the SubConn is closing")) case <-ctx.Done(): as.finish(toRPCErr(ctx.Err())) @@ -1335,7 +1352,7 @@ return as.s.Context() } -func (as *addrConnStream) SendMsg(m interface{}) (err error) { +func (as *addrConnStream) SendMsg(m any) (err error) { defer func() { if err != nil && err != io.EOF { // Call finish on the client stream for errors generated by this SendMsg @@ -1380,7 +1397,7 @@ return nil } -func (as *addrConnStream) RecvMsg(m interface{}) (err error) { +func (as *addrConnStream) RecvMsg(m any) (err error) { defer func() { if err != nil || !as.desc.ServerStreams { // err != nil or non-server-streaming indicates end of stream. @@ -1499,7 +1516,7 @@ // // It is not safe to modify the message after calling SendMsg. Tracing // libraries and stats handlers may use the message lazily. - SendMsg(m interface{}) error + SendMsg(m any) error // RecvMsg blocks until it receives a message into m or the stream is // done. It returns io.EOF when the client has performed a CloseSend. On // any non-EOF error, the stream is aborted and the error contains the @@ -1508,7 +1525,7 @@ // It is safe to have a goroutine calling SendMsg and another goroutine // calling RecvMsg on the same stream at the same time, but it is not // safe to call RecvMsg on the same stream in different goroutines. - RecvMsg(m interface{}) error + RecvMsg(m any) error } // serverStream implements a server side Stream. 
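The Header() rework above (together with the updated ClientStream.Header doc) changes the contract for callers: a nil metadata.MD with a nil error now means the stream terminated without headers, and the status must be obtained from RecvMsg rather than from Header's error. A sketch of how a caller adapts, with placeholder stream and reply values:

package rpcutil

import "google.golang.org/grpc"

// headerOrStatus returns nil when headers arrived, and otherwise surfaces
// the RPC's final status by calling RecvMsg, per the new Header contract.
func headerOrStatus(stream grpc.ClientStream, reply any) error {
	md, err := stream.Header()
	if err != nil {
		return err
	}
	if md == nil {
		// Stream already finished without headers; RecvMsg yields the status.
		return stream.RecvMsg(reply)
	}
	return nil // headers received, continue the usual Recv loop
}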
@@ -1589,7 +1606,7 @@ ss.s.SetTrailer(md) } -func (ss *serverStream) SendMsg(m interface{}) (err error) { +func (ss *serverStream) SendMsg(m any) (err error) { defer func() { if ss.trInfo != nil { ss.mu.Lock() @@ -1597,7 +1614,7 @@ if err == nil { ss.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) } else { - ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) ss.trInfo.tr.SetError() } } @@ -1664,7 +1681,7 @@ return nil } -func (ss *serverStream) RecvMsg(m interface{}) (err error) { +func (ss *serverStream) RecvMsg(m any) (err error) { defer func() { if ss.trInfo != nil { ss.mu.Lock() @@ -1672,7 +1689,7 @@ if err == nil { ss.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) } else if err != io.EOF { - ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) ss.trInfo.tr.SetError() } } @@ -1744,7 +1761,7 @@ // prepareMsg returns the hdr, payload and data // using the compressors passed or using the // passed preparedmsg -func prepareMsg(m interface{}, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) { +func prepareMsg(m any, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) { if preparedMsg, ok := m.(*PreparedMsg); ok { return preparedMsg.hdr, preparedMsg.payload, preparedMsg.encodedData, nil } diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/trace.go temporal-1.22.5/src/vendor/google.golang.org/grpc/trace.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/trace.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/trace.go 2024-02-23 09:46:15.000000000 +0000 @@ -97,8 +97,8 @@ // payload represents an RPC request or response payload. type payload struct { - sent bool // whether this is an outgoing payload - msg interface{} // e.g. a proto.Message + sent bool // whether this is an outgoing payload + msg any // e.g. a proto.Message // TODO(dsymonds): add stringifying info to codec, and limit how much we hold here? } @@ -111,7 +111,7 @@ type fmtStringer struct { format string - a []interface{} + a []any } func (f *fmtStringer) String() string { diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/version.go temporal-1.22.5/src/vendor/google.golang.org/grpc/version.go --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/version.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/version.go 2024-02-23 09:46:15.000000000 +0000 @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.55.0" +const Version = "1.58.2" diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/grpc/vet.sh temporal-1.22.5/src/vendor/google.golang.org/grpc/vet.sh --- temporal-1.21.5-1/src/vendor/google.golang.org/grpc/vet.sh 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/grpc/vet.sh 2024-02-23 09:46:15.000000000 +0000 @@ -84,6 +84,9 @@ # thread safety. git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^stress\|grpcrand\|^benchmark\|wrr_test' +# - Do not use "interface{}"; use "any" instead. +git grep -l 'interface{}' -- "*.go" 2>&1 | not grep -v '\.pb\.go\|protoc-gen-go-grpc' + # - Do not call grpclog directly. Use grpclog.Component instead. 
git grep -l -e 'grpclog.I' --or -e 'grpclog.W' --or -e 'grpclog.E' --or -e 'grpclog.F' --or -e 'grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go' @@ -106,7 +109,7 @@ goimports -l . 2>&1 | not grep -vE "\.pb\.go" golint ./... 2>&1 | not grep -vE "/grpc_testing_not_regenerate/.*\.pb\.go:" - go mod tidy -compat=1.17 + go mod tidy -compat=1.19 git status --porcelain 2>&1 | fail_on_output || \ (git status; git --no-pager diff; exit 1) popd @@ -168,8 +171,6 @@ proto.RegisteredExtensions is deprecated proto.RegisterMapType is deprecated proto.Unmarshaler is deprecated -resolver.Backend -resolver.GRPCLB Target is deprecated: Use the Target field in the BuildOptions instead. xxx_messageInfo_ ' "${SC_OUT}" diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/protobuf/encoding/protojson/encode.go temporal-1.22.5/src/vendor/google.golang.org/protobuf/encoding/protojson/encode.go --- temporal-1.21.5-1/src/vendor/google.golang.org/protobuf/encoding/protojson/encode.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/protobuf/encoding/protojson/encode.go 2024-02-23 09:46:15.000000000 +0000 @@ -106,13 +106,19 @@ // MarshalOptions. Do not depend on the output being stable. It may change over // time across different versions of the program. func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) { - return o.marshal(m) + return o.marshal(nil, m) +} + +// MarshalAppend appends the JSON format encoding of m to b, +// returning the result. +func (o MarshalOptions) MarshalAppend(b []byte, m proto.Message) ([]byte, error) { + return o.marshal(b, m) } // marshal is a centralized function that all marshal operations go through. // For profiling purposes, avoid changing the name of this function or // introducing other code paths for marshal that do not go through this. -func (o MarshalOptions) marshal(m proto.Message) ([]byte, error) { +func (o MarshalOptions) marshal(b []byte, m proto.Message) ([]byte, error) { if o.Multiline && o.Indent == "" { o.Indent = defaultIndent } @@ -120,7 +126,7 @@ o.Resolver = protoregistry.GlobalTypes } - internalEnc, err := json.NewEncoder(o.Indent) + internalEnc, err := json.NewEncoder(b, o.Indent) if err != nil { return nil, err } @@ -128,7 +134,7 @@ // Treat nil message interface as an empty message, // in which case the output in an empty JSON object. if m == nil { - return []byte("{}"), nil + return append(b, '{', '}'), nil } enc := encoder{internalEnc, o} diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/protobuf/encoding/prototext/encode.go temporal-1.22.5/src/vendor/google.golang.org/protobuf/encoding/prototext/encode.go --- temporal-1.21.5-1/src/vendor/google.golang.org/protobuf/encoding/prototext/encode.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/protobuf/encoding/prototext/encode.go 2024-02-23 09:46:15.000000000 +0000 @@ -101,13 +101,19 @@ // MarshalOptions object. Do not depend on the output being stable. It may // change over time across different versions of the program. func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) { - return o.marshal(m) + return o.marshal(nil, m) +} + +// MarshalAppend appends the textproto format encoding of m to b, +// returning the result. +func (o MarshalOptions) MarshalAppend(b []byte, m proto.Message) ([]byte, error) { + return o.marshal(b, m) } // marshal is a centralized function that all marshal operations go through. 
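protojson (and prototext just below) gain MarshalAppend, which writes into a caller-supplied buffer instead of allocating a fresh slice per message. A usage sketch, with a well-known type standing in for an application message:

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	// Reusable scratch buffer; MarshalAppend appends to it and returns the
	// (possibly re-allocated) result, like append itself.
	buf := make([]byte, 0, 256)

	for _, d := range []time.Duration{time.Second, 1500 * time.Millisecond} {
		out, err := protojson.MarshalOptions{}.MarshalAppend(buf[:0], durationpb.New(d))
		if err != nil {
			panic(err)
		}
		fmt.Println(string(out))
	}
}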
// For profiling purposes, avoid changing the name of this function or // introducing other code paths for marshal that do not go through this. -func (o MarshalOptions) marshal(m proto.Message) ([]byte, error) { +func (o MarshalOptions) marshal(b []byte, m proto.Message) ([]byte, error) { var delims = [2]byte{'{', '}'} if o.Multiline && o.Indent == "" { @@ -117,7 +123,7 @@ o.Resolver = protoregistry.GlobalTypes } - internalEnc, err := text.NewEncoder(o.Indent, delims, o.EmitASCII) + internalEnc, err := text.NewEncoder(b, o.Indent, delims, o.EmitASCII) if err != nil { return nil, err } @@ -125,7 +131,7 @@ // Treat nil message interface as an empty message, // in which case there is nothing to output. if m == nil { - return []byte{}, nil + return b, nil } enc := encoder{internalEnc, o} diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go temporal-1.22.5/src/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go --- temporal-1.21.5-1/src/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go 2024-02-23 09:46:15.000000000 +0000 @@ -41,8 +41,10 @@ // // If indent is a non-empty string, it causes every entry for an Array or Object // to be preceded by the indent and trailed by a newline. -func NewEncoder(indent string) (*Encoder, error) { - e := &Encoder{} +func NewEncoder(buf []byte, indent string) (*Encoder, error) { + e := &Encoder{ + out: buf, + } if len(indent) > 0 { if strings.Trim(indent, " \t") != "" { return nil, errors.New("indent may only be composed of space or tab characters") @@ -176,13 +178,13 @@ // WriteInt writes out the given signed integer in JSON number value. func (e *Encoder) WriteInt(n int64) { e.prepareNext(scalar) - e.out = append(e.out, strconv.FormatInt(n, 10)...) + e.out = strconv.AppendInt(e.out, n, 10) } // WriteUint writes out the given unsigned integer in JSON number value. func (e *Encoder) WriteUint(n uint64) { e.prepareNext(scalar) - e.out = append(e.out, strconv.FormatUint(n, 10)...) + e.out = strconv.AppendUint(e.out, n, 10) } // StartObject writes out the '{' symbol. diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go temporal-1.22.5/src/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go --- temporal-1.21.5-1/src/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go 2024-02-23 09:46:15.000000000 +0000 @@ -53,8 +53,10 @@ // If outputASCII is true, strings will be serialized in such a way that // multi-byte UTF-8 sequences are escaped. This property ensures that the // overall output is ASCII (as opposed to UTF-8). -func NewEncoder(indent string, delims [2]byte, outputASCII bool) (*Encoder, error) { - e := &Encoder{} +func NewEncoder(buf []byte, indent string, delims [2]byte, outputASCII bool) (*Encoder, error) { + e := &Encoder{ + encoderState: encoderState{out: buf}, + } if len(indent) > 0 { if strings.Trim(indent, " \t") != "" { return nil, errors.New("indent may only be composed of space and tab characters") @@ -195,13 +197,13 @@ // WriteInt writes out the given signed integer value. func (e *Encoder) WriteInt(n int64) { e.prepareNext(scalar) - e.out = append(e.out, strconv.FormatInt(n, 10)...) 
+ e.out = strconv.AppendInt(e.out, n, 10) } // WriteUint writes out the given unsigned integer value. func (e *Encoder) WriteUint(n uint64) { e.prepareNext(scalar) - e.out = append(e.out, strconv.FormatUint(n, 10)...) + e.out = strconv.AppendUint(e.out, n, 10) } // WriteLiteral writes out the given string as a literal value without quotes. diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go temporal-1.22.5/src/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go --- temporal-1.21.5-1/src/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go 2024-02-23 09:46:15.000000000 +0000 @@ -183,13 +183,58 @@ // Field names for google.protobuf.ExtensionRangeOptions. const ( ExtensionRangeOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + ExtensionRangeOptions_Declaration_field_name protoreflect.Name = "declaration" + ExtensionRangeOptions_Verification_field_name protoreflect.Name = "verification" ExtensionRangeOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.uninterpreted_option" + ExtensionRangeOptions_Declaration_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.declaration" + ExtensionRangeOptions_Verification_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.verification" ) // Field numbers for google.protobuf.ExtensionRangeOptions. const ( ExtensionRangeOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 + ExtensionRangeOptions_Declaration_field_number protoreflect.FieldNumber = 2 + ExtensionRangeOptions_Verification_field_number protoreflect.FieldNumber = 3 +) + +// Full and short names for google.protobuf.ExtensionRangeOptions.VerificationState. +const ( + ExtensionRangeOptions_VerificationState_enum_fullname = "google.protobuf.ExtensionRangeOptions.VerificationState" + ExtensionRangeOptions_VerificationState_enum_name = "VerificationState" +) + +// Names for google.protobuf.ExtensionRangeOptions.Declaration. +const ( + ExtensionRangeOptions_Declaration_message_name protoreflect.Name = "Declaration" + ExtensionRangeOptions_Declaration_message_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration" +) + +// Field names for google.protobuf.ExtensionRangeOptions.Declaration. 
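The FormatInt/FormatUint to AppendInt/AppendUint swap in both internal encoders drops the intermediate string allocation per number; the two calls are otherwise equivalent:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	out := []byte("n=")

	// Old shape: format to a temporary string, then copy it into out.
	out = append(out, strconv.FormatInt(42, 10)...)

	// New shape: append the digits directly to out, no temporary string.
	out = strconv.AppendInt(out, 42, 10)

	fmt.Println(string(out)) // n=4242
}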
+const ( + ExtensionRangeOptions_Declaration_Number_field_name protoreflect.Name = "number" + ExtensionRangeOptions_Declaration_FullName_field_name protoreflect.Name = "full_name" + ExtensionRangeOptions_Declaration_Type_field_name protoreflect.Name = "type" + ExtensionRangeOptions_Declaration_IsRepeated_field_name protoreflect.Name = "is_repeated" + ExtensionRangeOptions_Declaration_Reserved_field_name protoreflect.Name = "reserved" + ExtensionRangeOptions_Declaration_Repeated_field_name protoreflect.Name = "repeated" + + ExtensionRangeOptions_Declaration_Number_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.number" + ExtensionRangeOptions_Declaration_FullName_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.full_name" + ExtensionRangeOptions_Declaration_Type_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.type" + ExtensionRangeOptions_Declaration_IsRepeated_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.is_repeated" + ExtensionRangeOptions_Declaration_Reserved_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.reserved" + ExtensionRangeOptions_Declaration_Repeated_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.repeated" +) + +// Field numbers for google.protobuf.ExtensionRangeOptions.Declaration. +const ( + ExtensionRangeOptions_Declaration_Number_field_number protoreflect.FieldNumber = 1 + ExtensionRangeOptions_Declaration_FullName_field_number protoreflect.FieldNumber = 2 + ExtensionRangeOptions_Declaration_Type_field_number protoreflect.FieldNumber = 3 + ExtensionRangeOptions_Declaration_IsRepeated_field_number protoreflect.FieldNumber = 4 + ExtensionRangeOptions_Declaration_Reserved_field_number protoreflect.FieldNumber = 5 + ExtensionRangeOptions_Declaration_Repeated_field_number protoreflect.FieldNumber = 6 ) // Names for google.protobuf.FieldDescriptorProto. 
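[editor note] The internal/encoding/json and internal/encoding/text hunks above switch the integer writers from strconv.FormatInt/FormatUint plus append to strconv.AppendInt/AppendUint, and thread a caller-supplied buffer through NewEncoder. A minimal sketch of why the Append variants matter — illustrative names only, not code from this diff:

// Sketch: contrasts the two patterns touched by the encoder hunks above.
// FormatInt allocates an intermediate string before its bytes are copied;
// AppendInt writes the digits directly into the destination slice.
package main

import (
	"fmt"
	"strconv"
)

func writeIntOld(out []byte, n int64) []byte {
	return append(out, strconv.FormatInt(n, 10)...) // extra string allocation per call
}

func writeIntNew(out []byte, n int64) []byte {
	return strconv.AppendInt(out, n, 10) // appends in place, no intermediate string
}

func main() {
	buf := make([]byte, 0, 64) // caller-owned buffer, in the spirit of NewEncoder(buf, ...)
	buf = writeIntNew(buf, 42)
	fmt.Println(string(buf)) // 42
}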
@@ -540,6 +585,7 @@ FieldOptions_DebugRedact_field_name protoreflect.Name = "debug_redact" FieldOptions_Retention_field_name protoreflect.Name = "retention" FieldOptions_Target_field_name protoreflect.Name = "target" + FieldOptions_Targets_field_name protoreflect.Name = "targets" FieldOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" FieldOptions_Ctype_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.ctype" @@ -552,6 +598,7 @@ FieldOptions_DebugRedact_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.debug_redact" FieldOptions_Retention_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.retention" FieldOptions_Target_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.target" + FieldOptions_Targets_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.targets" FieldOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.uninterpreted_option" ) @@ -567,6 +614,7 @@ FieldOptions_DebugRedact_field_number protoreflect.FieldNumber = 16 FieldOptions_Retention_field_number protoreflect.FieldNumber = 17 FieldOptions_Target_field_number protoreflect.FieldNumber = 18 + FieldOptions_Targets_field_number protoreflect.FieldNumber = 19 FieldOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/protobuf/internal/genid/type_gen.go temporal-1.22.5/src/vendor/google.golang.org/protobuf/internal/genid/type_gen.go --- temporal-1.21.5-1/src/vendor/google.golang.org/protobuf/internal/genid/type_gen.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/protobuf/internal/genid/type_gen.go 2024-02-23 09:46:15.000000000 +0000 @@ -32,6 +32,7 @@ Type_Options_field_name protoreflect.Name = "options" Type_SourceContext_field_name protoreflect.Name = "source_context" Type_Syntax_field_name protoreflect.Name = "syntax" + Type_Edition_field_name protoreflect.Name = "edition" Type_Name_field_fullname protoreflect.FullName = "google.protobuf.Type.name" Type_Fields_field_fullname protoreflect.FullName = "google.protobuf.Type.fields" @@ -39,6 +40,7 @@ Type_Options_field_fullname protoreflect.FullName = "google.protobuf.Type.options" Type_SourceContext_field_fullname protoreflect.FullName = "google.protobuf.Type.source_context" Type_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Type.syntax" + Type_Edition_field_fullname protoreflect.FullName = "google.protobuf.Type.edition" ) // Field numbers for google.protobuf.Type. @@ -49,6 +51,7 @@ Type_Options_field_number protoreflect.FieldNumber = 4 Type_SourceContext_field_number protoreflect.FieldNumber = 5 Type_Syntax_field_number protoreflect.FieldNumber = 6 + Type_Edition_field_number protoreflect.FieldNumber = 7 ) // Names for google.protobuf.Field. 
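[editor note] The genid hunk above registers the new repeated FieldOptions.targets option (field 19) next to the now-deprecated singular target (field 18); the descriptor.pb.go hunks further down add the matching Targets field and GetTargets accessor. A minimal sketch of the resulting generated API, with illustrative values that are not taken from this diff:

// Sketch: exercises the accessors added for the repeated "targets" option.
package main

import (
	"fmt"

	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	opts := &descriptorpb.FieldOptions{
		// The repeated form supersedes the deprecated singular Target field.
		Targets: []descriptorpb.FieldOptions_OptionTargetType{
			descriptorpb.FieldOptions_TARGET_TYPE_FILE,
			descriptorpb.FieldOptions_TARGET_TYPE_MESSAGE,
		},
	}
	fmt.Println(opts.GetTargets()) // prints the enum names of the configured targets
}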
@@ -121,12 +124,14 @@ Enum_Options_field_name protoreflect.Name = "options" Enum_SourceContext_field_name protoreflect.Name = "source_context" Enum_Syntax_field_name protoreflect.Name = "syntax" + Enum_Edition_field_name protoreflect.Name = "edition" Enum_Name_field_fullname protoreflect.FullName = "google.protobuf.Enum.name" Enum_Enumvalue_field_fullname protoreflect.FullName = "google.protobuf.Enum.enumvalue" Enum_Options_field_fullname protoreflect.FullName = "google.protobuf.Enum.options" Enum_SourceContext_field_fullname protoreflect.FullName = "google.protobuf.Enum.source_context" Enum_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Enum.syntax" + Enum_Edition_field_fullname protoreflect.FullName = "google.protobuf.Enum.edition" ) // Field numbers for google.protobuf.Enum. @@ -136,6 +141,7 @@ Enum_Options_field_number protoreflect.FieldNumber = 3 Enum_SourceContext_field_number protoreflect.FieldNumber = 4 Enum_Syntax_field_number protoreflect.FieldNumber = 5 + Enum_Edition_field_number protoreflect.FieldNumber = 6 ) // Names for google.protobuf.EnumValue. diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/protobuf/internal/order/order.go temporal-1.22.5/src/vendor/google.golang.org/protobuf/internal/order/order.go --- temporal-1.21.5-1/src/vendor/google.golang.org/protobuf/internal/order/order.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/protobuf/internal/order/order.go 2024-02-23 09:46:15.000000000 +0000 @@ -33,7 +33,7 @@ return !inOneof(ox) && inOneof(oy) } // Fields in disjoint oneof sets are sorted by declaration index. - if ox != nil && oy != nil && ox != oy { + if inOneof(ox) && inOneof(oy) && ox != oy { return ox.Index() < oy.Index() } // Fields sorted by field number. diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/protobuf/internal/version/version.go temporal-1.22.5/src/vendor/google.golang.org/protobuf/internal/version/version.go --- temporal-1.21.5-1/src/vendor/google.golang.org/protobuf/internal/version/version.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/protobuf/internal/version/version.go 2024-02-23 09:46:15.000000000 +0000 @@ -51,7 +51,7 @@ // 10. Send out the CL for review and submit it. 
const ( Major = 1 - Minor = 30 + Minor = 31 Patch = 0 PreRelease = "" ) diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/protobuf/proto/size.go temporal-1.22.5/src/vendor/google.golang.org/protobuf/proto/size.go --- temporal-1.21.5-1/src/vendor/google.golang.org/protobuf/proto/size.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/protobuf/proto/size.go 2024-02-23 09:46:15.000000000 +0000 @@ -73,23 +73,27 @@ } func (o MarshalOptions) sizeList(num protowire.Number, fd protoreflect.FieldDescriptor, list protoreflect.List) (size int) { + sizeTag := protowire.SizeTag(num) + if fd.IsPacked() && list.Len() > 0 { content := 0 for i, llen := 0, list.Len(); i < llen; i++ { content += o.sizeSingular(num, fd.Kind(), list.Get(i)) } - return protowire.SizeTag(num) + protowire.SizeBytes(content) + return sizeTag + protowire.SizeBytes(content) } for i, llen := 0, list.Len(); i < llen; i++ { - size += protowire.SizeTag(num) + o.sizeSingular(num, fd.Kind(), list.Get(i)) + size += sizeTag + o.sizeSingular(num, fd.Kind(), list.Get(i)) } return size } func (o MarshalOptions) sizeMap(num protowire.Number, fd protoreflect.FieldDescriptor, mapv protoreflect.Map) (size int) { + sizeTag := protowire.SizeTag(num) + mapv.Range(func(key protoreflect.MapKey, value protoreflect.Value) bool { - size += protowire.SizeTag(num) + size += sizeTag size += protowire.SizeBytes(o.sizeField(fd.MapKey(), key.Value()) + o.sizeField(fd.MapValue(), value)) return true }) diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go temporal-1.22.5/src/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go --- temporal-1.21.5-1/src/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go 2024-02-23 09:46:15.000000000 +0000 @@ -363,6 +363,8 @@ b = p.appendSingularField(b, "retention", nil) case 18: b = p.appendSingularField(b, "target", nil) + case 19: + b = p.appendRepeatedField(b, "targets", nil) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -418,6 +420,10 @@ switch (*p)[0] { case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + case 2: + b = p.appendRepeatedField(b, "declaration", (*SourcePath).appendExtensionRangeOptions_Declaration) + case 3: + b = p.appendSingularField(b, "verification", nil) } return b } @@ -473,3 +479,24 @@ } return b } + +func (p *SourcePath) appendExtensionRangeOptions_Declaration(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "number", nil) + case 2: + b = p.appendSingularField(b, "full_name", nil) + case 3: + b = p.appendSingularField(b, "type", nil) + case 4: + b = p.appendSingularField(b, "is_repeated", nil) + case 5: + b = p.appendSingularField(b, "reserved", nil) + case 6: + b = p.appendSingularField(b, "repeated", nil) + } + return b +} diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go temporal-1.22.5/src/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go --- temporal-1.21.5-1/src/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go 2024-02-23 09:46:15.000000000 +0000 @@ 
-48,6 +48,64 @@ sync "sync" ) +// The verification state of the extension range. +type ExtensionRangeOptions_VerificationState int32 + +const ( + // All the extensions of the range must be declared. + ExtensionRangeOptions_DECLARATION ExtensionRangeOptions_VerificationState = 0 + ExtensionRangeOptions_UNVERIFIED ExtensionRangeOptions_VerificationState = 1 +) + +// Enum value maps for ExtensionRangeOptions_VerificationState. +var ( + ExtensionRangeOptions_VerificationState_name = map[int32]string{ + 0: "DECLARATION", + 1: "UNVERIFIED", + } + ExtensionRangeOptions_VerificationState_value = map[string]int32{ + "DECLARATION": 0, + "UNVERIFIED": 1, + } +) + +func (x ExtensionRangeOptions_VerificationState) Enum() *ExtensionRangeOptions_VerificationState { + p := new(ExtensionRangeOptions_VerificationState) + *p = x + return p +} + +func (x ExtensionRangeOptions_VerificationState) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ExtensionRangeOptions_VerificationState) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[0].Descriptor() +} + +func (ExtensionRangeOptions_VerificationState) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[0] +} + +func (x ExtensionRangeOptions_VerificationState) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *ExtensionRangeOptions_VerificationState) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = ExtensionRangeOptions_VerificationState(num) + return nil +} + +// Deprecated: Use ExtensionRangeOptions_VerificationState.Descriptor instead. 
+func (ExtensionRangeOptions_VerificationState) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{3, 0} +} + type FieldDescriptorProto_Type int32 const ( @@ -137,11 +195,11 @@ } func (FieldDescriptorProto_Type) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[0].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor() } func (FieldDescriptorProto_Type) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[0] + return &file_google_protobuf_descriptor_proto_enumTypes[1] } func (x FieldDescriptorProto_Type) Number() protoreflect.EnumNumber { @@ -197,11 +255,11 @@ } func (FieldDescriptorProto_Label) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor() } func (FieldDescriptorProto_Label) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[1] + return &file_google_protobuf_descriptor_proto_enumTypes[2] } func (x FieldDescriptorProto_Label) Number() protoreflect.EnumNumber { @@ -258,11 +316,11 @@ } func (FileOptions_OptimizeMode) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor() } func (FileOptions_OptimizeMode) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[2] + return &file_google_protobuf_descriptor_proto_enumTypes[3] } func (x FileOptions_OptimizeMode) Number() protoreflect.EnumNumber { @@ -288,7 +346,13 @@ const ( // Default mode. - FieldOptions_STRING FieldOptions_CType = 0 + FieldOptions_STRING FieldOptions_CType = 0 + // The option [ctype=CORD] may be applied to a non-repeated field of type + // "bytes". It indicates that in C++, the data should be stored in a Cord + // instead of a string. For very large strings, this may reduce memory + // fragmentation. It may also allow better performance when parsing from a + // Cord, or when parsing with aliasing enabled, as the parsed Cord may then + // alias the original buffer. 
FieldOptions_CORD FieldOptions_CType = 1 FieldOptions_STRING_PIECE FieldOptions_CType = 2 ) @@ -318,11 +382,11 @@ } func (FieldOptions_CType) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor() } func (FieldOptions_CType) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[3] + return &file_google_protobuf_descriptor_proto_enumTypes[4] } func (x FieldOptions_CType) Number() protoreflect.EnumNumber { @@ -380,11 +444,11 @@ } func (FieldOptions_JSType) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor() } func (FieldOptions_JSType) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[4] + return &file_google_protobuf_descriptor_proto_enumTypes[5] } func (x FieldOptions_JSType) Number() protoreflect.EnumNumber { @@ -442,11 +506,11 @@ } func (FieldOptions_OptionRetention) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[6].Descriptor() } func (FieldOptions_OptionRetention) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[5] + return &file_google_protobuf_descriptor_proto_enumTypes[6] } func (x FieldOptions_OptionRetention) Number() protoreflect.EnumNumber { @@ -526,11 +590,11 @@ } func (FieldOptions_OptionTargetType) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[6].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[7].Descriptor() } func (FieldOptions_OptionTargetType) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[6] + return &file_google_protobuf_descriptor_proto_enumTypes[7] } func (x FieldOptions_OptionTargetType) Number() protoreflect.EnumNumber { @@ -588,11 +652,11 @@ } func (MethodOptions_IdempotencyLevel) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[7].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[8].Descriptor() } func (MethodOptions_IdempotencyLevel) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[7] + return &file_google_protobuf_descriptor_proto_enumTypes[8] } func (x MethodOptions_IdempotencyLevel) Number() protoreflect.EnumNumber { @@ -652,11 +716,11 @@ } func (GeneratedCodeInfo_Annotation_Semantic) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[8].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[9].Descriptor() } func (GeneratedCodeInfo_Annotation_Semantic) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[8] + return &file_google_protobuf_descriptor_proto_enumTypes[9] } func (x GeneratedCodeInfo_Annotation_Semantic) Number() protoreflect.EnumNumber { @@ -1015,8 +1079,22 @@ // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + // go/protobuf-stripping-extension-declarations + // Like Metadata, but we use a repeated field to hold all extension + // declarations. 
This should avoid the size increases of transforming a large + // extension range into small ranges in generated binaries. + Declaration []*ExtensionRangeOptions_Declaration `protobuf:"bytes,2,rep,name=declaration" json:"declaration,omitempty"` + // The verification state of the range. + // TODO(b/278783756): flip the default to DECLARATION once all empty ranges + // are marked as UNVERIFIED. + Verification *ExtensionRangeOptions_VerificationState `protobuf:"varint,3,opt,name=verification,enum=google.protobuf.ExtensionRangeOptions_VerificationState,def=1" json:"verification,omitempty"` } +// Default values for ExtensionRangeOptions fields. +const ( + Default_ExtensionRangeOptions_Verification = ExtensionRangeOptions_UNVERIFIED +) + func (x *ExtensionRangeOptions) Reset() { *x = ExtensionRangeOptions{} if protoimpl.UnsafeEnabled { @@ -1056,6 +1134,20 @@ return nil } +func (x *ExtensionRangeOptions) GetDeclaration() []*ExtensionRangeOptions_Declaration { + if x != nil { + return x.Declaration + } + return nil +} + +func (x *ExtensionRangeOptions) GetVerification() ExtensionRangeOptions_VerificationState { + if x != nil && x.Verification != nil { + return *x.Verification + } + return Default_ExtensionRangeOptions_Verification +} + // Describes a field within a message. type FieldDescriptorProto struct { state protoimpl.MessageState @@ -2046,8 +2138,10 @@ // The ctype option instructs the C++ code generator to use a different // representation of the field than it normally would. See the specific - // options below. This option is not yet implemented in the open source - // release -- sorry, we'll try to include it in a future version! + // options below. This option is only implemented to support use of + // [ctype=CORD] and [ctype=STRING] (the default) on non-repeated fields of + // type "bytes" in the open source release -- sorry, we'll try to include + // other types in a future version! Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"` // The packed option can be enabled for repeated primitive fields to enable // a more efficient representation on the wire. Rather than repeatedly @@ -2111,9 +2205,11 @@ Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` // Indicate that the field value should not be printed out when using debug // formats, e.g. when the field contains sensitive credentials. - DebugRedact *bool `protobuf:"varint,16,opt,name=debug_redact,json=debugRedact,def=0" json:"debug_redact,omitempty"` - Retention *FieldOptions_OptionRetention `protobuf:"varint,17,opt,name=retention,enum=google.protobuf.FieldOptions_OptionRetention" json:"retention,omitempty"` - Target *FieldOptions_OptionTargetType `protobuf:"varint,18,opt,name=target,enum=google.protobuf.FieldOptions_OptionTargetType" json:"target,omitempty"` + DebugRedact *bool `protobuf:"varint,16,opt,name=debug_redact,json=debugRedact,def=0" json:"debug_redact,omitempty"` + Retention *FieldOptions_OptionRetention `protobuf:"varint,17,opt,name=retention,enum=google.protobuf.FieldOptions_OptionRetention" json:"retention,omitempty"` + // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. 
+ Target *FieldOptions_OptionTargetType `protobuf:"varint,18,opt,name=target,enum=google.protobuf.FieldOptions_OptionTargetType" json:"target,omitempty"` + Targets []FieldOptions_OptionTargetType `protobuf:"varint,19,rep,name=targets,enum=google.protobuf.FieldOptions_OptionTargetType" json:"targets,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -2224,6 +2320,7 @@ return FieldOptions_RETENTION_UNKNOWN } +// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. func (x *FieldOptions) GetTarget() FieldOptions_OptionTargetType { if x != nil && x.Target != nil { return *x.Target @@ -2231,6 +2328,13 @@ return FieldOptions_TARGET_TYPE_UNKNOWN } +func (x *FieldOptions) GetTargets() []FieldOptions_OptionTargetType { + if x != nil { + return x.Targets + } + return nil +} + func (x *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2960,6 +3064,108 @@ return 0 } +type ExtensionRangeOptions_Declaration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The extension number declared within the extension range. + Number *int32 `protobuf:"varint,1,opt,name=number" json:"number,omitempty"` + // The fully-qualified name of the extension field. There must be a leading + // dot in front of the full name. + FullName *string `protobuf:"bytes,2,opt,name=full_name,json=fullName" json:"full_name,omitempty"` + // The fully-qualified type name of the extension field. Unlike + // Metadata.type, Declaration.type must have a leading dot for messages + // and enums. + Type *string `protobuf:"bytes,3,opt,name=type" json:"type,omitempty"` + // Deprecated. Please use "repeated". + // + // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. + IsRepeated *bool `protobuf:"varint,4,opt,name=is_repeated,json=isRepeated" json:"is_repeated,omitempty"` + // If true, indicates that the number is reserved in the extension range, + // and any extension field with the number will fail to compile. Set this + // when a declared extension field is deleted. + Reserved *bool `protobuf:"varint,5,opt,name=reserved" json:"reserved,omitempty"` + // If true, indicates that the extension must be defined as repeated. + // Otherwise the extension must be defined as optional. + Repeated *bool `protobuf:"varint,6,opt,name=repeated" json:"repeated,omitempty"` +} + +func (x *ExtensionRangeOptions_Declaration) Reset() { + *x = ExtensionRangeOptions_Declaration{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExtensionRangeOptions_Declaration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExtensionRangeOptions_Declaration) ProtoMessage() {} + +func (x *ExtensionRangeOptions_Declaration) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExtensionRangeOptions_Declaration.ProtoReflect.Descriptor instead. 
+func (*ExtensionRangeOptions_Declaration) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{3, 0} +} + +func (x *ExtensionRangeOptions_Declaration) GetNumber() int32 { + if x != nil && x.Number != nil { + return *x.Number + } + return 0 +} + +func (x *ExtensionRangeOptions_Declaration) GetFullName() string { + if x != nil && x.FullName != nil { + return *x.FullName + } + return "" +} + +func (x *ExtensionRangeOptions_Declaration) GetType() string { + if x != nil && x.Type != nil { + return *x.Type + } + return "" +} + +// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. +func (x *ExtensionRangeOptions_Declaration) GetIsRepeated() bool { + if x != nil && x.IsRepeated != nil { + return *x.IsRepeated + } + return false +} + +func (x *ExtensionRangeOptions_Declaration) GetReserved() bool { + if x != nil && x.Reserved != nil { + return *x.Reserved + } + return false +} + +func (x *ExtensionRangeOptions_Declaration) GetRepeated() bool { + if x != nil && x.Repeated != nil { + return *x.Repeated + } + return false +} + // Range of reserved numeric values. Reserved values may not be used by // entries in the same enum. Reserved ranges may not overlap. // @@ -2978,7 +3184,7 @@ func (x *EnumDescriptorProto_EnumReservedRange) Reset() { *x = EnumDescriptorProto_EnumReservedRange{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[23] + mi := &file_google_protobuf_descriptor_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2991,7 +3197,7 @@ func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} func (x *EnumDescriptorProto_EnumReservedRange) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[23] + mi := &file_google_protobuf_descriptor_proto_msgTypes[24] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3038,7 +3244,7 @@ func (x *UninterpretedOption_NamePart) Reset() { *x = UninterpretedOption_NamePart{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[24] + mi := &file_google_protobuf_descriptor_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3051,7 +3257,7 @@ func (*UninterpretedOption_NamePart) ProtoMessage() {} func (x *UninterpretedOption_NamePart) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[24] + mi := &file_google_protobuf_descriptor_proto_msgTypes[25] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3182,7 +3388,7 @@ func (x *SourceCodeInfo_Location) Reset() { *x = SourceCodeInfo_Location{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[25] + mi := &file_google_protobuf_descriptor_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3195,7 +3401,7 @@ func (*SourceCodeInfo_Location) ProtoMessage() {} func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[25] + mi := &file_google_protobuf_descriptor_proto_msgTypes[26] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3269,7 +3475,7 @@ func (x *GeneratedCodeInfo_Annotation) Reset() { *x = 
GeneratedCodeInfo_Annotation{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[26] + mi := &file_google_protobuf_descriptor_proto_msgTypes[27] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3282,7 +3488,7 @@ func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[26] + mi := &file_google_protobuf_descriptor_proto_msgTypes[27] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3436,264 +3642,296 @@ 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, - 0x7c, 0x0a, 0x15, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, - 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, - 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xc1, 0x06, - 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, - 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, - 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, - 0x65, 0x72, 0x12, 0x41, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, - 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x3e, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0xad, 0x04, 0x0a, 0x15, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, + 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, + 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x59, 0x0a, 0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x02, 
0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0x88, 0x01, + 0x02, 0x52, 0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x68, + 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x56, 0x65, 0x72, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x3a, 0x0a, + 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xb3, 0x01, 0x0a, 0x0b, 0x44, 0x65, 0x63, + 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, + 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, + 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6c, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, + 0x65, 0x12, 0x23, 0x0a, 0x0b, 0x69, 0x73, 0x5f, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0a, 0x69, 0x73, 0x52, 0x65, + 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x22, 0x34, + 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x45, 0x43, 0x4c, 0x41, 0x52, 0x41, 0x54, 0x49, + 0x4f, 0x4e, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, + 0x45, 0x44, 0x10, 0x01, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, + 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, + 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, + 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, - 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 
0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, - 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x12, 0x23, - 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, - 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, - 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1b, 0x0a, 0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, - 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x61, 0x6c, 0x22, 0xb6, 0x02, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, - 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, - 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x4c, 0x4f, 0x41, 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, - 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, - 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, - 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, - 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, - 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, - 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, - 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, - 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, - 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, - 0x47, 0x45, 0x10, 0x0b, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, - 0x45, 0x53, 0x10, 0x0c, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, - 0x54, 0x33, 0x32, 0x10, 0x0d, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, - 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, - 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x0f, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, - 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, - 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, - 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x12, 0x0a, 
0x0e, 0x4c, 0x41, 0x42, - 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x12, 0x12, 0x0a, - 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50, 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, - 0x03, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, - 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, + 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x3e, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x79, 0x70, + 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, + 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, + 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x69, + 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, + 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1b, 0x0a, 0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, + 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, 0x02, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, + 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x01, 0x12, + 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x4c, 0x4f, 0x41, 0x54, 0x10, 0x02, 0x12, + 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x03, 0x12, + 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x04, + 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x05, + 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, + 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, + 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x4f, 0x4f, + 
0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49, + 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x47, 0x52, 0x4f, + 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, + 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, + 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, + 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, + 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x0f, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b, + 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x11, 0x12, 0x0f, 0x0a, + 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x12, 0x22, 0x43, + 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, + 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4c, + 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x12, + 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50, 0x45, 0x41, 0x54, 0x45, + 0x44, 0x10, 0x03, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, + 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xe3, 0x02, 0x0a, 0x13, 0x45, 0x6e, 0x75, + 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, + 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, + 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6e, 0x75, 0x6d, + 0x52, 0x65, 0x73, 0x65, 
0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, + 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, + 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, + 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, + 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x83, + 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x06, 0x6d, 0x65, 0x74, + 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x89, + 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, + 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, + 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x07, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 
0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, 0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, + 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x30, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x22, 0x91, 0x09, 0x0a, 0x0b, 0x46, + 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61, + 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, + 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x61, 0x73, + 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x6a, 0x61, 0x76, + 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, + 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, + 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, 0x76, 0x61, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, + 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x44, 0x0a, 0x1d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x5f, 0x61, + 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, + 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x45, + 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, 0x64, 0x48, 0x61, 0x73, 0x68, 0x12, 0x3a, 0x0a, 0x16, + 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x68, 0x65, 0x63, + 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, + 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66, 0x38, 0x12, 0x53, 0x0a, 0x0c, 0x6f, 0x70, 0x74, 0x69, + 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xe3, 0x02, 0x0a, 0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, - 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 
0x6e, 0x73, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, 0x0e, 0x72, - 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, - 0x3b, 0x0a, 0x11, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, - 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, - 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, - 0x18, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, - 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, - 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, - 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a, - 0x15, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, - 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, - 
0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6e, - 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, - 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, - 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, - 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, - 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, 0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, - 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, - 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, - 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x30, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, - 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x22, 0x91, 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, - 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, - 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, - 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, - 0x61, 0x76, 0x61, 0x5f, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, - 0x75, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, - 0x13, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, - 0x69, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, - 0x65, 0x52, 0x11, 0x6a, 0x61, 0x76, 0x61, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, - 0x69, 0x6c, 0x65, 0x73, 0x12, 0x44, 0x0a, 0x1d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, - 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, - 0x19, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x45, 0x71, 0x75, - 0x61, 0x6c, 0x73, 0x41, 0x6e, 0x64, 0x48, 0x61, 0x73, 0x68, 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, - 0x76, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, - 0x75, 0x74, 0x66, 0x38, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, - 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, - 0x63, 0x6b, 0x55, 0x74, 0x66, 0x38, 0x12, 0x53, 0x0a, 0x0c, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, - 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, - 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d, - 0x69, 0x7a, 0x65, 0x4d, 
0x6f, 0x64, 0x65, 0x3a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x52, 0x0b, - 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x46, 0x6f, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67, - 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x09, 0x67, 0x6f, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x63, 0x63, - 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, - 0x63, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x73, 0x12, 0x39, 0x0a, 0x15, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, - 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, - 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x13, - 0x70, 0x79, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, - 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x14, 0x70, 0x68, 0x70, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, - 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x2a, 0x20, 0x01, 0x28, - 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x12, 0x70, 0x68, 0x70, 0x47, 0x65, 0x6e, - 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, - 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, - 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, - 0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x04, 0x74, - 0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x41, 0x72, 0x65, - 0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, 0x63, 0x5f, 0x63, 0x6c, 0x61, 0x73, - 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, - 0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, - 0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x73, 0x68, 0x61, 0x72, - 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x77, - 0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x27, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x28, 0x0a, - 0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, - 0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, 0x68, 0x70, 0x43, 0x6c, 0x61, 0x73, - 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x68, 0x70, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, - 0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x34, 0x0a, 0x16, - 0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 
0x64, 0x61, 0x74, 0x61, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x2c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x70, 0x68, - 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, - 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x75, 0x62, 0x79, 0x50, 0x61, - 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, - 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, - 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, - 0x3a, 0x0a, 0x0c, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, - 0x09, 0x0a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, - 0x44, 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, - 0x45, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, - 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x22, 0xbb, 0x03, 0x0a, - 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, - 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x5f, 0x77, - 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x4c, 0x0a, - 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x1c, 0x6e, - 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x25, 0x0a, 0x0a, 0x64, - 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, - 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, - 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, - 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, - 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, - 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x42, - 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, - 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, - 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, + 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 
0x2e, 0x4f, 0x70, 0x74, + 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x3a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, + 0x52, 0x0b, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x46, 0x6f, 0x72, 0x12, 0x1d, 0x0a, + 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x13, + 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, + 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, + 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x47, + 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x35, + 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, + 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x14, 0x70, 0x68, 0x70, 0x5f, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x2a, 0x20, + 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x12, 0x70, 0x68, 0x70, 0x47, + 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x25, + 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x17, 0x20, 0x01, + 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, + 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f, 0x65, 0x6e, 0x61, 0x62, + 0x6c, 0x65, 0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x08, 0x3a, + 0x04, 0x74, 0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x41, + 0x72, 0x65, 0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, 0x63, 0x5f, 0x63, 0x6c, + 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, + 0x78, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x73, 0x68, + 0x61, 0x72, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, + 0x73, 0x77, 0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x27, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, + 0x28, 0x0a, 0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, + 0x66, 0x69, 0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, 0x68, 0x70, 0x43, 0x6c, + 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x68, 0x70, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0c, 0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x34, + 
0x0a, 0x16, 0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x2c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, + 0x70, 0x68, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x5f, 0x70, 0x61, 0x63, + 0x6b, 0x61, 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x75, 0x62, 0x79, + 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, - 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, - 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0xb7, 0x08, 0x0a, 0x0c, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, - 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, - 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, - 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, - 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, - 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, - 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, - 0x19, 0x0a, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, - 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e, - 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20, - 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 0x76, 0x65, - 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, - 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, - 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, - 0x64, 0x12, 0x19, 0x0a, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, - 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 0x0a, 0x0c, - 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, 0x20, 0x01, - 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, - 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 
0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, - 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x46, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x12, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, - 0x79, 0x70, 0x65, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, + 0x6e, 0x22, 0x3a, 0x0a, 0x0c, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, + 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, + 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, + 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, + 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x22, 0xbb, + 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x74, + 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, + 0x4c, 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x5f, 0x64, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, + 0x1c, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x25, 0x0a, + 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, + 0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x65, 0x6e, 0x74, 0x72, + 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, + 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, + 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, + 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, + 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, + 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0x85, 0x09, 0x0a, + 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, + 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, + 0x65, 0x3a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, + 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, + 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, + 0x65, 0x12, 0x19, 0x0a, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, + 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, + 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, + 0x0f, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, + 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, + 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, + 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, + 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, + 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, + 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, + 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, + 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, + 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, + 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4a, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, + 0x12, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 
0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, + 0x74, 0x54, 0x79, 0x70, 0x65, 0x42, 0x02, 0x18, 0x01, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, + 0x74, 0x12, 0x48, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, 0x20, 0x03, + 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, + 0x70, 0x65, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, @@ -3885,98 +4123,103 @@ return file_google_protobuf_descriptor_proto_rawDescData } -var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 9) -var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 27) +var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 10) +var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 28) var file_google_protobuf_descriptor_proto_goTypes = []interface{}{ - (FieldDescriptorProto_Type)(0), // 0: google.protobuf.FieldDescriptorProto.Type - (FieldDescriptorProto_Label)(0), // 1: google.protobuf.FieldDescriptorProto.Label - (FileOptions_OptimizeMode)(0), // 2: google.protobuf.FileOptions.OptimizeMode - (FieldOptions_CType)(0), // 3: google.protobuf.FieldOptions.CType - (FieldOptions_JSType)(0), // 4: google.protobuf.FieldOptions.JSType - (FieldOptions_OptionRetention)(0), // 5: google.protobuf.FieldOptions.OptionRetention - (FieldOptions_OptionTargetType)(0), // 6: google.protobuf.FieldOptions.OptionTargetType - (MethodOptions_IdempotencyLevel)(0), // 7: google.protobuf.MethodOptions.IdempotencyLevel - (GeneratedCodeInfo_Annotation_Semantic)(0), // 8: google.protobuf.GeneratedCodeInfo.Annotation.Semantic - (*FileDescriptorSet)(nil), // 9: google.protobuf.FileDescriptorSet - (*FileDescriptorProto)(nil), // 10: google.protobuf.FileDescriptorProto - (*DescriptorProto)(nil), // 11: google.protobuf.DescriptorProto - (*ExtensionRangeOptions)(nil), // 12: google.protobuf.ExtensionRangeOptions - (*FieldDescriptorProto)(nil), // 13: google.protobuf.FieldDescriptorProto - (*OneofDescriptorProto)(nil), // 14: google.protobuf.OneofDescriptorProto - (*EnumDescriptorProto)(nil), // 15: google.protobuf.EnumDescriptorProto - (*EnumValueDescriptorProto)(nil), // 16: google.protobuf.EnumValueDescriptorProto - (*ServiceDescriptorProto)(nil), // 17: google.protobuf.ServiceDescriptorProto - (*MethodDescriptorProto)(nil), // 18: google.protobuf.MethodDescriptorProto - (*FileOptions)(nil), // 19: google.protobuf.FileOptions - (*MessageOptions)(nil), // 20: google.protobuf.MessageOptions - (*FieldOptions)(nil), // 21: google.protobuf.FieldOptions - (*OneofOptions)(nil), // 22: google.protobuf.OneofOptions - (*EnumOptions)(nil), // 23: google.protobuf.EnumOptions - (*EnumValueOptions)(nil), // 24: google.protobuf.EnumValueOptions - (*ServiceOptions)(nil), // 25: google.protobuf.ServiceOptions - (*MethodOptions)(nil), // 26: google.protobuf.MethodOptions - (*UninterpretedOption)(nil), // 27: 
google.protobuf.UninterpretedOption - (*SourceCodeInfo)(nil), // 28: google.protobuf.SourceCodeInfo - (*GeneratedCodeInfo)(nil), // 29: google.protobuf.GeneratedCodeInfo - (*DescriptorProto_ExtensionRange)(nil), // 30: google.protobuf.DescriptorProto.ExtensionRange - (*DescriptorProto_ReservedRange)(nil), // 31: google.protobuf.DescriptorProto.ReservedRange - (*EnumDescriptorProto_EnumReservedRange)(nil), // 32: google.protobuf.EnumDescriptorProto.EnumReservedRange - (*UninterpretedOption_NamePart)(nil), // 33: google.protobuf.UninterpretedOption.NamePart - (*SourceCodeInfo_Location)(nil), // 34: google.protobuf.SourceCodeInfo.Location - (*GeneratedCodeInfo_Annotation)(nil), // 35: google.protobuf.GeneratedCodeInfo.Annotation + (ExtensionRangeOptions_VerificationState)(0), // 0: google.protobuf.ExtensionRangeOptions.VerificationState + (FieldDescriptorProto_Type)(0), // 1: google.protobuf.FieldDescriptorProto.Type + (FieldDescriptorProto_Label)(0), // 2: google.protobuf.FieldDescriptorProto.Label + (FileOptions_OptimizeMode)(0), // 3: google.protobuf.FileOptions.OptimizeMode + (FieldOptions_CType)(0), // 4: google.protobuf.FieldOptions.CType + (FieldOptions_JSType)(0), // 5: google.protobuf.FieldOptions.JSType + (FieldOptions_OptionRetention)(0), // 6: google.protobuf.FieldOptions.OptionRetention + (FieldOptions_OptionTargetType)(0), // 7: google.protobuf.FieldOptions.OptionTargetType + (MethodOptions_IdempotencyLevel)(0), // 8: google.protobuf.MethodOptions.IdempotencyLevel + (GeneratedCodeInfo_Annotation_Semantic)(0), // 9: google.protobuf.GeneratedCodeInfo.Annotation.Semantic + (*FileDescriptorSet)(nil), // 10: google.protobuf.FileDescriptorSet + (*FileDescriptorProto)(nil), // 11: google.protobuf.FileDescriptorProto + (*DescriptorProto)(nil), // 12: google.protobuf.DescriptorProto + (*ExtensionRangeOptions)(nil), // 13: google.protobuf.ExtensionRangeOptions + (*FieldDescriptorProto)(nil), // 14: google.protobuf.FieldDescriptorProto + (*OneofDescriptorProto)(nil), // 15: google.protobuf.OneofDescriptorProto + (*EnumDescriptorProto)(nil), // 16: google.protobuf.EnumDescriptorProto + (*EnumValueDescriptorProto)(nil), // 17: google.protobuf.EnumValueDescriptorProto + (*ServiceDescriptorProto)(nil), // 18: google.protobuf.ServiceDescriptorProto + (*MethodDescriptorProto)(nil), // 19: google.protobuf.MethodDescriptorProto + (*FileOptions)(nil), // 20: google.protobuf.FileOptions + (*MessageOptions)(nil), // 21: google.protobuf.MessageOptions + (*FieldOptions)(nil), // 22: google.protobuf.FieldOptions + (*OneofOptions)(nil), // 23: google.protobuf.OneofOptions + (*EnumOptions)(nil), // 24: google.protobuf.EnumOptions + (*EnumValueOptions)(nil), // 25: google.protobuf.EnumValueOptions + (*ServiceOptions)(nil), // 26: google.protobuf.ServiceOptions + (*MethodOptions)(nil), // 27: google.protobuf.MethodOptions + (*UninterpretedOption)(nil), // 28: google.protobuf.UninterpretedOption + (*SourceCodeInfo)(nil), // 29: google.protobuf.SourceCodeInfo + (*GeneratedCodeInfo)(nil), // 30: google.protobuf.GeneratedCodeInfo + (*DescriptorProto_ExtensionRange)(nil), // 31: google.protobuf.DescriptorProto.ExtensionRange + (*DescriptorProto_ReservedRange)(nil), // 32: google.protobuf.DescriptorProto.ReservedRange + (*ExtensionRangeOptions_Declaration)(nil), // 33: google.protobuf.ExtensionRangeOptions.Declaration + (*EnumDescriptorProto_EnumReservedRange)(nil), // 34: google.protobuf.EnumDescriptorProto.EnumReservedRange + (*UninterpretedOption_NamePart)(nil), // 35: 
google.protobuf.UninterpretedOption.NamePart + (*SourceCodeInfo_Location)(nil), // 36: google.protobuf.SourceCodeInfo.Location + (*GeneratedCodeInfo_Annotation)(nil), // 37: google.protobuf.GeneratedCodeInfo.Annotation } var file_google_protobuf_descriptor_proto_depIdxs = []int32{ - 10, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto - 11, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto - 15, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto - 17, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto - 13, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto - 19, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions - 28, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo - 13, // 7: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto - 13, // 8: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto - 11, // 9: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto - 15, // 10: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto - 30, // 11: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange - 14, // 12: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto - 20, // 13: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions - 31, // 14: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange - 27, // 15: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 1, // 16: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label - 0, // 17: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type - 21, // 18: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions - 22, // 19: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions - 16, // 20: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto - 23, // 21: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions - 32, // 22: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange - 24, // 23: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions - 18, // 24: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto - 25, // 25: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions - 26, // 26: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions - 2, // 27: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode - 27, // 28: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 27, // 29: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 3, // 30: 
google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType - 4, // 31: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType - 5, // 32: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention - 6, // 33: google.protobuf.FieldOptions.target:type_name -> google.protobuf.FieldOptions.OptionTargetType - 27, // 34: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 27, // 35: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 27, // 36: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 27, // 37: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 27, // 38: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 7, // 39: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel - 27, // 40: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 33, // 41: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart - 34, // 42: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location - 35, // 43: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation - 12, // 44: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions - 8, // 45: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic - 46, // [46:46] is the sub-list for method output_type - 46, // [46:46] is the sub-list for method input_type - 46, // [46:46] is the sub-list for extension type_name - 46, // [46:46] is the sub-list for extension extendee - 0, // [0:46] is the sub-list for field type_name + 11, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto + 12, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto + 16, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto + 18, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto + 14, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto + 20, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions + 29, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo + 14, // 7: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto + 14, // 8: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto + 12, // 9: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto + 16, // 10: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto + 31, // 11: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange + 15, // 12: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto + 21, // 13: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions + 32, // 14: 
google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange + 28, // 15: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 33, // 16: google.protobuf.ExtensionRangeOptions.declaration:type_name -> google.protobuf.ExtensionRangeOptions.Declaration + 0, // 17: google.protobuf.ExtensionRangeOptions.verification:type_name -> google.protobuf.ExtensionRangeOptions.VerificationState + 2, // 18: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label + 1, // 19: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type + 22, // 20: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions + 23, // 21: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions + 17, // 22: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto + 24, // 23: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions + 34, // 24: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange + 25, // 25: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions + 19, // 26: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto + 26, // 27: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions + 27, // 28: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions + 3, // 29: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode + 28, // 30: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 28, // 31: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 4, // 32: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType + 5, // 33: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType + 6, // 34: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention + 7, // 35: google.protobuf.FieldOptions.target:type_name -> google.protobuf.FieldOptions.OptionTargetType + 7, // 36: google.protobuf.FieldOptions.targets:type_name -> google.protobuf.FieldOptions.OptionTargetType + 28, // 37: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 28, // 38: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 28, // 39: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 28, // 40: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 28, // 41: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 8, // 42: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel + 28, // 43: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 35, // 44: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart + 36, // 45: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location + 37, // 46: 
google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation + 13, // 47: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions + 9, // 48: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic + 49, // [49:49] is the sub-list for method output_type + 49, // [49:49] is the sub-list for method input_type + 49, // [49:49] is the sub-list for extension type_name + 49, // [49:49] is the sub-list for extension extendee + 0, // [0:49] is the sub-list for field type_name } func init() { file_google_protobuf_descriptor_proto_init() } @@ -4280,7 +4523,7 @@ } } file_google_protobuf_descriptor_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*EnumDescriptorProto_EnumReservedRange); i { + switch v := v.(*ExtensionRangeOptions_Declaration); i { case 0: return &v.state case 1: @@ -4292,7 +4535,7 @@ } } file_google_protobuf_descriptor_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UninterpretedOption_NamePart); i { + switch v := v.(*EnumDescriptorProto_EnumReservedRange); i { case 0: return &v.state case 1: @@ -4304,7 +4547,7 @@ } } file_google_protobuf_descriptor_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SourceCodeInfo_Location); i { + switch v := v.(*UninterpretedOption_NamePart); i { case 0: return &v.state case 1: @@ -4316,6 +4559,18 @@ } } file_google_protobuf_descriptor_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SourceCodeInfo_Location); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GeneratedCodeInfo_Annotation); i { case 0: return &v.state @@ -4333,8 +4588,8 @@ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_protobuf_descriptor_proto_rawDesc, - NumEnums: 9, - NumMessages: 27, + NumEnums: 10, + NumMessages: 28, NumExtensions: 0, NumServices: 0, }, diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go temporal-1.22.5/src/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go --- temporal-1.21.5-1/src/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go 2024-02-23 09:46:15.000000000 +0000 @@ -142,39 +142,39 @@ // // Example 2: Pack and unpack a message in Java. // -// Foo foo = ...; -// Any any = Any.pack(foo); -// ... -// if (any.is(Foo.class)) { -// foo = any.unpack(Foo.class); -// } -// // or ... -// if (any.isSameTypeAs(Foo.getDefaultInstance())) { -// foo = any.unpack(Foo.getDefaultInstance()); -// } -// -// Example 3: Pack and unpack a message in Python. -// -// foo = Foo(...) -// any = Any() -// any.Pack(foo) -// ... -// if any.Is(Foo.DESCRIPTOR): -// any.Unpack(foo) -// ... -// -// Example 4: Pack and unpack a message in Go -// -// foo := &pb.Foo{...} -// any, err := anypb.New(foo) -// if err != nil { -// ... -// } -// ... -// foo := &pb.Foo{} -// if err := any.UnmarshalTo(foo); err != nil { -// ... -// } +// Foo foo = ...; +// Any any = Any.pack(foo); +// ... 
+// if (any.is(Foo.class)) { +// foo = any.unpack(Foo.class); +// } +// // or ... +// if (any.isSameTypeAs(Foo.getDefaultInstance())) { +// foo = any.unpack(Foo.getDefaultInstance()); +// } +// +// Example 3: Pack and unpack a message in Python. +// +// foo = Foo(...) +// any = Any() +// any.Pack(foo) +// ... +// if any.Is(Foo.DESCRIPTOR): +// any.Unpack(foo) +// ... +// +// Example 4: Pack and unpack a message in Go +// +// foo := &pb.Foo{...} +// any, err := anypb.New(foo) +// if err != nil { +// ... +// } +// ... +// foo := &pb.Foo{} +// if err := any.UnmarshalTo(foo); err != nil { +// ... +// } // // The pack methods provided by protobuf library will by default use // 'type.googleapis.com/full.type.name' as the type URL and the unpack @@ -182,8 +182,8 @@ // in the type URL, for example "foo.bar.com/x/y.z" will yield type // name "y.z". // -// # JSON -// +// JSON +// ==== // The JSON representation of an `Any` value uses the regular // representation of the deserialized, embedded message, with an // additional field `@type` which contains the type URL. Example: diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go temporal-1.22.5/src/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go --- temporal-1.21.5-1/src/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go 2024-02-23 09:46:15.000000000 +0000 @@ -132,7 +132,7 @@ // `NullValue` is a singleton enumeration to represent the null value for the // `Value` type union. // -// The JSON representation for `NullValue` is JSON `null`. +// The JSON representation for `NullValue` is JSON `null`. type NullValue int32 const ( diff -Nru temporal-1.21.5-1/src/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go temporal-1.22.5/src/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go --- temporal-1.21.5-1/src/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go 2024-02-23 09:46:15.000000000 +0000 @@ -167,7 +167,7 @@ // [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with // the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use // the Joda Time's [`ISODateTimeFormat.dateTime()`]( -// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D +// http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime() // ) to obtain a formatter capable of generating timestamps in this format. type Timestamp struct { state protoimpl.MessageState diff -Nru temporal-1.21.5-1/src/vendor/lukechampine.com/uint128/uint128.go temporal-1.22.5/src/vendor/lukechampine.com/uint128/uint128.go --- temporal-1.21.5-1/src/vendor/lukechampine.com/uint128/uint128.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/lukechampine.com/uint128/uint128.go 2024-02-23 09:46:15.000000000 +0000 @@ -379,6 +379,12 @@ binary.LittleEndian.PutUint64(b[8:], u.Hi) } +// PutBytesBE stores u in b in big-endian order. It panics if len(ip) < 16. +func (u Uint128) PutBytesBE(b []byte) { + binary.BigEndian.PutUint64(b[:8], u.Hi) + binary.BigEndian.PutUint64(b[8:], u.Lo) +} + // Big returns u as a *big.Int. 
func (u Uint128) Big() *big.Int { i := new(big.Int).SetUint64(u.Hi) @@ -420,6 +426,14 @@ ) } +// FromBytesBE converts big-endian b to a Uint128 value. +func FromBytesBE(b []byte) Uint128 { + return New( + binary.BigEndian.Uint64(b[8:]), + binary.BigEndian.Uint64(b[:8]), + ) +} + // FromBig converts i to a Uint128 value. It panics if i is negative or // overflows 128 bits. func FromBig(i *big.Int) (u Uint128) { diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/cc/v3/AUTHORS temporal-1.22.5/src/vendor/modernc.org/cc/v3/AUTHORS --- temporal-1.21.5-1/src/vendor/modernc.org/cc/v3/AUTHORS 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/cc/v3/AUTHORS 2024-02-23 09:46:15.000000000 +0000 @@ -11,6 +11,7 @@ Dan Kortschak Dan Peterson Denys Smirnov +Huang Qiqi Jan Mercl <0xjnml@gmail.com> Maxim Kupriianov Peter Waller diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/cc/v3/CONTRIBUTORS temporal-1.22.5/src/vendor/modernc.org/cc/v3/CONTRIBUTORS --- temporal-1.21.5-1/src/vendor/modernc.org/cc/v3/CONTRIBUTORS 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/cc/v3/CONTRIBUTORS 2024-02-23 09:46:15.000000000 +0000 @@ -9,6 +9,7 @@ Dan Kortschak Dan Peterson Denys Smirnov +Huang Qiqi Jan Mercl <0xjnml@gmail.com> Maxim Kupriianov Peter Waller diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/cc/v3/Makefile temporal-1.22.5/src/vendor/modernc.org/cc/v3/Makefile --- temporal-1.21.5-1/src/vendor/modernc.org/cc/v3/Makefile 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/cc/v3/Makefile 2024-02-23 09:46:15.000000000 +0000 @@ -22,6 +22,7 @@ GOOS=linux GOARCH=386 go build GOOS=linux GOARCH=amd64 go build GOOS=linux GOARCH=arm go build + GOOS=linux GOARCH=loong64 go build GOOS=windows GOARCH=386 go build GOOS=windows GOARCH=amd64 go build go vet | grep -v $(ngrep) || true @@ -54,6 +55,9 @@ go version go test -v -timeout 24h +test_linux_loong64: + GOOS=linux GOARCH=loong64 make test + test_windows386: go version go test -v -timeout 24h @@ -65,19 +69,21 @@ GOOS=freebsd GOARCH=amd64 go build -v ./... GOOS=freebsd GOARCH=arm go build -v ./... GOOS=freebsd GOARCH=arm64 go build -v ./... + GOOS=illumos GOARCH=amd64 go build -v ./... GOOS=linux GOARCH=386 go build -v ./... GOOS=linux GOARCH=amd64 go build -v ./... GOOS=linux GOARCH=arm go build -v ./... GOOS=linux GOARCH=arm64 go build -v ./... + GOOS=linux GOARCH=loong64 go build -v ./... GOOS=linux GOARCH=ppc64le go build -v ./... GOOS=linux GOARCH=riscv64 go build -v ./... GOOS=linux GOARCH=s390x go build -v ./... + GOOS=netbsd GOARCH=386 go build -v ./... GOOS=netbsd GOARCH=amd64 go build -v ./... GOOS=netbsd GOARCH=arm go build -v ./... - GOOS=netbsd GOARCH=386 go build -v ./... + GOOS=openbsd GOARCH=386 go build -v ./... GOOS=openbsd GOARCH=amd64 go build -v ./... GOOS=openbsd GOARCH=arm64 go build -v ./... - GOOS=openbsd GOARCH=386 go build -v ./... GOOS=windows GOARCH=386 go build -v ./... GOOS=windows GOARCH=amd64 go build -v ./... GOOS=windows GOARCH=arm64 go build -v ./... 
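Note on the vendored lukechampine.com/uint128 update above: it adds big-endian counterparts (PutBytesBE / FromBytesBE) to the existing little-endian byte helpers. The following is a minimal sketch of how the two new functions round-trip, assuming the vendored module is importable under its canonical path; it is illustrative only and not part of the patch.

package main

import (
	"fmt"

	"lukechampine.com/uint128"
)

func main() {
	// New takes (lo, hi). PutBytesBE, per the hunk above, writes Hi into
	// b[0:8] and Lo into b[8:16]; the buffer must be at least 16 bytes.
	u := uint128.New(0x99aabbccddeeff00, 0x1122334455667788)
	var b [16]byte
	u.PutBytesBE(b[:])

	// FromBytesBE reverses that encoding: hi from b[0:8], lo from b[8:16].
	v := uint128.FromBytesBE(b[:])
	fmt.Println(u == v) // true
}
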
diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/cc/v3/abi_platforms.go temporal-1.22.5/src/vendor/modernc.org/cc/v3/abi_platforms.go --- temporal-1.21.5-1/src/vendor/modernc.org/cc/v3/abi_platforms.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/cc/v3/abi_platforms.go 2024-02-23 09:46:15.000000000 +0000 @@ -9,6 +9,7 @@ "amd64": binary.LittleEndian, "arm": binary.LittleEndian, "arm64": binary.LittleEndian, + "loong64": binary.LittleEndian, "ppc64le": binary.LittleEndian, "riscv64": binary.LittleEndian, "s390x": binary.BigEndian, @@ -23,18 +24,20 @@ {"linux", "riscv64"}: false, {"linux", "s390x"}: false, {"netbsd", "arm"}: false, + {"openbsd", "arm64"}: false, {"darwin", "amd64"}: true, {"darwin", "arm64"}: true, {"freebsd", "386"}: true, {"freebsd", "amd64"}: true, + {"illumos", "amd64"}: true, {"linux", "386"}: true, {"linux", "amd64"}: true, + {"linux", "loong64"}: true, {"netbsd", "386"}: true, {"netbsd", "amd64"}: true, {"openbsd", "386"}: true, {"openbsd", "amd64"}: true, - {"openbsd", "arm64"}: true, {"windows", "386"}: true, {"windows", "amd64"}: true, {"windows", "arm64"}: true, @@ -787,4 +790,81 @@ Decimal64: {8, 8, 8}, Decimal128: {16, 16, 16}, }, + // gcc (Loongnix 8.3.0-6.lnd.vec.33) 8.3.0 + {"linux", "loong64"}: { + Void: {1, 1, 1}, + Bool: {1, 1, 1}, + Char: {1, 1, 1}, + SChar: {1, 1, 1}, + UChar: {1, 1, 1}, + Short: {2, 2, 2}, + UShort: {2, 2, 2}, + Enum: {4, 4, 4}, + Int: {4, 4, 4}, + UInt: {4, 4, 4}, + Long: {8, 8, 8}, + ULong: {8, 8, 8}, + LongLong: {8, 8, 8}, + ULongLong: {8, 8, 8}, + Ptr: {8, 8, 8}, + Function: {8, 8, 8}, + Float: {4, 4, 4}, + Double: {8, 8, 8}, + LongDouble: {16, 16, 16}, + Int8: {1, 1, 1}, + UInt8: {1, 1, 1}, + Int16: {2, 2, 2}, + UInt16: {2, 2, 2}, + Int32: {4, 4, 4}, + UInt32: {4, 4, 4}, + Int64: {8, 8, 8}, + UInt64: {8, 8, 8}, + Int128: {16, 16, 16}, + UInt128: {16, 16, 16}, + Float32: {4, 4, 4}, + Float32x: {8, 8, 8}, + Float64: {8, 8, 8}, + Float64x: {16, 16, 16}, + Float128: {16, 16, 16}, + Decimal32: {4, 4, 4}, + Decimal64: {8, 8, 8}, + Decimal128: {16, 16, 16}, + }, + // gcc (OmniOS 151044/12.2.0-il-0) 12.2.0 + {"illumos", "amd64"}: { + Void: {1, 1, 1}, + Bool: {1, 1, 1}, + Char: {1, 1, 1}, + SChar: {1, 1, 1}, + UChar: {1, 1, 1}, + Short: {2, 2, 2}, + UShort: {2, 2, 2}, + Enum: {4, 4, 4}, + Int: {4, 4, 4}, + UInt: {4, 4, 4}, + Long: {8, 8, 8}, + ULong: {8, 8, 8}, + LongLong: {8, 8, 8}, + ULongLong: {8, 8, 8}, + Ptr: {8, 8, 8}, + Function: {8, 8, 8}, + Float: {4, 4, 4}, + Double: {8, 8, 8}, + LongDouble: {16, 16, 16}, + Int8: {1, 1, 1}, + UInt8: {1, 1, 1}, + Int16: {2, 2, 2}, + UInt16: {2, 2, 2}, + Int32: {4, 4, 4}, + UInt32: {4, 4, 4}, + Int64: {8, 8, 8}, + UInt64: {8, 8, 8}, + Int128: {16, 16, 16}, + UInt128: {16, 16, 16}, + Float32: {4, 4, 4}, + Float32x: {8, 8, 8}, + Float64: {8, 8, 8}, + Float64x: {16, 16, 16}, + Float128: {16, 16, 16}, + }, } diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/cc/v3/cc.go temporal-1.22.5/src/vendor/modernc.org/cc/v3/cc.go --- temporal-1.21.5-1/src/vendor/modernc.org/cc/v3/cc.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/cc/v3/cc.go 2024-02-23 09:46:15.000000000 +0000 @@ -805,13 +805,17 @@ cpp = "cpp" } args := append(append([]string{"-dM"}, opts...), os.DevNull) - pre, err := exec.Command(cpp, args...).Output() + cmd := exec.Command(cpp, args...) 
+ cmd.Env = append(os.Environ(), "LC_ALL=C") + pre, err := cmd.Output() if err != nil { return "", nil, nil, err } args = append(append([]string{"-v"}, opts...), os.DevNull) - out, err := exec.Command(cpp, args...).CombinedOutput() + cmd = exec.Command(cpp, args...) + cmd.Env = append(os.Environ(), "LC_ALL=C") + out, err := cmd.CombinedOutput() if err != nil { return "", nil, nil, err } @@ -864,7 +868,9 @@ } args := append(opts, "-dM", "-E", "-") - pre, err := exec.Command(cc, args...).CombinedOutput() + cmd := exec.Command(cc, args...) + cmd.Env = append(os.Environ(), "LC_ALL=C") + pre, err := cmd.CombinedOutput() if err != nil { continue } @@ -883,7 +889,9 @@ } predefined = strings.Join(a[:w], "\n") args = append(opts, "-v", "-E", "-") - out, err := exec.Command(cc, args...).CombinedOutput() + cmd = exec.Command(cc, args...) + cmd.Env = append(os.Environ(), "LC_ALL=C") + out, err := cmd.CombinedOutput() if err != nil { continue } diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/ccgo/v3/lib/go.go temporal-1.22.5/src/vendor/modernc.org/ccgo/v3/lib/go.go --- temporal-1.21.5-1/src/vendor/modernc.org/ccgo/v3/lib/go.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/ccgo/v3/lib/go.go 2024-02-23 09:46:15.000000000 +0000 @@ -110,6 +110,7 @@ fForceRuntimeConv fNoCondAssignment fAddrOfFuncPtrOk + fVolatileOk ) type imported struct { @@ -2112,7 +2113,7 @@ panic(todo("", p.pos(nd), t)) } - if t.IsAliasType() { + if t.IsAliasType() && !t.IsScalarType() { if tld := p.tlds[t.AliasDeclarator()]; tld != nil { return tld.name } @@ -3445,7 +3446,7 @@ } func (p *project) declaratorLValueNormal(n cc.Node, f *function, d *cc.Declarator, t cc.Type, mode exprMode, flags flags) { - if p.isVolatileOrAtomic(d) { + if p.isVolatileOrAtomic(d) && flags&fVolatileOk == 0 { panic(todo("", n.Position(), d.Position())) } @@ -9063,6 +9064,41 @@ x = "Inc" } ut := n.UnaryExpression.Operand.Type() + if d := n.UnaryExpression.Declarator(); d != nil && p.isVolatileOrAtomic(d) { + if !ut.IsIntegerType() { + panic(todo("", n.Position(), d.Position(), ut)) + } + + flags |= fVolatileOk + switch ut.Size() { + case 4: + switch { + case ut.IsSignedType(): + p.w("%sPre%sInt32(&", p.task.crt, x) + p.unaryExpression(f, n.UnaryExpression, ut, exprLValue, flags) + p.w(", 1)") + default: + p.w("%sPre%sInt32((*int32)(unsafe.Pointer(&", p.task.crt, x) + p.unaryExpression(f, n.UnaryExpression, ut, exprLValue, flags) + p.w(")), 1)") + } + case 8: + switch { + case ut.IsSignedType(): + p.w("%sPre%sInt64(&", p.task.crt, x) + p.unaryExpression(f, n.UnaryExpression, ut, exprLValue, flags) + p.w(", 1)") + default: + p.w("%sPre%sInt64((*int64)(unsafe.Pointer(&", p.task.crt, x) + p.unaryExpression(f, n.UnaryExpression, ut, exprLValue, flags) + p.w(")), 1)") + } + default: + panic(todo("", n.Position(), d.Position(), ut)) + } + return + } + p.w("%sPre%s%s(&", p.task.crt, x, p.helperType(n, ut)) p.unaryExpression(f, n.UnaryExpression, ut, exprLValue, flags) p.w(", %d)", p.incDelta(n.PostfixExpression, ut)) @@ -11088,14 +11124,26 @@ vt := pt.Elem() switch { case vt.IsIntegerType(): - var s string + var s, sb string switch { case vt.IsSignedType(): s = "Int" + sb = "int8" default: s = "Uint" + sb = "byte" } switch vt.Size() { + case 1: + switch { + case p.task.ignoreUnsupportedAligment: + p.w("(*(*%s)(unsafe.Pointer(", sb) + p.assignmentExpression(f, args[0], pt, exprValue, flags) + p.w(")))") + default: + p.err(n, "invalid argument of __atomic_load_n: %v, elem kind %v", pt, vt.Kind()) + } + return case 2, 4, 8: 
p.w("%sAtomicLoadN%s%d", p.task.crt, s, 8*vt.Size()) default: @@ -11207,7 +11255,23 @@ p.w(")") return case vt.Kind() == cc.Ptr: - panic(todo("", pt, vt)) + p.w("%sAtomicStoreNUintptr", p.task.crt) + p.w("(") + types := []cc.Type{pt, vt, p.intType} + for i, v := range args[:3] { + if i != 0 { + p.w(", ") + } + if i == 1 { + p.w("%s(", strings.ToLower(p.helperType(n, vt))) + } + p.assignmentExpression(f, v, types[i], exprValue, flags) + if i == 1 { + p.w(")") + } + } + p.w(")") + return } p.err(n, "invalid arguments of __atomic_store_n: (%v, %v), element kind %v", pt, vt, vt.Kind()) diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/build_all_targets.sh temporal-1.22.5/src/vendor/modernc.org/libc/build_all_targets.sh --- temporal-1.21.5-1/src/vendor/modernc.org/libc/build_all_targets.sh 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/build_all_targets.sh 2024-02-23 09:46:15.000000000 +0000 @@ -2,42 +2,67 @@ for tag in none dmesg libc.membrk libc.memgrind do echo "-tags=$tag" + echo "GOOS=darwin GOARCH=amd64" GOOS=darwin GOARCH=amd64 go build -tags=$tag -v ./... GOOS=darwin GOARCH=amd64 go test -tags=$tag -c -o /dev/null + echo "GOOS=darwin GOARCH=arm64" GOOS=darwin GOARCH=arm64 go build -tags=$tag -v ./... GOOS=darwin GOARCH=arm64 go test -tags=$tag -c -o /dev/null + echo "GOOS=freebsd GOARCH=386" GOOS=freebsd GOARCH=386 go build -tags=$tag -v ./... GOOS=freebsd GOARCH=386 go test -tags=$tag -c -o /dev/null + echo "GOOS=freebsd GOARCH=amd64" GOOS=freebsd GOARCH=amd64 go build -tags=$tag -v ./... GOOS=freebsd GOARCH=amd64 go test -tags=$tag -c -o /dev/null + echo "GOOS=freebsd GOARCH=arm" GOOS=freebsd GOARCH=arm go build -tags=$tag -v ./... GOOS=freebsd GOARCH=arm go test -tags=$tag -c -o /dev/null + echo "GOOS=linux GOARCH=386" GOOS=linux GOARCH=386 go build -tags=$tag -v ./... GOOS=linux GOARCH=386 go test -tags=$tag -c -o /dev/null + echo "GOOS=linux GOARCH=amd64" GOOS=linux GOARCH=amd64 go build -tags=$tag -v ./... GOOS=linux GOARCH=amd64 go test -tags=$tag -c -o /dev/null + echo "GOOS=linux GOARCH=arm" GOOS=linux GOARCH=arm go build -tags=$tag -v ./... GOOS=linux GOARCH=arm go test -tags=$tag -c -o /dev/null + echo "GOOS=linux GOARCH=arm64" GOOS=linux GOARCH=arm64 go build -tags=$tag -v ./... GOOS=linux GOARCH=arm64 go test -tags=$tag -c -o /dev/null + echo "GOOS=linux GOARCH=loong64" + GOOS=linux GOARCH=loong64 go build -tags=$tag -v ./... + GOOS=linux GOARCH=loong64 go test -tags=$tag -c -o /dev/null + echo "GOOS=linux GOARCH=ppc64le" + GOOS=linux GOARCH=ppc64le go build -tags=$tag -v ./... GOOS=linux GOARCH=ppc64le go test -tags=$tag -c -o /dev/null + echo "GOOS=linux GOARCH=riscv64" GOOS=linux GOARCH=riscv64 go build -tags=$tag -v ./... + GOOS=linux GOARCH=riscv64 go test -tags=$tag -c -o /dev/null + echo "GOOS=linux GOARCH=s390x" GOOS=linux GOARCH=s390x go build -tags=$tag -v ./... GOOS=linux GOARCH=s390x go test -tags=$tag -c -o /dev/null + echo "GOOS=netbsd GOARCH=amd64" GOOS=netbsd GOARCH=amd64 go build -tags=$tag -v ./... GOOS=netbsd GOARCH=amd64 go test -tags=$tag -c -o /dev/null + echo "GOOS=netbsd GOARCH=arm" GOOS=netbsd GOARCH=arm go build -tags=$tag -v ./... GOOS=netbsd GOARCH=arm go test -tags=$tag -c -o /dev/null + echo "GOOS=openbsd GOARCH=386" GOOS=openbsd GOARCH=386 go build -tags=$tag -v ./... GOOS=openbsd GOARCH=386 go test -tags=$tag -c -o /dev/null + echo "GOOS=openbsd GOARCH=amd64" GOOS=openbsd GOARCH=amd64 go build -tags=$tag -v ./... 
GOOS=openbsd GOARCH=amd64 go test -tags=$tag -c -o /dev/null + echo "GOOS=openbsd GOARCH=arm64" GOOS=openbsd GOARCH=arm64 go build -tags=$tag -v ./... GOOS=openbsd GOARCH=arm64 go test -tags=$tag -c -o /dev/null + echo "GOOS=windows GOARCH=386" GOOS=windows GOARCH=386 go build -tags=$tag -v ./... GOOS=windows GOARCH=386 go test -tags=$tag -c -o /dev/null + echo "GOOS=windows GOARCH=amd64" GOOS=windows GOARCH=amd64 go build -tags=$tag -v ./... GOOS=windows GOARCH=amd64 go test -tags=$tag -c -o /dev/null + echo "GOOS=windows GOARCH=arm64" GOOS=windows GOARCH=arm64 go build -tags=$tag -v ./... GOOS=windows GOARCH=arm64 go test -tags=$tag -c -o /dev/null done diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/capi_freebsd_386.go temporal-1.22.5/src/vendor/modernc.org/libc/capi_freebsd_386.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/capi_freebsd_386.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/capi_freebsd_386.go 2024-02-23 09:46:15.000000000 +0000 @@ -343,6 +343,7 @@ "poll": {}, "popen": {}, "pow": {}, + "pread": {}, "printf": {}, "pselect": {}, "pthread_attr_destroy": {}, diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/capi_freebsd_amd64.go temporal-1.22.5/src/vendor/modernc.org/libc/capi_freebsd_amd64.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/capi_freebsd_amd64.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/capi_freebsd_amd64.go 2024-02-23 09:46:15.000000000 +0000 @@ -343,6 +343,7 @@ "poll": {}, "popen": {}, "pow": {}, + "pread": {}, "printf": {}, "pselect": {}, "pthread_attr_destroy": {}, diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/capi_freebsd_arm.go temporal-1.22.5/src/vendor/modernc.org/libc/capi_freebsd_arm.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/capi_freebsd_arm.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/capi_freebsd_arm.go 2024-02-23 09:46:15.000000000 +0000 @@ -343,6 +343,7 @@ "poll": {}, "popen": {}, "pow": {}, + "pread": {}, "printf": {}, "pselect": {}, "pthread_attr_destroy": {}, diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/capi_freebsd_arm64.go temporal-1.22.5/src/vendor/modernc.org/libc/capi_freebsd_arm64.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/capi_freebsd_arm64.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/capi_freebsd_arm64.go 2024-02-23 09:46:15.000000000 +0000 @@ -343,6 +343,7 @@ "poll": {}, "popen": {}, "pow": {}, + "pread": {}, "printf": {}, "pselect": {}, "pthread_attr_destroy": {}, diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/capi_linux_386.go temporal-1.22.5/src/vendor/modernc.org/libc/capi_linux_386.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/capi_linux_386.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/capi_linux_386.go 2024-02-23 09:46:15.000000000 +0000 @@ -339,6 +339,7 @@ "popen": {}, "posix_fadvise": {}, "pow": {}, + "pread": {}, "printf": {}, "pselect": {}, "pthread_attr_destroy": {}, diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/capi_linux_amd64.go temporal-1.22.5/src/vendor/modernc.org/libc/capi_linux_amd64.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/capi_linux_amd64.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/capi_linux_amd64.go 2024-02-23 09:46:15.000000000 +0000 @@ -350,6 +350,7 @@ "popen": {}, "posix_fadvise": {}, "pow": {}, + "pread": {}, "printf": {}, "pselect": {}, "pthread_attr_destroy": 
{}, diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/capi_linux_arm.go temporal-1.22.5/src/vendor/modernc.org/libc/capi_linux_arm.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/capi_linux_arm.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/capi_linux_arm.go 2024-02-23 09:46:15.000000000 +0000 @@ -339,6 +339,7 @@ "popen": {}, "posix_fadvise": {}, "pow": {}, + "pread": {}, "printf": {}, "pselect": {}, "pthread_attr_destroy": {}, diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/capi_linux_arm64.go temporal-1.22.5/src/vendor/modernc.org/libc/capi_linux_arm64.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/capi_linux_arm64.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/capi_linux_arm64.go 2024-02-23 09:46:15.000000000 +0000 @@ -339,6 +339,7 @@ "popen": {}, "posix_fadvise": {}, "pow": {}, + "pread": {}, "printf": {}, "pselect": {}, "pthread_attr_destroy": {}, diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/capi_linux_loong64.go temporal-1.22.5/src/vendor/modernc.org/libc/capi_linux_loong64.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/capi_linux_loong64.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/capi_linux_loong64.go 2024-02-23 09:46:15.000000000 +0000 @@ -0,0 +1,528 @@ +// Code generated by 'go generate' - DO NOT EDIT. + +package libc // import "modernc.org/libc" + +var CAPI = map[string]struct{}{ + "_IO_putc": {}, + "___errno_location": {}, + "__assert_fail": {}, + "__builtin___memcpy_chk": {}, + "__builtin___memmove_chk": {}, + "__builtin___memset_chk": {}, + "__builtin___snprintf_chk": {}, + "__builtin___sprintf_chk": {}, + "__builtin___strcat_chk": {}, + "__builtin___strcpy_chk": {}, + "__builtin___strncpy_chk": {}, + "__builtin___vsnprintf_chk": {}, + "__builtin_abort": {}, + "__builtin_abs": {}, + "__builtin_add_overflowInt64": {}, + "__builtin_add_overflowUint32": {}, + "__builtin_add_overflowUint64": {}, + "__builtin_bswap16": {}, + "__builtin_bswap32": {}, + "__builtin_bswap64": {}, + "__builtin_bzero": {}, + "__builtin_clz": {}, + "__builtin_clzl": {}, + "__builtin_clzll": {}, + "__builtin_constant_p_impl": {}, + "__builtin_copysign": {}, + "__builtin_copysignf": {}, + "__builtin_copysignl": {}, + "__builtin_exit": {}, + "__builtin_expect": {}, + "__builtin_fabs": {}, + "__builtin_fabsf": {}, + "__builtin_fabsl": {}, + "__builtin_free": {}, + "__builtin_getentropy": {}, + "__builtin_huge_val": {}, + "__builtin_huge_valf": {}, + "__builtin_inf": {}, + "__builtin_inff": {}, + "__builtin_infl": {}, + "__builtin_isnan": {}, + "__builtin_isunordered": {}, + "__builtin_llabs": {}, + "__builtin_malloc": {}, + "__builtin_memcmp": {}, + "__builtin_memcpy": {}, + "__builtin_memset": {}, + "__builtin_mmap": {}, + "__builtin_mul_overflowInt64": {}, + "__builtin_mul_overflowUint128": {}, + "__builtin_mul_overflowUint64": {}, + "__builtin_nan": {}, + "__builtin_nanf": {}, + "__builtin_nanl": {}, + "__builtin_object_size": {}, + "__builtin_popcount": {}, + "__builtin_popcountl": {}, + "__builtin_prefetch": {}, + "__builtin_printf": {}, + "__builtin_snprintf": {}, + "__builtin_sprintf": {}, + "__builtin_strchr": {}, + "__builtin_strcmp": {}, + "__builtin_strcpy": {}, + "__builtin_strlen": {}, + "__builtin_sub_overflowInt64": {}, + "__builtin_trap": {}, + "__builtin_unreachable": {}, + "__ccgo_dmesg": {}, + "__ccgo_getMutexType": {}, + "__ccgo_in6addr_anyp": {}, + "__ccgo_pthreadAttrGetDetachState": {}, + "__ccgo_pthreadMutexattrGettype": 
{}, + "__ccgo_sqlite3_log": {}, + "__cmsg_nxthdr": {}, + "__ctype_b_loc": {}, + "__ctype_get_mb_cur_max": {}, + "__errno_location": {}, + "__floatscan": {}, + "__fpclassify": {}, + "__fpclassifyf": {}, + "__fpclassifyl": {}, + "__fsmu8": {}, + "__h_errno_location": {}, + "__inet_aton": {}, + "__intscan": {}, + "__isalnum_l": {}, + "__isalpha_l": {}, + "__isdigit_l": {}, + "__islower_l": {}, + "__isnan": {}, + "__isnanf": {}, + "__isnanl": {}, + "__isoc99_sscanf": {}, + "__isprint_l": {}, + "__isupper_l": {}, + "__isxdigit_l": {}, + "__lockfile": {}, + "__lookup_ipliteral": {}, + "__lookup_name": {}, + "__lookup_serv": {}, + "__shgetc": {}, + "__shlim": {}, + "__strncasecmp_l": {}, + "__sync_add_and_fetch_uint32": {}, + "__sync_sub_and_fetch_uint32": {}, + "__syscall1": {}, + "__syscall3": {}, + "__syscall4": {}, + "__toread": {}, + "__toread_needs_stdio_exit": {}, + "__uflow": {}, + "__unlockfile": {}, + "_exit": {}, + "_longjmp": {}, + "_obstack_begin": {}, + "_obstack_newchunk": {}, + "_setjmp": {}, + "abort": {}, + "abs": {}, + "accept": {}, + "access": {}, + "acos": {}, + "acosh": {}, + "alarm": {}, + "asin": {}, + "asinh": {}, + "atan": {}, + "atan2": {}, + "atanh": {}, + "atexit": {}, + "atof": {}, + "atoi": {}, + "atol": {}, + "backtrace": {}, + "backtrace_symbols_fd": {}, + "bind": {}, + "bsearch": {}, + "bzero": {}, + "calloc": {}, + "ceil": {}, + "ceilf": {}, + "cfgetospeed": {}, + "cfsetispeed": {}, + "cfsetospeed": {}, + "chdir": {}, + "chmod": {}, + "chown": {}, + "clock_gettime": {}, + "close": {}, + "closedir": {}, + "confstr": {}, + "connect": {}, + "copysign": {}, + "copysignf": {}, + "copysignl": {}, + "cos": {}, + "cosf": {}, + "cosh": {}, + "ctime": {}, + "ctime_r": {}, + "dlclose": {}, + "dlerror": {}, + "dlopen": {}, + "dlsym": {}, + "dup2": {}, + "dup3": {}, + "endpwent": {}, + "environ": {}, + "execvp": {}, + "exit": {}, + "exp": {}, + "fabs": {}, + "fabsf": {}, + "fabsl": {}, + "faccessat": {}, + "fchmod": {}, + "fchmodat": {}, + "fchown": {}, + "fchownat": {}, + "fclose": {}, + "fcntl": {}, + "fcntl64": {}, + "fdopen": {}, + "ferror": {}, + "fflush": {}, + "fgetc": {}, + "fgets": {}, + "fileno": {}, + "floor": {}, + "fmod": {}, + "fmodl": {}, + "fopen": {}, + "fopen64": {}, + "fork": {}, + "fprintf": {}, + "fputc": {}, + "fputs": {}, + "fread": {}, + "free": {}, + "freeaddrinfo": {}, + "frexp": {}, + "fscanf": {}, + "fseek": {}, + "fstat": {}, + "fstat64": {}, + "fstatfs": {}, + "fsync": {}, + "ftell": {}, + "ftruncate": {}, + "ftruncate64": {}, + "fts64_close": {}, + "fts64_open": {}, + "fts64_read": {}, + "fts_close": {}, + "fts_open": {}, + "fts_read": {}, + "fwrite": {}, + "gai_strerror": {}, + "getaddrinfo": {}, + "getc": {}, + "getcwd": {}, + "getegid": {}, + "getentropy": {}, + "getenv": {}, + "geteuid": {}, + "getgid": {}, + "getgrgid": {}, + "getgrgid_r": {}, + "getgrnam": {}, + "getgrnam_r": {}, + "gethostbyaddr": {}, + "gethostbyaddr_r": {}, + "gethostbyname": {}, + "gethostbyname2": {}, + "gethostbyname2_r": {}, + "gethostbyname_r": {}, + "gethostname": {}, + "getnameinfo": {}, + "getpeername": {}, + "getpid": {}, + "getpwnam": {}, + "getpwnam_r": {}, + "getpwuid": {}, + "getpwuid_r": {}, + "getrandom": {}, + "getresgid": {}, + "getresuid": {}, + "getrlimit": {}, + "getrlimit64": {}, + "getrusage": {}, + "getservbyname": {}, + "getsockname": {}, + "getsockopt": {}, + "gettimeofday": {}, + "getuid": {}, + "gmtime_r": {}, + "h_errno": {}, + "htonl": {}, + "htons": {}, + "hypot": {}, + "inet_ntoa": {}, + "inet_ntop": {}, + "inet_pton": {}, + "initstate": 
{}, + "initstate_r": {}, + "ioctl": {}, + "isalnum": {}, + "isalpha": {}, + "isascii": {}, + "isatty": {}, + "isdigit": {}, + "islower": {}, + "isnan": {}, + "isnanf": {}, + "isnanl": {}, + "isprint": {}, + "isupper": {}, + "iswalnum": {}, + "iswspace": {}, + "isxdigit": {}, + "kill": {}, + "ldexp": {}, + "link": {}, + "linkat": {}, + "listen": {}, + "llabs": {}, + "localeconv": {}, + "localtime": {}, + "localtime_r": {}, + "log": {}, + "log10": {}, + "log2": {}, + "longjmp": {}, + "lrand48": {}, + "lseek": {}, + "lseek64": {}, + "lstat": {}, + "lstat64": {}, + "malloc": {}, + "mblen": {}, + "mbrtowc": {}, + "mbsinit": {}, + "mbstowcs": {}, + "mbtowc": {}, + "memchr": {}, + "memcmp": {}, + "memcpy": {}, + "memmove": {}, + "memset": {}, + "mkdir": {}, + "mkdirat": {}, + "mkfifo": {}, + "mknod": {}, + "mknodat": {}, + "mkostemp": {}, + "mkstemp": {}, + "mkstemp64": {}, + "mkstemps": {}, + "mkstemps64": {}, + "mktime": {}, + "mmap": {}, + "mmap64": {}, + "modf": {}, + "mremap": {}, + "munmap": {}, + "nanf": {}, + "nanosleep": {}, + "nl_langinfo": {}, + "ntohs": {}, + "obstack_free": {}, + "obstack_vprintf": {}, + "open": {}, + "open64": {}, + "openat": {}, + "opendir": {}, + "openpty": {}, + "pathconf": {}, + "pause": {}, + "pclose": {}, + "perror": {}, + "pipe": {}, + "pipe2": {}, + "poll": {}, + "popen": {}, + "posix_fadvise": {}, + "pow": {}, + "pread": {}, + "printf": {}, + "pselect": {}, + "pthread_attr_destroy": {}, + "pthread_attr_getdetachstate": {}, + "pthread_attr_init": {}, + "pthread_attr_setdetachstate": {}, + "pthread_attr_setscope": {}, + "pthread_attr_setstacksize": {}, + "pthread_cond_broadcast": {}, + "pthread_cond_destroy": {}, + "pthread_cond_init": {}, + "pthread_cond_signal": {}, + "pthread_cond_timedwait": {}, + "pthread_cond_wait": {}, + "pthread_create": {}, + "pthread_detach": {}, + "pthread_equal": {}, + "pthread_exit": {}, + "pthread_getspecific": {}, + "pthread_join": {}, + "pthread_key_create": {}, + "pthread_key_delete": {}, + "pthread_mutex_destroy": {}, + "pthread_mutex_init": {}, + "pthread_mutex_lock": {}, + "pthread_mutex_trylock": {}, + "pthread_mutex_unlock": {}, + "pthread_mutexattr_destroy": {}, + "pthread_mutexattr_init": {}, + "pthread_mutexattr_settype": {}, + "pthread_self": {}, + "pthread_setspecific": {}, + "putc": {}, + "putchar": {}, + "puts": {}, + "pwrite": {}, + "qsort": {}, + "raise": {}, + "rand": {}, + "rand_r": {}, + "random": {}, + "random_r": {}, + "read": {}, + "readdir": {}, + "readdir64": {}, + "readlink": {}, + "readlinkat": {}, + "readv": {}, + "realloc": {}, + "reallocarray": {}, + "realpath": {}, + "recv": {}, + "recvfrom": {}, + "recvmsg": {}, + "remove": {}, + "rename": {}, + "renameat2": {}, + "rewind": {}, + "rindex": {}, + "rint": {}, + "rmdir": {}, + "round": {}, + "scalbn": {}, + "scalbnl": {}, + "sched_yield": {}, + "select": {}, + "send": {}, + "sendmsg": {}, + "sendto": {}, + "setbuf": {}, + "setenv": {}, + "setjmp": {}, + "setlocale": {}, + "setrlimit": {}, + "setrlimit64": {}, + "setsid": {}, + "setsockopt": {}, + "setstate": {}, + "setvbuf": {}, + "shmat": {}, + "shmctl": {}, + "shmdt": {}, + "shutdown": {}, + "sigaction": {}, + "signal": {}, + "sin": {}, + "sinf": {}, + "sinh": {}, + "sleep": {}, + "snprintf": {}, + "socket": {}, + "sprintf": {}, + "sqrt": {}, + "srand48": {}, + "sscanf": {}, + "stat": {}, + "stat64": {}, + "stderr": {}, + "stdin": {}, + "stdout": {}, + "strcasecmp": {}, + "strcat": {}, + "strchr": {}, + "strcmp": {}, + "strcpy": {}, + "strcspn": {}, + "strdup": {}, + "strerror": {}, + "strerror_r": 
{}, + "strlcat": {}, + "strlcpy": {}, + "strlen": {}, + "strncasecmp": {}, + "strncat": {}, + "strncmp": {}, + "strncpy": {}, + "strnlen": {}, + "strpbrk": {}, + "strrchr": {}, + "strspn": {}, + "strstr": {}, + "strtod": {}, + "strtof": {}, + "strtoimax": {}, + "strtok": {}, + "strtol": {}, + "strtold": {}, + "strtoll": {}, + "strtoul": {}, + "strtoull": {}, + "strtoumax": {}, + "symlink": {}, + "symlinkat": {}, + "sysconf": {}, + "system": {}, + "tan": {}, + "tanh": {}, + "tcgetattr": {}, + "tcsendbreak": {}, + "tcsetattr": {}, + "time": {}, + "tmpfile": {}, + "tolower": {}, + "toupper": {}, + "trunc": {}, + "tzset": {}, + "umask": {}, + "uname": {}, + "ungetc": {}, + "unlink": {}, + "unlinkat": {}, + "unsetenv": {}, + "usleep": {}, + "utime": {}, + "utimensat": {}, + "utimes": {}, + "uuid_copy": {}, + "uuid_generate_random": {}, + "uuid_parse": {}, + "uuid_unparse": {}, + "vasprintf": {}, + "vfprintf": {}, + "vfscanf": {}, + "vprintf": {}, + "vsnprintf": {}, + "vsprintf": {}, + "vsscanf": {}, + "waitpid": {}, + "wcschr": {}, + "wctomb": {}, + "wcwidth": {}, + "write": {}, + "writev": {}, + "zero_struct_address": {}, +} diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/capi_linux_ppc64le.go temporal-1.22.5/src/vendor/modernc.org/libc/capi_linux_ppc64le.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/capi_linux_ppc64le.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/capi_linux_ppc64le.go 2024-02-23 09:46:15.000000000 +0000 @@ -350,6 +350,7 @@ "popen": {}, "posix_fadvise": {}, "pow": {}, + "pread": {}, "printf": {}, "pselect": {}, "pthread_attr_destroy": {}, diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/capi_linux_riscv64.go temporal-1.22.5/src/vendor/modernc.org/libc/capi_linux_riscv64.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/capi_linux_riscv64.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/capi_linux_riscv64.go 2024-02-23 09:46:15.000000000 +0000 @@ -341,6 +341,7 @@ "popen": {}, "posix_fadvise": {}, "pow": {}, + "pread": {}, "printf": {}, "pselect": {}, "pthread_attr_destroy": {}, diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/capi_linux_s390x.go temporal-1.22.5/src/vendor/modernc.org/libc/capi_linux_s390x.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/capi_linux_s390x.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/capi_linux_s390x.go 2024-02-23 09:46:15.000000000 +0000 @@ -339,6 +339,7 @@ "popen": {}, "posix_fadvise": {}, "pow": {}, + "pread": {}, "printf": {}, "pselect": {}, "pthread_attr_destroy": {}, diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/capi_netbsd_amd64.go temporal-1.22.5/src/vendor/modernc.org/libc/capi_netbsd_amd64.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/capi_netbsd_amd64.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/capi_netbsd_amd64.go 2024-02-23 09:46:15.000000000 +0000 @@ -338,6 +338,7 @@ "poll": {}, "popen": {}, "pow": {}, + "pread": {}, "printf": {}, "pselect": {}, "pthread_attr_destroy": {}, diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/capi_netbsd_arm.go temporal-1.22.5/src/vendor/modernc.org/libc/capi_netbsd_arm.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/capi_netbsd_arm.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/capi_netbsd_arm.go 2024-02-23 09:46:15.000000000 +0000 @@ -338,6 +338,7 @@ "poll": {}, "popen": {}, "pow": {}, + "pread": {}, "printf": {}, "pselect": {}, 
"pthread_attr_destroy": {}, diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/capi_openbsd_386.go temporal-1.22.5/src/vendor/modernc.org/libc/capi_openbsd_386.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/capi_openbsd_386.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/capi_openbsd_386.go 2024-02-23 09:46:15.000000000 +0000 @@ -349,6 +349,7 @@ "poll": {}, "popen": {}, "pow": {}, + "pread": {}, "printf": {}, "pselect": {}, "pthread_attr_destroy": {}, diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/capi_openbsd_amd64.go temporal-1.22.5/src/vendor/modernc.org/libc/capi_openbsd_amd64.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/capi_openbsd_amd64.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/capi_openbsd_amd64.go 2024-02-23 09:46:15.000000000 +0000 @@ -350,6 +350,7 @@ "poll": {}, "popen": {}, "pow": {}, + "pread": {}, "printf": {}, "pselect": {}, "pthread_attr_destroy": {}, diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/capi_openbsd_arm64.go temporal-1.22.5/src/vendor/modernc.org/libc/capi_openbsd_arm64.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/capi_openbsd_arm64.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/capi_openbsd_arm64.go 2024-02-23 09:46:15.000000000 +0000 @@ -350,6 +350,7 @@ "poll": {}, "popen": {}, "pow": {}, + "pread": {}, "printf": {}, "pselect": {}, "pthread_attr_destroy": {}, diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/errno/capi_linux_loong64.go temporal-1.22.5/src/vendor/modernc.org/libc/errno/capi_linux_loong64.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/errno/capi_linux_loong64.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/errno/capi_linux_loong64.go 2024-02-23 09:46:15.000000000 +0000 @@ -0,0 +1,5 @@ +// Code generated by 'ccgo errno/gen.c -crt-import-path -export-defines -export-enums -export-externs X -export-fields F -export-structs -export-typedefs -header -hide _OSSwapInt16,_OSSwapInt32,_OSSwapInt64 -ignore-unsupported-alignment -o errno/errno_linux_amd64.go -pkgname errno', DO NOT EDIT. + +package errno + +var CAPI = map[string]struct{}{} diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/errno/errno_linux_loong64.go temporal-1.22.5/src/vendor/modernc.org/libc/errno/errno_linux_loong64.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/errno/errno_linux_loong64.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/errno/errno_linux_loong64.go 2024-02-23 09:46:15.000000000 +0000 @@ -0,0 +1,187 @@ +// Code generated by 'ccgo errno/gen.c -crt-import-path "" -export-defines "" -export-enums "" -export-externs X -export-fields F -export-structs "" -export-typedefs "" -header -hide _OSSwapInt16,_OSSwapInt32,_OSSwapInt64 -ignore-unsupported-alignment -o errno/errno_linux_amd64.go -pkgname errno', DO NOT EDIT. 
+ +package errno + +import ( + "math" + "reflect" + "sync/atomic" + "unsafe" +) + +var _ = math.Pi +var _ reflect.Kind +var _ atomic.Value +var _ unsafe.Pointer + +const ( + E2BIG = 7 // errno-base.h:11:1: + EACCES = 13 // errno-base.h:17:1: + EADDRINUSE = 98 // errno.h:81:1: + EADDRNOTAVAIL = 99 // errno.h:82:1: + EADV = 68 // errno.h:51:1: + EAFNOSUPPORT = 97 // errno.h:80:1: + EAGAIN = 11 // errno-base.h:15:1: + EALREADY = 114 // errno.h:97:1: + EBADE = 52 // errno.h:33:1: + EBADF = 9 // errno-base.h:13:1: + EBADFD = 77 // errno.h:60:1: + EBADMSG = 74 // errno.h:57:1: + EBADR = 53 // errno.h:34:1: + EBADRQC = 56 // errno.h:37:1: + EBADSLT = 57 // errno.h:38:1: + EBFONT = 59 // errno.h:42:1: + EBUSY = 16 // errno-base.h:20:1: + ECANCELED = 125 // errno.h:109:1: + ECHILD = 10 // errno-base.h:14:1: + ECHRNG = 44 // errno.h:25:1: + ECOMM = 70 // errno.h:53:1: + ECONNABORTED = 103 // errno.h:86:1: + ECONNREFUSED = 111 // errno.h:94:1: + ECONNRESET = 104 // errno.h:87:1: + EDEADLK = 35 // errno.h:7:1: + EDEADLOCK = 35 // errno.h:40:1: + EDESTADDRREQ = 89 // errno.h:72:1: + EDOM = 33 // errno-base.h:37:1: + EDOTDOT = 73 // errno.h:56:1: + EDQUOT = 122 // errno.h:105:1: + EEXIST = 17 // errno-base.h:21:1: + EFAULT = 14 // errno-base.h:18:1: + EFBIG = 27 // errno-base.h:31:1: + EHOSTDOWN = 112 // errno.h:95:1: + EHOSTUNREACH = 113 // errno.h:96:1: + EHWPOISON = 133 // errno.h:121:1: + EIDRM = 43 // errno.h:24:1: + EILSEQ = 84 // errno.h:67:1: + EINPROGRESS = 115 // errno.h:98:1: + EINTR = 4 // errno-base.h:8:1: + EINVAL = 22 // errno-base.h:26:1: + EIO = 5 // errno-base.h:9:1: + EISCONN = 106 // errno.h:89:1: + EISDIR = 21 // errno-base.h:25:1: + EISNAM = 120 // errno.h:103:1: + EKEYEXPIRED = 127 // errno.h:111:1: + EKEYREJECTED = 129 // errno.h:113:1: + EKEYREVOKED = 128 // errno.h:112:1: + EL2HLT = 51 // errno.h:32:1: + EL2NSYNC = 45 // errno.h:26:1: + EL3HLT = 46 // errno.h:27:1: + EL3RST = 47 // errno.h:28:1: + ELIBACC = 79 // errno.h:62:1: + ELIBBAD = 80 // errno.h:63:1: + ELIBEXEC = 83 // errno.h:66:1: + ELIBMAX = 82 // errno.h:65:1: + ELIBSCN = 81 // errno.h:64:1: + ELNRNG = 48 // errno.h:29:1: + ELOOP = 40 // errno.h:21:1: + EMEDIUMTYPE = 124 // errno.h:108:1: + EMFILE = 24 // errno-base.h:28:1: + EMLINK = 31 // errno-base.h:35:1: + EMSGSIZE = 90 // errno.h:73:1: + EMULTIHOP = 72 // errno.h:55:1: + ENAMETOOLONG = 36 // errno.h:8:1: + ENAVAIL = 119 // errno.h:102:1: + ENETDOWN = 100 // errno.h:83:1: + ENETRESET = 102 // errno.h:85:1: + ENETUNREACH = 101 // errno.h:84:1: + ENFILE = 23 // errno-base.h:27:1: + ENOANO = 55 // errno.h:36:1: + ENOBUFS = 105 // errno.h:88:1: + ENOCSI = 50 // errno.h:31:1: + ENODATA = 61 // errno.h:44:1: + ENODEV = 19 // errno-base.h:23:1: + ENOENT = 2 // errno-base.h:6:1: + ENOEXEC = 8 // errno-base.h:12:1: + ENOKEY = 126 // errno.h:110:1: + ENOLCK = 37 // errno.h:9:1: + ENOLINK = 67 // errno.h:50:1: + ENOMEDIUM = 123 // errno.h:107:1: + ENOMEM = 12 // errno-base.h:16:1: + ENOMSG = 42 // errno.h:23:1: + ENONET = 64 // errno.h:47:1: + ENOPKG = 65 // errno.h:48:1: + ENOPROTOOPT = 92 // errno.h:75:1: + ENOSPC = 28 // errno-base.h:32:1: + ENOSR = 63 // errno.h:46:1: + ENOSTR = 60 // errno.h:43:1: + ENOSYS = 38 // errno.h:18:1: + ENOTBLK = 15 // errno-base.h:19:1: + ENOTCONN = 107 // errno.h:90:1: + ENOTDIR = 20 // errno-base.h:24:1: + ENOTEMPTY = 39 // errno.h:20:1: + ENOTNAM = 118 // errno.h:101:1: + ENOTRECOVERABLE = 131 // errno.h:117:1: + ENOTSOCK = 88 // errno.h:71:1: + ENOTSUP = 95 // errno.h:30:1: + ENOTTY = 25 // errno-base.h:29:1: + ENOTUNIQ = 76 // 
errno.h:59:1: + ENXIO = 6 // errno-base.h:10:1: + EOPNOTSUPP = 95 // errno.h:78:1: + EOVERFLOW = 75 // errno.h:58:1: + EOWNERDEAD = 130 // errno.h:116:1: + EPERM = 1 // errno-base.h:5:1: + EPFNOSUPPORT = 96 // errno.h:79:1: + EPIPE = 32 // errno-base.h:36:1: + EPROTO = 71 // errno.h:54:1: + EPROTONOSUPPORT = 93 // errno.h:76:1: + EPROTOTYPE = 91 // errno.h:74:1: + ERANGE = 34 // errno-base.h:38:1: + EREMCHG = 78 // errno.h:61:1: + EREMOTE = 66 // errno.h:49:1: + EREMOTEIO = 121 // errno.h:104:1: + ERESTART = 85 // errno.h:68:1: + ERFKILL = 132 // errno.h:119:1: + EROFS = 30 // errno-base.h:34:1: + ESHUTDOWN = 108 // errno.h:91:1: + ESOCKTNOSUPPORT = 94 // errno.h:77:1: + ESPIPE = 29 // errno-base.h:33:1: + ESRCH = 3 // errno-base.h:7:1: + ESRMNT = 69 // errno.h:52:1: + ESTALE = 116 // errno.h:99:1: + ESTRPIPE = 86 // errno.h:69:1: + ETIME = 62 // errno.h:45:1: + ETIMEDOUT = 110 // errno.h:93:1: + ETOOMANYREFS = 109 // errno.h:92:1: + ETXTBSY = 26 // errno-base.h:30:1: + EUCLEAN = 117 // errno.h:100:1: + EUNATCH = 49 // errno.h:30:1: + EUSERS = 87 // errno.h:70:1: + EWOULDBLOCK = 11 // errno.h:22:1: + EXDEV = 18 // errno-base.h:22:1: + EXFULL = 54 // errno.h:35:1: + X_ASM_GENERIC_ERRNO_BASE_H = 0 // errno-base.h:3:1: + X_ASM_GENERIC_ERRNO_H = 0 // errno.h:3:1: + X_ATFILE_SOURCE = 1 // features.h:342:1: + X_BITS_ERRNO_H = 1 // errno.h:20:1: + X_DEFAULT_SOURCE = 1 // features.h:227:1: + X_ERRNO_H = 1 // errno.h:23:1: + X_FEATURES_H = 1 // features.h:19:1: + X_FILE_OFFSET_BITS = 64 // :25:1: + X_LP64 = 1 // :284:1: + X_POSIX_C_SOURCE = 200809 // features.h:281:1: + X_POSIX_SOURCE = 1 // features.h:279:1: + X_STDC_PREDEF_H = 1 // :162:1: + X_SYS_CDEFS_H = 1 // cdefs.h:19:1: + Linux = 1 // :231:1: + Unix = 1 // :177:1: +) + +type Ptrdiff_t = int64 /* :3:26 */ + +type Size_t = uint64 /* :9:23 */ + +type Wchar_t = int32 /* :15:24 */ + +type X__int128_t = struct { + Flo int64 + Fhi int64 +} /* :21:43 */ // must match modernc.org/mathutil.Int128 +type X__uint128_t = struct { + Flo uint64 + Fhi uint64 +} /* :22:44 */ // must match modernc.org/mathutil.Int128 + +type X__builtin_va_list = uintptr /* :46:14 */ +type X__float128 = float64 /* :47:21 */ + +var _ int8 /* gen.c:2:13: */ diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/fcntl/capi_linux_loong64.go temporal-1.22.5/src/vendor/modernc.org/libc/fcntl/capi_linux_loong64.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/fcntl/capi_linux_loong64.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/fcntl/capi_linux_loong64.go 2024-02-23 09:46:15.000000000 +0000 @@ -0,0 +1,5 @@ +// Code generated by 'ccgo fcntl/gen.c -crt-import-path -export-defines -export-enums -export-externs X -export-fields F -export-structs -export-typedefs -header -hide _OSSwapInt16,_OSSwapInt32,_OSSwapInt64 -ignore-unsupported-alignment -o fcntl/fcntl_linux_amd64.go -pkgname fcntl', DO NOT EDIT. 
+ +package fcntl + +var CAPI = map[string]struct{}{} diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/fcntl/fcntl_linux_loong64.go temporal-1.22.5/src/vendor/modernc.org/libc/fcntl/fcntl_linux_loong64.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/fcntl/fcntl_linux_loong64.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/fcntl/fcntl_linux_loong64.go 2024-02-23 09:46:15.000000000 +0000 @@ -0,0 +1,1087 @@ +// Code generated by 'ccgo fcntl/gen.c -crt-import-path "" -export-defines "" -export-enums "" -export-externs X -export-fields F -export-structs "" -export-typedefs "" -header -hide _OSSwapInt16,_OSSwapInt32,_OSSwapInt64 -ignore-unsupported-alignment -o fcntl/fcntl_linux_amd64.go -pkgname fcntl', DO NOT EDIT. + +package fcntl + +import ( + "math" + "reflect" + "sync/atomic" + "unsafe" +) + +var _ = math.Pi +var _ reflect.Kind +var _ atomic.Value +var _ unsafe.Pointer + +const ( + AT_EACCESS = 0x200 // fcntl-linux.h:388:1: + AT_FDCWD = -100 // fcntl-linux.h:371:1: + AT_REMOVEDIR = 0x200 // fcntl-linux.h:375:1: + AT_SYMLINK_FOLLOW = 0x400 // fcntl-linux.h:377:1: + AT_SYMLINK_NOFOLLOW = 0x100 // fcntl-linux.h:374:1: + FAPPEND = 1024 // fcntl-linux.h:304:1: + FASYNC = 8192 // fcntl-linux.h:306:1: + FD_CLOEXEC = 1 // fcntl-linux.h:219:1: + FFSYNC = 1052672 // fcntl-linux.h:305:1: + FNDELAY = 2048 // fcntl-linux.h:308:1: + FNONBLOCK = 2048 // fcntl-linux.h:307:1: + F_DUPFD = 0 // fcntl-linux.h:167:1: + F_DUPFD_CLOEXEC = 1030 // fcntl-linux.h:214:1: + F_EXLCK = 4 // fcntl-linux.h:231:1: + F_GETFD = 1 // fcntl-linux.h:168:1: + F_GETFL = 3 // fcntl-linux.h:170:1: + F_GETLK = 5 // fcntl-linux.h:109:1: + F_GETLK64 = 5 // fcntl.h:29:1: + F_GETOWN = 9 // fcntl-linux.h:180:1: + F_LOCK = 1 // fcntl.h:238:1: + F_OK = 0 // fcntl.h:131:1: + F_RDLCK = 0 // fcntl-linux.h:223:1: + F_SETFD = 2 // fcntl-linux.h:169:1: + F_SETFL = 4 // fcntl-linux.h:171:1: + F_SETLK = 6 // fcntl-linux.h:110:1: + F_SETLK64 = 6 // fcntl.h:30:1: + F_SETLKW = 7 // fcntl-linux.h:111:1: + F_SETLKW64 = 7 // fcntl.h:31:1: + F_SETOWN = 8 // fcntl-linux.h:179:1: + F_SHLCK = 8 // fcntl-linux.h:232:1: + F_TEST = 3 // fcntl.h:240:1: + F_TLOCK = 2 // fcntl.h:239:1: + F_ULOCK = 0 // fcntl.h:237:1: + F_UNLCK = 2 // fcntl-linux.h:225:1: + F_WRLCK = 1 // fcntl-linux.h:224:1: + LOCK_EX = 2 // fcntl-linux.h:238:1: + LOCK_NB = 4 // fcntl-linux.h:239:1: + LOCK_SH = 1 // fcntl-linux.h:237:1: + LOCK_UN = 8 // fcntl-linux.h:241:1: + O_ACCMODE = 0003 // fcntl-linux.h:42:1: + O_APPEND = 02000 // fcntl-linux.h:59:1: + O_ASYNC = 020000 // fcntl-linux.h:72:1: + O_CLOEXEC = 524288 // fcntl-linux.h:144:1: + O_CREAT = 0100 // fcntl-linux.h:47:1: + O_DIRECTORY = 65536 // fcntl-linux.h:142:1: + O_DSYNC = 4096 // fcntl-linux.h:158:1: + O_EXCL = 0200 // fcntl-linux.h:50:1: + O_FSYNC = 1052672 // fcntl-linux.h:70:1: + O_NDELAY = 2048 // fcntl-linux.h:65:1: + O_NOCTTY = 0400 // fcntl-linux.h:53:1: + O_NOFOLLOW = 131072 // fcntl-linux.h:143:1: + O_NONBLOCK = 04000 // fcntl-linux.h:62:1: + O_RDONLY = 00 // fcntl-linux.h:43:1: + O_RDWR = 02 // fcntl-linux.h:45:1: + O_RSYNC = 1052672 // fcntl-linux.h:162:1: + O_SYNC = 04010000 // fcntl-linux.h:68:1: + O_TRUNC = 01000 // fcntl-linux.h:56:1: + O_WRONLY = 01 // fcntl-linux.h:44:1: + POSIX_FADV_DONTNEED = 4 // fcntl-linux.h:321:1: + POSIX_FADV_NOREUSE = 5 // fcntl-linux.h:322:1: + POSIX_FADV_NORMAL = 0 // fcntl-linux.h:317:1: + POSIX_FADV_RANDOM = 1 // fcntl-linux.h:318:1: + POSIX_FADV_SEQUENTIAL = 2 // fcntl-linux.h:319:1: + POSIX_FADV_WILLNEED = 3 // fcntl-linux.h:320:1: + 
R_OK = 4 // fcntl.h:128:1: + SEEK_CUR = 1 // fcntl.h:138:1: + SEEK_END = 2 // fcntl.h:139:1: + SEEK_SET = 0 // fcntl.h:137:1: + S_IFBLK = 24576 // fcntl.h:83:1: + S_IFCHR = 8192 // fcntl.h:82:1: + S_IFDIR = 16384 // fcntl.h:81:1: + S_IFIFO = 4096 // fcntl.h:86:1: + S_IFLNK = 40960 // fcntl.h:89:1: + S_IFMT = 61440 // fcntl.h:80:1: + S_IFREG = 32768 // fcntl.h:84:1: + S_IFSOCK = 49152 // fcntl.h:92:1: + S_IRGRP = 32 // fcntl.h:111:1: + S_IROTH = 4 // fcntl.h:117:1: + S_IRUSR = 256 // fcntl.h:105:1: + S_IRWXG = 56 // fcntl.h:115:1: + S_IRWXO = 7 // fcntl.h:121:1: + S_IRWXU = 448 // fcntl.h:109:1: + S_ISGID = 1024 // fcntl.h:98:1: + S_ISUID = 2048 // fcntl.h:97:1: + S_ISVTX = 512 // fcntl.h:102:1: + S_IWGRP = 16 // fcntl.h:112:1: + S_IWOTH = 2 // fcntl.h:118:1: + S_IWUSR = 128 // fcntl.h:106:1: + S_IXGRP = 8 // fcntl.h:113:1: + S_IXOTH = 1 // fcntl.h:119:1: + S_IXUSR = 64 // fcntl.h:107:1: + UTIME_NOW = 1073741823 // stat.h:206:1: + UTIME_OMIT = 1073741822 // stat.h:207:1: + W_OK = 2 // fcntl.h:129:1: + X_OK = 1 // fcntl.h:130:1: + X_ATFILE_SOURCE = 1 // features.h:342:1: + X_BITS_ENDIANNESS_H = 1 // endianness.h:2:1: + X_BITS_ENDIAN_H = 1 // endian.h:20:1: + X_BITS_STAT_H = 1 // stat.h:23:1: + X_BITS_TIME64_H = 1 // time64.h:24:1: + X_BITS_TYPESIZES_H = 1 // typesizes.h:24:1: + X_BITS_TYPES_H = 1 // types.h:24:1: + X_DEFAULT_SOURCE = 1 // features.h:227:1: + X_FCNTL_H = 1 // fcntl.h:23:1: + X_FEATURES_H = 1 // features.h:19:1: + X_FILE_OFFSET_BITS = 64 // :25:1: + X_LP64 = 1 // :284:1: + X_MKNOD_VER_LINUX = 0 // stat.h:41:1: + X_POSIX_C_SOURCE = 200809 // features.h:281:1: + X_POSIX_SOURCE = 1 // features.h:279:1: + X_STATBUF_ST_BLKSIZE = 0 // stat.h:172:1: + X_STATBUF_ST_NSEC = 0 // stat.h:175:1: + X_STATBUF_ST_RDEV = 0 // stat.h:173:1: + X_STAT_VER = 1 // stat.h:44:1: + X_STAT_VER_KERNEL = 0 // stat.h:37:1: + X_STAT_VER_LINUX = 1 // stat.h:38:1: + X_STDC_PREDEF_H = 1 // :162:1: + X_STRUCT_TIMESPEC = 1 // struct_timespec.h:3:1: + X_SYS_CDEFS_H = 1 // cdefs.h:19:1: + Linux = 1 // :231:1: + Unix = 1 // :177:1: +) + +type Ptrdiff_t = int64 /* :3:26 */ + +type Size_t = uint64 /* :9:23 */ + +type Wchar_t = int32 /* :15:24 */ + +type X__int128_t = struct { + Flo int64 + Fhi int64 +} /* :21:43 */ // must match modernc.org/mathutil.Int128 +type X__uint128_t = struct { + Flo uint64 + Fhi uint64 +} /* :22:44 */ // must match modernc.org/mathutil.Int128 + +type X__builtin_va_list = uintptr /* :46:14 */ +type X__float128 = float64 /* :47:21 */ + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// POSIX Standard: 6.5 File Control Operations + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. 
+// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// These are defined by the user (or the compiler) +// to specify the desired environment: +// +// __STRICT_ANSI__ ISO Standard C. +// _ISOC99_SOURCE Extensions to ISO C89 from ISO C99. +// _ISOC11_SOURCE Extensions to ISO C99 from ISO C11. +// _ISOC2X_SOURCE Extensions to ISO C99 from ISO C2X. +// __STDC_WANT_LIB_EXT2__ +// Extensions to ISO C99 from TR 27431-2:2010. +// __STDC_WANT_IEC_60559_BFP_EXT__ +// Extensions to ISO C11 from TS 18661-1:2014. +// __STDC_WANT_IEC_60559_FUNCS_EXT__ +// Extensions to ISO C11 from TS 18661-4:2015. +// __STDC_WANT_IEC_60559_TYPES_EXT__ +// Extensions to ISO C11 from TS 18661-3:2015. +// +// _POSIX_SOURCE IEEE Std 1003.1. +// _POSIX_C_SOURCE If ==1, like _POSIX_SOURCE; if >=2 add IEEE Std 1003.2; +// if >=199309L, add IEEE Std 1003.1b-1993; +// if >=199506L, add IEEE Std 1003.1c-1995; +// if >=200112L, all of IEEE 1003.1-2004 +// if >=200809L, all of IEEE 1003.1-2008 +// _XOPEN_SOURCE Includes POSIX and XPG things. Set to 500 if +// Single Unix conformance is wanted, to 600 for the +// sixth revision, to 700 for the seventh revision. +// _XOPEN_SOURCE_EXTENDED XPG things and X/Open Unix extensions. +// _LARGEFILE_SOURCE Some more functions for correct standard I/O. +// _LARGEFILE64_SOURCE Additional functionality from LFS for large files. +// _FILE_OFFSET_BITS=N Select default filesystem interface. +// _ATFILE_SOURCE Additional *at interfaces. +// _GNU_SOURCE All of the above, plus GNU extensions. +// _DEFAULT_SOURCE The default set of features (taking precedence over +// __STRICT_ANSI__). +// +// _FORTIFY_SOURCE Add security hardening to many library functions. +// Set to 1 or 2; 2 performs stricter checks than 1. +// +// _REENTRANT, _THREAD_SAFE +// Obsolete; equivalent to _POSIX_C_SOURCE=199506L. +// +// The `-ansi' switch to the GNU C compiler, and standards conformance +// options such as `-std=c99', define __STRICT_ANSI__. If none of +// these are defined, or if _DEFAULT_SOURCE is defined, the default is +// to have _POSIX_SOURCE set to one and _POSIX_C_SOURCE set to +// 200809L, as well as enabling miscellaneous functions from BSD and +// SVID. If more than one of these are defined, they accumulate. For +// example __STRICT_ANSI__, _POSIX_SOURCE and _POSIX_C_SOURCE together +// give you ISO C, 1003.1, and 1003.2, but nothing else. +// +// These are defined by this file and are used by the +// header files to decide what to declare or define: +// +// __GLIBC_USE (F) Define things from feature set F. This is defined +// to 1 or 0; the subsequent macros are either defined +// or undefined, and those tests should be moved to +// __GLIBC_USE. +// __USE_ISOC11 Define ISO C11 things. +// __USE_ISOC99 Define ISO C99 things. +// __USE_ISOC95 Define ISO C90 AMD1 (C95) things. +// __USE_ISOCXX11 Define ISO C++11 things. +// __USE_POSIX Define IEEE Std 1003.1 things. 
+// __USE_POSIX2 Define IEEE Std 1003.2 things. +// __USE_POSIX199309 Define IEEE Std 1003.1, and .1b things. +// __USE_POSIX199506 Define IEEE Std 1003.1, .1b, .1c and .1i things. +// __USE_XOPEN Define XPG things. +// __USE_XOPEN_EXTENDED Define X/Open Unix things. +// __USE_UNIX98 Define Single Unix V2 things. +// __USE_XOPEN2K Define XPG6 things. +// __USE_XOPEN2KXSI Define XPG6 XSI things. +// __USE_XOPEN2K8 Define XPG7 things. +// __USE_XOPEN2K8XSI Define XPG7 XSI things. +// __USE_LARGEFILE Define correct standard I/O things. +// __USE_LARGEFILE64 Define LFS things with separate names. +// __USE_FILE_OFFSET64 Define 64bit interface as default. +// __USE_MISC Define things from 4.3BSD or System V Unix. +// __USE_ATFILE Define *at interfaces and AT_* constants for them. +// __USE_GNU Define GNU extensions. +// __USE_FORTIFY_LEVEL Additional security measures used, according to level. +// +// The macros `__GNU_LIBRARY__', `__GLIBC__', and `__GLIBC_MINOR__' are +// defined by this file unconditionally. `__GNU_LIBRARY__' is provided +// only for compatibility. All new code should use the other symbols +// to test for features. +// +// All macros listed above as possibly being defined by this file are +// explicitly undefined if they are not explicitly defined. +// Feature-test macros that are not defined by the user or compiler +// but are implied by the other feature-test macros defined (or by the +// lack of any definitions) are defined by the file. +// +// ISO C feature test macros depend on the definition of the macro +// when an affected header is included, not when the first system +// header is included, and so they are handled in +// , which does not have a multiple include +// guard. Feature test macros that can be handled from the first +// system header included are handled here. + +// Undefine everything, so we get a clean slate. + +// Suppress kernel-name space pollution unless user expressedly asks +// for it. + +// Convenience macro to test the version of gcc. +// Use like this: +// #if __GNUC_PREREQ (2,8) +// ... code requiring gcc 2.8 or later ... +// #endif +// Note: only works for GCC 2.0 and later, because __GNUC_MINOR__ was +// added in 2.0. + +// Similarly for clang. Features added to GCC after version 4.2 may +// or may not also be available in clang, and clang's definitions of +// __GNUC(_MINOR)__ are fixed at 4 and 2 respectively. Not all such +// features can be queried via __has_extension/__has_feature. + +// Whether to use feature set F. + +// _BSD_SOURCE and _SVID_SOURCE are deprecated aliases for +// _DEFAULT_SOURCE. If _DEFAULT_SOURCE is present we do not +// issue a warning; the expectation is that the source is being +// transitioned to use the new macro. + +// If _GNU_SOURCE was defined by the user, turn on all the other features. + +// If nothing (other than _GNU_SOURCE and _DEFAULT_SOURCE) is defined, +// define _DEFAULT_SOURCE. + +// This is to enable the ISO C2X extension. + +// This is to enable the ISO C11 extension. + +// This is to enable the ISO C99 extension. + +// This is to enable the ISO C90 Amendment 1:1995 extension. + +// If none of the ANSI/POSIX macros are defined, or if _DEFAULT_SOURCE +// is defined, use POSIX.1-2008 (or another version depending on +// _XOPEN_SOURCE). + +// Some C libraries once required _REENTRANT and/or _THREAD_SAFE to be +// defined in all multithreaded code. GNU libc has not required this +// for many years. 
We now treat them as compatibility synonyms for +// _POSIX_C_SOURCE=199506L, which is the earliest level of POSIX with +// comprehensive support for multithreaded code. Using them never +// lowers the selected level of POSIX conformance, only raises it. + +// The function 'gets' existed in C89, but is impossible to use +// safely. It has been removed from ISO C11 and ISO C++14. Note: for +// compatibility with various implementations of , this test +// must consider only the value of __cplusplus when compiling C++. + +// GNU formerly extended the scanf functions with modified format +// specifiers %as, %aS, and %a[...] that allocate a buffer for the +// input using malloc. This extension conflicts with ISO C99, which +// defines %a as a standalone format specifier that reads a floating- +// point number; moreover, POSIX.1-2008 provides the same feature +// using the modifier letter 'm' instead (%ms, %mS, %m[...]). +// +// We now follow C99 unless GNU extensions are active and the compiler +// is specifically in C89 or C++98 mode (strict or not). For +// instance, with GCC, -std=gnu11 will have C99-compliant scanf with +// or without -D_GNU_SOURCE, but -std=c89 -D_GNU_SOURCE will have the +// old extension. + +// Get definitions of __STDC_* predefined macros, if the compiler has +// not preincluded this header automatically. +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// This macro indicates that the installed library is the GNU C Library. +// For historic reasons the value now is 6 and this will stay from now +// on. The use of this variable is deprecated. Use __GLIBC__ and +// __GLIBC_MINOR__ now (see below) when you want to test for a specific +// GNU C library version and use the values in to get +// the sonames of the shared libraries. + +// Major and minor version number of the GNU C library package. Use +// these macros to test for features in specific releases. + +// This is here only because every header file already includes this one. +// Copyright (C) 1992-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// We are almost always included from features.h. 
+ +// The GNU libc does not support any K&R compilers or the traditional mode +// of ISO C compilers anymore. Check for some of the combinations not +// anymore supported. + +// Some user header file might have defined this before. + +// All functions, except those with callbacks or those that +// synchronize memory, are leaf functions. + +// GCC can always grok prototypes. For C++ programs we add throw() +// to help it optimize the function calls. But this works only with +// gcc 2.8.x and egcs. For gcc 3.2 and up we even mark C functions +// as non-throwing using a function attribute since programs can use +// the -fexceptions options for C code as well. + +// Compilers that are not clang may object to +// #if defined __clang__ && __has_extension(...) +// even though they do not need to evaluate the right-hand side of the &&. + +// These two macros are not used in glibc anymore. They are kept here +// only because some other projects expect the macros to be defined. + +// For these things, GCC behaves the ANSI way normally, +// and the non-ANSI way under -traditional. + +// This is not a typedef so `const __ptr_t' does the right thing. + +// C++ needs to know that types and declarations are C, not C++. + +// Fortify support. + +// Support for flexible arrays. +// Headers that should use flexible arrays only if they're "real" +// (e.g. only if they won't affect sizeof()) should test +// #if __glibc_c99_flexarr_available. + +// __asm__ ("xyz") is used throughout the headers to rename functions +// at the assembly language level. This is wrapped by the __REDIRECT +// macro, in order to support compilers that can do this some other +// way. When compilers don't support asm-names at all, we have to do +// preprocessor tricks instead (which don't have exactly the right +// semantics, but it's the best we can do). +// +// Example: +// int __REDIRECT(setpgrp, (__pid_t pid, __pid_t pgrp), setpgid); + +// +// #elif __SOME_OTHER_COMPILER__ +// +// # define __REDIRECT(name, proto, alias) name proto; _Pragma("let " #name " = " #alias) + +// GCC has various useful declarations that can be made with the +// `__attribute__' syntax. All of the ways we use this do fine if +// they are omitted for compilers that don't understand it. + +// At some point during the gcc 2.96 development the `malloc' attribute +// for functions was introduced. We don't want to use it unconditionally +// (although this would be possible) since it generates warnings. + +// Tell the compiler which arguments to an allocation function +// indicate the size of the allocation. + +// At some point during the gcc 2.96 development the `pure' attribute +// for functions was introduced. We don't want to use it unconditionally +// (although this would be possible) since it generates warnings. + +// This declaration tells the compiler that the value is constant. + +// At some point during the gcc 3.1 development the `used' attribute +// for functions was introduced. We don't want to use it unconditionally +// (although this would be possible) since it generates warnings. + +// Since version 3.2, gcc allows marking deprecated functions. + +// Since version 4.5, gcc also allows one to specify the message printed +// when a deprecated function is used. clang claims to be gcc 4.2, but +// may also support this feature. + +// At some point during the gcc 2.8 development the `format_arg' attribute +// for functions was introduced. We don't want to use it unconditionally +// (although this would be possible) since it generates warnings. 
+// If several `format_arg' attributes are given for the same function, in +// gcc-3.0 and older, all but the last one are ignored. In newer gccs, +// all designated arguments are considered. + +// At some point during the gcc 2.97 development the `strfmon' format +// attribute for functions was introduced. We don't want to use it +// unconditionally (although this would be possible) since it +// generates warnings. + +// The nonull function attribute allows to mark pointer parameters which +// must not be NULL. + +// If fortification mode, we warn about unused results of certain +// function calls which can lead to problems. + +// Forces a function to be always inlined. +// The Linux kernel defines __always_inline in stddef.h (283d7573), and +// it conflicts with this definition. Therefore undefine it first to +// allow either header to be included first. + +// Associate error messages with the source location of the call site rather +// than with the source location inside the function. + +// GCC 4.3 and above with -std=c99 or -std=gnu99 implements ISO C99 +// inline semantics, unless -fgnu89-inline is used. Using __GNUC_STDC_INLINE__ +// or __GNUC_GNU_INLINE is not a good enough check for gcc because gcc versions +// older than 4.3 may define these macros and still not guarantee GNU inlining +// semantics. +// +// clang++ identifies itself as gcc-4.2, but has support for GNU inlining +// semantics, that can be checked for by using the __GNUC_STDC_INLINE_ and +// __GNUC_GNU_INLINE__ macro definitions. + +// GCC 4.3 and above allow passing all anonymous arguments of an +// __extern_always_inline function to some other vararg function. + +// It is possible to compile containing GCC extensions even if GCC is +// run in pedantic mode if the uses are carefully marked using the +// `__extension__' keyword. But this is not generally available before +// version 2.8. + +// __restrict is known in EGCS 1.2 and above. + +// ISO C99 also allows to declare arrays as non-overlapping. The syntax is +// array_name[restrict] +// GCC 3.1 supports this. + +// Describes a char array whose address can safely be passed as the first +// argument to strncpy and strncat, as the char array is not necessarily +// a NUL-terminated string. + +// Undefine (also defined in libc-symbols.h). +// Copies attributes from the declaration or type referenced by +// the argument. + +// Determine the wordsize from the preprocessor defines. + +// Both x86-64 and x32 use the 64-bit system call interface. +// Properties of long double type. ldbl-96 version. +// Copyright (C) 2016-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// long double is distinct from double, so there is nothing to +// define here. + +// __glibc_macro_warning (MESSAGE) issues warning MESSAGE. This is +// intended for use in preprocessor macros. 
+// +// Note: MESSAGE must be a _single_ string; concatenation of string +// literals is not supported. + +// Generic selection (ISO C11) is a C-only feature, available in GCC +// since version 4.9. Previous versions do not provide generic +// selection, even though they might set __STDC_VERSION__ to 201112L, +// when in -std=c11 mode. Thus, we must check for !defined __GNUC__ +// when testing __STDC_VERSION__ for generic selection support. +// On the other hand, Clang also defines __GNUC__, so a clang-specific +// check is required to enable the use of generic selection. + +// If we don't have __REDIRECT, prototypes will be missing if +// __USE_FILE_OFFSET64 but not __USE_LARGEFILE[64]. + +// Decide whether we can define 'extern inline' functions in headers. + +// This is here only because every header file already includes this one. +// Get the definitions of all the appropriate `__stub_FUNCTION' symbols. +// contains `#define __stub_FUNCTION' when FUNCTION is a stub +// that will always return failure (and set errno to ENOSYS). +// This file is automatically generated. +// This file selects the right generated file of `__stub_FUNCTION' macros +// based on the architecture being compiled for. + +// This file is automatically generated. +// It defines a symbol `__stub_FUNCTION' for each function +// in the C library which is a stub, meaning it will fail +// every time called, usually setting errno to ENOSYS. + +// This must be early so can define types winningly. + +// Get __mode_t, __dev_t and __off_t . +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Determine the wordsize from the preprocessor defines. + +// Both x86-64 and x32 use the 64-bit system call interface. +// Bit size of the time_t type at glibc build time, x86-64 and x32 case. +// Copyright (C) 2018-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. 
+// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// For others, time size is word size. + +// Convenience types. +type X__u_char = uint8 /* types.h:31:23 */ +type X__u_short = uint16 /* types.h:32:28 */ +type X__u_int = uint32 /* types.h:33:22 */ +type X__u_long = uint64 /* types.h:34:27 */ + +// Fixed-size types, underlying types depend on word size and compiler. +type X__int8_t = int8 /* types.h:37:21 */ +type X__uint8_t = uint8 /* types.h:38:23 */ +type X__int16_t = int16 /* types.h:39:26 */ +type X__uint16_t = uint16 /* types.h:40:28 */ +type X__int32_t = int32 /* types.h:41:20 */ +type X__uint32_t = uint32 /* types.h:42:22 */ +type X__int64_t = int64 /* types.h:44:25 */ +type X__uint64_t = uint64 /* types.h:45:27 */ + +// Smallest types with at least a given width. +type X__int_least8_t = X__int8_t /* types.h:52:18 */ +type X__uint_least8_t = X__uint8_t /* types.h:53:19 */ +type X__int_least16_t = X__int16_t /* types.h:54:19 */ +type X__uint_least16_t = X__uint16_t /* types.h:55:20 */ +type X__int_least32_t = X__int32_t /* types.h:56:19 */ +type X__uint_least32_t = X__uint32_t /* types.h:57:20 */ +type X__int_least64_t = X__int64_t /* types.h:58:19 */ +type X__uint_least64_t = X__uint64_t /* types.h:59:20 */ + +// quad_t is also 64 bits. +type X__quad_t = int64 /* types.h:63:18 */ +type X__u_quad_t = uint64 /* types.h:64:27 */ + +// Largest integral types. +type X__intmax_t = int64 /* types.h:72:18 */ +type X__uintmax_t = uint64 /* types.h:73:27 */ + +// The machine-dependent file defines __*_T_TYPE +// macros for each of the OS types we define below. The definitions +// of those macros must use the following macros for underlying types. +// We define __S_TYPE and __U_TYPE for the signed and unsigned +// variants of each of the following integer types on this machine. +// +// 16 -- "natural" 16-bit type (always short) +// 32 -- "natural" 32-bit type (always int) +// 64 -- "natural" 64-bit type (long or long long) +// LONG32 -- 32-bit type, traditionally long +// QUAD -- 64-bit type, traditionally long long +// WORD -- natural type of __WORDSIZE bits (int or long) +// LONGWORD -- type of __WORDSIZE bits, traditionally long +// +// We distinguish WORD/LONGWORD, 32/LONG32, and 64/QUAD so that the +// conventional uses of `long' or `long long' type modifiers match the +// types we define, even when a less-adorned type would be the same size. +// This matters for (somewhat) portably writing printf/scanf formats for +// these types, where using the appropriate l or ll format modifiers can +// make the typedefs and the formats match up across all GNU platforms. If +// we used `long' when it's 64 bits where `long long' is expected, then the +// compiler would warn about the formats not matching the argument types, +// and the programmer changing them to shut up the compiler would break the +// program's portability. 
+// +// Here we assume what is presently the case in all the GCC configurations +// we support: long long is always 64 bits, long is always word/address size, +// and int is always 32 bits. + +// No need to mark the typedef with __extension__. +// bits/typesizes.h -- underlying types for *_t. Linux/x86-64 version. +// Copyright (C) 2012-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// See for the meaning of these macros. This file exists so +// that need not vary across different GNU platforms. + +// X32 kernel interface is 64-bit. + +// Tell the libc code that off_t and off64_t are actually the same type +// for all ABI purposes, even if possibly expressed as different base types +// for C type-checking purposes. + +// Same for ino_t and ino64_t. + +// And for __rlim_t and __rlim64_t. + +// And for fsblkcnt_t, fsblkcnt64_t, fsfilcnt_t and fsfilcnt64_t. + +// Number of descriptors that can fit in an `fd_set'. + +// bits/time64.h -- underlying types for __time64_t. Generic version. +// Copyright (C) 2018-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Define __TIME64_T_TYPE so that it is always a 64-bit type. + +// If we already have 64-bit time type then use it. + +type X__dev_t = uint64 /* types.h:145:25 */ // Type of device numbers. +type X__uid_t = uint32 /* types.h:146:25 */ // Type of user identifications. +type X__gid_t = uint32 /* types.h:147:25 */ // Type of group identifications. +type X__ino_t = uint64 /* types.h:148:25 */ // Type of file serial numbers. +type X__ino64_t = uint64 /* types.h:149:27 */ // Type of file serial numbers (LFS). +type X__mode_t = uint32 /* types.h:150:26 */ // Type of file attribute bitmasks. +type X__nlink_t = uint64 /* types.h:151:27 */ // Type of file link counts. +type X__off_t = int64 /* types.h:152:25 */ // Type of file sizes and offsets. +type X__off64_t = int64 /* types.h:153:27 */ // Type of file sizes and offsets (LFS). +type X__pid_t = int32 /* types.h:154:25 */ // Type of process identifications. +type X__fsid_t = struct{ F__val [2]int32 } /* types.h:155:26 */ // Type of file system IDs. 
+type X__clock_t = int64 /* types.h:156:27 */ // Type of CPU usage counts. +type X__rlim_t = uint64 /* types.h:157:26 */ // Type for resource measurement. +type X__rlim64_t = uint64 /* types.h:158:28 */ // Type for resource measurement (LFS). +type X__id_t = uint32 /* types.h:159:24 */ // General type for IDs. +type X__time_t = int64 /* types.h:160:26 */ // Seconds since the Epoch. +type X__useconds_t = uint32 /* types.h:161:30 */ // Count of microseconds. +type X__suseconds_t = int64 /* types.h:162:31 */ // Signed count of microseconds. + +type X__daddr_t = int32 /* types.h:164:27 */ // The type of a disk address. +type X__key_t = int32 /* types.h:165:25 */ // Type of an IPC key. + +// Clock ID used in clock and timer functions. +type X__clockid_t = int32 /* types.h:168:29 */ + +// Timer ID returned by `timer_create'. +type X__timer_t = uintptr /* types.h:171:12 */ + +// Type to represent block size. +type X__blksize_t = int64 /* types.h:174:29 */ + +// Types from the Large File Support interface. + +// Type to count number of disk blocks. +type X__blkcnt_t = int64 /* types.h:179:28 */ +type X__blkcnt64_t = int64 /* types.h:180:30 */ + +// Type to count file system blocks. +type X__fsblkcnt_t = uint64 /* types.h:183:30 */ +type X__fsblkcnt64_t = uint64 /* types.h:184:32 */ + +// Type to count file system nodes. +type X__fsfilcnt_t = uint64 /* types.h:187:30 */ +type X__fsfilcnt64_t = uint64 /* types.h:188:32 */ + +// Type of miscellaneous file system fields. +type X__fsword_t = int64 /* types.h:191:28 */ + +type X__ssize_t = int64 /* types.h:193:27 */ // Type of a byte count, or error. + +// Signed long type used in system calls. +type X__syscall_slong_t = int64 /* types.h:196:33 */ +// Unsigned long type used in system calls. +type X__syscall_ulong_t = uint64 /* types.h:198:33 */ + +// These few don't really vary by system, they always correspond +// +// to one of the other defined types. +type X__loff_t = X__off64_t /* types.h:202:19 */ // Type of file sizes and offsets (LFS). +type X__caddr_t = uintptr /* types.h:203:14 */ + +// Duplicates info from stdint.h but this is used in unistd.h. +type X__intptr_t = int64 /* types.h:206:25 */ + +// Duplicate info from sys/socket.h. +type X__socklen_t = uint32 /* types.h:209:23 */ + +// C99: An integer type that can be accessed as an atomic entity, +// +// even in the presence of asynchronous interrupts. +// It is not currently necessary for this to be machine-specific. +type X__sig_atomic_t = int32 /* types.h:214:13 */ + +// Seconds since the Epoch, visible to user code when time_t is too +// narrow only for consistency with the old way of widening too-narrow +// types. User code should never use __time64_t. + +// Get the definitions of O_*, F_*, FD_*: all the +// numbers and flag bits for `open', `fcntl', et al. +// O_*, F_*, FD_* bit values for Linux/x86. +// Copyright (C) 2001-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Not necessary, we always have 64-bit offsets. + +type Flock = struct { + Fl_type int16 + Fl_whence int16 + F__ccgo_pad1 [4]byte + Fl_start X__off64_t + Fl_len X__off64_t + Fl_pid X__pid_t + F__ccgo_pad2 [4]byte +} /* fcntl.h:35:1 */ + +// Include generic Linux declarations. +// O_*, F_*, FD_* bit values for Linux. +// Copyright (C) 2001-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// This file contains shared definitions between Linux architectures +// and is included by to declare them. The various +// #ifndef cases allow the architecture specific file to define those +// values with different values. +// +// A minimal contains just: +// +// struct flock {...} +// #ifdef __USE_LARGEFILE64 +// struct flock64 {...} +// #endif +// #include + +// open/fcntl. + +// open file description locks. +// +// Usually record locks held by a process are released on *any* close and are +// not inherited across a fork. +// +// These cmd values will set locks that conflict with process-associated record +// locks, but are "owned" by the opened file description, not the process. +// This means that they are inherited across fork or clone with CLONE_FILES +// like BSD (flock) locks, and they are only released automatically when the +// last reference to the the file description against which they were acquired +// is put. + +// For now, Linux has no separate synchronicity options for read +// operations. We define O_RSYNC therefore as the same as O_SYNC +// since this is a superset. + +// Values for the second argument to `fcntl'. + +// For F_[GET|SET]FD. + +// For posix fcntl() and `l_type' field of a `struct flock' for lockf(). + +// For old implementation of BSD flock. + +// Operations for BSD flock, also used by the kernel implementation. + +// Define some more compatibility macros to be backward compatible with +// BSD systems which did not managed to hide these kernel macros. + +// Advise to `posix_fadvise'. + +// Values for `*at' functions. + +// Detect if open needs mode as a third argument (or for openat as a fourth +// argument). + +// POSIX.1-2001 specifies that these types are defined by . +// +// Earlier POSIX standards permitted any type ending in `_t' to be defined +// by any POSIX header, so we don't conditionalize the definitions here. +type Mode_t = X__mode_t /* fcntl.h:50:18 */ + +type Off_t = X__off64_t /* fcntl.h:58:19 */ + +type Pid_t = X__pid_t /* fcntl.h:69:17 */ + +// For XPG all symbols from should also be available. +// NB: Include guard matches what uses. + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. 
+// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Endian macros for string.h functions +// Copyright (C) 1992-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Definitions for byte order, according to significance of bytes, +// from low addresses to high addresses. The value is what you get by +// putting '4' in the most significant byte, '3' in the second most +// significant byte, '2' in the second least significant byte, and '1' +// in the least significant byte, and then writing down one digit for +// each byte, starting with the byte at the lowest address at the left, +// and proceeding to the byte with the highest address at the right. + +// This file defines `__BYTE_ORDER' for the particular machine. + +// i386/x86_64 are little-endian. + +// Some machines may need to use a different endianness for floating point +// values. + +// POSIX.1b structure for a time value. This is like a `struct timeval' but +// +// has nanoseconds instead of microseconds. +type Timespec = struct { + Ftv_sec X__time_t + Ftv_nsec X__syscall_slong_t +} /* struct_timespec.h:10:1 */ + +// Copyright (C) 1999-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Versions of the `struct stat' data structure. + +// x86-64 versions of the `xmknod' interface. 
+ +type Stat = struct { + Fst_dev X__dev_t + Fst_ino X__ino_t + Fst_nlink X__nlink_t + Fst_mode X__mode_t + Fst_uid X__uid_t + Fst_gid X__gid_t + F__pad0 int32 + Fst_rdev X__dev_t + Fst_size X__off_t + Fst_blksize X__blksize_t + Fst_blocks X__blkcnt_t + Fst_atim struct { + Ftv_sec X__time_t + Ftv_nsec X__syscall_slong_t + } + Fst_mtim struct { + Ftv_sec X__time_t + Ftv_nsec X__syscall_slong_t + } + Fst_ctim struct { + Ftv_sec X__time_t + Ftv_nsec X__syscall_slong_t + } + F__glibc_reserved [3]X__syscall_slong_t +} /* stat.h:46:1 */ + +// Define some inlines helping to catch common problems. + +var _ int8 /* gen.c:2:13: */ diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/fts/capi_linux_loong64.go temporal-1.22.5/src/vendor/modernc.org/libc/fts/capi_linux_loong64.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/fts/capi_linux_loong64.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/fts/capi_linux_loong64.go 2024-02-23 09:46:15.000000000 +0000 @@ -0,0 +1,5 @@ +// Code generated by 'ccgo fts/gen.c -crt-import-path -export-defines -export-enums -export-externs X -export-fields F -export-structs -export-typedefs -header -hide _OSSwapInt16,_OSSwapInt32,_OSSwapInt64 -ignore-unsupported-alignment -o fts/fts_linux_amd64.go -pkgname fts', DO NOT EDIT. + +package fts + +var CAPI = map[string]struct{}{} diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/fts/fts_linux_loong64.go temporal-1.22.5/src/vendor/modernc.org/libc/fts/fts_linux_loong64.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/fts/fts_linux_loong64.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/fts/fts_linux_loong64.go 2024-02-23 09:46:15.000000000 +0000 @@ -0,0 +1,1904 @@ +// Code generated by 'ccgo fts/gen.c -crt-import-path "" -export-defines "" -export-enums "" -export-externs X -export-fields F -export-structs "" -export-typedefs "" -header -hide _OSSwapInt16,_OSSwapInt32,_OSSwapInt64 -ignore-unsupported-alignment -o fts/fts_linux_amd64.go -pkgname fts', DO NOT EDIT. 
+ +package fts + +import ( + "math" + "reflect" + "sync/atomic" + "unsafe" +) + +var _ = math.Pi +var _ reflect.Kind +var _ atomic.Value +var _ unsafe.Pointer + +const ( + ACCESSPERMS = 511 // stat.h:195:1: + ALLPERMS = 4095 // stat.h:196:1: + BIG_ENDIAN = 4321 // endian.h:28:1: + BYTE_ORDER = 1234 // endian.h:30:1: + DEFFILEMODE = 438 // stat.h:197:1: + FD_SETSIZE = 1024 // select.h:73:1: + FTS_AGAIN = 1 // fts.h:139:1: + FTS_COMFOLLOW = 0x0001 // fts.h:68:1: + FTS_D = 1 // fts.h:119:1: + FTS_DC = 2 // fts.h:120:1: + FTS_DEFAULT = 3 // fts.h:121:1: + FTS_DNR = 4 // fts.h:122:1: + FTS_DONTCHDIR = 0x01 // fts.h:135:1: + FTS_DOT = 5 // fts.h:123:1: + FTS_DP = 6 // fts.h:124:1: + FTS_ERR = 7 // fts.h:125:1: + FTS_F = 8 // fts.h:126:1: + FTS_FOLLOW = 2 // fts.h:140:1: + FTS_INIT = 9 // fts.h:127:1: + FTS_LOGICAL = 0x0002 // fts.h:69:1: + FTS_NAMEONLY = 0x0100 // fts.h:78:1: + FTS_NOCHDIR = 0x0004 // fts.h:70:1: + FTS_NOINSTR = 3 // fts.h:141:1: + FTS_NOSTAT = 0x0008 // fts.h:71:1: + FTS_NS = 10 // fts.h:128:1: + FTS_NSOK = 11 // fts.h:129:1: + FTS_OPTIONMASK = 0x00ff // fts.h:76:1: + FTS_PHYSICAL = 0x0010 // fts.h:72:1: + FTS_ROOTLEVEL = 0 // fts.h:116:1: + FTS_ROOTPARENTLEVEL = -1 // fts.h:115:1: + FTS_SEEDOT = 0x0020 // fts.h:73:1: + FTS_SKIP = 4 // fts.h:142:1: + FTS_SL = 12 // fts.h:130:1: + FTS_SLNONE = 13 // fts.h:131:1: + FTS_STOP = 0x0200 // fts.h:79:1: + FTS_SYMFOLLOW = 0x02 // fts.h:136:1: + FTS_W = 14 // fts.h:132:1: + FTS_WHITEOUT = 0x0080 // fts.h:75:1: + FTS_XDEV = 0x0040 // fts.h:74:1: + LITTLE_ENDIAN = 1234 // endian.h:27:1: + PDP_ENDIAN = 3412 // endian.h:29:1: + S_BLKSIZE = 512 // stat.h:199:1: + S_IEXEC = 64 // stat.h:177:1: + S_IFBLK = 24576 // stat.h:107:1: + S_IFCHR = 8192 // stat.h:106:1: + S_IFDIR = 16384 // stat.h:105:1: + S_IFIFO = 4096 // stat.h:110:1: + S_IFLNK = 40960 // stat.h:113:1: + S_IFMT = 61440 // stat.h:104:1: + S_IFREG = 32768 // stat.h:108:1: + S_IFSOCK = 49152 // stat.h:117:1: + S_IREAD = 256 // stat.h:175:1: + S_IRGRP = 32 // stat.h:180:1: + S_IROTH = 4 // stat.h:186:1: + S_IRUSR = 256 // stat.h:168:1: + S_IRWXG = 56 // stat.h:184:1: + S_IRWXO = 7 // stat.h:190:1: + S_IRWXU = 448 // stat.h:172:1: + S_ISGID = 1024 // stat.h:161:1: + S_ISUID = 2048 // stat.h:160:1: + S_ISVTX = 512 // stat.h:165:1: + S_IWGRP = 16 // stat.h:181:1: + S_IWOTH = 2 // stat.h:187:1: + S_IWRITE = 128 // stat.h:176:1: + S_IWUSR = 128 // stat.h:169:1: + S_IXGRP = 8 // stat.h:182:1: + S_IXOTH = 1 // stat.h:188:1: + S_IXUSR = 64 // stat.h:170:1: + UTIME_NOW = 1073741823 // stat.h:206:1: + UTIME_OMIT = 1073741822 // stat.h:207:1: + X_ATFILE_SOURCE = 1 // features.h:342:1: + X_BITS_BYTESWAP_H = 1 // byteswap.h:24:1: + X_BITS_ENDIANNESS_H = 1 // endianness.h:2:1: + X_BITS_ENDIAN_H = 1 // endian.h:20:1: + X_BITS_PTHREADTYPES_ARCH_H = 1 // pthreadtypes-arch.h:19:1: + X_BITS_PTHREADTYPES_COMMON_H = 1 // pthreadtypes.h:20:1: + X_BITS_STAT_H = 1 // stat.h:23:1: + X_BITS_STDINT_INTN_H = 1 // stdint-intn.h:20:1: + X_BITS_TIME64_H = 1 // time64.h:24:1: + X_BITS_TYPESIZES_H = 1 // typesizes.h:24:1: + X_BITS_TYPES_H = 1 // types.h:24:1: + X_BITS_UINTN_IDENTITY_H = 1 // uintn-identity.h:24:1: + X_BSD_SIZE_T_ = 0 // stddef.h:189:1: + X_BSD_SIZE_T_DEFINED_ = 0 // stddef.h:192:1: + X_DEFAULT_SOURCE = 1 // features.h:227:1: + X_ENDIAN_H = 1 // endian.h:19:1: + X_FEATURES_H = 1 // features.h:19:1: + X_FILE_OFFSET_BITS = 64 // :25:1: + X_FTS_H = 1 // fts.h:51:1: + X_GCC_SIZE_T = 0 // stddef.h:195:1: + X_LP64 = 1 // :284:1: + X_MKNOD_VER = 0 // stat.h:390:1: + X_MKNOD_VER_LINUX = 0 // stat.h:41:1: + 
X_POSIX_C_SOURCE = 200809 // features.h:281:1: + X_POSIX_SOURCE = 1 // features.h:279:1: + X_RWLOCK_INTERNAL_H = 0 // struct_rwlock.h:21:1: + X_SIZET_ = 0 // stddef.h:196:1: + X_SIZE_T = 0 // stddef.h:183:1: + X_SIZE_T_ = 0 // stddef.h:188:1: + X_SIZE_T_DECLARED = 0 // stddef.h:193:1: + X_SIZE_T_DEFINED = 0 // stddef.h:191:1: + X_SIZE_T_DEFINED_ = 0 // stddef.h:190:1: + X_STATBUF_ST_BLKSIZE = 0 // stat.h:172:1: + X_STATBUF_ST_NSEC = 0 // stat.h:175:1: + X_STATBUF_ST_RDEV = 0 // stat.h:173:1: + X_STAT_VER = 1 // stat.h:44:1: + X_STAT_VER_KERNEL = 0 // stat.h:37:1: + X_STAT_VER_LINUX = 1 // stat.h:38:1: + X_STDC_PREDEF_H = 1 // :162:1: + X_STRUCT_TIMESPEC = 1 // struct_timespec.h:3:1: + X_SYS_CDEFS_H = 1 // cdefs.h:19:1: + X_SYS_SELECT_H = 1 // select.h:22:1: + X_SYS_SIZE_T_H = 0 // stddef.h:184:1: + X_SYS_STAT_H = 1 // stat.h:23:1: + X_SYS_TYPES_H = 1 // types.h:23:1: + X_THREAD_MUTEX_INTERNAL_H = 1 // struct_mutex.h:20:1: + X_THREAD_SHARED_TYPES_H = 1 // thread-shared-types.h:20:1: + X_T_SIZE = 0 // stddef.h:186:1: + X_T_SIZE_ = 0 // stddef.h:185:1: + Linux = 1 // :231:1: + Unix = 1 // :177:1: +) + +type Ptrdiff_t = int64 /* :3:26 */ + +type Size_t = uint64 /* :9:23 */ + +type Wchar_t = int32 /* :15:24 */ + +type X__int128_t = struct { + Flo int64 + Fhi int64 +} /* :21:43 */ // must match modernc.org/mathutil.Int128 +type X__uint128_t = struct { + Flo uint64 + Fhi uint64 +} /* :22:44 */ // must match modernc.org/mathutil.Int128 + +type X__builtin_va_list = uintptr /* :46:14 */ +type X__float128 = float64 /* :47:21 */ + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// POSIX Standard: 2.6 Primitive System Data Types + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// These are defined by the user (or the compiler) +// to specify the desired environment: +// +// __STRICT_ANSI__ ISO Standard C. +// _ISOC99_SOURCE Extensions to ISO C89 from ISO C99. +// _ISOC11_SOURCE Extensions to ISO C99 from ISO C11. +// _ISOC2X_SOURCE Extensions to ISO C99 from ISO C2X. +// __STDC_WANT_LIB_EXT2__ +// Extensions to ISO C99 from TR 27431-2:2010. 
+// __STDC_WANT_IEC_60559_BFP_EXT__ +// Extensions to ISO C11 from TS 18661-1:2014. +// __STDC_WANT_IEC_60559_FUNCS_EXT__ +// Extensions to ISO C11 from TS 18661-4:2015. +// __STDC_WANT_IEC_60559_TYPES_EXT__ +// Extensions to ISO C11 from TS 18661-3:2015. +// +// _POSIX_SOURCE IEEE Std 1003.1. +// _POSIX_C_SOURCE If ==1, like _POSIX_SOURCE; if >=2 add IEEE Std 1003.2; +// if >=199309L, add IEEE Std 1003.1b-1993; +// if >=199506L, add IEEE Std 1003.1c-1995; +// if >=200112L, all of IEEE 1003.1-2004 +// if >=200809L, all of IEEE 1003.1-2008 +// _XOPEN_SOURCE Includes POSIX and XPG things. Set to 500 if +// Single Unix conformance is wanted, to 600 for the +// sixth revision, to 700 for the seventh revision. +// _XOPEN_SOURCE_EXTENDED XPG things and X/Open Unix extensions. +// _LARGEFILE_SOURCE Some more functions for correct standard I/O. +// _LARGEFILE64_SOURCE Additional functionality from LFS for large files. +// _FILE_OFFSET_BITS=N Select default filesystem interface. +// _ATFILE_SOURCE Additional *at interfaces. +// _GNU_SOURCE All of the above, plus GNU extensions. +// _DEFAULT_SOURCE The default set of features (taking precedence over +// __STRICT_ANSI__). +// +// _FORTIFY_SOURCE Add security hardening to many library functions. +// Set to 1 or 2; 2 performs stricter checks than 1. +// +// _REENTRANT, _THREAD_SAFE +// Obsolete; equivalent to _POSIX_C_SOURCE=199506L. +// +// The `-ansi' switch to the GNU C compiler, and standards conformance +// options such as `-std=c99', define __STRICT_ANSI__. If none of +// these are defined, or if _DEFAULT_SOURCE is defined, the default is +// to have _POSIX_SOURCE set to one and _POSIX_C_SOURCE set to +// 200809L, as well as enabling miscellaneous functions from BSD and +// SVID. If more than one of these are defined, they accumulate. For +// example __STRICT_ANSI__, _POSIX_SOURCE and _POSIX_C_SOURCE together +// give you ISO C, 1003.1, and 1003.2, but nothing else. +// +// These are defined by this file and are used by the +// header files to decide what to declare or define: +// +// __GLIBC_USE (F) Define things from feature set F. This is defined +// to 1 or 0; the subsequent macros are either defined +// or undefined, and those tests should be moved to +// __GLIBC_USE. +// __USE_ISOC11 Define ISO C11 things. +// __USE_ISOC99 Define ISO C99 things. +// __USE_ISOC95 Define ISO C90 AMD1 (C95) things. +// __USE_ISOCXX11 Define ISO C++11 things. +// __USE_POSIX Define IEEE Std 1003.1 things. +// __USE_POSIX2 Define IEEE Std 1003.2 things. +// __USE_POSIX199309 Define IEEE Std 1003.1, and .1b things. +// __USE_POSIX199506 Define IEEE Std 1003.1, .1b, .1c and .1i things. +// __USE_XOPEN Define XPG things. +// __USE_XOPEN_EXTENDED Define X/Open Unix things. +// __USE_UNIX98 Define Single Unix V2 things. +// __USE_XOPEN2K Define XPG6 things. +// __USE_XOPEN2KXSI Define XPG6 XSI things. +// __USE_XOPEN2K8 Define XPG7 things. +// __USE_XOPEN2K8XSI Define XPG7 XSI things. +// __USE_LARGEFILE Define correct standard I/O things. +// __USE_LARGEFILE64 Define LFS things with separate names. +// __USE_FILE_OFFSET64 Define 64bit interface as default. +// __USE_MISC Define things from 4.3BSD or System V Unix. +// __USE_ATFILE Define *at interfaces and AT_* constants for them. +// __USE_GNU Define GNU extensions. +// __USE_FORTIFY_LEVEL Additional security measures used, according to level. +// +// The macros `__GNU_LIBRARY__', `__GLIBC__', and `__GLIBC_MINOR__' are +// defined by this file unconditionally. 
`__GNU_LIBRARY__' is provided +// only for compatibility. All new code should use the other symbols +// to test for features. +// +// All macros listed above as possibly being defined by this file are +// explicitly undefined if they are not explicitly defined. +// Feature-test macros that are not defined by the user or compiler +// but are implied by the other feature-test macros defined (or by the +// lack of any definitions) are defined by the file. +// +// ISO C feature test macros depend on the definition of the macro +// when an affected header is included, not when the first system +// header is included, and so they are handled in +// , which does not have a multiple include +// guard. Feature test macros that can be handled from the first +// system header included are handled here. + +// Undefine everything, so we get a clean slate. + +// Suppress kernel-name space pollution unless user expressedly asks +// for it. + +// Convenience macro to test the version of gcc. +// Use like this: +// #if __GNUC_PREREQ (2,8) +// ... code requiring gcc 2.8 or later ... +// #endif +// Note: only works for GCC 2.0 and later, because __GNUC_MINOR__ was +// added in 2.0. + +// Similarly for clang. Features added to GCC after version 4.2 may +// or may not also be available in clang, and clang's definitions of +// __GNUC(_MINOR)__ are fixed at 4 and 2 respectively. Not all such +// features can be queried via __has_extension/__has_feature. + +// Whether to use feature set F. + +// _BSD_SOURCE and _SVID_SOURCE are deprecated aliases for +// _DEFAULT_SOURCE. If _DEFAULT_SOURCE is present we do not +// issue a warning; the expectation is that the source is being +// transitioned to use the new macro. + +// If _GNU_SOURCE was defined by the user, turn on all the other features. + +// If nothing (other than _GNU_SOURCE and _DEFAULT_SOURCE) is defined, +// define _DEFAULT_SOURCE. + +// This is to enable the ISO C2X extension. + +// This is to enable the ISO C11 extension. + +// This is to enable the ISO C99 extension. + +// This is to enable the ISO C90 Amendment 1:1995 extension. + +// If none of the ANSI/POSIX macros are defined, or if _DEFAULT_SOURCE +// is defined, use POSIX.1-2008 (or another version depending on +// _XOPEN_SOURCE). + +// Some C libraries once required _REENTRANT and/or _THREAD_SAFE to be +// defined in all multithreaded code. GNU libc has not required this +// for many years. We now treat them as compatibility synonyms for +// _POSIX_C_SOURCE=199506L, which is the earliest level of POSIX with +// comprehensive support for multithreaded code. Using them never +// lowers the selected level of POSIX conformance, only raises it. + +// The function 'gets' existed in C89, but is impossible to use +// safely. It has been removed from ISO C11 and ISO C++14. Note: for +// compatibility with various implementations of , this test +// must consider only the value of __cplusplus when compiling C++. + +// GNU formerly extended the scanf functions with modified format +// specifiers %as, %aS, and %a[...] that allocate a buffer for the +// input using malloc. This extension conflicts with ISO C99, which +// defines %a as a standalone format specifier that reads a floating- +// point number; moreover, POSIX.1-2008 provides the same feature +// using the modifier letter 'm' instead (%ms, %mS, %m[...]). +// +// We now follow C99 unless GNU extensions are active and the compiler +// is specifically in C89 or C++98 mode (strict or not). 
For +// instance, with GCC, -std=gnu11 will have C99-compliant scanf with +// or without -D_GNU_SOURCE, but -std=c89 -D_GNU_SOURCE will have the +// old extension. + +// Get definitions of __STDC_* predefined macros, if the compiler has +// not preincluded this header automatically. +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// This macro indicates that the installed library is the GNU C Library. +// For historic reasons the value now is 6 and this will stay from now +// on. The use of this variable is deprecated. Use __GLIBC__ and +// __GLIBC_MINOR__ now (see below) when you want to test for a specific +// GNU C library version and use the values in to get +// the sonames of the shared libraries. + +// Major and minor version number of the GNU C library package. Use +// these macros to test for features in specific releases. + +// This is here only because every header file already includes this one. +// Copyright (C) 1992-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// We are almost always included from features.h. + +// The GNU libc does not support any K&R compilers or the traditional mode +// of ISO C compilers anymore. Check for some of the combinations not +// anymore supported. + +// Some user header file might have defined this before. + +// All functions, except those with callbacks or those that +// synchronize memory, are leaf functions. + +// GCC can always grok prototypes. For C++ programs we add throw() +// to help it optimize the function calls. But this works only with +// gcc 2.8.x and egcs. For gcc 3.2 and up we even mark C functions +// as non-throwing using a function attribute since programs can use +// the -fexceptions options for C code as well. + +// Compilers that are not clang may object to +// #if defined __clang__ && __has_extension(...) +// even though they do not need to evaluate the right-hand side of the &&. + +// These two macros are not used in glibc anymore. They are kept here +// only because some other projects expect the macros to be defined. 
+ +// For these things, GCC behaves the ANSI way normally, +// and the non-ANSI way under -traditional. + +// This is not a typedef so `const __ptr_t' does the right thing. + +// C++ needs to know that types and declarations are C, not C++. + +// Fortify support. + +// Support for flexible arrays. +// Headers that should use flexible arrays only if they're "real" +// (e.g. only if they won't affect sizeof()) should test +// #if __glibc_c99_flexarr_available. + +// __asm__ ("xyz") is used throughout the headers to rename functions +// at the assembly language level. This is wrapped by the __REDIRECT +// macro, in order to support compilers that can do this some other +// way. When compilers don't support asm-names at all, we have to do +// preprocessor tricks instead (which don't have exactly the right +// semantics, but it's the best we can do). +// +// Example: +// int __REDIRECT(setpgrp, (__pid_t pid, __pid_t pgrp), setpgid); + +// +// #elif __SOME_OTHER_COMPILER__ +// +// # define __REDIRECT(name, proto, alias) name proto; _Pragma("let " #name " = " #alias) + +// GCC has various useful declarations that can be made with the +// `__attribute__' syntax. All of the ways we use this do fine if +// they are omitted for compilers that don't understand it. + +// At some point during the gcc 2.96 development the `malloc' attribute +// for functions was introduced. We don't want to use it unconditionally +// (although this would be possible) since it generates warnings. + +// Tell the compiler which arguments to an allocation function +// indicate the size of the allocation. + +// At some point during the gcc 2.96 development the `pure' attribute +// for functions was introduced. We don't want to use it unconditionally +// (although this would be possible) since it generates warnings. + +// This declaration tells the compiler that the value is constant. + +// At some point during the gcc 3.1 development the `used' attribute +// for functions was introduced. We don't want to use it unconditionally +// (although this would be possible) since it generates warnings. + +// Since version 3.2, gcc allows marking deprecated functions. + +// Since version 4.5, gcc also allows one to specify the message printed +// when a deprecated function is used. clang claims to be gcc 4.2, but +// may also support this feature. + +// At some point during the gcc 2.8 development the `format_arg' attribute +// for functions was introduced. We don't want to use it unconditionally +// (although this would be possible) since it generates warnings. +// If several `format_arg' attributes are given for the same function, in +// gcc-3.0 and older, all but the last one are ignored. In newer gccs, +// all designated arguments are considered. + +// At some point during the gcc 2.97 development the `strfmon' format +// attribute for functions was introduced. We don't want to use it +// unconditionally (although this would be possible) since it +// generates warnings. + +// The nonull function attribute allows to mark pointer parameters which +// must not be NULL. + +// If fortification mode, we warn about unused results of certain +// function calls which can lead to problems. + +// Forces a function to be always inlined. +// The Linux kernel defines __always_inline in stddef.h (283d7573), and +// it conflicts with this definition. Therefore undefine it first to +// allow either header to be included first. 
+ +// Associate error messages with the source location of the call site rather +// than with the source location inside the function. + +// GCC 4.3 and above with -std=c99 or -std=gnu99 implements ISO C99 +// inline semantics, unless -fgnu89-inline is used. Using __GNUC_STDC_INLINE__ +// or __GNUC_GNU_INLINE is not a good enough check for gcc because gcc versions +// older than 4.3 may define these macros and still not guarantee GNU inlining +// semantics. +// +// clang++ identifies itself as gcc-4.2, but has support for GNU inlining +// semantics, that can be checked for by using the __GNUC_STDC_INLINE_ and +// __GNUC_GNU_INLINE__ macro definitions. + +// GCC 4.3 and above allow passing all anonymous arguments of an +// __extern_always_inline function to some other vararg function. + +// It is possible to compile containing GCC extensions even if GCC is +// run in pedantic mode if the uses are carefully marked using the +// `__extension__' keyword. But this is not generally available before +// version 2.8. + +// __restrict is known in EGCS 1.2 and above. + +// ISO C99 also allows to declare arrays as non-overlapping. The syntax is +// array_name[restrict] +// GCC 3.1 supports this. + +// Describes a char array whose address can safely be passed as the first +// argument to strncpy and strncat, as the char array is not necessarily +// a NUL-terminated string. + +// Undefine (also defined in libc-symbols.h). +// Copies attributes from the declaration or type referenced by +// the argument. + +// Determine the wordsize from the preprocessor defines. + +// Both x86-64 and x32 use the 64-bit system call interface. +// Properties of long double type. ldbl-96 version. +// Copyright (C) 2016-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// long double is distinct from double, so there is nothing to +// define here. + +// __glibc_macro_warning (MESSAGE) issues warning MESSAGE. This is +// intended for use in preprocessor macros. +// +// Note: MESSAGE must be a _single_ string; concatenation of string +// literals is not supported. + +// Generic selection (ISO C11) is a C-only feature, available in GCC +// since version 4.9. Previous versions do not provide generic +// selection, even though they might set __STDC_VERSION__ to 201112L, +// when in -std=c11 mode. Thus, we must check for !defined __GNUC__ +// when testing __STDC_VERSION__ for generic selection support. +// On the other hand, Clang also defines __GNUC__, so a clang-specific +// check is required to enable the use of generic selection. + +// If we don't have __REDIRECT, prototypes will be missing if +// __USE_FILE_OFFSET64 but not __USE_LARGEFILE[64]. + +// Decide whether we can define 'extern inline' functions in headers. + +// This is here only because every header file already includes this one. 
+// Get the definitions of all the appropriate `__stub_FUNCTION' symbols. +// contains `#define __stub_FUNCTION' when FUNCTION is a stub +// that will always return failure (and set errno to ENOSYS). +// This file is automatically generated. +// This file selects the right generated file of `__stub_FUNCTION' macros +// based on the architecture being compiled for. + +// This file is automatically generated. +// It defines a symbol `__stub_FUNCTION' for each function +// in the C library which is a stub, meaning it will fail +// every time called, usually setting errno to ENOSYS. + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Determine the wordsize from the preprocessor defines. + +// Both x86-64 and x32 use the 64-bit system call interface. +// Bit size of the time_t type at glibc build time, x86-64 and x32 case. +// Copyright (C) 2018-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// For others, time size is word size. + +// Convenience types. 
+type X__u_char = uint8 /* types.h:31:23 */ +type X__u_short = uint16 /* types.h:32:28 */ +type X__u_int = uint32 /* types.h:33:22 */ +type X__u_long = uint64 /* types.h:34:27 */ + +// Fixed-size types, underlying types depend on word size and compiler. +type X__int8_t = int8 /* types.h:37:21 */ +type X__uint8_t = uint8 /* types.h:38:23 */ +type X__int16_t = int16 /* types.h:39:26 */ +type X__uint16_t = uint16 /* types.h:40:28 */ +type X__int32_t = int32 /* types.h:41:20 */ +type X__uint32_t = uint32 /* types.h:42:22 */ +type X__int64_t = int64 /* types.h:44:25 */ +type X__uint64_t = uint64 /* types.h:45:27 */ + +// Smallest types with at least a given width. +type X__int_least8_t = X__int8_t /* types.h:52:18 */ +type X__uint_least8_t = X__uint8_t /* types.h:53:19 */ +type X__int_least16_t = X__int16_t /* types.h:54:19 */ +type X__uint_least16_t = X__uint16_t /* types.h:55:20 */ +type X__int_least32_t = X__int32_t /* types.h:56:19 */ +type X__uint_least32_t = X__uint32_t /* types.h:57:20 */ +type X__int_least64_t = X__int64_t /* types.h:58:19 */ +type X__uint_least64_t = X__uint64_t /* types.h:59:20 */ + +// quad_t is also 64 bits. +type X__quad_t = int64 /* types.h:63:18 */ +type X__u_quad_t = uint64 /* types.h:64:27 */ + +// Largest integral types. +type X__intmax_t = int64 /* types.h:72:18 */ +type X__uintmax_t = uint64 /* types.h:73:27 */ + +// The machine-dependent file defines __*_T_TYPE +// macros for each of the OS types we define below. The definitions +// of those macros must use the following macros for underlying types. +// We define __S_TYPE and __U_TYPE for the signed and unsigned +// variants of each of the following integer types on this machine. +// +// 16 -- "natural" 16-bit type (always short) +// 32 -- "natural" 32-bit type (always int) +// 64 -- "natural" 64-bit type (long or long long) +// LONG32 -- 32-bit type, traditionally long +// QUAD -- 64-bit type, traditionally long long +// WORD -- natural type of __WORDSIZE bits (int or long) +// LONGWORD -- type of __WORDSIZE bits, traditionally long +// +// We distinguish WORD/LONGWORD, 32/LONG32, and 64/QUAD so that the +// conventional uses of `long' or `long long' type modifiers match the +// types we define, even when a less-adorned type would be the same size. +// This matters for (somewhat) portably writing printf/scanf formats for +// these types, where using the appropriate l or ll format modifiers can +// make the typedefs and the formats match up across all GNU platforms. If +// we used `long' when it's 64 bits where `long long' is expected, then the +// compiler would warn about the formats not matching the argument types, +// and the programmer changing them to shut up the compiler would break the +// program's portability. +// +// Here we assume what is presently the case in all the GCC configurations +// we support: long long is always 64 bits, long is always word/address size, +// and int is always 32 bits. + +// No need to mark the typedef with __extension__. +// bits/typesizes.h -- underlying types for *_t. Linux/x86-64 version. +// Copyright (C) 2012-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. 
+// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// See for the meaning of these macros. This file exists so +// that need not vary across different GNU platforms. + +// X32 kernel interface is 64-bit. + +// Tell the libc code that off_t and off64_t are actually the same type +// for all ABI purposes, even if possibly expressed as different base types +// for C type-checking purposes. + +// Same for ino_t and ino64_t. + +// And for __rlim_t and __rlim64_t. + +// And for fsblkcnt_t, fsblkcnt64_t, fsfilcnt_t and fsfilcnt64_t. + +// Number of descriptors that can fit in an `fd_set'. + +// bits/time64.h -- underlying types for __time64_t. Generic version. +// Copyright (C) 2018-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Define __TIME64_T_TYPE so that it is always a 64-bit type. + +// If we already have 64-bit time type then use it. + +type X__dev_t = uint64 /* types.h:145:25 */ // Type of device numbers. +type X__uid_t = uint32 /* types.h:146:25 */ // Type of user identifications. +type X__gid_t = uint32 /* types.h:147:25 */ // Type of group identifications. +type X__ino_t = uint64 /* types.h:148:25 */ // Type of file serial numbers. +type X__ino64_t = uint64 /* types.h:149:27 */ // Type of file serial numbers (LFS). +type X__mode_t = uint32 /* types.h:150:26 */ // Type of file attribute bitmasks. +type X__nlink_t = uint64 /* types.h:151:27 */ // Type of file link counts. +type X__off_t = int64 /* types.h:152:25 */ // Type of file sizes and offsets. +type X__off64_t = int64 /* types.h:153:27 */ // Type of file sizes and offsets (LFS). +type X__pid_t = int32 /* types.h:154:25 */ // Type of process identifications. +type X__fsid_t = struct{ F__val [2]int32 } /* types.h:155:26 */ // Type of file system IDs. +type X__clock_t = int64 /* types.h:156:27 */ // Type of CPU usage counts. +type X__rlim_t = uint64 /* types.h:157:26 */ // Type for resource measurement. +type X__rlim64_t = uint64 /* types.h:158:28 */ // Type for resource measurement (LFS). +type X__id_t = uint32 /* types.h:159:24 */ // General type for IDs. +type X__time_t = int64 /* types.h:160:26 */ // Seconds since the Epoch. +type X__useconds_t = uint32 /* types.h:161:30 */ // Count of microseconds. +type X__suseconds_t = int64 /* types.h:162:31 */ // Signed count of microseconds. + +type X__daddr_t = int32 /* types.h:164:27 */ // The type of a disk address. +type X__key_t = int32 /* types.h:165:25 */ // Type of an IPC key. + +// Clock ID used in clock and timer functions. 
+type X__clockid_t = int32 /* types.h:168:29 */ + +// Timer ID returned by `timer_create'. +type X__timer_t = uintptr /* types.h:171:12 */ + +// Type to represent block size. +type X__blksize_t = int64 /* types.h:174:29 */ + +// Types from the Large File Support interface. + +// Type to count number of disk blocks. +type X__blkcnt_t = int64 /* types.h:179:28 */ +type X__blkcnt64_t = int64 /* types.h:180:30 */ + +// Type to count file system blocks. +type X__fsblkcnt_t = uint64 /* types.h:183:30 */ +type X__fsblkcnt64_t = uint64 /* types.h:184:32 */ + +// Type to count file system nodes. +type X__fsfilcnt_t = uint64 /* types.h:187:30 */ +type X__fsfilcnt64_t = uint64 /* types.h:188:32 */ + +// Type of miscellaneous file system fields. +type X__fsword_t = int64 /* types.h:191:28 */ + +type X__ssize_t = int64 /* types.h:193:27 */ // Type of a byte count, or error. + +// Signed long type used in system calls. +type X__syscall_slong_t = int64 /* types.h:196:33 */ +// Unsigned long type used in system calls. +type X__syscall_ulong_t = uint64 /* types.h:198:33 */ + +// These few don't really vary by system, they always correspond +// +// to one of the other defined types. +type X__loff_t = X__off64_t /* types.h:202:19 */ // Type of file sizes and offsets (LFS). +type X__caddr_t = uintptr /* types.h:203:14 */ + +// Duplicates info from stdint.h but this is used in unistd.h. +type X__intptr_t = int64 /* types.h:206:25 */ + +// Duplicate info from sys/socket.h. +type X__socklen_t = uint32 /* types.h:209:23 */ + +// C99: An integer type that can be accessed as an atomic entity, +// +// even in the presence of asynchronous interrupts. +// It is not currently necessary for this to be machine-specific. +type X__sig_atomic_t = int32 /* types.h:214:13 */ + +// Seconds since the Epoch, visible to user code when time_t is too +// narrow only for consistency with the old way of widening too-narrow +// types. User code should never use __time64_t. + +type U_char = X__u_char /* types.h:33:18 */ +type U_short = X__u_short /* types.h:34:19 */ +type U_int = X__u_int /* types.h:35:17 */ +type U_long = X__u_long /* types.h:36:18 */ +type Quad_t = X__quad_t /* types.h:37:18 */ +type U_quad_t = X__u_quad_t /* types.h:38:20 */ +type Fsid_t = X__fsid_t /* types.h:39:18 */ +type Loff_t = X__loff_t /* types.h:42:18 */ + +type Ino_t = X__ino64_t /* types.h:49:19 */ + +type Dev_t = X__dev_t /* types.h:59:17 */ + +type Gid_t = X__gid_t /* types.h:64:17 */ + +type Mode_t = X__mode_t /* types.h:69:18 */ + +type Nlink_t = X__nlink_t /* types.h:74:19 */ + +type Uid_t = X__uid_t /* types.h:79:17 */ + +type Off_t = X__off64_t /* types.h:87:19 */ + +type Pid_t = X__pid_t /* types.h:97:17 */ + +type Id_t = X__id_t /* types.h:103:16 */ + +type Ssize_t = X__ssize_t /* types.h:108:19 */ + +type Daddr_t = X__daddr_t /* types.h:114:19 */ +type Caddr_t = X__caddr_t /* types.h:115:19 */ + +type Key_t = X__key_t /* types.h:121:17 */ + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. 
+// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Returned by `clock'. +type Clock_t = X__clock_t /* clock_t.h:7:19 */ + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Clock ID used in clock and timer functions. +type Clockid_t = X__clockid_t /* clockid_t.h:7:21 */ + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Returned by `time'. +type Time_t = X__time_t /* time_t.h:7:18 */ + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Timer ID returned by `timer_create'. +type Timer_t = X__timer_t /* timer_t.h:7:19 */ + +// Wide character type. 
+// Locale-writers should change this as necessary to +// be big enough to hold unique values not between 0 and 127, +// and not (wchar_t) -1, for each defined multibyte character. + +// Define this type if we are doing the whole job, +// or if we want this type in particular. + +// A null pointer constant. + +// Old compatibility names for C types. +type Ulong = uint64 /* types.h:148:27 */ +type Ushort = uint16 /* types.h:149:28 */ +type Uint = uint32 /* types.h:150:22 */ + +// These size-specific names are used by some of the inet code. + +// Define intN_t types. +// Copyright (C) 2017-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +type Int8_t = X__int8_t /* stdint-intn.h:24:18 */ +type Int16_t = X__int16_t /* stdint-intn.h:25:19 */ +type Int32_t = X__int32_t /* stdint-intn.h:26:19 */ +type Int64_t = X__int64_t /* stdint-intn.h:27:19 */ + +// These were defined by ISO C without the first `_'. +type U_int8_t = X__uint8_t /* types.h:158:19 */ +type U_int16_t = X__uint16_t /* types.h:159:20 */ +type U_int32_t = X__uint32_t /* types.h:160:20 */ +type U_int64_t = X__uint64_t /* types.h:161:20 */ + +type Register_t = int32 /* types.h:164:13 */ + +// It also defines `fd_set' and the FD_* macros for `select'. +// `fd_set' type and related macros, and `select'/`pselect' declarations. +// Copyright (C) 1996-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// POSIX 1003.1g: 6.2 Select from File Descriptor Sets + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Get definition of needed basic types. +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Get __FD_* definitions. +// Copyright (C) 1997-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Determine the wordsize from the preprocessor defines. + +// Both x86-64 and x32 use the 64-bit system call interface. + +// Get sigset_t. + +type X__sigset_t = struct{ F__val [16]uint64 } /* __sigset_t.h:8:3 */ + +// A set of signals to be blocked, unblocked, or waited for. +type Sigset_t = X__sigset_t /* sigset_t.h:7:20 */ + +// Get definition of timer specification structures. + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. 
+// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// A time value that is accurate to the nearest +// +// microsecond but also has a range of years. +type Timeval = struct { + Ftv_sec X__time_t + Ftv_usec X__suseconds_t +} /* struct_timeval.h:8:1 */ + +// NB: Include guard matches what uses. + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Endian macros for string.h functions +// Copyright (C) 1992-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// POSIX.1b structure for a time value. This is like a `struct timeval' but +// +// has nanoseconds instead of microseconds. +type Timespec = struct { + Ftv_sec X__time_t + Ftv_nsec X__syscall_slong_t +} /* struct_timespec.h:10:1 */ + +type Suseconds_t = X__suseconds_t /* select.h:43:23 */ + +// The fd_set member is required to be an array of longs. +type X__fd_mask = int64 /* select.h:49:18 */ + +// Some versions of define this macros. +// It's easier to assume 8-bit bytes than to get CHAR_BIT. + +// fd_set for select and pselect. +type Fd_set = struct{ F__fds_bits [16]X__fd_mask } /* select.h:70:5 */ + +// Maximum number of file descriptors in `fd_set'. + +// Sometimes the fd_set member is assumed to have this type. +type Fd_mask = X__fd_mask /* select.h:77:19 */ + +// Define some inlines helping to catch common problems. + +type Blksize_t = X__blksize_t /* types.h:185:21 */ + +// Types from the Large File Support interface. +type Blkcnt_t = X__blkcnt64_t /* types.h:205:22 */ // Type to count number of disk blocks. +type Fsblkcnt_t = X__fsblkcnt64_t /* types.h:209:24 */ // Type to count file system blocks. 
+type Fsfilcnt_t = X__fsfilcnt64_t /* types.h:213:24 */ // Type to count file system inodes. + +// Now add the thread types. +// Declaration of common pthread types for all architectures. +// Copyright (C) 2017-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// For internal mutex and condition variable definitions. +// Common threading primitives definitions for both POSIX and C11. +// Copyright (C) 2017-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Arch-specific definitions. Each architecture must define the following +// macros to define the expected sizes of pthread data types: +// +// __SIZEOF_PTHREAD_ATTR_T - size of pthread_attr_t. +// __SIZEOF_PTHREAD_MUTEX_T - size of pthread_mutex_t. +// __SIZEOF_PTHREAD_MUTEXATTR_T - size of pthread_mutexattr_t. +// __SIZEOF_PTHREAD_COND_T - size of pthread_cond_t. +// __SIZEOF_PTHREAD_CONDATTR_T - size of pthread_condattr_t. +// __SIZEOF_PTHREAD_RWLOCK_T - size of pthread_rwlock_t. +// __SIZEOF_PTHREAD_RWLOCKATTR_T - size of pthread_rwlockattr_t. +// __SIZEOF_PTHREAD_BARRIER_T - size of pthread_barrier_t. +// __SIZEOF_PTHREAD_BARRIERATTR_T - size of pthread_barrierattr_t. +// +// The additional macro defines any constraint for the lock alignment +// inside the thread structures: +// +// __LOCK_ALIGNMENT - for internal lock/futex usage. +// +// Same idea but for the once locking primitive: +// +// __ONCE_ALIGNMENT - for pthread_once_t/once_flag definition. + +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Determine the wordsize from the preprocessor defines. + +// Both x86-64 and x32 use the 64-bit system call interface. + +// Common definition of pthread_mutex_t. + +type X__pthread_internal_list = struct { + F__prev uintptr + F__next uintptr +} /* thread-shared-types.h:49:9 */ + +// Type to count file system inodes. + +// Now add the thread types. +// Declaration of common pthread types for all architectures. +// Copyright (C) 2017-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// For internal mutex and condition variable definitions. +// Common threading primitives definitions for both POSIX and C11. +// Copyright (C) 2017-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Arch-specific definitions. Each architecture must define the following +// macros to define the expected sizes of pthread data types: +// +// __SIZEOF_PTHREAD_ATTR_T - size of pthread_attr_t. +// __SIZEOF_PTHREAD_MUTEX_T - size of pthread_mutex_t. +// __SIZEOF_PTHREAD_MUTEXATTR_T - size of pthread_mutexattr_t. +// __SIZEOF_PTHREAD_COND_T - size of pthread_cond_t. +// __SIZEOF_PTHREAD_CONDATTR_T - size of pthread_condattr_t. +// __SIZEOF_PTHREAD_RWLOCK_T - size of pthread_rwlock_t. +// __SIZEOF_PTHREAD_RWLOCKATTR_T - size of pthread_rwlockattr_t. +// __SIZEOF_PTHREAD_BARRIER_T - size of pthread_barrier_t. +// __SIZEOF_PTHREAD_BARRIERATTR_T - size of pthread_barrierattr_t. +// +// The additional macro defines any constraint for the lock alignment +// inside the thread structures: +// +// __LOCK_ALIGNMENT - for internal lock/futex usage. +// +// Same idea but for the once locking primitive: +// +// __ONCE_ALIGNMENT - for pthread_once_t/once_flag definition. + +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. 
+// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Determine the wordsize from the preprocessor defines. + +// Both x86-64 and x32 use the 64-bit system call interface. + +// Common definition of pthread_mutex_t. + +type X__pthread_list_t = X__pthread_internal_list /* thread-shared-types.h:53:3 */ + +type X__pthread_internal_slist = struct{ F__next uintptr } /* thread-shared-types.h:55:9 */ + +type X__pthread_slist_t = X__pthread_internal_slist /* thread-shared-types.h:58:3 */ + +// Arch-specific mutex definitions. A generic implementation is provided +// by sysdeps/nptl/bits/struct_mutex.h. If required, an architecture +// can override it by defining: +// +// 1. struct __pthread_mutex_s (used on both pthread_mutex_t and mtx_t +// definition). It should contains at least the internal members +// defined in the generic version. +// +// 2. __LOCK_ALIGNMENT for any extra attribute for internal lock used with +// atomic operations. +// +// 3. The macro __PTHREAD_MUTEX_INITIALIZER used for static initialization. +// It should initialize the mutex internal flag. + +// x86 internal mutex struct definitions. +// Copyright (C) 2019-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +type X__pthread_mutex_s = struct { + F__lock int32 + F__count uint32 + F__owner int32 + F__nusers uint32 + F__kind int32 + F__spins int16 + F__elision int16 + F__list X__pthread_list_t +} /* struct_mutex.h:22:1 */ + +// Arch-sepecific read-write lock definitions. A generic implementation is +// provided by struct_rwlock.h. If required, an architecture can override it +// by defining: +// +// 1. struct __pthread_rwlock_arch_t (used on pthread_rwlock_t definition). +// It should contain at least the internal members defined in the +// generic version. +// +// 2. The macro __PTHREAD_RWLOCK_INITIALIZER used for static initialization. +// It should initialize the rwlock internal type. + +// x86 internal rwlock struct definitions. +// Copyright (C) 2019-2020 Free Software Foundation, Inc. +// +// This file is part of the GNU C Library. 
+// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +type X__pthread_rwlock_arch_t = struct { + F__readers uint32 + F__writers uint32 + F__wrphase_futex uint32 + F__writers_futex uint32 + F__pad3 uint32 + F__pad4 uint32 + F__cur_writer int32 + F__shared int32 + F__rwelision int8 + F__pad1 [7]uint8 + F__pad2 uint64 + F__flags uint32 + F__ccgo_pad1 [4]byte +} /* struct_rwlock.h:23:1 */ + +// Common definition of pthread_cond_t. + +type X__pthread_cond_s = struct { + F__0 struct{ F__wseq uint64 } + F__8 struct{ F__g1_start uint64 } + F__g_refs [2]uint32 + F__g_size [2]uint32 + F__g1_orig_size uint32 + F__wrefs uint32 + F__g_signals [2]uint32 +} /* thread-shared-types.h:92:1 */ + +// Thread identifiers. The structure of the attribute type is not +// +// exposed on purpose. +type Pthread_t = uint64 /* pthreadtypes.h:27:27 */ + +// Data structures for mutex handling. The structure of the attribute +// +// type is not exposed on purpose. +type Pthread_mutexattr_t = struct { + F__ccgo_pad1 [0]uint32 + F__size [4]int8 +} /* pthreadtypes.h:36:3 */ + +// Data structure for condition variable handling. The structure of +// +// the attribute type is not exposed on purpose. +type Pthread_condattr_t = struct { + F__ccgo_pad1 [0]uint32 + F__size [4]int8 +} /* pthreadtypes.h:45:3 */ + +// Keys for thread-specific data +type Pthread_key_t = uint32 /* pthreadtypes.h:49:22 */ + +// Once-only execution +type Pthread_once_t = int32 /* pthreadtypes.h:53:30 */ + +type Pthread_attr_t1 = struct { + F__ccgo_pad1 [0]uint64 + F__size [56]int8 +} /* pthreadtypes.h:56:1 */ + +type Pthread_attr_t = Pthread_attr_t1 /* pthreadtypes.h:62:30 */ + +type Pthread_mutex_t = struct{ F__data X__pthread_mutex_s } /* pthreadtypes.h:72:3 */ + +type Pthread_cond_t = struct{ F__data X__pthread_cond_s } /* pthreadtypes.h:80:3 */ + +// Data structure for reader-writer lock variable handling. The +// +// structure of the attribute type is deliberately not exposed. +type Pthread_rwlock_t = struct{ F__data X__pthread_rwlock_arch_t } /* pthreadtypes.h:91:3 */ + +type Pthread_rwlockattr_t = struct { + F__ccgo_pad1 [0]uint64 + F__size [8]int8 +} /* pthreadtypes.h:97:3 */ + +// POSIX spinlock data type. +type Pthread_spinlock_t = int32 /* pthreadtypes.h:103:22 */ + +// POSIX barriers data type. The structure of the type is +// +// deliberately not exposed. +type Pthread_barrier_t = struct { + F__ccgo_pad1 [0]uint64 + F__size [32]int8 +} /* pthreadtypes.h:112:3 */ + +type Pthread_barrierattr_t = struct { + F__ccgo_pad1 [0]uint32 + F__size [4]int8 +} /* pthreadtypes.h:118:3 */ + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. 
+// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// POSIX Standard: 5.6 File Characteristics + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// NB: Include guard matches what uses. + +// The Single Unix specification says that some more types are +// available here. + +// Copyright (C) 1999-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Versions of the `struct stat' data structure. + +// x86-64 versions of the `xmknod' interface. 
+ +type Stat = struct { + Fst_dev X__dev_t + Fst_ino X__ino_t + Fst_nlink X__nlink_t + Fst_mode X__mode_t + Fst_uid X__uid_t + Fst_gid X__gid_t + F__pad0 int32 + Fst_rdev X__dev_t + Fst_size X__off_t + Fst_blksize X__blksize_t + Fst_blocks X__blkcnt_t + Fst_atim struct { + Ftv_sec X__time_t + Ftv_nsec X__syscall_slong_t + } + Fst_mtim struct { + Ftv_sec X__time_t + Ftv_nsec X__syscall_slong_t + } + Fst_ctim struct { + Ftv_sec X__time_t + Ftv_nsec X__syscall_slong_t + } + F__glibc_reserved [3]X__syscall_slong_t +} /* stat.h:46:1 */ + +// File tree traversal functions declarations. +// Copyright (C) 1994-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Copyright (c) 1989, 1993 +// The Regents of the University of California. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// 1. Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// 4. Neither the name of the University nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +// OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +// OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +// SUCH DAMAGE. +// +// @(#)fts.h 8.3 (Berkeley) 8/14/94 + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. 
+// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// POSIX Standard: 2.6 Primitive System Data Types + +type X_ftsent = struct { + Ffts_cycle uintptr + Ffts_parent uintptr + Ffts_link uintptr + Ffts_number int64 + Ffts_pointer uintptr + Ffts_accpath uintptr + Ffts_path uintptr + Ffts_errno int32 + Ffts_symfd int32 + Ffts_pathlen uint16 + Ffts_namelen uint16 + F__ccgo_pad1 [4]byte + Ffts_ino Ino_t + Ffts_dev Dev_t + Ffts_nlink Nlink_t + Ffts_level int16 + Ffts_info uint16 + Ffts_flags uint16 + Ffts_instr uint16 + Ffts_statp uintptr + Ffts_name [1]int8 + F__ccgo_pad2 [7]byte +} /* fts.h:58:2 */ + +// File tree traversal functions declarations. +// Copyright (C) 1994-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Copyright (c) 1989, 1993 +// The Regents of the University of California. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// 1. Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// 4. Neither the name of the University nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +// OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +// OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +// SUCH DAMAGE. +// +// @(#)fts.h 8.3 (Berkeley) 8/14/94 + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// POSIX Standard: 2.6 Primitive System Data Types + +type FTS = struct { + Ffts_cur uintptr + Ffts_child uintptr + Ffts_array uintptr + Ffts_dev Dev_t + Ffts_path uintptr + Ffts_rfd int32 + Ffts_pathlen int32 + Ffts_nitems int32 + F__ccgo_pad1 [4]byte + Ffts_compar uintptr + Ffts_options int32 + F__ccgo_pad2 [4]byte +} /* fts.h:81:3 */ + +type FTSENT = X_ftsent /* fts.h:147:3 */ + +var _ int8 /* gen.c:5:13: */ diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/grp/capi_linux_loong64.go temporal-1.22.5/src/vendor/modernc.org/libc/grp/capi_linux_loong64.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/grp/capi_linux_loong64.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/grp/capi_linux_loong64.go 2024-02-23 09:46:15.000000000 +0000 @@ -0,0 +1,5 @@ +// Code generated by 'ccgo grp/gen.c -crt-import-path -export-defines -export-enums -export-externs X -export-fields F -export-structs -export-typedefs -header -hide _OSSwapInt16,_OSSwapInt32,_OSSwapInt64 -ignore-unsupported-alignment -o grp/grp_linux_amd64.go -pkgname grp', DO NOT EDIT. 
+ +package grp + +var CAPI = map[string]struct{}{} diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/grp/grp_linux_loong64.go temporal-1.22.5/src/vendor/modernc.org/libc/grp/grp_linux_loong64.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/grp/grp_linux_loong64.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/grp/grp_linux_loong64.go 2024-02-23 09:46:15.000000000 +0000 @@ -0,0 +1,783 @@ +// Code generated by 'ccgo grp/gen.c -crt-import-path "" -export-defines "" -export-enums "" -export-externs X -export-fields F -export-structs "" -export-typedefs "" -header -hide _OSSwapInt16,_OSSwapInt32,_OSSwapInt64 -ignore-unsupported-alignment -o grp/grp_linux_amd64.go -pkgname grp', DO NOT EDIT. + +package grp + +import ( + "math" + "reflect" + "sync/atomic" + "unsafe" +) + +var _ = math.Pi +var _ reflect.Kind +var _ atomic.Value +var _ unsafe.Pointer + +const ( + NSS_BUFLEN_GROUP = 1024 // grp.h:114:1: + X_ATFILE_SOURCE = 1 // features.h:342:1: + X_BITS_TIME64_H = 1 // time64.h:24:1: + X_BITS_TYPESIZES_H = 1 // typesizes.h:24:1: + X_BITS_TYPES_H = 1 // types.h:24:1: + X_BSD_SIZE_T_ = 0 // stddef.h:189:1: + X_BSD_SIZE_T_DEFINED_ = 0 // stddef.h:192:1: + X_DEFAULT_SOURCE = 1 // features.h:227:1: + X_FEATURES_H = 1 // features.h:19:1: + X_FILE_OFFSET_BITS = 64 // :25:1: + X_GCC_SIZE_T = 0 // stddef.h:195:1: + X_GRP_H = 1 // grp.h:23:1: + X_LP64 = 1 // :284:1: + X_POSIX_C_SOURCE = 200809 // features.h:281:1: + X_POSIX_SOURCE = 1 // features.h:279:1: + X_SIZET_ = 0 // stddef.h:196:1: + X_SIZE_T = 0 // stddef.h:183:1: + X_SIZE_T_ = 0 // stddef.h:188:1: + X_SIZE_T_DECLARED = 0 // stddef.h:193:1: + X_SIZE_T_DEFINED = 0 // stddef.h:191:1: + X_SIZE_T_DEFINED_ = 0 // stddef.h:190:1: + X_STDC_PREDEF_H = 1 // :162:1: + X_SYS_CDEFS_H = 1 // cdefs.h:19:1: + X_SYS_SIZE_T_H = 0 // stddef.h:184:1: + X_T_SIZE = 0 // stddef.h:186:1: + X_T_SIZE_ = 0 // stddef.h:185:1: + Linux = 1 // :231:1: + Unix = 1 // :177:1: +) + +type Ptrdiff_t = int64 /* :3:26 */ + +type Size_t = uint64 /* :9:23 */ + +type Wchar_t = int32 /* :15:24 */ + +type X__int128_t = struct { + Flo int64 + Fhi int64 +} /* :21:43 */ // must match modernc.org/mathutil.Int128 +type X__uint128_t = struct { + Flo uint64 + Fhi uint64 +} /* :22:44 */ // must match modernc.org/mathutil.Int128 + +type X__builtin_va_list = uintptr /* :46:14 */ +type X__float128 = float64 /* :47:21 */ + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// POSIX Standard: 9.2.1 Group Database Access + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. 
+// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// These are defined by the user (or the compiler) +// to specify the desired environment: +// +// __STRICT_ANSI__ ISO Standard C. +// _ISOC99_SOURCE Extensions to ISO C89 from ISO C99. +// _ISOC11_SOURCE Extensions to ISO C99 from ISO C11. +// _ISOC2X_SOURCE Extensions to ISO C99 from ISO C2X. +// __STDC_WANT_LIB_EXT2__ +// Extensions to ISO C99 from TR 27431-2:2010. +// __STDC_WANT_IEC_60559_BFP_EXT__ +// Extensions to ISO C11 from TS 18661-1:2014. +// __STDC_WANT_IEC_60559_FUNCS_EXT__ +// Extensions to ISO C11 from TS 18661-4:2015. +// __STDC_WANT_IEC_60559_TYPES_EXT__ +// Extensions to ISO C11 from TS 18661-3:2015. +// +// _POSIX_SOURCE IEEE Std 1003.1. +// _POSIX_C_SOURCE If ==1, like _POSIX_SOURCE; if >=2 add IEEE Std 1003.2; +// if >=199309L, add IEEE Std 1003.1b-1993; +// if >=199506L, add IEEE Std 1003.1c-1995; +// if >=200112L, all of IEEE 1003.1-2004 +// if >=200809L, all of IEEE 1003.1-2008 +// _XOPEN_SOURCE Includes POSIX and XPG things. Set to 500 if +// Single Unix conformance is wanted, to 600 for the +// sixth revision, to 700 for the seventh revision. +// _XOPEN_SOURCE_EXTENDED XPG things and X/Open Unix extensions. +// _LARGEFILE_SOURCE Some more functions for correct standard I/O. +// _LARGEFILE64_SOURCE Additional functionality from LFS for large files. +// _FILE_OFFSET_BITS=N Select default filesystem interface. +// _ATFILE_SOURCE Additional *at interfaces. +// _GNU_SOURCE All of the above, plus GNU extensions. +// _DEFAULT_SOURCE The default set of features (taking precedence over +// __STRICT_ANSI__). +// +// _FORTIFY_SOURCE Add security hardening to many library functions. +// Set to 1 or 2; 2 performs stricter checks than 1. +// +// _REENTRANT, _THREAD_SAFE +// Obsolete; equivalent to _POSIX_C_SOURCE=199506L. +// +// The `-ansi' switch to the GNU C compiler, and standards conformance +// options such as `-std=c99', define __STRICT_ANSI__. If none of +// these are defined, or if _DEFAULT_SOURCE is defined, the default is +// to have _POSIX_SOURCE set to one and _POSIX_C_SOURCE set to +// 200809L, as well as enabling miscellaneous functions from BSD and +// SVID. If more than one of these are defined, they accumulate. For +// example __STRICT_ANSI__, _POSIX_SOURCE and _POSIX_C_SOURCE together +// give you ISO C, 1003.1, and 1003.2, but nothing else. +// +// These are defined by this file and are used by the +// header files to decide what to declare or define: +// +// __GLIBC_USE (F) Define things from feature set F. This is defined +// to 1 or 0; the subsequent macros are either defined +// or undefined, and those tests should be moved to +// __GLIBC_USE. +// __USE_ISOC11 Define ISO C11 things. +// __USE_ISOC99 Define ISO C99 things. +// __USE_ISOC95 Define ISO C90 AMD1 (C95) things. +// __USE_ISOCXX11 Define ISO C++11 things. +// __USE_POSIX Define IEEE Std 1003.1 things. 
+// __USE_POSIX2 Define IEEE Std 1003.2 things. +// __USE_POSIX199309 Define IEEE Std 1003.1, and .1b things. +// __USE_POSIX199506 Define IEEE Std 1003.1, .1b, .1c and .1i things. +// __USE_XOPEN Define XPG things. +// __USE_XOPEN_EXTENDED Define X/Open Unix things. +// __USE_UNIX98 Define Single Unix V2 things. +// __USE_XOPEN2K Define XPG6 things. +// __USE_XOPEN2KXSI Define XPG6 XSI things. +// __USE_XOPEN2K8 Define XPG7 things. +// __USE_XOPEN2K8XSI Define XPG7 XSI things. +// __USE_LARGEFILE Define correct standard I/O things. +// __USE_LARGEFILE64 Define LFS things with separate names. +// __USE_FILE_OFFSET64 Define 64bit interface as default. +// __USE_MISC Define things from 4.3BSD or System V Unix. +// __USE_ATFILE Define *at interfaces and AT_* constants for them. +// __USE_GNU Define GNU extensions. +// __USE_FORTIFY_LEVEL Additional security measures used, according to level. +// +// The macros `__GNU_LIBRARY__', `__GLIBC__', and `__GLIBC_MINOR__' are +// defined by this file unconditionally. `__GNU_LIBRARY__' is provided +// only for compatibility. All new code should use the other symbols +// to test for features. +// +// All macros listed above as possibly being defined by this file are +// explicitly undefined if they are not explicitly defined. +// Feature-test macros that are not defined by the user or compiler +// but are implied by the other feature-test macros defined (or by the +// lack of any definitions) are defined by the file. +// +// ISO C feature test macros depend on the definition of the macro +// when an affected header is included, not when the first system +// header is included, and so they are handled in +// , which does not have a multiple include +// guard. Feature test macros that can be handled from the first +// system header included are handled here. + +// Undefine everything, so we get a clean slate. + +// Suppress kernel-name space pollution unless user expressedly asks +// for it. + +// Convenience macro to test the version of gcc. +// Use like this: +// #if __GNUC_PREREQ (2,8) +// ... code requiring gcc 2.8 or later ... +// #endif +// Note: only works for GCC 2.0 and later, because __GNUC_MINOR__ was +// added in 2.0. + +// Similarly for clang. Features added to GCC after version 4.2 may +// or may not also be available in clang, and clang's definitions of +// __GNUC(_MINOR)__ are fixed at 4 and 2 respectively. Not all such +// features can be queried via __has_extension/__has_feature. + +// Whether to use feature set F. + +// _BSD_SOURCE and _SVID_SOURCE are deprecated aliases for +// _DEFAULT_SOURCE. If _DEFAULT_SOURCE is present we do not +// issue a warning; the expectation is that the source is being +// transitioned to use the new macro. + +// If _GNU_SOURCE was defined by the user, turn on all the other features. + +// If nothing (other than _GNU_SOURCE and _DEFAULT_SOURCE) is defined, +// define _DEFAULT_SOURCE. + +// This is to enable the ISO C2X extension. + +// This is to enable the ISO C11 extension. + +// This is to enable the ISO C99 extension. + +// This is to enable the ISO C90 Amendment 1:1995 extension. + +// If none of the ANSI/POSIX macros are defined, or if _DEFAULT_SOURCE +// is defined, use POSIX.1-2008 (or another version depending on +// _XOPEN_SOURCE). + +// Some C libraries once required _REENTRANT and/or _THREAD_SAFE to be +// defined in all multithreaded code. GNU libc has not required this +// for many years. 
We now treat them as compatibility synonyms for +// _POSIX_C_SOURCE=199506L, which is the earliest level of POSIX with +// comprehensive support for multithreaded code. Using them never +// lowers the selected level of POSIX conformance, only raises it. + +// The function 'gets' existed in C89, but is impossible to use +// safely. It has been removed from ISO C11 and ISO C++14. Note: for +// compatibility with various implementations of , this test +// must consider only the value of __cplusplus when compiling C++. + +// GNU formerly extended the scanf functions with modified format +// specifiers %as, %aS, and %a[...] that allocate a buffer for the +// input using malloc. This extension conflicts with ISO C99, which +// defines %a as a standalone format specifier that reads a floating- +// point number; moreover, POSIX.1-2008 provides the same feature +// using the modifier letter 'm' instead (%ms, %mS, %m[...]). +// +// We now follow C99 unless GNU extensions are active and the compiler +// is specifically in C89 or C++98 mode (strict or not). For +// instance, with GCC, -std=gnu11 will have C99-compliant scanf with +// or without -D_GNU_SOURCE, but -std=c89 -D_GNU_SOURCE will have the +// old extension. + +// Get definitions of __STDC_* predefined macros, if the compiler has +// not preincluded this header automatically. +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// This macro indicates that the installed library is the GNU C Library. +// For historic reasons the value now is 6 and this will stay from now +// on. The use of this variable is deprecated. Use __GLIBC__ and +// __GLIBC_MINOR__ now (see below) when you want to test for a specific +// GNU C library version and use the values in to get +// the sonames of the shared libraries. + +// Major and minor version number of the GNU C library package. Use +// these macros to test for features in specific releases. + +// This is here only because every header file already includes this one. +// Copyright (C) 1992-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// We are almost always included from features.h. 
+ +// The GNU libc does not support any K&R compilers or the traditional mode +// of ISO C compilers anymore. Check for some of the combinations not +// anymore supported. + +// Some user header file might have defined this before. + +// All functions, except those with callbacks or those that +// synchronize memory, are leaf functions. + +// GCC can always grok prototypes. For C++ programs we add throw() +// to help it optimize the function calls. But this works only with +// gcc 2.8.x and egcs. For gcc 3.2 and up we even mark C functions +// as non-throwing using a function attribute since programs can use +// the -fexceptions options for C code as well. + +// Compilers that are not clang may object to +// #if defined __clang__ && __has_extension(...) +// even though they do not need to evaluate the right-hand side of the &&. + +// These two macros are not used in glibc anymore. They are kept here +// only because some other projects expect the macros to be defined. + +// For these things, GCC behaves the ANSI way normally, +// and the non-ANSI way under -traditional. + +// This is not a typedef so `const __ptr_t' does the right thing. + +// C++ needs to know that types and declarations are C, not C++. + +// Fortify support. + +// Support for flexible arrays. +// Headers that should use flexible arrays only if they're "real" +// (e.g. only if they won't affect sizeof()) should test +// #if __glibc_c99_flexarr_available. + +// __asm__ ("xyz") is used throughout the headers to rename functions +// at the assembly language level. This is wrapped by the __REDIRECT +// macro, in order to support compilers that can do this some other +// way. When compilers don't support asm-names at all, we have to do +// preprocessor tricks instead (which don't have exactly the right +// semantics, but it's the best we can do). +// +// Example: +// int __REDIRECT(setpgrp, (__pid_t pid, __pid_t pgrp), setpgid); + +// +// #elif __SOME_OTHER_COMPILER__ +// +// # define __REDIRECT(name, proto, alias) name proto; _Pragma("let " #name " = " #alias) + +// GCC has various useful declarations that can be made with the +// `__attribute__' syntax. All of the ways we use this do fine if +// they are omitted for compilers that don't understand it. + +// At some point during the gcc 2.96 development the `malloc' attribute +// for functions was introduced. We don't want to use it unconditionally +// (although this would be possible) since it generates warnings. + +// Tell the compiler which arguments to an allocation function +// indicate the size of the allocation. + +// At some point during the gcc 2.96 development the `pure' attribute +// for functions was introduced. We don't want to use it unconditionally +// (although this would be possible) since it generates warnings. + +// This declaration tells the compiler that the value is constant. + +// At some point during the gcc 3.1 development the `used' attribute +// for functions was introduced. We don't want to use it unconditionally +// (although this would be possible) since it generates warnings. + +// Since version 3.2, gcc allows marking deprecated functions. + +// Since version 4.5, gcc also allows one to specify the message printed +// when a deprecated function is used. clang claims to be gcc 4.2, but +// may also support this feature. + +// At some point during the gcc 2.8 development the `format_arg' attribute +// for functions was introduced. We don't want to use it unconditionally +// (although this would be possible) since it generates warnings. 
+// If several `format_arg' attributes are given for the same function, in +// gcc-3.0 and older, all but the last one are ignored. In newer gccs, +// all designated arguments are considered. + +// At some point during the gcc 2.97 development the `strfmon' format +// attribute for functions was introduced. We don't want to use it +// unconditionally (although this would be possible) since it +// generates warnings. + +// The nonull function attribute allows to mark pointer parameters which +// must not be NULL. + +// If fortification mode, we warn about unused results of certain +// function calls which can lead to problems. + +// Forces a function to be always inlined. +// The Linux kernel defines __always_inline in stddef.h (283d7573), and +// it conflicts with this definition. Therefore undefine it first to +// allow either header to be included first. + +// Associate error messages with the source location of the call site rather +// than with the source location inside the function. + +// GCC 4.3 and above with -std=c99 or -std=gnu99 implements ISO C99 +// inline semantics, unless -fgnu89-inline is used. Using __GNUC_STDC_INLINE__ +// or __GNUC_GNU_INLINE is not a good enough check for gcc because gcc versions +// older than 4.3 may define these macros and still not guarantee GNU inlining +// semantics. +// +// clang++ identifies itself as gcc-4.2, but has support for GNU inlining +// semantics, that can be checked for by using the __GNUC_STDC_INLINE_ and +// __GNUC_GNU_INLINE__ macro definitions. + +// GCC 4.3 and above allow passing all anonymous arguments of an +// __extern_always_inline function to some other vararg function. + +// It is possible to compile containing GCC extensions even if GCC is +// run in pedantic mode if the uses are carefully marked using the +// `__extension__' keyword. But this is not generally available before +// version 2.8. + +// __restrict is known in EGCS 1.2 and above. + +// ISO C99 also allows to declare arrays as non-overlapping. The syntax is +// array_name[restrict] +// GCC 3.1 supports this. + +// Describes a char array whose address can safely be passed as the first +// argument to strncpy and strncat, as the char array is not necessarily +// a NUL-terminated string. + +// Undefine (also defined in libc-symbols.h). +// Copies attributes from the declaration or type referenced by +// the argument. + +// Determine the wordsize from the preprocessor defines. + +// Both x86-64 and x32 use the 64-bit system call interface. +// Properties of long double type. ldbl-96 version. +// Copyright (C) 2016-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// long double is distinct from double, so there is nothing to +// define here. + +// __glibc_macro_warning (MESSAGE) issues warning MESSAGE. This is +// intended for use in preprocessor macros. 
+// +// Note: MESSAGE must be a _single_ string; concatenation of string +// literals is not supported. + +// Generic selection (ISO C11) is a C-only feature, available in GCC +// since version 4.9. Previous versions do not provide generic +// selection, even though they might set __STDC_VERSION__ to 201112L, +// when in -std=c11 mode. Thus, we must check for !defined __GNUC__ +// when testing __STDC_VERSION__ for generic selection support. +// On the other hand, Clang also defines __GNUC__, so a clang-specific +// check is required to enable the use of generic selection. + +// If we don't have __REDIRECT, prototypes will be missing if +// __USE_FILE_OFFSET64 but not __USE_LARGEFILE[64]. + +// Decide whether we can define 'extern inline' functions in headers. + +// This is here only because every header file already includes this one. +// Get the definitions of all the appropriate `__stub_FUNCTION' symbols. +// contains `#define __stub_FUNCTION' when FUNCTION is a stub +// that will always return failure (and set errno to ENOSYS). +// This file is automatically generated. +// This file selects the right generated file of `__stub_FUNCTION' macros +// based on the architecture being compiled for. + +// This file is automatically generated. +// It defines a symbol `__stub_FUNCTION' for each function +// in the C library which is a stub, meaning it will fail +// every time called, usually setting errno to ENOSYS. + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Determine the wordsize from the preprocessor defines. + +// Both x86-64 and x32 use the 64-bit system call interface. +// Bit size of the time_t type at glibc build time, x86-64 and x32 case. +// Copyright (C) 2018-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. 
+// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// For others, time size is word size. + +// Convenience types. +type X__u_char = uint8 /* types.h:31:23 */ +type X__u_short = uint16 /* types.h:32:28 */ +type X__u_int = uint32 /* types.h:33:22 */ +type X__u_long = uint64 /* types.h:34:27 */ + +// Fixed-size types, underlying types depend on word size and compiler. +type X__int8_t = int8 /* types.h:37:21 */ +type X__uint8_t = uint8 /* types.h:38:23 */ +type X__int16_t = int16 /* types.h:39:26 */ +type X__uint16_t = uint16 /* types.h:40:28 */ +type X__int32_t = int32 /* types.h:41:20 */ +type X__uint32_t = uint32 /* types.h:42:22 */ +type X__int64_t = int64 /* types.h:44:25 */ +type X__uint64_t = uint64 /* types.h:45:27 */ + +// Smallest types with at least a given width. +type X__int_least8_t = X__int8_t /* types.h:52:18 */ +type X__uint_least8_t = X__uint8_t /* types.h:53:19 */ +type X__int_least16_t = X__int16_t /* types.h:54:19 */ +type X__uint_least16_t = X__uint16_t /* types.h:55:20 */ +type X__int_least32_t = X__int32_t /* types.h:56:19 */ +type X__uint_least32_t = X__uint32_t /* types.h:57:20 */ +type X__int_least64_t = X__int64_t /* types.h:58:19 */ +type X__uint_least64_t = X__uint64_t /* types.h:59:20 */ + +// quad_t is also 64 bits. +type X__quad_t = int64 /* types.h:63:18 */ +type X__u_quad_t = uint64 /* types.h:64:27 */ + +// Largest integral types. +type X__intmax_t = int64 /* types.h:72:18 */ +type X__uintmax_t = uint64 /* types.h:73:27 */ + +// The machine-dependent file defines __*_T_TYPE +// macros for each of the OS types we define below. The definitions +// of those macros must use the following macros for underlying types. +// We define __S_TYPE and __U_TYPE for the signed and unsigned +// variants of each of the following integer types on this machine. +// +// 16 -- "natural" 16-bit type (always short) +// 32 -- "natural" 32-bit type (always int) +// 64 -- "natural" 64-bit type (long or long long) +// LONG32 -- 32-bit type, traditionally long +// QUAD -- 64-bit type, traditionally long long +// WORD -- natural type of __WORDSIZE bits (int or long) +// LONGWORD -- type of __WORDSIZE bits, traditionally long +// +// We distinguish WORD/LONGWORD, 32/LONG32, and 64/QUAD so that the +// conventional uses of `long' or `long long' type modifiers match the +// types we define, even when a less-adorned type would be the same size. +// This matters for (somewhat) portably writing printf/scanf formats for +// these types, where using the appropriate l or ll format modifiers can +// make the typedefs and the formats match up across all GNU platforms. If +// we used `long' when it's 64 bits where `long long' is expected, then the +// compiler would warn about the formats not matching the argument types, +// and the programmer changing them to shut up the compiler would break the +// program's portability. 
+// +// Here we assume what is presently the case in all the GCC configurations +// we support: long long is always 64 bits, long is always word/address size, +// and int is always 32 bits. + +// No need to mark the typedef with __extension__. +// bits/typesizes.h -- underlying types for *_t. Linux/x86-64 version. +// Copyright (C) 2012-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// See for the meaning of these macros. This file exists so +// that need not vary across different GNU platforms. + +// X32 kernel interface is 64-bit. + +// Tell the libc code that off_t and off64_t are actually the same type +// for all ABI purposes, even if possibly expressed as different base types +// for C type-checking purposes. + +// Same for ino_t and ino64_t. + +// And for __rlim_t and __rlim64_t. + +// And for fsblkcnt_t, fsblkcnt64_t, fsfilcnt_t and fsfilcnt64_t. + +// Number of descriptors that can fit in an `fd_set'. + +// bits/time64.h -- underlying types for __time64_t. Generic version. +// Copyright (C) 2018-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Define __TIME64_T_TYPE so that it is always a 64-bit type. + +// If we already have 64-bit time type then use it. + +type X__dev_t = uint64 /* types.h:145:25 */ // Type of device numbers. +type X__uid_t = uint32 /* types.h:146:25 */ // Type of user identifications. +type X__gid_t = uint32 /* types.h:147:25 */ // Type of group identifications. +type X__ino_t = uint64 /* types.h:148:25 */ // Type of file serial numbers. +type X__ino64_t = uint64 /* types.h:149:27 */ // Type of file serial numbers (LFS). +type X__mode_t = uint32 /* types.h:150:26 */ // Type of file attribute bitmasks. +type X__nlink_t = uint64 /* types.h:151:27 */ // Type of file link counts. +type X__off_t = int64 /* types.h:152:25 */ // Type of file sizes and offsets. +type X__off64_t = int64 /* types.h:153:27 */ // Type of file sizes and offsets (LFS). +type X__pid_t = int32 /* types.h:154:25 */ // Type of process identifications. +type X__fsid_t = struct{ F__val [2]int32 } /* types.h:155:26 */ // Type of file system IDs. 
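The alias declarations above and immediately below pin each glibc __*_t to a fixed-width Go type for this 64-bit target. A minimal, self-contained Go sketch, not part of the patch, that mirrors a few of these aliases and checks that their widths match what the type comments promise; the alias names are copied from the generated file, the size checks are illustrative only:

package main

import (
	"fmt"
	"unsafe"
)

// Local copies of a few generated aliases from the vendored file above;
// on the 64-bit Linux targets these are the widths the glibc comments
// describe.
type (
	X__off_t     = int64                     // file sizes and offsets
	X__time_t    = int64                     // seconds since the Epoch
	X__socklen_t = uint32                    // socket address lengths
	X__fsid_t    = struct{ F__val [2]int32 } // file system IDs
)

func main() {
	fmt.Println(unsafe.Sizeof(X__off_t(0)))     // 8
	fmt.Println(unsafe.Sizeof(X__time_t(0)))    // 8
	fmt.Println(unsafe.Sizeof(X__socklen_t(0))) // 4
	fmt.Println(unsafe.Sizeof(X__fsid_t{}))     // 8
}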
+type X__clock_t = int64 /* types.h:156:27 */ // Type of CPU usage counts. +type X__rlim_t = uint64 /* types.h:157:26 */ // Type for resource measurement. +type X__rlim64_t = uint64 /* types.h:158:28 */ // Type for resource measurement (LFS). +type X__id_t = uint32 /* types.h:159:24 */ // General type for IDs. +type X__time_t = int64 /* types.h:160:26 */ // Seconds since the Epoch. +type X__useconds_t = uint32 /* types.h:161:30 */ // Count of microseconds. +type X__suseconds_t = int64 /* types.h:162:31 */ // Signed count of microseconds. + +type X__daddr_t = int32 /* types.h:164:27 */ // The type of a disk address. +type X__key_t = int32 /* types.h:165:25 */ // Type of an IPC key. + +// Clock ID used in clock and timer functions. +type X__clockid_t = int32 /* types.h:168:29 */ + +// Timer ID returned by `timer_create'. +type X__timer_t = uintptr /* types.h:171:12 */ + +// Type to represent block size. +type X__blksize_t = int64 /* types.h:174:29 */ + +// Types from the Large File Support interface. + +// Type to count number of disk blocks. +type X__blkcnt_t = int64 /* types.h:179:28 */ +type X__blkcnt64_t = int64 /* types.h:180:30 */ + +// Type to count file system blocks. +type X__fsblkcnt_t = uint64 /* types.h:183:30 */ +type X__fsblkcnt64_t = uint64 /* types.h:184:32 */ + +// Type to count file system nodes. +type X__fsfilcnt_t = uint64 /* types.h:187:30 */ +type X__fsfilcnt64_t = uint64 /* types.h:188:32 */ + +// Type of miscellaneous file system fields. +type X__fsword_t = int64 /* types.h:191:28 */ + +type X__ssize_t = int64 /* types.h:193:27 */ // Type of a byte count, or error. + +// Signed long type used in system calls. +type X__syscall_slong_t = int64 /* types.h:196:33 */ +// Unsigned long type used in system calls. +type X__syscall_ulong_t = uint64 /* types.h:198:33 */ + +// These few don't really vary by system, they always correspond +// +// to one of the other defined types. +type X__loff_t = X__off64_t /* types.h:202:19 */ // Type of file sizes and offsets (LFS). +type X__caddr_t = uintptr /* types.h:203:14 */ + +// Duplicates info from stdint.h but this is used in unistd.h. +type X__intptr_t = int64 /* types.h:206:25 */ + +// Duplicate info from sys/socket.h. +type X__socklen_t = uint32 /* types.h:209:23 */ + +// C99: An integer type that can be accessed as an atomic entity, +// +// even in the presence of asynchronous interrupts. +// It is not currently necessary for this to be machine-specific. +type X__sig_atomic_t = int32 /* types.h:214:13 */ + +// Wide character type. +// Locale-writers should change this as necessary to +// be big enough to hold unique values not between 0 and 127, +// and not (wchar_t) -1, for each defined multibyte character. + +// Define this type if we are doing the whole job, +// or if we want this type in particular. + +// A null pointer constant. + +// For the Single Unix specification we must define this type here. +type Gid_t = X__gid_t /* grp.h:37:17 */ + +// The group structure. 
+type Group = struct { + Fgr_name uintptr + Fgr_passwd uintptr + Fgr_gid X__gid_t + F__ccgo_pad1 [4]byte + Fgr_mem uintptr +} /* grp.h:42:1 */ + +var _ int8 /* gen.c:2:13: */ diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/langinfo/capi_linux_loong64.go temporal-1.22.5/src/vendor/modernc.org/libc/langinfo/capi_linux_loong64.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/langinfo/capi_linux_loong64.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/langinfo/capi_linux_loong64.go 2024-02-23 09:46:15.000000000 +0000 @@ -0,0 +1,5 @@ +// Code generated by 'ccgo langinfo/gen.c -crt-import-path -export-defines -export-enums -export-externs X -export-fields F -export-structs -export-typedefs -header -hide _OSSwapInt16,_OSSwapInt32,_OSSwapInt64 -ignore-unsupported-alignment -o langinfo/langinfo_linux_amd64.go -pkgname langinfo', DO NOT EDIT. + +package langinfo + +var CAPI = map[string]struct{}{} diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/langinfo/langinfo_linux_loong64.go temporal-1.22.5/src/vendor/modernc.org/libc/langinfo/langinfo_linux_loong64.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/langinfo/langinfo_linux_loong64.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/langinfo/langinfo_linux_loong64.go 2024-02-23 09:46:15.000000000 +0000 @@ -0,0 +1,1058 @@ +// Code generated by 'ccgo langinfo/gen.c -crt-import-path "" -export-defines "" -export-enums "" -export-externs X -export-fields F -export-structs "" -export-typedefs "" -header -hide _OSSwapInt16,_OSSwapInt32,_OSSwapInt64 -ignore-unsupported-alignment -o langinfo/langinfo_linux_amd64.go -pkgname langinfo', DO NOT EDIT. + +package langinfo + +import ( + "math" + "reflect" + "sync/atomic" + "unsafe" +) + +var _ = math.Pi +var _ reflect.Kind +var _ atomic.Value +var _ unsafe.Pointer + +const ( + NL_CAT_LOCALE = 1 // nl_types.h:27:1: + NL_SETD = 1 // nl_types.h:24:1: + X_ATFILE_SOURCE = 1 // features.h:342:1: + X_BITS_LOCALE_H = 1 // locale.h:24:1: + X_BITS_TYPES_LOCALE_T_H = 1 // locale_t.h:20:1: + X_BITS_TYPES___LOCALE_T_H = 1 // __locale_t.h:21:1: + X_DEFAULT_SOURCE = 1 // features.h:227:1: + X_FEATURES_H = 1 // features.h:19:1: + X_FILE_OFFSET_BITS = 64 // :25:1: + X_LANGINFO_H = 1 // langinfo.h:20:1: + X_LP64 = 1 // :284:1: + X_NL_TYPES_H = 1 // nl_types.h:19:1: + X_POSIX_C_SOURCE = 200809 // features.h:281:1: + X_POSIX_SOURCE = 1 // features.h:279:1: + X_STDC_PREDEF_H = 1 // :162:1: + X_SYS_CDEFS_H = 1 // cdefs.h:19:1: + Linux = 1 // :231:1: + Unix = 1 // :177:1: +) + +// Definition of locale category symbol values. +// Copyright (C) 2001-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Construct an `nl_item' value for `nl_langinfo' from a locale category +// (LC_*) and an item index within the category. 
Some code may depend on +// the item values within a category increasing monotonically with the +// indices. + +// Extract the category and item index from a constructed `nl_item' value. + +// Enumeration of locale items that can be queried with `nl_langinfo'. +const ( /* langinfo.h:41:1: */ + // LC_TIME category: date and time formatting. + + // Abbreviated days of the week. + ABDAY_1 = 131072 // Sun + ABDAY_2 = 131073 + ABDAY_3 = 131074 + ABDAY_4 = 131075 + ABDAY_5 = 131076 + ABDAY_6 = 131077 + ABDAY_7 = 131078 + + // Long-named days of the week. + DAY_1 = 131079 // Sunday + DAY_2 = 131080 // Monday + DAY_3 = 131081 // Tuesday + DAY_4 = 131082 // Wednesday + DAY_5 = 131083 // Thursday + DAY_6 = 131084 // Friday + DAY_7 = 131085 // Saturday + + // Abbreviated month names, in the grammatical form used when the month + // is a part of a complete date. + ABMON_1 = 131086 // Jan + ABMON_2 = 131087 + ABMON_3 = 131088 + ABMON_4 = 131089 + ABMON_5 = 131090 + ABMON_6 = 131091 + ABMON_7 = 131092 + ABMON_8 = 131093 + ABMON_9 = 131094 + ABMON_10 = 131095 + ABMON_11 = 131096 + ABMON_12 = 131097 + + // Long month names, in the grammatical form used when the month + // is a part of a complete date. + MON_1 = 131098 // January + MON_2 = 131099 + MON_3 = 131100 + MON_4 = 131101 + MON_5 = 131102 + MON_6 = 131103 + MON_7 = 131104 + MON_8 = 131105 + MON_9 = 131106 + MON_10 = 131107 + MON_11 = 131108 + MON_12 = 131109 + + AM_STR = 131110 // Ante meridiem string. + PM_STR = 131111 // Post meridiem string. + + D_T_FMT = 131112 // Date and time format for strftime. + D_FMT = 131113 // Date format for strftime. + T_FMT = 131114 // Time format for strftime. + T_FMT_AMPM = 131115 // 12-hour time format for strftime. + + ERA = 131116 // Alternate era. + X__ERA_YEAR = 131117 // Year in alternate era format. + ERA_D_FMT = 131118 // Date in alternate era format. + ALT_DIGITS = 131119 // Alternate symbols for digits. + ERA_D_T_FMT = 131120 // Date and time in alternate era format. + ERA_T_FMT = 131121 // Time in alternate era format. + + X_NL_TIME_ERA_NUM_ENTRIES = 131122 // Number entries in the era arrays. + X_NL_TIME_ERA_ENTRIES = 131123 // Structure with era entries in usable form. + + X_NL_WABDAY_1 = 131124 // Sun + X_NL_WABDAY_2 = 131125 + X_NL_WABDAY_3 = 131126 + X_NL_WABDAY_4 = 131127 + X_NL_WABDAY_5 = 131128 + X_NL_WABDAY_6 = 131129 + X_NL_WABDAY_7 = 131130 + + // Long-named days of the week. + X_NL_WDAY_1 = 131131 // Sunday + X_NL_WDAY_2 = 131132 // Monday + X_NL_WDAY_3 = 131133 // Tuesday + X_NL_WDAY_4 = 131134 // Wednesday + X_NL_WDAY_5 = 131135 // Thursday + X_NL_WDAY_6 = 131136 // Friday + X_NL_WDAY_7 = 131137 // Saturday + + // Abbreviated month names, in the grammatical form used when the month + // is a part of a complete date. + X_NL_WABMON_1 = 131138 // Jan + X_NL_WABMON_2 = 131139 + X_NL_WABMON_3 = 131140 + X_NL_WABMON_4 = 131141 + X_NL_WABMON_5 = 131142 + X_NL_WABMON_6 = 131143 + X_NL_WABMON_7 = 131144 + X_NL_WABMON_8 = 131145 + X_NL_WABMON_9 = 131146 + X_NL_WABMON_10 = 131147 + X_NL_WABMON_11 = 131148 + X_NL_WABMON_12 = 131149 + + // Long month names, in the grammatical form used when the month + // is a part of a complete date. + X_NL_WMON_1 = 131150 // January + X_NL_WMON_2 = 131151 + X_NL_WMON_3 = 131152 + X_NL_WMON_4 = 131153 + X_NL_WMON_5 = 131154 + X_NL_WMON_6 = 131155 + X_NL_WMON_7 = 131156 + X_NL_WMON_8 = 131157 + X_NL_WMON_9 = 131158 + X_NL_WMON_10 = 131159 + X_NL_WMON_11 = 131160 + X_NL_WMON_12 = 131161 + + X_NL_WAM_STR = 131162 // Ante meridiem string. 
+ X_NL_WPM_STR = 131163 // Post meridiem string. + + X_NL_WD_T_FMT = 131164 // Date and time format for strftime. + X_NL_WD_FMT = 131165 // Date format for strftime. + X_NL_WT_FMT = 131166 // Time format for strftime. + X_NL_WT_FMT_AMPM = 131167 // 12-hour time format for strftime. + + X_NL_WERA_YEAR = 131168 // Year in alternate era format. + X_NL_WERA_D_FMT = 131169 // Date in alternate era format. + X_NL_WALT_DIGITS = 131170 // Alternate symbols for digits. + X_NL_WERA_D_T_FMT = 131171 // Date and time in alternate era format. + X_NL_WERA_T_FMT = 131172 // Time in alternate era format. + + X_NL_TIME_WEEK_NDAYS = 131173 + X_NL_TIME_WEEK_1STDAY = 131174 + X_NL_TIME_WEEK_1STWEEK = 131175 + X_NL_TIME_FIRST_WEEKDAY = 131176 + X_NL_TIME_FIRST_WORKDAY = 131177 + X_NL_TIME_CAL_DIRECTION = 131178 + X_NL_TIME_TIMEZONE = 131179 + + X_DATE_FMT = 131180 // strftime format for date. + X_NL_W_DATE_FMT = 131181 + + X_NL_TIME_CODESET = 131182 + + // Long month names, in the grammatical form used when the month + // is named by itself. + X__ALTMON_1 = 131183 // January + X__ALTMON_2 = 131184 + X__ALTMON_3 = 131185 + X__ALTMON_4 = 131186 + X__ALTMON_5 = 131187 + X__ALTMON_6 = 131188 + X__ALTMON_7 = 131189 + X__ALTMON_8 = 131190 + X__ALTMON_9 = 131191 + X__ALTMON_10 = 131192 + X__ALTMON_11 = 131193 + X__ALTMON_12 = 131194 + + // Long month names, in the grammatical form used when the month + // is named by itself. + X_NL_WALTMON_1 = 131195 // January + X_NL_WALTMON_2 = 131196 + X_NL_WALTMON_3 = 131197 + X_NL_WALTMON_4 = 131198 + X_NL_WALTMON_5 = 131199 + X_NL_WALTMON_6 = 131200 + X_NL_WALTMON_7 = 131201 + X_NL_WALTMON_8 = 131202 + X_NL_WALTMON_9 = 131203 + X_NL_WALTMON_10 = 131204 + X_NL_WALTMON_11 = 131205 + X_NL_WALTMON_12 = 131206 + + // Abbreviated month names, in the grammatical form used when the month + // is named by itself. + X_NL_ABALTMON_1 = 131207 // Jan + X_NL_ABALTMON_2 = 131208 + X_NL_ABALTMON_3 = 131209 + X_NL_ABALTMON_4 = 131210 + X_NL_ABALTMON_5 = 131211 + X_NL_ABALTMON_6 = 131212 + X_NL_ABALTMON_7 = 131213 + X_NL_ABALTMON_8 = 131214 + X_NL_ABALTMON_9 = 131215 + X_NL_ABALTMON_10 = 131216 + X_NL_ABALTMON_11 = 131217 + X_NL_ABALTMON_12 = 131218 + + // Abbreviated month names, in the grammatical form used when the month + // is named by itself. + X_NL_WABALTMON_1 = 131219 // Jan + X_NL_WABALTMON_2 = 131220 + X_NL_WABALTMON_3 = 131221 + X_NL_WABALTMON_4 = 131222 + X_NL_WABALTMON_5 = 131223 + X_NL_WABALTMON_6 = 131224 + X_NL_WABALTMON_7 = 131225 + X_NL_WABALTMON_8 = 131226 + X_NL_WABALTMON_9 = 131227 + X_NL_WABALTMON_10 = 131228 + X_NL_WABALTMON_11 = 131229 + X_NL_WABALTMON_12 = 131230 + + X_NL_NUM_LC_TIME = 131231 // Number of indices in LC_TIME category. + + // LC_COLLATE category: text sorting. + // This information is accessed by the strcoll and strxfrm functions. + // These `nl_langinfo' names are used only internally. 
+ X_NL_COLLATE_NRULES = 196608 + X_NL_COLLATE_RULESETS = 196609 + X_NL_COLLATE_TABLEMB = 196610 + X_NL_COLLATE_WEIGHTMB = 196611 + X_NL_COLLATE_EXTRAMB = 196612 + X_NL_COLLATE_INDIRECTMB = 196613 + X_NL_COLLATE_GAP1 = 196614 + X_NL_COLLATE_GAP2 = 196615 + X_NL_COLLATE_GAP3 = 196616 + X_NL_COLLATE_TABLEWC = 196617 + X_NL_COLLATE_WEIGHTWC = 196618 + X_NL_COLLATE_EXTRAWC = 196619 + X_NL_COLLATE_INDIRECTWC = 196620 + X_NL_COLLATE_SYMB_HASH_SIZEMB = 196621 + X_NL_COLLATE_SYMB_TABLEMB = 196622 + X_NL_COLLATE_SYMB_EXTRAMB = 196623 + X_NL_COLLATE_COLLSEQMB = 196624 + X_NL_COLLATE_COLLSEQWC = 196625 + X_NL_COLLATE_CODESET = 196626 + X_NL_NUM_LC_COLLATE = 196627 + + // LC_CTYPE category: character classification. + // This information is accessed by the functions in . + // These `nl_langinfo' names are used only internally. + X_NL_CTYPE_CLASS = 0 + X_NL_CTYPE_TOUPPER = 1 + X_NL_CTYPE_GAP1 = 2 + X_NL_CTYPE_TOLOWER = 3 + X_NL_CTYPE_GAP2 = 4 + X_NL_CTYPE_CLASS32 = 5 + X_NL_CTYPE_GAP3 = 6 + X_NL_CTYPE_GAP4 = 7 + X_NL_CTYPE_GAP5 = 8 + X_NL_CTYPE_GAP6 = 9 + X_NL_CTYPE_CLASS_NAMES = 10 + X_NL_CTYPE_MAP_NAMES = 11 + X_NL_CTYPE_WIDTH = 12 + X_NL_CTYPE_MB_CUR_MAX = 13 + X_NL_CTYPE_CODESET_NAME = 14 + CODESET = 14 + X_NL_CTYPE_TOUPPER32 = 15 + X_NL_CTYPE_TOLOWER32 = 16 + X_NL_CTYPE_CLASS_OFFSET = 17 + X_NL_CTYPE_MAP_OFFSET = 18 + X_NL_CTYPE_INDIGITS_MB_LEN = 19 + X_NL_CTYPE_INDIGITS0_MB = 20 + X_NL_CTYPE_INDIGITS1_MB = 21 + X_NL_CTYPE_INDIGITS2_MB = 22 + X_NL_CTYPE_INDIGITS3_MB = 23 + X_NL_CTYPE_INDIGITS4_MB = 24 + X_NL_CTYPE_INDIGITS5_MB = 25 + X_NL_CTYPE_INDIGITS6_MB = 26 + X_NL_CTYPE_INDIGITS7_MB = 27 + X_NL_CTYPE_INDIGITS8_MB = 28 + X_NL_CTYPE_INDIGITS9_MB = 29 + X_NL_CTYPE_INDIGITS_WC_LEN = 30 + X_NL_CTYPE_INDIGITS0_WC = 31 + X_NL_CTYPE_INDIGITS1_WC = 32 + X_NL_CTYPE_INDIGITS2_WC = 33 + X_NL_CTYPE_INDIGITS3_WC = 34 + X_NL_CTYPE_INDIGITS4_WC = 35 + X_NL_CTYPE_INDIGITS5_WC = 36 + X_NL_CTYPE_INDIGITS6_WC = 37 + X_NL_CTYPE_INDIGITS7_WC = 38 + X_NL_CTYPE_INDIGITS8_WC = 39 + X_NL_CTYPE_INDIGITS9_WC = 40 + X_NL_CTYPE_OUTDIGIT0_MB = 41 + X_NL_CTYPE_OUTDIGIT1_MB = 42 + X_NL_CTYPE_OUTDIGIT2_MB = 43 + X_NL_CTYPE_OUTDIGIT3_MB = 44 + X_NL_CTYPE_OUTDIGIT4_MB = 45 + X_NL_CTYPE_OUTDIGIT5_MB = 46 + X_NL_CTYPE_OUTDIGIT6_MB = 47 + X_NL_CTYPE_OUTDIGIT7_MB = 48 + X_NL_CTYPE_OUTDIGIT8_MB = 49 + X_NL_CTYPE_OUTDIGIT9_MB = 50 + X_NL_CTYPE_OUTDIGIT0_WC = 51 + X_NL_CTYPE_OUTDIGIT1_WC = 52 + X_NL_CTYPE_OUTDIGIT2_WC = 53 + X_NL_CTYPE_OUTDIGIT3_WC = 54 + X_NL_CTYPE_OUTDIGIT4_WC = 55 + X_NL_CTYPE_OUTDIGIT5_WC = 56 + X_NL_CTYPE_OUTDIGIT6_WC = 57 + X_NL_CTYPE_OUTDIGIT7_WC = 58 + X_NL_CTYPE_OUTDIGIT8_WC = 59 + X_NL_CTYPE_OUTDIGIT9_WC = 60 + X_NL_CTYPE_TRANSLIT_TAB_SIZE = 61 + X_NL_CTYPE_TRANSLIT_FROM_IDX = 62 + X_NL_CTYPE_TRANSLIT_FROM_TBL = 63 + X_NL_CTYPE_TRANSLIT_TO_IDX = 64 + X_NL_CTYPE_TRANSLIT_TO_TBL = 65 + X_NL_CTYPE_TRANSLIT_DEFAULT_MISSING_LEN = 66 + X_NL_CTYPE_TRANSLIT_DEFAULT_MISSING = 67 + X_NL_CTYPE_TRANSLIT_IGNORE_LEN = 68 + X_NL_CTYPE_TRANSLIT_IGNORE = 69 + X_NL_CTYPE_MAP_TO_NONASCII = 70 + X_NL_CTYPE_NONASCII_CASE = 71 + X_NL_CTYPE_EXTRA_MAP_1 = 72 + X_NL_CTYPE_EXTRA_MAP_2 = 73 + X_NL_CTYPE_EXTRA_MAP_3 = 74 + X_NL_CTYPE_EXTRA_MAP_4 = 75 + X_NL_CTYPE_EXTRA_MAP_5 = 76 + X_NL_CTYPE_EXTRA_MAP_6 = 77 + X_NL_CTYPE_EXTRA_MAP_7 = 78 + X_NL_CTYPE_EXTRA_MAP_8 = 79 + X_NL_CTYPE_EXTRA_MAP_9 = 80 + X_NL_CTYPE_EXTRA_MAP_10 = 81 + X_NL_CTYPE_EXTRA_MAP_11 = 82 + X_NL_CTYPE_EXTRA_MAP_12 = 83 + X_NL_CTYPE_EXTRA_MAP_13 = 84 + X_NL_CTYPE_EXTRA_MAP_14 = 85 + X_NL_NUM_LC_CTYPE = 86 + + // LC_MONETARY category: formatting of monetary quantities. 
+ // These items each correspond to a member of `struct lconv', + // defined in . + X__INT_CURR_SYMBOL = 262144 + X__CURRENCY_SYMBOL = 262145 + X__MON_DECIMAL_POINT = 262146 + X__MON_THOUSANDS_SEP = 262147 + X__MON_GROUPING = 262148 + X__POSITIVE_SIGN = 262149 + X__NEGATIVE_SIGN = 262150 + X__INT_FRAC_DIGITS = 262151 + X__FRAC_DIGITS = 262152 + X__P_CS_PRECEDES = 262153 + X__P_SEP_BY_SPACE = 262154 + X__N_CS_PRECEDES = 262155 + X__N_SEP_BY_SPACE = 262156 + X__P_SIGN_POSN = 262157 + X__N_SIGN_POSN = 262158 + X_NL_MONETARY_CRNCYSTR = 262159 + X__INT_P_CS_PRECEDES = 262160 + X__INT_P_SEP_BY_SPACE = 262161 + X__INT_N_CS_PRECEDES = 262162 + X__INT_N_SEP_BY_SPACE = 262163 + X__INT_P_SIGN_POSN = 262164 + X__INT_N_SIGN_POSN = 262165 + X_NL_MONETARY_DUO_INT_CURR_SYMBOL = 262166 + X_NL_MONETARY_DUO_CURRENCY_SYMBOL = 262167 + X_NL_MONETARY_DUO_INT_FRAC_DIGITS = 262168 + X_NL_MONETARY_DUO_FRAC_DIGITS = 262169 + X_NL_MONETARY_DUO_P_CS_PRECEDES = 262170 + X_NL_MONETARY_DUO_P_SEP_BY_SPACE = 262171 + X_NL_MONETARY_DUO_N_CS_PRECEDES = 262172 + X_NL_MONETARY_DUO_N_SEP_BY_SPACE = 262173 + X_NL_MONETARY_DUO_INT_P_CS_PRECEDES = 262174 + X_NL_MONETARY_DUO_INT_P_SEP_BY_SPACE = 262175 + X_NL_MONETARY_DUO_INT_N_CS_PRECEDES = 262176 + X_NL_MONETARY_DUO_INT_N_SEP_BY_SPACE = 262177 + X_NL_MONETARY_DUO_P_SIGN_POSN = 262178 + X_NL_MONETARY_DUO_N_SIGN_POSN = 262179 + X_NL_MONETARY_DUO_INT_P_SIGN_POSN = 262180 + X_NL_MONETARY_DUO_INT_N_SIGN_POSN = 262181 + X_NL_MONETARY_UNO_VALID_FROM = 262182 + X_NL_MONETARY_UNO_VALID_TO = 262183 + X_NL_MONETARY_DUO_VALID_FROM = 262184 + X_NL_MONETARY_DUO_VALID_TO = 262185 + X_NL_MONETARY_CONVERSION_RATE = 262186 + X_NL_MONETARY_DECIMAL_POINT_WC = 262187 + X_NL_MONETARY_THOUSANDS_SEP_WC = 262188 + X_NL_MONETARY_CODESET = 262189 + X_NL_NUM_LC_MONETARY = 262190 + + // LC_NUMERIC category: formatting of numbers. + // These also correspond to members of `struct lconv'; see . + X__DECIMAL_POINT = 65536 + RADIXCHAR = 65536 + X__THOUSANDS_SEP = 65537 + THOUSEP = 65537 + X__GROUPING = 65538 + X_NL_NUMERIC_DECIMAL_POINT_WC = 65539 + X_NL_NUMERIC_THOUSANDS_SEP_WC = 65540 + X_NL_NUMERIC_CODESET = 65541 + X_NL_NUM_LC_NUMERIC = 65542 + + X__YESEXPR = 327680 // Regex matching ``yes'' input. + X__NOEXPR = 327681 // Regex matching ``no'' input. + X__YESSTR = 327682 // Output string for ``yes''. + X__NOSTR = 327683 // Output string for ``no''. 
+ X_NL_MESSAGES_CODESET = 327684 + X_NL_NUM_LC_MESSAGES = 327685 + + X_NL_PAPER_HEIGHT = 458752 + X_NL_PAPER_WIDTH = 458753 + X_NL_PAPER_CODESET = 458754 + X_NL_NUM_LC_PAPER = 458755 + + X_NL_NAME_NAME_FMT = 524288 + X_NL_NAME_NAME_GEN = 524289 + X_NL_NAME_NAME_MR = 524290 + X_NL_NAME_NAME_MRS = 524291 + X_NL_NAME_NAME_MISS = 524292 + X_NL_NAME_NAME_MS = 524293 + X_NL_NAME_CODESET = 524294 + X_NL_NUM_LC_NAME = 524295 + + X_NL_ADDRESS_POSTAL_FMT = 589824 + X_NL_ADDRESS_COUNTRY_NAME = 589825 + X_NL_ADDRESS_COUNTRY_POST = 589826 + X_NL_ADDRESS_COUNTRY_AB2 = 589827 + X_NL_ADDRESS_COUNTRY_AB3 = 589828 + X_NL_ADDRESS_COUNTRY_CAR = 589829 + X_NL_ADDRESS_COUNTRY_NUM = 589830 + X_NL_ADDRESS_COUNTRY_ISBN = 589831 + X_NL_ADDRESS_LANG_NAME = 589832 + X_NL_ADDRESS_LANG_AB = 589833 + X_NL_ADDRESS_LANG_TERM = 589834 + X_NL_ADDRESS_LANG_LIB = 589835 + X_NL_ADDRESS_CODESET = 589836 + X_NL_NUM_LC_ADDRESS = 589837 + + X_NL_TELEPHONE_TEL_INT_FMT = 655360 + X_NL_TELEPHONE_TEL_DOM_FMT = 655361 + X_NL_TELEPHONE_INT_SELECT = 655362 + X_NL_TELEPHONE_INT_PREFIX = 655363 + X_NL_TELEPHONE_CODESET = 655364 + X_NL_NUM_LC_TELEPHONE = 655365 + + X_NL_MEASUREMENT_MEASUREMENT = 720896 + X_NL_MEASUREMENT_CODESET = 720897 + X_NL_NUM_LC_MEASUREMENT = 720898 + + X_NL_IDENTIFICATION_TITLE = 786432 + X_NL_IDENTIFICATION_SOURCE = 786433 + X_NL_IDENTIFICATION_ADDRESS = 786434 + X_NL_IDENTIFICATION_CONTACT = 786435 + X_NL_IDENTIFICATION_EMAIL = 786436 + X_NL_IDENTIFICATION_TEL = 786437 + X_NL_IDENTIFICATION_FAX = 786438 + X_NL_IDENTIFICATION_LANGUAGE = 786439 + X_NL_IDENTIFICATION_TERRITORY = 786440 + X_NL_IDENTIFICATION_AUDIENCE = 786441 + X_NL_IDENTIFICATION_APPLICATION = 786442 + X_NL_IDENTIFICATION_ABBREVIATION = 786443 + X_NL_IDENTIFICATION_REVISION = 786444 + X_NL_IDENTIFICATION_DATE = 786445 + X_NL_IDENTIFICATION_CATEGORY = 786446 + X_NL_IDENTIFICATION_CODESET = 786447 + X_NL_NUM_LC_IDENTIFICATION = 786448 + + // This marks the highest value used. + X_NL_NUM = 786449 +) + +type Ptrdiff_t = int64 /* :3:26 */ + +type Size_t = uint64 /* :9:23 */ + +type Wchar_t = int32 /* :15:24 */ + +type X__int128_t = struct { + Flo int64 + Fhi int64 +} /* :21:43 */ // must match modernc.org/mathutil.Int128 +type X__uint128_t = struct { + Flo uint64 + Fhi uint64 +} /* :22:44 */ // must match modernc.org/mathutil.Int128 + +type X__builtin_va_list = uintptr /* :46:14 */ +type X__float128 = float64 /* :47:21 */ + +// Access to locale-dependent parameters. +// Copyright (C) 1995-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Get the type definition. +// Copyright (C) 1996-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. 
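The `nl_item' enumeration that closes above packs a locale category and an item index into a single integer, as the earlier comment describes: (category << 16) | index. A small Go sketch, not part of the patch, that reproduces the arithmetic; the constant values are copied from the enumeration, while the category numbers (LC_TIME = 2, LC_IDENTIFICATION = 12) are assumed from glibc's LC_* ordering:

package main

import "fmt"

const (
	lcTime   = 2      // assumed glibc category number for LC_TIME
	ABDAY_1  = 131072 // from the enumeration above: item 0 of LC_TIME
	DAY_7    = 131085 // item 13 of LC_TIME ("Saturday")
	X_NL_NUM = 786449 // highest value used, in the LC_IDENTIFICATION range
)

// nlItem builds an nl_item value the way the header comment describes.
func nlItem(category, index int) int { return category<<16 | index }

// decode extracts the category and the item index again.
func decode(item int) (category, index int) { return item >> 16, item & 0xffff }

func main() {
	fmt.Println(nlItem(lcTime, 0) == ABDAY_1) // true
	fmt.Println(decode(DAY_7))                // 2 13
	fmt.Println(decode(X_NL_NUM))             // 12 17
}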
+// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// These are defined by the user (or the compiler) +// to specify the desired environment: +// +// __STRICT_ANSI__ ISO Standard C. +// _ISOC99_SOURCE Extensions to ISO C89 from ISO C99. +// _ISOC11_SOURCE Extensions to ISO C99 from ISO C11. +// _ISOC2X_SOURCE Extensions to ISO C99 from ISO C2X. +// __STDC_WANT_LIB_EXT2__ +// Extensions to ISO C99 from TR 27431-2:2010. +// __STDC_WANT_IEC_60559_BFP_EXT__ +// Extensions to ISO C11 from TS 18661-1:2014. +// __STDC_WANT_IEC_60559_FUNCS_EXT__ +// Extensions to ISO C11 from TS 18661-4:2015. +// __STDC_WANT_IEC_60559_TYPES_EXT__ +// Extensions to ISO C11 from TS 18661-3:2015. +// +// _POSIX_SOURCE IEEE Std 1003.1. +// _POSIX_C_SOURCE If ==1, like _POSIX_SOURCE; if >=2 add IEEE Std 1003.2; +// if >=199309L, add IEEE Std 1003.1b-1993; +// if >=199506L, add IEEE Std 1003.1c-1995; +// if >=200112L, all of IEEE 1003.1-2004 +// if >=200809L, all of IEEE 1003.1-2008 +// _XOPEN_SOURCE Includes POSIX and XPG things. Set to 500 if +// Single Unix conformance is wanted, to 600 for the +// sixth revision, to 700 for the seventh revision. +// _XOPEN_SOURCE_EXTENDED XPG things and X/Open Unix extensions. +// _LARGEFILE_SOURCE Some more functions for correct standard I/O. +// _LARGEFILE64_SOURCE Additional functionality from LFS for large files. +// _FILE_OFFSET_BITS=N Select default filesystem interface. +// _ATFILE_SOURCE Additional *at interfaces. +// _GNU_SOURCE All of the above, plus GNU extensions. +// _DEFAULT_SOURCE The default set of features (taking precedence over +// __STRICT_ANSI__). +// +// _FORTIFY_SOURCE Add security hardening to many library functions. +// Set to 1 or 2; 2 performs stricter checks than 1. +// +// _REENTRANT, _THREAD_SAFE +// Obsolete; equivalent to _POSIX_C_SOURCE=199506L. +// +// The `-ansi' switch to the GNU C compiler, and standards conformance +// options such as `-std=c99', define __STRICT_ANSI__. 
If none of +// these are defined, or if _DEFAULT_SOURCE is defined, the default is +// to have _POSIX_SOURCE set to one and _POSIX_C_SOURCE set to +// 200809L, as well as enabling miscellaneous functions from BSD and +// SVID. If more than one of these are defined, they accumulate. For +// example __STRICT_ANSI__, _POSIX_SOURCE and _POSIX_C_SOURCE together +// give you ISO C, 1003.1, and 1003.2, but nothing else. +// +// These are defined by this file and are used by the +// header files to decide what to declare or define: +// +// __GLIBC_USE (F) Define things from feature set F. This is defined +// to 1 or 0; the subsequent macros are either defined +// or undefined, and those tests should be moved to +// __GLIBC_USE. +// __USE_ISOC11 Define ISO C11 things. +// __USE_ISOC99 Define ISO C99 things. +// __USE_ISOC95 Define ISO C90 AMD1 (C95) things. +// __USE_ISOCXX11 Define ISO C++11 things. +// __USE_POSIX Define IEEE Std 1003.1 things. +// __USE_POSIX2 Define IEEE Std 1003.2 things. +// __USE_POSIX199309 Define IEEE Std 1003.1, and .1b things. +// __USE_POSIX199506 Define IEEE Std 1003.1, .1b, .1c and .1i things. +// __USE_XOPEN Define XPG things. +// __USE_XOPEN_EXTENDED Define X/Open Unix things. +// __USE_UNIX98 Define Single Unix V2 things. +// __USE_XOPEN2K Define XPG6 things. +// __USE_XOPEN2KXSI Define XPG6 XSI things. +// __USE_XOPEN2K8 Define XPG7 things. +// __USE_XOPEN2K8XSI Define XPG7 XSI things. +// __USE_LARGEFILE Define correct standard I/O things. +// __USE_LARGEFILE64 Define LFS things with separate names. +// __USE_FILE_OFFSET64 Define 64bit interface as default. +// __USE_MISC Define things from 4.3BSD or System V Unix. +// __USE_ATFILE Define *at interfaces and AT_* constants for them. +// __USE_GNU Define GNU extensions. +// __USE_FORTIFY_LEVEL Additional security measures used, according to level. +// +// The macros `__GNU_LIBRARY__', `__GLIBC__', and `__GLIBC_MINOR__' are +// defined by this file unconditionally. `__GNU_LIBRARY__' is provided +// only for compatibility. All new code should use the other symbols +// to test for features. +// +// All macros listed above as possibly being defined by this file are +// explicitly undefined if they are not explicitly defined. +// Feature-test macros that are not defined by the user or compiler +// but are implied by the other feature-test macros defined (or by the +// lack of any definitions) are defined by the file. +// +// ISO C feature test macros depend on the definition of the macro +// when an affected header is included, not when the first system +// header is included, and so they are handled in +// , which does not have a multiple include +// guard. Feature test macros that can be handled from the first +// system header included are handled here. + +// Undefine everything, so we get a clean slate. + +// Suppress kernel-name space pollution unless user expressedly asks +// for it. + +// Convenience macro to test the version of gcc. +// Use like this: +// #if __GNUC_PREREQ (2,8) +// ... code requiring gcc 2.8 or later ... +// #endif +// Note: only works for GCC 2.0 and later, because __GNUC_MINOR__ was +// added in 2.0. + +// Similarly for clang. Features added to GCC after version 4.2 may +// or may not also be available in clang, and clang's definitions of +// __GNUC(_MINOR)__ are fixed at 4 and 2 respectively. Not all such +// features can be queried via __has_extension/__has_feature. + +// Whether to use feature set F. + +// _BSD_SOURCE and _SVID_SOURCE are deprecated aliases for +// _DEFAULT_SOURCE. 
If _DEFAULT_SOURCE is present we do not +// issue a warning; the expectation is that the source is being +// transitioned to use the new macro. + +// If _GNU_SOURCE was defined by the user, turn on all the other features. + +// If nothing (other than _GNU_SOURCE and _DEFAULT_SOURCE) is defined, +// define _DEFAULT_SOURCE. + +// This is to enable the ISO C2X extension. + +// This is to enable the ISO C11 extension. + +// This is to enable the ISO C99 extension. + +// This is to enable the ISO C90 Amendment 1:1995 extension. + +// If none of the ANSI/POSIX macros are defined, or if _DEFAULT_SOURCE +// is defined, use POSIX.1-2008 (or another version depending on +// _XOPEN_SOURCE). + +// Some C libraries once required _REENTRANT and/or _THREAD_SAFE to be +// defined in all multithreaded code. GNU libc has not required this +// for many years. We now treat them as compatibility synonyms for +// _POSIX_C_SOURCE=199506L, which is the earliest level of POSIX with +// comprehensive support for multithreaded code. Using them never +// lowers the selected level of POSIX conformance, only raises it. + +// The function 'gets' existed in C89, but is impossible to use +// safely. It has been removed from ISO C11 and ISO C++14. Note: for +// compatibility with various implementations of , this test +// must consider only the value of __cplusplus when compiling C++. + +// GNU formerly extended the scanf functions with modified format +// specifiers %as, %aS, and %a[...] that allocate a buffer for the +// input using malloc. This extension conflicts with ISO C99, which +// defines %a as a standalone format specifier that reads a floating- +// point number; moreover, POSIX.1-2008 provides the same feature +// using the modifier letter 'm' instead (%ms, %mS, %m[...]). +// +// We now follow C99 unless GNU extensions are active and the compiler +// is specifically in C89 or C++98 mode (strict or not). For +// instance, with GCC, -std=gnu11 will have C99-compliant scanf with +// or without -D_GNU_SOURCE, but -std=c89 -D_GNU_SOURCE will have the +// old extension. + +// Get definitions of __STDC_* predefined macros, if the compiler has +// not preincluded this header automatically. +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// This macro indicates that the installed library is the GNU C Library. +// For historic reasons the value now is 6 and this will stay from now +// on. The use of this variable is deprecated. Use __GLIBC__ and +// __GLIBC_MINOR__ now (see below) when you want to test for a specific +// GNU C library version and use the values in to get +// the sonames of the shared libraries. + +// Major and minor version number of the GNU C library package. Use +// these macros to test for features in specific releases. 
+ +// This is here only because every header file already includes this one. +// Copyright (C) 1992-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// We are almost always included from features.h. + +// The GNU libc does not support any K&R compilers or the traditional mode +// of ISO C compilers anymore. Check for some of the combinations not +// anymore supported. + +// Some user header file might have defined this before. + +// All functions, except those with callbacks or those that +// synchronize memory, are leaf functions. + +// GCC can always grok prototypes. For C++ programs we add throw() +// to help it optimize the function calls. But this works only with +// gcc 2.8.x and egcs. For gcc 3.2 and up we even mark C functions +// as non-throwing using a function attribute since programs can use +// the -fexceptions options for C code as well. + +// Compilers that are not clang may object to +// #if defined __clang__ && __has_extension(...) +// even though they do not need to evaluate the right-hand side of the &&. + +// These two macros are not used in glibc anymore. They are kept here +// only because some other projects expect the macros to be defined. + +// For these things, GCC behaves the ANSI way normally, +// and the non-ANSI way under -traditional. + +// This is not a typedef so `const __ptr_t' does the right thing. + +// C++ needs to know that types and declarations are C, not C++. + +// Fortify support. + +// Support for flexible arrays. +// Headers that should use flexible arrays only if they're "real" +// (e.g. only if they won't affect sizeof()) should test +// #if __glibc_c99_flexarr_available. + +// __asm__ ("xyz") is used throughout the headers to rename functions +// at the assembly language level. This is wrapped by the __REDIRECT +// macro, in order to support compilers that can do this some other +// way. When compilers don't support asm-names at all, we have to do +// preprocessor tricks instead (which don't have exactly the right +// semantics, but it's the best we can do). +// +// Example: +// int __REDIRECT(setpgrp, (__pid_t pid, __pid_t pgrp), setpgid); + +// +// #elif __SOME_OTHER_COMPILER__ +// +// # define __REDIRECT(name, proto, alias) name proto; _Pragma("let " #name " = " #alias) + +// GCC has various useful declarations that can be made with the +// `__attribute__' syntax. All of the ways we use this do fine if +// they are omitted for compilers that don't understand it. + +// At some point during the gcc 2.96 development the `malloc' attribute +// for functions was introduced. We don't want to use it unconditionally +// (although this would be possible) since it generates warnings. + +// Tell the compiler which arguments to an allocation function +// indicate the size of the allocation. 
+ +// At some point during the gcc 2.96 development the `pure' attribute +// for functions was introduced. We don't want to use it unconditionally +// (although this would be possible) since it generates warnings. + +// This declaration tells the compiler that the value is constant. + +// At some point during the gcc 3.1 development the `used' attribute +// for functions was introduced. We don't want to use it unconditionally +// (although this would be possible) since it generates warnings. + +// Since version 3.2, gcc allows marking deprecated functions. + +// Since version 4.5, gcc also allows one to specify the message printed +// when a deprecated function is used. clang claims to be gcc 4.2, but +// may also support this feature. + +// At some point during the gcc 2.8 development the `format_arg' attribute +// for functions was introduced. We don't want to use it unconditionally +// (although this would be possible) since it generates warnings. +// If several `format_arg' attributes are given for the same function, in +// gcc-3.0 and older, all but the last one are ignored. In newer gccs, +// all designated arguments are considered. + +// At some point during the gcc 2.97 development the `strfmon' format +// attribute for functions was introduced. We don't want to use it +// unconditionally (although this would be possible) since it +// generates warnings. + +// The nonull function attribute allows to mark pointer parameters which +// must not be NULL. + +// If fortification mode, we warn about unused results of certain +// function calls which can lead to problems. + +// Forces a function to be always inlined. +// The Linux kernel defines __always_inline in stddef.h (283d7573), and +// it conflicts with this definition. Therefore undefine it first to +// allow either header to be included first. + +// Associate error messages with the source location of the call site rather +// than with the source location inside the function. + +// GCC 4.3 and above with -std=c99 or -std=gnu99 implements ISO C99 +// inline semantics, unless -fgnu89-inline is used. Using __GNUC_STDC_INLINE__ +// or __GNUC_GNU_INLINE is not a good enough check for gcc because gcc versions +// older than 4.3 may define these macros and still not guarantee GNU inlining +// semantics. +// +// clang++ identifies itself as gcc-4.2, but has support for GNU inlining +// semantics, that can be checked for by using the __GNUC_STDC_INLINE_ and +// __GNUC_GNU_INLINE__ macro definitions. + +// GCC 4.3 and above allow passing all anonymous arguments of an +// __extern_always_inline function to some other vararg function. + +// It is possible to compile containing GCC extensions even if GCC is +// run in pedantic mode if the uses are carefully marked using the +// `__extension__' keyword. But this is not generally available before +// version 2.8. + +// __restrict is known in EGCS 1.2 and above. + +// ISO C99 also allows to declare arrays as non-overlapping. The syntax is +// array_name[restrict] +// GCC 3.1 supports this. + +// Describes a char array whose address can safely be passed as the first +// argument to strncpy and strncat, as the char array is not necessarily +// a NUL-terminated string. + +// Undefine (also defined in libc-symbols.h). +// Copies attributes from the declaration or type referenced by +// the argument. + +// Determine the wordsize from the preprocessor defines. + +// Both x86-64 and x32 use the 64-bit system call interface. +// Properties of long double type. ldbl-96 version. 
+// Copyright (C) 2016-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// long double is distinct from double, so there is nothing to +// define here. + +// __glibc_macro_warning (MESSAGE) issues warning MESSAGE. This is +// intended for use in preprocessor macros. +// +// Note: MESSAGE must be a _single_ string; concatenation of string +// literals is not supported. + +// Generic selection (ISO C11) is a C-only feature, available in GCC +// since version 4.9. Previous versions do not provide generic +// selection, even though they might set __STDC_VERSION__ to 201112L, +// when in -std=c11 mode. Thus, we must check for !defined __GNUC__ +// when testing __STDC_VERSION__ for generic selection support. +// On the other hand, Clang also defines __GNUC__, so a clang-specific +// check is required to enable the use of generic selection. + +// If we don't have __REDIRECT, prototypes will be missing if +// __USE_FILE_OFFSET64 but not __USE_LARGEFILE[64]. + +// Decide whether we can define 'extern inline' functions in headers. + +// This is here only because every header file already includes this one. +// Get the definitions of all the appropriate `__stub_FUNCTION' symbols. +// contains `#define __stub_FUNCTION' when FUNCTION is a stub +// that will always return failure (and set errno to ENOSYS). +// This file is automatically generated. +// This file selects the right generated file of `__stub_FUNCTION' macros +// based on the architecture being compiled for. + +// This file is automatically generated. +// It defines a symbol `__stub_FUNCTION' for each function +// in the C library which is a stub, meaning it will fail +// every time called, usually setting errno to ENOSYS. + +// The default message set used by the gencat program. + +// Value for FLAG parameter of `catgets' to say we want XPG4 compliance. + +// Message catalog descriptor type. +type Nl_catd = uintptr /* nl_types.h:33:14 */ + +// Type used by `nl_langinfo'. +type Nl_item = int32 /* nl_types.h:36:13 */ + +// POSIX.1-2008 extended locale interface (see locale.h). +// Definition of locale_t. +// Copyright (C) 2017-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Definition of struct __locale_struct and __locale_t. +// Copyright (C) 1997-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// Contributed by Ulrich Drepper , 1997. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// POSIX.1-2008: the locale_t type, representing a locale context +// (implementation-namespace version). This type should be treated +// as opaque by applications; some details are exposed for the sake of +// efficiency in e.g. ctype functions. + +type X__locale_struct = struct { + F__locales [13]uintptr + F__ctype_b uintptr + F__ctype_tolower uintptr + F__ctype_toupper uintptr + F__names [13]uintptr +} /* __locale_t.h:28:1 */ + +type X__locale_t = uintptr /* __locale_t.h:42:32 */ + +type Locale_t = X__locale_t /* locale_t.h:24:20 */ + +var _ int8 /* gen.c:2:13: */ diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/libc.go temporal-1.22.5/src/vendor/modernc.org/libc/libc.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/libc.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/libc.go 2024-02-23 09:46:15.000000000 +0000 @@ -970,13 +970,24 @@ } } -// time_t mktime(struct tm *tm); -func Xmktime(t *TLS, ptm uintptr) time.Time_t { - loc := gotime.Local +func getLocalLocation() (loc *gotime.Location) { + loc = gotime.Local if r := getenv(Environ(), "TZ"); r != 0 { - zone, off := parseZone(GoString(r)) - loc = gotime.FixedZone(zone, off) + zname := GoString(r) + zone, off := parseZone(zname) + loc = gotime.FixedZone(zone, -off) + loc2, _ := gotime.LoadLocation(zname) + if loc2 != nil { + loc = loc2 + } } + return loc + +} + +// time_t mktime(struct tm *tm); +func Xmktime(t *TLS, ptm uintptr) time.Time_t { + loc := getLocalLocation() tt := gotime.Date( int((*time.Tm)(unsafe.Pointer(ptm)).Ftm_year+1900), gotime.Month((*time.Tm)(unsafe.Pointer(ptm)).Ftm_mon+1), diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/libc64.go temporal-1.22.5/src/vendor/modernc.org/libc/libc64.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/libc64.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/libc64.go 2024-02-23 09:46:15.000000000 +0000 @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-//go:build amd64 || arm64 || ppc64le || riscv64 || s390x -// +build amd64 arm64 ppc64le riscv64 s390x +//go:build amd64 || arm64 || ppc64le || riscv64 || s390x || loong64 +// +build amd64 arm64 ppc64le riscv64 s390x loong64 package libc // import "modernc.org/libc" diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/libc_darwin.go temporal-1.22.5/src/vendor/modernc.org/libc/libc_darwin.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/libc_darwin.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/libc_darwin.go 2024-02-23 09:46:15.000000000 +0000 @@ -277,11 +277,7 @@ // struct tm *localtime(const time_t *timep); func Xlocaltime(_ *TLS, timep uintptr) uintptr { - loc := gotime.Local - if r := getenv(Environ(), "TZ"); r != 0 { - zone, off := parseZone(GoString(r)) - loc = gotime.FixedZone(zone, -off) - } + loc := getLocalLocation() ut := *(*time.Time_t)(unsafe.Pointer(timep)) t := gotime.Unix(int64(ut), 0).In(loc) localtime.Ftm_sec = int32(t.Second()) @@ -298,11 +294,7 @@ // struct tm *localtime_r(const time_t *timep, struct tm *result); func Xlocaltime_r(_ *TLS, timep, result uintptr) uintptr { - loc := gotime.Local - if r := getenv(Environ(), "TZ"); r != 0 { - zone, off := parseZone(GoString(r)) - loc = gotime.FixedZone(zone, -off) - } + loc := getLocalLocation() ut := *(*time_t)(unsafe.Pointer(timep)) t := gotime.Unix(int64(ut), 0).In(loc) (*time.Tm)(unsafe.Pointer(result)).Ftm_sec = int32(t.Second()) @@ -1534,33 +1526,6 @@ // panic(todo("")) // } -// ssize_t pread(int fd, void *buf, size_t count, off_t offset); -func Xpread(t *TLS, fd int32, buf uintptr, count types.Size_t, offset types.Off_t) types.Ssize_t { - var n int - var err error - switch { - case count == 0: - n, err = unix.Pread(int(fd), nil, int64(offset)) - default: - n, err = unix.Pread(int(fd), (*RawMem)(unsafe.Pointer(buf))[:count:count], int64(offset)) - if dmesgs && err == nil { - dmesg("%v: fd %v, off %#x, count %#x, n %#x\n%s", origin(1), fd, offset, count, n, hex.Dump((*RawMem)(unsafe.Pointer(buf))[:n:n])) - } - } - if err != nil { - if dmesgs { - dmesg("%v: %v FAIL", origin(1), err) - } - t.setErrno(err) - return -1 - } - - if dmesgs { - dmesg("%v: ok", origin(1)) - } - return types.Ssize_t(n) -} - // ssize_t pwrite(int fd, const void *buf, size_t count, off_t offset); func Xpwrite(t *TLS, fd int32, buf uintptr, count types.Size_t, offset types.Off_t) types.Ssize_t { var n int diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/libc_freebsd.go temporal-1.22.5/src/vendor/modernc.org/libc/libc_freebsd.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/libc_freebsd.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/libc_freebsd.go 2024-02-23 09:46:15.000000000 +0000 @@ -171,11 +171,7 @@ // struct tm *localtime(const time_t *timep); func Xlocaltime(_ *TLS, timep uintptr) uintptr { - loc := gotime.Local - if r := getenv(Environ(), "TZ"); r != 0 { - zone, off := parseZone(GoString(r)) - loc = gotime.FixedZone(zone, -off) - } + loc := getLocalLocation() ut := *(*time.Time_t)(unsafe.Pointer(timep)) t := gotime.Unix(int64(ut), 0).In(loc) localtime.Ftm_sec = int32(t.Second()) @@ -192,11 +188,7 @@ // struct tm *localtime_r(const time_t *timep, struct tm *result); func Xlocaltime_r(_ *TLS, timep, result uintptr) uintptr { - loc := gotime.Local - if r := getenv(Environ(), "TZ"); r != 0 { - zone, off := parseZone(GoString(r)) - loc = gotime.FixedZone(zone, -off) - } + loc := getLocalLocation() ut := *(*unix.Time_t)(unsafe.Pointer(timep)) t := 
gotime.Unix(int64(ut), 0).In(loc) (*time.Tm)(unsafe.Pointer(result)).Ftm_sec = int32(t.Second()) diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/libc_linux.go temporal-1.22.5/src/vendor/modernc.org/libc/libc_linux.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/libc_linux.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/libc_linux.go 2024-02-23 09:46:15.000000000 +0000 @@ -133,11 +133,7 @@ // struct tm *localtime(const time_t *timep); func Xlocaltime(_ *TLS, timep uintptr) uintptr { - loc := time.Local - if r := getenv(Environ(), "TZ"); r != 0 { - zone, off := parseZone(GoString(r)) - loc = time.FixedZone(zone, -off) - } + loc := getLocalLocation() ut := *(*unix.Time_t)(unsafe.Pointer(timep)) t := time.Unix(int64(ut), 0).In(loc) localtime.Ftm_sec = int32(t.Second()) @@ -154,11 +150,7 @@ // struct tm *localtime_r(const time_t *timep, struct tm *result); func Xlocaltime_r(_ *TLS, timep, result uintptr) uintptr { - loc := time.Local - if r := getenv(Environ(), "TZ"); r != 0 { - zone, off := parseZone(GoString(r)) - loc = time.FixedZone(zone, -off) - } + loc := getLocalLocation() ut := *(*unix.Time_t)(unsafe.Pointer(timep)) t := time.Unix(int64(ut), 0).In(loc) (*ctime.Tm)(unsafe.Pointer(result)).Ftm_sec = int32(t.Second()) @@ -645,16 +637,6 @@ return Xsetrlimit64(t, resource, rlim) } -// int setrlimit(int resource, const struct rlimit *rlim); -func Xsetrlimit64(t *TLS, resource int32, rlim uintptr) int32 { - if _, _, err := unix.Syscall(unix.SYS_SETRLIMIT, uintptr(resource), uintptr(rlim), 0); err != 0 { - t.setErrno(err) - return -1 - } - - return 0 -} - // uid_t getuid(void); func Xgetuid(t *TLS) types.Uid_t { return types.Uid_t(os.Getuid()) diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/libc_linux_386.go temporal-1.22.5/src/vendor/modernc.org/libc/libc_linux_386.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/libc_linux_386.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/libc_linux_386.go 2024-02-23 09:46:15.000000000 +0000 @@ -455,3 +455,13 @@ t.setErrno(errno.ENOMEM) return 0 } + +// int setrlimit(int resource, const struct rlimit *rlim); +func Xsetrlimit64(t *TLS, resource int32, rlim uintptr) int32 { + if _, _, err := unix.Syscall(unix.SYS_SETRLIMIT, uintptr(resource), uintptr(rlim), 0); err != 0 { + t.setErrno(err) + return -1 + } + + return 0 +} diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/libc_linux_amd64.go temporal-1.22.5/src/vendor/modernc.org/libc/libc_linux_amd64.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/libc_linux_amd64.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/libc_linux_amd64.go 2024-02-23 09:46:15.000000000 +0000 @@ -465,3 +465,13 @@ func Xiswalnum(t *TLS, wc wctype.Wint_t) int32 { return Bool32(unicode.IsLetter(rune(wc)) || unicode.IsNumber(rune(wc))) } + +// int setrlimit(int resource, const struct rlimit *rlim); +func Xsetrlimit64(t *TLS, resource int32, rlim uintptr) int32 { + if _, _, err := unix.Syscall(unix.SYS_SETRLIMIT, uintptr(resource), uintptr(rlim), 0); err != 0 { + t.setErrno(err) + return -1 + } + + return 0 +} diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/libc_linux_arm.go temporal-1.22.5/src/vendor/modernc.org/libc/libc_linux_arm.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/libc_linux_arm.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/libc_linux_arm.go 2024-02-23 09:46:15.000000000 +0000 @@ -447,3 +447,13 @@ //TODO- // } //TODO- 
return r //TODO- } + +// int setrlimit(int resource, const struct rlimit *rlim); +func Xsetrlimit64(t *TLS, resource int32, rlim uintptr) int32 { + if _, _, err := unix.Syscall(unix.SYS_SETRLIMIT, uintptr(resource), uintptr(rlim), 0); err != 0 { + t.setErrno(err) + return -1 + } + + return 0 +} diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/libc_linux_arm64.go temporal-1.22.5/src/vendor/modernc.org/libc/libc_linux_arm64.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/libc_linux_arm64.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/libc_linux_arm64.go 2024-02-23 09:46:15.000000000 +0000 @@ -437,3 +437,13 @@ func Xalarm(t *TLS, seconds uint32) uint32 { panic(todo("")) } + +// int setrlimit(int resource, const struct rlimit *rlim); +func Xsetrlimit64(t *TLS, resource int32, rlim uintptr) int32 { + if _, _, err := unix.Syscall(unix.SYS_SETRLIMIT, uintptr(resource), uintptr(rlim), 0); err != 0 { + t.setErrno(err) + return -1 + } + + return 0 +} diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/libc_linux_loong64.go temporal-1.22.5/src/vendor/modernc.org/libc/libc_linux_loong64.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/libc_linux_loong64.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/libc_linux_loong64.go 2024-02-23 09:46:15.000000000 +0000 @@ -0,0 +1,498 @@ +// Copyright 2020 The Libc Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package libc // import "modernc.org/libc" + +import ( + "unicode" + "unsafe" + "os" + "strings" + + "golang.org/x/sys/unix" + "modernc.org/libc/errno" + "modernc.org/libc/fcntl" + "modernc.org/libc/signal" + "modernc.org/libc/sys/types" + "modernc.org/libc/wctype" +) + +// int sigaction(int signum, const struct sigaction *act, struct sigaction *oldact); +func Xsigaction(t *TLS, signum int32, act, oldact uintptr) int32 { + // musl/arch/x86_64/ksigaction.h + // + // struct k_sigaction { + // void (*handler)(int); + // unsigned long flags; + // void (*restorer)(void); + // unsigned mask[2]; + // }; + type k_sigaction struct { + handler uintptr + flags ulong + restorer uintptr + mask [2]uint32 + } + + var kact, koldact uintptr + if act != 0 { + sz := int(unsafe.Sizeof(k_sigaction{})) + kact = t.Alloc(sz) + defer t.Free(sz) + *(*k_sigaction)(unsafe.Pointer(kact)) = k_sigaction{ + handler: (*signal.Sigaction)(unsafe.Pointer(act)).F__sigaction_handler.Fsa_handler, + flags: ulong((*signal.Sigaction)(unsafe.Pointer(act)).Fsa_flags), + restorer: (*signal.Sigaction)(unsafe.Pointer(act)).Fsa_restorer, + } + Xmemcpy(t, kact+unsafe.Offsetof(k_sigaction{}.mask), act+unsafe.Offsetof(signal.Sigaction{}.Fsa_mask), types.Size_t(unsafe.Sizeof(k_sigaction{}.mask))) + } + if oldact != 0 { + panic(todo("")) + } + + if _, _, err := unix.Syscall6(unix.SYS_RT_SIGACTION, uintptr(signum), kact, koldact, unsafe.Sizeof(k_sigaction{}.mask), 0, 0); err != 0 { + t.setErrno(err) + return -1 + } + + if oldact != 0 { + panic(todo("")) + } + + return 0 +} + +// int fcntl(int fd, int cmd, ... 
/* arg */ ); +func Xfcntl64(t *TLS, fd, cmd int32, args uintptr) int32 { + var arg uintptr + if args != 0 { + arg = *(*uintptr)(unsafe.Pointer(args)) + } + if cmd == fcntl.F_SETFL { + arg |= unix.O_LARGEFILE + } + n, _, err := unix.Syscall(unix.SYS_FCNTL, uintptr(fd), uintptr(cmd), arg) + if err != 0 { + // if dmesgs { + // dmesg("%v: fd %v cmd %v", origin(1), fcntlCmdStr(fd), cmd) + // } + t.setErrno(err) + return -1 + } + + // if dmesgs { + // dmesg("%v: %d %s %#x: %d", origin(1), fd, fcntlCmdStr(cmd), arg, n) + // } + return int32(n) +} + +// int lstat(const char *pathname, struct stat *statbuf); +func Xlstat64(t *TLS, pathname, statbuf uintptr) int32 { + panic(todo("")) + // if _, _, err := unix.Syscall(unix.SYS_LSTAT, pathname, statbuf, 0); err != 0 { + // // if dmesgs { + // // dmesg("%v: %q: %v", origin(1), GoString(pathname), err) + // // } + // t.setErrno(err) + // return -1 + // } + + // // if dmesgs { + // // dmesg("%v: %q: ok", origin(1), GoString(pathname)) + // // } + // return 0 +} + +// int stat(const char *pathname, struct stat *statbuf); +func Xstat64(t *TLS, pathname, statbuf uintptr) int32 { + panic(todo("")) + // if _, _, err := unix.Syscall(unix.SYS_STAT, pathname, statbuf, 0); err != 0 { + // // if dmesgs { + // // dmesg("%v: %q: %v", origin(1), GoString(pathname), err) + // // } + // t.setErrno(err) + // return -1 + // } + + // // if dmesgs { + // // dmesg("%v: %q: ok", origin(1), GoString(pathname)) + // // } + // return 0 +} + +// int fstat(int fd, struct stat *statbuf); +func Xfstat64(t *TLS, fd int32, statbuf uintptr) int32 { + panic(todo("")) + // if _, _, err := unix.Syscall(unix.SYS_FSTAT, uintptr(fd), statbuf, 0); err != 0 { + // // if dmesgs { + // // dmesg("%v: fd %d: %v", origin(1), fd, err) + // // } + // t.setErrno(err) + // return -1 + // } + + // // if dmesgs { + // // dmesg("%v: %d size %#x: ok\n%+v", origin(1), fd, (*stat.Stat)(unsafe.Pointer(statbuf)).Fst_size, (*stat.Stat)(unsafe.Pointer(statbuf))) + // // } + // return 0 +} + +func Xmmap(t *TLS, addr uintptr, length types.Size_t, prot, flags, fd int32, offset types.Off_t) uintptr { + return Xmmap64(t, addr, length, prot, flags, fd, offset) +} + +// void *mmap(void *addr, size_t length, int prot, int flags, int fd, off_t offset); +func Xmmap64(t *TLS, addr uintptr, length types.Size_t, prot, flags, fd int32, offset types.Off_t) uintptr { + data, _, err := unix.Syscall6(unix.SYS_MMAP, addr, uintptr(length), uintptr(prot), uintptr(flags), uintptr(fd), uintptr(offset)) + if err != 0 { + // if dmesgs { + // dmesg("%v: %v", origin(1), err) + // } + t.setErrno(err) + return ^uintptr(0) // (void*)-1 + } + + // if dmesgs { + // dmesg("%v: %#x", origin(1), data) + // } + return data +} + +// void *mremap(void *old_address, size_t old_size, size_t new_size, int flags, ... 
/* void *new_address */); +func Xmremap(t *TLS, old_address uintptr, old_size, new_size types.Size_t, flags int32, args uintptr) uintptr { + var arg uintptr + if args != 0 { + arg = *(*uintptr)(unsafe.Pointer(args)) + } + data, _, err := unix.Syscall6(unix.SYS_MREMAP, old_address, uintptr(old_size), uintptr(new_size), uintptr(flags), arg, 0) + if err != 0 { + // if dmesgs { + // dmesg("%v: %v", origin(1), err) + // } + t.setErrno(err) + return ^uintptr(0) // (void*)-1 + } + + // if dmesgs { + // dmesg("%v: %#x", origin(1), data) + // } + return data +} + +// int ftruncate(int fd, off_t length); +func Xftruncate64(t *TLS, fd int32, length types.Off_t) int32 { + if _, _, err := unix.Syscall(unix.SYS_FTRUNCATE, uintptr(fd), uintptr(length), 0); err != 0 { + // if dmesgs { + // dmesg("%v: fd %d: %v", origin(1), fd, err) + // } + t.setErrno(err) + return -1 + } + + // if dmesgs { + // dmesg("%v: %d %#x: ok", origin(1), fd, length) + // } + return 0 +} + +// off64_t lseek64(int fd, off64_t offset, int whence); +func Xlseek64(t *TLS, fd int32, offset types.Off_t, whence int32) types.Off_t { + n, _, err := unix.Syscall(unix.SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence)) + if err != 0 { + // if dmesgs { + // dmesg("%v: fd %v, off %#x, whence %v: %v", origin(1), fd, offset, whenceStr(whence), err) + // } + t.setErrno(err) + return -1 + } + + // if dmesgs { + // dmesg("%v: fd %v, off %#x, whence %v: %#x", origin(1), fd, offset, whenceStr(whence), n) + // } + return types.Off_t(n) +} + +// int utime(const char *filename, const struct utimbuf *times); +func Xutime(t *TLS, filename, times uintptr) int32 { + panic(todo("")) + // if _, _, err := unix.Syscall(unix.SYS_UTIME, filename, times, 0); err != 0 { + // t.setErrno(err) + // return -1 + // } + + // return 0 +} + +// unsigned int alarm(unsigned int seconds); +func Xalarm(t *TLS, seconds uint32) uint32 { + panic(todo("")) + // n, _, err := unix.Syscall(unix.SYS_ALARM, uintptr(seconds), 0, 0) + // if err != 0 { + // panic(todo("")) + // } + + // return uint32(n) +} + +// time_t time(time_t *tloc); +func Xtime(t *TLS, tloc uintptr) types.Time_t { + panic(todo("")) + // n, _, err := unix.Syscall(unix.SYS_TIME, tloc, 0, 0) + // if err != 0 { + // t.setErrno(err) + // return types.Time_t(-1) + // } + + // if tloc != 0 { + // *(*types.Time_t)(unsafe.Pointer(tloc)) = types.Time_t(n) + // } + // return types.Time_t(n) +} + +// int getrlimit(int resource, struct rlimit *rlim); +func Xgetrlimit64(t *TLS, resource int32, rlim uintptr) int32 { + panic(todo("")) + // if _, _, err := unix.Syscall(unix.SYS_GETRLIMIT, uintptr(resource), uintptr(rlim), 0); err != 0 { + // t.setErrno(err) + // return -1 + // } + + // return 0 +} + +// int mkdir(const char *path, mode_t mode); +func Xmkdir(t *TLS, path uintptr, mode types.Mode_t) int32 { + panic(todo("")) + // if _, _, err := unix.Syscall(unix.SYS_MKDIR, path, uintptr(mode), 0); err != 0 { + // t.setErrno(err) + // return -1 + // } + + // // if dmesgs { + // // dmesg("%v: %q: ok", origin(1), GoString(path)) + // // } + // return 0 +} + +// int symlink(const char *target, const char *linkpath); +func Xsymlink(t *TLS, target, linkpath uintptr) int32 { + panic(todo("")) + // if _, _, err := unix.Syscall(unix.SYS_SYMLINK, target, linkpath, 0); err != 0 { + // t.setErrno(err) + // return -1 + // } + + // // if dmesgs { + // // dmesg("%v: %q %q: ok", origin(1), GoString(target), GoString(linkpath)) + // // } + // return 0 +} + +// int chmod(const char *pathname, mode_t mode) +func Xchmod(t *TLS, pathname uintptr, 
mode types.Mode_t) int32 { + panic(todo("")) + // if _, _, err := unix.Syscall(unix.SYS_CHMOD, pathname, uintptr(mode), 0); err != 0 { + // t.setErrno(err) + // return -1 + // } + + // // if dmesgs { + // // dmesg("%v: %q %#o: ok", origin(1), GoString(pathname), mode) + // // } + // return 0 +} + +// int utimes(const char *filename, const struct timeval times[2]); +func Xutimes(t *TLS, filename, times uintptr) int32 { + panic(todo("")) + // if _, _, err := unix.Syscall(unix.SYS_UTIMES, filename, times, 0); err != 0 { + // t.setErrno(err) + // return -1 + // } + + // // if dmesgs { + // // dmesg("%v: %q: ok", origin(1), GoString(filename)) + // // } + // return 0 +} + +// int unlink(const char *pathname); +func Xunlink(t *TLS, pathname uintptr) int32 { + panic(todo("")) + // if _, _, err := unix.Syscall(unix.SYS_UNLINK, pathname, 0, 0); err != 0 { + // t.setErrno(err) + // return -1 + // } + + // // if dmesgs { + // // dmesg("%v: %q: ok", origin(1), GoString(pathname)) + // // } + // return 0 +} + +// int access(const char *pathname, int mode); +func Xaccess(t *TLS, pathname uintptr, mode int32) int32 { + panic(todo("")) + // if _, _, err := unix.Syscall(unix.SYS_ACCESS, pathname, uintptr(mode), 0); err != 0 { + // // if dmesgs { + // // dmesg("%v: %q: %v", origin(1), GoString(pathname), err) + // // } + // t.setErrno(err) + // return -1 + // } + + // // if dmesgs { + // // dmesg("%v: %q %#o: ok", origin(1), GoString(pathname), mode) + // // } + // return 0 +} + +// int rmdir(const char *pathname); +func Xrmdir(t *TLS, pathname uintptr) int32 { + panic(todo("")) + // if _, _, err := unix.Syscall(unix.SYS_RMDIR, pathname, 0, 0); err != 0 { + // t.setErrno(err) + // return -1 + // } + + // // if dmesgs { + // // dmesg("%v: %q: ok", origin(1), GoString(pathname)) + // // } + // return 0 +} + +// int rename(const char *oldpath, const char *newpath); +func Xrename(t *TLS, oldpath, newpath uintptr) int32 { + panic(todo("")) + // if _, _, err := unix.Syscall(unix.SYS_RENAME, oldpath, newpath, 0); err != 0 { + // t.setErrno(err) + // return -1 + // } + + // return 0 +} + +// int mknod(const char *pathname, mode_t mode, dev_t dev); +func Xmknod(t *TLS, pathname uintptr, mode types.Mode_t, dev types.Dev_t) int32 { + panic(todo("")) + // if _, _, err := unix.Syscall(unix.SYS_MKNOD, pathname, uintptr(mode), uintptr(dev)); err != 0 { + // t.setErrno(err) + // return -1 + // } + + // return 0 +} + +// int chown(const char *pathname, uid_t owner, gid_t group); +func Xchown(t *TLS, pathname uintptr, owner types.Uid_t, group types.Gid_t) int32 { + panic(todo("")) + // // if _, _, err := unix.Syscall(unix.SYS_CHOWN, pathname, uintptr(owner), uintptr(group)); err != 0 { + // // t.setErrno(err) + // // return -1 + // // } + + // // return 0 +} + +// int link(const char *oldpath, const char *newpath); +func Xlink(t *TLS, oldpath, newpath uintptr) int32 { + panic(todo("")) + // if _, _, err := unix.Syscall(unix.SYS_LINK, oldpath, newpath, 0); err != 0 { + // t.setErrno(err) + // return -1 + // } + + // return 0 +} + +// int pipe(int pipefd[2]); +func Xpipe(t *TLS, pipefd uintptr) int32 { + panic(todo("")) + // if _, _, err := unix.Syscall(unix.SYS_PIPE, pipefd, 0, 0); err != 0 { + // t.setErrno(err) + // return -1 + // } + + // return 0 +} + +// int dup2(int oldfd, int newfd); +func Xdup2(t *TLS, oldfd, newfd int32) int32 { + panic(todo("")) + // n, _, err := unix.Syscall(unix.SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) + // if err != 0 { + // t.setErrno(err) + // return -1 + // } + + // return int32(n) +} + 
+// ssize_t readlink(const char *restrict path, char *restrict buf, size_t bufsize); +func Xreadlink(t *TLS, path, buf uintptr, bufsize types.Size_t) types.Ssize_t { + panic(todo("")) + // n, _, err := unix.Syscall(unix.SYS_READLINK, path, buf, uintptr(bufsize)) + // if err != 0 { + // t.setErrno(err) + // return -1 + // } + + // return types.Ssize_t(n) +} + +// FILE *fopen64(const char *pathname, const char *mode); +func Xfopen64(t *TLS, pathname, mode uintptr) uintptr { + m := strings.ReplaceAll(GoString(mode), "b", "") + var flags int + switch m { + case "r": + flags = os.O_RDONLY + case "r+": + flags = os.O_RDWR + case "w": + flags = os.O_WRONLY | os.O_CREATE | os.O_TRUNC + case "w+": + flags = os.O_RDWR | os.O_CREATE | os.O_TRUNC + case "a": + flags = os.O_WRONLY | os.O_CREATE | os.O_APPEND + case "a+": + flags = os.O_RDWR | os.O_CREATE | os.O_APPEND + default: + panic(m) + } + fd, err := unix.Open(GoString(pathname), flags|unix.O_LARGEFILE, 0666) + if err != nil { + t.setErrno(err) + return 0 + } + + if p := newFile(t, int32(fd)); p != 0 { + return p + } + + Xclose(t, int32(fd)) + t.setErrno(errno.ENOMEM) + return 0 +} + +// int iswspace(wint_t wc); +func Xiswspace(t *TLS, wc wctype.Wint_t) int32 { + return Bool32(unicode.IsSpace(rune(wc))) +} + +// int iswalnum(wint_t wc); +func Xiswalnum(t *TLS, wc wctype.Wint_t) int32 { + return Bool32(unicode.IsLetter(rune(wc)) || unicode.IsNumber(rune(wc))) +} + +// int setrlimit(int resource, const struct rlimit *rlim); +func Xsetrlimit64(t *TLS, resource int32, rlim uintptr) int32 { + panic(todo("")) + // if _, _, err := unix.Syscall(unix.SYS_SETRLIMIT, uintptr(resource), uintptr(rlim), 0); err != 0 { + // t.setErrno(err) + // return -1 + // } + + // return 0 +} diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/libc_linux_ppc64le.go temporal-1.22.5/src/vendor/modernc.org/libc/libc_linux_ppc64le.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/libc_linux_ppc64le.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/libc_linux_ppc64le.go 2024-02-23 09:46:15.000000000 +0000 @@ -485,3 +485,13 @@ func __syscall4(t *TLS, trap, p1, p2, p3, p4 long) long { return __syscall(unix.Syscall6(uintptr(trap), uintptr(p1), uintptr(p2), uintptr(p3), uintptr(p4), 0, 0)) } + +// int setrlimit(int resource, const struct rlimit *rlim); +func Xsetrlimit64(t *TLS, resource int32, rlim uintptr) int32 { + if _, _, err := unix.Syscall(unix.SYS_SETRLIMIT, uintptr(resource), uintptr(rlim), 0); err != 0 { + t.setErrno(err) + return -1 + } + + return 0 +} diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/libc_linux_riscv64.go temporal-1.22.5/src/vendor/modernc.org/libc/libc_linux_riscv64.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/libc_linux_riscv64.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/libc_linux_riscv64.go 2024-02-23 09:46:15.000000000 +0000 @@ -411,3 +411,13 @@ func __syscall4(t *TLS, trap, p1, p2, p3, p4 long) long { return __syscall(unix.Syscall6(uintptr(trap), uintptr(p1), uintptr(p2), uintptr(p3), uintptr(p4), 0, 0)) } + +// int setrlimit(int resource, const struct rlimit *rlim); +func Xsetrlimit64(t *TLS, resource int32, rlim uintptr) int32 { + if _, _, err := unix.Syscall(unix.SYS_SETRLIMIT, uintptr(resource), uintptr(rlim), 0); err != 0 { + t.setErrno(err) + return -1 + } + + return 0 +} diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/libc_linux_s390x.go temporal-1.22.5/src/vendor/modernc.org/libc/libc_linux_s390x.go --- 
temporal-1.21.5-1/src/vendor/modernc.org/libc/libc_linux_s390x.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/libc_linux_s390x.go 2024-02-23 09:46:15.000000000 +0000 @@ -464,3 +464,13 @@ func __syscall4(t *TLS, trap, p1, p2, p3, p4 long) long { return __syscall(unix.Syscall6(uintptr(trap), uintptr(p1), uintptr(p2), uintptr(p3), uintptr(p4), 0, 0)) } + +// int setrlimit(int resource, const struct rlimit *rlim); +func Xsetrlimit64(t *TLS, resource int32, rlim uintptr) int32 { + if _, _, err := unix.Syscall(unix.SYS_SETRLIMIT, uintptr(resource), uintptr(rlim), 0); err != 0 { + t.setErrno(err) + return -1 + } + + return 0 +} diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/libc_netbsd.go temporal-1.22.5/src/vendor/modernc.org/libc/libc_netbsd.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/libc_netbsd.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/libc_netbsd.go 2024-02-23 09:46:15.000000000 +0000 @@ -205,11 +205,7 @@ // struct tm *localtime(const time_t *timep); func Xlocaltime(_ *TLS, timep uintptr) uintptr { - loc := gotime.Local - if r := getenv(Environ(), "TZ"); r != 0 { - zone, off := parseZone(GoString(r)) - loc = gotime.FixedZone(zone, -off) - } + loc := getLocalLocation() ut := *(*time.Time_t)(unsafe.Pointer(timep)) t := gotime.Unix(int64(ut), 0).In(loc) localtime.Ftm_sec = int32(t.Second()) @@ -226,11 +222,7 @@ // struct tm *localtime_r(const time_t *timep, struct tm *result); func Xlocaltime_r(_ *TLS, timep, result uintptr) uintptr { - loc := gotime.Local - if r := getenv(Environ(), "TZ"); r != 0 { - zone, off := parseZone(GoString(r)) - loc = gotime.FixedZone(zone, -off) - } + loc := getLocalLocation() ut := *(*time.Time_t)(unsafe.Pointer(timep)) t := gotime.Unix(int64(ut), 0).In(loc) (*time.Tm)(unsafe.Pointer(result)).Ftm_sec = int32(t.Second()) diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/libc_openbsd.go temporal-1.22.5/src/vendor/modernc.org/libc/libc_openbsd.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/libc_openbsd.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/libc_openbsd.go 2024-02-23 09:46:15.000000000 +0000 @@ -205,11 +205,7 @@ // struct tm *localtime(const time_t *timep); func Xlocaltime(_ *TLS, timep uintptr) uintptr { - loc := gotime.Local - if r := getenv(Environ(), "TZ"); r != 0 { - zone, off := parseZone(GoString(r)) - loc = gotime.FixedZone(zone, -off) - } + loc := getLocalLocation() ut := *(*time.Time_t)(unsafe.Pointer(timep)) t := gotime.Unix(int64(ut), 0).In(loc) localtime.Ftm_sec = int32(t.Second()) @@ -226,11 +222,7 @@ // struct tm *localtime_r(const time_t *timep, struct tm *result); func Xlocaltime_r(_ *TLS, timep, result uintptr) uintptr { - loc := gotime.Local - if r := getenv(Environ(), "TZ"); r != 0 { - zone, off := parseZone(GoString(r)) - loc = gotime.FixedZone(zone, -off) - } + loc := getLocalLocation() ut := *(*time.Time_t)(unsafe.Pointer(timep)) t := gotime.Unix(int64(ut), 0).In(loc) (*time.Tm)(unsafe.Pointer(result)).Ftm_sec = int32(t.Second()) @@ -362,7 +354,7 @@ } if dmesgs { - dmesg("%v: %d %#x: ok", origin(1), fd, length) + dmesg("%v: fd %d length %#0x: ok", origin(1), fd, length) } return 0 } @@ -380,10 +372,10 @@ return -1 } - // if dmesgs { - // // dmesg("%v: %d %#x: %#x\n%s", origin(1), fd, count, n, hex.Dump(GoBytes(buf, int(n)))) - // dmesg("%v: %d %#x: %#x", origin(1), fd, count, n) - // } + if dmesgs { + // dmesg("%v: %d %#x: %#x\n%s", origin(1), fd, count, n, hex.Dump(GoBytes(buf, 
int(n)))) + dmesg("%v: fd %d, buf %#0x, count %#x: n %#x", origin(1), fd, count, n) + } return types.Ssize_t(n) } @@ -395,19 +387,19 @@ var n uintptr switch n, _, err = unix.Syscall(unix.SYS_WRITE, uintptr(fd), buf, uintptr(count)); err { case 0: - // if dmesgs { - // // dmesg("%v: %d %#x: %#x\n%s", origin(1), fd, count, n, hex.Dump(GoBytes(buf, int(n)))) - // dmesg("%v: %d %#x: %#x", origin(1), fd, count, n) - // } + if dmesgs { + // dmesg("%v: %d %#x: %#x\n%s", origin(1), fd, count, n, hex.Dump(GoBytes(buf, int(n)))) + dmesg("%v: %d %#x: %#x", origin(1), fd, count, n) + } return types.Ssize_t(n) case errno.EAGAIN: // nop } } - // if dmesgs { - // dmesg("%v: fd %v, count %#x: %v", origin(1), fd, count, err) - // } + if dmesgs { + dmesg("%v: fd %v, buf %#0x, count %#x: %v", origin(1), fd, count, err) + } t.setErrno(err) return -1 } @@ -1468,8 +1460,16 @@ } func Xmmap(t *TLS, addr uintptr, length types.Size_t, prot, flags, fd int32, offset types.Off_t) uintptr { + // On 2021-12-23, a new syscall for mmap was introduced: + // + // 49 STD NOLOCK { void *sys_mmap(void *addr, size_t len, int prot, \ + // int flags, int fd, off_t pos); } + // src: https://github.com/golang/go/issues/59661 + + const unix_SYS_MMAP = 49 + // Cannot avoid the syscall here, addr sometimes matter. - data, _, err := unix.Syscall6(unix.SYS_MMAP, addr, uintptr(length), uintptr(prot), uintptr(flags), uintptr(fd), uintptr(offset)) + data, _, err := unix.Syscall6(unix_SYS_MMAP, addr, uintptr(length), uintptr(prot), uintptr(flags), uintptr(fd), uintptr(offset)) if err != 0 { if dmesgs { dmesg("%v: %v FAIL", origin(1), err) @@ -1479,7 +1479,7 @@ } if dmesgs { - dmesg("%v: %#x", origin(1), data) + dmesg("%v: addr %#0x, length %#x0, prot %#0x, flags %#0x, fd %d, offset %#0x returns %#0x", origin(1), addr, length, prot, flags, fd, offset, data) } return data } diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/libc_openbsd_amd64.go temporal-1.22.5/src/vendor/modernc.org/libc/libc_openbsd_amd64.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/libc_openbsd_amd64.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/libc_openbsd_amd64.go 2024-02-23 09:46:15.000000000 +0000 @@ -268,7 +268,7 @@ } if dmesgs { - dmesg("%v: ok", origin(1)) + dmesg("%v: fd %d, offset %#0x, whence %d, ok", origin(1), fd, offset, whence) } return types.Off_t(n) } diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/libc_unix.go temporal-1.22.5/src/vendor/modernc.org/libc/libc_unix.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/libc_unix.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/libc_unix.go 2024-02-23 09:46:15.000000000 +0000 @@ -2,13 +2,14 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-//go:build linux || darwin || freebsd || netbsd || openbsd -// +build linux darwin freebsd netbsd openbsd +//go:build unix +// +build unix package libc // import "modernc.org/libc" import ( "bufio" + "encoding/hex" "io/ioutil" "math" "math/rand" @@ -992,3 +993,30 @@ copy((*RawMem)(unsafe.Pointer(buf))[:26:26], s) return buf } + +// ssize_t pread(int fd, void *buf, size_t count, off_t offset); +func Xpread(t *TLS, fd int32, buf uintptr, count types.Size_t, offset types.Off_t) types.Ssize_t { + var n int + var err error + switch { + case count == 0: + n, err = unix.Pread(int(fd), nil, int64(offset)) + default: + n, err = unix.Pread(int(fd), (*RawMem)(unsafe.Pointer(buf))[:count:count], int64(offset)) + if dmesgs && err == nil { + dmesg("%v: fd %v, off %#x, count %#x, n %#x\n%s", origin(1), fd, offset, count, n, hex.Dump((*RawMem)(unsafe.Pointer(buf))[:n:n])) + } + } + if err != nil { + if dmesgs { + dmesg("%v: %v FAIL", origin(1), err) + } + t.setErrno(err) + return -1 + } + + if dmesgs { + dmesg("%v: ok", origin(1)) + } + return types.Ssize_t(n) +} diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/libc_windows.go temporal-1.22.5/src/vendor/modernc.org/libc/libc_windows.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/libc_windows.go 2023-09-29 14:03:34.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/libc_windows.go 2024-02-23 09:46:15.000000000 +0000 @@ -399,11 +399,7 @@ // struct tm *localtime(const time_t *timep); func Xlocaltime(_ *TLS, timep uintptr) uintptr { - loc := gotime.Local - if r := getenv(Environ(), "TZ"); r != 0 { - zone, off := parseZone(GoString(r)) - loc = gotime.FixedZone(zone, -off) - } + loc := getLocalLocation() ut := *(*time.Time_t)(unsafe.Pointer(timep)) t := gotime.Unix(int64(ut), 0).In(loc) localtime.Ftm_sec = int32(t.Second()) @@ -426,11 +422,7 @@ // struct tm *localtime_r(const time_t *timep, struct tm *result); func Xlocaltime_r(_ *TLS, timep, result uintptr) uintptr { panic(todo("")) - // loc := gotime.Local - // if r := getenv(Environ(), "TZ"); r != 0 { - // zone, off := parseZone(GoString(r)) - // loc = gotime.FixedZone(zone, -off) - // } + // loc := getLocalLocation() // ut := *(*unix.Time_t)(unsafe.Pointer(timep)) // t := gotime.Unix(int64(ut), 0).In(loc) // (*time.Tm)(unsafe.Pointer(result)).Ftm_sec = int32(t.Second()) diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/limits/capi_linux_loong64.go temporal-1.22.5/src/vendor/modernc.org/libc/limits/capi_linux_loong64.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/limits/capi_linux_loong64.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/limits/capi_linux_loong64.go 2024-02-23 09:46:15.000000000 +0000 @@ -0,0 +1,5 @@ +// Code generated by 'ccgo limits/gen.c -crt-import-path -export-defines -export-enums -export-externs X -export-fields F -export-structs -export-typedefs -header -hide _OSSwapInt16,_OSSwapInt32,_OSSwapInt64 -ignore-unsupported-alignment -o limits/limits_linux_amd64.go -pkgname limits', DO NOT EDIT. 
+ +package limits + +var CAPI = map[string]struct{}{} diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/limits/limits_linux_loong64.go temporal-1.22.5/src/vendor/modernc.org/libc/limits/limits_linux_loong64.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/limits/limits_linux_loong64.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/limits/limits_linux_loong64.go 2024-02-23 09:46:15.000000000 +0000 @@ -0,0 +1,943 @@ +// Code generated by 'ccgo limits/gen.c -crt-import-path "" -export-defines "" -export-enums "" -export-externs X -export-fields F -export-structs "" -export-typedefs "" -header -hide _OSSwapInt16,_OSSwapInt32,_OSSwapInt64 -ignore-unsupported-alignment -o limits/limits_linux_amd64.go -pkgname limits', DO NOT EDIT. + +package limits + +import ( + "math" + "reflect" + "sync/atomic" + "unsafe" +) + +var _ = math.Pi +var _ reflect.Kind +var _ atomic.Value +var _ unsafe.Pointer + +const ( + AIO_PRIO_DELTA_MAX = 20 // local_lim.h:78:1: + BC_BASE_MAX = 99 // posix2_lim.h:63:1: + BC_DIM_MAX = 2048 // posix2_lim.h:66:1: + BC_SCALE_MAX = 99 // posix2_lim.h:69:1: + BC_STRING_MAX = 1000 // posix2_lim.h:72:1: + CHARCLASS_NAME_MAX = 2048 // posix2_lim.h:84:1: + CHAR_BIT = 8 // limits.h:64:1: + CHAR_MAX = 127 // limits.h:99:1: + CHAR_MIN = -128 // limits.h:97:1: + COLL_WEIGHTS_MAX = 255 // posix2_lim.h:75:1: + DELAYTIMER_MAX = 2147483647 // local_lim.h:84:1: + EXPR_NEST_MAX = 32 // posix2_lim.h:78:1: + HOST_NAME_MAX = 64 // local_lim.h:93:1: + INT_MAX = 2147483647 // limits.h:120:1: + INT_MIN = -2147483648 // limits.h:118:1: + LINE_MAX = 2048 // posix2_lim.h:81:1: + LLONG_MAX = 9223372036854775807 // limits.h:142:1: + LLONG_MIN = -9223372036854775808 // limits.h:140:1: + LOGIN_NAME_MAX = 256 // local_lim.h:90:1: + LONG_MAX = 9223372036854775807 // limits.h:131:1: + LONG_MIN = -9223372036854775808 // limits.h:129:1: + MAX_CANON = 255 // limits.h:10:1: + MAX_INPUT = 255 // limits.h:11:1: + MB_LEN_MAX = 16 // limits.h:32:1: + MQ_PRIO_MAX = 32768 // local_lim.h:96:1: + NAME_MAX = 255 // limits.h:12:1: + NGROUPS_MAX = 65536 // limits.h:7:1: + PATH_MAX = 4096 // limits.h:13:1: + PIPE_BUF = 4096 // limits.h:14:1: + PTHREAD_DESTRUCTOR_ITERATIONS = 4 // local_lim.h:69:1: + PTHREAD_KEYS_MAX = 1024 // local_lim.h:64:1: + PTHREAD_STACK_MIN = 16384 // local_lim.h:81:1: + RE_DUP_MAX = 32767 // posix2_lim.h:88:1: + RTSIG_MAX = 32 // limits.h:19:1: + SCHAR_MAX = 127 // limits.h:75:1: + SCHAR_MIN = -128 // limits.h:73:1: + SEM_VALUE_MAX = 2147483647 // local_lim.h:99:1: + SHRT_MAX = 32767 // limits.h:106:1: + SHRT_MIN = -32768 // limits.h:104:1: + SSIZE_MAX = 9223372036854775807 // posix1_lim.h:169:1: + TTY_NAME_MAX = 32 // local_lim.h:87:1: + UCHAR_MAX = 255 // limits.h:82:1: + UINT_MAX = 4294967295 // limits.h:124:1: + ULLONG_MAX = 18446744073709551615 // limits.h:146:1: + ULONG_MAX = 18446744073709551615 // limits.h:135:1: + USHRT_MAX = 65535 // limits.h:113:1: + XATTR_LIST_MAX = 65536 // limits.h:17:1: + XATTR_NAME_MAX = 255 // limits.h:15:1: + XATTR_SIZE_MAX = 65536 // limits.h:16:1: + X_ATFILE_SOURCE = 1 // features.h:342:1: + X_BITS_POSIX1_LIM_H = 1 // posix1_lim.h:25:1: + X_BITS_POSIX2_LIM_H = 1 // posix2_lim.h:23:1: + X_DEFAULT_SOURCE = 1 // features.h:227:1: + X_FEATURES_H = 1 // features.h:19:1: + X_FILE_OFFSET_BITS = 64 // :25:1: + X_GCC_LIMITS_H_ = 0 // limits.h:30:1: + X_LIBC_LIMITS_H_ = 1 // limits.h:23:1: + X_LIMITS_H___ = 0 // limits.h:60:1: + X_LINUX_LIMITS_H = 0 // limits.h:3:1: + X_LP64 = 1 // :284:1: + X_POSIX2_BC_BASE_MAX = 99 // 
posix2_lim.h:27:1: + X_POSIX2_BC_DIM_MAX = 2048 // posix2_lim.h:30:1: + X_POSIX2_BC_SCALE_MAX = 99 // posix2_lim.h:33:1: + X_POSIX2_BC_STRING_MAX = 1000 // posix2_lim.h:36:1: + X_POSIX2_CHARCLASS_NAME_MAX = 14 // posix2_lim.h:55:1: + X_POSIX2_COLL_WEIGHTS_MAX = 2 // posix2_lim.h:40:1: + X_POSIX2_EXPR_NEST_MAX = 32 // posix2_lim.h:44:1: + X_POSIX2_LINE_MAX = 2048 // posix2_lim.h:47:1: + X_POSIX2_RE_DUP_MAX = 255 // posix2_lim.h:51:1: + X_POSIX_AIO_LISTIO_MAX = 2 // posix1_lim.h:32:1: + X_POSIX_AIO_MAX = 1 // posix1_lim.h:35:1: + X_POSIX_ARG_MAX = 4096 // posix1_lim.h:38:1: + X_POSIX_CHILD_MAX = 25 // posix1_lim.h:42:1: + X_POSIX_CLOCKRES_MIN = 20000000 // posix1_lim.h:157:1: + X_POSIX_C_SOURCE = 200809 // features.h:281:1: + X_POSIX_DELAYTIMER_MAX = 32 // posix1_lim.h:48:1: + X_POSIX_HOST_NAME_MAX = 255 // posix1_lim.h:52:1: + X_POSIX_LINK_MAX = 8 // posix1_lim.h:55:1: + X_POSIX_LOGIN_NAME_MAX = 9 // posix1_lim.h:58:1: + X_POSIX_MAX_CANON = 255 // posix1_lim.h:61:1: + X_POSIX_MAX_INPUT = 255 // posix1_lim.h:65:1: + X_POSIX_MQ_OPEN_MAX = 8 // posix1_lim.h:68:1: + X_POSIX_MQ_PRIO_MAX = 32 // posix1_lim.h:71:1: + X_POSIX_NAME_MAX = 14 // posix1_lim.h:74:1: + X_POSIX_NGROUPS_MAX = 8 // posix1_lim.h:78:1: + X_POSIX_OPEN_MAX = 20 // posix1_lim.h:85:1: + X_POSIX_PATH_MAX = 256 // posix1_lim.h:97:1: + X_POSIX_PIPE_BUF = 512 // posix1_lim.h:100:1: + X_POSIX_RE_DUP_MAX = 255 // posix1_lim.h:104:1: + X_POSIX_RTSIG_MAX = 8 // posix1_lim.h:107:1: + X_POSIX_SEM_NSEMS_MAX = 256 // posix1_lim.h:110:1: + X_POSIX_SEM_VALUE_MAX = 32767 // posix1_lim.h:113:1: + X_POSIX_SIGQUEUE_MAX = 32 // posix1_lim.h:116:1: + X_POSIX_SOURCE = 1 // features.h:279:1: + X_POSIX_SSIZE_MAX = 32767 // posix1_lim.h:119:1: + X_POSIX_STREAM_MAX = 8 // posix1_lim.h:122:1: + X_POSIX_SYMLINK_MAX = 255 // posix1_lim.h:125:1: + X_POSIX_SYMLOOP_MAX = 8 // posix1_lim.h:129:1: + X_POSIX_THREAD_DESTRUCTOR_ITERATIONS = 4 // local_lim.h:67:1: + X_POSIX_THREAD_KEYS_MAX = 128 // local_lim.h:62:1: + X_POSIX_THREAD_THREADS_MAX = 64 // local_lim.h:72:1: + X_POSIX_TIMER_MAX = 32 // posix1_lim.h:132:1: + X_POSIX_TTY_NAME_MAX = 9 // posix1_lim.h:135:1: + X_POSIX_TZNAME_MAX = 6 // posix1_lim.h:139:1: + X_STDC_PREDEF_H = 1 // :162:1: + X_SYS_CDEFS_H = 1 // cdefs.h:19:1: + Linux = 1 // :231:1: + Unix = 1 // :177:1: +) + +type Ptrdiff_t = int64 /* :3:26 */ + +type Size_t = uint64 /* :9:23 */ + +type Wchar_t = int32 /* :15:24 */ + +type X__int128_t = struct { + Flo int64 + Fhi int64 +} /* :21:43 */ // must match modernc.org/mathutil.Int128 +type X__uint128_t = struct { + Flo uint64 + Fhi uint64 +} /* :22:44 */ // must match modernc.org/mathutil.Int128 + +type X__builtin_va_list = uintptr /* :46:14 */ +type X__float128 = float64 /* :47:21 */ + +// Copyright (C) 1992-2020 Free Software Foundation, Inc. +// +// This file is part of GCC. +// +// GCC is free software; you can redistribute it and/or modify it under +// the terms of the GNU General Public License as published by the Free +// Software Foundation; either version 3, or (at your option) any later +// version. +// +// GCC is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or +// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +// for more details. +// +// Under Section 7 of GPL version 3, you are granted additional +// permissions described in the GCC Runtime Library Exception, version +// 3.1, as published by the Free Software Foundation. 
+// +// You should have received a copy of the GNU General Public License and +// a copy of the GCC Runtime Library Exception along with this program; +// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +// . + +// This administrivia gets added to the beginning of limits.h +// if the system has its own version of limits.h. + +// We use _GCC_LIMITS_H_ because we want this not to match +// any macros that the system's limits.h uses for its own purposes. + +// Use "..." so that we find syslimits.h only in this same directory. +// syslimits.h stands for the system's own limits.h file. +// If we can use it ok unmodified, then we install this text. +// If fixincludes fixes it, then the fixed version is installed +// instead of this text. + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// ISO C99 Standard: 7.10/5.2.4.2.1 Sizes of integer types + +// Handle feature test macros at the start of a header. +// Copyright (C) 2016-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// This header is internal to glibc and should not be included outside +// of glibc headers. Headers including it must define +// __GLIBC_INTERNAL_STARTING_HEADER_IMPLEMENTATION first. This header +// cannot have multiple include guards because ISO C feature test +// macros depend on the definition of the macro when an affected +// header is included, not when the first system header is +// included. + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// These are defined by the user (or the compiler) +// to specify the desired environment: +// +// __STRICT_ANSI__ ISO Standard C. +// _ISOC99_SOURCE Extensions to ISO C89 from ISO C99. +// _ISOC11_SOURCE Extensions to ISO C99 from ISO C11. +// _ISOC2X_SOURCE Extensions to ISO C99 from ISO C2X. +// __STDC_WANT_LIB_EXT2__ +// Extensions to ISO C99 from TR 27431-2:2010. +// __STDC_WANT_IEC_60559_BFP_EXT__ +// Extensions to ISO C11 from TS 18661-1:2014. +// __STDC_WANT_IEC_60559_FUNCS_EXT__ +// Extensions to ISO C11 from TS 18661-4:2015. +// __STDC_WANT_IEC_60559_TYPES_EXT__ +// Extensions to ISO C11 from TS 18661-3:2015. +// +// _POSIX_SOURCE IEEE Std 1003.1. +// _POSIX_C_SOURCE If ==1, like _POSIX_SOURCE; if >=2 add IEEE Std 1003.2; +// if >=199309L, add IEEE Std 1003.1b-1993; +// if >=199506L, add IEEE Std 1003.1c-1995; +// if >=200112L, all of IEEE 1003.1-2004 +// if >=200809L, all of IEEE 1003.1-2008 +// _XOPEN_SOURCE Includes POSIX and XPG things. Set to 500 if +// Single Unix conformance is wanted, to 600 for the +// sixth revision, to 700 for the seventh revision. +// _XOPEN_SOURCE_EXTENDED XPG things and X/Open Unix extensions. +// _LARGEFILE_SOURCE Some more functions for correct standard I/O. +// _LARGEFILE64_SOURCE Additional functionality from LFS for large files. +// _FILE_OFFSET_BITS=N Select default filesystem interface. +// _ATFILE_SOURCE Additional *at interfaces. +// _GNU_SOURCE All of the above, plus GNU extensions. +// _DEFAULT_SOURCE The default set of features (taking precedence over +// __STRICT_ANSI__). +// +// _FORTIFY_SOURCE Add security hardening to many library functions. +// Set to 1 or 2; 2 performs stricter checks than 1. +// +// _REENTRANT, _THREAD_SAFE +// Obsolete; equivalent to _POSIX_C_SOURCE=199506L. +// +// The `-ansi' switch to the GNU C compiler, and standards conformance +// options such as `-std=c99', define __STRICT_ANSI__. If none of +// these are defined, or if _DEFAULT_SOURCE is defined, the default is +// to have _POSIX_SOURCE set to one and _POSIX_C_SOURCE set to +// 200809L, as well as enabling miscellaneous functions from BSD and +// SVID. If more than one of these are defined, they accumulate. For +// example __STRICT_ANSI__, _POSIX_SOURCE and _POSIX_C_SOURCE together +// give you ISO C, 1003.1, and 1003.2, but nothing else. +// +// These are defined by this file and are used by the +// header files to decide what to declare or define: +// +// __GLIBC_USE (F) Define things from feature set F. This is defined +// to 1 or 0; the subsequent macros are either defined +// or undefined, and those tests should be moved to +// __GLIBC_USE. +// __USE_ISOC11 Define ISO C11 things. +// __USE_ISOC99 Define ISO C99 things. +// __USE_ISOC95 Define ISO C90 AMD1 (C95) things. +// __USE_ISOCXX11 Define ISO C++11 things. +// __USE_POSIX Define IEEE Std 1003.1 things. +// __USE_POSIX2 Define IEEE Std 1003.2 things. +// __USE_POSIX199309 Define IEEE Std 1003.1, and .1b things. +// __USE_POSIX199506 Define IEEE Std 1003.1, .1b, .1c and .1i things. +// __USE_XOPEN Define XPG things. +// __USE_XOPEN_EXTENDED Define X/Open Unix things. +// __USE_UNIX98 Define Single Unix V2 things. +// __USE_XOPEN2K Define XPG6 things. +// __USE_XOPEN2KXSI Define XPG6 XSI things. +// __USE_XOPEN2K8 Define XPG7 things. +// __USE_XOPEN2K8XSI Define XPG7 XSI things. +// __USE_LARGEFILE Define correct standard I/O things. 
+// __USE_LARGEFILE64 Define LFS things with separate names. +// __USE_FILE_OFFSET64 Define 64bit interface as default. +// __USE_MISC Define things from 4.3BSD or System V Unix. +// __USE_ATFILE Define *at interfaces and AT_* constants for them. +// __USE_GNU Define GNU extensions. +// __USE_FORTIFY_LEVEL Additional security measures used, according to level. +// +// The macros `__GNU_LIBRARY__', `__GLIBC__', and `__GLIBC_MINOR__' are +// defined by this file unconditionally. `__GNU_LIBRARY__' is provided +// only for compatibility. All new code should use the other symbols +// to test for features. +// +// All macros listed above as possibly being defined by this file are +// explicitly undefined if they are not explicitly defined. +// Feature-test macros that are not defined by the user or compiler +// but are implied by the other feature-test macros defined (or by the +// lack of any definitions) are defined by the file. +// +// ISO C feature test macros depend on the definition of the macro +// when an affected header is included, not when the first system +// header is included, and so they are handled in +// , which does not have a multiple include +// guard. Feature test macros that can be handled from the first +// system header included are handled here. + +// Undefine everything, so we get a clean slate. + +// Suppress kernel-name space pollution unless user expressedly asks +// for it. + +// Convenience macro to test the version of gcc. +// Use like this: +// #if __GNUC_PREREQ (2,8) +// ... code requiring gcc 2.8 or later ... +// #endif +// Note: only works for GCC 2.0 and later, because __GNUC_MINOR__ was +// added in 2.0. + +// Similarly for clang. Features added to GCC after version 4.2 may +// or may not also be available in clang, and clang's definitions of +// __GNUC(_MINOR)__ are fixed at 4 and 2 respectively. Not all such +// features can be queried via __has_extension/__has_feature. + +// Whether to use feature set F. + +// _BSD_SOURCE and _SVID_SOURCE are deprecated aliases for +// _DEFAULT_SOURCE. If _DEFAULT_SOURCE is present we do not +// issue a warning; the expectation is that the source is being +// transitioned to use the new macro. + +// If _GNU_SOURCE was defined by the user, turn on all the other features. + +// If nothing (other than _GNU_SOURCE and _DEFAULT_SOURCE) is defined, +// define _DEFAULT_SOURCE. + +// This is to enable the ISO C2X extension. + +// This is to enable the ISO C11 extension. + +// This is to enable the ISO C99 extension. + +// This is to enable the ISO C90 Amendment 1:1995 extension. + +// If none of the ANSI/POSIX macros are defined, or if _DEFAULT_SOURCE +// is defined, use POSIX.1-2008 (or another version depending on +// _XOPEN_SOURCE). + +// Some C libraries once required _REENTRANT and/or _THREAD_SAFE to be +// defined in all multithreaded code. GNU libc has not required this +// for many years. We now treat them as compatibility synonyms for +// _POSIX_C_SOURCE=199506L, which is the earliest level of POSIX with +// comprehensive support for multithreaded code. Using them never +// lowers the selected level of POSIX conformance, only raises it. + +// The function 'gets' existed in C89, but is impossible to use +// safely. It has been removed from ISO C11 and ISO C++14. Note: for +// compatibility with various implementations of , this test +// must consider only the value of __cplusplus when compiling C++. + +// GNU formerly extended the scanf functions with modified format +// specifiers %as, %aS, and %a[...] 
that allocate a buffer for the +// input using malloc. This extension conflicts with ISO C99, which +// defines %a as a standalone format specifier that reads a floating- +// point number; moreover, POSIX.1-2008 provides the same feature +// using the modifier letter 'm' instead (%ms, %mS, %m[...]). +// +// We now follow C99 unless GNU extensions are active and the compiler +// is specifically in C89 or C++98 mode (strict or not). For +// instance, with GCC, -std=gnu11 will have C99-compliant scanf with +// or without -D_GNU_SOURCE, but -std=c89 -D_GNU_SOURCE will have the +// old extension. + +// Get definitions of __STDC_* predefined macros, if the compiler has +// not preincluded this header automatically. +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// This macro indicates that the installed library is the GNU C Library. +// For historic reasons the value now is 6 and this will stay from now +// on. The use of this variable is deprecated. Use __GLIBC__ and +// __GLIBC_MINOR__ now (see below) when you want to test for a specific +// GNU C library version and use the values in to get +// the sonames of the shared libraries. + +// Major and minor version number of the GNU C library package. Use +// these macros to test for features in specific releases. + +// This is here only because every header file already includes this one. +// Copyright (C) 1992-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// We are almost always included from features.h. + +// The GNU libc does not support any K&R compilers or the traditional mode +// of ISO C compilers anymore. Check for some of the combinations not +// anymore supported. + +// Some user header file might have defined this before. + +// All functions, except those with callbacks or those that +// synchronize memory, are leaf functions. + +// GCC can always grok prototypes. For C++ programs we add throw() +// to help it optimize the function calls. But this works only with +// gcc 2.8.x and egcs. 
For gcc 3.2 and up we even mark C functions +// as non-throwing using a function attribute since programs can use +// the -fexceptions options for C code as well. + +// Compilers that are not clang may object to +// #if defined __clang__ && __has_extension(...) +// even though they do not need to evaluate the right-hand side of the &&. + +// These two macros are not used in glibc anymore. They are kept here +// only because some other projects expect the macros to be defined. + +// For these things, GCC behaves the ANSI way normally, +// and the non-ANSI way under -traditional. + +// This is not a typedef so `const __ptr_t' does the right thing. + +// C++ needs to know that types and declarations are C, not C++. + +// Fortify support. + +// Support for flexible arrays. +// Headers that should use flexible arrays only if they're "real" +// (e.g. only if they won't affect sizeof()) should test +// #if __glibc_c99_flexarr_available. + +// __asm__ ("xyz") is used throughout the headers to rename functions +// at the assembly language level. This is wrapped by the __REDIRECT +// macro, in order to support compilers that can do this some other +// way. When compilers don't support asm-names at all, we have to do +// preprocessor tricks instead (which don't have exactly the right +// semantics, but it's the best we can do). +// +// Example: +// int __REDIRECT(setpgrp, (__pid_t pid, __pid_t pgrp), setpgid); + +// +// #elif __SOME_OTHER_COMPILER__ +// +// # define __REDIRECT(name, proto, alias) name proto; _Pragma("let " #name " = " #alias) + +// GCC has various useful declarations that can be made with the +// `__attribute__' syntax. All of the ways we use this do fine if +// they are omitted for compilers that don't understand it. + +// At some point during the gcc 2.96 development the `malloc' attribute +// for functions was introduced. We don't want to use it unconditionally +// (although this would be possible) since it generates warnings. + +// Tell the compiler which arguments to an allocation function +// indicate the size of the allocation. + +// At some point during the gcc 2.96 development the `pure' attribute +// for functions was introduced. We don't want to use it unconditionally +// (although this would be possible) since it generates warnings. + +// This declaration tells the compiler that the value is constant. + +// At some point during the gcc 3.1 development the `used' attribute +// for functions was introduced. We don't want to use it unconditionally +// (although this would be possible) since it generates warnings. + +// Since version 3.2, gcc allows marking deprecated functions. + +// Since version 4.5, gcc also allows one to specify the message printed +// when a deprecated function is used. clang claims to be gcc 4.2, but +// may also support this feature. + +// At some point during the gcc 2.8 development the `format_arg' attribute +// for functions was introduced. We don't want to use it unconditionally +// (although this would be possible) since it generates warnings. +// If several `format_arg' attributes are given for the same function, in +// gcc-3.0 and older, all but the last one are ignored. In newer gccs, +// all designated arguments are considered. + +// At some point during the gcc 2.97 development the `strfmon' format +// attribute for functions was introduced. We don't want to use it +// unconditionally (although this would be possible) since it +// generates warnings. 
+ +// The nonull function attribute allows to mark pointer parameters which +// must not be NULL. + +// If fortification mode, we warn about unused results of certain +// function calls which can lead to problems. + +// Forces a function to be always inlined. +// The Linux kernel defines __always_inline in stddef.h (283d7573), and +// it conflicts with this definition. Therefore undefine it first to +// allow either header to be included first. + +// Associate error messages with the source location of the call site rather +// than with the source location inside the function. + +// GCC 4.3 and above with -std=c99 or -std=gnu99 implements ISO C99 +// inline semantics, unless -fgnu89-inline is used. Using __GNUC_STDC_INLINE__ +// or __GNUC_GNU_INLINE is not a good enough check for gcc because gcc versions +// older than 4.3 may define these macros and still not guarantee GNU inlining +// semantics. +// +// clang++ identifies itself as gcc-4.2, but has support for GNU inlining +// semantics, that can be checked for by using the __GNUC_STDC_INLINE_ and +// __GNUC_GNU_INLINE__ macro definitions. + +// GCC 4.3 and above allow passing all anonymous arguments of an +// __extern_always_inline function to some other vararg function. + +// It is possible to compile containing GCC extensions even if GCC is +// run in pedantic mode if the uses are carefully marked using the +// `__extension__' keyword. But this is not generally available before +// version 2.8. + +// __restrict is known in EGCS 1.2 and above. + +// ISO C99 also allows to declare arrays as non-overlapping. The syntax is +// array_name[restrict] +// GCC 3.1 supports this. + +// Describes a char array whose address can safely be passed as the first +// argument to strncpy and strncat, as the char array is not necessarily +// a NUL-terminated string. + +// Undefine (also defined in libc-symbols.h). +// Copies attributes from the declaration or type referenced by +// the argument. + +// Determine the wordsize from the preprocessor defines. + +// Both x86-64 and x32 use the 64-bit system call interface. +// Properties of long double type. ldbl-96 version. +// Copyright (C) 2016-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// long double is distinct from double, so there is nothing to +// define here. + +// __glibc_macro_warning (MESSAGE) issues warning MESSAGE. This is +// intended for use in preprocessor macros. +// +// Note: MESSAGE must be a _single_ string; concatenation of string +// literals is not supported. + +// Generic selection (ISO C11) is a C-only feature, available in GCC +// since version 4.9. Previous versions do not provide generic +// selection, even though they might set __STDC_VERSION__ to 201112L, +// when in -std=c11 mode. 
Thus, we must check for !defined __GNUC__ +// when testing __STDC_VERSION__ for generic selection support. +// On the other hand, Clang also defines __GNUC__, so a clang-specific +// check is required to enable the use of generic selection. + +// If we don't have __REDIRECT, prototypes will be missing if +// __USE_FILE_OFFSET64 but not __USE_LARGEFILE[64]. + +// Decide whether we can define 'extern inline' functions in headers. + +// This is here only because every header file already includes this one. +// Get the definitions of all the appropriate `__stub_FUNCTION' symbols. +// contains `#define __stub_FUNCTION' when FUNCTION is a stub +// that will always return failure (and set errno to ENOSYS). +// This file is automatically generated. +// This file selects the right generated file of `__stub_FUNCTION' macros +// based on the architecture being compiled for. + +// This file is automatically generated. +// It defines a symbol `__stub_FUNCTION' for each function +// in the C library which is a stub, meaning it will fail +// every time called, usually setting errno to ENOSYS. + +// ISO/IEC TR 24731-2:2010 defines the __STDC_WANT_LIB_EXT2__ +// macro. + +// ISO/IEC TS 18661-1:2014 defines the __STDC_WANT_IEC_60559_BFP_EXT__ +// macro. Most but not all symbols enabled by that macro in TS +// 18661-1 are enabled unconditionally in C2X; the symbols in Annex F +// still require that macro in C2X. + +// ISO/IEC TS 18661-4:2015 defines the +// __STDC_WANT_IEC_60559_FUNCS_EXT__ macro. Other than the reduction +// functions, the symbols from this TS are enabled unconditionally in +// C2X. + +// ISO/IEC TS 18661-3:2015 defines the +// __STDC_WANT_IEC_60559_TYPES_EXT__ macro. + +// Maximum length of any multibyte character in any locale. +// We define this value here since the gcc header does not define +// the correct value. + +// If we are not using GNU CC we have to define all the symbols ourself. +// Otherwise use gcc's definitions (see below). + +// Get the compiler's limits.h, which defines almost all the ISO constants. +// +// We put this #include_next outside the double inclusion check because +// it should be possible to include this file more than once and still get +// the definitions from gcc's header. + +// The files in some gcc versions don't define LLONG_MIN, +// LLONG_MAX, and ULLONG_MAX. Instead only the values gcc defined for +// ages are available. + +// The integer width macros are not defined by GCC's before +// GCC 7, or if _GNU_SOURCE rather than +// __STDC_WANT_IEC_60559_BFP_EXT__ is used to enable this feature. + +// POSIX adds things to . +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// POSIX Standard: 2.9.2 Minimum Values Added to +// +// Never include this file directly; use instead. + +// Determine the wordsize from the preprocessor defines. 
+ +// Both x86-64 and x32 use the 64-bit system call interface. + +// These are the standard-mandated minimum values. + +// Minimum number of operations in one list I/O call. + +// Minimal number of outstanding asynchronous I/O operations. + +// Maximum length of arguments to `execve', including environment. + +// Maximum simultaneous processes per real user ID. + +// Minimal number of timer expiration overruns. + +// Maximum length of a host name (not including the terminating null) +// as returned from the GETHOSTNAME function. + +// Maximum link count of a file. + +// Maximum length of login name. + +// Number of bytes in a terminal canonical input queue. + +// Number of bytes for which space will be +// available in a terminal input queue. + +// Maximum number of message queues open for a process. + +// Maximum number of supported message priorities. + +// Number of bytes in a filename. + +// Number of simultaneous supplementary group IDs per process. + +// Number of files one process can have open at once. + +// Number of bytes in a pathname. + +// Number of bytes than can be written atomically to a pipe. + +// The number of repeated occurrences of a BRE permitted by the +// REGEXEC and REGCOMP functions when using the interval notation. + +// Minimal number of realtime signals reserved for the application. + +// Number of semaphores a process can have. + +// Maximal value of a semaphore. + +// Number of pending realtime signals. + +// Largest value of a `ssize_t'. + +// Number of streams a process can have open at once. + +// The number of bytes in a symbolic link. + +// The number of symbolic links that can be traversed in the +// resolution of a pathname in the absence of a loop. + +// Number of timer for a process. + +// Maximum number of characters in a tty name. + +// Maximum length of a timezone name (element of `tzname'). + +// Maximum clock resolution in nanoseconds. + +// Get the implementation-specific values for the above. +// Minimum guaranteed maximum values for system limits. Linux version. +// Copyright (C) 1993-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation; either version 2.1 of the +// License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; see the file COPYING.LIB. If +// not, see . + +// The kernel header pollutes the namespace with the NR_OPEN symbol +// and defines LINK_MAX although filesystems have different maxima. A +// similar thing is true for OPEN_MAX: the limit can be changed at +// runtime and therefore the macro must not be defined. Remove this +// after including the header if necessary. + +// The kernel sources contain a file with all the needed information. +// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note + +// Have to remove NR_OPEN? +// Have to remove LINK_MAX? +// Have to remove OPEN_MAX? +// Have to remove ARG_MAX? + +// The number of data keys per process. +// This is the value this implementation supports. 
+ +// Controlling the iterations of destructors for thread-specific data. +// Number of iterations this implementation does. + +// The number of threads per process. +// We have no predefined limit on the number of threads. + +// Maximum amount by which a process can descrease its asynchronous I/O +// priority level. + +// Minimum size for a thread. We are free to choose a reasonable value. + +// Maximum number of timer expiration overruns. + +// Maximum tty name length. + +// Maximum login name length. This is arbitrary. + +// Maximum host name length. + +// Maximum message queue priority level. + +// Maximum value the semaphore can have. + +// ssize_t is not formally required to be the signed type +// corresponding to size_t, but it is for all configurations supported +// by glibc. + +// This value is a guaranteed minimum maximum. +// The current maximum can be got from `sysconf'. + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; include instead. + +// The maximum `ibase' and `obase' values allowed by the `bc' utility. + +// The maximum number of elements allowed in an array by the `bc' utility. + +// The maximum `scale' value allowed by the `bc' utility. + +// The maximum length of a string constant accepted by the `bc' utility. + +// The maximum number of weights that can be assigned to an entry of +// the LC_COLLATE `order' keyword in the locale definition file. + +// The maximum number of expressions that can be nested +// within parentheses by the `expr' utility. + +// The maximum length, in bytes, of an input line. + +// The maximum number of repeated occurrences of a regular expression +// permitted when using the interval notation `\{M,N\}'. + +// The maximum number of bytes in a character class name. We have no +// fixed limit, 2048 is a high number. + +// These values are implementation-specific, +// and may vary within the implementation. +// Their precise values can be obtained from sysconf. + +// This value is defined like this in regex.h. + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// +// This file is part of GCC. +// +// GCC is free software; you can redistribute it and/or modify it under +// the terms of the GNU General Public License as published by the Free +// Software Foundation; either version 3, or (at your option) any later +// version. +// +// GCC is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or +// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +// for more details. +// +// Under Section 7 of GPL version 3, you are granted additional +// permissions described in the GCC Runtime Library Exception, version +// 3.1, as published by the Free Software Foundation. 
+// +// You should have received a copy of the GNU General Public License and +// a copy of the GCC Runtime Library Exception along with this program; +// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +// . + +// Number of bits in a `char'. + +// Maximum length of a multibyte character. + +// Minimum and maximum values a `signed char' can hold. + +// Maximum value an `unsigned char' can hold. (Minimum is 0). + +// Minimum and maximum values a `char' can hold. + +// Minimum and maximum values a `signed short int' can hold. + +// Maximum value an `unsigned short int' can hold. (Minimum is 0). + +// Minimum and maximum values a `signed int' can hold. + +// Maximum value an `unsigned int' can hold. (Minimum is 0). + +// Minimum and maximum values a `signed long int' can hold. +// (Same as `int'). + +// Maximum value an `unsigned long int' can hold. (Minimum is 0). + +// Minimum and maximum values a `signed long long int' can hold. + +// Maximum value an `unsigned long long int' can hold. (Minimum is 0). + +// This administrivia gets added to the end of limits.h +// if the system has its own version of limits.h. + +var _ int8 /* gen.c:2:13: */ diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/musl_linux_loong64.go temporal-1.22.5/src/vendor/modernc.org/libc/musl_linux_loong64.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/musl_linux_loong64.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/musl_linux_loong64.go 2024-02-23 09:46:15.000000000 +0000 @@ -0,0 +1,6940 @@ +// Code generated by 'ccgo -export-externs X -hide __syscall0,__syscall1,__syscall2,__syscall3,__syscall4,__syscall5,__syscall6 -nostdinc -nostdlib -o ../musl_linux_amd64.go -pkgname libc -static-locals-prefix _s -Iarch/x86_64 -Iarch/generic -Iobj/src/internal -Isrc/include -Isrc/internal -Iobj/include -Iinclude copyright.c src/ctype/__ctype_b_loc.c src/ctype/isalnum.c src/ctype/isalpha.c src/ctype/isdigit.c src/ctype/islower.c src/ctype/isprint.c src/ctype/isupper.c src/ctype/isxdigit.c src/dirent/closedir.c src/dirent/opendir.c src/dirent/readdir.c src/internal/floatscan.c src/internal/intscan.c src/internal/shgetc.c src/locale/localeconv.c src/math/__fpclassify.c src/math/__fpclassifyf.c src/math/__fpclassifyl.c src/math/copysignl.c src/math/fabsl.c src/math/fmodl.c src/math/nanf.c src/math/rint.c src/math/scalbn.c src/math/scalbnl.c src/multibyte/internal.c src/multibyte/mbrtowc.c src/multibyte/mbsinit.c src/network/freeaddrinfo.c src/network/getaddrinfo.c src/network/gethostbyaddr.c src/network/gethostbyaddr_r.c src/network/gethostbyname.c src/network/gethostbyname2.c src/network/gethostbyname2_r.c src/network/gethostbyname_r.c src/network/getnameinfo.c src/network/h_errno.c src/network/inet_aton.c src/network/inet_ntop.c src/network/inet_pton.c src/network/lookup_ipliteral.c src/network/lookup_name.c src/network/lookup_serv.c src/prng/rand_r.c src/stdio/__lockfile.c src/stdio/__toread.c src/stdio/__uflow.c src/stdio/sscanf.c src/stdio/vfscanf.c src/stdio/vsscanf.c src/stdlib/bsearch.c src/stdlib/strtod.c src/stdlib/strtol.c src/string/strdup.c src/string/strlcat.c src/string/strlcpy.c src/string/strncasecmp.c src/string/strncat.c src/string/strnlen.c src/string/strspn.c src/string/strtok.c src/thread/pthread_attr_get.c src/thread/pthread_attr_setdetachstate.c src/thread/pthread_mutex_lock.c src/thread/pthread_mutexattr_destroy.c src/thread/pthread_mutexattr_init.c src/thread/pthread_mutexattr_settype.c', DO NOT EDIT. 
+ +package libc + +import ( + "math" + "reflect" + "sync/atomic" + "unsafe" +) + +var _ = math.Pi +var _ reflect.Kind +var _ atomic.Value +var _ unsafe.Pointer + +// musl as a whole is licensed under the following standard MIT license: +// +// ---------------------------------------------------------------------- +// Copyright © 2005-2020 Rich Felker, et al. +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +// ---------------------------------------------------------------------- +// +// Authors/contributors include: +// +// A. Wilcox +// Ada Worcester +// Alex Dowad +// Alex Suykov +// Alexander Monakov +// Andre McCurdy +// Andrew Kelley +// Anthony G. Basile +// Aric Belsito +// Arvid Picciani +// Bartosz Brachaczek +// Benjamin Peterson +// Bobby Bingham +// Boris Brezillon +// Brent Cook +// Chris Spiegel +// Clément Vasseur +// Daniel Micay +// Daniel Sabogal +// Daurnimator +// David Carlier +// David Edelsohn +// Denys Vlasenko +// Dmitry Ivanov +// Dmitry V. Levin +// Drew DeVault +// Emil Renner Berthing +// Fangrui Song +// Felix Fietkau +// Felix Janda +// Gianluca Anzolin +// Hauke Mehrtens +// He X +// Hiltjo Posthuma +// Isaac Dunham +// Jaydeep Patil +// Jens Gustedt +// Jeremy Huntwork +// Jo-Philipp Wich +// Joakim Sindholt +// John Spencer +// Julien Ramseier +// Justin Cormack +// Kaarle Ritvanen +// Khem Raj +// Kylie McClain +// Leah Neukirchen +// Luca Barbato +// Luka Perkov +// M Farkas-Dyck (Strake) +// Mahesh Bodapati +// Markus Wichmann +// Masanori Ogino +// Michael Clark +// Michael Forney +// Mikhail Kremnyov +// Natanael Copa +// Nicholas J. Kain +// orc +// Pascal Cuoq +// Patrick Oppenlander +// Petr Hosek +// Petr Skocik +// Pierre Carrier +// Reini Urban +// Rich Felker +// Richard Pennington +// Ryan Fairfax +// Samuel Holland +// Segev Finer +// Shiz +// sin +// Solar Designer +// Stefan Kristiansson +// Stefan O'Rear +// Szabolcs Nagy +// Timo Teräs +// Trutz Behn +// Valentin Ochs +// Will Dietz +// William Haddon +// William Pitcock +// +// Portions of this software are derived from third-party works licensed +// under terms compatible with the above MIT license: +// +// The TRE regular expression implementation (src/regex/reg* and +// src/regex/tre*) is Copyright © 2001-2008 Ville Laurikari and licensed +// under a 2-clause BSD license (license text in the source files). The +// included version has been heavily modified by Rich Felker in 2012, in +// the interests of size, simplicity, and namespace cleanliness. 
+// +// Much of the math library code (src/math/* and src/complex/*) is +// Copyright © 1993,2004 Sun Microsystems or +// Copyright © 2003-2011 David Schultz or +// Copyright © 2003-2009 Steven G. Kargl or +// Copyright © 2003-2009 Bruce D. Evans or +// Copyright © 2008 Stephen L. Moshier or +// Copyright © 2017-2018 Arm Limited +// and labelled as such in comments in the individual source files. All +// have been licensed under extremely permissive terms. +// +// The ARM memcpy code (src/string/arm/memcpy.S) is Copyright © 2008 +// The Android Open Source Project and is licensed under a two-clause BSD +// license. It was taken from Bionic libc, used on Android. +// +// The AArch64 memcpy and memset code (src/string/aarch64/*) are +// Copyright © 1999-2019, Arm Limited. +// +// The implementation of DES for crypt (src/crypt/crypt_des.c) is +// Copyright © 1994 David Burren. It is licensed under a BSD license. +// +// The implementation of blowfish crypt (src/crypt/crypt_blowfish.c) was +// originally written by Solar Designer and placed into the public +// domain. The code also comes with a fallback permissive license for use +// in jurisdictions that may not recognize the public domain. +// +// The smoothsort implementation (src/stdlib/qsort.c) is Copyright © 2011 +// Valentin Ochs and is licensed under an MIT-style license. +// +// The x86_64 port was written by Nicholas J. Kain and is licensed under +// the standard MIT terms. +// +// The mips and microblaze ports were originally written by Richard +// Pennington for use in the ellcc project. The original code was adapted +// by Rich Felker for build system and code conventions during upstream +// integration. It is licensed under the standard MIT terms. +// +// The mips64 port was contributed by Imagination Technologies and is +// licensed under the standard MIT terms. +// +// The powerpc port was also originally written by Richard Pennington, +// and later supplemented and integrated by John Spencer. It is licensed +// under the standard MIT terms. +// +// All other files which have no copyright comments are original works +// produced specifically for use as part of this library, written either +// by Rich Felker, the main author of the library, or by one or more +// contibutors listed above. Details on authorship of individual files +// can be found in the git version control history of the project. The +// omission of copyright and license comments in each file is in the +// interest of source tree size. +// +// In addition, permission is hereby granted for all public header files +// (include/* and arch/*/bits/*) and crt files intended to be linked into +// applications (crt/*, ldso/dlstart.c, and arch/*/crt_arch.h) to omit +// the copyright notice and permission notice otherwise required by the +// license, and to use these files without any requirement of +// attribution. These files include substantial contributions from: +// +// Bobby Bingham +// John Spencer +// Nicholas J. Kain +// Rich Felker +// Richard Pennington +// Stefan Kristiansson +// Szabolcs Nagy +// +// all of whom have explicitly granted such permission. +// +// This file previously contained text expressing a belief that most of +// the files covered by the above exception were sufficiently trivial not +// to be subject to copyright, resulting in confusion over whether it +// negated the permissions granted in the license. In the spirit of +// permissive licensing, and of not having licensing issues being an +// obstacle to adoption, that text has been removed. 
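The rest of this generated file re-exports musl's C functions as Go functions whose names carry an X prefix (per the ccgo "-export-externs X" flag in the header above), each taking a *TLS handle for per-goroutine state. As a minimal usage sketch only, assuming the modernc.org/libc import path and its NewTLS/Close helpers (part of the library's hand-written runtime, not of this diff), the generated ctype functions defined further below could be exercised like this:

	package main

	import (
		"fmt"

		"modernc.org/libc" // assumption: the package this vendored file belongs to
	)

	func main() {
		// The generated code threads a *libc.TLS through every call for
		// per-goroutine state (errno, scratch allocations, locale).
		// NewTLS/Close are provided by the library's runtime, not by this file.
		tls := libc.NewTLS()
		defer tls.Close()

		// Xisalpha/Xisdigit mirror musl's ASCII-only ctype checks,
		// e.g. Xisalpha reduces to (c|32)-'a' < 26.
		fmt.Println(libc.Xisalpha(tls, int32('A')) != 0) // true
		fmt.Println(libc.Xisdigit(tls, int32('7')) != 0) // true
		fmt.Println(libc.Xisalpha(tls, int32('7')) != 0) // false
	}
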
+const ( /* copyright.c:194:1: */ + __musl__copyright__ = 0 +) + +const ( /* nameser.h:117:1: */ + ns_uop_delete = 0 + ns_uop_add = 1 + ns_uop_max = 2 +) + +const ( /* nameser.h:147:1: */ + ns_t_invalid = 0 + ns_t_a = 1 + ns_t_ns = 2 + ns_t_md = 3 + ns_t_mf = 4 + ns_t_cname = 5 + ns_t_soa = 6 + ns_t_mb = 7 + ns_t_mg = 8 + ns_t_mr = 9 + ns_t_null = 10 + ns_t_wks = 11 + ns_t_ptr = 12 + ns_t_hinfo = 13 + ns_t_minfo = 14 + ns_t_mx = 15 + ns_t_txt = 16 + ns_t_rp = 17 + ns_t_afsdb = 18 + ns_t_x25 = 19 + ns_t_isdn = 20 + ns_t_rt = 21 + ns_t_nsap = 22 + ns_t_nsap_ptr = 23 + ns_t_sig = 24 + ns_t_key = 25 + ns_t_px = 26 + ns_t_gpos = 27 + ns_t_aaaa = 28 + ns_t_loc = 29 + ns_t_nxt = 30 + ns_t_eid = 31 + ns_t_nimloc = 32 + ns_t_srv = 33 + ns_t_atma = 34 + ns_t_naptr = 35 + ns_t_kx = 36 + ns_t_cert = 37 + ns_t_a6 = 38 + ns_t_dname = 39 + ns_t_sink = 40 + ns_t_opt = 41 + ns_t_apl = 42 + ns_t_tkey = 249 + ns_t_tsig = 250 + ns_t_ixfr = 251 + ns_t_axfr = 252 + ns_t_mailb = 253 + ns_t_maila = 254 + ns_t_any = 255 + ns_t_zxfr = 256 + ns_t_max = 65536 +) + +const ( /* nameser.h:210:1: */ + ns_c_invalid = 0 + ns_c_in = 1 + ns_c_2 = 2 + ns_c_chaos = 3 + ns_c_hs = 4 + ns_c_none = 254 + ns_c_any = 255 + ns_c_max = 65536 +) + +const ( /* nameser.h:221:1: */ + ns_kt_rsa = 1 + ns_kt_dh = 2 + ns_kt_dsa = 3 + ns_kt_private = 254 +) + +const ( /* nameser.h:228:1: */ + cert_t_pkix = 1 + cert_t_spki = 2 + cert_t_pgp = 3 + cert_t_url = 253 + cert_t_oid = 254 +) + +const ( /* nameser.h:28:1: */ + ns_s_qd = 0 + ns_s_zn = 0 + ns_s_an = 1 + ns_s_pr = 1 + ns_s_ns = 2 + ns_s_ud = 2 + ns_s_ar = 3 + ns_s_max = 4 +) + +const ( /* nameser.h:75:1: */ + ns_f_qr = 0 + ns_f_opcode = 1 + ns_f_aa = 2 + ns_f_tc = 3 + ns_f_rd = 4 + ns_f_ra = 5 + ns_f_z = 6 + ns_f_ad = 7 + ns_f_cd = 8 + ns_f_rcode = 9 + ns_f_max = 10 +) + +const ( /* nameser.h:89:1: */ + ns_o_query = 0 + ns_o_iquery = 1 + ns_o_status = 2 + ns_o_notify = 4 + ns_o_update = 5 + ns_o_max = 6 +) + +const ( /* nameser.h:98:1: */ + ns_r_noerror = 0 + ns_r_formerr = 1 + ns_r_servfail = 2 + ns_r_nxdomain = 3 + ns_r_notimpl = 4 + ns_r_refused = 5 + ns_r_yxdomain = 6 + ns_r_yxrrset = 7 + ns_r_nxrrset = 8 + ns_r_notauth = 9 + ns_r_notzone = 10 + ns_r_max = 11 + ns_r_badvers = 16 + ns_r_badsig = 16 + ns_r_badkey = 17 + ns_r_badtime = 18 +) + +const ( /* pthread_impl.h:58:1: */ + DT_EXITING = 0 + DT_JOINABLE = 1 + DT_DETACHED = 2 +) + +type ptrdiff_t = int64 /* :3:26 */ + +type size_t = uint64 /* :9:23 */ + +type wchar_t = int32 /* :15:24 */ + +type uint16_t = uint16 /* alltypes.h:126:25 */ + +type uint32_t = uint32 /* alltypes.h:131:25 */ + +type uint64_t = uint64 /* alltypes.h:136:25 */ + +func __bswap32(tls *TLS, __x uint32_t) uint32_t { /* endian.h:24:26: */ + return __x>>24 | __x>>8&uint32_t(0xff00) | __x<<8&uint32_t(0xff0000) | __x<<24 +} + +var table = [384]uint16{ + uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), + uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), + uint16(0), 
uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), + uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), + uint16((0x200/256 | 0x200*256) % 65536), uint16((0x200/256 | 0x200*256) % 65536), uint16((0x200/256 | 0x200*256) % 65536), uint16((0x200/256 | 0x200*256) % 65536), uint16((0x200/256 | 0x200*256) % 65536), uint16((0x200/256 | 0x200*256) % 65536), uint16((0x200/256 | 0x200*256) % 65536), uint16((0x200/256 | 0x200*256) % 65536), + uint16((0x200/256 | 0x200*256) % 65536), uint16((0x320/256 | 0x320*256) % 65536), uint16((0x220/256 | 0x220*256) % 65536), uint16((0x220/256 | 0x220*256) % 65536), uint16((0x220/256 | 0x220*256) % 65536), uint16((0x220/256 | 0x220*256) % 65536), uint16((0x200/256 | 0x200*256) % 65536), uint16((0x200/256 | 0x200*256) % 65536), + uint16((0x200/256 | 0x200*256) % 65536), uint16((0x200/256 | 0x200*256) % 65536), uint16((0x200/256 | 0x200*256) % 65536), uint16((0x200/256 | 0x200*256) % 65536), uint16((0x200/256 | 0x200*256) % 65536), uint16((0x200/256 | 0x200*256) % 65536), uint16((0x200/256 | 0x200*256) % 65536), uint16((0x200/256 | 0x200*256) % 65536), + uint16((0x200/256 | 0x200*256) % 65536), uint16((0x200/256 | 0x200*256) % 65536), uint16((0x200/256 | 0x200*256) % 65536), uint16((0x200/256 | 0x200*256) % 65536), uint16((0x200/256 | 0x200*256) % 65536), uint16((0x200/256 | 0x200*256) % 65536), uint16((0x200/256 | 0x200*256) % 65536), uint16((0x200/256 | 0x200*256) % 65536), + uint16((0x160/256 | 0x160*256) % 65536), uint16((0x4c0/256 | 0x4c0*256) % 65536), uint16((0x4c0/256 | 0x4c0*256) % 65536), uint16((0x4c0/256 | 0x4c0*256) % 65536), uint16((0x4c0/256 | 0x4c0*256) % 65536), uint16((0x4c0/256 | 0x4c0*256) % 65536), uint16((0x4c0/256 | 0x4c0*256) % 65536), uint16((0x4c0/256 | 0x4c0*256) % 65536), + uint16((0x4c0/256 | 0x4c0*256) % 65536), uint16((0x4c0/256 | 0x4c0*256) % 65536), uint16((0x4c0/256 | 0x4c0*256) % 65536), uint16((0x4c0/256 | 0x4c0*256) % 65536), uint16((0x4c0/256 | 0x4c0*256) % 65536), uint16((0x4c0/256 | 0x4c0*256) % 65536), uint16((0x4c0/256 | 0x4c0*256) % 65536), uint16((0x4c0/256 | 0x4c0*256) % 65536), + uint16((0x8d8/256 | 0x8d8*256) % 65536), uint16((0x8d8/256 | 0x8d8*256) % 65536), uint16((0x8d8/256 | 0x8d8*256) % 65536), uint16((0x8d8/256 | 0x8d8*256) % 65536), uint16((0x8d8/256 | 0x8d8*256) % 65536), uint16((0x8d8/256 | 0x8d8*256) % 65536), uint16((0x8d8/256 | 0x8d8*256) % 65536), uint16((0x8d8/256 | 0x8d8*256) % 65536), + uint16((0x8d8/256 | 0x8d8*256) % 65536), uint16((0x8d8/256 | 0x8d8*256) % 65536), uint16((0x4c0/256 | 0x4c0*256) % 65536), uint16((0x4c0/256 | 0x4c0*256) % 65536), uint16((0x4c0/256 | 0x4c0*256) % 65536), uint16((0x4c0/256 | 0x4c0*256) % 65536), uint16((0x4c0/256 | 0x4c0*256) % 65536), uint16((0x4c0/256 | 0x4c0*256) % 65536), + uint16((0x4c0/256 | 0x4c0*256) % 65536), uint16((0x8d5/256 | 0x8d5*256) % 65536), uint16((0x8d5/256 | 0x8d5*256) % 65536), uint16((0x8d5/256 | 0x8d5*256) % 65536), uint16((0x8d5/256 | 0x8d5*256) % 65536), 
uint16((0x8d5/256 | 0x8d5*256) % 65536), uint16((0x8d5/256 | 0x8d5*256) % 65536), uint16((0x8c5/256 | 0x8c5*256) % 65536), + uint16((0x8c5/256 | 0x8c5*256) % 65536), uint16((0x8c5/256 | 0x8c5*256) % 65536), uint16((0x8c5/256 | 0x8c5*256) % 65536), uint16((0x8c5/256 | 0x8c5*256) % 65536), uint16((0x8c5/256 | 0x8c5*256) % 65536), uint16((0x8c5/256 | 0x8c5*256) % 65536), uint16((0x8c5/256 | 0x8c5*256) % 65536), uint16((0x8c5/256 | 0x8c5*256) % 65536), + uint16((0x8c5/256 | 0x8c5*256) % 65536), uint16((0x8c5/256 | 0x8c5*256) % 65536), uint16((0x8c5/256 | 0x8c5*256) % 65536), uint16((0x8c5/256 | 0x8c5*256) % 65536), uint16((0x8c5/256 | 0x8c5*256) % 65536), uint16((0x8c5/256 | 0x8c5*256) % 65536), uint16((0x8c5/256 | 0x8c5*256) % 65536), uint16((0x8c5/256 | 0x8c5*256) % 65536), + uint16((0x8c5/256 | 0x8c5*256) % 65536), uint16((0x8c5/256 | 0x8c5*256) % 65536), uint16((0x8c5/256 | 0x8c5*256) % 65536), uint16((0x4c0/256 | 0x4c0*256) % 65536), uint16((0x4c0/256 | 0x4c0*256) % 65536), uint16((0x4c0/256 | 0x4c0*256) % 65536), uint16((0x4c0/256 | 0x4c0*256) % 65536), uint16((0x4c0/256 | 0x4c0*256) % 65536), + uint16((0x4c0/256 | 0x4c0*256) % 65536), uint16((0x8d6/256 | 0x8d6*256) % 65536), uint16((0x8d6/256 | 0x8d6*256) % 65536), uint16((0x8d6/256 | 0x8d6*256) % 65536), uint16((0x8d6/256 | 0x8d6*256) % 65536), uint16((0x8d6/256 | 0x8d6*256) % 65536), uint16((0x8d6/256 | 0x8d6*256) % 65536), uint16((0x8c6/256 | 0x8c6*256) % 65536), + uint16((0x8c6/256 | 0x8c6*256) % 65536), uint16((0x8c6/256 | 0x8c6*256) % 65536), uint16((0x8c6/256 | 0x8c6*256) % 65536), uint16((0x8c6/256 | 0x8c6*256) % 65536), uint16((0x8c6/256 | 0x8c6*256) % 65536), uint16((0x8c6/256 | 0x8c6*256) % 65536), uint16((0x8c6/256 | 0x8c6*256) % 65536), uint16((0x8c6/256 | 0x8c6*256) % 65536), + uint16((0x8c6/256 | 0x8c6*256) % 65536), uint16((0x8c6/256 | 0x8c6*256) % 65536), uint16((0x8c6/256 | 0x8c6*256) % 65536), uint16((0x8c6/256 | 0x8c6*256) % 65536), uint16((0x8c6/256 | 0x8c6*256) % 65536), uint16((0x8c6/256 | 0x8c6*256) % 65536), uint16((0x8c6/256 | 0x8c6*256) % 65536), uint16((0x8c6/256 | 0x8c6*256) % 65536), + uint16((0x8c6/256 | 0x8c6*256) % 65536), uint16((0x8c6/256 | 0x8c6*256) % 65536), uint16((0x8c6/256 | 0x8c6*256) % 65536), uint16((0x4c0/256 | 0x4c0*256) % 65536), uint16((0x4c0/256 | 0x4c0*256) % 65536), uint16((0x4c0/256 | 0x4c0*256) % 65536), uint16((0x4c0/256 | 0x4c0*256) % 65536), uint16((0x200/256 | 0x200*256) % 65536), + uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), + uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), + uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), + uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), 
uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), +} /* __ctype_b_loc.c:9:29 */ + +var ptable uintptr = 0 /* __ctype_b_loc.c:36:29 */ + +func X__ctype_b_loc(tls *TLS) uintptr { /* __ctype_b_loc.c:38:22: */ + return uintptr(unsafe.Pointer(&ptable)) +} + +func __isspace(tls *TLS, _c int32) int32 { /* ctype.h:26:21: */ + return Bool32(_c == ' ' || uint32(_c)-uint32('\t') < uint32(5)) +} + +type __locale_struct = struct{ cat [6]uintptr } /* alltypes.h:343:9 */ + +type locale_t = uintptr /* alltypes.h:343:32 */ + +func Xisalnum(tls *TLS, c int32) int32 { /* isalnum.c:3:5: */ + return Bool32(func() int32 { + if 0 != 0 { + return Xisalpha(tls, c) + } + return Bool32(uint32(c)|uint32(32)-uint32('a') < uint32(26)) + }() != 0 || func() int32 { + if 0 != 0 { + return Xisdigit(tls, c) + } + return Bool32(uint32(c)-uint32('0') < uint32(10)) + }() != 0) +} + +func X__isalnum_l(tls *TLS, c int32, l locale_t) int32 { /* isalnum.c:8:5: */ + return Xisalnum(tls, c) +} + +func Xisalpha(tls *TLS, c int32) int32 { /* isalpha.c:4:5: */ + return Bool32(uint32(c)|uint32(32)-uint32('a') < uint32(26)) +} + +func X__isalpha_l(tls *TLS, c int32, l locale_t) int32 { /* isalpha.c:9:5: */ + return Xisalpha(tls, c) +} + +func Xisdigit(tls *TLS, c int32) int32 { /* isdigit.c:4:5: */ + return Bool32(uint32(c)-uint32('0') < uint32(10)) +} + +func X__isdigit_l(tls *TLS, c int32, l locale_t) int32 { /* isdigit.c:9:5: */ + return Xisdigit(tls, c) +} + +func Xislower(tls *TLS, c int32) int32 { /* islower.c:4:5: */ + return Bool32(uint32(c)-uint32('a') < uint32(26)) +} + +func X__islower_l(tls *TLS, c int32, l locale_t) int32 { /* islower.c:9:5: */ + return Xislower(tls, c) +} + +func Xisprint(tls *TLS, c int32) int32 { /* isprint.c:4:5: */ + return Bool32(uint32(c)-uint32(0x20) < uint32(0x5f)) +} + +func X__isprint_l(tls *TLS, c int32, l locale_t) int32 { /* isprint.c:9:5: */ + return Xisprint(tls, c) +} + +func Xisupper(tls *TLS, c int32) int32 { /* isupper.c:4:5: */ + return Bool32(uint32(c)-uint32('A') < uint32(26)) +} + +func X__isupper_l(tls *TLS, c int32, l locale_t) int32 { /* isupper.c:9:5: */ + return Xisupper(tls, c) +} + +func Xisxdigit(tls *TLS, c int32) int32 { /* isxdigit.c:3:5: */ + return Bool32(func() int32 { + if 0 != 0 { + return Xisdigit(tls, c) + } + return Bool32(uint32(c)-uint32('0') < uint32(10)) + }() != 0 || uint32(c)|uint32(32)-uint32('a') < uint32(6)) +} + +func X__isxdigit_l(tls *TLS, c int32, l locale_t) int32 { /* isxdigit.c:8:5: */ + return Xisxdigit(tls, c) +} + +type off_t = int64 /* alltypes.h:162:16 */ + +type ino_t = uint64 /* alltypes.h:167:25 */ + +type dirent = struct { + d_ino ino_t + d_off off_t + d_reclen uint16 + d_type uint8 + d_name [256]int8 + _ [5]byte +} /* dirent.h:5:1 */ + +type __dirstream = struct { + tell off_t + fd int32 + buf_pos int32 + buf_end int32 + lock [1]int32 + buf [2048]int8 +} /* dirent.h:20:9 */ + +type DIR = __dirstream /* dirent.h:20:28 */ + +type ssize_t = int64 /* alltypes.h:65:15 */ + +type intptr_t = int64 /* alltypes.h:70:15 */ + +type pid_t = int32 /* alltypes.h:235:13 */ + +type uid_t = uint32 /* alltypes.h:245:18 */ + +type gid_t = uint32 /* alltypes.h:250:18 */ + +type useconds_t = uint32 /* alltypes.h:260:18 */ + +type div_t = struct { + quot int32 + rem int32 +} /* stdlib.h:62:35 */ +type ldiv_t = struct { 
+ quot int64 + rem int64 +} /* stdlib.h:63:36 */ +type lldiv_t = struct { + quot int64 + rem int64 +} /* stdlib.h:64:41 */ + +func Xclosedir(tls *TLS, dir uintptr) int32 { /* closedir.c:6:5: */ + var ret int32 = Xclose(tls, (*DIR)(unsafe.Pointer(dir)).fd) + Xfree(tls, dir) + return ret +} + +type mode_t = uint32 /* alltypes.h:152:18 */ + +type iovec = struct { + iov_base uintptr + iov_len size_t +} /* alltypes.h:355:1 */ + +type flock = struct { + l_type int16 + l_whence int16 + _ [4]byte + l_start off_t + l_len off_t + l_pid pid_t + _ [4]byte +} /* fcntl.h:24:1 */ + +type file_handle = struct { + _ [0]uint32 + handle_bytes uint32 + handle_type int32 +} /* fcntl.h:167:1 */ + +type f_owner_ex = struct { + __type int32 + pid pid_t +} /* fcntl.h:172:1 */ + +type syscall_arg_t = int64 /* syscall.h:22:14 */ + +func Xopendir(tls *TLS, name uintptr) uintptr { /* opendir.c:8:5: */ + var fd int32 + var dir uintptr + + if AssignInt32(&fd, Xopen(tls, name, 00|0200000|02000000, 0)) < 0 { + return uintptr(0) + } + if !(int32(AssignUintptr(&dir, Xcalloc(tls, uint64(1), uint64(unsafe.Sizeof(DIR{}))))) != 0) { + X__syscall1(tls, int64(3), int64(fd)) + return uintptr(0) + } + (*DIR)(unsafe.Pointer(dir)).fd = fd + return dir +} + +type max_align_t = struct { + __ll int64 + __ld float64 +} /* alltypes.h:41:54 */ + +type dirstream_buf_alignment_check = [1]int8 /* readdir.c:7:14 */ + +func Xreaddir(tls *TLS, dir uintptr) uintptr { /* readdir.c:10:15: */ + var de uintptr + + if (*DIR)(unsafe.Pointer(dir)).buf_pos >= (*DIR)(unsafe.Pointer(dir)).buf_end { + var len int32 = int32(X__syscall3(tls, int64(217), int64((*DIR)(unsafe.Pointer(dir)).fd), int64(dir+24), int64(unsafe.Sizeof([2048]int8{})))) + if len <= 0 { + if len < 0 && len != -2 { + *(*int32)(unsafe.Pointer(X___errno_location(tls))) = -len + } + return uintptr(0) + } + (*DIR)(unsafe.Pointer(dir)).buf_end = len + (*DIR)(unsafe.Pointer(dir)).buf_pos = 0 + } + de = dir + 24 + uintptr((*DIR)(unsafe.Pointer(dir)).buf_pos) + *(*int32)(unsafe.Pointer(dir + 12)) += int32((*dirent)(unsafe.Pointer(de)).d_reclen) + (*DIR)(unsafe.Pointer(dir)).tell = (*dirent)(unsafe.Pointer(de)).d_off + return de +} + +type uintptr_t = uint64 /* alltypes.h:55:24 */ + +type int8_t = int8 /* alltypes.h:96:25 */ + +type int16_t = int16 /* alltypes.h:101:25 */ + +type int32_t = int32 /* alltypes.h:106:25 */ + +type int64_t = int64 /* alltypes.h:111:25 */ + +type intmax_t = int64 /* alltypes.h:116:25 */ + +type uint8_t = uint8 /* alltypes.h:121:25 */ + +type uintmax_t = uint64 /* alltypes.h:146:25 */ + +type int_fast8_t = int8_t /* stdint.h:22:16 */ +type int_fast64_t = int64_t /* stdint.h:23:17 */ + +type int_least8_t = int8_t /* stdint.h:25:17 */ +type int_least16_t = int16_t /* stdint.h:26:17 */ +type int_least32_t = int32_t /* stdint.h:27:17 */ +type int_least64_t = int64_t /* stdint.h:28:17 */ + +type uint_fast8_t = uint8_t /* stdint.h:30:17 */ +type uint_fast64_t = uint64_t /* stdint.h:31:18 */ + +type uint_least8_t = uint8_t /* stdint.h:33:18 */ +type uint_least16_t = uint16_t /* stdint.h:34:18 */ +type uint_least32_t = uint32_t /* stdint.h:35:18 */ +type uint_least64_t = uint64_t /* stdint.h:36:18 */ + +type int_fast16_t = int32_t /* stdint.h:1:17 */ +type int_fast32_t = int32_t /* stdint.h:2:17 */ +type uint_fast16_t = uint32_t /* stdint.h:3:18 */ +type uint_fast32_t = uint32_t /* stdint.h:4:18 */ + +type _IO_FILE = struct { + flags uint32 + _ [4]byte + rpos uintptr + rend uintptr + close uintptr + wend uintptr + wpos uintptr + mustbezero_1 uintptr + wbase uintptr + read 
uintptr + write uintptr + seek uintptr + buf uintptr + buf_size size_t + prev uintptr + next uintptr + fd int32 + pipe_pid int32 + lockcount int64 + mode int32 + lock int32 + lbf int32 + _ [4]byte + cookie uintptr + off off_t + getln_buf uintptr + mustbezero_2 uintptr + shend uintptr + shlim off_t + shcnt off_t + prev_locked uintptr + next_locked uintptr + locale uintptr +} /* alltypes.h:320:9 */ + +type FILE = _IO_FILE /* alltypes.h:320:25 */ + +type va_list = uintptr /* alltypes.h:326:27 */ + +type _G_fpos64_t = struct { + _ [0]uint64 + __opaque [16]int8 +} /* stdio.h:54:9 */ + +type fpos_t = _G_fpos64_t /* stdio.h:58:3 */ + +type float_t = float32 /* alltypes.h:29:15 */ + +type double_t = float64 /* alltypes.h:34:16 */ + +func __FLOAT_BITS(tls *TLS, __f float32) uint32 { /* math.h:55:26: */ + bp := tls.Alloc(4) + defer tls.Free(4) + + // var __u struct {__f float32;} at bp, 4 + + *(*float32)(unsafe.Pointer(bp)) = __f + return *(*uint32)(unsafe.Pointer(bp)) +} + +func __DOUBLE_BITS(tls *TLS, __f float64) uint64 { /* math.h:61:36: */ + bp := tls.Alloc(8) + defer tls.Free(8) + + // var __u struct {__f float64;} at bp, 8 + + *(*float64)(unsafe.Pointer(bp)) = __f + return *(*uint64)(unsafe.Pointer(bp)) +} + +type __pthread = struct { + self uintptr + dtv uintptr + prev uintptr + next uintptr + sysinfo uintptr_t + canary uintptr_t + canary2 uintptr_t + tid int32 + errno_val int32 + detach_state int32 + cancel int32 + canceldisable uint8 + cancelasync uint8 + tsd_used uint8 /* unsigned char tsd_used: 1, unsigned char dlerror_flag: 1 */ + _ [5]byte + map_base uintptr + map_size size_t + stack uintptr + stack_size size_t + guard_size size_t + result uintptr + cancelbuf uintptr + tsd uintptr + robust_list struct { + head uintptr + off int64 + pending uintptr + } + timer_id int32 + _ [4]byte + locale locale_t + killlock [1]int32 + _ [4]byte + dlerror_buf uintptr + stdio_locks uintptr + canary_at_end uintptr_t + dtv_copy uintptr +} /* alltypes.h:273:9 */ + +func scanexp(tls *TLS, f uintptr, pok int32) int64 { /* floatscan.c:37:18: */ + var c int32 + var x int32 + var y int64 + var neg int32 = 0 + + c = func() int32 { + if (*FILE)(unsafe.Pointer(f)).rpos != (*FILE)(unsafe.Pointer(f)).shend { + return int32(*(*uint8)(unsafe.Pointer(PostIncUintptr(&(*FILE)(unsafe.Pointer(f)).rpos, 1)))) + } + return X__shgetc(tls, f) + }() + if c == '+' || c == '-' { + neg = Bool32(c == '-') + c = func() int32 { + if (*FILE)(unsafe.Pointer(f)).rpos != (*FILE)(unsafe.Pointer(f)).shend { + return int32(*(*uint8)(unsafe.Pointer(PostIncUintptr(&(*FILE)(unsafe.Pointer(f)).rpos, 1)))) + } + return X__shgetc(tls, f) + }() + if uint32(c-'0') >= 10 && pok != 0 { + if (*FILE)(unsafe.Pointer(f)).shlim >= int64(0) { + (*FILE)(unsafe.Pointer(f)).rpos-- + } else { + } + } + } + if uint32(c-'0') >= 10 { + if (*FILE)(unsafe.Pointer(f)).shlim >= int64(0) { + (*FILE)(unsafe.Pointer(f)).rpos-- + } else { + } + return -0x7fffffffffffffff - int64(1) + } + for x = 0; uint32(c-'0') < 10 && x < 0x7fffffff/10; c = func() int32 { + if (*FILE)(unsafe.Pointer(f)).rpos != (*FILE)(unsafe.Pointer(f)).shend { + return int32(*(*uint8)(unsafe.Pointer(PostIncUintptr(&(*FILE)(unsafe.Pointer(f)).rpos, 1)))) + } + return X__shgetc(tls, f) + }() { + x = 10*x + c - '0' + } + for y = int64(x); uint32(c-'0') < 10 && y < 0x7fffffffffffffff/int64(100); c = func() int32 { + if (*FILE)(unsafe.Pointer(f)).rpos != (*FILE)(unsafe.Pointer(f)).shend { + return int32(*(*uint8)(unsafe.Pointer(PostIncUintptr(&(*FILE)(unsafe.Pointer(f)).rpos, 1)))) + } + return 
X__shgetc(tls, f) + }() { + y = int64(10)*y + int64(c) - int64('0') + } + for ; uint32(c-'0') < 10; c = func() int32 { + if (*FILE)(unsafe.Pointer(f)).rpos != (*FILE)(unsafe.Pointer(f)).shend { + return int32(*(*uint8)(unsafe.Pointer(PostIncUintptr(&(*FILE)(unsafe.Pointer(f)).rpos, 1)))) + } + return X__shgetc(tls, f) + }() { + } + if (*FILE)(unsafe.Pointer(f)).shlim >= int64(0) { + (*FILE)(unsafe.Pointer(f)).rpos-- + } else { + } + if neg != 0 { + return -y + } + return y +} + +func decfloat(tls *TLS, f uintptr, c int32, bits int32, emin int32, sign int32, pok int32) float64 { /* floatscan.c:64:20: */ + bp := tls.Alloc(512) + defer tls.Free(512) + + // var x [128]uint32_t at bp, 512 + + var i int32 + var j int32 + var k int32 + var a int32 + var z int32 + var lrp int64 = int64(0) + var dc int64 = int64(0) + var e10 int64 = int64(0) + var lnz int32 = 0 + var gotdig int32 = 0 + var gotrad int32 = 0 + var rp int32 + var e2 int32 + var emax int32 = -emin - bits + 3 + var denormal int32 = 0 + var y float64 + var frac float64 = float64(0) + var bias float64 = float64(0) + + j = 0 + k = 0 + + // Don't let leading zeros consume buffer space + for ; c == '0'; c = func() int32 { + if (*FILE)(unsafe.Pointer(f)).rpos != (*FILE)(unsafe.Pointer(f)).shend { + return int32(*(*uint8)(unsafe.Pointer(PostIncUintptr(&(*FILE)(unsafe.Pointer(f)).rpos, 1)))) + } + return X__shgetc(tls, f) + }() { + gotdig = 1 + } + if c == '.' { + gotrad = 1 + for c = func() int32 { + if (*FILE)(unsafe.Pointer(f)).rpos != (*FILE)(unsafe.Pointer(f)).shend { + return int32(*(*uint8)(unsafe.Pointer(PostIncUintptr(&(*FILE)(unsafe.Pointer(f)).rpos, 1)))) + } + return X__shgetc(tls, f) + }(); c == '0'; c = func() int32 { + if (*FILE)(unsafe.Pointer(f)).rpos != (*FILE)(unsafe.Pointer(f)).shend { + return int32(*(*uint8)(unsafe.Pointer(PostIncUintptr(&(*FILE)(unsafe.Pointer(f)).rpos, 1)))) + } + return X__shgetc(tls, f) + }() { + gotdig = 1 + lrp-- + } + } + + *(*uint32_t)(unsafe.Pointer(bp)) = uint32_t(0) + for ; uint32(c-'0') < 10 || c == '.'; c = func() int32 { + if (*FILE)(unsafe.Pointer(f)).rpos != (*FILE)(unsafe.Pointer(f)).shend { + return int32(*(*uint8)(unsafe.Pointer(PostIncUintptr(&(*FILE)(unsafe.Pointer(f)).rpos, 1)))) + } + return X__shgetc(tls, f) + }() { + if c == '.' 
{ + if gotrad != 0 { + break + } + gotrad = 1 + lrp = dc + } else if k < 128-3 { + dc++ + if c != '0' { + lnz = int32(dc) + } + if j != 0 { + *(*uint32_t)(unsafe.Pointer(bp + uintptr(k)*4)) = *(*uint32_t)(unsafe.Pointer(bp + uintptr(k)*4))*uint32_t(10) + uint32_t(c) - uint32_t('0') + } else { + *(*uint32_t)(unsafe.Pointer(bp + uintptr(k)*4)) = uint32_t(c - '0') + } + if PreIncInt32(&j, 1) == 9 { + k++ + j = 0 + } + gotdig = 1 + } else { + dc++ + if c != '0' { + lnz = (128 - 4) * 9 + *(*uint32_t)(unsafe.Pointer(bp + 124*4)) |= uint32_t(1) + } + } + } + if !(gotrad != 0) { + lrp = dc + } + + if gotdig != 0 && c|32 == 'e' { + e10 = scanexp(tls, f, pok) + if e10 == -0x7fffffffffffffff-int64(1) { + if pok != 0 { + if (*FILE)(unsafe.Pointer(f)).shlim >= int64(0) { + (*FILE)(unsafe.Pointer(f)).rpos-- + } else { + } + } else { + X__shlim(tls, f, int64(0)) + return float64(0) + } + e10 = int64(0) + } + lrp = lrp + e10 + } else if c >= 0 { + if (*FILE)(unsafe.Pointer(f)).shlim >= int64(0) { + (*FILE)(unsafe.Pointer(f)).rpos-- + } else { + } + } + if !(gotdig != 0) { + *(*int32)(unsafe.Pointer(X___errno_location(tls))) = 22 + X__shlim(tls, f, int64(0)) + return float64(0) + } + + // Handle zero specially to avoid nasty special cases later + if !(int32(*(*uint32_t)(unsafe.Pointer(bp))) != 0) { + return float64(sign) * 0.0 + } + + // Optimize small integers (w/no exponent) and over/under-flow + if lrp == dc && dc < int64(10) && (bits > 30 || *(*uint32_t)(unsafe.Pointer(bp))>>bits == uint32_t(0)) { + return float64(sign) * float64(*(*uint32_t)(unsafe.Pointer(bp))) + } + if lrp > int64(-emin/2) { + *(*int32)(unsafe.Pointer(X___errno_location(tls))) = 34 + return float64(sign) * 1.79769313486231570815e+308 * 1.79769313486231570815e+308 + } + if lrp < int64(emin-2*53) { + *(*int32)(unsafe.Pointer(X___errno_location(tls))) = 34 + return float64(sign) * 2.22507385850720138309e-308 * 2.22507385850720138309e-308 + } + + // Align incomplete final B1B digit + if j != 0 { + for ; j < 9; j++ { + *(*uint32_t)(unsafe.Pointer(bp + uintptr(k)*4)) *= uint32_t(10) + } + k++ + j = 0 + } + + a = 0 + z = k + e2 = 0 + rp = int32(lrp) + + // Optimize small to mid-size integers (even in exp. 
notation) + if lnz < 9 && lnz <= rp && rp < 18 { + if rp == 9 { + return float64(sign) * float64(*(*uint32_t)(unsafe.Pointer(bp))) + } + if rp < 9 { + return float64(sign) * float64(*(*uint32_t)(unsafe.Pointer(bp))) / float64(_sp10s[8-rp]) + } + var bitlim int32 = bits - 3*(rp-9) + if bitlim > 30 || *(*uint32_t)(unsafe.Pointer(bp))>>bitlim == uint32_t(0) { + return float64(sign) * float64(*(*uint32_t)(unsafe.Pointer(bp))) * float64(_sp10s[rp-10]) + } + } + + // Drop trailing zeros + for ; !(int32(*(*uint32_t)(unsafe.Pointer(bp + uintptr(z-1)*4))) != 0); z-- { + } + + // Align radix point to B1B digit boundary + if rp%9 != 0 { + var rpm9 int32 + if rp >= 0 { + rpm9 = rp % 9 + } else { + rpm9 = rp%9 + 9 + } + var p10 int32 = _sp10s[8-rpm9] + var carry uint32_t = uint32_t(0) + for k = a; k != z; k++ { + var tmp uint32_t = *(*uint32_t)(unsafe.Pointer(bp + uintptr(k)*4)) % uint32_t(p10) + *(*uint32_t)(unsafe.Pointer(bp + uintptr(k)*4)) = *(*uint32_t)(unsafe.Pointer(bp + uintptr(k)*4))/uint32_t(p10) + carry + carry = uint32_t(1000000000/p10) * tmp + if k == a && !(int32(*(*uint32_t)(unsafe.Pointer(bp + uintptr(k)*4))) != 0) { + a = (a + 1) & (128 - 1) + rp = rp - 9 + } + } + if carry != 0 { + *(*uint32_t)(unsafe.Pointer(bp + uintptr(PostIncInt32(&z, 1))*4)) = carry + } + rp = rp + (9 - rpm9) + } + + // Upscale until desired number of bits are left of radix point + for rp < 9*2 || rp == 9*2 && *(*uint32_t)(unsafe.Pointer(bp + uintptr(a)*4)) < _sth[0] { + var carry uint32_t = uint32_t(0) + e2 = e2 - 29 + for k = (z - 1) & (128 - 1); ; k = (k - 1) & (128 - 1) { + var tmp uint64_t = uint64_t(*(*uint32_t)(unsafe.Pointer(bp + uintptr(k)*4)))<<29 + uint64_t(carry) + if tmp > uint64(1000000000) { + carry = uint32_t(tmp / uint64(1000000000)) + *(*uint32_t)(unsafe.Pointer(bp + uintptr(k)*4)) = uint32_t(tmp % uint64(1000000000)) + } else { + carry = uint32_t(0) + *(*uint32_t)(unsafe.Pointer(bp + uintptr(k)*4)) = uint32_t(tmp) + } + if k == (z-1)&(128-1) && k != a && !(int32(*(*uint32_t)(unsafe.Pointer(bp + uintptr(k)*4))) != 0) { + z = k + } + if k == a { + break + } + } + if carry != 0 { + rp = rp + 9 + a = (a - 1) & (128 - 1) + if a == z { + z = (z - 1) & (128 - 1) + *(*uint32_t)(unsafe.Pointer(bp + uintptr((z-1)&(128-1))*4)) |= *(*uint32_t)(unsafe.Pointer(bp + uintptr(z)*4)) + } + *(*uint32_t)(unsafe.Pointer(bp + uintptr(a)*4)) = carry + } + } + + // Downscale until exactly number of bits are left of radix point + for { + var carry uint32_t = uint32_t(0) + var sh int32 = 1 + for i = 0; i < 2; i++ { + k = (a + i) & (128 - 1) + if k == z || *(*uint32_t)(unsafe.Pointer(bp + uintptr(k)*4)) < _sth[i] { + i = 2 + break + } + if *(*uint32_t)(unsafe.Pointer(bp + uintptr((a+i)&(128-1))*4)) > _sth[i] { + break + } + } + if i == 2 && rp == 9*2 { + break + } + // FIXME: find a way to compute optimal sh + if rp > 9+9*2 { + sh = 9 + } + e2 = e2 + sh + for k = a; k != z; k = (k + 1) & (128 - 1) { + var tmp uint32_t = *(*uint32_t)(unsafe.Pointer(bp + uintptr(k)*4)) & uint32_t(int32(1)<>sh + carry + carry = uint32_t(int32(1000000000)>>sh) * tmp + if k == a && !(int32(*(*uint32_t)(unsafe.Pointer(bp + uintptr(k)*4))) != 0) { + a = (a + 1) & (128 - 1) + i-- + rp = rp - 9 + } + } + if carry != 0 { + if (z+1)&(128-1) != a { + *(*uint32_t)(unsafe.Pointer(bp + uintptr(z)*4)) = carry + z = (z + 1) & (128 - 1) + } else { + *(*uint32_t)(unsafe.Pointer(bp + uintptr((z-1)&(128-1))*4)) |= uint32_t(1) + } + } + } + + // Assemble desired bits into floating point variable + for y = float64(AssignInt32(&i, 0)); i < 2; i++ { + if 
(a+i)&(128-1) == z { + *(*uint32_t)(unsafe.Pointer(bp + uintptr(AssignInt32(&z, (z+1)&(128-1))-1)*4)) = uint32_t(0) + } + y = 1000000000.0*y + float64(*(*uint32_t)(unsafe.Pointer(bp + uintptr((a+i)&(128-1))*4))) + } + + y = y * float64(sign) + + // Limit precision for denormal results + if bits > 53+e2-emin { + bits = 53 + e2 - emin + if bits < 0 { + bits = 0 + } + denormal = 1 + } + + // Calculate bias term to force rounding, move out lower bits + if bits < 53 { + bias = Xcopysignl(tls, Xscalbn(tls, float64(1), 2*53-bits-1), y) + frac = Xfmodl(tls, y, Xscalbn(tls, float64(1), 53-bits)) + y = y - frac + y = y + bias + } + + // Process tail of decimal input so it can affect rounding + if (a+i)&(128-1) != z { + var t uint32_t = *(*uint32_t)(unsafe.Pointer(bp + uintptr((a+i)&(128-1))*4)) + if t < uint32_t(500000000) && (t != 0 || (a+i+1)&(128-1) != z) { + frac = frac + 0.25*float64(sign) + } else if t > uint32_t(500000000) { + frac = frac + 0.75*float64(sign) + } else if t == uint32_t(500000000) { + if (a+i+1)&(128-1) == z { + frac = frac + 0.5*float64(sign) + } else { + frac = frac + 0.75*float64(sign) + } + } + if 53-bits >= 2 && !(Xfmodl(tls, frac, float64(1)) != 0) { + frac += 1 + } + } + + y = y + frac + y = y - bias + + if (e2+53)&0x7fffffff > emax-5 { + if Xfabsl(tls, y) >= float64(float64(2))/2.22044604925031308085e-16 { + if denormal != 0 && bits == 53+e2-emin { + denormal = 0 + } + y = y * 0.5 + e2++ + } + if e2+53 > emax || denormal != 0 && frac != 0 { + *(*int32)(unsafe.Pointer(X___errno_location(tls))) = 34 + } + } + + return Xscalbnl(tls, y, e2) +} + +var _sth = [2]uint32_t{uint32_t(9007199), uint32_t(254740991)} /* floatscan.c:67:24 */ +var _sp10s = [8]int32{10, 100, 1000, 10000, + 100000, 1000000, 10000000, 100000000} /* floatscan.c:80:19 */ + +func hexfloat(tls *TLS, f uintptr, bits int32, emin int32, sign int32, pok int32) float64 { /* floatscan.c:315:20: */ + var x uint32_t = uint32_t(0) + var y float64 = float64(0) + var scale float64 = float64(1) + var bias float64 = float64(0) + var gottail int32 = 0 + var gotrad int32 = 0 + var gotdig int32 = 0 + var rp int64 = int64(0) + var dc int64 = int64(0) + var e2 int64 = int64(0) + var d int32 + var c int32 + + c = func() int32 { + if (*FILE)(unsafe.Pointer(f)).rpos != (*FILE)(unsafe.Pointer(f)).shend { + return int32(*(*uint8)(unsafe.Pointer(PostIncUintptr(&(*FILE)(unsafe.Pointer(f)).rpos, 1)))) + } + return X__shgetc(tls, f) + }() + + // Skip leading zeros + for ; c == '0'; c = func() int32 { + if (*FILE)(unsafe.Pointer(f)).rpos != (*FILE)(unsafe.Pointer(f)).shend { + return int32(*(*uint8)(unsafe.Pointer(PostIncUintptr(&(*FILE)(unsafe.Pointer(f)).rpos, 1)))) + } + return X__shgetc(tls, f) + }() { + gotdig = 1 + } + + if c == '.' 
{ + gotrad = 1 + c = func() int32 { + if (*FILE)(unsafe.Pointer(f)).rpos != (*FILE)(unsafe.Pointer(f)).shend { + return int32(*(*uint8)(unsafe.Pointer(PostIncUintptr(&(*FILE)(unsafe.Pointer(f)).rpos, 1)))) + } + return X__shgetc(tls, f) + }() + // Count zeros after the radix point before significand + rp = int64(0) + __1: + if !(c == '0') { + goto __3 + } + gotdig = 1 + goto __2 + __2: + c = func() int32 { + if (*FILE)(unsafe.Pointer(f)).rpos != (*FILE)(unsafe.Pointer(f)).shend { + return int32(*(*uint8)(unsafe.Pointer(PostIncUintptr(&(*FILE)(unsafe.Pointer(f)).rpos, 1)))) + } + return X__shgetc(tls, f) + }() + rp-- + goto __1 + goto __3 + __3: + } + + for ; uint32(c-'0') < 10 || uint32(c|32-'a') < 6 || c == '.'; c = func() int32 { + if (*FILE)(unsafe.Pointer(f)).rpos != (*FILE)(unsafe.Pointer(f)).shend { + return int32(*(*uint8)(unsafe.Pointer(PostIncUintptr(&(*FILE)(unsafe.Pointer(f)).rpos, 1)))) + } + return X__shgetc(tls, f) + }() { + if c == '.' { + if gotrad != 0 { + break + } + rp = dc + gotrad = 1 + } else { + gotdig = 1 + if c > '9' { + d = c | 32 + 10 - 'a' + } else { + d = c - '0' + } + if dc < int64(8) { + x = x*uint32_t(16) + uint32_t(d) + } else if dc < int64(53/4+1) { + y = y + float64(d)*AssignDivFloat64(&scale, float64(16)) + } else if d != 0 && !(gottail != 0) { + y = y + 0.5*scale + gottail = 1 + } + dc++ + } + } + if !(gotdig != 0) { + if (*FILE)(unsafe.Pointer(f)).shlim >= int64(0) { + (*FILE)(unsafe.Pointer(f)).rpos-- + } else { + } + if pok != 0 { + if (*FILE)(unsafe.Pointer(f)).shlim >= int64(0) { + (*FILE)(unsafe.Pointer(f)).rpos-- + } else { + } + if gotrad != 0 { + if (*FILE)(unsafe.Pointer(f)).shlim >= int64(0) { + (*FILE)(unsafe.Pointer(f)).rpos-- + } else { + } + } + } else { + X__shlim(tls, f, int64(0)) + } + return float64(sign) * 0.0 + } + if !(gotrad != 0) { + rp = dc + } + for dc < int64(8) { + x = x * uint32_t(16) + dc++ + } + if c|32 == 'p' { + e2 = scanexp(tls, f, pok) + if e2 == -0x7fffffffffffffff-int64(1) { + if pok != 0 { + if (*FILE)(unsafe.Pointer(f)).shlim >= int64(0) { + (*FILE)(unsafe.Pointer(f)).rpos-- + } else { + } + } else { + X__shlim(tls, f, int64(0)) + return float64(0) + } + e2 = int64(0) + } + } else { + if (*FILE)(unsafe.Pointer(f)).shlim >= int64(0) { + (*FILE)(unsafe.Pointer(f)).rpos-- + } else { + } + } + e2 = e2 + (int64(4)*rp - int64(32)) + + if !(x != 0) { + return float64(sign) * 0.0 + } + if e2 > int64(-emin) { + *(*int32)(unsafe.Pointer(X___errno_location(tls))) = 34 + return float64(sign) * 1.79769313486231570815e+308 * 1.79769313486231570815e+308 + } + if e2 < int64(emin-2*53) { + *(*int32)(unsafe.Pointer(X___errno_location(tls))) = 34 + return float64(sign) * 2.22507385850720138309e-308 * 2.22507385850720138309e-308 + } + + for x < 0x80000000 { + if y >= 0.5 { + x = x + (x + uint32_t(1)) + y = y + (y - float64(1)) + } else { + x = x + x + y = y + y + } + e2-- + } + + if int64(bits) > int64(32)+e2-int64(emin) { + bits = int32(int64(32) + e2 - int64(emin)) + if bits < 0 { + bits = 0 + } + } + + if bits < 53 { + bias = Xcopysignl(tls, Xscalbn(tls, float64(1), 32+53-bits-1), float64(sign)) + } + + if bits < 32 && y != 0 && !(x&uint32_t(1) != 0) { + x++ + y = float64(0) + } + + y = bias + float64(sign)*float64(x) + float64(sign)*y + y = y - bias + + if !(y != 0) { + *(*int32)(unsafe.Pointer(X___errno_location(tls))) = 34 + } + + return Xscalbnl(tls, y, int32(e2)) +} + +func X__floatscan(tls *TLS, f uintptr, prec int32, pok int32) float64 { /* floatscan.c:427:13: */ + var sign int32 = 1 + var i size_t + var bits int32 + var 
emin int32 + var c int32 + + switch prec { + case 0: + bits = 24 + emin = -125 - bits + break + case 1: + bits = 53 + emin = -1021 - bits + break + case 2: + bits = 53 + emin = -1021 - bits + break + default: + return float64(0) + } + + for __isspace(tls, AssignInt32(&c, func() int32 { + if (*FILE)(unsafe.Pointer(f)).rpos != (*FILE)(unsafe.Pointer(f)).shend { + return int32(*(*uint8)(unsafe.Pointer(PostIncUintptr(&(*FILE)(unsafe.Pointer(f)).rpos, 1)))) + } + return X__shgetc(tls, f) + }())) != 0 { + } + + if c == '+' || c == '-' { + sign = sign - 2*Bool32(c == '-') + c = func() int32 { + if (*FILE)(unsafe.Pointer(f)).rpos != (*FILE)(unsafe.Pointer(f)).shend { + return int32(*(*uint8)(unsafe.Pointer(PostIncUintptr(&(*FILE)(unsafe.Pointer(f)).rpos, 1)))) + } + return X__shgetc(tls, f) + }() + } + + for i = uint64(0); i < uint64(8) && c|32 == int32(*(*int8)(unsafe.Pointer(ts /* "infinity" */ + uintptr(i)))); i++ { + if i < uint64(7) { + c = func() int32 { + if (*FILE)(unsafe.Pointer(f)).rpos != (*FILE)(unsafe.Pointer(f)).shend { + return int32(*(*uint8)(unsafe.Pointer(PostIncUintptr(&(*FILE)(unsafe.Pointer(f)).rpos, 1)))) + } + return X__shgetc(tls, f) + }() + } + } + if i == uint64(3) || i == uint64(8) || i > uint64(3) && pok != 0 { + if i != uint64(8) { + if (*FILE)(unsafe.Pointer(f)).shlim >= int64(0) { + (*FILE)(unsafe.Pointer(f)).rpos-- + } else { + } + if pok != 0 { + for ; i > uint64(3); i-- { + if (*FILE)(unsafe.Pointer(f)).shlim >= int64(0) { + (*FILE)(unsafe.Pointer(f)).rpos-- + } else { + } + } + } + } + return float64(float32(sign) * X__builtin_inff(tls)) + } + if !(i != 0) { + for i = uint64(0); i < uint64(3) && c|32 == int32(*(*int8)(unsafe.Pointer(ts + 9 /* "nan" */ + uintptr(i)))); i++ { + if i < uint64(2) { + c = func() int32 { + if (*FILE)(unsafe.Pointer(f)).rpos != (*FILE)(unsafe.Pointer(f)).shend { + return int32(*(*uint8)(unsafe.Pointer(PostIncUintptr(&(*FILE)(unsafe.Pointer(f)).rpos, 1)))) + } + return X__shgetc(tls, f) + }() + } + } + } + if i == uint64(3) { + if func() int32 { + if (*FILE)(unsafe.Pointer(f)).rpos != (*FILE)(unsafe.Pointer(f)).shend { + return int32(*(*uint8)(unsafe.Pointer(PostIncUintptr(&(*FILE)(unsafe.Pointer(f)).rpos, 1)))) + } + return X__shgetc(tls, f) + }() != '(' { + if (*FILE)(unsafe.Pointer(f)).shlim >= int64(0) { + (*FILE)(unsafe.Pointer(f)).rpos-- + } else { + } + return float64(X__builtin_nanf(tls, ts+13)) + } + for i = uint64(1); ; i++ { + c = func() int32 { + if (*FILE)(unsafe.Pointer(f)).rpos != (*FILE)(unsafe.Pointer(f)).shend { + return int32(*(*uint8)(unsafe.Pointer(PostIncUintptr(&(*FILE)(unsafe.Pointer(f)).rpos, 1)))) + } + return X__shgetc(tls, f) + }() + if uint32(c-'0') < 10 || uint32(c-'A') < 26 || uint32(c-'a') < 26 || c == '_' { + continue + } + if c == ')' { + return float64(X__builtin_nanf(tls, ts+13)) + } + if (*FILE)(unsafe.Pointer(f)).shlim >= int64(0) { + (*FILE)(unsafe.Pointer(f)).rpos-- + } else { + } + if !(pok != 0) { + *(*int32)(unsafe.Pointer(X___errno_location(tls))) = 22 + X__shlim(tls, f, int64(0)) + return float64(0) + } + for PostDecUint64(&i, 1) != 0 { + if (*FILE)(unsafe.Pointer(f)).shlim >= int64(0) { + (*FILE)(unsafe.Pointer(f)).rpos-- + } else { + } + } + return float64(X__builtin_nanf(tls, ts+13)) + } + return float64(X__builtin_nanf(tls, ts+13)) + } + + if i != 0 { + if (*FILE)(unsafe.Pointer(f)).shlim >= int64(0) { + (*FILE)(unsafe.Pointer(f)).rpos-- + } else { + } + *(*int32)(unsafe.Pointer(X___errno_location(tls))) = 22 + X__shlim(tls, f, int64(0)) + return float64(0) + } + + if c == '0' { + c = 
func() int32 { + if (*FILE)(unsafe.Pointer(f)).rpos != (*FILE)(unsafe.Pointer(f)).shend { + return int32(*(*uint8)(unsafe.Pointer(PostIncUintptr(&(*FILE)(unsafe.Pointer(f)).rpos, 1)))) + } + return X__shgetc(tls, f) + }() + if c|32 == 'x' { + return hexfloat(tls, f, bits, emin, sign, pok) + } + if (*FILE)(unsafe.Pointer(f)).shlim >= int64(0) { + (*FILE)(unsafe.Pointer(f)).rpos-- + } else { + } + c = '0' + } + + return decfloat(tls, f, c, bits, emin, sign, pok) +} + +// Lookup table for digit values. -1==255>=36 -> invalid +var table1 = [257]uint8{Uint8FromInt32(-1), + Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), + Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), + Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), + uint8(0), uint8(1), uint8(2), uint8(3), uint8(4), uint8(5), uint8(6), uint8(7), uint8(8), uint8(9), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), + Uint8FromInt32(-1), uint8(10), uint8(11), uint8(12), uint8(13), uint8(14), uint8(15), uint8(16), uint8(17), uint8(18), uint8(19), uint8(20), uint8(21), uint8(22), uint8(23), uint8(24), + uint8(25), uint8(26), uint8(27), uint8(28), uint8(29), uint8(30), uint8(31), uint8(32), uint8(33), uint8(34), uint8(35), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), + Uint8FromInt32(-1), uint8(10), uint8(11), uint8(12), uint8(13), uint8(14), uint8(15), uint8(16), uint8(17), uint8(18), uint8(19), uint8(20), uint8(21), uint8(22), uint8(23), uint8(24), + uint8(25), uint8(26), uint8(27), uint8(28), uint8(29), uint8(30), uint8(31), uint8(32), uint8(33), uint8(34), uint8(35), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), + Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), + Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), + Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), + 
Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), + Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), + Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), + Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), + Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), Uint8FromInt32(-1), +} /* intscan.c:7:28 */ + +func X__intscan(tls *TLS, f uintptr, base uint32, pok int32, lim uint64) uint64 { /* intscan.c:26:20: */ + var val uintptr + var c int32 + var neg int32 + var x uint32 + var y uint64 + var bs int32 + val = uintptr(unsafe.Pointer(&table1)) + uintptr(1) + neg = 0 + if !(base > uint32(36) || base == uint32(1)) { + goto __1 + } + *(*int32)(unsafe.Pointer(X___errno_location(tls))) = 22 + return uint64(0) +__1: + ; +__2: + if !(__isspace(tls, AssignInt32(&c, func() int32 { + if (*FILE)(unsafe.Pointer(f)).rpos != (*FILE)(unsafe.Pointer(f)).shend { + return int32(*(*uint8)(unsafe.Pointer(PostIncUintptr(&(*FILE)(unsafe.Pointer(f)).rpos, 1)))) + } + return X__shgetc(tls, f) + }())) != 0) { + goto __3 + } + goto __2 +__3: + ; + if !(c == '+' || c == '-') { + goto __4 + } + neg = -Bool32(c == '-') + c = func() int32 { + if (*FILE)(unsafe.Pointer(f)).rpos != (*FILE)(unsafe.Pointer(f)).shend { + return int32(*(*uint8)(unsafe.Pointer(PostIncUintptr(&(*FILE)(unsafe.Pointer(f)).rpos, 1)))) + } + return X__shgetc(tls, f) + }() +__4: + ; + if !((base == uint32(0) || base == uint32(16)) && c == '0') { + goto __5 + } + c = func() int32 { + if (*FILE)(unsafe.Pointer(f)).rpos != (*FILE)(unsafe.Pointer(f)).shend { + return int32(*(*uint8)(unsafe.Pointer(PostIncUintptr(&(*FILE)(unsafe.Pointer(f)).rpos, 1)))) + } + return X__shgetc(tls, f) + }() + if !(c|32 == 'x') { + goto __7 + } + c = func() int32 { + if (*FILE)(unsafe.Pointer(f)).rpos != (*FILE)(unsafe.Pointer(f)).shend { + return int32(*(*uint8)(unsafe.Pointer(PostIncUintptr(&(*FILE)(unsafe.Pointer(f)).rpos, 1)))) + } + return X__shgetc(tls, f) + }() + if !(int32(*(*uint8)(unsafe.Pointer(val + uintptr(c)))) >= 16) { + goto __9 + } + if (*FILE)(unsafe.Pointer(f)).shlim >= int64(0) { + (*FILE)(unsafe.Pointer(f)).rpos-- + } else { + } + if !(pok != 0) { + goto __10 + } + if (*FILE)(unsafe.Pointer(f)).shlim >= int64(0) { + (*FILE)(unsafe.Pointer(f)).rpos-- + } 
else { + } + goto __11 +__10: + X__shlim(tls, f, int64(0)) +__11: + ; + return uint64(0) +__9: + ; + base = uint32(16) + goto __8 +__7: + if !(base == uint32(0)) { + goto __12 + } + base = uint32(8) +__12: + ; +__8: + ; + goto __6 +__5: + if !(base == uint32(0)) { + goto __13 + } + base = uint32(10) +__13: + ; + if !(uint32(*(*uint8)(unsafe.Pointer(val + uintptr(c)))) >= base) { + goto __14 + } + if (*FILE)(unsafe.Pointer(f)).shlim >= int64(0) { + (*FILE)(unsafe.Pointer(f)).rpos-- + } else { + } + X__shlim(tls, f, int64(0)) + *(*int32)(unsafe.Pointer(X___errno_location(tls))) = 22 + return uint64(0) +__14: + ; +__6: + ; + if !(base == uint32(10)) { + goto __15 + } + x = uint32(0) +__17: + if !(uint32(c-'0') < 10 && x <= 0xffffffff/uint32(10)-uint32(1)) { + goto __19 + } + x = x*uint32(10) + uint32(c-'0') + goto __18 +__18: + c = func() int32 { + if (*FILE)(unsafe.Pointer(f)).rpos != (*FILE)(unsafe.Pointer(f)).shend { + return int32(*(*uint8)(unsafe.Pointer(PostIncUintptr(&(*FILE)(unsafe.Pointer(f)).rpos, 1)))) + } + return X__shgetc(tls, f) + }() + goto __17 + goto __19 +__19: + ; + y = uint64(x) +__20: + if !(uint32(c-'0') < 10 && y <= (2*uint64(0x7fffffffffffffff)+uint64(1))/uint64(10) && uint64(10)*y <= 2*uint64(0x7fffffffffffffff)+uint64(1)-uint64(c-'0')) { + goto __22 + } + y = y*uint64(10) + uint64(c-'0') + goto __21 +__21: + c = func() int32 { + if (*FILE)(unsafe.Pointer(f)).rpos != (*FILE)(unsafe.Pointer(f)).shend { + return int32(*(*uint8)(unsafe.Pointer(PostIncUintptr(&(*FILE)(unsafe.Pointer(f)).rpos, 1)))) + } + return X__shgetc(tls, f) + }() + goto __20 + goto __22 +__22: + ; + if !(uint32(c-'0') >= 10) { + goto __23 + } + goto done +__23: + ; + goto __16 +__15: + if !!(base&(base-uint32(1)) != 0) { + goto __24 + } + bs = int32(*(*int8)(unsafe.Pointer(ts + 14 + uintptr(uint32(0x17)*base>>5&uint32(7))))) + x = uint32(0) +__26: + if !(uint32(*(*uint8)(unsafe.Pointer(val + uintptr(c)))) < base && x <= 0xffffffff/uint32(32)) { + goto __28 + } + x = x<>bs) { + goto __31 + } + y = y<= int64(0) { + (*FILE)(unsafe.Pointer(f)).rpos-- + } else { + } + if !(y >= lim) { + goto __43 + } + if !(!(lim&uint64(1) != 0) && !(neg != 0)) { + goto __44 + } + *(*int32)(unsafe.Pointer(X___errno_location(tls))) = 34 + return lim - uint64(1) + goto __45 +__44: + if !(y > lim) { + goto __46 + } + *(*int32)(unsafe.Pointer(X___errno_location(tls))) = 34 + return lim +__46: + ; +__45: + ; +__43: + ; + return y ^ uint64(neg) - uint64(neg) +} + +// The shcnt field stores the number of bytes read so far, offset by +// the value of buf-rpos at the last function call (__shlim or __shgetc), +// so that between calls the inline shcnt macro can add rpos-buf to get +// the actual count. + +func X__shlim(tls *TLS, f uintptr, lim off_t) { /* shgetc.c:8:6: */ + (*FILE)(unsafe.Pointer(f)).shlim = lim + (*FILE)(unsafe.Pointer(f)).shcnt = (int64((*FILE)(unsafe.Pointer(f)).buf) - int64((*FILE)(unsafe.Pointer(f)).rpos)) / 1 + // If lim is nonzero, rend must be a valid pointer. 
+ if lim != 0 && (int64((*FILE)(unsafe.Pointer(f)).rend)-int64((*FILE)(unsafe.Pointer(f)).rpos))/1 > lim { + (*FILE)(unsafe.Pointer(f)).shend = (*FILE)(unsafe.Pointer(f)).rpos + uintptr(lim) + } else { + (*FILE)(unsafe.Pointer(f)).shend = (*FILE)(unsafe.Pointer(f)).rend + } +} + +func X__shgetc(tls *TLS, f uintptr) int32 { /* shgetc.c:19:5: */ + var c int32 + var cnt off_t = (*FILE)(unsafe.Pointer(f)).shcnt + (int64((*FILE)(unsafe.Pointer(f)).rpos)-int64((*FILE)(unsafe.Pointer(f)).buf))/1 + if (*FILE)(unsafe.Pointer(f)).shlim != 0 && cnt >= (*FILE)(unsafe.Pointer(f)).shlim || AssignInt32(&c, X__uflow(tls, f)) < 0 { + (*FILE)(unsafe.Pointer(f)).shcnt = (int64((*FILE)(unsafe.Pointer(f)).buf)-int64((*FILE)(unsafe.Pointer(f)).rpos))/1 + cnt + (*FILE)(unsafe.Pointer(f)).shend = (*FILE)(unsafe.Pointer(f)).rpos + (*FILE)(unsafe.Pointer(f)).shlim = int64(-1) + return -1 + } + cnt++ + if (*FILE)(unsafe.Pointer(f)).shlim != 0 && (int64((*FILE)(unsafe.Pointer(f)).rend)-int64((*FILE)(unsafe.Pointer(f)).rpos))/1 > (*FILE)(unsafe.Pointer(f)).shlim-cnt { + (*FILE)(unsafe.Pointer(f)).shend = (*FILE)(unsafe.Pointer(f)).rpos + uintptr((*FILE)(unsafe.Pointer(f)).shlim-cnt) + } else { + (*FILE)(unsafe.Pointer(f)).shend = (*FILE)(unsafe.Pointer(f)).rend + } + (*FILE)(unsafe.Pointer(f)).shcnt = (int64((*FILE)(unsafe.Pointer(f)).buf)-int64((*FILE)(unsafe.Pointer(f)).rpos))/1 + cnt + if (*FILE)(unsafe.Pointer(f)).rpos <= (*FILE)(unsafe.Pointer(f)).buf { + *(*uint8)(unsafe.Pointer((*FILE)(unsafe.Pointer(f)).rpos + UintptrFromInt32(-1))) = uint8(c) + } + return c +} + +type lconv = struct { + decimal_point uintptr + thousands_sep uintptr + grouping uintptr + int_curr_symbol uintptr + currency_symbol uintptr + mon_decimal_point uintptr + mon_thousands_sep uintptr + mon_grouping uintptr + positive_sign uintptr + negative_sign uintptr + int_frac_digits int8 + frac_digits int8 + p_cs_precedes int8 + p_sep_by_space int8 + n_cs_precedes int8 + n_sep_by_space int8 + p_sign_posn int8 + n_sign_posn int8 + int_p_cs_precedes int8 + int_p_sep_by_space int8 + int_n_cs_precedes int8 + int_n_sep_by_space int8 + int_p_sign_posn int8 + int_n_sign_posn int8 + _ [2]byte +} /* locale.h:24:1 */ + +// Support signed or unsigned plain-char + +// Implementation choices... + +// Arbitrary numbers... + +// POSIX/SUS requirements follow. These numbers come directly +// from SUS and have nothing to do with the host system. 
+ +var posix_lconv = lconv{decimal_point: ts + 23, thousands_sep: ts + 13, grouping: ts + 13, int_curr_symbol: ts + 13, currency_symbol: ts + 13, mon_decimal_point: ts + 13, mon_thousands_sep: ts + 13, mon_grouping: ts + 13, positive_sign: ts + 13, negative_sign: ts + 13, int_frac_digits: Int8FromInt32(255), frac_digits: Int8FromInt32(255), p_cs_precedes: Int8FromInt32(255), p_sep_by_space: Int8FromInt32(255), n_cs_precedes: Int8FromInt32(255), n_sep_by_space: Int8FromInt32(255), p_sign_posn: Int8FromInt32(255), n_sign_posn: Int8FromInt32(255), int_p_cs_precedes: Int8FromInt32(255), int_p_sep_by_space: Int8FromInt32(255), int_n_cs_precedes: Int8FromInt32(255), int_n_sep_by_space: Int8FromInt32(255), int_p_sign_posn: Int8FromInt32(255), int_n_sign_posn: Int8FromInt32(255)} /* localeconv.c:4:27 */ + +func Xlocaleconv(tls *TLS) uintptr { /* localeconv.c:31:14: */ + return uintptr(unsafe.Pointer(&posix_lconv)) +} + +func X__fpclassify(tls *TLS, x float64) int32 { /* __fpclassify.c:4:5: */ + bp := tls.Alloc(8) + defer tls.Free(8) + + *(*struct{ f float64 })(unsafe.Pointer(bp)) = func() (r struct{ f float64 }) { + *(*float64)(unsafe.Pointer(uintptr(unsafe.Pointer(&r)) + 0)) = x + return r + }() + var e int32 = int32(*(*uint64_t)(unsafe.Pointer(bp)) >> 52 & uint64(0x7ff)) + if !(e != 0) { + if *(*uint64_t)(unsafe.Pointer(bp))<<1 != 0 { + return 3 + } + return 2 + } + if e == 0x7ff { + if *(*uint64_t)(unsafe.Pointer(bp))<<12 != 0 { + return 0 + } + return 1 + } + return 4 +} + +func X__fpclassifyf(tls *TLS, x float32) int32 { /* __fpclassifyf.c:4:5: */ + bp := tls.Alloc(4) + defer tls.Free(4) + + *(*struct{ f float32 })(unsafe.Pointer(bp)) = func() (r struct{ f float32 }) { + *(*float32)(unsafe.Pointer(uintptr(unsafe.Pointer(&r)) + 0)) = x + return r + }() + var e int32 = int32(*(*uint32_t)(unsafe.Pointer(bp)) >> 23 & uint32_t(0xff)) + if !(e != 0) { + if *(*uint32_t)(unsafe.Pointer(bp))<<1 != 0 { + return 3 + } + return 2 + } + if e == 0xff { + if *(*uint32_t)(unsafe.Pointer(bp))<<9 != 0 { + return 0 + } + return 1 + } + return 4 +} + +func X__fpclassifyl(tls *TLS, x float64) int32 { /* __fpclassifyl.c:4:5: */ + return X__fpclassify(tls, x) +} + +func Xcopysignl(tls *TLS, x float64, y float64) float64 { /* copysignl.c:4:13: */ + return Xcopysign(tls, x, y) +} + +func Xfabsl(tls *TLS, x float64) float64 { /* fabsl.c:3:13: */ + return Xfabs(tls, x) +} + +func Xfmodl(tls *TLS, x float64, y float64) float64 { /* fmodl.c:4:13: */ + return Xfmod(tls, x, y) +} + +func Xnanf(tls *TLS, s uintptr) float32 { /* nanf.c:3:7: */ + return X__builtin_nanf(tls, ts+13) +} + +var toint double_t = float64(float64(1)) / 2.22044604925031308085e-16 /* rint.c:10:23 */ + +func Xrint(tls *TLS, x float64) float64 { /* rint.c:12:8: */ + bp := tls.Alloc(8) + defer tls.Free(8) + + *(*struct{ f float64 })(unsafe.Pointer(bp)) = func() (r struct{ f float64 }) { + *(*float64)(unsafe.Pointer(uintptr(unsafe.Pointer(&r)) + 0)) = x + return r + }() + var e int32 = int32(*(*uint64_t)(unsafe.Pointer(bp)) >> 52 & uint64(0x7ff)) + var s int32 = int32(*(*uint64_t)(unsafe.Pointer(bp)) >> 63) + var y double_t + + if e >= 0x3ff+52 { + return x + } + if s != 0 { + y = x - toint + toint + } else { + y = x + toint - toint + } + if y == float64(0) { + if s != 0 { + return -Float64FromFloat64(0.0) + } + return float64(0) + } + return y +} + +func Xscalbn(tls *TLS, x float64, n int32) float64 { /* scalbn.c:4:8: */ + bp := tls.Alloc(8) + defer tls.Free(8) + + // var u struct {f float64;} at bp, 8 + + var y double_t = x + + if n > 1023 { + y = y * 
0x1p1023 + n = n - 1023 + if n > 1023 { + y = y * 0x1p1023 + n = n - 1023 + if n > 1023 { + n = 1023 + } + } + } else if n < -1022 { + // make sure final n < -53 to avoid double + // rounding in the subnormal range + y = y * (float64(0x1p-1022) * 0x1p53) + n = n + (1022 - 53) + if n < -1022 { + y = y * (float64(0x1p-1022) * 0x1p53) + n = n + (1022 - 53) + if n < -1022 { + n = -1022 + } + } + } + *(*uint64_t)(unsafe.Pointer(bp)) = uint64_t(0x3ff+n) << 52 + x = y * *(*float64)(unsafe.Pointer(bp)) + return x +} + +func Xscalbnl(tls *TLS, x float64, n int32) float64 { /* scalbnl.c:4:13: */ + return Xscalbn(tls, x, n) +} + +// Support signed or unsigned plain-char + +// Implementation choices... + +// Arbitrary numbers... + +// POSIX/SUS requirements follow. These numbers come directly +// from SUS and have nothing to do with the host system. + +type __locale_map = struct { + __map uintptr + map_size size_t + name [24]int8 + next uintptr +} /* alltypes.h:343:9 */ + +type tls_module = struct { + next uintptr + image uintptr + len size_t + size size_t + align size_t + offset size_t +} /* libc.h:14:1 */ + +type __libc = struct { + can_do_threads int8 + threaded int8 + secure int8 + need_locks int8 + threads_minus_1 int32 + auxv uintptr + tls_head uintptr + tls_size size_t + tls_align size_t + tls_cnt size_t + page_size size_t + global_locale struct{ cat [6]uintptr } +} /* libc.h:20:1 */ + +type time_t = int64 /* alltypes.h:85:16 */ + +type clockid_t = int32 /* alltypes.h:214:13 */ + +type timespec = struct { + tv_sec time_t + tv_nsec int64 +} /* alltypes.h:229:1 */ + +type pthread_t = uintptr /* alltypes.h:273:26 */ + +type pthread_once_t = int32 /* alltypes.h:279:13 */ + +type pthread_key_t = uint32 /* alltypes.h:284:18 */ + +type pthread_spinlock_t = int32 /* alltypes.h:289:13 */ + +type pthread_mutexattr_t = struct{ __attr uint32 } /* alltypes.h:294:37 */ + +type pthread_condattr_t = struct{ __attr uint32 } /* alltypes.h:299:37 */ + +type pthread_barrierattr_t = struct{ __attr uint32 } /* alltypes.h:304:37 */ + +type pthread_rwlockattr_t = struct{ __attr [2]uint32 } /* alltypes.h:309:40 */ + +type __sigset_t = struct{ __bits [16]uint64 } /* alltypes.h:349:9 */ + +type sigset_t = __sigset_t /* alltypes.h:349:71 */ + +type pthread_attr_t = struct { + __u struct { + _ [0]uint64 + __i [14]int32 + } +} /* alltypes.h:372:147 */ + +type pthread_mutex_t = struct { + __u struct { + _ [0]uint64 + __i [10]int32 + } +} /* alltypes.h:377:157 */ + +type pthread_cond_t = struct { + __u struct { + _ [0]uint64 + __i [12]int32 + } +} /* alltypes.h:387:112 */ + +type pthread_rwlock_t = struct { + __u struct { + _ [0]uint64 + __i [14]int32 + } +} /* alltypes.h:397:139 */ + +type pthread_barrier_t = struct { + __u struct { + _ [0]uint64 + __i [8]int32 + } +} /* alltypes.h:402:137 */ + +type sched_param = struct { + sched_priority int32 + __reserved1 int32 + __reserved2 [2]struct { + __reserved1 time_t + __reserved2 int64 + } + __reserved3 int32 + _ [4]byte +} /* sched.h:19:1 */ + +type timer_t = uintptr /* alltypes.h:209:14 */ + +type clock_t = int64 /* alltypes.h:219:14 */ + +type tm = struct { + tm_sec int32 + tm_min int32 + tm_hour int32 + tm_mday int32 + tm_mon int32 + tm_year int32 + tm_wday int32 + tm_yday int32 + tm_isdst int32 + _ [4]byte + tm_gmtoff int64 + tm_zone uintptr +} /* time.h:38:1 */ + +type itimerspec = struct { + it_interval struct { + tv_sec time_t + tv_nsec int64 + } + it_value struct { + tv_sec time_t + tv_nsec int64 + } +} /* time.h:80:1 */ + +type sigevent = struct { + sigev_value struct { 
+ _ [0]uint64 + sival_int int32 + _ [4]byte + } + sigev_signo int32 + sigev_notify int32 + sigev_notify_function uintptr + sigev_notify_attributes uintptr + __pad [32]int8 +} /* time.h:107:1 */ + +type __ptcb = struct { + __f uintptr + __x uintptr + __next uintptr +} /* alltypes.h:273:9 */ + +type sigaltstack = struct { + ss_sp uintptr + ss_flags int32 + _ [4]byte + ss_size size_t +} /* signal.h:44:9 */ + +type stack_t = sigaltstack /* signal.h:44:28 */ + +type greg_t = int64 /* signal.h:59:19 */ +type gregset_t = [23]int64 /* signal.h:59:27 */ +type _fpstate = struct { + cwd uint16 + swd uint16 + ftw uint16 + fop uint16 + rip uint64 + rdp uint64 + mxcsr uint32 + mxcr_mask uint32 + _st [8]struct { + significand [4]uint16 + exponent uint16 + padding [3]uint16 + } + _xmm [16]struct{ element [4]uint32 } + padding [24]uint32 +} /* signal.h:60:9 */ + +type fpregset_t = uintptr /* signal.h:71:3 */ +type sigcontext = struct { + r8 uint64 + r9 uint64 + r10 uint64 + r11 uint64 + r12 uint64 + r13 uint64 + r14 uint64 + r15 uint64 + rdi uint64 + rsi uint64 + rbp uint64 + rbx uint64 + rdx uint64 + rax uint64 + rcx uint64 + rsp uint64 + rip uint64 + eflags uint64 + cs uint16 + gs uint16 + fs uint16 + __pad0 uint16 + err uint64 + trapno uint64 + oldmask uint64 + cr2 uint64 + fpstate uintptr + __reserved1 [8]uint64 +} /* signal.h:72:1 */ + +type mcontext_t = struct { + gregs gregset_t + fpregs fpregset_t + __reserved1 [8]uint64 +} /* signal.h:84:3 */ + +type __ucontext = struct { + uc_flags uint64 + uc_link uintptr + uc_stack stack_t + uc_mcontext mcontext_t + uc_sigmask sigset_t + __fpregs_mem [64]uint64 +} /* signal.h:97:9 */ + +type ucontext_t = __ucontext /* signal.h:104:3 */ + +type sigval = struct { + _ [0]uint64 + sival_int int32 + _ [4]byte +} /* time.h:107:1 */ + +type siginfo_t = struct { + si_signo int32 + si_errno int32 + si_code int32 + _ [4]byte + __si_fields struct { + _ [0]uint64 + __pad [112]int8 + } +} /* signal.h:145:3 */ + +type sigaction = struct { + __sa_handler struct{ sa_handler uintptr } + sa_mask sigset_t + sa_flags int32 + _ [4]byte + sa_restorer uintptr +} /* signal.h:167:1 */ + +type sig_t = uintptr /* signal.h:251:14 */ + +type sig_atomic_t = int32 /* signal.h:269:13 */ + +func a_cas(tls *TLS, p uintptr, t int32, s int32) int32 { /* atomic_arch.h:2:19: */ + panic(`arch/x86_64/atomic_arch.h:4:2: assembler statements not supported`) + return t +} + +func a_swap(tls *TLS, p uintptr, v int32) int32 { /* atomic_arch.h:20:19: */ + panic(`arch/x86_64/atomic_arch.h:22:2: assembler statements not supported`) + return v +} + +func a_or(tls *TLS, p uintptr, v int32) { /* atomic_arch.h:46:20: */ + panic(`arch/x86_64/atomic_arch.h:48:2: assembler statements not supported`) +} + +func a_or_64(tls *TLS, p uintptr, v uint64_t) { /* atomic_arch.h:62:20: */ + panic(`arch/x86_64/atomic_arch.h:64:2: assembler statements not supported`) +} + +func a_ctz_64(tls *TLS, x uint64_t) int32 { /* atomic_arch.h:112:19: */ + panic(`arch/x86_64/atomic_arch.h:114:2: assembler statements not supported`) + return int32(x) +} + +func a_ctz_32(tls *TLS, x uint32_t) int32 { /* atomic.h:256:19: */ + return int32(_sdebruijn328[x&-x*uint32_t(0x076be629)>>27]) +} + +var _sdebruijn328 = [32]int8{ + int8(0), int8(1), int8(23), int8(2), int8(29), int8(24), int8(19), int8(3), int8(30), int8(27), int8(25), int8(11), int8(20), int8(8), int8(4), int8(13), + int8(31), int8(22), int8(28), int8(18), int8(26), int8(10), int8(7), int8(12), int8(21), int8(17), int8(9), int8(6), int8(16), int8(5), int8(15), int8(14), +} /* 
atomic.h:261:20 */ + +type __timer = struct { + timerid int32 + _ [4]byte + thread pthread_t +} /* pthread_impl.h:64:1 */ + +func __pthread_self(tls *TLS) uintptr { /* pthread_arch.h:1:30: */ + var self uintptr + panic(`arch/x86_64/pthread_arch.h:4:2: assembler statements not supported`) + return self +} + +func __wake(tls *TLS, addr uintptr, cnt int32, priv int32) { /* pthread_impl.h:155:20: */ + if priv != 0 { + priv = 128 + } + if cnt < 0 { + cnt = 0x7fffffff + } + _ = Bool32(X__syscall3(tls, int64(202), int64(addr), int64(1|priv), int64(cnt)) != int64(-38) || X__syscall3(tls, int64(202), int64(addr), int64(1), int64(cnt)) != 0) +} + +func __futexwait(tls *TLS, addr uintptr, val int32, priv int32) { /* pthread_impl.h:162:20: */ + if priv != 0 { + priv = 128 + } + _ = Bool32(X__syscall4(tls, int64(202), int64(addr), int64(0|priv), int64(val), int64(0)) != int64(-38) || X__syscall4(tls, int64(202), int64(addr), int64(0), int64(val), int64(0)) != 0) +} + +var X__fsmu8 = [51]uint32_t{ + func() uint32 { + if 0x2 < 2 { + return Uint32FromInt32(-1) + } + return func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }()<<23 | uint32_t(0x2) + }(), func() uint32 { + if 0x3 < 2 { + return Uint32FromInt32(-1) + } + return func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }()<<23 | uint32_t(0x3) + }(), func() uint32 { + if 0x4 < 2 { + return Uint32FromInt32(-1) + } + return func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }()<<23 | uint32_t(0x4) + }(), func() uint32 { + if 0x5 < 2 { + return Uint32FromInt32(-1) + } + return func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }()<<23 | uint32_t(0x5) + }(), func() uint32 { + if 0x6 < 2 { + return Uint32FromInt32(-1) + } + return func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }()<<23 | uint32_t(0x6) + }(), func() uint32 { + if 0x7 < 2 { + return Uint32FromInt32(-1) + } + return func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }()<<23 | uint32_t(0x7) + }(), + func() uint32 { + if 0x8 < 2 { + return Uint32FromInt32(-1) + } + return func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }()<<23 | uint32_t(0x8) + }(), func() uint32 { + if 0x9 < 2 { + return Uint32FromInt32(-1) + } + return func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }()<<23 | uint32_t(0x9) + }(), func() uint32 { + if 0xa < 2 { + return Uint32FromInt32(-1) + } + return func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }()<<23 | uint32_t(0xa) + }(), func() uint32 { + if 0xb < 2 { + return Uint32FromInt32(-1) + } + return func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - 
Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }()<<23 | uint32_t(0xb) + }(), func() uint32 { + if 0xc < 2 { + return Uint32FromInt32(-1) + } + return func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }()<<23 | uint32_t(0xc) + }(), func() uint32 { + if 0xd < 2 { + return Uint32FromInt32(-1) + } + return func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }()<<23 | uint32_t(0xd) + }(), func() uint32 { + if 0xe < 2 { + return Uint32FromInt32(-1) + } + return func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }()<<23 | uint32_t(0xe) + }(), func() uint32 { + if 0xf < 2 { + return Uint32FromInt32(-1) + } + return func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }()<<23 | uint32_t(0xf) + }(), + func() uint32 { + if 0x0+16 < 2 { + return Uint32FromInt32(-1) + } + return func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }()<<23 | uint32_t(0x0+16) + }(), func() uint32 { + if 0x1+16 < 2 { + return Uint32FromInt32(-1) + } + return func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }()<<23 | uint32_t(0x1+16) + }(), func() uint32 { + if 0x2+16 < 2 { + return Uint32FromInt32(-1) + } + return func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }()<<23 | uint32_t(0x2+16) + }(), func() uint32 { + if 0x3+16 < 2 { + return Uint32FromInt32(-1) + } + return func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }()<<23 | uint32_t(0x3+16) + }(), func() uint32 { + if 0x4+16 < 2 { + return Uint32FromInt32(-1) + } + return func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }()<<23 | uint32_t(0x4+16) + }(), func() uint32 { + if 0x5+16 < 2 { + return Uint32FromInt32(-1) + } + return func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }()<<23 | uint32_t(0x5+16) + }(), func() uint32 { + if 0x6+16 < 2 { + return Uint32FromInt32(-1) + } + return func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }()<<23 | uint32_t(0x6+16) + }(), func() uint32 { + if 0x7+16 < 2 { + return Uint32FromInt32(-1) + } + return func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }()<<23 | uint32_t(0x7+16) + }(), + func() uint32 { + if 0x8+16 < 2 { + return Uint32FromInt32(-1) + } + return func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - 
Uint32FromInt32(0x80)) + }()<<23 | uint32_t(0x8+16) + }(), func() uint32 { + if 0x9+16 < 2 { + return Uint32FromInt32(-1) + } + return func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }()<<23 | uint32_t(0x9+16) + }(), func() uint32 { + if 0xa+16 < 2 { + return Uint32FromInt32(-1) + } + return func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }()<<23 | uint32_t(0xa+16) + }(), func() uint32 { + if 0xb+16 < 2 { + return Uint32FromInt32(-1) + } + return func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }()<<23 | uint32_t(0xb+16) + }(), func() uint32 { + if 0xc+16 < 2 { + return Uint32FromInt32(-1) + } + return func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }()<<23 | uint32_t(0xc+16) + }(), func() uint32 { + if 0xd+16 < 2 { + return Uint32FromInt32(-1) + } + return func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }()<<23 | uint32_t(0xd+16) + }(), func() uint32 { + if 0xe+16 < 2 { + return Uint32FromInt32(-1) + } + return func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }()<<23 | uint32_t(0xe+16) + }(), func() uint32 { + if 0xf+16 < 2 { + return Uint32FromInt32(-1) + } + return func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }()<<23 | uint32_t(0xf+16) + }(), + func() uint32 { + if 0x0 == 0 { + return func() uint32 { + if Int32(0xa0) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0xa0)) + }() << 23 + } + return func() uint32 { + if 0x0 == 0xd { + return func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xa0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }() << 23 + } + return func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }() << 23 + }() + }() | uint32_t(func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }()<<23)>>6 | uint32_t(0x0), func() uint32 { + if 0x1 == 0 { + return func() uint32 { + if Int32(0xa0) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0xa0)) + }() << 23 + } + return func() uint32 { + if 0x1 == 0xd { + return func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xa0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }() << 23 + } + return func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }() << 23 + }() + }() | uint32_t(func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + 
return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }()<<23)>>6 | uint32_t(0x1), func() uint32 { + if 0x2 == 0 { + return func() uint32 { + if Int32(0xa0) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0xa0)) + }() << 23 + } + return func() uint32 { + if 0x2 == 0xd { + return func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xa0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }() << 23 + } + return func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }() << 23 + }() + }() | uint32_t(func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }()<<23)>>6 | uint32_t(0x2), func() uint32 { + if 0x3 == 0 { + return func() uint32 { + if Int32(0xa0) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0xa0)) + }() << 23 + } + return func() uint32 { + if 0x3 == 0xd { + return func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xa0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }() << 23 + } + return func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }() << 23 + }() + }() | uint32_t(func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }()<<23)>>6 | uint32_t(0x3), func() uint32 { + if 0x4 == 0 { + return func() uint32 { + if Int32(0xa0) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0xa0)) + }() << 23 + } + return func() uint32 { + if 0x4 == 0xd { + return func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xa0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }() << 23 + } + return func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }() << 23 + }() + }() | uint32_t(func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }()<<23)>>6 | uint32_t(0x4), func() uint32 { + if 0x5 == 0 { + return func() uint32 { + if Int32(0xa0) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0xa0)) + }() << 23 + } + return func() uint32 { + if 0x5 == 0xd { + return func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xa0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }() << 23 + } + return func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }() << 23 + }() + }() | uint32_t(func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }()<<23)>>6 | uint32_t(0x5), func() uint32 { + if 0x6 == 0 { + return func() uint32 { + if Int32(0xa0) == Int32(0x80) { + return Uint32(Uint32(0x40) - 
Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0xa0)) + }() << 23 + } + return func() uint32 { + if 0x6 == 0xd { + return func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xa0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }() << 23 + } + return func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }() << 23 + }() + }() | uint32_t(func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }()<<23)>>6 | uint32_t(0x6), func() uint32 { + if 0x7 == 0 { + return func() uint32 { + if Int32(0xa0) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0xa0)) + }() << 23 + } + return func() uint32 { + if 0x7 == 0xd { + return func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xa0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }() << 23 + } + return func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }() << 23 + }() + }() | uint32_t(func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }()<<23)>>6 | uint32_t(0x7), + func() uint32 { + if 0x8 == 0 { + return func() uint32 { + if Int32(0xa0) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0xa0)) + }() << 23 + } + return func() uint32 { + if 0x8 == 0xd { + return func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xa0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }() << 23 + } + return func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }() << 23 + }() + }() | uint32_t(func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }()<<23)>>6 | uint32_t(0x8), func() uint32 { + if 0x9 == 0 { + return func() uint32 { + if Int32(0xa0) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0xa0)) + }() << 23 + } + return func() uint32 { + if 0x9 == 0xd { + return func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xa0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }() << 23 + } + return func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }() << 23 + }() + }() | uint32_t(func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }()<<23)>>6 | uint32_t(0x9), func() uint32 { + if 0xa == 0 { + return func() uint32 { + if Int32(0xa0) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0xa0)) + }() << 23 + } + return func() uint32 { + if 0xa == 0xd { + return func() uint32 { + if Int32(0x80) == Int32(0x80) { + return 
Uint32(Uint32(0x40) - Uint32FromInt32(0xa0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }() << 23 + } + return func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }() << 23 + }() + }() | uint32_t(func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }()<<23)>>6 | uint32_t(0xa), func() uint32 { + if 0xb == 0 { + return func() uint32 { + if Int32(0xa0) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0xa0)) + }() << 23 + } + return func() uint32 { + if 0xb == 0xd { + return func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xa0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }() << 23 + } + return func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }() << 23 + }() + }() | uint32_t(func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }()<<23)>>6 | uint32_t(0xb), func() uint32 { + if 0xc == 0 { + return func() uint32 { + if Int32(0xa0) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0xa0)) + }() << 23 + } + return func() uint32 { + if 0xc == 0xd { + return func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xa0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }() << 23 + } + return func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }() << 23 + }() + }() | uint32_t(func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }()<<23)>>6 | uint32_t(0xc), func() uint32 { + if 0xd == 0 { + return func() uint32 { + if Int32(0xa0) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0xa0)) + }() << 23 + } + return func() uint32 { + if 0xd == 0xd { + return func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xa0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }() << 23 + } + return func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }() << 23 + }() + }() | uint32_t(func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }()<<23)>>6 | uint32_t(0xd), func() uint32 { + if 0xe == 0 { + return func() uint32 { + if Int32(0xa0) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0xa0)) + }() << 23 + } + return func() uint32 { + if 0xe == 0xd { + return func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xa0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }() << 23 + } + return func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - 
Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }() << 23 + }() + }() | uint32_t(func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }()<<23)>>6 | uint32_t(0xe), func() uint32 { + if 0xf == 0 { + return func() uint32 { + if Int32(0xa0) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0xa0)) + }() << 23 + } + return func() uint32 { + if 0xf == 0xd { + return func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xa0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }() << 23 + } + return func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }() << 23 + }() + }() | uint32_t(func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }()<<23)>>6 | uint32_t(0xf), + func() uint32 { + if 0x0 >= 5 { + return uint32(0) + } + return func() uint32 { + if 0x0 == 0 { + return func() uint32 { + if Int32(0x90) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x90)) + }() << 23 + } + return func() uint32 { + if 0x0 == 4 { + return func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0x90)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }() << 23 + } + return func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }() << 23 + }() + }() + }() | uint32_t(func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }()<<23)>>6 | uint32_t(func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }()<<23)>>12 | uint32_t(0x0), func() uint32 { + if 0x1 >= 5 { + return uint32(0) + } + return func() uint32 { + if 0x1 == 0 { + return func() uint32 { + if Int32(0x90) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x90)) + }() << 23 + } + return func() uint32 { + if 0x1 == 4 { + return func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0x90)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }() << 23 + } + return func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }() << 23 + }() + }() + }() | uint32_t(func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }()<<23)>>6 | uint32_t(func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }()<<23)>>12 | uint32_t(0x1), func() uint32 { + if 0x2 >= 5 { + return uint32(0) + } + return func() uint32 { + if 0x2 == 0 { + return func() uint32 { + if Int32(0x90) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return 
Uint32(Uint32(0) - Uint32FromInt32(0x90)) + }() << 23 + } + return func() uint32 { + if 0x2 == 4 { + return func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0x90)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }() << 23 + } + return func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }() << 23 + }() + }() + }() | uint32_t(func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }()<<23)>>6 | uint32_t(func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }()<<23)>>12 | uint32_t(0x2), func() uint32 { + if 0x3 >= 5 { + return uint32(0) + } + return func() uint32 { + if 0x3 == 0 { + return func() uint32 { + if Int32(0x90) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x90)) + }() << 23 + } + return func() uint32 { + if 0x3 == 4 { + return func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0x90)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }() << 23 + } + return func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }() << 23 + }() + }() + }() | uint32_t(func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }()<<23)>>6 | uint32_t(func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }()<<23)>>12 | uint32_t(0x3), func() uint32 { + if 0x4 >= 5 { + return uint32(0) + } + return func() uint32 { + if 0x4 == 0 { + return func() uint32 { + if Int32(0x90) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x90)) + }() << 23 + } + return func() uint32 { + if 0x4 == 4 { + return func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0x90)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }() << 23 + } + return func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }() << 23 + }() + }() + }() | uint32_t(func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }()<<23)>>6 | uint32_t(func() uint32 { + if Int32(0x80) == Int32(0x80) { + return Uint32(Uint32(0x40) - Uint32FromInt32(0xc0)) + } + return Uint32(Uint32(0) - Uint32FromInt32(0x80)) + }()<<23)>>12 | uint32_t(0x4), +} /* internal.c:18:16 */ + +type wint_t = uint32 /* alltypes.h:198:18 */ + +type wctype_t = uint64 /* alltypes.h:203:23 */ + +type __mbstate_t = struct { + __opaque1 uint32 + __opaque2 uint32 +} /* alltypes.h:337:9 */ + +type mbstate_t = __mbstate_t /* alltypes.h:337:63 */ + +func Xmbrtowc(tls *TLS, wc uintptr, src uintptr, n size_t, st uintptr) size_t { /* mbrtowc.c:6:8: */ + bp := tls.Alloc(4) + defer tls.Free(4) + + var c uint32 + var s uintptr + var N uint32 + // var dummy wchar_t at bp, 4 + s = src 
+ N = uint32(n) + + if !!(st != 0) { + goto __1 + } + st = uintptr(unsafe.Pointer(&_sinternal_state)) +__1: + ; + c = *(*uint32)(unsafe.Pointer(st)) + + if !!(s != 0) { + goto __2 + } + if !(c != 0) { + goto __4 + } + goto ilseq +__4: + ; + return uint64(0) + goto __3 +__2: + if !!(wc != 0) { + goto __5 + } + wc = bp /* &dummy */ +__5: + ; +__3: + ; + + if !!(n != 0) { + goto __6 + } + return Uint64FromInt32(-2) +__6: + ; + if !!(c != 0) { + goto __7 + } + if !(int32(*(*uint8)(unsafe.Pointer(s))) < 0x80) { + goto __8 + } + return BoolUint64(!!(AssignPtrInt32(wc, wchar_t(*(*uint8)(unsafe.Pointer(s)))) != 0)) +__8: + ; + if !(func() int32 { + if !!(int32(*(*uintptr)(unsafe.Pointer((*__pthread)(unsafe.Pointer(__pthread_self(tls))).locale))) != 0) { + return 4 + } + return 1 + }() == 1) { + goto __9 + } + *(*wchar_t)(unsafe.Pointer(wc)) = 0xdfff & int32(int8(*(*uint8)(unsafe.Pointer(s)))) + return 1 +__9: + ; + if !(uint32(*(*uint8)(unsafe.Pointer(s)))-0xc2 > 0xf4-0xc2) { + goto __10 + } + goto ilseq +__10: + ; + c = X__fsmu8[uint32(*(*uint8)(unsafe.Pointer(PostIncUintptr(&s, 1))))-0xc2] + n-- +__7: + ; + + if !(n != 0) { + goto __11 + } + if !((int32(*(*uint8)(unsafe.Pointer(s)))>>3-0x10|(int32(*(*uint8)(unsafe.Pointer(s)))>>3+int32_t(c)>>26))&CplInt32(7) != 0) { + goto __12 + } + goto ilseq +__12: + ; +loop: + c = c<<6 | uint32(int32(*(*uint8)(unsafe.Pointer(PostIncUintptr(&s, 1))))-0x80) + n-- + if !!(c&(uint32(1)<<31) != 0) { + goto __13 + } + *(*uint32)(unsafe.Pointer(st)) = uint32(0) + *(*wchar_t)(unsafe.Pointer(wc)) = wchar_t(c) + return size_t(N) - n +__13: + ; + if !(n != 0) { + goto __14 + } + if !(uint32(*(*uint8)(unsafe.Pointer(s)))-0x80 >= uint32(0x40)) { + goto __15 + } + goto ilseq +__15: + ; + goto loop +__14: + ; +__11: + ; + + *(*uint32)(unsafe.Pointer(st)) = c + return Uint64FromInt32(-2) +ilseq: + *(*uint32)(unsafe.Pointer(st)) = uint32(0) + *(*int32)(unsafe.Pointer(X___errno_location(tls))) = 84 + return Uint64FromInt32(-1) +} + +var _sinternal_state uint32 /* mbrtowc.c:8:18: */ + +func Xmbsinit(tls *TLS, st uintptr) int32 { /* mbsinit.c:3:5: */ + return Bool32(!(st != 0) || !(int32(*(*uint32)(unsafe.Pointer(st))) != 0)) +} + +type imaxdiv_t = struct { + quot intmax_t + rem intmax_t +} /* inttypes.h:14:40 */ + +type socklen_t = uint32 /* alltypes.h:361:18 */ + +type sa_family_t = uint16 /* alltypes.h:366:24 */ + +type msghdr = struct { + msg_name uintptr + msg_namelen socklen_t + _ [4]byte + msg_iov uintptr + msg_iovlen int32 + __pad1 int32 + msg_control uintptr + msg_controllen socklen_t + __pad2 int32 + msg_flags int32 + _ [4]byte +} /* socket.h:22:1 */ + +type cmsghdr = struct { + cmsg_len socklen_t + __pad1 int32 + cmsg_level int32 + cmsg_type int32 +} /* socket.h:44:1 */ + +type linger = struct { + l_onoff int32 + l_linger int32 +} /* socket.h:74:1 */ + +type sockaddr = struct { + sa_family sa_family_t + sa_data [14]int8 +} /* socket.h:367:1 */ + +type sockaddr_storage = struct { + ss_family sa_family_t + __ss_padding [118]int8 + __ss_align uint64 +} /* socket.h:372:1 */ + +type in_port_t = uint16_t /* in.h:12:18 */ +type in_addr_t = uint32_t /* in.h:13:18 */ +type in_addr = struct{ s_addr in_addr_t } /* in.h:14:1 */ + +type sockaddr_in = struct { + sin_family sa_family_t + sin_port in_port_t + sin_addr struct{ s_addr in_addr_t } + sin_zero [8]uint8_t +} /* in.h:16:1 */ + +type in6_addr = struct { + __in6_union struct { + _ [0]uint32 + __s6_addr [16]uint8_t + } +} /* in.h:23:1 */ + +type sockaddr_in6 = struct { + sin6_family sa_family_t + sin6_port in_port_t + 
sin6_flowinfo uint32_t + sin6_addr struct { + __in6_union struct { + _ [0]uint32 + __s6_addr [16]uint8_t + } + } + sin6_scope_id uint32_t +} /* in.h:34:1 */ + +type ipv6_mreq = struct { + ipv6mr_multiaddr struct { + __in6_union struct { + _ [0]uint32 + __s6_addr [16]uint8_t + } + } + ipv6mr_interface uint32 +} /* in.h:42:1 */ + +type ip_opts = struct { + ip_dst struct{ s_addr in_addr_t } + ip_opts [40]int8 +} /* in.h:229:1 */ + +type ip_mreq = struct { + imr_multiaddr struct{ s_addr in_addr_t } + imr_interface struct{ s_addr in_addr_t } +} /* in.h:247:1 */ + +type ip_mreqn = struct { + imr_multiaddr struct{ s_addr in_addr_t } + imr_address struct{ s_addr in_addr_t } + imr_ifindex int32 +} /* in.h:252:1 */ + +type ip_mreq_source = struct { + imr_multiaddr struct{ s_addr in_addr_t } + imr_interface struct{ s_addr in_addr_t } + imr_sourceaddr struct{ s_addr in_addr_t } +} /* in.h:258:1 */ + +type ip_msfilter = struct { + imsf_multiaddr struct{ s_addr in_addr_t } + imsf_interface struct{ s_addr in_addr_t } + imsf_fmode uint32_t + imsf_numsrc uint32_t + imsf_slist [1]struct{ s_addr in_addr_t } +} /* in.h:264:1 */ + +type group_req = struct { + gr_interface uint32_t + _ [4]byte + gr_group struct { + ss_family sa_family_t + __ss_padding [118]int8 + __ss_align uint64 + } +} /* in.h:275:1 */ + +type group_source_req = struct { + gsr_interface uint32_t + _ [4]byte + gsr_group struct { + ss_family sa_family_t + __ss_padding [118]int8 + __ss_align uint64 + } + gsr_source struct { + ss_family sa_family_t + __ss_padding [118]int8 + __ss_align uint64 + } +} /* in.h:280:1 */ + +type group_filter = struct { + gf_interface uint32_t + _ [4]byte + gf_group struct { + ss_family sa_family_t + __ss_padding [118]int8 + __ss_align uint64 + } + gf_fmode uint32_t + gf_numsrc uint32_t + gf_slist [1]struct { + ss_family sa_family_t + __ss_padding [118]int8 + __ss_align uint64 + } +} /* in.h:286:1 */ + +type in_pktinfo = struct { + ipi_ifindex int32 + ipi_spec_dst struct{ s_addr in_addr_t } + ipi_addr struct{ s_addr in_addr_t } +} /* in.h:297:1 */ + +type in6_pktinfo = struct { + ipi6_addr struct { + __in6_union struct { + _ [0]uint32 + __s6_addr [16]uint8_t + } + } + ipi6_ifindex uint32 +} /* in.h:303:1 */ + +type ip6_mtuinfo = struct { + ip6m_addr struct { + sin6_family sa_family_t + sin6_port in_port_t + sin6_flowinfo uint32_t + sin6_addr struct { + __in6_union struct { + _ [0]uint32 + __s6_addr [16]uint8_t + } + } + sin6_scope_id uint32_t + } + ip6m_mtu uint32_t +} /* in.h:308:1 */ + +type addrinfo = struct { + ai_flags int32 + ai_family int32 + ai_socktype int32 + ai_protocol int32 + ai_addrlen socklen_t + _ [4]byte + ai_addr uintptr + ai_canonname uintptr + ai_next uintptr +} /* netdb.h:16:1 */ + +// Legacy functions follow (marked OBsolete in SUS) + +type netent = struct { + n_name uintptr + n_aliases uintptr + n_addrtype int32 + n_net uint32_t +} /* netdb.h:62:1 */ + +type hostent = struct { + h_name uintptr + h_aliases uintptr + h_addrtype int32 + h_length int32 + h_addr_list uintptr +} /* netdb.h:69:1 */ + +type servent = struct { + s_name uintptr + s_aliases uintptr + s_port int32 + _ [4]byte + s_proto uintptr +} /* netdb.h:78:1 */ + +type protoent = struct { + p_name uintptr + p_aliases uintptr + p_proto int32 + _ [4]byte +} /* netdb.h:85:1 */ + +type aibuf = struct { + ai struct { + ai_flags int32 + ai_family int32 + ai_socktype int32 + ai_protocol int32 + ai_addrlen socklen_t + _ [4]byte + ai_addr uintptr + ai_canonname uintptr + ai_next uintptr + } + sa struct { + sin struct { + sin_family sa_family_t + 
sin_port in_port_t + sin_addr struct{ s_addr in_addr_t } + sin_zero [8]uint8_t + } + _ [12]byte + } + lock [1]int32 + slot int16 + ref int16 + _ [4]byte +} /* lookup.h:10:1 */ + +type sa = struct { + sin struct { + sin_family sa_family_t + sin_port in_port_t + sin_addr struct{ s_addr in_addr_t } + sin_zero [8]uint8_t + } + _ [12]byte +} /* lookup.h:10:1 */ + +type address = struct { + family int32 + scopeid uint32 + addr [16]uint8_t + sortkey int32 +} /* lookup.h:20:1 */ + +type service = struct { + port uint16_t + proto uint8 + socktype uint8 +} /* lookup.h:27:1 */ + +type resolvconf = struct { + ns [3]struct { + family int32 + scopeid uint32 + addr [16]uint8_t + sortkey int32 + } + nns uint32 + attempts uint32 + ndots uint32 + timeout uint32 +} /* lookup.h:34:1 */ + +func Xfreeaddrinfo(tls *TLS, p uintptr) { /* freeaddrinfo.c:7:6: */ + var cnt size_t + cnt = uint64(1) +__1: + if !((*addrinfo)(unsafe.Pointer(p)).ai_next != 0) { + goto __3 + } + goto __2 +__2: + cnt++ + p = (*addrinfo)(unsafe.Pointer(p)).ai_next + goto __1 + goto __3 +__3: + ; + var b uintptr = p - uintptr(uint64(uintptr(0))) + b -= 88 * uintptr((*aibuf)(unsafe.Pointer(b)).slot) + //TODO LOCK(b->lock); + if !(int32(AssignSubPtrInt16(b+82, int16(cnt))) != 0) { + Xfree(tls, b) + } + //TODO else UNLOCK(b->lock); +} + +func Xgetaddrinfo(tls *TLS, host uintptr, serv uintptr, hint uintptr, res uintptr) int32 { /* getaddrinfo.c:12:5: */ + bp := tls.Alloc(1608) + defer tls.Free(1608) + + // var ports [2]service at bp, 8 + + // var addrs [48]address at bp+8, 1344 + + // var canon [256]int8 at bp+1352, 256 + + var outcanon uintptr + var nservs int32 + var naddrs int32 + var nais int32 + var canon_len int32 + var i int32 + var j int32 + var k int32 + var family int32 = 0 + var flags int32 = 0 + var proto int32 = 0 + var socktype int32 = 0 + var out uintptr + + if !(host != 0) && !(serv != 0) { + return -2 + } + + if hint != 0 { + family = (*addrinfo)(unsafe.Pointer(hint)).ai_family + flags = (*addrinfo)(unsafe.Pointer(hint)).ai_flags + proto = (*addrinfo)(unsafe.Pointer(hint)).ai_protocol + socktype = (*addrinfo)(unsafe.Pointer(hint)).ai_socktype + + var mask int32 = 0x01 | 0x02 | 0x04 | 0x08 | 0x10 | 0x20 | 0x400 + if flags&mask != flags { + return -1 + } + + switch family { + case 2: + fallthrough + case 10: + fallthrough + case 0: + break + fallthrough + default: + return -6 + } + } + + if flags&0x20 != 0 { + Xabort(tls) //TODO- + // /* Define the "an address is configured" condition for address + // * families via ability to create a socket for the family plus + // * routability of the loopback address for the family. */ + // static const struct sockaddr_in lo4 = { + // .sin_family = AF_INET, .sin_port = 65535, + // .sin_addr.s_addr = __BYTE_ORDER == __BIG_ENDIAN + // ? 
0x7f000001 : 0x0100007f + // }; + // static const struct sockaddr_in6 lo6 = { + // .sin6_family = AF_INET6, .sin6_port = 65535, + // .sin6_addr = IN6ADDR_LOOPBACK_INIT + // }; + // int tf[2] = { AF_INET, AF_INET6 }; + // const void *ta[2] = { &lo4, &lo6 }; + // socklen_t tl[2] = { sizeof lo4, sizeof lo6 }; + // for (i=0; i<2; i++) { + // if (family==tf[1-i]) continue; + // int s = socket(tf[i], SOCK_CLOEXEC|SOCK_DGRAM, + // IPPROTO_UDP); + // if (s>=0) { + // int cs; + // pthread_setcancelstate( + // PTHREAD_CANCEL_DISABLE, &cs); + // int r = connect(s, ta[i], tl[i]); + // pthread_setcancelstate(cs, 0); + // close(s); + // if (!r) continue; + // } + // switch (errno) { + // case EADDRNOTAVAIL: + // case EAFNOSUPPORT: + // case EHOSTUNREACH: + // case ENETDOWN: + // case ENETUNREACH: + // break; + // default: + // return EAI_SYSTEM; + // } + // if (family == tf[i]) return EAI_NONAME; + // family = tf[1-i]; + // } + } + + nservs = X__lookup_serv(tls, bp, serv, proto, socktype, flags) + if nservs < 0 { + return nservs + } + + naddrs = X__lookup_name(tls, bp+8, bp+1352, host, family, flags) + if naddrs < 0 { + return naddrs + } + + nais = nservs * naddrs + canon_len = int32(Xstrlen(tls, bp+1352)) + out = Xcalloc(tls, uint64(1), uint64(nais)*uint64(unsafe.Sizeof(aibuf{}))+uint64(canon_len)+uint64(1)) + if !(out != 0) { + return -10 + } + + if canon_len != 0 { + outcanon = out + uintptr(nais)*88 + Xmemcpy(tls, outcanon, bp+1352, uint64(canon_len+1)) + } else { + outcanon = uintptr(0) + } + + for k = AssignInt32(&i, 0); i < naddrs; i++ { + j = 0 + __1: + if !(j < nservs) { + goto __3 + } + { + (*aibuf)(unsafe.Pointer(out + uintptr(k)*88)).slot = int16(k) + //TODO out[k].ai = (struct addrinfo){ + //TODO .ai_family = addrs[i].family, + //TODO .ai_socktype = ports[j].socktype, + //TODO .ai_protocol = ports[j].proto, + //TODO .ai_addrlen = addrs[i].family == AF_INET + //TODO ? 
sizeof(struct sockaddr_in) + //TODO : sizeof(struct sockaddr_in6), + //TODO .ai_addr = (void *)&out[k].sa, + //TODO .ai_canonname = outcanon }; + (*aibuf)(unsafe.Pointer(out + uintptr(k)*88)).ai.ai_family = (*address)(unsafe.Pointer(bp + 8 + uintptr(i)*28)).family + (*aibuf)(unsafe.Pointer(out + uintptr(k)*88)).ai.ai_socktype = int32((*service)(unsafe.Pointer(bp + uintptr(j)*4)).socktype) + (*aibuf)(unsafe.Pointer(out + uintptr(k)*88)).ai.ai_protocol = int32((*service)(unsafe.Pointer(bp + uintptr(j)*4)).proto) + (*aibuf)(unsafe.Pointer(out + uintptr(k)*88)).ai.ai_addrlen = func() uint32 { + if (*address)(unsafe.Pointer(bp+8+uintptr(i)*28)).family == 2 { + return uint32(unsafe.Sizeof(sockaddr_in{})) + } + return uint32(unsafe.Sizeof(sockaddr_in6{})) + }() + (*aibuf)(unsafe.Pointer(out + uintptr(k)*88)).ai.ai_addr = out + uintptr(k)*88 + 48 + (*aibuf)(unsafe.Pointer(out + uintptr(k)*88)).ai.ai_canonname = outcanon + if k != 0 { + (*aibuf)(unsafe.Pointer(out + uintptr(k-1)*88)).ai.ai_next = out + uintptr(k)*88 + } + switch (*address)(unsafe.Pointer(bp + 8 + uintptr(i)*28)).family { + case 2: + (*sockaddr_in)(unsafe.Pointer(out + uintptr(k)*88 + 48)).sin_family = sa_family_t(2) + (*sockaddr_in)(unsafe.Pointer(out + uintptr(k)*88 + 48)).sin_port = Xhtons(tls, (*service)(unsafe.Pointer(bp+uintptr(j)*4)).port) + Xmemcpy(tls, out+uintptr(k)*88+48+4, bp+8+uintptr(i)*28+8, uint64(4)) + break + case 10: + (*sockaddr_in6)(unsafe.Pointer(out + uintptr(k)*88 + 48)).sin6_family = sa_family_t(10) + (*sockaddr_in6)(unsafe.Pointer(out + uintptr(k)*88 + 48)).sin6_port = Xhtons(tls, (*service)(unsafe.Pointer(bp+uintptr(j)*4)).port) + (*sockaddr_in6)(unsafe.Pointer(out + uintptr(k)*88 + 48)).sin6_scope_id = (*address)(unsafe.Pointer(bp + 8 + uintptr(i)*28)).scopeid + Xmemcpy(tls, out+uintptr(k)*88+48+8, bp+8+uintptr(i)*28+8, uint64(16)) + break + } + + } + goto __2 + __2: + j++ + k++ + goto __1 + goto __3 + __3: + } + (*aibuf)(unsafe.Pointer(out)).ref = int16(nais) + *(*uintptr)(unsafe.Pointer(res)) = out + return 0 +} + +type ucred = struct { + pid pid_t + uid uid_t + gid gid_t +} /* socket.h:57:1 */ + +type mmsghdr = struct { + msg_hdr struct { + msg_name uintptr + msg_namelen socklen_t + _ [4]byte + msg_iov uintptr + msg_iovlen int32 + __pad1 int32 + msg_control uintptr + msg_controllen socklen_t + __pad2 int32 + msg_flags int32 + _ [4]byte + } + msg_len uint32 + _ [4]byte +} /* socket.h:63:1 */ + +func Xgethostbyaddr(tls *TLS, a uintptr, l socklen_t, af int32) uintptr { /* gethostbyaddr.c:7:16: */ + bp := tls.Alloc(8) + defer tls.Free(8) + + var size size_t = uint64(63) + // var res uintptr at bp, 8 + + var err int32 + for __ccgo := true; __ccgo; __ccgo = err == 34 { + Xfree(tls, _sh) + _sh = Xmalloc(tls, AssignAddUint64(&size, size+uint64(1))) + if !(_sh != 0) { + *(*int32)(unsafe.Pointer(X__h_errno_location(tls))) = 3 + return uintptr(0) + } + err = Xgethostbyaddr_r(tls, a, l, af, _sh, + _sh+uintptr(1)*32, size-size_t(unsafe.Sizeof(hostent{})), bp, X__h_errno_location(tls)) + } + if err != 0 { + return uintptr(0) + } + return _sh +} + +var _sh uintptr /* gethostbyaddr.c:9:24: */ + +func Xgethostbyaddr_r(tls *TLS, a uintptr, l socklen_t, af int32, h uintptr, buf uintptr, buflen size_t, res uintptr, err uintptr) int32 { /* gethostbyaddr_r.c:10:5: */ + bp := tls.Alloc(28) + defer tls.Free(28) + + //TODO union { + //TODO struct sockaddr_in sin; + //TODO struct sockaddr_in6 sin6; + //TODO } sa = { .sin.sin_family = af }; + *(*struct { + sin sockaddr_in + _ [12]byte + })(unsafe.Pointer(bp)) = struct { + sin 
sockaddr_in + _ [12]byte + }{} //TODO- + (*sockaddr_in)(unsafe.Pointer(bp)).sin_family = sa_family_t(af) //TODO- + var sl socklen_t + if af == 10 { + sl = uint32(unsafe.Sizeof(sockaddr_in6{})) + } else { + sl = uint32(unsafe.Sizeof(sockaddr_in{})) + } + var i int32 + + *(*uintptr)(unsafe.Pointer(res)) = uintptr(0) + + // Load address argument into sockaddr structure + if af == 10 && l == socklen_t(16) { + Xmemcpy(tls, bp+8, a, uint64(16)) + } else if af == 2 && l == socklen_t(4) { + Xmemcpy(tls, bp+4, a, uint64(4)) + } else { + *(*int32)(unsafe.Pointer(err)) = 3 + return 22 + } + + // Align buffer and check for space for pointers and ip address + i = int32(uintptr_t(buf) & (uint64(unsafe.Sizeof(uintptr(0))) - uint64(1))) + if !(i != 0) { + i = int32(unsafe.Sizeof(uintptr(0))) + } + if buflen <= uint64(5)*uint64(unsafe.Sizeof(uintptr(0)))-uint64(i)+uint64(l) { + return 34 + } + buf += uintptr(uint64(unsafe.Sizeof(uintptr(0))) - uint64(i)) + buflen = buflen - (uint64(5)*uint64(unsafe.Sizeof(uintptr(0))) - uint64(i) + uint64(l)) + + (*hostent)(unsafe.Pointer(h)).h_addr_list = buf + buf += uintptr(uint64(2) * uint64(unsafe.Sizeof(uintptr(0)))) + (*hostent)(unsafe.Pointer(h)).h_aliases = buf + buf += uintptr(uint64(2) * uint64(unsafe.Sizeof(uintptr(0)))) + + *(*uintptr)(unsafe.Pointer((*hostent)(unsafe.Pointer(h)).h_addr_list)) = buf + Xmemcpy(tls, *(*uintptr)(unsafe.Pointer((*hostent)(unsafe.Pointer(h)).h_addr_list)), a, uint64(l)) + buf += uintptr(l) + *(*uintptr)(unsafe.Pointer((*hostent)(unsafe.Pointer(h)).h_addr_list + 1*8)) = uintptr(0) + *(*uintptr)(unsafe.Pointer((*hostent)(unsafe.Pointer(h)).h_aliases)) = buf + *(*uintptr)(unsafe.Pointer((*hostent)(unsafe.Pointer(h)).h_aliases + 1*8)) = uintptr(0) + + switch Xgetnameinfo(tls, bp, sl, buf, uint32(buflen), uintptr(0), uint32(0), 0) { + case -3: + *(*int32)(unsafe.Pointer(err)) = 2 + return 11 + case -12: + return 34 + default: + fallthrough + case -10: + fallthrough + case -11: + fallthrough + case -4: + *(*int32)(unsafe.Pointer(err)) = 3 + return *(*int32)(unsafe.Pointer(X___errno_location(tls))) + case 0: + break + } + + (*hostent)(unsafe.Pointer(h)).h_addrtype = af + (*hostent)(unsafe.Pointer(h)).h_length = int32(l) + (*hostent)(unsafe.Pointer(h)).h_name = *(*uintptr)(unsafe.Pointer((*hostent)(unsafe.Pointer(h)).h_aliases)) + *(*uintptr)(unsafe.Pointer(res)) = h + return 0 +} + +func Xgethostbyname(tls *TLS, name uintptr) uintptr { /* gethostbyname.c:8:16: */ + return Xgethostbyname2(tls, name, 2) +} + +func Xgethostbyname2(tls *TLS, name uintptr, af int32) uintptr { /* gethostbyname2.c:8:16: */ + bp := tls.Alloc(8) + defer tls.Free(8) + + var size size_t = uint64(63) + // var res uintptr at bp, 8 + + var err int32 + for __ccgo := true; __ccgo; __ccgo = err == 34 { + Xfree(tls, _sh1) + _sh1 = Xmalloc(tls, AssignAddUint64(&size, size+uint64(1))) + if !(_sh1 != 0) { + *(*int32)(unsafe.Pointer(X__h_errno_location(tls))) = 3 + return uintptr(0) + } + err = Xgethostbyname2_r(tls, name, af, _sh1, + _sh1+uintptr(1)*32, size-size_t(unsafe.Sizeof(hostent{})), bp, X__h_errno_location(tls)) + } + if err != 0 { + return uintptr(0) + } + return _sh1 +} + +var _sh1 uintptr /* gethostbyname2.c:10:24: */ + +func Xgethostbyname2_r(tls *TLS, name uintptr, af int32, h uintptr, buf uintptr, buflen size_t, res uintptr, err uintptr) int32 { /* gethostbyname2_r.c:11:5: */ + bp := tls.Alloc(1600) + defer tls.Free(1600) + + // var addrs [48]address at bp, 1344 + + // var canon [256]int8 at bp+1344, 256 + + var i int32 + var cnt int32 + var align size_t + var 
need size_t + + *(*uintptr)(unsafe.Pointer(res)) = uintptr(0) + cnt = X__lookup_name(tls, bp, bp+1344, name, af, 0x02) + if cnt < 0 { + switch cnt { + case -2: + *(*int32)(unsafe.Pointer(err)) = 1 + return 2 + fallthrough + case -3: + *(*int32)(unsafe.Pointer(err)) = 2 + return 11 + fallthrough + default: + fallthrough + case -4: + *(*int32)(unsafe.Pointer(err)) = 3 + return 74 + fallthrough + case -10: + fallthrough + case -11: + *(*int32)(unsafe.Pointer(err)) = 3 + return *(*int32)(unsafe.Pointer(X___errno_location(tls))) + } + } + + (*hostent)(unsafe.Pointer(h)).h_addrtype = af + (*hostent)(unsafe.Pointer(h)).h_length = func() int32 { + if af == 10 { + return 16 + } + return 4 + }() + + // Align buffer + align = -uintptr_t(buf) & (uint64(unsafe.Sizeof(uintptr(0))) - uint64(1)) + + need = uint64(4) * uint64(unsafe.Sizeof(uintptr(0))) + need = need + uint64(cnt+1)*(uint64(unsafe.Sizeof(uintptr(0)))+uint64((*hostent)(unsafe.Pointer(h)).h_length)) + need = need + (Xstrlen(tls, name) + uint64(1)) + need = need + (Xstrlen(tls, bp+1344) + uint64(1)) + need = need + align + + if need > buflen { + return 34 + } + + buf += uintptr(align) + (*hostent)(unsafe.Pointer(h)).h_aliases = buf + buf += uintptr(uint64(3) * uint64(unsafe.Sizeof(uintptr(0)))) + (*hostent)(unsafe.Pointer(h)).h_addr_list = buf + buf += uintptr(uint64(cnt+1) * uint64(unsafe.Sizeof(uintptr(0)))) + + for i = 0; i < cnt; i++ { + *(*uintptr)(unsafe.Pointer((*hostent)(unsafe.Pointer(h)).h_addr_list + uintptr(i)*8)) = buf + buf += uintptr((*hostent)(unsafe.Pointer(h)).h_length) + Xmemcpy(tls, *(*uintptr)(unsafe.Pointer((*hostent)(unsafe.Pointer(h)).h_addr_list + uintptr(i)*8)), bp+uintptr(i)*28+8, uint64((*hostent)(unsafe.Pointer(h)).h_length)) + } + *(*uintptr)(unsafe.Pointer((*hostent)(unsafe.Pointer(h)).h_addr_list + uintptr(i)*8)) = uintptr(0) + + (*hostent)(unsafe.Pointer(h)).h_name = AssignPtrUintptr((*hostent)(unsafe.Pointer(h)).h_aliases, buf) + Xstrcpy(tls, (*hostent)(unsafe.Pointer(h)).h_name, bp+1344) + buf += uintptr(Xstrlen(tls, (*hostent)(unsafe.Pointer(h)).h_name) + uint64(1)) + + if Xstrcmp(tls, (*hostent)(unsafe.Pointer(h)).h_name, name) != 0 { + *(*uintptr)(unsafe.Pointer((*hostent)(unsafe.Pointer(h)).h_aliases + 1*8)) = buf + Xstrcpy(tls, *(*uintptr)(unsafe.Pointer((*hostent)(unsafe.Pointer(h)).h_aliases + 1*8)), name) + buf += uintptr(Xstrlen(tls, *(*uintptr)(unsafe.Pointer((*hostent)(unsafe.Pointer(h)).h_aliases + 1*8))) + uint64(1)) + } else { + *(*uintptr)(unsafe.Pointer((*hostent)(unsafe.Pointer(h)).h_aliases + 1*8)) = uintptr(0) + } + + *(*uintptr)(unsafe.Pointer((*hostent)(unsafe.Pointer(h)).h_aliases + 2*8)) = uintptr(0) + + *(*uintptr)(unsafe.Pointer(res)) = h + return 0 +} + +func Xgethostbyname_r(tls *TLS, name uintptr, h uintptr, buf uintptr, buflen size_t, res uintptr, err uintptr) int32 { /* gethostbyname_r.c:6:5: */ + return Xgethostbyname2_r(tls, name, 2, h, buf, buflen, res, err) +} + +type if_nameindex = struct { + if_index uint32 + _ [4]byte + if_name uintptr +} /* if.h:12:1 */ + +type ifaddr = struct { + ifa_addr struct { + sa_family sa_family_t + sa_data [14]int8 + } + ifa_ifu struct { + ifu_broadaddr struct { + sa_family sa_family_t + sa_data [14]int8 + } + } + ifa_ifp uintptr + ifa_next uintptr +} /* if.h:51:1 */ + +type ifmap = struct { + mem_start uint64 + mem_end uint64 + base_addr uint16 + irq uint8 + dma uint8 + port uint8 + _ [3]byte +} /* if.h:64:1 */ + +type ifreq = struct { + ifr_ifrn struct{ ifrn_name [16]int8 } + ifr_ifru struct { + _ [0]uint64 + ifru_addr struct { + sa_family 
sa_family_t + sa_data [14]int8 + } + _ [8]byte + } +} /* if.h:76:1 */ + +type ifconf = struct { + ifc_len int32 + _ [4]byte + ifc_ifcu struct{ ifcu_buf uintptr } +} /* if.h:116:1 */ + +type ns_sect = uint32 /* nameser.h:37:3 */ + +type __ns_msg = struct { + _msg uintptr + _eom uintptr + _id uint16_t + _flags uint16_t + _counts [4]uint16_t + _ [4]byte + _sections [4]uintptr + _sect ns_sect + _rrnum int32 + _msg_ptr uintptr +} /* nameser.h:39:9 */ + +type ns_msg = __ns_msg /* nameser.h:46:3 */ + +type _ns_flagdata = struct { + mask int32 + shift int32 +} /* nameser.h:48:1 */ + +type __ns_rr = struct { + name [1025]int8 + _ [1]byte + __type uint16_t + rr_class uint16_t + _ [2]byte + ttl uint32_t + rdlength uint16_t + _ [2]byte + rdata uintptr +} /* nameser.h:59:9 */ + +type ns_rr = __ns_rr /* nameser.h:66:3 */ + +type ns_flag = uint32 /* nameser.h:87:3 */ + +type ns_opcode = uint32 /* nameser.h:96:3 */ + +type ns_rcode = uint32 /* nameser.h:115:3 */ + +type ns_update_operation = uint32 /* nameser.h:121:3 */ + +type ns_tsig_key1 = struct { + name [1025]int8 + alg [1025]int8 + _ [6]byte + data uintptr + len int32 + _ [4]byte +} /* nameser.h:123:1 */ + +type ns_tsig_key = ns_tsig_key1 /* nameser.h:128:28 */ + +type ns_tcp_tsig_state1 = struct { + counter int32 + _ [4]byte + key uintptr + ctx uintptr + sig [512]uint8 + siglen int32 + _ [4]byte +} /* nameser.h:130:1 */ + +type ns_tcp_tsig_state = ns_tcp_tsig_state1 /* nameser.h:137:34 */ + +type ns_type = uint32 /* nameser.h:200:3 */ + +type ns_class = uint32 /* nameser.h:219:3 */ + +type ns_key_types = uint32 /* nameser.h:226:3 */ + +type ns_cert_types = uint32 /* nameser.h:234:3 */ + +type HEADER = struct { + _ [0]uint32 + id uint32 /* unsigned id: 16, unsigned rd: 1, unsigned tc: 1, unsigned aa: 1, unsigned opcode: 4, unsigned qr: 1, unsigned rcode: 4, unsigned cd: 1, unsigned ad: 1, unsigned unused: 1, unsigned ra: 1 */ + qdcount uint32 /* unsigned qdcount: 16, unsigned ancount: 16 */ + nscount uint32 /* unsigned nscount: 16, unsigned arcount: 16 */ +} /* nameser.h:353:3 */ + +// unused; purely for broken apps +type __res_state = struct { + retrans int32 + retry int32 + options uint64 + nscount int32 + nsaddr_list [3]struct { + sin_family sa_family_t + sin_port in_port_t + sin_addr struct{ s_addr in_addr_t } + sin_zero [8]uint8_t + } + id uint16 + _ [2]byte + dnsrch [7]uintptr + defdname [256]int8 + pfcode uint64 + ndots uint32 /* unsigned ndots: 4, unsigned nsort: 4, unsigned ipv6_unavail: 1, unsigned unused: 23 */ + _ [4]byte + sort_list [10]struct { + addr struct{ s_addr in_addr_t } + mask uint32_t + } + qhook uintptr + rhook uintptr + res_h_errno int32 + _vcsock int32 + _flags uint32 + _ [4]byte + _u struct { + _ [0]uint64 + pad [52]int8 + _ [4]byte + } +} /* resolv.h:26:9 */ + +// unused; purely for broken apps +type res_state = uintptr /* resolv.h:62:3 */ + +type res_sym = struct { + number int32 + _ [4]byte + name uintptr + humanname uintptr +} /* resolv.h:70:1 */ + +func itoa(tls *TLS, p uintptr, x uint32) uintptr { /* getnameinfo.c:18:13: */ + p += uintptr(uint64(3) * uint64(unsafe.Sizeof(int32(0)))) + *(*int8)(unsafe.Pointer(PreDecUintptr(&p, 1))) = int8(0) + for __ccgo := true; __ccgo; __ccgo = x != 0 { + *(*int8)(unsafe.Pointer(PreDecUintptr(&p, 1))) = int8(uint32('0') + x%uint32(10)) + x = x / uint32(10) + } + return p +} + +func mkptr4(tls *TLS, s uintptr, ip uintptr) { /* getnameinfo.c:28:13: */ + bp := tls.Alloc(32) + defer tls.Free(32) + + Xsprintf(tls, s, ts+25, + VaList(bp, int32(*(*uint8)(unsafe.Pointer(ip + 3))), 
int32(*(*uint8)(unsafe.Pointer(ip + 2))), int32(*(*uint8)(unsafe.Pointer(ip + 1))), int32(*(*uint8)(unsafe.Pointer(ip))))) +} + +func mkptr6(tls *TLS, s uintptr, ip uintptr) { /* getnameinfo.c:34:13: */ + var i int32 + for i = 15; i >= 0; i-- { + *(*int8)(unsafe.Pointer(PostIncUintptr(&s, 1))) = _sxdigits[int32(*(*uint8)(unsafe.Pointer(ip + uintptr(i))))&15] + *(*int8)(unsafe.Pointer(PostIncUintptr(&s, 1))) = int8('.') + *(*int8)(unsafe.Pointer(PostIncUintptr(&s, 1))) = _sxdigits[int32(*(*uint8)(unsafe.Pointer(ip + uintptr(i))))>>4] + *(*int8)(unsafe.Pointer(PostIncUintptr(&s, 1))) = int8('.') + } + Xstrcpy(tls, s, ts+50) +} + +var _sxdigits = *(*[17]int8)(unsafe.Pointer(ts + 59)) /* getnameinfo.c:36:20 */ + +func reverse_hosts(tls *TLS, buf uintptr, a uintptr, scopeid uint32, family int32) { /* getnameinfo.c:45:13: */ + bp := tls.Alloc(556) + defer tls.Free(556) + + // var line [512]int8 at bp+16, 512 + + var p uintptr + var z uintptr + var _buf [1032]uint8 + _ = _buf + // var atmp [16]uint8 at bp, 16 + + // var iplit address at bp+528, 28 + + //TODO FILE _f, *f = __fopen_rb_ca("/etc/hosts", &_f, _buf, sizeof _buf); + var f uintptr = Xfopen(tls, ts+76, ts+87) + if !(f != 0) { + return + } + if family == 2 { + Xmemcpy(tls, bp+uintptr(12), a, uint64(4)) + Xmemcpy(tls, bp, ts+90, uint64(12)) + a = bp /* &atmp[0] */ + } + for Xfgets(tls, bp+16, int32(unsafe.Sizeof([512]int8{})), f) != 0 { + if AssignUintptr(&p, Xstrchr(tls, bp+16, '#')) != 0 { + *(*int8)(unsafe.Pointer(PostIncUintptr(&p, 1))) = int8('\n') + *(*int8)(unsafe.Pointer(p)) = int8(0) + } + + for p = bp + 16; /* &line[0] */ *(*int8)(unsafe.Pointer(p)) != 0 && !(__isspace(tls, int32(*(*int8)(unsafe.Pointer(p)))) != 0); p++ { + } + *(*int8)(unsafe.Pointer(PostIncUintptr(&p, 1))) = int8(0) + if X__lookup_ipliteral(tls, bp+528, bp+16, 0) <= 0 { + continue + } + + if (*address)(unsafe.Pointer(bp+528)).family == 2 { + Xmemcpy(tls, bp+528+8+uintptr(12), bp+528+8, uint64(4)) + Xmemcpy(tls, bp+528+8, ts+90, uint64(12)) + (*address)(unsafe.Pointer(bp + 528 /* &iplit */)).scopeid = uint32(0) + } + + if Xmemcmp(tls, a, bp+528+8, uint64(16)) != 0 || (*address)(unsafe.Pointer(bp+528)).scopeid != scopeid { + continue + } + + for ; *(*int8)(unsafe.Pointer(p)) != 0 && __isspace(tls, int32(*(*int8)(unsafe.Pointer(p)))) != 0; p++ { + } + for z = p; *(*int8)(unsafe.Pointer(z)) != 0 && !(__isspace(tls, int32(*(*int8)(unsafe.Pointer(z)))) != 0); z++ { + } + *(*int8)(unsafe.Pointer(z)) = int8(0) + if (int64(z)-int64(p))/1 < int64(256) { + Xmemcpy(tls, buf, p, uint64((int64(z)-int64(p))/1+int64(1))) + break + } + } + //TODO __fclose_ca(f); + Xfclose(tls, f) +} + +func reverse_services(tls *TLS, buf uintptr, port int32, dgram int32) { /* getnameinfo.c:87:13: */ + Xabort(tls) //TODO- + // unsigned long svport; + // char line[128], *p, *z; + // unsigned char _buf[1032]; + // FILE _f, *f = __fopen_rb_ca("/etc/services", &_f, _buf, sizeof _buf); + // if (!f) return; + // while (fgets(line, sizeof line, f)) { + // if ((p=strchr(line, '#'))) *p++='\n', *p=0; + + // for (p=line; *p && !isspace(*p); p++); + // if (!*p) continue; + // *p++ = 0; + // svport = strtoul(p, &z, 10); + + // if (svport != port || z==p) continue; + // if (dgram && strncmp(z, "/udp", 4)) continue; + // if (!dgram && strncmp(z, "/tcp", 4)) continue; + // if (p-line > 32) continue; + + // memcpy(buf, line, p-line); + // break; + // } + // __fclose_ca(f); +} + +func Xgetnameinfo(tls *TLS, sa1 uintptr, sl socklen_t, node uintptr, nodelen socklen_t, serv uintptr, servlen socklen_t, flags int32) 
int32 { /* getnameinfo.c:125:5: */ + bp := tls.Alloc(347) + defer tls.Free(347) + + // var ptr [78]int8 at bp, 78 + + // var buf [256]int8 at bp+78, 256 + + // var num [13]int8 at bp+334, 13 + + var af int32 = int32((*sockaddr)(unsafe.Pointer(sa1)).sa_family) + var a uintptr + var scopeid uint32 + + switch af { + case 2: + a = sa1 + 4 + if uint64(sl) < uint64(unsafe.Sizeof(sockaddr_in{})) { + return -6 + } + mkptr4(tls, bp, a) + scopeid = uint32(0) + break + case 10: + a = sa1 + 8 + if uint64(sl) < uint64(unsafe.Sizeof(sockaddr_in6{})) { + return -6 + } + if Xmemcmp(tls, a, ts+90, uint64(12)) != 0 { + mkptr6(tls, bp, a) + } else { + mkptr4(tls, bp, a+uintptr(12)) + } + scopeid = (*sockaddr_in6)(unsafe.Pointer(sa1)).sin6_scope_id + break + default: + return -6 + } + + if node != 0 && nodelen != 0 { + *(*int8)(unsafe.Pointer(bp + 78)) = int8(0) + if !(flags&0x01 != 0) { + reverse_hosts(tls, bp+78, a, scopeid, af) + } + if !(int32(*(*int8)(unsafe.Pointer(bp + 78))) != 0) && !(flags&0x01 != 0) { + Xabort(tls) //TODO- + // unsigned char query[18+PTR_MAX], reply[512]; + // int qlen = __res_mkquery(0, ptr, 1, RR_PTR, + // 0, 0, 0, query, sizeof query); + // query[3] = 0; /* don't need AD flag */ + // int rlen = __res_send(query, qlen, reply, sizeof reply); + // buf[0] = 0; + // if (rlen > 0) + // __dns_parse(reply, rlen, dns_parse_callback, buf); + } + if !(int32(*(*int8)(unsafe.Pointer(bp + 78))) != 0) { + if flags&0x08 != 0 { + return -2 + } + Xinet_ntop(tls, af, a, bp+78, uint32(unsafe.Sizeof([256]int8{}))) + if scopeid != 0 { + Xabort(tls) //TODO- + // char *p = 0, tmp[IF_NAMESIZE+1]; + // if (!(flags & NI_NUMERICSCOPE) && + // (IN6_IS_ADDR_LINKLOCAL(a) || + // IN6_IS_ADDR_MC_LINKLOCAL(a))) + // p = if_indextoname(scopeid, tmp+1); + // if (!p) + // p = itoa(num, scopeid); + // *--p = '%'; + // strcat(buf, p); + } + } + if Xstrlen(tls, bp+78) >= size_t(nodelen) { + return -12 + } + Xstrcpy(tls, node, bp+78) + } + + if serv != 0 && servlen != 0 { + var p uintptr = bp + 78 /* buf */ + var port int32 = int32(Xntohs(tls, (*sockaddr_in)(unsafe.Pointer(sa1)).sin_port)) + *(*int8)(unsafe.Pointer(bp + 78)) = int8(0) + if !(flags&0x02 != 0) { + reverse_services(tls, bp+78, port, flags&0x10) + } + if !(int32(*(*int8)(unsafe.Pointer(p))) != 0) { + p = itoa(tls, bp+334, uint32(port)) + } + if Xstrlen(tls, p) >= size_t(servlen) { + return -12 + } + Xstrcpy(tls, serv, p) + } + + return 0 +} + +var Xh_errno int32 /* h_errno.c:4:5: */ + +func X__h_errno_location(tls *TLS) uintptr { /* h_errno.c:6:5: */ + return uintptr(unsafe.Pointer(&Xh_errno)) +} + +func X__inet_aton(tls *TLS, s0 uintptr, dest uintptr) int32 { /* inet_aton.c:7:5: */ + bp := tls.Alloc(40) + defer tls.Free(40) + + var s uintptr = s0 + var d uintptr = dest + *(*[4]uint64)(unsafe.Pointer(bp /* a */)) = [4]uint64{0: uint64(0)} + // var z uintptr at bp+32, 8 + + var i int32 + + for i = 0; i < 4; i++ { + *(*uint64)(unsafe.Pointer(bp + uintptr(i)*8)) = Xstrtoul(tls, s, bp+32, 0) + if *(*uintptr)(unsafe.Pointer(bp + 32)) == s || *(*int8)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp + 32)))) != 0 && int32(*(*int8)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp + 32))))) != '.' 
|| !(func() int32 { + if 0 != 0 { + return Xisdigit(tls, int32(*(*int8)(unsafe.Pointer(s)))) + } + return Bool32(uint32(*(*int8)(unsafe.Pointer(s)))-uint32('0') < uint32(10)) + }() != 0) { + return 0 + } + if !(int32(*(*int8)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp + 32))))) != 0) { + break + } + s = *(*uintptr)(unsafe.Pointer(bp + 32)) + uintptr(1) + } + if i == 4 { + return 0 + } + switch i { + case 0: + *(*uint64)(unsafe.Pointer(bp + 1*8)) = *(*uint64)(unsafe.Pointer(bp)) & uint64(0xffffff) + AssignShrPtrUint64(bp, int(24)) + fallthrough + case 1: + *(*uint64)(unsafe.Pointer(bp + 2*8)) = *(*uint64)(unsafe.Pointer(bp + 1*8)) & uint64(0xffff) + AssignShrPtrUint64(bp+1*8, int(16)) + fallthrough + case 2: + *(*uint64)(unsafe.Pointer(bp + 3*8)) = *(*uint64)(unsafe.Pointer(bp + 2*8)) & uint64(0xff) + AssignShrPtrUint64(bp+2*8, int(8)) + } + for i = 0; i < 4; i++ { + if *(*uint64)(unsafe.Pointer(bp + uintptr(i)*8)) > uint64(255) { + return 0 + } + *(*uint8)(unsafe.Pointer(d + uintptr(i))) = uint8(*(*uint64)(unsafe.Pointer(bp + uintptr(i)*8))) + } + return 1 +} + +func Xinet_ntop(tls *TLS, af int32, a0 uintptr, s uintptr, l socklen_t) uintptr { /* inet_ntop.c:7:12: */ + bp := tls.Alloc(276) + defer tls.Free(276) + + var a uintptr = a0 + var i int32 + var j int32 + var max int32 + var best int32 + // var buf [100]int8 at bp+176, 100 + + switch af { + case 2: + if socklen_t(Xsnprintf(tls, s, uint64(l), ts+103, VaList(bp, int32(*(*uint8)(unsafe.Pointer(a))), int32(*(*uint8)(unsafe.Pointer(a + 1))), int32(*(*uint8)(unsafe.Pointer(a + 2))), int32(*(*uint8)(unsafe.Pointer(a + 3)))))) < l { + return s + } + break + case 10: + if Xmemcmp(tls, a, ts+90, uint64(12)) != 0 { + Xsnprintf(tls, bp+176, uint64(unsafe.Sizeof([100]int8{})), + ts+115, + VaList(bp+32, 256*int32(*(*uint8)(unsafe.Pointer(a)))+int32(*(*uint8)(unsafe.Pointer(a + 1))), 256*int32(*(*uint8)(unsafe.Pointer(a + 2)))+int32(*(*uint8)(unsafe.Pointer(a + 3))), + 256*int32(*(*uint8)(unsafe.Pointer(a + 4)))+int32(*(*uint8)(unsafe.Pointer(a + 5))), 256*int32(*(*uint8)(unsafe.Pointer(a + 6)))+int32(*(*uint8)(unsafe.Pointer(a + 7))), + 256*int32(*(*uint8)(unsafe.Pointer(a + 8)))+int32(*(*uint8)(unsafe.Pointer(a + 9))), 256*int32(*(*uint8)(unsafe.Pointer(a + 10)))+int32(*(*uint8)(unsafe.Pointer(a + 11))), + 256*int32(*(*uint8)(unsafe.Pointer(a + 12)))+int32(*(*uint8)(unsafe.Pointer(a + 13))), 256*int32(*(*uint8)(unsafe.Pointer(a + 14)))+int32(*(*uint8)(unsafe.Pointer(a + 15))))) + } else { + Xsnprintf(tls, bp+176, uint64(unsafe.Sizeof([100]int8{})), + ts+139, + VaList(bp+96, 256*int32(*(*uint8)(unsafe.Pointer(a)))+int32(*(*uint8)(unsafe.Pointer(a + 1))), 256*int32(*(*uint8)(unsafe.Pointer(a + 2)))+int32(*(*uint8)(unsafe.Pointer(a + 3))), + 256*int32(*(*uint8)(unsafe.Pointer(a + 4)))+int32(*(*uint8)(unsafe.Pointer(a + 5))), 256*int32(*(*uint8)(unsafe.Pointer(a + 6)))+int32(*(*uint8)(unsafe.Pointer(a + 7))), + 256*int32(*(*uint8)(unsafe.Pointer(a + 8)))+int32(*(*uint8)(unsafe.Pointer(a + 9))), 256*int32(*(*uint8)(unsafe.Pointer(a + 10)))+int32(*(*uint8)(unsafe.Pointer(a + 11))), + int32(*(*uint8)(unsafe.Pointer(a + 12))), int32(*(*uint8)(unsafe.Pointer(a + 13))), int32(*(*uint8)(unsafe.Pointer(a + 14))), int32(*(*uint8)(unsafe.Pointer(a + 15))))) + } + // Replace longest /(^0|:)[:0]{2,}/ with "::" + i = AssignInt32(&best, 0) + max = 2 + for ; *(*int8)(unsafe.Pointer(bp + 176 + uintptr(i))) != 0; i++ { + if i != 0 && int32(*(*int8)(unsafe.Pointer(bp + 176 + uintptr(i)))) != ':' { + continue + } + j = int32(Xstrspn(tls, bp+176+uintptr(i), ts+169)) 
+ if j > max { + best = i + max = j + } + } + if max > 3 { + *(*int8)(unsafe.Pointer(bp + 176 + uintptr(best))) = AssignPtrInt8(bp+176+uintptr(best+1), int8(':')) + Xmemmove(tls, bp+176+uintptr(best)+uintptr(2), bp+176+uintptr(best)+uintptr(max), uint64(i-best-max+1)) + } + if Xstrlen(tls, bp+176) < size_t(l) { + Xstrcpy(tls, s, bp+176) + return s + } + break + default: + *(*int32)(unsafe.Pointer(X___errno_location(tls))) = 97 + return uintptr(0) + } + *(*int32)(unsafe.Pointer(X___errno_location(tls))) = 28 + return uintptr(0) +} + +func hexval(tls *TLS, c uint32) int32 { /* inet_pton.c:7:12: */ + if c-uint32('0') < uint32(10) { + return int32(c - uint32('0')) + } + c = c | uint32(32) + if c-uint32('a') < uint32(6) { + return int32(c - uint32('a') + uint32(10)) + } + return -1 +} + +func Xinet_pton(tls *TLS, af int32, s uintptr, a0 uintptr) int32 { /* inet_pton.c:15:5: */ + bp := tls.Alloc(16) + defer tls.Free(16) + + // var ip [8]uint16_t at bp, 16 + + var a uintptr = a0 + var i int32 + var j int32 + var v int32 + var d int32 + var brk int32 = -1 + var need_v4 int32 = 0 + + if af == 2 { + for i = 0; i < 4; i++ { + for v = AssignInt32(&j, 0); j < 3 && func() int32 { + if 0 != 0 { + return Xisdigit(tls, int32(*(*int8)(unsafe.Pointer(s + uintptr(j))))) + } + return Bool32(uint32(*(*int8)(unsafe.Pointer(s + uintptr(j))))-uint32('0') < uint32(10)) + }() != 0; j++ { + v = 10*v + int32(*(*int8)(unsafe.Pointer(s + uintptr(j)))) - '0' + } + if j == 0 || j > 1 && int32(*(*int8)(unsafe.Pointer(s))) == '0' || v > 255 { + return 0 + } + *(*uint8)(unsafe.Pointer(a + uintptr(i))) = uint8(v) + if int32(*(*int8)(unsafe.Pointer(s + uintptr(j)))) == 0 && i == 3 { + return 1 + } + if int32(*(*int8)(unsafe.Pointer(s + uintptr(j)))) != '.' { + return 0 + } + s += uintptr(j + 1) + } + return 0 + } else if af != 10 { + *(*int32)(unsafe.Pointer(X___errno_location(tls))) = 97 + return -1 + } + + if int32(*(*int8)(unsafe.Pointer(s))) == ':' && int32(*(*int8)(unsafe.Pointer(PreIncUintptr(&s, 1)))) != ':' { + return 0 + } + + for i = 0; ; i++ { + if int32(*(*int8)(unsafe.Pointer(s))) == ':' && brk < 0 { + brk = i + *(*uint16_t)(unsafe.Pointer(bp + uintptr(i&7)*2)) = uint16_t(0) + if !(int32(*(*int8)(unsafe.Pointer(PreIncUintptr(&s, 1)))) != 0) { + break + } + if i == 7 { + return 0 + } + continue + } + for v = AssignInt32(&j, 0); j < 4 && AssignInt32(&d, hexval(tls, uint32(*(*int8)(unsafe.Pointer(s + uintptr(j)))))) >= 0; j++ { + v = 16*v + d + } + if j == 0 { + return 0 + } + *(*uint16_t)(unsafe.Pointer(bp + uintptr(i&7)*2)) = uint16_t(v) + if !(int32(*(*int8)(unsafe.Pointer(s + uintptr(j)))) != 0) && (brk >= 0 || i == 7) { + break + } + if i == 7 { + return 0 + } + if int32(*(*int8)(unsafe.Pointer(s + uintptr(j)))) != ':' { + if int32(*(*int8)(unsafe.Pointer(s + uintptr(j)))) != '.' 
|| i < 6 && brk < 0 { + return 0 + } + need_v4 = 1 + i++ + break + } + s += uintptr(j + 1) + } + if brk >= 0 { + Xmemmove(tls, bp+uintptr(brk)*2+uintptr(7)*2-uintptr(i)*2, bp+uintptr(brk)*2, uint64(2*(i+1-brk))) + for j = 0; j < 7-i; j++ { + *(*uint16_t)(unsafe.Pointer(bp + uintptr(brk+j)*2)) = uint16_t(0) + } + } + for j = 0; j < 8; j++ { + *(*uint8)(unsafe.Pointer(PostIncUintptr(&a, 1))) = uint8(int32(*(*uint16_t)(unsafe.Pointer(bp + uintptr(j)*2))) >> 8) + *(*uint8)(unsafe.Pointer(PostIncUintptr(&a, 1))) = uint8(*(*uint16_t)(unsafe.Pointer(bp + uintptr(j)*2))) + } + if need_v4 != 0 && Xinet_pton(tls, 2, s, a-uintptr(4)) <= 0 { + return 0 + } + return 1 +} + +func X__lookup_ipliteral(tls *TLS, buf uintptr, name uintptr, family int32) int32 { /* lookup_ipliteral.c:12:5: */ + bp := tls.Alloc(96) + defer tls.Free(96) + + // var a4 in_addr at bp, 4 + + // var a6 in6_addr at bp+68, 16 + + if X__inet_aton(tls, name, bp) > 0 { + if family == 10 { // wrong family + return -2 + } + Xmemcpy(tls, buf+8, bp, uint64(unsafe.Sizeof(in_addr{}))) + (*address)(unsafe.Pointer(buf)).family = 2 + (*address)(unsafe.Pointer(buf)).scopeid = uint32(0) + return 1 + } + // var tmp [64]int8 at bp+4, 64 + + var p uintptr = Xstrchr(tls, name, '%') + // var z uintptr at bp+88, 8 + + var scopeid uint64 = uint64(0) + if p != 0 && (int64(p)-int64(name))/1 < int64(64) { + Xmemcpy(tls, bp+4, name, uint64((int64(p)-int64(name))/1)) + *(*int8)(unsafe.Pointer(bp + 4 + uintptr((int64(p)-int64(name))/1))) = int8(0) + name = bp + 4 /* &tmp[0] */ + } + + if Xinet_pton(tls, 10, name, bp+68) <= 0 { + return 0 + } + if family == 2 { // wrong family + return -2 + } + + Xmemcpy(tls, buf+8, bp+68, uint64(unsafe.Sizeof(in6_addr{}))) + (*address)(unsafe.Pointer(buf)).family = 10 + if p != 0 { + if func() int32 { + if 0 != 0 { + return Xisdigit(tls, int32(*(*int8)(unsafe.Pointer(PreIncUintptr(&p, 1))))) + } + return Bool32(uint32(*(*int8)(unsafe.Pointer(PreIncUintptr(&p, 1))))-uint32('0') < uint32(10)) + }() != 0 { + scopeid = Xstrtoull(tls, p, bp+88, 10) + } else { + *(*uintptr)(unsafe.Pointer(bp + 88 /* z */)) = p - uintptr(1) + } + if *(*int8)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp + 88)))) != 0 { + Xabort(tls) //TODO- + // if (!IN6_IS_ADDR_LINKLOCAL(&a6) && + // !IN6_IS_ADDR_MC_LINKLOCAL(&a6)) + // return EAI_NONAME; + // scopeid = if_nametoindex(p); + // if (!scopeid) return EAI_NONAME; + } + if scopeid > uint64(0xffffffff) { + return -2 + } + } + (*address)(unsafe.Pointer(buf)).scopeid = uint32(scopeid) + return 1 +} + +func is_valid_hostname(tls *TLS, host uintptr) int32 { /* lookup_name.c:18:12: */ + var s uintptr + //TODO if (strnlen(host, 255)-1 >= 254 || mbstowcs(0, host, 0) == -1) return 0; + if Xstrnlen(tls, host, uint64(255))-uint64(1) >= uint64(254) { + return 0 + } + for s = host; int32(*(*uint8)(unsafe.Pointer(s))) >= 0x80 || int32(*(*uint8)(unsafe.Pointer(s))) == '.' 
|| int32(*(*uint8)(unsafe.Pointer(s))) == '-' || Xisalnum(tls, int32(*(*uint8)(unsafe.Pointer(s)))) != 0; s++ { + } + return BoolInt32(!(*(*uint8)(unsafe.Pointer(s)) != 0)) +} + +var Xzero_struct_address address /* lookup_name.c:27:16: */ + +func name_from_null(tls *TLS, buf uintptr, name uintptr, family int32, flags int32) int32 { /* lookup_name.c:29:12: */ + var cnt int32 = 0 + if name != 0 { + return 0 + } + if flags&0x01 != 0 { + //TODO if (family != AF_INET6) + //TODO buf[cnt++] = (struct address){ .family = AF_INET }; + if family != 10 { + var x = Xzero_struct_address + x.family = 2 + *(*address)(unsafe.Pointer(buf + uintptr(PostIncInt32(&cnt, 1))*28)) = x + } + //TODO if (family != AF_INET) + //TODO buf[cnt++] = (struct address){ .family = AF_INET6 }; + if family != 2 { + var x = Xzero_struct_address + x.family = 10 + *(*address)(unsafe.Pointer(buf + uintptr(PostIncInt32(&cnt, 1))*28)) = x + } + } else { + Xabort(tls) //TODO- + // if (family != AF_INET6) + // buf[cnt++] = (struct address){ .family = AF_INET, .addr = { 127,0,0,1 } }; + // if (family != AF_INET) + // buf[cnt++] = (struct address){ .family = AF_INET6, .addr = { [15] = 1 } }; + } + return cnt +} + +func name_from_numeric(tls *TLS, buf uintptr, name uintptr, family int32) int32 { /* lookup_name.c:58:12: */ + return X__lookup_ipliteral(tls, buf, name, family) +} + +func name_from_hosts(tls *TLS, buf uintptr, canon uintptr, name uintptr, family int32) int32 { /* lookup_name.c:63:12: */ + bp := tls.Alloc(512) + defer tls.Free(512) + + // var line [512]int8 at bp, 512 + + var l size_t = Xstrlen(tls, name) + var cnt int32 = 0 + var badfam int32 = 0 + var _buf [1032]uint8 + _ = _buf + //TODO FILE _f, *f = __fopen_rb_ca("/etc/hosts", &_f, _buf, sizeof _buf); + var _f FILE + _ = _f + var f uintptr = Xfopen(tls, ts+76, ts+87) + if !(f != 0) { + switch *(*int32)(unsafe.Pointer(X___errno_location(tls))) { + case 2: + fallthrough + case 20: + fallthrough + case 13: + return 0 + fallthrough + default: + return -11 + } + } + for Xfgets(tls, bp, int32(unsafe.Sizeof([512]int8{})), f) != 0 && cnt < 48 { + var p uintptr + var z uintptr + + if AssignUintptr(&p, Xstrchr(tls, bp, '#')) != 0 { + *(*int8)(unsafe.Pointer(PostIncUintptr(&p, 1))) = int8('\n') + *(*int8)(unsafe.Pointer(p)) = int8(0) + } + for p = bp + uintptr(1); AssignUintptr(&p, Xstrstr(tls, p, name)) != 0 && (!(__isspace(tls, int32(*(*int8)(unsafe.Pointer(p + UintptrFromInt32(-1))))) != 0) || !(__isspace(tls, int32(*(*int8)(unsafe.Pointer(p + uintptr(l))))) != 0)); p++ { + } + if !(p != 0) { + continue + } + + // Isolate IP address to parse + for p = bp; /* &line[0] */ *(*int8)(unsafe.Pointer(p)) != 0 && !(__isspace(tls, int32(*(*int8)(unsafe.Pointer(p)))) != 0); p++ { + } + *(*int8)(unsafe.Pointer(PostIncUintptr(&p, 1))) = int8(0) + switch name_from_numeric(tls, buf+uintptr(cnt)*28, bp, family) { + case 1: + cnt++ + break + case 0: + continue + default: + badfam = -2 + continue + } + + // Extract first name as canonical name + for ; *(*int8)(unsafe.Pointer(p)) != 0 && __isspace(tls, int32(*(*int8)(unsafe.Pointer(p)))) != 0; p++ { + } + for z = p; *(*int8)(unsafe.Pointer(z)) != 0 && !(__isspace(tls, int32(*(*int8)(unsafe.Pointer(z)))) != 0); z++ { + } + *(*int8)(unsafe.Pointer(z)) = int8(0) + if is_valid_hostname(tls, p) != 0 { + Xmemcpy(tls, canon, p, uint64((int64(z)-int64(p))/1+int64(1))) + } + } + //TODO __fclose_ca(f); + Xfclose(tls, f) + if cnt != 0 { + return cnt + } + return badfam +} + +type dpc_ctx = struct { + addrs uintptr + canon uintptr + cnt int32 + _ [4]byte +} 
/* lookup_name.c:112:1 */ + +func name_from_dns_search(tls *TLS, buf uintptr, canon uintptr, name uintptr, family int32) int32 { /* lookup_name.c:191:12: */ + return -1 //TODO- + Xabort(tls) + return int32(0) //TODO- + // char search[256]; + // struct resolvconf conf; + // size_t l, dots; + // char *p, *z; + + // if (__get_resolv_conf(&conf, search, sizeof search) < 0) return -1; + + // /* Count dots, suppress search when >=ndots or name ends in + // * a dot, which is an explicit request for global scope. */ + // for (dots=l=0; name[l]; l++) if (name[l]=='.') dots++; + // if (dots >= conf.ndots || name[l-1]=='.') *search = 0; + + // /* Strip final dot for canon, fail if multiple trailing dots. */ + // if (name[l-1]=='.') l--; + // if (!l || name[l-1]=='.') return EAI_NONAME; + + // /* This can never happen; the caller already checked length. */ + // if (l >= 256) return EAI_NONAME; + + // /* Name with search domain appended is setup in canon[]. This both + // * provides the desired default canonical name (if the requested + // * name is not a CNAME record) and serves as a buffer for passing + // * the full requested name to name_from_dns. */ + // memcpy(canon, name, l); + // canon[l] = '.'; + + // for (p=search; *p; p=z) { + // for (; isspace(*p); p++); + // for (z=p; *z && !isspace(*z); z++); + // if (z==p) break; + // if (z-p < 256 - l - 1) { + // memcpy(canon+l+1, p, z-p); + // canon[z-p+1+l] = 0; + // int cnt = name_from_dns(buf, canon, canon, family, &conf); + // if (cnt) return cnt; + // } + // } + + // canon[l] = 0; + // return name_from_dns(buf, canon, name, family, &conf); +} + +type policy = struct { + addr [16]uint8 + len uint8 + mask uint8 + prec uint8 + label uint8 +} /* lookup_name.c:237:14 */ + +var defpolicy = [6]policy{ + {addr: *(*[16]uint8)(unsafe.Pointer(ts + 172)), len: uint8(15), mask: uint8(0xff), prec: uint8(50)}, + {addr: *(*[16]uint8)(unsafe.Pointer(ts + 189)), len: uint8(11), mask: uint8(0xff), prec: uint8(35), label: uint8(4)}, + {addr: *(*[16]uint8)(unsafe.Pointer(ts + 205)), len: uint8(1), mask: uint8(0xff), prec: uint8(30), label: uint8(2)}, + {addr: *(*[16]uint8)(unsafe.Pointer(ts + 221)), len: uint8(3), mask: uint8(0xff), prec: uint8(5), label: uint8(5)}, + {addr: *(*[16]uint8)(unsafe.Pointer(ts + 237)), mask: uint8(0xfe), prec: uint8(3), label: uint8(13)}, + // Last rule must match all addresses to stop loop. 
+ {addr: *(*[16]uint8)(unsafe.Pointer(ts + 253)), prec: uint8(40), label: uint8(1)}, +} /* lookup_name.c:241:3 */ + +func policyof(tls *TLS, a uintptr) uintptr { /* lookup_name.c:259:28: */ + var i int32 + for i = 0; ; i++ { + if Xmemcmp(tls, a, uintptr(unsafe.Pointer(&defpolicy))+uintptr(i)*20, uint64(defpolicy[i].len)) != 0 { + continue + } + if int32(*(*uint8_t)(unsafe.Pointer(a + uintptr(defpolicy[i].len))))&int32(defpolicy[i].mask) != + int32(*(*uint8)(unsafe.Pointer(uintptr(unsafe.Pointer(&defpolicy)) + uintptr(i)*20 + uintptr(defpolicy[i].len)))) { + continue + } + return uintptr(unsafe.Pointer(&defpolicy)) + uintptr(i)*20 + } + return uintptr(0) +} + +func labelof(tls *TLS, a uintptr) int32 { /* lookup_name.c:272:12: */ + return int32((*policy)(unsafe.Pointer(policyof(tls, a))).label) +} + +func scopeof(tls *TLS, a uintptr) int32 { /* lookup_name.c:277:12: */ + if int32(*(*uint8_t)(unsafe.Pointer(a))) == 0xff { + return int32(*(*uint8_t)(unsafe.Pointer(a + 1))) & 15 + } + if int32(*(*uint8_t)(unsafe.Pointer(a))) == 0xfe && int32(*(*uint8_t)(unsafe.Pointer(a + 1)))&0xc0 == 0x80 { + return 2 + } + if *(*uint32_t)(unsafe.Pointer(a)) == uint32_t(0) && *(*uint32_t)(unsafe.Pointer(a + 1*4)) == uint32_t(0) && *(*uint32_t)(unsafe.Pointer(a + 2*4)) == uint32_t(0) && int32(*(*uint8_t)(unsafe.Pointer(a + 12))) == 0 && int32(*(*uint8_t)(unsafe.Pointer(a + 13))) == 0 && int32(*(*uint8_t)(unsafe.Pointer(a + 14))) == 0 && int32(*(*uint8_t)(unsafe.Pointer(a + 15))) == 1 { + return 2 + } + if int32(*(*uint8_t)(unsafe.Pointer(a))) == 0xfe && int32(*(*uint8_t)(unsafe.Pointer(a + 1)))&0xc0 == 0xc0 { + return 5 + } + return 14 +} + +func prefixmatch(tls *TLS, s uintptr, d uintptr) int32 { /* lookup_name.c:286:12: */ + // FIXME: The common prefix length should be limited to no greater + // than the nominal length of the prefix portion of the source + // address. However the definition of the source prefix length is + // not clear and thus this limiting is not yet implemented. + var i uint32 + for i = uint32(0); i < uint32(128) && !((int32(*(*uint8_t)(unsafe.Pointer(s /* &.__in6_union */ /* &.__s6_addr */ + uintptr(i/uint32(8)))))^int32(*(*uint8_t)(unsafe.Pointer(d /* &.__in6_union */ /* &.__s6_addr */ + uintptr(i/uint32(8))))))&(int32(128)>>(i%uint32(8))) != 0); i++ { + } + return int32(i) +} + +func addrcmp(tls *TLS, _a uintptr, _b uintptr) int32 { /* lookup_name.c:305:12: */ + var a uintptr = _a + var b uintptr = _b + return (*address)(unsafe.Pointer(b)).sortkey - (*address)(unsafe.Pointer(a)).sortkey +} + +func X__lookup_name(tls *TLS, buf uintptr, canon uintptr, name uintptr, family int32, flags int32) int32 { /* lookup_name.c:311:5: */ + bp := tls.Alloc(92) + defer tls.Free(92) + + var cnt int32 = 0 + var i int32 + var j int32 + _ = j + + *(*int8)(unsafe.Pointer(canon)) = int8(0) + if name != 0 { + // reject empty name and check len so it fits into temp bufs + var l size_t = Xstrnlen(tls, name, uint64(255)) + if l-uint64(1) >= uint64(254) { + return -2 + } + Xmemcpy(tls, canon, name, l+uint64(1)) + } + + // Procedurally, a request for v6 addresses with the v4-mapped + // flag set is like a request for unspecified family, followed + // by filtering of the results. + if flags&0x08 != 0 { + if family == 10 { + family = 0 + } else { + flags = flags - 0x08 + } + } + + // Try each backend until there's at least one result. 
+ cnt = name_from_null(tls, buf, name, family, flags) + if !(cnt != 0) { + cnt = name_from_numeric(tls, buf, name, family) + } + if !(cnt != 0) && !(flags&0x04 != 0) { + cnt = name_from_hosts(tls, buf, canon, name, family) + if !(cnt != 0) { + cnt = name_from_dns_search(tls, buf, canon, name, family) + } + } + if cnt <= 0 { + if cnt != 0 { + return cnt + } + return -2 + } + + // Filter/transform results for v4-mapped lookup, if requested. + if flags&0x08 != 0 { + Xabort(tls) //TODO- + // if (!(flags & AI_ALL)) { + // /* If any v6 results exist, remove v4 results. */ + // for (i=0; i= 0 { + if !(Xconnect(tls, fd, da, dalen) != 0) { + key = key | 0x40000000 + if !(Xgetsockname(tls, fd, sa1, bp+88) != 0) { + if family == 2 { + Xmemcpy(tls, + bp+28+8+uintptr(12), + bp+72+4, uint64(4)) + } + if dscope == scopeof(tls, bp+28+8) { + key = key | 0x20000000 + } + if dlabel == labelof(tls, bp+28+8) { + key = key | 0x10000000 + } + prefixlen = prefixmatch(tls, bp+28+8, + bp+8) + } + } + Xclose(tls, fd) + } + key = key | dprec<<20 + key = key | (15-dscope)<<16 + key = key | prefixlen<<8 + key = key | (48-i)<<0 + (*address)(unsafe.Pointer(buf + uintptr(i)*28)).sortkey = key + } + Xqsort(tls, buf, uint64(cnt), uint64(unsafe.Sizeof(address{})), *(*uintptr)(unsafe.Pointer(&struct { + f func(*TLS, uintptr, uintptr) int32 + }{addrcmp}))) + + //TODO pthread_setcancelstate(cs, 0); + + return cnt +} + +func X__lookup_serv(tls *TLS, buf uintptr, name uintptr, proto int32, socktype int32, flags int32) int32 { /* lookup_serv.c:12:5: */ + bp := tls.Alloc(8) + defer tls.Free(8) + + var line [128]int8 + _ = line + var cnt int32 = 0 + var p uintptr + _ = p + *(*uintptr)(unsafe.Pointer(bp /* z */)) = ts + 13 /* "" */ + var port uint64 = uint64(0) + + switch socktype { + case 1: + switch proto { + case 0: + proto = 6 + fallthrough + case 6: + break + default: + return -8 + } + break + case 2: + switch proto { + case 0: + proto = 17 + fallthrough + case 17: + break + default: + return -8 + } + fallthrough + case 0: + break + default: + if name != 0 { + return -8 + } + (*service)(unsafe.Pointer(buf)).port = uint16_t(0) + (*service)(unsafe.Pointer(buf)).proto = uint8(proto) + (*service)(unsafe.Pointer(buf)).socktype = uint8(socktype) + return 1 + } + + if name != 0 { + if !(int32(*(*int8)(unsafe.Pointer(name))) != 0) { + return -8 + } + port = Xstrtoul(tls, name, bp, 10) + } + if !(int32(*(*int8)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp))))) != 0) { + if port > uint64(65535) { + return -8 + } + if proto != 17 { + (*service)(unsafe.Pointer(buf + uintptr(cnt)*4)).port = uint16_t(port) + (*service)(unsafe.Pointer(buf + uintptr(cnt)*4)).socktype = uint8(1) + (*service)(unsafe.Pointer(buf + uintptr(PostIncInt32(&cnt, 1))*4)).proto = uint8(6) + } + if proto != 6 { + (*service)(unsafe.Pointer(buf + uintptr(cnt)*4)).port = uint16_t(port) + (*service)(unsafe.Pointer(buf + uintptr(cnt)*4)).socktype = uint8(2) + (*service)(unsafe.Pointer(buf + uintptr(PostIncInt32(&cnt, 1))*4)).proto = uint8(17) + } + return cnt + } + + if flags&0x400 != 0 { + return -2 + } + + var l size_t = Xstrlen(tls, name) + _ = l + + Xabort(tls) //TODO- + // unsigned char _buf[1032]; + // FILE _f, *f = __fopen_rb_ca("/etc/services", &_f, _buf, sizeof _buf); + // if (!f) switch (errno) { + // case ENOENT: + // case ENOTDIR: + // case EACCES: + // return EAI_SERVICE; + // default: + // return EAI_SYSTEM; + // } + + Xabort(tls) //TODO- + // while (fgets(line, sizeof line, f) && cnt < MAXSERVS) { + // if ((p=strchr(line, '#'))) *p++='\n', *p=0; + + // /* Find 
service name */ + // for(p=line; (p=strstr(p, name)); p++) { + // if (p>line && !isspace(p[-1])) continue; + // if (p[l] && !isspace(p[l])) continue; + // break; + // } + // if (!p) continue; + + // /* Skip past canonical name at beginning of line */ + // for (p=line; *p && !isspace(*p); p++); + + // port = strtoul(p, &z, 10); + // if (port > 65535 || z==p) continue; + // if (!strncmp(z, "/udp", 4)) { + // if (proto == IPPROTO_TCP) continue; + // buf[cnt].port = port; + // buf[cnt].socktype = SOCK_DGRAM; + // buf[cnt++].proto = IPPROTO_UDP; + // } + // if (!strncmp(z, "/tcp", 4)) { + // if (proto == IPPROTO_UDP) continue; + // buf[cnt].port = port; + // buf[cnt].socktype = SOCK_STREAM; + // buf[cnt++].proto = IPPROTO_TCP; + // } + // } + // __fclose_ca(f); + // return cnt > 0 ? cnt : EAI_SERVICE; + Xabort(tls) + return int32(0) //TODO- +} + +func temper(tls *TLS, x uint32) uint32 { /* rand_r.c:3:17: */ + x = x ^ x>>11 + x = x ^ x<<7&0x9D2C5680 + x = x ^ x<<15&0xEFC60000 + x = x ^ x>>18 + return x +} + +func Xrand_r(tls *TLS, seed uintptr) int32 { /* rand_r.c:12:5: */ + return int32(temper(tls, AssignPtrUint32(seed, *(*uint32)(unsafe.Pointer(seed))*uint32(1103515245)+uint32(12345))) / uint32(2)) +} + +func X__lockfile(tls *TLS, f uintptr) int32 { /* __lockfile.c:4:5: */ + var owner int32 = (*FILE)(unsafe.Pointer(f)).lock + var tid int32 = (*__pthread)(unsafe.Pointer(__pthread_self(tls))).tid + if owner&CplInt32(0x40000000) == tid { + return 0 + } + owner = a_cas(tls, f+140, 0, tid) + if !(owner != 0) { + return 1 + } + for AssignInt32(&owner, a_cas(tls, f+140, 0, tid|0x40000000)) != 0 { + if owner&0x40000000 != 0 || a_cas(tls, f+140, owner, owner|0x40000000) == owner { + __futexwait(tls, f+140, owner|0x40000000, 1) + } + } + return 1 +} + +func X__unlockfile(tls *TLS, f uintptr) { /* __lockfile.c:19:6: */ + if a_swap(tls, f+140, 0)&0x40000000 != 0 { + __wake(tls, f+140, 1, 1) + } +} + +func X__toread(tls *TLS, f uintptr) int32 { /* __toread.c:3:5: */ + *(*int32)(unsafe.Pointer(f + 136)) |= (*FILE)(unsafe.Pointer(f)).mode - 1 + if (*FILE)(unsafe.Pointer(f)).wpos != (*FILE)(unsafe.Pointer(f)).wbase { + (*struct { + f func(*TLS, uintptr, uintptr, size_t) size_t + })(unsafe.Pointer(&struct{ uintptr }{(*FILE)(unsafe.Pointer(f)).write})).f(tls, f, uintptr(0), uint64(0)) + } + (*FILE)(unsafe.Pointer(f)).wpos = AssignPtrUintptr(f+56, AssignPtrUintptr(f+32, uintptr(0))) + if (*FILE)(unsafe.Pointer(f)).flags&uint32(4) != 0 { + *(*uint32)(unsafe.Pointer(f)) |= uint32(32) + return -1 + } + (*FILE)(unsafe.Pointer(f)).rpos = AssignPtrUintptr(f+16, (*FILE)(unsafe.Pointer(f)).buf+uintptr((*FILE)(unsafe.Pointer(f)).buf_size)) + if (*FILE)(unsafe.Pointer(f)).flags&uint32(16) != 0 { + return -1 + } + return 0 +} + +func X__toread_needs_stdio_exit(tls *TLS) { /* __toread.c:16:13: */ + X__builtin_abort(tls) //TODO- + // __stdio_exit_needed(); +} + +// This function assumes it will never be called if there is already +// data buffered for reading. 
+ +func X__uflow(tls *TLS, f uintptr) int32 { /* __uflow.c:6:5: */ + bp := tls.Alloc(1) + defer tls.Free(1) + + // var c uint8 at bp, 1 + + if !(X__toread(tls, f) != 0) && (*struct { + f func(*TLS, uintptr, uintptr, size_t) size_t + })(unsafe.Pointer(&struct{ uintptr }{(*FILE)(unsafe.Pointer(f)).read})).f(tls, f, bp, uint64(1)) == uint64(1) { + return int32(*(*uint8)(unsafe.Pointer(bp))) + } + return -1 +} + +func Xsscanf(tls *TLS, s uintptr, fmt uintptr, va uintptr) int32 { /* sscanf.c:4:5: */ + var ret int32 + var ap va_list + _ = ap + ap = va + ret = Xvsscanf(tls, s, fmt, ap) + _ = ap + return ret +} + +type wctrans_t = uintptr /* wctype.h:20:19 */ + +func store_int(tls *TLS, dest uintptr, size int32, i uint64) { /* vfscanf.c:22:13: */ + if !(dest != 0) { + return + } + switch size { + case -2: + *(*int8)(unsafe.Pointer(dest)) = int8(i) + break + case -1: + *(*int16)(unsafe.Pointer(dest)) = int16(i) + break + case 0: + *(*int32)(unsafe.Pointer(dest)) = int32(i) + break + case 1: + *(*int64)(unsafe.Pointer(dest)) = int64(i) + break + case 3: + *(*int64)(unsafe.Pointer(dest)) = int64(i) + break + } +} + +func arg_n(tls *TLS, ap va_list, n uint32) uintptr { /* vfscanf.c:44:13: */ + var p uintptr + var i uint32 + var ap2 va_list + _ = ap2 + ap2 = ap + for i = n; i > uint32(1); i-- { + VaUintptr(&ap2) + } + p = VaUintptr(&ap2) + _ = ap2 + return p +} + +func Xvfscanf(tls *TLS, f uintptr, fmt uintptr, ap va_list) int32 { /* vfscanf.c:56:5: */ + bp := tls.Alloc(276) + defer tls.Free(276) + + var width int32 + var size int32 + var alloc int32 + var base int32 + var p uintptr + var c int32 + var t int32 + var s uintptr + var wcs uintptr + // var st mbstate_t at bp+268, 8 + + var dest uintptr + var invert int32 + var matches int32 + var x uint64 + var y float64 + var pos off_t + // var scanset [257]uint8 at bp, 257 + + var i size_t + var k size_t + // var wc wchar_t at bp+260, 4 + + var __need_unlock int32 + var tmp uintptr + var tmp1 uintptr + alloc = 0 + dest = uintptr(0) + matches = 0 + pos = int64(0) + __need_unlock = func() int32 { + if (*FILE)(unsafe.Pointer(f)).lock >= 0 { + return X__lockfile(tls, f) + } + return 0 + }() + + if !!(int32((*FILE)(unsafe.Pointer(f)).rpos) != 0) { + goto __1 + } + X__toread(tls, f) +__1: + ; + if !!(int32((*FILE)(unsafe.Pointer(f)).rpos) != 0) { + goto __2 + } + goto input_fail +__2: + ; + + p = fmt +__3: + if !(*(*uint8)(unsafe.Pointer(p)) != 0) { + goto __5 + } + + alloc = 0 + + if !(__isspace(tls, int32(*(*uint8)(unsafe.Pointer(p)))) != 0) { + goto __6 + } +__7: + if !(__isspace(tls, int32(*(*uint8)(unsafe.Pointer(p + 1)))) != 0) { + goto __8 + } + p++ + goto __7 +__8: + ; + X__shlim(tls, f, int64(0)) +__9: + if !(__isspace(tls, func() int32 { + if (*FILE)(unsafe.Pointer(f)).rpos != (*FILE)(unsafe.Pointer(f)).shend { + return int32(*(*uint8)(unsafe.Pointer(PostIncUintptr(&(*FILE)(unsafe.Pointer(f)).rpos, 1)))) + } + return X__shgetc(tls, f) + }()) != 0) { + goto __10 + } + goto __9 +__10: + ; + if (*FILE)(unsafe.Pointer(f)).shlim >= int64(0) { + (*FILE)(unsafe.Pointer(f)).rpos-- + } else { + } + pos = pos + ((*FILE)(unsafe.Pointer(f)).shcnt + (int64((*FILE)(unsafe.Pointer(f)).rpos)-int64((*FILE)(unsafe.Pointer(f)).buf))/1) + goto __4 +__6: + ; + if !(int32(*(*uint8)(unsafe.Pointer(p))) != '%' || int32(*(*uint8)(unsafe.Pointer(p + 1))) == '%') { + goto __11 + } + X__shlim(tls, f, int64(0)) + if !(int32(*(*uint8)(unsafe.Pointer(p))) == '%') { + goto __12 + } + p++ +__14: + if !(__isspace(tls, AssignInt32(&c, func() int32 { + if 
(*FILE)(unsafe.Pointer(f)).rpos != (*FILE)(unsafe.Pointer(f)).shend { + return int32(*(*uint8)(unsafe.Pointer(PostIncUintptr(&(*FILE)(unsafe.Pointer(f)).rpos, 1)))) + } + return X__shgetc(tls, f) + }())) != 0) { + goto __15 + } + goto __14 +__15: + ; + goto __13 +__12: + c = func() int32 { + if (*FILE)(unsafe.Pointer(f)).rpos != (*FILE)(unsafe.Pointer(f)).shend { + return int32(*(*uint8)(unsafe.Pointer(PostIncUintptr(&(*FILE)(unsafe.Pointer(f)).rpos, 1)))) + } + return X__shgetc(tls, f) + }() +__13: + ; + if !(c != int32(*(*uint8)(unsafe.Pointer(p)))) { + goto __16 + } + if (*FILE)(unsafe.Pointer(f)).shlim >= int64(0) { + (*FILE)(unsafe.Pointer(f)).rpos-- + } else { + } + if !(c < 0) { + goto __17 + } + goto input_fail +__17: + ; + goto match_fail +__16: + ; + pos = pos + ((*FILE)(unsafe.Pointer(f)).shcnt + (int64((*FILE)(unsafe.Pointer(f)).rpos)-int64((*FILE)(unsafe.Pointer(f)).buf))/1) + goto __4 +__11: + ; + + p++ + if !(int32(*(*uint8)(unsafe.Pointer(p))) == '*') { + goto __18 + } + dest = uintptr(0) + p++ + goto __19 +__18: + if !(func() int32 { + if 0 != 0 { + return Xisdigit(tls, int32(*(*uint8)(unsafe.Pointer(p)))) + } + return Bool32(uint32(*(*uint8)(unsafe.Pointer(p)))-uint32('0') < uint32(10)) + }() != 0 && int32(*(*uint8)(unsafe.Pointer(p + 1))) == '$') { + goto __20 + } + dest = arg_n(tls, ap, uint32(int32(*(*uint8)(unsafe.Pointer(p)))-'0')) + p += uintptr(2) + goto __21 +__20: + dest = VaUintptr(&ap) +__21: + ; +__19: + ; + + width = 0 +__22: + if !(func() int32 { + if 0 != 0 { + return Xisdigit(tls, int32(*(*uint8)(unsafe.Pointer(p)))) + } + return Bool32(uint32(*(*uint8)(unsafe.Pointer(p)))-uint32('0') < uint32(10)) + }() != 0) { + goto __24 + } + width = 10*width + int32(*(*uint8)(unsafe.Pointer(p))) - '0' + goto __23 +__23: + p++ + goto __22 + goto __24 +__24: + ; + + if !(int32(*(*uint8)(unsafe.Pointer(p))) == 'm') { + goto __25 + } + wcs = uintptr(0) + s = uintptr(0) + alloc = BoolInt32(!!(dest != 0)) + p++ + goto __26 +__25: + alloc = 0 +__26: + ; + + size = 0 + switch int32(*(*uint8)(unsafe.Pointer(PostIncUintptr(&p, 1)))) { + case 'h': + goto __28 + case 'l': + goto __29 + case 'j': + goto __30 + case 'z': + goto __31 + case 't': + goto __32 + case 'L': + goto __33 + case 'd': + goto __34 + case 'i': + goto __35 + case 'o': + goto __36 + case 'u': + goto __37 + case 'x': + goto __38 + case 'a': + goto __39 + case 'e': + goto __40 + case 'f': + goto __41 + case 'g': + goto __42 + case 'A': + goto __43 + case 'E': + goto __44 + case 'F': + goto __45 + case 'G': + goto __46 + case 'X': + goto __47 + case 's': + goto __48 + case 'c': + goto __49 + case '[': + goto __50 + case 'S': + goto __51 + case 'C': + goto __52 + case 'p': + goto __53 + case 'n': + goto __54 + default: + goto __55 + } + goto __27 +__28: + if !(int32(*(*uint8)(unsafe.Pointer(p))) == 'h') { + goto __56 + } + p++ + size = -2 + goto __57 +__56: + size = -1 +__57: + ; + goto __27 +__29: + if !(int32(*(*uint8)(unsafe.Pointer(p))) == 'l') { + goto __58 + } + p++ + size = 3 + goto __59 +__58: + size = 1 +__59: + ; + goto __27 +__30: + size = 3 + goto __27 +__31: +__32: + size = 1 + goto __27 +__33: + size = 2 + goto __27 +__34: +__35: +__36: +__37: +__38: +__39: +__40: +__41: +__42: +__43: +__44: +__45: +__46: +__47: +__48: +__49: +__50: +__51: +__52: +__53: +__54: + p-- + goto __27 +__55: + goto fmt_fail +__27: + ; + + t = int32(*(*uint8)(unsafe.Pointer(p))) + + // C or S + if !(t&0x2f == 3) { + goto __60 + } + t = t | 32 + size = 1 +__60: + ; + + switch t { + case 'c': + goto __62 + case '[': + goto __63 
+ case 'n': + goto __64 + default: + goto __65 + } + goto __61 +__62: + if !(width < 1) { + goto __66 + } + width = 1 +__66: + ; +__63: + goto __61 +__64: + store_int(tls, dest, size, uint64(pos)) + // do not increment match count, etc! + goto __4 +__65: + X__shlim(tls, f, int64(0)) +__67: + if !(__isspace(tls, func() int32 { + if (*FILE)(unsafe.Pointer(f)).rpos != (*FILE)(unsafe.Pointer(f)).shend { + return int32(*(*uint8)(unsafe.Pointer(PostIncUintptr(&(*FILE)(unsafe.Pointer(f)).rpos, 1)))) + } + return X__shgetc(tls, f) + }()) != 0) { + goto __68 + } + goto __67 +__68: + ; + if (*FILE)(unsafe.Pointer(f)).shlim >= int64(0) { + (*FILE)(unsafe.Pointer(f)).rpos-- + } else { + } + pos = pos + ((*FILE)(unsafe.Pointer(f)).shcnt + (int64((*FILE)(unsafe.Pointer(f)).rpos)-int64((*FILE)(unsafe.Pointer(f)).buf))/1) +__61: + ; + + X__shlim(tls, f, int64(width)) + if !(func() int32 { + if (*FILE)(unsafe.Pointer(f)).rpos != (*FILE)(unsafe.Pointer(f)).shend { + return int32(*(*uint8)(unsafe.Pointer(PostIncUintptr(&(*FILE)(unsafe.Pointer(f)).rpos, 1)))) + } + return X__shgetc(tls, f) + }() < 0) { + goto __69 + } + goto input_fail +__69: + ; + if (*FILE)(unsafe.Pointer(f)).shlim >= int64(0) { + (*FILE)(unsafe.Pointer(f)).rpos-- + } else { + } + + switch t { + case 's': + goto __71 + case 'c': + goto __72 + case '[': + goto __73 + case 'p': + goto __74 + case 'X': + goto __75 + case 'x': + goto __76 + case 'o': + goto __77 + case 'd': + goto __78 + case 'u': + goto __79 + case 'i': + goto __80 + case 'a': + goto __81 + case 'A': + goto __82 + case 'e': + goto __83 + case 'E': + goto __84 + case 'f': + goto __85 + case 'F': + goto __86 + case 'g': + goto __87 + case 'G': + goto __88 + } + goto __70 +__71: +__72: +__73: + if !(t == 'c' || t == 's') { + goto __89 + } + Xmemset(tls, bp, -1, uint64(unsafe.Sizeof([257]uint8{}))) + *(*uint8)(unsafe.Pointer(bp)) = uint8(0) + if !(t == 's') { + goto __91 + } + *(*uint8)(unsafe.Pointer(bp + 10)) = uint8(0) + *(*uint8)(unsafe.Pointer(bp + 11)) = uint8(0) + *(*uint8)(unsafe.Pointer(bp + 12)) = uint8(0) + *(*uint8)(unsafe.Pointer(bp + 13)) = uint8(0) + *(*uint8)(unsafe.Pointer(bp + 14)) = uint8(0) + *(*uint8)(unsafe.Pointer(bp + 33)) = uint8(0) +__91: + ; + goto __90 +__89: + if !(int32(*(*uint8)(unsafe.Pointer(PreIncUintptr(&p, 1)))) == '^') { + goto __92 + } + p++ + invert = 1 + goto __93 +__92: + invert = 0 +__93: + ; + Xmemset(tls, bp, invert, uint64(unsafe.Sizeof([257]uint8{}))) + *(*uint8)(unsafe.Pointer(bp)) = uint8(0) + if !(int32(*(*uint8)(unsafe.Pointer(p))) == '-') { + goto __94 + } + p++ + *(*uint8)(unsafe.Pointer(bp + 46)) = uint8(1 - invert) + goto __95 +__94: + if !(int32(*(*uint8)(unsafe.Pointer(p))) == ']') { + goto __96 + } + p++ + *(*uint8)(unsafe.Pointer(bp + 94)) = uint8(1 - invert) +__96: + ; +__95: + ; +__97: + if !(int32(*(*uint8)(unsafe.Pointer(p))) != ']') { + goto __99 + } + if !!(int32(*(*uint8)(unsafe.Pointer(p))) != 0) { + goto __100 + } + goto fmt_fail +__100: + ; + if !(int32(*(*uint8)(unsafe.Pointer(p))) == '-' && *(*uint8)(unsafe.Pointer(p + 1)) != 0 && int32(*(*uint8)(unsafe.Pointer(p + 1))) != ']') { + goto __101 + } + c = int32(*(*uint8)(unsafe.Pointer(PostIncUintptr(&p, 1) + UintptrFromInt32(-1)))) +__102: + if !(c < int32(*(*uint8)(unsafe.Pointer(p)))) { + goto __104 + } + *(*uint8)(unsafe.Pointer(bp + uintptr(1+c))) = uint8(1 - invert) + goto __103 +__103: + c++ + goto __102 + goto __104 +__104: + ; +__101: + ; + *(*uint8)(unsafe.Pointer(bp + uintptr(1+int32(*(*uint8)(unsafe.Pointer(p)))))) = uint8(1 - invert) + goto __98 +__98: 
+ p++ + goto __97 + goto __99 +__99: + ; +__90: + ; + wcs = uintptr(0) + s = uintptr(0) + i = uint64(0) + if t == 'c' { + k = uint64(uint32(width) + 1) + } else { + k = uint64(31) + } + if !(size == 1) { + goto __105 + } + if !(alloc != 0) { + goto __107 + } + wcs = Xmalloc(tls, k*size_t(unsafe.Sizeof(wchar_t(0)))) + if !!(wcs != 0) { + goto __109 + } + goto alloc_fail +__109: + ; + goto __108 +__107: + wcs = dest +__108: + ; + *(*mbstate_t)(unsafe.Pointer(bp + 268 /* st */)) = mbstate_t{} +__110: + if !(*(*uint8)(unsafe.Pointer(bp + uintptr(AssignInt32(&c, func() int32 { + if (*FILE)(unsafe.Pointer(f)).rpos != (*FILE)(unsafe.Pointer(f)).shend { + return int32(*(*uint8)(unsafe.Pointer(PostIncUintptr(&(*FILE)(unsafe.Pointer(f)).rpos, 1)))) + } + return X__shgetc(tls, f) + }())+1))) != 0) { + goto __111 + } + switch Xmbrtowc(tls, bp+260, func() uintptr { *(*int8)(unsafe.Pointer(bp + 264)) = int8(c); return bp + 264 }(), uint64(1), bp+268 /* &st */) { + case Uint64FromInt32(-1): + goto __113 + case Uint64FromInt32(-2): + goto __114 + } + goto __112 +__113: + goto input_fail +__114: + goto __110 +__112: + ; + if !(wcs != 0) { + goto __115 + } + *(*wchar_t)(unsafe.Pointer(wcs + uintptr(PostIncUint64(&i, 1))*4)) = *(*wchar_t)(unsafe.Pointer(bp + 260 /* wc */)) +__115: + ; + if !(alloc != 0 && i == k) { + goto __116 + } + k = k + (k + uint64(1)) + tmp = Xrealloc(tls, wcs, k*size_t(unsafe.Sizeof(wchar_t(0)))) + if !!(tmp != 0) { + goto __117 + } + goto alloc_fail +__117: + ; + wcs = tmp +__116: + ; + goto __110 +__111: + ; + if !!(Xmbsinit(tls, bp+268) != 0) { + goto __118 + } + goto input_fail +__118: + ; + goto __106 +__105: + if !(alloc != 0) { + goto __119 + } + s = Xmalloc(tls, k) + if !!(s != 0) { + goto __121 + } + goto alloc_fail +__121: + ; +__122: + if !(*(*uint8)(unsafe.Pointer(bp + uintptr(AssignInt32(&c, func() int32 { + if (*FILE)(unsafe.Pointer(f)).rpos != (*FILE)(unsafe.Pointer(f)).shend { + return int32(*(*uint8)(unsafe.Pointer(PostIncUintptr(&(*FILE)(unsafe.Pointer(f)).rpos, 1)))) + } + return X__shgetc(tls, f) + }())+1))) != 0) { + goto __123 + } + *(*int8)(unsafe.Pointer(s + uintptr(PostIncUint64(&i, 1)))) = int8(c) + if !(i == k) { + goto __124 + } + k = k + (k + uint64(1)) + tmp1 = Xrealloc(tls, s, k) + if !!(tmp1 != 0) { + goto __125 + } + goto alloc_fail +__125: + ; + s = tmp1 +__124: + ; + goto __122 +__123: + ; + goto __120 +__119: + if !(AssignUintptr(&s, dest) != 0) { + goto __126 + } +__128: + if !(*(*uint8)(unsafe.Pointer(bp + uintptr(AssignInt32(&c, func() int32 { + if (*FILE)(unsafe.Pointer(f)).rpos != (*FILE)(unsafe.Pointer(f)).shend { + return int32(*(*uint8)(unsafe.Pointer(PostIncUintptr(&(*FILE)(unsafe.Pointer(f)).rpos, 1)))) + } + return X__shgetc(tls, f) + }())+1))) != 0) { + goto __129 + } + *(*int8)(unsafe.Pointer(s + uintptr(PostIncUint64(&i, 1)))) = int8(c) + goto __128 +__129: + ; + goto __127 +__126: +__130: + if !(*(*uint8)(unsafe.Pointer(bp + uintptr(AssignInt32(&c, func() int32 { + if (*FILE)(unsafe.Pointer(f)).rpos != (*FILE)(unsafe.Pointer(f)).shend { + return int32(*(*uint8)(unsafe.Pointer(PostIncUintptr(&(*FILE)(unsafe.Pointer(f)).rpos, 1)))) + } + return X__shgetc(tls, f) + }())+1))) != 0) { + goto __131 + } + goto __130 +__131: + ; +__127: + ; +__120: + ; +__106: + ; + if (*FILE)(unsafe.Pointer(f)).shlim >= int64(0) { + (*FILE)(unsafe.Pointer(f)).rpos-- + } else { + } + if !!((*FILE)(unsafe.Pointer(f)).shcnt+(int64((*FILE)(unsafe.Pointer(f)).rpos)-int64((*FILE)(unsafe.Pointer(f)).buf))/1 != 0) { + goto __132 + } + goto match_fail +__132: + ; + 
if !(t == 'c' && (*FILE)(unsafe.Pointer(f)).shcnt+(int64((*FILE)(unsafe.Pointer(f)).rpos)-int64((*FILE)(unsafe.Pointer(f)).buf))/1 != off_t(width)) { + goto __133 + } + goto match_fail +__133: + ; + if !(alloc != 0) { + goto __134 + } + if !(size == 1) { + goto __135 + } + *(*uintptr)(unsafe.Pointer(dest)) = wcs + goto __136 +__135: + *(*uintptr)(unsafe.Pointer(dest)) = s +__136: + ; +__134: + ; + if !(t != 'c') { + goto __137 + } + if !(wcs != 0) { + goto __138 + } + *(*wchar_t)(unsafe.Pointer(wcs + uintptr(i)*4)) = 0 +__138: + ; + if !(s != 0) { + goto __139 + } + *(*int8)(unsafe.Pointer(s + uintptr(i))) = int8(0) +__139: + ; +__137: + ; + goto __70 +__74: +__75: +__76: + base = 16 + goto int_common +__77: + base = 8 + goto int_common +__78: +__79: + base = 10 + goto int_common +__80: + base = 0 +int_common: + x = X__intscan(tls, f, uint32(base), 0, 2*uint64(0x7fffffffffffffff)+uint64(1)) + if !!((*FILE)(unsafe.Pointer(f)).shcnt+(int64((*FILE)(unsafe.Pointer(f)).rpos)-int64((*FILE)(unsafe.Pointer(f)).buf))/1 != 0) { + goto __140 + } + goto match_fail +__140: + ; + if !(t == 'p' && dest != 0) { + goto __141 + } + *(*uintptr)(unsafe.Pointer(dest)) = uintptr(uintptr_t(x)) + goto __142 +__141: + store_int(tls, dest, size, x) +__142: + ; + goto __70 +__81: +__82: +__83: +__84: +__85: +__86: +__87: +__88: + y = X__floatscan(tls, f, size, 0) + if !!((*FILE)(unsafe.Pointer(f)).shcnt+(int64((*FILE)(unsafe.Pointer(f)).rpos)-int64((*FILE)(unsafe.Pointer(f)).buf))/1 != 0) { + goto __143 + } + goto match_fail +__143: + ; + if !(dest != 0) { + goto __144 + } + switch size { + case 0: + goto __146 + case 1: + goto __147 + case 2: + goto __148 + } + goto __145 +__146: + *(*float32)(unsafe.Pointer(dest)) = float32(y) + goto __145 +__147: + *(*float64)(unsafe.Pointer(dest)) = y + goto __145 +__148: + *(*float64)(unsafe.Pointer(dest)) = y + goto __145 +__145: + ; +__144: + ; + goto __70 +__70: + ; + + pos = pos + ((*FILE)(unsafe.Pointer(f)).shcnt + (int64((*FILE)(unsafe.Pointer(f)).rpos)-int64((*FILE)(unsafe.Pointer(f)).buf))/1) + if !(dest != 0) { + goto __149 + } + matches++ +__149: + ; + goto __4 +__4: + p++ + goto __3 + goto __5 +__5: + ; + if !(0 != 0) { + goto __150 + } +fmt_fail: +alloc_fail: +input_fail: + if !!(matches != 0) { + goto __151 + } + matches-- +__151: + ; +match_fail: + if !(alloc != 0) { + goto __152 + } + Xfree(tls, s) + Xfree(tls, wcs) +__152: + ; +__150: + ; +__153: + if !(__need_unlock != 0) { + goto __156 + } + X__unlockfile(tls, f) +__156: + ; + goto __154 +__154: + if 0 != 0 { + goto __153 + } + goto __155 +__155: + ; + return matches +} + +func string_read(tls *TLS, f uintptr, buf uintptr, len size_t) size_t { /* vsscanf.c:4:15: */ + var src uintptr = (*FILE)(unsafe.Pointer(f)).cookie + var k size_t = len + uint64(256) + var end uintptr = Xmemchr(tls, src, 0, k) + if end != 0 { + k = size_t((int64(end) - int64(src)) / 1) + } + if k < len { + len = k + } + Xmemcpy(tls, buf, src, len) + (*FILE)(unsafe.Pointer(f)).rpos = src + uintptr(len) + (*FILE)(unsafe.Pointer(f)).rend = src + uintptr(k) + (*FILE)(unsafe.Pointer(f)).cookie = src + uintptr(k) + return len +} + +func Xvsscanf(tls *TLS, s uintptr, fmt uintptr, ap va_list) int32 { /* vsscanf.c:18:5: */ + bp := tls.Alloc(232) + defer tls.Free(232) + + *(*FILE)(unsafe.Pointer(bp /* f */)) = FILE{read: *(*uintptr)(unsafe.Pointer(&struct { + f func(*TLS, uintptr, uintptr, size_t) size_t + }{string_read})), buf: s, lock: -1, cookie: s} + return Xvfscanf(tls, bp, fmt, ap) +} + +func Xbsearch(tls *TLS, key uintptr, base uintptr, nel 
size_t, width size_t, cmp uintptr) uintptr { /* bsearch.c:3:6: */ + var try uintptr + var sign int32 + for nel > uint64(0) { + try = base + uintptr(width*(nel/uint64(2))) + sign = (*struct { + f func(*TLS, uintptr, uintptr) int32 + })(unsafe.Pointer(&struct{ uintptr }{cmp})).f(tls, key, try) + if sign < 0 { + nel = nel / uint64(2) + } else if sign > 0 { + base = try + uintptr(width) + nel = nel - (nel/uint64(2) + uint64(1)) + } else { + return try + } + } + return uintptr(0) +} + +func strtox(tls *TLS, s uintptr, p uintptr, prec int32) float64 { /* strtod.c:6:20: */ + bp := tls.Alloc(232) + defer tls.Free(232) + + // var f FILE at bp, 232 + + (*FILE)(unsafe.Pointer(bp)).buf = AssignPtrUintptr(bp+8, s) + (*FILE)(unsafe.Pointer(bp)).rend = UintptrFromInt32(-1) + X__shlim(tls, bp, int64(0)) + var y float64 = X__floatscan(tls, bp, prec, 1) + var cnt off_t = (*FILE)(unsafe.Pointer(bp)).shcnt + (int64((*FILE)(unsafe.Pointer(bp)).rpos)-int64((*FILE)(unsafe.Pointer(bp)).buf))/1 + if p != 0 { + *(*uintptr)(unsafe.Pointer(p)) = func() uintptr { + if cnt != 0 { + return s + uintptr(cnt) + } + return s + }() + } + return y +} + +func Xstrtof(tls *TLS, s uintptr, p uintptr) float32 { /* strtod.c:17:7: */ + return float32(strtox(tls, s, p, 0)) +} + +func Xstrtod(tls *TLS, s uintptr, p uintptr) float64 { /* strtod.c:22:8: */ + return strtox(tls, s, p, 1) +} + +func Xstrtold(tls *TLS, s uintptr, p uintptr) float64 { /* strtod.c:27:13: */ + return strtox(tls, s, p, 2) +} + +func strtox1(tls *TLS, s uintptr, p uintptr, base int32, lim uint64) uint64 { /* strtol.c:8:27: */ + bp := tls.Alloc(232) + defer tls.Free(232) + + // var f FILE at bp, 232 + + (*FILE)(unsafe.Pointer(bp)).buf = AssignPtrUintptr(bp+8, s) + (*FILE)(unsafe.Pointer(bp)).rend = UintptrFromInt32(-1) + X__shlim(tls, bp, int64(0)) + var y uint64 = X__intscan(tls, bp, uint32(base), 1, lim) + if p != 0 { + var cnt size_t = size_t((*FILE)(unsafe.Pointer(bp)).shcnt + (int64((*FILE)(unsafe.Pointer(bp)).rpos)-int64((*FILE)(unsafe.Pointer(bp)).buf))/1) + *(*uintptr)(unsafe.Pointer(p)) = s + uintptr(cnt) + } + return y +} + +func Xstrtoull(tls *TLS, s uintptr, p uintptr, base int32) uint64 { /* strtol.c:21:20: */ + return strtox1(tls, s, p, base, 2*uint64(0x7fffffffffffffff)+uint64(1)) +} + +func Xstrtoll(tls *TLS, s uintptr, p uintptr, base int32) int64 { /* strtol.c:26:11: */ + return int64(strtox1(tls, s, p, base, Uint64FromInt64(-0x7fffffffffffffff-int64(1)))) +} + +func Xstrtoul(tls *TLS, s uintptr, p uintptr, base int32) uint64 { /* strtol.c:31:15: */ + return uint64(strtox1(tls, s, p, base, 2*uint64(0x7fffffffffffffff)+uint64(1))) +} + +func Xstrtol(tls *TLS, s uintptr, p uintptr, base int32) int64 { /* strtol.c:36:6: */ + return int64(strtox1(tls, s, p, base, 0+Uint64FromInt64(Int64(-Int64(0x7fffffffffffffff))-Int64FromInt32(1)))) +} + +func Xstrtoimax(tls *TLS, s uintptr, p uintptr, base int32) intmax_t { /* strtol.c:41:10: */ + return intmax_t(Xstrtoll(tls, s, p, base)) +} + +func Xstrtoumax(tls *TLS, s uintptr, p uintptr, base int32) uintmax_t { /* strtol.c:46:11: */ + return uintmax_t(Xstrtoull(tls, s, p, base)) +} + +func Xstrdup(tls *TLS, s uintptr) uintptr { /* strdup.c:4:6: */ + var l size_t = Xstrlen(tls, s) + var d uintptr = Xmalloc(tls, l+uint64(1)) + if !(d != 0) { + return uintptr(0) + } + return Xmemcpy(tls, d, s, l+uint64(1)) +} + +func Xstrlcat(tls *TLS, d uintptr, s uintptr, n size_t) size_t { /* strlcat.c:4:8: */ + var l size_t = Xstrnlen(tls, d, n) + if l == n { + return l + Xstrlen(tls, s) + } + return l + Xstrlcpy(tls, 
d+uintptr(l), s, n-l) +} + +// Support signed or unsigned plain-char + +// Implementation choices... + +// Arbitrary numbers... + +// POSIX/SUS requirements follow. These numbers come directly +// from SUS and have nothing to do with the host system. + +func Xstrlcpy(tls *TLS, d uintptr, s uintptr, n size_t) size_t { /* strlcpy.c:11:8: */ + var d0 uintptr + var wd uintptr + var ws uintptr + d0 = d + + if !!(int32(PostDecUint64(&n, 1)) != 0) { + goto __1 + } + goto finish +__1: + ; + if !(uintptr_t(s)&(uint64(unsafe.Sizeof(size_t(0)))-uint64(1)) == uintptr_t(d)&(uint64(unsafe.Sizeof(size_t(0)))-uint64(1))) { + goto __2 + } +__3: + if !(uintptr_t(s)&(uint64(unsafe.Sizeof(size_t(0)))-uint64(1)) != 0 && n != 0 && AssignPtrInt8(d, *(*int8)(unsafe.Pointer(s))) != 0) { + goto __5 + } + goto __4 +__4: + n-- + s++ + d++ + goto __3 + goto __5 +__5: + ; + if !(n != 0 && *(*int8)(unsafe.Pointer(s)) != 0) { + goto __6 + } + wd = d + ws = s +__7: + if !(n >= size_t(unsafe.Sizeof(size_t(0))) && !((*(*uint64)(unsafe.Pointer(ws))-Uint64(Uint64FromInt32(-1))/uint64(255)) & ^*(*uint64)(unsafe.Pointer(ws)) & (Uint64(Uint64FromInt32(-1))/uint64(255)*uint64(255/2+1)) != 0)) { + goto __9 + } + *(*size_t)(unsafe.Pointer(wd)) = *(*uint64)(unsafe.Pointer(ws)) + goto __8 +__8: + n = n - size_t(unsafe.Sizeof(size_t(0))) + ws += 8 + wd += 8 + goto __7 + goto __9 +__9: + ; + d = wd + s = ws +__6: + ; +__2: + ; +__10: + if !(n != 0 && AssignPtrInt8(d, *(*int8)(unsafe.Pointer(s))) != 0) { + goto __12 + } + goto __11 +__11: + n-- + s++ + d++ + goto __10 + goto __12 +__12: + ; + *(*int8)(unsafe.Pointer(d)) = int8(0) +finish: + return size_t((int64(d)-int64(d0))/1) + Xstrlen(tls, s) +} + +func Xstrncasecmp(tls *TLS, _l uintptr, _r uintptr, n size_t) int32 { /* strncasecmp.c:4:5: */ + var l uintptr = _l + var r uintptr = _r + if !(int32(PostDecUint64(&n, 1)) != 0) { + return 0 + } +__1: + if !(*(*uint8)(unsafe.Pointer(l)) != 0 && *(*uint8)(unsafe.Pointer(r)) != 0 && n != 0 && (int32(*(*uint8)(unsafe.Pointer(l))) == int32(*(*uint8)(unsafe.Pointer(r))) || Xtolower(tls, int32(*(*uint8)(unsafe.Pointer(l)))) == Xtolower(tls, int32(*(*uint8)(unsafe.Pointer(r)))))) { + goto __3 + } + goto __2 +__2: + l++ + r++ + n-- + goto __1 + goto __3 +__3: + ; + return Xtolower(tls, int32(*(*uint8)(unsafe.Pointer(l)))) - Xtolower(tls, int32(*(*uint8)(unsafe.Pointer(r)))) +} + +func X__strncasecmp_l(tls *TLS, l uintptr, r uintptr, n size_t, loc locale_t) int32 { /* strncasecmp.c:12:5: */ + return Xstrncasecmp(tls, l, r, n) +} + +func Xstrncat(tls *TLS, d uintptr, s uintptr, n size_t) uintptr { /* strncat.c:3:6: */ + var a uintptr = d + d += uintptr(Xstrlen(tls, d)) + for n != 0 && *(*int8)(unsafe.Pointer(s)) != 0 { + n-- + *(*int8)(unsafe.Pointer(PostIncUintptr(&d, 1))) = *(*int8)(unsafe.Pointer(PostIncUintptr(&s, 1))) + } + *(*int8)(unsafe.Pointer(PostIncUintptr(&d, 1))) = int8(0) + return a +} + +func Xstrnlen(tls *TLS, s uintptr, n size_t) size_t { /* strnlen.c:3:8: */ + var p uintptr = Xmemchr(tls, s, 0, n) + if p != 0 { + return uint64((int64(p) - int64(s)) / 1) + } + return n +} + +func Xstrspn(tls *TLS, s uintptr, c uintptr) size_t { /* strspn.c:6:8: */ + bp := tls.Alloc(32) + defer tls.Free(32) + + var a uintptr = s + *(*[4]size_t)(unsafe.Pointer(bp /* byteset */)) = [4]size_t{0: uint64(0)} + + if !(int32(*(*int8)(unsafe.Pointer(c))) != 0) { + return uint64(0) + } + if !(int32(*(*int8)(unsafe.Pointer(c + 1))) != 0) { + for ; int32(*(*int8)(unsafe.Pointer(s))) == int32(*(*int8)(unsafe.Pointer(c))); s++ { + } + return size_t((int64(s) - 
int64(a)) / 1) + } + + for ; *(*int8)(unsafe.Pointer(c)) != 0 && AssignOrPtrUint64(bp+uintptr(size_t(*(*uint8)(unsafe.Pointer(c)))/(uint64(8)*uint64(unsafe.Sizeof(size_t(0)))))*8, size_t(uint64(1))<<(size_t(*(*uint8)(unsafe.Pointer(c)))%(uint64(8)*uint64(unsafe.Sizeof(size_t(0)))))) != 0; c++ { + } + for ; *(*int8)(unsafe.Pointer(s)) != 0 && *(*size_t)(unsafe.Pointer(bp + uintptr(size_t(*(*uint8)(unsafe.Pointer(s)))/(uint64(8)*uint64(unsafe.Sizeof(size_t(0)))))*8))&(size_t(uint64(1))<<(size_t(*(*uint8)(unsafe.Pointer(s)))%(uint64(8)*uint64(unsafe.Sizeof(size_t(0)))))) != 0; s++ { + } + return size_t((int64(s) - int64(a)) / 1) +} + +func Xstrtok(tls *TLS, s uintptr, sep uintptr) uintptr { /* strtok.c:3:6: */ + if !(s != 0) && !(int32(AssignUintptr(&s, _sp)) != 0) { + return uintptr(0) + } + s += uintptr(Xstrspn(tls, s, sep)) + if !(int32(*(*int8)(unsafe.Pointer(s))) != 0) { + return AssignPtrUintptr(uintptr(unsafe.Pointer(&_sp)), uintptr(0)) + } + _sp = s + uintptr(Xstrcspn(tls, s, sep)) + if *(*int8)(unsafe.Pointer(_sp)) != 0 { + *(*int8)(unsafe.Pointer(PostIncUintptr(&_sp, 1))) = int8(0) + } else { + _sp = uintptr(0) + } + return s +} + +var _sp uintptr /* strtok.c:5:14: */ + +func X__ccgo_pthreadAttrGetDetachState(tls *TLS, a uintptr) int32 { /* pthread_attr_get.c:3:5: */ + return *(*int32)(unsafe.Pointer(a + 6*4)) +} + +func Xpthread_attr_getdetachstate(tls *TLS, a uintptr, state uintptr) int32 { /* pthread_attr_get.c:7:5: */ + *(*int32)(unsafe.Pointer(state)) = *(*int32)(unsafe.Pointer(a + 6*4)) + return 0 +} + +// +// int pthread_attr_getguardsize(const pthread_attr_t *restrict a, size_t *restrict size) +// { +// *size = a->_a_guardsize; +// return 0; +// } +// +// int pthread_attr_getinheritsched(const pthread_attr_t *restrict a, int *restrict inherit) +// { +// *inherit = a->_a_sched; +// return 0; +// } +// +// int pthread_attr_getschedparam(const pthread_attr_t *restrict a, struct sched_param *restrict param) +// { +// param->sched_priority = a->_a_prio; +// return 0; +// } +// +// int pthread_attr_getschedpolicy(const pthread_attr_t *restrict a, int *restrict policy) +// { +// *policy = a->_a_policy; +// return 0; +// } +// +// int pthread_attr_getscope(const pthread_attr_t *restrict a, int *restrict scope) +// { +// *scope = PTHREAD_SCOPE_SYSTEM; +// return 0; +// } +// +// int pthread_attr_getstack(const pthread_attr_t *restrict a, void **restrict addr, size_t *restrict size) +// { +// if (!a->_a_stackaddr) +// return EINVAL; +// *size = a->_a_stacksize; +// *addr = (void *)(a->_a_stackaddr - *size); +// return 0; +// } +// +// int pthread_attr_getstacksize(const pthread_attr_t *restrict a, size_t *restrict size) +// { +// *size = a->_a_stacksize; +// return 0; +// } +// +// int pthread_barrierattr_getpshared(const pthread_barrierattr_t *restrict a, int *restrict pshared) +// { +// *pshared = !!a->__attr; +// return 0; +// } +// +// int pthread_condattr_getclock(const pthread_condattr_t *restrict a, clockid_t *restrict clk) +// { +// *clk = a->__attr & 0x7fffffff; +// return 0; +// } +// +// int pthread_condattr_getpshared(const pthread_condattr_t *restrict a, int *restrict pshared) +// { +// *pshared = a->__attr>>31; +// return 0; +// } +// +// int pthread_mutexattr_getprotocol(const pthread_mutexattr_t *restrict a, int *restrict protocol) +// { +// *protocol = PTHREAD_PRIO_NONE; +// return 0; +// } +// int pthread_mutexattr_getpshared(const pthread_mutexattr_t *restrict a, int *restrict pshared) +// { +// *pshared = a->__attr / 128U % 2; +// return 0; +// } +// +// int 
pthread_mutexattr_getrobust(const pthread_mutexattr_t *restrict a, int *restrict robust) +// { +// *robust = a->__attr / 4U % 2; +// return 0; +// } + +func X__ccgo_pthreadMutexattrGettype(tls *TLS, a uintptr) int32 { /* pthread_attr_get.c:93:5: */ + return int32((*pthread_mutexattr_t)(unsafe.Pointer(a)).__attr & uint32(3)) +} + +// int pthread_mutexattr_gettype(const pthread_mutexattr_t *restrict a, int *restrict type) +// { +// *type = a->__attr & 3; +// return 0; +// } +// +// int pthread_rwlockattr_getpshared(const pthread_rwlockattr_t *restrict a, int *restrict pshared) +// { +// *pshared = a->__attr[0]; +// return 0; +// } + +func Xpthread_attr_setdetachstate(tls *TLS, a uintptr, state int32) int32 { /* pthread_attr_setdetachstate.c:3:5: */ + if uint32(state) > 1 { + return 22 + } + *(*int32)(unsafe.Pointer(a + 6*4)) = state + return 0 +} + +func X__ccgo_getMutexType(tls *TLS, m uintptr) int32 { /* pthread_mutex_lock.c:3:5: */ + return *(*int32)(unsafe.Pointer(m)) & 15 +} + +// int __pthread_mutex_lock(pthread_mutex_t *m) +// { +// if ((m->_m_type&15) == PTHREAD_MUTEX_NORMAL +// && !a_cas(&m->_m_lock, 0, EBUSY)) +// return 0; +// +// return __pthread_mutex_timedlock(m, 0); +// } +// +// weak_alias(__pthread_mutex_lock, pthread_mutex_lock); + +func Xpthread_mutexattr_destroy(tls *TLS, a uintptr) int32 { /* pthread_mutexattr_destroy.c:3:5: */ + return 0 +} + +func Xpthread_mutexattr_init(tls *TLS, a uintptr) int32 { /* pthread_mutexattr_init.c:3:5: */ + *(*pthread_mutexattr_t)(unsafe.Pointer(a)) = pthread_mutexattr_t{} + return 0 +} + +func Xpthread_mutexattr_settype(tls *TLS, a uintptr, type1 int32) int32 { /* pthread_mutexattr_settype.c:3:5: */ + if uint32(type1) > uint32(2) { + return 22 + } + (*pthread_mutexattr_t)(unsafe.Pointer(a)).__attr = (*pthread_mutexattr_t)(unsafe.Pointer(a)).__attr&Uint32FromInt32(CplInt32(3)) | uint32(type1) + return 0 +} + +func init() { + *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&ptable)) + 0)) = uintptr(unsafe.Pointer(&table)) + uintptr(128)*2 // __ctype_b_loc.c:36:45: +} + +var ts1 = "infinity\x00nan\x00\x00\x00\x01\x02\x04\a\x03\x06\x05\x00.\x00%d.%d.%d.%d.in-addr.arpa\x00ip6.arpa\x000123456789abcdef\x00/etc/hosts\x00rb\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\x00%d.%d.%d.%d\x00%x:%x:%x:%x:%x:%x:%x:%x\x00%x:%x:%x:%x:%x:%x:%d.%d.%d.%d\x00:0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\x00\x00\x00\x00 \x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00 \x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfc\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" +var ts = (*reflect.StringHeader)(unsafe.Pointer(&ts1)).Data diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/netdb/capi_linux_loong64.go temporal-1.22.5/src/vendor/modernc.org/libc/netdb/capi_linux_loong64.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/netdb/capi_linux_loong64.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/netdb/capi_linux_loong64.go 2024-02-23 09:46:15.000000000 +0000 @@ -0,0 +1,5 @@ +// Code generated by 'ccgo netdb/gen.c -crt-import-path -export-defines -export-enums -export-externs X -export-fields F -export-structs -export-typedefs -header -hide _OSSwapInt16,_OSSwapInt32,_OSSwapInt64 -ignore-unsupported-alignment -o netdb/netdb_linux_amd64.go -pkgname netdb', DO NOT EDIT. 
+ +package netdb + +var CAPI = map[string]struct{}{} diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/netdb/netdb_linux_loong64.go temporal-1.22.5/src/vendor/modernc.org/libc/netdb/netdb_linux_loong64.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/netdb/netdb_linux_loong64.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/netdb/netdb_linux_loong64.go 2024-02-23 09:46:15.000000000 +0000 @@ -0,0 +1,3081 @@ +// Code generated by 'ccgo netdb/gen.c -crt-import-path "" -export-defines "" -export-enums "" -export-externs X -export-fields F -export-structs "" -export-typedefs "" -header -hide _OSSwapInt16,_OSSwapInt32,_OSSwapInt64 -ignore-unsupported-alignment -o netdb/netdb_linux_amd64.go -pkgname netdb', DO NOT EDIT. + +package netdb + +import ( + "math" + "reflect" + "sync/atomic" + "unsafe" +) + +var _ = math.Pi +var _ reflect.Kind +var _ atomic.Value +var _ unsafe.Pointer + +const ( + AF_ALG = 38 // socket.h:133:1: + AF_APPLETALK = 5 // socket.h:99:1: + AF_ASH = 18 // socket.h:113:1: + AF_ATMPVC = 8 // socket.h:102:1: + AF_ATMSVC = 20 // socket.h:115:1: + AF_AX25 = 3 // socket.h:97:1: + AF_BLUETOOTH = 31 // socket.h:126:1: + AF_BRIDGE = 7 // socket.h:101:1: + AF_CAIF = 37 // socket.h:132:1: + AF_CAN = 29 // socket.h:124:1: + AF_DECnet = 12 // socket.h:106:1: + AF_ECONET = 19 // socket.h:114:1: + AF_FILE = 1 // socket.h:95:1: + AF_IB = 27 // socket.h:122:1: + AF_IEEE802154 = 36 // socket.h:131:1: + AF_INET = 2 // socket.h:96:1: + AF_INET6 = 10 // socket.h:104:1: + AF_IPX = 4 // socket.h:98:1: + AF_IRDA = 23 // socket.h:118:1: + AF_ISDN = 34 // socket.h:129:1: + AF_IUCV = 32 // socket.h:127:1: + AF_KCM = 41 // socket.h:136:1: + AF_KEY = 15 // socket.h:109:1: + AF_LLC = 26 // socket.h:121:1: + AF_LOCAL = 1 // socket.h:93:1: + AF_MAX = 45 // socket.h:140:1: + AF_MPLS = 28 // socket.h:123:1: + AF_NETBEUI = 13 // socket.h:107:1: + AF_NETLINK = 16 // socket.h:110:1: + AF_NETROM = 6 // socket.h:100:1: + AF_NFC = 39 // socket.h:134:1: + AF_PACKET = 17 // socket.h:112:1: + AF_PHONET = 35 // socket.h:130:1: + AF_PPPOX = 24 // socket.h:119:1: + AF_QIPCRTR = 42 // socket.h:137:1: + AF_RDS = 21 // socket.h:116:1: + AF_ROSE = 11 // socket.h:105:1: + AF_ROUTE = 16 // socket.h:111:1: + AF_RXRPC = 33 // socket.h:128:1: + AF_SECURITY = 14 // socket.h:108:1: + AF_SMC = 43 // socket.h:138:1: + AF_SNA = 22 // socket.h:117:1: + AF_TIPC = 30 // socket.h:125:1: + AF_UNIX = 1 // socket.h:94:1: + AF_UNSPEC = 0 // socket.h:92:1: + AF_VSOCK = 40 // socket.h:135:1: + AF_WANPIPE = 25 // socket.h:120:1: + AF_X25 = 9 // socket.h:103:1: + AF_XDP = 44 // socket.h:139:1: + AI_ADDRCONFIG = 0x0020 // netdb.h:601:1: + AI_ALL = 0x0010 // netdb.h:600:1: + AI_CANONNAME = 0x0002 // netdb.h:597:1: + AI_NUMERICHOST = 0x0004 // netdb.h:598:1: + AI_NUMERICSERV = 0x0400 // netdb.h:613:1: + AI_PASSIVE = 0x0001 // netdb.h:596:1: + AI_V4MAPPED = 0x0008 // netdb.h:599:1: + BIG_ENDIAN = 4321 // endian.h:28:1: + BYTE_ORDER = 1234 // endian.h:30:1: + EAI_AGAIN = -3 // netdb.h:618:1: + EAI_BADFLAGS = -1 // netdb.h:616:1: + EAI_FAIL = -4 // netdb.h:619:1: + EAI_FAMILY = -6 // netdb.h:620:1: + EAI_MEMORY = -10 // netdb.h:623:1: + EAI_NONAME = -2 // netdb.h:617:1: + EAI_OVERFLOW = -12 // netdb.h:625:1: + EAI_SERVICE = -8 // netdb.h:622:1: + EAI_SOCKTYPE = -7 // netdb.h:621:1: + EAI_SYSTEM = -11 // netdb.h:624:1: + FD_SETSIZE = 1024 // select.h:73:1: + FIOGETOWN = 0x8903 // sockios.h:8:1: + FIOSETOWN = 0x8901 // sockios.h:6:1: + HOST_NOT_FOUND = 1 // netdb.h:63:1: + INET6_ADDRSTRLEN = 46 // in.h:234:1: 
+ INET_ADDRSTRLEN = 16 // in.h:233:1: + IN_CLASSA_HOST = 16777215 // in.h:169:1: + IN_CLASSA_MAX = 128 // in.h:170:1: + IN_CLASSA_NET = 0xff000000 // in.h:167:1: + IN_CLASSA_NSHIFT = 24 // in.h:168:1: + IN_CLASSB_HOST = 65535 // in.h:175:1: + IN_CLASSB_MAX = 65536 // in.h:176:1: + IN_CLASSB_NET = 0xffff0000 // in.h:173:1: + IN_CLASSB_NSHIFT = 16 // in.h:174:1: + IN_CLASSC_HOST = 255 // in.h:181:1: + IN_CLASSC_NET = 0xffffff00 // in.h:179:1: + IN_CLASSC_NSHIFT = 8 // in.h:180:1: + IN_LOOPBACKNET = 127 // in.h:197:1: + IPPORT_RESERVED1 = 1024 // netdb.h:79:1: + IPV6_2292DSTOPTS = 4 // in.h:171:1: + IPV6_2292HOPLIMIT = 8 // in.h:175:1: + IPV6_2292HOPOPTS = 3 // in.h:170:1: + IPV6_2292PKTINFO = 2 // in.h:169:1: + IPV6_2292PKTOPTIONS = 6 // in.h:173:1: + IPV6_2292RTHDR = 5 // in.h:172:1: + IPV6_ADDRFORM = 1 // in.h:168:1: + IPV6_ADDR_PREFERENCES = 72 // in.h:223:1: + IPV6_ADD_MEMBERSHIP = 20 // in.h:237:1: + IPV6_AUTHHDR = 10 // in.h:180:1: + IPV6_AUTOFLOWLABEL = 70 // in.h:220:1: + IPV6_CHECKSUM = 7 // in.h:174:1: + IPV6_DONTFRAG = 62 // in.h:214:1: + IPV6_DROP_MEMBERSHIP = 21 // in.h:238:1: + IPV6_DSTOPTS = 59 // in.h:211:1: + IPV6_FREEBIND = 78 // in.h:233:1: + IPV6_HDRINCL = 36 // in.h:198:1: + IPV6_HOPLIMIT = 52 // in.h:204:1: + IPV6_HOPOPTS = 54 // in.h:206:1: + IPV6_IPSEC_POLICY = 34 // in.h:196:1: + IPV6_JOIN_ANYCAST = 27 // in.h:192:1: + IPV6_JOIN_GROUP = 20 // in.h:185:1: + IPV6_LEAVE_ANYCAST = 28 // in.h:193:1: + IPV6_LEAVE_GROUP = 21 // in.h:186:1: + IPV6_MINHOPCOUNT = 73 // in.h:226:1: + IPV6_MTU = 24 // in.h:189:1: + IPV6_MTU_DISCOVER = 23 // in.h:188:1: + IPV6_MULTICAST_ALL = 29 // in.h:194:1: + IPV6_MULTICAST_HOPS = 18 // in.h:183:1: + IPV6_MULTICAST_IF = 17 // in.h:182:1: + IPV6_MULTICAST_LOOP = 19 // in.h:184:1: + IPV6_NEXTHOP = 9 // in.h:179:1: + IPV6_ORIGDSTADDR = 74 // in.h:228:1: + IPV6_PATHMTU = 61 // in.h:213:1: + IPV6_PKTINFO = 50 // in.h:202:1: + IPV6_PMTUDISC_DO = 2 // in.h:246:1: + IPV6_PMTUDISC_DONT = 0 // in.h:244:1: + IPV6_PMTUDISC_INTERFACE = 4 // in.h:248:1: + IPV6_PMTUDISC_OMIT = 5 // in.h:249:1: + IPV6_PMTUDISC_PROBE = 3 // in.h:247:1: + IPV6_PMTUDISC_WANT = 1 // in.h:245:1: + IPV6_RECVDSTOPTS = 58 // in.h:210:1: + IPV6_RECVERR = 25 // in.h:190:1: + IPV6_RECVFRAGSIZE = 77 // in.h:232:1: + IPV6_RECVHOPLIMIT = 51 // in.h:203:1: + IPV6_RECVHOPOPTS = 53 // in.h:205:1: + IPV6_RECVORIGDSTADDR = 74 // in.h:229:1: + IPV6_RECVPATHMTU = 60 // in.h:212:1: + IPV6_RECVPKTINFO = 49 // in.h:201:1: + IPV6_RECVRTHDR = 56 // in.h:208:1: + IPV6_RECVTCLASS = 66 // in.h:217:1: + IPV6_ROUTER_ALERT = 22 // in.h:187:1: + IPV6_ROUTER_ALERT_ISOLATE = 30 // in.h:195:1: + IPV6_RTHDR = 57 // in.h:209:1: + IPV6_RTHDRDSTOPTS = 55 // in.h:207:1: + IPV6_RTHDR_LOOSE = 0 // in.h:256:1: + IPV6_RTHDR_STRICT = 1 // in.h:257:1: + IPV6_RTHDR_TYPE_0 = 0 // in.h:259:1: + IPV6_RXDSTOPTS = 59 // in.h:241:1: + IPV6_RXHOPOPTS = 54 // in.h:240:1: + IPV6_TCLASS = 67 // in.h:218:1: + IPV6_TRANSPARENT = 75 // in.h:230:1: + IPV6_UNICAST_HOPS = 16 // in.h:181:1: + IPV6_UNICAST_IF = 76 // in.h:231:1: + IPV6_V6ONLY = 26 // in.h:191:1: + IPV6_XFRM_POLICY = 35 // in.h:197:1: + IP_ADD_MEMBERSHIP = 35 // in.h:121:1: + IP_ADD_SOURCE_MEMBERSHIP = 39 // in.h:125:1: + IP_BIND_ADDRESS_NO_PORT = 24 // in.h:103:1: + IP_BLOCK_SOURCE = 38 // in.h:124:1: + IP_CHECKSUM = 23 // in.h:102:1: + IP_DEFAULT_MULTICAST_LOOP = 1 // in.h:135:1: + IP_DEFAULT_MULTICAST_TTL = 1 // in.h:134:1: + IP_DROP_MEMBERSHIP = 36 // in.h:122:1: + IP_DROP_SOURCE_MEMBERSHIP = 40 // in.h:126:1: + IP_FREEBIND = 15 // in.h:89:1: + IP_HDRINCL = 3 // 
in.h:48:1: + IP_IPSEC_POLICY = 16 // in.h:90:1: + IP_MAX_MEMBERSHIPS = 20 // in.h:136:1: + IP_MINTTL = 21 // in.h:100:1: + IP_MSFILTER = 41 // in.h:127:1: + IP_MTU = 14 // in.h:88:1: + IP_MTU_DISCOVER = 10 // in.h:84:1: + IP_MULTICAST_ALL = 49 // in.h:128:1: + IP_MULTICAST_IF = 32 // in.h:118:1: + IP_MULTICAST_LOOP = 34 // in.h:120:1: + IP_MULTICAST_TTL = 33 // in.h:119:1: + IP_NODEFRAG = 22 // in.h:101:1: + IP_OPTIONS = 4 // in.h:47:1: + IP_ORIGDSTADDR = 20 // in.h:97:1: + IP_PASSSEC = 18 // in.h:92:1: + IP_PKTINFO = 8 // in.h:81:1: + IP_PKTOPTIONS = 9 // in.h:82:1: + IP_PMTUDISC = 10 // in.h:83:1: + IP_PMTUDISC_DO = 2 // in.h:109:1: + IP_PMTUDISC_DONT = 0 // in.h:107:1: + IP_PMTUDISC_INTERFACE = 4 // in.h:114:1: + IP_PMTUDISC_OMIT = 5 // in.h:116:1: + IP_PMTUDISC_PROBE = 3 // in.h:110:1: + IP_PMTUDISC_WANT = 1 // in.h:108:1: + IP_RECVERR = 11 // in.h:85:1: + IP_RECVFRAGSIZE = 25 // in.h:104:1: + IP_RECVOPTS = 6 // in.h:51:1: + IP_RECVORIGDSTADDR = 20 // in.h:98:1: + IP_RECVRETOPTS = 7 // in.h:53:1: + IP_RECVTOS = 13 // in.h:87:1: + IP_RECVTTL = 12 // in.h:86:1: + IP_RETOPTS = 7 // in.h:54:1: + IP_ROUTER_ALERT = 5 // in.h:80:1: + IP_TOS = 1 // in.h:49:1: + IP_TRANSPARENT = 19 // in.h:93:1: + IP_TTL = 2 // in.h:50:1: + IP_UNBLOCK_SOURCE = 37 // in.h:123:1: + IP_UNICAST_IF = 50 // in.h:129:1: + IP_XFRM_POLICY = 17 // in.h:91:1: + LITTLE_ENDIAN = 1234 // endian.h:27:1: + MCAST_BLOCK_SOURCE = 43 // in.h:67:1: + MCAST_EXCLUDE = 0 // in.h:76:1: + MCAST_INCLUDE = 1 // in.h:77:1: + MCAST_JOIN_GROUP = 42 // in.h:66:1: + MCAST_JOIN_SOURCE_GROUP = 46 // in.h:70:1: + MCAST_LEAVE_GROUP = 45 // in.h:69:1: + MCAST_LEAVE_SOURCE_GROUP = 47 // in.h:71:1: + MCAST_MSFILTER = 48 // in.h:72:1: + MCAST_UNBLOCK_SOURCE = 44 // in.h:68:1: + NETDB_INTERNAL = -1 // netdb.h:72:1: + NETDB_SUCCESS = 0 // netdb.h:73:1: + NI_DGRAM = 16 // netdb.h:646:1: + NI_MAXHOST = 1025 // netdb.h:638:1: + NI_MAXSERV = 32 // netdb.h:639:1: + NI_NAMEREQD = 8 // netdb.h:645:1: + NI_NOFQDN = 4 // netdb.h:644:1: + NI_NUMERICHOST = 1 // netdb.h:642:1: + NI_NUMERICSERV = 2 // netdb.h:643:1: + NO_ADDRESS = 4 // netdb.h:74:1: + NO_DATA = 4 // netdb.h:68:1: + NO_RECOVERY = 3 // netdb.h:66:1: + PDP_ENDIAN = 3412 // endian.h:29:1: + PF_ALG = 38 // socket.h:82:1: + PF_APPLETALK = 5 // socket.h:48:1: + PF_ASH = 18 // socket.h:62:1: + PF_ATMPVC = 8 // socket.h:51:1: + PF_ATMSVC = 20 // socket.h:64:1: + PF_AX25 = 3 // socket.h:46:1: + PF_BLUETOOTH = 31 // socket.h:75:1: + PF_BRIDGE = 7 // socket.h:50:1: + PF_CAIF = 37 // socket.h:81:1: + PF_CAN = 29 // socket.h:73:1: + PF_DECnet = 12 // socket.h:55:1: + PF_ECONET = 19 // socket.h:63:1: + PF_FILE = 1 // socket.h:44:1: + PF_IB = 27 // socket.h:71:1: + PF_IEEE802154 = 36 // socket.h:80:1: + PF_INET = 2 // socket.h:45:1: + PF_INET6 = 10 // socket.h:53:1: + PF_IPX = 4 // socket.h:47:1: + PF_IRDA = 23 // socket.h:67:1: + PF_ISDN = 34 // socket.h:78:1: + PF_IUCV = 32 // socket.h:76:1: + PF_KCM = 41 // socket.h:85:1: + PF_KEY = 15 // socket.h:58:1: + PF_LLC = 26 // socket.h:70:1: + PF_LOCAL = 1 // socket.h:42:1: + PF_MAX = 45 // socket.h:89:1: + PF_MPLS = 28 // socket.h:72:1: + PF_NETBEUI = 13 // socket.h:56:1: + PF_NETLINK = 16 // socket.h:59:1: + PF_NETROM = 6 // socket.h:49:1: + PF_NFC = 39 // socket.h:83:1: + PF_PACKET = 17 // socket.h:61:1: + PF_PHONET = 35 // socket.h:79:1: + PF_PPPOX = 24 // socket.h:68:1: + PF_QIPCRTR = 42 // socket.h:86:1: + PF_RDS = 21 // socket.h:65:1: + PF_ROSE = 11 // socket.h:54:1: + PF_ROUTE = 16 // socket.h:60:1: + PF_RXRPC = 33 // socket.h:77:1: + PF_SECURITY = 14 // 
socket.h:57:1: + PF_SMC = 43 // socket.h:87:1: + PF_SNA = 22 // socket.h:66:1: + PF_TIPC = 30 // socket.h:74:1: + PF_UNIX = 1 // socket.h:43:1: + PF_UNSPEC = 0 // socket.h:41:1: + PF_VSOCK = 40 // socket.h:84:1: + PF_WANPIPE = 25 // socket.h:69:1: + PF_X25 = 9 // socket.h:52:1: + PF_XDP = 44 // socket.h:88:1: + SCM_TIMESTAMP = 29 // socket.h:140:1: + SCM_TIMESTAMPING = 37 // socket.h:142:1: + SCM_TIMESTAMPING_OPT_STATS = 54 // socket.h:90:1: + SCM_TIMESTAMPING_PKTINFO = 58 // socket.h:98:1: + SCM_TIMESTAMPNS = 35 // socket.h:141:1: + SCM_TXTIME = 61 // socket.h:105:1: + SCM_WIFI_STATUS = 41 // socket.h:64:1: + SIOCATMARK = 0x8905 // sockios.h:10:1: + SIOCGPGRP = 0x8904 // sockios.h:9:1: + SIOCGSTAMP = 0x8906 // sockios.h:11:1: + SIOCGSTAMPNS = 0x8907 // sockios.h:12:1: + SIOCSPGRP = 0x8902 // sockios.h:7:1: + SOL_AAL = 265 // socket.h:151:1: + SOL_ALG = 279 // socket.h:165:1: + SOL_ATM = 264 // socket.h:150:1: + SOL_BLUETOOTH = 274 // socket.h:160:1: + SOL_CAIF = 278 // socket.h:164:1: + SOL_DCCP = 269 // socket.h:155:1: + SOL_DECNET = 261 // socket.h:147:1: + SOL_ICMPV6 = 58 // in.h:253:1: + SOL_IP = 0 // in.h:132:1: + SOL_IPV6 = 41 // in.h:252:1: + SOL_IRDA = 266 // socket.h:152:1: + SOL_IUCV = 277 // socket.h:163:1: + SOL_KCM = 281 // socket.h:167:1: + SOL_LLC = 268 // socket.h:154:1: + SOL_NETBEUI = 267 // socket.h:153:1: + SOL_NETLINK = 270 // socket.h:156:1: + SOL_NFC = 280 // socket.h:166:1: + SOL_PACKET = 263 // socket.h:149:1: + SOL_PNPIPE = 275 // socket.h:161:1: + SOL_PPPOL2TP = 273 // socket.h:159:1: + SOL_RAW = 255 // socket.h:146:1: + SOL_RDS = 276 // socket.h:162:1: + SOL_RXRPC = 272 // socket.h:158:1: + SOL_SOCKET = 1 // socket.h:9:1: + SOL_TIPC = 271 // socket.h:157:1: + SOL_TLS = 282 // socket.h:168:1: + SOL_X25 = 262 // socket.h:148:1: + SOL_XDP = 283 // socket.h:169:1: + SOMAXCONN = 4096 // socket.h:172:1: + SO_ACCEPTCONN = 30 // socket.h:51:1: + SO_ATTACH_BPF = 50 // socket.h:82:1: + SO_ATTACH_FILTER = 26 // socket.h:45:1: + SO_ATTACH_REUSEPORT_CBPF = 51 // socket.h:85:1: + SO_ATTACH_REUSEPORT_EBPF = 52 // socket.h:86:1: + SO_BINDTODEVICE = 25 // socket.h:42:1: + SO_BINDTOIFINDEX = 62 // socket.h:107:1: + SO_BPF_EXTENSIONS = 48 // socket.h:78:1: + SO_BROADCAST = 6 // socket.h:16:1: + SO_BSDCOMPAT = 14 // socket.h:26:1: + SO_BUSY_POLL = 46 // socket.h:74:1: + SO_CNX_ADVICE = 53 // socket.h:88:1: + SO_COOKIE = 57 // socket.h:96:1: + SO_DEBUG = 1 // socket.h:11:1: + SO_DETACH_BPF = 27 // socket.h:83:1: + SO_DETACH_FILTER = 27 // socket.h:46:1: + SO_DETACH_REUSEPORT_BPF = 68 // socket.h:120:1: + SO_DOMAIN = 39 // socket.h:59:1: + SO_DONTROUTE = 5 // socket.h:15:1: + SO_ERROR = 4 // socket.h:14:1: + SO_GET_FILTER = 26 // socket.h:47:1: + SO_INCOMING_CPU = 49 // socket.h:80:1: + SO_INCOMING_NAPI_ID = 56 // socket.h:94:1: + SO_KEEPALIVE = 9 // socket.h:21:1: + SO_LINGER = 13 // socket.h:25:1: + SO_LOCK_FILTER = 44 // socket.h:70:1: + SO_MARK = 36 // socket.h:56:1: + SO_MAX_PACING_RATE = 47 // socket.h:76:1: + SO_MEMINFO = 55 // socket.h:92:1: + SO_NOFCS = 43 // socket.h:68:1: + SO_NO_CHECK = 11 // socket.h:23:1: + SO_OOBINLINE = 10 // socket.h:22:1: + SO_PASSCRED = 16 // socket.h:29:1: + SO_PASSSEC = 34 // socket.h:54:1: + SO_PEEK_OFF = 42 // socket.h:65:1: + SO_PEERCRED = 17 // socket.h:30:1: + SO_PEERGROUPS = 59 // socket.h:100:1: + SO_PEERNAME = 28 // socket.h:49:1: + SO_PEERSEC = 31 // socket.h:53:1: + SO_PRIORITY = 12 // socket.h:24:1: + SO_PROTOCOL = 38 // socket.h:58:1: + SO_RCVBUF = 8 // socket.h:18:1: + SO_RCVBUFFORCE = 33 // socket.h:20:1: + SO_RCVLOWAT = 18 // 
socket.h:31:1: + SO_RCVTIMEO = 20 // socket.h:129:1: + SO_RCVTIMEO_NEW = 66 // socket.h:117:1: + SO_RCVTIMEO_OLD = 20 // socket.h:33:1: + SO_REUSEADDR = 2 // socket.h:12:1: + SO_REUSEPORT = 15 // socket.h:27:1: + SO_RXQ_OVFL = 40 // socket.h:61:1: + SO_SECURITY_AUTHENTICATION = 22 // socket.h:38:1: + SO_SECURITY_ENCRYPTION_NETWORK = 24 // socket.h:40:1: + SO_SECURITY_ENCRYPTION_TRANSPORT = 23 // socket.h:39:1: + SO_SELECT_ERR_QUEUE = 45 // socket.h:72:1: + SO_SNDBUF = 7 // socket.h:17:1: + SO_SNDBUFFORCE = 32 // socket.h:19:1: + SO_SNDLOWAT = 19 // socket.h:32:1: + SO_SNDTIMEO = 21 // socket.h:130:1: + SO_SNDTIMEO_NEW = 67 // socket.h:118:1: + SO_SNDTIMEO_OLD = 21 // socket.h:34:1: + SO_TIMESTAMP = 29 // socket.h:125:1: + SO_TIMESTAMPING = 37 // socket.h:127:1: + SO_TIMESTAMPING_NEW = 65 // socket.h:115:1: + SO_TIMESTAMPING_OLD = 37 // socket.h:111:1: + SO_TIMESTAMPNS = 35 // socket.h:126:1: + SO_TIMESTAMPNS_NEW = 64 // socket.h:114:1: + SO_TIMESTAMPNS_OLD = 35 // socket.h:110:1: + SO_TIMESTAMP_NEW = 63 // socket.h:113:1: + SO_TIMESTAMP_OLD = 29 // socket.h:109:1: + SO_TXTIME = 61 // socket.h:104:1: + SO_TYPE = 3 // socket.h:13:1: + SO_WIFI_STATUS = 41 // socket.h:63:1: + SO_ZEROCOPY = 60 // socket.h:102:1: + TRY_AGAIN = 2 // netdb.h:64:1: + X_ASM_X86_POSIX_TYPES_64_H = 0 // posix_types_64.h:3:1: + X_ATFILE_SOURCE = 1 // features.h:342:1: + X_BITS_BYTESWAP_H = 1 // byteswap.h:24:1: + X_BITS_ENDIANNESS_H = 1 // endianness.h:2:1: + X_BITS_ENDIAN_H = 1 // endian.h:20:1: + X_BITS_PTHREADTYPES_ARCH_H = 1 // pthreadtypes-arch.h:19:1: + X_BITS_PTHREADTYPES_COMMON_H = 1 // pthreadtypes.h:20:1: + X_BITS_SOCKADDR_H = 1 // sockaddr.h:24:1: + X_BITS_STDINT_INTN_H = 1 // stdint-intn.h:20:1: + X_BITS_STDINT_UINTN_H = 1 // stdint-uintn.h:20:1: + X_BITS_TIME64_H = 1 // time64.h:24:1: + X_BITS_TYPESIZES_H = 1 // typesizes.h:24:1: + X_BITS_TYPES_H = 1 // types.h:24:1: + X_BITS_UINTN_IDENTITY_H = 1 // uintn-identity.h:24:1: + X_BSD_SIZE_T_ = 0 // stddef.h:189:1: + X_BSD_SIZE_T_DEFINED_ = 0 // stddef.h:192:1: + X_DEFAULT_SOURCE = 1 // features.h:227:1: + X_ENDIAN_H = 1 // endian.h:19:1: + X_FEATURES_H = 1 // features.h:19:1: + X_FILE_OFFSET_BITS = 64 // :25:1: + X_GCC_SIZE_T = 0 // stddef.h:195:1: + X_LINUX_POSIX_TYPES_H = 0 // posix_types.h:3:1: + X_LP64 = 1 // :284:1: + X_NETDB_H = 1 // netdb.h:23:1: + X_NETINET_IN_H = 1 // in.h:19:1: + X_PATH_HEQUIV = "/etc/hosts.equiv" // netdb.h:43:1: + X_PATH_HOSTS = "/etc/hosts" // netdb.h:44:1: + X_PATH_NETWORKS = "/etc/networks" // netdb.h:45:1: + X_PATH_NSSWITCH_CONF = "/etc/nsswitch.conf" // netdb.h:46:1: + X_PATH_PROTOCOLS = "/etc/protocols" // netdb.h:47:1: + X_PATH_SERVICES = "/etc/services" // netdb.h:48:1: + X_POSIX_C_SOURCE = 200809 // features.h:281:1: + X_POSIX_SOURCE = 1 // features.h:279:1: + X_RPC_NETDB_H = 1 // netdb.h:37:1: + X_RWLOCK_INTERNAL_H = 0 // struct_rwlock.h:21:1: + X_SIZET_ = 0 // stddef.h:196:1: + X_SIZE_T = 0 // stddef.h:183:1: + X_SIZE_T_ = 0 // stddef.h:188:1: + X_SIZE_T_DECLARED = 0 // stddef.h:193:1: + X_SIZE_T_DEFINED = 0 // stddef.h:191:1: + X_SIZE_T_DEFINED_ = 0 // stddef.h:190:1: + X_SS_SIZE = 128 // sockaddr.h:40:1: + X_STDC_PREDEF_H = 1 // :162:1: + X_STRUCT_TIMESPEC = 1 // struct_timespec.h:3:1: + X_SYS_CDEFS_H = 1 // cdefs.h:19:1: + X_SYS_SELECT_H = 1 // select.h:22:1: + X_SYS_SIZE_T_H = 0 // stddef.h:184:1: + X_SYS_SOCKET_H = 1 // socket.h:20:1: + X_SYS_TYPES_H = 1 // types.h:23:1: + X_THREAD_MUTEX_INTERNAL_H = 1 // struct_mutex.h:20:1: + X_THREAD_SHARED_TYPES_H = 1 // thread-shared-types.h:20:1: + X_T_SIZE = 0 // 
stddef.h:186:1: + X_T_SIZE_ = 0 // stddef.h:185:1: + Linux = 1 // :231:1: + Unix = 1 // :177:1: +) + +// Standard well-known ports. +const ( /* in.h:122:1: */ + IPPORT_ECHO = 7 // Echo service. + IPPORT_DISCARD = 9 // Discard transmissions service. + IPPORT_SYSTAT = 11 // System status service. + IPPORT_DAYTIME = 13 // Time of day service. + IPPORT_NETSTAT = 15 // Network status service. + IPPORT_FTP = 21 // File Transfer Protocol. + IPPORT_TELNET = 23 // Telnet protocol. + IPPORT_SMTP = 25 // Simple Mail Transfer Protocol. + IPPORT_TIMESERVER = 37 // Timeserver service. + IPPORT_NAMESERVER = 42 // Domain Name Service. + IPPORT_WHOIS = 43 // Internet Whois service. + IPPORT_MTP = 57 + + IPPORT_TFTP = 69 // Trivial File Transfer Protocol. + IPPORT_RJE = 77 + IPPORT_FINGER = 79 // Finger service. + IPPORT_TTYLINK = 87 + IPPORT_SUPDUP = 95 // SUPDUP protocol. + + IPPORT_EXECSERVER = 512 // execd service. + IPPORT_LOGINSERVER = 513 // rlogind service. + IPPORT_CMDSERVER = 514 + IPPORT_EFSSERVER = 520 + + // UDP ports. + IPPORT_BIFFUDP = 512 + IPPORT_WHOSERVER = 513 + IPPORT_ROUTESERVER = 520 + + // Ports less than this value are reserved for privileged processes. + IPPORT_RESERVED = 1024 + + // Ports greater this value are reserved for (non-privileged) servers. + IPPORT_USERRESERVED = 5000 +) + +// Options for use with `getsockopt' and `setsockopt' at the IPv6 level. +// The first word in the comment at the right is the data type used; +// "bool" means a boolean value stored in an `int'. + +// Advanced API (RFC3542) (1). + +// Advanced API (RFC3542) (2). + +// RFC5014. + +// RFC5082. + +// Obsolete synonyms for the above. + +// IPV6_MTU_DISCOVER values. + +// Socket level values for IPv6. + +// Routing header options for IPv6. + +// Standard well-defined IP protocols. +const ( /* in.h:40:1: */ + IPPROTO_IP = 0 // Dummy protocol for TCP. + IPPROTO_ICMP = 1 // Internet Control Message Protocol. + IPPROTO_IGMP = 2 // Internet Group Management Protocol. + IPPROTO_IPIP = 4 // IPIP tunnels (older KA9Q tunnels use 94). + IPPROTO_TCP = 6 // Transmission Control Protocol. + IPPROTO_EGP = 8 // Exterior Gateway Protocol. + IPPROTO_PUP = 12 // PUP protocol. + IPPROTO_UDP = 17 // User Datagram Protocol. + IPPROTO_IDP = 22 // XNS IDP protocol. + IPPROTO_TP = 29 // SO Transport Protocol Class 4. + IPPROTO_DCCP = 33 // Datagram Congestion Control Protocol. + IPPROTO_IPV6 = 41 // IPv6 header. + IPPROTO_RSVP = 46 // Reservation Protocol. + IPPROTO_GRE = 47 // General Routing Encapsulation. + IPPROTO_ESP = 50 // encapsulating security payload. + IPPROTO_AH = 51 // authentication header. + IPPROTO_MTP = 92 // Multicast Transport Protocol. + IPPROTO_BEETPH = 94 // IP option pseudo header for BEET. + IPPROTO_ENCAP = 98 // Encapsulation Header. + IPPROTO_PIM = 103 // Protocol Independent Multicast. + IPPROTO_COMP = 108 // Compression Header Protocol. + IPPROTO_SCTP = 132 // Stream Control Transmission Protocol. + IPPROTO_UDPLITE = 136 // UDP-Lite protocol. + IPPROTO_MPLS = 137 // MPLS in IP. + IPPROTO_RAW = 255 // Raw IP packets. + IPPROTO_MAX = 256 +) + +// If __USE_KERNEL_IPV6_DEFS is 1 then the user has included the kernel +// +// network headers first and we should use those ABI-identical definitions +// instead of our own, otherwise 0. +const ( /* in.h:99:1: */ + IPPROTO_HOPOPTS = 0 // IPv6 Hop-by-Hop options. + IPPROTO_ROUTING = 43 // IPv6 routing header. + IPPROTO_FRAGMENT = 44 // IPv6 fragmentation header. + IPPROTO_ICMPV6 = 58 // ICMPv6. + IPPROTO_NONE = 59 // IPv6 no next header. 
+ IPPROTO_DSTOPTS = 60 // IPv6 destination options. + IPPROTO_MH = 135 +) + +// Bits in the FLAGS argument to `send', `recv', et al. +const ( /* socket.h:200:1: */ + MSG_OOB = 1 // Process out-of-band data. + MSG_PEEK = 2 // Peek at incoming messages. + MSG_DONTROUTE = 4 // Don't use local routing. + MSG_CTRUNC = 8 // Control data lost before delivery. + MSG_PROXY = 16 // Supply or ask second address. + MSG_TRUNC = 32 + MSG_DONTWAIT = 64 // Nonblocking IO. + MSG_EOR = 128 // End of record. + MSG_WAITALL = 256 // Wait for a full request. + MSG_FIN = 512 + MSG_SYN = 1024 + MSG_CONFIRM = 2048 // Confirm path validity. + MSG_RST = 4096 + MSG_ERRQUEUE = 8192 // Fetch message from error queue. + MSG_NOSIGNAL = 16384 // Do not generate SIGPIPE. + MSG_MORE = 32768 // Sender will send more. + MSG_WAITFORONE = 65536 // Wait for at least one packet to return. + MSG_BATCH = 262144 // sendmmsg: more messages coming. + MSG_ZEROCOPY = 67108864 // Use user data in kernel path. + MSG_FASTOPEN = 536870912 // Send data in TCP SYN. + + MSG_CMSG_CLOEXEC = 1073741824 +) + +// Socket level message types. This must match the definitions in +// +// . +const ( /* socket.h:332:1: */ + SCM_RIGHTS = 1 +) + +// Get the architecture-dependent definition of enum __socket_type. +// Define enum __socket_type for generic Linux. +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Types of sockets. +const ( /* socket_type.h:24:1: */ + SOCK_STREAM = 1 // Sequenced, reliable, connection-based + // byte streams. + SOCK_DGRAM = 2 // Connectionless, unreliable datagrams + // of fixed maximum length. + SOCK_RAW = 3 // Raw protocol interface. + SOCK_RDM = 4 // Reliably-delivered messages. + SOCK_SEQPACKET = 5 // Sequenced, reliable, connection-based, + // datagrams of fixed maximum length. + SOCK_DCCP = 6 // Datagram Congestion Control Protocol. + SOCK_PACKET = 10 // Linux specific way of getting packets + // at the dev level. For writing rarp and + // other similar things on the user level. + + // Flags to be ORed into the type parameter of socket and socketpair and + // used for the flags parameter of paccept. + + SOCK_CLOEXEC = 524288 // Atomically set close-on-exec flag for the + // new descriptor(s). + SOCK_NONBLOCK = 2048 +) + +// The following constants should be used for the second parameter of +// +// `shutdown'. +const ( /* socket.h:41:1: */ + SHUT_RD = 0 // No more receptions. + SHUT_WR = 1 // No more transmissions. 
+ SHUT_RDWR = 2 +) + +type Ptrdiff_t = int64 /* :3:26 */ + +type Size_t = uint64 /* :9:23 */ + +type Wchar_t = int32 /* :15:24 */ + +type X__int128_t = struct { + Flo int64 + Fhi int64 +} /* :21:43 */ // must match modernc.org/mathutil.Int128 +type X__uint128_t = struct { + Flo uint64 + Fhi uint64 +} /* :22:44 */ // must match modernc.org/mathutil.Int128 + +type X__builtin_va_list = uintptr /* :46:14 */ +type X__float128 = float64 /* :47:21 */ + +// Copyright (C) 1996-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// All data returned by the network data base library are supplied in +// host order and returned in network order (suitable for use in +// system calls). + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// These are defined by the user (or the compiler) +// to specify the desired environment: +// +// __STRICT_ANSI__ ISO Standard C. +// _ISOC99_SOURCE Extensions to ISO C89 from ISO C99. +// _ISOC11_SOURCE Extensions to ISO C99 from ISO C11. +// _ISOC2X_SOURCE Extensions to ISO C99 from ISO C2X. +// __STDC_WANT_LIB_EXT2__ +// Extensions to ISO C99 from TR 27431-2:2010. +// __STDC_WANT_IEC_60559_BFP_EXT__ +// Extensions to ISO C11 from TS 18661-1:2014. +// __STDC_WANT_IEC_60559_FUNCS_EXT__ +// Extensions to ISO C11 from TS 18661-4:2015. +// __STDC_WANT_IEC_60559_TYPES_EXT__ +// Extensions to ISO C11 from TS 18661-3:2015. +// +// _POSIX_SOURCE IEEE Std 1003.1. +// _POSIX_C_SOURCE If ==1, like _POSIX_SOURCE; if >=2 add IEEE Std 1003.2; +// if >=199309L, add IEEE Std 1003.1b-1993; +// if >=199506L, add IEEE Std 1003.1c-1995; +// if >=200112L, all of IEEE 1003.1-2004 +// if >=200809L, all of IEEE 1003.1-2008 +// _XOPEN_SOURCE Includes POSIX and XPG things. Set to 500 if +// Single Unix conformance is wanted, to 600 for the +// sixth revision, to 700 for the seventh revision. +// _XOPEN_SOURCE_EXTENDED XPG things and X/Open Unix extensions. +// _LARGEFILE_SOURCE Some more functions for correct standard I/O. +// _LARGEFILE64_SOURCE Additional functionality from LFS for large files. +// _FILE_OFFSET_BITS=N Select default filesystem interface. 
+// _ATFILE_SOURCE Additional *at interfaces. +// _GNU_SOURCE All of the above, plus GNU extensions. +// _DEFAULT_SOURCE The default set of features (taking precedence over +// __STRICT_ANSI__). +// +// _FORTIFY_SOURCE Add security hardening to many library functions. +// Set to 1 or 2; 2 performs stricter checks than 1. +// +// _REENTRANT, _THREAD_SAFE +// Obsolete; equivalent to _POSIX_C_SOURCE=199506L. +// +// The `-ansi' switch to the GNU C compiler, and standards conformance +// options such as `-std=c99', define __STRICT_ANSI__. If none of +// these are defined, or if _DEFAULT_SOURCE is defined, the default is +// to have _POSIX_SOURCE set to one and _POSIX_C_SOURCE set to +// 200809L, as well as enabling miscellaneous functions from BSD and +// SVID. If more than one of these are defined, they accumulate. For +// example __STRICT_ANSI__, _POSIX_SOURCE and _POSIX_C_SOURCE together +// give you ISO C, 1003.1, and 1003.2, but nothing else. +// +// These are defined by this file and are used by the +// header files to decide what to declare or define: +// +// __GLIBC_USE (F) Define things from feature set F. This is defined +// to 1 or 0; the subsequent macros are either defined +// or undefined, and those tests should be moved to +// __GLIBC_USE. +// __USE_ISOC11 Define ISO C11 things. +// __USE_ISOC99 Define ISO C99 things. +// __USE_ISOC95 Define ISO C90 AMD1 (C95) things. +// __USE_ISOCXX11 Define ISO C++11 things. +// __USE_POSIX Define IEEE Std 1003.1 things. +// __USE_POSIX2 Define IEEE Std 1003.2 things. +// __USE_POSIX199309 Define IEEE Std 1003.1, and .1b things. +// __USE_POSIX199506 Define IEEE Std 1003.1, .1b, .1c and .1i things. +// __USE_XOPEN Define XPG things. +// __USE_XOPEN_EXTENDED Define X/Open Unix things. +// __USE_UNIX98 Define Single Unix V2 things. +// __USE_XOPEN2K Define XPG6 things. +// __USE_XOPEN2KXSI Define XPG6 XSI things. +// __USE_XOPEN2K8 Define XPG7 things. +// __USE_XOPEN2K8XSI Define XPG7 XSI things. +// __USE_LARGEFILE Define correct standard I/O things. +// __USE_LARGEFILE64 Define LFS things with separate names. +// __USE_FILE_OFFSET64 Define 64bit interface as default. +// __USE_MISC Define things from 4.3BSD or System V Unix. +// __USE_ATFILE Define *at interfaces and AT_* constants for them. +// __USE_GNU Define GNU extensions. +// __USE_FORTIFY_LEVEL Additional security measures used, according to level. +// +// The macros `__GNU_LIBRARY__', `__GLIBC__', and `__GLIBC_MINOR__' are +// defined by this file unconditionally. `__GNU_LIBRARY__' is provided +// only for compatibility. All new code should use the other symbols +// to test for features. +// +// All macros listed above as possibly being defined by this file are +// explicitly undefined if they are not explicitly defined. +// Feature-test macros that are not defined by the user or compiler +// but are implied by the other feature-test macros defined (or by the +// lack of any definitions) are defined by the file. +// +// ISO C feature test macros depend on the definition of the macro +// when an affected header is included, not when the first system +// header is included, and so they are handled in +// , which does not have a multiple include +// guard. Feature test macros that can be handled from the first +// system header included are handled here. + +// Undefine everything, so we get a clean slate. + +// Suppress kernel-name space pollution unless user expressedly asks +// for it. + +// Convenience macro to test the version of gcc. 
+// Use like this: +// #if __GNUC_PREREQ (2,8) +// ... code requiring gcc 2.8 or later ... +// #endif +// Note: only works for GCC 2.0 and later, because __GNUC_MINOR__ was +// added in 2.0. + +// Similarly for clang. Features added to GCC after version 4.2 may +// or may not also be available in clang, and clang's definitions of +// __GNUC(_MINOR)__ are fixed at 4 and 2 respectively. Not all such +// features can be queried via __has_extension/__has_feature. + +// Whether to use feature set F. + +// _BSD_SOURCE and _SVID_SOURCE are deprecated aliases for +// _DEFAULT_SOURCE. If _DEFAULT_SOURCE is present we do not +// issue a warning; the expectation is that the source is being +// transitioned to use the new macro. + +// If _GNU_SOURCE was defined by the user, turn on all the other features. + +// If nothing (other than _GNU_SOURCE and _DEFAULT_SOURCE) is defined, +// define _DEFAULT_SOURCE. + +// This is to enable the ISO C2X extension. + +// This is to enable the ISO C11 extension. + +// This is to enable the ISO C99 extension. + +// This is to enable the ISO C90 Amendment 1:1995 extension. + +// If none of the ANSI/POSIX macros are defined, or if _DEFAULT_SOURCE +// is defined, use POSIX.1-2008 (or another version depending on +// _XOPEN_SOURCE). + +// Some C libraries once required _REENTRANT and/or _THREAD_SAFE to be +// defined in all multithreaded code. GNU libc has not required this +// for many years. We now treat them as compatibility synonyms for +// _POSIX_C_SOURCE=199506L, which is the earliest level of POSIX with +// comprehensive support for multithreaded code. Using them never +// lowers the selected level of POSIX conformance, only raises it. + +// The function 'gets' existed in C89, but is impossible to use +// safely. It has been removed from ISO C11 and ISO C++14. Note: for +// compatibility with various implementations of , this test +// must consider only the value of __cplusplus when compiling C++. + +// GNU formerly extended the scanf functions with modified format +// specifiers %as, %aS, and %a[...] that allocate a buffer for the +// input using malloc. This extension conflicts with ISO C99, which +// defines %a as a standalone format specifier that reads a floating- +// point number; moreover, POSIX.1-2008 provides the same feature +// using the modifier letter 'm' instead (%ms, %mS, %m[...]). +// +// We now follow C99 unless GNU extensions are active and the compiler +// is specifically in C89 or C++98 mode (strict or not). For +// instance, with GCC, -std=gnu11 will have C99-compliant scanf with +// or without -D_GNU_SOURCE, but -std=c89 -D_GNU_SOURCE will have the +// old extension. + +// Get definitions of __STDC_* predefined macros, if the compiler has +// not preincluded this header automatically. +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// This macro indicates that the installed library is the GNU C Library. +// For historic reasons the value now is 6 and this will stay from now +// on. The use of this variable is deprecated. Use __GLIBC__ and +// __GLIBC_MINOR__ now (see below) when you want to test for a specific +// GNU C library version and use the values in to get +// the sonames of the shared libraries. + +// Major and minor version number of the GNU C library package. Use +// these macros to test for features in specific releases. + +// This is here only because every header file already includes this one. +// Copyright (C) 1992-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// We are almost always included from features.h. + +// The GNU libc does not support any K&R compilers or the traditional mode +// of ISO C compilers anymore. Check for some of the combinations not +// anymore supported. + +// Some user header file might have defined this before. + +// All functions, except those with callbacks or those that +// synchronize memory, are leaf functions. + +// GCC can always grok prototypes. For C++ programs we add throw() +// to help it optimize the function calls. But this works only with +// gcc 2.8.x and egcs. For gcc 3.2 and up we even mark C functions +// as non-throwing using a function attribute since programs can use +// the -fexceptions options for C code as well. + +// Compilers that are not clang may object to +// #if defined __clang__ && __has_extension(...) +// even though they do not need to evaluate the right-hand side of the &&. + +// These two macros are not used in glibc anymore. They are kept here +// only because some other projects expect the macros to be defined. + +// For these things, GCC behaves the ANSI way normally, +// and the non-ANSI way under -traditional. + +// This is not a typedef so `const __ptr_t' does the right thing. + +// C++ needs to know that types and declarations are C, not C++. + +// Fortify support. + +// Support for flexible arrays. +// Headers that should use flexible arrays only if they're "real" +// (e.g. only if they won't affect sizeof()) should test +// #if __glibc_c99_flexarr_available. + +// __asm__ ("xyz") is used throughout the headers to rename functions +// at the assembly language level. This is wrapped by the __REDIRECT +// macro, in order to support compilers that can do this some other +// way. When compilers don't support asm-names at all, we have to do +// preprocessor tricks instead (which don't have exactly the right +// semantics, but it's the best we can do). 
+// +// Example: +// int __REDIRECT(setpgrp, (__pid_t pid, __pid_t pgrp), setpgid); + +// +// #elif __SOME_OTHER_COMPILER__ +// +// # define __REDIRECT(name, proto, alias) name proto; _Pragma("let " #name " = " #alias) + +// GCC has various useful declarations that can be made with the +// `__attribute__' syntax. All of the ways we use this do fine if +// they are omitted for compilers that don't understand it. + +// At some point during the gcc 2.96 development the `malloc' attribute +// for functions was introduced. We don't want to use it unconditionally +// (although this would be possible) since it generates warnings. + +// Tell the compiler which arguments to an allocation function +// indicate the size of the allocation. + +// At some point during the gcc 2.96 development the `pure' attribute +// for functions was introduced. We don't want to use it unconditionally +// (although this would be possible) since it generates warnings. + +// This declaration tells the compiler that the value is constant. + +// At some point during the gcc 3.1 development the `used' attribute +// for functions was introduced. We don't want to use it unconditionally +// (although this would be possible) since it generates warnings. + +// Since version 3.2, gcc allows marking deprecated functions. + +// Since version 4.5, gcc also allows one to specify the message printed +// when a deprecated function is used. clang claims to be gcc 4.2, but +// may also support this feature. + +// At some point during the gcc 2.8 development the `format_arg' attribute +// for functions was introduced. We don't want to use it unconditionally +// (although this would be possible) since it generates warnings. +// If several `format_arg' attributes are given for the same function, in +// gcc-3.0 and older, all but the last one are ignored. In newer gccs, +// all designated arguments are considered. + +// At some point during the gcc 2.97 development the `strfmon' format +// attribute for functions was introduced. We don't want to use it +// unconditionally (although this would be possible) since it +// generates warnings. + +// The nonull function attribute allows to mark pointer parameters which +// must not be NULL. + +// If fortification mode, we warn about unused results of certain +// function calls which can lead to problems. + +// Forces a function to be always inlined. +// The Linux kernel defines __always_inline in stddef.h (283d7573), and +// it conflicts with this definition. Therefore undefine it first to +// allow either header to be included first. + +// Associate error messages with the source location of the call site rather +// than with the source location inside the function. + +// GCC 4.3 and above with -std=c99 or -std=gnu99 implements ISO C99 +// inline semantics, unless -fgnu89-inline is used. Using __GNUC_STDC_INLINE__ +// or __GNUC_GNU_INLINE is not a good enough check for gcc because gcc versions +// older than 4.3 may define these macros and still not guarantee GNU inlining +// semantics. +// +// clang++ identifies itself as gcc-4.2, but has support for GNU inlining +// semantics, that can be checked for by using the __GNUC_STDC_INLINE_ and +// __GNUC_GNU_INLINE__ macro definitions. + +// GCC 4.3 and above allow passing all anonymous arguments of an +// __extern_always_inline function to some other vararg function. + +// It is possible to compile containing GCC extensions even if GCC is +// run in pedantic mode if the uses are carefully marked using the +// `__extension__' keyword. 
But this is not generally available before +// version 2.8. + +// __restrict is known in EGCS 1.2 and above. + +// ISO C99 also allows to declare arrays as non-overlapping. The syntax is +// array_name[restrict] +// GCC 3.1 supports this. + +// Describes a char array whose address can safely be passed as the first +// argument to strncpy and strncat, as the char array is not necessarily +// a NUL-terminated string. + +// Undefine (also defined in libc-symbols.h). +// Copies attributes from the declaration or type referenced by +// the argument. + +// Determine the wordsize from the preprocessor defines. + +// Both x86-64 and x32 use the 64-bit system call interface. +// Properties of long double type. ldbl-96 version. +// Copyright (C) 2016-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// long double is distinct from double, so there is nothing to +// define here. + +// __glibc_macro_warning (MESSAGE) issues warning MESSAGE. This is +// intended for use in preprocessor macros. +// +// Note: MESSAGE must be a _single_ string; concatenation of string +// literals is not supported. + +// Generic selection (ISO C11) is a C-only feature, available in GCC +// since version 4.9. Previous versions do not provide generic +// selection, even though they might set __STDC_VERSION__ to 201112L, +// when in -std=c11 mode. Thus, we must check for !defined __GNUC__ +// when testing __STDC_VERSION__ for generic selection support. +// On the other hand, Clang also defines __GNUC__, so a clang-specific +// check is required to enable the use of generic selection. + +// If we don't have __REDIRECT, prototypes will be missing if +// __USE_FILE_OFFSET64 but not __USE_LARGEFILE[64]. + +// Decide whether we can define 'extern inline' functions in headers. + +// This is here only because every header file already includes this one. +// Get the definitions of all the appropriate `__stub_FUNCTION' symbols. +// contains `#define __stub_FUNCTION' when FUNCTION is a stub +// that will always return failure (and set errno to ENOSYS). +// This file is automatically generated. +// This file selects the right generated file of `__stub_FUNCTION' macros +// based on the architecture being compiled for. + +// This file is automatically generated. +// It defines a symbol `__stub_FUNCTION' for each function +// in the C library which is a stub, meaning it will fail +// every time called, usually setting errno to ENOSYS. + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. 
+// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Define uintN_t types. +// Copyright (C) 2017-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Determine the wordsize from the preprocessor defines. + +// Both x86-64 and x32 use the 64-bit system call interface. +// Bit size of the time_t type at glibc build time, x86-64 and x32 case. +// Copyright (C) 2018-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// For others, time size is word size. + +// Convenience types. +type X__u_char = uint8 /* types.h:31:23 */ +type X__u_short = uint16 /* types.h:32:28 */ +type X__u_int = uint32 /* types.h:33:22 */ +type X__u_long = uint64 /* types.h:34:27 */ + +// Fixed-size types, underlying types depend on word size and compiler. +type X__int8_t = int8 /* types.h:37:21 */ +type X__uint8_t = uint8 /* types.h:38:23 */ +type X__int16_t = int16 /* types.h:39:26 */ +type X__uint16_t = uint16 /* types.h:40:28 */ +type X__int32_t = int32 /* types.h:41:20 */ +type X__uint32_t = uint32 /* types.h:42:22 */ +type X__int64_t = int64 /* types.h:44:25 */ +type X__uint64_t = uint64 /* types.h:45:27 */ + +// Smallest types with at least a given width. +type X__int_least8_t = X__int8_t /* types.h:52:18 */ +type X__uint_least8_t = X__uint8_t /* types.h:53:19 */ +type X__int_least16_t = X__int16_t /* types.h:54:19 */ +type X__uint_least16_t = X__uint16_t /* types.h:55:20 */ +type X__int_least32_t = X__int32_t /* types.h:56:19 */ +type X__uint_least32_t = X__uint32_t /* types.h:57:20 */ +type X__int_least64_t = X__int64_t /* types.h:58:19 */ +type X__uint_least64_t = X__uint64_t /* types.h:59:20 */ + +// quad_t is also 64 bits. +type X__quad_t = int64 /* types.h:63:18 */ +type X__u_quad_t = uint64 /* types.h:64:27 */ + +// Largest integral types. +type X__intmax_t = int64 /* types.h:72:18 */ +type X__uintmax_t = uint64 /* types.h:73:27 */ + +// The machine-dependent file defines __*_T_TYPE +// macros for each of the OS types we define below. The definitions +// of those macros must use the following macros for underlying types. +// We define __S_TYPE and __U_TYPE for the signed and unsigned +// variants of each of the following integer types on this machine. +// +// 16 -- "natural" 16-bit type (always short) +// 32 -- "natural" 32-bit type (always int) +// 64 -- "natural" 64-bit type (long or long long) +// LONG32 -- 32-bit type, traditionally long +// QUAD -- 64-bit type, traditionally long long +// WORD -- natural type of __WORDSIZE bits (int or long) +// LONGWORD -- type of __WORDSIZE bits, traditionally long +// +// We distinguish WORD/LONGWORD, 32/LONG32, and 64/QUAD so that the +// conventional uses of `long' or `long long' type modifiers match the +// types we define, even when a less-adorned type would be the same size. 
+// This matters for (somewhat) portably writing printf/scanf formats for +// these types, where using the appropriate l or ll format modifiers can +// make the typedefs and the formats match up across all GNU platforms. If +// we used `long' when it's 64 bits where `long long' is expected, then the +// compiler would warn about the formats not matching the argument types, +// and the programmer changing them to shut up the compiler would break the +// program's portability. +// +// Here we assume what is presently the case in all the GCC configurations +// we support: long long is always 64 bits, long is always word/address size, +// and int is always 32 bits. + +// No need to mark the typedef with __extension__. +// bits/typesizes.h -- underlying types for *_t. Linux/x86-64 version. +// Copyright (C) 2012-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// See for the meaning of these macros. This file exists so +// that need not vary across different GNU platforms. + +// X32 kernel interface is 64-bit. + +// Tell the libc code that off_t and off64_t are actually the same type +// for all ABI purposes, even if possibly expressed as different base types +// for C type-checking purposes. + +// Same for ino_t and ino64_t. + +// And for __rlim_t and __rlim64_t. + +// And for fsblkcnt_t, fsblkcnt64_t, fsfilcnt_t and fsfilcnt64_t. + +// Number of descriptors that can fit in an `fd_set'. + +// bits/time64.h -- underlying types for __time64_t. Generic version. +// Copyright (C) 2018-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Define __TIME64_T_TYPE so that it is always a 64-bit type. + +// If we already have 64-bit time type then use it. + +type X__dev_t = uint64 /* types.h:145:25 */ // Type of device numbers. +type X__uid_t = uint32 /* types.h:146:25 */ // Type of user identifications. +type X__gid_t = uint32 /* types.h:147:25 */ // Type of group identifications. +type X__ino_t = uint64 /* types.h:148:25 */ // Type of file serial numbers. +type X__ino64_t = uint64 /* types.h:149:27 */ // Type of file serial numbers (LFS). +type X__mode_t = uint32 /* types.h:150:26 */ // Type of file attribute bitmasks. 
+type X__nlink_t = uint64 /* types.h:151:27 */ // Type of file link counts. +type X__off_t = int64 /* types.h:152:25 */ // Type of file sizes and offsets. +type X__off64_t = int64 /* types.h:153:27 */ // Type of file sizes and offsets (LFS). +type X__pid_t = int32 /* types.h:154:25 */ // Type of process identifications. +type X__fsid_t = struct{ F__val [2]int32 } /* types.h:155:26 */ // Type of file system IDs. +type X__clock_t = int64 /* types.h:156:27 */ // Type of CPU usage counts. +type X__rlim_t = uint64 /* types.h:157:26 */ // Type for resource measurement. +type X__rlim64_t = uint64 /* types.h:158:28 */ // Type for resource measurement (LFS). +type X__id_t = uint32 /* types.h:159:24 */ // General type for IDs. +type X__time_t = int64 /* types.h:160:26 */ // Seconds since the Epoch. +type X__useconds_t = uint32 /* types.h:161:30 */ // Count of microseconds. +type X__suseconds_t = int64 /* types.h:162:31 */ // Signed count of microseconds. + +type X__daddr_t = int32 /* types.h:164:27 */ // The type of a disk address. +type X__key_t = int32 /* types.h:165:25 */ // Type of an IPC key. + +// Clock ID used in clock and timer functions. +type X__clockid_t = int32 /* types.h:168:29 */ + +// Timer ID returned by `timer_create'. +type X__timer_t = uintptr /* types.h:171:12 */ + +// Type to represent block size. +type X__blksize_t = int64 /* types.h:174:29 */ + +// Types from the Large File Support interface. + +// Type to count number of disk blocks. +type X__blkcnt_t = int64 /* types.h:179:28 */ +type X__blkcnt64_t = int64 /* types.h:180:30 */ + +// Type to count file system blocks. +type X__fsblkcnt_t = uint64 /* types.h:183:30 */ +type X__fsblkcnt64_t = uint64 /* types.h:184:32 */ + +// Type to count file system nodes. +type X__fsfilcnt_t = uint64 /* types.h:187:30 */ +type X__fsfilcnt64_t = uint64 /* types.h:188:32 */ + +// Type of miscellaneous file system fields. +type X__fsword_t = int64 /* types.h:191:28 */ + +type X__ssize_t = int64 /* types.h:193:27 */ // Type of a byte count, or error. + +// Signed long type used in system calls. +type X__syscall_slong_t = int64 /* types.h:196:33 */ +// Unsigned long type used in system calls. +type X__syscall_ulong_t = uint64 /* types.h:198:33 */ + +// These few don't really vary by system, they always correspond +// +// to one of the other defined types. +type X__loff_t = X__off64_t /* types.h:202:19 */ // Type of file sizes and offsets (LFS). +type X__caddr_t = uintptr /* types.h:203:14 */ + +// Duplicates info from stdint.h but this is used in unistd.h. +type X__intptr_t = int64 /* types.h:206:25 */ + +// Duplicate info from sys/socket.h. +type X__socklen_t = uint32 /* types.h:209:23 */ + +// C99: An integer type that can be accessed as an atomic entity, +// +// even in the presence of asynchronous interrupts. +// It is not currently necessary for this to be machine-specific. +type X__sig_atomic_t = int32 /* types.h:214:13 */ + +// Seconds since the Epoch, visible to user code when time_t is too +// narrow only for consistency with the old way of widening too-narrow +// types. User code should never use __time64_t. + +type Uint8_t = X__uint8_t /* stdint-uintn.h:24:19 */ +type Uint16_t = X__uint16_t /* stdint-uintn.h:25:20 */ +type Uint32_t = X__uint32_t /* stdint-uintn.h:26:20 */ +type Uint64_t = X__uint64_t /* stdint-uintn.h:27:20 */ + +// Wide character type. +// Locale-writers should change this as necessary to +// be big enough to hold unique values not between 0 and 127, +// and not (wchar_t) -1, for each defined multibyte character. 
+ +// Define this type if we are doing the whole job, +// or if we want this type in particular. + +// A null pointer constant. + +// Structure for scatter/gather I/O. +type Iovec = struct { + Fiov_base uintptr + Fiov_len Size_t +} /* struct_iovec.h:26:1 */ + +// Copyright (C) 1989-2020 Free Software Foundation, Inc. +// +// This file is part of GCC. +// +// GCC is free software; you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation; either version 3, or (at your option) +// any later version. +// +// GCC is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// Under Section 7 of GPL version 3, you are granted additional +// permissions described in the GCC Runtime Library Exception, version +// 3.1, as published by the Free Software Foundation. +// +// You should have received a copy of the GNU General Public License and +// a copy of the GCC Runtime Library Exception along with this program; +// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +// . + +// ISO C Standard: 7.17 Common definitions + +// Any one of these symbols __need_* means that GNU libc +// wants us just to define one data type. So don't define +// the symbols that indicate this file's entire job has been done. + +// This avoids lossage on SunOS but only if stdtypes.h comes first. +// There's no way to win with the other order! Sun lossage. + +// Sequent's header files use _PTRDIFF_T_ in some conflicting way. +// Just ignore it. + +// On VxWorks, may have defined macros like +// _TYPE_size_t which will typedef size_t. fixincludes patched the +// vxTypesBase.h so that this macro is only defined if _GCC_SIZE_T is +// not defined, and so that defining this macro defines _GCC_SIZE_T. +// If we find that the macros are still defined at this point, we must +// invoke them so that the type is defined as expected. + +// In case nobody has defined these types, but we aren't running under +// GCC 2.00, make sure that __PTRDIFF_TYPE__, __SIZE_TYPE__, and +// __WCHAR_TYPE__ have reasonable values. This can happen if the +// parts of GCC is compiled by an older compiler, that actually +// include gstddef.h, such as collect2. + +// Signed type of difference of two pointers. + +// Define this type if we are doing the whole job, +// or if we want this type in particular. + +// Unsigned type of `sizeof' something. + +// Define this type if we are doing the whole job, +// or if we want this type in particular. + +// Wide character type. +// Locale-writers should change this as necessary to +// be big enough to hold unique values not between 0 and 127, +// and not (wchar_t) -1, for each defined multibyte character. + +// Define this type if we are doing the whole job, +// or if we want this type in particular. + +// A null pointer constant. + +// This operating system-specific header file defines the SOCK_*, PF_*, +// AF_*, MSG_*, SOL_*, and SO_* constants, and the `struct sockaddr', +// `struct msghdr', and `struct linger' types. +// System-specific socket constants and types. Linux version. +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. 
+// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Copyright (C) 1989-2020 Free Software Foundation, Inc. +// +// This file is part of GCC. +// +// GCC is free software; you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation; either version 3, or (at your option) +// any later version. +// +// GCC is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// Under Section 7 of GPL version 3, you are granted additional +// permissions described in the GCC Runtime Library Exception, version +// 3.1, as published by the Free Software Foundation. +// +// You should have received a copy of the GNU General Public License and +// a copy of the GCC Runtime Library Exception along with this program; +// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +// . + +// ISO C Standard: 7.17 Common definitions + +// Any one of these symbols __need_* means that GNU libc +// wants us just to define one data type. So don't define +// the symbols that indicate this file's entire job has been done. + +// This avoids lossage on SunOS but only if stdtypes.h comes first. +// There's no way to win with the other order! Sun lossage. + +// Sequent's header files use _PTRDIFF_T_ in some conflicting way. +// Just ignore it. + +// On VxWorks, may have defined macros like +// _TYPE_size_t which will typedef size_t. fixincludes patched the +// vxTypesBase.h so that this macro is only defined if _GCC_SIZE_T is +// not defined, and so that defining this macro defines _GCC_SIZE_T. +// If we find that the macros are still defined at this point, we must +// invoke them so that the type is defined as expected. + +// In case nobody has defined these types, but we aren't running under +// GCC 2.00, make sure that __PTRDIFF_TYPE__, __SIZE_TYPE__, and +// __WCHAR_TYPE__ have reasonable values. This can happen if the +// parts of GCC is compiled by an older compiler, that actually +// include gstddef.h, such as collect2. + +// Signed type of difference of two pointers. + +// Define this type if we are doing the whole job, +// or if we want this type in particular. + +// Unsigned type of `sizeof' something. + +// Define this type if we are doing the whole job, +// or if we want this type in particular. + +// Wide character type. +// Locale-writers should change this as necessary to +// be big enough to hold unique values not between 0 and 127, +// and not (wchar_t) -1, for each defined multibyte character. + +// Define this type if we are doing the whole job, +// or if we want this type in particular. + +// A null pointer constant. + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. 
+// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// POSIX Standard: 2.6 Primitive System Data Types + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +type U_char = X__u_char /* types.h:33:18 */ +type U_short = X__u_short /* types.h:34:19 */ +type U_int = X__u_int /* types.h:35:17 */ +type U_long = X__u_long /* types.h:36:18 */ +type Quad_t = X__quad_t /* types.h:37:18 */ +type U_quad_t = X__u_quad_t /* types.h:38:20 */ +type Fsid_t = X__fsid_t /* types.h:39:18 */ +type Loff_t = X__loff_t /* types.h:42:18 */ + +type Ino_t = X__ino64_t /* types.h:49:19 */ + +type Dev_t = X__dev_t /* types.h:59:17 */ + +type Gid_t = X__gid_t /* types.h:64:17 */ + +type Mode_t = X__mode_t /* types.h:69:18 */ + +type Nlink_t = X__nlink_t /* types.h:74:19 */ + +type Uid_t = X__uid_t /* types.h:79:17 */ + +type Off_t = X__off64_t /* types.h:87:19 */ + +type Pid_t = X__pid_t /* types.h:97:17 */ + +type Id_t = X__id_t /* types.h:103:16 */ + +type Ssize_t = X__ssize_t /* types.h:108:19 */ + +type Daddr_t = X__daddr_t /* types.h:114:19 */ +type Caddr_t = X__caddr_t /* types.h:115:19 */ + +type Key_t = X__key_t /* types.h:121:17 */ + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. 
+// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Returned by `clock'. +type Clock_t = X__clock_t /* clock_t.h:7:19 */ + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Clock ID used in clock and timer functions. +type Clockid_t = X__clockid_t /* clockid_t.h:7:21 */ + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Returned by `time'. +type Time_t = X__time_t /* time_t.h:7:18 */ + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Timer ID returned by `timer_create'. +type Timer_t = X__timer_t /* timer_t.h:7:19 */ + +// Copyright (C) 1989-2020 Free Software Foundation, Inc. +// +// This file is part of GCC. +// +// GCC is free software; you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation; either version 3, or (at your option) +// any later version. +// +// GCC is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// Under Section 7 of GPL version 3, you are granted additional +// permissions described in the GCC Runtime Library Exception, version +// 3.1, as published by the Free Software Foundation. +// +// You should have received a copy of the GNU General Public License and +// a copy of the GCC Runtime Library Exception along with this program; +// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +// . + +// ISO C Standard: 7.17 Common definitions + +// Any one of these symbols __need_* means that GNU libc +// wants us just to define one data type. So don't define +// the symbols that indicate this file's entire job has been done. + +// This avoids lossage on SunOS but only if stdtypes.h comes first. +// There's no way to win with the other order! Sun lossage. + +// Sequent's header files use _PTRDIFF_T_ in some conflicting way. +// Just ignore it. + +// On VxWorks, may have defined macros like +// _TYPE_size_t which will typedef size_t. fixincludes patched the +// vxTypesBase.h so that this macro is only defined if _GCC_SIZE_T is +// not defined, and so that defining this macro defines _GCC_SIZE_T. +// If we find that the macros are still defined at this point, we must +// invoke them so that the type is defined as expected. + +// In case nobody has defined these types, but we aren't running under +// GCC 2.00, make sure that __PTRDIFF_TYPE__, __SIZE_TYPE__, and +// __WCHAR_TYPE__ have reasonable values. This can happen if the +// parts of GCC is compiled by an older compiler, that actually +// include gstddef.h, such as collect2. + +// Signed type of difference of two pointers. + +// Define this type if we are doing the whole job, +// or if we want this type in particular. + +// Unsigned type of `sizeof' something. + +// Define this type if we are doing the whole job, +// or if we want this type in particular. + +// Wide character type. +// Locale-writers should change this as necessary to +// be big enough to hold unique values not between 0 and 127, +// and not (wchar_t) -1, for each defined multibyte character. + +// Define this type if we are doing the whole job, +// or if we want this type in particular. + +// A null pointer constant. + +// Old compatibility names for C types. +type Ulong = uint64 /* types.h:148:27 */ +type Ushort = uint16 /* types.h:149:28 */ +type Uint = uint32 /* types.h:150:22 */ + +// These size-specific names are used by some of the inet code. + +// Define intN_t types. +// Copyright (C) 2017-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. 
+// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +type Int8_t = X__int8_t /* stdint-intn.h:24:18 */ +type Int16_t = X__int16_t /* stdint-intn.h:25:19 */ +type Int32_t = X__int32_t /* stdint-intn.h:26:19 */ +type Int64_t = X__int64_t /* stdint-intn.h:27:19 */ + +// These were defined by ISO C without the first `_'. +type U_int8_t = X__uint8_t /* types.h:158:19 */ +type U_int16_t = X__uint16_t /* types.h:159:20 */ +type U_int32_t = X__uint32_t /* types.h:160:20 */ +type U_int64_t = X__uint64_t /* types.h:161:20 */ + +type Register_t = int32 /* types.h:164:13 */ + +// It also defines `fd_set' and the FD_* macros for `select'. +// `fd_set' type and related macros, and `select'/`pselect' declarations. +// Copyright (C) 1996-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// POSIX 1003.1g: 6.2 Select from File Descriptor Sets + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. 
+// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Get definition of needed basic types. +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Get __FD_* definitions. +// Copyright (C) 1997-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Determine the wordsize from the preprocessor defines. + +// Both x86-64 and x32 use the 64-bit system call interface. + +// Get sigset_t. + +type X__sigset_t = struct{ F__val [16]uint64 } /* __sigset_t.h:8:3 */ + +// A set of signals to be blocked, unblocked, or waited for. +type Sigset_t = X__sigset_t /* sigset_t.h:7:20 */ + +// Get definition of timer specification structures. + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// A time value that is accurate to the nearest +// +// microsecond but also has a range of years. 
+type Timeval = struct { + Ftv_sec X__time_t + Ftv_usec X__suseconds_t +} /* struct_timeval.h:8:1 */ + +// NB: Include guard matches what uses. + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Endian macros for string.h functions +// Copyright (C) 1992-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// POSIX.1b structure for a time value. This is like a `struct timeval' but +// +// has nanoseconds instead of microseconds. +type Timespec = struct { + Ftv_sec X__time_t + Ftv_nsec X__syscall_slong_t +} /* struct_timespec.h:10:1 */ + +type Suseconds_t = X__suseconds_t /* select.h:43:23 */ + +// The fd_set member is required to be an array of longs. +type X__fd_mask = int64 /* select.h:49:18 */ + +// Some versions of define this macros. +// It's easier to assume 8-bit bytes than to get CHAR_BIT. + +// fd_set for select and pselect. +type Fd_set = struct{ F__fds_bits [16]X__fd_mask } /* select.h:70:5 */ + +// Maximum number of file descriptors in `fd_set'. + +// Sometimes the fd_set member is assumed to have this type. +type Fd_mask = X__fd_mask /* select.h:77:19 */ + +// Define some inlines helping to catch common problems. + +type Blksize_t = X__blksize_t /* types.h:185:21 */ + +// Types from the Large File Support interface. +type Blkcnt_t = X__blkcnt64_t /* types.h:205:22 */ // Type to count number of disk blocks. +type Fsblkcnt_t = X__fsblkcnt64_t /* types.h:209:24 */ // Type to count file system blocks. +type Fsfilcnt_t = X__fsfilcnt64_t /* types.h:213:24 */ // Type to count file system inodes. + +// Now add the thread types. +// Declaration of common pthread types for all architectures. +// Copyright (C) 2017-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. 
+// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// For internal mutex and condition variable definitions. +// Common threading primitives definitions for both POSIX and C11. +// Copyright (C) 2017-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Arch-specific definitions. Each architecture must define the following +// macros to define the expected sizes of pthread data types: +// +// __SIZEOF_PTHREAD_ATTR_T - size of pthread_attr_t. +// __SIZEOF_PTHREAD_MUTEX_T - size of pthread_mutex_t. +// __SIZEOF_PTHREAD_MUTEXATTR_T - size of pthread_mutexattr_t. +// __SIZEOF_PTHREAD_COND_T - size of pthread_cond_t. +// __SIZEOF_PTHREAD_CONDATTR_T - size of pthread_condattr_t. +// __SIZEOF_PTHREAD_RWLOCK_T - size of pthread_rwlock_t. +// __SIZEOF_PTHREAD_RWLOCKATTR_T - size of pthread_rwlockattr_t. +// __SIZEOF_PTHREAD_BARRIER_T - size of pthread_barrier_t. +// __SIZEOF_PTHREAD_BARRIERATTR_T - size of pthread_barrierattr_t. +// +// The additional macro defines any constraint for the lock alignment +// inside the thread structures: +// +// __LOCK_ALIGNMENT - for internal lock/futex usage. +// +// Same idea but for the once locking primitive: +// +// __ONCE_ALIGNMENT - for pthread_once_t/once_flag definition. + +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Determine the wordsize from the preprocessor defines. + +// Both x86-64 and x32 use the 64-bit system call interface. + +// Common definition of pthread_mutex_t. + +type X__pthread_internal_list = struct { + F__prev uintptr + F__next uintptr +} /* thread-shared-types.h:49:9 */ + +// Type to count file system inodes. + +// Now add the thread types. +// Declaration of common pthread types for all architectures. +// Copyright (C) 2017-2020 Free Software Foundation, Inc. 
+// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// For internal mutex and condition variable definitions. +// Common threading primitives definitions for both POSIX and C11. +// Copyright (C) 2017-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Arch-specific definitions. Each architecture must define the following +// macros to define the expected sizes of pthread data types: +// +// __SIZEOF_PTHREAD_ATTR_T - size of pthread_attr_t. +// __SIZEOF_PTHREAD_MUTEX_T - size of pthread_mutex_t. +// __SIZEOF_PTHREAD_MUTEXATTR_T - size of pthread_mutexattr_t. +// __SIZEOF_PTHREAD_COND_T - size of pthread_cond_t. +// __SIZEOF_PTHREAD_CONDATTR_T - size of pthread_condattr_t. +// __SIZEOF_PTHREAD_RWLOCK_T - size of pthread_rwlock_t. +// __SIZEOF_PTHREAD_RWLOCKATTR_T - size of pthread_rwlockattr_t. +// __SIZEOF_PTHREAD_BARRIER_T - size of pthread_barrier_t. +// __SIZEOF_PTHREAD_BARRIERATTR_T - size of pthread_barrierattr_t. +// +// The additional macro defines any constraint for the lock alignment +// inside the thread structures: +// +// __LOCK_ALIGNMENT - for internal lock/futex usage. +// +// Same idea but for the once locking primitive: +// +// __ONCE_ALIGNMENT - for pthread_once_t/once_flag definition. + +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Determine the wordsize from the preprocessor defines. + +// Both x86-64 and x32 use the 64-bit system call interface. + +// Common definition of pthread_mutex_t. 
+ +type X__pthread_list_t = X__pthread_internal_list /* thread-shared-types.h:53:3 */ + +type X__pthread_internal_slist = struct{ F__next uintptr } /* thread-shared-types.h:55:9 */ + +type X__pthread_slist_t = X__pthread_internal_slist /* thread-shared-types.h:58:3 */ + +// Arch-specific mutex definitions. A generic implementation is provided +// by sysdeps/nptl/bits/struct_mutex.h. If required, an architecture +// can override it by defining: +// +// 1. struct __pthread_mutex_s (used on both pthread_mutex_t and mtx_t +// definition). It should contains at least the internal members +// defined in the generic version. +// +// 2. __LOCK_ALIGNMENT for any extra attribute for internal lock used with +// atomic operations. +// +// 3. The macro __PTHREAD_MUTEX_INITIALIZER used for static initialization. +// It should initialize the mutex internal flag. + +// x86 internal mutex struct definitions. +// Copyright (C) 2019-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +type X__pthread_mutex_s = struct { + F__lock int32 + F__count uint32 + F__owner int32 + F__nusers uint32 + F__kind int32 + F__spins int16 + F__elision int16 + F__list X__pthread_list_t +} /* struct_mutex.h:22:1 */ + +// Arch-sepecific read-write lock definitions. A generic implementation is +// provided by struct_rwlock.h. If required, an architecture can override it +// by defining: +// +// 1. struct __pthread_rwlock_arch_t (used on pthread_rwlock_t definition). +// It should contain at least the internal members defined in the +// generic version. +// +// 2. The macro __PTHREAD_RWLOCK_INITIALIZER used for static initialization. +// It should initialize the rwlock internal type. + +// x86 internal rwlock struct definitions. +// Copyright (C) 2019-2020 Free Software Foundation, Inc. +// +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . 
+ +type X__pthread_rwlock_arch_t = struct { + F__readers uint32 + F__writers uint32 + F__wrphase_futex uint32 + F__writers_futex uint32 + F__pad3 uint32 + F__pad4 uint32 + F__cur_writer int32 + F__shared int32 + F__rwelision int8 + F__pad1 [7]uint8 + F__pad2 uint64 + F__flags uint32 + F__ccgo_pad1 [4]byte +} /* struct_rwlock.h:23:1 */ + +// Common definition of pthread_cond_t. + +type X__pthread_cond_s = struct { + F__0 struct{ F__wseq uint64 } + F__8 struct{ F__g1_start uint64 } + F__g_refs [2]uint32 + F__g_size [2]uint32 + F__g1_orig_size uint32 + F__wrefs uint32 + F__g_signals [2]uint32 +} /* thread-shared-types.h:92:1 */ + +// Thread identifiers. The structure of the attribute type is not +// +// exposed on purpose. +type Pthread_t = uint64 /* pthreadtypes.h:27:27 */ + +// Data structures for mutex handling. The structure of the attribute +// +// type is not exposed on purpose. +type Pthread_mutexattr_t = struct { + F__ccgo_pad1 [0]uint32 + F__size [4]int8 +} /* pthreadtypes.h:36:3 */ + +// Data structure for condition variable handling. The structure of +// +// the attribute type is not exposed on purpose. +type Pthread_condattr_t = struct { + F__ccgo_pad1 [0]uint32 + F__size [4]int8 +} /* pthreadtypes.h:45:3 */ + +// Keys for thread-specific data +type Pthread_key_t = uint32 /* pthreadtypes.h:49:22 */ + +// Once-only execution +type Pthread_once_t = int32 /* pthreadtypes.h:53:30 */ + +type Pthread_attr_t1 = struct { + F__ccgo_pad1 [0]uint64 + F__size [56]int8 +} /* pthreadtypes.h:56:1 */ + +type Pthread_attr_t = Pthread_attr_t1 /* pthreadtypes.h:62:30 */ + +type Pthread_mutex_t = struct{ F__data X__pthread_mutex_s } /* pthreadtypes.h:72:3 */ + +type Pthread_cond_t = struct{ F__data X__pthread_cond_s } /* pthreadtypes.h:80:3 */ + +// Data structure for reader-writer lock variable handling. The +// +// structure of the attribute type is deliberately not exposed. +type Pthread_rwlock_t = struct{ F__data X__pthread_rwlock_arch_t } /* pthreadtypes.h:91:3 */ + +type Pthread_rwlockattr_t = struct { + F__ccgo_pad1 [0]uint64 + F__size [8]int8 +} /* pthreadtypes.h:97:3 */ + +// POSIX spinlock data type. +type Pthread_spinlock_t = int32 /* pthreadtypes.h:103:22 */ + +// POSIX barriers data type. The structure of the type is +// +// deliberately not exposed. +type Pthread_barrier_t = struct { + F__ccgo_pad1 [0]uint64 + F__size [32]int8 +} /* pthreadtypes.h:112:3 */ + +type Pthread_barrierattr_t = struct { + F__ccgo_pad1 [0]uint32 + F__size [4]int8 +} /* pthreadtypes.h:118:3 */ + +// Type for length arguments in socket calls. +type Socklen_t = X__socklen_t /* socket.h:33:21 */ + +// Protocol families. + +// Address families. + +// Socket level values. Others are defined in the appropriate headers. +// +// XXX These definitions also should go into the appropriate headers as +// far as they are available. + +// Maximum queue length specifiable by listen. + +// Get the definition of the macro to define the common sockaddr members. +// Definition of struct sockaddr_* common members and sizes, generic version. +// Copyright (C) 1995-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. 
+// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// POSIX.1g specifies this type name for the `sa_family' member. +type Sa_family_t = uint16 /* sockaddr.h:28:28 */ + +// This macro is used to declare the initial common members +// of the data types used for socket addresses, `struct sockaddr', +// `struct sockaddr_in', `struct sockaddr_un', etc. + +// Size of struct sockaddr_storage. + +// Structure describing a generic socket address. +type Sockaddr = struct { + Fsa_family Sa_family_t + Fsa_data [14]int8 +} /* socket.h:178:1 */ + +// Structure large enough to hold any socket address (with the historical +// exception of AF_UNIX). + +type Sockaddr_storage = struct { + Fss_family Sa_family_t + F__ss_padding [118]int8 + F__ss_align uint64 +} /* socket.h:191:1 */ + +// Structure describing messages sent by +// +// `sendmsg' and received by `recvmsg'. +type Msghdr = struct { + Fmsg_name uintptr + Fmsg_namelen Socklen_t + F__ccgo_pad1 [4]byte + Fmsg_iov uintptr + Fmsg_iovlen Size_t + Fmsg_control uintptr + Fmsg_controllen Size_t + Fmsg_flags int32 + F__ccgo_pad2 [4]byte +} /* socket.h:257:1 */ + +// Structure used for storage of ancillary data object information. +type Cmsghdr = struct { + F__ccgo_pad1 [0]uint64 + Fcmsg_len Size_t + Fcmsg_level int32 + Fcmsg_type int32 +} /* socket.h:275:1 */ + +// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note + +// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note + +// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note + +// This allows for 1024 file descriptors: if NR_OPEN is ever grown +// beyond that you'll have to change this too. But 1024 fd's seem to be +// enough even for such "real" unices like OSF/1, so hopefully this is +// one limit that doesn't have to be changed [again]. +// +// Note that POSIX wants the FD_CLEAR(fd,fdsetp) defines to be in +// (and thus ) - but this is a more logical +// place for them. Solved by having dummy defines in . + +// This macro may have been defined in . But we always +// use the one here. + +type X__kernel_fd_set = struct{ Ffds_bits [16]uint64 } /* posix_types.h:27:3 */ + +// Type of a signal handler. +type X__kernel_sighandler_t = uintptr /* posix_types.h:30:14 */ + +// Type of a SYSV IPC key. +type X__kernel_key_t = int32 /* posix_types.h:33:13 */ +type X__kernel_mqd_t = int32 /* posix_types.h:34:13 */ + +// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note +// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note + +// This file is generally used by user-level software, so you need to +// be a little careful about namespace pollution etc. Also, we cannot +// assume GCC is being used. 
+ +type X__kernel_old_uid_t = uint16 /* posix_types_64.h:11:24 */ +type X__kernel_old_gid_t = uint16 /* posix_types_64.h:12:24 */ + +type X__kernel_old_dev_t = uint64 /* posix_types_64.h:15:23 */ + +// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note + +// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note + +// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note + +// There seems to be no way of detecting this automatically from user +// space, so 64 bit architectures should override this in their +// bitsperlong.h. In particular, an architecture that supports +// both 32 and 64 bit user space must not rely on CONFIG_64BIT +// to decide it, but rather check a compiler provided macro. + +// This file is generally used by user-level software, so you need to +// be a little careful about namespace pollution etc. +// +// First the types that are often defined in different ways across +// architectures, so that you can override them. + +type X__kernel_long_t = int64 /* posix_types.h:15:15 */ +type X__kernel_ulong_t = uint64 /* posix_types.h:16:23 */ + +type X__kernel_ino_t = X__kernel_ulong_t /* posix_types.h:20:26 */ + +type X__kernel_mode_t = uint32 /* posix_types.h:24:22 */ + +type X__kernel_pid_t = int32 /* posix_types.h:28:14 */ + +type X__kernel_ipc_pid_t = int32 /* posix_types.h:32:14 */ + +type X__kernel_uid_t = uint32 /* posix_types.h:36:22 */ +type X__kernel_gid_t = uint32 /* posix_types.h:37:22 */ + +type X__kernel_suseconds_t = X__kernel_long_t /* posix_types.h:41:26 */ + +type X__kernel_daddr_t = int32 /* posix_types.h:45:14 */ + +type X__kernel_uid32_t = uint32 /* posix_types.h:49:22 */ +type X__kernel_gid32_t = uint32 /* posix_types.h:50:22 */ + +// Most 32 bit architectures use "unsigned int" size_t, +// and all 64 bit architectures use "unsigned long" size_t. +type X__kernel_size_t = X__kernel_ulong_t /* posix_types.h:72:26 */ +type X__kernel_ssize_t = X__kernel_long_t /* posix_types.h:73:25 */ +type X__kernel_ptrdiff_t = X__kernel_long_t /* posix_types.h:74:25 */ + +type X__kernel_fsid_t = struct{ Fval [2]int32 } /* posix_types.h:81:3 */ + +// anything below here should be completely generic +type X__kernel_off_t = X__kernel_long_t /* posix_types.h:87:25 */ +type X__kernel_loff_t = int64 /* posix_types.h:88:19 */ +type X__kernel_old_time_t = X__kernel_long_t /* posix_types.h:89:25 */ +type X__kernel_time_t = X__kernel_long_t /* posix_types.h:90:25 */ +type X__kernel_time64_t = int64 /* posix_types.h:91:19 */ +type X__kernel_clock_t = X__kernel_long_t /* posix_types.h:92:25 */ +type X__kernel_timer_t = int32 /* posix_types.h:93:14 */ +type X__kernel_clockid_t = int32 /* posix_types.h:94:14 */ +type X__kernel_caddr_t = uintptr /* posix_types.h:95:14 */ +type X__kernel_uid16_t = uint16 /* posix_types.h:96:24 */ +type X__kernel_gid16_t = uint16 /* posix_types.h:97:24 */ + +// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note + +// Socket-level I/O control calls. + +// For setsockopt(2) + +// Security levels - as per NRL IPv6 - don't actually do anything + +// Socket filtering + +// Instruct lower device to use last 4-bytes of skb data as FCS + +// on 64-bit and x32, avoid the ?: operator + +// Structure used to manipulate the SO_LINGER option. +type Linger = struct { + Fl_onoff int32 + Fl_linger int32 +} /* socket.h:361:1 */ + +// This is the 4.3 BSD `struct sockaddr' format, which is used as wire +// +// format in the grotty old 4.3 `talk' protocol. 
+type Osockaddr = struct { + Fsa_family uint16 + Fsa_data [14]uint8 +} /* struct_osockaddr.h:6:1 */ + +// Define some macros helping to catch buffer overflows. + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Internet address. +type In_addr_t = Uint32_t /* in.h:30:18 */ +type In_addr = struct{ Fs_addr In_addr_t } /* in.h:31:1 */ + +// Get system-specific definitions. +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Linux version. + +// If the application has already included linux/in6.h from a linux-based +// kernel then we will not define the IPv6 IPPROTO_* defines, in6_addr (nor the +// defines), sockaddr_in6, or ipv6_mreq. Same for in6_ptkinfo or ip6_mtuinfo +// in linux/ipv6.h. The ABI used by the linux-kernel and glibc match exactly. +// Neither the linux kernel nor glibc should break this ABI without coordination. +// In upstream kernel 56c176c9 the _UAPI prefix was stripped so we need to check +// for _LINUX_IN6_H and _IPV6_H now, and keep checking the old versions for +// maximum backwards compatibility. + +// Options for use with `getsockopt' and `setsockopt' at the IP level. +// The first word in the comment at the right is the data type used; +// "bool" means a boolean value stored in an `int'. +// For BSD compatibility. + +// TProxy original addresses + +// IP_MTU_DISCOVER arguments. +// Always use interface mtu (ignores dst pmtu) but don't set DF flag. +// Also incoming ICMP frag_needed notifications will be ignored on +// this socket to prevent accepting spoofed ones. +// Like IP_PMTUDISC_INTERFACE but allow packets to be fragmented. + +// To select the IP level. + +// Structure used to describe IP options for IP_OPTIONS and IP_RETOPTS. +// +// The `ip_dst' field is used for the first-hop gateway when using a +// source route (this gets put into the header proper). 
+type Ip_opts = struct { + Fip_dst struct{ Fs_addr In_addr_t } + Fip_opts [40]int8 +} /* in.h:142:1 */ + +// Like `struct ip_mreq' but including interface specification by index. +type Ip_mreqn = struct { + Fimr_multiaddr struct{ Fs_addr In_addr_t } + Fimr_address struct{ Fs_addr In_addr_t } + Fimr_ifindex int32 +} /* in.h:149:1 */ + +// Structure used for IP_PKTINFO. +type In_pktinfo = struct { + Fipi_ifindex int32 + Fipi_spec_dst struct{ Fs_addr In_addr_t } + Fipi_addr struct{ Fs_addr In_addr_t } +} /* in.h:157:1 */ + +// Type to represent a port. +type In_port_t = Uint16_t /* in.h:119:18 */ + +// Definitions of the bits in an Internet address integer. +// +// On subnets, host and network parts are found according to +// the subnet mask, not these masks. + +// Address to accept any incoming messages. +// Address to send to all hosts. +// Address indicating an error return. + +// Network number for local host loopback. +// Address to loopback in software to local host. + +// Defines for Multicast INADDR. + +// IPv6 address +type In6_addr = struct { + F__in6_u struct { + F__ccgo_pad1 [0]uint32 + F__u6_addr8 [16]Uint8_t + } +} /* in.h:212:1 */ + +// ::1 + +// Structure describing an Internet socket address. +type Sockaddr_in = struct { + Fsin_family Sa_family_t + Fsin_port In_port_t + Fsin_addr struct{ Fs_addr In_addr_t } + Fsin_zero [8]uint8 +} /* in.h:238:1 */ + +// Ditto, for IPv6. +type Sockaddr_in6 = struct { + Fsin6_family Sa_family_t + Fsin6_port In_port_t + Fsin6_flowinfo Uint32_t + Fsin6_addr struct { + F__in6_u struct { + F__ccgo_pad1 [0]uint32 + F__u6_addr8 [16]Uint8_t + } + } + Fsin6_scope_id Uint32_t +} /* in.h:253:1 */ + +// IPv4 multicast request. +type Ip_mreq = struct { + Fimr_multiaddr struct{ Fs_addr In_addr_t } + Fimr_interface struct{ Fs_addr In_addr_t } +} /* in.h:265:1 */ + +type Ip_mreq_source = struct { + Fimr_multiaddr struct{ Fs_addr In_addr_t } + Fimr_interface struct{ Fs_addr In_addr_t } + Fimr_sourceaddr struct{ Fs_addr In_addr_t } +} /* in.h:274:1 */ + +// Likewise, for IPv6. +type Ipv6_mreq = struct { + Fipv6mr_multiaddr struct { + F__in6_u struct { + F__ccgo_pad1 [0]uint32 + F__u6_addr8 [16]Uint8_t + } + } + Fipv6mr_interface uint32 +} /* in.h:289:1 */ + +// Multicast group request. +type Group_req = struct { + Fgr_interface Uint32_t + F__ccgo_pad1 [4]byte + Fgr_group struct { + Fss_family Sa_family_t + F__ss_padding [118]int8 + F__ss_align uint64 + } +} /* in.h:301:1 */ + +type Group_source_req = struct { + Fgsr_interface Uint32_t + F__ccgo_pad1 [4]byte + Fgsr_group struct { + Fss_family Sa_family_t + F__ss_padding [118]int8 + F__ss_align uint64 + } + Fgsr_source struct { + Fss_family Sa_family_t + F__ss_padding [118]int8 + F__ss_align uint64 + } +} /* in.h:310:1 */ + +// Full-state filter operations. +type Ip_msfilter = struct { + Fimsf_multiaddr struct{ Fs_addr In_addr_t } + Fimsf_interface struct{ Fs_addr In_addr_t } + Fimsf_fmode Uint32_t + Fimsf_numsrc Uint32_t + Fimsf_slist [1]struct{ Fs_addr In_addr_t } +} /* in.h:324:1 */ + +type Group_filter = struct { + Fgf_interface Uint32_t + F__ccgo_pad1 [4]byte + Fgf_group struct { + Fss_family Sa_family_t + F__ss_padding [118]int8 + F__ss_align uint64 + } + Fgf_fmode Uint32_t + Fgf_numsrc Uint32_t + Fgf_slist [1]struct { + Fss_family Sa_family_t + F__ss_padding [118]int8 + F__ss_align uint64 + } +} /* in.h:345:1 */ + +// Define uintN_t types. +// Copyright (C) 2017-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. 
+// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// This is necessary to make this include file properly replace the +// Sun version. +// @(#)netdb.h 2.1 88/07/29 3.9 RPCSRC +// Copyright (c) 2010, Oracle America, Inc. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials +// provided with the distribution. +// * Neither the name of the "Oracle America, Inc." nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, +// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +// GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Cleaned up for GNU C library roland@gnu.ai.mit.edu: +// added multiple inclusion protection and use of . +// In GNU this file is #include'd by . + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Copyright (C) 1989-2020 Free Software Foundation, Inc. +// +// This file is part of GCC. 
+// +// GCC is free software; you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation; either version 3, or (at your option) +// any later version. +// +// GCC is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// Under Section 7 of GPL version 3, you are granted additional +// permissions described in the GCC Runtime Library Exception, version +// 3.1, as published by the Free Software Foundation. +// +// You should have received a copy of the GNU General Public License and +// a copy of the GCC Runtime Library Exception along with this program; +// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +// . + +// ISO C Standard: 7.17 Common definitions + +// Any one of these symbols __need_* means that GNU libc +// wants us just to define one data type. So don't define +// the symbols that indicate this file's entire job has been done. + +// This avoids lossage on SunOS but only if stdtypes.h comes first. +// There's no way to win with the other order! Sun lossage. + +// Sequent's header files use _PTRDIFF_T_ in some conflicting way. +// Just ignore it. + +// On VxWorks, may have defined macros like +// _TYPE_size_t which will typedef size_t. fixincludes patched the +// vxTypesBase.h so that this macro is only defined if _GCC_SIZE_T is +// not defined, and so that defining this macro defines _GCC_SIZE_T. +// If we find that the macros are still defined at this point, we must +// invoke them so that the type is defined as expected. + +// In case nobody has defined these types, but we aren't running under +// GCC 2.00, make sure that __PTRDIFF_TYPE__, __SIZE_TYPE__, and +// __WCHAR_TYPE__ have reasonable values. This can happen if the +// parts of GCC is compiled by an older compiler, that actually +// include gstddef.h, such as collect2. + +// Signed type of difference of two pointers. + +// Define this type if we are doing the whole job, +// or if we want this type in particular. + +// Unsigned type of `sizeof' something. + +// Define this type if we are doing the whole job, +// or if we want this type in particular. + +// Wide character type. +// Locale-writers should change this as necessary to +// be big enough to hold unique values not between 0 and 127, +// and not (wchar_t) -1, for each defined multibyte character. + +// Define this type if we are doing the whole job, +// or if we want this type in particular. + +// A null pointer constant. + +type Rpcent = struct { + Fr_name uintptr + Fr_aliases uintptr + Fr_number int32 + F__ccgo_pad1 [4]byte +} /* netdb.h:46:1 */ + +// Copyright (C) 1996-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Description of data base entry for a single network. NOTE: here a +// +// poor assumption is made. The network number is expected to fit +// into an unsigned long int variable. +type Netent = struct { + Fn_name uintptr + Fn_aliases uintptr + Fn_addrtype int32 + Fn_net Uint32_t +} /* netdb.h:26:1 */ + +// Description of data base entry for a single host. +type Hostent = struct { + Fh_name uintptr + Fh_aliases uintptr + Fh_addrtype int32 + Fh_length int32 + Fh_addr_list uintptr +} /* netdb.h:98:1 */ + +// Description of data base entry for a single service. +type Servent = struct { + Fs_name uintptr + Fs_aliases uintptr + Fs_port int32 + F__ccgo_pad1 [4]byte + Fs_proto uintptr +} /* netdb.h:255:1 */ + +// Description of data base entry for a single service. +type Protoent = struct { + Fp_name uintptr + Fp_aliases uintptr + Fp_proto int32 + F__ccgo_pad1 [4]byte +} /* netdb.h:324:1 */ + +// Extension from POSIX.1:2001. +// Structure to contain information about address of a service provider. +type Addrinfo = struct { + Fai_flags int32 + Fai_family int32 + Fai_socktype int32 + Fai_protocol int32 + Fai_addrlen Socklen_t + F__ccgo_pad1 [4]byte + Fai_addr uintptr + Fai_canonname uintptr + Fai_next uintptr +} /* netdb.h:565:1 */ + +var _ int8 /* gen.c:2:13: */ diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/netinet/in/capi_linux_loong64.go temporal-1.22.5/src/vendor/modernc.org/libc/netinet/in/capi_linux_loong64.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/netinet/in/capi_linux_loong64.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/netinet/in/capi_linux_loong64.go 2024-02-23 09:46:15.000000000 +0000 @@ -0,0 +1,5 @@ +// Code generated by 'ccgo netinet/in/gen.c -crt-import-path -export-defines -export-enums -export-externs X -export-fields F -export-structs -export-typedefs -header -hide _OSSwapInt16,_OSSwapInt32,_OSSwapInt64 -ignore-unsupported-alignment -o netinet/in/in_linux_amd64.go -pkgname in', DO NOT EDIT. + +package in + +var CAPI = map[string]struct{}{} diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/netinet/in/in_linux_loong64.go temporal-1.22.5/src/vendor/modernc.org/libc/netinet/in/in_linux_loong64.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/netinet/in/in_linux_loong64.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/netinet/in/in_linux_loong64.go 2024-02-23 09:46:15.000000000 +0000 @@ -0,0 +1,2790 @@ +// Code generated by 'ccgo netinet/in/gen.c -crt-import-path "" -export-defines "" -export-enums "" -export-externs X -export-fields F -export-structs "" -export-typedefs "" -header -hide _OSSwapInt16,_OSSwapInt32,_OSSwapInt64 -ignore-unsupported-alignment -o netinet/in/in_linux_amd64.go -pkgname in', DO NOT EDIT. 
+ +package in + +import ( + "math" + "reflect" + "sync/atomic" + "unsafe" +) + +var _ = math.Pi +var _ reflect.Kind +var _ atomic.Value +var _ unsafe.Pointer + +const ( + AF_ALG = 38 // socket.h:133:1: + AF_APPLETALK = 5 // socket.h:99:1: + AF_ASH = 18 // socket.h:113:1: + AF_ATMPVC = 8 // socket.h:102:1: + AF_ATMSVC = 20 // socket.h:115:1: + AF_AX25 = 3 // socket.h:97:1: + AF_BLUETOOTH = 31 // socket.h:126:1: + AF_BRIDGE = 7 // socket.h:101:1: + AF_CAIF = 37 // socket.h:132:1: + AF_CAN = 29 // socket.h:124:1: + AF_DECnet = 12 // socket.h:106:1: + AF_ECONET = 19 // socket.h:114:1: + AF_FILE = 1 // socket.h:95:1: + AF_IB = 27 // socket.h:122:1: + AF_IEEE802154 = 36 // socket.h:131:1: + AF_INET = 2 // socket.h:96:1: + AF_INET6 = 10 // socket.h:104:1: + AF_IPX = 4 // socket.h:98:1: + AF_IRDA = 23 // socket.h:118:1: + AF_ISDN = 34 // socket.h:129:1: + AF_IUCV = 32 // socket.h:127:1: + AF_KCM = 41 // socket.h:136:1: + AF_KEY = 15 // socket.h:109:1: + AF_LLC = 26 // socket.h:121:1: + AF_LOCAL = 1 // socket.h:93:1: + AF_MAX = 45 // socket.h:140:1: + AF_MPLS = 28 // socket.h:123:1: + AF_NETBEUI = 13 // socket.h:107:1: + AF_NETLINK = 16 // socket.h:110:1: + AF_NETROM = 6 // socket.h:100:1: + AF_NFC = 39 // socket.h:134:1: + AF_PACKET = 17 // socket.h:112:1: + AF_PHONET = 35 // socket.h:130:1: + AF_PPPOX = 24 // socket.h:119:1: + AF_QIPCRTR = 42 // socket.h:137:1: + AF_RDS = 21 // socket.h:116:1: + AF_ROSE = 11 // socket.h:105:1: + AF_ROUTE = 16 // socket.h:111:1: + AF_RXRPC = 33 // socket.h:128:1: + AF_SECURITY = 14 // socket.h:108:1: + AF_SMC = 43 // socket.h:138:1: + AF_SNA = 22 // socket.h:117:1: + AF_TIPC = 30 // socket.h:125:1: + AF_UNIX = 1 // socket.h:94:1: + AF_UNSPEC = 0 // socket.h:92:1: + AF_VSOCK = 40 // socket.h:135:1: + AF_WANPIPE = 25 // socket.h:120:1: + AF_X25 = 9 // socket.h:103:1: + AF_XDP = 44 // socket.h:139:1: + BIG_ENDIAN = 4321 // endian.h:28:1: + BYTE_ORDER = 1234 // endian.h:30:1: + FD_SETSIZE = 1024 // select.h:73:1: + FIOGETOWN = 0x8903 // sockios.h:8:1: + FIOSETOWN = 0x8901 // sockios.h:6:1: + INET6_ADDRSTRLEN = 46 // in.h:234:1: + INET_ADDRSTRLEN = 16 // in.h:233:1: + IN_CLASSA_HOST = 16777215 // in.h:169:1: + IN_CLASSA_MAX = 128 // in.h:170:1: + IN_CLASSA_NET = 0xff000000 // in.h:167:1: + IN_CLASSA_NSHIFT = 24 // in.h:168:1: + IN_CLASSB_HOST = 65535 // in.h:175:1: + IN_CLASSB_MAX = 65536 // in.h:176:1: + IN_CLASSB_NET = 0xffff0000 // in.h:173:1: + IN_CLASSB_NSHIFT = 16 // in.h:174:1: + IN_CLASSC_HOST = 255 // in.h:181:1: + IN_CLASSC_NET = 0xffffff00 // in.h:179:1: + IN_CLASSC_NSHIFT = 8 // in.h:180:1: + IN_LOOPBACKNET = 127 // in.h:197:1: + IPV6_2292DSTOPTS = 4 // in.h:171:1: + IPV6_2292HOPLIMIT = 8 // in.h:175:1: + IPV6_2292HOPOPTS = 3 // in.h:170:1: + IPV6_2292PKTINFO = 2 // in.h:169:1: + IPV6_2292PKTOPTIONS = 6 // in.h:173:1: + IPV6_2292RTHDR = 5 // in.h:172:1: + IPV6_ADDRFORM = 1 // in.h:168:1: + IPV6_ADDR_PREFERENCES = 72 // in.h:223:1: + IPV6_ADD_MEMBERSHIP = 20 // in.h:237:1: + IPV6_AUTHHDR = 10 // in.h:180:1: + IPV6_AUTOFLOWLABEL = 70 // in.h:220:1: + IPV6_CHECKSUM = 7 // in.h:174:1: + IPV6_DONTFRAG = 62 // in.h:214:1: + IPV6_DROP_MEMBERSHIP = 21 // in.h:238:1: + IPV6_DSTOPTS = 59 // in.h:211:1: + IPV6_FREEBIND = 78 // in.h:233:1: + IPV6_HDRINCL = 36 // in.h:198:1: + IPV6_HOPLIMIT = 52 // in.h:204:1: + IPV6_HOPOPTS = 54 // in.h:206:1: + IPV6_IPSEC_POLICY = 34 // in.h:196:1: + IPV6_JOIN_ANYCAST = 27 // in.h:192:1: + IPV6_JOIN_GROUP = 20 // in.h:185:1: + IPV6_LEAVE_ANYCAST = 28 // in.h:193:1: + IPV6_LEAVE_GROUP = 21 // in.h:186:1: + IPV6_MINHOPCOUNT = 73 // 
in.h:226:1: + IPV6_MTU = 24 // in.h:189:1: + IPV6_MTU_DISCOVER = 23 // in.h:188:1: + IPV6_MULTICAST_ALL = 29 // in.h:194:1: + IPV6_MULTICAST_HOPS = 18 // in.h:183:1: + IPV6_MULTICAST_IF = 17 // in.h:182:1: + IPV6_MULTICAST_LOOP = 19 // in.h:184:1: + IPV6_NEXTHOP = 9 // in.h:179:1: + IPV6_ORIGDSTADDR = 74 // in.h:228:1: + IPV6_PATHMTU = 61 // in.h:213:1: + IPV6_PKTINFO = 50 // in.h:202:1: + IPV6_PMTUDISC_DO = 2 // in.h:246:1: + IPV6_PMTUDISC_DONT = 0 // in.h:244:1: + IPV6_PMTUDISC_INTERFACE = 4 // in.h:248:1: + IPV6_PMTUDISC_OMIT = 5 // in.h:249:1: + IPV6_PMTUDISC_PROBE = 3 // in.h:247:1: + IPV6_PMTUDISC_WANT = 1 // in.h:245:1: + IPV6_RECVDSTOPTS = 58 // in.h:210:1: + IPV6_RECVERR = 25 // in.h:190:1: + IPV6_RECVFRAGSIZE = 77 // in.h:232:1: + IPV6_RECVHOPLIMIT = 51 // in.h:203:1: + IPV6_RECVHOPOPTS = 53 // in.h:205:1: + IPV6_RECVORIGDSTADDR = 74 // in.h:229:1: + IPV6_RECVPATHMTU = 60 // in.h:212:1: + IPV6_RECVPKTINFO = 49 // in.h:201:1: + IPV6_RECVRTHDR = 56 // in.h:208:1: + IPV6_RECVTCLASS = 66 // in.h:217:1: + IPV6_ROUTER_ALERT = 22 // in.h:187:1: + IPV6_ROUTER_ALERT_ISOLATE = 30 // in.h:195:1: + IPV6_RTHDR = 57 // in.h:209:1: + IPV6_RTHDRDSTOPTS = 55 // in.h:207:1: + IPV6_RTHDR_LOOSE = 0 // in.h:256:1: + IPV6_RTHDR_STRICT = 1 // in.h:257:1: + IPV6_RTHDR_TYPE_0 = 0 // in.h:259:1: + IPV6_RXDSTOPTS = 59 // in.h:241:1: + IPV6_RXHOPOPTS = 54 // in.h:240:1: + IPV6_TCLASS = 67 // in.h:218:1: + IPV6_TRANSPARENT = 75 // in.h:230:1: + IPV6_UNICAST_HOPS = 16 // in.h:181:1: + IPV6_UNICAST_IF = 76 // in.h:231:1: + IPV6_V6ONLY = 26 // in.h:191:1: + IPV6_XFRM_POLICY = 35 // in.h:197:1: + IP_ADD_MEMBERSHIP = 35 // in.h:121:1: + IP_ADD_SOURCE_MEMBERSHIP = 39 // in.h:125:1: + IP_BIND_ADDRESS_NO_PORT = 24 // in.h:103:1: + IP_BLOCK_SOURCE = 38 // in.h:124:1: + IP_CHECKSUM = 23 // in.h:102:1: + IP_DEFAULT_MULTICAST_LOOP = 1 // in.h:135:1: + IP_DEFAULT_MULTICAST_TTL = 1 // in.h:134:1: + IP_DROP_MEMBERSHIP = 36 // in.h:122:1: + IP_DROP_SOURCE_MEMBERSHIP = 40 // in.h:126:1: + IP_FREEBIND = 15 // in.h:89:1: + IP_HDRINCL = 3 // in.h:48:1: + IP_IPSEC_POLICY = 16 // in.h:90:1: + IP_MAX_MEMBERSHIPS = 20 // in.h:136:1: + IP_MINTTL = 21 // in.h:100:1: + IP_MSFILTER = 41 // in.h:127:1: + IP_MTU = 14 // in.h:88:1: + IP_MTU_DISCOVER = 10 // in.h:84:1: + IP_MULTICAST_ALL = 49 // in.h:128:1: + IP_MULTICAST_IF = 32 // in.h:118:1: + IP_MULTICAST_LOOP = 34 // in.h:120:1: + IP_MULTICAST_TTL = 33 // in.h:119:1: + IP_NODEFRAG = 22 // in.h:101:1: + IP_OPTIONS = 4 // in.h:47:1: + IP_ORIGDSTADDR = 20 // in.h:97:1: + IP_PASSSEC = 18 // in.h:92:1: + IP_PKTINFO = 8 // in.h:81:1: + IP_PKTOPTIONS = 9 // in.h:82:1: + IP_PMTUDISC = 10 // in.h:83:1: + IP_PMTUDISC_DO = 2 // in.h:109:1: + IP_PMTUDISC_DONT = 0 // in.h:107:1: + IP_PMTUDISC_INTERFACE = 4 // in.h:114:1: + IP_PMTUDISC_OMIT = 5 // in.h:116:1: + IP_PMTUDISC_PROBE = 3 // in.h:110:1: + IP_PMTUDISC_WANT = 1 // in.h:108:1: + IP_RECVERR = 11 // in.h:85:1: + IP_RECVFRAGSIZE = 25 // in.h:104:1: + IP_RECVOPTS = 6 // in.h:51:1: + IP_RECVORIGDSTADDR = 20 // in.h:98:1: + IP_RECVRETOPTS = 7 // in.h:53:1: + IP_RECVTOS = 13 // in.h:87:1: + IP_RECVTTL = 12 // in.h:86:1: + IP_RETOPTS = 7 // in.h:54:1: + IP_ROUTER_ALERT = 5 // in.h:80:1: + IP_TOS = 1 // in.h:49:1: + IP_TRANSPARENT = 19 // in.h:93:1: + IP_TTL = 2 // in.h:50:1: + IP_UNBLOCK_SOURCE = 37 // in.h:123:1: + IP_UNICAST_IF = 50 // in.h:129:1: + IP_XFRM_POLICY = 17 // in.h:91:1: + LITTLE_ENDIAN = 1234 // endian.h:27:1: + MCAST_BLOCK_SOURCE = 43 // in.h:67:1: + MCAST_EXCLUDE = 0 // in.h:76:1: + MCAST_INCLUDE = 1 // in.h:77:1: + 
MCAST_JOIN_GROUP = 42 // in.h:66:1: + MCAST_JOIN_SOURCE_GROUP = 46 // in.h:70:1: + MCAST_LEAVE_GROUP = 45 // in.h:69:1: + MCAST_LEAVE_SOURCE_GROUP = 47 // in.h:71:1: + MCAST_MSFILTER = 48 // in.h:72:1: + MCAST_UNBLOCK_SOURCE = 44 // in.h:68:1: + PDP_ENDIAN = 3412 // endian.h:29:1: + PF_ALG = 38 // socket.h:82:1: + PF_APPLETALK = 5 // socket.h:48:1: + PF_ASH = 18 // socket.h:62:1: + PF_ATMPVC = 8 // socket.h:51:1: + PF_ATMSVC = 20 // socket.h:64:1: + PF_AX25 = 3 // socket.h:46:1: + PF_BLUETOOTH = 31 // socket.h:75:1: + PF_BRIDGE = 7 // socket.h:50:1: + PF_CAIF = 37 // socket.h:81:1: + PF_CAN = 29 // socket.h:73:1: + PF_DECnet = 12 // socket.h:55:1: + PF_ECONET = 19 // socket.h:63:1: + PF_FILE = 1 // socket.h:44:1: + PF_IB = 27 // socket.h:71:1: + PF_IEEE802154 = 36 // socket.h:80:1: + PF_INET = 2 // socket.h:45:1: + PF_INET6 = 10 // socket.h:53:1: + PF_IPX = 4 // socket.h:47:1: + PF_IRDA = 23 // socket.h:67:1: + PF_ISDN = 34 // socket.h:78:1: + PF_IUCV = 32 // socket.h:76:1: + PF_KCM = 41 // socket.h:85:1: + PF_KEY = 15 // socket.h:58:1: + PF_LLC = 26 // socket.h:70:1: + PF_LOCAL = 1 // socket.h:42:1: + PF_MAX = 45 // socket.h:89:1: + PF_MPLS = 28 // socket.h:72:1: + PF_NETBEUI = 13 // socket.h:56:1: + PF_NETLINK = 16 // socket.h:59:1: + PF_NETROM = 6 // socket.h:49:1: + PF_NFC = 39 // socket.h:83:1: + PF_PACKET = 17 // socket.h:61:1: + PF_PHONET = 35 // socket.h:79:1: + PF_PPPOX = 24 // socket.h:68:1: + PF_QIPCRTR = 42 // socket.h:86:1: + PF_RDS = 21 // socket.h:65:1: + PF_ROSE = 11 // socket.h:54:1: + PF_ROUTE = 16 // socket.h:60:1: + PF_RXRPC = 33 // socket.h:77:1: + PF_SECURITY = 14 // socket.h:57:1: + PF_SMC = 43 // socket.h:87:1: + PF_SNA = 22 // socket.h:66:1: + PF_TIPC = 30 // socket.h:74:1: + PF_UNIX = 1 // socket.h:43:1: + PF_UNSPEC = 0 // socket.h:41:1: + PF_VSOCK = 40 // socket.h:84:1: + PF_WANPIPE = 25 // socket.h:69:1: + PF_X25 = 9 // socket.h:52:1: + PF_XDP = 44 // socket.h:88:1: + SCM_TIMESTAMP = 29 // socket.h:140:1: + SCM_TIMESTAMPING = 37 // socket.h:142:1: + SCM_TIMESTAMPING_OPT_STATS = 54 // socket.h:90:1: + SCM_TIMESTAMPING_PKTINFO = 58 // socket.h:98:1: + SCM_TIMESTAMPNS = 35 // socket.h:141:1: + SCM_TXTIME = 61 // socket.h:105:1: + SCM_WIFI_STATUS = 41 // socket.h:64:1: + SIOCATMARK = 0x8905 // sockios.h:10:1: + SIOCGPGRP = 0x8904 // sockios.h:9:1: + SIOCGSTAMP = 0x8906 // sockios.h:11:1: + SIOCGSTAMPNS = 0x8907 // sockios.h:12:1: + SIOCSPGRP = 0x8902 // sockios.h:7:1: + SOL_AAL = 265 // socket.h:151:1: + SOL_ALG = 279 // socket.h:165:1: + SOL_ATM = 264 // socket.h:150:1: + SOL_BLUETOOTH = 274 // socket.h:160:1: + SOL_CAIF = 278 // socket.h:164:1: + SOL_DCCP = 269 // socket.h:155:1: + SOL_DECNET = 261 // socket.h:147:1: + SOL_ICMPV6 = 58 // in.h:253:1: + SOL_IP = 0 // in.h:132:1: + SOL_IPV6 = 41 // in.h:252:1: + SOL_IRDA = 266 // socket.h:152:1: + SOL_IUCV = 277 // socket.h:163:1: + SOL_KCM = 281 // socket.h:167:1: + SOL_LLC = 268 // socket.h:154:1: + SOL_NETBEUI = 267 // socket.h:153:1: + SOL_NETLINK = 270 // socket.h:156:1: + SOL_NFC = 280 // socket.h:166:1: + SOL_PACKET = 263 // socket.h:149:1: + SOL_PNPIPE = 275 // socket.h:161:1: + SOL_PPPOL2TP = 273 // socket.h:159:1: + SOL_RAW = 255 // socket.h:146:1: + SOL_RDS = 276 // socket.h:162:1: + SOL_RXRPC = 272 // socket.h:158:1: + SOL_SOCKET = 1 // socket.h:9:1: + SOL_TIPC = 271 // socket.h:157:1: + SOL_TLS = 282 // socket.h:168:1: + SOL_X25 = 262 // socket.h:148:1: + SOL_XDP = 283 // socket.h:169:1: + SOMAXCONN = 4096 // socket.h:172:1: + SO_ACCEPTCONN = 30 // socket.h:51:1: + SO_ATTACH_BPF = 50 // socket.h:82:1: + 
SO_ATTACH_FILTER = 26 // socket.h:45:1: + SO_ATTACH_REUSEPORT_CBPF = 51 // socket.h:85:1: + SO_ATTACH_REUSEPORT_EBPF = 52 // socket.h:86:1: + SO_BINDTODEVICE = 25 // socket.h:42:1: + SO_BINDTOIFINDEX = 62 // socket.h:107:1: + SO_BPF_EXTENSIONS = 48 // socket.h:78:1: + SO_BROADCAST = 6 // socket.h:16:1: + SO_BSDCOMPAT = 14 // socket.h:26:1: + SO_BUSY_POLL = 46 // socket.h:74:1: + SO_CNX_ADVICE = 53 // socket.h:88:1: + SO_COOKIE = 57 // socket.h:96:1: + SO_DEBUG = 1 // socket.h:11:1: + SO_DETACH_BPF = 27 // socket.h:83:1: + SO_DETACH_FILTER = 27 // socket.h:46:1: + SO_DETACH_REUSEPORT_BPF = 68 // socket.h:120:1: + SO_DOMAIN = 39 // socket.h:59:1: + SO_DONTROUTE = 5 // socket.h:15:1: + SO_ERROR = 4 // socket.h:14:1: + SO_GET_FILTER = 26 // socket.h:47:1: + SO_INCOMING_CPU = 49 // socket.h:80:1: + SO_INCOMING_NAPI_ID = 56 // socket.h:94:1: + SO_KEEPALIVE = 9 // socket.h:21:1: + SO_LINGER = 13 // socket.h:25:1: + SO_LOCK_FILTER = 44 // socket.h:70:1: + SO_MARK = 36 // socket.h:56:1: + SO_MAX_PACING_RATE = 47 // socket.h:76:1: + SO_MEMINFO = 55 // socket.h:92:1: + SO_NOFCS = 43 // socket.h:68:1: + SO_NO_CHECK = 11 // socket.h:23:1: + SO_OOBINLINE = 10 // socket.h:22:1: + SO_PASSCRED = 16 // socket.h:29:1: + SO_PASSSEC = 34 // socket.h:54:1: + SO_PEEK_OFF = 42 // socket.h:65:1: + SO_PEERCRED = 17 // socket.h:30:1: + SO_PEERGROUPS = 59 // socket.h:100:1: + SO_PEERNAME = 28 // socket.h:49:1: + SO_PEERSEC = 31 // socket.h:53:1: + SO_PRIORITY = 12 // socket.h:24:1: + SO_PROTOCOL = 38 // socket.h:58:1: + SO_RCVBUF = 8 // socket.h:18:1: + SO_RCVBUFFORCE = 33 // socket.h:20:1: + SO_RCVLOWAT = 18 // socket.h:31:1: + SO_RCVTIMEO = 20 // socket.h:129:1: + SO_RCVTIMEO_NEW = 66 // socket.h:117:1: + SO_RCVTIMEO_OLD = 20 // socket.h:33:1: + SO_REUSEADDR = 2 // socket.h:12:1: + SO_REUSEPORT = 15 // socket.h:27:1: + SO_RXQ_OVFL = 40 // socket.h:61:1: + SO_SECURITY_AUTHENTICATION = 22 // socket.h:38:1: + SO_SECURITY_ENCRYPTION_NETWORK = 24 // socket.h:40:1: + SO_SECURITY_ENCRYPTION_TRANSPORT = 23 // socket.h:39:1: + SO_SELECT_ERR_QUEUE = 45 // socket.h:72:1: + SO_SNDBUF = 7 // socket.h:17:1: + SO_SNDBUFFORCE = 32 // socket.h:19:1: + SO_SNDLOWAT = 19 // socket.h:32:1: + SO_SNDTIMEO = 21 // socket.h:130:1: + SO_SNDTIMEO_NEW = 67 // socket.h:118:1: + SO_SNDTIMEO_OLD = 21 // socket.h:34:1: + SO_TIMESTAMP = 29 // socket.h:125:1: + SO_TIMESTAMPING = 37 // socket.h:127:1: + SO_TIMESTAMPING_NEW = 65 // socket.h:115:1: + SO_TIMESTAMPING_OLD = 37 // socket.h:111:1: + SO_TIMESTAMPNS = 35 // socket.h:126:1: + SO_TIMESTAMPNS_NEW = 64 // socket.h:114:1: + SO_TIMESTAMPNS_OLD = 35 // socket.h:110:1: + SO_TIMESTAMP_NEW = 63 // socket.h:113:1: + SO_TIMESTAMP_OLD = 29 // socket.h:109:1: + SO_TXTIME = 61 // socket.h:104:1: + SO_TYPE = 3 // socket.h:13:1: + SO_WIFI_STATUS = 41 // socket.h:63:1: + SO_ZEROCOPY = 60 // socket.h:102:1: + X_ASM_X86_POSIX_TYPES_64_H = 0 // posix_types_64.h:3:1: + X_ATFILE_SOURCE = 1 // features.h:342:1: + X_BITS_BYTESWAP_H = 1 // byteswap.h:24:1: + X_BITS_ENDIANNESS_H = 1 // endianness.h:2:1: + X_BITS_ENDIAN_H = 1 // endian.h:20:1: + X_BITS_PTHREADTYPES_ARCH_H = 1 // pthreadtypes-arch.h:19:1: + X_BITS_PTHREADTYPES_COMMON_H = 1 // pthreadtypes.h:20:1: + X_BITS_SOCKADDR_H = 1 // sockaddr.h:24:1: + X_BITS_STDINT_INTN_H = 1 // stdint-intn.h:20:1: + X_BITS_STDINT_UINTN_H = 1 // stdint-uintn.h:20:1: + X_BITS_TIME64_H = 1 // time64.h:24:1: + X_BITS_TYPESIZES_H = 1 // typesizes.h:24:1: + X_BITS_TYPES_H = 1 // types.h:24:1: + X_BITS_UINTN_IDENTITY_H = 1 // uintn-identity.h:24:1: + X_BSD_SIZE_T_ = 0 // 
stddef.h:189:1: + X_BSD_SIZE_T_DEFINED_ = 0 // stddef.h:192:1: + X_DEFAULT_SOURCE = 1 // features.h:227:1: + X_ENDIAN_H = 1 // endian.h:19:1: + X_FEATURES_H = 1 // features.h:19:1: + X_FILE_OFFSET_BITS = 64 // :25:1: + X_GCC_SIZE_T = 0 // stddef.h:195:1: + X_LINUX_POSIX_TYPES_H = 0 // posix_types.h:3:1: + X_LP64 = 1 // :284:1: + X_NETINET_IN_H = 1 // in.h:19:1: + X_POSIX_C_SOURCE = 200809 // features.h:281:1: + X_POSIX_SOURCE = 1 // features.h:279:1: + X_RWLOCK_INTERNAL_H = 0 // struct_rwlock.h:21:1: + X_SIZET_ = 0 // stddef.h:196:1: + X_SIZE_T = 0 // stddef.h:183:1: + X_SIZE_T_ = 0 // stddef.h:188:1: + X_SIZE_T_DECLARED = 0 // stddef.h:193:1: + X_SIZE_T_DEFINED = 0 // stddef.h:191:1: + X_SIZE_T_DEFINED_ = 0 // stddef.h:190:1: + X_SS_SIZE = 128 // sockaddr.h:40:1: + X_STDC_PREDEF_H = 1 // :162:1: + X_STRUCT_TIMESPEC = 1 // struct_timespec.h:3:1: + X_SYS_CDEFS_H = 1 // cdefs.h:19:1: + X_SYS_SELECT_H = 1 // select.h:22:1: + X_SYS_SIZE_T_H = 0 // stddef.h:184:1: + X_SYS_SOCKET_H = 1 // socket.h:20:1: + X_SYS_TYPES_H = 1 // types.h:23:1: + X_THREAD_MUTEX_INTERNAL_H = 1 // struct_mutex.h:20:1: + X_THREAD_SHARED_TYPES_H = 1 // thread-shared-types.h:20:1: + X_T_SIZE = 0 // stddef.h:186:1: + X_T_SIZE_ = 0 // stddef.h:185:1: + Linux = 1 // :231:1: + Unix = 1 // :177:1: +) + +// Standard well-known ports. +const ( /* in.h:122:1: */ + IPPORT_ECHO = 7 // Echo service. + IPPORT_DISCARD = 9 // Discard transmissions service. + IPPORT_SYSTAT = 11 // System status service. + IPPORT_DAYTIME = 13 // Time of day service. + IPPORT_NETSTAT = 15 // Network status service. + IPPORT_FTP = 21 // File Transfer Protocol. + IPPORT_TELNET = 23 // Telnet protocol. + IPPORT_SMTP = 25 // Simple Mail Transfer Protocol. + IPPORT_TIMESERVER = 37 // Timeserver service. + IPPORT_NAMESERVER = 42 // Domain Name Service. + IPPORT_WHOIS = 43 // Internet Whois service. + IPPORT_MTP = 57 + + IPPORT_TFTP = 69 // Trivial File Transfer Protocol. + IPPORT_RJE = 77 + IPPORT_FINGER = 79 // Finger service. + IPPORT_TTYLINK = 87 + IPPORT_SUPDUP = 95 // SUPDUP protocol. + + IPPORT_EXECSERVER = 512 // execd service. + IPPORT_LOGINSERVER = 513 // rlogind service. + IPPORT_CMDSERVER = 514 + IPPORT_EFSSERVER = 520 + + // UDP ports. + IPPORT_BIFFUDP = 512 + IPPORT_WHOSERVER = 513 + IPPORT_ROUTESERVER = 520 + + // Ports less than this value are reserved for privileged processes. + IPPORT_RESERVED = 1024 + + // Ports greater this value are reserved for (non-privileged) servers. + IPPORT_USERRESERVED = 5000 +) + +// Options for use with `getsockopt' and `setsockopt' at the IPv6 level. +// The first word in the comment at the right is the data type used; +// "bool" means a boolean value stored in an `int'. + +// Advanced API (RFC3542) (1). + +// Advanced API (RFC3542) (2). + +// RFC5014. + +// RFC5082. + +// Obsolete synonyms for the above. + +// IPV6_MTU_DISCOVER values. + +// Socket level values for IPv6. + +// Routing header options for IPv6. + +// Standard well-defined IP protocols. +const ( /* in.h:40:1: */ + IPPROTO_IP = 0 // Dummy protocol for TCP. + IPPROTO_ICMP = 1 // Internet Control Message Protocol. + IPPROTO_IGMP = 2 // Internet Group Management Protocol. + IPPROTO_IPIP = 4 // IPIP tunnels (older KA9Q tunnels use 94). + IPPROTO_TCP = 6 // Transmission Control Protocol. + IPPROTO_EGP = 8 // Exterior Gateway Protocol. + IPPROTO_PUP = 12 // PUP protocol. + IPPROTO_UDP = 17 // User Datagram Protocol. + IPPROTO_IDP = 22 // XNS IDP protocol. + IPPROTO_TP = 29 // SO Transport Protocol Class 4. 
+ IPPROTO_DCCP = 33 // Datagram Congestion Control Protocol. + IPPROTO_IPV6 = 41 // IPv6 header. + IPPROTO_RSVP = 46 // Reservation Protocol. + IPPROTO_GRE = 47 // General Routing Encapsulation. + IPPROTO_ESP = 50 // encapsulating security payload. + IPPROTO_AH = 51 // authentication header. + IPPROTO_MTP = 92 // Multicast Transport Protocol. + IPPROTO_BEETPH = 94 // IP option pseudo header for BEET. + IPPROTO_ENCAP = 98 // Encapsulation Header. + IPPROTO_PIM = 103 // Protocol Independent Multicast. + IPPROTO_COMP = 108 // Compression Header Protocol. + IPPROTO_SCTP = 132 // Stream Control Transmission Protocol. + IPPROTO_UDPLITE = 136 // UDP-Lite protocol. + IPPROTO_MPLS = 137 // MPLS in IP. + IPPROTO_RAW = 255 // Raw IP packets. + IPPROTO_MAX = 256 +) + +// If __USE_KERNEL_IPV6_DEFS is 1 then the user has included the kernel +// +// network headers first and we should use those ABI-identical definitions +// instead of our own, otherwise 0. +const ( /* in.h:99:1: */ + IPPROTO_HOPOPTS = 0 // IPv6 Hop-by-Hop options. + IPPROTO_ROUTING = 43 // IPv6 routing header. + IPPROTO_FRAGMENT = 44 // IPv6 fragmentation header. + IPPROTO_ICMPV6 = 58 // ICMPv6. + IPPROTO_NONE = 59 // IPv6 no next header. + IPPROTO_DSTOPTS = 60 // IPv6 destination options. + IPPROTO_MH = 135 +) + +// Bits in the FLAGS argument to `send', `recv', et al. +const ( /* socket.h:200:1: */ + MSG_OOB = 1 // Process out-of-band data. + MSG_PEEK = 2 // Peek at incoming messages. + MSG_DONTROUTE = 4 // Don't use local routing. + MSG_CTRUNC = 8 // Control data lost before delivery. + MSG_PROXY = 16 // Supply or ask second address. + MSG_TRUNC = 32 + MSG_DONTWAIT = 64 // Nonblocking IO. + MSG_EOR = 128 // End of record. + MSG_WAITALL = 256 // Wait for a full request. + MSG_FIN = 512 + MSG_SYN = 1024 + MSG_CONFIRM = 2048 // Confirm path validity. + MSG_RST = 4096 + MSG_ERRQUEUE = 8192 // Fetch message from error queue. + MSG_NOSIGNAL = 16384 // Do not generate SIGPIPE. + MSG_MORE = 32768 // Sender will send more. + MSG_WAITFORONE = 65536 // Wait for at least one packet to return. + MSG_BATCH = 262144 // sendmmsg: more messages coming. + MSG_ZEROCOPY = 67108864 // Use user data in kernel path. + MSG_FASTOPEN = 536870912 // Send data in TCP SYN. + + MSG_CMSG_CLOEXEC = 1073741824 +) + +// Socket level message types. This must match the definitions in +// +// . +const ( /* socket.h:332:1: */ + SCM_RIGHTS = 1 +) + +// Get the architecture-dependent definition of enum __socket_type. +// Define enum __socket_type for generic Linux. +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Types of sockets. +const ( /* socket_type.h:24:1: */ + SOCK_STREAM = 1 // Sequenced, reliable, connection-based + // byte streams. + SOCK_DGRAM = 2 // Connectionless, unreliable datagrams + // of fixed maximum length. 
+ SOCK_RAW = 3 // Raw protocol interface. + SOCK_RDM = 4 // Reliably-delivered messages. + SOCK_SEQPACKET = 5 // Sequenced, reliable, connection-based, + // datagrams of fixed maximum length. + SOCK_DCCP = 6 // Datagram Congestion Control Protocol. + SOCK_PACKET = 10 // Linux specific way of getting packets + // at the dev level. For writing rarp and + // other similar things on the user level. + + // Flags to be ORed into the type parameter of socket and socketpair and + // used for the flags parameter of paccept. + + SOCK_CLOEXEC = 524288 // Atomically set close-on-exec flag for the + // new descriptor(s). + SOCK_NONBLOCK = 2048 +) + +// The following constants should be used for the second parameter of +// +// `shutdown'. +const ( /* socket.h:41:1: */ + SHUT_RD = 0 // No more receptions. + SHUT_WR = 1 // No more transmissions. + SHUT_RDWR = 2 +) + +type Ptrdiff_t = int64 /* :3:26 */ + +type Size_t = uint64 /* :9:23 */ + +type Wchar_t = int32 /* :15:24 */ + +type X__int128_t = struct { + Flo int64 + Fhi int64 +} /* :21:43 */ // must match modernc.org/mathutil.Int128 +type X__uint128_t = struct { + Flo uint64 + Fhi uint64 +} /* :22:44 */ // must match modernc.org/mathutil.Int128 + +type X__builtin_va_list = uintptr /* :46:14 */ +type X__float128 = float64 /* :47:21 */ + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// These are defined by the user (or the compiler) +// to specify the desired environment: +// +// __STRICT_ANSI__ ISO Standard C. +// _ISOC99_SOURCE Extensions to ISO C89 from ISO C99. +// _ISOC11_SOURCE Extensions to ISO C99 from ISO C11. +// _ISOC2X_SOURCE Extensions to ISO C99 from ISO C2X. +// __STDC_WANT_LIB_EXT2__ +// Extensions to ISO C99 from TR 27431-2:2010. +// __STDC_WANT_IEC_60559_BFP_EXT__ +// Extensions to ISO C11 from TS 18661-1:2014. +// __STDC_WANT_IEC_60559_FUNCS_EXT__ +// Extensions to ISO C11 from TS 18661-4:2015. +// __STDC_WANT_IEC_60559_TYPES_EXT__ +// Extensions to ISO C11 from TS 18661-3:2015. +// +// _POSIX_SOURCE IEEE Std 1003.1. 
+// _POSIX_C_SOURCE If ==1, like _POSIX_SOURCE; if >=2 add IEEE Std 1003.2; +// if >=199309L, add IEEE Std 1003.1b-1993; +// if >=199506L, add IEEE Std 1003.1c-1995; +// if >=200112L, all of IEEE 1003.1-2004 +// if >=200809L, all of IEEE 1003.1-2008 +// _XOPEN_SOURCE Includes POSIX and XPG things. Set to 500 if +// Single Unix conformance is wanted, to 600 for the +// sixth revision, to 700 for the seventh revision. +// _XOPEN_SOURCE_EXTENDED XPG things and X/Open Unix extensions. +// _LARGEFILE_SOURCE Some more functions for correct standard I/O. +// _LARGEFILE64_SOURCE Additional functionality from LFS for large files. +// _FILE_OFFSET_BITS=N Select default filesystem interface. +// _ATFILE_SOURCE Additional *at interfaces. +// _GNU_SOURCE All of the above, plus GNU extensions. +// _DEFAULT_SOURCE The default set of features (taking precedence over +// __STRICT_ANSI__). +// +// _FORTIFY_SOURCE Add security hardening to many library functions. +// Set to 1 or 2; 2 performs stricter checks than 1. +// +// _REENTRANT, _THREAD_SAFE +// Obsolete; equivalent to _POSIX_C_SOURCE=199506L. +// +// The `-ansi' switch to the GNU C compiler, and standards conformance +// options such as `-std=c99', define __STRICT_ANSI__. If none of +// these are defined, or if _DEFAULT_SOURCE is defined, the default is +// to have _POSIX_SOURCE set to one and _POSIX_C_SOURCE set to +// 200809L, as well as enabling miscellaneous functions from BSD and +// SVID. If more than one of these are defined, they accumulate. For +// example __STRICT_ANSI__, _POSIX_SOURCE and _POSIX_C_SOURCE together +// give you ISO C, 1003.1, and 1003.2, but nothing else. +// +// These are defined by this file and are used by the +// header files to decide what to declare or define: +// +// __GLIBC_USE (F) Define things from feature set F. This is defined +// to 1 or 0; the subsequent macros are either defined +// or undefined, and those tests should be moved to +// __GLIBC_USE. +// __USE_ISOC11 Define ISO C11 things. +// __USE_ISOC99 Define ISO C99 things. +// __USE_ISOC95 Define ISO C90 AMD1 (C95) things. +// __USE_ISOCXX11 Define ISO C++11 things. +// __USE_POSIX Define IEEE Std 1003.1 things. +// __USE_POSIX2 Define IEEE Std 1003.2 things. +// __USE_POSIX199309 Define IEEE Std 1003.1, and .1b things. +// __USE_POSIX199506 Define IEEE Std 1003.1, .1b, .1c and .1i things. +// __USE_XOPEN Define XPG things. +// __USE_XOPEN_EXTENDED Define X/Open Unix things. +// __USE_UNIX98 Define Single Unix V2 things. +// __USE_XOPEN2K Define XPG6 things. +// __USE_XOPEN2KXSI Define XPG6 XSI things. +// __USE_XOPEN2K8 Define XPG7 things. +// __USE_XOPEN2K8XSI Define XPG7 XSI things. +// __USE_LARGEFILE Define correct standard I/O things. +// __USE_LARGEFILE64 Define LFS things with separate names. +// __USE_FILE_OFFSET64 Define 64bit interface as default. +// __USE_MISC Define things from 4.3BSD or System V Unix. +// __USE_ATFILE Define *at interfaces and AT_* constants for them. +// __USE_GNU Define GNU extensions. +// __USE_FORTIFY_LEVEL Additional security measures used, according to level. +// +// The macros `__GNU_LIBRARY__', `__GLIBC__', and `__GLIBC_MINOR__' are +// defined by this file unconditionally. `__GNU_LIBRARY__' is provided +// only for compatibility. All new code should use the other symbols +// to test for features. +// +// All macros listed above as possibly being defined by this file are +// explicitly undefined if they are not explicitly defined. 
+// Feature-test macros that are not defined by the user or compiler +// but are implied by the other feature-test macros defined (or by the +// lack of any definitions) are defined by the file. +// +// ISO C feature test macros depend on the definition of the macro +// when an affected header is included, not when the first system +// header is included, and so they are handled in +// , which does not have a multiple include +// guard. Feature test macros that can be handled from the first +// system header included are handled here. + +// Undefine everything, so we get a clean slate. + +// Suppress kernel-name space pollution unless user expressedly asks +// for it. + +// Convenience macro to test the version of gcc. +// Use like this: +// #if __GNUC_PREREQ (2,8) +// ... code requiring gcc 2.8 or later ... +// #endif +// Note: only works for GCC 2.0 and later, because __GNUC_MINOR__ was +// added in 2.0. + +// Similarly for clang. Features added to GCC after version 4.2 may +// or may not also be available in clang, and clang's definitions of +// __GNUC(_MINOR)__ are fixed at 4 and 2 respectively. Not all such +// features can be queried via __has_extension/__has_feature. + +// Whether to use feature set F. + +// _BSD_SOURCE and _SVID_SOURCE are deprecated aliases for +// _DEFAULT_SOURCE. If _DEFAULT_SOURCE is present we do not +// issue a warning; the expectation is that the source is being +// transitioned to use the new macro. + +// If _GNU_SOURCE was defined by the user, turn on all the other features. + +// If nothing (other than _GNU_SOURCE and _DEFAULT_SOURCE) is defined, +// define _DEFAULT_SOURCE. + +// This is to enable the ISO C2X extension. + +// This is to enable the ISO C11 extension. + +// This is to enable the ISO C99 extension. + +// This is to enable the ISO C90 Amendment 1:1995 extension. + +// If none of the ANSI/POSIX macros are defined, or if _DEFAULT_SOURCE +// is defined, use POSIX.1-2008 (or another version depending on +// _XOPEN_SOURCE). + +// Some C libraries once required _REENTRANT and/or _THREAD_SAFE to be +// defined in all multithreaded code. GNU libc has not required this +// for many years. We now treat them as compatibility synonyms for +// _POSIX_C_SOURCE=199506L, which is the earliest level of POSIX with +// comprehensive support for multithreaded code. Using them never +// lowers the selected level of POSIX conformance, only raises it. + +// The function 'gets' existed in C89, but is impossible to use +// safely. It has been removed from ISO C11 and ISO C++14. Note: for +// compatibility with various implementations of , this test +// must consider only the value of __cplusplus when compiling C++. + +// GNU formerly extended the scanf functions with modified format +// specifiers %as, %aS, and %a[...] that allocate a buffer for the +// input using malloc. This extension conflicts with ISO C99, which +// defines %a as a standalone format specifier that reads a floating- +// point number; moreover, POSIX.1-2008 provides the same feature +// using the modifier letter 'm' instead (%ms, %mS, %m[...]). +// +// We now follow C99 unless GNU extensions are active and the compiler +// is specifically in C89 or C++98 mode (strict or not). For +// instance, with GCC, -std=gnu11 will have C99-compliant scanf with +// or without -D_GNU_SOURCE, but -std=c89 -D_GNU_SOURCE will have the +// old extension. + +// Get definitions of __STDC_* predefined macros, if the compiler has +// not preincluded this header automatically. 
+// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// This macro indicates that the installed library is the GNU C Library. +// For historic reasons the value now is 6 and this will stay from now +// on. The use of this variable is deprecated. Use __GLIBC__ and +// __GLIBC_MINOR__ now (see below) when you want to test for a specific +// GNU C library version and use the values in to get +// the sonames of the shared libraries. + +// Major and minor version number of the GNU C library package. Use +// these macros to test for features in specific releases. + +// This is here only because every header file already includes this one. +// Copyright (C) 1992-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// We are almost always included from features.h. + +// The GNU libc does not support any K&R compilers or the traditional mode +// of ISO C compilers anymore. Check for some of the combinations not +// anymore supported. + +// Some user header file might have defined this before. + +// All functions, except those with callbacks or those that +// synchronize memory, are leaf functions. + +// GCC can always grok prototypes. For C++ programs we add throw() +// to help it optimize the function calls. But this works only with +// gcc 2.8.x and egcs. For gcc 3.2 and up we even mark C functions +// as non-throwing using a function attribute since programs can use +// the -fexceptions options for C code as well. + +// Compilers that are not clang may object to +// #if defined __clang__ && __has_extension(...) +// even though they do not need to evaluate the right-hand side of the &&. + +// These two macros are not used in glibc anymore. They are kept here +// only because some other projects expect the macros to be defined. + +// For these things, GCC behaves the ANSI way normally, +// and the non-ANSI way under -traditional. + +// This is not a typedef so `const __ptr_t' does the right thing. + +// C++ needs to know that types and declarations are C, not C++. + +// Fortify support. + +// Support for flexible arrays. +// Headers that should use flexible arrays only if they're "real" +// (e.g. 
only if they won't affect sizeof()) should test +// #if __glibc_c99_flexarr_available. + +// __asm__ ("xyz") is used throughout the headers to rename functions +// at the assembly language level. This is wrapped by the __REDIRECT +// macro, in order to support compilers that can do this some other +// way. When compilers don't support asm-names at all, we have to do +// preprocessor tricks instead (which don't have exactly the right +// semantics, but it's the best we can do). +// +// Example: +// int __REDIRECT(setpgrp, (__pid_t pid, __pid_t pgrp), setpgid); + +// +// #elif __SOME_OTHER_COMPILER__ +// +// # define __REDIRECT(name, proto, alias) name proto; _Pragma("let " #name " = " #alias) + +// GCC has various useful declarations that can be made with the +// `__attribute__' syntax. All of the ways we use this do fine if +// they are omitted for compilers that don't understand it. + +// At some point during the gcc 2.96 development the `malloc' attribute +// for functions was introduced. We don't want to use it unconditionally +// (although this would be possible) since it generates warnings. + +// Tell the compiler which arguments to an allocation function +// indicate the size of the allocation. + +// At some point during the gcc 2.96 development the `pure' attribute +// for functions was introduced. We don't want to use it unconditionally +// (although this would be possible) since it generates warnings. + +// This declaration tells the compiler that the value is constant. + +// At some point during the gcc 3.1 development the `used' attribute +// for functions was introduced. We don't want to use it unconditionally +// (although this would be possible) since it generates warnings. + +// Since version 3.2, gcc allows marking deprecated functions. + +// Since version 4.5, gcc also allows one to specify the message printed +// when a deprecated function is used. clang claims to be gcc 4.2, but +// may also support this feature. + +// At some point during the gcc 2.8 development the `format_arg' attribute +// for functions was introduced. We don't want to use it unconditionally +// (although this would be possible) since it generates warnings. +// If several `format_arg' attributes are given for the same function, in +// gcc-3.0 and older, all but the last one are ignored. In newer gccs, +// all designated arguments are considered. + +// At some point during the gcc 2.97 development the `strfmon' format +// attribute for functions was introduced. We don't want to use it +// unconditionally (although this would be possible) since it +// generates warnings. + +// The nonull function attribute allows to mark pointer parameters which +// must not be NULL. + +// If fortification mode, we warn about unused results of certain +// function calls which can lead to problems. + +// Forces a function to be always inlined. +// The Linux kernel defines __always_inline in stddef.h (283d7573), and +// it conflicts with this definition. Therefore undefine it first to +// allow either header to be included first. + +// Associate error messages with the source location of the call site rather +// than with the source location inside the function. + +// GCC 4.3 and above with -std=c99 or -std=gnu99 implements ISO C99 +// inline semantics, unless -fgnu89-inline is used. Using __GNUC_STDC_INLINE__ +// or __GNUC_GNU_INLINE is not a good enough check for gcc because gcc versions +// older than 4.3 may define these macros and still not guarantee GNU inlining +// semantics. 
+// +// clang++ identifies itself as gcc-4.2, but has support for GNU inlining +// semantics, that can be checked for by using the __GNUC_STDC_INLINE_ and +// __GNUC_GNU_INLINE__ macro definitions. + +// GCC 4.3 and above allow passing all anonymous arguments of an +// __extern_always_inline function to some other vararg function. + +// It is possible to compile containing GCC extensions even if GCC is +// run in pedantic mode if the uses are carefully marked using the +// `__extension__' keyword. But this is not generally available before +// version 2.8. + +// __restrict is known in EGCS 1.2 and above. + +// ISO C99 also allows to declare arrays as non-overlapping. The syntax is +// array_name[restrict] +// GCC 3.1 supports this. + +// Describes a char array whose address can safely be passed as the first +// argument to strncpy and strncat, as the char array is not necessarily +// a NUL-terminated string. + +// Undefine (also defined in libc-symbols.h). +// Copies attributes from the declaration or type referenced by +// the argument. + +// Determine the wordsize from the preprocessor defines. + +// Both x86-64 and x32 use the 64-bit system call interface. +// Properties of long double type. ldbl-96 version. +// Copyright (C) 2016-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// long double is distinct from double, so there is nothing to +// define here. + +// __glibc_macro_warning (MESSAGE) issues warning MESSAGE. This is +// intended for use in preprocessor macros. +// +// Note: MESSAGE must be a _single_ string; concatenation of string +// literals is not supported. + +// Generic selection (ISO C11) is a C-only feature, available in GCC +// since version 4.9. Previous versions do not provide generic +// selection, even though they might set __STDC_VERSION__ to 201112L, +// when in -std=c11 mode. Thus, we must check for !defined __GNUC__ +// when testing __STDC_VERSION__ for generic selection support. +// On the other hand, Clang also defines __GNUC__, so a clang-specific +// check is required to enable the use of generic selection. + +// If we don't have __REDIRECT, prototypes will be missing if +// __USE_FILE_OFFSET64 but not __USE_LARGEFILE[64]. + +// Decide whether we can define 'extern inline' functions in headers. + +// This is here only because every header file already includes this one. +// Get the definitions of all the appropriate `__stub_FUNCTION' symbols. +// contains `#define __stub_FUNCTION' when FUNCTION is a stub +// that will always return failure (and set errno to ENOSYS). +// This file is automatically generated. +// This file selects the right generated file of `__stub_FUNCTION' macros +// based on the architecture being compiled for. + +// This file is automatically generated. 
+// It defines a symbol `__stub_FUNCTION' for each function +// in the C library which is a stub, meaning it will fail +// every time called, usually setting errno to ENOSYS. + +// Define uintN_t types. +// Copyright (C) 2017-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Determine the wordsize from the preprocessor defines. + +// Both x86-64 and x32 use the 64-bit system call interface. +// Bit size of the time_t type at glibc build time, x86-64 and x32 case. +// Copyright (C) 2018-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// For others, time size is word size. + +// Convenience types. +type X__u_char = uint8 /* types.h:31:23 */ +type X__u_short = uint16 /* types.h:32:28 */ +type X__u_int = uint32 /* types.h:33:22 */ +type X__u_long = uint64 /* types.h:34:27 */ + +// Fixed-size types, underlying types depend on word size and compiler. +type X__int8_t = int8 /* types.h:37:21 */ +type X__uint8_t = uint8 /* types.h:38:23 */ +type X__int16_t = int16 /* types.h:39:26 */ +type X__uint16_t = uint16 /* types.h:40:28 */ +type X__int32_t = int32 /* types.h:41:20 */ +type X__uint32_t = uint32 /* types.h:42:22 */ +type X__int64_t = int64 /* types.h:44:25 */ +type X__uint64_t = uint64 /* types.h:45:27 */ + +// Smallest types with at least a given width. +type X__int_least8_t = X__int8_t /* types.h:52:18 */ +type X__uint_least8_t = X__uint8_t /* types.h:53:19 */ +type X__int_least16_t = X__int16_t /* types.h:54:19 */ +type X__uint_least16_t = X__uint16_t /* types.h:55:20 */ +type X__int_least32_t = X__int32_t /* types.h:56:19 */ +type X__uint_least32_t = X__uint32_t /* types.h:57:20 */ +type X__int_least64_t = X__int64_t /* types.h:58:19 */ +type X__uint_least64_t = X__uint64_t /* types.h:59:20 */ + +// quad_t is also 64 bits. +type X__quad_t = int64 /* types.h:63:18 */ +type X__u_quad_t = uint64 /* types.h:64:27 */ + +// Largest integral types. +type X__intmax_t = int64 /* types.h:72:18 */ +type X__uintmax_t = uint64 /* types.h:73:27 */ + +// The machine-dependent file defines __*_T_TYPE +// macros for each of the OS types we define below. The definitions +// of those macros must use the following macros for underlying types. +// We define __S_TYPE and __U_TYPE for the signed and unsigned +// variants of each of the following integer types on this machine. +// +// 16 -- "natural" 16-bit type (always short) +// 32 -- "natural" 32-bit type (always int) +// 64 -- "natural" 64-bit type (long or long long) +// LONG32 -- 32-bit type, traditionally long +// QUAD -- 64-bit type, traditionally long long +// WORD -- natural type of __WORDSIZE bits (int or long) +// LONGWORD -- type of __WORDSIZE bits, traditionally long +// +// We distinguish WORD/LONGWORD, 32/LONG32, and 64/QUAD so that the +// conventional uses of `long' or `long long' type modifiers match the +// types we define, even when a less-adorned type would be the same size. +// This matters for (somewhat) portably writing printf/scanf formats for +// these types, where using the appropriate l or ll format modifiers can +// make the typedefs and the formats match up across all GNU platforms. If +// we used `long' when it's 64 bits where `long long' is expected, then the +// compiler would warn about the formats not matching the argument types, +// and the programmer changing them to shut up the compiler would break the +// program's portability. +// +// Here we assume what is presently the case in all the GCC configurations +// we support: long long is always 64 bits, long is always word/address size, +// and int is always 32 bits. + +// No need to mark the typedef with __extension__. +// bits/typesizes.h -- underlying types for *_t. Linux/x86-64 version. +// Copyright (C) 2012-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. 
+// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// See for the meaning of these macros. This file exists so +// that need not vary across different GNU platforms. + +// X32 kernel interface is 64-bit. + +// Tell the libc code that off_t and off64_t are actually the same type +// for all ABI purposes, even if possibly expressed as different base types +// for C type-checking purposes. + +// Same for ino_t and ino64_t. + +// And for __rlim_t and __rlim64_t. + +// And for fsblkcnt_t, fsblkcnt64_t, fsfilcnt_t and fsfilcnt64_t. + +// Number of descriptors that can fit in an `fd_set'. + +// bits/time64.h -- underlying types for __time64_t. Generic version. +// Copyright (C) 2018-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Define __TIME64_T_TYPE so that it is always a 64-bit type. + +// If we already have 64-bit time type then use it. + +type X__dev_t = uint64 /* types.h:145:25 */ // Type of device numbers. +type X__uid_t = uint32 /* types.h:146:25 */ // Type of user identifications. +type X__gid_t = uint32 /* types.h:147:25 */ // Type of group identifications. +type X__ino_t = uint64 /* types.h:148:25 */ // Type of file serial numbers. +type X__ino64_t = uint64 /* types.h:149:27 */ // Type of file serial numbers (LFS). +type X__mode_t = uint32 /* types.h:150:26 */ // Type of file attribute bitmasks. +type X__nlink_t = uint64 /* types.h:151:27 */ // Type of file link counts. +type X__off_t = int64 /* types.h:152:25 */ // Type of file sizes and offsets. +type X__off64_t = int64 /* types.h:153:27 */ // Type of file sizes and offsets (LFS). +type X__pid_t = int32 /* types.h:154:25 */ // Type of process identifications. +type X__fsid_t = struct{ F__val [2]int32 } /* types.h:155:26 */ // Type of file system IDs. +type X__clock_t = int64 /* types.h:156:27 */ // Type of CPU usage counts. +type X__rlim_t = uint64 /* types.h:157:26 */ // Type for resource measurement. +type X__rlim64_t = uint64 /* types.h:158:28 */ // Type for resource measurement (LFS). +type X__id_t = uint32 /* types.h:159:24 */ // General type for IDs. +type X__time_t = int64 /* types.h:160:26 */ // Seconds since the Epoch. +type X__useconds_t = uint32 /* types.h:161:30 */ // Count of microseconds. 
+type X__suseconds_t = int64 /* types.h:162:31 */ // Signed count of microseconds. + +type X__daddr_t = int32 /* types.h:164:27 */ // The type of a disk address. +type X__key_t = int32 /* types.h:165:25 */ // Type of an IPC key. + +// Clock ID used in clock and timer functions. +type X__clockid_t = int32 /* types.h:168:29 */ + +// Timer ID returned by `timer_create'. +type X__timer_t = uintptr /* types.h:171:12 */ + +// Type to represent block size. +type X__blksize_t = int64 /* types.h:174:29 */ + +// Types from the Large File Support interface. + +// Type to count number of disk blocks. +type X__blkcnt_t = int64 /* types.h:179:28 */ +type X__blkcnt64_t = int64 /* types.h:180:30 */ + +// Type to count file system blocks. +type X__fsblkcnt_t = uint64 /* types.h:183:30 */ +type X__fsblkcnt64_t = uint64 /* types.h:184:32 */ + +// Type to count file system nodes. +type X__fsfilcnt_t = uint64 /* types.h:187:30 */ +type X__fsfilcnt64_t = uint64 /* types.h:188:32 */ + +// Type of miscellaneous file system fields. +type X__fsword_t = int64 /* types.h:191:28 */ + +type X__ssize_t = int64 /* types.h:193:27 */ // Type of a byte count, or error. + +// Signed long type used in system calls. +type X__syscall_slong_t = int64 /* types.h:196:33 */ +// Unsigned long type used in system calls. +type X__syscall_ulong_t = uint64 /* types.h:198:33 */ + +// These few don't really vary by system, they always correspond +// +// to one of the other defined types. +type X__loff_t = X__off64_t /* types.h:202:19 */ // Type of file sizes and offsets (LFS). +type X__caddr_t = uintptr /* types.h:203:14 */ + +// Duplicates info from stdint.h but this is used in unistd.h. +type X__intptr_t = int64 /* types.h:206:25 */ + +// Duplicate info from sys/socket.h. +type X__socklen_t = uint32 /* types.h:209:23 */ + +// C99: An integer type that can be accessed as an atomic entity, +// +// even in the presence of asynchronous interrupts. +// It is not currently necessary for this to be machine-specific. +type X__sig_atomic_t = int32 /* types.h:214:13 */ + +// Seconds since the Epoch, visible to user code when time_t is too +// narrow only for consistency with the old way of widening too-narrow +// types. User code should never use __time64_t. + +type Uint8_t = X__uint8_t /* stdint-uintn.h:24:19 */ +type Uint16_t = X__uint16_t /* stdint-uintn.h:25:20 */ +type Uint32_t = X__uint32_t /* stdint-uintn.h:26:20 */ +type Uint64_t = X__uint64_t /* stdint-uintn.h:27:20 */ + +// Wide character type. +// Locale-writers should change this as necessary to +// be big enough to hold unique values not between 0 and 127, +// and not (wchar_t) -1, for each defined multibyte character. + +// Define this type if we are doing the whole job, +// or if we want this type in particular. + +// A null pointer constant. + +// Structure for scatter/gather I/O. +type Iovec = struct { + Fiov_base uintptr + Fiov_len Size_t +} /* struct_iovec.h:26:1 */ + +// Copyright (C) 1989-2020 Free Software Foundation, Inc. +// +// This file is part of GCC. +// +// GCC is free software; you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation; either version 3, or (at your option) +// any later version. +// +// GCC is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+// +// Under Section 7 of GPL version 3, you are granted additional +// permissions described in the GCC Runtime Library Exception, version +// 3.1, as published by the Free Software Foundation. +// +// You should have received a copy of the GNU General Public License and +// a copy of the GCC Runtime Library Exception along with this program; +// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +// . + +// ISO C Standard: 7.17 Common definitions + +// Any one of these symbols __need_* means that GNU libc +// wants us just to define one data type. So don't define +// the symbols that indicate this file's entire job has been done. + +// This avoids lossage on SunOS but only if stdtypes.h comes first. +// There's no way to win with the other order! Sun lossage. + +// Sequent's header files use _PTRDIFF_T_ in some conflicting way. +// Just ignore it. + +// On VxWorks, may have defined macros like +// _TYPE_size_t which will typedef size_t. fixincludes patched the +// vxTypesBase.h so that this macro is only defined if _GCC_SIZE_T is +// not defined, and so that defining this macro defines _GCC_SIZE_T. +// If we find that the macros are still defined at this point, we must +// invoke them so that the type is defined as expected. + +// In case nobody has defined these types, but we aren't running under +// GCC 2.00, make sure that __PTRDIFF_TYPE__, __SIZE_TYPE__, and +// __WCHAR_TYPE__ have reasonable values. This can happen if the +// parts of GCC is compiled by an older compiler, that actually +// include gstddef.h, such as collect2. + +// Signed type of difference of two pointers. + +// Define this type if we are doing the whole job, +// or if we want this type in particular. + +// Unsigned type of `sizeof' something. + +// Define this type if we are doing the whole job, +// or if we want this type in particular. + +// Wide character type. +// Locale-writers should change this as necessary to +// be big enough to hold unique values not between 0 and 127, +// and not (wchar_t) -1, for each defined multibyte character. + +// Define this type if we are doing the whole job, +// or if we want this type in particular. + +// A null pointer constant. + +// This operating system-specific header file defines the SOCK_*, PF_*, +// AF_*, MSG_*, SOL_*, and SO_* constants, and the `struct sockaddr', +// `struct msghdr', and `struct linger' types. +// System-specific socket constants and types. Linux version. +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Copyright (C) 1989-2020 Free Software Foundation, Inc. +// +// This file is part of GCC. 
+// +// GCC is free software; you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation; either version 3, or (at your option) +// any later version. +// +// GCC is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// Under Section 7 of GPL version 3, you are granted additional +// permissions described in the GCC Runtime Library Exception, version +// 3.1, as published by the Free Software Foundation. +// +// You should have received a copy of the GNU General Public License and +// a copy of the GCC Runtime Library Exception along with this program; +// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +// . + +// ISO C Standard: 7.17 Common definitions + +// Any one of these symbols __need_* means that GNU libc +// wants us just to define one data type. So don't define +// the symbols that indicate this file's entire job has been done. + +// This avoids lossage on SunOS but only if stdtypes.h comes first. +// There's no way to win with the other order! Sun lossage. + +// Sequent's header files use _PTRDIFF_T_ in some conflicting way. +// Just ignore it. + +// On VxWorks, may have defined macros like +// _TYPE_size_t which will typedef size_t. fixincludes patched the +// vxTypesBase.h so that this macro is only defined if _GCC_SIZE_T is +// not defined, and so that defining this macro defines _GCC_SIZE_T. +// If we find that the macros are still defined at this point, we must +// invoke them so that the type is defined as expected. + +// In case nobody has defined these types, but we aren't running under +// GCC 2.00, make sure that __PTRDIFF_TYPE__, __SIZE_TYPE__, and +// __WCHAR_TYPE__ have reasonable values. This can happen if the +// parts of GCC is compiled by an older compiler, that actually +// include gstddef.h, such as collect2. + +// Signed type of difference of two pointers. + +// Define this type if we are doing the whole job, +// or if we want this type in particular. + +// Unsigned type of `sizeof' something. + +// Define this type if we are doing the whole job, +// or if we want this type in particular. + +// Wide character type. +// Locale-writers should change this as necessary to +// be big enough to hold unique values not between 0 and 127, +// and not (wchar_t) -1, for each defined multibyte character. + +// Define this type if we are doing the whole job, +// or if we want this type in particular. + +// A null pointer constant. + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . 
+ +// POSIX Standard: 2.6 Primitive System Data Types + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +type U_char = X__u_char /* types.h:33:18 */ +type U_short = X__u_short /* types.h:34:19 */ +type U_int = X__u_int /* types.h:35:17 */ +type U_long = X__u_long /* types.h:36:18 */ +type Quad_t = X__quad_t /* types.h:37:18 */ +type U_quad_t = X__u_quad_t /* types.h:38:20 */ +type Fsid_t = X__fsid_t /* types.h:39:18 */ +type Loff_t = X__loff_t /* types.h:42:18 */ + +type Ino_t = X__ino64_t /* types.h:49:19 */ + +type Dev_t = X__dev_t /* types.h:59:17 */ + +type Gid_t = X__gid_t /* types.h:64:17 */ + +type Mode_t = X__mode_t /* types.h:69:18 */ + +type Nlink_t = X__nlink_t /* types.h:74:19 */ + +type Uid_t = X__uid_t /* types.h:79:17 */ + +type Off_t = X__off64_t /* types.h:87:19 */ + +type Pid_t = X__pid_t /* types.h:97:17 */ + +type Id_t = X__id_t /* types.h:103:16 */ + +type Ssize_t = X__ssize_t /* types.h:108:19 */ + +type Daddr_t = X__daddr_t /* types.h:114:19 */ +type Caddr_t = X__caddr_t /* types.h:115:19 */ + +type Key_t = X__key_t /* types.h:121:17 */ + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . 
+ +// Never include this file directly; use instead. + +// Returned by `clock'. +type Clock_t = X__clock_t /* clock_t.h:7:19 */ + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Clock ID used in clock and timer functions. +type Clockid_t = X__clockid_t /* clockid_t.h:7:21 */ + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Returned by `time'. +type Time_t = X__time_t /* time_t.h:7:18 */ + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Timer ID returned by `timer_create'. +type Timer_t = X__timer_t /* timer_t.h:7:19 */ + +// Copyright (C) 1989-2020 Free Software Foundation, Inc. +// +// This file is part of GCC. +// +// GCC is free software; you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation; either version 3, or (at your option) +// any later version. 
+// +// GCC is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// Under Section 7 of GPL version 3, you are granted additional +// permissions described in the GCC Runtime Library Exception, version +// 3.1, as published by the Free Software Foundation. +// +// You should have received a copy of the GNU General Public License and +// a copy of the GCC Runtime Library Exception along with this program; +// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +// . + +// ISO C Standard: 7.17 Common definitions + +// Any one of these symbols __need_* means that GNU libc +// wants us just to define one data type. So don't define +// the symbols that indicate this file's entire job has been done. + +// This avoids lossage on SunOS but only if stdtypes.h comes first. +// There's no way to win with the other order! Sun lossage. + +// Sequent's header files use _PTRDIFF_T_ in some conflicting way. +// Just ignore it. + +// On VxWorks, may have defined macros like +// _TYPE_size_t which will typedef size_t. fixincludes patched the +// vxTypesBase.h so that this macro is only defined if _GCC_SIZE_T is +// not defined, and so that defining this macro defines _GCC_SIZE_T. +// If we find that the macros are still defined at this point, we must +// invoke them so that the type is defined as expected. + +// In case nobody has defined these types, but we aren't running under +// GCC 2.00, make sure that __PTRDIFF_TYPE__, __SIZE_TYPE__, and +// __WCHAR_TYPE__ have reasonable values. This can happen if the +// parts of GCC is compiled by an older compiler, that actually +// include gstddef.h, such as collect2. + +// Signed type of difference of two pointers. + +// Define this type if we are doing the whole job, +// or if we want this type in particular. + +// Unsigned type of `sizeof' something. + +// Define this type if we are doing the whole job, +// or if we want this type in particular. + +// Wide character type. +// Locale-writers should change this as necessary to +// be big enough to hold unique values not between 0 and 127, +// and not (wchar_t) -1, for each defined multibyte character. + +// Define this type if we are doing the whole job, +// or if we want this type in particular. + +// A null pointer constant. + +// Old compatibility names for C types. +type Ulong = uint64 /* types.h:148:27 */ +type Ushort = uint16 /* types.h:149:28 */ +type Uint = uint32 /* types.h:150:22 */ + +// These size-specific names are used by some of the inet code. + +// Define intN_t types. +// Copyright (C) 2017-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// bits/types.h -- definitions of __*_t types underlying *_t types. 
+// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +type Int8_t = X__int8_t /* stdint-intn.h:24:18 */ +type Int16_t = X__int16_t /* stdint-intn.h:25:19 */ +type Int32_t = X__int32_t /* stdint-intn.h:26:19 */ +type Int64_t = X__int64_t /* stdint-intn.h:27:19 */ + +// These were defined by ISO C without the first `_'. +type U_int8_t = X__uint8_t /* types.h:158:19 */ +type U_int16_t = X__uint16_t /* types.h:159:20 */ +type U_int32_t = X__uint32_t /* types.h:160:20 */ +type U_int64_t = X__uint64_t /* types.h:161:20 */ + +type Register_t = int32 /* types.h:164:13 */ + +// It also defines `fd_set' and the FD_* macros for `select'. +// `fd_set' type and related macros, and `select'/`pselect' declarations. +// Copyright (C) 1996-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// POSIX 1003.1g: 6.2 Select from File Descriptor Sets + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Get definition of needed basic types. +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. 
+// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Get __FD_* definitions. +// Copyright (C) 1997-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Determine the wordsize from the preprocessor defines. + +// Both x86-64 and x32 use the 64-bit system call interface. + +// Get sigset_t. + +type X__sigset_t = struct{ F__val [16]uint64 } /* __sigset_t.h:8:3 */ + +// A set of signals to be blocked, unblocked, or waited for. +type Sigset_t = X__sigset_t /* sigset_t.h:7:20 */ + +// Get definition of timer specification structures. + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// A time value that is accurate to the nearest +// +// microsecond but also has a range of years. +type Timeval = struct { + Ftv_sec X__time_t + Ftv_usec X__suseconds_t +} /* struct_timeval.h:8:1 */ + +// NB: Include guard matches what uses. + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. 
+// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Endian macros for string.h functions +// Copyright (C) 1992-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// POSIX.1b structure for a time value. This is like a `struct timeval' but +// +// has nanoseconds instead of microseconds. +type Timespec = struct { + Ftv_sec X__time_t + Ftv_nsec X__syscall_slong_t +} /* struct_timespec.h:10:1 */ + +type Suseconds_t = X__suseconds_t /* select.h:43:23 */ + +// The fd_set member is required to be an array of longs. +type X__fd_mask = int64 /* select.h:49:18 */ + +// Some versions of define this macros. +// It's easier to assume 8-bit bytes than to get CHAR_BIT. + +// fd_set for select and pselect. +type Fd_set = struct{ F__fds_bits [16]X__fd_mask } /* select.h:70:5 */ + +// Maximum number of file descriptors in `fd_set'. + +// Sometimes the fd_set member is assumed to have this type. +type Fd_mask = X__fd_mask /* select.h:77:19 */ + +// Define some inlines helping to catch common problems. + +type Blksize_t = X__blksize_t /* types.h:185:21 */ + +// Types from the Large File Support interface. +type Blkcnt_t = X__blkcnt64_t /* types.h:205:22 */ // Type to count number of disk blocks. +type Fsblkcnt_t = X__fsblkcnt64_t /* types.h:209:24 */ // Type to count file system blocks. +type Fsfilcnt_t = X__fsfilcnt64_t /* types.h:213:24 */ // Type to count file system inodes. + +// Now add the thread types. +// Declaration of common pthread types for all architectures. +// Copyright (C) 2017-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// For internal mutex and condition variable definitions. +// Common threading primitives definitions for both POSIX and C11. +// Copyright (C) 2017-2020 Free Software Foundation, Inc. 
+// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Arch-specific definitions. Each architecture must define the following +// macros to define the expected sizes of pthread data types: +// +// __SIZEOF_PTHREAD_ATTR_T - size of pthread_attr_t. +// __SIZEOF_PTHREAD_MUTEX_T - size of pthread_mutex_t. +// __SIZEOF_PTHREAD_MUTEXATTR_T - size of pthread_mutexattr_t. +// __SIZEOF_PTHREAD_COND_T - size of pthread_cond_t. +// __SIZEOF_PTHREAD_CONDATTR_T - size of pthread_condattr_t. +// __SIZEOF_PTHREAD_RWLOCK_T - size of pthread_rwlock_t. +// __SIZEOF_PTHREAD_RWLOCKATTR_T - size of pthread_rwlockattr_t. +// __SIZEOF_PTHREAD_BARRIER_T - size of pthread_barrier_t. +// __SIZEOF_PTHREAD_BARRIERATTR_T - size of pthread_barrierattr_t. +// +// The additional macro defines any constraint for the lock alignment +// inside the thread structures: +// +// __LOCK_ALIGNMENT - for internal lock/futex usage. +// +// Same idea but for the once locking primitive: +// +// __ONCE_ALIGNMENT - for pthread_once_t/once_flag definition. + +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Determine the wordsize from the preprocessor defines. + +// Both x86-64 and x32 use the 64-bit system call interface. + +// Common definition of pthread_mutex_t. + +type X__pthread_internal_list = struct { + F__prev uintptr + F__next uintptr +} /* thread-shared-types.h:49:9 */ + +// Type to count file system inodes. + +// Now add the thread types. +// Declaration of common pthread types for all architectures. +// Copyright (C) 2017-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// For internal mutex and condition variable definitions. +// Common threading primitives definitions for both POSIX and C11. +// Copyright (C) 2017-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Arch-specific definitions. Each architecture must define the following +// macros to define the expected sizes of pthread data types: +// +// __SIZEOF_PTHREAD_ATTR_T - size of pthread_attr_t. +// __SIZEOF_PTHREAD_MUTEX_T - size of pthread_mutex_t. +// __SIZEOF_PTHREAD_MUTEXATTR_T - size of pthread_mutexattr_t. +// __SIZEOF_PTHREAD_COND_T - size of pthread_cond_t. +// __SIZEOF_PTHREAD_CONDATTR_T - size of pthread_condattr_t. +// __SIZEOF_PTHREAD_RWLOCK_T - size of pthread_rwlock_t. +// __SIZEOF_PTHREAD_RWLOCKATTR_T - size of pthread_rwlockattr_t. +// __SIZEOF_PTHREAD_BARRIER_T - size of pthread_barrier_t. +// __SIZEOF_PTHREAD_BARRIERATTR_T - size of pthread_barrierattr_t. +// +// The additional macro defines any constraint for the lock alignment +// inside the thread structures: +// +// __LOCK_ALIGNMENT - for internal lock/futex usage. +// +// Same idea but for the once locking primitive: +// +// __ONCE_ALIGNMENT - for pthread_once_t/once_flag definition. + +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Determine the wordsize from the preprocessor defines. + +// Both x86-64 and x32 use the 64-bit system call interface. + +// Common definition of pthread_mutex_t. + +type X__pthread_list_t = X__pthread_internal_list /* thread-shared-types.h:53:3 */ + +type X__pthread_internal_slist = struct{ F__next uintptr } /* thread-shared-types.h:55:9 */ + +type X__pthread_slist_t = X__pthread_internal_slist /* thread-shared-types.h:58:3 */ + +// Arch-specific mutex definitions. A generic implementation is provided +// by sysdeps/nptl/bits/struct_mutex.h. If required, an architecture +// can override it by defining: +// +// 1. struct __pthread_mutex_s (used on both pthread_mutex_t and mtx_t +// definition). 
It should contains at least the internal members +// defined in the generic version. +// +// 2. __LOCK_ALIGNMENT for any extra attribute for internal lock used with +// atomic operations. +// +// 3. The macro __PTHREAD_MUTEX_INITIALIZER used for static initialization. +// It should initialize the mutex internal flag. + +// x86 internal mutex struct definitions. +// Copyright (C) 2019-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +type X__pthread_mutex_s = struct { + F__lock int32 + F__count uint32 + F__owner int32 + F__nusers uint32 + F__kind int32 + F__spins int16 + F__elision int16 + F__list X__pthread_list_t +} /* struct_mutex.h:22:1 */ + +// Arch-sepecific read-write lock definitions. A generic implementation is +// provided by struct_rwlock.h. If required, an architecture can override it +// by defining: +// +// 1. struct __pthread_rwlock_arch_t (used on pthread_rwlock_t definition). +// It should contain at least the internal members defined in the +// generic version. +// +// 2. The macro __PTHREAD_RWLOCK_INITIALIZER used for static initialization. +// It should initialize the rwlock internal type. + +// x86 internal rwlock struct definitions. +// Copyright (C) 2019-2020 Free Software Foundation, Inc. +// +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +type X__pthread_rwlock_arch_t = struct { + F__readers uint32 + F__writers uint32 + F__wrphase_futex uint32 + F__writers_futex uint32 + F__pad3 uint32 + F__pad4 uint32 + F__cur_writer int32 + F__shared int32 + F__rwelision int8 + F__pad1 [7]uint8 + F__pad2 uint64 + F__flags uint32 + F__ccgo_pad1 [4]byte +} /* struct_rwlock.h:23:1 */ + +// Common definition of pthread_cond_t. + +type X__pthread_cond_s = struct { + F__0 struct{ F__wseq uint64 } + F__8 struct{ F__g1_start uint64 } + F__g_refs [2]uint32 + F__g_size [2]uint32 + F__g1_orig_size uint32 + F__wrefs uint32 + F__g_signals [2]uint32 +} /* thread-shared-types.h:92:1 */ + +// Thread identifiers. The structure of the attribute type is not +// +// exposed on purpose. +type Pthread_t = uint64 /* pthreadtypes.h:27:27 */ + +// Data structures for mutex handling. The structure of the attribute +// +// type is not exposed on purpose. 
+type Pthread_mutexattr_t = struct { + F__ccgo_pad1 [0]uint32 + F__size [4]int8 +} /* pthreadtypes.h:36:3 */ + +// Data structure for condition variable handling. The structure of +// +// the attribute type is not exposed on purpose. +type Pthread_condattr_t = struct { + F__ccgo_pad1 [0]uint32 + F__size [4]int8 +} /* pthreadtypes.h:45:3 */ + +// Keys for thread-specific data +type Pthread_key_t = uint32 /* pthreadtypes.h:49:22 */ + +// Once-only execution +type Pthread_once_t = int32 /* pthreadtypes.h:53:30 */ + +type Pthread_attr_t1 = struct { + F__ccgo_pad1 [0]uint64 + F__size [56]int8 +} /* pthreadtypes.h:56:1 */ + +type Pthread_attr_t = Pthread_attr_t1 /* pthreadtypes.h:62:30 */ + +type Pthread_mutex_t = struct{ F__data X__pthread_mutex_s } /* pthreadtypes.h:72:3 */ + +type Pthread_cond_t = struct{ F__data X__pthread_cond_s } /* pthreadtypes.h:80:3 */ + +// Data structure for reader-writer lock variable handling. The +// +// structure of the attribute type is deliberately not exposed. +type Pthread_rwlock_t = struct{ F__data X__pthread_rwlock_arch_t } /* pthreadtypes.h:91:3 */ + +type Pthread_rwlockattr_t = struct { + F__ccgo_pad1 [0]uint64 + F__size [8]int8 +} /* pthreadtypes.h:97:3 */ + +// POSIX spinlock data type. +type Pthread_spinlock_t = int32 /* pthreadtypes.h:103:22 */ + +// POSIX barriers data type. The structure of the type is +// +// deliberately not exposed. +type Pthread_barrier_t = struct { + F__ccgo_pad1 [0]uint64 + F__size [32]int8 +} /* pthreadtypes.h:112:3 */ + +type Pthread_barrierattr_t = struct { + F__ccgo_pad1 [0]uint32 + F__size [4]int8 +} /* pthreadtypes.h:118:3 */ + +// Type for length arguments in socket calls. +type Socklen_t = X__socklen_t /* socket.h:33:21 */ + +// Protocol families. + +// Address families. + +// Socket level values. Others are defined in the appropriate headers. +// +// XXX These definitions also should go into the appropriate headers as +// far as they are available. + +// Maximum queue length specifiable by listen. + +// Get the definition of the macro to define the common sockaddr members. +// Definition of struct sockaddr_* common members and sizes, generic version. +// Copyright (C) 1995-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// POSIX.1g specifies this type name for the `sa_family' member. +type Sa_family_t = uint16 /* sockaddr.h:28:28 */ + +// This macro is used to declare the initial common members +// of the data types used for socket addresses, `struct sockaddr', +// `struct sockaddr_in', `struct sockaddr_un', etc. + +// Size of struct sockaddr_storage. + +// Structure describing a generic socket address. 
+type Sockaddr = struct { + Fsa_family Sa_family_t + Fsa_data [14]int8 +} /* socket.h:178:1 */ + +// Structure large enough to hold any socket address (with the historical +// exception of AF_UNIX). + +type Sockaddr_storage = struct { + Fss_family Sa_family_t + F__ss_padding [118]int8 + F__ss_align uint64 +} /* socket.h:191:1 */ + +// Structure describing messages sent by +// +// `sendmsg' and received by `recvmsg'. +type Msghdr = struct { + Fmsg_name uintptr + Fmsg_namelen Socklen_t + F__ccgo_pad1 [4]byte + Fmsg_iov uintptr + Fmsg_iovlen Size_t + Fmsg_control uintptr + Fmsg_controllen Size_t + Fmsg_flags int32 + F__ccgo_pad2 [4]byte +} /* socket.h:257:1 */ + +// Structure used for storage of ancillary data object information. +type Cmsghdr = struct { + F__ccgo_pad1 [0]uint64 + Fcmsg_len Size_t + Fcmsg_level int32 + Fcmsg_type int32 +} /* socket.h:275:1 */ + +// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note + +// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note + +// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note + +// This allows for 1024 file descriptors: if NR_OPEN is ever grown +// beyond that you'll have to change this too. But 1024 fd's seem to be +// enough even for such "real" unices like OSF/1, so hopefully this is +// one limit that doesn't have to be changed [again]. +// +// Note that POSIX wants the FD_CLEAR(fd,fdsetp) defines to be in +// (and thus ) - but this is a more logical +// place for them. Solved by having dummy defines in . + +// This macro may have been defined in . But we always +// use the one here. + +type X__kernel_fd_set = struct{ Ffds_bits [16]uint64 } /* posix_types.h:27:3 */ + +// Type of a signal handler. +type X__kernel_sighandler_t = uintptr /* posix_types.h:30:14 */ + +// Type of a SYSV IPC key. +type X__kernel_key_t = int32 /* posix_types.h:33:13 */ +type X__kernel_mqd_t = int32 /* posix_types.h:34:13 */ + +// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note +// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note + +// This file is generally used by user-level software, so you need to +// be a little careful about namespace pollution etc. Also, we cannot +// assume GCC is being used. + +type X__kernel_old_uid_t = uint16 /* posix_types_64.h:11:24 */ +type X__kernel_old_gid_t = uint16 /* posix_types_64.h:12:24 */ + +type X__kernel_old_dev_t = uint64 /* posix_types_64.h:15:23 */ + +// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note + +// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note + +// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note + +// There seems to be no way of detecting this automatically from user +// space, so 64 bit architectures should override this in their +// bitsperlong.h. In particular, an architecture that supports +// both 32 and 64 bit user space must not rely on CONFIG_64BIT +// to decide it, but rather check a compiler provided macro. + +// This file is generally used by user-level software, so you need to +// be a little careful about namespace pollution etc. +// +// First the types that are often defined in different ways across +// architectures, so that you can override them. 
+ +type X__kernel_long_t = int64 /* posix_types.h:15:15 */ +type X__kernel_ulong_t = uint64 /* posix_types.h:16:23 */ + +type X__kernel_ino_t = X__kernel_ulong_t /* posix_types.h:20:26 */ + +type X__kernel_mode_t = uint32 /* posix_types.h:24:22 */ + +type X__kernel_pid_t = int32 /* posix_types.h:28:14 */ + +type X__kernel_ipc_pid_t = int32 /* posix_types.h:32:14 */ + +type X__kernel_uid_t = uint32 /* posix_types.h:36:22 */ +type X__kernel_gid_t = uint32 /* posix_types.h:37:22 */ + +type X__kernel_suseconds_t = X__kernel_long_t /* posix_types.h:41:26 */ + +type X__kernel_daddr_t = int32 /* posix_types.h:45:14 */ + +type X__kernel_uid32_t = uint32 /* posix_types.h:49:22 */ +type X__kernel_gid32_t = uint32 /* posix_types.h:50:22 */ + +// Most 32 bit architectures use "unsigned int" size_t, +// and all 64 bit architectures use "unsigned long" size_t. +type X__kernel_size_t = X__kernel_ulong_t /* posix_types.h:72:26 */ +type X__kernel_ssize_t = X__kernel_long_t /* posix_types.h:73:25 */ +type X__kernel_ptrdiff_t = X__kernel_long_t /* posix_types.h:74:25 */ + +type X__kernel_fsid_t = struct{ Fval [2]int32 } /* posix_types.h:81:3 */ + +// anything below here should be completely generic +type X__kernel_off_t = X__kernel_long_t /* posix_types.h:87:25 */ +type X__kernel_loff_t = int64 /* posix_types.h:88:19 */ +type X__kernel_old_time_t = X__kernel_long_t /* posix_types.h:89:25 */ +type X__kernel_time_t = X__kernel_long_t /* posix_types.h:90:25 */ +type X__kernel_time64_t = int64 /* posix_types.h:91:19 */ +type X__kernel_clock_t = X__kernel_long_t /* posix_types.h:92:25 */ +type X__kernel_timer_t = int32 /* posix_types.h:93:14 */ +type X__kernel_clockid_t = int32 /* posix_types.h:94:14 */ +type X__kernel_caddr_t = uintptr /* posix_types.h:95:14 */ +type X__kernel_uid16_t = uint16 /* posix_types.h:96:24 */ +type X__kernel_gid16_t = uint16 /* posix_types.h:97:24 */ + +// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note + +// Socket-level I/O control calls. + +// For setsockopt(2) + +// Security levels - as per NRL IPv6 - don't actually do anything + +// Socket filtering + +// Instruct lower device to use last 4-bytes of skb data as FCS + +// on 64-bit and x32, avoid the ?: operator + +// Structure used to manipulate the SO_LINGER option. +type Linger = struct { + Fl_onoff int32 + Fl_linger int32 +} /* socket.h:361:1 */ + +// This is the 4.3 BSD `struct sockaddr' format, which is used as wire +// +// format in the grotty old 4.3 `talk' protocol. +type Osockaddr = struct { + Fsa_family uint16 + Fsa_data [14]uint8 +} /* struct_osockaddr.h:6:1 */ + +// Define some macros helping to catch buffer overflows. + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. 
+ +// Internet address. +type In_addr_t = Uint32_t /* in.h:30:18 */ +type In_addr = struct{ Fs_addr In_addr_t } /* in.h:31:1 */ + +// Get system-specific definitions. +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Linux version. + +// If the application has already included linux/in6.h from a linux-based +// kernel then we will not define the IPv6 IPPROTO_* defines, in6_addr (nor the +// defines), sockaddr_in6, or ipv6_mreq. Same for in6_ptkinfo or ip6_mtuinfo +// in linux/ipv6.h. The ABI used by the linux-kernel and glibc match exactly. +// Neither the linux kernel nor glibc should break this ABI without coordination. +// In upstream kernel 56c176c9 the _UAPI prefix was stripped so we need to check +// for _LINUX_IN6_H and _IPV6_H now, and keep checking the old versions for +// maximum backwards compatibility. + +// Options for use with `getsockopt' and `setsockopt' at the IP level. +// The first word in the comment at the right is the data type used; +// "bool" means a boolean value stored in an `int'. +// For BSD compatibility. + +// TProxy original addresses + +// IP_MTU_DISCOVER arguments. +// Always use interface mtu (ignores dst pmtu) but don't set DF flag. +// Also incoming ICMP frag_needed notifications will be ignored on +// this socket to prevent accepting spoofed ones. +// Like IP_PMTUDISC_INTERFACE but allow packets to be fragmented. + +// To select the IP level. + +// Structure used to describe IP options for IP_OPTIONS and IP_RETOPTS. +// +// The `ip_dst' field is used for the first-hop gateway when using a +// source route (this gets put into the header proper). +type Ip_opts = struct { + Fip_dst struct{ Fs_addr In_addr_t } + Fip_opts [40]int8 +} /* in.h:142:1 */ + +// Like `struct ip_mreq' but including interface specification by index. +type Ip_mreqn = struct { + Fimr_multiaddr struct{ Fs_addr In_addr_t } + Fimr_address struct{ Fs_addr In_addr_t } + Fimr_ifindex int32 +} /* in.h:149:1 */ + +// Structure used for IP_PKTINFO. +type In_pktinfo = struct { + Fipi_ifindex int32 + Fipi_spec_dst struct{ Fs_addr In_addr_t } + Fipi_addr struct{ Fs_addr In_addr_t } +} /* in.h:157:1 */ + +// Type to represent a port. +type In_port_t = Uint16_t /* in.h:119:18 */ + +// Definitions of the bits in an Internet address integer. +// +// On subnets, host and network parts are found according to +// the subnet mask, not these masks. + +// Address to accept any incoming messages. +// Address to send to all hosts. +// Address indicating an error return. + +// Network number for local host loopback. +// Address to loopback in software to local host. + +// Defines for Multicast INADDR. 
+ +// IPv6 address +type In6_addr = struct { + F__in6_u struct { + F__ccgo_pad1 [0]uint32 + F__u6_addr8 [16]Uint8_t + } +} /* in.h:212:1 */ + +// ::1 + +// Structure describing an Internet socket address. +type Sockaddr_in = struct { + Fsin_family Sa_family_t + Fsin_port In_port_t + Fsin_addr struct{ Fs_addr In_addr_t } + Fsin_zero [8]uint8 +} /* in.h:238:1 */ + +// Ditto, for IPv6. +type Sockaddr_in6 = struct { + Fsin6_family Sa_family_t + Fsin6_port In_port_t + Fsin6_flowinfo Uint32_t + Fsin6_addr struct { + F__in6_u struct { + F__ccgo_pad1 [0]uint32 + F__u6_addr8 [16]Uint8_t + } + } + Fsin6_scope_id Uint32_t +} /* in.h:253:1 */ + +// IPv4 multicast request. +type Ip_mreq = struct { + Fimr_multiaddr struct{ Fs_addr In_addr_t } + Fimr_interface struct{ Fs_addr In_addr_t } +} /* in.h:265:1 */ + +type Ip_mreq_source = struct { + Fimr_multiaddr struct{ Fs_addr In_addr_t } + Fimr_interface struct{ Fs_addr In_addr_t } + Fimr_sourceaddr struct{ Fs_addr In_addr_t } +} /* in.h:274:1 */ + +// Likewise, for IPv6. +type Ipv6_mreq = struct { + Fipv6mr_multiaddr struct { + F__in6_u struct { + F__ccgo_pad1 [0]uint32 + F__u6_addr8 [16]Uint8_t + } + } + Fipv6mr_interface uint32 +} /* in.h:289:1 */ + +// Multicast group request. +type Group_req = struct { + Fgr_interface Uint32_t + F__ccgo_pad1 [4]byte + Fgr_group struct { + Fss_family Sa_family_t + F__ss_padding [118]int8 + F__ss_align uint64 + } +} /* in.h:301:1 */ + +type Group_source_req = struct { + Fgsr_interface Uint32_t + F__ccgo_pad1 [4]byte + Fgsr_group struct { + Fss_family Sa_family_t + F__ss_padding [118]int8 + F__ss_align uint64 + } + Fgsr_source struct { + Fss_family Sa_family_t + F__ss_padding [118]int8 + F__ss_align uint64 + } +} /* in.h:310:1 */ + +// Full-state filter operations. +type Ip_msfilter = struct { + Fimsf_multiaddr struct{ Fs_addr In_addr_t } + Fimsf_interface struct{ Fs_addr In_addr_t } + Fimsf_fmode Uint32_t + Fimsf_numsrc Uint32_t + Fimsf_slist [1]struct{ Fs_addr In_addr_t } +} /* in.h:324:1 */ + +type Group_filter = struct { + Fgf_interface Uint32_t + F__ccgo_pad1 [4]byte + Fgf_group struct { + Fss_family Sa_family_t + F__ss_padding [118]int8 + F__ss_align uint64 + } + Fgf_fmode Uint32_t + Fgf_numsrc Uint32_t + Fgf_slist [1]struct { + Fss_family Sa_family_t + F__ss_padding [118]int8 + F__ss_align uint64 + } +} /* in.h:345:1 */ + +var _ int8 /* gen.c:2:13: */ diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/poll/capi_linux_loong64.go temporal-1.22.5/src/vendor/modernc.org/libc/poll/capi_linux_loong64.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/poll/capi_linux_loong64.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/poll/capi_linux_loong64.go 2024-02-23 09:46:15.000000000 +0000 @@ -0,0 +1,5 @@ +// Code generated by 'ccgo poll/gen.c -crt-import-path -export-defines -export-enums -export-externs X -export-fields F -export-structs -export-typedefs -header -hide _OSSwapInt16,_OSSwapInt32,_OSSwapInt64 -ignore-unsupported-alignment -o poll/poll_linux_amd64.go -pkgname poll', DO NOT EDIT. 
+ +package poll + +var CAPI = map[string]struct{}{} diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/poll/poll_linux_loong64.go temporal-1.22.5/src/vendor/modernc.org/libc/poll/poll_linux_loong64.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/poll/poll_linux_loong64.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/poll/poll_linux_loong64.go 2024-02-23 09:46:15.000000000 +0000 @@ -0,0 +1,540 @@ +// Code generated by 'ccgo poll/gen.c -crt-import-path "" -export-defines "" -export-enums "" -export-externs X -export-fields F -export-structs "" -export-typedefs "" -header -hide _OSSwapInt16,_OSSwapInt32,_OSSwapInt64 -ignore-unsupported-alignment -o poll/poll_linux_amd64.go -pkgname poll', DO NOT EDIT. + +package poll + +import ( + "math" + "reflect" + "sync/atomic" + "unsafe" +) + +var _ = math.Pi +var _ reflect.Kind +var _ atomic.Value +var _ unsafe.Pointer + +const ( + POLLERR = 0x008 // poll.h:47:1: + POLLHUP = 0x010 // poll.h:48:1: + POLLIN = 0x001 // poll.h:25:1: + POLLNVAL = 0x020 // poll.h:49:1: + POLLOUT = 0x004 // poll.h:27:1: + POLLPRI = 0x002 // poll.h:26:1: + POLLRDBAND = 0x080 // poll.h:32:1: + POLLRDNORM = 0x040 // poll.h:31:1: + POLLWRBAND = 0x200 // poll.h:34:1: + POLLWRNORM = 0x100 // poll.h:33:1: + X_ATFILE_SOURCE = 1 // features.h:342:1: + X_DEFAULT_SOURCE = 1 // features.h:227:1: + X_FEATURES_H = 1 // features.h:19:1: + X_FILE_OFFSET_BITS = 64 // :25:1: + X_LP64 = 1 // :284:1: + X_POSIX_C_SOURCE = 200809 // features.h:281:1: + X_POSIX_SOURCE = 1 // features.h:279:1: + X_STDC_PREDEF_H = 1 // :162:1: + X_SYS_CDEFS_H = 1 // cdefs.h:19:1: + X_SYS_POLL_H = 1 // poll.h:20:1: + Linux = 1 // :231:1: + Unix = 1 // :177:1: +) + +type Ptrdiff_t = int64 /* :3:26 */ + +type Size_t = uint64 /* :9:23 */ + +type Wchar_t = int32 /* :15:24 */ + +type X__int128_t = struct { + Flo int64 + Fhi int64 +} /* :21:43 */ // must match modernc.org/mathutil.Int128 +type X__uint128_t = struct { + Flo uint64 + Fhi uint64 +} /* :22:44 */ // must match modernc.org/mathutil.Int128 + +type X__builtin_va_list = uintptr /* :46:14 */ +type X__float128 = float64 /* :47:21 */ + +// Compatibility definitions for System V `poll' interface. +// Copyright (C) 1994-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. 
+// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// These are defined by the user (or the compiler) +// to specify the desired environment: +// +// __STRICT_ANSI__ ISO Standard C. +// _ISOC99_SOURCE Extensions to ISO C89 from ISO C99. +// _ISOC11_SOURCE Extensions to ISO C99 from ISO C11. +// _ISOC2X_SOURCE Extensions to ISO C99 from ISO C2X. +// __STDC_WANT_LIB_EXT2__ +// Extensions to ISO C99 from TR 27431-2:2010. +// __STDC_WANT_IEC_60559_BFP_EXT__ +// Extensions to ISO C11 from TS 18661-1:2014. +// __STDC_WANT_IEC_60559_FUNCS_EXT__ +// Extensions to ISO C11 from TS 18661-4:2015. +// __STDC_WANT_IEC_60559_TYPES_EXT__ +// Extensions to ISO C11 from TS 18661-3:2015. +// +// _POSIX_SOURCE IEEE Std 1003.1. +// _POSIX_C_SOURCE If ==1, like _POSIX_SOURCE; if >=2 add IEEE Std 1003.2; +// if >=199309L, add IEEE Std 1003.1b-1993; +// if >=199506L, add IEEE Std 1003.1c-1995; +// if >=200112L, all of IEEE 1003.1-2004 +// if >=200809L, all of IEEE 1003.1-2008 +// _XOPEN_SOURCE Includes POSIX and XPG things. Set to 500 if +// Single Unix conformance is wanted, to 600 for the +// sixth revision, to 700 for the seventh revision. +// _XOPEN_SOURCE_EXTENDED XPG things and X/Open Unix extensions. +// _LARGEFILE_SOURCE Some more functions for correct standard I/O. +// _LARGEFILE64_SOURCE Additional functionality from LFS for large files. +// _FILE_OFFSET_BITS=N Select default filesystem interface. +// _ATFILE_SOURCE Additional *at interfaces. +// _GNU_SOURCE All of the above, plus GNU extensions. +// _DEFAULT_SOURCE The default set of features (taking precedence over +// __STRICT_ANSI__). +// +// _FORTIFY_SOURCE Add security hardening to many library functions. +// Set to 1 or 2; 2 performs stricter checks than 1. +// +// _REENTRANT, _THREAD_SAFE +// Obsolete; equivalent to _POSIX_C_SOURCE=199506L. +// +// The `-ansi' switch to the GNU C compiler, and standards conformance +// options such as `-std=c99', define __STRICT_ANSI__. If none of +// these are defined, or if _DEFAULT_SOURCE is defined, the default is +// to have _POSIX_SOURCE set to one and _POSIX_C_SOURCE set to +// 200809L, as well as enabling miscellaneous functions from BSD and +// SVID. If more than one of these are defined, they accumulate. For +// example __STRICT_ANSI__, _POSIX_SOURCE and _POSIX_C_SOURCE together +// give you ISO C, 1003.1, and 1003.2, but nothing else. +// +// These are defined by this file and are used by the +// header files to decide what to declare or define: +// +// __GLIBC_USE (F) Define things from feature set F. This is defined +// to 1 or 0; the subsequent macros are either defined +// or undefined, and those tests should be moved to +// __GLIBC_USE. +// __USE_ISOC11 Define ISO C11 things. +// __USE_ISOC99 Define ISO C99 things. +// __USE_ISOC95 Define ISO C90 AMD1 (C95) things. +// __USE_ISOCXX11 Define ISO C++11 things. +// __USE_POSIX Define IEEE Std 1003.1 things. +// __USE_POSIX2 Define IEEE Std 1003.2 things. +// __USE_POSIX199309 Define IEEE Std 1003.1, and .1b things. +// __USE_POSIX199506 Define IEEE Std 1003.1, .1b, .1c and .1i things. +// __USE_XOPEN Define XPG things. +// __USE_XOPEN_EXTENDED Define X/Open Unix things. 
+// __USE_UNIX98 Define Single Unix V2 things. +// __USE_XOPEN2K Define XPG6 things. +// __USE_XOPEN2KXSI Define XPG6 XSI things. +// __USE_XOPEN2K8 Define XPG7 things. +// __USE_XOPEN2K8XSI Define XPG7 XSI things. +// __USE_LARGEFILE Define correct standard I/O things. +// __USE_LARGEFILE64 Define LFS things with separate names. +// __USE_FILE_OFFSET64 Define 64bit interface as default. +// __USE_MISC Define things from 4.3BSD or System V Unix. +// __USE_ATFILE Define *at interfaces and AT_* constants for them. +// __USE_GNU Define GNU extensions. +// __USE_FORTIFY_LEVEL Additional security measures used, according to level. +// +// The macros `__GNU_LIBRARY__', `__GLIBC__', and `__GLIBC_MINOR__' are +// defined by this file unconditionally. `__GNU_LIBRARY__' is provided +// only for compatibility. All new code should use the other symbols +// to test for features. +// +// All macros listed above as possibly being defined by this file are +// explicitly undefined if they are not explicitly defined. +// Feature-test macros that are not defined by the user or compiler +// but are implied by the other feature-test macros defined (or by the +// lack of any definitions) are defined by the file. +// +// ISO C feature test macros depend on the definition of the macro +// when an affected header is included, not when the first system +// header is included, and so they are handled in +// , which does not have a multiple include +// guard. Feature test macros that can be handled from the first +// system header included are handled here. + +// Undefine everything, so we get a clean slate. + +// Suppress kernel-name space pollution unless user expressedly asks +// for it. + +// Convenience macro to test the version of gcc. +// Use like this: +// #if __GNUC_PREREQ (2,8) +// ... code requiring gcc 2.8 or later ... +// #endif +// Note: only works for GCC 2.0 and later, because __GNUC_MINOR__ was +// added in 2.0. + +// Similarly for clang. Features added to GCC after version 4.2 may +// or may not also be available in clang, and clang's definitions of +// __GNUC(_MINOR)__ are fixed at 4 and 2 respectively. Not all such +// features can be queried via __has_extension/__has_feature. + +// Whether to use feature set F. + +// _BSD_SOURCE and _SVID_SOURCE are deprecated aliases for +// _DEFAULT_SOURCE. If _DEFAULT_SOURCE is present we do not +// issue a warning; the expectation is that the source is being +// transitioned to use the new macro. + +// If _GNU_SOURCE was defined by the user, turn on all the other features. + +// If nothing (other than _GNU_SOURCE and _DEFAULT_SOURCE) is defined, +// define _DEFAULT_SOURCE. + +// This is to enable the ISO C2X extension. + +// This is to enable the ISO C11 extension. + +// This is to enable the ISO C99 extension. + +// This is to enable the ISO C90 Amendment 1:1995 extension. + +// If none of the ANSI/POSIX macros are defined, or if _DEFAULT_SOURCE +// is defined, use POSIX.1-2008 (or another version depending on +// _XOPEN_SOURCE). + +// Some C libraries once required _REENTRANT and/or _THREAD_SAFE to be +// defined in all multithreaded code. GNU libc has not required this +// for many years. We now treat them as compatibility synonyms for +// _POSIX_C_SOURCE=199506L, which is the earliest level of POSIX with +// comprehensive support for multithreaded code. Using them never +// lowers the selected level of POSIX conformance, only raises it. + +// The function 'gets' existed in C89, but is impossible to use +// safely. 
It has been removed from ISO C11 and ISO C++14. Note: for +// compatibility with various implementations of , this test +// must consider only the value of __cplusplus when compiling C++. + +// GNU formerly extended the scanf functions with modified format +// specifiers %as, %aS, and %a[...] that allocate a buffer for the +// input using malloc. This extension conflicts with ISO C99, which +// defines %a as a standalone format specifier that reads a floating- +// point number; moreover, POSIX.1-2008 provides the same feature +// using the modifier letter 'm' instead (%ms, %mS, %m[...]). +// +// We now follow C99 unless GNU extensions are active and the compiler +// is specifically in C89 or C++98 mode (strict or not). For +// instance, with GCC, -std=gnu11 will have C99-compliant scanf with +// or without -D_GNU_SOURCE, but -std=c89 -D_GNU_SOURCE will have the +// old extension. + +// Get definitions of __STDC_* predefined macros, if the compiler has +// not preincluded this header automatically. +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// This macro indicates that the installed library is the GNU C Library. +// For historic reasons the value now is 6 and this will stay from now +// on. The use of this variable is deprecated. Use __GLIBC__ and +// __GLIBC_MINOR__ now (see below) when you want to test for a specific +// GNU C library version and use the values in to get +// the sonames of the shared libraries. + +// Major and minor version number of the GNU C library package. Use +// these macros to test for features in specific releases. + +// This is here only because every header file already includes this one. +// Copyright (C) 1992-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// We are almost always included from features.h. + +// The GNU libc does not support any K&R compilers or the traditional mode +// of ISO C compilers anymore. Check for some of the combinations not +// anymore supported. + +// Some user header file might have defined this before. + +// All functions, except those with callbacks or those that +// synchronize memory, are leaf functions. 
+ +// GCC can always grok prototypes. For C++ programs we add throw() +// to help it optimize the function calls. But this works only with +// gcc 2.8.x and egcs. For gcc 3.2 and up we even mark C functions +// as non-throwing using a function attribute since programs can use +// the -fexceptions options for C code as well. + +// Compilers that are not clang may object to +// #if defined __clang__ && __has_extension(...) +// even though they do not need to evaluate the right-hand side of the &&. + +// These two macros are not used in glibc anymore. They are kept here +// only because some other projects expect the macros to be defined. + +// For these things, GCC behaves the ANSI way normally, +// and the non-ANSI way under -traditional. + +// This is not a typedef so `const __ptr_t' does the right thing. + +// C++ needs to know that types and declarations are C, not C++. + +// Fortify support. + +// Support for flexible arrays. +// Headers that should use flexible arrays only if they're "real" +// (e.g. only if they won't affect sizeof()) should test +// #if __glibc_c99_flexarr_available. + +// __asm__ ("xyz") is used throughout the headers to rename functions +// at the assembly language level. This is wrapped by the __REDIRECT +// macro, in order to support compilers that can do this some other +// way. When compilers don't support asm-names at all, we have to do +// preprocessor tricks instead (which don't have exactly the right +// semantics, but it's the best we can do). +// +// Example: +// int __REDIRECT(setpgrp, (__pid_t pid, __pid_t pgrp), setpgid); + +// +// #elif __SOME_OTHER_COMPILER__ +// +// # define __REDIRECT(name, proto, alias) name proto; _Pragma("let " #name " = " #alias) + +// GCC has various useful declarations that can be made with the +// `__attribute__' syntax. All of the ways we use this do fine if +// they are omitted for compilers that don't understand it. + +// At some point during the gcc 2.96 development the `malloc' attribute +// for functions was introduced. We don't want to use it unconditionally +// (although this would be possible) since it generates warnings. + +// Tell the compiler which arguments to an allocation function +// indicate the size of the allocation. + +// At some point during the gcc 2.96 development the `pure' attribute +// for functions was introduced. We don't want to use it unconditionally +// (although this would be possible) since it generates warnings. + +// This declaration tells the compiler that the value is constant. + +// At some point during the gcc 3.1 development the `used' attribute +// for functions was introduced. We don't want to use it unconditionally +// (although this would be possible) since it generates warnings. + +// Since version 3.2, gcc allows marking deprecated functions. + +// Since version 4.5, gcc also allows one to specify the message printed +// when a deprecated function is used. clang claims to be gcc 4.2, but +// may also support this feature. + +// At some point during the gcc 2.8 development the `format_arg' attribute +// for functions was introduced. We don't want to use it unconditionally +// (although this would be possible) since it generates warnings. +// If several `format_arg' attributes are given for the same function, in +// gcc-3.0 and older, all but the last one are ignored. In newer gccs, +// all designated arguments are considered. + +// At some point during the gcc 2.97 development the `strfmon' format +// attribute for functions was introduced. 
We don't want to use it +// unconditionally (although this would be possible) since it +// generates warnings. + +// The nonull function attribute allows to mark pointer parameters which +// must not be NULL. + +// If fortification mode, we warn about unused results of certain +// function calls which can lead to problems. + +// Forces a function to be always inlined. +// The Linux kernel defines __always_inline in stddef.h (283d7573), and +// it conflicts with this definition. Therefore undefine it first to +// allow either header to be included first. + +// Associate error messages with the source location of the call site rather +// than with the source location inside the function. + +// GCC 4.3 and above with -std=c99 or -std=gnu99 implements ISO C99 +// inline semantics, unless -fgnu89-inline is used. Using __GNUC_STDC_INLINE__ +// or __GNUC_GNU_INLINE is not a good enough check for gcc because gcc versions +// older than 4.3 may define these macros and still not guarantee GNU inlining +// semantics. +// +// clang++ identifies itself as gcc-4.2, but has support for GNU inlining +// semantics, that can be checked for by using the __GNUC_STDC_INLINE_ and +// __GNUC_GNU_INLINE__ macro definitions. + +// GCC 4.3 and above allow passing all anonymous arguments of an +// __extern_always_inline function to some other vararg function. + +// It is possible to compile containing GCC extensions even if GCC is +// run in pedantic mode if the uses are carefully marked using the +// `__extension__' keyword. But this is not generally available before +// version 2.8. + +// __restrict is known in EGCS 1.2 and above. + +// ISO C99 also allows to declare arrays as non-overlapping. The syntax is +// array_name[restrict] +// GCC 3.1 supports this. + +// Describes a char array whose address can safely be passed as the first +// argument to strncpy and strncat, as the char array is not necessarily +// a NUL-terminated string. + +// Undefine (also defined in libc-symbols.h). +// Copies attributes from the declaration or type referenced by +// the argument. + +// Determine the wordsize from the preprocessor defines. + +// Both x86-64 and x32 use the 64-bit system call interface. +// Properties of long double type. ldbl-96 version. +// Copyright (C) 2016-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// long double is distinct from double, so there is nothing to +// define here. + +// __glibc_macro_warning (MESSAGE) issues warning MESSAGE. This is +// intended for use in preprocessor macros. +// +// Note: MESSAGE must be a _single_ string; concatenation of string +// literals is not supported. + +// Generic selection (ISO C11) is a C-only feature, available in GCC +// since version 4.9. 
Previous versions do not provide generic +// selection, even though they might set __STDC_VERSION__ to 201112L, +// when in -std=c11 mode. Thus, we must check for !defined __GNUC__ +// when testing __STDC_VERSION__ for generic selection support. +// On the other hand, Clang also defines __GNUC__, so a clang-specific +// check is required to enable the use of generic selection. + +// If we don't have __REDIRECT, prototypes will be missing if +// __USE_FILE_OFFSET64 but not __USE_LARGEFILE[64]. + +// Decide whether we can define 'extern inline' functions in headers. + +// This is here only because every header file already includes this one. +// Get the definitions of all the appropriate `__stub_FUNCTION' symbols. +// contains `#define __stub_FUNCTION' when FUNCTION is a stub +// that will always return failure (and set errno to ENOSYS). +// This file is automatically generated. +// This file selects the right generated file of `__stub_FUNCTION' macros +// based on the architecture being compiled for. + +// This file is automatically generated. +// It defines a symbol `__stub_FUNCTION' for each function +// in the C library which is a stub, meaning it will fail +// every time called, usually setting errno to ENOSYS. + +// Get the platform dependent bits of `poll'. +// Copyright (C) 1997-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Event types that can be polled for. These bits may be set in `events' +// to indicate the interesting event types; they will appear in `revents' +// to indicate the status of the file descriptor. + +// These values are defined in XPG4.2. + +// Event types always implicitly polled for. These bits need not be set in +// `events', but they will appear in `revents' to indicate the status of +// the file descriptor. + +// Type used for the number of file descriptors. +type Nfds_t = uint64 /* poll.h:33:27 */ + +// Data structure describing a polling request. +type Pollfd = struct { + Ffd int32 + Fevents int16 + Frevents int16 +} /* poll.h:36:1 */ + +// Define some inlines helping to catch common problems. + +var _ int8 /* gen.c:2:13: */ diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/pthread/capi_linux_loong64.go temporal-1.22.5/src/vendor/modernc.org/libc/pthread/capi_linux_loong64.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/pthread/capi_linux_loong64.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/pthread/capi_linux_loong64.go 2024-02-23 09:46:15.000000000 +0000 @@ -0,0 +1,5 @@ +// Code generated by 'ccgo pthread/gen.c -crt-import-path -export-defines -export-enums -export-externs X -export-fields F -export-structs -export-typedefs -header -hide _OSSwapInt16,_OSSwapInt32,_OSSwapInt64 -ignore-unsupported-alignment -o pthread/pthread_linux_amd64.go -pkgname pthread', DO NOT EDIT. 
+ +package pthread + +var CAPI = map[string]struct{}{} diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/pthread/pthread_linux_loong64.go temporal-1.22.5/src/vendor/modernc.org/libc/pthread/pthread_linux_loong64.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/pthread/pthread_linux_loong64.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/pthread/pthread_linux_loong64.go 2024-02-23 09:46:15.000000000 +0000 @@ -0,0 +1,1873 @@ +// Code generated by 'ccgo pthread/gen.c -crt-import-path "" -export-defines "" -export-enums "" -export-externs X -export-fields F -export-structs "" -export-typedefs "" -header -hide _OSSwapInt16,_OSSwapInt32,_OSSwapInt64 -ignore-unsupported-alignment -o pthread/pthread_linux_amd64.go -pkgname pthread', DO NOT EDIT. + +package pthread + +import ( + "math" + "reflect" + "sync/atomic" + "unsafe" +) + +var _ = math.Pi +var _ reflect.Kind +var _ atomic.Value +var _ unsafe.Pointer + +const ( + CLOCK_BOOTTIME = 7 // time.h:60:1: + CLOCK_BOOTTIME_ALARM = 9 // time.h:64:1: + CLOCK_MONOTONIC = 1 // time.h:48:1: + CLOCK_MONOTONIC_COARSE = 6 // time.h:58:1: + CLOCK_MONOTONIC_RAW = 4 // time.h:54:1: + CLOCK_PROCESS_CPUTIME_ID = 2 // time.h:50:1: + CLOCK_REALTIME = 0 // time.h:46:1: + CLOCK_REALTIME_ALARM = 8 // time.h:62:1: + CLOCK_REALTIME_COARSE = 5 // time.h:56:1: + CLOCK_TAI = 11 // time.h:66:1: + CLOCK_THREAD_CPUTIME_ID = 3 // time.h:52:1: + PTHREAD_BARRIER_SERIAL_THREAD = -1 // pthread.h:189:1: + PTHREAD_ONCE_INIT = 0 // pthread.h:182:1: + SCHED_FIFO = 1 // sched.h:29:1: + SCHED_OTHER = 0 // sched.h:28:1: + SCHED_RR = 2 // sched.h:30:1: + TIMER_ABSTIME = 1 // time.h:69:1: + TIME_UTC = 1 // time.h:65:1: + X_ATFILE_SOURCE = 1 // features.h:342:1: + X_BITS_CPU_SET_H = 1 // cpu-set.h:21:1: + X_BITS_ENDIANNESS_H = 1 // endianness.h:2:1: + X_BITS_ENDIAN_H = 1 // endian.h:20:1: + X_BITS_PTHREADTYPES_ARCH_H = 1 // pthreadtypes-arch.h:19:1: + X_BITS_PTHREADTYPES_COMMON_H = 1 // pthreadtypes.h:20:1: + X_BITS_SCHED_H = 1 // sched.h:21:1: + X_BITS_SETJMP_H = 1 // setjmp.h:20:1: + X_BITS_TIME64_H = 1 // time64.h:24:1: + X_BITS_TIME_H = 1 // time.h:24:1: + X_BITS_TYPESIZES_H = 1 // typesizes.h:24:1: + X_BITS_TYPES_H = 1 // types.h:24:1: + X_BITS_TYPES_LOCALE_T_H = 1 // locale_t.h:20:1: + X_BITS_TYPES_STRUCT_SCHED_PARAM = 1 // struct_sched_param.h:20:1: + X_BITS_TYPES___LOCALE_T_H = 1 // __locale_t.h:21:1: + X_BSD_SIZE_T_ = 0 // stddef.h:189:1: + X_BSD_SIZE_T_DEFINED_ = 0 // stddef.h:192:1: + X_DEFAULT_SOURCE = 1 // features.h:227:1: + X_FEATURES_H = 1 // features.h:19:1: + X_FILE_OFFSET_BITS = 64 // :25:1: + X_GCC_SIZE_T = 0 // stddef.h:195:1: + X_LP64 = 1 // :284:1: + X_POSIX_C_SOURCE = 200809 // features.h:281:1: + X_POSIX_SOURCE = 1 // features.h:279:1: + X_PTHREAD_H = 1 // pthread.h:19:1: + X_RWLOCK_INTERNAL_H = 0 // struct_rwlock.h:21:1: + X_SCHED_H = 1 // sched.h:20:1: + X_SIZET_ = 0 // stddef.h:196:1: + X_SIZE_T = 0 // stddef.h:183:1: + X_SIZE_T_ = 0 // stddef.h:188:1: + X_SIZE_T_DECLARED = 0 // stddef.h:193:1: + X_SIZE_T_DEFINED = 0 // stddef.h:191:1: + X_SIZE_T_DEFINED_ = 0 // stddef.h:190:1: + X_STDC_PREDEF_H = 1 // :162:1: + X_STRUCT_TIMESPEC = 1 // struct_timespec.h:3:1: + X_SYS_CDEFS_H = 1 // cdefs.h:19:1: + X_SYS_SIZE_T_H = 0 // stddef.h:184:1: + X_THREAD_MUTEX_INTERNAL_H = 1 // struct_mutex.h:20:1: + X_THREAD_SHARED_TYPES_H = 1 // thread-shared-types.h:20:1: + X_TIME_H = 1 // time.h:23:1: + X_T_SIZE = 0 // stddef.h:186:1: + X_T_SIZE_ = 0 // stddef.h:185:1: + Linux = 1 // :231:1: + Unix = 1 // :177:1: +) + +// Read-write lock 
types. +const ( /* pthread.h:100:1: */ + PTHREAD_RWLOCK_PREFER_READER_NP = 0 + PTHREAD_RWLOCK_PREFER_WRITER_NP = 1 + PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP = 2 + PTHREAD_RWLOCK_DEFAULT_NP = 0 +) + +// Read-write lock initializers. + +// Scheduler inheritance. +const ( /* pthread.h:120:1: */ + PTHREAD_INHERIT_SCHED = 0 + PTHREAD_EXPLICIT_SCHED = 1 +) + +// Scope handling. +const ( /* pthread.h:130:1: */ + PTHREAD_SCOPE_SYSTEM = 0 + PTHREAD_SCOPE_PROCESS = 1 +) + +// Process shared or private flag. +const ( /* pthread.h:140:1: */ + PTHREAD_PROCESS_PRIVATE = 0 + PTHREAD_PROCESS_SHARED = 1 +) + +// Cancellation +const ( /* pthread.h:164:1: */ + PTHREAD_CANCEL_ENABLE = 0 + PTHREAD_CANCEL_DISABLE = 1 +) +const ( /* pthread.h:171:1: */ + PTHREAD_CANCEL_DEFERRED = 0 + PTHREAD_CANCEL_ASYNCHRONOUS = 1 +) + +// Determine the wordsize from the preprocessor defines. + +// Both x86-64 and x32 use the 64-bit system call interface. +// NB: Include guard matches what uses. + +// Detach state. +const ( /* pthread.h:33:1: */ + PTHREAD_CREATE_JOINABLE = 0 + PTHREAD_CREATE_DETACHED = 1 +) + +// Mutex types. +const ( /* pthread.h:43:1: */ + PTHREAD_MUTEX_TIMED_NP = 0 + PTHREAD_MUTEX_RECURSIVE_NP = 1 + PTHREAD_MUTEX_ERRORCHECK_NP = 2 + PTHREAD_MUTEX_ADAPTIVE_NP = 3 + PTHREAD_MUTEX_NORMAL = 0 + PTHREAD_MUTEX_RECURSIVE = 1 + PTHREAD_MUTEX_ERRORCHECK = 2 + PTHREAD_MUTEX_DEFAULT = 0 +) + +// Robust mutex or not flags. +const ( /* pthread.h:65:1: */ + PTHREAD_MUTEX_STALLED = 0 + PTHREAD_MUTEX_STALLED_NP = 0 + PTHREAD_MUTEX_ROBUST = 1 + PTHREAD_MUTEX_ROBUST_NP = 1 +) + +// Mutex protocols. +const ( /* pthread.h:77:1: */ + PTHREAD_PRIO_NONE = 0 + PTHREAD_PRIO_INHERIT = 1 + PTHREAD_PRIO_PROTECT = 2 +) + +type Ptrdiff_t = int64 /* :3:26 */ + +type Size_t = uint64 /* :9:23 */ + +type Wchar_t = int32 /* :15:24 */ + +type X__int128_t = struct { + Flo int64 + Fhi int64 +} /* :21:43 */ // must match modernc.org/mathutil.Int128 +type X__uint128_t = struct { + Flo uint64 + Fhi uint64 +} /* :22:44 */ // must match modernc.org/mathutil.Int128 + +type X__builtin_va_list = uintptr /* :46:14 */ +type X__float128 = float64 /* :47:21 */ + +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// These are defined by the user (or the compiler) +// to specify the desired environment: +// +// __STRICT_ANSI__ ISO Standard C. +// _ISOC99_SOURCE Extensions to ISO C89 from ISO C99. +// _ISOC11_SOURCE Extensions to ISO C99 from ISO C11. +// _ISOC2X_SOURCE Extensions to ISO C99 from ISO C2X. +// __STDC_WANT_LIB_EXT2__ +// Extensions to ISO C99 from TR 27431-2:2010. +// __STDC_WANT_IEC_60559_BFP_EXT__ +// Extensions to ISO C11 from TS 18661-1:2014. +// __STDC_WANT_IEC_60559_FUNCS_EXT__ +// Extensions to ISO C11 from TS 18661-4:2015. +// __STDC_WANT_IEC_60559_TYPES_EXT__ +// Extensions to ISO C11 from TS 18661-3:2015. +// +// _POSIX_SOURCE IEEE Std 1003.1. +// _POSIX_C_SOURCE If ==1, like _POSIX_SOURCE; if >=2 add IEEE Std 1003.2; +// if >=199309L, add IEEE Std 1003.1b-1993; +// if >=199506L, add IEEE Std 1003.1c-1995; +// if >=200112L, all of IEEE 1003.1-2004 +// if >=200809L, all of IEEE 1003.1-2008 +// _XOPEN_SOURCE Includes POSIX and XPG things. Set to 500 if +// Single Unix conformance is wanted, to 600 for the +// sixth revision, to 700 for the seventh revision. +// _XOPEN_SOURCE_EXTENDED XPG things and X/Open Unix extensions. +// _LARGEFILE_SOURCE Some more functions for correct standard I/O. +// _LARGEFILE64_SOURCE Additional functionality from LFS for large files. +// _FILE_OFFSET_BITS=N Select default filesystem interface. +// _ATFILE_SOURCE Additional *at interfaces. +// _GNU_SOURCE All of the above, plus GNU extensions. +// _DEFAULT_SOURCE The default set of features (taking precedence over +// __STRICT_ANSI__). +// +// _FORTIFY_SOURCE Add security hardening to many library functions. +// Set to 1 or 2; 2 performs stricter checks than 1. +// +// _REENTRANT, _THREAD_SAFE +// Obsolete; equivalent to _POSIX_C_SOURCE=199506L. +// +// The `-ansi' switch to the GNU C compiler, and standards conformance +// options such as `-std=c99', define __STRICT_ANSI__. If none of +// these are defined, or if _DEFAULT_SOURCE is defined, the default is +// to have _POSIX_SOURCE set to one and _POSIX_C_SOURCE set to +// 200809L, as well as enabling miscellaneous functions from BSD and +// SVID. If more than one of these are defined, they accumulate. For +// example __STRICT_ANSI__, _POSIX_SOURCE and _POSIX_C_SOURCE together +// give you ISO C, 1003.1, and 1003.2, but nothing else. +// +// These are defined by this file and are used by the +// header files to decide what to declare or define: +// +// __GLIBC_USE (F) Define things from feature set F. This is defined +// to 1 or 0; the subsequent macros are either defined +// or undefined, and those tests should be moved to +// __GLIBC_USE. +// __USE_ISOC11 Define ISO C11 things. +// __USE_ISOC99 Define ISO C99 things. +// __USE_ISOC95 Define ISO C90 AMD1 (C95) things. +// __USE_ISOCXX11 Define ISO C++11 things. +// __USE_POSIX Define IEEE Std 1003.1 things. +// __USE_POSIX2 Define IEEE Std 1003.2 things. +// __USE_POSIX199309 Define IEEE Std 1003.1, and .1b things. +// __USE_POSIX199506 Define IEEE Std 1003.1, .1b, .1c and .1i things. +// __USE_XOPEN Define XPG things. +// __USE_XOPEN_EXTENDED Define X/Open Unix things. +// __USE_UNIX98 Define Single Unix V2 things. +// __USE_XOPEN2K Define XPG6 things. +// __USE_XOPEN2KXSI Define XPG6 XSI things. +// __USE_XOPEN2K8 Define XPG7 things. +// __USE_XOPEN2K8XSI Define XPG7 XSI things. 
+// __USE_LARGEFILE Define correct standard I/O things. +// __USE_LARGEFILE64 Define LFS things with separate names. +// __USE_FILE_OFFSET64 Define 64bit interface as default. +// __USE_MISC Define things from 4.3BSD or System V Unix. +// __USE_ATFILE Define *at interfaces and AT_* constants for them. +// __USE_GNU Define GNU extensions. +// __USE_FORTIFY_LEVEL Additional security measures used, according to level. +// +// The macros `__GNU_LIBRARY__', `__GLIBC__', and `__GLIBC_MINOR__' are +// defined by this file unconditionally. `__GNU_LIBRARY__' is provided +// only for compatibility. All new code should use the other symbols +// to test for features. +// +// All macros listed above as possibly being defined by this file are +// explicitly undefined if they are not explicitly defined. +// Feature-test macros that are not defined by the user or compiler +// but are implied by the other feature-test macros defined (or by the +// lack of any definitions) are defined by the file. +// +// ISO C feature test macros depend on the definition of the macro +// when an affected header is included, not when the first system +// header is included, and so they are handled in +// , which does not have a multiple include +// guard. Feature test macros that can be handled from the first +// system header included are handled here. + +// Undefine everything, so we get a clean slate. + +// Suppress kernel-name space pollution unless user expressedly asks +// for it. + +// Convenience macro to test the version of gcc. +// Use like this: +// #if __GNUC_PREREQ (2,8) +// ... code requiring gcc 2.8 or later ... +// #endif +// Note: only works for GCC 2.0 and later, because __GNUC_MINOR__ was +// added in 2.0. + +// Similarly for clang. Features added to GCC after version 4.2 may +// or may not also be available in clang, and clang's definitions of +// __GNUC(_MINOR)__ are fixed at 4 and 2 respectively. Not all such +// features can be queried via __has_extension/__has_feature. + +// Whether to use feature set F. + +// _BSD_SOURCE and _SVID_SOURCE are deprecated aliases for +// _DEFAULT_SOURCE. If _DEFAULT_SOURCE is present we do not +// issue a warning; the expectation is that the source is being +// transitioned to use the new macro. + +// If _GNU_SOURCE was defined by the user, turn on all the other features. + +// If nothing (other than _GNU_SOURCE and _DEFAULT_SOURCE) is defined, +// define _DEFAULT_SOURCE. + +// This is to enable the ISO C2X extension. + +// This is to enable the ISO C11 extension. + +// This is to enable the ISO C99 extension. + +// This is to enable the ISO C90 Amendment 1:1995 extension. + +// If none of the ANSI/POSIX macros are defined, or if _DEFAULT_SOURCE +// is defined, use POSIX.1-2008 (or another version depending on +// _XOPEN_SOURCE). + +// Some C libraries once required _REENTRANT and/or _THREAD_SAFE to be +// defined in all multithreaded code. GNU libc has not required this +// for many years. We now treat them as compatibility synonyms for +// _POSIX_C_SOURCE=199506L, which is the earliest level of POSIX with +// comprehensive support for multithreaded code. Using them never +// lowers the selected level of POSIX conformance, only raises it. + +// The function 'gets' existed in C89, but is impossible to use +// safely. It has been removed from ISO C11 and ISO C++14. Note: for +// compatibility with various implementations of , this test +// must consider only the value of __cplusplus when compiling C++. 
+ +// GNU formerly extended the scanf functions with modified format +// specifiers %as, %aS, and %a[...] that allocate a buffer for the +// input using malloc. This extension conflicts with ISO C99, which +// defines %a as a standalone format specifier that reads a floating- +// point number; moreover, POSIX.1-2008 provides the same feature +// using the modifier letter 'm' instead (%ms, %mS, %m[...]). +// +// We now follow C99 unless GNU extensions are active and the compiler +// is specifically in C89 or C++98 mode (strict or not). For +// instance, with GCC, -std=gnu11 will have C99-compliant scanf with +// or without -D_GNU_SOURCE, but -std=c89 -D_GNU_SOURCE will have the +// old extension. + +// Get definitions of __STDC_* predefined macros, if the compiler has +// not preincluded this header automatically. +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// This macro indicates that the installed library is the GNU C Library. +// For historic reasons the value now is 6 and this will stay from now +// on. The use of this variable is deprecated. Use __GLIBC__ and +// __GLIBC_MINOR__ now (see below) when you want to test for a specific +// GNU C library version and use the values in to get +// the sonames of the shared libraries. + +// Major and minor version number of the GNU C library package. Use +// these macros to test for features in specific releases. + +// This is here only because every header file already includes this one. +// Copyright (C) 1992-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// We are almost always included from features.h. + +// The GNU libc does not support any K&R compilers or the traditional mode +// of ISO C compilers anymore. Check for some of the combinations not +// anymore supported. + +// Some user header file might have defined this before. + +// All functions, except those with callbacks or those that +// synchronize memory, are leaf functions. + +// GCC can always grok prototypes. For C++ programs we add throw() +// to help it optimize the function calls. But this works only with +// gcc 2.8.x and egcs. 
For gcc 3.2 and up we even mark C functions +// as non-throwing using a function attribute since programs can use +// the -fexceptions options for C code as well. + +// Compilers that are not clang may object to +// #if defined __clang__ && __has_extension(...) +// even though they do not need to evaluate the right-hand side of the &&. + +// These two macros are not used in glibc anymore. They are kept here +// only because some other projects expect the macros to be defined. + +// For these things, GCC behaves the ANSI way normally, +// and the non-ANSI way under -traditional. + +// This is not a typedef so `const __ptr_t' does the right thing. + +// C++ needs to know that types and declarations are C, not C++. + +// Fortify support. + +// Support for flexible arrays. +// Headers that should use flexible arrays only if they're "real" +// (e.g. only if they won't affect sizeof()) should test +// #if __glibc_c99_flexarr_available. + +// __asm__ ("xyz") is used throughout the headers to rename functions +// at the assembly language level. This is wrapped by the __REDIRECT +// macro, in order to support compilers that can do this some other +// way. When compilers don't support asm-names at all, we have to do +// preprocessor tricks instead (which don't have exactly the right +// semantics, but it's the best we can do). +// +// Example: +// int __REDIRECT(setpgrp, (__pid_t pid, __pid_t pgrp), setpgid); + +// +// #elif __SOME_OTHER_COMPILER__ +// +// # define __REDIRECT(name, proto, alias) name proto; _Pragma("let " #name " = " #alias) + +// GCC has various useful declarations that can be made with the +// `__attribute__' syntax. All of the ways we use this do fine if +// they are omitted for compilers that don't understand it. + +// At some point during the gcc 2.96 development the `malloc' attribute +// for functions was introduced. We don't want to use it unconditionally +// (although this would be possible) since it generates warnings. + +// Tell the compiler which arguments to an allocation function +// indicate the size of the allocation. + +// At some point during the gcc 2.96 development the `pure' attribute +// for functions was introduced. We don't want to use it unconditionally +// (although this would be possible) since it generates warnings. + +// This declaration tells the compiler that the value is constant. + +// At some point during the gcc 3.1 development the `used' attribute +// for functions was introduced. We don't want to use it unconditionally +// (although this would be possible) since it generates warnings. + +// Since version 3.2, gcc allows marking deprecated functions. + +// Since version 4.5, gcc also allows one to specify the message printed +// when a deprecated function is used. clang claims to be gcc 4.2, but +// may also support this feature. + +// At some point during the gcc 2.8 development the `format_arg' attribute +// for functions was introduced. We don't want to use it unconditionally +// (although this would be possible) since it generates warnings. +// If several `format_arg' attributes are given for the same function, in +// gcc-3.0 and older, all but the last one are ignored. In newer gccs, +// all designated arguments are considered. + +// At some point during the gcc 2.97 development the `strfmon' format +// attribute for functions was introduced. We don't want to use it +// unconditionally (although this would be possible) since it +// generates warnings. 
+ +// The nonull function attribute allows to mark pointer parameters which +// must not be NULL. + +// If fortification mode, we warn about unused results of certain +// function calls which can lead to problems. + +// Forces a function to be always inlined. +// The Linux kernel defines __always_inline in stddef.h (283d7573), and +// it conflicts with this definition. Therefore undefine it first to +// allow either header to be included first. + +// Associate error messages with the source location of the call site rather +// than with the source location inside the function. + +// GCC 4.3 and above with -std=c99 or -std=gnu99 implements ISO C99 +// inline semantics, unless -fgnu89-inline is used. Using __GNUC_STDC_INLINE__ +// or __GNUC_GNU_INLINE is not a good enough check for gcc because gcc versions +// older than 4.3 may define these macros and still not guarantee GNU inlining +// semantics. +// +// clang++ identifies itself as gcc-4.2, but has support for GNU inlining +// semantics, that can be checked for by using the __GNUC_STDC_INLINE_ and +// __GNUC_GNU_INLINE__ macro definitions. + +// GCC 4.3 and above allow passing all anonymous arguments of an +// __extern_always_inline function to some other vararg function. + +// It is possible to compile containing GCC extensions even if GCC is +// run in pedantic mode if the uses are carefully marked using the +// `__extension__' keyword. But this is not generally available before +// version 2.8. + +// __restrict is known in EGCS 1.2 and above. + +// ISO C99 also allows to declare arrays as non-overlapping. The syntax is +// array_name[restrict] +// GCC 3.1 supports this. + +// Describes a char array whose address can safely be passed as the first +// argument to strncpy and strncat, as the char array is not necessarily +// a NUL-terminated string. + +// Undefine (also defined in libc-symbols.h). +// Copies attributes from the declaration or type referenced by +// the argument. + +// Determine the wordsize from the preprocessor defines. + +// Both x86-64 and x32 use the 64-bit system call interface. +// Properties of long double type. ldbl-96 version. +// Copyright (C) 2016-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// long double is distinct from double, so there is nothing to +// define here. + +// __glibc_macro_warning (MESSAGE) issues warning MESSAGE. This is +// intended for use in preprocessor macros. +// +// Note: MESSAGE must be a _single_ string; concatenation of string +// literals is not supported. + +// Generic selection (ISO C11) is a C-only feature, available in GCC +// since version 4.9. Previous versions do not provide generic +// selection, even though they might set __STDC_VERSION__ to 201112L, +// when in -std=c11 mode. 
Thus, we must check for !defined __GNUC__ +// when testing __STDC_VERSION__ for generic selection support. +// On the other hand, Clang also defines __GNUC__, so a clang-specific +// check is required to enable the use of generic selection. + +// If we don't have __REDIRECT, prototypes will be missing if +// __USE_FILE_OFFSET64 but not __USE_LARGEFILE[64]. + +// Decide whether we can define 'extern inline' functions in headers. + +// This is here only because every header file already includes this one. +// Get the definitions of all the appropriate `__stub_FUNCTION' symbols. +// contains `#define __stub_FUNCTION' when FUNCTION is a stub +// that will always return failure (and set errno to ENOSYS). +// This file is automatically generated. +// This file selects the right generated file of `__stub_FUNCTION' macros +// based on the architecture being compiled for. + +// This file is automatically generated. +// It defines a symbol `__stub_FUNCTION' for each function +// in the C library which is a stub, meaning it will fail +// every time called, usually setting errno to ENOSYS. + +// Definitions for POSIX 1003.1b-1993 (aka POSIX.4) scheduling interface. +// Copyright (C) 1996-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Get type definitions. +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Determine the wordsize from the preprocessor defines. + +// Both x86-64 and x32 use the 64-bit system call interface. +// Bit size of the time_t type at glibc build time, x86-64 and x32 case. +// Copyright (C) 2018-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// For others, time size is word size. + +// Convenience types. +type X__u_char = uint8 /* types.h:31:23 */ +type X__u_short = uint16 /* types.h:32:28 */ +type X__u_int = uint32 /* types.h:33:22 */ +type X__u_long = uint64 /* types.h:34:27 */ + +// Fixed-size types, underlying types depend on word size and compiler. +type X__int8_t = int8 /* types.h:37:21 */ +type X__uint8_t = uint8 /* types.h:38:23 */ +type X__int16_t = int16 /* types.h:39:26 */ +type X__uint16_t = uint16 /* types.h:40:28 */ +type X__int32_t = int32 /* types.h:41:20 */ +type X__uint32_t = uint32 /* types.h:42:22 */ +type X__int64_t = int64 /* types.h:44:25 */ +type X__uint64_t = uint64 /* types.h:45:27 */ + +// Smallest types with at least a given width. +type X__int_least8_t = X__int8_t /* types.h:52:18 */ +type X__uint_least8_t = X__uint8_t /* types.h:53:19 */ +type X__int_least16_t = X__int16_t /* types.h:54:19 */ +type X__uint_least16_t = X__uint16_t /* types.h:55:20 */ +type X__int_least32_t = X__int32_t /* types.h:56:19 */ +type X__uint_least32_t = X__uint32_t /* types.h:57:20 */ +type X__int_least64_t = X__int64_t /* types.h:58:19 */ +type X__uint_least64_t = X__uint64_t /* types.h:59:20 */ + +// quad_t is also 64 bits. +type X__quad_t = int64 /* types.h:63:18 */ +type X__u_quad_t = uint64 /* types.h:64:27 */ + +// Largest integral types. +type X__intmax_t = int64 /* types.h:72:18 */ +type X__uintmax_t = uint64 /* types.h:73:27 */ + +// The machine-dependent file defines __*_T_TYPE +// macros for each of the OS types we define below. 
The definitions +// of those macros must use the following macros for underlying types. +// We define __S_TYPE and __U_TYPE for the signed and unsigned +// variants of each of the following integer types on this machine. +// +// 16 -- "natural" 16-bit type (always short) +// 32 -- "natural" 32-bit type (always int) +// 64 -- "natural" 64-bit type (long or long long) +// LONG32 -- 32-bit type, traditionally long +// QUAD -- 64-bit type, traditionally long long +// WORD -- natural type of __WORDSIZE bits (int or long) +// LONGWORD -- type of __WORDSIZE bits, traditionally long +// +// We distinguish WORD/LONGWORD, 32/LONG32, and 64/QUAD so that the +// conventional uses of `long' or `long long' type modifiers match the +// types we define, even when a less-adorned type would be the same size. +// This matters for (somewhat) portably writing printf/scanf formats for +// these types, where using the appropriate l or ll format modifiers can +// make the typedefs and the formats match up across all GNU platforms. If +// we used `long' when it's 64 bits where `long long' is expected, then the +// compiler would warn about the formats not matching the argument types, +// and the programmer changing them to shut up the compiler would break the +// program's portability. +// +// Here we assume what is presently the case in all the GCC configurations +// we support: long long is always 64 bits, long is always word/address size, +// and int is always 32 bits. + +// No need to mark the typedef with __extension__. +// bits/typesizes.h -- underlying types for *_t. Linux/x86-64 version. +// Copyright (C) 2012-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// See for the meaning of these macros. This file exists so +// that need not vary across different GNU platforms. + +// X32 kernel interface is 64-bit. + +// Tell the libc code that off_t and off64_t are actually the same type +// for all ABI purposes, even if possibly expressed as different base types +// for C type-checking purposes. + +// Same for ino_t and ino64_t. + +// And for __rlim_t and __rlim64_t. + +// And for fsblkcnt_t, fsblkcnt64_t, fsfilcnt_t and fsfilcnt64_t. + +// Number of descriptors that can fit in an `fd_set'. + +// bits/time64.h -- underlying types for __time64_t. Generic version. +// Copyright (C) 2018-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. 
+// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Define __TIME64_T_TYPE so that it is always a 64-bit type. + +// If we already have 64-bit time type then use it. + +type X__dev_t = uint64 /* types.h:145:25 */ // Type of device numbers. +type X__uid_t = uint32 /* types.h:146:25 */ // Type of user identifications. +type X__gid_t = uint32 /* types.h:147:25 */ // Type of group identifications. +type X__ino_t = uint64 /* types.h:148:25 */ // Type of file serial numbers. +type X__ino64_t = uint64 /* types.h:149:27 */ // Type of file serial numbers (LFS). +type X__mode_t = uint32 /* types.h:150:26 */ // Type of file attribute bitmasks. +type X__nlink_t = uint64 /* types.h:151:27 */ // Type of file link counts. +type X__off_t = int64 /* types.h:152:25 */ // Type of file sizes and offsets. +type X__off64_t = int64 /* types.h:153:27 */ // Type of file sizes and offsets (LFS). +type X__pid_t = int32 /* types.h:154:25 */ // Type of process identifications. +type X__fsid_t = struct{ F__val [2]int32 } /* types.h:155:26 */ // Type of file system IDs. +type X__clock_t = int64 /* types.h:156:27 */ // Type of CPU usage counts. +type X__rlim_t = uint64 /* types.h:157:26 */ // Type for resource measurement. +type X__rlim64_t = uint64 /* types.h:158:28 */ // Type for resource measurement (LFS). +type X__id_t = uint32 /* types.h:159:24 */ // General type for IDs. +type X__time_t = int64 /* types.h:160:26 */ // Seconds since the Epoch. +type X__useconds_t = uint32 /* types.h:161:30 */ // Count of microseconds. +type X__suseconds_t = int64 /* types.h:162:31 */ // Signed count of microseconds. + +type X__daddr_t = int32 /* types.h:164:27 */ // The type of a disk address. +type X__key_t = int32 /* types.h:165:25 */ // Type of an IPC key. + +// Clock ID used in clock and timer functions. +type X__clockid_t = int32 /* types.h:168:29 */ + +// Timer ID returned by `timer_create'. +type X__timer_t = uintptr /* types.h:171:12 */ + +// Type to represent block size. +type X__blksize_t = int64 /* types.h:174:29 */ + +// Types from the Large File Support interface. + +// Type to count number of disk blocks. +type X__blkcnt_t = int64 /* types.h:179:28 */ +type X__blkcnt64_t = int64 /* types.h:180:30 */ + +// Type to count file system blocks. +type X__fsblkcnt_t = uint64 /* types.h:183:30 */ +type X__fsblkcnt64_t = uint64 /* types.h:184:32 */ + +// Type to count file system nodes. +type X__fsfilcnt_t = uint64 /* types.h:187:30 */ +type X__fsfilcnt64_t = uint64 /* types.h:188:32 */ + +// Type of miscellaneous file system fields. +type X__fsword_t = int64 /* types.h:191:28 */ + +type X__ssize_t = int64 /* types.h:193:27 */ // Type of a byte count, or error. + +// Signed long type used in system calls. +type X__syscall_slong_t = int64 /* types.h:196:33 */ +// Unsigned long type used in system calls. +type X__syscall_ulong_t = uint64 /* types.h:198:33 */ + +// These few don't really vary by system, they always correspond +// +// to one of the other defined types. +type X__loff_t = X__off64_t /* types.h:202:19 */ // Type of file sizes and offsets (LFS). +type X__caddr_t = uintptr /* types.h:203:14 */ + +// Duplicates info from stdint.h but this is used in unistd.h. 
+type X__intptr_t = int64 /* types.h:206:25 */ + +// Duplicate info from sys/socket.h. +type X__socklen_t = uint32 /* types.h:209:23 */ + +// C99: An integer type that can be accessed as an atomic entity, +// +// even in the presence of asynchronous interrupts. +// It is not currently necessary for this to be machine-specific. +type X__sig_atomic_t = int32 /* types.h:214:13 */ + +// Wide character type. +// Locale-writers should change this as necessary to +// be big enough to hold unique values not between 0 and 127, +// and not (wchar_t) -1, for each defined multibyte character. + +// Define this type if we are doing the whole job, +// or if we want this type in particular. + +// A null pointer constant. + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Returned by `time'. +type Time_t = X__time_t /* time_t.h:7:18 */ + +// NB: Include guard matches what uses. + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Endian macros for string.h functions +// Copyright (C) 1992-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . 
+ +// Definitions for byte order, according to significance of bytes, +// from low addresses to high addresses. The value is what you get by +// putting '4' in the most significant byte, '3' in the second most +// significant byte, '2' in the second least significant byte, and '1' +// in the least significant byte, and then writing down one digit for +// each byte, starting with the byte at the lowest address at the left, +// and proceeding to the byte with the highest address at the right. + +// This file defines `__BYTE_ORDER' for the particular machine. + +// i386/x86_64 are little-endian. + +// Some machines may need to use a different endianness for floating point +// values. + +// POSIX.1b structure for a time value. This is like a `struct timeval' but +// +// has nanoseconds instead of microseconds. +type Timespec = struct { + Ftv_sec X__time_t + Ftv_nsec X__syscall_slong_t +} /* struct_timespec.h:10:1 */ + +type Pid_t = X__pid_t /* sched.h:38:17 */ + +// Get system specific constant and data structure definitions. +// Definitions of constants and data structure for POSIX 1003.1b-1993 +// scheduling interface. +// Copyright (C) 1996-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Scheduling algorithms. + +// Sched parameter structure. Generic version. +// Copyright (C) 1996-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Data structure to describe a process' schedulability. +type Sched_param = struct{ Fsched_priority int32 } /* struct_sched_param.h:23:1 */ + +// Definition of the cpu_set_t structure used by the POSIX 1003.1b-1993 +// scheduling interface. +// Copyright (C) 1996-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. 
+// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Size definition for CPU sets. + +// Type for array elements in 'cpu_set_t'. +type X__cpu_mask = uint64 /* cpu-set.h:32:25 */ + +// Basic access functions. + +// Data structure to describe CPU mask. +type Cpu_set_t = struct{ F__bits [16]X__cpu_mask } /* cpu-set.h:42:3 */ + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// ISO C99 Standard: 7.23 Date and time + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Copyright (C) 1989-2020 Free Software Foundation, Inc. +// +// This file is part of GCC. +// +// GCC is free software; you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation; either version 3, or (at your option) +// any later version. +// +// GCC is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// Under Section 7 of GPL version 3, you are granted additional +// permissions described in the GCC Runtime Library Exception, version +// 3.1, as published by the Free Software Foundation. +// +// You should have received a copy of the GNU General Public License and +// a copy of the GCC Runtime Library Exception along with this program; +// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +// . + +// ISO C Standard: 7.17 Common definitions + +// Any one of these symbols __need_* means that GNU libc +// wants us just to define one data type. So don't define +// the symbols that indicate this file's entire job has been done. 
+ +// This avoids lossage on SunOS but only if stdtypes.h comes first. +// There's no way to win with the other order! Sun lossage. + +// Sequent's header files use _PTRDIFF_T_ in some conflicting way. +// Just ignore it. + +// On VxWorks, may have defined macros like +// _TYPE_size_t which will typedef size_t. fixincludes patched the +// vxTypesBase.h so that this macro is only defined if _GCC_SIZE_T is +// not defined, and so that defining this macro defines _GCC_SIZE_T. +// If we find that the macros are still defined at this point, we must +// invoke them so that the type is defined as expected. + +// In case nobody has defined these types, but we aren't running under +// GCC 2.00, make sure that __PTRDIFF_TYPE__, __SIZE_TYPE__, and +// __WCHAR_TYPE__ have reasonable values. This can happen if the +// parts of GCC is compiled by an older compiler, that actually +// include gstddef.h, such as collect2. + +// Signed type of difference of two pointers. + +// Define this type if we are doing the whole job, +// or if we want this type in particular. + +// Unsigned type of `sizeof' something. + +// Define this type if we are doing the whole job, +// or if we want this type in particular. + +// Wide character type. +// Locale-writers should change this as necessary to +// be big enough to hold unique values not between 0 and 127, +// and not (wchar_t) -1, for each defined multibyte character. + +// Define this type if we are doing the whole job, +// or if we want this type in particular. + +// A null pointer constant. + +// This defines CLOCKS_PER_SEC, which is the number of processor clock +// ticks per second, and possibly a number of other constants. +// System-dependent timing definitions. Linux version. +// Copyright (C) 1996-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. 
+ +// ISO/IEC 9899:1999 7.23.1: Components of time +// The macro `CLOCKS_PER_SEC' is an expression with type `clock_t' that is +// the number per second of the value returned by the `clock' function. +// CAE XSH, Issue 4, Version 2: +// The value of CLOCKS_PER_SEC is required to be 1 million on all +// XSI-conformant systems. + +// Identifier for system-wide realtime clock. +// Monotonic system-wide clock. +// High-resolution timer from the CPU. +// Thread-specific CPU-time clock. +// Monotonic system-wide clock, not adjusted for frequency scaling. +// Identifier for system-wide realtime clock, updated only on ticks. +// Monotonic system-wide clock, updated only on ticks. +// Monotonic system-wide clock that includes time spent in suspension. +// Like CLOCK_REALTIME but also wakes suspended system. +// Like CLOCK_BOOTTIME but also wakes suspended system. +// Like CLOCK_REALTIME but in International Atomic Time. + +// Flag to indicate time is absolute. + +// Many of the typedefs and structs whose official home is this header +// may also need to be defined by other headers. + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Returned by `clock'. +type Clock_t = X__clock_t /* clock_t.h:7:19 */ + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// ISO C `broken-down time' structure. +type Tm = struct { + Ftm_sec int32 + Ftm_min int32 + Ftm_hour int32 + Ftm_mday int32 + Ftm_mon int32 + Ftm_year int32 + Ftm_wday int32 + Ftm_yday int32 + Ftm_isdst int32 + F__ccgo_pad1 [4]byte + Ftm_gmtoff int64 + Ftm_zone uintptr +} /* struct_tm.h:7:1 */ + +// NB: Include guard matches what uses. + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. 
+// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Clock ID used in clock and timer functions. +type Clockid_t = X__clockid_t /* clockid_t.h:7:21 */ + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Timer ID returned by `timer_create'. +type Timer_t = X__timer_t /* timer_t.h:7:19 */ + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// NB: Include guard matches what uses. + +// POSIX.1b structure for timer start values and intervals. +type Itimerspec = struct { + Fit_interval struct { + Ftv_sec X__time_t + Ftv_nsec X__syscall_slong_t + } + Fit_value struct { + Ftv_sec X__time_t + Ftv_nsec X__syscall_slong_t + } +} /* struct_itimerspec.h:8:1 */ + +// Definition of locale_t. +// Copyright (C) 2017-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. 
+// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Definition of struct __locale_struct and __locale_t. +// Copyright (C) 1997-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// Contributed by Ulrich Drepper , 1997. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// POSIX.1-2008: the locale_t type, representing a locale context +// (implementation-namespace version). This type should be treated +// as opaque by applications; some details are exposed for the sake of +// efficiency in e.g. ctype functions. + +type X__locale_struct = struct { + F__locales [13]uintptr + F__ctype_b uintptr + F__ctype_tolower uintptr + F__ctype_toupper uintptr + F__names [13]uintptr +} /* __locale_t.h:28:1 */ + +type X__locale_t = uintptr /* __locale_t.h:42:32 */ + +type Locale_t = X__locale_t /* locale_t.h:24:20 */ + +// Endian macros for string.h functions +// Copyright (C) 1992-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Declaration of common pthread types for all architectures. +// Copyright (C) 2017-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// For internal mutex and condition variable definitions. 
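// A minimal standalone sketch (not part of the generated file above): the ccgo
// translation prefixes C struct members with `F` (Ftm_sec for tm_sec) and adds
// explicit padding fields so the Go layout matches the C ABI. The local `tm`
// type below is a hypothetical copy of the Tm layout shown earlier, used only
// to illustrate that the size and offsets work out on an LP64 target.
package main

import (
	"fmt"
	"unsafe"
)

// tm mirrors the generated Tm struct (glibc's struct tm, LP64 layout).
type tm struct {
	Ftm_sec      int32
	Ftm_min      int32
	Ftm_hour     int32
	Ftm_mday     int32
	Ftm_mon      int32
	Ftm_year     int32
	Ftm_wday     int32
	Ftm_yday     int32
	Ftm_isdst    int32
	F__ccgo_pad1 [4]byte
	Ftm_gmtoff   int64
	Ftm_zone     uintptr
}

func main() {
	var v tm
	// 9 x int32 + 4 bytes of padding + int64 + pointer = 56 bytes,
	// with tm_gmtoff at offset 40, matching the layout shown above.
	fmt.Println(unsafe.Sizeof(v), unsafe.Offsetof(v.Ftm_gmtoff))
}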
+// Common threading primitives definitions for both POSIX and C11. +// Copyright (C) 2017-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Arch-specific definitions. Each architecture must define the following +// macros to define the expected sizes of pthread data types: +// +// __SIZEOF_PTHREAD_ATTR_T - size of pthread_attr_t. +// __SIZEOF_PTHREAD_MUTEX_T - size of pthread_mutex_t. +// __SIZEOF_PTHREAD_MUTEXATTR_T - size of pthread_mutexattr_t. +// __SIZEOF_PTHREAD_COND_T - size of pthread_cond_t. +// __SIZEOF_PTHREAD_CONDATTR_T - size of pthread_condattr_t. +// __SIZEOF_PTHREAD_RWLOCK_T - size of pthread_rwlock_t. +// __SIZEOF_PTHREAD_RWLOCKATTR_T - size of pthread_rwlockattr_t. +// __SIZEOF_PTHREAD_BARRIER_T - size of pthread_barrier_t. +// __SIZEOF_PTHREAD_BARRIERATTR_T - size of pthread_barrierattr_t. +// +// The additional macro defines any constraint for the lock alignment +// inside the thread structures: +// +// __LOCK_ALIGNMENT - for internal lock/futex usage. +// +// Same idea but for the once locking primitive: +// +// __ONCE_ALIGNMENT - for pthread_once_t/once_flag definition. + +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Determine the wordsize from the preprocessor defines. + +// Both x86-64 and x32 use the 64-bit system call interface. + +// Common definition of pthread_mutex_t. + +type X__pthread_internal_list = struct { + F__prev uintptr + F__next uintptr +} /* thread-shared-types.h:49:9 */ + +// Endian macros for string.h functions +// Copyright (C) 1992-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Declaration of common pthread types for all architectures. +// Copyright (C) 2017-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// For internal mutex and condition variable definitions. +// Common threading primitives definitions for both POSIX and C11. +// Copyright (C) 2017-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Arch-specific definitions. Each architecture must define the following +// macros to define the expected sizes of pthread data types: +// +// __SIZEOF_PTHREAD_ATTR_T - size of pthread_attr_t. +// __SIZEOF_PTHREAD_MUTEX_T - size of pthread_mutex_t. +// __SIZEOF_PTHREAD_MUTEXATTR_T - size of pthread_mutexattr_t. +// __SIZEOF_PTHREAD_COND_T - size of pthread_cond_t. +// __SIZEOF_PTHREAD_CONDATTR_T - size of pthread_condattr_t. +// __SIZEOF_PTHREAD_RWLOCK_T - size of pthread_rwlock_t. +// __SIZEOF_PTHREAD_RWLOCKATTR_T - size of pthread_rwlockattr_t. +// __SIZEOF_PTHREAD_BARRIER_T - size of pthread_barrier_t. +// __SIZEOF_PTHREAD_BARRIERATTR_T - size of pthread_barrierattr_t. +// +// The additional macro defines any constraint for the lock alignment +// inside the thread structures: +// +// __LOCK_ALIGNMENT - for internal lock/futex usage. +// +// Same idea but for the once locking primitive: +// +// __ONCE_ALIGNMENT - for pthread_once_t/once_flag definition. + +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Determine the wordsize from the preprocessor defines. + +// Both x86-64 and x32 use the 64-bit system call interface. + +// Common definition of pthread_mutex_t. + +type X__pthread_list_t = X__pthread_internal_list /* thread-shared-types.h:53:3 */ + +type X__pthread_internal_slist = struct{ F__next uintptr } /* thread-shared-types.h:55:9 */ + +type X__pthread_slist_t = X__pthread_internal_slist /* thread-shared-types.h:58:3 */ + +// Arch-specific mutex definitions. A generic implementation is provided +// by sysdeps/nptl/bits/struct_mutex.h. If required, an architecture +// can override it by defining: +// +// 1. struct __pthread_mutex_s (used on both pthread_mutex_t and mtx_t +// definition). It should contains at least the internal members +// defined in the generic version. +// +// 2. __LOCK_ALIGNMENT for any extra attribute for internal lock used with +// atomic operations. +// +// 3. The macro __PTHREAD_MUTEX_INITIALIZER used for static initialization. +// It should initialize the mutex internal flag. + +// x86 internal mutex struct definitions. +// Copyright (C) 2019-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +type X__pthread_mutex_s = struct { + F__lock int32 + F__count uint32 + F__owner int32 + F__nusers uint32 + F__kind int32 + F__spins int16 + F__elision int16 + F__list X__pthread_list_t +} /* struct_mutex.h:22:1 */ + +// Arch-sepecific read-write lock definitions. A generic implementation is +// provided by struct_rwlock.h. If required, an architecture can override it +// by defining: +// +// 1. struct __pthread_rwlock_arch_t (used on pthread_rwlock_t definition). +// It should contain at least the internal members defined in the +// generic version. +// +// 2. The macro __PTHREAD_RWLOCK_INITIALIZER used for static initialization. +// It should initialize the rwlock internal type. + +// x86 internal rwlock struct definitions. +// Copyright (C) 2019-2020 Free Software Foundation, Inc. +// +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +type X__pthread_rwlock_arch_t = struct { + F__readers uint32 + F__writers uint32 + F__wrphase_futex uint32 + F__writers_futex uint32 + F__pad3 uint32 + F__pad4 uint32 + F__cur_writer int32 + F__shared int32 + F__rwelision int8 + F__pad1 [7]uint8 + F__pad2 uint64 + F__flags uint32 + F__ccgo_pad1 [4]byte +} /* struct_rwlock.h:23:1 */ + +// Common definition of pthread_cond_t. + +type X__pthread_cond_s = struct { + F__0 struct{ F__wseq uint64 } + F__8 struct{ F__g1_start uint64 } + F__g_refs [2]uint32 + F__g_size [2]uint32 + F__g1_orig_size uint32 + F__wrefs uint32 + F__g_signals [2]uint32 +} /* thread-shared-types.h:92:1 */ + +// Thread identifiers. The structure of the attribute type is not +// +// exposed on purpose. +type Pthread_t = uint64 /* pthreadtypes.h:27:27 */ + +// Data structures for mutex handling. The structure of the attribute +// +// type is not exposed on purpose. +type Pthread_mutexattr_t = struct { + F__ccgo_pad1 [0]uint32 + F__size [4]int8 +} /* pthreadtypes.h:36:3 */ + +// Data structure for condition variable handling. The structure of +// +// the attribute type is not exposed on purpose. +type Pthread_condattr_t = struct { + F__ccgo_pad1 [0]uint32 + F__size [4]int8 +} /* pthreadtypes.h:45:3 */ + +// Keys for thread-specific data +type Pthread_key_t = uint32 /* pthreadtypes.h:49:22 */ + +// Once-only execution +type Pthread_once_t = int32 /* pthreadtypes.h:53:30 */ + +type Pthread_attr_t1 = struct { + F__ccgo_pad1 [0]uint64 + F__size [56]int8 +} /* pthreadtypes.h:56:1 */ + +type Pthread_attr_t = Pthread_attr_t1 /* pthreadtypes.h:62:30 */ + +type Pthread_mutex_t = struct{ F__data X__pthread_mutex_s } /* pthreadtypes.h:72:3 */ + +type Pthread_cond_t = struct{ F__data X__pthread_cond_s } /* pthreadtypes.h:80:3 */ + +// Data structure for reader-writer lock variable handling. The +// +// structure of the attribute type is deliberately not exposed. +type Pthread_rwlock_t = struct{ F__data X__pthread_rwlock_arch_t } /* pthreadtypes.h:91:3 */ + +type Pthread_rwlockattr_t = struct { + F__ccgo_pad1 [0]uint64 + F__size [8]int8 +} /* pthreadtypes.h:97:3 */ + +// POSIX spinlock data type. +type Pthread_spinlock_t = int32 /* pthreadtypes.h:103:22 */ + +// POSIX barriers data type. The structure of the type is +// +// deliberately not exposed. +type Pthread_barrier_t = struct { + F__ccgo_pad1 [0]uint64 + F__size [32]int8 +} /* pthreadtypes.h:112:3 */ + +type Pthread_barrierattr_t = struct { + F__ccgo_pad1 [0]uint32 + F__size [4]int8 +} /* pthreadtypes.h:118:3 */ + +// Copyright (C) 2001-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Define the machine-dependent type `jmp_buf'. x86-64 version. + +// Determine the wordsize from the preprocessor defines. 
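// A minimal standalone sketch (not part of the generated file above): the
// pthread wrapper types defined earlier (Pthread_mutex_t, Pthread_cond_t,
// Pthread_rwlock_t) are deliberately opaque, but their byte sizes still have
// to match glibc's ABI. The pthreadMutexS type below is a hypothetical local
// copy of the X__pthread_mutex_s layout shown above, used only to check that
// the translated field layout adds up to the expected 40 bytes on 64-bit x86.
package main

import (
	"fmt"
	"unsafe"
)

type pthreadList struct{ prev, next uintptr } // mirrors X__pthread_internal_list

// pthreadMutexS mirrors the generated X__pthread_mutex_s layout.
type pthreadMutexS struct {
	lock    int32
	count   uint32
	owner   int32
	nusers  uint32
	kind    int32
	spins   int16
	elision int16
	list    pthreadList
}

func main() {
	// glibc defines pthread_mutex_t as 40 bytes on 64-bit x86; the mirrored
	// struct agrees if the translation above preserved the C layout.
	fmt.Println(unsafe.Sizeof(pthreadMutexS{}) == 40)
}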
+ +// Both x86-64 and x32 use the 64-bit system call interface. + +type X__jmp_buf = [8]int64 /* setjmp.h:31:18 */ + +// Conditional variable handling. + +// Cleanup buffers +type X_pthread_cleanup_buffer = struct { + F__routine uintptr + F__arg uintptr + F__canceltype int32 + F__ccgo_pad1 [4]byte + F__prev uintptr +} /* pthread.h:155:1 */ + +// Cancellation handling with integration into exception handling. + +type X__pthread_unwind_buf_t = struct { + F__cancel_jmp_buf [1]struct { + F__cancel_jmp_buf X__jmp_buf + F__mask_was_saved int32 + F__ccgo_pad1 [4]byte + } + F__pad [4]uintptr +} /* pthread.h:507:3 */ + +// No special attributes by default. + +// Structure to hold the cleanup handler information. +type X__pthread_cleanup_frame = struct { + F__cancel_routine uintptr + F__cancel_arg uintptr + F__do_it int32 + F__cancel_type int32 +} /* pthread.h:516:1 */ + +var _ int8 /* gen.c:2:13: */ diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/pwd/capi_linux_loong64.go temporal-1.22.5/src/vendor/modernc.org/libc/pwd/capi_linux_loong64.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/pwd/capi_linux_loong64.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/pwd/capi_linux_loong64.go 2024-02-23 09:46:15.000000000 +0000 @@ -0,0 +1,5 @@ +// Code generated by 'ccgo pwd/gen.c -crt-import-path -export-defines -export-enums -export-externs X -export-fields F -export-structs -export-typedefs -header -hide _OSSwapInt16,_OSSwapInt32,_OSSwapInt64 -ignore-unsupported-alignment -o pwd/pwd_linux_amd64.go -pkgname pwd', DO NOT EDIT. + +package pwd + +var CAPI = map[string]struct{}{} diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/pwd/pwd_linux_loong64.go temporal-1.22.5/src/vendor/modernc.org/libc/pwd/pwd_linux_loong64.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/pwd/pwd_linux_loong64.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/pwd/pwd_linux_loong64.go 2024-02-23 09:46:15.000000000 +0000 @@ -0,0 +1,789 @@ +// Code generated by 'ccgo pwd/gen.c -crt-import-path "" -export-defines "" -export-enums "" -export-externs X -export-fields F -export-structs "" -export-typedefs "" -header -hide _OSSwapInt16,_OSSwapInt32,_OSSwapInt64 -ignore-unsupported-alignment -o pwd/pwd_linux_amd64.go -pkgname pwd', DO NOT EDIT. 
+ +package pwd + +import ( + "math" + "reflect" + "sync/atomic" + "unsafe" +) + +var _ = math.Pi +var _ reflect.Kind +var _ atomic.Value +var _ unsafe.Pointer + +const ( + NSS_BUFLEN_PASSWD = 1024 // pwd.h:123:1: + X_ATFILE_SOURCE = 1 // features.h:342:1: + X_BITS_TIME64_H = 1 // time64.h:24:1: + X_BITS_TYPESIZES_H = 1 // typesizes.h:24:1: + X_BITS_TYPES_H = 1 // types.h:24:1: + X_BSD_SIZE_T_ = 0 // stddef.h:189:1: + X_BSD_SIZE_T_DEFINED_ = 0 // stddef.h:192:1: + X_DEFAULT_SOURCE = 1 // features.h:227:1: + X_FEATURES_H = 1 // features.h:19:1: + X_FILE_OFFSET_BITS = 64 // :25:1: + X_GCC_SIZE_T = 0 // stddef.h:195:1: + X_LP64 = 1 // :284:1: + X_POSIX_C_SOURCE = 200809 // features.h:281:1: + X_POSIX_SOURCE = 1 // features.h:279:1: + X_PWD_H = 1 // pwd.h:23:1: + X_SIZET_ = 0 // stddef.h:196:1: + X_SIZE_T = 0 // stddef.h:183:1: + X_SIZE_T_ = 0 // stddef.h:188:1: + X_SIZE_T_DECLARED = 0 // stddef.h:193:1: + X_SIZE_T_DEFINED = 0 // stddef.h:191:1: + X_SIZE_T_DEFINED_ = 0 // stddef.h:190:1: + X_STDC_PREDEF_H = 1 // :162:1: + X_SYS_CDEFS_H = 1 // cdefs.h:19:1: + X_SYS_SIZE_T_H = 0 // stddef.h:184:1: + X_T_SIZE = 0 // stddef.h:186:1: + X_T_SIZE_ = 0 // stddef.h:185:1: + Linux = 1 // :231:1: + Unix = 1 // :177:1: +) + +type Ptrdiff_t = int64 /* :3:26 */ + +type Size_t = uint64 /* :9:23 */ + +type Wchar_t = int32 /* :15:24 */ + +type X__int128_t = struct { + Flo int64 + Fhi int64 +} /* :21:43 */ // must match modernc.org/mathutil.Int128 +type X__uint128_t = struct { + Flo uint64 + Fhi uint64 +} /* :22:44 */ // must match modernc.org/mathutil.Int128 + +type X__builtin_va_list = uintptr /* :46:14 */ +type X__float128 = float64 /* :47:21 */ + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// POSIX Standard: 9.2.2 User Database Access + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// These are defined by the user (or the compiler) +// to specify the desired environment: +// +// __STRICT_ANSI__ ISO Standard C. +// _ISOC99_SOURCE Extensions to ISO C89 from ISO C99. +// _ISOC11_SOURCE Extensions to ISO C99 from ISO C11. 
+// _ISOC2X_SOURCE Extensions to ISO C99 from ISO C2X. +// __STDC_WANT_LIB_EXT2__ +// Extensions to ISO C99 from TR 27431-2:2010. +// __STDC_WANT_IEC_60559_BFP_EXT__ +// Extensions to ISO C11 from TS 18661-1:2014. +// __STDC_WANT_IEC_60559_FUNCS_EXT__ +// Extensions to ISO C11 from TS 18661-4:2015. +// __STDC_WANT_IEC_60559_TYPES_EXT__ +// Extensions to ISO C11 from TS 18661-3:2015. +// +// _POSIX_SOURCE IEEE Std 1003.1. +// _POSIX_C_SOURCE If ==1, like _POSIX_SOURCE; if >=2 add IEEE Std 1003.2; +// if >=199309L, add IEEE Std 1003.1b-1993; +// if >=199506L, add IEEE Std 1003.1c-1995; +// if >=200112L, all of IEEE 1003.1-2004 +// if >=200809L, all of IEEE 1003.1-2008 +// _XOPEN_SOURCE Includes POSIX and XPG things. Set to 500 if +// Single Unix conformance is wanted, to 600 for the +// sixth revision, to 700 for the seventh revision. +// _XOPEN_SOURCE_EXTENDED XPG things and X/Open Unix extensions. +// _LARGEFILE_SOURCE Some more functions for correct standard I/O. +// _LARGEFILE64_SOURCE Additional functionality from LFS for large files. +// _FILE_OFFSET_BITS=N Select default filesystem interface. +// _ATFILE_SOURCE Additional *at interfaces. +// _GNU_SOURCE All of the above, plus GNU extensions. +// _DEFAULT_SOURCE The default set of features (taking precedence over +// __STRICT_ANSI__). +// +// _FORTIFY_SOURCE Add security hardening to many library functions. +// Set to 1 or 2; 2 performs stricter checks than 1. +// +// _REENTRANT, _THREAD_SAFE +// Obsolete; equivalent to _POSIX_C_SOURCE=199506L. +// +// The `-ansi' switch to the GNU C compiler, and standards conformance +// options such as `-std=c99', define __STRICT_ANSI__. If none of +// these are defined, or if _DEFAULT_SOURCE is defined, the default is +// to have _POSIX_SOURCE set to one and _POSIX_C_SOURCE set to +// 200809L, as well as enabling miscellaneous functions from BSD and +// SVID. If more than one of these are defined, they accumulate. For +// example __STRICT_ANSI__, _POSIX_SOURCE and _POSIX_C_SOURCE together +// give you ISO C, 1003.1, and 1003.2, but nothing else. +// +// These are defined by this file and are used by the +// header files to decide what to declare or define: +// +// __GLIBC_USE (F) Define things from feature set F. This is defined +// to 1 or 0; the subsequent macros are either defined +// or undefined, and those tests should be moved to +// __GLIBC_USE. +// __USE_ISOC11 Define ISO C11 things. +// __USE_ISOC99 Define ISO C99 things. +// __USE_ISOC95 Define ISO C90 AMD1 (C95) things. +// __USE_ISOCXX11 Define ISO C++11 things. +// __USE_POSIX Define IEEE Std 1003.1 things. +// __USE_POSIX2 Define IEEE Std 1003.2 things. +// __USE_POSIX199309 Define IEEE Std 1003.1, and .1b things. +// __USE_POSIX199506 Define IEEE Std 1003.1, .1b, .1c and .1i things. +// __USE_XOPEN Define XPG things. +// __USE_XOPEN_EXTENDED Define X/Open Unix things. +// __USE_UNIX98 Define Single Unix V2 things. +// __USE_XOPEN2K Define XPG6 things. +// __USE_XOPEN2KXSI Define XPG6 XSI things. +// __USE_XOPEN2K8 Define XPG7 things. +// __USE_XOPEN2K8XSI Define XPG7 XSI things. +// __USE_LARGEFILE Define correct standard I/O things. +// __USE_LARGEFILE64 Define LFS things with separate names. +// __USE_FILE_OFFSET64 Define 64bit interface as default. +// __USE_MISC Define things from 4.3BSD or System V Unix. +// __USE_ATFILE Define *at interfaces and AT_* constants for them. +// __USE_GNU Define GNU extensions. +// __USE_FORTIFY_LEVEL Additional security measures used, according to level. 
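// A minimal standalone sketch (not part of the generated file above): the
// feature-test macros discussed in this comment block surface in the generated
// const block for this package as X_POSIX_C_SOURCE = 200809 and
// X_FILE_OFFSET_BITS = 64, i.e. the default glibc feature set. The decoder
// below is a hypothetical helper that restates the thresholds from the table
// above; the literal values are copied from the const block, not imported.
package main

import "fmt"

// posixLevel maps a _POSIX_C_SOURCE value to the standard it selects,
// following the thresholds listed in the comment above.
func posixLevel(v int) string {
	switch {
	case v >= 200809:
		return "all of IEEE 1003.1-2008"
	case v >= 200112:
		return "all of IEEE 1003.1-2004"
	case v >= 199506:
		return "adds IEEE Std 1003.1c-1995"
	case v >= 199309:
		return "adds IEEE Std 1003.1b-1993"
	case v >= 2:
		return "adds IEEE Std 1003.2"
	default:
		return "IEEE Std 1003.1"
	}
}

func main() {
	fmt.Println("_POSIX_C_SOURCE=200809:", posixLevel(200809))
	fmt.Println("_FILE_OFFSET_BITS=64: 64-bit off_t is the default interface")
}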
+// +// The macros `__GNU_LIBRARY__', `__GLIBC__', and `__GLIBC_MINOR__' are +// defined by this file unconditionally. `__GNU_LIBRARY__' is provided +// only for compatibility. All new code should use the other symbols +// to test for features. +// +// All macros listed above as possibly being defined by this file are +// explicitly undefined if they are not explicitly defined. +// Feature-test macros that are not defined by the user or compiler +// but are implied by the other feature-test macros defined (or by the +// lack of any definitions) are defined by the file. +// +// ISO C feature test macros depend on the definition of the macro +// when an affected header is included, not when the first system +// header is included, and so they are handled in +// , which does not have a multiple include +// guard. Feature test macros that can be handled from the first +// system header included are handled here. + +// Undefine everything, so we get a clean slate. + +// Suppress kernel-name space pollution unless user expressedly asks +// for it. + +// Convenience macro to test the version of gcc. +// Use like this: +// #if __GNUC_PREREQ (2,8) +// ... code requiring gcc 2.8 or later ... +// #endif +// Note: only works for GCC 2.0 and later, because __GNUC_MINOR__ was +// added in 2.0. + +// Similarly for clang. Features added to GCC after version 4.2 may +// or may not also be available in clang, and clang's definitions of +// __GNUC(_MINOR)__ are fixed at 4 and 2 respectively. Not all such +// features can be queried via __has_extension/__has_feature. + +// Whether to use feature set F. + +// _BSD_SOURCE and _SVID_SOURCE are deprecated aliases for +// _DEFAULT_SOURCE. If _DEFAULT_SOURCE is present we do not +// issue a warning; the expectation is that the source is being +// transitioned to use the new macro. + +// If _GNU_SOURCE was defined by the user, turn on all the other features. + +// If nothing (other than _GNU_SOURCE and _DEFAULT_SOURCE) is defined, +// define _DEFAULT_SOURCE. + +// This is to enable the ISO C2X extension. + +// This is to enable the ISO C11 extension. + +// This is to enable the ISO C99 extension. + +// This is to enable the ISO C90 Amendment 1:1995 extension. + +// If none of the ANSI/POSIX macros are defined, or if _DEFAULT_SOURCE +// is defined, use POSIX.1-2008 (or another version depending on +// _XOPEN_SOURCE). + +// Some C libraries once required _REENTRANT and/or _THREAD_SAFE to be +// defined in all multithreaded code. GNU libc has not required this +// for many years. We now treat them as compatibility synonyms for +// _POSIX_C_SOURCE=199506L, which is the earliest level of POSIX with +// comprehensive support for multithreaded code. Using them never +// lowers the selected level of POSIX conformance, only raises it. + +// The function 'gets' existed in C89, but is impossible to use +// safely. It has been removed from ISO C11 and ISO C++14. Note: for +// compatibility with various implementations of , this test +// must consider only the value of __cplusplus when compiling C++. + +// GNU formerly extended the scanf functions with modified format +// specifiers %as, %aS, and %a[...] that allocate a buffer for the +// input using malloc. This extension conflicts with ISO C99, which +// defines %a as a standalone format specifier that reads a floating- +// point number; moreover, POSIX.1-2008 provides the same feature +// using the modifier letter 'm' instead (%ms, %mS, %m[...]). 
+// +// We now follow C99 unless GNU extensions are active and the compiler +// is specifically in C89 or C++98 mode (strict or not). For +// instance, with GCC, -std=gnu11 will have C99-compliant scanf with +// or without -D_GNU_SOURCE, but -std=c89 -D_GNU_SOURCE will have the +// old extension. + +// Get definitions of __STDC_* predefined macros, if the compiler has +// not preincluded this header automatically. +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// This macro indicates that the installed library is the GNU C Library. +// For historic reasons the value now is 6 and this will stay from now +// on. The use of this variable is deprecated. Use __GLIBC__ and +// __GLIBC_MINOR__ now (see below) when you want to test for a specific +// GNU C library version and use the values in to get +// the sonames of the shared libraries. + +// Major and minor version number of the GNU C library package. Use +// these macros to test for features in specific releases. + +// This is here only because every header file already includes this one. +// Copyright (C) 1992-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// We are almost always included from features.h. + +// The GNU libc does not support any K&R compilers or the traditional mode +// of ISO C compilers anymore. Check for some of the combinations not +// anymore supported. + +// Some user header file might have defined this before. + +// All functions, except those with callbacks or those that +// synchronize memory, are leaf functions. + +// GCC can always grok prototypes. For C++ programs we add throw() +// to help it optimize the function calls. But this works only with +// gcc 2.8.x and egcs. For gcc 3.2 and up we even mark C functions +// as non-throwing using a function attribute since programs can use +// the -fexceptions options for C code as well. + +// Compilers that are not clang may object to +// #if defined __clang__ && __has_extension(...) +// even though they do not need to evaluate the right-hand side of the &&. + +// These two macros are not used in glibc anymore. 
They are kept here +// only because some other projects expect the macros to be defined. + +// For these things, GCC behaves the ANSI way normally, +// and the non-ANSI way under -traditional. + +// This is not a typedef so `const __ptr_t' does the right thing. + +// C++ needs to know that types and declarations are C, not C++. + +// Fortify support. + +// Support for flexible arrays. +// Headers that should use flexible arrays only if they're "real" +// (e.g. only if they won't affect sizeof()) should test +// #if __glibc_c99_flexarr_available. + +// __asm__ ("xyz") is used throughout the headers to rename functions +// at the assembly language level. This is wrapped by the __REDIRECT +// macro, in order to support compilers that can do this some other +// way. When compilers don't support asm-names at all, we have to do +// preprocessor tricks instead (which don't have exactly the right +// semantics, but it's the best we can do). +// +// Example: +// int __REDIRECT(setpgrp, (__pid_t pid, __pid_t pgrp), setpgid); + +// +// #elif __SOME_OTHER_COMPILER__ +// +// # define __REDIRECT(name, proto, alias) name proto; _Pragma("let " #name " = " #alias) + +// GCC has various useful declarations that can be made with the +// `__attribute__' syntax. All of the ways we use this do fine if +// they are omitted for compilers that don't understand it. + +// At some point during the gcc 2.96 development the `malloc' attribute +// for functions was introduced. We don't want to use it unconditionally +// (although this would be possible) since it generates warnings. + +// Tell the compiler which arguments to an allocation function +// indicate the size of the allocation. + +// At some point during the gcc 2.96 development the `pure' attribute +// for functions was introduced. We don't want to use it unconditionally +// (although this would be possible) since it generates warnings. + +// This declaration tells the compiler that the value is constant. + +// At some point during the gcc 3.1 development the `used' attribute +// for functions was introduced. We don't want to use it unconditionally +// (although this would be possible) since it generates warnings. + +// Since version 3.2, gcc allows marking deprecated functions. + +// Since version 4.5, gcc also allows one to specify the message printed +// when a deprecated function is used. clang claims to be gcc 4.2, but +// may also support this feature. + +// At some point during the gcc 2.8 development the `format_arg' attribute +// for functions was introduced. We don't want to use it unconditionally +// (although this would be possible) since it generates warnings. +// If several `format_arg' attributes are given for the same function, in +// gcc-3.0 and older, all but the last one are ignored. In newer gccs, +// all designated arguments are considered. + +// At some point during the gcc 2.97 development the `strfmon' format +// attribute for functions was introduced. We don't want to use it +// unconditionally (although this would be possible) since it +// generates warnings. + +// The nonull function attribute allows to mark pointer parameters which +// must not be NULL. + +// If fortification mode, we warn about unused results of certain +// function calls which can lead to problems. + +// Forces a function to be always inlined. +// The Linux kernel defines __always_inline in stddef.h (283d7573), and +// it conflicts with this definition. Therefore undefine it first to +// allow either header to be included first. 
+ +// Associate error messages with the source location of the call site rather +// than with the source location inside the function. + +// GCC 4.3 and above with -std=c99 or -std=gnu99 implements ISO C99 +// inline semantics, unless -fgnu89-inline is used. Using __GNUC_STDC_INLINE__ +// or __GNUC_GNU_INLINE is not a good enough check for gcc because gcc versions +// older than 4.3 may define these macros and still not guarantee GNU inlining +// semantics. +// +// clang++ identifies itself as gcc-4.2, but has support for GNU inlining +// semantics, that can be checked for by using the __GNUC_STDC_INLINE_ and +// __GNUC_GNU_INLINE__ macro definitions. + +// GCC 4.3 and above allow passing all anonymous arguments of an +// __extern_always_inline function to some other vararg function. + +// It is possible to compile containing GCC extensions even if GCC is +// run in pedantic mode if the uses are carefully marked using the +// `__extension__' keyword. But this is not generally available before +// version 2.8. + +// __restrict is known in EGCS 1.2 and above. + +// ISO C99 also allows to declare arrays as non-overlapping. The syntax is +// array_name[restrict] +// GCC 3.1 supports this. + +// Describes a char array whose address can safely be passed as the first +// argument to strncpy and strncat, as the char array is not necessarily +// a NUL-terminated string. + +// Undefine (also defined in libc-symbols.h). +// Copies attributes from the declaration or type referenced by +// the argument. + +// Determine the wordsize from the preprocessor defines. + +// Both x86-64 and x32 use the 64-bit system call interface. +// Properties of long double type. ldbl-96 version. +// Copyright (C) 2016-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// long double is distinct from double, so there is nothing to +// define here. + +// __glibc_macro_warning (MESSAGE) issues warning MESSAGE. This is +// intended for use in preprocessor macros. +// +// Note: MESSAGE must be a _single_ string; concatenation of string +// literals is not supported. + +// Generic selection (ISO C11) is a C-only feature, available in GCC +// since version 4.9. Previous versions do not provide generic +// selection, even though they might set __STDC_VERSION__ to 201112L, +// when in -std=c11 mode. Thus, we must check for !defined __GNUC__ +// when testing __STDC_VERSION__ for generic selection support. +// On the other hand, Clang also defines __GNUC__, so a clang-specific +// check is required to enable the use of generic selection. + +// If we don't have __REDIRECT, prototypes will be missing if +// __USE_FILE_OFFSET64 but not __USE_LARGEFILE[64]. + +// Decide whether we can define 'extern inline' functions in headers. + +// This is here only because every header file already includes this one. 
+// Get the definitions of all the appropriate `__stub_FUNCTION' symbols. +// contains `#define __stub_FUNCTION' when FUNCTION is a stub +// that will always return failure (and set errno to ENOSYS). +// This file is automatically generated. +// This file selects the right generated file of `__stub_FUNCTION' macros +// based on the architecture being compiled for. + +// This file is automatically generated. +// It defines a symbol `__stub_FUNCTION' for each function +// in the C library which is a stub, meaning it will fail +// every time called, usually setting errno to ENOSYS. + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Determine the wordsize from the preprocessor defines. + +// Both x86-64 and x32 use the 64-bit system call interface. +// Bit size of the time_t type at glibc build time, x86-64 and x32 case. +// Copyright (C) 2018-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// For others, time size is word size. + +// Convenience types. 
+type X__u_char = uint8 /* types.h:31:23 */ +type X__u_short = uint16 /* types.h:32:28 */ +type X__u_int = uint32 /* types.h:33:22 */ +type X__u_long = uint64 /* types.h:34:27 */ + +// Fixed-size types, underlying types depend on word size and compiler. +type X__int8_t = int8 /* types.h:37:21 */ +type X__uint8_t = uint8 /* types.h:38:23 */ +type X__int16_t = int16 /* types.h:39:26 */ +type X__uint16_t = uint16 /* types.h:40:28 */ +type X__int32_t = int32 /* types.h:41:20 */ +type X__uint32_t = uint32 /* types.h:42:22 */ +type X__int64_t = int64 /* types.h:44:25 */ +type X__uint64_t = uint64 /* types.h:45:27 */ + +// Smallest types with at least a given width. +type X__int_least8_t = X__int8_t /* types.h:52:18 */ +type X__uint_least8_t = X__uint8_t /* types.h:53:19 */ +type X__int_least16_t = X__int16_t /* types.h:54:19 */ +type X__uint_least16_t = X__uint16_t /* types.h:55:20 */ +type X__int_least32_t = X__int32_t /* types.h:56:19 */ +type X__uint_least32_t = X__uint32_t /* types.h:57:20 */ +type X__int_least64_t = X__int64_t /* types.h:58:19 */ +type X__uint_least64_t = X__uint64_t /* types.h:59:20 */ + +// quad_t is also 64 bits. +type X__quad_t = int64 /* types.h:63:18 */ +type X__u_quad_t = uint64 /* types.h:64:27 */ + +// Largest integral types. +type X__intmax_t = int64 /* types.h:72:18 */ +type X__uintmax_t = uint64 /* types.h:73:27 */ + +// The machine-dependent file defines __*_T_TYPE +// macros for each of the OS types we define below. The definitions +// of those macros must use the following macros for underlying types. +// We define __S_TYPE and __U_TYPE for the signed and unsigned +// variants of each of the following integer types on this machine. +// +// 16 -- "natural" 16-bit type (always short) +// 32 -- "natural" 32-bit type (always int) +// 64 -- "natural" 64-bit type (long or long long) +// LONG32 -- 32-bit type, traditionally long +// QUAD -- 64-bit type, traditionally long long +// WORD -- natural type of __WORDSIZE bits (int or long) +// LONGWORD -- type of __WORDSIZE bits, traditionally long +// +// We distinguish WORD/LONGWORD, 32/LONG32, and 64/QUAD so that the +// conventional uses of `long' or `long long' type modifiers match the +// types we define, even when a less-adorned type would be the same size. +// This matters for (somewhat) portably writing printf/scanf formats for +// these types, where using the appropriate l or ll format modifiers can +// make the typedefs and the formats match up across all GNU platforms. If +// we used `long' when it's 64 bits where `long long' is expected, then the +// compiler would warn about the formats not matching the argument types, +// and the programmer changing them to shut up the compiler would break the +// program's portability. +// +// Here we assume what is presently the case in all the GCC configurations +// we support: long long is always 64 bits, long is always word/address size, +// and int is always 32 bits. + +// No need to mark the typedef with __extension__. +// bits/typesizes.h -- underlying types for *_t. Linux/x86-64 version. +// Copyright (C) 2012-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. 
+// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// See for the meaning of these macros. This file exists so +// that need not vary across different GNU platforms. + +// X32 kernel interface is 64-bit. + +// Tell the libc code that off_t and off64_t are actually the same type +// for all ABI purposes, even if possibly expressed as different base types +// for C type-checking purposes. + +// Same for ino_t and ino64_t. + +// And for __rlim_t and __rlim64_t. + +// And for fsblkcnt_t, fsblkcnt64_t, fsfilcnt_t and fsfilcnt64_t. + +// Number of descriptors that can fit in an `fd_set'. + +// bits/time64.h -- underlying types for __time64_t. Generic version. +// Copyright (C) 2018-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Define __TIME64_T_TYPE so that it is always a 64-bit type. + +// If we already have 64-bit time type then use it. + +type X__dev_t = uint64 /* types.h:145:25 */ // Type of device numbers. +type X__uid_t = uint32 /* types.h:146:25 */ // Type of user identifications. +type X__gid_t = uint32 /* types.h:147:25 */ // Type of group identifications. +type X__ino_t = uint64 /* types.h:148:25 */ // Type of file serial numbers. +type X__ino64_t = uint64 /* types.h:149:27 */ // Type of file serial numbers (LFS). +type X__mode_t = uint32 /* types.h:150:26 */ // Type of file attribute bitmasks. +type X__nlink_t = uint64 /* types.h:151:27 */ // Type of file link counts. +type X__off_t = int64 /* types.h:152:25 */ // Type of file sizes and offsets. +type X__off64_t = int64 /* types.h:153:27 */ // Type of file sizes and offsets (LFS). +type X__pid_t = int32 /* types.h:154:25 */ // Type of process identifications. +type X__fsid_t = struct{ F__val [2]int32 } /* types.h:155:26 */ // Type of file system IDs. +type X__clock_t = int64 /* types.h:156:27 */ // Type of CPU usage counts. +type X__rlim_t = uint64 /* types.h:157:26 */ // Type for resource measurement. +type X__rlim64_t = uint64 /* types.h:158:28 */ // Type for resource measurement (LFS). +type X__id_t = uint32 /* types.h:159:24 */ // General type for IDs. +type X__time_t = int64 /* types.h:160:26 */ // Seconds since the Epoch. +type X__useconds_t = uint32 /* types.h:161:30 */ // Count of microseconds. +type X__suseconds_t = int64 /* types.h:162:31 */ // Signed count of microseconds. + +type X__daddr_t = int32 /* types.h:164:27 */ // The type of a disk address. +type X__key_t = int32 /* types.h:165:25 */ // Type of an IPC key. + +// Clock ID used in clock and timer functions. 
+type X__clockid_t = int32 /* types.h:168:29 */ + +// Timer ID returned by `timer_create'. +type X__timer_t = uintptr /* types.h:171:12 */ + +// Type to represent block size. +type X__blksize_t = int64 /* types.h:174:29 */ + +// Types from the Large File Support interface. + +// Type to count number of disk blocks. +type X__blkcnt_t = int64 /* types.h:179:28 */ +type X__blkcnt64_t = int64 /* types.h:180:30 */ + +// Type to count file system blocks. +type X__fsblkcnt_t = uint64 /* types.h:183:30 */ +type X__fsblkcnt64_t = uint64 /* types.h:184:32 */ + +// Type to count file system nodes. +type X__fsfilcnt_t = uint64 /* types.h:187:30 */ +type X__fsfilcnt64_t = uint64 /* types.h:188:32 */ + +// Type of miscellaneous file system fields. +type X__fsword_t = int64 /* types.h:191:28 */ + +type X__ssize_t = int64 /* types.h:193:27 */ // Type of a byte count, or error. + +// Signed long type used in system calls. +type X__syscall_slong_t = int64 /* types.h:196:33 */ +// Unsigned long type used in system calls. +type X__syscall_ulong_t = uint64 /* types.h:198:33 */ + +// These few don't really vary by system, they always correspond +// +// to one of the other defined types. +type X__loff_t = X__off64_t /* types.h:202:19 */ // Type of file sizes and offsets (LFS). +type X__caddr_t = uintptr /* types.h:203:14 */ + +// Duplicates info from stdint.h but this is used in unistd.h. +type X__intptr_t = int64 /* types.h:206:25 */ + +// Duplicate info from sys/socket.h. +type X__socklen_t = uint32 /* types.h:209:23 */ + +// C99: An integer type that can be accessed as an atomic entity, +// +// even in the presence of asynchronous interrupts. +// It is not currently necessary for this to be machine-specific. +type X__sig_atomic_t = int32 /* types.h:214:13 */ + +// Wide character type. +// Locale-writers should change this as necessary to +// be big enough to hold unique values not between 0 and 127, +// and not (wchar_t) -1, for each defined multibyte character. + +// Define this type if we are doing the whole job, +// or if we want this type in particular. + +// A null pointer constant. + +// The Single Unix specification says that some more types are +// +// available here. +type Gid_t = X__gid_t /* pwd.h:38:17 */ + +type Uid_t = X__uid_t /* pwd.h:43:17 */ + +// A record in the user database. +type Passwd = struct { + Fpw_name uintptr + Fpw_passwd uintptr + Fpw_uid X__uid_t + Fpw_gid X__gid_t + Fpw_gecos uintptr + Fpw_dir uintptr + Fpw_shell uintptr +} /* pwd.h:49:1 */ + +var _ int8 /* gen.c:2:13: */ diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/signal/capi_linux_loong64.go temporal-1.22.5/src/vendor/modernc.org/libc/signal/capi_linux_loong64.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/signal/capi_linux_loong64.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/signal/capi_linux_loong64.go 2024-02-23 09:46:15.000000000 +0000 @@ -0,0 +1,5 @@ +// Code generated by 'ccgo signal/gen.c -crt-import-path -export-defines -export-enums -export-externs X -export-fields F -export-structs -export-typedefs -header -hide _OSSwapInt16,_OSSwapInt32,_OSSwapInt64 -ignore-unsupported-alignment -o signal/signal_linux_amd64.go -pkgname signal', DO NOT EDIT. 
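// A minimal standalone sketch (not part of the vendored files): the pwd
// package above ends with the Passwd record, whose pointer-typed fields
// (Fpw_name, Fpw_passwd, Fpw_gecos, Fpw_dir, Fpw_shell) reference
// NUL-terminated C strings in ccgo-managed memory. For orientation only, the
// hypothetical passwdEntry type below restates the same record with native Go
// types and shows the colon-separated /etc/passwd line it corresponds to.
package main

import "fmt"

// passwdEntry is a native-Go parallel of struct passwd.
type passwdEntry struct {
	Name, Passwd      string
	UID, GID          uint32
	Gecos, Dir, Shell string
}

// line renders the entry in /etc/passwd format:
// name:passwd:uid:gid:gecos:dir:shell
func (p passwdEntry) line() string {
	return fmt.Sprintf("%s:%s:%d:%d:%s:%s:%s",
		p.Name, p.Passwd, p.UID, p.GID, p.Gecos, p.Dir, p.Shell)
}

func main() {
	e := passwdEntry{
		Name: "daemon", Passwd: "x", UID: 1, GID: 1,
		Gecos: "daemon", Dir: "/usr/sbin", Shell: "/usr/sbin/nologin",
	}
	fmt.Println(e.line()) // daemon:x:1:1:daemon:/usr/sbin:/usr/sbin/nologin
}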
+ +package signal + +var CAPI = map[string]struct{}{} diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/signal/more_linux_loong64.go temporal-1.22.5/src/vendor/modernc.org/libc/signal/more_linux_loong64.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/signal/more_linux_loong64.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/signal/more_linux_loong64.go 2024-02-23 09:46:15.000000000 +0000 @@ -0,0 +1,12 @@ +// Copyright 2020 The Libc Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package signal + +const ( + // /usr/include/asm-generic/signal-defs.h:24:#define SIG_DFL ((__sighandler_t)0) /* default signal handling */ + SIG_DFL = 0 + // /usr/include/asm-generic/signal-defs.h:25:#define SIG_IGN ((__sighandler_t)1) /* ignore signal */ + SIG_IGN = 1 +) diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/signal/signal_linux_loong64.go temporal-1.22.5/src/vendor/modernc.org/libc/signal/signal_linux_loong64.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/signal/signal_linux_loong64.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/signal/signal_linux_loong64.go 2024-02-23 09:46:15.000000000 +0000 @@ -0,0 +1,2198 @@ +// Code generated by 'ccgo signal/gen.c -crt-import-path "" -export-defines "" -export-enums "" -export-externs X -export-fields F -export-structs "" -export-typedefs "" -header -hide _OSSwapInt16,_OSSwapInt32,_OSSwapInt64 -ignore-unsupported-alignment -o signal/signal_linux_amd64.go -pkgname signal', DO NOT EDIT. + +package signal + +import ( + "math" + "reflect" + "sync/atomic" + "unsafe" +) + +var _ = math.Pi +var _ reflect.Kind +var _ atomic.Value +var _ unsafe.Pointer + +const ( + FP_XSTATE_MAGIC1 = 0x46505853 // sigcontext.h:27:1: + FP_XSTATE_MAGIC2 = 0x46505845 // sigcontext.h:28:1: + MINSIGSTKSZ = 2048 // sigstack.h:27:1: + NGREG = 23 // ucontext.h:42:1: + NSIG = 65 // signal.h:181:1: + SA_INTERRUPT = 0x20000000 // sigaction.h:70:1: + SA_NOCLDSTOP = 1 // sigaction.h:56:1: + SA_NOCLDWAIT = 2 // sigaction.h:57:1: + SA_NODEFER = 0x40000000 // sigaction.h:65:1: + SA_NOMASK = 1073741824 // sigaction.h:73:1: + SA_ONESHOT = 2147483648 // sigaction.h:74:1: + SA_ONSTACK = 0x08000000 // sigaction.h:61:1: + SA_RESETHAND = 0x80000000 // sigaction.h:67:1: + SA_RESTART = 0x10000000 // sigaction.h:64:1: + SA_SIGINFO = 4 // sigaction.h:58:1: + SA_STACK = 134217728 // sigaction.h:75:1: + SIGABRT = 6 // signum-generic.h:50:1: + SIGALRM = 14 // signum-generic.h:63:1: + SIGBUS = 7 // signum.h:35:1: + SIGCHLD = 17 // signum.h:41:1: + SIGCLD = 17 // signum-generic.h:88:1: + SIGCONT = 18 // signum.h:43:1: + SIGFPE = 8 // signum-generic.h:51:1: + SIGHUP = 1 // signum-generic.h:56:1: + SIGILL = 4 // signum-generic.h:49:1: + SIGINT = 2 // signum-generic.h:48:1: + SIGIO = 29 // signum-generic.h:86:1: + SIGIOT = 6 // signum-generic.h:87:1: + SIGKILL = 9 // signum-generic.h:59:1: + SIGPIPE = 13 // signum-generic.h:62:1: + SIGPOLL = 29 // signum.h:51:1: + SIGPROF = 27 // signum-generic.h:77:1: + SIGPWR = 30 // signum.h:32:1: + SIGQUIT = 3 // signum-generic.h:57:1: + SIGSEGV = 11 // signum-generic.h:52:1: + SIGSTKFLT = 16 // signum.h:31:1: + SIGSTKSZ = 8192 // sigstack.h:30:1: + SIGSTOP = 19 // signum.h:45:1: + SIGSYS = 31 // signum.h:53:1: + SIGTERM = 15 // signum-generic.h:53:1: + SIGTRAP = 5 // signum-generic.h:58:1: + SIGTSTP = 20 // signum.h:47:1: + SIGTTIN = 21 // signum-generic.h:71:1: + SIGTTOU = 22 // 
signum-generic.h:72:1: + SIGURG = 23 // signum.h:49:1: + SIGUSR1 = 10 // signum.h:37:1: + SIGUSR2 = 12 // signum.h:39:1: + SIGVTALRM = 26 // signum-generic.h:76:1: + SIGWINCH = 28 // signum-generic.h:83:1: + SIGXCPU = 24 // signum-generic.h:74:1: + SIGXFSZ = 25 // signum-generic.h:75:1: + SIG_BLOCK = 0 // sigaction.h:79:1: + SIG_SETMASK = 2 // sigaction.h:81:1: + SIG_UNBLOCK = 1 // sigaction.h:80:1: + X_ATFILE_SOURCE = 1 // features.h:342:1: + X_BITS_ENDIANNESS_H = 1 // endianness.h:2:1: + X_BITS_ENDIAN_H = 1 // endian.h:20:1: + X_BITS_PTHREADTYPES_ARCH_H = 1 // pthreadtypes-arch.h:19:1: + X_BITS_PTHREADTYPES_COMMON_H = 1 // pthreadtypes.h:20:1: + X_BITS_SIGACTION_H = 1 // sigaction.h:20:1: + X_BITS_SIGCONTEXT_H = 1 // sigcontext.h:19:1: + X_BITS_SIGEVENT_CONSTS_H = 1 // sigevent-consts.h:20:1: + X_BITS_SIGINFO_ARCH_H = 1 // siginfo-arch.h:3:1: + X_BITS_SIGINFO_CONSTS_H = 1 // siginfo-consts.h:20:1: + X_BITS_SIGNUM_GENERIC_H = 1 // signum-generic.h:20:1: + X_BITS_SIGNUM_H = 1 // signum.h:20:1: + X_BITS_SIGSTACK_H = 1 // sigstack.h:20:1: + X_BITS_SIGTHREAD_H = 1 // sigthread.h:20:1: + X_BITS_SS_FLAGS_H = 1 // ss_flags.h:20:1: + X_BITS_TIME64_H = 1 // time64.h:24:1: + X_BITS_TYPESIZES_H = 1 // typesizes.h:24:1: + X_BITS_TYPES_H = 1 // types.h:24:1: + X_BSD_SIZE_T_ = 0 // stddef.h:189:1: + X_BSD_SIZE_T_DEFINED_ = 0 // stddef.h:192:1: + X_DEFAULT_SOURCE = 1 // features.h:227:1: + X_FEATURES_H = 1 // features.h:19:1: + X_FILE_OFFSET_BITS = 64 // :25:1: + X_GCC_SIZE_T = 0 // stddef.h:195:1: + X_LP64 = 1 // :284:1: + X_NSIG = 65 // signum-generic.h:100:1: + X_POSIX_C_SOURCE = 200809 // features.h:281:1: + X_POSIX_SOURCE = 1 // features.h:279:1: + X_RWLOCK_INTERNAL_H = 0 // struct_rwlock.h:21:1: + X_SIGNAL_H = 0 // signal.h:23:1: + X_SIZET_ = 0 // stddef.h:196:1: + X_SIZE_T = 0 // stddef.h:183:1: + X_SIZE_T_ = 0 // stddef.h:188:1: + X_SIZE_T_DECLARED = 0 // stddef.h:193:1: + X_SIZE_T_DEFINED = 0 // stddef.h:191:1: + X_SIZE_T_DEFINED_ = 0 // stddef.h:190:1: + X_STDC_PREDEF_H = 1 // :162:1: + X_STRUCT_TIMESPEC = 1 // struct_timespec.h:3:1: + X_SYS_CDEFS_H = 1 // cdefs.h:19:1: + X_SYS_SIZE_T_H = 0 // stddef.h:184:1: + X_SYS_UCONTEXT_H = 1 // ucontext.h:19:1: + X_THREAD_MUTEX_INTERNAL_H = 1 // struct_mutex.h:20:1: + X_THREAD_SHARED_TYPES_H = 1 // thread-shared-types.h:20:1: + X_T_SIZE = 0 // stddef.h:186:1: + X_T_SIZE_ = 0 // stddef.h:185:1: + Linux = 1 // :231:1: + Unix = 1 // :177:1: +) + +// POSIX names to access some of the members. + +// sigevent constants. Linux version. +// Copyright (C) 1997-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// `sigev_notify' values. +const ( /* sigevent-consts.h:27:1: */ + SIGEV_SIGNAL = 0 // Notify via signal. + SIGEV_NONE = 1 // Other notification: meaningless. + SIGEV_THREAD = 2 // Deliver via thread creation. 
+ + SIGEV_THREAD_ID = 4 +) + +// `si_code' values for SIGSEGV signal. +const ( /* siginfo-consts.h:119:1: */ + SEGV_MAPERR = 1 // Address not mapped to object. + SEGV_ACCERR = 2 // Invalid permissions for mapped object. + SEGV_BNDERR = 3 // Bounds checking failure. + SEGV_PKUERR = 4 // Protection key checking failure. + SEGV_ACCADI = 5 // ADI not enabled for mapped object. + SEGV_ADIDERR = 6 // Disrupting MCD error. + SEGV_ADIPERR = 7 +) + +// `si_code' values for SIGBUS signal. +const ( /* siginfo-consts.h:138:1: */ + BUS_ADRALN = 1 // Invalid address alignment. + BUS_ADRERR = 2 // Non-existant physical address. + BUS_OBJERR = 3 // Object specific hardware error. + BUS_MCEERR_AR = 4 // Hardware memory error: action required. + BUS_MCEERR_AO = 5 +) + +// `si_code' values for SIGCHLD signal. +const ( /* siginfo-consts.h:172:1: */ + CLD_EXITED = 1 // Child has exited. + CLD_KILLED = 2 // Child was killed. + CLD_DUMPED = 3 // Child terminated abnormally. + CLD_TRAPPED = 4 // Traced child has trapped. + CLD_STOPPED = 5 // Child has stopped. + CLD_CONTINUED = 6 +) + +// `si_code' values for SIGPOLL signal. +const ( /* siginfo-consts.h:189:1: */ + POLL_IN = 1 // Data input available. + POLL_OUT = 2 // Output buffers available. + POLL_MSG = 3 // Input message available. + POLL_ERR = 4 // I/O error. + POLL_PRI = 5 // High priority input available. + POLL_HUP = 6 +) + +// X/Open requires some more fields with fixed names. + +// siginfo constants. Linux version. +// Copyright (C) 1997-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Most of these constants are uniform across all architectures, but there +// is one exception. +// Architecture-specific adjustments to siginfo_t. x86 version. + +// Values for `si_code'. Positive values are reserved for kernel-generated +// +// signals. +const ( /* siginfo-consts.h:35:1: */ + SI_ASYNCNL = -60 // Sent by asynch name lookup completion. + SI_DETHREAD = -7 // Sent by execve killing subsidiary + // threads. + SI_TKILL = -6 // Sent by tkill. + SI_SIGIO = -5 // Sent by queued SIGIO. + SI_ASYNCIO = -4 // Sent by AIO completion. + SI_MESGQ = -3 // Sent by real time mesq state change. + SI_TIMER = -2 // Sent by timer expiration. + SI_QUEUE = -1 // Sent by sigqueue. + SI_USER = 0 // Sent by kill, sigsend. + SI_KERNEL = 128 +) + +// `si_code' values for SIGILL signal. +const ( /* siginfo-consts.h:71:1: */ + ILL_ILLOPC = 1 // Illegal opcode. + ILL_ILLOPN = 2 // Illegal operand. + ILL_ILLADR = 3 // Illegal addressing mode. + ILL_ILLTRP = 4 // Illegal trap. + ILL_PRVOPC = 5 // Privileged opcode. + ILL_PRVREG = 6 // Privileged register. + ILL_COPROC = 7 // Coprocessor error. + ILL_BADSTK = 8 // Internal stack error. + ILL_BADIADDR = 9 +) + +// `si_code' values for SIGFPE signal. +const ( /* siginfo-consts.h:94:1: */ + FPE_INTDIV = 1 // Integer divide by zero. 
+ FPE_INTOVF = 2 // Integer overflow. + FPE_FLTDIV = 3 // Floating point divide by zero. + FPE_FLTOVF = 4 // Floating point overflow. + FPE_FLTUND = 5 // Floating point underflow. + FPE_FLTRES = 6 // Floating point inexact result. + FPE_FLTINV = 7 // Floating point invalid operation. + FPE_FLTSUB = 8 // Subscript out of range. + FPE_FLTUNK = 14 // Undiagnosed floating-point exception. + FPE_CONDTRAP = 15 +) + +// sigstack, sigaltstack definitions. +// Copyright (C) 1998-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Minimum stack size for a signal handler. + +// System default stack size. + +// ss_flags values for stack_t. Linux version. +// Copyright (C) 1998-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Possible values for `ss_flags'. +const ( /* ss_flags.h:27:1: */ + SS_ONSTACK = 1 + SS_DISABLE = 2 +) + +type Ptrdiff_t = int64 /* :3:26 */ + +type Size_t = uint64 /* :9:23 */ + +type Wchar_t = int32 /* :15:24 */ + +type X__int128_t = struct { + Flo int64 + Fhi int64 +} /* :21:43 */ // must match modernc.org/mathutil.Int128 +type X__uint128_t = struct { + Flo uint64 + Fhi uint64 +} /* :22:44 */ // must match modernc.org/mathutil.Int128 + +type X__builtin_va_list = uintptr /* :46:14 */ +type X__float128 = float64 /* :47:21 */ + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// ISO C99 Standard: 7.14 Signal handling + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. 
+// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// These are defined by the user (or the compiler) +// to specify the desired environment: +// +// __STRICT_ANSI__ ISO Standard C. +// _ISOC99_SOURCE Extensions to ISO C89 from ISO C99. +// _ISOC11_SOURCE Extensions to ISO C99 from ISO C11. +// _ISOC2X_SOURCE Extensions to ISO C99 from ISO C2X. +// __STDC_WANT_LIB_EXT2__ +// Extensions to ISO C99 from TR 27431-2:2010. +// __STDC_WANT_IEC_60559_BFP_EXT__ +// Extensions to ISO C11 from TS 18661-1:2014. +// __STDC_WANT_IEC_60559_FUNCS_EXT__ +// Extensions to ISO C11 from TS 18661-4:2015. +// __STDC_WANT_IEC_60559_TYPES_EXT__ +// Extensions to ISO C11 from TS 18661-3:2015. +// +// _POSIX_SOURCE IEEE Std 1003.1. +// _POSIX_C_SOURCE If ==1, like _POSIX_SOURCE; if >=2 add IEEE Std 1003.2; +// if >=199309L, add IEEE Std 1003.1b-1993; +// if >=199506L, add IEEE Std 1003.1c-1995; +// if >=200112L, all of IEEE 1003.1-2004 +// if >=200809L, all of IEEE 1003.1-2008 +// _XOPEN_SOURCE Includes POSIX and XPG things. Set to 500 if +// Single Unix conformance is wanted, to 600 for the +// sixth revision, to 700 for the seventh revision. +// _XOPEN_SOURCE_EXTENDED XPG things and X/Open Unix extensions. +// _LARGEFILE_SOURCE Some more functions for correct standard I/O. +// _LARGEFILE64_SOURCE Additional functionality from LFS for large files. +// _FILE_OFFSET_BITS=N Select default filesystem interface. +// _ATFILE_SOURCE Additional *at interfaces. +// _GNU_SOURCE All of the above, plus GNU extensions. +// _DEFAULT_SOURCE The default set of features (taking precedence over +// __STRICT_ANSI__). +// +// _FORTIFY_SOURCE Add security hardening to many library functions. +// Set to 1 or 2; 2 performs stricter checks than 1. +// +// _REENTRANT, _THREAD_SAFE +// Obsolete; equivalent to _POSIX_C_SOURCE=199506L. +// +// The `-ansi' switch to the GNU C compiler, and standards conformance +// options such as `-std=c99', define __STRICT_ANSI__. If none of +// these are defined, or if _DEFAULT_SOURCE is defined, the default is +// to have _POSIX_SOURCE set to one and _POSIX_C_SOURCE set to +// 200809L, as well as enabling miscellaneous functions from BSD and +// SVID. If more than one of these are defined, they accumulate. For +// example __STRICT_ANSI__, _POSIX_SOURCE and _POSIX_C_SOURCE together +// give you ISO C, 1003.1, and 1003.2, but nothing else. +// +// These are defined by this file and are used by the +// header files to decide what to declare or define: +// +// __GLIBC_USE (F) Define things from feature set F. This is defined +// to 1 or 0; the subsequent macros are either defined +// or undefined, and those tests should be moved to +// __GLIBC_USE. +// __USE_ISOC11 Define ISO C11 things. +// __USE_ISOC99 Define ISO C99 things. +// __USE_ISOC95 Define ISO C90 AMD1 (C95) things. +// __USE_ISOCXX11 Define ISO C++11 things. 
+// __USE_POSIX Define IEEE Std 1003.1 things. +// __USE_POSIX2 Define IEEE Std 1003.2 things. +// __USE_POSIX199309 Define IEEE Std 1003.1, and .1b things. +// __USE_POSIX199506 Define IEEE Std 1003.1, .1b, .1c and .1i things. +// __USE_XOPEN Define XPG things. +// __USE_XOPEN_EXTENDED Define X/Open Unix things. +// __USE_UNIX98 Define Single Unix V2 things. +// __USE_XOPEN2K Define XPG6 things. +// __USE_XOPEN2KXSI Define XPG6 XSI things. +// __USE_XOPEN2K8 Define XPG7 things. +// __USE_XOPEN2K8XSI Define XPG7 XSI things. +// __USE_LARGEFILE Define correct standard I/O things. +// __USE_LARGEFILE64 Define LFS things with separate names. +// __USE_FILE_OFFSET64 Define 64bit interface as default. +// __USE_MISC Define things from 4.3BSD or System V Unix. +// __USE_ATFILE Define *at interfaces and AT_* constants for them. +// __USE_GNU Define GNU extensions. +// __USE_FORTIFY_LEVEL Additional security measures used, according to level. +// +// The macros `__GNU_LIBRARY__', `__GLIBC__', and `__GLIBC_MINOR__' are +// defined by this file unconditionally. `__GNU_LIBRARY__' is provided +// only for compatibility. All new code should use the other symbols +// to test for features. +// +// All macros listed above as possibly being defined by this file are +// explicitly undefined if they are not explicitly defined. +// Feature-test macros that are not defined by the user or compiler +// but are implied by the other feature-test macros defined (or by the +// lack of any definitions) are defined by the file. +// +// ISO C feature test macros depend on the definition of the macro +// when an affected header is included, not when the first system +// header is included, and so they are handled in +// , which does not have a multiple include +// guard. Feature test macros that can be handled from the first +// system header included are handled here. + +// Undefine everything, so we get a clean slate. + +// Suppress kernel-name space pollution unless user expressedly asks +// for it. + +// Convenience macro to test the version of gcc. +// Use like this: +// #if __GNUC_PREREQ (2,8) +// ... code requiring gcc 2.8 or later ... +// #endif +// Note: only works for GCC 2.0 and later, because __GNUC_MINOR__ was +// added in 2.0. + +// Similarly for clang. Features added to GCC after version 4.2 may +// or may not also be available in clang, and clang's definitions of +// __GNUC(_MINOR)__ are fixed at 4 and 2 respectively. Not all such +// features can be queried via __has_extension/__has_feature. + +// Whether to use feature set F. + +// _BSD_SOURCE and _SVID_SOURCE are deprecated aliases for +// _DEFAULT_SOURCE. If _DEFAULT_SOURCE is present we do not +// issue a warning; the expectation is that the source is being +// transitioned to use the new macro. + +// If _GNU_SOURCE was defined by the user, turn on all the other features. + +// If nothing (other than _GNU_SOURCE and _DEFAULT_SOURCE) is defined, +// define _DEFAULT_SOURCE. + +// This is to enable the ISO C2X extension. + +// This is to enable the ISO C11 extension. + +// This is to enable the ISO C99 extension. + +// This is to enable the ISO C90 Amendment 1:1995 extension. + +// If none of the ANSI/POSIX macros are defined, or if _DEFAULT_SOURCE +// is defined, use POSIX.1-2008 (or another version depending on +// _XOPEN_SOURCE). + +// Some C libraries once required _REENTRANT and/or _THREAD_SAFE to be +// defined in all multithreaded code. GNU libc has not required this +// for many years. 
We now treat them as compatibility synonyms for +// _POSIX_C_SOURCE=199506L, which is the earliest level of POSIX with +// comprehensive support for multithreaded code. Using them never +// lowers the selected level of POSIX conformance, only raises it. + +// The function 'gets' existed in C89, but is impossible to use +// safely. It has been removed from ISO C11 and ISO C++14. Note: for +// compatibility with various implementations of , this test +// must consider only the value of __cplusplus when compiling C++. + +// GNU formerly extended the scanf functions with modified format +// specifiers %as, %aS, and %a[...] that allocate a buffer for the +// input using malloc. This extension conflicts with ISO C99, which +// defines %a as a standalone format specifier that reads a floating- +// point number; moreover, POSIX.1-2008 provides the same feature +// using the modifier letter 'm' instead (%ms, %mS, %m[...]). +// +// We now follow C99 unless GNU extensions are active and the compiler +// is specifically in C89 or C++98 mode (strict or not). For +// instance, with GCC, -std=gnu11 will have C99-compliant scanf with +// or without -D_GNU_SOURCE, but -std=c89 -D_GNU_SOURCE will have the +// old extension. + +// Get definitions of __STDC_* predefined macros, if the compiler has +// not preincluded this header automatically. +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// This macro indicates that the installed library is the GNU C Library. +// For historic reasons the value now is 6 and this will stay from now +// on. The use of this variable is deprecated. Use __GLIBC__ and +// __GLIBC_MINOR__ now (see below) when you want to test for a specific +// GNU C library version and use the values in to get +// the sonames of the shared libraries. + +// Major and minor version number of the GNU C library package. Use +// these macros to test for features in specific releases. + +// This is here only because every header file already includes this one. +// Copyright (C) 1992-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// We are almost always included from features.h. 
+ +// The GNU libc does not support any K&R compilers or the traditional mode +// of ISO C compilers anymore. Check for some of the combinations not +// anymore supported. + +// Some user header file might have defined this before. + +// All functions, except those with callbacks or those that +// synchronize memory, are leaf functions. + +// GCC can always grok prototypes. For C++ programs we add throw() +// to help it optimize the function calls. But this works only with +// gcc 2.8.x and egcs. For gcc 3.2 and up we even mark C functions +// as non-throwing using a function attribute since programs can use +// the -fexceptions options for C code as well. + +// Compilers that are not clang may object to +// #if defined __clang__ && __has_extension(...) +// even though they do not need to evaluate the right-hand side of the &&. + +// These two macros are not used in glibc anymore. They are kept here +// only because some other projects expect the macros to be defined. + +// For these things, GCC behaves the ANSI way normally, +// and the non-ANSI way under -traditional. + +// This is not a typedef so `const __ptr_t' does the right thing. + +// C++ needs to know that types and declarations are C, not C++. + +// Fortify support. + +// Support for flexible arrays. +// Headers that should use flexible arrays only if they're "real" +// (e.g. only if they won't affect sizeof()) should test +// #if __glibc_c99_flexarr_available. + +// __asm__ ("xyz") is used throughout the headers to rename functions +// at the assembly language level. This is wrapped by the __REDIRECT +// macro, in order to support compilers that can do this some other +// way. When compilers don't support asm-names at all, we have to do +// preprocessor tricks instead (which don't have exactly the right +// semantics, but it's the best we can do). +// +// Example: +// int __REDIRECT(setpgrp, (__pid_t pid, __pid_t pgrp), setpgid); + +// +// #elif __SOME_OTHER_COMPILER__ +// +// # define __REDIRECT(name, proto, alias) name proto; _Pragma("let " #name " = " #alias) + +// GCC has various useful declarations that can be made with the +// `__attribute__' syntax. All of the ways we use this do fine if +// they are omitted for compilers that don't understand it. + +// At some point during the gcc 2.96 development the `malloc' attribute +// for functions was introduced. We don't want to use it unconditionally +// (although this would be possible) since it generates warnings. + +// Tell the compiler which arguments to an allocation function +// indicate the size of the allocation. + +// At some point during the gcc 2.96 development the `pure' attribute +// for functions was introduced. We don't want to use it unconditionally +// (although this would be possible) since it generates warnings. + +// This declaration tells the compiler that the value is constant. + +// At some point during the gcc 3.1 development the `used' attribute +// for functions was introduced. We don't want to use it unconditionally +// (although this would be possible) since it generates warnings. + +// Since version 3.2, gcc allows marking deprecated functions. + +// Since version 4.5, gcc also allows one to specify the message printed +// when a deprecated function is used. clang claims to be gcc 4.2, but +// may also support this feature. + +// At some point during the gcc 2.8 development the `format_arg' attribute +// for functions was introduced. We don't want to use it unconditionally +// (although this would be possible) since it generates warnings. 
+// If several `format_arg' attributes are given for the same function, in +// gcc-3.0 and older, all but the last one are ignored. In newer gccs, +// all designated arguments are considered. + +// At some point during the gcc 2.97 development the `strfmon' format +// attribute for functions was introduced. We don't want to use it +// unconditionally (although this would be possible) since it +// generates warnings. + +// The nonull function attribute allows to mark pointer parameters which +// must not be NULL. + +// If fortification mode, we warn about unused results of certain +// function calls which can lead to problems. + +// Forces a function to be always inlined. +// The Linux kernel defines __always_inline in stddef.h (283d7573), and +// it conflicts with this definition. Therefore undefine it first to +// allow either header to be included first. + +// Associate error messages with the source location of the call site rather +// than with the source location inside the function. + +// GCC 4.3 and above with -std=c99 or -std=gnu99 implements ISO C99 +// inline semantics, unless -fgnu89-inline is used. Using __GNUC_STDC_INLINE__ +// or __GNUC_GNU_INLINE is not a good enough check for gcc because gcc versions +// older than 4.3 may define these macros and still not guarantee GNU inlining +// semantics. +// +// clang++ identifies itself as gcc-4.2, but has support for GNU inlining +// semantics, that can be checked for by using the __GNUC_STDC_INLINE_ and +// __GNUC_GNU_INLINE__ macro definitions. + +// GCC 4.3 and above allow passing all anonymous arguments of an +// __extern_always_inline function to some other vararg function. + +// It is possible to compile containing GCC extensions even if GCC is +// run in pedantic mode if the uses are carefully marked using the +// `__extension__' keyword. But this is not generally available before +// version 2.8. + +// __restrict is known in EGCS 1.2 and above. + +// ISO C99 also allows to declare arrays as non-overlapping. The syntax is +// array_name[restrict] +// GCC 3.1 supports this. + +// Describes a char array whose address can safely be passed as the first +// argument to strncpy and strncat, as the char array is not necessarily +// a NUL-terminated string. + +// Undefine (also defined in libc-symbols.h). +// Copies attributes from the declaration or type referenced by +// the argument. + +// Determine the wordsize from the preprocessor defines. + +// Both x86-64 and x32 use the 64-bit system call interface. +// Properties of long double type. ldbl-96 version. +// Copyright (C) 2016-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// long double is distinct from double, so there is nothing to +// define here. + +// __glibc_macro_warning (MESSAGE) issues warning MESSAGE. This is +// intended for use in preprocessor macros. 
+// +// Note: MESSAGE must be a _single_ string; concatenation of string +// literals is not supported. + +// Generic selection (ISO C11) is a C-only feature, available in GCC +// since version 4.9. Previous versions do not provide generic +// selection, even though they might set __STDC_VERSION__ to 201112L, +// when in -std=c11 mode. Thus, we must check for !defined __GNUC__ +// when testing __STDC_VERSION__ for generic selection support. +// On the other hand, Clang also defines __GNUC__, so a clang-specific +// check is required to enable the use of generic selection. + +// If we don't have __REDIRECT, prototypes will be missing if +// __USE_FILE_OFFSET64 but not __USE_LARGEFILE[64]. + +// Decide whether we can define 'extern inline' functions in headers. + +// This is here only because every header file already includes this one. +// Get the definitions of all the appropriate `__stub_FUNCTION' symbols. +// contains `#define __stub_FUNCTION' when FUNCTION is a stub +// that will always return failure (and set errno to ENOSYS). +// This file is automatically generated. +// This file selects the right generated file of `__stub_FUNCTION' macros +// based on the architecture being compiled for. + +// This file is automatically generated. +// It defines a symbol `__stub_FUNCTION' for each function +// in the C library which is a stub, meaning it will fail +// every time called, usually setting errno to ENOSYS. + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Determine the wordsize from the preprocessor defines. + +// Both x86-64 and x32 use the 64-bit system call interface. +// Bit size of the time_t type at glibc build time, x86-64 and x32 case. +// Copyright (C) 2018-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. 
+// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// For others, time size is word size. + +// Convenience types. +type X__u_char = uint8 /* types.h:31:23 */ +type X__u_short = uint16 /* types.h:32:28 */ +type X__u_int = uint32 /* types.h:33:22 */ +type X__u_long = uint64 /* types.h:34:27 */ + +// Fixed-size types, underlying types depend on word size and compiler. +type X__int8_t = int8 /* types.h:37:21 */ +type X__uint8_t = uint8 /* types.h:38:23 */ +type X__int16_t = int16 /* types.h:39:26 */ +type X__uint16_t = uint16 /* types.h:40:28 */ +type X__int32_t = int32 /* types.h:41:20 */ +type X__uint32_t = uint32 /* types.h:42:22 */ +type X__int64_t = int64 /* types.h:44:25 */ +type X__uint64_t = uint64 /* types.h:45:27 */ + +// Smallest types with at least a given width. +type X__int_least8_t = X__int8_t /* types.h:52:18 */ +type X__uint_least8_t = X__uint8_t /* types.h:53:19 */ +type X__int_least16_t = X__int16_t /* types.h:54:19 */ +type X__uint_least16_t = X__uint16_t /* types.h:55:20 */ +type X__int_least32_t = X__int32_t /* types.h:56:19 */ +type X__uint_least32_t = X__uint32_t /* types.h:57:20 */ +type X__int_least64_t = X__int64_t /* types.h:58:19 */ +type X__uint_least64_t = X__uint64_t /* types.h:59:20 */ + +// quad_t is also 64 bits. +type X__quad_t = int64 /* types.h:63:18 */ +type X__u_quad_t = uint64 /* types.h:64:27 */ + +// Largest integral types. +type X__intmax_t = int64 /* types.h:72:18 */ +type X__uintmax_t = uint64 /* types.h:73:27 */ + +// The machine-dependent file defines __*_T_TYPE +// macros for each of the OS types we define below. The definitions +// of those macros must use the following macros for underlying types. +// We define __S_TYPE and __U_TYPE for the signed and unsigned +// variants of each of the following integer types on this machine. +// +// 16 -- "natural" 16-bit type (always short) +// 32 -- "natural" 32-bit type (always int) +// 64 -- "natural" 64-bit type (long or long long) +// LONG32 -- 32-bit type, traditionally long +// QUAD -- 64-bit type, traditionally long long +// WORD -- natural type of __WORDSIZE bits (int or long) +// LONGWORD -- type of __WORDSIZE bits, traditionally long +// +// We distinguish WORD/LONGWORD, 32/LONG32, and 64/QUAD so that the +// conventional uses of `long' or `long long' type modifiers match the +// types we define, even when a less-adorned type would be the same size. +// This matters for (somewhat) portably writing printf/scanf formats for +// these types, where using the appropriate l or ll format modifiers can +// make the typedefs and the formats match up across all GNU platforms. If +// we used `long' when it's 64 bits where `long long' is expected, then the +// compiler would warn about the formats not matching the argument types, +// and the programmer changing them to shut up the compiler would break the +// program's portability. 
+// +// Here we assume what is presently the case in all the GCC configurations +// we support: long long is always 64 bits, long is always word/address size, +// and int is always 32 bits. + +// No need to mark the typedef with __extension__. +// bits/typesizes.h -- underlying types for *_t. Linux/x86-64 version. +// Copyright (C) 2012-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// See for the meaning of these macros. This file exists so +// that need not vary across different GNU platforms. + +// X32 kernel interface is 64-bit. + +// Tell the libc code that off_t and off64_t are actually the same type +// for all ABI purposes, even if possibly expressed as different base types +// for C type-checking purposes. + +// Same for ino_t and ino64_t. + +// And for __rlim_t and __rlim64_t. + +// And for fsblkcnt_t, fsblkcnt64_t, fsfilcnt_t and fsfilcnt64_t. + +// Number of descriptors that can fit in an `fd_set'. + +// bits/time64.h -- underlying types for __time64_t. Generic version. +// Copyright (C) 2018-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Define __TIME64_T_TYPE so that it is always a 64-bit type. + +// If we already have 64-bit time type then use it. + +type X__dev_t = uint64 /* types.h:145:25 */ // Type of device numbers. +type X__uid_t = uint32 /* types.h:146:25 */ // Type of user identifications. +type X__gid_t = uint32 /* types.h:147:25 */ // Type of group identifications. +type X__ino_t = uint64 /* types.h:148:25 */ // Type of file serial numbers. +type X__ino64_t = uint64 /* types.h:149:27 */ // Type of file serial numbers (LFS). +type X__mode_t = uint32 /* types.h:150:26 */ // Type of file attribute bitmasks. +type X__nlink_t = uint64 /* types.h:151:27 */ // Type of file link counts. +type X__off_t = int64 /* types.h:152:25 */ // Type of file sizes and offsets. +type X__off64_t = int64 /* types.h:153:27 */ // Type of file sizes and offsets (LFS). +type X__pid_t = int32 /* types.h:154:25 */ // Type of process identifications. +type X__fsid_t = struct{ F__val [2]int32 } /* types.h:155:26 */ // Type of file system IDs. 
+type X__clock_t = int64 /* types.h:156:27 */ // Type of CPU usage counts. +type X__rlim_t = uint64 /* types.h:157:26 */ // Type for resource measurement. +type X__rlim64_t = uint64 /* types.h:158:28 */ // Type for resource measurement (LFS). +type X__id_t = uint32 /* types.h:159:24 */ // General type for IDs. +type X__time_t = int64 /* types.h:160:26 */ // Seconds since the Epoch. +type X__useconds_t = uint32 /* types.h:161:30 */ // Count of microseconds. +type X__suseconds_t = int64 /* types.h:162:31 */ // Signed count of microseconds. + +type X__daddr_t = int32 /* types.h:164:27 */ // The type of a disk address. +type X__key_t = int32 /* types.h:165:25 */ // Type of an IPC key. + +// Clock ID used in clock and timer functions. +type X__clockid_t = int32 /* types.h:168:29 */ + +// Timer ID returned by `timer_create'. +type X__timer_t = uintptr /* types.h:171:12 */ + +// Type to represent block size. +type X__blksize_t = int64 /* types.h:174:29 */ + +// Types from the Large File Support interface. + +// Type to count number of disk blocks. +type X__blkcnt_t = int64 /* types.h:179:28 */ +type X__blkcnt64_t = int64 /* types.h:180:30 */ + +// Type to count file system blocks. +type X__fsblkcnt_t = uint64 /* types.h:183:30 */ +type X__fsblkcnt64_t = uint64 /* types.h:184:32 */ + +// Type to count file system nodes. +type X__fsfilcnt_t = uint64 /* types.h:187:30 */ +type X__fsfilcnt64_t = uint64 /* types.h:188:32 */ + +// Type of miscellaneous file system fields. +type X__fsword_t = int64 /* types.h:191:28 */ + +type X__ssize_t = int64 /* types.h:193:27 */ // Type of a byte count, or error. + +// Signed long type used in system calls. +type X__syscall_slong_t = int64 /* types.h:196:33 */ +// Unsigned long type used in system calls. +type X__syscall_ulong_t = uint64 /* types.h:198:33 */ + +// These few don't really vary by system, they always correspond +// +// to one of the other defined types. +type X__loff_t = X__off64_t /* types.h:202:19 */ // Type of file sizes and offsets (LFS). +type X__caddr_t = uintptr /* types.h:203:14 */ + +// Duplicates info from stdint.h but this is used in unistd.h. +type X__intptr_t = int64 /* types.h:206:25 */ + +// Duplicate info from sys/socket.h. +type X__socklen_t = uint32 /* types.h:209:23 */ + +// C99: An integer type that can be accessed as an atomic entity, +// +// even in the presence of asynchronous interrupts. +// It is not currently necessary for this to be machine-specific. +type X__sig_atomic_t = int32 /* types.h:214:13 */ + +// Seconds since the Epoch, visible to user code when time_t is too +// narrow only for consistency with the old way of widening too-narrow +// types. User code should never use __time64_t. + +// Signal number definitions. Linux version. +// Copyright (C) 1995-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . 
+ +// Signal number constants. Generic template. +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Fake signal functions. + +// We define here all the signal names listed in POSIX (1003.1-2008); +// as of 1003.1-2013, no additional signals have been added by POSIX. +// We also define here signal names that historically exist in every +// real-world POSIX variant (e.g. SIGWINCH). +// +// Signals in the 1-15 range are defined with their historical numbers. +// For other signals, we use the BSD numbers. +// There are two unallocated signal numbers in the 1-31 range: 7 and 29. +// Signal number 0 is reserved for use as kill(pid, 0), to test whether +// a process exists without sending it a signal. + +// ISO C99 signals. + +// Historical signals specified by POSIX. + +// New(er) POSIX signals (1003.1-2008, 1003.1-2013). + +// Nonstandard signals found in all modern POSIX systems +// (including both BSD and Linux). + +// Archaic names for compatibility. + +// Not all systems support real-time signals. bits/signum.h indicates +// that they are supported by overriding __SIGRTMAX to a value greater +// than __SIGRTMIN. These constants give the kernel-level hard limits, +// but some real-time signals may be used internally by glibc. Do not +// use these constants in application code; use SIGRTMIN and SIGRTMAX +// (defined in signal.h) instead. + +// Biggest signal number + 1 (including real-time signals). + +// Adjustments and additions to the signal number constants for +// most Linux systems. + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// An integral type that can be modified atomically, without the +// +// possibility of a signal arriving in the middle of the operation. +type Sig_atomic_t = X__sig_atomic_t /* sig_atomic_t.h:8:24 */ + +type X__sigset_t = struct{ F__val [16]uint64 } /* __sigset_t.h:8:3 */ + +// A set of signals to be blocked, unblocked, or waited for. 
+type Sigset_t = X__sigset_t /* sigset_t.h:7:20 */ + +type Pid_t = X__pid_t /* signal.h:40:17 */ +type Uid_t = X__uid_t /* signal.h:46:17 */ + +// We need `struct timespec' later on. +// NB: Include guard matches what uses. + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Endian macros for string.h functions +// Copyright (C) 1992-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Definitions for byte order, according to significance of bytes, +// from low addresses to high addresses. The value is what you get by +// putting '4' in the most significant byte, '3' in the second most +// significant byte, '2' in the second least significant byte, and '1' +// in the least significant byte, and then writing down one digit for +// each byte, starting with the byte at the lowest address at the left, +// and proceeding to the byte with the highest address at the right. + +// This file defines `__BYTE_ORDER' for the particular machine. + +// i386/x86_64 are little-endian. + +// Some machines may need to use a different endianness for floating point +// values. + +// POSIX.1b structure for a time value. This is like a `struct timeval' but +// +// has nanoseconds instead of microseconds. +type Timespec = struct { + Ftv_sec X__time_t + Ftv_nsec X__syscall_slong_t +} /* struct_timespec.h:10:1 */ + +// Determine the wordsize from the preprocessor defines. + +// Both x86-64 and x32 use the 64-bit system call interface. +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. 
+// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Define __sigval_t. +// Copyright (C) 1997-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Type for data associated with a signal. +type Sigval = struct { + F__ccgo_pad1 [0]uint64 + Fsival_int int32 + F__ccgo_pad2 [4]byte +} /* __sigval_t.h:24:1 */ + +type X__sigval_t = Sigval /* __sigval_t.h:30:22 */ + +// Some fields of siginfo_t have architecture-specific variations. +// Architecture-specific adjustments to siginfo_t. x86 version. + +type Siginfo_t = struct { + Fsi_signo int32 + Fsi_errno int32 + Fsi_code int32 + F__pad0 int32 + F_sifields struct { + F__ccgo_pad1 [0]uint64 + F_pad [28]int32 + } +} /* siginfo_t.h:124:5 */ + +// Architectures might also add architecture-specific constants. +// These are all considered GNU extensions. + +// Define __sigval_t. +// Copyright (C) 1997-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// To avoid sigval_t (not a standard type name) having C++ name +// mangling depending on whether the selected standard includes union +// sigval, it should not be defined at all when using a standard for +// which the sigval name is not reserved; in that case, headers should +// not include and should use only the +// internal __sigval_t name. + +type Sigval_t = X__sigval_t /* sigval_t.h:16:20 */ + +// Determine the wordsize from the preprocessor defines. + +// Both x86-64 and x32 use the 64-bit system call interface. +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. 
+// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Define __sigval_t. +// Copyright (C) 1997-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Forward declaration. +type Pthread_attr_t1 = struct { + F__ccgo_pad1 [0]uint64 + F__size [56]int8 +} /* sigevent_t.h:17:9 */ + +// Determine the wordsize from the preprocessor defines. + +// Both x86-64 and x32 use the 64-bit system call interface. +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Define __sigval_t. +// Copyright (C) 1997-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Forward declaration. 
+type Pthread_attr_t = Pthread_attr_t1 /* sigevent_t.h:17:30 */ + +// Structure to transport application-defined values with signals. +type Sigevent = struct { + Fsigev_value X__sigval_t + Fsigev_signo int32 + Fsigev_notify int32 + F_sigev_un struct { + F__ccgo_pad1 [0]uint64 + F_pad [12]int32 + } +} /* sigevent_t.h:22:9 */ + +// Structure to transport application-defined values with signals. +type Sigevent_t = Sigevent /* sigevent_t.h:42:5 */ + +// Type of a signal handler. +type X__sighandler_t = uintptr /* signal.h:72:14 */ + +// 4.4 BSD uses the name `sig_t' for this. +type Sig_t = X__sighandler_t /* signal.h:190:24 */ + +// Get the system-specific definitions of `struct sigaction' +// and the `SA_*' and `SIG_*'. constants. +// The proper definitions for Linux's sigaction. +// Copyright (C) 1993-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Structure describing the action to be taken when a signal arrives. +type Sigaction = struct { + F__sigaction_handler struct{ Fsa_handler X__sighandler_t } + Fsa_mask X__sigset_t + Fsa_flags int32 + F__ccgo_pad1 [4]byte + Fsa_restorer uintptr +} /* sigaction.h:27:1 */ + +// Get machine-dependent `struct sigcontext' and signal subcodes. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . 
+ +// Never include this file directly; use instead. + +type X_fpx_sw_bytes = struct { + Fmagic1 X__uint32_t + Fextended_size X__uint32_t + Fxstate_bv X__uint64_t + Fxstate_size X__uint32_t + F__glibc_reserved1 [7]X__uint32_t +} /* sigcontext.h:31:1 */ + +type X_fpreg = struct { + Fsignificand [4]uint16 + Fexponent uint16 +} /* sigcontext.h:40:1 */ + +type X_fpxreg = struct { + Fsignificand [4]uint16 + Fexponent uint16 + F__glibc_reserved1 [3]uint16 +} /* sigcontext.h:46:1 */ + +type X_xmmreg = struct{ Felement [4]X__uint32_t } /* sigcontext.h:53:1 */ + +type X_fpstate = struct { + Fcwd X__uint16_t + Fswd X__uint16_t + Fftw X__uint16_t + Ffop X__uint16_t + Frip X__uint64_t + Frdp X__uint64_t + Fmxcsr X__uint32_t + Fmxcr_mask X__uint32_t + F_st [8]struct { + Fsignificand [4]uint16 + Fexponent uint16 + F__glibc_reserved1 [3]uint16 + } + F_xmm [16]struct{ Felement [4]X__uint32_t } + F__glibc_reserved1 [24]X__uint32_t +} /* sigcontext.h:123:1 */ + +type Sigcontext = struct { + Fr8 X__uint64_t + Fr9 X__uint64_t + Fr10 X__uint64_t + Fr11 X__uint64_t + Fr12 X__uint64_t + Fr13 X__uint64_t + Fr14 X__uint64_t + Fr15 X__uint64_t + Frdi X__uint64_t + Frsi X__uint64_t + Frbp X__uint64_t + Frbx X__uint64_t + Frdx X__uint64_t + Frax X__uint64_t + Frcx X__uint64_t + Frsp X__uint64_t + Frip X__uint64_t + Feflags X__uint64_t + Fcs uint16 + Fgs uint16 + Ffs uint16 + F__pad0 uint16 + Ferr X__uint64_t + Ftrapno X__uint64_t + Foldmask X__uint64_t + Fcr2 X__uint64_t + F__184 struct{ Ffpstate uintptr } + F__reserved1 [8]X__uint64_t +} /* sigcontext.h:139:1 */ + +type X_xsave_hdr = struct { + Fxstate_bv X__uint64_t + F__glibc_reserved1 [2]X__uint64_t + F__glibc_reserved2 [5]X__uint64_t +} /* sigcontext.h:177:1 */ + +type X_ymmh_state = struct{ Fymmh_space [64]X__uint32_t } /* sigcontext.h:184:1 */ + +type X_xstate = struct { + Ffpstate struct { + Fcwd X__uint16_t + Fswd X__uint16_t + Fftw X__uint16_t + Ffop X__uint16_t + Frip X__uint64_t + Frdp X__uint64_t + Fmxcsr X__uint32_t + Fmxcr_mask X__uint32_t + F_st [8]struct { + Fsignificand [4]uint16 + Fexponent uint16 + F__glibc_reserved1 [3]uint16 + } + F_xmm [16]struct{ Felement [4]X__uint32_t } + F__glibc_reserved1 [24]X__uint32_t + } + Fxstate_hdr struct { + Fxstate_bv X__uint64_t + F__glibc_reserved1 [2]X__uint64_t + F__glibc_reserved2 [5]X__uint64_t + } + Fymmh struct{ Fymmh_space [64]X__uint32_t } +} /* sigcontext.h:189:1 */ + +// Wide character type. +// Locale-writers should change this as necessary to +// be big enough to hold unique values not between 0 and 127, +// and not (wchar_t) -1, for each defined multibyte character. + +// Define this type if we are doing the whole job, +// or if we want this type in particular. + +// A null pointer constant. + +// Define stack_t. Linux version. +// Copyright (C) 1998-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . 
+ +// Copyright (C) 1989-2020 Free Software Foundation, Inc. +// +// This file is part of GCC. +// +// GCC is free software; you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation; either version 3, or (at your option) +// any later version. +// +// GCC is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// Under Section 7 of GPL version 3, you are granted additional +// permissions described in the GCC Runtime Library Exception, version +// 3.1, as published by the Free Software Foundation. +// +// You should have received a copy of the GNU General Public License and +// a copy of the GCC Runtime Library Exception along with this program; +// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +// . + +// ISO C Standard: 7.17 Common definitions + +// Any one of these symbols __need_* means that GNU libc +// wants us just to define one data type. So don't define +// the symbols that indicate this file's entire job has been done. + +// This avoids lossage on SunOS but only if stdtypes.h comes first. +// There's no way to win with the other order! Sun lossage. + +// Sequent's header files use _PTRDIFF_T_ in some conflicting way. +// Just ignore it. + +// On VxWorks, may have defined macros like +// _TYPE_size_t which will typedef size_t. fixincludes patched the +// vxTypesBase.h so that this macro is only defined if _GCC_SIZE_T is +// not defined, and so that defining this macro defines _GCC_SIZE_T. +// If we find that the macros are still defined at this point, we must +// invoke them so that the type is defined as expected. + +// In case nobody has defined these types, but we aren't running under +// GCC 2.00, make sure that __PTRDIFF_TYPE__, __SIZE_TYPE__, and +// __WCHAR_TYPE__ have reasonable values. This can happen if the +// parts of GCC is compiled by an older compiler, that actually +// include gstddef.h, such as collect2. + +// Signed type of difference of two pointers. + +// Define this type if we are doing the whole job, +// or if we want this type in particular. + +// Unsigned type of `sizeof' something. + +// Define this type if we are doing the whole job, +// or if we want this type in particular. + +// Wide character type. +// Locale-writers should change this as necessary to +// be big enough to hold unique values not between 0 and 127, +// and not (wchar_t) -1, for each defined multibyte character. + +// Define this type if we are doing the whole job, +// or if we want this type in particular. + +// A null pointer constant. + +// Structure describing a signal stack. +type Stack_t = struct { + Fss_sp uintptr + Fss_flags int32 + F__ccgo_pad1 [4]byte + Fss_size Size_t +} /* stack_t.h:31:5 */ + +// This will define `ucontext_t' and `mcontext_t'. +// Copyright (C) 2001-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. 
+// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Define stack_t. Linux version. +// Copyright (C) 1998-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Type for general register. +type Greg_t = int64 /* ucontext.h:37:37 */ + +// Number of general registers. + +// Container for all general registers. 
+type Gregset_t = [23]Greg_t /* ucontext.h:46:16 */ + +type X_libc_fpxreg = struct { + Fsignificand [4]uint16 + Fexponent uint16 + F__glibc_reserved1 [3]uint16 +} /* ucontext.h:101:1 */ + +type X_libc_xmmreg = struct{ Felement [4]X__uint32_t } /* ucontext.h:108:1 */ + +type X_libc_fpstate = struct { + Fcwd X__uint16_t + Fswd X__uint16_t + Fftw X__uint16_t + Ffop X__uint16_t + Frip X__uint64_t + Frdp X__uint64_t + Fmxcsr X__uint32_t + Fmxcr_mask X__uint32_t + F_st [8]struct { + Fsignificand [4]uint16 + Fexponent uint16 + F__glibc_reserved1 [3]uint16 + } + F_xmm [16]struct{ Felement [4]X__uint32_t } + F__glibc_reserved1 [24]X__uint32_t +} /* ucontext.h:113:1 */ + +// Structure to describe FPU registers. +type Fpregset_t = uintptr /* ucontext.h:130:30 */ + +// Context to describe whole processor state. +type Mcontext_t = struct { + Fgregs Gregset_t + Ffpregs Fpregset_t + F__reserved1 [8]uint64 +} /* ucontext.h:139:3 */ + +// Userlevel context. +type Ucontext_t1 = struct { + Fuc_flags uint64 + Fuc_link uintptr + Fuc_stack Stack_t + Fuc_mcontext Mcontext_t + Fuc_sigmask Sigset_t + F__fpregs_mem struct { + Fcwd X__uint16_t + Fswd X__uint16_t + Fftw X__uint16_t + Ffop X__uint16_t + Frip X__uint64_t + Frdp X__uint64_t + Fmxcsr X__uint32_t + Fmxcr_mask X__uint32_t + F_st [8]struct { + Fsignificand [4]uint16 + Fexponent uint16 + F__glibc_reserved1 [3]uint16 + } + F_xmm [16]struct{ Felement [4]X__uint32_t } + F__glibc_reserved1 [24]X__uint32_t + } + F__ssp [4]uint64 +} /* ucontext.h:142:9 */ + +// Userlevel context. +type Ucontext_t = Ucontext_t1 /* ucontext.h:151:5 */ + +// Define struct sigstack. +// Copyright (C) 1998-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Structure describing a signal stack (obsolete). +type Sigstack = struct { + Fss_sp uintptr + Fss_onstack int32 + F__ccgo_pad1 [4]byte +} /* struct_sigstack.h:23:1 */ + +// Some of the functions for handling signals in threaded programs must +// be defined here. +// Declaration of common pthread types for all architectures. +// Copyright (C) 2017-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . 
+ +// For internal mutex and condition variable definitions. +// Common threading primitives definitions for both POSIX and C11. +// Copyright (C) 2017-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Arch-specific definitions. Each architecture must define the following +// macros to define the expected sizes of pthread data types: +// +// __SIZEOF_PTHREAD_ATTR_T - size of pthread_attr_t. +// __SIZEOF_PTHREAD_MUTEX_T - size of pthread_mutex_t. +// __SIZEOF_PTHREAD_MUTEXATTR_T - size of pthread_mutexattr_t. +// __SIZEOF_PTHREAD_COND_T - size of pthread_cond_t. +// __SIZEOF_PTHREAD_CONDATTR_T - size of pthread_condattr_t. +// __SIZEOF_PTHREAD_RWLOCK_T - size of pthread_rwlock_t. +// __SIZEOF_PTHREAD_RWLOCKATTR_T - size of pthread_rwlockattr_t. +// __SIZEOF_PTHREAD_BARRIER_T - size of pthread_barrier_t. +// __SIZEOF_PTHREAD_BARRIERATTR_T - size of pthread_barrierattr_t. +// +// The additional macro defines any constraint for the lock alignment +// inside the thread structures: +// +// __LOCK_ALIGNMENT - for internal lock/futex usage. +// +// Same idea but for the once locking primitive: +// +// __ONCE_ALIGNMENT - for pthread_once_t/once_flag definition. + +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Determine the wordsize from the preprocessor defines. + +// Both x86-64 and x32 use the 64-bit system call interface. + +// Common definition of pthread_mutex_t. + +type X__pthread_internal_list = struct { + F__prev uintptr + F__next uintptr +} /* thread-shared-types.h:49:9 */ + +// Some of the functions for handling signals in threaded programs must +// be defined here. +// Declaration of common pthread types for all architectures. +// Copyright (C) 2017-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. 
+// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// For internal mutex and condition variable definitions. +// Common threading primitives definitions for both POSIX and C11. +// Copyright (C) 2017-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Arch-specific definitions. Each architecture must define the following +// macros to define the expected sizes of pthread data types: +// +// __SIZEOF_PTHREAD_ATTR_T - size of pthread_attr_t. +// __SIZEOF_PTHREAD_MUTEX_T - size of pthread_mutex_t. +// __SIZEOF_PTHREAD_MUTEXATTR_T - size of pthread_mutexattr_t. +// __SIZEOF_PTHREAD_COND_T - size of pthread_cond_t. +// __SIZEOF_PTHREAD_CONDATTR_T - size of pthread_condattr_t. +// __SIZEOF_PTHREAD_RWLOCK_T - size of pthread_rwlock_t. +// __SIZEOF_PTHREAD_RWLOCKATTR_T - size of pthread_rwlockattr_t. +// __SIZEOF_PTHREAD_BARRIER_T - size of pthread_barrier_t. +// __SIZEOF_PTHREAD_BARRIERATTR_T - size of pthread_barrierattr_t. +// +// The additional macro defines any constraint for the lock alignment +// inside the thread structures: +// +// __LOCK_ALIGNMENT - for internal lock/futex usage. +// +// Same idea but for the once locking primitive: +// +// __ONCE_ALIGNMENT - for pthread_once_t/once_flag definition. + +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Determine the wordsize from the preprocessor defines. + +// Both x86-64 and x32 use the 64-bit system call interface. + +// Common definition of pthread_mutex_t. + +type X__pthread_list_t = X__pthread_internal_list /* thread-shared-types.h:53:3 */ + +type X__pthread_internal_slist = struct{ F__next uintptr } /* thread-shared-types.h:55:9 */ + +type X__pthread_slist_t = X__pthread_internal_slist /* thread-shared-types.h:58:3 */ + +// Arch-specific mutex definitions. 
A generic implementation is provided +// by sysdeps/nptl/bits/struct_mutex.h. If required, an architecture +// can override it by defining: +// +// 1. struct __pthread_mutex_s (used on both pthread_mutex_t and mtx_t +// definition). It should contains at least the internal members +// defined in the generic version. +// +// 2. __LOCK_ALIGNMENT for any extra attribute for internal lock used with +// atomic operations. +// +// 3. The macro __PTHREAD_MUTEX_INITIALIZER used for static initialization. +// It should initialize the mutex internal flag. + +// x86 internal mutex struct definitions. +// Copyright (C) 2019-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +type X__pthread_mutex_s = struct { + F__lock int32 + F__count uint32 + F__owner int32 + F__nusers uint32 + F__kind int32 + F__spins int16 + F__elision int16 + F__list X__pthread_list_t +} /* struct_mutex.h:22:1 */ + +// Arch-sepecific read-write lock definitions. A generic implementation is +// provided by struct_rwlock.h. If required, an architecture can override it +// by defining: +// +// 1. struct __pthread_rwlock_arch_t (used on pthread_rwlock_t definition). +// It should contain at least the internal members defined in the +// generic version. +// +// 2. The macro __PTHREAD_RWLOCK_INITIALIZER used for static initialization. +// It should initialize the rwlock internal type. + +// x86 internal rwlock struct definitions. +// Copyright (C) 2019-2020 Free Software Foundation, Inc. +// +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +type X__pthread_rwlock_arch_t = struct { + F__readers uint32 + F__writers uint32 + F__wrphase_futex uint32 + F__writers_futex uint32 + F__pad3 uint32 + F__pad4 uint32 + F__cur_writer int32 + F__shared int32 + F__rwelision int8 + F__pad1 [7]uint8 + F__pad2 uint64 + F__flags uint32 + F__ccgo_pad1 [4]byte +} /* struct_rwlock.h:23:1 */ + +// Common definition of pthread_cond_t. + +type X__pthread_cond_s = struct { + F__0 struct{ F__wseq uint64 } + F__8 struct{ F__g1_start uint64 } + F__g_refs [2]uint32 + F__g_size [2]uint32 + F__g1_orig_size uint32 + F__wrefs uint32 + F__g_signals [2]uint32 +} /* thread-shared-types.h:92:1 */ + +// Thread identifiers. 
The structure of the attribute type is not +// +// exposed on purpose. +type Pthread_t = uint64 /* pthreadtypes.h:27:27 */ + +// Data structures for mutex handling. The structure of the attribute +// +// type is not exposed on purpose. +type Pthread_mutexattr_t = struct { + F__ccgo_pad1 [0]uint32 + F__size [4]int8 +} /* pthreadtypes.h:36:3 */ + +// Data structure for condition variable handling. The structure of +// +// the attribute type is not exposed on purpose. +type Pthread_condattr_t = struct { + F__ccgo_pad1 [0]uint32 + F__size [4]int8 +} /* pthreadtypes.h:45:3 */ + +// Keys for thread-specific data +type Pthread_key_t = uint32 /* pthreadtypes.h:49:22 */ + +// Once-only execution +type Pthread_once_t = int32 /* pthreadtypes.h:53:30 */ + +type Pthread_mutex_t = struct{ F__data X__pthread_mutex_s } /* pthreadtypes.h:72:3 */ + +type Pthread_cond_t = struct{ F__data X__pthread_cond_s } /* pthreadtypes.h:80:3 */ + +// Data structure for reader-writer lock variable handling. The +// +// structure of the attribute type is deliberately not exposed. +type Pthread_rwlock_t = struct{ F__data X__pthread_rwlock_arch_t } /* pthreadtypes.h:91:3 */ + +type Pthread_rwlockattr_t = struct { + F__ccgo_pad1 [0]uint64 + F__size [8]int8 +} /* pthreadtypes.h:97:3 */ + +// POSIX spinlock data type. +type Pthread_spinlock_t = int32 /* pthreadtypes.h:103:22 */ + +// POSIX barriers data type. The structure of the type is +// +// deliberately not exposed. +type Pthread_barrier_t = struct { + F__ccgo_pad1 [0]uint64 + F__size [32]int8 +} /* pthreadtypes.h:112:3 */ + +type Pthread_barrierattr_t = struct { + F__ccgo_pad1 [0]uint32 + F__size [4]int8 +} /* pthreadtypes.h:118:3 */ + +// System-specific extensions. +// System-specific extensions of , Linux version. +// Copyright (C) 2019-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +var _ int8 /* gen.c:2:13: */ diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/stdio/capi_linux_loong64.go temporal-1.22.5/src/vendor/modernc.org/libc/stdio/capi_linux_loong64.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/stdio/capi_linux_loong64.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/stdio/capi_linux_loong64.go 2024-02-23 09:46:15.000000000 +0000 @@ -0,0 +1,5 @@ +// Code generated by 'ccgo stdio/gen.c -crt-import-path -export-defines -export-enums -export-externs X -export-fields F -export-structs -export-typedefs -header -hide _OSSwapInt16,_OSSwapInt32,_OSSwapInt64 -ignore-unsupported-alignment -o stdio/stdio_linux_amd64.go -pkgname stdio', DO NOT EDIT. 
+ +package stdio + +var CAPI = map[string]struct{}{} diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/stdio/stdio_linux_loong64.go temporal-1.22.5/src/vendor/modernc.org/libc/stdio/stdio_linux_loong64.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/stdio/stdio_linux_loong64.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/stdio/stdio_linux_loong64.go 2024-02-23 09:46:15.000000000 +0000 @@ -0,0 +1,540 @@ +// Code generated by 'ccgo stdio/gen.c -crt-import-path "" -export-defines "" -export-enums "" -export-externs X -export-fields F -export-structs "" -export-typedefs "" -header -hide _OSSwapInt16,_OSSwapInt32,_OSSwapInt64 -ignore-unsupported-alignment -o stdio/stdio_linux_amd64.go -pkgname stdio', DO NOT EDIT. + +package stdio + +import ( + "math" + "reflect" + "sync/atomic" + "unsafe" +) + +var _ = math.Pi +var _ reflect.Kind +var _ atomic.Value +var _ unsafe.Pointer + +const ( + BUFSIZ = 8192 // stdio.h:99:1: + EOF = -1 // stdio.h:104:1: + FILENAME_MAX = 4096 // stdio_lim.h:27:1: + FOPEN_MAX = 16 // stdio_lim.h:37:1: + L_ctermid = 9 // stdio_lim.h:30:1: + L_tmpnam = 20 // stdio_lim.h:25:1: + P_tmpdir = "/tmp" // stdio.h:120:1: + SEEK_CUR = 1 // stdio.h:110:1: + SEEK_END = 2 // stdio.h:111:1: + SEEK_SET = 0 // stdio.h:109:1: + TMP_MAX = 238328 // stdio_lim.h:26:1: + X_ATFILE_SOURCE = 1 // features.h:342:1: + X_BITS_STDIO_LIM_H = 1 // stdio_lim.h:19:1: + X_BITS_TIME64_H = 1 // time64.h:24:1: + X_BITS_TYPESIZES_H = 1 // typesizes.h:24:1: + X_BITS_TYPES_H = 1 // types.h:24:1: + X_BSD_SIZE_T_ = 0 // stddef.h:189:1: + X_BSD_SIZE_T_DEFINED_ = 0 // stddef.h:192:1: + X_DEFAULT_SOURCE = 1 // features.h:227:1: + X_FEATURES_H = 1 // features.h:19:1: + X_FILE_OFFSET_BITS = 64 // :25:1: + X_GCC_SIZE_T = 0 // stddef.h:195:1: + X_IOFBF = 0 // stdio.h:93:1: + X_IOLBF = 1 // stdio.h:94:1: + X_IONBF = 2 // stdio.h:95:1: + X_IO_EOF_SEEN = 0x0010 // struct_FILE.h:111:1: + X_IO_ERR_SEEN = 0x0020 // struct_FILE.h:114:1: + X_IO_USER_LOCK = 0x8000 // struct_FILE.h:117:1: + X_LP64 = 1 // :284:1: + X_POSIX_C_SOURCE = 200809 // features.h:281:1: + X_POSIX_SOURCE = 1 // features.h:279:1: + X_SIZET_ = 0 // stddef.h:196:1: + X_SIZE_T = 0 // stddef.h:183:1: + X_SIZE_T_ = 0 // stddef.h:188:1: + X_SIZE_T_DECLARED = 0 // stddef.h:193:1: + X_SIZE_T_DEFINED = 0 // stddef.h:191:1: + X_SIZE_T_DEFINED_ = 0 // stddef.h:190:1: + X_STDC_PREDEF_H = 1 // :162:1: + X_STDIO_H = 1 // stdio.h:24:1: + X_SYS_CDEFS_H = 1 // cdefs.h:19:1: + X_SYS_SIZE_T_H = 0 // stddef.h:184:1: + X_T_SIZE = 0 // stddef.h:186:1: + X_T_SIZE_ = 0 // stddef.h:185:1: + X_VA_LIST_DEFINED = 0 // stdio.h:53:1: + Linux = 1 // :231:1: + Unix = 1 // :177:1: +) + +type Ptrdiff_t = int64 /* :3:26 */ + +type Size_t = uint64 /* :9:23 */ + +type Wchar_t = int32 /* :15:24 */ + +type X__int128_t = struct { + Flo int64 + Fhi int64 +} /* :21:43 */ // must match modernc.org/mathutil.Int128 +type X__uint128_t = struct { + Flo uint64 + Fhi uint64 +} /* :22:44 */ // must match modernc.org/mathutil.Int128 + +type X__builtin_va_list = uintptr /* :46:14 */ +type X__float128 = float64 /* :47:21 */ + +// Wide character type. +// Locale-writers should change this as necessary to +// be big enough to hold unique values not between 0 and 127, +// and not (wchar_t) -1, for each defined multibyte character. + +// Define this type if we are doing the whole job, +// or if we want this type in particular. + +// A null pointer constant. + +// Copyright (C) 1989-2020 Free Software Foundation, Inc. +// +// This file is part of GCC. 
+// +// GCC is free software; you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation; either version 3, or (at your option) +// any later version. +// +// GCC is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// Under Section 7 of GPL version 3, you are granted additional +// permissions described in the GCC Runtime Library Exception, version +// 3.1, as published by the Free Software Foundation. +// +// You should have received a copy of the GNU General Public License and +// a copy of the GCC Runtime Library Exception along with this program; +// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +// . + +// ISO C Standard: 7.15 Variable arguments + +// Define __gnuc_va_list. + +type X__gnuc_va_list = X__builtin_va_list /* stdarg.h:40:27 */ + +// Define the standard macros for the user, +// if this invocation was from the user program. + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Determine the wordsize from the preprocessor defines. + +// Both x86-64 and x32 use the 64-bit system call interface. +// Bit size of the time_t type at glibc build time, x86-64 and x32 case. +// Copyright (C) 2018-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. 
+// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// For others, time size is word size. + +// Convenience types. +type X__u_char = uint8 /* types.h:31:23 */ +type X__u_short = uint16 /* types.h:32:28 */ +type X__u_int = uint32 /* types.h:33:22 */ +type X__u_long = uint64 /* types.h:34:27 */ + +// Fixed-size types, underlying types depend on word size and compiler. +type X__int8_t = int8 /* types.h:37:21 */ +type X__uint8_t = uint8 /* types.h:38:23 */ +type X__int16_t = int16 /* types.h:39:26 */ +type X__uint16_t = uint16 /* types.h:40:28 */ +type X__int32_t = int32 /* types.h:41:20 */ +type X__uint32_t = uint32 /* types.h:42:22 */ +type X__int64_t = int64 /* types.h:44:25 */ +type X__uint64_t = uint64 /* types.h:45:27 */ + +// Smallest types with at least a given width. +type X__int_least8_t = X__int8_t /* types.h:52:18 */ +type X__uint_least8_t = X__uint8_t /* types.h:53:19 */ +type X__int_least16_t = X__int16_t /* types.h:54:19 */ +type X__uint_least16_t = X__uint16_t /* types.h:55:20 */ +type X__int_least32_t = X__int32_t /* types.h:56:19 */ +type X__uint_least32_t = X__uint32_t /* types.h:57:20 */ +type X__int_least64_t = X__int64_t /* types.h:58:19 */ +type X__uint_least64_t = X__uint64_t /* types.h:59:20 */ + +// quad_t is also 64 bits. +type X__quad_t = int64 /* types.h:63:18 */ +type X__u_quad_t = uint64 /* types.h:64:27 */ + +// Largest integral types. +type X__intmax_t = int64 /* types.h:72:18 */ +type X__uintmax_t = uint64 /* types.h:73:27 */ + +// The machine-dependent file defines __*_T_TYPE +// macros for each of the OS types we define below. The definitions +// of those macros must use the following macros for underlying types. +// We define __S_TYPE and __U_TYPE for the signed and unsigned +// variants of each of the following integer types on this machine. +// +// 16 -- "natural" 16-bit type (always short) +// 32 -- "natural" 32-bit type (always int) +// 64 -- "natural" 64-bit type (long or long long) +// LONG32 -- 32-bit type, traditionally long +// QUAD -- 64-bit type, traditionally long long +// WORD -- natural type of __WORDSIZE bits (int or long) +// LONGWORD -- type of __WORDSIZE bits, traditionally long +// +// We distinguish WORD/LONGWORD, 32/LONG32, and 64/QUAD so that the +// conventional uses of `long' or `long long' type modifiers match the +// types we define, even when a less-adorned type would be the same size. +// This matters for (somewhat) portably writing printf/scanf formats for +// these types, where using the appropriate l or ll format modifiers can +// make the typedefs and the formats match up across all GNU platforms. If +// we used `long' when it's 64 bits where `long long' is expected, then the +// compiler would warn about the formats not matching the argument types, +// and the programmer changing them to shut up the compiler would break the +// program's portability. +// +// Here we assume what is presently the case in all the GCC configurations +// we support: long long is always 64 bits, long is always word/address size, +// and int is always 32 bits. + +// No need to mark the typedef with __extension__. +// bits/typesizes.h -- underlying types for *_t. Linux/x86-64 version. 
+// Copyright (C) 2012-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// See for the meaning of these macros. This file exists so +// that need not vary across different GNU platforms. + +// X32 kernel interface is 64-bit. + +// Tell the libc code that off_t and off64_t are actually the same type +// for all ABI purposes, even if possibly expressed as different base types +// for C type-checking purposes. + +// Same for ino_t and ino64_t. + +// And for __rlim_t and __rlim64_t. + +// And for fsblkcnt_t, fsblkcnt64_t, fsfilcnt_t and fsfilcnt64_t. + +// Number of descriptors that can fit in an `fd_set'. + +// bits/time64.h -- underlying types for __time64_t. Generic version. +// Copyright (C) 2018-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Define __TIME64_T_TYPE so that it is always a 64-bit type. + +// If we already have 64-bit time type then use it. + +type X__dev_t = uint64 /* types.h:145:25 */ // Type of device numbers. +type X__uid_t = uint32 /* types.h:146:25 */ // Type of user identifications. +type X__gid_t = uint32 /* types.h:147:25 */ // Type of group identifications. +type X__ino_t = uint64 /* types.h:148:25 */ // Type of file serial numbers. +type X__ino64_t = uint64 /* types.h:149:27 */ // Type of file serial numbers (LFS). +type X__mode_t = uint32 /* types.h:150:26 */ // Type of file attribute bitmasks. +type X__nlink_t = uint64 /* types.h:151:27 */ // Type of file link counts. +type X__off_t = int64 /* types.h:152:25 */ // Type of file sizes and offsets. +type X__off64_t = int64 /* types.h:153:27 */ // Type of file sizes and offsets (LFS). +type X__pid_t = int32 /* types.h:154:25 */ // Type of process identifications. +type X__fsid_t = struct{ F__val [2]int32 } /* types.h:155:26 */ // Type of file system IDs. +type X__clock_t = int64 /* types.h:156:27 */ // Type of CPU usage counts. +type X__rlim_t = uint64 /* types.h:157:26 */ // Type for resource measurement. +type X__rlim64_t = uint64 /* types.h:158:28 */ // Type for resource measurement (LFS). +type X__id_t = uint32 /* types.h:159:24 */ // General type for IDs. 
+type X__time_t = int64 /* types.h:160:26 */ // Seconds since the Epoch. +type X__useconds_t = uint32 /* types.h:161:30 */ // Count of microseconds. +type X__suseconds_t = int64 /* types.h:162:31 */ // Signed count of microseconds. + +type X__daddr_t = int32 /* types.h:164:27 */ // The type of a disk address. +type X__key_t = int32 /* types.h:165:25 */ // Type of an IPC key. + +// Clock ID used in clock and timer functions. +type X__clockid_t = int32 /* types.h:168:29 */ + +// Timer ID returned by `timer_create'. +type X__timer_t = uintptr /* types.h:171:12 */ + +// Type to represent block size. +type X__blksize_t = int64 /* types.h:174:29 */ + +// Types from the Large File Support interface. + +// Type to count number of disk blocks. +type X__blkcnt_t = int64 /* types.h:179:28 */ +type X__blkcnt64_t = int64 /* types.h:180:30 */ + +// Type to count file system blocks. +type X__fsblkcnt_t = uint64 /* types.h:183:30 */ +type X__fsblkcnt64_t = uint64 /* types.h:184:32 */ + +// Type to count file system nodes. +type X__fsfilcnt_t = uint64 /* types.h:187:30 */ +type X__fsfilcnt64_t = uint64 /* types.h:188:32 */ + +// Type of miscellaneous file system fields. +type X__fsword_t = int64 /* types.h:191:28 */ + +type X__ssize_t = int64 /* types.h:193:27 */ // Type of a byte count, or error. + +// Signed long type used in system calls. +type X__syscall_slong_t = int64 /* types.h:196:33 */ +// Unsigned long type used in system calls. +type X__syscall_ulong_t = uint64 /* types.h:198:33 */ + +// These few don't really vary by system, they always correspond +// +// to one of the other defined types. +type X__loff_t = X__off64_t /* types.h:202:19 */ // Type of file sizes and offsets (LFS). +type X__caddr_t = uintptr /* types.h:203:14 */ + +// Duplicates info from stdint.h but this is used in unistd.h. +type X__intptr_t = int64 /* types.h:206:25 */ + +// Duplicate info from sys/socket.h. +type X__socklen_t = uint32 /* types.h:209:23 */ + +// C99: An integer type that can be accessed as an atomic entity, +// +// even in the presence of asynchronous interrupts. +// It is not currently necessary for this to be machine-specific. +type X__sig_atomic_t = int32 /* types.h:214:13 */ + +// Seconds since the Epoch, visible to user code when time_t is too +// narrow only for consistency with the old way of widening too-narrow +// types. User code should never use __time64_t. + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Integral type unchanged by default argument promotions that can +// hold any value corresponding to members of the extended character +// set, as well as at least one value that does not correspond to any +// member of the extended character set. 
+ +// Conversion state information. +type X__mbstate_t = struct { + F__count int32 + F__value struct{ F__wch uint32 } +} /* __mbstate_t.h:21:3 */ + +// The tag name of this struct is _G_fpos_t to preserve historic +// +// C++ mangled names for functions taking fpos_t arguments. +// That name should not be used in new code. +type X_G_fpos_t = struct { + F__pos X__off_t + F__state X__mbstate_t +} /* __fpos_t.h:10:9 */ + +// The tag name of this struct is _G_fpos_t to preserve historic +// +// C++ mangled names for functions taking fpos_t arguments. +// That name should not be used in new code. +type X__fpos_t = X_G_fpos_t /* __fpos_t.h:14:3 */ + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// The tag name of this struct is _G_fpos64_t to preserve historic +// +// C++ mangled names for functions taking fpos_t and/or fpos64_t +// arguments. That name should not be used in new code. +type X_G_fpos64_t = struct { + F__pos X__off64_t + F__state X__mbstate_t +} /* __fpos64_t.h:10:9 */ + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// The tag name of this struct is _G_fpos64_t to preserve historic +// +// C++ mangled names for functions taking fpos_t and/or fpos64_t +// arguments. That name should not be used in new code. 
+type X__fpos64_t = X_G_fpos64_t /* __fpos64_t.h:14:3 */ + +type X_IO_FILE = struct { + F_flags int32 + F__ccgo_pad1 [4]byte + F_IO_read_ptr uintptr + F_IO_read_end uintptr + F_IO_read_base uintptr + F_IO_write_base uintptr + F_IO_write_ptr uintptr + F_IO_write_end uintptr + F_IO_buf_base uintptr + F_IO_buf_end uintptr + F_IO_save_base uintptr + F_IO_backup_base uintptr + F_IO_save_end uintptr + F_markers uintptr + F_chain uintptr + F_fileno int32 + F_flags2 int32 + F_old_offset X__off_t + F_cur_column uint16 + F_vtable_offset int8 + F_shortbuf [1]int8 + F__ccgo_pad2 [4]byte + F_lock uintptr + F_offset X__off64_t + F_codecvt uintptr + F_wide_data uintptr + F_freeres_list uintptr + F_freeres_buf uintptr + F__pad5 Size_t + F_mode int32 + F_unused2 [20]int8 +} /* __FILE.h:4:1 */ + +type X__FILE = X_IO_FILE /* __FILE.h:5:25 */ + +// The opaque type of streams. This is the definition used elsewhere. +type FILE = X_IO_FILE /* FILE.h:7:25 */ + +// These macros are used by bits/stdio.h and internal headers. + +// Many more flag bits are defined internally. + +type Va_list = X__gnuc_va_list /* stdio.h:52:24 */ + +type Off_t = X__off64_t /* stdio.h:65:19 */ + +type Ssize_t = X__ssize_t /* stdio.h:77:19 */ + +// The type of the second argument to `fgetpos' and `fsetpos'. +type Fpos_t = X__fpos64_t /* stdio.h:86:20 */ + +// If we are compiling with optimizing read this file. It contains +// several optimizing inline functions and macros. + +var _ int8 /* gen.c:2:13: */ diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/stdlib/capi_linux_loong64.go temporal-1.22.5/src/vendor/modernc.org/libc/stdlib/capi_linux_loong64.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/stdlib/capi_linux_loong64.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/stdlib/capi_linux_loong64.go 2024-02-23 09:46:15.000000000 +0000 @@ -0,0 +1,5 @@ +// Code generated by 'ccgo stdlib/gen.c -crt-import-path -export-defines -export-enums -export-externs X -export-fields F -export-structs -export-typedefs -header -hide _OSSwapInt16,_OSSwapInt32,_OSSwapInt64 -ignore-unsupported-alignment -o stdlib/stdlib_linux_amd64.go -pkgname stdlib', DO NOT EDIT. + +package stdlib + +var CAPI = map[string]struct{}{} diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/stdlib/stdlib_linux_loong64.go temporal-1.22.5/src/vendor/modernc.org/libc/stdlib/stdlib_linux_loong64.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/stdlib/stdlib_linux_loong64.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/stdlib/stdlib_linux_loong64.go 2024-02-23 09:46:15.000000000 +0000 @@ -0,0 +1,1541 @@ +// Code generated by 'ccgo stdlib/gen.c -crt-import-path "" -export-defines "" -export-enums "" -export-externs X -export-fields F -export-structs "" -export-typedefs "" -header -hide _OSSwapInt16,_OSSwapInt32,_OSSwapInt64 -ignore-unsupported-alignment -o stdlib/stdlib_linux_amd64.go -pkgname stdlib', DO NOT EDIT. 
+ +package stdlib + +import ( + "math" + "reflect" + "sync/atomic" + "unsafe" +) + +var _ = math.Pi +var _ reflect.Kind +var _ atomic.Value +var _ unsafe.Pointer + +const ( + BIG_ENDIAN = 4321 // endian.h:28:1: + BYTE_ORDER = 1234 // endian.h:30:1: + EXIT_FAILURE = 1 // stdlib.h:91:1: + EXIT_SUCCESS = 0 // stdlib.h:92:1: + FD_SETSIZE = 1024 // select.h:73:1: + LITTLE_ENDIAN = 1234 // endian.h:27:1: + PDP_ENDIAN = 3412 // endian.h:29:1: + RAND_MAX = 2147483647 // stdlib.h:86:1: + WCONTINUED = 8 // waitflags.h:32:1: + WEXITED = 4 // waitflags.h:31:1: + WNOHANG = 1 // waitflags.h:25:1: + WNOWAIT = 0x01000000 // waitflags.h:33:1: + WSTOPPED = 2 // waitflags.h:30:1: + WUNTRACED = 2 // waitflags.h:26:1: + X_ALLOCA_H = 1 // alloca.h:19:1: + X_ATFILE_SOURCE = 1 // features.h:342:1: + X_BITS_BYTESWAP_H = 1 // byteswap.h:24:1: + X_BITS_ENDIANNESS_H = 1 // endianness.h:2:1: + X_BITS_ENDIAN_H = 1 // endian.h:20:1: + X_BITS_FLOATN_COMMON_H = 0 // floatn-common.h:21:1: + X_BITS_FLOATN_H = 0 // floatn.h:20:1: + X_BITS_PTHREADTYPES_ARCH_H = 1 // pthreadtypes-arch.h:19:1: + X_BITS_PTHREADTYPES_COMMON_H = 1 // pthreadtypes.h:20:1: + X_BITS_STDINT_INTN_H = 1 // stdint-intn.h:20:1: + X_BITS_TIME64_H = 1 // time64.h:24:1: + X_BITS_TYPESIZES_H = 1 // typesizes.h:24:1: + X_BITS_TYPES_H = 1 // types.h:24:1: + X_BITS_UINTN_IDENTITY_H = 1 // uintn-identity.h:24:1: + X_BSD_SIZE_T_ = 0 // stddef.h:189:1: + X_BSD_SIZE_T_DEFINED_ = 0 // stddef.h:192:1: + X_DEFAULT_SOURCE = 1 // features.h:227:1: + X_ENDIAN_H = 1 // endian.h:19:1: + X_FEATURES_H = 1 // features.h:19:1: + X_FILE_OFFSET_BITS = 64 // :25:1: + X_GCC_SIZE_T = 0 // stddef.h:195:1: + X_GCC_WCHAR_T = 0 // stddef.h:273:1: + X_LP64 = 1 // :284:1: + X_POSIX_C_SOURCE = 200809 // features.h:281:1: + X_POSIX_SOURCE = 1 // features.h:279:1: + X_RWLOCK_INTERNAL_H = 0 // struct_rwlock.h:21:1: + X_SIZET_ = 0 // stddef.h:196:1: + X_SIZE_T = 0 // stddef.h:183:1: + X_SIZE_T_ = 0 // stddef.h:188:1: + X_SIZE_T_DECLARED = 0 // stddef.h:193:1: + X_SIZE_T_DEFINED = 0 // stddef.h:191:1: + X_SIZE_T_DEFINED_ = 0 // stddef.h:190:1: + X_STDC_PREDEF_H = 1 // :162:1: + X_STDLIB_H = 1 // stdlib.h:35:1: + X_STRUCT_TIMESPEC = 1 // struct_timespec.h:3:1: + X_SYS_CDEFS_H = 1 // cdefs.h:19:1: + X_SYS_SELECT_H = 1 // select.h:22:1: + X_SYS_SIZE_T_H = 0 // stddef.h:184:1: + X_SYS_TYPES_H = 1 // types.h:23:1: + X_THREAD_MUTEX_INTERNAL_H = 1 // struct_mutex.h:20:1: + X_THREAD_SHARED_TYPES_H = 1 // thread-shared-types.h:20:1: + X_T_SIZE = 0 // stddef.h:186:1: + X_T_SIZE_ = 0 // stddef.h:185:1: + X_T_WCHAR = 0 // stddef.h:264:1: + X_T_WCHAR_ = 0 // stddef.h:263:1: + X_WCHAR_T = 0 // stddef.h:262:1: + X_WCHAR_T_ = 0 // stddef.h:266:1: + X_WCHAR_T_DECLARED = 0 // stddef.h:274:1: + X_WCHAR_T_DEFINED = 0 // stddef.h:269:1: + X_WCHAR_T_DEFINED_ = 0 // stddef.h:268:1: + X_WCHAR_T_H = 0 // stddef.h:270:1: + Linux = 1 // :231:1: + Unix = 1 // :177:1: +) + +// A null pointer constant. + +// XPG requires a few symbols from being defined. +// Definitions of flag bits for `waitpid' et al. +// Copyright (C) 1992-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. 
+// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Bits in the third argument to `waitpid'. + +// Bits in the fourth argument to `waitid'. + +// The following values are used by the `waitid' function. + +// The Linux kernel defines these bare, rather than an enum, +// which causes a conflict if the include order is reversed. + +const ( /* waitflags.h:52:1: */ + P_ALL = 0 // Wait for any child. + P_PID = 1 // Wait for specified process. + P_PGID = 2 +) + +type Ptrdiff_t = int64 /* :3:26 */ + +type Size_t = uint64 /* :9:23 */ + +type Wchar_t = int32 /* :15:24 */ + +type X__int128_t = struct { + Flo int64 + Fhi int64 +} /* :21:43 */ // must match modernc.org/mathutil.Int128 +type X__uint128_t = struct { + Flo uint64 + Fhi uint64 +} /* :22:44 */ // must match modernc.org/mathutil.Int128 + +type X__builtin_va_list = uintptr /* :46:14 */ +type X__float128 = float64 /* :47:21 */ + +// A null pointer constant. + +// XPG requires a few symbols from being defined. +// Definitions of flag bits for `waitpid' et al. +// Copyright (C) 1992-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Bits in the third argument to `waitpid'. + +// Bits in the fourth argument to `waitid'. + +// The following values are used by the `waitid' function. + +// The Linux kernel defines these bare, rather than an enum, +// which causes a conflict if the include order is reversed. + +type Idtype_t = uint32 /* waitflags.h:57:3 */ +// Definitions of status bits for `wait' et al. +// Copyright (C) 1992-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Everything extant so far uses these same bits. + +// If WIFEXITED(STATUS), the low-order 8 bits of the status. + +// If WIFSIGNALED(STATUS), the terminating signal. + +// If WIFSTOPPED(STATUS), the signal that stopped the child. 
+ +// Nonzero if STATUS indicates normal termination. + +// Nonzero if STATUS indicates termination by a signal. + +// Nonzero if STATUS indicates the child is stopped. + +// Nonzero if STATUS indicates the child continued after a stop. We only +// define this if provides the WCONTINUED flag bit. + +// Nonzero if STATUS indicates the child dumped core. + +// Macros for constructing status values. + +// Define the macros also would define this way. + +// _FloatN API tests for enablement. +// Macros to control TS 18661-3 glibc features on x86. +// Copyright (C) 2017-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Defined to 1 if the current compiler invocation provides a +// floating-point type with the IEEE 754 binary128 format, and this +// glibc includes corresponding *f128 interfaces for it. The required +// libgcc support was added some time after the basic compiler +// support, for x86_64 and x86. + +// Defined to 1 if __HAVE_FLOAT128 is 1 and the type is ABI-distinct +// from the default float, double and long double types in this glibc. + +// Defined to 1 if the current compiler invocation provides a +// floating-point type with the right format for _Float64x, and this +// glibc includes corresponding *f64x interfaces for it. + +// Defined to 1 if __HAVE_FLOAT64X is 1 and _Float64x has the format +// of long double. Otherwise, if __HAVE_FLOAT64X is 1, _Float64x has +// the format of _Float128, which must be different from that of long +// double. + +// Defined to concatenate the literal suffix to be used with _Float128 +// types, if __HAVE_FLOAT128 is 1. + +// Defined to a complex binary128 type if __HAVE_FLOAT128 is 1. + +// The remaining of this file provides support for older compilers. + +// The type _Float128 exists only since GCC 7.0. + +// __builtin_huge_valf128 doesn't exist before GCC 7.0. + +// Older GCC has only a subset of built-in functions for _Float128 on +// x86, and __builtin_infq is not usable in static initializers. 
+// Converting a narrower sNaN to _Float128 produces a quiet NaN, so +// attempts to use _Float128 sNaNs will not work properly with older +// compilers. + +// In math/math.h, __MATH_TG will expand signbit to __builtin_signbit*, +// e.g.: __builtin_signbitf128, before GCC 6. However, there has never +// been a __builtin_signbitf128 in GCC and the type-generic builtin is +// only available since GCC 6. + +// Macros to control TS 18661-3 glibc features where the same +// definitions are appropriate for all platforms. +// Copyright (C) 2017-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Properties of long double type. ldbl-96 version. +// Copyright (C) 2016-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// long double is distinct from double, so there is nothing to +// define here. + +// This header should be included at the bottom of each bits/floatn.h. +// It defines the following macros for each _FloatN and _FloatNx type, +// where the same definitions, or definitions based only on the macros +// in bits/floatn.h, are appropriate for all glibc configurations. + +// Defined to 1 if the current compiler invocation provides a +// floating-point type with the right format for this type, and this +// glibc includes corresponding *fN or *fNx interfaces for it. 
+ +// Defined to 1 if the corresponding __HAVE_ macro is 1 and the +// type is the first with its format in the sequence of (the default +// choices for) float, double, long double, _Float16, _Float32, +// _Float64, _Float128, _Float32x, _Float64x, _Float128x for this +// glibc; that is, if functions present once per floating-point format +// rather than once per type are present for this type. +// +// All configurations supported by glibc have _Float32 the same format +// as float, _Float64 and _Float32x the same format as double, the +// _Float64x the same format as either long double or _Float128. No +// configurations support _Float128x or, as of GCC 7, have compiler +// support for a type meeting the requirements for _Float128x. + +// Defined to 1 if the corresponding _FloatN type is not binary compatible +// with the corresponding ISO C type in the current compilation unit as +// opposed to __HAVE_DISTINCT_FLOATN, which indicates the default types built +// in glibc. + +// Defined to 1 if any _FloatN or _FloatNx types that are not +// ABI-distinct are however distinct types at the C language level (so +// for the purposes of __builtin_types_compatible_p and _Generic). + +// Defined to concatenate the literal suffix to be used with _FloatN +// or _FloatNx types, if __HAVE_ is 1. The corresponding +// literal suffixes exist since GCC 7, for C only. + +// Defined to a complex type if __HAVE_ is 1. + +// The remaining of this file provides support for older compilers. + +// If double, long double and _Float64 all have the same set of +// values, TS 18661-3 requires the usual arithmetic conversions on +// long double and _Float64 to produce _Float64. For this to be the +// case when building with a compiler without a distinct _Float64 +// type, _Float64 must be a typedef for long double, not for +// double. + +// Returned by `div'. +type Div_t = struct { + Fquot int32 + Frem int32 +} /* stdlib.h:62:5 */ + +// Returned by `ldiv'. +type Ldiv_t = struct { + Fquot int64 + Frem int64 +} /* stdlib.h:70:5 */ + +// Returned by `lldiv'. +type Lldiv_t = struct { + Fquot int64 + Frem int64 +} /* stdlib.h:80:5 */ + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// POSIX Standard: 2.6 Primitive System Data Types + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Determine the wordsize from the preprocessor defines. + +// Both x86-64 and x32 use the 64-bit system call interface. +// Bit size of the time_t type at glibc build time, x86-64 and x32 case. +// Copyright (C) 2018-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// For others, time size is word size. + +// Convenience types. +type X__u_char = uint8 /* types.h:31:23 */ +type X__u_short = uint16 /* types.h:32:28 */ +type X__u_int = uint32 /* types.h:33:22 */ +type X__u_long = uint64 /* types.h:34:27 */ + +// Fixed-size types, underlying types depend on word size and compiler. 
+type X__int8_t = int8 /* types.h:37:21 */ +type X__uint8_t = uint8 /* types.h:38:23 */ +type X__int16_t = int16 /* types.h:39:26 */ +type X__uint16_t = uint16 /* types.h:40:28 */ +type X__int32_t = int32 /* types.h:41:20 */ +type X__uint32_t = uint32 /* types.h:42:22 */ +type X__int64_t = int64 /* types.h:44:25 */ +type X__uint64_t = uint64 /* types.h:45:27 */ + +// Smallest types with at least a given width. +type X__int_least8_t = X__int8_t /* types.h:52:18 */ +type X__uint_least8_t = X__uint8_t /* types.h:53:19 */ +type X__int_least16_t = X__int16_t /* types.h:54:19 */ +type X__uint_least16_t = X__uint16_t /* types.h:55:20 */ +type X__int_least32_t = X__int32_t /* types.h:56:19 */ +type X__uint_least32_t = X__uint32_t /* types.h:57:20 */ +type X__int_least64_t = X__int64_t /* types.h:58:19 */ +type X__uint_least64_t = X__uint64_t /* types.h:59:20 */ + +// quad_t is also 64 bits. +type X__quad_t = int64 /* types.h:63:18 */ +type X__u_quad_t = uint64 /* types.h:64:27 */ + +// Largest integral types. +type X__intmax_t = int64 /* types.h:72:18 */ +type X__uintmax_t = uint64 /* types.h:73:27 */ + +// The machine-dependent file defines __*_T_TYPE +// macros for each of the OS types we define below. The definitions +// of those macros must use the following macros for underlying types. +// We define __S_TYPE and __U_TYPE for the signed and unsigned +// variants of each of the following integer types on this machine. +// +// 16 -- "natural" 16-bit type (always short) +// 32 -- "natural" 32-bit type (always int) +// 64 -- "natural" 64-bit type (long or long long) +// LONG32 -- 32-bit type, traditionally long +// QUAD -- 64-bit type, traditionally long long +// WORD -- natural type of __WORDSIZE bits (int or long) +// LONGWORD -- type of __WORDSIZE bits, traditionally long +// +// We distinguish WORD/LONGWORD, 32/LONG32, and 64/QUAD so that the +// conventional uses of `long' or `long long' type modifiers match the +// types we define, even when a less-adorned type would be the same size. +// This matters for (somewhat) portably writing printf/scanf formats for +// these types, where using the appropriate l or ll format modifiers can +// make the typedefs and the formats match up across all GNU platforms. If +// we used `long' when it's 64 bits where `long long' is expected, then the +// compiler would warn about the formats not matching the argument types, +// and the programmer changing them to shut up the compiler would break the +// program's portability. +// +// Here we assume what is presently the case in all the GCC configurations +// we support: long long is always 64 bits, long is always word/address size, +// and int is always 32 bits. + +// No need to mark the typedef with __extension__. +// bits/typesizes.h -- underlying types for *_t. Linux/x86-64 version. +// Copyright (C) 2012-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// See for the meaning of these macros. This file exists so +// that need not vary across different GNU platforms. + +// X32 kernel interface is 64-bit. + +// Tell the libc code that off_t and off64_t are actually the same type +// for all ABI purposes, even if possibly expressed as different base types +// for C type-checking purposes. + +// Same for ino_t and ino64_t. + +// And for __rlim_t and __rlim64_t. + +// And for fsblkcnt_t, fsblkcnt64_t, fsfilcnt_t and fsfilcnt64_t. + +// Number of descriptors that can fit in an `fd_set'. + +// bits/time64.h -- underlying types for __time64_t. Generic version. +// Copyright (C) 2018-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Define __TIME64_T_TYPE so that it is always a 64-bit type. + +// If we already have 64-bit time type then use it. + +type X__dev_t = uint64 /* types.h:145:25 */ // Type of device numbers. +type X__uid_t = uint32 /* types.h:146:25 */ // Type of user identifications. +type X__gid_t = uint32 /* types.h:147:25 */ // Type of group identifications. +type X__ino_t = uint64 /* types.h:148:25 */ // Type of file serial numbers. +type X__ino64_t = uint64 /* types.h:149:27 */ // Type of file serial numbers (LFS). +type X__mode_t = uint32 /* types.h:150:26 */ // Type of file attribute bitmasks. +type X__nlink_t = uint64 /* types.h:151:27 */ // Type of file link counts. +type X__off_t = int64 /* types.h:152:25 */ // Type of file sizes and offsets. +type X__off64_t = int64 /* types.h:153:27 */ // Type of file sizes and offsets (LFS). +type X__pid_t = int32 /* types.h:154:25 */ // Type of process identifications. +type X__fsid_t = struct{ F__val [2]int32 } /* types.h:155:26 */ // Type of file system IDs. +type X__clock_t = int64 /* types.h:156:27 */ // Type of CPU usage counts. +type X__rlim_t = uint64 /* types.h:157:26 */ // Type for resource measurement. +type X__rlim64_t = uint64 /* types.h:158:28 */ // Type for resource measurement (LFS). +type X__id_t = uint32 /* types.h:159:24 */ // General type for IDs. +type X__time_t = int64 /* types.h:160:26 */ // Seconds since the Epoch. +type X__useconds_t = uint32 /* types.h:161:30 */ // Count of microseconds. +type X__suseconds_t = int64 /* types.h:162:31 */ // Signed count of microseconds. + +type X__daddr_t = int32 /* types.h:164:27 */ // The type of a disk address. +type X__key_t = int32 /* types.h:165:25 */ // Type of an IPC key. + +// Clock ID used in clock and timer functions. +type X__clockid_t = int32 /* types.h:168:29 */ + +// Timer ID returned by `timer_create'. +type X__timer_t = uintptr /* types.h:171:12 */ + +// Type to represent block size. +type X__blksize_t = int64 /* types.h:174:29 */ + +// Types from the Large File Support interface. 
+ +// Type to count number of disk blocks. +type X__blkcnt_t = int64 /* types.h:179:28 */ +type X__blkcnt64_t = int64 /* types.h:180:30 */ + +// Type to count file system blocks. +type X__fsblkcnt_t = uint64 /* types.h:183:30 */ +type X__fsblkcnt64_t = uint64 /* types.h:184:32 */ + +// Type to count file system nodes. +type X__fsfilcnt_t = uint64 /* types.h:187:30 */ +type X__fsfilcnt64_t = uint64 /* types.h:188:32 */ + +// Type of miscellaneous file system fields. +type X__fsword_t = int64 /* types.h:191:28 */ + +type X__ssize_t = int64 /* types.h:193:27 */ // Type of a byte count, or error. + +// Signed long type used in system calls. +type X__syscall_slong_t = int64 /* types.h:196:33 */ +// Unsigned long type used in system calls. +type X__syscall_ulong_t = uint64 /* types.h:198:33 */ + +// These few don't really vary by system, they always correspond +// +// to one of the other defined types. +type X__loff_t = X__off64_t /* types.h:202:19 */ // Type of file sizes and offsets (LFS). +type X__caddr_t = uintptr /* types.h:203:14 */ + +// Duplicates info from stdint.h but this is used in unistd.h. +type X__intptr_t = int64 /* types.h:206:25 */ + +// Duplicate info from sys/socket.h. +type X__socklen_t = uint32 /* types.h:209:23 */ + +// C99: An integer type that can be accessed as an atomic entity, +// +// even in the presence of asynchronous interrupts. +// It is not currently necessary for this to be machine-specific. +type X__sig_atomic_t = int32 /* types.h:214:13 */ + +// Seconds since the Epoch, visible to user code when time_t is too +// narrow only for consistency with the old way of widening too-narrow +// types. User code should never use __time64_t. + +type U_char = X__u_char /* types.h:33:18 */ +type U_short = X__u_short /* types.h:34:19 */ +type U_int = X__u_int /* types.h:35:17 */ +type U_long = X__u_long /* types.h:36:18 */ +type Quad_t = X__quad_t /* types.h:37:18 */ +type U_quad_t = X__u_quad_t /* types.h:38:20 */ +type Fsid_t = X__fsid_t /* types.h:39:18 */ +type Loff_t = X__loff_t /* types.h:42:18 */ + +type Ino_t = X__ino64_t /* types.h:49:19 */ + +type Dev_t = X__dev_t /* types.h:59:17 */ + +type Gid_t = X__gid_t /* types.h:64:17 */ + +type Mode_t = X__mode_t /* types.h:69:18 */ + +type Nlink_t = X__nlink_t /* types.h:74:19 */ + +type Uid_t = X__uid_t /* types.h:79:17 */ + +type Off_t = X__off64_t /* types.h:87:19 */ + +type Pid_t = X__pid_t /* types.h:97:17 */ + +type Id_t = X__id_t /* types.h:103:16 */ + +type Ssize_t = X__ssize_t /* types.h:108:19 */ + +type Daddr_t = X__daddr_t /* types.h:114:19 */ +type Caddr_t = X__caddr_t /* types.h:115:19 */ + +type Key_t = X__key_t /* types.h:121:17 */ + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . 
+ +// Never include this file directly; use instead. + +// Returned by `clock'. +type Clock_t = X__clock_t /* clock_t.h:7:19 */ + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Clock ID used in clock and timer functions. +type Clockid_t = X__clockid_t /* clockid_t.h:7:21 */ + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Returned by `time'. +type Time_t = X__time_t /* time_t.h:7:18 */ + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Timer ID returned by `timer_create'. +type Timer_t = X__timer_t /* timer_t.h:7:19 */ + +// Copyright (C) 1989-2020 Free Software Foundation, Inc. +// +// This file is part of GCC. +// +// GCC is free software; you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation; either version 3, or (at your option) +// any later version. 
+// +// GCC is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// Under Section 7 of GPL version 3, you are granted additional +// permissions described in the GCC Runtime Library Exception, version +// 3.1, as published by the Free Software Foundation. +// +// You should have received a copy of the GNU General Public License and +// a copy of the GCC Runtime Library Exception along with this program; +// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +// . + +// ISO C Standard: 7.17 Common definitions + +// Any one of these symbols __need_* means that GNU libc +// wants us just to define one data type. So don't define +// the symbols that indicate this file's entire job has been done. + +// This avoids lossage on SunOS but only if stdtypes.h comes first. +// There's no way to win with the other order! Sun lossage. + +// Sequent's header files use _PTRDIFF_T_ in some conflicting way. +// Just ignore it. + +// On VxWorks, may have defined macros like +// _TYPE_size_t which will typedef size_t. fixincludes patched the +// vxTypesBase.h so that this macro is only defined if _GCC_SIZE_T is +// not defined, and so that defining this macro defines _GCC_SIZE_T. +// If we find that the macros are still defined at this point, we must +// invoke them so that the type is defined as expected. + +// In case nobody has defined these types, but we aren't running under +// GCC 2.00, make sure that __PTRDIFF_TYPE__, __SIZE_TYPE__, and +// __WCHAR_TYPE__ have reasonable values. This can happen if the +// parts of GCC is compiled by an older compiler, that actually +// include gstddef.h, such as collect2. + +// Signed type of difference of two pointers. + +// Define this type if we are doing the whole job, +// or if we want this type in particular. + +// Unsigned type of `sizeof' something. + +// Define this type if we are doing the whole job, +// or if we want this type in particular. + +// Wide character type. +// Locale-writers should change this as necessary to +// be big enough to hold unique values not between 0 and 127, +// and not (wchar_t) -1, for each defined multibyte character. + +// Define this type if we are doing the whole job, +// or if we want this type in particular. + +// A null pointer constant. + +// Old compatibility names for C types. +type Ulong = uint64 /* types.h:148:27 */ +type Ushort = uint16 /* types.h:149:28 */ +type Uint = uint32 /* types.h:150:22 */ + +// These size-specific names are used by some of the inet code. + +// Define intN_t types. +// Copyright (C) 2017-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// bits/types.h -- definitions of __*_t types underlying *_t types. 
+// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +type Int8_t = X__int8_t /* stdint-intn.h:24:18 */ +type Int16_t = X__int16_t /* stdint-intn.h:25:19 */ +type Int32_t = X__int32_t /* stdint-intn.h:26:19 */ +type Int64_t = X__int64_t /* stdint-intn.h:27:19 */ + +// These were defined by ISO C without the first `_'. +type U_int8_t = X__uint8_t /* types.h:158:19 */ +type U_int16_t = X__uint16_t /* types.h:159:20 */ +type U_int32_t = X__uint32_t /* types.h:160:20 */ +type U_int64_t = X__uint64_t /* types.h:161:20 */ + +type Register_t = int32 /* types.h:164:13 */ + +// It also defines `fd_set' and the FD_* macros for `select'. +// `fd_set' type and related macros, and `select'/`pselect' declarations. +// Copyright (C) 1996-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// POSIX 1003.1g: 6.2 Select from File Descriptor Sets + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Get definition of needed basic types. +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. 
+// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Get __FD_* definitions. +// Copyright (C) 1997-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Determine the wordsize from the preprocessor defines. + +// Both x86-64 and x32 use the 64-bit system call interface. + +// Get sigset_t. + +type X__sigset_t = struct{ F__val [16]uint64 } /* __sigset_t.h:8:3 */ + +// A set of signals to be blocked, unblocked, or waited for. +type Sigset_t = X__sigset_t /* sigset_t.h:7:20 */ + +// Get definition of timer specification structures. + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// A time value that is accurate to the nearest +// +// microsecond but also has a range of years. +type Timeval = struct { + Ftv_sec X__time_t + Ftv_usec X__suseconds_t +} /* struct_timeval.h:8:1 */ + +// NB: Include guard matches what uses. + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. 
+// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Endian macros for string.h functions +// Copyright (C) 1992-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// POSIX.1b structure for a time value. This is like a `struct timeval' but +// +// has nanoseconds instead of microseconds. +type Timespec = struct { + Ftv_sec X__time_t + Ftv_nsec X__syscall_slong_t +} /* struct_timespec.h:10:1 */ + +type Suseconds_t = X__suseconds_t /* select.h:43:23 */ + +// The fd_set member is required to be an array of longs. +type X__fd_mask = int64 /* select.h:49:18 */ + +// Some versions of define this macros. +// It's easier to assume 8-bit bytes than to get CHAR_BIT. + +// fd_set for select and pselect. +type Fd_set = struct{ F__fds_bits [16]X__fd_mask } /* select.h:70:5 */ + +// Maximum number of file descriptors in `fd_set'. + +// Sometimes the fd_set member is assumed to have this type. +type Fd_mask = X__fd_mask /* select.h:77:19 */ + +// Define some inlines helping to catch common problems. + +type Blksize_t = X__blksize_t /* types.h:185:21 */ + +// Types from the Large File Support interface. +type Blkcnt_t = X__blkcnt64_t /* types.h:205:22 */ // Type to count number of disk blocks. +type Fsblkcnt_t = X__fsblkcnt64_t /* types.h:209:24 */ // Type to count file system blocks. +type Fsfilcnt_t = X__fsfilcnt64_t /* types.h:213:24 */ // Type to count file system inodes. + +// Now add the thread types. +// Declaration of common pthread types for all architectures. +// Copyright (C) 2017-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// For internal mutex and condition variable definitions. +// Common threading primitives definitions for both POSIX and C11. +// Copyright (C) 2017-2020 Free Software Foundation, Inc. 
+// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Arch-specific definitions. Each architecture must define the following +// macros to define the expected sizes of pthread data types: +// +// __SIZEOF_PTHREAD_ATTR_T - size of pthread_attr_t. +// __SIZEOF_PTHREAD_MUTEX_T - size of pthread_mutex_t. +// __SIZEOF_PTHREAD_MUTEXATTR_T - size of pthread_mutexattr_t. +// __SIZEOF_PTHREAD_COND_T - size of pthread_cond_t. +// __SIZEOF_PTHREAD_CONDATTR_T - size of pthread_condattr_t. +// __SIZEOF_PTHREAD_RWLOCK_T - size of pthread_rwlock_t. +// __SIZEOF_PTHREAD_RWLOCKATTR_T - size of pthread_rwlockattr_t. +// __SIZEOF_PTHREAD_BARRIER_T - size of pthread_barrier_t. +// __SIZEOF_PTHREAD_BARRIERATTR_T - size of pthread_barrierattr_t. +// +// The additional macro defines any constraint for the lock alignment +// inside the thread structures: +// +// __LOCK_ALIGNMENT - for internal lock/futex usage. +// +// Same idea but for the once locking primitive: +// +// __ONCE_ALIGNMENT - for pthread_once_t/once_flag definition. + +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Determine the wordsize from the preprocessor defines. + +// Both x86-64 and x32 use the 64-bit system call interface. + +// Common definition of pthread_mutex_t. + +type X__pthread_internal_list = struct { + F__prev uintptr + F__next uintptr +} /* thread-shared-types.h:49:9 */ + +// Type to count file system inodes. + +// Now add the thread types. +// Declaration of common pthread types for all architectures. +// Copyright (C) 2017-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// For internal mutex and condition variable definitions. +// Common threading primitives definitions for both POSIX and C11. +// Copyright (C) 2017-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Arch-specific definitions. Each architecture must define the following +// macros to define the expected sizes of pthread data types: +// +// __SIZEOF_PTHREAD_ATTR_T - size of pthread_attr_t. +// __SIZEOF_PTHREAD_MUTEX_T - size of pthread_mutex_t. +// __SIZEOF_PTHREAD_MUTEXATTR_T - size of pthread_mutexattr_t. +// __SIZEOF_PTHREAD_COND_T - size of pthread_cond_t. +// __SIZEOF_PTHREAD_CONDATTR_T - size of pthread_condattr_t. +// __SIZEOF_PTHREAD_RWLOCK_T - size of pthread_rwlock_t. +// __SIZEOF_PTHREAD_RWLOCKATTR_T - size of pthread_rwlockattr_t. +// __SIZEOF_PTHREAD_BARRIER_T - size of pthread_barrier_t. +// __SIZEOF_PTHREAD_BARRIERATTR_T - size of pthread_barrierattr_t. +// +// The additional macro defines any constraint for the lock alignment +// inside the thread structures: +// +// __LOCK_ALIGNMENT - for internal lock/futex usage. +// +// Same idea but for the once locking primitive: +// +// __ONCE_ALIGNMENT - for pthread_once_t/once_flag definition. + +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Determine the wordsize from the preprocessor defines. + +// Both x86-64 and x32 use the 64-bit system call interface. + +// Common definition of pthread_mutex_t. + +type X__pthread_list_t = X__pthread_internal_list /* thread-shared-types.h:53:3 */ + +type X__pthread_internal_slist = struct{ F__next uintptr } /* thread-shared-types.h:55:9 */ + +type X__pthread_slist_t = X__pthread_internal_slist /* thread-shared-types.h:58:3 */ + +// Arch-specific mutex definitions. A generic implementation is provided +// by sysdeps/nptl/bits/struct_mutex.h. If required, an architecture +// can override it by defining: +// +// 1. struct __pthread_mutex_s (used on both pthread_mutex_t and mtx_t +// definition). 
It should contains at least the internal members +// defined in the generic version. +// +// 2. __LOCK_ALIGNMENT for any extra attribute for internal lock used with +// atomic operations. +// +// 3. The macro __PTHREAD_MUTEX_INITIALIZER used for static initialization. +// It should initialize the mutex internal flag. + +// x86 internal mutex struct definitions. +// Copyright (C) 2019-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +type X__pthread_mutex_s = struct { + F__lock int32 + F__count uint32 + F__owner int32 + F__nusers uint32 + F__kind int32 + F__spins int16 + F__elision int16 + F__list X__pthread_list_t +} /* struct_mutex.h:22:1 */ + +// Arch-sepecific read-write lock definitions. A generic implementation is +// provided by struct_rwlock.h. If required, an architecture can override it +// by defining: +// +// 1. struct __pthread_rwlock_arch_t (used on pthread_rwlock_t definition). +// It should contain at least the internal members defined in the +// generic version. +// +// 2. The macro __PTHREAD_RWLOCK_INITIALIZER used for static initialization. +// It should initialize the rwlock internal type. + +// x86 internal rwlock struct definitions. +// Copyright (C) 2019-2020 Free Software Foundation, Inc. +// +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +type X__pthread_rwlock_arch_t = struct { + F__readers uint32 + F__writers uint32 + F__wrphase_futex uint32 + F__writers_futex uint32 + F__pad3 uint32 + F__pad4 uint32 + F__cur_writer int32 + F__shared int32 + F__rwelision int8 + F__pad1 [7]uint8 + F__pad2 uint64 + F__flags uint32 + F__ccgo_pad1 [4]byte +} /* struct_rwlock.h:23:1 */ + +// Common definition of pthread_cond_t. + +type X__pthread_cond_s = struct { + F__0 struct{ F__wseq uint64 } + F__8 struct{ F__g1_start uint64 } + F__g_refs [2]uint32 + F__g_size [2]uint32 + F__g1_orig_size uint32 + F__wrefs uint32 + F__g_signals [2]uint32 +} /* thread-shared-types.h:92:1 */ + +// Thread identifiers. The structure of the attribute type is not +// +// exposed on purpose. +type Pthread_t = uint64 /* pthreadtypes.h:27:27 */ + +// Data structures for mutex handling. The structure of the attribute +// +// type is not exposed on purpose. 
+type Pthread_mutexattr_t = struct { + F__ccgo_pad1 [0]uint32 + F__size [4]int8 +} /* pthreadtypes.h:36:3 */ + +// Data structure for condition variable handling. The structure of +// +// the attribute type is not exposed on purpose. +type Pthread_condattr_t = struct { + F__ccgo_pad1 [0]uint32 + F__size [4]int8 +} /* pthreadtypes.h:45:3 */ + +// Keys for thread-specific data +type Pthread_key_t = uint32 /* pthreadtypes.h:49:22 */ + +// Once-only execution +type Pthread_once_t = int32 /* pthreadtypes.h:53:30 */ + +type Pthread_attr_t1 = struct { + F__ccgo_pad1 [0]uint64 + F__size [56]int8 +} /* pthreadtypes.h:56:1 */ + +type Pthread_attr_t = Pthread_attr_t1 /* pthreadtypes.h:62:30 */ + +type Pthread_mutex_t = struct{ F__data X__pthread_mutex_s } /* pthreadtypes.h:72:3 */ + +type Pthread_cond_t = struct{ F__data X__pthread_cond_s } /* pthreadtypes.h:80:3 */ + +// Data structure for reader-writer lock variable handling. The +// +// structure of the attribute type is deliberately not exposed. +type Pthread_rwlock_t = struct{ F__data X__pthread_rwlock_arch_t } /* pthreadtypes.h:91:3 */ + +type Pthread_rwlockattr_t = struct { + F__ccgo_pad1 [0]uint64 + F__size [8]int8 +} /* pthreadtypes.h:97:3 */ + +// POSIX spinlock data type. +type Pthread_spinlock_t = int32 /* pthreadtypes.h:103:22 */ + +// POSIX barriers data type. The structure of the type is +// +// deliberately not exposed. +type Pthread_barrier_t = struct { + F__ccgo_pad1 [0]uint64 + F__size [32]int8 +} /* pthreadtypes.h:112:3 */ + +type Pthread_barrierattr_t = struct { + F__ccgo_pad1 [0]uint32 + F__size [4]int8 +} /* pthreadtypes.h:118:3 */ + +// Reentrant versions of the `random' family of functions. +// These functions all use the following data structure to contain +// state, rather than global state variables. + +type Random_data = struct { + Ffptr uintptr + Frptr uintptr + Fstate uintptr + Frand_type int32 + Frand_deg int32 + Frand_sep int32 + F__ccgo_pad1 [4]byte + Fend_ptr uintptr +} /* stdlib.h:423:1 */ + +// Data structure for communication with thread safe versions. This +// +// type is to be regarded as opaque. It's only exported because users +// have to allocate objects of this type. +type Drand48_data = struct { + F__x [3]uint16 + F__old_x [3]uint16 + F__c uint16 + F__init uint16 + F__a uint64 +} /* stdlib.h:490:1 */ + +// Shorthand for type of comparison functions. +type X__compar_fn_t = uintptr /* stdlib.h:808:13 */ + +// Floating-point inline functions for stdlib.h. +// Copyright (C) 2012-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Define some macros helping to catch buffer overflows. 
+ +var _ int8 /* gen.c:2:13: */ diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/sys/socket/capi_linux_loong64.go temporal-1.22.5/src/vendor/modernc.org/libc/sys/socket/capi_linux_loong64.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/sys/socket/capi_linux_loong64.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/sys/socket/capi_linux_loong64.go 2024-02-23 09:46:15.000000000 +0000 @@ -0,0 +1,5 @@ +// Code generated by 'ccgo sys/socket/gen.c -crt-import-path -export-defines -export-enums -export-externs X -export-fields F -export-structs -export-typedefs -header -hide _OSSwapInt16,_OSSwapInt32,_OSSwapInt64 -ignore-unsupported-alignment -o sys/socket/socket_linux_amd64.go -pkgname socket', DO NOT EDIT. + +package socket + +var CAPI = map[string]struct{}{} diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/sys/socket/socket_linux_loong64.go temporal-1.22.5/src/vendor/modernc.org/libc/sys/socket/socket_linux_loong64.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/sys/socket/socket_linux_loong64.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/sys/socket/socket_linux_loong64.go 2024-02-23 09:46:15.000000000 +0000 @@ -0,0 +1,1848 @@ +// Code generated by 'ccgo sys/socket/gen.c -crt-import-path "" -export-defines "" -export-enums "" -export-externs X -export-fields F -export-structs "" -export-typedefs "" -header -hide _OSSwapInt16,_OSSwapInt32,_OSSwapInt64 -ignore-unsupported-alignment -o sys/socket/socket_linux_amd64.go -pkgname socket', DO NOT EDIT. + +package socket + +import ( + "math" + "reflect" + "sync/atomic" + "unsafe" +) + +var _ = math.Pi +var _ reflect.Kind +var _ atomic.Value +var _ unsafe.Pointer + +const ( + AF_ALG = 38 // socket.h:133:1: + AF_APPLETALK = 5 // socket.h:99:1: + AF_ASH = 18 // socket.h:113:1: + AF_ATMPVC = 8 // socket.h:102:1: + AF_ATMSVC = 20 // socket.h:115:1: + AF_AX25 = 3 // socket.h:97:1: + AF_BLUETOOTH = 31 // socket.h:126:1: + AF_BRIDGE = 7 // socket.h:101:1: + AF_CAIF = 37 // socket.h:132:1: + AF_CAN = 29 // socket.h:124:1: + AF_DECnet = 12 // socket.h:106:1: + AF_ECONET = 19 // socket.h:114:1: + AF_FILE = 1 // socket.h:95:1: + AF_IB = 27 // socket.h:122:1: + AF_IEEE802154 = 36 // socket.h:131:1: + AF_INET = 2 // socket.h:96:1: + AF_INET6 = 10 // socket.h:104:1: + AF_IPX = 4 // socket.h:98:1: + AF_IRDA = 23 // socket.h:118:1: + AF_ISDN = 34 // socket.h:129:1: + AF_IUCV = 32 // socket.h:127:1: + AF_KCM = 41 // socket.h:136:1: + AF_KEY = 15 // socket.h:109:1: + AF_LLC = 26 // socket.h:121:1: + AF_LOCAL = 1 // socket.h:93:1: + AF_MAX = 45 // socket.h:140:1: + AF_MPLS = 28 // socket.h:123:1: + AF_NETBEUI = 13 // socket.h:107:1: + AF_NETLINK = 16 // socket.h:110:1: + AF_NETROM = 6 // socket.h:100:1: + AF_NFC = 39 // socket.h:134:1: + AF_PACKET = 17 // socket.h:112:1: + AF_PHONET = 35 // socket.h:130:1: + AF_PPPOX = 24 // socket.h:119:1: + AF_QIPCRTR = 42 // socket.h:137:1: + AF_RDS = 21 // socket.h:116:1: + AF_ROSE = 11 // socket.h:105:1: + AF_ROUTE = 16 // socket.h:111:1: + AF_RXRPC = 33 // socket.h:128:1: + AF_SECURITY = 14 // socket.h:108:1: + AF_SMC = 43 // socket.h:138:1: + AF_SNA = 22 // socket.h:117:1: + AF_TIPC = 30 // socket.h:125:1: + AF_UNIX = 1 // socket.h:94:1: + AF_UNSPEC = 0 // socket.h:92:1: + AF_VSOCK = 40 // socket.h:135:1: + AF_WANPIPE = 25 // socket.h:120:1: + AF_X25 = 9 // socket.h:103:1: + AF_XDP = 44 // socket.h:139:1: + BIG_ENDIAN = 4321 // endian.h:28:1: + BYTE_ORDER = 1234 // endian.h:30:1: + FD_SETSIZE = 1024 // select.h:73:1: + FIOGETOWN = 
0x8903 // sockios.h:8:1: + FIOSETOWN = 0x8901 // sockios.h:6:1: + LITTLE_ENDIAN = 1234 // endian.h:27:1: + PDP_ENDIAN = 3412 // endian.h:29:1: + PF_ALG = 38 // socket.h:82:1: + PF_APPLETALK = 5 // socket.h:48:1: + PF_ASH = 18 // socket.h:62:1: + PF_ATMPVC = 8 // socket.h:51:1: + PF_ATMSVC = 20 // socket.h:64:1: + PF_AX25 = 3 // socket.h:46:1: + PF_BLUETOOTH = 31 // socket.h:75:1: + PF_BRIDGE = 7 // socket.h:50:1: + PF_CAIF = 37 // socket.h:81:1: + PF_CAN = 29 // socket.h:73:1: + PF_DECnet = 12 // socket.h:55:1: + PF_ECONET = 19 // socket.h:63:1: + PF_FILE = 1 // socket.h:44:1: + PF_IB = 27 // socket.h:71:1: + PF_IEEE802154 = 36 // socket.h:80:1: + PF_INET = 2 // socket.h:45:1: + PF_INET6 = 10 // socket.h:53:1: + PF_IPX = 4 // socket.h:47:1: + PF_IRDA = 23 // socket.h:67:1: + PF_ISDN = 34 // socket.h:78:1: + PF_IUCV = 32 // socket.h:76:1: + PF_KCM = 41 // socket.h:85:1: + PF_KEY = 15 // socket.h:58:1: + PF_LLC = 26 // socket.h:70:1: + PF_LOCAL = 1 // socket.h:42:1: + PF_MAX = 45 // socket.h:89:1: + PF_MPLS = 28 // socket.h:72:1: + PF_NETBEUI = 13 // socket.h:56:1: + PF_NETLINK = 16 // socket.h:59:1: + PF_NETROM = 6 // socket.h:49:1: + PF_NFC = 39 // socket.h:83:1: + PF_PACKET = 17 // socket.h:61:1: + PF_PHONET = 35 // socket.h:79:1: + PF_PPPOX = 24 // socket.h:68:1: + PF_QIPCRTR = 42 // socket.h:86:1: + PF_RDS = 21 // socket.h:65:1: + PF_ROSE = 11 // socket.h:54:1: + PF_ROUTE = 16 // socket.h:60:1: + PF_RXRPC = 33 // socket.h:77:1: + PF_SECURITY = 14 // socket.h:57:1: + PF_SMC = 43 // socket.h:87:1: + PF_SNA = 22 // socket.h:66:1: + PF_TIPC = 30 // socket.h:74:1: + PF_UNIX = 1 // socket.h:43:1: + PF_UNSPEC = 0 // socket.h:41:1: + PF_VSOCK = 40 // socket.h:84:1: + PF_WANPIPE = 25 // socket.h:69:1: + PF_X25 = 9 // socket.h:52:1: + PF_XDP = 44 // socket.h:88:1: + SCM_TIMESTAMP = 29 // socket.h:140:1: + SCM_TIMESTAMPING = 37 // socket.h:142:1: + SCM_TIMESTAMPING_OPT_STATS = 54 // socket.h:90:1: + SCM_TIMESTAMPING_PKTINFO = 58 // socket.h:98:1: + SCM_TIMESTAMPNS = 35 // socket.h:141:1: + SCM_TXTIME = 61 // socket.h:105:1: + SCM_WIFI_STATUS = 41 // socket.h:64:1: + SIOCATMARK = 0x8905 // sockios.h:10:1: + SIOCGPGRP = 0x8904 // sockios.h:9:1: + SIOCGSTAMP = 0x8906 // sockios.h:11:1: + SIOCGSTAMPNS = 0x8907 // sockios.h:12:1: + SIOCSPGRP = 0x8902 // sockios.h:7:1: + SOL_AAL = 265 // socket.h:151:1: + SOL_ALG = 279 // socket.h:165:1: + SOL_ATM = 264 // socket.h:150:1: + SOL_BLUETOOTH = 274 // socket.h:160:1: + SOL_CAIF = 278 // socket.h:164:1: + SOL_DCCP = 269 // socket.h:155:1: + SOL_DECNET = 261 // socket.h:147:1: + SOL_IRDA = 266 // socket.h:152:1: + SOL_IUCV = 277 // socket.h:163:1: + SOL_KCM = 281 // socket.h:167:1: + SOL_LLC = 268 // socket.h:154:1: + SOL_NETBEUI = 267 // socket.h:153:1: + SOL_NETLINK = 270 // socket.h:156:1: + SOL_NFC = 280 // socket.h:166:1: + SOL_PACKET = 263 // socket.h:149:1: + SOL_PNPIPE = 275 // socket.h:161:1: + SOL_PPPOL2TP = 273 // socket.h:159:1: + SOL_RAW = 255 // socket.h:146:1: + SOL_RDS = 276 // socket.h:162:1: + SOL_RXRPC = 272 // socket.h:158:1: + SOL_SOCKET = 1 // socket.h:9:1: + SOL_TIPC = 271 // socket.h:157:1: + SOL_TLS = 282 // socket.h:168:1: + SOL_X25 = 262 // socket.h:148:1: + SOL_XDP = 283 // socket.h:169:1: + SOMAXCONN = 4096 // socket.h:172:1: + SO_ACCEPTCONN = 30 // socket.h:51:1: + SO_ATTACH_BPF = 50 // socket.h:82:1: + SO_ATTACH_FILTER = 26 // socket.h:45:1: + SO_ATTACH_REUSEPORT_CBPF = 51 // socket.h:85:1: + SO_ATTACH_REUSEPORT_EBPF = 52 // socket.h:86:1: + SO_BINDTODEVICE = 25 // socket.h:42:1: + SO_BINDTOIFINDEX = 62 // socket.h:107:1: + 
SO_BPF_EXTENSIONS = 48 // socket.h:78:1: + SO_BROADCAST = 6 // socket.h:16:1: + SO_BSDCOMPAT = 14 // socket.h:26:1: + SO_BUSY_POLL = 46 // socket.h:74:1: + SO_CNX_ADVICE = 53 // socket.h:88:1: + SO_COOKIE = 57 // socket.h:96:1: + SO_DEBUG = 1 // socket.h:11:1: + SO_DETACH_BPF = 27 // socket.h:83:1: + SO_DETACH_FILTER = 27 // socket.h:46:1: + SO_DETACH_REUSEPORT_BPF = 68 // socket.h:120:1: + SO_DOMAIN = 39 // socket.h:59:1: + SO_DONTROUTE = 5 // socket.h:15:1: + SO_ERROR = 4 // socket.h:14:1: + SO_GET_FILTER = 26 // socket.h:47:1: + SO_INCOMING_CPU = 49 // socket.h:80:1: + SO_INCOMING_NAPI_ID = 56 // socket.h:94:1: + SO_KEEPALIVE = 9 // socket.h:21:1: + SO_LINGER = 13 // socket.h:25:1: + SO_LOCK_FILTER = 44 // socket.h:70:1: + SO_MARK = 36 // socket.h:56:1: + SO_MAX_PACING_RATE = 47 // socket.h:76:1: + SO_MEMINFO = 55 // socket.h:92:1: + SO_NOFCS = 43 // socket.h:68:1: + SO_NO_CHECK = 11 // socket.h:23:1: + SO_OOBINLINE = 10 // socket.h:22:1: + SO_PASSCRED = 16 // socket.h:29:1: + SO_PASSSEC = 34 // socket.h:54:1: + SO_PEEK_OFF = 42 // socket.h:65:1: + SO_PEERCRED = 17 // socket.h:30:1: + SO_PEERGROUPS = 59 // socket.h:100:1: + SO_PEERNAME = 28 // socket.h:49:1: + SO_PEERSEC = 31 // socket.h:53:1: + SO_PRIORITY = 12 // socket.h:24:1: + SO_PROTOCOL = 38 // socket.h:58:1: + SO_RCVBUF = 8 // socket.h:18:1: + SO_RCVBUFFORCE = 33 // socket.h:20:1: + SO_RCVLOWAT = 18 // socket.h:31:1: + SO_RCVTIMEO = 20 // socket.h:129:1: + SO_RCVTIMEO_NEW = 66 // socket.h:117:1: + SO_RCVTIMEO_OLD = 20 // socket.h:33:1: + SO_REUSEADDR = 2 // socket.h:12:1: + SO_REUSEPORT = 15 // socket.h:27:1: + SO_RXQ_OVFL = 40 // socket.h:61:1: + SO_SECURITY_AUTHENTICATION = 22 // socket.h:38:1: + SO_SECURITY_ENCRYPTION_NETWORK = 24 // socket.h:40:1: + SO_SECURITY_ENCRYPTION_TRANSPORT = 23 // socket.h:39:1: + SO_SELECT_ERR_QUEUE = 45 // socket.h:72:1: + SO_SNDBUF = 7 // socket.h:17:1: + SO_SNDBUFFORCE = 32 // socket.h:19:1: + SO_SNDLOWAT = 19 // socket.h:32:1: + SO_SNDTIMEO = 21 // socket.h:130:1: + SO_SNDTIMEO_NEW = 67 // socket.h:118:1: + SO_SNDTIMEO_OLD = 21 // socket.h:34:1: + SO_TIMESTAMP = 29 // socket.h:125:1: + SO_TIMESTAMPING = 37 // socket.h:127:1: + SO_TIMESTAMPING_NEW = 65 // socket.h:115:1: + SO_TIMESTAMPING_OLD = 37 // socket.h:111:1: + SO_TIMESTAMPNS = 35 // socket.h:126:1: + SO_TIMESTAMPNS_NEW = 64 // socket.h:114:1: + SO_TIMESTAMPNS_OLD = 35 // socket.h:110:1: + SO_TIMESTAMP_NEW = 63 // socket.h:113:1: + SO_TIMESTAMP_OLD = 29 // socket.h:109:1: + SO_TXTIME = 61 // socket.h:104:1: + SO_TYPE = 3 // socket.h:13:1: + SO_WIFI_STATUS = 41 // socket.h:63:1: + SO_ZEROCOPY = 60 // socket.h:102:1: + X_ASM_X86_POSIX_TYPES_64_H = 0 // posix_types_64.h:3:1: + X_ATFILE_SOURCE = 1 // features.h:342:1: + X_BITS_BYTESWAP_H = 1 // byteswap.h:24:1: + X_BITS_ENDIANNESS_H = 1 // endianness.h:2:1: + X_BITS_ENDIAN_H = 1 // endian.h:20:1: + X_BITS_PTHREADTYPES_ARCH_H = 1 // pthreadtypes-arch.h:19:1: + X_BITS_PTHREADTYPES_COMMON_H = 1 // pthreadtypes.h:20:1: + X_BITS_SOCKADDR_H = 1 // sockaddr.h:24:1: + X_BITS_STDINT_INTN_H = 1 // stdint-intn.h:20:1: + X_BITS_TIME64_H = 1 // time64.h:24:1: + X_BITS_TYPESIZES_H = 1 // typesizes.h:24:1: + X_BITS_TYPES_H = 1 // types.h:24:1: + X_BITS_UINTN_IDENTITY_H = 1 // uintn-identity.h:24:1: + X_BSD_SIZE_T_ = 0 // stddef.h:189:1: + X_BSD_SIZE_T_DEFINED_ = 0 // stddef.h:192:1: + X_DEFAULT_SOURCE = 1 // features.h:227:1: + X_ENDIAN_H = 1 // endian.h:19:1: + X_FEATURES_H = 1 // features.h:19:1: + X_FILE_OFFSET_BITS = 64 // :25:1: + X_GCC_SIZE_T = 0 // stddef.h:195:1: + X_LINUX_POSIX_TYPES_H = 0 // 
posix_types.h:3:1: + X_LP64 = 1 // :284:1: + X_POSIX_C_SOURCE = 200809 // features.h:281:1: + X_POSIX_SOURCE = 1 // features.h:279:1: + X_RWLOCK_INTERNAL_H = 0 // struct_rwlock.h:21:1: + X_SIZET_ = 0 // stddef.h:196:1: + X_SIZE_T = 0 // stddef.h:183:1: + X_SIZE_T_ = 0 // stddef.h:188:1: + X_SIZE_T_DECLARED = 0 // stddef.h:193:1: + X_SIZE_T_DEFINED = 0 // stddef.h:191:1: + X_SIZE_T_DEFINED_ = 0 // stddef.h:190:1: + X_SS_SIZE = 128 // sockaddr.h:40:1: + X_STDC_PREDEF_H = 1 // :162:1: + X_STRUCT_TIMESPEC = 1 // struct_timespec.h:3:1: + X_SYS_CDEFS_H = 1 // cdefs.h:19:1: + X_SYS_SELECT_H = 1 // select.h:22:1: + X_SYS_SIZE_T_H = 0 // stddef.h:184:1: + X_SYS_SOCKET_H = 1 // socket.h:20:1: + X_SYS_TYPES_H = 1 // types.h:23:1: + X_THREAD_MUTEX_INTERNAL_H = 1 // struct_mutex.h:20:1: + X_THREAD_SHARED_TYPES_H = 1 // thread-shared-types.h:20:1: + X_T_SIZE = 0 // stddef.h:186:1: + X_T_SIZE_ = 0 // stddef.h:185:1: + Linux = 1 // :231:1: + Unix = 1 // :177:1: +) + +// Bits in the FLAGS argument to `send', `recv', et al. +const ( /* socket.h:200:1: */ + MSG_OOB = 1 // Process out-of-band data. + MSG_PEEK = 2 // Peek at incoming messages. + MSG_DONTROUTE = 4 // Don't use local routing. + MSG_CTRUNC = 8 // Control data lost before delivery. + MSG_PROXY = 16 // Supply or ask second address. + MSG_TRUNC = 32 + MSG_DONTWAIT = 64 // Nonblocking IO. + MSG_EOR = 128 // End of record. + MSG_WAITALL = 256 // Wait for a full request. + MSG_FIN = 512 + MSG_SYN = 1024 + MSG_CONFIRM = 2048 // Confirm path validity. + MSG_RST = 4096 + MSG_ERRQUEUE = 8192 // Fetch message from error queue. + MSG_NOSIGNAL = 16384 // Do not generate SIGPIPE. + MSG_MORE = 32768 // Sender will send more. + MSG_WAITFORONE = 65536 // Wait for at least one packet to return. + MSG_BATCH = 262144 // sendmmsg: more messages coming. + MSG_ZEROCOPY = 67108864 // Use user data in kernel path. + MSG_FASTOPEN = 536870912 // Send data in TCP SYN. + + MSG_CMSG_CLOEXEC = 1073741824 +) + +// Socket level message types. This must match the definitions in +// +// . +const ( /* socket.h:332:1: */ + SCM_RIGHTS = 1 +) + +// Get the architecture-dependent definition of enum __socket_type. +// Define enum __socket_type for generic Linux. +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Types of sockets. +const ( /* socket_type.h:24:1: */ + SOCK_STREAM = 1 // Sequenced, reliable, connection-based + // byte streams. + SOCK_DGRAM = 2 // Connectionless, unreliable datagrams + // of fixed maximum length. + SOCK_RAW = 3 // Raw protocol interface. + SOCK_RDM = 4 // Reliably-delivered messages. + SOCK_SEQPACKET = 5 // Sequenced, reliable, connection-based, + // datagrams of fixed maximum length. + SOCK_DCCP = 6 // Datagram Congestion Control Protocol. + SOCK_PACKET = 10 // Linux specific way of getting packets + // at the dev level. 
For writing rarp and + // other similar things on the user level. + + // Flags to be ORed into the type parameter of socket and socketpair and + // used for the flags parameter of paccept. + + SOCK_CLOEXEC = 524288 // Atomically set close-on-exec flag for the + // new descriptor(s). + SOCK_NONBLOCK = 2048 +) + +// The following constants should be used for the second parameter of +// +// `shutdown'. +const ( /* socket.h:41:1: */ + SHUT_RD = 0 // No more receptions. + SHUT_WR = 1 // No more transmissions. + SHUT_RDWR = 2 +) + +type Ptrdiff_t = int64 /* :3:26 */ + +type Size_t = uint64 /* :9:23 */ + +type Wchar_t = int32 /* :15:24 */ + +type X__int128_t = struct { + Flo int64 + Fhi int64 +} /* :21:43 */ // must match modernc.org/mathutil.Int128 +type X__uint128_t = struct { + Flo uint64 + Fhi uint64 +} /* :22:44 */ // must match modernc.org/mathutil.Int128 + +type X__builtin_va_list = uintptr /* :46:14 */ +type X__float128 = float64 /* :47:21 */ + +// Wide character type. +// Locale-writers should change this as necessary to +// be big enough to hold unique values not between 0 and 127, +// and not (wchar_t) -1, for each defined multibyte character. + +// Define this type if we are doing the whole job, +// or if we want this type in particular. + +// A null pointer constant. + +// Structure for scatter/gather I/O. +type Iovec = struct { + Fiov_base uintptr + Fiov_len Size_t +} /* struct_iovec.h:26:1 */ + +// Copyright (C) 1989-2020 Free Software Foundation, Inc. +// +// This file is part of GCC. +// +// GCC is free software; you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation; either version 3, or (at your option) +// any later version. +// +// GCC is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// Under Section 7 of GPL version 3, you are granted additional +// permissions described in the GCC Runtime Library Exception, version +// 3.1, as published by the Free Software Foundation. +// +// You should have received a copy of the GNU General Public License and +// a copy of the GCC Runtime Library Exception along with this program; +// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +// . + +// ISO C Standard: 7.17 Common definitions + +// Any one of these symbols __need_* means that GNU libc +// wants us just to define one data type. So don't define +// the symbols that indicate this file's entire job has been done. + +// This avoids lossage on SunOS but only if stdtypes.h comes first. +// There's no way to win with the other order! Sun lossage. + +// Sequent's header files use _PTRDIFF_T_ in some conflicting way. +// Just ignore it. + +// On VxWorks, may have defined macros like +// _TYPE_size_t which will typedef size_t. fixincludes patched the +// vxTypesBase.h so that this macro is only defined if _GCC_SIZE_T is +// not defined, and so that defining this macro defines _GCC_SIZE_T. +// If we find that the macros are still defined at this point, we must +// invoke them so that the type is defined as expected. + +// In case nobody has defined these types, but we aren't running under +// GCC 2.00, make sure that __PTRDIFF_TYPE__, __SIZE_TYPE__, and +// __WCHAR_TYPE__ have reasonable values. 
This can happen if the +// parts of GCC is compiled by an older compiler, that actually +// include gstddef.h, such as collect2. + +// Signed type of difference of two pointers. + +// Define this type if we are doing the whole job, +// or if we want this type in particular. + +// Unsigned type of `sizeof' something. + +// Define this type if we are doing the whole job, +// or if we want this type in particular. + +// Wide character type. +// Locale-writers should change this as necessary to +// be big enough to hold unique values not between 0 and 127, +// and not (wchar_t) -1, for each defined multibyte character. + +// Define this type if we are doing the whole job, +// or if we want this type in particular. + +// A null pointer constant. + +// This operating system-specific header file defines the SOCK_*, PF_*, +// AF_*, MSG_*, SOL_*, and SO_* constants, and the `struct sockaddr', +// `struct msghdr', and `struct linger' types. +// System-specific socket constants and types. Linux version. +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Copyright (C) 1989-2020 Free Software Foundation, Inc. +// +// This file is part of GCC. +// +// GCC is free software; you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation; either version 3, or (at your option) +// any later version. +// +// GCC is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// Under Section 7 of GPL version 3, you are granted additional +// permissions described in the GCC Runtime Library Exception, version +// 3.1, as published by the Free Software Foundation. +// +// You should have received a copy of the GNU General Public License and +// a copy of the GCC Runtime Library Exception along with this program; +// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +// . + +// ISO C Standard: 7.17 Common definitions + +// Any one of these symbols __need_* means that GNU libc +// wants us just to define one data type. So don't define +// the symbols that indicate this file's entire job has been done. + +// This avoids lossage on SunOS but only if stdtypes.h comes first. +// There's no way to win with the other order! Sun lossage. + +// Sequent's header files use _PTRDIFF_T_ in some conflicting way. +// Just ignore it. + +// On VxWorks, may have defined macros like +// _TYPE_size_t which will typedef size_t. fixincludes patched the +// vxTypesBase.h so that this macro is only defined if _GCC_SIZE_T is +// not defined, and so that defining this macro defines _GCC_SIZE_T. 
+// If we find that the macros are still defined at this point, we must +// invoke them so that the type is defined as expected. + +// In case nobody has defined these types, but we aren't running under +// GCC 2.00, make sure that __PTRDIFF_TYPE__, __SIZE_TYPE__, and +// __WCHAR_TYPE__ have reasonable values. This can happen if the +// parts of GCC is compiled by an older compiler, that actually +// include gstddef.h, such as collect2. + +// Signed type of difference of two pointers. + +// Define this type if we are doing the whole job, +// or if we want this type in particular. + +// Unsigned type of `sizeof' something. + +// Define this type if we are doing the whole job, +// or if we want this type in particular. + +// Wide character type. +// Locale-writers should change this as necessary to +// be big enough to hold unique values not between 0 and 127, +// and not (wchar_t) -1, for each defined multibyte character. + +// Define this type if we are doing the whole job, +// or if we want this type in particular. + +// A null pointer constant. + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// POSIX Standard: 2.6 Primitive System Data Types + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . 
+ +// Never include this file directly; use instead. + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Determine the wordsize from the preprocessor defines. + +// Both x86-64 and x32 use the 64-bit system call interface. +// Bit size of the time_t type at glibc build time, x86-64 and x32 case. +// Copyright (C) 2018-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// For others, time size is word size. + +// Convenience types. +type X__u_char = uint8 /* types.h:31:23 */ +type X__u_short = uint16 /* types.h:32:28 */ +type X__u_int = uint32 /* types.h:33:22 */ +type X__u_long = uint64 /* types.h:34:27 */ + +// Fixed-size types, underlying types depend on word size and compiler. +type X__int8_t = int8 /* types.h:37:21 */ +type X__uint8_t = uint8 /* types.h:38:23 */ +type X__int16_t = int16 /* types.h:39:26 */ +type X__uint16_t = uint16 /* types.h:40:28 */ +type X__int32_t = int32 /* types.h:41:20 */ +type X__uint32_t = uint32 /* types.h:42:22 */ +type X__int64_t = int64 /* types.h:44:25 */ +type X__uint64_t = uint64 /* types.h:45:27 */ + +// Smallest types with at least a given width. +type X__int_least8_t = X__int8_t /* types.h:52:18 */ +type X__uint_least8_t = X__uint8_t /* types.h:53:19 */ +type X__int_least16_t = X__int16_t /* types.h:54:19 */ +type X__uint_least16_t = X__uint16_t /* types.h:55:20 */ +type X__int_least32_t = X__int32_t /* types.h:56:19 */ +type X__uint_least32_t = X__uint32_t /* types.h:57:20 */ +type X__int_least64_t = X__int64_t /* types.h:58:19 */ +type X__uint_least64_t = X__uint64_t /* types.h:59:20 */ + +// quad_t is also 64 bits. +type X__quad_t = int64 /* types.h:63:18 */ +type X__u_quad_t = uint64 /* types.h:64:27 */ + +// Largest integral types. +type X__intmax_t = int64 /* types.h:72:18 */ +type X__uintmax_t = uint64 /* types.h:73:27 */ + +// The machine-dependent file defines __*_T_TYPE +// macros for each of the OS types we define below. The definitions +// of those macros must use the following macros for underlying types. 
+// We define __S_TYPE and __U_TYPE for the signed and unsigned +// variants of each of the following integer types on this machine. +// +// 16 -- "natural" 16-bit type (always short) +// 32 -- "natural" 32-bit type (always int) +// 64 -- "natural" 64-bit type (long or long long) +// LONG32 -- 32-bit type, traditionally long +// QUAD -- 64-bit type, traditionally long long +// WORD -- natural type of __WORDSIZE bits (int or long) +// LONGWORD -- type of __WORDSIZE bits, traditionally long +// +// We distinguish WORD/LONGWORD, 32/LONG32, and 64/QUAD so that the +// conventional uses of `long' or `long long' type modifiers match the +// types we define, even when a less-adorned type would be the same size. +// This matters for (somewhat) portably writing printf/scanf formats for +// these types, where using the appropriate l or ll format modifiers can +// make the typedefs and the formats match up across all GNU platforms. If +// we used `long' when it's 64 bits where `long long' is expected, then the +// compiler would warn about the formats not matching the argument types, +// and the programmer changing them to shut up the compiler would break the +// program's portability. +// +// Here we assume what is presently the case in all the GCC configurations +// we support: long long is always 64 bits, long is always word/address size, +// and int is always 32 bits. + +// No need to mark the typedef with __extension__. +// bits/typesizes.h -- underlying types for *_t. Linux/x86-64 version. +// Copyright (C) 2012-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// See for the meaning of these macros. This file exists so +// that need not vary across different GNU platforms. + +// X32 kernel interface is 64-bit. + +// Tell the libc code that off_t and off64_t are actually the same type +// for all ABI purposes, even if possibly expressed as different base types +// for C type-checking purposes. + +// Same for ino_t and ino64_t. + +// And for __rlim_t and __rlim64_t. + +// And for fsblkcnt_t, fsblkcnt64_t, fsfilcnt_t and fsfilcnt64_t. + +// Number of descriptors that can fit in an `fd_set'. + +// bits/time64.h -- underlying types for __time64_t. Generic version. +// Copyright (C) 2018-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Define __TIME64_T_TYPE so that it is always a 64-bit type. + +// If we already have 64-bit time type then use it. + +type X__dev_t = uint64 /* types.h:145:25 */ // Type of device numbers. +type X__uid_t = uint32 /* types.h:146:25 */ // Type of user identifications. +type X__gid_t = uint32 /* types.h:147:25 */ // Type of group identifications. +type X__ino_t = uint64 /* types.h:148:25 */ // Type of file serial numbers. +type X__ino64_t = uint64 /* types.h:149:27 */ // Type of file serial numbers (LFS). +type X__mode_t = uint32 /* types.h:150:26 */ // Type of file attribute bitmasks. +type X__nlink_t = uint64 /* types.h:151:27 */ // Type of file link counts. +type X__off_t = int64 /* types.h:152:25 */ // Type of file sizes and offsets. +type X__off64_t = int64 /* types.h:153:27 */ // Type of file sizes and offsets (LFS). +type X__pid_t = int32 /* types.h:154:25 */ // Type of process identifications. +type X__fsid_t = struct{ F__val [2]int32 } /* types.h:155:26 */ // Type of file system IDs. +type X__clock_t = int64 /* types.h:156:27 */ // Type of CPU usage counts. +type X__rlim_t = uint64 /* types.h:157:26 */ // Type for resource measurement. +type X__rlim64_t = uint64 /* types.h:158:28 */ // Type for resource measurement (LFS). +type X__id_t = uint32 /* types.h:159:24 */ // General type for IDs. +type X__time_t = int64 /* types.h:160:26 */ // Seconds since the Epoch. +type X__useconds_t = uint32 /* types.h:161:30 */ // Count of microseconds. +type X__suseconds_t = int64 /* types.h:162:31 */ // Signed count of microseconds. + +type X__daddr_t = int32 /* types.h:164:27 */ // The type of a disk address. +type X__key_t = int32 /* types.h:165:25 */ // Type of an IPC key. + +// Clock ID used in clock and timer functions. +type X__clockid_t = int32 /* types.h:168:29 */ + +// Timer ID returned by `timer_create'. +type X__timer_t = uintptr /* types.h:171:12 */ + +// Type to represent block size. +type X__blksize_t = int64 /* types.h:174:29 */ + +// Types from the Large File Support interface. + +// Type to count number of disk blocks. +type X__blkcnt_t = int64 /* types.h:179:28 */ +type X__blkcnt64_t = int64 /* types.h:180:30 */ + +// Type to count file system blocks. +type X__fsblkcnt_t = uint64 /* types.h:183:30 */ +type X__fsblkcnt64_t = uint64 /* types.h:184:32 */ + +// Type to count file system nodes. +type X__fsfilcnt_t = uint64 /* types.h:187:30 */ +type X__fsfilcnt64_t = uint64 /* types.h:188:32 */ + +// Type of miscellaneous file system fields. +type X__fsword_t = int64 /* types.h:191:28 */ + +type X__ssize_t = int64 /* types.h:193:27 */ // Type of a byte count, or error. + +// Signed long type used in system calls. +type X__syscall_slong_t = int64 /* types.h:196:33 */ +// Unsigned long type used in system calls. +type X__syscall_ulong_t = uint64 /* types.h:198:33 */ + +// These few don't really vary by system, they always correspond +// +// to one of the other defined types. +type X__loff_t = X__off64_t /* types.h:202:19 */ // Type of file sizes and offsets (LFS). +type X__caddr_t = uintptr /* types.h:203:14 */ + +// Duplicates info from stdint.h but this is used in unistd.h. +type X__intptr_t = int64 /* types.h:206:25 */ + +// Duplicate info from sys/socket.h. +type X__socklen_t = uint32 /* types.h:209:23 */ + +// C99: An integer type that can be accessed as an atomic entity, +// +// even in the presence of asynchronous interrupts. 
+// It is not currently necessary for this to be machine-specific. +type X__sig_atomic_t = int32 /* types.h:214:13 */ + +// Seconds since the Epoch, visible to user code when time_t is too +// narrow only for consistency with the old way of widening too-narrow +// types. User code should never use __time64_t. + +type U_char = X__u_char /* types.h:33:18 */ +type U_short = X__u_short /* types.h:34:19 */ +type U_int = X__u_int /* types.h:35:17 */ +type U_long = X__u_long /* types.h:36:18 */ +type Quad_t = X__quad_t /* types.h:37:18 */ +type U_quad_t = X__u_quad_t /* types.h:38:20 */ +type Fsid_t = X__fsid_t /* types.h:39:18 */ +type Loff_t = X__loff_t /* types.h:42:18 */ + +type Ino_t = X__ino64_t /* types.h:49:19 */ + +type Dev_t = X__dev_t /* types.h:59:17 */ + +type Gid_t = X__gid_t /* types.h:64:17 */ + +type Mode_t = X__mode_t /* types.h:69:18 */ + +type Nlink_t = X__nlink_t /* types.h:74:19 */ + +type Uid_t = X__uid_t /* types.h:79:17 */ + +type Off_t = X__off64_t /* types.h:87:19 */ + +type Pid_t = X__pid_t /* types.h:97:17 */ + +type Id_t = X__id_t /* types.h:103:16 */ + +type Ssize_t = X__ssize_t /* types.h:108:19 */ + +type Daddr_t = X__daddr_t /* types.h:114:19 */ +type Caddr_t = X__caddr_t /* types.h:115:19 */ + +type Key_t = X__key_t /* types.h:121:17 */ + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Returned by `clock'. +type Clock_t = X__clock_t /* clock_t.h:7:19 */ + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Clock ID used in clock and timer functions. +type Clockid_t = X__clockid_t /* clockid_t.h:7:21 */ + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. 
+// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Returned by `time'. +type Time_t = X__time_t /* time_t.h:7:18 */ + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Timer ID returned by `timer_create'. +type Timer_t = X__timer_t /* timer_t.h:7:19 */ + +// Copyright (C) 1989-2020 Free Software Foundation, Inc. +// +// This file is part of GCC. +// +// GCC is free software; you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation; either version 3, or (at your option) +// any later version. +// +// GCC is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// Under Section 7 of GPL version 3, you are granted additional +// permissions described in the GCC Runtime Library Exception, version +// 3.1, as published by the Free Software Foundation. +// +// You should have received a copy of the GNU General Public License and +// a copy of the GCC Runtime Library Exception along with this program; +// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +// . + +// ISO C Standard: 7.17 Common definitions + +// Any one of these symbols __need_* means that GNU libc +// wants us just to define one data type. So don't define +// the symbols that indicate this file's entire job has been done. + +// This avoids lossage on SunOS but only if stdtypes.h comes first. +// There's no way to win with the other order! Sun lossage. + +// Sequent's header files use _PTRDIFF_T_ in some conflicting way. +// Just ignore it. + +// On VxWorks, may have defined macros like +// _TYPE_size_t which will typedef size_t. fixincludes patched the +// vxTypesBase.h so that this macro is only defined if _GCC_SIZE_T is +// not defined, and so that defining this macro defines _GCC_SIZE_T. 
+// If we find that the macros are still defined at this point, we must +// invoke them so that the type is defined as expected. + +// In case nobody has defined these types, but we aren't running under +// GCC 2.00, make sure that __PTRDIFF_TYPE__, __SIZE_TYPE__, and +// __WCHAR_TYPE__ have reasonable values. This can happen if the +// parts of GCC is compiled by an older compiler, that actually +// include gstddef.h, such as collect2. + +// Signed type of difference of two pointers. + +// Define this type if we are doing the whole job, +// or if we want this type in particular. + +// Unsigned type of `sizeof' something. + +// Define this type if we are doing the whole job, +// or if we want this type in particular. + +// Wide character type. +// Locale-writers should change this as necessary to +// be big enough to hold unique values not between 0 and 127, +// and not (wchar_t) -1, for each defined multibyte character. + +// Define this type if we are doing the whole job, +// or if we want this type in particular. + +// A null pointer constant. + +// Old compatibility names for C types. +type Ulong = uint64 /* types.h:148:27 */ +type Ushort = uint16 /* types.h:149:28 */ +type Uint = uint32 /* types.h:150:22 */ + +// These size-specific names are used by some of the inet code. + +// Define intN_t types. +// Copyright (C) 2017-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +type Int8_t = X__int8_t /* stdint-intn.h:24:18 */ +type Int16_t = X__int16_t /* stdint-intn.h:25:19 */ +type Int32_t = X__int32_t /* stdint-intn.h:26:19 */ +type Int64_t = X__int64_t /* stdint-intn.h:27:19 */ + +// These were defined by ISO C without the first `_'. 
+type U_int8_t = X__uint8_t /* types.h:158:19 */ +type U_int16_t = X__uint16_t /* types.h:159:20 */ +type U_int32_t = X__uint32_t /* types.h:160:20 */ +type U_int64_t = X__uint64_t /* types.h:161:20 */ + +type Register_t = int32 /* types.h:164:13 */ + +// It also defines `fd_set' and the FD_* macros for `select'. +// `fd_set' type and related macros, and `select'/`pselect' declarations. +// Copyright (C) 1996-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// POSIX 1003.1g: 6.2 Select from File Descriptor Sets + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Get definition of needed basic types. +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Get __FD_* definitions. +// Copyright (C) 1997-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Determine the wordsize from the preprocessor defines. + +// Both x86-64 and x32 use the 64-bit system call interface. + +// Get sigset_t. + +type X__sigset_t = struct{ F__val [16]uint64 } /* __sigset_t.h:8:3 */ + +// A set of signals to be blocked, unblocked, or waited for. +type Sigset_t = X__sigset_t /* sigset_t.h:7:20 */ + +// Get definition of timer specification structures. + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// A time value that is accurate to the nearest +// +// microsecond but also has a range of years. +type Timeval = struct { + Ftv_sec X__time_t + Ftv_usec X__suseconds_t +} /* struct_timeval.h:8:1 */ + +// NB: Include guard matches what uses. + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Endian macros for string.h functions +// Copyright (C) 1992-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// POSIX.1b structure for a time value. 
This is like a `struct timeval' but +// +// has nanoseconds instead of microseconds. +type Timespec = struct { + Ftv_sec X__time_t + Ftv_nsec X__syscall_slong_t +} /* struct_timespec.h:10:1 */ + +type Suseconds_t = X__suseconds_t /* select.h:43:23 */ + +// The fd_set member is required to be an array of longs. +type X__fd_mask = int64 /* select.h:49:18 */ + +// Some versions of define this macros. +// It's easier to assume 8-bit bytes than to get CHAR_BIT. + +// fd_set for select and pselect. +type Fd_set = struct{ F__fds_bits [16]X__fd_mask } /* select.h:70:5 */ + +// Maximum number of file descriptors in `fd_set'. + +// Sometimes the fd_set member is assumed to have this type. +type Fd_mask = X__fd_mask /* select.h:77:19 */ + +// Define some inlines helping to catch common problems. + +type Blksize_t = X__blksize_t /* types.h:185:21 */ + +// Types from the Large File Support interface. +type Blkcnt_t = X__blkcnt64_t /* types.h:205:22 */ // Type to count number of disk blocks. +type Fsblkcnt_t = X__fsblkcnt64_t /* types.h:209:24 */ // Type to count file system blocks. +type Fsfilcnt_t = X__fsfilcnt64_t /* types.h:213:24 */ // Type to count file system inodes. + +// Now add the thread types. +// Declaration of common pthread types for all architectures. +// Copyright (C) 2017-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// For internal mutex and condition variable definitions. +// Common threading primitives definitions for both POSIX and C11. +// Copyright (C) 2017-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Arch-specific definitions. Each architecture must define the following +// macros to define the expected sizes of pthread data types: +// +// __SIZEOF_PTHREAD_ATTR_T - size of pthread_attr_t. +// __SIZEOF_PTHREAD_MUTEX_T - size of pthread_mutex_t. +// __SIZEOF_PTHREAD_MUTEXATTR_T - size of pthread_mutexattr_t. +// __SIZEOF_PTHREAD_COND_T - size of pthread_cond_t. +// __SIZEOF_PTHREAD_CONDATTR_T - size of pthread_condattr_t. +// __SIZEOF_PTHREAD_RWLOCK_T - size of pthread_rwlock_t. +// __SIZEOF_PTHREAD_RWLOCKATTR_T - size of pthread_rwlockattr_t. 
+// __SIZEOF_PTHREAD_BARRIER_T - size of pthread_barrier_t. +// __SIZEOF_PTHREAD_BARRIERATTR_T - size of pthread_barrierattr_t. +// +// The additional macro defines any constraint for the lock alignment +// inside the thread structures: +// +// __LOCK_ALIGNMENT - for internal lock/futex usage. +// +// Same idea but for the once locking primitive: +// +// __ONCE_ALIGNMENT - for pthread_once_t/once_flag definition. + +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Determine the wordsize from the preprocessor defines. + +// Both x86-64 and x32 use the 64-bit system call interface. + +// Common definition of pthread_mutex_t. + +type X__pthread_internal_list = struct { + F__prev uintptr + F__next uintptr +} /* thread-shared-types.h:49:9 */ + +// Type to count file system inodes. + +// Now add the thread types. +// Declaration of common pthread types for all architectures. +// Copyright (C) 2017-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// For internal mutex and condition variable definitions. +// Common threading primitives definitions for both POSIX and C11. +// Copyright (C) 2017-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Arch-specific definitions. Each architecture must define the following +// macros to define the expected sizes of pthread data types: +// +// __SIZEOF_PTHREAD_ATTR_T - size of pthread_attr_t. +// __SIZEOF_PTHREAD_MUTEX_T - size of pthread_mutex_t. 
+// __SIZEOF_PTHREAD_MUTEXATTR_T - size of pthread_mutexattr_t. +// __SIZEOF_PTHREAD_COND_T - size of pthread_cond_t. +// __SIZEOF_PTHREAD_CONDATTR_T - size of pthread_condattr_t. +// __SIZEOF_PTHREAD_RWLOCK_T - size of pthread_rwlock_t. +// __SIZEOF_PTHREAD_RWLOCKATTR_T - size of pthread_rwlockattr_t. +// __SIZEOF_PTHREAD_BARRIER_T - size of pthread_barrier_t. +// __SIZEOF_PTHREAD_BARRIERATTR_T - size of pthread_barrierattr_t. +// +// The additional macro defines any constraint for the lock alignment +// inside the thread structures: +// +// __LOCK_ALIGNMENT - for internal lock/futex usage. +// +// Same idea but for the once locking primitive: +// +// __ONCE_ALIGNMENT - for pthread_once_t/once_flag definition. + +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Determine the wordsize from the preprocessor defines. + +// Both x86-64 and x32 use the 64-bit system call interface. + +// Common definition of pthread_mutex_t. + +type X__pthread_list_t = X__pthread_internal_list /* thread-shared-types.h:53:3 */ + +type X__pthread_internal_slist = struct{ F__next uintptr } /* thread-shared-types.h:55:9 */ + +type X__pthread_slist_t = X__pthread_internal_slist /* thread-shared-types.h:58:3 */ + +// Arch-specific mutex definitions. A generic implementation is provided +// by sysdeps/nptl/bits/struct_mutex.h. If required, an architecture +// can override it by defining: +// +// 1. struct __pthread_mutex_s (used on both pthread_mutex_t and mtx_t +// definition). It should contains at least the internal members +// defined in the generic version. +// +// 2. __LOCK_ALIGNMENT for any extra attribute for internal lock used with +// atomic operations. +// +// 3. The macro __PTHREAD_MUTEX_INITIALIZER used for static initialization. +// It should initialize the mutex internal flag. + +// x86 internal mutex struct definitions. +// Copyright (C) 2019-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . 
+ +type X__pthread_mutex_s = struct { + F__lock int32 + F__count uint32 + F__owner int32 + F__nusers uint32 + F__kind int32 + F__spins int16 + F__elision int16 + F__list X__pthread_list_t +} /* struct_mutex.h:22:1 */ + +// Arch-sepecific read-write lock definitions. A generic implementation is +// provided by struct_rwlock.h. If required, an architecture can override it +// by defining: +// +// 1. struct __pthread_rwlock_arch_t (used on pthread_rwlock_t definition). +// It should contain at least the internal members defined in the +// generic version. +// +// 2. The macro __PTHREAD_RWLOCK_INITIALIZER used for static initialization. +// It should initialize the rwlock internal type. + +// x86 internal rwlock struct definitions. +// Copyright (C) 2019-2020 Free Software Foundation, Inc. +// +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +type X__pthread_rwlock_arch_t = struct { + F__readers uint32 + F__writers uint32 + F__wrphase_futex uint32 + F__writers_futex uint32 + F__pad3 uint32 + F__pad4 uint32 + F__cur_writer int32 + F__shared int32 + F__rwelision int8 + F__pad1 [7]uint8 + F__pad2 uint64 + F__flags uint32 + F__ccgo_pad1 [4]byte +} /* struct_rwlock.h:23:1 */ + +// Common definition of pthread_cond_t. + +type X__pthread_cond_s = struct { + F__0 struct{ F__wseq uint64 } + F__8 struct{ F__g1_start uint64 } + F__g_refs [2]uint32 + F__g_size [2]uint32 + F__g1_orig_size uint32 + F__wrefs uint32 + F__g_signals [2]uint32 +} /* thread-shared-types.h:92:1 */ + +// Thread identifiers. The structure of the attribute type is not +// +// exposed on purpose. +type Pthread_t = uint64 /* pthreadtypes.h:27:27 */ + +// Data structures for mutex handling. The structure of the attribute +// +// type is not exposed on purpose. +type Pthread_mutexattr_t = struct { + F__ccgo_pad1 [0]uint32 + F__size [4]int8 +} /* pthreadtypes.h:36:3 */ + +// Data structure for condition variable handling. The structure of +// +// the attribute type is not exposed on purpose. +type Pthread_condattr_t = struct { + F__ccgo_pad1 [0]uint32 + F__size [4]int8 +} /* pthreadtypes.h:45:3 */ + +// Keys for thread-specific data +type Pthread_key_t = uint32 /* pthreadtypes.h:49:22 */ + +// Once-only execution +type Pthread_once_t = int32 /* pthreadtypes.h:53:30 */ + +type Pthread_attr_t1 = struct { + F__ccgo_pad1 [0]uint64 + F__size [56]int8 +} /* pthreadtypes.h:56:1 */ + +type Pthread_attr_t = Pthread_attr_t1 /* pthreadtypes.h:62:30 */ + +type Pthread_mutex_t = struct{ F__data X__pthread_mutex_s } /* pthreadtypes.h:72:3 */ + +type Pthread_cond_t = struct{ F__data X__pthread_cond_s } /* pthreadtypes.h:80:3 */ + +// Data structure for reader-writer lock variable handling. The +// +// structure of the attribute type is deliberately not exposed. 
+type Pthread_rwlock_t = struct{ F__data X__pthread_rwlock_arch_t } /* pthreadtypes.h:91:3 */ + +type Pthread_rwlockattr_t = struct { + F__ccgo_pad1 [0]uint64 + F__size [8]int8 +} /* pthreadtypes.h:97:3 */ + +// POSIX spinlock data type. +type Pthread_spinlock_t = int32 /* pthreadtypes.h:103:22 */ + +// POSIX barriers data type. The structure of the type is +// +// deliberately not exposed. +type Pthread_barrier_t = struct { + F__ccgo_pad1 [0]uint64 + F__size [32]int8 +} /* pthreadtypes.h:112:3 */ + +type Pthread_barrierattr_t = struct { + F__ccgo_pad1 [0]uint32 + F__size [4]int8 +} /* pthreadtypes.h:118:3 */ + +// Type for length arguments in socket calls. +type Socklen_t = X__socklen_t /* socket.h:33:21 */ + +// Protocol families. + +// Address families. + +// Socket level values. Others are defined in the appropriate headers. +// +// XXX These definitions also should go into the appropriate headers as +// far as they are available. + +// Maximum queue length specifiable by listen. + +// Get the definition of the macro to define the common sockaddr members. +// Definition of struct sockaddr_* common members and sizes, generic version. +// Copyright (C) 1995-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// POSIX.1g specifies this type name for the `sa_family' member. +type Sa_family_t = uint16 /* sockaddr.h:28:28 */ + +// This macro is used to declare the initial common members +// of the data types used for socket addresses, `struct sockaddr', +// `struct sockaddr_in', `struct sockaddr_un', etc. + +// Size of struct sockaddr_storage. + +// Structure describing a generic socket address. +type Sockaddr = struct { + Fsa_family Sa_family_t + Fsa_data [14]int8 +} /* socket.h:178:1 */ + +// Structure large enough to hold any socket address (with the historical +// exception of AF_UNIX). + +type Sockaddr_storage = struct { + Fss_family Sa_family_t + F__ss_padding [118]int8 + F__ss_align uint64 +} /* socket.h:191:1 */ + +// Structure describing messages sent by +// +// `sendmsg' and received by `recvmsg'. +type Msghdr = struct { + Fmsg_name uintptr + Fmsg_namelen Socklen_t + F__ccgo_pad1 [4]byte + Fmsg_iov uintptr + Fmsg_iovlen Size_t + Fmsg_control uintptr + Fmsg_controllen Size_t + Fmsg_flags int32 + F__ccgo_pad2 [4]byte +} /* socket.h:257:1 */ + +// Structure used for storage of ancillary data object information. +type Cmsghdr = struct { + F__ccgo_pad1 [0]uint64 + Fcmsg_len Size_t + Fcmsg_level int32 + Fcmsg_type int32 +} /* socket.h:275:1 */ + +// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note + +// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note + +// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note + +// This allows for 1024 file descriptors: if NR_OPEN is ever grown +// beyond that you'll have to change this too. 
But 1024 fd's seem to be +// enough even for such "real" unices like OSF/1, so hopefully this is +// one limit that doesn't have to be changed [again]. +// +// Note that POSIX wants the FD_CLEAR(fd,fdsetp) defines to be in +// (and thus ) - but this is a more logical +// place for them. Solved by having dummy defines in . + +// This macro may have been defined in . But we always +// use the one here. + +type X__kernel_fd_set = struct{ Ffds_bits [16]uint64 } /* posix_types.h:27:3 */ + +// Type of a signal handler. +type X__kernel_sighandler_t = uintptr /* posix_types.h:30:14 */ + +// Type of a SYSV IPC key. +type X__kernel_key_t = int32 /* posix_types.h:33:13 */ +type X__kernel_mqd_t = int32 /* posix_types.h:34:13 */ + +// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note +// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note + +// This file is generally used by user-level software, so you need to +// be a little careful about namespace pollution etc. Also, we cannot +// assume GCC is being used. + +type X__kernel_old_uid_t = uint16 /* posix_types_64.h:11:24 */ +type X__kernel_old_gid_t = uint16 /* posix_types_64.h:12:24 */ + +type X__kernel_old_dev_t = uint64 /* posix_types_64.h:15:23 */ + +// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note + +// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note + +// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note + +// There seems to be no way of detecting this automatically from user +// space, so 64 bit architectures should override this in their +// bitsperlong.h. In particular, an architecture that supports +// both 32 and 64 bit user space must not rely on CONFIG_64BIT +// to decide it, but rather check a compiler provided macro. + +// This file is generally used by user-level software, so you need to +// be a little careful about namespace pollution etc. +// +// First the types that are often defined in different ways across +// architectures, so that you can override them. + +type X__kernel_long_t = int64 /* posix_types.h:15:15 */ +type X__kernel_ulong_t = uint64 /* posix_types.h:16:23 */ + +type X__kernel_ino_t = X__kernel_ulong_t /* posix_types.h:20:26 */ + +type X__kernel_mode_t = uint32 /* posix_types.h:24:22 */ + +type X__kernel_pid_t = int32 /* posix_types.h:28:14 */ + +type X__kernel_ipc_pid_t = int32 /* posix_types.h:32:14 */ + +type X__kernel_uid_t = uint32 /* posix_types.h:36:22 */ +type X__kernel_gid_t = uint32 /* posix_types.h:37:22 */ + +type X__kernel_suseconds_t = X__kernel_long_t /* posix_types.h:41:26 */ + +type X__kernel_daddr_t = int32 /* posix_types.h:45:14 */ + +type X__kernel_uid32_t = uint32 /* posix_types.h:49:22 */ +type X__kernel_gid32_t = uint32 /* posix_types.h:50:22 */ + +// Most 32 bit architectures use "unsigned int" size_t, +// and all 64 bit architectures use "unsigned long" size_t. 
+type X__kernel_size_t = X__kernel_ulong_t /* posix_types.h:72:26 */ +type X__kernel_ssize_t = X__kernel_long_t /* posix_types.h:73:25 */ +type X__kernel_ptrdiff_t = X__kernel_long_t /* posix_types.h:74:25 */ + +type X__kernel_fsid_t = struct{ Fval [2]int32 } /* posix_types.h:81:3 */ + +// anything below here should be completely generic +type X__kernel_off_t = X__kernel_long_t /* posix_types.h:87:25 */ +type X__kernel_loff_t = int64 /* posix_types.h:88:19 */ +type X__kernel_old_time_t = X__kernel_long_t /* posix_types.h:89:25 */ +type X__kernel_time_t = X__kernel_long_t /* posix_types.h:90:25 */ +type X__kernel_time64_t = int64 /* posix_types.h:91:19 */ +type X__kernel_clock_t = X__kernel_long_t /* posix_types.h:92:25 */ +type X__kernel_timer_t = int32 /* posix_types.h:93:14 */ +type X__kernel_clockid_t = int32 /* posix_types.h:94:14 */ +type X__kernel_caddr_t = uintptr /* posix_types.h:95:14 */ +type X__kernel_uid16_t = uint16 /* posix_types.h:96:24 */ +type X__kernel_gid16_t = uint16 /* posix_types.h:97:24 */ + +// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note + +// Socket-level I/O control calls. + +// For setsockopt(2) + +// Security levels - as per NRL IPv6 - don't actually do anything + +// Socket filtering + +// Instruct lower device to use last 4-bytes of skb data as FCS + +// on 64-bit and x32, avoid the ?: operator + +// Structure used to manipulate the SO_LINGER option. +type Linger = struct { + Fl_onoff int32 + Fl_linger int32 +} /* socket.h:361:1 */ + +// This is the 4.3 BSD `struct sockaddr' format, which is used as wire +// +// format in the grotty old 4.3 `talk' protocol. +type Osockaddr = struct { + Fsa_family uint16 + Fsa_data [14]uint8 +} /* struct_osockaddr.h:6:1 */ + +// Define some macros helping to catch buffer overflows. + +var _ int8 /* gen.c:2:13: */ diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/sys/stat/capi_linux_loong64.go temporal-1.22.5/src/vendor/modernc.org/libc/sys/stat/capi_linux_loong64.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/sys/stat/capi_linux_loong64.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/sys/stat/capi_linux_loong64.go 2024-02-23 09:46:15.000000000 +0000 @@ -0,0 +1,5 @@ +// Code generated by 'ccgo sys/stat/gen.c -crt-import-path -export-defines -export-enums -export-externs X -export-fields F -export-structs -export-typedefs -header -hide _OSSwapInt16,_OSSwapInt32,_OSSwapInt64 -ignore-unsupported-alignment -o sys/stat/stat_linux_amd64.go -pkgname stat', DO NOT EDIT. + +package stat + +var CAPI = map[string]struct{}{} diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/sys/stat/stat_linux_loong64.go temporal-1.22.5/src/vendor/modernc.org/libc/sys/stat/stat_linux_loong64.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/sys/stat/stat_linux_loong64.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/sys/stat/stat_linux_loong64.go 2024-02-23 09:46:15.000000000 +0000 @@ -0,0 +1,947 @@ +// Code generated by 'ccgo sys/stat/gen.c -crt-import-path "" -export-defines "" -export-enums "" -export-externs X -export-fields F -export-structs "" -export-typedefs "" -header -hide _OSSwapInt16,_OSSwapInt32,_OSSwapInt64 -ignore-unsupported-alignment -o sys/stat/stat_linux_amd64.go -pkgname stat', DO NOT EDIT. 
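Editorial aside, not part of the vendored diff: the ccgo-generated structs in the hunk above (for example Sockaddr_storage and X__pthread_mutex_s) reproduce glibc's binary layouts field for field, so their sizes must match the C ABI. A minimal sketch under that assumption, using locally re-declared mirrors rather than the real types (the names sockaddrStorage and pthreadMutexS below are illustrative, not taken from modernc.org/libc):

package main

import (
	"fmt"
	"unsafe"
)

// sockaddrStorage mirrors the generated Sockaddr_storage above:
// a 2-byte family field, 118 bytes of padding and an 8-byte alignment member.
type sockaddrStorage struct {
	ssFamily  uint16
	ssPadding [118]int8
	ssAlign   uint64
}

// pthreadMutexS mirrors X__pthread_mutex_s: 24 bytes of scalar fields
// followed by a two-pointer list node (X__pthread_list_t).
type pthreadMutexS struct {
	lock           int32
	count          uint32
	owner          int32
	nusers         uint32
	kind           int32
	spins, elision int16
	prev, next     uintptr
}

func main() {
	fmt.Println(unsafe.Sizeof(sockaddrStorage{})) // 128, the size of glibc's struct sockaddr_storage
	fmt.Println(unsafe.Sizeof(pthreadMutexS{}))   // 40 on 64-bit targets, the size of glibc's pthread_mutex_t
}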
+ +package stat + +import ( + "math" + "reflect" + "sync/atomic" + "unsafe" +) + +var _ = math.Pi +var _ reflect.Kind +var _ atomic.Value +var _ unsafe.Pointer + +const ( + ACCESSPERMS = 511 // stat.h:195:1: + ALLPERMS = 4095 // stat.h:196:1: + DEFFILEMODE = 438 // stat.h:197:1: + S_BLKSIZE = 512 // stat.h:199:1: + S_IEXEC = 64 // stat.h:177:1: + S_IFBLK = 24576 // stat.h:107:1: + S_IFCHR = 8192 // stat.h:106:1: + S_IFDIR = 16384 // stat.h:105:1: + S_IFIFO = 4096 // stat.h:110:1: + S_IFLNK = 40960 // stat.h:113:1: + S_IFMT = 61440 // stat.h:104:1: + S_IFREG = 32768 // stat.h:108:1: + S_IFSOCK = 49152 // stat.h:117:1: + S_IREAD = 256 // stat.h:175:1: + S_IRGRP = 32 // stat.h:180:1: + S_IROTH = 4 // stat.h:186:1: + S_IRUSR = 256 // stat.h:168:1: + S_IRWXG = 56 // stat.h:184:1: + S_IRWXO = 7 // stat.h:190:1: + S_IRWXU = 448 // stat.h:172:1: + S_ISGID = 1024 // stat.h:161:1: + S_ISUID = 2048 // stat.h:160:1: + S_ISVTX = 512 // stat.h:165:1: + S_IWGRP = 16 // stat.h:181:1: + S_IWOTH = 2 // stat.h:187:1: + S_IWRITE = 128 // stat.h:176:1: + S_IWUSR = 128 // stat.h:169:1: + S_IXGRP = 8 // stat.h:182:1: + S_IXOTH = 1 // stat.h:188:1: + S_IXUSR = 64 // stat.h:170:1: + UTIME_NOW = 1073741823 // stat.h:206:1: + UTIME_OMIT = 1073741822 // stat.h:207:1: + X_ATFILE_SOURCE = 1 // features.h:342:1: + X_BITS_ENDIANNESS_H = 1 // endianness.h:2:1: + X_BITS_ENDIAN_H = 1 // endian.h:20:1: + X_BITS_STAT_H = 1 // stat.h:23:1: + X_BITS_TIME64_H = 1 // time64.h:24:1: + X_BITS_TYPESIZES_H = 1 // typesizes.h:24:1: + X_BITS_TYPES_H = 1 // types.h:24:1: + X_DEFAULT_SOURCE = 1 // features.h:227:1: + X_FEATURES_H = 1 // features.h:19:1: + X_FILE_OFFSET_BITS = 64 // :25:1: + X_LP64 = 1 // :284:1: + X_MKNOD_VER = 0 // stat.h:390:1: + X_MKNOD_VER_LINUX = 0 // stat.h:41:1: + X_POSIX_C_SOURCE = 200809 // features.h:281:1: + X_POSIX_SOURCE = 1 // features.h:279:1: + X_STATBUF_ST_BLKSIZE = 0 // stat.h:172:1: + X_STATBUF_ST_NSEC = 0 // stat.h:175:1: + X_STATBUF_ST_RDEV = 0 // stat.h:173:1: + X_STAT_VER = 1 // stat.h:44:1: + X_STAT_VER_KERNEL = 0 // stat.h:37:1: + X_STAT_VER_LINUX = 1 // stat.h:38:1: + X_STDC_PREDEF_H = 1 // :162:1: + X_STRUCT_TIMESPEC = 1 // struct_timespec.h:3:1: + X_SYS_CDEFS_H = 1 // cdefs.h:19:1: + X_SYS_STAT_H = 1 // stat.h:23:1: + Linux = 1 // :231:1: + Unix = 1 // :177:1: +) + +type Ptrdiff_t = int64 /* :3:26 */ + +type Size_t = uint64 /* :9:23 */ + +type Wchar_t = int32 /* :15:24 */ + +type X__int128_t = struct { + Flo int64 + Fhi int64 +} /* :21:43 */ // must match modernc.org/mathutil.Int128 +type X__uint128_t = struct { + Flo uint64 + Fhi uint64 +} /* :22:44 */ // must match modernc.org/mathutil.Int128 + +type X__builtin_va_list = uintptr /* :46:14 */ +type X__float128 = float64 /* :47:21 */ + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . 
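Editorial aside, not part of the vendored file: the S_IF* and permission constants in the const block above carry the usual glibc values, so a caller can classify a st_mode value by masking with S_IFMT. A minimal sketch assuming only those constants (describeMode is an illustrative helper, not part of the generated package):

package main

import (
	"fmt"

	"modernc.org/libc/sys/stat"
)

// describeMode classifies the file-type bits of a mode value using the
// generated constants; only S_IFMT, S_IFDIR, S_IFREG and S_IRWXU are taken
// from the bindings above.
func describeMode(mode uint32) string {
	switch mode & stat.S_IFMT {
	case stat.S_IFDIR:
		return "directory"
	case stat.S_IFREG:
		return "regular file"
	default:
		return "other"
	}
}

func main() {
	fmt.Println(describeMode(stat.S_IFDIR | stat.S_IRWXU)) // directory
}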
+ +// POSIX Standard: 5.6 File Characteristics + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// These are defined by the user (or the compiler) +// to specify the desired environment: +// +// __STRICT_ANSI__ ISO Standard C. +// _ISOC99_SOURCE Extensions to ISO C89 from ISO C99. +// _ISOC11_SOURCE Extensions to ISO C99 from ISO C11. +// _ISOC2X_SOURCE Extensions to ISO C99 from ISO C2X. +// __STDC_WANT_LIB_EXT2__ +// Extensions to ISO C99 from TR 27431-2:2010. +// __STDC_WANT_IEC_60559_BFP_EXT__ +// Extensions to ISO C11 from TS 18661-1:2014. +// __STDC_WANT_IEC_60559_FUNCS_EXT__ +// Extensions to ISO C11 from TS 18661-4:2015. +// __STDC_WANT_IEC_60559_TYPES_EXT__ +// Extensions to ISO C11 from TS 18661-3:2015. +// +// _POSIX_SOURCE IEEE Std 1003.1. +// _POSIX_C_SOURCE If ==1, like _POSIX_SOURCE; if >=2 add IEEE Std 1003.2; +// if >=199309L, add IEEE Std 1003.1b-1993; +// if >=199506L, add IEEE Std 1003.1c-1995; +// if >=200112L, all of IEEE 1003.1-2004 +// if >=200809L, all of IEEE 1003.1-2008 +// _XOPEN_SOURCE Includes POSIX and XPG things. Set to 500 if +// Single Unix conformance is wanted, to 600 for the +// sixth revision, to 700 for the seventh revision. +// _XOPEN_SOURCE_EXTENDED XPG things and X/Open Unix extensions. +// _LARGEFILE_SOURCE Some more functions for correct standard I/O. +// _LARGEFILE64_SOURCE Additional functionality from LFS for large files. +// _FILE_OFFSET_BITS=N Select default filesystem interface. +// _ATFILE_SOURCE Additional *at interfaces. +// _GNU_SOURCE All of the above, plus GNU extensions. +// _DEFAULT_SOURCE The default set of features (taking precedence over +// __STRICT_ANSI__). +// +// _FORTIFY_SOURCE Add security hardening to many library functions. +// Set to 1 or 2; 2 performs stricter checks than 1. +// +// _REENTRANT, _THREAD_SAFE +// Obsolete; equivalent to _POSIX_C_SOURCE=199506L. +// +// The `-ansi' switch to the GNU C compiler, and standards conformance +// options such as `-std=c99', define __STRICT_ANSI__. If none of +// these are defined, or if _DEFAULT_SOURCE is defined, the default is +// to have _POSIX_SOURCE set to one and _POSIX_C_SOURCE set to +// 200809L, as well as enabling miscellaneous functions from BSD and +// SVID. If more than one of these are defined, they accumulate. For +// example __STRICT_ANSI__, _POSIX_SOURCE and _POSIX_C_SOURCE together +// give you ISO C, 1003.1, and 1003.2, but nothing else. +// +// These are defined by this file and are used by the +// header files to decide what to declare or define: +// +// __GLIBC_USE (F) Define things from feature set F. This is defined +// to 1 or 0; the subsequent macros are either defined +// or undefined, and those tests should be moved to +// __GLIBC_USE. +// __USE_ISOC11 Define ISO C11 things. +// __USE_ISOC99 Define ISO C99 things. 
+// __USE_ISOC95 Define ISO C90 AMD1 (C95) things. +// __USE_ISOCXX11 Define ISO C++11 things. +// __USE_POSIX Define IEEE Std 1003.1 things. +// __USE_POSIX2 Define IEEE Std 1003.2 things. +// __USE_POSIX199309 Define IEEE Std 1003.1, and .1b things. +// __USE_POSIX199506 Define IEEE Std 1003.1, .1b, .1c and .1i things. +// __USE_XOPEN Define XPG things. +// __USE_XOPEN_EXTENDED Define X/Open Unix things. +// __USE_UNIX98 Define Single Unix V2 things. +// __USE_XOPEN2K Define XPG6 things. +// __USE_XOPEN2KXSI Define XPG6 XSI things. +// __USE_XOPEN2K8 Define XPG7 things. +// __USE_XOPEN2K8XSI Define XPG7 XSI things. +// __USE_LARGEFILE Define correct standard I/O things. +// __USE_LARGEFILE64 Define LFS things with separate names. +// __USE_FILE_OFFSET64 Define 64bit interface as default. +// __USE_MISC Define things from 4.3BSD or System V Unix. +// __USE_ATFILE Define *at interfaces and AT_* constants for them. +// __USE_GNU Define GNU extensions. +// __USE_FORTIFY_LEVEL Additional security measures used, according to level. +// +// The macros `__GNU_LIBRARY__', `__GLIBC__', and `__GLIBC_MINOR__' are +// defined by this file unconditionally. `__GNU_LIBRARY__' is provided +// only for compatibility. All new code should use the other symbols +// to test for features. +// +// All macros listed above as possibly being defined by this file are +// explicitly undefined if they are not explicitly defined. +// Feature-test macros that are not defined by the user or compiler +// but are implied by the other feature-test macros defined (or by the +// lack of any definitions) are defined by the file. +// +// ISO C feature test macros depend on the definition of the macro +// when an affected header is included, not when the first system +// header is included, and so they are handled in +// , which does not have a multiple include +// guard. Feature test macros that can be handled from the first +// system header included are handled here. + +// Undefine everything, so we get a clean slate. + +// Suppress kernel-name space pollution unless user expressedly asks +// for it. + +// Convenience macro to test the version of gcc. +// Use like this: +// #if __GNUC_PREREQ (2,8) +// ... code requiring gcc 2.8 or later ... +// #endif +// Note: only works for GCC 2.0 and later, because __GNUC_MINOR__ was +// added in 2.0. + +// Similarly for clang. Features added to GCC after version 4.2 may +// or may not also be available in clang, and clang's definitions of +// __GNUC(_MINOR)__ are fixed at 4 and 2 respectively. Not all such +// features can be queried via __has_extension/__has_feature. + +// Whether to use feature set F. + +// _BSD_SOURCE and _SVID_SOURCE are deprecated aliases for +// _DEFAULT_SOURCE. If _DEFAULT_SOURCE is present we do not +// issue a warning; the expectation is that the source is being +// transitioned to use the new macro. + +// If _GNU_SOURCE was defined by the user, turn on all the other features. + +// If nothing (other than _GNU_SOURCE and _DEFAULT_SOURCE) is defined, +// define _DEFAULT_SOURCE. + +// This is to enable the ISO C2X extension. + +// This is to enable the ISO C11 extension. + +// This is to enable the ISO C99 extension. + +// This is to enable the ISO C90 Amendment 1:1995 extension. + +// If none of the ANSI/POSIX macros are defined, or if _DEFAULT_SOURCE +// is defined, use POSIX.1-2008 (or another version depending on +// _XOPEN_SOURCE). + +// Some C libraries once required _REENTRANT and/or _THREAD_SAFE to be +// defined in all multithreaded code. 
GNU libc has not required this +// for many years. We now treat them as compatibility synonyms for +// _POSIX_C_SOURCE=199506L, which is the earliest level of POSIX with +// comprehensive support for multithreaded code. Using them never +// lowers the selected level of POSIX conformance, only raises it. + +// The function 'gets' existed in C89, but is impossible to use +// safely. It has been removed from ISO C11 and ISO C++14. Note: for +// compatibility with various implementations of , this test +// must consider only the value of __cplusplus when compiling C++. + +// GNU formerly extended the scanf functions with modified format +// specifiers %as, %aS, and %a[...] that allocate a buffer for the +// input using malloc. This extension conflicts with ISO C99, which +// defines %a as a standalone format specifier that reads a floating- +// point number; moreover, POSIX.1-2008 provides the same feature +// using the modifier letter 'm' instead (%ms, %mS, %m[...]). +// +// We now follow C99 unless GNU extensions are active and the compiler +// is specifically in C89 or C++98 mode (strict or not). For +// instance, with GCC, -std=gnu11 will have C99-compliant scanf with +// or without -D_GNU_SOURCE, but -std=c89 -D_GNU_SOURCE will have the +// old extension. + +// Get definitions of __STDC_* predefined macros, if the compiler has +// not preincluded this header automatically. +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// This macro indicates that the installed library is the GNU C Library. +// For historic reasons the value now is 6 and this will stay from now +// on. The use of this variable is deprecated. Use __GLIBC__ and +// __GLIBC_MINOR__ now (see below) when you want to test for a specific +// GNU C library version and use the values in to get +// the sonames of the shared libraries. + +// Major and minor version number of the GNU C library package. Use +// these macros to test for features in specific releases. + +// This is here only because every header file already includes this one. +// Copyright (C) 1992-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . 
+ +// We are almost always included from features.h. + +// The GNU libc does not support any K&R compilers or the traditional mode +// of ISO C compilers anymore. Check for some of the combinations not +// anymore supported. + +// Some user header file might have defined this before. + +// All functions, except those with callbacks or those that +// synchronize memory, are leaf functions. + +// GCC can always grok prototypes. For C++ programs we add throw() +// to help it optimize the function calls. But this works only with +// gcc 2.8.x and egcs. For gcc 3.2 and up we even mark C functions +// as non-throwing using a function attribute since programs can use +// the -fexceptions options for C code as well. + +// Compilers that are not clang may object to +// #if defined __clang__ && __has_extension(...) +// even though they do not need to evaluate the right-hand side of the &&. + +// These two macros are not used in glibc anymore. They are kept here +// only because some other projects expect the macros to be defined. + +// For these things, GCC behaves the ANSI way normally, +// and the non-ANSI way under -traditional. + +// This is not a typedef so `const __ptr_t' does the right thing. + +// C++ needs to know that types and declarations are C, not C++. + +// Fortify support. + +// Support for flexible arrays. +// Headers that should use flexible arrays only if they're "real" +// (e.g. only if they won't affect sizeof()) should test +// #if __glibc_c99_flexarr_available. + +// __asm__ ("xyz") is used throughout the headers to rename functions +// at the assembly language level. This is wrapped by the __REDIRECT +// macro, in order to support compilers that can do this some other +// way. When compilers don't support asm-names at all, we have to do +// preprocessor tricks instead (which don't have exactly the right +// semantics, but it's the best we can do). +// +// Example: +// int __REDIRECT(setpgrp, (__pid_t pid, __pid_t pgrp), setpgid); + +// +// #elif __SOME_OTHER_COMPILER__ +// +// # define __REDIRECT(name, proto, alias) name proto; _Pragma("let " #name " = " #alias) + +// GCC has various useful declarations that can be made with the +// `__attribute__' syntax. All of the ways we use this do fine if +// they are omitted for compilers that don't understand it. + +// At some point during the gcc 2.96 development the `malloc' attribute +// for functions was introduced. We don't want to use it unconditionally +// (although this would be possible) since it generates warnings. + +// Tell the compiler which arguments to an allocation function +// indicate the size of the allocation. + +// At some point during the gcc 2.96 development the `pure' attribute +// for functions was introduced. We don't want to use it unconditionally +// (although this would be possible) since it generates warnings. + +// This declaration tells the compiler that the value is constant. + +// At some point during the gcc 3.1 development the `used' attribute +// for functions was introduced. We don't want to use it unconditionally +// (although this would be possible) since it generates warnings. + +// Since version 3.2, gcc allows marking deprecated functions. + +// Since version 4.5, gcc also allows one to specify the message printed +// when a deprecated function is used. clang claims to be gcc 4.2, but +// may also support this feature. + +// At some point during the gcc 2.8 development the `format_arg' attribute +// for functions was introduced. 
We don't want to use it unconditionally +// (although this would be possible) since it generates warnings. +// If several `format_arg' attributes are given for the same function, in +// gcc-3.0 and older, all but the last one are ignored. In newer gccs, +// all designated arguments are considered. + +// At some point during the gcc 2.97 development the `strfmon' format +// attribute for functions was introduced. We don't want to use it +// unconditionally (although this would be possible) since it +// generates warnings. + +// The nonull function attribute allows to mark pointer parameters which +// must not be NULL. + +// If fortification mode, we warn about unused results of certain +// function calls which can lead to problems. + +// Forces a function to be always inlined. +// The Linux kernel defines __always_inline in stddef.h (283d7573), and +// it conflicts with this definition. Therefore undefine it first to +// allow either header to be included first. + +// Associate error messages with the source location of the call site rather +// than with the source location inside the function. + +// GCC 4.3 and above with -std=c99 or -std=gnu99 implements ISO C99 +// inline semantics, unless -fgnu89-inline is used. Using __GNUC_STDC_INLINE__ +// or __GNUC_GNU_INLINE is not a good enough check for gcc because gcc versions +// older than 4.3 may define these macros and still not guarantee GNU inlining +// semantics. +// +// clang++ identifies itself as gcc-4.2, but has support for GNU inlining +// semantics, that can be checked for by using the __GNUC_STDC_INLINE_ and +// __GNUC_GNU_INLINE__ macro definitions. + +// GCC 4.3 and above allow passing all anonymous arguments of an +// __extern_always_inline function to some other vararg function. + +// It is possible to compile containing GCC extensions even if GCC is +// run in pedantic mode if the uses are carefully marked using the +// `__extension__' keyword. But this is not generally available before +// version 2.8. + +// __restrict is known in EGCS 1.2 and above. + +// ISO C99 also allows to declare arrays as non-overlapping. The syntax is +// array_name[restrict] +// GCC 3.1 supports this. + +// Describes a char array whose address can safely be passed as the first +// argument to strncpy and strncat, as the char array is not necessarily +// a NUL-terminated string. + +// Undefine (also defined in libc-symbols.h). +// Copies attributes from the declaration or type referenced by +// the argument. + +// Determine the wordsize from the preprocessor defines. + +// Both x86-64 and x32 use the 64-bit system call interface. +// Properties of long double type. ldbl-96 version. +// Copyright (C) 2016-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// long double is distinct from double, so there is nothing to +// define here. 
+ +// __glibc_macro_warning (MESSAGE) issues warning MESSAGE. This is +// intended for use in preprocessor macros. +// +// Note: MESSAGE must be a _single_ string; concatenation of string +// literals is not supported. + +// Generic selection (ISO C11) is a C-only feature, available in GCC +// since version 4.9. Previous versions do not provide generic +// selection, even though they might set __STDC_VERSION__ to 201112L, +// when in -std=c11 mode. Thus, we must check for !defined __GNUC__ +// when testing __STDC_VERSION__ for generic selection support. +// On the other hand, Clang also defines __GNUC__, so a clang-specific +// check is required to enable the use of generic selection. + +// If we don't have __REDIRECT, prototypes will be missing if +// __USE_FILE_OFFSET64 but not __USE_LARGEFILE[64]. + +// Decide whether we can define 'extern inline' functions in headers. + +// This is here only because every header file already includes this one. +// Get the definitions of all the appropriate `__stub_FUNCTION' symbols. +// contains `#define __stub_FUNCTION' when FUNCTION is a stub +// that will always return failure (and set errno to ENOSYS). +// This file is automatically generated. +// This file selects the right generated file of `__stub_FUNCTION' macros +// based on the architecture being compiled for. + +// This file is automatically generated. +// It defines a symbol `__stub_FUNCTION' for each function +// in the C library which is a stub, meaning it will fail +// every time called, usually setting errno to ENOSYS. + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Determine the wordsize from the preprocessor defines. + +// Both x86-64 and x32 use the 64-bit system call interface. +// Bit size of the time_t type at glibc build time, x86-64 and x32 case. +// Copyright (C) 2018-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. 
+// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// For others, time size is word size. + +// Convenience types. +type X__u_char = uint8 /* types.h:31:23 */ +type X__u_short = uint16 /* types.h:32:28 */ +type X__u_int = uint32 /* types.h:33:22 */ +type X__u_long = uint64 /* types.h:34:27 */ + +// Fixed-size types, underlying types depend on word size and compiler. +type X__int8_t = int8 /* types.h:37:21 */ +type X__uint8_t = uint8 /* types.h:38:23 */ +type X__int16_t = int16 /* types.h:39:26 */ +type X__uint16_t = uint16 /* types.h:40:28 */ +type X__int32_t = int32 /* types.h:41:20 */ +type X__uint32_t = uint32 /* types.h:42:22 */ +type X__int64_t = int64 /* types.h:44:25 */ +type X__uint64_t = uint64 /* types.h:45:27 */ + +// Smallest types with at least a given width. +type X__int_least8_t = X__int8_t /* types.h:52:18 */ +type X__uint_least8_t = X__uint8_t /* types.h:53:19 */ +type X__int_least16_t = X__int16_t /* types.h:54:19 */ +type X__uint_least16_t = X__uint16_t /* types.h:55:20 */ +type X__int_least32_t = X__int32_t /* types.h:56:19 */ +type X__uint_least32_t = X__uint32_t /* types.h:57:20 */ +type X__int_least64_t = X__int64_t /* types.h:58:19 */ +type X__uint_least64_t = X__uint64_t /* types.h:59:20 */ + +// quad_t is also 64 bits. +type X__quad_t = int64 /* types.h:63:18 */ +type X__u_quad_t = uint64 /* types.h:64:27 */ + +// Largest integral types. +type X__intmax_t = int64 /* types.h:72:18 */ +type X__uintmax_t = uint64 /* types.h:73:27 */ + +// The machine-dependent file defines __*_T_TYPE +// macros for each of the OS types we define below. The definitions +// of those macros must use the following macros for underlying types. +// We define __S_TYPE and __U_TYPE for the signed and unsigned +// variants of each of the following integer types on this machine. +// +// 16 -- "natural" 16-bit type (always short) +// 32 -- "natural" 32-bit type (always int) +// 64 -- "natural" 64-bit type (long or long long) +// LONG32 -- 32-bit type, traditionally long +// QUAD -- 64-bit type, traditionally long long +// WORD -- natural type of __WORDSIZE bits (int or long) +// LONGWORD -- type of __WORDSIZE bits, traditionally long +// +// We distinguish WORD/LONGWORD, 32/LONG32, and 64/QUAD so that the +// conventional uses of `long' or `long long' type modifiers match the +// types we define, even when a less-adorned type would be the same size. +// This matters for (somewhat) portably writing printf/scanf formats for +// these types, where using the appropriate l or ll format modifiers can +// make the typedefs and the formats match up across all GNU platforms. If +// we used `long' when it's 64 bits where `long long' is expected, then the +// compiler would warn about the formats not matching the argument types, +// and the programmer changing them to shut up the compiler would break the +// program's portability. 
+// +// Here we assume what is presently the case in all the GCC configurations +// we support: long long is always 64 bits, long is always word/address size, +// and int is always 32 bits. + +// No need to mark the typedef with __extension__. +// bits/typesizes.h -- underlying types for *_t. Linux/x86-64 version. +// Copyright (C) 2012-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// See for the meaning of these macros. This file exists so +// that need not vary across different GNU platforms. + +// X32 kernel interface is 64-bit. + +// Tell the libc code that off_t and off64_t are actually the same type +// for all ABI purposes, even if possibly expressed as different base types +// for C type-checking purposes. + +// Same for ino_t and ino64_t. + +// And for __rlim_t and __rlim64_t. + +// And for fsblkcnt_t, fsblkcnt64_t, fsfilcnt_t and fsfilcnt64_t. + +// Number of descriptors that can fit in an `fd_set'. + +// bits/time64.h -- underlying types for __time64_t. Generic version. +// Copyright (C) 2018-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Define __TIME64_T_TYPE so that it is always a 64-bit type. + +// If we already have 64-bit time type then use it. + +type X__dev_t = uint64 /* types.h:145:25 */ // Type of device numbers. +type X__uid_t = uint32 /* types.h:146:25 */ // Type of user identifications. +type X__gid_t = uint32 /* types.h:147:25 */ // Type of group identifications. +type X__ino_t = uint64 /* types.h:148:25 */ // Type of file serial numbers. +type X__ino64_t = uint64 /* types.h:149:27 */ // Type of file serial numbers (LFS). +type X__mode_t = uint32 /* types.h:150:26 */ // Type of file attribute bitmasks. +type X__nlink_t = uint64 /* types.h:151:27 */ // Type of file link counts. +type X__off_t = int64 /* types.h:152:25 */ // Type of file sizes and offsets. +type X__off64_t = int64 /* types.h:153:27 */ // Type of file sizes and offsets (LFS). +type X__pid_t = int32 /* types.h:154:25 */ // Type of process identifications. +type X__fsid_t = struct{ F__val [2]int32 } /* types.h:155:26 */ // Type of file system IDs. 
+type X__clock_t = int64 /* types.h:156:27 */ // Type of CPU usage counts. +type X__rlim_t = uint64 /* types.h:157:26 */ // Type for resource measurement. +type X__rlim64_t = uint64 /* types.h:158:28 */ // Type for resource measurement (LFS). +type X__id_t = uint32 /* types.h:159:24 */ // General type for IDs. +type X__time_t = int64 /* types.h:160:26 */ // Seconds since the Epoch. +type X__useconds_t = uint32 /* types.h:161:30 */ // Count of microseconds. +type X__suseconds_t = int64 /* types.h:162:31 */ // Signed count of microseconds. + +type X__daddr_t = int32 /* types.h:164:27 */ // The type of a disk address. +type X__key_t = int32 /* types.h:165:25 */ // Type of an IPC key. + +// Clock ID used in clock and timer functions. +type X__clockid_t = int32 /* types.h:168:29 */ + +// Timer ID returned by `timer_create'. +type X__timer_t = uintptr /* types.h:171:12 */ + +// Type to represent block size. +type X__blksize_t = int64 /* types.h:174:29 */ + +// Types from the Large File Support interface. + +// Type to count number of disk blocks. +type X__blkcnt_t = int64 /* types.h:179:28 */ +type X__blkcnt64_t = int64 /* types.h:180:30 */ + +// Type to count file system blocks. +type X__fsblkcnt_t = uint64 /* types.h:183:30 */ +type X__fsblkcnt64_t = uint64 /* types.h:184:32 */ + +// Type to count file system nodes. +type X__fsfilcnt_t = uint64 /* types.h:187:30 */ +type X__fsfilcnt64_t = uint64 /* types.h:188:32 */ + +// Type of miscellaneous file system fields. +type X__fsword_t = int64 /* types.h:191:28 */ + +type X__ssize_t = int64 /* types.h:193:27 */ // Type of a byte count, or error. + +// Signed long type used in system calls. +type X__syscall_slong_t = int64 /* types.h:196:33 */ +// Unsigned long type used in system calls. +type X__syscall_ulong_t = uint64 /* types.h:198:33 */ + +// These few don't really vary by system, they always correspond +// +// to one of the other defined types. +type X__loff_t = X__off64_t /* types.h:202:19 */ // Type of file sizes and offsets (LFS). +type X__caddr_t = uintptr /* types.h:203:14 */ + +// Duplicates info from stdint.h but this is used in unistd.h. +type X__intptr_t = int64 /* types.h:206:25 */ + +// Duplicate info from sys/socket.h. +type X__socklen_t = uint32 /* types.h:209:23 */ + +// C99: An integer type that can be accessed as an atomic entity, +// +// even in the presence of asynchronous interrupts. +// It is not currently necessary for this to be machine-specific. +type X__sig_atomic_t = int32 /* types.h:214:13 */ + +// Seconds since the Epoch, visible to user code when time_t is too +// narrow only for consistency with the old way of widening too-narrow +// types. User code should never use __time64_t. + +// NB: Include guard matches what uses. + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Endian macros for string.h functions +// Copyright (C) 1992-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Definitions for byte order, according to significance of bytes, +// from low addresses to high addresses. The value is what you get by +// putting '4' in the most significant byte, '3' in the second most +// significant byte, '2' in the second least significant byte, and '1' +// in the least significant byte, and then writing down one digit for +// each byte, starting with the byte at the lowest address at the left, +// and proceeding to the byte with the highest address at the right. + +// This file defines `__BYTE_ORDER' for the particular machine. + +// i386/x86_64 are little-endian. + +// Some machines may need to use a different endianness for floating point +// values. + +// POSIX.1b structure for a time value. This is like a `struct timeval' but +// +// has nanoseconds instead of microseconds. +type Timespec = struct { + Ftv_sec X__time_t + Ftv_nsec X__syscall_slong_t +} /* struct_timespec.h:10:1 */ + +// The Single Unix specification says that some more types are +// available here. + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Returned by `time'. +type Time_t = X__time_t /* time_t.h:7:18 */ + +type Dev_t = X__dev_t /* stat.h:40:17 */ + +type Gid_t = X__gid_t /* stat.h:45:17 */ + +type Ino_t = X__ino64_t /* stat.h:53:19 */ + +type Mode_t = X__mode_t /* stat.h:59:18 */ + +type Nlink_t = X__nlink_t /* stat.h:64:19 */ + +type Off_t = X__off64_t /* stat.h:72:19 */ + +type Uid_t = X__uid_t /* stat.h:78:17 */ + +// Copyright (C) 1999-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. 
+// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Versions of the `struct stat' data structure. + +// x86-64 versions of the `xmknod' interface. + +type Stat = struct { + Fst_dev X__dev_t + Fst_ino X__ino_t + Fst_nlink X__nlink_t + Fst_mode X__mode_t + Fst_uid X__uid_t + Fst_gid X__gid_t + F__pad0 int32 + Fst_rdev X__dev_t + Fst_size X__off_t + Fst_blksize X__blksize_t + Fst_blocks X__blkcnt_t + Fst_atim struct { + Ftv_sec X__time_t + Ftv_nsec X__syscall_slong_t + } + Fst_mtim struct { + Ftv_sec X__time_t + Ftv_nsec X__syscall_slong_t + } + Fst_ctim struct { + Ftv_sec X__time_t + Ftv_nsec X__syscall_slong_t + } + F__glibc_reserved [3]X__syscall_slong_t +} /* stat.h:46:1 */ + +var _ int8 /* gen.c:2:13: */ diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/sys/types/capi_linux_loong64.go temporal-1.22.5/src/vendor/modernc.org/libc/sys/types/capi_linux_loong64.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/sys/types/capi_linux_loong64.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/sys/types/capi_linux_loong64.go 2024-02-23 09:46:15.000000000 +0000 @@ -0,0 +1,5 @@ +// Code generated by 'ccgo sys/types/gen.c -crt-import-path -export-defines -export-enums -export-externs X -export-fields F -export-structs -export-typedefs -header -hide _OSSwapInt16,_OSSwapInt32,_OSSwapInt64 -ignore-unsupported-alignment -o sys/types/types_linux_amd64.go -pkgname types', DO NOT EDIT. + +package types + +var CAPI = map[string]struct{}{} diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/sys/types/types_linux_loong64.go temporal-1.22.5/src/vendor/modernc.org/libc/sys/types/types_linux_loong64.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/sys/types/types_linux_loong64.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/sys/types/types_linux_loong64.go 2024-02-23 09:46:15.000000000 +0000 @@ -0,0 +1,1511 @@ +// Code generated by 'ccgo sys/types/gen.c -crt-import-path "" -export-defines "" -export-enums "" -export-externs X -export-fields F -export-structs "" -export-typedefs "" -header -hide _OSSwapInt16,_OSSwapInt32,_OSSwapInt64 -ignore-unsupported-alignment -o sys/types/types_linux_amd64.go -pkgname types', DO NOT EDIT. 
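Editorial aside, not part of the diff: the Stat struct generated just above in the sys/stat bindings stores its timestamps as {Ftv_sec, Ftv_nsec} pairs matching glibc's struct timespec, so a Go time.Time can be recovered with time.Unix. A minimal sketch assuming those field names; the hand-populated Stat below stands in for one that a real fstat-style call through the libc shims would fill:

package main

import (
	"fmt"
	"time"

	"modernc.org/libc/sys/stat"
)

func main() {
	var st stat.Stat
	// Populate the modification time by hand purely to show the field access;
	// normally these fields arrive from the C side.
	st.Fst_mtim.Ftv_sec = time.Now().Unix()
	st.Fst_mtim.Ftv_nsec = 0

	mtime := time.Unix(st.Fst_mtim.Ftv_sec, st.Fst_mtim.Ftv_nsec)
	fmt.Println("st_mtim as time.Time:", mtime.UTC())
}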
+ +package types + +import ( + "math" + "reflect" + "sync/atomic" + "unsafe" +) + +var _ = math.Pi +var _ reflect.Kind +var _ atomic.Value +var _ unsafe.Pointer + +const ( + BIG_ENDIAN = 4321 // endian.h:28:1: + BYTE_ORDER = 1234 // endian.h:30:1: + FD_SETSIZE = 1024 // select.h:73:1: + LITTLE_ENDIAN = 1234 // endian.h:27:1: + PDP_ENDIAN = 3412 // endian.h:29:1: + X_ATFILE_SOURCE = 1 // features.h:342:1: + X_BITS_BYTESWAP_H = 1 // byteswap.h:24:1: + X_BITS_ENDIANNESS_H = 1 // endianness.h:2:1: + X_BITS_ENDIAN_H = 1 // endian.h:20:1: + X_BITS_PTHREADTYPES_ARCH_H = 1 // pthreadtypes-arch.h:19:1: + X_BITS_PTHREADTYPES_COMMON_H = 1 // pthreadtypes.h:20:1: + X_BITS_STDINT_INTN_H = 1 // stdint-intn.h:20:1: + X_BITS_TIME64_H = 1 // time64.h:24:1: + X_BITS_TYPESIZES_H = 1 // typesizes.h:24:1: + X_BITS_TYPES_H = 1 // types.h:24:1: + X_BITS_UINTN_IDENTITY_H = 1 // uintn-identity.h:24:1: + X_BSD_SIZE_T_ = 0 // stddef.h:189:1: + X_BSD_SIZE_T_DEFINED_ = 0 // stddef.h:192:1: + X_DEFAULT_SOURCE = 1 // features.h:227:1: + X_ENDIAN_H = 1 // endian.h:19:1: + X_FEATURES_H = 1 // features.h:19:1: + X_FILE_OFFSET_BITS = 64 // :25:1: + X_GCC_SIZE_T = 0 // stddef.h:195:1: + X_LP64 = 1 // :284:1: + X_POSIX_C_SOURCE = 200809 // features.h:281:1: + X_POSIX_SOURCE = 1 // features.h:279:1: + X_RWLOCK_INTERNAL_H = 0 // struct_rwlock.h:21:1: + X_SIZET_ = 0 // stddef.h:196:1: + X_SIZE_T = 0 // stddef.h:183:1: + X_SIZE_T_ = 0 // stddef.h:188:1: + X_SIZE_T_DECLARED = 0 // stddef.h:193:1: + X_SIZE_T_DEFINED = 0 // stddef.h:191:1: + X_SIZE_T_DEFINED_ = 0 // stddef.h:190:1: + X_STDC_PREDEF_H = 1 // :162:1: + X_STRUCT_TIMESPEC = 1 // struct_timespec.h:3:1: + X_SYS_CDEFS_H = 1 // cdefs.h:19:1: + X_SYS_SELECT_H = 1 // select.h:22:1: + X_SYS_SIZE_T_H = 0 // stddef.h:184:1: + X_SYS_TYPES_H = 1 // types.h:23:1: + X_THREAD_MUTEX_INTERNAL_H = 1 // struct_mutex.h:20:1: + X_THREAD_SHARED_TYPES_H = 1 // thread-shared-types.h:20:1: + X_T_SIZE = 0 // stddef.h:186:1: + X_T_SIZE_ = 0 // stddef.h:185:1: + Linux = 1 // :231:1: + Unix = 1 // :177:1: +) + +type Ptrdiff_t = int64 /* :3:26 */ + +type Size_t = uint64 /* :9:23 */ + +type Wchar_t = int32 /* :15:24 */ + +type X__int128_t = struct { + Flo int64 + Fhi int64 +} /* :21:43 */ // must match modernc.org/mathutil.Int128 +type X__uint128_t = struct { + Flo uint64 + Fhi uint64 +} /* :22:44 */ // must match modernc.org/mathutil.Int128 + +type X__builtin_va_list = uintptr /* :46:14 */ +type X__float128 = float64 /* :47:21 */ + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// POSIX Standard: 2.6 Primitive System Data Types + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. 
+// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// These are defined by the user (or the compiler) +// to specify the desired environment: +// +// __STRICT_ANSI__ ISO Standard C. +// _ISOC99_SOURCE Extensions to ISO C89 from ISO C99. +// _ISOC11_SOURCE Extensions to ISO C99 from ISO C11. +// _ISOC2X_SOURCE Extensions to ISO C99 from ISO C2X. +// __STDC_WANT_LIB_EXT2__ +// Extensions to ISO C99 from TR 27431-2:2010. +// __STDC_WANT_IEC_60559_BFP_EXT__ +// Extensions to ISO C11 from TS 18661-1:2014. +// __STDC_WANT_IEC_60559_FUNCS_EXT__ +// Extensions to ISO C11 from TS 18661-4:2015. +// __STDC_WANT_IEC_60559_TYPES_EXT__ +// Extensions to ISO C11 from TS 18661-3:2015. +// +// _POSIX_SOURCE IEEE Std 1003.1. +// _POSIX_C_SOURCE If ==1, like _POSIX_SOURCE; if >=2 add IEEE Std 1003.2; +// if >=199309L, add IEEE Std 1003.1b-1993; +// if >=199506L, add IEEE Std 1003.1c-1995; +// if >=200112L, all of IEEE 1003.1-2004 +// if >=200809L, all of IEEE 1003.1-2008 +// _XOPEN_SOURCE Includes POSIX and XPG things. Set to 500 if +// Single Unix conformance is wanted, to 600 for the +// sixth revision, to 700 for the seventh revision. +// _XOPEN_SOURCE_EXTENDED XPG things and X/Open Unix extensions. +// _LARGEFILE_SOURCE Some more functions for correct standard I/O. +// _LARGEFILE64_SOURCE Additional functionality from LFS for large files. +// _FILE_OFFSET_BITS=N Select default filesystem interface. +// _ATFILE_SOURCE Additional *at interfaces. +// _GNU_SOURCE All of the above, plus GNU extensions. +// _DEFAULT_SOURCE The default set of features (taking precedence over +// __STRICT_ANSI__). +// +// _FORTIFY_SOURCE Add security hardening to many library functions. +// Set to 1 or 2; 2 performs stricter checks than 1. +// +// _REENTRANT, _THREAD_SAFE +// Obsolete; equivalent to _POSIX_C_SOURCE=199506L. +// +// The `-ansi' switch to the GNU C compiler, and standards conformance +// options such as `-std=c99', define __STRICT_ANSI__. If none of +// these are defined, or if _DEFAULT_SOURCE is defined, the default is +// to have _POSIX_SOURCE set to one and _POSIX_C_SOURCE set to +// 200809L, as well as enabling miscellaneous functions from BSD and +// SVID. If more than one of these are defined, they accumulate. For +// example __STRICT_ANSI__, _POSIX_SOURCE and _POSIX_C_SOURCE together +// give you ISO C, 1003.1, and 1003.2, but nothing else. +// +// These are defined by this file and are used by the +// header files to decide what to declare or define: +// +// __GLIBC_USE (F) Define things from feature set F. This is defined +// to 1 or 0; the subsequent macros are either defined +// or undefined, and those tests should be moved to +// __GLIBC_USE. +// __USE_ISOC11 Define ISO C11 things. +// __USE_ISOC99 Define ISO C99 things. +// __USE_ISOC95 Define ISO C90 AMD1 (C95) things. +// __USE_ISOCXX11 Define ISO C++11 things. +// __USE_POSIX Define IEEE Std 1003.1 things. 
+// __USE_POSIX2 Define IEEE Std 1003.2 things. +// __USE_POSIX199309 Define IEEE Std 1003.1, and .1b things. +// __USE_POSIX199506 Define IEEE Std 1003.1, .1b, .1c and .1i things. +// __USE_XOPEN Define XPG things. +// __USE_XOPEN_EXTENDED Define X/Open Unix things. +// __USE_UNIX98 Define Single Unix V2 things. +// __USE_XOPEN2K Define XPG6 things. +// __USE_XOPEN2KXSI Define XPG6 XSI things. +// __USE_XOPEN2K8 Define XPG7 things. +// __USE_XOPEN2K8XSI Define XPG7 XSI things. +// __USE_LARGEFILE Define correct standard I/O things. +// __USE_LARGEFILE64 Define LFS things with separate names. +// __USE_FILE_OFFSET64 Define 64bit interface as default. +// __USE_MISC Define things from 4.3BSD or System V Unix. +// __USE_ATFILE Define *at interfaces and AT_* constants for them. +// __USE_GNU Define GNU extensions. +// __USE_FORTIFY_LEVEL Additional security measures used, according to level. +// +// The macros `__GNU_LIBRARY__', `__GLIBC__', and `__GLIBC_MINOR__' are +// defined by this file unconditionally. `__GNU_LIBRARY__' is provided +// only for compatibility. All new code should use the other symbols +// to test for features. +// +// All macros listed above as possibly being defined by this file are +// explicitly undefined if they are not explicitly defined. +// Feature-test macros that are not defined by the user or compiler +// but are implied by the other feature-test macros defined (or by the +// lack of any definitions) are defined by the file. +// +// ISO C feature test macros depend on the definition of the macro +// when an affected header is included, not when the first system +// header is included, and so they are handled in +// , which does not have a multiple include +// guard. Feature test macros that can be handled from the first +// system header included are handled here. + +// Undefine everything, so we get a clean slate. + +// Suppress kernel-name space pollution unless user expressedly asks +// for it. + +// Convenience macro to test the version of gcc. +// Use like this: +// #if __GNUC_PREREQ (2,8) +// ... code requiring gcc 2.8 or later ... +// #endif +// Note: only works for GCC 2.0 and later, because __GNUC_MINOR__ was +// added in 2.0. + +// Similarly for clang. Features added to GCC after version 4.2 may +// or may not also be available in clang, and clang's definitions of +// __GNUC(_MINOR)__ are fixed at 4 and 2 respectively. Not all such +// features can be queried via __has_extension/__has_feature. + +// Whether to use feature set F. + +// _BSD_SOURCE and _SVID_SOURCE are deprecated aliases for +// _DEFAULT_SOURCE. If _DEFAULT_SOURCE is present we do not +// issue a warning; the expectation is that the source is being +// transitioned to use the new macro. + +// If _GNU_SOURCE was defined by the user, turn on all the other features. + +// If nothing (other than _GNU_SOURCE and _DEFAULT_SOURCE) is defined, +// define _DEFAULT_SOURCE. + +// This is to enable the ISO C2X extension. + +// This is to enable the ISO C11 extension. + +// This is to enable the ISO C99 extension. + +// This is to enable the ISO C90 Amendment 1:1995 extension. + +// If none of the ANSI/POSIX macros are defined, or if _DEFAULT_SOURCE +// is defined, use POSIX.1-2008 (or another version depending on +// _XOPEN_SOURCE). + +// Some C libraries once required _REENTRANT and/or _THREAD_SAFE to be +// defined in all multithreaded code. GNU libc has not required this +// for many years. 
We now treat them as compatibility synonyms for +// _POSIX_C_SOURCE=199506L, which is the earliest level of POSIX with +// comprehensive support for multithreaded code. Using them never +// lowers the selected level of POSIX conformance, only raises it. + +// The function 'gets' existed in C89, but is impossible to use +// safely. It has been removed from ISO C11 and ISO C++14. Note: for +// compatibility with various implementations of , this test +// must consider only the value of __cplusplus when compiling C++. + +// GNU formerly extended the scanf functions with modified format +// specifiers %as, %aS, and %a[...] that allocate a buffer for the +// input using malloc. This extension conflicts with ISO C99, which +// defines %a as a standalone format specifier that reads a floating- +// point number; moreover, POSIX.1-2008 provides the same feature +// using the modifier letter 'm' instead (%ms, %mS, %m[...]). +// +// We now follow C99 unless GNU extensions are active and the compiler +// is specifically in C89 or C++98 mode (strict or not). For +// instance, with GCC, -std=gnu11 will have C99-compliant scanf with +// or without -D_GNU_SOURCE, but -std=c89 -D_GNU_SOURCE will have the +// old extension. + +// Get definitions of __STDC_* predefined macros, if the compiler has +// not preincluded this header automatically. +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// This macro indicates that the installed library is the GNU C Library. +// For historic reasons the value now is 6 and this will stay from now +// on. The use of this variable is deprecated. Use __GLIBC__ and +// __GLIBC_MINOR__ now (see below) when you want to test for a specific +// GNU C library version and use the values in to get +// the sonames of the shared libraries. + +// Major and minor version number of the GNU C library package. Use +// these macros to test for features in specific releases. + +// This is here only because every header file already includes this one. +// Copyright (C) 1992-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// We are almost always included from features.h. 
+ +// The GNU libc does not support any K&R compilers or the traditional mode +// of ISO C compilers anymore. Check for some of the combinations not +// anymore supported. + +// Some user header file might have defined this before. + +// All functions, except those with callbacks or those that +// synchronize memory, are leaf functions. + +// GCC can always grok prototypes. For C++ programs we add throw() +// to help it optimize the function calls. But this works only with +// gcc 2.8.x and egcs. For gcc 3.2 and up we even mark C functions +// as non-throwing using a function attribute since programs can use +// the -fexceptions options for C code as well. + +// Compilers that are not clang may object to +// #if defined __clang__ && __has_extension(...) +// even though they do not need to evaluate the right-hand side of the &&. + +// These two macros are not used in glibc anymore. They are kept here +// only because some other projects expect the macros to be defined. + +// For these things, GCC behaves the ANSI way normally, +// and the non-ANSI way under -traditional. + +// This is not a typedef so `const __ptr_t' does the right thing. + +// C++ needs to know that types and declarations are C, not C++. + +// Fortify support. + +// Support for flexible arrays. +// Headers that should use flexible arrays only if they're "real" +// (e.g. only if they won't affect sizeof()) should test +// #if __glibc_c99_flexarr_available. + +// __asm__ ("xyz") is used throughout the headers to rename functions +// at the assembly language level. This is wrapped by the __REDIRECT +// macro, in order to support compilers that can do this some other +// way. When compilers don't support asm-names at all, we have to do +// preprocessor tricks instead (which don't have exactly the right +// semantics, but it's the best we can do). +// +// Example: +// int __REDIRECT(setpgrp, (__pid_t pid, __pid_t pgrp), setpgid); + +// +// #elif __SOME_OTHER_COMPILER__ +// +// # define __REDIRECT(name, proto, alias) name proto; _Pragma("let " #name " = " #alias) + +// GCC has various useful declarations that can be made with the +// `__attribute__' syntax. All of the ways we use this do fine if +// they are omitted for compilers that don't understand it. + +// At some point during the gcc 2.96 development the `malloc' attribute +// for functions was introduced. We don't want to use it unconditionally +// (although this would be possible) since it generates warnings. + +// Tell the compiler which arguments to an allocation function +// indicate the size of the allocation. + +// At some point during the gcc 2.96 development the `pure' attribute +// for functions was introduced. We don't want to use it unconditionally +// (although this would be possible) since it generates warnings. + +// This declaration tells the compiler that the value is constant. + +// At some point during the gcc 3.1 development the `used' attribute +// for functions was introduced. We don't want to use it unconditionally +// (although this would be possible) since it generates warnings. + +// Since version 3.2, gcc allows marking deprecated functions. + +// Since version 4.5, gcc also allows one to specify the message printed +// when a deprecated function is used. clang claims to be gcc 4.2, but +// may also support this feature. + +// At some point during the gcc 2.8 development the `format_arg' attribute +// for functions was introduced. We don't want to use it unconditionally +// (although this would be possible) since it generates warnings. 
+// If several `format_arg' attributes are given for the same function, in +// gcc-3.0 and older, all but the last one are ignored. In newer gccs, +// all designated arguments are considered. + +// At some point during the gcc 2.97 development the `strfmon' format +// attribute for functions was introduced. We don't want to use it +// unconditionally (although this would be possible) since it +// generates warnings. + +// The nonull function attribute allows to mark pointer parameters which +// must not be NULL. + +// If fortification mode, we warn about unused results of certain +// function calls which can lead to problems. + +// Forces a function to be always inlined. +// The Linux kernel defines __always_inline in stddef.h (283d7573), and +// it conflicts with this definition. Therefore undefine it first to +// allow either header to be included first. + +// Associate error messages with the source location of the call site rather +// than with the source location inside the function. + +// GCC 4.3 and above with -std=c99 or -std=gnu99 implements ISO C99 +// inline semantics, unless -fgnu89-inline is used. Using __GNUC_STDC_INLINE__ +// or __GNUC_GNU_INLINE is not a good enough check for gcc because gcc versions +// older than 4.3 may define these macros and still not guarantee GNU inlining +// semantics. +// +// clang++ identifies itself as gcc-4.2, but has support for GNU inlining +// semantics, that can be checked for by using the __GNUC_STDC_INLINE_ and +// __GNUC_GNU_INLINE__ macro definitions. + +// GCC 4.3 and above allow passing all anonymous arguments of an +// __extern_always_inline function to some other vararg function. + +// It is possible to compile containing GCC extensions even if GCC is +// run in pedantic mode if the uses are carefully marked using the +// `__extension__' keyword. But this is not generally available before +// version 2.8. + +// __restrict is known in EGCS 1.2 and above. + +// ISO C99 also allows to declare arrays as non-overlapping. The syntax is +// array_name[restrict] +// GCC 3.1 supports this. + +// Describes a char array whose address can safely be passed as the first +// argument to strncpy and strncat, as the char array is not necessarily +// a NUL-terminated string. + +// Undefine (also defined in libc-symbols.h). +// Copies attributes from the declaration or type referenced by +// the argument. + +// Determine the wordsize from the preprocessor defines. + +// Both x86-64 and x32 use the 64-bit system call interface. +// Properties of long double type. ldbl-96 version. +// Copyright (C) 2016-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// long double is distinct from double, so there is nothing to +// define here. + +// __glibc_macro_warning (MESSAGE) issues warning MESSAGE. This is +// intended for use in preprocessor macros. 
+// +// Note: MESSAGE must be a _single_ string; concatenation of string +// literals is not supported. + +// Generic selection (ISO C11) is a C-only feature, available in GCC +// since version 4.9. Previous versions do not provide generic +// selection, even though they might set __STDC_VERSION__ to 201112L, +// when in -std=c11 mode. Thus, we must check for !defined __GNUC__ +// when testing __STDC_VERSION__ for generic selection support. +// On the other hand, Clang also defines __GNUC__, so a clang-specific +// check is required to enable the use of generic selection. + +// If we don't have __REDIRECT, prototypes will be missing if +// __USE_FILE_OFFSET64 but not __USE_LARGEFILE[64]. + +// Decide whether we can define 'extern inline' functions in headers. + +// This is here only because every header file already includes this one. +// Get the definitions of all the appropriate `__stub_FUNCTION' symbols. +// contains `#define __stub_FUNCTION' when FUNCTION is a stub +// that will always return failure (and set errno to ENOSYS). +// This file is automatically generated. +// This file selects the right generated file of `__stub_FUNCTION' macros +// based on the architecture being compiled for. + +// This file is automatically generated. +// It defines a symbol `__stub_FUNCTION' for each function +// in the C library which is a stub, meaning it will fail +// every time called, usually setting errno to ENOSYS. + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Determine the wordsize from the preprocessor defines. + +// Both x86-64 and x32 use the 64-bit system call interface. +// Bit size of the time_t type at glibc build time, x86-64 and x32 case. +// Copyright (C) 2018-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. 
+// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// For others, time size is word size. + +// Convenience types. +type X__u_char = uint8 /* types.h:31:23 */ +type X__u_short = uint16 /* types.h:32:28 */ +type X__u_int = uint32 /* types.h:33:22 */ +type X__u_long = uint64 /* types.h:34:27 */ + +// Fixed-size types, underlying types depend on word size and compiler. +type X__int8_t = int8 /* types.h:37:21 */ +type X__uint8_t = uint8 /* types.h:38:23 */ +type X__int16_t = int16 /* types.h:39:26 */ +type X__uint16_t = uint16 /* types.h:40:28 */ +type X__int32_t = int32 /* types.h:41:20 */ +type X__uint32_t = uint32 /* types.h:42:22 */ +type X__int64_t = int64 /* types.h:44:25 */ +type X__uint64_t = uint64 /* types.h:45:27 */ + +// Smallest types with at least a given width. +type X__int_least8_t = X__int8_t /* types.h:52:18 */ +type X__uint_least8_t = X__uint8_t /* types.h:53:19 */ +type X__int_least16_t = X__int16_t /* types.h:54:19 */ +type X__uint_least16_t = X__uint16_t /* types.h:55:20 */ +type X__int_least32_t = X__int32_t /* types.h:56:19 */ +type X__uint_least32_t = X__uint32_t /* types.h:57:20 */ +type X__int_least64_t = X__int64_t /* types.h:58:19 */ +type X__uint_least64_t = X__uint64_t /* types.h:59:20 */ + +// quad_t is also 64 bits. +type X__quad_t = int64 /* types.h:63:18 */ +type X__u_quad_t = uint64 /* types.h:64:27 */ + +// Largest integral types. +type X__intmax_t = int64 /* types.h:72:18 */ +type X__uintmax_t = uint64 /* types.h:73:27 */ + +// The machine-dependent file defines __*_T_TYPE +// macros for each of the OS types we define below. The definitions +// of those macros must use the following macros for underlying types. +// We define __S_TYPE and __U_TYPE for the signed and unsigned +// variants of each of the following integer types on this machine. +// +// 16 -- "natural" 16-bit type (always short) +// 32 -- "natural" 32-bit type (always int) +// 64 -- "natural" 64-bit type (long or long long) +// LONG32 -- 32-bit type, traditionally long +// QUAD -- 64-bit type, traditionally long long +// WORD -- natural type of __WORDSIZE bits (int or long) +// LONGWORD -- type of __WORDSIZE bits, traditionally long +// +// We distinguish WORD/LONGWORD, 32/LONG32, and 64/QUAD so that the +// conventional uses of `long' or `long long' type modifiers match the +// types we define, even when a less-adorned type would be the same size. +// This matters for (somewhat) portably writing printf/scanf formats for +// these types, where using the appropriate l or ll format modifiers can +// make the typedefs and the formats match up across all GNU platforms. If +// we used `long' when it's 64 bits where `long long' is expected, then the +// compiler would warn about the formats not matching the argument types, +// and the programmer changing them to shut up the compiler would break the +// program's portability. 
+// +// Here we assume what is presently the case in all the GCC configurations +// we support: long long is always 64 bits, long is always word/address size, +// and int is always 32 bits. + +// No need to mark the typedef with __extension__. +// bits/typesizes.h -- underlying types for *_t. Linux/x86-64 version. +// Copyright (C) 2012-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// See for the meaning of these macros. This file exists so +// that need not vary across different GNU platforms. + +// X32 kernel interface is 64-bit. + +// Tell the libc code that off_t and off64_t are actually the same type +// for all ABI purposes, even if possibly expressed as different base types +// for C type-checking purposes. + +// Same for ino_t and ino64_t. + +// And for __rlim_t and __rlim64_t. + +// And for fsblkcnt_t, fsblkcnt64_t, fsfilcnt_t and fsfilcnt64_t. + +// Number of descriptors that can fit in an `fd_set'. + +// bits/time64.h -- underlying types for __time64_t. Generic version. +// Copyright (C) 2018-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Define __TIME64_T_TYPE so that it is always a 64-bit type. + +// If we already have 64-bit time type then use it. + +type X__dev_t = uint64 /* types.h:145:25 */ // Type of device numbers. +type X__uid_t = uint32 /* types.h:146:25 */ // Type of user identifications. +type X__gid_t = uint32 /* types.h:147:25 */ // Type of group identifications. +type X__ino_t = uint64 /* types.h:148:25 */ // Type of file serial numbers. +type X__ino64_t = uint64 /* types.h:149:27 */ // Type of file serial numbers (LFS). +type X__mode_t = uint32 /* types.h:150:26 */ // Type of file attribute bitmasks. +type X__nlink_t = uint64 /* types.h:151:27 */ // Type of file link counts. +type X__off_t = int64 /* types.h:152:25 */ // Type of file sizes and offsets. +type X__off64_t = int64 /* types.h:153:27 */ // Type of file sizes and offsets (LFS). +type X__pid_t = int32 /* types.h:154:25 */ // Type of process identifications. +type X__fsid_t = struct{ F__val [2]int32 } /* types.h:155:26 */ // Type of file system IDs. 
+type X__clock_t = int64 /* types.h:156:27 */ // Type of CPU usage counts. +type X__rlim_t = uint64 /* types.h:157:26 */ // Type for resource measurement. +type X__rlim64_t = uint64 /* types.h:158:28 */ // Type for resource measurement (LFS). +type X__id_t = uint32 /* types.h:159:24 */ // General type for IDs. +type X__time_t = int64 /* types.h:160:26 */ // Seconds since the Epoch. +type X__useconds_t = uint32 /* types.h:161:30 */ // Count of microseconds. +type X__suseconds_t = int64 /* types.h:162:31 */ // Signed count of microseconds. + +type X__daddr_t = int32 /* types.h:164:27 */ // The type of a disk address. +type X__key_t = int32 /* types.h:165:25 */ // Type of an IPC key. + +// Clock ID used in clock and timer functions. +type X__clockid_t = int32 /* types.h:168:29 */ + +// Timer ID returned by `timer_create'. +type X__timer_t = uintptr /* types.h:171:12 */ + +// Type to represent block size. +type X__blksize_t = int64 /* types.h:174:29 */ + +// Types from the Large File Support interface. + +// Type to count number of disk blocks. +type X__blkcnt_t = int64 /* types.h:179:28 */ +type X__blkcnt64_t = int64 /* types.h:180:30 */ + +// Type to count file system blocks. +type X__fsblkcnt_t = uint64 /* types.h:183:30 */ +type X__fsblkcnt64_t = uint64 /* types.h:184:32 */ + +// Type to count file system nodes. +type X__fsfilcnt_t = uint64 /* types.h:187:30 */ +type X__fsfilcnt64_t = uint64 /* types.h:188:32 */ + +// Type of miscellaneous file system fields. +type X__fsword_t = int64 /* types.h:191:28 */ + +type X__ssize_t = int64 /* types.h:193:27 */ // Type of a byte count, or error. + +// Signed long type used in system calls. +type X__syscall_slong_t = int64 /* types.h:196:33 */ +// Unsigned long type used in system calls. +type X__syscall_ulong_t = uint64 /* types.h:198:33 */ + +// These few don't really vary by system, they always correspond +// +// to one of the other defined types. +type X__loff_t = X__off64_t /* types.h:202:19 */ // Type of file sizes and offsets (LFS). +type X__caddr_t = uintptr /* types.h:203:14 */ + +// Duplicates info from stdint.h but this is used in unistd.h. +type X__intptr_t = int64 /* types.h:206:25 */ + +// Duplicate info from sys/socket.h. +type X__socklen_t = uint32 /* types.h:209:23 */ + +// C99: An integer type that can be accessed as an atomic entity, +// +// even in the presence of asynchronous interrupts. +// It is not currently necessary for this to be machine-specific. +type X__sig_atomic_t = int32 /* types.h:214:13 */ + +// Seconds since the Epoch, visible to user code when time_t is too +// narrow only for consistency with the old way of widening too-narrow +// types. User code should never use __time64_t. 
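Aside (illustrative only, not part of the vendored diff): the alias block above pins the LP64 widths that later structures build on; in particular `X__time_t` and `X__syscall_slong_t` are both int64, which is exactly the shape of the `Timespec` struct emitted further down. A small standalone Go sketch, using local stand-in names rather than the generated identifiers, shows how a timespec-shaped value could be filled from Go's time package under those width assumptions.

package main

import (
	"fmt"
	"time"
)

type timeT = int64         // stand-in for X__time_t on this target
type syscallSlongT = int64 // stand-in for X__syscall_slong_t

type timespec struct {
	Ftv_sec  timeT
	Ftv_nsec syscallSlongT
}

func toTimespec(t time.Time) timespec {
	return timespec{
		Ftv_sec:  t.Unix(),              // whole seconds since the Epoch
		Ftv_nsec: int64(t.Nanosecond()), // 0..999999999 nanoseconds
	}
}

func main() {
	ts := toTimespec(time.Now())
	fmt.Printf("tv_sec=%d tv_nsec=%d\n", ts.Ftv_sec, ts.Ftv_nsec)
}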
+ +type U_char = X__u_char /* types.h:33:18 */ +type U_short = X__u_short /* types.h:34:19 */ +type U_int = X__u_int /* types.h:35:17 */ +type U_long = X__u_long /* types.h:36:18 */ +type Quad_t = X__quad_t /* types.h:37:18 */ +type U_quad_t = X__u_quad_t /* types.h:38:20 */ +type Fsid_t = X__fsid_t /* types.h:39:18 */ +type Loff_t = X__loff_t /* types.h:42:18 */ + +type Ino_t = X__ino64_t /* types.h:49:19 */ + +type Dev_t = X__dev_t /* types.h:59:17 */ + +type Gid_t = X__gid_t /* types.h:64:17 */ + +type Mode_t = X__mode_t /* types.h:69:18 */ + +type Nlink_t = X__nlink_t /* types.h:74:19 */ + +type Uid_t = X__uid_t /* types.h:79:17 */ + +type Off_t = X__off64_t /* types.h:87:19 */ + +type Pid_t = X__pid_t /* types.h:97:17 */ + +type Id_t = X__id_t /* types.h:103:16 */ + +type Ssize_t = X__ssize_t /* types.h:108:19 */ + +type Daddr_t = X__daddr_t /* types.h:114:19 */ +type Caddr_t = X__caddr_t /* types.h:115:19 */ + +type Key_t = X__key_t /* types.h:121:17 */ + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Returned by `clock'. +type Clock_t = X__clock_t /* clock_t.h:7:19 */ + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Clock ID used in clock and timer functions. +type Clockid_t = X__clockid_t /* clockid_t.h:7:21 */ + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. 
+// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Returned by `time'. +type Time_t = X__time_t /* time_t.h:7:18 */ + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Timer ID returned by `timer_create'. +type Timer_t = X__timer_t /* timer_t.h:7:19 */ + +// Wide character type. +// Locale-writers should change this as necessary to +// be big enough to hold unique values not between 0 and 127, +// and not (wchar_t) -1, for each defined multibyte character. + +// Define this type if we are doing the whole job, +// or if we want this type in particular. + +// A null pointer constant. + +// Old compatibility names for C types. +type Ulong = uint64 /* types.h:148:27 */ +type Ushort = uint16 /* types.h:149:28 */ +type Uint = uint32 /* types.h:150:22 */ + +// These size-specific names are used by some of the inet code. + +// Define intN_t types. +// Copyright (C) 2017-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +type Int8_t = X__int8_t /* stdint-intn.h:24:18 */ +type Int16_t = X__int16_t /* stdint-intn.h:25:19 */ +type Int32_t = X__int32_t /* stdint-intn.h:26:19 */ +type Int64_t = X__int64_t /* stdint-intn.h:27:19 */ + +// These were defined by ISO C without the first `_'. +type U_int8_t = X__uint8_t /* types.h:158:19 */ +type U_int16_t = X__uint16_t /* types.h:159:20 */ +type U_int32_t = X__uint32_t /* types.h:160:20 */ +type U_int64_t = X__uint64_t /* types.h:161:20 */ + +type Register_t = int32 /* types.h:164:13 */ + +// It also defines `fd_set' and the FD_* macros for `select'. +// `fd_set' type and related macros, and `select'/`pselect' declarations. +// Copyright (C) 1996-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// POSIX 1003.1g: 6.2 Select from File Descriptor Sets + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Get definition of needed basic types. +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Get __FD_* definitions. +// Copyright (C) 1997-2020 Free Software Foundation, Inc. 
+// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Determine the wordsize from the preprocessor defines. + +// Both x86-64 and x32 use the 64-bit system call interface. + +// Get sigset_t. + +type X__sigset_t = struct{ F__val [16]uint64 } /* __sigset_t.h:8:3 */ + +// A set of signals to be blocked, unblocked, or waited for. +type Sigset_t = X__sigset_t /* sigset_t.h:7:20 */ + +// Get definition of timer specification structures. + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// A time value that is accurate to the nearest +// +// microsecond but also has a range of years. +type Timeval = struct { + Ftv_sec X__time_t + Ftv_usec X__suseconds_t +} /* struct_timeval.h:8:1 */ + +// NB: Include guard matches what uses. + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Endian macros for string.h functions +// Copyright (C) 1992-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. 
+// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// POSIX.1b structure for a time value. This is like a `struct timeval' but +// +// has nanoseconds instead of microseconds. +type Timespec = struct { + Ftv_sec X__time_t + Ftv_nsec X__syscall_slong_t +} /* struct_timespec.h:10:1 */ + +type Suseconds_t = X__suseconds_t /* select.h:43:23 */ + +// The fd_set member is required to be an array of longs. +type X__fd_mask = int64 /* select.h:49:18 */ + +// Some versions of define this macros. +// It's easier to assume 8-bit bytes than to get CHAR_BIT. + +// fd_set for select and pselect. +type Fd_set = struct{ F__fds_bits [16]X__fd_mask } /* select.h:70:5 */ + +// Maximum number of file descriptors in `fd_set'. + +// Sometimes the fd_set member is assumed to have this type. +type Fd_mask = X__fd_mask /* select.h:77:19 */ + +// Define some inlines helping to catch common problems. + +type Blksize_t = X__blksize_t /* types.h:185:21 */ + +// Types from the Large File Support interface. +type Blkcnt_t = X__blkcnt64_t /* types.h:205:22 */ // Type to count number of disk blocks. +type Fsblkcnt_t = X__fsblkcnt64_t /* types.h:209:24 */ // Type to count file system blocks. +type Fsfilcnt_t = X__fsfilcnt64_t /* types.h:213:24 */ // Type to count file system inodes. + +// Now add the thread types. +// Declaration of common pthread types for all architectures. +// Copyright (C) 2017-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// For internal mutex and condition variable definitions. +// Common threading primitives definitions for both POSIX and C11. +// Copyright (C) 2017-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Arch-specific definitions. Each architecture must define the following +// macros to define the expected sizes of pthread data types: +// +// __SIZEOF_PTHREAD_ATTR_T - size of pthread_attr_t. +// __SIZEOF_PTHREAD_MUTEX_T - size of pthread_mutex_t. +// __SIZEOF_PTHREAD_MUTEXATTR_T - size of pthread_mutexattr_t. +// __SIZEOF_PTHREAD_COND_T - size of pthread_cond_t. +// __SIZEOF_PTHREAD_CONDATTR_T - size of pthread_condattr_t. +// __SIZEOF_PTHREAD_RWLOCK_T - size of pthread_rwlock_t. +// __SIZEOF_PTHREAD_RWLOCKATTR_T - size of pthread_rwlockattr_t. +// __SIZEOF_PTHREAD_BARRIER_T - size of pthread_barrier_t. +// __SIZEOF_PTHREAD_BARRIERATTR_T - size of pthread_barrierattr_t. +// +// The additional macro defines any constraint for the lock alignment +// inside the thread structures: +// +// __LOCK_ALIGNMENT - for internal lock/futex usage. +// +// Same idea but for the once locking primitive: +// +// __ONCE_ALIGNMENT - for pthread_once_t/once_flag definition. + +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Determine the wordsize from the preprocessor defines. + +// Both x86-64 and x32 use the 64-bit system call interface. + +// Common definition of pthread_mutex_t. + +type X__pthread_internal_list = struct { + F__prev uintptr + F__next uintptr +} /* thread-shared-types.h:49:9 */ + +// Type to count file system inodes. + +// Now add the thread types. +// Declaration of common pthread types for all architectures. +// Copyright (C) 2017-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// For internal mutex and condition variable definitions. +// Common threading primitives definitions for both POSIX and C11. +// Copyright (C) 2017-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. 
+// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Arch-specific definitions. Each architecture must define the following +// macros to define the expected sizes of pthread data types: +// +// __SIZEOF_PTHREAD_ATTR_T - size of pthread_attr_t. +// __SIZEOF_PTHREAD_MUTEX_T - size of pthread_mutex_t. +// __SIZEOF_PTHREAD_MUTEXATTR_T - size of pthread_mutexattr_t. +// __SIZEOF_PTHREAD_COND_T - size of pthread_cond_t. +// __SIZEOF_PTHREAD_CONDATTR_T - size of pthread_condattr_t. +// __SIZEOF_PTHREAD_RWLOCK_T - size of pthread_rwlock_t. +// __SIZEOF_PTHREAD_RWLOCKATTR_T - size of pthread_rwlockattr_t. +// __SIZEOF_PTHREAD_BARRIER_T - size of pthread_barrier_t. +// __SIZEOF_PTHREAD_BARRIERATTR_T - size of pthread_barrierattr_t. +// +// The additional macro defines any constraint for the lock alignment +// inside the thread structures: +// +// __LOCK_ALIGNMENT - for internal lock/futex usage. +// +// Same idea but for the once locking primitive: +// +// __ONCE_ALIGNMENT - for pthread_once_t/once_flag definition. + +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Determine the wordsize from the preprocessor defines. + +// Both x86-64 and x32 use the 64-bit system call interface. + +// Common definition of pthread_mutex_t. + +type X__pthread_list_t = X__pthread_internal_list /* thread-shared-types.h:53:3 */ + +type X__pthread_internal_slist = struct{ F__next uintptr } /* thread-shared-types.h:55:9 */ + +type X__pthread_slist_t = X__pthread_internal_slist /* thread-shared-types.h:58:3 */ + +// Arch-specific mutex definitions. A generic implementation is provided +// by sysdeps/nptl/bits/struct_mutex.h. If required, an architecture +// can override it by defining: +// +// 1. struct __pthread_mutex_s (used on both pthread_mutex_t and mtx_t +// definition). It should contains at least the internal members +// defined in the generic version. +// +// 2. __LOCK_ALIGNMENT for any extra attribute for internal lock used with +// atomic operations. +// +// 3. The macro __PTHREAD_MUTEX_INITIALIZER used for static initialization. +// It should initialize the mutex internal flag. + +// x86 internal mutex struct definitions. 
+// Copyright (C) 2019-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +type X__pthread_mutex_s = struct { + F__lock int32 + F__count uint32 + F__owner int32 + F__nusers uint32 + F__kind int32 + F__spins int16 + F__elision int16 + F__list X__pthread_list_t +} /* struct_mutex.h:22:1 */ + +// Arch-sepecific read-write lock definitions. A generic implementation is +// provided by struct_rwlock.h. If required, an architecture can override it +// by defining: +// +// 1. struct __pthread_rwlock_arch_t (used on pthread_rwlock_t definition). +// It should contain at least the internal members defined in the +// generic version. +// +// 2. The macro __PTHREAD_RWLOCK_INITIALIZER used for static initialization. +// It should initialize the rwlock internal type. + +// x86 internal rwlock struct definitions. +// Copyright (C) 2019-2020 Free Software Foundation, Inc. +// +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +type X__pthread_rwlock_arch_t = struct { + F__readers uint32 + F__writers uint32 + F__wrphase_futex uint32 + F__writers_futex uint32 + F__pad3 uint32 + F__pad4 uint32 + F__cur_writer int32 + F__shared int32 + F__rwelision int8 + F__pad1 [7]uint8 + F__pad2 uint64 + F__flags uint32 + F__ccgo_pad1 [4]byte +} /* struct_rwlock.h:23:1 */ + +// Common definition of pthread_cond_t. + +type X__pthread_cond_s = struct { + F__0 struct{ F__wseq uint64 } + F__8 struct{ F__g1_start uint64 } + F__g_refs [2]uint32 + F__g_size [2]uint32 + F__g1_orig_size uint32 + F__wrefs uint32 + F__g_signals [2]uint32 +} /* thread-shared-types.h:92:1 */ + +// Thread identifiers. The structure of the attribute type is not +// +// exposed on purpose. +type Pthread_t = uint64 /* pthreadtypes.h:27:27 */ + +// Data structures for mutex handling. The structure of the attribute +// +// type is not exposed on purpose. +type Pthread_mutexattr_t = struct { + F__ccgo_pad1 [0]uint32 + F__size [4]int8 +} /* pthreadtypes.h:36:3 */ + +// Data structure for condition variable handling. The structure of +// +// the attribute type is not exposed on purpose. 
+type Pthread_condattr_t = struct { + F__ccgo_pad1 [0]uint32 + F__size [4]int8 +} /* pthreadtypes.h:45:3 */ + +// Keys for thread-specific data +type Pthread_key_t = uint32 /* pthreadtypes.h:49:22 */ + +// Once-only execution +type Pthread_once_t = int32 /* pthreadtypes.h:53:30 */ + +type Pthread_attr_t1 = struct { + F__ccgo_pad1 [0]uint64 + F__size [56]int8 +} /* pthreadtypes.h:56:1 */ + +type Pthread_attr_t = Pthread_attr_t1 /* pthreadtypes.h:62:30 */ + +type Pthread_mutex_t = struct{ F__data X__pthread_mutex_s } /* pthreadtypes.h:72:3 */ + +type Pthread_cond_t = struct{ F__data X__pthread_cond_s } /* pthreadtypes.h:80:3 */ + +// Data structure for reader-writer lock variable handling. The +// +// structure of the attribute type is deliberately not exposed. +type Pthread_rwlock_t = struct{ F__data X__pthread_rwlock_arch_t } /* pthreadtypes.h:91:3 */ + +type Pthread_rwlockattr_t = struct { + F__ccgo_pad1 [0]uint64 + F__size [8]int8 +} /* pthreadtypes.h:97:3 */ + +// POSIX spinlock data type. +type Pthread_spinlock_t = int32 /* pthreadtypes.h:103:22 */ + +// POSIX barriers data type. The structure of the type is +// +// deliberately not exposed. +type Pthread_barrier_t = struct { + F__ccgo_pad1 [0]uint64 + F__size [32]int8 +} /* pthreadtypes.h:112:3 */ + +type Pthread_barrierattr_t = struct { + F__ccgo_pad1 [0]uint32 + F__size [4]int8 +} /* pthreadtypes.h:118:3 */ + +var _ int8 /* gen.c:2:13: */ diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/termios/capi_linux_loong64.go temporal-1.22.5/src/vendor/modernc.org/libc/termios/capi_linux_loong64.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/termios/capi_linux_loong64.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/termios/capi_linux_loong64.go 2024-02-23 09:46:15.000000000 +0000 @@ -0,0 +1,5 @@ +// Code generated by 'ccgo termios/gen.c -crt-import-path -export-defines -export-enums -export-externs X -export-fields F -export-structs -export-typedefs -header -hide _OSSwapInt16,_OSSwapInt32,_OSSwapInt64 -ignore-unsupported-alignment -o termios/termios_linux_amd64.go -pkgname termios', DO NOT EDIT. + +package termios + +var CAPI = map[string]struct{}{} diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/termios/termios_linux_loong64.go temporal-1.22.5/src/vendor/modernc.org/libc/termios/termios_linux_loong64.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/termios/termios_linux_loong64.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/termios/termios_linux_loong64.go 2024-02-23 09:46:15.000000000 +0000 @@ -0,0 +1,1024 @@ +// Code generated by 'ccgo termios/gen.c -crt-import-path "" -export-defines "" -export-enums "" -export-externs X -export-fields F -export-structs "" -export-typedefs "" -header -hide _OSSwapInt16,_OSSwapInt32,_OSSwapInt64 -ignore-unsupported-alignment -o termios/termios_linux_amd64.go -pkgname termios', DO NOT EDIT. 
+ +package termios + +import ( + "math" + "reflect" + "sync/atomic" + "unsafe" +) + +var _ = math.Pi +var _ reflect.Kind +var _ atomic.Value +var _ unsafe.Pointer + +const ( + B0 = 0000000 // termios.h:33:1: + B1000000 = 0010010 // termios-baud.h:40:1: + B110 = 0000003 // termios.h:36:1: + B115200 = 0010002 // termios-baud.h:34:1: + B1152000 = 0010011 // termios-baud.h:41:1: + B1200 = 0000011 // termios.h:42:1: + B134 = 0000004 // termios.h:37:1: + B150 = 0000005 // termios.h:38:1: + B1500000 = 0010012 // termios-baud.h:42:1: + B1800 = 0000012 // termios.h:43:1: + B19200 = 0000016 // termios.h:47:1: + B200 = 0000006 // termios.h:39:1: + B2000000 = 0010013 // termios-baud.h:43:1: + B230400 = 0010003 // termios-baud.h:35:1: + B2400 = 0000013 // termios.h:44:1: + B2500000 = 0010014 // termios-baud.h:44:1: + B300 = 0000007 // termios.h:40:1: + B3000000 = 0010015 // termios-baud.h:45:1: + B3500000 = 0010016 // termios-baud.h:46:1: + B38400 = 0000017 // termios.h:48:1: + B4000000 = 0010017 // termios-baud.h:47:1: + B460800 = 0010004 // termios-baud.h:36:1: + B4800 = 0000014 // termios.h:45:1: + B50 = 0000001 // termios.h:34:1: + B500000 = 0010005 // termios-baud.h:37:1: + B57600 = 0010001 // termios-baud.h:33:1: + B576000 = 0010006 // termios-baud.h:38:1: + B600 = 0000010 // termios.h:41:1: + B75 = 0000002 // termios.h:35:1: + B921600 = 0010007 // termios-baud.h:39:1: + B9600 = 0000015 // termios.h:46:1: + BRKINT = 0000002 // termios-c_iflag.h:25:1: + BS0 = 0000000 // termios-c_oflag.h:48:1: + BS1 = 0020000 // termios-c_oflag.h:49:1: + BSDLY = 0020000 // termios-c_oflag.h:47:1: + CBAUD = 000000010017 // termios-baud.h:24:1: + CBAUDEX = 000000010000 // termios-baud.h:25:1: + CBRK = 0 // ttydefaults.h:83:1: + CDISCARD = 15 // ttydefaults.h:78:1: + CDSUSP = 25 // ttydefaults.h:74:1: + CEOF = 4 // ttydefaults.h:56:1: + CEOL = 0 // ttydefaults.h:60:1: + CEOT = 4 // ttydefaults.h:81:1: + CERASE = 0177 // ttydefaults.h:62:1: + CFLUSH = 15 // ttydefaults.h:85:1: + CIBAUD = 002003600000 // termios-baud.h:27:1: + CINTR = 3 // ttydefaults.h:63:1: + CKILL = 21 // ttydefaults.h:69:1: + CLNEXT = 22 // ttydefaults.h:77:1: + CLOCAL = 0004000 // termios-c_cflag.h:34:1: + CMIN = 1 // ttydefaults.h:70:1: + CMSPAR = 010000000000 // termios-baud.h:28:1: + CQUIT = 034 // ttydefaults.h:71:1: + CR0 = 0000000 // termios-c_oflag.h:38:1: + CR1 = 0001000 // termios-c_oflag.h:39:1: + CR2 = 0002000 // termios-c_oflag.h:40:1: + CR3 = 0003000 // termios-c_oflag.h:41:1: + CRDLY = 0003000 // termios-c_oflag.h:37:1: + CREAD = 0000200 // termios-c_cflag.h:30:1: + CREPRINT = 18 // ttydefaults.h:80:1: + CRPRNT = 18 // ttydefaults.h:84:1: + CRTSCTS = 020000000000 // termios-baud.h:29:1: + CS5 = 0000000 // termios-c_cflag.h:25:1: + CS6 = 0000020 // termios-c_cflag.h:26:1: + CS7 = 0000040 // termios-c_cflag.h:27:1: + CS8 = 0000060 // termios-c_cflag.h:28:1: + CSIZE = 0000060 // termios-c_cflag.h:24:1: + CSTART = 17 // ttydefaults.h:75:1: + CSTATUS = 0 // ttydefaults.h:67:1: + CSTOP = 19 // ttydefaults.h:76:1: + CSTOPB = 0000100 // termios-c_cflag.h:29:1: + CSUSP = 26 // ttydefaults.h:72:1: + CTIME = 0 // ttydefaults.h:73:1: + CWERASE = 23 // ttydefaults.h:79:1: + ECHO = 0000010 // termios-c_lflag.h:29:1: + ECHOCTL = 0001000 // termios-c_lflag.h:37:1: + ECHOE = 0000020 // termios-c_lflag.h:30:1: + ECHOK = 0000040 // termios-c_lflag.h:32:1: + ECHOKE = 0004000 // termios-c_lflag.h:45:1: + ECHONL = 0000100 // termios-c_lflag.h:33:1: + ECHOPRT = 0002000 // termios-c_lflag.h:42:1: + EXTA = 14 // termios.h:50:1: + EXTB = 15 // termios.h:51:1: 
+ EXTPROC = 0200000 // termios-c_lflag.h:57:1: + FF0 = 0000000 // termios-c_oflag.h:51:1: + FF1 = 0100000 // termios-c_oflag.h:52:1: + FFDLY = 0100000 // termios-c_oflag.h:50:1: + FLUSHO = 0010000 // termios-c_lflag.h:48:1: + HUPCL = 0002000 // termios-c_cflag.h:33:1: + ICANON = 0000002 // termios-c_lflag.h:25:1: + ICRNL = 0000400 // termios-c_iflag.h:32:1: + IEXTEN = 0100000 // termios-c_lflag.h:54:1: + IGNBRK = 0000001 // termios-c_iflag.h:24:1: + IGNCR = 0000200 // termios-c_iflag.h:31:1: + IGNPAR = 0000004 // termios-c_iflag.h:26:1: + IMAXBEL = 0020000 // termios-c_iflag.h:38:1: + INLCR = 0000100 // termios-c_iflag.h:30:1: + INPCK = 0000020 // termios-c_iflag.h:28:1: + ISIG = 0000001 // termios-c_lflag.h:24:1: + ISTRIP = 0000040 // termios-c_iflag.h:29:1: + IUCLC = 0001000 // termios-c_iflag.h:33:1: + IUTF8 = 0040000 // termios-c_iflag.h:40:1: + IXANY = 0004000 // termios-c_iflag.h:36:1: + IXOFF = 0010000 // termios-c_iflag.h:37:1: + IXON = 0002000 // termios-c_iflag.h:35:1: + NCCS = 32 // termios-struct.h:23:1: + NL0 = 0000000 // termios-c_oflag.h:35:1: + NL1 = 0000400 // termios-c_oflag.h:36:1: + NLDLY = 0000400 // termios-c_oflag.h:34:1: + NOFLSH = 0000200 // termios-c_lflag.h:34:1: + OCRNL = 0000010 // termios-c_oflag.h:28:1: + OFDEL = 0000200 // termios-c_oflag.h:32:1: + OFILL = 0000100 // termios-c_oflag.h:31:1: + OLCUC = 0000002 // termios-c_oflag.h:25:1: + ONLCR = 0000004 // termios-c_oflag.h:27:1: + ONLRET = 0000040 // termios-c_oflag.h:30:1: + ONOCR = 0000020 // termios-c_oflag.h:29:1: + OPOST = 0000001 // termios-c_oflag.h:24:1: + PARENB = 0000400 // termios-c_cflag.h:31:1: + PARMRK = 0000010 // termios-c_iflag.h:27:1: + PARODD = 0001000 // termios-c_cflag.h:32:1: + PENDIN = 0040000 // termios-c_lflag.h:50:1: + TAB0 = 0000000 // termios-c_oflag.h:43:1: + TAB1 = 0004000 // termios-c_oflag.h:44:1: + TAB2 = 0010000 // termios-c_oflag.h:45:1: + TAB3 = 0014000 // termios-c_oflag.h:46:1: + TABDLY = 0014000 // termios-c_oflag.h:42:1: + TCIFLUSH = 0 // termios.h:70:1: + TCIOFF = 2 // termios.h:66:1: + TCIOFLUSH = 2 // termios.h:72:1: + TCION = 3 // termios.h:67:1: + TCOFLUSH = 1 // termios.h:71:1: + TCOOFF = 0 // termios.h:64:1: + TCOON = 1 // termios.h:65:1: + TCSADRAIN = 1 // termios-tcflow.h:25:1: + TCSAFLUSH = 2 // termios-tcflow.h:26:1: + TCSANOW = 0 // termios-tcflow.h:24:1: + TIOCSER_TEMT = 0x01 // termios.h:60:1: + TOSTOP = 0000400 // termios-c_lflag.h:35:1: + TTYDEF_CFLAG = 1440 // ttydefaults.h:49:1: + TTYDEF_IFLAG = 11554 // ttydefaults.h:46:1: + TTYDEF_LFLAG = 35355 // ttydefaults.h:48:1: + TTYDEF_OFLAG = 6149 // ttydefaults.h:47:1: + TTYDEF_SPEED = 13 // ttydefaults.h:50:1: + VDISCARD = 13 // termios-c_cc.h:37:1: + VEOF = 4 // termios-c_cc.h:28:1: + VEOL = 11 // termios-c_cc.h:35:1: + VEOL2 = 16 // termios-c_cc.h:40:1: + VERASE = 2 // termios-c_cc.h:26:1: + VINTR = 0 // termios-c_cc.h:24:1: + VKILL = 3 // termios-c_cc.h:27:1: + VLNEXT = 15 // termios-c_cc.h:39:1: + VMIN = 6 // termios-c_cc.h:30:1: + VQUIT = 1 // termios-c_cc.h:25:1: + VREPRINT = 12 // termios-c_cc.h:36:1: + VSTART = 8 // termios-c_cc.h:32:1: + VSTOP = 9 // termios-c_cc.h:33:1: + VSUSP = 10 // termios-c_cc.h:34:1: + VSWTC = 7 // termios-c_cc.h:31:1: + VT0 = 0000000 // termios-c_oflag.h:56:1: + VT1 = 0040000 // termios-c_oflag.h:57:1: + VTDLY = 0040000 // termios-c_oflag.h:55:1: + VTIME = 5 // termios-c_cc.h:29:1: + VWERASE = 14 // termios-c_cc.h:38:1: + XCASE = 0000004 // termios-c_lflag.h:27:1: + XTABS = 0014000 // termios-c_oflag.h:60:1: + X_ATFILE_SOURCE = 1 // features.h:342:1: + X_BITS_TIME64_H = 1 
// time64.h:24:1: + X_BITS_TYPESIZES_H = 1 // typesizes.h:24:1: + X_BITS_TYPES_H = 1 // types.h:24:1: + X_DEFAULT_SOURCE = 1 // features.h:227:1: + X_FEATURES_H = 1 // features.h:19:1: + X_FILE_OFFSET_BITS = 64 // :25:1: + X_HAVE_STRUCT_TERMIOS_C_ISPEED = 1 // termios-struct.h:34:1: + X_HAVE_STRUCT_TERMIOS_C_OSPEED = 1 // termios-struct.h:35:1: + X_LP64 = 1 // :284:1: + X_POSIX_C_SOURCE = 200809 // features.h:281:1: + X_POSIX_SOURCE = 1 // features.h:279:1: + X_STDC_PREDEF_H = 1 // :162:1: + X_SYS_CDEFS_H = 1 // cdefs.h:19:1: + X_SYS_TTYDEFAULTS_H_ = 0 // ttydefaults.h:41:1: + X_TERMIOS_H = 1 // termios.h:23:1: + Linux = 1 // :231:1: + Unix = 1 // :177:1: +) + +type Ptrdiff_t = int64 /* :3:26 */ + +type Size_t = uint64 /* :9:23 */ + +type Wchar_t = int32 /* :15:24 */ + +type X__int128_t = struct { + Flo int64 + Fhi int64 +} /* :21:43 */ // must match modernc.org/mathutil.Int128 +type X__uint128_t = struct { + Flo uint64 + Fhi uint64 +} /* :22:44 */ // must match modernc.org/mathutil.Int128 + +type X__builtin_va_list = uintptr /* :46:14 */ +type X__float128 = float64 /* :47:21 */ + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// POSIX Standard: 7.1-2 General Terminal Interface + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// These are defined by the user (or the compiler) +// to specify the desired environment: +// +// __STRICT_ANSI__ ISO Standard C. +// _ISOC99_SOURCE Extensions to ISO C89 from ISO C99. +// _ISOC11_SOURCE Extensions to ISO C99 from ISO C11. +// _ISOC2X_SOURCE Extensions to ISO C99 from ISO C2X. +// __STDC_WANT_LIB_EXT2__ +// Extensions to ISO C99 from TR 27431-2:2010. +// __STDC_WANT_IEC_60559_BFP_EXT__ +// Extensions to ISO C11 from TS 18661-1:2014. +// __STDC_WANT_IEC_60559_FUNCS_EXT__ +// Extensions to ISO C11 from TS 18661-4:2015. +// __STDC_WANT_IEC_60559_TYPES_EXT__ +// Extensions to ISO C11 from TS 18661-3:2015. +// +// _POSIX_SOURCE IEEE Std 1003.1. 
+// _POSIX_C_SOURCE If ==1, like _POSIX_SOURCE; if >=2 add IEEE Std 1003.2; +// if >=199309L, add IEEE Std 1003.1b-1993; +// if >=199506L, add IEEE Std 1003.1c-1995; +// if >=200112L, all of IEEE 1003.1-2004 +// if >=200809L, all of IEEE 1003.1-2008 +// _XOPEN_SOURCE Includes POSIX and XPG things. Set to 500 if +// Single Unix conformance is wanted, to 600 for the +// sixth revision, to 700 for the seventh revision. +// _XOPEN_SOURCE_EXTENDED XPG things and X/Open Unix extensions. +// _LARGEFILE_SOURCE Some more functions for correct standard I/O. +// _LARGEFILE64_SOURCE Additional functionality from LFS for large files. +// _FILE_OFFSET_BITS=N Select default filesystem interface. +// _ATFILE_SOURCE Additional *at interfaces. +// _GNU_SOURCE All of the above, plus GNU extensions. +// _DEFAULT_SOURCE The default set of features (taking precedence over +// __STRICT_ANSI__). +// +// _FORTIFY_SOURCE Add security hardening to many library functions. +// Set to 1 or 2; 2 performs stricter checks than 1. +// +// _REENTRANT, _THREAD_SAFE +// Obsolete; equivalent to _POSIX_C_SOURCE=199506L. +// +// The `-ansi' switch to the GNU C compiler, and standards conformance +// options such as `-std=c99', define __STRICT_ANSI__. If none of +// these are defined, or if _DEFAULT_SOURCE is defined, the default is +// to have _POSIX_SOURCE set to one and _POSIX_C_SOURCE set to +// 200809L, as well as enabling miscellaneous functions from BSD and +// SVID. If more than one of these are defined, they accumulate. For +// example __STRICT_ANSI__, _POSIX_SOURCE and _POSIX_C_SOURCE together +// give you ISO C, 1003.1, and 1003.2, but nothing else. +// +// These are defined by this file and are used by the +// header files to decide what to declare or define: +// +// __GLIBC_USE (F) Define things from feature set F. This is defined +// to 1 or 0; the subsequent macros are either defined +// or undefined, and those tests should be moved to +// __GLIBC_USE. +// __USE_ISOC11 Define ISO C11 things. +// __USE_ISOC99 Define ISO C99 things. +// __USE_ISOC95 Define ISO C90 AMD1 (C95) things. +// __USE_ISOCXX11 Define ISO C++11 things. +// __USE_POSIX Define IEEE Std 1003.1 things. +// __USE_POSIX2 Define IEEE Std 1003.2 things. +// __USE_POSIX199309 Define IEEE Std 1003.1, and .1b things. +// __USE_POSIX199506 Define IEEE Std 1003.1, .1b, .1c and .1i things. +// __USE_XOPEN Define XPG things. +// __USE_XOPEN_EXTENDED Define X/Open Unix things. +// __USE_UNIX98 Define Single Unix V2 things. +// __USE_XOPEN2K Define XPG6 things. +// __USE_XOPEN2KXSI Define XPG6 XSI things. +// __USE_XOPEN2K8 Define XPG7 things. +// __USE_XOPEN2K8XSI Define XPG7 XSI things. +// __USE_LARGEFILE Define correct standard I/O things. +// __USE_LARGEFILE64 Define LFS things with separate names. +// __USE_FILE_OFFSET64 Define 64bit interface as default. +// __USE_MISC Define things from 4.3BSD or System V Unix. +// __USE_ATFILE Define *at interfaces and AT_* constants for them. +// __USE_GNU Define GNU extensions. +// __USE_FORTIFY_LEVEL Additional security measures used, according to level. +// +// The macros `__GNU_LIBRARY__', `__GLIBC__', and `__GLIBC_MINOR__' are +// defined by this file unconditionally. `__GNU_LIBRARY__' is provided +// only for compatibility. All new code should use the other symbols +// to test for features. +// +// All macros listed above as possibly being defined by this file are +// explicitly undefined if they are not explicitly defined. 
+// Feature-test macros that are not defined by the user or compiler +// but are implied by the other feature-test macros defined (or by the +// lack of any definitions) are defined by the file. +// +// ISO C feature test macros depend on the definition of the macro +// when an affected header is included, not when the first system +// header is included, and so they are handled in +// , which does not have a multiple include +// guard. Feature test macros that can be handled from the first +// system header included are handled here. + +// Undefine everything, so we get a clean slate. + +// Suppress kernel-name space pollution unless user expressedly asks +// for it. + +// Convenience macro to test the version of gcc. +// Use like this: +// #if __GNUC_PREREQ (2,8) +// ... code requiring gcc 2.8 or later ... +// #endif +// Note: only works for GCC 2.0 and later, because __GNUC_MINOR__ was +// added in 2.0. + +// Similarly for clang. Features added to GCC after version 4.2 may +// or may not also be available in clang, and clang's definitions of +// __GNUC(_MINOR)__ are fixed at 4 and 2 respectively. Not all such +// features can be queried via __has_extension/__has_feature. + +// Whether to use feature set F. + +// _BSD_SOURCE and _SVID_SOURCE are deprecated aliases for +// _DEFAULT_SOURCE. If _DEFAULT_SOURCE is present we do not +// issue a warning; the expectation is that the source is being +// transitioned to use the new macro. + +// If _GNU_SOURCE was defined by the user, turn on all the other features. + +// If nothing (other than _GNU_SOURCE and _DEFAULT_SOURCE) is defined, +// define _DEFAULT_SOURCE. + +// This is to enable the ISO C2X extension. + +// This is to enable the ISO C11 extension. + +// This is to enable the ISO C99 extension. + +// This is to enable the ISO C90 Amendment 1:1995 extension. + +// If none of the ANSI/POSIX macros are defined, or if _DEFAULT_SOURCE +// is defined, use POSIX.1-2008 (or another version depending on +// _XOPEN_SOURCE). + +// Some C libraries once required _REENTRANT and/or _THREAD_SAFE to be +// defined in all multithreaded code. GNU libc has not required this +// for many years. We now treat them as compatibility synonyms for +// _POSIX_C_SOURCE=199506L, which is the earliest level of POSIX with +// comprehensive support for multithreaded code. Using them never +// lowers the selected level of POSIX conformance, only raises it. + +// The function 'gets' existed in C89, but is impossible to use +// safely. It has been removed from ISO C11 and ISO C++14. Note: for +// compatibility with various implementations of , this test +// must consider only the value of __cplusplus when compiling C++. + +// GNU formerly extended the scanf functions with modified format +// specifiers %as, %aS, and %a[...] that allocate a buffer for the +// input using malloc. This extension conflicts with ISO C99, which +// defines %a as a standalone format specifier that reads a floating- +// point number; moreover, POSIX.1-2008 provides the same feature +// using the modifier letter 'm' instead (%ms, %mS, %m[...]). +// +// We now follow C99 unless GNU extensions are active and the compiler +// is specifically in C89 or C++98 mode (strict or not). For +// instance, with GCC, -std=gnu11 will have C99-compliant scanf with +// or without -D_GNU_SOURCE, but -std=c89 -D_GNU_SOURCE will have the +// old extension. + +// Get definitions of __STDC_* predefined macros, if the compiler has +// not preincluded this header automatically. 
+// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// This macro indicates that the installed library is the GNU C Library. +// For historic reasons the value now is 6 and this will stay from now +// on. The use of this variable is deprecated. Use __GLIBC__ and +// __GLIBC_MINOR__ now (see below) when you want to test for a specific +// GNU C library version and use the values in to get +// the sonames of the shared libraries. + +// Major and minor version number of the GNU C library package. Use +// these macros to test for features in specific releases. + +// This is here only because every header file already includes this one. +// Copyright (C) 1992-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// We are almost always included from features.h. + +// The GNU libc does not support any K&R compilers or the traditional mode +// of ISO C compilers anymore. Check for some of the combinations not +// anymore supported. + +// Some user header file might have defined this before. + +// All functions, except those with callbacks or those that +// synchronize memory, are leaf functions. + +// GCC can always grok prototypes. For C++ programs we add throw() +// to help it optimize the function calls. But this works only with +// gcc 2.8.x and egcs. For gcc 3.2 and up we even mark C functions +// as non-throwing using a function attribute since programs can use +// the -fexceptions options for C code as well. + +// Compilers that are not clang may object to +// #if defined __clang__ && __has_extension(...) +// even though they do not need to evaluate the right-hand side of the &&. + +// These two macros are not used in glibc anymore. They are kept here +// only because some other projects expect the macros to be defined. + +// For these things, GCC behaves the ANSI way normally, +// and the non-ANSI way under -traditional. + +// This is not a typedef so `const __ptr_t' does the right thing. + +// C++ needs to know that types and declarations are C, not C++. + +// Fortify support. + +// Support for flexible arrays. +// Headers that should use flexible arrays only if they're "real" +// (e.g. 
only if they won't affect sizeof()) should test +// #if __glibc_c99_flexarr_available. + +// __asm__ ("xyz") is used throughout the headers to rename functions +// at the assembly language level. This is wrapped by the __REDIRECT +// macro, in order to support compilers that can do this some other +// way. When compilers don't support asm-names at all, we have to do +// preprocessor tricks instead (which don't have exactly the right +// semantics, but it's the best we can do). +// +// Example: +// int __REDIRECT(setpgrp, (__pid_t pid, __pid_t pgrp), setpgid); + +// +// #elif __SOME_OTHER_COMPILER__ +// +// # define __REDIRECT(name, proto, alias) name proto; _Pragma("let " #name " = " #alias) + +// GCC has various useful declarations that can be made with the +// `__attribute__' syntax. All of the ways we use this do fine if +// they are omitted for compilers that don't understand it. + +// At some point during the gcc 2.96 development the `malloc' attribute +// for functions was introduced. We don't want to use it unconditionally +// (although this would be possible) since it generates warnings. + +// Tell the compiler which arguments to an allocation function +// indicate the size of the allocation. + +// At some point during the gcc 2.96 development the `pure' attribute +// for functions was introduced. We don't want to use it unconditionally +// (although this would be possible) since it generates warnings. + +// This declaration tells the compiler that the value is constant. + +// At some point during the gcc 3.1 development the `used' attribute +// for functions was introduced. We don't want to use it unconditionally +// (although this would be possible) since it generates warnings. + +// Since version 3.2, gcc allows marking deprecated functions. + +// Since version 4.5, gcc also allows one to specify the message printed +// when a deprecated function is used. clang claims to be gcc 4.2, but +// may also support this feature. + +// At some point during the gcc 2.8 development the `format_arg' attribute +// for functions was introduced. We don't want to use it unconditionally +// (although this would be possible) since it generates warnings. +// If several `format_arg' attributes are given for the same function, in +// gcc-3.0 and older, all but the last one are ignored. In newer gccs, +// all designated arguments are considered. + +// At some point during the gcc 2.97 development the `strfmon' format +// attribute for functions was introduced. We don't want to use it +// unconditionally (although this would be possible) since it +// generates warnings. + +// The nonull function attribute allows to mark pointer parameters which +// must not be NULL. + +// If fortification mode, we warn about unused results of certain +// function calls which can lead to problems. + +// Forces a function to be always inlined. +// The Linux kernel defines __always_inline in stddef.h (283d7573), and +// it conflicts with this definition. Therefore undefine it first to +// allow either header to be included first. + +// Associate error messages with the source location of the call site rather +// than with the source location inside the function. + +// GCC 4.3 and above with -std=c99 or -std=gnu99 implements ISO C99 +// inline semantics, unless -fgnu89-inline is used. Using __GNUC_STDC_INLINE__ +// or __GNUC_GNU_INLINE is not a good enough check for gcc because gcc versions +// older than 4.3 may define these macros and still not guarantee GNU inlining +// semantics. 
+// +// clang++ identifies itself as gcc-4.2, but has support for GNU inlining +// semantics, that can be checked for by using the __GNUC_STDC_INLINE_ and +// __GNUC_GNU_INLINE__ macro definitions. + +// GCC 4.3 and above allow passing all anonymous arguments of an +// __extern_always_inline function to some other vararg function. + +// It is possible to compile containing GCC extensions even if GCC is +// run in pedantic mode if the uses are carefully marked using the +// `__extension__' keyword. But this is not generally available before +// version 2.8. + +// __restrict is known in EGCS 1.2 and above. + +// ISO C99 also allows to declare arrays as non-overlapping. The syntax is +// array_name[restrict] +// GCC 3.1 supports this. + +// Describes a char array whose address can safely be passed as the first +// argument to strncpy and strncat, as the char array is not necessarily +// a NUL-terminated string. + +// Undefine (also defined in libc-symbols.h). +// Copies attributes from the declaration or type referenced by +// the argument. + +// Determine the wordsize from the preprocessor defines. + +// Both x86-64 and x32 use the 64-bit system call interface. +// Properties of long double type. ldbl-96 version. +// Copyright (C) 2016-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// long double is distinct from double, so there is nothing to +// define here. + +// __glibc_macro_warning (MESSAGE) issues warning MESSAGE. This is +// intended for use in preprocessor macros. +// +// Note: MESSAGE must be a _single_ string; concatenation of string +// literals is not supported. + +// Generic selection (ISO C11) is a C-only feature, available in GCC +// since version 4.9. Previous versions do not provide generic +// selection, even though they might set __STDC_VERSION__ to 201112L, +// when in -std=c11 mode. Thus, we must check for !defined __GNUC__ +// when testing __STDC_VERSION__ for generic selection support. +// On the other hand, Clang also defines __GNUC__, so a clang-specific +// check is required to enable the use of generic selection. + +// If we don't have __REDIRECT, prototypes will be missing if +// __USE_FILE_OFFSET64 but not __USE_LARGEFILE[64]. + +// Decide whether we can define 'extern inline' functions in headers. + +// This is here only because every header file already includes this one. +// Get the definitions of all the appropriate `__stub_FUNCTION' symbols. +// contains `#define __stub_FUNCTION' when FUNCTION is a stub +// that will always return failure (and set errno to ENOSYS). +// This file is automatically generated. +// This file selects the right generated file of `__stub_FUNCTION' macros +// based on the architecture being compiled for. + +// This file is automatically generated. 
+// It defines a symbol `__stub_FUNCTION' for each function +// in the C library which is a stub, meaning it will fail +// every time called, usually setting errno to ENOSYS. + +// We need `pid_t'. +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Determine the wordsize from the preprocessor defines. + +// Both x86-64 and x32 use the 64-bit system call interface. +// Bit size of the time_t type at glibc build time, x86-64 and x32 case. +// Copyright (C) 2018-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// For others, time size is word size. + +// Convenience types. +type X__u_char = uint8 /* types.h:31:23 */ +type X__u_short = uint16 /* types.h:32:28 */ +type X__u_int = uint32 /* types.h:33:22 */ +type X__u_long = uint64 /* types.h:34:27 */ + +// Fixed-size types, underlying types depend on word size and compiler. 
+type X__int8_t = int8 /* types.h:37:21 */ +type X__uint8_t = uint8 /* types.h:38:23 */ +type X__int16_t = int16 /* types.h:39:26 */ +type X__uint16_t = uint16 /* types.h:40:28 */ +type X__int32_t = int32 /* types.h:41:20 */ +type X__uint32_t = uint32 /* types.h:42:22 */ +type X__int64_t = int64 /* types.h:44:25 */ +type X__uint64_t = uint64 /* types.h:45:27 */ + +// Smallest types with at least a given width. +type X__int_least8_t = X__int8_t /* types.h:52:18 */ +type X__uint_least8_t = X__uint8_t /* types.h:53:19 */ +type X__int_least16_t = X__int16_t /* types.h:54:19 */ +type X__uint_least16_t = X__uint16_t /* types.h:55:20 */ +type X__int_least32_t = X__int32_t /* types.h:56:19 */ +type X__uint_least32_t = X__uint32_t /* types.h:57:20 */ +type X__int_least64_t = X__int64_t /* types.h:58:19 */ +type X__uint_least64_t = X__uint64_t /* types.h:59:20 */ + +// quad_t is also 64 bits. +type X__quad_t = int64 /* types.h:63:18 */ +type X__u_quad_t = uint64 /* types.h:64:27 */ + +// Largest integral types. +type X__intmax_t = int64 /* types.h:72:18 */ +type X__uintmax_t = uint64 /* types.h:73:27 */ + +// The machine-dependent file defines __*_T_TYPE +// macros for each of the OS types we define below. The definitions +// of those macros must use the following macros for underlying types. +// We define __S_TYPE and __U_TYPE for the signed and unsigned +// variants of each of the following integer types on this machine. +// +// 16 -- "natural" 16-bit type (always short) +// 32 -- "natural" 32-bit type (always int) +// 64 -- "natural" 64-bit type (long or long long) +// LONG32 -- 32-bit type, traditionally long +// QUAD -- 64-bit type, traditionally long long +// WORD -- natural type of __WORDSIZE bits (int or long) +// LONGWORD -- type of __WORDSIZE bits, traditionally long +// +// We distinguish WORD/LONGWORD, 32/LONG32, and 64/QUAD so that the +// conventional uses of `long' or `long long' type modifiers match the +// types we define, even when a less-adorned type would be the same size. +// This matters for (somewhat) portably writing printf/scanf formats for +// these types, where using the appropriate l or ll format modifiers can +// make the typedefs and the formats match up across all GNU platforms. If +// we used `long' when it's 64 bits where `long long' is expected, then the +// compiler would warn about the formats not matching the argument types, +// and the programmer changing them to shut up the compiler would break the +// program's portability. +// +// Here we assume what is presently the case in all the GCC configurations +// we support: long long is always 64 bits, long is always word/address size, +// and int is always 32 bits. + +// No need to mark the typedef with __extension__. +// bits/typesizes.h -- underlying types for *_t. Linux/x86-64 version. +// Copyright (C) 2012-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// See for the meaning of these macros. This file exists so +// that need not vary across different GNU platforms. + +// X32 kernel interface is 64-bit. + +// Tell the libc code that off_t and off64_t are actually the same type +// for all ABI purposes, even if possibly expressed as different base types +// for C type-checking purposes. + +// Same for ino_t and ino64_t. + +// And for __rlim_t and __rlim64_t. + +// And for fsblkcnt_t, fsblkcnt64_t, fsfilcnt_t and fsfilcnt64_t. + +// Number of descriptors that can fit in an `fd_set'. + +// bits/time64.h -- underlying types for __time64_t. Generic version. +// Copyright (C) 2018-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Define __TIME64_T_TYPE so that it is always a 64-bit type. + +// If we already have 64-bit time type then use it. + +type X__dev_t = uint64 /* types.h:145:25 */ // Type of device numbers. +type X__uid_t = uint32 /* types.h:146:25 */ // Type of user identifications. +type X__gid_t = uint32 /* types.h:147:25 */ // Type of group identifications. +type X__ino_t = uint64 /* types.h:148:25 */ // Type of file serial numbers. +type X__ino64_t = uint64 /* types.h:149:27 */ // Type of file serial numbers (LFS). +type X__mode_t = uint32 /* types.h:150:26 */ // Type of file attribute bitmasks. +type X__nlink_t = uint64 /* types.h:151:27 */ // Type of file link counts. +type X__off_t = int64 /* types.h:152:25 */ // Type of file sizes and offsets. +type X__off64_t = int64 /* types.h:153:27 */ // Type of file sizes and offsets (LFS). +type X__pid_t = int32 /* types.h:154:25 */ // Type of process identifications. +type X__fsid_t = struct{ F__val [2]int32 } /* types.h:155:26 */ // Type of file system IDs. +type X__clock_t = int64 /* types.h:156:27 */ // Type of CPU usage counts. +type X__rlim_t = uint64 /* types.h:157:26 */ // Type for resource measurement. +type X__rlim64_t = uint64 /* types.h:158:28 */ // Type for resource measurement (LFS). +type X__id_t = uint32 /* types.h:159:24 */ // General type for IDs. +type X__time_t = int64 /* types.h:160:26 */ // Seconds since the Epoch. +type X__useconds_t = uint32 /* types.h:161:30 */ // Count of microseconds. +type X__suseconds_t = int64 /* types.h:162:31 */ // Signed count of microseconds. + +type X__daddr_t = int32 /* types.h:164:27 */ // The type of a disk address. +type X__key_t = int32 /* types.h:165:25 */ // Type of an IPC key. + +// Clock ID used in clock and timer functions. +type X__clockid_t = int32 /* types.h:168:29 */ + +// Timer ID returned by `timer_create'. +type X__timer_t = uintptr /* types.h:171:12 */ + +// Type to represent block size. +type X__blksize_t = int64 /* types.h:174:29 */ + +// Types from the Large File Support interface. 
+ +// Type to count number of disk blocks. +type X__blkcnt_t = int64 /* types.h:179:28 */ +type X__blkcnt64_t = int64 /* types.h:180:30 */ + +// Type to count file system blocks. +type X__fsblkcnt_t = uint64 /* types.h:183:30 */ +type X__fsblkcnt64_t = uint64 /* types.h:184:32 */ + +// Type to count file system nodes. +type X__fsfilcnt_t = uint64 /* types.h:187:30 */ +type X__fsfilcnt64_t = uint64 /* types.h:188:32 */ + +// Type of miscellaneous file system fields. +type X__fsword_t = int64 /* types.h:191:28 */ + +type X__ssize_t = int64 /* types.h:193:27 */ // Type of a byte count, or error. + +// Signed long type used in system calls. +type X__syscall_slong_t = int64 /* types.h:196:33 */ +// Unsigned long type used in system calls. +type X__syscall_ulong_t = uint64 /* types.h:198:33 */ + +// These few don't really vary by system, they always correspond +// +// to one of the other defined types. +type X__loff_t = X__off64_t /* types.h:202:19 */ // Type of file sizes and offsets (LFS). +type X__caddr_t = uintptr /* types.h:203:14 */ + +// Duplicates info from stdint.h but this is used in unistd.h. +type X__intptr_t = int64 /* types.h:206:25 */ + +// Duplicate info from sys/socket.h. +type X__socklen_t = uint32 /* types.h:209:23 */ + +// C99: An integer type that can be accessed as an atomic entity, +// +// even in the presence of asynchronous interrupts. +// It is not currently necessary for this to be machine-specific. +type X__sig_atomic_t = int32 /* types.h:214:13 */ + +// Seconds since the Epoch, visible to user code when time_t is too +// narrow only for consistency with the old way of widening too-narrow +// types. User code should never use __time64_t. + +type Pid_t = X__pid_t /* termios.h:30:17 */ + +// Get the system-dependent definitions of `struct termios', `tcflag_t', +// `cc_t', `speed_t', and all the macros specifying the flag bits. +// termios type and macro definitions. Linux version. +// Copyright (C) 1993-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +type Cc_t = uint8 /* termios.h:23:23 */ +type Speed_t = uint32 /* termios.h:24:22 */ +type Tcflag_t = uint32 /* termios.h:25:22 */ + +// struct termios definition. Linux/generic version. +// Copyright (C) 2019-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library. If not, see +// . + +type Termios = struct { + Fc_iflag Tcflag_t + Fc_oflag Tcflag_t + Fc_cflag Tcflag_t + Fc_lflag Tcflag_t + Fc_line Cc_t + Fc_cc [32]Cc_t + F__ccgo_pad1 [3]byte + Fc_ispeed Speed_t + Fc_ospeed Speed_t +} /* termios-struct.h:24:1 */ + +// - +// Copyright (c) 1982, 1986, 1993 +// The Regents of the University of California. All rights reserved. +// (c) UNIX System Laboratories, Inc. +// All or some portions of this file are derived from material licensed +// to the University of California by American Telephone and Telegraph +// Co. or Unix System Laboratories, Inc. and are reproduced herein with +// the permission of UNIX System Laboratories, Inc. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// 1. Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// 4. Neither the name of the University nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +// OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +// OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +// SUCH DAMAGE. +// +// @(#)ttydefaults.h 8.4 (Berkeley) 1/21/94 + +// System wide defaults for terminal state. Linux version. + +// Defaults on "first" open. + +// Control Character Defaults +// compat + +// PROTECTED INCLUSION ENDS HERE + +// #define TTYDEFCHARS to include an array of default control characters. + +var _ int8 /* gen.c:2:13: */ diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/time/capi_linux_loong64.go temporal-1.22.5/src/vendor/modernc.org/libc/time/capi_linux_loong64.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/time/capi_linux_loong64.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/time/capi_linux_loong64.go 2024-02-23 09:46:15.000000000 +0000 @@ -0,0 +1,5 @@ +// Code generated by 'ccgo time/gen.c -crt-import-path -export-defines -export-enums -export-externs X -export-fields F -export-structs -export-typedefs -header -hide _OSSwapInt16,_OSSwapInt32,_OSSwapInt64 -ignore-unsupported-alignment -o time/time_linux_amd64.go -pkgname time', DO NOT EDIT. 
+ +package time + +var CAPI = map[string]struct{}{} diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/time/time_linux_loong64.go temporal-1.22.5/src/vendor/modernc.org/libc/time/time_linux_loong64.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/time/time_linux_loong64.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/time/time_linux_loong64.go 2024-02-23 09:46:15.000000000 +0000 @@ -0,0 +1,679 @@ +// Code generated by 'ccgo time/gen.c -crt-import-path "" -export-defines "" -export-enums "" -export-externs X -export-fields F -export-structs "" -export-typedefs "" -header -hide _OSSwapInt16,_OSSwapInt32,_OSSwapInt64 -ignore-unsupported-alignment -o time/time_linux_amd64.go -pkgname time', DO NOT EDIT. + +package time + +import ( + "math" + "reflect" + "sync/atomic" + "unsafe" +) + +var _ = math.Pi +var _ reflect.Kind +var _ atomic.Value +var _ unsafe.Pointer + +const ( + CLOCK_BOOTTIME = 7 // time.h:60:1: + CLOCK_BOOTTIME_ALARM = 9 // time.h:64:1: + CLOCK_MONOTONIC = 1 // time.h:48:1: + CLOCK_MONOTONIC_COARSE = 6 // time.h:58:1: + CLOCK_MONOTONIC_RAW = 4 // time.h:54:1: + CLOCK_PROCESS_CPUTIME_ID = 2 // time.h:50:1: + CLOCK_REALTIME = 0 // time.h:46:1: + CLOCK_REALTIME_ALARM = 8 // time.h:62:1: + CLOCK_REALTIME_COARSE = 5 // time.h:56:1: + CLOCK_TAI = 11 // time.h:66:1: + CLOCK_THREAD_CPUTIME_ID = 3 // time.h:52:1: + TIMER_ABSTIME = 1 // time.h:69:1: + TIME_UTC = 1 // time.h:65:1: + X_ATFILE_SOURCE = 1 // features.h:342:1: + X_BITS_ENDIANNESS_H = 1 // endianness.h:2:1: + X_BITS_ENDIAN_H = 1 // endian.h:20:1: + X_BITS_TIME64_H = 1 // time64.h:24:1: + X_BITS_TIME_H = 1 // time.h:24:1: + X_BITS_TYPESIZES_H = 1 // typesizes.h:24:1: + X_BITS_TYPES_H = 1 // types.h:24:1: + X_BITS_TYPES_LOCALE_T_H = 1 // locale_t.h:20:1: + X_BITS_TYPES___LOCALE_T_H = 1 // __locale_t.h:21:1: + X_BSD_SIZE_T_ = 0 // stddef.h:189:1: + X_BSD_SIZE_T_DEFINED_ = 0 // stddef.h:192:1: + X_DEFAULT_SOURCE = 1 // features.h:227:1: + X_FEATURES_H = 1 // features.h:19:1: + X_FILE_OFFSET_BITS = 64 // :25:1: + X_GCC_SIZE_T = 0 // stddef.h:195:1: + X_LP64 = 1 // :284:1: + X_POSIX_C_SOURCE = 200809 // features.h:281:1: + X_POSIX_SOURCE = 1 // features.h:279:1: + X_SIZET_ = 0 // stddef.h:196:1: + X_SIZE_T = 0 // stddef.h:183:1: + X_SIZE_T_ = 0 // stddef.h:188:1: + X_SIZE_T_DECLARED = 0 // stddef.h:193:1: + X_SIZE_T_DEFINED = 0 // stddef.h:191:1: + X_SIZE_T_DEFINED_ = 0 // stddef.h:190:1: + X_STDC_PREDEF_H = 1 // :162:1: + X_STRUCT_TIMESPEC = 1 // struct_timespec.h:3:1: + X_SYS_CDEFS_H = 1 // cdefs.h:19:1: + X_SYS_SIZE_T_H = 0 // stddef.h:184:1: + X_TIME_H = 1 // time.h:23:1: + X_T_SIZE = 0 // stddef.h:186:1: + X_T_SIZE_ = 0 // stddef.h:185:1: + Linux = 1 // :231:1: + Unix = 1 // :177:1: +) + +type Ptrdiff_t = int64 /* :3:26 */ + +type Size_t = uint64 /* :9:23 */ + +type Wchar_t = int32 /* :15:24 */ + +type X__int128_t = struct { + Flo int64 + Fhi int64 +} /* :21:43 */ // must match modernc.org/mathutil.Int128 +type X__uint128_t = struct { + Flo uint64 + Fhi uint64 +} /* :22:44 */ // must match modernc.org/mathutil.Int128 + +type X__builtin_va_list = uintptr /* :46:14 */ +type X__float128 = float64 /* :47:21 */ + +// Wide character type. +// Locale-writers should change this as necessary to +// be big enough to hold unique values not between 0 and 127, +// and not (wchar_t) -1, for each defined multibyte character. + +// Define this type if we are doing the whole job, +// or if we want this type in particular. + +// A null pointer constant. 
+ +// This defines CLOCKS_PER_SEC, which is the number of processor clock +// ticks per second, and possibly a number of other constants. +// System-dependent timing definitions. Linux version. +// Copyright (C) 1996-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Determine the wordsize from the preprocessor defines. + +// Both x86-64 and x32 use the 64-bit system call interface. +// Bit size of the time_t type at glibc build time, x86-64 and x32 case. +// Copyright (C) 2018-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// For others, time size is word size. + +// Convenience types. +type X__u_char = uint8 /* types.h:31:23 */ +type X__u_short = uint16 /* types.h:32:28 */ +type X__u_int = uint32 /* types.h:33:22 */ +type X__u_long = uint64 /* types.h:34:27 */ + +// Fixed-size types, underlying types depend on word size and compiler. +type X__int8_t = int8 /* types.h:37:21 */ +type X__uint8_t = uint8 /* types.h:38:23 */ +type X__int16_t = int16 /* types.h:39:26 */ +type X__uint16_t = uint16 /* types.h:40:28 */ +type X__int32_t = int32 /* types.h:41:20 */ +type X__uint32_t = uint32 /* types.h:42:22 */ +type X__int64_t = int64 /* types.h:44:25 */ +type X__uint64_t = uint64 /* types.h:45:27 */ + +// Smallest types with at least a given width. +type X__int_least8_t = X__int8_t /* types.h:52:18 */ +type X__uint_least8_t = X__uint8_t /* types.h:53:19 */ +type X__int_least16_t = X__int16_t /* types.h:54:19 */ +type X__uint_least16_t = X__uint16_t /* types.h:55:20 */ +type X__int_least32_t = X__int32_t /* types.h:56:19 */ +type X__uint_least32_t = X__uint32_t /* types.h:57:20 */ +type X__int_least64_t = X__int64_t /* types.h:58:19 */ +type X__uint_least64_t = X__uint64_t /* types.h:59:20 */ + +// quad_t is also 64 bits. +type X__quad_t = int64 /* types.h:63:18 */ +type X__u_quad_t = uint64 /* types.h:64:27 */ + +// Largest integral types. +type X__intmax_t = int64 /* types.h:72:18 */ +type X__uintmax_t = uint64 /* types.h:73:27 */ + +// The machine-dependent file defines __*_T_TYPE +// macros for each of the OS types we define below. The definitions +// of those macros must use the following macros for underlying types. +// We define __S_TYPE and __U_TYPE for the signed and unsigned +// variants of each of the following integer types on this machine. +// +// 16 -- "natural" 16-bit type (always short) +// 32 -- "natural" 32-bit type (always int) +// 64 -- "natural" 64-bit type (long or long long) +// LONG32 -- 32-bit type, traditionally long +// QUAD -- 64-bit type, traditionally long long +// WORD -- natural type of __WORDSIZE bits (int or long) +// LONGWORD -- type of __WORDSIZE bits, traditionally long +// +// We distinguish WORD/LONGWORD, 32/LONG32, and 64/QUAD so that the +// conventional uses of `long' or `long long' type modifiers match the +// types we define, even when a less-adorned type would be the same size. +// This matters for (somewhat) portably writing printf/scanf formats for +// these types, where using the appropriate l or ll format modifiers can +// make the typedefs and the formats match up across all GNU platforms. If +// we used `long' when it's 64 bits where `long long' is expected, then the +// compiler would warn about the formats not matching the argument types, +// and the programmer changing them to shut up the compiler would break the +// program's portability. +// +// Here we assume what is presently the case in all the GCC configurations +// we support: long long is always 64 bits, long is always word/address size, +// and int is always 32 bits. + +// No need to mark the typedef with __extension__. +// bits/typesizes.h -- underlying types for *_t. Linux/x86-64 version. +// Copyright (C) 2012-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. 
+// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// See for the meaning of these macros. This file exists so +// that need not vary across different GNU platforms. + +// X32 kernel interface is 64-bit. + +// Tell the libc code that off_t and off64_t are actually the same type +// for all ABI purposes, even if possibly expressed as different base types +// for C type-checking purposes. + +// Same for ino_t and ino64_t. + +// And for __rlim_t and __rlim64_t. + +// And for fsblkcnt_t, fsblkcnt64_t, fsfilcnt_t and fsfilcnt64_t. + +// Number of descriptors that can fit in an `fd_set'. + +// bits/time64.h -- underlying types for __time64_t. Generic version. +// Copyright (C) 2018-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Define __TIME64_T_TYPE so that it is always a 64-bit type. + +// If we already have 64-bit time type then use it. + +type X__dev_t = uint64 /* types.h:145:25 */ // Type of device numbers. +type X__uid_t = uint32 /* types.h:146:25 */ // Type of user identifications. +type X__gid_t = uint32 /* types.h:147:25 */ // Type of group identifications. +type X__ino_t = uint64 /* types.h:148:25 */ // Type of file serial numbers. +type X__ino64_t = uint64 /* types.h:149:27 */ // Type of file serial numbers (LFS). +type X__mode_t = uint32 /* types.h:150:26 */ // Type of file attribute bitmasks. +type X__nlink_t = uint64 /* types.h:151:27 */ // Type of file link counts. +type X__off_t = int64 /* types.h:152:25 */ // Type of file sizes and offsets. +type X__off64_t = int64 /* types.h:153:27 */ // Type of file sizes and offsets (LFS). +type X__pid_t = int32 /* types.h:154:25 */ // Type of process identifications. +type X__fsid_t = struct{ F__val [2]int32 } /* types.h:155:26 */ // Type of file system IDs. +type X__clock_t = int64 /* types.h:156:27 */ // Type of CPU usage counts. +type X__rlim_t = uint64 /* types.h:157:26 */ // Type for resource measurement. +type X__rlim64_t = uint64 /* types.h:158:28 */ // Type for resource measurement (LFS). +type X__id_t = uint32 /* types.h:159:24 */ // General type for IDs. +type X__time_t = int64 /* types.h:160:26 */ // Seconds since the Epoch. +type X__useconds_t = uint32 /* types.h:161:30 */ // Count of microseconds. 
+type X__suseconds_t = int64 /* types.h:162:31 */ // Signed count of microseconds. + +type X__daddr_t = int32 /* types.h:164:27 */ // The type of a disk address. +type X__key_t = int32 /* types.h:165:25 */ // Type of an IPC key. + +// Clock ID used in clock and timer functions. +type X__clockid_t = int32 /* types.h:168:29 */ + +// Timer ID returned by `timer_create'. +type X__timer_t = uintptr /* types.h:171:12 */ + +// Type to represent block size. +type X__blksize_t = int64 /* types.h:174:29 */ + +// Types from the Large File Support interface. + +// Type to count number of disk blocks. +type X__blkcnt_t = int64 /* types.h:179:28 */ +type X__blkcnt64_t = int64 /* types.h:180:30 */ + +// Type to count file system blocks. +type X__fsblkcnt_t = uint64 /* types.h:183:30 */ +type X__fsblkcnt64_t = uint64 /* types.h:184:32 */ + +// Type to count file system nodes. +type X__fsfilcnt_t = uint64 /* types.h:187:30 */ +type X__fsfilcnt64_t = uint64 /* types.h:188:32 */ + +// Type of miscellaneous file system fields. +type X__fsword_t = int64 /* types.h:191:28 */ + +type X__ssize_t = int64 /* types.h:193:27 */ // Type of a byte count, or error. + +// Signed long type used in system calls. +type X__syscall_slong_t = int64 /* types.h:196:33 */ +// Unsigned long type used in system calls. +type X__syscall_ulong_t = uint64 /* types.h:198:33 */ + +// These few don't really vary by system, they always correspond +// +// to one of the other defined types. +type X__loff_t = X__off64_t /* types.h:202:19 */ // Type of file sizes and offsets (LFS). +type X__caddr_t = uintptr /* types.h:203:14 */ + +// Duplicates info from stdint.h but this is used in unistd.h. +type X__intptr_t = int64 /* types.h:206:25 */ + +// Duplicate info from sys/socket.h. +type X__socklen_t = uint32 /* types.h:209:23 */ + +// C99: An integer type that can be accessed as an atomic entity, +// +// even in the presence of asynchronous interrupts. +// It is not currently necessary for this to be machine-specific. +type X__sig_atomic_t = int32 /* types.h:214:13 */ + +// Seconds since the Epoch, visible to user code when time_t is too +// narrow only for consistency with the old way of widening too-narrow +// types. User code should never use __time64_t. + +// ISO/IEC 9899:1999 7.23.1: Components of time +// The macro `CLOCKS_PER_SEC' is an expression with type `clock_t' that is +// the number per second of the value returned by the `clock' function. +// CAE XSH, Issue 4, Version 2: +// The value of CLOCKS_PER_SEC is required to be 1 million on all +// XSI-conformant systems. + +// Identifier for system-wide realtime clock. +// Monotonic system-wide clock. +// High-resolution timer from the CPU. +// Thread-specific CPU-time clock. +// Monotonic system-wide clock, not adjusted for frequency scaling. +// Identifier for system-wide realtime clock, updated only on ticks. +// Monotonic system-wide clock, updated only on ticks. +// Monotonic system-wide clock that includes time spent in suspension. +// Like CLOCK_REALTIME but also wakes suspended system. +// Like CLOCK_BOOTTIME but also wakes suspended system. +// Like CLOCK_REALTIME but in International Atomic Time. + +// Flag to indicate time is absolute. + +// Many of the typedefs and structs whose official home is this header +// may also need to be defined by other headers. + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. 
+// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Returned by `clock'. +type Clock_t = X__clock_t /* clock_t.h:7:19 */ + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Returned by `time'. +type Time_t = X__time_t /* time_t.h:7:18 */ + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// ISO C `broken-down time' structure. +type Tm = struct { + Ftm_sec int32 + Ftm_min int32 + Ftm_hour int32 + Ftm_mday int32 + Ftm_mon int32 + Ftm_year int32 + Ftm_wday int32 + Ftm_yday int32 + Ftm_isdst int32 + F__ccgo_pad1 [4]byte + Ftm_gmtoff int64 + Ftm_zone uintptr +} /* struct_tm.h:7:1 */ + +// NB: Include guard matches what uses. + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. 
+// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Endian macros for string.h functions +// Copyright (C) 1992-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Definitions for byte order, according to significance of bytes, +// from low addresses to high addresses. The value is what you get by +// putting '4' in the most significant byte, '3' in the second most +// significant byte, '2' in the second least significant byte, and '1' +// in the least significant byte, and then writing down one digit for +// each byte, starting with the byte at the lowest address at the left, +// and proceeding to the byte with the highest address at the right. + +// This file defines `__BYTE_ORDER' for the particular machine. + +// i386/x86_64 are little-endian. + +// Some machines may need to use a different endianness for floating point +// values. + +// POSIX.1b structure for a time value. This is like a `struct timeval' but +// +// has nanoseconds instead of microseconds. +type Timespec = struct { + Ftv_sec X__time_t + Ftv_nsec X__syscall_slong_t +} /* struct_timespec.h:10:1 */ + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Clock ID used in clock and timer functions. +type Clockid_t = X__clockid_t /* clockid_t.h:7:21 */ + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. 
+// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Timer ID returned by `timer_create'. +type Timer_t = X__timer_t /* timer_t.h:7:19 */ + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// NB: Include guard matches what uses. + +// POSIX.1b structure for timer start values and intervals. +type Itimerspec = struct { + Fit_interval struct { + Ftv_sec X__time_t + Ftv_nsec X__syscall_slong_t + } + Fit_value struct { + Ftv_sec X__time_t + Ftv_nsec X__syscall_slong_t + } +} /* struct_itimerspec.h:8:1 */ + +type Pid_t = X__pid_t /* time.h:54:17 */ + +// Definition of locale_t. +// Copyright (C) 2017-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Definition of struct __locale_struct and __locale_t. +// Copyright (C) 1997-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// Contributed by Ulrich Drepper , 1997. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. 
+// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// POSIX.1-2008: the locale_t type, representing a locale context +// (implementation-namespace version). This type should be treated +// as opaque by applications; some details are exposed for the sake of +// efficiency in e.g. ctype functions. + +type X__locale_struct = struct { + F__locales [13]uintptr + F__ctype_b uintptr + F__ctype_tolower uintptr + F__ctype_toupper uintptr + F__names [13]uintptr +} /* __locale_t.h:28:1 */ + +type X__locale_t = uintptr /* __locale_t.h:42:32 */ + +type Locale_t = X__locale_t /* locale_t.h:24:20 */ + +var _ int8 /* gen.c:2:13: */ diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/unistd/capi_linux_loong64.go temporal-1.22.5/src/vendor/modernc.org/libc/unistd/capi_linux_loong64.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/unistd/capi_linux_loong64.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/unistd/capi_linux_loong64.go 2024-02-23 09:46:15.000000000 +0000 @@ -0,0 +1,5 @@ +// Code generated by 'ccgo unistd/gen.c -crt-import-path -export-defines -export-enums -export-externs X -export-fields F -export-structs -export-typedefs -header -hide _OSSwapInt16,_OSSwapInt32,_OSSwapInt64 -ignore-unsupported-alignment -o unistd/unistd_linux_amd64.go -pkgname unistd', DO NOT EDIT. + +package unistd + +var CAPI = map[string]struct{}{} diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/unistd/unistd_linux_loong64.go temporal-1.22.5/src/vendor/modernc.org/libc/unistd/unistd_linux_loong64.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/unistd/unistd_linux_loong64.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/unistd/unistd_linux_loong64.go 2024-02-23 09:46:15.000000000 +0000 @@ -0,0 +1,1592 @@ +// Code generated by 'ccgo unistd/gen.c -crt-import-path "" -export-defines "" -export-enums "" -export-externs X -export-fields F -export-structs "" -export-typedefs "" -header -hide _OSSwapInt16,_OSSwapInt32,_OSSwapInt64 -ignore-unsupported-alignment -o unistd/unistd_linux_amd64.go -pkgname unistd', DO NOT EDIT. 
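[Aside, not part of the vendored patch.] The new capi_linux_loong64.go added just above follows the modernc.org/libc convention of publishing a CAPI set naming the C-level symbols a generated package exports; it is empty here, apparently because the loong64 unistd bindings that follow define only constants and types. A minimal sketch, assuming only the shape of that generated declaration (the "getpid" lookup name is purely hypothetical), of how a caller might consult such a registry before dispatching to a C symbol:

package main

import "fmt"

// CAPI mirrors the shape of the generated registry; for the loong64 unistd
// stub above it is empty, so every lookup misses.
var CAPI = map[string]struct{}{}

func main() {
	// Probe the registry before assuming a C-level symbol is available.
	if _, ok := CAPI["getpid"]; !ok {
		fmt.Println("getpid is not exported by these bindings")
	}
}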
+ +package unistd + +import ( + "math" + "reflect" + "sync/atomic" + "unsafe" +) + +var _ = math.Pi +var _ reflect.Kind +var _ atomic.Value +var _ unsafe.Pointer + +const ( + F_LOCK = 1 // unistd.h:1074:1: + F_OK = 0 // unistd.h:284:1: + F_TEST = 3 // unistd.h:1076:1: + F_TLOCK = 2 // unistd.h:1075:1: + F_ULOCK = 0 // unistd.h:1073:1: + L_INCR = 1 // unistd.h:323:1: + L_SET = 0 // unistd.h:322:1: + L_XTND = 2 // unistd.h:324:1: + R_OK = 4 // unistd.h:281:1: + SEEK_CUR = 1 // unistd.h:312:1: + SEEK_END = 2 // unistd.h:313:1: + SEEK_SET = 0 // unistd.h:311:1: + STDERR_FILENO = 2 // unistd.h:212:1: + STDIN_FILENO = 0 // unistd.h:210:1: + STDOUT_FILENO = 1 // unistd.h:211:1: + W_OK = 2 // unistd.h:282:1: + X_OK = 1 // unistd.h:283:1: + X_ATFILE_SOURCE = 1 // features.h:342:1: + X_BITS_POSIX_OPT_H = 1 // posix_opt.h:20:1: + X_BITS_TIME64_H = 1 // time64.h:24:1: + X_BITS_TYPESIZES_H = 1 // typesizes.h:24:1: + X_BITS_TYPES_H = 1 // types.h:24:1: + X_BSD_SIZE_T_ = 0 // stddef.h:189:1: + X_BSD_SIZE_T_DEFINED_ = 0 // stddef.h:192:1: + X_DEFAULT_SOURCE = 1 // features.h:227:1: + X_FEATURES_H = 1 // features.h:19:1: + X_FILE_OFFSET_BITS = 64 // :25:1: + X_GCC_SIZE_T = 0 // stddef.h:195:1: + X_GETOPT_CORE_H = 1 // getopt_core.h:21:1: + X_GETOPT_POSIX_H = 1 // getopt_posix.h:21:1: + X_LFS64_ASYNCHRONOUS_IO = 1 // posix_opt.h:117:1: + X_LFS64_LARGEFILE = 1 // posix_opt.h:121:1: + X_LFS64_STDIO = 1 // posix_opt.h:122:1: + X_LFS_ASYNCHRONOUS_IO = 1 // posix_opt.h:112:1: + X_LFS_LARGEFILE = 1 // posix_opt.h:120:1: + X_LP64 = 1 // :284:1: + X_POSIX2_CHAR_TERM = 200809 // posix_opt.h:179:1: + X_POSIX2_C_BIND = 200809 // unistd.h:74:1: + X_POSIX2_C_DEV = 200809 // unistd.h:78:1: + X_POSIX2_C_VERSION = 200809 // unistd.h:70:1: + X_POSIX2_LOCALEDEF = 200809 // unistd.h:86:1: + X_POSIX2_SW_DEV = 200809 // unistd.h:82:1: + X_POSIX2_VERSION = 200809 // unistd.h:67:1: + X_POSIX_ADVISORY_INFO = 200809 // posix_opt.h:170:1: + X_POSIX_ASYNCHRONOUS_IO = 200809 // posix_opt.h:109:1: + X_POSIX_ASYNC_IO = 1 // posix_opt.h:110:1: + X_POSIX_BARRIERS = 200809 // posix_opt.h:155:1: + X_POSIX_CHOWN_RESTRICTED = 0 // posix_opt.h:53:1: + X_POSIX_CLOCK_SELECTION = 200809 // posix_opt.h:167:1: + X_POSIX_CPUTIME = 0 // posix_opt.h:128:1: + X_POSIX_C_SOURCE = 200809 // features.h:281:1: + X_POSIX_FSYNC = 200809 // posix_opt.h:38:1: + X_POSIX_IPV6 = 200809 // posix_opt.h:173:1: + X_POSIX_JOB_CONTROL = 1 // posix_opt.h:23:1: + X_POSIX_MAPPED_FILES = 200809 // posix_opt.h:41:1: + X_POSIX_MEMLOCK = 200809 // posix_opt.h:44:1: + X_POSIX_MEMLOCK_RANGE = 200809 // posix_opt.h:47:1: + X_POSIX_MEMORY_PROTECTION = 200809 // posix_opt.h:50:1: + X_POSIX_MESSAGE_PASSING = 200809 // posix_opt.h:158:1: + X_POSIX_MONOTONIC_CLOCK = 0 // posix_opt.h:164:1: + X_POSIX_NO_TRUNC = 1 // posix_opt.h:60:1: + X_POSIX_PRIORITIZED_IO = 200809 // posix_opt.h:114:1: + X_POSIX_PRIORITY_SCHEDULING = 200809 // posix_opt.h:32:1: + X_POSIX_RAW_SOCKETS = 200809 // posix_opt.h:176:1: + X_POSIX_READER_WRITER_LOCKS = 200809 // posix_opt.h:137:1: + X_POSIX_REALTIME_SIGNALS = 200809 // posix_opt.h:106:1: + X_POSIX_REENTRANT_FUNCTIONS = 1 // posix_opt.h:75:1: + X_POSIX_REGEXP = 1 // posix_opt.h:134:1: + X_POSIX_SAVED_IDS = 1 // posix_opt.h:26:1: + X_POSIX_SEMAPHORES = 200809 // posix_opt.h:103:1: + X_POSIX_SHARED_MEMORY_OBJECTS = 200809 // posix_opt.h:125:1: + X_POSIX_SHELL = 1 // posix_opt.h:140:1: + X_POSIX_SOURCE = 1 // features.h:279:1: + X_POSIX_SPAWN = 200809 // posix_opt.h:149:1: + X_POSIX_SPIN_LOCKS = 200809 // posix_opt.h:146:1: + X_POSIX_SPORADIC_SERVER = -1 // 
posix_opt.h:182:1: + X_POSIX_SYNCHRONIZED_IO = 200809 // posix_opt.h:35:1: + X_POSIX_THREADS = 200809 // posix_opt.h:72:1: + X_POSIX_THREAD_ATTR_STACKADDR = 200809 // posix_opt.h:85:1: + X_POSIX_THREAD_ATTR_STACKSIZE = 200809 // posix_opt.h:82:1: + X_POSIX_THREAD_CPUTIME = 0 // posix_opt.h:131:1: + X_POSIX_THREAD_PRIORITY_SCHEDULING = 200809 // posix_opt.h:79:1: + X_POSIX_THREAD_PRIO_INHERIT = 200809 // posix_opt.h:88:1: + X_POSIX_THREAD_PRIO_PROTECT = 200809 // posix_opt.h:92:1: + X_POSIX_THREAD_PROCESS_SHARED = 200809 // posix_opt.h:161:1: + X_POSIX_THREAD_ROBUST_PRIO_INHERIT = 200809 // posix_opt.h:96:1: + X_POSIX_THREAD_ROBUST_PRIO_PROTECT = -1 // posix_opt.h:99:1: + X_POSIX_THREAD_SAFE_FUNCTIONS = 200809 // posix_opt.h:76:1: + X_POSIX_THREAD_SPORADIC_SERVER = -1 // posix_opt.h:183:1: + X_POSIX_TIMEOUTS = 200809 // posix_opt.h:143:1: + X_POSIX_TIMERS = 200809 // posix_opt.h:152:1: + X_POSIX_TRACE = -1 // posix_opt.h:186:1: + X_POSIX_TRACE_EVENT_FILTER = -1 // posix_opt.h:187:1: + X_POSIX_TRACE_INHERIT = -1 // posix_opt.h:188:1: + X_POSIX_TRACE_LOG = -1 // posix_opt.h:189:1: + X_POSIX_TYPED_MEMORY_OBJECTS = -1 // posix_opt.h:192:1: + X_POSIX_V6_LP64_OFF64 = 1 // environments.h:62:1: + X_POSIX_V6_LPBIG_OFFBIG = -1 // environments.h:57:1: + X_POSIX_V7_LP64_OFF64 = 1 // environments.h:61:1: + X_POSIX_V7_LPBIG_OFFBIG = -1 // environments.h:56:1: + X_POSIX_VDISABLE = 0 // posix_opt.h:57:1: + X_POSIX_VERSION = 200809 // unistd.h:34:1: + X_SIZET_ = 0 // stddef.h:196:1: + X_SIZE_T = 0 // stddef.h:183:1: + X_SIZE_T_ = 0 // stddef.h:188:1: + X_SIZE_T_DECLARED = 0 // stddef.h:193:1: + X_SIZE_T_DEFINED = 0 // stddef.h:191:1: + X_SIZE_T_DEFINED_ = 0 // stddef.h:190:1: + X_STDC_PREDEF_H = 1 // :162:1: + X_SYS_CDEFS_H = 1 // cdefs.h:19:1: + X_SYS_SIZE_T_H = 0 // stddef.h:184:1: + X_T_SIZE = 0 // stddef.h:186:1: + X_T_SIZE_ = 0 // stddef.h:185:1: + X_UNISTD_H = 1 // unistd.h:23:1: + X_XBS5_LP64_OFF64 = 1 // environments.h:63:1: + X_XBS5_LPBIG_OFFBIG = -1 // environments.h:58:1: + X_XOPEN_ENH_I18N = 1 // unistd.h:112:1: + X_XOPEN_LEGACY = 1 // unistd.h:115:1: + X_XOPEN_REALTIME = 1 // posix_opt.h:63:1: + X_XOPEN_REALTIME_THREADS = 1 // posix_opt.h:66:1: + X_XOPEN_SHM = 1 // posix_opt.h:69:1: + X_XOPEN_UNIX = 1 // unistd.h:108:1: + X_XOPEN_VERSION = 700 // unistd.h:90:1: + X_XOPEN_XCU_VERSION = 4 // unistd.h:100:1: + X_XOPEN_XPG2 = 1 // unistd.h:103:1: + X_XOPEN_XPG3 = 1 // unistd.h:104:1: + X_XOPEN_XPG4 = 1 // unistd.h:105:1: + Linux = 1 // :231:1: + Unix = 1 // :177:1: +) + +// Get the `_PC_*' symbols for the NAME argument to `pathconf' and `fpathconf'; +// the `_SC_*' symbols for the NAME argument to `sysconf'; +// and the `_CS_*' symbols for the NAME argument to `confstr'. +// `sysconf', `pathconf', and `confstr' NAME values. Generic version. +// Copyright (C) 1993-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Values for the NAME argument to `pathconf' and `fpathconf'. +const ( /* confname.h:24:1: */ + X_PC_LINK_MAX = 0 + X_PC_MAX_CANON = 1 + X_PC_MAX_INPUT = 2 + X_PC_NAME_MAX = 3 + X_PC_PATH_MAX = 4 + X_PC_PIPE_BUF = 5 + X_PC_CHOWN_RESTRICTED = 6 + X_PC_NO_TRUNC = 7 + X_PC_VDISABLE = 8 + X_PC_SYNC_IO = 9 + X_PC_ASYNC_IO = 10 + X_PC_PRIO_IO = 11 + X_PC_SOCK_MAXBUF = 12 + X_PC_FILESIZEBITS = 13 + X_PC_REC_INCR_XFER_SIZE = 14 + X_PC_REC_MAX_XFER_SIZE = 15 + X_PC_REC_MIN_XFER_SIZE = 16 + X_PC_REC_XFER_ALIGN = 17 + X_PC_ALLOC_SIZE_MIN = 18 + X_PC_SYMLINK_MAX = 19 + X_PC_2_SYMLINKS = 20 +) + +// Values for the NAME argument to `confstr'. +const ( /* confname.h:533:1: */ + X_CS_PATH = 0 // The default search path. + + X_CS_V6_WIDTH_RESTRICTED_ENVS = 1 + + X_CS_GNU_LIBC_VERSION = 2 + X_CS_GNU_LIBPTHREAD_VERSION = 3 + + X_CS_V5_WIDTH_RESTRICTED_ENVS = 4 + + X_CS_V7_WIDTH_RESTRICTED_ENVS = 5 + + X_CS_LFS_CFLAGS = 1000 + X_CS_LFS_LDFLAGS = 1001 + X_CS_LFS_LIBS = 1002 + X_CS_LFS_LINTFLAGS = 1003 + X_CS_LFS64_CFLAGS = 1004 + X_CS_LFS64_LDFLAGS = 1005 + X_CS_LFS64_LIBS = 1006 + X_CS_LFS64_LINTFLAGS = 1007 + + X_CS_XBS5_ILP32_OFF32_CFLAGS = 1100 + X_CS_XBS5_ILP32_OFF32_LDFLAGS = 1101 + X_CS_XBS5_ILP32_OFF32_LIBS = 1102 + X_CS_XBS5_ILP32_OFF32_LINTFLAGS = 1103 + X_CS_XBS5_ILP32_OFFBIG_CFLAGS = 1104 + X_CS_XBS5_ILP32_OFFBIG_LDFLAGS = 1105 + X_CS_XBS5_ILP32_OFFBIG_LIBS = 1106 + X_CS_XBS5_ILP32_OFFBIG_LINTFLAGS = 1107 + X_CS_XBS5_LP64_OFF64_CFLAGS = 1108 + X_CS_XBS5_LP64_OFF64_LDFLAGS = 1109 + X_CS_XBS5_LP64_OFF64_LIBS = 1110 + X_CS_XBS5_LP64_OFF64_LINTFLAGS = 1111 + X_CS_XBS5_LPBIG_OFFBIG_CFLAGS = 1112 + X_CS_XBS5_LPBIG_OFFBIG_LDFLAGS = 1113 + X_CS_XBS5_LPBIG_OFFBIG_LIBS = 1114 + X_CS_XBS5_LPBIG_OFFBIG_LINTFLAGS = 1115 + + X_CS_POSIX_V6_ILP32_OFF32_CFLAGS = 1116 + X_CS_POSIX_V6_ILP32_OFF32_LDFLAGS = 1117 + X_CS_POSIX_V6_ILP32_OFF32_LIBS = 1118 + X_CS_POSIX_V6_ILP32_OFF32_LINTFLAGS = 1119 + X_CS_POSIX_V6_ILP32_OFFBIG_CFLAGS = 1120 + X_CS_POSIX_V6_ILP32_OFFBIG_LDFLAGS = 1121 + X_CS_POSIX_V6_ILP32_OFFBIG_LIBS = 1122 + X_CS_POSIX_V6_ILP32_OFFBIG_LINTFLAGS = 1123 + X_CS_POSIX_V6_LP64_OFF64_CFLAGS = 1124 + X_CS_POSIX_V6_LP64_OFF64_LDFLAGS = 1125 + X_CS_POSIX_V6_LP64_OFF64_LIBS = 1126 + X_CS_POSIX_V6_LP64_OFF64_LINTFLAGS = 1127 + X_CS_POSIX_V6_LPBIG_OFFBIG_CFLAGS = 1128 + X_CS_POSIX_V6_LPBIG_OFFBIG_LDFLAGS = 1129 + X_CS_POSIX_V6_LPBIG_OFFBIG_LIBS = 1130 + X_CS_POSIX_V6_LPBIG_OFFBIG_LINTFLAGS = 1131 + + X_CS_POSIX_V7_ILP32_OFF32_CFLAGS = 1132 + X_CS_POSIX_V7_ILP32_OFF32_LDFLAGS = 1133 + X_CS_POSIX_V7_ILP32_OFF32_LIBS = 1134 + X_CS_POSIX_V7_ILP32_OFF32_LINTFLAGS = 1135 + X_CS_POSIX_V7_ILP32_OFFBIG_CFLAGS = 1136 + X_CS_POSIX_V7_ILP32_OFFBIG_LDFLAGS = 1137 + X_CS_POSIX_V7_ILP32_OFFBIG_LIBS = 1138 + X_CS_POSIX_V7_ILP32_OFFBIG_LINTFLAGS = 1139 + X_CS_POSIX_V7_LP64_OFF64_CFLAGS = 1140 + X_CS_POSIX_V7_LP64_OFF64_LDFLAGS = 1141 + X_CS_POSIX_V7_LP64_OFF64_LIBS = 1142 + X_CS_POSIX_V7_LP64_OFF64_LINTFLAGS = 1143 + X_CS_POSIX_V7_LPBIG_OFFBIG_CFLAGS = 1144 + X_CS_POSIX_V7_LPBIG_OFFBIG_LDFLAGS = 1145 + X_CS_POSIX_V7_LPBIG_OFFBIG_LIBS = 1146 + X_CS_POSIX_V7_LPBIG_OFFBIG_LINTFLAGS = 1147 + + X_CS_V6_ENV = 1148 + X_CS_V7_ENV = 1149 +) + +// Values for the argument to `sysconf'. 
+const ( /* confname.h:71:1: */ + X_SC_ARG_MAX = 0 + X_SC_CHILD_MAX = 1 + X_SC_CLK_TCK = 2 + X_SC_NGROUPS_MAX = 3 + X_SC_OPEN_MAX = 4 + X_SC_STREAM_MAX = 5 + X_SC_TZNAME_MAX = 6 + X_SC_JOB_CONTROL = 7 + X_SC_SAVED_IDS = 8 + X_SC_REALTIME_SIGNALS = 9 + X_SC_PRIORITY_SCHEDULING = 10 + X_SC_TIMERS = 11 + X_SC_ASYNCHRONOUS_IO = 12 + X_SC_PRIORITIZED_IO = 13 + X_SC_SYNCHRONIZED_IO = 14 + X_SC_FSYNC = 15 + X_SC_MAPPED_FILES = 16 + X_SC_MEMLOCK = 17 + X_SC_MEMLOCK_RANGE = 18 + X_SC_MEMORY_PROTECTION = 19 + X_SC_MESSAGE_PASSING = 20 + X_SC_SEMAPHORES = 21 + X_SC_SHARED_MEMORY_OBJECTS = 22 + X_SC_AIO_LISTIO_MAX = 23 + X_SC_AIO_MAX = 24 + X_SC_AIO_PRIO_DELTA_MAX = 25 + X_SC_DELAYTIMER_MAX = 26 + X_SC_MQ_OPEN_MAX = 27 + X_SC_MQ_PRIO_MAX = 28 + X_SC_VERSION = 29 + X_SC_PAGESIZE = 30 + X_SC_RTSIG_MAX = 31 + X_SC_SEM_NSEMS_MAX = 32 + X_SC_SEM_VALUE_MAX = 33 + X_SC_SIGQUEUE_MAX = 34 + X_SC_TIMER_MAX = 35 + + // Values for the argument to `sysconf' + // corresponding to _POSIX2_* symbols. + X_SC_BC_BASE_MAX = 36 + X_SC_BC_DIM_MAX = 37 + X_SC_BC_SCALE_MAX = 38 + X_SC_BC_STRING_MAX = 39 + X_SC_COLL_WEIGHTS_MAX = 40 + X_SC_EQUIV_CLASS_MAX = 41 + X_SC_EXPR_NEST_MAX = 42 + X_SC_LINE_MAX = 43 + X_SC_RE_DUP_MAX = 44 + X_SC_CHARCLASS_NAME_MAX = 45 + + X_SC_2_VERSION = 46 + X_SC_2_C_BIND = 47 + X_SC_2_C_DEV = 48 + X_SC_2_FORT_DEV = 49 + X_SC_2_FORT_RUN = 50 + X_SC_2_SW_DEV = 51 + X_SC_2_LOCALEDEF = 52 + + X_SC_PII = 53 + X_SC_PII_XTI = 54 + X_SC_PII_SOCKET = 55 + X_SC_PII_INTERNET = 56 + X_SC_PII_OSI = 57 + X_SC_POLL = 58 + X_SC_SELECT = 59 + X_SC_UIO_MAXIOV = 60 + X_SC_IOV_MAX = 60 + X_SC_PII_INTERNET_STREAM = 61 + X_SC_PII_INTERNET_DGRAM = 62 + X_SC_PII_OSI_COTS = 63 + X_SC_PII_OSI_CLTS = 64 + X_SC_PII_OSI_M = 65 + X_SC_T_IOV_MAX = 66 + + // Values according to POSIX 1003.1c (POSIX threads). 
+ X_SC_THREADS = 67 + X_SC_THREAD_SAFE_FUNCTIONS = 68 + X_SC_GETGR_R_SIZE_MAX = 69 + X_SC_GETPW_R_SIZE_MAX = 70 + X_SC_LOGIN_NAME_MAX = 71 + X_SC_TTY_NAME_MAX = 72 + X_SC_THREAD_DESTRUCTOR_ITERATIONS = 73 + X_SC_THREAD_KEYS_MAX = 74 + X_SC_THREAD_STACK_MIN = 75 + X_SC_THREAD_THREADS_MAX = 76 + X_SC_THREAD_ATTR_STACKADDR = 77 + X_SC_THREAD_ATTR_STACKSIZE = 78 + X_SC_THREAD_PRIORITY_SCHEDULING = 79 + X_SC_THREAD_PRIO_INHERIT = 80 + X_SC_THREAD_PRIO_PROTECT = 81 + X_SC_THREAD_PROCESS_SHARED = 82 + + X_SC_NPROCESSORS_CONF = 83 + X_SC_NPROCESSORS_ONLN = 84 + X_SC_PHYS_PAGES = 85 + X_SC_AVPHYS_PAGES = 86 + X_SC_ATEXIT_MAX = 87 + X_SC_PASS_MAX = 88 + + X_SC_XOPEN_VERSION = 89 + X_SC_XOPEN_XCU_VERSION = 90 + X_SC_XOPEN_UNIX = 91 + X_SC_XOPEN_CRYPT = 92 + X_SC_XOPEN_ENH_I18N = 93 + X_SC_XOPEN_SHM = 94 + + X_SC_2_CHAR_TERM = 95 + X_SC_2_C_VERSION = 96 + X_SC_2_UPE = 97 + + X_SC_XOPEN_XPG2 = 98 + X_SC_XOPEN_XPG3 = 99 + X_SC_XOPEN_XPG4 = 100 + + X_SC_CHAR_BIT = 101 + X_SC_CHAR_MAX = 102 + X_SC_CHAR_MIN = 103 + X_SC_INT_MAX = 104 + X_SC_INT_MIN = 105 + X_SC_LONG_BIT = 106 + X_SC_WORD_BIT = 107 + X_SC_MB_LEN_MAX = 108 + X_SC_NZERO = 109 + X_SC_SSIZE_MAX = 110 + X_SC_SCHAR_MAX = 111 + X_SC_SCHAR_MIN = 112 + X_SC_SHRT_MAX = 113 + X_SC_SHRT_MIN = 114 + X_SC_UCHAR_MAX = 115 + X_SC_UINT_MAX = 116 + X_SC_ULONG_MAX = 117 + X_SC_USHRT_MAX = 118 + + X_SC_NL_ARGMAX = 119 + X_SC_NL_LANGMAX = 120 + X_SC_NL_MSGMAX = 121 + X_SC_NL_NMAX = 122 + X_SC_NL_SETMAX = 123 + X_SC_NL_TEXTMAX = 124 + + X_SC_XBS5_ILP32_OFF32 = 125 + X_SC_XBS5_ILP32_OFFBIG = 126 + X_SC_XBS5_LP64_OFF64 = 127 + X_SC_XBS5_LPBIG_OFFBIG = 128 + + X_SC_XOPEN_LEGACY = 129 + X_SC_XOPEN_REALTIME = 130 + X_SC_XOPEN_REALTIME_THREADS = 131 + + X_SC_ADVISORY_INFO = 132 + X_SC_BARRIERS = 133 + X_SC_BASE = 134 + X_SC_C_LANG_SUPPORT = 135 + X_SC_C_LANG_SUPPORT_R = 136 + X_SC_CLOCK_SELECTION = 137 + X_SC_CPUTIME = 138 + X_SC_THREAD_CPUTIME = 139 + X_SC_DEVICE_IO = 140 + X_SC_DEVICE_SPECIFIC = 141 + X_SC_DEVICE_SPECIFIC_R = 142 + X_SC_FD_MGMT = 143 + X_SC_FIFO = 144 + X_SC_PIPE = 145 + X_SC_FILE_ATTRIBUTES = 146 + X_SC_FILE_LOCKING = 147 + X_SC_FILE_SYSTEM = 148 + X_SC_MONOTONIC_CLOCK = 149 + X_SC_MULTI_PROCESS = 150 + X_SC_SINGLE_PROCESS = 151 + X_SC_NETWORKING = 152 + X_SC_READER_WRITER_LOCKS = 153 + X_SC_SPIN_LOCKS = 154 + X_SC_REGEXP = 155 + X_SC_REGEX_VERSION = 156 + X_SC_SHELL = 157 + X_SC_SIGNALS = 158 + X_SC_SPAWN = 159 + X_SC_SPORADIC_SERVER = 160 + X_SC_THREAD_SPORADIC_SERVER = 161 + X_SC_SYSTEM_DATABASE = 162 + X_SC_SYSTEM_DATABASE_R = 163 + X_SC_TIMEOUTS = 164 + X_SC_TYPED_MEMORY_OBJECTS = 165 + X_SC_USER_GROUPS = 166 + X_SC_USER_GROUPS_R = 167 + X_SC_2_PBS = 168 + X_SC_2_PBS_ACCOUNTING = 169 + X_SC_2_PBS_LOCATE = 170 + X_SC_2_PBS_MESSAGE = 171 + X_SC_2_PBS_TRACK = 172 + X_SC_SYMLOOP_MAX = 173 + X_SC_STREAMS = 174 + X_SC_2_PBS_CHECKPOINT = 175 + + X_SC_V6_ILP32_OFF32 = 176 + X_SC_V6_ILP32_OFFBIG = 177 + X_SC_V6_LP64_OFF64 = 178 + X_SC_V6_LPBIG_OFFBIG = 179 + + X_SC_HOST_NAME_MAX = 180 + X_SC_TRACE = 181 + X_SC_TRACE_EVENT_FILTER = 182 + X_SC_TRACE_INHERIT = 183 + X_SC_TRACE_LOG = 184 + + X_SC_LEVEL1_ICACHE_SIZE = 185 + X_SC_LEVEL1_ICACHE_ASSOC = 186 + X_SC_LEVEL1_ICACHE_LINESIZE = 187 + X_SC_LEVEL1_DCACHE_SIZE = 188 + X_SC_LEVEL1_DCACHE_ASSOC = 189 + X_SC_LEVEL1_DCACHE_LINESIZE = 190 + X_SC_LEVEL2_CACHE_SIZE = 191 + X_SC_LEVEL2_CACHE_ASSOC = 192 + X_SC_LEVEL2_CACHE_LINESIZE = 193 + X_SC_LEVEL3_CACHE_SIZE = 194 + X_SC_LEVEL3_CACHE_ASSOC = 195 + X_SC_LEVEL3_CACHE_LINESIZE = 196 + X_SC_LEVEL4_CACHE_SIZE = 197 + X_SC_LEVEL4_CACHE_ASSOC = 198 + 
X_SC_LEVEL4_CACHE_LINESIZE = 199 + // Leave room here, maybe we need a few more cache levels some day. + + X_SC_IPV6 = 235 + X_SC_RAW_SOCKETS = 236 + + X_SC_V7_ILP32_OFF32 = 237 + X_SC_V7_ILP32_OFFBIG = 238 + X_SC_V7_LP64_OFF64 = 239 + X_SC_V7_LPBIG_OFFBIG = 240 + + X_SC_SS_REPL_MAX = 241 + + X_SC_TRACE_EVENT_NAME_MAX = 242 + X_SC_TRACE_NAME_MAX = 243 + X_SC_TRACE_SYS_MAX = 244 + X_SC_TRACE_USER_EVENT_MAX = 245 + + X_SC_XOPEN_STREAMS = 246 + + X_SC_THREAD_ROBUST_PRIO_INHERIT = 247 + X_SC_THREAD_ROBUST_PRIO_PROTECT = 248 +) + +type Ptrdiff_t = int64 /* :3:26 */ + +type Size_t = uint64 /* :9:23 */ + +type Wchar_t = int32 /* :15:24 */ + +type X__int128_t = struct { + Flo int64 + Fhi int64 +} /* :21:43 */ // must match modernc.org/mathutil.Int128 +type X__uint128_t = struct { + Flo uint64 + Fhi uint64 +} /* :22:44 */ // must match modernc.org/mathutil.Int128 + +type X__builtin_va_list = uintptr /* :46:14 */ +type X__float128 = float64 /* :47:21 */ + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// POSIX Standard: 2.10 Symbolic Constants + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// These are defined by the user (or the compiler) +// to specify the desired environment: +// +// __STRICT_ANSI__ ISO Standard C. +// _ISOC99_SOURCE Extensions to ISO C89 from ISO C99. +// _ISOC11_SOURCE Extensions to ISO C99 from ISO C11. +// _ISOC2X_SOURCE Extensions to ISO C99 from ISO C2X. +// __STDC_WANT_LIB_EXT2__ +// Extensions to ISO C99 from TR 27431-2:2010. +// __STDC_WANT_IEC_60559_BFP_EXT__ +// Extensions to ISO C11 from TS 18661-1:2014. +// __STDC_WANT_IEC_60559_FUNCS_EXT__ +// Extensions to ISO C11 from TS 18661-4:2015. +// __STDC_WANT_IEC_60559_TYPES_EXT__ +// Extensions to ISO C11 from TS 18661-3:2015. +// +// _POSIX_SOURCE IEEE Std 1003.1. +// _POSIX_C_SOURCE If ==1, like _POSIX_SOURCE; if >=2 add IEEE Std 1003.2; +// if >=199309L, add IEEE Std 1003.1b-1993; +// if >=199506L, add IEEE Std 1003.1c-1995; +// if >=200112L, all of IEEE 1003.1-2004 +// if >=200809L, all of IEEE 1003.1-2008 +// _XOPEN_SOURCE Includes POSIX and XPG things. 
Set to 500 if +// Single Unix conformance is wanted, to 600 for the +// sixth revision, to 700 for the seventh revision. +// _XOPEN_SOURCE_EXTENDED XPG things and X/Open Unix extensions. +// _LARGEFILE_SOURCE Some more functions for correct standard I/O. +// _LARGEFILE64_SOURCE Additional functionality from LFS for large files. +// _FILE_OFFSET_BITS=N Select default filesystem interface. +// _ATFILE_SOURCE Additional *at interfaces. +// _GNU_SOURCE All of the above, plus GNU extensions. +// _DEFAULT_SOURCE The default set of features (taking precedence over +// __STRICT_ANSI__). +// +// _FORTIFY_SOURCE Add security hardening to many library functions. +// Set to 1 or 2; 2 performs stricter checks than 1. +// +// _REENTRANT, _THREAD_SAFE +// Obsolete; equivalent to _POSIX_C_SOURCE=199506L. +// +// The `-ansi' switch to the GNU C compiler, and standards conformance +// options such as `-std=c99', define __STRICT_ANSI__. If none of +// these are defined, or if _DEFAULT_SOURCE is defined, the default is +// to have _POSIX_SOURCE set to one and _POSIX_C_SOURCE set to +// 200809L, as well as enabling miscellaneous functions from BSD and +// SVID. If more than one of these are defined, they accumulate. For +// example __STRICT_ANSI__, _POSIX_SOURCE and _POSIX_C_SOURCE together +// give you ISO C, 1003.1, and 1003.2, but nothing else. +// +// These are defined by this file and are used by the +// header files to decide what to declare or define: +// +// __GLIBC_USE (F) Define things from feature set F. This is defined +// to 1 or 0; the subsequent macros are either defined +// or undefined, and those tests should be moved to +// __GLIBC_USE. +// __USE_ISOC11 Define ISO C11 things. +// __USE_ISOC99 Define ISO C99 things. +// __USE_ISOC95 Define ISO C90 AMD1 (C95) things. +// __USE_ISOCXX11 Define ISO C++11 things. +// __USE_POSIX Define IEEE Std 1003.1 things. +// __USE_POSIX2 Define IEEE Std 1003.2 things. +// __USE_POSIX199309 Define IEEE Std 1003.1, and .1b things. +// __USE_POSIX199506 Define IEEE Std 1003.1, .1b, .1c and .1i things. +// __USE_XOPEN Define XPG things. +// __USE_XOPEN_EXTENDED Define X/Open Unix things. +// __USE_UNIX98 Define Single Unix V2 things. +// __USE_XOPEN2K Define XPG6 things. +// __USE_XOPEN2KXSI Define XPG6 XSI things. +// __USE_XOPEN2K8 Define XPG7 things. +// __USE_XOPEN2K8XSI Define XPG7 XSI things. +// __USE_LARGEFILE Define correct standard I/O things. +// __USE_LARGEFILE64 Define LFS things with separate names. +// __USE_FILE_OFFSET64 Define 64bit interface as default. +// __USE_MISC Define things from 4.3BSD or System V Unix. +// __USE_ATFILE Define *at interfaces and AT_* constants for them. +// __USE_GNU Define GNU extensions. +// __USE_FORTIFY_LEVEL Additional security measures used, according to level. +// +// The macros `__GNU_LIBRARY__', `__GLIBC__', and `__GLIBC_MINOR__' are +// defined by this file unconditionally. `__GNU_LIBRARY__' is provided +// only for compatibility. All new code should use the other symbols +// to test for features. +// +// All macros listed above as possibly being defined by this file are +// explicitly undefined if they are not explicitly defined. +// Feature-test macros that are not defined by the user or compiler +// but are implied by the other feature-test macros defined (or by the +// lack of any definitions) are defined by the file. 
+// +// ISO C feature test macros depend on the definition of the macro +// when an affected header is included, not when the first system +// header is included, and so they are handled in +// , which does not have a multiple include +// guard. Feature test macros that can be handled from the first +// system header included are handled here. + +// Undefine everything, so we get a clean slate. + +// Suppress kernel-name space pollution unless user expressedly asks +// for it. + +// Convenience macro to test the version of gcc. +// Use like this: +// #if __GNUC_PREREQ (2,8) +// ... code requiring gcc 2.8 or later ... +// #endif +// Note: only works for GCC 2.0 and later, because __GNUC_MINOR__ was +// added in 2.0. + +// Similarly for clang. Features added to GCC after version 4.2 may +// or may not also be available in clang, and clang's definitions of +// __GNUC(_MINOR)__ are fixed at 4 and 2 respectively. Not all such +// features can be queried via __has_extension/__has_feature. + +// Whether to use feature set F. + +// _BSD_SOURCE and _SVID_SOURCE are deprecated aliases for +// _DEFAULT_SOURCE. If _DEFAULT_SOURCE is present we do not +// issue a warning; the expectation is that the source is being +// transitioned to use the new macro. + +// If _GNU_SOURCE was defined by the user, turn on all the other features. + +// If nothing (other than _GNU_SOURCE and _DEFAULT_SOURCE) is defined, +// define _DEFAULT_SOURCE. + +// This is to enable the ISO C2X extension. + +// This is to enable the ISO C11 extension. + +// This is to enable the ISO C99 extension. + +// This is to enable the ISO C90 Amendment 1:1995 extension. + +// If none of the ANSI/POSIX macros are defined, or if _DEFAULT_SOURCE +// is defined, use POSIX.1-2008 (or another version depending on +// _XOPEN_SOURCE). + +// Some C libraries once required _REENTRANT and/or _THREAD_SAFE to be +// defined in all multithreaded code. GNU libc has not required this +// for many years. We now treat them as compatibility synonyms for +// _POSIX_C_SOURCE=199506L, which is the earliest level of POSIX with +// comprehensive support for multithreaded code. Using them never +// lowers the selected level of POSIX conformance, only raises it. + +// The function 'gets' existed in C89, but is impossible to use +// safely. It has been removed from ISO C11 and ISO C++14. Note: for +// compatibility with various implementations of , this test +// must consider only the value of __cplusplus when compiling C++. + +// GNU formerly extended the scanf functions with modified format +// specifiers %as, %aS, and %a[...] that allocate a buffer for the +// input using malloc. This extension conflicts with ISO C99, which +// defines %a as a standalone format specifier that reads a floating- +// point number; moreover, POSIX.1-2008 provides the same feature +// using the modifier letter 'm' instead (%ms, %mS, %m[...]). +// +// We now follow C99 unless GNU extensions are active and the compiler +// is specifically in C89 or C++98 mode (strict or not). For +// instance, with GCC, -std=gnu11 will have C99-compliant scanf with +// or without -D_GNU_SOURCE, but -std=c89 -D_GNU_SOURCE will have the +// old extension. + +// Get definitions of __STDC_* predefined macros, if the compiler has +// not preincluded this header automatically. +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. 
+// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// This macro indicates that the installed library is the GNU C Library. +// For historic reasons the value now is 6 and this will stay from now +// on. The use of this variable is deprecated. Use __GLIBC__ and +// __GLIBC_MINOR__ now (see below) when you want to test for a specific +// GNU C library version and use the values in to get +// the sonames of the shared libraries. + +// Major and minor version number of the GNU C library package. Use +// these macros to test for features in specific releases. + +// This is here only because every header file already includes this one. +// Copyright (C) 1992-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// We are almost always included from features.h. + +// The GNU libc does not support any K&R compilers or the traditional mode +// of ISO C compilers anymore. Check for some of the combinations not +// anymore supported. + +// Some user header file might have defined this before. + +// All functions, except those with callbacks or those that +// synchronize memory, are leaf functions. + +// GCC can always grok prototypes. For C++ programs we add throw() +// to help it optimize the function calls. But this works only with +// gcc 2.8.x and egcs. For gcc 3.2 and up we even mark C functions +// as non-throwing using a function attribute since programs can use +// the -fexceptions options for C code as well. + +// Compilers that are not clang may object to +// #if defined __clang__ && __has_extension(...) +// even though they do not need to evaluate the right-hand side of the &&. + +// These two macros are not used in glibc anymore. They are kept here +// only because some other projects expect the macros to be defined. + +// For these things, GCC behaves the ANSI way normally, +// and the non-ANSI way under -traditional. + +// This is not a typedef so `const __ptr_t' does the right thing. + +// C++ needs to know that types and declarations are C, not C++. + +// Fortify support. + +// Support for flexible arrays. +// Headers that should use flexible arrays only if they're "real" +// (e.g. only if they won't affect sizeof()) should test +// #if __glibc_c99_flexarr_available. 
+ +// __asm__ ("xyz") is used throughout the headers to rename functions +// at the assembly language level. This is wrapped by the __REDIRECT +// macro, in order to support compilers that can do this some other +// way. When compilers don't support asm-names at all, we have to do +// preprocessor tricks instead (which don't have exactly the right +// semantics, but it's the best we can do). +// +// Example: +// int __REDIRECT(setpgrp, (__pid_t pid, __pid_t pgrp), setpgid); + +// +// #elif __SOME_OTHER_COMPILER__ +// +// # define __REDIRECT(name, proto, alias) name proto; _Pragma("let " #name " = " #alias) + +// GCC has various useful declarations that can be made with the +// `__attribute__' syntax. All of the ways we use this do fine if +// they are omitted for compilers that don't understand it. + +// At some point during the gcc 2.96 development the `malloc' attribute +// for functions was introduced. We don't want to use it unconditionally +// (although this would be possible) since it generates warnings. + +// Tell the compiler which arguments to an allocation function +// indicate the size of the allocation. + +// At some point during the gcc 2.96 development the `pure' attribute +// for functions was introduced. We don't want to use it unconditionally +// (although this would be possible) since it generates warnings. + +// This declaration tells the compiler that the value is constant. + +// At some point during the gcc 3.1 development the `used' attribute +// for functions was introduced. We don't want to use it unconditionally +// (although this would be possible) since it generates warnings. + +// Since version 3.2, gcc allows marking deprecated functions. + +// Since version 4.5, gcc also allows one to specify the message printed +// when a deprecated function is used. clang claims to be gcc 4.2, but +// may also support this feature. + +// At some point during the gcc 2.8 development the `format_arg' attribute +// for functions was introduced. We don't want to use it unconditionally +// (although this would be possible) since it generates warnings. +// If several `format_arg' attributes are given for the same function, in +// gcc-3.0 and older, all but the last one are ignored. In newer gccs, +// all designated arguments are considered. + +// At some point during the gcc 2.97 development the `strfmon' format +// attribute for functions was introduced. We don't want to use it +// unconditionally (although this would be possible) since it +// generates warnings. + +// The nonull function attribute allows to mark pointer parameters which +// must not be NULL. + +// If fortification mode, we warn about unused results of certain +// function calls which can lead to problems. + +// Forces a function to be always inlined. +// The Linux kernel defines __always_inline in stddef.h (283d7573), and +// it conflicts with this definition. Therefore undefine it first to +// allow either header to be included first. + +// Associate error messages with the source location of the call site rather +// than with the source location inside the function. + +// GCC 4.3 and above with -std=c99 or -std=gnu99 implements ISO C99 +// inline semantics, unless -fgnu89-inline is used. Using __GNUC_STDC_INLINE__ +// or __GNUC_GNU_INLINE is not a good enough check for gcc because gcc versions +// older than 4.3 may define these macros and still not guarantee GNU inlining +// semantics. 
+// +// clang++ identifies itself as gcc-4.2, but has support for GNU inlining +// semantics, that can be checked for by using the __GNUC_STDC_INLINE_ and +// __GNUC_GNU_INLINE__ macro definitions. + +// GCC 4.3 and above allow passing all anonymous arguments of an +// __extern_always_inline function to some other vararg function. + +// It is possible to compile containing GCC extensions even if GCC is +// run in pedantic mode if the uses are carefully marked using the +// `__extension__' keyword. But this is not generally available before +// version 2.8. + +// __restrict is known in EGCS 1.2 and above. + +// ISO C99 also allows to declare arrays as non-overlapping. The syntax is +// array_name[restrict] +// GCC 3.1 supports this. + +// Describes a char array whose address can safely be passed as the first +// argument to strncpy and strncat, as the char array is not necessarily +// a NUL-terminated string. + +// Undefine (also defined in libc-symbols.h). +// Copies attributes from the declaration or type referenced by +// the argument. + +// Determine the wordsize from the preprocessor defines. + +// Both x86-64 and x32 use the 64-bit system call interface. +// Properties of long double type. ldbl-96 version. +// Copyright (C) 2016-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// long double is distinct from double, so there is nothing to +// define here. + +// __glibc_macro_warning (MESSAGE) issues warning MESSAGE. This is +// intended for use in preprocessor macros. +// +// Note: MESSAGE must be a _single_ string; concatenation of string +// literals is not supported. + +// Generic selection (ISO C11) is a C-only feature, available in GCC +// since version 4.9. Previous versions do not provide generic +// selection, even though they might set __STDC_VERSION__ to 201112L, +// when in -std=c11 mode. Thus, we must check for !defined __GNUC__ +// when testing __STDC_VERSION__ for generic selection support. +// On the other hand, Clang also defines __GNUC__, so a clang-specific +// check is required to enable the use of generic selection. + +// If we don't have __REDIRECT, prototypes will be missing if +// __USE_FILE_OFFSET64 but not __USE_LARGEFILE[64]. + +// Decide whether we can define 'extern inline' functions in headers. + +// This is here only because every header file already includes this one. +// Get the definitions of all the appropriate `__stub_FUNCTION' symbols. +// contains `#define __stub_FUNCTION' when FUNCTION is a stub +// that will always return failure (and set errno to ENOSYS). +// This file is automatically generated. +// This file selects the right generated file of `__stub_FUNCTION' macros +// based on the architecture being compiled for. + +// This file is automatically generated. 
+// It defines a symbol `__stub_FUNCTION' for each function +// in the C library which is a stub, meaning it will fail +// every time called, usually setting errno to ENOSYS. + +// These may be used to determine what facilities are present at compile time. +// Their values can be obtained at run time from `sysconf'. + +// POSIX Standard approved as ISO/IEC 9945-1 as of September 2008. + +// These are not #ifdef __USE_POSIX2 because they are +// in the theoretically application-owned namespace. + +// The utilities on GNU systems also correspond to this version. + +// The utilities on GNU systems also correspond to this version. + +// This symbol was required until the 2001 edition of POSIX. + +// If defined, the implementation supports the +// C Language Bindings Option. + +// If defined, the implementation supports the +// C Language Development Utilities Option. + +// If defined, the implementation supports the +// Software Development Utilities Option. + +// If defined, the implementation supports the +// creation of locales with the localedef utility. + +// X/Open version number to which the library conforms. It is selectable. + +// Commands and utilities from XPG4 are available. + +// We are compatible with the old published standards as well. + +// The X/Open Unix extensions are available. + +// The enhanced internationalization capabilities according to XPG4.2 +// are present. + +// The legacy interfaces are also available. + +// Get values of POSIX options: +// +// If these symbols are defined, the corresponding features are +// always available. If not, they may be available sometimes. +// The current values can be obtained with `sysconf'. +// +// _POSIX_JOB_CONTROL Job control is supported. +// _POSIX_SAVED_IDS Processes have a saved set-user-ID +// and a saved set-group-ID. +// _POSIX_REALTIME_SIGNALS Real-time, queued signals are supported. +// _POSIX_PRIORITY_SCHEDULING Priority scheduling is supported. +// _POSIX_TIMERS POSIX.4 clocks and timers are supported. +// _POSIX_ASYNCHRONOUS_IO Asynchronous I/O is supported. +// _POSIX_PRIORITIZED_IO Prioritized asynchronous I/O is supported. +// _POSIX_SYNCHRONIZED_IO Synchronizing file data is supported. +// _POSIX_FSYNC The fsync function is present. +// _POSIX_MAPPED_FILES Mapping of files to memory is supported. +// _POSIX_MEMLOCK Locking of all memory is supported. +// _POSIX_MEMLOCK_RANGE Locking of ranges of memory is supported. +// _POSIX_MEMORY_PROTECTION Setting of memory protections is supported. +// _POSIX_MESSAGE_PASSING POSIX.4 message queues are supported. +// _POSIX_SEMAPHORES POSIX.4 counting semaphores are supported. +// _POSIX_SHARED_MEMORY_OBJECTS POSIX.4 shared memory objects are supported. +// _POSIX_THREADS POSIX.1c pthreads are supported. +// _POSIX_THREAD_ATTR_STACKADDR Thread stack address attribute option supported. +// _POSIX_THREAD_ATTR_STACKSIZE Thread stack size attribute option supported. +// _POSIX_THREAD_SAFE_FUNCTIONS Thread-safe functions are supported. +// _POSIX_THREAD_PRIORITY_SCHEDULING +// POSIX.1c thread execution scheduling supported. +// _POSIX_THREAD_PRIO_INHERIT Thread priority inheritance option supported. +// _POSIX_THREAD_PRIO_PROTECT Thread priority protection option supported. +// _POSIX_THREAD_PROCESS_SHARED Process-shared synchronization supported. +// _POSIX_PII Protocol-independent interfaces are supported. +// _POSIX_PII_XTI XTI protocol-indep. interfaces are supported. +// _POSIX_PII_SOCKET Socket protocol-indep. interfaces are supported. 
+// _POSIX_PII_INTERNET Internet family of protocols supported. +// _POSIX_PII_INTERNET_STREAM Connection-mode Internet protocol supported. +// _POSIX_PII_INTERNET_DGRAM Connectionless Internet protocol supported. +// _POSIX_PII_OSI ISO/OSI family of protocols supported. +// _POSIX_PII_OSI_COTS Connection-mode ISO/OSI service supported. +// _POSIX_PII_OSI_CLTS Connectionless ISO/OSI service supported. +// _POSIX_POLL Implementation supports `poll' function. +// _POSIX_SELECT Implementation supports `select' and `pselect'. +// +// _XOPEN_REALTIME X/Open realtime support is available. +// _XOPEN_REALTIME_THREADS X/Open realtime thread support is available. +// _XOPEN_SHM Shared memory interface according to XPG4.2. +// +// _XBS5_ILP32_OFF32 Implementation provides environment with 32-bit +// int, long, pointer, and off_t types. +// _XBS5_ILP32_OFFBIG Implementation provides environment with 32-bit +// int, long, and pointer and off_t with at least +// 64 bits. +// _XBS5_LP64_OFF64 Implementation provides environment with 32-bit +// int, and 64-bit long, pointer, and off_t types. +// _XBS5_LPBIG_OFFBIG Implementation provides environment with at +// least 32 bits int and long, pointer, and off_t +// with at least 64 bits. +// +// If any of these symbols is defined as -1, the corresponding option is not +// true for any file. If any is defined as other than -1, the corresponding +// option is true for all files. If a symbol is not defined at all, the value +// for a specific file can be obtained from `pathconf' and `fpathconf'. +// +// _POSIX_CHOWN_RESTRICTED Only the super user can use `chown' to change +// the owner of a file. `chown' can only be used +// to change the group ID of a file to a group of +// which the calling process is a member. +// _POSIX_NO_TRUNC Pathname components longer than +// NAME_MAX generate an error. +// _POSIX_VDISABLE If defined, if the value of an element of the +// `c_cc' member of `struct termios' is +// _POSIX_VDISABLE, no character will have the +// effect associated with that element. +// _POSIX_SYNC_IO Synchronous I/O may be performed. +// _POSIX_ASYNC_IO Asynchronous I/O may be performed. +// _POSIX_PRIO_IO Prioritized Asynchronous I/O may be performed. +// +// Support for the Large File Support interface is not generally available. +// If it is available the following constants are defined to one. +// _LFS64_LARGEFILE Low-level I/O supports large files. +// _LFS64_STDIO Standard I/O supports large files. +// + +// Define POSIX options for Linux. +// Copyright (C) 1996-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation; either version 2.1 of the +// License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; see the file COPYING.LIB. If +// not, see . + +// Job control is supported. + +// Processes have a saved set-user-ID and a saved set-group-ID. 
+ +// Priority scheduling is not supported with the correct semantics, +// but GNU/Linux applications expect that the corresponding interfaces +// are available, even though the semantics do not meet the POSIX +// requirements. See glibc bug 14829. + +// Synchronizing file data is supported. + +// The fsync function is present. + +// Mapping of files to memory is supported. + +// Locking of all memory is supported. + +// Locking of ranges of memory is supported. + +// Setting of memory protections is supported. + +// Some filesystems allow all users to change file ownership. + +// `c_cc' member of 'struct termios' structure can be disabled by +// using the value _POSIX_VDISABLE. + +// Filenames are not silently truncated. + +// X/Open realtime support is available. + +// X/Open thread realtime support is available. + +// XPG4.2 shared memory is supported. + +// Tell we have POSIX threads. + +// We have the reentrant functions described in POSIX. + +// We provide priority scheduling for threads. + +// We support user-defined stack sizes. + +// We support user-defined stacks. + +// We support priority inheritence. + +// We support priority protection, though only for non-robust +// mutexes. + +// We support priority inheritence for robust mutexes. + +// We do not support priority protection for robust mutexes. + +// We support POSIX.1b semaphores. + +// Real-time signals are supported. + +// We support asynchronous I/O. +// Alternative name for Unix98. +// Support for prioritization is also available. + +// The LFS support in asynchronous I/O is also available. + +// The rest of the LFS is also available. + +// POSIX shared memory objects are implemented. + +// CPU-time clocks support needs to be checked at runtime. + +// Clock support in threads must be also checked at runtime. + +// GNU libc provides regular expression handling. + +// Reader/Writer locks are available. + +// We have a POSIX shell. + +// We support the Timeouts option. + +// We support spinlocks. + +// The `spawn' function family is supported. + +// We have POSIX timers. + +// The barrier functions are available. + +// POSIX message queues are available. + +// Thread process-shared synchronization is supported. + +// The monotonic clock might be available. + +// The clock selection interfaces are available. + +// Advisory information interfaces are available. + +// IPv6 support is available. + +// Raw socket support is available. + +// We have at least one terminal. + +// Neither process nor thread sporadic server interfaces is available. + +// trace.h is not available. + +// Typed memory objects are not available. + +// Get the environment definitions from Unix98. +// Copyright (C) 1999-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Determine the wordsize from the preprocessor defines. 
+ +// Both x86-64 and x32 use the 64-bit system call interface. + +// This header should define the following symbols under the described +// situations. A value `1' means that the model is always supported, +// `-1' means it is never supported. Undefined means it cannot be +// statically decided. +// +// _POSIX_V7_ILP32_OFF32 32bit int, long, pointers, and off_t type +// _POSIX_V7_ILP32_OFFBIG 32bit int, long, and pointers and larger off_t type +// +// _POSIX_V7_LP64_OFF32 64bit long and pointers and 32bit off_t type +// _POSIX_V7_LPBIG_OFFBIG 64bit long and pointers and large off_t type +// +// The macros _POSIX_V6_ILP32_OFF32, _POSIX_V6_ILP32_OFFBIG, +// _POSIX_V6_LP64_OFF32, _POSIX_V6_LPBIG_OFFBIG, _XBS5_ILP32_OFF32, +// _XBS5_ILP32_OFFBIG, _XBS5_LP64_OFF32, and _XBS5_LPBIG_OFFBIG were +// used in previous versions of the Unix standard and are available +// only for compatibility. + +// Environments with 32-bit wide pointers are optionally provided. +// Therefore following macros aren't defined: +// # undef _POSIX_V7_ILP32_OFF32 +// # undef _POSIX_V7_ILP32_OFFBIG +// # undef _POSIX_V6_ILP32_OFF32 +// # undef _POSIX_V6_ILP32_OFFBIG +// # undef _XBS5_ILP32_OFF32 +// # undef _XBS5_ILP32_OFFBIG +// and users need to check at runtime. + +// We also have no use (for now) for an environment with bigger pointers +// and offsets. + +// By default we have 64-bit wide `long int', pointers and `off_t'. + +// Standard file descriptors. + +// All functions that are not declared anywhere else. + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Determine the wordsize from the preprocessor defines. + +// Both x86-64 and x32 use the 64-bit system call interface. +// Bit size of the time_t type at glibc build time, x86-64 and x32 case. +// Copyright (C) 2018-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. 
+// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// For others, time size is word size. + +// Convenience types. +type X__u_char = uint8 /* types.h:31:23 */ +type X__u_short = uint16 /* types.h:32:28 */ +type X__u_int = uint32 /* types.h:33:22 */ +type X__u_long = uint64 /* types.h:34:27 */ + +// Fixed-size types, underlying types depend on word size and compiler. +type X__int8_t = int8 /* types.h:37:21 */ +type X__uint8_t = uint8 /* types.h:38:23 */ +type X__int16_t = int16 /* types.h:39:26 */ +type X__uint16_t = uint16 /* types.h:40:28 */ +type X__int32_t = int32 /* types.h:41:20 */ +type X__uint32_t = uint32 /* types.h:42:22 */ +type X__int64_t = int64 /* types.h:44:25 */ +type X__uint64_t = uint64 /* types.h:45:27 */ + +// Smallest types with at least a given width. +type X__int_least8_t = X__int8_t /* types.h:52:18 */ +type X__uint_least8_t = X__uint8_t /* types.h:53:19 */ +type X__int_least16_t = X__int16_t /* types.h:54:19 */ +type X__uint_least16_t = X__uint16_t /* types.h:55:20 */ +type X__int_least32_t = X__int32_t /* types.h:56:19 */ +type X__uint_least32_t = X__uint32_t /* types.h:57:20 */ +type X__int_least64_t = X__int64_t /* types.h:58:19 */ +type X__uint_least64_t = X__uint64_t /* types.h:59:20 */ + +// quad_t is also 64 bits. +type X__quad_t = int64 /* types.h:63:18 */ +type X__u_quad_t = uint64 /* types.h:64:27 */ + +// Largest integral types. +type X__intmax_t = int64 /* types.h:72:18 */ +type X__uintmax_t = uint64 /* types.h:73:27 */ + +// The machine-dependent file defines __*_T_TYPE +// macros for each of the OS types we define below. The definitions +// of those macros must use the following macros for underlying types. +// We define __S_TYPE and __U_TYPE for the signed and unsigned +// variants of each of the following integer types on this machine. +// +// 16 -- "natural" 16-bit type (always short) +// 32 -- "natural" 32-bit type (always int) +// 64 -- "natural" 64-bit type (long or long long) +// LONG32 -- 32-bit type, traditionally long +// QUAD -- 64-bit type, traditionally long long +// WORD -- natural type of __WORDSIZE bits (int or long) +// LONGWORD -- type of __WORDSIZE bits, traditionally long +// +// We distinguish WORD/LONGWORD, 32/LONG32, and 64/QUAD so that the +// conventional uses of `long' or `long long' type modifiers match the +// types we define, even when a less-adorned type would be the same size. +// This matters for (somewhat) portably writing printf/scanf formats for +// these types, where using the appropriate l or ll format modifiers can +// make the typedefs and the formats match up across all GNU platforms. If +// we used `long' when it's 64 bits where `long long' is expected, then the +// compiler would warn about the formats not matching the argument types, +// and the programmer changing them to shut up the compiler would break the +// program's portability. 
+// +// Here we assume what is presently the case in all the GCC configurations +// we support: long long is always 64 bits, long is always word/address size, +// and int is always 32 bits. + +// No need to mark the typedef with __extension__. +// bits/typesizes.h -- underlying types for *_t. Linux/x86-64 version. +// Copyright (C) 2012-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// See for the meaning of these macros. This file exists so +// that need not vary across different GNU platforms. + +// X32 kernel interface is 64-bit. + +// Tell the libc code that off_t and off64_t are actually the same type +// for all ABI purposes, even if possibly expressed as different base types +// for C type-checking purposes. + +// Same for ino_t and ino64_t. + +// And for __rlim_t and __rlim64_t. + +// And for fsblkcnt_t, fsblkcnt64_t, fsfilcnt_t and fsfilcnt64_t. + +// Number of descriptors that can fit in an `fd_set'. + +// bits/time64.h -- underlying types for __time64_t. Generic version. +// Copyright (C) 2018-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Define __TIME64_T_TYPE so that it is always a 64-bit type. + +// If we already have 64-bit time type then use it. + +type X__dev_t = uint64 /* types.h:145:25 */ // Type of device numbers. +type X__uid_t = uint32 /* types.h:146:25 */ // Type of user identifications. +type X__gid_t = uint32 /* types.h:147:25 */ // Type of group identifications. +type X__ino_t = uint64 /* types.h:148:25 */ // Type of file serial numbers. +type X__ino64_t = uint64 /* types.h:149:27 */ // Type of file serial numbers (LFS). +type X__mode_t = uint32 /* types.h:150:26 */ // Type of file attribute bitmasks. +type X__nlink_t = uint64 /* types.h:151:27 */ // Type of file link counts. +type X__off_t = int64 /* types.h:152:25 */ // Type of file sizes and offsets. +type X__off64_t = int64 /* types.h:153:27 */ // Type of file sizes and offsets (LFS). +type X__pid_t = int32 /* types.h:154:25 */ // Type of process identifications. +type X__fsid_t = struct{ F__val [2]int32 } /* types.h:155:26 */ // Type of file system IDs. 
+type X__clock_t = int64 /* types.h:156:27 */ // Type of CPU usage counts. +type X__rlim_t = uint64 /* types.h:157:26 */ // Type for resource measurement. +type X__rlim64_t = uint64 /* types.h:158:28 */ // Type for resource measurement (LFS). +type X__id_t = uint32 /* types.h:159:24 */ // General type for IDs. +type X__time_t = int64 /* types.h:160:26 */ // Seconds since the Epoch. +type X__useconds_t = uint32 /* types.h:161:30 */ // Count of microseconds. +type X__suseconds_t = int64 /* types.h:162:31 */ // Signed count of microseconds. + +type X__daddr_t = int32 /* types.h:164:27 */ // The type of a disk address. +type X__key_t = int32 /* types.h:165:25 */ // Type of an IPC key. + +// Clock ID used in clock and timer functions. +type X__clockid_t = int32 /* types.h:168:29 */ + +// Timer ID returned by `timer_create'. +type X__timer_t = uintptr /* types.h:171:12 */ + +// Type to represent block size. +type X__blksize_t = int64 /* types.h:174:29 */ + +// Types from the Large File Support interface. + +// Type to count number of disk blocks. +type X__blkcnt_t = int64 /* types.h:179:28 */ +type X__blkcnt64_t = int64 /* types.h:180:30 */ + +// Type to count file system blocks. +type X__fsblkcnt_t = uint64 /* types.h:183:30 */ +type X__fsblkcnt64_t = uint64 /* types.h:184:32 */ + +// Type to count file system nodes. +type X__fsfilcnt_t = uint64 /* types.h:187:30 */ +type X__fsfilcnt64_t = uint64 /* types.h:188:32 */ + +// Type of miscellaneous file system fields. +type X__fsword_t = int64 /* types.h:191:28 */ + +type X__ssize_t = int64 /* types.h:193:27 */ // Type of a byte count, or error. + +// Signed long type used in system calls. +type X__syscall_slong_t = int64 /* types.h:196:33 */ +// Unsigned long type used in system calls. +type X__syscall_ulong_t = uint64 /* types.h:198:33 */ + +// These few don't really vary by system, they always correspond +// +// to one of the other defined types. +type X__loff_t = X__off64_t /* types.h:202:19 */ // Type of file sizes and offsets (LFS). +type X__caddr_t = uintptr /* types.h:203:14 */ + +// Duplicates info from stdint.h but this is used in unistd.h. +type X__intptr_t = int64 /* types.h:206:25 */ + +// Duplicate info from sys/socket.h. +type X__socklen_t = uint32 /* types.h:209:23 */ + +// C99: An integer type that can be accessed as an atomic entity, +// +// even in the presence of asynchronous interrupts. +// It is not currently necessary for this to be machine-specific. +type X__sig_atomic_t = int32 /* types.h:214:13 */ + +// Seconds since the Epoch, visible to user code when time_t is too +// narrow only for consistency with the old way of widening too-narrow +// types. User code should never use __time64_t. + +type Ssize_t = X__ssize_t /* unistd.h:220:19 */ + +// Wide character type. +// Locale-writers should change this as necessary to +// be big enough to hold unique values not between 0 and 127, +// and not (wchar_t) -1, for each defined multibyte character. + +// Define this type if we are doing the whole job, +// or if we want this type in particular. + +// A null pointer constant. + +// The Single Unix specification says that some more types are +// +// available here. 
+type Gid_t = X__gid_t /* unistd.h:232:17 */ + +type Uid_t = X__uid_t /* unistd.h:237:17 */ + +type Off_t = X__off64_t /* unistd.h:245:19 */ + +type Useconds_t = X__useconds_t /* unistd.h:255:22 */ + +type Pid_t = X__pid_t /* unistd.h:260:17 */ + +type Intptr_t = X__intptr_t /* unistd.h:267:20 */ + +type Socklen_t = X__socklen_t /* unistd.h:274:21 */ + +// Define some macros helping to catch buffer overflows. + +// System-specific extensions. +// System-specific extensions of , Linux version. +// Copyright (C) 2019-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +var _ int8 /* gen.c:2:13: */ diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/utime/capi_linux_loong64.go temporal-1.22.5/src/vendor/modernc.org/libc/utime/capi_linux_loong64.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/utime/capi_linux_loong64.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/utime/capi_linux_loong64.go 2024-02-23 09:46:15.000000000 +0000 @@ -0,0 +1,5 @@ +// Code generated by 'ccgo utime/gen.c -crt-import-path -export-defines -export-enums -export-externs X -export-fields F -export-structs -export-typedefs -header -hide _OSSwapInt16,_OSSwapInt32,_OSSwapInt64 -ignore-unsupported-alignment -o utime/utime_linux_amd64.go -pkgname utime', DO NOT EDIT. + +package utime + +var CAPI = map[string]struct{}{} diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/utime/utime_linux_loong64.go temporal-1.22.5/src/vendor/modernc.org/libc/utime/utime_linux_loong64.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/utime/utime_linux_loong64.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/utime/utime_linux_loong64.go 2024-02-23 09:46:15.000000000 +0000 @@ -0,0 +1,781 @@ +// Code generated by 'ccgo utime/gen.c -crt-import-path "" -export-defines "" -export-enums "" -export-externs X -export-fields F -export-structs "" -export-typedefs "" -header -hide _OSSwapInt16,_OSSwapInt32,_OSSwapInt64 -ignore-unsupported-alignment -o utime/utime_linux_amd64.go -pkgname utime', DO NOT EDIT. 
+ +package utime + +import ( + "math" + "reflect" + "sync/atomic" + "unsafe" +) + +var _ = math.Pi +var _ reflect.Kind +var _ atomic.Value +var _ unsafe.Pointer + +const ( + X_ATFILE_SOURCE = 1 // features.h:342:1: + X_BITS_TIME64_H = 1 // time64.h:24:1: + X_BITS_TYPESIZES_H = 1 // typesizes.h:24:1: + X_BITS_TYPES_H = 1 // types.h:24:1: + X_DEFAULT_SOURCE = 1 // features.h:227:1: + X_FEATURES_H = 1 // features.h:19:1: + X_FILE_OFFSET_BITS = 64 // :25:1: + X_LP64 = 1 // :284:1: + X_POSIX_C_SOURCE = 200809 // features.h:281:1: + X_POSIX_SOURCE = 1 // features.h:279:1: + X_STDC_PREDEF_H = 1 // :162:1: + X_SYS_CDEFS_H = 1 // cdefs.h:19:1: + X_UTIME_H = 1 // utime.h:23:1: + Linux = 1 // :231:1: + Unix = 1 // :177:1: +) + +type Ptrdiff_t = int64 /* :3:26 */ + +type Size_t = uint64 /* :9:23 */ + +type Wchar_t = int32 /* :15:24 */ + +type X__int128_t = struct { + Flo int64 + Fhi int64 +} /* :21:43 */ // must match modernc.org/mathutil.Int128 +type X__uint128_t = struct { + Flo uint64 + Fhi uint64 +} /* :22:44 */ // must match modernc.org/mathutil.Int128 + +type X__builtin_va_list = uintptr /* :46:14 */ +type X__float128 = float64 /* :47:21 */ + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// POSIX Standard: 5.6.6 Set File Access and Modification Times + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// These are defined by the user (or the compiler) +// to specify the desired environment: +// +// __STRICT_ANSI__ ISO Standard C. +// _ISOC99_SOURCE Extensions to ISO C89 from ISO C99. +// _ISOC11_SOURCE Extensions to ISO C99 from ISO C11. +// _ISOC2X_SOURCE Extensions to ISO C99 from ISO C2X. +// __STDC_WANT_LIB_EXT2__ +// Extensions to ISO C99 from TR 27431-2:2010. +// __STDC_WANT_IEC_60559_BFP_EXT__ +// Extensions to ISO C11 from TS 18661-1:2014. +// __STDC_WANT_IEC_60559_FUNCS_EXT__ +// Extensions to ISO C11 from TS 18661-4:2015. +// __STDC_WANT_IEC_60559_TYPES_EXT__ +// Extensions to ISO C11 from TS 18661-3:2015. +// +// _POSIX_SOURCE IEEE Std 1003.1. 
+// _POSIX_C_SOURCE If ==1, like _POSIX_SOURCE; if >=2 add IEEE Std 1003.2; +// if >=199309L, add IEEE Std 1003.1b-1993; +// if >=199506L, add IEEE Std 1003.1c-1995; +// if >=200112L, all of IEEE 1003.1-2004 +// if >=200809L, all of IEEE 1003.1-2008 +// _XOPEN_SOURCE Includes POSIX and XPG things. Set to 500 if +// Single Unix conformance is wanted, to 600 for the +// sixth revision, to 700 for the seventh revision. +// _XOPEN_SOURCE_EXTENDED XPG things and X/Open Unix extensions. +// _LARGEFILE_SOURCE Some more functions for correct standard I/O. +// _LARGEFILE64_SOURCE Additional functionality from LFS for large files. +// _FILE_OFFSET_BITS=N Select default filesystem interface. +// _ATFILE_SOURCE Additional *at interfaces. +// _GNU_SOURCE All of the above, plus GNU extensions. +// _DEFAULT_SOURCE The default set of features (taking precedence over +// __STRICT_ANSI__). +// +// _FORTIFY_SOURCE Add security hardening to many library functions. +// Set to 1 or 2; 2 performs stricter checks than 1. +// +// _REENTRANT, _THREAD_SAFE +// Obsolete; equivalent to _POSIX_C_SOURCE=199506L. +// +// The `-ansi' switch to the GNU C compiler, and standards conformance +// options such as `-std=c99', define __STRICT_ANSI__. If none of +// these are defined, or if _DEFAULT_SOURCE is defined, the default is +// to have _POSIX_SOURCE set to one and _POSIX_C_SOURCE set to +// 200809L, as well as enabling miscellaneous functions from BSD and +// SVID. If more than one of these are defined, they accumulate. For +// example __STRICT_ANSI__, _POSIX_SOURCE and _POSIX_C_SOURCE together +// give you ISO C, 1003.1, and 1003.2, but nothing else. +// +// These are defined by this file and are used by the +// header files to decide what to declare or define: +// +// __GLIBC_USE (F) Define things from feature set F. This is defined +// to 1 or 0; the subsequent macros are either defined +// or undefined, and those tests should be moved to +// __GLIBC_USE. +// __USE_ISOC11 Define ISO C11 things. +// __USE_ISOC99 Define ISO C99 things. +// __USE_ISOC95 Define ISO C90 AMD1 (C95) things. +// __USE_ISOCXX11 Define ISO C++11 things. +// __USE_POSIX Define IEEE Std 1003.1 things. +// __USE_POSIX2 Define IEEE Std 1003.2 things. +// __USE_POSIX199309 Define IEEE Std 1003.1, and .1b things. +// __USE_POSIX199506 Define IEEE Std 1003.1, .1b, .1c and .1i things. +// __USE_XOPEN Define XPG things. +// __USE_XOPEN_EXTENDED Define X/Open Unix things. +// __USE_UNIX98 Define Single Unix V2 things. +// __USE_XOPEN2K Define XPG6 things. +// __USE_XOPEN2KXSI Define XPG6 XSI things. +// __USE_XOPEN2K8 Define XPG7 things. +// __USE_XOPEN2K8XSI Define XPG7 XSI things. +// __USE_LARGEFILE Define correct standard I/O things. +// __USE_LARGEFILE64 Define LFS things with separate names. +// __USE_FILE_OFFSET64 Define 64bit interface as default. +// __USE_MISC Define things from 4.3BSD or System V Unix. +// __USE_ATFILE Define *at interfaces and AT_* constants for them. +// __USE_GNU Define GNU extensions. +// __USE_FORTIFY_LEVEL Additional security measures used, according to level. +// +// The macros `__GNU_LIBRARY__', `__GLIBC__', and `__GLIBC_MINOR__' are +// defined by this file unconditionally. `__GNU_LIBRARY__' is provided +// only for compatibility. All new code should use the other symbols +// to test for features. +// +// All macros listed above as possibly being defined by this file are +// explicitly undefined if they are not explicitly defined. 
+// Feature-test macros that are not defined by the user or compiler +// but are implied by the other feature-test macros defined (or by the +// lack of any definitions) are defined by the file. +// +// ISO C feature test macros depend on the definition of the macro +// when an affected header is included, not when the first system +// header is included, and so they are handled in +// , which does not have a multiple include +// guard. Feature test macros that can be handled from the first +// system header included are handled here. + +// Undefine everything, so we get a clean slate. + +// Suppress kernel-name space pollution unless user expressedly asks +// for it. + +// Convenience macro to test the version of gcc. +// Use like this: +// #if __GNUC_PREREQ (2,8) +// ... code requiring gcc 2.8 or later ... +// #endif +// Note: only works for GCC 2.0 and later, because __GNUC_MINOR__ was +// added in 2.0. + +// Similarly for clang. Features added to GCC after version 4.2 may +// or may not also be available in clang, and clang's definitions of +// __GNUC(_MINOR)__ are fixed at 4 and 2 respectively. Not all such +// features can be queried via __has_extension/__has_feature. + +// Whether to use feature set F. + +// _BSD_SOURCE and _SVID_SOURCE are deprecated aliases for +// _DEFAULT_SOURCE. If _DEFAULT_SOURCE is present we do not +// issue a warning; the expectation is that the source is being +// transitioned to use the new macro. + +// If _GNU_SOURCE was defined by the user, turn on all the other features. + +// If nothing (other than _GNU_SOURCE and _DEFAULT_SOURCE) is defined, +// define _DEFAULT_SOURCE. + +// This is to enable the ISO C2X extension. + +// This is to enable the ISO C11 extension. + +// This is to enable the ISO C99 extension. + +// This is to enable the ISO C90 Amendment 1:1995 extension. + +// If none of the ANSI/POSIX macros are defined, or if _DEFAULT_SOURCE +// is defined, use POSIX.1-2008 (or another version depending on +// _XOPEN_SOURCE). + +// Some C libraries once required _REENTRANT and/or _THREAD_SAFE to be +// defined in all multithreaded code. GNU libc has not required this +// for many years. We now treat them as compatibility synonyms for +// _POSIX_C_SOURCE=199506L, which is the earliest level of POSIX with +// comprehensive support for multithreaded code. Using them never +// lowers the selected level of POSIX conformance, only raises it. + +// The function 'gets' existed in C89, but is impossible to use +// safely. It has been removed from ISO C11 and ISO C++14. Note: for +// compatibility with various implementations of , this test +// must consider only the value of __cplusplus when compiling C++. + +// GNU formerly extended the scanf functions with modified format +// specifiers %as, %aS, and %a[...] that allocate a buffer for the +// input using malloc. This extension conflicts with ISO C99, which +// defines %a as a standalone format specifier that reads a floating- +// point number; moreover, POSIX.1-2008 provides the same feature +// using the modifier letter 'm' instead (%ms, %mS, %m[...]). +// +// We now follow C99 unless GNU extensions are active and the compiler +// is specifically in C89 or C++98 mode (strict or not). For +// instance, with GCC, -std=gnu11 will have C99-compliant scanf with +// or without -D_GNU_SOURCE, but -std=c89 -D_GNU_SOURCE will have the +// old extension. + +// Get definitions of __STDC_* predefined macros, if the compiler has +// not preincluded this header automatically. 
+// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// This macro indicates that the installed library is the GNU C Library. +// For historic reasons the value now is 6 and this will stay from now +// on. The use of this variable is deprecated. Use __GLIBC__ and +// __GLIBC_MINOR__ now (see below) when you want to test for a specific +// GNU C library version and use the values in to get +// the sonames of the shared libraries. + +// Major and minor version number of the GNU C library package. Use +// these macros to test for features in specific releases. + +// This is here only because every header file already includes this one. +// Copyright (C) 1992-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// We are almost always included from features.h. + +// The GNU libc does not support any K&R compilers or the traditional mode +// of ISO C compilers anymore. Check for some of the combinations not +// anymore supported. + +// Some user header file might have defined this before. + +// All functions, except those with callbacks or those that +// synchronize memory, are leaf functions. + +// GCC can always grok prototypes. For C++ programs we add throw() +// to help it optimize the function calls. But this works only with +// gcc 2.8.x and egcs. For gcc 3.2 and up we even mark C functions +// as non-throwing using a function attribute since programs can use +// the -fexceptions options for C code as well. + +// Compilers that are not clang may object to +// #if defined __clang__ && __has_extension(...) +// even though they do not need to evaluate the right-hand side of the &&. + +// These two macros are not used in glibc anymore. They are kept here +// only because some other projects expect the macros to be defined. + +// For these things, GCC behaves the ANSI way normally, +// and the non-ANSI way under -traditional. + +// This is not a typedef so `const __ptr_t' does the right thing. + +// C++ needs to know that types and declarations are C, not C++. + +// Fortify support. + +// Support for flexible arrays. +// Headers that should use flexible arrays only if they're "real" +// (e.g. 
only if they won't affect sizeof()) should test +// #if __glibc_c99_flexarr_available. + +// __asm__ ("xyz") is used throughout the headers to rename functions +// at the assembly language level. This is wrapped by the __REDIRECT +// macro, in order to support compilers that can do this some other +// way. When compilers don't support asm-names at all, we have to do +// preprocessor tricks instead (which don't have exactly the right +// semantics, but it's the best we can do). +// +// Example: +// int __REDIRECT(setpgrp, (__pid_t pid, __pid_t pgrp), setpgid); + +// +// #elif __SOME_OTHER_COMPILER__ +// +// # define __REDIRECT(name, proto, alias) name proto; _Pragma("let " #name " = " #alias) + +// GCC has various useful declarations that can be made with the +// `__attribute__' syntax. All of the ways we use this do fine if +// they are omitted for compilers that don't understand it. + +// At some point during the gcc 2.96 development the `malloc' attribute +// for functions was introduced. We don't want to use it unconditionally +// (although this would be possible) since it generates warnings. + +// Tell the compiler which arguments to an allocation function +// indicate the size of the allocation. + +// At some point during the gcc 2.96 development the `pure' attribute +// for functions was introduced. We don't want to use it unconditionally +// (although this would be possible) since it generates warnings. + +// This declaration tells the compiler that the value is constant. + +// At some point during the gcc 3.1 development the `used' attribute +// for functions was introduced. We don't want to use it unconditionally +// (although this would be possible) since it generates warnings. + +// Since version 3.2, gcc allows marking deprecated functions. + +// Since version 4.5, gcc also allows one to specify the message printed +// when a deprecated function is used. clang claims to be gcc 4.2, but +// may also support this feature. + +// At some point during the gcc 2.8 development the `format_arg' attribute +// for functions was introduced. We don't want to use it unconditionally +// (although this would be possible) since it generates warnings. +// If several `format_arg' attributes are given for the same function, in +// gcc-3.0 and older, all but the last one are ignored. In newer gccs, +// all designated arguments are considered. + +// At some point during the gcc 2.97 development the `strfmon' format +// attribute for functions was introduced. We don't want to use it +// unconditionally (although this would be possible) since it +// generates warnings. + +// The nonull function attribute allows to mark pointer parameters which +// must not be NULL. + +// If fortification mode, we warn about unused results of certain +// function calls which can lead to problems. + +// Forces a function to be always inlined. +// The Linux kernel defines __always_inline in stddef.h (283d7573), and +// it conflicts with this definition. Therefore undefine it first to +// allow either header to be included first. + +// Associate error messages with the source location of the call site rather +// than with the source location inside the function. + +// GCC 4.3 and above with -std=c99 or -std=gnu99 implements ISO C99 +// inline semantics, unless -fgnu89-inline is used. Using __GNUC_STDC_INLINE__ +// or __GNUC_GNU_INLINE is not a good enough check for gcc because gcc versions +// older than 4.3 may define these macros and still not guarantee GNU inlining +// semantics. 
+// +// clang++ identifies itself as gcc-4.2, but has support for GNU inlining +// semantics, that can be checked for by using the __GNUC_STDC_INLINE_ and +// __GNUC_GNU_INLINE__ macro definitions. + +// GCC 4.3 and above allow passing all anonymous arguments of an +// __extern_always_inline function to some other vararg function. + +// It is possible to compile containing GCC extensions even if GCC is +// run in pedantic mode if the uses are carefully marked using the +// `__extension__' keyword. But this is not generally available before +// version 2.8. + +// __restrict is known in EGCS 1.2 and above. + +// ISO C99 also allows to declare arrays as non-overlapping. The syntax is +// array_name[restrict] +// GCC 3.1 supports this. + +// Describes a char array whose address can safely be passed as the first +// argument to strncpy and strncat, as the char array is not necessarily +// a NUL-terminated string. + +// Undefine (also defined in libc-symbols.h). +// Copies attributes from the declaration or type referenced by +// the argument. + +// Determine the wordsize from the preprocessor defines. + +// Both x86-64 and x32 use the 64-bit system call interface. +// Properties of long double type. ldbl-96 version. +// Copyright (C) 2016-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// long double is distinct from double, so there is nothing to +// define here. + +// __glibc_macro_warning (MESSAGE) issues warning MESSAGE. This is +// intended for use in preprocessor macros. +// +// Note: MESSAGE must be a _single_ string; concatenation of string +// literals is not supported. + +// Generic selection (ISO C11) is a C-only feature, available in GCC +// since version 4.9. Previous versions do not provide generic +// selection, even though they might set __STDC_VERSION__ to 201112L, +// when in -std=c11 mode. Thus, we must check for !defined __GNUC__ +// when testing __STDC_VERSION__ for generic selection support. +// On the other hand, Clang also defines __GNUC__, so a clang-specific +// check is required to enable the use of generic selection. + +// If we don't have __REDIRECT, prototypes will be missing if +// __USE_FILE_OFFSET64 but not __USE_LARGEFILE[64]. + +// Decide whether we can define 'extern inline' functions in headers. + +// This is here only because every header file already includes this one. +// Get the definitions of all the appropriate `__stub_FUNCTION' symbols. +// contains `#define __stub_FUNCTION' when FUNCTION is a stub +// that will always return failure (and set errno to ENOSYS). +// This file is automatically generated. +// This file selects the right generated file of `__stub_FUNCTION' macros +// based on the architecture being compiled for. + +// This file is automatically generated. 
+// It defines a symbol `__stub_FUNCTION' for each function +// in the C library which is a stub, meaning it will fail +// every time called, usually setting errno to ENOSYS. + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Determine the wordsize from the preprocessor defines. + +// Both x86-64 and x32 use the 64-bit system call interface. +// Bit size of the time_t type at glibc build time, x86-64 and x32 case. +// Copyright (C) 2018-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// For others, time size is word size. + +// Convenience types. +type X__u_char = uint8 /* types.h:31:23 */ +type X__u_short = uint16 /* types.h:32:28 */ +type X__u_int = uint32 /* types.h:33:22 */ +type X__u_long = uint64 /* types.h:34:27 */ + +// Fixed-size types, underlying types depend on word size and compiler. 
+type X__int8_t = int8 /* types.h:37:21 */ +type X__uint8_t = uint8 /* types.h:38:23 */ +type X__int16_t = int16 /* types.h:39:26 */ +type X__uint16_t = uint16 /* types.h:40:28 */ +type X__int32_t = int32 /* types.h:41:20 */ +type X__uint32_t = uint32 /* types.h:42:22 */ +type X__int64_t = int64 /* types.h:44:25 */ +type X__uint64_t = uint64 /* types.h:45:27 */ + +// Smallest types with at least a given width. +type X__int_least8_t = X__int8_t /* types.h:52:18 */ +type X__uint_least8_t = X__uint8_t /* types.h:53:19 */ +type X__int_least16_t = X__int16_t /* types.h:54:19 */ +type X__uint_least16_t = X__uint16_t /* types.h:55:20 */ +type X__int_least32_t = X__int32_t /* types.h:56:19 */ +type X__uint_least32_t = X__uint32_t /* types.h:57:20 */ +type X__int_least64_t = X__int64_t /* types.h:58:19 */ +type X__uint_least64_t = X__uint64_t /* types.h:59:20 */ + +// quad_t is also 64 bits. +type X__quad_t = int64 /* types.h:63:18 */ +type X__u_quad_t = uint64 /* types.h:64:27 */ + +// Largest integral types. +type X__intmax_t = int64 /* types.h:72:18 */ +type X__uintmax_t = uint64 /* types.h:73:27 */ + +// The machine-dependent file defines __*_T_TYPE +// macros for each of the OS types we define below. The definitions +// of those macros must use the following macros for underlying types. +// We define __S_TYPE and __U_TYPE for the signed and unsigned +// variants of each of the following integer types on this machine. +// +// 16 -- "natural" 16-bit type (always short) +// 32 -- "natural" 32-bit type (always int) +// 64 -- "natural" 64-bit type (long or long long) +// LONG32 -- 32-bit type, traditionally long +// QUAD -- 64-bit type, traditionally long long +// WORD -- natural type of __WORDSIZE bits (int or long) +// LONGWORD -- type of __WORDSIZE bits, traditionally long +// +// We distinguish WORD/LONGWORD, 32/LONG32, and 64/QUAD so that the +// conventional uses of `long' or `long long' type modifiers match the +// types we define, even when a less-adorned type would be the same size. +// This matters for (somewhat) portably writing printf/scanf formats for +// these types, where using the appropriate l or ll format modifiers can +// make the typedefs and the formats match up across all GNU platforms. If +// we used `long' when it's 64 bits where `long long' is expected, then the +// compiler would warn about the formats not matching the argument types, +// and the programmer changing them to shut up the compiler would break the +// program's portability. +// +// Here we assume what is presently the case in all the GCC configurations +// we support: long long is always 64 bits, long is always word/address size, +// and int is always 32 bits. + +// No need to mark the typedef with __extension__. +// bits/typesizes.h -- underlying types for *_t. Linux/x86-64 version. +// Copyright (C) 2012-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// See for the meaning of these macros. This file exists so +// that need not vary across different GNU platforms. + +// X32 kernel interface is 64-bit. + +// Tell the libc code that off_t and off64_t are actually the same type +// for all ABI purposes, even if possibly expressed as different base types +// for C type-checking purposes. + +// Same for ino_t and ino64_t. + +// And for __rlim_t and __rlim64_t. + +// And for fsblkcnt_t, fsblkcnt64_t, fsfilcnt_t and fsfilcnt64_t. + +// Number of descriptors that can fit in an `fd_set'. + +// bits/time64.h -- underlying types for __time64_t. Generic version. +// Copyright (C) 2018-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Define __TIME64_T_TYPE so that it is always a 64-bit type. + +// If we already have 64-bit time type then use it. + +type X__dev_t = uint64 /* types.h:145:25 */ // Type of device numbers. +type X__uid_t = uint32 /* types.h:146:25 */ // Type of user identifications. +type X__gid_t = uint32 /* types.h:147:25 */ // Type of group identifications. +type X__ino_t = uint64 /* types.h:148:25 */ // Type of file serial numbers. +type X__ino64_t = uint64 /* types.h:149:27 */ // Type of file serial numbers (LFS). +type X__mode_t = uint32 /* types.h:150:26 */ // Type of file attribute bitmasks. +type X__nlink_t = uint64 /* types.h:151:27 */ // Type of file link counts. +type X__off_t = int64 /* types.h:152:25 */ // Type of file sizes and offsets. +type X__off64_t = int64 /* types.h:153:27 */ // Type of file sizes and offsets (LFS). +type X__pid_t = int32 /* types.h:154:25 */ // Type of process identifications. +type X__fsid_t = struct{ F__val [2]int32 } /* types.h:155:26 */ // Type of file system IDs. +type X__clock_t = int64 /* types.h:156:27 */ // Type of CPU usage counts. +type X__rlim_t = uint64 /* types.h:157:26 */ // Type for resource measurement. +type X__rlim64_t = uint64 /* types.h:158:28 */ // Type for resource measurement (LFS). +type X__id_t = uint32 /* types.h:159:24 */ // General type for IDs. +type X__time_t = int64 /* types.h:160:26 */ // Seconds since the Epoch. +type X__useconds_t = uint32 /* types.h:161:30 */ // Count of microseconds. +type X__suseconds_t = int64 /* types.h:162:31 */ // Signed count of microseconds. + +type X__daddr_t = int32 /* types.h:164:27 */ // The type of a disk address. +type X__key_t = int32 /* types.h:165:25 */ // Type of an IPC key. + +// Clock ID used in clock and timer functions. +type X__clockid_t = int32 /* types.h:168:29 */ + +// Timer ID returned by `timer_create'. +type X__timer_t = uintptr /* types.h:171:12 */ + +// Type to represent block size. +type X__blksize_t = int64 /* types.h:174:29 */ + +// Types from the Large File Support interface. 
+ +// Type to count number of disk blocks. +type X__blkcnt_t = int64 /* types.h:179:28 */ +type X__blkcnt64_t = int64 /* types.h:180:30 */ + +// Type to count file system blocks. +type X__fsblkcnt_t = uint64 /* types.h:183:30 */ +type X__fsblkcnt64_t = uint64 /* types.h:184:32 */ + +// Type to count file system nodes. +type X__fsfilcnt_t = uint64 /* types.h:187:30 */ +type X__fsfilcnt64_t = uint64 /* types.h:188:32 */ + +// Type of miscellaneous file system fields. +type X__fsword_t = int64 /* types.h:191:28 */ + +type X__ssize_t = int64 /* types.h:193:27 */ // Type of a byte count, or error. + +// Signed long type used in system calls. +type X__syscall_slong_t = int64 /* types.h:196:33 */ +// Unsigned long type used in system calls. +type X__syscall_ulong_t = uint64 /* types.h:198:33 */ + +// These few don't really vary by system, they always correspond +// +// to one of the other defined types. +type X__loff_t = X__off64_t /* types.h:202:19 */ // Type of file sizes and offsets (LFS). +type X__caddr_t = uintptr /* types.h:203:14 */ + +// Duplicates info from stdint.h but this is used in unistd.h. +type X__intptr_t = int64 /* types.h:206:25 */ + +// Duplicate info from sys/socket.h. +type X__socklen_t = uint32 /* types.h:209:23 */ + +// C99: An integer type that can be accessed as an atomic entity, +// +// even in the presence of asynchronous interrupts. +// It is not currently necessary for this to be machine-specific. +type X__sig_atomic_t = int32 /* types.h:214:13 */ + +// Seconds since the Epoch, visible to user code when time_t is too +// narrow only for consistency with the old way of widening too-narrow +// types. User code should never use __time64_t. + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Returned by `time'. +type Time_t = X__time_t /* time_t.h:7:18 */ + +// Structure describing file times. +type Utimbuf = struct { + Factime X__time_t + Fmodtime X__time_t +} /* utime.h:36:1 */ + +var _ int8 /* gen.c:2:13: */ diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/uuid/uuid/capi_linux_loong64.go temporal-1.22.5/src/vendor/modernc.org/libc/uuid/uuid/capi_linux_loong64.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/uuid/uuid/capi_linux_loong64.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/uuid/uuid/capi_linux_loong64.go 2024-02-23 09:46:15.000000000 +0000 @@ -0,0 +1,5 @@ +// Code generated by 'ccgo uuid/uuid/gen.c -crt-import-path -export-defines -export-enums -export-externs X -export-fields F -export-structs -export-typedefs -header -hide _OSSwapInt16,_OSSwapInt32,_OSSwapInt64 -ignore-unsupported-alignment -o uuid/uuid/uuid_linux_amd64.go -pkgname uuid', DO NOT EDIT. 
+ +package uuid + +var CAPI = map[string]struct{}{} diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/uuid/uuid/uuid_linux_loong64.go temporal-1.22.5/src/vendor/modernc.org/libc/uuid/uuid/uuid_linux_loong64.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/uuid/uuid/uuid_linux_loong64.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/uuid/uuid/uuid_linux_loong64.go 2024-02-23 09:46:15.000000000 +0000 @@ -0,0 +1,1986 @@ +// Code generated by 'ccgo uuid/uuid/gen.c -crt-import-path "" -export-defines "" -export-enums "" -export-externs X -export-fields F -export-structs "" -export-typedefs "" -header -hide _OSSwapInt16,_OSSwapInt32,_OSSwapInt64 -ignore-unsupported-alignment -o uuid/uuid/uuid_linux_amd64.go -pkgname uuid', DO NOT EDIT. + +package uuid + +import ( + "math" + "reflect" + "sync/atomic" + "unsafe" +) + +var _ = math.Pi +var _ reflect.Kind +var _ atomic.Value +var _ unsafe.Pointer + +const ( + BIG_ENDIAN = 4321 // endian.h:28:1: + BYTE_ORDER = 1234 // endian.h:30:1: + CLOCK_BOOTTIME = 7 // time.h:60:1: + CLOCK_BOOTTIME_ALARM = 9 // time.h:64:1: + CLOCK_MONOTONIC = 1 // time.h:48:1: + CLOCK_MONOTONIC_COARSE = 6 // time.h:58:1: + CLOCK_MONOTONIC_RAW = 4 // time.h:54:1: + CLOCK_PROCESS_CPUTIME_ID = 2 // time.h:50:1: + CLOCK_REALTIME = 0 // time.h:46:1: + CLOCK_REALTIME_ALARM = 8 // time.h:62:1: + CLOCK_REALTIME_COARSE = 5 // time.h:56:1: + CLOCK_TAI = 11 // time.h:66:1: + CLOCK_THREAD_CPUTIME_ID = 3 // time.h:52:1: + FD_SETSIZE = 1024 // select.h:73:1: + LITTLE_ENDIAN = 1234 // endian.h:27:1: + PDP_ENDIAN = 3412 // endian.h:29:1: + TIMER_ABSTIME = 1 // time.h:69:1: + TIME_UTC = 1 // time.h:65:1: + UUID_STR_LEN = 37 // uuid.h:65:1: + UUID_TYPE_DCE_MD5 = 3 // uuid.h:58:1: + UUID_TYPE_DCE_RANDOM = 4 // uuid.h:59:1: + UUID_TYPE_DCE_SECURITY = 2 // uuid.h:57:1: + UUID_TYPE_DCE_SHA1 = 5 // uuid.h:60:1: + UUID_TYPE_DCE_TIME = 1 // uuid.h:56:1: + UUID_TYPE_MASK = 0xf // uuid.h:63:1: + UUID_TYPE_SHIFT = 4 // uuid.h:62:1: + UUID_VARIANT_DCE = 1 // uuid.h:48:1: + UUID_VARIANT_MASK = 0x7 // uuid.h:53:1: + UUID_VARIANT_MICROSOFT = 2 // uuid.h:49:1: + UUID_VARIANT_NCS = 0 // uuid.h:47:1: + UUID_VARIANT_OTHER = 3 // uuid.h:50:1: + UUID_VARIANT_SHIFT = 5 // uuid.h:52:1: + X_ATFILE_SOURCE = 1 // features.h:342:1: + X_BITS_BYTESWAP_H = 1 // byteswap.h:24:1: + X_BITS_ENDIANNESS_H = 1 // endianness.h:2:1: + X_BITS_ENDIAN_H = 1 // endian.h:20:1: + X_BITS_PTHREADTYPES_ARCH_H = 1 // pthreadtypes-arch.h:19:1: + X_BITS_PTHREADTYPES_COMMON_H = 1 // pthreadtypes.h:20:1: + X_BITS_STDINT_INTN_H = 1 // stdint-intn.h:20:1: + X_BITS_TIME64_H = 1 // time64.h:24:1: + X_BITS_TIME_H = 1 // time.h:24:1: + X_BITS_TYPESIZES_H = 1 // typesizes.h:24:1: + X_BITS_TYPES_H = 1 // types.h:24:1: + X_BITS_TYPES_LOCALE_T_H = 1 // locale_t.h:20:1: + X_BITS_TYPES___LOCALE_T_H = 1 // __locale_t.h:21:1: + X_BITS_UINTN_IDENTITY_H = 1 // uintn-identity.h:24:1: + X_BSD_SIZE_T_ = 0 // stddef.h:189:1: + X_BSD_SIZE_T_DEFINED_ = 0 // stddef.h:192:1: + X_DEFAULT_SOURCE = 1 // features.h:227:1: + X_ENDIAN_H = 1 // endian.h:19:1: + X_FEATURES_H = 1 // features.h:19:1: + X_FILE_OFFSET_BITS = 64 // :25:1: + X_GCC_SIZE_T = 0 // stddef.h:195:1: + X_LP64 = 1 // :284:1: + X_POSIX_C_SOURCE = 200809 // features.h:281:1: + X_POSIX_SOURCE = 1 // features.h:279:1: + X_RWLOCK_INTERNAL_H = 0 // struct_rwlock.h:21:1: + X_SIZET_ = 0 // stddef.h:196:1: + X_SIZE_T = 0 // stddef.h:183:1: + X_SIZE_T_ = 0 // stddef.h:188:1: + X_SIZE_T_DECLARED = 0 // stddef.h:193:1: + X_SIZE_T_DEFINED = 0 // stddef.h:191:1: + 
X_SIZE_T_DEFINED_ = 0 // stddef.h:190:1: + X_STDC_PREDEF_H = 1 // :162:1: + X_STRUCT_TIMESPEC = 1 // struct_timespec.h:3:1: + X_SYS_CDEFS_H = 1 // cdefs.h:19:1: + X_SYS_SELECT_H = 1 // select.h:22:1: + X_SYS_SIZE_T_H = 0 // stddef.h:184:1: + X_SYS_TIME_H = 1 // time.h:19:1: + X_SYS_TYPES_H = 1 // types.h:23:1: + X_THREAD_MUTEX_INTERNAL_H = 1 // struct_mutex.h:20:1: + X_THREAD_SHARED_TYPES_H = 1 // thread-shared-types.h:20:1: + X_TIME_H = 1 // time.h:23:1: + X_T_SIZE = 0 // stddef.h:186:1: + X_T_SIZE_ = 0 // stddef.h:185:1: + X_UUID_UUID_H = 0 // uuid.h:36:1: + Linux = 1 // :231:1: + Unix = 1 // :177:1: +) + +// Values for the first argument to `getitimer' and `setitimer'. +const ( /* time.h:89:1: */ + // Timers run in real time. + ITIMER_REAL = 0 + // Timers run only when the process is executing. + ITIMER_VIRTUAL = 1 + // Timers run when the process is executing and when + // the system is executing on behalf of the process. + ITIMER_PROF = 2 +) + +type Ptrdiff_t = int64 /* :3:26 */ + +type Size_t = uint64 /* :9:23 */ + +type Wchar_t = int32 /* :15:24 */ + +type X__int128_t = struct { + Flo int64 + Fhi int64 +} /* :21:43 */ // must match modernc.org/mathutil.Int128 +type X__uint128_t = struct { + Flo uint64 + Fhi uint64 +} /* :22:44 */ // must match modernc.org/mathutil.Int128 + +type X__builtin_va_list = uintptr /* :46:14 */ +type X__float128 = float64 /* :47:21 */ + +// Public include file for the UUID library +// +// Copyright (C) 1996, 1997, 1998 Theodore Ts'o. +// +// %Begin-Header% +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// 1. Redistributions of source code must retain the above copyright +// notice, and the entire permission notice in its entirety, +// including the disclaimer of warranties. +// 2. Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// 3. The name of the author may not be used to endorse or promote +// products derived from this software without specific prior +// written permission. +// +// THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED +// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF +// WHICH ARE HEREBY DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT +// OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR +// BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH +// DAMAGE. +// %End-Header% + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. 
+// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// POSIX Standard: 2.6 Primitive System Data Types + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// These are defined by the user (or the compiler) +// to specify the desired environment: +// +// __STRICT_ANSI__ ISO Standard C. +// _ISOC99_SOURCE Extensions to ISO C89 from ISO C99. +// _ISOC11_SOURCE Extensions to ISO C99 from ISO C11. +// _ISOC2X_SOURCE Extensions to ISO C99 from ISO C2X. +// __STDC_WANT_LIB_EXT2__ +// Extensions to ISO C99 from TR 27431-2:2010. +// __STDC_WANT_IEC_60559_BFP_EXT__ +// Extensions to ISO C11 from TS 18661-1:2014. +// __STDC_WANT_IEC_60559_FUNCS_EXT__ +// Extensions to ISO C11 from TS 18661-4:2015. +// __STDC_WANT_IEC_60559_TYPES_EXT__ +// Extensions to ISO C11 from TS 18661-3:2015. +// +// _POSIX_SOURCE IEEE Std 1003.1. +// _POSIX_C_SOURCE If ==1, like _POSIX_SOURCE; if >=2 add IEEE Std 1003.2; +// if >=199309L, add IEEE Std 1003.1b-1993; +// if >=199506L, add IEEE Std 1003.1c-1995; +// if >=200112L, all of IEEE 1003.1-2004 +// if >=200809L, all of IEEE 1003.1-2008 +// _XOPEN_SOURCE Includes POSIX and XPG things. Set to 500 if +// Single Unix conformance is wanted, to 600 for the +// sixth revision, to 700 for the seventh revision. +// _XOPEN_SOURCE_EXTENDED XPG things and X/Open Unix extensions. +// _LARGEFILE_SOURCE Some more functions for correct standard I/O. +// _LARGEFILE64_SOURCE Additional functionality from LFS for large files. +// _FILE_OFFSET_BITS=N Select default filesystem interface. +// _ATFILE_SOURCE Additional *at interfaces. +// _GNU_SOURCE All of the above, plus GNU extensions. +// _DEFAULT_SOURCE The default set of features (taking precedence over +// __STRICT_ANSI__). +// +// _FORTIFY_SOURCE Add security hardening to many library functions. +// Set to 1 or 2; 2 performs stricter checks than 1. +// +// _REENTRANT, _THREAD_SAFE +// Obsolete; equivalent to _POSIX_C_SOURCE=199506L. +// +// The `-ansi' switch to the GNU C compiler, and standards conformance +// options such as `-std=c99', define __STRICT_ANSI__. If none of +// these are defined, or if _DEFAULT_SOURCE is defined, the default is +// to have _POSIX_SOURCE set to one and _POSIX_C_SOURCE set to +// 200809L, as well as enabling miscellaneous functions from BSD and +// SVID. If more than one of these are defined, they accumulate. For +// example __STRICT_ANSI__, _POSIX_SOURCE and _POSIX_C_SOURCE together +// give you ISO C, 1003.1, and 1003.2, but nothing else. 
+// +// These are defined by this file and are used by the +// header files to decide what to declare or define: +// +// __GLIBC_USE (F) Define things from feature set F. This is defined +// to 1 or 0; the subsequent macros are either defined +// or undefined, and those tests should be moved to +// __GLIBC_USE. +// __USE_ISOC11 Define ISO C11 things. +// __USE_ISOC99 Define ISO C99 things. +// __USE_ISOC95 Define ISO C90 AMD1 (C95) things. +// __USE_ISOCXX11 Define ISO C++11 things. +// __USE_POSIX Define IEEE Std 1003.1 things. +// __USE_POSIX2 Define IEEE Std 1003.2 things. +// __USE_POSIX199309 Define IEEE Std 1003.1, and .1b things. +// __USE_POSIX199506 Define IEEE Std 1003.1, .1b, .1c and .1i things. +// __USE_XOPEN Define XPG things. +// __USE_XOPEN_EXTENDED Define X/Open Unix things. +// __USE_UNIX98 Define Single Unix V2 things. +// __USE_XOPEN2K Define XPG6 things. +// __USE_XOPEN2KXSI Define XPG6 XSI things. +// __USE_XOPEN2K8 Define XPG7 things. +// __USE_XOPEN2K8XSI Define XPG7 XSI things. +// __USE_LARGEFILE Define correct standard I/O things. +// __USE_LARGEFILE64 Define LFS things with separate names. +// __USE_FILE_OFFSET64 Define 64bit interface as default. +// __USE_MISC Define things from 4.3BSD or System V Unix. +// __USE_ATFILE Define *at interfaces and AT_* constants for them. +// __USE_GNU Define GNU extensions. +// __USE_FORTIFY_LEVEL Additional security measures used, according to level. +// +// The macros `__GNU_LIBRARY__', `__GLIBC__', and `__GLIBC_MINOR__' are +// defined by this file unconditionally. `__GNU_LIBRARY__' is provided +// only for compatibility. All new code should use the other symbols +// to test for features. +// +// All macros listed above as possibly being defined by this file are +// explicitly undefined if they are not explicitly defined. +// Feature-test macros that are not defined by the user or compiler +// but are implied by the other feature-test macros defined (or by the +// lack of any definitions) are defined by the file. +// +// ISO C feature test macros depend on the definition of the macro +// when an affected header is included, not when the first system +// header is included, and so they are handled in +// , which does not have a multiple include +// guard. Feature test macros that can be handled from the first +// system header included are handled here. + +// Undefine everything, so we get a clean slate. + +// Suppress kernel-name space pollution unless user expressedly asks +// for it. + +// Convenience macro to test the version of gcc. +// Use like this: +// #if __GNUC_PREREQ (2,8) +// ... code requiring gcc 2.8 or later ... +// #endif +// Note: only works for GCC 2.0 and later, because __GNUC_MINOR__ was +// added in 2.0. + +// Similarly for clang. Features added to GCC after version 4.2 may +// or may not also be available in clang, and clang's definitions of +// __GNUC(_MINOR)__ are fixed at 4 and 2 respectively. Not all such +// features can be queried via __has_extension/__has_feature. + +// Whether to use feature set F. + +// _BSD_SOURCE and _SVID_SOURCE are deprecated aliases for +// _DEFAULT_SOURCE. If _DEFAULT_SOURCE is present we do not +// issue a warning; the expectation is that the source is being +// transitioned to use the new macro. + +// If _GNU_SOURCE was defined by the user, turn on all the other features. + +// If nothing (other than _GNU_SOURCE and _DEFAULT_SOURCE) is defined, +// define _DEFAULT_SOURCE. + +// This is to enable the ISO C2X extension. + +// This is to enable the ISO C11 extension. 
+ +// This is to enable the ISO C99 extension. + +// This is to enable the ISO C90 Amendment 1:1995 extension. + +// If none of the ANSI/POSIX macros are defined, or if _DEFAULT_SOURCE +// is defined, use POSIX.1-2008 (or another version depending on +// _XOPEN_SOURCE). + +// Some C libraries once required _REENTRANT and/or _THREAD_SAFE to be +// defined in all multithreaded code. GNU libc has not required this +// for many years. We now treat them as compatibility synonyms for +// _POSIX_C_SOURCE=199506L, which is the earliest level of POSIX with +// comprehensive support for multithreaded code. Using them never +// lowers the selected level of POSIX conformance, only raises it. + +// The function 'gets' existed in C89, but is impossible to use +// safely. It has been removed from ISO C11 and ISO C++14. Note: for +// compatibility with various implementations of , this test +// must consider only the value of __cplusplus when compiling C++. + +// GNU formerly extended the scanf functions with modified format +// specifiers %as, %aS, and %a[...] that allocate a buffer for the +// input using malloc. This extension conflicts with ISO C99, which +// defines %a as a standalone format specifier that reads a floating- +// point number; moreover, POSIX.1-2008 provides the same feature +// using the modifier letter 'm' instead (%ms, %mS, %m[...]). +// +// We now follow C99 unless GNU extensions are active and the compiler +// is specifically in C89 or C++98 mode (strict or not). For +// instance, with GCC, -std=gnu11 will have C99-compliant scanf with +// or without -D_GNU_SOURCE, but -std=c89 -D_GNU_SOURCE will have the +// old extension. + +// Get definitions of __STDC_* predefined macros, if the compiler has +// not preincluded this header automatically. +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// This macro indicates that the installed library is the GNU C Library. +// For historic reasons the value now is 6 and this will stay from now +// on. The use of this variable is deprecated. Use __GLIBC__ and +// __GLIBC_MINOR__ now (see below) when you want to test for a specific +// GNU C library version and use the values in to get +// the sonames of the shared libraries. + +// Major and minor version number of the GNU C library package. Use +// these macros to test for features in specific releases. + +// This is here only because every header file already includes this one. +// Copyright (C) 1992-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. 
+// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// We are almost always included from features.h. + +// The GNU libc does not support any K&R compilers or the traditional mode +// of ISO C compilers anymore. Check for some of the combinations not +// anymore supported. + +// Some user header file might have defined this before. + +// All functions, except those with callbacks or those that +// synchronize memory, are leaf functions. + +// GCC can always grok prototypes. For C++ programs we add throw() +// to help it optimize the function calls. But this works only with +// gcc 2.8.x and egcs. For gcc 3.2 and up we even mark C functions +// as non-throwing using a function attribute since programs can use +// the -fexceptions options for C code as well. + +// Compilers that are not clang may object to +// #if defined __clang__ && __has_extension(...) +// even though they do not need to evaluate the right-hand side of the &&. + +// These two macros are not used in glibc anymore. They are kept here +// only because some other projects expect the macros to be defined. + +// For these things, GCC behaves the ANSI way normally, +// and the non-ANSI way under -traditional. + +// This is not a typedef so `const __ptr_t' does the right thing. + +// C++ needs to know that types and declarations are C, not C++. + +// Fortify support. + +// Support for flexible arrays. +// Headers that should use flexible arrays only if they're "real" +// (e.g. only if they won't affect sizeof()) should test +// #if __glibc_c99_flexarr_available. + +// __asm__ ("xyz") is used throughout the headers to rename functions +// at the assembly language level. This is wrapped by the __REDIRECT +// macro, in order to support compilers that can do this some other +// way. When compilers don't support asm-names at all, we have to do +// preprocessor tricks instead (which don't have exactly the right +// semantics, but it's the best we can do). +// +// Example: +// int __REDIRECT(setpgrp, (__pid_t pid, __pid_t pgrp), setpgid); + +// +// #elif __SOME_OTHER_COMPILER__ +// +// # define __REDIRECT(name, proto, alias) name proto; _Pragma("let " #name " = " #alias) + +// GCC has various useful declarations that can be made with the +// `__attribute__' syntax. All of the ways we use this do fine if +// they are omitted for compilers that don't understand it. + +// At some point during the gcc 2.96 development the `malloc' attribute +// for functions was introduced. We don't want to use it unconditionally +// (although this would be possible) since it generates warnings. + +// Tell the compiler which arguments to an allocation function +// indicate the size of the allocation. + +// At some point during the gcc 2.96 development the `pure' attribute +// for functions was introduced. We don't want to use it unconditionally +// (although this would be possible) since it generates warnings. + +// This declaration tells the compiler that the value is constant. + +// At some point during the gcc 3.1 development the `used' attribute +// for functions was introduced. We don't want to use it unconditionally +// (although this would be possible) since it generates warnings. 
+ +// Since version 3.2, gcc allows marking deprecated functions. + +// Since version 4.5, gcc also allows one to specify the message printed +// when a deprecated function is used. clang claims to be gcc 4.2, but +// may also support this feature. + +// At some point during the gcc 2.8 development the `format_arg' attribute +// for functions was introduced. We don't want to use it unconditionally +// (although this would be possible) since it generates warnings. +// If several `format_arg' attributes are given for the same function, in +// gcc-3.0 and older, all but the last one are ignored. In newer gccs, +// all designated arguments are considered. + +// At some point during the gcc 2.97 development the `strfmon' format +// attribute for functions was introduced. We don't want to use it +// unconditionally (although this would be possible) since it +// generates warnings. + +// The nonull function attribute allows to mark pointer parameters which +// must not be NULL. + +// If fortification mode, we warn about unused results of certain +// function calls which can lead to problems. + +// Forces a function to be always inlined. +// The Linux kernel defines __always_inline in stddef.h (283d7573), and +// it conflicts with this definition. Therefore undefine it first to +// allow either header to be included first. + +// Associate error messages with the source location of the call site rather +// than with the source location inside the function. + +// GCC 4.3 and above with -std=c99 or -std=gnu99 implements ISO C99 +// inline semantics, unless -fgnu89-inline is used. Using __GNUC_STDC_INLINE__ +// or __GNUC_GNU_INLINE is not a good enough check for gcc because gcc versions +// older than 4.3 may define these macros and still not guarantee GNU inlining +// semantics. +// +// clang++ identifies itself as gcc-4.2, but has support for GNU inlining +// semantics, that can be checked for by using the __GNUC_STDC_INLINE_ and +// __GNUC_GNU_INLINE__ macro definitions. + +// GCC 4.3 and above allow passing all anonymous arguments of an +// __extern_always_inline function to some other vararg function. + +// It is possible to compile containing GCC extensions even if GCC is +// run in pedantic mode if the uses are carefully marked using the +// `__extension__' keyword. But this is not generally available before +// version 2.8. + +// __restrict is known in EGCS 1.2 and above. + +// ISO C99 also allows to declare arrays as non-overlapping. The syntax is +// array_name[restrict] +// GCC 3.1 supports this. + +// Describes a char array whose address can safely be passed as the first +// argument to strncpy and strncat, as the char array is not necessarily +// a NUL-terminated string. + +// Undefine (also defined in libc-symbols.h). +// Copies attributes from the declaration or type referenced by +// the argument. + +// Determine the wordsize from the preprocessor defines. + +// Both x86-64 and x32 use the 64-bit system call interface. +// Properties of long double type. ldbl-96 version. +// Copyright (C) 2016-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. 
+// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// long double is distinct from double, so there is nothing to +// define here. + +// __glibc_macro_warning (MESSAGE) issues warning MESSAGE. This is +// intended for use in preprocessor macros. +// +// Note: MESSAGE must be a _single_ string; concatenation of string +// literals is not supported. + +// Generic selection (ISO C11) is a C-only feature, available in GCC +// since version 4.9. Previous versions do not provide generic +// selection, even though they might set __STDC_VERSION__ to 201112L, +// when in -std=c11 mode. Thus, we must check for !defined __GNUC__ +// when testing __STDC_VERSION__ for generic selection support. +// On the other hand, Clang also defines __GNUC__, so a clang-specific +// check is required to enable the use of generic selection. + +// If we don't have __REDIRECT, prototypes will be missing if +// __USE_FILE_OFFSET64 but not __USE_LARGEFILE[64]. + +// Decide whether we can define 'extern inline' functions in headers. + +// This is here only because every header file already includes this one. +// Get the definitions of all the appropriate `__stub_FUNCTION' symbols. +// contains `#define __stub_FUNCTION' when FUNCTION is a stub +// that will always return failure (and set errno to ENOSYS). +// This file is automatically generated. +// This file selects the right generated file of `__stub_FUNCTION' macros +// based on the architecture being compiled for. + +// This file is automatically generated. +// It defines a symbol `__stub_FUNCTION' for each function +// in the C library which is a stub, meaning it will fail +// every time called, usually setting errno to ENOSYS. + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Determine the wordsize from the preprocessor defines. + +// Both x86-64 and x32 use the 64-bit system call interface. +// Bit size of the time_t type at glibc build time, x86-64 and x32 case. +// Copyright (C) 2018-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// For others, time size is word size. + +// Convenience types. +type X__u_char = uint8 /* types.h:31:23 */ +type X__u_short = uint16 /* types.h:32:28 */ +type X__u_int = uint32 /* types.h:33:22 */ +type X__u_long = uint64 /* types.h:34:27 */ + +// Fixed-size types, underlying types depend on word size and compiler. +type X__int8_t = int8 /* types.h:37:21 */ +type X__uint8_t = uint8 /* types.h:38:23 */ +type X__int16_t = int16 /* types.h:39:26 */ +type X__uint16_t = uint16 /* types.h:40:28 */ +type X__int32_t = int32 /* types.h:41:20 */ +type X__uint32_t = uint32 /* types.h:42:22 */ +type X__int64_t = int64 /* types.h:44:25 */ +type X__uint64_t = uint64 /* types.h:45:27 */ + +// Smallest types with at least a given width. +type X__int_least8_t = X__int8_t /* types.h:52:18 */ +type X__uint_least8_t = X__uint8_t /* types.h:53:19 */ +type X__int_least16_t = X__int16_t /* types.h:54:19 */ +type X__uint_least16_t = X__uint16_t /* types.h:55:20 */ +type X__int_least32_t = X__int32_t /* types.h:56:19 */ +type X__uint_least32_t = X__uint32_t /* types.h:57:20 */ +type X__int_least64_t = X__int64_t /* types.h:58:19 */ +type X__uint_least64_t = X__uint64_t /* types.h:59:20 */ + +// quad_t is also 64 bits. +type X__quad_t = int64 /* types.h:63:18 */ +type X__u_quad_t = uint64 /* types.h:64:27 */ + +// Largest integral types. +type X__intmax_t = int64 /* types.h:72:18 */ +type X__uintmax_t = uint64 /* types.h:73:27 */ + +// The machine-dependent file defines __*_T_TYPE +// macros for each of the OS types we define below. The definitions +// of those macros must use the following macros for underlying types. +// We define __S_TYPE and __U_TYPE for the signed and unsigned +// variants of each of the following integer types on this machine. +// +// 16 -- "natural" 16-bit type (always short) +// 32 -- "natural" 32-bit type (always int) +// 64 -- "natural" 64-bit type (long or long long) +// LONG32 -- 32-bit type, traditionally long +// QUAD -- 64-bit type, traditionally long long +// WORD -- natural type of __WORDSIZE bits (int or long) +// LONGWORD -- type of __WORDSIZE bits, traditionally long +// +// We distinguish WORD/LONGWORD, 32/LONG32, and 64/QUAD so that the +// conventional uses of `long' or `long long' type modifiers match the +// types we define, even when a less-adorned type would be the same size. 
+// This matters for (somewhat) portably writing printf/scanf formats for +// these types, where using the appropriate l or ll format modifiers can +// make the typedefs and the formats match up across all GNU platforms. If +// we used `long' when it's 64 bits where `long long' is expected, then the +// compiler would warn about the formats not matching the argument types, +// and the programmer changing them to shut up the compiler would break the +// program's portability. +// +// Here we assume what is presently the case in all the GCC configurations +// we support: long long is always 64 bits, long is always word/address size, +// and int is always 32 bits. + +// No need to mark the typedef with __extension__. +// bits/typesizes.h -- underlying types for *_t. Linux/x86-64 version. +// Copyright (C) 2012-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// See for the meaning of these macros. This file exists so +// that need not vary across different GNU platforms. + +// X32 kernel interface is 64-bit. + +// Tell the libc code that off_t and off64_t are actually the same type +// for all ABI purposes, even if possibly expressed as different base types +// for C type-checking purposes. + +// Same for ino_t and ino64_t. + +// And for __rlim_t and __rlim64_t. + +// And for fsblkcnt_t, fsblkcnt64_t, fsfilcnt_t and fsfilcnt64_t. + +// Number of descriptors that can fit in an `fd_set'. + +// bits/time64.h -- underlying types for __time64_t. Generic version. +// Copyright (C) 2018-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Define __TIME64_T_TYPE so that it is always a 64-bit type. + +// If we already have 64-bit time type then use it. + +type X__dev_t = uint64 /* types.h:145:25 */ // Type of device numbers. +type X__uid_t = uint32 /* types.h:146:25 */ // Type of user identifications. +type X__gid_t = uint32 /* types.h:147:25 */ // Type of group identifications. +type X__ino_t = uint64 /* types.h:148:25 */ // Type of file serial numbers. +type X__ino64_t = uint64 /* types.h:149:27 */ // Type of file serial numbers (LFS). +type X__mode_t = uint32 /* types.h:150:26 */ // Type of file attribute bitmasks. 
+type X__nlink_t = uint64 /* types.h:151:27 */ // Type of file link counts. +type X__off_t = int64 /* types.h:152:25 */ // Type of file sizes and offsets. +type X__off64_t = int64 /* types.h:153:27 */ // Type of file sizes and offsets (LFS). +type X__pid_t = int32 /* types.h:154:25 */ // Type of process identifications. +type X__fsid_t = struct{ F__val [2]int32 } /* types.h:155:26 */ // Type of file system IDs. +type X__clock_t = int64 /* types.h:156:27 */ // Type of CPU usage counts. +type X__rlim_t = uint64 /* types.h:157:26 */ // Type for resource measurement. +type X__rlim64_t = uint64 /* types.h:158:28 */ // Type for resource measurement (LFS). +type X__id_t = uint32 /* types.h:159:24 */ // General type for IDs. +type X__time_t = int64 /* types.h:160:26 */ // Seconds since the Epoch. +type X__useconds_t = uint32 /* types.h:161:30 */ // Count of microseconds. +type X__suseconds_t = int64 /* types.h:162:31 */ // Signed count of microseconds. + +type X__daddr_t = int32 /* types.h:164:27 */ // The type of a disk address. +type X__key_t = int32 /* types.h:165:25 */ // Type of an IPC key. + +// Clock ID used in clock and timer functions. +type X__clockid_t = int32 /* types.h:168:29 */ + +// Timer ID returned by `timer_create'. +type X__timer_t = uintptr /* types.h:171:12 */ + +// Type to represent block size. +type X__blksize_t = int64 /* types.h:174:29 */ + +// Types from the Large File Support interface. + +// Type to count number of disk blocks. +type X__blkcnt_t = int64 /* types.h:179:28 */ +type X__blkcnt64_t = int64 /* types.h:180:30 */ + +// Type to count file system blocks. +type X__fsblkcnt_t = uint64 /* types.h:183:30 */ +type X__fsblkcnt64_t = uint64 /* types.h:184:32 */ + +// Type to count file system nodes. +type X__fsfilcnt_t = uint64 /* types.h:187:30 */ +type X__fsfilcnt64_t = uint64 /* types.h:188:32 */ + +// Type of miscellaneous file system fields. +type X__fsword_t = int64 /* types.h:191:28 */ + +type X__ssize_t = int64 /* types.h:193:27 */ // Type of a byte count, or error. + +// Signed long type used in system calls. +type X__syscall_slong_t = int64 /* types.h:196:33 */ +// Unsigned long type used in system calls. +type X__syscall_ulong_t = uint64 /* types.h:198:33 */ + +// These few don't really vary by system, they always correspond +// +// to one of the other defined types. +type X__loff_t = X__off64_t /* types.h:202:19 */ // Type of file sizes and offsets (LFS). +type X__caddr_t = uintptr /* types.h:203:14 */ + +// Duplicates info from stdint.h but this is used in unistd.h. +type X__intptr_t = int64 /* types.h:206:25 */ + +// Duplicate info from sys/socket.h. +type X__socklen_t = uint32 /* types.h:209:23 */ + +// C99: An integer type that can be accessed as an atomic entity, +// +// even in the presence of asynchronous interrupts. +// It is not currently necessary for this to be machine-specific. +type X__sig_atomic_t = int32 /* types.h:214:13 */ + +// Seconds since the Epoch, visible to user code when time_t is too +// narrow only for consistency with the old way of widening too-narrow +// types. User code should never use __time64_t. 
+ +type U_char = X__u_char /* types.h:33:18 */ +type U_short = X__u_short /* types.h:34:19 */ +type U_int = X__u_int /* types.h:35:17 */ +type U_long = X__u_long /* types.h:36:18 */ +type Quad_t = X__quad_t /* types.h:37:18 */ +type U_quad_t = X__u_quad_t /* types.h:38:20 */ +type Fsid_t = X__fsid_t /* types.h:39:18 */ +type Loff_t = X__loff_t /* types.h:42:18 */ + +type Ino_t = X__ino64_t /* types.h:49:19 */ + +type Dev_t = X__dev_t /* types.h:59:17 */ + +type Gid_t = X__gid_t /* types.h:64:17 */ + +type Mode_t = X__mode_t /* types.h:69:18 */ + +type Nlink_t = X__nlink_t /* types.h:74:19 */ + +type Uid_t = X__uid_t /* types.h:79:17 */ + +type Off_t = X__off64_t /* types.h:87:19 */ + +type Pid_t = X__pid_t /* types.h:97:17 */ + +type Id_t = X__id_t /* types.h:103:16 */ + +type Ssize_t = X__ssize_t /* types.h:108:19 */ + +type Daddr_t = X__daddr_t /* types.h:114:19 */ +type Caddr_t = X__caddr_t /* types.h:115:19 */ + +type Key_t = X__key_t /* types.h:121:17 */ + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Returned by `clock'. +type Clock_t = X__clock_t /* clock_t.h:7:19 */ + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Clock ID used in clock and timer functions. +type Clockid_t = X__clockid_t /* clockid_t.h:7:21 */ + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. 
+// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Returned by `time'. +type Time_t = X__time_t /* time_t.h:7:18 */ + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Timer ID returned by `timer_create'. +type Timer_t = X__timer_t /* timer_t.h:7:19 */ + +// Wide character type. +// Locale-writers should change this as necessary to +// be big enough to hold unique values not between 0 and 127, +// and not (wchar_t) -1, for each defined multibyte character. + +// Define this type if we are doing the whole job, +// or if we want this type in particular. + +// A null pointer constant. + +// Old compatibility names for C types. +type Ulong = uint64 /* types.h:148:27 */ +type Ushort = uint16 /* types.h:149:28 */ +type Uint = uint32 /* types.h:150:22 */ + +// These size-specific names are used by some of the inet code. + +// Define intN_t types. +// Copyright (C) 2017-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +type Int8_t = X__int8_t /* stdint-intn.h:24:18 */ +type Int16_t = X__int16_t /* stdint-intn.h:25:19 */ +type Int32_t = X__int32_t /* stdint-intn.h:26:19 */ +type Int64_t = X__int64_t /* stdint-intn.h:27:19 */ + +// These were defined by ISO C without the first `_'. +type U_int8_t = X__uint8_t /* types.h:158:19 */ +type U_int16_t = X__uint16_t /* types.h:159:20 */ +type U_int32_t = X__uint32_t /* types.h:160:20 */ +type U_int64_t = X__uint64_t /* types.h:161:20 */ + +type Register_t = int32 /* types.h:164:13 */ + +// It also defines `fd_set' and the FD_* macros for `select'. +// `fd_set' type and related macros, and `select'/`pselect' declarations. +// Copyright (C) 1996-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// POSIX 1003.1g: 6.2 Select from File Descriptor Sets + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Get definition of needed basic types. +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Get __FD_* definitions. +// Copyright (C) 1997-2020 Free Software Foundation, Inc. 
+// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Determine the wordsize from the preprocessor defines. + +// Both x86-64 and x32 use the 64-bit system call interface. + +// Get sigset_t. + +type X__sigset_t = struct{ F__val [16]uint64 } /* __sigset_t.h:8:3 */ + +// A set of signals to be blocked, unblocked, or waited for. +type Sigset_t = X__sigset_t /* sigset_t.h:7:20 */ + +// Get definition of timer specification structures. + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// A time value that is accurate to the nearest +// +// microsecond but also has a range of years. +type Timeval = struct { + Ftv_sec X__time_t + Ftv_usec X__suseconds_t +} /* struct_timeval.h:8:1 */ + +// NB: Include guard matches what uses. + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Endian macros for string.h functions +// Copyright (C) 1992-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. 
+// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// POSIX.1b structure for a time value. This is like a `struct timeval' but +// +// has nanoseconds instead of microseconds. +type Timespec = struct { + Ftv_sec X__time_t + Ftv_nsec X__syscall_slong_t +} /* struct_timespec.h:10:1 */ + +type Suseconds_t = X__suseconds_t /* select.h:43:23 */ + +// The fd_set member is required to be an array of longs. +type X__fd_mask = int64 /* select.h:49:18 */ + +// Some versions of define this macros. +// It's easier to assume 8-bit bytes than to get CHAR_BIT. + +// fd_set for select and pselect. +type Fd_set = struct{ F__fds_bits [16]X__fd_mask } /* select.h:70:5 */ + +// Maximum number of file descriptors in `fd_set'. + +// Sometimes the fd_set member is assumed to have this type. +type Fd_mask = X__fd_mask /* select.h:77:19 */ + +// Define some inlines helping to catch common problems. + +type Blksize_t = X__blksize_t /* types.h:185:21 */ + +// Types from the Large File Support interface. +type Blkcnt_t = X__blkcnt64_t /* types.h:205:22 */ // Type to count number of disk blocks. +type Fsblkcnt_t = X__fsblkcnt64_t /* types.h:209:24 */ // Type to count file system blocks. +type Fsfilcnt_t = X__fsfilcnt64_t /* types.h:213:24 */ // Type to count file system inodes. + +// Now add the thread types. +// Declaration of common pthread types for all architectures. +// Copyright (C) 2017-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// For internal mutex and condition variable definitions. +// Common threading primitives definitions for both POSIX and C11. +// Copyright (C) 2017-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Arch-specific definitions. Each architecture must define the following +// macros to define the expected sizes of pthread data types: +// +// __SIZEOF_PTHREAD_ATTR_T - size of pthread_attr_t. +// __SIZEOF_PTHREAD_MUTEX_T - size of pthread_mutex_t. +// __SIZEOF_PTHREAD_MUTEXATTR_T - size of pthread_mutexattr_t. +// __SIZEOF_PTHREAD_COND_T - size of pthread_cond_t. +// __SIZEOF_PTHREAD_CONDATTR_T - size of pthread_condattr_t. +// __SIZEOF_PTHREAD_RWLOCK_T - size of pthread_rwlock_t. +// __SIZEOF_PTHREAD_RWLOCKATTR_T - size of pthread_rwlockattr_t. +// __SIZEOF_PTHREAD_BARRIER_T - size of pthread_barrier_t. +// __SIZEOF_PTHREAD_BARRIERATTR_T - size of pthread_barrierattr_t. +// +// The additional macro defines any constraint for the lock alignment +// inside the thread structures: +// +// __LOCK_ALIGNMENT - for internal lock/futex usage. +// +// Same idea but for the once locking primitive: +// +// __ONCE_ALIGNMENT - for pthread_once_t/once_flag definition. + +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Determine the wordsize from the preprocessor defines. + +// Both x86-64 and x32 use the 64-bit system call interface. + +// Common definition of pthread_mutex_t. + +type X__pthread_internal_list = struct { + F__prev uintptr + F__next uintptr +} /* thread-shared-types.h:49:9 */ + +// Type to count file system inodes. + +// Now add the thread types. +// Declaration of common pthread types for all architectures. +// Copyright (C) 2017-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// For internal mutex and condition variable definitions. +// Common threading primitives definitions for both POSIX and C11. +// Copyright (C) 2017-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. 
+// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Arch-specific definitions. Each architecture must define the following +// macros to define the expected sizes of pthread data types: +// +// __SIZEOF_PTHREAD_ATTR_T - size of pthread_attr_t. +// __SIZEOF_PTHREAD_MUTEX_T - size of pthread_mutex_t. +// __SIZEOF_PTHREAD_MUTEXATTR_T - size of pthread_mutexattr_t. +// __SIZEOF_PTHREAD_COND_T - size of pthread_cond_t. +// __SIZEOF_PTHREAD_CONDATTR_T - size of pthread_condattr_t. +// __SIZEOF_PTHREAD_RWLOCK_T - size of pthread_rwlock_t. +// __SIZEOF_PTHREAD_RWLOCKATTR_T - size of pthread_rwlockattr_t. +// __SIZEOF_PTHREAD_BARRIER_T - size of pthread_barrier_t. +// __SIZEOF_PTHREAD_BARRIERATTR_T - size of pthread_barrierattr_t. +// +// The additional macro defines any constraint for the lock alignment +// inside the thread structures: +// +// __LOCK_ALIGNMENT - for internal lock/futex usage. +// +// Same idea but for the once locking primitive: +// +// __ONCE_ALIGNMENT - for pthread_once_t/once_flag definition. + +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Determine the wordsize from the preprocessor defines. + +// Both x86-64 and x32 use the 64-bit system call interface. + +// Common definition of pthread_mutex_t. + +type X__pthread_list_t = X__pthread_internal_list /* thread-shared-types.h:53:3 */ + +type X__pthread_internal_slist = struct{ F__next uintptr } /* thread-shared-types.h:55:9 */ + +type X__pthread_slist_t = X__pthread_internal_slist /* thread-shared-types.h:58:3 */ + +// Arch-specific mutex definitions. A generic implementation is provided +// by sysdeps/nptl/bits/struct_mutex.h. If required, an architecture +// can override it by defining: +// +// 1. struct __pthread_mutex_s (used on both pthread_mutex_t and mtx_t +// definition). It should contains at least the internal members +// defined in the generic version. +// +// 2. __LOCK_ALIGNMENT for any extra attribute for internal lock used with +// atomic operations. +// +// 3. The macro __PTHREAD_MUTEX_INITIALIZER used for static initialization. +// It should initialize the mutex internal flag. + +// x86 internal mutex struct definitions. 
+// Copyright (C) 2019-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +type X__pthread_mutex_s = struct { + F__lock int32 + F__count uint32 + F__owner int32 + F__nusers uint32 + F__kind int32 + F__spins int16 + F__elision int16 + F__list X__pthread_list_t +} /* struct_mutex.h:22:1 */ + +// Arch-sepecific read-write lock definitions. A generic implementation is +// provided by struct_rwlock.h. If required, an architecture can override it +// by defining: +// +// 1. struct __pthread_rwlock_arch_t (used on pthread_rwlock_t definition). +// It should contain at least the internal members defined in the +// generic version. +// +// 2. The macro __PTHREAD_RWLOCK_INITIALIZER used for static initialization. +// It should initialize the rwlock internal type. + +// x86 internal rwlock struct definitions. +// Copyright (C) 2019-2020 Free Software Foundation, Inc. +// +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +type X__pthread_rwlock_arch_t = struct { + F__readers uint32 + F__writers uint32 + F__wrphase_futex uint32 + F__writers_futex uint32 + F__pad3 uint32 + F__pad4 uint32 + F__cur_writer int32 + F__shared int32 + F__rwelision int8 + F__pad1 [7]uint8 + F__pad2 uint64 + F__flags uint32 + F__ccgo_pad1 [4]byte +} /* struct_rwlock.h:23:1 */ + +// Common definition of pthread_cond_t. + +type X__pthread_cond_s = struct { + F__0 struct{ F__wseq uint64 } + F__8 struct{ F__g1_start uint64 } + F__g_refs [2]uint32 + F__g_size [2]uint32 + F__g1_orig_size uint32 + F__wrefs uint32 + F__g_signals [2]uint32 +} /* thread-shared-types.h:92:1 */ + +// Thread identifiers. The structure of the attribute type is not +// +// exposed on purpose. +type Pthread_t = uint64 /* pthreadtypes.h:27:27 */ + +// Data structures for mutex handling. The structure of the attribute +// +// type is not exposed on purpose. +type Pthread_mutexattr_t = struct { + F__ccgo_pad1 [0]uint32 + F__size [4]int8 +} /* pthreadtypes.h:36:3 */ + +// Data structure for condition variable handling. The structure of +// +// the attribute type is not exposed on purpose. 
+type Pthread_condattr_t = struct { + F__ccgo_pad1 [0]uint32 + F__size [4]int8 +} /* pthreadtypes.h:45:3 */ + +// Keys for thread-specific data +type Pthread_key_t = uint32 /* pthreadtypes.h:49:22 */ + +// Once-only execution +type Pthread_once_t = int32 /* pthreadtypes.h:53:30 */ + +type Pthread_attr_t1 = struct { + F__ccgo_pad1 [0]uint64 + F__size [56]int8 +} /* pthreadtypes.h:56:1 */ + +type Pthread_attr_t = Pthread_attr_t1 /* pthreadtypes.h:62:30 */ + +type Pthread_mutex_t = struct{ F__data X__pthread_mutex_s } /* pthreadtypes.h:72:3 */ + +type Pthread_cond_t = struct{ F__data X__pthread_cond_s } /* pthreadtypes.h:80:3 */ + +// Data structure for reader-writer lock variable handling. The +// +// structure of the attribute type is deliberately not exposed. +type Pthread_rwlock_t = struct{ F__data X__pthread_rwlock_arch_t } /* pthreadtypes.h:91:3 */ + +type Pthread_rwlockattr_t = struct { + F__ccgo_pad1 [0]uint64 + F__size [8]int8 +} /* pthreadtypes.h:97:3 */ + +// POSIX spinlock data type. +type Pthread_spinlock_t = int32 /* pthreadtypes.h:103:22 */ + +// POSIX barriers data type. The structure of the type is +// +// deliberately not exposed. +type Pthread_barrier_t = struct { + F__ccgo_pad1 [0]uint64 + F__size [32]int8 +} /* pthreadtypes.h:112:3 */ + +type Pthread_barrierattr_t = struct { + F__ccgo_pad1 [0]uint32 + F__size [4]int8 +} /* pthreadtypes.h:118:3 */ + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. 
+// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// `fd_set' type and related macros, and `select'/`pselect' declarations. +// Copyright (C) 1996-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// POSIX 1003.1g: 6.2 Select from File Descriptor Sets + +// Structure crudely representing a timezone. +// +// This is obsolete and should never be used. +type Timezone = struct { + Ftz_minuteswest int32 + Ftz_dsttime int32 +} /* time.h:52:1 */ + +// Type of the second argument to `getitimer' and +// +// the second and third arguments `setitimer'. +type Itimerval = struct { + Fit_interval struct { + Ftv_sec X__time_t + Ftv_usec X__suseconds_t + } + Fit_value struct { + Ftv_sec X__time_t + Ftv_usec X__suseconds_t + } +} /* time.h:105:1 */ + +type X__itimer_which_t = int32 /* time.h:118:13 */ + +// Convenience macros for operations on timevals. +// NOTE: `timercmp' does not work for >= or <=. + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// ISO C99 Standard: 7.23 Date and time + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Copyright (C) 1989-2020 Free Software Foundation, Inc. +// +// This file is part of GCC. +// +// GCC is free software; you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation; either version 3, or (at your option) +// any later version. +// +// GCC is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// Under Section 7 of GPL version 3, you are granted additional +// permissions described in the GCC Runtime Library Exception, version +// 3.1, as published by the Free Software Foundation. +// +// You should have received a copy of the GNU General Public License and +// a copy of the GCC Runtime Library Exception along with this program; +// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +// . + +// ISO C Standard: 7.17 Common definitions + +// Any one of these symbols __need_* means that GNU libc +// wants us just to define one data type. So don't define +// the symbols that indicate this file's entire job has been done. + +// This avoids lossage on SunOS but only if stdtypes.h comes first. +// There's no way to win with the other order! Sun lossage. + +// Sequent's header files use _PTRDIFF_T_ in some conflicting way. +// Just ignore it. + +// On VxWorks, may have defined macros like +// _TYPE_size_t which will typedef size_t. fixincludes patched the +// vxTypesBase.h so that this macro is only defined if _GCC_SIZE_T is +// not defined, and so that defining this macro defines _GCC_SIZE_T. +// If we find that the macros are still defined at this point, we must +// invoke them so that the type is defined as expected. + +// In case nobody has defined these types, but we aren't running under +// GCC 2.00, make sure that __PTRDIFF_TYPE__, __SIZE_TYPE__, and +// __WCHAR_TYPE__ have reasonable values. This can happen if the +// parts of GCC is compiled by an older compiler, that actually +// include gstddef.h, such as collect2. + +// Signed type of difference of two pointers. + +// Define this type if we are doing the whole job, +// or if we want this type in particular. + +// Unsigned type of `sizeof' something. + +// Define this type if we are doing the whole job, +// or if we want this type in particular. + +// Wide character type. +// Locale-writers should change this as necessary to +// be big enough to hold unique values not between 0 and 127, +// and not (wchar_t) -1, for each defined multibyte character. + +// Define this type if we are doing the whole job, +// or if we want this type in particular. + +// A null pointer constant. + +// This defines CLOCKS_PER_SEC, which is the number of processor clock +// ticks per second, and possibly a number of other constants. +// System-dependent timing definitions. Linux version. +// Copyright (C) 1996-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. 
+// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// ISO/IEC 9899:1999 7.23.1: Components of time +// The macro `CLOCKS_PER_SEC' is an expression with type `clock_t' that is +// the number per second of the value returned by the `clock' function. +// CAE XSH, Issue 4, Version 2: +// The value of CLOCKS_PER_SEC is required to be 1 million on all +// XSI-conformant systems. + +// Identifier for system-wide realtime clock. +// Monotonic system-wide clock. +// High-resolution timer from the CPU. +// Thread-specific CPU-time clock. +// Monotonic system-wide clock, not adjusted for frequency scaling. +// Identifier for system-wide realtime clock, updated only on ticks. +// Monotonic system-wide clock, updated only on ticks. +// Monotonic system-wide clock that includes time spent in suspension. +// Like CLOCK_REALTIME but also wakes suspended system. +// Like CLOCK_BOOTTIME but also wakes suspended system. +// Like CLOCK_REALTIME but in International Atomic Time. + +// Flag to indicate time is absolute. + +// Many of the typedefs and structs whose official home is this header +// may also need to be defined by other headers. + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// ISO C `broken-down time' structure. 
+type Tm = struct { + Ftm_sec int32 + Ftm_min int32 + Ftm_hour int32 + Ftm_mday int32 + Ftm_mon int32 + Ftm_year int32 + Ftm_wday int32 + Ftm_yday int32 + Ftm_isdst int32 + F__ccgo_pad1 [4]byte + Ftm_gmtoff int64 + Ftm_zone uintptr +} /* struct_tm.h:7:1 */ + +// NB: Include guard matches what uses. + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// NB: Include guard matches what uses. + +// POSIX.1b structure for timer start values and intervals. +type Itimerspec = struct { + Fit_interval struct { + Ftv_sec X__time_t + Ftv_nsec X__syscall_slong_t + } + Fit_value struct { + Ftv_sec X__time_t + Ftv_nsec X__syscall_slong_t + } +} /* struct_itimerspec.h:8:1 */ + +// Definition of locale_t. +// Copyright (C) 2017-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Definition of struct __locale_struct and __locale_t. +// Copyright (C) 1997-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// Contributed by Ulrich Drepper , 1997. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// POSIX.1-2008: the locale_t type, representing a locale context +// (implementation-namespace version). This type should be treated +// as opaque by applications; some details are exposed for the sake of +// efficiency in e.g. ctype functions. 
+ +type X__locale_struct = struct { + F__locales [13]uintptr + F__ctype_b uintptr + F__ctype_tolower uintptr + F__ctype_toupper uintptr + F__names [13]uintptr +} /* __locale_t.h:28:1 */ + +type X__locale_t = uintptr /* __locale_t.h:42:32 */ + +type Locale_t = X__locale_t /* locale_t.h:24:20 */ + +type Uuid_t = [16]uint8 /* uuid.h:44:23 */ + +var _ int8 /* gen.c:2:13: */ diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/wctype/capi_linux_loong64.go temporal-1.22.5/src/vendor/modernc.org/libc/wctype/capi_linux_loong64.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/wctype/capi_linux_loong64.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/wctype/capi_linux_loong64.go 2024-02-23 09:46:15.000000000 +0000 @@ -0,0 +1,5 @@ +// Code generated by 'ccgo wctype/gen.c -crt-import-path -export-defines -export-enums -export-externs X -export-fields F -export-structs -export-typedefs -header -hide _OSSwapInt16,_OSSwapInt32,_OSSwapInt64 -ignore-unsupported-alignment -o wctype/wctype_linux_amd64.go -pkgname wctype', DO NOT EDIT. + +package wctype + +var CAPI = map[string]struct{}{} diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/libc/wctype/wctype_linux_loong64.go temporal-1.22.5/src/vendor/modernc.org/libc/wctype/wctype_linux_loong64.go --- temporal-1.21.5-1/src/vendor/modernc.org/libc/wctype/wctype_linux_loong64.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/libc/wctype/wctype_linux_loong64.go 2024-02-23 09:46:15.000000000 +0000 @@ -0,0 +1,953 @@ +// Code generated by 'ccgo wctype/gen.c -crt-import-path "" -export-defines "" -export-enums "" -export-externs X -export-fields F -export-structs "" -export-typedefs "" -header -hide _OSSwapInt16,_OSSwapInt32,_OSSwapInt64 -ignore-unsupported-alignment -o wctype/wctype_linux_amd64.go -pkgname wctype', DO NOT EDIT. + +package wctype + +import ( + "math" + "reflect" + "sync/atomic" + "unsafe" +) + +var _ = math.Pi +var _ reflect.Kind +var _ atomic.Value +var _ unsafe.Pointer + +const ( + WEOF = 4294967295 // wctype.h:33:1: + X_ATFILE_SOURCE = 1 // features.h:342:1: + X_BITS_ENDIANNESS_H = 1 // endianness.h:2:1: + X_BITS_ENDIAN_H = 1 // endian.h:20:1: + X_BITS_TIME64_H = 1 // time64.h:24:1: + X_BITS_TYPESIZES_H = 1 // typesizes.h:24:1: + X_BITS_TYPES_H = 1 // types.h:24:1: + X_BITS_TYPES_LOCALE_T_H = 1 // locale_t.h:20:1: + X_BITS_TYPES___LOCALE_T_H = 1 // __locale_t.h:21:1: + X_BITS_WCTYPE_WCHAR_H = 1 // wctype-wchar.h:24:1: + X_DEFAULT_SOURCE = 1 // features.h:227:1: + X_FEATURES_H = 1 // features.h:19:1: + X_FILE_OFFSET_BITS = 64 // :25:1: + X_LP64 = 1 // :284:1: + X_POSIX_C_SOURCE = 200809 // features.h:281:1: + X_POSIX_SOURCE = 1 // features.h:279:1: + X_STDC_PREDEF_H = 1 // :162:1: + X_SYS_CDEFS_H = 1 // cdefs.h:19:1: + X_WCTYPE_H = 1 // wctype.h:24:1: + X_WINT_T = 1 // wint_t.h:10:1: + Linux = 1 // :231:1: + Unix = 1 // :177:1: +) + +// The characteristics are stored always in network byte order (big +// endian). We define the bit value interpretations here dependent on the +// machine's byte order. + +// Endian macros for string.h functions +// Copyright (C) 1992-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. 
+// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Definitions for byte order, according to significance of bytes, +// from low addresses to high addresses. The value is what you get by +// putting '4' in the most significant byte, '3' in the second most +// significant byte, '2' in the second least significant byte, and '1' +// in the least significant byte, and then writing down one digit for +// each byte, starting with the byte at the lowest address at the left, +// and proceeding to the byte with the highest address at the right. + +// This file defines `__BYTE_ORDER' for the particular machine. + +// i386/x86_64 are little-endian. + +// Some machines may need to use a different endianness for floating point +// values. + +const ( /* wctype-wchar.h:56:1: */ + X__ISwupper = 0 // UPPERCASE. + X__ISwlower = 1 // lowercase. + X__ISwalpha = 2 // Alphabetic. + X__ISwdigit = 3 // Numeric. + X__ISwxdigit = 4 // Hexadecimal numeric. + X__ISwspace = 5 // Whitespace. + X__ISwprint = 6 // Printing. + X__ISwgraph = 7 // Graphical. + X__ISwblank = 8 // Blank (usually SPC and TAB). + X__ISwcntrl = 9 // Control character. + X__ISwpunct = 10 // Punctuation. + X__ISwalnum = 11 // Alphanumeric. + + X_ISwupper = 16777216 // UPPERCASE. + X_ISwlower = 33554432 // lowercase. + X_ISwalpha = 67108864 // Alphabetic. + X_ISwdigit = 134217728 // Numeric. + X_ISwxdigit = 268435456 // Hexadecimal numeric. + X_ISwspace = 536870912 // Whitespace. + X_ISwprint = 1073741824 // Printing. + X_ISwgraph = -2147483648 // Graphical. + X_ISwblank = 65536 // Blank (usually SPC and TAB). + X_ISwcntrl = 131072 // Control character. + X_ISwpunct = 262144 // Punctuation. + X_ISwalnum = 524288 +) + +type Ptrdiff_t = int64 /* :3:26 */ + +type Size_t = uint64 /* :9:23 */ + +type Wchar_t = int32 /* :15:24 */ + +type X__int128_t = struct { + Flo int64 + Fhi int64 +} /* :21:43 */ // must match modernc.org/mathutil.Int128 +type X__uint128_t = struct { + Flo uint64 + Fhi uint64 +} /* :22:44 */ // must match modernc.org/mathutil.Int128 + +type X__builtin_va_list = uintptr /* :46:14 */ +type X__float128 = float64 /* :47:21 */ + +// Copyright (C) 1996-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// ISO C99 Standard: 7.25 +// Wide character classification and mapping utilities + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. 
+// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// These are defined by the user (or the compiler) +// to specify the desired environment: +// +// __STRICT_ANSI__ ISO Standard C. +// _ISOC99_SOURCE Extensions to ISO C89 from ISO C99. +// _ISOC11_SOURCE Extensions to ISO C99 from ISO C11. +// _ISOC2X_SOURCE Extensions to ISO C99 from ISO C2X. +// __STDC_WANT_LIB_EXT2__ +// Extensions to ISO C99 from TR 27431-2:2010. +// __STDC_WANT_IEC_60559_BFP_EXT__ +// Extensions to ISO C11 from TS 18661-1:2014. +// __STDC_WANT_IEC_60559_FUNCS_EXT__ +// Extensions to ISO C11 from TS 18661-4:2015. +// __STDC_WANT_IEC_60559_TYPES_EXT__ +// Extensions to ISO C11 from TS 18661-3:2015. +// +// _POSIX_SOURCE IEEE Std 1003.1. +// _POSIX_C_SOURCE If ==1, like _POSIX_SOURCE; if >=2 add IEEE Std 1003.2; +// if >=199309L, add IEEE Std 1003.1b-1993; +// if >=199506L, add IEEE Std 1003.1c-1995; +// if >=200112L, all of IEEE 1003.1-2004 +// if >=200809L, all of IEEE 1003.1-2008 +// _XOPEN_SOURCE Includes POSIX and XPG things. Set to 500 if +// Single Unix conformance is wanted, to 600 for the +// sixth revision, to 700 for the seventh revision. +// _XOPEN_SOURCE_EXTENDED XPG things and X/Open Unix extensions. +// _LARGEFILE_SOURCE Some more functions for correct standard I/O. +// _LARGEFILE64_SOURCE Additional functionality from LFS for large files. +// _FILE_OFFSET_BITS=N Select default filesystem interface. +// _ATFILE_SOURCE Additional *at interfaces. +// _GNU_SOURCE All of the above, plus GNU extensions. +// _DEFAULT_SOURCE The default set of features (taking precedence over +// __STRICT_ANSI__). +// +// _FORTIFY_SOURCE Add security hardening to many library functions. +// Set to 1 or 2; 2 performs stricter checks than 1. +// +// _REENTRANT, _THREAD_SAFE +// Obsolete; equivalent to _POSIX_C_SOURCE=199506L. +// +// The `-ansi' switch to the GNU C compiler, and standards conformance +// options such as `-std=c99', define __STRICT_ANSI__. If none of +// these are defined, or if _DEFAULT_SOURCE is defined, the default is +// to have _POSIX_SOURCE set to one and _POSIX_C_SOURCE set to +// 200809L, as well as enabling miscellaneous functions from BSD and +// SVID. If more than one of these are defined, they accumulate. For +// example __STRICT_ANSI__, _POSIX_SOURCE and _POSIX_C_SOURCE together +// give you ISO C, 1003.1, and 1003.2, but nothing else. +// +// These are defined by this file and are used by the +// header files to decide what to declare or define: +// +// __GLIBC_USE (F) Define things from feature set F. This is defined +// to 1 or 0; the subsequent macros are either defined +// or undefined, and those tests should be moved to +// __GLIBC_USE. +// __USE_ISOC11 Define ISO C11 things. +// __USE_ISOC99 Define ISO C99 things. +// __USE_ISOC95 Define ISO C90 AMD1 (C95) things. +// __USE_ISOCXX11 Define ISO C++11 things. +// __USE_POSIX Define IEEE Std 1003.1 things. 
+// __USE_POSIX2 Define IEEE Std 1003.2 things. +// __USE_POSIX199309 Define IEEE Std 1003.1, and .1b things. +// __USE_POSIX199506 Define IEEE Std 1003.1, .1b, .1c and .1i things. +// __USE_XOPEN Define XPG things. +// __USE_XOPEN_EXTENDED Define X/Open Unix things. +// __USE_UNIX98 Define Single Unix V2 things. +// __USE_XOPEN2K Define XPG6 things. +// __USE_XOPEN2KXSI Define XPG6 XSI things. +// __USE_XOPEN2K8 Define XPG7 things. +// __USE_XOPEN2K8XSI Define XPG7 XSI things. +// __USE_LARGEFILE Define correct standard I/O things. +// __USE_LARGEFILE64 Define LFS things with separate names. +// __USE_FILE_OFFSET64 Define 64bit interface as default. +// __USE_MISC Define things from 4.3BSD or System V Unix. +// __USE_ATFILE Define *at interfaces and AT_* constants for them. +// __USE_GNU Define GNU extensions. +// __USE_FORTIFY_LEVEL Additional security measures used, according to level. +// +// The macros `__GNU_LIBRARY__', `__GLIBC__', and `__GLIBC_MINOR__' are +// defined by this file unconditionally. `__GNU_LIBRARY__' is provided +// only for compatibility. All new code should use the other symbols +// to test for features. +// +// All macros listed above as possibly being defined by this file are +// explicitly undefined if they are not explicitly defined. +// Feature-test macros that are not defined by the user or compiler +// but are implied by the other feature-test macros defined (or by the +// lack of any definitions) are defined by the file. +// +// ISO C feature test macros depend on the definition of the macro +// when an affected header is included, not when the first system +// header is included, and so they are handled in +// , which does not have a multiple include +// guard. Feature test macros that can be handled from the first +// system header included are handled here. + +// Undefine everything, so we get a clean slate. + +// Suppress kernel-name space pollution unless user expressedly asks +// for it. + +// Convenience macro to test the version of gcc. +// Use like this: +// #if __GNUC_PREREQ (2,8) +// ... code requiring gcc 2.8 or later ... +// #endif +// Note: only works for GCC 2.0 and later, because __GNUC_MINOR__ was +// added in 2.0. + +// Similarly for clang. Features added to GCC after version 4.2 may +// or may not also be available in clang, and clang's definitions of +// __GNUC(_MINOR)__ are fixed at 4 and 2 respectively. Not all such +// features can be queried via __has_extension/__has_feature. + +// Whether to use feature set F. + +// _BSD_SOURCE and _SVID_SOURCE are deprecated aliases for +// _DEFAULT_SOURCE. If _DEFAULT_SOURCE is present we do not +// issue a warning; the expectation is that the source is being +// transitioned to use the new macro. + +// If _GNU_SOURCE was defined by the user, turn on all the other features. + +// If nothing (other than _GNU_SOURCE and _DEFAULT_SOURCE) is defined, +// define _DEFAULT_SOURCE. + +// This is to enable the ISO C2X extension. + +// This is to enable the ISO C11 extension. + +// This is to enable the ISO C99 extension. + +// This is to enable the ISO C90 Amendment 1:1995 extension. + +// If none of the ANSI/POSIX macros are defined, or if _DEFAULT_SOURCE +// is defined, use POSIX.1-2008 (or another version depending on +// _XOPEN_SOURCE). + +// Some C libraries once required _REENTRANT and/or _THREAD_SAFE to be +// defined in all multithreaded code. GNU libc has not required this +// for many years. 
We now treat them as compatibility synonyms for +// _POSIX_C_SOURCE=199506L, which is the earliest level of POSIX with +// comprehensive support for multithreaded code. Using them never +// lowers the selected level of POSIX conformance, only raises it. + +// The function 'gets' existed in C89, but is impossible to use +// safely. It has been removed from ISO C11 and ISO C++14. Note: for +// compatibility with various implementations of , this test +// must consider only the value of __cplusplus when compiling C++. + +// GNU formerly extended the scanf functions with modified format +// specifiers %as, %aS, and %a[...] that allocate a buffer for the +// input using malloc. This extension conflicts with ISO C99, which +// defines %a as a standalone format specifier that reads a floating- +// point number; moreover, POSIX.1-2008 provides the same feature +// using the modifier letter 'm' instead (%ms, %mS, %m[...]). +// +// We now follow C99 unless GNU extensions are active and the compiler +// is specifically in C89 or C++98 mode (strict or not). For +// instance, with GCC, -std=gnu11 will have C99-compliant scanf with +// or without -D_GNU_SOURCE, but -std=c89 -D_GNU_SOURCE will have the +// old extension. + +// Get definitions of __STDC_* predefined macros, if the compiler has +// not preincluded this header automatically. +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// This macro indicates that the installed library is the GNU C Library. +// For historic reasons the value now is 6 and this will stay from now +// on. The use of this variable is deprecated. Use __GLIBC__ and +// __GLIBC_MINOR__ now (see below) when you want to test for a specific +// GNU C library version and use the values in to get +// the sonames of the shared libraries. + +// Major and minor version number of the GNU C library package. Use +// these macros to test for features in specific releases. + +// This is here only because every header file already includes this one. +// Copyright (C) 1992-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// We are almost always included from features.h. 
+ +// The GNU libc does not support any K&R compilers or the traditional mode +// of ISO C compilers anymore. Check for some of the combinations not +// anymore supported. + +// Some user header file might have defined this before. + +// All functions, except those with callbacks or those that +// synchronize memory, are leaf functions. + +// GCC can always grok prototypes. For C++ programs we add throw() +// to help it optimize the function calls. But this works only with +// gcc 2.8.x and egcs. For gcc 3.2 and up we even mark C functions +// as non-throwing using a function attribute since programs can use +// the -fexceptions options for C code as well. + +// Compilers that are not clang may object to +// #if defined __clang__ && __has_extension(...) +// even though they do not need to evaluate the right-hand side of the &&. + +// These two macros are not used in glibc anymore. They are kept here +// only because some other projects expect the macros to be defined. + +// For these things, GCC behaves the ANSI way normally, +// and the non-ANSI way under -traditional. + +// This is not a typedef so `const __ptr_t' does the right thing. + +// C++ needs to know that types and declarations are C, not C++. + +// Fortify support. + +// Support for flexible arrays. +// Headers that should use flexible arrays only if they're "real" +// (e.g. only if they won't affect sizeof()) should test +// #if __glibc_c99_flexarr_available. + +// __asm__ ("xyz") is used throughout the headers to rename functions +// at the assembly language level. This is wrapped by the __REDIRECT +// macro, in order to support compilers that can do this some other +// way. When compilers don't support asm-names at all, we have to do +// preprocessor tricks instead (which don't have exactly the right +// semantics, but it's the best we can do). +// +// Example: +// int __REDIRECT(setpgrp, (__pid_t pid, __pid_t pgrp), setpgid); + +// +// #elif __SOME_OTHER_COMPILER__ +// +// # define __REDIRECT(name, proto, alias) name proto; _Pragma("let " #name " = " #alias) + +// GCC has various useful declarations that can be made with the +// `__attribute__' syntax. All of the ways we use this do fine if +// they are omitted for compilers that don't understand it. + +// At some point during the gcc 2.96 development the `malloc' attribute +// for functions was introduced. We don't want to use it unconditionally +// (although this would be possible) since it generates warnings. + +// Tell the compiler which arguments to an allocation function +// indicate the size of the allocation. + +// At some point during the gcc 2.96 development the `pure' attribute +// for functions was introduced. We don't want to use it unconditionally +// (although this would be possible) since it generates warnings. + +// This declaration tells the compiler that the value is constant. + +// At some point during the gcc 3.1 development the `used' attribute +// for functions was introduced. We don't want to use it unconditionally +// (although this would be possible) since it generates warnings. + +// Since version 3.2, gcc allows marking deprecated functions. + +// Since version 4.5, gcc also allows one to specify the message printed +// when a deprecated function is used. clang claims to be gcc 4.2, but +// may also support this feature. + +// At some point during the gcc 2.8 development the `format_arg' attribute +// for functions was introduced. We don't want to use it unconditionally +// (although this would be possible) since it generates warnings. 
+// If several `format_arg' attributes are given for the same function, in +// gcc-3.0 and older, all but the last one are ignored. In newer gccs, +// all designated arguments are considered. + +// At some point during the gcc 2.97 development the `strfmon' format +// attribute for functions was introduced. We don't want to use it +// unconditionally (although this would be possible) since it +// generates warnings. + +// The nonull function attribute allows to mark pointer parameters which +// must not be NULL. + +// If fortification mode, we warn about unused results of certain +// function calls which can lead to problems. + +// Forces a function to be always inlined. +// The Linux kernel defines __always_inline in stddef.h (283d7573), and +// it conflicts with this definition. Therefore undefine it first to +// allow either header to be included first. + +// Associate error messages with the source location of the call site rather +// than with the source location inside the function. + +// GCC 4.3 and above with -std=c99 or -std=gnu99 implements ISO C99 +// inline semantics, unless -fgnu89-inline is used. Using __GNUC_STDC_INLINE__ +// or __GNUC_GNU_INLINE is not a good enough check for gcc because gcc versions +// older than 4.3 may define these macros and still not guarantee GNU inlining +// semantics. +// +// clang++ identifies itself as gcc-4.2, but has support for GNU inlining +// semantics, that can be checked for by using the __GNUC_STDC_INLINE_ and +// __GNUC_GNU_INLINE__ macro definitions. + +// GCC 4.3 and above allow passing all anonymous arguments of an +// __extern_always_inline function to some other vararg function. + +// It is possible to compile containing GCC extensions even if GCC is +// run in pedantic mode if the uses are carefully marked using the +// `__extension__' keyword. But this is not generally available before +// version 2.8. + +// __restrict is known in EGCS 1.2 and above. + +// ISO C99 also allows to declare arrays as non-overlapping. The syntax is +// array_name[restrict] +// GCC 3.1 supports this. + +// Describes a char array whose address can safely be passed as the first +// argument to strncpy and strncat, as the char array is not necessarily +// a NUL-terminated string. + +// Undefine (also defined in libc-symbols.h). +// Copies attributes from the declaration or type referenced by +// the argument. + +// Determine the wordsize from the preprocessor defines. + +// Both x86-64 and x32 use the 64-bit system call interface. +// Properties of long double type. ldbl-96 version. +// Copyright (C) 2016-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// long double is distinct from double, so there is nothing to +// define here. + +// __glibc_macro_warning (MESSAGE) issues warning MESSAGE. This is +// intended for use in preprocessor macros. 
+// +// Note: MESSAGE must be a _single_ string; concatenation of string +// literals is not supported. + +// Generic selection (ISO C11) is a C-only feature, available in GCC +// since version 4.9. Previous versions do not provide generic +// selection, even though they might set __STDC_VERSION__ to 201112L, +// when in -std=c11 mode. Thus, we must check for !defined __GNUC__ +// when testing __STDC_VERSION__ for generic selection support. +// On the other hand, Clang also defines __GNUC__, so a clang-specific +// check is required to enable the use of generic selection. + +// If we don't have __REDIRECT, prototypes will be missing if +// __USE_FILE_OFFSET64 but not __USE_LARGEFILE[64]. + +// Decide whether we can define 'extern inline' functions in headers. + +// This is here only because every header file already includes this one. +// Get the definitions of all the appropriate `__stub_FUNCTION' symbols. +// contains `#define __stub_FUNCTION' when FUNCTION is a stub +// that will always return failure (and set errno to ENOSYS). +// This file is automatically generated. +// This file selects the right generated file of `__stub_FUNCTION' macros +// based on the architecture being compiled for. + +// This file is automatically generated. +// It defines a symbol `__stub_FUNCTION' for each function +// in the C library which is a stub, meaning it will fail +// every time called, usually setting errno to ENOSYS. + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// Copyright (C) 1991-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Determine the wordsize from the preprocessor defines. + +// Both x86-64 and x32 use the 64-bit system call interface. +// Bit size of the time_t type at glibc build time, x86-64 and x32 case. +// Copyright (C) 2018-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. 
+// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// For others, time size is word size. + +// Convenience types. +type X__u_char = uint8 /* types.h:31:23 */ +type X__u_short = uint16 /* types.h:32:28 */ +type X__u_int = uint32 /* types.h:33:22 */ +type X__u_long = uint64 /* types.h:34:27 */ + +// Fixed-size types, underlying types depend on word size and compiler. +type X__int8_t = int8 /* types.h:37:21 */ +type X__uint8_t = uint8 /* types.h:38:23 */ +type X__int16_t = int16 /* types.h:39:26 */ +type X__uint16_t = uint16 /* types.h:40:28 */ +type X__int32_t = int32 /* types.h:41:20 */ +type X__uint32_t = uint32 /* types.h:42:22 */ +type X__int64_t = int64 /* types.h:44:25 */ +type X__uint64_t = uint64 /* types.h:45:27 */ + +// Smallest types with at least a given width. +type X__int_least8_t = X__int8_t /* types.h:52:18 */ +type X__uint_least8_t = X__uint8_t /* types.h:53:19 */ +type X__int_least16_t = X__int16_t /* types.h:54:19 */ +type X__uint_least16_t = X__uint16_t /* types.h:55:20 */ +type X__int_least32_t = X__int32_t /* types.h:56:19 */ +type X__uint_least32_t = X__uint32_t /* types.h:57:20 */ +type X__int_least64_t = X__int64_t /* types.h:58:19 */ +type X__uint_least64_t = X__uint64_t /* types.h:59:20 */ + +// quad_t is also 64 bits. +type X__quad_t = int64 /* types.h:63:18 */ +type X__u_quad_t = uint64 /* types.h:64:27 */ + +// Largest integral types. +type X__intmax_t = int64 /* types.h:72:18 */ +type X__uintmax_t = uint64 /* types.h:73:27 */ + +// The machine-dependent file defines __*_T_TYPE +// macros for each of the OS types we define below. The definitions +// of those macros must use the following macros for underlying types. +// We define __S_TYPE and __U_TYPE for the signed and unsigned +// variants of each of the following integer types on this machine. +// +// 16 -- "natural" 16-bit type (always short) +// 32 -- "natural" 32-bit type (always int) +// 64 -- "natural" 64-bit type (long or long long) +// LONG32 -- 32-bit type, traditionally long +// QUAD -- 64-bit type, traditionally long long +// WORD -- natural type of __WORDSIZE bits (int or long) +// LONGWORD -- type of __WORDSIZE bits, traditionally long +// +// We distinguish WORD/LONGWORD, 32/LONG32, and 64/QUAD so that the +// conventional uses of `long' or `long long' type modifiers match the +// types we define, even when a less-adorned type would be the same size. +// This matters for (somewhat) portably writing printf/scanf formats for +// these types, where using the appropriate l or ll format modifiers can +// make the typedefs and the formats match up across all GNU platforms. If +// we used `long' when it's 64 bits where `long long' is expected, then the +// compiler would warn about the formats not matching the argument types, +// and the programmer changing them to shut up the compiler would break the +// program's portability. 
+// +// Here we assume what is presently the case in all the GCC configurations +// we support: long long is always 64 bits, long is always word/address size, +// and int is always 32 bits. + +// No need to mark the typedef with __extension__. +// bits/typesizes.h -- underlying types for *_t. Linux/x86-64 version. +// Copyright (C) 2012-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// See for the meaning of these macros. This file exists so +// that need not vary across different GNU platforms. + +// X32 kernel interface is 64-bit. + +// Tell the libc code that off_t and off64_t are actually the same type +// for all ABI purposes, even if possibly expressed as different base types +// for C type-checking purposes. + +// Same for ino_t and ino64_t. + +// And for __rlim_t and __rlim64_t. + +// And for fsblkcnt_t, fsblkcnt64_t, fsfilcnt_t and fsfilcnt64_t. + +// Number of descriptors that can fit in an `fd_set'. + +// bits/time64.h -- underlying types for __time64_t. Generic version. +// Copyright (C) 2018-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Define __TIME64_T_TYPE so that it is always a 64-bit type. + +// If we already have 64-bit time type then use it. + +type X__dev_t = uint64 /* types.h:145:25 */ // Type of device numbers. +type X__uid_t = uint32 /* types.h:146:25 */ // Type of user identifications. +type X__gid_t = uint32 /* types.h:147:25 */ // Type of group identifications. +type X__ino_t = uint64 /* types.h:148:25 */ // Type of file serial numbers. +type X__ino64_t = uint64 /* types.h:149:27 */ // Type of file serial numbers (LFS). +type X__mode_t = uint32 /* types.h:150:26 */ // Type of file attribute bitmasks. +type X__nlink_t = uint64 /* types.h:151:27 */ // Type of file link counts. +type X__off_t = int64 /* types.h:152:25 */ // Type of file sizes and offsets. +type X__off64_t = int64 /* types.h:153:27 */ // Type of file sizes and offsets (LFS). +type X__pid_t = int32 /* types.h:154:25 */ // Type of process identifications. +type X__fsid_t = struct{ F__val [2]int32 } /* types.h:155:26 */ // Type of file system IDs. 
+type X__clock_t = int64 /* types.h:156:27 */ // Type of CPU usage counts. +type X__rlim_t = uint64 /* types.h:157:26 */ // Type for resource measurement. +type X__rlim64_t = uint64 /* types.h:158:28 */ // Type for resource measurement (LFS). +type X__id_t = uint32 /* types.h:159:24 */ // General type for IDs. +type X__time_t = int64 /* types.h:160:26 */ // Seconds since the Epoch. +type X__useconds_t = uint32 /* types.h:161:30 */ // Count of microseconds. +type X__suseconds_t = int64 /* types.h:162:31 */ // Signed count of microseconds. + +type X__daddr_t = int32 /* types.h:164:27 */ // The type of a disk address. +type X__key_t = int32 /* types.h:165:25 */ // Type of an IPC key. + +// Clock ID used in clock and timer functions. +type X__clockid_t = int32 /* types.h:168:29 */ + +// Timer ID returned by `timer_create'. +type X__timer_t = uintptr /* types.h:171:12 */ + +// Type to represent block size. +type X__blksize_t = int64 /* types.h:174:29 */ + +// Types from the Large File Support interface. + +// Type to count number of disk blocks. +type X__blkcnt_t = int64 /* types.h:179:28 */ +type X__blkcnt64_t = int64 /* types.h:180:30 */ + +// Type to count file system blocks. +type X__fsblkcnt_t = uint64 /* types.h:183:30 */ +type X__fsblkcnt64_t = uint64 /* types.h:184:32 */ + +// Type to count file system nodes. +type X__fsfilcnt_t = uint64 /* types.h:187:30 */ +type X__fsfilcnt64_t = uint64 /* types.h:188:32 */ + +// Type of miscellaneous file system fields. +type X__fsword_t = int64 /* types.h:191:28 */ + +type X__ssize_t = int64 /* types.h:193:27 */ // Type of a byte count, or error. + +// Signed long type used in system calls. +type X__syscall_slong_t = int64 /* types.h:196:33 */ +// Unsigned long type used in system calls. +type X__syscall_ulong_t = uint64 /* types.h:198:33 */ + +// These few don't really vary by system, they always correspond +// +// to one of the other defined types. +type X__loff_t = X__off64_t /* types.h:202:19 */ // Type of file sizes and offsets (LFS). +type X__caddr_t = uintptr /* types.h:203:14 */ + +// Duplicates info from stdint.h but this is used in unistd.h. +type X__intptr_t = int64 /* types.h:206:25 */ + +// Duplicate info from sys/socket.h. +type X__socklen_t = uint32 /* types.h:209:23 */ + +// C99: An integer type that can be accessed as an atomic entity, +// +// even in the presence of asynchronous interrupts. +// It is not currently necessary for this to be machine-specific. +type X__sig_atomic_t = int32 /* types.h:214:13 */ + +// Seconds since the Epoch, visible to user code when time_t is too +// narrow only for consistency with the old way of widening too-narrow +// types. User code should never use __time64_t. + +// Some versions of stddef.h provide wint_t, even though neither the +// C nor C++ standards, nor POSIX, specifies this. We assume that +// stddef.h will define the macro _WINT_T if and only if it provides +// wint_t, and conversely, that it will avoid providing wint_t if +// _WINT_T is already defined. + +// Integral type unchanged by default argument promotions that can +// hold any value corresponding to members of the extended character +// set, as well as at least one value that does not correspond to any +// member of the extended character set. + +type Wint_t = uint32 /* wint_t.h:20:23 */ + +// Constant expression of type `wint_t' whose value does not correspond +// to any member of the extended character set. + +// Some definitions from this header also appear in in +// Unix98 mode. 
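The OS-level typedefs above (__off_t, __time_t, __socklen_t, ...) are pinned to the Linux/x86-64 ABI rather than derived from the compiler. A small sketch of a compile-time width check one could keep next to such aliases; the names are repeated from the generated code for illustration, and the check itself is not part of the vendored package:

    package main

    import "unsafe"

    // Repeated aliases from the generated code above, so the sketch builds alone.
    type (
        X__off_t     = int64
        X__time_t    = int64
        X__socklen_t = uint32
    )

    // Constant array lengths that fail to compile if an alias ever becomes
    // narrower than the Linux/x86-64 ABI requires (the subtraction would
    // underflow at compile time).
    var (
        _ [unsafe.Sizeof(X__off_t(0)) - 8]struct{}
        _ [unsafe.Sizeof(X__time_t(0)) - 8]struct{}
        _ [unsafe.Sizeof(X__socklen_t(0)) - 4]struct{}
    )

    func main() {}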
+// Copyright (C) 1996-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// ISO C99 Standard: 7.25 +// Wide character classification and mapping utilities + +// bits/types.h -- definitions of __*_t types underlying *_t types. +// Copyright (C) 2002-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Never include this file directly; use instead. + +// The definitions in this header are specified to appear in +// in ISO C99, but in in Unix98. _GNU_SOURCE follows C99. + +// Scalar type that can hold values which represent locale-specific +// +// character classifications. +type Wctype_t = uint64 /* wctype-wchar.h:38:27 */ + +// Extensible wide-character mapping functions: 7.15.3.2. + +// Scalar type that can hold values which represent locale-specific +// +// character mappings. +type Wctrans_t = uintptr /* wctype.h:48:25 */ + +// POSIX.1-2008 extended locale interface (see locale.h). +// Definition of locale_t. +// Copyright (C) 2017-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// Definition of struct __locale_struct and __locale_t. +// Copyright (C) 1997-2020 Free Software Foundation, Inc. +// This file is part of the GNU C Library. +// Contributed by Ulrich Drepper , 1997. 
+// +// The GNU C Library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// The GNU C Library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with the GNU C Library; if not, see +// . + +// POSIX.1-2008: the locale_t type, representing a locale context +// (implementation-namespace version). This type should be treated +// as opaque by applications; some details are exposed for the sake of +// efficiency in e.g. ctype functions. + +type X__locale_struct = struct { + F__locales [13]uintptr + F__ctype_b uintptr + F__ctype_tolower uintptr + F__ctype_toupper uintptr + F__names [13]uintptr +} /* __locale_t.h:28:1 */ + +type X__locale_t = uintptr /* __locale_t.h:42:32 */ + +type Locale_t = X__locale_t /* locale_t.h:24:20 */ + +var _ int8 /* gen.c:2:13: */ diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/memory/Makefile temporal-1.22.5/src/vendor/modernc.org/memory/Makefile --- temporal-1.21.5-1/src/vendor/modernc.org/memory/Makefile 2023-09-29 14:03:35.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/memory/Makefile 2024-02-23 09:46:15.000000000 +0000 @@ -2,7 +2,7 @@ # Use of this source code is governed by a BSD-style # license that can be found in the LICENSE file. -.PHONY: all clean cover cpu editor internalError later mem nuke todo edit +.PHONY: all clean cover cpu editor internalError later mem nuke todo edit build_all_targets grep=--include=*.go --include=*.l --include=*.y --include=*.yy ngrep='TODOOK\|parser\.go\|scanner\.go\|.*_string\.go' @@ -34,6 +34,8 @@ editor: gofmt -l -s -w *.go + +build_all_targets: GOOS=darwin GOARCH=amd64 go build GOOS=darwin GOARCH=arm64 go build GOOS=freebsd GOARCH=386 go build @@ -45,6 +47,7 @@ GOOS=linux GOARCH=amd64 go build GOOS=linux GOARCH=arm go build GOOS=linux GOARCH=arm64 go build + GOOS=linux GOARCH=loong64 go build GOOS=linux GOARCH=mips go build GOOS=linux GOARCH=mips64le go build GOOS=linux GOARCH=mipsle go build diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/memory/memory64.go temporal-1.22.5/src/vendor/modernc.org/memory/memory64.go --- temporal-1.21.5-1/src/vendor/modernc.org/memory/memory64.go 2023-09-29 14:03:35.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/memory/memory64.go 2024-02-23 09:46:15.000000000 +0000 @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-//go:build amd64 || amd64p32 || arm64 || arm64be || mips64 || mips64le || mips64p32 || mips64p32le || ppc64 || ppc64le || sparc64 || riscv64 -// +build amd64 amd64p32 arm64 arm64be mips64 mips64le mips64p32 mips64p32le ppc64 ppc64le sparc64 riscv64 +//go:build amd64 || amd64p32 || arm64 || arm64be || mips64 || mips64le || mips64p32 || mips64p32le || ppc64 || ppc64le || sparc64 || riscv64 || loong64 +// +build amd64 amd64p32 arm64 arm64be mips64 mips64le mips64p32 mips64p32le ppc64 ppc64le sparc64 riscv64 loong64 package memory // import "modernc.org/memory" diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/memory/mmap_linux_64.go temporal-1.22.5/src/vendor/modernc.org/memory/mmap_linux_64.go --- temporal-1.21.5-1/src/vendor/modernc.org/memory/mmap_linux_64.go 2023-09-29 14:03:35.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/memory/mmap_linux_64.go 2024-02-23 09:46:15.000000000 +0000 @@ -2,9 +2,9 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE-GO file. -//go:build linux && (amd64 || arm64 || mips64 || mips64le || riscv64 || ppc64le) +//go:build linux && (amd64 || arm64 || mips64 || mips64le || riscv64 || ppc64le || loong64) // +build linux -// +build amd64 arm64 mips64 mips64le riscv64 ppc64le +// +build amd64 arm64 mips64 mips64le riscv64 ppc64le loong64 package memory diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/sqlite/AUTHORS temporal-1.22.5/src/vendor/modernc.org/sqlite/AUTHORS --- temporal-1.21.5-1/src/vendor/modernc.org/sqlite/AUTHORS 2023-09-29 14:03:35.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/sqlite/AUTHORS 2024-02-23 09:46:16.000000000 +0000 @@ -12,12 +12,13 @@ Dan Peterson David Walton Davsk Ltd Co +FerretDB Inc. Jaap Aarts Jan Mercl <0xjnml@gmail.com> Josh Bleecher Snyder Logan Snow Michael Hoffmann +Michael Rykov Ross Light Saed SayedAhmed Steffen Butzer -Michael Rykov diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/sqlite/CONTRIBUTORS temporal-1.22.5/src/vendor/modernc.org/sqlite/CONTRIBUTORS --- temporal-1.21.5-1/src/vendor/modernc.org/sqlite/CONTRIBUTORS 2023-09-29 14:03:35.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/sqlite/CONTRIBUTORS 2024-02-23 09:46:16.000000000 +0000 @@ -7,6 +7,7 @@ # Please keep the list sorted. Alexander Menzhinsky +Alexey Palazhchenko Artyom Pervukhin Dan Peterson David Skinner @@ -20,8 +21,9 @@ Logan Snow Matthew Gabeler-Lee Michael Hoffmann +Michael Rykov Ross Light Saed SayedAhmed +Sean McGivern Steffen Butzer Yaacov Akiba Slama -Michael Rykov diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/sqlite/README.md temporal-1.22.5/src/vendor/modernc.org/sqlite/README.md --- temporal-1.21.5-1/src/vendor/modernc.org/sqlite/README.md 2023-09-29 14:03:35.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/sqlite/README.md 2024-02-23 09:46:16.000000000 +0000 @@ -21,3 +21,56 @@ ## Builders [modern-c.appspot.com/-/builder/?importpath=modernc.org%2fsqlite](https://modern-c.appspot.com/-/builder/?importpath=modernc.org%2fsqlite) + +## Speedtest1 + +Numbers for the pure Go version were produced by + + ~/src/modernc.org/sqlite/speedtest1$ go build && ./speedtest1 + +Numbers for the pure C version were produced by + + ~/src/modernc.org/sqlite/testdata/sqlite-src-3410200/test$ gcc speedtest1.c ../../sqlite-amalgamation-3410200/sqlite3.c -lpthread -ldl && ./a.out + +The results are from Go version 1.20.4 and GCC version 10.2.1 on a +Linux/amd64 machine, CPU: AMD Ryzen 9 3900X 12-Core Processor × 24, 128GB +RAM. Shown are the best of 3 runs. 
+ + Go C + + -- Speedtest1 for SQLite 3.41.2 2023-03-22 11:56:21 0d1fc92f94cb6b76bffe3ec34d69 -- Speedtest1 for SQLite 3.41.2 2023-03-22 11:56:21 0d1fc92f94cb6b76bffe3ec34d69 + 100 - 50000 INSERTs into table with no index...................... 0.071s 100 - 50000 INSERTs into table with no index...................... 0.077s + 110 - 50000 ordered INSERTS with one index/PK..................... 0.114s 110 - 50000 ordered INSERTS with one index/PK..................... 0.082s + 120 - 50000 unordered INSERTS with one index/PK................... 0.137s 120 - 50000 unordered INSERTS with one index/PK................... 0.099s + 130 - 25 SELECTS, numeric BETWEEN, unindexed...................... 0.083s 130 - 25 SELECTS, numeric BETWEEN, unindexed...................... 0.091s + 140 - 10 SELECTS, LIKE, unindexed................................. 0.210s 140 - 10 SELECTS, LIKE, unindexed................................. 0.120s + 142 - 10 SELECTS w/ORDER BY, unindexed............................ 0.276s 142 - 10 SELECTS w/ORDER BY, unindexed............................ 0.182s + 145 - 10 SELECTS w/ORDER BY and LIMIT, unindexed.................. 0.183s 145 - 10 SELECTS w/ORDER BY and LIMIT, unindexed.................. 0.099s + 150 - CREATE INDEX five times..................................... 0.172s 150 - CREATE INDEX five times..................................... 0.127s + 160 - 10000 SELECTS, numeric BETWEEN, indexed..................... 0.080s 160 - 10000 SELECTS, numeric BETWEEN, indexed..................... 0.078s + 161 - 10000 SELECTS, numeric BETWEEN, PK.......................... 0.080s 161 - 10000 SELECTS, numeric BETWEEN, PK.......................... 0.078s + 170 - 10000 SELECTS, text BETWEEN, indexed........................ 0.187s 170 - 10000 SELECTS, text BETWEEN, indexed........................ 0.169s + 180 - 50000 INSERTS with three indexes............................ 0.196s 180 - 50000 INSERTS with three indexes............................ 0.154s + 190 - DELETE and REFILL one table................................. 0.200s 190 - DELETE and REFILL one table................................. 0.155s + 200 - VACUUM...................................................... 0.180s 200 - VACUUM...................................................... 0.142s + 210 - ALTER TABLE ADD COLUMN, and query........................... 0.004s 210 - ALTER TABLE ADD COLUMN, and query........................... 0.005s + 230 - 10000 UPDATES, numeric BETWEEN, indexed..................... 0.093s 230 - 10000 UPDATES, numeric BETWEEN, indexed..................... 0.080s + 240 - 50000 UPDATES of individual rows............................ 0.153s 240 - 50000 UPDATES of individual rows............................ 0.137s + 250 - One big UPDATE of the whole 50000-row table................. 0.024s 250 - One big UPDATE of the whole 50000-row table................. 0.019s + 260 - Query added column after filling............................ 0.004s 260 - Query added column after filling............................ 0.005s + 270 - 10000 DELETEs, numeric BETWEEN, indexed..................... 0.278s 270 - 10000 DELETEs, numeric BETWEEN, indexed..................... 0.263s + 280 - 50000 DELETEs of individual rows............................ 0.188s 280 - 50000 DELETEs of individual rows............................ 0.180s + 290 - Refill two 50000-row tables using REPLACE................... 0.411s 290 - Refill two 50000-row tables using REPLACE................... 0.359s + 300 - Refill a 50000-row table using (b&1)==(a&1)................. 
0.175s 300 - Refill a 50000-row table using (b&1)==(a&1)................. 0.151s + 310 - 10000 four-ways joins....................................... 0.427s 310 - 10000 four-ways joins....................................... 0.365s + 320 - subquery in result set...................................... 0.440s 320 - subquery in result set...................................... 0.521s + 400 - 70000 REPLACE ops on an IPK................................. 0.125s 400 - 70000 REPLACE ops on an IPK................................. 0.106s + 410 - 70000 SELECTS on an IPK..................................... 0.081s 410 - 70000 SELECTS on an IPK..................................... 0.078s + 500 - 70000 REPLACE on TEXT PK.................................... 0.174s 500 - 70000 REPLACE on TEXT PK.................................... 0.116s + 510 - 70000 SELECTS on a TEXT PK.................................. 0.153s 510 - 70000 SELECTS on a TEXT PK.................................. 0.117s + 520 - 70000 SELECT DISTINCT....................................... 0.083s 520 - 70000 SELECT DISTINCT....................................... 0.067s + 980 - PRAGMA integrity_check...................................... 0.436s 980 - PRAGMA integrity_check...................................... 0.377s + 990 - ANALYZE..................................................... 0.107s 990 - ANALYZE..................................................... 0.038s + TOTAL....................................................... 5.525s TOTAL....................................................... 4.637s + +This particular test executes 16.1% faster in the C version. diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/sqlite/dmesg.go temporal-1.22.5/src/vendor/modernc.org/sqlite/dmesg.go --- temporal-1.21.5-1/src/vendor/modernc.org/sqlite/dmesg.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/sqlite/dmesg.go 2024-02-23 09:46:16.000000000 +0000 @@ -0,0 +1,56 @@ +// Copyright 2023 The Sqlite Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build sqlite.dmesg +// +build sqlite.dmesg + +package sqlite // import "modernc.org/sqlite" + +import ( + "fmt" + "os" + "path/filepath" + "strings" + "time" +) + +const dmesgs = true + +var ( + pid = fmt.Sprintf("[%v %v] ", os.Getpid(), filepath.Base(os.Args[0])) + logf *os.File +) + +func init() { + t := time.Now() + // 01/02 03:04:05PM '06 -0700 + dn := t.Format("sqlite-dmesg-2006-01-02-03-150405") + dn = filepath.Join(os.TempDir(), fmt.Sprintf("%s.%d", dn, os.Getpid())) + if err := os.Mkdir(dn, 0770); err != nil { + panic(err.Error()) + } + + fn := filepath.Join(dn, "dmesg.log") + var err error + if logf, err = os.OpenFile(fn, os.O_APPEND|os.O_CREATE|os.O_WRONLY|os.O_SYNC, 0644); err != nil { + panic(err.Error()) + } + + dmesg("%v", time.Now()) + fmt.Fprintf(os.Stderr, "debug messages in %s\n", fn) +} + +func dmesg(s string, args ...interface{}) { + if s == "" { + s = strings.Repeat("%v ", len(args)) + } + s = fmt.Sprintf(pid+s, args...) 
+ s += fmt.Sprintf(" (%v: %v:)", origin(3), origin(2)) + switch { + case len(s) != 0 && s[len(s)-1] == '\n': + fmt.Fprint(logf, s) + default: + fmt.Fprintln(logf, s) + } +} diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/sqlite/doc.go temporal-1.22.5/src/vendor/modernc.org/sqlite/doc.go --- temporal-1.21.5-1/src/vendor/modernc.org/sqlite/doc.go 2023-09-29 14:03:35.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/sqlite/doc.go 2024-02-23 09:46:16.000000000 +0000 @@ -19,27 +19,89 @@ // // OS Arch SQLite version // ------------------------------ -// darwin amd64 3.41.0 -// darwin arm64 3.41.0 -// freebsd amd64 3.41.0 -// freebsd arm64 3.41.0 -// linux 386 3.41.0 -// linux amd64 3.41.0 -// linux arm 3.41.0 -// linux arm64 3.41.0 -// linux ppc64le 3.41.0 -// linux riscv64 3.41.0 -// windows amd64 3.41.0 -// windows arm64 3.41.0 +// darwin amd64 3.41.2 +// darwin arm64 3.41.2 +// freebsd amd64 3.41.2 +// freebsd arm64 3.41.2 +// linux 386 3.41.2 +// linux amd64 3.41.2 +// linux arm 3.41.2 +// linux arm64 3.41.2 +// linux ppc64le 3.41.2 +// linux riscv64 3.41.2 +// linux s390x 3.41.2 +// windows amd64 3.41.2 +// windows arm64 3.41.2 // // Builders // -// Builder results available at +// Builder results available at: // // https://modern-c.appspot.com/-/builder/?importpath=modernc.org%2fsqlite // +// Speedtest1 +// +// Numbers for the pure Go version were produced by +// +// ~/src/modernc.org/sqlite/speedtest1$ go build && ./speedtest1 +// +// Numbers for the pure C version were produced by +// +// ~/src/modernc.org/sqlite/testdata/sqlite-src-3410200/test$ gcc speedtest1.c ../../sqlite-amalgamation-3410200/sqlite3.c -lpthread -ldl && ./a.out +// +// The results are from Go version 1.20.4 and GCC version 10.2.1 on a +// Linux/amd64 machine, CPU: AMD Ryzen 9 3900X 12-Core Processor × 24, 128GB +// RAM. Shown are the best of 3 runs. +// +// Go C +// +// -- Speedtest1 for SQLite 3.41.2 2023-03-22 11:56:21 0d1fc92f94cb6b76bffe3ec34d69 -- Speedtest1 for SQLite 3.41.2 2023-03-22 11:56:21 0d1fc92f94cb6b76bffe3ec34d69 +// 100 - 50000 INSERTs into table with no index...................... 0.071s 100 - 50000 INSERTs into table with no index...................... 0.077s +// 110 - 50000 ordered INSERTS with one index/PK..................... 0.114s 110 - 50000 ordered INSERTS with one index/PK..................... 0.082s +// 120 - 50000 unordered INSERTS with one index/PK................... 0.137s 120 - 50000 unordered INSERTS with one index/PK................... 0.099s +// 130 - 25 SELECTS, numeric BETWEEN, unindexed...................... 0.083s 130 - 25 SELECTS, numeric BETWEEN, unindexed...................... 0.091s +// 140 - 10 SELECTS, LIKE, unindexed................................. 0.210s 140 - 10 SELECTS, LIKE, unindexed................................. 0.120s +// 142 - 10 SELECTS w/ORDER BY, unindexed............................ 0.276s 142 - 10 SELECTS w/ORDER BY, unindexed............................ 0.182s +// 145 - 10 SELECTS w/ORDER BY and LIMIT, unindexed.................. 0.183s 145 - 10 SELECTS w/ORDER BY and LIMIT, unindexed.................. 0.099s +// 150 - CREATE INDEX five times..................................... 0.172s 150 - CREATE INDEX five times..................................... 0.127s +// 160 - 10000 SELECTS, numeric BETWEEN, indexed..................... 0.080s 160 - 10000 SELECTS, numeric BETWEEN, indexed..................... 0.078s +// 161 - 10000 SELECTS, numeric BETWEEN, PK.......................... 
0.080s 161 - 10000 SELECTS, numeric BETWEEN, PK.......................... 0.078s +// 170 - 10000 SELECTS, text BETWEEN, indexed........................ 0.187s 170 - 10000 SELECTS, text BETWEEN, indexed........................ 0.169s +// 180 - 50000 INSERTS with three indexes............................ 0.196s 180 - 50000 INSERTS with three indexes............................ 0.154s +// 190 - DELETE and REFILL one table................................. 0.200s 190 - DELETE and REFILL one table................................. 0.155s +// 200 - VACUUM...................................................... 0.180s 200 - VACUUM...................................................... 0.142s +// 210 - ALTER TABLE ADD COLUMN, and query........................... 0.004s 210 - ALTER TABLE ADD COLUMN, and query........................... 0.005s +// 230 - 10000 UPDATES, numeric BETWEEN, indexed..................... 0.093s 230 - 10000 UPDATES, numeric BETWEEN, indexed..................... 0.080s +// 240 - 50000 UPDATES of individual rows............................ 0.153s 240 - 50000 UPDATES of individual rows............................ 0.137s +// 250 - One big UPDATE of the whole 50000-row table................. 0.024s 250 - One big UPDATE of the whole 50000-row table................. 0.019s +// 260 - Query added column after filling............................ 0.004s 260 - Query added column after filling............................ 0.005s +// 270 - 10000 DELETEs, numeric BETWEEN, indexed..................... 0.278s 270 - 10000 DELETEs, numeric BETWEEN, indexed..................... 0.263s +// 280 - 50000 DELETEs of individual rows............................ 0.188s 280 - 50000 DELETEs of individual rows............................ 0.180s +// 290 - Refill two 50000-row tables using REPLACE................... 0.411s 290 - Refill two 50000-row tables using REPLACE................... 0.359s +// 300 - Refill a 50000-row table using (b&1)==(a&1)................. 0.175s 300 - Refill a 50000-row table using (b&1)==(a&1)................. 0.151s +// 310 - 10000 four-ways joins....................................... 0.427s 310 - 10000 four-ways joins....................................... 0.365s +// 320 - subquery in result set...................................... 0.440s 320 - subquery in result set...................................... 0.521s +// 400 - 70000 REPLACE ops on an IPK................................. 0.125s 400 - 70000 REPLACE ops on an IPK................................. 0.106s +// 410 - 70000 SELECTS on an IPK..................................... 0.081s 410 - 70000 SELECTS on an IPK..................................... 0.078s +// 500 - 70000 REPLACE on TEXT PK.................................... 0.174s 500 - 70000 REPLACE on TEXT PK.................................... 0.116s +// 510 - 70000 SELECTS on a TEXT PK.................................. 0.153s 510 - 70000 SELECTS on a TEXT PK.................................. 0.117s +// 520 - 70000 SELECT DISTINCT....................................... 0.083s 520 - 70000 SELECT DISTINCT....................................... 0.067s +// 980 - PRAGMA integrity_check...................................... 0.436s 980 - PRAGMA integrity_check...................................... 0.377s +// 990 - ANALYZE..................................................... 0.107s 990 - ANALYZE..................................................... 0.038s +// TOTAL....................................................... 
5.525s TOTAL....................................................... 4.637s +// +// This particular test executes 16.1% faster in the C version. +// // Changelog // +// 2023-06-01 v1.23.0: +// +// Allow registering aggregate functions +// +// 2023-04-22 v1.22.0: +// +// Support linux/s390x. +// // 2023-02-23 v1.21.0: // // Upgrade to SQLite 3.41.0, release notes at https://sqlite.org/releaselog/3_41_0.html. @@ -201,6 +263,122 @@ // // Note: To run `go generate` you need to have modernc.org/ccgo/v3 installed. // +// Hacking +// +// This is an example of how to use the debug logs in modernc.org/libc when hunting a bug. +// +// +// 0:jnml@e5-1650:~/src/modernc.org/sqlite$ git status +// On branch master +// Your branch is up to date with 'origin/master'. +// +// nothing to commit, working tree clean +// 0:jnml@e5-1650:~/src/modernc.org/sqlite$ git log -1 +// commit df33b8d15107f3cc777799c0fe105f74ef499e62 (HEAD -> master, tag: v1.21.1, origin/master, origin/HEAD, wips, ok) +// Author: Jan Mercl <0xjnml@gmail.com> +// Date: Mon Mar 27 16:18:28 2023 +0200 +// +// upgrade to SQLite 3.41.2 +// 0:jnml@e5-1650:~/src/modernc.org/sqlite$ rm -f /tmp/libc.log ; go test -v -tags=libc.dmesg -run TestScalar ; ls -l /tmp/libc.log +// test binary compiled for linux/amd64 +// === RUN TestScalar +// --- PASS: TestScalar (0.09s) +// PASS +// ok modernc.org/sqlite 0.128s +// -rw-r--r-- 1 jnml jnml 76 Apr 6 11:22 /tmp/libc.log +// 0:jnml@e5-1650:~/src/modernc.org/sqlite$ cat /tmp/libc.log +// [10723 sqlite.test] 2023-04-06 11:22:48.288066057 +0200 CEST m=+0.000707150 +// 0:jnml@e5-1650:~/src/modernc.org/sqlite$ +// +// +// The /tmp/libc.log file is created as requested. No useful messages there because none are enabled in libc. Let's try to enable Xwrite as an example. +// +// +// 0:jnml@e5-1650:~/src/modernc.org/libc$ git status +// On branch master +// Your branch is up to date with 'origin/master'. +// +// Changes not staged for commit: +// (use "git add ..." to update what will be committed) +// (use "git restore ..." 
to discard changes in working directory) +// modified: libc_linux.go +// +// no changes added to commit (use "git add" and/or "git commit -a") +// 0:jnml@e5-1650:~/src/modernc.org/libc$ git log -1 +// commit 1e22c18cf2de8aa86d5b19b165f354f99c70479c (HEAD -> master, tag: v1.22.3, origin/master, origin/HEAD) +// Author: Jan Mercl <0xjnml@gmail.com> +// Date: Wed Feb 22 20:27:45 2023 +0100 +// +// support sqlite 3.41 on linux targets +// 0:jnml@e5-1650:~/src/modernc.org/libc$ git diff +// diff --git a/libc_linux.go b/libc_linux.go +// index 1c2f482..ac1f08d 100644 +// --- a/libc_linux.go +// +++ b/libc_linux.go +// @@ -332,19 +332,19 @@ func Xwrite(t *TLS, fd int32, buf uintptr, count types.Size_t) types.Ssize_t { +// var n uintptr +// switch n, _, err = unix.Syscall(unix.SYS_WRITE, uintptr(fd), buf, uintptr(count)); err { +// case 0: +// - // if dmesgs { +// - // // dmesg("%v: %d %#x: %#x\n%s", origin(1), fd, count, n, hex.Dump(GoBytes(buf, int(n)))) +// - // dmesg("%v: %d %#x: %#x", origin(1), fd, count, n) +// - // } +// + if dmesgs { +// + // dmesg("%v: %d %#x: %#x\n%s", origin(1), fd, count, n, hex.Dump(GoBytes(buf, int(n)))) +// + dmesg("%v: %d %#x: %#x", origin(1), fd, count, n) +// + } +// return types.Ssize_t(n) +// case errno.EAGAIN: +// // nop +// } +// } +// +// - // if dmesgs { +// - // dmesg("%v: fd %v, count %#x: %v", origin(1), fd, count, err) +// - // } +// + if dmesgs { +// + dmesg("%v: fd %v, count %#x: %v", origin(1), fd, count, err) +// + } +// t.setErrno(err) +// return -1 +// } +// 0:jnml@e5-1650:~/src/modernc.org/libc$ +// +// +// We need to tell the Go build system to use our local, patched/debug libc: +// +// +// 0:jnml@e5-1650:~/src/modernc.org/sqlite$ go work use $(go env GOPATH)/src/modernc.org/libc +// 0:jnml@e5-1650:~/src/modernc.org/sqlite$ go work use . 
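For readers unfamiliar with Go workspaces: the two go work use commands above record both module directories in a go.work file beside the sqlite checkout, roughly like the following (the go version line and the libc path are illustrative; the real path is whatever $(go env GOPATH)/src/modernc.org/libc expands to on that machine):

    go 1.20

    use (
        .
        /home/jnml/src/modernc.org/libc
    )

With that file in place, builds and tests in the sqlite directory resolve modernc.org/libc from the local, patched checkout instead of the module cache.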
+// +// +// And run the test again: +// +// 0:jnml@e5-1650:~/src/modernc.org/sqlite$ rm -f /tmp/libc.log ; go test -v -tags=libc.dmesg -run TestScalar ; ls -l /tmp/libc.log +// test binary compiled for linux/amd64 +// === RUN TestScalar +// --- PASS: TestScalar (0.26s) +// PASS +// ok modernc.org/sqlite 0.285s +// -rw-r--r-- 1 jnml jnml 918 Apr 6 11:29 /tmp/libc.log +// 0:jnml@e5-1650:~/src/modernc.org/sqlite$ cat /tmp/libc.log +// [11910 sqlite.test] 2023-04-06 11:29:13.143589542 +0200 CEST m=+0.000689270 +// [11910 sqlite.test] libc_linux.go:337:Xwrite: 8 0x200: 0x200 +// [11910 sqlite.test] libc_linux.go:337:Xwrite: 8 0xc: 0xc +// [11910 sqlite.test] libc_linux.go:337:Xwrite: 7 0x1000: 0x1000 +// [11910 sqlite.test] libc_linux.go:337:Xwrite: 7 0x1000: 0x1000 +// [11910 sqlite.test] libc_linux.go:337:Xwrite: 8 0x200: 0x200 +// [11910 sqlite.test] libc_linux.go:337:Xwrite: 8 0x4: 0x4 +// [11910 sqlite.test] libc_linux.go:337:Xwrite: 8 0x1000: 0x1000 +// [11910 sqlite.test] libc_linux.go:337:Xwrite: 8 0x4: 0x4 +// [11910 sqlite.test] libc_linux.go:337:Xwrite: 8 0x4: 0x4 +// [11910 sqlite.test] libc_linux.go:337:Xwrite: 8 0x1000: 0x1000 +// [11910 sqlite.test] libc_linux.go:337:Xwrite: 8 0x4: 0x4 +// [11910 sqlite.test] libc_linux.go:337:Xwrite: 8 0xc: 0xc +// [11910 sqlite.test] libc_linux.go:337:Xwrite: 7 0x1000: 0x1000 +// [11910 sqlite.test] libc_linux.go:337:Xwrite: 7 0x1000: 0x1000 +// 0:jnml@e5-1650:~/src/modernc.org/sqlite$ +// // Sqlite documentation // // See https://sqlite.org/docs.html diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/sqlite/generator.go temporal-1.22.5/src/vendor/modernc.org/sqlite/generator.go --- temporal-1.21.5-1/src/vendor/modernc.org/sqlite/generator.go 2023-09-29 14:03:35.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/sqlite/generator.go 2024-02-23 09:46:16.000000000 +0000 @@ -269,12 +269,12 @@ sz int dev bool }{ - {sqliteDir, "https://www.sqlite.org/2023/sqlite-amalgamation-3410000.zip", 2457, false}, - {sqliteSrcDir, "https://www.sqlite.org/2023/sqlite-src-3410000.zip", 12814, false}, + {sqliteDir, "https://www.sqlite.org/2023/sqlite-amalgamation-3410200.zip", 2457, false}, + {sqliteSrcDir, "https://www.sqlite.org/2023/sqlite-src-3410200.zip", 12814, false}, } - sqliteDir = filepath.FromSlash("testdata/sqlite-amalgamation-3410000") - sqliteSrcDir = filepath.FromSlash("testdata/sqlite-src-3410000") + sqliteDir = filepath.FromSlash("testdata/sqlite-amalgamation-3410200") + sqliteSrcDir = filepath.FromSlash("testdata/sqlite-src-3410200") ) func download() { diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/sqlite/issue120.diff temporal-1.22.5/src/vendor/modernc.org/sqlite/issue120.diff --- temporal-1.21.5-1/src/vendor/modernc.org/sqlite/issue120.diff 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/sqlite/issue120.diff 2024-02-23 09:46:16.000000000 +0000 @@ -0,0 +1,78 @@ +--- /home/jnml/tmp/test_syscall.c 2023-04-21 16:26:44.302689709 +0200 ++++ testdata/sqlite-src-3410200/src/test_syscall.c 2023-04-21 16:29:28.000869993 +0200 +@@ -110,15 +110,15 @@ + static int ts_fstat(int fd, struct stat *p); + static int ts_ftruncate(int fd, off_t n); + static int ts_fcntl(int fd, int cmd, ... 
); +-static int ts_read(int fd, void *aBuf, size_t nBuf); +-static int ts_pread(int fd, void *aBuf, size_t nBuf, off_t off); ++static ssize_t ts_read(int fd, void *aBuf, size_t nBuf); ++static ssize_t ts_pread(int fd, void *aBuf, size_t nBuf, off_t off); + /* Note: pread64() and pwrite64() actually use off64_t as the type on their + ** last parameter. But that datatype is not defined on many systems + ** (ex: Mac, OpenBSD). So substitute a likely equivalent: sqlite3_uint64 */ +-static int ts_pread64(int fd, void *aBuf, size_t nBuf, sqlite3_uint64 off); +-static int ts_write(int fd, const void *aBuf, size_t nBuf); +-static int ts_pwrite(int fd, const void *aBuf, size_t nBuf, off_t off); +-static int ts_pwrite64(int fd, const void *aBuf, size_t nBuf, sqlite3_uint64 off); ++static ssize_t ts_pread64(int fd, void *aBuf, size_t nBuf, sqlite3_uint64 off); ++static ssize_t ts_write(int fd, const void *aBuf, size_t nBuf); ++static ssize_t ts_pwrite(int fd, const void *aBuf, size_t nBuf, off_t off); ++static ssize_t ts_pwrite64(int fd, const void *aBuf, size_t nBuf, sqlite3_uint64 off); + static int ts_fchmod(int fd, mode_t mode); + static int ts_fallocate(int fd, off_t off, off_t len); + static void *ts_mmap(void *, size_t, int, int, int, off_t); +@@ -313,7 +313,7 @@ + /* + ** A wrapper around read(). + */ +-static int ts_read(int fd, void *aBuf, size_t nBuf){ ++static ssize_t ts_read(int fd, void *aBuf, size_t nBuf){ + if( tsIsFailErrno("read") ){ + return -1; + } +@@ -323,7 +323,7 @@ + /* + ** A wrapper around pread(). + */ +-static int ts_pread(int fd, void *aBuf, size_t nBuf, off_t off){ ++static ssize_t ts_pread(int fd, void *aBuf, size_t nBuf, off_t off){ + if( tsIsFailErrno("pread") ){ + return -1; + } +@@ -333,7 +333,7 @@ + /* + ** A wrapper around pread64(). + */ +-static int ts_pread64(int fd, void *aBuf, size_t nBuf, sqlite3_uint64 off){ ++static ssize_t ts_pread64(int fd, void *aBuf, size_t nBuf, sqlite3_uint64 off){ + if( tsIsFailErrno("pread64") ){ + return -1; + } +@@ -343,7 +343,7 @@ + /* + ** A wrapper around write(). + */ +-static int ts_write(int fd, const void *aBuf, size_t nBuf){ ++static ssize_t ts_write(int fd, const void *aBuf, size_t nBuf){ + if( tsIsFailErrno("write") ){ + if( tsErrno("write")==EINTR ) orig_write(fd, aBuf, nBuf/2); + return -1; +@@ -354,7 +354,7 @@ + /* + ** A wrapper around pwrite(). + */ +-static int ts_pwrite(int fd, const void *aBuf, size_t nBuf, off_t off){ ++static ssize_t ts_pwrite(int fd, const void *aBuf, size_t nBuf, off_t off){ + if( tsIsFailErrno("pwrite") ){ + return -1; + } +@@ -364,7 +364,7 @@ + /* + ** A wrapper around pwrite64(). 
+ */ +-static int ts_pwrite64(int fd, const void *aBuf, size_t nBuf, sqlite3_uint64 off){ ++static ssize_t ts_pwrite64(int fd, const void *aBuf, size_t nBuf, sqlite3_uint64 off){ + if( tsIsFailErrno("pwrite64") ){ + return -1; + } diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/sqlite/lib/capi_darwin_amd64.go temporal-1.22.5/src/vendor/modernc.org/sqlite/lib/capi_darwin_amd64.go --- temporal-1.21.5-1/src/vendor/modernc.org/sqlite/lib/capi_darwin_amd64.go 2023-09-29 14:03:35.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/sqlite/lib/capi_darwin_amd64.go 2024-02-23 09:46:16.000000000 +0000 @@ -1,4 +1,4 @@ -// Code generated by 'ccgo -DSQLITE_PRIVATE= -export-defines -export-enums -export-externs X -export-fields F -export-typedefs -ignore-unsupported-alignment -pkgname sqlite3 -volatile=sqlite3_io_error_pending,sqlite3_open_file_count,sqlite3_pager_readdb_count,sqlite3_pager_writedb_count,sqlite3_pager_writej_count,sqlite3_search_count,sqlite3_sort_count,saved_cnt,randomnessPid -o lib/sqlite_darwin_amd64.go -trace-translation-units testdata/sqlite-amalgamation-3410000/sqlite3.c -full-path-comments -DNDEBUG -DHAVE_USLEEP -DLONGDOUBLE_TYPE=double -DSQLITE_CORE -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_FTS5 -DSQLITE_ENABLE_GEOPOLY -DSQLITE_ENABLE_MATH_FUNCTIONS -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_OFFSET_SQL_FUNC -DSQLITE_ENABLE_PREUPDATE_HOOK -DSQLITE_ENABLE_RBU -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_SESSION -DSQLITE_ENABLE_SNAPSHOT -DSQLITE_ENABLE_STAT4 -DSQLITE_ENABLE_UNLOCK_NOTIFY -DSQLITE_LIKE_DOESNT_MATCH_BLOBS -DSQLITE_MUTEX_APPDEF=1 -DSQLITE_MUTEX_NOOP -DSQLITE_SOUNDEX -DSQLITE_THREADSAFE=1 -DSQLITE_OS_UNIX=1 -DSQLITE_WITHOUT_ZONEMALLOC', DO NOT EDIT. +// Code generated by 'ccgo -DSQLITE_PRIVATE= -export-defines -export-enums -export-externs X -export-fields F -export-typedefs -ignore-unsupported-alignment -pkgname sqlite3 -volatile=sqlite3_io_error_pending,sqlite3_open_file_count,sqlite3_pager_readdb_count,sqlite3_pager_writedb_count,sqlite3_pager_writej_count,sqlite3_search_count,sqlite3_sort_count,saved_cnt,randomnessPid -o lib/sqlite_darwin_amd64.go -trace-translation-units testdata/sqlite-amalgamation-3410200/sqlite3.c -full-path-comments -DNDEBUG -DHAVE_USLEEP -DLONGDOUBLE_TYPE=double -DSQLITE_CORE -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_FTS5 -DSQLITE_ENABLE_GEOPOLY -DSQLITE_ENABLE_MATH_FUNCTIONS -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_OFFSET_SQL_FUNC -DSQLITE_ENABLE_PREUPDATE_HOOK -DSQLITE_ENABLE_RBU -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_SESSION -DSQLITE_ENABLE_SNAPSHOT -DSQLITE_ENABLE_STAT4 -DSQLITE_ENABLE_UNLOCK_NOTIFY -DSQLITE_LIKE_DOESNT_MATCH_BLOBS -DSQLITE_MUTEX_APPDEF=1 -DSQLITE_MUTEX_NOOP -DSQLITE_SOUNDEX -DSQLITE_THREADSAFE=1 -DSQLITE_OS_UNIX=1 -DSQLITE_WITHOUT_ZONEMALLOC', DO NOT EDIT. 
package sqlite3 diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/sqlite/lib/capi_darwin_arm64.go temporal-1.22.5/src/vendor/modernc.org/sqlite/lib/capi_darwin_arm64.go --- temporal-1.21.5-1/src/vendor/modernc.org/sqlite/lib/capi_darwin_arm64.go 2023-09-29 14:03:35.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/sqlite/lib/capi_darwin_arm64.go 2024-02-23 09:46:16.000000000 +0000 @@ -1,4 +1,4 @@ -// Code generated by 'ccgo -DSQLITE_PRIVATE= -export-defines -export-enums -export-externs X -export-fields F -export-typedefs -ignore-unsupported-alignment -pkgname sqlite3 -volatile=sqlite3_io_error_pending,sqlite3_open_file_count,sqlite3_pager_readdb_count,sqlite3_pager_writedb_count,sqlite3_pager_writej_count,sqlite3_search_count,sqlite3_sort_count,saved_cnt,randomnessPid -o lib/sqlite_darwin_arm64.go -trace-translation-units testdata/sqlite-amalgamation-3410000/sqlite3.c -full-path-comments -DNDEBUG -DHAVE_USLEEP -DLONGDOUBLE_TYPE=double -DSQLITE_CORE -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_FTS5 -DSQLITE_ENABLE_GEOPOLY -DSQLITE_ENABLE_MATH_FUNCTIONS -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_OFFSET_SQL_FUNC -DSQLITE_ENABLE_PREUPDATE_HOOK -DSQLITE_ENABLE_RBU -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_SESSION -DSQLITE_ENABLE_SNAPSHOT -DSQLITE_ENABLE_STAT4 -DSQLITE_ENABLE_UNLOCK_NOTIFY -DSQLITE_LIKE_DOESNT_MATCH_BLOBS -DSQLITE_MUTEX_APPDEF=1 -DSQLITE_MUTEX_NOOP -DSQLITE_SOUNDEX -DSQLITE_THREADSAFE=1 -DSQLITE_OS_UNIX=1 -DSQLITE_WITHOUT_ZONEMALLOC', DO NOT EDIT. +// Code generated by 'ccgo -DSQLITE_PRIVATE= -export-defines -export-enums -export-externs X -export-fields F -export-typedefs -ignore-unsupported-alignment -pkgname sqlite3 -volatile=sqlite3_io_error_pending,sqlite3_open_file_count,sqlite3_pager_readdb_count,sqlite3_pager_writedb_count,sqlite3_pager_writej_count,sqlite3_search_count,sqlite3_sort_count,saved_cnt,randomnessPid -o lib/sqlite_darwin_arm64.go -trace-translation-units testdata/sqlite-amalgamation-3410200/sqlite3.c -full-path-comments -DNDEBUG -DHAVE_USLEEP -DLONGDOUBLE_TYPE=double -DSQLITE_CORE -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_FTS5 -DSQLITE_ENABLE_GEOPOLY -DSQLITE_ENABLE_MATH_FUNCTIONS -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_OFFSET_SQL_FUNC -DSQLITE_ENABLE_PREUPDATE_HOOK -DSQLITE_ENABLE_RBU -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_SESSION -DSQLITE_ENABLE_SNAPSHOT -DSQLITE_ENABLE_STAT4 -DSQLITE_ENABLE_UNLOCK_NOTIFY -DSQLITE_LIKE_DOESNT_MATCH_BLOBS -DSQLITE_MUTEX_APPDEF=1 -DSQLITE_MUTEX_NOOP -DSQLITE_SOUNDEX -DSQLITE_THREADSAFE=1 -DSQLITE_OS_UNIX=1 -DSQLITE_WITHOUT_ZONEMALLOC', DO NOT EDIT. 
package sqlite3 diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/sqlite/lib/capi_freebsd_386.go temporal-1.22.5/src/vendor/modernc.org/sqlite/lib/capi_freebsd_386.go --- temporal-1.21.5-1/src/vendor/modernc.org/sqlite/lib/capi_freebsd_386.go 2023-09-29 14:03:35.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/sqlite/lib/capi_freebsd_386.go 2024-02-23 09:46:16.000000000 +0000 @@ -1,4 +1,4 @@ -// Code generated by 'ccgo -DSQLITE_PRIVATE= -export-defines -export-enums -export-externs X -export-fields F -export-typedefs -ignore-unsupported-alignment -pkgname sqlite3 -volatile=sqlite3_io_error_pending,sqlite3_open_file_count,sqlite3_pager_readdb_count,sqlite3_pager_writedb_count,sqlite3_pager_writej_count,sqlite3_search_count,sqlite3_sort_count,saved_cnt,randomnessPid -o lib/sqlite_freebsd_386.go -trace-translation-units testdata/sqlite-amalgamation-3410000/sqlite3.c -full-path-comments -DNDEBUG -DHAVE_USLEEP -DLONGDOUBLE_TYPE=double -DSQLITE_CORE -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_FTS5 -DSQLITE_ENABLE_GEOPOLY -DSQLITE_ENABLE_MATH_FUNCTIONS -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_OFFSET_SQL_FUNC -DSQLITE_ENABLE_PREUPDATE_HOOK -DSQLITE_ENABLE_RBU -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_SESSION -DSQLITE_ENABLE_SNAPSHOT -DSQLITE_ENABLE_STAT4 -DSQLITE_ENABLE_UNLOCK_NOTIFY -DSQLITE_LIKE_DOESNT_MATCH_BLOBS -DSQLITE_MUTEX_APPDEF=1 -DSQLITE_MUTEX_NOOP -DSQLITE_SOUNDEX -DSQLITE_THREADSAFE=1 -DSQLITE_OS_UNIX=1', DO NOT EDIT. +// Code generated by 'ccgo -DSQLITE_PRIVATE= -export-defines -export-enums -export-externs X -export-fields F -export-typedefs -ignore-unsupported-alignment -pkgname sqlite3 -volatile=sqlite3_io_error_pending,sqlite3_open_file_count,sqlite3_pager_readdb_count,sqlite3_pager_writedb_count,sqlite3_pager_writej_count,sqlite3_search_count,sqlite3_sort_count,saved_cnt,randomnessPid -o lib/sqlite_freebsd_386.go -trace-translation-units testdata/sqlite-amalgamation-3410200/sqlite3.c -full-path-comments -DNDEBUG -DHAVE_USLEEP -DLONGDOUBLE_TYPE=double -DSQLITE_CORE -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_FTS5 -DSQLITE_ENABLE_GEOPOLY -DSQLITE_ENABLE_MATH_FUNCTIONS -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_OFFSET_SQL_FUNC -DSQLITE_ENABLE_PREUPDATE_HOOK -DSQLITE_ENABLE_RBU -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_SESSION -DSQLITE_ENABLE_SNAPSHOT -DSQLITE_ENABLE_STAT4 -DSQLITE_ENABLE_UNLOCK_NOTIFY -DSQLITE_LIKE_DOESNT_MATCH_BLOBS -DSQLITE_MUTEX_APPDEF=1 -DSQLITE_MUTEX_NOOP -DSQLITE_SOUNDEX -DSQLITE_THREADSAFE=1 -DSQLITE_OS_UNIX=1', DO NOT EDIT. 
package sqlite3 diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/sqlite/lib/capi_freebsd_amd64.go temporal-1.22.5/src/vendor/modernc.org/sqlite/lib/capi_freebsd_amd64.go --- temporal-1.21.5-1/src/vendor/modernc.org/sqlite/lib/capi_freebsd_amd64.go 2023-09-29 14:03:35.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/sqlite/lib/capi_freebsd_amd64.go 2024-02-23 09:46:16.000000000 +0000 @@ -1,4 +1,4 @@ -// Code generated by 'ccgo -DSQLITE_PRIVATE= -export-defines -export-enums -export-externs X -export-fields F -export-typedefs -ignore-unsupported-alignment -pkgname sqlite3 -volatile=sqlite3_io_error_pending,sqlite3_open_file_count,sqlite3_pager_readdb_count,sqlite3_pager_writedb_count,sqlite3_pager_writej_count,sqlite3_search_count,sqlite3_sort_count,saved_cnt,randomnessPid -o lib/sqlite_freebsd_amd64.go -trace-translation-units testdata/sqlite-amalgamation-3410000/sqlite3.c -full-path-comments -DNDEBUG -DHAVE_USLEEP -DLONGDOUBLE_TYPE=double -DSQLITE_CORE -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_FTS5 -DSQLITE_ENABLE_GEOPOLY -DSQLITE_ENABLE_MATH_FUNCTIONS -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_OFFSET_SQL_FUNC -DSQLITE_ENABLE_PREUPDATE_HOOK -DSQLITE_ENABLE_RBU -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_SESSION -DSQLITE_ENABLE_SNAPSHOT -DSQLITE_ENABLE_STAT4 -DSQLITE_ENABLE_UNLOCK_NOTIFY -DSQLITE_LIKE_DOESNT_MATCH_BLOBS -DSQLITE_MUTEX_APPDEF=1 -DSQLITE_MUTEX_NOOP -DSQLITE_SOUNDEX -DSQLITE_THREADSAFE=1 -DSQLITE_OS_UNIX=1', DO NOT EDIT. +// Code generated by 'ccgo -DSQLITE_PRIVATE= -export-defines -export-enums -export-externs X -export-fields F -export-typedefs -ignore-unsupported-alignment -pkgname sqlite3 -volatile=sqlite3_io_error_pending,sqlite3_open_file_count,sqlite3_pager_readdb_count,sqlite3_pager_writedb_count,sqlite3_pager_writej_count,sqlite3_search_count,sqlite3_sort_count,saved_cnt,randomnessPid -o lib/sqlite_freebsd_amd64.go -trace-translation-units testdata/sqlite-amalgamation-3410200/sqlite3.c -full-path-comments -DNDEBUG -DHAVE_USLEEP -DLONGDOUBLE_TYPE=double -DSQLITE_CORE -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_FTS5 -DSQLITE_ENABLE_GEOPOLY -DSQLITE_ENABLE_MATH_FUNCTIONS -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_OFFSET_SQL_FUNC -DSQLITE_ENABLE_PREUPDATE_HOOK -DSQLITE_ENABLE_RBU -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_SESSION -DSQLITE_ENABLE_SNAPSHOT -DSQLITE_ENABLE_STAT4 -DSQLITE_ENABLE_UNLOCK_NOTIFY -DSQLITE_LIKE_DOESNT_MATCH_BLOBS -DSQLITE_MUTEX_APPDEF=1 -DSQLITE_MUTEX_NOOP -DSQLITE_SOUNDEX -DSQLITE_THREADSAFE=1 -DSQLITE_OS_UNIX=1', DO NOT EDIT. 
package sqlite3 diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/sqlite/lib/capi_freebsd_arm.go temporal-1.22.5/src/vendor/modernc.org/sqlite/lib/capi_freebsd_arm.go --- temporal-1.21.5-1/src/vendor/modernc.org/sqlite/lib/capi_freebsd_arm.go 2023-09-29 14:03:35.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/sqlite/lib/capi_freebsd_arm.go 2024-02-23 09:46:16.000000000 +0000 @@ -1,4 +1,4 @@ -// Code generated by 'ccgo -DSQLITE_PRIVATE= -export-defines -export-enums -export-externs X -export-fields F -export-typedefs -ignore-unsupported-alignment -pkgname sqlite3 -volatile=sqlite3_io_error_pending,sqlite3_open_file_count,sqlite3_pager_readdb_count,sqlite3_pager_writedb_count,sqlite3_pager_writej_count,sqlite3_search_count,sqlite3_sort_count,saved_cnt,randomnessPid -o lib/sqlite_freebsd_arm.go -trace-translation-units testdata/sqlite-amalgamation-3410000/sqlite3.c -full-path-comments -DNDEBUG -DHAVE_USLEEP -DLONGDOUBLE_TYPE=double -DSQLITE_CORE -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_FTS5 -DSQLITE_ENABLE_GEOPOLY -DSQLITE_ENABLE_MATH_FUNCTIONS -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_OFFSET_SQL_FUNC -DSQLITE_ENABLE_PREUPDATE_HOOK -DSQLITE_ENABLE_RBU -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_SESSION -DSQLITE_ENABLE_SNAPSHOT -DSQLITE_ENABLE_STAT4 -DSQLITE_ENABLE_UNLOCK_NOTIFY -DSQLITE_LIKE_DOESNT_MATCH_BLOBS -DSQLITE_MUTEX_APPDEF=1 -DSQLITE_MUTEX_NOOP -DSQLITE_SOUNDEX -DSQLITE_THREADSAFE=1 -DSQLITE_OS_UNIX=1', DO NOT EDIT. +// Code generated by 'ccgo -DSQLITE_PRIVATE= -export-defines -export-enums -export-externs X -export-fields F -export-typedefs -ignore-unsupported-alignment -pkgname sqlite3 -volatile=sqlite3_io_error_pending,sqlite3_open_file_count,sqlite3_pager_readdb_count,sqlite3_pager_writedb_count,sqlite3_pager_writej_count,sqlite3_search_count,sqlite3_sort_count,saved_cnt,randomnessPid -o lib/sqlite_freebsd_arm.go -trace-translation-units testdata/sqlite-amalgamation-3410200/sqlite3.c -full-path-comments -DNDEBUG -DHAVE_USLEEP -DLONGDOUBLE_TYPE=double -DSQLITE_CORE -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_FTS5 -DSQLITE_ENABLE_GEOPOLY -DSQLITE_ENABLE_MATH_FUNCTIONS -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_OFFSET_SQL_FUNC -DSQLITE_ENABLE_PREUPDATE_HOOK -DSQLITE_ENABLE_RBU -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_SESSION -DSQLITE_ENABLE_SNAPSHOT -DSQLITE_ENABLE_STAT4 -DSQLITE_ENABLE_UNLOCK_NOTIFY -DSQLITE_LIKE_DOESNT_MATCH_BLOBS -DSQLITE_MUTEX_APPDEF=1 -DSQLITE_MUTEX_NOOP -DSQLITE_SOUNDEX -DSQLITE_THREADSAFE=1 -DSQLITE_OS_UNIX=1', DO NOT EDIT. 
package sqlite3 diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/sqlite/lib/capi_freebsd_arm64.go temporal-1.22.5/src/vendor/modernc.org/sqlite/lib/capi_freebsd_arm64.go --- temporal-1.21.5-1/src/vendor/modernc.org/sqlite/lib/capi_freebsd_arm64.go 2023-09-29 14:03:35.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/sqlite/lib/capi_freebsd_arm64.go 2024-02-23 09:46:16.000000000 +0000 @@ -1,4 +1,4 @@ -// Code generated by 'ccgo -DSQLITE_PRIVATE= -export-defines -export-enums -export-externs X -export-fields F -export-typedefs -ignore-unsupported-alignment -pkgname sqlite3 -volatile=sqlite3_io_error_pending,sqlite3_open_file_count,sqlite3_pager_readdb_count,sqlite3_pager_writedb_count,sqlite3_pager_writej_count,sqlite3_search_count,sqlite3_sort_count,saved_cnt,randomnessPid -o lib/sqlite_freebsd_amd64.go -trace-translation-units testdata/sqlite-amalgamation-3410000/sqlite3.c -full-path-comments -DNDEBUG -DHAVE_USLEEP -DLONGDOUBLE_TYPE=double -DSQLITE_CORE -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_FTS5 -DSQLITE_ENABLE_GEOPOLY -DSQLITE_ENABLE_MATH_FUNCTIONS -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_OFFSET_SQL_FUNC -DSQLITE_ENABLE_PREUPDATE_HOOK -DSQLITE_ENABLE_RBU -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_SESSION -DSQLITE_ENABLE_SNAPSHOT -DSQLITE_ENABLE_STAT4 -DSQLITE_ENABLE_UNLOCK_NOTIFY -DSQLITE_LIKE_DOESNT_MATCH_BLOBS -DSQLITE_MUTEX_APPDEF=1 -DSQLITE_MUTEX_NOOP -DSQLITE_SOUNDEX -DSQLITE_THREADSAFE=1 -DSQLITE_OS_UNIX=1', DO NOT EDIT. +// Code generated by 'ccgo -DSQLITE_PRIVATE= -export-defines -export-enums -export-externs X -export-fields F -export-typedefs -ignore-unsupported-alignment -pkgname sqlite3 -volatile=sqlite3_io_error_pending,sqlite3_open_file_count,sqlite3_pager_readdb_count,sqlite3_pager_writedb_count,sqlite3_pager_writej_count,sqlite3_search_count,sqlite3_sort_count,saved_cnt,randomnessPid -o lib/sqlite_freebsd_amd64.go -trace-translation-units testdata/sqlite-amalgamation-3410200/sqlite3.c -full-path-comments -DNDEBUG -DHAVE_USLEEP -DLONGDOUBLE_TYPE=double -DSQLITE_CORE -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_FTS5 -DSQLITE_ENABLE_GEOPOLY -DSQLITE_ENABLE_MATH_FUNCTIONS -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_OFFSET_SQL_FUNC -DSQLITE_ENABLE_PREUPDATE_HOOK -DSQLITE_ENABLE_RBU -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_SESSION -DSQLITE_ENABLE_SNAPSHOT -DSQLITE_ENABLE_STAT4 -DSQLITE_ENABLE_UNLOCK_NOTIFY -DSQLITE_LIKE_DOESNT_MATCH_BLOBS -DSQLITE_MUTEX_APPDEF=1 -DSQLITE_MUTEX_NOOP -DSQLITE_SOUNDEX -DSQLITE_THREADSAFE=1 -DSQLITE_OS_UNIX=1', DO NOT EDIT. 
package sqlite3 diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/sqlite/lib/capi_linux_386.go temporal-1.22.5/src/vendor/modernc.org/sqlite/lib/capi_linux_386.go --- temporal-1.21.5-1/src/vendor/modernc.org/sqlite/lib/capi_linux_386.go 2023-09-29 14:03:35.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/sqlite/lib/capi_linux_386.go 2024-02-23 09:46:16.000000000 +0000 @@ -1,4 +1,4 @@ -// Code generated by 'ccgo -DSQLITE_PRIVATE= -export-defines -export-enums -export-externs X -export-fields F -export-typedefs -ignore-unsupported-alignment -pkgname sqlite3 -volatile=sqlite3_io_error_pending,sqlite3_open_file_count,sqlite3_pager_readdb_count,sqlite3_pager_writedb_count,sqlite3_pager_writej_count,sqlite3_search_count,sqlite3_sort_count,saved_cnt,randomnessPid -o lib/sqlite_linux_386.go -trace-translation-units testdata/sqlite-amalgamation-3410000/sqlite3.c -full-path-comments -DNDEBUG -DHAVE_USLEEP -DLONGDOUBLE_TYPE=double -DSQLITE_CORE -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_FTS5 -DSQLITE_ENABLE_GEOPOLY -DSQLITE_ENABLE_MATH_FUNCTIONS -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_OFFSET_SQL_FUNC -DSQLITE_ENABLE_PREUPDATE_HOOK -DSQLITE_ENABLE_RBU -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_SESSION -DSQLITE_ENABLE_SNAPSHOT -DSQLITE_ENABLE_STAT4 -DSQLITE_ENABLE_UNLOCK_NOTIFY -DSQLITE_LIKE_DOESNT_MATCH_BLOBS -DSQLITE_MUTEX_APPDEF=1 -DSQLITE_MUTEX_NOOP -DSQLITE_SOUNDEX -DSQLITE_THREADSAFE=1 -DSQLITE_OS_UNIX=1', DO NOT EDIT. +// Code generated by 'ccgo -DSQLITE_PRIVATE= -export-defines -export-enums -export-externs X -export-fields F -export-typedefs -ignore-unsupported-alignment -pkgname sqlite3 -volatile=sqlite3_io_error_pending,sqlite3_open_file_count,sqlite3_pager_readdb_count,sqlite3_pager_writedb_count,sqlite3_pager_writej_count,sqlite3_search_count,sqlite3_sort_count,saved_cnt,randomnessPid -o lib/sqlite_linux_386.go -trace-translation-units testdata/sqlite-amalgamation-3410200/sqlite3.c -full-path-comments -DNDEBUG -DHAVE_USLEEP -DLONGDOUBLE_TYPE=double -DSQLITE_CORE -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_FTS5 -DSQLITE_ENABLE_GEOPOLY -DSQLITE_ENABLE_MATH_FUNCTIONS -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_OFFSET_SQL_FUNC -DSQLITE_ENABLE_PREUPDATE_HOOK -DSQLITE_ENABLE_RBU -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_SESSION -DSQLITE_ENABLE_SNAPSHOT -DSQLITE_ENABLE_STAT4 -DSQLITE_ENABLE_UNLOCK_NOTIFY -DSQLITE_LIKE_DOESNT_MATCH_BLOBS -DSQLITE_MUTEX_APPDEF=1 -DSQLITE_MUTEX_NOOP -DSQLITE_SOUNDEX -DSQLITE_THREADSAFE=1 -DSQLITE_OS_UNIX=1', DO NOT EDIT. 
package sqlite3 diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/sqlite/lib/capi_linux_amd64.go temporal-1.22.5/src/vendor/modernc.org/sqlite/lib/capi_linux_amd64.go --- temporal-1.21.5-1/src/vendor/modernc.org/sqlite/lib/capi_linux_amd64.go 2023-09-29 14:03:35.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/sqlite/lib/capi_linux_amd64.go 2024-02-23 09:46:16.000000000 +0000 @@ -1,4 +1,4 @@ -// Code generated by 'ccgo -DSQLITE_PRIVATE= -export-defines -export-enums -export-externs X -export-fields F -export-typedefs -ignore-unsupported-alignment -pkgname sqlite3 -volatile=sqlite3_io_error_pending,sqlite3_open_file_count,sqlite3_pager_readdb_count,sqlite3_pager_writedb_count,sqlite3_pager_writej_count,sqlite3_search_count,sqlite3_sort_count,saved_cnt,randomnessPid -o lib/sqlite_linux_amd64.go -trace-translation-units testdata/sqlite-amalgamation-3410000/sqlite3.c -full-path-comments -DNDEBUG -DHAVE_USLEEP -DLONGDOUBLE_TYPE=double -DSQLITE_CORE -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_FTS5 -DSQLITE_ENABLE_GEOPOLY -DSQLITE_ENABLE_MATH_FUNCTIONS -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_OFFSET_SQL_FUNC -DSQLITE_ENABLE_PREUPDATE_HOOK -DSQLITE_ENABLE_RBU -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_SESSION -DSQLITE_ENABLE_SNAPSHOT -DSQLITE_ENABLE_STAT4 -DSQLITE_ENABLE_UNLOCK_NOTIFY -DSQLITE_LIKE_DOESNT_MATCH_BLOBS -DSQLITE_MUTEX_APPDEF=1 -DSQLITE_MUTEX_NOOP -DSQLITE_SOUNDEX -DSQLITE_THREADSAFE=1 -DSQLITE_OS_UNIX=1', DO NOT EDIT. +// Code generated by 'ccgo -DSQLITE_PRIVATE= -export-defines -export-enums -export-externs X -export-fields F -export-typedefs -ignore-unsupported-alignment -pkgname sqlite3 -volatile=sqlite3_io_error_pending,sqlite3_open_file_count,sqlite3_pager_readdb_count,sqlite3_pager_writedb_count,sqlite3_pager_writej_count,sqlite3_search_count,sqlite3_sort_count,saved_cnt,randomnessPid -o lib/sqlite_linux_amd64.go -trace-translation-units testdata/sqlite-amalgamation-3410200/sqlite3.c -full-path-comments -DNDEBUG -DHAVE_USLEEP -DLONGDOUBLE_TYPE=double -DSQLITE_CORE -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_FTS5 -DSQLITE_ENABLE_GEOPOLY -DSQLITE_ENABLE_MATH_FUNCTIONS -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_OFFSET_SQL_FUNC -DSQLITE_ENABLE_PREUPDATE_HOOK -DSQLITE_ENABLE_RBU -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_SESSION -DSQLITE_ENABLE_SNAPSHOT -DSQLITE_ENABLE_STAT4 -DSQLITE_ENABLE_UNLOCK_NOTIFY -DSQLITE_LIKE_DOESNT_MATCH_BLOBS -DSQLITE_MUTEX_APPDEF=1 -DSQLITE_MUTEX_NOOP -DSQLITE_SOUNDEX -DSQLITE_THREADSAFE=1 -DSQLITE_OS_UNIX=1', DO NOT EDIT. 
package sqlite3 diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/sqlite/lib/capi_linux_arm.go temporal-1.22.5/src/vendor/modernc.org/sqlite/lib/capi_linux_arm.go --- temporal-1.21.5-1/src/vendor/modernc.org/sqlite/lib/capi_linux_arm.go 2023-09-29 14:03:35.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/sqlite/lib/capi_linux_arm.go 2024-02-23 09:46:16.000000000 +0000 @@ -1,4 +1,4 @@ -// Code generated by 'ccgo -DSQLITE_PRIVATE= -export-defines -export-enums -export-externs X -export-fields F -export-typedefs -ignore-unsupported-alignment -pkgname sqlite3 -volatile=sqlite3_io_error_pending,sqlite3_open_file_count,sqlite3_pager_readdb_count,sqlite3_pager_writedb_count,sqlite3_pager_writej_count,sqlite3_search_count,sqlite3_sort_count,saved_cnt,randomnessPid -o lib/sqlite_linux_arm.go -trace-translation-units testdata/sqlite-amalgamation-3410000/sqlite3.c -full-path-comments -DNDEBUG -DHAVE_USLEEP -DLONGDOUBLE_TYPE=double -DSQLITE_CORE -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_FTS5 -DSQLITE_ENABLE_GEOPOLY -DSQLITE_ENABLE_MATH_FUNCTIONS -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_OFFSET_SQL_FUNC -DSQLITE_ENABLE_PREUPDATE_HOOK -DSQLITE_ENABLE_RBU -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_SESSION -DSQLITE_ENABLE_SNAPSHOT -DSQLITE_ENABLE_STAT4 -DSQLITE_ENABLE_UNLOCK_NOTIFY -DSQLITE_LIKE_DOESNT_MATCH_BLOBS -DSQLITE_MUTEX_APPDEF=1 -DSQLITE_MUTEX_NOOP -DSQLITE_SOUNDEX -DSQLITE_THREADSAFE=1 -DSQLITE_OS_UNIX=1', DO NOT EDIT. +// Code generated by 'ccgo -DSQLITE_PRIVATE= -export-defines -export-enums -export-externs X -export-fields F -export-typedefs -ignore-unsupported-alignment -pkgname sqlite3 -volatile=sqlite3_io_error_pending,sqlite3_open_file_count,sqlite3_pager_readdb_count,sqlite3_pager_writedb_count,sqlite3_pager_writej_count,sqlite3_search_count,sqlite3_sort_count,saved_cnt,randomnessPid -o lib/sqlite_linux_arm.go -trace-translation-units testdata/sqlite-amalgamation-3410200/sqlite3.c -full-path-comments -DNDEBUG -DHAVE_USLEEP -DLONGDOUBLE_TYPE=double -DSQLITE_CORE -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_FTS5 -DSQLITE_ENABLE_GEOPOLY -DSQLITE_ENABLE_MATH_FUNCTIONS -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_OFFSET_SQL_FUNC -DSQLITE_ENABLE_PREUPDATE_HOOK -DSQLITE_ENABLE_RBU -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_SESSION -DSQLITE_ENABLE_SNAPSHOT -DSQLITE_ENABLE_STAT4 -DSQLITE_ENABLE_UNLOCK_NOTIFY -DSQLITE_LIKE_DOESNT_MATCH_BLOBS -DSQLITE_MUTEX_APPDEF=1 -DSQLITE_MUTEX_NOOP -DSQLITE_SOUNDEX -DSQLITE_THREADSAFE=1 -DSQLITE_OS_UNIX=1', DO NOT EDIT. 
package sqlite3 diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/sqlite/lib/capi_linux_arm64.go temporal-1.22.5/src/vendor/modernc.org/sqlite/lib/capi_linux_arm64.go --- temporal-1.21.5-1/src/vendor/modernc.org/sqlite/lib/capi_linux_arm64.go 2023-09-29 14:03:35.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/sqlite/lib/capi_linux_arm64.go 2024-02-23 09:46:16.000000000 +0000 @@ -1,4 +1,4 @@ -// Code generated by 'ccgo -DSQLITE_PRIVATE= -export-defines -export-enums -export-externs X -export-fields F -export-typedefs -ignore-unsupported-alignment -pkgname sqlite3 -volatile=sqlite3_io_error_pending,sqlite3_open_file_count,sqlite3_pager_readdb_count,sqlite3_pager_writedb_count,sqlite3_pager_writej_count,sqlite3_search_count,sqlite3_sort_count,saved_cnt,randomnessPid -o lib/sqlite_linux_arm64.go -trace-translation-units testdata/sqlite-amalgamation-3410000/sqlite3.c -full-path-comments -DNDEBUG -DHAVE_USLEEP -DLONGDOUBLE_TYPE=double -DSQLITE_CORE -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_FTS5 -DSQLITE_ENABLE_GEOPOLY -DSQLITE_ENABLE_MATH_FUNCTIONS -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_OFFSET_SQL_FUNC -DSQLITE_ENABLE_PREUPDATE_HOOK -DSQLITE_ENABLE_RBU -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_SESSION -DSQLITE_ENABLE_SNAPSHOT -DSQLITE_ENABLE_STAT4 -DSQLITE_ENABLE_UNLOCK_NOTIFY -DSQLITE_LIKE_DOESNT_MATCH_BLOBS -DSQLITE_MUTEX_APPDEF=1 -DSQLITE_MUTEX_NOOP -DSQLITE_SOUNDEX -DSQLITE_THREADSAFE=1 -DSQLITE_OS_UNIX=1', DO NOT EDIT. +// Code generated by 'ccgo -DSQLITE_PRIVATE= -export-defines -export-enums -export-externs X -export-fields F -export-typedefs -ignore-unsupported-alignment -pkgname sqlite3 -volatile=sqlite3_io_error_pending,sqlite3_open_file_count,sqlite3_pager_readdb_count,sqlite3_pager_writedb_count,sqlite3_pager_writej_count,sqlite3_search_count,sqlite3_sort_count,saved_cnt,randomnessPid -o lib/sqlite_linux_arm64.go -trace-translation-units testdata/sqlite-amalgamation-3410200/sqlite3.c -full-path-comments -DNDEBUG -DHAVE_USLEEP -DLONGDOUBLE_TYPE=double -DSQLITE_CORE -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_FTS5 -DSQLITE_ENABLE_GEOPOLY -DSQLITE_ENABLE_MATH_FUNCTIONS -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_OFFSET_SQL_FUNC -DSQLITE_ENABLE_PREUPDATE_HOOK -DSQLITE_ENABLE_RBU -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_SESSION -DSQLITE_ENABLE_SNAPSHOT -DSQLITE_ENABLE_STAT4 -DSQLITE_ENABLE_UNLOCK_NOTIFY -DSQLITE_LIKE_DOESNT_MATCH_BLOBS -DSQLITE_MUTEX_APPDEF=1 -DSQLITE_MUTEX_NOOP -DSQLITE_SOUNDEX -DSQLITE_THREADSAFE=1 -DSQLITE_OS_UNIX=1', DO NOT EDIT. 
package sqlite3 diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/sqlite/lib/capi_linux_ppc64le.go temporal-1.22.5/src/vendor/modernc.org/sqlite/lib/capi_linux_ppc64le.go --- temporal-1.21.5-1/src/vendor/modernc.org/sqlite/lib/capi_linux_ppc64le.go 2023-09-29 14:03:35.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/sqlite/lib/capi_linux_ppc64le.go 2024-02-23 09:46:16.000000000 +0000 @@ -1,4 +1,4 @@ -// Code generated by 'ccgo -DSQLITE_PRIVATE= -export-defines -export-enums -export-externs X -export-fields F -export-typedefs -ignore-unsupported-alignment -pkgname sqlite3 -volatile=sqlite3_io_error_pending,sqlite3_open_file_count,sqlite3_pager_readdb_count,sqlite3_pager_writedb_count,sqlite3_pager_writej_count,sqlite3_search_count,sqlite3_sort_count,saved_cnt,randomnessPid -o lib/sqlite_linux_ppc64le.go -trace-translation-units testdata/sqlite-amalgamation-3410000/sqlite3.c -full-path-comments -DNDEBUG -DHAVE_USLEEP -DLONGDOUBLE_TYPE=double -DSQLITE_CORE -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_FTS5 -DSQLITE_ENABLE_GEOPOLY -DSQLITE_ENABLE_MATH_FUNCTIONS -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_OFFSET_SQL_FUNC -DSQLITE_ENABLE_PREUPDATE_HOOK -DSQLITE_ENABLE_RBU -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_SESSION -DSQLITE_ENABLE_SNAPSHOT -DSQLITE_ENABLE_STAT4 -DSQLITE_ENABLE_UNLOCK_NOTIFY -DSQLITE_LIKE_DOESNT_MATCH_BLOBS -DSQLITE_MUTEX_APPDEF=1 -DSQLITE_MUTEX_NOOP -DSQLITE_SOUNDEX -DSQLITE_THREADSAFE=1 -DSQLITE_OS_UNIX=1', DO NOT EDIT. +// Code generated by 'ccgo -DSQLITE_PRIVATE= -export-defines -export-enums -export-externs X -export-fields F -export-typedefs -ignore-unsupported-alignment -pkgname sqlite3 -volatile=sqlite3_io_error_pending,sqlite3_open_file_count,sqlite3_pager_readdb_count,sqlite3_pager_writedb_count,sqlite3_pager_writej_count,sqlite3_search_count,sqlite3_sort_count,saved_cnt,randomnessPid -o lib/sqlite_linux_ppc64le.go -trace-translation-units testdata/sqlite-amalgamation-3410200/sqlite3.c -full-path-comments -DNDEBUG -DHAVE_USLEEP -DLONGDOUBLE_TYPE=double -DSQLITE_CORE -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_FTS5 -DSQLITE_ENABLE_GEOPOLY -DSQLITE_ENABLE_MATH_FUNCTIONS -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_OFFSET_SQL_FUNC -DSQLITE_ENABLE_PREUPDATE_HOOK -DSQLITE_ENABLE_RBU -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_SESSION -DSQLITE_ENABLE_SNAPSHOT -DSQLITE_ENABLE_STAT4 -DSQLITE_ENABLE_UNLOCK_NOTIFY -DSQLITE_LIKE_DOESNT_MATCH_BLOBS -DSQLITE_MUTEX_APPDEF=1 -DSQLITE_MUTEX_NOOP -DSQLITE_SOUNDEX -DSQLITE_THREADSAFE=1 -DSQLITE_OS_UNIX=1', DO NOT EDIT. 
package sqlite3 diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/sqlite/lib/capi_linux_riscv64.go temporal-1.22.5/src/vendor/modernc.org/sqlite/lib/capi_linux_riscv64.go --- temporal-1.21.5-1/src/vendor/modernc.org/sqlite/lib/capi_linux_riscv64.go 2023-09-29 14:03:35.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/sqlite/lib/capi_linux_riscv64.go 2024-02-23 09:46:16.000000000 +0000 @@ -1,4 +1,4 @@ -// Code generated by 'ccgo -DSQLITE_PRIVATE= -export-defines -export-enums -export-externs X -export-fields F -export-typedefs -ignore-unsupported-alignment -pkgname sqlite3 -volatile=sqlite3_io_error_pending,sqlite3_open_file_count,sqlite3_pager_readdb_count,sqlite3_pager_writedb_count,sqlite3_pager_writej_count,sqlite3_search_count,sqlite3_sort_count,saved_cnt,randomnessPid -o lib/sqlite_linux_riscv64.go -trace-translation-units testdata/sqlite-amalgamation-3410000/sqlite3.c -full-path-comments -DNDEBUG -DHAVE_USLEEP -DLONGDOUBLE_TYPE=double -DSQLITE_CORE -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_FTS5 -DSQLITE_ENABLE_GEOPOLY -DSQLITE_ENABLE_MATH_FUNCTIONS -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_OFFSET_SQL_FUNC -DSQLITE_ENABLE_PREUPDATE_HOOK -DSQLITE_ENABLE_RBU -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_SESSION -DSQLITE_ENABLE_SNAPSHOT -DSQLITE_ENABLE_STAT4 -DSQLITE_ENABLE_UNLOCK_NOTIFY -DSQLITE_LIKE_DOESNT_MATCH_BLOBS -DSQLITE_MUTEX_APPDEF=1 -DSQLITE_MUTEX_NOOP -DSQLITE_SOUNDEX -DSQLITE_THREADSAFE=1 -DSQLITE_OS_UNIX=1', DO NOT EDIT. +// Code generated by 'ccgo -DSQLITE_PRIVATE= -export-defines -export-enums -export-externs X -export-fields F -export-typedefs -ignore-unsupported-alignment -pkgname sqlite3 -volatile=sqlite3_io_error_pending,sqlite3_open_file_count,sqlite3_pager_readdb_count,sqlite3_pager_writedb_count,sqlite3_pager_writej_count,sqlite3_search_count,sqlite3_sort_count,saved_cnt,randomnessPid -o lib/sqlite_linux_riscv64.go -trace-translation-units testdata/sqlite-amalgamation-3410200/sqlite3.c -full-path-comments -DNDEBUG -DHAVE_USLEEP -DLONGDOUBLE_TYPE=double -DSQLITE_CORE -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_FTS5 -DSQLITE_ENABLE_GEOPOLY -DSQLITE_ENABLE_MATH_FUNCTIONS -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_OFFSET_SQL_FUNC -DSQLITE_ENABLE_PREUPDATE_HOOK -DSQLITE_ENABLE_RBU -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_SESSION -DSQLITE_ENABLE_SNAPSHOT -DSQLITE_ENABLE_STAT4 -DSQLITE_ENABLE_UNLOCK_NOTIFY -DSQLITE_LIKE_DOESNT_MATCH_BLOBS -DSQLITE_MUTEX_APPDEF=1 -DSQLITE_MUTEX_NOOP -DSQLITE_SOUNDEX -DSQLITE_THREADSAFE=1 -DSQLITE_OS_UNIX=1', DO NOT EDIT. 
package sqlite3 diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/sqlite/lib/capi_linux_s390x.go temporal-1.22.5/src/vendor/modernc.org/sqlite/lib/capi_linux_s390x.go --- temporal-1.21.5-1/src/vendor/modernc.org/sqlite/lib/capi_linux_s390x.go 2023-09-29 14:03:35.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/sqlite/lib/capi_linux_s390x.go 2024-02-23 09:46:16.000000000 +0000 @@ -1,4 +1,4 @@ -// Code generated by 'ccgo -DSQLITE_PRIVATE= -export-defines -export-enums -export-externs X -export-fields F -export-typedefs -ignore-unsupported-alignment -pkgname sqlite3 -volatile=sqlite3_io_error_pending,sqlite3_open_file_count,sqlite3_pager_readdb_count,sqlite3_pager_writedb_count,sqlite3_pager_writej_count,sqlite3_search_count,sqlite3_sort_count,saved_cnt,randomnessPid -o lib/sqlite_linux_s390x.go -trace-translation-units testdata/sqlite-amalgamation-3410000/sqlite3.c -full-path-comments -DNDEBUG -DHAVE_USLEEP -DLONGDOUBLE_TYPE=double -DSQLITE_CORE -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_FTS5 -DSQLITE_ENABLE_GEOPOLY -DSQLITE_ENABLE_MATH_FUNCTIONS -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_OFFSET_SQL_FUNC -DSQLITE_ENABLE_PREUPDATE_HOOK -DSQLITE_ENABLE_RBU -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_SESSION -DSQLITE_ENABLE_SNAPSHOT -DSQLITE_ENABLE_STAT4 -DSQLITE_ENABLE_UNLOCK_NOTIFY -DSQLITE_LIKE_DOESNT_MATCH_BLOBS -DSQLITE_MUTEX_APPDEF=1 -DSQLITE_MUTEX_NOOP -DSQLITE_SOUNDEX -DSQLITE_THREADSAFE=1 -DSQLITE_OS_UNIX=1', DO NOT EDIT. +// Code generated by 'ccgo -DSQLITE_PRIVATE= -export-defines -export-enums -export-externs X -export-fields F -export-typedefs -ignore-unsupported-alignment -pkgname sqlite3 -volatile=sqlite3_io_error_pending,sqlite3_open_file_count,sqlite3_pager_readdb_count,sqlite3_pager_writedb_count,sqlite3_pager_writej_count,sqlite3_search_count,sqlite3_sort_count,saved_cnt,randomnessPid -o lib/sqlite_linux_s390x.go -trace-translation-units testdata/sqlite-amalgamation-3410200/sqlite3.c -full-path-comments -DNDEBUG -DHAVE_USLEEP -DLONGDOUBLE_TYPE=double -DSQLITE_CORE -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_FTS5 -DSQLITE_ENABLE_GEOPOLY -DSQLITE_ENABLE_MATH_FUNCTIONS -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_OFFSET_SQL_FUNC -DSQLITE_ENABLE_PREUPDATE_HOOK -DSQLITE_ENABLE_RBU -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_SESSION -DSQLITE_ENABLE_SNAPSHOT -DSQLITE_ENABLE_STAT4 -DSQLITE_ENABLE_UNLOCK_NOTIFY -DSQLITE_LIKE_DOESNT_MATCH_BLOBS -DSQLITE_MUTEX_APPDEF=1 -DSQLITE_MUTEX_NOOP -DSQLITE_SOUNDEX -DSQLITE_THREADSAFE=1 -DSQLITE_OS_UNIX=1', DO NOT EDIT. 
package sqlite3 diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/sqlite/lib/capi_openbsd_amd64.go temporal-1.22.5/src/vendor/modernc.org/sqlite/lib/capi_openbsd_amd64.go --- temporal-1.21.5-1/src/vendor/modernc.org/sqlite/lib/capi_openbsd_amd64.go 2023-09-29 14:03:35.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/sqlite/lib/capi_openbsd_amd64.go 2024-02-23 09:46:16.000000000 +0000 @@ -1,4 +1,4 @@ -// Code generated by 'ccgo -DSQLITE_PRIVATE= -export-defines -export-enums -export-externs X -export-fields F -export-typedefs -ignore-unsupported-alignment -pkgname sqlite3 -volatile=sqlite3_io_error_pending,sqlite3_open_file_count,sqlite3_pager_readdb_count,sqlite3_pager_writedb_count,sqlite3_pager_writej_count,sqlite3_search_count,sqlite3_sort_count,saved_cnt,randomnessPid -o lib/sqlite_openbsd_amd64.go -trace-translation-units testdata/sqlite-amalgamation-3410000/sqlite3.c -full-path-comments -DNDEBUG -DHAVE_USLEEP -DLONGDOUBLE_TYPE=double -DSQLITE_CORE -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_FTS5 -DSQLITE_ENABLE_GEOPOLY -DSQLITE_ENABLE_MATH_FUNCTIONS -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_OFFSET_SQL_FUNC -DSQLITE_ENABLE_PREUPDATE_HOOK -DSQLITE_ENABLE_RBU -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_SESSION -DSQLITE_ENABLE_SNAPSHOT -DSQLITE_ENABLE_STAT4 -DSQLITE_ENABLE_UNLOCK_NOTIFY -DSQLITE_LIKE_DOESNT_MATCH_BLOBS -DSQLITE_MUTEX_APPDEF=1 -DSQLITE_MUTEX_NOOP -DSQLITE_SOUNDEX -DSQLITE_THREADSAFE=1 -DSQLITE_OS_UNIX=1', DO NOT EDIT. +// Code generated by 'ccgo -DSQLITE_PRIVATE= -export-defines -export-enums -export-externs X -export-fields F -export-typedefs -ignore-unsupported-alignment -pkgname sqlite3 -volatile=sqlite3_io_error_pending,sqlite3_open_file_count,sqlite3_pager_readdb_count,sqlite3_pager_writedb_count,sqlite3_pager_writej_count,sqlite3_search_count,sqlite3_sort_count,saved_cnt,randomnessPid -o lib/sqlite_openbsd_amd64.go -trace-translation-units testdata/sqlite-amalgamation-3410200/sqlite3.c -full-path-comments -DNDEBUG -DHAVE_USLEEP -DLONGDOUBLE_TYPE=double -DSQLITE_CORE -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_FTS5 -DSQLITE_ENABLE_GEOPOLY -DSQLITE_ENABLE_MATH_FUNCTIONS -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_OFFSET_SQL_FUNC -DSQLITE_ENABLE_PREUPDATE_HOOK -DSQLITE_ENABLE_RBU -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_SESSION -DSQLITE_ENABLE_SNAPSHOT -DSQLITE_ENABLE_STAT4 -DSQLITE_ENABLE_UNLOCK_NOTIFY -DSQLITE_LIKE_DOESNT_MATCH_BLOBS -DSQLITE_MUTEX_APPDEF=1 -DSQLITE_MUTEX_NOOP -DSQLITE_SOUNDEX -DSQLITE_THREADSAFE=1 -DSQLITE_OS_UNIX=1', DO NOT EDIT. 
package sqlite3 diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/sqlite/lib/capi_openbsd_arm64.go temporal-1.22.5/src/vendor/modernc.org/sqlite/lib/capi_openbsd_arm64.go --- temporal-1.21.5-1/src/vendor/modernc.org/sqlite/lib/capi_openbsd_arm64.go 2023-09-29 14:03:35.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/sqlite/lib/capi_openbsd_arm64.go 2024-02-23 09:46:16.000000000 +0000 @@ -1,4 +1,4 @@ -// Code generated by 'ccgo -DSQLITE_PRIVATE= -export-defines -export-enums -export-externs X -export-fields F -export-typedefs -ignore-unsupported-alignment -pkgname sqlite3 -volatile=sqlite3_io_error_pending,sqlite3_open_file_count,sqlite3_pager_readdb_count,sqlite3_pager_writedb_count,sqlite3_pager_writej_count,sqlite3_search_count,sqlite3_sort_count,saved_cnt,randomnessPid -o lib/sqlite_openbsd_arm64.go -trace-translation-units testdata/sqlite-amalgamation-3410000/sqlite3.c -full-path-comments -DNDEBUG -DHAVE_USLEEP -DLONGDOUBLE_TYPE=double -DSQLITE_CORE -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_FTS5 -DSQLITE_ENABLE_GEOPOLY -DSQLITE_ENABLE_MATH_FUNCTIONS -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_OFFSET_SQL_FUNC -DSQLITE_ENABLE_PREUPDATE_HOOK -DSQLITE_ENABLE_RBU -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_SESSION -DSQLITE_ENABLE_SNAPSHOT -DSQLITE_ENABLE_STAT4 -DSQLITE_ENABLE_UNLOCK_NOTIFY -DSQLITE_LIKE_DOESNT_MATCH_BLOBS -DSQLITE_MUTEX_APPDEF=1 -DSQLITE_MUTEX_NOOP -DSQLITE_SOUNDEX -DSQLITE_THREADSAFE=1 -DSQLITE_OS_UNIX=1', DO NOT EDIT. +// Code generated by 'ccgo -DSQLITE_PRIVATE= -export-defines -export-enums -export-externs X -export-fields F -export-typedefs -ignore-unsupported-alignment -pkgname sqlite3 -volatile=sqlite3_io_error_pending,sqlite3_open_file_count,sqlite3_pager_readdb_count,sqlite3_pager_writedb_count,sqlite3_pager_writej_count,sqlite3_search_count,sqlite3_sort_count,saved_cnt,randomnessPid -o lib/sqlite_openbsd_arm64.go -trace-translation-units testdata/sqlite-amalgamation-3410200/sqlite3.c -full-path-comments -DNDEBUG -DHAVE_USLEEP -DLONGDOUBLE_TYPE=double -DSQLITE_CORE -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_FTS5 -DSQLITE_ENABLE_GEOPOLY -DSQLITE_ENABLE_MATH_FUNCTIONS -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_OFFSET_SQL_FUNC -DSQLITE_ENABLE_PREUPDATE_HOOK -DSQLITE_ENABLE_RBU -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_SESSION -DSQLITE_ENABLE_SNAPSHOT -DSQLITE_ENABLE_STAT4 -DSQLITE_ENABLE_UNLOCK_NOTIFY -DSQLITE_LIKE_DOESNT_MATCH_BLOBS -DSQLITE_MUTEX_APPDEF=1 -DSQLITE_MUTEX_NOOP -DSQLITE_SOUNDEX -DSQLITE_THREADSAFE=1 -DSQLITE_OS_UNIX=1', DO NOT EDIT. 
package sqlite3 diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/sqlite/lib/capi_windows_amd64.go temporal-1.22.5/src/vendor/modernc.org/sqlite/lib/capi_windows_amd64.go --- temporal-1.21.5-1/src/vendor/modernc.org/sqlite/lib/capi_windows_amd64.go 2023-09-29 14:03:35.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/sqlite/lib/capi_windows_amd64.go 2024-02-23 09:46:16.000000000 +0000 @@ -1,4 +1,4 @@ -// Code generated by 'ccgo -DSQLITE_PRIVATE= -export-defines -export-enums -export-externs X -export-fields F -export-typedefs -ignore-unsupported-alignment -pkgname sqlite3 -volatile=sqlite3_io_error_pending,sqlite3_open_file_count,sqlite3_pager_readdb_count,sqlite3_pager_writedb_count,sqlite3_pager_writej_count,sqlite3_search_count,sqlite3_sort_count,saved_cnt,randomnessPid -o lib/sqlite_windows_amd64.go -trace-translation-units testdata/sqlite-amalgamation-3410000/sqlite3.c -full-path-comments -DNDEBUG -DHAVE_USLEEP -DLONGDOUBLE_TYPE=double -DSQLITE_CORE -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_FTS5 -DSQLITE_ENABLE_GEOPOLY -DSQLITE_ENABLE_MATH_FUNCTIONS -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_OFFSET_SQL_FUNC -DSQLITE_ENABLE_PREUPDATE_HOOK -DSQLITE_ENABLE_RBU -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_SESSION -DSQLITE_ENABLE_SNAPSHOT -DSQLITE_ENABLE_STAT4 -DSQLITE_ENABLE_UNLOCK_NOTIFY -DSQLITE_LIKE_DOESNT_MATCH_BLOBS -DSQLITE_MUTEX_APPDEF=1 -DSQLITE_MUTEX_NOOP -DSQLITE_SOUNDEX -DSQLITE_THREADSAFE=1 -DSQLITE_OS_WIN=1 -D_MSC_VER=1900', DO NOT EDIT. +// Code generated by 'ccgo -DSQLITE_PRIVATE= -export-defines -export-enums -export-externs X -export-fields F -export-typedefs -ignore-unsupported-alignment -pkgname sqlite3 -volatile=sqlite3_io_error_pending,sqlite3_open_file_count,sqlite3_pager_readdb_count,sqlite3_pager_writedb_count,sqlite3_pager_writej_count,sqlite3_search_count,sqlite3_sort_count,saved_cnt,randomnessPid -o lib/sqlite_windows_amd64.go -trace-translation-units testdata/sqlite-amalgamation-3410200/sqlite3.c -full-path-comments -DNDEBUG -DHAVE_USLEEP -DLONGDOUBLE_TYPE=double -DSQLITE_CORE -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_FTS5 -DSQLITE_ENABLE_GEOPOLY -DSQLITE_ENABLE_MATH_FUNCTIONS -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_OFFSET_SQL_FUNC -DSQLITE_ENABLE_PREUPDATE_HOOK -DSQLITE_ENABLE_RBU -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_SESSION -DSQLITE_ENABLE_SNAPSHOT -DSQLITE_ENABLE_STAT4 -DSQLITE_ENABLE_UNLOCK_NOTIFY -DSQLITE_LIKE_DOESNT_MATCH_BLOBS -DSQLITE_MUTEX_APPDEF=1 -DSQLITE_MUTEX_NOOP -DSQLITE_SOUNDEX -DSQLITE_THREADSAFE=1 -DSQLITE_OS_WIN=1 -D_MSC_VER=1900', DO NOT EDIT. 
package sqlite3 diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/sqlite/lib/capi_windows_arm64.go temporal-1.22.5/src/vendor/modernc.org/sqlite/lib/capi_windows_arm64.go --- temporal-1.21.5-1/src/vendor/modernc.org/sqlite/lib/capi_windows_arm64.go 2023-09-29 14:03:35.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/sqlite/lib/capi_windows_arm64.go 2024-02-23 09:46:16.000000000 +0000 @@ -1,4 +1,4 @@ -// Code generated by 'ccgo -DSQLITE_PRIVATE= -export-defines -export-enums -export-externs X -export-fields F -export-typedefs -ignore-unsupported-alignment -pkgname sqlite3 -volatile=sqlite3_io_error_pending,sqlite3_open_file_count,sqlite3_pager_readdb_count,sqlite3_pager_writedb_count,sqlite3_pager_writej_count,sqlite3_search_count,sqlite3_sort_count,saved_cnt,randomnessPid -o lib/sqlite_windows_amd64.go -trace-translation-units testdata/sqlite-amalgamation-3410000/sqlite3.c -full-path-comments -DNDEBUG -DHAVE_USLEEP -DLONGDOUBLE_TYPE=double -DSQLITE_CORE -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_FTS5 -DSQLITE_ENABLE_GEOPOLY -DSQLITE_ENABLE_MATH_FUNCTIONS -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_OFFSET_SQL_FUNC -DSQLITE_ENABLE_PREUPDATE_HOOK -DSQLITE_ENABLE_RBU -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_SESSION -DSQLITE_ENABLE_SNAPSHOT -DSQLITE_ENABLE_STAT4 -DSQLITE_ENABLE_UNLOCK_NOTIFY -DSQLITE_LIKE_DOESNT_MATCH_BLOBS -DSQLITE_MUTEX_APPDEF=1 -DSQLITE_MUTEX_NOOP -DSQLITE_SOUNDEX -DSQLITE_THREADSAFE=1 -DSQLITE_OS_WIN=1 -D_MSC_VER=1900', DO NOT EDIT. +// Code generated by 'ccgo -DSQLITE_PRIVATE= -export-defines -export-enums -export-externs X -export-fields F -export-typedefs -ignore-unsupported-alignment -pkgname sqlite3 -volatile=sqlite3_io_error_pending,sqlite3_open_file_count,sqlite3_pager_readdb_count,sqlite3_pager_writedb_count,sqlite3_pager_writej_count,sqlite3_search_count,sqlite3_sort_count,saved_cnt,randomnessPid -o lib/sqlite_windows_amd64.go -trace-translation-units testdata/sqlite-amalgamation-3410200/sqlite3.c -full-path-comments -DNDEBUG -DHAVE_USLEEP -DLONGDOUBLE_TYPE=double -DSQLITE_CORE -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_FTS5 -DSQLITE_ENABLE_GEOPOLY -DSQLITE_ENABLE_MATH_FUNCTIONS -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_OFFSET_SQL_FUNC -DSQLITE_ENABLE_PREUPDATE_HOOK -DSQLITE_ENABLE_RBU -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_SESSION -DSQLITE_ENABLE_SNAPSHOT -DSQLITE_ENABLE_STAT4 -DSQLITE_ENABLE_UNLOCK_NOTIFY -DSQLITE_LIKE_DOESNT_MATCH_BLOBS -DSQLITE_MUTEX_APPDEF=1 -DSQLITE_MUTEX_NOOP -DSQLITE_SOUNDEX -DSQLITE_THREADSAFE=1 -DSQLITE_OS_WIN=1 -D_MSC_VER=1900', DO NOT EDIT. 
package sqlite3 diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/sqlite/lib/mutex.go temporal-1.22.5/src/vendor/modernc.org/sqlite/lib/mutex.go --- temporal-1.21.5-1/src/vendor/modernc.org/sqlite/lib/mutex.go 2023-09-29 14:03:35.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/sqlite/lib/mutex.go 2024-02-23 09:46:16.000000000 +0000 @@ -92,13 +92,17 @@ if p == 0 { return nil } + ix := p - 1 + + mutexes.Lock() + defer mutexes.Unlock() + return &mutexes.a[ix>>8][ix&255] } func (m *mutexPool) alloc(recursive bool) uintptr { m.Lock() - defer m.Unlock() n := len(m.freeList) @@ -124,8 +128,8 @@ ptr := mutexFromPtr(p) ix := ptr.poolIndex *ptr = mutex{} - m.Lock() + m.Lock() defer m.Unlock() m.freeList = append(m.freeList, ix) diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/sqlite/lib/sqlite_darwin_amd64.go temporal-1.22.5/src/vendor/modernc.org/sqlite/lib/sqlite_darwin_amd64.go --- temporal-1.21.5-1/src/vendor/modernc.org/sqlite/lib/sqlite_darwin_amd64.go 2023-09-29 14:03:35.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/sqlite/lib/sqlite_darwin_amd64.go 2024-02-23 09:46:16.000000000 +0000 @@ -1,4 +1,4 @@ -// Code generated by 'ccgo -DSQLITE_PRIVATE= -export-defines "" -export-enums "" -export-externs X -export-fields F -export-typedefs "" -ignore-unsupported-alignment -pkgname sqlite3 -volatile=sqlite3_io_error_pending,sqlite3_open_file_count,sqlite3_pager_readdb_count,sqlite3_pager_writedb_count,sqlite3_pager_writej_count,sqlite3_search_count,sqlite3_sort_count,saved_cnt,randomnessPid -o lib/sqlite_darwin_amd64.go -trace-translation-units testdata/sqlite-amalgamation-3410000/sqlite3.c -full-path-comments -DNDEBUG -DHAVE_USLEEP -DLONGDOUBLE_TYPE=double -DSQLITE_CORE -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_FTS5 -DSQLITE_ENABLE_GEOPOLY -DSQLITE_ENABLE_MATH_FUNCTIONS -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_OFFSET_SQL_FUNC -DSQLITE_ENABLE_PREUPDATE_HOOK -DSQLITE_ENABLE_RBU -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_SESSION -DSQLITE_ENABLE_SNAPSHOT -DSQLITE_ENABLE_STAT4 -DSQLITE_ENABLE_UNLOCK_NOTIFY -DSQLITE_LIKE_DOESNT_MATCH_BLOBS -DSQLITE_MUTEX_APPDEF=1 -DSQLITE_MUTEX_NOOP -DSQLITE_SOUNDEX -DSQLITE_THREADSAFE=1 -DSQLITE_OS_UNIX=1 -DSQLITE_WITHOUT_ZONEMALLOC', DO NOT EDIT. +// Code generated by 'ccgo -DSQLITE_PRIVATE= -export-defines "" -export-enums "" -export-externs X -export-fields F -export-typedefs "" -ignore-unsupported-alignment -pkgname sqlite3 -volatile=sqlite3_io_error_pending,sqlite3_open_file_count,sqlite3_pager_readdb_count,sqlite3_pager_writedb_count,sqlite3_pager_writej_count,sqlite3_search_count,sqlite3_sort_count,saved_cnt,randomnessPid -o lib/sqlite_darwin_amd64.go -trace-translation-units testdata/sqlite-amalgamation-3410200/sqlite3.c -full-path-comments -DNDEBUG -DHAVE_USLEEP -DLONGDOUBLE_TYPE=double -DSQLITE_CORE -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_FTS5 -DSQLITE_ENABLE_GEOPOLY -DSQLITE_ENABLE_MATH_FUNCTIONS -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_OFFSET_SQL_FUNC -DSQLITE_ENABLE_PREUPDATE_HOOK -DSQLITE_ENABLE_RBU -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_SESSION -DSQLITE_ENABLE_SNAPSHOT -DSQLITE_ENABLE_STAT4 -DSQLITE_ENABLE_UNLOCK_NOTIFY -DSQLITE_LIKE_DOESNT_MATCH_BLOBS -DSQLITE_MUTEX_APPDEF=1 -DSQLITE_MUTEX_NOOP -DSQLITE_SOUNDEX -DSQLITE_THREADSAFE=1 -DSQLITE_OS_UNIX=1 -DSQLITE_WITHOUT_ZONEMALLOC', DO NOT EDIT. 
package sqlite3 @@ -1203,11 +1203,11 @@ NC_OrderAgg = 0x8000000 NC_PartIdx = 0x000002 NC_SelfRef = 0x00002e + NC_Subquery = 0x000040 NC_UAggInfo = 0x000100 NC_UBaseReg = 0x000400 NC_UEList = 0x000080 NC_UUpsert = 0x000200 - NC_VarSelect = 0x000040 NDEBUG = 1 NFSV2_MAX_FH_SIZE = 32 NFSV3_MAX_FH_SIZE = 64 @@ -2537,7 +2537,7 @@ SQLITE_SHM_UNLOCK = 1 SQLITE_SORTER_PMASZ = 250 SQLITE_SOUNDEX = 1 - SQLITE_SOURCE_ID = "2023-02-21 18:09:37 05941c2a04037fc3ed2ffae11f5d2260706f89431f463518740f72ada350866d" + SQLITE_SOURCE_ID = "2023-03-22 11:56:21 0d1fc92f94cb6b76bffe3ec34d69cffde2924203304e8ffc4155597af0c191da" SQLITE_SO_ASC = 0 SQLITE_SO_DESC = 1 SQLITE_SO_UNDEFINED = -1 @@ -2645,8 +2645,8 @@ SQLITE_UTF8 = 1 SQLITE_VDBEINT_H = 0 SQLITE_VDBE_H = 0 - SQLITE_VERSION = "3.41.0" - SQLITE_VERSION_NUMBER = 3041000 + SQLITE_VERSION = "3.41.2" + SQLITE_VERSION_NUMBER = 3041002 SQLITE_VTABRISK_High = 2 SQLITE_VTABRISK_Low = 0 SQLITE_VTABRISK_Normal = 1 @@ -8736,7 +8736,8 @@ FiIdxCur int32 FiIdxCol int32 FbMaybeNullRow U8 - F__ccgo_pad1 [3]byte + Faff U8 + F__ccgo_pad1 [2]byte FpIENext uintptr } @@ -9378,17 +9379,18 @@ // Handle type for pages. type PgHdr2 = struct { - FpPage uintptr - FpData uintptr - FpExtra uintptr - FpCache uintptr - FpDirty uintptr - FpPager uintptr - Fpgno Pgno - Fflags U16 - FnRef I16 - FpDirtyNext uintptr - FpDirtyPrev uintptr + FpPage uintptr + FpData uintptr + FpExtra uintptr + FpCache uintptr + FpDirty uintptr + FpPager uintptr + Fpgno Pgno + Fflags U16 + F__ccgo_pad1 [2]byte + FnRef I64 + FpDirtyNext uintptr + FpDirtyPrev uintptr } // Handle type for pages. @@ -9609,14 +9611,14 @@ FpDirty uintptr FpDirtyTail uintptr FpSynced uintptr - FnRefSum int32 + FnRefSum I64 FszCache int32 FszSpill int32 FszPage int32 FszExtra int32 FbPurgeable U8 FeCreate U8 - F__ccgo_pad1 [2]byte + F__ccgo_pad1 [6]byte FxStress uintptr FpStress uintptr FpCache uintptr @@ -10423,7 +10425,7 @@ _ = pMutex if op < 0 || op >= int32(uint64(unsafe.Sizeof([10]Sqlite3StatValueType{}))/uint64(unsafe.Sizeof(Sqlite3StatValueType(0)))) { - return Xsqlite3MisuseError(tls, 23229) + return Xsqlite3MisuseError(tls, 23233) } if statMutex[op] != 0 { pMutex = Xsqlite3Pcache1Mutex(tls) @@ -19809,7 +19811,7 @@ for p = (*UnixInodeInfo)(unsafe.Pointer(pInode)).FpUnused; p != 0; p = pNext { pNext = (*UnixUnusedFd)(unsafe.Pointer(p)).FpNext - robust_close(tls, pFile, (*UnixUnusedFd)(unsafe.Pointer(p)).Ffd, 38271) + robust_close(tls, pFile, (*UnixUnusedFd)(unsafe.Pointer(p)).Ffd, 38275) Xsqlite3_free(tls, p) } (*UnixInodeInfo)(unsafe.Pointer(pInode)).FpUnused = uintptr(0) @@ -20354,7 +20356,7 @@ var pFile uintptr = id unixUnmapfile(tls, pFile) if (*UnixFile)(unsafe.Pointer(pFile)).Fh >= 0 { - robust_close(tls, pFile, (*UnixFile)(unsafe.Pointer(pFile)).Fh, 39055) + robust_close(tls, pFile, (*UnixFile)(unsafe.Pointer(pFile)).Fh, 39059) (*UnixFile)(unsafe.Pointer(pFile)).Fh = -1 } @@ -21121,7 +21123,7 @@ if fd >= 0 { return SQLITE_OK } - return unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 40676), ts+3371, bp+8, 40676) + return unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 40680), ts+3371, bp+8, 40680) } func unixSync(tls *libc.TLS, id uintptr, flags int32) int32 { @@ -21138,14 +21140,14 @@ if rc != 0 { storeLastErrno(tls, pFile, *(*int32)(unsafe.Pointer(libc.X__error(tls)))) - return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(4)<<8, ts+3661, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40717) + return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(4)<<8, ts+3661, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40721) } if 
int32((*UnixFile)(unsafe.Pointer(pFile)).FctrlFlags)&UNIXFILE_DIRSYNC != 0 { rc = (*(*func(*libc.TLS, uintptr, uintptr) int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 17*24 + 8)))(tls, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, bp) if rc == SQLITE_OK { full_fsync(tls, *(*int32)(unsafe.Pointer(bp)), 0, 0) - robust_close(tls, pFile, *(*int32)(unsafe.Pointer(bp)), 40731) + robust_close(tls, pFile, *(*int32)(unsafe.Pointer(bp)), 40735) } else { rc = SQLITE_OK } @@ -21165,7 +21167,7 @@ rc = robust_ftruncate(tls, (*UnixFile)(unsafe.Pointer(pFile)).Fh, nByte) if rc != 0 { storeLastErrno(tls, pFile, *(*int32)(unsafe.Pointer(libc.X__error(tls)))) - return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(6)<<8, ts+3290, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40762) + return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(6)<<8, ts+3290, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40766) } else { if nByte < (*UnixFile)(unsafe.Pointer(pFile)).FmmapSize { (*UnixFile)(unsafe.Pointer(pFile)).FmmapSize = nByte @@ -21233,7 +21235,7 @@ if (*UnixFile)(unsafe.Pointer(pFile)).FszChunk <= 0 { if robust_ftruncate(tls, (*UnixFile)(unsafe.Pointer(pFile)).Fh, nByte) != 0 { storeLastErrno(tls, pFile, *(*int32)(unsafe.Pointer(libc.X__error(tls)))) - return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(6)<<8, ts+3290, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40883) + return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(6)<<8, ts+3290, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40887) } } @@ -21467,7 +21469,7 @@ } Xsqlite3_free(tls, (*UnixShmNode)(unsafe.Pointer(p)).FapRegion) if (*UnixShmNode)(unsafe.Pointer(p)).FhShm >= 0 { - robust_close(tls, pFd, (*UnixShmNode)(unsafe.Pointer(p)).FhShm, 41442) + robust_close(tls, pFd, (*UnixShmNode)(unsafe.Pointer(p)).FhShm, 41446) (*UnixShmNode)(unsafe.Pointer(p)).FhShm = -1 } (*UnixInodeInfo)(unsafe.Pointer((*UnixShmNode)(unsafe.Pointer(p)).FpInode)).FpShmNode = uintptr(0) @@ -21495,7 +21497,7 @@ rc = unixShmSystemLock(tls, pDbFd, F_WRLCK, (22+SQLITE_SHM_NLOCK)*4+SQLITE_SHM_NLOCK, 1) if rc == SQLITE_OK && robust_ftruncate(tls, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FhShm, int64(3)) != 0 { - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(18)<<8, ts+3290, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename, 41499) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(18)<<8, ts+3290, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename, 41503) } } } else if int32((*flock)(unsafe.Pointer(bp+8)).Fl_type) == F_WRLCK { @@ -21594,7 +21596,7 @@ if !((*unixShmNode)(unsafe.Pointer(pShmNode)).FhShm < 0) { goto __10 } - rc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 41624), ts+3254, zShm, 41624) + rc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 41628), ts+3254, zShm, 41628) goto shm_open_err __10: ; @@ -21724,7 +21726,7 @@ goto __14 } zFile = (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(19)<<8, ts+3325, zFile, 41768) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(19)<<8, ts+3325, zFile, 41772) goto shmpage_out __14: ; @@ -21770,7 +21772,7 @@ if !(pMem == libc.UintptrFromInt32(-1)) { goto __20 } - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(21)<<8, ts+3412, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename, 41795) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(21)<<8, ts+3412, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename, 41799) goto shmpage_out __20: ; @@ -22009,7 +22011,7 @@ if pNew == libc.UintptrFromInt32(-1) { pNew = uintptr(0) nNew = int64(0) - unixLogErrorAtLine(tls, SQLITE_OK, zErr, 
(*UnixFile)(unsafe.Pointer(pFd)).FzPath, 42169) + unixLogErrorAtLine(tls, SQLITE_OK, zErr, (*UnixFile)(unsafe.Pointer(pFd)).FzPath, 42173) (*UnixFile)(unsafe.Pointer(pFd)).FmmapSizeMax = int64(0) } @@ -22236,7 +22238,7 @@ unixEnterMutex(tls) rc = findInodeInfo(tls, pNew, pNew+16) if rc != SQLITE_OK { - robust_close(tls, pNew, h, 42672) + robust_close(tls, pNew, h, 42676) h = -1 } unixLeaveMutex(tls) @@ -22253,7 +22255,7 @@ rc = findInodeInfo(tls, pNew, pNew+16) if rc != SQLITE_OK { Xsqlite3_free(tls, (*UnixFile)(unsafe.Pointer(pNew)).FlockingContext) - robust_close(tls, pNew, h, 42698) + robust_close(tls, pNew, h, 42702) h = -1 } unixLeaveMutex(tls) @@ -22275,7 +22277,7 @@ storeLastErrno(tls, pNew, 0) if rc != SQLITE_OK { if h >= 0 { - robust_close(tls, pNew, h, 42757) + robust_close(tls, pNew, h, 42761) } } else { (*Sqlite3_file)(unsafe.Pointer(pId)).FpMethods = pLockingStyle @@ -22595,7 +22597,7 @@ if !(fd < 0) { goto __19 } - rc2 = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43198), ts+3254, zName, 43198) + rc2 = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43202), ts+3254, zName, 43202) if !(rc == SQLITE_OK) { goto __20 } @@ -22639,7 +22641,7 @@ goto __26 } storeLastErrno(tls, p, *(*int32)(unsafe.Pointer(libc.X__error(tls)))) - robust_close(tls, p, fd, 43252) + robust_close(tls, p, fd, 43256) return SQLITE_IOERR | int32(13)<<8 __26: ; @@ -22747,7 +22749,7 @@ if *(*int32)(unsafe.Pointer(libc.X__error(tls))) == ENOENT { rc = SQLITE_IOERR | int32(23)<<8 } else { - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(10)<<8, ts+3364, zPath, 43337) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(10)<<8, ts+3364, zPath, 43341) } return rc } @@ -22755,9 +22757,9 @@ rc = (*(*func(*libc.TLS, uintptr, uintptr) int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 17*24 + 8)))(tls, zPath, bp) if rc == SQLITE_OK { if full_fsync(tls, *(*int32)(unsafe.Pointer(bp)), 0, 0) != 0 { - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(5)<<8, ts+3860, zPath, 43347) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(5)<<8, ts+3860, zPath, 43351) } - robust_close(tls, uintptr(0), *(*int32)(unsafe.Pointer(bp)), 43349) + robust_close(tls, uintptr(0), *(*int32)(unsafe.Pointer(bp)), 43353) } else { rc = SQLITE_OK } @@ -22821,18 +22823,18 @@ zIn = (*DbPath)(unsafe.Pointer(pPath)).FzOut if (*(*func(*libc.TLS, uintptr, uintptr) int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 27*24 + 8)))(tls, zIn, bp) != 0 { if *(*int32)(unsafe.Pointer(libc.X__error(tls))) != ENOENT { - (*DbPath)(unsafe.Pointer(pPath)).Frc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43443), ts+3452, zIn, 43443) + (*DbPath)(unsafe.Pointer(pPath)).Frc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43447), ts+3452, zIn, 43447) } } else if int32((*stat)(unsafe.Pointer(bp)).Fst_mode)&S_IFMT == S_IFLNK { var got Ssize_t if libc.PostIncInt32(&(*DbPath)(unsafe.Pointer(pPath)).FnSymlink, 1) > SQLITE_MAX_SYMLINK { - (*DbPath)(unsafe.Pointer(pPath)).Frc = Xsqlite3CantopenError(tls, 43449) + (*DbPath)(unsafe.Pointer(pPath)).Frc = Xsqlite3CantopenError(tls, 43453) return } got = (*(*func(*libc.TLS, uintptr, uintptr, Size_t) Ssize_t)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls, zIn, bp+144, uint64(unsafe.Sizeof([1026]int8{}))-uint64(2)) if got <= int64(0) || got >= Ssize_t(unsafe.Sizeof([1026]int8{}))-int64(2) { - (*DbPath)(unsafe.Pointer(pPath)).Frc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43454), ts+3443, zIn, 43454) + (*DbPath)(unsafe.Pointer(pPath)).Frc = unixLogErrorAtLine(tls, 
Xsqlite3CantopenError(tls, 43458), ts+3443, zIn, 43458) return } *(*int8)(unsafe.Pointer(bp + 144 + uintptr(got))) = int8(0) @@ -22872,14 +22874,14 @@ (*DbPath)(unsafe.Pointer(bp + 1032)).FzOut = zOut if int32(*(*int8)(unsafe.Pointer(zPath))) != '/' { if (*(*func(*libc.TLS, uintptr, Size_t) uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 3*24 + 8)))(tls, bp, uint64(unsafe.Sizeof([1026]int8{}))-uint64(2)) == uintptr(0) { - return unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43512), ts+3272, zPath, 43512) + return unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43516), ts+3272, zPath, 43516) } appendAllPathElements(tls, bp+1032, bp) } appendAllPathElements(tls, bp+1032, zPath) *(*int8)(unsafe.Pointer(zOut + uintptr((*DbPath)(unsafe.Pointer(bp+1032)).FnUsed))) = int8(0) if (*DbPath)(unsafe.Pointer(bp+1032)).Frc != 0 || (*DbPath)(unsafe.Pointer(bp+1032)).FnUsed < 2 { - return Xsqlite3CantopenError(tls, 43518) + return Xsqlite3CantopenError(tls, 43522) } if (*DbPath)(unsafe.Pointer(bp+1032)).FnSymlink != 0 { return SQLITE_OK | int32(2)<<8 @@ -22954,7 +22956,7 @@ for __ccgo := true; __ccgo; __ccgo = got < 0 && *(*int32)(unsafe.Pointer(libc.X__error(tls))) == EINTR { got = int32((*(*func(*libc.TLS, int32, uintptr, Size_t) Ssize_t)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 8*24 + 8)))(tls, fd, zBuf, uint64(nBuf))) } - robust_close(tls, uintptr(0), fd, 43619) + robust_close(tls, uintptr(0), fd, 43623) } } @@ -23168,7 +23170,7 @@ __12: return SQLITE_IOERR | int32(15)<<8 __13: - return Xsqlite3CantopenError(tls, 44048) + return Xsqlite3CantopenError(tls, 44052) __10: ; __8: @@ -23199,7 +23201,7 @@ __15: ; end_create_proxy: - robust_close(tls, pNew, fd, 44072) + robust_close(tls, pNew, fd, 44076) Xsqlite3_free(tls, pNew) Xsqlite3_free(tls, pUnused) return rc @@ -23288,7 +23290,7 @@ ; rc = 0 libc.Xfprintf(tls, libc.X__stderrp, ts+4066, libc.VaList(bp+40, cPath)) - robust_close(tls, pFile, (*UnixFile)(unsafe.Pointer(conchFile)).Fh, 44175) + robust_close(tls, pFile, (*UnixFile)(unsafe.Pointer(conchFile)).Fh, 44179) (*UnixFile)(unsafe.Pointer(conchFile)).Fh = fd (*UnixFile)(unsafe.Pointer(conchFile)).FopenFlags = O_RDWR | O_CREAT @@ -23300,7 +23302,7 @@ goto __7 } (*(*func(*libc.TLS, uintptr) int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 16*24 + 8)))(tls, bp+64) - robust_close(tls, pFile, fd, 44183) + robust_close(tls, pFile, fd, 44187) __7: ; libc.Xfprintf(tls, libc.X__stderrp, ts+4090, libc.VaList(bp+48, cPath, bp+1088)) @@ -23580,7 +23582,7 @@ if !((*UnixFile)(unsafe.Pointer(pFile)).Fh >= 0) { goto __29 } - robust_close(tls, pFile, (*UnixFile)(unsafe.Pointer(pFile)).Fh, 44436) + robust_close(tls, pFile, (*UnixFile)(unsafe.Pointer(pFile)).Fh, 44440) __29: ; (*UnixFile)(unsafe.Pointer(pFile)).Fh = -1 @@ -23592,7 +23594,7 @@ (*UnixFile)(unsafe.Pointer(pFile)).Fh = fd goto __31 __30: - rc = Xsqlite3CantopenError(tls, 44444) + rc = Xsqlite3CantopenError(tls, 44448) __31: ; @@ -25377,7 +25379,7 @@ libc.X__builtin___memset_chk(tls, pPgHdr+32, 0, uint64(unsafe.Sizeof(PgHdr{}))-uint64(uintptr(0)+32), libc.X__builtin_object_size(tls, pPgHdr+32, 0)) (*PgHdr)(unsafe.Pointer(pPgHdr)).FpPage = pPage (*PgHdr)(unsafe.Pointer(pPgHdr)).FpData = (*Sqlite3_pcache_page)(unsafe.Pointer(pPage)).FpBuf - (*PgHdr)(unsafe.Pointer(pPgHdr)).FpExtra = pPgHdr + 1*72 + (*PgHdr)(unsafe.Pointer(pPgHdr)).FpExtra = pPgHdr + 1*80 libc.X__builtin___memset_chk(tls, (*PgHdr)(unsafe.Pointer(pPgHdr)).FpExtra, 0, uint64(8), libc.X__builtin_object_size(tls, (*PgHdr)(unsafe.Pointer(pPgHdr)).FpExtra, 
0)) (*PgHdr)(unsafe.Pointer(pPgHdr)).FpCache = pCache (*PgHdr)(unsafe.Pointer(pPgHdr)).Fpgno = pgno @@ -25407,7 +25409,7 @@ // reference count drops to 0, then it is made eligible for recycling. func Xsqlite3PcacheRelease(tls *libc.TLS, p uintptr) { (*PCache)(unsafe.Pointer((*PgHdr)(unsafe.Pointer(p)).FpCache)).FnRefSum-- - if int32(libc.PreDecInt16(&(*PgHdr)(unsafe.Pointer(p)).FnRef, 1)) == 0 { + if libc.PreDecInt64(&(*PgHdr)(unsafe.Pointer(p)).FnRef, 1) == int64(0) { if int32((*PgHdr)(unsafe.Pointer(p)).Fflags)&PGHDR_CLEAN != 0 { pcacheUnpin(tls, p) } else { @@ -25458,7 +25460,7 @@ *(*U16)(unsafe.Pointer(p + 52)) &= libc.Uint16FromInt32(libc.CplInt32(PGHDR_DIRTY | PGHDR_NEED_SYNC | PGHDR_WRITEABLE)) *(*U16)(unsafe.Pointer(p + 52)) |= U16(PGHDR_CLEAN) - if int32((*PgHdr)(unsafe.Pointer(p)).FnRef) == 0 { + if (*PgHdr)(unsafe.Pointer(p)).FnRef == int64(0) { pcacheUnpin(tls, p) } } @@ -25562,8 +25564,8 @@ } func pcacheMergeDirtyList(tls *libc.TLS, pA uintptr, pB uintptr) uintptr { - bp := tls.Alloc(72) - defer tls.Free(72) + bp := tls.Alloc(80) + defer tls.Free(80) var pTail uintptr pTail = bp @@ -25641,13 +25643,13 @@ // // This is not the total number of pages referenced, but the sum of the // reference count for all pages. -func Xsqlite3PcacheRefCount(tls *libc.TLS, pCache uintptr) int32 { +func Xsqlite3PcacheRefCount(tls *libc.TLS, pCache uintptr) I64 { return (*PCache)(unsafe.Pointer(pCache)).FnRefSum } // Return the number of references to the page supplied as an argument. -func Xsqlite3PcachePageRefcount(tls *libc.TLS, p uintptr) int32 { - return int32((*PgHdr)(unsafe.Pointer(p)).FnRef) +func Xsqlite3PcachePageRefcount(tls *libc.TLS, p uintptr) I64 { + return (*PgHdr)(unsafe.Pointer(p)).FnRef } // Return the total number of pages in the cache. 
@@ -27940,7 +27942,7 @@ pPg = Xsqlite3PagerLookup(tls, pPager, iPg) if pPg != 0 { - if Xsqlite3PcachePageRefcount(tls, pPg) == 1 { + if Xsqlite3PcachePageRefcount(tls, pPg) == int64(1) { Xsqlite3PcacheDrop(tls, pPg) } else { rc = readDbPage(tls, pPg) @@ -28373,7 +28375,7 @@ var pageSize U32 = *(*U32)(unsafe.Pointer(pPageSize)) if (int32((*Pager)(unsafe.Pointer(pPager)).FmemDb) == 0 || (*Pager)(unsafe.Pointer(pPager)).FdbSize == Pgno(0)) && - Xsqlite3PcacheRefCount(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache) == 0 && + Xsqlite3PcacheRefCount(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache) == int64(0) && pageSize != 0 && pageSize != U32((*Pager)(unsafe.Pointer(pPager)).FpageSize) { var pNew uintptr = uintptr(0) *(*I64)(unsafe.Pointer(bp)) = int64(0) @@ -28525,9 +28527,9 @@ Xsqlite3OsUnfetch(tls, (*Pager)(unsafe.Pointer(pPager)).Ffd, I64(pgno-Pgno(1))*(*Pager)(unsafe.Pointer(pPager)).FpageSize, pData) return SQLITE_NOMEM } - (*PgHdr)(unsafe.Pointer(p)).FpExtra = p + 1*72 + (*PgHdr)(unsafe.Pointer(p)).FpExtra = p + 1*80 (*PgHdr)(unsafe.Pointer(p)).Fflags = U16(PGHDR_MMAP) - (*PgHdr)(unsafe.Pointer(p)).FnRef = int16(1) + (*PgHdr)(unsafe.Pointer(p)).FnRef = int64(1) (*PgHdr)(unsafe.Pointer(p)).FpPager = pPager } @@ -28859,7 +28861,7 @@ for rc == SQLITE_OK && pList != 0 { var pNext uintptr = (*PgHdr)(unsafe.Pointer(pList)).FpDirty - if int32((*PgHdr)(unsafe.Pointer(pList)).FnRef) == 0 { + if (*PgHdr)(unsafe.Pointer(pList)).FnRef == int64(0) { rc = pagerStress(tls, pPager, pList) } pList = pNext @@ -29009,7 +29011,7 @@ goto __12 } - rc = Xsqlite3CantopenError(tls, 60235) + rc = Xsqlite3CantopenError(tls, 60239) __12: ; if !(rc != SQLITE_OK) { @@ -29390,7 +29392,7 @@ if !(rc == SQLITE_OK && *(*int32)(unsafe.Pointer(bp + 8))&SQLITE_OPEN_READONLY != 0) { goto __10 } - rc = Xsqlite3CantopenError(tls, 60754) + rc = Xsqlite3CantopenError(tls, 60758) Xsqlite3OsClose(tls, (*Pager)(unsafe.Pointer(pPager)).Fjfd) __10: ; @@ -29496,7 +29498,7 @@ } func pagerUnlockIfUnused(tls *libc.TLS, pPager uintptr) { - if Xsqlite3PcacheRefCount(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache) == 0 { + if Xsqlite3PcacheRefCount(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache) == int64(0) { pagerUnlockAndRollback(tls, pPager) } } @@ -29514,7 +29516,7 @@ if !(pgno == Pgno(0)) { goto __1 } - return Xsqlite3CorruptError(tls, 60967) + return Xsqlite3CorruptError(tls, 60971) __1: ; *(*uintptr)(unsafe.Pointer(bp)) = Xsqlite3PcacheFetch(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache, pgno, 3) @@ -29553,7 +29555,7 @@ if !(pgno == (*Pager)(unsafe.Pointer(pPager)).FlckPgno) { goto __7 } - rc = Xsqlite3CorruptError(tls, 60999) + rc = Xsqlite3CorruptError(tls, 61003) goto pager_acquire_err __7: ; @@ -29630,7 +29632,7 @@ (int32((*Pager)(unsafe.Pointer(pPager)).FeState) == PAGER_READER || flags&PAGER_GET_READONLY != 0)) if pgno <= Pgno(1) && pgno == Pgno(0) { - return Xsqlite3CorruptError(tls, 61078) + return Xsqlite3CorruptError(tls, 61082) } if bMmapOk != 0 && (*Pager)(unsafe.Pointer(pPager)).FpWal != uintptr(0) { @@ -30388,7 +30390,7 @@ // Return the number of references to the specified page. 
func Xsqlite3PagerPageRefcount(tls *libc.TLS, pPage uintptr) int32 { - return Xsqlite3PcachePageRefcount(tls, pPage) + return int32(Xsqlite3PcachePageRefcount(tls, pPage)) } // Parameter eStat must be one of SQLITE_DBSTATUS_CACHE_HIT, _MISS, _WRITE, @@ -30631,9 +30633,9 @@ pPgOld = Xsqlite3PagerLookup(tls, pPager, pgno) if pPgOld != 0 { - if int32((*PgHdr)(unsafe.Pointer(pPgOld)).FnRef) > 1 { + if (*PgHdr)(unsafe.Pointer(pPgOld)).FnRef > int64(1) { Xsqlite3PagerUnrefNotNull(tls, pPgOld) - return Xsqlite3CorruptError(tls, 62623) + return Xsqlite3CorruptError(tls, 62627) } *(*U16)(unsafe.Pointer(pPg + 52)) |= U16(int32((*PgHdr)(unsafe.Pointer(pPgOld)).Fflags) & PGHDR_NEED_SYNC) if (*Pager)(unsafe.Pointer(pPager)).FtempFile != 0 { @@ -31389,7 +31391,7 @@ nCollide = idx for iKey = walHash(tls, iPage); *(*Ht_slot)(unsafe.Pointer((*WalHashLoc)(unsafe.Pointer(bp)).FaHash + uintptr(iKey)*2)) != 0; iKey = walNextHash(tls, iKey) { if libc.PostDecInt32(&nCollide, 1) == 0 { - return Xsqlite3CorruptError(tls, 64387) + return Xsqlite3CorruptError(tls, 64391) } } *(*U32)(unsafe.Pointer((*WalHashLoc)(unsafe.Pointer(bp)).FaPgno + uintptr(idx-1)*4)) = iPage @@ -31488,7 +31490,7 @@ if !(version != U32(WAL_MAX_VERSION)) { goto __7 } - rc = Xsqlite3CantopenError(tls, 64519) + rc = Xsqlite3CantopenError(tls, 64523) goto finished __7: ; @@ -32074,7 +32076,7 @@ goto __14 } - rc = Xsqlite3CorruptError(tls, 65333) + rc = Xsqlite3CorruptError(tls, 65337) goto __15 __14: Xsqlite3OsFileControlHint(tls, (*Wal)(unsafe.Pointer(pWal)).FpDbFd, SQLITE_FCNTL_SIZE_HINT, bp+16) @@ -32349,7 +32351,7 @@ } if badHdr == 0 && (*Wal)(unsafe.Pointer(pWal)).Fhdr.FiVersion != U32(WALINDEX_MAX_VERSION) { - rc = Xsqlite3CantopenError(tls, 65682) + rc = Xsqlite3CantopenError(tls, 65686) } if (*Wal)(unsafe.Pointer(pWal)).FbShmUnreliable != 0 { if rc != SQLITE_OK { @@ -32822,7 +32824,7 @@ iRead = iFrame } if libc.PostDecInt32(&nCollide, 1) == 0 { - return Xsqlite3CorruptError(tls, 66419) + return Xsqlite3CorruptError(tls, 66423) } iKey = walNextHash(tls, iKey) } @@ -33327,7 +33329,7 @@ if rc == SQLITE_OK { if (*Wal)(unsafe.Pointer(pWal)).Fhdr.FmxFrame != 0 && walPagesize(tls, pWal) != nBuf { - rc = Xsqlite3CorruptError(tls, 67138) + rc = Xsqlite3CorruptError(tls, 67142) } else { rc = walCheckpoint(tls, pWal, db, eMode2, xBusy2, pBusyArg, sync_flags, zBuf) } @@ -33985,7 +33987,7 @@ } Xsqlite3VdbeRecordUnpack(tls, pKeyInfo, int32(nKey), pKey, pIdxKey) if int32((*UnpackedRecord)(unsafe.Pointer(pIdxKey)).FnField) == 0 || int32((*UnpackedRecord)(unsafe.Pointer(pIdxKey)).FnField) > int32((*KeyInfo)(unsafe.Pointer(pKeyInfo)).FnAllField) { - rc = Xsqlite3CorruptError(tls, 69249) + rc = Xsqlite3CorruptError(tls, 69253) } else { rc = Xsqlite3BtreeIndexMoveto(tls, pCur, pIdxKey, pRes) } @@ -34122,7 +34124,7 @@ if !(key == Pgno(0)) { goto __2 } - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69430) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69434) return __2: ; @@ -34139,7 +34141,7 @@ goto __4 } - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69443) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69447) goto ptrmap_exit __4: ; @@ -34147,7 +34149,7 @@ if !(offset < 0) { goto __5 } - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69448) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69452) goto ptrmap_exit __5: ; @@ -34190,7 +34192,7 @@ offset = int32(Pgno(5) * (key - Pgno(iPtrmap) - Pgno(1))) if offset < 0 { Xsqlite3PagerUnref(tls, *(*uintptr)(unsafe.Pointer(bp))) - 
return Xsqlite3CorruptError(tls, 69493) + return Xsqlite3CorruptError(tls, 69497) } *(*U8)(unsafe.Pointer(pEType)) = *(*U8)(unsafe.Pointer(pPtrmap + uintptr(offset))) @@ -34200,7 +34202,7 @@ Xsqlite3PagerUnref(tls, *(*uintptr)(unsafe.Pointer(bp))) if int32(*(*U8)(unsafe.Pointer(pEType))) < 1 || int32(*(*U8)(unsafe.Pointer(pEType))) > 5 { - return Xsqlite3CorruptError(tls, 69501) + return Xsqlite3CorruptError(tls, 69505) } return SQLITE_OK } @@ -34450,7 +34452,7 @@ if U32((*CellInfo)(unsafe.Pointer(bp)).FnLocal) < (*CellInfo)(unsafe.Pointer(bp)).FnPayload { var ovfl Pgno if Uptr((*MemPage)(unsafe.Pointer(pSrc)).FaDataEnd) >= Uptr(pCell) && Uptr((*MemPage)(unsafe.Pointer(pSrc)).FaDataEnd) < Uptr(pCell+uintptr((*CellInfo)(unsafe.Pointer(bp)).FnLocal)) { - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69893) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69897) return } ovfl = Xsqlite3Get4byte(tls, pCell+uintptr(int32((*CellInfo)(unsafe.Pointer(bp)).FnSize)-4)) @@ -34497,7 +34499,7 @@ if !(iFree > usableSize-4) { goto __2 } - return Xsqlite3CorruptError(tls, 69951) + return Xsqlite3CorruptError(tls, 69955) __2: ; if !(iFree != 0) { @@ -34507,7 +34509,7 @@ if !(iFree2 > usableSize-4) { goto __4 } - return Xsqlite3CorruptError(tls, 69954) + return Xsqlite3CorruptError(tls, 69958) __4: ; if !(0 == iFree2 || int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree2)))) == 0 && int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree2+1)))) == 0) { @@ -34520,7 +34522,7 @@ if !(top >= iFree) { goto __6 } - return Xsqlite3CorruptError(tls, 69962) + return Xsqlite3CorruptError(tls, 69966) __6: ; if !(iFree2 != 0) { @@ -34529,14 +34531,14 @@ if !(iFree+sz > iFree2) { goto __9 } - return Xsqlite3CorruptError(tls, 69965) + return Xsqlite3CorruptError(tls, 69969) __9: ; sz2 = int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree2+2))))<<8 | int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree2+2) + 1))) if !(iFree2+sz2 > usableSize) { goto __10 } - return Xsqlite3CorruptError(tls, 69967) + return Xsqlite3CorruptError(tls, 69971) __10: ; libc.X__builtin___memmove_chk(tls, data+uintptr(iFree+sz+sz2), data+uintptr(iFree+sz), uint64(iFree2-(iFree+sz)), libc.X__builtin_object_size(tls, data+uintptr(iFree+sz+sz2), 0)) @@ -34546,7 +34548,7 @@ if !(iFree+sz > usableSize) { goto __11 } - return Xsqlite3CorruptError(tls, 69971) + return Xsqlite3CorruptError(tls, 69975) __11: ; __8: @@ -34610,7 +34612,7 @@ if !(pc < iCellStart || pc > iCellLast) { goto __22 } - return Xsqlite3CorruptError(tls, 70004) + return Xsqlite3CorruptError(tls, 70008) __22: ; size = int32((*struct { @@ -34620,7 +34622,7 @@ if !(cbrk < iCellStart || pc+size > usableSize) { goto __23 } - return Xsqlite3CorruptError(tls, 70010) + return Xsqlite3CorruptError(tls, 70014) __23: ; *(*U8)(unsafe.Pointer(pAddr1)) = U8(cbrk >> 8) @@ -34642,7 +34644,7 @@ if !(int32(*(*uint8)(unsafe.Pointer(data + uintptr(hdr+7))))+cbrk-iCellFirst != (*MemPage)(unsafe.Pointer(pPage)).FnFree) { goto __24 } - return Xsqlite3CorruptError(tls, 70024) + return Xsqlite3CorruptError(tls, 70028) __24: ; *(*uint8)(unsafe.Pointer(data + uintptr(hdr+5))) = U8(cbrk >> 8) @@ -34677,7 +34679,7 @@ *(*U8)(unsafe.Pointer(aData + uintptr(hdr+7))) += U8(int32(U8(x))) return aData + uintptr(pc) } else if x+pc > maxPC { - *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70081) + *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70085) return uintptr(0) } else { *(*U8)(unsafe.Pointer(aData + uintptr(pc+2))) = U8(x >> 8) @@ -34690,13 +34692,13 @@ pc = 
int32(*(*U8)(unsafe.Pointer(pTmp)))<<8 | int32(*(*U8)(unsafe.Pointer(pTmp + 1))) if pc <= iAddr { if pc != 0 { - *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70096) + *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70100) } return uintptr(0) } } if pc > maxPC+nByte-4 { - *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70103) + *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70107) } return uintptr(0) } @@ -34721,7 +34723,7 @@ if top == 0 && (*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize == U32(65536) { top = 65536 } else { - return Xsqlite3CorruptError(tls, 70152) + return Xsqlite3CorruptError(tls, 70156) } } @@ -34732,7 +34734,7 @@ *(*int32)(unsafe.Pointer(pIdx)) = libc.AssignInt32(&g2, int32((int64(pSpace)-int64(data))/1)) if g2 <= gap { - return Xsqlite3CorruptError(tls, 70170) + return Xsqlite3CorruptError(tls, 70174) } else { return SQLITE_OK } @@ -34784,22 +34786,22 @@ if int32(iFreeBlk) == 0 { break } - return Xsqlite3CorruptError(tls, 70249) + return Xsqlite3CorruptError(tls, 70253) } iPtr = iFreeBlk } if U32(iFreeBlk) > (*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize-U32(4) { - return Xsqlite3CorruptError(tls, 70254) + return Xsqlite3CorruptError(tls, 70258) } if iFreeBlk != 0 && iEnd+U32(3) >= U32(iFreeBlk) { nFrag = U8(U32(iFreeBlk) - iEnd) if iEnd > U32(iFreeBlk) { - return Xsqlite3CorruptError(tls, 70266) + return Xsqlite3CorruptError(tls, 70270) } iEnd = U32(int32(iFreeBlk) + (int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(iFreeBlk)+2))))<<8 | int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(iFreeBlk)+2) + 1))))) if iEnd > (*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize { - return Xsqlite3CorruptError(tls, 70269) + return Xsqlite3CorruptError(tls, 70273) } iSize = U16(iEnd - U32(iStart)) iFreeBlk = U16(int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFreeBlk))))<<8 | int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFreeBlk) + 1)))) @@ -34809,7 +34811,7 @@ var iPtrEnd int32 = int32(iPtr) + (int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(iPtr)+2))))<<8 | int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(iPtr)+2) + 1)))) if iPtrEnd+3 >= int32(iStart) { if iPtrEnd > int32(iStart) { - return Xsqlite3CorruptError(tls, 70282) + return Xsqlite3CorruptError(tls, 70286) } nFrag = U8(int32(nFrag) + (int32(iStart) - iPtrEnd)) iSize = U16(iEnd - U32(iPtr)) @@ -34817,7 +34819,7 @@ } } if int32(nFrag) > int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+7)))) { - return Xsqlite3CorruptError(tls, 70288) + return Xsqlite3CorruptError(tls, 70292) } *(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+7))) -= uint8(int32(nFrag)) } @@ -34825,10 +34827,10 @@ x = U16(int32(*(*U8)(unsafe.Pointer(pTmp)))<<8 | int32(*(*U8)(unsafe.Pointer(pTmp + 1)))) if int32(iStart) <= int32(x) { if int32(iStart) < int32(x) { - return Xsqlite3CorruptError(tls, 70297) + return Xsqlite3CorruptError(tls, 70301) } if int32(iPtr) != int32(hdr)+1 { - return Xsqlite3CorruptError(tls, 70298) + return Xsqlite3CorruptError(tls, 70302) } *(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+1))) = U8(int32(iFreeBlk) >> 8) *(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+1) + 1)) = U8(iFreeBlk) @@ -34888,7 +34890,7 @@ (*MemPage)(unsafe.Pointer(pPage)).FxParseCell = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr) }{btreeParseCellPtrIndex})) - return Xsqlite3CorruptError(tls, 70357) + return Xsqlite3CorruptError(tls, 70361) } } else { 
(*MemPage)(unsafe.Pointer(pPage)).FchildPtrSize = U8(4) @@ -34924,7 +34926,7 @@ (*MemPage)(unsafe.Pointer(pPage)).FxParseCell = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr) }{btreeParseCellPtrIndex})) - return Xsqlite3CorruptError(tls, 70381) + return Xsqlite3CorruptError(tls, 70385) } } return SQLITE_OK @@ -34954,11 +34956,11 @@ var next U32 var size U32 if pc < top { - return Xsqlite3CorruptError(tls, 70432) + return Xsqlite3CorruptError(tls, 70436) } for 1 != 0 { if pc > iCellLast { - return Xsqlite3CorruptError(tls, 70437) + return Xsqlite3CorruptError(tls, 70441) } next = U32(int32(*(*U8)(unsafe.Pointer(data + uintptr(pc))))<<8 | int32(*(*U8)(unsafe.Pointer(data + uintptr(pc) + 1)))) size = U32(int32(*(*U8)(unsafe.Pointer(data + uintptr(pc+2))))<<8 | int32(*(*U8)(unsafe.Pointer(data + uintptr(pc+2) + 1)))) @@ -34969,15 +34971,15 @@ pc = int32(next) } if next > U32(0) { - return Xsqlite3CorruptError(tls, 70447) + return Xsqlite3CorruptError(tls, 70451) } if U32(pc)+size > uint32(usableSize) { - return Xsqlite3CorruptError(tls, 70451) + return Xsqlite3CorruptError(tls, 70455) } } if nFree > usableSize || nFree < iCellFirst { - return Xsqlite3CorruptError(tls, 70463) + return Xsqlite3CorruptError(tls, 70467) } (*MemPage)(unsafe.Pointer(pPage)).FnFree = int32(U16(nFree - iCellFirst)) return SQLITE_OK @@ -35005,14 +35007,14 @@ pc = int32(*(*U8)(unsafe.Pointer(data + uintptr(cellOffset+i*2))))<<8 | int32(*(*U8)(unsafe.Pointer(data + uintptr(cellOffset+i*2) + 1))) if pc < iCellFirst || pc > iCellLast { - return Xsqlite3CorruptError(tls, 70494) + return Xsqlite3CorruptError(tls, 70498) } sz = int32((*struct { f func(*libc.TLS, uintptr, uintptr) U16 })(unsafe.Pointer(&struct{ uintptr }{(*MemPage)(unsafe.Pointer(pPage)).FxCellSize})).f(tls, pPage, data+uintptr(pc))) if pc+sz > usableSize { - return Xsqlite3CorruptError(tls, 70499) + return Xsqlite3CorruptError(tls, 70503) } } return SQLITE_OK @@ -35026,7 +35028,7 @@ data = (*MemPage)(unsafe.Pointer(pPage)).FaData + uintptr((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset) if decodeFlags(tls, pPage, int32(*(*U8)(unsafe.Pointer(data)))) != 0 { - return Xsqlite3CorruptError(tls, 70531) + return Xsqlite3CorruptError(tls, 70535) } (*MemPage)(unsafe.Pointer(pPage)).FmaskPage = U16((*BtShared)(unsafe.Pointer(pBt)).FpageSize - U32(1)) @@ -35038,7 +35040,7 @@ (*MemPage)(unsafe.Pointer(pPage)).FnCell = U16(int32(*(*U8)(unsafe.Pointer(data + 3)))<<8 | int32(*(*U8)(unsafe.Pointer(data + 3 + 1)))) if U32((*MemPage)(unsafe.Pointer(pPage)).FnCell) > ((*BtShared)(unsafe.Pointer(pBt)).FpageSize-U32(8))/U32(6) { - return Xsqlite3CorruptError(tls, 70545) + return Xsqlite3CorruptError(tls, 70549) } (*MemPage)(unsafe.Pointer(pPage)).FnFree = -1 @@ -35141,7 +35143,7 @@ if !(pgno > btreePagecount(tls, pBt)) { goto __1 } - rc = Xsqlite3CorruptError(tls, 70700) + rc = Xsqlite3CorruptError(tls, 70704) goto getAndInitPage_error1 __1: ; @@ -35169,7 +35171,7 @@ if !(pCur != 0 && (int32((*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppPage)))).FnCell) < 1 || int32((*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppPage)))).FintKey) != int32((*BtCursor)(unsafe.Pointer(pCur)).FcurIntKey))) { goto __5 } - rc = Xsqlite3CorruptError(tls, 70721) + rc = Xsqlite3CorruptError(tls, 70725) goto getAndInitPage_error2 __5: ; @@ -35208,7 +35210,7 @@ if Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppPage)))).FpDbPage) > 1 { releasePage(tls, *(*uintptr)(unsafe.Pointer(ppPage))) 
*(*uintptr)(unsafe.Pointer(ppPage)) = uintptr(0) - return Xsqlite3CorruptError(tls, 70787) + return Xsqlite3CorruptError(tls, 70791) } (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppPage)))).FisInit = U8(0) } else { @@ -36091,7 +36093,7 @@ if !(Xsqlite3WritableSchema(tls, (*BtShared)(unsafe.Pointer(pBt)).Fdb) == 0) { goto __18 } - rc = Xsqlite3CorruptError(tls, 71722) + rc = Xsqlite3CorruptError(tls, 71726) goto page1_init_failed goto __19 __18: @@ -36506,7 +36508,7 @@ if int32(eType) == PTRMAP_OVERFLOW2 { if Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer(pPage)).FaData) != iFrom { - return Xsqlite3CorruptError(tls, 72143) + return Xsqlite3CorruptError(tls, 72147) } Xsqlite3Put4byte(tls, (*MemPage)(unsafe.Pointer(pPage)).FaData, iTo) } else { @@ -36532,7 +36534,7 @@ })(unsafe.Pointer(&struct{ uintptr }{(*MemPage)(unsafe.Pointer(pPage)).FxParseCell})).f(tls, pPage, pCell, bp) if U32((*CellInfo)(unsafe.Pointer(bp)).FnLocal) < (*CellInfo)(unsafe.Pointer(bp)).FnPayload { if pCell+uintptr((*CellInfo)(unsafe.Pointer(bp)).FnSize) > (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr((*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize) { - return Xsqlite3CorruptError(tls, 72162) + return Xsqlite3CorruptError(tls, 72166) } if iFrom == Xsqlite3Get4byte(tls, pCell+uintptr((*CellInfo)(unsafe.Pointer(bp)).FnSize)-uintptr(4)) { Xsqlite3Put4byte(tls, pCell+uintptr((*CellInfo)(unsafe.Pointer(bp)).FnSize)-uintptr(4), iTo) @@ -36541,7 +36543,7 @@ } } else { if pCell+uintptr(4) > (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr((*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize) { - return Xsqlite3CorruptError(tls, 72171) + return Xsqlite3CorruptError(tls, 72175) } if Xsqlite3Get4byte(tls, pCell) == iFrom { Xsqlite3Put4byte(tls, pCell, iTo) @@ -36552,7 +36554,7 @@ if i == nCell { if int32(eType) != PTRMAP_BTREE || Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr(int32((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset)+8)) != iFrom { - return Xsqlite3CorruptError(tls, 72183) + return Xsqlite3CorruptError(tls, 72187) } Xsqlite3Put4byte(tls, (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr(int32((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset)+8), iTo) } @@ -36568,7 +36570,7 @@ var pPager uintptr = (*BtShared)(unsafe.Pointer(pBt)).FpPager if iDbPage < Pgno(3) { - return Xsqlite3CorruptError(tls, 72218) + return Xsqlite3CorruptError(tls, 72222) } *(*int32)(unsafe.Pointer(bp)) = Xsqlite3PagerMovepage(tls, pPager, (*MemPage)(unsafe.Pointer(pDbPage)).FpDbPage, iFreePage, isCommit) @@ -36629,7 +36631,7 @@ return rc } if int32(*(*U8)(unsafe.Pointer(bp))) == PTRMAP_ROOTPAGE { - return Xsqlite3CorruptError(tls, 72316) + return Xsqlite3CorruptError(tls, 72320) } if int32(*(*U8)(unsafe.Pointer(bp))) == PTRMAP_FREEPAGE { @@ -36664,7 +36666,7 @@ releasePage(tls, *(*uintptr)(unsafe.Pointer(bp + 32))) if *(*Pgno)(unsafe.Pointer(bp + 40)) > dbSize { releasePage(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) - return Xsqlite3CorruptError(tls, 72368) + return Xsqlite3CorruptError(tls, 72372) } } @@ -36724,7 +36726,7 @@ var nFin Pgno = finalDbSize(tls, pBt, nOrig, nFree) if nOrig < nFin || nFree >= nOrig { - rc = Xsqlite3CorruptError(tls, 72436) + rc = Xsqlite3CorruptError(tls, 72440) } else if nFree > Pgno(0) { rc = saveAllCursors(tls, pBt, uint32(0), uintptr(0)) if rc == SQLITE_OK { @@ -36763,7 +36765,7 @@ nOrig = btreePagecount(tls, pBt) if ptrmapPageno(tls, pBt, nOrig) == nOrig || nOrig == 
U32(Xsqlite3PendingByte)/(*BtShared)(unsafe.Pointer(pBt)).FpageSize+U32(1) { - return Xsqlite3CorruptError(tls, 72487) + return Xsqlite3CorruptError(tls, 72491) } nFree = Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer((*BtShared)(unsafe.Pointer(pBt)).FpPage1)).FaData+36) @@ -36794,7 +36796,7 @@ } nFin = finalDbSize(tls, pBt, nOrig, nVac) if nFin > nOrig { - return Xsqlite3CorruptError(tls, 72514) + return Xsqlite3CorruptError(tls, 72518) } if nFin < nOrig { rc = saveAllCursors(tls, pBt, uint32(0), uintptr(0)) @@ -37135,7 +37137,7 @@ if iTable <= Pgno(1) { if iTable < Pgno(1) { - return Xsqlite3CorruptError(tls, 72978) + return Xsqlite3CorruptError(tls, 72982) } else if btreePagecount(tls, pBt) == Pgno(0) { iTable = Pgno(0) } @@ -37379,14 +37381,14 @@ var pBt uintptr = (*BtCursor)(unsafe.Pointer(pCur)).FpBt if int32((*BtCursor)(unsafe.Pointer(pCur)).Fix) >= int32((*MemPage)(unsafe.Pointer(pPage)).FnCell) { - return Xsqlite3CorruptError(tls, 73385) + return Xsqlite3CorruptError(tls, 73389) } getCellInfo(tls, pCur) aPayload = (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload if Uptr((int64(aPayload)-int64((*MemPage)(unsafe.Pointer(pPage)).FaData))/1) > Uptr((*BtShared)(unsafe.Pointer(pBt)).FusableSize-U32((*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal)) { - return Xsqlite3CorruptError(tls, 73400) + return Xsqlite3CorruptError(tls, 73404) } if offset < U32((*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal) { @@ -37431,7 +37433,7 @@ for *(*Pgno)(unsafe.Pointer(bp)) != 0 { if *(*Pgno)(unsafe.Pointer(bp)) > (*BtShared)(unsafe.Pointer(pBt)).FnPage { - return Xsqlite3CorruptError(tls, 73462) + return Xsqlite3CorruptError(tls, 73466) } *(*Pgno)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pCur)).FaOverflow + uintptr(iIdx)*4)) = *(*Pgno)(unsafe.Pointer(bp)) @@ -37480,7 +37482,7 @@ } if rc == SQLITE_OK && amt > U32(0) { - return Xsqlite3CorruptError(tls, 73547) + return Xsqlite3CorruptError(tls, 73551) } return rc } @@ -37560,7 +37562,7 @@ func moveToChild(tls *libc.TLS, pCur uintptr, newPgno U32) int32 { if int32((*BtCursor)(unsafe.Pointer(pCur)).FiPage) >= BTCURSOR_MAX_DEPTH-1 { - return Xsqlite3CorruptError(tls, 73684) + return Xsqlite3CorruptError(tls, 73688) } (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnSize = U16(0) *(*U8)(unsafe.Pointer(pCur + 1)) &= libc.Uint8FromInt32(libc.CplInt32(BTCF_ValidNKey | BTCF_ValidOvfl)) @@ -37651,7 +37653,7 @@ if !(int32((*MemPage)(unsafe.Pointer(pRoot)).FisInit) == 0 || libc.Bool32((*BtCursor)(unsafe.Pointer(pCur)).FpKeyInfo == uintptr(0)) != int32((*MemPage)(unsafe.Pointer(pRoot)).FintKey)) { goto __11 } - return Xsqlite3CorruptError(tls, 73823) + return Xsqlite3CorruptError(tls, 73827) __11: ; skip_init: @@ -37671,7 +37673,7 @@ if !((*MemPage)(unsafe.Pointer(pRoot)).Fpgno != Pgno(1)) { goto __16 } - return Xsqlite3CorruptError(tls, 73835) + return Xsqlite3CorruptError(tls, 73839) __16: ; subpage = Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer(pRoot)).FaData+uintptr(int32((*MemPage)(unsafe.Pointer(pRoot)).FhdrOffset)+8)) @@ -37881,7 +37883,7 @@ if !(pCell >= (*MemPage)(unsafe.Pointer(pPage)).FaDataEnd) { goto __21 } - return Xsqlite3CorruptError(tls, 74077) + return Xsqlite3CorruptError(tls, 74081) __21: ; goto __19 @@ -38085,7 +38087,7 @@ if !!(int32((*MemPage)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pCur)).FpPage)).FisInit) != 0) { goto __4 } - return Xsqlite3CorruptError(tls, 74273) + return Xsqlite3CorruptError(tls, 74277) __4: ; goto bypass_moveto_root @@ -38150,7 +38152,7 @@ if !(nCell < 2 || 
U32(nCell)/(*BtShared)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pCur)).FpBt)).FusableSize > (*BtShared)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pCur)).FpBt)).FnPage) { goto __17 } - rc = Xsqlite3CorruptError(tls, 74360) + rc = Xsqlite3CorruptError(tls, 74364) goto moveto_index_finish __17: ; @@ -38198,7 +38200,7 @@ if !((*UnpackedRecord)(unsafe.Pointer(pIdxKey)).FerrCode != 0) { goto __24 } - rc = Xsqlite3CorruptError(tls, 74392) + rc = Xsqlite3CorruptError(tls, 74396) __24: ; goto moveto_index_finish @@ -38317,7 +38319,7 @@ pPage = (*BtCursor)(unsafe.Pointer(pCur)).FpPage idx = int32(libc.PreIncUint16(&(*BtCursor)(unsafe.Pointer(pCur)).Fix, 1)) if !(int32((*MemPage)(unsafe.Pointer(pPage)).FisInit) != 0) || Xsqlite3FaultSim(tls, 412) != 0 { - return Xsqlite3CorruptError(tls, 74508) + return Xsqlite3CorruptError(tls, 74512) } if idx >= int32((*MemPage)(unsafe.Pointer(pPage)).FnCell) { @@ -38477,7 +38479,7 @@ if !(n >= mxPage) { goto __1 } - return Xsqlite3CorruptError(tls, 74688) + return Xsqlite3CorruptError(tls, 74692) __1: ; if !(n > U32(0)) { @@ -38542,7 +38544,7 @@ if !(iTrunk > mxPage || libc.PostIncUint32(&nSearch, 1) > n) { goto __16 } - rc = Xsqlite3CorruptError(tls, 74744) + rc = Xsqlite3CorruptError(tls, 74748) goto __17 __16: rc = btreeGetUnusedPage(tls, pBt, iTrunk, bp+8, 0) @@ -38578,7 +38580,7 @@ goto __22 } - rc = Xsqlite3CorruptError(tls, 74773) + rc = Xsqlite3CorruptError(tls, 74777) goto end_allocate_page goto __23 __22: @@ -38622,7 +38624,7 @@ if !(iNewTrunk > mxPage) { goto __32 } - rc = Xsqlite3CorruptError(tls, 74807) + rc = Xsqlite3CorruptError(tls, 74811) goto end_allocate_page __32: ; @@ -38734,7 +38736,7 @@ if !(iPage > mxPage || iPage < Pgno(2)) { goto __51 } - rc = Xsqlite3CorruptError(tls, 74872) + rc = Xsqlite3CorruptError(tls, 74876) goto end_allocate_page __51: ; @@ -38892,7 +38894,7 @@ if !(iPage < Pgno(2) || iPage > (*BtShared)(unsafe.Pointer(pBt)).FnPage) { goto __1 } - return Xsqlite3CorruptError(tls, 74999) + return Xsqlite3CorruptError(tls, 75003) __1: ; if !(pMemPage != 0) { @@ -38949,7 +38951,7 @@ if !(iTrunk > btreePagecount(tls, pBt)) { goto __10 } - *(*int32)(unsafe.Pointer(bp + 8)) = Xsqlite3CorruptError(tls, 75046) + *(*int32)(unsafe.Pointer(bp + 8)) = Xsqlite3CorruptError(tls, 75050) goto freepage_out __10: ; @@ -38965,7 +38967,7 @@ if !(nLeaf > (*BtShared)(unsafe.Pointer(pBt)).FusableSize/U32(4)-U32(2)) { goto __12 } - *(*int32)(unsafe.Pointer(bp + 8)) = Xsqlite3CorruptError(tls, 75057) + *(*int32)(unsafe.Pointer(bp + 8)) = Xsqlite3CorruptError(tls, 75061) goto freepage_out __12: ; @@ -39039,7 +39041,7 @@ var ovflPageSize U32 if pCell+uintptr((*CellInfo)(unsafe.Pointer(pInfo)).FnSize) > (*MemPage)(unsafe.Pointer(pPage)).FaDataEnd { - return Xsqlite3CorruptError(tls, 75146) + return Xsqlite3CorruptError(tls, 75150) } ovflPgno = Xsqlite3Get4byte(tls, pCell+uintptr((*CellInfo)(unsafe.Pointer(pInfo)).FnSize)-uintptr(4)) pBt = (*MemPage)(unsafe.Pointer(pPage)).FpBt @@ -39051,7 +39053,7 @@ *(*Pgno)(unsafe.Pointer(bp + 8)) = Pgno(0) *(*uintptr)(unsafe.Pointer(bp)) = uintptr(0) if ovflPgno < Pgno(2) || ovflPgno > btreePagecount(tls, pBt) { - return Xsqlite3CorruptError(tls, 75163) + return Xsqlite3CorruptError(tls, 75167) } if nOvfl != 0 { rc = getOverflowPage(tls, pBt, ovflPgno, bp, bp+8) @@ -39062,7 +39064,7 @@ if (*(*uintptr)(unsafe.Pointer(bp)) != 0 || libc.AssignPtrUintptr(bp, btreePageLookup(tls, pBt, ovflPgno)) != uintptr(0)) && Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FpDbPage) != 1 { - rc = 
Xsqlite3CorruptError(tls, 75183) + rc = Xsqlite3CorruptError(tls, 75187) } else { rc = freePage2(tls, pBt, *(*uintptr)(unsafe.Pointer(bp)), ovflPgno) } @@ -39227,7 +39229,7 @@ hdr = int32((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset) if pc+U32(sz) > (*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize { - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 75436) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 75440) return } rc = freeSpace(tls, pPage, uint16(pc), uint16(sz)) @@ -39506,12 +39508,12 @@ if Uptr(pCell) >= Uptr(aData+uintptr(j)) && Uptr(pCell) < Uptr(pEnd) { if Uptr(pCell+uintptr(sz)) > Uptr(pEnd) { - return Xsqlite3CorruptError(tls, 75737) + return Xsqlite3CorruptError(tls, 75741) } pCell = pTmp + uintptr((int64(pCell)-int64(aData))/1) } else if Uptr(pCell+uintptr(sz)) > Uptr(pSrcEnd) && Uptr(pCell) < Uptr(pSrcEnd) { - return Xsqlite3CorruptError(tls, 75742) + return Xsqlite3CorruptError(tls, 75746) } pData -= uintptr(sz) @@ -39519,7 +39521,7 @@ *(*U8)(unsafe.Pointer(pCellptr + 1)) = U8((int64(pData) - int64(aData)) / 1) pCellptr += uintptr(2) if pData < pCellptr { - return Xsqlite3CorruptError(tls, 75748) + return Xsqlite3CorruptError(tls, 75752) } libc.X__builtin___memmove_chk(tls, pData, pCell, uint64(sz), libc.X__builtin_object_size(tls, pData, 0)) @@ -39579,7 +39581,7 @@ if Uptr(*(*uintptr)(unsafe.Pointer((*CellArray)(unsafe.Pointer(pCArray)).FapCell + uintptr(i)*8))+uintptr(sz)) > Uptr(pEnd) && Uptr(*(*uintptr)(unsafe.Pointer((*CellArray)(unsafe.Pointer(pCArray)).FapCell + uintptr(i)*8))) < Uptr(pEnd) { - Xsqlite3CorruptError(tls, 75833) + Xsqlite3CorruptError(tls, 75837) return 1 } libc.X__builtin___memmove_chk(tls, pSlot, *(*uintptr)(unsafe.Pointer((*CellArray)(unsafe.Pointer(pCArray)).FapCell + uintptr(i)*8)), uint64(sz), libc.X__builtin_object_size(tls, pSlot, 0)) @@ -39668,7 +39670,7 @@ if !(nShift > nCell) { goto __2 } - return Xsqlite3CorruptError(tls, 75947) + return Xsqlite3CorruptError(tls, 75951) __2: ; libc.X__builtin___memmove_chk(tls, (*MemPage)(unsafe.Pointer(pPg)).FaCellIdx, (*MemPage)(unsafe.Pointer(pPg)).FaCellIdx+uintptr(nShift*2), uint64(nCell*2), libc.X__builtin_object_size(tls, (*MemPage)(unsafe.Pointer(pPg)).FaCellIdx, 0)) @@ -39784,7 +39786,7 @@ var pBt uintptr = (*MemPage)(unsafe.Pointer(pPage)).FpBt if int32((*MemPage)(unsafe.Pointer(pPage)).FnCell) == 0 { - return Xsqlite3CorruptError(tls, 76060) + return Xsqlite3CorruptError(tls, 76064) } *(*int32)(unsafe.Pointer(bp + 136)) = allocateBtreePage(tls, pBt, bp, bp+8, uint32(0), uint8(0)) @@ -40104,7 +40106,7 @@ if !(int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pOld)).FaData))) != int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp + 112)))).FaData)))) { goto __25 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76481) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76485) goto balance_cleanup __25: ; @@ -40115,7 +40117,7 @@ if !(limit < int32(*(*U16)(unsafe.Pointer(pOld + 28)))) { goto __27 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76505) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76509) goto balance_cleanup __27: ; @@ -40273,7 +40275,7 @@ if !(k > NB+2) { goto __55 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76606) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76610) goto balance_cleanup __55: ; @@ -40347,7 +40349,7 @@ }()) { goto __67 } - *(*int32)(unsafe.Pointer(bp + 172)) = 
Xsqlite3CorruptError(tls, 76639) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76643) goto balance_cleanup __67: ; @@ -40410,7 +40412,7 @@ }()) { goto __75 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76683) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76687) goto balance_cleanup __75: ; @@ -40438,7 +40440,7 @@ *(*int32)(unsafe.Pointer(bp + 172)) == SQLITE_OK) { goto __81 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76716) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76720) __81: ; if !(*(*int32)(unsafe.Pointer(bp + 172)) != 0) { @@ -40699,7 +40701,7 @@ if !(Uptr(pSrcEnd) >= Uptr(pCell1) && Uptr(pSrcEnd) < Uptr(pCell1+uintptr(sz2))) { goto __121 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76916) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76920) goto balance_cleanup __121: ; @@ -40889,7 +40891,7 @@ if pOther != pCur && int32((*BtCursor)(unsafe.Pointer(pOther)).FeState) == CURSOR_VALID && (*BtCursor)(unsafe.Pointer(pOther)).FpPage == (*BtCursor)(unsafe.Pointer(pCur)).FpPage { - return Xsqlite3CorruptError(tls, 77146) + return Xsqlite3CorruptError(tls, 77150) } } return SQLITE_OK @@ -40927,7 +40929,7 @@ break } } else if Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(pPage)).FpDbPage) > 1 { - rc = Xsqlite3CorruptError(tls, 77206) + rc = Xsqlite3CorruptError(tls, 77210) } else { var pParent uintptr = *(*uintptr)(unsafe.Pointer(pCur + 144 + uintptr(iPage-1)*8)) var iIdx int32 = int32(*(*U16)(unsafe.Pointer(pCur + 88 + uintptr(iPage-1)*2))) @@ -41033,7 +41035,7 @@ return rc } if Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FpDbPage) != 1 || (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FisInit != 0 { - rc = Xsqlite3CorruptError(tls, 77370) + rc = Xsqlite3CorruptError(tls, 77374) } else { if U32(iOffset)+ovflPageSize < U32(nTotal) { ovflPgno = Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FaData) @@ -41058,7 +41060,7 @@ if (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload+uintptr((*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal) > (*MemPage)(unsafe.Pointer(pPage)).FaDataEnd || (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload < (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr((*MemPage)(unsafe.Pointer(pPage)).FcellOffset) { - return Xsqlite3CorruptError(tls, 77398) + return Xsqlite3CorruptError(tls, 77402) } if int32((*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal) == nTotal { return btreeOverwriteContent(tls, pPage, (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload, pX, @@ -41128,7 +41130,7 @@ goto __3 } - return Xsqlite3CorruptError(tls, 77479) + return Xsqlite3CorruptError(tls, 77483) __3: ; __1: @@ -41241,7 +41243,7 @@ goto __21 } - *(*int32)(unsafe.Pointer(bp + 120)) = Xsqlite3CorruptError(tls, 77602) + *(*int32)(unsafe.Pointer(bp + 120)) = Xsqlite3CorruptError(tls, 77606) goto __22 __21: *(*int32)(unsafe.Pointer(bp + 120)) = btreeComputeFreeSpace(tls, pPage) @@ -41301,6 +41303,7 @@ __25: ; idx = int32((*BtCursor)(unsafe.Pointer(pCur)).Fix) + (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnSize = U16(0) if !(*(*int32)(unsafe.Pointer(bp)) == 0) { goto __31 } @@ -41308,7 +41311,7 @@ if !(idx >= int32((*MemPage)(unsafe.Pointer(pPage)).FnCell)) { goto __33 } - return Xsqlite3CorruptError(tls, 77640) + return Xsqlite3CorruptError(tls, 77645) __33: ; *(*int32)(unsafe.Pointer(bp + 120)) = Xsqlite3PagerWrite(tls, 
(*MemPage)(unsafe.Pointer(pPage)).FpDbPage) @@ -41346,13 +41349,13 @@ if !(oldCell < (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset)+uintptr(10)) { goto __39 } - return Xsqlite3CorruptError(tls, 77667) + return Xsqlite3CorruptError(tls, 77672) __39: ; if !(oldCell+uintptr(*(*int32)(unsafe.Pointer(bp + 124))) > (*MemPage)(unsafe.Pointer(pPage)).FaDataEnd) { goto __40 } - return Xsqlite3CorruptError(tls, 77670) + return Xsqlite3CorruptError(tls, 77675) __40: ; libc.X__builtin___memcpy_chk(tls, oldCell, newCell, uint64(*(*int32)(unsafe.Pointer(bp + 124))), libc.X__builtin_object_size(tls, oldCell, 0)) @@ -41383,7 +41386,6 @@ ; *(*int32)(unsafe.Pointer(bp + 120)) = insertCell(tls, pPage, idx, newCell, *(*int32)(unsafe.Pointer(bp + 124)), uintptr(0), uint32(0)) - (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnSize = U16(0) if !((*MemPage)(unsafe.Pointer(pPage)).FnOverflow != 0) { goto __44 } @@ -41458,7 +41460,7 @@ nIn = U32((*BtCursor)(unsafe.Pointer(pSrc)).Finfo.FnLocal) aIn = (*BtCursor)(unsafe.Pointer(pSrc)).Finfo.FpPayload if aIn+uintptr(nIn) > (*MemPage)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pSrc)).FpPage)).FaDataEnd { - return Xsqlite3CorruptError(tls, 77773) + return Xsqlite3CorruptError(tls, 77777) } nRem = (*BtCursor)(unsafe.Pointer(pSrc)).Finfo.FnPayload if nIn == nRem && nIn < U32((*MemPage)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pDest)).FpPage)).FmaxLocal) { @@ -41483,7 +41485,7 @@ if nRem > nIn { if aIn+uintptr(nIn)+uintptr(4) > (*MemPage)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pSrc)).FpPage)).FaDataEnd { - return Xsqlite3CorruptError(tls, 77798) + return Xsqlite3CorruptError(tls, 77802) } ovflIn = Xsqlite3Get4byte(tls, (*BtCursor)(unsafe.Pointer(pSrc)).Finfo.FpPayload+uintptr(nIn)) } @@ -41584,7 +41586,7 @@ return *(*int32)(unsafe.Pointer(bp + 24)) } } else { - return Xsqlite3CorruptError(tls, 77894) + return Xsqlite3CorruptError(tls, 77898) } } @@ -41592,11 +41594,11 @@ iCellIdx = int32((*BtCursor)(unsafe.Pointer(pCur)).Fix) pPage = (*BtCursor)(unsafe.Pointer(pCur)).FpPage if int32((*MemPage)(unsafe.Pointer(pPage)).FnCell) <= iCellIdx { - return Xsqlite3CorruptError(tls, 77903) + return Xsqlite3CorruptError(tls, 77907) } pCell = (*MemPage)(unsafe.Pointer(pPage)).FaData + uintptr(int32((*MemPage)(unsafe.Pointer(pPage)).FmaskPage)&(int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FaCellIdx + uintptr(2*iCellIdx))))<<8|int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FaCellIdx + uintptr(2*iCellIdx) + 1))))) if (*MemPage)(unsafe.Pointer(pPage)).FnFree < 0 && btreeComputeFreeSpace(tls, pPage) != 0 { - return Xsqlite3CorruptError(tls, 77907) + return Xsqlite3CorruptError(tls, 77911) } bPreserve = U8(libc.Bool32(int32(flags)&BTREE_SAVEPOSITION != 0)) @@ -41671,7 +41673,7 @@ } pCell = (*MemPage)(unsafe.Pointer(pLeaf)).FaData + uintptr(int32((*MemPage)(unsafe.Pointer(pLeaf)).FmaskPage)&(int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pLeaf)).FaCellIdx + uintptr(2*(int32((*MemPage)(unsafe.Pointer(pLeaf)).FnCell)-1)))))<<8|int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pLeaf)).FaCellIdx + uintptr(2*(int32((*MemPage)(unsafe.Pointer(pLeaf)).FnCell)-1)) + 1))))) if pCell < (*MemPage)(unsafe.Pointer(pLeaf)).FaData+4 { - return Xsqlite3CorruptError(tls, 77998) + return Xsqlite3CorruptError(tls, 78002) } nCell = int32((*struct { f func(*libc.TLS, uintptr, uintptr) U16 @@ -41740,7 +41742,7 @@ Xsqlite3BtreeGetMeta(tls, p, BTREE_LARGEST_ROOT_PAGE, bp) if *(*Pgno)(unsafe.Pointer(bp)) > btreePagecount(tls, pBt) 
{ - return Xsqlite3CorruptError(tls, 78114) + return Xsqlite3CorruptError(tls, 78118) } *(*Pgno)(unsafe.Pointer(bp))++ @@ -41769,7 +41771,7 @@ } *(*int32)(unsafe.Pointer(bp + 40)) = ptrmapGet(tls, pBt, *(*Pgno)(unsafe.Pointer(bp)), bp+32, bp+36) if int32(*(*U8)(unsafe.Pointer(bp + 32))) == PTRMAP_ROOTPAGE || int32(*(*U8)(unsafe.Pointer(bp + 32))) == PTRMAP_FREEPAGE { - *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3CorruptError(tls, 78162) + *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3CorruptError(tls, 78166) } if *(*int32)(unsafe.Pointer(bp + 40)) != SQLITE_OK { releasePage(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) @@ -41845,7 +41847,7 @@ if !(pgno > btreePagecount(tls, pBt)) { goto __1 } - return Xsqlite3CorruptError(tls, 78252) + return Xsqlite3CorruptError(tls, 78256) __1: ; *(*int32)(unsafe.Pointer(bp + 32)) = getAndInitPage(tls, pBt, pgno, bp, uintptr(0), 0) @@ -41859,7 +41861,7 @@ Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FpDbPage) != 1+libc.Bool32(pgno == Pgno(1))) { goto __3 } - *(*int32)(unsafe.Pointer(bp + 32)) = Xsqlite3CorruptError(tls, 78259) + *(*int32)(unsafe.Pointer(bp + 32)) = Xsqlite3CorruptError(tls, 78263) goto cleardatabasepage_out __3: ; @@ -41993,7 +41995,7 @@ var pBt uintptr = (*Btree)(unsafe.Pointer(p)).FpBt if iTable > btreePagecount(tls, pBt) { - return Xsqlite3CorruptError(tls, 78363) + return Xsqlite3CorruptError(tls, 78367) } *(*int32)(unsafe.Pointer(bp + 12)) = Xsqlite3BtreeClearTable(tls, p, int32(iTable), uintptr(0)) @@ -44462,7 +44464,7 @@ var rc int32 (*Mem)(unsafe.Pointer(pMem)).Fflags = U16(MEM_Null) if Xsqlite3BtreeMaxRecordSize(tls, pCur) < Sqlite3_int64(offset+amt) { - return Xsqlite3CorruptError(tls, 81630) + return Xsqlite3CorruptError(tls, 81634) } if SQLITE_OK == libc.AssignInt32(&rc, Xsqlite3VdbeMemClearAndResize(tls, pMem, int32(amt+U32(1)))) { rc = Xsqlite3BtreePayload(tls, pCur, offset, amt, (*Mem)(unsafe.Pointer(pMem)).Fz) @@ -45111,7 +45113,7 @@ return Xsqlite3GetVarint32(tls, a, bp) }()) if *(*int32)(unsafe.Pointer(bp)) > nRec || iHdr >= *(*int32)(unsafe.Pointer(bp)) { - return Xsqlite3CorruptError(tls, 82270) + return Xsqlite3CorruptError(tls, 82274) } iField = *(*int32)(unsafe.Pointer(bp)) for i = 0; i <= iCol; i++ { @@ -45126,14 +45128,14 @@ }()) if iHdr > *(*int32)(unsafe.Pointer(bp)) { - return Xsqlite3CorruptError(tls, 82276) + return Xsqlite3CorruptError(tls, 82280) } szField = int32(Xsqlite3VdbeSerialTypeLen(tls, *(*U32)(unsafe.Pointer(bp + 4)))) iField = iField + szField } if iField > nRec { - return Xsqlite3CorruptError(tls, 82282) + return Xsqlite3CorruptError(tls, 82286) } if pMem == uintptr(0) { pMem = libc.AssignPtrUintptr(ppVal, Xsqlite3ValueNew(tls, db)) @@ -47437,7 +47439,7 @@ return rc } if *(*int32)(unsafe.Pointer(bp)) != 0 { - return Xsqlite3CorruptError(tls, 86058) + return Xsqlite3CorruptError(tls, 86062) } (*VdbeCursor)(unsafe.Pointer(p)).FdeferredMoveto = U8(0) (*VdbeCursor)(unsafe.Pointer(p)).FcacheStatus = U32(CACHE_STALE) @@ -47988,7 +47990,7 @@ i = 0 } if d1 > uint32(nKey1) { - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 86985)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 86989)) return 0 } @@ -48053,7 +48055,7 @@ if d1+U32((*Mem)(unsafe.Pointer(bp+8)).Fn) > uint32(nKey1) || int32((*KeyInfo)(unsafe.Pointer(libc.AssignUintptr(&pKeyInfo, (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FpKeyInfo))).FnAllField) <= i { - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = 
U8(Xsqlite3CorruptError(tls, 87062)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87066)) return 0 } else if *(*uintptr)(unsafe.Pointer(pKeyInfo + 32 + uintptr(i)*8)) != 0 { (*Mem)(unsafe.Pointer(bp + 8)).Fenc = (*KeyInfo)(unsafe.Pointer(pKeyInfo)).Fenc @@ -48087,7 +48089,7 @@ var nStr int32 = int32((*(*U32)(unsafe.Pointer(bp + 64)) - U32(12)) / U32(2)) if d1+U32(nStr) > uint32(nKey1) { - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87092)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87096)) return 0 } else if int32((*Mem)(unsafe.Pointer(pRhs)).Fflags)&MEM_Zero != 0 { if !(isAllZero(tls, aKey1+uintptr(d1), nStr) != 0) { @@ -48137,7 +48139,7 @@ } idx1 = idx1 + U32(Xsqlite3VarintLen(tls, uint64(*(*U32)(unsafe.Pointer(bp + 64))))) if idx1 >= *(*U32)(unsafe.Pointer(bp + 4)) { - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87136)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87140)) return 0 } } @@ -48283,7 +48285,7 @@ if !(szHdr+nStr > nKey1) { goto __7 } - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87299)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87303)) return 0 __7: ; @@ -48454,7 +48456,7 @@ idx_rowid_corruption: ; Xsqlite3VdbeMemReleaseMalloc(tls, bp) - return Xsqlite3CorruptError(tls, 87457) + return Xsqlite3CorruptError(tls, 87461) } // Compare the key of the index entry that cursor pC is pointing to against @@ -48480,7 +48482,7 @@ if nCellKey <= int64(0) || nCellKey > int64(0x7fffffff) { *(*int32)(unsafe.Pointer(res)) = 0 - return Xsqlite3CorruptError(tls, 87490) + return Xsqlite3CorruptError(tls, 87494) } Xsqlite3VdbeMemInit(tls, bp, db, uint16(0)) rc = Xsqlite3VdbeMemFromBtreeZeroOffset(tls, pCur, U32(nCellKey), bp) @@ -48754,7 +48756,7 @@ var v uintptr = pStmt var db uintptr = (*Vdbe)(unsafe.Pointer(v)).Fdb if vdbeSafety(tls, v) != 0 { - return Xsqlite3MisuseError(tls, 87854) + return Xsqlite3MisuseError(tls, 87858) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) if (*Vdbe)(unsafe.Pointer(v)).FstartTime > int64(0) { @@ -49369,7 +49371,7 @@ var db uintptr if vdbeSafetyNotNull(tls, v) != 0 { - return Xsqlite3MisuseError(tls, 88544) + return Xsqlite3MisuseError(tls, 88548) } db = (*Vdbe)(unsafe.Pointer(v)).Fdb Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) @@ -49889,7 +49891,7 @@ var pVar uintptr if vdbeSafetyNotNull(tls, p) != 0 { - return Xsqlite3MisuseError(tls, 89208) + return Xsqlite3MisuseError(tls, 89212) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer((*Vdbe)(unsafe.Pointer(p)).Fdb)).Fmutex) if int32((*Vdbe)(unsafe.Pointer(p)).FeVdbeState) != VDBE_READY_STATE { @@ -49897,7 +49899,7 @@ Xsqlite3_mutex_leave(tls, (*Sqlite3)(unsafe.Pointer((*Vdbe)(unsafe.Pointer(p)).Fdb)).Fmutex) Xsqlite3_log(tls, SQLITE_MISUSE, ts+5760, libc.VaList(bp, (*Vdbe)(unsafe.Pointer(p)).FzSql)) - return Xsqlite3MisuseError(tls, 89216) + return Xsqlite3MisuseError(tls, 89220) } if i >= uint32((*Vdbe)(unsafe.Pointer(p)).FnVar) { Xsqlite3Error(tls, (*Vdbe)(unsafe.Pointer(p)).Fdb, SQLITE_RANGE) @@ -50302,7 +50304,7 @@ if !(!(p != 0) || (*PreUpdate)(unsafe.Pointer(p)).Fop == SQLITE_INSERT) { goto __1 } - rc = Xsqlite3MisuseError(tls, 89707) + rc = Xsqlite3MisuseError(tls, 89711) goto preupdate_old_out __1: ; @@ -50446,7 +50448,7 @@ if !(!(p != 0) || (*PreUpdate)(unsafe.Pointer(p)).Fop == 
SQLITE_DELETE) { goto __1 } - rc = Xsqlite3MisuseError(tls, 89809) + rc = Xsqlite3MisuseError(tls, 89813) goto preupdate_new_out __1: ; @@ -50890,10 +50892,6 @@ } else if int32((*Mem)(unsafe.Pointer(p)).Fflags)&MEM_Real != 0 { h = h + U64(Xsqlite3VdbeIntValue(tls, p)) } else if int32((*Mem)(unsafe.Pointer(p)).Fflags)&(MEM_Str|MEM_Blob) != 0 { - h = h + U64((*Mem)(unsafe.Pointer(p)).Fn) - if int32((*Mem)(unsafe.Pointer(p)).Fflags)&MEM_Zero != 0 { - h = h + U64(*(*int32)(unsafe.Pointer(p))) - } } } return h @@ -53542,7 +53540,7 @@ goto __9 goto __425 __424: - rc = Xsqlite3CorruptError(tls, 93317) + rc = Xsqlite3CorruptError(tls, 93320) goto abort_due_to_error __425: ; @@ -55302,7 +55300,7 @@ if !((*Op)(unsafe.Pointer(pOp)).Fp2 == 0) { goto __682 } - rc = Xsqlite3CorruptError(tls, 95560) + rc = Xsqlite3CorruptError(tls, 95563) goto __683 __682: goto jump_to_p2 @@ -56080,7 +56078,7 @@ if !((*Op)(unsafe.Pointer(pOp)).Fp5 != 0 && !(Xsqlite3WritableSchema(tls, db) != 0)) { goto __770 } - rc = Xsqlite3ReportError(tls, SQLITE_CORRUPT|int32(3)<<8, 96635, ts+6269) + rc = Xsqlite3ReportError(tls, SQLITE_CORRUPT|int32(3)<<8, 96638, ts+6269) goto abort_due_to_error __770: ; @@ -56190,7 +56188,7 @@ if !(nCellKey <= int64(0) || nCellKey > int64(0x7fffffff)) { goto __781 } - rc = Xsqlite3CorruptError(tls, 96840) + rc = Xsqlite3CorruptError(tls, 96843) goto abort_due_to_error __781: ; @@ -56384,7 +56382,7 @@ goto __803 } - rc = Xsqlite3CorruptError(tls, 97092) + rc = Xsqlite3CorruptError(tls, 97095) __803: ; Xsqlite3DbFreeNN(tls, db, zSql) @@ -57751,7 +57749,7 @@ if !(rc == SQLITE_IOERR|int32(33)<<8) { goto __957 } - rc = Xsqlite3CorruptError(tls, 99031) + rc = Xsqlite3CorruptError(tls, 99034) __957: ; __956: @@ -58271,7 +58269,7 @@ var db uintptr if p == uintptr(0) { - return Xsqlite3MisuseError(tls, 99516) + return Xsqlite3MisuseError(tls, 99519) } db = (*Incrblob)(unsafe.Pointer(p)).Fdb Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) @@ -58354,7 +58352,7 @@ var db uintptr if p == uintptr(0) { - return Xsqlite3MisuseError(tls, 99616) + return Xsqlite3MisuseError(tls, 99619) } db = (*Incrblob)(unsafe.Pointer(p)).Fdb Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) @@ -61794,14 +61792,10 @@ ; Xsqlite3WalkExpr(tls, pWalker, (*Expr)(unsafe.Pointer(pExpr)).FpLeft) if 0 == Xsqlite3ExprCanBeNull(tls, (*Expr)(unsafe.Pointer(pExpr)).FpLeft) && !(int32((*Parse)(unsafe.Pointer(pParse)).FeParseMode) >= PARSE_MODE_RENAME) { - if int32((*Expr)(unsafe.Pointer(pExpr)).Fop) == TK_NOTNULL { - *(*uintptr)(unsafe.Pointer(pExpr + 8)) = ts + 7167 - *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_IsTrue) - } else { - *(*uintptr)(unsafe.Pointer(pExpr + 8)) = ts + 7172 - *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_IsFalse) - } - (*Expr)(unsafe.Pointer(pExpr)).Fop = U8(TK_TRUEFALSE) + *(*int32)(unsafe.Pointer(pExpr + 8)) = libc.Bool32(int32((*Expr)(unsafe.Pointer(pExpr)).Fop) == TK_NOTNULL) + *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_IntValue) + (*Expr)(unsafe.Pointer(pExpr)).Fop = U8(TK_INTEGER) + i = 0 p = pNC __4: @@ -61845,7 +61839,7 @@ var pLeft uintptr = (*Expr)(unsafe.Pointer(pExpr)).FpLeft if (*NameContext)(unsafe.Pointer(pNC)).FncFlags&(NC_IdxExpr|NC_GenCol) != 0 { - notValidImpl(tls, pParse, pNC, ts+7178, uintptr(0), pExpr) + notValidImpl(tls, pParse, pNC, ts+7167, uintptr(0), pExpr) } pRight = (*Expr)(unsafe.Pointer(pExpr)).FpRight @@ -61909,7 +61903,7 @@ (*Expr)(unsafe.Pointer(pExpr)).FiTable = exprProbability(tls, (*ExprList_item)(unsafe.Pointer(pList+8+1*32)).FpExpr) if 
(*Expr)(unsafe.Pointer(pExpr)).FiTable < 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+7195, libc.VaList(bp, pExpr)) + ts+7184, libc.VaList(bp, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } } else { @@ -61925,7 +61919,7 @@ var auth int32 = Xsqlite3AuthCheck(tls, pParse, SQLITE_FUNCTION, uintptr(0), (*FuncDef)(unsafe.Pointer(pDef)).FzName, uintptr(0)) if auth != SQLITE_OK { if auth == SQLITE_DENY { - Xsqlite3ErrorMsg(tls, pParse, ts+7259, + Xsqlite3ErrorMsg(tls, pParse, ts+7248, libc.VaList(bp+8, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } @@ -61939,7 +61933,7 @@ } if (*FuncDef)(unsafe.Pointer(pDef)).FfuncFlags&U32(SQLITE_FUNC_CONSTANT) == U32(0) { if (*NameContext)(unsafe.Pointer(pNC)).FncFlags&(NC_IdxExpr|NC_PartIdx|NC_GenCol) != 0 { - notValidImpl(tls, pParse, pNC, ts+7295, uintptr(0), pExpr) + notValidImpl(tls, pParse, pNC, ts+7284, uintptr(0), pExpr) } } else { @@ -61962,30 +61956,30 @@ if 0 == libc.Bool32(int32((*Parse)(unsafe.Pointer(pParse)).FeParseMode) >= PARSE_MODE_RENAME) { if pDef != 0 && (*FuncDef)(unsafe.Pointer(pDef)).FxValue == uintptr(0) && pWin != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+7323, libc.VaList(bp+16, pExpr)) + ts+7312, libc.VaList(bp+16, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } else if is_agg != 0 && (*NameContext)(unsafe.Pointer(pNC)).FncFlags&NC_AllowAgg == 0 || is_agg != 0 && (*FuncDef)(unsafe.Pointer(pDef)).FfuncFlags&U32(SQLITE_FUNC_WINDOW) != 0 && !(pWin != 0) || is_agg != 0 && pWin != 0 && (*NameContext)(unsafe.Pointer(pNC)).FncFlags&NC_AllowWin == 0 { var zType uintptr if (*FuncDef)(unsafe.Pointer(pDef)).FfuncFlags&U32(SQLITE_FUNC_WINDOW) != 0 || pWin != 0 { - zType = ts + 7366 + zType = ts + 7355 } else { - zType = ts + 7373 + zType = ts + 7362 } - Xsqlite3ErrorMsg(tls, pParse, ts+7383, libc.VaList(bp+24, zType, pExpr)) + Xsqlite3ErrorMsg(tls, pParse, ts+7372, libc.VaList(bp+24, zType, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ is_agg = 0 } else if no_such_func != 0 && int32((*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).Finit.Fbusy) == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+7411, libc.VaList(bp+40, pExpr)) + Xsqlite3ErrorMsg(tls, pParse, ts+7400, libc.VaList(bp+40, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } else if wrong_num_args != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+7433, + Xsqlite3ErrorMsg(tls, pParse, ts+7422, libc.VaList(bp+48, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } else if is_agg == 0 && (*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_WinFunc) != U32(0) { Xsqlite3ErrorMsg(tls, pParse, - ts+7477, + ts+7466, libc.VaList(bp+56, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } @@ -62057,15 +62051,15 @@ var nRef int32 = (*NameContext)(unsafe.Pointer(pNC)).FnRef if (*NameContext)(unsafe.Pointer(pNC)).FncFlags&NC_SelfRef != 0 { - notValidImpl(tls, pParse, pNC, ts+7525, pExpr, pExpr) + notValidImpl(tls, pParse, pNC, ts+7514, pExpr, pExpr) } else { Xsqlite3WalkSelect(tls, pWalker, *(*uintptr)(unsafe.Pointer(pExpr + 32))) } if nRef != (*NameContext)(unsafe.Pointer(pNC)).FnRef { *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_VarSelect) - *(*int32)(unsafe.Pointer(pNC + 40)) |= NC_VarSelect } + *(*int32)(unsafe.Pointer(pNC + 40)) |= NC_Subquery } break @@ -62073,7 +62067,7 @@ case TK_VARIABLE: { if (*NameContext)(unsafe.Pointer(pNC)).FncFlags&(NC_IsCheck|NC_PartIdx|NC_IdxExpr|NC_GenCol) != 0 { - notValidImpl(tls, pParse, pNC, ts+7536, pExpr, pExpr) + notValidImpl(tls, pParse, pNC, ts+7525, pExpr, pExpr) } break @@ -62204,7 +62198,7 @@ defer tls.Free(24) 
Xsqlite3ErrorMsg(tls, pParse, - ts+7547, libc.VaList(bp, i, zType, mx)) + ts+7536, libc.VaList(bp, i, zType, mx)) Xsqlite3RecordErrorOffsetOfExpr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pError) } @@ -62224,7 +62218,7 @@ } db = (*Parse)(unsafe.Pointer(pParse)).Fdb if (*ExprList)(unsafe.Pointer(pOrderBy)).FnExpr > *(*int32)(unsafe.Pointer(db + 136 + 2*4)) { - Xsqlite3ErrorMsg(tls, pParse, ts+7603, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+7592, 0) return 1 } for i = 0; i < (*ExprList)(unsafe.Pointer(pOrderBy)).FnExpr; i++ { @@ -62259,7 +62253,7 @@ } if Xsqlite3ExprIsInteger(tls, pE, bp+8) != 0 { if *(*int32)(unsafe.Pointer(bp + 8)) <= 0 || *(*int32)(unsafe.Pointer(bp + 8)) > (*ExprList)(unsafe.Pointer(pEList)).FnExpr { - resolveOutOfRangeError(tls, pParse, ts+7637, i+1, (*ExprList)(unsafe.Pointer(pEList)).FnExpr, pE) + resolveOutOfRangeError(tls, pParse, ts+7626, i+1, (*ExprList)(unsafe.Pointer(pEList)).FnExpr, pE) return 1 } } else { @@ -62316,7 +62310,7 @@ for i = 0; i < (*ExprList)(unsafe.Pointer(pOrderBy)).FnExpr; i++ { if int32(*(*uint16)(unsafe.Pointer(pOrderBy + 8 + uintptr(i)*32 + 16 + 4))&0x4>>2) == 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+7643, libc.VaList(bp, i+1)) + ts+7632, libc.VaList(bp, i+1)) return 1 } } @@ -62344,7 +62338,7 @@ return 0 } if (*ExprList)(unsafe.Pointer(pOrderBy)).FnExpr > *(*int32)(unsafe.Pointer(db + 136 + 2*4)) { - Xsqlite3ErrorMsg(tls, pParse, ts+7704, libc.VaList(bp, zType)) + Xsqlite3ErrorMsg(tls, pParse, ts+7693, libc.VaList(bp, zType)) return 1 } pEList = (*Select)(unsafe.Pointer(pSelect)).FpEList @@ -62558,7 +62552,7 @@ *(*int32)(unsafe.Pointer(bp + 40)) |= NC_UEList if (*Select)(unsafe.Pointer(p)).FpHaving != 0 { if (*Select)(unsafe.Pointer(p)).FselFlags&U32(SF_Aggregate) == U32(0) { - Xsqlite3ErrorMsg(tls, pParse, ts+7735, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+7724, 0) return WRC_Abort } if Xsqlite3ResolveExprNames(tls, bp, (*Select)(unsafe.Pointer(p)).FpHaving) != 0 { @@ -62598,7 +62592,7 @@ if (*Select)(unsafe.Pointer(p)).FpOrderBy != uintptr(0) && isCompound <= nCompound && - resolveOrderGroupBy(tls, bp, p, (*Select)(unsafe.Pointer(p)).FpOrderBy, ts+7637) != 0 { + resolveOrderGroupBy(tls, bp, p, (*Select)(unsafe.Pointer(p)).FpOrderBy, ts+7626) != 0 { return WRC_Abort } if (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { @@ -62609,7 +62603,7 @@ if pGroupBy != 0 { var pItem uintptr - if resolveOrderGroupBy(tls, bp, p, pGroupBy, ts+7774) != 0 || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { + if resolveOrderGroupBy(tls, bp, p, pGroupBy, ts+7763) != 0 || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { return WRC_Abort } i = 0 @@ -62621,7 +62615,7 @@ { if (*Expr)(unsafe.Pointer((*ExprList_item)(unsafe.Pointer(pItem)).FpExpr)).Fflags&U32(EP_Agg) != U32(0) { Xsqlite3ErrorMsg(tls, pParse, - ts+7780, 0) + ts+7769, 0) return WRC_Abort } @@ -63485,7 +63479,7 @@ var mxHeight int32 = *(*int32)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb + 136 + 3*4)) if nHeight > mxHeight { Xsqlite3ErrorMsg(tls, pParse, - ts+7839, libc.VaList(bp, mxHeight)) + ts+7828, libc.VaList(bp, mxHeight)) rc = SQLITE_ERROR } return rc @@ -63734,10 +63728,10 @@ nExprElem = 1 } if nExprElem != nElem { - Xsqlite3ErrorMsg(tls, pParse, ts+7887, + Xsqlite3ErrorMsg(tls, pParse, ts+7876, libc.VaList(bp, nExprElem, func() uintptr { if nExprElem > 1 { - return ts + 7931 + return ts + 7920 } return ts + 1547 }(), nElem)) @@ -63778,7 +63772,7 @@ !(int32((*Parse)(unsafe.Pointer(pParse)).FeParseMode) >= PARSE_MODE_RENAME) { Xsqlite3ExprDeferredDelete(tls, pParse, pLeft) 
Xsqlite3ExprDeferredDelete(tls, pParse, pRight) - return Xsqlite3Expr(tls, db, TK_INTEGER, ts+7933) + return Xsqlite3Expr(tls, db, TK_INTEGER, ts+7922) } else { return Xsqlite3PExpr(tls, pParse, TK_AND, pLeft, pRight) } @@ -63804,7 +63798,7 @@ if pList != 0 && (*ExprList)(unsafe.Pointer(pList)).FnExpr > *(*int32)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb + 136 + 6*4)) && !(int32((*Parse)(unsafe.Pointer(pParse)).Fnested) != 0) { - Xsqlite3ErrorMsg(tls, pParse, ts+7935, libc.VaList(bp, pToken)) + Xsqlite3ErrorMsg(tls, pParse, ts+7924, libc.VaList(bp, pToken)) } *(*uintptr)(unsafe.Pointer(pNew + 32)) = pList *(*U32)(unsafe.Pointer(pNew + 4)) |= U32(EP_HasFunc) @@ -63832,7 +63826,7 @@ if (*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_FromDDL) != U32(0) { if (*FuncDef)(unsafe.Pointer(pDef)).FfuncFlags&U32(SQLITE_FUNC_DIRECT) != U32(0) || (*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).Fflags&uint64(SQLITE_TrustedSchema) == uint64(0) { - Xsqlite3ErrorMsg(tls, pParse, ts+7969, libc.VaList(bp, pExpr)) + Xsqlite3ErrorMsg(tls, pParse, ts+7958, libc.VaList(bp, pExpr)) } } } @@ -63879,7 +63873,7 @@ } if bOk == 0 || *(*I64)(unsafe.Pointer(bp + 8)) < int64(1) || *(*I64)(unsafe.Pointer(bp + 8)) > I64(*(*int32)(unsafe.Pointer(db + 136 + 9*4))) { - Xsqlite3ErrorMsg(tls, pParse, ts+7989, + Xsqlite3ErrorMsg(tls, pParse, ts+7978, libc.VaList(bp, *(*int32)(unsafe.Pointer(db + 136 + 9*4)))) Xsqlite3RecordErrorOffsetOfExpr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pExpr) return @@ -63904,7 +63898,7 @@ } (*Expr)(unsafe.Pointer(pExpr)).FiColumn = x if int32(x) > *(*int32)(unsafe.Pointer(db + 136 + 9*4)) { - Xsqlite3ErrorMsg(tls, pParse, ts+8032, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+8021, 0) Xsqlite3RecordErrorOffsetOfExpr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pExpr) } } @@ -64479,7 +64473,7 @@ if !(int32((*Expr)(unsafe.Pointer(pExpr)).Fop) != TK_SELECT && (*IdList)(unsafe.Pointer(pColumns)).FnId != libc.AssignInt32(&n, Xsqlite3ExprVectorSize(tls, pExpr))) { goto __3 } - Xsqlite3ErrorMsg(tls, pParse, ts+8055, + Xsqlite3ErrorMsg(tls, pParse, ts+8044, libc.VaList(bp, (*IdList)(unsafe.Pointer(pColumns)).FnId, n)) goto vector_append_error __3: @@ -64602,7 +64596,7 @@ var mx int32 = *(*int32)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb + 136 + 2*4)) if pEList != 0 && (*ExprList)(unsafe.Pointer(pEList)).FnExpr > mx { - Xsqlite3ErrorMsg(tls, pParse, ts+8085, libc.VaList(bp, zObject)) + Xsqlite3ErrorMsg(tls, pParse, ts+8074, libc.VaList(bp, zObject)) } } @@ -64658,10 +64652,10 @@ // "false" EP_IsFalse // anything else 0 func Xsqlite3IsTrueOrFalse(tls *libc.TLS, zIn uintptr) U32 { - if Xsqlite3StrICmp(tls, zIn, ts+7167) == 0 { + if Xsqlite3StrICmp(tls, zIn, ts+8097) == 0 { return U32(EP_IsTrue) } - if Xsqlite3StrICmp(tls, zIn, ts+7172) == 0 { + if Xsqlite3StrICmp(tls, zIn, ts+8102) == 0 { return U32(EP_IsFalse) } return U32(0) @@ -65736,7 +65730,7 @@ } if (*Select)(unsafe.Pointer(pSel)).FpLimit != 0 { var db uintptr = (*Parse)(unsafe.Pointer(pParse)).Fdb - pLimit = Xsqlite3Expr(tls, db, TK_INTEGER, ts+7933) + pLimit = Xsqlite3Expr(tls, db, TK_INTEGER, ts+7922) if pLimit != 0 { (*Expr)(unsafe.Pointer(pLimit)).FaffExpr = int8(SQLITE_AFF_NUMERIC) pLimit = Xsqlite3PExpr(tls, pParse, TK_NE, @@ -66174,6 +66168,7 @@ func Xsqlite3ExprCodeGeneratedColumn(tls *libc.TLS, pParse uintptr, pTab uintptr, pCol uintptr, regOut int32) { var iAddr int32 var v uintptr = (*Parse)(unsafe.Pointer(pParse)).FpVdbe + var nErr int32 = (*Parse)(unsafe.Pointer(pParse)).FnErr if 
(*Parse)(unsafe.Pointer(pParse)).FiSelfTab > 0 { iAddr = Xsqlite3VdbeAddOp3(tls, v, OP_IfNullRow, (*Parse)(unsafe.Pointer(pParse)).FiSelfTab-1, 0, regOut) @@ -66187,6 +66182,9 @@ if iAddr != 0 { Xsqlite3VdbeJumpHere(tls, v, iAddr) } + if (*Parse)(unsafe.Pointer(pParse)).FnErr > nErr { + (*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).FerrByteOffset = -1 + } } // Generate code to extract the value of the iCol-th column of a table. @@ -66405,6 +66403,7 @@ var p uintptr var v uintptr for p = (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr; p != 0; p = (*IndexedExpr)(unsafe.Pointer(p)).FpIENext { + var exprAff U8 var iDataCur int32 = (*IndexedExpr)(unsafe.Pointer(p)).FiDataCur if iDataCur < 0 { continue @@ -66418,6 +66417,14 @@ if Xsqlite3ExprCompare(tls, uintptr(0), pExpr, (*IndexedExpr)(unsafe.Pointer(p)).FpExpr, iDataCur) != 0 { continue } + + exprAff = U8(Xsqlite3ExprAffinity(tls, pExpr)) + if int32(exprAff) <= SQLITE_AFF_BLOB && int32((*IndexedExpr)(unsafe.Pointer(p)).Faff) != SQLITE_AFF_BLOB || + int32(exprAff) == SQLITE_AFF_TEXT && int32((*IndexedExpr)(unsafe.Pointer(p)).Faff) != SQLITE_AFF_TEXT || + int32(exprAff) >= SQLITE_AFF_NUMERIC && int32((*IndexedExpr)(unsafe.Pointer(p)).Faff) != SQLITE_AFF_NUMERIC { + continue + } + v = (*Parse)(unsafe.Pointer(pParse)).FpVdbe if (*IndexedExpr)(unsafe.Pointer(p)).FbMaybeNullRow != 0 { @@ -67191,7 +67198,7 @@ if !((*Expr)(unsafe.Pointer(pExpr)).FiTable != n1) { goto __122 } - Xsqlite3ErrorMsg(tls, pParse, ts+8055, + Xsqlite3ErrorMsg(tls, pParse, ts+8044, libc.VaList(bp+24, (*Expr)(unsafe.Pointer(pExpr)).FiTable, n1)) __122: ; @@ -67213,11 +67220,10 @@ return target __50: - if !(!((*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_Collate) != U32(0)) && - (*Expr)(unsafe.Pointer(pExpr)).FpLeft != 0 && - int32((*Expr)(unsafe.Pointer((*Expr)(unsafe.Pointer(pExpr)).FpLeft)).Fop) == TK_FUNCTION) { + if !!((*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_Collate) != U32(0)) { goto __123 } + inReg = Xsqlite3ExprCodeTarget(tls, pParse, (*Expr)(unsafe.Pointer(pExpr)).FpLeft, target) if !(inReg != target) { goto __125 @@ -67288,13 +67294,19 @@ ; __127: ; - addrINR = Xsqlite3VdbeAddOp1(tls, v, OP_IfNullRow, (*Expr)(unsafe.Pointer(pExpr)).FiTable) + addrINR = Xsqlite3VdbeAddOp3(tls, v, OP_IfNullRow, (*Expr)(unsafe.Pointer(pExpr)).FiTable, 0, target) (*Parse)(unsafe.Pointer(pParse)).FokConstFactor = U8(0) inReg = Xsqlite3ExprCodeTarget(tls, pParse, (*Expr)(unsafe.Pointer(pExpr)).FpLeft, target) (*Parse)(unsafe.Pointer(pParse)).FokConstFactor = okConstFactor + if !(inReg != target) { + goto __130 + } + Xsqlite3VdbeAddOp2(tls, v, OP_SCopy, inReg, target) + inReg = target +__130: + ; Xsqlite3VdbeJumpHere(tls, v, addrINR) - Xsqlite3VdbeChangeP3(tls, v, addrINR, inReg) goto __5 __56: @@ -67307,15 +67319,15 @@ nExpr = (*ExprList)(unsafe.Pointer(pEList)).FnExpr endLabel = Xsqlite3VdbeMakeLabel(tls, pParse) if !(libc.AssignUintptr(&pX, (*Expr)(unsafe.Pointer(pExpr)).FpLeft) != uintptr(0)) { - goto __130 + goto __131 } pDel = Xsqlite3ExprDup(tls, db1, pX, 0) if !((*Sqlite3)(unsafe.Pointer(db1)).FmallocFailed != 0) { - goto __131 + goto __132 } Xsqlite3ExprDelete(tls, db1, pDel) goto __5 -__131: +__132: ; exprToRegister(tls, pDel, exprCodeVector(tls, pParse, pDel, bp+40)) @@ -67325,22 +67337,22 @@ pTest = bp + 120 *(*int32)(unsafe.Pointer(bp + 40)) = 0 -__130: +__131: ; i1 = 0 -__132: +__133: if !(i1 < nExpr-1) { - goto __134 + goto __135 } if !(pX != 0) { - goto __135 + goto __136 } (*Expr)(unsafe.Pointer(bp + 120)).FpRight = (*ExprList_item)(unsafe.Pointer(aListelem 
+ uintptr(i1)*32)).FpExpr - goto __136 -__135: - pTest = (*ExprList_item)(unsafe.Pointer(aListelem + uintptr(i1)*32)).FpExpr + goto __137 __136: + pTest = (*ExprList_item)(unsafe.Pointer(aListelem + uintptr(i1)*32)).FpExpr +__137: ; nextCase = Xsqlite3VdbeMakeLabel(tls, pParse) @@ -67349,21 +67361,21 @@ Xsqlite3ExprCode(tls, pParse, (*ExprList_item)(unsafe.Pointer(aListelem+uintptr(i1+1)*32)).FpExpr, target) Xsqlite3VdbeGoto(tls, v, endLabel) Xsqlite3VdbeResolveLabel(tls, v, nextCase) - goto __133 -__133: - i1 = i1 + 2 - goto __132 goto __134 __134: + i1 = i1 + 2 + goto __133 + goto __135 +__135: ; if !(nExpr&1 != 0) { - goto __137 + goto __138 } Xsqlite3ExprCode(tls, pParse, (*ExprList_item)(unsafe.Pointer(pEList+8+uintptr(nExpr-1)*32)).FpExpr, target) - goto __138 -__137: - Xsqlite3VdbeAddOp2(tls, v, OP_Null, 0, target) + goto __139 __138: + Xsqlite3VdbeAddOp2(tls, v, OP_Null, 0, target) +__139: ; Xsqlite3ExprDelete(tls, db1, pDel) setDoNotMergeFlagOnCopy(tls, v) @@ -67373,27 +67385,27 @@ __57: ; if !(!(int32((*Parse)(unsafe.Pointer(pParse)).FpTriggerTab) != 0) && !(int32((*Parse)(unsafe.Pointer(pParse)).Fnested) != 0)) { - goto __139 + goto __140 } Xsqlite3ErrorMsg(tls, pParse, ts+8485, 0) return 0 -__139: +__140: ; if !(int32((*Expr)(unsafe.Pointer(pExpr)).FaffExpr) == OE_Abort) { - goto __140 + goto __141 } Xsqlite3MayAbort(tls, pParse) -__140: +__141: ; if !(int32((*Expr)(unsafe.Pointer(pExpr)).FaffExpr) == OE_Ignore) { - goto __141 + goto __142 } Xsqlite3VdbeAddOp4(tls, v, OP_Halt, SQLITE_OK, OE_Ignore, 0, *(*uintptr)(unsafe.Pointer(pExpr + 8)), 0) - goto __142 -__141: + goto __143 +__142: Xsqlite3HaltConstraint(tls, pParse, func() int32 { if (*Parse)(unsafe.Pointer(pParse)).FpTriggerTab != 0 { @@ -67402,7 +67414,7 @@ return SQLITE_ERROR }(), int32((*Expr)(unsafe.Pointer(pExpr)).FaffExpr), *(*uintptr)(unsafe.Pointer(pExpr + 8)), int8(0), uint8(0)) -__142: +__143: ; goto __5 @@ -70073,7 +70085,7 @@ return SQLITE_NOMEM } if Xsqlite3_strnicmp(tls, zSql, ts+11325, 7) != 0 { - return Xsqlite3CorruptError(tls, 113494) + return Xsqlite3CorruptError(tls, 113516) } (*Sqlite3)(unsafe.Pointer(db)).Finit.FiDb = func() uint8 { if bTemp != 0 { @@ -70090,7 +70102,7 @@ } if rc == SQLITE_OK && ((*Parse)(unsafe.Pointer(p)).FpNewTable == uintptr(0) && (*Parse)(unsafe.Pointer(p)).FpNewIndex == uintptr(0) && (*Parse)(unsafe.Pointer(p)).FpNewTrigger == uintptr(0)) { - rc = Xsqlite3CorruptError(tls, 113505) + rc = Xsqlite3CorruptError(tls, 113527) } (*Sqlite3)(unsafe.Pointer(db)).Finit.FiDb = U8(0) @@ -71010,7 +71022,7 @@ goto __2 } - rc = Xsqlite3CorruptError(tls, 114441) + rc = Xsqlite3CorruptError(tls, 114463) goto drop_column_done __2: ; @@ -75374,6 +75386,12 @@ pExpr = Xsqlite3PExpr(tls, pParse, TK_UPLUS, pExpr, uintptr(0)) __11: ; + if !(pExpr != 0 && int32((*Expr)(unsafe.Pointer(pExpr)).Fop) != TK_RAISE) { + goto __12 + } + (*Expr)(unsafe.Pointer(pExpr)).FaffExpr = (*Column)(unsafe.Pointer(pCol)).Faffinity +__12: + ; Xsqlite3ColumnSetExpr(tls, pParse, pTab, pCol, pExpr) pExpr = uintptr(0) goto generated_done @@ -76538,7 +76556,7 @@ if Xsqlite3_strnicmp(tls, (*Table)(unsafe.Pointer(pTab)).FzName+uintptr(7), ts+3279, 4) == 0 { return 0 } - if Xsqlite3_strnicmp(tls, (*Table)(unsafe.Pointer(pTab)).FzName+uintptr(7), ts+7536, 10) == 0 { + if Xsqlite3_strnicmp(tls, (*Table)(unsafe.Pointer(pTab)).FzName+uintptr(7), ts+7525, 10) == 0 { return 0 } return 1 @@ -77784,7 +77802,7 @@ goto __101 } Xsqlite3ErrorMsg(tls, pParse, ts+14543, 0) - (*Parse)(unsafe.Pointer(pParse)).Frc = Xsqlite3CorruptError(tls, 
121835) + (*Parse)(unsafe.Pointer(pParse)).Frc = Xsqlite3CorruptError(tls, 121859) goto exit_create_index __101: ; @@ -79829,7 +79847,7 @@ goto __16 __15: wcf = U16(WHERE_ONEPASS_DESIRED | WHERE_DUPLICATES_OK) - if !((*NameContext)(unsafe.Pointer(bp+16)).FncFlags&NC_VarSelect != 0) { + if !((*NameContext)(unsafe.Pointer(bp+16)).FncFlags&NC_Subquery != 0) { goto __23 } bComplex = 1 @@ -86294,7 +86312,7 @@ if !!(Xsqlite3SafetyCheckOk(tls, db) != 0) { goto __1 } - return Xsqlite3MisuseError(tls, 131895) + return Xsqlite3MisuseError(tls, 131931) __1: ; if !(zSql == uintptr(0)) { @@ -87696,7 +87714,7 @@ } else if (*FuncDef)(unsafe.Pointer(p)).FxFinalize != uintptr(0) { zType = ts + 17935 } else { - zType = ts + 7931 + zType = ts + 7920 } Xsqlite3VdbeMultiLoad(tls, v, 1, ts+17937, libc.VaList(bp, (*FuncDef)(unsafe.Pointer(p)).FzName, isBuiltin, @@ -87863,6 +87881,7 @@ var zErr2 uintptr var k3 int32 var pCheck uintptr + var jmp7 int32 var jmp6 int32 var iCol1 int32 var uniqOk int32 @@ -89220,7 +89239,7 @@ goto __223 } pMod = (*HashElem)(unsafe.Pointer(j1)).Fdata - Xsqlite3VdbeMultiLoad(tls, v, 1, ts+7931, libc.VaList(bp+264, (*Module)(unsafe.Pointer(pMod)).FzName)) + Xsqlite3VdbeMultiLoad(tls, v, 1, ts+7920, libc.VaList(bp+264, (*Module)(unsafe.Pointer(pMod)).FzName)) goto __222 __222: j1 = (*HashElem)(unsafe.Pointer(j1)).Fnext @@ -89236,7 +89255,7 @@ if !(i6 < int32(uint64(unsafe.Sizeof(aPragmaName))/uint64(unsafe.Sizeof(PragmaName{})))) { goto __226 } - Xsqlite3VdbeMultiLoad(tls, v, 1, ts+7931, libc.VaList(bp+272, aPragmaName[i6].FzName)) + Xsqlite3VdbeMultiLoad(tls, v, 1, ts+7920, libc.VaList(bp+272, aPragmaName[i6].FzName)) goto __225 __225: i6++ @@ -90041,80 +90060,94 @@ jmp4 = integrityCheckResultRow(tls, v) Xsqlite3VdbeJumpHere(tls, v, jmp21) + if !((*Table)(unsafe.Pointer(pTab9)).FtabFlags&U32(TF_WithoutRowid) == U32(0)) { + goto __351 + } + Xsqlite3VdbeAddOp2(tls, v, OP_IdxRowid, *(*int32)(unsafe.Pointer(bp + 624))+j4, 3) + jmp7 = Xsqlite3VdbeAddOp3(tls, v, OP_Eq, 3, 0, r1+int32((*Index)(unsafe.Pointer(pIdx5)).FnColumn)-1) + + Xsqlite3VdbeLoadString(tls, v, 3, + ts+18381) + Xsqlite3VdbeAddOp3(tls, v, OP_Concat, 7, 3, 3) + Xsqlite3VdbeLoadString(tls, v, 4, ts+18417) + Xsqlite3VdbeGoto(tls, v, jmp5-1) + Xsqlite3VdbeJumpHere(tls, v, jmp7) +__351: + ; label6 = 0 kk = 0 -__351: +__352: if !(kk < int32((*Index)(unsafe.Pointer(pIdx5)).FnKeyCol)) { - goto __353 + goto __354 } if !(*(*uintptr)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx5)).FazColl + uintptr(kk)*8)) == uintptr(unsafe.Pointer(&Xsqlite3StrBINARY))) { - goto __354 + goto __355 } - goto __352 -__354: + goto __353 +__355: ; if !(label6 == 0) { - goto __355 + goto __356 } label6 = Xsqlite3VdbeMakeLabel(tls, pParse) -__355: +__356: ; Xsqlite3VdbeAddOp3(tls, v, OP_Column, *(*int32)(unsafe.Pointer(bp + 624))+j4, kk, 3) Xsqlite3VdbeAddOp3(tls, v, OP_Ne, 3, label6, r1+kk) - goto __352 -__352: - kk++ - goto __351 goto __353 __353: + kk++ + goto __352 + goto __354 +__354: ; if !(label6 != 0) { - goto __356 + goto __357 } jmp6 = Xsqlite3VdbeAddOp0(tls, v, OP_Goto) Xsqlite3VdbeResolveLabel(tls, v, label6) Xsqlite3VdbeLoadString(tls, v, 3, ts+18355) Xsqlite3VdbeAddOp3(tls, v, OP_Concat, 7, 3, 3) - Xsqlite3VdbeLoadString(tls, v, 4, ts+18381) + Xsqlite3VdbeLoadString(tls, v, 4, ts+18428) Xsqlite3VdbeGoto(tls, v, jmp5-1) Xsqlite3VdbeJumpHere(tls, v, jmp6) -__356: +__357: ; if !(int32((*Index)(unsafe.Pointer(pIdx5)).FonError) != OE_None) { - goto __357 + goto __358 } uniqOk = Xsqlite3VdbeMakeLabel(tls, pParse) kk = 0 -__358: +__359: if !(kk < 
int32((*Index)(unsafe.Pointer(pIdx5)).FnKeyCol)) { - goto __360 + goto __361 } iCol1 = int32(*(*I16)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx5)).FaiColumn + uintptr(kk)*2))) if !(iCol1 >= 0 && uint32(int32(*(*uint8)(unsafe.Pointer((*Table)(unsafe.Pointer(pTab9)).FaCol + uintptr(iCol1)*24 + 8))&0xf>>0)) != 0) { - goto __361 + goto __362 } - goto __359 -__361: + goto __360 +__362: ; Xsqlite3VdbeAddOp2(tls, v, OP_IsNull, r1+kk, uniqOk) - goto __359 -__359: - kk++ - goto __358 goto __360 __360: + kk++ + goto __359 + goto __361 +__361: ; jmp61 = Xsqlite3VdbeAddOp1(tls, v, OP_Next, *(*int32)(unsafe.Pointer(bp + 624))+j4) Xsqlite3VdbeGoto(tls, v, uniqOk) Xsqlite3VdbeJumpHere(tls, v, jmp61) Xsqlite3VdbeAddOp4Int(tls, v, OP_IdxGT, *(*int32)(unsafe.Pointer(bp + 624))+j4, uniqOk, r1, int32((*Index)(unsafe.Pointer(pIdx5)).FnKeyCol)) - Xsqlite3VdbeLoadString(tls, v, 3, ts+18408) + Xsqlite3VdbeLoadString(tls, v, 3, ts+18455) Xsqlite3VdbeGoto(tls, v, jmp5) Xsqlite3VdbeResolveLabel(tls, v, uniqOk) -__357: +__358: ; Xsqlite3VdbeJumpHere(tls, v, jmp4) Xsqlite3ResolvePartIdxLabel(tls, pParse, *(*int32)(unsafe.Pointer(bp + 640))) @@ -90131,20 +90164,20 @@ Xsqlite3VdbeAddOp2(tls, v, OP_Next, *(*int32)(unsafe.Pointer(bp + 620)), loopTop) Xsqlite3VdbeJumpHere(tls, v, loopTop-1) if !!(isQuick != 0) { - goto __362 + goto __363 } - Xsqlite3VdbeLoadString(tls, v, 2, ts+18435) + Xsqlite3VdbeLoadString(tls, v, 2, ts+18482) j4 = 0 pIdx5 = (*Table)(unsafe.Pointer(pTab9)).FpIndex -__363: +__364: if !(pIdx5 != 0) { - goto __365 + goto __366 } if !(pPk1 == pIdx5) { - goto __366 + goto __367 } - goto __364 -__366: + goto __365 +__367: ; Xsqlite3VdbeAddOp2(tls, v, OP_Count, *(*int32)(unsafe.Pointer(bp + 624))+j4, 3) addr1 = Xsqlite3VdbeAddOp3(tls, v, OP_Eq, 8+j4, 0, 3) @@ -90153,21 +90186,21 @@ Xsqlite3VdbeAddOp3(tls, v, OP_Concat, 4, 2, 3) integrityCheckResultRow(tls, v) Xsqlite3VdbeJumpHere(tls, v, addr1) - goto __364 -__364: - pIdx5 = (*Index)(unsafe.Pointer(pIdx5)).FpNext - j4++ - goto __363 goto __365 __365: + pIdx5 = (*Index)(unsafe.Pointer(pIdx5)).FpNext + j4++ + goto __364 + goto __366 +__366: ; if !(pPk1 != 0) { - goto __367 + goto __368 } Xsqlite3ReleaseTempRange(tls, pParse, r2, int32((*Index)(unsafe.Pointer(pPk1)).FnKeyCol)) -__367: +__368: ; -__362: +__363: ; goto __297 __297: @@ -90185,14 +90218,14 @@ ; aOp2 = Xsqlite3VdbeAddOpList(tls, v, int32(uint64(unsafe.Sizeof(endCode))/uint64(unsafe.Sizeof(VdbeOpList{}))), uintptr(unsafe.Pointer(&endCode)), iLn5) if !(aOp2 != 0) { - goto __368 + goto __369 } (*VdbeOp)(unsafe.Pointer(aOp2)).Fp2 = 1 - *(*int32)(unsafe.Pointer(bp + 616)) (*VdbeOp)(unsafe.Pointer(aOp2 + 2*24)).Fp4type = int8(-1) - *(*uintptr)(unsafe.Pointer(aOp2 + 2*24 + 16)) = ts + 18464 + *(*uintptr)(unsafe.Pointer(aOp2 + 2*24 + 16)) = ts + 18511 (*VdbeOp)(unsafe.Pointer(aOp2 + 5*24)).Fp4type = int8(-1) *(*uintptr)(unsafe.Pointer(aOp2 + 5*24 + 16)) = Xsqlite3ErrStr(tls, SQLITE_CORRUPT) -__368: +__369: ; Xsqlite3VdbeChangeP3(tls, v, 0, Xsqlite3VdbeCurrentAddr(tls, v)-2) @@ -90200,27 +90233,27 @@ __46: if !!(zRight != 0) { - goto __369 + goto __370 } if !(Xsqlite3ReadSchema(tls, pParse) != 0) { - goto __371 + goto __372 } goto pragma_out -__371: +__372: ; returnSingleText(tls, v, encnames1[(*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).Fenc].FzName) - goto __370 -__369: + goto __371 +__370: if !((*Sqlite3)(unsafe.Pointer(db)).FmDbFlags&U32(DBFLAG_EncodingFixed) == U32(0)) { - goto __372 + goto __373 } pEnc = uintptr(unsafe.Pointer(&encnames1)) -__373: +__374: if 
!((*EncName)(unsafe.Pointer(pEnc)).FzName != 0) { - goto __375 + goto __376 } if !(0 == Xsqlite3StrICmp(tls, zRight, (*EncName)(unsafe.Pointer(pEnc)).FzName)) { - goto __376 + goto __377 } if (*EncName)(unsafe.Pointer(pEnc)).Fenc != 0 { enc = (*EncName)(unsafe.Pointer(pEnc)).Fenc @@ -90229,25 +90262,25 @@ } (*Schema)(unsafe.Pointer((*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb)).FpSchema)).Fenc = enc Xsqlite3SetTextEncoding(tls, db, enc) - goto __375 -__376: + goto __376 +__377: ; - goto __374 -__374: - pEnc += 16 - goto __373 goto __375 __375: + pEnc += 16 + goto __374 + goto __376 +__376: ; if !!(int32((*EncName)(unsafe.Pointer(pEnc)).FzName) != 0) { - goto __377 + goto __378 } - Xsqlite3ErrorMsg(tls, pParse, ts+18467, libc.VaList(bp+456, zRight)) -__377: + Xsqlite3ErrorMsg(tls, pParse, ts+18514, libc.VaList(bp+456, zRight)) +__378: ; -__372: +__373: ; -__370: +__371: ; goto __15 @@ -90255,15 +90288,15 @@ iCookie = int32((*PragmaName)(unsafe.Pointer(pPragma)).FiArg) Xsqlite3VdbeUsesBtree(tls, v, iDb) if !(zRight != 0 && int32((*PragmaName)(unsafe.Pointer(pPragma)).FmPragFlg)&PragFlg_ReadOnly == 0) { - goto __378 + goto __379 } aOp3 = Xsqlite3VdbeAddOpList(tls, v, int32(uint64(unsafe.Sizeof(setCookie))/uint64(unsafe.Sizeof(VdbeOpList{}))), uintptr(unsafe.Pointer(&setCookie)), 0) if !(0 != 0) { - goto __380 + goto __381 } goto __15 -__380: +__381: ; (*VdbeOp)(unsafe.Pointer(aOp3)).Fp1 = iDb (*VdbeOp)(unsafe.Pointer(aOp3 + 1*24)).Fp1 = iDb @@ -90271,41 +90304,41 @@ (*VdbeOp)(unsafe.Pointer(aOp3 + 1*24)).Fp3 = Xsqlite3Atoi(tls, zRight) (*VdbeOp)(unsafe.Pointer(aOp3 + 1*24)).Fp5 = U16(1) if !(iCookie == BTREE_SCHEMA_VERSION && (*Sqlite3)(unsafe.Pointer(db)).Fflags&uint64(SQLITE_Defensive) != uint64(0)) { - goto __381 + goto __382 } (*VdbeOp)(unsafe.Pointer(aOp3 + 1*24)).Fopcode = U8(OP_Noop) -__381: +__382: ; - goto __379 -__378: + goto __380 +__379: ; aOp4 = Xsqlite3VdbeAddOpList(tls, v, int32(uint64(unsafe.Sizeof(readCookie))/uint64(unsafe.Sizeof(VdbeOpList{}))), uintptr(unsafe.Pointer(&readCookie)), 0) if !(0 != 0) { - goto __382 + goto __383 } goto __15 -__382: +__383: ; (*VdbeOp)(unsafe.Pointer(aOp4)).Fp1 = iDb (*VdbeOp)(unsafe.Pointer(aOp4 + 1*24)).Fp1 = iDb (*VdbeOp)(unsafe.Pointer(aOp4 + 1*24)).Fp3 = iCookie Xsqlite3VdbeReusable(tls, v) -__379: +__380: ; goto __15 __48: i10 = 0 (*Parse)(unsafe.Pointer(pParse)).FnMem = 1 -__383: +__384: if !(libc.AssignUintptr(&zOpt, Xsqlite3_compileoption_get(tls, libc.PostIncInt32(&i10, 1))) != uintptr(0)) { - goto __384 + goto __385 } Xsqlite3VdbeLoadString(tls, v, 1, zOpt) Xsqlite3VdbeAddOp2(tls, v, OP_ResultRow, 1, 1) - goto __383 -__384: + goto __384 +__385: ; Xsqlite3VdbeReusable(tls, v) @@ -90320,31 +90353,31 @@ }() eMode2 = SQLITE_CHECKPOINT_PASSIVE if !(zRight != 0) { - goto __385 + goto __386 } if !(Xsqlite3StrICmp(tls, zRight, ts+17767) == 0) { - goto __386 + goto __387 } eMode2 = SQLITE_CHECKPOINT_FULL - goto __387 -__386: - if !(Xsqlite3StrICmp(tls, zRight, ts+18492) == 0) { - goto __388 + goto __388 +__387: + if !(Xsqlite3StrICmp(tls, zRight, ts+18539) == 0) { + goto __389 } eMode2 = SQLITE_CHECKPOINT_RESTART - goto __389 -__388: + goto __390 +__389: if !(Xsqlite3StrICmp(tls, zRight, ts+17920) == 0) { - goto __390 + goto __391 } eMode2 = SQLITE_CHECKPOINT_TRUNCATE -__390: +__391: ; -__389: +__390: ; -__387: +__388: ; -__385: +__386: ; (*Parse)(unsafe.Pointer(pParse)).FnMem = 3 Xsqlite3VdbeAddOp3(tls, v, OP_Checkpoint, iBt, eMode2, 1) @@ -90354,10 +90387,10 @@ __50: if !(zRight != 0) { - goto __391 + goto __392 } 
Xsqlite3_wal_autocheckpoint(tls, db, Xsqlite3Atoi(tls, zRight)) -__391: +__392: ; returnSingleInt(tls, v, func() int64 { @@ -90377,19 +90410,19 @@ __52: if !(zRight != 0) { - goto __392 + goto __393 } opMask = U32(Xsqlite3Atoi(tls, zRight)) if !(opMask&U32(0x02) == U32(0)) { - goto __394 + goto __395 } goto __15 -__394: +__395: ; - goto __393 -__392: - opMask = U32(0xfffe) + goto __394 __393: + opMask = U32(0xfffe) +__394: ; iTabCur = libc.PostIncInt32(&(*Parse)(unsafe.Pointer(pParse)).FnTab, 1) iDbLast = func() int32 { @@ -90398,86 +90431,86 @@ } return (*Sqlite3)(unsafe.Pointer(db)).FnDb - 1 }() -__395: +__396: if !(iDb <= iDbLast) { - goto __397 + goto __398 } if !(iDb == 1) { - goto __398 + goto __399 } - goto __396 -__398: + goto __397 +__399: ; Xsqlite3CodeVerifySchema(tls, pParse, iDb) pSchema = (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + uintptr(iDb)*32)).FpSchema k4 = (*Hash)(unsafe.Pointer(pSchema + 8)).Ffirst -__399: +__400: if !(k4 != 0) { - goto __401 + goto __402 } pTab10 = (*HashElem)(unsafe.Pointer(k4)).Fdata if !((*Table)(unsafe.Pointer(pTab10)).FtabFlags&U32(TF_StatsUsed) == U32(0)) { - goto __402 + goto __403 } - goto __400 -__402: + goto __401 +__403: ; szThreshold = LogEst(int32((*Table)(unsafe.Pointer(pTab10)).FnRowLogEst) + 46) pIdx6 = (*Table)(unsafe.Pointer(pTab10)).FpIndex -__403: +__404: if !(pIdx6 != 0) { - goto __405 + goto __406 } if !!(int32(*(*uint16)(unsafe.Pointer(pIdx6 + 100))&0x80>>7) != 0) { - goto __406 + goto __407 } szThreshold = int16(0) - goto __405 -__406: + goto __406 +__407: ; - goto __404 -__404: - pIdx6 = (*Index)(unsafe.Pointer(pIdx6)).FpNext - goto __403 goto __405 __405: + pIdx6 = (*Index)(unsafe.Pointer(pIdx6)).FpNext + goto __404 + goto __406 +__406: ; if !(szThreshold != 0) { - goto __407 + goto __408 } Xsqlite3OpenTable(tls, pParse, iTabCur, iDb, pTab10, OP_OpenRead) Xsqlite3VdbeAddOp3(tls, v, OP_IfSmaller, iTabCur, int32(U32(Xsqlite3VdbeCurrentAddr(tls, v)+2)+opMask&U32(1)), int32(szThreshold)) -__407: +__408: ; - zSubSql = Xsqlite3MPrintf(tls, db, ts+18500, + zSubSql = Xsqlite3MPrintf(tls, db, ts+18547, libc.VaList(bp+464, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName, (*Table)(unsafe.Pointer(pTab10)).FzName)) if !(opMask&U32(0x01) != 0) { - goto __408 + goto __409 } r11 = Xsqlite3GetTempReg(tls, pParse) Xsqlite3VdbeAddOp4(tls, v, OP_String8, 0, r11, 0, zSubSql, -6) Xsqlite3VdbeAddOp2(tls, v, OP_ResultRow, r11, 1) - goto __409 -__408: - Xsqlite3VdbeAddOp4(tls, v, OP_SqlExec, 0, 0, 0, zSubSql, -6) + goto __410 __409: + Xsqlite3VdbeAddOp4(tls, v, OP_SqlExec, 0, 0, 0, zSubSql, -6) +__410: ; - goto __400 -__400: - k4 = (*HashElem)(unsafe.Pointer(k4)).Fnext - goto __399 goto __401 __401: + k4 = (*HashElem)(unsafe.Pointer(k4)).Fnext + goto __400 + goto __402 +__402: ; - goto __396 -__396: - iDb++ - goto __395 goto __397 __397: + iDb++ + goto __396 + goto __398 +__398: ; Xsqlite3VdbeAddOp0(tls, v, OP_Expire) goto __15 @@ -90485,36 +90518,36 @@ __53: ; if !(zRight != 0) { - goto __410 + goto __411 } Xsqlite3_busy_timeout(tls, db, Xsqlite3Atoi(tls, zRight)) -__410: +__411: ; returnSingleInt(tls, v, int64((*Sqlite3)(unsafe.Pointer(db)).FbusyTimeout)) goto __15 __54: if !(zRight != 0 && Xsqlite3DecOrHexToI64(tls, zRight, bp+648) == SQLITE_OK) { - goto __411 + goto __412 } Xsqlite3_soft_heap_limit64(tls, *(*Sqlite3_int64)(unsafe.Pointer(bp + 648))) -__411: +__412: ; returnSingleInt(tls, v, Xsqlite3_soft_heap_limit64(tls, int64(-1))) goto __15 __55: if !(zRight != 0 && 
Xsqlite3DecOrHexToI64(tls, zRight, bp+656) == SQLITE_OK) { - goto __412 + goto __413 } iPrior = Xsqlite3_hard_heap_limit64(tls, int64(-1)) if !(*(*Sqlite3_int64)(unsafe.Pointer(bp + 656)) > int64(0) && (iPrior == int64(0) || iPrior > *(*Sqlite3_int64)(unsafe.Pointer(bp + 656)))) { - goto __413 + goto __414 } Xsqlite3_hard_heap_limit64(tls, *(*Sqlite3_int64)(unsafe.Pointer(bp + 656))) -__413: +__414: ; -__412: +__413: ; returnSingleInt(tls, v, Xsqlite3_hard_heap_limit64(tls, int64(-1))) goto __15 @@ -90523,10 +90556,10 @@ if !(zRight != 0 && Xsqlite3DecOrHexToI64(tls, zRight, bp+664) == SQLITE_OK && *(*Sqlite3_int64)(unsafe.Pointer(bp + 664)) >= int64(0)) { - goto __414 + goto __415 } Xsqlite3_limit(tls, db, SQLITE_LIMIT_WORKER_THREADS, int32(*(*Sqlite3_int64)(unsafe.Pointer(bp + 664))&int64(0x7fffffff))) -__414: +__415: ; returnSingleInt(tls, v, int64(Xsqlite3_limit(tls, db, SQLITE_LIMIT_WORKER_THREADS, -1))) goto __15 @@ -90535,10 +90568,10 @@ if !(zRight != 0 && Xsqlite3DecOrHexToI64(tls, zRight, bp+672) == SQLITE_OK && *(*Sqlite3_int64)(unsafe.Pointer(bp + 672)) >= int64(0)) { - goto __415 + goto __416 } (*Sqlite3)(unsafe.Pointer(db)).FnAnalysisLimit = int32(*(*Sqlite3_int64)(unsafe.Pointer(bp + 672)) & int64(0x7fffffff)) -__415: +__416: ; returnSingleInt(tls, v, int64((*Sqlite3)(unsafe.Pointer(db)).FnAnalysisLimit)) goto __15 @@ -90546,10 +90579,10 @@ __15: ; if !(int32((*PragmaName)(unsafe.Pointer(pPragma)).FmPragFlg)&PragFlg_NoColumns1 != 0 && zRight != 0) { - goto __416 + goto __417 } -__416: +__417: ; pragma_out: Xsqlite3DbFree(tls, db, zLeft) @@ -90601,14 +90634,14 @@ {Fopcode: U8(OP_Goto), Fp2: int8(3)}, } var encnames1 = [9]EncName{ - {FzName: ts + 18518, Fenc: U8(SQLITE_UTF8)}, - {FzName: ts + 18523, Fenc: U8(SQLITE_UTF8)}, - {FzName: ts + 18529, Fenc: U8(SQLITE_UTF16LE)}, - {FzName: ts + 18538, Fenc: U8(SQLITE_UTF16BE)}, - {FzName: ts + 18547, Fenc: U8(SQLITE_UTF16LE)}, - {FzName: ts + 18555, Fenc: U8(SQLITE_UTF16BE)}, - {FzName: ts + 18563}, - {FzName: ts + 18570}, + {FzName: ts + 18565, Fenc: U8(SQLITE_UTF8)}, + {FzName: ts + 18570, Fenc: U8(SQLITE_UTF8)}, + {FzName: ts + 18576, Fenc: U8(SQLITE_UTF16LE)}, + {FzName: ts + 18585, Fenc: U8(SQLITE_UTF16BE)}, + {FzName: ts + 18594, Fenc: U8(SQLITE_UTF16LE)}, + {FzName: ts + 18602, Fenc: U8(SQLITE_UTF16BE)}, + {FzName: ts + 18610}, + {FzName: ts + 18617}, {}, } var setCookie = [2]VdbeOpList{ @@ -90660,7 +90693,7 @@ _ = argc _ = argv Xsqlite3StrAccumInit(tls, bp+32, uintptr(0), bp+64, int32(unsafe.Sizeof([200]int8{})), 0) - Xsqlite3_str_appendall(tls, bp+32, ts+18576) + Xsqlite3_str_appendall(tls, bp+32, ts+18623) i = 0 j = int32((*PragmaName)(unsafe.Pointer(pPragma)).FiPragCName) __1: @@ -90668,7 +90701,7 @@ goto __3 } { - Xsqlite3_str_appendf(tls, bp+32, ts+18591, libc.VaList(bp, int32(cSep), pragCName[j])) + Xsqlite3_str_appendf(tls, bp+32, ts+18638, libc.VaList(bp, int32(cSep), pragCName[j])) cSep = int8(',') } @@ -90681,16 +90714,16 @@ __3: ; if i == 0 { - Xsqlite3_str_appendf(tls, bp+32, ts+18598, libc.VaList(bp+16, (*PragmaName)(unsafe.Pointer(pPragma)).FzName)) + Xsqlite3_str_appendf(tls, bp+32, ts+18645, libc.VaList(bp+16, (*PragmaName)(unsafe.Pointer(pPragma)).FzName)) i++ } j = 0 if int32((*PragmaName)(unsafe.Pointer(pPragma)).FmPragFlg)&PragFlg_Result1 != 0 { - Xsqlite3_str_appendall(tls, bp+32, ts+18604) + Xsqlite3_str_appendall(tls, bp+32, ts+18651) j++ } if int32((*PragmaName)(unsafe.Pointer(pPragma)).FmPragFlg)&(PragFlg_SchemaOpt|PragFlg_SchemaReq) != 0 { - Xsqlite3_str_appendall(tls, bp+32, ts+18616) + 
Xsqlite3_str_appendall(tls, bp+32, ts+18663) j++ } Xsqlite3_str_append(tls, bp+32, ts+5360, 1) @@ -90873,13 +90906,13 @@ __3: ; Xsqlite3StrAccumInit(tls, bp+32, uintptr(0), uintptr(0), 0, *(*int32)(unsafe.Pointer((*PragmaVtab)(unsafe.Pointer(pTab)).Fdb + 136 + 1*4))) - Xsqlite3_str_appendall(tls, bp+32, ts+18631) + Xsqlite3_str_appendall(tls, bp+32, ts+18678) if *(*uintptr)(unsafe.Pointer(pCsr + 24 + 1*8)) != 0 { - Xsqlite3_str_appendf(tls, bp+32, ts+18639, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(pCsr + 24 + 1*8)))) + Xsqlite3_str_appendf(tls, bp+32, ts+18686, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(pCsr + 24 + 1*8)))) } Xsqlite3_str_appendall(tls, bp+32, (*PragmaName)(unsafe.Pointer((*PragmaVtab)(unsafe.Pointer(pTab)).FpName)).FzName) if *(*uintptr)(unsafe.Pointer(pCsr + 24)) != 0 { - Xsqlite3_str_appendf(tls, bp+32, ts+18643, libc.VaList(bp+16, *(*uintptr)(unsafe.Pointer(pCsr + 24)))) + Xsqlite3_str_appendf(tls, bp+32, ts+18690, libc.VaList(bp+16, *(*uintptr)(unsafe.Pointer(pCsr + 24)))) } zSql = Xsqlite3StrAccumFinish(tls, bp+32) if zSql == uintptr(0) { @@ -90956,12 +90989,12 @@ } else if *(*uintptr)(unsafe.Pointer((*InitData)(unsafe.Pointer(pData)).FpzErrMsg)) != uintptr(0) { } else if (*InitData)(unsafe.Pointer(pData)).FmInitFlags&U32(INITFLAG_AlterMask) != 0 { *(*uintptr)(unsafe.Pointer((*InitData)(unsafe.Pointer(pData)).FpzErrMsg)) = Xsqlite3MPrintf(tls, db, - ts+18647, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(azObj)), *(*uintptr)(unsafe.Pointer(azObj + 1*8)), + ts+18694, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(azObj)), *(*uintptr)(unsafe.Pointer(azObj + 1*8)), azAlterType[(*InitData)(unsafe.Pointer(pData)).FmInitFlags&U32(INITFLAG_AlterMask)-U32(1)], zExtra)) (*InitData)(unsafe.Pointer(pData)).Frc = SQLITE_ERROR } else if (*Sqlite3)(unsafe.Pointer(db)).Fflags&uint64(SQLITE_WriteSchema) != 0 { - (*InitData)(unsafe.Pointer(pData)).Frc = Xsqlite3CorruptError(tls, 137196) + (*InitData)(unsafe.Pointer(pData)).Frc = Xsqlite3CorruptError(tls, 137249) } else { var z uintptr var zObj uintptr @@ -90970,19 +91003,19 @@ } else { zObj = ts + 5411 } - z = Xsqlite3MPrintf(tls, db, ts+18675, libc.VaList(bp+32, zObj)) + z = Xsqlite3MPrintf(tls, db, ts+18722, libc.VaList(bp+32, zObj)) if zExtra != 0 && *(*int8)(unsafe.Pointer(zExtra)) != 0 { - z = Xsqlite3MPrintf(tls, db, ts+18706, libc.VaList(bp+40, z, zExtra)) + z = Xsqlite3MPrintf(tls, db, ts+18753, libc.VaList(bp+40, z, zExtra)) } *(*uintptr)(unsafe.Pointer((*InitData)(unsafe.Pointer(pData)).FpzErrMsg)) = z - (*InitData)(unsafe.Pointer(pData)).Frc = Xsqlite3CorruptError(tls, 137203) + (*InitData)(unsafe.Pointer(pData)).Frc = Xsqlite3CorruptError(tls, 137256) } } var azAlterType = [3]uintptr{ - ts + 18714, - ts + 18721, - ts + 18733, + ts + 18761, + ts + 18768, + ts + 18780, } // Check to see if any sibling index (another index on the same table) @@ -91074,7 +91107,7 @@ var pIndex uintptr pIndex = Xsqlite3FindIndex(tls, db, *(*uintptr)(unsafe.Pointer(argv + 1*8)), (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName) if pIndex == uintptr(0) { - corruptSchema(tls, pData, argv, ts+18744) + corruptSchema(tls, pData, argv, ts+18791) } else if Xsqlite3GetUInt32(tls, *(*uintptr)(unsafe.Pointer(argv + 3*8)), pIndex+88) == 0 || (*Index)(unsafe.Pointer(pIndex)).Ftnum < Pgno(2) || (*Index)(unsafe.Pointer(pIndex)).Ftnum > (*InitData)(unsafe.Pointer(pData)).FmxPage || @@ -91122,7 +91155,7 @@ }()) *(*uintptr)(unsafe.Pointer(bp + 16 + 2*8)) = *(*uintptr)(unsafe.Pointer(bp + 16 + 1*8)) *(*uintptr)(unsafe.Pointer(bp 
+ 16 + 3*8)) = ts + 8341 - *(*uintptr)(unsafe.Pointer(bp + 16 + 4*8)) = ts + 18757 + *(*uintptr)(unsafe.Pointer(bp + 16 + 4*8)) = ts + 18804 *(*uintptr)(unsafe.Pointer(bp + 16 + 5*8)) = uintptr(0) (*InitData)(unsafe.Pointer(bp + 64)).Fdb = db (*InitData)(unsafe.Pointer(bp + 64)).FiDb = iDb @@ -91251,7 +91284,7 @@ if !(int32((*Schema)(unsafe.Pointer((*Db)(unsafe.Pointer(pDb)).FpSchema)).Ffile_format) > SQLITE_MAX_FILE_FORMAT) { goto __19 } - Xsqlite3SetString(tls, pzErrMsg, db, ts+18829) + Xsqlite3SetString(tls, pzErrMsg, db, ts+18876) rc = SQLITE_ERROR goto initone_error_out __19: @@ -91265,7 +91298,7 @@ (*InitData)(unsafe.Pointer(bp + 64)).FmxPage = Xsqlite3BtreeLastPage(tls, (*Db)(unsafe.Pointer(pDb)).FpBt) zSql = Xsqlite3MPrintf(tls, db, - ts+18853, + ts+18900, libc.VaList(bp, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName, zSchemaTabName)) xAuth = (*Sqlite3)(unsafe.Pointer(db)).FxAuth @@ -91597,7 +91630,7 @@ goto __8 } zDb = (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + uintptr(i)*32)).FzDbSName - Xsqlite3ErrorWithMsg(tls, db, rc, ts+18887, libc.VaList(bp, zDb)) + Xsqlite3ErrorWithMsg(tls, db, rc, ts+18934, libc.VaList(bp, zDb)) goto end_prepare __8: @@ -91627,7 +91660,7 @@ if !(nBytes > mxLen) { goto __12 } - Xsqlite3ErrorWithMsg(tls, db, SQLITE_TOOBIG, ts+18917, 0) + Xsqlite3ErrorWithMsg(tls, db, SQLITE_TOOBIG, ts+18964, 0) rc = Xsqlite3ApiExit(tls, db, SQLITE_TOOBIG) goto end_prepare __12: @@ -91723,7 +91756,7 @@ *(*uintptr)(unsafe.Pointer(ppStmt)) = uintptr(0) if !(Xsqlite3SafetyCheckOk(tls, db) != 0) || zSql == uintptr(0) { - return Xsqlite3MisuseError(tls, 137995) + return Xsqlite3MisuseError(tls, 138048) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) Xsqlite3BtreeEnterAll(tls, db) @@ -91822,7 +91855,7 @@ *(*uintptr)(unsafe.Pointer(ppStmt)) = uintptr(0) if !(Xsqlite3SafetyCheckOk(tls, db) != 0) || zSql == uintptr(0) { - return Xsqlite3MisuseError(tls, 138143) + return Xsqlite3MisuseError(tls, 138196) } if nBytes >= 0 { var sz int32 @@ -92149,13 +92182,13 @@ zSp2++ } Xsqlite3ErrorMsg(tls, pParse, - ts+18936, libc.VaList(bp, pA, zSp1, pB, zSp2, pC)) + ts+18983, libc.VaList(bp, pA, zSp1, pB, zSp2, pC)) jointype = JT_INNER } return jointype } -var zKeyText = *(*[34]int8)(unsafe.Pointer(ts + 18966)) +var zKeyText = *(*[34]int8)(unsafe.Pointer(ts + 19013)) var aKeyword = [7]struct { Fi U8 FnChar U8 @@ -92330,7 +92363,7 @@ var pUsing uintptr = uintptr(0) if uint32(int32(*(*uint16)(unsafe.Pointer(pRight + 60 + 4))&0x400>>10)) != 0 || *(*uintptr)(unsafe.Pointer(pRight + 72)) != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+19000, libc.VaList(bp, 0)) + ts+19047, libc.VaList(bp, 0)) return 1 } for j = 0; j < int32((*Table)(unsafe.Pointer(pRightTab)).FnCol); j++ { @@ -92375,7 +92408,7 @@ tableAndColumnIndex(tls, pSrc, 0, i, zName, bp+24, bp+28, int32(*(*uint16)(unsafe.Pointer(pRight + 60 + 4))&0x1000>>12)) == 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+19050, libc.VaList(bp+8, zName)) + ts+19097, libc.VaList(bp+8, zName)) return 1 } pE1 = Xsqlite3CreateColumnExpr(tls, db, pSrc, *(*int32)(unsafe.Pointer(bp + 24)), *(*int32)(unsafe.Pointer(bp + 28))) @@ -92386,7 +92419,7 @@ int32(*(*uint16)(unsafe.Pointer(pRight + 60 + 4))&0x1000>>12)) != 0 { if int32(*(*uint16)(unsafe.Pointer(pSrc + 8 + uintptr(*(*int32)(unsafe.Pointer(bp + 24)))*104 + 60 + 4))&0x400>>10) == 0 || Xsqlite3IdListIndex(tls, *(*uintptr)(unsafe.Pointer(pSrc + 8 + uintptr(*(*int32)(unsafe.Pointer(bp + 24)))*104 + 72)), zName) < 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+19114, 
+ Xsqlite3ErrorMsg(tls, pParse, ts+19161, libc.VaList(bp+16, zName)) break } @@ -93014,16 +93047,16 @@ var z uintptr switch id { case TK_ALL: - z = ts + 19151 + z = ts + 19198 break case TK_INTERSECT: - z = ts + 19161 + z = ts + 19208 break case TK_EXCEPT: - z = ts + 19171 + z = ts + 19218 break default: - z = ts + 19178 + z = ts + 19225 break } return z @@ -93033,7 +93066,7 @@ bp := tls.Alloc(8) defer tls.Free(8) - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+19184, libc.VaList(bp, zUsage)) + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+19231, libc.VaList(bp, zUsage)) } func generateSortTail(tls *libc.TLS, pParse uintptr, p uintptr, pSort uintptr, nColumn int32, pDest uintptr) { @@ -93059,9 +93092,9 @@ var nRefKey int32 = 0 var aOutEx uintptr = (*Select)(unsafe.Pointer(p)).FpEList + 8 - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+19207, libc.VaList(bp, func() uintptr { + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+19254, libc.VaList(bp, func() uintptr { if (*SortCtx)(unsafe.Pointer(pSort)).FnOBSat > 0 { - return ts + 19238 + return ts + 19285 } return ts + 1547 }())) @@ -93405,7 +93438,7 @@ } else { var z uintptr = (*ExprList_item)(unsafe.Pointer(pEList + 8 + uintptr(i)*32)).FzEName if z == uintptr(0) { - z = Xsqlite3MPrintf(tls, db, ts+19253, libc.VaList(bp+16, i+1)) + z = Xsqlite3MPrintf(tls, db, ts+19300, libc.VaList(bp+16, i+1)) } else { z = Xsqlite3DbStrDup(tls, db, z) } @@ -93505,7 +93538,7 @@ if zName != 0 && !(Xsqlite3IsTrueOrFalse(tls, zName) != 0) { zName = Xsqlite3DbStrDup(tls, db, zName) } else { - zName = Xsqlite3MPrintf(tls, db, ts+19253, libc.VaList(bp, i+1)) + zName = Xsqlite3MPrintf(tls, db, ts+19300, libc.VaList(bp, i+1)) } *(*U32)(unsafe.Pointer(bp + 56)) = U32(0) @@ -93521,7 +93554,7 @@ nName = j } } - zName = Xsqlite3MPrintf(tls, db, ts+19262, libc.VaList(bp+8, nName, zName, libc.PreIncUint32(&*(*U32)(unsafe.Pointer(bp + 56)), 1))) + zName = Xsqlite3MPrintf(tls, db, ts+19309, libc.VaList(bp+8, nName, zName, libc.PreIncUint32(&*(*U32)(unsafe.Pointer(bp + 56)), 1))) Xsqlite3ProgressCheck(tls, pParse) if *(*U32)(unsafe.Pointer(bp + 56)) > U32(3) { Xsqlite3_randomness(tls, int32(unsafe.Sizeof(U32(0))), bp+56) @@ -93604,8 +93637,6 @@ (*Column)(unsafe.Pointer(pCol)).Faffinity = Xsqlite3ExprAffinity(tls, p) if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) <= SQLITE_AFF_NONE { (*Column)(unsafe.Pointer(pCol)).Faffinity = aff - } else if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) >= SQLITE_AFF_NUMERIC && int32((*Expr)(unsafe.Pointer(p)).Fop) == TK_CAST { - (*Column)(unsafe.Pointer(pCol)).Faffinity = int8(SQLITE_AFF_FLEXNUM) } if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) >= SQLITE_AFF_TEXT && (*Select)(unsafe.Pointer(pSelect)).FpNext != 0 { var m int32 = 0 @@ -93620,12 +93651,15 @@ } else if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) >= SQLITE_AFF_NUMERIC && m&0x02 != 0 { (*Column)(unsafe.Pointer(pCol)).Faffinity = int8(SQLITE_AFF_BLOB) } + if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) >= SQLITE_AFF_NUMERIC && int32((*Expr)(unsafe.Pointer(p)).Fop) == TK_CAST { + (*Column)(unsafe.Pointer(pCol)).Faffinity = int8(SQLITE_AFF_FLEXNUM) + } } zType = columnTypeImpl(tls, bp, p, uintptr(0), uintptr(0), uintptr(0)) if zType == uintptr(0) || int32((*Column)(unsafe.Pointer(pCol)).Faffinity) != int32(Xsqlite3AffinityType(tls, zType, uintptr(0))) { if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) == SQLITE_AFF_NUMERIC || int32((*Column)(unsafe.Pointer(pCol)).Faffinity) == SQLITE_AFF_FLEXNUM { - zType = ts + 19270 + zType = ts + 19317 } else { zType = 
uintptr(0) for j = 1; j < SQLITE_N_STDTYPE; j++ { @@ -93841,7 +93875,7 @@ if !((*Select)(unsafe.Pointer(p)).FpWin != 0) { goto __1 } - Xsqlite3ErrorMsg(tls, pParse, ts+19274, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+19321, 0) return __1: ; @@ -93932,7 +93966,7 @@ if !((*Select)(unsafe.Pointer(pFirstRec)).FselFlags&U32(SF_Aggregate) != 0) { goto __15 } - Xsqlite3ErrorMsg(tls, pParse, ts+19323, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+19370, 0) goto end_of_recursive_query __15: ; @@ -93952,7 +93986,7 @@ ; pSetup = (*Select)(unsafe.Pointer(pFirstRec)).FpPrior (*Select)(unsafe.Pointer(pSetup)).FpNext = uintptr(0) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19365, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19412, 0) rc = Xsqlite3Select(tls, pParse, pSetup, bp) (*Select)(unsafe.Pointer(pSetup)).FpNext = p if !(rc != 0) { @@ -93989,7 +94023,7 @@ Xsqlite3VdbeResolveLabel(tls, v, addrCont) (*Select)(unsafe.Pointer(pFirstRec)).FpPrior = uintptr(0) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19371, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19418, 0) Xsqlite3Select(tls, pParse, p, bp) (*Select)(unsafe.Pointer(pFirstRec)).FpPrior = pSetup @@ -94023,7 +94057,7 @@ p = (*Select)(unsafe.Pointer(p)).FpPrior nRow = nRow + bShowAll } - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+19386, libc.VaList(bp, nRow, func() uintptr { + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+19433, libc.VaList(bp, nRow, func() uintptr { if nRow == 1 { return ts + 1547 } @@ -94128,8 +94162,8 @@ if !((*Select)(unsafe.Pointer(pPrior)).FpPrior == uintptr(0)) { goto __8 } - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19409, 0) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19424, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19456, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19471, 0) __8: ; switch int32((*Select)(unsafe.Pointer(p)).Fop) { @@ -94176,7 +94210,7 @@ ; __15: ; - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19151, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19198, 0) rc = Xsqlite3Select(tls, pParse, p, bp+16) @@ -94243,7 +94277,7 @@ pLimit = (*Select)(unsafe.Pointer(p)).FpLimit (*Select)(unsafe.Pointer(p)).FpLimit = uintptr(0) (*SelectDest)(unsafe.Pointer(bp + 64)).FeDest = op - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19443, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19490, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) rc = Xsqlite3Select(tls, pParse, p, bp+64) @@ -94305,7 +94339,7 @@ pLimit1 = (*Select)(unsafe.Pointer(p)).FpLimit (*Select)(unsafe.Pointer(p)).FpLimit = uintptr(0) (*SelectDest)(unsafe.Pointer(bp + 104)).FiSDParm = tab2 - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19443, libc.VaList(bp+8, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19490, libc.VaList(bp+8, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) rc = Xsqlite3Select(tls, pParse, p, bp+104) @@ -94458,10 +94492,10 @@ defer tls.Free(8) if (*Select)(unsafe.Pointer(p)).FselFlags&U32(SF_Values) != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+19464, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+19511, 0) } else { Xsqlite3ErrorMsg(tls, pParse, - ts+19510, + ts+19557, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) } } @@ -94715,8 +94749,8 @@ (*Select)(unsafe.Pointer(pPrior)).FpNext = uintptr(0) (*Select)(unsafe.Pointer(pPrior)).FpOrderBy = Xsqlite3ExprListDup(tls, 
(*Parse)(unsafe.Pointer(pParse)).Fdb, pOrderBy, 0) - Xsqlite3ResolveOrderGroupBy(tls, pParse, p, (*Select)(unsafe.Pointer(p)).FpOrderBy, ts+7637) - Xsqlite3ResolveOrderGroupBy(tls, pParse, pPrior, (*Select)(unsafe.Pointer(pPrior)).FpOrderBy, ts+7637) + Xsqlite3ResolveOrderGroupBy(tls, pParse, p, (*Select)(unsafe.Pointer(p)).FpOrderBy, ts+7626) + Xsqlite3ResolveOrderGroupBy(tls, pParse, pPrior, (*Select)(unsafe.Pointer(pPrior)).FpOrderBy, ts+7626) computeLimitRegisters(tls, pParse, p, labelEnd) if (*Select)(unsafe.Pointer(p)).FiLimit != 0 && op == TK_ALL { @@ -94743,13 +94777,13 @@ Xsqlite3SelectDestInit(tls, bp+8, SRT_Coroutine, regAddrA) Xsqlite3SelectDestInit(tls, bp+48, SRT_Coroutine, regAddrB) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19592, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19639, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) addrSelectA = Xsqlite3VdbeCurrentAddr(tls, v) + 1 addr1 = Xsqlite3VdbeAddOp3(tls, v, OP_InitCoroutine, regAddrA, 0, addrSelectA) (*Select)(unsafe.Pointer(pPrior)).FiLimit = regLimitA - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19603, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19650, 0) Xsqlite3Select(tls, pParse, pPrior, bp+8) Xsqlite3VdbeEndCoroutine(tls, v, regAddrA) Xsqlite3VdbeJumpHere(tls, v, addr1) @@ -94761,7 +94795,7 @@ savedOffset = (*Select)(unsafe.Pointer(p)).FiOffset (*Select)(unsafe.Pointer(p)).FiLimit = regLimitB (*Select)(unsafe.Pointer(p)).FiOffset = 0 - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19608, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19655, 0) Xsqlite3Select(tls, pParse, p, bp+48) (*Select)(unsafe.Pointer(p)).FiLimit = savedLimit (*Select)(unsafe.Pointer(p)).FiOffset = savedOffset @@ -94949,7 +94983,8 @@ Xsqlite3VectorErrorMsg(tls, (*SubstContext)(unsafe.Pointer(pSubst)).FpParse, pCopy) } else { var db uintptr = (*Parse)(unsafe.Pointer((*SubstContext)(unsafe.Pointer(pSubst)).FpParse)).Fdb - if (*SubstContext)(unsafe.Pointer(pSubst)).FisOuterJoin != 0 && int32((*Expr)(unsafe.Pointer(pCopy)).Fop) != TK_COLUMN { + if (*SubstContext)(unsafe.Pointer(pSubst)).FisOuterJoin != 0 && + (int32((*Expr)(unsafe.Pointer(pCopy)).Fop) != TK_COLUMN || (*Expr)(unsafe.Pointer(pCopy)).FiTable != (*SubstContext)(unsafe.Pointer(pSubst)).FiNewTable) { libc.X__builtin___memset_chk(tls, bp, 0, uint64(unsafe.Sizeof(Expr{})), libc.X__builtin_object_size(tls, bp, 0)) (*Expr)(unsafe.Pointer(bp)).Fop = U8(TK_IF_NULL_ROW) (*Expr)(unsafe.Pointer(bp)).FpLeft = pCopy @@ -95848,7 +95883,7 @@ for pIdx = (*Table)(unsafe.Pointer(pTab)).FpIndex; pIdx != 0 && Xsqlite3StrICmp(tls, (*Index)(unsafe.Pointer(pIdx)).FzName, zIndexedBy) != 0; pIdx = (*Index)(unsafe.Pointer(pIdx)).FpNext { } if !(pIdx != 0) { - Xsqlite3ErrorMsg(tls, pParse, ts+19614, libc.VaList(bp, zIndexedBy, 0)) + Xsqlite3ErrorMsg(tls, pParse, ts+19661, libc.VaList(bp, zIndexedBy, 0)) (*Parse)(unsafe.Pointer(pParse)).FcheckSchema = U8(1) return SQLITE_ERROR } @@ -95931,7 +95966,7 @@ defer tls.Free(8) if uint32(int32(*(*uint16)(unsafe.Pointer(pFrom + 60 + 4))&0x4>>2)) != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+19632, libc.VaList(bp, (*SrcItem)(unsafe.Pointer(pFrom)).FzName)) + Xsqlite3ErrorMsg(tls, pParse, ts+19679, libc.VaList(bp, (*SrcItem)(unsafe.Pointer(pFrom)).FzName)) return 1 } return 0 @@ -96060,7 +96095,7 @@ *(*U32)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pFrom)).FpSelect + 4)) |= U32(SF_CopyCte) if 
uint32(int32(*(*uint16)(unsafe.Pointer(pFrom + 60 + 4))&0x2>>1)) != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+19655, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(pFrom + 88)))) + Xsqlite3ErrorMsg(tls, pParse, ts+19702, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(pFrom + 88)))) return 2 } libc.SetBitFieldPtr16Uint32(pFrom+60+4, uint32(1), 8, 0x100) @@ -96083,7 +96118,7 @@ libc.SetBitFieldPtr16Uint32(pItem+60+4, uint32(1), 6, 0x40) if (*Select)(unsafe.Pointer(pRecTerm)).FselFlags&U32(SF_Recursive) != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+19675, libc.VaList(bp+16, (*Cte)(unsafe.Pointer(pCte)).FzName)) + ts+19722, libc.VaList(bp+16, (*Cte)(unsafe.Pointer(pCte)).FzName)) return 2 } *(*U32)(unsafe.Pointer(pRecTerm + 4)) |= U32(SF_Recursive) @@ -96099,7 +96134,7 @@ pRecTerm = (*Select)(unsafe.Pointer(pRecTerm)).FpPrior } - (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19718 + (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19765 pSavedWith = (*Parse)(unsafe.Pointer(pParse)).FpWith (*Parse)(unsafe.Pointer(pParse)).FpWith = *(*uintptr)(unsafe.Pointer(bp + 48)) if (*Select)(unsafe.Pointer(pSel)).FselFlags&U32(SF_Recursive) != 0 { @@ -96125,7 +96160,7 @@ pEList = (*Select)(unsafe.Pointer(pLeft)).FpEList if (*Cte)(unsafe.Pointer(pCte)).FpCols != 0 { if pEList != 0 && (*ExprList)(unsafe.Pointer(pEList)).FnExpr != (*ExprList)(unsafe.Pointer((*Cte)(unsafe.Pointer(pCte)).FpCols)).FnExpr { - Xsqlite3ErrorMsg(tls, pParse, ts+19741, + Xsqlite3ErrorMsg(tls, pParse, ts+19788, libc.VaList(bp+24, (*Cte)(unsafe.Pointer(pCte)).FzName, (*ExprList)(unsafe.Pointer(pEList)).FnExpr, (*ExprList)(unsafe.Pointer((*Cte)(unsafe.Pointer(pCte)).FpCols)).FnExpr)) (*Parse)(unsafe.Pointer(pParse)).FpWith = pSavedWith return 2 @@ -96136,9 +96171,9 @@ Xsqlite3ColumnsFromExprList(tls, pParse, pEList, pTab+54, pTab+8) if bMayRecursive != 0 { if (*Select)(unsafe.Pointer(pSel)).FselFlags&U32(SF_Recursive) != 0 { - (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19779 + (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19826 } else { - (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19813 + (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19860 } Xsqlite3WalkSelect(tls, pWalker, pSel) } @@ -96185,7 +96220,7 @@ if (*SrcItem)(unsafe.Pointer(pFrom)).FzAlias != 0 { (*Table)(unsafe.Pointer(pTab)).FzName = Xsqlite3DbStrDup(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, (*SrcItem)(unsafe.Pointer(pFrom)).FzAlias) } else { - (*Table)(unsafe.Pointer(pTab)).FzName = Xsqlite3MPrintf(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, ts+19851, libc.VaList(bp, pFrom)) + (*Table)(unsafe.Pointer(pTab)).FzName = Xsqlite3MPrintf(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, ts+19898, libc.VaList(bp, pFrom)) } for (*Select)(unsafe.Pointer(pSel)).FpPrior != 0 { pSel = (*Select)(unsafe.Pointer(pSel)).FpPrior @@ -96297,7 +96332,7 @@ return WRC_Abort } if (*Table)(unsafe.Pointer(pTab)).FnTabRef >= U32(0xffff) { - Xsqlite3ErrorMsg(tls, pParse, ts+19855, + Xsqlite3ErrorMsg(tls, pParse, ts+19902, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName)) (*SrcItem)(unsafe.Pointer(pFrom)).FpTab = uintptr(0) return WRC_Abort @@ -96316,7 +96351,7 @@ if int32((*Table)(unsafe.Pointer(pTab)).FeTabType) == TABTYP_VIEW { if (*Sqlite3)(unsafe.Pointer(db)).Fflags&uint64(SQLITE_EnableView) == uint64(0) && (*Table)(unsafe.Pointer(pTab)).FpSchema != (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+1*32)).FpSchema { - Xsqlite3ErrorMsg(tls, pParse, ts+19894, + Xsqlite3ErrorMsg(tls, pParse, ts+19941, libc.VaList(bp+8, (*Table)(unsafe.Pointer(pTab)).FzName)) } (*SrcItem)(unsafe.Pointer(pFrom)).FpSelect = 
Xsqlite3SelectDup(tls, db, *(*uintptr)(unsafe.Pointer(pTab + 64)), 0) @@ -96440,7 +96475,7 @@ if pNew != 0 { var pX uintptr = pNew + 8 + uintptr((*ExprList)(unsafe.Pointer(pNew)).FnExpr-1)*32 - (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3MPrintf(tls, db, ts+19925, libc.VaList(bp+24, zUName)) + (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3MPrintf(tls, db, ts+19972, libc.VaList(bp+24, zUName)) libc.SetBitFieldPtr16Uint32(pX+16+4, uint32(ENAME_TAB), 0, 0x3) libc.SetBitFieldPtr16Uint32(pX+16+4, uint32(1), 7, 0x80) } @@ -96505,7 +96540,7 @@ (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3DbStrDup(tls, db, (*ExprList_item)(unsafe.Pointer(pNestedFrom+8+uintptr(j)*32)).FzEName) } else { - (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3MPrintf(tls, db, ts+19930, + (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3MPrintf(tls, db, ts+19977, libc.VaList(bp+32, zSchemaName, zTabName, zName)) } @@ -96536,9 +96571,9 @@ ; if !(tableSeen != 0) { if zTName != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+19939, libc.VaList(bp+72, zTName)) + Xsqlite3ErrorMsg(tls, pParse, ts+19986, libc.VaList(bp+72, zTName)) } else { - Xsqlite3ErrorMsg(tls, pParse, ts+19957, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+20004, 0) } } } @@ -96548,7 +96583,7 @@ } if (*Select)(unsafe.Pointer(p)).FpEList != 0 { if (*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer(p)).FpEList)).FnExpr > *(*int32)(unsafe.Pointer(db + 136 + 2*4)) { - Xsqlite3ErrorMsg(tls, pParse, ts+19977, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+20024, 0) return WRC_Abort } if elistFlags&U32(EP_HasFunc|EP_Subquery) != U32(0) { @@ -96686,7 +96721,7 @@ (*AggInfo)(unsafe.Pointer(pAggInfo)).FnColumn = (*AggInfo)(unsafe.Pointer(pAggInfo)).FnAccumulator if int32((*AggInfo)(unsafe.Pointer(pAggInfo)).FnSortingColumn) > 0 { if (*AggInfo)(unsafe.Pointer(pAggInfo)).FnColumn == 0 { - (*AggInfo)(unsafe.Pointer(pAggInfo)).FnSortingColumn = U16(0) + (*AggInfo)(unsafe.Pointer(pAggInfo)).FnSortingColumn = U16((*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer(pSelect)).FpGroupBy)).FnExpr) } else { (*AggInfo)(unsafe.Pointer(pAggInfo)).FnSortingColumn = U16(int32((*AggInfo_col)(unsafe.Pointer((*AggInfo)(unsafe.Pointer(pAggInfo)).FaCol+uintptr((*AggInfo)(unsafe.Pointer(pAggInfo)).FnColumn-1)*24)).FiSorterColumn) + 1) } @@ -96770,13 +96805,13 @@ if *(*uintptr)(unsafe.Pointer(pE + 32)) == uintptr(0) || (*ExprList)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pE + 32)))).FnExpr != 1 { Xsqlite3ErrorMsg(tls, pParse, - ts+20008, 0) + ts+20055, 0) (*AggInfo_func)(unsafe.Pointer(pFunc)).FiDistinct = -1 } else { var pKeyInfo uintptr = Xsqlite3KeyInfoFromExprList(tls, pParse, *(*uintptr)(unsafe.Pointer(pE + 32)), 0, 0) (*AggInfo_func)(unsafe.Pointer(pFunc)).FiDistAddr = Xsqlite3VdbeAddOp4(tls, v, OP_OpenEphemeral, (*AggInfo_func)(unsafe.Pointer(pFunc)).FiDistinct, 0, 0, pKeyInfo, -8) - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+20059, libc.VaList(bp, (*FuncDef)(unsafe.Pointer((*AggInfo_func)(unsafe.Pointer(pFunc)).FpFunc)).FzName)) + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+20106, libc.VaList(bp, (*FuncDef)(unsafe.Pointer((*AggInfo_func)(unsafe.Pointer(pFunc)).FpFunc)).FzName)) } } @@ -96965,11 +97000,11 @@ if int32((*Parse)(unsafe.Pointer(pParse)).Fexplain) == 2 { var bCover int32 = libc.Bool32(pIdx != uintptr(0) && ((*Table)(unsafe.Pointer(pTab)).FtabFlags&U32(TF_WithoutRowid) == U32(0) || !(int32(*(*uint16)(unsafe.Pointer(pIdx + 100))&0x3>>0) == SQLITE_IDXTYPE_PRIMARYKEY))) - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+20092, + Xsqlite3VdbeExplain(tls, 
pParse, uint8(0), ts+20139, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName, func() uintptr { if bCover != 0 { - return ts + 20104 + return ts + 20151 } return ts + 1547 }(), @@ -97297,7 +97332,7 @@ goto __7 } Xsqlite3ErrorMsg(tls, pParse, - ts+20127, + ts+20174, libc.VaList(bp, func() uintptr { if (*SrcItem)(unsafe.Pointer(p0)).FzAlias != 0 { return (*SrcItem)(unsafe.Pointer(p0)).FzAlias @@ -97358,7 +97393,7 @@ if !(int32((*Table)(unsafe.Pointer(pTab)).FnCol) != (*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer(pSub)).FpEList)).FnExpr) { goto __15 } - Xsqlite3ErrorMsg(tls, pParse, ts+20181, + Xsqlite3ErrorMsg(tls, pParse, ts+20228, libc.VaList(bp+8, int32((*Table)(unsafe.Pointer(pTab)).FnCol), (*Table)(unsafe.Pointer(pTab)).FzName, (*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer(pSub)).FpEList)).FnExpr)) goto select_end __15: @@ -97500,7 +97535,7 @@ (*SrcItem)(unsafe.Pointer(pItem1)).FaddrFillSub = addrTop Xsqlite3SelectDestInit(tls, bp+96, SRT_Coroutine, (*SrcItem)(unsafe.Pointer(pItem1)).FregReturn) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+20221, libc.VaList(bp+32, pItem1)) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+20268, libc.VaList(bp+32, pItem1)) Xsqlite3Select(tls, pParse, pSub1, bp+96) (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pItem1)).FpTab)).FnRowLogEst = (*Select)(unsafe.Pointer(pSub1)).FnSelectRow libc.SetBitFieldPtr16Uint32(pItem1+60+4, uint32(1), 5, 0x20) @@ -97559,7 +97594,7 @@ ; Xsqlite3SelectDestInit(tls, bp+96, SRT_EphemTab, (*SrcItem)(unsafe.Pointer(pItem1)).FiCursor) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+20236, libc.VaList(bp+40, pItem1)) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+20283, libc.VaList(bp+40, pItem1)) Xsqlite3Select(tls, pParse, pSub1, bp+96) (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pItem1)).FpTab)).FnRowLogEst = (*Select)(unsafe.Pointer(pSub1)).FnSelectRow if !(onceAddr != 0) { @@ -98030,9 +98065,9 @@ explainTempTable(tls, pParse, func() uintptr { if (*DistinctCtx)(unsafe.Pointer(bp+136)).FisTnct != 0 && (*Select)(unsafe.Pointer(p)).FselFlags&U32(SF_Distinct) == U32(0) { - return ts + 20252 + return ts + 20299 } - return ts + 20261 + return ts + 20308 }()) groupBySort = 1 @@ -98383,7 +98418,7 @@ if !(int32((*DistinctCtx)(unsafe.Pointer(bp+136)).FeTnctType) == WHERE_DISTINCT_UNORDERED) { goto __146 } - explainTempTable(tls, pParse, ts+20252) + explainTempTable(tls, pParse, ts+20299) __146: ; if !((*SortCtx)(unsafe.Pointer(bp+48)).FpOrderBy != 0) { @@ -98488,7 +98523,7 @@ } Xsqlite3_free(tls, (*TabResult)(unsafe.Pointer(p)).FzErrMsg) (*TabResult)(unsafe.Pointer(p)).FzErrMsg = Xsqlite3_mprintf(tls, - ts+20270, 0) + ts+20317, 0) (*TabResult)(unsafe.Pointer(p)).Frc = SQLITE_ERROR return 1 __11: @@ -98721,7 +98756,7 @@ if !((*Token)(unsafe.Pointer(pName2)).Fn > uint32(0)) { goto __3 } - Xsqlite3ErrorMsg(tls, pParse, ts+20335, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+20382, 0) goto trigger_cleanup __3: ; @@ -98765,7 +98800,7 @@ goto trigger_cleanup __8: ; - Xsqlite3FixInit(tls, bp+40, pParse, iDb, ts+20381, *(*uintptr)(unsafe.Pointer(bp + 32))) + Xsqlite3FixInit(tls, bp+40, pParse, iDb, ts+20428, *(*uintptr)(unsafe.Pointer(bp + 32))) if !(Xsqlite3FixSrcList(tls, bp+40, pTableName) != 0) { goto __9 } @@ -98783,7 +98818,7 @@ if !(int32((*Table)(unsafe.Pointer(pTab)).FeTabType) == TABTYP_VTAB) { goto __11 } - Xsqlite3ErrorMsg(tls, pParse, ts+20389, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+20436, 0) goto trigger_orphan_error __11: ; @@ -98795,7 +98830,7 @@ goto trigger_cleanup __12: ; - if 
!(Xsqlite3CheckObjectName(tls, pParse, zName, ts+20381, (*Table)(unsafe.Pointer(pTab)).FzName) != 0) { + if !(Xsqlite3CheckObjectName(tls, pParse, zName, ts+20428, (*Table)(unsafe.Pointer(pTab)).FzName) != 0) { goto __13 } goto trigger_cleanup @@ -98810,11 +98845,12 @@ if !!(noErr != 0) { goto __16 } - Xsqlite3ErrorMsg(tls, pParse, ts+20430, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(bp + 32)))) + Xsqlite3ErrorMsg(tls, pParse, ts+20477, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(bp + 32)))) goto __17 __16: ; Xsqlite3CodeVerifySchema(tls, pParse, iDb) + __17: ; goto trigger_cleanup @@ -98825,19 +98861,19 @@ if !(Xsqlite3_strnicmp(tls, (*Table)(unsafe.Pointer(pTab)).FzName, ts+6784, 7) == 0) { goto __18 } - Xsqlite3ErrorMsg(tls, pParse, ts+20456, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+20503, 0) goto trigger_cleanup __18: ; if !(int32((*Table)(unsafe.Pointer(pTab)).FeTabType) == TABTYP_VIEW && tr_tm != TK_INSTEAD) { goto __19 } - Xsqlite3ErrorMsg(tls, pParse, ts+20494, + Xsqlite3ErrorMsg(tls, pParse, ts+20541, libc.VaList(bp+8, func() uintptr { if tr_tm == TK_BEFORE { - return ts + 20531 + return ts + 20578 } - return ts + 20538 + return ts + 20585 }(), pTableName+8)) goto trigger_orphan_error __19: @@ -98846,7 +98882,7 @@ goto __20 } Xsqlite3ErrorMsg(tls, pParse, - ts+20544, libc.VaList(bp+24, pTableName+8)) + ts+20591, libc.VaList(bp+24, pTableName+8)) goto trigger_orphan_error __20: ; @@ -98995,7 +99031,7 @@ __3: ; Xsqlite3TokenInit(tls, bp+56, (*Trigger)(unsafe.Pointer(pTrig)).FzName) - Xsqlite3FixInit(tls, bp+72, pParse, iDb, ts+20381, bp+56) + Xsqlite3FixInit(tls, bp+72, pParse, iDb, ts+20428, bp+56) if !(Xsqlite3FixTriggerStep(tls, bp+72, (*Trigger)(unsafe.Pointer(pTrig)).Fstep_list) != 0 || Xsqlite3FixExpr(tls, bp+72, (*Trigger)(unsafe.Pointer(pTrig)).FpWhen) != 0) { goto __4 @@ -99028,7 +99064,7 @@ goto __12 } Xsqlite3ErrorMsg(tls, pParse, - ts+20590, + ts+20637, libc.VaList(bp, (*Trigger)(unsafe.Pointer(pTrig)).FzName, (*TriggerStep)(unsafe.Pointer(pStep)).FzTarget)) goto triggerfinish_cleanup __12: @@ -99053,13 +99089,13 @@ z = Xsqlite3DbStrNDup(tls, db, (*Token)(unsafe.Pointer(pAll)).Fz, uint64((*Token)(unsafe.Pointer(pAll)).Fn)) Xsqlite3NestedParse(tls, pParse, - ts+20638, + ts+20685, libc.VaList(bp+16, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName, zName, (*Trigger)(unsafe.Pointer(pTrig)).Ftable, z)) Xsqlite3DbFree(tls, db, z) Xsqlite3ChangeCookie(tls, pParse, iDb) Xsqlite3VdbeAddParseSchemaOp(tls, v, iDb, - Xsqlite3MPrintf(tls, db, ts+20713, libc.VaList(bp+48, zName)), uint16(0)) + Xsqlite3MPrintf(tls, db, ts+20760, libc.VaList(bp+48, zName)), uint16(0)) __7: ; __6: @@ -99315,7 +99351,7 @@ if !!(noErr != 0) { goto __9 } - Xsqlite3ErrorMsg(tls, pParse, ts+20742, libc.VaList(bp, pName+8)) + Xsqlite3ErrorMsg(tls, pParse, ts+20789, libc.VaList(bp, pName+8)) goto __10 __9: Xsqlite3CodeVerifyNamedSchema(tls, pParse, zDb) @@ -99368,7 +99404,7 @@ if libc.AssignUintptr(&v, Xsqlite3GetVdbe(tls, pParse)) != uintptr(0) { Xsqlite3NestedParse(tls, pParse, - ts+20762, + ts+20809, libc.VaList(bp, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName, (*Trigger)(unsafe.Pointer(pTrigger)).FzName)) Xsqlite3ChangeCookie(tls, pParse, iDb) Xsqlite3VdbeAddOp4(tls, v, OP_DropTrigger, iDb, 0, 0, (*Trigger)(unsafe.Pointer(pTrigger)).FzName, 0) @@ -99482,12 +99518,12 @@ goto __15 } Xsqlite3ErrorMsg(tls, pParse, - ts+20824, + ts+20871, libc.VaList(bp, func() uintptr { if op == TK_DELETE { - return ts + 20872 + return ts + 20919 } - 
return ts + 20879 + return ts + 20926 }())) __15: ; @@ -99601,7 +99637,7 @@ if int32((*Expr)(unsafe.Pointer((*Expr)(unsafe.Pointer(pTerm)).FpRight)).Fop) != TK_ASTERISK { return 0 } - Xsqlite3ErrorMsg(tls, pParse, ts+20886, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+20933, 0) return 1 } @@ -99667,7 +99703,7 @@ } Xsqlite3ExprListDelete(tls, db, (*Select)(unsafe.Pointer(bp)).FpEList) pNew = sqlite3ExpandReturning(tls, pParse, (*Returning)(unsafe.Pointer(pReturning)).FpReturnEL, pTab) - if !(int32((*Sqlite3)(unsafe.Pointer(db)).FmallocFailed) != 0) { + if (*Parse)(unsafe.Pointer(pParse)).FnErr == 0 { libc.X__builtin___memset_chk(tls, bp+240, 0, uint64(unsafe.Sizeof(NameContext{})), libc.X__builtin_object_size(tls, bp+240, 0)) if (*Returning)(unsafe.Pointer(pReturning)).FnRetCol == 0 { (*Returning)(unsafe.Pointer(pReturning)).FnRetCol = (*ExprList)(unsafe.Pointer(pNew)).FnExpr @@ -99831,7 +99867,7 @@ if v != 0 { if (*Trigger)(unsafe.Pointer(pTrigger)).FzName != 0 { Xsqlite3VdbeChangeP4(tls, v, -1, - Xsqlite3MPrintf(tls, db, ts+20928, libc.VaList(bp, (*Trigger)(unsafe.Pointer(pTrigger)).FzName)), -6) + Xsqlite3MPrintf(tls, db, ts+20975, libc.VaList(bp, (*Trigger)(unsafe.Pointer(pTrigger)).FzName)), -6) } if (*Trigger)(unsafe.Pointer(pTrigger)).FpWhen != 0 { @@ -100424,7 +100460,7 @@ } Xsqlite3ErrorMsg(tls, pParse, - ts+20942, + ts+20989, libc.VaList(bp, (*Column)(unsafe.Pointer((*Table)(unsafe.Pointer(pTab)).FaCol+uintptr(j)*24)).FzCnName)) goto update_cleanup __27: @@ -100456,7 +100492,7 @@ iRowidExpr = i goto __30 __29: - Xsqlite3ErrorMsg(tls, pParse, ts+20978, libc.VaList(bp+8, (*ExprList_item)(unsafe.Pointer(pChanges+8+uintptr(i)*32)).FzEName)) + Xsqlite3ErrorMsg(tls, pParse, ts+21025, libc.VaList(bp+8, (*ExprList_item)(unsafe.Pointer(pChanges+8+uintptr(i)*32)).FzEName)) (*Parse)(unsafe.Pointer(pParse)).FcheckSchema = U8(1) goto update_cleanup __30: @@ -100782,7 +100818,12 @@ goto __77 __76: flags = WHERE_ONEPASS_DESIRED - if !(!(int32((*Parse)(unsafe.Pointer(pParse)).Fnested) != 0) && !(pTrigger != 0) && !(hasFK != 0) && !(chngKey != 0) && !(*(*int32)(unsafe.Pointer(bp + 104)) != 0)) { + if !(!(int32((*Parse)(unsafe.Pointer(pParse)).Fnested) != 0) && + !(pTrigger != 0) && + !(hasFK != 0) && + !(chngKey != 0) && + !(*(*int32)(unsafe.Pointer(bp + 104)) != 0) && + (*NameContext)(unsafe.Pointer(bp+40)).FncFlags&NC_Subquery == 0) { goto __78 } flags = flags | WHERE_ONEPASS_MULTIROW @@ -101336,7 +101377,7 @@ if !(regRowCount != 0) { goto __169 } - Xsqlite3CodeChangeCount(tls, v, regRowCount, ts+20997) + Xsqlite3CodeChangeCount(tls, v, regRowCount, ts+21044) __169: ; update_cleanup: @@ -101642,10 +101683,10 @@ if nClause == 0 && (*Upsert)(unsafe.Pointer(pUpsert)).FpNextUpsert == uintptr(0) { *(*int8)(unsafe.Pointer(bp + 216)) = int8(0) } else { - Xsqlite3_snprintf(tls, int32(unsafe.Sizeof([16]int8{})), bp+216, ts+21010, libc.VaList(bp, nClause+1)) + Xsqlite3_snprintf(tls, int32(unsafe.Sizeof([16]int8{})), bp+216, ts+21057, libc.VaList(bp, nClause+1)) } Xsqlite3ErrorMsg(tls, pParse, - ts+21014, libc.VaList(bp+8, bp+216)) + ts+21061, libc.VaList(bp+8, bp+216)) return SQLITE_ERROR } @@ -101768,7 +101809,7 @@ var zSubSql uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp)), 0) if zSubSql != 0 && - (libc.Xstrncmp(tls, zSubSql, ts+21087, uint64(3)) == 0 || libc.Xstrncmp(tls, zSubSql, ts+21091, uint64(3)) == 0) { + (libc.Xstrncmp(tls, zSubSql, ts+21134, uint64(3)) == 0 || libc.Xstrncmp(tls, zSubSql, ts+21138, uint64(3)) == 0) { rc = execSql(tls, db, pzErrMsg, zSubSql) if rc != SQLITE_OK { 
break @@ -101916,14 +101957,14 @@ if !!(int32((*Sqlite3)(unsafe.Pointer(db)).FautoCommit) != 0) { goto __1 } - Xsqlite3SetString(tls, pzErrMsg, db, ts+21095) + Xsqlite3SetString(tls, pzErrMsg, db, ts+21142) return SQLITE_ERROR __1: ; if !((*Sqlite3)(unsafe.Pointer(db)).FnVdbeActive > 1) { goto __2 } - Xsqlite3SetString(tls, pzErrMsg, db, ts+21135) + Xsqlite3SetString(tls, pzErrMsg, db, ts+21182) return SQLITE_ERROR __2: ; @@ -101934,7 +101975,7 @@ if !(Xsqlite3_value_type(tls, pOut) != SQLITE_TEXT) { goto __5 } - Xsqlite3SetString(tls, pzErrMsg, db, ts+21178) + Xsqlite3SetString(tls, pzErrMsg, db, ts+21225) return SQLITE_ERROR __5: ; @@ -101962,7 +102003,7 @@ isMemDb = Xsqlite3PagerIsMemdb(tls, Xsqlite3BtreePager(tls, pMain)) nDb = (*Sqlite3)(unsafe.Pointer(db)).FnDb - rc = execSqlF(tls, db, pzErrMsg, ts+21196, libc.VaList(bp, zOut)) + rc = execSqlF(tls, db, pzErrMsg, ts+21243, libc.VaList(bp, zOut)) (*Sqlite3)(unsafe.Pointer(db)).FopenFlags = saved_openFlags if !(rc != SQLITE_OK) { goto __6 @@ -101982,7 +102023,7 @@ goto __8 } rc = SQLITE_ERROR - Xsqlite3SetString(tls, pzErrMsg, db, ts+21219) + Xsqlite3SetString(tls, pzErrMsg, db, ts+21266) goto end_of_vacuum __8: ; @@ -102042,7 +102083,7 @@ (*Sqlite3)(unsafe.Pointer(db)).Finit.FiDb = U8(nDb) rc = execSqlF(tls, db, pzErrMsg, - ts+21246, + ts+21293, libc.VaList(bp+8, zDbMain)) if !(rc != SQLITE_OK) { goto __13 @@ -102051,7 +102092,7 @@ __13: ; rc = execSqlF(tls, db, pzErrMsg, - ts+21354, + ts+21401, libc.VaList(bp+16, zDbMain)) if !(rc != SQLITE_OK) { goto __14 @@ -102062,7 +102103,7 @@ (*Sqlite3)(unsafe.Pointer(db)).Finit.FiDb = U8(0) rc = execSqlF(tls, db, pzErrMsg, - ts+21408, + ts+21455, libc.VaList(bp+24, zDbMain)) *(*U32)(unsafe.Pointer(db + 44)) &= libc.Uint32FromInt32(libc.CplInt32(DBFLAG_Vacuum)) @@ -102073,7 +102114,7 @@ __15: ; rc = execSqlF(tls, db, pzErrMsg, - ts+21559, + ts+21606, libc.VaList(bp+32, zDbMain)) if !(rc != 0) { goto __16 @@ -102502,11 +102543,11 @@ if pEnd != 0 { (*Parse)(unsafe.Pointer(pParse)).FsNameToken.Fn = uint32(int32((int64((*Token)(unsafe.Pointer(pEnd)).Fz)-int64((*Parse)(unsafe.Pointer(pParse)).FsNameToken.Fz))/1)) + (*Token)(unsafe.Pointer(pEnd)).Fn } - zStmt = Xsqlite3MPrintf(tls, db, ts+21689, libc.VaList(bp, pParse+272)) + zStmt = Xsqlite3MPrintf(tls, db, ts+21736, libc.VaList(bp, pParse+272)) iDb = Xsqlite3SchemaToIndex(tls, db, (*Table)(unsafe.Pointer(pTab)).FpSchema) Xsqlite3NestedParse(tls, pParse, - ts+21713, + ts+21760, libc.VaList(bp+8, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName, (*Table)(unsafe.Pointer(pTab)).FzName, (*Table)(unsafe.Pointer(pTab)).FzName, @@ -102516,7 +102557,7 @@ Xsqlite3ChangeCookie(tls, pParse, iDb) Xsqlite3VdbeAddOp0(tls, v, OP_Expire) - zWhere = Xsqlite3MPrintf(tls, db, ts+21812, libc.VaList(bp+48, (*Table)(unsafe.Pointer(pTab)).FzName, zStmt)) + zWhere = Xsqlite3MPrintf(tls, db, ts+21859, libc.VaList(bp+48, (*Table)(unsafe.Pointer(pTab)).FzName, zStmt)) Xsqlite3VdbeAddParseSchemaOp(tls, v, iDb, zWhere, uint16(0)) Xsqlite3DbFree(tls, db, zStmt) @@ -102577,7 +102618,7 @@ for pCtx = (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx; pCtx != 0; pCtx = (*VtabCtx)(unsafe.Pointer(pCtx)).FpPrior { if (*VtabCtx)(unsafe.Pointer(pCtx)).FpTab == pTab { *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, - ts+21831, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName)) + ts+21878, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName)) return SQLITE_LOCKED } } @@ -102605,9 +102646,11 @@ (*VtabCtx)(unsafe.Pointer(bp + 32)).FpPrior = 
(*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx (*VtabCtx)(unsafe.Pointer(bp + 32)).FbDeclared = 0 (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx = bp + 32 + (*Table)(unsafe.Pointer(pTab)).FnTabRef++ rc = (*struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, uintptr, uintptr) int32 })(unsafe.Pointer(&struct{ uintptr }{xConstruct})).f(tls, db, (*Module)(unsafe.Pointer(pMod)).FpAux, nArg, azArg, pVTable+16, bp+64) + Xsqlite3DeleteTable(tls, db, pTab) (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx = (*VtabCtx)(unsafe.Pointer(bp + 32)).FpPrior if rc == SQLITE_NOMEM { Xsqlite3OomFault(tls, db) @@ -102615,7 +102658,7 @@ if SQLITE_OK != rc { if *(*uintptr)(unsafe.Pointer(bp + 64)) == uintptr(0) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+21873, libc.VaList(bp+8, zModuleName)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+21920, libc.VaList(bp+8, zModuleName)) } else { *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+3658, libc.VaList(bp+16, *(*uintptr)(unsafe.Pointer(bp + 64)))) Xsqlite3_free(tls, *(*uintptr)(unsafe.Pointer(bp + 64))) @@ -102627,7 +102670,7 @@ (*Module)(unsafe.Pointer(pMod)).FnRefModule++ (*VTable)(unsafe.Pointer(pVTable)).FnRef = 1 if (*VtabCtx)(unsafe.Pointer(bp+32)).FbDeclared == 0 { - var zFormat uintptr = ts + 21903 + var zFormat uintptr = ts + 21950 *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, zFormat, libc.VaList(bp+24, (*Table)(unsafe.Pointer(pTab)).FzName)) Xsqlite3VtabUnlock(tls, pVTable) rc = SQLITE_ERROR @@ -102701,7 +102744,7 @@ if !(pMod != 0) { var zModule uintptr = *(*uintptr)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pTab + 64 + 8)))) - Xsqlite3ErrorMsg(tls, pParse, ts+21949, libc.VaList(bp, zModule)) + Xsqlite3ErrorMsg(tls, pParse, ts+21996, libc.VaList(bp, zModule)) rc = SQLITE_ERROR } else { *(*uintptr)(unsafe.Pointer(bp + 16)) = uintptr(0) @@ -102759,7 +102802,7 @@ pMod = Xsqlite3HashFind(tls, db+576, zMod) if pMod == uintptr(0) || (*Sqlite3_module)(unsafe.Pointer((*Module)(unsafe.Pointer(pMod)).FpModule)).FxCreate == uintptr(0) || (*Sqlite3_module)(unsafe.Pointer((*Module)(unsafe.Pointer(pMod)).FpModule)).FxDestroy == uintptr(0) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+21949, libc.VaList(bp, zMod)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+21996, libc.VaList(bp, zMod)) rc = SQLITE_ERROR } else { rc = vtabCallConstructor(tls, db, pTab, pMod, (*Sqlite3_module)(unsafe.Pointer((*Module)(unsafe.Pointer(pMod)).FpModule)).FxCreate, pzErr) @@ -102793,7 +102836,7 @@ if !(pCtx != 0) || (*VtabCtx)(unsafe.Pointer(pCtx)).FbDeclared != 0 { Xsqlite3Error(tls, db, SQLITE_MISUSE) Xsqlite3_mutex_leave(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) - return Xsqlite3MisuseError(tls, 151030) + return Xsqlite3MisuseError(tls, 151102) } pTab = (*VtabCtx)(unsafe.Pointer(pCtx)).FpTab @@ -103246,7 +103289,7 @@ Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) p = (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx if !(p != 0) { - rc = Xsqlite3MisuseError(tls, 151521) + rc = Xsqlite3MisuseError(tls, 151593) } else { ap = va switch op { @@ -103273,7 +103316,7 @@ fallthrough default: { - rc = Xsqlite3MisuseError(tls, 151539) + rc = Xsqlite3MisuseError(tls, 151611) break } @@ -103504,7 +103547,7 @@ func explainIndexColumnName(tls *libc.TLS, pIdx uintptr, i int32) uintptr { i = int32(*(*I16)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx)).FaiColumn + uintptr(i)*2))) if i == -2 { - return ts + 21968 + return ts + 22015 } if i == -1 { return ts + 16673 @@ -103516,11 
+103559,11 @@ var i int32 if bAnd != 0 { - Xsqlite3_str_append(tls, pStr, ts+21975, 5) + Xsqlite3_str_append(tls, pStr, ts+22022, 5) } if nTerm > 1 { - Xsqlite3_str_append(tls, pStr, ts+21981, 1) + Xsqlite3_str_append(tls, pStr, ts+22028, 1) } for i = 0; i < nTerm; i++ { if i != 0 { @@ -103535,7 +103578,7 @@ Xsqlite3_str_append(tls, pStr, zOp, 1) if nTerm > 1 { - Xsqlite3_str_append(tls, pStr, ts+21981, 1) + Xsqlite3_str_append(tls, pStr, ts+22028, 1) } for i = 0; i < nTerm; i++ { if i != 0 { @@ -103561,27 +103604,27 @@ if int32(nEq) == 0 && (*WhereLoop)(unsafe.Pointer(pLoop)).FwsFlags&U32(WHERE_BTM_LIMIT|WHERE_TOP_LIMIT) == U32(0) { return } - Xsqlite3_str_append(tls, pStr, ts+21983, 2) + Xsqlite3_str_append(tls, pStr, ts+22030, 2) for i = 0; i < int32(nEq); i++ { var z uintptr = explainIndexColumnName(tls, pIndex, i) if i != 0 { - Xsqlite3_str_append(tls, pStr, ts+21975, 5) + Xsqlite3_str_append(tls, pStr, ts+22022, 5) } Xsqlite3_str_appendf(tls, pStr, func() uintptr { if i >= int32(nSkip) { - return ts + 21986 + return ts + 22033 } - return ts + 21991 + return ts + 22038 }(), libc.VaList(bp, z)) } j = i if (*WhereLoop)(unsafe.Pointer(pLoop)).FwsFlags&U32(WHERE_BTM_LIMIT) != 0 { - explainAppendTerm(tls, pStr, pIndex, int32(*(*U16)(unsafe.Pointer(pLoop + 24 + 2))), j, i, ts+21999) + explainAppendTerm(tls, pStr, pIndex, int32(*(*U16)(unsafe.Pointer(pLoop + 24 + 2))), j, i, ts+22046) i = 1 } if (*WhereLoop)(unsafe.Pointer(pLoop)).FwsFlags&U32(WHERE_TOP_LIMIT) != 0 { - explainAppendTerm(tls, pStr, pIndex, int32(*(*U16)(unsafe.Pointer(pLoop + 24 + 4))), j, i, ts+22001) + explainAppendTerm(tls, pStr, pIndex, int32(*(*U16)(unsafe.Pointer(pLoop + 24 + 4))), j, i, ts+22048) } Xsqlite3_str_append(tls, pStr, ts+5360, 1) } @@ -103624,11 +103667,11 @@ Xsqlite3StrAccumInit(tls, bp+64, db, bp+96, int32(unsafe.Sizeof([100]int8{})), SQLITE_MAX_LENGTH) (*StrAccum)(unsafe.Pointer(bp + 64)).FprintfFlags = U8(SQLITE_PRINTF_INTERNAL) - Xsqlite3_str_appendf(tls, bp+64, ts+22003, libc.VaList(bp, func() uintptr { + Xsqlite3_str_appendf(tls, bp+64, ts+22050, libc.VaList(bp, func() uintptr { if isSearch != 0 { - return ts + 22009 + return ts + 22056 } - return ts + 22016 + return ts + 22063 }(), pItem)) if flags&U32(WHERE_IPK|WHERE_VIRTUALTABLE) == U32(0) { var zFmt uintptr = uintptr(0) @@ -103641,40 +103684,40 @@ zFmt = ts + 11379 } } else if flags&U32(WHERE_PARTIALIDX) != 0 { - zFmt = ts + 22021 + zFmt = ts + 22068 } else if flags&U32(WHERE_AUTO_INDEX) != 0 { - zFmt = ts + 22054 + zFmt = ts + 22101 } else if flags&U32(WHERE_IDX_ONLY) != 0 { - zFmt = ts + 22079 + zFmt = ts + 22126 } else { - zFmt = ts + 22097 + zFmt = ts + 22144 } if zFmt != 0 { - Xsqlite3_str_append(tls, bp+64, ts+22106, 7) + Xsqlite3_str_append(tls, bp+64, ts+22153, 7) Xsqlite3_str_appendf(tls, bp+64, zFmt, libc.VaList(bp+16, (*Index)(unsafe.Pointer(pIdx)).FzName)) explainIndexRange(tls, bp+64, pLoop) } } else if flags&U32(WHERE_IPK) != U32(0) && flags&U32(WHERE_CONSTRAINT) != U32(0) { var cRangeOp int8 var zRowid uintptr = ts + 16673 - Xsqlite3_str_appendf(tls, bp+64, ts+22114, libc.VaList(bp+24, zRowid)) + Xsqlite3_str_appendf(tls, bp+64, ts+22161, libc.VaList(bp+24, zRowid)) if flags&U32(WHERE_COLUMN_EQ|WHERE_COLUMN_IN) != 0 { cRangeOp = int8('=') } else if flags&U32(WHERE_BOTH_LIMIT) == U32(WHERE_BOTH_LIMIT) { - Xsqlite3_str_appendf(tls, bp+64, ts+22145, libc.VaList(bp+32, zRowid)) + Xsqlite3_str_appendf(tls, bp+64, ts+22192, libc.VaList(bp+32, zRowid)) cRangeOp = int8('<') } else if flags&U32(WHERE_BTM_LIMIT) != 0 { cRangeOp = int8('>') } 
else { cRangeOp = int8('<') } - Xsqlite3_str_appendf(tls, bp+64, ts+22155, libc.VaList(bp+40, int32(cRangeOp))) + Xsqlite3_str_appendf(tls, bp+64, ts+22202, libc.VaList(bp+40, int32(cRangeOp))) } else if flags&U32(WHERE_VIRTUALTABLE) != U32(0) { - Xsqlite3_str_appendf(tls, bp+64, ts+22160, + Xsqlite3_str_appendf(tls, bp+64, ts+22207, libc.VaList(bp+48, *(*int32)(unsafe.Pointer(pLoop + 24)), *(*uintptr)(unsafe.Pointer(pLoop + 24 + 16)))) } if int32((*SrcItem)(unsafe.Pointer(pItem)).Ffg.Fjointype)&JT_LEFT != 0 { - Xsqlite3_str_appendf(tls, bp+64, ts+22187, 0) + Xsqlite3_str_appendf(tls, bp+64, ts+22234, 0) } zMsg = Xsqlite3StrAccumFinish(tls, bp+64) @@ -103706,22 +103749,22 @@ Xsqlite3StrAccumInit(tls, bp+24, db, bp+56, int32(unsafe.Sizeof([100]int8{})), SQLITE_MAX_LENGTH) (*StrAccum)(unsafe.Pointer(bp + 24)).FprintfFlags = U8(SQLITE_PRINTF_INTERNAL) - Xsqlite3_str_appendf(tls, bp+24, ts+22198, libc.VaList(bp, pItem)) + Xsqlite3_str_appendf(tls, bp+24, ts+22245, libc.VaList(bp, pItem)) pLoop = (*WhereLevel)(unsafe.Pointer(pLevel)).FpWLoop if (*WhereLoop)(unsafe.Pointer(pLoop)).FwsFlags&U32(WHERE_IPK) != 0 { var pTab uintptr = (*SrcItem)(unsafe.Pointer(pItem)).FpTab if int32((*Table)(unsafe.Pointer(pTab)).FiPKey) >= 0 { - Xsqlite3_str_appendf(tls, bp+24, ts+21986, libc.VaList(bp+8, (*Column)(unsafe.Pointer((*Table)(unsafe.Pointer(pTab)).FaCol+uintptr((*Table)(unsafe.Pointer(pTab)).FiPKey)*24)).FzCnName)) + Xsqlite3_str_appendf(tls, bp+24, ts+22033, libc.VaList(bp+8, (*Column)(unsafe.Pointer((*Table)(unsafe.Pointer(pTab)).FaCol+uintptr((*Table)(unsafe.Pointer(pTab)).FiPKey)*24)).FzCnName)) } else { - Xsqlite3_str_appendf(tls, bp+24, ts+22219, 0) + Xsqlite3_str_appendf(tls, bp+24, ts+22266, 0) } } else { for i = int32((*WhereLoop)(unsafe.Pointer(pLoop)).FnSkip); i < int32(*(*U16)(unsafe.Pointer(pLoop + 24))); i++ { var z uintptr = explainIndexColumnName(tls, *(*uintptr)(unsafe.Pointer(pLoop + 24 + 8)), i) if i > int32((*WhereLoop)(unsafe.Pointer(pLoop)).FnSkip) { - Xsqlite3_str_append(tls, bp+24, ts+21975, 5) + Xsqlite3_str_append(tls, bp+24, ts+22022, 5) } - Xsqlite3_str_appendf(tls, bp+24, ts+21986, libc.VaList(bp+16, z)) + Xsqlite3_str_appendf(tls, bp+24, ts+22033, libc.VaList(bp+16, z)) } } Xsqlite3_str_append(tls, bp+24, ts+5360, 1) @@ -105318,7 +105361,7 @@ ; __126: ; - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+22227, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+22274, 0) ii = 0 __135: if !(ii < (*WhereClause)(unsafe.Pointer(pOrWc)).FnTerm) { @@ -105346,7 +105389,7 @@ pOrExpr = pAndExpr __140: ; - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+22242, libc.VaList(bp, ii+1)) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+22289, libc.VaList(bp, ii+1)) pSubWInfo = Xsqlite3WhereBegin(tls, pParse, pOrTab, pOrExpr, uintptr(0), uintptr(0), uintptr(0), uint16(WHERE_OR_SUBCLAUSE), iCovCur) @@ -105864,7 +105907,7 @@ var mAll Bitmask = uint64(0) var k int32 - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+22251, libc.VaList(bp, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pTabItem)).FpTab)).FzName)) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+22298, libc.VaList(bp, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pTabItem)).FpTab)).FzName)) for k = 0; k < iLevel; k++ { var iIdxCur int32 @@ -106224,7 +106267,7 @@ {FzOp: ts + 16522, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_MATCH)}, {FzOp: ts + 15850, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_GLOB)}, {FzOp: ts + 15370, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_LIKE)}, - {FzOp: ts + 22265, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_REGEXP)}, + {FzOp: ts + 
22312, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_REGEXP)}, } func transferJoinMarkings(tls *libc.TLS, pDerived uintptr, pBase uintptr) { @@ -106714,12 +106757,12 @@ extraRight = x - uint64(1) if prereqAll>>1 >= x { - Xsqlite3ErrorMsg(tls, pParse, ts+22272, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+22319, 0) return } } else if prereqAll>>1 >= x { if (*SrcList)(unsafe.Pointer(pSrc)).FnSrc > 0 && int32((*SrcItem)(unsafe.Pointer(pSrc+8)).Ffg.Fjointype)&JT_LTORJ != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+22272, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+22319, 0) return } *(*U32)(unsafe.Pointer(pExpr + 4)) &= libc.Uint32FromInt32(libc.CplInt32(EP_InnerON)) @@ -106798,7 +106841,7 @@ !((*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_OuterON) != U32(0)) && 0 == Xsqlite3ExprCanBeNull(tls, pLeft) { (*Expr)(unsafe.Pointer(pExpr)).Fop = U8(TK_TRUEFALSE) - *(*uintptr)(unsafe.Pointer(pExpr + 8)) = ts + 7172 + *(*uintptr)(unsafe.Pointer(pExpr + 8)) = ts + 8102 *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_IsFalse) (*WhereTerm)(unsafe.Pointer(pTerm)).FprereqAll = uint64(0) (*WhereTerm)(unsafe.Pointer(pTerm)).FeOperator = U16(0) @@ -106892,7 +106935,7 @@ } zCollSeqName = func() uintptr { if *(*int32)(unsafe.Pointer(bp + 20)) != 0 { - return ts + 22313 + return ts + 22360 } return uintptr(unsafe.Pointer(&Xsqlite3StrBINARY)) }() @@ -107268,7 +107311,7 @@ k++ } if k >= int32((*Table)(unsafe.Pointer(pTab)).FnCol) { - Xsqlite3ErrorMsg(tls, pParse, ts+22320, + Xsqlite3ErrorMsg(tls, pParse, ts+22367, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName, j)) return } @@ -107284,7 +107327,7 @@ pRhs = Xsqlite3PExpr(tls, pParse, TK_UPLUS, Xsqlite3ExprDup(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, (*ExprList_item)(unsafe.Pointer(pArgs+8+uintptr(j)*32)).FpExpr, 0), uintptr(0)) pTerm = Xsqlite3PExpr(tls, pParse, TK_EQ, pColRef, pRhs) - if int32((*SrcItem)(unsafe.Pointer(pItem)).Ffg.Fjointype)&(JT_LEFT|JT_LTORJ) != 0 { + if int32((*SrcItem)(unsafe.Pointer(pItem)).Ffg.Fjointype)&(JT_LEFT|JT_LTORJ|JT_RIGHT) != 0 { joinType = U32(EP_OuterON) } else { joinType = U32(EP_InnerON) @@ -108002,7 +108045,7 @@ goto __6 } Xsqlite3_log(tls, SQLITE_WARNING|int32(1)<<8, - ts+22356, libc.VaList(bp, (*Table)(unsafe.Pointer(pTable)).FzName, + ts+22403, libc.VaList(bp, (*Table)(unsafe.Pointer(pTable)).FzName, (*Column)(unsafe.Pointer((*Table)(unsafe.Pointer(pTable)).FaCol+uintptr(iCol)*24)).FzCnName)) sentWarning = U8(1) __6: @@ -108073,7 +108116,7 @@ __14: ; *(*uintptr)(unsafe.Pointer(pLoop + 24 + 8)) = pIdx - (*Index)(unsafe.Pointer(pIdx)).FzName = ts + 22382 + (*Index)(unsafe.Pointer(pIdx)).FzName = ts + 22429 (*Index)(unsafe.Pointer(pIdx)).FpTable = pTable n = 0 idxCols = uint64(0) @@ -108247,6 +108290,10 @@ var v uintptr = (*Parse)(unsafe.Pointer(pParse)).FpVdbe var pLoop uintptr = (*WhereLevel)(unsafe.Pointer(pLevel)).FpWLoop var iCur int32 + var saved_pIdxEpr uintptr + + saved_pIdxEpr = (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr + (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr = uintptr(0) addrOnce = Xsqlite3VdbeAddOp0(tls, v, OP_Once) for __ccgo := true; __ccgo; __ccgo = iLevel < int32((*WhereInfo)(unsafe.Pointer(pWInfo)).FnLevel) { @@ -108290,9 +108337,7 @@ var r1 int32 = Xsqlite3GetTempRange(tls, pParse, n) var jj int32 for jj = 0; jj < n; jj++ { - var iCol int32 = int32(*(*I16)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx)).FaiColumn + uintptr(jj)*2))) - - Xsqlite3ExprCodeGetColumnOfTable(tls, v, (*Index)(unsafe.Pointer(pIdx)).FpTable, iCur, iCol, r1+jj) + Xsqlite3ExprCodeLoadIndexColumn(tls, pParse, pIdx, iCur, jj, r1+jj) } Xsqlite3VdbeAddOp4Int(tls, 
v, OP_FilterAdd, (*WhereLevel)(unsafe.Pointer(pLevel)).FregFilter, 0, r1, n) Xsqlite3ReleaseTempRange(tls, pParse, r1, n) @@ -108326,6 +108371,7 @@ } } Xsqlite3VdbeJumpHere(tls, v, addrOnce) + (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr = saved_pIdxEpr } func allocateIndexInfo(tls *libc.TLS, pWInfo uintptr, pWC uintptr, mUnusable Bitmask, pSrc uintptr, pmNoOmit uintptr) uintptr { @@ -108584,11 +108630,16 @@ _ = pParse + if !((*Table)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx)).FpTable)).FtabFlags&U32(TF_WithoutRowid) == U32(0)) && int32(*(*uint16)(unsafe.Pointer(pIdx + 100))&0x3>>0) == SQLITE_IDXTYPE_PRIMARYKEY { + nField = int32((*Index)(unsafe.Pointer(pIdx)).FnKeyCol) + } else { + nField = int32((*Index)(unsafe.Pointer(pIdx)).FnColumn) + } nField = func() int32 { - if int32((*UnpackedRecord)(unsafe.Pointer(pRec)).FnField) < (*Index)(unsafe.Pointer(pIdx)).FnSample { + if int32((*UnpackedRecord)(unsafe.Pointer(pRec)).FnField) < nField { return int32((*UnpackedRecord)(unsafe.Pointer(pRec)).FnField) } - return (*Index)(unsafe.Pointer(pIdx)).FnSample + return nField }() iCol = 0 iSample = (*Index)(unsafe.Pointer(pIdx)).FnSample * nField @@ -110169,7 +110220,7 @@ j >= (*WhereClause)(unsafe.Pointer(pWC)).FnTerm || *(*uintptr)(unsafe.Pointer((*WhereLoop)(unsafe.Pointer(pNew)).FaLTerm + uintptr(iTerm)*8)) != uintptr(0) || int32((*sqlite3_index_constraint)(unsafe.Pointer(pIdxCons)).Fusable) == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+22393, libc.VaList(bp, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pSrc)).FpTab)).FzName)) + Xsqlite3ErrorMsg(tls, pParse, ts+22440, libc.VaList(bp, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pSrc)).FpTab)).FzName)) return SQLITE_ERROR } @@ -110227,7 +110278,7 @@ (*WhereLoop)(unsafe.Pointer(pNew)).FnLTerm = U16(mxTerm + 1) for i = 0; i <= mxTerm; i++ { if *(*uintptr)(unsafe.Pointer((*WhereLoop)(unsafe.Pointer(pNew)).FaLTerm + uintptr(i)*8)) == uintptr(0) { - Xsqlite3ErrorMsg(tls, pParse, ts+22393, libc.VaList(bp+8, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pSrc)).FpTab)).FzName)) + Xsqlite3ErrorMsg(tls, pParse, ts+22440, libc.VaList(bp+8, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pSrc)).FpTab)).FzName)) return SQLITE_ERROR } @@ -110625,7 +110676,7 @@ mPrior = mPrior | (*WhereLoop)(unsafe.Pointer(pNew)).FmaskSelf if rc != 0 || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { if rc == SQLITE_DONE { - Xsqlite3_log(tls, SQLITE_WARNING, ts+22419, 0) + Xsqlite3_log(tls, SQLITE_WARNING, ts+22466, 0) rc = SQLITE_OK } else { goto __3 @@ -111232,7 +111283,7 @@ } if nFrom == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+22454, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+22501, 0) Xsqlite3DbFreeNN(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pSpace) return SQLITE_ERROR } @@ -111267,6 +111318,10 @@ if int32((*WherePath)(unsafe.Pointer(pFrom)).FisOrdered) == (*ExprList)(unsafe.Pointer((*WhereInfo)(unsafe.Pointer(pWInfo)).FpOrderBy)).FnExpr { (*WhereInfo)(unsafe.Pointer(pWInfo)).FeDistinct = U8(WHERE_DISTINCT_ORDERED) } + if (*Select)(unsafe.Pointer((*WhereInfo)(unsafe.Pointer(pWInfo)).FpSelect)).FpOrderBy != 0 && + int32((*WhereInfo)(unsafe.Pointer(pWInfo)).FnOBSat) > (*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer((*WhereInfo)(unsafe.Pointer(pWInfo)).FpSelect)).FpOrderBy)).FnExpr { + (*WhereInfo)(unsafe.Pointer(pWInfo)).FnOBSat = I8((*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer((*WhereInfo)(unsafe.Pointer(pWInfo)).FpSelect)).FpOrderBy)).FnExpr) + } } else { (*WhereInfo)(unsafe.Pointer(pWInfo)).FrevMask = (*WherePath)(unsafe.Pointer(pFrom)).FrevLoop if 
int32((*WhereInfo)(unsafe.Pointer(pWInfo)).FnOBSat) <= 0 { @@ -111561,6 +111616,9 @@ (*IndexedExpr)(unsafe.Pointer(p)).FiIdxCur = iIdxCur (*IndexedExpr)(unsafe.Pointer(p)).FiIdxCol = i (*IndexedExpr)(unsafe.Pointer(p)).FbMaybeNullRow = U8(bMaybeNullRow) + if Xsqlite3IndexAffinityStr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pIdx) != 0 { + (*IndexedExpr)(unsafe.Pointer(p)).Faff = U8(*(*int8)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx)).FzColAff + uintptr(i)))) + } (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr = p if (*IndexedExpr)(unsafe.Pointer(p)).FpIENext == uintptr(0) { Xsqlite3ParserAddCleanup(tls, pParse, *(*uintptr)(unsafe.Pointer(&struct { @@ -111713,7 +111771,7 @@ if !((*SrcList)(unsafe.Pointer(pTabList)).FnSrc > int32(uint64(unsafe.Sizeof(Bitmask(0)))*uint64(8))) { goto __2 } - Xsqlite3ErrorMsg(tls, pParse, ts+22472, libc.VaList(bp, int32(uint64(unsafe.Sizeof(Bitmask(0)))*uint64(8)))) + Xsqlite3ErrorMsg(tls, pParse, ts+22519, libc.VaList(bp, int32(uint64(unsafe.Sizeof(Bitmask(0)))*uint64(8)))) return uintptr(0) __2: ; @@ -111776,7 +111834,7 @@ (*WhereInfo)(unsafe.Pointer(pWInfo)).FeDistinct = U8(WHERE_DISTINCT_UNIQUE) __7: ; - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+22500, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+22547, 0) goto __5 __4: ii = 0 @@ -112658,7 +112716,7 @@ error_out: Xsqlite3_result_error(tls, - pCtx, ts+22518, -1) + pCtx, ts+22565, -1) } func nth_valueFinalizeFunc(tls *libc.TLS, pCtx uintptr) { @@ -112791,7 +112849,7 @@ (*NtileCtx)(unsafe.Pointer(p)).FnParam = Xsqlite3_value_int64(tls, *(*uintptr)(unsafe.Pointer(apArg))) if (*NtileCtx)(unsafe.Pointer(p)).FnParam <= int64(0) { Xsqlite3_result_error(tls, - pCtx, ts+22574, -1) + pCtx, ts+22621, -1) } } (*NtileCtx)(unsafe.Pointer(p)).FnTotal++ @@ -112881,17 +112939,17 @@ } } -var row_numberName = *(*[11]int8)(unsafe.Pointer(ts + 22619)) -var dense_rankName = *(*[11]int8)(unsafe.Pointer(ts + 22630)) -var rankName = *(*[5]int8)(unsafe.Pointer(ts + 22641)) -var percent_rankName = *(*[13]int8)(unsafe.Pointer(ts + 22646)) -var cume_distName = *(*[10]int8)(unsafe.Pointer(ts + 22659)) -var ntileName = *(*[6]int8)(unsafe.Pointer(ts + 22669)) -var last_valueName = *(*[11]int8)(unsafe.Pointer(ts + 22675)) -var nth_valueName = *(*[10]int8)(unsafe.Pointer(ts + 22686)) -var first_valueName = *(*[12]int8)(unsafe.Pointer(ts + 22696)) -var leadName = *(*[5]int8)(unsafe.Pointer(ts + 22708)) -var lagName = *(*[4]int8)(unsafe.Pointer(ts + 22713)) +var row_numberName = *(*[11]int8)(unsafe.Pointer(ts + 22666)) +var dense_rankName = *(*[11]int8)(unsafe.Pointer(ts + 22677)) +var rankName = *(*[5]int8)(unsafe.Pointer(ts + 22688)) +var percent_rankName = *(*[13]int8)(unsafe.Pointer(ts + 22693)) +var cume_distName = *(*[10]int8)(unsafe.Pointer(ts + 22706)) +var ntileName = *(*[6]int8)(unsafe.Pointer(ts + 22716)) +var last_valueName = *(*[11]int8)(unsafe.Pointer(ts + 22722)) +var nth_valueName = *(*[10]int8)(unsafe.Pointer(ts + 22733)) +var first_valueName = *(*[12]int8)(unsafe.Pointer(ts + 22743)) +var leadName = *(*[5]int8)(unsafe.Pointer(ts + 22755)) +var lagName = *(*[4]int8)(unsafe.Pointer(ts + 22760)) func noopStepFunc(tls *libc.TLS, p uintptr, n int32, a uintptr) { _ = p @@ -112937,7 +112995,7 @@ } } if p == uintptr(0) { - Xsqlite3ErrorMsg(tls, pParse, ts+22717, libc.VaList(bp, zName)) + Xsqlite3ErrorMsg(tls, pParse, ts+22764, libc.VaList(bp, zName)) } return p } @@ -112981,12 +113039,12 @@ ((*Window)(unsafe.Pointer(pWin)).FpStart != 0 || (*Window)(unsafe.Pointer(pWin)).FpEnd != 0) && ((*Window)(unsafe.Pointer(pWin)).FpOrderBy 
== uintptr(0) || (*ExprList)(unsafe.Pointer((*Window)(unsafe.Pointer(pWin)).FpOrderBy)).FnExpr != 1) { Xsqlite3ErrorMsg(tls, pParse, - ts+22736, 0) + ts+22783, 0) } else if (*FuncDef)(unsafe.Pointer(pFunc)).FfuncFlags&U32(SQLITE_FUNC_WINDOW) != 0 { var db uintptr = (*Parse)(unsafe.Pointer(pParse)).Fdb if (*Window)(unsafe.Pointer(pWin)).FpFilter != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+22807, 0) + ts+22854, 0) } else { *(*[8]WindowUpdate)(unsafe.Pointer(bp)) = [8]WindowUpdate{ {FzFunc: uintptr(unsafe.Pointer(&row_numberName)), FeFrmType: TK_ROWS, FeStart: TK_UNBOUNDED, FeEnd: TK_CURRENT}, @@ -113213,7 +113271,7 @@ if int32((*Expr)(unsafe.Pointer(pExpr)).Fop) == TK_AGG_FUNCTION && (*Expr)(unsafe.Pointer(pExpr)).FpAggInfo == uintptr(0) { Xsqlite3ErrorMsg(tls, (*Walker)(unsafe.Pointer(pWalker)).FpParse, - ts+22870, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(pExpr + 8)))) + ts+22917, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(pExpr + 8)))) } return WRC_Continue } @@ -113329,7 +113387,7 @@ if *(*uintptr)(unsafe.Pointer(bp + 48)) == uintptr(0) { *(*uintptr)(unsafe.Pointer(bp + 48)) = Xsqlite3ExprListAppend(tls, pParse, uintptr(0), - Xsqlite3Expr(tls, db, TK_INTEGER, ts+7933)) + Xsqlite3Expr(tls, db, TK_INTEGER, ts+7922)) } pSub = Xsqlite3SelectNew(tls, @@ -113444,7 +113502,7 @@ eStart == TK_FOLLOWING && (eEnd == TK_PRECEDING || eEnd == TK_CURRENT)) { goto __2 } - Xsqlite3ErrorMsg(tls, pParse, ts+22896, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+22943, 0) goto windowAllocErr __2: ; @@ -113509,15 +113567,15 @@ var zErr uintptr = uintptr(0) if (*Window)(unsafe.Pointer(pWin)).FpPartition != 0 { - zErr = ts + 22928 + zErr = ts + 22975 } else if (*Window)(unsafe.Pointer(pExist)).FpOrderBy != 0 && (*Window)(unsafe.Pointer(pWin)).FpOrderBy != 0 { - zErr = ts + 22945 + zErr = ts + 22992 } else if int32((*Window)(unsafe.Pointer(pExist)).FbImplicitFrame) == 0 { - zErr = ts + 22961 + zErr = ts + 23008 } if zErr != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+22981, libc.VaList(bp, zErr, (*Window)(unsafe.Pointer(pWin)).FzBase)) + ts+23028, libc.VaList(bp, zErr, (*Window)(unsafe.Pointer(pWin)).FzBase)) } else { (*Window)(unsafe.Pointer(pWin)).FpPartition = Xsqlite3ExprListDup(tls, db, (*Window)(unsafe.Pointer(pExist)).FpPartition, 0) if (*Window)(unsafe.Pointer(pExist)).FpOrderBy != 0 { @@ -113538,7 +113596,7 @@ (*Window)(unsafe.Pointer(pWin)).FpOwner = p if (*Expr)(unsafe.Pointer(p)).Fflags&U32(EP_Distinct) != 0 && int32((*Window)(unsafe.Pointer(pWin)).FeFrmType) != TK_FILTER { Xsqlite3ErrorMsg(tls, pParse, - ts+23014, 0) + ts+23061, 0) } } else { Xsqlite3WindowDelete(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pWin) @@ -113694,11 +113752,11 @@ } var azErr = [5]uintptr{ - ts + 23061, - ts + 23114, - ts + 22518, - ts + 23165, - ts + 23217, + ts + 23108, + ts + 23161, + ts + 22565, + ts + 23212, + ts + 23264, } var aOp1 = [5]int32{OP_Ge, OP_Ge, OP_Gt, OP_Ge, OP_Ge} @@ -115093,19 +115151,19 @@ } cnt++ if (*Select)(unsafe.Pointer(pLoop)).FpOrderBy != 0 || (*Select)(unsafe.Pointer(pLoop)).FpLimit != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+23267, + Xsqlite3ErrorMsg(tls, pParse, ts+23314, libc.VaList(bp, func() uintptr { if (*Select)(unsafe.Pointer(pLoop)).FpOrderBy != uintptr(0) { - return ts + 23309 + return ts + 23356 } - return ts + 23318 + return ts + 23365 }(), Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(pNext)).Fop)))) break } } if (*Select)(unsafe.Pointer(p)).FselFlags&U32(SF_MultiValue) == U32(0) && libc.AssignInt32(&mxSelect, *(*int32)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb + 136 + 4*4))) > 0 
&& cnt > mxSelect { - Xsqlite3ErrorMsg(tls, pParse, ts+23324, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+23371, 0) } } @@ -115173,7 +115231,7 @@ var p uintptr = Xsqlite3ExprListAppend(tls, pParse, pPrior, uintptr(0)) if (hasCollate != 0 || sortOrder != -1) && int32((*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).Finit.Fbusy) == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+23358, + Xsqlite3ErrorMsg(tls, pParse, ts+23405, libc.VaList(bp, (*Token)(unsafe.Pointer(pIdToken)).Fn, (*Token)(unsafe.Pointer(pIdToken)).Fz)) } Xsqlite3ExprListSetName(tls, pParse, p, pIdToken, 1) @@ -116270,7 +116328,7 @@ yy_pop_parser_stack(tls, yypParser) } - Xsqlite3ErrorMsg(tls, pParse, ts+23396, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+23443, 0) (*YyParser)(unsafe.Pointer(yypParser)).FpParse = pParse } @@ -117249,7 +117307,7 @@ *(*U32)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-1)*24 + 8)) = U32(TF_WithoutRowid | TF_NoVisibleRowid) } else { *(*U32)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-1)*24 + 8)) = U32(0) - Xsqlite3ErrorMsg(tls, pParse, ts+23418, libc.VaList(bp, (*Token)(unsafe.Pointer(yymsp+8)).Fn, (*Token)(unsafe.Pointer(yymsp+8)).Fz)) + Xsqlite3ErrorMsg(tls, pParse, ts+23465, libc.VaList(bp, (*Token)(unsafe.Pointer(yymsp+8)).Fn, (*Token)(unsafe.Pointer(yymsp+8)).Fz)) } } break @@ -117259,7 +117317,7 @@ *(*U32)(unsafe.Pointer(bp + 40)) = U32(TF_Strict) } else { *(*U32)(unsafe.Pointer(bp + 40)) = U32(0) - Xsqlite3ErrorMsg(tls, pParse, ts+23418, libc.VaList(bp+16, (*Token)(unsafe.Pointer(yymsp+8)).Fn, (*Token)(unsafe.Pointer(yymsp+8)).Fz)) + Xsqlite3ErrorMsg(tls, pParse, ts+23465, libc.VaList(bp+16, (*Token)(unsafe.Pointer(yymsp+8)).Fn, (*Token)(unsafe.Pointer(yymsp+8)).Fz)) } } *(*U32)(unsafe.Pointer(yymsp + 8)) = *(*U32)(unsafe.Pointer(bp + 40)) @@ -118002,7 +118060,7 @@ case uint32(157): { Xsqlite3SrcListIndexedBy(tls, pParse, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-5)*24 + 8)), yymsp+libc.UintptrFromInt32(-4)*24+8) - Xsqlite3ExprListCheckLength(tls, pParse, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-2)*24 + 8)), ts+23445) + Xsqlite3ExprListCheckLength(tls, pParse, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-2)*24 + 8)), ts+23492) if *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-1)*24 + 8)) != 0 { var pFromClause uintptr = *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-1)*24 + 8)) if (*SrcList)(unsafe.Pointer(pFromClause)).FnSrc > 1 { @@ -118166,7 +118224,7 @@ *(*Token)(unsafe.Pointer(bp + 128)) = *(*Token)(unsafe.Pointer(yymsp + 8)) if int32((*Parse)(unsafe.Pointer(pParse)).Fnested) == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+23454, libc.VaList(bp+32, bp+128)) + Xsqlite3ErrorMsg(tls, pParse, ts+23501, libc.VaList(bp+32, bp+128)) *(*uintptr)(unsafe.Pointer(yymsp + 8)) = uintptr(0) } else { *(*uintptr)(unsafe.Pointer(yymsp + 8)) = Xsqlite3PExpr(tls, pParse, TK_REGISTER, uintptr(0), uintptr(0)) @@ -118383,9 +118441,9 @@ Xsqlite3ExprUnmapAndDelete(tls, pParse, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-4)*24 + 8))) *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-4)*24 + 8)) = Xsqlite3Expr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, TK_STRING, func() uintptr { if *(*int32)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-3)*24 + 8)) != 0 { - return ts + 7167 + return ts + 8097 } - return ts + 7172 + return ts + 8102 }()) if *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-4)*24 + 8)) != 0 { Xsqlite3ExprIdToTrueFalse(tls, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-4)*24 + 8))) @@ -118669,19 
+118727,19 @@ { *(*Token)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-2)*24 + 8)) = *(*Token)(unsafe.Pointer(yymsp + 8)) Xsqlite3ErrorMsg(tls, pParse, - ts+23478, 0) + ts+23525, 0) } break case uint32(271): { Xsqlite3ErrorMsg(tls, pParse, - ts+23573, 0) + ts+23620, 0) } break case uint32(272): { Xsqlite3ErrorMsg(tls, pParse, - ts+23657, 0) + ts+23704, 0) } break case uint32(273): @@ -119060,9 +119118,9 @@ _ = yymajor if *(*int8)(unsafe.Pointer((*Token)(unsafe.Pointer(bp + 8)).Fz)) != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+23454, libc.VaList(bp, bp+8)) + Xsqlite3ErrorMsg(tls, pParse, ts+23501, libc.VaList(bp, bp+8)) } else { - Xsqlite3ErrorMsg(tls, pParse, ts+23742, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+23789, 0) } (*YyParser)(unsafe.Pointer(yypParser)).FpParse = pParse @@ -119830,7 +119888,7 @@ } else { (*Token)(unsafe.Pointer(bp + 2464)).Fz = zSql (*Token)(unsafe.Pointer(bp + 2464)).Fn = uint32(n) - Xsqlite3ErrorMsg(tls, pParse, ts+23759, libc.VaList(bp, bp+2464)) + Xsqlite3ErrorMsg(tls, pParse, ts+23806, libc.VaList(bp, bp+2464)) break } } @@ -119853,7 +119911,7 @@ if (*Parse)(unsafe.Pointer(pParse)).FzErrMsg == uintptr(0) { (*Parse)(unsafe.Pointer(pParse)).FzErrMsg = Xsqlite3MPrintf(tls, db, ts+3658, libc.VaList(bp+8, Xsqlite3ErrStr(tls, (*Parse)(unsafe.Pointer(pParse)).Frc))) } - Xsqlite3_log(tls, (*Parse)(unsafe.Pointer(pParse)).Frc, ts+23784, libc.VaList(bp+16, (*Parse)(unsafe.Pointer(pParse)).FzErrMsg, (*Parse)(unsafe.Pointer(pParse)).FzTail)) + Xsqlite3_log(tls, (*Parse)(unsafe.Pointer(pParse)).Frc, ts+23831, libc.VaList(bp+16, (*Parse)(unsafe.Pointer(pParse)).FzErrMsg, (*Parse)(unsafe.Pointer(pParse)).FzTail)) nErr++ } (*Parse)(unsafe.Pointer(pParse)).FzTail = zSql @@ -120026,7 +120084,7 @@ fallthrough case 'C': { - if nId == 6 && Xsqlite3_strnicmp(tls, zSql, ts+23795, 6) == 0 { + if nId == 6 && Xsqlite3_strnicmp(tls, zSql, ts+23842, 6) == 0 { token = U8(TkCREATE) } else { token = U8(TkOTHER) @@ -120039,11 +120097,11 @@ fallthrough case 'T': { - if nId == 7 && Xsqlite3_strnicmp(tls, zSql, ts+20381, 7) == 0 { + if nId == 7 && Xsqlite3_strnicmp(tls, zSql, ts+20428, 7) == 0 { token = U8(TkTRIGGER) - } else if nId == 4 && Xsqlite3_strnicmp(tls, zSql, ts+23802, 4) == 0 { + } else if nId == 4 && Xsqlite3_strnicmp(tls, zSql, ts+23849, 4) == 0 { token = U8(TkTEMP) - } else if nId == 9 && Xsqlite3_strnicmp(tls, zSql, ts+23807, 9) == 0 { + } else if nId == 9 && Xsqlite3_strnicmp(tls, zSql, ts+23854, 9) == 0 { token = U8(TkTEMP) } else { token = U8(TkOTHER) @@ -120056,9 +120114,9 @@ fallthrough case 'E': { - if nId == 3 && Xsqlite3_strnicmp(tls, zSql, ts+23817, 3) == 0 { + if nId == 3 && Xsqlite3_strnicmp(tls, zSql, ts+23864, 3) == 0 { token = U8(TkEND) - } else if nId == 7 && Xsqlite3_strnicmp(tls, zSql, ts+23821, 7) == 0 { + } else if nId == 7 && Xsqlite3_strnicmp(tls, zSql, ts+23868, 7) == 0 { token = U8(TkEXPLAIN) } else { token = U8(TkOTHER) @@ -120292,7 +120350,7 @@ var rc int32 = SQLITE_OK if Xsqlite3Config.FisInit != 0 { - return Xsqlite3MisuseError(tls, 174337) + return Xsqlite3MisuseError(tls, 174426) } ap = va @@ -120867,7 +120925,7 @@ return SQLITE_OK } if !(Xsqlite3SafetyCheckSickOrOk(tls, db) != 0) { - return Xsqlite3MisuseError(tls, 175111) + return Xsqlite3MisuseError(tls, 175200) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) if int32((*Sqlite3)(unsafe.Pointer(db)).FmTrace)&SQLITE_TRACE_CLOSE != 0 { @@ -120882,7 +120940,7 @@ if !(forceZombie != 0) && connectionIsBusy(tls, db) != 0 { Xsqlite3ErrorWithMsg(tls, db, SQLITE_BUSY, - ts+23829, 0) + ts+23876, 
0) Xsqlite3_mutex_leave(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) return SQLITE_BUSY } @@ -121073,23 +121131,23 @@ // Return a static string that describes the kind of error specified in the // argument. func Xsqlite3ErrStr(tls *libc.TLS, rc int32) uintptr { - var zErr uintptr = ts + 23897 + var zErr uintptr = ts + 23944 switch rc { case SQLITE_ABORT | int32(2)<<8: { - zErr = ts + 23911 + zErr = ts + 23958 break } case SQLITE_ROW: { - zErr = ts + 23933 + zErr = ts + 23980 break } case SQLITE_DONE: { - zErr = ts + 23955 + zErr = ts + 24002 break } @@ -121107,35 +121165,35 @@ } var aMsg = [29]uintptr{ - ts + 23978, - ts + 23991, + ts + 24025, + ts + 24038, uintptr(0), - ts + 24007, - ts + 24032, - ts + 24046, - ts + 24065, + ts + 24054, + ts + 24079, + ts + 24093, + ts + 24112, ts + 1483, - ts + 24090, - ts + 24127, - ts + 24139, - ts + 24154, - ts + 24187, - ts + 24205, - ts + 24230, - ts + 24259, + ts + 24137, + ts + 24174, + ts + 24186, + ts + 24201, + ts + 24234, + ts + 24252, + ts + 24277, + ts + 24306, uintptr(0), ts + 6241, ts + 5737, - ts + 24276, - ts + 24294, - ts + 24312, - uintptr(0), - ts + 24346, + ts + 24323, + ts + 24341, + ts + 24359, uintptr(0), - ts + 24367, ts + 24393, - ts + 24416, - ts + 24437, + uintptr(0), + ts + 24414, + ts + 24440, + ts + 24463, + ts + 24484, } func sqliteDefaultBusyCallback(tls *libc.TLS, ptr uintptr, count int32) int32 { @@ -121256,7 +121314,7 @@ libc.Bool32(xValue == uintptr(0)) != libc.Bool32(xInverse == uintptr(0)) || (nArg < -1 || nArg > SQLITE_MAX_FUNCTION_ARG) || 255 < Xsqlite3Strlen30(tls, zFunctionName) { - return Xsqlite3MisuseError(tls, 175758) + return Xsqlite3MisuseError(tls, 175847) } extraFlags = enc & (SQLITE_DETERMINISTIC | SQLITE_DIRECTONLY | SQLITE_SUBTYPE | SQLITE_INNOCUOUS) @@ -121301,7 +121359,7 @@ if p != 0 && (*FuncDef)(unsafe.Pointer(p)).FfuncFlags&U32(SQLITE_FUNC_ENCMASK) == U32(enc) && int32((*FuncDef)(unsafe.Pointer(p)).FnArg) == nArg { if (*Sqlite3)(unsafe.Pointer(db)).FnVdbeActive != 0 { Xsqlite3ErrorWithMsg(tls, db, SQLITE_BUSY, - ts+24453, 0) + ts+24500, 0) return SQLITE_BUSY } else { @@ -121418,7 +121476,7 @@ _ = NotUsed _ = NotUsed2 zErr = Xsqlite3_mprintf(tls, - ts+24516, libc.VaList(bp, zName)) + ts+24563, libc.VaList(bp, zName)) Xsqlite3_result_error(tls, context, zErr, -1) Xsqlite3_free(tls, zErr) } @@ -121654,7 +121712,7 @@ } if iDb < 0 { rc = SQLITE_ERROR - Xsqlite3ErrorWithMsg(tls, db, SQLITE_ERROR, ts+24567, libc.VaList(bp, zDb)) + Xsqlite3ErrorWithMsg(tls, db, SQLITE_ERROR, ts+24614, libc.VaList(bp, zDb)) } else { (*Sqlite3)(unsafe.Pointer(db)).FbusyHandler.FnBusy = 0 rc = Xsqlite3Checkpoint(tls, db, iDb, eMode, pnLog, pnCkpt) @@ -121747,7 +121805,7 @@ return Xsqlite3ErrStr(tls, SQLITE_NOMEM) } if !(Xsqlite3SafetyCheckSickOrOk(tls, db) != 0) { - return Xsqlite3ErrStr(tls, Xsqlite3MisuseError(tls, 176503)) + return Xsqlite3ErrStr(tls, Xsqlite3MisuseError(tls, 176592)) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) if (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { @@ -121817,7 +121875,7 @@ // passed to this function, we assume a malloc() failed during sqlite3_open(). 
func Xsqlite3_errcode(tls *libc.TLS, db uintptr) int32 { if db != 0 && !(Xsqlite3SafetyCheckSickOrOk(tls, db) != 0) { - return Xsqlite3MisuseError(tls, 176582) + return Xsqlite3MisuseError(tls, 176671) } if !(db != 0) || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { return SQLITE_NOMEM @@ -121827,7 +121885,7 @@ func Xsqlite3_extended_errcode(tls *libc.TLS, db uintptr) int32 { if db != 0 && !(Xsqlite3SafetyCheckSickOrOk(tls, db) != 0) { - return Xsqlite3MisuseError(tls, 176591) + return Xsqlite3MisuseError(tls, 176680) } if !(db != 0) || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { return SQLITE_NOMEM @@ -121859,14 +121917,14 @@ enc2 = SQLITE_UTF16LE } if enc2 < SQLITE_UTF8 || enc2 > SQLITE_UTF16BE { - return Xsqlite3MisuseError(tls, 176639) + return Xsqlite3MisuseError(tls, 176728) } pColl = Xsqlite3FindCollSeq(tls, db, U8(enc2), zName, 0) if pColl != 0 && (*CollSeq)(unsafe.Pointer(pColl)).FxCmp != 0 { if (*Sqlite3)(unsafe.Pointer(db)).FnVdbeActive != 0 { Xsqlite3ErrorWithMsg(tls, db, SQLITE_BUSY, - ts+24588, 0) + ts+24635, 0) return SQLITE_BUSY } Xsqlite3ExpirePreparedStatements(tls, db, 0) @@ -121996,7 +122054,7 @@ if !((flags&uint32(SQLITE_OPEN_URI) != 0 || Xsqlite3Config.FbOpenUri != 0) && - nUri >= 5 && libc.Xmemcmp(tls, zUri, ts+24656, uint64(5)) == 0) { + nUri >= 5 && libc.Xmemcmp(tls, zUri, ts+24703, uint64(5)) == 0) { goto __1 } iOut = 0 @@ -122041,10 +122099,10 @@ goto __8 __9: ; - if !(iIn != 7 && (iIn != 16 || libc.Xmemcmp(tls, ts+24662, zUri+7, uint64(9)) != 0)) { + if !(iIn != 7 && (iIn != 16 || libc.Xmemcmp(tls, ts+24709, zUri+7, uint64(9)) != 0)) { goto __10 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24672, + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24719, libc.VaList(bp, iIn-7, zUri+7)) rc = SQLITE_ERROR goto parse_uri_out @@ -122149,7 +122207,7 @@ zVal = zOpt + uintptr(nOpt+1) nVal = Xsqlite3Strlen30(tls, zVal) - if !(nOpt == 3 && libc.Xmemcmp(tls, ts+24700, zOpt, uint64(3)) == 0) { + if !(nOpt == 3 && libc.Xmemcmp(tls, ts+24747, zOpt, uint64(3)) == 0) { goto __29 } zVfs = zVal @@ -122160,17 +122218,17 @@ mask = 0 limit = 0 - if !(nOpt == 5 && libc.Xmemcmp(tls, ts+24704, zOpt, uint64(5)) == 0) { + if !(nOpt == 5 && libc.Xmemcmp(tls, ts+24751, zOpt, uint64(5)) == 0) { goto __31 } mask = SQLITE_OPEN_SHAREDCACHE | SQLITE_OPEN_PRIVATECACHE aMode = uintptr(unsafe.Pointer(&aCacheMode)) limit = mask - zModeType = ts + 24704 + zModeType = ts + 24751 __31: ; - if !(nOpt == 4 && libc.Xmemcmp(tls, ts+24710, zOpt, uint64(4)) == 0) { + if !(nOpt == 4 && libc.Xmemcmp(tls, ts+24757, zOpt, uint64(4)) == 0) { goto __32 } @@ -122208,7 +122266,7 @@ if !(mode == 0) { goto __38 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24715, libc.VaList(bp+16, zModeType, zVal)) + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24762, libc.VaList(bp+16, zModeType, zVal)) rc = SQLITE_ERROR goto parse_uri_out __38: @@ -122216,7 +122274,7 @@ if !(mode&libc.CplInt32(SQLITE_OPEN_MEMORY) > limit) { goto __39 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24735, + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24782, libc.VaList(bp+32, zModeType, zVal)) rc = SQLITE_PERM goto parse_uri_out @@ -122256,7 +122314,7 @@ if !(*(*uintptr)(unsafe.Pointer(ppVfs)) == uintptr(0)) { goto __42 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24759, libc.VaList(bp+48, zVfs)) + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24806, libc.VaList(bp+48, 
zVfs)) rc = SQLITE_ERROR __42: ; @@ -122280,14 +122338,14 @@ } var aCacheMode = [3]OpenMode{ - {Fz: ts + 24775, Fmode: SQLITE_OPEN_SHAREDCACHE}, - {Fz: ts + 24782, Fmode: SQLITE_OPEN_PRIVATECACHE}, + {Fz: ts + 24822, Fmode: SQLITE_OPEN_SHAREDCACHE}, + {Fz: ts + 24829, Fmode: SQLITE_OPEN_PRIVATECACHE}, {}, } var aOpenMode = [5]OpenMode{ - {Fz: ts + 24790, Fmode: SQLITE_OPEN_READONLY}, - {Fz: ts + 24793, Fmode: SQLITE_OPEN_READWRITE}, - {Fz: ts + 24796, Fmode: SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE}, + {Fz: ts + 24837, Fmode: SQLITE_OPEN_READONLY}, + {Fz: ts + 24840, Fmode: SQLITE_OPEN_READWRITE}, + {Fz: ts + 24843, Fmode: SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE}, {Fz: ts + 17784, Fmode: SQLITE_OPEN_MEMORY}, {}, } @@ -122434,10 +122492,10 @@ createCollation(tls, db, uintptr(unsafe.Pointer(&Xsqlite3StrBINARY)), uint8(SQLITE_UTF16LE), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr, int32, uintptr) int32 }{binCollFunc})), uintptr(0)) - createCollation(tls, db, ts+22313, uint8(SQLITE_UTF8), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + createCollation(tls, db, ts+22360, uint8(SQLITE_UTF8), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr, int32, uintptr) int32 }{nocaseCollatingFunc})), uintptr(0)) - createCollation(tls, db, ts+24800, uint8(SQLITE_UTF8), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + createCollation(tls, db, ts+24847, uint8(SQLITE_UTF8), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr, int32, uintptr) int32 }{rtrimCollFunc})), uintptr(0)) if !((*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0) { @@ -122451,7 +122509,7 @@ if !(int32(1)<<(*(*uint32)(unsafe.Pointer(bp + 8))&uint32(7))&0x46 == 0) { goto __16 } - rc = Xsqlite3MisuseError(tls, 177308) + rc = Xsqlite3MisuseError(tls, 177397) goto __17 __16: rc = Xsqlite3ParseUri(tls, zVfs, zFilename, bp+8, db, bp+16, bp+24) @@ -122504,7 +122562,7 @@ (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb)).FzDbSName = ts + 6844 (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb)).Fsafety_level = U8(SQLITE_DEFAULT_SYNCHRONOUS + 1) - (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + 1*32)).FzDbSName = ts + 23802 + (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + 1*32)).FzDbSName = ts + 23849 (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + 1*32)).Fsafety_level = U8(PAGER_SYNCHRONOUS_OFF) (*Sqlite3)(unsafe.Pointer(db)).FeOpenState = U8(SQLITE_STATE_OPEN) @@ -122609,7 +122667,7 @@ return rc } if zFilename == uintptr(0) { - zFilename = ts + 24806 + zFilename = ts + 24853 } pVal = Xsqlite3ValueNew(tls, uintptr(0)) Xsqlite3ValueSetStr(tls, pVal, -1, zFilename, uint8(SQLITE_UTF16LE), uintptr(0)) @@ -122712,21 +122770,21 @@ bp := tls.Alloc(24) defer tls.Free(24) - Xsqlite3_log(tls, iErr, ts+24809, + Xsqlite3_log(tls, iErr, ts+24856, libc.VaList(bp, zType, lineno, uintptr(20)+Xsqlite3_sourceid(tls))) return iErr } func Xsqlite3CorruptError(tls *libc.TLS, lineno int32) int32 { - return Xsqlite3ReportError(tls, SQLITE_CORRUPT, lineno, ts+24834) + return Xsqlite3ReportError(tls, SQLITE_CORRUPT, lineno, ts+24881) } func Xsqlite3MisuseError(tls *libc.TLS, lineno int32) int32 { - return Xsqlite3ReportError(tls, SQLITE_MISUSE, lineno, ts+24854) + return Xsqlite3ReportError(tls, SQLITE_MISUSE, lineno, ts+24901) } func Xsqlite3CantopenError(tls *libc.TLS, lineno int32) int32 { - return Xsqlite3ReportError(tls, SQLITE_CANTOPEN, lineno, ts+24861) + return Xsqlite3ReportError(tls, 
SQLITE_CANTOPEN, lineno, ts+24908) } // This is a convenience routine that makes sure that all thread-specific @@ -122884,7 +122942,7 @@ goto __20 } Xsqlite3DbFree(tls, db, *(*uintptr)(unsafe.Pointer(bp + 24))) - *(*uintptr)(unsafe.Pointer(bp + 24)) = Xsqlite3MPrintf(tls, db, ts+24878, libc.VaList(bp, zTableName, + *(*uintptr)(unsafe.Pointer(bp + 24)) = Xsqlite3MPrintf(tls, db, ts+24925, libc.VaList(bp, zTableName, zColumnName)) rc = SQLITE_ERROR __20: @@ -123540,7 +123598,7 @@ azCompileOpt = Xsqlite3CompileOptions(tls, bp) - if Xsqlite3_strnicmp(tls, zOptName, ts+24906, 7) == 0 { + if Xsqlite3_strnicmp(tls, zOptName, ts+24953, 7) == 0 { zOptName += uintptr(7) } n = Xsqlite3Strlen30(tls, zOptName) @@ -123658,7 +123716,7 @@ Xsqlite3ErrorWithMsg(tls, db, rc, func() uintptr { if rc != 0 { - return ts + 24914 + return ts + 24961 } return uintptr(0) }(), 0) @@ -123836,7 +123894,7 @@ type JsonParse = JsonParse1 var jsonType = [8]uintptr{ - ts + 6584, ts + 7167, ts + 7172, ts + 6594, ts + 6589, ts + 8408, ts + 24937, ts + 24943, + ts + 6584, ts + 8097, ts + 8102, ts + 6594, ts + 6589, ts + 8408, ts + 24984, ts + 24990, } func jsonZero(tls *libc.TLS, p uintptr) { @@ -123989,7 +124047,7 @@ *(*int8)(unsafe.Pointer((*JsonString)(unsafe.Pointer(p)).FzBuf + uintptr(libc.PostIncUint64(&(*JsonString)(unsafe.Pointer(p)).FnUsed, 1)))) = int8('0') *(*int8)(unsafe.Pointer((*JsonString)(unsafe.Pointer(p)).FzBuf + uintptr(libc.PostIncUint64(&(*JsonString)(unsafe.Pointer(p)).FnUsed, 1)))) = int8('0') *(*int8)(unsafe.Pointer((*JsonString)(unsafe.Pointer(p)).FzBuf + uintptr(libc.PostIncUint64(&(*JsonString)(unsafe.Pointer(p)).FnUsed, 1)))) = int8('0' + int32(c)>>4) - c = uint8(*(*int8)(unsafe.Pointer(ts + 24950 + uintptr(int32(c)&0xf)))) + c = uint8(*(*int8)(unsafe.Pointer(ts + 24997 + uintptr(int32(c)&0xf)))) __8: ; __6: @@ -124044,7 +124102,7 @@ default: { if int32((*JsonString)(unsafe.Pointer(p)).FbErr) == 0 { - Xsqlite3_result_error(tls, (*JsonString)(unsafe.Pointer(p)).FpCtx, ts+24967, -1) + Xsqlite3_result_error(tls, (*JsonString)(unsafe.Pointer(p)).FpCtx, ts+25014, -1) (*JsonString)(unsafe.Pointer(p)).FbErr = U8(2) jsonReset(tls, p) } @@ -124108,13 +124166,13 @@ } case JSON_TRUE: { - jsonAppendRaw(tls, pOut, ts+7167, uint32(4)) + jsonAppendRaw(tls, pOut, ts+8097, uint32(4)) break } case JSON_FALSE: { - jsonAppendRaw(tls, pOut, ts+7172, uint32(5)) + jsonAppendRaw(tls, pOut, ts+8102, uint32(5)) break } @@ -124664,12 +124722,12 @@ jsonParseAddNode(tls, pParse, uint32(JSON_NULL), uint32(0), uintptr(0)) return int32(i + U32(4)) } else if int32(c) == 't' && - libc.Xstrncmp(tls, z+uintptr(i), ts+7167, uint64(4)) == 0 && + libc.Xstrncmp(tls, z+uintptr(i), ts+8097, uint64(4)) == 0 && !(int32(Xsqlite3CtypeMap[uint8(*(*int8)(unsafe.Pointer(z + uintptr(i+U32(4)))))])&0x06 != 0) { jsonParseAddNode(tls, pParse, uint32(JSON_TRUE), uint32(0), uintptr(0)) return int32(i + U32(4)) } else if int32(c) == 'f' && - libc.Xstrncmp(tls, z+uintptr(i), ts+7172, uint64(5)) == 0 && + libc.Xstrncmp(tls, z+uintptr(i), ts+8102, uint64(5)) == 0 && !(int32(Xsqlite3CtypeMap[uint8(*(*int8)(unsafe.Pointer(z + uintptr(i+U32(5)))))])&0x06 != 0) { jsonParseAddNode(tls, pParse, uint32(JSON_FALSE), uint32(0), uintptr(0)) return int32(i + U32(5)) @@ -124770,7 +124828,7 @@ if (*JsonParse)(unsafe.Pointer(pParse)).Foom != 0 { Xsqlite3_result_error_nomem(tls, pCtx) } else { - Xsqlite3_result_error(tls, pCtx, ts+24996, -1) + Xsqlite3_result_error(tls, pCtx, ts+25043, -1) } } jsonParseReset(tls, pParse) @@ -125076,7 +125134,7 @@ } if 
int32(*(*int8)(unsafe.Pointer(zPath))) == '.' { jsonParseAddNode(tls, pParse, uint32(JSON_OBJECT), uint32(0), uintptr(0)) - } else if libc.Xstrncmp(tls, zPath, ts+25011, uint64(3)) == 0 { + } else if libc.Xstrncmp(tls, zPath, ts+25058, uint64(3)) == 0 { jsonParseAddNode(tls, pParse, uint32(JSON_ARRAY), uint32(0), uintptr(0)) } else { return uintptr(0) @@ -125091,7 +125149,7 @@ bp := tls.Alloc(8) defer tls.Free(8) - return Xsqlite3_mprintf(tls, ts+25015, libc.VaList(bp, zErr)) + return Xsqlite3_mprintf(tls, ts+25062, libc.VaList(bp, zErr)) } func jsonLookup(tls *libc.TLS, pParse uintptr, zPath uintptr, pApnd uintptr, pCtx uintptr) uintptr { @@ -125145,7 +125203,7 @@ bp := tls.Alloc(8) defer tls.Free(8) - var zMsg uintptr = Xsqlite3_mprintf(tls, ts+25041, + var zMsg uintptr = Xsqlite3_mprintf(tls, ts+25088, libc.VaList(bp, zFuncName)) Xsqlite3_result_error(tls, pCtx, zMsg, -1) Xsqlite3_free(tls, zMsg) @@ -125250,11 +125308,11 @@ if int32(*(*int8)(unsafe.Pointer(zPath))) != '$' { jsonInit(tls, bp, ctx) if int32(Xsqlite3CtypeMap[uint8(*(*int8)(unsafe.Pointer(zPath)))])&0x04 != 0 { - jsonAppendRaw(tls, bp, ts+25084, uint32(2)) + jsonAppendRaw(tls, bp, ts+25131, uint32(2)) jsonAppendRaw(tls, bp, zPath, uint32(int32(libc.Xstrlen(tls, zPath)))) jsonAppendRaw(tls, bp, ts+5401, uint32(2)) } else { - jsonAppendRaw(tls, bp, ts+25087, uint32(1+libc.Bool32(int32(*(*int8)(unsafe.Pointer(zPath))) != '['))) + jsonAppendRaw(tls, bp, ts+25134, uint32(1+libc.Bool32(int32(*(*int8)(unsafe.Pointer(zPath))) != '['))) jsonAppendRaw(tls, bp, zPath, uint32(int32(libc.Xstrlen(tls, zPath)))) jsonAppendChar(tls, bp, int8(0)) } @@ -125411,14 +125469,14 @@ if argc&1 != 0 { Xsqlite3_result_error(tls, ctx, - ts+25090, -1) + ts+25137, -1) return } jsonInit(tls, bp, ctx) jsonAppendChar(tls, bp, int8('{')) for i = 0; i < argc; i = i + 2 { if Xsqlite3_value_type(tls, *(*uintptr)(unsafe.Pointer(argv + uintptr(i)*8))) != SQLITE_TEXT { - Xsqlite3_result_error(tls, ctx, ts+25141, -1) + Xsqlite3_result_error(tls, ctx, ts+25188, -1) jsonReset(tls, bp) return } @@ -125588,9 +125646,9 @@ } jsonWrongNumArgs(tls, ctx, func() uintptr { if bIsSet != 0 { - return ts + 25175 + return ts + 25222 } - return ts + 25179 + return ts + 25226 }()) return __2: @@ -125723,7 +125781,7 @@ (*JsonString)(unsafe.Pointer(pStr)).FnUsed-- } } else { - Xsqlite3_result_text(tls, ctx, ts+25186, 2, uintptr(0)) + Xsqlite3_result_text(tls, ctx, ts+25233, 2, uintptr(0)) } Xsqlite3_result_subtype(tls, ctx, uint32(JSON_SUBTYPE)) } @@ -125820,7 +125878,7 @@ (*JsonString)(unsafe.Pointer(pStr)).FnUsed-- } } else { - Xsqlite3_result_text(tls, ctx, ts+25189, 2, uintptr(0)) + Xsqlite3_result_text(tls, ctx, ts+25236, 2, uintptr(0)) } Xsqlite3_result_subtype(tls, ctx, uint32(JSON_SUBTYPE)) } @@ -125864,7 +125922,7 @@ _ = argc _ = pAux rc = Xsqlite3_declare_vtab(tls, db, - ts+25192) + ts+25239) if rc == SQLITE_OK { pNew = libc.AssignPtrUintptr(ppVtab, Xsqlite3_malloc(tls, int32(unsafe.Sizeof(Sqlite3_vtab{})))) if pNew == uintptr(0) { @@ -125995,7 +126053,7 @@ nn = nn - 2 } } - jsonPrintf(tls, nn+2, pStr, ts+25275, libc.VaList(bp, nn, z)) + jsonPrintf(tls, nn+2, pStr, ts+25322, libc.VaList(bp, nn, z)) } func jsonEachComputePath(tls *libc.TLS, p uintptr, pStr uintptr, i U32) { @@ -126014,7 +126072,7 @@ pNode = (*JsonEachCursor)(unsafe.Pointer(p)).FsParse.FaNode + uintptr(i)*16 pUp = (*JsonEachCursor)(unsafe.Pointer(p)).FsParse.FaNode + uintptr(iUp)*16 if int32((*JsonNode)(unsafe.Pointer(pUp)).FeType) == JSON_ARRAY { - jsonPrintf(tls, 30, pStr, ts+25281, libc.VaList(bp, 
*(*U32)(unsafe.Pointer(pUp + 8)))) + jsonPrintf(tls, 30, pStr, ts+25328, libc.VaList(bp, *(*U32)(unsafe.Pointer(pUp + 8)))) } else { if int32((*JsonNode)(unsafe.Pointer(pNode)).FjnFlags)&JNODE_LABEL == 0 { pNode -= 16 @@ -126110,7 +126168,7 @@ jsonAppendChar(tls, bp+8, int8('$')) } if int32((*JsonEachCursor)(unsafe.Pointer(p)).FeType) == JSON_ARRAY { - jsonPrintf(tls, 30, bp+8, ts+25281, libc.VaList(bp, (*JsonEachCursor)(unsafe.Pointer(p)).FiRowid)) + jsonPrintf(tls, 30, bp+8, ts+25328, libc.VaList(bp, (*JsonEachCursor)(unsafe.Pointer(p)).FiRowid)) } else if int32((*JsonEachCursor)(unsafe.Pointer(p)).FeType) == JSON_OBJECT { jsonAppendObjectPathElement(tls, bp+8, pThis) } @@ -126134,7 +126192,7 @@ { var zRoot uintptr = (*JsonEachCursor)(unsafe.Pointer(p)).FzRoot if zRoot == uintptr(0) { - zRoot = ts + 25286 + zRoot = ts + 25333 } Xsqlite3_result_text(tls, ctx, zRoot, -1, uintptr(0)) break @@ -126256,7 +126314,7 @@ var rc int32 = SQLITE_NOMEM if int32((*JsonEachCursor)(unsafe.Pointer(p)).FsParse.Foom) == 0 { Xsqlite3_free(tls, (*Sqlite3_vtab)(unsafe.Pointer((*Sqlite3_vtab_cursor)(unsafe.Pointer(cur)).FpVtab)).FzErrMsg) - (*Sqlite3_vtab)(unsafe.Pointer((*Sqlite3_vtab_cursor)(unsafe.Pointer(cur)).FpVtab)).FzErrMsg = Xsqlite3_mprintf(tls, ts+24996, 0) + (*Sqlite3_vtab)(unsafe.Pointer((*Sqlite3_vtab_cursor)(unsafe.Pointer(cur)).FpVtab)).FzErrMsg = Xsqlite3_mprintf(tls, ts+25043, 0) if (*Sqlite3_vtab)(unsafe.Pointer((*Sqlite3_vtab_cursor)(unsafe.Pointer(cur)).FpVtab)).FzErrMsg != 0 { rc = SQLITE_ERROR } @@ -126351,25 +126409,25 @@ } var aJsonFunc = [19]FuncDef{ - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25288}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25293}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25304}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25304}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25322}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_JSON)), FxSFunc: 0, FzName: ts + 25335}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_SQL)), FxSFunc: 0, FzName: ts + 25338}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25342}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25354}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25366}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25377}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25388}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | 
SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25400}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_ISSET)), FxSFunc: 0, FzName: ts + 25413}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25422}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25422}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25432}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_UTF8 | 0*SQLITE_FUNC_NEEDCOLL | SQLITE_SUBTYPE | SQLITE_UTF8 | SQLITE_DETERMINISTIC), FxSFunc: 0, FxFinalize: 0, FxValue: 0, FxInverse: 0, FzName: ts + 25443}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_UTF8 | 0*SQLITE_FUNC_NEEDCOLL | SQLITE_SUBTYPE | SQLITE_UTF8 | SQLITE_DETERMINISTIC), FxSFunc: 0, FxFinalize: 0, FxValue: 0, FxInverse: 0, FzName: ts + 25460}} + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25335}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25340}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25351}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25351}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25369}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_JSON)), FxSFunc: 0, FzName: ts + 25382}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_SQL)), FxSFunc: 0, FzName: ts + 25385}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25389}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25401}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25413}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25424}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25435}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25447}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_ISSET)), FxSFunc: 0, FzName: ts + 25460}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25469}, + {FnArg: int8(2), 
FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25469}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25479}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_UTF8 | 0*SQLITE_FUNC_NEEDCOLL | SQLITE_SUBTYPE | SQLITE_UTF8 | SQLITE_DETERMINISTIC), FxSFunc: 0, FxFinalize: 0, FxValue: 0, FxInverse: 0, FzName: ts + 25490}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_UTF8 | 0*SQLITE_FUNC_NEEDCOLL | SQLITE_SUBTYPE | SQLITE_UTF8 | SQLITE_DETERMINISTIC), FxSFunc: 0, FxFinalize: 0, FxValue: 0, FxInverse: 0, FzName: ts + 25507}} // Register the JSON table-valued functions func Xsqlite3JsonTableFunctions(tls *libc.TLS, db uintptr) int32 { @@ -126388,8 +126446,8 @@ FzName uintptr FpModule uintptr }{ - {FzName: ts + 25478, FpModule: 0}, - {FzName: ts + 25488, FpModule: 0}, + {FzName: ts + 25525, FpModule: 0}, + {FzName: ts + 25535, FpModule: 0}, } type Rtree1 = struct { @@ -126649,11 +126707,11 @@ } } if (*Rtree)(unsafe.Pointer(pRtree)).FpNodeBlob == uintptr(0) { - var zTab uintptr = Xsqlite3_mprintf(tls, ts+25498, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) + var zTab uintptr = Xsqlite3_mprintf(tls, ts+25545, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) if zTab == uintptr(0) { return SQLITE_NOMEM } - rc = Xsqlite3_blob_open(tls, (*Rtree)(unsafe.Pointer(pRtree)).Fdb, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, zTab, ts+25506, iNode, 0, + rc = Xsqlite3_blob_open(tls, (*Rtree)(unsafe.Pointer(pRtree)).Fdb, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, zTab, ts+25553, iNode, 0, pRtree+112) Xsqlite3_free(tls, zTab) } @@ -126864,7 +126922,7 @@ var pRtree uintptr = pVtab var rc int32 var zCreate uintptr = Xsqlite3_mprintf(tls, - ts+25511, + ts+25558, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) @@ -127567,7 +127625,7 @@ var pSrc uintptr var pInfo uintptr - pSrc = Xsqlite3_value_pointer(tls, pValue, ts+25593) + pSrc = Xsqlite3_value_pointer(tls, pValue, ts+25640) if pSrc == uintptr(0) { return SQLITE_ERROR } @@ -128908,7 +128966,7 @@ var zSql uintptr var rc int32 - zSql = Xsqlite3_mprintf(tls, ts+25607, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) + zSql = Xsqlite3_mprintf(tls, ts+25654, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) if zSql != 0 { rc = Xsqlite3_prepare_v2(tls, (*Rtree)(unsafe.Pointer(pRtree)).Fdb, zSql, -1, bp+56, uintptr(0)) } else { @@ -128920,12 +128978,12 @@ if iCol == 0 { var zCol uintptr = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), 0) (*Rtree)(unsafe.Pointer(pRtree)).Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+25627, libc.VaList(bp+16, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zCol)) + ts+25674, libc.VaList(bp+16, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zCol)) } else { var zCol1 uintptr = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), iCol) var zCol2 uintptr = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), iCol+1) (*Rtree)(unsafe.Pointer(pRtree)).Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+25659, libc.VaList(bp+32, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zCol1, zCol2)) + ts+25706, libc.VaList(bp+32, 
(*Rtree)(unsafe.Pointer(pRtree)).FzName, zCol1, zCol2)) } } @@ -129151,7 +129209,7 @@ var pRtree uintptr = pVtab var rc int32 = SQLITE_NOMEM var zSql uintptr = Xsqlite3_mprintf(tls, - ts+25696, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName)) + ts+25743, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName)) if zSql != 0 { nodeBlobReset(tls, pRtree) rc = Xsqlite3_exec(tls, (*Rtree)(unsafe.Pointer(pRtree)).Fdb, zSql, uintptr(0), uintptr(0), uintptr(0)) @@ -129174,7 +129232,7 @@ bp := tls.Alloc(24) defer tls.Free(24) - var zFmt uintptr = ts + 25841 + var zFmt uintptr = ts + 25888 var zSql uintptr var rc int32 @@ -129222,7 +129280,7 @@ } var azName1 = [3]uintptr{ - ts + 25897, ts + 5463, ts + 16673, + ts + 25944, ts + 5463, ts + 16673, } var rtreeModule = Sqlite3_module{ @@ -129265,19 +129323,19 @@ var p uintptr = Xsqlite3_str_new(tls, db) var ii int32 Xsqlite3_str_appendf(tls, p, - ts+25902, + ts+25949, libc.VaList(bp, zDb, zPrefix)) for ii = 0; ii < int32((*Rtree)(unsafe.Pointer(pRtree)).FnAux); ii++ { - Xsqlite3_str_appendf(tls, p, ts+25964, libc.VaList(bp+16, ii)) + Xsqlite3_str_appendf(tls, p, ts+26011, libc.VaList(bp+16, ii)) } Xsqlite3_str_appendf(tls, p, - ts+25969, + ts+26016, libc.VaList(bp+24, zDb, zPrefix)) Xsqlite3_str_appendf(tls, p, - ts+26033, + ts+26080, libc.VaList(bp+40, zDb, zPrefix)) Xsqlite3_str_appendf(tls, p, - ts+26103, + ts+26150, libc.VaList(bp+56, zDb, zPrefix, (*Rtree)(unsafe.Pointer(pRtree)).FiNodeSize)) zCreate = Xsqlite3_str_finish(tls, p) if !(zCreate != 0) { @@ -129306,7 +129364,7 @@ if i != 3 || int32((*Rtree)(unsafe.Pointer(pRtree)).FnAux) == 0 { zFormat = azSql[i] } else { - zFormat = ts + 26152 + zFormat = ts + 26199 } zSql = Xsqlite3_mprintf(tls, zFormat, libc.VaList(bp+80, zDb, zPrefix)) if zSql != 0 { @@ -129318,7 +129376,7 @@ } if (*Rtree)(unsafe.Pointer(pRtree)).FnAux != 0 { (*Rtree)(unsafe.Pointer(pRtree)).FzReadAuxSql = Xsqlite3_mprintf(tls, - ts+26260, + ts+26307, libc.VaList(bp+96, zDb, zPrefix)) if (*Rtree)(unsafe.Pointer(pRtree)).FzReadAuxSql == uintptr(0) { rc = SQLITE_NOMEM @@ -129326,18 +129384,18 @@ var p uintptr = Xsqlite3_str_new(tls, db) var ii int32 var zSql uintptr - Xsqlite3_str_appendf(tls, p, ts+26305, libc.VaList(bp+112, zDb, zPrefix)) + Xsqlite3_str_appendf(tls, p, ts+26352, libc.VaList(bp+112, zDb, zPrefix)) for ii = 0; ii < int32((*Rtree)(unsafe.Pointer(pRtree)).FnAux); ii++ { if ii != 0 { Xsqlite3_str_append(tls, p, ts+13170, 1) } if ii < int32((*Rtree)(unsafe.Pointer(pRtree)).FnAuxNotNull) { - Xsqlite3_str_appendf(tls, p, ts+26332, libc.VaList(bp+128, ii, ii+2, ii)) + Xsqlite3_str_appendf(tls, p, ts+26379, libc.VaList(bp+128, ii, ii+2, ii)) } else { - Xsqlite3_str_appendf(tls, p, ts+26354, libc.VaList(bp+152, ii, ii+2)) + Xsqlite3_str_appendf(tls, p, ts+26401, libc.VaList(bp+152, ii, ii+2)) } } - Xsqlite3_str_appendf(tls, p, ts+26362, 0) + Xsqlite3_str_appendf(tls, p, ts+26409, 0) zSql = Xsqlite3_str_finish(tls, p) if zSql == uintptr(0) { rc = SQLITE_NOMEM @@ -129352,14 +129410,14 @@ } var azSql = [8]uintptr{ - ts + 26378, - ts + 26431, - ts + 26476, - ts + 26528, - 
ts + 26582, - ts + 26627, - ts + 26685, - ts + 26740, + ts + 26425, + ts + 26478, + ts + 26523, + ts + 26575, + ts + 26629, + ts + 26674, + ts + 26732, + ts + 26787, } func getIntFromStmt(tls *libc.TLS, db uintptr, zSql uintptr, piVal uintptr) int32 { @@ -129388,7 +129446,7 @@ var zSql uintptr if isCreate != 0 { *(*int32)(unsafe.Pointer(bp + 48)) = 0 - zSql = Xsqlite3_mprintf(tls, ts+26787, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb)) + zSql = Xsqlite3_mprintf(tls, ts+26834, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb)) rc = getIntFromStmt(tls, db, zSql, bp+48) if rc == SQLITE_OK { (*Rtree)(unsafe.Pointer(pRtree)).FiNodeSize = *(*int32)(unsafe.Pointer(bp + 48)) - 64 @@ -129400,7 +129458,7 @@ } } else { zSql = Xsqlite3_mprintf(tls, - ts+26807, + ts+26854, libc.VaList(bp+16, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) rc = getIntFromStmt(tls, db, zSql, pRtree+32) if rc != SQLITE_OK { @@ -129408,7 +129466,7 @@ } else if (*Rtree)(unsafe.Pointer(pRtree)).FiNodeSize < 512-64 { rc = SQLITE_CORRUPT | int32(1)<<8 - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+26864, + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+26911, libc.VaList(bp+40, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) } } @@ -129450,10 +129508,10 @@ ii = 4 *(*[5]uintptr)(unsafe.Pointer(bp + 96)) = [5]uintptr{ uintptr(0), - ts + 26899, - ts + 26942, - ts + 26977, - ts + 27013, + ts + 26946, + ts + 26989, + ts + 27024, + ts + 27060, } if !(argc < 6 || argc > RTREE_MAX_AUX_COLUMN+3) { @@ -129484,7 +129542,7 @@ libc.X__builtin___memcpy_chk(tls, (*Rtree)(unsafe.Pointer(pRtree)).FzName, *(*uintptr)(unsafe.Pointer(argv + 2*8)), uint64(nName), libc.X__builtin_object_size(tls, (*Rtree)(unsafe.Pointer(pRtree)).FzName, 0)) pSql = Xsqlite3_str_new(tls, db) - Xsqlite3_str_appendf(tls, pSql, ts+27050, + Xsqlite3_str_appendf(tls, pSql, ts+27097, libc.VaList(bp+16, rtreeTokenLength(tls, *(*uintptr)(unsafe.Pointer(argv + 3*8))), *(*uintptr)(unsafe.Pointer(argv + 3*8)))) ii = 4 __3: @@ -129496,7 +129554,7 @@ goto __6 } (*Rtree)(unsafe.Pointer(pRtree)).FnAux++ - Xsqlite3_str_appendf(tls, pSql, ts+27074, libc.VaList(bp+32, rtreeTokenLength(tls, zArg+uintptr(1)), zArg+uintptr(1))) + Xsqlite3_str_appendf(tls, pSql, ts+27121, libc.VaList(bp+32, rtreeTokenLength(tls, zArg+uintptr(1)), zArg+uintptr(1))) goto __7 __6: if !(int32((*Rtree)(unsafe.Pointer(pRtree)).FnAux) > 0) { @@ -129519,7 +129577,7 @@ goto __5 __5: ; - Xsqlite3_str_appendf(tls, pSql, ts+27080, 0) + Xsqlite3_str_appendf(tls, pSql, ts+27127, 0) zSql = Xsqlite3_str_finish(tls, pSql) if !!(zSql != 0) { goto __10 @@ -129615,7 +129673,7 @@ return rc } -var azFormat = [2]uintptr{ts + 27083, ts + 27094} +var azFormat = [2]uintptr{ts + 27130, ts + 27141} func rtreenode(tls *libc.TLS, ctx uintptr, nArg int32, apArg uintptr) { bp := tls.Alloc(1072) @@ -129655,11 +129713,11 @@ if ii > 0 { Xsqlite3_str_append(tls, pOut, ts+11323, 1) } - Xsqlite3_str_appendf(tls, pOut, ts+27104, libc.VaList(bp, (*RtreeCell)(unsafe.Pointer(bp+1024)).FiRowid)) + Xsqlite3_str_appendf(tls, pOut, ts+27151, libc.VaList(bp, (*RtreeCell)(unsafe.Pointer(bp+1024)).FiRowid)) for jj = 0; jj < int32((*Rtree)(unsafe.Pointer(bp+56)).FnDim2); jj++ { - Xsqlite3_str_appendf(tls, pOut, ts+27110, libc.VaList(bp+8, float64(*(*RtreeValue)(unsafe.Pointer(bp + 1024 + 8 + uintptr(jj)*4))))) + Xsqlite3_str_appendf(tls, pOut, ts+27157, libc.VaList(bp+8, float64(*(*RtreeValue)(unsafe.Pointer(bp + 1024 + 8 + uintptr(jj)*4))))) } - Xsqlite3_str_append(tls, pOut, 
ts+27114, 1) + Xsqlite3_str_append(tls, pOut, ts+27161, 1) } errCode = Xsqlite3_str_errcode(tls, pOut) Xsqlite3_result_text(tls, ctx, Xsqlite3_str_finish(tls, pOut), -1, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{Xsqlite3_free}))) @@ -129670,7 +129728,7 @@ _ = nArg if Xsqlite3_value_type(tls, *(*uintptr)(unsafe.Pointer(apArg))) != SQLITE_BLOB || Xsqlite3_value_bytes(tls, *(*uintptr)(unsafe.Pointer(apArg))) < 2 { - Xsqlite3_result_error(tls, ctx, ts+27116, -1) + Xsqlite3_result_error(tls, ctx, ts+27163, -1) } else { var zBlob uintptr = Xsqlite3_value_blob(tls, *(*uintptr)(unsafe.Pointer(apArg))) if zBlob != 0 { @@ -129748,7 +129806,7 @@ if z == uintptr(0) { (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc = SQLITE_NOMEM } else { - (*RtreeCheck)(unsafe.Pointer(pCheck)).FzReport = Xsqlite3_mprintf(tls, ts+27149, + (*RtreeCheck)(unsafe.Pointer(pCheck)).FzReport = Xsqlite3_mprintf(tls, ts+27196, libc.VaList(bp, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzReport, func() uintptr { if (*RtreeCheck)(unsafe.Pointer(pCheck)).FzReport != 0 { return ts + 4465 @@ -129772,7 +129830,7 @@ if (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc == SQLITE_OK && (*RtreeCheck)(unsafe.Pointer(pCheck)).FpGetNode == uintptr(0) { (*RtreeCheck)(unsafe.Pointer(pCheck)).FpGetNode = rtreeCheckPrepare(tls, pCheck, - ts+27156, + ts+27203, libc.VaList(bp, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzDb, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzTab)) } @@ -129791,7 +129849,7 @@ } rtreeCheckReset(tls, pCheck, (*RtreeCheck)(unsafe.Pointer(pCheck)).FpGetNode) if (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc == SQLITE_OK && pRet == uintptr(0) { - rtreeCheckAppendMsg(tls, pCheck, ts+27201, libc.VaList(bp+16, iNode)) + rtreeCheckAppendMsg(tls, pCheck, ts+27248, libc.VaList(bp+16, iNode)) } } @@ -129805,8 +129863,8 @@ var rc int32 var pStmt uintptr *(*[2]uintptr)(unsafe.Pointer(bp + 80)) = [2]uintptr{ - ts + 27233, - ts + 27287, + ts + 27280, + ts + 27334, } if *(*uintptr)(unsafe.Pointer(pCheck + 40 + uintptr(bLeaf)*8)) == uintptr(0) { @@ -129821,23 +129879,23 @@ Xsqlite3_bind_int64(tls, pStmt, 1, iKey) rc = Xsqlite3_step(tls, pStmt) if rc == SQLITE_DONE { - rtreeCheckAppendMsg(tls, pCheck, ts+27335, + rtreeCheckAppendMsg(tls, pCheck, ts+27382, libc.VaList(bp+16, iKey, iVal, func() uintptr { if bLeaf != 0 { - return ts + 27380 + return ts + 27427 } - return ts + 27388 + return ts + 27435 }())) } else if rc == SQLITE_ROW { var ii I64 = Xsqlite3_column_int64(tls, pStmt, 0) if ii != iVal { rtreeCheckAppendMsg(tls, pCheck, - ts+27397, + ts+27444, libc.VaList(bp+40, iKey, ii, func() uintptr { if bLeaf != 0 { - return ts + 27380 + return ts + 27427 } - return ts + 27388 + return ts + 27435 }(), iKey, iVal)) } } @@ -129861,7 +129919,7 @@ return libc.Bool32(*(*RtreeValue)(unsafe.Pointer(bp + 48)) > *(*RtreeValue)(unsafe.Pointer(bp + 52))) }() != 0 { rtreeCheckAppendMsg(tls, pCheck, - ts+27455, libc.VaList(bp, i, iCell, iNode)) + ts+27502, libc.VaList(bp, i, iCell, iNode)) } if pParent != 0 { @@ -129881,7 +129939,7 @@ return libc.Bool32(*(*RtreeValue)(unsafe.Pointer(bp + 52)) > *(*RtreeValue)(unsafe.Pointer(bp + 60))) }() != 0 { rtreeCheckAppendMsg(tls, pCheck, - ts+27503, libc.VaList(bp+24, i, iCell, iNode)) + ts+27550, libc.VaList(bp+24, i, iCell, iNode)) } } } @@ -129898,14 +129956,14 @@ if aNode != 0 { if *(*int32)(unsafe.Pointer(bp + 48)) < 4 { rtreeCheckAppendMsg(tls, pCheck, - ts+27570, libc.VaList(bp, iNode, *(*int32)(unsafe.Pointer(bp + 48)))) + ts+27617, libc.VaList(bp, iNode, *(*int32)(unsafe.Pointer(bp + 48)))) } else { var nCell int32 
var i int32 if aParent == uintptr(0) { iDepth = readInt16(tls, aNode) if iDepth > RTREE_MAX_DEPTH { - rtreeCheckAppendMsg(tls, pCheck, ts+27604, libc.VaList(bp+16, iDepth)) + rtreeCheckAppendMsg(tls, pCheck, ts+27651, libc.VaList(bp+16, iDepth)) Xsqlite3_free(tls, aNode) return } @@ -129913,7 +129971,7 @@ nCell = readInt16(tls, aNode+2) if 4+nCell*(8+(*RtreeCheck)(unsafe.Pointer(pCheck)).FnDim*2*4) > *(*int32)(unsafe.Pointer(bp + 48)) { rtreeCheckAppendMsg(tls, pCheck, - ts+27634, + ts+27681, libc.VaList(bp+24, iNode, nCell, *(*int32)(unsafe.Pointer(bp + 48)))) } else { for i = 0; i < nCell; i++ { @@ -129942,14 +130000,14 @@ if (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc == SQLITE_OK { var pCount uintptr - pCount = rtreeCheckPrepare(tls, pCheck, ts+27689, + pCount = rtreeCheckPrepare(tls, pCheck, ts+27736, libc.VaList(bp, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzDb, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzTab, zTbl)) if pCount != 0 { if Xsqlite3_step(tls, pCount) == SQLITE_ROW { var nActual I64 = Xsqlite3_column_int64(tls, pCount, 0) if nActual != nExpect { rtreeCheckAppendMsg(tls, pCheck, - ts+27720, libc.VaList(bp+24, zTbl, nExpect, nActual)) + ts+27767, libc.VaList(bp+24, zTbl, nExpect, nActual)) } } (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc = Xsqlite3_finalize(tls, pCount) @@ -129976,7 +130034,7 @@ } if (*RtreeCheck)(unsafe.Pointer(bp+32)).Frc == SQLITE_OK { - pStmt = rtreeCheckPrepare(tls, bp+32, ts+27787, libc.VaList(bp, zDb, zTab)) + pStmt = rtreeCheckPrepare(tls, bp+32, ts+27834, libc.VaList(bp, zDb, zTab)) if pStmt != 0 { nAux = Xsqlite3_column_count(tls, pStmt) - 2 Xsqlite3_finalize(tls, pStmt) @@ -129985,12 +130043,12 @@ } } - pStmt = rtreeCheckPrepare(tls, bp+32, ts+25607, libc.VaList(bp+16, zDb, zTab)) + pStmt = rtreeCheckPrepare(tls, bp+32, ts+25654, libc.VaList(bp+16, zDb, zTab)) if pStmt != 0 { var rc int32 (*RtreeCheck)(unsafe.Pointer(bp + 32)).FnDim = (Xsqlite3_column_count(tls, pStmt) - 1 - nAux) / 2 if (*RtreeCheck)(unsafe.Pointer(bp+32)).FnDim < 1 { - rtreeCheckAppendMsg(tls, bp+32, ts+27815, 0) + rtreeCheckAppendMsg(tls, bp+32, ts+27862, 0) } else if SQLITE_ROW == Xsqlite3_step(tls, pStmt) { (*RtreeCheck)(unsafe.Pointer(bp + 32)).FbInt = libc.Bool32(Xsqlite3_column_type(tls, pStmt, 1) == SQLITE_INTEGER) } @@ -130004,8 +130062,8 @@ if (*RtreeCheck)(unsafe.Pointer(bp+32)).Frc == SQLITE_OK { rtreeCheckNode(tls, bp+32, 0, uintptr(0), int64(1)) } - rtreeCheckCount(tls, bp+32, ts+27846, int64((*RtreeCheck)(unsafe.Pointer(bp+32)).FnLeaf)) - rtreeCheckCount(tls, bp+32, ts+27853, int64((*RtreeCheck)(unsafe.Pointer(bp+32)).FnNonLeaf)) + rtreeCheckCount(tls, bp+32, ts+27893, int64((*RtreeCheck)(unsafe.Pointer(bp+32)).FnLeaf)) + rtreeCheckCount(tls, bp+32, ts+27900, int64((*RtreeCheck)(unsafe.Pointer(bp+32)).FnNonLeaf)) } Xsqlite3_finalize(tls, (*RtreeCheck)(unsafe.Pointer(bp+32)).FpGetNode) @@ -130013,7 +130071,7 @@ Xsqlite3_finalize(tls, *(*uintptr)(unsafe.Pointer(bp + 32 + 40 + 1*8))) if bEnd != 0 { - var rc int32 = Xsqlite3_exec(tls, db, ts+27861, uintptr(0), uintptr(0), uintptr(0)) + var rc int32 = Xsqlite3_exec(tls, db, ts+27908, uintptr(0), uintptr(0), uintptr(0)) if (*RtreeCheck)(unsafe.Pointer(bp+32)).Frc == SQLITE_OK { (*RtreeCheck)(unsafe.Pointer(bp + 32)).Frc = rc } @@ -130028,7 +130086,7 @@ if nArg != 1 && nArg != 2 { Xsqlite3_result_error(tls, ctx, - ts+27865, -1) + ts+27912, -1) } else { var rc int32 *(*uintptr)(unsafe.Pointer(bp)) = uintptr(0) @@ -130046,7 +130104,7 @@ if *(*uintptr)(unsafe.Pointer(bp)) != 0 { return *(*uintptr)(unsafe.Pointer(bp)) } - return ts + 
18464 + return ts + 18511 }(), -1, libc.UintptrFromInt32(-1)) } else { Xsqlite3_result_error_code(tls, ctx, rc) @@ -130417,11 +130475,11 @@ var db uintptr = Xsqlite3_context_db_handle(tls, context) var x uintptr = Xsqlite3_str_new(tls, db) var i int32 - Xsqlite3_str_append(tls, x, ts+27916, 1) + Xsqlite3_str_append(tls, x, ts+27963, 1) for i = 0; i < (*GeoPoly)(unsafe.Pointer(p)).FnVertex; i++ { - Xsqlite3_str_appendf(tls, x, ts+27918, libc.VaList(bp, float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2)*4))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2+1)*4))))) + Xsqlite3_str_appendf(tls, x, ts+27965, libc.VaList(bp, float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2)*4))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2+1)*4))))) } - Xsqlite3_str_appendf(tls, x, ts+27929, libc.VaList(bp+16, float64(*(*GeoCoord)(unsafe.Pointer(p + 8))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + 1*4))))) + Xsqlite3_str_appendf(tls, x, ts+27976, libc.VaList(bp+16, float64(*(*GeoCoord)(unsafe.Pointer(p + 8))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + 1*4))))) Xsqlite3_result_text(tls, context, Xsqlite3_str_finish(tls, x), -1, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{Xsqlite3_free}))) Xsqlite3_free(tls, p) } @@ -130441,19 +130499,19 @@ var x uintptr = Xsqlite3_str_new(tls, db) var i int32 var cSep int8 = int8('\'') - Xsqlite3_str_appendf(tls, x, ts+27940, 0) + Xsqlite3_str_appendf(tls, x, ts+27987, 0) for i = 0; i < (*GeoPoly)(unsafe.Pointer(p)).FnVertex; i++ { - Xsqlite3_str_appendf(tls, x, ts+27958, libc.VaList(bp, int32(cSep), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2)*4))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2+1)*4))))) + Xsqlite3_str_appendf(tls, x, ts+28005, libc.VaList(bp, int32(cSep), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2)*4))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2+1)*4))))) cSep = int8(' ') } - Xsqlite3_str_appendf(tls, x, ts+27966, libc.VaList(bp+24, float64(*(*GeoCoord)(unsafe.Pointer(p + 8))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + 1*4))))) + Xsqlite3_str_appendf(tls, x, ts+28013, libc.VaList(bp+24, float64(*(*GeoCoord)(unsafe.Pointer(p + 8))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + 1*4))))) for i = 1; i < argc; i++ { var z uintptr = Xsqlite3_value_text(tls, *(*uintptr)(unsafe.Pointer(argv + uintptr(i)*8))) if z != 0 && *(*int8)(unsafe.Pointer(z)) != 0 { - Xsqlite3_str_appendf(tls, x, ts+27974, libc.VaList(bp+40, z)) + Xsqlite3_str_appendf(tls, x, ts+28021, libc.VaList(bp+40, z)) } } - Xsqlite3_str_appendf(tls, x, ts+27978, 0) + Xsqlite3_str_appendf(tls, x, ts+28025, 0) Xsqlite3_result_text(tls, context, Xsqlite3_str_finish(tls, x), -1, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{Xsqlite3_free}))) Xsqlite3_free(tls, p) } @@ -131373,7 +131431,7 @@ libc.X__builtin___memcpy_chk(tls, (*Rtree)(unsafe.Pointer(pRtree)).FzName, *(*uintptr)(unsafe.Pointer(argv + 2*8)), uint64(nName), libc.X__builtin_object_size(tls, (*Rtree)(unsafe.Pointer(pRtree)).FzName, 0)) pSql = Xsqlite3_str_new(tls, db) - Xsqlite3_str_appendf(tls, pSql, ts+27991, 0) + Xsqlite3_str_appendf(tls, pSql, ts+28038, 0) (*Rtree)(unsafe.Pointer(pRtree)).FnAux = U8(1) (*Rtree)(unsafe.Pointer(pRtree)).FnAuxNotNull = U8(1) ii = 3 @@ -131382,7 +131440,7 @@ goto __4 } (*Rtree)(unsafe.Pointer(pRtree)).FnAux++ - Xsqlite3_str_appendf(tls, pSql, ts+28013, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(argv + uintptr(ii)*8)))) + Xsqlite3_str_appendf(tls, pSql, ts+28060, libc.VaList(bp+8, 
*(*uintptr)(unsafe.Pointer(argv + uintptr(ii)*8)))) goto __3 __3: ii++ @@ -131390,7 +131448,7 @@ goto __4 __4: ; - Xsqlite3_str_appendf(tls, pSql, ts+27080, 0) + Xsqlite3_str_appendf(tls, pSql, ts+27127, 0) zSql = Xsqlite3_str_finish(tls, pSql) if !!(zSql != 0) { goto __5 @@ -131627,7 +131685,7 @@ } if iFuncTerm >= 0 { (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxNum = idxNum - (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxStr = ts + 28017 + (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxStr = ts + 28064 (*sqlite3_index_constraint_usage)(unsafe.Pointer((*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FaConstraintUsage + uintptr(iFuncTerm)*8)).FargvIndex = 1 (*sqlite3_index_constraint_usage)(unsafe.Pointer((*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FaConstraintUsage + uintptr(iFuncTerm)*8)).Fomit = uint8(0) (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FestimatedCost = 300.0 @@ -131635,7 +131693,7 @@ return SQLITE_OK } (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxNum = 4 - (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxStr = ts + 28023 + (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxStr = ts + 28070 (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FestimatedCost = 3000000.0 (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FestimatedRows = int64(100000) return SQLITE_OK @@ -131747,7 +131805,7 @@ if !(*(*int32)(unsafe.Pointer(bp + 48)) == SQLITE_ERROR) { goto __4 } - (*Sqlite3_vtab)(unsafe.Pointer(pVtab)).FzErrMsg = Xsqlite3_mprintf(tls, ts+28032, 0) + (*Sqlite3_vtab)(unsafe.Pointer(pVtab)).FzErrMsg = Xsqlite3_mprintf(tls, ts+28079, 0) __4: ; goto geopoly_update_end @@ -131879,14 +131937,14 @@ func geopolyFindFunction(tls *libc.TLS, pVtab uintptr, nArg int32, zName uintptr, pxFunc uintptr, ppArg uintptr) int32 { _ = pVtab _ = nArg - if Xsqlite3_stricmp(tls, zName, ts+28072) == 0 { + if Xsqlite3_stricmp(tls, zName, ts+28119) == 0 { *(*uintptr)(unsafe.Pointer(pxFunc)) = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{geopolyOverlapFunc})) *(*uintptr)(unsafe.Pointer(ppArg)) = uintptr(0) return SQLITE_INDEX_CONSTRAINT_FUNCTION } - if Xsqlite3_stricmp(tls, zName, ts+28088) == 0 { + if Xsqlite3_stricmp(tls, zName, ts+28135) == 0 { *(*uintptr)(unsafe.Pointer(pxFunc)) = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{geopolyWithinFunc})) @@ -131951,7 +132009,7 @@ uintptr(0), aAgg[i].FxStep, aAgg[i].FxFinal) } if rc == SQLITE_OK { - rc = Xsqlite3_create_module_v2(tls, db, ts+28103, uintptr(unsafe.Pointer(&geopolyModule)), uintptr(0), uintptr(0)) + rc = Xsqlite3_create_module_v2(tls, db, ts+28150, uintptr(unsafe.Pointer(&geopolyModule)), uintptr(0), uintptr(0)) } return rc } @@ -131963,25 +132021,25 @@ F__ccgo_pad1 [6]byte FzName uintptr }{ - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 28111}, - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 28124}, - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 28137}, - {FxFunc: 0, FnArg: int8(-1), FbPure: uint8(1), FzName: ts + 28150}, - {FxFunc: 0, FnArg: int8(2), FbPure: uint8(1), FzName: ts + 28088}, - {FxFunc: 0, FnArg: int8(3), FbPure: uint8(1), FzName: ts + 28162}, - {FxFunc: 0, FnArg: int8(2), FbPure: uint8(1), FzName: ts + 28072}, - {FxFunc: 0, FnArg: int8(1), FzName: ts + 28185}, - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 28199}, - {FxFunc: 0, FnArg: int8(7), FbPure: uint8(1), FzName: ts + 28212}, - {FxFunc: 0, FnArg: int8(4), FbPure: uint8(1), FzName: ts + 28226}, - {FxFunc: 0, FnArg: 
int8(1), FbPure: uint8(1), FzName: ts + 28242}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 28158}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 28171}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 28184}, + {FxFunc: 0, FnArg: int8(-1), FbPure: uint8(1), FzName: ts + 28197}, + {FxFunc: 0, FnArg: int8(2), FbPure: uint8(1), FzName: ts + 28135}, + {FxFunc: 0, FnArg: int8(3), FbPure: uint8(1), FzName: ts + 28209}, + {FxFunc: 0, FnArg: int8(2), FbPure: uint8(1), FzName: ts + 28119}, + {FxFunc: 0, FnArg: int8(1), FzName: ts + 28232}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 28246}, + {FxFunc: 0, FnArg: int8(7), FbPure: uint8(1), FzName: ts + 28259}, + {FxFunc: 0, FnArg: int8(4), FbPure: uint8(1), FzName: ts + 28273}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 28289}, } var aAgg = [1]struct { FxStep uintptr FxFinal uintptr FzName uintptr }{ - {FxStep: 0, FxFinal: 0, FzName: ts + 28254}, + {FxStep: 0, FxFinal: 0, FzName: ts + 28301}, } // Register the r-tree module with database handle db. This creates the @@ -131991,26 +132049,26 @@ var utf8 int32 = SQLITE_UTF8 var rc int32 - rc = Xsqlite3_create_function(tls, db, ts+28273, 2, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + rc = Xsqlite3_create_function(tls, db, ts+28320, 2, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rtreenode})), uintptr(0), uintptr(0)) if rc == SQLITE_OK { - rc = Xsqlite3_create_function(tls, db, ts+28283, 1, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + rc = Xsqlite3_create_function(tls, db, ts+28330, 1, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rtreedepth})), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { - rc = Xsqlite3_create_function(tls, db, ts+28294, -1, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + rc = Xsqlite3_create_function(tls, db, ts+28341, -1, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rtreecheck})), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { var c uintptr = uintptr(RTREE_COORD_REAL32) - rc = Xsqlite3_create_module_v2(tls, db, ts+28017, uintptr(unsafe.Pointer(&rtreeModule)), c, uintptr(0)) + rc = Xsqlite3_create_module_v2(tls, db, ts+28064, uintptr(unsafe.Pointer(&rtreeModule)), c, uintptr(0)) } if rc == SQLITE_OK { var c uintptr = uintptr(RTREE_COORD_INT32) - rc = Xsqlite3_create_module_v2(tls, db, ts+28305, uintptr(unsafe.Pointer(&rtreeModule)), c, uintptr(0)) + rc = Xsqlite3_create_module_v2(tls, db, ts+28352, uintptr(unsafe.Pointer(&rtreeModule)), c, uintptr(0)) } if rc == SQLITE_OK { rc = sqlite3_geopoly_init(tls, db) @@ -132064,7 +132122,7 @@ Xsqlite3_result_error_nomem(tls, ctx) rtreeMatchArgFree(tls, pBlob) } else { - Xsqlite3_result_pointer(tls, ctx, pBlob, ts+25593, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{rtreeMatchArgFree}))) + Xsqlite3_result_pointer(tls, ctx, pBlob, ts+25640, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{rtreeMatchArgFree}))) } } } @@ -132391,7 +132449,7 @@ nOut = rbuDeltaOutputSize(tls, aDelta, nDelta) if nOut < 0 { - Xsqlite3_result_error(tls, context, ts+28315, -1) + Xsqlite3_result_error(tls, context, ts+28362, -1) return } @@ -132402,7 +132460,7 @@ nOut2 = rbuDeltaApply(tls, aOrig, nOrig, aDelta, nDelta, aOut) if nOut2 != nOut { Xsqlite3_free(tls, aOut) - Xsqlite3_result_error(tls, context, ts+28315, -1) + Xsqlite3_result_error(tls, context, 
ts+28362, -1) } else { Xsqlite3_result_blob(tls, context, aOut, nOut, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{Xsqlite3_free}))) } @@ -132503,7 +132561,7 @@ rbuObjIterClearStatements(tls, pIter) if (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx == uintptr(0) { rc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+28336, uintptr(0), uintptr(0), p+64) + ts+28383, uintptr(0), uintptr(0), p+64) } if rc == SQLITE_OK { @@ -132567,7 +132625,7 @@ Xsqlite3_result_text(tls, pCtx, zIn, -1, uintptr(0)) } } else { - if libc.Xstrlen(tls, zIn) > uint64(4) && libc.Xmemcmp(tls, ts+25506, zIn, uint64(4)) == 0 { + if libc.Xstrlen(tls, zIn) > uint64(4) && libc.Xmemcmp(tls, ts+25553, zIn, uint64(4)) == 0 { var i int32 for i = 4; int32(*(*int8)(unsafe.Pointer(zIn + uintptr(i)))) >= '0' && int32(*(*int8)(unsafe.Pointer(zIn + uintptr(i)))) <= '9'; i++ { } @@ -132588,16 +132646,16 @@ rc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, pIter, p+64, Xsqlite3_mprintf(tls, - ts+28507, libc.VaList(bp, func() uintptr { + ts+28554, libc.VaList(bp, func() uintptr { if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - return ts + 28657 + return ts + 28704 } return ts + 1547 }()))) if rc == SQLITE_OK { rc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+8, p+64, - ts+28698) + ts+28745) } (*RbuObjIter)(unsafe.Pointer(pIter)).FbCleanup = 1 @@ -132713,7 +132771,7 @@ (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+32, p+64, Xsqlite3_mprintf(tls, - ts+28823, libc.VaList(bp, zTab))) + ts+28870, libc.VaList(bp, zTab))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc != SQLITE_OK || Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 32))) != SQLITE_ROW) { goto __1 } @@ -132731,7 +132789,7 @@ *(*int32)(unsafe.Pointer(piTnum)) = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 32)), 1) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+32+1*8, p+64, - Xsqlite3_mprintf(tls, ts+28942, libc.VaList(bp+8, zTab))) + Xsqlite3_mprintf(tls, ts+28989, libc.VaList(bp+8, zTab))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc != 0) { goto __3 } @@ -132749,7 +132807,7 @@ } (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+32+2*8, p+64, Xsqlite3_mprintf(tls, - ts+28963, libc.VaList(bp+16, zIdx))) + ts+29010, libc.VaList(bp+16, zIdx))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK) { goto __7 } @@ -132772,7 +132830,7 @@ __5: ; (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+32+3*8, p+64, - Xsqlite3_mprintf(tls, ts+29014, libc.VaList(bp+24, zTab))) + Xsqlite3_mprintf(tls, ts+29061, libc.VaList(bp+24, zTab))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK) { goto __10 } @@ -132818,7 +132876,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { libc.X__builtin___memcpy_chk(tls, (*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed, (*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk, uint64(unsafe.Sizeof(U8(0)))*uint64((*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol), libc.X__builtin_object_size(tls, (*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed, 0)) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+16, p+64, - Xsqlite3_mprintf(tls, ts+29035, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) + 
Xsqlite3_mprintf(tls, ts+29082, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) } (*RbuObjIter)(unsafe.Pointer(pIter)).FnIndex = 0 @@ -132833,7 +132891,7 @@ libc.X__builtin___memset_chk(tls, (*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed, 0x01, uint64(unsafe.Sizeof(U8(0)))*uint64((*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol), libc.X__builtin_object_size(tls, (*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed, 0)) } (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+24, p+64, - Xsqlite3_mprintf(tls, ts+29063, libc.VaList(bp+8, zIdx))) + Xsqlite3_mprintf(tls, ts+29110, libc.VaList(bp+8, zIdx))) for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) { var iCid int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 24)), 1) if iCid >= 0 { @@ -132873,7 +132931,7 @@ rbuTableType(tls, p, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, pIter+72, bp+56, pIter+108) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NOTABLE { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+19939, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl)) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+19986, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl)) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc != 0 { return (*Sqlite3rbu)(unsafe.Pointer(p)).Frc @@ -132883,18 +132941,18 @@ } (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp+64, p+64, - Xsqlite3_mprintf(tls, ts+29092, libc.VaList(bp+8, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl))) + Xsqlite3_mprintf(tls, ts+29139, libc.VaList(bp+8, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl))) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { nCol = Xsqlite3_column_count(tls, *(*uintptr)(unsafe.Pointer(bp + 64))) rbuAllocateIterArrays(tls, p, pIter, nCol) } for i = 0; (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && i < nCol; i++ { var zName uintptr = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp + 64)), i) - if Xsqlite3_strnicmp(tls, ts+29111, zName, 4) != 0 { + if Xsqlite3_strnicmp(tls, ts+29158, zName, 4) != 0 { var zCopy uintptr = rbuStrndup(tls, zName, p+56) *(*int32)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FaiSrcOrder + uintptr((*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol)*4)) = (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(libc.PostIncInt32(&(*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol, 1))*8)) = zCopy - } else if 0 == Xsqlite3_stricmp(tls, ts+29116, zName) { + } else if 0 == Xsqlite3_stricmp(tls, ts+29163, zName) { bRbuRowid = 1 } } @@ -132906,18 +132964,18 @@ bRbuRowid != libc.Bool32((*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_VTAB || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE) { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, - ts+29126, libc.VaList(bp+16, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, + ts+29173, libc.VaList(bp+16, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, func() uintptr { if bRbuRowid != 0 { - return ts + 29155 + return ts + 29202 } - return ts + 29168 + return ts + 29215 }())) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { 
(*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+64, p+64, - Xsqlite3_mprintf(tls, ts+29177, libc.VaList(bp+32, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) + Xsqlite3_mprintf(tls, ts+29224, libc.VaList(bp+32, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) } for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 64))) { var zName uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 64)), 1) @@ -132931,7 +132989,7 @@ } if i == (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+29199, + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+29246, libc.VaList(bp+40, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zName)) } else { var iPk int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 64)), 5) @@ -132978,7 +133036,7 @@ var i int32 for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { var z uintptr = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) - zList = rbuMPrintf(tls, p, ts+29226, libc.VaList(bp, zList, zSep, z)) + zList = rbuMPrintf(tls, p, ts+29273, libc.VaList(bp, zList, zSep, z)) zSep = ts + 15017 } return zList @@ -132996,7 +133054,7 @@ for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { if int32(*(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(i)))) == iPk { var zCol uintptr = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) - zRet = rbuMPrintf(tls, p, ts+29235, libc.VaList(bp, zRet, zSep, zPre, zCol, zPost)) + zRet = rbuMPrintf(tls, p, ts+29282, libc.VaList(bp, zRet, zSep, zPre, zCol, zPost)) zSep = zSeparator break } @@ -133018,25 +133076,25 @@ if bRowid != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+72, p+64, Xsqlite3_mprintf(tls, - ts+29248, libc.VaList(bp, zWrite, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) + ts+29295, libc.VaList(bp, zWrite, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 72))) { var iMax Sqlite3_int64 = Xsqlite3_column_int64(tls, *(*uintptr)(unsafe.Pointer(bp + 72)), 0) - zRet = rbuMPrintf(tls, p, ts+29280, libc.VaList(bp+16, iMax)) + zRet = rbuMPrintf(tls, p, ts+29327, libc.VaList(bp+16, iMax)) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 72))) } else { - var zOrder uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+1547, ts+15017, ts+29303) - var zSelect uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+29309, ts+29316, ts+5360) + var zOrder uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+1547, ts+15017, ts+29350) + var zSelect uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+29356, ts+29363, ts+5360) var zList uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+1547, ts+15017, ts+1547) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+72, p+64, Xsqlite3_mprintf(tls, - ts+29324, + ts+29371, libc.VaList(bp+24, zSelect, zWrite, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zOrder))) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 72))) { var 
zVal uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 72)), 0) - zRet = rbuMPrintf(tls, p, ts+29366, libc.VaList(bp+56, zList, zVal)) + zRet = rbuMPrintf(tls, p, ts+29413, libc.VaList(bp+56, zList, zVal)) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 72))) } @@ -133078,7 +133136,7 @@ *(*uintptr)(unsafe.Pointer(bp + 176)) = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+176, p+64, - Xsqlite3_mprintf(tls, ts+29063, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx))) + Xsqlite3_mprintf(tls, ts+29110, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx))) __1: if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 176)))) { goto __2 @@ -133113,7 +133171,7 @@ zCol = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) goto __7 __6: - zCol = ts + 29386 + zCol = ts + 29433 __7: ; goto __5 @@ -133121,11 +133179,11 @@ zCol = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(iCid)*8)) __5: ; - zLhs = rbuMPrintf(tls, p, ts+29394, + zLhs = rbuMPrintf(tls, p, ts+29441, libc.VaList(bp+8, zLhs, zSep, zCol, zCollate)) - zOrder = rbuMPrintf(tls, p, ts+29415, + zOrder = rbuMPrintf(tls, p, ts+29462, libc.VaList(bp+40, zOrder, zSep, iCol, zCol, zCollate)) - zSelect = rbuMPrintf(tls, p, ts+29451, + zSelect = rbuMPrintf(tls, p, ts+29498, libc.VaList(bp+80, zSelect, zSep, iCol, zCol)) zSep = ts + 15017 iCol++ @@ -133145,7 +133203,7 @@ *(*uintptr)(unsafe.Pointer(bp + 184)) = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+184, p+64, - Xsqlite3_mprintf(tls, ts+29478, + Xsqlite3_mprintf(tls, ts+29525, libc.VaList(bp+112, zSelect, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zOrder))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 184)))) { goto __13 @@ -133172,7 +133230,7 @@ ; __18: ; - zVector = rbuMPrintf(tls, p, ts+29526, libc.VaList(bp+136, zVector, zSep, zQuoted)) + zVector = rbuMPrintf(tls, p, ts+29573, libc.VaList(bp+136, zVector, zSep, zQuoted)) zSep = ts + 15017 goto __15 __15: @@ -133184,7 +133242,7 @@ if !!(bFailed != 0) { goto __20 } - zRet = rbuMPrintf(tls, p, ts+29533, libc.VaList(bp+160, zLhs, zVector)) + zRet = rbuMPrintf(tls, p, ts+29580, libc.VaList(bp+160, zLhs, zVector)) __20: ; __13: @@ -133217,7 +133275,7 @@ if rc == SQLITE_OK { rc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+200, p+64, - Xsqlite3_mprintf(tls, ts+29063, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx))) + Xsqlite3_mprintf(tls, ts+29110, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx))) } for rc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 200))) { @@ -133229,7 +133287,7 @@ if iCid == -2 { var iSeq int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 200)), 0) - zRet = Xsqlite3_mprintf(tls, ts+29545, libc.VaList(bp+8, zRet, zCom, + zRet = Xsqlite3_mprintf(tls, ts+29592, libc.VaList(bp+8, zRet, zCom, (*RbuSpan)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FaIdxCol+uintptr(iSeq)*16)).FnSpan, (*RbuSpan)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FaIdxCol+uintptr(iSeq)*16)).FzSpan, zCollate)) zType = ts + 1547 } else { @@ -133241,37 +133299,37 @@ zCol = 
*(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) } else if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - zCol = ts + 29386 + zCol = ts + 29433 } else { - zCol = ts + 29116 + zCol = ts + 29163 } zType = ts + 1112 } else { zCol = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(iCid)*8)) zType = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblType + uintptr(iCid)*8)) } - zRet = Xsqlite3_mprintf(tls, ts+29567, libc.VaList(bp+48, zRet, zCom, zCol, zCollate)) + zRet = Xsqlite3_mprintf(tls, ts+29614, libc.VaList(bp+48, zRet, zCom, zCol, zCollate)) } if (*RbuObjIter)(unsafe.Pointer(pIter)).FbUnique == 0 || Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 200)), 5) != 0 { var zOrder uintptr = func() uintptr { if bDesc != 0 { - return ts + 29303 + return ts + 29350 } return ts + 1547 }() - zImpPK = Xsqlite3_mprintf(tls, ts+29587, + zImpPK = Xsqlite3_mprintf(tls, ts+29634, libc.VaList(bp+80, zImpPK, zCom, nBind, zCol, zOrder)) } - zImpCols = Xsqlite3_mprintf(tls, ts+29608, + zImpCols = Xsqlite3_mprintf(tls, ts+29655, libc.VaList(bp+120, zImpCols, zCom, nBind, zCol, zType, zCollate)) zWhere = Xsqlite3_mprintf(tls, - ts+29641, libc.VaList(bp+168, zWhere, zAnd, nBind, zCol)) + ts+29688, libc.VaList(bp+168, zWhere, zAnd, nBind, zCol)) if zRet == uintptr(0) || zImpPK == uintptr(0) || zImpCols == uintptr(0) || zWhere == uintptr(0) { rc = SQLITE_NOMEM } zCom = ts + 15017 - zAnd = ts + 21975 + zAnd = ts + 22022 nBind++ } @@ -133310,9 +133368,9 @@ for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { if *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed + uintptr(i))) != 0 { var zCol uintptr = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) - zList = Xsqlite3_mprintf(tls, ts+29665, libc.VaList(bp, zList, zS, zObj, zCol)) + zList = Xsqlite3_mprintf(tls, ts+29712, libc.VaList(bp, zList, zS, zObj, zCol)) } else { - zList = Xsqlite3_mprintf(tls, ts+29677, libc.VaList(bp+32, zList, zS)) + zList = Xsqlite3_mprintf(tls, ts+29724, libc.VaList(bp+32, zList, zS)) } zS = ts + 15017 if zList == uintptr(0) { @@ -133322,7 +133380,7 @@ } if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { - zList = rbuMPrintf(tls, p, ts+29686, libc.VaList(bp+48, zList, zObj)) + zList = rbuMPrintf(tls, p, ts+29733, libc.VaList(bp+48, zList, zObj)) } } return zList @@ -133334,18 +133392,18 @@ var zList uintptr = uintptr(0) if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_VTAB || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { - zList = rbuMPrintf(tls, p, ts+29701, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol+1)) + zList = rbuMPrintf(tls, p, ts+29748, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol+1)) } else if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL { var zSep uintptr = ts + 1547 var i int32 for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { if *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(i))) != 0 { - zList = rbuMPrintf(tls, p, ts+29715, libc.VaList(bp+8, zList, zSep, i, i+1)) - zSep = ts + 21975 + zList = rbuMPrintf(tls, p, ts+29762, libc.VaList(bp+8, zList, zSep, i, i+1)) + zSep = ts + 22022 } } zList = rbuMPrintf(tls, p, - ts+29727, libc.VaList(bp+40, zList)) + ts+29774, libc.VaList(bp+40, zList)) } else { var zSep uintptr = ts + 1547 @@ 
-133353,8 +133411,8 @@ for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { if *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(i))) != 0 { var zCol uintptr = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) - zList = rbuMPrintf(tls, p, ts+29777, libc.VaList(bp+48, zList, zSep, zCol, i+1)) - zSep = ts + 21975 + zList = rbuMPrintf(tls, p, ts+29824, libc.VaList(bp+48, zList, zSep, zCol, i+1)) + zSep = ts + 22022 } } } @@ -133363,7 +133421,7 @@ func rbuBadControlError(tls *libc.TLS, p uintptr) { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+29790, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+29837, 0) } func rbuObjIterGetSetlist(tls *libc.TLS, p uintptr, pIter uintptr, zMask uintptr) uintptr { @@ -133381,15 +133439,15 @@ for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { var c int8 = *(*int8)(unsafe.Pointer(zMask + uintptr(*(*int32)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FaiSrcOrder + uintptr(i)*4))))) if int32(c) == 'x' { - zList = rbuMPrintf(tls, p, ts+29777, + zList = rbuMPrintf(tls, p, ts+29824, libc.VaList(bp, zList, zSep, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)), i+1)) zSep = ts + 15017 } else if int32(c) == 'd' { - zList = rbuMPrintf(tls, p, ts+29816, + zList = rbuMPrintf(tls, p, ts+29863, libc.VaList(bp+32, zList, zSep, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)), *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)), i+1)) zSep = ts + 15017 } else if int32(c) == 'f' { - zList = rbuMPrintf(tls, p, ts+29846, + zList = rbuMPrintf(tls, p, ts+29893, libc.VaList(bp+72, zList, zSep, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)), *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)), i+1)) zSep = ts + 15017 } @@ -133426,19 +133484,19 @@ var z uintptr = uintptr(0) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - var zSep uintptr = ts + 29883 + var zSep uintptr = ts + 29930 *(*uintptr)(unsafe.Pointer(bp + 56)) = uintptr(0) *(*uintptr)(unsafe.Pointer(bp + 64)) = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+56, p+64, - Xsqlite3_mprintf(tls, ts+29035, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) + Xsqlite3_mprintf(tls, ts+29082, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 56))) { var zOrig uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), 3) if zOrig != 0 && libc.Xstrcmp(tls, zOrig, ts+16561) == 0 { var zIdx uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), 1) if zIdx != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+64, p+64, - Xsqlite3_mprintf(tls, ts+29063, libc.VaList(bp+8, zIdx))) + Xsqlite3_mprintf(tls, ts+29110, libc.VaList(bp+8, zIdx))) } break } @@ -133450,15 +133508,15 @@ var zCol uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 64)), 2) var zDesc uintptr if Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 64)), 3) != 0 { - zDesc = ts + 29303 + zDesc = ts + 29350 } else 
{ zDesc = ts + 1547 } - z = rbuMPrintf(tls, p, ts+29896, libc.VaList(bp+16, z, zSep, zCol, zDesc)) + z = rbuMPrintf(tls, p, ts+29943, libc.VaList(bp+16, z, zSep, zCol, zDesc)) zSep = ts + 15017 } } - z = rbuMPrintf(tls, p, ts+29907, libc.VaList(bp+48, z)) + z = rbuMPrintf(tls, p, ts+29954, libc.VaList(bp+48, z)) rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 64))) } return z @@ -133478,7 +133536,7 @@ var zPk uintptr = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+168, p+64, - ts+29911) + ts+29958) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { Xsqlite3_bind_int(tls, *(*uintptr)(unsafe.Pointer(bp + 168)), 1, tnum) if SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 168))) { @@ -133487,7 +133545,7 @@ } if zIdx != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+176, p+64, - Xsqlite3_mprintf(tls, ts+29063, libc.VaList(bp, zIdx))) + Xsqlite3_mprintf(tls, ts+29110, libc.VaList(bp, zIdx))) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 168))) @@ -133497,23 +133555,23 @@ var iCid int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 176)), 1) var bDesc int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 176)), 3) var zCollate uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 176)), 4) - zCols = rbuMPrintf(tls, p, ts+29961, libc.VaList(bp+8, zCols, zComma, + zCols = rbuMPrintf(tls, p, ts+30008, libc.VaList(bp+8, zCols, zComma, iCid, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblType + uintptr(iCid)*8)), zCollate)) - zPk = rbuMPrintf(tls, p, ts+29983, libc.VaList(bp+48, zPk, zComma, iCid, func() uintptr { + zPk = rbuMPrintf(tls, p, ts+30030, libc.VaList(bp+48, zPk, zComma, iCid, func() uintptr { if bDesc != 0 { - return ts + 29303 + return ts + 29350 } return ts + 1547 }())) zComma = ts + 15017 } } - zCols = rbuMPrintf(tls, p, ts+29993, libc.VaList(bp+80, zCols)) + zCols = rbuMPrintf(tls, p, ts+30040, libc.VaList(bp+80, zCols)) rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 176))) Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+88, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6844, 1, tnum)) rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+30008, + ts+30055, libc.VaList(bp+120, zCols, zPk)) Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+136, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6844, 0, 0)) } @@ -133539,13 +133597,13 @@ (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6844, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zCol, uintptr(0), bp+192, uintptr(0), uintptr(0), uintptr(0)) if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_IPK && *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(iCol))) != 0 { - zPk = ts + 30070 + zPk = ts + 30117 } - zSql = rbuMPrintf(tls, p, ts+30083, + zSql = rbuMPrintf(tls, p, ts+30130, libc.VaList(bp+32, zSql, zComma, zCol, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblType + uintptr(iCol)*8)), zPk, *(*uintptr)(unsafe.Pointer(bp + 192)), func() uintptr { if *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabNotNull + uintptr(iCol))) != 0 { - return ts + 30110 + return ts + 30157 } return ts + 1547 }())) @@ -133555,16 +133613,16 @@ if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_WITHOUT_ROWID { var zPk uintptr = rbuWithoutRowidPK(tls, p, pIter) if zPk != 0 { - zSql = 
rbuMPrintf(tls, p, ts+30120, libc.VaList(bp+88, zSql, zPk)) + zSql = rbuMPrintf(tls, p, ts+30167, libc.VaList(bp+88, zSql, zPk)) } } Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+104, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6844, 1, tnum)) - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+30127, + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+30174, libc.VaList(bp+136, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zSql, func() uintptr { if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_WITHOUT_ROWID { - return ts + 30159 + return ts + 30206 } return ts + 1547 }())) @@ -133581,7 +133639,7 @@ if zBind != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, pIter+152, p+64, Xsqlite3_mprintf(tls, - ts+30174, + ts+30221, libc.VaList(bp, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zCollist, zRbuRowid, zBind))) } } @@ -133618,7 +133676,7 @@ if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { *(*int32)(unsafe.Pointer(bp + 8)) = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp, p+64, - ts+30231) + ts+30278) } if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { var rc2 int32 @@ -133723,7 +133781,7 @@ var zLimit uintptr = uintptr(0) if nOffset != 0 { - zLimit = Xsqlite3_mprintf(tls, ts+30297, libc.VaList(bp, nOffset)) + zLimit = Xsqlite3_mprintf(tls, ts+30344, libc.VaList(bp, nOffset)) if !(zLimit != 0) { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_NOMEM } @@ -133746,7 +133804,7 @@ Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+8, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6844, 0, 1)) Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+40, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6844, 1, tnum)) rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+30317, + ts+30364, libc.VaList(bp+72, zTbl, *(*uintptr)(unsafe.Pointer(bp + 600)), *(*uintptr)(unsafe.Pointer(bp + 608)))) Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+96, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6844, 0, 0)) @@ -133754,13 +133812,13 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+136, p+64, - Xsqlite3_mprintf(tls, ts+30382, libc.VaList(bp+128, zTbl, zBind))) + Xsqlite3_mprintf(tls, ts+30429, libc.VaList(bp+128, zTbl, zBind))) } if libc.Bool32((*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0)) == 0 && (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+144, p+64, - Xsqlite3_mprintf(tls, ts+30418, libc.VaList(bp+144, zTbl, *(*uintptr)(unsafe.Pointer(bp + 616))))) + Xsqlite3_mprintf(tls, ts+30465, libc.VaList(bp+144, zTbl, *(*uintptr)(unsafe.Pointer(bp + 616))))) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { @@ -133776,7 +133834,7 @@ } zSql = Xsqlite3_mprintf(tls, - ts+30452, + ts+30499, libc.VaList(bp+160, zCollist, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zPart, @@ -133784,9 +133842,9 @@ if zStart != 0 { return func() uintptr { if zPart != 0 { - return ts + 30513 + return ts + 30560 } - return ts + 30517 + return ts + 30564 }() } return ts + 1547 @@ -133795,20 +133853,20 @@ Xsqlite3_free(tls, zStart) } else if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType 
== RBU_PK_NONE { zSql = Xsqlite3_mprintf(tls, - ts+30523, + ts+30570, libc.VaList(bp+216, zCollist, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zPart, zCollist, zLimit)) } else { zSql = Xsqlite3_mprintf(tls, - ts+30584, + ts+30631, libc.VaList(bp+264, zCollist, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zPart, zCollist, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zPart, func() uintptr { if zPart != 0 { - return ts + 30513 + return ts + 30560 } - return ts + 30517 + return ts + 30564 }(), zCollist, zLimit)) } @@ -133845,16 +133903,16 @@ if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_VTAB { return ts + 1547 } - return ts + 30743 + return ts + 30790 }() if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+136, pz, Xsqlite3_mprintf(tls, - ts+30752, + ts+30799, libc.VaList(bp+344, zWrite, zTbl, zCollist, func() uintptr { if bRbuRowid != 0 { - return ts + 30788 + return ts + 30835 } return ts + 1547 }(), zBindings))) @@ -133863,32 +133921,32 @@ if libc.Bool32((*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0)) == 0 && (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+144, pz, Xsqlite3_mprintf(tls, - ts+30798, libc.VaList(bp+384, zWrite, zTbl, zWhere))) + ts+30845, libc.VaList(bp+384, zWrite, zTbl, zWhere))) } if libc.Bool32((*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0)) == 0 && (*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed != 0 { var zRbuRowid uintptr = ts + 1547 if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { - zRbuRowid = ts + 30826 + zRbuRowid = ts + 30873 } rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, - ts+30838, libc.VaList(bp+408, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, func() uintptr { + ts+30885, libc.VaList(bp+408, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, func() uintptr { if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL { - return ts + 30914 + return ts + 30961 } return ts + 1547 }(), (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl)) rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+30931, + ts+30978, libc.VaList(bp+440, zWrite, zTbl, zOldlist, zWrite, zTbl, zOldlist, zWrite, zTbl, zNewlist)) if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+31230, + ts+31277, libc.VaList(bp+512, zWrite, zTbl, zNewlist)) } @@ -133901,9 +133959,9 @@ var zOrder uintptr = uintptr(0) if bRbuRowid != 0 { if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - zRbuRowid = ts + 31329 + zRbuRowid = ts + 31376 } else { - zRbuRowid = ts + 31339 + zRbuRowid = ts + 31386 } } @@ -133916,7 +133974,7 @@ } } if bRbuRowid != 0 { - zOrder = rbuMPrintf(tls, p, ts+29386, 0) + zOrder = rbuMPrintf(tls, p, ts+29433, 0) } else { zOrder = rbuObjIterGetPkList(tls, p, pIter, ts+1547, ts+15017, ts+1547) } @@ -133925,11 +133983,11 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, pIter+128, pz, Xsqlite3_mprintf(tls, - ts+31350, + ts+31397, libc.VaList(bp+536, zCollist, func() uintptr { if 
(*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - return ts + 31398 + return ts + 31445 } return ts + 1547 }(), @@ -133942,7 +134000,7 @@ }(), func() uintptr { if zOrder != 0 { - return ts + 23309 + return ts + 23356 } return ts + 1547 }(), zOrder, @@ -134010,9 +134068,9 @@ var zPrefix uintptr = ts + 1547 if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType != RBU_PK_VTAB { - zPrefix = ts + 30743 + zPrefix = ts + 30790 } - zUpdate = Xsqlite3_mprintf(tls, ts+31404, + zUpdate = Xsqlite3_mprintf(tls, ts+31451, libc.VaList(bp, zPrefix, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zSet, zWhere)) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pUp+8, p+64, zUpdate) @@ -134071,7 +134129,7 @@ } *(*int32)(unsafe.Pointer(bp + 16)) = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp+8, p+64, - Xsqlite3_mprintf(tls, ts+31434, libc.VaList(bp, p+48))) + Xsqlite3_mprintf(tls, ts+31481, libc.VaList(bp, p+48))) for *(*int32)(unsafe.Pointer(bp + 16)) == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 8))) { switch Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 8)), 0) { case RBU_STATE_STAGE: @@ -134144,18 +134202,18 @@ Xsqlite3_file_control(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+6844, SQLITE_FCNTL_RBUCNT, p) if (*Sqlite3rbu)(unsafe.Pointer(p)).FzState == uintptr(0) { var zFile uintptr = Xsqlite3_db_filename(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+6844) - (*Sqlite3rbu)(unsafe.Pointer(p)).FzState = rbuMPrintf(tls, p, ts+31464, libc.VaList(bp, zFile, zFile)) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzState = rbuMPrintf(tls, p, ts+31511, libc.VaList(bp, zFile, zFile)) } } if (*Sqlite3rbu)(unsafe.Pointer(p)).FzState != 0 { - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+31492, libc.VaList(bp+16, (*Sqlite3rbu)(unsafe.Pointer(p)).FzState)) + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+31539, libc.VaList(bp+16, (*Sqlite3rbu)(unsafe.Pointer(p)).FzState)) libc.X__builtin___memcpy_chk(tls, p+48, ts+3279, uint64(4), libc.X__builtin_object_size(tls, p+48, 0)) } else { libc.X__builtin___memcpy_chk(tls, p+48, ts+6844, uint64(4), libc.X__builtin_object_size(tls, p+48, 0)) } - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+31510, libc.VaList(bp+24, p+48)) + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+31557, libc.VaList(bp+24, p+48)) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { var bOpen int32 = 0 @@ -134195,11 +134253,11 @@ return } (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31576, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31623, 0) } else { var zTarget uintptr var zExtra uintptr = uintptr(0) - if libc.Xstrlen(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu) >= uint64(5) && 0 == libc.Xmemcmp(tls, ts+24656, (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu, uint64(5)) { + if libc.Xstrlen(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu) >= uint64(5) && 0 == libc.Xmemcmp(tls, ts+24703, (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu, uint64(5)) { zExtra = (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu + 5 for *(*int8)(unsafe.Pointer(zExtra)) != 0 { if int32(*(*int8)(unsafe.Pointer(libc.PostIncUintptr(&zExtra, 1)))) == '?' 
{ @@ -134211,13 +134269,13 @@ } } - zTarget = Xsqlite3_mprintf(tls, ts+31608, + zTarget = Xsqlite3_mprintf(tls, ts+31655, libc.VaList(bp+32, Xsqlite3_db_filename(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+6844), func() uintptr { if zExtra == uintptr(0) { return ts + 1547 } - return ts + 31640 + return ts + 31687 }(), func() uintptr { if zExtra == uintptr(0) { return ts + 1547 @@ -134236,21 +134294,21 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_create_function(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+31642, -1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { + ts+31689, -1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rbuTmpInsertFunc})), uintptr(0), uintptr(0)) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_create_function(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+31657, 2, SQLITE_UTF8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + ts+31704, 2, SQLITE_UTF8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rbuFossilDeltaFunc})), uintptr(0), uintptr(0)) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_create_function(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, - ts+31674, -1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { + ts+31721, -1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rbuTargetNameFunc})), uintptr(0), uintptr(0)) } @@ -134258,7 +134316,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_file_control(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6844, SQLITE_FCNTL_RBU, p) } - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31690, 0) + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31737, 0) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_file_control(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6844, SQLITE_FCNTL_RBU, p) @@ -134266,7 +134324,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_NOTFOUND { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31718, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31765, 0) } } @@ -134295,14 +134353,14 @@ if pState == uintptr(0) { (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage = 0 if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31690, uintptr(0), uintptr(0), uintptr(0)) + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31737, uintptr(0), uintptr(0), uintptr(0)) } } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { var rc2 int32 (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage = RBU_STAGE_CAPTURE - rc2 = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31736, uintptr(0), uintptr(0), uintptr(0)) + rc2 = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31783, uintptr(0), uintptr(0), uintptr(0)) if rc2 != SQLITE_NOTICE { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = rc2 } @@ -134428,7 +134486,7 @@ func rbuExclusiveCheckpoint(tls *libc.TLS, db uintptr) int32 { var zUri uintptr = Xsqlite3_db_filename(tls, db, uintptr(0)) - return 
Xsqlite3_uri_boolean(tls, zUri, ts+31771, 0) + return Xsqlite3_uri_boolean(tls, zUri, ts+31818, 0) } func rbuMoveOalFile(tls *libc.TLS, p uintptr) { @@ -134443,8 +134501,8 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { zMove = Xsqlite3_db_filename(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+6844) } - zOal = Xsqlite3_mprintf(tls, ts+31796, libc.VaList(bp, zMove)) - zWal = Xsqlite3_mprintf(tls, ts+31803, libc.VaList(bp+8, zMove)) + zOal = Xsqlite3_mprintf(tls, ts+31843, libc.VaList(bp, zMove)) + zWal = Xsqlite3_mprintf(tls, ts+31850, libc.VaList(bp+8, zMove)) if zWal == uintptr(0) || zOal == uintptr(0) { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_NOMEM @@ -134561,7 +134619,7 @@ (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx == uintptr(0) && (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_IPK && *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(i))) != 0 && Xsqlite3_column_type(tls, (*RbuObjIter)(unsafe.Pointer(pIter)).FpSelect, i) == SQLITE_NULL { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_MISMATCH - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+24294, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+24341, 0) return } @@ -134654,7 +134712,7 @@ var iCookie int32 = 1000000 (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, dbread, bp+8, p+64, - ts+31810) + ts+31857) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { if SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 8))) { iCookie = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 8)), 0) @@ -134662,7 +134720,7 @@ rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 8))) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31832, libc.VaList(bp, iCookie+1)) + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31879, libc.VaList(bp, iCookie+1)) } } } @@ -134683,7 +134741,7 @@ rc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp+168, p+64, Xsqlite3_mprintf(tls, - ts+31859, + ts+31906, libc.VaList(bp, p+48, RBU_STATE_STAGE, eStage, RBU_STATE_TBL, (*Sqlite3rbu)(unsafe.Pointer(p)).Fobjiter.FzTbl, @@ -134713,9 +134771,9 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { *(*uintptr)(unsafe.Pointer(bp + 24)) = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp+24, p+64, - Xsqlite3_mprintf(tls, ts+32017, libc.VaList(bp, zPragma))) + Xsqlite3_mprintf(tls, ts+32064, libc.VaList(bp, zPragma))) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) { - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+32032, + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+32079, libc.VaList(bp+8, zPragma, Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 24)), 0))) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 24))) @@ -134729,10 +134787,10 @@ *(*uintptr)(unsafe.Pointer(bp)) = uintptr(0) *(*uintptr)(unsafe.Pointer(bp + 8)) = uintptr(0) - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+32052, uintptr(0), uintptr(0), p+64) + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+32099, uintptr(0), uintptr(0), p+64) if 
(*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp, p+64, - ts+32077) + ts+32124) } for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp))) == SQLITE_ROW { @@ -134746,12 +134804,12 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp, p+64, - ts+32185) + ts+32232) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+8, p+64, - ts+32250) + ts+32297) } for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp))) == SQLITE_ROW { @@ -134763,7 +134821,7 @@ (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_reset(tls, *(*uintptr)(unsafe.Pointer(bp + 8))) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+32294, uintptr(0), uintptr(0), p+64) + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+32341, uintptr(0), uintptr(0), p+64) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp))) @@ -134791,7 +134849,7 @@ if (*RbuObjIter)(unsafe.Pointer(pIter)).FbCleanup != 0 { if libc.Bool32((*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0)) == 0 && (*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed != 0 { rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, - ts+32319, libc.VaList(bp, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl)) + ts+32366, libc.VaList(bp, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl)) } } else { rbuObjIterPrepareAll(tls, p, pIter, 0) @@ -134913,7 +134971,7 @@ if rc == SQLITE_OK && !(int32((*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl) != 0) { rc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32347, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32394, 0) } if rc == SQLITE_OK { @@ -134929,7 +134987,7 @@ bp := tls.Alloc(16) defer tls.Free(16) - var zOal uintptr = rbuMPrintf(tls, p, ts+31796, libc.VaList(bp, (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget)) + var zOal uintptr = rbuMPrintf(tls, p, ts+31843, libc.VaList(bp, (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget)) if zOal != 0 { *(*uintptr)(unsafe.Pointer(bp + 8)) = uintptr(0) Xsqlite3_file_control(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6844, SQLITE_FCNTL_VFS_POINTER, bp+8) @@ -134946,7 +135004,7 @@ defer tls.Free(76) Xsqlite3_randomness(tls, int32(unsafe.Sizeof(int32(0))), bp+8) - Xsqlite3_snprintf(tls, int32(unsafe.Sizeof([64]int8{})), bp+12, ts+32372, libc.VaList(bp, *(*int32)(unsafe.Pointer(bp + 8)))) + Xsqlite3_snprintf(tls, int32(unsafe.Sizeof([64]int8{})), bp+12, ts+32419, libc.VaList(bp, *(*int32)(unsafe.Pointer(bp + 8)))) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3rbu_create_vfs(tls, bp+12, uintptr(0)) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { var pVfs uintptr = Xsqlite3_vfs_find(tls, bp+12) @@ -134980,7 +135038,7 @@ rc = prepareFreeAndCollectError(tls, db, bp+8, bp+16, Xsqlite3_mprintf(tls, - ts+32383, libc.VaList(bp, Xsqlite3_value_text(tls, *(*uintptr)(unsafe.Pointer(apVal)))))) + ts+32430, libc.VaList(bp, Xsqlite3_value_text(tls, *(*uintptr)(unsafe.Pointer(apVal)))))) if rc != SQLITE_OK { Xsqlite3_result_error(tls, pCtx, 
*(*uintptr)(unsafe.Pointer(bp + 16)), -1) } else { @@ -135010,13 +135068,13 @@ (*Sqlite3rbu)(unsafe.Pointer(p)).FnPhaseOneStep = int64(-1) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_create_function(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, - ts+32455, 1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { + ts+32502, 1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rbuIndexCntFunc})), uintptr(0), uintptr(0)) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp, p+64, - ts+32469) + ts+32516) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { if SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp))) { @@ -135027,7 +135085,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && bExists != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp, p+64, - ts+32526) + ts+32573) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { if SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp))) { (*Sqlite3rbu)(unsafe.Pointer(p)).FnPhaseOneStep = Xsqlite3_column_int64(tls, *(*uintptr)(unsafe.Pointer(bp)), 0) @@ -135101,7 +135159,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && (*Rbu_file)(unsafe.Pointer((*Sqlite3rbu)(unsafe.Pointer(p)).FpTargetFd)).FpWalFd != 0 { if (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage == RBU_STAGE_OAL { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32600, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32647, 0) } else if (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage == RBU_STAGE_MOVE { (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage = RBU_STAGE_CKPT (*Sqlite3rbu)(unsafe.Pointer(p)).FnStep = 0 @@ -135119,12 +135177,12 @@ }() if (*Rbu_file)(unsafe.Pointer(pFd)).FiCookie != (*RbuState)(unsafe.Pointer(pState)).FiCookie { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_BUSY - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32632, + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32679, libc.VaList(bp+8, func() uintptr { if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - return ts + 32664 + return ts + 32711 } - return ts + 32671 + return ts + 32718 }())) } } @@ -135148,14 +135206,14 @@ } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, db, ts+32678, uintptr(0), uintptr(0), p+64) + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, db, ts+32725, uintptr(0), uintptr(0), p+64) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { var frc int32 = Xsqlite3_file_control(tls, db, ts+6844, SQLITE_FCNTL_ZIPVFS, uintptr(0)) if frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, - db, ts+32694, uintptr(0), uintptr(0), p+64) + db, ts+32741, uintptr(0), uintptr(0), p+64) } } @@ -135209,7 +135267,7 @@ } if zState != 0 { var n int32 = int32(libc.Xstrlen(tls, zState)) - if n >= 7 && 0 == libc.Xmemcmp(tls, ts+32718, zState+uintptr(n-7), uint64(7)) { + if n >= 7 && 0 == libc.Xmemcmp(tls, ts+32765, zState+uintptr(n-7), uint64(7)) { return rbuMisuseError(tls) } } @@ -135236,7 +135294,7 @@ var i uint32 var nErrmsg Size_t = libc.Xstrlen(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg) for i = uint32(0); Size_t(i) < nErrmsg-uint64(8); i++ { - if libc.Xmemcmp(tls, 
(*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg+uintptr(i), ts+30743, uint64(8)) == 0 { + if libc.Xmemcmp(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg+uintptr(i), ts+30790, uint64(8)) == 0 { var nDel int32 = 8 for int32(*(*int8)(unsafe.Pointer((*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg + uintptr(i+uint32(nDel))))) >= '0' && int32(*(*int8)(unsafe.Pointer((*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg + uintptr(i+uint32(nDel))))) <= '9' { nDel++ @@ -135272,7 +135330,7 @@ rbuObjIterFinalize(tls, p+80) if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) && (*Sqlite3rbu)(unsafe.Pointer(p)).Frc != SQLITE_OK && (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu != 0 { - var rc2 int32 = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+32726, uintptr(0), uintptr(0), uintptr(0)) + var rc2 int32 = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+32773, uintptr(0), uintptr(0), uintptr(0)) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_DONE && rc2 != SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = rc2 } @@ -135391,12 +135449,12 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { zBegin = ts + 14906 } else { - zBegin = ts + 32678 + zBegin = ts + 32725 } rc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, zBegin, uintptr(0), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { - rc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+32678, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+32725, uintptr(0), uintptr(0), uintptr(0)) } } @@ -135742,7 +135800,7 @@ })(unsafe.Pointer(&struct{ uintptr }{xControl})).f(tls, (*Rbu_file)(unsafe.Pointer(p)).FpReal, SQLITE_FCNTL_ZIPVFS, bp+16) if rc == SQLITE_OK { rc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(pRbu)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32753, 0) + (*Sqlite3rbu)(unsafe.Pointer(pRbu)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32800, 0) } else if rc == SQLITE_NOTFOUND { (*Sqlite3rbu)(unsafe.Pointer(pRbu)).FpTargetFd = p (*Rbu_file)(unsafe.Pointer(p)).FpRbu = pRbu @@ -135767,7 +135825,7 @@ if rc == SQLITE_OK && op == SQLITE_FCNTL_VFSNAME { var pRbuVfs uintptr = (*Rbu_file)(unsafe.Pointer(p)).FpRbuVfs var zIn uintptr = *(*uintptr)(unsafe.Pointer(pArg)) - var zOut uintptr = Xsqlite3_mprintf(tls, ts+32776, libc.VaList(bp, (*Rbu_vfs)(unsafe.Pointer(pRbuVfs)).Fbase.FzName, zIn)) + var zOut uintptr = Xsqlite3_mprintf(tls, ts+32823, libc.VaList(bp, (*Rbu_vfs)(unsafe.Pointer(pRbuVfs)).Fbase.FzName, zIn)) *(*uintptr)(unsafe.Pointer(pArg)) = zOut if zOut == uintptr(0) { rc = SQLITE_NOMEM @@ -135927,7 +135985,7 @@ } if oflags&SQLITE_OPEN_MAIN_DB != 0 && - Xsqlite3_uri_boolean(tls, zName, ts+32787, 0) != 0 { + Xsqlite3_uri_boolean(tls, zName, ts+32834, 0) != 0 { oflags = SQLITE_OPEN_TEMP_DB | SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE | SQLITE_OPEN_EXCLUSIVE | SQLITE_OPEN_DELETEONCLOSE zOpen = uintptr(0) } @@ -136756,7 +136814,7 @@ rc = Xsqlite3_table_column_metadata(tls, db, zDb, zThis, uintptr(0), uintptr(0), uintptr(0), uintptr(0), uintptr(0), uintptr(0)) if rc == SQLITE_OK { zPragma = Xsqlite3_mprintf(tls, - ts+32798, 0) + ts+32845, 0) } else if rc == SQLITE_ERROR { zPragma = Xsqlite3_mprintf(tls, ts+1547, 0) } else { @@ -136769,7 +136827,7 @@ return rc } } else { - zPragma = Xsqlite3_mprintf(tls, ts+32919, libc.VaList(bp, zDb, zThis)) + zPragma = Xsqlite3_mprintf(tls, ts+32966, libc.VaList(bp, zDb, zThis)) } if !(zPragma != 0) { *(*uintptr)(unsafe.Pointer(pazCol)) = uintptr(0) @@ -137449,9 +137507,9 @@ for i = 0; i < nCol; i++ { if 
*(*U8)(unsafe.Pointer(abPK + uintptr(i))) != 0 { - zRet = Xsqlite3_mprintf(tls, ts+32948, + zRet = Xsqlite3_mprintf(tls, ts+32995, libc.VaList(bp, zRet, zSep, zDb1, zTab, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*8)), zDb2, zTab, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*8)))) - zSep = ts + 21975 + zSep = ts + 22022 if zRet == uintptr(0) { break } @@ -137474,9 +137532,9 @@ if int32(*(*U8)(unsafe.Pointer(abPK + uintptr(i)))) == 0 { bHave = 1 zRet = Xsqlite3_mprintf(tls, - ts+32982, + ts+33029, libc.VaList(bp, zRet, zSep, zDb1, zTab, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*8)), zDb2, zTab, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*8)))) - zSep = ts + 33023 + zSep = ts + 33070 if zRet == uintptr(0) { break } @@ -137484,7 +137542,7 @@ } if bHave == 0 { - zRet = Xsqlite3_mprintf(tls, ts+7933, 0) + zRet = Xsqlite3_mprintf(tls, ts+7922, 0) } return zRet @@ -137495,7 +137553,7 @@ defer tls.Free(40) var zRet uintptr = Xsqlite3_mprintf(tls, - ts+33028, + ts+33075, libc.VaList(bp, zDb1, zTbl, zDb2, zTbl, zExpr)) return zRet } @@ -137538,7 +137596,7 @@ rc = SQLITE_NOMEM } else { var zStmt uintptr = Xsqlite3_mprintf(tls, - ts+33106, + ts+33153, libc.VaList(bp, (*Sqlite3_session)(unsafe.Pointer(pSession)).FzDb, (*SessionTable)(unsafe.Pointer(pTab)).FzName, zFrom, (*SessionTable)(unsafe.Pointer(pTab)).FzName, zExpr, zExpr2)) if zStmt == uintptr(0) { rc = SQLITE_NOMEM @@ -137665,7 +137723,7 @@ if !(pzErrMsg != 0) { goto __16 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+33159, 0) + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+33206, 0) __16: ; rc = SQLITE_SCHEMA @@ -138141,7 +138199,7 @@ if 0 == Xsqlite3_stricmp(tls, ts+11751, zTab) { zSql = Xsqlite3_mprintf(tls, - ts+33186, libc.VaList(bp, zDb)) + ts+33233, libc.VaList(bp, zDb)) if zSql == uintptr(0) { *(*int32)(unsafe.Pointer(bp + 24)) = SQLITE_NOMEM } @@ -138150,18 +138208,18 @@ var zSep uintptr = ts + 1547 *(*SessionBuffer)(unsafe.Pointer(bp + 8)) = SessionBuffer{} - sessionAppendStr(tls, bp+8, ts+33296, bp+24) + sessionAppendStr(tls, bp+8, ts+33343, bp+24) sessionAppendIdent(tls, bp+8, zDb, bp+24) sessionAppendStr(tls, bp+8, ts+1560, bp+24) sessionAppendIdent(tls, bp+8, zTab, bp+24) - sessionAppendStr(tls, bp+8, ts+33311, bp+24) + sessionAppendStr(tls, bp+8, ts+33358, bp+24) for i = 0; i < nCol; i++ { if *(*U8)(unsafe.Pointer(abPK + uintptr(i))) != 0 { sessionAppendStr(tls, bp+8, zSep, bp+24) sessionAppendIdent(tls, bp+8, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*8)), bp+24) - sessionAppendStr(tls, bp+8, ts+33319, bp+24) + sessionAppendStr(tls, bp+8, ts+33366, bp+24) sessionAppendInteger(tls, bp+8, i+1, bp+24) - zSep = ts + 21975 + zSep = ts + 22022 } } zSql = (*SessionBuffer)(unsafe.Pointer(bp + 8)).FaBuf @@ -138270,7 +138328,7 @@ if (*Sqlite3_session)(unsafe.Pointer(pSession)).Frc != 0 { return (*Sqlite3_session)(unsafe.Pointer(pSession)).Frc } - *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3_exec(tls, (*Sqlite3_session)(unsafe.Pointer(pSession)).Fdb, ts+33325, uintptr(0), uintptr(0), uintptr(0)) + *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3_exec(tls, (*Sqlite3_session)(unsafe.Pointer(pSession)).Fdb, ts+33372, uintptr(0), uintptr(0), uintptr(0)) if *(*int32)(unsafe.Pointer(bp + 40)) != SQLITE_OK { return *(*int32)(unsafe.Pointer(bp + 40)) } @@ -138362,7 +138420,7 @@ } Xsqlite3_free(tls, (*SessionBuffer)(unsafe.Pointer(bp+24)).FaBuf) - Xsqlite3_exec(tls, db, ts+33345, uintptr(0), uintptr(0), uintptr(0)) + Xsqlite3_exec(tls, db, ts+33392, uintptr(0), uintptr(0), uintptr(0)) 
Xsqlite3_mutex_leave(tls, Xsqlite3_db_mutex(tls, db)) return *(*int32)(unsafe.Pointer(bp + 40)) } @@ -138625,7 +138683,7 @@ rc = sessionInputBuffer(tls, pIn, 9) if rc == SQLITE_OK { if (*SessionInput)(unsafe.Pointer(pIn)).FiNext >= (*SessionInput)(unsafe.Pointer(pIn)).FnData { - rc = Xsqlite3CorruptError(tls, 219078) + rc = Xsqlite3CorruptError(tls, 219169) } else { eType = int32(*(*U8)(unsafe.Pointer((*SessionInput)(unsafe.Pointer(pIn)).FaData + uintptr(libc.PostIncInt32(&(*SessionInput)(unsafe.Pointer(pIn)).FiNext, 1))))) @@ -138648,7 +138706,7 @@ rc = sessionInputBuffer(tls, pIn, *(*int32)(unsafe.Pointer(bp))) if rc == SQLITE_OK { if *(*int32)(unsafe.Pointer(bp)) < 0 || *(*int32)(unsafe.Pointer(bp)) > (*SessionInput)(unsafe.Pointer(pIn)).FnData-(*SessionInput)(unsafe.Pointer(pIn)).FiNext { - rc = Xsqlite3CorruptError(tls, 219098) + rc = Xsqlite3CorruptError(tls, 219189) } else { var enc U8 = func() uint8 { if eType == SQLITE_TEXT { @@ -138690,7 +138748,7 @@ nRead = nRead + sessionVarintGet(tls, (*SessionInput)(unsafe.Pointer(pIn)).FaData+uintptr((*SessionInput)(unsafe.Pointer(pIn)).FiNext+nRead), bp) if *(*int32)(unsafe.Pointer(bp)) < 0 || *(*int32)(unsafe.Pointer(bp)) > 65536 { - rc = Xsqlite3CorruptError(tls, 219152) + rc = Xsqlite3CorruptError(tls, 219243) } else { rc = sessionInputBuffer(tls, pIn, nRead+*(*int32)(unsafe.Pointer(bp))+100) nRead = nRead + *(*int32)(unsafe.Pointer(bp)) @@ -138751,7 +138809,7 @@ (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Ftblhdr.FnBuf = 0 sessionBufferGrow(tls, p+72, int64(nByte), bp+4) } else { - *(*int32)(unsafe.Pointer(bp + 4)) = Xsqlite3CorruptError(tls, 219240) + *(*int32)(unsafe.Pointer(bp + 4)) = Xsqlite3CorruptError(tls, 219331) } } @@ -138825,13 +138883,13 @@ } if (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FzTab == uintptr(0) || (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FbPatchset != 0 && (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FbInvert != 0 { - return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219326)) + return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219417)) } (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fop = int32(op) (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FbIndirect = int32(*(*U8)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fin.FaData + uintptr(libc.PostIncInt32(&(*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fin.FiNext, 1))))) if (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fop != SQLITE_UPDATE && (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fop != SQLITE_DELETE && (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fop != SQLITE_INSERT { - return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219332)) + return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219423)) } if paRec != 0 { @@ -138893,7 +138951,7 @@ if *(*U8)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FabPK + uintptr(i))) != 0 { *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FapValue + uintptr(i)*8)) = *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FapValue + uintptr(i+(*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FnCol)*8)) if *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FapValue + uintptr(i)*8)) == uintptr(0) { - return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219376)) + return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219467)) } *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FapValue + uintptr(i+(*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FnCol)*8)) = uintptr(0) } @@ -139266,7 
+139324,7 @@ goto __6 __11: - *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3CorruptError(tls, 219741) + *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3CorruptError(tls, 219832) goto finished_invert __6: ; @@ -139445,34 +139503,34 @@ (*SessionUpdate)(unsafe.Pointer(pUp)).FaMask = pUp + 1*24 libc.X__builtin___memcpy_chk(tls, (*SessionUpdate)(unsafe.Pointer(pUp)).FaMask, (*SessionApplyCtx)(unsafe.Pointer(p)).FaUpdateMask, uint64(nU32)*uint64(unsafe.Sizeof(U32(0))), libc.X__builtin_object_size(tls, (*SessionUpdate)(unsafe.Pointer(pUp)).FaMask, 0)) - sessionAppendStr(tls, bp, ts+33363, bp+16) + sessionAppendStr(tls, bp, ts+33410, bp+16) sessionAppendIdent(tls, bp, (*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FzTab, bp+16) - sessionAppendStr(tls, bp, ts+33376, bp+16) + sessionAppendStr(tls, bp, ts+33423, bp+16) for ii = 0; ii < (*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FnCol; ii++ { if int32(*(*U8)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FabPK + uintptr(ii)))) == 0 && *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FapValue + uintptr((*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FnCol+ii)*8)) != 0 { sessionAppendStr(tls, bp, zSep, bp+16) sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(ii)*8)), bp+16) - sessionAppendStr(tls, bp, ts+33382, bp+16) + sessionAppendStr(tls, bp, ts+33429, bp+16) sessionAppendInteger(tls, bp, ii*2+1, bp+16) zSep = ts + 15017 } } zSep = ts + 1547 - sessionAppendStr(tls, bp, ts+33311, bp+16) + sessionAppendStr(tls, bp, ts+33358, bp+16) for ii = 0; ii < (*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FnCol; ii++ { if *(*U8)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FabPK + uintptr(ii))) != 0 || bPatchset == 0 && *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FapValue + uintptr(ii)*8)) != 0 { sessionAppendStr(tls, bp, zSep, bp+16) if bStat1 != 0 && ii == 1 { sessionAppendStr(tls, bp, - ts+33387, bp+16) + ts+33434, bp+16) } else { sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(ii)*8)), bp+16) - sessionAppendStr(tls, bp, ts+33319, bp+16) + sessionAppendStr(tls, bp, ts+33366, bp+16) sessionAppendInteger(tls, bp, ii*2+2, bp+16) } - zSep = ts + 21975 + zSep = ts + 22022 } } @@ -139524,34 +139582,34 @@ *(*SessionBuffer)(unsafe.Pointer(bp)) = SessionBuffer{} var nPk int32 = 0 - sessionAppendStr(tls, bp, ts+33462, bp+16) + sessionAppendStr(tls, bp, ts+33509, bp+16) sessionAppendIdent(tls, bp, zTab, bp+16) - sessionAppendStr(tls, bp, ts+33311, bp+16) + sessionAppendStr(tls, bp, ts+33358, bp+16) for i = 0; i < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol; i++ { if *(*U8)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FabPK + uintptr(i))) != 0 { nPk++ sessionAppendStr(tls, bp, zSep, bp+16) sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(i)*8)), bp+16) - sessionAppendStr(tls, bp, ts+33382, bp+16) + sessionAppendStr(tls, bp, ts+33429, bp+16) sessionAppendInteger(tls, bp, i+1, bp+16) - zSep = ts + 21975 + zSep = ts + 22022 } } if nPk < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol { - sessionAppendStr(tls, bp, ts+33480, bp+16) + sessionAppendStr(tls, bp, ts+33527, bp+16) sessionAppendInteger(tls, bp, (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol+1, bp+16) - sessionAppendStr(tls, bp, ts+33023, bp+16) + sessionAppendStr(tls, bp, ts+33070, bp+16) zSep = ts + 1547 for i = 0; i < 
(*SessionApplyCtx)(unsafe.Pointer(p)).FnCol; i++ { if !(int32(*(*U8)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FabPK + uintptr(i)))) != 0) { sessionAppendStr(tls, bp, zSep, bp+16) sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(i)*8)), bp+16) - sessionAppendStr(tls, bp, ts+33319, bp+16) + sessionAppendStr(tls, bp, ts+33366, bp+16) sessionAppendInteger(tls, bp, i+1, bp+16) - zSep = ts + 33488 + zSep = ts + 33535 } } sessionAppendStr(tls, bp, ts+5360, bp+16) @@ -139578,9 +139636,9 @@ var i int32 *(*SessionBuffer)(unsafe.Pointer(bp)) = SessionBuffer{} - sessionAppendStr(tls, bp, ts+33493, bp+16) + sessionAppendStr(tls, bp, ts+33540, bp+16) sessionAppendIdent(tls, bp, zTab, bp+16) - sessionAppendStr(tls, bp, ts+21981, bp+16) + sessionAppendStr(tls, bp, ts+22028, bp+16) for i = 0; i < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol; i++ { if i != 0 { sessionAppendStr(tls, bp, ts+15017, bp+16) @@ -139588,9 +139646,9 @@ sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(i)*8)), bp+16) } - sessionAppendStr(tls, bp, ts+33511, bp+16) + sessionAppendStr(tls, bp, ts+33558, bp+16) for i = 1; i < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol; i++ { - sessionAppendStr(tls, bp, ts+33522, bp+16) + sessionAppendStr(tls, bp, ts+33569, bp+16) } sessionAppendStr(tls, bp, ts+5360, bp+16) @@ -139609,11 +139667,11 @@ var rc int32 = sessionSelectRow(tls, db, ts+11751, p) if rc == SQLITE_OK { rc = sessionPrepare(tls, db, p+16, - ts+33526) + ts+33573) } if rc == SQLITE_OK { rc = sessionPrepare(tls, db, p+8, - ts+33639) + ts+33686) } return rc } @@ -139641,7 +139699,7 @@ f func(*libc.TLS, uintptr, int32, uintptr) int32 })(unsafe.Pointer(&struct{ uintptr }{xValue})).f(tls, pIter, i, bp) if *(*uintptr)(unsafe.Pointer(bp)) == uintptr(0) { - rc = Xsqlite3CorruptError(tls, 220219) + rc = Xsqlite3CorruptError(tls, 220310) } else { rc = sessionBindValue(tls, pStmt, i+1, *(*uintptr)(unsafe.Pointer(bp))) } @@ -139894,7 +139952,7 @@ if *(*int32)(unsafe.Pointer(bp + 4)) != 0 { rc = sessionApplyOneOp(tls, pIter, pApply, xConflict, pCtx, uintptr(0), uintptr(0)) } else if *(*int32)(unsafe.Pointer(bp)) != 0 { - rc = Xsqlite3_exec(tls, db, ts+33783, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33830, uintptr(0), uintptr(0), uintptr(0)) if rc == SQLITE_OK { rc = sessionBindRow(tls, pIter, *(*uintptr)(unsafe.Pointer(&struct { @@ -139910,7 +139968,7 @@ rc = sessionApplyOneOp(tls, pIter, pApply, xConflict, pCtx, uintptr(0), uintptr(0)) } if rc == SQLITE_OK { - rc = Xsqlite3_exec(tls, db, ts+33804, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33851, uintptr(0), uintptr(0), uintptr(0)) } } } @@ -139983,10 +140041,10 @@ (*SessionApplyCtx)(unsafe.Pointer(bp + 48)).FbInvertConstraints = libc.BoolInt32(!!(flags&SQLITE_CHANGESETAPPLY_INVERT != 0)) Xsqlite3_mutex_enter(tls, Xsqlite3_db_mutex(tls, db)) if flags&SQLITE_CHANGESETAPPLY_NOSAVEPOINT == 0 { - rc = Xsqlite3_exec(tls, db, ts+33823, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33870, uintptr(0), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { - rc = Xsqlite3_exec(tls, db, ts+33849, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33896, uintptr(0), uintptr(0), uintptr(0)) } for rc == SQLITE_OK && SQLITE_ROW == Xsqlite3changeset_next(tls, pIter) { Xsqlite3changeset_op(tls, pIter, bp+176, bp+184, bp+188, uintptr(0)) @@ -140045,16 +140103,16 @@ if 
(*SessionApplyCtx)(unsafe.Pointer(bp+48)).FnCol == 0 { schemaMismatch = 1 Xsqlite3_log(tls, SQLITE_SCHEMA, - ts+33879, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(bp + 200)))) + ts+33926, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(bp + 200)))) } else if (*SessionApplyCtx)(unsafe.Pointer(bp+48)).FnCol < *(*int32)(unsafe.Pointer(bp + 184)) { schemaMismatch = 1 Xsqlite3_log(tls, SQLITE_SCHEMA, - ts+33923, + ts+33970, libc.VaList(bp+16, *(*uintptr)(unsafe.Pointer(bp + 200)), (*SessionApplyCtx)(unsafe.Pointer(bp+48)).FnCol, *(*int32)(unsafe.Pointer(bp + 184)))) } else if *(*int32)(unsafe.Pointer(bp + 184)) < nMinCol || libc.Xmemcmp(tls, (*SessionApplyCtx)(unsafe.Pointer(bp+48)).FabPK, *(*uintptr)(unsafe.Pointer(bp + 192)), uint64(*(*int32)(unsafe.Pointer(bp + 184)))) != 0 { schemaMismatch = 1 Xsqlite3_log(tls, SQLITE_SCHEMA, - ts+33994, libc.VaList(bp+40, *(*uintptr)(unsafe.Pointer(bp + 200)))) + ts+34041, libc.VaList(bp+40, *(*uintptr)(unsafe.Pointer(bp + 200)))) } else { (*SessionApplyCtx)(unsafe.Pointer(bp + 48)).FnCol = *(*int32)(unsafe.Pointer(bp + 184)) if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(bp + 200)), ts+11751) { @@ -140108,14 +140166,14 @@ } } } - Xsqlite3_exec(tls, db, ts+34054, uintptr(0), uintptr(0), uintptr(0)) + Xsqlite3_exec(tls, db, ts+34101, uintptr(0), uintptr(0), uintptr(0)) if flags&SQLITE_CHANGESETAPPLY_NOSAVEPOINT == 0 { if rc == SQLITE_OK { - rc = Xsqlite3_exec(tls, db, ts+34084, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+34131, uintptr(0), uintptr(0), uintptr(0)) } else { - Xsqlite3_exec(tls, db, ts+34108, uintptr(0), uintptr(0), uintptr(0)) - Xsqlite3_exec(tls, db, ts+34084, uintptr(0), uintptr(0), uintptr(0)) + Xsqlite3_exec(tls, db, ts+34155, uintptr(0), uintptr(0), uintptr(0)) + Xsqlite3_exec(tls, db, ts+34131, uintptr(0), uintptr(0), uintptr(0)) } } @@ -141363,7 +141421,7 @@ fts5yy_pop_parser_stack(tls, fts5yypParser) } - sqlite3Fts5ParseError(tls, pParse, ts+34136, 0) + sqlite3Fts5ParseError(tls, pParse, ts+34183, 0) (*Fts5yyParser)(unsafe.Pointer(fts5yypParser)).FpParse = pParse @@ -141651,7 +141709,7 @@ _ = fts5yymajor sqlite3Fts5ParseError(tls, - pParse, ts+34164, libc.VaList(bp, fts5yyminor.Fn, fts5yyminor.Fp)) + pParse, ts+34211, libc.VaList(bp, fts5yyminor.Fn, fts5yyminor.Fp)) (*Fts5yyParser)(unsafe.Pointer(fts5yypParser)).FpParse = pParse @@ -141838,7 +141896,7 @@ if n < 0 { n = int32(libc.Xstrlen(tls, z)) } - (*HighlightContext)(unsafe.Pointer(p)).FzOut = Xsqlite3_mprintf(tls, ts+34195, libc.VaList(bp, (*HighlightContext)(unsafe.Pointer(p)).FzOut, n, z)) + (*HighlightContext)(unsafe.Pointer(p)).FzOut = Xsqlite3_mprintf(tls, ts+34242, libc.VaList(bp, (*HighlightContext)(unsafe.Pointer(p)).FzOut, n, z)) if (*HighlightContext)(unsafe.Pointer(p)).FzOut == uintptr(0) { *(*int32)(unsafe.Pointer(pRc)) = SQLITE_NOMEM } @@ -141906,7 +141964,7 @@ var iCol int32 if nVal != 3 { - var zErr uintptr = ts + 34202 + var zErr uintptr = ts + 34249 Xsqlite3_result_error(tls, pCtx, zErr, -1) return } @@ -142088,7 +142146,7 @@ var nCol int32 if nVal != 5 { - var zErr uintptr = ts + 34252 + var zErr uintptr = ts + 34299 Xsqlite3_result_error(tls, pCtx, zErr, -1) return } @@ -142412,13 +142470,13 @@ defer tls.Free(96) *(*[3]Builtin)(unsafe.Pointer(bp)) = [3]Builtin{ - {FzFunc: ts + 34300, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { + {FzFunc: ts + 34347, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr, int32, uintptr) }{fts5SnippetFunction}))}, - {FzFunc: ts + 34308, FxFunc: 
*(*uintptr)(unsafe.Pointer(&struct { + {FzFunc: ts + 34355, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr, int32, uintptr) }{fts5HighlightFunction}))}, - {FzFunc: ts + 34318, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { + {FzFunc: ts + 34365, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr, int32, uintptr) }{fts5Bm25Function}))}, } @@ -142969,7 +143027,7 @@ *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_OK var nCmd int32 = int32(libc.Xstrlen(tls, zCmd)) - if Xsqlite3_strnicmp(tls, ts+34323, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+34370, zCmd, nCmd) == 0 { var nByte int32 = int32(uint64(unsafe.Sizeof(int32(0))) * uint64(FTS5_MAX_PREFIX_INDEXES)) var p uintptr var bFirst int32 = 1 @@ -142996,14 +143054,14 @@ break } if int32(*(*int8)(unsafe.Pointer(p))) < '0' || int32(*(*int8)(unsafe.Pointer(p))) > '9' { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34330, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34377, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR break } if (*Fts5Config)(unsafe.Pointer(pConfig)).FnPrefix == FTS5_MAX_PREFIX_INDEXES { *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, - ts+34361, libc.VaList(bp, FTS5_MAX_PREFIX_INDEXES)) + ts+34408, libc.VaList(bp, FTS5_MAX_PREFIX_INDEXES)) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR break } @@ -143014,7 +143072,7 @@ } if nPre <= 0 || nPre >= 1000 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34394, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34441, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR break } @@ -143027,7 +143085,7 @@ return *(*int32)(unsafe.Pointer(bp + 40)) } - if Xsqlite3_strnicmp(tls, ts+34431, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+34478, zCmd, nCmd) == 0 { var p uintptr = zArg var nArg Sqlite3_int64 = Sqlite3_int64(libc.Xstrlen(tls, zArg) + uint64(1)) var azArg uintptr = sqlite3Fts5MallocZero(tls, bp+40, int64(uint64(unsafe.Sizeof(uintptr(0)))*uint64(nArg))) @@ -143036,7 +143094,7 @@ if azArg != 0 && pSpace != 0 { if (*Fts5Config)(unsafe.Pointer(pConfig)).FpTok != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34440, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34487, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { for nArg = int64(0); p != 0 && *(*int8)(unsafe.Pointer(p)) != 0; nArg++ { @@ -143055,7 +143113,7 @@ } } if p == uintptr(0) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34473, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34520, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { *(*int32)(unsafe.Pointer(bp + 40)) = sqlite3Fts5GetTokenizer(tls, pGlobal, @@ -143070,14 +143128,14 @@ return *(*int32)(unsafe.Pointer(bp + 40)) } - if Xsqlite3_strnicmp(tls, ts+34507, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+34554, zCmd, nCmd) == 0 { if (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent != FTS5_CONTENT_NORMAL { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34515, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34562, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { if *(*int8)(unsafe.Pointer(zArg)) != 0 { (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent = FTS5_CONTENT_EXTERNAL - (*Fts5Config)(unsafe.Pointer(pConfig)).FzContent = sqlite3Fts5Mprintf(tls, bp+40, ts+34547, libc.VaList(bp+8, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, zArg)) + 
(*Fts5Config)(unsafe.Pointer(pConfig)).FzContent = sqlite3Fts5Mprintf(tls, bp+40, ts+34594, libc.VaList(bp+8, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, zArg)) } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent = FTS5_CONTENT_NONE } @@ -143085,9 +143143,9 @@ return *(*int32)(unsafe.Pointer(bp + 40)) } - if Xsqlite3_strnicmp(tls, ts+34553, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+34600, zCmd, nCmd) == 0 { if (*Fts5Config)(unsafe.Pointer(pConfig)).FzContentRowid != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34567, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34614, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FzContentRowid = sqlite3Fts5Strndup(tls, bp+40, zArg, -1) @@ -143095,9 +143153,9 @@ return *(*int32)(unsafe.Pointer(bp + 40)) } - if Xsqlite3_strnicmp(tls, ts+34605, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+34652, zCmd, nCmd) == 0 { if int32(*(*int8)(unsafe.Pointer(zArg))) != '0' && int32(*(*int8)(unsafe.Pointer(zArg))) != '1' || int32(*(*int8)(unsafe.Pointer(zArg + 1))) != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34616, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34663, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize = libc.Bool32(int32(*(*int8)(unsafe.Pointer(zArg))) == '1') @@ -143109,17 +143167,17 @@ *(*[4]Fts5Enum)(unsafe.Pointer(bp + 48)) = [4]Fts5Enum{ {FzName: ts + 8429, FeVal: FTS5_DETAIL_NONE}, {FzName: ts + 17767}, - {FzName: ts + 34651, FeVal: FTS5_DETAIL_COLUMNS}, + {FzName: ts + 34698, FeVal: FTS5_DETAIL_COLUMNS}, {}, } if libc.AssignPtrInt32(bp+40, fts5ConfigSetEnum(tls, bp+48, zArg, pConfig+92)) != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34659, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34706, 0) } return *(*int32)(unsafe.Pointer(bp + 40)) } - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34690, libc.VaList(bp+24, nCmd, zCmd)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34737, libc.VaList(bp+24, nCmd, zCmd)) return SQLITE_ERROR } @@ -143166,15 +143224,15 @@ defer tls.Free(16) var rc int32 = SQLITE_OK - if 0 == Xsqlite3_stricmp(tls, zCol, ts+22641) || + if 0 == Xsqlite3_stricmp(tls, zCol, ts+22688) || 0 == Xsqlite3_stricmp(tls, zCol, ts+16673) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34718, libc.VaList(bp, zCol)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34765, libc.VaList(bp, zCol)) rc = SQLITE_ERROR } else if zArg != 0 { - if 0 == Xsqlite3_stricmp(tls, zArg, ts+34748) { + if 0 == Xsqlite3_stricmp(tls, zArg, ts+34795) { *(*U8)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(p)).FabUnindexed + uintptr((*Fts5Config)(unsafe.Pointer(p)).FnCol))) = U8(1) } else { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34758, libc.VaList(bp+8, zArg)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34805, libc.VaList(bp+8, zArg)) rc = SQLITE_ERROR } } @@ -143191,13 +143249,13 @@ *(*int32)(unsafe.Pointer(bp + 24)) = SQLITE_OK *(*Fts5Buffer)(unsafe.Pointer(bp + 32)) = Fts5Buffer{} - sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+34789, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(p)).FzContentRowid)) + sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+34836, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(p)).FzContentRowid)) if (*Fts5Config)(unsafe.Pointer(p)).FeContent != 
FTS5_CONTENT_NONE { for i = 0; i < (*Fts5Config)(unsafe.Pointer(p)).FnCol; i++ { if (*Fts5Config)(unsafe.Pointer(p)).FeContent == FTS5_CONTENT_EXTERNAL { - sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+34794, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(p)).FazCol + uintptr(i)*8)))) + sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+34841, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(p)).FazCol + uintptr(i)*8)))) } else { - sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+34801, libc.VaList(bp+16, i)) + sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+34848, libc.VaList(bp+16, i)) } } } @@ -143235,8 +143293,8 @@ (*Fts5Config)(unsafe.Pointer(pRet)).FzName = sqlite3Fts5Strndup(tls, bp+40, *(*uintptr)(unsafe.Pointer(azArg + 2*8)), -1) (*Fts5Config)(unsafe.Pointer(pRet)).FbColumnsize = 1 (*Fts5Config)(unsafe.Pointer(pRet)).FeDetail = FTS5_DETAIL_FULL - if *(*int32)(unsafe.Pointer(bp + 40)) == SQLITE_OK && Xsqlite3_stricmp(tls, (*Fts5Config)(unsafe.Pointer(pRet)).FzName, ts+22641) == 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34809, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pRet)).FzName)) + if *(*int32)(unsafe.Pointer(bp + 40)) == SQLITE_OK && Xsqlite3_stricmp(tls, (*Fts5Config)(unsafe.Pointer(pRet)).FzName, ts+22688) == 0 { + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34856, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pRet)).FzName)) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } @@ -143268,7 +143326,7 @@ if *(*int32)(unsafe.Pointer(bp + 40)) == SQLITE_OK { if z == uintptr(0) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34838, libc.VaList(bp+8, zOrig)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34885, libc.VaList(bp+8, zOrig)) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { if bOption != 0 { @@ -143305,14 +143363,14 @@ var zTail uintptr = uintptr(0) if (*Fts5Config)(unsafe.Pointer(pRet)).FeContent == FTS5_CONTENT_NORMAL { - zTail = ts + 34507 + zTail = ts + 34554 } else if (*Fts5Config)(unsafe.Pointer(pRet)).FbColumnsize != 0 { - zTail = ts + 34858 + zTail = ts + 34905 } if zTail != 0 { (*Fts5Config)(unsafe.Pointer(pRet)).FzContent = sqlite3Fts5Mprintf(tls, - bp+40, ts+34866, libc.VaList(bp+16, (*Fts5Config)(unsafe.Pointer(pRet)).FzDb, (*Fts5Config)(unsafe.Pointer(pRet)).FzName, zTail)) + bp+40, ts+34913, libc.VaList(bp+16, (*Fts5Config)(unsafe.Pointer(pRet)).FzDb, (*Fts5Config)(unsafe.Pointer(pRet)).FzName, zTail)) } } @@ -143361,7 +143419,7 @@ *(*int32)(unsafe.Pointer(bp + 48)) = SQLITE_OK var zSql uintptr - zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34877, 0) + zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34924, 0) for i = 0; zSql != 0 && i < (*Fts5Config)(unsafe.Pointer(pConfig)).FnCol; i++ { var zSep uintptr = func() uintptr { if i == 0 { @@ -143369,10 +143427,10 @@ } return ts + 15017 }() - zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34893, libc.VaList(bp, zSql, zSep, *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(pConfig)).FazCol + uintptr(i)*8)))) + zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34940, libc.VaList(bp, zSql, zSep, *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(pConfig)).FazCol + uintptr(i)*8)))) } - zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34900, - libc.VaList(bp+24, zSql, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, ts+22641)) + zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34947, + libc.VaList(bp+24, zSql, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, ts+22688)) if zSql != 0 { 
*(*int32)(unsafe.Pointer(bp + 48)) = Xsqlite3_declare_vtab(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, zSql) @@ -143482,7 +143540,7 @@ var rc int32 = SQLITE_OK - if 0 == Xsqlite3_stricmp(tls, zKey, ts+34926) { + if 0 == Xsqlite3_stricmp(tls, zKey, ts+34973) { var pgsz int32 = 0 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { pgsz = Xsqlite3_value_int(tls, pVal) @@ -143492,7 +143550,7 @@ } else { (*Fts5Config)(unsafe.Pointer(pConfig)).Fpgsz = pgsz } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34931) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34978) { var nHashSize int32 = -1 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { nHashSize = Xsqlite3_value_int(tls, pVal) @@ -143502,7 +143560,7 @@ } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FnHashSize = nHashSize } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34940) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34987) { var nAutomerge int32 = -1 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { nAutomerge = Xsqlite3_value_int(tls, pVal) @@ -143515,7 +143573,7 @@ } (*Fts5Config)(unsafe.Pointer(pConfig)).FnAutomerge = nAutomerge } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34950) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34997) { var nUsermerge int32 = -1 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { nUsermerge = Xsqlite3_value_int(tls, pVal) @@ -143525,7 +143583,7 @@ } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FnUsermerge = nUsermerge } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34960) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+35007) { var nCrisisMerge int32 = -1 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { nCrisisMerge = Xsqlite3_value_int(tls, pVal) @@ -143541,7 +143599,7 @@ } (*Fts5Config)(unsafe.Pointer(pConfig)).FnCrisisMerge = nCrisisMerge } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+22641) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+22688) { var zIn uintptr = Xsqlite3_value_text(tls, pVal) rc = sqlite3Fts5ConfigParseRank(tls, zIn, bp, bp+8) @@ -143564,7 +143622,7 @@ bp := tls.Alloc(52) defer tls.Free(52) - var zSelect uintptr = ts + 34972 + var zSelect uintptr = ts + 35019 var zSql uintptr *(*uintptr)(unsafe.Pointer(bp + 40)) = uintptr(0) *(*int32)(unsafe.Pointer(bp + 32)) = SQLITE_OK @@ -143586,7 +143644,7 @@ for SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 40))) { var zK uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 40)), 0) var pVal uintptr = Xsqlite3_column_value(tls, *(*uintptr)(unsafe.Pointer(bp + 40)), 1) - if 0 == Xsqlite3_stricmp(tls, zK, ts+35004) { + if 0 == Xsqlite3_stricmp(tls, zK, ts+35051) { iVersion = Xsqlite3_value_int(tls, pVal) } else { *(*int32)(unsafe.Pointer(bp + 48)) = 0 @@ -143600,7 +143658,7 @@ *(*int32)(unsafe.Pointer(bp + 32)) = SQLITE_ERROR if (*Fts5Config)(unsafe.Pointer(pConfig)).FpzErrmsg != 0 { *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(pConfig)).FpzErrmsg)) = Xsqlite3_mprintf(tls, - ts+35012, + ts+35059, libc.VaList(bp+16, iVersion, FTS5_CURRENT_VERSION)) } } @@ -143698,7 +143756,7 @@ } } if int32(*(*int8)(unsafe.Pointer(z2))) == 0 { - sqlite3Fts5ParseError(tls, pParse, ts+35077, 0) + sqlite3Fts5ParseError(tls, pParse, ts+35124, 0) return FTS5_EOF } } @@ -143711,20 +143769,20 @@ { var z2 uintptr if sqlite3Fts5IsBareword(tls, *(*int8)(unsafe.Pointer(z))) == 0 { - sqlite3Fts5ParseError(tls, pParse, ts+35097, libc.VaList(bp, z)) + sqlite3Fts5ParseError(tls, pParse, ts+35144, libc.VaList(bp, z)) return FTS5_EOF } tok = 
FTS5_STRING for z2 = z + 1; sqlite3Fts5IsBareword(tls, *(*int8)(unsafe.Pointer(z2))) != 0; z2++ { } (*Fts5Token)(unsafe.Pointer(pToken)).Fn = int32((int64(z2) - int64(z)) / 1) - if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 2 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+35128, uint64(2)) == 0 { + if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 2 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+35175, uint64(2)) == 0 { tok = FTS5_OR } - if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 3 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+35131, uint64(3)) == 0 { + if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 3 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+35178, uint64(3)) == 0 { tok = FTS5_NOT } - if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 3 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+30513, uint64(3)) == 0 { + if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 3 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+30560, uint64(3)) == 0 { tok = FTS5_AND } break @@ -145502,9 +145560,9 @@ bp := tls.Alloc(16) defer tls.Free(16) - if (*Fts5Token)(unsafe.Pointer(pTok)).Fn != 4 || libc.Xmemcmp(tls, ts+35135, (*Fts5Token)(unsafe.Pointer(pTok)).Fp, uint64(4)) != 0 { + if (*Fts5Token)(unsafe.Pointer(pTok)).Fn != 4 || libc.Xmemcmp(tls, ts+35182, (*Fts5Token)(unsafe.Pointer(pTok)).Fp, uint64(4)) != 0 { sqlite3Fts5ParseError(tls, - pParse, ts+34164, libc.VaList(bp, (*Fts5Token)(unsafe.Pointer(pTok)).Fn, (*Fts5Token)(unsafe.Pointer(pTok)).Fp)) + pParse, ts+34211, libc.VaList(bp, (*Fts5Token)(unsafe.Pointer(pTok)).Fn, (*Fts5Token)(unsafe.Pointer(pTok)).Fp)) } } @@ -145520,7 +145578,7 @@ var c int8 = *(*int8)(unsafe.Pointer((*Fts5Token)(unsafe.Pointer(p)).Fp + uintptr(i))) if int32(c) < '0' || int32(c) > '9' { sqlite3Fts5ParseError(tls, - pParse, ts+35140, libc.VaList(bp, (*Fts5Token)(unsafe.Pointer(p)).Fn, (*Fts5Token)(unsafe.Pointer(p)).Fp)) + pParse, ts+35187, libc.VaList(bp, (*Fts5Token)(unsafe.Pointer(p)).Fn, (*Fts5Token)(unsafe.Pointer(p)).Fp)) return } nNear = nNear*10 + (int32(*(*int8)(unsafe.Pointer((*Fts5Token)(unsafe.Pointer(p)).Fp + uintptr(i)))) - '0') @@ -145607,7 +145665,7 @@ } } if iCol == (*Fts5Config)(unsafe.Pointer(pConfig)).FnCol { - sqlite3Fts5ParseError(tls, pParse, ts+20978, libc.VaList(bp, z)) + sqlite3Fts5ParseError(tls, pParse, ts+21025, libc.VaList(bp, z)) } else { pRet = fts5ParseColset(tls, pParse, pColset, iCol) } @@ -145688,7 +145746,7 @@ *(*uintptr)(unsafe.Pointer(bp)) = pColset if (*Fts5Config)(unsafe.Pointer((*Fts5Parse)(unsafe.Pointer(pParse)).FpConfig)).FeDetail == FTS5_DETAIL_NONE { sqlite3Fts5ParseError(tls, pParse, - ts+35169, 0) + ts+35216, 0) } else { fts5ParseSetColset(tls, pParse, pExpr, pColset, bp) } @@ -145858,12 +145916,12 @@ (*Fts5ExprPhrase)(unsafe.Pointer(pPhrase)).FnTerm > 1 || (*Fts5ExprPhrase)(unsafe.Pointer(pPhrase)).FnTerm > 0 && (*Fts5ExprTerm)(unsafe.Pointer(pPhrase+32)).FbFirst != 0 { sqlite3Fts5ParseError(tls, pParse, - ts+35222, + ts+35269, libc.VaList(bp, func() uintptr { if (*Fts5ExprNearset)(unsafe.Pointer(pNear)).FnPhrase == 1 { - return ts + 35272 + return ts + 35319 } - return ts + 35135 + return ts + 35182 }())) Xsqlite3_free(tls, pRet) pRet = uintptr(0) @@ -146806,7 +146864,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpReader == uintptr(0) && rc == SQLITE_OK { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig rc = Xsqlite3_blob_open(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, - (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, 
(*Fts5Index)(unsafe.Pointer(p)).FzDataTbl, ts+35279, iRowid, 0, p+56) + (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl, ts+35326, iRowid, 0, p+56) } if rc == SQLITE_ERROR { @@ -146885,7 +146943,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpWriter == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig fts5IndexPrepareStmt(tls, p, p+64, Xsqlite3_mprintf(tls, - ts+35285, + ts+35332, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName))) if (*Fts5Index)(unsafe.Pointer(p)).Frc != 0 { return @@ -146910,7 +146968,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpDeleter == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig var zSql uintptr = Xsqlite3_mprintf(tls, - ts+35336, + ts+35383, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) if fts5IndexPrepareStmt(tls, p, p+72, zSql) != 0 { return @@ -146933,7 +146991,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpIdxDeleter == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig fts5IndexPrepareStmt(tls, p, p+88, Xsqlite3_mprintf(tls, - ts+35385, + ts+35432, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName))) } if (*Fts5Index)(unsafe.Pointer(p)).Frc == SQLITE_OK { @@ -147172,7 +147230,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).Frc == SQLITE_OK { if (*Fts5Index)(unsafe.Pointer(p)).FpDataVersion == uintptr(0) { (*Fts5Index)(unsafe.Pointer(p)).Frc = fts5IndexPrepareStmt(tls, p, p+112, - Xsqlite3_mprintf(tls, ts+35425, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer((*Fts5Index)(unsafe.Pointer(p)).FpConfig)).FzDb))) + Xsqlite3_mprintf(tls, ts+35472, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer((*Fts5Index)(unsafe.Pointer(p)).FpConfig)).FzDb))) if (*Fts5Index)(unsafe.Pointer(p)).Frc != 0 { return int64(0) } @@ -148371,7 +148429,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpIdxSelect == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig fts5IndexPrepareStmt(tls, p, p+96, Xsqlite3_mprintf(tls, - ts+35448, + ts+35495, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName))) } return (*Fts5Index)(unsafe.Pointer(p)).FpIdxSelect @@ -149837,7 +149895,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpIdxWriter == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig fts5IndexPrepareStmt(tls, p, p+80, Xsqlite3_mprintf(tls, - ts+35532, + ts+35579, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName))) } @@ -150919,13 +150977,13 @@ if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { (*Fts5Index)(unsafe.Pointer(p)).FpConfig = pConfig (*Fts5Index)(unsafe.Pointer(p)).FnWorkUnit = FTS5_WORK_UNIT - (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl = sqlite3Fts5Mprintf(tls, bp+8, ts+35589, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) + (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl = sqlite3Fts5Mprintf(tls, bp+8, ts+35636, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) if (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl != 0 && bCreate != 0 { *(*int32)(unsafe.Pointer(bp + 8)) = sqlite3Fts5CreateTable(tls, - pConfig, ts+25506, ts+35597, 0, pzErr) + pConfig, ts+25553, ts+35644, 0, pzErr) if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { *(*int32)(unsafe.Pointer(bp + 8)) = sqlite3Fts5CreateTable(tls, pConfig, ts+11891, - ts+35632, + ts+35679, 1, pzErr) } if 
*(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { @@ -151178,7 +151236,7 @@ sqlite3Fts5Put32(tls, bp, iNew) rc = Xsqlite3_blob_open(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl, - ts+35279, int64(FTS5_STRUCTURE_ROWID), 1, bp+8) + ts+35326, int64(FTS5_STRUCTURE_ROWID), 1, bp+8) if rc == SQLITE_OK { Xsqlite3_blob_write(tls, *(*uintptr)(unsafe.Pointer(bp + 8)), bp, 4, 0) rc = Xsqlite3_blob_close(tls, *(*uintptr)(unsafe.Pointer(bp + 8))) @@ -151292,7 +151350,7 @@ } fts5IndexPrepareStmt(tls, p, bp+24, Xsqlite3_mprintf(tls, - ts+35676, + ts+35723, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, (*Fts5StructureSegment)(unsafe.Pointer(pSeg)).FiSegid))) for (*Fts5Index)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) { @@ -151462,7 +151520,7 @@ } else { (*Fts5Buffer)(unsafe.Pointer(bp + 16)).Fn = 0 fts5SegiterPoslist(tls, p, *(*uintptr)(unsafe.Pointer(bp))+96+uintptr((*Fts5CResult)(unsafe.Pointer((*Fts5Iter)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FaFirst+1*4)).FiFirst)*120, uintptr(0), bp+16) - sqlite3Fts5BufferAppendBlob(tls, p+52, bp+16, uint32(4), ts+35762) + sqlite3Fts5BufferAppendBlob(tls, p+52, bp+16, uint32(4), ts+35809) for 0 == sqlite3Fts5PoslistNext64(tls, (*Fts5Buffer)(unsafe.Pointer(bp+16)).Fp, (*Fts5Buffer)(unsafe.Pointer(bp+16)).Fn, bp+32, bp+40) { var iCol int32 = int32(*(*I64)(unsafe.Pointer(bp + 40)) >> 32) var iTokOff int32 = int32(*(*I64)(unsafe.Pointer(bp + 40)) & int64(0x7FFFFFFF)) @@ -151733,7 +151791,7 @@ if (*Fts5Config)(unsafe.Pointer(pConfig)).FbLock != 0 { (*Fts5Table)(unsafe.Pointer(pTab)).Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+35767, 0) + ts+35814, 0) return SQLITE_ERROR } @@ -152157,7 +152215,7 @@ (*Fts5Sorter)(unsafe.Pointer(pSorter)).FnIdx = nPhrase rc = fts5PrepareStatement(tls, pSorter, pConfig, - ts+35806, + ts+35853, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zRank, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, func() uintptr { if zRankArgs != 0 { @@ -152173,9 +152231,9 @@ }(), func() uintptr { if bDesc != 0 { - return ts + 35861 + return ts + 35908 } - return ts + 35866 + return ts + 35913 }())) (*Fts5Cursor)(unsafe.Pointer(pCsr)).FpSorter = pSorter @@ -152221,12 +152279,12 @@ (*Fts5Cursor)(unsafe.Pointer(pCsr)).FePlan = FTS5_PLAN_SPECIAL - if n == 5 && 0 == Xsqlite3_strnicmp(tls, ts+35870, z, n) { + if n == 5 && 0 == Xsqlite3_strnicmp(tls, ts+35917, z, n) { (*Fts5Cursor)(unsafe.Pointer(pCsr)).FiSpecial = I64(sqlite3Fts5IndexReads(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.FpIndex)) } else if n == 2 && 0 == Xsqlite3_strnicmp(tls, ts+5460, z, n) { (*Fts5Cursor)(unsafe.Pointer(pCsr)).FiSpecial = (*Fts5Cursor)(unsafe.Pointer(pCsr)).FiCsrId } else { - (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, ts+35876, libc.VaList(bp, n, z)) + (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, ts+35923, libc.VaList(bp, n, z)) rc = SQLITE_ERROR } @@ -152257,7 +152315,7 @@ var zRankArgs uintptr = (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRankArgs if zRankArgs != 0 { - var zSql uintptr = sqlite3Fts5Mprintf(tls, bp+16, ts+35904, libc.VaList(bp, zRankArgs)) + var zSql uintptr = sqlite3Fts5Mprintf(tls, bp+16, ts+35951, libc.VaList(bp, zRankArgs)) if zSql != 0 { *(*uintptr)(unsafe.Pointer(bp + 24)) = uintptr(0) 
*(*int32)(unsafe.Pointer(bp + 16)) = Xsqlite3_prepare_v3(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, zSql, -1, @@ -152288,7 +152346,7 @@ if *(*int32)(unsafe.Pointer(bp + 16)) == SQLITE_OK { pAux = fts5FindAuxiliary(tls, pTab, zRank) if pAux == uintptr(0) { - (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, ts+35914, libc.VaList(bp+8, zRank)) + (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, ts+35961, libc.VaList(bp+8, zRank)) *(*int32)(unsafe.Pointer(bp + 16)) = SQLITE_ERROR } } @@ -152320,14 +152378,14 @@ *(*int32)(unsafe.Pointer(pCsr + 80)) |= FTS5CSR_FREE_ZRANK } else if rc == SQLITE_ERROR { (*Sqlite3_vtab)(unsafe.Pointer((*Fts5Cursor)(unsafe.Pointer(pCsr)).Fbase.FpVtab)).FzErrMsg = Xsqlite3_mprintf(tls, - ts+35935, libc.VaList(bp, z)) + ts+35982, libc.VaList(bp, z)) } } else { if (*Fts5Config)(unsafe.Pointer(pConfig)).FzRank != 0 { (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRank = (*Fts5Config)(unsafe.Pointer(pConfig)).FzRank (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRankArgs = (*Fts5Config)(unsafe.Pointer(pConfig)).FzRankArgs } else { - (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRank = ts + 34318 + (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRank = ts + 34365 (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRankArgs = uintptr(0) } } @@ -152383,7 +152441,7 @@ goto __1 } (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+35767, 0) + ts+35814, 0) return SQLITE_ERROR __1: ; @@ -152600,7 +152658,7 @@ goto __40 } *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(pConfig)).FpzErrmsg)) = Xsqlite3_mprintf(tls, - ts+35968, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) + ts+36015, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) rc = SQLITE_ERROR goto __41 __40: @@ -152745,28 +152803,28 @@ var rc int32 = SQLITE_OK *(*int32)(unsafe.Pointer(bp)) = 0 - if 0 == Xsqlite3_stricmp(tls, ts+36004, zCmd) { + if 0 == Xsqlite3_stricmp(tls, ts+36051, zCmd) { if (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NORMAL { fts5SetVtabError(tls, pTab, - ts+36015, 0) + ts+36062, 0) rc = SQLITE_ERROR } else { rc = sqlite3Fts5StorageDeleteAll(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage) } - } else if 0 == Xsqlite3_stricmp(tls, ts+36095, zCmd) { + } else if 0 == Xsqlite3_stricmp(tls, ts+36142, zCmd) { if (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NONE { fts5SetVtabError(tls, pTab, - ts+36103, 0) + ts+36150, 0) rc = SQLITE_ERROR } else { rc = sqlite3Fts5StorageRebuild(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage) } } else if 0 == Xsqlite3_stricmp(tls, ts+17356, zCmd) { rc = sqlite3Fts5StorageOptimize(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage) - } else if 0 == Xsqlite3_stricmp(tls, ts+36159, zCmd) { + } else if 0 == Xsqlite3_stricmp(tls, ts+36206, zCmd) { var nMerge int32 = Xsqlite3_value_int(tls, pVal) rc = sqlite3Fts5StorageMerge(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage, nMerge) - } else if 0 == Xsqlite3_stricmp(tls, ts+36165, zCmd) { + } else if 0 == Xsqlite3_stricmp(tls, ts+36212, zCmd) { var iArg int32 = Xsqlite3_value_int(tls, pVal) rc = sqlite3Fts5StorageIntegrity(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage, iArg) } else { @@ -152837,12 +152895,12 @@ if eType0 == SQLITE_INTEGER && fts5IsContentless(tls, pTab) != 0 { (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+36181, + ts+36228, libc.VaList(bp, func() uintptr { if nArg > 1 { - return ts + 20879 + return ts + 20926 } - 
return ts + 36218 + return ts + 36265 }(), (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) *(*int32)(unsafe.Pointer(bp + 16)) = SQLITE_ERROR } else if nArg == 1 { @@ -153472,7 +153530,7 @@ pCsr = fts5CursorFromCsrid(tls, (*Fts5Auxiliary)(unsafe.Pointer(pAux)).FpGlobal, iCsrId) if pCsr == uintptr(0) || (*Fts5Cursor)(unsafe.Pointer(pCsr)).FePlan == 0 { - var zErr uintptr = Xsqlite3_mprintf(tls, ts+36230, libc.VaList(bp, iCsrId)) + var zErr uintptr = Xsqlite3_mprintf(tls, ts+36277, libc.VaList(bp, iCsrId)) Xsqlite3_result_error(tls, context, zErr, -1) Xsqlite3_free(tls, zErr) } else { @@ -153716,7 +153774,7 @@ }()) if pMod == uintptr(0) { rc = SQLITE_ERROR - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+36251, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(azArg)))) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+36298, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(azArg)))) } else { rc = (*struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 @@ -153735,7 +153793,7 @@ (*Fts5Config)(unsafe.Pointer(pConfig)).FpTokApi = pMod + 16 if rc != SQLITE_OK { if pzErr != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+36273, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+36320, 0) } } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FePattern = sqlite3Fts5TokenizerPattern(tls, @@ -153782,7 +153840,7 @@ var ppApi uintptr _ = nArg - ppApi = Xsqlite3_value_pointer(tls, *(*uintptr)(unsafe.Pointer(apArg)), ts+36304) + ppApi = Xsqlite3_value_pointer(tls, *(*uintptr)(unsafe.Pointer(apArg)), ts+36351) if ppApi != 0 { *(*uintptr)(unsafe.Pointer(ppApi)) = pGlobal } @@ -153791,7 +153849,7 @@ func fts5SourceIdFunc(tls *libc.TLS, pCtx uintptr, nArg int32, apUnused uintptr) { _ = nArg _ = apUnused - Xsqlite3_result_text(tls, pCtx, ts+36317, -1, libc.UintptrFromInt32(-1)) + Xsqlite3_result_text(tls, pCtx, ts+36364, -1, libc.UintptrFromInt32(-1)) } func fts5ShadowName(tls *libc.TLS, zName uintptr) int32 { @@ -153805,7 +153863,7 @@ } var azName2 = [5]uintptr{ - ts + 36408, ts + 34507, ts + 25506, ts + 34858, ts + 11891, + ts + 36455, ts + 34554, ts + 25553, ts + 34905, ts + 11891, } func fts5Init(tls *libc.TLS, db uintptr) int32 { @@ -153829,7 +153887,7 @@ (*Fts5Global)(unsafe.Pointer(pGlobal)).Fapi.FxFindTokenizer = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr, uintptr) int32 }{fts5FindTokenizer})) - rc = Xsqlite3_create_module_v2(tls, db, ts+36415, uintptr(unsafe.Pointer(&fts5Mod)), p, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5ModuleDestroy}))) + rc = Xsqlite3_create_module_v2(tls, db, ts+36462, uintptr(unsafe.Pointer(&fts5Mod)), p, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5ModuleDestroy}))) if rc == SQLITE_OK { rc = sqlite3Fts5IndexInit(tls, db) } @@ -153847,13 +153905,13 @@ } if rc == SQLITE_OK { rc = Xsqlite3_create_function(tls, - db, ts+36415, 1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { + db, ts+36462, 1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{fts5Fts5Func})), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { rc = Xsqlite3_create_function(tls, - db, ts+36420, 0, + db, ts+36467, 0, SQLITE_UTF8|SQLITE_DETERMINISTIC|SQLITE_INNOCUOUS, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) @@ -153910,17 +153968,17 @@ if *(*uintptr)(unsafe.Pointer(p + 40 + uintptr(eStmt)*8)) == uintptr(0) { *(*[11]uintptr)(unsafe.Pointer(bp + 128)) = [11]uintptr{ - ts + 36435, - 
ts + 36503, - ts + 36572, - ts + 36605, - ts + 36644, - ts + 36684, - ts + 36723, - ts + 36764, - ts + 36803, - ts + 36845, - ts + 36885, + ts + 36482, + ts + 36550, + ts + 36619, + ts + 36652, + ts + 36691, + ts + 36731, + ts + 36770, + ts + 36811, + ts + 36850, + ts + 36892, + ts + 36932, } var pC uintptr = (*Fts5Storage)(unsafe.Pointer(p)).FpConfig var zSql uintptr = uintptr(0) @@ -154022,18 +154080,18 @@ defer tls.Free(80) var rc int32 = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36908, + ts+36955, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+37012, + ts+37059, libc.VaList(bp+48, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) } if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NORMAL { rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+37050, + ts+37097, libc.VaList(bp+64, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) } return rc @@ -154045,7 +154103,7 @@ if *(*int32)(unsafe.Pointer(pRc)) == SQLITE_OK { *(*int32)(unsafe.Pointer(pRc)) = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+37088, + ts+37135, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zTail, zName, zTail)) } } @@ -154057,14 +154115,14 @@ var pConfig uintptr = (*Fts5Storage)(unsafe.Pointer(pStorage)).FpConfig *(*int32)(unsafe.Pointer(bp)) = sqlite3Fts5StorageSync(tls, pStorage) - fts5StorageRenameOne(tls, pConfig, bp, ts+25506, zName) + fts5StorageRenameOne(tls, pConfig, bp, ts+25553, zName) fts5StorageRenameOne(tls, pConfig, bp, ts+11891, zName) - fts5StorageRenameOne(tls, pConfig, bp, ts+36408, zName) + fts5StorageRenameOne(tls, pConfig, bp, ts+36455, zName) if (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { - fts5StorageRenameOne(tls, pConfig, bp, ts+34858, zName) + fts5StorageRenameOne(tls, pConfig, bp, ts+34905, zName) } if (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NORMAL { - fts5StorageRenameOne(tls, pConfig, bp, ts+34507, zName) + fts5StorageRenameOne(tls, pConfig, bp, ts+34554, zName) } return *(*int32)(unsafe.Pointer(bp)) } @@ -154076,17 +154134,17 @@ var rc int32 *(*uintptr)(unsafe.Pointer(bp + 64)) = uintptr(0) - rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, bp+64, ts+37130, + rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, bp+64, ts+37177, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zPost, zDefn, func() uintptr { if bWithout != 0 { - return ts + 30159 + return ts + 30206 } return ts + 1547 }())) if *(*uintptr)(unsafe.Pointer(bp + 64)) != 0 { *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, - ts+37160, + ts+37207, libc.VaList(bp+40, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zPost, *(*uintptr)(unsafe.Pointer(bp + 64)))) Xsqlite3_free(tls, *(*uintptr)(unsafe.Pointer(bp + 64))) } @@ -154123,27 +154181,27 @@ } else { var i int32 var iOff int32 - 
Xsqlite3_snprintf(tls, nDefn, zDefn, ts+37204, 0) + Xsqlite3_snprintf(tls, nDefn, zDefn, ts+37251, 0) iOff = int32(libc.Xstrlen(tls, zDefn)) for i = 0; i < (*Fts5Config)(unsafe.Pointer(pConfig)).FnCol; i++ { - Xsqlite3_snprintf(tls, nDefn-iOff, zDefn+uintptr(iOff), ts+37227, libc.VaList(bp, i)) + Xsqlite3_snprintf(tls, nDefn-iOff, zDefn+uintptr(iOff), ts+37274, libc.VaList(bp, i)) iOff = iOff + int32(libc.Xstrlen(tls, zDefn+uintptr(iOff))) } - rc = sqlite3Fts5CreateTable(tls, pConfig, ts+34507, zDefn, 0, pzErr) + rc = sqlite3Fts5CreateTable(tls, pConfig, ts+34554, zDefn, 0, pzErr) } Xsqlite3_free(tls, zDefn) } if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { rc = sqlite3Fts5CreateTable(tls, - pConfig, ts+34858, ts+37233, 0, pzErr) + pConfig, ts+34905, ts+37280, 0, pzErr) } if rc == SQLITE_OK { rc = sqlite3Fts5CreateTable(tls, - pConfig, ts+36408, ts+37265, 1, pzErr) + pConfig, ts+36455, ts+37312, 1, pzErr) } if rc == SQLITE_OK { - rc = sqlite3Fts5StorageConfigValue(tls, p, ts+35004, uintptr(0), FTS5_CURRENT_VERSION) + rc = sqlite3Fts5StorageConfigValue(tls, p, ts+35051, uintptr(0), FTS5_CURRENT_VERSION) } } @@ -154349,12 +154407,12 @@ (*Fts5Storage)(unsafe.Pointer(p)).FbTotalsValid = 0 rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+37282, + ts+37329, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+37332, + ts+37379, libc.VaList(bp+32, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) } @@ -154362,7 +154420,7 @@ rc = sqlite3Fts5IndexReinit(tls, (*Fts5Storage)(unsafe.Pointer(p)).FpIndex) } if rc == SQLITE_OK { - rc = sqlite3Fts5StorageConfigValue(tls, p, ts+35004, uintptr(0), FTS5_CURRENT_VERSION) + rc = sqlite3Fts5StorageConfigValue(tls, p, ts+35051, uintptr(0), FTS5_CURRENT_VERSION) } return rc } @@ -154538,7 +154596,7 @@ var zSql uintptr var rc int32 - zSql = Xsqlite3_mprintf(tls, ts+37361, + zSql = Xsqlite3_mprintf(tls, ts+37408, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zSuffix)) if zSql == uintptr(0) { rc = SQLITE_NOMEM @@ -154720,14 +154778,14 @@ if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NORMAL { *(*I64)(unsafe.Pointer(bp + 48)) = int64(0) - rc = fts5StorageCount(tls, p, ts+34507, bp+48) + rc = fts5StorageCount(tls, p, ts+34554, bp+48) if rc == SQLITE_OK && *(*I64)(unsafe.Pointer(bp + 48)) != (*Fts5Storage)(unsafe.Pointer(p)).FnTotalRow { rc = SQLITE_CORRUPT | int32(1)<<8 } } if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { *(*I64)(unsafe.Pointer(bp + 56)) = int64(0) - rc = fts5StorageCount(tls, p, ts+34858, bp+56) + rc = fts5StorageCount(tls, p, ts+34905, bp+56) if rc == SQLITE_OK && *(*I64)(unsafe.Pointer(bp + 56)) != (*Fts5Storage)(unsafe.Pointer(p)).FnTotalRow { rc = SQLITE_CORRUPT | int32(1)<<8 } @@ -154922,9 +154980,9 @@ libc.X__builtin___memcpy_chk(tls, p, uintptr(unsafe.Pointer(&aAsciiTokenChar)), uint64(unsafe.Sizeof(aAsciiTokenChar)), libc.X__builtin_object_size(tls, p, 0)) for i = 0; rc == SQLITE_OK && i < nArg; i = i + 2 { var zArg uintptr = *(*uintptr)(unsafe.Pointer(azArg + uintptr(i+1)*8)) - if 0 == 
Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37393) { + if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37440) { fts5AsciiAddExceptions(tls, p, zArg, 1) - } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37404) { + } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37451) { fts5AsciiAddExceptions(tls, p, zArg, 0) } else { rc = SQLITE_ERROR @@ -155139,7 +155197,7 @@ } else { p = Xsqlite3_malloc(tls, int32(unsafe.Sizeof(Unicode61Tokenizer{}))) if p != 0 { - var zCat uintptr = ts + 37415 + var zCat uintptr = ts + 37462 var i int32 libc.X__builtin___memset_chk(tls, p, 0, uint64(unsafe.Sizeof(Unicode61Tokenizer{})), libc.X__builtin_object_size(tls, p, 0)) @@ -155151,7 +155209,7 @@ } for i = 0; rc == SQLITE_OK && i < nArg; i = i + 2 { - if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37424) { + if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37471) { zCat = *(*uintptr)(unsafe.Pointer(azArg + uintptr(i+1)*8)) } } @@ -155162,18 +155220,18 @@ for i = 0; rc == SQLITE_OK && i < nArg; i = i + 2 { var zArg uintptr = *(*uintptr)(unsafe.Pointer(azArg + uintptr(i+1)*8)) - if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37435) { + if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37482) { if int32(*(*int8)(unsafe.Pointer(zArg))) != '0' && int32(*(*int8)(unsafe.Pointer(zArg))) != '1' && int32(*(*int8)(unsafe.Pointer(zArg))) != '2' || *(*int8)(unsafe.Pointer(zArg + 1)) != 0 { rc = SQLITE_ERROR } else { (*Unicode61Tokenizer)(unsafe.Pointer(p)).FeRemoveDiacritic = int32(*(*int8)(unsafe.Pointer(zArg))) - '0' } - } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37393) { + } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37440) { rc = fts5UnicodeAddExceptions(tls, p, zArg, 1) - } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37404) { + } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37451) { rc = fts5UnicodeAddExceptions(tls, p, zArg, 0) - } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37424) { + } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37471) { } else { rc = SQLITE_ERROR } @@ -155449,7 +155507,7 @@ var rc int32 = SQLITE_OK var pRet uintptr *(*uintptr)(unsafe.Pointer(bp)) = uintptr(0) - var zBase uintptr = ts + 37453 + var zBase uintptr = ts + 37500 if nArg > 0 { zBase = *(*uintptr)(unsafe.Pointer(azArg)) @@ -155591,7 +155649,7 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*int8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'a': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37463, aBuf+uintptr(nBuf-2), uint64(2)) { + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37510, aBuf+uintptr(nBuf-2), uint64(2)) { if fts5Porter_MGt1(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 } @@ -155599,11 +155657,11 @@ break case 'c': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37466, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37513, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37471, 
aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37518, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } @@ -155611,7 +155669,7 @@ break case 'e': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37476, aBuf+uintptr(nBuf-2), uint64(2)) { + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37523, aBuf+uintptr(nBuf-2), uint64(2)) { if fts5Porter_MGt1(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 } @@ -155619,7 +155677,7 @@ break case 'i': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37479, aBuf+uintptr(nBuf-2), uint64(2)) { + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37526, aBuf+uintptr(nBuf-2), uint64(2)) { if fts5Porter_MGt1(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 } @@ -155627,11 +155685,11 @@ break case 'l': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37482, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37529, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37487, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37534, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } @@ -155639,19 +155697,19 @@ break case 'n': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37492, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37539, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37496, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37543, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt1(tls, aBuf, nBuf-5) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37502, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37549, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } - } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37507, aBuf+uintptr(nBuf-3), uint64(3)) { + } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37554, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -155659,11 +155717,11 @@ break case 'o': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37511, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37558, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1_and_S_or_T(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } - } else if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37515, aBuf+uintptr(nBuf-2), uint64(2)) { + } else if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37562, aBuf+uintptr(nBuf-2), uint64(2)) { if fts5Porter_MGt1(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 } @@ -155671,7 +155729,7 @@ break case 's': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37518, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37565, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -155679,11 +155737,11 @@ break case 't': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37522, aBuf+uintptr(nBuf-3), uint64(3)) 
{ + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37569, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } - } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37526, aBuf+uintptr(nBuf-3), uint64(3)) { + } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37573, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -155691,7 +155749,7 @@ break case 'u': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37530, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37577, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -155699,7 +155757,7 @@ break case 'v': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37534, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37581, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -155707,7 +155765,7 @@ break case 'z': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37538, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37585, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -155723,24 +155781,24 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*int8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'a': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37542, aBuf+uintptr(nBuf-2), uint64(2)) { - libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-2), ts+37522, uint64(3), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-2), 0)) + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37589, aBuf+uintptr(nBuf-2), uint64(2)) { + libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-2), ts+37569, uint64(3), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-2), 0)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 + 3 ret = 1 } break case 'b': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37545, aBuf+uintptr(nBuf-2), uint64(2)) { - libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-2), ts+37548, uint64(3), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-2), 0)) + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37592, aBuf+uintptr(nBuf-2), uint64(2)) { + libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-2), ts+37595, uint64(3), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-2), 0)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 + 3 ret = 1 } break case 'i': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37552, aBuf+uintptr(nBuf-2), uint64(2)) { - libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-2), ts+37538, uint64(3), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-2), 0)) + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37599, aBuf+uintptr(nBuf-2), uint64(2)) { + libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-2), ts+37585, uint64(3), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-2), 0)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 + 3 ret = 1 } @@ -155755,44 +155813,44 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*int8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'a': - if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37555, aBuf+uintptr(nBuf-7), uint64(7)) { + if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37602, aBuf+uintptr(nBuf-7), uint64(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-7), ts+37522, uint64(3), libc.X__builtin_object_size(tls, 
aBuf+uintptr(nBuf-7), 0)) + libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-7), ts+37569, uint64(3), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-7), 0)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } - } else if nBuf > 6 && 0 == libc.Xmemcmp(tls, ts+37563, aBuf+uintptr(nBuf-6), uint64(6)) { + } else if nBuf > 6 && 0 == libc.Xmemcmp(tls, ts+37610, aBuf+uintptr(nBuf-6), uint64(6)) { if fts5Porter_MGt0(tls, aBuf, nBuf-6) != 0 { - libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-6), ts+37570, uint64(4), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-6), 0)) + libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-6), ts+37617, uint64(4), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-6), 0)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 6 + 4 } } break case 'c': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37575, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37622, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-4), ts+37471, uint64(4), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-4), 0)) + libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-4), ts+37518, uint64(4), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-4), 0)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 4 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37580, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37627, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-4), ts+37466, uint64(4), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-4), 0)) + libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-4), ts+37513, uint64(4), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-4), 0)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 4 } } break case 'e': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37585, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37632, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-4), ts+37538, uint64(3), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-4), 0)) + libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-4), ts+37585, uint64(3), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-4), 0)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 3 } } break case 'g': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37590, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37637, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-4), ts+15883, uint64(3), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-4), 0)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 3 @@ -155801,91 +155859,91 @@ break case 'l': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37595, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37642, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt0(tls, aBuf, nBuf-3) != 0 { - libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-3), ts+37548, uint64(3), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-3), 0)) + libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-3), ts+37595, uint64(3), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-3), 0)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 + 3 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37599, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 
4 && 0 == libc.Xmemcmp(tls, ts+37646, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-4), ts+37463, uint64(2), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-4), 0)) + libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-4), ts+37510, uint64(2), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-4), 0)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 2 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37604, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37651, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-5), ts+37507, uint64(3), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-5), 0)) + libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-5), ts+37554, uint64(3), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-5), 0)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 3 } - } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37610, aBuf+uintptr(nBuf-3), uint64(3)) { + } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37657, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt0(tls, aBuf, nBuf-3) != 0 { - libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-3), ts+37614, uint64(1), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-3), 0)) + libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-3), ts+37661, uint64(1), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-3), 0)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 + 1 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37616, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37663, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-5), ts+37530, uint64(3), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-5), 0)) + libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-5), ts+37577, uint64(3), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-5), 0)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 3 } } break case 'o': - if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37622, aBuf+uintptr(nBuf-7), uint64(7)) { + if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37669, aBuf+uintptr(nBuf-7), uint64(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-7), ts+37538, uint64(3), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-7), 0)) + libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-7), ts+37585, uint64(3), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-7), 0)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37630, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37677, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-5), ts+37522, uint64(3), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-5), 0)) + libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-5), ts+37569, uint64(3), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-5), 0)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 3 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37636, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37683, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-4), ts+37522, uint64(3), 
libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-4), 0)) + libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-4), ts+37569, uint64(3), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-4), 0)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 3 } } break case 's': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37641, aBuf+uintptr(nBuf-5), uint64(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37688, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-5), ts+37463, uint64(2), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-5), 0)) + libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-5), ts+37510, uint64(2), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-5), 0)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } - } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37647, aBuf+uintptr(nBuf-7), uint64(7)) { + } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37694, aBuf+uintptr(nBuf-7), uint64(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-7), ts+37534, uint64(3), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-7), 0)) + libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-7), ts+37581, uint64(3), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-7), 0)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } - } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37655, aBuf+uintptr(nBuf-7), uint64(7)) { + } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37702, aBuf+uintptr(nBuf-7), uint64(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-7), ts+37663, uint64(3), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-7), 0)) + libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-7), ts+37710, uint64(3), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-7), 0)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } - } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37667, aBuf+uintptr(nBuf-7), uint64(7)) { + } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37714, aBuf+uintptr(nBuf-7), uint64(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-7), ts+37530, uint64(3), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-7), 0)) + libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-7), ts+37577, uint64(3), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-7), 0)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } } break case 't': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37675, aBuf+uintptr(nBuf-5), uint64(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37722, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-5), ts+37463, uint64(2), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-5), 0)) + libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-5), ts+37510, uint64(2), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-5), 0)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37681, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37728, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-5), ts+37534, uint64(3), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-5), 0)) + libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-5), ts+37581, uint64(3), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-5), 0)) 
*(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 3 } - } else if nBuf > 6 && 0 == libc.Xmemcmp(tls, ts+37687, aBuf+uintptr(nBuf-6), uint64(6)) { + } else if nBuf > 6 && 0 == libc.Xmemcmp(tls, ts+37734, aBuf+uintptr(nBuf-6), uint64(6)) { if fts5Porter_MGt0(tls, aBuf, nBuf-6) != 0 { - libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-6), ts+37548, uint64(3), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-6), 0)) + libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-6), ts+37595, uint64(3), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-6), 0)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 6 + 3 } } @@ -155900,16 +155958,16 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*int8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'a': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37694, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37741, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-4), ts+37479, uint64(2), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-4), 0)) + libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-4), ts+37526, uint64(2), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-4), 0)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 2 } } break case 's': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37699, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37746, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } @@ -155917,21 +155975,21 @@ break case 't': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37704, aBuf+uintptr(nBuf-5), uint64(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37751, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-5), ts+37479, uint64(2), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-5), 0)) + libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-5), ts+37526, uint64(2), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-5), 0)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37710, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37757, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-5), ts+37479, uint64(2), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-5), 0)) + libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-5), ts+37526, uint64(2), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-5), 0)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } } break case 'u': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37663, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37710, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt0(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -155939,7 +155997,7 @@ break case 'v': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37716, aBuf+uintptr(nBuf-5), uint64(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37763, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 } @@ -155947,9 +156005,9 @@ break case 'z': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37722, aBuf+uintptr(nBuf-5), uint64(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37769, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 
0 { - libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-5), ts+37463, uint64(2), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-5), 0)) + libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-5), ts+37510, uint64(2), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-5), 0)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } } @@ -155964,12 +156022,12 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*int8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'e': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37728, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37775, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt0(tls, aBuf, nBuf-3) != 0 { - libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-3), ts+37732, uint64(2), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-3), 0)) + libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-3), ts+37779, uint64(2), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-3), 0)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 + 2 } - } else if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37735, aBuf+uintptr(nBuf-2), uint64(2)) { + } else if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37782, aBuf+uintptr(nBuf-2), uint64(2)) { if fts5Porter_Vowel(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 ret = 1 @@ -155978,7 +156036,7 @@ break case 'n': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37738, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37785, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_Vowel(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 ret = 1 @@ -156134,7 +156192,7 @@ (*TrigramTokenizer)(unsafe.Pointer(pNew)).FbFold = 1 for i = 0; rc == SQLITE_OK && i < nArg; i = i + 2 { var zArg uintptr = *(*uintptr)(unsafe.Pointer(azArg + uintptr(i+1)*8)) - if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37742) { + if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37789) { if int32(*(*int8)(unsafe.Pointer(zArg))) != '0' && int32(*(*int8)(unsafe.Pointer(zArg))) != '1' || *(*int8)(unsafe.Pointer(zArg + 1)) != 0 { rc = SQLITE_ERROR } else { @@ -156314,22 +156372,22 @@ defer tls.Free(128) *(*[4]BuiltinTokenizer)(unsafe.Pointer(bp)) = [4]BuiltinTokenizer{ - {FzName: ts + 37453, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { + {FzName: ts + 37500, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 }{fts5UnicodeCreate})), FxDelete: *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5UnicodeDelete})), FxTokenize: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, int32, uintptr) int32 }{fts5UnicodeTokenize}))}}, - {FzName: ts + 37757, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { + {FzName: ts + 37804, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 }{fts5AsciiCreate})), FxDelete: *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5AsciiDelete})), FxTokenize: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, int32, uintptr) int32 }{fts5AsciiTokenize}))}}, - {FzName: ts + 37763, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { + {FzName: ts + 37810, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 
}{fts5PorterCreate})), FxDelete: *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5PorterDelete})), FxTokenize: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, int32, uintptr) int32 }{fts5PorterTokenize}))}}, - {FzName: ts + 37770, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { + {FzName: ts + 37817, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 }{fts5TriCreate})), FxDelete: *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5TriDelete})), FxTokenize: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, int32, uintptr) int32 @@ -157472,14 +157530,14 @@ var zCopy uintptr = sqlite3Fts5Strndup(tls, bp+8, zType, -1) if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { sqlite3Fts5Dequote(tls, zCopy) - if Xsqlite3_stricmp(tls, zCopy, ts+37778) == 0 { + if Xsqlite3_stricmp(tls, zCopy, ts+37825) == 0 { *(*int32)(unsafe.Pointer(peType)) = FTS5_VOCAB_COL - } else if Xsqlite3_stricmp(tls, zCopy, ts+37782) == 0 { + } else if Xsqlite3_stricmp(tls, zCopy, ts+37829) == 0 { *(*int32)(unsafe.Pointer(peType)) = FTS5_VOCAB_ROW - } else if Xsqlite3_stricmp(tls, zCopy, ts+37786) == 0 { + } else if Xsqlite3_stricmp(tls, zCopy, ts+37833) == 0 { *(*int32)(unsafe.Pointer(peType)) = FTS5_VOCAB_INSTANCE } else { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+37795, libc.VaList(bp, zCopy)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+37842, libc.VaList(bp, zCopy)) *(*int32)(unsafe.Pointer(bp + 8)) = SQLITE_ERROR } Xsqlite3_free(tls, zCopy) @@ -157505,19 +157563,19 @@ defer tls.Free(36) *(*[3]uintptr)(unsafe.Pointer(bp + 8)) = [3]uintptr{ - ts + 37829, - ts + 37869, - ts + 37904, + ts + 37876, + ts + 37916, + ts + 37951, } var pRet uintptr = uintptr(0) *(*int32)(unsafe.Pointer(bp + 32)) = SQLITE_OK var bDb int32 - bDb = libc.Bool32(argc == 6 && libc.Xstrlen(tls, *(*uintptr)(unsafe.Pointer(argv + 1*8))) == uint64(4) && libc.Xmemcmp(tls, ts+23802, *(*uintptr)(unsafe.Pointer(argv + 1*8)), uint64(4)) == 0) + bDb = libc.Bool32(argc == 6 && libc.Xstrlen(tls, *(*uintptr)(unsafe.Pointer(argv + 1*8))) == uint64(4) && libc.Xmemcmp(tls, ts+23849, *(*uintptr)(unsafe.Pointer(argv + 1*8)), uint64(4)) == 0) if argc != 5 && bDb == 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+37947, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+37994, 0) *(*int32)(unsafe.Pointer(bp + 32)) = SQLITE_ERROR } else { var nByte int32 @@ -157650,11 +157708,11 @@ if (*Fts5VocabTable)(unsafe.Pointer(pTab)).FbBusy != 0 { (*Sqlite3_vtab)(unsafe.Pointer(pVTab)).FzErrMsg = Xsqlite3_mprintf(tls, - ts+37980, libc.VaList(bp, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) + ts+38027, libc.VaList(bp, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) return SQLITE_ERROR } zSql = sqlite3Fts5Mprintf(tls, bp+64, - ts+38011, + ts+38058, libc.VaList(bp+16, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) if zSql != 0 { *(*int32)(unsafe.Pointer(bp + 64)) = Xsqlite3_prepare_v2(tls, (*Fts5VocabTable)(unsafe.Pointer(pTab)).Fdb, zSql, -1, bp+72, uintptr(0)) @@ -157678,7 +157736,7 @@ *(*uintptr)(unsafe.Pointer(bp + 72)) = uintptr(0) if 
*(*int32)(unsafe.Pointer(bp + 64)) == SQLITE_OK { (*Sqlite3_vtab)(unsafe.Pointer(pVTab)).FzErrMsg = Xsqlite3_mprintf(tls, - ts+38062, libc.VaList(bp+48, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) + ts+38109, libc.VaList(bp+48, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) *(*int32)(unsafe.Pointer(bp + 64)) = SQLITE_ERROR } } else { @@ -158073,7 +158131,7 @@ func sqlite3Fts5VocabInit(tls *libc.TLS, pGlobal uintptr, db uintptr) int32 { var p uintptr = pGlobal - return Xsqlite3_create_module_v2(tls, db, ts+38088, uintptr(unsafe.Pointer(&fts5Vocab)), p, uintptr(0)) + return Xsqlite3_create_module_v2(tls, db, ts+38135, uintptr(unsafe.Pointer(&fts5Vocab)), p, uintptr(0)) } var fts5Vocab = Sqlite3_module{ @@ -158095,7 +158153,7 @@ // ************* End of stmt.c *********************************************** // Return the source-id for this library func Xsqlite3_sourceid(tls *libc.TLS) uintptr { - return ts + 38098 + return ts + 38145 } func init() { @@ -159238,5 +159296,5 @@ *(*func(*libc.TLS, uintptr, int32, uintptr) int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&vfs_template)) + 128)) = rbuVfsGetLastError } -var ts1 = "3.41.0\x00ATOMIC_INTRINSICS=0\x00COMPILER=clang-12.0.0\x00DEFAULT_AUTOVACUUM\x00DEFAULT_CACHE_SIZE=-2000\x00DEFAULT_FILE_FORMAT=4\x00DEFAULT_JOURNAL_SIZE_LIMIT=-1\x00DEFAULT_MEMSTATUS=0\x00DEFAULT_MMAP_SIZE=0\x00DEFAULT_PAGE_SIZE=4096\x00DEFAULT_PCACHE_INITSZ=20\x00DEFAULT_RECURSIVE_TRIGGERS\x00DEFAULT_SECTOR_SIZE=4096\x00DEFAULT_SYNCHRONOUS=2\x00DEFAULT_WAL_AUTOCHECKPOINT=1000\x00DEFAULT_WAL_SYNCHRONOUS=2\x00DEFAULT_WORKER_THREADS=0\x00ENABLE_COLUMN_METADATA\x00ENABLE_FTS5\x00ENABLE_GEOPOLY\x00ENABLE_MATH_FUNCTIONS\x00ENABLE_MEMORY_MANAGEMENT\x00ENABLE_OFFSET_SQL_FUNC\x00ENABLE_PREUPDATE_HOOK\x00ENABLE_RBU\x00ENABLE_RTREE\x00ENABLE_SESSION\x00ENABLE_SNAPSHOT\x00ENABLE_STAT4\x00ENABLE_UNLOCK_NOTIFY\x00LIKE_DOESNT_MATCH_BLOBS\x00MALLOC_SOFT_LIMIT=1024\x00MAX_ATTACHED=10\x00MAX_COLUMN=2000\x00MAX_COMPOUND_SELECT=500\x00MAX_DEFAULT_PAGE_SIZE=8192\x00MAX_EXPR_DEPTH=1000\x00MAX_FUNCTION_ARG=127\x00MAX_LENGTH=1000000000\x00MAX_LIKE_PATTERN_LENGTH=50000\x00MAX_MMAP_SIZE=0x7fff0000\x00MAX_PAGE_COUNT=1073741823\x00MAX_PAGE_SIZE=65536\x00MAX_SQL_LENGTH=1000000000\x00MAX_TRIGGER_DEPTH=1000\x00MAX_VARIABLE_NUMBER=32766\x00MAX_VDBE_OP=250000000\x00MAX_WORKER_THREADS=8\x00MUTEX_NOOP\x00SOUNDEX\x00SYSTEM_MALLOC\x00TEMP_STORE=1\x00THREADSAFE=1\x00BINARY\x00ANY\x00BLOB\x00INT\x00INTEGER\x00REAL\x00TEXT\x0020b:20e\x0020c:20e\x0020e\x0040f-21a-21d\x00now\x00local time unavailable\x00second\x00minute\x00hour\x00\x00\x00day\x00\x00\x00\x00month\x00\x00year\x00\x00\x00auto\x00julianday\x00localtime\x00unixepoch\x00utc\x00weekday \x00start of \x00month\x00year\x00day\x00%02d\x00%06.3f\x00%03d\x00%.16g\x00%lld\x00%04d\x00date\x00time\x00datetime\x00strftime\x00current_time\x00current_timestamp\x00current_date\x00failed to allocate %u bytes of memory\x00failed memory resize %u to %u bytes\x00out of memory\x000123456789ABCDEF0123456789abcdef\x00-x0\x00X0\x00%\x00NaN\x00Inf\x00\x00NULL\x00(NULL)\x00.\x00(join-%u)\x00(subquery-%u)\x00thstndrd\x00922337203685477580\x00API call with %s database connection 
pointer\x00unopened\x00invalid\x00Savepoint\x00AutoCommit\x00Transaction\x00Checkpoint\x00JournalMode\x00Vacuum\x00VFilter\x00VUpdate\x00Init\x00Goto\x00Gosub\x00InitCoroutine\x00Yield\x00MustBeInt\x00Jump\x00Once\x00If\x00IfNot\x00IsType\x00Not\x00IfNullRow\x00SeekLT\x00SeekLE\x00SeekGE\x00SeekGT\x00IfNotOpen\x00IfNoHope\x00NoConflict\x00NotFound\x00Found\x00SeekRowid\x00NotExists\x00Last\x00IfSmaller\x00SorterSort\x00Sort\x00Rewind\x00SorterNext\x00Prev\x00Next\x00IdxLE\x00IdxGT\x00IdxLT\x00Or\x00And\x00IdxGE\x00RowSetRead\x00RowSetTest\x00Program\x00FkIfZero\x00IsNull\x00NotNull\x00Ne\x00Eq\x00Gt\x00Le\x00Lt\x00Ge\x00ElseEq\x00IfPos\x00IfNotZero\x00DecrJumpZero\x00IncrVacuum\x00VNext\x00Filter\x00PureFunc\x00Function\x00Return\x00EndCoroutine\x00HaltIfNull\x00Halt\x00Integer\x00Int64\x00String\x00BeginSubrtn\x00Null\x00SoftNull\x00Blob\x00Variable\x00Move\x00Copy\x00SCopy\x00IntCopy\x00FkCheck\x00ResultRow\x00CollSeq\x00AddImm\x00RealAffinity\x00Cast\x00Permutation\x00Compare\x00IsTrue\x00ZeroOrNull\x00Offset\x00Column\x00TypeCheck\x00Affinity\x00MakeRecord\x00Count\x00ReadCookie\x00SetCookie\x00ReopenIdx\x00BitAnd\x00BitOr\x00ShiftLeft\x00ShiftRight\x00Add\x00Subtract\x00Multiply\x00Divide\x00Remainder\x00Concat\x00OpenRead\x00OpenWrite\x00BitNot\x00OpenDup\x00OpenAutoindex\x00String8\x00OpenEphemeral\x00SorterOpen\x00SequenceTest\x00OpenPseudo\x00Close\x00ColumnsUsed\x00SeekScan\x00SeekHit\x00Sequence\x00NewRowid\x00Insert\x00RowCell\x00Delete\x00ResetCount\x00SorterCompare\x00SorterData\x00RowData\x00Rowid\x00NullRow\x00SeekEnd\x00IdxInsert\x00SorterInsert\x00IdxDelete\x00DeferredSeek\x00IdxRowid\x00FinishSeek\x00Destroy\x00Clear\x00ResetSorter\x00CreateBtree\x00SqlExec\x00ParseSchema\x00LoadAnalysis\x00DropTable\x00DropIndex\x00Real\x00DropTrigger\x00IntegrityCk\x00RowSetAdd\x00Param\x00FkCounter\x00MemMax\x00OffsetLimit\x00AggInverse\x00AggStep\x00AggStep1\x00AggValue\x00AggFinal\x00Expire\x00CursorLock\x00CursorUnlock\x00TableLock\x00VBegin\x00VCreate\x00VDestroy\x00VOpen\x00VInitIn\x00VColumn\x00VRename\x00Pagecount\x00MaxPgcnt\x00ClrSubtype\x00FilterAdd\x00Trace\x00CursorHint\x00ReleaseReg\x00Noop\x00Explain\x00Abortable\x00open\x00close\x00access\x00getcwd\x00stat\x00fstat\x00ftruncate\x00fcntl\x00read\x00pread\x00pread64\x00write\x00pwrite\x00pwrite64\x00fchmod\x00fallocate\x00unlink\x00openDirectory\x00mkdir\x00rmdir\x00fchown\x00geteuid\x00mmap\x00munmap\x00mremap\x00getpagesize\x00readlink\x00lstat\x00ioctl\x00attempt to open \"%s\" as file descriptor %d\x00/dev/null\x00os_unix.c:%d: (%d) %s(%s) - %s\x00S\x00cannot fstat db file %s\x00file unlinked while open: %s\x00multiple links to file: %s\x00file renamed while open: %s\x00%s\x00full_fsync\x00%s-shm\x00readonly_shm\x00nfs\x00hfs\x00ufs\x00afpfs\x00smbfs\x00webdav\x00psow\x00unix-excl\x00%s.lock\x00/var/tmp\x00/usr/tmp\x00/tmp\x00SQLITE_TMPDIR\x00TMPDIR\x00%s/etilqs_%llx%c\x00modeof\x00msdos\x00exfat\x00SQLITE_FORCE_PROXY_LOCKING\x00:auto:\x00fsync\x00/dev/urandom\x00sqliteplocks\x00/\x00dummy\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00break\x00path error (len %d)\x00read error (len %d)\x00create failed (%d)\x00write failed (%d)\x00rename failed (%d)\x00broke stale lock on %s\n\x00failed to break stale lock on %s, %s\n\x00-conch\x00.lock\x00:auto: (not 
held)\x00unix\x00unix-none\x00unix-dotfile\x00unix-posix\x00unix-flock\x00unix-afp\x00unix-nfs\x00unix-proxy\x00memdb\x00memdb(%p,%lld)\x00PRAGMA \"%w\".page_count\x00ATTACH x AS %Q\x00recovered %d pages from %s\x00-journal\x00-wal\x00nolock\x00immutable\x00PRAGMA table_list\x00recovered %d frames from WAL file %s\x00cannot limit WAL size: %s\x00SQLite format 3\x00:memory:\x00@ \x00\n\x00invalid page number %d\x002nd reference to page %d\x00Failed to read ptrmap key=%d\x00Bad ptr map entry key=%d expected=(%d,%d) got=(%d,%d)\x00failed to get page %d\x00freelist leaf count too big on page %d\x00%s is %d but should be %d\x00size\x00overflow list length\x00Page %u: \x00unable to get the page. error code=%d\x00btreeInitPage() returns error code %d\x00free space corruption\x00On tree page %u cell %d: \x00On page %u at right child: \x00Offset %d out of range %d..%d\x00Extends off end of page\x00Rowid %lld out of order\x00Child page depth differs\x00Multiple uses for byte %u of page %u\x00Fragmentation of %d bytes reported as %d on page %u\x00Main freelist: \x00max rootpage (%d) disagrees with header (%d)\x00incremental_vacuum enabled with a max rootpage of zero\x00Page %d is never used\x00Pointer map page %d is referenced\x00unknown database %s\x00destination database is in use\x00source and destination must be distinct\x00%!.15g\x00-\x00%s%s\x00k(%d\x00B\x00,%s%s%s\x00N.\x00)\x00%.18s-%s\x00%s(%d)\x00%d\x00(blob)\x00vtab:%p\x00%c%u\x00]\x00program\x00?\x008\x0016LE\x0016BE\x00addr\x00opcode\x00p1\x00p2\x00p3\x00p4\x00p5\x00comment\x00id\x00parent\x00notused\x00detail\x00%.4c%s%.16c\x00MJ delete: %s\x00MJ collide: %s\x00-mj%06X9%02X\x00FOREIGN KEY constraint failed\x00a CHECK constraint\x00a generated column\x00an index\x00non-deterministic use of %s() in %s\x00API called with finalized prepared statement\x00API called with NULL prepared statement\x00string or blob too big\x00bind on a busy prepared statement: [%s]\x00-- \x00'%.*q'\x00zeroblob(%d)\x00x'\x00%02x\x00'\x00%s constraint failed\x00%z: %s\x00abort at %d in [%s]: %s\x00cannot store %s value in %s column %s.%s\x00cannot open savepoint - SQL statements in progress\x00no such savepoint: %s\x00cannot release savepoint - SQL statements in progress\x00cannot commit transaction - SQL statements in progress\x00cannot start a transaction within a transaction\x00cannot rollback - no transaction is active\x00cannot commit - no transaction is active\x00database schema has changed\x00index corruption\x00sqlite_master\x00SELECT*FROM\"%w\".%s WHERE %s ORDER BY rowid\x00too many levels of trigger recursion\x00cannot change %s wal mode from within a transaction\x00into\x00out of\x00database table is locked: %s\x00ValueList\x00-- %s\x00statement aborts at %d: [%s] %s\x00NOT NULL\x00UNIQUE\x00CHECK\x00FOREIGN KEY\x00cannot open value of type %s\x00null\x00real\x00integer\x00no such rowid: %lld\x00cannot open virtual table: %s\x00cannot open table without rowid: %s\x00cannot open view: %s\x00no such column: \"%s\"\x00foreign key\x00indexed\x00cannot open %s column for writing\x00sqlite_\x00sqlite_temp_master\x00sqlite_temp_schema\x00sqlite_schema\x00main\x00*\x00new\x00old\x00excluded\x00misuse of aliased aggregate %s\x00misuse of aliased window function %s\x00row value misused\x00double-quoted string literal: \"%w\"\x00coalesce\x00no such column\x00ambiguous column name\x00%s: %s.%s.%s\x00%s: %s.%s\x00%s: %s\x00partial index WHERE clauses\x00index expressions\x00CHECK constraints\x00generated columns\x00%s prohibited in %s\x00true\x00false\x00the \".\" 
operator\x00second argument to %#T() must be a constant between 0.0 and 1.0\x00not authorized to use function: %#T\x00non-deterministic functions\x00%#T() may not be used as a window function\x00window\x00aggregate\x00misuse of %s function %#T()\x00no such function: %#T\x00wrong number of arguments to function %#T()\x00FILTER may not be used with non-aggregate %#T()\x00subqueries\x00parameters\x00%r %s BY term out of range - should be between 1 and %d\x00too many terms in ORDER BY clause\x00ORDER\x00%r ORDER BY term does not match any column in the result set\x00too many terms in %s BY clause\x00HAVING clause on a non-aggregate query\x00GROUP\x00aggregate functions are not allowed in the GROUP BY clause\x00Expression tree is too large (maximum depth %d)\x00IN(...) element has %d term%s - expected %d\x00s\x000\x00too many arguments on function %T\x00unsafe use of %#T()\x00variable number must be between ?1 and ?%d\x00too many SQL variables\x00%d columns assigned %d values\x00too many columns in %s\x00_ROWID_\x00ROWID\x00OID\x00USING ROWID SEARCH ON TABLE %s FOR IN-OPERATOR\x00USING INDEX %s FOR IN-OPERATOR\x00sub-select returns %d columns - expected %d\x00REUSE LIST SUBQUERY %d\x00%sLIST SUBQUERY %d\x00CORRELATED \x00REUSE SUBQUERY %d\x00%sSCALAR SUBQUERY %d\x001\x000x\x00hex literal too big: %s%#T\x00generated column loop on \"%s\"\x00blob\x00text\x00numeric\x00flexnum\x00none\x00misuse of aggregate: %#T()\x00unknown function: %#T()\x00RAISE() may only be used within a trigger-program\x00B\x00C\x00D\x00E\x00F\x00table %s may not be altered\x00SELECT 1 FROM \"%w\".sqlite_master WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%' AND sqlite_rename_test(%Q, sql, type, name, %d, %Q, %d)=NULL \x00SELECT 1 FROM temp.sqlite_master WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%' AND sqlite_rename_test(%Q, sql, type, name, 1, %Q, %d)=NULL \x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_quotefix(%Q, sql)WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%'\x00UPDATE temp.sqlite_master SET sql = sqlite_rename_quotefix('temp', sql)WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%'\x00there is already another table or index with this name: %s\x00table\x00view %s may not be altered\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, %d) WHERE (type!='index' OR tbl_name=%Q COLLATE nocase)AND name NOT LIKE 'sqliteX_%%' ESCAPE 'X'\x00UPDATE %Q.sqlite_master SET tbl_name = %Q, name = CASE WHEN type='table' THEN %Q WHEN name LIKE 'sqliteX_autoindex%%' ESCAPE 'X' AND type='index' THEN 'sqlite_autoindex_' || %Q || substr(name,%d+18) ELSE name END WHERE tbl_name=%Q COLLATE nocase AND (type='table' OR type='index' OR type='trigger');\x00sqlite_sequence\x00UPDATE \"%w\".sqlite_sequence set name = %Q WHERE name = %Q\x00UPDATE sqlite_temp_schema SET sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, 1), tbl_name = CASE WHEN tbl_name=%Q COLLATE nocase AND sqlite_rename_test(%Q, sql, type, name, 1, 'after rename', 0) THEN %Q ELSE tbl_name END WHERE type IN ('view', 'trigger')\x00after rename\x00SELECT raise(ABORT,%Q) FROM \"%w\".\"%w\"\x00Cannot add a PRIMARY KEY column\x00Cannot add a UNIQUE column\x00Cannot add a REFERENCES column with non-NULL default value\x00Cannot add a NOT NULL column with default value NULL\x00Cannot add a column with non-constant default\x00cannot add a STORED column\x00UPDATE \"%w\".sqlite_master SET sql = printf('%%.%ds, ',sql) 
|| %Q || substr(sql,1+length(printf('%%.%ds',sql))) WHERE type = 'table' AND name = %Q\x00SELECT CASE WHEN quick_check GLOB 'CHECK*' THEN raise(ABORT,'CHECK constraint failed') ELSE raise(ABORT,'NOT NULL constraint failed') END FROM pragma_quick_check(%Q,%Q) WHERE quick_check GLOB 'CHECK*' OR quick_check GLOB 'NULL*'\x00virtual tables may not be altered\x00Cannot add a column to a view\x00sqlite_altertab_%s\x00view\x00virtual table\x00cannot %s %s \"%s\"\x00drop column from\x00rename columns of\x00no such column: \"%T\"\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, %d) WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND (type != 'index' OR tbl_name = %Q)\x00UPDATE temp.sqlite_master SET sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, 1) WHERE type IN ('trigger', 'view')\x00error in %s %s%s%s: %s\x00 \x00CREATE \x00\"%w\" \x00%Q%s\x00%.*s%s\x00cannot drop %s column: \"%s\"\x00PRIMARY KEY\x00cannot drop column \"%s\": no other columns exist\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_drop_column(%d, sql, %d) WHERE (type=='table' AND tbl_name=%Q COLLATE nocase)\x00after drop column\x00sqlite_rename_column\x00sqlite_rename_table\x00sqlite_rename_test\x00sqlite_drop_column\x00sqlite_rename_quotefix\x00CREATE TABLE %Q.%s(%s)\x00DELETE FROM %Q.%s WHERE %s=%Q\x00DELETE FROM %Q.%s\x00sqlite_stat1\x00tbl,idx,stat\x00sqlite_stat4\x00tbl,idx,neq,nlt,ndlt,sample\x00sqlite_stat3\x00stat_init\x00stat_push\x00%llu\x00 %llu\x00%llu \x00stat_get\x00sqlite\\_%\x00BBB\x00idx\x00tbl\x00unordered*\x00sz=[0-9]*\x00noskipscan*\x00SELECT idx,count(*) FROM %Q.sqlite_stat4 GROUP BY idx\x00SELECT idx,neq,nlt,ndlt,sample FROM %Q.sqlite_stat4\x00SELECT tbl,idx,stat FROM %Q.sqlite_stat1\x00x\x00\x00too many attached databases - max %d\x00database %s is already in use\x00database is already attached\x00attached databases must use the same text encoding as main database\x00unable to open database: %s\x00no such database: %s\x00cannot detach database %s\x00database %s is locked\x00sqlite_detach\x00sqlite_attach\x00%s cannot use variables\x00%s %T cannot reference objects in database %s\x00authorizer malfunction\x00%s.%s\x00%s.%z\x00access to %z is prohibited\x00not authorized\x00pragma_\x00no such view\x00no such table\x00corrupt database\x00unknown database %T\x00object name reserved for internal use: %s\x00temporary table name must be unqualified\x00%s %T already exists\x00there is already an index named %s\x00sqlite_returning\x00cannot use RETURNING in a trigger\x00too many columns on %s\x00always\x00generated\x00duplicate column name: %s\x00default value of column [%s] is not constant\x00cannot use DEFAULT on a generated column\x00generated columns cannot be part of the PRIMARY KEY\x00table \"%s\" has more than one primary key\x00AUTOINCREMENT is only allowed on an INTEGER PRIMARY KEY\x00virtual tables cannot use computed columns\x00virtual\x00stored\x00error in generated column \"%s\"\x00,\x00\n \x00,\n \x00\n)\x00CREATE TABLE \x00 TEXT\x00 NUM\x00 INT\x00 REAL\x00unknown datatype for %s.%s: \"%s\"\x00missing datatype for %s.%s\x00AUTOINCREMENT not allowed on WITHOUT ROWID tables\x00PRIMARY KEY missing on table %s\x00must have at least one non-generated column\x00TABLE\x00VIEW\x00CREATE %s %.*s\x00UPDATE %Q.sqlite_master SET type='%s', name=%Q, tbl_name=%Q, rootpage=#%d, sql=%Q WHERE rowid=#%d\x00CREATE TABLE %Q.sqlite_sequence(name,seq)\x00tbl_name='%q' AND type!='trigger'\x00parameters are not allowed in views\x00view %s is circularly 
defined\x00corrupt schema\x00UPDATE %Q.sqlite_master SET rootpage=%d WHERE #%d AND rootpage=#%d\x00sqlite_stat%d\x00DELETE FROM %Q.sqlite_sequence WHERE name=%Q\x00DELETE FROM %Q.sqlite_master WHERE tbl_name=%Q and type!='trigger'\x00table %s may not be dropped\x00use DROP TABLE to delete table %s\x00use DROP VIEW to delete view %s\x00foreign key on %s should reference only one column of table %T\x00number of columns in foreign key does not match the number of columns in the referenced table\x00unknown column \"%s\" in foreign key definition\x00unsupported use of NULLS %s\x00FIRST\x00LAST\x00index\x00cannot create a TEMP index on non-TEMP table \"%s\"\x00table %s may not be indexed\x00views may not be indexed\x00virtual tables may not be indexed\x00there is already a table named %s\x00index %s already exists\x00sqlite_autoindex_%s_%d\x00expressions prohibited in PRIMARY KEY and UNIQUE constraints\x00conflicting ON CONFLICT clauses specified\x00invalid rootpage\x00CREATE%s INDEX %.*s\x00 UNIQUE\x00INSERT INTO %Q.sqlite_master VALUES('index',%Q,%Q,#%d,%Q);\x00name='%q' AND type='index'\x00no such index: %S\x00index associated with UNIQUE or PRIMARY KEY constraint cannot be dropped\x00DELETE FROM %Q.sqlite_master WHERE name=%Q AND type='index'\x00too many FROM clause terms, max: %d\x00a JOIN clause is required before %s\x00ON\x00USING\x00BEGIN\x00ROLLBACK\x00COMMIT\x00RELEASE\x00unable to open a temporary database file for storing temporary tables\x00index '%q'\x00, \x00%s.rowid\x00unable to identify the object to be reindexed\x00duplicate WITH table name: %s\x00no such collation sequence: %s\x00unsafe use of virtual table \"%s\"\x00table %s may not be modified\x00cannot modify %s because it is a view\x00rows deleted\x00integer overflow\x00%.*f\x00LIKE or GLOB pattern too complex\x00ESCAPE expression must be a single character\x00%!.20e\x00%Q\x00?000\x00MATCH\x00like\x00implies_nonnull_row\x00expr_compare\x00expr_implies_expr\x00affinity\x00soundex\x00load_extension\x00sqlite_compileoption_used\x00sqlite_compileoption_get\x00unlikely\x00likelihood\x00likely\x00sqlite_offset\x00ltrim\x00rtrim\x00trim\x00min\x00max\x00typeof\x00subtype\x00length\x00instr\x00printf\x00format\x00unicode\x00char\x00abs\x00round\x00upper\x00lower\x00hex\x00unhex\x00ifnull\x00random\x00randomblob\x00nullif\x00sqlite_version\x00sqlite_source_id\x00sqlite_log\x00quote\x00last_insert_rowid\x00changes\x00total_changes\x00replace\x00zeroblob\x00substr\x00substring\x00sum\x00total\x00avg\x00count\x00group_concat\x00glob\x00ceil\x00ceiling\x00floor\x00trunc\x00ln\x00log\x00log10\x00log2\x00exp\x00pow\x00power\x00mod\x00acos\x00asin\x00atan\x00atan2\x00cos\x00sin\x00tan\x00cosh\x00sinh\x00tanh\x00acosh\x00asinh\x00atanh\x00sqrt\x00radians\x00degrees\x00pi\x00sign\x00iif\x00foreign key mismatch - \"%w\" referencing \"%w\"\x00cannot INSERT into generated column \"%s\"\x00table %S has no column named %s\x00table %S has %d columns but %d values were supplied\x00%d values for %d columns\x00UPSERT not implemented for virtual table \"%s\"\x00cannot UPSERT a view\x00rows inserted\x00sqlite3_extension_init\x00sqlite3_\x00lib\x00_init\x00no entry point [%s] in shared library [%s]\x00error during initialization: %s\x00unable to open shared library [%.*s]\x00dylib\x00automatic extension loading failed: 
%s\x00seq\x00from\x00to\x00on_update\x00on_delete\x00match\x00cid\x00name\x00type\x00notnull\x00dflt_value\x00pk\x00hidden\x00schema\x00ncol\x00wr\x00strict\x00seqno\x00desc\x00coll\x00key\x00builtin\x00enc\x00narg\x00flags\x00wdth\x00hght\x00flgs\x00unique\x00origin\x00partial\x00rowid\x00fkid\x00file\x00busy\x00checkpointed\x00database\x00status\x00cache_size\x00timeout\x00analysis_limit\x00application_id\x00auto_vacuum\x00automatic_index\x00busy_timeout\x00cache_spill\x00case_sensitive_like\x00cell_size_check\x00checkpoint_fullfsync\x00collation_list\x00compile_options\x00count_changes\x00data_version\x00database_list\x00default_cache_size\x00defer_foreign_keys\x00empty_result_callbacks\x00encoding\x00foreign_key_check\x00foreign_key_list\x00foreign_keys\x00freelist_count\x00full_column_names\x00fullfsync\x00function_list\x00hard_heap_limit\x00ignore_check_constraints\x00incremental_vacuum\x00index_info\x00index_list\x00index_xinfo\x00integrity_check\x00journal_mode\x00journal_size_limit\x00legacy_alter_table\x00lock_proxy_file\x00locking_mode\x00max_page_count\x00mmap_size\x00module_list\x00optimize\x00page_count\x00page_size\x00pragma_list\x00query_only\x00quick_check\x00read_uncommitted\x00recursive_triggers\x00reverse_unordered_selects\x00schema_version\x00secure_delete\x00short_column_names\x00shrink_memory\x00soft_heap_limit\x00synchronous\x00table_info\x00table_list\x00table_xinfo\x00temp_store\x00temp_store_directory\x00threads\x00trusted_schema\x00user_version\x00wal_autocheckpoint\x00wal_checkpoint\x00writable_schema\x00onoffalseyestruextrafull\x00exclusive\x00normal\x00full\x00incremental\x00memory\x00temporary storage cannot be changed from within a transaction\x00SET NULL\x00SET DEFAULT\x00CASCADE\x00RESTRICT\x00NO ACTION\x00delete\x00persist\x00off\x00truncate\x00wal\x00w\x00a\x00sissii\x00utf8\x00utf16le\x00utf16be\x00-%T\x00fast\x00not a writable directory\x00failed to set lock proxy file\x00Safety level may not be changed inside a transaction\x00reset\x00issisii\x00issisi\x00SELECT*FROM\"%w\"\x00shadow\x00sssiii\x00iisX\x00isiX\x00c\x00u\x00isisi\x00iss\x00is\x00iissssss\x00NONE\x00siX\x00*** in database %s ***\n\x00row not in PRIMARY KEY order for %s\x00NULL value in %s.%s\x00non-%s value in %s.%s\x00NUMERIC value in %s.%s\x00C\x00TEXT value in %s.%s\x00CHECK constraint failed in %s\x00row \x00 missing from index \x00 values differ from index \x00non-unique entry in index \x00wrong # of entries in index \x00ok\x00unsupported encoding: %s\x00restart\x00ANALYZE \"%w\".\"%w\"\x00UTF8\x00UTF-8\x00UTF-16le\x00UTF-16be\x00UTF16le\x00UTF16be\x00UTF-16\x00UTF16\x00CREATE TABLE x\x00%c\"%s\"\x00(\"%s\"\x00,arg HIDDEN\x00,schema HIDDEN\x00PRAGMA \x00%Q.\x00=%Q\x00error in %s %s after %s: %s\x00malformed database schema (%s)\x00%z - %s\x00rename\x00drop column\x00add column\x00orphan index\x00CREATE TABLE x(type text,name text,tbl_name text,rootpage int,sql text)\x00unsupported file format\x00SELECT*FROM\"%w\".%s ORDER BY rowid\x00database schema is locked: %s\x00statement too long\x00unknown join type: %T%s%T%s%T\x00naturaleftouterightfullinnercross\x00a NATURAL join may not have an ON or USING clause\x00cannot join using column %s - column not present in both tables\x00ambiguous reference to %s in USING()\x00UNION ALL\x00INTERSECT\x00EXCEPT\x00UNION\x00USE TEMP B-TREE FOR %s\x00USE TEMP B-TREE FOR %sORDER BY\x00RIGHT PART OF \x00column%d\x00%.*z:%u\x00NUM\x00cannot use window functions in recursive queries\x00recursive aggregate queries not supported\x00SETUP\x00RECURSIVE 
STEP\x00SCAN %d CONSTANT ROW%s\x00COMPOUND QUERY\x00LEFT-MOST SUBQUERY\x00%s USING TEMP B-TREE\x00all VALUES must have the same number of terms\x00SELECTs to the left and right of %s do not have the same number of result columns\x00MERGE (%s)\x00LEFT\x00RIGHT\x00no such index: %s\x00'%s' is not a function\x00no such index: \"%s\"\x00multiple references to recursive table: %s\x00circular reference: %s\x00table %s has %d values for %d columns\x00multiple recursive references: %s\x00recursive reference in a subquery: %s\x00%!S\x00too many references to \"%s\": max 65535\x00access to view \"%s\" prohibited\x00..%s\x00%s.%s.%s\x00no such table: %s\x00no tables specified\x00too many columns in result set\x00DISTINCT aggregates must have exactly one argument\x00USE TEMP B-TREE FOR %s(DISTINCT)\x00SCAN %s%s%s\x00 USING COVERING INDEX \x00target object/alias may not appear in FROM clause: %s\x00expected %d columns for '%s' but got %d\x00CO-ROUTINE %!S\x00MATERIALIZE %!S\x00DISTINCT\x00GROUP BY\x00sqlite3_get_table() called with two or more incompatible queries\x00temporary trigger may not have qualified name\x00trigger\x00cannot create triggers on virtual tables\x00trigger %T already exists\x00cannot create trigger on system table\x00cannot create %s trigger on view: %S\x00BEFORE\x00AFTER\x00cannot create INSTEAD OF trigger on table: %S\x00trigger \"%s\" may not write to shadow table \"%s\"\x00INSERT INTO %Q.sqlite_master VALUES('trigger',%Q,%Q,0,'CREATE TRIGGER %q')\x00type='trigger' AND name='%q'\x00no such trigger: %S\x00DELETE FROM %Q.sqlite_master WHERE name=%Q AND type='trigger'\x00%s RETURNING is not available on virtual tables\x00DELETE\x00UPDATE\x00RETURNING may not use \"TABLE.*\" wildcards\x00-- TRIGGER %s\x00cannot UPDATE generated column \"%s\"\x00no such column: %s\x00rows updated\x00%r \x00%sON CONFLICT clause does not match any PRIMARY KEY or UNIQUE constraint\x00CRE\x00INS\x00cannot VACUUM from within a transaction\x00cannot VACUUM - SQL statements in progress\x00non-text filename\x00ATTACH %Q AS vacuum_db\x00output file already exists\x00SELECT sql FROM \"%w\".sqlite_schema WHERE type='table'AND name<>'sqlite_sequence' AND coalesce(rootpage,1)>0\x00SELECT sql FROM \"%w\".sqlite_schema WHERE type='index'\x00SELECT'INSERT INTO vacuum_db.'||quote(name)||' SELECT*FROM\"%w\".'||quote(name)FROM vacuum_db.sqlite_schema WHERE type='table'AND coalesce(rootpage,1)>0\x00INSERT INTO vacuum_db.sqlite_schema SELECT*FROM \"%w\".sqlite_schema WHERE type IN('view','trigger') OR(type='table'AND rootpage=0)\x00CREATE VIRTUAL TABLE %T\x00UPDATE %Q.sqlite_master SET type='table', name=%Q, tbl_name=%Q, rootpage=0, sql=%Q WHERE rowid=#%d\x00name=%Q AND sql=%Q\x00vtable constructor called recursively: %s\x00vtable constructor failed: %s\x00vtable constructor did not declare schema: %s\x00no such module: %s\x00\x00 AND \x00(\x00 (\x00%s=?\x00ANY(%s)\x00>\x00<\x00%s %S\x00SEARCH\x00SCAN\x00AUTOMATIC PARTIAL COVERING INDEX\x00AUTOMATIC COVERING INDEX\x00COVERING INDEX %s\x00INDEX %s\x00 USING \x00 USING INTEGER PRIMARY KEY (%s\x00>? 
AND %s\x00%c?)\x00 VIRTUAL TABLE INDEX %d:%s\x00 LEFT-JOIN\x00BLOOM FILTER ON %S (\x00rowid=?\x00MULTI-INDEX OR\x00INDEX %d\x00RIGHT-JOIN %s\x00regexp\x00ON clause references tables to its right\x00NOCASE\x00too many arguments on %s() - max %d\x00automatic index on %s(%s)\x00auto-index\x00%s.xBestIndex malfunction\x00abbreviated query algorithm search\x00no query solution\x00at most %d tables in a join\x00SCAN CONSTANT ROW\x00second argument to nth_value must be a positive integer\x00argument of ntile must be a positive integer\x00row_number\x00dense_rank\x00rank\x00percent_rank\x00cume_dist\x00ntile\x00last_value\x00nth_value\x00first_value\x00lead\x00lag\x00no such window: %s\x00RANGE with offset PRECEDING/FOLLOWING requires one ORDER BY expression\x00FILTER clause may only be used with aggregate window functions\x00misuse of aggregate: %s()\x00unsupported frame specification\x00PARTITION clause\x00ORDER BY clause\x00frame specification\x00cannot override %s of window: %s\x00DISTINCT is not supported for window functions\x00frame starting offset must be a non-negative integer\x00frame ending offset must be a non-negative integer\x00frame starting offset must be a non-negative number\x00frame ending offset must be a non-negative number\x00%s clause should come after %s not before\x00ORDER BY\x00LIMIT\x00too many terms in compound SELECT\x00syntax error after column name \"%.*s\"\x00parser stack overflow\x00unknown table option: %.*s\x00set list\x00near \"%T\": syntax error\x00qualified table names are not allowed on INSERT, UPDATE, and DELETE statements within triggers\x00the INDEXED BY clause is not allowed on UPDATE or DELETE statements within triggers\x00the NOT INDEXED clause is not allowed on UPDATE or DELETE statements within triggers\x00incomplete input\x00unrecognized token: \"%T\"\x00%s in \"%s\"\x00create\x00temp\x00temporary\x00end\x00explain\x00unable to close due to unfinalized statements or unfinished backups\x00unknown error\x00abort due to ROLLBACK\x00another row available\x00no more rows available\x00not an error\x00SQL logic error\x00access permission denied\x00query aborted\x00database is locked\x00database table is locked\x00attempt to write a readonly database\x00interrupted\x00disk I/O error\x00database disk image is malformed\x00unknown operation\x00database or disk is full\x00unable to open database file\x00locking protocol\x00constraint failed\x00datatype mismatch\x00bad parameter or other API misuse\x00authorization denied\x00column index out of range\x00file is not a database\x00notification message\x00warning message\x00unable to delete/modify user-function due to active statements\x00unable to use function %s in the requested context\x00unknown database: %s\x00unable to delete/modify collation sequence due to active statements\x00file:\x00localhost\x00invalid uri authority: %.*s\x00vfs\x00cache\x00mode\x00no such %s mode: %s\x00%s mode not allowed: %s\x00no such vfs: %s\x00shared\x00private\x00ro\x00rw\x00rwc\x00RTRIM\x00\x00\x00\x00%s at line %d of [%.10s]\x00database corruption\x00misuse\x00cannot open file\x00no such table column: %s.%s\x00SQLITE_\x00database is deadlocked\x00array\x00object\x000123456789abcdef\x00JSON cannot hold BLOB values\x00malformed JSON\x00[0]\x00JSON path error near '%q'\x00json_%s() needs an odd number of arguments\x00$[\x00$.\x00json_object() requires an even number of arguments\x00json_object() labels must be TEXT\x00set\x00insert\x00[]\x00{}\x00CREATE TABLE x(key,value,type,atom,id,parent,fullkey,path,json HIDDEN,root 
HIDDEN)\x00.%.*s\x00[%d]\x00$\x00json\x00json_array\x00json_array_length\x00json_extract\x00->\x00->>\x00json_insert\x00json_object\x00json_patch\x00json_quote\x00json_remove\x00json_replace\x00json_set\x00json_type\x00json_valid\x00json_group_array\x00json_group_object\x00json_each\x00json_tree\x00%s_node\x00data\x00DROP TABLE '%q'.'%q_node';DROP TABLE '%q'.'%q_rowid';DROP TABLE '%q'.'%q_parent';\x00RtreeMatchArg\x00SELECT * FROM %Q.%Q\x00UNIQUE constraint failed: %s.%s\x00rtree constraint failed: %s.(%s<=%s)\x00ALTER TABLE %Q.'%q_node' RENAME TO \"%w_node\";ALTER TABLE %Q.'%q_parent' RENAME TO \"%w_parent\";ALTER TABLE %Q.'%q_rowid' RENAME TO \"%w_rowid\";\x00SELECT stat FROM %Q.sqlite_stat1 WHERE tbl = '%q_rowid'\x00node\x00CREATE TABLE \"%w\".\"%w_rowid\"(rowid INTEGER PRIMARY KEY,nodeno\x00,a%d\x00);CREATE TABLE \"%w\".\"%w_node\"(nodeno INTEGER PRIMARY KEY,data);\x00CREATE TABLE \"%w\".\"%w_parent\"(nodeno INTEGER PRIMARY KEY,parentnode);\x00INSERT INTO \"%w\".\"%w_node\"VALUES(1,zeroblob(%d))\x00INSERT INTO\"%w\".\"%w_rowid\"(rowid,nodeno)VALUES(?1,?2)ON CONFLICT(rowid)DO UPDATE SET nodeno=excluded.nodeno\x00SELECT * FROM \"%w\".\"%w_rowid\" WHERE rowid=?1\x00UPDATE \"%w\".\"%w_rowid\"SET \x00a%d=coalesce(?%d,a%d)\x00a%d=?%d\x00 WHERE rowid=?1\x00INSERT OR REPLACE INTO '%q'.'%q_node' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_node' WHERE nodeno = ?1\x00SELECT nodeno FROM '%q'.'%q_rowid' WHERE rowid = ?1\x00INSERT OR REPLACE INTO '%q'.'%q_rowid' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_rowid' WHERE rowid = ?1\x00SELECT parentnode FROM '%q'.'%q_parent' WHERE nodeno = ?1\x00INSERT OR REPLACE INTO '%q'.'%q_parent' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_parent' WHERE nodeno = ?1\x00PRAGMA %Q.page_size\x00SELECT length(data) FROM '%q'.'%q_node' WHERE nodeno = 1\x00undersize RTree blobs in \"%q_node\"\x00Wrong number of columns for an rtree table\x00Too few columns for an rtree table\x00Too many columns for an rtree table\x00Auxiliary rtree columns must be last\x00CREATE TABLE x(%.*s INT\x00,%.*s\x00);\x00,%.*s REAL\x00,%.*s INT\x00{%lld\x00 %g\x00}\x00Invalid argument to rtreedepth()\x00%z%s%z\x00SELECT data FROM %Q.'%q_node' WHERE nodeno=?\x00Node %lld missing from database\x00SELECT parentnode FROM %Q.'%q_parent' WHERE nodeno=?1\x00SELECT nodeno FROM %Q.'%q_rowid' WHERE rowid=?1\x00Mapping (%lld -> %lld) missing from %s table\x00%_rowid\x00%_parent\x00Found (%lld -> %lld) in %s table, expected (%lld -> %lld)\x00Dimension %d of cell %d on node %lld is corrupt\x00Dimension %d of cell %d on node %lld is corrupt relative to parent\x00Node %lld is too small (%d bytes)\x00Rtree depth out of range (%d)\x00Node %lld is too small for cell count of %d (%d bytes)\x00SELECT count(*) FROM %Q.'%q%s'\x00Wrong number of entries in %%%s table - expected %lld, actual %lld\x00SELECT * FROM %Q.'%q_rowid'\x00Schema corrupt or not an rtree\x00_rowid\x00_parent\x00END\x00wrong number of arguments to function rtreecheck()\x00[\x00[%!g,%!g],\x00[%!g,%!g]]\x00\x00CREATE TABLE x(_shape\x00,%s\x00rtree\x00fullscan\x00_shape does not contain a valid polygon\x00geopoly_overlap\x00geopoly_within\x00geopoly\x00geopoly_area\x00geopoly_blob\x00geopoly_json\x00geopoly_svg\x00geopoly_contains_point\x00geopoly_debug\x00geopoly_bbox\x00geopoly_xform\x00geopoly_regular\x00geopoly_ccw\x00geopoly_group_bbox\x00rtreenode\x00rtreedepth\x00rtreecheck\x00rtree_i32\x00corrupt fossil delta\x00DROP TRIGGER IF EXISTS temp.rbu_insert_tr;DROP TRIGGER IF EXISTS temp.rbu_update1_tr;DROP TRIGGER IF EXISTS temp.rbu_update2_tr;DROP TRIGGER IF 
EXISTS temp.rbu_delete_tr;\x00SELECT rbu_target_name(name, type='view') AS target, name FROM sqlite_schema WHERE type IN ('table', 'view') AND target IS NOT NULL %s ORDER BY name\x00AND rootpage!=0 AND rootpage IS NOT NULL\x00SELECT name, rootpage, sql IS NULL OR substr(8, 6)=='UNIQUE' FROM main.sqlite_schema WHERE type='index' AND tbl_name = ?\x00SELECT (sql COLLATE nocase BETWEEN 'CREATE VIRTUAL' AND 'CREATE VIRTUAM'), rootpage FROM sqlite_schema WHERE name=%Q\x00PRAGMA index_list=%Q\x00SELECT rootpage FROM sqlite_schema WHERE name = %Q\x00PRAGMA table_info=%Q\x00PRAGMA main.index_list = %Q\x00PRAGMA main.index_xinfo = %Q\x00SELECT * FROM '%q'\x00rbu_\x00rbu_rowid\x00table %q %s rbu_rowid column\x00may not have\x00requires\x00PRAGMA table_info(%Q)\x00column missing from %q: %s\x00%z%s\"%w\"\x00%z%s%s\"%w\"%s\x00SELECT max(_rowid_) FROM \"%s%w\"\x00 WHERE _rowid_ > %lld \x00 DESC\x00quote(\x00||','||\x00SELECT %s FROM \"%s%w\" ORDER BY %s LIMIT 1\x00 WHERE (%s) > (%s) \x00_rowid_\x00%z%s \"%w\" COLLATE %Q\x00%z%s \"rbu_imp_%d%w\" COLLATE %Q DESC\x00%z%s quote(\"rbu_imp_%d%w\")\x00SELECT %s FROM \"rbu_imp_%w\" ORDER BY %s LIMIT 1\x00%z%s%s\x00(%s) > (%s)\x00%z%s(%.*s) COLLATE %Q\x00%z%s\"%w\" COLLATE %Q\x00%z%s\"rbu_imp_%d%w\"%s\x00%z%s\"rbu_imp_%d%w\" %s COLLATE %Q\x00%z%s\"rbu_imp_%d%w\" IS ?\x00%z%s%s.\"%w\"\x00%z%sNULL\x00%z, %s._rowid_\x00_rowid_ = ?%d\x00%z%sc%d=?%d\x00_rowid_ = (SELECT id FROM rbu_imposter2 WHERE %z)\x00%z%s\"%w\"=?%d\x00invalid rbu_control value\x00%z%s\"%w\"=rbu_delta(\"%w\", ?%d)\x00%z%s\"%w\"=rbu_fossil_delta(\"%w\", ?%d)\x00PRIMARY KEY(\x00%z%s\"%w\"%s\x00%z)\x00SELECT name FROM sqlite_schema WHERE rootpage = ?\x00%z%sc%d %s COLLATE %Q\x00%z%sc%d%s\x00%z, id INTEGER\x00CREATE TABLE rbu_imposter2(%z, PRIMARY KEY(%z)) WITHOUT ROWID\x00PRIMARY KEY \x00%z%s\"%w\" %s %sCOLLATE %Q%s\x00 NOT NULL\x00%z, %z\x00CREATE TABLE \"rbu_imp_%w\"(%z)%s\x00 WITHOUT ROWID\x00INSERT INTO %s.'rbu_tmp_%q'(rbu_control,%s%s) VALUES(%z)\x00SELECT trim(sql) FROM sqlite_schema WHERE type='index' AND name=?\x00 LIMIT -1 OFFSET %d\x00CREATE TABLE \"rbu_imp_%w\"( %s, PRIMARY KEY( %s ) ) WITHOUT ROWID\x00INSERT INTO \"rbu_imp_%w\" VALUES(%s)\x00DELETE FROM \"rbu_imp_%w\" WHERE %s\x00SELECT %s, 0 AS rbu_control FROM '%q' %s %s %s ORDER BY %s%s\x00AND\x00WHERE\x00SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' %s ORDER BY %s%s\x00SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' %s UNION ALL SELECT %s, rbu_control FROM '%q' %s %s typeof(rbu_control)='integer' AND rbu_control!=1 ORDER BY %s%s\x00rbu_imp_\x00INSERT INTO \"%s%w\"(%s%s) VALUES(%s)\x00, _rowid_\x00DELETE FROM \"%s%w\" WHERE %s\x00, rbu_rowid\x00CREATE TABLE IF NOT EXISTS %s.'rbu_tmp_%q' AS SELECT *%s FROM '%q' WHERE 0;\x00, 0 AS rbu_rowid\x00CREATE TEMP TRIGGER rbu_delete_tr BEFORE DELETE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(3, %s);END;CREATE TEMP TRIGGER rbu_update1_tr BEFORE UPDATE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(3, %s);END;CREATE TEMP TRIGGER rbu_update2_tr AFTER UPDATE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(4, %s);END;\x00CREATE TEMP TRIGGER rbu_insert_tr AFTER INSERT ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(0, %s);END;\x00,_rowid_ \x00,rbu_rowid\x00SELECT %s,%s rbu_control%s FROM '%q'%s %s %s %s\x000 AS \x00UPDATE \"%s%w\" SET %s WHERE %s\x00SELECT k, v FROM %s.rbu_state\x00file:///%s-vacuum?modeof=%s\x00ATTACH %Q AS stat\x00CREATE TABLE IF NOT EXISTS %s.rbu_state(k INTEGER PRIMARY KEY, v)\x00cannot vacuum wal mode 
database\x00file:%s-vactmp?rbu_memory=1%s%s\x00&\x00rbu_tmp_insert\x00rbu_fossil_delta\x00rbu_target_name\x00SELECT * FROM sqlite_schema\x00rbu vfs not found\x00PRAGMA main.wal_checkpoint=restart\x00rbu_exclusive_checkpoint\x00%s-oal\x00%s-wal\x00PRAGMA schema_version\x00PRAGMA schema_version = %d\x00INSERT OR REPLACE INTO %s.rbu_state(k, v) VALUES (%d, %d), (%d, %Q), (%d, %Q), (%d, %d), (%d, %d), (%d, %lld), (%d, %lld), (%d, %lld), (%d, %lld), (%d, %Q) \x00PRAGMA main.%s\x00PRAGMA main.%s = %d\x00PRAGMA writable_schema=1\x00SELECT sql FROM sqlite_schema WHERE sql!='' AND rootpage!=0 AND name!='sqlite_sequence' ORDER BY type DESC\x00SELECT * FROM sqlite_schema WHERE rootpage=0 OR rootpage IS NULL\x00INSERT INTO sqlite_schema VALUES(?,?,?,?,?)\x00PRAGMA writable_schema=0\x00DELETE FROM %s.'rbu_tmp_%q'\x00rbu_state mismatch error\x00rbu_vfs_%d\x00SELECT count(*) FROM sqlite_schema WHERE type='index' AND tbl_name = %Q\x00rbu_index_cnt\x00SELECT 1 FROM sqlite_schema WHERE tbl_name = 'rbu_count'\x00SELECT sum(cnt * (1 + rbu_index_cnt(rbu_target_name(tbl))))FROM rbu_count\x00cannot update wal mode database\x00database modified during rbu %s\x00vacuum\x00update\x00BEGIN IMMEDIATE\x00PRAGMA journal_mode=off\x00-vactmp\x00DELETE FROM stat.rbu_state\x00rbu/zipvfs setup error\x00rbu(%s)/%z\x00rbu_memory\x00SELECT 0, 'tbl', '', 0, '', 1 UNION ALL SELECT 1, 'idx', '', 0, '', 2 UNION ALL SELECT 2, 'stat', '', 0, '', 0\x00PRAGMA '%q'.table_info('%q')\x00%z%s\"%w\".\"%w\".\"%w\"=\"%w\".\"%w\".\"%w\"\x00%z%s\"%w\".\"%w\".\"%w\" IS NOT \"%w\".\"%w\".\"%w\"\x00 OR \x00SELECT * FROM \"%w\".\"%w\" WHERE NOT EXISTS ( SELECT 1 FROM \"%w\".\"%w\" WHERE %s)\x00SELECT * FROM \"%w\".\"%w\", \"%w\".\"%w\" WHERE %s AND (%z)\x00table schemas do not match\x00SELECT tbl, ?2, stat FROM %Q.sqlite_stat1 WHERE tbl IS ?1 AND idx IS (CASE WHEN ?2=X'' THEN NULL ELSE ?2 END)\x00SELECT * FROM \x00 WHERE \x00 IS ?\x00SAVEPOINT changeset\x00RELEASE changeset\x00UPDATE main.\x00 SET \x00 = ?\x00idx IS CASE WHEN length(?4)=0 AND typeof(?4)='blob' THEN NULL ELSE ?4 END \x00DELETE FROM main.\x00 AND (?\x00AND \x00INSERT INTO main.\x00) VALUES(?\x00, ?\x00INSERT INTO main.sqlite_stat1 VALUES(?1, CASE WHEN length(?2)=0 AND typeof(?2)='blob' THEN NULL ELSE ?2 END, ?3)\x00DELETE FROM main.sqlite_stat1 WHERE tbl=?1 AND idx IS CASE WHEN length(?2)=0 AND typeof(?2)='blob' THEN NULL ELSE ?2 END AND (?4 OR stat IS ?3)\x00SAVEPOINT replace_op\x00RELEASE replace_op\x00SAVEPOINT changeset_apply\x00PRAGMA defer_foreign_keys = 1\x00sqlite3changeset_apply(): no such table: %s\x00sqlite3changeset_apply(): table %s has %d columns, expected %d or more\x00sqlite3changeset_apply(): primary key mismatch for table %s\x00PRAGMA defer_foreign_keys = 0\x00RELEASE changeset_apply\x00ROLLBACK TO changeset_apply\x00fts5: parser stack overflow\x00fts5: syntax error near \"%.*s\"\x00%z%.*s\x00wrong number of arguments to function highlight()\x00wrong number of arguments to function snippet()\x00snippet\x00highlight\x00bm25\x00prefix\x00malformed prefix=... directive\x00too many prefix indexes (max %d)\x00prefix length out of range (max 999)\x00tokenize\x00multiple tokenize=... directives\x00parse error in tokenize directive\x00content\x00multiple content=... directives\x00%Q.%Q\x00content_rowid\x00multiple content_rowid=... directives\x00columnsize\x00malformed columnsize=... directive\x00columns\x00malformed detail=... 
directive\x00unrecognized option: \"%.*s\"\x00reserved fts5 column name: %s\x00unindexed\x00unrecognized column option: %s\x00T.%Q\x00, T.%Q\x00, T.c%d\x00reserved fts5 table name: %s\x00parse error in \"%s\"\x00docsize\x00%Q.'%q_%s'\x00CREATE TABLE x(\x00%z%s%Q\x00%z, %Q HIDDEN, %s HIDDEN)\x00pgsz\x00hashsize\x00automerge\x00usermerge\x00crisismerge\x00SELECT k, v FROM %Q.'%q_config'\x00version\x00invalid fts5 file format (found %d, expected %d) - run 'rebuild'\x00unterminated string\x00fts5: syntax error near \"%.1s\"\x00OR\x00NOT\x00NEAR\x00expected integer, got \"%.*s\"\x00fts5: column queries are not supported (detail=none)\x00fts5: %s queries are not supported (detail!=full)\x00phrase\x00block\x00REPLACE INTO '%q'.'%q_data'(id, block) VALUES(?,?)\x00DELETE FROM '%q'.'%q_data' WHERE id>=? AND id<=?\x00DELETE FROM '%q'.'%q_idx' WHERE segid=?\x00PRAGMA %Q.data_version\x00SELECT pgno FROM '%q'.'%q_idx' WHERE segid=? AND term<=? ORDER BY term DESC LIMIT 1\x00INSERT INTO '%q'.'%q_idx'(segid,term,pgno) VALUES(?,?,?)\x00%s_data\x00id INTEGER PRIMARY KEY, block BLOB\x00segid, term, pgno, PRIMARY KEY(segid, term)\x00SELECT segid, term, (pgno>>1), (pgno&1) FROM %Q.'%q_idx' WHERE segid=%d ORDER BY 1, 2\x00\x00\x00\x00\x00\x00recursively defined fts5 content table\x00SELECT rowid, rank FROM %Q.%Q ORDER BY %s(\"%w\"%s%s) %s\x00DESC\x00ASC\x00reads\x00unknown special query: %.*s\x00SELECT %s\x00no such function: %s\x00parse error in rank function: %s\x00%s: table does not support scanning\x00delete-all\x00'delete-all' may only be used with a contentless or external content fts5 table\x00rebuild\x00'rebuild' may not be used with a contentless fts5 table\x00merge\x00integrity-check\x00cannot %s contentless fts5 table: %s\x00DELETE from\x00no such cursor: %lld\x00no such tokenizer: %s\x00error in tokenizer constructor\x00fts5_api_ptr\x00fts5: 2023-02-21 18:09:37 05941c2a04037fc3ed2ffae11f5d2260706f89431f463518740f72ada350866d\x00config\x00fts5\x00fts5_source_id\x00SELECT %s FROM %s T WHERE T.%Q >= ? AND T.%Q <= ? ORDER BY T.%Q ASC\x00SELECT %s FROM %s T WHERE T.%Q <= ? AND T.%Q >= ? 
ORDER BY T.%Q DESC\x00SELECT %s FROM %s T WHERE T.%Q=?\x00INSERT INTO %Q.'%q_content' VALUES(%s)\x00REPLACE INTO %Q.'%q_content' VALUES(%s)\x00DELETE FROM %Q.'%q_content' WHERE id=?\x00REPLACE INTO %Q.'%q_docsize' VALUES(?,?)\x00DELETE FROM %Q.'%q_docsize' WHERE id=?\x00SELECT sz FROM %Q.'%q_docsize' WHERE id=?\x00REPLACE INTO %Q.'%q_config' VALUES(?,?)\x00SELECT %s FROM %s AS T\x00DROP TABLE IF EXISTS %Q.'%q_data';DROP TABLE IF EXISTS %Q.'%q_idx';DROP TABLE IF EXISTS %Q.'%q_config';\x00DROP TABLE IF EXISTS %Q.'%q_docsize';\x00DROP TABLE IF EXISTS %Q.'%q_content';\x00ALTER TABLE %Q.'%q_%s' RENAME TO '%q_%s';\x00CREATE TABLE %Q.'%q_%q'(%s)%s\x00fts5: error creating shadow table %q_%s: %s\x00id INTEGER PRIMARY KEY\x00, c%d\x00id INTEGER PRIMARY KEY, sz BLOB\x00k PRIMARY KEY, v\x00DELETE FROM %Q.'%q_data';DELETE FROM %Q.'%q_idx';\x00DELETE FROM %Q.'%q_docsize';\x00SELECT count(*) FROM %Q.'%q_%s'\x00tokenchars\x00separators\x00L* N* Co\x00categories\x00remove_diacritics\x00unicode61\x00al\x00ance\x00ence\x00er\x00ic\x00able\x00ible\x00ant\x00ement\x00ment\x00ent\x00ion\x00ou\x00ism\x00ate\x00iti\x00ous\x00ive\x00ize\x00at\x00bl\x00ble\x00iz\x00ational\x00tional\x00tion\x00enci\x00anci\x00izer\x00logi\x00bli\x00alli\x00entli\x00eli\x00e\x00ousli\x00ization\x00ation\x00ator\x00alism\x00iveness\x00fulness\x00ful\x00ousness\x00aliti\x00iviti\x00biliti\x00ical\x00ness\x00icate\x00iciti\x00ative\x00alize\x00eed\x00ee\x00ed\x00ing\x00case_sensitive\x00ascii\x00porter\x00trigram\x00col\x00row\x00instance\x00fts5vocab: unknown table type: %Q\x00CREATE TABlE vocab(term, col, doc, cnt)\x00CREATE TABlE vocab(term, doc, cnt)\x00CREATE TABlE vocab(term, doc, col, offset)\x00wrong number of vtable arguments\x00recursive definition for %s.%s\x00SELECT t.%Q FROM %Q.%Q AS t WHERE t.%Q MATCH '*id'\x00no such fts5 table: %s.%s\x00fts5vocab\x002023-02-21 18:09:37 05941c2a04037fc3ed2ffae11f5d2260706f89431f463518740f72ada350866d\x00" +var ts1 = "3.41.2\x00ATOMIC_INTRINSICS=0\x00COMPILER=clang-12.0.0\x00DEFAULT_AUTOVACUUM\x00DEFAULT_CACHE_SIZE=-2000\x00DEFAULT_FILE_FORMAT=4\x00DEFAULT_JOURNAL_SIZE_LIMIT=-1\x00DEFAULT_MEMSTATUS=0\x00DEFAULT_MMAP_SIZE=0\x00DEFAULT_PAGE_SIZE=4096\x00DEFAULT_PCACHE_INITSZ=20\x00DEFAULT_RECURSIVE_TRIGGERS\x00DEFAULT_SECTOR_SIZE=4096\x00DEFAULT_SYNCHRONOUS=2\x00DEFAULT_WAL_AUTOCHECKPOINT=1000\x00DEFAULT_WAL_SYNCHRONOUS=2\x00DEFAULT_WORKER_THREADS=0\x00ENABLE_COLUMN_METADATA\x00ENABLE_FTS5\x00ENABLE_GEOPOLY\x00ENABLE_MATH_FUNCTIONS\x00ENABLE_MEMORY_MANAGEMENT\x00ENABLE_OFFSET_SQL_FUNC\x00ENABLE_PREUPDATE_HOOK\x00ENABLE_RBU\x00ENABLE_RTREE\x00ENABLE_SESSION\x00ENABLE_SNAPSHOT\x00ENABLE_STAT4\x00ENABLE_UNLOCK_NOTIFY\x00LIKE_DOESNT_MATCH_BLOBS\x00MALLOC_SOFT_LIMIT=1024\x00MAX_ATTACHED=10\x00MAX_COLUMN=2000\x00MAX_COMPOUND_SELECT=500\x00MAX_DEFAULT_PAGE_SIZE=8192\x00MAX_EXPR_DEPTH=1000\x00MAX_FUNCTION_ARG=127\x00MAX_LENGTH=1000000000\x00MAX_LIKE_PATTERN_LENGTH=50000\x00MAX_MMAP_SIZE=0x7fff0000\x00MAX_PAGE_COUNT=1073741823\x00MAX_PAGE_SIZE=65536\x00MAX_SQL_LENGTH=1000000000\x00MAX_TRIGGER_DEPTH=1000\x00MAX_VARIABLE_NUMBER=32766\x00MAX_VDBE_OP=250000000\x00MAX_WORKER_THREADS=8\x00MUTEX_NOOP\x00SOUNDEX\x00SYSTEM_MALLOC\x00TEMP_STORE=1\x00THREADSAFE=1\x00BINARY\x00ANY\x00BLOB\x00INT\x00INTEGER\x00REAL\x00TEXT\x0020b:20e\x0020c:20e\x0020e\x0040f-21a-21d\x00now\x00local time unavailable\x00second\x00minute\x00hour\x00\x00\x00day\x00\x00\x00\x00month\x00\x00year\x00\x00\x00auto\x00julianday\x00localtime\x00unixepoch\x00utc\x00weekday \x00start of 
\x00month\x00year\x00day\x00%02d\x00%06.3f\x00%03d\x00%.16g\x00%lld\x00%04d\x00date\x00time\x00datetime\x00strftime\x00current_time\x00current_timestamp\x00current_date\x00failed to allocate %u bytes of memory\x00failed memory resize %u to %u bytes\x00out of memory\x000123456789ABCDEF0123456789abcdef\x00-x0\x00X0\x00%\x00NaN\x00Inf\x00\x00NULL\x00(NULL)\x00.\x00(join-%u)\x00(subquery-%u)\x00thstndrd\x00922337203685477580\x00API call with %s database connection pointer\x00unopened\x00invalid\x00Savepoint\x00AutoCommit\x00Transaction\x00Checkpoint\x00JournalMode\x00Vacuum\x00VFilter\x00VUpdate\x00Init\x00Goto\x00Gosub\x00InitCoroutine\x00Yield\x00MustBeInt\x00Jump\x00Once\x00If\x00IfNot\x00IsType\x00Not\x00IfNullRow\x00SeekLT\x00SeekLE\x00SeekGE\x00SeekGT\x00IfNotOpen\x00IfNoHope\x00NoConflict\x00NotFound\x00Found\x00SeekRowid\x00NotExists\x00Last\x00IfSmaller\x00SorterSort\x00Sort\x00Rewind\x00SorterNext\x00Prev\x00Next\x00IdxLE\x00IdxGT\x00IdxLT\x00Or\x00And\x00IdxGE\x00RowSetRead\x00RowSetTest\x00Program\x00FkIfZero\x00IsNull\x00NotNull\x00Ne\x00Eq\x00Gt\x00Le\x00Lt\x00Ge\x00ElseEq\x00IfPos\x00IfNotZero\x00DecrJumpZero\x00IncrVacuum\x00VNext\x00Filter\x00PureFunc\x00Function\x00Return\x00EndCoroutine\x00HaltIfNull\x00Halt\x00Integer\x00Int64\x00String\x00BeginSubrtn\x00Null\x00SoftNull\x00Blob\x00Variable\x00Move\x00Copy\x00SCopy\x00IntCopy\x00FkCheck\x00ResultRow\x00CollSeq\x00AddImm\x00RealAffinity\x00Cast\x00Permutation\x00Compare\x00IsTrue\x00ZeroOrNull\x00Offset\x00Column\x00TypeCheck\x00Affinity\x00MakeRecord\x00Count\x00ReadCookie\x00SetCookie\x00ReopenIdx\x00BitAnd\x00BitOr\x00ShiftLeft\x00ShiftRight\x00Add\x00Subtract\x00Multiply\x00Divide\x00Remainder\x00Concat\x00OpenRead\x00OpenWrite\x00BitNot\x00OpenDup\x00OpenAutoindex\x00String8\x00OpenEphemeral\x00SorterOpen\x00SequenceTest\x00OpenPseudo\x00Close\x00ColumnsUsed\x00SeekScan\x00SeekHit\x00Sequence\x00NewRowid\x00Insert\x00RowCell\x00Delete\x00ResetCount\x00SorterCompare\x00SorterData\x00RowData\x00Rowid\x00NullRow\x00SeekEnd\x00IdxInsert\x00SorterInsert\x00IdxDelete\x00DeferredSeek\x00IdxRowid\x00FinishSeek\x00Destroy\x00Clear\x00ResetSorter\x00CreateBtree\x00SqlExec\x00ParseSchema\x00LoadAnalysis\x00DropTable\x00DropIndex\x00Real\x00DropTrigger\x00IntegrityCk\x00RowSetAdd\x00Param\x00FkCounter\x00MemMax\x00OffsetLimit\x00AggInverse\x00AggStep\x00AggStep1\x00AggValue\x00AggFinal\x00Expire\x00CursorLock\x00CursorUnlock\x00TableLock\x00VBegin\x00VCreate\x00VDestroy\x00VOpen\x00VInitIn\x00VColumn\x00VRename\x00Pagecount\x00MaxPgcnt\x00ClrSubtype\x00FilterAdd\x00Trace\x00CursorHint\x00ReleaseReg\x00Noop\x00Explain\x00Abortable\x00open\x00close\x00access\x00getcwd\x00stat\x00fstat\x00ftruncate\x00fcntl\x00read\x00pread\x00pread64\x00write\x00pwrite\x00pwrite64\x00fchmod\x00fallocate\x00unlink\x00openDirectory\x00mkdir\x00rmdir\x00fchown\x00geteuid\x00mmap\x00munmap\x00mremap\x00getpagesize\x00readlink\x00lstat\x00ioctl\x00attempt to open \"%s\" as file descriptor %d\x00/dev/null\x00os_unix.c:%d: (%d) %s(%s) - %s\x00S\x00cannot fstat db file %s\x00file unlinked while open: %s\x00multiple links to file: %s\x00file renamed while open: 
%s\x00%s\x00full_fsync\x00%s-shm\x00readonly_shm\x00nfs\x00hfs\x00ufs\x00afpfs\x00smbfs\x00webdav\x00psow\x00unix-excl\x00%s.lock\x00/var/tmp\x00/usr/tmp\x00/tmp\x00SQLITE_TMPDIR\x00TMPDIR\x00%s/etilqs_%llx%c\x00modeof\x00msdos\x00exfat\x00SQLITE_FORCE_PROXY_LOCKING\x00:auto:\x00fsync\x00/dev/urandom\x00sqliteplocks\x00/\x00dummy\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00break\x00path error (len %d)\x00read error (len %d)\x00create failed (%d)\x00write failed (%d)\x00rename failed (%d)\x00broke stale lock on %s\n\x00failed to break stale lock on %s, %s\n\x00-conch\x00.lock\x00:auto: (not held)\x00unix\x00unix-none\x00unix-dotfile\x00unix-posix\x00unix-flock\x00unix-afp\x00unix-nfs\x00unix-proxy\x00memdb\x00memdb(%p,%lld)\x00PRAGMA \"%w\".page_count\x00ATTACH x AS %Q\x00recovered %d pages from %s\x00-journal\x00-wal\x00nolock\x00immutable\x00PRAGMA table_list\x00recovered %d frames from WAL file %s\x00cannot limit WAL size: %s\x00SQLite format 3\x00:memory:\x00@ \x00\n\x00invalid page number %d\x002nd reference to page %d\x00Failed to read ptrmap key=%d\x00Bad ptr map entry key=%d expected=(%d,%d) got=(%d,%d)\x00failed to get page %d\x00freelist leaf count too big on page %d\x00%s is %d but should be %d\x00size\x00overflow list length\x00Page %u: \x00unable to get the page. error code=%d\x00btreeInitPage() returns error code %d\x00free space corruption\x00On tree page %u cell %d: \x00On page %u at right child: \x00Offset %d out of range %d..%d\x00Extends off end of page\x00Rowid %lld out of order\x00Child page depth differs\x00Multiple uses for byte %u of page %u\x00Fragmentation of %d bytes reported as %d on page %u\x00Main freelist: \x00max rootpage (%d) disagrees with header (%d)\x00incremental_vacuum enabled with a max rootpage of zero\x00Page %d is never used\x00Pointer map page %d is referenced\x00unknown database %s\x00destination database is in use\x00source and destination must be distinct\x00%!.15g\x00-\x00%s%s\x00k(%d\x00B\x00,%s%s%s\x00N.\x00)\x00%.18s-%s\x00%s(%d)\x00%d\x00(blob)\x00vtab:%p\x00%c%u\x00]\x00program\x00?\x008\x0016LE\x0016BE\x00addr\x00opcode\x00p1\x00p2\x00p3\x00p4\x00p5\x00comment\x00id\x00parent\x00notused\x00detail\x00%.4c%s%.16c\x00MJ delete: %s\x00MJ collide: %s\x00-mj%06X9%02X\x00FOREIGN KEY constraint failed\x00a CHECK constraint\x00a generated column\x00an index\x00non-deterministic use of %s() in %s\x00API called with finalized prepared statement\x00API called with NULL prepared statement\x00string or blob too big\x00bind on a busy prepared statement: [%s]\x00-- \x00'%.*q'\x00zeroblob(%d)\x00x'\x00%02x\x00'\x00%s constraint failed\x00%z: %s\x00abort at %d in [%s]: %s\x00cannot store %s value in %s column %s.%s\x00cannot open savepoint - SQL statements in progress\x00no such savepoint: %s\x00cannot release savepoint - SQL statements in progress\x00cannot commit transaction - SQL statements in progress\x00cannot start a transaction within a transaction\x00cannot rollback - no transaction is active\x00cannot commit - no transaction is active\x00database schema has changed\x00index corruption\x00sqlite_master\x00SELECT*FROM\"%w\".%s WHERE %s ORDER BY rowid\x00too many levels of trigger recursion\x00cannot change %s wal mode from within a transaction\x00into\x00out of\x00database table is locked: %s\x00ValueList\x00-- %s\x00statement aborts at 
%d: [%s] %s\x00NOT NULL\x00UNIQUE\x00CHECK\x00FOREIGN KEY\x00cannot open value of type %s\x00null\x00real\x00integer\x00no such rowid: %lld\x00cannot open virtual table: %s\x00cannot open table without rowid: %s\x00cannot open view: %s\x00no such column: \"%s\"\x00foreign key\x00indexed\x00cannot open %s column for writing\x00sqlite_\x00sqlite_temp_master\x00sqlite_temp_schema\x00sqlite_schema\x00main\x00*\x00new\x00old\x00excluded\x00misuse of aliased aggregate %s\x00misuse of aliased window function %s\x00row value misused\x00double-quoted string literal: \"%w\"\x00coalesce\x00no such column\x00ambiguous column name\x00%s: %s.%s.%s\x00%s: %s.%s\x00%s: %s\x00partial index WHERE clauses\x00index expressions\x00CHECK constraints\x00generated columns\x00%s prohibited in %s\x00the \".\" operator\x00second argument to %#T() must be a constant between 0.0 and 1.0\x00not authorized to use function: %#T\x00non-deterministic functions\x00%#T() may not be used as a window function\x00window\x00aggregate\x00misuse of %s function %#T()\x00no such function: %#T\x00wrong number of arguments to function %#T()\x00FILTER may not be used with non-aggregate %#T()\x00subqueries\x00parameters\x00%r %s BY term out of range - should be between 1 and %d\x00too many terms in ORDER BY clause\x00ORDER\x00%r ORDER BY term does not match any column in the result set\x00too many terms in %s BY clause\x00HAVING clause on a non-aggregate query\x00GROUP\x00aggregate functions are not allowed in the GROUP BY clause\x00Expression tree is too large (maximum depth %d)\x00IN(...) element has %d term%s - expected %d\x00s\x000\x00too many arguments on function %T\x00unsafe use of %#T()\x00variable number must be between ?1 and ?%d\x00too many SQL variables\x00%d columns assigned %d values\x00too many columns in %s\x00true\x00false\x00_ROWID_\x00ROWID\x00OID\x00USING ROWID SEARCH ON TABLE %s FOR IN-OPERATOR\x00USING INDEX %s FOR IN-OPERATOR\x00sub-select returns %d columns - expected %d\x00REUSE LIST SUBQUERY %d\x00%sLIST SUBQUERY %d\x00CORRELATED \x00REUSE SUBQUERY %d\x00%sSCALAR SUBQUERY %d\x001\x000x\x00hex literal too big: %s%#T\x00generated column loop on \"%s\"\x00blob\x00text\x00numeric\x00flexnum\x00none\x00misuse of aggregate: %#T()\x00unknown function: %#T()\x00RAISE() may only be used within a trigger-program\x00B\x00C\x00D\x00E\x00F\x00table %s may not be altered\x00SELECT 1 FROM \"%w\".sqlite_master WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%' AND sqlite_rename_test(%Q, sql, type, name, %d, %Q, %d)=NULL \x00SELECT 1 FROM temp.sqlite_master WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%' AND sqlite_rename_test(%Q, sql, type, name, 1, %Q, %d)=NULL \x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_quotefix(%Q, sql)WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%'\x00UPDATE temp.sqlite_master SET sql = sqlite_rename_quotefix('temp', sql)WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%'\x00there is already another table or index with this name: %s\x00table\x00view %s may not be altered\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, %d) WHERE (type!='index' OR tbl_name=%Q COLLATE nocase)AND name NOT LIKE 'sqliteX_%%' ESCAPE 'X'\x00UPDATE %Q.sqlite_master SET tbl_name = %Q, name = CASE WHEN type='table' THEN %Q WHEN name LIKE 'sqliteX_autoindex%%' ESCAPE 'X' AND type='index' THEN 'sqlite_autoindex_' || %Q || substr(name,%d+18) ELSE name END WHERE 
tbl_name=%Q COLLATE nocase AND (type='table' OR type='index' OR type='trigger');\x00sqlite_sequence\x00UPDATE \"%w\".sqlite_sequence set name = %Q WHERE name = %Q\x00UPDATE sqlite_temp_schema SET sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, 1), tbl_name = CASE WHEN tbl_name=%Q COLLATE nocase AND sqlite_rename_test(%Q, sql, type, name, 1, 'after rename', 0) THEN %Q ELSE tbl_name END WHERE type IN ('view', 'trigger')\x00after rename\x00SELECT raise(ABORT,%Q) FROM \"%w\".\"%w\"\x00Cannot add a PRIMARY KEY column\x00Cannot add a UNIQUE column\x00Cannot add a REFERENCES column with non-NULL default value\x00Cannot add a NOT NULL column with default value NULL\x00Cannot add a column with non-constant default\x00cannot add a STORED column\x00UPDATE \"%w\".sqlite_master SET sql = printf('%%.%ds, ',sql) || %Q || substr(sql,1+length(printf('%%.%ds',sql))) WHERE type = 'table' AND name = %Q\x00SELECT CASE WHEN quick_check GLOB 'CHECK*' THEN raise(ABORT,'CHECK constraint failed') ELSE raise(ABORT,'NOT NULL constraint failed') END FROM pragma_quick_check(%Q,%Q) WHERE quick_check GLOB 'CHECK*' OR quick_check GLOB 'NULL*'\x00virtual tables may not be altered\x00Cannot add a column to a view\x00sqlite_altertab_%s\x00view\x00virtual table\x00cannot %s %s \"%s\"\x00drop column from\x00rename columns of\x00no such column: \"%T\"\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, %d) WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND (type != 'index' OR tbl_name = %Q)\x00UPDATE temp.sqlite_master SET sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, 1) WHERE type IN ('trigger', 'view')\x00error in %s %s%s%s: %s\x00 \x00CREATE \x00\"%w\" \x00%Q%s\x00%.*s%s\x00cannot drop %s column: \"%s\"\x00PRIMARY KEY\x00cannot drop column \"%s\": no other columns exist\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_drop_column(%d, sql, %d) WHERE (type=='table' AND tbl_name=%Q COLLATE nocase)\x00after drop column\x00sqlite_rename_column\x00sqlite_rename_table\x00sqlite_rename_test\x00sqlite_drop_column\x00sqlite_rename_quotefix\x00CREATE TABLE %Q.%s(%s)\x00DELETE FROM %Q.%s WHERE %s=%Q\x00DELETE FROM %Q.%s\x00sqlite_stat1\x00tbl,idx,stat\x00sqlite_stat4\x00tbl,idx,neq,nlt,ndlt,sample\x00sqlite_stat3\x00stat_init\x00stat_push\x00%llu\x00 %llu\x00%llu \x00stat_get\x00sqlite\\_%\x00BBB\x00idx\x00tbl\x00unordered*\x00sz=[0-9]*\x00noskipscan*\x00SELECT idx,count(*) FROM %Q.sqlite_stat4 GROUP BY idx\x00SELECT idx,neq,nlt,ndlt,sample FROM %Q.sqlite_stat4\x00SELECT tbl,idx,stat FROM %Q.sqlite_stat1\x00x\x00\x00too many attached databases - max %d\x00database %s is already in use\x00database is already attached\x00attached databases must use the same text encoding as main database\x00unable to open database: %s\x00no such database: %s\x00cannot detach database %s\x00database %s is locked\x00sqlite_detach\x00sqlite_attach\x00%s cannot use variables\x00%s %T cannot reference objects in database %s\x00authorizer malfunction\x00%s.%s\x00%s.%z\x00access to %z is prohibited\x00not authorized\x00pragma_\x00no such view\x00no such table\x00corrupt database\x00unknown database %T\x00object name reserved for internal use: %s\x00temporary table name must be unqualified\x00%s %T already exists\x00there is already an index named %s\x00sqlite_returning\x00cannot use RETURNING in a trigger\x00too many columns on %s\x00always\x00generated\x00duplicate column name: %s\x00default value of column [%s] is not constant\x00cannot use DEFAULT on a generated column\x00generated columns 
cannot be part of the PRIMARY KEY\x00table \"%s\" has more than one primary key\x00AUTOINCREMENT is only allowed on an INTEGER PRIMARY KEY\x00virtual tables cannot use computed columns\x00virtual\x00stored\x00error in generated column \"%s\"\x00,\x00\n \x00,\n \x00\n)\x00CREATE TABLE \x00 TEXT\x00 NUM\x00 INT\x00 REAL\x00unknown datatype for %s.%s: \"%s\"\x00missing datatype for %s.%s\x00AUTOINCREMENT not allowed on WITHOUT ROWID tables\x00PRIMARY KEY missing on table %s\x00must have at least one non-generated column\x00TABLE\x00VIEW\x00CREATE %s %.*s\x00UPDATE %Q.sqlite_master SET type='%s', name=%Q, tbl_name=%Q, rootpage=#%d, sql=%Q WHERE rowid=#%d\x00CREATE TABLE %Q.sqlite_sequence(name,seq)\x00tbl_name='%q' AND type!='trigger'\x00parameters are not allowed in views\x00view %s is circularly defined\x00corrupt schema\x00UPDATE %Q.sqlite_master SET rootpage=%d WHERE #%d AND rootpage=#%d\x00sqlite_stat%d\x00DELETE FROM %Q.sqlite_sequence WHERE name=%Q\x00DELETE FROM %Q.sqlite_master WHERE tbl_name=%Q and type!='trigger'\x00table %s may not be dropped\x00use DROP TABLE to delete table %s\x00use DROP VIEW to delete view %s\x00foreign key on %s should reference only one column of table %T\x00number of columns in foreign key does not match the number of columns in the referenced table\x00unknown column \"%s\" in foreign key definition\x00unsupported use of NULLS %s\x00FIRST\x00LAST\x00index\x00cannot create a TEMP index on non-TEMP table \"%s\"\x00table %s may not be indexed\x00views may not be indexed\x00virtual tables may not be indexed\x00there is already a table named %s\x00index %s already exists\x00sqlite_autoindex_%s_%d\x00expressions prohibited in PRIMARY KEY and UNIQUE constraints\x00conflicting ON CONFLICT clauses specified\x00invalid rootpage\x00CREATE%s INDEX %.*s\x00 UNIQUE\x00INSERT INTO %Q.sqlite_master VALUES('index',%Q,%Q,#%d,%Q);\x00name='%q' AND type='index'\x00no such index: %S\x00index associated with UNIQUE or PRIMARY KEY constraint cannot be dropped\x00DELETE FROM %Q.sqlite_master WHERE name=%Q AND type='index'\x00too many FROM clause terms, max: %d\x00a JOIN clause is required before %s\x00ON\x00USING\x00BEGIN\x00ROLLBACK\x00COMMIT\x00RELEASE\x00unable to open a temporary database file for storing temporary tables\x00index '%q'\x00, \x00%s.rowid\x00unable to identify the object to be reindexed\x00duplicate WITH table name: %s\x00no such collation sequence: %s\x00unsafe use of virtual table \"%s\"\x00table %s may not be modified\x00cannot modify %s because it is a view\x00rows deleted\x00integer overflow\x00%.*f\x00LIKE or GLOB pattern too complex\x00ESCAPE expression must be a single 
character\x00%!.20e\x00%Q\x00?000\x00MATCH\x00like\x00implies_nonnull_row\x00expr_compare\x00expr_implies_expr\x00affinity\x00soundex\x00load_extension\x00sqlite_compileoption_used\x00sqlite_compileoption_get\x00unlikely\x00likelihood\x00likely\x00sqlite_offset\x00ltrim\x00rtrim\x00trim\x00min\x00max\x00typeof\x00subtype\x00length\x00instr\x00printf\x00format\x00unicode\x00char\x00abs\x00round\x00upper\x00lower\x00hex\x00unhex\x00ifnull\x00random\x00randomblob\x00nullif\x00sqlite_version\x00sqlite_source_id\x00sqlite_log\x00quote\x00last_insert_rowid\x00changes\x00total_changes\x00replace\x00zeroblob\x00substr\x00substring\x00sum\x00total\x00avg\x00count\x00group_concat\x00glob\x00ceil\x00ceiling\x00floor\x00trunc\x00ln\x00log\x00log10\x00log2\x00exp\x00pow\x00power\x00mod\x00acos\x00asin\x00atan\x00atan2\x00cos\x00sin\x00tan\x00cosh\x00sinh\x00tanh\x00acosh\x00asinh\x00atanh\x00sqrt\x00radians\x00degrees\x00pi\x00sign\x00iif\x00foreign key mismatch - \"%w\" referencing \"%w\"\x00cannot INSERT into generated column \"%s\"\x00table %S has no column named %s\x00table %S has %d columns but %d values were supplied\x00%d values for %d columns\x00UPSERT not implemented for virtual table \"%s\"\x00cannot UPSERT a view\x00rows inserted\x00sqlite3_extension_init\x00sqlite3_\x00lib\x00_init\x00no entry point [%s] in shared library [%s]\x00error during initialization: %s\x00unable to open shared library [%.*s]\x00dylib\x00automatic extension loading failed: %s\x00seq\x00from\x00to\x00on_update\x00on_delete\x00match\x00cid\x00name\x00type\x00notnull\x00dflt_value\x00pk\x00hidden\x00schema\x00ncol\x00wr\x00strict\x00seqno\x00desc\x00coll\x00key\x00builtin\x00enc\x00narg\x00flags\x00wdth\x00hght\x00flgs\x00unique\x00origin\x00partial\x00rowid\x00fkid\x00file\x00busy\x00checkpointed\x00database\x00status\x00cache_size\x00timeout\x00analysis_limit\x00application_id\x00auto_vacuum\x00automatic_index\x00busy_timeout\x00cache_spill\x00case_sensitive_like\x00cell_size_check\x00checkpoint_fullfsync\x00collation_list\x00compile_options\x00count_changes\x00data_version\x00database_list\x00default_cache_size\x00defer_foreign_keys\x00empty_result_callbacks\x00encoding\x00foreign_key_check\x00foreign_key_list\x00foreign_keys\x00freelist_count\x00full_column_names\x00fullfsync\x00function_list\x00hard_heap_limit\x00ignore_check_constraints\x00incremental_vacuum\x00index_info\x00index_list\x00index_xinfo\x00integrity_check\x00journal_mode\x00journal_size_limit\x00legacy_alter_table\x00lock_proxy_file\x00locking_mode\x00max_page_count\x00mmap_size\x00module_list\x00optimize\x00page_count\x00page_size\x00pragma_list\x00query_only\x00quick_check\x00read_uncommitted\x00recursive_triggers\x00reverse_unordered_selects\x00schema_version\x00secure_delete\x00short_column_names\x00shrink_memory\x00soft_heap_limit\x00synchronous\x00table_info\x00table_list\x00table_xinfo\x00temp_store\x00temp_store_directory\x00threads\x00trusted_schema\x00user_version\x00wal_autocheckpoint\x00wal_checkpoint\x00writable_schema\x00onoffalseyestruextrafull\x00exclusive\x00normal\x00full\x00incremental\x00memory\x00temporary storage cannot be changed from within a transaction\x00SET NULL\x00SET DEFAULT\x00CASCADE\x00RESTRICT\x00NO ACTION\x00delete\x00persist\x00off\x00truncate\x00wal\x00w\x00a\x00sissii\x00utf8\x00utf16le\x00utf16be\x00-%T\x00fast\x00not a writable directory\x00failed to set lock proxy file\x00Safety level may not be changed inside a 
transaction\x00reset\x00issisii\x00issisi\x00SELECT*FROM\"%w\"\x00shadow\x00sssiii\x00iisX\x00isiX\x00c\x00u\x00isisi\x00iss\x00is\x00iissssss\x00NONE\x00siX\x00*** in database %s ***\n\x00row not in PRIMARY KEY order for %s\x00NULL value in %s.%s\x00non-%s value in %s.%s\x00NUMERIC value in %s.%s\x00C\x00TEXT value in %s.%s\x00CHECK constraint failed in %s\x00row \x00 missing from index \x00rowid not at end-of-record for row \x00 of index \x00 values differ from index \x00non-unique entry in index \x00wrong # of entries in index \x00ok\x00unsupported encoding: %s\x00restart\x00ANALYZE \"%w\".\"%w\"\x00UTF8\x00UTF-8\x00UTF-16le\x00UTF-16be\x00UTF16le\x00UTF16be\x00UTF-16\x00UTF16\x00CREATE TABLE x\x00%c\"%s\"\x00(\"%s\"\x00,arg HIDDEN\x00,schema HIDDEN\x00PRAGMA \x00%Q.\x00=%Q\x00error in %s %s after %s: %s\x00malformed database schema (%s)\x00%z - %s\x00rename\x00drop column\x00add column\x00orphan index\x00CREATE TABLE x(type text,name text,tbl_name text,rootpage int,sql text)\x00unsupported file format\x00SELECT*FROM\"%w\".%s ORDER BY rowid\x00database schema is locked: %s\x00statement too long\x00unknown join type: %T%s%T%s%T\x00naturaleftouterightfullinnercross\x00a NATURAL join may not have an ON or USING clause\x00cannot join using column %s - column not present in both tables\x00ambiguous reference to %s in USING()\x00UNION ALL\x00INTERSECT\x00EXCEPT\x00UNION\x00USE TEMP B-TREE FOR %s\x00USE TEMP B-TREE FOR %sORDER BY\x00RIGHT PART OF \x00column%d\x00%.*z:%u\x00NUM\x00cannot use window functions in recursive queries\x00recursive aggregate queries not supported\x00SETUP\x00RECURSIVE STEP\x00SCAN %d CONSTANT ROW%s\x00COMPOUND QUERY\x00LEFT-MOST SUBQUERY\x00%s USING TEMP B-TREE\x00all VALUES must have the same number of terms\x00SELECTs to the left and right of %s do not have the same number of result columns\x00MERGE (%s)\x00LEFT\x00RIGHT\x00no such index: %s\x00'%s' is not a function\x00no such index: \"%s\"\x00multiple references to recursive table: %s\x00circular reference: %s\x00table %s has %d values for %d columns\x00multiple recursive references: %s\x00recursive reference in a subquery: %s\x00%!S\x00too many references to \"%s\": max 65535\x00access to view \"%s\" prohibited\x00..%s\x00%s.%s.%s\x00no such table: %s\x00no tables specified\x00too many columns in result set\x00DISTINCT aggregates must have exactly one argument\x00USE TEMP B-TREE FOR %s(DISTINCT)\x00SCAN %s%s%s\x00 USING COVERING INDEX \x00target object/alias may not appear in FROM clause: %s\x00expected %d columns for '%s' but got %d\x00CO-ROUTINE %!S\x00MATERIALIZE %!S\x00DISTINCT\x00GROUP BY\x00sqlite3_get_table() called with two or more incompatible queries\x00temporary trigger may not have qualified name\x00trigger\x00cannot create triggers on virtual tables\x00trigger %T already exists\x00cannot create trigger on system table\x00cannot create %s trigger on view: %S\x00BEFORE\x00AFTER\x00cannot create INSTEAD OF trigger on table: %S\x00trigger \"%s\" may not write to shadow table \"%s\"\x00INSERT INTO %Q.sqlite_master VALUES('trigger',%Q,%Q,0,'CREATE TRIGGER %q')\x00type='trigger' AND name='%q'\x00no such trigger: %S\x00DELETE FROM %Q.sqlite_master WHERE name=%Q AND type='trigger'\x00%s RETURNING is not available on virtual tables\x00DELETE\x00UPDATE\x00RETURNING may not use \"TABLE.*\" wildcards\x00-- TRIGGER %s\x00cannot UPDATE generated column \"%s\"\x00no such column: %s\x00rows updated\x00%r \x00%sON CONFLICT clause does not match any PRIMARY KEY or UNIQUE constraint\x00CRE\x00INS\x00cannot VACUUM from 
within a transaction\x00cannot VACUUM - SQL statements in progress\x00non-text filename\x00ATTACH %Q AS vacuum_db\x00output file already exists\x00SELECT sql FROM \"%w\".sqlite_schema WHERE type='table'AND name<>'sqlite_sequence' AND coalesce(rootpage,1)>0\x00SELECT sql FROM \"%w\".sqlite_schema WHERE type='index'\x00SELECT'INSERT INTO vacuum_db.'||quote(name)||' SELECT*FROM\"%w\".'||quote(name)FROM vacuum_db.sqlite_schema WHERE type='table'AND coalesce(rootpage,1)>0\x00INSERT INTO vacuum_db.sqlite_schema SELECT*FROM \"%w\".sqlite_schema WHERE type IN('view','trigger') OR(type='table'AND rootpage=0)\x00CREATE VIRTUAL TABLE %T\x00UPDATE %Q.sqlite_master SET type='table', name=%Q, tbl_name=%Q, rootpage=0, sql=%Q WHERE rowid=#%d\x00name=%Q AND sql=%Q\x00vtable constructor called recursively: %s\x00vtable constructor failed: %s\x00vtable constructor did not declare schema: %s\x00no such module: %s\x00\x00 AND \x00(\x00 (\x00%s=?\x00ANY(%s)\x00>\x00<\x00%s %S\x00SEARCH\x00SCAN\x00AUTOMATIC PARTIAL COVERING INDEX\x00AUTOMATIC COVERING INDEX\x00COVERING INDEX %s\x00INDEX %s\x00 USING \x00 USING INTEGER PRIMARY KEY (%s\x00>? AND %s\x00%c?)\x00 VIRTUAL TABLE INDEX %d:%s\x00 LEFT-JOIN\x00BLOOM FILTER ON %S (\x00rowid=?\x00MULTI-INDEX OR\x00INDEX %d\x00RIGHT-JOIN %s\x00regexp\x00ON clause references tables to its right\x00NOCASE\x00too many arguments on %s() - max %d\x00automatic index on %s(%s)\x00auto-index\x00%s.xBestIndex malfunction\x00abbreviated query algorithm search\x00no query solution\x00at most %d tables in a join\x00SCAN CONSTANT ROW\x00second argument to nth_value must be a positive integer\x00argument of ntile must be a positive integer\x00row_number\x00dense_rank\x00rank\x00percent_rank\x00cume_dist\x00ntile\x00last_value\x00nth_value\x00first_value\x00lead\x00lag\x00no such window: %s\x00RANGE with offset PRECEDING/FOLLOWING requires one ORDER BY expression\x00FILTER clause may only be used with aggregate window functions\x00misuse of aggregate: %s()\x00unsupported frame specification\x00PARTITION clause\x00ORDER BY clause\x00frame specification\x00cannot override %s of window: %s\x00DISTINCT is not supported for window functions\x00frame starting offset must be a non-negative integer\x00frame ending offset must be a non-negative integer\x00frame starting offset must be a non-negative number\x00frame ending offset must be a non-negative number\x00%s clause should come after %s not before\x00ORDER BY\x00LIMIT\x00too many terms in compound SELECT\x00syntax error after column name \"%.*s\"\x00parser stack overflow\x00unknown table option: %.*s\x00set list\x00near \"%T\": syntax error\x00qualified table names are not allowed on INSERT, UPDATE, and DELETE statements within triggers\x00the INDEXED BY clause is not allowed on UPDATE or DELETE statements within triggers\x00the NOT INDEXED clause is not allowed on UPDATE or DELETE statements within triggers\x00incomplete input\x00unrecognized token: \"%T\"\x00%s in \"%s\"\x00create\x00temp\x00temporary\x00end\x00explain\x00unable to close due to unfinalized statements or unfinished backups\x00unknown error\x00abort due to ROLLBACK\x00another row available\x00no more rows available\x00not an error\x00SQL logic error\x00access permission denied\x00query aborted\x00database is locked\x00database table is locked\x00attempt to write a readonly database\x00interrupted\x00disk I/O error\x00database disk image is malformed\x00unknown operation\x00database or disk is full\x00unable to open database file\x00locking protocol\x00constraint 
failed\x00datatype mismatch\x00bad parameter or other API misuse\x00authorization denied\x00column index out of range\x00file is not a database\x00notification message\x00warning message\x00unable to delete/modify user-function due to active statements\x00unable to use function %s in the requested context\x00unknown database: %s\x00unable to delete/modify collation sequence due to active statements\x00file:\x00localhost\x00invalid uri authority: %.*s\x00vfs\x00cache\x00mode\x00no such %s mode: %s\x00%s mode not allowed: %s\x00no such vfs: %s\x00shared\x00private\x00ro\x00rw\x00rwc\x00RTRIM\x00\x00\x00\x00%s at line %d of [%.10s]\x00database corruption\x00misuse\x00cannot open file\x00no such table column: %s.%s\x00SQLITE_\x00database is deadlocked\x00array\x00object\x000123456789abcdef\x00JSON cannot hold BLOB values\x00malformed JSON\x00[0]\x00JSON path error near '%q'\x00json_%s() needs an odd number of arguments\x00$[\x00$.\x00json_object() requires an even number of arguments\x00json_object() labels must be TEXT\x00set\x00insert\x00[]\x00{}\x00CREATE TABLE x(key,value,type,atom,id,parent,fullkey,path,json HIDDEN,root HIDDEN)\x00.%.*s\x00[%d]\x00$\x00json\x00json_array\x00json_array_length\x00json_extract\x00->\x00->>\x00json_insert\x00json_object\x00json_patch\x00json_quote\x00json_remove\x00json_replace\x00json_set\x00json_type\x00json_valid\x00json_group_array\x00json_group_object\x00json_each\x00json_tree\x00%s_node\x00data\x00DROP TABLE '%q'.'%q_node';DROP TABLE '%q'.'%q_rowid';DROP TABLE '%q'.'%q_parent';\x00RtreeMatchArg\x00SELECT * FROM %Q.%Q\x00UNIQUE constraint failed: %s.%s\x00rtree constraint failed: %s.(%s<=%s)\x00ALTER TABLE %Q.'%q_node' RENAME TO \"%w_node\";ALTER TABLE %Q.'%q_parent' RENAME TO \"%w_parent\";ALTER TABLE %Q.'%q_rowid' RENAME TO \"%w_rowid\";\x00SELECT stat FROM %Q.sqlite_stat1 WHERE tbl = '%q_rowid'\x00node\x00CREATE TABLE \"%w\".\"%w_rowid\"(rowid INTEGER PRIMARY KEY,nodeno\x00,a%d\x00);CREATE TABLE \"%w\".\"%w_node\"(nodeno INTEGER PRIMARY KEY,data);\x00CREATE TABLE \"%w\".\"%w_parent\"(nodeno INTEGER PRIMARY KEY,parentnode);\x00INSERT INTO \"%w\".\"%w_node\"VALUES(1,zeroblob(%d))\x00INSERT INTO\"%w\".\"%w_rowid\"(rowid,nodeno)VALUES(?1,?2)ON CONFLICT(rowid)DO UPDATE SET nodeno=excluded.nodeno\x00SELECT * FROM \"%w\".\"%w_rowid\" WHERE rowid=?1\x00UPDATE \"%w\".\"%w_rowid\"SET \x00a%d=coalesce(?%d,a%d)\x00a%d=?%d\x00 WHERE rowid=?1\x00INSERT OR REPLACE INTO '%q'.'%q_node' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_node' WHERE nodeno = ?1\x00SELECT nodeno FROM '%q'.'%q_rowid' WHERE rowid = ?1\x00INSERT OR REPLACE INTO '%q'.'%q_rowid' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_rowid' WHERE rowid = ?1\x00SELECT parentnode FROM '%q'.'%q_parent' WHERE nodeno = ?1\x00INSERT OR REPLACE INTO '%q'.'%q_parent' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_parent' WHERE nodeno = ?1\x00PRAGMA %Q.page_size\x00SELECT length(data) FROM '%q'.'%q_node' WHERE nodeno = 1\x00undersize RTree blobs in \"%q_node\"\x00Wrong number of columns for an rtree table\x00Too few columns for an rtree table\x00Too many columns for an rtree table\x00Auxiliary rtree columns must be last\x00CREATE TABLE x(%.*s INT\x00,%.*s\x00);\x00,%.*s REAL\x00,%.*s INT\x00{%lld\x00 %g\x00}\x00Invalid argument to rtreedepth()\x00%z%s%z\x00SELECT data FROM %Q.'%q_node' WHERE nodeno=?\x00Node %lld missing from database\x00SELECT parentnode FROM %Q.'%q_parent' WHERE nodeno=?1\x00SELECT nodeno FROM %Q.'%q_rowid' WHERE rowid=?1\x00Mapping (%lld -> %lld) missing from %s table\x00%_rowid\x00%_parent\x00Found (%lld -> %lld) in 
%s table, expected (%lld -> %lld)\x00Dimension %d of cell %d on node %lld is corrupt\x00Dimension %d of cell %d on node %lld is corrupt relative to parent\x00Node %lld is too small (%d bytes)\x00Rtree depth out of range (%d)\x00Node %lld is too small for cell count of %d (%d bytes)\x00SELECT count(*) FROM %Q.'%q%s'\x00Wrong number of entries in %%%s table - expected %lld, actual %lld\x00SELECT * FROM %Q.'%q_rowid'\x00Schema corrupt or not an rtree\x00_rowid\x00_parent\x00END\x00wrong number of arguments to function rtreecheck()\x00[\x00[%!g,%!g],\x00[%!g,%!g]]\x00\x00CREATE TABLE x(_shape\x00,%s\x00rtree\x00fullscan\x00_shape does not contain a valid polygon\x00geopoly_overlap\x00geopoly_within\x00geopoly\x00geopoly_area\x00geopoly_blob\x00geopoly_json\x00geopoly_svg\x00geopoly_contains_point\x00geopoly_debug\x00geopoly_bbox\x00geopoly_xform\x00geopoly_regular\x00geopoly_ccw\x00geopoly_group_bbox\x00rtreenode\x00rtreedepth\x00rtreecheck\x00rtree_i32\x00corrupt fossil delta\x00DROP TRIGGER IF EXISTS temp.rbu_insert_tr;DROP TRIGGER IF EXISTS temp.rbu_update1_tr;DROP TRIGGER IF EXISTS temp.rbu_update2_tr;DROP TRIGGER IF EXISTS temp.rbu_delete_tr;\x00SELECT rbu_target_name(name, type='view') AS target, name FROM sqlite_schema WHERE type IN ('table', 'view') AND target IS NOT NULL %s ORDER BY name\x00AND rootpage!=0 AND rootpage IS NOT NULL\x00SELECT name, rootpage, sql IS NULL OR substr(8, 6)=='UNIQUE' FROM main.sqlite_schema WHERE type='index' AND tbl_name = ?\x00SELECT (sql COLLATE nocase BETWEEN 'CREATE VIRTUAL' AND 'CREATE VIRTUAM'), rootpage FROM sqlite_schema WHERE name=%Q\x00PRAGMA index_list=%Q\x00SELECT rootpage FROM sqlite_schema WHERE name = %Q\x00PRAGMA table_info=%Q\x00PRAGMA main.index_list = %Q\x00PRAGMA main.index_xinfo = %Q\x00SELECT * FROM '%q'\x00rbu_\x00rbu_rowid\x00table %q %s rbu_rowid column\x00may not have\x00requires\x00PRAGMA table_info(%Q)\x00column missing from %q: %s\x00%z%s\"%w\"\x00%z%s%s\"%w\"%s\x00SELECT max(_rowid_) FROM \"%s%w\"\x00 WHERE _rowid_ > %lld \x00 DESC\x00quote(\x00||','||\x00SELECT %s FROM \"%s%w\" ORDER BY %s LIMIT 1\x00 WHERE (%s) > (%s) \x00_rowid_\x00%z%s \"%w\" COLLATE %Q\x00%z%s \"rbu_imp_%d%w\" COLLATE %Q DESC\x00%z%s quote(\"rbu_imp_%d%w\")\x00SELECT %s FROM \"rbu_imp_%w\" ORDER BY %s LIMIT 1\x00%z%s%s\x00(%s) > (%s)\x00%z%s(%.*s) COLLATE %Q\x00%z%s\"%w\" COLLATE %Q\x00%z%s\"rbu_imp_%d%w\"%s\x00%z%s\"rbu_imp_%d%w\" %s COLLATE %Q\x00%z%s\"rbu_imp_%d%w\" IS ?\x00%z%s%s.\"%w\"\x00%z%sNULL\x00%z, %s._rowid_\x00_rowid_ = ?%d\x00%z%sc%d=?%d\x00_rowid_ = (SELECT id FROM rbu_imposter2 WHERE %z)\x00%z%s\"%w\"=?%d\x00invalid rbu_control value\x00%z%s\"%w\"=rbu_delta(\"%w\", ?%d)\x00%z%s\"%w\"=rbu_fossil_delta(\"%w\", ?%d)\x00PRIMARY KEY(\x00%z%s\"%w\"%s\x00%z)\x00SELECT name FROM sqlite_schema WHERE rootpage = ?\x00%z%sc%d %s COLLATE %Q\x00%z%sc%d%s\x00%z, id INTEGER\x00CREATE TABLE rbu_imposter2(%z, PRIMARY KEY(%z)) WITHOUT ROWID\x00PRIMARY KEY \x00%z%s\"%w\" %s %sCOLLATE %Q%s\x00 NOT NULL\x00%z, %z\x00CREATE TABLE \"rbu_imp_%w\"(%z)%s\x00 WITHOUT ROWID\x00INSERT INTO %s.'rbu_tmp_%q'(rbu_control,%s%s) VALUES(%z)\x00SELECT trim(sql) FROM sqlite_schema WHERE type='index' AND name=?\x00 LIMIT -1 OFFSET %d\x00CREATE TABLE \"rbu_imp_%w\"( %s, PRIMARY KEY( %s ) ) WITHOUT ROWID\x00INSERT INTO \"rbu_imp_%w\" VALUES(%s)\x00DELETE FROM \"rbu_imp_%w\" WHERE %s\x00SELECT %s, 0 AS rbu_control FROM '%q' %s %s %s ORDER BY %s%s\x00AND\x00WHERE\x00SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' %s ORDER BY %s%s\x00SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' %s UNION 
ALL SELECT %s, rbu_control FROM '%q' %s %s typeof(rbu_control)='integer' AND rbu_control!=1 ORDER BY %s%s\x00rbu_imp_\x00INSERT INTO \"%s%w\"(%s%s) VALUES(%s)\x00, _rowid_\x00DELETE FROM \"%s%w\" WHERE %s\x00, rbu_rowid\x00CREATE TABLE IF NOT EXISTS %s.'rbu_tmp_%q' AS SELECT *%s FROM '%q' WHERE 0;\x00, 0 AS rbu_rowid\x00CREATE TEMP TRIGGER rbu_delete_tr BEFORE DELETE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(3, %s);END;CREATE TEMP TRIGGER rbu_update1_tr BEFORE UPDATE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(3, %s);END;CREATE TEMP TRIGGER rbu_update2_tr AFTER UPDATE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(4, %s);END;\x00CREATE TEMP TRIGGER rbu_insert_tr AFTER INSERT ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(0, %s);END;\x00,_rowid_ \x00,rbu_rowid\x00SELECT %s,%s rbu_control%s FROM '%q'%s %s %s %s\x000 AS \x00UPDATE \"%s%w\" SET %s WHERE %s\x00SELECT k, v FROM %s.rbu_state\x00file:///%s-vacuum?modeof=%s\x00ATTACH %Q AS stat\x00CREATE TABLE IF NOT EXISTS %s.rbu_state(k INTEGER PRIMARY KEY, v)\x00cannot vacuum wal mode database\x00file:%s-vactmp?rbu_memory=1%s%s\x00&\x00rbu_tmp_insert\x00rbu_fossil_delta\x00rbu_target_name\x00SELECT * FROM sqlite_schema\x00rbu vfs not found\x00PRAGMA main.wal_checkpoint=restart\x00rbu_exclusive_checkpoint\x00%s-oal\x00%s-wal\x00PRAGMA schema_version\x00PRAGMA schema_version = %d\x00INSERT OR REPLACE INTO %s.rbu_state(k, v) VALUES (%d, %d), (%d, %Q), (%d, %Q), (%d, %d), (%d, %d), (%d, %lld), (%d, %lld), (%d, %lld), (%d, %lld), (%d, %Q) \x00PRAGMA main.%s\x00PRAGMA main.%s = %d\x00PRAGMA writable_schema=1\x00SELECT sql FROM sqlite_schema WHERE sql!='' AND rootpage!=0 AND name!='sqlite_sequence' ORDER BY type DESC\x00SELECT * FROM sqlite_schema WHERE rootpage=0 OR rootpage IS NULL\x00INSERT INTO sqlite_schema VALUES(?,?,?,?,?)\x00PRAGMA writable_schema=0\x00DELETE FROM %s.'rbu_tmp_%q'\x00rbu_state mismatch error\x00rbu_vfs_%d\x00SELECT count(*) FROM sqlite_schema WHERE type='index' AND tbl_name = %Q\x00rbu_index_cnt\x00SELECT 1 FROM sqlite_schema WHERE tbl_name = 'rbu_count'\x00SELECT sum(cnt * (1 + rbu_index_cnt(rbu_target_name(tbl))))FROM rbu_count\x00cannot update wal mode database\x00database modified during rbu %s\x00vacuum\x00update\x00BEGIN IMMEDIATE\x00PRAGMA journal_mode=off\x00-vactmp\x00DELETE FROM stat.rbu_state\x00rbu/zipvfs setup error\x00rbu(%s)/%z\x00rbu_memory\x00SELECT 0, 'tbl', '', 0, '', 1 UNION ALL SELECT 1, 'idx', '', 0, '', 2 UNION ALL SELECT 2, 'stat', '', 0, '', 0\x00PRAGMA '%q'.table_info('%q')\x00%z%s\"%w\".\"%w\".\"%w\"=\"%w\".\"%w\".\"%w\"\x00%z%s\"%w\".\"%w\".\"%w\" IS NOT \"%w\".\"%w\".\"%w\"\x00 OR \x00SELECT * FROM \"%w\".\"%w\" WHERE NOT EXISTS ( SELECT 1 FROM \"%w\".\"%w\" WHERE %s)\x00SELECT * FROM \"%w\".\"%w\", \"%w\".\"%w\" WHERE %s AND (%z)\x00table schemas do not match\x00SELECT tbl, ?2, stat FROM %Q.sqlite_stat1 WHERE tbl IS ?1 AND idx IS (CASE WHEN ?2=X'' THEN NULL ELSE ?2 END)\x00SELECT * FROM \x00 WHERE \x00 IS ?\x00SAVEPOINT changeset\x00RELEASE changeset\x00UPDATE main.\x00 SET \x00 = ?\x00idx IS CASE WHEN length(?4)=0 AND typeof(?4)='blob' THEN NULL ELSE ?4 END \x00DELETE FROM main.\x00 AND (?\x00AND \x00INSERT INTO main.\x00) VALUES(?\x00, ?\x00INSERT INTO main.sqlite_stat1 VALUES(?1, CASE WHEN length(?2)=0 AND typeof(?2)='blob' THEN NULL ELSE ?2 END, ?3)\x00DELETE FROM main.sqlite_stat1 WHERE tbl=?1 AND idx IS CASE WHEN length(?2)=0 AND typeof(?2)='blob' THEN NULL ELSE ?2 END AND (?4 OR stat IS ?3)\x00SAVEPOINT replace_op\x00RELEASE replace_op\x00SAVEPOINT changeset_apply\x00PRAGMA defer_foreign_keys = 
1\x00sqlite3changeset_apply(): no such table: %s\x00sqlite3changeset_apply(): table %s has %d columns, expected %d or more\x00sqlite3changeset_apply(): primary key mismatch for table %s\x00PRAGMA defer_foreign_keys = 0\x00RELEASE changeset_apply\x00ROLLBACK TO changeset_apply\x00fts5: parser stack overflow\x00fts5: syntax error near \"%.*s\"\x00%z%.*s\x00wrong number of arguments to function highlight()\x00wrong number of arguments to function snippet()\x00snippet\x00highlight\x00bm25\x00prefix\x00malformed prefix=... directive\x00too many prefix indexes (max %d)\x00prefix length out of range (max 999)\x00tokenize\x00multiple tokenize=... directives\x00parse error in tokenize directive\x00content\x00multiple content=... directives\x00%Q.%Q\x00content_rowid\x00multiple content_rowid=... directives\x00columnsize\x00malformed columnsize=... directive\x00columns\x00malformed detail=... directive\x00unrecognized option: \"%.*s\"\x00reserved fts5 column name: %s\x00unindexed\x00unrecognized column option: %s\x00T.%Q\x00, T.%Q\x00, T.c%d\x00reserved fts5 table name: %s\x00parse error in \"%s\"\x00docsize\x00%Q.'%q_%s'\x00CREATE TABLE x(\x00%z%s%Q\x00%z, %Q HIDDEN, %s HIDDEN)\x00pgsz\x00hashsize\x00automerge\x00usermerge\x00crisismerge\x00SELECT k, v FROM %Q.'%q_config'\x00version\x00invalid fts5 file format (found %d, expected %d) - run 'rebuild'\x00unterminated string\x00fts5: syntax error near \"%.1s\"\x00OR\x00NOT\x00NEAR\x00expected integer, got \"%.*s\"\x00fts5: column queries are not supported (detail=none)\x00fts5: %s queries are not supported (detail!=full)\x00phrase\x00block\x00REPLACE INTO '%q'.'%q_data'(id, block) VALUES(?,?)\x00DELETE FROM '%q'.'%q_data' WHERE id>=? AND id<=?\x00DELETE FROM '%q'.'%q_idx' WHERE segid=?\x00PRAGMA %Q.data_version\x00SELECT pgno FROM '%q'.'%q_idx' WHERE segid=? AND term<=? ORDER BY term DESC LIMIT 1\x00INSERT INTO '%q'.'%q_idx'(segid,term,pgno) VALUES(?,?,?)\x00%s_data\x00id INTEGER PRIMARY KEY, block BLOB\x00segid, term, pgno, PRIMARY KEY(segid, term)\x00SELECT segid, term, (pgno>>1), (pgno&1) FROM %Q.'%q_idx' WHERE segid=%d ORDER BY 1, 2\x00\x00\x00\x00\x00\x00recursively defined fts5 content table\x00SELECT rowid, rank FROM %Q.%Q ORDER BY %s(\"%w\"%s%s) %s\x00DESC\x00ASC\x00reads\x00unknown special query: %.*s\x00SELECT %s\x00no such function: %s\x00parse error in rank function: %s\x00%s: table does not support scanning\x00delete-all\x00'delete-all' may only be used with a contentless or external content fts5 table\x00rebuild\x00'rebuild' may not be used with a contentless fts5 table\x00merge\x00integrity-check\x00cannot %s contentless fts5 table: %s\x00DELETE from\x00no such cursor: %lld\x00no such tokenizer: %s\x00error in tokenizer constructor\x00fts5_api_ptr\x00fts5: 2023-03-22 11:56:21 0d1fc92f94cb6b76bffe3ec34d69cffde2924203304e8ffc4155597af0c191da\x00config\x00fts5\x00fts5_source_id\x00SELECT %s FROM %s T WHERE T.%Q >= ? AND T.%Q <= ? ORDER BY T.%Q ASC\x00SELECT %s FROM %s T WHERE T.%Q <= ? AND T.%Q >= ? 
ORDER BY T.%Q DESC\x00SELECT %s FROM %s T WHERE T.%Q=?\x00INSERT INTO %Q.'%q_content' VALUES(%s)\x00REPLACE INTO %Q.'%q_content' VALUES(%s)\x00DELETE FROM %Q.'%q_content' WHERE id=?\x00REPLACE INTO %Q.'%q_docsize' VALUES(?,?)\x00DELETE FROM %Q.'%q_docsize' WHERE id=?\x00SELECT sz FROM %Q.'%q_docsize' WHERE id=?\x00REPLACE INTO %Q.'%q_config' VALUES(?,?)\x00SELECT %s FROM %s AS T\x00DROP TABLE IF EXISTS %Q.'%q_data';DROP TABLE IF EXISTS %Q.'%q_idx';DROP TABLE IF EXISTS %Q.'%q_config';\x00DROP TABLE IF EXISTS %Q.'%q_docsize';\x00DROP TABLE IF EXISTS %Q.'%q_content';\x00ALTER TABLE %Q.'%q_%s' RENAME TO '%q_%s';\x00CREATE TABLE %Q.'%q_%q'(%s)%s\x00fts5: error creating shadow table %q_%s: %s\x00id INTEGER PRIMARY KEY\x00, c%d\x00id INTEGER PRIMARY KEY, sz BLOB\x00k PRIMARY KEY, v\x00DELETE FROM %Q.'%q_data';DELETE FROM %Q.'%q_idx';\x00DELETE FROM %Q.'%q_docsize';\x00SELECT count(*) FROM %Q.'%q_%s'\x00tokenchars\x00separators\x00L* N* Co\x00categories\x00remove_diacritics\x00unicode61\x00al\x00ance\x00ence\x00er\x00ic\x00able\x00ible\x00ant\x00ement\x00ment\x00ent\x00ion\x00ou\x00ism\x00ate\x00iti\x00ous\x00ive\x00ize\x00at\x00bl\x00ble\x00iz\x00ational\x00tional\x00tion\x00enci\x00anci\x00izer\x00logi\x00bli\x00alli\x00entli\x00eli\x00e\x00ousli\x00ization\x00ation\x00ator\x00alism\x00iveness\x00fulness\x00ful\x00ousness\x00aliti\x00iviti\x00biliti\x00ical\x00ness\x00icate\x00iciti\x00ative\x00alize\x00eed\x00ee\x00ed\x00ing\x00case_sensitive\x00ascii\x00porter\x00trigram\x00col\x00row\x00instance\x00fts5vocab: unknown table type: %Q\x00CREATE TABlE vocab(term, col, doc, cnt)\x00CREATE TABlE vocab(term, doc, cnt)\x00CREATE TABlE vocab(term, doc, col, offset)\x00wrong number of vtable arguments\x00recursive definition for %s.%s\x00SELECT t.%Q FROM %Q.%Q AS t WHERE t.%Q MATCH '*id'\x00no such fts5 table: %s.%s\x00fts5vocab\x002023-03-22 11:56:21 0d1fc92f94cb6b76bffe3ec34d69cffde2924203304e8ffc4155597af0c191da\x00" var ts = (*reflect.StringHeader)(unsafe.Pointer(&ts1)).Data diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/sqlite/lib/sqlite_darwin_arm64.go temporal-1.22.5/src/vendor/modernc.org/sqlite/lib/sqlite_darwin_arm64.go --- temporal-1.21.5-1/src/vendor/modernc.org/sqlite/lib/sqlite_darwin_arm64.go 2023-09-29 14:03:35.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/sqlite/lib/sqlite_darwin_arm64.go 2024-02-23 09:46:16.000000000 +0000 @@ -1,4 +1,4 @@ -// Code generated by 'ccgo -DSQLITE_PRIVATE= -export-defines "" -export-enums "" -export-externs X -export-fields F -export-typedefs "" -ignore-unsupported-alignment -pkgname sqlite3 -volatile=sqlite3_io_error_pending,sqlite3_open_file_count,sqlite3_pager_readdb_count,sqlite3_pager_writedb_count,sqlite3_pager_writej_count,sqlite3_search_count,sqlite3_sort_count,saved_cnt,randomnessPid -o lib/sqlite_darwin_arm64.go -trace-translation-units testdata/sqlite-amalgamation-3410000/sqlite3.c -full-path-comments -DNDEBUG -DHAVE_USLEEP -DLONGDOUBLE_TYPE=double -DSQLITE_CORE -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_FTS5 -DSQLITE_ENABLE_GEOPOLY -DSQLITE_ENABLE_MATH_FUNCTIONS -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_OFFSET_SQL_FUNC -DSQLITE_ENABLE_PREUPDATE_HOOK -DSQLITE_ENABLE_RBU -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_SESSION -DSQLITE_ENABLE_SNAPSHOT -DSQLITE_ENABLE_STAT4 -DSQLITE_ENABLE_UNLOCK_NOTIFY -DSQLITE_LIKE_DOESNT_MATCH_BLOBS -DSQLITE_MUTEX_APPDEF=1 -DSQLITE_MUTEX_NOOP -DSQLITE_SOUNDEX -DSQLITE_THREADSAFE=1 -DSQLITE_OS_UNIX=1 -DSQLITE_WITHOUT_ZONEMALLOC', DO NOT EDIT. 
+// Code generated by 'ccgo -DSQLITE_PRIVATE= -export-defines "" -export-enums "" -export-externs X -export-fields F -export-typedefs "" -ignore-unsupported-alignment -pkgname sqlite3 -volatile=sqlite3_io_error_pending,sqlite3_open_file_count,sqlite3_pager_readdb_count,sqlite3_pager_writedb_count,sqlite3_pager_writej_count,sqlite3_search_count,sqlite3_sort_count,saved_cnt,randomnessPid -o lib/sqlite_darwin_arm64.go -trace-translation-units testdata/sqlite-amalgamation-3410200/sqlite3.c -full-path-comments -DNDEBUG -DHAVE_USLEEP -DLONGDOUBLE_TYPE=double -DSQLITE_CORE -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_FTS5 -DSQLITE_ENABLE_GEOPOLY -DSQLITE_ENABLE_MATH_FUNCTIONS -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_OFFSET_SQL_FUNC -DSQLITE_ENABLE_PREUPDATE_HOOK -DSQLITE_ENABLE_RBU -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_SESSION -DSQLITE_ENABLE_SNAPSHOT -DSQLITE_ENABLE_STAT4 -DSQLITE_ENABLE_UNLOCK_NOTIFY -DSQLITE_LIKE_DOESNT_MATCH_BLOBS -DSQLITE_MUTEX_APPDEF=1 -DSQLITE_MUTEX_NOOP -DSQLITE_SOUNDEX -DSQLITE_THREADSAFE=1 -DSQLITE_OS_UNIX=1 -DSQLITE_WITHOUT_ZONEMALLOC', DO NOT EDIT. package sqlite3 @@ -1277,11 +1277,11 @@ NC_OrderAgg = 0x8000000 NC_PartIdx = 0x000002 NC_SelfRef = 0x00002e + NC_Subquery = 0x000040 NC_UAggInfo = 0x000100 NC_UBaseReg = 0x000400 NC_UEList = 0x000080 NC_UUpsert = 0x000200 - NC_VarSelect = 0x000040 NDEBUG = 1 NFSV2_MAX_FH_SIZE = 32 NFSV3_MAX_FH_SIZE = 64 @@ -2620,7 +2620,7 @@ SQLITE_SHM_UNLOCK = 1 SQLITE_SORTER_PMASZ = 250 SQLITE_SOUNDEX = 1 - SQLITE_SOURCE_ID = "2023-02-21 18:09:37 05941c2a04037fc3ed2ffae11f5d2260706f89431f463518740f72ada350866d" + SQLITE_SOURCE_ID = "2023-03-22 11:56:21 0d1fc92f94cb6b76bffe3ec34d69cffde2924203304e8ffc4155597af0c191da" SQLITE_SO_ASC = 0 SQLITE_SO_DESC = 1 SQLITE_SO_UNDEFINED = -1 @@ -2728,8 +2728,8 @@ SQLITE_UTF8 = 1 SQLITE_VDBEINT_H = 0 SQLITE_VDBE_H = 0 - SQLITE_VERSION = "3.41.0" - SQLITE_VERSION_NUMBER = 3041000 + SQLITE_VERSION = "3.41.2" + SQLITE_VERSION_NUMBER = 3041002 SQLITE_VTABRISK_High = 2 SQLITE_VTABRISK_Low = 0 SQLITE_VTABRISK_Normal = 1 @@ -7273,7 +7273,8 @@ FiIdxCur int32 FiIdxCol int32 FbMaybeNullRow U8 - F__ccgo_pad1 [3]byte + Faff U8 + F__ccgo_pad1 [2]byte FpIENext uintptr } @@ -7915,17 +7916,18 @@ // Handle type for pages. type PgHdr2 = struct { - FpPage uintptr - FpData uintptr - FpExtra uintptr - FpCache uintptr - FpDirty uintptr - FpPager uintptr - Fpgno Pgno - Fflags U16 - FnRef I16 - FpDirtyNext uintptr - FpDirtyPrev uintptr + FpPage uintptr + FpData uintptr + FpExtra uintptr + FpCache uintptr + FpDirty uintptr + FpPager uintptr + Fpgno Pgno + Fflags U16 + F__ccgo_pad1 [2]byte + FnRef I64 + FpDirtyNext uintptr + FpDirtyPrev uintptr } // Handle type for pages. 
@@ -8146,14 +8148,14 @@ FpDirty uintptr FpDirtyTail uintptr FpSynced uintptr - FnRefSum int32 + FnRefSum I64 FszCache int32 FszSpill int32 FszPage int32 FszExtra int32 FbPurgeable U8 FeCreate U8 - F__ccgo_pad1 [2]byte + F__ccgo_pad1 [6]byte FxStress uintptr FpStress uintptr FpCache uintptr @@ -8960,7 +8962,7 @@ _ = pMutex if op < 0 || op >= int32(uint64(unsafe.Sizeof([10]Sqlite3StatValueType{}))/uint64(unsafe.Sizeof(Sqlite3StatValueType(0)))) { - return Xsqlite3MisuseError(tls, 23229) + return Xsqlite3MisuseError(tls, 23233) } if statMutex[op] != 0 { pMutex = Xsqlite3Pcache1Mutex(tls) @@ -18319,7 +18321,7 @@ for p = (*UnixInodeInfo)(unsafe.Pointer(pInode)).FpUnused; p != 0; p = pNext { pNext = (*UnixUnusedFd)(unsafe.Pointer(p)).FpNext - robust_close(tls, pFile, (*UnixUnusedFd)(unsafe.Pointer(p)).Ffd, 38271) + robust_close(tls, pFile, (*UnixUnusedFd)(unsafe.Pointer(p)).Ffd, 38275) Xsqlite3_free(tls, p) } (*UnixInodeInfo)(unsafe.Pointer(pInode)).FpUnused = uintptr(0) @@ -18864,7 +18866,7 @@ var pFile uintptr = id unixUnmapfile(tls, pFile) if (*UnixFile)(unsafe.Pointer(pFile)).Fh >= 0 { - robust_close(tls, pFile, (*UnixFile)(unsafe.Pointer(pFile)).Fh, 39055) + robust_close(tls, pFile, (*UnixFile)(unsafe.Pointer(pFile)).Fh, 39059) (*UnixFile)(unsafe.Pointer(pFile)).Fh = -1 } @@ -19631,7 +19633,7 @@ if fd >= 0 { return SQLITE_OK } - return unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 40676), ts+3371, bp+8, 40676) + return unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 40680), ts+3371, bp+8, 40680) } func unixSync(tls *libc.TLS, id uintptr, flags int32) int32 { @@ -19648,14 +19650,14 @@ if rc != 0 { storeLastErrno(tls, pFile, *(*int32)(unsafe.Pointer(libc.X__error(tls)))) - return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(4)<<8, ts+3661, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40717) + return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(4)<<8, ts+3661, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40721) } if int32((*UnixFile)(unsafe.Pointer(pFile)).FctrlFlags)&UNIXFILE_DIRSYNC != 0 { rc = (*(*func(*libc.TLS, uintptr, uintptr) int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 17*24 + 8)))(tls, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, bp) if rc == SQLITE_OK { full_fsync(tls, *(*int32)(unsafe.Pointer(bp)), 0, 0) - robust_close(tls, pFile, *(*int32)(unsafe.Pointer(bp)), 40731) + robust_close(tls, pFile, *(*int32)(unsafe.Pointer(bp)), 40735) } else { rc = SQLITE_OK } @@ -19675,7 +19677,7 @@ rc = robust_ftruncate(tls, (*UnixFile)(unsafe.Pointer(pFile)).Fh, nByte) if rc != 0 { storeLastErrno(tls, pFile, *(*int32)(unsafe.Pointer(libc.X__error(tls)))) - return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(6)<<8, ts+3290, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40762) + return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(6)<<8, ts+3290, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40766) } else { if nByte < (*UnixFile)(unsafe.Pointer(pFile)).FmmapSize { (*UnixFile)(unsafe.Pointer(pFile)).FmmapSize = nByte @@ -19743,7 +19745,7 @@ if (*UnixFile)(unsafe.Pointer(pFile)).FszChunk <= 0 { if robust_ftruncate(tls, (*UnixFile)(unsafe.Pointer(pFile)).Fh, nByte) != 0 { storeLastErrno(tls, pFile, *(*int32)(unsafe.Pointer(libc.X__error(tls)))) - return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(6)<<8, ts+3290, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40883) + return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(6)<<8, ts+3290, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40887) } } @@ -19977,7 +19979,7 @@ } Xsqlite3_free(tls, (*UnixShmNode)(unsafe.Pointer(p)).FapRegion) if 
(*UnixShmNode)(unsafe.Pointer(p)).FhShm >= 0 { - robust_close(tls, pFd, (*UnixShmNode)(unsafe.Pointer(p)).FhShm, 41442) + robust_close(tls, pFd, (*UnixShmNode)(unsafe.Pointer(p)).FhShm, 41446) (*UnixShmNode)(unsafe.Pointer(p)).FhShm = -1 } (*UnixInodeInfo)(unsafe.Pointer((*UnixShmNode)(unsafe.Pointer(p)).FpInode)).FpShmNode = uintptr(0) @@ -20005,7 +20007,7 @@ rc = unixShmSystemLock(tls, pDbFd, F_WRLCK, (22+SQLITE_SHM_NLOCK)*4+SQLITE_SHM_NLOCK, 1) if rc == SQLITE_OK && robust_ftruncate(tls, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FhShm, int64(3)) != 0 { - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(18)<<8, ts+3290, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename, 41499) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(18)<<8, ts+3290, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename, 41503) } } } else if int32((*flock)(unsafe.Pointer(bp+8)).Fl_type) == F_WRLCK { @@ -20104,7 +20106,7 @@ if !((*unixShmNode)(unsafe.Pointer(pShmNode)).FhShm < 0) { goto __10 } - rc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 41624), ts+3254, zShm, 41624) + rc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 41628), ts+3254, zShm, 41628) goto shm_open_err __10: ; @@ -20234,7 +20236,7 @@ goto __14 } zFile = (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(19)<<8, ts+3325, zFile, 41768) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(19)<<8, ts+3325, zFile, 41772) goto shmpage_out __14: ; @@ -20280,7 +20282,7 @@ if !(pMem == libc.UintptrFromInt32(-1)) { goto __20 } - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(21)<<8, ts+3412, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename, 41795) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(21)<<8, ts+3412, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename, 41799) goto shmpage_out __20: ; @@ -20519,7 +20521,7 @@ if pNew == libc.UintptrFromInt32(-1) { pNew = uintptr(0) nNew = int64(0) - unixLogErrorAtLine(tls, SQLITE_OK, zErr, (*UnixFile)(unsafe.Pointer(pFd)).FzPath, 42169) + unixLogErrorAtLine(tls, SQLITE_OK, zErr, (*UnixFile)(unsafe.Pointer(pFd)).FzPath, 42173) (*UnixFile)(unsafe.Pointer(pFd)).FmmapSizeMax = int64(0) } @@ -20746,7 +20748,7 @@ unixEnterMutex(tls) rc = findInodeInfo(tls, pNew, pNew+16) if rc != SQLITE_OK { - robust_close(tls, pNew, h, 42672) + robust_close(tls, pNew, h, 42676) h = -1 } unixLeaveMutex(tls) @@ -20763,7 +20765,7 @@ rc = findInodeInfo(tls, pNew, pNew+16) if rc != SQLITE_OK { Xsqlite3_free(tls, (*UnixFile)(unsafe.Pointer(pNew)).FlockingContext) - robust_close(tls, pNew, h, 42698) + robust_close(tls, pNew, h, 42702) h = -1 } unixLeaveMutex(tls) @@ -20785,7 +20787,7 @@ storeLastErrno(tls, pNew, 0) if rc != SQLITE_OK { if h >= 0 { - robust_close(tls, pNew, h, 42757) + robust_close(tls, pNew, h, 42761) } } else { (*Sqlite3_file)(unsafe.Pointer(pId)).FpMethods = pLockingStyle @@ -21105,7 +21107,7 @@ if !(fd < 0) { goto __19 } - rc2 = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43198), ts+3254, zName, 43198) + rc2 = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43202), ts+3254, zName, 43202) if !(rc == SQLITE_OK) { goto __20 } @@ -21149,7 +21151,7 @@ goto __26 } storeLastErrno(tls, p, *(*int32)(unsafe.Pointer(libc.X__error(tls)))) - robust_close(tls, p, fd, 43252) + robust_close(tls, p, fd, 43256) return SQLITE_IOERR | int32(13)<<8 __26: ; @@ -21257,7 +21259,7 @@ if *(*int32)(unsafe.Pointer(libc.X__error(tls))) == ENOENT { rc = SQLITE_IOERR | int32(23)<<8 } else { - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(10)<<8, ts+3364, zPath, 43337) + rc = 
unixLogErrorAtLine(tls, SQLITE_IOERR|int32(10)<<8, ts+3364, zPath, 43341) } return rc } @@ -21265,9 +21267,9 @@ rc = (*(*func(*libc.TLS, uintptr, uintptr) int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 17*24 + 8)))(tls, zPath, bp) if rc == SQLITE_OK { if full_fsync(tls, *(*int32)(unsafe.Pointer(bp)), 0, 0) != 0 { - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(5)<<8, ts+3860, zPath, 43347) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(5)<<8, ts+3860, zPath, 43351) } - robust_close(tls, uintptr(0), *(*int32)(unsafe.Pointer(bp)), 43349) + robust_close(tls, uintptr(0), *(*int32)(unsafe.Pointer(bp)), 43353) } else { rc = SQLITE_OK } @@ -21331,18 +21333,18 @@ zIn = (*DbPath)(unsafe.Pointer(pPath)).FzOut if (*(*func(*libc.TLS, uintptr, uintptr) int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 27*24 + 8)))(tls, zIn, bp) != 0 { if *(*int32)(unsafe.Pointer(libc.X__error(tls))) != ENOENT { - (*DbPath)(unsafe.Pointer(pPath)).Frc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43443), ts+3452, zIn, 43443) + (*DbPath)(unsafe.Pointer(pPath)).Frc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43447), ts+3452, zIn, 43447) } } else if int32((*stat)(unsafe.Pointer(bp)).Fst_mode)&S_IFMT == S_IFLNK { var got Ssize_t if libc.PostIncInt32(&(*DbPath)(unsafe.Pointer(pPath)).FnSymlink, 1) > SQLITE_MAX_SYMLINK { - (*DbPath)(unsafe.Pointer(pPath)).Frc = Xsqlite3CantopenError(tls, 43449) + (*DbPath)(unsafe.Pointer(pPath)).Frc = Xsqlite3CantopenError(tls, 43453) return } got = (*(*func(*libc.TLS, uintptr, uintptr, Size_t) Ssize_t)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls, zIn, bp+144, uint64(unsafe.Sizeof([1026]int8{}))-uint64(2)) if got <= int64(0) || got >= Ssize_t(unsafe.Sizeof([1026]int8{}))-int64(2) { - (*DbPath)(unsafe.Pointer(pPath)).Frc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43454), ts+3443, zIn, 43454) + (*DbPath)(unsafe.Pointer(pPath)).Frc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43458), ts+3443, zIn, 43458) return } *(*int8)(unsafe.Pointer(bp + 144 + uintptr(got))) = int8(0) @@ -21382,14 +21384,14 @@ (*DbPath)(unsafe.Pointer(bp + 1032)).FzOut = zOut if int32(*(*int8)(unsafe.Pointer(zPath))) != '/' { if (*(*func(*libc.TLS, uintptr, Size_t) uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 3*24 + 8)))(tls, bp, uint64(unsafe.Sizeof([1026]int8{}))-uint64(2)) == uintptr(0) { - return unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43512), ts+3272, zPath, 43512) + return unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43516), ts+3272, zPath, 43516) } appendAllPathElements(tls, bp+1032, bp) } appendAllPathElements(tls, bp+1032, zPath) *(*int8)(unsafe.Pointer(zOut + uintptr((*DbPath)(unsafe.Pointer(bp+1032)).FnUsed))) = int8(0) if (*DbPath)(unsafe.Pointer(bp+1032)).Frc != 0 || (*DbPath)(unsafe.Pointer(bp+1032)).FnUsed < 2 { - return Xsqlite3CantopenError(tls, 43518) + return Xsqlite3CantopenError(tls, 43522) } if (*DbPath)(unsafe.Pointer(bp+1032)).FnSymlink != 0 { return SQLITE_OK | int32(2)<<8 @@ -21464,7 +21466,7 @@ for __ccgo := true; __ccgo; __ccgo = got < 0 && *(*int32)(unsafe.Pointer(libc.X__error(tls))) == EINTR { got = int32((*(*func(*libc.TLS, int32, uintptr, Size_t) Ssize_t)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 8*24 + 8)))(tls, fd, zBuf, uint64(nBuf))) } - robust_close(tls, uintptr(0), fd, 43619) + robust_close(tls, uintptr(0), fd, 43623) } } @@ -21678,7 +21680,7 @@ __12: return SQLITE_IOERR | int32(15)<<8 __13: - return Xsqlite3CantopenError(tls, 44048) + return 
Xsqlite3CantopenError(tls, 44052) __10: ; __8: @@ -21709,7 +21711,7 @@ __15: ; end_create_proxy: - robust_close(tls, pNew, fd, 44072) + robust_close(tls, pNew, fd, 44076) Xsqlite3_free(tls, pNew) Xsqlite3_free(tls, pUnused) return rc @@ -21798,7 +21800,7 @@ ; rc = 0 libc.Xfprintf(tls, libc.X__stderrp, ts+4066, libc.VaList(bp+40, cPath)) - robust_close(tls, pFile, (*UnixFile)(unsafe.Pointer(conchFile)).Fh, 44175) + robust_close(tls, pFile, (*UnixFile)(unsafe.Pointer(conchFile)).Fh, 44179) (*UnixFile)(unsafe.Pointer(conchFile)).Fh = fd (*UnixFile)(unsafe.Pointer(conchFile)).FopenFlags = O_RDWR | O_CREAT @@ -21810,7 +21812,7 @@ goto __7 } (*(*func(*libc.TLS, uintptr) int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 16*24 + 8)))(tls, bp+64) - robust_close(tls, pFile, fd, 44183) + robust_close(tls, pFile, fd, 44187) __7: ; libc.Xfprintf(tls, libc.X__stderrp, ts+4090, libc.VaList(bp+48, cPath, bp+1088)) @@ -22090,7 +22092,7 @@ if !((*UnixFile)(unsafe.Pointer(pFile)).Fh >= 0) { goto __29 } - robust_close(tls, pFile, (*UnixFile)(unsafe.Pointer(pFile)).Fh, 44436) + robust_close(tls, pFile, (*UnixFile)(unsafe.Pointer(pFile)).Fh, 44440) __29: ; (*UnixFile)(unsafe.Pointer(pFile)).Fh = -1 @@ -22102,7 +22104,7 @@ (*UnixFile)(unsafe.Pointer(pFile)).Fh = fd goto __31 __30: - rc = Xsqlite3CantopenError(tls, 44444) + rc = Xsqlite3CantopenError(tls, 44448) __31: ; @@ -23887,7 +23889,7 @@ libc.X__builtin___memset_chk(tls, pPgHdr+32, 0, uint64(unsafe.Sizeof(PgHdr{}))-uint64(uintptr(0)+32), libc.X__builtin_object_size(tls, pPgHdr+32, 0)) (*PgHdr)(unsafe.Pointer(pPgHdr)).FpPage = pPage (*PgHdr)(unsafe.Pointer(pPgHdr)).FpData = (*Sqlite3_pcache_page)(unsafe.Pointer(pPage)).FpBuf - (*PgHdr)(unsafe.Pointer(pPgHdr)).FpExtra = pPgHdr + 1*72 + (*PgHdr)(unsafe.Pointer(pPgHdr)).FpExtra = pPgHdr + 1*80 libc.X__builtin___memset_chk(tls, (*PgHdr)(unsafe.Pointer(pPgHdr)).FpExtra, 0, uint64(8), libc.X__builtin_object_size(tls, (*PgHdr)(unsafe.Pointer(pPgHdr)).FpExtra, 0)) (*PgHdr)(unsafe.Pointer(pPgHdr)).FpCache = pCache (*PgHdr)(unsafe.Pointer(pPgHdr)).Fpgno = pgno @@ -23917,7 +23919,7 @@ // reference count drops to 0, then it is made eligible for recycling. func Xsqlite3PcacheRelease(tls *libc.TLS, p uintptr) { (*PCache)(unsafe.Pointer((*PgHdr)(unsafe.Pointer(p)).FpCache)).FnRefSum-- - if int32(libc.PreDecInt16(&(*PgHdr)(unsafe.Pointer(p)).FnRef, 1)) == 0 { + if libc.PreDecInt64(&(*PgHdr)(unsafe.Pointer(p)).FnRef, 1) == int64(0) { if int32((*PgHdr)(unsafe.Pointer(p)).Fflags)&PGHDR_CLEAN != 0 { pcacheUnpin(tls, p) } else { @@ -23968,7 +23970,7 @@ *(*U16)(unsafe.Pointer(p + 52)) &= libc.Uint16FromInt32(libc.CplInt32(PGHDR_DIRTY | PGHDR_NEED_SYNC | PGHDR_WRITEABLE)) *(*U16)(unsafe.Pointer(p + 52)) |= U16(PGHDR_CLEAN) - if int32((*PgHdr)(unsafe.Pointer(p)).FnRef) == 0 { + if (*PgHdr)(unsafe.Pointer(p)).FnRef == int64(0) { pcacheUnpin(tls, p) } } @@ -24072,8 +24074,8 @@ } func pcacheMergeDirtyList(tls *libc.TLS, pA uintptr, pB uintptr) uintptr { - bp := tls.Alloc(72) - defer tls.Free(72) + bp := tls.Alloc(80) + defer tls.Free(80) var pTail uintptr pTail = bp @@ -24151,13 +24153,13 @@ // // This is not the total number of pages referenced, but the sum of the // reference count for all pages. -func Xsqlite3PcacheRefCount(tls *libc.TLS, pCache uintptr) int32 { +func Xsqlite3PcacheRefCount(tls *libc.TLS, pCache uintptr) I64 { return (*PCache)(unsafe.Pointer(pCache)).FnRefSum } // Return the number of references to the page supplied as an argument. 
-func Xsqlite3PcachePageRefcount(tls *libc.TLS, p uintptr) int32 { - return int32((*PgHdr)(unsafe.Pointer(p)).FnRef) +func Xsqlite3PcachePageRefcount(tls *libc.TLS, p uintptr) I64 { + return (*PgHdr)(unsafe.Pointer(p)).FnRef } // Return the total number of pages in the cache. @@ -26450,7 +26452,7 @@ pPg = Xsqlite3PagerLookup(tls, pPager, iPg) if pPg != 0 { - if Xsqlite3PcachePageRefcount(tls, pPg) == 1 { + if Xsqlite3PcachePageRefcount(tls, pPg) == int64(1) { Xsqlite3PcacheDrop(tls, pPg) } else { rc = readDbPage(tls, pPg) @@ -26883,7 +26885,7 @@ var pageSize U32 = *(*U32)(unsafe.Pointer(pPageSize)) if (int32((*Pager)(unsafe.Pointer(pPager)).FmemDb) == 0 || (*Pager)(unsafe.Pointer(pPager)).FdbSize == Pgno(0)) && - Xsqlite3PcacheRefCount(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache) == 0 && + Xsqlite3PcacheRefCount(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache) == int64(0) && pageSize != 0 && pageSize != U32((*Pager)(unsafe.Pointer(pPager)).FpageSize) { var pNew uintptr = uintptr(0) *(*I64)(unsafe.Pointer(bp)) = int64(0) @@ -27035,9 +27037,9 @@ Xsqlite3OsUnfetch(tls, (*Pager)(unsafe.Pointer(pPager)).Ffd, I64(pgno-Pgno(1))*(*Pager)(unsafe.Pointer(pPager)).FpageSize, pData) return SQLITE_NOMEM } - (*PgHdr)(unsafe.Pointer(p)).FpExtra = p + 1*72 + (*PgHdr)(unsafe.Pointer(p)).FpExtra = p + 1*80 (*PgHdr)(unsafe.Pointer(p)).Fflags = U16(PGHDR_MMAP) - (*PgHdr)(unsafe.Pointer(p)).FnRef = int16(1) + (*PgHdr)(unsafe.Pointer(p)).FnRef = int64(1) (*PgHdr)(unsafe.Pointer(p)).FpPager = pPager } @@ -27369,7 +27371,7 @@ for rc == SQLITE_OK && pList != 0 { var pNext uintptr = (*PgHdr)(unsafe.Pointer(pList)).FpDirty - if int32((*PgHdr)(unsafe.Pointer(pList)).FnRef) == 0 { + if (*PgHdr)(unsafe.Pointer(pList)).FnRef == int64(0) { rc = pagerStress(tls, pPager, pList) } pList = pNext @@ -27519,7 +27521,7 @@ goto __12 } - rc = Xsqlite3CantopenError(tls, 60235) + rc = Xsqlite3CantopenError(tls, 60239) __12: ; if !(rc != SQLITE_OK) { @@ -27900,7 +27902,7 @@ if !(rc == SQLITE_OK && *(*int32)(unsafe.Pointer(bp + 8))&SQLITE_OPEN_READONLY != 0) { goto __10 } - rc = Xsqlite3CantopenError(tls, 60754) + rc = Xsqlite3CantopenError(tls, 60758) Xsqlite3OsClose(tls, (*Pager)(unsafe.Pointer(pPager)).Fjfd) __10: ; @@ -28006,7 +28008,7 @@ } func pagerUnlockIfUnused(tls *libc.TLS, pPager uintptr) { - if Xsqlite3PcacheRefCount(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache) == 0 { + if Xsqlite3PcacheRefCount(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache) == int64(0) { pagerUnlockAndRollback(tls, pPager) } } @@ -28024,7 +28026,7 @@ if !(pgno == Pgno(0)) { goto __1 } - return Xsqlite3CorruptError(tls, 60967) + return Xsqlite3CorruptError(tls, 60971) __1: ; *(*uintptr)(unsafe.Pointer(bp)) = Xsqlite3PcacheFetch(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache, pgno, 3) @@ -28063,7 +28065,7 @@ if !(pgno == (*Pager)(unsafe.Pointer(pPager)).FlckPgno) { goto __7 } - rc = Xsqlite3CorruptError(tls, 60999) + rc = Xsqlite3CorruptError(tls, 61003) goto pager_acquire_err __7: ; @@ -28140,7 +28142,7 @@ (int32((*Pager)(unsafe.Pointer(pPager)).FeState) == PAGER_READER || flags&PAGER_GET_READONLY != 0)) if pgno <= Pgno(1) && pgno == Pgno(0) { - return Xsqlite3CorruptError(tls, 61078) + return Xsqlite3CorruptError(tls, 61082) } if bMmapOk != 0 && (*Pager)(unsafe.Pointer(pPager)).FpWal != uintptr(0) { @@ -28898,7 +28900,7 @@ // Return the number of references to the specified page. 
func Xsqlite3PagerPageRefcount(tls *libc.TLS, pPage uintptr) int32 { - return Xsqlite3PcachePageRefcount(tls, pPage) + return int32(Xsqlite3PcachePageRefcount(tls, pPage)) } // Parameter eStat must be one of SQLITE_DBSTATUS_CACHE_HIT, _MISS, _WRITE, @@ -29141,9 +29143,9 @@ pPgOld = Xsqlite3PagerLookup(tls, pPager, pgno) if pPgOld != 0 { - if int32((*PgHdr)(unsafe.Pointer(pPgOld)).FnRef) > 1 { + if (*PgHdr)(unsafe.Pointer(pPgOld)).FnRef > int64(1) { Xsqlite3PagerUnrefNotNull(tls, pPgOld) - return Xsqlite3CorruptError(tls, 62623) + return Xsqlite3CorruptError(tls, 62627) } *(*U16)(unsafe.Pointer(pPg + 52)) |= U16(int32((*PgHdr)(unsafe.Pointer(pPgOld)).Fflags) & PGHDR_NEED_SYNC) if (*Pager)(unsafe.Pointer(pPager)).FtempFile != 0 { @@ -29899,7 +29901,7 @@ nCollide = idx for iKey = walHash(tls, iPage); *(*Ht_slot)(unsafe.Pointer((*WalHashLoc)(unsafe.Pointer(bp)).FaHash + uintptr(iKey)*2)) != 0; iKey = walNextHash(tls, iKey) { if libc.PostDecInt32(&nCollide, 1) == 0 { - return Xsqlite3CorruptError(tls, 64387) + return Xsqlite3CorruptError(tls, 64391) } } *(*U32)(unsafe.Pointer((*WalHashLoc)(unsafe.Pointer(bp)).FaPgno + uintptr(idx-1)*4)) = iPage @@ -29998,7 +30000,7 @@ if !(version != U32(WAL_MAX_VERSION)) { goto __7 } - rc = Xsqlite3CantopenError(tls, 64519) + rc = Xsqlite3CantopenError(tls, 64523) goto finished __7: ; @@ -30584,7 +30586,7 @@ goto __14 } - rc = Xsqlite3CorruptError(tls, 65333) + rc = Xsqlite3CorruptError(tls, 65337) goto __15 __14: Xsqlite3OsFileControlHint(tls, (*Wal)(unsafe.Pointer(pWal)).FpDbFd, SQLITE_FCNTL_SIZE_HINT, bp+16) @@ -30859,7 +30861,7 @@ } if badHdr == 0 && (*Wal)(unsafe.Pointer(pWal)).Fhdr.FiVersion != U32(WALINDEX_MAX_VERSION) { - rc = Xsqlite3CantopenError(tls, 65682) + rc = Xsqlite3CantopenError(tls, 65686) } if (*Wal)(unsafe.Pointer(pWal)).FbShmUnreliable != 0 { if rc != SQLITE_OK { @@ -31332,7 +31334,7 @@ iRead = iFrame } if libc.PostDecInt32(&nCollide, 1) == 0 { - return Xsqlite3CorruptError(tls, 66419) + return Xsqlite3CorruptError(tls, 66423) } iKey = walNextHash(tls, iKey) } @@ -31837,7 +31839,7 @@ if rc == SQLITE_OK { if (*Wal)(unsafe.Pointer(pWal)).Fhdr.FmxFrame != 0 && walPagesize(tls, pWal) != nBuf { - rc = Xsqlite3CorruptError(tls, 67138) + rc = Xsqlite3CorruptError(tls, 67142) } else { rc = walCheckpoint(tls, pWal, db, eMode2, xBusy2, pBusyArg, sync_flags, zBuf) } @@ -32495,7 +32497,7 @@ } Xsqlite3VdbeRecordUnpack(tls, pKeyInfo, int32(nKey), pKey, pIdxKey) if int32((*UnpackedRecord)(unsafe.Pointer(pIdxKey)).FnField) == 0 || int32((*UnpackedRecord)(unsafe.Pointer(pIdxKey)).FnField) > int32((*KeyInfo)(unsafe.Pointer(pKeyInfo)).FnAllField) { - rc = Xsqlite3CorruptError(tls, 69249) + rc = Xsqlite3CorruptError(tls, 69253) } else { rc = Xsqlite3BtreeIndexMoveto(tls, pCur, pIdxKey, pRes) } @@ -32632,7 +32634,7 @@ if !(key == Pgno(0)) { goto __2 } - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69430) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69434) return __2: ; @@ -32649,7 +32651,7 @@ goto __4 } - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69443) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69447) goto ptrmap_exit __4: ; @@ -32657,7 +32659,7 @@ if !(offset < 0) { goto __5 } - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69448) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69452) goto ptrmap_exit __5: ; @@ -32700,7 +32702,7 @@ offset = int32(Pgno(5) * (key - Pgno(iPtrmap) - Pgno(1))) if offset < 0 { Xsqlite3PagerUnref(tls, *(*uintptr)(unsafe.Pointer(bp))) - 
return Xsqlite3CorruptError(tls, 69493) + return Xsqlite3CorruptError(tls, 69497) } *(*U8)(unsafe.Pointer(pEType)) = *(*U8)(unsafe.Pointer(pPtrmap + uintptr(offset))) @@ -32710,7 +32712,7 @@ Xsqlite3PagerUnref(tls, *(*uintptr)(unsafe.Pointer(bp))) if int32(*(*U8)(unsafe.Pointer(pEType))) < 1 || int32(*(*U8)(unsafe.Pointer(pEType))) > 5 { - return Xsqlite3CorruptError(tls, 69501) + return Xsqlite3CorruptError(tls, 69505) } return SQLITE_OK } @@ -32960,7 +32962,7 @@ if U32((*CellInfo)(unsafe.Pointer(bp)).FnLocal) < (*CellInfo)(unsafe.Pointer(bp)).FnPayload { var ovfl Pgno if Uptr((*MemPage)(unsafe.Pointer(pSrc)).FaDataEnd) >= Uptr(pCell) && Uptr((*MemPage)(unsafe.Pointer(pSrc)).FaDataEnd) < Uptr(pCell+uintptr((*CellInfo)(unsafe.Pointer(bp)).FnLocal)) { - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69893) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69897) return } ovfl = Xsqlite3Get4byte(tls, pCell+uintptr(int32((*CellInfo)(unsafe.Pointer(bp)).FnSize)-4)) @@ -33007,7 +33009,7 @@ if !(iFree > usableSize-4) { goto __2 } - return Xsqlite3CorruptError(tls, 69951) + return Xsqlite3CorruptError(tls, 69955) __2: ; if !(iFree != 0) { @@ -33017,7 +33019,7 @@ if !(iFree2 > usableSize-4) { goto __4 } - return Xsqlite3CorruptError(tls, 69954) + return Xsqlite3CorruptError(tls, 69958) __4: ; if !(0 == iFree2 || int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree2)))) == 0 && int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree2+1)))) == 0) { @@ -33030,7 +33032,7 @@ if !(top >= iFree) { goto __6 } - return Xsqlite3CorruptError(tls, 69962) + return Xsqlite3CorruptError(tls, 69966) __6: ; if !(iFree2 != 0) { @@ -33039,14 +33041,14 @@ if !(iFree+sz > iFree2) { goto __9 } - return Xsqlite3CorruptError(tls, 69965) + return Xsqlite3CorruptError(tls, 69969) __9: ; sz2 = int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree2+2))))<<8 | int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree2+2) + 1))) if !(iFree2+sz2 > usableSize) { goto __10 } - return Xsqlite3CorruptError(tls, 69967) + return Xsqlite3CorruptError(tls, 69971) __10: ; libc.X__builtin___memmove_chk(tls, data+uintptr(iFree+sz+sz2), data+uintptr(iFree+sz), uint64(iFree2-(iFree+sz)), libc.X__builtin_object_size(tls, data+uintptr(iFree+sz+sz2), 0)) @@ -33056,7 +33058,7 @@ if !(iFree+sz > usableSize) { goto __11 } - return Xsqlite3CorruptError(tls, 69971) + return Xsqlite3CorruptError(tls, 69975) __11: ; __8: @@ -33120,7 +33122,7 @@ if !(pc < iCellStart || pc > iCellLast) { goto __22 } - return Xsqlite3CorruptError(tls, 70004) + return Xsqlite3CorruptError(tls, 70008) __22: ; size = int32((*struct { @@ -33130,7 +33132,7 @@ if !(cbrk < iCellStart || pc+size > usableSize) { goto __23 } - return Xsqlite3CorruptError(tls, 70010) + return Xsqlite3CorruptError(tls, 70014) __23: ; *(*U8)(unsafe.Pointer(pAddr1)) = U8(cbrk >> 8) @@ -33152,7 +33154,7 @@ if !(int32(*(*uint8)(unsafe.Pointer(data + uintptr(hdr+7))))+cbrk-iCellFirst != (*MemPage)(unsafe.Pointer(pPage)).FnFree) { goto __24 } - return Xsqlite3CorruptError(tls, 70024) + return Xsqlite3CorruptError(tls, 70028) __24: ; *(*uint8)(unsafe.Pointer(data + uintptr(hdr+5))) = U8(cbrk >> 8) @@ -33187,7 +33189,7 @@ *(*U8)(unsafe.Pointer(aData + uintptr(hdr+7))) += U8(int32(U8(x))) return aData + uintptr(pc) } else if x+pc > maxPC { - *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70081) + *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70085) return uintptr(0) } else { *(*U8)(unsafe.Pointer(aData + uintptr(pc+2))) = U8(x >> 8) @@ -33200,13 +33202,13 @@ pc = 
int32(*(*U8)(unsafe.Pointer(pTmp)))<<8 | int32(*(*U8)(unsafe.Pointer(pTmp + 1))) if pc <= iAddr { if pc != 0 { - *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70096) + *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70100) } return uintptr(0) } } if pc > maxPC+nByte-4 { - *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70103) + *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70107) } return uintptr(0) } @@ -33231,7 +33233,7 @@ if top == 0 && (*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize == U32(65536) { top = 65536 } else { - return Xsqlite3CorruptError(tls, 70152) + return Xsqlite3CorruptError(tls, 70156) } } @@ -33242,7 +33244,7 @@ *(*int32)(unsafe.Pointer(pIdx)) = libc.AssignInt32(&g2, int32((int64(pSpace)-int64(data))/1)) if g2 <= gap { - return Xsqlite3CorruptError(tls, 70170) + return Xsqlite3CorruptError(tls, 70174) } else { return SQLITE_OK } @@ -33294,22 +33296,22 @@ if int32(iFreeBlk) == 0 { break } - return Xsqlite3CorruptError(tls, 70249) + return Xsqlite3CorruptError(tls, 70253) } iPtr = iFreeBlk } if U32(iFreeBlk) > (*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize-U32(4) { - return Xsqlite3CorruptError(tls, 70254) + return Xsqlite3CorruptError(tls, 70258) } if iFreeBlk != 0 && iEnd+U32(3) >= U32(iFreeBlk) { nFrag = U8(U32(iFreeBlk) - iEnd) if iEnd > U32(iFreeBlk) { - return Xsqlite3CorruptError(tls, 70266) + return Xsqlite3CorruptError(tls, 70270) } iEnd = U32(int32(iFreeBlk) + (int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(iFreeBlk)+2))))<<8 | int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(iFreeBlk)+2) + 1))))) if iEnd > (*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize { - return Xsqlite3CorruptError(tls, 70269) + return Xsqlite3CorruptError(tls, 70273) } iSize = U16(iEnd - U32(iStart)) iFreeBlk = U16(int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFreeBlk))))<<8 | int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFreeBlk) + 1)))) @@ -33319,7 +33321,7 @@ var iPtrEnd int32 = int32(iPtr) + (int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(iPtr)+2))))<<8 | int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(iPtr)+2) + 1)))) if iPtrEnd+3 >= int32(iStart) { if iPtrEnd > int32(iStart) { - return Xsqlite3CorruptError(tls, 70282) + return Xsqlite3CorruptError(tls, 70286) } nFrag = U8(int32(nFrag) + (int32(iStart) - iPtrEnd)) iSize = U16(iEnd - U32(iPtr)) @@ -33327,7 +33329,7 @@ } } if int32(nFrag) > int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+7)))) { - return Xsqlite3CorruptError(tls, 70288) + return Xsqlite3CorruptError(tls, 70292) } *(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+7))) -= uint8(int32(nFrag)) } @@ -33335,10 +33337,10 @@ x = U16(int32(*(*U8)(unsafe.Pointer(pTmp)))<<8 | int32(*(*U8)(unsafe.Pointer(pTmp + 1)))) if int32(iStart) <= int32(x) { if int32(iStart) < int32(x) { - return Xsqlite3CorruptError(tls, 70297) + return Xsqlite3CorruptError(tls, 70301) } if int32(iPtr) != int32(hdr)+1 { - return Xsqlite3CorruptError(tls, 70298) + return Xsqlite3CorruptError(tls, 70302) } *(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+1))) = U8(int32(iFreeBlk) >> 8) *(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+1) + 1)) = U8(iFreeBlk) @@ -33398,7 +33400,7 @@ (*MemPage)(unsafe.Pointer(pPage)).FxParseCell = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr) }{btreeParseCellPtrIndex})) - return Xsqlite3CorruptError(tls, 70357) + return Xsqlite3CorruptError(tls, 70361) } } else { 
(*MemPage)(unsafe.Pointer(pPage)).FchildPtrSize = U8(4) @@ -33434,7 +33436,7 @@ (*MemPage)(unsafe.Pointer(pPage)).FxParseCell = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr) }{btreeParseCellPtrIndex})) - return Xsqlite3CorruptError(tls, 70381) + return Xsqlite3CorruptError(tls, 70385) } } return SQLITE_OK @@ -33464,11 +33466,11 @@ var next U32 var size U32 if pc < top { - return Xsqlite3CorruptError(tls, 70432) + return Xsqlite3CorruptError(tls, 70436) } for 1 != 0 { if pc > iCellLast { - return Xsqlite3CorruptError(tls, 70437) + return Xsqlite3CorruptError(tls, 70441) } next = U32(int32(*(*U8)(unsafe.Pointer(data + uintptr(pc))))<<8 | int32(*(*U8)(unsafe.Pointer(data + uintptr(pc) + 1)))) size = U32(int32(*(*U8)(unsafe.Pointer(data + uintptr(pc+2))))<<8 | int32(*(*U8)(unsafe.Pointer(data + uintptr(pc+2) + 1)))) @@ -33479,15 +33481,15 @@ pc = int32(next) } if next > U32(0) { - return Xsqlite3CorruptError(tls, 70447) + return Xsqlite3CorruptError(tls, 70451) } if U32(pc)+size > uint32(usableSize) { - return Xsqlite3CorruptError(tls, 70451) + return Xsqlite3CorruptError(tls, 70455) } } if nFree > usableSize || nFree < iCellFirst { - return Xsqlite3CorruptError(tls, 70463) + return Xsqlite3CorruptError(tls, 70467) } (*MemPage)(unsafe.Pointer(pPage)).FnFree = int32(U16(nFree - iCellFirst)) return SQLITE_OK @@ -33515,14 +33517,14 @@ pc = int32(*(*U8)(unsafe.Pointer(data + uintptr(cellOffset+i*2))))<<8 | int32(*(*U8)(unsafe.Pointer(data + uintptr(cellOffset+i*2) + 1))) if pc < iCellFirst || pc > iCellLast { - return Xsqlite3CorruptError(tls, 70494) + return Xsqlite3CorruptError(tls, 70498) } sz = int32((*struct { f func(*libc.TLS, uintptr, uintptr) U16 })(unsafe.Pointer(&struct{ uintptr }{(*MemPage)(unsafe.Pointer(pPage)).FxCellSize})).f(tls, pPage, data+uintptr(pc))) if pc+sz > usableSize { - return Xsqlite3CorruptError(tls, 70499) + return Xsqlite3CorruptError(tls, 70503) } } return SQLITE_OK @@ -33536,7 +33538,7 @@ data = (*MemPage)(unsafe.Pointer(pPage)).FaData + uintptr((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset) if decodeFlags(tls, pPage, int32(*(*U8)(unsafe.Pointer(data)))) != 0 { - return Xsqlite3CorruptError(tls, 70531) + return Xsqlite3CorruptError(tls, 70535) } (*MemPage)(unsafe.Pointer(pPage)).FmaskPage = U16((*BtShared)(unsafe.Pointer(pBt)).FpageSize - U32(1)) @@ -33548,7 +33550,7 @@ (*MemPage)(unsafe.Pointer(pPage)).FnCell = U16(int32(*(*U8)(unsafe.Pointer(data + 3)))<<8 | int32(*(*U8)(unsafe.Pointer(data + 3 + 1)))) if U32((*MemPage)(unsafe.Pointer(pPage)).FnCell) > ((*BtShared)(unsafe.Pointer(pBt)).FpageSize-U32(8))/U32(6) { - return Xsqlite3CorruptError(tls, 70545) + return Xsqlite3CorruptError(tls, 70549) } (*MemPage)(unsafe.Pointer(pPage)).FnFree = -1 @@ -33651,7 +33653,7 @@ if !(pgno > btreePagecount(tls, pBt)) { goto __1 } - rc = Xsqlite3CorruptError(tls, 70700) + rc = Xsqlite3CorruptError(tls, 70704) goto getAndInitPage_error1 __1: ; @@ -33679,7 +33681,7 @@ if !(pCur != 0 && (int32((*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppPage)))).FnCell) < 1 || int32((*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppPage)))).FintKey) != int32((*BtCursor)(unsafe.Pointer(pCur)).FcurIntKey))) { goto __5 } - rc = Xsqlite3CorruptError(tls, 70721) + rc = Xsqlite3CorruptError(tls, 70725) goto getAndInitPage_error2 __5: ; @@ -33718,7 +33720,7 @@ if Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppPage)))).FpDbPage) > 1 { releasePage(tls, *(*uintptr)(unsafe.Pointer(ppPage))) 
*(*uintptr)(unsafe.Pointer(ppPage)) = uintptr(0) - return Xsqlite3CorruptError(tls, 70787) + return Xsqlite3CorruptError(tls, 70791) } (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppPage)))).FisInit = U8(0) } else { @@ -34601,7 +34603,7 @@ if !(Xsqlite3WritableSchema(tls, (*BtShared)(unsafe.Pointer(pBt)).Fdb) == 0) { goto __18 } - rc = Xsqlite3CorruptError(tls, 71722) + rc = Xsqlite3CorruptError(tls, 71726) goto page1_init_failed goto __19 __18: @@ -35016,7 +35018,7 @@ if int32(eType) == PTRMAP_OVERFLOW2 { if Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer(pPage)).FaData) != iFrom { - return Xsqlite3CorruptError(tls, 72143) + return Xsqlite3CorruptError(tls, 72147) } Xsqlite3Put4byte(tls, (*MemPage)(unsafe.Pointer(pPage)).FaData, iTo) } else { @@ -35042,7 +35044,7 @@ })(unsafe.Pointer(&struct{ uintptr }{(*MemPage)(unsafe.Pointer(pPage)).FxParseCell})).f(tls, pPage, pCell, bp) if U32((*CellInfo)(unsafe.Pointer(bp)).FnLocal) < (*CellInfo)(unsafe.Pointer(bp)).FnPayload { if pCell+uintptr((*CellInfo)(unsafe.Pointer(bp)).FnSize) > (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr((*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize) { - return Xsqlite3CorruptError(tls, 72162) + return Xsqlite3CorruptError(tls, 72166) } if iFrom == Xsqlite3Get4byte(tls, pCell+uintptr((*CellInfo)(unsafe.Pointer(bp)).FnSize)-uintptr(4)) { Xsqlite3Put4byte(tls, pCell+uintptr((*CellInfo)(unsafe.Pointer(bp)).FnSize)-uintptr(4), iTo) @@ -35051,7 +35053,7 @@ } } else { if pCell+uintptr(4) > (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr((*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize) { - return Xsqlite3CorruptError(tls, 72171) + return Xsqlite3CorruptError(tls, 72175) } if Xsqlite3Get4byte(tls, pCell) == iFrom { Xsqlite3Put4byte(tls, pCell, iTo) @@ -35062,7 +35064,7 @@ if i == nCell { if int32(eType) != PTRMAP_BTREE || Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr(int32((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset)+8)) != iFrom { - return Xsqlite3CorruptError(tls, 72183) + return Xsqlite3CorruptError(tls, 72187) } Xsqlite3Put4byte(tls, (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr(int32((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset)+8), iTo) } @@ -35078,7 +35080,7 @@ var pPager uintptr = (*BtShared)(unsafe.Pointer(pBt)).FpPager if iDbPage < Pgno(3) { - return Xsqlite3CorruptError(tls, 72218) + return Xsqlite3CorruptError(tls, 72222) } *(*int32)(unsafe.Pointer(bp)) = Xsqlite3PagerMovepage(tls, pPager, (*MemPage)(unsafe.Pointer(pDbPage)).FpDbPage, iFreePage, isCommit) @@ -35139,7 +35141,7 @@ return rc } if int32(*(*U8)(unsafe.Pointer(bp))) == PTRMAP_ROOTPAGE { - return Xsqlite3CorruptError(tls, 72316) + return Xsqlite3CorruptError(tls, 72320) } if int32(*(*U8)(unsafe.Pointer(bp))) == PTRMAP_FREEPAGE { @@ -35174,7 +35176,7 @@ releasePage(tls, *(*uintptr)(unsafe.Pointer(bp + 32))) if *(*Pgno)(unsafe.Pointer(bp + 40)) > dbSize { releasePage(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) - return Xsqlite3CorruptError(tls, 72368) + return Xsqlite3CorruptError(tls, 72372) } } @@ -35234,7 +35236,7 @@ var nFin Pgno = finalDbSize(tls, pBt, nOrig, nFree) if nOrig < nFin || nFree >= nOrig { - rc = Xsqlite3CorruptError(tls, 72436) + rc = Xsqlite3CorruptError(tls, 72440) } else if nFree > Pgno(0) { rc = saveAllCursors(tls, pBt, uint32(0), uintptr(0)) if rc == SQLITE_OK { @@ -35273,7 +35275,7 @@ nOrig = btreePagecount(tls, pBt) if ptrmapPageno(tls, pBt, nOrig) == nOrig || nOrig == 
U32(Xsqlite3PendingByte)/(*BtShared)(unsafe.Pointer(pBt)).FpageSize+U32(1) { - return Xsqlite3CorruptError(tls, 72487) + return Xsqlite3CorruptError(tls, 72491) } nFree = Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer((*BtShared)(unsafe.Pointer(pBt)).FpPage1)).FaData+36) @@ -35304,7 +35306,7 @@ } nFin = finalDbSize(tls, pBt, nOrig, nVac) if nFin > nOrig { - return Xsqlite3CorruptError(tls, 72514) + return Xsqlite3CorruptError(tls, 72518) } if nFin < nOrig { rc = saveAllCursors(tls, pBt, uint32(0), uintptr(0)) @@ -35645,7 +35647,7 @@ if iTable <= Pgno(1) { if iTable < Pgno(1) { - return Xsqlite3CorruptError(tls, 72978) + return Xsqlite3CorruptError(tls, 72982) } else if btreePagecount(tls, pBt) == Pgno(0) { iTable = Pgno(0) } @@ -35889,14 +35891,14 @@ var pBt uintptr = (*BtCursor)(unsafe.Pointer(pCur)).FpBt if int32((*BtCursor)(unsafe.Pointer(pCur)).Fix) >= int32((*MemPage)(unsafe.Pointer(pPage)).FnCell) { - return Xsqlite3CorruptError(tls, 73385) + return Xsqlite3CorruptError(tls, 73389) } getCellInfo(tls, pCur) aPayload = (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload if Uptr((int64(aPayload)-int64((*MemPage)(unsafe.Pointer(pPage)).FaData))/1) > Uptr((*BtShared)(unsafe.Pointer(pBt)).FusableSize-U32((*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal)) { - return Xsqlite3CorruptError(tls, 73400) + return Xsqlite3CorruptError(tls, 73404) } if offset < U32((*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal) { @@ -35941,7 +35943,7 @@ for *(*Pgno)(unsafe.Pointer(bp)) != 0 { if *(*Pgno)(unsafe.Pointer(bp)) > (*BtShared)(unsafe.Pointer(pBt)).FnPage { - return Xsqlite3CorruptError(tls, 73462) + return Xsqlite3CorruptError(tls, 73466) } *(*Pgno)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pCur)).FaOverflow + uintptr(iIdx)*4)) = *(*Pgno)(unsafe.Pointer(bp)) @@ -35990,7 +35992,7 @@ } if rc == SQLITE_OK && amt > U32(0) { - return Xsqlite3CorruptError(tls, 73547) + return Xsqlite3CorruptError(tls, 73551) } return rc } @@ -36070,7 +36072,7 @@ func moveToChild(tls *libc.TLS, pCur uintptr, newPgno U32) int32 { if int32((*BtCursor)(unsafe.Pointer(pCur)).FiPage) >= BTCURSOR_MAX_DEPTH-1 { - return Xsqlite3CorruptError(tls, 73684) + return Xsqlite3CorruptError(tls, 73688) } (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnSize = U16(0) *(*U8)(unsafe.Pointer(pCur + 1)) &= libc.Uint8FromInt32(libc.CplInt32(BTCF_ValidNKey | BTCF_ValidOvfl)) @@ -36161,7 +36163,7 @@ if !(int32((*MemPage)(unsafe.Pointer(pRoot)).FisInit) == 0 || libc.Bool32((*BtCursor)(unsafe.Pointer(pCur)).FpKeyInfo == uintptr(0)) != int32((*MemPage)(unsafe.Pointer(pRoot)).FintKey)) { goto __11 } - return Xsqlite3CorruptError(tls, 73823) + return Xsqlite3CorruptError(tls, 73827) __11: ; skip_init: @@ -36181,7 +36183,7 @@ if !((*MemPage)(unsafe.Pointer(pRoot)).Fpgno != Pgno(1)) { goto __16 } - return Xsqlite3CorruptError(tls, 73835) + return Xsqlite3CorruptError(tls, 73839) __16: ; subpage = Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer(pRoot)).FaData+uintptr(int32((*MemPage)(unsafe.Pointer(pRoot)).FhdrOffset)+8)) @@ -36391,7 +36393,7 @@ if !(pCell >= (*MemPage)(unsafe.Pointer(pPage)).FaDataEnd) { goto __21 } - return Xsqlite3CorruptError(tls, 74077) + return Xsqlite3CorruptError(tls, 74081) __21: ; goto __19 @@ -36595,7 +36597,7 @@ if !!(int32((*MemPage)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pCur)).FpPage)).FisInit) != 0) { goto __4 } - return Xsqlite3CorruptError(tls, 74273) + return Xsqlite3CorruptError(tls, 74277) __4: ; goto bypass_moveto_root @@ -36660,7 +36662,7 @@ if !(nCell < 2 || 
U32(nCell)/(*BtShared)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pCur)).FpBt)).FusableSize > (*BtShared)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pCur)).FpBt)).FnPage) { goto __17 } - rc = Xsqlite3CorruptError(tls, 74360) + rc = Xsqlite3CorruptError(tls, 74364) goto moveto_index_finish __17: ; @@ -36708,7 +36710,7 @@ if !((*UnpackedRecord)(unsafe.Pointer(pIdxKey)).FerrCode != 0) { goto __24 } - rc = Xsqlite3CorruptError(tls, 74392) + rc = Xsqlite3CorruptError(tls, 74396) __24: ; goto moveto_index_finish @@ -36827,7 +36829,7 @@ pPage = (*BtCursor)(unsafe.Pointer(pCur)).FpPage idx = int32(libc.PreIncUint16(&(*BtCursor)(unsafe.Pointer(pCur)).Fix, 1)) if !(int32((*MemPage)(unsafe.Pointer(pPage)).FisInit) != 0) || Xsqlite3FaultSim(tls, 412) != 0 { - return Xsqlite3CorruptError(tls, 74508) + return Xsqlite3CorruptError(tls, 74512) } if idx >= int32((*MemPage)(unsafe.Pointer(pPage)).FnCell) { @@ -36987,7 +36989,7 @@ if !(n >= mxPage) { goto __1 } - return Xsqlite3CorruptError(tls, 74688) + return Xsqlite3CorruptError(tls, 74692) __1: ; if !(n > U32(0)) { @@ -37052,7 +37054,7 @@ if !(iTrunk > mxPage || libc.PostIncUint32(&nSearch, 1) > n) { goto __16 } - rc = Xsqlite3CorruptError(tls, 74744) + rc = Xsqlite3CorruptError(tls, 74748) goto __17 __16: rc = btreeGetUnusedPage(tls, pBt, iTrunk, bp+8, 0) @@ -37088,7 +37090,7 @@ goto __22 } - rc = Xsqlite3CorruptError(tls, 74773) + rc = Xsqlite3CorruptError(tls, 74777) goto end_allocate_page goto __23 __22: @@ -37132,7 +37134,7 @@ if !(iNewTrunk > mxPage) { goto __32 } - rc = Xsqlite3CorruptError(tls, 74807) + rc = Xsqlite3CorruptError(tls, 74811) goto end_allocate_page __32: ; @@ -37244,7 +37246,7 @@ if !(iPage > mxPage || iPage < Pgno(2)) { goto __51 } - rc = Xsqlite3CorruptError(tls, 74872) + rc = Xsqlite3CorruptError(tls, 74876) goto end_allocate_page __51: ; @@ -37402,7 +37404,7 @@ if !(iPage < Pgno(2) || iPage > (*BtShared)(unsafe.Pointer(pBt)).FnPage) { goto __1 } - return Xsqlite3CorruptError(tls, 74999) + return Xsqlite3CorruptError(tls, 75003) __1: ; if !(pMemPage != 0) { @@ -37459,7 +37461,7 @@ if !(iTrunk > btreePagecount(tls, pBt)) { goto __10 } - *(*int32)(unsafe.Pointer(bp + 8)) = Xsqlite3CorruptError(tls, 75046) + *(*int32)(unsafe.Pointer(bp + 8)) = Xsqlite3CorruptError(tls, 75050) goto freepage_out __10: ; @@ -37475,7 +37477,7 @@ if !(nLeaf > (*BtShared)(unsafe.Pointer(pBt)).FusableSize/U32(4)-U32(2)) { goto __12 } - *(*int32)(unsafe.Pointer(bp + 8)) = Xsqlite3CorruptError(tls, 75057) + *(*int32)(unsafe.Pointer(bp + 8)) = Xsqlite3CorruptError(tls, 75061) goto freepage_out __12: ; @@ -37549,7 +37551,7 @@ var ovflPageSize U32 if pCell+uintptr((*CellInfo)(unsafe.Pointer(pInfo)).FnSize) > (*MemPage)(unsafe.Pointer(pPage)).FaDataEnd { - return Xsqlite3CorruptError(tls, 75146) + return Xsqlite3CorruptError(tls, 75150) } ovflPgno = Xsqlite3Get4byte(tls, pCell+uintptr((*CellInfo)(unsafe.Pointer(pInfo)).FnSize)-uintptr(4)) pBt = (*MemPage)(unsafe.Pointer(pPage)).FpBt @@ -37561,7 +37563,7 @@ *(*Pgno)(unsafe.Pointer(bp + 8)) = Pgno(0) *(*uintptr)(unsafe.Pointer(bp)) = uintptr(0) if ovflPgno < Pgno(2) || ovflPgno > btreePagecount(tls, pBt) { - return Xsqlite3CorruptError(tls, 75163) + return Xsqlite3CorruptError(tls, 75167) } if nOvfl != 0 { rc = getOverflowPage(tls, pBt, ovflPgno, bp, bp+8) @@ -37572,7 +37574,7 @@ if (*(*uintptr)(unsafe.Pointer(bp)) != 0 || libc.AssignPtrUintptr(bp, btreePageLookup(tls, pBt, ovflPgno)) != uintptr(0)) && Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FpDbPage) != 1 { - rc = 
Xsqlite3CorruptError(tls, 75183) + rc = Xsqlite3CorruptError(tls, 75187) } else { rc = freePage2(tls, pBt, *(*uintptr)(unsafe.Pointer(bp)), ovflPgno) } @@ -37737,7 +37739,7 @@ hdr = int32((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset) if pc+U32(sz) > (*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize { - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 75436) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 75440) return } rc = freeSpace(tls, pPage, uint16(pc), uint16(sz)) @@ -38016,12 +38018,12 @@ if Uptr(pCell) >= Uptr(aData+uintptr(j)) && Uptr(pCell) < Uptr(pEnd) { if Uptr(pCell+uintptr(sz)) > Uptr(pEnd) { - return Xsqlite3CorruptError(tls, 75737) + return Xsqlite3CorruptError(tls, 75741) } pCell = pTmp + uintptr((int64(pCell)-int64(aData))/1) } else if Uptr(pCell+uintptr(sz)) > Uptr(pSrcEnd) && Uptr(pCell) < Uptr(pSrcEnd) { - return Xsqlite3CorruptError(tls, 75742) + return Xsqlite3CorruptError(tls, 75746) } pData -= uintptr(sz) @@ -38029,7 +38031,7 @@ *(*U8)(unsafe.Pointer(pCellptr + 1)) = U8((int64(pData) - int64(aData)) / 1) pCellptr += uintptr(2) if pData < pCellptr { - return Xsqlite3CorruptError(tls, 75748) + return Xsqlite3CorruptError(tls, 75752) } libc.X__builtin___memmove_chk(tls, pData, pCell, uint64(sz), libc.X__builtin_object_size(tls, pData, 0)) @@ -38089,7 +38091,7 @@ if Uptr(*(*uintptr)(unsafe.Pointer((*CellArray)(unsafe.Pointer(pCArray)).FapCell + uintptr(i)*8))+uintptr(sz)) > Uptr(pEnd) && Uptr(*(*uintptr)(unsafe.Pointer((*CellArray)(unsafe.Pointer(pCArray)).FapCell + uintptr(i)*8))) < Uptr(pEnd) { - Xsqlite3CorruptError(tls, 75833) + Xsqlite3CorruptError(tls, 75837) return 1 } libc.X__builtin___memmove_chk(tls, pSlot, *(*uintptr)(unsafe.Pointer((*CellArray)(unsafe.Pointer(pCArray)).FapCell + uintptr(i)*8)), uint64(sz), libc.X__builtin_object_size(tls, pSlot, 0)) @@ -38178,7 +38180,7 @@ if !(nShift > nCell) { goto __2 } - return Xsqlite3CorruptError(tls, 75947) + return Xsqlite3CorruptError(tls, 75951) __2: ; libc.X__builtin___memmove_chk(tls, (*MemPage)(unsafe.Pointer(pPg)).FaCellIdx, (*MemPage)(unsafe.Pointer(pPg)).FaCellIdx+uintptr(nShift*2), uint64(nCell*2), libc.X__builtin_object_size(tls, (*MemPage)(unsafe.Pointer(pPg)).FaCellIdx, 0)) @@ -38294,7 +38296,7 @@ var pBt uintptr = (*MemPage)(unsafe.Pointer(pPage)).FpBt if int32((*MemPage)(unsafe.Pointer(pPage)).FnCell) == 0 { - return Xsqlite3CorruptError(tls, 76060) + return Xsqlite3CorruptError(tls, 76064) } *(*int32)(unsafe.Pointer(bp + 136)) = allocateBtreePage(tls, pBt, bp, bp+8, uint32(0), uint8(0)) @@ -38614,7 +38616,7 @@ if !(int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pOld)).FaData))) != int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp + 112)))).FaData)))) { goto __25 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76481) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76485) goto balance_cleanup __25: ; @@ -38625,7 +38627,7 @@ if !(limit < int32(*(*U16)(unsafe.Pointer(pOld + 28)))) { goto __27 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76505) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76509) goto balance_cleanup __27: ; @@ -38783,7 +38785,7 @@ if !(k > NB+2) { goto __55 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76606) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76610) goto balance_cleanup __55: ; @@ -38857,7 +38859,7 @@ }()) { goto __67 } - *(*int32)(unsafe.Pointer(bp + 172)) = 
Xsqlite3CorruptError(tls, 76639) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76643) goto balance_cleanup __67: ; @@ -38920,7 +38922,7 @@ }()) { goto __75 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76683) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76687) goto balance_cleanup __75: ; @@ -38948,7 +38950,7 @@ *(*int32)(unsafe.Pointer(bp + 172)) == SQLITE_OK) { goto __81 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76716) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76720) __81: ; if !(*(*int32)(unsafe.Pointer(bp + 172)) != 0) { @@ -39209,7 +39211,7 @@ if !(Uptr(pSrcEnd) >= Uptr(pCell1) && Uptr(pSrcEnd) < Uptr(pCell1+uintptr(sz2))) { goto __121 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76916) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76920) goto balance_cleanup __121: ; @@ -39399,7 +39401,7 @@ if pOther != pCur && int32((*BtCursor)(unsafe.Pointer(pOther)).FeState) == CURSOR_VALID && (*BtCursor)(unsafe.Pointer(pOther)).FpPage == (*BtCursor)(unsafe.Pointer(pCur)).FpPage { - return Xsqlite3CorruptError(tls, 77146) + return Xsqlite3CorruptError(tls, 77150) } } return SQLITE_OK @@ -39437,7 +39439,7 @@ break } } else if Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(pPage)).FpDbPage) > 1 { - rc = Xsqlite3CorruptError(tls, 77206) + rc = Xsqlite3CorruptError(tls, 77210) } else { var pParent uintptr = *(*uintptr)(unsafe.Pointer(pCur + 144 + uintptr(iPage-1)*8)) var iIdx int32 = int32(*(*U16)(unsafe.Pointer(pCur + 88 + uintptr(iPage-1)*2))) @@ -39543,7 +39545,7 @@ return rc } if Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FpDbPage) != 1 || (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FisInit != 0 { - rc = Xsqlite3CorruptError(tls, 77370) + rc = Xsqlite3CorruptError(tls, 77374) } else { if U32(iOffset)+ovflPageSize < U32(nTotal) { ovflPgno = Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FaData) @@ -39568,7 +39570,7 @@ if (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload+uintptr((*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal) > (*MemPage)(unsafe.Pointer(pPage)).FaDataEnd || (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload < (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr((*MemPage)(unsafe.Pointer(pPage)).FcellOffset) { - return Xsqlite3CorruptError(tls, 77398) + return Xsqlite3CorruptError(tls, 77402) } if int32((*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal) == nTotal { return btreeOverwriteContent(tls, pPage, (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload, pX, @@ -39638,7 +39640,7 @@ goto __3 } - return Xsqlite3CorruptError(tls, 77479) + return Xsqlite3CorruptError(tls, 77483) __3: ; __1: @@ -39751,7 +39753,7 @@ goto __21 } - *(*int32)(unsafe.Pointer(bp + 120)) = Xsqlite3CorruptError(tls, 77602) + *(*int32)(unsafe.Pointer(bp + 120)) = Xsqlite3CorruptError(tls, 77606) goto __22 __21: *(*int32)(unsafe.Pointer(bp + 120)) = btreeComputeFreeSpace(tls, pPage) @@ -39811,6 +39813,7 @@ __25: ; idx = int32((*BtCursor)(unsafe.Pointer(pCur)).Fix) + (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnSize = U16(0) if !(*(*int32)(unsafe.Pointer(bp)) == 0) { goto __31 } @@ -39818,7 +39821,7 @@ if !(idx >= int32((*MemPage)(unsafe.Pointer(pPage)).FnCell)) { goto __33 } - return Xsqlite3CorruptError(tls, 77640) + return Xsqlite3CorruptError(tls, 77645) __33: ; *(*int32)(unsafe.Pointer(bp + 120)) = Xsqlite3PagerWrite(tls, 
(*MemPage)(unsafe.Pointer(pPage)).FpDbPage) @@ -39856,13 +39859,13 @@ if !(oldCell < (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset)+uintptr(10)) { goto __39 } - return Xsqlite3CorruptError(tls, 77667) + return Xsqlite3CorruptError(tls, 77672) __39: ; if !(oldCell+uintptr(*(*int32)(unsafe.Pointer(bp + 124))) > (*MemPage)(unsafe.Pointer(pPage)).FaDataEnd) { goto __40 } - return Xsqlite3CorruptError(tls, 77670) + return Xsqlite3CorruptError(tls, 77675) __40: ; libc.X__builtin___memcpy_chk(tls, oldCell, newCell, uint64(*(*int32)(unsafe.Pointer(bp + 124))), libc.X__builtin_object_size(tls, oldCell, 0)) @@ -39893,7 +39896,6 @@ ; *(*int32)(unsafe.Pointer(bp + 120)) = insertCell(tls, pPage, idx, newCell, *(*int32)(unsafe.Pointer(bp + 124)), uintptr(0), uint32(0)) - (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnSize = U16(0) if !((*MemPage)(unsafe.Pointer(pPage)).FnOverflow != 0) { goto __44 } @@ -39968,7 +39970,7 @@ nIn = U32((*BtCursor)(unsafe.Pointer(pSrc)).Finfo.FnLocal) aIn = (*BtCursor)(unsafe.Pointer(pSrc)).Finfo.FpPayload if aIn+uintptr(nIn) > (*MemPage)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pSrc)).FpPage)).FaDataEnd { - return Xsqlite3CorruptError(tls, 77773) + return Xsqlite3CorruptError(tls, 77777) } nRem = (*BtCursor)(unsafe.Pointer(pSrc)).Finfo.FnPayload if nIn == nRem && nIn < U32((*MemPage)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pDest)).FpPage)).FmaxLocal) { @@ -39993,7 +39995,7 @@ if nRem > nIn { if aIn+uintptr(nIn)+uintptr(4) > (*MemPage)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pSrc)).FpPage)).FaDataEnd { - return Xsqlite3CorruptError(tls, 77798) + return Xsqlite3CorruptError(tls, 77802) } ovflIn = Xsqlite3Get4byte(tls, (*BtCursor)(unsafe.Pointer(pSrc)).Finfo.FpPayload+uintptr(nIn)) } @@ -40094,7 +40096,7 @@ return *(*int32)(unsafe.Pointer(bp + 24)) } } else { - return Xsqlite3CorruptError(tls, 77894) + return Xsqlite3CorruptError(tls, 77898) } } @@ -40102,11 +40104,11 @@ iCellIdx = int32((*BtCursor)(unsafe.Pointer(pCur)).Fix) pPage = (*BtCursor)(unsafe.Pointer(pCur)).FpPage if int32((*MemPage)(unsafe.Pointer(pPage)).FnCell) <= iCellIdx { - return Xsqlite3CorruptError(tls, 77903) + return Xsqlite3CorruptError(tls, 77907) } pCell = (*MemPage)(unsafe.Pointer(pPage)).FaData + uintptr(int32((*MemPage)(unsafe.Pointer(pPage)).FmaskPage)&(int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FaCellIdx + uintptr(2*iCellIdx))))<<8|int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FaCellIdx + uintptr(2*iCellIdx) + 1))))) if (*MemPage)(unsafe.Pointer(pPage)).FnFree < 0 && btreeComputeFreeSpace(tls, pPage) != 0 { - return Xsqlite3CorruptError(tls, 77907) + return Xsqlite3CorruptError(tls, 77911) } bPreserve = U8(libc.Bool32(int32(flags)&BTREE_SAVEPOSITION != 0)) @@ -40181,7 +40183,7 @@ } pCell = (*MemPage)(unsafe.Pointer(pLeaf)).FaData + uintptr(int32((*MemPage)(unsafe.Pointer(pLeaf)).FmaskPage)&(int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pLeaf)).FaCellIdx + uintptr(2*(int32((*MemPage)(unsafe.Pointer(pLeaf)).FnCell)-1)))))<<8|int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pLeaf)).FaCellIdx + uintptr(2*(int32((*MemPage)(unsafe.Pointer(pLeaf)).FnCell)-1)) + 1))))) if pCell < (*MemPage)(unsafe.Pointer(pLeaf)).FaData+4 { - return Xsqlite3CorruptError(tls, 77998) + return Xsqlite3CorruptError(tls, 78002) } nCell = int32((*struct { f func(*libc.TLS, uintptr, uintptr) U16 @@ -40250,7 +40252,7 @@ Xsqlite3BtreeGetMeta(tls, p, BTREE_LARGEST_ROOT_PAGE, bp) if *(*Pgno)(unsafe.Pointer(bp)) > btreePagecount(tls, pBt) 
{ - return Xsqlite3CorruptError(tls, 78114) + return Xsqlite3CorruptError(tls, 78118) } *(*Pgno)(unsafe.Pointer(bp))++ @@ -40279,7 +40281,7 @@ } *(*int32)(unsafe.Pointer(bp + 40)) = ptrmapGet(tls, pBt, *(*Pgno)(unsafe.Pointer(bp)), bp+32, bp+36) if int32(*(*U8)(unsafe.Pointer(bp + 32))) == PTRMAP_ROOTPAGE || int32(*(*U8)(unsafe.Pointer(bp + 32))) == PTRMAP_FREEPAGE { - *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3CorruptError(tls, 78162) + *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3CorruptError(tls, 78166) } if *(*int32)(unsafe.Pointer(bp + 40)) != SQLITE_OK { releasePage(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) @@ -40355,7 +40357,7 @@ if !(pgno > btreePagecount(tls, pBt)) { goto __1 } - return Xsqlite3CorruptError(tls, 78252) + return Xsqlite3CorruptError(tls, 78256) __1: ; *(*int32)(unsafe.Pointer(bp + 32)) = getAndInitPage(tls, pBt, pgno, bp, uintptr(0), 0) @@ -40369,7 +40371,7 @@ Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FpDbPage) != 1+libc.Bool32(pgno == Pgno(1))) { goto __3 } - *(*int32)(unsafe.Pointer(bp + 32)) = Xsqlite3CorruptError(tls, 78259) + *(*int32)(unsafe.Pointer(bp + 32)) = Xsqlite3CorruptError(tls, 78263) goto cleardatabasepage_out __3: ; @@ -40503,7 +40505,7 @@ var pBt uintptr = (*Btree)(unsafe.Pointer(p)).FpBt if iTable > btreePagecount(tls, pBt) { - return Xsqlite3CorruptError(tls, 78363) + return Xsqlite3CorruptError(tls, 78367) } *(*int32)(unsafe.Pointer(bp + 12)) = Xsqlite3BtreeClearTable(tls, p, int32(iTable), uintptr(0)) @@ -42972,7 +42974,7 @@ var rc int32 (*Mem)(unsafe.Pointer(pMem)).Fflags = U16(MEM_Null) if Xsqlite3BtreeMaxRecordSize(tls, pCur) < Sqlite3_int64(offset+amt) { - return Xsqlite3CorruptError(tls, 81630) + return Xsqlite3CorruptError(tls, 81634) } if SQLITE_OK == libc.AssignInt32(&rc, Xsqlite3VdbeMemClearAndResize(tls, pMem, int32(amt+U32(1)))) { rc = Xsqlite3BtreePayload(tls, pCur, offset, amt, (*Mem)(unsafe.Pointer(pMem)).Fz) @@ -43621,7 +43623,7 @@ return Xsqlite3GetVarint32(tls, a, bp) }()) if *(*int32)(unsafe.Pointer(bp)) > nRec || iHdr >= *(*int32)(unsafe.Pointer(bp)) { - return Xsqlite3CorruptError(tls, 82270) + return Xsqlite3CorruptError(tls, 82274) } iField = *(*int32)(unsafe.Pointer(bp)) for i = 0; i <= iCol; i++ { @@ -43636,14 +43638,14 @@ }()) if iHdr > *(*int32)(unsafe.Pointer(bp)) { - return Xsqlite3CorruptError(tls, 82276) + return Xsqlite3CorruptError(tls, 82280) } szField = int32(Xsqlite3VdbeSerialTypeLen(tls, *(*U32)(unsafe.Pointer(bp + 4)))) iField = iField + szField } if iField > nRec { - return Xsqlite3CorruptError(tls, 82282) + return Xsqlite3CorruptError(tls, 82286) } if pMem == uintptr(0) { pMem = libc.AssignPtrUintptr(ppVal, Xsqlite3ValueNew(tls, db)) @@ -45947,7 +45949,7 @@ return rc } if *(*int32)(unsafe.Pointer(bp)) != 0 { - return Xsqlite3CorruptError(tls, 86058) + return Xsqlite3CorruptError(tls, 86062) } (*VdbeCursor)(unsafe.Pointer(p)).FdeferredMoveto = U8(0) (*VdbeCursor)(unsafe.Pointer(p)).FcacheStatus = U32(CACHE_STALE) @@ -46498,7 +46500,7 @@ i = 0 } if d1 > uint32(nKey1) { - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 86985)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 86989)) return 0 } @@ -46563,7 +46565,7 @@ if d1+U32((*Mem)(unsafe.Pointer(bp+8)).Fn) > uint32(nKey1) || int32((*KeyInfo)(unsafe.Pointer(libc.AssignUintptr(&pKeyInfo, (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FpKeyInfo))).FnAllField) <= i { - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = 
U8(Xsqlite3CorruptError(tls, 87062)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87066)) return 0 } else if *(*uintptr)(unsafe.Pointer(pKeyInfo + 32 + uintptr(i)*8)) != 0 { (*Mem)(unsafe.Pointer(bp + 8)).Fenc = (*KeyInfo)(unsafe.Pointer(pKeyInfo)).Fenc @@ -46597,7 +46599,7 @@ var nStr int32 = int32((*(*U32)(unsafe.Pointer(bp + 64)) - U32(12)) / U32(2)) if d1+U32(nStr) > uint32(nKey1) { - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87092)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87096)) return 0 } else if int32((*Mem)(unsafe.Pointer(pRhs)).Fflags)&MEM_Zero != 0 { if !(isAllZero(tls, aKey1+uintptr(d1), nStr) != 0) { @@ -46647,7 +46649,7 @@ } idx1 = idx1 + U32(Xsqlite3VarintLen(tls, uint64(*(*U32)(unsafe.Pointer(bp + 64))))) if idx1 >= *(*U32)(unsafe.Pointer(bp + 4)) { - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87136)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87140)) return 0 } } @@ -46793,7 +46795,7 @@ if !(szHdr+nStr > nKey1) { goto __7 } - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87299)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87303)) return 0 __7: ; @@ -46964,7 +46966,7 @@ idx_rowid_corruption: ; Xsqlite3VdbeMemReleaseMalloc(tls, bp) - return Xsqlite3CorruptError(tls, 87457) + return Xsqlite3CorruptError(tls, 87461) } // Compare the key of the index entry that cursor pC is pointing to against @@ -46990,7 +46992,7 @@ if nCellKey <= int64(0) || nCellKey > int64(0x7fffffff) { *(*int32)(unsafe.Pointer(res)) = 0 - return Xsqlite3CorruptError(tls, 87490) + return Xsqlite3CorruptError(tls, 87494) } Xsqlite3VdbeMemInit(tls, bp, db, uint16(0)) rc = Xsqlite3VdbeMemFromBtreeZeroOffset(tls, pCur, U32(nCellKey), bp) @@ -47264,7 +47266,7 @@ var v uintptr = pStmt var db uintptr = (*Vdbe)(unsafe.Pointer(v)).Fdb if vdbeSafety(tls, v) != 0 { - return Xsqlite3MisuseError(tls, 87854) + return Xsqlite3MisuseError(tls, 87858) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) if (*Vdbe)(unsafe.Pointer(v)).FstartTime > int64(0) { @@ -47879,7 +47881,7 @@ var db uintptr if vdbeSafetyNotNull(tls, v) != 0 { - return Xsqlite3MisuseError(tls, 88544) + return Xsqlite3MisuseError(tls, 88548) } db = (*Vdbe)(unsafe.Pointer(v)).Fdb Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) @@ -48399,7 +48401,7 @@ var pVar uintptr if vdbeSafetyNotNull(tls, p) != 0 { - return Xsqlite3MisuseError(tls, 89208) + return Xsqlite3MisuseError(tls, 89212) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer((*Vdbe)(unsafe.Pointer(p)).Fdb)).Fmutex) if int32((*Vdbe)(unsafe.Pointer(p)).FeVdbeState) != VDBE_READY_STATE { @@ -48407,7 +48409,7 @@ Xsqlite3_mutex_leave(tls, (*Sqlite3)(unsafe.Pointer((*Vdbe)(unsafe.Pointer(p)).Fdb)).Fmutex) Xsqlite3_log(tls, SQLITE_MISUSE, ts+5760, libc.VaList(bp, (*Vdbe)(unsafe.Pointer(p)).FzSql)) - return Xsqlite3MisuseError(tls, 89216) + return Xsqlite3MisuseError(tls, 89220) } if i >= uint32((*Vdbe)(unsafe.Pointer(p)).FnVar) { Xsqlite3Error(tls, (*Vdbe)(unsafe.Pointer(p)).Fdb, SQLITE_RANGE) @@ -48812,7 +48814,7 @@ if !(!(p != 0) || (*PreUpdate)(unsafe.Pointer(p)).Fop == SQLITE_INSERT) { goto __1 } - rc = Xsqlite3MisuseError(tls, 89707) + rc = Xsqlite3MisuseError(tls, 89711) goto preupdate_old_out __1: ; @@ -48956,7 +48958,7 @@ if !(!(p != 0) || (*PreUpdate)(unsafe.Pointer(p)).Fop == 
SQLITE_DELETE) { goto __1 } - rc = Xsqlite3MisuseError(tls, 89809) + rc = Xsqlite3MisuseError(tls, 89813) goto preupdate_new_out __1: ; @@ -49400,10 +49402,6 @@ } else if int32((*Mem)(unsafe.Pointer(p)).Fflags)&MEM_Real != 0 { h = h + U64(Xsqlite3VdbeIntValue(tls, p)) } else if int32((*Mem)(unsafe.Pointer(p)).Fflags)&(MEM_Str|MEM_Blob) != 0 { - h = h + U64((*Mem)(unsafe.Pointer(p)).Fn) - if int32((*Mem)(unsafe.Pointer(p)).Fflags)&MEM_Zero != 0 { - h = h + U64(*(*int32)(unsafe.Pointer(p))) - } } } return h @@ -52052,7 +52050,7 @@ goto __9 goto __425 __424: - rc = Xsqlite3CorruptError(tls, 93317) + rc = Xsqlite3CorruptError(tls, 93320) goto abort_due_to_error __425: ; @@ -53812,7 +53810,7 @@ if !((*Op)(unsafe.Pointer(pOp)).Fp2 == 0) { goto __682 } - rc = Xsqlite3CorruptError(tls, 95560) + rc = Xsqlite3CorruptError(tls, 95563) goto __683 __682: goto jump_to_p2 @@ -54590,7 +54588,7 @@ if !((*Op)(unsafe.Pointer(pOp)).Fp5 != 0 && !(Xsqlite3WritableSchema(tls, db) != 0)) { goto __770 } - rc = Xsqlite3ReportError(tls, SQLITE_CORRUPT|int32(3)<<8, 96635, ts+6269) + rc = Xsqlite3ReportError(tls, SQLITE_CORRUPT|int32(3)<<8, 96638, ts+6269) goto abort_due_to_error __770: ; @@ -54700,7 +54698,7 @@ if !(nCellKey <= int64(0) || nCellKey > int64(0x7fffffff)) { goto __781 } - rc = Xsqlite3CorruptError(tls, 96840) + rc = Xsqlite3CorruptError(tls, 96843) goto abort_due_to_error __781: ; @@ -54894,7 +54892,7 @@ goto __803 } - rc = Xsqlite3CorruptError(tls, 97092) + rc = Xsqlite3CorruptError(tls, 97095) __803: ; Xsqlite3DbFreeNN(tls, db, zSql) @@ -56261,7 +56259,7 @@ if !(rc == SQLITE_IOERR|int32(33)<<8) { goto __957 } - rc = Xsqlite3CorruptError(tls, 99031) + rc = Xsqlite3CorruptError(tls, 99034) __957: ; __956: @@ -56781,7 +56779,7 @@ var db uintptr if p == uintptr(0) { - return Xsqlite3MisuseError(tls, 99516) + return Xsqlite3MisuseError(tls, 99519) } db = (*Incrblob)(unsafe.Pointer(p)).Fdb Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) @@ -56864,7 +56862,7 @@ var db uintptr if p == uintptr(0) { - return Xsqlite3MisuseError(tls, 99616) + return Xsqlite3MisuseError(tls, 99619) } db = (*Incrblob)(unsafe.Pointer(p)).Fdb Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) @@ -60304,14 +60302,10 @@ ; Xsqlite3WalkExpr(tls, pWalker, (*Expr)(unsafe.Pointer(pExpr)).FpLeft) if 0 == Xsqlite3ExprCanBeNull(tls, (*Expr)(unsafe.Pointer(pExpr)).FpLeft) && !(int32((*Parse)(unsafe.Pointer(pParse)).FeParseMode) >= PARSE_MODE_RENAME) { - if int32((*Expr)(unsafe.Pointer(pExpr)).Fop) == TK_NOTNULL { - *(*uintptr)(unsafe.Pointer(pExpr + 8)) = ts + 7167 - *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_IsTrue) - } else { - *(*uintptr)(unsafe.Pointer(pExpr + 8)) = ts + 7172 - *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_IsFalse) - } - (*Expr)(unsafe.Pointer(pExpr)).Fop = U8(TK_TRUEFALSE) + *(*int32)(unsafe.Pointer(pExpr + 8)) = libc.Bool32(int32((*Expr)(unsafe.Pointer(pExpr)).Fop) == TK_NOTNULL) + *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_IntValue) + (*Expr)(unsafe.Pointer(pExpr)).Fop = U8(TK_INTEGER) + i = 0 p = pNC __4: @@ -60355,7 +60349,7 @@ var pLeft uintptr = (*Expr)(unsafe.Pointer(pExpr)).FpLeft if (*NameContext)(unsafe.Pointer(pNC)).FncFlags&(NC_IdxExpr|NC_GenCol) != 0 { - notValidImpl(tls, pParse, pNC, ts+7178, uintptr(0), pExpr) + notValidImpl(tls, pParse, pNC, ts+7167, uintptr(0), pExpr) } pRight = (*Expr)(unsafe.Pointer(pExpr)).FpRight @@ -60419,7 +60413,7 @@ (*Expr)(unsafe.Pointer(pExpr)).FiTable = exprProbability(tls, (*ExprList_item)(unsafe.Pointer(pList+8+1*32)).FpExpr) if 
(*Expr)(unsafe.Pointer(pExpr)).FiTable < 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+7195, libc.VaList(bp, pExpr)) + ts+7184, libc.VaList(bp, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } } else { @@ -60435,7 +60429,7 @@ var auth int32 = Xsqlite3AuthCheck(tls, pParse, SQLITE_FUNCTION, uintptr(0), (*FuncDef)(unsafe.Pointer(pDef)).FzName, uintptr(0)) if auth != SQLITE_OK { if auth == SQLITE_DENY { - Xsqlite3ErrorMsg(tls, pParse, ts+7259, + Xsqlite3ErrorMsg(tls, pParse, ts+7248, libc.VaList(bp+8, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } @@ -60449,7 +60443,7 @@ } if (*FuncDef)(unsafe.Pointer(pDef)).FfuncFlags&U32(SQLITE_FUNC_CONSTANT) == U32(0) { if (*NameContext)(unsafe.Pointer(pNC)).FncFlags&(NC_IdxExpr|NC_PartIdx|NC_GenCol) != 0 { - notValidImpl(tls, pParse, pNC, ts+7295, uintptr(0), pExpr) + notValidImpl(tls, pParse, pNC, ts+7284, uintptr(0), pExpr) } } else { @@ -60472,30 +60466,30 @@ if 0 == libc.Bool32(int32((*Parse)(unsafe.Pointer(pParse)).FeParseMode) >= PARSE_MODE_RENAME) { if pDef != 0 && (*FuncDef)(unsafe.Pointer(pDef)).FxValue == uintptr(0) && pWin != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+7323, libc.VaList(bp+16, pExpr)) + ts+7312, libc.VaList(bp+16, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } else if is_agg != 0 && (*NameContext)(unsafe.Pointer(pNC)).FncFlags&NC_AllowAgg == 0 || is_agg != 0 && (*FuncDef)(unsafe.Pointer(pDef)).FfuncFlags&U32(SQLITE_FUNC_WINDOW) != 0 && !(pWin != 0) || is_agg != 0 && pWin != 0 && (*NameContext)(unsafe.Pointer(pNC)).FncFlags&NC_AllowWin == 0 { var zType uintptr if (*FuncDef)(unsafe.Pointer(pDef)).FfuncFlags&U32(SQLITE_FUNC_WINDOW) != 0 || pWin != 0 { - zType = ts + 7366 + zType = ts + 7355 } else { - zType = ts + 7373 + zType = ts + 7362 } - Xsqlite3ErrorMsg(tls, pParse, ts+7383, libc.VaList(bp+24, zType, pExpr)) + Xsqlite3ErrorMsg(tls, pParse, ts+7372, libc.VaList(bp+24, zType, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ is_agg = 0 } else if no_such_func != 0 && int32((*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).Finit.Fbusy) == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+7411, libc.VaList(bp+40, pExpr)) + Xsqlite3ErrorMsg(tls, pParse, ts+7400, libc.VaList(bp+40, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } else if wrong_num_args != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+7433, + Xsqlite3ErrorMsg(tls, pParse, ts+7422, libc.VaList(bp+48, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } else if is_agg == 0 && (*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_WinFunc) != U32(0) { Xsqlite3ErrorMsg(tls, pParse, - ts+7477, + ts+7466, libc.VaList(bp+56, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } @@ -60567,15 +60561,15 @@ var nRef int32 = (*NameContext)(unsafe.Pointer(pNC)).FnRef if (*NameContext)(unsafe.Pointer(pNC)).FncFlags&NC_SelfRef != 0 { - notValidImpl(tls, pParse, pNC, ts+7525, pExpr, pExpr) + notValidImpl(tls, pParse, pNC, ts+7514, pExpr, pExpr) } else { Xsqlite3WalkSelect(tls, pWalker, *(*uintptr)(unsafe.Pointer(pExpr + 32))) } if nRef != (*NameContext)(unsafe.Pointer(pNC)).FnRef { *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_VarSelect) - *(*int32)(unsafe.Pointer(pNC + 40)) |= NC_VarSelect } + *(*int32)(unsafe.Pointer(pNC + 40)) |= NC_Subquery } break @@ -60583,7 +60577,7 @@ case TK_VARIABLE: { if (*NameContext)(unsafe.Pointer(pNC)).FncFlags&(NC_IsCheck|NC_PartIdx|NC_IdxExpr|NC_GenCol) != 0 { - notValidImpl(tls, pParse, pNC, ts+7536, pExpr, pExpr) + notValidImpl(tls, pParse, pNC, ts+7525, pExpr, pExpr) } break @@ -60714,7 +60708,7 @@ defer tls.Free(24) 
Xsqlite3ErrorMsg(tls, pParse, - ts+7547, libc.VaList(bp, i, zType, mx)) + ts+7536, libc.VaList(bp, i, zType, mx)) Xsqlite3RecordErrorOffsetOfExpr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pError) } @@ -60734,7 +60728,7 @@ } db = (*Parse)(unsafe.Pointer(pParse)).Fdb if (*ExprList)(unsafe.Pointer(pOrderBy)).FnExpr > *(*int32)(unsafe.Pointer(db + 136 + 2*4)) { - Xsqlite3ErrorMsg(tls, pParse, ts+7603, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+7592, 0) return 1 } for i = 0; i < (*ExprList)(unsafe.Pointer(pOrderBy)).FnExpr; i++ { @@ -60769,7 +60763,7 @@ } if Xsqlite3ExprIsInteger(tls, pE, bp+8) != 0 { if *(*int32)(unsafe.Pointer(bp + 8)) <= 0 || *(*int32)(unsafe.Pointer(bp + 8)) > (*ExprList)(unsafe.Pointer(pEList)).FnExpr { - resolveOutOfRangeError(tls, pParse, ts+7637, i+1, (*ExprList)(unsafe.Pointer(pEList)).FnExpr, pE) + resolveOutOfRangeError(tls, pParse, ts+7626, i+1, (*ExprList)(unsafe.Pointer(pEList)).FnExpr, pE) return 1 } } else { @@ -60826,7 +60820,7 @@ for i = 0; i < (*ExprList)(unsafe.Pointer(pOrderBy)).FnExpr; i++ { if int32(*(*uint16)(unsafe.Pointer(pOrderBy + 8 + uintptr(i)*32 + 16 + 4))&0x4>>2) == 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+7643, libc.VaList(bp, i+1)) + ts+7632, libc.VaList(bp, i+1)) return 1 } } @@ -60854,7 +60848,7 @@ return 0 } if (*ExprList)(unsafe.Pointer(pOrderBy)).FnExpr > *(*int32)(unsafe.Pointer(db + 136 + 2*4)) { - Xsqlite3ErrorMsg(tls, pParse, ts+7704, libc.VaList(bp, zType)) + Xsqlite3ErrorMsg(tls, pParse, ts+7693, libc.VaList(bp, zType)) return 1 } pEList = (*Select)(unsafe.Pointer(pSelect)).FpEList @@ -61068,7 +61062,7 @@ *(*int32)(unsafe.Pointer(bp + 40)) |= NC_UEList if (*Select)(unsafe.Pointer(p)).FpHaving != 0 { if (*Select)(unsafe.Pointer(p)).FselFlags&U32(SF_Aggregate) == U32(0) { - Xsqlite3ErrorMsg(tls, pParse, ts+7735, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+7724, 0) return WRC_Abort } if Xsqlite3ResolveExprNames(tls, bp, (*Select)(unsafe.Pointer(p)).FpHaving) != 0 { @@ -61108,7 +61102,7 @@ if (*Select)(unsafe.Pointer(p)).FpOrderBy != uintptr(0) && isCompound <= nCompound && - resolveOrderGroupBy(tls, bp, p, (*Select)(unsafe.Pointer(p)).FpOrderBy, ts+7637) != 0 { + resolveOrderGroupBy(tls, bp, p, (*Select)(unsafe.Pointer(p)).FpOrderBy, ts+7626) != 0 { return WRC_Abort } if (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { @@ -61119,7 +61113,7 @@ if pGroupBy != 0 { var pItem uintptr - if resolveOrderGroupBy(tls, bp, p, pGroupBy, ts+7774) != 0 || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { + if resolveOrderGroupBy(tls, bp, p, pGroupBy, ts+7763) != 0 || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { return WRC_Abort } i = 0 @@ -61131,7 +61125,7 @@ { if (*Expr)(unsafe.Pointer((*ExprList_item)(unsafe.Pointer(pItem)).FpExpr)).Fflags&U32(EP_Agg) != U32(0) { Xsqlite3ErrorMsg(tls, pParse, - ts+7780, 0) + ts+7769, 0) return WRC_Abort } @@ -61995,7 +61989,7 @@ var mxHeight int32 = *(*int32)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb + 136 + 3*4)) if nHeight > mxHeight { Xsqlite3ErrorMsg(tls, pParse, - ts+7839, libc.VaList(bp, mxHeight)) + ts+7828, libc.VaList(bp, mxHeight)) rc = SQLITE_ERROR } return rc @@ -62244,10 +62238,10 @@ nExprElem = 1 } if nExprElem != nElem { - Xsqlite3ErrorMsg(tls, pParse, ts+7887, + Xsqlite3ErrorMsg(tls, pParse, ts+7876, libc.VaList(bp, nExprElem, func() uintptr { if nExprElem > 1 { - return ts + 7931 + return ts + 7920 } return ts + 1547 }(), nElem)) @@ -62288,7 +62282,7 @@ !(int32((*Parse)(unsafe.Pointer(pParse)).FeParseMode) >= PARSE_MODE_RENAME) { Xsqlite3ExprDeferredDelete(tls, pParse, pLeft) 
Xsqlite3ExprDeferredDelete(tls, pParse, pRight) - return Xsqlite3Expr(tls, db, TK_INTEGER, ts+7933) + return Xsqlite3Expr(tls, db, TK_INTEGER, ts+7922) } else { return Xsqlite3PExpr(tls, pParse, TK_AND, pLeft, pRight) } @@ -62314,7 +62308,7 @@ if pList != 0 && (*ExprList)(unsafe.Pointer(pList)).FnExpr > *(*int32)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb + 136 + 6*4)) && !(int32((*Parse)(unsafe.Pointer(pParse)).Fnested) != 0) { - Xsqlite3ErrorMsg(tls, pParse, ts+7935, libc.VaList(bp, pToken)) + Xsqlite3ErrorMsg(tls, pParse, ts+7924, libc.VaList(bp, pToken)) } *(*uintptr)(unsafe.Pointer(pNew + 32)) = pList *(*U32)(unsafe.Pointer(pNew + 4)) |= U32(EP_HasFunc) @@ -62342,7 +62336,7 @@ if (*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_FromDDL) != U32(0) { if (*FuncDef)(unsafe.Pointer(pDef)).FfuncFlags&U32(SQLITE_FUNC_DIRECT) != U32(0) || (*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).Fflags&uint64(SQLITE_TrustedSchema) == uint64(0) { - Xsqlite3ErrorMsg(tls, pParse, ts+7969, libc.VaList(bp, pExpr)) + Xsqlite3ErrorMsg(tls, pParse, ts+7958, libc.VaList(bp, pExpr)) } } } @@ -62389,7 +62383,7 @@ } if bOk == 0 || *(*I64)(unsafe.Pointer(bp + 8)) < int64(1) || *(*I64)(unsafe.Pointer(bp + 8)) > I64(*(*int32)(unsafe.Pointer(db + 136 + 9*4))) { - Xsqlite3ErrorMsg(tls, pParse, ts+7989, + Xsqlite3ErrorMsg(tls, pParse, ts+7978, libc.VaList(bp, *(*int32)(unsafe.Pointer(db + 136 + 9*4)))) Xsqlite3RecordErrorOffsetOfExpr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pExpr) return @@ -62414,7 +62408,7 @@ } (*Expr)(unsafe.Pointer(pExpr)).FiColumn = x if int32(x) > *(*int32)(unsafe.Pointer(db + 136 + 9*4)) { - Xsqlite3ErrorMsg(tls, pParse, ts+8032, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+8021, 0) Xsqlite3RecordErrorOffsetOfExpr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pExpr) } } @@ -62989,7 +62983,7 @@ if !(int32((*Expr)(unsafe.Pointer(pExpr)).Fop) != TK_SELECT && (*IdList)(unsafe.Pointer(pColumns)).FnId != libc.AssignInt32(&n, Xsqlite3ExprVectorSize(tls, pExpr))) { goto __3 } - Xsqlite3ErrorMsg(tls, pParse, ts+8055, + Xsqlite3ErrorMsg(tls, pParse, ts+8044, libc.VaList(bp, (*IdList)(unsafe.Pointer(pColumns)).FnId, n)) goto vector_append_error __3: @@ -63112,7 +63106,7 @@ var mx int32 = *(*int32)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb + 136 + 2*4)) if pEList != 0 && (*ExprList)(unsafe.Pointer(pEList)).FnExpr > mx { - Xsqlite3ErrorMsg(tls, pParse, ts+8085, libc.VaList(bp, zObject)) + Xsqlite3ErrorMsg(tls, pParse, ts+8074, libc.VaList(bp, zObject)) } } @@ -63168,10 +63162,10 @@ // "false" EP_IsFalse // anything else 0 func Xsqlite3IsTrueOrFalse(tls *libc.TLS, zIn uintptr) U32 { - if Xsqlite3StrICmp(tls, zIn, ts+7167) == 0 { + if Xsqlite3StrICmp(tls, zIn, ts+8097) == 0 { return U32(EP_IsTrue) } - if Xsqlite3StrICmp(tls, zIn, ts+7172) == 0 { + if Xsqlite3StrICmp(tls, zIn, ts+8102) == 0 { return U32(EP_IsFalse) } return U32(0) @@ -64246,7 +64240,7 @@ } if (*Select)(unsafe.Pointer(pSel)).FpLimit != 0 { var db uintptr = (*Parse)(unsafe.Pointer(pParse)).Fdb - pLimit = Xsqlite3Expr(tls, db, TK_INTEGER, ts+7933) + pLimit = Xsqlite3Expr(tls, db, TK_INTEGER, ts+7922) if pLimit != 0 { (*Expr)(unsafe.Pointer(pLimit)).FaffExpr = int8(SQLITE_AFF_NUMERIC) pLimit = Xsqlite3PExpr(tls, pParse, TK_NE, @@ -64684,6 +64678,7 @@ func Xsqlite3ExprCodeGeneratedColumn(tls *libc.TLS, pParse uintptr, pTab uintptr, pCol uintptr, regOut int32) { var iAddr int32 var v uintptr = (*Parse)(unsafe.Pointer(pParse)).FpVdbe + var nErr int32 = (*Parse)(unsafe.Pointer(pParse)).FnErr if 
(*Parse)(unsafe.Pointer(pParse)).FiSelfTab > 0 { iAddr = Xsqlite3VdbeAddOp3(tls, v, OP_IfNullRow, (*Parse)(unsafe.Pointer(pParse)).FiSelfTab-1, 0, regOut) @@ -64697,6 +64692,9 @@ if iAddr != 0 { Xsqlite3VdbeJumpHere(tls, v, iAddr) } + if (*Parse)(unsafe.Pointer(pParse)).FnErr > nErr { + (*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).FerrByteOffset = -1 + } } // Generate code to extract the value of the iCol-th column of a table. @@ -64915,6 +64913,7 @@ var p uintptr var v uintptr for p = (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr; p != 0; p = (*IndexedExpr)(unsafe.Pointer(p)).FpIENext { + var exprAff U8 var iDataCur int32 = (*IndexedExpr)(unsafe.Pointer(p)).FiDataCur if iDataCur < 0 { continue @@ -64928,6 +64927,14 @@ if Xsqlite3ExprCompare(tls, uintptr(0), pExpr, (*IndexedExpr)(unsafe.Pointer(p)).FpExpr, iDataCur) != 0 { continue } + + exprAff = U8(Xsqlite3ExprAffinity(tls, pExpr)) + if int32(exprAff) <= SQLITE_AFF_BLOB && int32((*IndexedExpr)(unsafe.Pointer(p)).Faff) != SQLITE_AFF_BLOB || + int32(exprAff) == SQLITE_AFF_TEXT && int32((*IndexedExpr)(unsafe.Pointer(p)).Faff) != SQLITE_AFF_TEXT || + int32(exprAff) >= SQLITE_AFF_NUMERIC && int32((*IndexedExpr)(unsafe.Pointer(p)).Faff) != SQLITE_AFF_NUMERIC { + continue + } + v = (*Parse)(unsafe.Pointer(pParse)).FpVdbe if (*IndexedExpr)(unsafe.Pointer(p)).FbMaybeNullRow != 0 { @@ -65701,7 +65708,7 @@ if !((*Expr)(unsafe.Pointer(pExpr)).FiTable != n1) { goto __122 } - Xsqlite3ErrorMsg(tls, pParse, ts+8055, + Xsqlite3ErrorMsg(tls, pParse, ts+8044, libc.VaList(bp+24, (*Expr)(unsafe.Pointer(pExpr)).FiTable, n1)) __122: ; @@ -65723,11 +65730,10 @@ return target __50: - if !(!((*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_Collate) != U32(0)) && - (*Expr)(unsafe.Pointer(pExpr)).FpLeft != 0 && - int32((*Expr)(unsafe.Pointer((*Expr)(unsafe.Pointer(pExpr)).FpLeft)).Fop) == TK_FUNCTION) { + if !!((*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_Collate) != U32(0)) { goto __123 } + inReg = Xsqlite3ExprCodeTarget(tls, pParse, (*Expr)(unsafe.Pointer(pExpr)).FpLeft, target) if !(inReg != target) { goto __125 @@ -65798,13 +65804,19 @@ ; __127: ; - addrINR = Xsqlite3VdbeAddOp1(tls, v, OP_IfNullRow, (*Expr)(unsafe.Pointer(pExpr)).FiTable) + addrINR = Xsqlite3VdbeAddOp3(tls, v, OP_IfNullRow, (*Expr)(unsafe.Pointer(pExpr)).FiTable, 0, target) (*Parse)(unsafe.Pointer(pParse)).FokConstFactor = U8(0) inReg = Xsqlite3ExprCodeTarget(tls, pParse, (*Expr)(unsafe.Pointer(pExpr)).FpLeft, target) (*Parse)(unsafe.Pointer(pParse)).FokConstFactor = okConstFactor + if !(inReg != target) { + goto __130 + } + Xsqlite3VdbeAddOp2(tls, v, OP_SCopy, inReg, target) + inReg = target +__130: + ; Xsqlite3VdbeJumpHere(tls, v, addrINR) - Xsqlite3VdbeChangeP3(tls, v, addrINR, inReg) goto __5 __56: @@ -65817,15 +65829,15 @@ nExpr = (*ExprList)(unsafe.Pointer(pEList)).FnExpr endLabel = Xsqlite3VdbeMakeLabel(tls, pParse) if !(libc.AssignUintptr(&pX, (*Expr)(unsafe.Pointer(pExpr)).FpLeft) != uintptr(0)) { - goto __130 + goto __131 } pDel = Xsqlite3ExprDup(tls, db1, pX, 0) if !((*Sqlite3)(unsafe.Pointer(db1)).FmallocFailed != 0) { - goto __131 + goto __132 } Xsqlite3ExprDelete(tls, db1, pDel) goto __5 -__131: +__132: ; exprToRegister(tls, pDel, exprCodeVector(tls, pParse, pDel, bp+40)) @@ -65835,22 +65847,22 @@ pTest = bp + 120 *(*int32)(unsafe.Pointer(bp + 40)) = 0 -__130: +__131: ; i1 = 0 -__132: +__133: if !(i1 < nExpr-1) { - goto __134 + goto __135 } if !(pX != 0) { - goto __135 + goto __136 } (*Expr)(unsafe.Pointer(bp + 120)).FpRight = (*ExprList_item)(unsafe.Pointer(aListelem 
+ uintptr(i1)*32)).FpExpr - goto __136 -__135: - pTest = (*ExprList_item)(unsafe.Pointer(aListelem + uintptr(i1)*32)).FpExpr + goto __137 __136: + pTest = (*ExprList_item)(unsafe.Pointer(aListelem + uintptr(i1)*32)).FpExpr +__137: ; nextCase = Xsqlite3VdbeMakeLabel(tls, pParse) @@ -65859,21 +65871,21 @@ Xsqlite3ExprCode(tls, pParse, (*ExprList_item)(unsafe.Pointer(aListelem+uintptr(i1+1)*32)).FpExpr, target) Xsqlite3VdbeGoto(tls, v, endLabel) Xsqlite3VdbeResolveLabel(tls, v, nextCase) - goto __133 -__133: - i1 = i1 + 2 - goto __132 goto __134 __134: + i1 = i1 + 2 + goto __133 + goto __135 +__135: ; if !(nExpr&1 != 0) { - goto __137 + goto __138 } Xsqlite3ExprCode(tls, pParse, (*ExprList_item)(unsafe.Pointer(pEList+8+uintptr(nExpr-1)*32)).FpExpr, target) - goto __138 -__137: - Xsqlite3VdbeAddOp2(tls, v, OP_Null, 0, target) + goto __139 __138: + Xsqlite3VdbeAddOp2(tls, v, OP_Null, 0, target) +__139: ; Xsqlite3ExprDelete(tls, db1, pDel) setDoNotMergeFlagOnCopy(tls, v) @@ -65883,27 +65895,27 @@ __57: ; if !(!(int32((*Parse)(unsafe.Pointer(pParse)).FpTriggerTab) != 0) && !(int32((*Parse)(unsafe.Pointer(pParse)).Fnested) != 0)) { - goto __139 + goto __140 } Xsqlite3ErrorMsg(tls, pParse, ts+8485, 0) return 0 -__139: +__140: ; if !(int32((*Expr)(unsafe.Pointer(pExpr)).FaffExpr) == OE_Abort) { - goto __140 + goto __141 } Xsqlite3MayAbort(tls, pParse) -__140: +__141: ; if !(int32((*Expr)(unsafe.Pointer(pExpr)).FaffExpr) == OE_Ignore) { - goto __141 + goto __142 } Xsqlite3VdbeAddOp4(tls, v, OP_Halt, SQLITE_OK, OE_Ignore, 0, *(*uintptr)(unsafe.Pointer(pExpr + 8)), 0) - goto __142 -__141: + goto __143 +__142: Xsqlite3HaltConstraint(tls, pParse, func() int32 { if (*Parse)(unsafe.Pointer(pParse)).FpTriggerTab != 0 { @@ -65912,7 +65924,7 @@ return SQLITE_ERROR }(), int32((*Expr)(unsafe.Pointer(pExpr)).FaffExpr), *(*uintptr)(unsafe.Pointer(pExpr + 8)), int8(0), uint8(0)) -__142: +__143: ; goto __5 @@ -68583,7 +68595,7 @@ return SQLITE_NOMEM } if Xsqlite3_strnicmp(tls, zSql, ts+11325, 7) != 0 { - return Xsqlite3CorruptError(tls, 113494) + return Xsqlite3CorruptError(tls, 113516) } (*Sqlite3)(unsafe.Pointer(db)).Finit.FiDb = func() uint8 { if bTemp != 0 { @@ -68600,7 +68612,7 @@ } if rc == SQLITE_OK && ((*Parse)(unsafe.Pointer(p)).FpNewTable == uintptr(0) && (*Parse)(unsafe.Pointer(p)).FpNewIndex == uintptr(0) && (*Parse)(unsafe.Pointer(p)).FpNewTrigger == uintptr(0)) { - rc = Xsqlite3CorruptError(tls, 113505) + rc = Xsqlite3CorruptError(tls, 113527) } (*Sqlite3)(unsafe.Pointer(db)).Finit.FiDb = U8(0) @@ -69520,7 +69532,7 @@ goto __2 } - rc = Xsqlite3CorruptError(tls, 114441) + rc = Xsqlite3CorruptError(tls, 114463) goto drop_column_done __2: ; @@ -73884,6 +73896,12 @@ pExpr = Xsqlite3PExpr(tls, pParse, TK_UPLUS, pExpr, uintptr(0)) __11: ; + if !(pExpr != 0 && int32((*Expr)(unsafe.Pointer(pExpr)).Fop) != TK_RAISE) { + goto __12 + } + (*Expr)(unsafe.Pointer(pExpr)).FaffExpr = (*Column)(unsafe.Pointer(pCol)).Faffinity +__12: + ; Xsqlite3ColumnSetExpr(tls, pParse, pTab, pCol, pExpr) pExpr = uintptr(0) goto generated_done @@ -75048,7 +75066,7 @@ if Xsqlite3_strnicmp(tls, (*Table)(unsafe.Pointer(pTab)).FzName+uintptr(7), ts+3279, 4) == 0 { return 0 } - if Xsqlite3_strnicmp(tls, (*Table)(unsafe.Pointer(pTab)).FzName+uintptr(7), ts+7536, 10) == 0 { + if Xsqlite3_strnicmp(tls, (*Table)(unsafe.Pointer(pTab)).FzName+uintptr(7), ts+7525, 10) == 0 { return 0 } return 1 @@ -76294,7 +76312,7 @@ goto __101 } Xsqlite3ErrorMsg(tls, pParse, ts+14543, 0) - (*Parse)(unsafe.Pointer(pParse)).Frc = Xsqlite3CorruptError(tls, 
121835) + (*Parse)(unsafe.Pointer(pParse)).Frc = Xsqlite3CorruptError(tls, 121859) goto exit_create_index __101: ; @@ -78339,7 +78357,7 @@ goto __16 __15: wcf = U16(WHERE_ONEPASS_DESIRED | WHERE_DUPLICATES_OK) - if !((*NameContext)(unsafe.Pointer(bp+16)).FncFlags&NC_VarSelect != 0) { + if !((*NameContext)(unsafe.Pointer(bp+16)).FncFlags&NC_Subquery != 0) { goto __23 } bComplex = 1 @@ -84804,7 +84822,7 @@ if !!(Xsqlite3SafetyCheckOk(tls, db) != 0) { goto __1 } - return Xsqlite3MisuseError(tls, 131895) + return Xsqlite3MisuseError(tls, 131931) __1: ; if !(zSql == uintptr(0)) { @@ -86206,7 +86224,7 @@ } else if (*FuncDef)(unsafe.Pointer(p)).FxFinalize != uintptr(0) { zType = ts + 17935 } else { - zType = ts + 7931 + zType = ts + 7920 } Xsqlite3VdbeMultiLoad(tls, v, 1, ts+17937, libc.VaList(bp, (*FuncDef)(unsafe.Pointer(p)).FzName, isBuiltin, @@ -86373,6 +86391,7 @@ var zErr2 uintptr var k3 int32 var pCheck uintptr + var jmp7 int32 var jmp6 int32 var iCol1 int32 var uniqOk int32 @@ -87730,7 +87749,7 @@ goto __223 } pMod = (*HashElem)(unsafe.Pointer(j1)).Fdata - Xsqlite3VdbeMultiLoad(tls, v, 1, ts+7931, libc.VaList(bp+264, (*Module)(unsafe.Pointer(pMod)).FzName)) + Xsqlite3VdbeMultiLoad(tls, v, 1, ts+7920, libc.VaList(bp+264, (*Module)(unsafe.Pointer(pMod)).FzName)) goto __222 __222: j1 = (*HashElem)(unsafe.Pointer(j1)).Fnext @@ -87746,7 +87765,7 @@ if !(i6 < int32(uint64(unsafe.Sizeof(aPragmaName))/uint64(unsafe.Sizeof(PragmaName{})))) { goto __226 } - Xsqlite3VdbeMultiLoad(tls, v, 1, ts+7931, libc.VaList(bp+272, aPragmaName[i6].FzName)) + Xsqlite3VdbeMultiLoad(tls, v, 1, ts+7920, libc.VaList(bp+272, aPragmaName[i6].FzName)) goto __225 __225: i6++ @@ -88551,80 +88570,94 @@ jmp4 = integrityCheckResultRow(tls, v) Xsqlite3VdbeJumpHere(tls, v, jmp21) + if !((*Table)(unsafe.Pointer(pTab9)).FtabFlags&U32(TF_WithoutRowid) == U32(0)) { + goto __351 + } + Xsqlite3VdbeAddOp2(tls, v, OP_IdxRowid, *(*int32)(unsafe.Pointer(bp + 624))+j4, 3) + jmp7 = Xsqlite3VdbeAddOp3(tls, v, OP_Eq, 3, 0, r1+int32((*Index)(unsafe.Pointer(pIdx5)).FnColumn)-1) + + Xsqlite3VdbeLoadString(tls, v, 3, + ts+18381) + Xsqlite3VdbeAddOp3(tls, v, OP_Concat, 7, 3, 3) + Xsqlite3VdbeLoadString(tls, v, 4, ts+18417) + Xsqlite3VdbeGoto(tls, v, jmp5-1) + Xsqlite3VdbeJumpHere(tls, v, jmp7) +__351: + ; label6 = 0 kk = 0 -__351: +__352: if !(kk < int32((*Index)(unsafe.Pointer(pIdx5)).FnKeyCol)) { - goto __353 + goto __354 } if !(*(*uintptr)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx5)).FazColl + uintptr(kk)*8)) == uintptr(unsafe.Pointer(&Xsqlite3StrBINARY))) { - goto __354 + goto __355 } - goto __352 -__354: + goto __353 +__355: ; if !(label6 == 0) { - goto __355 + goto __356 } label6 = Xsqlite3VdbeMakeLabel(tls, pParse) -__355: +__356: ; Xsqlite3VdbeAddOp3(tls, v, OP_Column, *(*int32)(unsafe.Pointer(bp + 624))+j4, kk, 3) Xsqlite3VdbeAddOp3(tls, v, OP_Ne, 3, label6, r1+kk) - goto __352 -__352: - kk++ - goto __351 goto __353 __353: + kk++ + goto __352 + goto __354 +__354: ; if !(label6 != 0) { - goto __356 + goto __357 } jmp6 = Xsqlite3VdbeAddOp0(tls, v, OP_Goto) Xsqlite3VdbeResolveLabel(tls, v, label6) Xsqlite3VdbeLoadString(tls, v, 3, ts+18355) Xsqlite3VdbeAddOp3(tls, v, OP_Concat, 7, 3, 3) - Xsqlite3VdbeLoadString(tls, v, 4, ts+18381) + Xsqlite3VdbeLoadString(tls, v, 4, ts+18428) Xsqlite3VdbeGoto(tls, v, jmp5-1) Xsqlite3VdbeJumpHere(tls, v, jmp6) -__356: +__357: ; if !(int32((*Index)(unsafe.Pointer(pIdx5)).FonError) != OE_None) { - goto __357 + goto __358 } uniqOk = Xsqlite3VdbeMakeLabel(tls, pParse) kk = 0 -__358: +__359: if !(kk < 
int32((*Index)(unsafe.Pointer(pIdx5)).FnKeyCol)) { - goto __360 + goto __361 } iCol1 = int32(*(*I16)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx5)).FaiColumn + uintptr(kk)*2))) if !(iCol1 >= 0 && uint32(int32(*(*uint8)(unsafe.Pointer((*Table)(unsafe.Pointer(pTab9)).FaCol + uintptr(iCol1)*24 + 8))&0xf>>0)) != 0) { - goto __361 + goto __362 } - goto __359 -__361: + goto __360 +__362: ; Xsqlite3VdbeAddOp2(tls, v, OP_IsNull, r1+kk, uniqOk) - goto __359 -__359: - kk++ - goto __358 goto __360 __360: + kk++ + goto __359 + goto __361 +__361: ; jmp61 = Xsqlite3VdbeAddOp1(tls, v, OP_Next, *(*int32)(unsafe.Pointer(bp + 624))+j4) Xsqlite3VdbeGoto(tls, v, uniqOk) Xsqlite3VdbeJumpHere(tls, v, jmp61) Xsqlite3VdbeAddOp4Int(tls, v, OP_IdxGT, *(*int32)(unsafe.Pointer(bp + 624))+j4, uniqOk, r1, int32((*Index)(unsafe.Pointer(pIdx5)).FnKeyCol)) - Xsqlite3VdbeLoadString(tls, v, 3, ts+18408) + Xsqlite3VdbeLoadString(tls, v, 3, ts+18455) Xsqlite3VdbeGoto(tls, v, jmp5) Xsqlite3VdbeResolveLabel(tls, v, uniqOk) -__357: +__358: ; Xsqlite3VdbeJumpHere(tls, v, jmp4) Xsqlite3ResolvePartIdxLabel(tls, pParse, *(*int32)(unsafe.Pointer(bp + 640))) @@ -88641,20 +88674,20 @@ Xsqlite3VdbeAddOp2(tls, v, OP_Next, *(*int32)(unsafe.Pointer(bp + 620)), loopTop) Xsqlite3VdbeJumpHere(tls, v, loopTop-1) if !!(isQuick != 0) { - goto __362 + goto __363 } - Xsqlite3VdbeLoadString(tls, v, 2, ts+18435) + Xsqlite3VdbeLoadString(tls, v, 2, ts+18482) j4 = 0 pIdx5 = (*Table)(unsafe.Pointer(pTab9)).FpIndex -__363: +__364: if !(pIdx5 != 0) { - goto __365 + goto __366 } if !(pPk1 == pIdx5) { - goto __366 + goto __367 } - goto __364 -__366: + goto __365 +__367: ; Xsqlite3VdbeAddOp2(tls, v, OP_Count, *(*int32)(unsafe.Pointer(bp + 624))+j4, 3) addr1 = Xsqlite3VdbeAddOp3(tls, v, OP_Eq, 8+j4, 0, 3) @@ -88663,21 +88696,21 @@ Xsqlite3VdbeAddOp3(tls, v, OP_Concat, 4, 2, 3) integrityCheckResultRow(tls, v) Xsqlite3VdbeJumpHere(tls, v, addr1) - goto __364 -__364: - pIdx5 = (*Index)(unsafe.Pointer(pIdx5)).FpNext - j4++ - goto __363 goto __365 __365: + pIdx5 = (*Index)(unsafe.Pointer(pIdx5)).FpNext + j4++ + goto __364 + goto __366 +__366: ; if !(pPk1 != 0) { - goto __367 + goto __368 } Xsqlite3ReleaseTempRange(tls, pParse, r2, int32((*Index)(unsafe.Pointer(pPk1)).FnKeyCol)) -__367: +__368: ; -__362: +__363: ; goto __297 __297: @@ -88695,14 +88728,14 @@ ; aOp2 = Xsqlite3VdbeAddOpList(tls, v, int32(uint64(unsafe.Sizeof(endCode))/uint64(unsafe.Sizeof(VdbeOpList{}))), uintptr(unsafe.Pointer(&endCode)), iLn5) if !(aOp2 != 0) { - goto __368 + goto __369 } (*VdbeOp)(unsafe.Pointer(aOp2)).Fp2 = 1 - *(*int32)(unsafe.Pointer(bp + 616)) (*VdbeOp)(unsafe.Pointer(aOp2 + 2*24)).Fp4type = int8(-1) - *(*uintptr)(unsafe.Pointer(aOp2 + 2*24 + 16)) = ts + 18464 + *(*uintptr)(unsafe.Pointer(aOp2 + 2*24 + 16)) = ts + 18511 (*VdbeOp)(unsafe.Pointer(aOp2 + 5*24)).Fp4type = int8(-1) *(*uintptr)(unsafe.Pointer(aOp2 + 5*24 + 16)) = Xsqlite3ErrStr(tls, SQLITE_CORRUPT) -__368: +__369: ; Xsqlite3VdbeChangeP3(tls, v, 0, Xsqlite3VdbeCurrentAddr(tls, v)-2) @@ -88710,27 +88743,27 @@ __46: if !!(zRight != 0) { - goto __369 + goto __370 } if !(Xsqlite3ReadSchema(tls, pParse) != 0) { - goto __371 + goto __372 } goto pragma_out -__371: +__372: ; returnSingleText(tls, v, encnames1[(*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).Fenc].FzName) - goto __370 -__369: + goto __371 +__370: if !((*Sqlite3)(unsafe.Pointer(db)).FmDbFlags&U32(DBFLAG_EncodingFixed) == U32(0)) { - goto __372 + goto __373 } pEnc = uintptr(unsafe.Pointer(&encnames1)) -__373: +__374: if 
!((*EncName)(unsafe.Pointer(pEnc)).FzName != 0) { - goto __375 + goto __376 } if !(0 == Xsqlite3StrICmp(tls, zRight, (*EncName)(unsafe.Pointer(pEnc)).FzName)) { - goto __376 + goto __377 } if (*EncName)(unsafe.Pointer(pEnc)).Fenc != 0 { enc = (*EncName)(unsafe.Pointer(pEnc)).Fenc @@ -88739,25 +88772,25 @@ } (*Schema)(unsafe.Pointer((*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb)).FpSchema)).Fenc = enc Xsqlite3SetTextEncoding(tls, db, enc) - goto __375 -__376: + goto __376 +__377: ; - goto __374 -__374: - pEnc += 16 - goto __373 goto __375 __375: + pEnc += 16 + goto __374 + goto __376 +__376: ; if !!(int32((*EncName)(unsafe.Pointer(pEnc)).FzName) != 0) { - goto __377 + goto __378 } - Xsqlite3ErrorMsg(tls, pParse, ts+18467, libc.VaList(bp+456, zRight)) -__377: + Xsqlite3ErrorMsg(tls, pParse, ts+18514, libc.VaList(bp+456, zRight)) +__378: ; -__372: +__373: ; -__370: +__371: ; goto __15 @@ -88765,15 +88798,15 @@ iCookie = int32((*PragmaName)(unsafe.Pointer(pPragma)).FiArg) Xsqlite3VdbeUsesBtree(tls, v, iDb) if !(zRight != 0 && int32((*PragmaName)(unsafe.Pointer(pPragma)).FmPragFlg)&PragFlg_ReadOnly == 0) { - goto __378 + goto __379 } aOp3 = Xsqlite3VdbeAddOpList(tls, v, int32(uint64(unsafe.Sizeof(setCookie))/uint64(unsafe.Sizeof(VdbeOpList{}))), uintptr(unsafe.Pointer(&setCookie)), 0) if !(0 != 0) { - goto __380 + goto __381 } goto __15 -__380: +__381: ; (*VdbeOp)(unsafe.Pointer(aOp3)).Fp1 = iDb (*VdbeOp)(unsafe.Pointer(aOp3 + 1*24)).Fp1 = iDb @@ -88781,41 +88814,41 @@ (*VdbeOp)(unsafe.Pointer(aOp3 + 1*24)).Fp3 = Xsqlite3Atoi(tls, zRight) (*VdbeOp)(unsafe.Pointer(aOp3 + 1*24)).Fp5 = U16(1) if !(iCookie == BTREE_SCHEMA_VERSION && (*Sqlite3)(unsafe.Pointer(db)).Fflags&uint64(SQLITE_Defensive) != uint64(0)) { - goto __381 + goto __382 } (*VdbeOp)(unsafe.Pointer(aOp3 + 1*24)).Fopcode = U8(OP_Noop) -__381: +__382: ; - goto __379 -__378: + goto __380 +__379: ; aOp4 = Xsqlite3VdbeAddOpList(tls, v, int32(uint64(unsafe.Sizeof(readCookie))/uint64(unsafe.Sizeof(VdbeOpList{}))), uintptr(unsafe.Pointer(&readCookie)), 0) if !(0 != 0) { - goto __382 + goto __383 } goto __15 -__382: +__383: ; (*VdbeOp)(unsafe.Pointer(aOp4)).Fp1 = iDb (*VdbeOp)(unsafe.Pointer(aOp4 + 1*24)).Fp1 = iDb (*VdbeOp)(unsafe.Pointer(aOp4 + 1*24)).Fp3 = iCookie Xsqlite3VdbeReusable(tls, v) -__379: +__380: ; goto __15 __48: i10 = 0 (*Parse)(unsafe.Pointer(pParse)).FnMem = 1 -__383: +__384: if !(libc.AssignUintptr(&zOpt, Xsqlite3_compileoption_get(tls, libc.PostIncInt32(&i10, 1))) != uintptr(0)) { - goto __384 + goto __385 } Xsqlite3VdbeLoadString(tls, v, 1, zOpt) Xsqlite3VdbeAddOp2(tls, v, OP_ResultRow, 1, 1) - goto __383 -__384: + goto __384 +__385: ; Xsqlite3VdbeReusable(tls, v) @@ -88830,31 +88863,31 @@ }() eMode2 = SQLITE_CHECKPOINT_PASSIVE if !(zRight != 0) { - goto __385 + goto __386 } if !(Xsqlite3StrICmp(tls, zRight, ts+17767) == 0) { - goto __386 + goto __387 } eMode2 = SQLITE_CHECKPOINT_FULL - goto __387 -__386: - if !(Xsqlite3StrICmp(tls, zRight, ts+18492) == 0) { - goto __388 + goto __388 +__387: + if !(Xsqlite3StrICmp(tls, zRight, ts+18539) == 0) { + goto __389 } eMode2 = SQLITE_CHECKPOINT_RESTART - goto __389 -__388: + goto __390 +__389: if !(Xsqlite3StrICmp(tls, zRight, ts+17920) == 0) { - goto __390 + goto __391 } eMode2 = SQLITE_CHECKPOINT_TRUNCATE -__390: +__391: ; -__389: +__390: ; -__387: +__388: ; -__385: +__386: ; (*Parse)(unsafe.Pointer(pParse)).FnMem = 3 Xsqlite3VdbeAddOp3(tls, v, OP_Checkpoint, iBt, eMode2, 1) @@ -88864,10 +88897,10 @@ __50: if !(zRight != 0) { - goto __391 + goto __392 } 
Xsqlite3_wal_autocheckpoint(tls, db, Xsqlite3Atoi(tls, zRight)) -__391: +__392: ; returnSingleInt(tls, v, func() int64 { @@ -88887,19 +88920,19 @@ __52: if !(zRight != 0) { - goto __392 + goto __393 } opMask = U32(Xsqlite3Atoi(tls, zRight)) if !(opMask&U32(0x02) == U32(0)) { - goto __394 + goto __395 } goto __15 -__394: +__395: ; - goto __393 -__392: - opMask = U32(0xfffe) + goto __394 __393: + opMask = U32(0xfffe) +__394: ; iTabCur = libc.PostIncInt32(&(*Parse)(unsafe.Pointer(pParse)).FnTab, 1) iDbLast = func() int32 { @@ -88908,86 +88941,86 @@ } return (*Sqlite3)(unsafe.Pointer(db)).FnDb - 1 }() -__395: +__396: if !(iDb <= iDbLast) { - goto __397 + goto __398 } if !(iDb == 1) { - goto __398 + goto __399 } - goto __396 -__398: + goto __397 +__399: ; Xsqlite3CodeVerifySchema(tls, pParse, iDb) pSchema = (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + uintptr(iDb)*32)).FpSchema k4 = (*Hash)(unsafe.Pointer(pSchema + 8)).Ffirst -__399: +__400: if !(k4 != 0) { - goto __401 + goto __402 } pTab10 = (*HashElem)(unsafe.Pointer(k4)).Fdata if !((*Table)(unsafe.Pointer(pTab10)).FtabFlags&U32(TF_StatsUsed) == U32(0)) { - goto __402 + goto __403 } - goto __400 -__402: + goto __401 +__403: ; szThreshold = LogEst(int32((*Table)(unsafe.Pointer(pTab10)).FnRowLogEst) + 46) pIdx6 = (*Table)(unsafe.Pointer(pTab10)).FpIndex -__403: +__404: if !(pIdx6 != 0) { - goto __405 + goto __406 } if !!(int32(*(*uint16)(unsafe.Pointer(pIdx6 + 100))&0x80>>7) != 0) { - goto __406 + goto __407 } szThreshold = int16(0) - goto __405 -__406: + goto __406 +__407: ; - goto __404 -__404: - pIdx6 = (*Index)(unsafe.Pointer(pIdx6)).FpNext - goto __403 goto __405 __405: + pIdx6 = (*Index)(unsafe.Pointer(pIdx6)).FpNext + goto __404 + goto __406 +__406: ; if !(szThreshold != 0) { - goto __407 + goto __408 } Xsqlite3OpenTable(tls, pParse, iTabCur, iDb, pTab10, OP_OpenRead) Xsqlite3VdbeAddOp3(tls, v, OP_IfSmaller, iTabCur, int32(U32(Xsqlite3VdbeCurrentAddr(tls, v)+2)+opMask&U32(1)), int32(szThreshold)) -__407: +__408: ; - zSubSql = Xsqlite3MPrintf(tls, db, ts+18500, + zSubSql = Xsqlite3MPrintf(tls, db, ts+18547, libc.VaList(bp+464, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName, (*Table)(unsafe.Pointer(pTab10)).FzName)) if !(opMask&U32(0x01) != 0) { - goto __408 + goto __409 } r11 = Xsqlite3GetTempReg(tls, pParse) Xsqlite3VdbeAddOp4(tls, v, OP_String8, 0, r11, 0, zSubSql, -6) Xsqlite3VdbeAddOp2(tls, v, OP_ResultRow, r11, 1) - goto __409 -__408: - Xsqlite3VdbeAddOp4(tls, v, OP_SqlExec, 0, 0, 0, zSubSql, -6) + goto __410 __409: + Xsqlite3VdbeAddOp4(tls, v, OP_SqlExec, 0, 0, 0, zSubSql, -6) +__410: ; - goto __400 -__400: - k4 = (*HashElem)(unsafe.Pointer(k4)).Fnext - goto __399 goto __401 __401: + k4 = (*HashElem)(unsafe.Pointer(k4)).Fnext + goto __400 + goto __402 +__402: ; - goto __396 -__396: - iDb++ - goto __395 goto __397 __397: + iDb++ + goto __396 + goto __398 +__398: ; Xsqlite3VdbeAddOp0(tls, v, OP_Expire) goto __15 @@ -88995,36 +89028,36 @@ __53: ; if !(zRight != 0) { - goto __410 + goto __411 } Xsqlite3_busy_timeout(tls, db, Xsqlite3Atoi(tls, zRight)) -__410: +__411: ; returnSingleInt(tls, v, int64((*Sqlite3)(unsafe.Pointer(db)).FbusyTimeout)) goto __15 __54: if !(zRight != 0 && Xsqlite3DecOrHexToI64(tls, zRight, bp+648) == SQLITE_OK) { - goto __411 + goto __412 } Xsqlite3_soft_heap_limit64(tls, *(*Sqlite3_int64)(unsafe.Pointer(bp + 648))) -__411: +__412: ; returnSingleInt(tls, v, Xsqlite3_soft_heap_limit64(tls, int64(-1))) goto __15 __55: if !(zRight != 0 && 
Xsqlite3DecOrHexToI64(tls, zRight, bp+656) == SQLITE_OK) { - goto __412 + goto __413 } iPrior = Xsqlite3_hard_heap_limit64(tls, int64(-1)) if !(*(*Sqlite3_int64)(unsafe.Pointer(bp + 656)) > int64(0) && (iPrior == int64(0) || iPrior > *(*Sqlite3_int64)(unsafe.Pointer(bp + 656)))) { - goto __413 + goto __414 } Xsqlite3_hard_heap_limit64(tls, *(*Sqlite3_int64)(unsafe.Pointer(bp + 656))) -__413: +__414: ; -__412: +__413: ; returnSingleInt(tls, v, Xsqlite3_hard_heap_limit64(tls, int64(-1))) goto __15 @@ -89033,10 +89066,10 @@ if !(zRight != 0 && Xsqlite3DecOrHexToI64(tls, zRight, bp+664) == SQLITE_OK && *(*Sqlite3_int64)(unsafe.Pointer(bp + 664)) >= int64(0)) { - goto __414 + goto __415 } Xsqlite3_limit(tls, db, SQLITE_LIMIT_WORKER_THREADS, int32(*(*Sqlite3_int64)(unsafe.Pointer(bp + 664))&int64(0x7fffffff))) -__414: +__415: ; returnSingleInt(tls, v, int64(Xsqlite3_limit(tls, db, SQLITE_LIMIT_WORKER_THREADS, -1))) goto __15 @@ -89045,10 +89078,10 @@ if !(zRight != 0 && Xsqlite3DecOrHexToI64(tls, zRight, bp+672) == SQLITE_OK && *(*Sqlite3_int64)(unsafe.Pointer(bp + 672)) >= int64(0)) { - goto __415 + goto __416 } (*Sqlite3)(unsafe.Pointer(db)).FnAnalysisLimit = int32(*(*Sqlite3_int64)(unsafe.Pointer(bp + 672)) & int64(0x7fffffff)) -__415: +__416: ; returnSingleInt(tls, v, int64((*Sqlite3)(unsafe.Pointer(db)).FnAnalysisLimit)) goto __15 @@ -89056,10 +89089,10 @@ __15: ; if !(int32((*PragmaName)(unsafe.Pointer(pPragma)).FmPragFlg)&PragFlg_NoColumns1 != 0 && zRight != 0) { - goto __416 + goto __417 } -__416: +__417: ; pragma_out: Xsqlite3DbFree(tls, db, zLeft) @@ -89111,14 +89144,14 @@ {Fopcode: U8(OP_Goto), Fp2: int8(3)}, } var encnames1 = [9]EncName{ - {FzName: ts + 18518, Fenc: U8(SQLITE_UTF8)}, - {FzName: ts + 18523, Fenc: U8(SQLITE_UTF8)}, - {FzName: ts + 18529, Fenc: U8(SQLITE_UTF16LE)}, - {FzName: ts + 18538, Fenc: U8(SQLITE_UTF16BE)}, - {FzName: ts + 18547, Fenc: U8(SQLITE_UTF16LE)}, - {FzName: ts + 18555, Fenc: U8(SQLITE_UTF16BE)}, - {FzName: ts + 18563}, - {FzName: ts + 18570}, + {FzName: ts + 18565, Fenc: U8(SQLITE_UTF8)}, + {FzName: ts + 18570, Fenc: U8(SQLITE_UTF8)}, + {FzName: ts + 18576, Fenc: U8(SQLITE_UTF16LE)}, + {FzName: ts + 18585, Fenc: U8(SQLITE_UTF16BE)}, + {FzName: ts + 18594, Fenc: U8(SQLITE_UTF16LE)}, + {FzName: ts + 18602, Fenc: U8(SQLITE_UTF16BE)}, + {FzName: ts + 18610}, + {FzName: ts + 18617}, {}, } var setCookie = [2]VdbeOpList{ @@ -89170,7 +89203,7 @@ _ = argc _ = argv Xsqlite3StrAccumInit(tls, bp+32, uintptr(0), bp+64, int32(unsafe.Sizeof([200]int8{})), 0) - Xsqlite3_str_appendall(tls, bp+32, ts+18576) + Xsqlite3_str_appendall(tls, bp+32, ts+18623) i = 0 j = int32((*PragmaName)(unsafe.Pointer(pPragma)).FiPragCName) __1: @@ -89178,7 +89211,7 @@ goto __3 } { - Xsqlite3_str_appendf(tls, bp+32, ts+18591, libc.VaList(bp, int32(cSep), pragCName[j])) + Xsqlite3_str_appendf(tls, bp+32, ts+18638, libc.VaList(bp, int32(cSep), pragCName[j])) cSep = int8(',') } @@ -89191,16 +89224,16 @@ __3: ; if i == 0 { - Xsqlite3_str_appendf(tls, bp+32, ts+18598, libc.VaList(bp+16, (*PragmaName)(unsafe.Pointer(pPragma)).FzName)) + Xsqlite3_str_appendf(tls, bp+32, ts+18645, libc.VaList(bp+16, (*PragmaName)(unsafe.Pointer(pPragma)).FzName)) i++ } j = 0 if int32((*PragmaName)(unsafe.Pointer(pPragma)).FmPragFlg)&PragFlg_Result1 != 0 { - Xsqlite3_str_appendall(tls, bp+32, ts+18604) + Xsqlite3_str_appendall(tls, bp+32, ts+18651) j++ } if int32((*PragmaName)(unsafe.Pointer(pPragma)).FmPragFlg)&(PragFlg_SchemaOpt|PragFlg_SchemaReq) != 0 { - Xsqlite3_str_appendall(tls, bp+32, ts+18616) + 
Xsqlite3_str_appendall(tls, bp+32, ts+18663) j++ } Xsqlite3_str_append(tls, bp+32, ts+5360, 1) @@ -89383,13 +89416,13 @@ __3: ; Xsqlite3StrAccumInit(tls, bp+32, uintptr(0), uintptr(0), 0, *(*int32)(unsafe.Pointer((*PragmaVtab)(unsafe.Pointer(pTab)).Fdb + 136 + 1*4))) - Xsqlite3_str_appendall(tls, bp+32, ts+18631) + Xsqlite3_str_appendall(tls, bp+32, ts+18678) if *(*uintptr)(unsafe.Pointer(pCsr + 24 + 1*8)) != 0 { - Xsqlite3_str_appendf(tls, bp+32, ts+18639, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(pCsr + 24 + 1*8)))) + Xsqlite3_str_appendf(tls, bp+32, ts+18686, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(pCsr + 24 + 1*8)))) } Xsqlite3_str_appendall(tls, bp+32, (*PragmaName)(unsafe.Pointer((*PragmaVtab)(unsafe.Pointer(pTab)).FpName)).FzName) if *(*uintptr)(unsafe.Pointer(pCsr + 24)) != 0 { - Xsqlite3_str_appendf(tls, bp+32, ts+18643, libc.VaList(bp+16, *(*uintptr)(unsafe.Pointer(pCsr + 24)))) + Xsqlite3_str_appendf(tls, bp+32, ts+18690, libc.VaList(bp+16, *(*uintptr)(unsafe.Pointer(pCsr + 24)))) } zSql = Xsqlite3StrAccumFinish(tls, bp+32) if zSql == uintptr(0) { @@ -89466,12 +89499,12 @@ } else if *(*uintptr)(unsafe.Pointer((*InitData)(unsafe.Pointer(pData)).FpzErrMsg)) != uintptr(0) { } else if (*InitData)(unsafe.Pointer(pData)).FmInitFlags&U32(INITFLAG_AlterMask) != 0 { *(*uintptr)(unsafe.Pointer((*InitData)(unsafe.Pointer(pData)).FpzErrMsg)) = Xsqlite3MPrintf(tls, db, - ts+18647, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(azObj)), *(*uintptr)(unsafe.Pointer(azObj + 1*8)), + ts+18694, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(azObj)), *(*uintptr)(unsafe.Pointer(azObj + 1*8)), azAlterType[(*InitData)(unsafe.Pointer(pData)).FmInitFlags&U32(INITFLAG_AlterMask)-U32(1)], zExtra)) (*InitData)(unsafe.Pointer(pData)).Frc = SQLITE_ERROR } else if (*Sqlite3)(unsafe.Pointer(db)).Fflags&uint64(SQLITE_WriteSchema) != 0 { - (*InitData)(unsafe.Pointer(pData)).Frc = Xsqlite3CorruptError(tls, 137196) + (*InitData)(unsafe.Pointer(pData)).Frc = Xsqlite3CorruptError(tls, 137249) } else { var z uintptr var zObj uintptr @@ -89480,19 +89513,19 @@ } else { zObj = ts + 5411 } - z = Xsqlite3MPrintf(tls, db, ts+18675, libc.VaList(bp+32, zObj)) + z = Xsqlite3MPrintf(tls, db, ts+18722, libc.VaList(bp+32, zObj)) if zExtra != 0 && *(*int8)(unsafe.Pointer(zExtra)) != 0 { - z = Xsqlite3MPrintf(tls, db, ts+18706, libc.VaList(bp+40, z, zExtra)) + z = Xsqlite3MPrintf(tls, db, ts+18753, libc.VaList(bp+40, z, zExtra)) } *(*uintptr)(unsafe.Pointer((*InitData)(unsafe.Pointer(pData)).FpzErrMsg)) = z - (*InitData)(unsafe.Pointer(pData)).Frc = Xsqlite3CorruptError(tls, 137203) + (*InitData)(unsafe.Pointer(pData)).Frc = Xsqlite3CorruptError(tls, 137256) } } var azAlterType = [3]uintptr{ - ts + 18714, - ts + 18721, - ts + 18733, + ts + 18761, + ts + 18768, + ts + 18780, } // Check to see if any sibling index (another index on the same table) @@ -89584,7 +89617,7 @@ var pIndex uintptr pIndex = Xsqlite3FindIndex(tls, db, *(*uintptr)(unsafe.Pointer(argv + 1*8)), (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName) if pIndex == uintptr(0) { - corruptSchema(tls, pData, argv, ts+18744) + corruptSchema(tls, pData, argv, ts+18791) } else if Xsqlite3GetUInt32(tls, *(*uintptr)(unsafe.Pointer(argv + 3*8)), pIndex+88) == 0 || (*Index)(unsafe.Pointer(pIndex)).Ftnum < Pgno(2) || (*Index)(unsafe.Pointer(pIndex)).Ftnum > (*InitData)(unsafe.Pointer(pData)).FmxPage || @@ -89632,7 +89665,7 @@ }()) *(*uintptr)(unsafe.Pointer(bp + 16 + 2*8)) = *(*uintptr)(unsafe.Pointer(bp + 16 + 1*8)) *(*uintptr)(unsafe.Pointer(bp 
+ 16 + 3*8)) = ts + 8341 - *(*uintptr)(unsafe.Pointer(bp + 16 + 4*8)) = ts + 18757 + *(*uintptr)(unsafe.Pointer(bp + 16 + 4*8)) = ts + 18804 *(*uintptr)(unsafe.Pointer(bp + 16 + 5*8)) = uintptr(0) (*InitData)(unsafe.Pointer(bp + 64)).Fdb = db (*InitData)(unsafe.Pointer(bp + 64)).FiDb = iDb @@ -89761,7 +89794,7 @@ if !(int32((*Schema)(unsafe.Pointer((*Db)(unsafe.Pointer(pDb)).FpSchema)).Ffile_format) > SQLITE_MAX_FILE_FORMAT) { goto __19 } - Xsqlite3SetString(tls, pzErrMsg, db, ts+18829) + Xsqlite3SetString(tls, pzErrMsg, db, ts+18876) rc = SQLITE_ERROR goto initone_error_out __19: @@ -89775,7 +89808,7 @@ (*InitData)(unsafe.Pointer(bp + 64)).FmxPage = Xsqlite3BtreeLastPage(tls, (*Db)(unsafe.Pointer(pDb)).FpBt) zSql = Xsqlite3MPrintf(tls, db, - ts+18853, + ts+18900, libc.VaList(bp, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName, zSchemaTabName)) xAuth = (*Sqlite3)(unsafe.Pointer(db)).FxAuth @@ -90107,7 +90140,7 @@ goto __8 } zDb = (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + uintptr(i)*32)).FzDbSName - Xsqlite3ErrorWithMsg(tls, db, rc, ts+18887, libc.VaList(bp, zDb)) + Xsqlite3ErrorWithMsg(tls, db, rc, ts+18934, libc.VaList(bp, zDb)) goto end_prepare __8: @@ -90137,7 +90170,7 @@ if !(nBytes > mxLen) { goto __12 } - Xsqlite3ErrorWithMsg(tls, db, SQLITE_TOOBIG, ts+18917, 0) + Xsqlite3ErrorWithMsg(tls, db, SQLITE_TOOBIG, ts+18964, 0) rc = Xsqlite3ApiExit(tls, db, SQLITE_TOOBIG) goto end_prepare __12: @@ -90233,7 +90266,7 @@ *(*uintptr)(unsafe.Pointer(ppStmt)) = uintptr(0) if !(Xsqlite3SafetyCheckOk(tls, db) != 0) || zSql == uintptr(0) { - return Xsqlite3MisuseError(tls, 137995) + return Xsqlite3MisuseError(tls, 138048) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) Xsqlite3BtreeEnterAll(tls, db) @@ -90332,7 +90365,7 @@ *(*uintptr)(unsafe.Pointer(ppStmt)) = uintptr(0) if !(Xsqlite3SafetyCheckOk(tls, db) != 0) || zSql == uintptr(0) { - return Xsqlite3MisuseError(tls, 138143) + return Xsqlite3MisuseError(tls, 138196) } if nBytes >= 0 { var sz int32 @@ -90659,13 +90692,13 @@ zSp2++ } Xsqlite3ErrorMsg(tls, pParse, - ts+18936, libc.VaList(bp, pA, zSp1, pB, zSp2, pC)) + ts+18983, libc.VaList(bp, pA, zSp1, pB, zSp2, pC)) jointype = JT_INNER } return jointype } -var zKeyText = *(*[34]int8)(unsafe.Pointer(ts + 18966)) +var zKeyText = *(*[34]int8)(unsafe.Pointer(ts + 19013)) var aKeyword = [7]struct { Fi U8 FnChar U8 @@ -90840,7 +90873,7 @@ var pUsing uintptr = uintptr(0) if uint32(int32(*(*uint16)(unsafe.Pointer(pRight + 60 + 4))&0x400>>10)) != 0 || *(*uintptr)(unsafe.Pointer(pRight + 72)) != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+19000, libc.VaList(bp, 0)) + ts+19047, libc.VaList(bp, 0)) return 1 } for j = 0; j < int32((*Table)(unsafe.Pointer(pRightTab)).FnCol); j++ { @@ -90885,7 +90918,7 @@ tableAndColumnIndex(tls, pSrc, 0, i, zName, bp+24, bp+28, int32(*(*uint16)(unsafe.Pointer(pRight + 60 + 4))&0x1000>>12)) == 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+19050, libc.VaList(bp+8, zName)) + ts+19097, libc.VaList(bp+8, zName)) return 1 } pE1 = Xsqlite3CreateColumnExpr(tls, db, pSrc, *(*int32)(unsafe.Pointer(bp + 24)), *(*int32)(unsafe.Pointer(bp + 28))) @@ -90896,7 +90929,7 @@ int32(*(*uint16)(unsafe.Pointer(pRight + 60 + 4))&0x1000>>12)) != 0 { if int32(*(*uint16)(unsafe.Pointer(pSrc + 8 + uintptr(*(*int32)(unsafe.Pointer(bp + 24)))*104 + 60 + 4))&0x400>>10) == 0 || Xsqlite3IdListIndex(tls, *(*uintptr)(unsafe.Pointer(pSrc + 8 + uintptr(*(*int32)(unsafe.Pointer(bp + 24)))*104 + 72)), zName) < 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+19114, 
+ Xsqlite3ErrorMsg(tls, pParse, ts+19161, libc.VaList(bp+16, zName)) break } @@ -91524,16 +91557,16 @@ var z uintptr switch id { case TK_ALL: - z = ts + 19151 + z = ts + 19198 break case TK_INTERSECT: - z = ts + 19161 + z = ts + 19208 break case TK_EXCEPT: - z = ts + 19171 + z = ts + 19218 break default: - z = ts + 19178 + z = ts + 19225 break } return z @@ -91543,7 +91576,7 @@ bp := tls.Alloc(8) defer tls.Free(8) - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+19184, libc.VaList(bp, zUsage)) + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+19231, libc.VaList(bp, zUsage)) } func generateSortTail(tls *libc.TLS, pParse uintptr, p uintptr, pSort uintptr, nColumn int32, pDest uintptr) { @@ -91569,9 +91602,9 @@ var nRefKey int32 = 0 var aOutEx uintptr = (*Select)(unsafe.Pointer(p)).FpEList + 8 - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+19207, libc.VaList(bp, func() uintptr { + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+19254, libc.VaList(bp, func() uintptr { if (*SortCtx)(unsafe.Pointer(pSort)).FnOBSat > 0 { - return ts + 19238 + return ts + 19285 } return ts + 1547 }())) @@ -91915,7 +91948,7 @@ } else { var z uintptr = (*ExprList_item)(unsafe.Pointer(pEList + 8 + uintptr(i)*32)).FzEName if z == uintptr(0) { - z = Xsqlite3MPrintf(tls, db, ts+19253, libc.VaList(bp+16, i+1)) + z = Xsqlite3MPrintf(tls, db, ts+19300, libc.VaList(bp+16, i+1)) } else { z = Xsqlite3DbStrDup(tls, db, z) } @@ -92015,7 +92048,7 @@ if zName != 0 && !(Xsqlite3IsTrueOrFalse(tls, zName) != 0) { zName = Xsqlite3DbStrDup(tls, db, zName) } else { - zName = Xsqlite3MPrintf(tls, db, ts+19253, libc.VaList(bp, i+1)) + zName = Xsqlite3MPrintf(tls, db, ts+19300, libc.VaList(bp, i+1)) } *(*U32)(unsafe.Pointer(bp + 56)) = U32(0) @@ -92031,7 +92064,7 @@ nName = j } } - zName = Xsqlite3MPrintf(tls, db, ts+19262, libc.VaList(bp+8, nName, zName, libc.PreIncUint32(&*(*U32)(unsafe.Pointer(bp + 56)), 1))) + zName = Xsqlite3MPrintf(tls, db, ts+19309, libc.VaList(bp+8, nName, zName, libc.PreIncUint32(&*(*U32)(unsafe.Pointer(bp + 56)), 1))) Xsqlite3ProgressCheck(tls, pParse) if *(*U32)(unsafe.Pointer(bp + 56)) > U32(3) { Xsqlite3_randomness(tls, int32(unsafe.Sizeof(U32(0))), bp+56) @@ -92114,8 +92147,6 @@ (*Column)(unsafe.Pointer(pCol)).Faffinity = Xsqlite3ExprAffinity(tls, p) if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) <= SQLITE_AFF_NONE { (*Column)(unsafe.Pointer(pCol)).Faffinity = aff - } else if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) >= SQLITE_AFF_NUMERIC && int32((*Expr)(unsafe.Pointer(p)).Fop) == TK_CAST { - (*Column)(unsafe.Pointer(pCol)).Faffinity = int8(SQLITE_AFF_FLEXNUM) } if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) >= SQLITE_AFF_TEXT && (*Select)(unsafe.Pointer(pSelect)).FpNext != 0 { var m int32 = 0 @@ -92130,12 +92161,15 @@ } else if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) >= SQLITE_AFF_NUMERIC && m&0x02 != 0 { (*Column)(unsafe.Pointer(pCol)).Faffinity = int8(SQLITE_AFF_BLOB) } + if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) >= SQLITE_AFF_NUMERIC && int32((*Expr)(unsafe.Pointer(p)).Fop) == TK_CAST { + (*Column)(unsafe.Pointer(pCol)).Faffinity = int8(SQLITE_AFF_FLEXNUM) + } } zType = columnTypeImpl(tls, bp, p, uintptr(0), uintptr(0), uintptr(0)) if zType == uintptr(0) || int32((*Column)(unsafe.Pointer(pCol)).Faffinity) != int32(Xsqlite3AffinityType(tls, zType, uintptr(0))) { if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) == SQLITE_AFF_NUMERIC || int32((*Column)(unsafe.Pointer(pCol)).Faffinity) == SQLITE_AFF_FLEXNUM { - zType = ts + 19270 + zType = ts + 19317 } else { zType = 
uintptr(0) for j = 1; j < SQLITE_N_STDTYPE; j++ { @@ -92351,7 +92385,7 @@ if !((*Select)(unsafe.Pointer(p)).FpWin != 0) { goto __1 } - Xsqlite3ErrorMsg(tls, pParse, ts+19274, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+19321, 0) return __1: ; @@ -92442,7 +92476,7 @@ if !((*Select)(unsafe.Pointer(pFirstRec)).FselFlags&U32(SF_Aggregate) != 0) { goto __15 } - Xsqlite3ErrorMsg(tls, pParse, ts+19323, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+19370, 0) goto end_of_recursive_query __15: ; @@ -92462,7 +92496,7 @@ ; pSetup = (*Select)(unsafe.Pointer(pFirstRec)).FpPrior (*Select)(unsafe.Pointer(pSetup)).FpNext = uintptr(0) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19365, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19412, 0) rc = Xsqlite3Select(tls, pParse, pSetup, bp) (*Select)(unsafe.Pointer(pSetup)).FpNext = p if !(rc != 0) { @@ -92499,7 +92533,7 @@ Xsqlite3VdbeResolveLabel(tls, v, addrCont) (*Select)(unsafe.Pointer(pFirstRec)).FpPrior = uintptr(0) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19371, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19418, 0) Xsqlite3Select(tls, pParse, p, bp) (*Select)(unsafe.Pointer(pFirstRec)).FpPrior = pSetup @@ -92533,7 +92567,7 @@ p = (*Select)(unsafe.Pointer(p)).FpPrior nRow = nRow + bShowAll } - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+19386, libc.VaList(bp, nRow, func() uintptr { + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+19433, libc.VaList(bp, nRow, func() uintptr { if nRow == 1 { return ts + 1547 } @@ -92638,8 +92672,8 @@ if !((*Select)(unsafe.Pointer(pPrior)).FpPrior == uintptr(0)) { goto __8 } - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19409, 0) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19424, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19456, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19471, 0) __8: ; switch int32((*Select)(unsafe.Pointer(p)).Fop) { @@ -92686,7 +92720,7 @@ ; __15: ; - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19151, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19198, 0) rc = Xsqlite3Select(tls, pParse, p, bp+16) @@ -92753,7 +92787,7 @@ pLimit = (*Select)(unsafe.Pointer(p)).FpLimit (*Select)(unsafe.Pointer(p)).FpLimit = uintptr(0) (*SelectDest)(unsafe.Pointer(bp + 64)).FeDest = op - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19443, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19490, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) rc = Xsqlite3Select(tls, pParse, p, bp+64) @@ -92815,7 +92849,7 @@ pLimit1 = (*Select)(unsafe.Pointer(p)).FpLimit (*Select)(unsafe.Pointer(p)).FpLimit = uintptr(0) (*SelectDest)(unsafe.Pointer(bp + 104)).FiSDParm = tab2 - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19443, libc.VaList(bp+8, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19490, libc.VaList(bp+8, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) rc = Xsqlite3Select(tls, pParse, p, bp+104) @@ -92968,10 +93002,10 @@ defer tls.Free(8) if (*Select)(unsafe.Pointer(p)).FselFlags&U32(SF_Values) != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+19464, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+19511, 0) } else { Xsqlite3ErrorMsg(tls, pParse, - ts+19510, + ts+19557, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) } } @@ -93225,8 +93259,8 @@ (*Select)(unsafe.Pointer(pPrior)).FpNext = uintptr(0) (*Select)(unsafe.Pointer(pPrior)).FpOrderBy = Xsqlite3ExprListDup(tls, 
(*Parse)(unsafe.Pointer(pParse)).Fdb, pOrderBy, 0) - Xsqlite3ResolveOrderGroupBy(tls, pParse, p, (*Select)(unsafe.Pointer(p)).FpOrderBy, ts+7637) - Xsqlite3ResolveOrderGroupBy(tls, pParse, pPrior, (*Select)(unsafe.Pointer(pPrior)).FpOrderBy, ts+7637) + Xsqlite3ResolveOrderGroupBy(tls, pParse, p, (*Select)(unsafe.Pointer(p)).FpOrderBy, ts+7626) + Xsqlite3ResolveOrderGroupBy(tls, pParse, pPrior, (*Select)(unsafe.Pointer(pPrior)).FpOrderBy, ts+7626) computeLimitRegisters(tls, pParse, p, labelEnd) if (*Select)(unsafe.Pointer(p)).FiLimit != 0 && op == TK_ALL { @@ -93253,13 +93287,13 @@ Xsqlite3SelectDestInit(tls, bp+8, SRT_Coroutine, regAddrA) Xsqlite3SelectDestInit(tls, bp+48, SRT_Coroutine, regAddrB) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19592, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19639, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) addrSelectA = Xsqlite3VdbeCurrentAddr(tls, v) + 1 addr1 = Xsqlite3VdbeAddOp3(tls, v, OP_InitCoroutine, regAddrA, 0, addrSelectA) (*Select)(unsafe.Pointer(pPrior)).FiLimit = regLimitA - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19603, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19650, 0) Xsqlite3Select(tls, pParse, pPrior, bp+8) Xsqlite3VdbeEndCoroutine(tls, v, regAddrA) Xsqlite3VdbeJumpHere(tls, v, addr1) @@ -93271,7 +93305,7 @@ savedOffset = (*Select)(unsafe.Pointer(p)).FiOffset (*Select)(unsafe.Pointer(p)).FiLimit = regLimitB (*Select)(unsafe.Pointer(p)).FiOffset = 0 - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19608, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19655, 0) Xsqlite3Select(tls, pParse, p, bp+48) (*Select)(unsafe.Pointer(p)).FiLimit = savedLimit (*Select)(unsafe.Pointer(p)).FiOffset = savedOffset @@ -93459,7 +93493,8 @@ Xsqlite3VectorErrorMsg(tls, (*SubstContext)(unsafe.Pointer(pSubst)).FpParse, pCopy) } else { var db uintptr = (*Parse)(unsafe.Pointer((*SubstContext)(unsafe.Pointer(pSubst)).FpParse)).Fdb - if (*SubstContext)(unsafe.Pointer(pSubst)).FisOuterJoin != 0 && int32((*Expr)(unsafe.Pointer(pCopy)).Fop) != TK_COLUMN { + if (*SubstContext)(unsafe.Pointer(pSubst)).FisOuterJoin != 0 && + (int32((*Expr)(unsafe.Pointer(pCopy)).Fop) != TK_COLUMN || (*Expr)(unsafe.Pointer(pCopy)).FiTable != (*SubstContext)(unsafe.Pointer(pSubst)).FiNewTable) { libc.X__builtin___memset_chk(tls, bp, 0, uint64(unsafe.Sizeof(Expr{})), libc.X__builtin_object_size(tls, bp, 0)) (*Expr)(unsafe.Pointer(bp)).Fop = U8(TK_IF_NULL_ROW) (*Expr)(unsafe.Pointer(bp)).FpLeft = pCopy @@ -94358,7 +94393,7 @@ for pIdx = (*Table)(unsafe.Pointer(pTab)).FpIndex; pIdx != 0 && Xsqlite3StrICmp(tls, (*Index)(unsafe.Pointer(pIdx)).FzName, zIndexedBy) != 0; pIdx = (*Index)(unsafe.Pointer(pIdx)).FpNext { } if !(pIdx != 0) { - Xsqlite3ErrorMsg(tls, pParse, ts+19614, libc.VaList(bp, zIndexedBy, 0)) + Xsqlite3ErrorMsg(tls, pParse, ts+19661, libc.VaList(bp, zIndexedBy, 0)) (*Parse)(unsafe.Pointer(pParse)).FcheckSchema = U8(1) return SQLITE_ERROR } @@ -94441,7 +94476,7 @@ defer tls.Free(8) if uint32(int32(*(*uint16)(unsafe.Pointer(pFrom + 60 + 4))&0x4>>2)) != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+19632, libc.VaList(bp, (*SrcItem)(unsafe.Pointer(pFrom)).FzName)) + Xsqlite3ErrorMsg(tls, pParse, ts+19679, libc.VaList(bp, (*SrcItem)(unsafe.Pointer(pFrom)).FzName)) return 1 } return 0 @@ -94570,7 +94605,7 @@ *(*U32)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pFrom)).FpSelect + 4)) |= U32(SF_CopyCte) if 
uint32(int32(*(*uint16)(unsafe.Pointer(pFrom + 60 + 4))&0x2>>1)) != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+19655, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(pFrom + 88)))) + Xsqlite3ErrorMsg(tls, pParse, ts+19702, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(pFrom + 88)))) return 2 } libc.SetBitFieldPtr16Uint32(pFrom+60+4, uint32(1), 8, 0x100) @@ -94593,7 +94628,7 @@ libc.SetBitFieldPtr16Uint32(pItem+60+4, uint32(1), 6, 0x40) if (*Select)(unsafe.Pointer(pRecTerm)).FselFlags&U32(SF_Recursive) != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+19675, libc.VaList(bp+16, (*Cte)(unsafe.Pointer(pCte)).FzName)) + ts+19722, libc.VaList(bp+16, (*Cte)(unsafe.Pointer(pCte)).FzName)) return 2 } *(*U32)(unsafe.Pointer(pRecTerm + 4)) |= U32(SF_Recursive) @@ -94609,7 +94644,7 @@ pRecTerm = (*Select)(unsafe.Pointer(pRecTerm)).FpPrior } - (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19718 + (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19765 pSavedWith = (*Parse)(unsafe.Pointer(pParse)).FpWith (*Parse)(unsafe.Pointer(pParse)).FpWith = *(*uintptr)(unsafe.Pointer(bp + 48)) if (*Select)(unsafe.Pointer(pSel)).FselFlags&U32(SF_Recursive) != 0 { @@ -94635,7 +94670,7 @@ pEList = (*Select)(unsafe.Pointer(pLeft)).FpEList if (*Cte)(unsafe.Pointer(pCte)).FpCols != 0 { if pEList != 0 && (*ExprList)(unsafe.Pointer(pEList)).FnExpr != (*ExprList)(unsafe.Pointer((*Cte)(unsafe.Pointer(pCte)).FpCols)).FnExpr { - Xsqlite3ErrorMsg(tls, pParse, ts+19741, + Xsqlite3ErrorMsg(tls, pParse, ts+19788, libc.VaList(bp+24, (*Cte)(unsafe.Pointer(pCte)).FzName, (*ExprList)(unsafe.Pointer(pEList)).FnExpr, (*ExprList)(unsafe.Pointer((*Cte)(unsafe.Pointer(pCte)).FpCols)).FnExpr)) (*Parse)(unsafe.Pointer(pParse)).FpWith = pSavedWith return 2 @@ -94646,9 +94681,9 @@ Xsqlite3ColumnsFromExprList(tls, pParse, pEList, pTab+54, pTab+8) if bMayRecursive != 0 { if (*Select)(unsafe.Pointer(pSel)).FselFlags&U32(SF_Recursive) != 0 { - (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19779 + (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19826 } else { - (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19813 + (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19860 } Xsqlite3WalkSelect(tls, pWalker, pSel) } @@ -94695,7 +94730,7 @@ if (*SrcItem)(unsafe.Pointer(pFrom)).FzAlias != 0 { (*Table)(unsafe.Pointer(pTab)).FzName = Xsqlite3DbStrDup(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, (*SrcItem)(unsafe.Pointer(pFrom)).FzAlias) } else { - (*Table)(unsafe.Pointer(pTab)).FzName = Xsqlite3MPrintf(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, ts+19851, libc.VaList(bp, pFrom)) + (*Table)(unsafe.Pointer(pTab)).FzName = Xsqlite3MPrintf(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, ts+19898, libc.VaList(bp, pFrom)) } for (*Select)(unsafe.Pointer(pSel)).FpPrior != 0 { pSel = (*Select)(unsafe.Pointer(pSel)).FpPrior @@ -94807,7 +94842,7 @@ return WRC_Abort } if (*Table)(unsafe.Pointer(pTab)).FnTabRef >= U32(0xffff) { - Xsqlite3ErrorMsg(tls, pParse, ts+19855, + Xsqlite3ErrorMsg(tls, pParse, ts+19902, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName)) (*SrcItem)(unsafe.Pointer(pFrom)).FpTab = uintptr(0) return WRC_Abort @@ -94826,7 +94861,7 @@ if int32((*Table)(unsafe.Pointer(pTab)).FeTabType) == TABTYP_VIEW { if (*Sqlite3)(unsafe.Pointer(db)).Fflags&uint64(SQLITE_EnableView) == uint64(0) && (*Table)(unsafe.Pointer(pTab)).FpSchema != (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+1*32)).FpSchema { - Xsqlite3ErrorMsg(tls, pParse, ts+19894, + Xsqlite3ErrorMsg(tls, pParse, ts+19941, libc.VaList(bp+8, (*Table)(unsafe.Pointer(pTab)).FzName)) } (*SrcItem)(unsafe.Pointer(pFrom)).FpSelect = 
Xsqlite3SelectDup(tls, db, *(*uintptr)(unsafe.Pointer(pTab + 64)), 0) @@ -94950,7 +94985,7 @@ if pNew != 0 { var pX uintptr = pNew + 8 + uintptr((*ExprList)(unsafe.Pointer(pNew)).FnExpr-1)*32 - (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3MPrintf(tls, db, ts+19925, libc.VaList(bp+24, zUName)) + (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3MPrintf(tls, db, ts+19972, libc.VaList(bp+24, zUName)) libc.SetBitFieldPtr16Uint32(pX+16+4, uint32(ENAME_TAB), 0, 0x3) libc.SetBitFieldPtr16Uint32(pX+16+4, uint32(1), 7, 0x80) } @@ -95015,7 +95050,7 @@ (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3DbStrDup(tls, db, (*ExprList_item)(unsafe.Pointer(pNestedFrom+8+uintptr(j)*32)).FzEName) } else { - (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3MPrintf(tls, db, ts+19930, + (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3MPrintf(tls, db, ts+19977, libc.VaList(bp+32, zSchemaName, zTabName, zName)) } @@ -95046,9 +95081,9 @@ ; if !(tableSeen != 0) { if zTName != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+19939, libc.VaList(bp+72, zTName)) + Xsqlite3ErrorMsg(tls, pParse, ts+19986, libc.VaList(bp+72, zTName)) } else { - Xsqlite3ErrorMsg(tls, pParse, ts+19957, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+20004, 0) } } } @@ -95058,7 +95093,7 @@ } if (*Select)(unsafe.Pointer(p)).FpEList != 0 { if (*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer(p)).FpEList)).FnExpr > *(*int32)(unsafe.Pointer(db + 136 + 2*4)) { - Xsqlite3ErrorMsg(tls, pParse, ts+19977, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+20024, 0) return WRC_Abort } if elistFlags&U32(EP_HasFunc|EP_Subquery) != U32(0) { @@ -95196,7 +95231,7 @@ (*AggInfo)(unsafe.Pointer(pAggInfo)).FnColumn = (*AggInfo)(unsafe.Pointer(pAggInfo)).FnAccumulator if int32((*AggInfo)(unsafe.Pointer(pAggInfo)).FnSortingColumn) > 0 { if (*AggInfo)(unsafe.Pointer(pAggInfo)).FnColumn == 0 { - (*AggInfo)(unsafe.Pointer(pAggInfo)).FnSortingColumn = U16(0) + (*AggInfo)(unsafe.Pointer(pAggInfo)).FnSortingColumn = U16((*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer(pSelect)).FpGroupBy)).FnExpr) } else { (*AggInfo)(unsafe.Pointer(pAggInfo)).FnSortingColumn = U16(int32((*AggInfo_col)(unsafe.Pointer((*AggInfo)(unsafe.Pointer(pAggInfo)).FaCol+uintptr((*AggInfo)(unsafe.Pointer(pAggInfo)).FnColumn-1)*24)).FiSorterColumn) + 1) } @@ -95280,13 +95315,13 @@ if *(*uintptr)(unsafe.Pointer(pE + 32)) == uintptr(0) || (*ExprList)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pE + 32)))).FnExpr != 1 { Xsqlite3ErrorMsg(tls, pParse, - ts+20008, 0) + ts+20055, 0) (*AggInfo_func)(unsafe.Pointer(pFunc)).FiDistinct = -1 } else { var pKeyInfo uintptr = Xsqlite3KeyInfoFromExprList(tls, pParse, *(*uintptr)(unsafe.Pointer(pE + 32)), 0, 0) (*AggInfo_func)(unsafe.Pointer(pFunc)).FiDistAddr = Xsqlite3VdbeAddOp4(tls, v, OP_OpenEphemeral, (*AggInfo_func)(unsafe.Pointer(pFunc)).FiDistinct, 0, 0, pKeyInfo, -8) - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+20059, libc.VaList(bp, (*FuncDef)(unsafe.Pointer((*AggInfo_func)(unsafe.Pointer(pFunc)).FpFunc)).FzName)) + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+20106, libc.VaList(bp, (*FuncDef)(unsafe.Pointer((*AggInfo_func)(unsafe.Pointer(pFunc)).FpFunc)).FzName)) } } @@ -95475,11 +95510,11 @@ if int32((*Parse)(unsafe.Pointer(pParse)).Fexplain) == 2 { var bCover int32 = libc.Bool32(pIdx != uintptr(0) && ((*Table)(unsafe.Pointer(pTab)).FtabFlags&U32(TF_WithoutRowid) == U32(0) || !(int32(*(*uint16)(unsafe.Pointer(pIdx + 100))&0x3>>0) == SQLITE_IDXTYPE_PRIMARYKEY))) - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+20092, + Xsqlite3VdbeExplain(tls, 
pParse, uint8(0), ts+20139, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName, func() uintptr { if bCover != 0 { - return ts + 20104 + return ts + 20151 } return ts + 1547 }(), @@ -95807,7 +95842,7 @@ goto __7 } Xsqlite3ErrorMsg(tls, pParse, - ts+20127, + ts+20174, libc.VaList(bp, func() uintptr { if (*SrcItem)(unsafe.Pointer(p0)).FzAlias != 0 { return (*SrcItem)(unsafe.Pointer(p0)).FzAlias @@ -95868,7 +95903,7 @@ if !(int32((*Table)(unsafe.Pointer(pTab)).FnCol) != (*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer(pSub)).FpEList)).FnExpr) { goto __15 } - Xsqlite3ErrorMsg(tls, pParse, ts+20181, + Xsqlite3ErrorMsg(tls, pParse, ts+20228, libc.VaList(bp+8, int32((*Table)(unsafe.Pointer(pTab)).FnCol), (*Table)(unsafe.Pointer(pTab)).FzName, (*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer(pSub)).FpEList)).FnExpr)) goto select_end __15: @@ -96010,7 +96045,7 @@ (*SrcItem)(unsafe.Pointer(pItem1)).FaddrFillSub = addrTop Xsqlite3SelectDestInit(tls, bp+96, SRT_Coroutine, (*SrcItem)(unsafe.Pointer(pItem1)).FregReturn) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+20221, libc.VaList(bp+32, pItem1)) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+20268, libc.VaList(bp+32, pItem1)) Xsqlite3Select(tls, pParse, pSub1, bp+96) (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pItem1)).FpTab)).FnRowLogEst = (*Select)(unsafe.Pointer(pSub1)).FnSelectRow libc.SetBitFieldPtr16Uint32(pItem1+60+4, uint32(1), 5, 0x20) @@ -96069,7 +96104,7 @@ ; Xsqlite3SelectDestInit(tls, bp+96, SRT_EphemTab, (*SrcItem)(unsafe.Pointer(pItem1)).FiCursor) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+20236, libc.VaList(bp+40, pItem1)) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+20283, libc.VaList(bp+40, pItem1)) Xsqlite3Select(tls, pParse, pSub1, bp+96) (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pItem1)).FpTab)).FnRowLogEst = (*Select)(unsafe.Pointer(pSub1)).FnSelectRow if !(onceAddr != 0) { @@ -96540,9 +96575,9 @@ explainTempTable(tls, pParse, func() uintptr { if (*DistinctCtx)(unsafe.Pointer(bp+136)).FisTnct != 0 && (*Select)(unsafe.Pointer(p)).FselFlags&U32(SF_Distinct) == U32(0) { - return ts + 20252 + return ts + 20299 } - return ts + 20261 + return ts + 20308 }()) groupBySort = 1 @@ -96893,7 +96928,7 @@ if !(int32((*DistinctCtx)(unsafe.Pointer(bp+136)).FeTnctType) == WHERE_DISTINCT_UNORDERED) { goto __146 } - explainTempTable(tls, pParse, ts+20252) + explainTempTable(tls, pParse, ts+20299) __146: ; if !((*SortCtx)(unsafe.Pointer(bp+48)).FpOrderBy != 0) { @@ -96998,7 +97033,7 @@ } Xsqlite3_free(tls, (*TabResult)(unsafe.Pointer(p)).FzErrMsg) (*TabResult)(unsafe.Pointer(p)).FzErrMsg = Xsqlite3_mprintf(tls, - ts+20270, 0) + ts+20317, 0) (*TabResult)(unsafe.Pointer(p)).Frc = SQLITE_ERROR return 1 __11: @@ -97231,7 +97266,7 @@ if !((*Token)(unsafe.Pointer(pName2)).Fn > uint32(0)) { goto __3 } - Xsqlite3ErrorMsg(tls, pParse, ts+20335, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+20382, 0) goto trigger_cleanup __3: ; @@ -97275,7 +97310,7 @@ goto trigger_cleanup __8: ; - Xsqlite3FixInit(tls, bp+40, pParse, iDb, ts+20381, *(*uintptr)(unsafe.Pointer(bp + 32))) + Xsqlite3FixInit(tls, bp+40, pParse, iDb, ts+20428, *(*uintptr)(unsafe.Pointer(bp + 32))) if !(Xsqlite3FixSrcList(tls, bp+40, pTableName) != 0) { goto __9 } @@ -97293,7 +97328,7 @@ if !(int32((*Table)(unsafe.Pointer(pTab)).FeTabType) == TABTYP_VTAB) { goto __11 } - Xsqlite3ErrorMsg(tls, pParse, ts+20389, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+20436, 0) goto trigger_orphan_error __11: ; @@ -97305,7 +97340,7 @@ goto trigger_cleanup __12: ; - if 
!(Xsqlite3CheckObjectName(tls, pParse, zName, ts+20381, (*Table)(unsafe.Pointer(pTab)).FzName) != 0) { + if !(Xsqlite3CheckObjectName(tls, pParse, zName, ts+20428, (*Table)(unsafe.Pointer(pTab)).FzName) != 0) { goto __13 } goto trigger_cleanup @@ -97320,11 +97355,12 @@ if !!(noErr != 0) { goto __16 } - Xsqlite3ErrorMsg(tls, pParse, ts+20430, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(bp + 32)))) + Xsqlite3ErrorMsg(tls, pParse, ts+20477, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(bp + 32)))) goto __17 __16: ; Xsqlite3CodeVerifySchema(tls, pParse, iDb) + __17: ; goto trigger_cleanup @@ -97335,19 +97371,19 @@ if !(Xsqlite3_strnicmp(tls, (*Table)(unsafe.Pointer(pTab)).FzName, ts+6784, 7) == 0) { goto __18 } - Xsqlite3ErrorMsg(tls, pParse, ts+20456, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+20503, 0) goto trigger_cleanup __18: ; if !(int32((*Table)(unsafe.Pointer(pTab)).FeTabType) == TABTYP_VIEW && tr_tm != TK_INSTEAD) { goto __19 } - Xsqlite3ErrorMsg(tls, pParse, ts+20494, + Xsqlite3ErrorMsg(tls, pParse, ts+20541, libc.VaList(bp+8, func() uintptr { if tr_tm == TK_BEFORE { - return ts + 20531 + return ts + 20578 } - return ts + 20538 + return ts + 20585 }(), pTableName+8)) goto trigger_orphan_error __19: @@ -97356,7 +97392,7 @@ goto __20 } Xsqlite3ErrorMsg(tls, pParse, - ts+20544, libc.VaList(bp+24, pTableName+8)) + ts+20591, libc.VaList(bp+24, pTableName+8)) goto trigger_orphan_error __20: ; @@ -97505,7 +97541,7 @@ __3: ; Xsqlite3TokenInit(tls, bp+56, (*Trigger)(unsafe.Pointer(pTrig)).FzName) - Xsqlite3FixInit(tls, bp+72, pParse, iDb, ts+20381, bp+56) + Xsqlite3FixInit(tls, bp+72, pParse, iDb, ts+20428, bp+56) if !(Xsqlite3FixTriggerStep(tls, bp+72, (*Trigger)(unsafe.Pointer(pTrig)).Fstep_list) != 0 || Xsqlite3FixExpr(tls, bp+72, (*Trigger)(unsafe.Pointer(pTrig)).FpWhen) != 0) { goto __4 @@ -97538,7 +97574,7 @@ goto __12 } Xsqlite3ErrorMsg(tls, pParse, - ts+20590, + ts+20637, libc.VaList(bp, (*Trigger)(unsafe.Pointer(pTrig)).FzName, (*TriggerStep)(unsafe.Pointer(pStep)).FzTarget)) goto triggerfinish_cleanup __12: @@ -97563,13 +97599,13 @@ z = Xsqlite3DbStrNDup(tls, db, (*Token)(unsafe.Pointer(pAll)).Fz, uint64((*Token)(unsafe.Pointer(pAll)).Fn)) Xsqlite3NestedParse(tls, pParse, - ts+20638, + ts+20685, libc.VaList(bp+16, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName, zName, (*Trigger)(unsafe.Pointer(pTrig)).Ftable, z)) Xsqlite3DbFree(tls, db, z) Xsqlite3ChangeCookie(tls, pParse, iDb) Xsqlite3VdbeAddParseSchemaOp(tls, v, iDb, - Xsqlite3MPrintf(tls, db, ts+20713, libc.VaList(bp+48, zName)), uint16(0)) + Xsqlite3MPrintf(tls, db, ts+20760, libc.VaList(bp+48, zName)), uint16(0)) __7: ; __6: @@ -97825,7 +97861,7 @@ if !!(noErr != 0) { goto __9 } - Xsqlite3ErrorMsg(tls, pParse, ts+20742, libc.VaList(bp, pName+8)) + Xsqlite3ErrorMsg(tls, pParse, ts+20789, libc.VaList(bp, pName+8)) goto __10 __9: Xsqlite3CodeVerifyNamedSchema(tls, pParse, zDb) @@ -97878,7 +97914,7 @@ if libc.AssignUintptr(&v, Xsqlite3GetVdbe(tls, pParse)) != uintptr(0) { Xsqlite3NestedParse(tls, pParse, - ts+20762, + ts+20809, libc.VaList(bp, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName, (*Trigger)(unsafe.Pointer(pTrigger)).FzName)) Xsqlite3ChangeCookie(tls, pParse, iDb) Xsqlite3VdbeAddOp4(tls, v, OP_DropTrigger, iDb, 0, 0, (*Trigger)(unsafe.Pointer(pTrigger)).FzName, 0) @@ -97992,12 +98028,12 @@ goto __15 } Xsqlite3ErrorMsg(tls, pParse, - ts+20824, + ts+20871, libc.VaList(bp, func() uintptr { if op == TK_DELETE { - return ts + 20872 + return ts + 20919 } - 
return ts + 20879 + return ts + 20926 }())) __15: ; @@ -98111,7 +98147,7 @@ if int32((*Expr)(unsafe.Pointer((*Expr)(unsafe.Pointer(pTerm)).FpRight)).Fop) != TK_ASTERISK { return 0 } - Xsqlite3ErrorMsg(tls, pParse, ts+20886, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+20933, 0) return 1 } @@ -98177,7 +98213,7 @@ } Xsqlite3ExprListDelete(tls, db, (*Select)(unsafe.Pointer(bp)).FpEList) pNew = sqlite3ExpandReturning(tls, pParse, (*Returning)(unsafe.Pointer(pReturning)).FpReturnEL, pTab) - if !(int32((*Sqlite3)(unsafe.Pointer(db)).FmallocFailed) != 0) { + if (*Parse)(unsafe.Pointer(pParse)).FnErr == 0 { libc.X__builtin___memset_chk(tls, bp+240, 0, uint64(unsafe.Sizeof(NameContext{})), libc.X__builtin_object_size(tls, bp+240, 0)) if (*Returning)(unsafe.Pointer(pReturning)).FnRetCol == 0 { (*Returning)(unsafe.Pointer(pReturning)).FnRetCol = (*ExprList)(unsafe.Pointer(pNew)).FnExpr @@ -98341,7 +98377,7 @@ if v != 0 { if (*Trigger)(unsafe.Pointer(pTrigger)).FzName != 0 { Xsqlite3VdbeChangeP4(tls, v, -1, - Xsqlite3MPrintf(tls, db, ts+20928, libc.VaList(bp, (*Trigger)(unsafe.Pointer(pTrigger)).FzName)), -6) + Xsqlite3MPrintf(tls, db, ts+20975, libc.VaList(bp, (*Trigger)(unsafe.Pointer(pTrigger)).FzName)), -6) } if (*Trigger)(unsafe.Pointer(pTrigger)).FpWhen != 0 { @@ -98934,7 +98970,7 @@ } Xsqlite3ErrorMsg(tls, pParse, - ts+20942, + ts+20989, libc.VaList(bp, (*Column)(unsafe.Pointer((*Table)(unsafe.Pointer(pTab)).FaCol+uintptr(j)*24)).FzCnName)) goto update_cleanup __27: @@ -98966,7 +99002,7 @@ iRowidExpr = i goto __30 __29: - Xsqlite3ErrorMsg(tls, pParse, ts+20978, libc.VaList(bp+8, (*ExprList_item)(unsafe.Pointer(pChanges+8+uintptr(i)*32)).FzEName)) + Xsqlite3ErrorMsg(tls, pParse, ts+21025, libc.VaList(bp+8, (*ExprList_item)(unsafe.Pointer(pChanges+8+uintptr(i)*32)).FzEName)) (*Parse)(unsafe.Pointer(pParse)).FcheckSchema = U8(1) goto update_cleanup __30: @@ -99292,7 +99328,12 @@ goto __77 __76: flags = WHERE_ONEPASS_DESIRED - if !(!(int32((*Parse)(unsafe.Pointer(pParse)).Fnested) != 0) && !(pTrigger != 0) && !(hasFK != 0) && !(chngKey != 0) && !(*(*int32)(unsafe.Pointer(bp + 104)) != 0)) { + if !(!(int32((*Parse)(unsafe.Pointer(pParse)).Fnested) != 0) && + !(pTrigger != 0) && + !(hasFK != 0) && + !(chngKey != 0) && + !(*(*int32)(unsafe.Pointer(bp + 104)) != 0) && + (*NameContext)(unsafe.Pointer(bp+40)).FncFlags&NC_Subquery == 0) { goto __78 } flags = flags | WHERE_ONEPASS_MULTIROW @@ -99846,7 +99887,7 @@ if !(regRowCount != 0) { goto __169 } - Xsqlite3CodeChangeCount(tls, v, regRowCount, ts+20997) + Xsqlite3CodeChangeCount(tls, v, regRowCount, ts+21044) __169: ; update_cleanup: @@ -100152,10 +100193,10 @@ if nClause == 0 && (*Upsert)(unsafe.Pointer(pUpsert)).FpNextUpsert == uintptr(0) { *(*int8)(unsafe.Pointer(bp + 216)) = int8(0) } else { - Xsqlite3_snprintf(tls, int32(unsafe.Sizeof([16]int8{})), bp+216, ts+21010, libc.VaList(bp, nClause+1)) + Xsqlite3_snprintf(tls, int32(unsafe.Sizeof([16]int8{})), bp+216, ts+21057, libc.VaList(bp, nClause+1)) } Xsqlite3ErrorMsg(tls, pParse, - ts+21014, libc.VaList(bp+8, bp+216)) + ts+21061, libc.VaList(bp+8, bp+216)) return SQLITE_ERROR } @@ -100278,7 +100319,7 @@ var zSubSql uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp)), 0) if zSubSql != 0 && - (libc.Xstrncmp(tls, zSubSql, ts+21087, uint64(3)) == 0 || libc.Xstrncmp(tls, zSubSql, ts+21091, uint64(3)) == 0) { + (libc.Xstrncmp(tls, zSubSql, ts+21134, uint64(3)) == 0 || libc.Xstrncmp(tls, zSubSql, ts+21138, uint64(3)) == 0) { rc = execSql(tls, db, pzErrMsg, zSubSql) if rc != SQLITE_OK { break @@ 
-100426,14 +100467,14 @@ if !!(int32((*Sqlite3)(unsafe.Pointer(db)).FautoCommit) != 0) { goto __1 } - Xsqlite3SetString(tls, pzErrMsg, db, ts+21095) + Xsqlite3SetString(tls, pzErrMsg, db, ts+21142) return SQLITE_ERROR __1: ; if !((*Sqlite3)(unsafe.Pointer(db)).FnVdbeActive > 1) { goto __2 } - Xsqlite3SetString(tls, pzErrMsg, db, ts+21135) + Xsqlite3SetString(tls, pzErrMsg, db, ts+21182) return SQLITE_ERROR __2: ; @@ -100444,7 +100485,7 @@ if !(Xsqlite3_value_type(tls, pOut) != SQLITE_TEXT) { goto __5 } - Xsqlite3SetString(tls, pzErrMsg, db, ts+21178) + Xsqlite3SetString(tls, pzErrMsg, db, ts+21225) return SQLITE_ERROR __5: ; @@ -100472,7 +100513,7 @@ isMemDb = Xsqlite3PagerIsMemdb(tls, Xsqlite3BtreePager(tls, pMain)) nDb = (*Sqlite3)(unsafe.Pointer(db)).FnDb - rc = execSqlF(tls, db, pzErrMsg, ts+21196, libc.VaList(bp, zOut)) + rc = execSqlF(tls, db, pzErrMsg, ts+21243, libc.VaList(bp, zOut)) (*Sqlite3)(unsafe.Pointer(db)).FopenFlags = saved_openFlags if !(rc != SQLITE_OK) { goto __6 @@ -100492,7 +100533,7 @@ goto __8 } rc = SQLITE_ERROR - Xsqlite3SetString(tls, pzErrMsg, db, ts+21219) + Xsqlite3SetString(tls, pzErrMsg, db, ts+21266) goto end_of_vacuum __8: ; @@ -100552,7 +100593,7 @@ (*Sqlite3)(unsafe.Pointer(db)).Finit.FiDb = U8(nDb) rc = execSqlF(tls, db, pzErrMsg, - ts+21246, + ts+21293, libc.VaList(bp+8, zDbMain)) if !(rc != SQLITE_OK) { goto __13 @@ -100561,7 +100602,7 @@ __13: ; rc = execSqlF(tls, db, pzErrMsg, - ts+21354, + ts+21401, libc.VaList(bp+16, zDbMain)) if !(rc != SQLITE_OK) { goto __14 @@ -100572,7 +100613,7 @@ (*Sqlite3)(unsafe.Pointer(db)).Finit.FiDb = U8(0) rc = execSqlF(tls, db, pzErrMsg, - ts+21408, + ts+21455, libc.VaList(bp+24, zDbMain)) *(*U32)(unsafe.Pointer(db + 44)) &= libc.Uint32FromInt32(libc.CplInt32(DBFLAG_Vacuum)) @@ -100583,7 +100624,7 @@ __15: ; rc = execSqlF(tls, db, pzErrMsg, - ts+21559, + ts+21606, libc.VaList(bp+32, zDbMain)) if !(rc != 0) { goto __16 @@ -101012,11 +101053,11 @@ if pEnd != 0 { (*Parse)(unsafe.Pointer(pParse)).FsNameToken.Fn = uint32(int32((int64((*Token)(unsafe.Pointer(pEnd)).Fz)-int64((*Parse)(unsafe.Pointer(pParse)).FsNameToken.Fz))/1)) + (*Token)(unsafe.Pointer(pEnd)).Fn } - zStmt = Xsqlite3MPrintf(tls, db, ts+21689, libc.VaList(bp, pParse+272)) + zStmt = Xsqlite3MPrintf(tls, db, ts+21736, libc.VaList(bp, pParse+272)) iDb = Xsqlite3SchemaToIndex(tls, db, (*Table)(unsafe.Pointer(pTab)).FpSchema) Xsqlite3NestedParse(tls, pParse, - ts+21713, + ts+21760, libc.VaList(bp+8, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName, (*Table)(unsafe.Pointer(pTab)).FzName, (*Table)(unsafe.Pointer(pTab)).FzName, @@ -101026,7 +101067,7 @@ Xsqlite3ChangeCookie(tls, pParse, iDb) Xsqlite3VdbeAddOp0(tls, v, OP_Expire) - zWhere = Xsqlite3MPrintf(tls, db, ts+21812, libc.VaList(bp+48, (*Table)(unsafe.Pointer(pTab)).FzName, zStmt)) + zWhere = Xsqlite3MPrintf(tls, db, ts+21859, libc.VaList(bp+48, (*Table)(unsafe.Pointer(pTab)).FzName, zStmt)) Xsqlite3VdbeAddParseSchemaOp(tls, v, iDb, zWhere, uint16(0)) Xsqlite3DbFree(tls, db, zStmt) @@ -101087,7 +101128,7 @@ for pCtx = (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx; pCtx != 0; pCtx = (*VtabCtx)(unsafe.Pointer(pCtx)).FpPrior { if (*VtabCtx)(unsafe.Pointer(pCtx)).FpTab == pTab { *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, - ts+21831, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName)) + ts+21878, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName)) return SQLITE_LOCKED } } @@ -101115,9 +101156,11 @@ (*VtabCtx)(unsafe.Pointer(bp + 32)).FpPrior = 
(*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx (*VtabCtx)(unsafe.Pointer(bp + 32)).FbDeclared = 0 (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx = bp + 32 + (*Table)(unsafe.Pointer(pTab)).FnTabRef++ rc = (*struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, uintptr, uintptr) int32 })(unsafe.Pointer(&struct{ uintptr }{xConstruct})).f(tls, db, (*Module)(unsafe.Pointer(pMod)).FpAux, nArg, azArg, pVTable+16, bp+64) + Xsqlite3DeleteTable(tls, db, pTab) (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx = (*VtabCtx)(unsafe.Pointer(bp + 32)).FpPrior if rc == SQLITE_NOMEM { Xsqlite3OomFault(tls, db) @@ -101125,7 +101168,7 @@ if SQLITE_OK != rc { if *(*uintptr)(unsafe.Pointer(bp + 64)) == uintptr(0) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+21873, libc.VaList(bp+8, zModuleName)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+21920, libc.VaList(bp+8, zModuleName)) } else { *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+3658, libc.VaList(bp+16, *(*uintptr)(unsafe.Pointer(bp + 64)))) Xsqlite3_free(tls, *(*uintptr)(unsafe.Pointer(bp + 64))) @@ -101137,7 +101180,7 @@ (*Module)(unsafe.Pointer(pMod)).FnRefModule++ (*VTable)(unsafe.Pointer(pVTable)).FnRef = 1 if (*VtabCtx)(unsafe.Pointer(bp+32)).FbDeclared == 0 { - var zFormat uintptr = ts + 21903 + var zFormat uintptr = ts + 21950 *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, zFormat, libc.VaList(bp+24, (*Table)(unsafe.Pointer(pTab)).FzName)) Xsqlite3VtabUnlock(tls, pVTable) rc = SQLITE_ERROR @@ -101211,7 +101254,7 @@ if !(pMod != 0) { var zModule uintptr = *(*uintptr)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pTab + 64 + 8)))) - Xsqlite3ErrorMsg(tls, pParse, ts+21949, libc.VaList(bp, zModule)) + Xsqlite3ErrorMsg(tls, pParse, ts+21996, libc.VaList(bp, zModule)) rc = SQLITE_ERROR } else { *(*uintptr)(unsafe.Pointer(bp + 16)) = uintptr(0) @@ -101269,7 +101312,7 @@ pMod = Xsqlite3HashFind(tls, db+576, zMod) if pMod == uintptr(0) || (*Sqlite3_module)(unsafe.Pointer((*Module)(unsafe.Pointer(pMod)).FpModule)).FxCreate == uintptr(0) || (*Sqlite3_module)(unsafe.Pointer((*Module)(unsafe.Pointer(pMod)).FpModule)).FxDestroy == uintptr(0) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+21949, libc.VaList(bp, zMod)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+21996, libc.VaList(bp, zMod)) rc = SQLITE_ERROR } else { rc = vtabCallConstructor(tls, db, pTab, pMod, (*Sqlite3_module)(unsafe.Pointer((*Module)(unsafe.Pointer(pMod)).FpModule)).FxCreate, pzErr) @@ -101303,7 +101346,7 @@ if !(pCtx != 0) || (*VtabCtx)(unsafe.Pointer(pCtx)).FbDeclared != 0 { Xsqlite3Error(tls, db, SQLITE_MISUSE) Xsqlite3_mutex_leave(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) - return Xsqlite3MisuseError(tls, 151030) + return Xsqlite3MisuseError(tls, 151102) } pTab = (*VtabCtx)(unsafe.Pointer(pCtx)).FpTab @@ -101756,7 +101799,7 @@ Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) p = (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx if !(p != 0) { - rc = Xsqlite3MisuseError(tls, 151521) + rc = Xsqlite3MisuseError(tls, 151593) } else { ap = va switch op { @@ -101783,7 +101826,7 @@ fallthrough default: { - rc = Xsqlite3MisuseError(tls, 151539) + rc = Xsqlite3MisuseError(tls, 151611) break } @@ -102014,7 +102057,7 @@ func explainIndexColumnName(tls *libc.TLS, pIdx uintptr, i int32) uintptr { i = int32(*(*I16)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx)).FaiColumn + uintptr(i)*2))) if i == -2 { - return ts + 21968 + return ts + 22015 } if i == -1 { return ts + 16673 @@ -102026,11 
+102069,11 @@ var i int32 if bAnd != 0 { - Xsqlite3_str_append(tls, pStr, ts+21975, 5) + Xsqlite3_str_append(tls, pStr, ts+22022, 5) } if nTerm > 1 { - Xsqlite3_str_append(tls, pStr, ts+21981, 1) + Xsqlite3_str_append(tls, pStr, ts+22028, 1) } for i = 0; i < nTerm; i++ { if i != 0 { @@ -102045,7 +102088,7 @@ Xsqlite3_str_append(tls, pStr, zOp, 1) if nTerm > 1 { - Xsqlite3_str_append(tls, pStr, ts+21981, 1) + Xsqlite3_str_append(tls, pStr, ts+22028, 1) } for i = 0; i < nTerm; i++ { if i != 0 { @@ -102071,27 +102114,27 @@ if int32(nEq) == 0 && (*WhereLoop)(unsafe.Pointer(pLoop)).FwsFlags&U32(WHERE_BTM_LIMIT|WHERE_TOP_LIMIT) == U32(0) { return } - Xsqlite3_str_append(tls, pStr, ts+21983, 2) + Xsqlite3_str_append(tls, pStr, ts+22030, 2) for i = 0; i < int32(nEq); i++ { var z uintptr = explainIndexColumnName(tls, pIndex, i) if i != 0 { - Xsqlite3_str_append(tls, pStr, ts+21975, 5) + Xsqlite3_str_append(tls, pStr, ts+22022, 5) } Xsqlite3_str_appendf(tls, pStr, func() uintptr { if i >= int32(nSkip) { - return ts + 21986 + return ts + 22033 } - return ts + 21991 + return ts + 22038 }(), libc.VaList(bp, z)) } j = i if (*WhereLoop)(unsafe.Pointer(pLoop)).FwsFlags&U32(WHERE_BTM_LIMIT) != 0 { - explainAppendTerm(tls, pStr, pIndex, int32(*(*U16)(unsafe.Pointer(pLoop + 24 + 2))), j, i, ts+21999) + explainAppendTerm(tls, pStr, pIndex, int32(*(*U16)(unsafe.Pointer(pLoop + 24 + 2))), j, i, ts+22046) i = 1 } if (*WhereLoop)(unsafe.Pointer(pLoop)).FwsFlags&U32(WHERE_TOP_LIMIT) != 0 { - explainAppendTerm(tls, pStr, pIndex, int32(*(*U16)(unsafe.Pointer(pLoop + 24 + 4))), j, i, ts+22001) + explainAppendTerm(tls, pStr, pIndex, int32(*(*U16)(unsafe.Pointer(pLoop + 24 + 4))), j, i, ts+22048) } Xsqlite3_str_append(tls, pStr, ts+5360, 1) } @@ -102134,11 +102177,11 @@ Xsqlite3StrAccumInit(tls, bp+64, db, bp+96, int32(unsafe.Sizeof([100]int8{})), SQLITE_MAX_LENGTH) (*StrAccum)(unsafe.Pointer(bp + 64)).FprintfFlags = U8(SQLITE_PRINTF_INTERNAL) - Xsqlite3_str_appendf(tls, bp+64, ts+22003, libc.VaList(bp, func() uintptr { + Xsqlite3_str_appendf(tls, bp+64, ts+22050, libc.VaList(bp, func() uintptr { if isSearch != 0 { - return ts + 22009 + return ts + 22056 } - return ts + 22016 + return ts + 22063 }(), pItem)) if flags&U32(WHERE_IPK|WHERE_VIRTUALTABLE) == U32(0) { var zFmt uintptr = uintptr(0) @@ -102151,40 +102194,40 @@ zFmt = ts + 11379 } } else if flags&U32(WHERE_PARTIALIDX) != 0 { - zFmt = ts + 22021 + zFmt = ts + 22068 } else if flags&U32(WHERE_AUTO_INDEX) != 0 { - zFmt = ts + 22054 + zFmt = ts + 22101 } else if flags&U32(WHERE_IDX_ONLY) != 0 { - zFmt = ts + 22079 + zFmt = ts + 22126 } else { - zFmt = ts + 22097 + zFmt = ts + 22144 } if zFmt != 0 { - Xsqlite3_str_append(tls, bp+64, ts+22106, 7) + Xsqlite3_str_append(tls, bp+64, ts+22153, 7) Xsqlite3_str_appendf(tls, bp+64, zFmt, libc.VaList(bp+16, (*Index)(unsafe.Pointer(pIdx)).FzName)) explainIndexRange(tls, bp+64, pLoop) } } else if flags&U32(WHERE_IPK) != U32(0) && flags&U32(WHERE_CONSTRAINT) != U32(0) { var cRangeOp int8 var zRowid uintptr = ts + 16673 - Xsqlite3_str_appendf(tls, bp+64, ts+22114, libc.VaList(bp+24, zRowid)) + Xsqlite3_str_appendf(tls, bp+64, ts+22161, libc.VaList(bp+24, zRowid)) if flags&U32(WHERE_COLUMN_EQ|WHERE_COLUMN_IN) != 0 { cRangeOp = int8('=') } else if flags&U32(WHERE_BOTH_LIMIT) == U32(WHERE_BOTH_LIMIT) { - Xsqlite3_str_appendf(tls, bp+64, ts+22145, libc.VaList(bp+32, zRowid)) + Xsqlite3_str_appendf(tls, bp+64, ts+22192, libc.VaList(bp+32, zRowid)) cRangeOp = int8('<') } else if flags&U32(WHERE_BTM_LIMIT) != 0 { cRangeOp = int8('>') } 
else { cRangeOp = int8('<') } - Xsqlite3_str_appendf(tls, bp+64, ts+22155, libc.VaList(bp+40, int32(cRangeOp))) + Xsqlite3_str_appendf(tls, bp+64, ts+22202, libc.VaList(bp+40, int32(cRangeOp))) } else if flags&U32(WHERE_VIRTUALTABLE) != U32(0) { - Xsqlite3_str_appendf(tls, bp+64, ts+22160, + Xsqlite3_str_appendf(tls, bp+64, ts+22207, libc.VaList(bp+48, *(*int32)(unsafe.Pointer(pLoop + 24)), *(*uintptr)(unsafe.Pointer(pLoop + 24 + 16)))) } if int32((*SrcItem)(unsafe.Pointer(pItem)).Ffg.Fjointype)&JT_LEFT != 0 { - Xsqlite3_str_appendf(tls, bp+64, ts+22187, 0) + Xsqlite3_str_appendf(tls, bp+64, ts+22234, 0) } zMsg = Xsqlite3StrAccumFinish(tls, bp+64) @@ -102216,22 +102259,22 @@ Xsqlite3StrAccumInit(tls, bp+24, db, bp+56, int32(unsafe.Sizeof([100]int8{})), SQLITE_MAX_LENGTH) (*StrAccum)(unsafe.Pointer(bp + 24)).FprintfFlags = U8(SQLITE_PRINTF_INTERNAL) - Xsqlite3_str_appendf(tls, bp+24, ts+22198, libc.VaList(bp, pItem)) + Xsqlite3_str_appendf(tls, bp+24, ts+22245, libc.VaList(bp, pItem)) pLoop = (*WhereLevel)(unsafe.Pointer(pLevel)).FpWLoop if (*WhereLoop)(unsafe.Pointer(pLoop)).FwsFlags&U32(WHERE_IPK) != 0 { var pTab uintptr = (*SrcItem)(unsafe.Pointer(pItem)).FpTab if int32((*Table)(unsafe.Pointer(pTab)).FiPKey) >= 0 { - Xsqlite3_str_appendf(tls, bp+24, ts+21986, libc.VaList(bp+8, (*Column)(unsafe.Pointer((*Table)(unsafe.Pointer(pTab)).FaCol+uintptr((*Table)(unsafe.Pointer(pTab)).FiPKey)*24)).FzCnName)) + Xsqlite3_str_appendf(tls, bp+24, ts+22033, libc.VaList(bp+8, (*Column)(unsafe.Pointer((*Table)(unsafe.Pointer(pTab)).FaCol+uintptr((*Table)(unsafe.Pointer(pTab)).FiPKey)*24)).FzCnName)) } else { - Xsqlite3_str_appendf(tls, bp+24, ts+22219, 0) + Xsqlite3_str_appendf(tls, bp+24, ts+22266, 0) } } else { for i = int32((*WhereLoop)(unsafe.Pointer(pLoop)).FnSkip); i < int32(*(*U16)(unsafe.Pointer(pLoop + 24))); i++ { var z uintptr = explainIndexColumnName(tls, *(*uintptr)(unsafe.Pointer(pLoop + 24 + 8)), i) if i > int32((*WhereLoop)(unsafe.Pointer(pLoop)).FnSkip) { - Xsqlite3_str_append(tls, bp+24, ts+21975, 5) + Xsqlite3_str_append(tls, bp+24, ts+22022, 5) } - Xsqlite3_str_appendf(tls, bp+24, ts+21986, libc.VaList(bp+16, z)) + Xsqlite3_str_appendf(tls, bp+24, ts+22033, libc.VaList(bp+16, z)) } } Xsqlite3_str_append(tls, bp+24, ts+5360, 1) @@ -103828,7 +103871,7 @@ ; __126: ; - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+22227, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+22274, 0) ii = 0 __135: if !(ii < (*WhereClause)(unsafe.Pointer(pOrWc)).FnTerm) { @@ -103856,7 +103899,7 @@ pOrExpr = pAndExpr __140: ; - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+22242, libc.VaList(bp, ii+1)) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+22289, libc.VaList(bp, ii+1)) pSubWInfo = Xsqlite3WhereBegin(tls, pParse, pOrTab, pOrExpr, uintptr(0), uintptr(0), uintptr(0), uint16(WHERE_OR_SUBCLAUSE), iCovCur) @@ -104374,7 +104417,7 @@ var mAll Bitmask = uint64(0) var k int32 - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+22251, libc.VaList(bp, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pTabItem)).FpTab)).FzName)) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+22298, libc.VaList(bp, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pTabItem)).FpTab)).FzName)) for k = 0; k < iLevel; k++ { var iIdxCur int32 @@ -104734,7 +104777,7 @@ {FzOp: ts + 16522, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_MATCH)}, {FzOp: ts + 15850, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_GLOB)}, {FzOp: ts + 15370, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_LIKE)}, - {FzOp: ts + 22265, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_REGEXP)}, + {FzOp: ts + 
22312, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_REGEXP)}, } func transferJoinMarkings(tls *libc.TLS, pDerived uintptr, pBase uintptr) { @@ -105224,12 +105267,12 @@ extraRight = x - uint64(1) if prereqAll>>1 >= x { - Xsqlite3ErrorMsg(tls, pParse, ts+22272, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+22319, 0) return } } else if prereqAll>>1 >= x { if (*SrcList)(unsafe.Pointer(pSrc)).FnSrc > 0 && int32((*SrcItem)(unsafe.Pointer(pSrc+8)).Ffg.Fjointype)&JT_LTORJ != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+22272, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+22319, 0) return } *(*U32)(unsafe.Pointer(pExpr + 4)) &= libc.Uint32FromInt32(libc.CplInt32(EP_InnerON)) @@ -105308,7 +105351,7 @@ !((*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_OuterON) != U32(0)) && 0 == Xsqlite3ExprCanBeNull(tls, pLeft) { (*Expr)(unsafe.Pointer(pExpr)).Fop = U8(TK_TRUEFALSE) - *(*uintptr)(unsafe.Pointer(pExpr + 8)) = ts + 7172 + *(*uintptr)(unsafe.Pointer(pExpr + 8)) = ts + 8102 *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_IsFalse) (*WhereTerm)(unsafe.Pointer(pTerm)).FprereqAll = uint64(0) (*WhereTerm)(unsafe.Pointer(pTerm)).FeOperator = U16(0) @@ -105402,7 +105445,7 @@ } zCollSeqName = func() uintptr { if *(*int32)(unsafe.Pointer(bp + 20)) != 0 { - return ts + 22313 + return ts + 22360 } return uintptr(unsafe.Pointer(&Xsqlite3StrBINARY)) }() @@ -105778,7 +105821,7 @@ k++ } if k >= int32((*Table)(unsafe.Pointer(pTab)).FnCol) { - Xsqlite3ErrorMsg(tls, pParse, ts+22320, + Xsqlite3ErrorMsg(tls, pParse, ts+22367, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName, j)) return } @@ -105794,7 +105837,7 @@ pRhs = Xsqlite3PExpr(tls, pParse, TK_UPLUS, Xsqlite3ExprDup(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, (*ExprList_item)(unsafe.Pointer(pArgs+8+uintptr(j)*32)).FpExpr, 0), uintptr(0)) pTerm = Xsqlite3PExpr(tls, pParse, TK_EQ, pColRef, pRhs) - if int32((*SrcItem)(unsafe.Pointer(pItem)).Ffg.Fjointype)&(JT_LEFT|JT_LTORJ) != 0 { + if int32((*SrcItem)(unsafe.Pointer(pItem)).Ffg.Fjointype)&(JT_LEFT|JT_LTORJ|JT_RIGHT) != 0 { joinType = U32(EP_OuterON) } else { joinType = U32(EP_InnerON) @@ -106512,7 +106555,7 @@ goto __6 } Xsqlite3_log(tls, SQLITE_WARNING|int32(1)<<8, - ts+22356, libc.VaList(bp, (*Table)(unsafe.Pointer(pTable)).FzName, + ts+22403, libc.VaList(bp, (*Table)(unsafe.Pointer(pTable)).FzName, (*Column)(unsafe.Pointer((*Table)(unsafe.Pointer(pTable)).FaCol+uintptr(iCol)*24)).FzCnName)) sentWarning = U8(1) __6: @@ -106583,7 +106626,7 @@ __14: ; *(*uintptr)(unsafe.Pointer(pLoop + 24 + 8)) = pIdx - (*Index)(unsafe.Pointer(pIdx)).FzName = ts + 22382 + (*Index)(unsafe.Pointer(pIdx)).FzName = ts + 22429 (*Index)(unsafe.Pointer(pIdx)).FpTable = pTable n = 0 idxCols = uint64(0) @@ -106757,6 +106800,10 @@ var v uintptr = (*Parse)(unsafe.Pointer(pParse)).FpVdbe var pLoop uintptr = (*WhereLevel)(unsafe.Pointer(pLevel)).FpWLoop var iCur int32 + var saved_pIdxEpr uintptr + + saved_pIdxEpr = (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr + (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr = uintptr(0) addrOnce = Xsqlite3VdbeAddOp0(tls, v, OP_Once) for __ccgo := true; __ccgo; __ccgo = iLevel < int32((*WhereInfo)(unsafe.Pointer(pWInfo)).FnLevel) { @@ -106800,9 +106847,7 @@ var r1 int32 = Xsqlite3GetTempRange(tls, pParse, n) var jj int32 for jj = 0; jj < n; jj++ { - var iCol int32 = int32(*(*I16)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx)).FaiColumn + uintptr(jj)*2))) - - Xsqlite3ExprCodeGetColumnOfTable(tls, v, (*Index)(unsafe.Pointer(pIdx)).FpTable, iCur, iCol, r1+jj) + Xsqlite3ExprCodeLoadIndexColumn(tls, pParse, pIdx, iCur, jj, r1+jj) } Xsqlite3VdbeAddOp4Int(tls, 
v, OP_FilterAdd, (*WhereLevel)(unsafe.Pointer(pLevel)).FregFilter, 0, r1, n) Xsqlite3ReleaseTempRange(tls, pParse, r1, n) @@ -106836,6 +106881,7 @@ } } Xsqlite3VdbeJumpHere(tls, v, addrOnce) + (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr = saved_pIdxEpr } func allocateIndexInfo(tls *libc.TLS, pWInfo uintptr, pWC uintptr, mUnusable Bitmask, pSrc uintptr, pmNoOmit uintptr) uintptr { @@ -107094,11 +107140,16 @@ _ = pParse + if !((*Table)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx)).FpTable)).FtabFlags&U32(TF_WithoutRowid) == U32(0)) && int32(*(*uint16)(unsafe.Pointer(pIdx + 100))&0x3>>0) == SQLITE_IDXTYPE_PRIMARYKEY { + nField = int32((*Index)(unsafe.Pointer(pIdx)).FnKeyCol) + } else { + nField = int32((*Index)(unsafe.Pointer(pIdx)).FnColumn) + } nField = func() int32 { - if int32((*UnpackedRecord)(unsafe.Pointer(pRec)).FnField) < (*Index)(unsafe.Pointer(pIdx)).FnSample { + if int32((*UnpackedRecord)(unsafe.Pointer(pRec)).FnField) < nField { return int32((*UnpackedRecord)(unsafe.Pointer(pRec)).FnField) } - return (*Index)(unsafe.Pointer(pIdx)).FnSample + return nField }() iCol = 0 iSample = (*Index)(unsafe.Pointer(pIdx)).FnSample * nField @@ -108679,7 +108730,7 @@ j >= (*WhereClause)(unsafe.Pointer(pWC)).FnTerm || *(*uintptr)(unsafe.Pointer((*WhereLoop)(unsafe.Pointer(pNew)).FaLTerm + uintptr(iTerm)*8)) != uintptr(0) || int32((*sqlite3_index_constraint)(unsafe.Pointer(pIdxCons)).Fusable) == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+22393, libc.VaList(bp, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pSrc)).FpTab)).FzName)) + Xsqlite3ErrorMsg(tls, pParse, ts+22440, libc.VaList(bp, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pSrc)).FpTab)).FzName)) return SQLITE_ERROR } @@ -108737,7 +108788,7 @@ (*WhereLoop)(unsafe.Pointer(pNew)).FnLTerm = U16(mxTerm + 1) for i = 0; i <= mxTerm; i++ { if *(*uintptr)(unsafe.Pointer((*WhereLoop)(unsafe.Pointer(pNew)).FaLTerm + uintptr(i)*8)) == uintptr(0) { - Xsqlite3ErrorMsg(tls, pParse, ts+22393, libc.VaList(bp+8, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pSrc)).FpTab)).FzName)) + Xsqlite3ErrorMsg(tls, pParse, ts+22440, libc.VaList(bp+8, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pSrc)).FpTab)).FzName)) return SQLITE_ERROR } @@ -109135,7 +109186,7 @@ mPrior = mPrior | (*WhereLoop)(unsafe.Pointer(pNew)).FmaskSelf if rc != 0 || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { if rc == SQLITE_DONE { - Xsqlite3_log(tls, SQLITE_WARNING, ts+22419, 0) + Xsqlite3_log(tls, SQLITE_WARNING, ts+22466, 0) rc = SQLITE_OK } else { goto __3 @@ -109742,7 +109793,7 @@ } if nFrom == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+22454, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+22501, 0) Xsqlite3DbFreeNN(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pSpace) return SQLITE_ERROR } @@ -109777,6 +109828,10 @@ if int32((*WherePath)(unsafe.Pointer(pFrom)).FisOrdered) == (*ExprList)(unsafe.Pointer((*WhereInfo)(unsafe.Pointer(pWInfo)).FpOrderBy)).FnExpr { (*WhereInfo)(unsafe.Pointer(pWInfo)).FeDistinct = U8(WHERE_DISTINCT_ORDERED) } + if (*Select)(unsafe.Pointer((*WhereInfo)(unsafe.Pointer(pWInfo)).FpSelect)).FpOrderBy != 0 && + int32((*WhereInfo)(unsafe.Pointer(pWInfo)).FnOBSat) > (*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer((*WhereInfo)(unsafe.Pointer(pWInfo)).FpSelect)).FpOrderBy)).FnExpr { + (*WhereInfo)(unsafe.Pointer(pWInfo)).FnOBSat = I8((*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer((*WhereInfo)(unsafe.Pointer(pWInfo)).FpSelect)).FpOrderBy)).FnExpr) + } } else { (*WhereInfo)(unsafe.Pointer(pWInfo)).FrevMask = (*WherePath)(unsafe.Pointer(pFrom)).FrevLoop if 
int32((*WhereInfo)(unsafe.Pointer(pWInfo)).FnOBSat) <= 0 { @@ -110071,6 +110126,9 @@ (*IndexedExpr)(unsafe.Pointer(p)).FiIdxCur = iIdxCur (*IndexedExpr)(unsafe.Pointer(p)).FiIdxCol = i (*IndexedExpr)(unsafe.Pointer(p)).FbMaybeNullRow = U8(bMaybeNullRow) + if Xsqlite3IndexAffinityStr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pIdx) != 0 { + (*IndexedExpr)(unsafe.Pointer(p)).Faff = U8(*(*int8)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx)).FzColAff + uintptr(i)))) + } (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr = p if (*IndexedExpr)(unsafe.Pointer(p)).FpIENext == uintptr(0) { Xsqlite3ParserAddCleanup(tls, pParse, *(*uintptr)(unsafe.Pointer(&struct { @@ -110223,7 +110281,7 @@ if !((*SrcList)(unsafe.Pointer(pTabList)).FnSrc > int32(uint64(unsafe.Sizeof(Bitmask(0)))*uint64(8))) { goto __2 } - Xsqlite3ErrorMsg(tls, pParse, ts+22472, libc.VaList(bp, int32(uint64(unsafe.Sizeof(Bitmask(0)))*uint64(8)))) + Xsqlite3ErrorMsg(tls, pParse, ts+22519, libc.VaList(bp, int32(uint64(unsafe.Sizeof(Bitmask(0)))*uint64(8)))) return uintptr(0) __2: ; @@ -110286,7 +110344,7 @@ (*WhereInfo)(unsafe.Pointer(pWInfo)).FeDistinct = U8(WHERE_DISTINCT_UNIQUE) __7: ; - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+22500, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+22547, 0) goto __5 __4: ii = 0 @@ -111168,7 +111226,7 @@ error_out: Xsqlite3_result_error(tls, - pCtx, ts+22518, -1) + pCtx, ts+22565, -1) } func nth_valueFinalizeFunc(tls *libc.TLS, pCtx uintptr) { @@ -111301,7 +111359,7 @@ (*NtileCtx)(unsafe.Pointer(p)).FnParam = Xsqlite3_value_int64(tls, *(*uintptr)(unsafe.Pointer(apArg))) if (*NtileCtx)(unsafe.Pointer(p)).FnParam <= int64(0) { Xsqlite3_result_error(tls, - pCtx, ts+22574, -1) + pCtx, ts+22621, -1) } } (*NtileCtx)(unsafe.Pointer(p)).FnTotal++ @@ -111391,17 +111449,17 @@ } } -var row_numberName = *(*[11]int8)(unsafe.Pointer(ts + 22619)) -var dense_rankName = *(*[11]int8)(unsafe.Pointer(ts + 22630)) -var rankName = *(*[5]int8)(unsafe.Pointer(ts + 22641)) -var percent_rankName = *(*[13]int8)(unsafe.Pointer(ts + 22646)) -var cume_distName = *(*[10]int8)(unsafe.Pointer(ts + 22659)) -var ntileName = *(*[6]int8)(unsafe.Pointer(ts + 22669)) -var last_valueName = *(*[11]int8)(unsafe.Pointer(ts + 22675)) -var nth_valueName = *(*[10]int8)(unsafe.Pointer(ts + 22686)) -var first_valueName = *(*[12]int8)(unsafe.Pointer(ts + 22696)) -var leadName = *(*[5]int8)(unsafe.Pointer(ts + 22708)) -var lagName = *(*[4]int8)(unsafe.Pointer(ts + 22713)) +var row_numberName = *(*[11]int8)(unsafe.Pointer(ts + 22666)) +var dense_rankName = *(*[11]int8)(unsafe.Pointer(ts + 22677)) +var rankName = *(*[5]int8)(unsafe.Pointer(ts + 22688)) +var percent_rankName = *(*[13]int8)(unsafe.Pointer(ts + 22693)) +var cume_distName = *(*[10]int8)(unsafe.Pointer(ts + 22706)) +var ntileName = *(*[6]int8)(unsafe.Pointer(ts + 22716)) +var last_valueName = *(*[11]int8)(unsafe.Pointer(ts + 22722)) +var nth_valueName = *(*[10]int8)(unsafe.Pointer(ts + 22733)) +var first_valueName = *(*[12]int8)(unsafe.Pointer(ts + 22743)) +var leadName = *(*[5]int8)(unsafe.Pointer(ts + 22755)) +var lagName = *(*[4]int8)(unsafe.Pointer(ts + 22760)) func noopStepFunc(tls *libc.TLS, p uintptr, n int32, a uintptr) { _ = p @@ -111447,7 +111505,7 @@ } } if p == uintptr(0) { - Xsqlite3ErrorMsg(tls, pParse, ts+22717, libc.VaList(bp, zName)) + Xsqlite3ErrorMsg(tls, pParse, ts+22764, libc.VaList(bp, zName)) } return p } @@ -111491,12 +111549,12 @@ ((*Window)(unsafe.Pointer(pWin)).FpStart != 0 || (*Window)(unsafe.Pointer(pWin)).FpEnd != 0) && ((*Window)(unsafe.Pointer(pWin)).FpOrderBy 
== uintptr(0) || (*ExprList)(unsafe.Pointer((*Window)(unsafe.Pointer(pWin)).FpOrderBy)).FnExpr != 1) { Xsqlite3ErrorMsg(tls, pParse, - ts+22736, 0) + ts+22783, 0) } else if (*FuncDef)(unsafe.Pointer(pFunc)).FfuncFlags&U32(SQLITE_FUNC_WINDOW) != 0 { var db uintptr = (*Parse)(unsafe.Pointer(pParse)).Fdb if (*Window)(unsafe.Pointer(pWin)).FpFilter != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+22807, 0) + ts+22854, 0) } else { *(*[8]WindowUpdate)(unsafe.Pointer(bp)) = [8]WindowUpdate{ {FzFunc: uintptr(unsafe.Pointer(&row_numberName)), FeFrmType: TK_ROWS, FeStart: TK_UNBOUNDED, FeEnd: TK_CURRENT}, @@ -111723,7 +111781,7 @@ if int32((*Expr)(unsafe.Pointer(pExpr)).Fop) == TK_AGG_FUNCTION && (*Expr)(unsafe.Pointer(pExpr)).FpAggInfo == uintptr(0) { Xsqlite3ErrorMsg(tls, (*Walker)(unsafe.Pointer(pWalker)).FpParse, - ts+22870, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(pExpr + 8)))) + ts+22917, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(pExpr + 8)))) } return WRC_Continue } @@ -111839,7 +111897,7 @@ if *(*uintptr)(unsafe.Pointer(bp + 48)) == uintptr(0) { *(*uintptr)(unsafe.Pointer(bp + 48)) = Xsqlite3ExprListAppend(tls, pParse, uintptr(0), - Xsqlite3Expr(tls, db, TK_INTEGER, ts+7933)) + Xsqlite3Expr(tls, db, TK_INTEGER, ts+7922)) } pSub = Xsqlite3SelectNew(tls, @@ -111954,7 +112012,7 @@ eStart == TK_FOLLOWING && (eEnd == TK_PRECEDING || eEnd == TK_CURRENT)) { goto __2 } - Xsqlite3ErrorMsg(tls, pParse, ts+22896, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+22943, 0) goto windowAllocErr __2: ; @@ -112019,15 +112077,15 @@ var zErr uintptr = uintptr(0) if (*Window)(unsafe.Pointer(pWin)).FpPartition != 0 { - zErr = ts + 22928 + zErr = ts + 22975 } else if (*Window)(unsafe.Pointer(pExist)).FpOrderBy != 0 && (*Window)(unsafe.Pointer(pWin)).FpOrderBy != 0 { - zErr = ts + 22945 + zErr = ts + 22992 } else if int32((*Window)(unsafe.Pointer(pExist)).FbImplicitFrame) == 0 { - zErr = ts + 22961 + zErr = ts + 23008 } if zErr != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+22981, libc.VaList(bp, zErr, (*Window)(unsafe.Pointer(pWin)).FzBase)) + ts+23028, libc.VaList(bp, zErr, (*Window)(unsafe.Pointer(pWin)).FzBase)) } else { (*Window)(unsafe.Pointer(pWin)).FpPartition = Xsqlite3ExprListDup(tls, db, (*Window)(unsafe.Pointer(pExist)).FpPartition, 0) if (*Window)(unsafe.Pointer(pExist)).FpOrderBy != 0 { @@ -112048,7 +112106,7 @@ (*Window)(unsafe.Pointer(pWin)).FpOwner = p if (*Expr)(unsafe.Pointer(p)).Fflags&U32(EP_Distinct) != 0 && int32((*Window)(unsafe.Pointer(pWin)).FeFrmType) != TK_FILTER { Xsqlite3ErrorMsg(tls, pParse, - ts+23014, 0) + ts+23061, 0) } } else { Xsqlite3WindowDelete(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pWin) @@ -112204,11 +112262,11 @@ } var azErr = [5]uintptr{ - ts + 23061, - ts + 23114, - ts + 22518, - ts + 23165, - ts + 23217, + ts + 23108, + ts + 23161, + ts + 22565, + ts + 23212, + ts + 23264, } var aOp1 = [5]int32{OP_Ge, OP_Ge, OP_Gt, OP_Ge, OP_Ge} @@ -113603,19 +113661,19 @@ } cnt++ if (*Select)(unsafe.Pointer(pLoop)).FpOrderBy != 0 || (*Select)(unsafe.Pointer(pLoop)).FpLimit != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+23267, + Xsqlite3ErrorMsg(tls, pParse, ts+23314, libc.VaList(bp, func() uintptr { if (*Select)(unsafe.Pointer(pLoop)).FpOrderBy != uintptr(0) { - return ts + 23309 + return ts + 23356 } - return ts + 23318 + return ts + 23365 }(), Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(pNext)).Fop)))) break } } if (*Select)(unsafe.Pointer(p)).FselFlags&U32(SF_MultiValue) == U32(0) && libc.AssignInt32(&mxSelect, *(*int32)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb + 136 + 4*4))) > 0 
&& cnt > mxSelect { - Xsqlite3ErrorMsg(tls, pParse, ts+23324, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+23371, 0) } } @@ -113683,7 +113741,7 @@ var p uintptr = Xsqlite3ExprListAppend(tls, pParse, pPrior, uintptr(0)) if (hasCollate != 0 || sortOrder != -1) && int32((*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).Finit.Fbusy) == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+23358, + Xsqlite3ErrorMsg(tls, pParse, ts+23405, libc.VaList(bp, (*Token)(unsafe.Pointer(pIdToken)).Fn, (*Token)(unsafe.Pointer(pIdToken)).Fz)) } Xsqlite3ExprListSetName(tls, pParse, p, pIdToken, 1) @@ -114780,7 +114838,7 @@ yy_pop_parser_stack(tls, yypParser) } - Xsqlite3ErrorMsg(tls, pParse, ts+23396, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+23443, 0) (*YyParser)(unsafe.Pointer(yypParser)).FpParse = pParse } @@ -115759,7 +115817,7 @@ *(*U32)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-1)*24 + 8)) = U32(TF_WithoutRowid | TF_NoVisibleRowid) } else { *(*U32)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-1)*24 + 8)) = U32(0) - Xsqlite3ErrorMsg(tls, pParse, ts+23418, libc.VaList(bp, (*Token)(unsafe.Pointer(yymsp+8)).Fn, (*Token)(unsafe.Pointer(yymsp+8)).Fz)) + Xsqlite3ErrorMsg(tls, pParse, ts+23465, libc.VaList(bp, (*Token)(unsafe.Pointer(yymsp+8)).Fn, (*Token)(unsafe.Pointer(yymsp+8)).Fz)) } } break @@ -115769,7 +115827,7 @@ *(*U32)(unsafe.Pointer(bp + 40)) = U32(TF_Strict) } else { *(*U32)(unsafe.Pointer(bp + 40)) = U32(0) - Xsqlite3ErrorMsg(tls, pParse, ts+23418, libc.VaList(bp+16, (*Token)(unsafe.Pointer(yymsp+8)).Fn, (*Token)(unsafe.Pointer(yymsp+8)).Fz)) + Xsqlite3ErrorMsg(tls, pParse, ts+23465, libc.VaList(bp+16, (*Token)(unsafe.Pointer(yymsp+8)).Fn, (*Token)(unsafe.Pointer(yymsp+8)).Fz)) } } *(*U32)(unsafe.Pointer(yymsp + 8)) = *(*U32)(unsafe.Pointer(bp + 40)) @@ -116512,7 +116570,7 @@ case uint32(157): { Xsqlite3SrcListIndexedBy(tls, pParse, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-5)*24 + 8)), yymsp+libc.UintptrFromInt32(-4)*24+8) - Xsqlite3ExprListCheckLength(tls, pParse, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-2)*24 + 8)), ts+23445) + Xsqlite3ExprListCheckLength(tls, pParse, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-2)*24 + 8)), ts+23492) if *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-1)*24 + 8)) != 0 { var pFromClause uintptr = *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-1)*24 + 8)) if (*SrcList)(unsafe.Pointer(pFromClause)).FnSrc > 1 { @@ -116676,7 +116734,7 @@ *(*Token)(unsafe.Pointer(bp + 128)) = *(*Token)(unsafe.Pointer(yymsp + 8)) if int32((*Parse)(unsafe.Pointer(pParse)).Fnested) == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+23454, libc.VaList(bp+32, bp+128)) + Xsqlite3ErrorMsg(tls, pParse, ts+23501, libc.VaList(bp+32, bp+128)) *(*uintptr)(unsafe.Pointer(yymsp + 8)) = uintptr(0) } else { *(*uintptr)(unsafe.Pointer(yymsp + 8)) = Xsqlite3PExpr(tls, pParse, TK_REGISTER, uintptr(0), uintptr(0)) @@ -116893,9 +116951,9 @@ Xsqlite3ExprUnmapAndDelete(tls, pParse, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-4)*24 + 8))) *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-4)*24 + 8)) = Xsqlite3Expr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, TK_STRING, func() uintptr { if *(*int32)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-3)*24 + 8)) != 0 { - return ts + 7167 + return ts + 8097 } - return ts + 7172 + return ts + 8102 }()) if *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-4)*24 + 8)) != 0 { Xsqlite3ExprIdToTrueFalse(tls, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-4)*24 + 8))) @@ -117179,19 
+117237,19 @@ { *(*Token)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-2)*24 + 8)) = *(*Token)(unsafe.Pointer(yymsp + 8)) Xsqlite3ErrorMsg(tls, pParse, - ts+23478, 0) + ts+23525, 0) } break case uint32(271): { Xsqlite3ErrorMsg(tls, pParse, - ts+23573, 0) + ts+23620, 0) } break case uint32(272): { Xsqlite3ErrorMsg(tls, pParse, - ts+23657, 0) + ts+23704, 0) } break case uint32(273): @@ -117570,9 +117628,9 @@ _ = yymajor if *(*int8)(unsafe.Pointer((*Token)(unsafe.Pointer(bp + 8)).Fz)) != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+23454, libc.VaList(bp, bp+8)) + Xsqlite3ErrorMsg(tls, pParse, ts+23501, libc.VaList(bp, bp+8)) } else { - Xsqlite3ErrorMsg(tls, pParse, ts+23742, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+23789, 0) } (*YyParser)(unsafe.Pointer(yypParser)).FpParse = pParse @@ -118340,7 +118398,7 @@ } else { (*Token)(unsafe.Pointer(bp + 2464)).Fz = zSql (*Token)(unsafe.Pointer(bp + 2464)).Fn = uint32(n) - Xsqlite3ErrorMsg(tls, pParse, ts+23759, libc.VaList(bp, bp+2464)) + Xsqlite3ErrorMsg(tls, pParse, ts+23806, libc.VaList(bp, bp+2464)) break } } @@ -118363,7 +118421,7 @@ if (*Parse)(unsafe.Pointer(pParse)).FzErrMsg == uintptr(0) { (*Parse)(unsafe.Pointer(pParse)).FzErrMsg = Xsqlite3MPrintf(tls, db, ts+3658, libc.VaList(bp+8, Xsqlite3ErrStr(tls, (*Parse)(unsafe.Pointer(pParse)).Frc))) } - Xsqlite3_log(tls, (*Parse)(unsafe.Pointer(pParse)).Frc, ts+23784, libc.VaList(bp+16, (*Parse)(unsafe.Pointer(pParse)).FzErrMsg, (*Parse)(unsafe.Pointer(pParse)).FzTail)) + Xsqlite3_log(tls, (*Parse)(unsafe.Pointer(pParse)).Frc, ts+23831, libc.VaList(bp+16, (*Parse)(unsafe.Pointer(pParse)).FzErrMsg, (*Parse)(unsafe.Pointer(pParse)).FzTail)) nErr++ } (*Parse)(unsafe.Pointer(pParse)).FzTail = zSql @@ -118536,7 +118594,7 @@ fallthrough case 'C': { - if nId == 6 && Xsqlite3_strnicmp(tls, zSql, ts+23795, 6) == 0 { + if nId == 6 && Xsqlite3_strnicmp(tls, zSql, ts+23842, 6) == 0 { token = U8(TkCREATE) } else { token = U8(TkOTHER) @@ -118549,11 +118607,11 @@ fallthrough case 'T': { - if nId == 7 && Xsqlite3_strnicmp(tls, zSql, ts+20381, 7) == 0 { + if nId == 7 && Xsqlite3_strnicmp(tls, zSql, ts+20428, 7) == 0 { token = U8(TkTRIGGER) - } else if nId == 4 && Xsqlite3_strnicmp(tls, zSql, ts+23802, 4) == 0 { + } else if nId == 4 && Xsqlite3_strnicmp(tls, zSql, ts+23849, 4) == 0 { token = U8(TkTEMP) - } else if nId == 9 && Xsqlite3_strnicmp(tls, zSql, ts+23807, 9) == 0 { + } else if nId == 9 && Xsqlite3_strnicmp(tls, zSql, ts+23854, 9) == 0 { token = U8(TkTEMP) } else { token = U8(TkOTHER) @@ -118566,9 +118624,9 @@ fallthrough case 'E': { - if nId == 3 && Xsqlite3_strnicmp(tls, zSql, ts+23817, 3) == 0 { + if nId == 3 && Xsqlite3_strnicmp(tls, zSql, ts+23864, 3) == 0 { token = U8(TkEND) - } else if nId == 7 && Xsqlite3_strnicmp(tls, zSql, ts+23821, 7) == 0 { + } else if nId == 7 && Xsqlite3_strnicmp(tls, zSql, ts+23868, 7) == 0 { token = U8(TkEXPLAIN) } else { token = U8(TkOTHER) @@ -118802,7 +118860,7 @@ var rc int32 = SQLITE_OK if Xsqlite3Config.FisInit != 0 { - return Xsqlite3MisuseError(tls, 174337) + return Xsqlite3MisuseError(tls, 174426) } ap = va @@ -119377,7 +119435,7 @@ return SQLITE_OK } if !(Xsqlite3SafetyCheckSickOrOk(tls, db) != 0) { - return Xsqlite3MisuseError(tls, 175111) + return Xsqlite3MisuseError(tls, 175200) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) if int32((*Sqlite3)(unsafe.Pointer(db)).FmTrace)&SQLITE_TRACE_CLOSE != 0 { @@ -119392,7 +119450,7 @@ if !(forceZombie != 0) && connectionIsBusy(tls, db) != 0 { Xsqlite3ErrorWithMsg(tls, db, SQLITE_BUSY, - ts+23829, 0) + ts+23876, 
0) Xsqlite3_mutex_leave(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) return SQLITE_BUSY } @@ -119583,23 +119641,23 @@ // Return a static string that describes the kind of error specified in the // argument. func Xsqlite3ErrStr(tls *libc.TLS, rc int32) uintptr { - var zErr uintptr = ts + 23897 + var zErr uintptr = ts + 23944 switch rc { case SQLITE_ABORT | int32(2)<<8: { - zErr = ts + 23911 + zErr = ts + 23958 break } case SQLITE_ROW: { - zErr = ts + 23933 + zErr = ts + 23980 break } case SQLITE_DONE: { - zErr = ts + 23955 + zErr = ts + 24002 break } @@ -119617,35 +119675,35 @@ } var aMsg = [29]uintptr{ - ts + 23978, - ts + 23991, + ts + 24025, + ts + 24038, uintptr(0), - ts + 24007, - ts + 24032, - ts + 24046, - ts + 24065, + ts + 24054, + ts + 24079, + ts + 24093, + ts + 24112, ts + 1483, - ts + 24090, - ts + 24127, - ts + 24139, - ts + 24154, - ts + 24187, - ts + 24205, - ts + 24230, - ts + 24259, + ts + 24137, + ts + 24174, + ts + 24186, + ts + 24201, + ts + 24234, + ts + 24252, + ts + 24277, + ts + 24306, uintptr(0), ts + 6241, ts + 5737, - ts + 24276, - ts + 24294, - ts + 24312, - uintptr(0), - ts + 24346, + ts + 24323, + ts + 24341, + ts + 24359, uintptr(0), - ts + 24367, ts + 24393, - ts + 24416, - ts + 24437, + uintptr(0), + ts + 24414, + ts + 24440, + ts + 24463, + ts + 24484, } func sqliteDefaultBusyCallback(tls *libc.TLS, ptr uintptr, count int32) int32 { @@ -119766,7 +119824,7 @@ libc.Bool32(xValue == uintptr(0)) != libc.Bool32(xInverse == uintptr(0)) || (nArg < -1 || nArg > SQLITE_MAX_FUNCTION_ARG) || 255 < Xsqlite3Strlen30(tls, zFunctionName) { - return Xsqlite3MisuseError(tls, 175758) + return Xsqlite3MisuseError(tls, 175847) } extraFlags = enc & (SQLITE_DETERMINISTIC | SQLITE_DIRECTONLY | SQLITE_SUBTYPE | SQLITE_INNOCUOUS) @@ -119811,7 +119869,7 @@ if p != 0 && (*FuncDef)(unsafe.Pointer(p)).FfuncFlags&U32(SQLITE_FUNC_ENCMASK) == U32(enc) && int32((*FuncDef)(unsafe.Pointer(p)).FnArg) == nArg { if (*Sqlite3)(unsafe.Pointer(db)).FnVdbeActive != 0 { Xsqlite3ErrorWithMsg(tls, db, SQLITE_BUSY, - ts+24453, 0) + ts+24500, 0) return SQLITE_BUSY } else { @@ -119928,7 +119986,7 @@ _ = NotUsed _ = NotUsed2 zErr = Xsqlite3_mprintf(tls, - ts+24516, libc.VaList(bp, zName)) + ts+24563, libc.VaList(bp, zName)) Xsqlite3_result_error(tls, context, zErr, -1) Xsqlite3_free(tls, zErr) } @@ -120164,7 +120222,7 @@ } if iDb < 0 { rc = SQLITE_ERROR - Xsqlite3ErrorWithMsg(tls, db, SQLITE_ERROR, ts+24567, libc.VaList(bp, zDb)) + Xsqlite3ErrorWithMsg(tls, db, SQLITE_ERROR, ts+24614, libc.VaList(bp, zDb)) } else { (*Sqlite3)(unsafe.Pointer(db)).FbusyHandler.FnBusy = 0 rc = Xsqlite3Checkpoint(tls, db, iDb, eMode, pnLog, pnCkpt) @@ -120257,7 +120315,7 @@ return Xsqlite3ErrStr(tls, SQLITE_NOMEM) } if !(Xsqlite3SafetyCheckSickOrOk(tls, db) != 0) { - return Xsqlite3ErrStr(tls, Xsqlite3MisuseError(tls, 176503)) + return Xsqlite3ErrStr(tls, Xsqlite3MisuseError(tls, 176592)) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) if (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { @@ -120327,7 +120385,7 @@ // passed to this function, we assume a malloc() failed during sqlite3_open(). 
func Xsqlite3_errcode(tls *libc.TLS, db uintptr) int32 { if db != 0 && !(Xsqlite3SafetyCheckSickOrOk(tls, db) != 0) { - return Xsqlite3MisuseError(tls, 176582) + return Xsqlite3MisuseError(tls, 176671) } if !(db != 0) || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { return SQLITE_NOMEM @@ -120337,7 +120395,7 @@ func Xsqlite3_extended_errcode(tls *libc.TLS, db uintptr) int32 { if db != 0 && !(Xsqlite3SafetyCheckSickOrOk(tls, db) != 0) { - return Xsqlite3MisuseError(tls, 176591) + return Xsqlite3MisuseError(tls, 176680) } if !(db != 0) || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { return SQLITE_NOMEM @@ -120369,14 +120427,14 @@ enc2 = SQLITE_UTF16LE } if enc2 < SQLITE_UTF8 || enc2 > SQLITE_UTF16BE { - return Xsqlite3MisuseError(tls, 176639) + return Xsqlite3MisuseError(tls, 176728) } pColl = Xsqlite3FindCollSeq(tls, db, U8(enc2), zName, 0) if pColl != 0 && (*CollSeq)(unsafe.Pointer(pColl)).FxCmp != 0 { if (*Sqlite3)(unsafe.Pointer(db)).FnVdbeActive != 0 { Xsqlite3ErrorWithMsg(tls, db, SQLITE_BUSY, - ts+24588, 0) + ts+24635, 0) return SQLITE_BUSY } Xsqlite3ExpirePreparedStatements(tls, db, 0) @@ -120506,7 +120564,7 @@ if !((flags&uint32(SQLITE_OPEN_URI) != 0 || Xsqlite3Config.FbOpenUri != 0) && - nUri >= 5 && libc.Xmemcmp(tls, zUri, ts+24656, uint64(5)) == 0) { + nUri >= 5 && libc.Xmemcmp(tls, zUri, ts+24703, uint64(5)) == 0) { goto __1 } iOut = 0 @@ -120551,10 +120609,10 @@ goto __8 __9: ; - if !(iIn != 7 && (iIn != 16 || libc.Xmemcmp(tls, ts+24662, zUri+7, uint64(9)) != 0)) { + if !(iIn != 7 && (iIn != 16 || libc.Xmemcmp(tls, ts+24709, zUri+7, uint64(9)) != 0)) { goto __10 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24672, + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24719, libc.VaList(bp, iIn-7, zUri+7)) rc = SQLITE_ERROR goto parse_uri_out @@ -120659,7 +120717,7 @@ zVal = zOpt + uintptr(nOpt+1) nVal = Xsqlite3Strlen30(tls, zVal) - if !(nOpt == 3 && libc.Xmemcmp(tls, ts+24700, zOpt, uint64(3)) == 0) { + if !(nOpt == 3 && libc.Xmemcmp(tls, ts+24747, zOpt, uint64(3)) == 0) { goto __29 } zVfs = zVal @@ -120670,17 +120728,17 @@ mask = 0 limit = 0 - if !(nOpt == 5 && libc.Xmemcmp(tls, ts+24704, zOpt, uint64(5)) == 0) { + if !(nOpt == 5 && libc.Xmemcmp(tls, ts+24751, zOpt, uint64(5)) == 0) { goto __31 } mask = SQLITE_OPEN_SHAREDCACHE | SQLITE_OPEN_PRIVATECACHE aMode = uintptr(unsafe.Pointer(&aCacheMode)) limit = mask - zModeType = ts + 24704 + zModeType = ts + 24751 __31: ; - if !(nOpt == 4 && libc.Xmemcmp(tls, ts+24710, zOpt, uint64(4)) == 0) { + if !(nOpt == 4 && libc.Xmemcmp(tls, ts+24757, zOpt, uint64(4)) == 0) { goto __32 } @@ -120718,7 +120776,7 @@ if !(mode == 0) { goto __38 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24715, libc.VaList(bp+16, zModeType, zVal)) + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24762, libc.VaList(bp+16, zModeType, zVal)) rc = SQLITE_ERROR goto parse_uri_out __38: @@ -120726,7 +120784,7 @@ if !(mode&libc.CplInt32(SQLITE_OPEN_MEMORY) > limit) { goto __39 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24735, + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24782, libc.VaList(bp+32, zModeType, zVal)) rc = SQLITE_PERM goto parse_uri_out @@ -120766,7 +120824,7 @@ if !(*(*uintptr)(unsafe.Pointer(ppVfs)) == uintptr(0)) { goto __42 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24759, libc.VaList(bp+48, zVfs)) + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24806, libc.VaList(bp+48, 
zVfs)) rc = SQLITE_ERROR __42: ; @@ -120790,14 +120848,14 @@ } var aCacheMode = [3]OpenMode{ - {Fz: ts + 24775, Fmode: SQLITE_OPEN_SHAREDCACHE}, - {Fz: ts + 24782, Fmode: SQLITE_OPEN_PRIVATECACHE}, + {Fz: ts + 24822, Fmode: SQLITE_OPEN_SHAREDCACHE}, + {Fz: ts + 24829, Fmode: SQLITE_OPEN_PRIVATECACHE}, {}, } var aOpenMode = [5]OpenMode{ - {Fz: ts + 24790, Fmode: SQLITE_OPEN_READONLY}, - {Fz: ts + 24793, Fmode: SQLITE_OPEN_READWRITE}, - {Fz: ts + 24796, Fmode: SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE}, + {Fz: ts + 24837, Fmode: SQLITE_OPEN_READONLY}, + {Fz: ts + 24840, Fmode: SQLITE_OPEN_READWRITE}, + {Fz: ts + 24843, Fmode: SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE}, {Fz: ts + 17784, Fmode: SQLITE_OPEN_MEMORY}, {}, } @@ -120944,10 +121002,10 @@ createCollation(tls, db, uintptr(unsafe.Pointer(&Xsqlite3StrBINARY)), uint8(SQLITE_UTF16LE), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr, int32, uintptr) int32 }{binCollFunc})), uintptr(0)) - createCollation(tls, db, ts+22313, uint8(SQLITE_UTF8), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + createCollation(tls, db, ts+22360, uint8(SQLITE_UTF8), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr, int32, uintptr) int32 }{nocaseCollatingFunc})), uintptr(0)) - createCollation(tls, db, ts+24800, uint8(SQLITE_UTF8), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + createCollation(tls, db, ts+24847, uint8(SQLITE_UTF8), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr, int32, uintptr) int32 }{rtrimCollFunc})), uintptr(0)) if !((*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0) { @@ -120961,7 +121019,7 @@ if !(int32(1)<<(*(*uint32)(unsafe.Pointer(bp + 8))&uint32(7))&0x46 == 0) { goto __16 } - rc = Xsqlite3MisuseError(tls, 177308) + rc = Xsqlite3MisuseError(tls, 177397) goto __17 __16: rc = Xsqlite3ParseUri(tls, zVfs, zFilename, bp+8, db, bp+16, bp+24) @@ -121014,7 +121072,7 @@ (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb)).FzDbSName = ts + 6844 (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb)).Fsafety_level = U8(SQLITE_DEFAULT_SYNCHRONOUS + 1) - (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + 1*32)).FzDbSName = ts + 23802 + (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + 1*32)).FzDbSName = ts + 23849 (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + 1*32)).Fsafety_level = U8(PAGER_SYNCHRONOUS_OFF) (*Sqlite3)(unsafe.Pointer(db)).FeOpenState = U8(SQLITE_STATE_OPEN) @@ -121119,7 +121177,7 @@ return rc } if zFilename == uintptr(0) { - zFilename = ts + 24806 + zFilename = ts + 24853 } pVal = Xsqlite3ValueNew(tls, uintptr(0)) Xsqlite3ValueSetStr(tls, pVal, -1, zFilename, uint8(SQLITE_UTF16LE), uintptr(0)) @@ -121222,21 +121280,21 @@ bp := tls.Alloc(24) defer tls.Free(24) - Xsqlite3_log(tls, iErr, ts+24809, + Xsqlite3_log(tls, iErr, ts+24856, libc.VaList(bp, zType, lineno, uintptr(20)+Xsqlite3_sourceid(tls))) return iErr } func Xsqlite3CorruptError(tls *libc.TLS, lineno int32) int32 { - return Xsqlite3ReportError(tls, SQLITE_CORRUPT, lineno, ts+24834) + return Xsqlite3ReportError(tls, SQLITE_CORRUPT, lineno, ts+24881) } func Xsqlite3MisuseError(tls *libc.TLS, lineno int32) int32 { - return Xsqlite3ReportError(tls, SQLITE_MISUSE, lineno, ts+24854) + return Xsqlite3ReportError(tls, SQLITE_MISUSE, lineno, ts+24901) } func Xsqlite3CantopenError(tls *libc.TLS, lineno int32) int32 { - return Xsqlite3ReportError(tls, SQLITE_CANTOPEN, lineno, ts+24861) + return Xsqlite3ReportError(tls, 
SQLITE_CANTOPEN, lineno, ts+24908) } // This is a convenience routine that makes sure that all thread-specific @@ -121394,7 +121452,7 @@ goto __20 } Xsqlite3DbFree(tls, db, *(*uintptr)(unsafe.Pointer(bp + 24))) - *(*uintptr)(unsafe.Pointer(bp + 24)) = Xsqlite3MPrintf(tls, db, ts+24878, libc.VaList(bp, zTableName, + *(*uintptr)(unsafe.Pointer(bp + 24)) = Xsqlite3MPrintf(tls, db, ts+24925, libc.VaList(bp, zTableName, zColumnName)) rc = SQLITE_ERROR __20: @@ -122050,7 +122108,7 @@ azCompileOpt = Xsqlite3CompileOptions(tls, bp) - if Xsqlite3_strnicmp(tls, zOptName, ts+24906, 7) == 0 { + if Xsqlite3_strnicmp(tls, zOptName, ts+24953, 7) == 0 { zOptName += uintptr(7) } n = Xsqlite3Strlen30(tls, zOptName) @@ -122168,7 +122226,7 @@ Xsqlite3ErrorWithMsg(tls, db, rc, func() uintptr { if rc != 0 { - return ts + 24914 + return ts + 24961 } return uintptr(0) }(), 0) @@ -122346,7 +122404,7 @@ type JsonParse = JsonParse1 var jsonType = [8]uintptr{ - ts + 6584, ts + 7167, ts + 7172, ts + 6594, ts + 6589, ts + 8408, ts + 24937, ts + 24943, + ts + 6584, ts + 8097, ts + 8102, ts + 6594, ts + 6589, ts + 8408, ts + 24984, ts + 24990, } func jsonZero(tls *libc.TLS, p uintptr) { @@ -122499,7 +122557,7 @@ *(*int8)(unsafe.Pointer((*JsonString)(unsafe.Pointer(p)).FzBuf + uintptr(libc.PostIncUint64(&(*JsonString)(unsafe.Pointer(p)).FnUsed, 1)))) = int8('0') *(*int8)(unsafe.Pointer((*JsonString)(unsafe.Pointer(p)).FzBuf + uintptr(libc.PostIncUint64(&(*JsonString)(unsafe.Pointer(p)).FnUsed, 1)))) = int8('0') *(*int8)(unsafe.Pointer((*JsonString)(unsafe.Pointer(p)).FzBuf + uintptr(libc.PostIncUint64(&(*JsonString)(unsafe.Pointer(p)).FnUsed, 1)))) = int8('0' + int32(c)>>4) - c = uint8(*(*int8)(unsafe.Pointer(ts + 24950 + uintptr(int32(c)&0xf)))) + c = uint8(*(*int8)(unsafe.Pointer(ts + 24997 + uintptr(int32(c)&0xf)))) __8: ; __6: @@ -122554,7 +122612,7 @@ default: { if int32((*JsonString)(unsafe.Pointer(p)).FbErr) == 0 { - Xsqlite3_result_error(tls, (*JsonString)(unsafe.Pointer(p)).FpCtx, ts+24967, -1) + Xsqlite3_result_error(tls, (*JsonString)(unsafe.Pointer(p)).FpCtx, ts+25014, -1) (*JsonString)(unsafe.Pointer(p)).FbErr = U8(2) jsonReset(tls, p) } @@ -122618,13 +122676,13 @@ } case JSON_TRUE: { - jsonAppendRaw(tls, pOut, ts+7167, uint32(4)) + jsonAppendRaw(tls, pOut, ts+8097, uint32(4)) break } case JSON_FALSE: { - jsonAppendRaw(tls, pOut, ts+7172, uint32(5)) + jsonAppendRaw(tls, pOut, ts+8102, uint32(5)) break } @@ -123174,12 +123232,12 @@ jsonParseAddNode(tls, pParse, uint32(JSON_NULL), uint32(0), uintptr(0)) return int32(i + U32(4)) } else if int32(c) == 't' && - libc.Xstrncmp(tls, z+uintptr(i), ts+7167, uint64(4)) == 0 && + libc.Xstrncmp(tls, z+uintptr(i), ts+8097, uint64(4)) == 0 && !(int32(Xsqlite3CtypeMap[uint8(*(*int8)(unsafe.Pointer(z + uintptr(i+U32(4)))))])&0x06 != 0) { jsonParseAddNode(tls, pParse, uint32(JSON_TRUE), uint32(0), uintptr(0)) return int32(i + U32(4)) } else if int32(c) == 'f' && - libc.Xstrncmp(tls, z+uintptr(i), ts+7172, uint64(5)) == 0 && + libc.Xstrncmp(tls, z+uintptr(i), ts+8102, uint64(5)) == 0 && !(int32(Xsqlite3CtypeMap[uint8(*(*int8)(unsafe.Pointer(z + uintptr(i+U32(5)))))])&0x06 != 0) { jsonParseAddNode(tls, pParse, uint32(JSON_FALSE), uint32(0), uintptr(0)) return int32(i + U32(5)) @@ -123280,7 +123338,7 @@ if (*JsonParse)(unsafe.Pointer(pParse)).Foom != 0 { Xsqlite3_result_error_nomem(tls, pCtx) } else { - Xsqlite3_result_error(tls, pCtx, ts+24996, -1) + Xsqlite3_result_error(tls, pCtx, ts+25043, -1) } } jsonParseReset(tls, pParse) @@ -123586,7 +123644,7 @@ } if 
int32(*(*int8)(unsafe.Pointer(zPath))) == '.' { jsonParseAddNode(tls, pParse, uint32(JSON_OBJECT), uint32(0), uintptr(0)) - } else if libc.Xstrncmp(tls, zPath, ts+25011, uint64(3)) == 0 { + } else if libc.Xstrncmp(tls, zPath, ts+25058, uint64(3)) == 0 { jsonParseAddNode(tls, pParse, uint32(JSON_ARRAY), uint32(0), uintptr(0)) } else { return uintptr(0) @@ -123601,7 +123659,7 @@ bp := tls.Alloc(8) defer tls.Free(8) - return Xsqlite3_mprintf(tls, ts+25015, libc.VaList(bp, zErr)) + return Xsqlite3_mprintf(tls, ts+25062, libc.VaList(bp, zErr)) } func jsonLookup(tls *libc.TLS, pParse uintptr, zPath uintptr, pApnd uintptr, pCtx uintptr) uintptr { @@ -123655,7 +123713,7 @@ bp := tls.Alloc(8) defer tls.Free(8) - var zMsg uintptr = Xsqlite3_mprintf(tls, ts+25041, + var zMsg uintptr = Xsqlite3_mprintf(tls, ts+25088, libc.VaList(bp, zFuncName)) Xsqlite3_result_error(tls, pCtx, zMsg, -1) Xsqlite3_free(tls, zMsg) @@ -123760,11 +123818,11 @@ if int32(*(*int8)(unsafe.Pointer(zPath))) != '$' { jsonInit(tls, bp, ctx) if int32(Xsqlite3CtypeMap[uint8(*(*int8)(unsafe.Pointer(zPath)))])&0x04 != 0 { - jsonAppendRaw(tls, bp, ts+25084, uint32(2)) + jsonAppendRaw(tls, bp, ts+25131, uint32(2)) jsonAppendRaw(tls, bp, zPath, uint32(int32(libc.Xstrlen(tls, zPath)))) jsonAppendRaw(tls, bp, ts+5401, uint32(2)) } else { - jsonAppendRaw(tls, bp, ts+25087, uint32(1+libc.Bool32(int32(*(*int8)(unsafe.Pointer(zPath))) != '['))) + jsonAppendRaw(tls, bp, ts+25134, uint32(1+libc.Bool32(int32(*(*int8)(unsafe.Pointer(zPath))) != '['))) jsonAppendRaw(tls, bp, zPath, uint32(int32(libc.Xstrlen(tls, zPath)))) jsonAppendChar(tls, bp, int8(0)) } @@ -123921,14 +123979,14 @@ if argc&1 != 0 { Xsqlite3_result_error(tls, ctx, - ts+25090, -1) + ts+25137, -1) return } jsonInit(tls, bp, ctx) jsonAppendChar(tls, bp, int8('{')) for i = 0; i < argc; i = i + 2 { if Xsqlite3_value_type(tls, *(*uintptr)(unsafe.Pointer(argv + uintptr(i)*8))) != SQLITE_TEXT { - Xsqlite3_result_error(tls, ctx, ts+25141, -1) + Xsqlite3_result_error(tls, ctx, ts+25188, -1) jsonReset(tls, bp) return } @@ -124098,9 +124156,9 @@ } jsonWrongNumArgs(tls, ctx, func() uintptr { if bIsSet != 0 { - return ts + 25175 + return ts + 25222 } - return ts + 25179 + return ts + 25226 }()) return __2: @@ -124233,7 +124291,7 @@ (*JsonString)(unsafe.Pointer(pStr)).FnUsed-- } } else { - Xsqlite3_result_text(tls, ctx, ts+25186, 2, uintptr(0)) + Xsqlite3_result_text(tls, ctx, ts+25233, 2, uintptr(0)) } Xsqlite3_result_subtype(tls, ctx, uint32(JSON_SUBTYPE)) } @@ -124330,7 +124388,7 @@ (*JsonString)(unsafe.Pointer(pStr)).FnUsed-- } } else { - Xsqlite3_result_text(tls, ctx, ts+25189, 2, uintptr(0)) + Xsqlite3_result_text(tls, ctx, ts+25236, 2, uintptr(0)) } Xsqlite3_result_subtype(tls, ctx, uint32(JSON_SUBTYPE)) } @@ -124374,7 +124432,7 @@ _ = argc _ = pAux rc = Xsqlite3_declare_vtab(tls, db, - ts+25192) + ts+25239) if rc == SQLITE_OK { pNew = libc.AssignPtrUintptr(ppVtab, Xsqlite3_malloc(tls, int32(unsafe.Sizeof(Sqlite3_vtab{})))) if pNew == uintptr(0) { @@ -124505,7 +124563,7 @@ nn = nn - 2 } } - jsonPrintf(tls, nn+2, pStr, ts+25275, libc.VaList(bp, nn, z)) + jsonPrintf(tls, nn+2, pStr, ts+25322, libc.VaList(bp, nn, z)) } func jsonEachComputePath(tls *libc.TLS, p uintptr, pStr uintptr, i U32) { @@ -124524,7 +124582,7 @@ pNode = (*JsonEachCursor)(unsafe.Pointer(p)).FsParse.FaNode + uintptr(i)*16 pUp = (*JsonEachCursor)(unsafe.Pointer(p)).FsParse.FaNode + uintptr(iUp)*16 if int32((*JsonNode)(unsafe.Pointer(pUp)).FeType) == JSON_ARRAY { - jsonPrintf(tls, 30, pStr, ts+25281, libc.VaList(bp, 
*(*U32)(unsafe.Pointer(pUp + 8)))) + jsonPrintf(tls, 30, pStr, ts+25328, libc.VaList(bp, *(*U32)(unsafe.Pointer(pUp + 8)))) } else { if int32((*JsonNode)(unsafe.Pointer(pNode)).FjnFlags)&JNODE_LABEL == 0 { pNode -= 16 @@ -124620,7 +124678,7 @@ jsonAppendChar(tls, bp+8, int8('$')) } if int32((*JsonEachCursor)(unsafe.Pointer(p)).FeType) == JSON_ARRAY { - jsonPrintf(tls, 30, bp+8, ts+25281, libc.VaList(bp, (*JsonEachCursor)(unsafe.Pointer(p)).FiRowid)) + jsonPrintf(tls, 30, bp+8, ts+25328, libc.VaList(bp, (*JsonEachCursor)(unsafe.Pointer(p)).FiRowid)) } else if int32((*JsonEachCursor)(unsafe.Pointer(p)).FeType) == JSON_OBJECT { jsonAppendObjectPathElement(tls, bp+8, pThis) } @@ -124644,7 +124702,7 @@ { var zRoot uintptr = (*JsonEachCursor)(unsafe.Pointer(p)).FzRoot if zRoot == uintptr(0) { - zRoot = ts + 25286 + zRoot = ts + 25333 } Xsqlite3_result_text(tls, ctx, zRoot, -1, uintptr(0)) break @@ -124766,7 +124824,7 @@ var rc int32 = SQLITE_NOMEM if int32((*JsonEachCursor)(unsafe.Pointer(p)).FsParse.Foom) == 0 { Xsqlite3_free(tls, (*Sqlite3_vtab)(unsafe.Pointer((*Sqlite3_vtab_cursor)(unsafe.Pointer(cur)).FpVtab)).FzErrMsg) - (*Sqlite3_vtab)(unsafe.Pointer((*Sqlite3_vtab_cursor)(unsafe.Pointer(cur)).FpVtab)).FzErrMsg = Xsqlite3_mprintf(tls, ts+24996, 0) + (*Sqlite3_vtab)(unsafe.Pointer((*Sqlite3_vtab_cursor)(unsafe.Pointer(cur)).FpVtab)).FzErrMsg = Xsqlite3_mprintf(tls, ts+25043, 0) if (*Sqlite3_vtab)(unsafe.Pointer((*Sqlite3_vtab_cursor)(unsafe.Pointer(cur)).FpVtab)).FzErrMsg != 0 { rc = SQLITE_ERROR } @@ -124861,25 +124919,25 @@ } var aJsonFunc = [19]FuncDef{ - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25288}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25293}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25304}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25304}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25322}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_JSON)), FxSFunc: 0, FzName: ts + 25335}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_SQL)), FxSFunc: 0, FzName: ts + 25338}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25342}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25354}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25366}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25377}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25388}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | 
SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25400}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_ISSET)), FxSFunc: 0, FzName: ts + 25413}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25422}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25422}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25432}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_UTF8 | 0*SQLITE_FUNC_NEEDCOLL | SQLITE_SUBTYPE | SQLITE_UTF8 | SQLITE_DETERMINISTIC), FxSFunc: 0, FxFinalize: 0, FxValue: 0, FxInverse: 0, FzName: ts + 25443}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_UTF8 | 0*SQLITE_FUNC_NEEDCOLL | SQLITE_SUBTYPE | SQLITE_UTF8 | SQLITE_DETERMINISTIC), FxSFunc: 0, FxFinalize: 0, FxValue: 0, FxInverse: 0, FzName: ts + 25460}} + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25335}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25340}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25351}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25351}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25369}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_JSON)), FxSFunc: 0, FzName: ts + 25382}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_SQL)), FxSFunc: 0, FzName: ts + 25385}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25389}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25401}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25413}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25424}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25435}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25447}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_ISSET)), FxSFunc: 0, FzName: ts + 25460}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25469}, + {FnArg: int8(2), 
FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25469}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25479}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_UTF8 | 0*SQLITE_FUNC_NEEDCOLL | SQLITE_SUBTYPE | SQLITE_UTF8 | SQLITE_DETERMINISTIC), FxSFunc: 0, FxFinalize: 0, FxValue: 0, FxInverse: 0, FzName: ts + 25490}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_UTF8 | 0*SQLITE_FUNC_NEEDCOLL | SQLITE_SUBTYPE | SQLITE_UTF8 | SQLITE_DETERMINISTIC), FxSFunc: 0, FxFinalize: 0, FxValue: 0, FxInverse: 0, FzName: ts + 25507}} // Register the JSON table-valued functions func Xsqlite3JsonTableFunctions(tls *libc.TLS, db uintptr) int32 { @@ -124898,8 +124956,8 @@ FzName uintptr FpModule uintptr }{ - {FzName: ts + 25478, FpModule: 0}, - {FzName: ts + 25488, FpModule: 0}, + {FzName: ts + 25525, FpModule: 0}, + {FzName: ts + 25535, FpModule: 0}, } type Rtree1 = struct { @@ -125159,11 +125217,11 @@ } } if (*Rtree)(unsafe.Pointer(pRtree)).FpNodeBlob == uintptr(0) { - var zTab uintptr = Xsqlite3_mprintf(tls, ts+25498, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) + var zTab uintptr = Xsqlite3_mprintf(tls, ts+25545, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) if zTab == uintptr(0) { return SQLITE_NOMEM } - rc = Xsqlite3_blob_open(tls, (*Rtree)(unsafe.Pointer(pRtree)).Fdb, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, zTab, ts+25506, iNode, 0, + rc = Xsqlite3_blob_open(tls, (*Rtree)(unsafe.Pointer(pRtree)).Fdb, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, zTab, ts+25553, iNode, 0, pRtree+112) Xsqlite3_free(tls, zTab) } @@ -125374,7 +125432,7 @@ var pRtree uintptr = pVtab var rc int32 var zCreate uintptr = Xsqlite3_mprintf(tls, - ts+25511, + ts+25558, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) @@ -126077,7 +126135,7 @@ var pSrc uintptr var pInfo uintptr - pSrc = Xsqlite3_value_pointer(tls, pValue, ts+25593) + pSrc = Xsqlite3_value_pointer(tls, pValue, ts+25640) if pSrc == uintptr(0) { return SQLITE_ERROR } @@ -127418,7 +127476,7 @@ var zSql uintptr var rc int32 - zSql = Xsqlite3_mprintf(tls, ts+25607, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) + zSql = Xsqlite3_mprintf(tls, ts+25654, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) if zSql != 0 { rc = Xsqlite3_prepare_v2(tls, (*Rtree)(unsafe.Pointer(pRtree)).Fdb, zSql, -1, bp+56, uintptr(0)) } else { @@ -127430,12 +127488,12 @@ if iCol == 0 { var zCol uintptr = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), 0) (*Rtree)(unsafe.Pointer(pRtree)).Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+25627, libc.VaList(bp+16, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zCol)) + ts+25674, libc.VaList(bp+16, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zCol)) } else { var zCol1 uintptr = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), iCol) var zCol2 uintptr = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), iCol+1) (*Rtree)(unsafe.Pointer(pRtree)).Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+25659, libc.VaList(bp+32, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zCol1, zCol2)) + ts+25706, libc.VaList(bp+32, 
(*Rtree)(unsafe.Pointer(pRtree)).FzName, zCol1, zCol2)) } } @@ -127661,7 +127719,7 @@ var pRtree uintptr = pVtab var rc int32 = SQLITE_NOMEM var zSql uintptr = Xsqlite3_mprintf(tls, - ts+25696, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName)) + ts+25743, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName)) if zSql != 0 { nodeBlobReset(tls, pRtree) rc = Xsqlite3_exec(tls, (*Rtree)(unsafe.Pointer(pRtree)).Fdb, zSql, uintptr(0), uintptr(0), uintptr(0)) @@ -127684,7 +127742,7 @@ bp := tls.Alloc(24) defer tls.Free(24) - var zFmt uintptr = ts + 25841 + var zFmt uintptr = ts + 25888 var zSql uintptr var rc int32 @@ -127732,7 +127790,7 @@ } var azName1 = [3]uintptr{ - ts + 25897, ts + 5463, ts + 16673, + ts + 25944, ts + 5463, ts + 16673, } var rtreeModule = Sqlite3_module{ @@ -127775,19 +127833,19 @@ var p uintptr = Xsqlite3_str_new(tls, db) var ii int32 Xsqlite3_str_appendf(tls, p, - ts+25902, + ts+25949, libc.VaList(bp, zDb, zPrefix)) for ii = 0; ii < int32((*Rtree)(unsafe.Pointer(pRtree)).FnAux); ii++ { - Xsqlite3_str_appendf(tls, p, ts+25964, libc.VaList(bp+16, ii)) + Xsqlite3_str_appendf(tls, p, ts+26011, libc.VaList(bp+16, ii)) } Xsqlite3_str_appendf(tls, p, - ts+25969, + ts+26016, libc.VaList(bp+24, zDb, zPrefix)) Xsqlite3_str_appendf(tls, p, - ts+26033, + ts+26080, libc.VaList(bp+40, zDb, zPrefix)) Xsqlite3_str_appendf(tls, p, - ts+26103, + ts+26150, libc.VaList(bp+56, zDb, zPrefix, (*Rtree)(unsafe.Pointer(pRtree)).FiNodeSize)) zCreate = Xsqlite3_str_finish(tls, p) if !(zCreate != 0) { @@ -127816,7 +127874,7 @@ if i != 3 || int32((*Rtree)(unsafe.Pointer(pRtree)).FnAux) == 0 { zFormat = azSql[i] } else { - zFormat = ts + 26152 + zFormat = ts + 26199 } zSql = Xsqlite3_mprintf(tls, zFormat, libc.VaList(bp+80, zDb, zPrefix)) if zSql != 0 { @@ -127828,7 +127886,7 @@ } if (*Rtree)(unsafe.Pointer(pRtree)).FnAux != 0 { (*Rtree)(unsafe.Pointer(pRtree)).FzReadAuxSql = Xsqlite3_mprintf(tls, - ts+26260, + ts+26307, libc.VaList(bp+96, zDb, zPrefix)) if (*Rtree)(unsafe.Pointer(pRtree)).FzReadAuxSql == uintptr(0) { rc = SQLITE_NOMEM @@ -127836,18 +127894,18 @@ var p uintptr = Xsqlite3_str_new(tls, db) var ii int32 var zSql uintptr - Xsqlite3_str_appendf(tls, p, ts+26305, libc.VaList(bp+112, zDb, zPrefix)) + Xsqlite3_str_appendf(tls, p, ts+26352, libc.VaList(bp+112, zDb, zPrefix)) for ii = 0; ii < int32((*Rtree)(unsafe.Pointer(pRtree)).FnAux); ii++ { if ii != 0 { Xsqlite3_str_append(tls, p, ts+13170, 1) } if ii < int32((*Rtree)(unsafe.Pointer(pRtree)).FnAuxNotNull) { - Xsqlite3_str_appendf(tls, p, ts+26332, libc.VaList(bp+128, ii, ii+2, ii)) + Xsqlite3_str_appendf(tls, p, ts+26379, libc.VaList(bp+128, ii, ii+2, ii)) } else { - Xsqlite3_str_appendf(tls, p, ts+26354, libc.VaList(bp+152, ii, ii+2)) + Xsqlite3_str_appendf(tls, p, ts+26401, libc.VaList(bp+152, ii, ii+2)) } } - Xsqlite3_str_appendf(tls, p, ts+26362, 0) + Xsqlite3_str_appendf(tls, p, ts+26409, 0) zSql = Xsqlite3_str_finish(tls, p) if zSql == uintptr(0) { rc = SQLITE_NOMEM @@ -127862,14 +127920,14 @@ } var azSql = [8]uintptr{ - ts + 26378, - ts + 26431, - ts + 26476, - ts + 26528, - 
ts + 26582, - ts + 26627, - ts + 26685, - ts + 26740, + ts + 26425, + ts + 26478, + ts + 26523, + ts + 26575, + ts + 26629, + ts + 26674, + ts + 26732, + ts + 26787, } func getIntFromStmt(tls *libc.TLS, db uintptr, zSql uintptr, piVal uintptr) int32 { @@ -127898,7 +127956,7 @@ var zSql uintptr if isCreate != 0 { *(*int32)(unsafe.Pointer(bp + 48)) = 0 - zSql = Xsqlite3_mprintf(tls, ts+26787, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb)) + zSql = Xsqlite3_mprintf(tls, ts+26834, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb)) rc = getIntFromStmt(tls, db, zSql, bp+48) if rc == SQLITE_OK { (*Rtree)(unsafe.Pointer(pRtree)).FiNodeSize = *(*int32)(unsafe.Pointer(bp + 48)) - 64 @@ -127910,7 +127968,7 @@ } } else { zSql = Xsqlite3_mprintf(tls, - ts+26807, + ts+26854, libc.VaList(bp+16, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) rc = getIntFromStmt(tls, db, zSql, pRtree+32) if rc != SQLITE_OK { @@ -127918,7 +127976,7 @@ } else if (*Rtree)(unsafe.Pointer(pRtree)).FiNodeSize < 512-64 { rc = SQLITE_CORRUPT | int32(1)<<8 - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+26864, + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+26911, libc.VaList(bp+40, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) } } @@ -127960,10 +128018,10 @@ ii = 4 *(*[5]uintptr)(unsafe.Pointer(bp + 96)) = [5]uintptr{ uintptr(0), - ts + 26899, - ts + 26942, - ts + 26977, - ts + 27013, + ts + 26946, + ts + 26989, + ts + 27024, + ts + 27060, } if !(argc < 6 || argc > RTREE_MAX_AUX_COLUMN+3) { @@ -127994,7 +128052,7 @@ libc.X__builtin___memcpy_chk(tls, (*Rtree)(unsafe.Pointer(pRtree)).FzName, *(*uintptr)(unsafe.Pointer(argv + 2*8)), uint64(nName), libc.X__builtin_object_size(tls, (*Rtree)(unsafe.Pointer(pRtree)).FzName, 0)) pSql = Xsqlite3_str_new(tls, db) - Xsqlite3_str_appendf(tls, pSql, ts+27050, + Xsqlite3_str_appendf(tls, pSql, ts+27097, libc.VaList(bp+16, rtreeTokenLength(tls, *(*uintptr)(unsafe.Pointer(argv + 3*8))), *(*uintptr)(unsafe.Pointer(argv + 3*8)))) ii = 4 __3: @@ -128006,7 +128064,7 @@ goto __6 } (*Rtree)(unsafe.Pointer(pRtree)).FnAux++ - Xsqlite3_str_appendf(tls, pSql, ts+27074, libc.VaList(bp+32, rtreeTokenLength(tls, zArg+uintptr(1)), zArg+uintptr(1))) + Xsqlite3_str_appendf(tls, pSql, ts+27121, libc.VaList(bp+32, rtreeTokenLength(tls, zArg+uintptr(1)), zArg+uintptr(1))) goto __7 __6: if !(int32((*Rtree)(unsafe.Pointer(pRtree)).FnAux) > 0) { @@ -128029,7 +128087,7 @@ goto __5 __5: ; - Xsqlite3_str_appendf(tls, pSql, ts+27080, 0) + Xsqlite3_str_appendf(tls, pSql, ts+27127, 0) zSql = Xsqlite3_str_finish(tls, pSql) if !!(zSql != 0) { goto __10 @@ -128125,7 +128183,7 @@ return rc } -var azFormat = [2]uintptr{ts + 27083, ts + 27094} +var azFormat = [2]uintptr{ts + 27130, ts + 27141} func rtreenode(tls *libc.TLS, ctx uintptr, nArg int32, apArg uintptr) { bp := tls.Alloc(1072) @@ -128165,11 +128223,11 @@ if ii > 0 { Xsqlite3_str_append(tls, pOut, ts+11323, 1) } - Xsqlite3_str_appendf(tls, pOut, ts+27104, libc.VaList(bp, (*RtreeCell)(unsafe.Pointer(bp+1024)).FiRowid)) + Xsqlite3_str_appendf(tls, pOut, ts+27151, libc.VaList(bp, (*RtreeCell)(unsafe.Pointer(bp+1024)).FiRowid)) for jj = 0; jj < int32((*Rtree)(unsafe.Pointer(bp+56)).FnDim2); jj++ { - Xsqlite3_str_appendf(tls, pOut, ts+27110, libc.VaList(bp+8, float64(*(*RtreeValue)(unsafe.Pointer(bp + 1024 + 8 + uintptr(jj)*4))))) + Xsqlite3_str_appendf(tls, pOut, ts+27157, libc.VaList(bp+8, float64(*(*RtreeValue)(unsafe.Pointer(bp + 1024 + 8 + uintptr(jj)*4))))) } - Xsqlite3_str_append(tls, pOut, 
ts+27114, 1) + Xsqlite3_str_append(tls, pOut, ts+27161, 1) } errCode = Xsqlite3_str_errcode(tls, pOut) Xsqlite3_result_text(tls, ctx, Xsqlite3_str_finish(tls, pOut), -1, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{Xsqlite3_free}))) @@ -128180,7 +128238,7 @@ _ = nArg if Xsqlite3_value_type(tls, *(*uintptr)(unsafe.Pointer(apArg))) != SQLITE_BLOB || Xsqlite3_value_bytes(tls, *(*uintptr)(unsafe.Pointer(apArg))) < 2 { - Xsqlite3_result_error(tls, ctx, ts+27116, -1) + Xsqlite3_result_error(tls, ctx, ts+27163, -1) } else { var zBlob uintptr = Xsqlite3_value_blob(tls, *(*uintptr)(unsafe.Pointer(apArg))) if zBlob != 0 { @@ -128258,7 +128316,7 @@ if z == uintptr(0) { (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc = SQLITE_NOMEM } else { - (*RtreeCheck)(unsafe.Pointer(pCheck)).FzReport = Xsqlite3_mprintf(tls, ts+27149, + (*RtreeCheck)(unsafe.Pointer(pCheck)).FzReport = Xsqlite3_mprintf(tls, ts+27196, libc.VaList(bp, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzReport, func() uintptr { if (*RtreeCheck)(unsafe.Pointer(pCheck)).FzReport != 0 { return ts + 4465 @@ -128282,7 +128340,7 @@ if (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc == SQLITE_OK && (*RtreeCheck)(unsafe.Pointer(pCheck)).FpGetNode == uintptr(0) { (*RtreeCheck)(unsafe.Pointer(pCheck)).FpGetNode = rtreeCheckPrepare(tls, pCheck, - ts+27156, + ts+27203, libc.VaList(bp, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzDb, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzTab)) } @@ -128301,7 +128359,7 @@ } rtreeCheckReset(tls, pCheck, (*RtreeCheck)(unsafe.Pointer(pCheck)).FpGetNode) if (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc == SQLITE_OK && pRet == uintptr(0) { - rtreeCheckAppendMsg(tls, pCheck, ts+27201, libc.VaList(bp+16, iNode)) + rtreeCheckAppendMsg(tls, pCheck, ts+27248, libc.VaList(bp+16, iNode)) } } @@ -128315,8 +128373,8 @@ var rc int32 var pStmt uintptr *(*[2]uintptr)(unsafe.Pointer(bp + 80)) = [2]uintptr{ - ts + 27233, - ts + 27287, + ts + 27280, + ts + 27334, } if *(*uintptr)(unsafe.Pointer(pCheck + 40 + uintptr(bLeaf)*8)) == uintptr(0) { @@ -128331,23 +128389,23 @@ Xsqlite3_bind_int64(tls, pStmt, 1, iKey) rc = Xsqlite3_step(tls, pStmt) if rc == SQLITE_DONE { - rtreeCheckAppendMsg(tls, pCheck, ts+27335, + rtreeCheckAppendMsg(tls, pCheck, ts+27382, libc.VaList(bp+16, iKey, iVal, func() uintptr { if bLeaf != 0 { - return ts + 27380 + return ts + 27427 } - return ts + 27388 + return ts + 27435 }())) } else if rc == SQLITE_ROW { var ii I64 = Xsqlite3_column_int64(tls, pStmt, 0) if ii != iVal { rtreeCheckAppendMsg(tls, pCheck, - ts+27397, + ts+27444, libc.VaList(bp+40, iKey, ii, func() uintptr { if bLeaf != 0 { - return ts + 27380 + return ts + 27427 } - return ts + 27388 + return ts + 27435 }(), iKey, iVal)) } } @@ -128371,7 +128429,7 @@ return libc.Bool32(*(*RtreeValue)(unsafe.Pointer(bp + 48)) > *(*RtreeValue)(unsafe.Pointer(bp + 52))) }() != 0 { rtreeCheckAppendMsg(tls, pCheck, - ts+27455, libc.VaList(bp, i, iCell, iNode)) + ts+27502, libc.VaList(bp, i, iCell, iNode)) } if pParent != 0 { @@ -128391,7 +128449,7 @@ return libc.Bool32(*(*RtreeValue)(unsafe.Pointer(bp + 52)) > *(*RtreeValue)(unsafe.Pointer(bp + 60))) }() != 0 { rtreeCheckAppendMsg(tls, pCheck, - ts+27503, libc.VaList(bp+24, i, iCell, iNode)) + ts+27550, libc.VaList(bp+24, i, iCell, iNode)) } } } @@ -128408,14 +128466,14 @@ if aNode != 0 { if *(*int32)(unsafe.Pointer(bp + 48)) < 4 { rtreeCheckAppendMsg(tls, pCheck, - ts+27570, libc.VaList(bp, iNode, *(*int32)(unsafe.Pointer(bp + 48)))) + ts+27617, libc.VaList(bp, iNode, *(*int32)(unsafe.Pointer(bp + 48)))) } else { var nCell int32 
var i int32 if aParent == uintptr(0) { iDepth = readInt16(tls, aNode) if iDepth > RTREE_MAX_DEPTH { - rtreeCheckAppendMsg(tls, pCheck, ts+27604, libc.VaList(bp+16, iDepth)) + rtreeCheckAppendMsg(tls, pCheck, ts+27651, libc.VaList(bp+16, iDepth)) Xsqlite3_free(tls, aNode) return } @@ -128423,7 +128481,7 @@ nCell = readInt16(tls, aNode+2) if 4+nCell*(8+(*RtreeCheck)(unsafe.Pointer(pCheck)).FnDim*2*4) > *(*int32)(unsafe.Pointer(bp + 48)) { rtreeCheckAppendMsg(tls, pCheck, - ts+27634, + ts+27681, libc.VaList(bp+24, iNode, nCell, *(*int32)(unsafe.Pointer(bp + 48)))) } else { for i = 0; i < nCell; i++ { @@ -128452,14 +128510,14 @@ if (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc == SQLITE_OK { var pCount uintptr - pCount = rtreeCheckPrepare(tls, pCheck, ts+27689, + pCount = rtreeCheckPrepare(tls, pCheck, ts+27736, libc.VaList(bp, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzDb, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzTab, zTbl)) if pCount != 0 { if Xsqlite3_step(tls, pCount) == SQLITE_ROW { var nActual I64 = Xsqlite3_column_int64(tls, pCount, 0) if nActual != nExpect { rtreeCheckAppendMsg(tls, pCheck, - ts+27720, libc.VaList(bp+24, zTbl, nExpect, nActual)) + ts+27767, libc.VaList(bp+24, zTbl, nExpect, nActual)) } } (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc = Xsqlite3_finalize(tls, pCount) @@ -128486,7 +128544,7 @@ } if (*RtreeCheck)(unsafe.Pointer(bp+32)).Frc == SQLITE_OK { - pStmt = rtreeCheckPrepare(tls, bp+32, ts+27787, libc.VaList(bp, zDb, zTab)) + pStmt = rtreeCheckPrepare(tls, bp+32, ts+27834, libc.VaList(bp, zDb, zTab)) if pStmt != 0 { nAux = Xsqlite3_column_count(tls, pStmt) - 2 Xsqlite3_finalize(tls, pStmt) @@ -128495,12 +128553,12 @@ } } - pStmt = rtreeCheckPrepare(tls, bp+32, ts+25607, libc.VaList(bp+16, zDb, zTab)) + pStmt = rtreeCheckPrepare(tls, bp+32, ts+25654, libc.VaList(bp+16, zDb, zTab)) if pStmt != 0 { var rc int32 (*RtreeCheck)(unsafe.Pointer(bp + 32)).FnDim = (Xsqlite3_column_count(tls, pStmt) - 1 - nAux) / 2 if (*RtreeCheck)(unsafe.Pointer(bp+32)).FnDim < 1 { - rtreeCheckAppendMsg(tls, bp+32, ts+27815, 0) + rtreeCheckAppendMsg(tls, bp+32, ts+27862, 0) } else if SQLITE_ROW == Xsqlite3_step(tls, pStmt) { (*RtreeCheck)(unsafe.Pointer(bp + 32)).FbInt = libc.Bool32(Xsqlite3_column_type(tls, pStmt, 1) == SQLITE_INTEGER) } @@ -128514,8 +128572,8 @@ if (*RtreeCheck)(unsafe.Pointer(bp+32)).Frc == SQLITE_OK { rtreeCheckNode(tls, bp+32, 0, uintptr(0), int64(1)) } - rtreeCheckCount(tls, bp+32, ts+27846, int64((*RtreeCheck)(unsafe.Pointer(bp+32)).FnLeaf)) - rtreeCheckCount(tls, bp+32, ts+27853, int64((*RtreeCheck)(unsafe.Pointer(bp+32)).FnNonLeaf)) + rtreeCheckCount(tls, bp+32, ts+27893, int64((*RtreeCheck)(unsafe.Pointer(bp+32)).FnLeaf)) + rtreeCheckCount(tls, bp+32, ts+27900, int64((*RtreeCheck)(unsafe.Pointer(bp+32)).FnNonLeaf)) } Xsqlite3_finalize(tls, (*RtreeCheck)(unsafe.Pointer(bp+32)).FpGetNode) @@ -128523,7 +128581,7 @@ Xsqlite3_finalize(tls, *(*uintptr)(unsafe.Pointer(bp + 32 + 40 + 1*8))) if bEnd != 0 { - var rc int32 = Xsqlite3_exec(tls, db, ts+27861, uintptr(0), uintptr(0), uintptr(0)) + var rc int32 = Xsqlite3_exec(tls, db, ts+27908, uintptr(0), uintptr(0), uintptr(0)) if (*RtreeCheck)(unsafe.Pointer(bp+32)).Frc == SQLITE_OK { (*RtreeCheck)(unsafe.Pointer(bp + 32)).Frc = rc } @@ -128538,7 +128596,7 @@ if nArg != 1 && nArg != 2 { Xsqlite3_result_error(tls, ctx, - ts+27865, -1) + ts+27912, -1) } else { var rc int32 *(*uintptr)(unsafe.Pointer(bp)) = uintptr(0) @@ -128556,7 +128614,7 @@ if *(*uintptr)(unsafe.Pointer(bp)) != 0 { return *(*uintptr)(unsafe.Pointer(bp)) } - return ts + 
18464 + return ts + 18511 }(), -1, libc.UintptrFromInt32(-1)) } else { Xsqlite3_result_error_code(tls, ctx, rc) @@ -128927,11 +128985,11 @@ var db uintptr = Xsqlite3_context_db_handle(tls, context) var x uintptr = Xsqlite3_str_new(tls, db) var i int32 - Xsqlite3_str_append(tls, x, ts+27916, 1) + Xsqlite3_str_append(tls, x, ts+27963, 1) for i = 0; i < (*GeoPoly)(unsafe.Pointer(p)).FnVertex; i++ { - Xsqlite3_str_appendf(tls, x, ts+27918, libc.VaList(bp, float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2)*4))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2+1)*4))))) + Xsqlite3_str_appendf(tls, x, ts+27965, libc.VaList(bp, float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2)*4))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2+1)*4))))) } - Xsqlite3_str_appendf(tls, x, ts+27929, libc.VaList(bp+16, float64(*(*GeoCoord)(unsafe.Pointer(p + 8))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + 1*4))))) + Xsqlite3_str_appendf(tls, x, ts+27976, libc.VaList(bp+16, float64(*(*GeoCoord)(unsafe.Pointer(p + 8))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + 1*4))))) Xsqlite3_result_text(tls, context, Xsqlite3_str_finish(tls, x), -1, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{Xsqlite3_free}))) Xsqlite3_free(tls, p) } @@ -128951,19 +129009,19 @@ var x uintptr = Xsqlite3_str_new(tls, db) var i int32 var cSep int8 = int8('\'') - Xsqlite3_str_appendf(tls, x, ts+27940, 0) + Xsqlite3_str_appendf(tls, x, ts+27987, 0) for i = 0; i < (*GeoPoly)(unsafe.Pointer(p)).FnVertex; i++ { - Xsqlite3_str_appendf(tls, x, ts+27958, libc.VaList(bp, int32(cSep), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2)*4))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2+1)*4))))) + Xsqlite3_str_appendf(tls, x, ts+28005, libc.VaList(bp, int32(cSep), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2)*4))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2+1)*4))))) cSep = int8(' ') } - Xsqlite3_str_appendf(tls, x, ts+27966, libc.VaList(bp+24, float64(*(*GeoCoord)(unsafe.Pointer(p + 8))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + 1*4))))) + Xsqlite3_str_appendf(tls, x, ts+28013, libc.VaList(bp+24, float64(*(*GeoCoord)(unsafe.Pointer(p + 8))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + 1*4))))) for i = 1; i < argc; i++ { var z uintptr = Xsqlite3_value_text(tls, *(*uintptr)(unsafe.Pointer(argv + uintptr(i)*8))) if z != 0 && *(*int8)(unsafe.Pointer(z)) != 0 { - Xsqlite3_str_appendf(tls, x, ts+27974, libc.VaList(bp+40, z)) + Xsqlite3_str_appendf(tls, x, ts+28021, libc.VaList(bp+40, z)) } } - Xsqlite3_str_appendf(tls, x, ts+27978, 0) + Xsqlite3_str_appendf(tls, x, ts+28025, 0) Xsqlite3_result_text(tls, context, Xsqlite3_str_finish(tls, x), -1, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{Xsqlite3_free}))) Xsqlite3_free(tls, p) } @@ -129883,7 +129941,7 @@ libc.X__builtin___memcpy_chk(tls, (*Rtree)(unsafe.Pointer(pRtree)).FzName, *(*uintptr)(unsafe.Pointer(argv + 2*8)), uint64(nName), libc.X__builtin_object_size(tls, (*Rtree)(unsafe.Pointer(pRtree)).FzName, 0)) pSql = Xsqlite3_str_new(tls, db) - Xsqlite3_str_appendf(tls, pSql, ts+27991, 0) + Xsqlite3_str_appendf(tls, pSql, ts+28038, 0) (*Rtree)(unsafe.Pointer(pRtree)).FnAux = U8(1) (*Rtree)(unsafe.Pointer(pRtree)).FnAuxNotNull = U8(1) ii = 3 @@ -129892,7 +129950,7 @@ goto __4 } (*Rtree)(unsafe.Pointer(pRtree)).FnAux++ - Xsqlite3_str_appendf(tls, pSql, ts+28013, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(argv + uintptr(ii)*8)))) + Xsqlite3_str_appendf(tls, pSql, ts+28060, libc.VaList(bp+8, 
*(*uintptr)(unsafe.Pointer(argv + uintptr(ii)*8)))) goto __3 __3: ii++ @@ -129900,7 +129958,7 @@ goto __4 __4: ; - Xsqlite3_str_appendf(tls, pSql, ts+27080, 0) + Xsqlite3_str_appendf(tls, pSql, ts+27127, 0) zSql = Xsqlite3_str_finish(tls, pSql) if !!(zSql != 0) { goto __5 @@ -130137,7 +130195,7 @@ } if iFuncTerm >= 0 { (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxNum = idxNum - (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxStr = ts + 28017 + (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxStr = ts + 28064 (*sqlite3_index_constraint_usage)(unsafe.Pointer((*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FaConstraintUsage + uintptr(iFuncTerm)*8)).FargvIndex = 1 (*sqlite3_index_constraint_usage)(unsafe.Pointer((*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FaConstraintUsage + uintptr(iFuncTerm)*8)).Fomit = uint8(0) (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FestimatedCost = 300.0 @@ -130145,7 +130203,7 @@ return SQLITE_OK } (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxNum = 4 - (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxStr = ts + 28023 + (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxStr = ts + 28070 (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FestimatedCost = 3000000.0 (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FestimatedRows = int64(100000) return SQLITE_OK @@ -130257,7 +130315,7 @@ if !(*(*int32)(unsafe.Pointer(bp + 48)) == SQLITE_ERROR) { goto __4 } - (*Sqlite3_vtab)(unsafe.Pointer(pVtab)).FzErrMsg = Xsqlite3_mprintf(tls, ts+28032, 0) + (*Sqlite3_vtab)(unsafe.Pointer(pVtab)).FzErrMsg = Xsqlite3_mprintf(tls, ts+28079, 0) __4: ; goto geopoly_update_end @@ -130389,14 +130447,14 @@ func geopolyFindFunction(tls *libc.TLS, pVtab uintptr, nArg int32, zName uintptr, pxFunc uintptr, ppArg uintptr) int32 { _ = pVtab _ = nArg - if Xsqlite3_stricmp(tls, zName, ts+28072) == 0 { + if Xsqlite3_stricmp(tls, zName, ts+28119) == 0 { *(*uintptr)(unsafe.Pointer(pxFunc)) = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{geopolyOverlapFunc})) *(*uintptr)(unsafe.Pointer(ppArg)) = uintptr(0) return SQLITE_INDEX_CONSTRAINT_FUNCTION } - if Xsqlite3_stricmp(tls, zName, ts+28088) == 0 { + if Xsqlite3_stricmp(tls, zName, ts+28135) == 0 { *(*uintptr)(unsafe.Pointer(pxFunc)) = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{geopolyWithinFunc})) @@ -130461,7 +130519,7 @@ uintptr(0), aAgg[i].FxStep, aAgg[i].FxFinal) } if rc == SQLITE_OK { - rc = Xsqlite3_create_module_v2(tls, db, ts+28103, uintptr(unsafe.Pointer(&geopolyModule)), uintptr(0), uintptr(0)) + rc = Xsqlite3_create_module_v2(tls, db, ts+28150, uintptr(unsafe.Pointer(&geopolyModule)), uintptr(0), uintptr(0)) } return rc } @@ -130473,25 +130531,25 @@ F__ccgo_pad1 [6]byte FzName uintptr }{ - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 28111}, - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 28124}, - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 28137}, - {FxFunc: 0, FnArg: int8(-1), FbPure: uint8(1), FzName: ts + 28150}, - {FxFunc: 0, FnArg: int8(2), FbPure: uint8(1), FzName: ts + 28088}, - {FxFunc: 0, FnArg: int8(3), FbPure: uint8(1), FzName: ts + 28162}, - {FxFunc: 0, FnArg: int8(2), FbPure: uint8(1), FzName: ts + 28072}, - {FxFunc: 0, FnArg: int8(1), FzName: ts + 28185}, - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 28199}, - {FxFunc: 0, FnArg: int8(7), FbPure: uint8(1), FzName: ts + 28212}, - {FxFunc: 0, FnArg: int8(4), FbPure: uint8(1), FzName: ts + 28226}, - {FxFunc: 0, FnArg: 
int8(1), FbPure: uint8(1), FzName: ts + 28242}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 28158}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 28171}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 28184}, + {FxFunc: 0, FnArg: int8(-1), FbPure: uint8(1), FzName: ts + 28197}, + {FxFunc: 0, FnArg: int8(2), FbPure: uint8(1), FzName: ts + 28135}, + {FxFunc: 0, FnArg: int8(3), FbPure: uint8(1), FzName: ts + 28209}, + {FxFunc: 0, FnArg: int8(2), FbPure: uint8(1), FzName: ts + 28119}, + {FxFunc: 0, FnArg: int8(1), FzName: ts + 28232}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 28246}, + {FxFunc: 0, FnArg: int8(7), FbPure: uint8(1), FzName: ts + 28259}, + {FxFunc: 0, FnArg: int8(4), FbPure: uint8(1), FzName: ts + 28273}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 28289}, } var aAgg = [1]struct { FxStep uintptr FxFinal uintptr FzName uintptr }{ - {FxStep: 0, FxFinal: 0, FzName: ts + 28254}, + {FxStep: 0, FxFinal: 0, FzName: ts + 28301}, } // Register the r-tree module with database handle db. This creates the @@ -130501,26 +130559,26 @@ var utf8 int32 = SQLITE_UTF8 var rc int32 - rc = Xsqlite3_create_function(tls, db, ts+28273, 2, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + rc = Xsqlite3_create_function(tls, db, ts+28320, 2, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rtreenode})), uintptr(0), uintptr(0)) if rc == SQLITE_OK { - rc = Xsqlite3_create_function(tls, db, ts+28283, 1, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + rc = Xsqlite3_create_function(tls, db, ts+28330, 1, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rtreedepth})), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { - rc = Xsqlite3_create_function(tls, db, ts+28294, -1, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + rc = Xsqlite3_create_function(tls, db, ts+28341, -1, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rtreecheck})), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { var c uintptr = uintptr(RTREE_COORD_REAL32) - rc = Xsqlite3_create_module_v2(tls, db, ts+28017, uintptr(unsafe.Pointer(&rtreeModule)), c, uintptr(0)) + rc = Xsqlite3_create_module_v2(tls, db, ts+28064, uintptr(unsafe.Pointer(&rtreeModule)), c, uintptr(0)) } if rc == SQLITE_OK { var c uintptr = uintptr(RTREE_COORD_INT32) - rc = Xsqlite3_create_module_v2(tls, db, ts+28305, uintptr(unsafe.Pointer(&rtreeModule)), c, uintptr(0)) + rc = Xsqlite3_create_module_v2(tls, db, ts+28352, uintptr(unsafe.Pointer(&rtreeModule)), c, uintptr(0)) } if rc == SQLITE_OK { rc = sqlite3_geopoly_init(tls, db) @@ -130574,7 +130632,7 @@ Xsqlite3_result_error_nomem(tls, ctx) rtreeMatchArgFree(tls, pBlob) } else { - Xsqlite3_result_pointer(tls, ctx, pBlob, ts+25593, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{rtreeMatchArgFree}))) + Xsqlite3_result_pointer(tls, ctx, pBlob, ts+25640, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{rtreeMatchArgFree}))) } } } @@ -130901,7 +130959,7 @@ nOut = rbuDeltaOutputSize(tls, aDelta, nDelta) if nOut < 0 { - Xsqlite3_result_error(tls, context, ts+28315, -1) + Xsqlite3_result_error(tls, context, ts+28362, -1) return } @@ -130912,7 +130970,7 @@ nOut2 = rbuDeltaApply(tls, aOrig, nOrig, aDelta, nDelta, aOut) if nOut2 != nOut { Xsqlite3_free(tls, aOut) - Xsqlite3_result_error(tls, context, ts+28315, -1) + Xsqlite3_result_error(tls, context, 
ts+28362, -1) } else { Xsqlite3_result_blob(tls, context, aOut, nOut, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{Xsqlite3_free}))) } @@ -131013,7 +131071,7 @@ rbuObjIterClearStatements(tls, pIter) if (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx == uintptr(0) { rc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+28336, uintptr(0), uintptr(0), p+64) + ts+28383, uintptr(0), uintptr(0), p+64) } if rc == SQLITE_OK { @@ -131077,7 +131135,7 @@ Xsqlite3_result_text(tls, pCtx, zIn, -1, uintptr(0)) } } else { - if libc.Xstrlen(tls, zIn) > uint64(4) && libc.Xmemcmp(tls, ts+25506, zIn, uint64(4)) == 0 { + if libc.Xstrlen(tls, zIn) > uint64(4) && libc.Xmemcmp(tls, ts+25553, zIn, uint64(4)) == 0 { var i int32 for i = 4; int32(*(*int8)(unsafe.Pointer(zIn + uintptr(i)))) >= '0' && int32(*(*int8)(unsafe.Pointer(zIn + uintptr(i)))) <= '9'; i++ { } @@ -131098,16 +131156,16 @@ rc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, pIter, p+64, Xsqlite3_mprintf(tls, - ts+28507, libc.VaList(bp, func() uintptr { + ts+28554, libc.VaList(bp, func() uintptr { if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - return ts + 28657 + return ts + 28704 } return ts + 1547 }()))) if rc == SQLITE_OK { rc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+8, p+64, - ts+28698) + ts+28745) } (*RbuObjIter)(unsafe.Pointer(pIter)).FbCleanup = 1 @@ -131223,7 +131281,7 @@ (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+32, p+64, Xsqlite3_mprintf(tls, - ts+28823, libc.VaList(bp, zTab))) + ts+28870, libc.VaList(bp, zTab))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc != SQLITE_OK || Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 32))) != SQLITE_ROW) { goto __1 } @@ -131241,7 +131299,7 @@ *(*int32)(unsafe.Pointer(piTnum)) = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 32)), 1) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+32+1*8, p+64, - Xsqlite3_mprintf(tls, ts+28942, libc.VaList(bp+8, zTab))) + Xsqlite3_mprintf(tls, ts+28989, libc.VaList(bp+8, zTab))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc != 0) { goto __3 } @@ -131259,7 +131317,7 @@ } (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+32+2*8, p+64, Xsqlite3_mprintf(tls, - ts+28963, libc.VaList(bp+16, zIdx))) + ts+29010, libc.VaList(bp+16, zIdx))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK) { goto __7 } @@ -131282,7 +131340,7 @@ __5: ; (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+32+3*8, p+64, - Xsqlite3_mprintf(tls, ts+29014, libc.VaList(bp+24, zTab))) + Xsqlite3_mprintf(tls, ts+29061, libc.VaList(bp+24, zTab))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK) { goto __10 } @@ -131328,7 +131386,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { libc.X__builtin___memcpy_chk(tls, (*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed, (*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk, uint64(unsafe.Sizeof(U8(0)))*uint64((*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol), libc.X__builtin_object_size(tls, (*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed, 0)) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+16, p+64, - Xsqlite3_mprintf(tls, ts+29035, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) + 
Xsqlite3_mprintf(tls, ts+29082, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) } (*RbuObjIter)(unsafe.Pointer(pIter)).FnIndex = 0 @@ -131343,7 +131401,7 @@ libc.X__builtin___memset_chk(tls, (*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed, 0x01, uint64(unsafe.Sizeof(U8(0)))*uint64((*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol), libc.X__builtin_object_size(tls, (*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed, 0)) } (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+24, p+64, - Xsqlite3_mprintf(tls, ts+29063, libc.VaList(bp+8, zIdx))) + Xsqlite3_mprintf(tls, ts+29110, libc.VaList(bp+8, zIdx))) for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) { var iCid int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 24)), 1) if iCid >= 0 { @@ -131383,7 +131441,7 @@ rbuTableType(tls, p, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, pIter+72, bp+56, pIter+108) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NOTABLE { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+19939, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl)) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+19986, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl)) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc != 0 { return (*Sqlite3rbu)(unsafe.Pointer(p)).Frc @@ -131393,18 +131451,18 @@ } (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp+64, p+64, - Xsqlite3_mprintf(tls, ts+29092, libc.VaList(bp+8, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl))) + Xsqlite3_mprintf(tls, ts+29139, libc.VaList(bp+8, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl))) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { nCol = Xsqlite3_column_count(tls, *(*uintptr)(unsafe.Pointer(bp + 64))) rbuAllocateIterArrays(tls, p, pIter, nCol) } for i = 0; (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && i < nCol; i++ { var zName uintptr = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp + 64)), i) - if Xsqlite3_strnicmp(tls, ts+29111, zName, 4) != 0 { + if Xsqlite3_strnicmp(tls, ts+29158, zName, 4) != 0 { var zCopy uintptr = rbuStrndup(tls, zName, p+56) *(*int32)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FaiSrcOrder + uintptr((*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol)*4)) = (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(libc.PostIncInt32(&(*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol, 1))*8)) = zCopy - } else if 0 == Xsqlite3_stricmp(tls, ts+29116, zName) { + } else if 0 == Xsqlite3_stricmp(tls, ts+29163, zName) { bRbuRowid = 1 } } @@ -131416,18 +131474,18 @@ bRbuRowid != libc.Bool32((*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_VTAB || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE) { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, - ts+29126, libc.VaList(bp+16, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, + ts+29173, libc.VaList(bp+16, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, func() uintptr { if bRbuRowid != 0 { - return ts + 29155 + return ts + 29202 } - return ts + 29168 + return ts + 29215 }())) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { 
(*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+64, p+64, - Xsqlite3_mprintf(tls, ts+29177, libc.VaList(bp+32, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) + Xsqlite3_mprintf(tls, ts+29224, libc.VaList(bp+32, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) } for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 64))) { var zName uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 64)), 1) @@ -131441,7 +131499,7 @@ } if i == (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+29199, + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+29246, libc.VaList(bp+40, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zName)) } else { var iPk int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 64)), 5) @@ -131488,7 +131546,7 @@ var i int32 for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { var z uintptr = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) - zList = rbuMPrintf(tls, p, ts+29226, libc.VaList(bp, zList, zSep, z)) + zList = rbuMPrintf(tls, p, ts+29273, libc.VaList(bp, zList, zSep, z)) zSep = ts + 15017 } return zList @@ -131506,7 +131564,7 @@ for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { if int32(*(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(i)))) == iPk { var zCol uintptr = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) - zRet = rbuMPrintf(tls, p, ts+29235, libc.VaList(bp, zRet, zSep, zPre, zCol, zPost)) + zRet = rbuMPrintf(tls, p, ts+29282, libc.VaList(bp, zRet, zSep, zPre, zCol, zPost)) zSep = zSeparator break } @@ -131528,25 +131586,25 @@ if bRowid != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+72, p+64, Xsqlite3_mprintf(tls, - ts+29248, libc.VaList(bp, zWrite, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) + ts+29295, libc.VaList(bp, zWrite, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 72))) { var iMax Sqlite3_int64 = Xsqlite3_column_int64(tls, *(*uintptr)(unsafe.Pointer(bp + 72)), 0) - zRet = rbuMPrintf(tls, p, ts+29280, libc.VaList(bp+16, iMax)) + zRet = rbuMPrintf(tls, p, ts+29327, libc.VaList(bp+16, iMax)) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 72))) } else { - var zOrder uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+1547, ts+15017, ts+29303) - var zSelect uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+29309, ts+29316, ts+5360) + var zOrder uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+1547, ts+15017, ts+29350) + var zSelect uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+29356, ts+29363, ts+5360) var zList uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+1547, ts+15017, ts+1547) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+72, p+64, Xsqlite3_mprintf(tls, - ts+29324, + ts+29371, libc.VaList(bp+24, zSelect, zWrite, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zOrder))) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 72))) { var 
zVal uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 72)), 0) - zRet = rbuMPrintf(tls, p, ts+29366, libc.VaList(bp+56, zList, zVal)) + zRet = rbuMPrintf(tls, p, ts+29413, libc.VaList(bp+56, zList, zVal)) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 72))) } @@ -131588,7 +131646,7 @@ *(*uintptr)(unsafe.Pointer(bp + 176)) = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+176, p+64, - Xsqlite3_mprintf(tls, ts+29063, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx))) + Xsqlite3_mprintf(tls, ts+29110, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx))) __1: if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 176)))) { goto __2 @@ -131623,7 +131681,7 @@ zCol = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) goto __7 __6: - zCol = ts + 29386 + zCol = ts + 29433 __7: ; goto __5 @@ -131631,11 +131689,11 @@ zCol = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(iCid)*8)) __5: ; - zLhs = rbuMPrintf(tls, p, ts+29394, + zLhs = rbuMPrintf(tls, p, ts+29441, libc.VaList(bp+8, zLhs, zSep, zCol, zCollate)) - zOrder = rbuMPrintf(tls, p, ts+29415, + zOrder = rbuMPrintf(tls, p, ts+29462, libc.VaList(bp+40, zOrder, zSep, iCol, zCol, zCollate)) - zSelect = rbuMPrintf(tls, p, ts+29451, + zSelect = rbuMPrintf(tls, p, ts+29498, libc.VaList(bp+80, zSelect, zSep, iCol, zCol)) zSep = ts + 15017 iCol++ @@ -131655,7 +131713,7 @@ *(*uintptr)(unsafe.Pointer(bp + 184)) = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+184, p+64, - Xsqlite3_mprintf(tls, ts+29478, + Xsqlite3_mprintf(tls, ts+29525, libc.VaList(bp+112, zSelect, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zOrder))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 184)))) { goto __13 @@ -131682,7 +131740,7 @@ ; __18: ; - zVector = rbuMPrintf(tls, p, ts+29526, libc.VaList(bp+136, zVector, zSep, zQuoted)) + zVector = rbuMPrintf(tls, p, ts+29573, libc.VaList(bp+136, zVector, zSep, zQuoted)) zSep = ts + 15017 goto __15 __15: @@ -131694,7 +131752,7 @@ if !!(bFailed != 0) { goto __20 } - zRet = rbuMPrintf(tls, p, ts+29533, libc.VaList(bp+160, zLhs, zVector)) + zRet = rbuMPrintf(tls, p, ts+29580, libc.VaList(bp+160, zLhs, zVector)) __20: ; __13: @@ -131727,7 +131785,7 @@ if rc == SQLITE_OK { rc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+200, p+64, - Xsqlite3_mprintf(tls, ts+29063, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx))) + Xsqlite3_mprintf(tls, ts+29110, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx))) } for rc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 200))) { @@ -131739,7 +131797,7 @@ if iCid == -2 { var iSeq int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 200)), 0) - zRet = Xsqlite3_mprintf(tls, ts+29545, libc.VaList(bp+8, zRet, zCom, + zRet = Xsqlite3_mprintf(tls, ts+29592, libc.VaList(bp+8, zRet, zCom, (*RbuSpan)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FaIdxCol+uintptr(iSeq)*16)).FnSpan, (*RbuSpan)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FaIdxCol+uintptr(iSeq)*16)).FzSpan, zCollate)) zType = ts + 1547 } else { @@ -131751,37 +131809,37 @@ zCol = 
*(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) } else if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - zCol = ts + 29386 + zCol = ts + 29433 } else { - zCol = ts + 29116 + zCol = ts + 29163 } zType = ts + 1112 } else { zCol = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(iCid)*8)) zType = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblType + uintptr(iCid)*8)) } - zRet = Xsqlite3_mprintf(tls, ts+29567, libc.VaList(bp+48, zRet, zCom, zCol, zCollate)) + zRet = Xsqlite3_mprintf(tls, ts+29614, libc.VaList(bp+48, zRet, zCom, zCol, zCollate)) } if (*RbuObjIter)(unsafe.Pointer(pIter)).FbUnique == 0 || Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 200)), 5) != 0 { var zOrder uintptr = func() uintptr { if bDesc != 0 { - return ts + 29303 + return ts + 29350 } return ts + 1547 }() - zImpPK = Xsqlite3_mprintf(tls, ts+29587, + zImpPK = Xsqlite3_mprintf(tls, ts+29634, libc.VaList(bp+80, zImpPK, zCom, nBind, zCol, zOrder)) } - zImpCols = Xsqlite3_mprintf(tls, ts+29608, + zImpCols = Xsqlite3_mprintf(tls, ts+29655, libc.VaList(bp+120, zImpCols, zCom, nBind, zCol, zType, zCollate)) zWhere = Xsqlite3_mprintf(tls, - ts+29641, libc.VaList(bp+168, zWhere, zAnd, nBind, zCol)) + ts+29688, libc.VaList(bp+168, zWhere, zAnd, nBind, zCol)) if zRet == uintptr(0) || zImpPK == uintptr(0) || zImpCols == uintptr(0) || zWhere == uintptr(0) { rc = SQLITE_NOMEM } zCom = ts + 15017 - zAnd = ts + 21975 + zAnd = ts + 22022 nBind++ } @@ -131820,9 +131878,9 @@ for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { if *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed + uintptr(i))) != 0 { var zCol uintptr = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) - zList = Xsqlite3_mprintf(tls, ts+29665, libc.VaList(bp, zList, zS, zObj, zCol)) + zList = Xsqlite3_mprintf(tls, ts+29712, libc.VaList(bp, zList, zS, zObj, zCol)) } else { - zList = Xsqlite3_mprintf(tls, ts+29677, libc.VaList(bp+32, zList, zS)) + zList = Xsqlite3_mprintf(tls, ts+29724, libc.VaList(bp+32, zList, zS)) } zS = ts + 15017 if zList == uintptr(0) { @@ -131832,7 +131890,7 @@ } if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { - zList = rbuMPrintf(tls, p, ts+29686, libc.VaList(bp+48, zList, zObj)) + zList = rbuMPrintf(tls, p, ts+29733, libc.VaList(bp+48, zList, zObj)) } } return zList @@ -131844,18 +131902,18 @@ var zList uintptr = uintptr(0) if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_VTAB || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { - zList = rbuMPrintf(tls, p, ts+29701, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol+1)) + zList = rbuMPrintf(tls, p, ts+29748, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol+1)) } else if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL { var zSep uintptr = ts + 1547 var i int32 for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { if *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(i))) != 0 { - zList = rbuMPrintf(tls, p, ts+29715, libc.VaList(bp+8, zList, zSep, i, i+1)) - zSep = ts + 21975 + zList = rbuMPrintf(tls, p, ts+29762, libc.VaList(bp+8, zList, zSep, i, i+1)) + zSep = ts + 22022 } } zList = rbuMPrintf(tls, p, - ts+29727, libc.VaList(bp+40, zList)) + ts+29774, libc.VaList(bp+40, zList)) } else { var zSep uintptr = ts + 1547 @@ 
-131863,8 +131921,8 @@ for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { if *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(i))) != 0 { var zCol uintptr = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) - zList = rbuMPrintf(tls, p, ts+29777, libc.VaList(bp+48, zList, zSep, zCol, i+1)) - zSep = ts + 21975 + zList = rbuMPrintf(tls, p, ts+29824, libc.VaList(bp+48, zList, zSep, zCol, i+1)) + zSep = ts + 22022 } } } @@ -131873,7 +131931,7 @@ func rbuBadControlError(tls *libc.TLS, p uintptr) { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+29790, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+29837, 0) } func rbuObjIterGetSetlist(tls *libc.TLS, p uintptr, pIter uintptr, zMask uintptr) uintptr { @@ -131891,15 +131949,15 @@ for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { var c int8 = *(*int8)(unsafe.Pointer(zMask + uintptr(*(*int32)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FaiSrcOrder + uintptr(i)*4))))) if int32(c) == 'x' { - zList = rbuMPrintf(tls, p, ts+29777, + zList = rbuMPrintf(tls, p, ts+29824, libc.VaList(bp, zList, zSep, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)), i+1)) zSep = ts + 15017 } else if int32(c) == 'd' { - zList = rbuMPrintf(tls, p, ts+29816, + zList = rbuMPrintf(tls, p, ts+29863, libc.VaList(bp+32, zList, zSep, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)), *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)), i+1)) zSep = ts + 15017 } else if int32(c) == 'f' { - zList = rbuMPrintf(tls, p, ts+29846, + zList = rbuMPrintf(tls, p, ts+29893, libc.VaList(bp+72, zList, zSep, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)), *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)), i+1)) zSep = ts + 15017 } @@ -131936,19 +131994,19 @@ var z uintptr = uintptr(0) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - var zSep uintptr = ts + 29883 + var zSep uintptr = ts + 29930 *(*uintptr)(unsafe.Pointer(bp + 56)) = uintptr(0) *(*uintptr)(unsafe.Pointer(bp + 64)) = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+56, p+64, - Xsqlite3_mprintf(tls, ts+29035, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) + Xsqlite3_mprintf(tls, ts+29082, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 56))) { var zOrig uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), 3) if zOrig != 0 && libc.Xstrcmp(tls, zOrig, ts+16561) == 0 { var zIdx uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), 1) if zIdx != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+64, p+64, - Xsqlite3_mprintf(tls, ts+29063, libc.VaList(bp+8, zIdx))) + Xsqlite3_mprintf(tls, ts+29110, libc.VaList(bp+8, zIdx))) } break } @@ -131960,15 +132018,15 @@ var zCol uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 64)), 2) var zDesc uintptr if Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 64)), 3) != 0 { - zDesc = ts + 29303 + zDesc = ts + 29350 } else 
{ zDesc = ts + 1547 } - z = rbuMPrintf(tls, p, ts+29896, libc.VaList(bp+16, z, zSep, zCol, zDesc)) + z = rbuMPrintf(tls, p, ts+29943, libc.VaList(bp+16, z, zSep, zCol, zDesc)) zSep = ts + 15017 } } - z = rbuMPrintf(tls, p, ts+29907, libc.VaList(bp+48, z)) + z = rbuMPrintf(tls, p, ts+29954, libc.VaList(bp+48, z)) rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 64))) } return z @@ -131988,7 +132046,7 @@ var zPk uintptr = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+168, p+64, - ts+29911) + ts+29958) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { Xsqlite3_bind_int(tls, *(*uintptr)(unsafe.Pointer(bp + 168)), 1, tnum) if SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 168))) { @@ -131997,7 +132055,7 @@ } if zIdx != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+176, p+64, - Xsqlite3_mprintf(tls, ts+29063, libc.VaList(bp, zIdx))) + Xsqlite3_mprintf(tls, ts+29110, libc.VaList(bp, zIdx))) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 168))) @@ -132007,23 +132065,23 @@ var iCid int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 176)), 1) var bDesc int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 176)), 3) var zCollate uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 176)), 4) - zCols = rbuMPrintf(tls, p, ts+29961, libc.VaList(bp+8, zCols, zComma, + zCols = rbuMPrintf(tls, p, ts+30008, libc.VaList(bp+8, zCols, zComma, iCid, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblType + uintptr(iCid)*8)), zCollate)) - zPk = rbuMPrintf(tls, p, ts+29983, libc.VaList(bp+48, zPk, zComma, iCid, func() uintptr { + zPk = rbuMPrintf(tls, p, ts+30030, libc.VaList(bp+48, zPk, zComma, iCid, func() uintptr { if bDesc != 0 { - return ts + 29303 + return ts + 29350 } return ts + 1547 }())) zComma = ts + 15017 } } - zCols = rbuMPrintf(tls, p, ts+29993, libc.VaList(bp+80, zCols)) + zCols = rbuMPrintf(tls, p, ts+30040, libc.VaList(bp+80, zCols)) rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 176))) Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+88, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6844, 1, tnum)) rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+30008, + ts+30055, libc.VaList(bp+120, zCols, zPk)) Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+136, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6844, 0, 0)) } @@ -132049,13 +132107,13 @@ (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6844, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zCol, uintptr(0), bp+192, uintptr(0), uintptr(0), uintptr(0)) if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_IPK && *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(iCol))) != 0 { - zPk = ts + 30070 + zPk = ts + 30117 } - zSql = rbuMPrintf(tls, p, ts+30083, + zSql = rbuMPrintf(tls, p, ts+30130, libc.VaList(bp+32, zSql, zComma, zCol, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblType + uintptr(iCol)*8)), zPk, *(*uintptr)(unsafe.Pointer(bp + 192)), func() uintptr { if *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabNotNull + uintptr(iCol))) != 0 { - return ts + 30110 + return ts + 30157 } return ts + 1547 }())) @@ -132065,16 +132123,16 @@ if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_WITHOUT_ROWID { var zPk uintptr = rbuWithoutRowidPK(tls, p, pIter) if zPk != 0 { - zSql = 
rbuMPrintf(tls, p, ts+30120, libc.VaList(bp+88, zSql, zPk)) + zSql = rbuMPrintf(tls, p, ts+30167, libc.VaList(bp+88, zSql, zPk)) } } Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+104, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6844, 1, tnum)) - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+30127, + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+30174, libc.VaList(bp+136, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zSql, func() uintptr { if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_WITHOUT_ROWID { - return ts + 30159 + return ts + 30206 } return ts + 1547 }())) @@ -132091,7 +132149,7 @@ if zBind != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, pIter+152, p+64, Xsqlite3_mprintf(tls, - ts+30174, + ts+30221, libc.VaList(bp, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zCollist, zRbuRowid, zBind))) } } @@ -132128,7 +132186,7 @@ if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { *(*int32)(unsafe.Pointer(bp + 8)) = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp, p+64, - ts+30231) + ts+30278) } if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { var rc2 int32 @@ -132233,7 +132291,7 @@ var zLimit uintptr = uintptr(0) if nOffset != 0 { - zLimit = Xsqlite3_mprintf(tls, ts+30297, libc.VaList(bp, nOffset)) + zLimit = Xsqlite3_mprintf(tls, ts+30344, libc.VaList(bp, nOffset)) if !(zLimit != 0) { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_NOMEM } @@ -132256,7 +132314,7 @@ Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+8, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6844, 0, 1)) Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+40, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6844, 1, tnum)) rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+30317, + ts+30364, libc.VaList(bp+72, zTbl, *(*uintptr)(unsafe.Pointer(bp + 600)), *(*uintptr)(unsafe.Pointer(bp + 608)))) Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+96, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6844, 0, 0)) @@ -132264,13 +132322,13 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+136, p+64, - Xsqlite3_mprintf(tls, ts+30382, libc.VaList(bp+128, zTbl, zBind))) + Xsqlite3_mprintf(tls, ts+30429, libc.VaList(bp+128, zTbl, zBind))) } if libc.Bool32((*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0)) == 0 && (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+144, p+64, - Xsqlite3_mprintf(tls, ts+30418, libc.VaList(bp+144, zTbl, *(*uintptr)(unsafe.Pointer(bp + 616))))) + Xsqlite3_mprintf(tls, ts+30465, libc.VaList(bp+144, zTbl, *(*uintptr)(unsafe.Pointer(bp + 616))))) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { @@ -132286,7 +132344,7 @@ } zSql = Xsqlite3_mprintf(tls, - ts+30452, + ts+30499, libc.VaList(bp+160, zCollist, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zPart, @@ -132294,9 +132352,9 @@ if zStart != 0 { return func() uintptr { if zPart != 0 { - return ts + 30513 + return ts + 30560 } - return ts + 30517 + return ts + 30564 }() } return ts + 1547 @@ -132305,20 +132363,20 @@ Xsqlite3_free(tls, zStart) } else if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType 
== RBU_PK_NONE { zSql = Xsqlite3_mprintf(tls, - ts+30523, + ts+30570, libc.VaList(bp+216, zCollist, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zPart, zCollist, zLimit)) } else { zSql = Xsqlite3_mprintf(tls, - ts+30584, + ts+30631, libc.VaList(bp+264, zCollist, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zPart, zCollist, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zPart, func() uintptr { if zPart != 0 { - return ts + 30513 + return ts + 30560 } - return ts + 30517 + return ts + 30564 }(), zCollist, zLimit)) } @@ -132355,16 +132413,16 @@ if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_VTAB { return ts + 1547 } - return ts + 30743 + return ts + 30790 }() if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+136, pz, Xsqlite3_mprintf(tls, - ts+30752, + ts+30799, libc.VaList(bp+344, zWrite, zTbl, zCollist, func() uintptr { if bRbuRowid != 0 { - return ts + 30788 + return ts + 30835 } return ts + 1547 }(), zBindings))) @@ -132373,32 +132431,32 @@ if libc.Bool32((*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0)) == 0 && (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+144, pz, Xsqlite3_mprintf(tls, - ts+30798, libc.VaList(bp+384, zWrite, zTbl, zWhere))) + ts+30845, libc.VaList(bp+384, zWrite, zTbl, zWhere))) } if libc.Bool32((*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0)) == 0 && (*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed != 0 { var zRbuRowid uintptr = ts + 1547 if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { - zRbuRowid = ts + 30826 + zRbuRowid = ts + 30873 } rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, - ts+30838, libc.VaList(bp+408, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, func() uintptr { + ts+30885, libc.VaList(bp+408, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, func() uintptr { if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL { - return ts + 30914 + return ts + 30961 } return ts + 1547 }(), (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl)) rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+30931, + ts+30978, libc.VaList(bp+440, zWrite, zTbl, zOldlist, zWrite, zTbl, zOldlist, zWrite, zTbl, zNewlist)) if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+31230, + ts+31277, libc.VaList(bp+512, zWrite, zTbl, zNewlist)) } @@ -132411,9 +132469,9 @@ var zOrder uintptr = uintptr(0) if bRbuRowid != 0 { if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - zRbuRowid = ts + 31329 + zRbuRowid = ts + 31376 } else { - zRbuRowid = ts + 31339 + zRbuRowid = ts + 31386 } } @@ -132426,7 +132484,7 @@ } } if bRbuRowid != 0 { - zOrder = rbuMPrintf(tls, p, ts+29386, 0) + zOrder = rbuMPrintf(tls, p, ts+29433, 0) } else { zOrder = rbuObjIterGetPkList(tls, p, pIter, ts+1547, ts+15017, ts+1547) } @@ -132435,11 +132493,11 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, pIter+128, pz, Xsqlite3_mprintf(tls, - ts+31350, + ts+31397, libc.VaList(bp+536, zCollist, func() uintptr { if 
(*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - return ts + 31398 + return ts + 31445 } return ts + 1547 }(), @@ -132452,7 +132510,7 @@ }(), func() uintptr { if zOrder != 0 { - return ts + 23309 + return ts + 23356 } return ts + 1547 }(), zOrder, @@ -132520,9 +132578,9 @@ var zPrefix uintptr = ts + 1547 if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType != RBU_PK_VTAB { - zPrefix = ts + 30743 + zPrefix = ts + 30790 } - zUpdate = Xsqlite3_mprintf(tls, ts+31404, + zUpdate = Xsqlite3_mprintf(tls, ts+31451, libc.VaList(bp, zPrefix, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zSet, zWhere)) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pUp+8, p+64, zUpdate) @@ -132581,7 +132639,7 @@ } *(*int32)(unsafe.Pointer(bp + 16)) = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp+8, p+64, - Xsqlite3_mprintf(tls, ts+31434, libc.VaList(bp, p+48))) + Xsqlite3_mprintf(tls, ts+31481, libc.VaList(bp, p+48))) for *(*int32)(unsafe.Pointer(bp + 16)) == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 8))) { switch Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 8)), 0) { case RBU_STATE_STAGE: @@ -132654,18 +132712,18 @@ Xsqlite3_file_control(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+6844, SQLITE_FCNTL_RBUCNT, p) if (*Sqlite3rbu)(unsafe.Pointer(p)).FzState == uintptr(0) { var zFile uintptr = Xsqlite3_db_filename(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+6844) - (*Sqlite3rbu)(unsafe.Pointer(p)).FzState = rbuMPrintf(tls, p, ts+31464, libc.VaList(bp, zFile, zFile)) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzState = rbuMPrintf(tls, p, ts+31511, libc.VaList(bp, zFile, zFile)) } } if (*Sqlite3rbu)(unsafe.Pointer(p)).FzState != 0 { - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+31492, libc.VaList(bp+16, (*Sqlite3rbu)(unsafe.Pointer(p)).FzState)) + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+31539, libc.VaList(bp+16, (*Sqlite3rbu)(unsafe.Pointer(p)).FzState)) libc.X__builtin___memcpy_chk(tls, p+48, ts+3279, uint64(4), libc.X__builtin_object_size(tls, p+48, 0)) } else { libc.X__builtin___memcpy_chk(tls, p+48, ts+6844, uint64(4), libc.X__builtin_object_size(tls, p+48, 0)) } - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+31510, libc.VaList(bp+24, p+48)) + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+31557, libc.VaList(bp+24, p+48)) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { var bOpen int32 = 0 @@ -132705,11 +132763,11 @@ return } (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31576, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31623, 0) } else { var zTarget uintptr var zExtra uintptr = uintptr(0) - if libc.Xstrlen(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu) >= uint64(5) && 0 == libc.Xmemcmp(tls, ts+24656, (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu, uint64(5)) { + if libc.Xstrlen(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu) >= uint64(5) && 0 == libc.Xmemcmp(tls, ts+24703, (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu, uint64(5)) { zExtra = (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu + 5 for *(*int8)(unsafe.Pointer(zExtra)) != 0 { if int32(*(*int8)(unsafe.Pointer(libc.PostIncUintptr(&zExtra, 1)))) == '?' 
{ @@ -132721,13 +132779,13 @@ } } - zTarget = Xsqlite3_mprintf(tls, ts+31608, + zTarget = Xsqlite3_mprintf(tls, ts+31655, libc.VaList(bp+32, Xsqlite3_db_filename(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+6844), func() uintptr { if zExtra == uintptr(0) { return ts + 1547 } - return ts + 31640 + return ts + 31687 }(), func() uintptr { if zExtra == uintptr(0) { return ts + 1547 @@ -132746,21 +132804,21 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_create_function(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+31642, -1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { + ts+31689, -1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rbuTmpInsertFunc})), uintptr(0), uintptr(0)) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_create_function(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+31657, 2, SQLITE_UTF8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + ts+31704, 2, SQLITE_UTF8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rbuFossilDeltaFunc})), uintptr(0), uintptr(0)) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_create_function(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, - ts+31674, -1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { + ts+31721, -1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rbuTargetNameFunc})), uintptr(0), uintptr(0)) } @@ -132768,7 +132826,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_file_control(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6844, SQLITE_FCNTL_RBU, p) } - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31690, 0) + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31737, 0) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_file_control(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6844, SQLITE_FCNTL_RBU, p) @@ -132776,7 +132834,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_NOTFOUND { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31718, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31765, 0) } } @@ -132805,14 +132863,14 @@ if pState == uintptr(0) { (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage = 0 if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31690, uintptr(0), uintptr(0), uintptr(0)) + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31737, uintptr(0), uintptr(0), uintptr(0)) } } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { var rc2 int32 (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage = RBU_STAGE_CAPTURE - rc2 = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31736, uintptr(0), uintptr(0), uintptr(0)) + rc2 = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31783, uintptr(0), uintptr(0), uintptr(0)) if rc2 != SQLITE_NOTICE { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = rc2 } @@ -132938,7 +132996,7 @@ func rbuExclusiveCheckpoint(tls *libc.TLS, db uintptr) int32 { var zUri uintptr = Xsqlite3_db_filename(tls, db, uintptr(0)) - return 
Xsqlite3_uri_boolean(tls, zUri, ts+31771, 0) + return Xsqlite3_uri_boolean(tls, zUri, ts+31818, 0) } func rbuMoveOalFile(tls *libc.TLS, p uintptr) { @@ -132953,8 +133011,8 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { zMove = Xsqlite3_db_filename(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+6844) } - zOal = Xsqlite3_mprintf(tls, ts+31796, libc.VaList(bp, zMove)) - zWal = Xsqlite3_mprintf(tls, ts+31803, libc.VaList(bp+8, zMove)) + zOal = Xsqlite3_mprintf(tls, ts+31843, libc.VaList(bp, zMove)) + zWal = Xsqlite3_mprintf(tls, ts+31850, libc.VaList(bp+8, zMove)) if zWal == uintptr(0) || zOal == uintptr(0) { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_NOMEM @@ -133071,7 +133129,7 @@ (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx == uintptr(0) && (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_IPK && *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(i))) != 0 && Xsqlite3_column_type(tls, (*RbuObjIter)(unsafe.Pointer(pIter)).FpSelect, i) == SQLITE_NULL { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_MISMATCH - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+24294, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+24341, 0) return } @@ -133164,7 +133222,7 @@ var iCookie int32 = 1000000 (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, dbread, bp+8, p+64, - ts+31810) + ts+31857) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { if SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 8))) { iCookie = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 8)), 0) @@ -133172,7 +133230,7 @@ rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 8))) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31832, libc.VaList(bp, iCookie+1)) + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31879, libc.VaList(bp, iCookie+1)) } } } @@ -133193,7 +133251,7 @@ rc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp+168, p+64, Xsqlite3_mprintf(tls, - ts+31859, + ts+31906, libc.VaList(bp, p+48, RBU_STATE_STAGE, eStage, RBU_STATE_TBL, (*Sqlite3rbu)(unsafe.Pointer(p)).Fobjiter.FzTbl, @@ -133223,9 +133281,9 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { *(*uintptr)(unsafe.Pointer(bp + 24)) = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp+24, p+64, - Xsqlite3_mprintf(tls, ts+32017, libc.VaList(bp, zPragma))) + Xsqlite3_mprintf(tls, ts+32064, libc.VaList(bp, zPragma))) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) { - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+32032, + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+32079, libc.VaList(bp+8, zPragma, Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 24)), 0))) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 24))) @@ -133239,10 +133297,10 @@ *(*uintptr)(unsafe.Pointer(bp)) = uintptr(0) *(*uintptr)(unsafe.Pointer(bp + 8)) = uintptr(0) - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+32052, uintptr(0), uintptr(0), p+64) + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+32099, uintptr(0), uintptr(0), p+64) if 
(*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp, p+64, - ts+32077) + ts+32124) } for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp))) == SQLITE_ROW { @@ -133256,12 +133314,12 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp, p+64, - ts+32185) + ts+32232) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+8, p+64, - ts+32250) + ts+32297) } for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp))) == SQLITE_ROW { @@ -133273,7 +133331,7 @@ (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_reset(tls, *(*uintptr)(unsafe.Pointer(bp + 8))) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+32294, uintptr(0), uintptr(0), p+64) + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+32341, uintptr(0), uintptr(0), p+64) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp))) @@ -133301,7 +133359,7 @@ if (*RbuObjIter)(unsafe.Pointer(pIter)).FbCleanup != 0 { if libc.Bool32((*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0)) == 0 && (*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed != 0 { rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, - ts+32319, libc.VaList(bp, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl)) + ts+32366, libc.VaList(bp, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl)) } } else { rbuObjIterPrepareAll(tls, p, pIter, 0) @@ -133423,7 +133481,7 @@ if rc == SQLITE_OK && !(int32((*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl) != 0) { rc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32347, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32394, 0) } if rc == SQLITE_OK { @@ -133439,7 +133497,7 @@ bp := tls.Alloc(16) defer tls.Free(16) - var zOal uintptr = rbuMPrintf(tls, p, ts+31796, libc.VaList(bp, (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget)) + var zOal uintptr = rbuMPrintf(tls, p, ts+31843, libc.VaList(bp, (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget)) if zOal != 0 { *(*uintptr)(unsafe.Pointer(bp + 8)) = uintptr(0) Xsqlite3_file_control(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6844, SQLITE_FCNTL_VFS_POINTER, bp+8) @@ -133456,7 +133514,7 @@ defer tls.Free(76) Xsqlite3_randomness(tls, int32(unsafe.Sizeof(int32(0))), bp+8) - Xsqlite3_snprintf(tls, int32(unsafe.Sizeof([64]int8{})), bp+12, ts+32372, libc.VaList(bp, *(*int32)(unsafe.Pointer(bp + 8)))) + Xsqlite3_snprintf(tls, int32(unsafe.Sizeof([64]int8{})), bp+12, ts+32419, libc.VaList(bp, *(*int32)(unsafe.Pointer(bp + 8)))) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3rbu_create_vfs(tls, bp+12, uintptr(0)) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { var pVfs uintptr = Xsqlite3_vfs_find(tls, bp+12) @@ -133490,7 +133548,7 @@ rc = prepareFreeAndCollectError(tls, db, bp+8, bp+16, Xsqlite3_mprintf(tls, - ts+32383, libc.VaList(bp, Xsqlite3_value_text(tls, *(*uintptr)(unsafe.Pointer(apVal)))))) + ts+32430, libc.VaList(bp, Xsqlite3_value_text(tls, *(*uintptr)(unsafe.Pointer(apVal)))))) if rc != SQLITE_OK { Xsqlite3_result_error(tls, pCtx, 
*(*uintptr)(unsafe.Pointer(bp + 16)), -1) } else { @@ -133520,13 +133578,13 @@ (*Sqlite3rbu)(unsafe.Pointer(p)).FnPhaseOneStep = int64(-1) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_create_function(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, - ts+32455, 1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { + ts+32502, 1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rbuIndexCntFunc})), uintptr(0), uintptr(0)) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp, p+64, - ts+32469) + ts+32516) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { if SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp))) { @@ -133537,7 +133595,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && bExists != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp, p+64, - ts+32526) + ts+32573) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { if SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp))) { (*Sqlite3rbu)(unsafe.Pointer(p)).FnPhaseOneStep = Xsqlite3_column_int64(tls, *(*uintptr)(unsafe.Pointer(bp)), 0) @@ -133611,7 +133669,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && (*Rbu_file)(unsafe.Pointer((*Sqlite3rbu)(unsafe.Pointer(p)).FpTargetFd)).FpWalFd != 0 { if (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage == RBU_STAGE_OAL { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32600, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32647, 0) } else if (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage == RBU_STAGE_MOVE { (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage = RBU_STAGE_CKPT (*Sqlite3rbu)(unsafe.Pointer(p)).FnStep = 0 @@ -133629,12 +133687,12 @@ }() if (*Rbu_file)(unsafe.Pointer(pFd)).FiCookie != (*RbuState)(unsafe.Pointer(pState)).FiCookie { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_BUSY - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32632, + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32679, libc.VaList(bp+8, func() uintptr { if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - return ts + 32664 + return ts + 32711 } - return ts + 32671 + return ts + 32718 }())) } } @@ -133658,14 +133716,14 @@ } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, db, ts+32678, uintptr(0), uintptr(0), p+64) + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, db, ts+32725, uintptr(0), uintptr(0), p+64) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { var frc int32 = Xsqlite3_file_control(tls, db, ts+6844, SQLITE_FCNTL_ZIPVFS, uintptr(0)) if frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, - db, ts+32694, uintptr(0), uintptr(0), p+64) + db, ts+32741, uintptr(0), uintptr(0), p+64) } } @@ -133719,7 +133777,7 @@ } if zState != 0 { var n int32 = int32(libc.Xstrlen(tls, zState)) - if n >= 7 && 0 == libc.Xmemcmp(tls, ts+32718, zState+uintptr(n-7), uint64(7)) { + if n >= 7 && 0 == libc.Xmemcmp(tls, ts+32765, zState+uintptr(n-7), uint64(7)) { return rbuMisuseError(tls) } } @@ -133746,7 +133804,7 @@ var i uint32 var nErrmsg Size_t = libc.Xstrlen(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg) for i = uint32(0); Size_t(i) < nErrmsg-uint64(8); i++ { - if libc.Xmemcmp(tls, 
(*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg+uintptr(i), ts+30743, uint64(8)) == 0 { + if libc.Xmemcmp(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg+uintptr(i), ts+30790, uint64(8)) == 0 { var nDel int32 = 8 for int32(*(*int8)(unsafe.Pointer((*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg + uintptr(i+uint32(nDel))))) >= '0' && int32(*(*int8)(unsafe.Pointer((*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg + uintptr(i+uint32(nDel))))) <= '9' { nDel++ @@ -133782,7 +133840,7 @@ rbuObjIterFinalize(tls, p+80) if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) && (*Sqlite3rbu)(unsafe.Pointer(p)).Frc != SQLITE_OK && (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu != 0 { - var rc2 int32 = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+32726, uintptr(0), uintptr(0), uintptr(0)) + var rc2 int32 = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+32773, uintptr(0), uintptr(0), uintptr(0)) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_DONE && rc2 != SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = rc2 } @@ -133901,12 +133959,12 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { zBegin = ts + 14906 } else { - zBegin = ts + 32678 + zBegin = ts + 32725 } rc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, zBegin, uintptr(0), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { - rc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+32678, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+32725, uintptr(0), uintptr(0), uintptr(0)) } } @@ -134252,7 +134310,7 @@ })(unsafe.Pointer(&struct{ uintptr }{xControl})).f(tls, (*Rbu_file)(unsafe.Pointer(p)).FpReal, SQLITE_FCNTL_ZIPVFS, bp+16) if rc == SQLITE_OK { rc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(pRbu)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32753, 0) + (*Sqlite3rbu)(unsafe.Pointer(pRbu)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32800, 0) } else if rc == SQLITE_NOTFOUND { (*Sqlite3rbu)(unsafe.Pointer(pRbu)).FpTargetFd = p (*Rbu_file)(unsafe.Pointer(p)).FpRbu = pRbu @@ -134277,7 +134335,7 @@ if rc == SQLITE_OK && op == SQLITE_FCNTL_VFSNAME { var pRbuVfs uintptr = (*Rbu_file)(unsafe.Pointer(p)).FpRbuVfs var zIn uintptr = *(*uintptr)(unsafe.Pointer(pArg)) - var zOut uintptr = Xsqlite3_mprintf(tls, ts+32776, libc.VaList(bp, (*Rbu_vfs)(unsafe.Pointer(pRbuVfs)).Fbase.FzName, zIn)) + var zOut uintptr = Xsqlite3_mprintf(tls, ts+32823, libc.VaList(bp, (*Rbu_vfs)(unsafe.Pointer(pRbuVfs)).Fbase.FzName, zIn)) *(*uintptr)(unsafe.Pointer(pArg)) = zOut if zOut == uintptr(0) { rc = SQLITE_NOMEM @@ -134437,7 +134495,7 @@ } if oflags&SQLITE_OPEN_MAIN_DB != 0 && - Xsqlite3_uri_boolean(tls, zName, ts+32787, 0) != 0 { + Xsqlite3_uri_boolean(tls, zName, ts+32834, 0) != 0 { oflags = SQLITE_OPEN_TEMP_DB | SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE | SQLITE_OPEN_EXCLUSIVE | SQLITE_OPEN_DELETEONCLOSE zOpen = uintptr(0) } @@ -135266,7 +135324,7 @@ rc = Xsqlite3_table_column_metadata(tls, db, zDb, zThis, uintptr(0), uintptr(0), uintptr(0), uintptr(0), uintptr(0), uintptr(0)) if rc == SQLITE_OK { zPragma = Xsqlite3_mprintf(tls, - ts+32798, 0) + ts+32845, 0) } else if rc == SQLITE_ERROR { zPragma = Xsqlite3_mprintf(tls, ts+1547, 0) } else { @@ -135279,7 +135337,7 @@ return rc } } else { - zPragma = Xsqlite3_mprintf(tls, ts+32919, libc.VaList(bp, zDb, zThis)) + zPragma = Xsqlite3_mprintf(tls, ts+32966, libc.VaList(bp, zDb, zThis)) } if !(zPragma != 0) { *(*uintptr)(unsafe.Pointer(pazCol)) = uintptr(0) @@ -135959,9 +136017,9 @@ for i = 0; i < nCol; i++ { if 
*(*U8)(unsafe.Pointer(abPK + uintptr(i))) != 0 { - zRet = Xsqlite3_mprintf(tls, ts+32948, + zRet = Xsqlite3_mprintf(tls, ts+32995, libc.VaList(bp, zRet, zSep, zDb1, zTab, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*8)), zDb2, zTab, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*8)))) - zSep = ts + 21975 + zSep = ts + 22022 if zRet == uintptr(0) { break } @@ -135984,9 +136042,9 @@ if int32(*(*U8)(unsafe.Pointer(abPK + uintptr(i)))) == 0 { bHave = 1 zRet = Xsqlite3_mprintf(tls, - ts+32982, + ts+33029, libc.VaList(bp, zRet, zSep, zDb1, zTab, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*8)), zDb2, zTab, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*8)))) - zSep = ts + 33023 + zSep = ts + 33070 if zRet == uintptr(0) { break } @@ -135994,7 +136052,7 @@ } if bHave == 0 { - zRet = Xsqlite3_mprintf(tls, ts+7933, 0) + zRet = Xsqlite3_mprintf(tls, ts+7922, 0) } return zRet @@ -136005,7 +136063,7 @@ defer tls.Free(40) var zRet uintptr = Xsqlite3_mprintf(tls, - ts+33028, + ts+33075, libc.VaList(bp, zDb1, zTbl, zDb2, zTbl, zExpr)) return zRet } @@ -136048,7 +136106,7 @@ rc = SQLITE_NOMEM } else { var zStmt uintptr = Xsqlite3_mprintf(tls, - ts+33106, + ts+33153, libc.VaList(bp, (*Sqlite3_session)(unsafe.Pointer(pSession)).FzDb, (*SessionTable)(unsafe.Pointer(pTab)).FzName, zFrom, (*SessionTable)(unsafe.Pointer(pTab)).FzName, zExpr, zExpr2)) if zStmt == uintptr(0) { rc = SQLITE_NOMEM @@ -136175,7 +136233,7 @@ if !(pzErrMsg != 0) { goto __16 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+33159, 0) + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+33206, 0) __16: ; rc = SQLITE_SCHEMA @@ -136651,7 +136709,7 @@ if 0 == Xsqlite3_stricmp(tls, ts+11751, zTab) { zSql = Xsqlite3_mprintf(tls, - ts+33186, libc.VaList(bp, zDb)) + ts+33233, libc.VaList(bp, zDb)) if zSql == uintptr(0) { *(*int32)(unsafe.Pointer(bp + 24)) = SQLITE_NOMEM } @@ -136660,18 +136718,18 @@ var zSep uintptr = ts + 1547 *(*SessionBuffer)(unsafe.Pointer(bp + 8)) = SessionBuffer{} - sessionAppendStr(tls, bp+8, ts+33296, bp+24) + sessionAppendStr(tls, bp+8, ts+33343, bp+24) sessionAppendIdent(tls, bp+8, zDb, bp+24) sessionAppendStr(tls, bp+8, ts+1560, bp+24) sessionAppendIdent(tls, bp+8, zTab, bp+24) - sessionAppendStr(tls, bp+8, ts+33311, bp+24) + sessionAppendStr(tls, bp+8, ts+33358, bp+24) for i = 0; i < nCol; i++ { if *(*U8)(unsafe.Pointer(abPK + uintptr(i))) != 0 { sessionAppendStr(tls, bp+8, zSep, bp+24) sessionAppendIdent(tls, bp+8, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*8)), bp+24) - sessionAppendStr(tls, bp+8, ts+33319, bp+24) + sessionAppendStr(tls, bp+8, ts+33366, bp+24) sessionAppendInteger(tls, bp+8, i+1, bp+24) - zSep = ts + 21975 + zSep = ts + 22022 } } zSql = (*SessionBuffer)(unsafe.Pointer(bp + 8)).FaBuf @@ -136780,7 +136838,7 @@ if (*Sqlite3_session)(unsafe.Pointer(pSession)).Frc != 0 { return (*Sqlite3_session)(unsafe.Pointer(pSession)).Frc } - *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3_exec(tls, (*Sqlite3_session)(unsafe.Pointer(pSession)).Fdb, ts+33325, uintptr(0), uintptr(0), uintptr(0)) + *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3_exec(tls, (*Sqlite3_session)(unsafe.Pointer(pSession)).Fdb, ts+33372, uintptr(0), uintptr(0), uintptr(0)) if *(*int32)(unsafe.Pointer(bp + 40)) != SQLITE_OK { return *(*int32)(unsafe.Pointer(bp + 40)) } @@ -136872,7 +136930,7 @@ } Xsqlite3_free(tls, (*SessionBuffer)(unsafe.Pointer(bp+24)).FaBuf) - Xsqlite3_exec(tls, db, ts+33345, uintptr(0), uintptr(0), uintptr(0)) + Xsqlite3_exec(tls, db, ts+33392, uintptr(0), uintptr(0), uintptr(0)) 
Xsqlite3_mutex_leave(tls, Xsqlite3_db_mutex(tls, db)) return *(*int32)(unsafe.Pointer(bp + 40)) } @@ -137135,7 +137193,7 @@ rc = sessionInputBuffer(tls, pIn, 9) if rc == SQLITE_OK { if (*SessionInput)(unsafe.Pointer(pIn)).FiNext >= (*SessionInput)(unsafe.Pointer(pIn)).FnData { - rc = Xsqlite3CorruptError(tls, 219078) + rc = Xsqlite3CorruptError(tls, 219169) } else { eType = int32(*(*U8)(unsafe.Pointer((*SessionInput)(unsafe.Pointer(pIn)).FaData + uintptr(libc.PostIncInt32(&(*SessionInput)(unsafe.Pointer(pIn)).FiNext, 1))))) @@ -137158,7 +137216,7 @@ rc = sessionInputBuffer(tls, pIn, *(*int32)(unsafe.Pointer(bp))) if rc == SQLITE_OK { if *(*int32)(unsafe.Pointer(bp)) < 0 || *(*int32)(unsafe.Pointer(bp)) > (*SessionInput)(unsafe.Pointer(pIn)).FnData-(*SessionInput)(unsafe.Pointer(pIn)).FiNext { - rc = Xsqlite3CorruptError(tls, 219098) + rc = Xsqlite3CorruptError(tls, 219189) } else { var enc U8 = func() uint8 { if eType == SQLITE_TEXT { @@ -137200,7 +137258,7 @@ nRead = nRead + sessionVarintGet(tls, (*SessionInput)(unsafe.Pointer(pIn)).FaData+uintptr((*SessionInput)(unsafe.Pointer(pIn)).FiNext+nRead), bp) if *(*int32)(unsafe.Pointer(bp)) < 0 || *(*int32)(unsafe.Pointer(bp)) > 65536 { - rc = Xsqlite3CorruptError(tls, 219152) + rc = Xsqlite3CorruptError(tls, 219243) } else { rc = sessionInputBuffer(tls, pIn, nRead+*(*int32)(unsafe.Pointer(bp))+100) nRead = nRead + *(*int32)(unsafe.Pointer(bp)) @@ -137261,7 +137319,7 @@ (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Ftblhdr.FnBuf = 0 sessionBufferGrow(tls, p+72, int64(nByte), bp+4) } else { - *(*int32)(unsafe.Pointer(bp + 4)) = Xsqlite3CorruptError(tls, 219240) + *(*int32)(unsafe.Pointer(bp + 4)) = Xsqlite3CorruptError(tls, 219331) } } @@ -137335,13 +137393,13 @@ } if (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FzTab == uintptr(0) || (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FbPatchset != 0 && (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FbInvert != 0 { - return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219326)) + return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219417)) } (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fop = int32(op) (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FbIndirect = int32(*(*U8)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fin.FaData + uintptr(libc.PostIncInt32(&(*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fin.FiNext, 1))))) if (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fop != SQLITE_UPDATE && (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fop != SQLITE_DELETE && (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fop != SQLITE_INSERT { - return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219332)) + return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219423)) } if paRec != 0 { @@ -137403,7 +137461,7 @@ if *(*U8)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FabPK + uintptr(i))) != 0 { *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FapValue + uintptr(i)*8)) = *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FapValue + uintptr(i+(*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FnCol)*8)) if *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FapValue + uintptr(i)*8)) == uintptr(0) { - return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219376)) + return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219467)) } *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FapValue + uintptr(i+(*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FnCol)*8)) = uintptr(0) } @@ -137776,7 
+137834,7 @@ goto __6 __11: - *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3CorruptError(tls, 219741) + *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3CorruptError(tls, 219832) goto finished_invert __6: ; @@ -137955,34 +138013,34 @@ (*SessionUpdate)(unsafe.Pointer(pUp)).FaMask = pUp + 1*24 libc.X__builtin___memcpy_chk(tls, (*SessionUpdate)(unsafe.Pointer(pUp)).FaMask, (*SessionApplyCtx)(unsafe.Pointer(p)).FaUpdateMask, uint64(nU32)*uint64(unsafe.Sizeof(U32(0))), libc.X__builtin_object_size(tls, (*SessionUpdate)(unsafe.Pointer(pUp)).FaMask, 0)) - sessionAppendStr(tls, bp, ts+33363, bp+16) + sessionAppendStr(tls, bp, ts+33410, bp+16) sessionAppendIdent(tls, bp, (*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FzTab, bp+16) - sessionAppendStr(tls, bp, ts+33376, bp+16) + sessionAppendStr(tls, bp, ts+33423, bp+16) for ii = 0; ii < (*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FnCol; ii++ { if int32(*(*U8)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FabPK + uintptr(ii)))) == 0 && *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FapValue + uintptr((*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FnCol+ii)*8)) != 0 { sessionAppendStr(tls, bp, zSep, bp+16) sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(ii)*8)), bp+16) - sessionAppendStr(tls, bp, ts+33382, bp+16) + sessionAppendStr(tls, bp, ts+33429, bp+16) sessionAppendInteger(tls, bp, ii*2+1, bp+16) zSep = ts + 15017 } } zSep = ts + 1547 - sessionAppendStr(tls, bp, ts+33311, bp+16) + sessionAppendStr(tls, bp, ts+33358, bp+16) for ii = 0; ii < (*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FnCol; ii++ { if *(*U8)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FabPK + uintptr(ii))) != 0 || bPatchset == 0 && *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FapValue + uintptr(ii)*8)) != 0 { sessionAppendStr(tls, bp, zSep, bp+16) if bStat1 != 0 && ii == 1 { sessionAppendStr(tls, bp, - ts+33387, bp+16) + ts+33434, bp+16) } else { sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(ii)*8)), bp+16) - sessionAppendStr(tls, bp, ts+33319, bp+16) + sessionAppendStr(tls, bp, ts+33366, bp+16) sessionAppendInteger(tls, bp, ii*2+2, bp+16) } - zSep = ts + 21975 + zSep = ts + 22022 } } @@ -138034,34 +138092,34 @@ *(*SessionBuffer)(unsafe.Pointer(bp)) = SessionBuffer{} var nPk int32 = 0 - sessionAppendStr(tls, bp, ts+33462, bp+16) + sessionAppendStr(tls, bp, ts+33509, bp+16) sessionAppendIdent(tls, bp, zTab, bp+16) - sessionAppendStr(tls, bp, ts+33311, bp+16) + sessionAppendStr(tls, bp, ts+33358, bp+16) for i = 0; i < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol; i++ { if *(*U8)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FabPK + uintptr(i))) != 0 { nPk++ sessionAppendStr(tls, bp, zSep, bp+16) sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(i)*8)), bp+16) - sessionAppendStr(tls, bp, ts+33382, bp+16) + sessionAppendStr(tls, bp, ts+33429, bp+16) sessionAppendInteger(tls, bp, i+1, bp+16) - zSep = ts + 21975 + zSep = ts + 22022 } } if nPk < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol { - sessionAppendStr(tls, bp, ts+33480, bp+16) + sessionAppendStr(tls, bp, ts+33527, bp+16) sessionAppendInteger(tls, bp, (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol+1, bp+16) - sessionAppendStr(tls, bp, ts+33023, bp+16) + sessionAppendStr(tls, bp, ts+33070, bp+16) zSep = ts + 1547 for i = 0; i < 
(*SessionApplyCtx)(unsafe.Pointer(p)).FnCol; i++ { if !(int32(*(*U8)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FabPK + uintptr(i)))) != 0) { sessionAppendStr(tls, bp, zSep, bp+16) sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(i)*8)), bp+16) - sessionAppendStr(tls, bp, ts+33319, bp+16) + sessionAppendStr(tls, bp, ts+33366, bp+16) sessionAppendInteger(tls, bp, i+1, bp+16) - zSep = ts + 33488 + zSep = ts + 33535 } } sessionAppendStr(tls, bp, ts+5360, bp+16) @@ -138088,9 +138146,9 @@ var i int32 *(*SessionBuffer)(unsafe.Pointer(bp)) = SessionBuffer{} - sessionAppendStr(tls, bp, ts+33493, bp+16) + sessionAppendStr(tls, bp, ts+33540, bp+16) sessionAppendIdent(tls, bp, zTab, bp+16) - sessionAppendStr(tls, bp, ts+21981, bp+16) + sessionAppendStr(tls, bp, ts+22028, bp+16) for i = 0; i < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol; i++ { if i != 0 { sessionAppendStr(tls, bp, ts+15017, bp+16) @@ -138098,9 +138156,9 @@ sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(i)*8)), bp+16) } - sessionAppendStr(tls, bp, ts+33511, bp+16) + sessionAppendStr(tls, bp, ts+33558, bp+16) for i = 1; i < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol; i++ { - sessionAppendStr(tls, bp, ts+33522, bp+16) + sessionAppendStr(tls, bp, ts+33569, bp+16) } sessionAppendStr(tls, bp, ts+5360, bp+16) @@ -138119,11 +138177,11 @@ var rc int32 = sessionSelectRow(tls, db, ts+11751, p) if rc == SQLITE_OK { rc = sessionPrepare(tls, db, p+16, - ts+33526) + ts+33573) } if rc == SQLITE_OK { rc = sessionPrepare(tls, db, p+8, - ts+33639) + ts+33686) } return rc } @@ -138151,7 +138209,7 @@ f func(*libc.TLS, uintptr, int32, uintptr) int32 })(unsafe.Pointer(&struct{ uintptr }{xValue})).f(tls, pIter, i, bp) if *(*uintptr)(unsafe.Pointer(bp)) == uintptr(0) { - rc = Xsqlite3CorruptError(tls, 220219) + rc = Xsqlite3CorruptError(tls, 220310) } else { rc = sessionBindValue(tls, pStmt, i+1, *(*uintptr)(unsafe.Pointer(bp))) } @@ -138404,7 +138462,7 @@ if *(*int32)(unsafe.Pointer(bp + 4)) != 0 { rc = sessionApplyOneOp(tls, pIter, pApply, xConflict, pCtx, uintptr(0), uintptr(0)) } else if *(*int32)(unsafe.Pointer(bp)) != 0 { - rc = Xsqlite3_exec(tls, db, ts+33783, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33830, uintptr(0), uintptr(0), uintptr(0)) if rc == SQLITE_OK { rc = sessionBindRow(tls, pIter, *(*uintptr)(unsafe.Pointer(&struct { @@ -138420,7 +138478,7 @@ rc = sessionApplyOneOp(tls, pIter, pApply, xConflict, pCtx, uintptr(0), uintptr(0)) } if rc == SQLITE_OK { - rc = Xsqlite3_exec(tls, db, ts+33804, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33851, uintptr(0), uintptr(0), uintptr(0)) } } } @@ -138493,10 +138551,10 @@ (*SessionApplyCtx)(unsafe.Pointer(bp + 48)).FbInvertConstraints = libc.BoolInt32(!!(flags&SQLITE_CHANGESETAPPLY_INVERT != 0)) Xsqlite3_mutex_enter(tls, Xsqlite3_db_mutex(tls, db)) if flags&SQLITE_CHANGESETAPPLY_NOSAVEPOINT == 0 { - rc = Xsqlite3_exec(tls, db, ts+33823, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33870, uintptr(0), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { - rc = Xsqlite3_exec(tls, db, ts+33849, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33896, uintptr(0), uintptr(0), uintptr(0)) } for rc == SQLITE_OK && SQLITE_ROW == Xsqlite3changeset_next(tls, pIter) { Xsqlite3changeset_op(tls, pIter, bp+176, bp+184, bp+188, uintptr(0)) @@ -138555,16 +138613,16 @@ if 
(*SessionApplyCtx)(unsafe.Pointer(bp+48)).FnCol == 0 { schemaMismatch = 1 Xsqlite3_log(tls, SQLITE_SCHEMA, - ts+33879, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(bp + 200)))) + ts+33926, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(bp + 200)))) } else if (*SessionApplyCtx)(unsafe.Pointer(bp+48)).FnCol < *(*int32)(unsafe.Pointer(bp + 184)) { schemaMismatch = 1 Xsqlite3_log(tls, SQLITE_SCHEMA, - ts+33923, + ts+33970, libc.VaList(bp+16, *(*uintptr)(unsafe.Pointer(bp + 200)), (*SessionApplyCtx)(unsafe.Pointer(bp+48)).FnCol, *(*int32)(unsafe.Pointer(bp + 184)))) } else if *(*int32)(unsafe.Pointer(bp + 184)) < nMinCol || libc.Xmemcmp(tls, (*SessionApplyCtx)(unsafe.Pointer(bp+48)).FabPK, *(*uintptr)(unsafe.Pointer(bp + 192)), uint64(*(*int32)(unsafe.Pointer(bp + 184)))) != 0 { schemaMismatch = 1 Xsqlite3_log(tls, SQLITE_SCHEMA, - ts+33994, libc.VaList(bp+40, *(*uintptr)(unsafe.Pointer(bp + 200)))) + ts+34041, libc.VaList(bp+40, *(*uintptr)(unsafe.Pointer(bp + 200)))) } else { (*SessionApplyCtx)(unsafe.Pointer(bp + 48)).FnCol = *(*int32)(unsafe.Pointer(bp + 184)) if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(bp + 200)), ts+11751) { @@ -138618,14 +138676,14 @@ } } } - Xsqlite3_exec(tls, db, ts+34054, uintptr(0), uintptr(0), uintptr(0)) + Xsqlite3_exec(tls, db, ts+34101, uintptr(0), uintptr(0), uintptr(0)) if flags&SQLITE_CHANGESETAPPLY_NOSAVEPOINT == 0 { if rc == SQLITE_OK { - rc = Xsqlite3_exec(tls, db, ts+34084, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+34131, uintptr(0), uintptr(0), uintptr(0)) } else { - Xsqlite3_exec(tls, db, ts+34108, uintptr(0), uintptr(0), uintptr(0)) - Xsqlite3_exec(tls, db, ts+34084, uintptr(0), uintptr(0), uintptr(0)) + Xsqlite3_exec(tls, db, ts+34155, uintptr(0), uintptr(0), uintptr(0)) + Xsqlite3_exec(tls, db, ts+34131, uintptr(0), uintptr(0), uintptr(0)) } } @@ -139873,7 +139931,7 @@ fts5yy_pop_parser_stack(tls, fts5yypParser) } - sqlite3Fts5ParseError(tls, pParse, ts+34136, 0) + sqlite3Fts5ParseError(tls, pParse, ts+34183, 0) (*Fts5yyParser)(unsafe.Pointer(fts5yypParser)).FpParse = pParse @@ -140161,7 +140219,7 @@ _ = fts5yymajor sqlite3Fts5ParseError(tls, - pParse, ts+34164, libc.VaList(bp, fts5yyminor.Fn, fts5yyminor.Fp)) + pParse, ts+34211, libc.VaList(bp, fts5yyminor.Fn, fts5yyminor.Fp)) (*Fts5yyParser)(unsafe.Pointer(fts5yypParser)).FpParse = pParse @@ -140348,7 +140406,7 @@ if n < 0 { n = int32(libc.Xstrlen(tls, z)) } - (*HighlightContext)(unsafe.Pointer(p)).FzOut = Xsqlite3_mprintf(tls, ts+34195, libc.VaList(bp, (*HighlightContext)(unsafe.Pointer(p)).FzOut, n, z)) + (*HighlightContext)(unsafe.Pointer(p)).FzOut = Xsqlite3_mprintf(tls, ts+34242, libc.VaList(bp, (*HighlightContext)(unsafe.Pointer(p)).FzOut, n, z)) if (*HighlightContext)(unsafe.Pointer(p)).FzOut == uintptr(0) { *(*int32)(unsafe.Pointer(pRc)) = SQLITE_NOMEM } @@ -140416,7 +140474,7 @@ var iCol int32 if nVal != 3 { - var zErr uintptr = ts + 34202 + var zErr uintptr = ts + 34249 Xsqlite3_result_error(tls, pCtx, zErr, -1) return } @@ -140598,7 +140656,7 @@ var nCol int32 if nVal != 5 { - var zErr uintptr = ts + 34252 + var zErr uintptr = ts + 34299 Xsqlite3_result_error(tls, pCtx, zErr, -1) return } @@ -140922,13 +140980,13 @@ defer tls.Free(96) *(*[3]Builtin)(unsafe.Pointer(bp)) = [3]Builtin{ - {FzFunc: ts + 34300, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { + {FzFunc: ts + 34347, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr, int32, uintptr) }{fts5SnippetFunction}))}, - {FzFunc: ts + 34308, FxFunc: 
*(*uintptr)(unsafe.Pointer(&struct { + {FzFunc: ts + 34355, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr, int32, uintptr) }{fts5HighlightFunction}))}, - {FzFunc: ts + 34318, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { + {FzFunc: ts + 34365, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr, int32, uintptr) }{fts5Bm25Function}))}, } @@ -141479,7 +141537,7 @@ *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_OK var nCmd int32 = int32(libc.Xstrlen(tls, zCmd)) - if Xsqlite3_strnicmp(tls, ts+34323, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+34370, zCmd, nCmd) == 0 { var nByte int32 = int32(uint64(unsafe.Sizeof(int32(0))) * uint64(FTS5_MAX_PREFIX_INDEXES)) var p uintptr var bFirst int32 = 1 @@ -141506,14 +141564,14 @@ break } if int32(*(*int8)(unsafe.Pointer(p))) < '0' || int32(*(*int8)(unsafe.Pointer(p))) > '9' { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34330, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34377, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR break } if (*Fts5Config)(unsafe.Pointer(pConfig)).FnPrefix == FTS5_MAX_PREFIX_INDEXES { *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, - ts+34361, libc.VaList(bp, FTS5_MAX_PREFIX_INDEXES)) + ts+34408, libc.VaList(bp, FTS5_MAX_PREFIX_INDEXES)) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR break } @@ -141524,7 +141582,7 @@ } if nPre <= 0 || nPre >= 1000 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34394, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34441, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR break } @@ -141537,7 +141595,7 @@ return *(*int32)(unsafe.Pointer(bp + 40)) } - if Xsqlite3_strnicmp(tls, ts+34431, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+34478, zCmd, nCmd) == 0 { var p uintptr = zArg var nArg Sqlite3_int64 = Sqlite3_int64(libc.Xstrlen(tls, zArg) + uint64(1)) var azArg uintptr = sqlite3Fts5MallocZero(tls, bp+40, int64(uint64(unsafe.Sizeof(uintptr(0)))*uint64(nArg))) @@ -141546,7 +141604,7 @@ if azArg != 0 && pSpace != 0 { if (*Fts5Config)(unsafe.Pointer(pConfig)).FpTok != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34440, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34487, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { for nArg = int64(0); p != 0 && *(*int8)(unsafe.Pointer(p)) != 0; nArg++ { @@ -141565,7 +141623,7 @@ } } if p == uintptr(0) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34473, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34520, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { *(*int32)(unsafe.Pointer(bp + 40)) = sqlite3Fts5GetTokenizer(tls, pGlobal, @@ -141580,14 +141638,14 @@ return *(*int32)(unsafe.Pointer(bp + 40)) } - if Xsqlite3_strnicmp(tls, ts+34507, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+34554, zCmd, nCmd) == 0 { if (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent != FTS5_CONTENT_NORMAL { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34515, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34562, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { if *(*int8)(unsafe.Pointer(zArg)) != 0 { (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent = FTS5_CONTENT_EXTERNAL - (*Fts5Config)(unsafe.Pointer(pConfig)).FzContent = sqlite3Fts5Mprintf(tls, bp+40, ts+34547, libc.VaList(bp+8, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, zArg)) + 
(*Fts5Config)(unsafe.Pointer(pConfig)).FzContent = sqlite3Fts5Mprintf(tls, bp+40, ts+34594, libc.VaList(bp+8, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, zArg)) } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent = FTS5_CONTENT_NONE } @@ -141595,9 +141653,9 @@ return *(*int32)(unsafe.Pointer(bp + 40)) } - if Xsqlite3_strnicmp(tls, ts+34553, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+34600, zCmd, nCmd) == 0 { if (*Fts5Config)(unsafe.Pointer(pConfig)).FzContentRowid != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34567, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34614, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FzContentRowid = sqlite3Fts5Strndup(tls, bp+40, zArg, -1) @@ -141605,9 +141663,9 @@ return *(*int32)(unsafe.Pointer(bp + 40)) } - if Xsqlite3_strnicmp(tls, ts+34605, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+34652, zCmd, nCmd) == 0 { if int32(*(*int8)(unsafe.Pointer(zArg))) != '0' && int32(*(*int8)(unsafe.Pointer(zArg))) != '1' || int32(*(*int8)(unsafe.Pointer(zArg + 1))) != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34616, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34663, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize = libc.Bool32(int32(*(*int8)(unsafe.Pointer(zArg))) == '1') @@ -141619,17 +141677,17 @@ *(*[4]Fts5Enum)(unsafe.Pointer(bp + 48)) = [4]Fts5Enum{ {FzName: ts + 8429, FeVal: FTS5_DETAIL_NONE}, {FzName: ts + 17767}, - {FzName: ts + 34651, FeVal: FTS5_DETAIL_COLUMNS}, + {FzName: ts + 34698, FeVal: FTS5_DETAIL_COLUMNS}, {}, } if libc.AssignPtrInt32(bp+40, fts5ConfigSetEnum(tls, bp+48, zArg, pConfig+92)) != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34659, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34706, 0) } return *(*int32)(unsafe.Pointer(bp + 40)) } - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34690, libc.VaList(bp+24, nCmd, zCmd)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34737, libc.VaList(bp+24, nCmd, zCmd)) return SQLITE_ERROR } @@ -141676,15 +141734,15 @@ defer tls.Free(16) var rc int32 = SQLITE_OK - if 0 == Xsqlite3_stricmp(tls, zCol, ts+22641) || + if 0 == Xsqlite3_stricmp(tls, zCol, ts+22688) || 0 == Xsqlite3_stricmp(tls, zCol, ts+16673) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34718, libc.VaList(bp, zCol)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34765, libc.VaList(bp, zCol)) rc = SQLITE_ERROR } else if zArg != 0 { - if 0 == Xsqlite3_stricmp(tls, zArg, ts+34748) { + if 0 == Xsqlite3_stricmp(tls, zArg, ts+34795) { *(*U8)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(p)).FabUnindexed + uintptr((*Fts5Config)(unsafe.Pointer(p)).FnCol))) = U8(1) } else { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34758, libc.VaList(bp+8, zArg)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34805, libc.VaList(bp+8, zArg)) rc = SQLITE_ERROR } } @@ -141701,13 +141759,13 @@ *(*int32)(unsafe.Pointer(bp + 24)) = SQLITE_OK *(*Fts5Buffer)(unsafe.Pointer(bp + 32)) = Fts5Buffer{} - sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+34789, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(p)).FzContentRowid)) + sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+34836, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(p)).FzContentRowid)) if (*Fts5Config)(unsafe.Pointer(p)).FeContent != 
FTS5_CONTENT_NONE { for i = 0; i < (*Fts5Config)(unsafe.Pointer(p)).FnCol; i++ { if (*Fts5Config)(unsafe.Pointer(p)).FeContent == FTS5_CONTENT_EXTERNAL { - sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+34794, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(p)).FazCol + uintptr(i)*8)))) + sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+34841, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(p)).FazCol + uintptr(i)*8)))) } else { - sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+34801, libc.VaList(bp+16, i)) + sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+34848, libc.VaList(bp+16, i)) } } } @@ -141745,8 +141803,8 @@ (*Fts5Config)(unsafe.Pointer(pRet)).FzName = sqlite3Fts5Strndup(tls, bp+40, *(*uintptr)(unsafe.Pointer(azArg + 2*8)), -1) (*Fts5Config)(unsafe.Pointer(pRet)).FbColumnsize = 1 (*Fts5Config)(unsafe.Pointer(pRet)).FeDetail = FTS5_DETAIL_FULL - if *(*int32)(unsafe.Pointer(bp + 40)) == SQLITE_OK && Xsqlite3_stricmp(tls, (*Fts5Config)(unsafe.Pointer(pRet)).FzName, ts+22641) == 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34809, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pRet)).FzName)) + if *(*int32)(unsafe.Pointer(bp + 40)) == SQLITE_OK && Xsqlite3_stricmp(tls, (*Fts5Config)(unsafe.Pointer(pRet)).FzName, ts+22688) == 0 { + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34856, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pRet)).FzName)) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } @@ -141778,7 +141836,7 @@ if *(*int32)(unsafe.Pointer(bp + 40)) == SQLITE_OK { if z == uintptr(0) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34838, libc.VaList(bp+8, zOrig)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34885, libc.VaList(bp+8, zOrig)) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { if bOption != 0 { @@ -141815,14 +141873,14 @@ var zTail uintptr = uintptr(0) if (*Fts5Config)(unsafe.Pointer(pRet)).FeContent == FTS5_CONTENT_NORMAL { - zTail = ts + 34507 + zTail = ts + 34554 } else if (*Fts5Config)(unsafe.Pointer(pRet)).FbColumnsize != 0 { - zTail = ts + 34858 + zTail = ts + 34905 } if zTail != 0 { (*Fts5Config)(unsafe.Pointer(pRet)).FzContent = sqlite3Fts5Mprintf(tls, - bp+40, ts+34866, libc.VaList(bp+16, (*Fts5Config)(unsafe.Pointer(pRet)).FzDb, (*Fts5Config)(unsafe.Pointer(pRet)).FzName, zTail)) + bp+40, ts+34913, libc.VaList(bp+16, (*Fts5Config)(unsafe.Pointer(pRet)).FzDb, (*Fts5Config)(unsafe.Pointer(pRet)).FzName, zTail)) } } @@ -141871,7 +141929,7 @@ *(*int32)(unsafe.Pointer(bp + 48)) = SQLITE_OK var zSql uintptr - zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34877, 0) + zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34924, 0) for i = 0; zSql != 0 && i < (*Fts5Config)(unsafe.Pointer(pConfig)).FnCol; i++ { var zSep uintptr = func() uintptr { if i == 0 { @@ -141879,10 +141937,10 @@ } return ts + 15017 }() - zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34893, libc.VaList(bp, zSql, zSep, *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(pConfig)).FazCol + uintptr(i)*8)))) + zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34940, libc.VaList(bp, zSql, zSep, *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(pConfig)).FazCol + uintptr(i)*8)))) } - zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34900, - libc.VaList(bp+24, zSql, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, ts+22641)) + zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34947, + libc.VaList(bp+24, zSql, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, ts+22688)) if zSql != 0 { 
*(*int32)(unsafe.Pointer(bp + 48)) = Xsqlite3_declare_vtab(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, zSql) @@ -141992,7 +142050,7 @@ var rc int32 = SQLITE_OK - if 0 == Xsqlite3_stricmp(tls, zKey, ts+34926) { + if 0 == Xsqlite3_stricmp(tls, zKey, ts+34973) { var pgsz int32 = 0 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { pgsz = Xsqlite3_value_int(tls, pVal) @@ -142002,7 +142060,7 @@ } else { (*Fts5Config)(unsafe.Pointer(pConfig)).Fpgsz = pgsz } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34931) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34978) { var nHashSize int32 = -1 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { nHashSize = Xsqlite3_value_int(tls, pVal) @@ -142012,7 +142070,7 @@ } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FnHashSize = nHashSize } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34940) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34987) { var nAutomerge int32 = -1 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { nAutomerge = Xsqlite3_value_int(tls, pVal) @@ -142025,7 +142083,7 @@ } (*Fts5Config)(unsafe.Pointer(pConfig)).FnAutomerge = nAutomerge } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34950) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34997) { var nUsermerge int32 = -1 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { nUsermerge = Xsqlite3_value_int(tls, pVal) @@ -142035,7 +142093,7 @@ } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FnUsermerge = nUsermerge } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34960) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+35007) { var nCrisisMerge int32 = -1 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { nCrisisMerge = Xsqlite3_value_int(tls, pVal) @@ -142051,7 +142109,7 @@ } (*Fts5Config)(unsafe.Pointer(pConfig)).FnCrisisMerge = nCrisisMerge } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+22641) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+22688) { var zIn uintptr = Xsqlite3_value_text(tls, pVal) rc = sqlite3Fts5ConfigParseRank(tls, zIn, bp, bp+8) @@ -142074,7 +142132,7 @@ bp := tls.Alloc(52) defer tls.Free(52) - var zSelect uintptr = ts + 34972 + var zSelect uintptr = ts + 35019 var zSql uintptr *(*uintptr)(unsafe.Pointer(bp + 40)) = uintptr(0) *(*int32)(unsafe.Pointer(bp + 32)) = SQLITE_OK @@ -142096,7 +142154,7 @@ for SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 40))) { var zK uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 40)), 0) var pVal uintptr = Xsqlite3_column_value(tls, *(*uintptr)(unsafe.Pointer(bp + 40)), 1) - if 0 == Xsqlite3_stricmp(tls, zK, ts+35004) { + if 0 == Xsqlite3_stricmp(tls, zK, ts+35051) { iVersion = Xsqlite3_value_int(tls, pVal) } else { *(*int32)(unsafe.Pointer(bp + 48)) = 0 @@ -142110,7 +142168,7 @@ *(*int32)(unsafe.Pointer(bp + 32)) = SQLITE_ERROR if (*Fts5Config)(unsafe.Pointer(pConfig)).FpzErrmsg != 0 { *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(pConfig)).FpzErrmsg)) = Xsqlite3_mprintf(tls, - ts+35012, + ts+35059, libc.VaList(bp+16, iVersion, FTS5_CURRENT_VERSION)) } } @@ -142208,7 +142266,7 @@ } } if int32(*(*int8)(unsafe.Pointer(z2))) == 0 { - sqlite3Fts5ParseError(tls, pParse, ts+35077, 0) + sqlite3Fts5ParseError(tls, pParse, ts+35124, 0) return FTS5_EOF } } @@ -142221,20 +142279,20 @@ { var z2 uintptr if sqlite3Fts5IsBareword(tls, *(*int8)(unsafe.Pointer(z))) == 0 { - sqlite3Fts5ParseError(tls, pParse, ts+35097, libc.VaList(bp, z)) + sqlite3Fts5ParseError(tls, pParse, ts+35144, libc.VaList(bp, z)) return FTS5_EOF } tok = 
FTS5_STRING for z2 = z + 1; sqlite3Fts5IsBareword(tls, *(*int8)(unsafe.Pointer(z2))) != 0; z2++ { } (*Fts5Token)(unsafe.Pointer(pToken)).Fn = int32((int64(z2) - int64(z)) / 1) - if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 2 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+35128, uint64(2)) == 0 { + if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 2 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+35175, uint64(2)) == 0 { tok = FTS5_OR } - if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 3 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+35131, uint64(3)) == 0 { + if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 3 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+35178, uint64(3)) == 0 { tok = FTS5_NOT } - if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 3 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+30513, uint64(3)) == 0 { + if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 3 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+30560, uint64(3)) == 0 { tok = FTS5_AND } break @@ -144012,9 +144070,9 @@ bp := tls.Alloc(16) defer tls.Free(16) - if (*Fts5Token)(unsafe.Pointer(pTok)).Fn != 4 || libc.Xmemcmp(tls, ts+35135, (*Fts5Token)(unsafe.Pointer(pTok)).Fp, uint64(4)) != 0 { + if (*Fts5Token)(unsafe.Pointer(pTok)).Fn != 4 || libc.Xmemcmp(tls, ts+35182, (*Fts5Token)(unsafe.Pointer(pTok)).Fp, uint64(4)) != 0 { sqlite3Fts5ParseError(tls, - pParse, ts+34164, libc.VaList(bp, (*Fts5Token)(unsafe.Pointer(pTok)).Fn, (*Fts5Token)(unsafe.Pointer(pTok)).Fp)) + pParse, ts+34211, libc.VaList(bp, (*Fts5Token)(unsafe.Pointer(pTok)).Fn, (*Fts5Token)(unsafe.Pointer(pTok)).Fp)) } } @@ -144030,7 +144088,7 @@ var c int8 = *(*int8)(unsafe.Pointer((*Fts5Token)(unsafe.Pointer(p)).Fp + uintptr(i))) if int32(c) < '0' || int32(c) > '9' { sqlite3Fts5ParseError(tls, - pParse, ts+35140, libc.VaList(bp, (*Fts5Token)(unsafe.Pointer(p)).Fn, (*Fts5Token)(unsafe.Pointer(p)).Fp)) + pParse, ts+35187, libc.VaList(bp, (*Fts5Token)(unsafe.Pointer(p)).Fn, (*Fts5Token)(unsafe.Pointer(p)).Fp)) return } nNear = nNear*10 + (int32(*(*int8)(unsafe.Pointer((*Fts5Token)(unsafe.Pointer(p)).Fp + uintptr(i)))) - '0') @@ -144117,7 +144175,7 @@ } } if iCol == (*Fts5Config)(unsafe.Pointer(pConfig)).FnCol { - sqlite3Fts5ParseError(tls, pParse, ts+20978, libc.VaList(bp, z)) + sqlite3Fts5ParseError(tls, pParse, ts+21025, libc.VaList(bp, z)) } else { pRet = fts5ParseColset(tls, pParse, pColset, iCol) } @@ -144198,7 +144256,7 @@ *(*uintptr)(unsafe.Pointer(bp)) = pColset if (*Fts5Config)(unsafe.Pointer((*Fts5Parse)(unsafe.Pointer(pParse)).FpConfig)).FeDetail == FTS5_DETAIL_NONE { sqlite3Fts5ParseError(tls, pParse, - ts+35169, 0) + ts+35216, 0) } else { fts5ParseSetColset(tls, pParse, pExpr, pColset, bp) } @@ -144368,12 +144426,12 @@ (*Fts5ExprPhrase)(unsafe.Pointer(pPhrase)).FnTerm > 1 || (*Fts5ExprPhrase)(unsafe.Pointer(pPhrase)).FnTerm > 0 && (*Fts5ExprTerm)(unsafe.Pointer(pPhrase+32)).FbFirst != 0 { sqlite3Fts5ParseError(tls, pParse, - ts+35222, + ts+35269, libc.VaList(bp, func() uintptr { if (*Fts5ExprNearset)(unsafe.Pointer(pNear)).FnPhrase == 1 { - return ts + 35272 + return ts + 35319 } - return ts + 35135 + return ts + 35182 }())) Xsqlite3_free(tls, pRet) pRet = uintptr(0) @@ -145316,7 +145374,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpReader == uintptr(0) && rc == SQLITE_OK { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig rc = Xsqlite3_blob_open(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, - (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, 
(*Fts5Index)(unsafe.Pointer(p)).FzDataTbl, ts+35279, iRowid, 0, p+56) + (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl, ts+35326, iRowid, 0, p+56) } if rc == SQLITE_ERROR { @@ -145395,7 +145453,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpWriter == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig fts5IndexPrepareStmt(tls, p, p+64, Xsqlite3_mprintf(tls, - ts+35285, + ts+35332, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName))) if (*Fts5Index)(unsafe.Pointer(p)).Frc != 0 { return @@ -145420,7 +145478,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpDeleter == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig var zSql uintptr = Xsqlite3_mprintf(tls, - ts+35336, + ts+35383, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) if fts5IndexPrepareStmt(tls, p, p+72, zSql) != 0 { return @@ -145443,7 +145501,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpIdxDeleter == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig fts5IndexPrepareStmt(tls, p, p+88, Xsqlite3_mprintf(tls, - ts+35385, + ts+35432, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName))) } if (*Fts5Index)(unsafe.Pointer(p)).Frc == SQLITE_OK { @@ -145682,7 +145740,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).Frc == SQLITE_OK { if (*Fts5Index)(unsafe.Pointer(p)).FpDataVersion == uintptr(0) { (*Fts5Index)(unsafe.Pointer(p)).Frc = fts5IndexPrepareStmt(tls, p, p+112, - Xsqlite3_mprintf(tls, ts+35425, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer((*Fts5Index)(unsafe.Pointer(p)).FpConfig)).FzDb))) + Xsqlite3_mprintf(tls, ts+35472, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer((*Fts5Index)(unsafe.Pointer(p)).FpConfig)).FzDb))) if (*Fts5Index)(unsafe.Pointer(p)).Frc != 0 { return int64(0) } @@ -146881,7 +146939,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpIdxSelect == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig fts5IndexPrepareStmt(tls, p, p+96, Xsqlite3_mprintf(tls, - ts+35448, + ts+35495, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName))) } return (*Fts5Index)(unsafe.Pointer(p)).FpIdxSelect @@ -148347,7 +148405,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpIdxWriter == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig fts5IndexPrepareStmt(tls, p, p+80, Xsqlite3_mprintf(tls, - ts+35532, + ts+35579, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName))) } @@ -149429,13 +149487,13 @@ if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { (*Fts5Index)(unsafe.Pointer(p)).FpConfig = pConfig (*Fts5Index)(unsafe.Pointer(p)).FnWorkUnit = FTS5_WORK_UNIT - (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl = sqlite3Fts5Mprintf(tls, bp+8, ts+35589, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) + (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl = sqlite3Fts5Mprintf(tls, bp+8, ts+35636, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) if (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl != 0 && bCreate != 0 { *(*int32)(unsafe.Pointer(bp + 8)) = sqlite3Fts5CreateTable(tls, - pConfig, ts+25506, ts+35597, 0, pzErr) + pConfig, ts+25553, ts+35644, 0, pzErr) if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { *(*int32)(unsafe.Pointer(bp + 8)) = sqlite3Fts5CreateTable(tls, pConfig, ts+11891, - ts+35632, + ts+35679, 1, pzErr) } if 
*(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { @@ -149688,7 +149746,7 @@ sqlite3Fts5Put32(tls, bp, iNew) rc = Xsqlite3_blob_open(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl, - ts+35279, int64(FTS5_STRUCTURE_ROWID), 1, bp+8) + ts+35326, int64(FTS5_STRUCTURE_ROWID), 1, bp+8) if rc == SQLITE_OK { Xsqlite3_blob_write(tls, *(*uintptr)(unsafe.Pointer(bp + 8)), bp, 4, 0) rc = Xsqlite3_blob_close(tls, *(*uintptr)(unsafe.Pointer(bp + 8))) @@ -149802,7 +149860,7 @@ } fts5IndexPrepareStmt(tls, p, bp+24, Xsqlite3_mprintf(tls, - ts+35676, + ts+35723, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, (*Fts5StructureSegment)(unsafe.Pointer(pSeg)).FiSegid))) for (*Fts5Index)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) { @@ -149972,7 +150030,7 @@ } else { (*Fts5Buffer)(unsafe.Pointer(bp + 16)).Fn = 0 fts5SegiterPoslist(tls, p, *(*uintptr)(unsafe.Pointer(bp))+96+uintptr((*Fts5CResult)(unsafe.Pointer((*Fts5Iter)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FaFirst+1*4)).FiFirst)*120, uintptr(0), bp+16) - sqlite3Fts5BufferAppendBlob(tls, p+52, bp+16, uint32(4), ts+35762) + sqlite3Fts5BufferAppendBlob(tls, p+52, bp+16, uint32(4), ts+35809) for 0 == sqlite3Fts5PoslistNext64(tls, (*Fts5Buffer)(unsafe.Pointer(bp+16)).Fp, (*Fts5Buffer)(unsafe.Pointer(bp+16)).Fn, bp+32, bp+40) { var iCol int32 = int32(*(*I64)(unsafe.Pointer(bp + 40)) >> 32) var iTokOff int32 = int32(*(*I64)(unsafe.Pointer(bp + 40)) & int64(0x7FFFFFFF)) @@ -150243,7 +150301,7 @@ if (*Fts5Config)(unsafe.Pointer(pConfig)).FbLock != 0 { (*Fts5Table)(unsafe.Pointer(pTab)).Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+35767, 0) + ts+35814, 0) return SQLITE_ERROR } @@ -150667,7 +150725,7 @@ (*Fts5Sorter)(unsafe.Pointer(pSorter)).FnIdx = nPhrase rc = fts5PrepareStatement(tls, pSorter, pConfig, - ts+35806, + ts+35853, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zRank, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, func() uintptr { if zRankArgs != 0 { @@ -150683,9 +150741,9 @@ }(), func() uintptr { if bDesc != 0 { - return ts + 35861 + return ts + 35908 } - return ts + 35866 + return ts + 35913 }())) (*Fts5Cursor)(unsafe.Pointer(pCsr)).FpSorter = pSorter @@ -150731,12 +150789,12 @@ (*Fts5Cursor)(unsafe.Pointer(pCsr)).FePlan = FTS5_PLAN_SPECIAL - if n == 5 && 0 == Xsqlite3_strnicmp(tls, ts+35870, z, n) { + if n == 5 && 0 == Xsqlite3_strnicmp(tls, ts+35917, z, n) { (*Fts5Cursor)(unsafe.Pointer(pCsr)).FiSpecial = I64(sqlite3Fts5IndexReads(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.FpIndex)) } else if n == 2 && 0 == Xsqlite3_strnicmp(tls, ts+5460, z, n) { (*Fts5Cursor)(unsafe.Pointer(pCsr)).FiSpecial = (*Fts5Cursor)(unsafe.Pointer(pCsr)).FiCsrId } else { - (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, ts+35876, libc.VaList(bp, n, z)) + (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, ts+35923, libc.VaList(bp, n, z)) rc = SQLITE_ERROR } @@ -150767,7 +150825,7 @@ var zRankArgs uintptr = (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRankArgs if zRankArgs != 0 { - var zSql uintptr = sqlite3Fts5Mprintf(tls, bp+16, ts+35904, libc.VaList(bp, zRankArgs)) + var zSql uintptr = sqlite3Fts5Mprintf(tls, bp+16, ts+35951, libc.VaList(bp, zRankArgs)) if zSql != 0 { *(*uintptr)(unsafe.Pointer(bp + 24)) = uintptr(0) 
*(*int32)(unsafe.Pointer(bp + 16)) = Xsqlite3_prepare_v3(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, zSql, -1, @@ -150798,7 +150856,7 @@ if *(*int32)(unsafe.Pointer(bp + 16)) == SQLITE_OK { pAux = fts5FindAuxiliary(tls, pTab, zRank) if pAux == uintptr(0) { - (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, ts+35914, libc.VaList(bp+8, zRank)) + (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, ts+35961, libc.VaList(bp+8, zRank)) *(*int32)(unsafe.Pointer(bp + 16)) = SQLITE_ERROR } } @@ -150830,14 +150888,14 @@ *(*int32)(unsafe.Pointer(pCsr + 80)) |= FTS5CSR_FREE_ZRANK } else if rc == SQLITE_ERROR { (*Sqlite3_vtab)(unsafe.Pointer((*Fts5Cursor)(unsafe.Pointer(pCsr)).Fbase.FpVtab)).FzErrMsg = Xsqlite3_mprintf(tls, - ts+35935, libc.VaList(bp, z)) + ts+35982, libc.VaList(bp, z)) } } else { if (*Fts5Config)(unsafe.Pointer(pConfig)).FzRank != 0 { (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRank = (*Fts5Config)(unsafe.Pointer(pConfig)).FzRank (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRankArgs = (*Fts5Config)(unsafe.Pointer(pConfig)).FzRankArgs } else { - (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRank = ts + 34318 + (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRank = ts + 34365 (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRankArgs = uintptr(0) } } @@ -150893,7 +150951,7 @@ goto __1 } (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+35767, 0) + ts+35814, 0) return SQLITE_ERROR __1: ; @@ -151110,7 +151168,7 @@ goto __40 } *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(pConfig)).FpzErrmsg)) = Xsqlite3_mprintf(tls, - ts+35968, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) + ts+36015, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) rc = SQLITE_ERROR goto __41 __40: @@ -151255,28 +151313,28 @@ var rc int32 = SQLITE_OK *(*int32)(unsafe.Pointer(bp)) = 0 - if 0 == Xsqlite3_stricmp(tls, ts+36004, zCmd) { + if 0 == Xsqlite3_stricmp(tls, ts+36051, zCmd) { if (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NORMAL { fts5SetVtabError(tls, pTab, - ts+36015, 0) + ts+36062, 0) rc = SQLITE_ERROR } else { rc = sqlite3Fts5StorageDeleteAll(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage) } - } else if 0 == Xsqlite3_stricmp(tls, ts+36095, zCmd) { + } else if 0 == Xsqlite3_stricmp(tls, ts+36142, zCmd) { if (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NONE { fts5SetVtabError(tls, pTab, - ts+36103, 0) + ts+36150, 0) rc = SQLITE_ERROR } else { rc = sqlite3Fts5StorageRebuild(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage) } } else if 0 == Xsqlite3_stricmp(tls, ts+17356, zCmd) { rc = sqlite3Fts5StorageOptimize(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage) - } else if 0 == Xsqlite3_stricmp(tls, ts+36159, zCmd) { + } else if 0 == Xsqlite3_stricmp(tls, ts+36206, zCmd) { var nMerge int32 = Xsqlite3_value_int(tls, pVal) rc = sqlite3Fts5StorageMerge(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage, nMerge) - } else if 0 == Xsqlite3_stricmp(tls, ts+36165, zCmd) { + } else if 0 == Xsqlite3_stricmp(tls, ts+36212, zCmd) { var iArg int32 = Xsqlite3_value_int(tls, pVal) rc = sqlite3Fts5StorageIntegrity(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage, iArg) } else { @@ -151347,12 +151405,12 @@ if eType0 == SQLITE_INTEGER && fts5IsContentless(tls, pTab) != 0 { (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+36181, + ts+36228, libc.VaList(bp, func() uintptr { if nArg > 1 { - return ts + 20879 + return ts + 20926 } - 
return ts + 36218 + return ts + 36265 }(), (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) *(*int32)(unsafe.Pointer(bp + 16)) = SQLITE_ERROR } else if nArg == 1 { @@ -151982,7 +152040,7 @@ pCsr = fts5CursorFromCsrid(tls, (*Fts5Auxiliary)(unsafe.Pointer(pAux)).FpGlobal, iCsrId) if pCsr == uintptr(0) || (*Fts5Cursor)(unsafe.Pointer(pCsr)).FePlan == 0 { - var zErr uintptr = Xsqlite3_mprintf(tls, ts+36230, libc.VaList(bp, iCsrId)) + var zErr uintptr = Xsqlite3_mprintf(tls, ts+36277, libc.VaList(bp, iCsrId)) Xsqlite3_result_error(tls, context, zErr, -1) Xsqlite3_free(tls, zErr) } else { @@ -152226,7 +152284,7 @@ }()) if pMod == uintptr(0) { rc = SQLITE_ERROR - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+36251, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(azArg)))) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+36298, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(azArg)))) } else { rc = (*struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 @@ -152245,7 +152303,7 @@ (*Fts5Config)(unsafe.Pointer(pConfig)).FpTokApi = pMod + 16 if rc != SQLITE_OK { if pzErr != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+36273, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+36320, 0) } } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FePattern = sqlite3Fts5TokenizerPattern(tls, @@ -152292,7 +152350,7 @@ var ppApi uintptr _ = nArg - ppApi = Xsqlite3_value_pointer(tls, *(*uintptr)(unsafe.Pointer(apArg)), ts+36304) + ppApi = Xsqlite3_value_pointer(tls, *(*uintptr)(unsafe.Pointer(apArg)), ts+36351) if ppApi != 0 { *(*uintptr)(unsafe.Pointer(ppApi)) = pGlobal } @@ -152301,7 +152359,7 @@ func fts5SourceIdFunc(tls *libc.TLS, pCtx uintptr, nArg int32, apUnused uintptr) { _ = nArg _ = apUnused - Xsqlite3_result_text(tls, pCtx, ts+36317, -1, libc.UintptrFromInt32(-1)) + Xsqlite3_result_text(tls, pCtx, ts+36364, -1, libc.UintptrFromInt32(-1)) } func fts5ShadowName(tls *libc.TLS, zName uintptr) int32 { @@ -152315,7 +152373,7 @@ } var azName2 = [5]uintptr{ - ts + 36408, ts + 34507, ts + 25506, ts + 34858, ts + 11891, + ts + 36455, ts + 34554, ts + 25553, ts + 34905, ts + 11891, } func fts5Init(tls *libc.TLS, db uintptr) int32 { @@ -152339,7 +152397,7 @@ (*Fts5Global)(unsafe.Pointer(pGlobal)).Fapi.FxFindTokenizer = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr, uintptr) int32 }{fts5FindTokenizer})) - rc = Xsqlite3_create_module_v2(tls, db, ts+36415, uintptr(unsafe.Pointer(&fts5Mod)), p, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5ModuleDestroy}))) + rc = Xsqlite3_create_module_v2(tls, db, ts+36462, uintptr(unsafe.Pointer(&fts5Mod)), p, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5ModuleDestroy}))) if rc == SQLITE_OK { rc = sqlite3Fts5IndexInit(tls, db) } @@ -152357,13 +152415,13 @@ } if rc == SQLITE_OK { rc = Xsqlite3_create_function(tls, - db, ts+36415, 1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { + db, ts+36462, 1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{fts5Fts5Func})), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { rc = Xsqlite3_create_function(tls, - db, ts+36420, 0, + db, ts+36467, 0, SQLITE_UTF8|SQLITE_DETERMINISTIC|SQLITE_INNOCUOUS, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) @@ -152420,17 +152478,17 @@ if *(*uintptr)(unsafe.Pointer(p + 40 + uintptr(eStmt)*8)) == uintptr(0) { *(*[11]uintptr)(unsafe.Pointer(bp + 128)) = [11]uintptr{ - ts + 36435, - 
ts + 36503, - ts + 36572, - ts + 36605, - ts + 36644, - ts + 36684, - ts + 36723, - ts + 36764, - ts + 36803, - ts + 36845, - ts + 36885, + ts + 36482, + ts + 36550, + ts + 36619, + ts + 36652, + ts + 36691, + ts + 36731, + ts + 36770, + ts + 36811, + ts + 36850, + ts + 36892, + ts + 36932, } var pC uintptr = (*Fts5Storage)(unsafe.Pointer(p)).FpConfig var zSql uintptr = uintptr(0) @@ -152532,18 +152590,18 @@ defer tls.Free(80) var rc int32 = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36908, + ts+36955, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+37012, + ts+37059, libc.VaList(bp+48, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) } if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NORMAL { rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+37050, + ts+37097, libc.VaList(bp+64, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) } return rc @@ -152555,7 +152613,7 @@ if *(*int32)(unsafe.Pointer(pRc)) == SQLITE_OK { *(*int32)(unsafe.Pointer(pRc)) = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+37088, + ts+37135, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zTail, zName, zTail)) } } @@ -152567,14 +152625,14 @@ var pConfig uintptr = (*Fts5Storage)(unsafe.Pointer(pStorage)).FpConfig *(*int32)(unsafe.Pointer(bp)) = sqlite3Fts5StorageSync(tls, pStorage) - fts5StorageRenameOne(tls, pConfig, bp, ts+25506, zName) + fts5StorageRenameOne(tls, pConfig, bp, ts+25553, zName) fts5StorageRenameOne(tls, pConfig, bp, ts+11891, zName) - fts5StorageRenameOne(tls, pConfig, bp, ts+36408, zName) + fts5StorageRenameOne(tls, pConfig, bp, ts+36455, zName) if (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { - fts5StorageRenameOne(tls, pConfig, bp, ts+34858, zName) + fts5StorageRenameOne(tls, pConfig, bp, ts+34905, zName) } if (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NORMAL { - fts5StorageRenameOne(tls, pConfig, bp, ts+34507, zName) + fts5StorageRenameOne(tls, pConfig, bp, ts+34554, zName) } return *(*int32)(unsafe.Pointer(bp)) } @@ -152586,17 +152644,17 @@ var rc int32 *(*uintptr)(unsafe.Pointer(bp + 64)) = uintptr(0) - rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, bp+64, ts+37130, + rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, bp+64, ts+37177, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zPost, zDefn, func() uintptr { if bWithout != 0 { - return ts + 30159 + return ts + 30206 } return ts + 1547 }())) if *(*uintptr)(unsafe.Pointer(bp + 64)) != 0 { *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, - ts+37160, + ts+37207, libc.VaList(bp+40, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zPost, *(*uintptr)(unsafe.Pointer(bp + 64)))) Xsqlite3_free(tls, *(*uintptr)(unsafe.Pointer(bp + 64))) } @@ -152633,27 +152691,27 @@ } else { var i int32 var iOff int32 - 
Xsqlite3_snprintf(tls, nDefn, zDefn, ts+37204, 0) + Xsqlite3_snprintf(tls, nDefn, zDefn, ts+37251, 0) iOff = int32(libc.Xstrlen(tls, zDefn)) for i = 0; i < (*Fts5Config)(unsafe.Pointer(pConfig)).FnCol; i++ { - Xsqlite3_snprintf(tls, nDefn-iOff, zDefn+uintptr(iOff), ts+37227, libc.VaList(bp, i)) + Xsqlite3_snprintf(tls, nDefn-iOff, zDefn+uintptr(iOff), ts+37274, libc.VaList(bp, i)) iOff = iOff + int32(libc.Xstrlen(tls, zDefn+uintptr(iOff))) } - rc = sqlite3Fts5CreateTable(tls, pConfig, ts+34507, zDefn, 0, pzErr) + rc = sqlite3Fts5CreateTable(tls, pConfig, ts+34554, zDefn, 0, pzErr) } Xsqlite3_free(tls, zDefn) } if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { rc = sqlite3Fts5CreateTable(tls, - pConfig, ts+34858, ts+37233, 0, pzErr) + pConfig, ts+34905, ts+37280, 0, pzErr) } if rc == SQLITE_OK { rc = sqlite3Fts5CreateTable(tls, - pConfig, ts+36408, ts+37265, 1, pzErr) + pConfig, ts+36455, ts+37312, 1, pzErr) } if rc == SQLITE_OK { - rc = sqlite3Fts5StorageConfigValue(tls, p, ts+35004, uintptr(0), FTS5_CURRENT_VERSION) + rc = sqlite3Fts5StorageConfigValue(tls, p, ts+35051, uintptr(0), FTS5_CURRENT_VERSION) } } @@ -152859,12 +152917,12 @@ (*Fts5Storage)(unsafe.Pointer(p)).FbTotalsValid = 0 rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+37282, + ts+37329, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+37332, + ts+37379, libc.VaList(bp+32, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) } @@ -152872,7 +152930,7 @@ rc = sqlite3Fts5IndexReinit(tls, (*Fts5Storage)(unsafe.Pointer(p)).FpIndex) } if rc == SQLITE_OK { - rc = sqlite3Fts5StorageConfigValue(tls, p, ts+35004, uintptr(0), FTS5_CURRENT_VERSION) + rc = sqlite3Fts5StorageConfigValue(tls, p, ts+35051, uintptr(0), FTS5_CURRENT_VERSION) } return rc } @@ -153048,7 +153106,7 @@ var zSql uintptr var rc int32 - zSql = Xsqlite3_mprintf(tls, ts+37361, + zSql = Xsqlite3_mprintf(tls, ts+37408, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zSuffix)) if zSql == uintptr(0) { rc = SQLITE_NOMEM @@ -153230,14 +153288,14 @@ if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NORMAL { *(*I64)(unsafe.Pointer(bp + 48)) = int64(0) - rc = fts5StorageCount(tls, p, ts+34507, bp+48) + rc = fts5StorageCount(tls, p, ts+34554, bp+48) if rc == SQLITE_OK && *(*I64)(unsafe.Pointer(bp + 48)) != (*Fts5Storage)(unsafe.Pointer(p)).FnTotalRow { rc = SQLITE_CORRUPT | int32(1)<<8 } } if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { *(*I64)(unsafe.Pointer(bp + 56)) = int64(0) - rc = fts5StorageCount(tls, p, ts+34858, bp+56) + rc = fts5StorageCount(tls, p, ts+34905, bp+56) if rc == SQLITE_OK && *(*I64)(unsafe.Pointer(bp + 56)) != (*Fts5Storage)(unsafe.Pointer(p)).FnTotalRow { rc = SQLITE_CORRUPT | int32(1)<<8 } @@ -153432,9 +153490,9 @@ libc.X__builtin___memcpy_chk(tls, p, uintptr(unsafe.Pointer(&aAsciiTokenChar)), uint64(unsafe.Sizeof(aAsciiTokenChar)), libc.X__builtin_object_size(tls, p, 0)) for i = 0; rc == SQLITE_OK && i < nArg; i = i + 2 { var zArg uintptr = *(*uintptr)(unsafe.Pointer(azArg + uintptr(i+1)*8)) - if 0 == 
Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37393) { + if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37440) { fts5AsciiAddExceptions(tls, p, zArg, 1) - } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37404) { + } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37451) { fts5AsciiAddExceptions(tls, p, zArg, 0) } else { rc = SQLITE_ERROR @@ -153649,7 +153707,7 @@ } else { p = Xsqlite3_malloc(tls, int32(unsafe.Sizeof(Unicode61Tokenizer{}))) if p != 0 { - var zCat uintptr = ts + 37415 + var zCat uintptr = ts + 37462 var i int32 libc.X__builtin___memset_chk(tls, p, 0, uint64(unsafe.Sizeof(Unicode61Tokenizer{})), libc.X__builtin_object_size(tls, p, 0)) @@ -153661,7 +153719,7 @@ } for i = 0; rc == SQLITE_OK && i < nArg; i = i + 2 { - if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37424) { + if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37471) { zCat = *(*uintptr)(unsafe.Pointer(azArg + uintptr(i+1)*8)) } } @@ -153672,18 +153730,18 @@ for i = 0; rc == SQLITE_OK && i < nArg; i = i + 2 { var zArg uintptr = *(*uintptr)(unsafe.Pointer(azArg + uintptr(i+1)*8)) - if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37435) { + if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37482) { if int32(*(*int8)(unsafe.Pointer(zArg))) != '0' && int32(*(*int8)(unsafe.Pointer(zArg))) != '1' && int32(*(*int8)(unsafe.Pointer(zArg))) != '2' || *(*int8)(unsafe.Pointer(zArg + 1)) != 0 { rc = SQLITE_ERROR } else { (*Unicode61Tokenizer)(unsafe.Pointer(p)).FeRemoveDiacritic = int32(*(*int8)(unsafe.Pointer(zArg))) - '0' } - } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37393) { + } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37440) { rc = fts5UnicodeAddExceptions(tls, p, zArg, 1) - } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37404) { + } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37451) { rc = fts5UnicodeAddExceptions(tls, p, zArg, 0) - } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37424) { + } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37471) { } else { rc = SQLITE_ERROR } @@ -153959,7 +154017,7 @@ var rc int32 = SQLITE_OK var pRet uintptr *(*uintptr)(unsafe.Pointer(bp)) = uintptr(0) - var zBase uintptr = ts + 37453 + var zBase uintptr = ts + 37500 if nArg > 0 { zBase = *(*uintptr)(unsafe.Pointer(azArg)) @@ -154101,7 +154159,7 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*int8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'a': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37463, aBuf+uintptr(nBuf-2), uint64(2)) { + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37510, aBuf+uintptr(nBuf-2), uint64(2)) { if fts5Porter_MGt1(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 } @@ -154109,11 +154167,11 @@ break case 'c': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37466, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37513, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37471, 
aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37518, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } @@ -154121,7 +154179,7 @@ break case 'e': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37476, aBuf+uintptr(nBuf-2), uint64(2)) { + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37523, aBuf+uintptr(nBuf-2), uint64(2)) { if fts5Porter_MGt1(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 } @@ -154129,7 +154187,7 @@ break case 'i': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37479, aBuf+uintptr(nBuf-2), uint64(2)) { + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37526, aBuf+uintptr(nBuf-2), uint64(2)) { if fts5Porter_MGt1(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 } @@ -154137,11 +154195,11 @@ break case 'l': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37482, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37529, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37487, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37534, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } @@ -154149,19 +154207,19 @@ break case 'n': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37492, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37539, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37496, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37543, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt1(tls, aBuf, nBuf-5) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37502, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37549, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } - } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37507, aBuf+uintptr(nBuf-3), uint64(3)) { + } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37554, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -154169,11 +154227,11 @@ break case 'o': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37511, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37558, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1_and_S_or_T(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } - } else if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37515, aBuf+uintptr(nBuf-2), uint64(2)) { + } else if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37562, aBuf+uintptr(nBuf-2), uint64(2)) { if fts5Porter_MGt1(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 } @@ -154181,7 +154239,7 @@ break case 's': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37518, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37565, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -154189,11 +154247,11 @@ break case 't': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37522, aBuf+uintptr(nBuf-3), uint64(3)) 
{ + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37569, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } - } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37526, aBuf+uintptr(nBuf-3), uint64(3)) { + } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37573, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -154201,7 +154259,7 @@ break case 'u': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37530, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37577, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -154209,7 +154267,7 @@ break case 'v': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37534, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37581, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -154217,7 +154275,7 @@ break case 'z': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37538, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37585, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -154233,24 +154291,24 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*int8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'a': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37542, aBuf+uintptr(nBuf-2), uint64(2)) { - libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-2), ts+37522, uint64(3), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-2), 0)) + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37589, aBuf+uintptr(nBuf-2), uint64(2)) { + libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-2), ts+37569, uint64(3), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-2), 0)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 + 3 ret = 1 } break case 'b': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37545, aBuf+uintptr(nBuf-2), uint64(2)) { - libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-2), ts+37548, uint64(3), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-2), 0)) + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37592, aBuf+uintptr(nBuf-2), uint64(2)) { + libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-2), ts+37595, uint64(3), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-2), 0)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 + 3 ret = 1 } break case 'i': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37552, aBuf+uintptr(nBuf-2), uint64(2)) { - libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-2), ts+37538, uint64(3), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-2), 0)) + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37599, aBuf+uintptr(nBuf-2), uint64(2)) { + libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-2), ts+37585, uint64(3), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-2), 0)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 + 3 ret = 1 } @@ -154265,44 +154323,44 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*int8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'a': - if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37555, aBuf+uintptr(nBuf-7), uint64(7)) { + if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37602, aBuf+uintptr(nBuf-7), uint64(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-7), ts+37522, uint64(3), libc.X__builtin_object_size(tls, 
aBuf+uintptr(nBuf-7), 0)) + libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-7), ts+37569, uint64(3), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-7), 0)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } - } else if nBuf > 6 && 0 == libc.Xmemcmp(tls, ts+37563, aBuf+uintptr(nBuf-6), uint64(6)) { + } else if nBuf > 6 && 0 == libc.Xmemcmp(tls, ts+37610, aBuf+uintptr(nBuf-6), uint64(6)) { if fts5Porter_MGt0(tls, aBuf, nBuf-6) != 0 { - libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-6), ts+37570, uint64(4), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-6), 0)) + libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-6), ts+37617, uint64(4), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-6), 0)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 6 + 4 } } break case 'c': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37575, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37622, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-4), ts+37471, uint64(4), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-4), 0)) + libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-4), ts+37518, uint64(4), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-4), 0)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 4 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37580, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37627, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-4), ts+37466, uint64(4), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-4), 0)) + libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-4), ts+37513, uint64(4), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-4), 0)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 4 } } break case 'e': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37585, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37632, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-4), ts+37538, uint64(3), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-4), 0)) + libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-4), ts+37585, uint64(3), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-4), 0)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 3 } } break case 'g': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37590, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37637, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-4), ts+15883, uint64(3), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-4), 0)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 3 @@ -154311,91 +154369,91 @@ break case 'l': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37595, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37642, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt0(tls, aBuf, nBuf-3) != 0 { - libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-3), ts+37548, uint64(3), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-3), 0)) + libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-3), ts+37595, uint64(3), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-3), 0)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 + 3 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37599, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 
4 && 0 == libc.Xmemcmp(tls, ts+37646, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-4), ts+37463, uint64(2), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-4), 0)) + libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-4), ts+37510, uint64(2), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-4), 0)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 2 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37604, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37651, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-5), ts+37507, uint64(3), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-5), 0)) + libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-5), ts+37554, uint64(3), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-5), 0)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 3 } - } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37610, aBuf+uintptr(nBuf-3), uint64(3)) { + } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37657, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt0(tls, aBuf, nBuf-3) != 0 { - libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-3), ts+37614, uint64(1), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-3), 0)) + libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-3), ts+37661, uint64(1), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-3), 0)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 + 1 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37616, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37663, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-5), ts+37530, uint64(3), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-5), 0)) + libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-5), ts+37577, uint64(3), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-5), 0)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 3 } } break case 'o': - if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37622, aBuf+uintptr(nBuf-7), uint64(7)) { + if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37669, aBuf+uintptr(nBuf-7), uint64(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-7), ts+37538, uint64(3), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-7), 0)) + libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-7), ts+37585, uint64(3), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-7), 0)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37630, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37677, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-5), ts+37522, uint64(3), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-5), 0)) + libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-5), ts+37569, uint64(3), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-5), 0)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 3 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37636, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37683, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-4), ts+37522, uint64(3), 
libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-4), 0)) + libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-4), ts+37569, uint64(3), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-4), 0)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 3 } } break case 's': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37641, aBuf+uintptr(nBuf-5), uint64(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37688, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-5), ts+37463, uint64(2), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-5), 0)) + libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-5), ts+37510, uint64(2), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-5), 0)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } - } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37647, aBuf+uintptr(nBuf-7), uint64(7)) { + } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37694, aBuf+uintptr(nBuf-7), uint64(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-7), ts+37534, uint64(3), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-7), 0)) + libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-7), ts+37581, uint64(3), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-7), 0)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } - } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37655, aBuf+uintptr(nBuf-7), uint64(7)) { + } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37702, aBuf+uintptr(nBuf-7), uint64(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-7), ts+37663, uint64(3), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-7), 0)) + libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-7), ts+37710, uint64(3), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-7), 0)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } - } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37667, aBuf+uintptr(nBuf-7), uint64(7)) { + } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37714, aBuf+uintptr(nBuf-7), uint64(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-7), ts+37530, uint64(3), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-7), 0)) + libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-7), ts+37577, uint64(3), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-7), 0)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } } break case 't': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37675, aBuf+uintptr(nBuf-5), uint64(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37722, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-5), ts+37463, uint64(2), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-5), 0)) + libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-5), ts+37510, uint64(2), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-5), 0)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37681, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37728, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-5), ts+37534, uint64(3), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-5), 0)) + libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-5), ts+37581, uint64(3), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-5), 0)) 
*(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 3 } - } else if nBuf > 6 && 0 == libc.Xmemcmp(tls, ts+37687, aBuf+uintptr(nBuf-6), uint64(6)) { + } else if nBuf > 6 && 0 == libc.Xmemcmp(tls, ts+37734, aBuf+uintptr(nBuf-6), uint64(6)) { if fts5Porter_MGt0(tls, aBuf, nBuf-6) != 0 { - libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-6), ts+37548, uint64(3), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-6), 0)) + libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-6), ts+37595, uint64(3), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-6), 0)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 6 + 3 } } @@ -154410,16 +154468,16 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*int8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'a': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37694, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37741, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-4), ts+37479, uint64(2), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-4), 0)) + libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-4), ts+37526, uint64(2), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-4), 0)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 2 } } break case 's': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37699, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37746, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } @@ -154427,21 +154485,21 @@ break case 't': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37704, aBuf+uintptr(nBuf-5), uint64(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37751, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-5), ts+37479, uint64(2), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-5), 0)) + libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-5), ts+37526, uint64(2), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-5), 0)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37710, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37757, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-5), ts+37479, uint64(2), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-5), 0)) + libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-5), ts+37526, uint64(2), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-5), 0)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } } break case 'u': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37663, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37710, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt0(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -154449,7 +154507,7 @@ break case 'v': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37716, aBuf+uintptr(nBuf-5), uint64(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37763, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 } @@ -154457,9 +154515,9 @@ break case 'z': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37722, aBuf+uintptr(nBuf-5), uint64(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37769, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 
0 { - libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-5), ts+37463, uint64(2), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-5), 0)) + libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-5), ts+37510, uint64(2), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-5), 0)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } } @@ -154474,12 +154532,12 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*int8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'e': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37728, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37775, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt0(tls, aBuf, nBuf-3) != 0 { - libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-3), ts+37732, uint64(2), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-3), 0)) + libc.X__builtin___memcpy_chk(tls, aBuf+uintptr(nBuf-3), ts+37779, uint64(2), libc.X__builtin_object_size(tls, aBuf+uintptr(nBuf-3), 0)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 + 2 } - } else if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37735, aBuf+uintptr(nBuf-2), uint64(2)) { + } else if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37782, aBuf+uintptr(nBuf-2), uint64(2)) { if fts5Porter_Vowel(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 ret = 1 @@ -154488,7 +154546,7 @@ break case 'n': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37738, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37785, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_Vowel(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 ret = 1 @@ -154644,7 +154702,7 @@ (*TrigramTokenizer)(unsafe.Pointer(pNew)).FbFold = 1 for i = 0; rc == SQLITE_OK && i < nArg; i = i + 2 { var zArg uintptr = *(*uintptr)(unsafe.Pointer(azArg + uintptr(i+1)*8)) - if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37742) { + if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37789) { if int32(*(*int8)(unsafe.Pointer(zArg))) != '0' && int32(*(*int8)(unsafe.Pointer(zArg))) != '1' || *(*int8)(unsafe.Pointer(zArg + 1)) != 0 { rc = SQLITE_ERROR } else { @@ -154824,22 +154882,22 @@ defer tls.Free(128) *(*[4]BuiltinTokenizer)(unsafe.Pointer(bp)) = [4]BuiltinTokenizer{ - {FzName: ts + 37453, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { + {FzName: ts + 37500, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 }{fts5UnicodeCreate})), FxDelete: *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5UnicodeDelete})), FxTokenize: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, int32, uintptr) int32 }{fts5UnicodeTokenize}))}}, - {FzName: ts + 37757, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { + {FzName: ts + 37804, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 }{fts5AsciiCreate})), FxDelete: *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5AsciiDelete})), FxTokenize: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, int32, uintptr) int32 }{fts5AsciiTokenize}))}}, - {FzName: ts + 37763, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { + {FzName: ts + 37810, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 
}{fts5PorterCreate})), FxDelete: *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5PorterDelete})), FxTokenize: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, int32, uintptr) int32 }{fts5PorterTokenize}))}}, - {FzName: ts + 37770, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { + {FzName: ts + 37817, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 }{fts5TriCreate})), FxDelete: *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5TriDelete})), FxTokenize: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, int32, uintptr) int32 @@ -155982,14 +156040,14 @@ var zCopy uintptr = sqlite3Fts5Strndup(tls, bp+8, zType, -1) if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { sqlite3Fts5Dequote(tls, zCopy) - if Xsqlite3_stricmp(tls, zCopy, ts+37778) == 0 { + if Xsqlite3_stricmp(tls, zCopy, ts+37825) == 0 { *(*int32)(unsafe.Pointer(peType)) = FTS5_VOCAB_COL - } else if Xsqlite3_stricmp(tls, zCopy, ts+37782) == 0 { + } else if Xsqlite3_stricmp(tls, zCopy, ts+37829) == 0 { *(*int32)(unsafe.Pointer(peType)) = FTS5_VOCAB_ROW - } else if Xsqlite3_stricmp(tls, zCopy, ts+37786) == 0 { + } else if Xsqlite3_stricmp(tls, zCopy, ts+37833) == 0 { *(*int32)(unsafe.Pointer(peType)) = FTS5_VOCAB_INSTANCE } else { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+37795, libc.VaList(bp, zCopy)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+37842, libc.VaList(bp, zCopy)) *(*int32)(unsafe.Pointer(bp + 8)) = SQLITE_ERROR } Xsqlite3_free(tls, zCopy) @@ -156015,19 +156073,19 @@ defer tls.Free(36) *(*[3]uintptr)(unsafe.Pointer(bp + 8)) = [3]uintptr{ - ts + 37829, - ts + 37869, - ts + 37904, + ts + 37876, + ts + 37916, + ts + 37951, } var pRet uintptr = uintptr(0) *(*int32)(unsafe.Pointer(bp + 32)) = SQLITE_OK var bDb int32 - bDb = libc.Bool32(argc == 6 && libc.Xstrlen(tls, *(*uintptr)(unsafe.Pointer(argv + 1*8))) == uint64(4) && libc.Xmemcmp(tls, ts+23802, *(*uintptr)(unsafe.Pointer(argv + 1*8)), uint64(4)) == 0) + bDb = libc.Bool32(argc == 6 && libc.Xstrlen(tls, *(*uintptr)(unsafe.Pointer(argv + 1*8))) == uint64(4) && libc.Xmemcmp(tls, ts+23849, *(*uintptr)(unsafe.Pointer(argv + 1*8)), uint64(4)) == 0) if argc != 5 && bDb == 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+37947, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+37994, 0) *(*int32)(unsafe.Pointer(bp + 32)) = SQLITE_ERROR } else { var nByte int32 @@ -156160,11 +156218,11 @@ if (*Fts5VocabTable)(unsafe.Pointer(pTab)).FbBusy != 0 { (*Sqlite3_vtab)(unsafe.Pointer(pVTab)).FzErrMsg = Xsqlite3_mprintf(tls, - ts+37980, libc.VaList(bp, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) + ts+38027, libc.VaList(bp, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) return SQLITE_ERROR } zSql = sqlite3Fts5Mprintf(tls, bp+64, - ts+38011, + ts+38058, libc.VaList(bp+16, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) if zSql != 0 { *(*int32)(unsafe.Pointer(bp + 64)) = Xsqlite3_prepare_v2(tls, (*Fts5VocabTable)(unsafe.Pointer(pTab)).Fdb, zSql, -1, bp+72, uintptr(0)) @@ -156188,7 +156246,7 @@ *(*uintptr)(unsafe.Pointer(bp + 72)) = uintptr(0) if 
*(*int32)(unsafe.Pointer(bp + 64)) == SQLITE_OK { (*Sqlite3_vtab)(unsafe.Pointer(pVTab)).FzErrMsg = Xsqlite3_mprintf(tls, - ts+38062, libc.VaList(bp+48, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) + ts+38109, libc.VaList(bp+48, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) *(*int32)(unsafe.Pointer(bp + 64)) = SQLITE_ERROR } } else { @@ -156583,7 +156641,7 @@ func sqlite3Fts5VocabInit(tls *libc.TLS, pGlobal uintptr, db uintptr) int32 { var p uintptr = pGlobal - return Xsqlite3_create_module_v2(tls, db, ts+38088, uintptr(unsafe.Pointer(&fts5Vocab)), p, uintptr(0)) + return Xsqlite3_create_module_v2(tls, db, ts+38135, uintptr(unsafe.Pointer(&fts5Vocab)), p, uintptr(0)) } var fts5Vocab = Sqlite3_module{ @@ -156605,7 +156663,7 @@ // ************* End of stmt.c *********************************************** // Return the source-id for this library func Xsqlite3_sourceid(tls *libc.TLS) uintptr { - return ts + 38098 + return ts + 38145 } func init() { @@ -157748,5 +157806,5 @@ *(*func(*libc.TLS, uintptr, int32, uintptr) int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&vfs_template)) + 128)) = rbuVfsGetLastError } -var ts1 = "3.41.0\x00ATOMIC_INTRINSICS=0\x00COMPILER=clang-14.0.0\x00DEFAULT_AUTOVACUUM\x00DEFAULT_CACHE_SIZE=-2000\x00DEFAULT_FILE_FORMAT=4\x00DEFAULT_JOURNAL_SIZE_LIMIT=-1\x00DEFAULT_MEMSTATUS=0\x00DEFAULT_MMAP_SIZE=0\x00DEFAULT_PAGE_SIZE=4096\x00DEFAULT_PCACHE_INITSZ=20\x00DEFAULT_RECURSIVE_TRIGGERS\x00DEFAULT_SECTOR_SIZE=4096\x00DEFAULT_SYNCHRONOUS=2\x00DEFAULT_WAL_AUTOCHECKPOINT=1000\x00DEFAULT_WAL_SYNCHRONOUS=2\x00DEFAULT_WORKER_THREADS=0\x00ENABLE_COLUMN_METADATA\x00ENABLE_FTS5\x00ENABLE_GEOPOLY\x00ENABLE_MATH_FUNCTIONS\x00ENABLE_MEMORY_MANAGEMENT\x00ENABLE_OFFSET_SQL_FUNC\x00ENABLE_PREUPDATE_HOOK\x00ENABLE_RBU\x00ENABLE_RTREE\x00ENABLE_SESSION\x00ENABLE_SNAPSHOT\x00ENABLE_STAT4\x00ENABLE_UNLOCK_NOTIFY\x00LIKE_DOESNT_MATCH_BLOBS\x00MALLOC_SOFT_LIMIT=1024\x00MAX_ATTACHED=10\x00MAX_COLUMN=2000\x00MAX_COMPOUND_SELECT=500\x00MAX_DEFAULT_PAGE_SIZE=8192\x00MAX_EXPR_DEPTH=1000\x00MAX_FUNCTION_ARG=127\x00MAX_LENGTH=1000000000\x00MAX_LIKE_PATTERN_LENGTH=50000\x00MAX_MMAP_SIZE=0x7fff0000\x00MAX_PAGE_COUNT=1073741823\x00MAX_PAGE_SIZE=65536\x00MAX_SQL_LENGTH=1000000000\x00MAX_TRIGGER_DEPTH=1000\x00MAX_VARIABLE_NUMBER=32766\x00MAX_VDBE_OP=250000000\x00MAX_WORKER_THREADS=8\x00MUTEX_NOOP\x00SOUNDEX\x00SYSTEM_MALLOC\x00TEMP_STORE=1\x00THREADSAFE=1\x00BINARY\x00ANY\x00BLOB\x00INT\x00INTEGER\x00REAL\x00TEXT\x0020b:20e\x0020c:20e\x0020e\x0040f-21a-21d\x00now\x00local time unavailable\x00second\x00minute\x00hour\x00\x00\x00day\x00\x00\x00\x00month\x00\x00year\x00\x00\x00auto\x00julianday\x00localtime\x00unixepoch\x00utc\x00weekday \x00start of \x00month\x00year\x00day\x00%02d\x00%06.3f\x00%03d\x00%.16g\x00%lld\x00%04d\x00date\x00time\x00datetime\x00strftime\x00current_time\x00current_timestamp\x00current_date\x00failed to allocate %u bytes of memory\x00failed memory resize %u to %u bytes\x00out of memory\x000123456789ABCDEF0123456789abcdef\x00-x0\x00X0\x00%\x00NaN\x00Inf\x00\x00NULL\x00(NULL)\x00.\x00(join-%u)\x00(subquery-%u)\x00thstndrd\x00922337203685477580\x00API call with %s database connection 
pointer\x00unopened\x00invalid\x00Savepoint\x00AutoCommit\x00Transaction\x00Checkpoint\x00JournalMode\x00Vacuum\x00VFilter\x00VUpdate\x00Init\x00Goto\x00Gosub\x00InitCoroutine\x00Yield\x00MustBeInt\x00Jump\x00Once\x00If\x00IfNot\x00IsType\x00Not\x00IfNullRow\x00SeekLT\x00SeekLE\x00SeekGE\x00SeekGT\x00IfNotOpen\x00IfNoHope\x00NoConflict\x00NotFound\x00Found\x00SeekRowid\x00NotExists\x00Last\x00IfSmaller\x00SorterSort\x00Sort\x00Rewind\x00SorterNext\x00Prev\x00Next\x00IdxLE\x00IdxGT\x00IdxLT\x00Or\x00And\x00IdxGE\x00RowSetRead\x00RowSetTest\x00Program\x00FkIfZero\x00IsNull\x00NotNull\x00Ne\x00Eq\x00Gt\x00Le\x00Lt\x00Ge\x00ElseEq\x00IfPos\x00IfNotZero\x00DecrJumpZero\x00IncrVacuum\x00VNext\x00Filter\x00PureFunc\x00Function\x00Return\x00EndCoroutine\x00HaltIfNull\x00Halt\x00Integer\x00Int64\x00String\x00BeginSubrtn\x00Null\x00SoftNull\x00Blob\x00Variable\x00Move\x00Copy\x00SCopy\x00IntCopy\x00FkCheck\x00ResultRow\x00CollSeq\x00AddImm\x00RealAffinity\x00Cast\x00Permutation\x00Compare\x00IsTrue\x00ZeroOrNull\x00Offset\x00Column\x00TypeCheck\x00Affinity\x00MakeRecord\x00Count\x00ReadCookie\x00SetCookie\x00ReopenIdx\x00BitAnd\x00BitOr\x00ShiftLeft\x00ShiftRight\x00Add\x00Subtract\x00Multiply\x00Divide\x00Remainder\x00Concat\x00OpenRead\x00OpenWrite\x00BitNot\x00OpenDup\x00OpenAutoindex\x00String8\x00OpenEphemeral\x00SorterOpen\x00SequenceTest\x00OpenPseudo\x00Close\x00ColumnsUsed\x00SeekScan\x00SeekHit\x00Sequence\x00NewRowid\x00Insert\x00RowCell\x00Delete\x00ResetCount\x00SorterCompare\x00SorterData\x00RowData\x00Rowid\x00NullRow\x00SeekEnd\x00IdxInsert\x00SorterInsert\x00IdxDelete\x00DeferredSeek\x00IdxRowid\x00FinishSeek\x00Destroy\x00Clear\x00ResetSorter\x00CreateBtree\x00SqlExec\x00ParseSchema\x00LoadAnalysis\x00DropTable\x00DropIndex\x00Real\x00DropTrigger\x00IntegrityCk\x00RowSetAdd\x00Param\x00FkCounter\x00MemMax\x00OffsetLimit\x00AggInverse\x00AggStep\x00AggStep1\x00AggValue\x00AggFinal\x00Expire\x00CursorLock\x00CursorUnlock\x00TableLock\x00VBegin\x00VCreate\x00VDestroy\x00VOpen\x00VInitIn\x00VColumn\x00VRename\x00Pagecount\x00MaxPgcnt\x00ClrSubtype\x00FilterAdd\x00Trace\x00CursorHint\x00ReleaseReg\x00Noop\x00Explain\x00Abortable\x00open\x00close\x00access\x00getcwd\x00stat\x00fstat\x00ftruncate\x00fcntl\x00read\x00pread\x00pread64\x00write\x00pwrite\x00pwrite64\x00fchmod\x00fallocate\x00unlink\x00openDirectory\x00mkdir\x00rmdir\x00fchown\x00geteuid\x00mmap\x00munmap\x00mremap\x00getpagesize\x00readlink\x00lstat\x00ioctl\x00attempt to open \"%s\" as file descriptor %d\x00/dev/null\x00os_unix.c:%d: (%d) %s(%s) - %s\x00S\x00cannot fstat db file %s\x00file unlinked while open: %s\x00multiple links to file: %s\x00file renamed while open: %s\x00%s\x00full_fsync\x00%s-shm\x00readonly_shm\x00nfs\x00hfs\x00ufs\x00afpfs\x00smbfs\x00webdav\x00psow\x00unix-excl\x00%s.lock\x00/var/tmp\x00/usr/tmp\x00/tmp\x00SQLITE_TMPDIR\x00TMPDIR\x00%s/etilqs_%llx%c\x00modeof\x00msdos\x00exfat\x00SQLITE_FORCE_PROXY_LOCKING\x00:auto:\x00fsync\x00/dev/urandom\x00sqliteplocks\x00/\x00dummy\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00break\x00path error (len %d)\x00read error (len %d)\x00create failed (%d)\x00write failed (%d)\x00rename failed (%d)\x00broke stale lock on %s\n\x00failed to break stale lock on %s, %s\n\x00-conch\x00.lock\x00:auto: (not 
held)\x00unix\x00unix-none\x00unix-dotfile\x00unix-posix\x00unix-flock\x00unix-afp\x00unix-nfs\x00unix-proxy\x00memdb\x00memdb(%p,%lld)\x00PRAGMA \"%w\".page_count\x00ATTACH x AS %Q\x00recovered %d pages from %s\x00-journal\x00-wal\x00nolock\x00immutable\x00PRAGMA table_list\x00recovered %d frames from WAL file %s\x00cannot limit WAL size: %s\x00SQLite format 3\x00:memory:\x00@ \x00\n\x00invalid page number %d\x002nd reference to page %d\x00Failed to read ptrmap key=%d\x00Bad ptr map entry key=%d expected=(%d,%d) got=(%d,%d)\x00failed to get page %d\x00freelist leaf count too big on page %d\x00%s is %d but should be %d\x00size\x00overflow list length\x00Page %u: \x00unable to get the page. error code=%d\x00btreeInitPage() returns error code %d\x00free space corruption\x00On tree page %u cell %d: \x00On page %u at right child: \x00Offset %d out of range %d..%d\x00Extends off end of page\x00Rowid %lld out of order\x00Child page depth differs\x00Multiple uses for byte %u of page %u\x00Fragmentation of %d bytes reported as %d on page %u\x00Main freelist: \x00max rootpage (%d) disagrees with header (%d)\x00incremental_vacuum enabled with a max rootpage of zero\x00Page %d is never used\x00Pointer map page %d is referenced\x00unknown database %s\x00destination database is in use\x00source and destination must be distinct\x00%!.15g\x00-\x00%s%s\x00k(%d\x00B\x00,%s%s%s\x00N.\x00)\x00%.18s-%s\x00%s(%d)\x00%d\x00(blob)\x00vtab:%p\x00%c%u\x00]\x00program\x00?\x008\x0016LE\x0016BE\x00addr\x00opcode\x00p1\x00p2\x00p3\x00p4\x00p5\x00comment\x00id\x00parent\x00notused\x00detail\x00%.4c%s%.16c\x00MJ delete: %s\x00MJ collide: %s\x00-mj%06X9%02X\x00FOREIGN KEY constraint failed\x00a CHECK constraint\x00a generated column\x00an index\x00non-deterministic use of %s() in %s\x00API called with finalized prepared statement\x00API called with NULL prepared statement\x00string or blob too big\x00bind on a busy prepared statement: [%s]\x00-- \x00'%.*q'\x00zeroblob(%d)\x00x'\x00%02x\x00'\x00%s constraint failed\x00%z: %s\x00abort at %d in [%s]: %s\x00cannot store %s value in %s column %s.%s\x00cannot open savepoint - SQL statements in progress\x00no such savepoint: %s\x00cannot release savepoint - SQL statements in progress\x00cannot commit transaction - SQL statements in progress\x00cannot start a transaction within a transaction\x00cannot rollback - no transaction is active\x00cannot commit - no transaction is active\x00database schema has changed\x00index corruption\x00sqlite_master\x00SELECT*FROM\"%w\".%s WHERE %s ORDER BY rowid\x00too many levels of trigger recursion\x00cannot change %s wal mode from within a transaction\x00into\x00out of\x00database table is locked: %s\x00ValueList\x00-- %s\x00statement aborts at %d: [%s] %s\x00NOT NULL\x00UNIQUE\x00CHECK\x00FOREIGN KEY\x00cannot open value of type %s\x00null\x00real\x00integer\x00no such rowid: %lld\x00cannot open virtual table: %s\x00cannot open table without rowid: %s\x00cannot open view: %s\x00no such column: \"%s\"\x00foreign key\x00indexed\x00cannot open %s column for writing\x00sqlite_\x00sqlite_temp_master\x00sqlite_temp_schema\x00sqlite_schema\x00main\x00*\x00new\x00old\x00excluded\x00misuse of aliased aggregate %s\x00misuse of aliased window function %s\x00row value misused\x00double-quoted string literal: \"%w\"\x00coalesce\x00no such column\x00ambiguous column name\x00%s: %s.%s.%s\x00%s: %s.%s\x00%s: %s\x00partial index WHERE clauses\x00index expressions\x00CHECK constraints\x00generated columns\x00%s prohibited in %s\x00true\x00false\x00the \".\" 
operator\x00second argument to %#T() must be a constant between 0.0 and 1.0\x00not authorized to use function: %#T\x00non-deterministic functions\x00%#T() may not be used as a window function\x00window\x00aggregate\x00misuse of %s function %#T()\x00no such function: %#T\x00wrong number of arguments to function %#T()\x00FILTER may not be used with non-aggregate %#T()\x00subqueries\x00parameters\x00%r %s BY term out of range - should be between 1 and %d\x00too many terms in ORDER BY clause\x00ORDER\x00%r ORDER BY term does not match any column in the result set\x00too many terms in %s BY clause\x00HAVING clause on a non-aggregate query\x00GROUP\x00aggregate functions are not allowed in the GROUP BY clause\x00Expression tree is too large (maximum depth %d)\x00IN(...) element has %d term%s - expected %d\x00s\x000\x00too many arguments on function %T\x00unsafe use of %#T()\x00variable number must be between ?1 and ?%d\x00too many SQL variables\x00%d columns assigned %d values\x00too many columns in %s\x00_ROWID_\x00ROWID\x00OID\x00USING ROWID SEARCH ON TABLE %s FOR IN-OPERATOR\x00USING INDEX %s FOR IN-OPERATOR\x00sub-select returns %d columns - expected %d\x00REUSE LIST SUBQUERY %d\x00%sLIST SUBQUERY %d\x00CORRELATED \x00REUSE SUBQUERY %d\x00%sSCALAR SUBQUERY %d\x001\x000x\x00hex literal too big: %s%#T\x00generated column loop on \"%s\"\x00blob\x00text\x00numeric\x00flexnum\x00none\x00misuse of aggregate: %#T()\x00unknown function: %#T()\x00RAISE() may only be used within a trigger-program\x00B\x00C\x00D\x00E\x00F\x00table %s may not be altered\x00SELECT 1 FROM \"%w\".sqlite_master WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%' AND sqlite_rename_test(%Q, sql, type, name, %d, %Q, %d)=NULL \x00SELECT 1 FROM temp.sqlite_master WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%' AND sqlite_rename_test(%Q, sql, type, name, 1, %Q, %d)=NULL \x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_quotefix(%Q, sql)WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%'\x00UPDATE temp.sqlite_master SET sql = sqlite_rename_quotefix('temp', sql)WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%'\x00there is already another table or index with this name: %s\x00table\x00view %s may not be altered\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, %d) WHERE (type!='index' OR tbl_name=%Q COLLATE nocase)AND name NOT LIKE 'sqliteX_%%' ESCAPE 'X'\x00UPDATE %Q.sqlite_master SET tbl_name = %Q, name = CASE WHEN type='table' THEN %Q WHEN name LIKE 'sqliteX_autoindex%%' ESCAPE 'X' AND type='index' THEN 'sqlite_autoindex_' || %Q || substr(name,%d+18) ELSE name END WHERE tbl_name=%Q COLLATE nocase AND (type='table' OR type='index' OR type='trigger');\x00sqlite_sequence\x00UPDATE \"%w\".sqlite_sequence set name = %Q WHERE name = %Q\x00UPDATE sqlite_temp_schema SET sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, 1), tbl_name = CASE WHEN tbl_name=%Q COLLATE nocase AND sqlite_rename_test(%Q, sql, type, name, 1, 'after rename', 0) THEN %Q ELSE tbl_name END WHERE type IN ('view', 'trigger')\x00after rename\x00SELECT raise(ABORT,%Q) FROM \"%w\".\"%w\"\x00Cannot add a PRIMARY KEY column\x00Cannot add a UNIQUE column\x00Cannot add a REFERENCES column with non-NULL default value\x00Cannot add a NOT NULL column with default value NULL\x00Cannot add a column with non-constant default\x00cannot add a STORED column\x00UPDATE \"%w\".sqlite_master SET sql = printf('%%.%ds, ',sql) 
|| %Q || substr(sql,1+length(printf('%%.%ds',sql))) WHERE type = 'table' AND name = %Q\x00SELECT CASE WHEN quick_check GLOB 'CHECK*' THEN raise(ABORT,'CHECK constraint failed') ELSE raise(ABORT,'NOT NULL constraint failed') END FROM pragma_quick_check(%Q,%Q) WHERE quick_check GLOB 'CHECK*' OR quick_check GLOB 'NULL*'\x00virtual tables may not be altered\x00Cannot add a column to a view\x00sqlite_altertab_%s\x00view\x00virtual table\x00cannot %s %s \"%s\"\x00drop column from\x00rename columns of\x00no such column: \"%T\"\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, %d) WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND (type != 'index' OR tbl_name = %Q)\x00UPDATE temp.sqlite_master SET sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, 1) WHERE type IN ('trigger', 'view')\x00error in %s %s%s%s: %s\x00 \x00CREATE \x00\"%w\" \x00%Q%s\x00%.*s%s\x00cannot drop %s column: \"%s\"\x00PRIMARY KEY\x00cannot drop column \"%s\": no other columns exist\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_drop_column(%d, sql, %d) WHERE (type=='table' AND tbl_name=%Q COLLATE nocase)\x00after drop column\x00sqlite_rename_column\x00sqlite_rename_table\x00sqlite_rename_test\x00sqlite_drop_column\x00sqlite_rename_quotefix\x00CREATE TABLE %Q.%s(%s)\x00DELETE FROM %Q.%s WHERE %s=%Q\x00DELETE FROM %Q.%s\x00sqlite_stat1\x00tbl,idx,stat\x00sqlite_stat4\x00tbl,idx,neq,nlt,ndlt,sample\x00sqlite_stat3\x00stat_init\x00stat_push\x00%llu\x00 %llu\x00%llu \x00stat_get\x00sqlite\\_%\x00BBB\x00idx\x00tbl\x00unordered*\x00sz=[0-9]*\x00noskipscan*\x00SELECT idx,count(*) FROM %Q.sqlite_stat4 GROUP BY idx\x00SELECT idx,neq,nlt,ndlt,sample FROM %Q.sqlite_stat4\x00SELECT tbl,idx,stat FROM %Q.sqlite_stat1\x00x\x00\x00too many attached databases - max %d\x00database %s is already in use\x00database is already attached\x00attached databases must use the same text encoding as main database\x00unable to open database: %s\x00no such database: %s\x00cannot detach database %s\x00database %s is locked\x00sqlite_detach\x00sqlite_attach\x00%s cannot use variables\x00%s %T cannot reference objects in database %s\x00authorizer malfunction\x00%s.%s\x00%s.%z\x00access to %z is prohibited\x00not authorized\x00pragma_\x00no such view\x00no such table\x00corrupt database\x00unknown database %T\x00object name reserved for internal use: %s\x00temporary table name must be unqualified\x00%s %T already exists\x00there is already an index named %s\x00sqlite_returning\x00cannot use RETURNING in a trigger\x00too many columns on %s\x00always\x00generated\x00duplicate column name: %s\x00default value of column [%s] is not constant\x00cannot use DEFAULT on a generated column\x00generated columns cannot be part of the PRIMARY KEY\x00table \"%s\" has more than one primary key\x00AUTOINCREMENT is only allowed on an INTEGER PRIMARY KEY\x00virtual tables cannot use computed columns\x00virtual\x00stored\x00error in generated column \"%s\"\x00,\x00\n \x00,\n \x00\n)\x00CREATE TABLE \x00 TEXT\x00 NUM\x00 INT\x00 REAL\x00unknown datatype for %s.%s: \"%s\"\x00missing datatype for %s.%s\x00AUTOINCREMENT not allowed on WITHOUT ROWID tables\x00PRIMARY KEY missing on table %s\x00must have at least one non-generated column\x00TABLE\x00VIEW\x00CREATE %s %.*s\x00UPDATE %Q.sqlite_master SET type='%s', name=%Q, tbl_name=%Q, rootpage=#%d, sql=%Q WHERE rowid=#%d\x00CREATE TABLE %Q.sqlite_sequence(name,seq)\x00tbl_name='%q' AND type!='trigger'\x00parameters are not allowed in views\x00view %s is circularly 
defined\x00corrupt schema\x00UPDATE %Q.sqlite_master SET rootpage=%d WHERE #%d AND rootpage=#%d\x00sqlite_stat%d\x00DELETE FROM %Q.sqlite_sequence WHERE name=%Q\x00DELETE FROM %Q.sqlite_master WHERE tbl_name=%Q and type!='trigger'\x00table %s may not be dropped\x00use DROP TABLE to delete table %s\x00use DROP VIEW to delete view %s\x00foreign key on %s should reference only one column of table %T\x00number of columns in foreign key does not match the number of columns in the referenced table\x00unknown column \"%s\" in foreign key definition\x00unsupported use of NULLS %s\x00FIRST\x00LAST\x00index\x00cannot create a TEMP index on non-TEMP table \"%s\"\x00table %s may not be indexed\x00views may not be indexed\x00virtual tables may not be indexed\x00there is already a table named %s\x00index %s already exists\x00sqlite_autoindex_%s_%d\x00expressions prohibited in PRIMARY KEY and UNIQUE constraints\x00conflicting ON CONFLICT clauses specified\x00invalid rootpage\x00CREATE%s INDEX %.*s\x00 UNIQUE\x00INSERT INTO %Q.sqlite_master VALUES('index',%Q,%Q,#%d,%Q);\x00name='%q' AND type='index'\x00no such index: %S\x00index associated with UNIQUE or PRIMARY KEY constraint cannot be dropped\x00DELETE FROM %Q.sqlite_master WHERE name=%Q AND type='index'\x00too many FROM clause terms, max: %d\x00a JOIN clause is required before %s\x00ON\x00USING\x00BEGIN\x00ROLLBACK\x00COMMIT\x00RELEASE\x00unable to open a temporary database file for storing temporary tables\x00index '%q'\x00, \x00%s.rowid\x00unable to identify the object to be reindexed\x00duplicate WITH table name: %s\x00no such collation sequence: %s\x00unsafe use of virtual table \"%s\"\x00table %s may not be modified\x00cannot modify %s because it is a view\x00rows deleted\x00integer overflow\x00%.*f\x00LIKE or GLOB pattern too complex\x00ESCAPE expression must be a single character\x00%!.20e\x00%Q\x00?000\x00MATCH\x00like\x00implies_nonnull_row\x00expr_compare\x00expr_implies_expr\x00affinity\x00soundex\x00load_extension\x00sqlite_compileoption_used\x00sqlite_compileoption_get\x00unlikely\x00likelihood\x00likely\x00sqlite_offset\x00ltrim\x00rtrim\x00trim\x00min\x00max\x00typeof\x00subtype\x00length\x00instr\x00printf\x00format\x00unicode\x00char\x00abs\x00round\x00upper\x00lower\x00hex\x00unhex\x00ifnull\x00random\x00randomblob\x00nullif\x00sqlite_version\x00sqlite_source_id\x00sqlite_log\x00quote\x00last_insert_rowid\x00changes\x00total_changes\x00replace\x00zeroblob\x00substr\x00substring\x00sum\x00total\x00avg\x00count\x00group_concat\x00glob\x00ceil\x00ceiling\x00floor\x00trunc\x00ln\x00log\x00log10\x00log2\x00exp\x00pow\x00power\x00mod\x00acos\x00asin\x00atan\x00atan2\x00cos\x00sin\x00tan\x00cosh\x00sinh\x00tanh\x00acosh\x00asinh\x00atanh\x00sqrt\x00radians\x00degrees\x00pi\x00sign\x00iif\x00foreign key mismatch - \"%w\" referencing \"%w\"\x00cannot INSERT into generated column \"%s\"\x00table %S has no column named %s\x00table %S has %d columns but %d values were supplied\x00%d values for %d columns\x00UPSERT not implemented for virtual table \"%s\"\x00cannot UPSERT a view\x00rows inserted\x00sqlite3_extension_init\x00sqlite3_\x00lib\x00_init\x00no entry point [%s] in shared library [%s]\x00error during initialization: %s\x00unable to open shared library [%.*s]\x00dylib\x00automatic extension loading failed: 
%s\x00seq\x00from\x00to\x00on_update\x00on_delete\x00match\x00cid\x00name\x00type\x00notnull\x00dflt_value\x00pk\x00hidden\x00schema\x00ncol\x00wr\x00strict\x00seqno\x00desc\x00coll\x00key\x00builtin\x00enc\x00narg\x00flags\x00wdth\x00hght\x00flgs\x00unique\x00origin\x00partial\x00rowid\x00fkid\x00file\x00busy\x00checkpointed\x00database\x00status\x00cache_size\x00timeout\x00analysis_limit\x00application_id\x00auto_vacuum\x00automatic_index\x00busy_timeout\x00cache_spill\x00case_sensitive_like\x00cell_size_check\x00checkpoint_fullfsync\x00collation_list\x00compile_options\x00count_changes\x00data_version\x00database_list\x00default_cache_size\x00defer_foreign_keys\x00empty_result_callbacks\x00encoding\x00foreign_key_check\x00foreign_key_list\x00foreign_keys\x00freelist_count\x00full_column_names\x00fullfsync\x00function_list\x00hard_heap_limit\x00ignore_check_constraints\x00incremental_vacuum\x00index_info\x00index_list\x00index_xinfo\x00integrity_check\x00journal_mode\x00journal_size_limit\x00legacy_alter_table\x00lock_proxy_file\x00locking_mode\x00max_page_count\x00mmap_size\x00module_list\x00optimize\x00page_count\x00page_size\x00pragma_list\x00query_only\x00quick_check\x00read_uncommitted\x00recursive_triggers\x00reverse_unordered_selects\x00schema_version\x00secure_delete\x00short_column_names\x00shrink_memory\x00soft_heap_limit\x00synchronous\x00table_info\x00table_list\x00table_xinfo\x00temp_store\x00temp_store_directory\x00threads\x00trusted_schema\x00user_version\x00wal_autocheckpoint\x00wal_checkpoint\x00writable_schema\x00onoffalseyestruextrafull\x00exclusive\x00normal\x00full\x00incremental\x00memory\x00temporary storage cannot be changed from within a transaction\x00SET NULL\x00SET DEFAULT\x00CASCADE\x00RESTRICT\x00NO ACTION\x00delete\x00persist\x00off\x00truncate\x00wal\x00w\x00a\x00sissii\x00utf8\x00utf16le\x00utf16be\x00-%T\x00fast\x00not a writable directory\x00failed to set lock proxy file\x00Safety level may not be changed inside a transaction\x00reset\x00issisii\x00issisi\x00SELECT*FROM\"%w\"\x00shadow\x00sssiii\x00iisX\x00isiX\x00c\x00u\x00isisi\x00iss\x00is\x00iissssss\x00NONE\x00siX\x00*** in database %s ***\n\x00row not in PRIMARY KEY order for %s\x00NULL value in %s.%s\x00non-%s value in %s.%s\x00NUMERIC value in %s.%s\x00C\x00TEXT value in %s.%s\x00CHECK constraint failed in %s\x00row \x00 missing from index \x00 values differ from index \x00non-unique entry in index \x00wrong # of entries in index \x00ok\x00unsupported encoding: %s\x00restart\x00ANALYZE \"%w\".\"%w\"\x00UTF8\x00UTF-8\x00UTF-16le\x00UTF-16be\x00UTF16le\x00UTF16be\x00UTF-16\x00UTF16\x00CREATE TABLE x\x00%c\"%s\"\x00(\"%s\"\x00,arg HIDDEN\x00,schema HIDDEN\x00PRAGMA \x00%Q.\x00=%Q\x00error in %s %s after %s: %s\x00malformed database schema (%s)\x00%z - %s\x00rename\x00drop column\x00add column\x00orphan index\x00CREATE TABLE x(type text,name text,tbl_name text,rootpage int,sql text)\x00unsupported file format\x00SELECT*FROM\"%w\".%s ORDER BY rowid\x00database schema is locked: %s\x00statement too long\x00unknown join type: %T%s%T%s%T\x00naturaleftouterightfullinnercross\x00a NATURAL join may not have an ON or USING clause\x00cannot join using column %s - column not present in both tables\x00ambiguous reference to %s in USING()\x00UNION ALL\x00INTERSECT\x00EXCEPT\x00UNION\x00USE TEMP B-TREE FOR %s\x00USE TEMP B-TREE FOR %sORDER BY\x00RIGHT PART OF \x00column%d\x00%.*z:%u\x00NUM\x00cannot use window functions in recursive queries\x00recursive aggregate queries not supported\x00SETUP\x00RECURSIVE 
STEP\x00SCAN %d CONSTANT ROW%s\x00COMPOUND QUERY\x00LEFT-MOST SUBQUERY\x00%s USING TEMP B-TREE\x00all VALUES must have the same number of terms\x00SELECTs to the left and right of %s do not have the same number of result columns\x00MERGE (%s)\x00LEFT\x00RIGHT\x00no such index: %s\x00'%s' is not a function\x00no such index: \"%s\"\x00multiple references to recursive table: %s\x00circular reference: %s\x00table %s has %d values for %d columns\x00multiple recursive references: %s\x00recursive reference in a subquery: %s\x00%!S\x00too many references to \"%s\": max 65535\x00access to view \"%s\" prohibited\x00..%s\x00%s.%s.%s\x00no such table: %s\x00no tables specified\x00too many columns in result set\x00DISTINCT aggregates must have exactly one argument\x00USE TEMP B-TREE FOR %s(DISTINCT)\x00SCAN %s%s%s\x00 USING COVERING INDEX \x00target object/alias may not appear in FROM clause: %s\x00expected %d columns for '%s' but got %d\x00CO-ROUTINE %!S\x00MATERIALIZE %!S\x00DISTINCT\x00GROUP BY\x00sqlite3_get_table() called with two or more incompatible queries\x00temporary trigger may not have qualified name\x00trigger\x00cannot create triggers on virtual tables\x00trigger %T already exists\x00cannot create trigger on system table\x00cannot create %s trigger on view: %S\x00BEFORE\x00AFTER\x00cannot create INSTEAD OF trigger on table: %S\x00trigger \"%s\" may not write to shadow table \"%s\"\x00INSERT INTO %Q.sqlite_master VALUES('trigger',%Q,%Q,0,'CREATE TRIGGER %q')\x00type='trigger' AND name='%q'\x00no such trigger: %S\x00DELETE FROM %Q.sqlite_master WHERE name=%Q AND type='trigger'\x00%s RETURNING is not available on virtual tables\x00DELETE\x00UPDATE\x00RETURNING may not use \"TABLE.*\" wildcards\x00-- TRIGGER %s\x00cannot UPDATE generated column \"%s\"\x00no such column: %s\x00rows updated\x00%r \x00%sON CONFLICT clause does not match any PRIMARY KEY or UNIQUE constraint\x00CRE\x00INS\x00cannot VACUUM from within a transaction\x00cannot VACUUM - SQL statements in progress\x00non-text filename\x00ATTACH %Q AS vacuum_db\x00output file already exists\x00SELECT sql FROM \"%w\".sqlite_schema WHERE type='table'AND name<>'sqlite_sequence' AND coalesce(rootpage,1)>0\x00SELECT sql FROM \"%w\".sqlite_schema WHERE type='index'\x00SELECT'INSERT INTO vacuum_db.'||quote(name)||' SELECT*FROM\"%w\".'||quote(name)FROM vacuum_db.sqlite_schema WHERE type='table'AND coalesce(rootpage,1)>0\x00INSERT INTO vacuum_db.sqlite_schema SELECT*FROM \"%w\".sqlite_schema WHERE type IN('view','trigger') OR(type='table'AND rootpage=0)\x00CREATE VIRTUAL TABLE %T\x00UPDATE %Q.sqlite_master SET type='table', name=%Q, tbl_name=%Q, rootpage=0, sql=%Q WHERE rowid=#%d\x00name=%Q AND sql=%Q\x00vtable constructor called recursively: %s\x00vtable constructor failed: %s\x00vtable constructor did not declare schema: %s\x00no such module: %s\x00\x00 AND \x00(\x00 (\x00%s=?\x00ANY(%s)\x00>\x00<\x00%s %S\x00SEARCH\x00SCAN\x00AUTOMATIC PARTIAL COVERING INDEX\x00AUTOMATIC COVERING INDEX\x00COVERING INDEX %s\x00INDEX %s\x00 USING \x00 USING INTEGER PRIMARY KEY (%s\x00>? 
AND %s\x00%c?)\x00 VIRTUAL TABLE INDEX %d:%s\x00 LEFT-JOIN\x00BLOOM FILTER ON %S (\x00rowid=?\x00MULTI-INDEX OR\x00INDEX %d\x00RIGHT-JOIN %s\x00regexp\x00ON clause references tables to its right\x00NOCASE\x00too many arguments on %s() - max %d\x00automatic index on %s(%s)\x00auto-index\x00%s.xBestIndex malfunction\x00abbreviated query algorithm search\x00no query solution\x00at most %d tables in a join\x00SCAN CONSTANT ROW\x00second argument to nth_value must be a positive integer\x00argument of ntile must be a positive integer\x00row_number\x00dense_rank\x00rank\x00percent_rank\x00cume_dist\x00ntile\x00last_value\x00nth_value\x00first_value\x00lead\x00lag\x00no such window: %s\x00RANGE with offset PRECEDING/FOLLOWING requires one ORDER BY expression\x00FILTER clause may only be used with aggregate window functions\x00misuse of aggregate: %s()\x00unsupported frame specification\x00PARTITION clause\x00ORDER BY clause\x00frame specification\x00cannot override %s of window: %s\x00DISTINCT is not supported for window functions\x00frame starting offset must be a non-negative integer\x00frame ending offset must be a non-negative integer\x00frame starting offset must be a non-negative number\x00frame ending offset must be a non-negative number\x00%s clause should come after %s not before\x00ORDER BY\x00LIMIT\x00too many terms in compound SELECT\x00syntax error after column name \"%.*s\"\x00parser stack overflow\x00unknown table option: %.*s\x00set list\x00near \"%T\": syntax error\x00qualified table names are not allowed on INSERT, UPDATE, and DELETE statements within triggers\x00the INDEXED BY clause is not allowed on UPDATE or DELETE statements within triggers\x00the NOT INDEXED clause is not allowed on UPDATE or DELETE statements within triggers\x00incomplete input\x00unrecognized token: \"%T\"\x00%s in \"%s\"\x00create\x00temp\x00temporary\x00end\x00explain\x00unable to close due to unfinalized statements or unfinished backups\x00unknown error\x00abort due to ROLLBACK\x00another row available\x00no more rows available\x00not an error\x00SQL logic error\x00access permission denied\x00query aborted\x00database is locked\x00database table is locked\x00attempt to write a readonly database\x00interrupted\x00disk I/O error\x00database disk image is malformed\x00unknown operation\x00database or disk is full\x00unable to open database file\x00locking protocol\x00constraint failed\x00datatype mismatch\x00bad parameter or other API misuse\x00authorization denied\x00column index out of range\x00file is not a database\x00notification message\x00warning message\x00unable to delete/modify user-function due to active statements\x00unable to use function %s in the requested context\x00unknown database: %s\x00unable to delete/modify collation sequence due to active statements\x00file:\x00localhost\x00invalid uri authority: %.*s\x00vfs\x00cache\x00mode\x00no such %s mode: %s\x00%s mode not allowed: %s\x00no such vfs: %s\x00shared\x00private\x00ro\x00rw\x00rwc\x00RTRIM\x00\x00\x00\x00%s at line %d of [%.10s]\x00database corruption\x00misuse\x00cannot open file\x00no such table column: %s.%s\x00SQLITE_\x00database is deadlocked\x00array\x00object\x000123456789abcdef\x00JSON cannot hold BLOB values\x00malformed JSON\x00[0]\x00JSON path error near '%q'\x00json_%s() needs an odd number of arguments\x00$[\x00$.\x00json_object() requires an even number of arguments\x00json_object() labels must be TEXT\x00set\x00insert\x00[]\x00{}\x00CREATE TABLE x(key,value,type,atom,id,parent,fullkey,path,json HIDDEN,root 
HIDDEN)\x00.%.*s\x00[%d]\x00$\x00json\x00json_array\x00json_array_length\x00json_extract\x00->\x00->>\x00json_insert\x00json_object\x00json_patch\x00json_quote\x00json_remove\x00json_replace\x00json_set\x00json_type\x00json_valid\x00json_group_array\x00json_group_object\x00json_each\x00json_tree\x00%s_node\x00data\x00DROP TABLE '%q'.'%q_node';DROP TABLE '%q'.'%q_rowid';DROP TABLE '%q'.'%q_parent';\x00RtreeMatchArg\x00SELECT * FROM %Q.%Q\x00UNIQUE constraint failed: %s.%s\x00rtree constraint failed: %s.(%s<=%s)\x00ALTER TABLE %Q.'%q_node' RENAME TO \"%w_node\";ALTER TABLE %Q.'%q_parent' RENAME TO \"%w_parent\";ALTER TABLE %Q.'%q_rowid' RENAME TO \"%w_rowid\";\x00SELECT stat FROM %Q.sqlite_stat1 WHERE tbl = '%q_rowid'\x00node\x00CREATE TABLE \"%w\".\"%w_rowid\"(rowid INTEGER PRIMARY KEY,nodeno\x00,a%d\x00);CREATE TABLE \"%w\".\"%w_node\"(nodeno INTEGER PRIMARY KEY,data);\x00CREATE TABLE \"%w\".\"%w_parent\"(nodeno INTEGER PRIMARY KEY,parentnode);\x00INSERT INTO \"%w\".\"%w_node\"VALUES(1,zeroblob(%d))\x00INSERT INTO\"%w\".\"%w_rowid\"(rowid,nodeno)VALUES(?1,?2)ON CONFLICT(rowid)DO UPDATE SET nodeno=excluded.nodeno\x00SELECT * FROM \"%w\".\"%w_rowid\" WHERE rowid=?1\x00UPDATE \"%w\".\"%w_rowid\"SET \x00a%d=coalesce(?%d,a%d)\x00a%d=?%d\x00 WHERE rowid=?1\x00INSERT OR REPLACE INTO '%q'.'%q_node' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_node' WHERE nodeno = ?1\x00SELECT nodeno FROM '%q'.'%q_rowid' WHERE rowid = ?1\x00INSERT OR REPLACE INTO '%q'.'%q_rowid' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_rowid' WHERE rowid = ?1\x00SELECT parentnode FROM '%q'.'%q_parent' WHERE nodeno = ?1\x00INSERT OR REPLACE INTO '%q'.'%q_parent' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_parent' WHERE nodeno = ?1\x00PRAGMA %Q.page_size\x00SELECT length(data) FROM '%q'.'%q_node' WHERE nodeno = 1\x00undersize RTree blobs in \"%q_node\"\x00Wrong number of columns for an rtree table\x00Too few columns for an rtree table\x00Too many columns for an rtree table\x00Auxiliary rtree columns must be last\x00CREATE TABLE x(%.*s INT\x00,%.*s\x00);\x00,%.*s REAL\x00,%.*s INT\x00{%lld\x00 %g\x00}\x00Invalid argument to rtreedepth()\x00%z%s%z\x00SELECT data FROM %Q.'%q_node' WHERE nodeno=?\x00Node %lld missing from database\x00SELECT parentnode FROM %Q.'%q_parent' WHERE nodeno=?1\x00SELECT nodeno FROM %Q.'%q_rowid' WHERE rowid=?1\x00Mapping (%lld -> %lld) missing from %s table\x00%_rowid\x00%_parent\x00Found (%lld -> %lld) in %s table, expected (%lld -> %lld)\x00Dimension %d of cell %d on node %lld is corrupt\x00Dimension %d of cell %d on node %lld is corrupt relative to parent\x00Node %lld is too small (%d bytes)\x00Rtree depth out of range (%d)\x00Node %lld is too small for cell count of %d (%d bytes)\x00SELECT count(*) FROM %Q.'%q%s'\x00Wrong number of entries in %%%s table - expected %lld, actual %lld\x00SELECT * FROM %Q.'%q_rowid'\x00Schema corrupt or not an rtree\x00_rowid\x00_parent\x00END\x00wrong number of arguments to function rtreecheck()\x00[\x00[%!g,%!g],\x00[%!g,%!g]]\x00\x00CREATE TABLE x(_shape\x00,%s\x00rtree\x00fullscan\x00_shape does not contain a valid polygon\x00geopoly_overlap\x00geopoly_within\x00geopoly\x00geopoly_area\x00geopoly_blob\x00geopoly_json\x00geopoly_svg\x00geopoly_contains_point\x00geopoly_debug\x00geopoly_bbox\x00geopoly_xform\x00geopoly_regular\x00geopoly_ccw\x00geopoly_group_bbox\x00rtreenode\x00rtreedepth\x00rtreecheck\x00rtree_i32\x00corrupt fossil delta\x00DROP TRIGGER IF EXISTS temp.rbu_insert_tr;DROP TRIGGER IF EXISTS temp.rbu_update1_tr;DROP TRIGGER IF EXISTS temp.rbu_update2_tr;DROP TRIGGER IF 
EXISTS temp.rbu_delete_tr;\x00SELECT rbu_target_name(name, type='view') AS target, name FROM sqlite_schema WHERE type IN ('table', 'view') AND target IS NOT NULL %s ORDER BY name\x00AND rootpage!=0 AND rootpage IS NOT NULL\x00SELECT name, rootpage, sql IS NULL OR substr(8, 6)=='UNIQUE' FROM main.sqlite_schema WHERE type='index' AND tbl_name = ?\x00SELECT (sql COLLATE nocase BETWEEN 'CREATE VIRTUAL' AND 'CREATE VIRTUAM'), rootpage FROM sqlite_schema WHERE name=%Q\x00PRAGMA index_list=%Q\x00SELECT rootpage FROM sqlite_schema WHERE name = %Q\x00PRAGMA table_info=%Q\x00PRAGMA main.index_list = %Q\x00PRAGMA main.index_xinfo = %Q\x00SELECT * FROM '%q'\x00rbu_\x00rbu_rowid\x00table %q %s rbu_rowid column\x00may not have\x00requires\x00PRAGMA table_info(%Q)\x00column missing from %q: %s\x00%z%s\"%w\"\x00%z%s%s\"%w\"%s\x00SELECT max(_rowid_) FROM \"%s%w\"\x00 WHERE _rowid_ > %lld \x00 DESC\x00quote(\x00||','||\x00SELECT %s FROM \"%s%w\" ORDER BY %s LIMIT 1\x00 WHERE (%s) > (%s) \x00_rowid_\x00%z%s \"%w\" COLLATE %Q\x00%z%s \"rbu_imp_%d%w\" COLLATE %Q DESC\x00%z%s quote(\"rbu_imp_%d%w\")\x00SELECT %s FROM \"rbu_imp_%w\" ORDER BY %s LIMIT 1\x00%z%s%s\x00(%s) > (%s)\x00%z%s(%.*s) COLLATE %Q\x00%z%s\"%w\" COLLATE %Q\x00%z%s\"rbu_imp_%d%w\"%s\x00%z%s\"rbu_imp_%d%w\" %s COLLATE %Q\x00%z%s\"rbu_imp_%d%w\" IS ?\x00%z%s%s.\"%w\"\x00%z%sNULL\x00%z, %s._rowid_\x00_rowid_ = ?%d\x00%z%sc%d=?%d\x00_rowid_ = (SELECT id FROM rbu_imposter2 WHERE %z)\x00%z%s\"%w\"=?%d\x00invalid rbu_control value\x00%z%s\"%w\"=rbu_delta(\"%w\", ?%d)\x00%z%s\"%w\"=rbu_fossil_delta(\"%w\", ?%d)\x00PRIMARY KEY(\x00%z%s\"%w\"%s\x00%z)\x00SELECT name FROM sqlite_schema WHERE rootpage = ?\x00%z%sc%d %s COLLATE %Q\x00%z%sc%d%s\x00%z, id INTEGER\x00CREATE TABLE rbu_imposter2(%z, PRIMARY KEY(%z)) WITHOUT ROWID\x00PRIMARY KEY \x00%z%s\"%w\" %s %sCOLLATE %Q%s\x00 NOT NULL\x00%z, %z\x00CREATE TABLE \"rbu_imp_%w\"(%z)%s\x00 WITHOUT ROWID\x00INSERT INTO %s.'rbu_tmp_%q'(rbu_control,%s%s) VALUES(%z)\x00SELECT trim(sql) FROM sqlite_schema WHERE type='index' AND name=?\x00 LIMIT -1 OFFSET %d\x00CREATE TABLE \"rbu_imp_%w\"( %s, PRIMARY KEY( %s ) ) WITHOUT ROWID\x00INSERT INTO \"rbu_imp_%w\" VALUES(%s)\x00DELETE FROM \"rbu_imp_%w\" WHERE %s\x00SELECT %s, 0 AS rbu_control FROM '%q' %s %s %s ORDER BY %s%s\x00AND\x00WHERE\x00SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' %s ORDER BY %s%s\x00SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' %s UNION ALL SELECT %s, rbu_control FROM '%q' %s %s typeof(rbu_control)='integer' AND rbu_control!=1 ORDER BY %s%s\x00rbu_imp_\x00INSERT INTO \"%s%w\"(%s%s) VALUES(%s)\x00, _rowid_\x00DELETE FROM \"%s%w\" WHERE %s\x00, rbu_rowid\x00CREATE TABLE IF NOT EXISTS %s.'rbu_tmp_%q' AS SELECT *%s FROM '%q' WHERE 0;\x00, 0 AS rbu_rowid\x00CREATE TEMP TRIGGER rbu_delete_tr BEFORE DELETE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(3, %s);END;CREATE TEMP TRIGGER rbu_update1_tr BEFORE UPDATE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(3, %s);END;CREATE TEMP TRIGGER rbu_update2_tr AFTER UPDATE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(4, %s);END;\x00CREATE TEMP TRIGGER rbu_insert_tr AFTER INSERT ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(0, %s);END;\x00,_rowid_ \x00,rbu_rowid\x00SELECT %s,%s rbu_control%s FROM '%q'%s %s %s %s\x000 AS \x00UPDATE \"%s%w\" SET %s WHERE %s\x00SELECT k, v FROM %s.rbu_state\x00file:///%s-vacuum?modeof=%s\x00ATTACH %Q AS stat\x00CREATE TABLE IF NOT EXISTS %s.rbu_state(k INTEGER PRIMARY KEY, v)\x00cannot vacuum wal mode 
database\x00file:%s-vactmp?rbu_memory=1%s%s\x00&\x00rbu_tmp_insert\x00rbu_fossil_delta\x00rbu_target_name\x00SELECT * FROM sqlite_schema\x00rbu vfs not found\x00PRAGMA main.wal_checkpoint=restart\x00rbu_exclusive_checkpoint\x00%s-oal\x00%s-wal\x00PRAGMA schema_version\x00PRAGMA schema_version = %d\x00INSERT OR REPLACE INTO %s.rbu_state(k, v) VALUES (%d, %d), (%d, %Q), (%d, %Q), (%d, %d), (%d, %d), (%d, %lld), (%d, %lld), (%d, %lld), (%d, %lld), (%d, %Q) \x00PRAGMA main.%s\x00PRAGMA main.%s = %d\x00PRAGMA writable_schema=1\x00SELECT sql FROM sqlite_schema WHERE sql!='' AND rootpage!=0 AND name!='sqlite_sequence' ORDER BY type DESC\x00SELECT * FROM sqlite_schema WHERE rootpage=0 OR rootpage IS NULL\x00INSERT INTO sqlite_schema VALUES(?,?,?,?,?)\x00PRAGMA writable_schema=0\x00DELETE FROM %s.'rbu_tmp_%q'\x00rbu_state mismatch error\x00rbu_vfs_%d\x00SELECT count(*) FROM sqlite_schema WHERE type='index' AND tbl_name = %Q\x00rbu_index_cnt\x00SELECT 1 FROM sqlite_schema WHERE tbl_name = 'rbu_count'\x00SELECT sum(cnt * (1 + rbu_index_cnt(rbu_target_name(tbl))))FROM rbu_count\x00cannot update wal mode database\x00database modified during rbu %s\x00vacuum\x00update\x00BEGIN IMMEDIATE\x00PRAGMA journal_mode=off\x00-vactmp\x00DELETE FROM stat.rbu_state\x00rbu/zipvfs setup error\x00rbu(%s)/%z\x00rbu_memory\x00SELECT 0, 'tbl', '', 0, '', 1 UNION ALL SELECT 1, 'idx', '', 0, '', 2 UNION ALL SELECT 2, 'stat', '', 0, '', 0\x00PRAGMA '%q'.table_info('%q')\x00%z%s\"%w\".\"%w\".\"%w\"=\"%w\".\"%w\".\"%w\"\x00%z%s\"%w\".\"%w\".\"%w\" IS NOT \"%w\".\"%w\".\"%w\"\x00 OR \x00SELECT * FROM \"%w\".\"%w\" WHERE NOT EXISTS ( SELECT 1 FROM \"%w\".\"%w\" WHERE %s)\x00SELECT * FROM \"%w\".\"%w\", \"%w\".\"%w\" WHERE %s AND (%z)\x00table schemas do not match\x00SELECT tbl, ?2, stat FROM %Q.sqlite_stat1 WHERE tbl IS ?1 AND idx IS (CASE WHEN ?2=X'' THEN NULL ELSE ?2 END)\x00SELECT * FROM \x00 WHERE \x00 IS ?\x00SAVEPOINT changeset\x00RELEASE changeset\x00UPDATE main.\x00 SET \x00 = ?\x00idx IS CASE WHEN length(?4)=0 AND typeof(?4)='blob' THEN NULL ELSE ?4 END \x00DELETE FROM main.\x00 AND (?\x00AND \x00INSERT INTO main.\x00) VALUES(?\x00, ?\x00INSERT INTO main.sqlite_stat1 VALUES(?1, CASE WHEN length(?2)=0 AND typeof(?2)='blob' THEN NULL ELSE ?2 END, ?3)\x00DELETE FROM main.sqlite_stat1 WHERE tbl=?1 AND idx IS CASE WHEN length(?2)=0 AND typeof(?2)='blob' THEN NULL ELSE ?2 END AND (?4 OR stat IS ?3)\x00SAVEPOINT replace_op\x00RELEASE replace_op\x00SAVEPOINT changeset_apply\x00PRAGMA defer_foreign_keys = 1\x00sqlite3changeset_apply(): no such table: %s\x00sqlite3changeset_apply(): table %s has %d columns, expected %d or more\x00sqlite3changeset_apply(): primary key mismatch for table %s\x00PRAGMA defer_foreign_keys = 0\x00RELEASE changeset_apply\x00ROLLBACK TO changeset_apply\x00fts5: parser stack overflow\x00fts5: syntax error near \"%.*s\"\x00%z%.*s\x00wrong number of arguments to function highlight()\x00wrong number of arguments to function snippet()\x00snippet\x00highlight\x00bm25\x00prefix\x00malformed prefix=... directive\x00too many prefix indexes (max %d)\x00prefix length out of range (max 999)\x00tokenize\x00multiple tokenize=... directives\x00parse error in tokenize directive\x00content\x00multiple content=... directives\x00%Q.%Q\x00content_rowid\x00multiple content_rowid=... directives\x00columnsize\x00malformed columnsize=... directive\x00columns\x00malformed detail=... 
directive\x00unrecognized option: \"%.*s\"\x00reserved fts5 column name: %s\x00unindexed\x00unrecognized column option: %s\x00T.%Q\x00, T.%Q\x00, T.c%d\x00reserved fts5 table name: %s\x00parse error in \"%s\"\x00docsize\x00%Q.'%q_%s'\x00CREATE TABLE x(\x00%z%s%Q\x00%z, %Q HIDDEN, %s HIDDEN)\x00pgsz\x00hashsize\x00automerge\x00usermerge\x00crisismerge\x00SELECT k, v FROM %Q.'%q_config'\x00version\x00invalid fts5 file format (found %d, expected %d) - run 'rebuild'\x00unterminated string\x00fts5: syntax error near \"%.1s\"\x00OR\x00NOT\x00NEAR\x00expected integer, got \"%.*s\"\x00fts5: column queries are not supported (detail=none)\x00fts5: %s queries are not supported (detail!=full)\x00phrase\x00block\x00REPLACE INTO '%q'.'%q_data'(id, block) VALUES(?,?)\x00DELETE FROM '%q'.'%q_data' WHERE id>=? AND id<=?\x00DELETE FROM '%q'.'%q_idx' WHERE segid=?\x00PRAGMA %Q.data_version\x00SELECT pgno FROM '%q'.'%q_idx' WHERE segid=? AND term<=? ORDER BY term DESC LIMIT 1\x00INSERT INTO '%q'.'%q_idx'(segid,term,pgno) VALUES(?,?,?)\x00%s_data\x00id INTEGER PRIMARY KEY, block BLOB\x00segid, term, pgno, PRIMARY KEY(segid, term)\x00SELECT segid, term, (pgno>>1), (pgno&1) FROM %Q.'%q_idx' WHERE segid=%d ORDER BY 1, 2\x00\x00\x00\x00\x00\x00recursively defined fts5 content table\x00SELECT rowid, rank FROM %Q.%Q ORDER BY %s(\"%w\"%s%s) %s\x00DESC\x00ASC\x00reads\x00unknown special query: %.*s\x00SELECT %s\x00no such function: %s\x00parse error in rank function: %s\x00%s: table does not support scanning\x00delete-all\x00'delete-all' may only be used with a contentless or external content fts5 table\x00rebuild\x00'rebuild' may not be used with a contentless fts5 table\x00merge\x00integrity-check\x00cannot %s contentless fts5 table: %s\x00DELETE from\x00no such cursor: %lld\x00no such tokenizer: %s\x00error in tokenizer constructor\x00fts5_api_ptr\x00fts5: 2023-02-21 18:09:37 05941c2a04037fc3ed2ffae11f5d2260706f89431f463518740f72ada350866d\x00config\x00fts5\x00fts5_source_id\x00SELECT %s FROM %s T WHERE T.%Q >= ? AND T.%Q <= ? ORDER BY T.%Q ASC\x00SELECT %s FROM %s T WHERE T.%Q <= ? AND T.%Q >= ? 
ORDER BY T.%Q DESC\x00SELECT %s FROM %s T WHERE T.%Q=?\x00INSERT INTO %Q.'%q_content' VALUES(%s)\x00REPLACE INTO %Q.'%q_content' VALUES(%s)\x00DELETE FROM %Q.'%q_content' WHERE id=?\x00REPLACE INTO %Q.'%q_docsize' VALUES(?,?)\x00DELETE FROM %Q.'%q_docsize' WHERE id=?\x00SELECT sz FROM %Q.'%q_docsize' WHERE id=?\x00REPLACE INTO %Q.'%q_config' VALUES(?,?)\x00SELECT %s FROM %s AS T\x00DROP TABLE IF EXISTS %Q.'%q_data';DROP TABLE IF EXISTS %Q.'%q_idx';DROP TABLE IF EXISTS %Q.'%q_config';\x00DROP TABLE IF EXISTS %Q.'%q_docsize';\x00DROP TABLE IF EXISTS %Q.'%q_content';\x00ALTER TABLE %Q.'%q_%s' RENAME TO '%q_%s';\x00CREATE TABLE %Q.'%q_%q'(%s)%s\x00fts5: error creating shadow table %q_%s: %s\x00id INTEGER PRIMARY KEY\x00, c%d\x00id INTEGER PRIMARY KEY, sz BLOB\x00k PRIMARY KEY, v\x00DELETE FROM %Q.'%q_data';DELETE FROM %Q.'%q_idx';\x00DELETE FROM %Q.'%q_docsize';\x00SELECT count(*) FROM %Q.'%q_%s'\x00tokenchars\x00separators\x00L* N* Co\x00categories\x00remove_diacritics\x00unicode61\x00al\x00ance\x00ence\x00er\x00ic\x00able\x00ible\x00ant\x00ement\x00ment\x00ent\x00ion\x00ou\x00ism\x00ate\x00iti\x00ous\x00ive\x00ize\x00at\x00bl\x00ble\x00iz\x00ational\x00tional\x00tion\x00enci\x00anci\x00izer\x00logi\x00bli\x00alli\x00entli\x00eli\x00e\x00ousli\x00ization\x00ation\x00ator\x00alism\x00iveness\x00fulness\x00ful\x00ousness\x00aliti\x00iviti\x00biliti\x00ical\x00ness\x00icate\x00iciti\x00ative\x00alize\x00eed\x00ee\x00ed\x00ing\x00case_sensitive\x00ascii\x00porter\x00trigram\x00col\x00row\x00instance\x00fts5vocab: unknown table type: %Q\x00CREATE TABlE vocab(term, col, doc, cnt)\x00CREATE TABlE vocab(term, doc, cnt)\x00CREATE TABlE vocab(term, doc, col, offset)\x00wrong number of vtable arguments\x00recursive definition for %s.%s\x00SELECT t.%Q FROM %Q.%Q AS t WHERE t.%Q MATCH '*id'\x00no such fts5 table: %s.%s\x00fts5vocab\x002023-02-21 18:09:37 05941c2a04037fc3ed2ffae11f5d2260706f89431f463518740f72ada350866d\x00" +var ts1 = "3.41.2\x00ATOMIC_INTRINSICS=0\x00COMPILER=clang-14.0.0\x00DEFAULT_AUTOVACUUM\x00DEFAULT_CACHE_SIZE=-2000\x00DEFAULT_FILE_FORMAT=4\x00DEFAULT_JOURNAL_SIZE_LIMIT=-1\x00DEFAULT_MEMSTATUS=0\x00DEFAULT_MMAP_SIZE=0\x00DEFAULT_PAGE_SIZE=4096\x00DEFAULT_PCACHE_INITSZ=20\x00DEFAULT_RECURSIVE_TRIGGERS\x00DEFAULT_SECTOR_SIZE=4096\x00DEFAULT_SYNCHRONOUS=2\x00DEFAULT_WAL_AUTOCHECKPOINT=1000\x00DEFAULT_WAL_SYNCHRONOUS=2\x00DEFAULT_WORKER_THREADS=0\x00ENABLE_COLUMN_METADATA\x00ENABLE_FTS5\x00ENABLE_GEOPOLY\x00ENABLE_MATH_FUNCTIONS\x00ENABLE_MEMORY_MANAGEMENT\x00ENABLE_OFFSET_SQL_FUNC\x00ENABLE_PREUPDATE_HOOK\x00ENABLE_RBU\x00ENABLE_RTREE\x00ENABLE_SESSION\x00ENABLE_SNAPSHOT\x00ENABLE_STAT4\x00ENABLE_UNLOCK_NOTIFY\x00LIKE_DOESNT_MATCH_BLOBS\x00MALLOC_SOFT_LIMIT=1024\x00MAX_ATTACHED=10\x00MAX_COLUMN=2000\x00MAX_COMPOUND_SELECT=500\x00MAX_DEFAULT_PAGE_SIZE=8192\x00MAX_EXPR_DEPTH=1000\x00MAX_FUNCTION_ARG=127\x00MAX_LENGTH=1000000000\x00MAX_LIKE_PATTERN_LENGTH=50000\x00MAX_MMAP_SIZE=0x7fff0000\x00MAX_PAGE_COUNT=1073741823\x00MAX_PAGE_SIZE=65536\x00MAX_SQL_LENGTH=1000000000\x00MAX_TRIGGER_DEPTH=1000\x00MAX_VARIABLE_NUMBER=32766\x00MAX_VDBE_OP=250000000\x00MAX_WORKER_THREADS=8\x00MUTEX_NOOP\x00SOUNDEX\x00SYSTEM_MALLOC\x00TEMP_STORE=1\x00THREADSAFE=1\x00BINARY\x00ANY\x00BLOB\x00INT\x00INTEGER\x00REAL\x00TEXT\x0020b:20e\x0020c:20e\x0020e\x0040f-21a-21d\x00now\x00local time unavailable\x00second\x00minute\x00hour\x00\x00\x00day\x00\x00\x00\x00month\x00\x00year\x00\x00\x00auto\x00julianday\x00localtime\x00unixepoch\x00utc\x00weekday \x00start of 
\x00month\x00year\x00day\x00%02d\x00%06.3f\x00%03d\x00%.16g\x00%lld\x00%04d\x00date\x00time\x00datetime\x00strftime\x00current_time\x00current_timestamp\x00current_date\x00failed to allocate %u bytes of memory\x00failed memory resize %u to %u bytes\x00out of memory\x000123456789ABCDEF0123456789abcdef\x00-x0\x00X0\x00%\x00NaN\x00Inf\x00\x00NULL\x00(NULL)\x00.\x00(join-%u)\x00(subquery-%u)\x00thstndrd\x00922337203685477580\x00API call with %s database connection pointer\x00unopened\x00invalid\x00Savepoint\x00AutoCommit\x00Transaction\x00Checkpoint\x00JournalMode\x00Vacuum\x00VFilter\x00VUpdate\x00Init\x00Goto\x00Gosub\x00InitCoroutine\x00Yield\x00MustBeInt\x00Jump\x00Once\x00If\x00IfNot\x00IsType\x00Not\x00IfNullRow\x00SeekLT\x00SeekLE\x00SeekGE\x00SeekGT\x00IfNotOpen\x00IfNoHope\x00NoConflict\x00NotFound\x00Found\x00SeekRowid\x00NotExists\x00Last\x00IfSmaller\x00SorterSort\x00Sort\x00Rewind\x00SorterNext\x00Prev\x00Next\x00IdxLE\x00IdxGT\x00IdxLT\x00Or\x00And\x00IdxGE\x00RowSetRead\x00RowSetTest\x00Program\x00FkIfZero\x00IsNull\x00NotNull\x00Ne\x00Eq\x00Gt\x00Le\x00Lt\x00Ge\x00ElseEq\x00IfPos\x00IfNotZero\x00DecrJumpZero\x00IncrVacuum\x00VNext\x00Filter\x00PureFunc\x00Function\x00Return\x00EndCoroutine\x00HaltIfNull\x00Halt\x00Integer\x00Int64\x00String\x00BeginSubrtn\x00Null\x00SoftNull\x00Blob\x00Variable\x00Move\x00Copy\x00SCopy\x00IntCopy\x00FkCheck\x00ResultRow\x00CollSeq\x00AddImm\x00RealAffinity\x00Cast\x00Permutation\x00Compare\x00IsTrue\x00ZeroOrNull\x00Offset\x00Column\x00TypeCheck\x00Affinity\x00MakeRecord\x00Count\x00ReadCookie\x00SetCookie\x00ReopenIdx\x00BitAnd\x00BitOr\x00ShiftLeft\x00ShiftRight\x00Add\x00Subtract\x00Multiply\x00Divide\x00Remainder\x00Concat\x00OpenRead\x00OpenWrite\x00BitNot\x00OpenDup\x00OpenAutoindex\x00String8\x00OpenEphemeral\x00SorterOpen\x00SequenceTest\x00OpenPseudo\x00Close\x00ColumnsUsed\x00SeekScan\x00SeekHit\x00Sequence\x00NewRowid\x00Insert\x00RowCell\x00Delete\x00ResetCount\x00SorterCompare\x00SorterData\x00RowData\x00Rowid\x00NullRow\x00SeekEnd\x00IdxInsert\x00SorterInsert\x00IdxDelete\x00DeferredSeek\x00IdxRowid\x00FinishSeek\x00Destroy\x00Clear\x00ResetSorter\x00CreateBtree\x00SqlExec\x00ParseSchema\x00LoadAnalysis\x00DropTable\x00DropIndex\x00Real\x00DropTrigger\x00IntegrityCk\x00RowSetAdd\x00Param\x00FkCounter\x00MemMax\x00OffsetLimit\x00AggInverse\x00AggStep\x00AggStep1\x00AggValue\x00AggFinal\x00Expire\x00CursorLock\x00CursorUnlock\x00TableLock\x00VBegin\x00VCreate\x00VDestroy\x00VOpen\x00VInitIn\x00VColumn\x00VRename\x00Pagecount\x00MaxPgcnt\x00ClrSubtype\x00FilterAdd\x00Trace\x00CursorHint\x00ReleaseReg\x00Noop\x00Explain\x00Abortable\x00open\x00close\x00access\x00getcwd\x00stat\x00fstat\x00ftruncate\x00fcntl\x00read\x00pread\x00pread64\x00write\x00pwrite\x00pwrite64\x00fchmod\x00fallocate\x00unlink\x00openDirectory\x00mkdir\x00rmdir\x00fchown\x00geteuid\x00mmap\x00munmap\x00mremap\x00getpagesize\x00readlink\x00lstat\x00ioctl\x00attempt to open \"%s\" as file descriptor %d\x00/dev/null\x00os_unix.c:%d: (%d) %s(%s) - %s\x00S\x00cannot fstat db file %s\x00file unlinked while open: %s\x00multiple links to file: %s\x00file renamed while open: 
%s\x00%s\x00full_fsync\x00%s-shm\x00readonly_shm\x00nfs\x00hfs\x00ufs\x00afpfs\x00smbfs\x00webdav\x00psow\x00unix-excl\x00%s.lock\x00/var/tmp\x00/usr/tmp\x00/tmp\x00SQLITE_TMPDIR\x00TMPDIR\x00%s/etilqs_%llx%c\x00modeof\x00msdos\x00exfat\x00SQLITE_FORCE_PROXY_LOCKING\x00:auto:\x00fsync\x00/dev/urandom\x00sqliteplocks\x00/\x00dummy\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00break\x00path error (len %d)\x00read error (len %d)\x00create failed (%d)\x00write failed (%d)\x00rename failed (%d)\x00broke stale lock on %s\n\x00failed to break stale lock on %s, %s\n\x00-conch\x00.lock\x00:auto: (not held)\x00unix\x00unix-none\x00unix-dotfile\x00unix-posix\x00unix-flock\x00unix-afp\x00unix-nfs\x00unix-proxy\x00memdb\x00memdb(%p,%lld)\x00PRAGMA \"%w\".page_count\x00ATTACH x AS %Q\x00recovered %d pages from %s\x00-journal\x00-wal\x00nolock\x00immutable\x00PRAGMA table_list\x00recovered %d frames from WAL file %s\x00cannot limit WAL size: %s\x00SQLite format 3\x00:memory:\x00@ \x00\n\x00invalid page number %d\x002nd reference to page %d\x00Failed to read ptrmap key=%d\x00Bad ptr map entry key=%d expected=(%d,%d) got=(%d,%d)\x00failed to get page %d\x00freelist leaf count too big on page %d\x00%s is %d but should be %d\x00size\x00overflow list length\x00Page %u: \x00unable to get the page. error code=%d\x00btreeInitPage() returns error code %d\x00free space corruption\x00On tree page %u cell %d: \x00On page %u at right child: \x00Offset %d out of range %d..%d\x00Extends off end of page\x00Rowid %lld out of order\x00Child page depth differs\x00Multiple uses for byte %u of page %u\x00Fragmentation of %d bytes reported as %d on page %u\x00Main freelist: \x00max rootpage (%d) disagrees with header (%d)\x00incremental_vacuum enabled with a max rootpage of zero\x00Page %d is never used\x00Pointer map page %d is referenced\x00unknown database %s\x00destination database is in use\x00source and destination must be distinct\x00%!.15g\x00-\x00%s%s\x00k(%d\x00B\x00,%s%s%s\x00N.\x00)\x00%.18s-%s\x00%s(%d)\x00%d\x00(blob)\x00vtab:%p\x00%c%u\x00]\x00program\x00?\x008\x0016LE\x0016BE\x00addr\x00opcode\x00p1\x00p2\x00p3\x00p4\x00p5\x00comment\x00id\x00parent\x00notused\x00detail\x00%.4c%s%.16c\x00MJ delete: %s\x00MJ collide: %s\x00-mj%06X9%02X\x00FOREIGN KEY constraint failed\x00a CHECK constraint\x00a generated column\x00an index\x00non-deterministic use of %s() in %s\x00API called with finalized prepared statement\x00API called with NULL prepared statement\x00string or blob too big\x00bind on a busy prepared statement: [%s]\x00-- \x00'%.*q'\x00zeroblob(%d)\x00x'\x00%02x\x00'\x00%s constraint failed\x00%z: %s\x00abort at %d in [%s]: %s\x00cannot store %s value in %s column %s.%s\x00cannot open savepoint - SQL statements in progress\x00no such savepoint: %s\x00cannot release savepoint - SQL statements in progress\x00cannot commit transaction - SQL statements in progress\x00cannot start a transaction within a transaction\x00cannot rollback - no transaction is active\x00cannot commit - no transaction is active\x00database schema has changed\x00index corruption\x00sqlite_master\x00SELECT*FROM\"%w\".%s WHERE %s ORDER BY rowid\x00too many levels of trigger recursion\x00cannot change %s wal mode from within a transaction\x00into\x00out of\x00database table is locked: %s\x00ValueList\x00-- %s\x00statement aborts at 
%d: [%s] %s\x00NOT NULL\x00UNIQUE\x00CHECK\x00FOREIGN KEY\x00cannot open value of type %s\x00null\x00real\x00integer\x00no such rowid: %lld\x00cannot open virtual table: %s\x00cannot open table without rowid: %s\x00cannot open view: %s\x00no such column: \"%s\"\x00foreign key\x00indexed\x00cannot open %s column for writing\x00sqlite_\x00sqlite_temp_master\x00sqlite_temp_schema\x00sqlite_schema\x00main\x00*\x00new\x00old\x00excluded\x00misuse of aliased aggregate %s\x00misuse of aliased window function %s\x00row value misused\x00double-quoted string literal: \"%w\"\x00coalesce\x00no such column\x00ambiguous column name\x00%s: %s.%s.%s\x00%s: %s.%s\x00%s: %s\x00partial index WHERE clauses\x00index expressions\x00CHECK constraints\x00generated columns\x00%s prohibited in %s\x00the \".\" operator\x00second argument to %#T() must be a constant between 0.0 and 1.0\x00not authorized to use function: %#T\x00non-deterministic functions\x00%#T() may not be used as a window function\x00window\x00aggregate\x00misuse of %s function %#T()\x00no such function: %#T\x00wrong number of arguments to function %#T()\x00FILTER may not be used with non-aggregate %#T()\x00subqueries\x00parameters\x00%r %s BY term out of range - should be between 1 and %d\x00too many terms in ORDER BY clause\x00ORDER\x00%r ORDER BY term does not match any column in the result set\x00too many terms in %s BY clause\x00HAVING clause on a non-aggregate query\x00GROUP\x00aggregate functions are not allowed in the GROUP BY clause\x00Expression tree is too large (maximum depth %d)\x00IN(...) element has %d term%s - expected %d\x00s\x000\x00too many arguments on function %T\x00unsafe use of %#T()\x00variable number must be between ?1 and ?%d\x00too many SQL variables\x00%d columns assigned %d values\x00too many columns in %s\x00true\x00false\x00_ROWID_\x00ROWID\x00OID\x00USING ROWID SEARCH ON TABLE %s FOR IN-OPERATOR\x00USING INDEX %s FOR IN-OPERATOR\x00sub-select returns %d columns - expected %d\x00REUSE LIST SUBQUERY %d\x00%sLIST SUBQUERY %d\x00CORRELATED \x00REUSE SUBQUERY %d\x00%sSCALAR SUBQUERY %d\x001\x000x\x00hex literal too big: %s%#T\x00generated column loop on \"%s\"\x00blob\x00text\x00numeric\x00flexnum\x00none\x00misuse of aggregate: %#T()\x00unknown function: %#T()\x00RAISE() may only be used within a trigger-program\x00B\x00C\x00D\x00E\x00F\x00table %s may not be altered\x00SELECT 1 FROM \"%w\".sqlite_master WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%' AND sqlite_rename_test(%Q, sql, type, name, %d, %Q, %d)=NULL \x00SELECT 1 FROM temp.sqlite_master WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%' AND sqlite_rename_test(%Q, sql, type, name, 1, %Q, %d)=NULL \x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_quotefix(%Q, sql)WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%'\x00UPDATE temp.sqlite_master SET sql = sqlite_rename_quotefix('temp', sql)WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%'\x00there is already another table or index with this name: %s\x00table\x00view %s may not be altered\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, %d) WHERE (type!='index' OR tbl_name=%Q COLLATE nocase)AND name NOT LIKE 'sqliteX_%%' ESCAPE 'X'\x00UPDATE %Q.sqlite_master SET tbl_name = %Q, name = CASE WHEN type='table' THEN %Q WHEN name LIKE 'sqliteX_autoindex%%' ESCAPE 'X' AND type='index' THEN 'sqlite_autoindex_' || %Q || substr(name,%d+18) ELSE name END WHERE 
tbl_name=%Q COLLATE nocase AND (type='table' OR type='index' OR type='trigger');\x00sqlite_sequence\x00UPDATE \"%w\".sqlite_sequence set name = %Q WHERE name = %Q\x00UPDATE sqlite_temp_schema SET sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, 1), tbl_name = CASE WHEN tbl_name=%Q COLLATE nocase AND sqlite_rename_test(%Q, sql, type, name, 1, 'after rename', 0) THEN %Q ELSE tbl_name END WHERE type IN ('view', 'trigger')\x00after rename\x00SELECT raise(ABORT,%Q) FROM \"%w\".\"%w\"\x00Cannot add a PRIMARY KEY column\x00Cannot add a UNIQUE column\x00Cannot add a REFERENCES column with non-NULL default value\x00Cannot add a NOT NULL column with default value NULL\x00Cannot add a column with non-constant default\x00cannot add a STORED column\x00UPDATE \"%w\".sqlite_master SET sql = printf('%%.%ds, ',sql) || %Q || substr(sql,1+length(printf('%%.%ds',sql))) WHERE type = 'table' AND name = %Q\x00SELECT CASE WHEN quick_check GLOB 'CHECK*' THEN raise(ABORT,'CHECK constraint failed') ELSE raise(ABORT,'NOT NULL constraint failed') END FROM pragma_quick_check(%Q,%Q) WHERE quick_check GLOB 'CHECK*' OR quick_check GLOB 'NULL*'\x00virtual tables may not be altered\x00Cannot add a column to a view\x00sqlite_altertab_%s\x00view\x00virtual table\x00cannot %s %s \"%s\"\x00drop column from\x00rename columns of\x00no such column: \"%T\"\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, %d) WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND (type != 'index' OR tbl_name = %Q)\x00UPDATE temp.sqlite_master SET sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, 1) WHERE type IN ('trigger', 'view')\x00error in %s %s%s%s: %s\x00 \x00CREATE \x00\"%w\" \x00%Q%s\x00%.*s%s\x00cannot drop %s column: \"%s\"\x00PRIMARY KEY\x00cannot drop column \"%s\": no other columns exist\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_drop_column(%d, sql, %d) WHERE (type=='table' AND tbl_name=%Q COLLATE nocase)\x00after drop column\x00sqlite_rename_column\x00sqlite_rename_table\x00sqlite_rename_test\x00sqlite_drop_column\x00sqlite_rename_quotefix\x00CREATE TABLE %Q.%s(%s)\x00DELETE FROM %Q.%s WHERE %s=%Q\x00DELETE FROM %Q.%s\x00sqlite_stat1\x00tbl,idx,stat\x00sqlite_stat4\x00tbl,idx,neq,nlt,ndlt,sample\x00sqlite_stat3\x00stat_init\x00stat_push\x00%llu\x00 %llu\x00%llu \x00stat_get\x00sqlite\\_%\x00BBB\x00idx\x00tbl\x00unordered*\x00sz=[0-9]*\x00noskipscan*\x00SELECT idx,count(*) FROM %Q.sqlite_stat4 GROUP BY idx\x00SELECT idx,neq,nlt,ndlt,sample FROM %Q.sqlite_stat4\x00SELECT tbl,idx,stat FROM %Q.sqlite_stat1\x00x\x00\x00too many attached databases - max %d\x00database %s is already in use\x00database is already attached\x00attached databases must use the same text encoding as main database\x00unable to open database: %s\x00no such database: %s\x00cannot detach database %s\x00database %s is locked\x00sqlite_detach\x00sqlite_attach\x00%s cannot use variables\x00%s %T cannot reference objects in database %s\x00authorizer malfunction\x00%s.%s\x00%s.%z\x00access to %z is prohibited\x00not authorized\x00pragma_\x00no such view\x00no such table\x00corrupt database\x00unknown database %T\x00object name reserved for internal use: %s\x00temporary table name must be unqualified\x00%s %T already exists\x00there is already an index named %s\x00sqlite_returning\x00cannot use RETURNING in a trigger\x00too many columns on %s\x00always\x00generated\x00duplicate column name: %s\x00default value of column [%s] is not constant\x00cannot use DEFAULT on a generated column\x00generated columns 
cannot be part of the PRIMARY KEY\x00table \"%s\" has more than one primary key\x00AUTOINCREMENT is only allowed on an INTEGER PRIMARY KEY\x00virtual tables cannot use computed columns\x00virtual\x00stored\x00error in generated column \"%s\"\x00,\x00\n \x00,\n \x00\n)\x00CREATE TABLE \x00 TEXT\x00 NUM\x00 INT\x00 REAL\x00unknown datatype for %s.%s: \"%s\"\x00missing datatype for %s.%s\x00AUTOINCREMENT not allowed on WITHOUT ROWID tables\x00PRIMARY KEY missing on table %s\x00must have at least one non-generated column\x00TABLE\x00VIEW\x00CREATE %s %.*s\x00UPDATE %Q.sqlite_master SET type='%s', name=%Q, tbl_name=%Q, rootpage=#%d, sql=%Q WHERE rowid=#%d\x00CREATE TABLE %Q.sqlite_sequence(name,seq)\x00tbl_name='%q' AND type!='trigger'\x00parameters are not allowed in views\x00view %s is circularly defined\x00corrupt schema\x00UPDATE %Q.sqlite_master SET rootpage=%d WHERE #%d AND rootpage=#%d\x00sqlite_stat%d\x00DELETE FROM %Q.sqlite_sequence WHERE name=%Q\x00DELETE FROM %Q.sqlite_master WHERE tbl_name=%Q and type!='trigger'\x00table %s may not be dropped\x00use DROP TABLE to delete table %s\x00use DROP VIEW to delete view %s\x00foreign key on %s should reference only one column of table %T\x00number of columns in foreign key does not match the number of columns in the referenced table\x00unknown column \"%s\" in foreign key definition\x00unsupported use of NULLS %s\x00FIRST\x00LAST\x00index\x00cannot create a TEMP index on non-TEMP table \"%s\"\x00table %s may not be indexed\x00views may not be indexed\x00virtual tables may not be indexed\x00there is already a table named %s\x00index %s already exists\x00sqlite_autoindex_%s_%d\x00expressions prohibited in PRIMARY KEY and UNIQUE constraints\x00conflicting ON CONFLICT clauses specified\x00invalid rootpage\x00CREATE%s INDEX %.*s\x00 UNIQUE\x00INSERT INTO %Q.sqlite_master VALUES('index',%Q,%Q,#%d,%Q);\x00name='%q' AND type='index'\x00no such index: %S\x00index associated with UNIQUE or PRIMARY KEY constraint cannot be dropped\x00DELETE FROM %Q.sqlite_master WHERE name=%Q AND type='index'\x00too many FROM clause terms, max: %d\x00a JOIN clause is required before %s\x00ON\x00USING\x00BEGIN\x00ROLLBACK\x00COMMIT\x00RELEASE\x00unable to open a temporary database file for storing temporary tables\x00index '%q'\x00, \x00%s.rowid\x00unable to identify the object to be reindexed\x00duplicate WITH table name: %s\x00no such collation sequence: %s\x00unsafe use of virtual table \"%s\"\x00table %s may not be modified\x00cannot modify %s because it is a view\x00rows deleted\x00integer overflow\x00%.*f\x00LIKE or GLOB pattern too complex\x00ESCAPE expression must be a single 
character\x00%!.20e\x00%Q\x00?000\x00MATCH\x00like\x00implies_nonnull_row\x00expr_compare\x00expr_implies_expr\x00affinity\x00soundex\x00load_extension\x00sqlite_compileoption_used\x00sqlite_compileoption_get\x00unlikely\x00likelihood\x00likely\x00sqlite_offset\x00ltrim\x00rtrim\x00trim\x00min\x00max\x00typeof\x00subtype\x00length\x00instr\x00printf\x00format\x00unicode\x00char\x00abs\x00round\x00upper\x00lower\x00hex\x00unhex\x00ifnull\x00random\x00randomblob\x00nullif\x00sqlite_version\x00sqlite_source_id\x00sqlite_log\x00quote\x00last_insert_rowid\x00changes\x00total_changes\x00replace\x00zeroblob\x00substr\x00substring\x00sum\x00total\x00avg\x00count\x00group_concat\x00glob\x00ceil\x00ceiling\x00floor\x00trunc\x00ln\x00log\x00log10\x00log2\x00exp\x00pow\x00power\x00mod\x00acos\x00asin\x00atan\x00atan2\x00cos\x00sin\x00tan\x00cosh\x00sinh\x00tanh\x00acosh\x00asinh\x00atanh\x00sqrt\x00radians\x00degrees\x00pi\x00sign\x00iif\x00foreign key mismatch - \"%w\" referencing \"%w\"\x00cannot INSERT into generated column \"%s\"\x00table %S has no column named %s\x00table %S has %d columns but %d values were supplied\x00%d values for %d columns\x00UPSERT not implemented for virtual table \"%s\"\x00cannot UPSERT a view\x00rows inserted\x00sqlite3_extension_init\x00sqlite3_\x00lib\x00_init\x00no entry point [%s] in shared library [%s]\x00error during initialization: %s\x00unable to open shared library [%.*s]\x00dylib\x00automatic extension loading failed: %s\x00seq\x00from\x00to\x00on_update\x00on_delete\x00match\x00cid\x00name\x00type\x00notnull\x00dflt_value\x00pk\x00hidden\x00schema\x00ncol\x00wr\x00strict\x00seqno\x00desc\x00coll\x00key\x00builtin\x00enc\x00narg\x00flags\x00wdth\x00hght\x00flgs\x00unique\x00origin\x00partial\x00rowid\x00fkid\x00file\x00busy\x00checkpointed\x00database\x00status\x00cache_size\x00timeout\x00analysis_limit\x00application_id\x00auto_vacuum\x00automatic_index\x00busy_timeout\x00cache_spill\x00case_sensitive_like\x00cell_size_check\x00checkpoint_fullfsync\x00collation_list\x00compile_options\x00count_changes\x00data_version\x00database_list\x00default_cache_size\x00defer_foreign_keys\x00empty_result_callbacks\x00encoding\x00foreign_key_check\x00foreign_key_list\x00foreign_keys\x00freelist_count\x00full_column_names\x00fullfsync\x00function_list\x00hard_heap_limit\x00ignore_check_constraints\x00incremental_vacuum\x00index_info\x00index_list\x00index_xinfo\x00integrity_check\x00journal_mode\x00journal_size_limit\x00legacy_alter_table\x00lock_proxy_file\x00locking_mode\x00max_page_count\x00mmap_size\x00module_list\x00optimize\x00page_count\x00page_size\x00pragma_list\x00query_only\x00quick_check\x00read_uncommitted\x00recursive_triggers\x00reverse_unordered_selects\x00schema_version\x00secure_delete\x00short_column_names\x00shrink_memory\x00soft_heap_limit\x00synchronous\x00table_info\x00table_list\x00table_xinfo\x00temp_store\x00temp_store_directory\x00threads\x00trusted_schema\x00user_version\x00wal_autocheckpoint\x00wal_checkpoint\x00writable_schema\x00onoffalseyestruextrafull\x00exclusive\x00normal\x00full\x00incremental\x00memory\x00temporary storage cannot be changed from within a transaction\x00SET NULL\x00SET DEFAULT\x00CASCADE\x00RESTRICT\x00NO ACTION\x00delete\x00persist\x00off\x00truncate\x00wal\x00w\x00a\x00sissii\x00utf8\x00utf16le\x00utf16be\x00-%T\x00fast\x00not a writable directory\x00failed to set lock proxy file\x00Safety level may not be changed inside a 
transaction\x00reset\x00issisii\x00issisi\x00SELECT*FROM\"%w\"\x00shadow\x00sssiii\x00iisX\x00isiX\x00c\x00u\x00isisi\x00iss\x00is\x00iissssss\x00NONE\x00siX\x00*** in database %s ***\n\x00row not in PRIMARY KEY order for %s\x00NULL value in %s.%s\x00non-%s value in %s.%s\x00NUMERIC value in %s.%s\x00C\x00TEXT value in %s.%s\x00CHECK constraint failed in %s\x00row \x00 missing from index \x00rowid not at end-of-record for row \x00 of index \x00 values differ from index \x00non-unique entry in index \x00wrong # of entries in index \x00ok\x00unsupported encoding: %s\x00restart\x00ANALYZE \"%w\".\"%w\"\x00UTF8\x00UTF-8\x00UTF-16le\x00UTF-16be\x00UTF16le\x00UTF16be\x00UTF-16\x00UTF16\x00CREATE TABLE x\x00%c\"%s\"\x00(\"%s\"\x00,arg HIDDEN\x00,schema HIDDEN\x00PRAGMA \x00%Q.\x00=%Q\x00error in %s %s after %s: %s\x00malformed database schema (%s)\x00%z - %s\x00rename\x00drop column\x00add column\x00orphan index\x00CREATE TABLE x(type text,name text,tbl_name text,rootpage int,sql text)\x00unsupported file format\x00SELECT*FROM\"%w\".%s ORDER BY rowid\x00database schema is locked: %s\x00statement too long\x00unknown join type: %T%s%T%s%T\x00naturaleftouterightfullinnercross\x00a NATURAL join may not have an ON or USING clause\x00cannot join using column %s - column not present in both tables\x00ambiguous reference to %s in USING()\x00UNION ALL\x00INTERSECT\x00EXCEPT\x00UNION\x00USE TEMP B-TREE FOR %s\x00USE TEMP B-TREE FOR %sORDER BY\x00RIGHT PART OF \x00column%d\x00%.*z:%u\x00NUM\x00cannot use window functions in recursive queries\x00recursive aggregate queries not supported\x00SETUP\x00RECURSIVE STEP\x00SCAN %d CONSTANT ROW%s\x00COMPOUND QUERY\x00LEFT-MOST SUBQUERY\x00%s USING TEMP B-TREE\x00all VALUES must have the same number of terms\x00SELECTs to the left and right of %s do not have the same number of result columns\x00MERGE (%s)\x00LEFT\x00RIGHT\x00no such index: %s\x00'%s' is not a function\x00no such index: \"%s\"\x00multiple references to recursive table: %s\x00circular reference: %s\x00table %s has %d values for %d columns\x00multiple recursive references: %s\x00recursive reference in a subquery: %s\x00%!S\x00too many references to \"%s\": max 65535\x00access to view \"%s\" prohibited\x00..%s\x00%s.%s.%s\x00no such table: %s\x00no tables specified\x00too many columns in result set\x00DISTINCT aggregates must have exactly one argument\x00USE TEMP B-TREE FOR %s(DISTINCT)\x00SCAN %s%s%s\x00 USING COVERING INDEX \x00target object/alias may not appear in FROM clause: %s\x00expected %d columns for '%s' but got %d\x00CO-ROUTINE %!S\x00MATERIALIZE %!S\x00DISTINCT\x00GROUP BY\x00sqlite3_get_table() called with two or more incompatible queries\x00temporary trigger may not have qualified name\x00trigger\x00cannot create triggers on virtual tables\x00trigger %T already exists\x00cannot create trigger on system table\x00cannot create %s trigger on view: %S\x00BEFORE\x00AFTER\x00cannot create INSTEAD OF trigger on table: %S\x00trigger \"%s\" may not write to shadow table \"%s\"\x00INSERT INTO %Q.sqlite_master VALUES('trigger',%Q,%Q,0,'CREATE TRIGGER %q')\x00type='trigger' AND name='%q'\x00no such trigger: %S\x00DELETE FROM %Q.sqlite_master WHERE name=%Q AND type='trigger'\x00%s RETURNING is not available on virtual tables\x00DELETE\x00UPDATE\x00RETURNING may not use \"TABLE.*\" wildcards\x00-- TRIGGER %s\x00cannot UPDATE generated column \"%s\"\x00no such column: %s\x00rows updated\x00%r \x00%sON CONFLICT clause does not match any PRIMARY KEY or UNIQUE constraint\x00CRE\x00INS\x00cannot VACUUM from 
within a transaction\x00cannot VACUUM - SQL statements in progress\x00non-text filename\x00ATTACH %Q AS vacuum_db\x00output file already exists\x00SELECT sql FROM \"%w\".sqlite_schema WHERE type='table'AND name<>'sqlite_sequence' AND coalesce(rootpage,1)>0\x00SELECT sql FROM \"%w\".sqlite_schema WHERE type='index'\x00SELECT'INSERT INTO vacuum_db.'||quote(name)||' SELECT*FROM\"%w\".'||quote(name)FROM vacuum_db.sqlite_schema WHERE type='table'AND coalesce(rootpage,1)>0\x00INSERT INTO vacuum_db.sqlite_schema SELECT*FROM \"%w\".sqlite_schema WHERE type IN('view','trigger') OR(type='table'AND rootpage=0)\x00CREATE VIRTUAL TABLE %T\x00UPDATE %Q.sqlite_master SET type='table', name=%Q, tbl_name=%Q, rootpage=0, sql=%Q WHERE rowid=#%d\x00name=%Q AND sql=%Q\x00vtable constructor called recursively: %s\x00vtable constructor failed: %s\x00vtable constructor did not declare schema: %s\x00no such module: %s\x00\x00 AND \x00(\x00 (\x00%s=?\x00ANY(%s)\x00>\x00<\x00%s %S\x00SEARCH\x00SCAN\x00AUTOMATIC PARTIAL COVERING INDEX\x00AUTOMATIC COVERING INDEX\x00COVERING INDEX %s\x00INDEX %s\x00 USING \x00 USING INTEGER PRIMARY KEY (%s\x00>? AND %s\x00%c?)\x00 VIRTUAL TABLE INDEX %d:%s\x00 LEFT-JOIN\x00BLOOM FILTER ON %S (\x00rowid=?\x00MULTI-INDEX OR\x00INDEX %d\x00RIGHT-JOIN %s\x00regexp\x00ON clause references tables to its right\x00NOCASE\x00too many arguments on %s() - max %d\x00automatic index on %s(%s)\x00auto-index\x00%s.xBestIndex malfunction\x00abbreviated query algorithm search\x00no query solution\x00at most %d tables in a join\x00SCAN CONSTANT ROW\x00second argument to nth_value must be a positive integer\x00argument of ntile must be a positive integer\x00row_number\x00dense_rank\x00rank\x00percent_rank\x00cume_dist\x00ntile\x00last_value\x00nth_value\x00first_value\x00lead\x00lag\x00no such window: %s\x00RANGE with offset PRECEDING/FOLLOWING requires one ORDER BY expression\x00FILTER clause may only be used with aggregate window functions\x00misuse of aggregate: %s()\x00unsupported frame specification\x00PARTITION clause\x00ORDER BY clause\x00frame specification\x00cannot override %s of window: %s\x00DISTINCT is not supported for window functions\x00frame starting offset must be a non-negative integer\x00frame ending offset must be a non-negative integer\x00frame starting offset must be a non-negative number\x00frame ending offset must be a non-negative number\x00%s clause should come after %s not before\x00ORDER BY\x00LIMIT\x00too many terms in compound SELECT\x00syntax error after column name \"%.*s\"\x00parser stack overflow\x00unknown table option: %.*s\x00set list\x00near \"%T\": syntax error\x00qualified table names are not allowed on INSERT, UPDATE, and DELETE statements within triggers\x00the INDEXED BY clause is not allowed on UPDATE or DELETE statements within triggers\x00the NOT INDEXED clause is not allowed on UPDATE or DELETE statements within triggers\x00incomplete input\x00unrecognized token: \"%T\"\x00%s in \"%s\"\x00create\x00temp\x00temporary\x00end\x00explain\x00unable to close due to unfinalized statements or unfinished backups\x00unknown error\x00abort due to ROLLBACK\x00another row available\x00no more rows available\x00not an error\x00SQL logic error\x00access permission denied\x00query aborted\x00database is locked\x00database table is locked\x00attempt to write a readonly database\x00interrupted\x00disk I/O error\x00database disk image is malformed\x00unknown operation\x00database or disk is full\x00unable to open database file\x00locking protocol\x00constraint 
failed\x00datatype mismatch\x00bad parameter or other API misuse\x00authorization denied\x00column index out of range\x00file is not a database\x00notification message\x00warning message\x00unable to delete/modify user-function due to active statements\x00unable to use function %s in the requested context\x00unknown database: %s\x00unable to delete/modify collation sequence due to active statements\x00file:\x00localhost\x00invalid uri authority: %.*s\x00vfs\x00cache\x00mode\x00no such %s mode: %s\x00%s mode not allowed: %s\x00no such vfs: %s\x00shared\x00private\x00ro\x00rw\x00rwc\x00RTRIM\x00\x00\x00\x00%s at line %d of [%.10s]\x00database corruption\x00misuse\x00cannot open file\x00no such table column: %s.%s\x00SQLITE_\x00database is deadlocked\x00array\x00object\x000123456789abcdef\x00JSON cannot hold BLOB values\x00malformed JSON\x00[0]\x00JSON path error near '%q'\x00json_%s() needs an odd number of arguments\x00$[\x00$.\x00json_object() requires an even number of arguments\x00json_object() labels must be TEXT\x00set\x00insert\x00[]\x00{}\x00CREATE TABLE x(key,value,type,atom,id,parent,fullkey,path,json HIDDEN,root HIDDEN)\x00.%.*s\x00[%d]\x00$\x00json\x00json_array\x00json_array_length\x00json_extract\x00->\x00->>\x00json_insert\x00json_object\x00json_patch\x00json_quote\x00json_remove\x00json_replace\x00json_set\x00json_type\x00json_valid\x00json_group_array\x00json_group_object\x00json_each\x00json_tree\x00%s_node\x00data\x00DROP TABLE '%q'.'%q_node';DROP TABLE '%q'.'%q_rowid';DROP TABLE '%q'.'%q_parent';\x00RtreeMatchArg\x00SELECT * FROM %Q.%Q\x00UNIQUE constraint failed: %s.%s\x00rtree constraint failed: %s.(%s<=%s)\x00ALTER TABLE %Q.'%q_node' RENAME TO \"%w_node\";ALTER TABLE %Q.'%q_parent' RENAME TO \"%w_parent\";ALTER TABLE %Q.'%q_rowid' RENAME TO \"%w_rowid\";\x00SELECT stat FROM %Q.sqlite_stat1 WHERE tbl = '%q_rowid'\x00node\x00CREATE TABLE \"%w\".\"%w_rowid\"(rowid INTEGER PRIMARY KEY,nodeno\x00,a%d\x00);CREATE TABLE \"%w\".\"%w_node\"(nodeno INTEGER PRIMARY KEY,data);\x00CREATE TABLE \"%w\".\"%w_parent\"(nodeno INTEGER PRIMARY KEY,parentnode);\x00INSERT INTO \"%w\".\"%w_node\"VALUES(1,zeroblob(%d))\x00INSERT INTO\"%w\".\"%w_rowid\"(rowid,nodeno)VALUES(?1,?2)ON CONFLICT(rowid)DO UPDATE SET nodeno=excluded.nodeno\x00SELECT * FROM \"%w\".\"%w_rowid\" WHERE rowid=?1\x00UPDATE \"%w\".\"%w_rowid\"SET \x00a%d=coalesce(?%d,a%d)\x00a%d=?%d\x00 WHERE rowid=?1\x00INSERT OR REPLACE INTO '%q'.'%q_node' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_node' WHERE nodeno = ?1\x00SELECT nodeno FROM '%q'.'%q_rowid' WHERE rowid = ?1\x00INSERT OR REPLACE INTO '%q'.'%q_rowid' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_rowid' WHERE rowid = ?1\x00SELECT parentnode FROM '%q'.'%q_parent' WHERE nodeno = ?1\x00INSERT OR REPLACE INTO '%q'.'%q_parent' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_parent' WHERE nodeno = ?1\x00PRAGMA %Q.page_size\x00SELECT length(data) FROM '%q'.'%q_node' WHERE nodeno = 1\x00undersize RTree blobs in \"%q_node\"\x00Wrong number of columns for an rtree table\x00Too few columns for an rtree table\x00Too many columns for an rtree table\x00Auxiliary rtree columns must be last\x00CREATE TABLE x(%.*s INT\x00,%.*s\x00);\x00,%.*s REAL\x00,%.*s INT\x00{%lld\x00 %g\x00}\x00Invalid argument to rtreedepth()\x00%z%s%z\x00SELECT data FROM %Q.'%q_node' WHERE nodeno=?\x00Node %lld missing from database\x00SELECT parentnode FROM %Q.'%q_parent' WHERE nodeno=?1\x00SELECT nodeno FROM %Q.'%q_rowid' WHERE rowid=?1\x00Mapping (%lld -> %lld) missing from %s table\x00%_rowid\x00%_parent\x00Found (%lld -> %lld) in 
%s table, expected (%lld -> %lld)\x00Dimension %d of cell %d on node %lld is corrupt\x00Dimension %d of cell %d on node %lld is corrupt relative to parent\x00Node %lld is too small (%d bytes)\x00Rtree depth out of range (%d)\x00Node %lld is too small for cell count of %d (%d bytes)\x00SELECT count(*) FROM %Q.'%q%s'\x00Wrong number of entries in %%%s table - expected %lld, actual %lld\x00SELECT * FROM %Q.'%q_rowid'\x00Schema corrupt or not an rtree\x00_rowid\x00_parent\x00END\x00wrong number of arguments to function rtreecheck()\x00[\x00[%!g,%!g],\x00[%!g,%!g]]\x00\x00CREATE TABLE x(_shape\x00,%s\x00rtree\x00fullscan\x00_shape does not contain a valid polygon\x00geopoly_overlap\x00geopoly_within\x00geopoly\x00geopoly_area\x00geopoly_blob\x00geopoly_json\x00geopoly_svg\x00geopoly_contains_point\x00geopoly_debug\x00geopoly_bbox\x00geopoly_xform\x00geopoly_regular\x00geopoly_ccw\x00geopoly_group_bbox\x00rtreenode\x00rtreedepth\x00rtreecheck\x00rtree_i32\x00corrupt fossil delta\x00DROP TRIGGER IF EXISTS temp.rbu_insert_tr;DROP TRIGGER IF EXISTS temp.rbu_update1_tr;DROP TRIGGER IF EXISTS temp.rbu_update2_tr;DROP TRIGGER IF EXISTS temp.rbu_delete_tr;\x00SELECT rbu_target_name(name, type='view') AS target, name FROM sqlite_schema WHERE type IN ('table', 'view') AND target IS NOT NULL %s ORDER BY name\x00AND rootpage!=0 AND rootpage IS NOT NULL\x00SELECT name, rootpage, sql IS NULL OR substr(8, 6)=='UNIQUE' FROM main.sqlite_schema WHERE type='index' AND tbl_name = ?\x00SELECT (sql COLLATE nocase BETWEEN 'CREATE VIRTUAL' AND 'CREATE VIRTUAM'), rootpage FROM sqlite_schema WHERE name=%Q\x00PRAGMA index_list=%Q\x00SELECT rootpage FROM sqlite_schema WHERE name = %Q\x00PRAGMA table_info=%Q\x00PRAGMA main.index_list = %Q\x00PRAGMA main.index_xinfo = %Q\x00SELECT * FROM '%q'\x00rbu_\x00rbu_rowid\x00table %q %s rbu_rowid column\x00may not have\x00requires\x00PRAGMA table_info(%Q)\x00column missing from %q: %s\x00%z%s\"%w\"\x00%z%s%s\"%w\"%s\x00SELECT max(_rowid_) FROM \"%s%w\"\x00 WHERE _rowid_ > %lld \x00 DESC\x00quote(\x00||','||\x00SELECT %s FROM \"%s%w\" ORDER BY %s LIMIT 1\x00 WHERE (%s) > (%s) \x00_rowid_\x00%z%s \"%w\" COLLATE %Q\x00%z%s \"rbu_imp_%d%w\" COLLATE %Q DESC\x00%z%s quote(\"rbu_imp_%d%w\")\x00SELECT %s FROM \"rbu_imp_%w\" ORDER BY %s LIMIT 1\x00%z%s%s\x00(%s) > (%s)\x00%z%s(%.*s) COLLATE %Q\x00%z%s\"%w\" COLLATE %Q\x00%z%s\"rbu_imp_%d%w\"%s\x00%z%s\"rbu_imp_%d%w\" %s COLLATE %Q\x00%z%s\"rbu_imp_%d%w\" IS ?\x00%z%s%s.\"%w\"\x00%z%sNULL\x00%z, %s._rowid_\x00_rowid_ = ?%d\x00%z%sc%d=?%d\x00_rowid_ = (SELECT id FROM rbu_imposter2 WHERE %z)\x00%z%s\"%w\"=?%d\x00invalid rbu_control value\x00%z%s\"%w\"=rbu_delta(\"%w\", ?%d)\x00%z%s\"%w\"=rbu_fossil_delta(\"%w\", ?%d)\x00PRIMARY KEY(\x00%z%s\"%w\"%s\x00%z)\x00SELECT name FROM sqlite_schema WHERE rootpage = ?\x00%z%sc%d %s COLLATE %Q\x00%z%sc%d%s\x00%z, id INTEGER\x00CREATE TABLE rbu_imposter2(%z, PRIMARY KEY(%z)) WITHOUT ROWID\x00PRIMARY KEY \x00%z%s\"%w\" %s %sCOLLATE %Q%s\x00 NOT NULL\x00%z, %z\x00CREATE TABLE \"rbu_imp_%w\"(%z)%s\x00 WITHOUT ROWID\x00INSERT INTO %s.'rbu_tmp_%q'(rbu_control,%s%s) VALUES(%z)\x00SELECT trim(sql) FROM sqlite_schema WHERE type='index' AND name=?\x00 LIMIT -1 OFFSET %d\x00CREATE TABLE \"rbu_imp_%w\"( %s, PRIMARY KEY( %s ) ) WITHOUT ROWID\x00INSERT INTO \"rbu_imp_%w\" VALUES(%s)\x00DELETE FROM \"rbu_imp_%w\" WHERE %s\x00SELECT %s, 0 AS rbu_control FROM '%q' %s %s %s ORDER BY %s%s\x00AND\x00WHERE\x00SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' %s ORDER BY %s%s\x00SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' %s UNION 
ALL SELECT %s, rbu_control FROM '%q' %s %s typeof(rbu_control)='integer' AND rbu_control!=1 ORDER BY %s%s\x00rbu_imp_\x00INSERT INTO \"%s%w\"(%s%s) VALUES(%s)\x00, _rowid_\x00DELETE FROM \"%s%w\" WHERE %s\x00, rbu_rowid\x00CREATE TABLE IF NOT EXISTS %s.'rbu_tmp_%q' AS SELECT *%s FROM '%q' WHERE 0;\x00, 0 AS rbu_rowid\x00CREATE TEMP TRIGGER rbu_delete_tr BEFORE DELETE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(3, %s);END;CREATE TEMP TRIGGER rbu_update1_tr BEFORE UPDATE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(3, %s);END;CREATE TEMP TRIGGER rbu_update2_tr AFTER UPDATE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(4, %s);END;\x00CREATE TEMP TRIGGER rbu_insert_tr AFTER INSERT ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(0, %s);END;\x00,_rowid_ \x00,rbu_rowid\x00SELECT %s,%s rbu_control%s FROM '%q'%s %s %s %s\x000 AS \x00UPDATE \"%s%w\" SET %s WHERE %s\x00SELECT k, v FROM %s.rbu_state\x00file:///%s-vacuum?modeof=%s\x00ATTACH %Q AS stat\x00CREATE TABLE IF NOT EXISTS %s.rbu_state(k INTEGER PRIMARY KEY, v)\x00cannot vacuum wal mode database\x00file:%s-vactmp?rbu_memory=1%s%s\x00&\x00rbu_tmp_insert\x00rbu_fossil_delta\x00rbu_target_name\x00SELECT * FROM sqlite_schema\x00rbu vfs not found\x00PRAGMA main.wal_checkpoint=restart\x00rbu_exclusive_checkpoint\x00%s-oal\x00%s-wal\x00PRAGMA schema_version\x00PRAGMA schema_version = %d\x00INSERT OR REPLACE INTO %s.rbu_state(k, v) VALUES (%d, %d), (%d, %Q), (%d, %Q), (%d, %d), (%d, %d), (%d, %lld), (%d, %lld), (%d, %lld), (%d, %lld), (%d, %Q) \x00PRAGMA main.%s\x00PRAGMA main.%s = %d\x00PRAGMA writable_schema=1\x00SELECT sql FROM sqlite_schema WHERE sql!='' AND rootpage!=0 AND name!='sqlite_sequence' ORDER BY type DESC\x00SELECT * FROM sqlite_schema WHERE rootpage=0 OR rootpage IS NULL\x00INSERT INTO sqlite_schema VALUES(?,?,?,?,?)\x00PRAGMA writable_schema=0\x00DELETE FROM %s.'rbu_tmp_%q'\x00rbu_state mismatch error\x00rbu_vfs_%d\x00SELECT count(*) FROM sqlite_schema WHERE type='index' AND tbl_name = %Q\x00rbu_index_cnt\x00SELECT 1 FROM sqlite_schema WHERE tbl_name = 'rbu_count'\x00SELECT sum(cnt * (1 + rbu_index_cnt(rbu_target_name(tbl))))FROM rbu_count\x00cannot update wal mode database\x00database modified during rbu %s\x00vacuum\x00update\x00BEGIN IMMEDIATE\x00PRAGMA journal_mode=off\x00-vactmp\x00DELETE FROM stat.rbu_state\x00rbu/zipvfs setup error\x00rbu(%s)/%z\x00rbu_memory\x00SELECT 0, 'tbl', '', 0, '', 1 UNION ALL SELECT 1, 'idx', '', 0, '', 2 UNION ALL SELECT 2, 'stat', '', 0, '', 0\x00PRAGMA '%q'.table_info('%q')\x00%z%s\"%w\".\"%w\".\"%w\"=\"%w\".\"%w\".\"%w\"\x00%z%s\"%w\".\"%w\".\"%w\" IS NOT \"%w\".\"%w\".\"%w\"\x00 OR \x00SELECT * FROM \"%w\".\"%w\" WHERE NOT EXISTS ( SELECT 1 FROM \"%w\".\"%w\" WHERE %s)\x00SELECT * FROM \"%w\".\"%w\", \"%w\".\"%w\" WHERE %s AND (%z)\x00table schemas do not match\x00SELECT tbl, ?2, stat FROM %Q.sqlite_stat1 WHERE tbl IS ?1 AND idx IS (CASE WHEN ?2=X'' THEN NULL ELSE ?2 END)\x00SELECT * FROM \x00 WHERE \x00 IS ?\x00SAVEPOINT changeset\x00RELEASE changeset\x00UPDATE main.\x00 SET \x00 = ?\x00idx IS CASE WHEN length(?4)=0 AND typeof(?4)='blob' THEN NULL ELSE ?4 END \x00DELETE FROM main.\x00 AND (?\x00AND \x00INSERT INTO main.\x00) VALUES(?\x00, ?\x00INSERT INTO main.sqlite_stat1 VALUES(?1, CASE WHEN length(?2)=0 AND typeof(?2)='blob' THEN NULL ELSE ?2 END, ?3)\x00DELETE FROM main.sqlite_stat1 WHERE tbl=?1 AND idx IS CASE WHEN length(?2)=0 AND typeof(?2)='blob' THEN NULL ELSE ?2 END AND (?4 OR stat IS ?3)\x00SAVEPOINT replace_op\x00RELEASE replace_op\x00SAVEPOINT changeset_apply\x00PRAGMA defer_foreign_keys = 
1\x00sqlite3changeset_apply(): no such table: %s\x00sqlite3changeset_apply(): table %s has %d columns, expected %d or more\x00sqlite3changeset_apply(): primary key mismatch for table %s\x00PRAGMA defer_foreign_keys = 0\x00RELEASE changeset_apply\x00ROLLBACK TO changeset_apply\x00fts5: parser stack overflow\x00fts5: syntax error near \"%.*s\"\x00%z%.*s\x00wrong number of arguments to function highlight()\x00wrong number of arguments to function snippet()\x00snippet\x00highlight\x00bm25\x00prefix\x00malformed prefix=... directive\x00too many prefix indexes (max %d)\x00prefix length out of range (max 999)\x00tokenize\x00multiple tokenize=... directives\x00parse error in tokenize directive\x00content\x00multiple content=... directives\x00%Q.%Q\x00content_rowid\x00multiple content_rowid=... directives\x00columnsize\x00malformed columnsize=... directive\x00columns\x00malformed detail=... directive\x00unrecognized option: \"%.*s\"\x00reserved fts5 column name: %s\x00unindexed\x00unrecognized column option: %s\x00T.%Q\x00, T.%Q\x00, T.c%d\x00reserved fts5 table name: %s\x00parse error in \"%s\"\x00docsize\x00%Q.'%q_%s'\x00CREATE TABLE x(\x00%z%s%Q\x00%z, %Q HIDDEN, %s HIDDEN)\x00pgsz\x00hashsize\x00automerge\x00usermerge\x00crisismerge\x00SELECT k, v FROM %Q.'%q_config'\x00version\x00invalid fts5 file format (found %d, expected %d) - run 'rebuild'\x00unterminated string\x00fts5: syntax error near \"%.1s\"\x00OR\x00NOT\x00NEAR\x00expected integer, got \"%.*s\"\x00fts5: column queries are not supported (detail=none)\x00fts5: %s queries are not supported (detail!=full)\x00phrase\x00block\x00REPLACE INTO '%q'.'%q_data'(id, block) VALUES(?,?)\x00DELETE FROM '%q'.'%q_data' WHERE id>=? AND id<=?\x00DELETE FROM '%q'.'%q_idx' WHERE segid=?\x00PRAGMA %Q.data_version\x00SELECT pgno FROM '%q'.'%q_idx' WHERE segid=? AND term<=? ORDER BY term DESC LIMIT 1\x00INSERT INTO '%q'.'%q_idx'(segid,term,pgno) VALUES(?,?,?)\x00%s_data\x00id INTEGER PRIMARY KEY, block BLOB\x00segid, term, pgno, PRIMARY KEY(segid, term)\x00SELECT segid, term, (pgno>>1), (pgno&1) FROM %Q.'%q_idx' WHERE segid=%d ORDER BY 1, 2\x00\x00\x00\x00\x00\x00recursively defined fts5 content table\x00SELECT rowid, rank FROM %Q.%Q ORDER BY %s(\"%w\"%s%s) %s\x00DESC\x00ASC\x00reads\x00unknown special query: %.*s\x00SELECT %s\x00no such function: %s\x00parse error in rank function: %s\x00%s: table does not support scanning\x00delete-all\x00'delete-all' may only be used with a contentless or external content fts5 table\x00rebuild\x00'rebuild' may not be used with a contentless fts5 table\x00merge\x00integrity-check\x00cannot %s contentless fts5 table: %s\x00DELETE from\x00no such cursor: %lld\x00no such tokenizer: %s\x00error in tokenizer constructor\x00fts5_api_ptr\x00fts5: 2023-03-22 11:56:21 0d1fc92f94cb6b76bffe3ec34d69cffde2924203304e8ffc4155597af0c191da\x00config\x00fts5\x00fts5_source_id\x00SELECT %s FROM %s T WHERE T.%Q >= ? AND T.%Q <= ? ORDER BY T.%Q ASC\x00SELECT %s FROM %s T WHERE T.%Q <= ? AND T.%Q >= ? 
ORDER BY T.%Q DESC\x00SELECT %s FROM %s T WHERE T.%Q=?\x00INSERT INTO %Q.'%q_content' VALUES(%s)\x00REPLACE INTO %Q.'%q_content' VALUES(%s)\x00DELETE FROM %Q.'%q_content' WHERE id=?\x00REPLACE INTO %Q.'%q_docsize' VALUES(?,?)\x00DELETE FROM %Q.'%q_docsize' WHERE id=?\x00SELECT sz FROM %Q.'%q_docsize' WHERE id=?\x00REPLACE INTO %Q.'%q_config' VALUES(?,?)\x00SELECT %s FROM %s AS T\x00DROP TABLE IF EXISTS %Q.'%q_data';DROP TABLE IF EXISTS %Q.'%q_idx';DROP TABLE IF EXISTS %Q.'%q_config';\x00DROP TABLE IF EXISTS %Q.'%q_docsize';\x00DROP TABLE IF EXISTS %Q.'%q_content';\x00ALTER TABLE %Q.'%q_%s' RENAME TO '%q_%s';\x00CREATE TABLE %Q.'%q_%q'(%s)%s\x00fts5: error creating shadow table %q_%s: %s\x00id INTEGER PRIMARY KEY\x00, c%d\x00id INTEGER PRIMARY KEY, sz BLOB\x00k PRIMARY KEY, v\x00DELETE FROM %Q.'%q_data';DELETE FROM %Q.'%q_idx';\x00DELETE FROM %Q.'%q_docsize';\x00SELECT count(*) FROM %Q.'%q_%s'\x00tokenchars\x00separators\x00L* N* Co\x00categories\x00remove_diacritics\x00unicode61\x00al\x00ance\x00ence\x00er\x00ic\x00able\x00ible\x00ant\x00ement\x00ment\x00ent\x00ion\x00ou\x00ism\x00ate\x00iti\x00ous\x00ive\x00ize\x00at\x00bl\x00ble\x00iz\x00ational\x00tional\x00tion\x00enci\x00anci\x00izer\x00logi\x00bli\x00alli\x00entli\x00eli\x00e\x00ousli\x00ization\x00ation\x00ator\x00alism\x00iveness\x00fulness\x00ful\x00ousness\x00aliti\x00iviti\x00biliti\x00ical\x00ness\x00icate\x00iciti\x00ative\x00alize\x00eed\x00ee\x00ed\x00ing\x00case_sensitive\x00ascii\x00porter\x00trigram\x00col\x00row\x00instance\x00fts5vocab: unknown table type: %Q\x00CREATE TABlE vocab(term, col, doc, cnt)\x00CREATE TABlE vocab(term, doc, cnt)\x00CREATE TABlE vocab(term, doc, col, offset)\x00wrong number of vtable arguments\x00recursive definition for %s.%s\x00SELECT t.%Q FROM %Q.%Q AS t WHERE t.%Q MATCH '*id'\x00no such fts5 table: %s.%s\x00fts5vocab\x002023-03-22 11:56:21 0d1fc92f94cb6b76bffe3ec34d69cffde2924203304e8ffc4155597af0c191da\x00" var ts = (*reflect.StringHeader)(unsafe.Pointer(&ts1)).Data diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/sqlite/lib/sqlite_freebsd_386.go temporal-1.22.5/src/vendor/modernc.org/sqlite/lib/sqlite_freebsd_386.go --- temporal-1.21.5-1/src/vendor/modernc.org/sqlite/lib/sqlite_freebsd_386.go 2023-09-29 14:03:35.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/sqlite/lib/sqlite_freebsd_386.go 2024-02-23 09:46:16.000000000 +0000 @@ -1,4 +1,4 @@ -// Code generated by 'ccgo -DSQLITE_PRIVATE= -export-defines "" -export-enums "" -export-externs X -export-fields F -export-typedefs "" -ignore-unsupported-alignment -pkgname sqlite3 -volatile=sqlite3_io_error_pending,sqlite3_open_file_count,sqlite3_pager_readdb_count,sqlite3_pager_writedb_count,sqlite3_pager_writej_count,sqlite3_search_count,sqlite3_sort_count,saved_cnt,randomnessPid -o lib/sqlite_freebsd_386.go -trace-translation-units testdata/sqlite-amalgamation-3410000/sqlite3.c -full-path-comments -DNDEBUG -DHAVE_USLEEP -DLONGDOUBLE_TYPE=double -DSQLITE_CORE -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_FTS5 -DSQLITE_ENABLE_GEOPOLY -DSQLITE_ENABLE_MATH_FUNCTIONS -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_OFFSET_SQL_FUNC -DSQLITE_ENABLE_PREUPDATE_HOOK -DSQLITE_ENABLE_RBU -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_SESSION -DSQLITE_ENABLE_SNAPSHOT -DSQLITE_ENABLE_STAT4 -DSQLITE_ENABLE_UNLOCK_NOTIFY -DSQLITE_LIKE_DOESNT_MATCH_BLOBS -DSQLITE_MUTEX_APPDEF=1 -DSQLITE_MUTEX_NOOP -DSQLITE_SOUNDEX -DSQLITE_THREADSAFE=1 -DSQLITE_OS_UNIX=1', DO NOT EDIT. 
+// Code generated by 'ccgo -DSQLITE_PRIVATE= -export-defines "" -export-enums "" -export-externs X -export-fields F -export-typedefs "" -ignore-unsupported-alignment -pkgname sqlite3 -volatile=sqlite3_io_error_pending,sqlite3_open_file_count,sqlite3_pager_readdb_count,sqlite3_pager_writedb_count,sqlite3_pager_writej_count,sqlite3_search_count,sqlite3_sort_count,saved_cnt,randomnessPid -o lib/sqlite_freebsd_386.go -trace-translation-units testdata/sqlite-amalgamation-3410200/sqlite3.c -full-path-comments -DNDEBUG -DHAVE_USLEEP -DLONGDOUBLE_TYPE=double -DSQLITE_CORE -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_FTS5 -DSQLITE_ENABLE_GEOPOLY -DSQLITE_ENABLE_MATH_FUNCTIONS -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_OFFSET_SQL_FUNC -DSQLITE_ENABLE_PREUPDATE_HOOK -DSQLITE_ENABLE_RBU -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_SESSION -DSQLITE_ENABLE_SNAPSHOT -DSQLITE_ENABLE_STAT4 -DSQLITE_ENABLE_UNLOCK_NOTIFY -DSQLITE_LIKE_DOESNT_MATCH_BLOBS -DSQLITE_MUTEX_APPDEF=1 -DSQLITE_MUTEX_NOOP -DSQLITE_SOUNDEX -DSQLITE_THREADSAFE=1 -DSQLITE_OS_UNIX=1', DO NOT EDIT. package sqlite3 @@ -747,11 +747,11 @@ NC_OrderAgg = 0x8000000 NC_PartIdx = 0x000002 NC_SelfRef = 0x00002e + NC_Subquery = 0x000040 NC_UAggInfo = 0x000100 NC_UBaseReg = 0x000400 NC_UEList = 0x000080 NC_UUpsert = 0x000200 - NC_VarSelect = 0x000040 NDEBUG = 1 NETGRAPHDISC = 6 NN = 1 @@ -1957,7 +1957,7 @@ SQLITE_SHM_UNLOCK = 1 SQLITE_SORTER_PMASZ = 250 SQLITE_SOUNDEX = 1 - SQLITE_SOURCE_ID = "2023-02-21 18:09:37 05941c2a04037fc3ed2ffae11f5d2260706f89431f463518740f72ada350866d" + SQLITE_SOURCE_ID = "2023-03-22 11:56:21 0d1fc92f94cb6b76bffe3ec34d69cffde2924203304e8ffc4155597af0c191da" SQLITE_SO_ASC = 0 SQLITE_SO_DESC = 1 SQLITE_SO_UNDEFINED = -1 @@ -2065,8 +2065,8 @@ SQLITE_UTF8 = 1 SQLITE_VDBEINT_H = 0 SQLITE_VDBE_H = 0 - SQLITE_VERSION = "3.41.0" - SQLITE_VERSION_NUMBER = 3041000 + SQLITE_VERSION = "3.41.2" + SQLITE_VERSION_NUMBER = 3041002 SQLITE_VTABRISK_High = 2 SQLITE_VTABRISK_Low = 0 SQLITE_VTABRISK_Normal = 1 @@ -5258,7 +5258,8 @@ FiIdxCur int32 FiIdxCol int32 FbMaybeNullRow U8 - F__ccgo_pad1 [3]byte + Faff U8 + F__ccgo_pad1 [2]byte FpIENext uintptr } @@ -5879,17 +5880,18 @@ // Handle type for pages. type PgHdr2 = struct { - FpPage uintptr - FpData uintptr - FpExtra uintptr - FpCache uintptr - FpDirty uintptr - FpPager uintptr - Fpgno Pgno - Fflags U16 - FnRef I16 - FpDirtyNext uintptr - FpDirtyPrev uintptr + FpPage uintptr + FpData uintptr + FpExtra uintptr + FpCache uintptr + FpDirty uintptr + FpPager uintptr + Fpgno Pgno + Fflags U16 + F__ccgo_pad1 [2]byte + FnRef I64 + FpDirtyNext uintptr + FpDirtyPrev uintptr } // Handle type for pages. 
@@ -6096,7 +6098,7 @@ FpDirty uintptr FpDirtyTail uintptr FpSynced uintptr - FnRefSum int32 + FnRefSum I64 FszCache int32 FszSpill int32 FszPage int32 @@ -6895,7 +6897,7 @@ _ = pMutex if op < 0 || op >= int32(uint32(unsafe.Sizeof([10]Sqlite3StatValueType{}))/uint32(unsafe.Sizeof(Sqlite3StatValueType(0)))) { - return Xsqlite3MisuseError(tls, 23229) + return Xsqlite3MisuseError(tls, 23233) } if statMutex[op] != 0 { pMutex = Xsqlite3Pcache1Mutex(tls) @@ -15069,7 +15071,7 @@ for p = (*UnixInodeInfo)(unsafe.Pointer(pInode)).FpUnused; p != 0; p = pNext { pNext = (*UnixUnusedFd)(unsafe.Pointer(p)).FpNext - robust_close(tls, pFile, (*UnixUnusedFd)(unsafe.Pointer(p)).Ffd, 38271) + robust_close(tls, pFile, (*UnixUnusedFd)(unsafe.Pointer(p)).Ffd, 38275) Xsqlite3_free(tls, p) } (*UnixInodeInfo)(unsafe.Pointer(pInode)).FpUnused = uintptr(0) @@ -15546,7 +15548,7 @@ var pFile uintptr = id unixUnmapfile(tls, pFile) if (*UnixFile)(unsafe.Pointer(pFile)).Fh >= 0 { - robust_close(tls, pFile, (*UnixFile)(unsafe.Pointer(pFile)).Fh, 39055) + robust_close(tls, pFile, (*UnixFile)(unsafe.Pointer(pFile)).Fh, 39059) (*UnixFile)(unsafe.Pointer(pFile)).Fh = -1 } @@ -15837,7 +15839,7 @@ if fd >= 0 { return SQLITE_OK } - return unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 40676), ts+3371, bp+8, 40676) + return unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 40680), ts+3371, bp+8, 40680) } func unixSync(tls *libc.TLS, id uintptr, flags int32) int32 { @@ -15854,14 +15856,14 @@ if rc != 0 { storeLastErrno(tls, pFile, *(*int32)(unsafe.Pointer(libc.X__error(tls)))) - return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(4)<<8, ts+3659, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40717) + return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(4)<<8, ts+3659, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40721) } if int32((*UnixFile)(unsafe.Pointer(pFile)).FctrlFlags)&UNIXFILE_DIRSYNC != 0 { rc = (*(*func(*libc.TLS, uintptr, uintptr) int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 17*12 + 4)))(tls, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, bp) if rc == SQLITE_OK { full_fsync(tls, *(*int32)(unsafe.Pointer(bp)), 0, 0) - robust_close(tls, pFile, *(*int32)(unsafe.Pointer(bp)), 40731) + robust_close(tls, pFile, *(*int32)(unsafe.Pointer(bp)), 40735) } else { rc = SQLITE_OK } @@ -15881,7 +15883,7 @@ rc = robust_ftruncate(tls, (*UnixFile)(unsafe.Pointer(pFile)).Fh, nByte) if rc != 0 { storeLastErrno(tls, pFile, *(*int32)(unsafe.Pointer(libc.X__error(tls)))) - return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(6)<<8, ts+3290, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40762) + return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(6)<<8, ts+3290, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40766) } else { if nByte < (*UnixFile)(unsafe.Pointer(pFile)).FmmapSize { (*UnixFile)(unsafe.Pointer(pFile)).FmmapSize = nByte @@ -15949,7 +15951,7 @@ if (*UnixFile)(unsafe.Pointer(pFile)).FszChunk <= 0 { if robust_ftruncate(tls, (*UnixFile)(unsafe.Pointer(pFile)).Fh, nByte) != 0 { storeLastErrno(tls, pFile, *(*int32)(unsafe.Pointer(libc.X__error(tls)))) - return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(6)<<8, ts+3290, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40883) + return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(6)<<8, ts+3290, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40887) } } @@ -16176,7 +16178,7 @@ } Xsqlite3_free(tls, (*UnixShmNode)(unsafe.Pointer(p)).FapRegion) if (*UnixShmNode)(unsafe.Pointer(p)).FhShm >= 0 { - robust_close(tls, pFd, (*UnixShmNode)(unsafe.Pointer(p)).FhShm, 41442) + robust_close(tls, pFd, 
(*UnixShmNode)(unsafe.Pointer(p)).FhShm, 41446) (*UnixShmNode)(unsafe.Pointer(p)).FhShm = -1 } (*UnixInodeInfo)(unsafe.Pointer((*UnixShmNode)(unsafe.Pointer(p)).FpInode)).FpShmNode = uintptr(0) @@ -16204,7 +16206,7 @@ rc = unixShmSystemLock(tls, pDbFd, F_WRLCK, (22+SQLITE_SHM_NLOCK)*4+SQLITE_SHM_NLOCK, 1) if rc == SQLITE_OK && robust_ftruncate(tls, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FhShm, int64(3)) != 0 { - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(18)<<8, ts+3290, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename, 41499) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(18)<<8, ts+3290, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename, 41503) } } } else if int32((*flock)(unsafe.Pointer(bp+8)).Fl_type) == F_WRLCK { @@ -16303,7 +16305,7 @@ if !((*unixShmNode)(unsafe.Pointer(pShmNode)).FhShm < 0) { goto __10 } - rc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 41624), ts+3254, zShm, 41624) + rc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 41628), ts+3254, zShm, 41628) goto shm_open_err __10: ; @@ -16433,7 +16435,7 @@ goto __14 } zFile = (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(19)<<8, ts+3325, zFile, 41768) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(19)<<8, ts+3325, zFile, 41772) goto shmpage_out __14: ; @@ -16479,7 +16481,7 @@ if !(pMem == libc.UintptrFromInt32(-1)) { goto __20 } - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(21)<<8, ts+3412, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename, 41795) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(21)<<8, ts+3412, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename, 41799) goto shmpage_out __20: ; @@ -16718,7 +16720,7 @@ if pNew == libc.UintptrFromInt32(-1) { pNew = uintptr(0) nNew = int64(0) - unixLogErrorAtLine(tls, SQLITE_OK, zErr, (*UnixFile)(unsafe.Pointer(pFd)).FzPath, 42169) + unixLogErrorAtLine(tls, SQLITE_OK, zErr, (*UnixFile)(unsafe.Pointer(pFd)).FzPath, 42173) (*UnixFile)(unsafe.Pointer(pFd)).FmmapSizeMax = int64(0) } @@ -16852,7 +16854,7 @@ unixEnterMutex(tls) rc = findInodeInfo(tls, pNew, pNew+8) if rc != SQLITE_OK { - robust_close(tls, pNew, h, 42672) + robust_close(tls, pNew, h, 42676) h = -1 } unixLeaveMutex(tls) @@ -16873,7 +16875,7 @@ storeLastErrno(tls, pNew, 0) if rc != SQLITE_OK { if h >= 0 { - robust_close(tls, pNew, h, 42757) + robust_close(tls, pNew, h, 42761) } } else { (*Sqlite3_file)(unsafe.Pointer(pId)).FpMethods = pLockingStyle @@ -17189,7 +17191,7 @@ if !(fd < 0) { goto __19 } - rc2 = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43198), ts+3254, zName, 43198) + rc2 = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43202), ts+3254, zName, 43202) if !(rc == SQLITE_OK) { goto __20 } @@ -17280,7 +17282,7 @@ if *(*int32)(unsafe.Pointer(libc.X__error(tls))) == ENOENT { rc = SQLITE_IOERR | int32(23)<<8 } else { - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(10)<<8, ts+3364, zPath, 43337) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(10)<<8, ts+3364, zPath, 43341) } return rc } @@ -17288,9 +17290,9 @@ rc = (*(*func(*libc.TLS, uintptr, uintptr) int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 17*12 + 4)))(tls, zPath, bp) if rc == SQLITE_OK { if full_fsync(tls, *(*int32)(unsafe.Pointer(bp)), 0, 0) != 0 { - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(5)<<8, ts+3781, zPath, 43347) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(5)<<8, ts+3781, zPath, 43351) } - robust_close(tls, uintptr(0), *(*int32)(unsafe.Pointer(bp)), 43349) + robust_close(tls, uintptr(0), 
*(*int32)(unsafe.Pointer(bp)), 43353) } else { rc = SQLITE_OK } @@ -17354,18 +17356,18 @@ zIn = (*DbPath)(unsafe.Pointer(pPath)).FzOut if (*(*func(*libc.TLS, uintptr, uintptr) int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 27*12 + 4)))(tls, zIn, bp) != 0 { if *(*int32)(unsafe.Pointer(libc.X__error(tls))) != ENOENT { - (*DbPath)(unsafe.Pointer(pPath)).Frc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43443), ts+3452, zIn, 43443) + (*DbPath)(unsafe.Pointer(pPath)).Frc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43447), ts+3452, zIn, 43447) } } else if int32((*stat)(unsafe.Pointer(bp)).Fst_mode)&0170000 == 0120000 { var got Ssize_t if libc.PostIncInt32(&(*DbPath)(unsafe.Pointer(pPath)).FnSymlink, 1) > SQLITE_MAX_SYMLINK { - (*DbPath)(unsafe.Pointer(pPath)).Frc = Xsqlite3CantopenError(tls, 43449) + (*DbPath)(unsafe.Pointer(pPath)).Frc = Xsqlite3CantopenError(tls, 43453) return } got = (*(*func(*libc.TLS, uintptr, uintptr, Size_t) Ssize_t)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*12 + 4)))(tls, zIn, bp+208, uint32(unsafe.Sizeof([1026]int8{}))-uint32(2)) if got <= 0 || got >= Ssize_t(unsafe.Sizeof([1026]int8{}))-2 { - (*DbPath)(unsafe.Pointer(pPath)).Frc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43454), ts+3443, zIn, 43454) + (*DbPath)(unsafe.Pointer(pPath)).Frc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43458), ts+3443, zIn, 43458) return } *(*int8)(unsafe.Pointer(bp + 208 + uintptr(got))) = int8(0) @@ -17405,14 +17407,14 @@ (*DbPath)(unsafe.Pointer(bp + 1028)).FzOut = zOut if int32(*(*int8)(unsafe.Pointer(zPath))) != '/' { if (*(*func(*libc.TLS, uintptr, Size_t) uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 3*12 + 4)))(tls, bp, uint32(unsafe.Sizeof([1026]int8{}))-uint32(2)) == uintptr(0) { - return unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43512), ts+3272, zPath, 43512) + return unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43516), ts+3272, zPath, 43516) } appendAllPathElements(tls, bp+1028, bp) } appendAllPathElements(tls, bp+1028, zPath) *(*int8)(unsafe.Pointer(zOut + uintptr((*DbPath)(unsafe.Pointer(bp+1028)).FnUsed))) = int8(0) if (*DbPath)(unsafe.Pointer(bp+1028)).Frc != 0 || (*DbPath)(unsafe.Pointer(bp+1028)).FnUsed < 2 { - return Xsqlite3CantopenError(tls, 43518) + return Xsqlite3CantopenError(tls, 43522) } if (*DbPath)(unsafe.Pointer(bp+1028)).FnSymlink != 0 { return SQLITE_OK | int32(2)<<8 @@ -17507,7 +17509,7 @@ for __ccgo := true; __ccgo; __ccgo = got < 0 && *(*int32)(unsafe.Pointer(libc.X__error(tls))) == EINTR { got = (*(*func(*libc.TLS, int32, uintptr, Size_t) Ssize_t)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 8*12 + 4)))(tls, fd, zBuf, uint32(nBuf)) } - robust_close(tls, uintptr(0), fd, 43619) + robust_close(tls, uintptr(0), fd, 43623) } } @@ -18937,7 +18939,7 @@ libc.Xmemset(tls, pPgHdr+16, 0, uint32(unsafe.Sizeof(PgHdr{}))-uint32(uintptr(0)+16)) (*PgHdr)(unsafe.Pointer(pPgHdr)).FpPage = pPage (*PgHdr)(unsafe.Pointer(pPgHdr)).FpData = (*Sqlite3_pcache_page)(unsafe.Pointer(pPage)).FpBuf - (*PgHdr)(unsafe.Pointer(pPgHdr)).FpExtra = pPgHdr + 1*40 + (*PgHdr)(unsafe.Pointer(pPgHdr)).FpExtra = pPgHdr + 1*48 libc.Xmemset(tls, (*PgHdr)(unsafe.Pointer(pPgHdr)).FpExtra, 0, uint32(8)) (*PgHdr)(unsafe.Pointer(pPgHdr)).FpCache = pCache (*PgHdr)(unsafe.Pointer(pPgHdr)).Fpgno = pgno @@ -18967,7 +18969,7 @@ // reference count drops to 0, then it is made eligible for recycling. 
func Xsqlite3PcacheRelease(tls *libc.TLS, p uintptr) { (*PCache)(unsafe.Pointer((*PgHdr)(unsafe.Pointer(p)).FpCache)).FnRefSum-- - if int32(libc.PreDecInt16(&(*PgHdr)(unsafe.Pointer(p)).FnRef, 1)) == 0 { + if libc.PreDecInt64(&(*PgHdr)(unsafe.Pointer(p)).FnRef, 1) == int64(0) { if int32((*PgHdr)(unsafe.Pointer(p)).Fflags)&PGHDR_CLEAN != 0 { pcacheUnpin(tls, p) } else { @@ -19018,7 +19020,7 @@ *(*U16)(unsafe.Pointer(p + 28)) &= libc.Uint16FromInt32(libc.CplInt32(PGHDR_DIRTY | PGHDR_NEED_SYNC | PGHDR_WRITEABLE)) *(*U16)(unsafe.Pointer(p + 28)) |= U16(PGHDR_CLEAN) - if int32((*PgHdr)(unsafe.Pointer(p)).FnRef) == 0 { + if (*PgHdr)(unsafe.Pointer(p)).FnRef == int64(0) { pcacheUnpin(tls, p) } } @@ -19122,8 +19124,8 @@ } func pcacheMergeDirtyList(tls *libc.TLS, pA uintptr, pB uintptr) uintptr { - bp := tls.Alloc(40) - defer tls.Free(40) + bp := tls.Alloc(48) + defer tls.Free(48) var pTail uintptr pTail = bp @@ -19201,13 +19203,13 @@ // // This is not the total number of pages referenced, but the sum of the // reference count for all pages. -func Xsqlite3PcacheRefCount(tls *libc.TLS, pCache uintptr) int32 { +func Xsqlite3PcacheRefCount(tls *libc.TLS, pCache uintptr) I64 { return (*PCache)(unsafe.Pointer(pCache)).FnRefSum } // Return the number of references to the page supplied as an argument. -func Xsqlite3PcachePageRefcount(tls *libc.TLS, p uintptr) int32 { - return int32((*PgHdr)(unsafe.Pointer(p)).FnRef) +func Xsqlite3PcachePageRefcount(tls *libc.TLS, p uintptr) I64 { + return (*PgHdr)(unsafe.Pointer(p)).FnRef } // Return the total number of pages in the cache. @@ -21498,7 +21500,7 @@ pPg = Xsqlite3PagerLookup(tls, pPager, iPg) if pPg != 0 { - if Xsqlite3PcachePageRefcount(tls, pPg) == 1 { + if Xsqlite3PcachePageRefcount(tls, pPg) == int64(1) { Xsqlite3PcacheDrop(tls, pPg) } else { rc = readDbPage(tls, pPg) @@ -21931,7 +21933,7 @@ var pageSize U32 = *(*U32)(unsafe.Pointer(pPageSize)) if (int32((*Pager)(unsafe.Pointer(pPager)).FmemDb) == 0 || (*Pager)(unsafe.Pointer(pPager)).FdbSize == Pgno(0)) && - Xsqlite3PcacheRefCount(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache) == 0 && + Xsqlite3PcacheRefCount(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache) == int64(0) && pageSize != 0 && pageSize != U32((*Pager)(unsafe.Pointer(pPager)).FpageSize) { var pNew uintptr = uintptr(0) *(*I64)(unsafe.Pointer(bp)) = int64(0) @@ -22083,9 +22085,9 @@ Xsqlite3OsUnfetch(tls, (*Pager)(unsafe.Pointer(pPager)).Ffd, I64(pgno-Pgno(1))*(*Pager)(unsafe.Pointer(pPager)).FpageSize, pData) return SQLITE_NOMEM } - (*PgHdr)(unsafe.Pointer(p)).FpExtra = p + 1*40 + (*PgHdr)(unsafe.Pointer(p)).FpExtra = p + 1*48 (*PgHdr)(unsafe.Pointer(p)).Fflags = U16(PGHDR_MMAP) - (*PgHdr)(unsafe.Pointer(p)).FnRef = int16(1) + (*PgHdr)(unsafe.Pointer(p)).FnRef = int64(1) (*PgHdr)(unsafe.Pointer(p)).FpPager = pPager } @@ -22417,7 +22419,7 @@ for rc == SQLITE_OK && pList != 0 { var pNext uintptr = (*PgHdr)(unsafe.Pointer(pList)).FpDirty - if int32((*PgHdr)(unsafe.Pointer(pList)).FnRef) == 0 { + if (*PgHdr)(unsafe.Pointer(pList)).FnRef == int64(0) { rc = pagerStress(tls, pPager, pList) } pList = pNext @@ -22567,7 +22569,7 @@ goto __12 } - rc = Xsqlite3CantopenError(tls, 60235) + rc = Xsqlite3CantopenError(tls, 60239) __12: ; if !(rc != SQLITE_OK) { @@ -22948,7 +22950,7 @@ if !(rc == SQLITE_OK && *(*int32)(unsafe.Pointer(bp + 8))&SQLITE_OPEN_READONLY != 0) { goto __10 } - rc = Xsqlite3CantopenError(tls, 60754) + rc = Xsqlite3CantopenError(tls, 60758) Xsqlite3OsClose(tls, (*Pager)(unsafe.Pointer(pPager)).Fjfd) __10: ; @@ -23054,7 +23056,7 @@ } func 
pagerUnlockIfUnused(tls *libc.TLS, pPager uintptr) { - if Xsqlite3PcacheRefCount(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache) == 0 { + if Xsqlite3PcacheRefCount(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache) == int64(0) { pagerUnlockAndRollback(tls, pPager) } } @@ -23072,7 +23074,7 @@ if !(pgno == Pgno(0)) { goto __1 } - return Xsqlite3CorruptError(tls, 60967) + return Xsqlite3CorruptError(tls, 60971) __1: ; *(*uintptr)(unsafe.Pointer(bp)) = Xsqlite3PcacheFetch(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache, pgno, 3) @@ -23111,7 +23113,7 @@ if !(pgno == (*Pager)(unsafe.Pointer(pPager)).FlckPgno) { goto __7 } - rc = Xsqlite3CorruptError(tls, 60999) + rc = Xsqlite3CorruptError(tls, 61003) goto pager_acquire_err __7: ; @@ -23188,7 +23190,7 @@ (int32((*Pager)(unsafe.Pointer(pPager)).FeState) == PAGER_READER || flags&PAGER_GET_READONLY != 0)) if pgno <= Pgno(1) && pgno == Pgno(0) { - return Xsqlite3CorruptError(tls, 61078) + return Xsqlite3CorruptError(tls, 61082) } if bMmapOk != 0 && (*Pager)(unsafe.Pointer(pPager)).FpWal != uintptr(0) { @@ -23946,7 +23948,7 @@ // Return the number of references to the specified page. func Xsqlite3PagerPageRefcount(tls *libc.TLS, pPage uintptr) int32 { - return Xsqlite3PcachePageRefcount(tls, pPage) + return int32(Xsqlite3PcachePageRefcount(tls, pPage)) } // Parameter eStat must be one of SQLITE_DBSTATUS_CACHE_HIT, _MISS, _WRITE, @@ -24189,9 +24191,9 @@ pPgOld = Xsqlite3PagerLookup(tls, pPager, pgno) if pPgOld != 0 { - if int32((*PgHdr)(unsafe.Pointer(pPgOld)).FnRef) > 1 { + if (*PgHdr)(unsafe.Pointer(pPgOld)).FnRef > int64(1) { Xsqlite3PagerUnrefNotNull(tls, pPgOld) - return Xsqlite3CorruptError(tls, 62623) + return Xsqlite3CorruptError(tls, 62627) } *(*U16)(unsafe.Pointer(pPg + 28)) |= U16(int32((*PgHdr)(unsafe.Pointer(pPgOld)).Fflags) & PGHDR_NEED_SYNC) if (*Pager)(unsafe.Pointer(pPager)).FtempFile != 0 { @@ -24945,7 +24947,7 @@ nCollide = idx for iKey = walHash(tls, iPage); *(*Ht_slot)(unsafe.Pointer((*WalHashLoc)(unsafe.Pointer(bp)).FaHash + uintptr(iKey)*2)) != 0; iKey = walNextHash(tls, iKey) { if libc.PostDecInt32(&nCollide, 1) == 0 { - return Xsqlite3CorruptError(tls, 64387) + return Xsqlite3CorruptError(tls, 64391) } } *(*U32)(unsafe.Pointer((*WalHashLoc)(unsafe.Pointer(bp)).FaPgno + uintptr(idx-1)*4)) = iPage @@ -25044,7 +25046,7 @@ if !(version != U32(WAL_MAX_VERSION)) { goto __7 } - rc = Xsqlite3CantopenError(tls, 64519) + rc = Xsqlite3CantopenError(tls, 64523) goto finished __7: ; @@ -25629,7 +25631,7 @@ goto __14 } - rc = Xsqlite3CorruptError(tls, 65333) + rc = Xsqlite3CorruptError(tls, 65337) goto __15 __14: Xsqlite3OsFileControlHint(tls, (*Wal)(unsafe.Pointer(pWal)).FpDbFd, SQLITE_FCNTL_SIZE_HINT, bp+12) @@ -25904,7 +25906,7 @@ } if badHdr == 0 && (*Wal)(unsafe.Pointer(pWal)).Fhdr.FiVersion != U32(WALINDEX_MAX_VERSION) { - rc = Xsqlite3CantopenError(tls, 65682) + rc = Xsqlite3CantopenError(tls, 65686) } if (*Wal)(unsafe.Pointer(pWal)).FbShmUnreliable != 0 { if rc != SQLITE_OK { @@ -26377,7 +26379,7 @@ iRead = iFrame } if libc.PostDecInt32(&nCollide, 1) == 0 { - return Xsqlite3CorruptError(tls, 66419) + return Xsqlite3CorruptError(tls, 66423) } iKey = walNextHash(tls, iKey) } @@ -26882,7 +26884,7 @@ if rc == SQLITE_OK { if (*Wal)(unsafe.Pointer(pWal)).Fhdr.FmxFrame != 0 && walPagesize(tls, pWal) != nBuf { - rc = Xsqlite3CorruptError(tls, 67138) + rc = Xsqlite3CorruptError(tls, 67142) } else { rc = walCheckpoint(tls, pWal, db, eMode2, xBusy2, pBusyArg, sync_flags, zBuf) } @@ -27538,7 +27540,7 @@ } Xsqlite3VdbeRecordUnpack(tls, pKeyInfo, 
int32(nKey), pKey, pIdxKey) if int32((*UnpackedRecord)(unsafe.Pointer(pIdxKey)).FnField) == 0 || int32((*UnpackedRecord)(unsafe.Pointer(pIdxKey)).FnField) > int32((*KeyInfo)(unsafe.Pointer(pKeyInfo)).FnAllField) { - rc = Xsqlite3CorruptError(tls, 69249) + rc = Xsqlite3CorruptError(tls, 69253) } else { rc = Xsqlite3BtreeIndexMoveto(tls, pCur, pIdxKey, pRes) } @@ -27675,7 +27677,7 @@ if !(key == Pgno(0)) { goto __2 } - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69430) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69434) return __2: ; @@ -27692,7 +27694,7 @@ goto __4 } - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69443) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69447) goto ptrmap_exit __4: ; @@ -27700,7 +27702,7 @@ if !(offset < 0) { goto __5 } - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69448) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69452) goto ptrmap_exit __5: ; @@ -27743,7 +27745,7 @@ offset = int32(Pgno(5) * (key - Pgno(iPtrmap) - Pgno(1))) if offset < 0 { Xsqlite3PagerUnref(tls, *(*uintptr)(unsafe.Pointer(bp))) - return Xsqlite3CorruptError(tls, 69493) + return Xsqlite3CorruptError(tls, 69497) } *(*U8)(unsafe.Pointer(pEType)) = *(*U8)(unsafe.Pointer(pPtrmap + uintptr(offset))) @@ -27753,7 +27755,7 @@ Xsqlite3PagerUnref(tls, *(*uintptr)(unsafe.Pointer(bp))) if int32(*(*U8)(unsafe.Pointer(pEType))) < 1 || int32(*(*U8)(unsafe.Pointer(pEType))) > 5 { - return Xsqlite3CorruptError(tls, 69501) + return Xsqlite3CorruptError(tls, 69505) } return SQLITE_OK } @@ -28003,7 +28005,7 @@ if U32((*CellInfo)(unsafe.Pointer(bp)).FnLocal) < (*CellInfo)(unsafe.Pointer(bp)).FnPayload { var ovfl Pgno if Uptr((*MemPage)(unsafe.Pointer(pSrc)).FaDataEnd) >= Uptr(pCell) && Uptr((*MemPage)(unsafe.Pointer(pSrc)).FaDataEnd) < Uptr(pCell+uintptr((*CellInfo)(unsafe.Pointer(bp)).FnLocal)) { - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69893) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69897) return } ovfl = Xsqlite3Get4byte(tls, pCell+uintptr(int32((*CellInfo)(unsafe.Pointer(bp)).FnSize)-4)) @@ -28050,7 +28052,7 @@ if !(iFree > usableSize-4) { goto __2 } - return Xsqlite3CorruptError(tls, 69951) + return Xsqlite3CorruptError(tls, 69955) __2: ; if !(iFree != 0) { @@ -28060,7 +28062,7 @@ if !(iFree2 > usableSize-4) { goto __4 } - return Xsqlite3CorruptError(tls, 69954) + return Xsqlite3CorruptError(tls, 69958) __4: ; if !(0 == iFree2 || int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree2)))) == 0 && int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree2+1)))) == 0) { @@ -28073,7 +28075,7 @@ if !(top >= iFree) { goto __6 } - return Xsqlite3CorruptError(tls, 69962) + return Xsqlite3CorruptError(tls, 69966) __6: ; if !(iFree2 != 0) { @@ -28082,14 +28084,14 @@ if !(iFree+sz > iFree2) { goto __9 } - return Xsqlite3CorruptError(tls, 69965) + return Xsqlite3CorruptError(tls, 69969) __9: ; sz2 = int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree2+2))))<<8 | int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree2+2) + 1))) if !(iFree2+sz2 > usableSize) { goto __10 } - return Xsqlite3CorruptError(tls, 69967) + return Xsqlite3CorruptError(tls, 69971) __10: ; libc.Xmemmove(tls, data+uintptr(iFree+sz+sz2), data+uintptr(iFree+sz), uint32(iFree2-(iFree+sz))) @@ -28099,7 +28101,7 @@ if !(iFree+sz > usableSize) { goto __11 } - return Xsqlite3CorruptError(tls, 69971) + return Xsqlite3CorruptError(tls, 69975) __11: ; __8: @@ -28163,7 +28165,7 @@ if !(pc < iCellStart || pc > iCellLast) { goto __22 } - return 
Xsqlite3CorruptError(tls, 70004) + return Xsqlite3CorruptError(tls, 70008) __22: ; size = int32((*struct { @@ -28173,7 +28175,7 @@ if !(cbrk < iCellStart || pc+size > usableSize) { goto __23 } - return Xsqlite3CorruptError(tls, 70010) + return Xsqlite3CorruptError(tls, 70014) __23: ; *(*U8)(unsafe.Pointer(pAddr1)) = U8(cbrk >> 8) @@ -28195,7 +28197,7 @@ if !(int32(*(*uint8)(unsafe.Pointer(data + uintptr(hdr+7))))+cbrk-iCellFirst != (*MemPage)(unsafe.Pointer(pPage)).FnFree) { goto __24 } - return Xsqlite3CorruptError(tls, 70024) + return Xsqlite3CorruptError(tls, 70028) __24: ; *(*uint8)(unsafe.Pointer(data + uintptr(hdr+5))) = U8(cbrk >> 8) @@ -28230,7 +28232,7 @@ *(*U8)(unsafe.Pointer(aData + uintptr(hdr+7))) += U8(int32(U8(x))) return aData + uintptr(pc) } else if x+pc > maxPC { - *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70081) + *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70085) return uintptr(0) } else { *(*U8)(unsafe.Pointer(aData + uintptr(pc+2))) = U8(x >> 8) @@ -28243,13 +28245,13 @@ pc = int32(*(*U8)(unsafe.Pointer(pTmp)))<<8 | int32(*(*U8)(unsafe.Pointer(pTmp + 1))) if pc <= iAddr { if pc != 0 { - *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70096) + *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70100) } return uintptr(0) } } if pc > maxPC+nByte-4 { - *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70103) + *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70107) } return uintptr(0) } @@ -28274,7 +28276,7 @@ if top == 0 && (*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize == U32(65536) { top = 65536 } else { - return Xsqlite3CorruptError(tls, 70152) + return Xsqlite3CorruptError(tls, 70156) } } @@ -28285,7 +28287,7 @@ *(*int32)(unsafe.Pointer(pIdx)) = libc.AssignInt32(&g2, (int32(pSpace)-int32(data))/1) if g2 <= gap { - return Xsqlite3CorruptError(tls, 70170) + return Xsqlite3CorruptError(tls, 70174) } else { return SQLITE_OK } @@ -28337,22 +28339,22 @@ if int32(iFreeBlk) == 0 { break } - return Xsqlite3CorruptError(tls, 70249) + return Xsqlite3CorruptError(tls, 70253) } iPtr = iFreeBlk } if U32(iFreeBlk) > (*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize-U32(4) { - return Xsqlite3CorruptError(tls, 70254) + return Xsqlite3CorruptError(tls, 70258) } if iFreeBlk != 0 && iEnd+U32(3) >= U32(iFreeBlk) { nFrag = U8(U32(iFreeBlk) - iEnd) if iEnd > U32(iFreeBlk) { - return Xsqlite3CorruptError(tls, 70266) + return Xsqlite3CorruptError(tls, 70270) } iEnd = U32(int32(iFreeBlk) + (int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(iFreeBlk)+2))))<<8 | int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(iFreeBlk)+2) + 1))))) if iEnd > (*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize { - return Xsqlite3CorruptError(tls, 70269) + return Xsqlite3CorruptError(tls, 70273) } iSize = U16(iEnd - U32(iStart)) iFreeBlk = U16(int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFreeBlk))))<<8 | int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFreeBlk) + 1)))) @@ -28362,7 +28364,7 @@ var iPtrEnd int32 = int32(iPtr) + (int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(iPtr)+2))))<<8 | int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(iPtr)+2) + 1)))) if iPtrEnd+3 >= int32(iStart) { if iPtrEnd > int32(iStart) { - return Xsqlite3CorruptError(tls, 70282) + return Xsqlite3CorruptError(tls, 70286) } nFrag = U8(int32(nFrag) + (int32(iStart) - iPtrEnd)) iSize = U16(iEnd - U32(iPtr)) @@ -28370,7 +28372,7 @@ } } if int32(nFrag) > 
int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+7)))) { - return Xsqlite3CorruptError(tls, 70288) + return Xsqlite3CorruptError(tls, 70292) } *(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+7))) -= uint8(int32(nFrag)) } @@ -28378,10 +28380,10 @@ x = U16(int32(*(*U8)(unsafe.Pointer(pTmp)))<<8 | int32(*(*U8)(unsafe.Pointer(pTmp + 1)))) if int32(iStart) <= int32(x) { if int32(iStart) < int32(x) { - return Xsqlite3CorruptError(tls, 70297) + return Xsqlite3CorruptError(tls, 70301) } if int32(iPtr) != int32(hdr)+1 { - return Xsqlite3CorruptError(tls, 70298) + return Xsqlite3CorruptError(tls, 70302) } *(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+1))) = U8(int32(iFreeBlk) >> 8) *(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+1) + 1)) = U8(iFreeBlk) @@ -28441,7 +28443,7 @@ (*MemPage)(unsafe.Pointer(pPage)).FxParseCell = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr) }{btreeParseCellPtrIndex})) - return Xsqlite3CorruptError(tls, 70357) + return Xsqlite3CorruptError(tls, 70361) } } else { (*MemPage)(unsafe.Pointer(pPage)).FchildPtrSize = U8(4) @@ -28477,7 +28479,7 @@ (*MemPage)(unsafe.Pointer(pPage)).FxParseCell = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr) }{btreeParseCellPtrIndex})) - return Xsqlite3CorruptError(tls, 70381) + return Xsqlite3CorruptError(tls, 70385) } } return SQLITE_OK @@ -28507,11 +28509,11 @@ var next U32 var size U32 if pc < top { - return Xsqlite3CorruptError(tls, 70432) + return Xsqlite3CorruptError(tls, 70436) } for 1 != 0 { if pc > iCellLast { - return Xsqlite3CorruptError(tls, 70437) + return Xsqlite3CorruptError(tls, 70441) } next = U32(int32(*(*U8)(unsafe.Pointer(data + uintptr(pc))))<<8 | int32(*(*U8)(unsafe.Pointer(data + uintptr(pc) + 1)))) size = U32(int32(*(*U8)(unsafe.Pointer(data + uintptr(pc+2))))<<8 | int32(*(*U8)(unsafe.Pointer(data + uintptr(pc+2) + 1)))) @@ -28522,15 +28524,15 @@ pc = int32(next) } if next > U32(0) { - return Xsqlite3CorruptError(tls, 70447) + return Xsqlite3CorruptError(tls, 70451) } if U32(pc)+size > uint32(usableSize) { - return Xsqlite3CorruptError(tls, 70451) + return Xsqlite3CorruptError(tls, 70455) } } if nFree > usableSize || nFree < iCellFirst { - return Xsqlite3CorruptError(tls, 70463) + return Xsqlite3CorruptError(tls, 70467) } (*MemPage)(unsafe.Pointer(pPage)).FnFree = int32(U16(nFree - iCellFirst)) return SQLITE_OK @@ -28558,14 +28560,14 @@ pc = int32(*(*U8)(unsafe.Pointer(data + uintptr(cellOffset+i*2))))<<8 | int32(*(*U8)(unsafe.Pointer(data + uintptr(cellOffset+i*2) + 1))) if pc < iCellFirst || pc > iCellLast { - return Xsqlite3CorruptError(tls, 70494) + return Xsqlite3CorruptError(tls, 70498) } sz = int32((*struct { f func(*libc.TLS, uintptr, uintptr) U16 })(unsafe.Pointer(&struct{ uintptr }{(*MemPage)(unsafe.Pointer(pPage)).FxCellSize})).f(tls, pPage, data+uintptr(pc))) if pc+sz > usableSize { - return Xsqlite3CorruptError(tls, 70499) + return Xsqlite3CorruptError(tls, 70503) } } return SQLITE_OK @@ -28579,7 +28581,7 @@ data = (*MemPage)(unsafe.Pointer(pPage)).FaData + uintptr((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset) if decodeFlags(tls, pPage, int32(*(*U8)(unsafe.Pointer(data)))) != 0 { - return Xsqlite3CorruptError(tls, 70531) + return Xsqlite3CorruptError(tls, 70535) } (*MemPage)(unsafe.Pointer(pPage)).FmaskPage = U16((*BtShared)(unsafe.Pointer(pBt)).FpageSize - U32(1)) @@ -28591,7 +28593,7 @@ (*MemPage)(unsafe.Pointer(pPage)).FnCell = U16(int32(*(*U8)(unsafe.Pointer(data + 3)))<<8 | int32(*(*U8)(unsafe.Pointer(data + 3 + 
1)))) if U32((*MemPage)(unsafe.Pointer(pPage)).FnCell) > ((*BtShared)(unsafe.Pointer(pBt)).FpageSize-U32(8))/U32(6) { - return Xsqlite3CorruptError(tls, 70545) + return Xsqlite3CorruptError(tls, 70549) } (*MemPage)(unsafe.Pointer(pPage)).FnFree = -1 @@ -28694,7 +28696,7 @@ if !(pgno > btreePagecount(tls, pBt)) { goto __1 } - rc = Xsqlite3CorruptError(tls, 70700) + rc = Xsqlite3CorruptError(tls, 70704) goto getAndInitPage_error1 __1: ; @@ -28722,7 +28724,7 @@ if !(pCur != 0 && (int32((*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppPage)))).FnCell) < 1 || int32((*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppPage)))).FintKey) != int32((*BtCursor)(unsafe.Pointer(pCur)).FcurIntKey))) { goto __5 } - rc = Xsqlite3CorruptError(tls, 70721) + rc = Xsqlite3CorruptError(tls, 70725) goto getAndInitPage_error2 __5: ; @@ -28761,7 +28763,7 @@ if Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppPage)))).FpDbPage) > 1 { releasePage(tls, *(*uintptr)(unsafe.Pointer(ppPage))) *(*uintptr)(unsafe.Pointer(ppPage)) = uintptr(0) - return Xsqlite3CorruptError(tls, 70787) + return Xsqlite3CorruptError(tls, 70791) } (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppPage)))).FisInit = U8(0) } else { @@ -29644,7 +29646,7 @@ if !(Xsqlite3WritableSchema(tls, (*BtShared)(unsafe.Pointer(pBt)).Fdb) == 0) { goto __18 } - rc = Xsqlite3CorruptError(tls, 71722) + rc = Xsqlite3CorruptError(tls, 71726) goto page1_init_failed goto __19 __18: @@ -30059,7 +30061,7 @@ if int32(eType) == PTRMAP_OVERFLOW2 { if Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer(pPage)).FaData) != iFrom { - return Xsqlite3CorruptError(tls, 72143) + return Xsqlite3CorruptError(tls, 72147) } Xsqlite3Put4byte(tls, (*MemPage)(unsafe.Pointer(pPage)).FaData, iTo) } else { @@ -30085,7 +30087,7 @@ })(unsafe.Pointer(&struct{ uintptr }{(*MemPage)(unsafe.Pointer(pPage)).FxParseCell})).f(tls, pPage, pCell, bp) if U32((*CellInfo)(unsafe.Pointer(bp)).FnLocal) < (*CellInfo)(unsafe.Pointer(bp)).FnPayload { if pCell+uintptr((*CellInfo)(unsafe.Pointer(bp)).FnSize) > (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr((*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize) { - return Xsqlite3CorruptError(tls, 72162) + return Xsqlite3CorruptError(tls, 72166) } if iFrom == Xsqlite3Get4byte(tls, pCell+uintptr((*CellInfo)(unsafe.Pointer(bp)).FnSize)-uintptr(4)) { Xsqlite3Put4byte(tls, pCell+uintptr((*CellInfo)(unsafe.Pointer(bp)).FnSize)-uintptr(4), iTo) @@ -30094,7 +30096,7 @@ } } else { if pCell+uintptr(4) > (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr((*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize) { - return Xsqlite3CorruptError(tls, 72171) + return Xsqlite3CorruptError(tls, 72175) } if Xsqlite3Get4byte(tls, pCell) == iFrom { Xsqlite3Put4byte(tls, pCell, iTo) @@ -30105,7 +30107,7 @@ if i == nCell { if int32(eType) != PTRMAP_BTREE || Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr(int32((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset)+8)) != iFrom { - return Xsqlite3CorruptError(tls, 72183) + return Xsqlite3CorruptError(tls, 72187) } Xsqlite3Put4byte(tls, (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr(int32((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset)+8), iTo) } @@ -30121,7 +30123,7 @@ var pPager uintptr = (*BtShared)(unsafe.Pointer(pBt)).FpPager if iDbPage < Pgno(3) { - return Xsqlite3CorruptError(tls, 72218) + return Xsqlite3CorruptError(tls, 72222) } *(*int32)(unsafe.Pointer(bp)) = Xsqlite3PagerMovepage(tls, pPager, 
(*MemPage)(unsafe.Pointer(pDbPage)).FpDbPage, iFreePage, isCommit) @@ -30182,7 +30184,7 @@ return rc } if int32(*(*U8)(unsafe.Pointer(bp))) == PTRMAP_ROOTPAGE { - return Xsqlite3CorruptError(tls, 72316) + return Xsqlite3CorruptError(tls, 72320) } if int32(*(*U8)(unsafe.Pointer(bp))) == PTRMAP_FREEPAGE { @@ -30217,7 +30219,7 @@ releasePage(tls, *(*uintptr)(unsafe.Pointer(bp + 20))) if *(*Pgno)(unsafe.Pointer(bp + 24)) > dbSize { releasePage(tls, *(*uintptr)(unsafe.Pointer(bp + 16))) - return Xsqlite3CorruptError(tls, 72368) + return Xsqlite3CorruptError(tls, 72372) } } @@ -30277,7 +30279,7 @@ var nFin Pgno = finalDbSize(tls, pBt, nOrig, nFree) if nOrig < nFin || nFree >= nOrig { - rc = Xsqlite3CorruptError(tls, 72436) + rc = Xsqlite3CorruptError(tls, 72440) } else if nFree > Pgno(0) { rc = saveAllCursors(tls, pBt, uint32(0), uintptr(0)) if rc == SQLITE_OK { @@ -30316,7 +30318,7 @@ nOrig = btreePagecount(tls, pBt) if ptrmapPageno(tls, pBt, nOrig) == nOrig || nOrig == U32(Xsqlite3PendingByte)/(*BtShared)(unsafe.Pointer(pBt)).FpageSize+U32(1) { - return Xsqlite3CorruptError(tls, 72487) + return Xsqlite3CorruptError(tls, 72491) } nFree = Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer((*BtShared)(unsafe.Pointer(pBt)).FpPage1)).FaData+36) @@ -30347,7 +30349,7 @@ } nFin = finalDbSize(tls, pBt, nOrig, nVac) if nFin > nOrig { - return Xsqlite3CorruptError(tls, 72514) + return Xsqlite3CorruptError(tls, 72518) } if nFin < nOrig { rc = saveAllCursors(tls, pBt, uint32(0), uintptr(0)) @@ -30688,7 +30690,7 @@ if iTable <= Pgno(1) { if iTable < Pgno(1) { - return Xsqlite3CorruptError(tls, 72978) + return Xsqlite3CorruptError(tls, 72982) } else if btreePagecount(tls, pBt) == Pgno(0) { iTable = Pgno(0) } @@ -30932,14 +30934,14 @@ var pBt uintptr = (*BtCursor)(unsafe.Pointer(pCur)).FpBt if int32((*BtCursor)(unsafe.Pointer(pCur)).Fix) >= int32((*MemPage)(unsafe.Pointer(pPage)).FnCell) { - return Xsqlite3CorruptError(tls, 73385) + return Xsqlite3CorruptError(tls, 73389) } getCellInfo(tls, pCur) aPayload = (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload if Uptr((int32(aPayload)-int32((*MemPage)(unsafe.Pointer(pPage)).FaData))/1) > (*BtShared)(unsafe.Pointer(pBt)).FusableSize-U32((*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal) { - return Xsqlite3CorruptError(tls, 73400) + return Xsqlite3CorruptError(tls, 73404) } if offset < U32((*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal) { @@ -30984,7 +30986,7 @@ for *(*Pgno)(unsafe.Pointer(bp)) != 0 { if *(*Pgno)(unsafe.Pointer(bp)) > (*BtShared)(unsafe.Pointer(pBt)).FnPage { - return Xsqlite3CorruptError(tls, 73462) + return Xsqlite3CorruptError(tls, 73466) } *(*Pgno)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pCur)).FaOverflow + uintptr(iIdx)*4)) = *(*Pgno)(unsafe.Pointer(bp)) @@ -31033,7 +31035,7 @@ } if rc == SQLITE_OK && amt > U32(0) { - return Xsqlite3CorruptError(tls, 73547) + return Xsqlite3CorruptError(tls, 73551) } return rc } @@ -31113,7 +31115,7 @@ func moveToChild(tls *libc.TLS, pCur uintptr, newPgno U32) int32 { if int32((*BtCursor)(unsafe.Pointer(pCur)).FiPage) >= BTCURSOR_MAX_DEPTH-1 { - return Xsqlite3CorruptError(tls, 73684) + return Xsqlite3CorruptError(tls, 73688) } (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnSize = U16(0) *(*U8)(unsafe.Pointer(pCur + 1)) &= libc.Uint8FromInt32(libc.CplInt32(BTCF_ValidNKey | BTCF_ValidOvfl)) @@ -31204,7 +31206,7 @@ if !(int32((*MemPage)(unsafe.Pointer(pRoot)).FisInit) == 0 || libc.Bool32((*BtCursor)(unsafe.Pointer(pCur)).FpKeyInfo == uintptr(0)) != int32((*MemPage)(unsafe.Pointer(pRoot)).FintKey)) { goto __11 } - 
return Xsqlite3CorruptError(tls, 73823) + return Xsqlite3CorruptError(tls, 73827) __11: ; skip_init: @@ -31224,7 +31226,7 @@ if !((*MemPage)(unsafe.Pointer(pRoot)).Fpgno != Pgno(1)) { goto __16 } - return Xsqlite3CorruptError(tls, 73835) + return Xsqlite3CorruptError(tls, 73839) __16: ; subpage = Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer(pRoot)).FaData+uintptr(int32((*MemPage)(unsafe.Pointer(pRoot)).FhdrOffset)+8)) @@ -31434,7 +31436,7 @@ if !(pCell >= (*MemPage)(unsafe.Pointer(pPage)).FaDataEnd) { goto __21 } - return Xsqlite3CorruptError(tls, 74077) + return Xsqlite3CorruptError(tls, 74081) __21: ; goto __19 @@ -31638,7 +31640,7 @@ if !!(int32((*MemPage)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pCur)).FpPage)).FisInit) != 0) { goto __4 } - return Xsqlite3CorruptError(tls, 74273) + return Xsqlite3CorruptError(tls, 74277) __4: ; goto bypass_moveto_root @@ -31703,7 +31705,7 @@ if !(nCell < 2 || U32(nCell)/(*BtShared)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pCur)).FpBt)).FusableSize > (*BtShared)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pCur)).FpBt)).FnPage) { goto __17 } - rc = Xsqlite3CorruptError(tls, 74360) + rc = Xsqlite3CorruptError(tls, 74364) goto moveto_index_finish __17: ; @@ -31751,7 +31753,7 @@ if !((*UnpackedRecord)(unsafe.Pointer(pIdxKey)).FerrCode != 0) { goto __24 } - rc = Xsqlite3CorruptError(tls, 74392) + rc = Xsqlite3CorruptError(tls, 74396) __24: ; goto moveto_index_finish @@ -31870,7 +31872,7 @@ pPage = (*BtCursor)(unsafe.Pointer(pCur)).FpPage idx = int32(libc.PreIncUint16(&(*BtCursor)(unsafe.Pointer(pCur)).Fix, 1)) if !(int32((*MemPage)(unsafe.Pointer(pPage)).FisInit) != 0) || Xsqlite3FaultSim(tls, 412) != 0 { - return Xsqlite3CorruptError(tls, 74508) + return Xsqlite3CorruptError(tls, 74512) } if idx >= int32((*MemPage)(unsafe.Pointer(pPage)).FnCell) { @@ -32030,7 +32032,7 @@ if !(n >= mxPage) { goto __1 } - return Xsqlite3CorruptError(tls, 74688) + return Xsqlite3CorruptError(tls, 74692) __1: ; if !(n > U32(0)) { @@ -32095,7 +32097,7 @@ if !(iTrunk > mxPage || libc.PostIncUint32(&nSearch, 1) > n) { goto __16 } - rc = Xsqlite3CorruptError(tls, 74744) + rc = Xsqlite3CorruptError(tls, 74748) goto __17 __16: rc = btreeGetUnusedPage(tls, pBt, iTrunk, bp+4, 0) @@ -32131,7 +32133,7 @@ goto __22 } - rc = Xsqlite3CorruptError(tls, 74773) + rc = Xsqlite3CorruptError(tls, 74777) goto end_allocate_page goto __23 __22: @@ -32175,7 +32177,7 @@ if !(iNewTrunk > mxPage) { goto __32 } - rc = Xsqlite3CorruptError(tls, 74807) + rc = Xsqlite3CorruptError(tls, 74811) goto end_allocate_page __32: ; @@ -32287,7 +32289,7 @@ if !(iPage > mxPage || iPage < Pgno(2)) { goto __51 } - rc = Xsqlite3CorruptError(tls, 74872) + rc = Xsqlite3CorruptError(tls, 74876) goto end_allocate_page __51: ; @@ -32445,7 +32447,7 @@ if !(iPage < Pgno(2) || iPage > (*BtShared)(unsafe.Pointer(pBt)).FnPage) { goto __1 } - return Xsqlite3CorruptError(tls, 74999) + return Xsqlite3CorruptError(tls, 75003) __1: ; if !(pMemPage != 0) { @@ -32502,7 +32504,7 @@ if !(iTrunk > btreePagecount(tls, pBt)) { goto __10 } - *(*int32)(unsafe.Pointer(bp + 4)) = Xsqlite3CorruptError(tls, 75046) + *(*int32)(unsafe.Pointer(bp + 4)) = Xsqlite3CorruptError(tls, 75050) goto freepage_out __10: ; @@ -32518,7 +32520,7 @@ if !(nLeaf > (*BtShared)(unsafe.Pointer(pBt)).FusableSize/U32(4)-U32(2)) { goto __12 } - *(*int32)(unsafe.Pointer(bp + 4)) = Xsqlite3CorruptError(tls, 75057) + *(*int32)(unsafe.Pointer(bp + 4)) = Xsqlite3CorruptError(tls, 75061) goto freepage_out __12: ; @@ -32592,7 +32594,7 @@ var ovflPageSize U32 if 
pCell+uintptr((*CellInfo)(unsafe.Pointer(pInfo)).FnSize) > (*MemPage)(unsafe.Pointer(pPage)).FaDataEnd { - return Xsqlite3CorruptError(tls, 75146) + return Xsqlite3CorruptError(tls, 75150) } ovflPgno = Xsqlite3Get4byte(tls, pCell+uintptr((*CellInfo)(unsafe.Pointer(pInfo)).FnSize)-uintptr(4)) pBt = (*MemPage)(unsafe.Pointer(pPage)).FpBt @@ -32604,7 +32606,7 @@ *(*Pgno)(unsafe.Pointer(bp + 4)) = Pgno(0) *(*uintptr)(unsafe.Pointer(bp)) = uintptr(0) if ovflPgno < Pgno(2) || ovflPgno > btreePagecount(tls, pBt) { - return Xsqlite3CorruptError(tls, 75163) + return Xsqlite3CorruptError(tls, 75167) } if nOvfl != 0 { rc = getOverflowPage(tls, pBt, ovflPgno, bp, bp+4) @@ -32615,7 +32617,7 @@ if (*(*uintptr)(unsafe.Pointer(bp)) != 0 || libc.AssignPtrUintptr(bp, btreePageLookup(tls, pBt, ovflPgno)) != uintptr(0)) && Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FpDbPage) != 1 { - rc = Xsqlite3CorruptError(tls, 75183) + rc = Xsqlite3CorruptError(tls, 75187) } else { rc = freePage2(tls, pBt, *(*uintptr)(unsafe.Pointer(bp)), ovflPgno) } @@ -32780,7 +32782,7 @@ hdr = int32((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset) if pc+U32(sz) > (*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize { - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 75436) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 75440) return } rc = freeSpace(tls, pPage, uint16(pc), uint16(sz)) @@ -33058,12 +33060,12 @@ if Uptr(pCell) >= Uptr(aData+uintptr(j)) && Uptr(pCell) < Uptr(pEnd) { if Uptr(pCell+uintptr(sz)) > Uptr(pEnd) { - return Xsqlite3CorruptError(tls, 75737) + return Xsqlite3CorruptError(tls, 75741) } pCell = pTmp + uintptr((int32(pCell)-int32(aData))/1) } else if Uptr(pCell+uintptr(sz)) > Uptr(pSrcEnd) && Uptr(pCell) < Uptr(pSrcEnd) { - return Xsqlite3CorruptError(tls, 75742) + return Xsqlite3CorruptError(tls, 75746) } pData -= uintptr(sz) @@ -33071,7 +33073,7 @@ *(*U8)(unsafe.Pointer(pCellptr + 1)) = U8((int32(pData) - int32(aData)) / 1) pCellptr += uintptr(2) if pData < pCellptr { - return Xsqlite3CorruptError(tls, 75748) + return Xsqlite3CorruptError(tls, 75752) } libc.Xmemmove(tls, pData, pCell, uint32(sz)) @@ -33131,7 +33133,7 @@ if Uptr(*(*uintptr)(unsafe.Pointer((*CellArray)(unsafe.Pointer(pCArray)).FapCell + uintptr(i)*4))+uintptr(sz)) > Uptr(pEnd) && Uptr(*(*uintptr)(unsafe.Pointer((*CellArray)(unsafe.Pointer(pCArray)).FapCell + uintptr(i)*4))) < Uptr(pEnd) { - Xsqlite3CorruptError(tls, 75833) + Xsqlite3CorruptError(tls, 75837) return 1 } libc.Xmemmove(tls, pSlot, *(*uintptr)(unsafe.Pointer((*CellArray)(unsafe.Pointer(pCArray)).FapCell + uintptr(i)*4)), uint32(sz)) @@ -33220,7 +33222,7 @@ if !(nShift > nCell) { goto __2 } - return Xsqlite3CorruptError(tls, 75947) + return Xsqlite3CorruptError(tls, 75951) __2: ; libc.Xmemmove(tls, (*MemPage)(unsafe.Pointer(pPg)).FaCellIdx, (*MemPage)(unsafe.Pointer(pPg)).FaCellIdx+uintptr(nShift*2), uint32(nCell*2)) @@ -33336,7 +33338,7 @@ var pBt uintptr = (*MemPage)(unsafe.Pointer(pPage)).FpBt if int32((*MemPage)(unsafe.Pointer(pPage)).FnCell) == 0 { - return Xsqlite3CorruptError(tls, 76060) + return Xsqlite3CorruptError(tls, 76064) } *(*int32)(unsafe.Pointer(bp + 80)) = allocateBtreePage(tls, pBt, bp, bp+4, uint32(0), uint8(0)) @@ -33656,7 +33658,7 @@ if !(int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pOld)).FaData))) != int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp + 72)))).FaData)))) { goto __25 } - *(*int32)(unsafe.Pointer(bp + 112)) = 
Xsqlite3CorruptError(tls, 76481) + *(*int32)(unsafe.Pointer(bp + 112)) = Xsqlite3CorruptError(tls, 76485) goto balance_cleanup __25: ; @@ -33667,7 +33669,7 @@ if !(limit < int32(*(*U16)(unsafe.Pointer(pOld + 28)))) { goto __27 } - *(*int32)(unsafe.Pointer(bp + 112)) = Xsqlite3CorruptError(tls, 76505) + *(*int32)(unsafe.Pointer(bp + 112)) = Xsqlite3CorruptError(tls, 76509) goto balance_cleanup __27: ; @@ -33825,7 +33827,7 @@ if !(k > NB+2) { goto __55 } - *(*int32)(unsafe.Pointer(bp + 112)) = Xsqlite3CorruptError(tls, 76606) + *(*int32)(unsafe.Pointer(bp + 112)) = Xsqlite3CorruptError(tls, 76610) goto balance_cleanup __55: ; @@ -33899,7 +33901,7 @@ }()) { goto __67 } - *(*int32)(unsafe.Pointer(bp + 112)) = Xsqlite3CorruptError(tls, 76639) + *(*int32)(unsafe.Pointer(bp + 112)) = Xsqlite3CorruptError(tls, 76643) goto balance_cleanup __67: ; @@ -33962,7 +33964,7 @@ }()) { goto __75 } - *(*int32)(unsafe.Pointer(bp + 112)) = Xsqlite3CorruptError(tls, 76683) + *(*int32)(unsafe.Pointer(bp + 112)) = Xsqlite3CorruptError(tls, 76687) goto balance_cleanup __75: ; @@ -33990,7 +33992,7 @@ *(*int32)(unsafe.Pointer(bp + 112)) == SQLITE_OK) { goto __81 } - *(*int32)(unsafe.Pointer(bp + 112)) = Xsqlite3CorruptError(tls, 76716) + *(*int32)(unsafe.Pointer(bp + 112)) = Xsqlite3CorruptError(tls, 76720) __81: ; if !(*(*int32)(unsafe.Pointer(bp + 112)) != 0) { @@ -34251,7 +34253,7 @@ if !(Uptr(pSrcEnd) >= Uptr(pCell1) && Uptr(pSrcEnd) < Uptr(pCell1+uintptr(sz2))) { goto __121 } - *(*int32)(unsafe.Pointer(bp + 112)) = Xsqlite3CorruptError(tls, 76916) + *(*int32)(unsafe.Pointer(bp + 112)) = Xsqlite3CorruptError(tls, 76920) goto balance_cleanup __121: ; @@ -34443,7 +34445,7 @@ if pOther != pCur && int32((*BtCursor)(unsafe.Pointer(pOther)).FeState) == CURSOR_VALID && (*BtCursor)(unsafe.Pointer(pOther)).FpPage == (*BtCursor)(unsafe.Pointer(pCur)).FpPage { - return Xsqlite3CorruptError(tls, 77146) + return Xsqlite3CorruptError(tls, 77150) } } return SQLITE_OK @@ -34481,7 +34483,7 @@ break } } else if Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(pPage)).FpDbPage) > 1 { - rc = Xsqlite3CorruptError(tls, 77206) + rc = Xsqlite3CorruptError(tls, 77210) } else { var pParent uintptr = *(*uintptr)(unsafe.Pointer(pCur + 112 + uintptr(iPage-1)*4)) var iIdx int32 = int32(*(*U16)(unsafe.Pointer(pCur + 64 + uintptr(iPage-1)*2))) @@ -34587,7 +34589,7 @@ return rc } if Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FpDbPage) != 1 || (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FisInit != 0 { - rc = Xsqlite3CorruptError(tls, 77370) + rc = Xsqlite3CorruptError(tls, 77374) } else { if U32(iOffset)+ovflPageSize < U32(nTotal) { ovflPgno = Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FaData) @@ -34612,7 +34614,7 @@ if (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload+uintptr((*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal) > (*MemPage)(unsafe.Pointer(pPage)).FaDataEnd || (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload < (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr((*MemPage)(unsafe.Pointer(pPage)).FcellOffset) { - return Xsqlite3CorruptError(tls, 77398) + return Xsqlite3CorruptError(tls, 77402) } if int32((*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal) == nTotal { return btreeOverwriteContent(tls, pPage, (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload, pX, @@ -34682,7 +34684,7 @@ goto __3 } - return Xsqlite3CorruptError(tls, 77479) + return Xsqlite3CorruptError(tls, 77483) __3: ; __1: @@ -34795,7 +34797,7 @@ goto __21 } - 
*(*int32)(unsafe.Pointer(bp + 84)) = Xsqlite3CorruptError(tls, 77602) + *(*int32)(unsafe.Pointer(bp + 84)) = Xsqlite3CorruptError(tls, 77606) goto __22 __21: *(*int32)(unsafe.Pointer(bp + 84)) = btreeComputeFreeSpace(tls, pPage) @@ -34855,6 +34857,7 @@ __25: ; idx = int32((*BtCursor)(unsafe.Pointer(pCur)).Fix) + (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnSize = U16(0) if !(*(*int32)(unsafe.Pointer(bp)) == 0) { goto __31 } @@ -34862,7 +34865,7 @@ if !(idx >= int32((*MemPage)(unsafe.Pointer(pPage)).FnCell)) { goto __33 } - return Xsqlite3CorruptError(tls, 77640) + return Xsqlite3CorruptError(tls, 77645) __33: ; *(*int32)(unsafe.Pointer(bp + 84)) = Xsqlite3PagerWrite(tls, (*MemPage)(unsafe.Pointer(pPage)).FpDbPage) @@ -34900,13 +34903,13 @@ if !(oldCell < (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset)+uintptr(10)) { goto __39 } - return Xsqlite3CorruptError(tls, 77667) + return Xsqlite3CorruptError(tls, 77672) __39: ; if !(oldCell+uintptr(*(*int32)(unsafe.Pointer(bp + 88))) > (*MemPage)(unsafe.Pointer(pPage)).FaDataEnd) { goto __40 } - return Xsqlite3CorruptError(tls, 77670) + return Xsqlite3CorruptError(tls, 77675) __40: ; libc.Xmemcpy(tls, oldCell, newCell, uint32(*(*int32)(unsafe.Pointer(bp + 88)))) @@ -34937,7 +34940,6 @@ ; *(*int32)(unsafe.Pointer(bp + 84)) = insertCell(tls, pPage, idx, newCell, *(*int32)(unsafe.Pointer(bp + 88)), uintptr(0), uint32(0)) - (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnSize = U16(0) if !((*MemPage)(unsafe.Pointer(pPage)).FnOverflow != 0) { goto __44 } @@ -35012,7 +35014,7 @@ nIn = U32((*BtCursor)(unsafe.Pointer(pSrc)).Finfo.FnLocal) aIn = (*BtCursor)(unsafe.Pointer(pSrc)).Finfo.FpPayload if aIn+uintptr(nIn) > (*MemPage)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pSrc)).FpPage)).FaDataEnd { - return Xsqlite3CorruptError(tls, 77773) + return Xsqlite3CorruptError(tls, 77777) } nRem = (*BtCursor)(unsafe.Pointer(pSrc)).Finfo.FnPayload if nIn == nRem && nIn < U32((*MemPage)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pDest)).FpPage)).FmaxLocal) { @@ -35037,7 +35039,7 @@ if nRem > nIn { if aIn+uintptr(nIn)+uintptr(4) > (*MemPage)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pSrc)).FpPage)).FaDataEnd { - return Xsqlite3CorruptError(tls, 77798) + return Xsqlite3CorruptError(tls, 77802) } ovflIn = Xsqlite3Get4byte(tls, (*BtCursor)(unsafe.Pointer(pSrc)).Finfo.FpPayload+uintptr(nIn)) } @@ -35138,7 +35140,7 @@ return *(*int32)(unsafe.Pointer(bp + 20)) } } else { - return Xsqlite3CorruptError(tls, 77894) + return Xsqlite3CorruptError(tls, 77898) } } @@ -35146,11 +35148,11 @@ iCellIdx = int32((*BtCursor)(unsafe.Pointer(pCur)).Fix) pPage = (*BtCursor)(unsafe.Pointer(pCur)).FpPage if int32((*MemPage)(unsafe.Pointer(pPage)).FnCell) <= iCellIdx { - return Xsqlite3CorruptError(tls, 77903) + return Xsqlite3CorruptError(tls, 77907) } pCell = (*MemPage)(unsafe.Pointer(pPage)).FaData + uintptr(int32((*MemPage)(unsafe.Pointer(pPage)).FmaskPage)&(int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FaCellIdx + uintptr(2*iCellIdx))))<<8|int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FaCellIdx + uintptr(2*iCellIdx) + 1))))) if (*MemPage)(unsafe.Pointer(pPage)).FnFree < 0 && btreeComputeFreeSpace(tls, pPage) != 0 { - return Xsqlite3CorruptError(tls, 77907) + return Xsqlite3CorruptError(tls, 77911) } bPreserve = U8(libc.Bool32(int32(flags)&BTREE_SAVEPOSITION != 0)) @@ -35225,7 +35227,7 @@ } pCell = (*MemPage)(unsafe.Pointer(pLeaf)).FaData + 
uintptr(int32((*MemPage)(unsafe.Pointer(pLeaf)).FmaskPage)&(int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pLeaf)).FaCellIdx + uintptr(2*(int32((*MemPage)(unsafe.Pointer(pLeaf)).FnCell)-1)))))<<8|int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pLeaf)).FaCellIdx + uintptr(2*(int32((*MemPage)(unsafe.Pointer(pLeaf)).FnCell)-1)) + 1))))) if pCell < (*MemPage)(unsafe.Pointer(pLeaf)).FaData+4 { - return Xsqlite3CorruptError(tls, 77998) + return Xsqlite3CorruptError(tls, 78002) } nCell = int32((*struct { f func(*libc.TLS, uintptr, uintptr) U16 @@ -35294,7 +35296,7 @@ Xsqlite3BtreeGetMeta(tls, p, BTREE_LARGEST_ROOT_PAGE, bp) if *(*Pgno)(unsafe.Pointer(bp)) > btreePagecount(tls, pBt) { - return Xsqlite3CorruptError(tls, 78114) + return Xsqlite3CorruptError(tls, 78118) } *(*Pgno)(unsafe.Pointer(bp))++ @@ -35323,7 +35325,7 @@ } *(*int32)(unsafe.Pointer(bp + 24)) = ptrmapGet(tls, pBt, *(*Pgno)(unsafe.Pointer(bp)), bp+16, bp+20) if int32(*(*U8)(unsafe.Pointer(bp + 16))) == PTRMAP_ROOTPAGE || int32(*(*U8)(unsafe.Pointer(bp + 16))) == PTRMAP_FREEPAGE { - *(*int32)(unsafe.Pointer(bp + 24)) = Xsqlite3CorruptError(tls, 78162) + *(*int32)(unsafe.Pointer(bp + 24)) = Xsqlite3CorruptError(tls, 78166) } if *(*int32)(unsafe.Pointer(bp + 24)) != SQLITE_OK { releasePage(tls, *(*uintptr)(unsafe.Pointer(bp + 12))) @@ -35399,7 +35401,7 @@ if !(pgno > btreePagecount(tls, pBt)) { goto __1 } - return Xsqlite3CorruptError(tls, 78252) + return Xsqlite3CorruptError(tls, 78256) __1: ; *(*int32)(unsafe.Pointer(bp + 24)) = getAndInitPage(tls, pBt, pgno, bp, uintptr(0), 0) @@ -35413,7 +35415,7 @@ Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FpDbPage) != 1+libc.Bool32(pgno == Pgno(1))) { goto __3 } - *(*int32)(unsafe.Pointer(bp + 24)) = Xsqlite3CorruptError(tls, 78259) + *(*int32)(unsafe.Pointer(bp + 24)) = Xsqlite3CorruptError(tls, 78263) goto cleardatabasepage_out __3: ; @@ -35547,7 +35549,7 @@ var pBt uintptr = (*Btree)(unsafe.Pointer(p)).FpBt if iTable > btreePagecount(tls, pBt) { - return Xsqlite3CorruptError(tls, 78363) + return Xsqlite3CorruptError(tls, 78367) } *(*int32)(unsafe.Pointer(bp + 8)) = Xsqlite3BtreeClearTable(tls, p, int32(iTable), uintptr(0)) @@ -38016,7 +38018,7 @@ var rc int32 (*Mem)(unsafe.Pointer(pMem)).Fflags = U16(MEM_Null) if Xsqlite3BtreeMaxRecordSize(tls, pCur) < Sqlite3_int64(offset+amt) { - return Xsqlite3CorruptError(tls, 81630) + return Xsqlite3CorruptError(tls, 81634) } if SQLITE_OK == libc.AssignInt32(&rc, Xsqlite3VdbeMemClearAndResize(tls, pMem, int32(amt+U32(1)))) { rc = Xsqlite3BtreePayload(tls, pCur, offset, amt, (*Mem)(unsafe.Pointer(pMem)).Fz) @@ -38664,7 +38666,7 @@ return Xsqlite3GetVarint32(tls, a, bp) }()) if *(*int32)(unsafe.Pointer(bp)) > nRec || iHdr >= *(*int32)(unsafe.Pointer(bp)) { - return Xsqlite3CorruptError(tls, 82270) + return Xsqlite3CorruptError(tls, 82274) } iField = *(*int32)(unsafe.Pointer(bp)) for i = 0; i <= iCol; i++ { @@ -38679,14 +38681,14 @@ }()) if iHdr > *(*int32)(unsafe.Pointer(bp)) { - return Xsqlite3CorruptError(tls, 82276) + return Xsqlite3CorruptError(tls, 82280) } szField = int32(Xsqlite3VdbeSerialTypeLen(tls, *(*U32)(unsafe.Pointer(bp + 4)))) iField = iField + szField } if iField > nRec { - return Xsqlite3CorruptError(tls, 82282) + return Xsqlite3CorruptError(tls, 82286) } if pMem == uintptr(0) { pMem = libc.AssignPtrUintptr(ppVal, Xsqlite3ValueNew(tls, db)) @@ -40990,7 +40992,7 @@ return rc } if *(*int32)(unsafe.Pointer(bp)) != 0 { - return Xsqlite3CorruptError(tls, 86058) + return 
Xsqlite3CorruptError(tls, 86062) } (*VdbeCursor)(unsafe.Pointer(p)).FdeferredMoveto = U8(0) (*VdbeCursor)(unsafe.Pointer(p)).FcacheStatus = U32(CACHE_STALE) @@ -41541,7 +41543,7 @@ i = 0 } if d1 > uint32(nKey1) { - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 86985)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 86989)) return 0 } @@ -41606,7 +41608,7 @@ if d1+U32((*Mem)(unsafe.Pointer(bp+8)).Fn) > uint32(nKey1) || int32((*KeyInfo)(unsafe.Pointer(libc.AssignUintptr(&pKeyInfo, (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FpKeyInfo))).FnAllField) <= i { - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87062)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87066)) return 0 } else if *(*uintptr)(unsafe.Pointer(pKeyInfo + 20 + uintptr(i)*4)) != 0 { (*Mem)(unsafe.Pointer(bp + 8)).Fenc = (*KeyInfo)(unsafe.Pointer(pKeyInfo)).Fenc @@ -41640,7 +41642,7 @@ var nStr int32 = int32((*(*U32)(unsafe.Pointer(bp + 48)) - U32(12)) / U32(2)) if d1+U32(nStr) > uint32(nKey1) { - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87092)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87096)) return 0 } else if int32((*Mem)(unsafe.Pointer(pRhs)).Fflags)&MEM_Zero != 0 { if !(isAllZero(tls, aKey1+uintptr(d1), nStr) != 0) { @@ -41690,7 +41692,7 @@ } idx1 = idx1 + U32(Xsqlite3VarintLen(tls, uint64(*(*U32)(unsafe.Pointer(bp + 48))))) if idx1 >= *(*U32)(unsafe.Pointer(bp + 4)) { - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87136)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87140)) return 0 } } @@ -41836,7 +41838,7 @@ if !(szHdr+nStr > nKey1) { goto __7 } - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87299)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87303)) return 0 __7: ; @@ -42007,7 +42009,7 @@ idx_rowid_corruption: ; Xsqlite3VdbeMemReleaseMalloc(tls, bp) - return Xsqlite3CorruptError(tls, 87457) + return Xsqlite3CorruptError(tls, 87461) } // Compare the key of the index entry that cursor pC is pointing to against @@ -42033,7 +42035,7 @@ if nCellKey <= int64(0) || nCellKey > int64(0x7fffffff) { *(*int32)(unsafe.Pointer(res)) = 0 - return Xsqlite3CorruptError(tls, 87490) + return Xsqlite3CorruptError(tls, 87494) } Xsqlite3VdbeMemInit(tls, bp, db, uint16(0)) rc = Xsqlite3VdbeMemFromBtreeZeroOffset(tls, pCur, U32(nCellKey), bp) @@ -42307,7 +42309,7 @@ var v uintptr = pStmt var db uintptr = (*Vdbe)(unsafe.Pointer(v)).Fdb if vdbeSafety(tls, v) != 0 { - return Xsqlite3MisuseError(tls, 87854) + return Xsqlite3MisuseError(tls, 87858) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) if (*Vdbe)(unsafe.Pointer(v)).FstartTime > int64(0) { @@ -42922,7 +42924,7 @@ var db uintptr if vdbeSafetyNotNull(tls, v) != 0 { - return Xsqlite3MisuseError(tls, 88544) + return Xsqlite3MisuseError(tls, 88548) } db = (*Vdbe)(unsafe.Pointer(v)).Fdb Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) @@ -43442,7 +43444,7 @@ var pVar uintptr if vdbeSafetyNotNull(tls, p) != 0 { - return Xsqlite3MisuseError(tls, 89208) + return Xsqlite3MisuseError(tls, 89212) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer((*Vdbe)(unsafe.Pointer(p)).Fdb)).Fmutex) if int32((*Vdbe)(unsafe.Pointer(p)).FeVdbeState) != VDBE_READY_STATE { @@ -43450,7 +43452,7 @@ 
Xsqlite3_mutex_leave(tls, (*Sqlite3)(unsafe.Pointer((*Vdbe)(unsafe.Pointer(p)).Fdb)).Fmutex) Xsqlite3_log(tls, SQLITE_MISUSE, ts+5350, libc.VaList(bp, (*Vdbe)(unsafe.Pointer(p)).FzSql)) - return Xsqlite3MisuseError(tls, 89216) + return Xsqlite3MisuseError(tls, 89220) } if i >= uint32((*Vdbe)(unsafe.Pointer(p)).FnVar) { Xsqlite3Error(tls, (*Vdbe)(unsafe.Pointer(p)).Fdb, SQLITE_RANGE) @@ -43855,7 +43857,7 @@ if !(!(p != 0) || (*PreUpdate)(unsafe.Pointer(p)).Fop == SQLITE_INSERT) { goto __1 } - rc = Xsqlite3MisuseError(tls, 89707) + rc = Xsqlite3MisuseError(tls, 89711) goto preupdate_old_out __1: ; @@ -43999,7 +44001,7 @@ if !(!(p != 0) || (*PreUpdate)(unsafe.Pointer(p)).Fop == SQLITE_DELETE) { goto __1 } - rc = Xsqlite3MisuseError(tls, 89809) + rc = Xsqlite3MisuseError(tls, 89813) goto preupdate_new_out __1: ; @@ -44443,10 +44445,6 @@ } else if int32((*Mem)(unsafe.Pointer(p)).Fflags)&MEM_Real != 0 { h = h + U64(Xsqlite3VdbeIntValue(tls, p)) } else if int32((*Mem)(unsafe.Pointer(p)).Fflags)&(MEM_Str|MEM_Blob) != 0 { - h = h + U64((*Mem)(unsafe.Pointer(p)).Fn) - if int32((*Mem)(unsafe.Pointer(p)).Fflags)&MEM_Zero != 0 { - h = h + U64(*(*int32)(unsafe.Pointer(p))) - } } } return h @@ -47095,7 +47093,7 @@ goto __9 goto __425 __424: - rc = Xsqlite3CorruptError(tls, 93317) + rc = Xsqlite3CorruptError(tls, 93320) goto abort_due_to_error __425: ; @@ -48855,7 +48853,7 @@ if !((*Op)(unsafe.Pointer(pOp)).Fp2 == 0) { goto __682 } - rc = Xsqlite3CorruptError(tls, 95560) + rc = Xsqlite3CorruptError(tls, 95563) goto __683 __682: goto jump_to_p2 @@ -49633,7 +49631,7 @@ if !((*Op)(unsafe.Pointer(pOp)).Fp5 != 0 && !(Xsqlite3WritableSchema(tls, db) != 0)) { goto __770 } - rc = Xsqlite3ReportError(tls, SQLITE_CORRUPT|int32(3)<<8, 96635, ts+5859) + rc = Xsqlite3ReportError(tls, SQLITE_CORRUPT|int32(3)<<8, 96638, ts+5859) goto abort_due_to_error __770: ; @@ -49743,7 +49741,7 @@ if !(nCellKey <= int64(0) || nCellKey > int64(0x7fffffff)) { goto __781 } - rc = Xsqlite3CorruptError(tls, 96840) + rc = Xsqlite3CorruptError(tls, 96843) goto abort_due_to_error __781: ; @@ -49937,7 +49935,7 @@ goto __803 } - rc = Xsqlite3CorruptError(tls, 97092) + rc = Xsqlite3CorruptError(tls, 97095) __803: ; Xsqlite3DbFreeNN(tls, db, zSql) @@ -51304,7 +51302,7 @@ if !(rc == SQLITE_IOERR|int32(33)<<8) { goto __957 } - rc = Xsqlite3CorruptError(tls, 99031) + rc = Xsqlite3CorruptError(tls, 99034) __957: ; __956: @@ -51824,7 +51822,7 @@ var db uintptr if p == uintptr(0) { - return Xsqlite3MisuseError(tls, 99516) + return Xsqlite3MisuseError(tls, 99519) } db = (*Incrblob)(unsafe.Pointer(p)).Fdb Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) @@ -51907,7 +51905,7 @@ var db uintptr if p == uintptr(0) { - return Xsqlite3MisuseError(tls, 99616) + return Xsqlite3MisuseError(tls, 99619) } db = (*Incrblob)(unsafe.Pointer(p)).Fdb Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) @@ -55340,14 +55338,10 @@ ; Xsqlite3WalkExpr(tls, pWalker, (*Expr)(unsafe.Pointer(pExpr)).FpLeft) if 0 == Xsqlite3ExprCanBeNull(tls, (*Expr)(unsafe.Pointer(pExpr)).FpLeft) && !(int32((*Parse)(unsafe.Pointer(pParse)).FeParseMode) >= PARSE_MODE_RENAME) { - if int32((*Expr)(unsafe.Pointer(pExpr)).Fop) == TK_NOTNULL { - *(*uintptr)(unsafe.Pointer(pExpr + 8)) = ts + 6757 - *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_IsTrue) - } else { - *(*uintptr)(unsafe.Pointer(pExpr + 8)) = ts + 6762 - *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_IsFalse) - } - (*Expr)(unsafe.Pointer(pExpr)).Fop = U8(TK_TRUEFALSE) + *(*int32)(unsafe.Pointer(pExpr + 8)) = 
libc.Bool32(int32((*Expr)(unsafe.Pointer(pExpr)).Fop) == TK_NOTNULL) + *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_IntValue) + (*Expr)(unsafe.Pointer(pExpr)).Fop = U8(TK_INTEGER) + i = 0 p = pNC __4: @@ -55391,7 +55385,7 @@ var pLeft uintptr = (*Expr)(unsafe.Pointer(pExpr)).FpLeft if (*NameContext)(unsafe.Pointer(pNC)).FncFlags&(NC_IdxExpr|NC_GenCol) != 0 { - notValidImpl(tls, pParse, pNC, ts+6768, uintptr(0), pExpr) + notValidImpl(tls, pParse, pNC, ts+6757, uintptr(0), pExpr) } pRight = (*Expr)(unsafe.Pointer(pExpr)).FpRight @@ -55455,7 +55449,7 @@ (*Expr)(unsafe.Pointer(pExpr)).FiTable = exprProbability(tls, (*ExprList_item)(unsafe.Pointer(pList+8+1*20)).FpExpr) if (*Expr)(unsafe.Pointer(pExpr)).FiTable < 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+6785, libc.VaList(bp, pExpr)) + ts+6774, libc.VaList(bp, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } } else { @@ -55471,7 +55465,7 @@ var auth int32 = Xsqlite3AuthCheck(tls, pParse, SQLITE_FUNCTION, uintptr(0), (*FuncDef)(unsafe.Pointer(pDef)).FzName, uintptr(0)) if auth != SQLITE_OK { if auth == SQLITE_DENY { - Xsqlite3ErrorMsg(tls, pParse, ts+6849, + Xsqlite3ErrorMsg(tls, pParse, ts+6838, libc.VaList(bp+8, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } @@ -55485,7 +55479,7 @@ } if (*FuncDef)(unsafe.Pointer(pDef)).FfuncFlags&U32(SQLITE_FUNC_CONSTANT) == U32(0) { if (*NameContext)(unsafe.Pointer(pNC)).FncFlags&(NC_IdxExpr|NC_PartIdx|NC_GenCol) != 0 { - notValidImpl(tls, pParse, pNC, ts+6885, uintptr(0), pExpr) + notValidImpl(tls, pParse, pNC, ts+6874, uintptr(0), pExpr) } } else { @@ -55508,30 +55502,30 @@ if 0 == libc.Bool32(int32((*Parse)(unsafe.Pointer(pParse)).FeParseMode) >= PARSE_MODE_RENAME) { if pDef != 0 && (*FuncDef)(unsafe.Pointer(pDef)).FxValue == uintptr(0) && pWin != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+6913, libc.VaList(bp+16, pExpr)) + ts+6902, libc.VaList(bp+16, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } else if is_agg != 0 && (*NameContext)(unsafe.Pointer(pNC)).FncFlags&NC_AllowAgg == 0 || is_agg != 0 && (*FuncDef)(unsafe.Pointer(pDef)).FfuncFlags&U32(SQLITE_FUNC_WINDOW) != 0 && !(pWin != 0) || is_agg != 0 && pWin != 0 && (*NameContext)(unsafe.Pointer(pNC)).FncFlags&NC_AllowWin == 0 { var zType uintptr if (*FuncDef)(unsafe.Pointer(pDef)).FfuncFlags&U32(SQLITE_FUNC_WINDOW) != 0 || pWin != 0 { - zType = ts + 6956 + zType = ts + 6945 } else { - zType = ts + 6963 + zType = ts + 6952 } - Xsqlite3ErrorMsg(tls, pParse, ts+6973, libc.VaList(bp+24, zType, pExpr)) + Xsqlite3ErrorMsg(tls, pParse, ts+6962, libc.VaList(bp+24, zType, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ is_agg = 0 } else if no_such_func != 0 && int32((*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).Finit.Fbusy) == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+7001, libc.VaList(bp+40, pExpr)) + Xsqlite3ErrorMsg(tls, pParse, ts+6990, libc.VaList(bp+40, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } else if wrong_num_args != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+7023, + Xsqlite3ErrorMsg(tls, pParse, ts+7012, libc.VaList(bp+48, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } else if is_agg == 0 && (*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_WinFunc) != U32(0) { Xsqlite3ErrorMsg(tls, pParse, - ts+7067, + ts+7056, libc.VaList(bp+56, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } @@ -55603,15 +55597,15 @@ var nRef int32 = (*NameContext)(unsafe.Pointer(pNC)).FnRef if (*NameContext)(unsafe.Pointer(pNC)).FncFlags&NC_SelfRef != 0 { - notValidImpl(tls, pParse, pNC, ts+7115, pExpr, pExpr) + 
notValidImpl(tls, pParse, pNC, ts+7104, pExpr, pExpr) } else { Xsqlite3WalkSelect(tls, pWalker, *(*uintptr)(unsafe.Pointer(pExpr + 20))) } if nRef != (*NameContext)(unsafe.Pointer(pNC)).FnRef { *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_VarSelect) - *(*int32)(unsafe.Pointer(pNC + 24)) |= NC_VarSelect } + *(*int32)(unsafe.Pointer(pNC + 24)) |= NC_Subquery } break @@ -55619,7 +55613,7 @@ case TK_VARIABLE: { if (*NameContext)(unsafe.Pointer(pNC)).FncFlags&(NC_IsCheck|NC_PartIdx|NC_IdxExpr|NC_GenCol) != 0 { - notValidImpl(tls, pParse, pNC, ts+7126, pExpr, pExpr) + notValidImpl(tls, pParse, pNC, ts+7115, pExpr, pExpr) } break @@ -55750,7 +55744,7 @@ defer tls.Free(24) Xsqlite3ErrorMsg(tls, pParse, - ts+7137, libc.VaList(bp, i, zType, mx)) + ts+7126, libc.VaList(bp, i, zType, mx)) Xsqlite3RecordErrorOffsetOfExpr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pError) } @@ -55770,7 +55764,7 @@ } db = (*Parse)(unsafe.Pointer(pParse)).Fdb if (*ExprList)(unsafe.Pointer(pOrderBy)).FnExpr > *(*int32)(unsafe.Pointer(db + 116 + 2*4)) { - Xsqlite3ErrorMsg(tls, pParse, ts+7193, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+7182, 0) return 1 } for i = 0; i < (*ExprList)(unsafe.Pointer(pOrderBy)).FnExpr; i++ { @@ -55805,7 +55799,7 @@ } if Xsqlite3ExprIsInteger(tls, pE, bp+8) != 0 { if *(*int32)(unsafe.Pointer(bp + 8)) <= 0 || *(*int32)(unsafe.Pointer(bp + 8)) > (*ExprList)(unsafe.Pointer(pEList)).FnExpr { - resolveOutOfRangeError(tls, pParse, ts+7227, i+1, (*ExprList)(unsafe.Pointer(pEList)).FnExpr, pE) + resolveOutOfRangeError(tls, pParse, ts+7216, i+1, (*ExprList)(unsafe.Pointer(pEList)).FnExpr, pE) return 1 } } else { @@ -55862,7 +55856,7 @@ for i = 0; i < (*ExprList)(unsafe.Pointer(pOrderBy)).FnExpr; i++ { if int32(*(*uint16)(unsafe.Pointer(pOrderBy + 8 + uintptr(i)*20 + 8 + 4))&0x4>>2) == 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+7233, libc.VaList(bp, i+1)) + ts+7222, libc.VaList(bp, i+1)) return 1 } } @@ -55890,7 +55884,7 @@ return 0 } if (*ExprList)(unsafe.Pointer(pOrderBy)).FnExpr > *(*int32)(unsafe.Pointer(db + 116 + 2*4)) { - Xsqlite3ErrorMsg(tls, pParse, ts+7294, libc.VaList(bp, zType)) + Xsqlite3ErrorMsg(tls, pParse, ts+7283, libc.VaList(bp, zType)) return 1 } pEList = (*Select)(unsafe.Pointer(pSelect)).FpEList @@ -56104,7 +56098,7 @@ *(*int32)(unsafe.Pointer(bp + 24)) |= NC_UEList if (*Select)(unsafe.Pointer(p)).FpHaving != 0 { if (*Select)(unsafe.Pointer(p)).FselFlags&U32(SF_Aggregate) == U32(0) { - Xsqlite3ErrorMsg(tls, pParse, ts+7325, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+7314, 0) return WRC_Abort } if Xsqlite3ResolveExprNames(tls, bp, (*Select)(unsafe.Pointer(p)).FpHaving) != 0 { @@ -56144,7 +56138,7 @@ if (*Select)(unsafe.Pointer(p)).FpOrderBy != uintptr(0) && isCompound <= nCompound && - resolveOrderGroupBy(tls, bp, p, (*Select)(unsafe.Pointer(p)).FpOrderBy, ts+7227) != 0 { + resolveOrderGroupBy(tls, bp, p, (*Select)(unsafe.Pointer(p)).FpOrderBy, ts+7216) != 0 { return WRC_Abort } if (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { @@ -56155,7 +56149,7 @@ if pGroupBy != 0 { var pItem uintptr - if resolveOrderGroupBy(tls, bp, p, pGroupBy, ts+7364) != 0 || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { + if resolveOrderGroupBy(tls, bp, p, pGroupBy, ts+7353) != 0 || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { return WRC_Abort } i = 0 @@ -56167,7 +56161,7 @@ { if (*Expr)(unsafe.Pointer((*ExprList_item)(unsafe.Pointer(pItem)).FpExpr)).Fflags&U32(EP_Agg) != U32(0) { Xsqlite3ErrorMsg(tls, pParse, - ts+7370, 0) + ts+7359, 0) return WRC_Abort } @@ -57031,7 +57025,7 @@ var mxHeight int32 = 
*(*int32)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb + 116 + 3*4)) if nHeight > mxHeight { Xsqlite3ErrorMsg(tls, pParse, - ts+7429, libc.VaList(bp, mxHeight)) + ts+7418, libc.VaList(bp, mxHeight)) rc = SQLITE_ERROR } return rc @@ -57280,10 +57274,10 @@ nExprElem = 1 } if nExprElem != nElem { - Xsqlite3ErrorMsg(tls, pParse, ts+7477, + Xsqlite3ErrorMsg(tls, pParse, ts+7466, libc.VaList(bp, nExprElem, func() uintptr { if nExprElem > 1 { - return ts + 7521 + return ts + 7510 } return ts + 1547 }(), nElem)) @@ -57324,7 +57318,7 @@ !(int32((*Parse)(unsafe.Pointer(pParse)).FeParseMode) >= PARSE_MODE_RENAME) { Xsqlite3ExprDeferredDelete(tls, pParse, pLeft) Xsqlite3ExprDeferredDelete(tls, pParse, pRight) - return Xsqlite3Expr(tls, db, TK_INTEGER, ts+7523) + return Xsqlite3Expr(tls, db, TK_INTEGER, ts+7512) } else { return Xsqlite3PExpr(tls, pParse, TK_AND, pLeft, pRight) } @@ -57350,7 +57344,7 @@ if pList != 0 && (*ExprList)(unsafe.Pointer(pList)).FnExpr > *(*int32)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb + 116 + 6*4)) && !(int32((*Parse)(unsafe.Pointer(pParse)).Fnested) != 0) { - Xsqlite3ErrorMsg(tls, pParse, ts+7525, libc.VaList(bp, pToken)) + Xsqlite3ErrorMsg(tls, pParse, ts+7514, libc.VaList(bp, pToken)) } *(*uintptr)(unsafe.Pointer(pNew + 20)) = pList *(*U32)(unsafe.Pointer(pNew + 4)) |= U32(EP_HasFunc) @@ -57378,7 +57372,7 @@ if (*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_FromDDL) != U32(0) { if (*FuncDef)(unsafe.Pointer(pDef)).FfuncFlags&U32(SQLITE_FUNC_DIRECT) != U32(0) || (*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).Fflags&uint64(SQLITE_TrustedSchema) == uint64(0) { - Xsqlite3ErrorMsg(tls, pParse, ts+7559, libc.VaList(bp, pExpr)) + Xsqlite3ErrorMsg(tls, pParse, ts+7548, libc.VaList(bp, pExpr)) } } } @@ -57425,7 +57419,7 @@ } if bOk == 0 || *(*I64)(unsafe.Pointer(bp + 8)) < int64(1) || *(*I64)(unsafe.Pointer(bp + 8)) > I64(*(*int32)(unsafe.Pointer(db + 116 + 9*4))) { - Xsqlite3ErrorMsg(tls, pParse, ts+7579, + Xsqlite3ErrorMsg(tls, pParse, ts+7568, libc.VaList(bp, *(*int32)(unsafe.Pointer(db + 116 + 9*4)))) Xsqlite3RecordErrorOffsetOfExpr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pExpr) return @@ -57450,7 +57444,7 @@ } (*Expr)(unsafe.Pointer(pExpr)).FiColumn = x if int32(x) > *(*int32)(unsafe.Pointer(db + 116 + 9*4)) { - Xsqlite3ErrorMsg(tls, pParse, ts+7622, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+7611, 0) Xsqlite3RecordErrorOffsetOfExpr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pExpr) } } @@ -58025,7 +58019,7 @@ if !(int32((*Expr)(unsafe.Pointer(pExpr)).Fop) != TK_SELECT && (*IdList)(unsafe.Pointer(pColumns)).FnId != libc.AssignInt32(&n, Xsqlite3ExprVectorSize(tls, pExpr))) { goto __3 } - Xsqlite3ErrorMsg(tls, pParse, ts+7645, + Xsqlite3ErrorMsg(tls, pParse, ts+7634, libc.VaList(bp, (*IdList)(unsafe.Pointer(pColumns)).FnId, n)) goto vector_append_error __3: @@ -58148,7 +58142,7 @@ var mx int32 = *(*int32)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb + 116 + 2*4)) if pEList != 0 && (*ExprList)(unsafe.Pointer(pEList)).FnExpr > mx { - Xsqlite3ErrorMsg(tls, pParse, ts+7675, libc.VaList(bp, zObject)) + Xsqlite3ErrorMsg(tls, pParse, ts+7664, libc.VaList(bp, zObject)) } } @@ -58204,10 +58198,10 @@ // "false" EP_IsFalse // anything else 0 func Xsqlite3IsTrueOrFalse(tls *libc.TLS, zIn uintptr) U32 { - if Xsqlite3StrICmp(tls, zIn, ts+6757) == 0 { + if Xsqlite3StrICmp(tls, zIn, ts+7687) == 0 { return U32(EP_IsTrue) } - if Xsqlite3StrICmp(tls, zIn, ts+6762) == 0 { + if Xsqlite3StrICmp(tls, zIn, ts+7692) == 0 { return U32(EP_IsFalse) } return U32(0) @@ 
-59282,7 +59276,7 @@ } if (*Select)(unsafe.Pointer(pSel)).FpLimit != 0 { var db uintptr = (*Parse)(unsafe.Pointer(pParse)).Fdb - pLimit = Xsqlite3Expr(tls, db, TK_INTEGER, ts+7523) + pLimit = Xsqlite3Expr(tls, db, TK_INTEGER, ts+7512) if pLimit != 0 { (*Expr)(unsafe.Pointer(pLimit)).FaffExpr = int8(SQLITE_AFF_NUMERIC) pLimit = Xsqlite3PExpr(tls, pParse, TK_NE, @@ -59720,6 +59714,7 @@ func Xsqlite3ExprCodeGeneratedColumn(tls *libc.TLS, pParse uintptr, pTab uintptr, pCol uintptr, regOut int32) { var iAddr int32 var v uintptr = (*Parse)(unsafe.Pointer(pParse)).FpVdbe + var nErr int32 = (*Parse)(unsafe.Pointer(pParse)).FnErr if (*Parse)(unsafe.Pointer(pParse)).FiSelfTab > 0 { iAddr = Xsqlite3VdbeAddOp3(tls, v, OP_IfNullRow, (*Parse)(unsafe.Pointer(pParse)).FiSelfTab-1, 0, regOut) @@ -59733,6 +59728,9 @@ if iAddr != 0 { Xsqlite3VdbeJumpHere(tls, v, iAddr) } + if (*Parse)(unsafe.Pointer(pParse)).FnErr > nErr { + (*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).FerrByteOffset = -1 + } } // Generate code to extract the value of the iCol-th column of a table. @@ -59951,6 +59949,7 @@ var p uintptr var v uintptr for p = (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr; p != 0; p = (*IndexedExpr)(unsafe.Pointer(p)).FpIENext { + var exprAff U8 var iDataCur int32 = (*IndexedExpr)(unsafe.Pointer(p)).FiDataCur if iDataCur < 0 { continue @@ -59964,6 +59963,14 @@ if Xsqlite3ExprCompare(tls, uintptr(0), pExpr, (*IndexedExpr)(unsafe.Pointer(p)).FpExpr, iDataCur) != 0 { continue } + + exprAff = U8(Xsqlite3ExprAffinity(tls, pExpr)) + if int32(exprAff) <= SQLITE_AFF_BLOB && int32((*IndexedExpr)(unsafe.Pointer(p)).Faff) != SQLITE_AFF_BLOB || + int32(exprAff) == SQLITE_AFF_TEXT && int32((*IndexedExpr)(unsafe.Pointer(p)).Faff) != SQLITE_AFF_TEXT || + int32(exprAff) >= SQLITE_AFF_NUMERIC && int32((*IndexedExpr)(unsafe.Pointer(p)).Faff) != SQLITE_AFF_NUMERIC { + continue + } + v = (*Parse)(unsafe.Pointer(pParse)).FpVdbe if (*IndexedExpr)(unsafe.Pointer(p)).FbMaybeNullRow != 0 { @@ -60737,7 +60744,7 @@ if !((*Expr)(unsafe.Pointer(pExpr)).FiTable != n1) { goto __122 } - Xsqlite3ErrorMsg(tls, pParse, ts+7645, + Xsqlite3ErrorMsg(tls, pParse, ts+7634, libc.VaList(bp+24, (*Expr)(unsafe.Pointer(pExpr)).FiTable, n1)) __122: ; @@ -60759,11 +60766,10 @@ return target __50: - if !(!((*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_Collate) != U32(0)) && - (*Expr)(unsafe.Pointer(pExpr)).FpLeft != 0 && - int32((*Expr)(unsafe.Pointer((*Expr)(unsafe.Pointer(pExpr)).FpLeft)).Fop) == TK_FUNCTION) { + if !!((*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_Collate) != U32(0)) { goto __123 } + inReg = Xsqlite3ExprCodeTarget(tls, pParse, (*Expr)(unsafe.Pointer(pExpr)).FpLeft, target) if !(inReg != target) { goto __125 @@ -60834,13 +60840,19 @@ ; __127: ; - addrINR = Xsqlite3VdbeAddOp1(tls, v, OP_IfNullRow, (*Expr)(unsafe.Pointer(pExpr)).FiTable) + addrINR = Xsqlite3VdbeAddOp3(tls, v, OP_IfNullRow, (*Expr)(unsafe.Pointer(pExpr)).FiTable, 0, target) (*Parse)(unsafe.Pointer(pParse)).FokConstFactor = U8(0) inReg = Xsqlite3ExprCodeTarget(tls, pParse, (*Expr)(unsafe.Pointer(pExpr)).FpLeft, target) (*Parse)(unsafe.Pointer(pParse)).FokConstFactor = okConstFactor + if !(inReg != target) { + goto __130 + } + Xsqlite3VdbeAddOp2(tls, v, OP_SCopy, inReg, target) + inReg = target +__130: + ; Xsqlite3VdbeJumpHere(tls, v, addrINR) - Xsqlite3VdbeChangeP3(tls, v, addrINR, inReg) goto __5 __56: @@ -60853,15 +60865,15 @@ nExpr = (*ExprList)(unsafe.Pointer(pEList)).FnExpr endLabel = Xsqlite3VdbeMakeLabel(tls, pParse) if !(libc.AssignUintptr(&pX, 
(*Expr)(unsafe.Pointer(pExpr)).FpLeft) != uintptr(0)) { - goto __130 + goto __131 } pDel = Xsqlite3ExprDup(tls, db1, pX, 0) if !((*Sqlite3)(unsafe.Pointer(db1)).FmallocFailed != 0) { - goto __131 + goto __132 } Xsqlite3ExprDelete(tls, db1, pDel) goto __5 -__131: +__132: ; exprToRegister(tls, pDel, exprCodeVector(tls, pParse, pDel, bp+40)) @@ -60871,22 +60883,22 @@ pTest = bp + 100 *(*int32)(unsafe.Pointer(bp + 40)) = 0 -__130: +__131: ; i1 = 0 -__132: +__133: if !(i1 < nExpr-1) { - goto __134 + goto __135 } if !(pX != 0) { - goto __135 + goto __136 } (*Expr)(unsafe.Pointer(bp + 100)).FpRight = (*ExprList_item)(unsafe.Pointer(aListelem + uintptr(i1)*20)).FpExpr - goto __136 -__135: - pTest = (*ExprList_item)(unsafe.Pointer(aListelem + uintptr(i1)*20)).FpExpr + goto __137 __136: + pTest = (*ExprList_item)(unsafe.Pointer(aListelem + uintptr(i1)*20)).FpExpr +__137: ; nextCase = Xsqlite3VdbeMakeLabel(tls, pParse) @@ -60895,21 +60907,21 @@ Xsqlite3ExprCode(tls, pParse, (*ExprList_item)(unsafe.Pointer(aListelem+uintptr(i1+1)*20)).FpExpr, target) Xsqlite3VdbeGoto(tls, v, endLabel) Xsqlite3VdbeResolveLabel(tls, v, nextCase) - goto __133 -__133: - i1 = i1 + 2 - goto __132 goto __134 __134: + i1 = i1 + 2 + goto __133 + goto __135 +__135: ; if !(nExpr&1 != 0) { - goto __137 + goto __138 } Xsqlite3ExprCode(tls, pParse, (*ExprList_item)(unsafe.Pointer(pEList+8+uintptr(nExpr-1)*20)).FpExpr, target) - goto __138 -__137: - Xsqlite3VdbeAddOp2(tls, v, OP_Null, 0, target) + goto __139 __138: + Xsqlite3VdbeAddOp2(tls, v, OP_Null, 0, target) +__139: ; Xsqlite3ExprDelete(tls, db1, pDel) setDoNotMergeFlagOnCopy(tls, v) @@ -60919,27 +60931,27 @@ __57: ; if !(!(int32((*Parse)(unsafe.Pointer(pParse)).FpTriggerTab) != 0) && !(int32((*Parse)(unsafe.Pointer(pParse)).Fnested) != 0)) { - goto __139 + goto __140 } Xsqlite3ErrorMsg(tls, pParse, ts+8075, 0) return 0 -__139: +__140: ; if !(int32((*Expr)(unsafe.Pointer(pExpr)).FaffExpr) == OE_Abort) { - goto __140 + goto __141 } Xsqlite3MayAbort(tls, pParse) -__140: +__141: ; if !(int32((*Expr)(unsafe.Pointer(pExpr)).FaffExpr) == OE_Ignore) { - goto __141 + goto __142 } Xsqlite3VdbeAddOp4(tls, v, OP_Halt, SQLITE_OK, OE_Ignore, 0, *(*uintptr)(unsafe.Pointer(pExpr + 8)), 0) - goto __142 -__141: + goto __143 +__142: Xsqlite3HaltConstraint(tls, pParse, func() int32 { if (*Parse)(unsafe.Pointer(pParse)).FpTriggerTab != 0 { @@ -60948,7 +60960,7 @@ return SQLITE_ERROR }(), int32((*Expr)(unsafe.Pointer(pExpr)).FaffExpr), *(*uintptr)(unsafe.Pointer(pExpr + 8)), int8(0), uint8(0)) -__142: +__143: ; goto __5 @@ -63619,7 +63631,7 @@ return SQLITE_NOMEM } if Xsqlite3_strnicmp(tls, zSql, ts+10915, 7) != 0 { - return Xsqlite3CorruptError(tls, 113494) + return Xsqlite3CorruptError(tls, 113516) } (*Sqlite3)(unsafe.Pointer(db)).Finit.FiDb = func() uint8 { if bTemp != 0 { @@ -63636,7 +63648,7 @@ } if rc == SQLITE_OK && ((*Parse)(unsafe.Pointer(p)).FpNewTable == uintptr(0) && (*Parse)(unsafe.Pointer(p)).FpNewIndex == uintptr(0) && (*Parse)(unsafe.Pointer(p)).FpNewTrigger == uintptr(0)) { - rc = Xsqlite3CorruptError(tls, 113505) + rc = Xsqlite3CorruptError(tls, 113527) } (*Sqlite3)(unsafe.Pointer(db)).Finit.FiDb = U8(0) @@ -64557,7 +64569,7 @@ goto __2 } - rc = Xsqlite3CorruptError(tls, 114441) + rc = Xsqlite3CorruptError(tls, 114463) goto drop_column_done __2: ; @@ -68921,6 +68933,12 @@ pExpr = Xsqlite3PExpr(tls, pParse, TK_UPLUS, pExpr, uintptr(0)) __11: ; + if !(pExpr != 0 && int32((*Expr)(unsafe.Pointer(pExpr)).Fop) != TK_RAISE) { + goto __12 + } + (*Expr)(unsafe.Pointer(pExpr)).FaffExpr = 
(*Column)(unsafe.Pointer(pCol)).Faffinity +__12: + ; Xsqlite3ColumnSetExpr(tls, pParse, pTab, pCol, pExpr) pExpr = uintptr(0) goto generated_done @@ -70085,7 +70103,7 @@ if Xsqlite3_strnicmp(tls, (*Table)(unsafe.Pointer(pTab)).FzName+uintptr(7), ts+3279, 4) == 0 { return 0 } - if Xsqlite3_strnicmp(tls, (*Table)(unsafe.Pointer(pTab)).FzName+uintptr(7), ts+7126, 10) == 0 { + if Xsqlite3_strnicmp(tls, (*Table)(unsafe.Pointer(pTab)).FzName+uintptr(7), ts+7115, 10) == 0 { return 0 } return 1 @@ -71331,7 +71349,7 @@ goto __101 } Xsqlite3ErrorMsg(tls, pParse, ts+14133, 0) - (*Parse)(unsafe.Pointer(pParse)).Frc = Xsqlite3CorruptError(tls, 121835) + (*Parse)(unsafe.Pointer(pParse)).Frc = Xsqlite3CorruptError(tls, 121859) goto exit_create_index __101: ; @@ -73376,7 +73394,7 @@ goto __16 __15: wcf = U16(WHERE_ONEPASS_DESIRED | WHERE_DUPLICATES_OK) - if !((*NameContext)(unsafe.Pointer(bp+8)).FncFlags&NC_VarSelect != 0) { + if !((*NameContext)(unsafe.Pointer(bp+8)).FncFlags&NC_Subquery != 0) { goto __23 } bComplex = 1 @@ -79841,7 +79859,7 @@ if !!(Xsqlite3SafetyCheckOk(tls, db) != 0) { goto __1 } - return Xsqlite3MisuseError(tls, 131895) + return Xsqlite3MisuseError(tls, 131931) __1: ; if !(zSql == uintptr(0)) { @@ -81238,7 +81256,7 @@ } else if (*FuncDef)(unsafe.Pointer(p)).FxFinalize != uintptr(0) { zType = ts + 17506 } else { - zType = ts + 7521 + zType = ts + 7510 } Xsqlite3VdbeMultiLoad(tls, v, 1, ts+17508, libc.VaList(bp, (*FuncDef)(unsafe.Pointer(p)).FzName, isBuiltin, @@ -81399,6 +81417,7 @@ var zErr2 uintptr var k3 int32 var pCheck uintptr + var jmp7 int32 var jmp6 int32 var iCol1 int32 var uniqOk int32 @@ -82717,7 +82736,7 @@ goto __217 } pMod = (*HashElem)(unsafe.Pointer(j1)).Fdata - Xsqlite3VdbeMultiLoad(tls, v, 1, ts+7521, libc.VaList(bp+264, (*Module)(unsafe.Pointer(pMod)).FzName)) + Xsqlite3VdbeMultiLoad(tls, v, 1, ts+7510, libc.VaList(bp+264, (*Module)(unsafe.Pointer(pMod)).FzName)) goto __216 __216: j1 = (*HashElem)(unsafe.Pointer(j1)).Fnext @@ -82733,7 +82752,7 @@ if !(i6 < int32(uint32(unsafe.Sizeof(aPragmaName))/uint32(unsafe.Sizeof(PragmaName{})))) { goto __220 } - Xsqlite3VdbeMultiLoad(tls, v, 1, ts+7521, libc.VaList(bp+272, aPragmaName[i6].FzName)) + Xsqlite3VdbeMultiLoad(tls, v, 1, ts+7510, libc.VaList(bp+272, aPragmaName[i6].FzName)) goto __219 __219: i6++ @@ -83538,80 +83557,94 @@ jmp4 = integrityCheckResultRow(tls, v) Xsqlite3VdbeJumpHere(tls, v, jmp21) + if !((*Table)(unsafe.Pointer(pTab9)).FtabFlags&U32(TF_WithoutRowid) == U32(0)) { + goto __345 + } + Xsqlite3VdbeAddOp2(tls, v, OP_IdxRowid, *(*int32)(unsafe.Pointer(bp + 568))+j4, 3) + jmp7 = Xsqlite3VdbeAddOp3(tls, v, OP_Eq, 3, 0, r1+int32((*Index)(unsafe.Pointer(pIdx5)).FnColumn)-1) + + Xsqlite3VdbeLoadString(tls, v, 3, + ts+17922) + Xsqlite3VdbeAddOp3(tls, v, OP_Concat, 7, 3, 3) + Xsqlite3VdbeLoadString(tls, v, 4, ts+17958) + Xsqlite3VdbeGoto(tls, v, jmp5-1) + Xsqlite3VdbeJumpHere(tls, v, jmp7) +__345: + ; label6 = 0 kk = 0 -__345: +__346: if !(kk < int32((*Index)(unsafe.Pointer(pIdx5)).FnKeyCol)) { - goto __347 + goto __348 } if !(*(*uintptr)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx5)).FazColl + uintptr(kk)*4)) == uintptr(unsafe.Pointer(&Xsqlite3StrBINARY))) { - goto __348 + goto __349 } - goto __346 -__348: + goto __347 +__349: ; if !(label6 == 0) { - goto __349 + goto __350 } label6 = Xsqlite3VdbeMakeLabel(tls, pParse) -__349: +__350: ; Xsqlite3VdbeAddOp3(tls, v, OP_Column, *(*int32)(unsafe.Pointer(bp + 568))+j4, kk, 3) Xsqlite3VdbeAddOp3(tls, v, OP_Ne, 3, label6, r1+kk) - goto __346 -__346: - kk++ - goto __345 goto 
__347 __347: + kk++ + goto __346 + goto __348 +__348: ; if !(label6 != 0) { - goto __350 + goto __351 } jmp6 = Xsqlite3VdbeAddOp0(tls, v, OP_Goto) Xsqlite3VdbeResolveLabel(tls, v, label6) Xsqlite3VdbeLoadString(tls, v, 3, ts+17896) Xsqlite3VdbeAddOp3(tls, v, OP_Concat, 7, 3, 3) - Xsqlite3VdbeLoadString(tls, v, 4, ts+17922) + Xsqlite3VdbeLoadString(tls, v, 4, ts+17969) Xsqlite3VdbeGoto(tls, v, jmp5-1) Xsqlite3VdbeJumpHere(tls, v, jmp6) -__350: +__351: ; if !(int32((*Index)(unsafe.Pointer(pIdx5)).FonError) != OE_None) { - goto __351 + goto __352 } uniqOk = Xsqlite3VdbeMakeLabel(tls, pParse) kk = 0 -__352: +__353: if !(kk < int32((*Index)(unsafe.Pointer(pIdx5)).FnKeyCol)) { - goto __354 + goto __355 } iCol1 = int32(*(*I16)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx5)).FaiColumn + uintptr(kk)*2))) if !(iCol1 >= 0 && uint32(int32(*(*uint8)(unsafe.Pointer((*Table)(unsafe.Pointer(pTab9)).FaCol + uintptr(iCol1)*16 + 4))&0xf>>0)) != 0) { - goto __355 + goto __356 } - goto __353 -__355: + goto __354 +__356: ; Xsqlite3VdbeAddOp2(tls, v, OP_IsNull, r1+kk, uniqOk) - goto __353 -__353: - kk++ - goto __352 goto __354 __354: + kk++ + goto __353 + goto __355 +__355: ; jmp61 = Xsqlite3VdbeAddOp1(tls, v, OP_Next, *(*int32)(unsafe.Pointer(bp + 568))+j4) Xsqlite3VdbeGoto(tls, v, uniqOk) Xsqlite3VdbeJumpHere(tls, v, jmp61) Xsqlite3VdbeAddOp4Int(tls, v, OP_IdxGT, *(*int32)(unsafe.Pointer(bp + 568))+j4, uniqOk, r1, int32((*Index)(unsafe.Pointer(pIdx5)).FnKeyCol)) - Xsqlite3VdbeLoadString(tls, v, 3, ts+17949) + Xsqlite3VdbeLoadString(tls, v, 3, ts+17996) Xsqlite3VdbeGoto(tls, v, jmp5) Xsqlite3VdbeResolveLabel(tls, v, uniqOk) -__351: +__352: ; Xsqlite3VdbeJumpHere(tls, v, jmp4) Xsqlite3ResolvePartIdxLabel(tls, pParse, *(*int32)(unsafe.Pointer(bp + 576))) @@ -83628,20 +83661,20 @@ Xsqlite3VdbeAddOp2(tls, v, OP_Next, *(*int32)(unsafe.Pointer(bp + 564)), loopTop) Xsqlite3VdbeJumpHere(tls, v, loopTop-1) if !!(isQuick != 0) { - goto __356 + goto __357 } - Xsqlite3VdbeLoadString(tls, v, 2, ts+17976) + Xsqlite3VdbeLoadString(tls, v, 2, ts+18023) j4 = 0 pIdx5 = (*Table)(unsafe.Pointer(pTab9)).FpIndex -__357: +__358: if !(pIdx5 != 0) { - goto __359 + goto __360 } if !(pPk1 == pIdx5) { - goto __360 + goto __361 } - goto __358 -__360: + goto __359 +__361: ; Xsqlite3VdbeAddOp2(tls, v, OP_Count, *(*int32)(unsafe.Pointer(bp + 568))+j4, 3) addr1 = Xsqlite3VdbeAddOp3(tls, v, OP_Eq, 8+j4, 0, 3) @@ -83650,21 +83683,21 @@ Xsqlite3VdbeAddOp3(tls, v, OP_Concat, 4, 2, 3) integrityCheckResultRow(tls, v) Xsqlite3VdbeJumpHere(tls, v, addr1) - goto __358 -__358: - pIdx5 = (*Index)(unsafe.Pointer(pIdx5)).FpNext - j4++ - goto __357 goto __359 __359: + pIdx5 = (*Index)(unsafe.Pointer(pIdx5)).FpNext + j4++ + goto __358 + goto __360 +__360: ; if !(pPk1 != 0) { - goto __361 + goto __362 } Xsqlite3ReleaseTempRange(tls, pParse, r2, int32((*Index)(unsafe.Pointer(pPk1)).FnKeyCol)) -__361: +__362: ; -__356: +__357: ; goto __291 __291: @@ -83682,14 +83715,14 @@ ; aOp2 = Xsqlite3VdbeAddOpList(tls, v, int32(uint32(unsafe.Sizeof(endCode))/uint32(unsafe.Sizeof(VdbeOpList{}))), uintptr(unsafe.Pointer(&endCode)), iLn5) if !(aOp2 != 0) { - goto __362 + goto __363 } (*VdbeOp)(unsafe.Pointer(aOp2)).Fp2 = 1 - *(*int32)(unsafe.Pointer(bp + 560)) (*VdbeOp)(unsafe.Pointer(aOp2 + 2*20)).Fp4type = int8(-1) - *(*uintptr)(unsafe.Pointer(aOp2 + 2*20 + 16)) = ts + 18005 + *(*uintptr)(unsafe.Pointer(aOp2 + 2*20 + 16)) = ts + 18052 (*VdbeOp)(unsafe.Pointer(aOp2 + 5*20)).Fp4type = int8(-1) *(*uintptr)(unsafe.Pointer(aOp2 + 5*20 + 16)) = Xsqlite3ErrStr(tls, SQLITE_CORRUPT) 
-__362: +__363: ; Xsqlite3VdbeChangeP3(tls, v, 0, Xsqlite3VdbeCurrentAddr(tls, v)-2) @@ -83697,27 +83730,27 @@ __45: if !!(zRight != 0) { - goto __363 + goto __364 } if !(Xsqlite3ReadSchema(tls, pParse) != 0) { - goto __365 + goto __366 } goto pragma_out -__365: +__366: ; returnSingleText(tls, v, encnames1[(*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).Fenc].FzName) - goto __364 -__363: + goto __365 +__364: if !((*Sqlite3)(unsafe.Pointer(db)).FmDbFlags&U32(DBFLAG_EncodingFixed) == U32(0)) { - goto __366 + goto __367 } pEnc = uintptr(unsafe.Pointer(&encnames1)) -__367: +__368: if !((*EncName)(unsafe.Pointer(pEnc)).FzName != 0) { - goto __369 + goto __370 } if !(0 == Xsqlite3StrICmp(tls, zRight, (*EncName)(unsafe.Pointer(pEnc)).FzName)) { - goto __370 + goto __371 } if (*EncName)(unsafe.Pointer(pEnc)).Fenc != 0 { enc = (*EncName)(unsafe.Pointer(pEnc)).Fenc @@ -83726,25 +83759,25 @@ } (*Schema)(unsafe.Pointer((*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb)).FpSchema)).Fenc = enc Xsqlite3SetTextEncoding(tls, db, enc) - goto __369 -__370: + goto __370 +__371: ; - goto __368 -__368: - pEnc += 8 - goto __367 goto __369 __369: + pEnc += 8 + goto __368 + goto __370 +__370: ; if !!(int32((*EncName)(unsafe.Pointer(pEnc)).FzName) != 0) { - goto __371 + goto __372 } - Xsqlite3ErrorMsg(tls, pParse, ts+18008, libc.VaList(bp+456, zRight)) -__371: + Xsqlite3ErrorMsg(tls, pParse, ts+18055, libc.VaList(bp+456, zRight)) +__372: ; -__366: +__367: ; -__364: +__365: ; goto __15 @@ -83752,15 +83785,15 @@ iCookie = int32((*PragmaName)(unsafe.Pointer(pPragma)).FiArg) Xsqlite3VdbeUsesBtree(tls, v, iDb) if !(zRight != 0 && int32((*PragmaName)(unsafe.Pointer(pPragma)).FmPragFlg)&PragFlg_ReadOnly == 0) { - goto __372 + goto __373 } aOp3 = Xsqlite3VdbeAddOpList(tls, v, int32(uint32(unsafe.Sizeof(setCookie))/uint32(unsafe.Sizeof(VdbeOpList{}))), uintptr(unsafe.Pointer(&setCookie)), 0) if !(0 != 0) { - goto __374 + goto __375 } goto __15 -__374: +__375: ; (*VdbeOp)(unsafe.Pointer(aOp3)).Fp1 = iDb (*VdbeOp)(unsafe.Pointer(aOp3 + 1*20)).Fp1 = iDb @@ -83768,41 +83801,41 @@ (*VdbeOp)(unsafe.Pointer(aOp3 + 1*20)).Fp3 = Xsqlite3Atoi(tls, zRight) (*VdbeOp)(unsafe.Pointer(aOp3 + 1*20)).Fp5 = U16(1) if !(iCookie == BTREE_SCHEMA_VERSION && (*Sqlite3)(unsafe.Pointer(db)).Fflags&uint64(SQLITE_Defensive) != uint64(0)) { - goto __375 + goto __376 } (*VdbeOp)(unsafe.Pointer(aOp3 + 1*20)).Fopcode = U8(OP_Noop) -__375: +__376: ; - goto __373 -__372: + goto __374 +__373: ; aOp4 = Xsqlite3VdbeAddOpList(tls, v, int32(uint32(unsafe.Sizeof(readCookie))/uint32(unsafe.Sizeof(VdbeOpList{}))), uintptr(unsafe.Pointer(&readCookie)), 0) if !(0 != 0) { - goto __376 + goto __377 } goto __15 -__376: +__377: ; (*VdbeOp)(unsafe.Pointer(aOp4)).Fp1 = iDb (*VdbeOp)(unsafe.Pointer(aOp4 + 1*20)).Fp1 = iDb (*VdbeOp)(unsafe.Pointer(aOp4 + 1*20)).Fp3 = iCookie Xsqlite3VdbeReusable(tls, v) -__373: +__374: ; goto __15 __47: i10 = 0 (*Parse)(unsafe.Pointer(pParse)).FnMem = 1 -__377: +__378: if !(libc.AssignUintptr(&zOpt, Xsqlite3_compileoption_get(tls, libc.PostIncInt32(&i10, 1))) != uintptr(0)) { - goto __378 + goto __379 } Xsqlite3VdbeLoadString(tls, v, 1, zOpt) Xsqlite3VdbeAddOp2(tls, v, OP_ResultRow, 1, 1) - goto __377 -__378: + goto __378 +__379: ; Xsqlite3VdbeReusable(tls, v) @@ -83817,31 +83850,31 @@ }() eMode2 = SQLITE_CHECKPOINT_PASSIVE if !(zRight != 0) { - goto __379 + goto __380 } if !(Xsqlite3StrICmp(tls, zRight, ts+17338) == 0) { - goto __380 + goto __381 } eMode2 = SQLITE_CHECKPOINT_FULL - goto __381 -__380: - if 
!(Xsqlite3StrICmp(tls, zRight, ts+18033) == 0) { - goto __382 + goto __382 +__381: + if !(Xsqlite3StrICmp(tls, zRight, ts+18080) == 0) { + goto __383 } eMode2 = SQLITE_CHECKPOINT_RESTART - goto __383 -__382: + goto __384 +__383: if !(Xsqlite3StrICmp(tls, zRight, ts+17491) == 0) { - goto __384 + goto __385 } eMode2 = SQLITE_CHECKPOINT_TRUNCATE -__384: +__385: ; -__383: +__384: ; -__381: +__382: ; -__379: +__380: ; (*Parse)(unsafe.Pointer(pParse)).FnMem = 3 Xsqlite3VdbeAddOp3(tls, v, OP_Checkpoint, iBt, eMode2, 1) @@ -83851,10 +83884,10 @@ __49: if !(zRight != 0) { - goto __385 + goto __386 } Xsqlite3_wal_autocheckpoint(tls, db, Xsqlite3Atoi(tls, zRight)) -__385: +__386: ; returnSingleInt(tls, v, func() int64 { @@ -83874,19 +83907,19 @@ __51: if !(zRight != 0) { - goto __386 + goto __387 } opMask = U32(Xsqlite3Atoi(tls, zRight)) if !(opMask&U32(0x02) == U32(0)) { - goto __388 + goto __389 } goto __15 -__388: +__389: ; - goto __387 -__386: - opMask = U32(0xfffe) + goto __388 __387: + opMask = U32(0xfffe) +__388: ; iTabCur = libc.PostIncInt32(&(*Parse)(unsafe.Pointer(pParse)).FnTab, 1) iDbLast = func() int32 { @@ -83895,86 +83928,86 @@ } return (*Sqlite3)(unsafe.Pointer(db)).FnDb - 1 }() -__389: +__390: if !(iDb <= iDbLast) { - goto __391 + goto __392 } if !(iDb == 1) { - goto __392 + goto __393 } - goto __390 -__392: + goto __391 +__393: ; Xsqlite3CodeVerifySchema(tls, pParse, iDb) pSchema = (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + uintptr(iDb)*16)).FpSchema k4 = (*Hash)(unsafe.Pointer(pSchema + 8)).Ffirst -__393: +__394: if !(k4 != 0) { - goto __395 + goto __396 } pTab10 = (*HashElem)(unsafe.Pointer(k4)).Fdata if !((*Table)(unsafe.Pointer(pTab10)).FtabFlags&U32(TF_StatsUsed) == U32(0)) { - goto __396 + goto __397 } - goto __394 -__396: + goto __395 +__397: ; szThreshold = LogEst(int32((*Table)(unsafe.Pointer(pTab10)).FnRowLogEst) + 46) pIdx6 = (*Table)(unsafe.Pointer(pTab10)).FpIndex -__397: +__398: if !(pIdx6 != 0) { - goto __399 + goto __400 } if !!(int32(*(*uint16)(unsafe.Pointer(pIdx6 + 56))&0x80>>7) != 0) { - goto __400 + goto __401 } szThreshold = int16(0) - goto __399 -__400: + goto __400 +__401: ; - goto __398 -__398: - pIdx6 = (*Index)(unsafe.Pointer(pIdx6)).FpNext - goto __397 goto __399 __399: + pIdx6 = (*Index)(unsafe.Pointer(pIdx6)).FpNext + goto __398 + goto __400 +__400: ; if !(szThreshold != 0) { - goto __401 + goto __402 } Xsqlite3OpenTable(tls, pParse, iTabCur, iDb, pTab10, OP_OpenRead) Xsqlite3VdbeAddOp3(tls, v, OP_IfSmaller, iTabCur, int32(U32(Xsqlite3VdbeCurrentAddr(tls, v)+2)+opMask&U32(1)), int32(szThreshold)) -__401: +__402: ; - zSubSql = Xsqlite3MPrintf(tls, db, ts+18041, + zSubSql = Xsqlite3MPrintf(tls, db, ts+18088, libc.VaList(bp+464, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*16)).FzDbSName, (*Table)(unsafe.Pointer(pTab10)).FzName)) if !(opMask&U32(0x01) != 0) { - goto __402 + goto __403 } r11 = Xsqlite3GetTempReg(tls, pParse) Xsqlite3VdbeAddOp4(tls, v, OP_String8, 0, r11, 0, zSubSql, -6) Xsqlite3VdbeAddOp2(tls, v, OP_ResultRow, r11, 1) - goto __403 -__402: - Xsqlite3VdbeAddOp4(tls, v, OP_SqlExec, 0, 0, 0, zSubSql, -6) + goto __404 __403: + Xsqlite3VdbeAddOp4(tls, v, OP_SqlExec, 0, 0, 0, zSubSql, -6) +__404: ; - goto __394 -__394: - k4 = (*HashElem)(unsafe.Pointer(k4)).Fnext - goto __393 goto __395 __395: + k4 = (*HashElem)(unsafe.Pointer(k4)).Fnext + goto __394 + goto __396 +__396: ; - goto __390 -__390: - iDb++ - goto __389 goto __391 __391: + iDb++ + goto __390 + goto __392 +__392: ; Xsqlite3VdbeAddOp0(tls, v, 
OP_Expire) goto __15 @@ -83982,36 +84015,36 @@ __52: ; if !(zRight != 0) { - goto __404 + goto __405 } Xsqlite3_busy_timeout(tls, db, Xsqlite3Atoi(tls, zRight)) -__404: +__405: ; returnSingleInt(tls, v, int64((*Sqlite3)(unsafe.Pointer(db)).FbusyTimeout)) goto __15 __53: if !(zRight != 0 && Xsqlite3DecOrHexToI64(tls, zRight, bp+580) == SQLITE_OK) { - goto __405 + goto __406 } Xsqlite3_soft_heap_limit64(tls, *(*Sqlite3_int64)(unsafe.Pointer(bp + 580))) -__405: +__406: ; returnSingleInt(tls, v, Xsqlite3_soft_heap_limit64(tls, int64(-1))) goto __15 __54: if !(zRight != 0 && Xsqlite3DecOrHexToI64(tls, zRight, bp+588) == SQLITE_OK) { - goto __406 + goto __407 } iPrior = Xsqlite3_hard_heap_limit64(tls, int64(-1)) if !(*(*Sqlite3_int64)(unsafe.Pointer(bp + 588)) > int64(0) && (iPrior == int64(0) || iPrior > *(*Sqlite3_int64)(unsafe.Pointer(bp + 588)))) { - goto __407 + goto __408 } Xsqlite3_hard_heap_limit64(tls, *(*Sqlite3_int64)(unsafe.Pointer(bp + 588))) -__407: +__408: ; -__406: +__407: ; returnSingleInt(tls, v, Xsqlite3_hard_heap_limit64(tls, int64(-1))) goto __15 @@ -84020,10 +84053,10 @@ if !(zRight != 0 && Xsqlite3DecOrHexToI64(tls, zRight, bp+596) == SQLITE_OK && *(*Sqlite3_int64)(unsafe.Pointer(bp + 596)) >= int64(0)) { - goto __408 + goto __409 } Xsqlite3_limit(tls, db, SQLITE_LIMIT_WORKER_THREADS, int32(*(*Sqlite3_int64)(unsafe.Pointer(bp + 596))&int64(0x7fffffff))) -__408: +__409: ; returnSingleInt(tls, v, int64(Xsqlite3_limit(tls, db, SQLITE_LIMIT_WORKER_THREADS, -1))) goto __15 @@ -84032,10 +84065,10 @@ if !(zRight != 0 && Xsqlite3DecOrHexToI64(tls, zRight, bp+604) == SQLITE_OK && *(*Sqlite3_int64)(unsafe.Pointer(bp + 604)) >= int64(0)) { - goto __409 + goto __410 } (*Sqlite3)(unsafe.Pointer(db)).FnAnalysisLimit = int32(*(*Sqlite3_int64)(unsafe.Pointer(bp + 604)) & int64(0x7fffffff)) -__409: +__410: ; returnSingleInt(tls, v, int64((*Sqlite3)(unsafe.Pointer(db)).FnAnalysisLimit)) goto __15 @@ -84043,10 +84076,10 @@ __15: ; if !(int32((*PragmaName)(unsafe.Pointer(pPragma)).FmPragFlg)&PragFlg_NoColumns1 != 0 && zRight != 0) { - goto __410 + goto __411 } -__410: +__411: ; pragma_out: Xsqlite3DbFree(tls, db, zLeft) @@ -84098,14 +84131,14 @@ {Fopcode: U8(OP_Goto), Fp2: int8(3)}, } var encnames1 = [9]EncName{ - {FzName: ts + 18059, Fenc: U8(SQLITE_UTF8)}, - {FzName: ts + 18064, Fenc: U8(SQLITE_UTF8)}, - {FzName: ts + 18070, Fenc: U8(SQLITE_UTF16LE)}, - {FzName: ts + 18079, Fenc: U8(SQLITE_UTF16BE)}, - {FzName: ts + 18088, Fenc: U8(SQLITE_UTF16LE)}, - {FzName: ts + 18096, Fenc: U8(SQLITE_UTF16BE)}, - {FzName: ts + 18104}, - {FzName: ts + 18111}, + {FzName: ts + 18106, Fenc: U8(SQLITE_UTF8)}, + {FzName: ts + 18111, Fenc: U8(SQLITE_UTF8)}, + {FzName: ts + 18117, Fenc: U8(SQLITE_UTF16LE)}, + {FzName: ts + 18126, Fenc: U8(SQLITE_UTF16BE)}, + {FzName: ts + 18135, Fenc: U8(SQLITE_UTF16LE)}, + {FzName: ts + 18143, Fenc: U8(SQLITE_UTF16BE)}, + {FzName: ts + 18151}, + {FzName: ts + 18158}, {}, } var setCookie = [2]VdbeOpList{ @@ -84157,7 +84190,7 @@ _ = argc _ = argv Xsqlite3StrAccumInit(tls, bp+32, uintptr(0), bp+56, int32(unsafe.Sizeof([200]int8{})), 0) - Xsqlite3_str_appendall(tls, bp+32, ts+18117) + Xsqlite3_str_appendall(tls, bp+32, ts+18164) i = 0 j = int32((*PragmaName)(unsafe.Pointer(pPragma)).FiPragCName) __1: @@ -84165,7 +84198,7 @@ goto __3 } { - Xsqlite3_str_appendf(tls, bp+32, ts+18132, libc.VaList(bp, int32(cSep), pragCName[j])) + Xsqlite3_str_appendf(tls, bp+32, ts+18179, libc.VaList(bp, int32(cSep), pragCName[j])) cSep = int8(',') } @@ -84178,16 +84211,16 @@ __3: ; if i == 0 { - 
Xsqlite3_str_appendf(tls, bp+32, ts+18139, libc.VaList(bp+16, (*PragmaName)(unsafe.Pointer(pPragma)).FzName)) + Xsqlite3_str_appendf(tls, bp+32, ts+18186, libc.VaList(bp+16, (*PragmaName)(unsafe.Pointer(pPragma)).FzName)) i++ } j = 0 if int32((*PragmaName)(unsafe.Pointer(pPragma)).FmPragFlg)&PragFlg_Result1 != 0 { - Xsqlite3_str_appendall(tls, bp+32, ts+18145) + Xsqlite3_str_appendall(tls, bp+32, ts+18192) j++ } if int32((*PragmaName)(unsafe.Pointer(pPragma)).FmPragFlg)&(PragFlg_SchemaOpt|PragFlg_SchemaReq) != 0 { - Xsqlite3_str_appendall(tls, bp+32, ts+18157) + Xsqlite3_str_appendall(tls, bp+32, ts+18204) j++ } Xsqlite3_str_append(tls, bp+32, ts+4950, 1) @@ -84370,13 +84403,13 @@ __3: ; Xsqlite3StrAccumInit(tls, bp+32, uintptr(0), uintptr(0), 0, *(*int32)(unsafe.Pointer((*PragmaVtab)(unsafe.Pointer(pTab)).Fdb + 116 + 1*4))) - Xsqlite3_str_appendall(tls, bp+32, ts+18172) + Xsqlite3_str_appendall(tls, bp+32, ts+18219) if *(*uintptr)(unsafe.Pointer(pCsr + 16 + 1*4)) != 0 { - Xsqlite3_str_appendf(tls, bp+32, ts+18180, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(pCsr + 16 + 1*4)))) + Xsqlite3_str_appendf(tls, bp+32, ts+18227, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(pCsr + 16 + 1*4)))) } Xsqlite3_str_appendall(tls, bp+32, (*PragmaName)(unsafe.Pointer((*PragmaVtab)(unsafe.Pointer(pTab)).FpName)).FzName) if *(*uintptr)(unsafe.Pointer(pCsr + 16)) != 0 { - Xsqlite3_str_appendf(tls, bp+32, ts+18184, libc.VaList(bp+16, *(*uintptr)(unsafe.Pointer(pCsr + 16)))) + Xsqlite3_str_appendf(tls, bp+32, ts+18231, libc.VaList(bp+16, *(*uintptr)(unsafe.Pointer(pCsr + 16)))) } zSql = Xsqlite3StrAccumFinish(tls, bp+32) if zSql == uintptr(0) { @@ -84453,12 +84486,12 @@ } else if *(*uintptr)(unsafe.Pointer((*InitData)(unsafe.Pointer(pData)).FpzErrMsg)) != uintptr(0) { } else if (*InitData)(unsafe.Pointer(pData)).FmInitFlags&U32(INITFLAG_AlterMask) != 0 { *(*uintptr)(unsafe.Pointer((*InitData)(unsafe.Pointer(pData)).FpzErrMsg)) = Xsqlite3MPrintf(tls, db, - ts+18188, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(azObj)), *(*uintptr)(unsafe.Pointer(azObj + 1*4)), + ts+18235, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(azObj)), *(*uintptr)(unsafe.Pointer(azObj + 1*4)), azAlterType[(*InitData)(unsafe.Pointer(pData)).FmInitFlags&U32(INITFLAG_AlterMask)-U32(1)], zExtra)) (*InitData)(unsafe.Pointer(pData)).Frc = SQLITE_ERROR } else if (*Sqlite3)(unsafe.Pointer(db)).Fflags&uint64(SQLITE_WriteSchema) != 0 { - (*InitData)(unsafe.Pointer(pData)).Frc = Xsqlite3CorruptError(tls, 137196) + (*InitData)(unsafe.Pointer(pData)).Frc = Xsqlite3CorruptError(tls, 137249) } else { var z uintptr var zObj uintptr @@ -84467,19 +84500,19 @@ } else { zObj = ts + 5001 } - z = Xsqlite3MPrintf(tls, db, ts+18216, libc.VaList(bp+32, zObj)) + z = Xsqlite3MPrintf(tls, db, ts+18263, libc.VaList(bp+32, zObj)) if zExtra != 0 && *(*int8)(unsafe.Pointer(zExtra)) != 0 { - z = Xsqlite3MPrintf(tls, db, ts+18247, libc.VaList(bp+40, z, zExtra)) + z = Xsqlite3MPrintf(tls, db, ts+18294, libc.VaList(bp+40, z, zExtra)) } *(*uintptr)(unsafe.Pointer((*InitData)(unsafe.Pointer(pData)).FpzErrMsg)) = z - (*InitData)(unsafe.Pointer(pData)).Frc = Xsqlite3CorruptError(tls, 137203) + (*InitData)(unsafe.Pointer(pData)).Frc = Xsqlite3CorruptError(tls, 137256) } } var azAlterType = [3]uintptr{ - ts + 18255, - ts + 18262, - ts + 18274, + ts + 18302, + ts + 18309, + ts + 18321, } // Check to see if any sibling index (another index on the same table) @@ -84571,7 +84604,7 @@ var pIndex uintptr pIndex = Xsqlite3FindIndex(tls, db, *(*uintptr)(unsafe.Pointer(argv + 1*4)), 
(*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*16)).FzDbSName) if pIndex == uintptr(0) { - corruptSchema(tls, pData, argv, ts+18285) + corruptSchema(tls, pData, argv, ts+18332) } else if Xsqlite3GetUInt32(tls, *(*uintptr)(unsafe.Pointer(argv + 3*4)), pIndex+44) == 0 || (*Index)(unsafe.Pointer(pIndex)).Ftnum < Pgno(2) || (*Index)(unsafe.Pointer(pIndex)).Ftnum > (*InitData)(unsafe.Pointer(pData)).FmxPage || @@ -84619,7 +84652,7 @@ }()) *(*uintptr)(unsafe.Pointer(bp + 16 + 2*4)) = *(*uintptr)(unsafe.Pointer(bp + 16 + 1*4)) *(*uintptr)(unsafe.Pointer(bp + 16 + 3*4)) = ts + 7931 - *(*uintptr)(unsafe.Pointer(bp + 16 + 4*4)) = ts + 18298 + *(*uintptr)(unsafe.Pointer(bp + 16 + 4*4)) = ts + 18345 *(*uintptr)(unsafe.Pointer(bp + 16 + 5*4)) = uintptr(0) (*InitData)(unsafe.Pointer(bp + 40)).Fdb = db (*InitData)(unsafe.Pointer(bp + 40)).FiDb = iDb @@ -84748,7 +84781,7 @@ if !(int32((*Schema)(unsafe.Pointer((*Db)(unsafe.Pointer(pDb)).FpSchema)).Ffile_format) > SQLITE_MAX_FILE_FORMAT) { goto __19 } - Xsqlite3SetString(tls, pzErrMsg, db, ts+18370) + Xsqlite3SetString(tls, pzErrMsg, db, ts+18417) rc = SQLITE_ERROR goto initone_error_out __19: @@ -84762,7 +84795,7 @@ (*InitData)(unsafe.Pointer(bp + 40)).FmxPage = Xsqlite3BtreeLastPage(tls, (*Db)(unsafe.Pointer(pDb)).FpBt) zSql = Xsqlite3MPrintf(tls, db, - ts+18394, + ts+18441, libc.VaList(bp, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*16)).FzDbSName, zSchemaTabName)) xAuth = (*Sqlite3)(unsafe.Pointer(db)).FxAuth @@ -85094,7 +85127,7 @@ goto __8 } zDb = (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + uintptr(i)*16)).FzDbSName - Xsqlite3ErrorWithMsg(tls, db, rc, ts+18428, libc.VaList(bp, zDb)) + Xsqlite3ErrorWithMsg(tls, db, rc, ts+18475, libc.VaList(bp, zDb)) goto end_prepare __8: @@ -85124,7 +85157,7 @@ if !(nBytes > mxLen) { goto __12 } - Xsqlite3ErrorWithMsg(tls, db, SQLITE_TOOBIG, ts+18458, 0) + Xsqlite3ErrorWithMsg(tls, db, SQLITE_TOOBIG, ts+18505, 0) rc = Xsqlite3ApiExit(tls, db, SQLITE_TOOBIG) goto end_prepare __12: @@ -85220,7 +85253,7 @@ *(*uintptr)(unsafe.Pointer(ppStmt)) = uintptr(0) if !(Xsqlite3SafetyCheckOk(tls, db) != 0) || zSql == uintptr(0) { - return Xsqlite3MisuseError(tls, 137995) + return Xsqlite3MisuseError(tls, 138048) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) Xsqlite3BtreeEnterAll(tls, db) @@ -85319,7 +85352,7 @@ *(*uintptr)(unsafe.Pointer(ppStmt)) = uintptr(0) if !(Xsqlite3SafetyCheckOk(tls, db) != 0) || zSql == uintptr(0) { - return Xsqlite3MisuseError(tls, 138143) + return Xsqlite3MisuseError(tls, 138196) } if nBytes >= 0 { var sz int32 @@ -85646,13 +85679,13 @@ zSp2++ } Xsqlite3ErrorMsg(tls, pParse, - ts+18477, libc.VaList(bp, pA, zSp1, pB, zSp2, pC)) + ts+18524, libc.VaList(bp, pA, zSp1, pB, zSp2, pC)) jointype = JT_INNER } return jointype } -var zKeyText = *(*[34]int8)(unsafe.Pointer(ts + 18507)) +var zKeyText = *(*[34]int8)(unsafe.Pointer(ts + 18554)) var aKeyword = [7]struct { Fi U8 FnChar U8 @@ -85827,7 +85860,7 @@ var pUsing uintptr = uintptr(0) if uint32(int32(*(*uint16)(unsafe.Pointer(pRight + 36 + 4))&0x400>>10)) != 0 || *(*uintptr)(unsafe.Pointer(pRight + 48)) != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+18541, libc.VaList(bp, 0)) + ts+18588, libc.VaList(bp, 0)) return 1 } for j = 0; j < int32((*Table)(unsafe.Pointer(pRightTab)).FnCol); j++ { @@ -85872,7 +85905,7 @@ tableAndColumnIndex(tls, pSrc, 0, i, zName, bp+24, bp+28, int32(*(*uint16)(unsafe.Pointer(pRight + 36 + 4))&0x1000>>12)) == 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+18591, 
libc.VaList(bp+8, zName)) + ts+18638, libc.VaList(bp+8, zName)) return 1 } pE1 = Xsqlite3CreateColumnExpr(tls, db, pSrc, *(*int32)(unsafe.Pointer(bp + 24)), *(*int32)(unsafe.Pointer(bp + 28))) @@ -85883,7 +85916,7 @@ int32(*(*uint16)(unsafe.Pointer(pRight + 36 + 4))&0x1000>>12)) != 0 { if int32(*(*uint16)(unsafe.Pointer(pSrc + 8 + uintptr(*(*int32)(unsafe.Pointer(bp + 24)))*68 + 36 + 4))&0x400>>10) == 0 || Xsqlite3IdListIndex(tls, *(*uintptr)(unsafe.Pointer(pSrc + 8 + uintptr(*(*int32)(unsafe.Pointer(bp + 24)))*68 + 48)), zName) < 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+18655, + Xsqlite3ErrorMsg(tls, pParse, ts+18702, libc.VaList(bp+16, zName)) break } @@ -86511,16 +86544,16 @@ var z uintptr switch id { case TK_ALL: - z = ts + 18692 + z = ts + 18739 break case TK_INTERSECT: - z = ts + 18702 + z = ts + 18749 break case TK_EXCEPT: - z = ts + 18712 + z = ts + 18759 break default: - z = ts + 18719 + z = ts + 18766 break } return z @@ -86530,7 +86563,7 @@ bp := tls.Alloc(8) defer tls.Free(8) - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18725, libc.VaList(bp, zUsage)) + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18772, libc.VaList(bp, zUsage)) } func generateSortTail(tls *libc.TLS, pParse uintptr, p uintptr, pSort uintptr, nColumn int32, pDest uintptr) { @@ -86556,9 +86589,9 @@ var nRefKey int32 = 0 var aOutEx uintptr = (*Select)(unsafe.Pointer(p)).FpEList + 8 - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18748, libc.VaList(bp, func() uintptr { + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18795, libc.VaList(bp, func() uintptr { if (*SortCtx)(unsafe.Pointer(pSort)).FnOBSat > 0 { - return ts + 18779 + return ts + 18826 } return ts + 1547 }())) @@ -86902,7 +86935,7 @@ } else { var z uintptr = (*ExprList_item)(unsafe.Pointer(pEList + 8 + uintptr(i)*20)).FzEName if z == uintptr(0) { - z = Xsqlite3MPrintf(tls, db, ts+18794, libc.VaList(bp+16, i+1)) + z = Xsqlite3MPrintf(tls, db, ts+18841, libc.VaList(bp+16, i+1)) } else { z = Xsqlite3DbStrDup(tls, db, z) } @@ -87002,7 +87035,7 @@ if zName != 0 && !(Xsqlite3IsTrueOrFalse(tls, zName) != 0) { zName = Xsqlite3DbStrDup(tls, db, zName) } else { - zName = Xsqlite3MPrintf(tls, db, ts+18794, libc.VaList(bp, i+1)) + zName = Xsqlite3MPrintf(tls, db, ts+18841, libc.VaList(bp, i+1)) } *(*U32)(unsafe.Pointer(bp + 48)) = U32(0) @@ -87018,7 +87051,7 @@ nName = j } } - zName = Xsqlite3MPrintf(tls, db, ts+18803, libc.VaList(bp+8, nName, zName, libc.PreIncUint32(&*(*U32)(unsafe.Pointer(bp + 48)), 1))) + zName = Xsqlite3MPrintf(tls, db, ts+18850, libc.VaList(bp+8, nName, zName, libc.PreIncUint32(&*(*U32)(unsafe.Pointer(bp + 48)), 1))) Xsqlite3ProgressCheck(tls, pParse) if *(*U32)(unsafe.Pointer(bp + 48)) > U32(3) { Xsqlite3_randomness(tls, int32(unsafe.Sizeof(U32(0))), bp+48) @@ -87101,8 +87134,6 @@ (*Column)(unsafe.Pointer(pCol)).Faffinity = Xsqlite3ExprAffinity(tls, p) if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) <= SQLITE_AFF_NONE { (*Column)(unsafe.Pointer(pCol)).Faffinity = aff - } else if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) >= SQLITE_AFF_NUMERIC && int32((*Expr)(unsafe.Pointer(p)).Fop) == TK_CAST { - (*Column)(unsafe.Pointer(pCol)).Faffinity = int8(SQLITE_AFF_FLEXNUM) } if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) >= SQLITE_AFF_TEXT && (*Select)(unsafe.Pointer(pSelect)).FpNext != 0 { var m int32 = 0 @@ -87117,12 +87148,15 @@ } else if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) >= SQLITE_AFF_NUMERIC && m&0x02 != 0 { (*Column)(unsafe.Pointer(pCol)).Faffinity = int8(SQLITE_AFF_BLOB) } + if 
int32((*Column)(unsafe.Pointer(pCol)).Faffinity) >= SQLITE_AFF_NUMERIC && int32((*Expr)(unsafe.Pointer(p)).Fop) == TK_CAST { + (*Column)(unsafe.Pointer(pCol)).Faffinity = int8(SQLITE_AFF_FLEXNUM) + } } zType = columnTypeImpl(tls, bp, p, uintptr(0), uintptr(0), uintptr(0)) if zType == uintptr(0) || int32((*Column)(unsafe.Pointer(pCol)).Faffinity) != int32(Xsqlite3AffinityType(tls, zType, uintptr(0))) { if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) == SQLITE_AFF_NUMERIC || int32((*Column)(unsafe.Pointer(pCol)).Faffinity) == SQLITE_AFF_FLEXNUM { - zType = ts + 18811 + zType = ts + 18858 } else { zType = uintptr(0) for j = 1; j < SQLITE_N_STDTYPE; j++ { @@ -87338,7 +87372,7 @@ if !((*Select)(unsafe.Pointer(p)).FpWin != 0) { goto __1 } - Xsqlite3ErrorMsg(tls, pParse, ts+18815, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+18862, 0) return __1: ; @@ -87429,7 +87463,7 @@ if !((*Select)(unsafe.Pointer(pFirstRec)).FselFlags&U32(SF_Aggregate) != 0) { goto __15 } - Xsqlite3ErrorMsg(tls, pParse, ts+18864, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+18911, 0) goto end_of_recursive_query __15: ; @@ -87449,7 +87483,7 @@ ; pSetup = (*Select)(unsafe.Pointer(pFirstRec)).FpPrior (*Select)(unsafe.Pointer(pSetup)).FpNext = uintptr(0) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18906, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18953, 0) rc = Xsqlite3Select(tls, pParse, pSetup, bp) (*Select)(unsafe.Pointer(pSetup)).FpNext = p if !(rc != 0) { @@ -87486,7 +87520,7 @@ Xsqlite3VdbeResolveLabel(tls, v, addrCont) (*Select)(unsafe.Pointer(pFirstRec)).FpPrior = uintptr(0) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18912, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18959, 0) Xsqlite3Select(tls, pParse, p, bp) (*Select)(unsafe.Pointer(pFirstRec)).FpPrior = pSetup @@ -87520,11 +87554,11 @@ p = (*Select)(unsafe.Pointer(p)).FpPrior nRow = nRow + bShowAll } - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18927, libc.VaList(bp, nRow, func() uintptr { + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18974, libc.VaList(bp, nRow, func() uintptr { if nRow == 1 { return ts + 1547 } - return ts + 18950 + return ts + 18997 }())) for p != 0 { selectInnerLoop(tls, pParse, p, -1, uintptr(0), uintptr(0), pDest, 1, 1) @@ -87625,8 +87659,8 @@ if !((*Select)(unsafe.Pointer(pPrior)).FpPrior == uintptr(0)) { goto __8 } - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18952, 0) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18967, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18999, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19014, 0) __8: ; switch int32((*Select)(unsafe.Pointer(p)).Fop) { @@ -87673,7 +87707,7 @@ ; __15: ; - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18692, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18739, 0) rc = Xsqlite3Select(tls, pParse, p, bp+16) @@ -87740,7 +87774,7 @@ pLimit = (*Select)(unsafe.Pointer(p)).FpLimit (*Select)(unsafe.Pointer(p)).FpLimit = uintptr(0) (*SelectDest)(unsafe.Pointer(bp + 48)).FeDest = op - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18986, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19033, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) rc = Xsqlite3Select(tls, pParse, p, bp+48) @@ -87802,7 +87836,7 @@ pLimit1 = (*Select)(unsafe.Pointer(p)).FpLimit (*Select)(unsafe.Pointer(p)).FpLimit = uintptr(0) (*SelectDest)(unsafe.Pointer(bp + 76)).FiSDParm = tab2 - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18986, libc.VaList(bp+8, 
Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19033, libc.VaList(bp+8, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) rc = Xsqlite3Select(tls, pParse, p, bp+76) @@ -87955,10 +87989,10 @@ defer tls.Free(8) if (*Select)(unsafe.Pointer(p)).FselFlags&U32(SF_Values) != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+19007, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+19054, 0) } else { Xsqlite3ErrorMsg(tls, pParse, - ts+19053, + ts+19100, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) } } @@ -88212,8 +88246,8 @@ (*Select)(unsafe.Pointer(pPrior)).FpNext = uintptr(0) (*Select)(unsafe.Pointer(pPrior)).FpOrderBy = Xsqlite3ExprListDup(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pOrderBy, 0) - Xsqlite3ResolveOrderGroupBy(tls, pParse, p, (*Select)(unsafe.Pointer(p)).FpOrderBy, ts+7227) - Xsqlite3ResolveOrderGroupBy(tls, pParse, pPrior, (*Select)(unsafe.Pointer(pPrior)).FpOrderBy, ts+7227) + Xsqlite3ResolveOrderGroupBy(tls, pParse, p, (*Select)(unsafe.Pointer(p)).FpOrderBy, ts+7216) + Xsqlite3ResolveOrderGroupBy(tls, pParse, pPrior, (*Select)(unsafe.Pointer(pPrior)).FpOrderBy, ts+7216) computeLimitRegisters(tls, pParse, p, labelEnd) if (*Select)(unsafe.Pointer(p)).FiLimit != 0 && op == TK_ALL { @@ -88240,13 +88274,13 @@ Xsqlite3SelectDestInit(tls, bp+8, SRT_Coroutine, regAddrA) Xsqlite3SelectDestInit(tls, bp+36, SRT_Coroutine, regAddrB) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19135, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19182, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) addrSelectA = Xsqlite3VdbeCurrentAddr(tls, v) + 1 addr1 = Xsqlite3VdbeAddOp3(tls, v, OP_InitCoroutine, regAddrA, 0, addrSelectA) (*Select)(unsafe.Pointer(pPrior)).FiLimit = regLimitA - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19146, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19193, 0) Xsqlite3Select(tls, pParse, pPrior, bp+8) Xsqlite3VdbeEndCoroutine(tls, v, regAddrA) Xsqlite3VdbeJumpHere(tls, v, addr1) @@ -88258,7 +88292,7 @@ savedOffset = (*Select)(unsafe.Pointer(p)).FiOffset (*Select)(unsafe.Pointer(p)).FiLimit = regLimitB (*Select)(unsafe.Pointer(p)).FiOffset = 0 - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19151, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19198, 0) Xsqlite3Select(tls, pParse, p, bp+36) (*Select)(unsafe.Pointer(p)).FiLimit = savedLimit (*Select)(unsafe.Pointer(p)).FiOffset = savedOffset @@ -88445,7 +88479,8 @@ Xsqlite3VectorErrorMsg(tls, (*SubstContext)(unsafe.Pointer(pSubst)).FpParse, pCopy) } else { var db uintptr = (*Parse)(unsafe.Pointer((*SubstContext)(unsafe.Pointer(pSubst)).FpParse)).Fdb - if (*SubstContext)(unsafe.Pointer(pSubst)).FisOuterJoin != 0 && int32((*Expr)(unsafe.Pointer(pCopy)).Fop) != TK_COLUMN { + if (*SubstContext)(unsafe.Pointer(pSubst)).FisOuterJoin != 0 && + (int32((*Expr)(unsafe.Pointer(pCopy)).Fop) != TK_COLUMN || (*Expr)(unsafe.Pointer(pCopy)).FiTable != (*SubstContext)(unsafe.Pointer(pSubst)).FiNewTable) { libc.Xmemset(tls, bp, 0, uint32(unsafe.Sizeof(Expr{}))) (*Expr)(unsafe.Pointer(bp)).Fop = U8(TK_IF_NULL_ROW) (*Expr)(unsafe.Pointer(bp)).FpLeft = pCopy @@ -89344,7 +89379,7 @@ for pIdx = (*Table)(unsafe.Pointer(pTab)).FpIndex; pIdx != 0 && Xsqlite3StrICmp(tls, (*Index)(unsafe.Pointer(pIdx)).FzName, zIndexedBy) != 0; pIdx = (*Index)(unsafe.Pointer(pIdx)).FpNext { } if !(pIdx != 0) { - Xsqlite3ErrorMsg(tls, 
pParse, ts+19157, libc.VaList(bp, zIndexedBy, 0)) + Xsqlite3ErrorMsg(tls, pParse, ts+19204, libc.VaList(bp, zIndexedBy, 0)) (*Parse)(unsafe.Pointer(pParse)).FcheckSchema = U8(1) return SQLITE_ERROR } @@ -89427,7 +89462,7 @@ defer tls.Free(8) if uint32(int32(*(*uint16)(unsafe.Pointer(pFrom + 36 + 4))&0x4>>2)) != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+19175, libc.VaList(bp, (*SrcItem)(unsafe.Pointer(pFrom)).FzName)) + Xsqlite3ErrorMsg(tls, pParse, ts+19222, libc.VaList(bp, (*SrcItem)(unsafe.Pointer(pFrom)).FzName)) return 1 } return 0 @@ -89556,7 +89591,7 @@ *(*U32)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pFrom)).FpSelect + 4)) |= U32(SF_CopyCte) if uint32(int32(*(*uint16)(unsafe.Pointer(pFrom + 36 + 4))&0x2>>1)) != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+19198, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(pFrom + 60)))) + Xsqlite3ErrorMsg(tls, pParse, ts+19245, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(pFrom + 60)))) return 2 } libc.SetBitFieldPtr16Uint32(pFrom+36+4, uint32(1), 8, 0x100) @@ -89579,7 +89614,7 @@ libc.SetBitFieldPtr16Uint32(pItem+36+4, uint32(1), 6, 0x40) if (*Select)(unsafe.Pointer(pRecTerm)).FselFlags&U32(SF_Recursive) != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+19218, libc.VaList(bp+16, (*Cte)(unsafe.Pointer(pCte)).FzName)) + ts+19265, libc.VaList(bp+16, (*Cte)(unsafe.Pointer(pCte)).FzName)) return 2 } *(*U32)(unsafe.Pointer(pRecTerm + 4)) |= U32(SF_Recursive) @@ -89595,7 +89630,7 @@ pRecTerm = (*Select)(unsafe.Pointer(pRecTerm)).FpPrior } - (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19261 + (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19308 pSavedWith = (*Parse)(unsafe.Pointer(pParse)).FpWith (*Parse)(unsafe.Pointer(pParse)).FpWith = *(*uintptr)(unsafe.Pointer(bp + 48)) if (*Select)(unsafe.Pointer(pSel)).FselFlags&U32(SF_Recursive) != 0 { @@ -89621,7 +89656,7 @@ pEList = (*Select)(unsafe.Pointer(pLeft)).FpEList if (*Cte)(unsafe.Pointer(pCte)).FpCols != 0 { if pEList != 0 && (*ExprList)(unsafe.Pointer(pEList)).FnExpr != (*ExprList)(unsafe.Pointer((*Cte)(unsafe.Pointer(pCte)).FpCols)).FnExpr { - Xsqlite3ErrorMsg(tls, pParse, ts+19284, + Xsqlite3ErrorMsg(tls, pParse, ts+19331, libc.VaList(bp+24, (*Cte)(unsafe.Pointer(pCte)).FzName, (*ExprList)(unsafe.Pointer(pEList)).FnExpr, (*ExprList)(unsafe.Pointer((*Cte)(unsafe.Pointer(pCte)).FpCols)).FnExpr)) (*Parse)(unsafe.Pointer(pParse)).FpWith = pSavedWith return 2 @@ -89632,9 +89667,9 @@ Xsqlite3ColumnsFromExprList(tls, pParse, pEList, pTab+34, pTab+4) if bMayRecursive != 0 { if (*Select)(unsafe.Pointer(pSel)).FselFlags&U32(SF_Recursive) != 0 { - (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19322 + (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19369 } else { - (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19356 + (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19403 } Xsqlite3WalkSelect(tls, pWalker, pSel) } @@ -89681,7 +89716,7 @@ if (*SrcItem)(unsafe.Pointer(pFrom)).FzAlias != 0 { (*Table)(unsafe.Pointer(pTab)).FzName = Xsqlite3DbStrDup(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, (*SrcItem)(unsafe.Pointer(pFrom)).FzAlias) } else { - (*Table)(unsafe.Pointer(pTab)).FzName = Xsqlite3MPrintf(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, ts+19394, libc.VaList(bp, pFrom)) + (*Table)(unsafe.Pointer(pTab)).FzName = Xsqlite3MPrintf(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, ts+19441, libc.VaList(bp, pFrom)) } for (*Select)(unsafe.Pointer(pSel)).FpPrior != 0 { pSel = (*Select)(unsafe.Pointer(pSel)).FpPrior @@ -89793,7 +89828,7 @@ return WRC_Abort } if (*Table)(unsafe.Pointer(pTab)).FnTabRef >= U32(0xffff) { - Xsqlite3ErrorMsg(tls, pParse, 
ts+19398, + Xsqlite3ErrorMsg(tls, pParse, ts+19445, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName)) (*SrcItem)(unsafe.Pointer(pFrom)).FpTab = uintptr(0) return WRC_Abort @@ -89812,7 +89847,7 @@ if int32((*Table)(unsafe.Pointer(pTab)).FeTabType) == TABTYP_VIEW { if (*Sqlite3)(unsafe.Pointer(db)).Fflags&uint64(SQLITE_EnableView) == uint64(0) && (*Table)(unsafe.Pointer(pTab)).FpSchema != (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+1*16)).FpSchema { - Xsqlite3ErrorMsg(tls, pParse, ts+19437, + Xsqlite3ErrorMsg(tls, pParse, ts+19484, libc.VaList(bp+8, (*Table)(unsafe.Pointer(pTab)).FzName)) } (*SrcItem)(unsafe.Pointer(pFrom)).FpSelect = Xsqlite3SelectDup(tls, db, *(*uintptr)(unsafe.Pointer(pTab + 44)), 0) @@ -89936,7 +89971,7 @@ if pNew != 0 { var pX uintptr = pNew + 8 + uintptr((*ExprList)(unsafe.Pointer(pNew)).FnExpr-1)*20 - (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3MPrintf(tls, db, ts+19468, libc.VaList(bp+24, zUName)) + (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3MPrintf(tls, db, ts+19515, libc.VaList(bp+24, zUName)) libc.SetBitFieldPtr16Uint32(pX+8+4, uint32(ENAME_TAB), 0, 0x3) libc.SetBitFieldPtr16Uint32(pX+8+4, uint32(1), 7, 0x80) } @@ -90001,7 +90036,7 @@ (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3DbStrDup(tls, db, (*ExprList_item)(unsafe.Pointer(pNestedFrom+8+uintptr(j)*20)).FzEName) } else { - (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3MPrintf(tls, db, ts+19473, + (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3MPrintf(tls, db, ts+19520, libc.VaList(bp+32, zSchemaName, zTabName, zName)) } @@ -90032,9 +90067,9 @@ ; if !(tableSeen != 0) { if zTName != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+19482, libc.VaList(bp+72, zTName)) + Xsqlite3ErrorMsg(tls, pParse, ts+19529, libc.VaList(bp+72, zTName)) } else { - Xsqlite3ErrorMsg(tls, pParse, ts+19500, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+19547, 0) } } } @@ -90044,7 +90079,7 @@ } if (*Select)(unsafe.Pointer(p)).FpEList != 0 { if (*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer(p)).FpEList)).FnExpr > *(*int32)(unsafe.Pointer(db + 116 + 2*4)) { - Xsqlite3ErrorMsg(tls, pParse, ts+19520, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+19567, 0) return WRC_Abort } if elistFlags&U32(EP_HasFunc|EP_Subquery) != U32(0) { @@ -90182,7 +90217,7 @@ (*AggInfo)(unsafe.Pointer(pAggInfo)).FnColumn = (*AggInfo)(unsafe.Pointer(pAggInfo)).FnAccumulator if int32((*AggInfo)(unsafe.Pointer(pAggInfo)).FnSortingColumn) > 0 { if (*AggInfo)(unsafe.Pointer(pAggInfo)).FnColumn == 0 { - (*AggInfo)(unsafe.Pointer(pAggInfo)).FnSortingColumn = U16(0) + (*AggInfo)(unsafe.Pointer(pAggInfo)).FnSortingColumn = U16((*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer(pSelect)).FpGroupBy)).FnExpr) } else { (*AggInfo)(unsafe.Pointer(pAggInfo)).FnSortingColumn = U16(int32((*AggInfo_col)(unsafe.Pointer((*AggInfo)(unsafe.Pointer(pAggInfo)).FaCol+uintptr((*AggInfo)(unsafe.Pointer(pAggInfo)).FnColumn-1)*16)).FiSorterColumn) + 1) } @@ -90266,13 +90301,13 @@ if *(*uintptr)(unsafe.Pointer(pE + 20)) == uintptr(0) || (*ExprList)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pE + 20)))).FnExpr != 1 { Xsqlite3ErrorMsg(tls, pParse, - ts+19551, 0) + ts+19598, 0) (*AggInfo_func)(unsafe.Pointer(pFunc)).FiDistinct = -1 } else { var pKeyInfo uintptr = Xsqlite3KeyInfoFromExprList(tls, pParse, *(*uintptr)(unsafe.Pointer(pE + 20)), 0, 0) (*AggInfo_func)(unsafe.Pointer(pFunc)).FiDistAddr = Xsqlite3VdbeAddOp4(tls, v, OP_OpenEphemeral, (*AggInfo_func)(unsafe.Pointer(pFunc)).FiDistinct, 0, 0, pKeyInfo, -8) - Xsqlite3VdbeExplain(tls, pParse, 
uint8(0), ts+19602, libc.VaList(bp, (*FuncDef)(unsafe.Pointer((*AggInfo_func)(unsafe.Pointer(pFunc)).FpFunc)).FzName)) + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+19649, libc.VaList(bp, (*FuncDef)(unsafe.Pointer((*AggInfo_func)(unsafe.Pointer(pFunc)).FpFunc)).FzName)) } } @@ -90461,11 +90496,11 @@ if int32((*Parse)(unsafe.Pointer(pParse)).Fexplain) == 2 { var bCover int32 = libc.Bool32(pIdx != uintptr(0) && ((*Table)(unsafe.Pointer(pTab)).FtabFlags&U32(TF_WithoutRowid) == U32(0) || !(int32(*(*uint16)(unsafe.Pointer(pIdx + 56))&0x3>>0) == SQLITE_IDXTYPE_PRIMARYKEY))) - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+19635, + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+19682, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName, func() uintptr { if bCover != 0 { - return ts + 19647 + return ts + 19694 } return ts + 1547 }(), @@ -90793,7 +90828,7 @@ goto __7 } Xsqlite3ErrorMsg(tls, pParse, - ts+19670, + ts+19717, libc.VaList(bp, func() uintptr { if (*SrcItem)(unsafe.Pointer(p0)).FzAlias != 0 { return (*SrcItem)(unsafe.Pointer(p0)).FzAlias @@ -90854,7 +90889,7 @@ if !(int32((*Table)(unsafe.Pointer(pTab)).FnCol) != (*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer(pSub)).FpEList)).FnExpr) { goto __15 } - Xsqlite3ErrorMsg(tls, pParse, ts+19724, + Xsqlite3ErrorMsg(tls, pParse, ts+19771, libc.VaList(bp+8, int32((*Table)(unsafe.Pointer(pTab)).FnCol), (*Table)(unsafe.Pointer(pTab)).FzName, (*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer(pSub)).FpEList)).FnExpr)) goto select_end __15: @@ -90996,7 +91031,7 @@ (*SrcItem)(unsafe.Pointer(pItem1)).FaddrFillSub = addrTop Xsqlite3SelectDestInit(tls, bp+88, SRT_Coroutine, (*SrcItem)(unsafe.Pointer(pItem1)).FregReturn) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19764, libc.VaList(bp+32, pItem1)) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19811, libc.VaList(bp+32, pItem1)) Xsqlite3Select(tls, pParse, pSub1, bp+88) (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pItem1)).FpTab)).FnRowLogEst = (*Select)(unsafe.Pointer(pSub1)).FnSelectRow libc.SetBitFieldPtr16Uint32(pItem1+36+4, uint32(1), 5, 0x20) @@ -91055,7 +91090,7 @@ ; Xsqlite3SelectDestInit(tls, bp+88, SRT_EphemTab, (*SrcItem)(unsafe.Pointer(pItem1)).FiCursor) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19779, libc.VaList(bp+40, pItem1)) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19826, libc.VaList(bp+40, pItem1)) Xsqlite3Select(tls, pParse, pSub1, bp+88) (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pItem1)).FpTab)).FnRowLogEst = (*Select)(unsafe.Pointer(pSub1)).FnSelectRow if !(onceAddr != 0) { @@ -91526,9 +91561,9 @@ explainTempTable(tls, pParse, func() uintptr { if (*DistinctCtx)(unsafe.Pointer(bp+116)).FisTnct != 0 && (*Select)(unsafe.Pointer(p)).FselFlags&U32(SF_Distinct) == U32(0) { - return ts + 19795 + return ts + 19842 } - return ts + 19804 + return ts + 19851 }()) groupBySort = 1 @@ -91879,7 +91914,7 @@ if !(int32((*DistinctCtx)(unsafe.Pointer(bp+116)).FeTnctType) == WHERE_DISTINCT_UNORDERED) { goto __146 } - explainTempTable(tls, pParse, ts+19795) + explainTempTable(tls, pParse, ts+19842) __146: ; if !((*SortCtx)(unsafe.Pointer(bp+48)).FpOrderBy != 0) { @@ -91983,7 +92018,7 @@ } Xsqlite3_free(tls, (*TabResult)(unsafe.Pointer(p)).FzErrMsg) (*TabResult)(unsafe.Pointer(p)).FzErrMsg = Xsqlite3_mprintf(tls, - ts+19813, 0) + ts+19860, 0) (*TabResult)(unsafe.Pointer(p)).Frc = SQLITE_ERROR return 1 __11: @@ -92216,7 +92251,7 @@ if !((*Token)(unsafe.Pointer(pName2)).Fn > uint32(0)) { goto __3 } - Xsqlite3ErrorMsg(tls, pParse, ts+19878, 0) + Xsqlite3ErrorMsg(tls, 
pParse, ts+19925, 0) goto trigger_cleanup __3: ; @@ -92260,7 +92295,7 @@ goto trigger_cleanup __8: ; - Xsqlite3FixInit(tls, bp+36, pParse, iDb, ts+19924, *(*uintptr)(unsafe.Pointer(bp + 32))) + Xsqlite3FixInit(tls, bp+36, pParse, iDb, ts+19971, *(*uintptr)(unsafe.Pointer(bp + 32))) if !(Xsqlite3FixSrcList(tls, bp+36, pTableName) != 0) { goto __9 } @@ -92278,7 +92313,7 @@ if !(int32((*Table)(unsafe.Pointer(pTab)).FeTabType) == TABTYP_VTAB) { goto __11 } - Xsqlite3ErrorMsg(tls, pParse, ts+19932, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+19979, 0) goto trigger_orphan_error __11: ; @@ -92290,7 +92325,7 @@ goto trigger_cleanup __12: ; - if !(Xsqlite3CheckObjectName(tls, pParse, zName, ts+19924, (*Table)(unsafe.Pointer(pTab)).FzName) != 0) { + if !(Xsqlite3CheckObjectName(tls, pParse, zName, ts+19971, (*Table)(unsafe.Pointer(pTab)).FzName) != 0) { goto __13 } goto trigger_cleanup @@ -92305,11 +92340,12 @@ if !!(noErr != 0) { goto __16 } - Xsqlite3ErrorMsg(tls, pParse, ts+19973, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(bp + 32)))) + Xsqlite3ErrorMsg(tls, pParse, ts+20020, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(bp + 32)))) goto __17 __16: ; Xsqlite3CodeVerifySchema(tls, pParse, iDb) + __17: ; goto trigger_cleanup @@ -92320,19 +92356,19 @@ if !(Xsqlite3_strnicmp(tls, (*Table)(unsafe.Pointer(pTab)).FzName, ts+6374, 7) == 0) { goto __18 } - Xsqlite3ErrorMsg(tls, pParse, ts+19999, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+20046, 0) goto trigger_cleanup __18: ; if !(int32((*Table)(unsafe.Pointer(pTab)).FeTabType) == TABTYP_VIEW && tr_tm != TK_INSTEAD) { goto __19 } - Xsqlite3ErrorMsg(tls, pParse, ts+20037, + Xsqlite3ErrorMsg(tls, pParse, ts+20084, libc.VaList(bp+8, func() uintptr { if tr_tm == TK_BEFORE { - return ts + 20074 + return ts + 20121 } - return ts + 20081 + return ts + 20128 }(), pTableName+8)) goto trigger_orphan_error __19: @@ -92341,7 +92377,7 @@ goto __20 } Xsqlite3ErrorMsg(tls, pParse, - ts+20087, libc.VaList(bp+24, pTableName+8)) + ts+20134, libc.VaList(bp+24, pTableName+8)) goto trigger_orphan_error __20: ; @@ -92490,7 +92526,7 @@ __3: ; Xsqlite3TokenInit(tls, bp+56, (*Trigger)(unsafe.Pointer(pTrig)).FzName) - Xsqlite3FixInit(tls, bp+64, pParse, iDb, ts+19924, bp+56) + Xsqlite3FixInit(tls, bp+64, pParse, iDb, ts+19971, bp+56) if !(Xsqlite3FixTriggerStep(tls, bp+64, (*Trigger)(unsafe.Pointer(pTrig)).Fstep_list) != 0 || Xsqlite3FixExpr(tls, bp+64, (*Trigger)(unsafe.Pointer(pTrig)).FpWhen) != 0) { goto __4 @@ -92523,7 +92559,7 @@ goto __12 } Xsqlite3ErrorMsg(tls, pParse, - ts+20133, + ts+20180, libc.VaList(bp, (*Trigger)(unsafe.Pointer(pTrig)).FzName, (*TriggerStep)(unsafe.Pointer(pStep)).FzTarget)) goto triggerfinish_cleanup __12: @@ -92548,13 +92584,13 @@ z = Xsqlite3DbStrNDup(tls, db, (*Token)(unsafe.Pointer(pAll)).Fz, uint64((*Token)(unsafe.Pointer(pAll)).Fn)) Xsqlite3NestedParse(tls, pParse, - ts+20181, + ts+20228, libc.VaList(bp+16, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*16)).FzDbSName, zName, (*Trigger)(unsafe.Pointer(pTrig)).Ftable, z)) Xsqlite3DbFree(tls, db, z) Xsqlite3ChangeCookie(tls, pParse, iDb) Xsqlite3VdbeAddParseSchemaOp(tls, v, iDb, - Xsqlite3MPrintf(tls, db, ts+20256, libc.VaList(bp+48, zName)), uint16(0)) + Xsqlite3MPrintf(tls, db, ts+20303, libc.VaList(bp+48, zName)), uint16(0)) __7: ; __6: @@ -92810,7 +92846,7 @@ if !!(noErr != 0) { goto __9 } - Xsqlite3ErrorMsg(tls, pParse, ts+20285, libc.VaList(bp, pName+8)) + Xsqlite3ErrorMsg(tls, pParse, ts+20332, libc.VaList(bp, pName+8)) goto __10 __9: Xsqlite3CodeVerifyNamedSchema(tls, pParse, 
zDb) @@ -92863,7 +92899,7 @@ if libc.AssignUintptr(&v, Xsqlite3GetVdbe(tls, pParse)) != uintptr(0) { Xsqlite3NestedParse(tls, pParse, - ts+20305, + ts+20352, libc.VaList(bp, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*16)).FzDbSName, (*Trigger)(unsafe.Pointer(pTrigger)).FzName)) Xsqlite3ChangeCookie(tls, pParse, iDb) Xsqlite3VdbeAddOp4(tls, v, OP_DropTrigger, iDb, 0, 0, (*Trigger)(unsafe.Pointer(pTrigger)).FzName, 0) @@ -92977,12 +93013,12 @@ goto __15 } Xsqlite3ErrorMsg(tls, pParse, - ts+20367, + ts+20414, libc.VaList(bp, func() uintptr { if op == TK_DELETE { - return ts + 20415 + return ts + 20462 } - return ts + 20422 + return ts + 20469 }())) __15: ; @@ -93096,7 +93132,7 @@ if int32((*Expr)(unsafe.Pointer((*Expr)(unsafe.Pointer(pTerm)).FpRight)).Fop) != TK_ASTERISK { return 0 } - Xsqlite3ErrorMsg(tls, pParse, ts+20429, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+20476, 0) return 1 } @@ -93162,7 +93198,7 @@ } Xsqlite3ExprListDelete(tls, db, (*Select)(unsafe.Pointer(bp)).FpEList) pNew = sqlite3ExpandReturning(tls, pParse, (*Returning)(unsafe.Pointer(pReturning)).FpReturnEL, pTab) - if !(int32((*Sqlite3)(unsafe.Pointer(db)).FmallocFailed) != 0) { + if (*Parse)(unsafe.Pointer(pParse)).FnErr == 0 { libc.Xmemset(tls, bp+152, 0, uint32(unsafe.Sizeof(NameContext{}))) if (*Returning)(unsafe.Pointer(pReturning)).FnRetCol == 0 { (*Returning)(unsafe.Pointer(pReturning)).FnRetCol = (*ExprList)(unsafe.Pointer(pNew)).FnExpr @@ -93326,7 +93362,7 @@ if v != 0 { if (*Trigger)(unsafe.Pointer(pTrigger)).FzName != 0 { Xsqlite3VdbeChangeP4(tls, v, -1, - Xsqlite3MPrintf(tls, db, ts+20471, libc.VaList(bp, (*Trigger)(unsafe.Pointer(pTrigger)).FzName)), -6) + Xsqlite3MPrintf(tls, db, ts+20518, libc.VaList(bp, (*Trigger)(unsafe.Pointer(pTrigger)).FzName)), -6) } if (*Trigger)(unsafe.Pointer(pTrigger)).FpWhen != 0 { @@ -93919,7 +93955,7 @@ } Xsqlite3ErrorMsg(tls, pParse, - ts+20485, + ts+20532, libc.VaList(bp, (*Column)(unsafe.Pointer((*Table)(unsafe.Pointer(pTab)).FaCol+uintptr(j)*16)).FzCnName)) goto update_cleanup __27: @@ -93951,7 +93987,7 @@ iRowidExpr = i goto __30 __29: - Xsqlite3ErrorMsg(tls, pParse, ts+20521, libc.VaList(bp+8, (*ExprList_item)(unsafe.Pointer(pChanges+8+uintptr(i)*20)).FzEName)) + Xsqlite3ErrorMsg(tls, pParse, ts+20568, libc.VaList(bp+8, (*ExprList_item)(unsafe.Pointer(pChanges+8+uintptr(i)*20)).FzEName)) (*Parse)(unsafe.Pointer(pParse)).FcheckSchema = U8(1) goto update_cleanup __30: @@ -94277,7 +94313,12 @@ goto __77 __76: flags = WHERE_ONEPASS_DESIRED - if !(!(int32((*Parse)(unsafe.Pointer(pParse)).Fnested) != 0) && !(pTrigger != 0) && !(hasFK != 0) && !(chngKey != 0) && !(*(*int32)(unsafe.Pointer(bp + 68)) != 0)) { + if !(!(int32((*Parse)(unsafe.Pointer(pParse)).Fnested) != 0) && + !(pTrigger != 0) && + !(hasFK != 0) && + !(chngKey != 0) && + !(*(*int32)(unsafe.Pointer(bp + 68)) != 0) && + (*NameContext)(unsafe.Pointer(bp+28)).FncFlags&NC_Subquery == 0) { goto __78 } flags = flags | WHERE_ONEPASS_MULTIROW @@ -94831,7 +94872,7 @@ if !(regRowCount != 0) { goto __169 } - Xsqlite3CodeChangeCount(tls, v, regRowCount, ts+20540) + Xsqlite3CodeChangeCount(tls, v, regRowCount, ts+20587) __169: ; update_cleanup: @@ -95137,10 +95178,10 @@ if nClause == 0 && (*Upsert)(unsafe.Pointer(pUpsert)).FpNextUpsert == uintptr(0) { *(*int8)(unsafe.Pointer(bp + 152)) = int8(0) } else { - Xsqlite3_snprintf(tls, int32(unsafe.Sizeof([16]int8{})), bp+152, ts+20553, libc.VaList(bp, nClause+1)) + Xsqlite3_snprintf(tls, int32(unsafe.Sizeof([16]int8{})), bp+152, ts+20600, libc.VaList(bp, 
nClause+1)) } Xsqlite3ErrorMsg(tls, pParse, - ts+20557, libc.VaList(bp+8, bp+152)) + ts+20604, libc.VaList(bp+8, bp+152)) return SQLITE_ERROR } @@ -95263,7 +95304,7 @@ var zSubSql uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp)), 0) if zSubSql != 0 && - (libc.Xstrncmp(tls, zSubSql, ts+20630, uint32(3)) == 0 || libc.Xstrncmp(tls, zSubSql, ts+20634, uint32(3)) == 0) { + (libc.Xstrncmp(tls, zSubSql, ts+20677, uint32(3)) == 0 || libc.Xstrncmp(tls, zSubSql, ts+20681, uint32(3)) == 0) { rc = execSql(tls, db, pzErrMsg, zSubSql) if rc != SQLITE_OK { break @@ -95411,14 +95452,14 @@ if !!(int32((*Sqlite3)(unsafe.Pointer(db)).FautoCommit) != 0) { goto __1 } - Xsqlite3SetString(tls, pzErrMsg, db, ts+20638) + Xsqlite3SetString(tls, pzErrMsg, db, ts+20685) return SQLITE_ERROR __1: ; if !((*Sqlite3)(unsafe.Pointer(db)).FnVdbeActive > 1) { goto __2 } - Xsqlite3SetString(tls, pzErrMsg, db, ts+20678) + Xsqlite3SetString(tls, pzErrMsg, db, ts+20725) return SQLITE_ERROR __2: ; @@ -95429,7 +95470,7 @@ if !(Xsqlite3_value_type(tls, pOut) != SQLITE_TEXT) { goto __5 } - Xsqlite3SetString(tls, pzErrMsg, db, ts+20721) + Xsqlite3SetString(tls, pzErrMsg, db, ts+20768) return SQLITE_ERROR __5: ; @@ -95457,7 +95498,7 @@ isMemDb = Xsqlite3PagerIsMemdb(tls, Xsqlite3BtreePager(tls, pMain)) nDb = (*Sqlite3)(unsafe.Pointer(db)).FnDb - rc = execSqlF(tls, db, pzErrMsg, ts+20739, libc.VaList(bp, zOut)) + rc = execSqlF(tls, db, pzErrMsg, ts+20786, libc.VaList(bp, zOut)) (*Sqlite3)(unsafe.Pointer(db)).FopenFlags = saved_openFlags if !(rc != SQLITE_OK) { goto __6 @@ -95477,7 +95518,7 @@ goto __8 } rc = SQLITE_ERROR - Xsqlite3SetString(tls, pzErrMsg, db, ts+20762) + Xsqlite3SetString(tls, pzErrMsg, db, ts+20809) goto end_of_vacuum __8: ; @@ -95537,7 +95578,7 @@ (*Sqlite3)(unsafe.Pointer(db)).Finit.FiDb = U8(nDb) rc = execSqlF(tls, db, pzErrMsg, - ts+20789, + ts+20836, libc.VaList(bp+8, zDbMain)) if !(rc != SQLITE_OK) { goto __13 @@ -95546,7 +95587,7 @@ __13: ; rc = execSqlF(tls, db, pzErrMsg, - ts+20897, + ts+20944, libc.VaList(bp+16, zDbMain)) if !(rc != SQLITE_OK) { goto __14 @@ -95557,7 +95598,7 @@ (*Sqlite3)(unsafe.Pointer(db)).Finit.FiDb = U8(0) rc = execSqlF(tls, db, pzErrMsg, - ts+20951, + ts+20998, libc.VaList(bp+24, zDbMain)) *(*U32)(unsafe.Pointer(db + 24)) &= libc.Uint32FromInt32(libc.CplInt32(DBFLAG_Vacuum)) @@ -95568,7 +95609,7 @@ __15: ; rc = execSqlF(tls, db, pzErrMsg, - ts+21102, + ts+21149, libc.VaList(bp+32, zDbMain)) if !(rc != 0) { goto __16 @@ -95997,11 +96038,11 @@ if pEnd != 0 { (*Parse)(unsafe.Pointer(pParse)).FsNameToken.Fn = uint32((int32((*Token)(unsafe.Pointer(pEnd)).Fz)-int32((*Parse)(unsafe.Pointer(pParse)).FsNameToken.Fz))/1) + (*Token)(unsafe.Pointer(pEnd)).Fn } - zStmt = Xsqlite3MPrintf(tls, db, ts+21232, libc.VaList(bp, pParse+196)) + zStmt = Xsqlite3MPrintf(tls, db, ts+21279, libc.VaList(bp, pParse+196)) iDb = Xsqlite3SchemaToIndex(tls, db, (*Table)(unsafe.Pointer(pTab)).FpSchema) Xsqlite3NestedParse(tls, pParse, - ts+21256, + ts+21303, libc.VaList(bp+8, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*16)).FzDbSName, (*Table)(unsafe.Pointer(pTab)).FzName, (*Table)(unsafe.Pointer(pTab)).FzName, @@ -96011,7 +96052,7 @@ Xsqlite3ChangeCookie(tls, pParse, iDb) Xsqlite3VdbeAddOp0(tls, v, OP_Expire) - zWhere = Xsqlite3MPrintf(tls, db, ts+21355, libc.VaList(bp+48, (*Table)(unsafe.Pointer(pTab)).FzName, zStmt)) + zWhere = Xsqlite3MPrintf(tls, db, ts+21402, libc.VaList(bp+48, (*Table)(unsafe.Pointer(pTab)).FzName, zStmt)) Xsqlite3VdbeAddParseSchemaOp(tls, v, iDb, 
zWhere, uint16(0)) Xsqlite3DbFree(tls, db, zStmt) @@ -96072,7 +96113,7 @@ for pCtx = (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx; pCtx != 0; pCtx = (*VtabCtx)(unsafe.Pointer(pCtx)).FpPrior { if (*VtabCtx)(unsafe.Pointer(pCtx)).FpTab == pTab { *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, - ts+21374, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName)) + ts+21421, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName)) return SQLITE_LOCKED } } @@ -96100,9 +96141,11 @@ (*VtabCtx)(unsafe.Pointer(bp + 32)).FpPrior = (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx (*VtabCtx)(unsafe.Pointer(bp + 32)).FbDeclared = 0 (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx = bp + 32 + (*Table)(unsafe.Pointer(pTab)).FnTabRef++ rc = (*struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, uintptr, uintptr) int32 })(unsafe.Pointer(&struct{ uintptr }{xConstruct})).f(tls, db, (*Module)(unsafe.Pointer(pMod)).FpAux, nArg, azArg, pVTable+8, bp+48) + Xsqlite3DeleteTable(tls, db, pTab) (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx = (*VtabCtx)(unsafe.Pointer(bp + 32)).FpPrior if rc == SQLITE_NOMEM { Xsqlite3OomFault(tls, db) @@ -96110,7 +96153,7 @@ if SQLITE_OK != rc { if *(*uintptr)(unsafe.Pointer(bp + 48)) == uintptr(0) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+21416, libc.VaList(bp+8, zModuleName)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+21463, libc.VaList(bp+8, zModuleName)) } else { *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+3656, libc.VaList(bp+16, *(*uintptr)(unsafe.Pointer(bp + 48)))) Xsqlite3_free(tls, *(*uintptr)(unsafe.Pointer(bp + 48))) @@ -96122,7 +96165,7 @@ (*Module)(unsafe.Pointer(pMod)).FnRefModule++ (*VTable)(unsafe.Pointer(pVTable)).FnRef = 1 if (*VtabCtx)(unsafe.Pointer(bp+32)).FbDeclared == 0 { - var zFormat uintptr = ts + 21446 + var zFormat uintptr = ts + 21493 *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, zFormat, libc.VaList(bp+24, (*Table)(unsafe.Pointer(pTab)).FzName)) Xsqlite3VtabUnlock(tls, pVTable) rc = SQLITE_ERROR @@ -96196,7 +96239,7 @@ if !(pMod != 0) { var zModule uintptr = *(*uintptr)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pTab + 44 + 4)))) - Xsqlite3ErrorMsg(tls, pParse, ts+21492, libc.VaList(bp, zModule)) + Xsqlite3ErrorMsg(tls, pParse, ts+21539, libc.VaList(bp, zModule)) rc = SQLITE_ERROR } else { *(*uintptr)(unsafe.Pointer(bp + 16)) = uintptr(0) @@ -96254,7 +96297,7 @@ pMod = Xsqlite3HashFind(tls, db+396, zMod) if pMod == uintptr(0) || (*Sqlite3_module)(unsafe.Pointer((*Module)(unsafe.Pointer(pMod)).FpModule)).FxCreate == uintptr(0) || (*Sqlite3_module)(unsafe.Pointer((*Module)(unsafe.Pointer(pMod)).FpModule)).FxDestroy == uintptr(0) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+21492, libc.VaList(bp, zMod)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+21539, libc.VaList(bp, zMod)) rc = SQLITE_ERROR } else { rc = vtabCallConstructor(tls, db, pTab, pMod, (*Sqlite3_module)(unsafe.Pointer((*Module)(unsafe.Pointer(pMod)).FpModule)).FxCreate, pzErr) @@ -96288,7 +96331,7 @@ if !(pCtx != 0) || (*VtabCtx)(unsafe.Pointer(pCtx)).FbDeclared != 0 { Xsqlite3Error(tls, db, SQLITE_MISUSE) Xsqlite3_mutex_leave(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) - return Xsqlite3MisuseError(tls, 151030) + return Xsqlite3MisuseError(tls, 151102) } pTab = (*VtabCtx)(unsafe.Pointer(pCtx)).FpTab @@ -96741,7 +96784,7 @@ Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) p = (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx if !(p != 0) { - rc = 
Xsqlite3MisuseError(tls, 151521) + rc = Xsqlite3MisuseError(tls, 151593) } else { ap = va switch op { @@ -96768,7 +96811,7 @@ fallthrough default: { - rc = Xsqlite3MisuseError(tls, 151539) + rc = Xsqlite3MisuseError(tls, 151611) break } @@ -96994,7 +97037,7 @@ func explainIndexColumnName(tls *libc.TLS, pIdx uintptr, i int32) uintptr { i = int32(*(*I16)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx)).FaiColumn + uintptr(i)*2))) if i == -2 { - return ts + 21511 + return ts + 21558 } if i == -1 { return ts + 16260 @@ -97006,11 +97049,11 @@ var i int32 if bAnd != 0 { - Xsqlite3_str_append(tls, pStr, ts+21518, 5) + Xsqlite3_str_append(tls, pStr, ts+21565, 5) } if nTerm > 1 { - Xsqlite3_str_append(tls, pStr, ts+21524, 1) + Xsqlite3_str_append(tls, pStr, ts+21571, 1) } for i = 0; i < nTerm; i++ { if i != 0 { @@ -97025,7 +97068,7 @@ Xsqlite3_str_append(tls, pStr, zOp, 1) if nTerm > 1 { - Xsqlite3_str_append(tls, pStr, ts+21524, 1) + Xsqlite3_str_append(tls, pStr, ts+21571, 1) } for i = 0; i < nTerm; i++ { if i != 0 { @@ -97051,27 +97094,27 @@ if int32(nEq) == 0 && (*WhereLoop)(unsafe.Pointer(pLoop)).FwsFlags&U32(WHERE_BTM_LIMIT|WHERE_TOP_LIMIT) == U32(0) { return } - Xsqlite3_str_append(tls, pStr, ts+21526, 2) + Xsqlite3_str_append(tls, pStr, ts+21573, 2) for i = 0; i < int32(nEq); i++ { var z uintptr = explainIndexColumnName(tls, pIndex, i) if i != 0 { - Xsqlite3_str_append(tls, pStr, ts+21518, 5) + Xsqlite3_str_append(tls, pStr, ts+21565, 5) } Xsqlite3_str_appendf(tls, pStr, func() uintptr { if i >= int32(nSkip) { - return ts + 21529 + return ts + 21576 } - return ts + 21534 + return ts + 21581 }(), libc.VaList(bp, z)) } j = i if (*WhereLoop)(unsafe.Pointer(pLoop)).FwsFlags&U32(WHERE_BTM_LIMIT) != 0 { - explainAppendTerm(tls, pStr, pIndex, int32(*(*U16)(unsafe.Pointer(pLoop + 24 + 2))), j, i, ts+21542) + explainAppendTerm(tls, pStr, pIndex, int32(*(*U16)(unsafe.Pointer(pLoop + 24 + 2))), j, i, ts+21589) i = 1 } if (*WhereLoop)(unsafe.Pointer(pLoop)).FwsFlags&U32(WHERE_TOP_LIMIT) != 0 { - explainAppendTerm(tls, pStr, pIndex, int32(*(*U16)(unsafe.Pointer(pLoop + 24 + 4))), j, i, ts+21544) + explainAppendTerm(tls, pStr, pIndex, int32(*(*U16)(unsafe.Pointer(pLoop + 24 + 4))), j, i, ts+21591) } Xsqlite3_str_append(tls, pStr, ts+4950, 1) } @@ -97114,11 +97157,11 @@ Xsqlite3StrAccumInit(tls, bp+64, db, bp+88, int32(unsafe.Sizeof([100]int8{})), SQLITE_MAX_LENGTH) (*StrAccum)(unsafe.Pointer(bp + 64)).FprintfFlags = U8(SQLITE_PRINTF_INTERNAL) - Xsqlite3_str_appendf(tls, bp+64, ts+21546, libc.VaList(bp, func() uintptr { + Xsqlite3_str_appendf(tls, bp+64, ts+21593, libc.VaList(bp, func() uintptr { if isSearch != 0 { - return ts + 21552 + return ts + 21599 } - return ts + 21559 + return ts + 21606 }(), pItem)) if flags&U32(WHERE_IPK|WHERE_VIRTUALTABLE) == U32(0) { var zFmt uintptr = uintptr(0) @@ -97131,40 +97174,40 @@ zFmt = ts + 10969 } } else if flags&U32(WHERE_PARTIALIDX) != 0 { - zFmt = ts + 21564 + zFmt = ts + 21611 } else if flags&U32(WHERE_AUTO_INDEX) != 0 { - zFmt = ts + 21597 + zFmt = ts + 21644 } else if flags&U32(WHERE_IDX_ONLY) != 0 { - zFmt = ts + 21622 + zFmt = ts + 21669 } else { - zFmt = ts + 21640 + zFmt = ts + 21687 } if zFmt != 0 { - Xsqlite3_str_append(tls, bp+64, ts+21649, 7) + Xsqlite3_str_append(tls, bp+64, ts+21696, 7) Xsqlite3_str_appendf(tls, bp+64, zFmt, libc.VaList(bp+16, (*Index)(unsafe.Pointer(pIdx)).FzName)) explainIndexRange(tls, bp+64, pLoop) } } else if flags&U32(WHERE_IPK) != U32(0) && flags&U32(WHERE_CONSTRAINT) != U32(0) { var cRangeOp int8 var zRowid uintptr = ts + 16260 
- Xsqlite3_str_appendf(tls, bp+64, ts+21657, libc.VaList(bp+24, zRowid)) + Xsqlite3_str_appendf(tls, bp+64, ts+21704, libc.VaList(bp+24, zRowid)) if flags&U32(WHERE_COLUMN_EQ|WHERE_COLUMN_IN) != 0 { cRangeOp = int8('=') } else if flags&U32(WHERE_BOTH_LIMIT) == U32(WHERE_BOTH_LIMIT) { - Xsqlite3_str_appendf(tls, bp+64, ts+21688, libc.VaList(bp+32, zRowid)) + Xsqlite3_str_appendf(tls, bp+64, ts+21735, libc.VaList(bp+32, zRowid)) cRangeOp = int8('<') } else if flags&U32(WHERE_BTM_LIMIT) != 0 { cRangeOp = int8('>') } else { cRangeOp = int8('<') } - Xsqlite3_str_appendf(tls, bp+64, ts+21698, libc.VaList(bp+40, int32(cRangeOp))) + Xsqlite3_str_appendf(tls, bp+64, ts+21745, libc.VaList(bp+40, int32(cRangeOp))) } else if flags&U32(WHERE_VIRTUALTABLE) != U32(0) { - Xsqlite3_str_appendf(tls, bp+64, ts+21703, + Xsqlite3_str_appendf(tls, bp+64, ts+21750, libc.VaList(bp+48, *(*int32)(unsafe.Pointer(pLoop + 24)), *(*uintptr)(unsafe.Pointer(pLoop + 24 + 12)))) } if int32((*SrcItem)(unsafe.Pointer(pItem)).Ffg.Fjointype)&JT_LEFT != 0 { - Xsqlite3_str_appendf(tls, bp+64, ts+21730, 0) + Xsqlite3_str_appendf(tls, bp+64, ts+21777, 0) } zMsg = Xsqlite3StrAccumFinish(tls, bp+64) @@ -97196,22 +97239,22 @@ Xsqlite3StrAccumInit(tls, bp+24, db, bp+48, int32(unsafe.Sizeof([100]int8{})), SQLITE_MAX_LENGTH) (*StrAccum)(unsafe.Pointer(bp + 24)).FprintfFlags = U8(SQLITE_PRINTF_INTERNAL) - Xsqlite3_str_appendf(tls, bp+24, ts+21741, libc.VaList(bp, pItem)) + Xsqlite3_str_appendf(tls, bp+24, ts+21788, libc.VaList(bp, pItem)) pLoop = (*WhereLevel)(unsafe.Pointer(pLevel)).FpWLoop if (*WhereLoop)(unsafe.Pointer(pLoop)).FwsFlags&U32(WHERE_IPK) != 0 { var pTab uintptr = (*SrcItem)(unsafe.Pointer(pItem)).FpTab if int32((*Table)(unsafe.Pointer(pTab)).FiPKey) >= 0 { - Xsqlite3_str_appendf(tls, bp+24, ts+21529, libc.VaList(bp+8, (*Column)(unsafe.Pointer((*Table)(unsafe.Pointer(pTab)).FaCol+uintptr((*Table)(unsafe.Pointer(pTab)).FiPKey)*16)).FzCnName)) + Xsqlite3_str_appendf(tls, bp+24, ts+21576, libc.VaList(bp+8, (*Column)(unsafe.Pointer((*Table)(unsafe.Pointer(pTab)).FaCol+uintptr((*Table)(unsafe.Pointer(pTab)).FiPKey)*16)).FzCnName)) } else { - Xsqlite3_str_appendf(tls, bp+24, ts+21762, 0) + Xsqlite3_str_appendf(tls, bp+24, ts+21809, 0) } } else { for i = int32((*WhereLoop)(unsafe.Pointer(pLoop)).FnSkip); i < int32(*(*U16)(unsafe.Pointer(pLoop + 24))); i++ { var z uintptr = explainIndexColumnName(tls, *(*uintptr)(unsafe.Pointer(pLoop + 24 + 8)), i) if i > int32((*WhereLoop)(unsafe.Pointer(pLoop)).FnSkip) { - Xsqlite3_str_append(tls, bp+24, ts+21518, 5) + Xsqlite3_str_append(tls, bp+24, ts+21565, 5) } - Xsqlite3_str_appendf(tls, bp+24, ts+21529, libc.VaList(bp+16, z)) + Xsqlite3_str_appendf(tls, bp+24, ts+21576, libc.VaList(bp+16, z)) } } Xsqlite3_str_append(tls, bp+24, ts+4950, 1) @@ -98808,7 +98851,7 @@ ; __126: ; - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21770, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21817, 0) ii = 0 __135: if !(ii < (*WhereClause)(unsafe.Pointer(pOrWc)).FnTerm) { @@ -98836,7 +98879,7 @@ pOrExpr = pAndExpr __140: ; - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21785, libc.VaList(bp, ii+1)) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21832, libc.VaList(bp, ii+1)) pSubWInfo = Xsqlite3WhereBegin(tls, pParse, pOrTab, pOrExpr, uintptr(0), uintptr(0), uintptr(0), uint16(WHERE_OR_SUBCLAUSE), iCovCur) @@ -99354,7 +99397,7 @@ var mAll Bitmask = uint64(0) var k int32 - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21794, libc.VaList(bp, 
(*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pTabItem)).FpTab)).FzName)) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21841, libc.VaList(bp, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pTabItem)).FpTab)).FzName)) for k = 0; k < iLevel; k++ { var iIdxCur int32 @@ -99715,7 +99758,7 @@ {FzOp: ts + 16109, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_MATCH)}, {FzOp: ts + 15440, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_GLOB)}, {FzOp: ts + 14960, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_LIKE)}, - {FzOp: ts + 21808, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_REGEXP)}, + {FzOp: ts + 21855, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_REGEXP)}, } func transferJoinMarkings(tls *libc.TLS, pDerived uintptr, pBase uintptr) { @@ -100205,12 +100248,12 @@ extraRight = x - uint64(1) if prereqAll>>1 >= x { - Xsqlite3ErrorMsg(tls, pParse, ts+21815, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+21862, 0) return } } else if prereqAll>>1 >= x { if (*SrcList)(unsafe.Pointer(pSrc)).FnSrc > 0 && int32((*SrcItem)(unsafe.Pointer(pSrc+8)).Ffg.Fjointype)&JT_LTORJ != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+21815, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+21862, 0) return } *(*U32)(unsafe.Pointer(pExpr + 4)) &= libc.Uint32FromInt32(libc.CplInt32(EP_InnerON)) @@ -100289,7 +100332,7 @@ !((*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_OuterON) != U32(0)) && 0 == Xsqlite3ExprCanBeNull(tls, pLeft) { (*Expr)(unsafe.Pointer(pExpr)).Fop = U8(TK_TRUEFALSE) - *(*uintptr)(unsafe.Pointer(pExpr + 8)) = ts + 6762 + *(*uintptr)(unsafe.Pointer(pExpr + 8)) = ts + 7692 *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_IsFalse) (*WhereTerm)(unsafe.Pointer(pTerm)).FprereqAll = uint64(0) (*WhereTerm)(unsafe.Pointer(pTerm)).FeOperator = U16(0) @@ -100383,7 +100426,7 @@ } zCollSeqName = func() uintptr { if *(*int32)(unsafe.Pointer(bp + 16)) != 0 { - return ts + 21856 + return ts + 21903 } return uintptr(unsafe.Pointer(&Xsqlite3StrBINARY)) }() @@ -100759,7 +100802,7 @@ k++ } if k >= int32((*Table)(unsafe.Pointer(pTab)).FnCol) { - Xsqlite3ErrorMsg(tls, pParse, ts+21863, + Xsqlite3ErrorMsg(tls, pParse, ts+21910, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName, j)) return } @@ -100775,7 +100818,7 @@ pRhs = Xsqlite3PExpr(tls, pParse, TK_UPLUS, Xsqlite3ExprDup(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, (*ExprList_item)(unsafe.Pointer(pArgs+8+uintptr(j)*20)).FpExpr, 0), uintptr(0)) pTerm = Xsqlite3PExpr(tls, pParse, TK_EQ, pColRef, pRhs) - if int32((*SrcItem)(unsafe.Pointer(pItem)).Ffg.Fjointype)&(JT_LEFT|JT_LTORJ) != 0 { + if int32((*SrcItem)(unsafe.Pointer(pItem)).Ffg.Fjointype)&(JT_LEFT|JT_LTORJ|JT_RIGHT) != 0 { joinType = U32(EP_OuterON) } else { joinType = U32(EP_InnerON) @@ -101492,7 +101535,7 @@ goto __6 } Xsqlite3_log(tls, SQLITE_WARNING|int32(1)<<8, - ts+21899, libc.VaList(bp, (*Table)(unsafe.Pointer(pTable)).FzName, + ts+21946, libc.VaList(bp, (*Table)(unsafe.Pointer(pTable)).FzName, (*Column)(unsafe.Pointer((*Table)(unsafe.Pointer(pTable)).FaCol+uintptr(iCol)*16)).FzCnName)) sentWarning = U8(1) __6: @@ -101563,7 +101606,7 @@ __14: ; *(*uintptr)(unsafe.Pointer(pLoop + 24 + 8)) = pIdx - (*Index)(unsafe.Pointer(pIdx)).FzName = ts + 21925 + (*Index)(unsafe.Pointer(pIdx)).FzName = ts + 21972 (*Index)(unsafe.Pointer(pIdx)).FpTable = pTable n = 0 idxCols = uint64(0) @@ -101737,6 +101780,10 @@ var v uintptr = (*Parse)(unsafe.Pointer(pParse)).FpVdbe var pLoop uintptr = (*WhereLevel)(unsafe.Pointer(pLevel)).FpWLoop var iCur int32 + var saved_pIdxEpr uintptr + + saved_pIdxEpr = (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr + (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr = uintptr(0) 
addrOnce = Xsqlite3VdbeAddOp0(tls, v, OP_Once) for __ccgo := true; __ccgo; __ccgo = iLevel < int32((*WhereInfo)(unsafe.Pointer(pWInfo)).FnLevel) { @@ -101780,9 +101827,7 @@ var r1 int32 = Xsqlite3GetTempRange(tls, pParse, n) var jj int32 for jj = 0; jj < n; jj++ { - var iCol int32 = int32(*(*I16)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx)).FaiColumn + uintptr(jj)*2))) - - Xsqlite3ExprCodeGetColumnOfTable(tls, v, (*Index)(unsafe.Pointer(pIdx)).FpTable, iCur, iCol, r1+jj) + Xsqlite3ExprCodeLoadIndexColumn(tls, pParse, pIdx, iCur, jj, r1+jj) } Xsqlite3VdbeAddOp4Int(tls, v, OP_FilterAdd, (*WhereLevel)(unsafe.Pointer(pLevel)).FregFilter, 0, r1, n) Xsqlite3ReleaseTempRange(tls, pParse, r1, n) @@ -101816,6 +101861,7 @@ } } Xsqlite3VdbeJumpHere(tls, v, addrOnce) + (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr = saved_pIdxEpr } func allocateIndexInfo(tls *libc.TLS, pWInfo uintptr, pWC uintptr, mUnusable Bitmask, pSrc uintptr, pmNoOmit uintptr) uintptr { @@ -102074,11 +102120,16 @@ _ = pParse + if !((*Table)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx)).FpTable)).FtabFlags&U32(TF_WithoutRowid) == U32(0)) && int32(*(*uint16)(unsafe.Pointer(pIdx + 56))&0x3>>0) == SQLITE_IDXTYPE_PRIMARYKEY { + nField = int32((*Index)(unsafe.Pointer(pIdx)).FnKeyCol) + } else { + nField = int32((*Index)(unsafe.Pointer(pIdx)).FnColumn) + } nField = func() int32 { - if int32((*UnpackedRecord)(unsafe.Pointer(pRec)).FnField) < (*Index)(unsafe.Pointer(pIdx)).FnSample { + if int32((*UnpackedRecord)(unsafe.Pointer(pRec)).FnField) < nField { return int32((*UnpackedRecord)(unsafe.Pointer(pRec)).FnField) } - return (*Index)(unsafe.Pointer(pIdx)).FnSample + return nField }() iCol = 0 iSample = (*Index)(unsafe.Pointer(pIdx)).FnSample * nField @@ -103658,7 +103709,7 @@ j >= (*WhereClause)(unsafe.Pointer(pWC)).FnTerm || *(*uintptr)(unsafe.Pointer((*WhereLoop)(unsafe.Pointer(pNew)).FaLTerm + uintptr(iTerm)*4)) != uintptr(0) || int32((*sqlite3_index_constraint)(unsafe.Pointer(pIdxCons)).Fusable) == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+21936, libc.VaList(bp, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pSrc)).FpTab)).FzName)) + Xsqlite3ErrorMsg(tls, pParse, ts+21983, libc.VaList(bp, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pSrc)).FpTab)).FzName)) return SQLITE_ERROR } @@ -103716,7 +103767,7 @@ (*WhereLoop)(unsafe.Pointer(pNew)).FnLTerm = U16(mxTerm + 1) for i = 0; i <= mxTerm; i++ { if *(*uintptr)(unsafe.Pointer((*WhereLoop)(unsafe.Pointer(pNew)).FaLTerm + uintptr(i)*4)) == uintptr(0) { - Xsqlite3ErrorMsg(tls, pParse, ts+21936, libc.VaList(bp+8, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pSrc)).FpTab)).FzName)) + Xsqlite3ErrorMsg(tls, pParse, ts+21983, libc.VaList(bp+8, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pSrc)).FpTab)).FzName)) return SQLITE_ERROR } @@ -104114,7 +104165,7 @@ mPrior = mPrior | (*WhereLoop)(unsafe.Pointer(pNew)).FmaskSelf if rc != 0 || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { if rc == SQLITE_DONE { - Xsqlite3_log(tls, SQLITE_WARNING, ts+21962, 0) + Xsqlite3_log(tls, SQLITE_WARNING, ts+22009, 0) rc = SQLITE_OK } else { goto __3 @@ -104721,7 +104772,7 @@ } if nFrom == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+21997, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+22044, 0) Xsqlite3DbFreeNN(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pSpace) return SQLITE_ERROR } @@ -104756,6 +104807,10 @@ if int32((*WherePath)(unsafe.Pointer(pFrom)).FisOrdered) == (*ExprList)(unsafe.Pointer((*WhereInfo)(unsafe.Pointer(pWInfo)).FpOrderBy)).FnExpr { (*WhereInfo)(unsafe.Pointer(pWInfo)).FeDistinct = 
U8(WHERE_DISTINCT_ORDERED) } + if (*Select)(unsafe.Pointer((*WhereInfo)(unsafe.Pointer(pWInfo)).FpSelect)).FpOrderBy != 0 && + int32((*WhereInfo)(unsafe.Pointer(pWInfo)).FnOBSat) > (*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer((*WhereInfo)(unsafe.Pointer(pWInfo)).FpSelect)).FpOrderBy)).FnExpr { + (*WhereInfo)(unsafe.Pointer(pWInfo)).FnOBSat = I8((*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer((*WhereInfo)(unsafe.Pointer(pWInfo)).FpSelect)).FpOrderBy)).FnExpr) + } } else { (*WhereInfo)(unsafe.Pointer(pWInfo)).FrevMask = (*WherePath)(unsafe.Pointer(pFrom)).FrevLoop if int32((*WhereInfo)(unsafe.Pointer(pWInfo)).FnOBSat) <= 0 { @@ -105050,6 +105105,9 @@ (*IndexedExpr)(unsafe.Pointer(p)).FiIdxCur = iIdxCur (*IndexedExpr)(unsafe.Pointer(p)).FiIdxCol = i (*IndexedExpr)(unsafe.Pointer(p)).FbMaybeNullRow = U8(bMaybeNullRow) + if Xsqlite3IndexAffinityStr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pIdx) != 0 { + (*IndexedExpr)(unsafe.Pointer(p)).Faff = U8(*(*int8)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx)).FzColAff + uintptr(i)))) + } (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr = p if (*IndexedExpr)(unsafe.Pointer(p)).FpIENext == uintptr(0) { Xsqlite3ParserAddCleanup(tls, pParse, *(*uintptr)(unsafe.Pointer(&struct { @@ -105202,7 +105260,7 @@ if !((*SrcList)(unsafe.Pointer(pTabList)).FnSrc > int32(uint32(unsafe.Sizeof(Bitmask(0)))*uint32(8))) { goto __2 } - Xsqlite3ErrorMsg(tls, pParse, ts+22015, libc.VaList(bp, int32(uint32(unsafe.Sizeof(Bitmask(0)))*uint32(8)))) + Xsqlite3ErrorMsg(tls, pParse, ts+22062, libc.VaList(bp, int32(uint32(unsafe.Sizeof(Bitmask(0)))*uint32(8)))) return uintptr(0) __2: ; @@ -105266,7 +105324,7 @@ (*WhereInfo)(unsafe.Pointer(pWInfo)).FeDistinct = U8(WHERE_DISTINCT_UNIQUE) __7: ; - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+22043, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+22090, 0) goto __5 __4: ii = 0 @@ -106148,7 +106206,7 @@ error_out: Xsqlite3_result_error(tls, - pCtx, ts+22061, -1) + pCtx, ts+22108, -1) } func nth_valueFinalizeFunc(tls *libc.TLS, pCtx uintptr) { @@ -106281,7 +106339,7 @@ (*NtileCtx)(unsafe.Pointer(p)).FnParam = Xsqlite3_value_int64(tls, *(*uintptr)(unsafe.Pointer(apArg))) if (*NtileCtx)(unsafe.Pointer(p)).FnParam <= int64(0) { Xsqlite3_result_error(tls, - pCtx, ts+22117, -1) + pCtx, ts+22164, -1) } } (*NtileCtx)(unsafe.Pointer(p)).FnTotal++ @@ -106370,17 +106428,17 @@ } } -var row_numberName = *(*[11]int8)(unsafe.Pointer(ts + 22162)) -var dense_rankName = *(*[11]int8)(unsafe.Pointer(ts + 22173)) -var rankName = *(*[5]int8)(unsafe.Pointer(ts + 22184)) -var percent_rankName = *(*[13]int8)(unsafe.Pointer(ts + 22189)) -var cume_distName = *(*[10]int8)(unsafe.Pointer(ts + 22202)) -var ntileName = *(*[6]int8)(unsafe.Pointer(ts + 22212)) -var last_valueName = *(*[11]int8)(unsafe.Pointer(ts + 22218)) -var nth_valueName = *(*[10]int8)(unsafe.Pointer(ts + 22229)) -var first_valueName = *(*[12]int8)(unsafe.Pointer(ts + 22239)) -var leadName = *(*[5]int8)(unsafe.Pointer(ts + 22251)) -var lagName = *(*[4]int8)(unsafe.Pointer(ts + 22256)) +var row_numberName = *(*[11]int8)(unsafe.Pointer(ts + 22209)) +var dense_rankName = *(*[11]int8)(unsafe.Pointer(ts + 22220)) +var rankName = *(*[5]int8)(unsafe.Pointer(ts + 22231)) +var percent_rankName = *(*[13]int8)(unsafe.Pointer(ts + 22236)) +var cume_distName = *(*[10]int8)(unsafe.Pointer(ts + 22249)) +var ntileName = *(*[6]int8)(unsafe.Pointer(ts + 22259)) +var last_valueName = *(*[11]int8)(unsafe.Pointer(ts + 22265)) +var nth_valueName = *(*[10]int8)(unsafe.Pointer(ts + 22276)) +var first_valueName = 
*(*[12]int8)(unsafe.Pointer(ts + 22286)) +var leadName = *(*[5]int8)(unsafe.Pointer(ts + 22298)) +var lagName = *(*[4]int8)(unsafe.Pointer(ts + 22303)) func noopStepFunc(tls *libc.TLS, p uintptr, n int32, a uintptr) { _ = p @@ -106426,7 +106484,7 @@ } } if p == uintptr(0) { - Xsqlite3ErrorMsg(tls, pParse, ts+22260, libc.VaList(bp, zName)) + Xsqlite3ErrorMsg(tls, pParse, ts+22307, libc.VaList(bp, zName)) } return p } @@ -106470,12 +106528,12 @@ ((*Window)(unsafe.Pointer(pWin)).FpStart != 0 || (*Window)(unsafe.Pointer(pWin)).FpEnd != 0) && ((*Window)(unsafe.Pointer(pWin)).FpOrderBy == uintptr(0) || (*ExprList)(unsafe.Pointer((*Window)(unsafe.Pointer(pWin)).FpOrderBy)).FnExpr != 1) { Xsqlite3ErrorMsg(tls, pParse, - ts+22279, 0) + ts+22326, 0) } else if (*FuncDef)(unsafe.Pointer(pFunc)).FfuncFlags&U32(SQLITE_FUNC_WINDOW) != 0 { var db uintptr = (*Parse)(unsafe.Pointer(pParse)).Fdb if (*Window)(unsafe.Pointer(pWin)).FpFilter != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+22350, 0) + ts+22397, 0) } else { *(*[8]WindowUpdate)(unsafe.Pointer(bp)) = [8]WindowUpdate{ {FzFunc: uintptr(unsafe.Pointer(&row_numberName)), FeFrmType: TK_ROWS, FeStart: TK_UNBOUNDED, FeEnd: TK_CURRENT}, @@ -106701,7 +106759,7 @@ if int32((*Expr)(unsafe.Pointer(pExpr)).Fop) == TK_AGG_FUNCTION && (*Expr)(unsafe.Pointer(pExpr)).FpAggInfo == uintptr(0) { Xsqlite3ErrorMsg(tls, (*Walker)(unsafe.Pointer(pWalker)).FpParse, - ts+22413, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(pExpr + 8)))) + ts+22460, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(pExpr + 8)))) } return WRC_Continue } @@ -106817,7 +106875,7 @@ if *(*uintptr)(unsafe.Pointer(bp + 28)) == uintptr(0) { *(*uintptr)(unsafe.Pointer(bp + 28)) = Xsqlite3ExprListAppend(tls, pParse, uintptr(0), - Xsqlite3Expr(tls, db, TK_INTEGER, ts+7523)) + Xsqlite3Expr(tls, db, TK_INTEGER, ts+7512)) } pSub = Xsqlite3SelectNew(tls, @@ -106932,7 +106990,7 @@ eStart == TK_FOLLOWING && (eEnd == TK_PRECEDING || eEnd == TK_CURRENT)) { goto __2 } - Xsqlite3ErrorMsg(tls, pParse, ts+22439, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+22486, 0) goto windowAllocErr __2: ; @@ -106997,15 +107055,15 @@ var zErr uintptr = uintptr(0) if (*Window)(unsafe.Pointer(pWin)).FpPartition != 0 { - zErr = ts + 22471 + zErr = ts + 22518 } else if (*Window)(unsafe.Pointer(pExist)).FpOrderBy != 0 && (*Window)(unsafe.Pointer(pWin)).FpOrderBy != 0 { - zErr = ts + 22488 + zErr = ts + 22535 } else if int32((*Window)(unsafe.Pointer(pExist)).FbImplicitFrame) == 0 { - zErr = ts + 22504 + zErr = ts + 22551 } if zErr != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+22524, libc.VaList(bp, zErr, (*Window)(unsafe.Pointer(pWin)).FzBase)) + ts+22571, libc.VaList(bp, zErr, (*Window)(unsafe.Pointer(pWin)).FzBase)) } else { (*Window)(unsafe.Pointer(pWin)).FpPartition = Xsqlite3ExprListDup(tls, db, (*Window)(unsafe.Pointer(pExist)).FpPartition, 0) if (*Window)(unsafe.Pointer(pExist)).FpOrderBy != 0 { @@ -107026,7 +107084,7 @@ (*Window)(unsafe.Pointer(pWin)).FpOwner = p if (*Expr)(unsafe.Pointer(p)).Fflags&U32(EP_Distinct) != 0 && int32((*Window)(unsafe.Pointer(pWin)).FeFrmType) != TK_FILTER { Xsqlite3ErrorMsg(tls, pParse, - ts+22557, 0) + ts+22604, 0) } } else { Xsqlite3WindowDelete(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pWin) @@ -107182,11 +107240,11 @@ } var azErr = [5]uintptr{ - ts + 22604, - ts + 22657, - ts + 22061, - ts + 22708, - ts + 22760, + ts + 22651, + ts + 22704, + ts + 22108, + ts + 22755, + ts + 22807, } var aOp1 = [5]int32{OP_Ge, OP_Ge, OP_Gt, OP_Ge, OP_Ge} @@ -108578,19 +108636,19 @@ } cnt++ if (*Select)(unsafe.Pointer(pLoop)).FpOrderBy 
!= 0 || (*Select)(unsafe.Pointer(pLoop)).FpLimit != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+22810, + Xsqlite3ErrorMsg(tls, pParse, ts+22857, libc.VaList(bp, func() uintptr { if (*Select)(unsafe.Pointer(pLoop)).FpOrderBy != uintptr(0) { - return ts + 22852 + return ts + 22899 } - return ts + 22861 + return ts + 22908 }(), Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(pNext)).Fop)))) break } } if (*Select)(unsafe.Pointer(p)).FselFlags&U32(SF_MultiValue) == U32(0) && libc.AssignInt32(&mxSelect, *(*int32)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb + 116 + 4*4))) > 0 && cnt > mxSelect { - Xsqlite3ErrorMsg(tls, pParse, ts+22867, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+22914, 0) } } @@ -108661,7 +108719,7 @@ var p uintptr = Xsqlite3ExprListAppend(tls, pParse, pPrior, uintptr(0)) if (hasCollate != 0 || sortOrder != -1) && int32((*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).Finit.Fbusy) == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+22901, + Xsqlite3ErrorMsg(tls, pParse, ts+22948, libc.VaList(bp, (*Token)(unsafe.Pointer(pIdToken)).Fn, (*Token)(unsafe.Pointer(pIdToken)).Fz)) } Xsqlite3ExprListSetName(tls, pParse, p, pIdToken, 1) @@ -109756,7 +109814,7 @@ yy_pop_parser_stack(tls, yypParser) } - Xsqlite3ErrorMsg(tls, pParse, ts+22939, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+22986, 0) (*YyParser)(unsafe.Pointer(yypParser)).FpParse = pParse } @@ -110735,7 +110793,7 @@ *(*U32)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-1)*12 + 4)) = U32(TF_WithoutRowid | TF_NoVisibleRowid) } else { *(*U32)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-1)*12 + 4)) = U32(0) - Xsqlite3ErrorMsg(tls, pParse, ts+22961, libc.VaList(bp, (*Token)(unsafe.Pointer(yymsp+4)).Fn, (*Token)(unsafe.Pointer(yymsp+4)).Fz)) + Xsqlite3ErrorMsg(tls, pParse, ts+23008, libc.VaList(bp, (*Token)(unsafe.Pointer(yymsp+4)).Fn, (*Token)(unsafe.Pointer(yymsp+4)).Fz)) } } break @@ -110745,7 +110803,7 @@ *(*U32)(unsafe.Pointer(bp + 40)) = U32(TF_Strict) } else { *(*U32)(unsafe.Pointer(bp + 40)) = U32(0) - Xsqlite3ErrorMsg(tls, pParse, ts+22961, libc.VaList(bp+16, (*Token)(unsafe.Pointer(yymsp+4)).Fn, (*Token)(unsafe.Pointer(yymsp+4)).Fz)) + Xsqlite3ErrorMsg(tls, pParse, ts+23008, libc.VaList(bp+16, (*Token)(unsafe.Pointer(yymsp+4)).Fn, (*Token)(unsafe.Pointer(yymsp+4)).Fz)) } } *(*U32)(unsafe.Pointer(yymsp + 4)) = *(*U32)(unsafe.Pointer(bp + 40)) @@ -111488,7 +111546,7 @@ case uint32(157): { Xsqlite3SrcListIndexedBy(tls, pParse, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-5)*12 + 4)), yymsp+libc.UintptrFromInt32(-4)*12+4) - Xsqlite3ExprListCheckLength(tls, pParse, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-2)*12 + 4)), ts+22988) + Xsqlite3ExprListCheckLength(tls, pParse, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-2)*12 + 4)), ts+23035) if *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-1)*12 + 4)) != 0 { var pFromClause uintptr = *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-1)*12 + 4)) if (*SrcList)(unsafe.Pointer(pFromClause)).FnSrc > 1 { @@ -111652,7 +111710,7 @@ *(*Token)(unsafe.Pointer(bp + 92)) = *(*Token)(unsafe.Pointer(yymsp + 4)) if int32((*Parse)(unsafe.Pointer(pParse)).Fnested) == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+22997, libc.VaList(bp+32, bp+92)) + Xsqlite3ErrorMsg(tls, pParse, ts+23044, libc.VaList(bp+32, bp+92)) *(*uintptr)(unsafe.Pointer(yymsp + 4)) = uintptr(0) } else { *(*uintptr)(unsafe.Pointer(yymsp + 4)) = Xsqlite3PExpr(tls, pParse, TK_REGISTER, uintptr(0), uintptr(0)) @@ -111869,9 +111927,9 @@ Xsqlite3ExprUnmapAndDelete(tls, 
pParse, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-4)*12 + 4))) *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-4)*12 + 4)) = Xsqlite3Expr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, TK_STRING, func() uintptr { if *(*int32)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-3)*12 + 4)) != 0 { - return ts + 6757 + return ts + 7687 } - return ts + 6762 + return ts + 7692 }()) if *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-4)*12 + 4)) != 0 { Xsqlite3ExprIdToTrueFalse(tls, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-4)*12 + 4))) @@ -112155,19 +112213,19 @@ { *(*Token)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-2)*12 + 4)) = *(*Token)(unsafe.Pointer(yymsp + 4)) Xsqlite3ErrorMsg(tls, pParse, - ts+23021, 0) + ts+23068, 0) } break case uint32(271): { Xsqlite3ErrorMsg(tls, pParse, - ts+23116, 0) + ts+23163, 0) } break case uint32(272): { Xsqlite3ErrorMsg(tls, pParse, - ts+23200, 0) + ts+23247, 0) } break case uint32(273): @@ -112546,9 +112604,9 @@ _ = yymajor if *(*int8)(unsafe.Pointer((*Token)(unsafe.Pointer(bp + 8)).Fz)) != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+22997, libc.VaList(bp, bp+8)) + Xsqlite3ErrorMsg(tls, pParse, ts+23044, libc.VaList(bp, bp+8)) } else { - Xsqlite3ErrorMsg(tls, pParse, ts+23285, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+23332, 0) } (*YyParser)(unsafe.Pointer(yypParser)).FpParse = pParse @@ -113316,7 +113374,7 @@ } else { (*Token)(unsafe.Pointer(bp + 1248)).Fz = zSql (*Token)(unsafe.Pointer(bp + 1248)).Fn = uint32(n) - Xsqlite3ErrorMsg(tls, pParse, ts+23302, libc.VaList(bp, bp+1248)) + Xsqlite3ErrorMsg(tls, pParse, ts+23349, libc.VaList(bp, bp+1248)) break } } @@ -113339,7 +113397,7 @@ if (*Parse)(unsafe.Pointer(pParse)).FzErrMsg == uintptr(0) { (*Parse)(unsafe.Pointer(pParse)).FzErrMsg = Xsqlite3MPrintf(tls, db, ts+3656, libc.VaList(bp+8, Xsqlite3ErrStr(tls, (*Parse)(unsafe.Pointer(pParse)).Frc))) } - Xsqlite3_log(tls, (*Parse)(unsafe.Pointer(pParse)).Frc, ts+23327, libc.VaList(bp+16, (*Parse)(unsafe.Pointer(pParse)).FzErrMsg, (*Parse)(unsafe.Pointer(pParse)).FzTail)) + Xsqlite3_log(tls, (*Parse)(unsafe.Pointer(pParse)).Frc, ts+23374, libc.VaList(bp+16, (*Parse)(unsafe.Pointer(pParse)).FzErrMsg, (*Parse)(unsafe.Pointer(pParse)).FzTail)) nErr++ } (*Parse)(unsafe.Pointer(pParse)).FzTail = zSql @@ -113512,7 +113570,7 @@ fallthrough case 'C': { - if nId == 6 && Xsqlite3_strnicmp(tls, zSql, ts+23338, 6) == 0 { + if nId == 6 && Xsqlite3_strnicmp(tls, zSql, ts+23385, 6) == 0 { token = U8(TkCREATE) } else { token = U8(TkOTHER) @@ -113525,11 +113583,11 @@ fallthrough case 'T': { - if nId == 7 && Xsqlite3_strnicmp(tls, zSql, ts+19924, 7) == 0 { + if nId == 7 && Xsqlite3_strnicmp(tls, zSql, ts+19971, 7) == 0 { token = U8(TkTRIGGER) - } else if nId == 4 && Xsqlite3_strnicmp(tls, zSql, ts+23345, 4) == 0 { + } else if nId == 4 && Xsqlite3_strnicmp(tls, zSql, ts+23392, 4) == 0 { token = U8(TkTEMP) - } else if nId == 9 && Xsqlite3_strnicmp(tls, zSql, ts+23350, 9) == 0 { + } else if nId == 9 && Xsqlite3_strnicmp(tls, zSql, ts+23397, 9) == 0 { token = U8(TkTEMP) } else { token = U8(TkOTHER) @@ -113542,9 +113600,9 @@ fallthrough case 'E': { - if nId == 3 && Xsqlite3_strnicmp(tls, zSql, ts+23360, 3) == 0 { + if nId == 3 && Xsqlite3_strnicmp(tls, zSql, ts+23407, 3) == 0 { token = U8(TkEND) - } else if nId == 7 && Xsqlite3_strnicmp(tls, zSql, ts+23364, 7) == 0 { + } else if nId == 7 && Xsqlite3_strnicmp(tls, zSql, ts+23411, 7) == 0 { token = U8(TkEXPLAIN) } else { token = U8(TkOTHER) @@ -113778,7 +113836,7 @@ var rc int32 = SQLITE_OK if 
Xsqlite3Config.FisInit != 0 { - return Xsqlite3MisuseError(tls, 174337) + return Xsqlite3MisuseError(tls, 174426) } ap = va @@ -114353,7 +114411,7 @@ return SQLITE_OK } if !(Xsqlite3SafetyCheckSickOrOk(tls, db) != 0) { - return Xsqlite3MisuseError(tls, 175111) + return Xsqlite3MisuseError(tls, 175200) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) if int32((*Sqlite3)(unsafe.Pointer(db)).FmTrace)&SQLITE_TRACE_CLOSE != 0 { @@ -114368,7 +114426,7 @@ if !(forceZombie != 0) && connectionIsBusy(tls, db) != 0 { Xsqlite3ErrorWithMsg(tls, db, SQLITE_BUSY, - ts+23372, 0) + ts+23419, 0) Xsqlite3_mutex_leave(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) return SQLITE_BUSY } @@ -114559,23 +114617,23 @@ // Return a static string that describes the kind of error specified in the // argument. func Xsqlite3ErrStr(tls *libc.TLS, rc int32) uintptr { - var zErr uintptr = ts + 23440 + var zErr uintptr = ts + 23487 switch rc { case SQLITE_ABORT | int32(2)<<8: { - zErr = ts + 23454 + zErr = ts + 23501 break } case SQLITE_ROW: { - zErr = ts + 23476 + zErr = ts + 23523 break } case SQLITE_DONE: { - zErr = ts + 23498 + zErr = ts + 23545 break } @@ -114593,35 +114651,35 @@ } var aMsg = [29]uintptr{ - ts + 23521, - ts + 23534, + ts + 23568, + ts + 23581, uintptr(0), - ts + 23550, - ts + 23575, - ts + 23589, - ts + 23608, + ts + 23597, + ts + 23622, + ts + 23636, + ts + 23655, ts + 1483, - ts + 23633, - ts + 23670, - ts + 23682, - ts + 23697, - ts + 23730, - ts + 23748, - ts + 23773, - ts + 23802, + ts + 23680, + ts + 23717, + ts + 23729, + ts + 23744, + ts + 23777, + ts + 23795, + ts + 23820, + ts + 23849, uintptr(0), ts + 5831, ts + 5327, - ts + 23819, - ts + 23837, - ts + 23855, - uintptr(0), - ts + 23889, + ts + 23866, + ts + 23884, + ts + 23902, uintptr(0), - ts + 23910, ts + 23936, - ts + 23959, - ts + 23980, + uintptr(0), + ts + 23957, + ts + 23983, + ts + 24006, + ts + 24027, } func sqliteDefaultBusyCallback(tls *libc.TLS, ptr uintptr, count int32) int32 { @@ -114742,7 +114800,7 @@ libc.Bool32(xValue == uintptr(0)) != libc.Bool32(xInverse == uintptr(0)) || (nArg < -1 || nArg > SQLITE_MAX_FUNCTION_ARG) || 255 < Xsqlite3Strlen30(tls, zFunctionName) { - return Xsqlite3MisuseError(tls, 175758) + return Xsqlite3MisuseError(tls, 175847) } extraFlags = enc & (SQLITE_DETERMINISTIC | SQLITE_DIRECTONLY | SQLITE_SUBTYPE | SQLITE_INNOCUOUS) @@ -114787,7 +114845,7 @@ if p != 0 && (*FuncDef)(unsafe.Pointer(p)).FfuncFlags&U32(SQLITE_FUNC_ENCMASK) == U32(enc) && int32((*FuncDef)(unsafe.Pointer(p)).FnArg) == nArg { if (*Sqlite3)(unsafe.Pointer(db)).FnVdbeActive != 0 { Xsqlite3ErrorWithMsg(tls, db, SQLITE_BUSY, - ts+23996, 0) + ts+24043, 0) return SQLITE_BUSY } else { @@ -114904,7 +114962,7 @@ _ = NotUsed _ = NotUsed2 zErr = Xsqlite3_mprintf(tls, - ts+24059, libc.VaList(bp, zName)) + ts+24106, libc.VaList(bp, zName)) Xsqlite3_result_error(tls, context, zErr, -1) Xsqlite3_free(tls, zErr) } @@ -115140,7 +115198,7 @@ } if iDb < 0 { rc = SQLITE_ERROR - Xsqlite3ErrorWithMsg(tls, db, SQLITE_ERROR, ts+24110, libc.VaList(bp, zDb)) + Xsqlite3ErrorWithMsg(tls, db, SQLITE_ERROR, ts+24157, libc.VaList(bp, zDb)) } else { (*Sqlite3)(unsafe.Pointer(db)).FbusyHandler.FnBusy = 0 rc = Xsqlite3Checkpoint(tls, db, iDb, eMode, pnLog, pnCkpt) @@ -115233,7 +115291,7 @@ return Xsqlite3ErrStr(tls, SQLITE_NOMEM) } if !(Xsqlite3SafetyCheckSickOrOk(tls, db) != 0) { - return Xsqlite3ErrStr(tls, Xsqlite3MisuseError(tls, 176503)) + return Xsqlite3ErrStr(tls, Xsqlite3MisuseError(tls, 176592)) } Xsqlite3_mutex_enter(tls, 
(*Sqlite3)(unsafe.Pointer(db)).Fmutex) if (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { @@ -115303,7 +115361,7 @@ // passed to this function, we assume a malloc() failed during sqlite3_open(). func Xsqlite3_errcode(tls *libc.TLS, db uintptr) int32 { if db != 0 && !(Xsqlite3SafetyCheckSickOrOk(tls, db) != 0) { - return Xsqlite3MisuseError(tls, 176582) + return Xsqlite3MisuseError(tls, 176671) } if !(db != 0) || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { return SQLITE_NOMEM @@ -115313,7 +115371,7 @@ func Xsqlite3_extended_errcode(tls *libc.TLS, db uintptr) int32 { if db != 0 && !(Xsqlite3SafetyCheckSickOrOk(tls, db) != 0) { - return Xsqlite3MisuseError(tls, 176591) + return Xsqlite3MisuseError(tls, 176680) } if !(db != 0) || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { return SQLITE_NOMEM @@ -115345,14 +115403,14 @@ enc2 = SQLITE_UTF16LE } if enc2 < SQLITE_UTF8 || enc2 > SQLITE_UTF16BE { - return Xsqlite3MisuseError(tls, 176639) + return Xsqlite3MisuseError(tls, 176728) } pColl = Xsqlite3FindCollSeq(tls, db, U8(enc2), zName, 0) if pColl != 0 && (*CollSeq)(unsafe.Pointer(pColl)).FxCmp != 0 { if (*Sqlite3)(unsafe.Pointer(db)).FnVdbeActive != 0 { Xsqlite3ErrorWithMsg(tls, db, SQLITE_BUSY, - ts+24131, 0) + ts+24178, 0) return SQLITE_BUSY } Xsqlite3ExpirePreparedStatements(tls, db, 0) @@ -115482,7 +115540,7 @@ if !((flags&uint32(SQLITE_OPEN_URI) != 0 || Xsqlite3Config.FbOpenUri != 0) && - nUri >= 5 && libc.Xmemcmp(tls, zUri, ts+24199, uint32(5)) == 0) { + nUri >= 5 && libc.Xmemcmp(tls, zUri, ts+24246, uint32(5)) == 0) { goto __1 } iOut = 0 @@ -115527,10 +115585,10 @@ goto __8 __9: ; - if !(iIn != 7 && (iIn != 16 || libc.Xmemcmp(tls, ts+24205, zUri+7, uint32(9)) != 0)) { + if !(iIn != 7 && (iIn != 16 || libc.Xmemcmp(tls, ts+24252, zUri+7, uint32(9)) != 0)) { goto __10 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24215, + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24262, libc.VaList(bp, iIn-7, zUri+7)) rc = SQLITE_ERROR goto parse_uri_out @@ -115635,7 +115693,7 @@ zVal = zOpt + uintptr(nOpt+1) nVal = Xsqlite3Strlen30(tls, zVal) - if !(nOpt == 3 && libc.Xmemcmp(tls, ts+24243, zOpt, uint32(3)) == 0) { + if !(nOpt == 3 && libc.Xmemcmp(tls, ts+24290, zOpt, uint32(3)) == 0) { goto __29 } zVfs = zVal @@ -115646,17 +115704,17 @@ mask = 0 limit = 0 - if !(nOpt == 5 && libc.Xmemcmp(tls, ts+24247, zOpt, uint32(5)) == 0) { + if !(nOpt == 5 && libc.Xmemcmp(tls, ts+24294, zOpt, uint32(5)) == 0) { goto __31 } mask = SQLITE_OPEN_SHAREDCACHE | SQLITE_OPEN_PRIVATECACHE aMode = uintptr(unsafe.Pointer(&aCacheMode)) limit = mask - zModeType = ts + 24247 + zModeType = ts + 24294 __31: ; - if !(nOpt == 4 && libc.Xmemcmp(tls, ts+24253, zOpt, uint32(4)) == 0) { + if !(nOpt == 4 && libc.Xmemcmp(tls, ts+24300, zOpt, uint32(4)) == 0) { goto __32 } @@ -115694,7 +115752,7 @@ if !(mode == 0) { goto __38 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24258, libc.VaList(bp+16, zModeType, zVal)) + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24305, libc.VaList(bp+16, zModeType, zVal)) rc = SQLITE_ERROR goto parse_uri_out __38: @@ -115702,7 +115760,7 @@ if !(mode&libc.CplInt32(SQLITE_OPEN_MEMORY) > limit) { goto __39 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24278, + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24325, libc.VaList(bp+32, zModeType, zVal)) rc = SQLITE_PERM goto parse_uri_out @@ -115742,7 +115800,7 @@ if !(*(*uintptr)(unsafe.Pointer(ppVfs)) == uintptr(0)) { goto 
__42 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24302, libc.VaList(bp+48, zVfs)) + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24349, libc.VaList(bp+48, zVfs)) rc = SQLITE_ERROR __42: ; @@ -115765,14 +115823,14 @@ } var aCacheMode = [3]OpenMode{ - {Fz: ts + 24318, Fmode: SQLITE_OPEN_SHAREDCACHE}, - {Fz: ts + 24325, Fmode: SQLITE_OPEN_PRIVATECACHE}, + {Fz: ts + 24365, Fmode: SQLITE_OPEN_SHAREDCACHE}, + {Fz: ts + 24372, Fmode: SQLITE_OPEN_PRIVATECACHE}, {}, } var aOpenMode = [5]OpenMode{ - {Fz: ts + 24333, Fmode: SQLITE_OPEN_READONLY}, - {Fz: ts + 24336, Fmode: SQLITE_OPEN_READWRITE}, - {Fz: ts + 24339, Fmode: SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE}, + {Fz: ts + 24380, Fmode: SQLITE_OPEN_READONLY}, + {Fz: ts + 24383, Fmode: SQLITE_OPEN_READWRITE}, + {Fz: ts + 24386, Fmode: SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE}, {Fz: ts + 17355, Fmode: SQLITE_OPEN_MEMORY}, {}, } @@ -115919,10 +115977,10 @@ createCollation(tls, db, uintptr(unsafe.Pointer(&Xsqlite3StrBINARY)), uint8(SQLITE_UTF16LE), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr, int32, uintptr) int32 }{binCollFunc})), uintptr(0)) - createCollation(tls, db, ts+21856, uint8(SQLITE_UTF8), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + createCollation(tls, db, ts+21903, uint8(SQLITE_UTF8), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr, int32, uintptr) int32 }{nocaseCollatingFunc})), uintptr(0)) - createCollation(tls, db, ts+24343, uint8(SQLITE_UTF8), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + createCollation(tls, db, ts+24390, uint8(SQLITE_UTF8), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr, int32, uintptr) int32 }{rtrimCollFunc})), uintptr(0)) if !((*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0) { @@ -115936,7 +115994,7 @@ if !(int32(1)<<(*(*uint32)(unsafe.Pointer(bp + 8))&uint32(7))&0x46 == 0) { goto __16 } - rc = Xsqlite3MisuseError(tls, 177308) + rc = Xsqlite3MisuseError(tls, 177397) goto __17 __16: rc = Xsqlite3ParseUri(tls, zVfs, zFilename, bp+8, db, bp+12, bp+16) @@ -115989,7 +116047,7 @@ (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb)).FzDbSName = ts + 6434 (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb)).Fsafety_level = U8(SQLITE_DEFAULT_SYNCHRONOUS + 1) - (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + 1*16)).FzDbSName = ts + 23345 + (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + 1*16)).FzDbSName = ts + 23392 (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + 1*16)).Fsafety_level = U8(PAGER_SYNCHRONOUS_OFF) (*Sqlite3)(unsafe.Pointer(db)).FeOpenState = U8(SQLITE_STATE_OPEN) @@ -116094,7 +116152,7 @@ return rc } if zFilename == uintptr(0) { - zFilename = ts + 24349 + zFilename = ts + 24396 } pVal = Xsqlite3ValueNew(tls, uintptr(0)) Xsqlite3ValueSetStr(tls, pVal, -1, zFilename, uint8(SQLITE_UTF16LE), uintptr(0)) @@ -116197,21 +116255,21 @@ bp := tls.Alloc(24) defer tls.Free(24) - Xsqlite3_log(tls, iErr, ts+24352, + Xsqlite3_log(tls, iErr, ts+24399, libc.VaList(bp, zType, lineno, uintptr(20)+Xsqlite3_sourceid(tls))) return iErr } func Xsqlite3CorruptError(tls *libc.TLS, lineno int32) int32 { - return Xsqlite3ReportError(tls, SQLITE_CORRUPT, lineno, ts+24377) + return Xsqlite3ReportError(tls, SQLITE_CORRUPT, lineno, ts+24424) } func Xsqlite3MisuseError(tls *libc.TLS, lineno int32) int32 { - return Xsqlite3ReportError(tls, SQLITE_MISUSE, lineno, ts+24397) + return Xsqlite3ReportError(tls, 
SQLITE_MISUSE, lineno, ts+24444) } func Xsqlite3CantopenError(tls *libc.TLS, lineno int32) int32 { - return Xsqlite3ReportError(tls, SQLITE_CANTOPEN, lineno, ts+24404) + return Xsqlite3ReportError(tls, SQLITE_CANTOPEN, lineno, ts+24451) } // This is a convenience routine that makes sure that all thread-specific @@ -116369,7 +116427,7 @@ goto __20 } Xsqlite3DbFree(tls, db, *(*uintptr)(unsafe.Pointer(bp + 24))) - *(*uintptr)(unsafe.Pointer(bp + 24)) = Xsqlite3MPrintf(tls, db, ts+24421, libc.VaList(bp, zTableName, + *(*uintptr)(unsafe.Pointer(bp + 24)) = Xsqlite3MPrintf(tls, db, ts+24468, libc.VaList(bp, zTableName, zColumnName)) rc = SQLITE_ERROR __20: @@ -117025,7 +117083,7 @@ azCompileOpt = Xsqlite3CompileOptions(tls, bp) - if Xsqlite3_strnicmp(tls, zOptName, ts+24449, 7) == 0 { + if Xsqlite3_strnicmp(tls, zOptName, ts+24496, 7) == 0 { zOptName += uintptr(7) } n = Xsqlite3Strlen30(tls, zOptName) @@ -117143,7 +117201,7 @@ Xsqlite3ErrorWithMsg(tls, db, rc, func() uintptr { if rc != 0 { - return ts + 24457 + return ts + 24504 } return uintptr(0) }(), 0) @@ -117320,7 +117378,7 @@ type JsonParse = JsonParse1 var jsonType = [8]uintptr{ - ts + 6174, ts + 6757, ts + 6762, ts + 6184, ts + 6179, ts + 7998, ts + 24480, ts + 24486, + ts + 6174, ts + 7687, ts + 7692, ts + 6184, ts + 6179, ts + 7998, ts + 24527, ts + 24533, } func jsonZero(tls *libc.TLS, p uintptr) { @@ -117473,7 +117531,7 @@ *(*int8)(unsafe.Pointer((*JsonString)(unsafe.Pointer(p)).FzBuf + uintptr(libc.PostIncUint64(&(*JsonString)(unsafe.Pointer(p)).FnUsed, 1)))) = int8('0') *(*int8)(unsafe.Pointer((*JsonString)(unsafe.Pointer(p)).FzBuf + uintptr(libc.PostIncUint64(&(*JsonString)(unsafe.Pointer(p)).FnUsed, 1)))) = int8('0') *(*int8)(unsafe.Pointer((*JsonString)(unsafe.Pointer(p)).FzBuf + uintptr(libc.PostIncUint64(&(*JsonString)(unsafe.Pointer(p)).FnUsed, 1)))) = int8('0' + int32(c)>>4) - c = uint8(*(*int8)(unsafe.Pointer(ts + 24493 + uintptr(int32(c)&0xf)))) + c = uint8(*(*int8)(unsafe.Pointer(ts + 24540 + uintptr(int32(c)&0xf)))) __8: ; __6: @@ -117528,7 +117586,7 @@ default: { if int32((*JsonString)(unsafe.Pointer(p)).FbErr) == 0 { - Xsqlite3_result_error(tls, (*JsonString)(unsafe.Pointer(p)).FpCtx, ts+24510, -1) + Xsqlite3_result_error(tls, (*JsonString)(unsafe.Pointer(p)).FpCtx, ts+24557, -1) (*JsonString)(unsafe.Pointer(p)).FbErr = U8(2) jsonReset(tls, p) } @@ -117592,13 +117650,13 @@ } case JSON_TRUE: { - jsonAppendRaw(tls, pOut, ts+6757, uint32(4)) + jsonAppendRaw(tls, pOut, ts+7687, uint32(4)) break } case JSON_FALSE: { - jsonAppendRaw(tls, pOut, ts+6762, uint32(5)) + jsonAppendRaw(tls, pOut, ts+7692, uint32(5)) break } @@ -118148,12 +118206,12 @@ jsonParseAddNode(tls, pParse, uint32(JSON_NULL), uint32(0), uintptr(0)) return int32(i + U32(4)) } else if int32(c) == 't' && - libc.Xstrncmp(tls, z+uintptr(i), ts+6757, uint32(4)) == 0 && + libc.Xstrncmp(tls, z+uintptr(i), ts+7687, uint32(4)) == 0 && !(int32(Xsqlite3CtypeMap[uint8(*(*int8)(unsafe.Pointer(z + uintptr(i+U32(4)))))])&0x06 != 0) { jsonParseAddNode(tls, pParse, uint32(JSON_TRUE), uint32(0), uintptr(0)) return int32(i + U32(4)) } else if int32(c) == 'f' && - libc.Xstrncmp(tls, z+uintptr(i), ts+6762, uint32(5)) == 0 && + libc.Xstrncmp(tls, z+uintptr(i), ts+7692, uint32(5)) == 0 && !(int32(Xsqlite3CtypeMap[uint8(*(*int8)(unsafe.Pointer(z + uintptr(i+U32(5)))))])&0x06 != 0) { jsonParseAddNode(tls, pParse, uint32(JSON_FALSE), uint32(0), uintptr(0)) return int32(i + U32(5)) @@ -118254,7 +118312,7 @@ if (*JsonParse)(unsafe.Pointer(pParse)).Foom != 0 { 
Xsqlite3_result_error_nomem(tls, pCtx) } else { - Xsqlite3_result_error(tls, pCtx, ts+24539, -1) + Xsqlite3_result_error(tls, pCtx, ts+24586, -1) } } jsonParseReset(tls, pParse) @@ -118560,7 +118618,7 @@ } if int32(*(*int8)(unsafe.Pointer(zPath))) == '.' { jsonParseAddNode(tls, pParse, uint32(JSON_OBJECT), uint32(0), uintptr(0)) - } else if libc.Xstrncmp(tls, zPath, ts+24554, uint32(3)) == 0 { + } else if libc.Xstrncmp(tls, zPath, ts+24601, uint32(3)) == 0 { jsonParseAddNode(tls, pParse, uint32(JSON_ARRAY), uint32(0), uintptr(0)) } else { return uintptr(0) @@ -118575,7 +118633,7 @@ bp := tls.Alloc(8) defer tls.Free(8) - return Xsqlite3_mprintf(tls, ts+24558, libc.VaList(bp, zErr)) + return Xsqlite3_mprintf(tls, ts+24605, libc.VaList(bp, zErr)) } func jsonLookup(tls *libc.TLS, pParse uintptr, zPath uintptr, pApnd uintptr, pCtx uintptr) uintptr { @@ -118629,7 +118687,7 @@ bp := tls.Alloc(8) defer tls.Free(8) - var zMsg uintptr = Xsqlite3_mprintf(tls, ts+24584, + var zMsg uintptr = Xsqlite3_mprintf(tls, ts+24631, libc.VaList(bp, zFuncName)) Xsqlite3_result_error(tls, pCtx, zMsg, -1) Xsqlite3_free(tls, zMsg) @@ -118734,11 +118792,11 @@ if int32(*(*int8)(unsafe.Pointer(zPath))) != '$' { jsonInit(tls, bp, ctx) if int32(Xsqlite3CtypeMap[uint8(*(*int8)(unsafe.Pointer(zPath)))])&0x04 != 0 { - jsonAppendRaw(tls, bp, ts+24627, uint32(2)) + jsonAppendRaw(tls, bp, ts+24674, uint32(2)) jsonAppendRaw(tls, bp, zPath, uint32(int32(libc.Xstrlen(tls, zPath)))) jsonAppendRaw(tls, bp, ts+4991, uint32(2)) } else { - jsonAppendRaw(tls, bp, ts+24630, uint32(1+libc.Bool32(int32(*(*int8)(unsafe.Pointer(zPath))) != '['))) + jsonAppendRaw(tls, bp, ts+24677, uint32(1+libc.Bool32(int32(*(*int8)(unsafe.Pointer(zPath))) != '['))) jsonAppendRaw(tls, bp, zPath, uint32(int32(libc.Xstrlen(tls, zPath)))) jsonAppendChar(tls, bp, int8(0)) } @@ -118895,14 +118953,14 @@ if argc&1 != 0 { Xsqlite3_result_error(tls, ctx, - ts+24633, -1) + ts+24680, -1) return } jsonInit(tls, bp, ctx) jsonAppendChar(tls, bp, int8('{')) for i = 0; i < argc; i = i + 2 { if Xsqlite3_value_type(tls, *(*uintptr)(unsafe.Pointer(argv + uintptr(i)*4))) != SQLITE_TEXT { - Xsqlite3_result_error(tls, ctx, ts+24684, -1) + Xsqlite3_result_error(tls, ctx, ts+24731, -1) jsonReset(tls, bp) return } @@ -119072,9 +119130,9 @@ } jsonWrongNumArgs(tls, ctx, func() uintptr { if bIsSet != 0 { - return ts + 24718 + return ts + 24765 } - return ts + 24722 + return ts + 24769 }()) return __2: @@ -119207,7 +119265,7 @@ (*JsonString)(unsafe.Pointer(pStr)).FnUsed-- } } else { - Xsqlite3_result_text(tls, ctx, ts+24729, 2, uintptr(0)) + Xsqlite3_result_text(tls, ctx, ts+24776, 2, uintptr(0)) } Xsqlite3_result_subtype(tls, ctx, uint32(JSON_SUBTYPE)) } @@ -119304,7 +119362,7 @@ (*JsonString)(unsafe.Pointer(pStr)).FnUsed-- } } else { - Xsqlite3_result_text(tls, ctx, ts+24732, 2, uintptr(0)) + Xsqlite3_result_text(tls, ctx, ts+24779, 2, uintptr(0)) } Xsqlite3_result_subtype(tls, ctx, uint32(JSON_SUBTYPE)) } @@ -119348,7 +119406,7 @@ _ = argc _ = pAux rc = Xsqlite3_declare_vtab(tls, db, - ts+24735) + ts+24782) if rc == SQLITE_OK { pNew = libc.AssignPtrUintptr(ppVtab, Xsqlite3_malloc(tls, int32(unsafe.Sizeof(Sqlite3_vtab{})))) if pNew == uintptr(0) { @@ -119479,7 +119537,7 @@ nn = nn - 2 } } - jsonPrintf(tls, nn+2, pStr, ts+24818, libc.VaList(bp, nn, z)) + jsonPrintf(tls, nn+2, pStr, ts+24865, libc.VaList(bp, nn, z)) } func jsonEachComputePath(tls *libc.TLS, p uintptr, pStr uintptr, i U32) { @@ -119498,7 +119556,7 @@ pNode = (*JsonEachCursor)(unsafe.Pointer(p)).FsParse.FaNode + 
uintptr(i)*12 pUp = (*JsonEachCursor)(unsafe.Pointer(p)).FsParse.FaNode + uintptr(iUp)*12 if int32((*JsonNode)(unsafe.Pointer(pUp)).FeType) == JSON_ARRAY { - jsonPrintf(tls, 30, pStr, ts+24824, libc.VaList(bp, *(*U32)(unsafe.Pointer(pUp + 8)))) + jsonPrintf(tls, 30, pStr, ts+24871, libc.VaList(bp, *(*U32)(unsafe.Pointer(pUp + 8)))) } else { if int32((*JsonNode)(unsafe.Pointer(pNode)).FjnFlags)&JNODE_LABEL == 0 { pNode -= 12 @@ -119594,7 +119652,7 @@ jsonAppendChar(tls, bp+8, int8('$')) } if int32((*JsonEachCursor)(unsafe.Pointer(p)).FeType) == JSON_ARRAY { - jsonPrintf(tls, 30, bp+8, ts+24824, libc.VaList(bp, (*JsonEachCursor)(unsafe.Pointer(p)).FiRowid)) + jsonPrintf(tls, 30, bp+8, ts+24871, libc.VaList(bp, (*JsonEachCursor)(unsafe.Pointer(p)).FiRowid)) } else if int32((*JsonEachCursor)(unsafe.Pointer(p)).FeType) == JSON_OBJECT { jsonAppendObjectPathElement(tls, bp+8, pThis) } @@ -119618,7 +119676,7 @@ { var zRoot uintptr = (*JsonEachCursor)(unsafe.Pointer(p)).FzRoot if zRoot == uintptr(0) { - zRoot = ts + 24829 + zRoot = ts + 24876 } Xsqlite3_result_text(tls, ctx, zRoot, -1, uintptr(0)) break @@ -119740,7 +119798,7 @@ var rc int32 = SQLITE_NOMEM if int32((*JsonEachCursor)(unsafe.Pointer(p)).FsParse.Foom) == 0 { Xsqlite3_free(tls, (*Sqlite3_vtab)(unsafe.Pointer((*Sqlite3_vtab_cursor)(unsafe.Pointer(cur)).FpVtab)).FzErrMsg) - (*Sqlite3_vtab)(unsafe.Pointer((*Sqlite3_vtab_cursor)(unsafe.Pointer(cur)).FpVtab)).FzErrMsg = Xsqlite3_mprintf(tls, ts+24539, 0) + (*Sqlite3_vtab)(unsafe.Pointer((*Sqlite3_vtab_cursor)(unsafe.Pointer(cur)).FpVtab)).FzErrMsg = Xsqlite3_mprintf(tls, ts+24586, 0) if (*Sqlite3_vtab)(unsafe.Pointer((*Sqlite3_vtab_cursor)(unsafe.Pointer(cur)).FpVtab)).FzErrMsg != 0 { rc = SQLITE_ERROR } @@ -119835,25 +119893,25 @@ } var aJsonFunc = [19]FuncDef{ - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24831}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24836}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24847}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24847}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24865}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(JSON_JSON), FxSFunc: 0, FzName: ts + 24878}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(JSON_SQL), FxSFunc: 0, FzName: ts + 24881}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24885}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24897}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24909}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24920}, - {FnArg: int8(-1), FfuncFlags: 
U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24931}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24943}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(JSON_ISSET), FxSFunc: 0, FzName: ts + 24956}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24965}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24965}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24975}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_UTF8 | 0*SQLITE_FUNC_NEEDCOLL | SQLITE_SUBTYPE | SQLITE_UTF8 | SQLITE_DETERMINISTIC), FxSFunc: 0, FxFinalize: 0, FxValue: 0, FxInverse: 0, FzName: ts + 24986}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_UTF8 | 0*SQLITE_FUNC_NEEDCOLL | SQLITE_SUBTYPE | SQLITE_UTF8 | SQLITE_DETERMINISTIC), FxSFunc: 0, FxFinalize: 0, FxValue: 0, FxInverse: 0, FzName: ts + 25003}} + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24878}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24883}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24894}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24894}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24912}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(JSON_JSON), FxSFunc: 0, FzName: ts + 24925}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(JSON_SQL), FxSFunc: 0, FzName: ts + 24928}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24932}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24944}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24956}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24967}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24978}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24990}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(JSON_ISSET), FxSFunc: 0, FzName: ts + 25003}, 
+ {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25012}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25012}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25022}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_UTF8 | 0*SQLITE_FUNC_NEEDCOLL | SQLITE_SUBTYPE | SQLITE_UTF8 | SQLITE_DETERMINISTIC), FxSFunc: 0, FxFinalize: 0, FxValue: 0, FxInverse: 0, FzName: ts + 25033}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_UTF8 | 0*SQLITE_FUNC_NEEDCOLL | SQLITE_SUBTYPE | SQLITE_UTF8 | SQLITE_DETERMINISTIC), FxSFunc: 0, FxFinalize: 0, FxValue: 0, FxInverse: 0, FzName: ts + 25050}} // Register the JSON table-valued functions func Xsqlite3JsonTableFunctions(tls *libc.TLS, db uintptr) int32 { @@ -119872,8 +119930,8 @@ FzName uintptr FpModule uintptr }{ - {FzName: ts + 25021, FpModule: 0}, - {FzName: ts + 25031, FpModule: 0}, + {FzName: ts + 25068, FpModule: 0}, + {FzName: ts + 25078, FpModule: 0}, } type Rtree1 = struct { @@ -120126,11 +120184,11 @@ } } if (*Rtree)(unsafe.Pointer(pRtree)).FpNodeBlob == uintptr(0) { - var zTab uintptr = Xsqlite3_mprintf(tls, ts+25041, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) + var zTab uintptr = Xsqlite3_mprintf(tls, ts+25088, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) if zTab == uintptr(0) { return SQLITE_NOMEM } - rc = Xsqlite3_blob_open(tls, (*Rtree)(unsafe.Pointer(pRtree)).Fdb, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, zTab, ts+25049, iNode, 0, + rc = Xsqlite3_blob_open(tls, (*Rtree)(unsafe.Pointer(pRtree)).Fdb, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, zTab, ts+25096, iNode, 0, pRtree+72) Xsqlite3_free(tls, zTab) } @@ -120341,7 +120399,7 @@ var pRtree uintptr = pVtab var rc int32 var zCreate uintptr = Xsqlite3_mprintf(tls, - ts+25054, + ts+25101, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) @@ -121044,7 +121102,7 @@ var pSrc uintptr var pInfo uintptr - pSrc = Xsqlite3_value_pointer(tls, pValue, ts+25136) + pSrc = Xsqlite3_value_pointer(tls, pValue, ts+25183) if pSrc == uintptr(0) { return SQLITE_ERROR } @@ -122385,7 +122443,7 @@ var zSql uintptr var rc int32 - zSql = Xsqlite3_mprintf(tls, ts+25150, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) + zSql = Xsqlite3_mprintf(tls, ts+25197, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) if zSql != 0 { rc = Xsqlite3_prepare_v2(tls, (*Rtree)(unsafe.Pointer(pRtree)).Fdb, zSql, -1, bp+56, uintptr(0)) } else { @@ -122397,12 +122455,12 @@ if iCol == 0 { var zCol uintptr = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), 0) (*Rtree)(unsafe.Pointer(pRtree)).Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+25170, libc.VaList(bp+16, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zCol)) + ts+25217, libc.VaList(bp+16, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zCol)) } else { var zCol1 uintptr = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), iCol) var zCol2 uintptr = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), iCol+1) 
(*Rtree)(unsafe.Pointer(pRtree)).Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+25202, libc.VaList(bp+32, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zCol1, zCol2)) + ts+25249, libc.VaList(bp+32, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zCol1, zCol2)) } } @@ -122628,7 +122686,7 @@ var pRtree uintptr = pVtab var rc int32 = SQLITE_NOMEM var zSql uintptr = Xsqlite3_mprintf(tls, - ts+25239, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName)) + ts+25286, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName)) if zSql != 0 { nodeBlobReset(tls, pRtree) rc = Xsqlite3_exec(tls, (*Rtree)(unsafe.Pointer(pRtree)).Fdb, zSql, uintptr(0), uintptr(0), uintptr(0)) @@ -122651,7 +122709,7 @@ bp := tls.Alloc(20) defer tls.Free(20) - var zFmt uintptr = ts + 25384 + var zFmt uintptr = ts + 25431 var zSql uintptr var rc int32 @@ -122699,7 +122757,7 @@ } var azName1 = [3]uintptr{ - ts + 25440, ts + 5053, ts + 16260, + ts + 25487, ts + 5053, ts + 16260, } var rtreeModule = Sqlite3_module{ @@ -122742,19 +122800,19 @@ var p uintptr = Xsqlite3_str_new(tls, db) var ii int32 Xsqlite3_str_appendf(tls, p, - ts+25445, + ts+25492, libc.VaList(bp, zDb, zPrefix)) for ii = 0; ii < int32((*Rtree)(unsafe.Pointer(pRtree)).FnAux); ii++ { - Xsqlite3_str_appendf(tls, p, ts+25507, libc.VaList(bp+16, ii)) + Xsqlite3_str_appendf(tls, p, ts+25554, libc.VaList(bp+16, ii)) } Xsqlite3_str_appendf(tls, p, - ts+25512, + ts+25559, libc.VaList(bp+24, zDb, zPrefix)) Xsqlite3_str_appendf(tls, p, - ts+25576, + ts+25623, libc.VaList(bp+40, zDb, zPrefix)) Xsqlite3_str_appendf(tls, p, - ts+25646, + ts+25693, libc.VaList(bp+56, zDb, zPrefix, (*Rtree)(unsafe.Pointer(pRtree)).FiNodeSize)) zCreate = Xsqlite3_str_finish(tls, p) if !(zCreate != 0) { @@ -122783,7 +122841,7 @@ if i != 3 || int32((*Rtree)(unsafe.Pointer(pRtree)).FnAux) == 0 { zFormat = azSql[i] } else { - zFormat = ts + 25695 + zFormat = ts + 25742 } zSql = Xsqlite3_mprintf(tls, zFormat, libc.VaList(bp+80, zDb, zPrefix)) if zSql != 0 { @@ -122795,7 +122853,7 @@ } if (*Rtree)(unsafe.Pointer(pRtree)).FnAux != 0 { (*Rtree)(unsafe.Pointer(pRtree)).FzReadAuxSql = Xsqlite3_mprintf(tls, - ts+25803, + ts+25850, libc.VaList(bp+96, zDb, zPrefix)) if (*Rtree)(unsafe.Pointer(pRtree)).FzReadAuxSql == uintptr(0) { rc = SQLITE_NOMEM @@ -122803,18 +122861,18 @@ var p uintptr = Xsqlite3_str_new(tls, db) var ii int32 var zSql uintptr - Xsqlite3_str_appendf(tls, p, ts+25848, libc.VaList(bp+112, zDb, zPrefix)) + Xsqlite3_str_appendf(tls, p, ts+25895, libc.VaList(bp+112, zDb, zPrefix)) for ii = 0; ii < int32((*Rtree)(unsafe.Pointer(pRtree)).FnAux); ii++ { if ii != 0 { Xsqlite3_str_append(tls, p, ts+12760, 1) } if ii < int32((*Rtree)(unsafe.Pointer(pRtree)).FnAuxNotNull) { - Xsqlite3_str_appendf(tls, p, ts+25875, libc.VaList(bp+128, ii, ii+2, ii)) + Xsqlite3_str_appendf(tls, p, ts+25922, libc.VaList(bp+128, ii, ii+2, ii)) } else { - Xsqlite3_str_appendf(tls, p, ts+25897, libc.VaList(bp+152, ii, ii+2)) + Xsqlite3_str_appendf(tls, p, ts+25944, libc.VaList(bp+152, ii, ii+2)) } } - Xsqlite3_str_appendf(tls, p, ts+25905, 0) + Xsqlite3_str_appendf(tls, p, ts+25952, 
0) zSql = Xsqlite3_str_finish(tls, p) if zSql == uintptr(0) { rc = SQLITE_NOMEM @@ -122829,14 +122887,14 @@ } var azSql = [8]uintptr{ - ts + 25921, - ts + 25974, - ts + 26019, - ts + 26071, - ts + 26125, - ts + 26170, - ts + 26228, - ts + 26283, + ts + 25968, + ts + 26021, + ts + 26066, + ts + 26118, + ts + 26172, + ts + 26217, + ts + 26275, + ts + 26330, } func getIntFromStmt(tls *libc.TLS, db uintptr, zSql uintptr, piVal uintptr) int32 { @@ -122865,7 +122923,7 @@ var zSql uintptr if isCreate != 0 { *(*int32)(unsafe.Pointer(bp + 48)) = 0 - zSql = Xsqlite3_mprintf(tls, ts+26330, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb)) + zSql = Xsqlite3_mprintf(tls, ts+26377, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb)) rc = getIntFromStmt(tls, db, zSql, bp+48) if rc == SQLITE_OK { (*Rtree)(unsafe.Pointer(pRtree)).FiNodeSize = *(*int32)(unsafe.Pointer(bp + 48)) - 64 @@ -122877,7 +122935,7 @@ } } else { zSql = Xsqlite3_mprintf(tls, - ts+26350, + ts+26397, libc.VaList(bp+16, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) rc = getIntFromStmt(tls, db, zSql, pRtree+16) if rc != SQLITE_OK { @@ -122885,7 +122943,7 @@ } else if (*Rtree)(unsafe.Pointer(pRtree)).FiNodeSize < 512-64 { rc = SQLITE_CORRUPT | int32(1)<<8 - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+26407, + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+26454, libc.VaList(bp+40, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) } } @@ -122927,10 +122985,10 @@ ii = 4 *(*[5]uintptr)(unsafe.Pointer(bp + 96)) = [5]uintptr{ uintptr(0), - ts + 26442, - ts + 26485, - ts + 26520, - ts + 26556, + ts + 26489, + ts + 26532, + ts + 26567, + ts + 26603, } if !(argc < 6 || argc > RTREE_MAX_AUX_COLUMN+3) { @@ -122961,7 +123019,7 @@ libc.Xmemcpy(tls, (*Rtree)(unsafe.Pointer(pRtree)).FzName, *(*uintptr)(unsafe.Pointer(argv + 2*4)), uint32(nName)) pSql = Xsqlite3_str_new(tls, db) - Xsqlite3_str_appendf(tls, pSql, ts+26593, + Xsqlite3_str_appendf(tls, pSql, ts+26640, libc.VaList(bp+16, rtreeTokenLength(tls, *(*uintptr)(unsafe.Pointer(argv + 3*4))), *(*uintptr)(unsafe.Pointer(argv + 3*4)))) ii = 4 __3: @@ -122973,7 +123031,7 @@ goto __6 } (*Rtree)(unsafe.Pointer(pRtree)).FnAux++ - Xsqlite3_str_appendf(tls, pSql, ts+26617, libc.VaList(bp+32, rtreeTokenLength(tls, zArg+uintptr(1)), zArg+uintptr(1))) + Xsqlite3_str_appendf(tls, pSql, ts+26664, libc.VaList(bp+32, rtreeTokenLength(tls, zArg+uintptr(1)), zArg+uintptr(1))) goto __7 __6: if !(int32((*Rtree)(unsafe.Pointer(pRtree)).FnAux) > 0) { @@ -122996,7 +123054,7 @@ goto __5 __5: ; - Xsqlite3_str_appendf(tls, pSql, ts+26623, 0) + Xsqlite3_str_appendf(tls, pSql, ts+26670, 0) zSql = Xsqlite3_str_finish(tls, pSql) if !!(zSql != 0) { goto __10 @@ -123092,7 +123150,7 @@ return rc } -var azFormat = [2]uintptr{ts + 26626, ts + 26637} +var azFormat = [2]uintptr{ts + 26673, ts + 26684} func rtreenode(tls *libc.TLS, ctx uintptr, nArg int32, apArg uintptr) { bp := tls.Alloc(592) @@ -123132,11 +123190,11 @@ if ii > 0 { Xsqlite3_str_append(tls, pOut, ts+10913, 1) } - Xsqlite3_str_appendf(tls, pOut, ts+26647, libc.VaList(bp, (*RtreeCell)(unsafe.Pointer(bp+544)).FiRowid)) + Xsqlite3_str_appendf(tls, pOut, ts+26694, libc.VaList(bp, (*RtreeCell)(unsafe.Pointer(bp+544)).FiRowid)) for jj = 0; jj < int32((*Rtree)(unsafe.Pointer(bp+44)).FnDim2); jj++ { - Xsqlite3_str_appendf(tls, pOut, ts+26653, libc.VaList(bp+8, float64(*(*RtreeValue)(unsafe.Pointer(bp + 544 + 8 + uintptr(jj)*4))))) + Xsqlite3_str_appendf(tls, pOut, ts+26700, libc.VaList(bp+8, 
float64(*(*RtreeValue)(unsafe.Pointer(bp + 544 + 8 + uintptr(jj)*4))))) } - Xsqlite3_str_append(tls, pOut, ts+26657, 1) + Xsqlite3_str_append(tls, pOut, ts+26704, 1) } errCode = Xsqlite3_str_errcode(tls, pOut) Xsqlite3_result_text(tls, ctx, Xsqlite3_str_finish(tls, pOut), -1, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{Xsqlite3_free}))) @@ -123147,7 +123205,7 @@ _ = nArg if Xsqlite3_value_type(tls, *(*uintptr)(unsafe.Pointer(apArg))) != SQLITE_BLOB || Xsqlite3_value_bytes(tls, *(*uintptr)(unsafe.Pointer(apArg))) < 2 { - Xsqlite3_result_error(tls, ctx, ts+26659, -1) + Xsqlite3_result_error(tls, ctx, ts+26706, -1) } else { var zBlob uintptr = Xsqlite3_value_blob(tls, *(*uintptr)(unsafe.Pointer(apArg))) if zBlob != 0 { @@ -123223,7 +123281,7 @@ if z == uintptr(0) { (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc = SQLITE_NOMEM } else { - (*RtreeCheck)(unsafe.Pointer(pCheck)).FzReport = Xsqlite3_mprintf(tls, ts+26692, + (*RtreeCheck)(unsafe.Pointer(pCheck)).FzReport = Xsqlite3_mprintf(tls, ts+26739, libc.VaList(bp, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzReport, func() uintptr { if (*RtreeCheck)(unsafe.Pointer(pCheck)).FzReport != 0 { return ts + 4055 @@ -123247,7 +123305,7 @@ if (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc == SQLITE_OK && (*RtreeCheck)(unsafe.Pointer(pCheck)).FpGetNode == uintptr(0) { (*RtreeCheck)(unsafe.Pointer(pCheck)).FpGetNode = rtreeCheckPrepare(tls, pCheck, - ts+26699, + ts+26746, libc.VaList(bp, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzDb, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzTab)) } @@ -123266,7 +123324,7 @@ } rtreeCheckReset(tls, pCheck, (*RtreeCheck)(unsafe.Pointer(pCheck)).FpGetNode) if (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc == SQLITE_OK && pRet == uintptr(0) { - rtreeCheckAppendMsg(tls, pCheck, ts+26744, libc.VaList(bp+16, iNode)) + rtreeCheckAppendMsg(tls, pCheck, ts+26791, libc.VaList(bp+16, iNode)) } } @@ -123280,8 +123338,8 @@ var rc int32 var pStmt uintptr *(*[2]uintptr)(unsafe.Pointer(bp + 80)) = [2]uintptr{ - ts + 26776, - ts + 26830, + ts + 26823, + ts + 26877, } if *(*uintptr)(unsafe.Pointer(pCheck + 24 + uintptr(bLeaf)*4)) == uintptr(0) { @@ -123296,23 +123354,23 @@ Xsqlite3_bind_int64(tls, pStmt, 1, iKey) rc = Xsqlite3_step(tls, pStmt) if rc == SQLITE_DONE { - rtreeCheckAppendMsg(tls, pCheck, ts+26878, + rtreeCheckAppendMsg(tls, pCheck, ts+26925, libc.VaList(bp+16, iKey, iVal, func() uintptr { if bLeaf != 0 { - return ts + 26923 + return ts + 26970 } - return ts + 26931 + return ts + 26978 }())) } else if rc == SQLITE_ROW { var ii I64 = Xsqlite3_column_int64(tls, pStmt, 0) if ii != iVal { rtreeCheckAppendMsg(tls, pCheck, - ts+26940, + ts+26987, libc.VaList(bp+40, iKey, ii, func() uintptr { if bLeaf != 0 { - return ts + 26923 + return ts + 26970 } - return ts + 26931 + return ts + 26978 }(), iKey, iVal)) } } @@ -123336,7 +123394,7 @@ return libc.Bool32(*(*RtreeValue)(unsafe.Pointer(bp + 48)) > *(*RtreeValue)(unsafe.Pointer(bp + 52))) }() != 0 { rtreeCheckAppendMsg(tls, pCheck, - ts+26998, libc.VaList(bp, i, iCell, iNode)) + ts+27045, libc.VaList(bp, i, iCell, iNode)) } if pParent != 0 { @@ -123356,7 +123414,7 @@ return libc.Bool32(*(*RtreeValue)(unsafe.Pointer(bp + 52)) > *(*RtreeValue)(unsafe.Pointer(bp + 60))) }() != 0 { rtreeCheckAppendMsg(tls, pCheck, - ts+27046, libc.VaList(bp+24, i, iCell, iNode)) + ts+27093, libc.VaList(bp+24, i, iCell, iNode)) } } } @@ -123373,14 +123431,14 @@ if aNode != 0 { if *(*int32)(unsafe.Pointer(bp + 48)) < 4 { rtreeCheckAppendMsg(tls, pCheck, - ts+27113, libc.VaList(bp, iNode, 
*(*int32)(unsafe.Pointer(bp + 48)))) + ts+27160, libc.VaList(bp, iNode, *(*int32)(unsafe.Pointer(bp + 48)))) } else { var nCell int32 var i int32 if aParent == uintptr(0) { iDepth = readInt16(tls, aNode) if iDepth > RTREE_MAX_DEPTH { - rtreeCheckAppendMsg(tls, pCheck, ts+27147, libc.VaList(bp+16, iDepth)) + rtreeCheckAppendMsg(tls, pCheck, ts+27194, libc.VaList(bp+16, iDepth)) Xsqlite3_free(tls, aNode) return } @@ -123388,7 +123446,7 @@ nCell = readInt16(tls, aNode+2) if 4+nCell*(8+(*RtreeCheck)(unsafe.Pointer(pCheck)).FnDim*2*4) > *(*int32)(unsafe.Pointer(bp + 48)) { rtreeCheckAppendMsg(tls, pCheck, - ts+27177, + ts+27224, libc.VaList(bp+24, iNode, nCell, *(*int32)(unsafe.Pointer(bp + 48)))) } else { for i = 0; i < nCell; i++ { @@ -123417,14 +123475,14 @@ if (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc == SQLITE_OK { var pCount uintptr - pCount = rtreeCheckPrepare(tls, pCheck, ts+27232, + pCount = rtreeCheckPrepare(tls, pCheck, ts+27279, libc.VaList(bp, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzDb, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzTab, zTbl)) if pCount != 0 { if Xsqlite3_step(tls, pCount) == SQLITE_ROW { var nActual I64 = Xsqlite3_column_int64(tls, pCount, 0) if nActual != nExpect { rtreeCheckAppendMsg(tls, pCheck, - ts+27263, libc.VaList(bp+24, zTbl, nExpect, nActual)) + ts+27310, libc.VaList(bp+24, zTbl, nExpect, nActual)) } } (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc = Xsqlite3_finalize(tls, pCount) @@ -123451,7 +123509,7 @@ } if (*RtreeCheck)(unsafe.Pointer(bp+32)).Frc == SQLITE_OK { - pStmt = rtreeCheckPrepare(tls, bp+32, ts+27330, libc.VaList(bp, zDb, zTab)) + pStmt = rtreeCheckPrepare(tls, bp+32, ts+27377, libc.VaList(bp, zDb, zTab)) if pStmt != 0 { nAux = Xsqlite3_column_count(tls, pStmt) - 2 Xsqlite3_finalize(tls, pStmt) @@ -123460,12 +123518,12 @@ } } - pStmt = rtreeCheckPrepare(tls, bp+32, ts+25150, libc.VaList(bp+16, zDb, zTab)) + pStmt = rtreeCheckPrepare(tls, bp+32, ts+25197, libc.VaList(bp+16, zDb, zTab)) if pStmt != 0 { var rc int32 (*RtreeCheck)(unsafe.Pointer(bp + 32)).FnDim = (Xsqlite3_column_count(tls, pStmt) - 1 - nAux) / 2 if (*RtreeCheck)(unsafe.Pointer(bp+32)).FnDim < 1 { - rtreeCheckAppendMsg(tls, bp+32, ts+27358, 0) + rtreeCheckAppendMsg(tls, bp+32, ts+27405, 0) } else if SQLITE_ROW == Xsqlite3_step(tls, pStmt) { (*RtreeCheck)(unsafe.Pointer(bp + 32)).FbInt = libc.Bool32(Xsqlite3_column_type(tls, pStmt, 1) == SQLITE_INTEGER) } @@ -123479,8 +123537,8 @@ if (*RtreeCheck)(unsafe.Pointer(bp+32)).Frc == SQLITE_OK { rtreeCheckNode(tls, bp+32, 0, uintptr(0), int64(1)) } - rtreeCheckCount(tls, bp+32, ts+27389, int64((*RtreeCheck)(unsafe.Pointer(bp+32)).FnLeaf)) - rtreeCheckCount(tls, bp+32, ts+27396, int64((*RtreeCheck)(unsafe.Pointer(bp+32)).FnNonLeaf)) + rtreeCheckCount(tls, bp+32, ts+27436, int64((*RtreeCheck)(unsafe.Pointer(bp+32)).FnLeaf)) + rtreeCheckCount(tls, bp+32, ts+27443, int64((*RtreeCheck)(unsafe.Pointer(bp+32)).FnNonLeaf)) } Xsqlite3_finalize(tls, (*RtreeCheck)(unsafe.Pointer(bp+32)).FpGetNode) @@ -123488,7 +123546,7 @@ Xsqlite3_finalize(tls, *(*uintptr)(unsafe.Pointer(bp + 32 + 24 + 1*4))) if bEnd != 0 { - var rc int32 = Xsqlite3_exec(tls, db, ts+27404, uintptr(0), uintptr(0), uintptr(0)) + var rc int32 = Xsqlite3_exec(tls, db, ts+27451, uintptr(0), uintptr(0), uintptr(0)) if (*RtreeCheck)(unsafe.Pointer(bp+32)).Frc == SQLITE_OK { (*RtreeCheck)(unsafe.Pointer(bp + 32)).Frc = rc } @@ -123503,7 +123561,7 @@ if nArg != 1 && nArg != 2 { Xsqlite3_result_error(tls, ctx, - ts+27408, -1) + ts+27455, -1) } else { var rc int32 *(*uintptr)(unsafe.Pointer(bp)) = 
uintptr(0) @@ -123521,7 +123579,7 @@ if *(*uintptr)(unsafe.Pointer(bp)) != 0 { return *(*uintptr)(unsafe.Pointer(bp)) } - return ts + 18005 + return ts + 18052 }(), -1, libc.UintptrFromInt32(-1)) } else { Xsqlite3_result_error_code(tls, ctx, rc) @@ -123891,11 +123949,11 @@ var db uintptr = Xsqlite3_context_db_handle(tls, context) var x uintptr = Xsqlite3_str_new(tls, db) var i int32 - Xsqlite3_str_append(tls, x, ts+27459, 1) + Xsqlite3_str_append(tls, x, ts+27506, 1) for i = 0; i < (*GeoPoly)(unsafe.Pointer(p)).FnVertex; i++ { - Xsqlite3_str_appendf(tls, x, ts+27461, libc.VaList(bp, float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2)*4))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2+1)*4))))) + Xsqlite3_str_appendf(tls, x, ts+27508, libc.VaList(bp, float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2)*4))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2+1)*4))))) } - Xsqlite3_str_appendf(tls, x, ts+27472, libc.VaList(bp+16, float64(*(*GeoCoord)(unsafe.Pointer(p + 8))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + 1*4))))) + Xsqlite3_str_appendf(tls, x, ts+27519, libc.VaList(bp+16, float64(*(*GeoCoord)(unsafe.Pointer(p + 8))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + 1*4))))) Xsqlite3_result_text(tls, context, Xsqlite3_str_finish(tls, x), -1, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{Xsqlite3_free}))) Xsqlite3_free(tls, p) } @@ -123915,19 +123973,19 @@ var x uintptr = Xsqlite3_str_new(tls, db) var i int32 var cSep int8 = int8('\'') - Xsqlite3_str_appendf(tls, x, ts+27483, 0) + Xsqlite3_str_appendf(tls, x, ts+27530, 0) for i = 0; i < (*GeoPoly)(unsafe.Pointer(p)).FnVertex; i++ { - Xsqlite3_str_appendf(tls, x, ts+27501, libc.VaList(bp, int32(cSep), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2)*4))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2+1)*4))))) + Xsqlite3_str_appendf(tls, x, ts+27548, libc.VaList(bp, int32(cSep), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2)*4))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2+1)*4))))) cSep = int8(' ') } - Xsqlite3_str_appendf(tls, x, ts+27509, libc.VaList(bp+24, float64(*(*GeoCoord)(unsafe.Pointer(p + 8))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + 1*4))))) + Xsqlite3_str_appendf(tls, x, ts+27556, libc.VaList(bp+24, float64(*(*GeoCoord)(unsafe.Pointer(p + 8))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + 1*4))))) for i = 1; i < argc; i++ { var z uintptr = Xsqlite3_value_text(tls, *(*uintptr)(unsafe.Pointer(argv + uintptr(i)*4))) if z != 0 && *(*int8)(unsafe.Pointer(z)) != 0 { - Xsqlite3_str_appendf(tls, x, ts+27517, libc.VaList(bp+40, z)) + Xsqlite3_str_appendf(tls, x, ts+27564, libc.VaList(bp+40, z)) } } - Xsqlite3_str_appendf(tls, x, ts+27521, 0) + Xsqlite3_str_appendf(tls, x, ts+27568, 0) Xsqlite3_result_text(tls, context, Xsqlite3_str_finish(tls, x), -1, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{Xsqlite3_free}))) Xsqlite3_free(tls, p) } @@ -124845,7 +124903,7 @@ libc.Xmemcpy(tls, (*Rtree)(unsafe.Pointer(pRtree)).FzName, *(*uintptr)(unsafe.Pointer(argv + 2*4)), uint32(nName)) pSql = Xsqlite3_str_new(tls, db) - Xsqlite3_str_appendf(tls, pSql, ts+27534, 0) + Xsqlite3_str_appendf(tls, pSql, ts+27581, 0) (*Rtree)(unsafe.Pointer(pRtree)).FnAux = U8(1) (*Rtree)(unsafe.Pointer(pRtree)).FnAuxNotNull = U8(1) ii = 3 @@ -124854,7 +124912,7 @@ goto __4 } (*Rtree)(unsafe.Pointer(pRtree)).FnAux++ - Xsqlite3_str_appendf(tls, pSql, ts+27556, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(argv + uintptr(ii)*4)))) + Xsqlite3_str_appendf(tls, 
pSql, ts+27603, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(argv + uintptr(ii)*4)))) goto __3 __3: ii++ @@ -124862,7 +124920,7 @@ goto __4 __4: ; - Xsqlite3_str_appendf(tls, pSql, ts+26623, 0) + Xsqlite3_str_appendf(tls, pSql, ts+26670, 0) zSql = Xsqlite3_str_finish(tls, pSql) if !!(zSql != 0) { goto __5 @@ -125099,7 +125157,7 @@ } if iFuncTerm >= 0 { (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxNum = idxNum - (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxStr = ts + 27560 + (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxStr = ts + 27607 (*sqlite3_index_constraint_usage)(unsafe.Pointer((*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FaConstraintUsage + uintptr(iFuncTerm)*8)).FargvIndex = 1 (*sqlite3_index_constraint_usage)(unsafe.Pointer((*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FaConstraintUsage + uintptr(iFuncTerm)*8)).Fomit = uint8(0) (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FestimatedCost = 300.0 @@ -125107,7 +125165,7 @@ return SQLITE_OK } (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxNum = 4 - (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxStr = ts + 27566 + (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxStr = ts + 27613 (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FestimatedCost = 3000000.0 (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FestimatedRows = int64(100000) return SQLITE_OK @@ -125219,7 +125277,7 @@ if !(*(*int32)(unsafe.Pointer(bp + 48)) == SQLITE_ERROR) { goto __4 } - (*Sqlite3_vtab)(unsafe.Pointer(pVtab)).FzErrMsg = Xsqlite3_mprintf(tls, ts+27575, 0) + (*Sqlite3_vtab)(unsafe.Pointer(pVtab)).FzErrMsg = Xsqlite3_mprintf(tls, ts+27622, 0) __4: ; goto geopoly_update_end @@ -125351,14 +125409,14 @@ func geopolyFindFunction(tls *libc.TLS, pVtab uintptr, nArg int32, zName uintptr, pxFunc uintptr, ppArg uintptr) int32 { _ = pVtab _ = nArg - if Xsqlite3_stricmp(tls, zName, ts+27615) == 0 { + if Xsqlite3_stricmp(tls, zName, ts+27662) == 0 { *(*uintptr)(unsafe.Pointer(pxFunc)) = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{geopolyOverlapFunc})) *(*uintptr)(unsafe.Pointer(ppArg)) = uintptr(0) return SQLITE_INDEX_CONSTRAINT_FUNCTION } - if Xsqlite3_stricmp(tls, zName, ts+27631) == 0 { + if Xsqlite3_stricmp(tls, zName, ts+27678) == 0 { *(*uintptr)(unsafe.Pointer(pxFunc)) = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{geopolyWithinFunc})) @@ -125423,7 +125481,7 @@ uintptr(0), aAgg[i].FxStep, aAgg[i].FxFinal) } if rc == SQLITE_OK { - rc = Xsqlite3_create_module_v2(tls, db, ts+27646, uintptr(unsafe.Pointer(&geopolyModule)), uintptr(0), uintptr(0)) + rc = Xsqlite3_create_module_v2(tls, db, ts+27693, uintptr(unsafe.Pointer(&geopolyModule)), uintptr(0), uintptr(0)) } return rc } @@ -125435,25 +125493,25 @@ F__ccgo_pad1 [2]byte FzName uintptr }{ - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27654}, - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27667}, - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27680}, - {FxFunc: 0, FnArg: int8(-1), FbPure: uint8(1), FzName: ts + 27693}, - {FxFunc: 0, FnArg: int8(2), FbPure: uint8(1), FzName: ts + 27631}, - {FxFunc: 0, FnArg: int8(3), FbPure: uint8(1), FzName: ts + 27705}, - {FxFunc: 0, FnArg: int8(2), FbPure: uint8(1), FzName: ts + 27615}, - {FxFunc: 0, FnArg: int8(1), FzName: ts + 27728}, - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27742}, - {FxFunc: 0, FnArg: int8(7), FbPure: uint8(1), FzName: ts + 27755}, - {FxFunc: 0, FnArg: int8(4), FbPure: uint8(1), FzName: ts + 
27769}, - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27785}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27701}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27714}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27727}, + {FxFunc: 0, FnArg: int8(-1), FbPure: uint8(1), FzName: ts + 27740}, + {FxFunc: 0, FnArg: int8(2), FbPure: uint8(1), FzName: ts + 27678}, + {FxFunc: 0, FnArg: int8(3), FbPure: uint8(1), FzName: ts + 27752}, + {FxFunc: 0, FnArg: int8(2), FbPure: uint8(1), FzName: ts + 27662}, + {FxFunc: 0, FnArg: int8(1), FzName: ts + 27775}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27789}, + {FxFunc: 0, FnArg: int8(7), FbPure: uint8(1), FzName: ts + 27802}, + {FxFunc: 0, FnArg: int8(4), FbPure: uint8(1), FzName: ts + 27816}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27832}, } var aAgg = [1]struct { FxStep uintptr FxFinal uintptr FzName uintptr }{ - {FxStep: 0, FxFinal: 0, FzName: ts + 27797}, + {FxStep: 0, FxFinal: 0, FzName: ts + 27844}, } // Register the r-tree module with database handle db. This creates the @@ -125463,26 +125521,26 @@ var utf8 int32 = SQLITE_UTF8 var rc int32 - rc = Xsqlite3_create_function(tls, db, ts+27816, 2, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + rc = Xsqlite3_create_function(tls, db, ts+27863, 2, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rtreenode})), uintptr(0), uintptr(0)) if rc == SQLITE_OK { - rc = Xsqlite3_create_function(tls, db, ts+27826, 1, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + rc = Xsqlite3_create_function(tls, db, ts+27873, 1, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rtreedepth})), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { - rc = Xsqlite3_create_function(tls, db, ts+27837, -1, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + rc = Xsqlite3_create_function(tls, db, ts+27884, -1, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rtreecheck})), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { var c uintptr = uintptr(RTREE_COORD_REAL32) - rc = Xsqlite3_create_module_v2(tls, db, ts+27560, uintptr(unsafe.Pointer(&rtreeModule)), c, uintptr(0)) + rc = Xsqlite3_create_module_v2(tls, db, ts+27607, uintptr(unsafe.Pointer(&rtreeModule)), c, uintptr(0)) } if rc == SQLITE_OK { var c uintptr = uintptr(RTREE_COORD_INT32) - rc = Xsqlite3_create_module_v2(tls, db, ts+27848, uintptr(unsafe.Pointer(&rtreeModule)), c, uintptr(0)) + rc = Xsqlite3_create_module_v2(tls, db, ts+27895, uintptr(unsafe.Pointer(&rtreeModule)), c, uintptr(0)) } if rc == SQLITE_OK { rc = sqlite3_geopoly_init(tls, db) @@ -125536,7 +125594,7 @@ Xsqlite3_result_error_nomem(tls, ctx) rtreeMatchArgFree(tls, pBlob) } else { - Xsqlite3_result_pointer(tls, ctx, pBlob, ts+25136, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{rtreeMatchArgFree}))) + Xsqlite3_result_pointer(tls, ctx, pBlob, ts+25183, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{rtreeMatchArgFree}))) } } } @@ -125851,7 +125909,7 @@ nOut = rbuDeltaOutputSize(tls, aDelta, nDelta) if nOut < 0 { - Xsqlite3_result_error(tls, context, ts+27858, -1) + Xsqlite3_result_error(tls, context, ts+27905, -1) return } @@ -125862,7 +125920,7 @@ nOut2 = rbuDeltaApply(tls, aOrig, nOrig, aDelta, nDelta, aOut) if nOut2 != nOut { Xsqlite3_free(tls, aOut) - Xsqlite3_result_error(tls, context, ts+27858, -1) + 
Xsqlite3_result_error(tls, context, ts+27905, -1) } else { Xsqlite3_result_blob(tls, context, aOut, nOut, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{Xsqlite3_free}))) } @@ -125963,7 +126021,7 @@ rbuObjIterClearStatements(tls, pIter) if (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx == uintptr(0) { rc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+27879, uintptr(0), uintptr(0), p+36) + ts+27926, uintptr(0), uintptr(0), p+36) } if rc == SQLITE_OK { @@ -126027,7 +126085,7 @@ Xsqlite3_result_text(tls, pCtx, zIn, -1, uintptr(0)) } } else { - if libc.Xstrlen(tls, zIn) > Size_t(4) && libc.Xmemcmp(tls, ts+25049, zIn, uint32(4)) == 0 { + if libc.Xstrlen(tls, zIn) > Size_t(4) && libc.Xmemcmp(tls, ts+25096, zIn, uint32(4)) == 0 { var i int32 for i = 4; int32(*(*int8)(unsafe.Pointer(zIn + uintptr(i)))) >= '0' && int32(*(*int8)(unsafe.Pointer(zIn + uintptr(i)))) <= '9'; i++ { } @@ -126048,16 +126106,16 @@ rc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, pIter, p+36, Xsqlite3_mprintf(tls, - ts+28050, libc.VaList(bp, func() uintptr { + ts+28097, libc.VaList(bp, func() uintptr { if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - return ts + 28200 + return ts + 28247 } return ts + 1547 }()))) if rc == SQLITE_OK { rc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+4, p+36, - ts+28241) + ts+28288) } (*RbuObjIter)(unsafe.Pointer(pIter)).FbCleanup = 1 @@ -126173,7 +126231,7 @@ (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+32, p+36, Xsqlite3_mprintf(tls, - ts+28366, libc.VaList(bp, zTab))) + ts+28413, libc.VaList(bp, zTab))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc != SQLITE_OK || Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 32))) != SQLITE_ROW) { goto __1 } @@ -126191,7 +126249,7 @@ *(*int32)(unsafe.Pointer(piTnum)) = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 32)), 1) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+32+1*4, p+36, - Xsqlite3_mprintf(tls, ts+28485, libc.VaList(bp+8, zTab))) + Xsqlite3_mprintf(tls, ts+28532, libc.VaList(bp+8, zTab))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc != 0) { goto __3 } @@ -126209,7 +126267,7 @@ } (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+32+2*4, p+36, Xsqlite3_mprintf(tls, - ts+28506, libc.VaList(bp+16, zIdx))) + ts+28553, libc.VaList(bp+16, zIdx))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK) { goto __7 } @@ -126232,7 +126290,7 @@ __5: ; (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+32+3*4, p+36, - Xsqlite3_mprintf(tls, ts+28557, libc.VaList(bp+24, zTab))) + Xsqlite3_mprintf(tls, ts+28604, libc.VaList(bp+24, zTab))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK) { goto __10 } @@ -126278,7 +126336,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { libc.Xmemcpy(tls, (*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed, (*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk, uint32(unsafe.Sizeof(U8(0)))*uint32((*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol)) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+16, p+36, - Xsqlite3_mprintf(tls, ts+28578, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) + Xsqlite3_mprintf(tls, ts+28625, libc.VaList(bp, 
(*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) } (*RbuObjIter)(unsafe.Pointer(pIter)).FnIndex = 0 @@ -126293,7 +126351,7 @@ libc.Xmemset(tls, (*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed, 0x01, uint32(unsafe.Sizeof(U8(0)))*uint32((*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol)) } (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+20, p+36, - Xsqlite3_mprintf(tls, ts+28606, libc.VaList(bp+8, zIdx))) + Xsqlite3_mprintf(tls, ts+28653, libc.VaList(bp+8, zIdx))) for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 20))) { var iCid int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 20)), 1) if iCid >= 0 { @@ -126333,7 +126391,7 @@ rbuTableType(tls, p, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, pIter+36, bp+56, pIter+60) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NOTABLE { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+19482, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl)) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+19529, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl)) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc != 0 { return (*Sqlite3rbu)(unsafe.Pointer(p)).Frc @@ -126343,18 +126401,18 @@ } (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp+60, p+36, - Xsqlite3_mprintf(tls, ts+28635, libc.VaList(bp+8, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl))) + Xsqlite3_mprintf(tls, ts+28682, libc.VaList(bp+8, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl))) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { nCol = Xsqlite3_column_count(tls, *(*uintptr)(unsafe.Pointer(bp + 60))) rbuAllocateIterArrays(tls, p, pIter, nCol) } for i = 0; (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && i < nCol; i++ { var zName uintptr = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp + 60)), i) - if Xsqlite3_strnicmp(tls, ts+28654, zName, 4) != 0 { + if Xsqlite3_strnicmp(tls, ts+28701, zName, 4) != 0 { var zCopy uintptr = rbuStrndup(tls, zName, p+32) *(*int32)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FaiSrcOrder + uintptr((*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol)*4)) = (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(libc.PostIncInt32(&(*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol, 1))*4)) = zCopy - } else if 0 == Xsqlite3_stricmp(tls, ts+28659, zName) { + } else if 0 == Xsqlite3_stricmp(tls, ts+28706, zName) { bRbuRowid = 1 } } @@ -126366,18 +126424,18 @@ bRbuRowid != libc.Bool32((*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_VTAB || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE) { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, - ts+28669, libc.VaList(bp+16, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, + ts+28716, libc.VaList(bp+16, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, func() uintptr { if bRbuRowid != 0 { - return ts + 28698 + return ts + 28745 } - return ts + 28711 + return ts + 28758 }())) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+60, p+36, - Xsqlite3_mprintf(tls, ts+28720, 
libc.VaList(bp+32, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) + Xsqlite3_mprintf(tls, ts+28767, libc.VaList(bp+32, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) } for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 60))) { var zName uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 60)), 1) @@ -126391,7 +126449,7 @@ } if i == (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+28742, + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+28789, libc.VaList(bp+40, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zName)) } else { var iPk int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 60)), 5) @@ -126438,7 +126496,7 @@ var i int32 for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { var z uintptr = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*4)) - zList = rbuMPrintf(tls, p, ts+28769, libc.VaList(bp, zList, zSep, z)) + zList = rbuMPrintf(tls, p, ts+28816, libc.VaList(bp, zList, zSep, z)) zSep = ts + 14607 } return zList @@ -126456,7 +126514,7 @@ for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { if int32(*(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(i)))) == iPk { var zCol uintptr = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*4)) - zRet = rbuMPrintf(tls, p, ts+28778, libc.VaList(bp, zRet, zSep, zPre, zCol, zPost)) + zRet = rbuMPrintf(tls, p, ts+28825, libc.VaList(bp, zRet, zSep, zPre, zCol, zPost)) zSep = zSeparator break } @@ -126478,25 +126536,25 @@ if bRowid != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+72, p+36, Xsqlite3_mprintf(tls, - ts+28791, libc.VaList(bp, zWrite, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) + ts+28838, libc.VaList(bp, zWrite, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 72))) { var iMax Sqlite3_int64 = Xsqlite3_column_int64(tls, *(*uintptr)(unsafe.Pointer(bp + 72)), 0) - zRet = rbuMPrintf(tls, p, ts+28823, libc.VaList(bp+16, iMax)) + zRet = rbuMPrintf(tls, p, ts+28870, libc.VaList(bp+16, iMax)) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 72))) } else { - var zOrder uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+1547, ts+14607, ts+28846) - var zSelect uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+28852, ts+28859, ts+4950) + var zOrder uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+1547, ts+14607, ts+28893) + var zSelect uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+28899, ts+28906, ts+4950) var zList uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+1547, ts+14607, ts+1547) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+72, p+36, Xsqlite3_mprintf(tls, - ts+28867, + ts+28914, libc.VaList(bp+24, zSelect, zWrite, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zOrder))) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 72))) { var zVal uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 72)), 0) - zRet = rbuMPrintf(tls, p, ts+28909, libc.VaList(bp+56, zList, zVal)) + zRet 
= rbuMPrintf(tls, p, ts+28956, libc.VaList(bp+56, zList, zVal)) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 72))) } @@ -126538,7 +126596,7 @@ *(*uintptr)(unsafe.Pointer(bp + 176)) = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+176, p+36, - Xsqlite3_mprintf(tls, ts+28606, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx))) + Xsqlite3_mprintf(tls, ts+28653, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx))) __1: if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 176)))) { goto __2 @@ -126573,7 +126631,7 @@ zCol = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*4)) goto __7 __6: - zCol = ts + 28929 + zCol = ts + 28976 __7: ; goto __5 @@ -126581,11 +126639,11 @@ zCol = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(iCid)*4)) __5: ; - zLhs = rbuMPrintf(tls, p, ts+28937, + zLhs = rbuMPrintf(tls, p, ts+28984, libc.VaList(bp+8, zLhs, zSep, zCol, zCollate)) - zOrder = rbuMPrintf(tls, p, ts+28958, + zOrder = rbuMPrintf(tls, p, ts+29005, libc.VaList(bp+40, zOrder, zSep, iCol, zCol, zCollate)) - zSelect = rbuMPrintf(tls, p, ts+28994, + zSelect = rbuMPrintf(tls, p, ts+29041, libc.VaList(bp+80, zSelect, zSep, iCol, zCol)) zSep = ts + 14607 iCol++ @@ -126605,7 +126663,7 @@ *(*uintptr)(unsafe.Pointer(bp + 180)) = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+180, p+36, - Xsqlite3_mprintf(tls, ts+29021, + Xsqlite3_mprintf(tls, ts+29068, libc.VaList(bp+112, zSelect, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zOrder))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 180)))) { goto __13 @@ -126632,7 +126690,7 @@ ; __18: ; - zVector = rbuMPrintf(tls, p, ts+29069, libc.VaList(bp+136, zVector, zSep, zQuoted)) + zVector = rbuMPrintf(tls, p, ts+29116, libc.VaList(bp+136, zVector, zSep, zQuoted)) zSep = ts + 14607 goto __15 __15: @@ -126644,7 +126702,7 @@ if !!(bFailed != 0) { goto __20 } - zRet = rbuMPrintf(tls, p, ts+29076, libc.VaList(bp+160, zLhs, zVector)) + zRet = rbuMPrintf(tls, p, ts+29123, libc.VaList(bp+160, zLhs, zVector)) __20: ; __13: @@ -126677,7 +126735,7 @@ if rc == SQLITE_OK { rc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+200, p+36, - Xsqlite3_mprintf(tls, ts+28606, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx))) + Xsqlite3_mprintf(tls, ts+28653, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx))) } for rc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 200))) { @@ -126689,7 +126747,7 @@ if iCid == -2 { var iSeq int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 200)), 0) - zRet = Xsqlite3_mprintf(tls, ts+29088, libc.VaList(bp+8, zRet, zCom, + zRet = Xsqlite3_mprintf(tls, ts+29135, libc.VaList(bp+8, zRet, zCom, (*RbuSpan)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FaIdxCol+uintptr(iSeq)*8)).FnSpan, (*RbuSpan)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FaIdxCol+uintptr(iSeq)*8)).FzSpan, zCollate)) zType = ts + 1547 } else { @@ -126701,37 +126759,37 @@ zCol = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*4)) } else if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - zCol = ts + 28929 + zCol = ts + 28976 } else { - 
zCol = ts + 28659 + zCol = ts + 28706 } zType = ts + 1112 } else { zCol = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(iCid)*4)) zType = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblType + uintptr(iCid)*4)) } - zRet = Xsqlite3_mprintf(tls, ts+29110, libc.VaList(bp+48, zRet, zCom, zCol, zCollate)) + zRet = Xsqlite3_mprintf(tls, ts+29157, libc.VaList(bp+48, zRet, zCom, zCol, zCollate)) } if (*RbuObjIter)(unsafe.Pointer(pIter)).FbUnique == 0 || Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 200)), 5) != 0 { var zOrder uintptr = func() uintptr { if bDesc != 0 { - return ts + 28846 + return ts + 28893 } return ts + 1547 }() - zImpPK = Xsqlite3_mprintf(tls, ts+29130, + zImpPK = Xsqlite3_mprintf(tls, ts+29177, libc.VaList(bp+80, zImpPK, zCom, nBind, zCol, zOrder)) } - zImpCols = Xsqlite3_mprintf(tls, ts+29151, + zImpCols = Xsqlite3_mprintf(tls, ts+29198, libc.VaList(bp+120, zImpCols, zCom, nBind, zCol, zType, zCollate)) zWhere = Xsqlite3_mprintf(tls, - ts+29184, libc.VaList(bp+168, zWhere, zAnd, nBind, zCol)) + ts+29231, libc.VaList(bp+168, zWhere, zAnd, nBind, zCol)) if zRet == uintptr(0) || zImpPK == uintptr(0) || zImpCols == uintptr(0) || zWhere == uintptr(0) { rc = SQLITE_NOMEM } zCom = ts + 14607 - zAnd = ts + 21518 + zAnd = ts + 21565 nBind++ } @@ -126770,9 +126828,9 @@ for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { if *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed + uintptr(i))) != 0 { var zCol uintptr = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*4)) - zList = Xsqlite3_mprintf(tls, ts+29208, libc.VaList(bp, zList, zS, zObj, zCol)) + zList = Xsqlite3_mprintf(tls, ts+29255, libc.VaList(bp, zList, zS, zObj, zCol)) } else { - zList = Xsqlite3_mprintf(tls, ts+29220, libc.VaList(bp+32, zList, zS)) + zList = Xsqlite3_mprintf(tls, ts+29267, libc.VaList(bp+32, zList, zS)) } zS = ts + 14607 if zList == uintptr(0) { @@ -126782,7 +126840,7 @@ } if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { - zList = rbuMPrintf(tls, p, ts+29229, libc.VaList(bp+48, zList, zObj)) + zList = rbuMPrintf(tls, p, ts+29276, libc.VaList(bp+48, zList, zObj)) } } return zList @@ -126794,18 +126852,18 @@ var zList uintptr = uintptr(0) if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_VTAB || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { - zList = rbuMPrintf(tls, p, ts+29244, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol+1)) + zList = rbuMPrintf(tls, p, ts+29291, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol+1)) } else if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL { var zSep uintptr = ts + 1547 var i int32 for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { if *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(i))) != 0 { - zList = rbuMPrintf(tls, p, ts+29258, libc.VaList(bp+8, zList, zSep, i, i+1)) - zSep = ts + 21518 + zList = rbuMPrintf(tls, p, ts+29305, libc.VaList(bp+8, zList, zSep, i, i+1)) + zSep = ts + 21565 } } zList = rbuMPrintf(tls, p, - ts+29270, libc.VaList(bp+40, zList)) + ts+29317, libc.VaList(bp+40, zList)) } else { var zSep uintptr = ts + 1547 @@ -126813,8 +126871,8 @@ for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { if *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(i))) != 0 { var zCol uintptr = 
*(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*4)) - zList = rbuMPrintf(tls, p, ts+29320, libc.VaList(bp+48, zList, zSep, zCol, i+1)) - zSep = ts + 21518 + zList = rbuMPrintf(tls, p, ts+29367, libc.VaList(bp+48, zList, zSep, zCol, i+1)) + zSep = ts + 21565 } } } @@ -126823,7 +126881,7 @@ func rbuBadControlError(tls *libc.TLS, p uintptr) { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+29333, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+29380, 0) } func rbuObjIterGetSetlist(tls *libc.TLS, p uintptr, pIter uintptr, zMask uintptr) uintptr { @@ -126841,15 +126899,15 @@ for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { var c int8 = *(*int8)(unsafe.Pointer(zMask + uintptr(*(*int32)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FaiSrcOrder + uintptr(i)*4))))) if int32(c) == 'x' { - zList = rbuMPrintf(tls, p, ts+29320, + zList = rbuMPrintf(tls, p, ts+29367, libc.VaList(bp, zList, zSep, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*4)), i+1)) zSep = ts + 14607 } else if int32(c) == 'd' { - zList = rbuMPrintf(tls, p, ts+29359, + zList = rbuMPrintf(tls, p, ts+29406, libc.VaList(bp+32, zList, zSep, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*4)), *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*4)), i+1)) zSep = ts + 14607 } else if int32(c) == 'f' { - zList = rbuMPrintf(tls, p, ts+29389, + zList = rbuMPrintf(tls, p, ts+29436, libc.VaList(bp+72, zList, zSep, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*4)), *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*4)), i+1)) zSep = ts + 14607 } @@ -126886,19 +126944,19 @@ var z uintptr = uintptr(0) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - var zSep uintptr = ts + 29426 + var zSep uintptr = ts + 29473 *(*uintptr)(unsafe.Pointer(bp + 56)) = uintptr(0) *(*uintptr)(unsafe.Pointer(bp + 60)) = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+56, p+36, - Xsqlite3_mprintf(tls, ts+28578, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) + Xsqlite3_mprintf(tls, ts+28625, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 56))) { var zOrig uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), 3) if zOrig != 0 && libc.Xstrcmp(tls, zOrig, ts+16148) == 0 { var zIdx uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), 1) if zIdx != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+60, p+36, - Xsqlite3_mprintf(tls, ts+28606, libc.VaList(bp+8, zIdx))) + Xsqlite3_mprintf(tls, ts+28653, libc.VaList(bp+8, zIdx))) } break } @@ -126910,15 +126968,15 @@ var zCol uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 60)), 2) var zDesc uintptr if Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 60)), 3) != 0 { - zDesc = ts + 28846 + zDesc = ts + 28893 } else { zDesc = ts + 1547 } - z = rbuMPrintf(tls, p, ts+29439, libc.VaList(bp+16, z, zSep, zCol, zDesc)) + z = rbuMPrintf(tls, p, ts+29486, libc.VaList(bp+16, z, zSep, zCol, zDesc)) zSep = ts + 14607 } } - z 
= rbuMPrintf(tls, p, ts+29450, libc.VaList(bp+48, z)) + z = rbuMPrintf(tls, p, ts+29497, libc.VaList(bp+48, z)) rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 60))) } return z @@ -126938,7 +126996,7 @@ var zPk uintptr = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+168, p+36, - ts+29454) + ts+29501) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { Xsqlite3_bind_int(tls, *(*uintptr)(unsafe.Pointer(bp + 168)), 1, tnum) if SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 168))) { @@ -126947,7 +127005,7 @@ } if zIdx != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+172, p+36, - Xsqlite3_mprintf(tls, ts+28606, libc.VaList(bp, zIdx))) + Xsqlite3_mprintf(tls, ts+28653, libc.VaList(bp, zIdx))) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 168))) @@ -126957,23 +127015,23 @@ var iCid int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 172)), 1) var bDesc int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 172)), 3) var zCollate uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 172)), 4) - zCols = rbuMPrintf(tls, p, ts+29504, libc.VaList(bp+8, zCols, zComma, + zCols = rbuMPrintf(tls, p, ts+29551, libc.VaList(bp+8, zCols, zComma, iCid, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblType + uintptr(iCid)*4)), zCollate)) - zPk = rbuMPrintf(tls, p, ts+29526, libc.VaList(bp+48, zPk, zComma, iCid, func() uintptr { + zPk = rbuMPrintf(tls, p, ts+29573, libc.VaList(bp+48, zPk, zComma, iCid, func() uintptr { if bDesc != 0 { - return ts + 28846 + return ts + 28893 } return ts + 1547 }())) zComma = ts + 14607 } } - zCols = rbuMPrintf(tls, p, ts+29536, libc.VaList(bp+80, zCols)) + zCols = rbuMPrintf(tls, p, ts+29583, libc.VaList(bp+80, zCols)) rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 172))) Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+88, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6434, 1, tnum)) rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+29551, + ts+29598, libc.VaList(bp+120, zCols, zPk)) Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+136, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6434, 0, 0)) } @@ -126999,13 +127057,13 @@ (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6434, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zCol, uintptr(0), bp+192, uintptr(0), uintptr(0), uintptr(0)) if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_IPK && *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(iCol))) != 0 { - zPk = ts + 29613 + zPk = ts + 29660 } - zSql = rbuMPrintf(tls, p, ts+29626, + zSql = rbuMPrintf(tls, p, ts+29673, libc.VaList(bp+32, zSql, zComma, zCol, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblType + uintptr(iCol)*4)), zPk, *(*uintptr)(unsafe.Pointer(bp + 192)), func() uintptr { if *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabNotNull + uintptr(iCol))) != 0 { - return ts + 29653 + return ts + 29700 } return ts + 1547 }())) @@ -127015,16 +127073,16 @@ if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_WITHOUT_ROWID { var zPk uintptr = rbuWithoutRowidPK(tls, p, pIter) if zPk != 0 { - zSql = rbuMPrintf(tls, p, ts+29663, libc.VaList(bp+88, zSql, zPk)) + zSql = rbuMPrintf(tls, p, ts+29710, libc.VaList(bp+88, zSql, zPk)) } } Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+104, 
(*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6434, 1, tnum)) - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+29670, + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+29717, libc.VaList(bp+136, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zSql, func() uintptr { if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_WITHOUT_ROWID { - return ts + 29702 + return ts + 29749 } return ts + 1547 }())) @@ -127041,7 +127099,7 @@ if zBind != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, pIter+88, p+36, Xsqlite3_mprintf(tls, - ts+29717, + ts+29764, libc.VaList(bp, p+24, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zCollist, zRbuRowid, zBind))) } } @@ -127078,7 +127136,7 @@ if *(*int32)(unsafe.Pointer(bp + 4)) == SQLITE_OK { *(*int32)(unsafe.Pointer(bp + 4)) = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp, p+36, - ts+29774) + ts+29821) } if *(*int32)(unsafe.Pointer(bp + 4)) == SQLITE_OK { var rc2 int32 @@ -127183,7 +127241,7 @@ var zLimit uintptr = uintptr(0) if nOffset != 0 { - zLimit = Xsqlite3_mprintf(tls, ts+29840, libc.VaList(bp, nOffset)) + zLimit = Xsqlite3_mprintf(tls, ts+29887, libc.VaList(bp, nOffset)) if !(zLimit != 0) { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_NOMEM } @@ -127206,7 +127264,7 @@ Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+8, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6434, 0, 1)) Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+40, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6434, 1, tnum)) rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+29860, + ts+29907, libc.VaList(bp+72, zTbl, *(*uintptr)(unsafe.Pointer(bp + 600)), *(*uintptr)(unsafe.Pointer(bp + 604)))) Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+96, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6434, 0, 0)) @@ -127214,13 +127272,13 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+80, p+36, - Xsqlite3_mprintf(tls, ts+29925, libc.VaList(bp+128, zTbl, zBind))) + Xsqlite3_mprintf(tls, ts+29972, libc.VaList(bp+128, zTbl, zBind))) } if libc.Bool32((*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0)) == 0 && (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+84, p+36, - Xsqlite3_mprintf(tls, ts+29961, libc.VaList(bp+144, zTbl, *(*uintptr)(unsafe.Pointer(bp + 608))))) + Xsqlite3_mprintf(tls, ts+30008, libc.VaList(bp+144, zTbl, *(*uintptr)(unsafe.Pointer(bp + 608))))) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { @@ -127236,7 +127294,7 @@ } zSql = Xsqlite3_mprintf(tls, - ts+29995, + ts+30042, libc.VaList(bp+160, zCollist, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zPart, @@ -127244,9 +127302,9 @@ if zStart != 0 { return func() uintptr { if zPart != 0 { - return ts + 30056 + return ts + 30103 } - return ts + 30060 + return ts + 30107 }() } return ts + 1547 @@ -127255,20 +127313,20 @@ Xsqlite3_free(tls, zStart) } else if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { zSql = Xsqlite3_mprintf(tls, - ts+30066, + ts+30113, libc.VaList(bp+216, zCollist, p+24, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zPart, zCollist, zLimit)) } else { zSql = 
Xsqlite3_mprintf(tls, - ts+30127, + ts+30174, libc.VaList(bp+264, zCollist, p+24, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zPart, zCollist, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zPart, func() uintptr { if zPart != 0 { - return ts + 30056 + return ts + 30103 } - return ts + 30060 + return ts + 30107 }(), zCollist, zLimit)) } @@ -127305,16 +127363,16 @@ if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_VTAB { return ts + 1547 } - return ts + 30286 + return ts + 30333 }() if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+80, pz, Xsqlite3_mprintf(tls, - ts+30295, + ts+30342, libc.VaList(bp+344, zWrite, zTbl, zCollist, func() uintptr { if bRbuRowid != 0 { - return ts + 30331 + return ts + 30378 } return ts + 1547 }(), zBindings))) @@ -127323,32 +127381,32 @@ if libc.Bool32((*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0)) == 0 && (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+84, pz, Xsqlite3_mprintf(tls, - ts+30341, libc.VaList(bp+384, zWrite, zTbl, zWhere))) + ts+30388, libc.VaList(bp+384, zWrite, zTbl, zWhere))) } if libc.Bool32((*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0)) == 0 && (*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed != 0 { var zRbuRowid uintptr = ts + 1547 if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { - zRbuRowid = ts + 30369 + zRbuRowid = ts + 30416 } rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, - ts+30381, libc.VaList(bp+408, p+24, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, func() uintptr { + ts+30428, libc.VaList(bp+408, p+24, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, func() uintptr { if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL { - return ts + 30457 + return ts + 30504 } return ts + 1547 }(), (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl)) rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+30474, + ts+30521, libc.VaList(bp+440, zWrite, zTbl, zOldlist, zWrite, zTbl, zOldlist, zWrite, zTbl, zNewlist)) if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+30773, + ts+30820, libc.VaList(bp+512, zWrite, zTbl, zNewlist)) } @@ -127361,9 +127419,9 @@ var zOrder uintptr = uintptr(0) if bRbuRowid != 0 { if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - zRbuRowid = ts + 30872 + zRbuRowid = ts + 30919 } else { - zRbuRowid = ts + 30882 + zRbuRowid = ts + 30929 } } @@ -127376,7 +127434,7 @@ } } if bRbuRowid != 0 { - zOrder = rbuMPrintf(tls, p, ts+28929, 0) + zOrder = rbuMPrintf(tls, p, ts+28976, 0) } else { zOrder = rbuObjIterGetPkList(tls, p, pIter, ts+1547, ts+14607, ts+1547) } @@ -127385,11 +127443,11 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, pIter+76, pz, Xsqlite3_mprintf(tls, - ts+30893, + ts+30940, libc.VaList(bp+536, zCollist, func() uintptr { if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - return ts + 30941 + return ts + 30988 } return ts + 1547 }(), @@ -127402,7 +127460,7 @@ }(), func() uintptr { if zOrder != 0 { - return ts + 22852 + return ts + 
22899 } return ts + 1547 }(), zOrder, @@ -127470,9 +127528,9 @@ var zPrefix uintptr = ts + 1547 if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType != RBU_PK_VTAB { - zPrefix = ts + 30286 + zPrefix = ts + 30333 } - zUpdate = Xsqlite3_mprintf(tls, ts+30947, + zUpdate = Xsqlite3_mprintf(tls, ts+30994, libc.VaList(bp, zPrefix, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zSet, zWhere)) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pUp+4, p+36, zUpdate) @@ -127531,7 +127589,7 @@ } *(*int32)(unsafe.Pointer(bp + 12)) = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp+8, p+36, - Xsqlite3_mprintf(tls, ts+30977, libc.VaList(bp, p+24))) + Xsqlite3_mprintf(tls, ts+31024, libc.VaList(bp, p+24))) for *(*int32)(unsafe.Pointer(bp + 12)) == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 8))) { switch Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 8)), 0) { case RBU_STATE_STAGE: @@ -127604,18 +127662,18 @@ Xsqlite3_file_control(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+6434, SQLITE_FCNTL_RBUCNT, p) if (*Sqlite3rbu)(unsafe.Pointer(p)).FzState == uintptr(0) { var zFile uintptr = Xsqlite3_db_filename(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+6434) - (*Sqlite3rbu)(unsafe.Pointer(p)).FzState = rbuMPrintf(tls, p, ts+31007, libc.VaList(bp, zFile, zFile)) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzState = rbuMPrintf(tls, p, ts+31054, libc.VaList(bp, zFile, zFile)) } } if (*Sqlite3rbu)(unsafe.Pointer(p)).FzState != 0 { - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+31035, libc.VaList(bp+16, (*Sqlite3rbu)(unsafe.Pointer(p)).FzState)) + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+31082, libc.VaList(bp+16, (*Sqlite3rbu)(unsafe.Pointer(p)).FzState)) libc.Xmemcpy(tls, p+24, ts+3279, uint32(4)) } else { libc.Xmemcpy(tls, p+24, ts+6434, uint32(4)) } - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+31053, libc.VaList(bp+24, p+24)) + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+31100, libc.VaList(bp+24, p+24)) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { var bOpen int32 = 0 @@ -127655,11 +127713,11 @@ return } (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31119, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31166, 0) } else { var zTarget uintptr var zExtra uintptr = uintptr(0) - if libc.Xstrlen(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu) >= Size_t(5) && 0 == libc.Xmemcmp(tls, ts+24199, (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu, uint32(5)) { + if libc.Xstrlen(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu) >= Size_t(5) && 0 == libc.Xmemcmp(tls, ts+24246, (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu, uint32(5)) { zExtra = (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu + 5 for *(*int8)(unsafe.Pointer(zExtra)) != 0 { if int32(*(*int8)(unsafe.Pointer(libc.PostIncUintptr(&zExtra, 1)))) == '?' 
{ @@ -127671,13 +127729,13 @@ } } - zTarget = Xsqlite3_mprintf(tls, ts+31151, + zTarget = Xsqlite3_mprintf(tls, ts+31198, libc.VaList(bp+32, Xsqlite3_db_filename(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+6434), func() uintptr { if zExtra == uintptr(0) { return ts + 1547 } - return ts + 31183 + return ts + 31230 }(), func() uintptr { if zExtra == uintptr(0) { return ts + 1547 @@ -127696,21 +127754,21 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_create_function(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+31185, -1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { + ts+31232, -1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rbuTmpInsertFunc})), uintptr(0), uintptr(0)) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_create_function(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+31200, 2, SQLITE_UTF8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + ts+31247, 2, SQLITE_UTF8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rbuFossilDeltaFunc})), uintptr(0), uintptr(0)) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_create_function(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, - ts+31217, -1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { + ts+31264, -1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rbuTargetNameFunc})), uintptr(0), uintptr(0)) } @@ -127718,7 +127776,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_file_control(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6434, SQLITE_FCNTL_RBU, p) } - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31233, 0) + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31280, 0) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_file_control(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6434, SQLITE_FCNTL_RBU, p) @@ -127726,7 +127784,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_NOTFOUND { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31261, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31308, 0) } } @@ -127755,14 +127813,14 @@ if pState == uintptr(0) { (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage = 0 if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31233, uintptr(0), uintptr(0), uintptr(0)) + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31280, uintptr(0), uintptr(0), uintptr(0)) } } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { var rc2 int32 (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage = RBU_STAGE_CAPTURE - rc2 = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31279, uintptr(0), uintptr(0), uintptr(0)) + rc2 = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31326, uintptr(0), uintptr(0), uintptr(0)) if rc2 != SQLITE_NOTICE { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = rc2 } @@ -127888,7 +127946,7 @@ func rbuExclusiveCheckpoint(tls *libc.TLS, db uintptr) int32 { var zUri uintptr = Xsqlite3_db_filename(tls, db, uintptr(0)) - return 
Xsqlite3_uri_boolean(tls, zUri, ts+31314, 0) + return Xsqlite3_uri_boolean(tls, zUri, ts+31361, 0) } func rbuMoveOalFile(tls *libc.TLS, p uintptr) { @@ -127903,8 +127961,8 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { zMove = Xsqlite3_db_filename(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+6434) } - zOal = Xsqlite3_mprintf(tls, ts+31339, libc.VaList(bp, zMove)) - zWal = Xsqlite3_mprintf(tls, ts+31346, libc.VaList(bp+8, zMove)) + zOal = Xsqlite3_mprintf(tls, ts+31386, libc.VaList(bp, zMove)) + zWal = Xsqlite3_mprintf(tls, ts+31393, libc.VaList(bp+8, zMove)) if zWal == uintptr(0) || zOal == uintptr(0) { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_NOMEM @@ -128021,7 +128079,7 @@ (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx == uintptr(0) && (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_IPK && *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(i))) != 0 && Xsqlite3_column_type(tls, (*RbuObjIter)(unsafe.Pointer(pIter)).FpSelect, i) == SQLITE_NULL { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_MISMATCH - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+23837, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+23884, 0) return } @@ -128114,7 +128172,7 @@ var iCookie int32 = 1000000 (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, dbread, bp+8, p+36, - ts+31353) + ts+31400) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { if SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 8))) { iCookie = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 8)), 0) @@ -128122,7 +128180,7 @@ rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 8))) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31375, libc.VaList(bp, iCookie+1)) + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31422, libc.VaList(bp, iCookie+1)) } } } @@ -128143,7 +128201,7 @@ rc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp+168, p+36, Xsqlite3_mprintf(tls, - ts+31402, + ts+31449, libc.VaList(bp, p+24, RBU_STATE_STAGE, eStage, RBU_STATE_TBL, (*Sqlite3rbu)(unsafe.Pointer(p)).Fobjiter.FzTbl, @@ -128173,9 +128231,9 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { *(*uintptr)(unsafe.Pointer(bp + 24)) = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp+24, p+36, - Xsqlite3_mprintf(tls, ts+31560, libc.VaList(bp, zPragma))) + Xsqlite3_mprintf(tls, ts+31607, libc.VaList(bp, zPragma))) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) { - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31575, + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31622, libc.VaList(bp+8, zPragma, Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 24)), 0))) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 24))) @@ -128189,10 +128247,10 @@ *(*uintptr)(unsafe.Pointer(bp)) = uintptr(0) *(*uintptr)(unsafe.Pointer(bp + 4)) = uintptr(0) - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31595, uintptr(0), uintptr(0), p+36) + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31642, uintptr(0), uintptr(0), p+36) if 
(*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp, p+36, - ts+31620) + ts+31667) } for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp))) == SQLITE_ROW { @@ -128206,12 +128264,12 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp, p+36, - ts+31728) + ts+31775) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+4, p+36, - ts+31793) + ts+31840) } for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp))) == SQLITE_ROW { @@ -128223,7 +128281,7 @@ (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_reset(tls, *(*uintptr)(unsafe.Pointer(bp + 4))) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31837, uintptr(0), uintptr(0), p+36) + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31884, uintptr(0), uintptr(0), p+36) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp))) @@ -128251,7 +128309,7 @@ if (*RbuObjIter)(unsafe.Pointer(pIter)).FbCleanup != 0 { if libc.Bool32((*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0)) == 0 && (*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed != 0 { rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, - ts+31862, libc.VaList(bp, p+24, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl)) + ts+31909, libc.VaList(bp, p+24, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl)) } } else { rbuObjIterPrepareAll(tls, p, pIter, 0) @@ -128373,7 +128431,7 @@ if rc == SQLITE_OK && !(int32((*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl) != 0) { rc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31890, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31937, 0) } if rc == SQLITE_OK { @@ -128389,7 +128447,7 @@ bp := tls.Alloc(12) defer tls.Free(12) - var zOal uintptr = rbuMPrintf(tls, p, ts+31339, libc.VaList(bp, (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget)) + var zOal uintptr = rbuMPrintf(tls, p, ts+31386, libc.VaList(bp, (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget)) if zOal != 0 { *(*uintptr)(unsafe.Pointer(bp + 8)) = uintptr(0) Xsqlite3_file_control(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6434, SQLITE_FCNTL_VFS_POINTER, bp+8) @@ -128406,7 +128464,7 @@ defer tls.Free(76) Xsqlite3_randomness(tls, int32(unsafe.Sizeof(int32(0))), bp+8) - Xsqlite3_snprintf(tls, int32(unsafe.Sizeof([64]int8{})), bp+12, ts+31915, libc.VaList(bp, *(*int32)(unsafe.Pointer(bp + 8)))) + Xsqlite3_snprintf(tls, int32(unsafe.Sizeof([64]int8{})), bp+12, ts+31962, libc.VaList(bp, *(*int32)(unsafe.Pointer(bp + 8)))) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3rbu_create_vfs(tls, bp+12, uintptr(0)) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { var pVfs uintptr = Xsqlite3_vfs_find(tls, bp+12) @@ -128440,7 +128498,7 @@ rc = prepareFreeAndCollectError(tls, db, bp+8, bp+12, Xsqlite3_mprintf(tls, - ts+31926, libc.VaList(bp, Xsqlite3_value_text(tls, *(*uintptr)(unsafe.Pointer(apVal)))))) + ts+31973, libc.VaList(bp, Xsqlite3_value_text(tls, *(*uintptr)(unsafe.Pointer(apVal)))))) if rc != SQLITE_OK { Xsqlite3_result_error(tls, pCtx, 
*(*uintptr)(unsafe.Pointer(bp + 12)), -1) } else { @@ -128470,13 +128528,13 @@ (*Sqlite3rbu)(unsafe.Pointer(p)).FnPhaseOneStep = int64(-1) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_create_function(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, - ts+31998, 1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { + ts+32045, 1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rbuIndexCntFunc})), uintptr(0), uintptr(0)) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp, p+36, - ts+32012) + ts+32059) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { if SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp))) { @@ -128487,7 +128545,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && bExists != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp, p+36, - ts+32069) + ts+32116) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { if SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp))) { (*Sqlite3rbu)(unsafe.Pointer(p)).FnPhaseOneStep = Xsqlite3_column_int64(tls, *(*uintptr)(unsafe.Pointer(bp)), 0) @@ -128561,7 +128619,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && (*Rbu_file)(unsafe.Pointer((*Sqlite3rbu)(unsafe.Pointer(p)).FpTargetFd)).FpWalFd != 0 { if (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage == RBU_STAGE_OAL { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32143, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32190, 0) } else if (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage == RBU_STAGE_MOVE { (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage = RBU_STAGE_CKPT (*Sqlite3rbu)(unsafe.Pointer(p)).FnStep = 0 @@ -128579,12 +128637,12 @@ }() if (*Rbu_file)(unsafe.Pointer(pFd)).FiCookie != (*RbuState)(unsafe.Pointer(pState)).FiCookie { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_BUSY - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32175, + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32222, libc.VaList(bp+8, func() uintptr { if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - return ts + 32207 + return ts + 32254 } - return ts + 32214 + return ts + 32261 }())) } } @@ -128608,14 +128666,14 @@ } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, db, ts+32221, uintptr(0), uintptr(0), p+36) + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, db, ts+32268, uintptr(0), uintptr(0), p+36) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { var frc int32 = Xsqlite3_file_control(tls, db, ts+6434, SQLITE_FCNTL_ZIPVFS, uintptr(0)) if frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, - db, ts+32237, uintptr(0), uintptr(0), p+36) + db, ts+32284, uintptr(0), uintptr(0), p+36) } } @@ -128669,7 +128727,7 @@ } if zState != 0 { var n int32 = int32(libc.Xstrlen(tls, zState)) - if n >= 7 && 0 == libc.Xmemcmp(tls, ts+32261, zState+uintptr(n-7), uint32(7)) { + if n >= 7 && 0 == libc.Xmemcmp(tls, ts+32308, zState+uintptr(n-7), uint32(7)) { return rbuMisuseError(tls) } } @@ -128696,7 +128754,7 @@ var i uint32 var nErrmsg Size_t = libc.Xstrlen(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg) for i = uint32(0); i < nErrmsg-Size_t(8); i++ { - if libc.Xmemcmp(tls, 
(*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg+uintptr(i), ts+30286, uint32(8)) == 0 { + if libc.Xmemcmp(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg+uintptr(i), ts+30333, uint32(8)) == 0 { var nDel int32 = 8 for int32(*(*int8)(unsafe.Pointer((*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg + uintptr(i+uint32(nDel))))) >= '0' && int32(*(*int8)(unsafe.Pointer((*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg + uintptr(i+uint32(nDel))))) <= '9' { nDel++ @@ -128732,7 +128790,7 @@ rbuObjIterFinalize(tls, p+48) if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) && (*Sqlite3rbu)(unsafe.Pointer(p)).Frc != SQLITE_OK && (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu != 0 { - var rc2 int32 = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+32269, uintptr(0), uintptr(0), uintptr(0)) + var rc2 int32 = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+32316, uintptr(0), uintptr(0), uintptr(0)) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_DONE && rc2 != SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = rc2 } @@ -128851,12 +128909,12 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { zBegin = ts + 14496 } else { - zBegin = ts + 32221 + zBegin = ts + 32268 } rc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, zBegin, uintptr(0), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { - rc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+32221, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+32268, uintptr(0), uintptr(0), uintptr(0)) } } @@ -129202,7 +129260,7 @@ })(unsafe.Pointer(&struct{ uintptr }{xControl})).f(tls, (*Rbu_file)(unsafe.Pointer(p)).FpReal, SQLITE_FCNTL_ZIPVFS, bp+16) if rc == SQLITE_OK { rc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(pRbu)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32296, 0) + (*Sqlite3rbu)(unsafe.Pointer(pRbu)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32343, 0) } else if rc == SQLITE_NOTFOUND { (*Sqlite3rbu)(unsafe.Pointer(pRbu)).FpTargetFd = p (*Rbu_file)(unsafe.Pointer(p)).FpRbu = pRbu @@ -129227,7 +129285,7 @@ if rc == SQLITE_OK && op == SQLITE_FCNTL_VFSNAME { var pRbuVfs uintptr = (*Rbu_file)(unsafe.Pointer(p)).FpRbuVfs var zIn uintptr = *(*uintptr)(unsafe.Pointer(pArg)) - var zOut uintptr = Xsqlite3_mprintf(tls, ts+32319, libc.VaList(bp, (*Rbu_vfs)(unsafe.Pointer(pRbuVfs)).Fbase.FzName, zIn)) + var zOut uintptr = Xsqlite3_mprintf(tls, ts+32366, libc.VaList(bp, (*Rbu_vfs)(unsafe.Pointer(pRbuVfs)).Fbase.FzName, zIn)) *(*uintptr)(unsafe.Pointer(pArg)) = zOut if zOut == uintptr(0) { rc = SQLITE_NOMEM @@ -129387,7 +129445,7 @@ } if oflags&SQLITE_OPEN_MAIN_DB != 0 && - Xsqlite3_uri_boolean(tls, zName, ts+32330, 0) != 0 { + Xsqlite3_uri_boolean(tls, zName, ts+32377, 0) != 0 { oflags = SQLITE_OPEN_TEMP_DB | SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE | SQLITE_OPEN_EXCLUSIVE | SQLITE_OPEN_DELETEONCLOSE zOpen = uintptr(0) } @@ -130212,7 +130270,7 @@ rc = Xsqlite3_table_column_metadata(tls, db, zDb, zThis, uintptr(0), uintptr(0), uintptr(0), uintptr(0), uintptr(0), uintptr(0)) if rc == SQLITE_OK { zPragma = Xsqlite3_mprintf(tls, - ts+32341, 0) + ts+32388, 0) } else if rc == SQLITE_ERROR { zPragma = Xsqlite3_mprintf(tls, ts+1547, 0) } else { @@ -130225,7 +130283,7 @@ return rc } } else { - zPragma = Xsqlite3_mprintf(tls, ts+32462, libc.VaList(bp, zDb, zThis)) + zPragma = Xsqlite3_mprintf(tls, ts+32509, libc.VaList(bp, zDb, zThis)) } if !(zPragma != 0) { *(*uintptr)(unsafe.Pointer(pazCol)) = uintptr(0) @@ -130904,9 +130962,9 @@ for i = 0; i < nCol; i++ { if 
*(*U8)(unsafe.Pointer(abPK + uintptr(i))) != 0 { - zRet = Xsqlite3_mprintf(tls, ts+32491, + zRet = Xsqlite3_mprintf(tls, ts+32538, libc.VaList(bp, zRet, zSep, zDb1, zTab, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*4)), zDb2, zTab, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*4)))) - zSep = ts + 21518 + zSep = ts + 21565 if zRet == uintptr(0) { break } @@ -130929,9 +130987,9 @@ if int32(*(*U8)(unsafe.Pointer(abPK + uintptr(i)))) == 0 { bHave = 1 zRet = Xsqlite3_mprintf(tls, - ts+32525, + ts+32572, libc.VaList(bp, zRet, zSep, zDb1, zTab, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*4)), zDb2, zTab, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*4)))) - zSep = ts + 32566 + zSep = ts + 32613 if zRet == uintptr(0) { break } @@ -130939,7 +130997,7 @@ } if bHave == 0 { - zRet = Xsqlite3_mprintf(tls, ts+7523, 0) + zRet = Xsqlite3_mprintf(tls, ts+7512, 0) } return zRet @@ -130950,7 +131008,7 @@ defer tls.Free(40) var zRet uintptr = Xsqlite3_mprintf(tls, - ts+32571, + ts+32618, libc.VaList(bp, zDb1, zTbl, zDb2, zTbl, zExpr)) return zRet } @@ -130993,7 +131051,7 @@ rc = SQLITE_NOMEM } else { var zStmt uintptr = Xsqlite3_mprintf(tls, - ts+32649, + ts+32696, libc.VaList(bp, (*Sqlite3_session)(unsafe.Pointer(pSession)).FzDb, (*SessionTable)(unsafe.Pointer(pTab)).FzName, zFrom, (*SessionTable)(unsafe.Pointer(pTab)).FzName, zExpr, zExpr2)) if zStmt == uintptr(0) { rc = SQLITE_NOMEM @@ -131120,7 +131178,7 @@ if !(pzErrMsg != 0) { goto __16 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+32702, 0) + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+32749, 0) __16: ; rc = SQLITE_SCHEMA @@ -131596,7 +131654,7 @@ if 0 == Xsqlite3_stricmp(tls, ts+11341, zTab) { zSql = Xsqlite3_mprintf(tls, - ts+32729, libc.VaList(bp, zDb)) + ts+32776, libc.VaList(bp, zDb)) if zSql == uintptr(0) { *(*int32)(unsafe.Pointer(bp + 20)) = SQLITE_NOMEM } @@ -131605,18 +131663,18 @@ var zSep uintptr = ts + 1547 *(*SessionBuffer)(unsafe.Pointer(bp + 8)) = SessionBuffer{} - sessionAppendStr(tls, bp+8, ts+32839, bp+20) + sessionAppendStr(tls, bp+8, ts+32886, bp+20) sessionAppendIdent(tls, bp+8, zDb, bp+20) sessionAppendStr(tls, bp+8, ts+1560, bp+20) sessionAppendIdent(tls, bp+8, zTab, bp+20) - sessionAppendStr(tls, bp+8, ts+32854, bp+20) + sessionAppendStr(tls, bp+8, ts+32901, bp+20) for i = 0; i < nCol; i++ { if *(*U8)(unsafe.Pointer(abPK + uintptr(i))) != 0 { sessionAppendStr(tls, bp+8, zSep, bp+20) sessionAppendIdent(tls, bp+8, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*4)), bp+20) - sessionAppendStr(tls, bp+8, ts+32862, bp+20) + sessionAppendStr(tls, bp+8, ts+32909, bp+20) sessionAppendInteger(tls, bp+8, i+1, bp+20) - zSep = ts + 21518 + zSep = ts + 21565 } } zSql = (*SessionBuffer)(unsafe.Pointer(bp + 8)).FaBuf @@ -131725,7 +131783,7 @@ if (*Sqlite3_session)(unsafe.Pointer(pSession)).Frc != 0 { return (*Sqlite3_session)(unsafe.Pointer(pSession)).Frc } - *(*int32)(unsafe.Pointer(bp + 24)) = Xsqlite3_exec(tls, (*Sqlite3_session)(unsafe.Pointer(pSession)).Fdb, ts+32868, uintptr(0), uintptr(0), uintptr(0)) + *(*int32)(unsafe.Pointer(bp + 24)) = Xsqlite3_exec(tls, (*Sqlite3_session)(unsafe.Pointer(pSession)).Fdb, ts+32915, uintptr(0), uintptr(0), uintptr(0)) if *(*int32)(unsafe.Pointer(bp + 24)) != SQLITE_OK { return *(*int32)(unsafe.Pointer(bp + 24)) } @@ -131817,7 +131875,7 @@ } Xsqlite3_free(tls, (*SessionBuffer)(unsafe.Pointer(bp+12)).FaBuf) - Xsqlite3_exec(tls, db, ts+32888, uintptr(0), uintptr(0), uintptr(0)) + Xsqlite3_exec(tls, db, ts+32935, uintptr(0), uintptr(0), uintptr(0)) 
Xsqlite3_mutex_leave(tls, Xsqlite3_db_mutex(tls, db)) return *(*int32)(unsafe.Pointer(bp + 24)) } @@ -132080,7 +132138,7 @@ rc = sessionInputBuffer(tls, pIn, 9) if rc == SQLITE_OK { if (*SessionInput)(unsafe.Pointer(pIn)).FiNext >= (*SessionInput)(unsafe.Pointer(pIn)).FnData { - rc = Xsqlite3CorruptError(tls, 219078) + rc = Xsqlite3CorruptError(tls, 219169) } else { eType = int32(*(*U8)(unsafe.Pointer((*SessionInput)(unsafe.Pointer(pIn)).FaData + uintptr(libc.PostIncInt32(&(*SessionInput)(unsafe.Pointer(pIn)).FiNext, 1))))) @@ -132103,7 +132161,7 @@ rc = sessionInputBuffer(tls, pIn, *(*int32)(unsafe.Pointer(bp))) if rc == SQLITE_OK { if *(*int32)(unsafe.Pointer(bp)) < 0 || *(*int32)(unsafe.Pointer(bp)) > (*SessionInput)(unsafe.Pointer(pIn)).FnData-(*SessionInput)(unsafe.Pointer(pIn)).FiNext { - rc = Xsqlite3CorruptError(tls, 219098) + rc = Xsqlite3CorruptError(tls, 219189) } else { var enc U8 = func() uint8 { if eType == SQLITE_TEXT { @@ -132145,7 +132203,7 @@ nRead = nRead + sessionVarintGet(tls, (*SessionInput)(unsafe.Pointer(pIn)).FaData+uintptr((*SessionInput)(unsafe.Pointer(pIn)).FiNext+nRead), bp) if *(*int32)(unsafe.Pointer(bp)) < 0 || *(*int32)(unsafe.Pointer(bp)) > 65536 { - rc = Xsqlite3CorruptError(tls, 219152) + rc = Xsqlite3CorruptError(tls, 219243) } else { rc = sessionInputBuffer(tls, pIn, nRead+*(*int32)(unsafe.Pointer(bp))+100) nRead = nRead + *(*int32)(unsafe.Pointer(bp)) @@ -132206,7 +132264,7 @@ (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Ftblhdr.FnBuf = 0 sessionBufferGrow(tls, p+44, int64(nByte), bp+4) } else { - *(*int32)(unsafe.Pointer(bp + 4)) = Xsqlite3CorruptError(tls, 219240) + *(*int32)(unsafe.Pointer(bp + 4)) = Xsqlite3CorruptError(tls, 219331) } } @@ -132280,13 +132338,13 @@ } if (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FzTab == uintptr(0) || (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FbPatchset != 0 && (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FbInvert != 0 { - return libc.AssignPtrInt32(p+68, Xsqlite3CorruptError(tls, 219326)) + return libc.AssignPtrInt32(p+68, Xsqlite3CorruptError(tls, 219417)) } (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fop = int32(op) (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FbIndirect = int32(*(*U8)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fin.FaData + uintptr(libc.PostIncInt32(&(*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fin.FiNext, 1))))) if (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fop != SQLITE_UPDATE && (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fop != SQLITE_DELETE && (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fop != SQLITE_INSERT { - return libc.AssignPtrInt32(p+68, Xsqlite3CorruptError(tls, 219332)) + return libc.AssignPtrInt32(p+68, Xsqlite3CorruptError(tls, 219423)) } if paRec != 0 { @@ -132348,7 +132406,7 @@ if *(*U8)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FabPK + uintptr(i))) != 0 { *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FapValue + uintptr(i)*4)) = *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FapValue + uintptr(i+(*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FnCol)*4)) if *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FapValue + uintptr(i)*4)) == uintptr(0) { - return libc.AssignPtrInt32(p+68, Xsqlite3CorruptError(tls, 219376)) + return libc.AssignPtrInt32(p+68, Xsqlite3CorruptError(tls, 219467)) } *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FapValue + uintptr(i+(*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FnCol)*4)) = uintptr(0) } @@ -132721,7 
+132779,7 @@ goto __6 __11: - *(*int32)(unsafe.Pointer(bp + 32)) = Xsqlite3CorruptError(tls, 219741) + *(*int32)(unsafe.Pointer(bp + 32)) = Xsqlite3CorruptError(tls, 219832) goto finished_invert __6: ; @@ -132898,34 +132956,34 @@ (*SessionUpdate)(unsafe.Pointer(pUp)).FaMask = pUp + 1*12 libc.Xmemcpy(tls, (*SessionUpdate)(unsafe.Pointer(pUp)).FaMask, (*SessionApplyCtx)(unsafe.Pointer(p)).FaUpdateMask, uint32(nU32)*uint32(unsafe.Sizeof(U32(0)))) - sessionAppendStr(tls, bp, ts+32906, bp+12) + sessionAppendStr(tls, bp, ts+32953, bp+12) sessionAppendIdent(tls, bp, (*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FzTab, bp+12) - sessionAppendStr(tls, bp, ts+32919, bp+12) + sessionAppendStr(tls, bp, ts+32966, bp+12) for ii = 0; ii < (*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FnCol; ii++ { if int32(*(*U8)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FabPK + uintptr(ii)))) == 0 && *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FapValue + uintptr((*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FnCol+ii)*4)) != 0 { sessionAppendStr(tls, bp, zSep, bp+12) sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(ii)*4)), bp+12) - sessionAppendStr(tls, bp, ts+32925, bp+12) + sessionAppendStr(tls, bp, ts+32972, bp+12) sessionAppendInteger(tls, bp, ii*2+1, bp+12) zSep = ts + 14607 } } zSep = ts + 1547 - sessionAppendStr(tls, bp, ts+32854, bp+12) + sessionAppendStr(tls, bp, ts+32901, bp+12) for ii = 0; ii < (*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FnCol; ii++ { if *(*U8)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FabPK + uintptr(ii))) != 0 || bPatchset == 0 && *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FapValue + uintptr(ii)*4)) != 0 { sessionAppendStr(tls, bp, zSep, bp+12) if bStat1 != 0 && ii == 1 { sessionAppendStr(tls, bp, - ts+32930, bp+12) + ts+32977, bp+12) } else { sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(ii)*4)), bp+12) - sessionAppendStr(tls, bp, ts+32862, bp+12) + sessionAppendStr(tls, bp, ts+32909, bp+12) sessionAppendInteger(tls, bp, ii*2+2, bp+12) } - zSep = ts + 21518 + zSep = ts + 21565 } } @@ -132977,34 +133035,34 @@ *(*SessionBuffer)(unsafe.Pointer(bp)) = SessionBuffer{} var nPk int32 = 0 - sessionAppendStr(tls, bp, ts+33005, bp+12) + sessionAppendStr(tls, bp, ts+33052, bp+12) sessionAppendIdent(tls, bp, zTab, bp+12) - sessionAppendStr(tls, bp, ts+32854, bp+12) + sessionAppendStr(tls, bp, ts+32901, bp+12) for i = 0; i < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol; i++ { if *(*U8)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FabPK + uintptr(i))) != 0 { nPk++ sessionAppendStr(tls, bp, zSep, bp+12) sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(i)*4)), bp+12) - sessionAppendStr(tls, bp, ts+32925, bp+12) + sessionAppendStr(tls, bp, ts+32972, bp+12) sessionAppendInteger(tls, bp, i+1, bp+12) - zSep = ts + 21518 + zSep = ts + 21565 } } if nPk < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol { - sessionAppendStr(tls, bp, ts+33023, bp+12) + sessionAppendStr(tls, bp, ts+33070, bp+12) sessionAppendInteger(tls, bp, (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol+1, bp+12) - sessionAppendStr(tls, bp, ts+32566, bp+12) + sessionAppendStr(tls, bp, ts+32613, bp+12) zSep = ts + 1547 for i = 0; i < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol; i++ { if !(int32(*(*U8)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FabPK + 
uintptr(i)))) != 0) { sessionAppendStr(tls, bp, zSep, bp+12) sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(i)*4)), bp+12) - sessionAppendStr(tls, bp, ts+32862, bp+12) + sessionAppendStr(tls, bp, ts+32909, bp+12) sessionAppendInteger(tls, bp, i+1, bp+12) - zSep = ts + 33031 + zSep = ts + 33078 } } sessionAppendStr(tls, bp, ts+4950, bp+12) @@ -133031,9 +133089,9 @@ var i int32 *(*SessionBuffer)(unsafe.Pointer(bp)) = SessionBuffer{} - sessionAppendStr(tls, bp, ts+33036, bp+12) + sessionAppendStr(tls, bp, ts+33083, bp+12) sessionAppendIdent(tls, bp, zTab, bp+12) - sessionAppendStr(tls, bp, ts+21524, bp+12) + sessionAppendStr(tls, bp, ts+21571, bp+12) for i = 0; i < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol; i++ { if i != 0 { sessionAppendStr(tls, bp, ts+14607, bp+12) @@ -133041,9 +133099,9 @@ sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(i)*4)), bp+12) } - sessionAppendStr(tls, bp, ts+33054, bp+12) + sessionAppendStr(tls, bp, ts+33101, bp+12) for i = 1; i < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol; i++ { - sessionAppendStr(tls, bp, ts+33065, bp+12) + sessionAppendStr(tls, bp, ts+33112, bp+12) } sessionAppendStr(tls, bp, ts+4950, bp+12) @@ -133062,11 +133120,11 @@ var rc int32 = sessionSelectRow(tls, db, ts+11341, p) if rc == SQLITE_OK { rc = sessionPrepare(tls, db, p+8, - ts+33069) + ts+33116) } if rc == SQLITE_OK { rc = sessionPrepare(tls, db, p+4, - ts+33182) + ts+33229) } return rc } @@ -133094,7 +133152,7 @@ f func(*libc.TLS, uintptr, int32, uintptr) int32 })(unsafe.Pointer(&struct{ uintptr }{xValue})).f(tls, pIter, i, bp) if *(*uintptr)(unsafe.Pointer(bp)) == uintptr(0) { - rc = Xsqlite3CorruptError(tls, 220219) + rc = Xsqlite3CorruptError(tls, 220310) } else { rc = sessionBindValue(tls, pStmt, i+1, *(*uintptr)(unsafe.Pointer(bp))) } @@ -133347,7 +133405,7 @@ if *(*int32)(unsafe.Pointer(bp + 4)) != 0 { rc = sessionApplyOneOp(tls, pIter, pApply, xConflict, pCtx, uintptr(0), uintptr(0)) } else if *(*int32)(unsafe.Pointer(bp)) != 0 { - rc = Xsqlite3_exec(tls, db, ts+33326, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33373, uintptr(0), uintptr(0), uintptr(0)) if rc == SQLITE_OK { rc = sessionBindRow(tls, pIter, *(*uintptr)(unsafe.Pointer(&struct { @@ -133363,7 +133421,7 @@ rc = sessionApplyOneOp(tls, pIter, pApply, xConflict, pCtx, uintptr(0), uintptr(0)) } if rc == SQLITE_OK { - rc = Xsqlite3_exec(tls, db, ts+33347, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33394, uintptr(0), uintptr(0), uintptr(0)) } } } @@ -133436,10 +133494,10 @@ (*SessionApplyCtx)(unsafe.Pointer(bp + 48)).FbInvertConstraints = libc.BoolInt32(!!(flags&SQLITE_CHANGESETAPPLY_INVERT != 0)) Xsqlite3_mutex_enter(tls, Xsqlite3_db_mutex(tls, db)) if flags&SQLITE_CHANGESETAPPLY_NOSAVEPOINT == 0 { - rc = Xsqlite3_exec(tls, db, ts+33366, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33413, uintptr(0), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { - rc = Xsqlite3_exec(tls, db, ts+33392, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33439, uintptr(0), uintptr(0), uintptr(0)) } for rc == SQLITE_OK && SQLITE_ROW == Xsqlite3changeset_next(tls, pIter) { Xsqlite3changeset_op(tls, pIter, bp+124, bp+128, bp+132, uintptr(0)) @@ -133498,16 +133556,16 @@ if (*SessionApplyCtx)(unsafe.Pointer(bp+48)).FnCol == 0 { schemaMismatch = 1 Xsqlite3_log(tls, SQLITE_SCHEMA, - ts+33422, libc.VaList(bp+8, 
*(*uintptr)(unsafe.Pointer(bp + 140)))) + ts+33469, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(bp + 140)))) } else if (*SessionApplyCtx)(unsafe.Pointer(bp+48)).FnCol < *(*int32)(unsafe.Pointer(bp + 128)) { schemaMismatch = 1 Xsqlite3_log(tls, SQLITE_SCHEMA, - ts+33466, + ts+33513, libc.VaList(bp+16, *(*uintptr)(unsafe.Pointer(bp + 140)), (*SessionApplyCtx)(unsafe.Pointer(bp+48)).FnCol, *(*int32)(unsafe.Pointer(bp + 128)))) } else if *(*int32)(unsafe.Pointer(bp + 128)) < nMinCol || libc.Xmemcmp(tls, (*SessionApplyCtx)(unsafe.Pointer(bp+48)).FabPK, *(*uintptr)(unsafe.Pointer(bp + 136)), uint32(*(*int32)(unsafe.Pointer(bp + 128)))) != 0 { schemaMismatch = 1 Xsqlite3_log(tls, SQLITE_SCHEMA, - ts+33537, libc.VaList(bp+40, *(*uintptr)(unsafe.Pointer(bp + 140)))) + ts+33584, libc.VaList(bp+40, *(*uintptr)(unsafe.Pointer(bp + 140)))) } else { (*SessionApplyCtx)(unsafe.Pointer(bp + 48)).FnCol = *(*int32)(unsafe.Pointer(bp + 128)) if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(bp + 140)), ts+11341) { @@ -133561,14 +133619,14 @@ } } } - Xsqlite3_exec(tls, db, ts+33597, uintptr(0), uintptr(0), uintptr(0)) + Xsqlite3_exec(tls, db, ts+33644, uintptr(0), uintptr(0), uintptr(0)) if flags&SQLITE_CHANGESETAPPLY_NOSAVEPOINT == 0 { if rc == SQLITE_OK { - rc = Xsqlite3_exec(tls, db, ts+33627, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33674, uintptr(0), uintptr(0), uintptr(0)) } else { - Xsqlite3_exec(tls, db, ts+33651, uintptr(0), uintptr(0), uintptr(0)) - Xsqlite3_exec(tls, db, ts+33627, uintptr(0), uintptr(0), uintptr(0)) + Xsqlite3_exec(tls, db, ts+33698, uintptr(0), uintptr(0), uintptr(0)) + Xsqlite3_exec(tls, db, ts+33674, uintptr(0), uintptr(0), uintptr(0)) } } @@ -134800,7 +134858,7 @@ fts5yy_pop_parser_stack(tls, fts5yypParser) } - sqlite3Fts5ParseError(tls, pParse, ts+33679, 0) + sqlite3Fts5ParseError(tls, pParse, ts+33726, 0) (*Fts5yyParser)(unsafe.Pointer(fts5yypParser)).FpParse = pParse @@ -135088,7 +135146,7 @@ _ = fts5yymajor sqlite3Fts5ParseError(tls, - pParse, ts+33707, libc.VaList(bp, fts5yyminor.Fn, fts5yyminor.Fp)) + pParse, ts+33754, libc.VaList(bp, fts5yyminor.Fn, fts5yyminor.Fp)) (*Fts5yyParser)(unsafe.Pointer(fts5yypParser)).FpParse = pParse @@ -135273,7 +135331,7 @@ if n < 0 { n = int32(libc.Xstrlen(tls, z)) } - (*HighlightContext)(unsafe.Pointer(p)).FzOut = Xsqlite3_mprintf(tls, ts+33738, libc.VaList(bp, (*HighlightContext)(unsafe.Pointer(p)).FzOut, n, z)) + (*HighlightContext)(unsafe.Pointer(p)).FzOut = Xsqlite3_mprintf(tls, ts+33785, libc.VaList(bp, (*HighlightContext)(unsafe.Pointer(p)).FzOut, n, z)) if (*HighlightContext)(unsafe.Pointer(p)).FzOut == uintptr(0) { *(*int32)(unsafe.Pointer(pRc)) = SQLITE_NOMEM } @@ -135341,7 +135399,7 @@ var iCol int32 if nVal != 3 { - var zErr uintptr = ts + 33745 + var zErr uintptr = ts + 33792 Xsqlite3_result_error(tls, pCtx, zErr, -1) return } @@ -135522,7 +135580,7 @@ var nCol int32 if nVal != 5 { - var zErr uintptr = ts + 33795 + var zErr uintptr = ts + 33842 Xsqlite3_result_error(tls, pCtx, zErr, -1) return } @@ -135845,13 +135903,13 @@ defer tls.Free(48) *(*[3]Builtin)(unsafe.Pointer(bp)) = [3]Builtin{ - {FzFunc: ts + 33843, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { + {FzFunc: ts + 33890, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr, int32, uintptr) }{fts5SnippetFunction}))}, - {FzFunc: ts + 33851, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { + {FzFunc: ts + 33898, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr, 
int32, uintptr) }{fts5HighlightFunction}))}, - {FzFunc: ts + 33861, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { + {FzFunc: ts + 33908, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr, int32, uintptr) }{fts5Bm25Function}))}, } @@ -136401,7 +136459,7 @@ *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_OK var nCmd int32 = int32(libc.Xstrlen(tls, zCmd)) - if Xsqlite3_strnicmp(tls, ts+33866, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+33913, zCmd, nCmd) == 0 { var nByte int32 = int32(uint32(unsafe.Sizeof(int32(0))) * uint32(FTS5_MAX_PREFIX_INDEXES)) var p uintptr var bFirst int32 = 1 @@ -136428,14 +136486,14 @@ break } if int32(*(*int8)(unsafe.Pointer(p))) < '0' || int32(*(*int8)(unsafe.Pointer(p))) > '9' { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+33873, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+33920, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR break } if (*Fts5Config)(unsafe.Pointer(pConfig)).FnPrefix == FTS5_MAX_PREFIX_INDEXES { *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, - ts+33904, libc.VaList(bp, FTS5_MAX_PREFIX_INDEXES)) + ts+33951, libc.VaList(bp, FTS5_MAX_PREFIX_INDEXES)) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR break } @@ -136446,7 +136504,7 @@ } if nPre <= 0 || nPre >= 1000 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+33937, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+33984, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR break } @@ -136459,7 +136517,7 @@ return *(*int32)(unsafe.Pointer(bp + 40)) } - if Xsqlite3_strnicmp(tls, ts+33974, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+34021, zCmd, nCmd) == 0 { var p uintptr = zArg var nArg Sqlite3_int64 = Sqlite3_int64(libc.Xstrlen(tls, zArg) + Size_t(1)) var azArg uintptr = sqlite3Fts5MallocZero(tls, bp+40, Sqlite3_int64(unsafe.Sizeof(uintptr(0)))*nArg) @@ -136468,7 +136526,7 @@ if azArg != 0 && pSpace != 0 { if (*Fts5Config)(unsafe.Pointer(pConfig)).FpTok != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+33983, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34030, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { for nArg = int64(0); p != 0 && *(*int8)(unsafe.Pointer(p)) != 0; nArg++ { @@ -136487,7 +136545,7 @@ } } if p == uintptr(0) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34016, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34063, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { *(*int32)(unsafe.Pointer(bp + 40)) = sqlite3Fts5GetTokenizer(tls, pGlobal, @@ -136502,14 +136560,14 @@ return *(*int32)(unsafe.Pointer(bp + 40)) } - if Xsqlite3_strnicmp(tls, ts+34050, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+34097, zCmd, nCmd) == 0 { if (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent != FTS5_CONTENT_NORMAL { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34058, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34105, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { if *(*int8)(unsafe.Pointer(zArg)) != 0 { (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent = FTS5_CONTENT_EXTERNAL - (*Fts5Config)(unsafe.Pointer(pConfig)).FzContent = sqlite3Fts5Mprintf(tls, bp+40, ts+34090, libc.VaList(bp+8, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, zArg)) + (*Fts5Config)(unsafe.Pointer(pConfig)).FzContent = sqlite3Fts5Mprintf(tls, bp+40, ts+34137, libc.VaList(bp+8, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, zArg)) } 
else { (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent = FTS5_CONTENT_NONE } @@ -136517,9 +136575,9 @@ return *(*int32)(unsafe.Pointer(bp + 40)) } - if Xsqlite3_strnicmp(tls, ts+34096, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+34143, zCmd, nCmd) == 0 { if (*Fts5Config)(unsafe.Pointer(pConfig)).FzContentRowid != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34110, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34157, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FzContentRowid = sqlite3Fts5Strndup(tls, bp+40, zArg, -1) @@ -136527,9 +136585,9 @@ return *(*int32)(unsafe.Pointer(bp + 40)) } - if Xsqlite3_strnicmp(tls, ts+34148, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+34195, zCmd, nCmd) == 0 { if int32(*(*int8)(unsafe.Pointer(zArg))) != '0' && int32(*(*int8)(unsafe.Pointer(zArg))) != '1' || int32(*(*int8)(unsafe.Pointer(zArg + 1))) != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34159, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34206, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize = libc.Bool32(int32(*(*int8)(unsafe.Pointer(zArg))) == '1') @@ -136541,17 +136599,17 @@ *(*[4]Fts5Enum)(unsafe.Pointer(bp + 44)) = [4]Fts5Enum{ {FzName: ts + 8019, FeVal: FTS5_DETAIL_NONE}, {FzName: ts + 17338}, - {FzName: ts + 34194, FeVal: FTS5_DETAIL_COLUMNS}, + {FzName: ts + 34241, FeVal: FTS5_DETAIL_COLUMNS}, {}, } if libc.AssignPtrInt32(bp+40, fts5ConfigSetEnum(tls, bp+44, zArg, pConfig+48)) != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34202, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34249, 0) } return *(*int32)(unsafe.Pointer(bp + 40)) } - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34233, libc.VaList(bp+24, nCmd, zCmd)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34280, libc.VaList(bp+24, nCmd, zCmd)) return SQLITE_ERROR } @@ -136598,15 +136656,15 @@ defer tls.Free(16) var rc int32 = SQLITE_OK - if 0 == Xsqlite3_stricmp(tls, zCol, ts+22184) || + if 0 == Xsqlite3_stricmp(tls, zCol, ts+22231) || 0 == Xsqlite3_stricmp(tls, zCol, ts+16260) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34261, libc.VaList(bp, zCol)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34308, libc.VaList(bp, zCol)) rc = SQLITE_ERROR } else if zArg != 0 { - if 0 == Xsqlite3_stricmp(tls, zArg, ts+34291) { + if 0 == Xsqlite3_stricmp(tls, zArg, ts+34338) { *(*U8)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(p)).FabUnindexed + uintptr((*Fts5Config)(unsafe.Pointer(p)).FnCol))) = U8(1) } else { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34301, libc.VaList(bp+8, zArg)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34348, libc.VaList(bp+8, zArg)) rc = SQLITE_ERROR } } @@ -136623,13 +136681,13 @@ *(*int32)(unsafe.Pointer(bp + 24)) = SQLITE_OK *(*Fts5Buffer)(unsafe.Pointer(bp + 28)) = Fts5Buffer{} - sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+28, ts+34332, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(p)).FzContentRowid)) + sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+28, ts+34379, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(p)).FzContentRowid)) if (*Fts5Config)(unsafe.Pointer(p)).FeContent != FTS5_CONTENT_NONE { for i = 0; i < (*Fts5Config)(unsafe.Pointer(p)).FnCol; i++ { if (*Fts5Config)(unsafe.Pointer(p)).FeContent == FTS5_CONTENT_EXTERNAL { - 
sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+28, ts+34337, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(p)).FazCol + uintptr(i)*4)))) + sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+28, ts+34384, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(p)).FazCol + uintptr(i)*4)))) } else { - sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+28, ts+34344, libc.VaList(bp+16, i)) + sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+28, ts+34391, libc.VaList(bp+16, i)) } } } @@ -136667,8 +136725,8 @@ (*Fts5Config)(unsafe.Pointer(pRet)).FzName = sqlite3Fts5Strndup(tls, bp+40, *(*uintptr)(unsafe.Pointer(azArg + 2*4)), -1) (*Fts5Config)(unsafe.Pointer(pRet)).FbColumnsize = 1 (*Fts5Config)(unsafe.Pointer(pRet)).FeDetail = FTS5_DETAIL_FULL - if *(*int32)(unsafe.Pointer(bp + 40)) == SQLITE_OK && Xsqlite3_stricmp(tls, (*Fts5Config)(unsafe.Pointer(pRet)).FzName, ts+22184) == 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34352, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pRet)).FzName)) + if *(*int32)(unsafe.Pointer(bp + 40)) == SQLITE_OK && Xsqlite3_stricmp(tls, (*Fts5Config)(unsafe.Pointer(pRet)).FzName, ts+22231) == 0 { + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34399, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pRet)).FzName)) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } @@ -136700,7 +136758,7 @@ if *(*int32)(unsafe.Pointer(bp + 40)) == SQLITE_OK { if z == uintptr(0) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34381, libc.VaList(bp+8, zOrig)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34428, libc.VaList(bp+8, zOrig)) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { if bOption != 0 { @@ -136737,14 +136795,14 @@ var zTail uintptr = uintptr(0) if (*Fts5Config)(unsafe.Pointer(pRet)).FeContent == FTS5_CONTENT_NORMAL { - zTail = ts + 34050 + zTail = ts + 34097 } else if (*Fts5Config)(unsafe.Pointer(pRet)).FbColumnsize != 0 { - zTail = ts + 34401 + zTail = ts + 34448 } if zTail != 0 { (*Fts5Config)(unsafe.Pointer(pRet)).FzContent = sqlite3Fts5Mprintf(tls, - bp+40, ts+34409, libc.VaList(bp+16, (*Fts5Config)(unsafe.Pointer(pRet)).FzDb, (*Fts5Config)(unsafe.Pointer(pRet)).FzName, zTail)) + bp+40, ts+34456, libc.VaList(bp+16, (*Fts5Config)(unsafe.Pointer(pRet)).FzDb, (*Fts5Config)(unsafe.Pointer(pRet)).FzName, zTail)) } } @@ -136793,7 +136851,7 @@ *(*int32)(unsafe.Pointer(bp + 48)) = SQLITE_OK var zSql uintptr - zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34420, 0) + zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34467, 0) for i = 0; zSql != 0 && i < (*Fts5Config)(unsafe.Pointer(pConfig)).FnCol; i++ { var zSep uintptr = func() uintptr { if i == 0 { @@ -136801,10 +136859,10 @@ } return ts + 14607 }() - zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34436, libc.VaList(bp, zSql, zSep, *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(pConfig)).FazCol + uintptr(i)*4)))) + zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34483, libc.VaList(bp, zSql, zSep, *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(pConfig)).FazCol + uintptr(i)*4)))) } - zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34443, - libc.VaList(bp+24, zSql, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, ts+22184)) + zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34490, + libc.VaList(bp+24, zSql, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, ts+22231)) if zSql != 0 { *(*int32)(unsafe.Pointer(bp + 48)) = Xsqlite3_declare_vtab(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, zSql) @@ -136914,7 +136972,7 @@ var rc int32 = SQLITE_OK - if 
0 == Xsqlite3_stricmp(tls, zKey, ts+34469) { + if 0 == Xsqlite3_stricmp(tls, zKey, ts+34516) { var pgsz int32 = 0 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { pgsz = Xsqlite3_value_int(tls, pVal) @@ -136924,7 +136982,7 @@ } else { (*Fts5Config)(unsafe.Pointer(pConfig)).Fpgsz = pgsz } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34474) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34521) { var nHashSize int32 = -1 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { nHashSize = Xsqlite3_value_int(tls, pVal) @@ -136934,7 +136992,7 @@ } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FnHashSize = nHashSize } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34483) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34530) { var nAutomerge int32 = -1 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { nAutomerge = Xsqlite3_value_int(tls, pVal) @@ -136947,7 +137005,7 @@ } (*Fts5Config)(unsafe.Pointer(pConfig)).FnAutomerge = nAutomerge } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34493) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34540) { var nUsermerge int32 = -1 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { nUsermerge = Xsqlite3_value_int(tls, pVal) @@ -136957,7 +137015,7 @@ } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FnUsermerge = nUsermerge } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34503) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34550) { var nCrisisMerge int32 = -1 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { nCrisisMerge = Xsqlite3_value_int(tls, pVal) @@ -136973,7 +137031,7 @@ } (*Fts5Config)(unsafe.Pointer(pConfig)).FnCrisisMerge = nCrisisMerge } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+22184) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+22231) { var zIn uintptr = Xsqlite3_value_text(tls, pVal) rc = sqlite3Fts5ConfigParseRank(tls, zIn, bp, bp+4) @@ -136996,7 +137054,7 @@ bp := tls.Alloc(44) defer tls.Free(44) - var zSelect uintptr = ts + 34515 + var zSelect uintptr = ts + 34562 var zSql uintptr *(*uintptr)(unsafe.Pointer(bp + 36)) = uintptr(0) *(*int32)(unsafe.Pointer(bp + 32)) = SQLITE_OK @@ -137018,7 +137076,7 @@ for SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 36))) { var zK uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 36)), 0) var pVal uintptr = Xsqlite3_column_value(tls, *(*uintptr)(unsafe.Pointer(bp + 36)), 1) - if 0 == Xsqlite3_stricmp(tls, zK, ts+34547) { + if 0 == Xsqlite3_stricmp(tls, zK, ts+34594) { iVersion = Xsqlite3_value_int(tls, pVal) } else { *(*int32)(unsafe.Pointer(bp + 40)) = 0 @@ -137032,7 +137090,7 @@ *(*int32)(unsafe.Pointer(bp + 32)) = SQLITE_ERROR if (*Fts5Config)(unsafe.Pointer(pConfig)).FpzErrmsg != 0 { *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(pConfig)).FpzErrmsg)) = Xsqlite3_mprintf(tls, - ts+34555, + ts+34602, libc.VaList(bp+16, iVersion, FTS5_CURRENT_VERSION)) } } @@ -137130,7 +137188,7 @@ } } if int32(*(*int8)(unsafe.Pointer(z2))) == 0 { - sqlite3Fts5ParseError(tls, pParse, ts+34620, 0) + sqlite3Fts5ParseError(tls, pParse, ts+34667, 0) return FTS5_EOF } } @@ -137143,20 +137201,20 @@ { var z2 uintptr if sqlite3Fts5IsBareword(tls, *(*int8)(unsafe.Pointer(z))) == 0 { - sqlite3Fts5ParseError(tls, pParse, ts+34640, libc.VaList(bp, z)) + sqlite3Fts5ParseError(tls, pParse, ts+34687, libc.VaList(bp, z)) return FTS5_EOF } tok = FTS5_STRING for z2 = z + 1; sqlite3Fts5IsBareword(tls, *(*int8)(unsafe.Pointer(z2))) != 0; z2++ { } (*Fts5Token)(unsafe.Pointer(pToken)).Fn = (int32(z2) - int32(z)) / 1 - if 
(*Fts5Token)(unsafe.Pointer(pToken)).Fn == 2 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+34671, uint32(2)) == 0 { + if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 2 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+34718, uint32(2)) == 0 { tok = FTS5_OR } - if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 3 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+34674, uint32(3)) == 0 { + if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 3 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+34721, uint32(3)) == 0 { tok = FTS5_NOT } - if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 3 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+30056, uint32(3)) == 0 { + if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 3 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+30103, uint32(3)) == 0 { tok = FTS5_AND } break @@ -138933,9 +138991,9 @@ bp := tls.Alloc(16) defer tls.Free(16) - if (*Fts5Token)(unsafe.Pointer(pTok)).Fn != 4 || libc.Xmemcmp(tls, ts+34678, (*Fts5Token)(unsafe.Pointer(pTok)).Fp, uint32(4)) != 0 { + if (*Fts5Token)(unsafe.Pointer(pTok)).Fn != 4 || libc.Xmemcmp(tls, ts+34725, (*Fts5Token)(unsafe.Pointer(pTok)).Fp, uint32(4)) != 0 { sqlite3Fts5ParseError(tls, - pParse, ts+33707, libc.VaList(bp, (*Fts5Token)(unsafe.Pointer(pTok)).Fn, (*Fts5Token)(unsafe.Pointer(pTok)).Fp)) + pParse, ts+33754, libc.VaList(bp, (*Fts5Token)(unsafe.Pointer(pTok)).Fn, (*Fts5Token)(unsafe.Pointer(pTok)).Fp)) } } @@ -138951,7 +139009,7 @@ var c int8 = *(*int8)(unsafe.Pointer((*Fts5Token)(unsafe.Pointer(p)).Fp + uintptr(i))) if int32(c) < '0' || int32(c) > '9' { sqlite3Fts5ParseError(tls, - pParse, ts+34683, libc.VaList(bp, (*Fts5Token)(unsafe.Pointer(p)).Fn, (*Fts5Token)(unsafe.Pointer(p)).Fp)) + pParse, ts+34730, libc.VaList(bp, (*Fts5Token)(unsafe.Pointer(p)).Fn, (*Fts5Token)(unsafe.Pointer(p)).Fp)) return } nNear = nNear*10 + (int32(*(*int8)(unsafe.Pointer((*Fts5Token)(unsafe.Pointer(p)).Fp + uintptr(i)))) - '0') @@ -139038,7 +139096,7 @@ } } if iCol == (*Fts5Config)(unsafe.Pointer(pConfig)).FnCol { - sqlite3Fts5ParseError(tls, pParse, ts+20521, libc.VaList(bp, z)) + sqlite3Fts5ParseError(tls, pParse, ts+20568, libc.VaList(bp, z)) } else { pRet = fts5ParseColset(tls, pParse, pColset, iCol) } @@ -139119,7 +139177,7 @@ *(*uintptr)(unsafe.Pointer(bp)) = pColset if (*Fts5Config)(unsafe.Pointer((*Fts5Parse)(unsafe.Pointer(pParse)).FpConfig)).FeDetail == FTS5_DETAIL_NONE { sqlite3Fts5ParseError(tls, pParse, - ts+34712, 0) + ts+34759, 0) } else { fts5ParseSetColset(tls, pParse, pExpr, pColset, bp) } @@ -139289,12 +139347,12 @@ (*Fts5ExprPhrase)(unsafe.Pointer(pPhrase)).FnTerm > 1 || (*Fts5ExprPhrase)(unsafe.Pointer(pPhrase)).FnTerm > 0 && (*Fts5ExprTerm)(unsafe.Pointer(pPhrase+20)).FbFirst != 0 { sqlite3Fts5ParseError(tls, pParse, - ts+34765, + ts+34812, libc.VaList(bp, func() uintptr { if (*Fts5ExprNearset)(unsafe.Pointer(pNear)).FnPhrase == 1 { - return ts + 34815 + return ts + 34862 } - return ts + 34678 + return ts + 34725 }())) Xsqlite3_free(tls, pRet) pRet = uintptr(0) @@ -140234,7 +140292,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpReader == uintptr(0) && rc == SQLITE_OK { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig rc = Xsqlite3_blob_open(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, - (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl, ts+34822, iRowid, 0, p+36) + (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl, ts+34869, iRowid, 0, p+36) } 
if rc == SQLITE_ERROR { @@ -140313,7 +140371,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpWriter == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig fts5IndexPrepareStmt(tls, p, p+40, Xsqlite3_mprintf(tls, - ts+34828, + ts+34875, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName))) if (*Fts5Index)(unsafe.Pointer(p)).Frc != 0 { return @@ -140338,7 +140396,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpDeleter == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig var zSql uintptr = Xsqlite3_mprintf(tls, - ts+34879, + ts+34926, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) if fts5IndexPrepareStmt(tls, p, p+44, zSql) != 0 { return @@ -140361,7 +140419,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpIdxDeleter == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig fts5IndexPrepareStmt(tls, p, p+52, Xsqlite3_mprintf(tls, - ts+34928, + ts+34975, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName))) } if (*Fts5Index)(unsafe.Pointer(p)).Frc == SQLITE_OK { @@ -140600,7 +140658,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).Frc == SQLITE_OK { if (*Fts5Index)(unsafe.Pointer(p)).FpDataVersion == uintptr(0) { (*Fts5Index)(unsafe.Pointer(p)).Frc = fts5IndexPrepareStmt(tls, p, p+64, - Xsqlite3_mprintf(tls, ts+34968, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer((*Fts5Index)(unsafe.Pointer(p)).FpConfig)).FzDb))) + Xsqlite3_mprintf(tls, ts+35015, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer((*Fts5Index)(unsafe.Pointer(p)).FpConfig)).FzDb))) if (*Fts5Index)(unsafe.Pointer(p)).Frc != 0 { return int64(0) } @@ -141799,7 +141857,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpIdxSelect == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig fts5IndexPrepareStmt(tls, p, p+56, Xsqlite3_mprintf(tls, - ts+34991, + ts+35038, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName))) } return (*Fts5Index)(unsafe.Pointer(p)).FpIdxSelect @@ -143264,7 +143322,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpIdxWriter == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig fts5IndexPrepareStmt(tls, p, p+48, Xsqlite3_mprintf(tls, - ts+35075, + ts+35122, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName))) } @@ -144345,13 +144403,13 @@ if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { (*Fts5Index)(unsafe.Pointer(p)).FpConfig = pConfig (*Fts5Index)(unsafe.Pointer(p)).FnWorkUnit = FTS5_WORK_UNIT - (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl = sqlite3Fts5Mprintf(tls, bp+8, ts+35132, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) + (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl = sqlite3Fts5Mprintf(tls, bp+8, ts+35179, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) if (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl != 0 && bCreate != 0 { *(*int32)(unsafe.Pointer(bp + 8)) = sqlite3Fts5CreateTable(tls, - pConfig, ts+25049, ts+35140, 0, pzErr) + pConfig, ts+25096, ts+35187, 0, pzErr) if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { *(*int32)(unsafe.Pointer(bp + 8)) = sqlite3Fts5CreateTable(tls, pConfig, ts+11481, - ts+35175, + ts+35222, 1, pzErr) } if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { @@ -144604,7 +144662,7 @@ sqlite3Fts5Put32(tls, bp, iNew) rc = Xsqlite3_blob_open(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, 
(*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl, - ts+34822, int64(FTS5_STRUCTURE_ROWID), 1, bp+4) + ts+34869, int64(FTS5_STRUCTURE_ROWID), 1, bp+4) if rc == SQLITE_OK { Xsqlite3_blob_write(tls, *(*uintptr)(unsafe.Pointer(bp + 4)), bp, 4, 0) rc = Xsqlite3_blob_close(tls, *(*uintptr)(unsafe.Pointer(bp + 4))) @@ -144718,7 +144776,7 @@ } fts5IndexPrepareStmt(tls, p, bp+24, Xsqlite3_mprintf(tls, - ts+35219, + ts+35266, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, (*Fts5StructureSegment)(unsafe.Pointer(pSeg)).FiSegid))) for (*Fts5Index)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) { @@ -144888,7 +144946,7 @@ } else { (*Fts5Buffer)(unsafe.Pointer(bp + 8)).Fn = 0 fts5SegiterPoslist(tls, p, *(*uintptr)(unsafe.Pointer(bp))+68+uintptr((*Fts5CResult)(unsafe.Pointer((*Fts5Iter)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FaFirst+1*4)).FiFirst)*92, uintptr(0), bp+8) - sqlite3Fts5BufferAppendBlob(tls, p+32, bp+8, uint32(4), ts+35305) + sqlite3Fts5BufferAppendBlob(tls, p+32, bp+8, uint32(4), ts+35352) for 0 == sqlite3Fts5PoslistNext64(tls, (*Fts5Buffer)(unsafe.Pointer(bp+8)).Fp, (*Fts5Buffer)(unsafe.Pointer(bp+8)).Fn, bp+20, bp+24) { var iCol int32 = int32(*(*I64)(unsafe.Pointer(bp + 24)) >> 32) var iTokOff int32 = int32(*(*I64)(unsafe.Pointer(bp + 24)) & int64(0x7FFFFFFF)) @@ -145156,7 +145214,7 @@ if (*Fts5Config)(unsafe.Pointer(pConfig)).FbLock != 0 { (*Fts5Table)(unsafe.Pointer(pTab)).Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+35310, 0) + ts+35357, 0) return SQLITE_ERROR } @@ -145580,7 +145638,7 @@ (*Fts5Sorter)(unsafe.Pointer(pSorter)).FnIdx = nPhrase rc = fts5PrepareStatement(tls, pSorter, pConfig, - ts+35349, + ts+35396, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zRank, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, func() uintptr { if zRankArgs != 0 { @@ -145596,9 +145654,9 @@ }(), func() uintptr { if bDesc != 0 { - return ts + 35404 + return ts + 35451 } - return ts + 35409 + return ts + 35456 }())) (*Fts5Cursor)(unsafe.Pointer(pCsr)).FpSorter = pSorter @@ -145644,12 +145702,12 @@ (*Fts5Cursor)(unsafe.Pointer(pCsr)).FePlan = FTS5_PLAN_SPECIAL - if n == 5 && 0 == Xsqlite3_strnicmp(tls, ts+35413, z, n) { + if n == 5 && 0 == Xsqlite3_strnicmp(tls, ts+35460, z, n) { (*Fts5Cursor)(unsafe.Pointer(pCsr)).FiSpecial = I64(sqlite3Fts5IndexReads(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.FpIndex)) } else if n == 2 && 0 == Xsqlite3_strnicmp(tls, ts+5050, z, n) { (*Fts5Cursor)(unsafe.Pointer(pCsr)).FiSpecial = (*Fts5Cursor)(unsafe.Pointer(pCsr)).FiCsrId } else { - (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, ts+35419, libc.VaList(bp, n, z)) + (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, ts+35466, libc.VaList(bp, n, z)) rc = SQLITE_ERROR } @@ -145680,7 +145738,7 @@ var zRankArgs uintptr = (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRankArgs if zRankArgs != 0 { - var zSql uintptr = sqlite3Fts5Mprintf(tls, bp+16, ts+35447, libc.VaList(bp, zRankArgs)) + var zSql uintptr = sqlite3Fts5Mprintf(tls, bp+16, ts+35494, libc.VaList(bp, zRankArgs)) if zSql != 0 { *(*uintptr)(unsafe.Pointer(bp + 20)) = uintptr(0) *(*int32)(unsafe.Pointer(bp + 16)) = Xsqlite3_prepare_v3(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, zSql, -1, @@ -145711,7 +145769,7 @@ if *(*int32)(unsafe.Pointer(bp + 16)) == SQLITE_OK { pAux = 
fts5FindAuxiliary(tls, pTab, zRank) if pAux == uintptr(0) { - (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, ts+35457, libc.VaList(bp+8, zRank)) + (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, ts+35504, libc.VaList(bp+8, zRank)) *(*int32)(unsafe.Pointer(bp + 16)) = SQLITE_ERROR } } @@ -145743,14 +145801,14 @@ *(*int32)(unsafe.Pointer(pCsr + 56)) |= FTS5CSR_FREE_ZRANK } else if rc == SQLITE_ERROR { (*Sqlite3_vtab)(unsafe.Pointer((*Fts5Cursor)(unsafe.Pointer(pCsr)).Fbase.FpVtab)).FzErrMsg = Xsqlite3_mprintf(tls, - ts+35478, libc.VaList(bp, z)) + ts+35525, libc.VaList(bp, z)) } } else { if (*Fts5Config)(unsafe.Pointer(pConfig)).FzRank != 0 { (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRank = (*Fts5Config)(unsafe.Pointer(pConfig)).FzRank (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRankArgs = (*Fts5Config)(unsafe.Pointer(pConfig)).FzRankArgs } else { - (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRank = ts + 33861 + (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRank = ts + 33908 (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRankArgs = uintptr(0) } } @@ -145806,7 +145864,7 @@ goto __1 } (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+35310, 0) + ts+35357, 0) return SQLITE_ERROR __1: ; @@ -146023,7 +146081,7 @@ goto __40 } *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(pConfig)).FpzErrmsg)) = Xsqlite3_mprintf(tls, - ts+35511, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) + ts+35558, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) rc = SQLITE_ERROR goto __41 __40: @@ -146168,28 +146226,28 @@ var rc int32 = SQLITE_OK *(*int32)(unsafe.Pointer(bp)) = 0 - if 0 == Xsqlite3_stricmp(tls, ts+35547, zCmd) { + if 0 == Xsqlite3_stricmp(tls, ts+35594, zCmd) { if (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NORMAL { fts5SetVtabError(tls, pTab, - ts+35558, 0) + ts+35605, 0) rc = SQLITE_ERROR } else { rc = sqlite3Fts5StorageDeleteAll(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage) } - } else if 0 == Xsqlite3_stricmp(tls, ts+35638, zCmd) { + } else if 0 == Xsqlite3_stricmp(tls, ts+35685, zCmd) { if (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NONE { fts5SetVtabError(tls, pTab, - ts+35646, 0) + ts+35693, 0) rc = SQLITE_ERROR } else { rc = sqlite3Fts5StorageRebuild(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage) } } else if 0 == Xsqlite3_stricmp(tls, ts+16927, zCmd) { rc = sqlite3Fts5StorageOptimize(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage) - } else if 0 == Xsqlite3_stricmp(tls, ts+35702, zCmd) { + } else if 0 == Xsqlite3_stricmp(tls, ts+35749, zCmd) { var nMerge int32 = Xsqlite3_value_int(tls, pVal) rc = sqlite3Fts5StorageMerge(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage, nMerge) - } else if 0 == Xsqlite3_stricmp(tls, ts+35708, zCmd) { + } else if 0 == Xsqlite3_stricmp(tls, ts+35755, zCmd) { var iArg int32 = Xsqlite3_value_int(tls, pVal) rc = sqlite3Fts5StorageIntegrity(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage, iArg) } else { @@ -146260,12 +146318,12 @@ if eType0 == SQLITE_INTEGER && fts5IsContentless(tls, pTab) != 0 { (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+35724, + ts+35771, libc.VaList(bp, func() uintptr { if nArg > 1 { - return ts + 20422 + return ts + 20469 } - return ts + 35761 + return ts + 35808 }(), (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) *(*int32)(unsafe.Pointer(bp + 16)) = SQLITE_ERROR } else if nArg == 1 { @@ -146895,7 +146953,7 @@ pCsr = 
fts5CursorFromCsrid(tls, (*Fts5Auxiliary)(unsafe.Pointer(pAux)).FpGlobal, iCsrId) if pCsr == uintptr(0) || (*Fts5Cursor)(unsafe.Pointer(pCsr)).FePlan == 0 { - var zErr uintptr = Xsqlite3_mprintf(tls, ts+35773, libc.VaList(bp, iCsrId)) + var zErr uintptr = Xsqlite3_mprintf(tls, ts+35820, libc.VaList(bp, iCsrId)) Xsqlite3_result_error(tls, context, zErr, -1) Xsqlite3_free(tls, zErr) } else { @@ -147139,7 +147197,7 @@ }()) if pMod == uintptr(0) { rc = SQLITE_ERROR - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35794, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(azArg)))) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35841, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(azArg)))) } else { rc = (*struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 @@ -147158,7 +147216,7 @@ (*Fts5Config)(unsafe.Pointer(pConfig)).FpTokApi = pMod + 8 if rc != SQLITE_OK { if pzErr != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35816, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35863, 0) } } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FePattern = sqlite3Fts5TokenizerPattern(tls, @@ -147205,7 +147263,7 @@ var ppApi uintptr _ = nArg - ppApi = Xsqlite3_value_pointer(tls, *(*uintptr)(unsafe.Pointer(apArg)), ts+35847) + ppApi = Xsqlite3_value_pointer(tls, *(*uintptr)(unsafe.Pointer(apArg)), ts+35894) if ppApi != 0 { *(*uintptr)(unsafe.Pointer(ppApi)) = pGlobal } @@ -147214,7 +147272,7 @@ func fts5SourceIdFunc(tls *libc.TLS, pCtx uintptr, nArg int32, apUnused uintptr) { _ = nArg _ = apUnused - Xsqlite3_result_text(tls, pCtx, ts+35860, -1, libc.UintptrFromInt32(-1)) + Xsqlite3_result_text(tls, pCtx, ts+35907, -1, libc.UintptrFromInt32(-1)) } func fts5ShadowName(tls *libc.TLS, zName uintptr) int32 { @@ -147228,7 +147286,7 @@ } var azName2 = [5]uintptr{ - ts + 35951, ts + 34050, ts + 25049, ts + 34401, ts + 11481, + ts + 35998, ts + 34097, ts + 25096, ts + 34448, ts + 11481, } func fts5Init(tls *libc.TLS, db uintptr) int32 { @@ -147252,7 +147310,7 @@ (*Fts5Global)(unsafe.Pointer(pGlobal)).Fapi.FxFindTokenizer = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr, uintptr) int32 }{fts5FindTokenizer})) - rc = Xsqlite3_create_module_v2(tls, db, ts+35958, uintptr(unsafe.Pointer(&fts5Mod)), p, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5ModuleDestroy}))) + rc = Xsqlite3_create_module_v2(tls, db, ts+36005, uintptr(unsafe.Pointer(&fts5Mod)), p, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5ModuleDestroy}))) if rc == SQLITE_OK { rc = sqlite3Fts5IndexInit(tls, db) } @@ -147270,13 +147328,13 @@ } if rc == SQLITE_OK { rc = Xsqlite3_create_function(tls, - db, ts+35958, 1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { + db, ts+36005, 1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{fts5Fts5Func})), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { rc = Xsqlite3_create_function(tls, - db, ts+35963, 0, + db, ts+36010, 0, SQLITE_UTF8|SQLITE_DETERMINISTIC|SQLITE_INNOCUOUS, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) @@ -147333,17 +147391,17 @@ if *(*uintptr)(unsafe.Pointer(p + 24 + uintptr(eStmt)*4)) == uintptr(0) { *(*[11]uintptr)(unsafe.Pointer(bp + 128)) = [11]uintptr{ - ts + 35978, - ts + 36046, - ts + 36115, - ts + 36148, - ts + 36187, - ts + 36227, - ts + 36266, - ts + 36307, - ts + 36346, - ts + 36388, - ts + 36428, + ts + 36025, + ts + 36093, + ts + 36162, + ts + 36195, + 
ts + 36234, + ts + 36274, + ts + 36313, + ts + 36354, + ts + 36393, + ts + 36435, + ts + 36475, } var pC uintptr = (*Fts5Storage)(unsafe.Pointer(p)).FpConfig var zSql uintptr = uintptr(0) @@ -147445,18 +147503,18 @@ defer tls.Free(80) var rc int32 = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36451, + ts+36498, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36555, + ts+36602, libc.VaList(bp+48, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) } if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NORMAL { rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36593, + ts+36640, libc.VaList(bp+64, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) } return rc @@ -147468,7 +147526,7 @@ if *(*int32)(unsafe.Pointer(pRc)) == SQLITE_OK { *(*int32)(unsafe.Pointer(pRc)) = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36631, + ts+36678, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zTail, zName, zTail)) } } @@ -147480,14 +147538,14 @@ var pConfig uintptr = (*Fts5Storage)(unsafe.Pointer(pStorage)).FpConfig *(*int32)(unsafe.Pointer(bp)) = sqlite3Fts5StorageSync(tls, pStorage) - fts5StorageRenameOne(tls, pConfig, bp, ts+25049, zName) + fts5StorageRenameOne(tls, pConfig, bp, ts+25096, zName) fts5StorageRenameOne(tls, pConfig, bp, ts+11481, zName) - fts5StorageRenameOne(tls, pConfig, bp, ts+35951, zName) + fts5StorageRenameOne(tls, pConfig, bp, ts+35998, zName) if (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { - fts5StorageRenameOne(tls, pConfig, bp, ts+34401, zName) + fts5StorageRenameOne(tls, pConfig, bp, ts+34448, zName) } if (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NORMAL { - fts5StorageRenameOne(tls, pConfig, bp, ts+34050, zName) + fts5StorageRenameOne(tls, pConfig, bp, ts+34097, zName) } return *(*int32)(unsafe.Pointer(bp)) } @@ -147499,17 +147557,17 @@ var rc int32 *(*uintptr)(unsafe.Pointer(bp + 64)) = uintptr(0) - rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, bp+64, ts+36673, + rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, bp+64, ts+36720, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zPost, zDefn, func() uintptr { if bWithout != 0 { - return ts + 29702 + return ts + 29749 } return ts + 1547 }())) if *(*uintptr)(unsafe.Pointer(bp + 64)) != 0 { *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, - ts+36703, + ts+36750, libc.VaList(bp+40, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zPost, *(*uintptr)(unsafe.Pointer(bp + 64)))) Xsqlite3_free(tls, *(*uintptr)(unsafe.Pointer(bp + 64))) } @@ -147546,27 +147604,27 @@ } else { var i int32 var iOff int32 - Xsqlite3_snprintf(tls, nDefn, zDefn, ts+36747, 0) + Xsqlite3_snprintf(tls, nDefn, zDefn, ts+36794, 0) iOff = int32(libc.Xstrlen(tls, zDefn)) for i = 0; i < (*Fts5Config)(unsafe.Pointer(pConfig)).FnCol; i++ { - 
Xsqlite3_snprintf(tls, nDefn-iOff, zDefn+uintptr(iOff), ts+36770, libc.VaList(bp, i)) + Xsqlite3_snprintf(tls, nDefn-iOff, zDefn+uintptr(iOff), ts+36817, libc.VaList(bp, i)) iOff = iOff + int32(libc.Xstrlen(tls, zDefn+uintptr(iOff))) } - rc = sqlite3Fts5CreateTable(tls, pConfig, ts+34050, zDefn, 0, pzErr) + rc = sqlite3Fts5CreateTable(tls, pConfig, ts+34097, zDefn, 0, pzErr) } Xsqlite3_free(tls, zDefn) } if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { rc = sqlite3Fts5CreateTable(tls, - pConfig, ts+34401, ts+36776, 0, pzErr) + pConfig, ts+34448, ts+36823, 0, pzErr) } if rc == SQLITE_OK { rc = sqlite3Fts5CreateTable(tls, - pConfig, ts+35951, ts+36808, 1, pzErr) + pConfig, ts+35998, ts+36855, 1, pzErr) } if rc == SQLITE_OK { - rc = sqlite3Fts5StorageConfigValue(tls, p, ts+34547, uintptr(0), FTS5_CURRENT_VERSION) + rc = sqlite3Fts5StorageConfigValue(tls, p, ts+34594, uintptr(0), FTS5_CURRENT_VERSION) } } @@ -147772,12 +147830,12 @@ (*Fts5Storage)(unsafe.Pointer(p)).FbTotalsValid = 0 rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36825, + ts+36872, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36875, + ts+36922, libc.VaList(bp+32, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) } @@ -147785,7 +147843,7 @@ rc = sqlite3Fts5IndexReinit(tls, (*Fts5Storage)(unsafe.Pointer(p)).FpIndex) } if rc == SQLITE_OK { - rc = sqlite3Fts5StorageConfigValue(tls, p, ts+34547, uintptr(0), FTS5_CURRENT_VERSION) + rc = sqlite3Fts5StorageConfigValue(tls, p, ts+34594, uintptr(0), FTS5_CURRENT_VERSION) } return rc } @@ -147961,7 +148019,7 @@ var zSql uintptr var rc int32 - zSql = Xsqlite3_mprintf(tls, ts+36904, + zSql = Xsqlite3_mprintf(tls, ts+36951, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zSuffix)) if zSql == uintptr(0) { rc = SQLITE_NOMEM @@ -148143,14 +148201,14 @@ if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NORMAL { *(*I64)(unsafe.Pointer(bp + 36)) = int64(0) - rc = fts5StorageCount(tls, p, ts+34050, bp+36) + rc = fts5StorageCount(tls, p, ts+34097, bp+36) if rc == SQLITE_OK && *(*I64)(unsafe.Pointer(bp + 36)) != (*Fts5Storage)(unsafe.Pointer(p)).FnTotalRow { rc = SQLITE_CORRUPT | int32(1)<<8 } } if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { *(*I64)(unsafe.Pointer(bp + 44)) = int64(0) - rc = fts5StorageCount(tls, p, ts+34401, bp+44) + rc = fts5StorageCount(tls, p, ts+34448, bp+44) if rc == SQLITE_OK && *(*I64)(unsafe.Pointer(bp + 44)) != (*Fts5Storage)(unsafe.Pointer(p)).FnTotalRow { rc = SQLITE_CORRUPT | int32(1)<<8 } @@ -148345,9 +148403,9 @@ libc.Xmemcpy(tls, p, uintptr(unsafe.Pointer(&aAsciiTokenChar)), uint32(unsafe.Sizeof(aAsciiTokenChar))) for i = 0; rc == SQLITE_OK && i < nArg; i = i + 2 { var zArg uintptr = *(*uintptr)(unsafe.Pointer(azArg + uintptr(i+1)*4)) - if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*4)), ts+36936) { + if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*4)), ts+36983) { fts5AsciiAddExceptions(tls, p, zArg, 1) - } else if 0 == Xsqlite3_stricmp(tls, 
*(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*4)), ts+36947) { + } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*4)), ts+36994) { fts5AsciiAddExceptions(tls, p, zArg, 0) } else { rc = SQLITE_ERROR @@ -148561,7 +148619,7 @@ } else { p = Xsqlite3_malloc(tls, int32(unsafe.Sizeof(Unicode61Tokenizer{}))) if p != 0 { - var zCat uintptr = ts + 36958 + var zCat uintptr = ts + 37005 var i int32 libc.Xmemset(tls, p, 0, uint32(unsafe.Sizeof(Unicode61Tokenizer{}))) @@ -148573,7 +148631,7 @@ } for i = 0; rc == SQLITE_OK && i < nArg; i = i + 2 { - if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*4)), ts+36967) { + if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*4)), ts+37014) { zCat = *(*uintptr)(unsafe.Pointer(azArg + uintptr(i+1)*4)) } } @@ -148584,18 +148642,18 @@ for i = 0; rc == SQLITE_OK && i < nArg; i = i + 2 { var zArg uintptr = *(*uintptr)(unsafe.Pointer(azArg + uintptr(i+1)*4)) - if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*4)), ts+36978) { + if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*4)), ts+37025) { if int32(*(*int8)(unsafe.Pointer(zArg))) != '0' && int32(*(*int8)(unsafe.Pointer(zArg))) != '1' && int32(*(*int8)(unsafe.Pointer(zArg))) != '2' || *(*int8)(unsafe.Pointer(zArg + 1)) != 0 { rc = SQLITE_ERROR } else { (*Unicode61Tokenizer)(unsafe.Pointer(p)).FeRemoveDiacritic = int32(*(*int8)(unsafe.Pointer(zArg))) - '0' } - } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*4)), ts+36936) { + } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*4)), ts+36983) { rc = fts5UnicodeAddExceptions(tls, p, zArg, 1) - } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*4)), ts+36947) { + } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*4)), ts+36994) { rc = fts5UnicodeAddExceptions(tls, p, zArg, 0) - } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*4)), ts+36967) { + } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*4)), ts+37014) { } else { rc = SQLITE_ERROR } @@ -148871,7 +148929,7 @@ var rc int32 = SQLITE_OK var pRet uintptr *(*uintptr)(unsafe.Pointer(bp)) = uintptr(0) - var zBase uintptr = ts + 36996 + var zBase uintptr = ts + 37043 if nArg > 0 { zBase = *(*uintptr)(unsafe.Pointer(azArg)) @@ -149011,7 +149069,7 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*int8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'a': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37006, aBuf+uintptr(nBuf-2), uint32(2)) { + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37053, aBuf+uintptr(nBuf-2), uint32(2)) { if fts5Porter_MGt1(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 } @@ -149019,11 +149077,11 @@ break case 'c': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37009, aBuf+uintptr(nBuf-4), uint32(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37056, aBuf+uintptr(nBuf-4), uint32(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37014, aBuf+uintptr(nBuf-4), uint32(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37061, aBuf+uintptr(nBuf-4), uint32(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } @@ -149031,7 +149089,7 @@ break case 'e': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37019, 
aBuf+uintptr(nBuf-2), uint32(2)) { + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37066, aBuf+uintptr(nBuf-2), uint32(2)) { if fts5Porter_MGt1(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 } @@ -149039,7 +149097,7 @@ break case 'i': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37022, aBuf+uintptr(nBuf-2), uint32(2)) { + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37069, aBuf+uintptr(nBuf-2), uint32(2)) { if fts5Porter_MGt1(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 } @@ -149047,11 +149105,11 @@ break case 'l': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37025, aBuf+uintptr(nBuf-4), uint32(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37072, aBuf+uintptr(nBuf-4), uint32(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37030, aBuf+uintptr(nBuf-4), uint32(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37077, aBuf+uintptr(nBuf-4), uint32(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } @@ -149059,19 +149117,19 @@ break case 'n': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37035, aBuf+uintptr(nBuf-3), uint32(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37082, aBuf+uintptr(nBuf-3), uint32(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37039, aBuf+uintptr(nBuf-5), uint32(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37086, aBuf+uintptr(nBuf-5), uint32(5)) { if fts5Porter_MGt1(tls, aBuf, nBuf-5) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37045, aBuf+uintptr(nBuf-4), uint32(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37092, aBuf+uintptr(nBuf-4), uint32(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } - } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37050, aBuf+uintptr(nBuf-3), uint32(3)) { + } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37097, aBuf+uintptr(nBuf-3), uint32(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -149079,11 +149137,11 @@ break case 'o': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37054, aBuf+uintptr(nBuf-3), uint32(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37101, aBuf+uintptr(nBuf-3), uint32(3)) { if fts5Porter_MGt1_and_S_or_T(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } - } else if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37058, aBuf+uintptr(nBuf-2), uint32(2)) { + } else if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37105, aBuf+uintptr(nBuf-2), uint32(2)) { if fts5Porter_MGt1(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 } @@ -149091,7 +149149,7 @@ break case 's': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37061, aBuf+uintptr(nBuf-3), uint32(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37108, aBuf+uintptr(nBuf-3), uint32(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -149099,11 +149157,11 @@ break case 't': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37065, aBuf+uintptr(nBuf-3), uint32(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37112, aBuf+uintptr(nBuf-3), uint32(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } - } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37069, aBuf+uintptr(nBuf-3), uint32(3)) { + } else if nBuf > 3 && 0 == 
libc.Xmemcmp(tls, ts+37116, aBuf+uintptr(nBuf-3), uint32(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -149111,7 +149169,7 @@ break case 'u': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37073, aBuf+uintptr(nBuf-3), uint32(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37120, aBuf+uintptr(nBuf-3), uint32(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -149119,7 +149177,7 @@ break case 'v': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37077, aBuf+uintptr(nBuf-3), uint32(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37124, aBuf+uintptr(nBuf-3), uint32(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -149127,7 +149185,7 @@ break case 'z': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37081, aBuf+uintptr(nBuf-3), uint32(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37128, aBuf+uintptr(nBuf-3), uint32(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -149143,24 +149201,24 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*int8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'a': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37085, aBuf+uintptr(nBuf-2), uint32(2)) { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37065, uint32(3)) + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37132, aBuf+uintptr(nBuf-2), uint32(2)) { + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37112, uint32(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 + 3 ret = 1 } break case 'b': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37088, aBuf+uintptr(nBuf-2), uint32(2)) { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37091, uint32(3)) + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37135, aBuf+uintptr(nBuf-2), uint32(2)) { + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37138, uint32(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 + 3 ret = 1 } break case 'i': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37095, aBuf+uintptr(nBuf-2), uint32(2)) { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37081, uint32(3)) + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37142, aBuf+uintptr(nBuf-2), uint32(2)) { + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37128, uint32(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 + 3 ret = 1 } @@ -149175,44 +149233,44 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*int8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'a': - if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37098, aBuf+uintptr(nBuf-7), uint32(7)) { + if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37145, aBuf+uintptr(nBuf-7), uint32(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37065, uint32(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37112, uint32(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } - } else if nBuf > 6 && 0 == libc.Xmemcmp(tls, ts+37106, aBuf+uintptr(nBuf-6), uint32(6)) { + } else if nBuf > 6 && 0 == libc.Xmemcmp(tls, ts+37153, aBuf+uintptr(nBuf-6), uint32(6)) { if fts5Porter_MGt0(tls, aBuf, nBuf-6) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-6), ts+37113, uint32(4)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-6), ts+37160, uint32(4)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 6 + 4 } } break case 'c': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37118, aBuf+uintptr(nBuf-4), uint32(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37165, aBuf+uintptr(nBuf-4), uint32(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37014, uint32(4)) 
+ libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37061, uint32(4)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 4 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37123, aBuf+uintptr(nBuf-4), uint32(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37170, aBuf+uintptr(nBuf-4), uint32(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37009, uint32(4)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37056, uint32(4)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 4 } } break case 'e': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37128, aBuf+uintptr(nBuf-4), uint32(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37175, aBuf+uintptr(nBuf-4), uint32(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37081, uint32(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37128, uint32(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 3 } } break case 'g': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37133, aBuf+uintptr(nBuf-4), uint32(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37180, aBuf+uintptr(nBuf-4), uint32(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+15473, uint32(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 3 @@ -149221,91 +149279,91 @@ break case 'l': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37138, aBuf+uintptr(nBuf-3), uint32(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37185, aBuf+uintptr(nBuf-3), uint32(3)) { if fts5Porter_MGt0(tls, aBuf, nBuf-3) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37091, uint32(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37138, uint32(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 + 3 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37142, aBuf+uintptr(nBuf-4), uint32(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37189, aBuf+uintptr(nBuf-4), uint32(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37006, uint32(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37053, uint32(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 2 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37147, aBuf+uintptr(nBuf-5), uint32(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37194, aBuf+uintptr(nBuf-5), uint32(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37050, uint32(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37097, uint32(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 3 } - } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37153, aBuf+uintptr(nBuf-3), uint32(3)) { + } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37200, aBuf+uintptr(nBuf-3), uint32(3)) { if fts5Porter_MGt0(tls, aBuf, nBuf-3) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37157, uint32(1)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37204, uint32(1)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 + 1 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37159, aBuf+uintptr(nBuf-5), uint32(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37206, aBuf+uintptr(nBuf-5), uint32(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37073, uint32(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37120, uint32(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 3 } } break case 'o': - if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37165, aBuf+uintptr(nBuf-7), uint32(7)) { + if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37212, aBuf+uintptr(nBuf-7), uint32(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 
{ - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37081, uint32(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37128, uint32(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37173, aBuf+uintptr(nBuf-5), uint32(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37220, aBuf+uintptr(nBuf-5), uint32(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37065, uint32(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37112, uint32(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 3 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37179, aBuf+uintptr(nBuf-4), uint32(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37226, aBuf+uintptr(nBuf-4), uint32(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37065, uint32(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37112, uint32(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 3 } } break case 's': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37184, aBuf+uintptr(nBuf-5), uint32(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37231, aBuf+uintptr(nBuf-5), uint32(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37006, uint32(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37053, uint32(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } - } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37190, aBuf+uintptr(nBuf-7), uint32(7)) { + } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37237, aBuf+uintptr(nBuf-7), uint32(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37077, uint32(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37124, uint32(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } - } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37198, aBuf+uintptr(nBuf-7), uint32(7)) { + } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37245, aBuf+uintptr(nBuf-7), uint32(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37206, uint32(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37253, uint32(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } - } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37210, aBuf+uintptr(nBuf-7), uint32(7)) { + } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37257, aBuf+uintptr(nBuf-7), uint32(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37073, uint32(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37120, uint32(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } } break case 't': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37218, aBuf+uintptr(nBuf-5), uint32(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37265, aBuf+uintptr(nBuf-5), uint32(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37006, uint32(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37053, uint32(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37224, aBuf+uintptr(nBuf-5), uint32(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37271, aBuf+uintptr(nBuf-5), uint32(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37077, uint32(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37124, uint32(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 3 } - } else if nBuf > 6 && 0 == libc.Xmemcmp(tls, ts+37230, aBuf+uintptr(nBuf-6), uint32(6)) { + } else if nBuf > 6 && 0 == 
libc.Xmemcmp(tls, ts+37277, aBuf+uintptr(nBuf-6), uint32(6)) { if fts5Porter_MGt0(tls, aBuf, nBuf-6) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-6), ts+37091, uint32(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-6), ts+37138, uint32(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 6 + 3 } } @@ -149320,16 +149378,16 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*int8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'a': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37237, aBuf+uintptr(nBuf-4), uint32(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37284, aBuf+uintptr(nBuf-4), uint32(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37022, uint32(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37069, uint32(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 2 } } break case 's': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37242, aBuf+uintptr(nBuf-4), uint32(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37289, aBuf+uintptr(nBuf-4), uint32(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } @@ -149337,21 +149395,21 @@ break case 't': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37247, aBuf+uintptr(nBuf-5), uint32(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37294, aBuf+uintptr(nBuf-5), uint32(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37022, uint32(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37069, uint32(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37253, aBuf+uintptr(nBuf-5), uint32(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37300, aBuf+uintptr(nBuf-5), uint32(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37022, uint32(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37069, uint32(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } } break case 'u': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37206, aBuf+uintptr(nBuf-3), uint32(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37253, aBuf+uintptr(nBuf-3), uint32(3)) { if fts5Porter_MGt0(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -149359,7 +149417,7 @@ break case 'v': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37259, aBuf+uintptr(nBuf-5), uint32(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37306, aBuf+uintptr(nBuf-5), uint32(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 } @@ -149367,9 +149425,9 @@ break case 'z': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37265, aBuf+uintptr(nBuf-5), uint32(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37312, aBuf+uintptr(nBuf-5), uint32(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37006, uint32(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37053, uint32(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } } @@ -149384,12 +149442,12 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*int8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'e': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37271, aBuf+uintptr(nBuf-3), uint32(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37318, aBuf+uintptr(nBuf-3), uint32(3)) { if fts5Porter_MGt0(tls, aBuf, nBuf-3) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37275, uint32(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37322, uint32(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 + 2 } - } else if nBuf > 2 && 0 == 
libc.Xmemcmp(tls, ts+37278, aBuf+uintptr(nBuf-2), uint32(2)) { + } else if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37325, aBuf+uintptr(nBuf-2), uint32(2)) { if fts5Porter_Vowel(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 ret = 1 @@ -149398,7 +149456,7 @@ break case 'n': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37281, aBuf+uintptr(nBuf-3), uint32(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37328, aBuf+uintptr(nBuf-3), uint32(3)) { if fts5Porter_Vowel(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 ret = 1 @@ -149554,7 +149612,7 @@ (*TrigramTokenizer)(unsafe.Pointer(pNew)).FbFold = 1 for i = 0; rc == SQLITE_OK && i < nArg; i = i + 2 { var zArg uintptr = *(*uintptr)(unsafe.Pointer(azArg + uintptr(i+1)*4)) - if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*4)), ts+37285) { + if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*4)), ts+37332) { if int32(*(*int8)(unsafe.Pointer(zArg))) != '0' && int32(*(*int8)(unsafe.Pointer(zArg))) != '1' || *(*int8)(unsafe.Pointer(zArg + 1)) != 0 { rc = SQLITE_ERROR } else { @@ -149734,22 +149792,22 @@ defer tls.Free(64) *(*[4]BuiltinTokenizer)(unsafe.Pointer(bp)) = [4]BuiltinTokenizer{ - {FzName: ts + 36996, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { + {FzName: ts + 37043, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 }{fts5UnicodeCreate})), FxDelete: *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5UnicodeDelete})), FxTokenize: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, int32, uintptr) int32 }{fts5UnicodeTokenize}))}}, - {FzName: ts + 37300, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { + {FzName: ts + 37347, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 }{fts5AsciiCreate})), FxDelete: *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5AsciiDelete})), FxTokenize: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, int32, uintptr) int32 }{fts5AsciiTokenize}))}}, - {FzName: ts + 37306, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { + {FzName: ts + 37353, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 }{fts5PorterCreate})), FxDelete: *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5PorterDelete})), FxTokenize: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, int32, uintptr) int32 }{fts5PorterTokenize}))}}, - {FzName: ts + 37313, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { + {FzName: ts + 37360, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 }{fts5TriCreate})), FxDelete: *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5TriDelete})), FxTokenize: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, int32, uintptr) int32 @@ -150888,14 +150946,14 @@ var zCopy uintptr = sqlite3Fts5Strndup(tls, bp+8, zType, -1) if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { sqlite3Fts5Dequote(tls, zCopy) - if Xsqlite3_stricmp(tls, zCopy, ts+37321) == 0 { + if Xsqlite3_stricmp(tls, zCopy, ts+37368) == 0 { *(*int32)(unsafe.Pointer(peType)) = FTS5_VOCAB_COL - } 
else if Xsqlite3_stricmp(tls, zCopy, ts+37325) == 0 { + } else if Xsqlite3_stricmp(tls, zCopy, ts+37372) == 0 { *(*int32)(unsafe.Pointer(peType)) = FTS5_VOCAB_ROW - } else if Xsqlite3_stricmp(tls, zCopy, ts+37329) == 0 { + } else if Xsqlite3_stricmp(tls, zCopy, ts+37376) == 0 { *(*int32)(unsafe.Pointer(peType)) = FTS5_VOCAB_INSTANCE } else { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+37338, libc.VaList(bp, zCopy)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+37385, libc.VaList(bp, zCopy)) *(*int32)(unsafe.Pointer(bp + 8)) = SQLITE_ERROR } Xsqlite3_free(tls, zCopy) @@ -150921,19 +150979,19 @@ defer tls.Free(20) *(*[3]uintptr)(unsafe.Pointer(bp + 4)) = [3]uintptr{ - ts + 37372, - ts + 37412, - ts + 37447, + ts + 37419, + ts + 37459, + ts + 37494, } var pRet uintptr = uintptr(0) *(*int32)(unsafe.Pointer(bp + 16)) = SQLITE_OK var bDb int32 - bDb = libc.Bool32(argc == 6 && libc.Xstrlen(tls, *(*uintptr)(unsafe.Pointer(argv + 1*4))) == Size_t(4) && libc.Xmemcmp(tls, ts+23345, *(*uintptr)(unsafe.Pointer(argv + 1*4)), uint32(4)) == 0) + bDb = libc.Bool32(argc == 6 && libc.Xstrlen(tls, *(*uintptr)(unsafe.Pointer(argv + 1*4))) == Size_t(4) && libc.Xmemcmp(tls, ts+23392, *(*uintptr)(unsafe.Pointer(argv + 1*4)), uint32(4)) == 0) if argc != 5 && bDb == 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+37490, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+37537, 0) *(*int32)(unsafe.Pointer(bp + 16)) = SQLITE_ERROR } else { var nByte int32 @@ -151066,11 +151124,11 @@ if (*Fts5VocabTable)(unsafe.Pointer(pTab)).FbBusy != 0 { (*Sqlite3_vtab)(unsafe.Pointer(pVTab)).FzErrMsg = Xsqlite3_mprintf(tls, - ts+37523, libc.VaList(bp, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) + ts+37570, libc.VaList(bp, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) return SQLITE_ERROR } zSql = sqlite3Fts5Mprintf(tls, bp+64, - ts+37554, + ts+37601, libc.VaList(bp+16, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) if zSql != 0 { *(*int32)(unsafe.Pointer(bp + 64)) = Xsqlite3_prepare_v2(tls, (*Fts5VocabTable)(unsafe.Pointer(pTab)).Fdb, zSql, -1, bp+68, uintptr(0)) @@ -151094,7 +151152,7 @@ *(*uintptr)(unsafe.Pointer(bp + 68)) = uintptr(0) if *(*int32)(unsafe.Pointer(bp + 64)) == SQLITE_OK { (*Sqlite3_vtab)(unsafe.Pointer(pVTab)).FzErrMsg = Xsqlite3_mprintf(tls, - ts+37605, libc.VaList(bp+48, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) + ts+37652, libc.VaList(bp+48, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) *(*int32)(unsafe.Pointer(bp + 64)) = SQLITE_ERROR } } else { @@ -151489,7 +151547,7 @@ func sqlite3Fts5VocabInit(tls *libc.TLS, pGlobal uintptr, db uintptr) int32 { var p uintptr = pGlobal - return Xsqlite3_create_module_v2(tls, db, ts+37631, uintptr(unsafe.Pointer(&fts5Vocab)), p, uintptr(0)) + return Xsqlite3_create_module_v2(tls, db, ts+37678, uintptr(unsafe.Pointer(&fts5Vocab)), p, uintptr(0)) } var fts5Vocab = Sqlite3_module{ @@ -151511,7 +151569,7 @@ // ************* End of stmt.c *********************************************** // Return the source-id for this library func Xsqlite3_sourceid(tls *libc.TLS) uintptr { - return ts + 37641 + return ts + 37688 } func init() { 
@@ -152485,5 +152543,5 @@ *(*func(*libc.TLS, uintptr, int32, uintptr) int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&vfs_template)) + 68)) = rbuVfsGetLastError } -var ts1 = "3.41.0\x00ATOMIC_INTRINSICS=0\x00COMPILER=clang-13.0.0\x00DEFAULT_AUTOVACUUM\x00DEFAULT_CACHE_SIZE=-2000\x00DEFAULT_FILE_FORMAT=4\x00DEFAULT_JOURNAL_SIZE_LIMIT=-1\x00DEFAULT_MEMSTATUS=0\x00DEFAULT_MMAP_SIZE=0\x00DEFAULT_PAGE_SIZE=4096\x00DEFAULT_PCACHE_INITSZ=20\x00DEFAULT_RECURSIVE_TRIGGERS\x00DEFAULT_SECTOR_SIZE=4096\x00DEFAULT_SYNCHRONOUS=2\x00DEFAULT_WAL_AUTOCHECKPOINT=1000\x00DEFAULT_WAL_SYNCHRONOUS=2\x00DEFAULT_WORKER_THREADS=0\x00ENABLE_COLUMN_METADATA\x00ENABLE_FTS5\x00ENABLE_GEOPOLY\x00ENABLE_MATH_FUNCTIONS\x00ENABLE_MEMORY_MANAGEMENT\x00ENABLE_OFFSET_SQL_FUNC\x00ENABLE_PREUPDATE_HOOK\x00ENABLE_RBU\x00ENABLE_RTREE\x00ENABLE_SESSION\x00ENABLE_SNAPSHOT\x00ENABLE_STAT4\x00ENABLE_UNLOCK_NOTIFY\x00LIKE_DOESNT_MATCH_BLOBS\x00MALLOC_SOFT_LIMIT=1024\x00MAX_ATTACHED=10\x00MAX_COLUMN=2000\x00MAX_COMPOUND_SELECT=500\x00MAX_DEFAULT_PAGE_SIZE=8192\x00MAX_EXPR_DEPTH=1000\x00MAX_FUNCTION_ARG=127\x00MAX_LENGTH=1000000000\x00MAX_LIKE_PATTERN_LENGTH=50000\x00MAX_MMAP_SIZE=0x7fff0000\x00MAX_PAGE_COUNT=1073741823\x00MAX_PAGE_SIZE=65536\x00MAX_SQL_LENGTH=1000000000\x00MAX_TRIGGER_DEPTH=1000\x00MAX_VARIABLE_NUMBER=32766\x00MAX_VDBE_OP=250000000\x00MAX_WORKER_THREADS=8\x00MUTEX_NOOP\x00SOUNDEX\x00SYSTEM_MALLOC\x00TEMP_STORE=1\x00THREADSAFE=1\x00BINARY\x00ANY\x00BLOB\x00INT\x00INTEGER\x00REAL\x00TEXT\x0020b:20e\x0020c:20e\x0020e\x0040f-21a-21d\x00now\x00local time unavailable\x00second\x00minute\x00hour\x00\x00\x00day\x00\x00\x00\x00month\x00\x00year\x00\x00\x00auto\x00julianday\x00localtime\x00unixepoch\x00utc\x00weekday \x00start of \x00month\x00year\x00day\x00%02d\x00%06.3f\x00%03d\x00%.16g\x00%lld\x00%04d\x00date\x00time\x00datetime\x00strftime\x00current_time\x00current_timestamp\x00current_date\x00failed to allocate %u bytes of memory\x00failed memory resize %u to %u bytes\x00out of memory\x000123456789ABCDEF0123456789abcdef\x00-x0\x00X0\x00%\x00NaN\x00Inf\x00\x00NULL\x00(NULL)\x00.\x00(join-%u)\x00(subquery-%u)\x00thstndrd\x00922337203685477580\x00API call with %s database connection 
pointer\x00unopened\x00invalid\x00Savepoint\x00AutoCommit\x00Transaction\x00Checkpoint\x00JournalMode\x00Vacuum\x00VFilter\x00VUpdate\x00Init\x00Goto\x00Gosub\x00InitCoroutine\x00Yield\x00MustBeInt\x00Jump\x00Once\x00If\x00IfNot\x00IsType\x00Not\x00IfNullRow\x00SeekLT\x00SeekLE\x00SeekGE\x00SeekGT\x00IfNotOpen\x00IfNoHope\x00NoConflict\x00NotFound\x00Found\x00SeekRowid\x00NotExists\x00Last\x00IfSmaller\x00SorterSort\x00Sort\x00Rewind\x00SorterNext\x00Prev\x00Next\x00IdxLE\x00IdxGT\x00IdxLT\x00Or\x00And\x00IdxGE\x00RowSetRead\x00RowSetTest\x00Program\x00FkIfZero\x00IsNull\x00NotNull\x00Ne\x00Eq\x00Gt\x00Le\x00Lt\x00Ge\x00ElseEq\x00IfPos\x00IfNotZero\x00DecrJumpZero\x00IncrVacuum\x00VNext\x00Filter\x00PureFunc\x00Function\x00Return\x00EndCoroutine\x00HaltIfNull\x00Halt\x00Integer\x00Int64\x00String\x00BeginSubrtn\x00Null\x00SoftNull\x00Blob\x00Variable\x00Move\x00Copy\x00SCopy\x00IntCopy\x00FkCheck\x00ResultRow\x00CollSeq\x00AddImm\x00RealAffinity\x00Cast\x00Permutation\x00Compare\x00IsTrue\x00ZeroOrNull\x00Offset\x00Column\x00TypeCheck\x00Affinity\x00MakeRecord\x00Count\x00ReadCookie\x00SetCookie\x00ReopenIdx\x00BitAnd\x00BitOr\x00ShiftLeft\x00ShiftRight\x00Add\x00Subtract\x00Multiply\x00Divide\x00Remainder\x00Concat\x00OpenRead\x00OpenWrite\x00BitNot\x00OpenDup\x00OpenAutoindex\x00String8\x00OpenEphemeral\x00SorterOpen\x00SequenceTest\x00OpenPseudo\x00Close\x00ColumnsUsed\x00SeekScan\x00SeekHit\x00Sequence\x00NewRowid\x00Insert\x00RowCell\x00Delete\x00ResetCount\x00SorterCompare\x00SorterData\x00RowData\x00Rowid\x00NullRow\x00SeekEnd\x00IdxInsert\x00SorterInsert\x00IdxDelete\x00DeferredSeek\x00IdxRowid\x00FinishSeek\x00Destroy\x00Clear\x00ResetSorter\x00CreateBtree\x00SqlExec\x00ParseSchema\x00LoadAnalysis\x00DropTable\x00DropIndex\x00Real\x00DropTrigger\x00IntegrityCk\x00RowSetAdd\x00Param\x00FkCounter\x00MemMax\x00OffsetLimit\x00AggInverse\x00AggStep\x00AggStep1\x00AggValue\x00AggFinal\x00Expire\x00CursorLock\x00CursorUnlock\x00TableLock\x00VBegin\x00VCreate\x00VDestroy\x00VOpen\x00VInitIn\x00VColumn\x00VRename\x00Pagecount\x00MaxPgcnt\x00ClrSubtype\x00FilterAdd\x00Trace\x00CursorHint\x00ReleaseReg\x00Noop\x00Explain\x00Abortable\x00open\x00close\x00access\x00getcwd\x00stat\x00fstat\x00ftruncate\x00fcntl\x00read\x00pread\x00pread64\x00write\x00pwrite\x00pwrite64\x00fchmod\x00fallocate\x00unlink\x00openDirectory\x00mkdir\x00rmdir\x00fchown\x00geteuid\x00mmap\x00munmap\x00mremap\x00getpagesize\x00readlink\x00lstat\x00ioctl\x00attempt to open \"%s\" as file descriptor %d\x00/dev/null\x00os_unix.c:%d: (%d) %s(%s) - %s\x00cannot fstat db file %s\x00file unlinked while open: %s\x00multiple links to file: %s\x00file renamed while open: %s\x00%s\x00full_fsync\x00%s-shm\x00readonly_shm\x00psow\x00unix-excl\x00%s.lock\x00/var/tmp\x00/usr/tmp\x00/tmp\x00SQLITE_TMPDIR\x00TMPDIR\x00%s/etilqs_%llx%c\x00modeof\x00fsync\x00/dev/urandom\x00unix\x00unix-none\x00unix-dotfile\x00memdb\x00memdb(%p,%lld)\x00PRAGMA \"%w\".page_count\x00ATTACH x AS %Q\x00recovered %d pages from %s\x00-journal\x00-wal\x00nolock\x00immutable\x00PRAGMA table_list\x00recovered %d frames from WAL file %s\x00cannot limit WAL size: %s\x00SQLite format 3\x00:memory:\x00@ \x00\n\x00invalid page number %d\x002nd reference to page %d\x00Failed to read ptrmap key=%d\x00Bad ptr map entry key=%d expected=(%d,%d) got=(%d,%d)\x00failed to get page %d\x00freelist leaf count too big on page %d\x00%s is %d but should be %d\x00size\x00overflow list length\x00Page %u: \x00unable to get the page. 
error code=%d\x00btreeInitPage() returns error code %d\x00free space corruption\x00On tree page %u cell %d: \x00On page %u at right child: \x00Offset %d out of range %d..%d\x00Extends off end of page\x00Rowid %lld out of order\x00Child page depth differs\x00Multiple uses for byte %u of page %u\x00Fragmentation of %d bytes reported as %d on page %u\x00Main freelist: \x00max rootpage (%d) disagrees with header (%d)\x00incremental_vacuum enabled with a max rootpage of zero\x00Page %d is never used\x00Pointer map page %d is referenced\x00unknown database %s\x00destination database is in use\x00source and destination must be distinct\x00%!.15g\x00-\x00%s%s\x00k(%d\x00B\x00,%s%s%s\x00N.\x00)\x00%.18s-%s\x00%s(%d)\x00%d\x00(blob)\x00vtab:%p\x00%c%u\x00]\x00program\x00?\x008\x0016LE\x0016BE\x00addr\x00opcode\x00p1\x00p2\x00p3\x00p4\x00p5\x00comment\x00id\x00parent\x00notused\x00detail\x00%.4c%s%.16c\x00MJ delete: %s\x00MJ collide: %s\x00-mj%06X9%02X\x00FOREIGN KEY constraint failed\x00a CHECK constraint\x00a generated column\x00an index\x00non-deterministic use of %s() in %s\x00API called with finalized prepared statement\x00API called with NULL prepared statement\x00string or blob too big\x00bind on a busy prepared statement: [%s]\x00-- \x00'%.*q'\x00zeroblob(%d)\x00x'\x00%02x\x00'\x00%s constraint failed\x00%z: %s\x00abort at %d in [%s]: %s\x00cannot store %s value in %s column %s.%s\x00cannot open savepoint - SQL statements in progress\x00no such savepoint: %s\x00cannot release savepoint - SQL statements in progress\x00cannot commit transaction - SQL statements in progress\x00cannot start a transaction within a transaction\x00cannot rollback - no transaction is active\x00cannot commit - no transaction is active\x00database schema has changed\x00index corruption\x00sqlite_master\x00SELECT*FROM\"%w\".%s WHERE %s ORDER BY rowid\x00too many levels of trigger recursion\x00cannot change %s wal mode from within a transaction\x00into\x00out of\x00database table is locked: %s\x00ValueList\x00-- %s\x00statement aborts at %d: [%s] %s\x00NOT NULL\x00UNIQUE\x00CHECK\x00FOREIGN KEY\x00cannot open value of type %s\x00null\x00real\x00integer\x00no such rowid: %lld\x00cannot open virtual table: %s\x00cannot open table without rowid: %s\x00cannot open view: %s\x00no such column: \"%s\"\x00foreign key\x00indexed\x00cannot open %s column for writing\x00sqlite_\x00sqlite_temp_master\x00sqlite_temp_schema\x00sqlite_schema\x00main\x00*\x00new\x00old\x00excluded\x00misuse of aliased aggregate %s\x00misuse of aliased window function %s\x00row value misused\x00double-quoted string literal: \"%w\"\x00coalesce\x00no such column\x00ambiguous column name\x00%s: %s.%s.%s\x00%s: %s.%s\x00%s: %s\x00partial index WHERE clauses\x00index expressions\x00CHECK constraints\x00generated columns\x00%s prohibited in %s\x00true\x00false\x00the \".\" operator\x00second argument to %#T() must be a constant between 0.0 and 1.0\x00not authorized to use function: %#T\x00non-deterministic functions\x00%#T() may not be used as a window function\x00window\x00aggregate\x00misuse of %s function %#T()\x00no such function: %#T\x00wrong number of arguments to function %#T()\x00FILTER may not be used with non-aggregate %#T()\x00subqueries\x00parameters\x00%r %s BY term out of range - should be between 1 and %d\x00too many terms in ORDER BY clause\x00ORDER\x00%r ORDER BY term does not match any column in the result set\x00too many terms in %s BY clause\x00HAVING clause on a non-aggregate query\x00GROUP\x00aggregate functions are not allowed in the 
GROUP BY clause\x00Expression tree is too large (maximum depth %d)\x00IN(...) element has %d term%s - expected %d\x00s\x000\x00too many arguments on function %T\x00unsafe use of %#T()\x00variable number must be between ?1 and ?%d\x00too many SQL variables\x00%d columns assigned %d values\x00too many columns in %s\x00_ROWID_\x00ROWID\x00OID\x00USING ROWID SEARCH ON TABLE %s FOR IN-OPERATOR\x00USING INDEX %s FOR IN-OPERATOR\x00sub-select returns %d columns - expected %d\x00REUSE LIST SUBQUERY %d\x00%sLIST SUBQUERY %d\x00CORRELATED \x00REUSE SUBQUERY %d\x00%sSCALAR SUBQUERY %d\x001\x000x\x00hex literal too big: %s%#T\x00generated column loop on \"%s\"\x00blob\x00text\x00numeric\x00flexnum\x00none\x00misuse of aggregate: %#T()\x00unknown function: %#T()\x00RAISE() may only be used within a trigger-program\x00B\x00C\x00D\x00E\x00F\x00table %s may not be altered\x00SELECT 1 FROM \"%w\".sqlite_master WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%' AND sqlite_rename_test(%Q, sql, type, name, %d, %Q, %d)=NULL \x00SELECT 1 FROM temp.sqlite_master WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%' AND sqlite_rename_test(%Q, sql, type, name, 1, %Q, %d)=NULL \x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_quotefix(%Q, sql)WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%'\x00UPDATE temp.sqlite_master SET sql = sqlite_rename_quotefix('temp', sql)WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%'\x00there is already another table or index with this name: %s\x00table\x00view %s may not be altered\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, %d) WHERE (type!='index' OR tbl_name=%Q COLLATE nocase)AND name NOT LIKE 'sqliteX_%%' ESCAPE 'X'\x00UPDATE %Q.sqlite_master SET tbl_name = %Q, name = CASE WHEN type='table' THEN %Q WHEN name LIKE 'sqliteX_autoindex%%' ESCAPE 'X' AND type='index' THEN 'sqlite_autoindex_' || %Q || substr(name,%d+18) ELSE name END WHERE tbl_name=%Q COLLATE nocase AND (type='table' OR type='index' OR type='trigger');\x00sqlite_sequence\x00UPDATE \"%w\".sqlite_sequence set name = %Q WHERE name = %Q\x00UPDATE sqlite_temp_schema SET sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, 1), tbl_name = CASE WHEN tbl_name=%Q COLLATE nocase AND sqlite_rename_test(%Q, sql, type, name, 1, 'after rename', 0) THEN %Q ELSE tbl_name END WHERE type IN ('view', 'trigger')\x00after rename\x00SELECT raise(ABORT,%Q) FROM \"%w\".\"%w\"\x00Cannot add a PRIMARY KEY column\x00Cannot add a UNIQUE column\x00Cannot add a REFERENCES column with non-NULL default value\x00Cannot add a NOT NULL column with default value NULL\x00Cannot add a column with non-constant default\x00cannot add a STORED column\x00UPDATE \"%w\".sqlite_master SET sql = printf('%%.%ds, ',sql) || %Q || substr(sql,1+length(printf('%%.%ds',sql))) WHERE type = 'table' AND name = %Q\x00SELECT CASE WHEN quick_check GLOB 'CHECK*' THEN raise(ABORT,'CHECK constraint failed') ELSE raise(ABORT,'NOT NULL constraint failed') END FROM pragma_quick_check(%Q,%Q) WHERE quick_check GLOB 'CHECK*' OR quick_check GLOB 'NULL*'\x00virtual tables may not be altered\x00Cannot add a column to a view\x00sqlite_altertab_%s\x00view\x00virtual table\x00cannot %s %s \"%s\"\x00drop column from\x00rename columns of\x00no such column: \"%T\"\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, %d) WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND (type != 'index' OR 
tbl_name = %Q)\x00UPDATE temp.sqlite_master SET sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, 1) WHERE type IN ('trigger', 'view')\x00error in %s %s%s%s: %s\x00 \x00CREATE \x00\"%w\" \x00%Q%s\x00%.*s%s\x00cannot drop %s column: \"%s\"\x00PRIMARY KEY\x00cannot drop column \"%s\": no other columns exist\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_drop_column(%d, sql, %d) WHERE (type=='table' AND tbl_name=%Q COLLATE nocase)\x00after drop column\x00sqlite_rename_column\x00sqlite_rename_table\x00sqlite_rename_test\x00sqlite_drop_column\x00sqlite_rename_quotefix\x00CREATE TABLE %Q.%s(%s)\x00DELETE FROM %Q.%s WHERE %s=%Q\x00DELETE FROM %Q.%s\x00sqlite_stat1\x00tbl,idx,stat\x00sqlite_stat4\x00tbl,idx,neq,nlt,ndlt,sample\x00sqlite_stat3\x00stat_init\x00stat_push\x00%llu\x00 %llu\x00%llu \x00stat_get\x00sqlite\\_%\x00BBB\x00idx\x00tbl\x00unordered*\x00sz=[0-9]*\x00noskipscan*\x00SELECT idx,count(*) FROM %Q.sqlite_stat4 GROUP BY idx\x00SELECT idx,neq,nlt,ndlt,sample FROM %Q.sqlite_stat4\x00SELECT tbl,idx,stat FROM %Q.sqlite_stat1\x00x\x00\x00too many attached databases - max %d\x00database %s is already in use\x00database is already attached\x00attached databases must use the same text encoding as main database\x00unable to open database: %s\x00no such database: %s\x00cannot detach database %s\x00database %s is locked\x00sqlite_detach\x00sqlite_attach\x00%s cannot use variables\x00%s %T cannot reference objects in database %s\x00authorizer malfunction\x00%s.%s\x00%s.%z\x00access to %z is prohibited\x00not authorized\x00pragma_\x00no such view\x00no such table\x00corrupt database\x00unknown database %T\x00object name reserved for internal use: %s\x00temporary table name must be unqualified\x00%s %T already exists\x00there is already an index named %s\x00sqlite_returning\x00cannot use RETURNING in a trigger\x00too many columns on %s\x00always\x00generated\x00duplicate column name: %s\x00default value of column [%s] is not constant\x00cannot use DEFAULT on a generated column\x00generated columns cannot be part of the PRIMARY KEY\x00table \"%s\" has more than one primary key\x00AUTOINCREMENT is only allowed on an INTEGER PRIMARY KEY\x00virtual tables cannot use computed columns\x00virtual\x00stored\x00error in generated column \"%s\"\x00,\x00\n \x00,\n \x00\n)\x00CREATE TABLE \x00 TEXT\x00 NUM\x00 INT\x00 REAL\x00unknown datatype for %s.%s: \"%s\"\x00missing datatype for %s.%s\x00AUTOINCREMENT not allowed on WITHOUT ROWID tables\x00PRIMARY KEY missing on table %s\x00must have at least one non-generated column\x00TABLE\x00VIEW\x00CREATE %s %.*s\x00UPDATE %Q.sqlite_master SET type='%s', name=%Q, tbl_name=%Q, rootpage=#%d, sql=%Q WHERE rowid=#%d\x00CREATE TABLE %Q.sqlite_sequence(name,seq)\x00tbl_name='%q' AND type!='trigger'\x00parameters are not allowed in views\x00view %s is circularly defined\x00corrupt schema\x00UPDATE %Q.sqlite_master SET rootpage=%d WHERE #%d AND rootpage=#%d\x00sqlite_stat%d\x00DELETE FROM %Q.sqlite_sequence WHERE name=%Q\x00DELETE FROM %Q.sqlite_master WHERE tbl_name=%Q and type!='trigger'\x00table %s may not be dropped\x00use DROP TABLE to delete table %s\x00use DROP VIEW to delete view %s\x00foreign key on %s should reference only one column of table %T\x00number of columns in foreign key does not match the number of columns in the referenced table\x00unknown column \"%s\" in foreign key definition\x00unsupported use of NULLS %s\x00FIRST\x00LAST\x00index\x00cannot create a TEMP index on non-TEMP table \"%s\"\x00table %s may not be indexed\x00views may not be 
indexed\x00virtual tables may not be indexed\x00there is already a table named %s\x00index %s already exists\x00sqlite_autoindex_%s_%d\x00expressions prohibited in PRIMARY KEY and UNIQUE constraints\x00conflicting ON CONFLICT clauses specified\x00invalid rootpage\x00CREATE%s INDEX %.*s\x00 UNIQUE\x00INSERT INTO %Q.sqlite_master VALUES('index',%Q,%Q,#%d,%Q);\x00name='%q' AND type='index'\x00no such index: %S\x00index associated with UNIQUE or PRIMARY KEY constraint cannot be dropped\x00DELETE FROM %Q.sqlite_master WHERE name=%Q AND type='index'\x00too many FROM clause terms, max: %d\x00a JOIN clause is required before %s\x00ON\x00USING\x00BEGIN\x00ROLLBACK\x00COMMIT\x00RELEASE\x00unable to open a temporary database file for storing temporary tables\x00index '%q'\x00, \x00%s.rowid\x00unable to identify the object to be reindexed\x00duplicate WITH table name: %s\x00no such collation sequence: %s\x00unsafe use of virtual table \"%s\"\x00table %s may not be modified\x00cannot modify %s because it is a view\x00rows deleted\x00integer overflow\x00%.*f\x00LIKE or GLOB pattern too complex\x00ESCAPE expression must be a single character\x00%!.20e\x00%Q\x00?000\x00MATCH\x00like\x00implies_nonnull_row\x00expr_compare\x00expr_implies_expr\x00affinity\x00soundex\x00load_extension\x00sqlite_compileoption_used\x00sqlite_compileoption_get\x00unlikely\x00likelihood\x00likely\x00sqlite_offset\x00ltrim\x00rtrim\x00trim\x00min\x00max\x00typeof\x00subtype\x00length\x00instr\x00printf\x00format\x00unicode\x00char\x00abs\x00round\x00upper\x00lower\x00hex\x00unhex\x00ifnull\x00random\x00randomblob\x00nullif\x00sqlite_version\x00sqlite_source_id\x00sqlite_log\x00quote\x00last_insert_rowid\x00changes\x00total_changes\x00replace\x00zeroblob\x00substr\x00substring\x00sum\x00total\x00avg\x00count\x00group_concat\x00glob\x00ceil\x00ceiling\x00floor\x00trunc\x00ln\x00log\x00log10\x00log2\x00exp\x00pow\x00power\x00mod\x00acos\x00asin\x00atan\x00atan2\x00cos\x00sin\x00tan\x00cosh\x00sinh\x00tanh\x00acosh\x00asinh\x00atanh\x00sqrt\x00radians\x00degrees\x00pi\x00sign\x00iif\x00foreign key mismatch - \"%w\" referencing \"%w\"\x00cannot INSERT into generated column \"%s\"\x00table %S has no column named %s\x00table %S has %d columns but %d values were supplied\x00%d values for %d columns\x00UPSERT not implemented for virtual table \"%s\"\x00cannot UPSERT a view\x00rows inserted\x00sqlite3_extension_init\x00sqlite3_\x00lib\x00_init\x00no entry point [%s] in shared library [%s]\x00error during initialization: %s\x00unable to open shared library [%.*s]\x00so\x00automatic extension loading failed: 
%s\x00seq\x00from\x00to\x00on_update\x00on_delete\x00match\x00cid\x00name\x00type\x00notnull\x00dflt_value\x00pk\x00hidden\x00schema\x00ncol\x00wr\x00strict\x00seqno\x00desc\x00coll\x00key\x00builtin\x00enc\x00narg\x00flags\x00wdth\x00hght\x00flgs\x00unique\x00origin\x00partial\x00rowid\x00fkid\x00file\x00busy\x00checkpointed\x00database\x00status\x00cache_size\x00timeout\x00analysis_limit\x00application_id\x00auto_vacuum\x00automatic_index\x00busy_timeout\x00cache_spill\x00case_sensitive_like\x00cell_size_check\x00checkpoint_fullfsync\x00collation_list\x00compile_options\x00count_changes\x00data_version\x00database_list\x00default_cache_size\x00defer_foreign_keys\x00empty_result_callbacks\x00encoding\x00foreign_key_check\x00foreign_key_list\x00foreign_keys\x00freelist_count\x00full_column_names\x00fullfsync\x00function_list\x00hard_heap_limit\x00ignore_check_constraints\x00incremental_vacuum\x00index_info\x00index_list\x00index_xinfo\x00integrity_check\x00journal_mode\x00journal_size_limit\x00legacy_alter_table\x00locking_mode\x00max_page_count\x00mmap_size\x00module_list\x00optimize\x00page_count\x00page_size\x00pragma_list\x00query_only\x00quick_check\x00read_uncommitted\x00recursive_triggers\x00reverse_unordered_selects\x00schema_version\x00secure_delete\x00short_column_names\x00shrink_memory\x00soft_heap_limit\x00synchronous\x00table_info\x00table_list\x00table_xinfo\x00temp_store\x00temp_store_directory\x00threads\x00trusted_schema\x00user_version\x00wal_autocheckpoint\x00wal_checkpoint\x00writable_schema\x00onoffalseyestruextrafull\x00exclusive\x00normal\x00full\x00incremental\x00memory\x00temporary storage cannot be changed from within a transaction\x00SET NULL\x00SET DEFAULT\x00CASCADE\x00RESTRICT\x00NO ACTION\x00delete\x00persist\x00off\x00truncate\x00wal\x00w\x00a\x00sissii\x00utf8\x00utf16le\x00utf16be\x00-%T\x00fast\x00not a writable directory\x00Safety level may not be changed inside a transaction\x00reset\x00issisii\x00issisi\x00SELECT*FROM\"%w\"\x00shadow\x00sssiii\x00iisX\x00isiX\x00c\x00u\x00isisi\x00iss\x00is\x00iissssss\x00NONE\x00siX\x00*** in database %s ***\n\x00row not in PRIMARY KEY order for %s\x00NULL value in %s.%s\x00non-%s value in %s.%s\x00NUMERIC value in %s.%s\x00C\x00TEXT value in %s.%s\x00CHECK constraint failed in %s\x00row \x00 missing from index \x00 values differ from index \x00non-unique entry in index \x00wrong # of entries in index \x00ok\x00unsupported encoding: %s\x00restart\x00ANALYZE \"%w\".\"%w\"\x00UTF8\x00UTF-8\x00UTF-16le\x00UTF-16be\x00UTF16le\x00UTF16be\x00UTF-16\x00UTF16\x00CREATE TABLE x\x00%c\"%s\"\x00(\"%s\"\x00,arg HIDDEN\x00,schema HIDDEN\x00PRAGMA \x00%Q.\x00=%Q\x00error in %s %s after %s: %s\x00malformed database schema (%s)\x00%z - %s\x00rename\x00drop column\x00add column\x00orphan index\x00CREATE TABLE x(type text,name text,tbl_name text,rootpage int,sql text)\x00unsupported file format\x00SELECT*FROM\"%w\".%s ORDER BY rowid\x00database schema is locked: %s\x00statement too long\x00unknown join type: %T%s%T%s%T\x00naturaleftouterightfullinnercross\x00a NATURAL join may not have an ON or USING clause\x00cannot join using column %s - column not present in both tables\x00ambiguous reference to %s in USING()\x00UNION ALL\x00INTERSECT\x00EXCEPT\x00UNION\x00USE TEMP B-TREE FOR %s\x00USE TEMP B-TREE FOR %sORDER BY\x00RIGHT PART OF \x00column%d\x00%.*z:%u\x00NUM\x00cannot use window functions in recursive queries\x00recursive aggregate queries not supported\x00SETUP\x00RECURSIVE STEP\x00SCAN %d CONSTANT ROW%s\x00S\x00COMPOUND 
QUERY\x00LEFT-MOST SUBQUERY\x00%s USING TEMP B-TREE\x00all VALUES must have the same number of terms\x00SELECTs to the left and right of %s do not have the same number of result columns\x00MERGE (%s)\x00LEFT\x00RIGHT\x00no such index: %s\x00'%s' is not a function\x00no such index: \"%s\"\x00multiple references to recursive table: %s\x00circular reference: %s\x00table %s has %d values for %d columns\x00multiple recursive references: %s\x00recursive reference in a subquery: %s\x00%!S\x00too many references to \"%s\": max 65535\x00access to view \"%s\" prohibited\x00..%s\x00%s.%s.%s\x00no such table: %s\x00no tables specified\x00too many columns in result set\x00DISTINCT aggregates must have exactly one argument\x00USE TEMP B-TREE FOR %s(DISTINCT)\x00SCAN %s%s%s\x00 USING COVERING INDEX \x00target object/alias may not appear in FROM clause: %s\x00expected %d columns for '%s' but got %d\x00CO-ROUTINE %!S\x00MATERIALIZE %!S\x00DISTINCT\x00GROUP BY\x00sqlite3_get_table() called with two or more incompatible queries\x00temporary trigger may not have qualified name\x00trigger\x00cannot create triggers on virtual tables\x00trigger %T already exists\x00cannot create trigger on system table\x00cannot create %s trigger on view: %S\x00BEFORE\x00AFTER\x00cannot create INSTEAD OF trigger on table: %S\x00trigger \"%s\" may not write to shadow table \"%s\"\x00INSERT INTO %Q.sqlite_master VALUES('trigger',%Q,%Q,0,'CREATE TRIGGER %q')\x00type='trigger' AND name='%q'\x00no such trigger: %S\x00DELETE FROM %Q.sqlite_master WHERE name=%Q AND type='trigger'\x00%s RETURNING is not available on virtual tables\x00DELETE\x00UPDATE\x00RETURNING may not use \"TABLE.*\" wildcards\x00-- TRIGGER %s\x00cannot UPDATE generated column \"%s\"\x00no such column: %s\x00rows updated\x00%r \x00%sON CONFLICT clause does not match any PRIMARY KEY or UNIQUE constraint\x00CRE\x00INS\x00cannot VACUUM from within a transaction\x00cannot VACUUM - SQL statements in progress\x00non-text filename\x00ATTACH %Q AS vacuum_db\x00output file already exists\x00SELECT sql FROM \"%w\".sqlite_schema WHERE type='table'AND name<>'sqlite_sequence' AND coalesce(rootpage,1)>0\x00SELECT sql FROM \"%w\".sqlite_schema WHERE type='index'\x00SELECT'INSERT INTO vacuum_db.'||quote(name)||' SELECT*FROM\"%w\".'||quote(name)FROM vacuum_db.sqlite_schema WHERE type='table'AND coalesce(rootpage,1)>0\x00INSERT INTO vacuum_db.sqlite_schema SELECT*FROM \"%w\".sqlite_schema WHERE type IN('view','trigger') OR(type='table'AND rootpage=0)\x00CREATE VIRTUAL TABLE %T\x00UPDATE %Q.sqlite_master SET type='table', name=%Q, tbl_name=%Q, rootpage=0, sql=%Q WHERE rowid=#%d\x00name=%Q AND sql=%Q\x00vtable constructor called recursively: %s\x00vtable constructor failed: %s\x00vtable constructor did not declare schema: %s\x00no such module: %s\x00\x00 AND \x00(\x00 (\x00%s=?\x00ANY(%s)\x00>\x00<\x00%s %S\x00SEARCH\x00SCAN\x00AUTOMATIC PARTIAL COVERING INDEX\x00AUTOMATIC COVERING INDEX\x00COVERING INDEX %s\x00INDEX %s\x00 USING \x00 USING INTEGER PRIMARY KEY (%s\x00>? 
AND %s\x00%c?)\x00 VIRTUAL TABLE INDEX %d:%s\x00 LEFT-JOIN\x00BLOOM FILTER ON %S (\x00rowid=?\x00MULTI-INDEX OR\x00INDEX %d\x00RIGHT-JOIN %s\x00regexp\x00ON clause references tables to its right\x00NOCASE\x00too many arguments on %s() - max %d\x00automatic index on %s(%s)\x00auto-index\x00%s.xBestIndex malfunction\x00abbreviated query algorithm search\x00no query solution\x00at most %d tables in a join\x00SCAN CONSTANT ROW\x00second argument to nth_value must be a positive integer\x00argument of ntile must be a positive integer\x00row_number\x00dense_rank\x00rank\x00percent_rank\x00cume_dist\x00ntile\x00last_value\x00nth_value\x00first_value\x00lead\x00lag\x00no such window: %s\x00RANGE with offset PRECEDING/FOLLOWING requires one ORDER BY expression\x00FILTER clause may only be used with aggregate window functions\x00misuse of aggregate: %s()\x00unsupported frame specification\x00PARTITION clause\x00ORDER BY clause\x00frame specification\x00cannot override %s of window: %s\x00DISTINCT is not supported for window functions\x00frame starting offset must be a non-negative integer\x00frame ending offset must be a non-negative integer\x00frame starting offset must be a non-negative number\x00frame ending offset must be a non-negative number\x00%s clause should come after %s not before\x00ORDER BY\x00LIMIT\x00too many terms in compound SELECT\x00syntax error after column name \"%.*s\"\x00parser stack overflow\x00unknown table option: %.*s\x00set list\x00near \"%T\": syntax error\x00qualified table names are not allowed on INSERT, UPDATE, and DELETE statements within triggers\x00the INDEXED BY clause is not allowed on UPDATE or DELETE statements within triggers\x00the NOT INDEXED clause is not allowed on UPDATE or DELETE statements within triggers\x00incomplete input\x00unrecognized token: \"%T\"\x00%s in \"%s\"\x00create\x00temp\x00temporary\x00end\x00explain\x00unable to close due to unfinalized statements or unfinished backups\x00unknown error\x00abort due to ROLLBACK\x00another row available\x00no more rows available\x00not an error\x00SQL logic error\x00access permission denied\x00query aborted\x00database is locked\x00database table is locked\x00attempt to write a readonly database\x00interrupted\x00disk I/O error\x00database disk image is malformed\x00unknown operation\x00database or disk is full\x00unable to open database file\x00locking protocol\x00constraint failed\x00datatype mismatch\x00bad parameter or other API misuse\x00authorization denied\x00column index out of range\x00file is not a database\x00notification message\x00warning message\x00unable to delete/modify user-function due to active statements\x00unable to use function %s in the requested context\x00unknown database: %s\x00unable to delete/modify collation sequence due to active statements\x00file:\x00localhost\x00invalid uri authority: %.*s\x00vfs\x00cache\x00mode\x00no such %s mode: %s\x00%s mode not allowed: %s\x00no such vfs: %s\x00shared\x00private\x00ro\x00rw\x00rwc\x00RTRIM\x00\x00\x00\x00%s at line %d of [%.10s]\x00database corruption\x00misuse\x00cannot open file\x00no such table column: %s.%s\x00SQLITE_\x00database is deadlocked\x00array\x00object\x000123456789abcdef\x00JSON cannot hold BLOB values\x00malformed JSON\x00[0]\x00JSON path error near '%q'\x00json_%s() needs an odd number of arguments\x00$[\x00$.\x00json_object() requires an even number of arguments\x00json_object() labels must be TEXT\x00set\x00insert\x00[]\x00{}\x00CREATE TABLE x(key,value,type,atom,id,parent,fullkey,path,json HIDDEN,root 
HIDDEN)\x00.%.*s\x00[%d]\x00$\x00json\x00json_array\x00json_array_length\x00json_extract\x00->\x00->>\x00json_insert\x00json_object\x00json_patch\x00json_quote\x00json_remove\x00json_replace\x00json_set\x00json_type\x00json_valid\x00json_group_array\x00json_group_object\x00json_each\x00json_tree\x00%s_node\x00data\x00DROP TABLE '%q'.'%q_node';DROP TABLE '%q'.'%q_rowid';DROP TABLE '%q'.'%q_parent';\x00RtreeMatchArg\x00SELECT * FROM %Q.%Q\x00UNIQUE constraint failed: %s.%s\x00rtree constraint failed: %s.(%s<=%s)\x00ALTER TABLE %Q.'%q_node' RENAME TO \"%w_node\";ALTER TABLE %Q.'%q_parent' RENAME TO \"%w_parent\";ALTER TABLE %Q.'%q_rowid' RENAME TO \"%w_rowid\";\x00SELECT stat FROM %Q.sqlite_stat1 WHERE tbl = '%q_rowid'\x00node\x00CREATE TABLE \"%w\".\"%w_rowid\"(rowid INTEGER PRIMARY KEY,nodeno\x00,a%d\x00);CREATE TABLE \"%w\".\"%w_node\"(nodeno INTEGER PRIMARY KEY,data);\x00CREATE TABLE \"%w\".\"%w_parent\"(nodeno INTEGER PRIMARY KEY,parentnode);\x00INSERT INTO \"%w\".\"%w_node\"VALUES(1,zeroblob(%d))\x00INSERT INTO\"%w\".\"%w_rowid\"(rowid,nodeno)VALUES(?1,?2)ON CONFLICT(rowid)DO UPDATE SET nodeno=excluded.nodeno\x00SELECT * FROM \"%w\".\"%w_rowid\" WHERE rowid=?1\x00UPDATE \"%w\".\"%w_rowid\"SET \x00a%d=coalesce(?%d,a%d)\x00a%d=?%d\x00 WHERE rowid=?1\x00INSERT OR REPLACE INTO '%q'.'%q_node' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_node' WHERE nodeno = ?1\x00SELECT nodeno FROM '%q'.'%q_rowid' WHERE rowid = ?1\x00INSERT OR REPLACE INTO '%q'.'%q_rowid' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_rowid' WHERE rowid = ?1\x00SELECT parentnode FROM '%q'.'%q_parent' WHERE nodeno = ?1\x00INSERT OR REPLACE INTO '%q'.'%q_parent' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_parent' WHERE nodeno = ?1\x00PRAGMA %Q.page_size\x00SELECT length(data) FROM '%q'.'%q_node' WHERE nodeno = 1\x00undersize RTree blobs in \"%q_node\"\x00Wrong number of columns for an rtree table\x00Too few columns for an rtree table\x00Too many columns for an rtree table\x00Auxiliary rtree columns must be last\x00CREATE TABLE x(%.*s INT\x00,%.*s\x00);\x00,%.*s REAL\x00,%.*s INT\x00{%lld\x00 %g\x00}\x00Invalid argument to rtreedepth()\x00%z%s%z\x00SELECT data FROM %Q.'%q_node' WHERE nodeno=?\x00Node %lld missing from database\x00SELECT parentnode FROM %Q.'%q_parent' WHERE nodeno=?1\x00SELECT nodeno FROM %Q.'%q_rowid' WHERE rowid=?1\x00Mapping (%lld -> %lld) missing from %s table\x00%_rowid\x00%_parent\x00Found (%lld -> %lld) in %s table, expected (%lld -> %lld)\x00Dimension %d of cell %d on node %lld is corrupt\x00Dimension %d of cell %d on node %lld is corrupt relative to parent\x00Node %lld is too small (%d bytes)\x00Rtree depth out of range (%d)\x00Node %lld is too small for cell count of %d (%d bytes)\x00SELECT count(*) FROM %Q.'%q%s'\x00Wrong number of entries in %%%s table - expected %lld, actual %lld\x00SELECT * FROM %Q.'%q_rowid'\x00Schema corrupt or not an rtree\x00_rowid\x00_parent\x00END\x00wrong number of arguments to function rtreecheck()\x00[\x00[%!g,%!g],\x00[%!g,%!g]]\x00\x00CREATE TABLE x(_shape\x00,%s\x00rtree\x00fullscan\x00_shape does not contain a valid polygon\x00geopoly_overlap\x00geopoly_within\x00geopoly\x00geopoly_area\x00geopoly_blob\x00geopoly_json\x00geopoly_svg\x00geopoly_contains_point\x00geopoly_debug\x00geopoly_bbox\x00geopoly_xform\x00geopoly_regular\x00geopoly_ccw\x00geopoly_group_bbox\x00rtreenode\x00rtreedepth\x00rtreecheck\x00rtree_i32\x00corrupt fossil delta\x00DROP TRIGGER IF EXISTS temp.rbu_insert_tr;DROP TRIGGER IF EXISTS temp.rbu_update1_tr;DROP TRIGGER IF EXISTS temp.rbu_update2_tr;DROP TRIGGER IF 
EXISTS temp.rbu_delete_tr;\x00SELECT rbu_target_name(name, type='view') AS target, name FROM sqlite_schema WHERE type IN ('table', 'view') AND target IS NOT NULL %s ORDER BY name\x00AND rootpage!=0 AND rootpage IS NOT NULL\x00SELECT name, rootpage, sql IS NULL OR substr(8, 6)=='UNIQUE' FROM main.sqlite_schema WHERE type='index' AND tbl_name = ?\x00SELECT (sql COLLATE nocase BETWEEN 'CREATE VIRTUAL' AND 'CREATE VIRTUAM'), rootpage FROM sqlite_schema WHERE name=%Q\x00PRAGMA index_list=%Q\x00SELECT rootpage FROM sqlite_schema WHERE name = %Q\x00PRAGMA table_info=%Q\x00PRAGMA main.index_list = %Q\x00PRAGMA main.index_xinfo = %Q\x00SELECT * FROM '%q'\x00rbu_\x00rbu_rowid\x00table %q %s rbu_rowid column\x00may not have\x00requires\x00PRAGMA table_info(%Q)\x00column missing from %q: %s\x00%z%s\"%w\"\x00%z%s%s\"%w\"%s\x00SELECT max(_rowid_) FROM \"%s%w\"\x00 WHERE _rowid_ > %lld \x00 DESC\x00quote(\x00||','||\x00SELECT %s FROM \"%s%w\" ORDER BY %s LIMIT 1\x00 WHERE (%s) > (%s) \x00_rowid_\x00%z%s \"%w\" COLLATE %Q\x00%z%s \"rbu_imp_%d%w\" COLLATE %Q DESC\x00%z%s quote(\"rbu_imp_%d%w\")\x00SELECT %s FROM \"rbu_imp_%w\" ORDER BY %s LIMIT 1\x00%z%s%s\x00(%s) > (%s)\x00%z%s(%.*s) COLLATE %Q\x00%z%s\"%w\" COLLATE %Q\x00%z%s\"rbu_imp_%d%w\"%s\x00%z%s\"rbu_imp_%d%w\" %s COLLATE %Q\x00%z%s\"rbu_imp_%d%w\" IS ?\x00%z%s%s.\"%w\"\x00%z%sNULL\x00%z, %s._rowid_\x00_rowid_ = ?%d\x00%z%sc%d=?%d\x00_rowid_ = (SELECT id FROM rbu_imposter2 WHERE %z)\x00%z%s\"%w\"=?%d\x00invalid rbu_control value\x00%z%s\"%w\"=rbu_delta(\"%w\", ?%d)\x00%z%s\"%w\"=rbu_fossil_delta(\"%w\", ?%d)\x00PRIMARY KEY(\x00%z%s\"%w\"%s\x00%z)\x00SELECT name FROM sqlite_schema WHERE rootpage = ?\x00%z%sc%d %s COLLATE %Q\x00%z%sc%d%s\x00%z, id INTEGER\x00CREATE TABLE rbu_imposter2(%z, PRIMARY KEY(%z)) WITHOUT ROWID\x00PRIMARY KEY \x00%z%s\"%w\" %s %sCOLLATE %Q%s\x00 NOT NULL\x00%z, %z\x00CREATE TABLE \"rbu_imp_%w\"(%z)%s\x00 WITHOUT ROWID\x00INSERT INTO %s.'rbu_tmp_%q'(rbu_control,%s%s) VALUES(%z)\x00SELECT trim(sql) FROM sqlite_schema WHERE type='index' AND name=?\x00 LIMIT -1 OFFSET %d\x00CREATE TABLE \"rbu_imp_%w\"( %s, PRIMARY KEY( %s ) ) WITHOUT ROWID\x00INSERT INTO \"rbu_imp_%w\" VALUES(%s)\x00DELETE FROM \"rbu_imp_%w\" WHERE %s\x00SELECT %s, 0 AS rbu_control FROM '%q' %s %s %s ORDER BY %s%s\x00AND\x00WHERE\x00SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' %s ORDER BY %s%s\x00SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' %s UNION ALL SELECT %s, rbu_control FROM '%q' %s %s typeof(rbu_control)='integer' AND rbu_control!=1 ORDER BY %s%s\x00rbu_imp_\x00INSERT INTO \"%s%w\"(%s%s) VALUES(%s)\x00, _rowid_\x00DELETE FROM \"%s%w\" WHERE %s\x00, rbu_rowid\x00CREATE TABLE IF NOT EXISTS %s.'rbu_tmp_%q' AS SELECT *%s FROM '%q' WHERE 0;\x00, 0 AS rbu_rowid\x00CREATE TEMP TRIGGER rbu_delete_tr BEFORE DELETE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(3, %s);END;CREATE TEMP TRIGGER rbu_update1_tr BEFORE UPDATE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(3, %s);END;CREATE TEMP TRIGGER rbu_update2_tr AFTER UPDATE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(4, %s);END;\x00CREATE TEMP TRIGGER rbu_insert_tr AFTER INSERT ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(0, %s);END;\x00,_rowid_ \x00,rbu_rowid\x00SELECT %s,%s rbu_control%s FROM '%q'%s %s %s %s\x000 AS \x00UPDATE \"%s%w\" SET %s WHERE %s\x00SELECT k, v FROM %s.rbu_state\x00file:///%s-vacuum?modeof=%s\x00ATTACH %Q AS stat\x00CREATE TABLE IF NOT EXISTS %s.rbu_state(k INTEGER PRIMARY KEY, v)\x00cannot vacuum wal mode 
database\x00file:%s-vactmp?rbu_memory=1%s%s\x00&\x00rbu_tmp_insert\x00rbu_fossil_delta\x00rbu_target_name\x00SELECT * FROM sqlite_schema\x00rbu vfs not found\x00PRAGMA main.wal_checkpoint=restart\x00rbu_exclusive_checkpoint\x00%s-oal\x00%s-wal\x00PRAGMA schema_version\x00PRAGMA schema_version = %d\x00INSERT OR REPLACE INTO %s.rbu_state(k, v) VALUES (%d, %d), (%d, %Q), (%d, %Q), (%d, %d), (%d, %d), (%d, %lld), (%d, %lld), (%d, %lld), (%d, %lld), (%d, %Q) \x00PRAGMA main.%s\x00PRAGMA main.%s = %d\x00PRAGMA writable_schema=1\x00SELECT sql FROM sqlite_schema WHERE sql!='' AND rootpage!=0 AND name!='sqlite_sequence' ORDER BY type DESC\x00SELECT * FROM sqlite_schema WHERE rootpage=0 OR rootpage IS NULL\x00INSERT INTO sqlite_schema VALUES(?,?,?,?,?)\x00PRAGMA writable_schema=0\x00DELETE FROM %s.'rbu_tmp_%q'\x00rbu_state mismatch error\x00rbu_vfs_%d\x00SELECT count(*) FROM sqlite_schema WHERE type='index' AND tbl_name = %Q\x00rbu_index_cnt\x00SELECT 1 FROM sqlite_schema WHERE tbl_name = 'rbu_count'\x00SELECT sum(cnt * (1 + rbu_index_cnt(rbu_target_name(tbl))))FROM rbu_count\x00cannot update wal mode database\x00database modified during rbu %s\x00vacuum\x00update\x00BEGIN IMMEDIATE\x00PRAGMA journal_mode=off\x00-vactmp\x00DELETE FROM stat.rbu_state\x00rbu/zipvfs setup error\x00rbu(%s)/%z\x00rbu_memory\x00SELECT 0, 'tbl', '', 0, '', 1 UNION ALL SELECT 1, 'idx', '', 0, '', 2 UNION ALL SELECT 2, 'stat', '', 0, '', 0\x00PRAGMA '%q'.table_info('%q')\x00%z%s\"%w\".\"%w\".\"%w\"=\"%w\".\"%w\".\"%w\"\x00%z%s\"%w\".\"%w\".\"%w\" IS NOT \"%w\".\"%w\".\"%w\"\x00 OR \x00SELECT * FROM \"%w\".\"%w\" WHERE NOT EXISTS ( SELECT 1 FROM \"%w\".\"%w\" WHERE %s)\x00SELECT * FROM \"%w\".\"%w\", \"%w\".\"%w\" WHERE %s AND (%z)\x00table schemas do not match\x00SELECT tbl, ?2, stat FROM %Q.sqlite_stat1 WHERE tbl IS ?1 AND idx IS (CASE WHEN ?2=X'' THEN NULL ELSE ?2 END)\x00SELECT * FROM \x00 WHERE \x00 IS ?\x00SAVEPOINT changeset\x00RELEASE changeset\x00UPDATE main.\x00 SET \x00 = ?\x00idx IS CASE WHEN length(?4)=0 AND typeof(?4)='blob' THEN NULL ELSE ?4 END \x00DELETE FROM main.\x00 AND (?\x00AND \x00INSERT INTO main.\x00) VALUES(?\x00, ?\x00INSERT INTO main.sqlite_stat1 VALUES(?1, CASE WHEN length(?2)=0 AND typeof(?2)='blob' THEN NULL ELSE ?2 END, ?3)\x00DELETE FROM main.sqlite_stat1 WHERE tbl=?1 AND idx IS CASE WHEN length(?2)=0 AND typeof(?2)='blob' THEN NULL ELSE ?2 END AND (?4 OR stat IS ?3)\x00SAVEPOINT replace_op\x00RELEASE replace_op\x00SAVEPOINT changeset_apply\x00PRAGMA defer_foreign_keys = 1\x00sqlite3changeset_apply(): no such table: %s\x00sqlite3changeset_apply(): table %s has %d columns, expected %d or more\x00sqlite3changeset_apply(): primary key mismatch for table %s\x00PRAGMA defer_foreign_keys = 0\x00RELEASE changeset_apply\x00ROLLBACK TO changeset_apply\x00fts5: parser stack overflow\x00fts5: syntax error near \"%.*s\"\x00%z%.*s\x00wrong number of arguments to function highlight()\x00wrong number of arguments to function snippet()\x00snippet\x00highlight\x00bm25\x00prefix\x00malformed prefix=... directive\x00too many prefix indexes (max %d)\x00prefix length out of range (max 999)\x00tokenize\x00multiple tokenize=... directives\x00parse error in tokenize directive\x00content\x00multiple content=... directives\x00%Q.%Q\x00content_rowid\x00multiple content_rowid=... directives\x00columnsize\x00malformed columnsize=... directive\x00columns\x00malformed detail=... 
directive\x00unrecognized option: \"%.*s\"\x00reserved fts5 column name: %s\x00unindexed\x00unrecognized column option: %s\x00T.%Q\x00, T.%Q\x00, T.c%d\x00reserved fts5 table name: %s\x00parse error in \"%s\"\x00docsize\x00%Q.'%q_%s'\x00CREATE TABLE x(\x00%z%s%Q\x00%z, %Q HIDDEN, %s HIDDEN)\x00pgsz\x00hashsize\x00automerge\x00usermerge\x00crisismerge\x00SELECT k, v FROM %Q.'%q_config'\x00version\x00invalid fts5 file format (found %d, expected %d) - run 'rebuild'\x00unterminated string\x00fts5: syntax error near \"%.1s\"\x00OR\x00NOT\x00NEAR\x00expected integer, got \"%.*s\"\x00fts5: column queries are not supported (detail=none)\x00fts5: %s queries are not supported (detail!=full)\x00phrase\x00block\x00REPLACE INTO '%q'.'%q_data'(id, block) VALUES(?,?)\x00DELETE FROM '%q'.'%q_data' WHERE id>=? AND id<=?\x00DELETE FROM '%q'.'%q_idx' WHERE segid=?\x00PRAGMA %Q.data_version\x00SELECT pgno FROM '%q'.'%q_idx' WHERE segid=? AND term<=? ORDER BY term DESC LIMIT 1\x00INSERT INTO '%q'.'%q_idx'(segid,term,pgno) VALUES(?,?,?)\x00%s_data\x00id INTEGER PRIMARY KEY, block BLOB\x00segid, term, pgno, PRIMARY KEY(segid, term)\x00SELECT segid, term, (pgno>>1), (pgno&1) FROM %Q.'%q_idx' WHERE segid=%d ORDER BY 1, 2\x00\x00\x00\x00\x00\x00recursively defined fts5 content table\x00SELECT rowid, rank FROM %Q.%Q ORDER BY %s(\"%w\"%s%s) %s\x00DESC\x00ASC\x00reads\x00unknown special query: %.*s\x00SELECT %s\x00no such function: %s\x00parse error in rank function: %s\x00%s: table does not support scanning\x00delete-all\x00'delete-all' may only be used with a contentless or external content fts5 table\x00rebuild\x00'rebuild' may not be used with a contentless fts5 table\x00merge\x00integrity-check\x00cannot %s contentless fts5 table: %s\x00DELETE from\x00no such cursor: %lld\x00no such tokenizer: %s\x00error in tokenizer constructor\x00fts5_api_ptr\x00fts5: 2023-02-21 18:09:37 05941c2a04037fc3ed2ffae11f5d2260706f89431f463518740f72ada350866d\x00config\x00fts5\x00fts5_source_id\x00SELECT %s FROM %s T WHERE T.%Q >= ? AND T.%Q <= ? ORDER BY T.%Q ASC\x00SELECT %s FROM %s T WHERE T.%Q <= ? AND T.%Q >= ? 
ORDER BY T.%Q DESC\x00SELECT %s FROM %s T WHERE T.%Q=?\x00INSERT INTO %Q.'%q_content' VALUES(%s)\x00REPLACE INTO %Q.'%q_content' VALUES(%s)\x00DELETE FROM %Q.'%q_content' WHERE id=?\x00REPLACE INTO %Q.'%q_docsize' VALUES(?,?)\x00DELETE FROM %Q.'%q_docsize' WHERE id=?\x00SELECT sz FROM %Q.'%q_docsize' WHERE id=?\x00REPLACE INTO %Q.'%q_config' VALUES(?,?)\x00SELECT %s FROM %s AS T\x00DROP TABLE IF EXISTS %Q.'%q_data';DROP TABLE IF EXISTS %Q.'%q_idx';DROP TABLE IF EXISTS %Q.'%q_config';\x00DROP TABLE IF EXISTS %Q.'%q_docsize';\x00DROP TABLE IF EXISTS %Q.'%q_content';\x00ALTER TABLE %Q.'%q_%s' RENAME TO '%q_%s';\x00CREATE TABLE %Q.'%q_%q'(%s)%s\x00fts5: error creating shadow table %q_%s: %s\x00id INTEGER PRIMARY KEY\x00, c%d\x00id INTEGER PRIMARY KEY, sz BLOB\x00k PRIMARY KEY, v\x00DELETE FROM %Q.'%q_data';DELETE FROM %Q.'%q_idx';\x00DELETE FROM %Q.'%q_docsize';\x00SELECT count(*) FROM %Q.'%q_%s'\x00tokenchars\x00separators\x00L* N* Co\x00categories\x00remove_diacritics\x00unicode61\x00al\x00ance\x00ence\x00er\x00ic\x00able\x00ible\x00ant\x00ement\x00ment\x00ent\x00ion\x00ou\x00ism\x00ate\x00iti\x00ous\x00ive\x00ize\x00at\x00bl\x00ble\x00iz\x00ational\x00tional\x00tion\x00enci\x00anci\x00izer\x00logi\x00bli\x00alli\x00entli\x00eli\x00e\x00ousli\x00ization\x00ation\x00ator\x00alism\x00iveness\x00fulness\x00ful\x00ousness\x00aliti\x00iviti\x00biliti\x00ical\x00ness\x00icate\x00iciti\x00ative\x00alize\x00eed\x00ee\x00ed\x00ing\x00case_sensitive\x00ascii\x00porter\x00trigram\x00col\x00row\x00instance\x00fts5vocab: unknown table type: %Q\x00CREATE TABlE vocab(term, col, doc, cnt)\x00CREATE TABlE vocab(term, doc, cnt)\x00CREATE TABlE vocab(term, doc, col, offset)\x00wrong number of vtable arguments\x00recursive definition for %s.%s\x00SELECT t.%Q FROM %Q.%Q AS t WHERE t.%Q MATCH '*id'\x00no such fts5 table: %s.%s\x00fts5vocab\x002023-02-21 18:09:37 05941c2a04037fc3ed2ffae11f5d2260706f89431f463518740f72ada350866d\x00" +var ts1 = "3.41.2\x00ATOMIC_INTRINSICS=0\x00COMPILER=clang-13.0.0\x00DEFAULT_AUTOVACUUM\x00DEFAULT_CACHE_SIZE=-2000\x00DEFAULT_FILE_FORMAT=4\x00DEFAULT_JOURNAL_SIZE_LIMIT=-1\x00DEFAULT_MEMSTATUS=0\x00DEFAULT_MMAP_SIZE=0\x00DEFAULT_PAGE_SIZE=4096\x00DEFAULT_PCACHE_INITSZ=20\x00DEFAULT_RECURSIVE_TRIGGERS\x00DEFAULT_SECTOR_SIZE=4096\x00DEFAULT_SYNCHRONOUS=2\x00DEFAULT_WAL_AUTOCHECKPOINT=1000\x00DEFAULT_WAL_SYNCHRONOUS=2\x00DEFAULT_WORKER_THREADS=0\x00ENABLE_COLUMN_METADATA\x00ENABLE_FTS5\x00ENABLE_GEOPOLY\x00ENABLE_MATH_FUNCTIONS\x00ENABLE_MEMORY_MANAGEMENT\x00ENABLE_OFFSET_SQL_FUNC\x00ENABLE_PREUPDATE_HOOK\x00ENABLE_RBU\x00ENABLE_RTREE\x00ENABLE_SESSION\x00ENABLE_SNAPSHOT\x00ENABLE_STAT4\x00ENABLE_UNLOCK_NOTIFY\x00LIKE_DOESNT_MATCH_BLOBS\x00MALLOC_SOFT_LIMIT=1024\x00MAX_ATTACHED=10\x00MAX_COLUMN=2000\x00MAX_COMPOUND_SELECT=500\x00MAX_DEFAULT_PAGE_SIZE=8192\x00MAX_EXPR_DEPTH=1000\x00MAX_FUNCTION_ARG=127\x00MAX_LENGTH=1000000000\x00MAX_LIKE_PATTERN_LENGTH=50000\x00MAX_MMAP_SIZE=0x7fff0000\x00MAX_PAGE_COUNT=1073741823\x00MAX_PAGE_SIZE=65536\x00MAX_SQL_LENGTH=1000000000\x00MAX_TRIGGER_DEPTH=1000\x00MAX_VARIABLE_NUMBER=32766\x00MAX_VDBE_OP=250000000\x00MAX_WORKER_THREADS=8\x00MUTEX_NOOP\x00SOUNDEX\x00SYSTEM_MALLOC\x00TEMP_STORE=1\x00THREADSAFE=1\x00BINARY\x00ANY\x00BLOB\x00INT\x00INTEGER\x00REAL\x00TEXT\x0020b:20e\x0020c:20e\x0020e\x0040f-21a-21d\x00now\x00local time unavailable\x00second\x00minute\x00hour\x00\x00\x00day\x00\x00\x00\x00month\x00\x00year\x00\x00\x00auto\x00julianday\x00localtime\x00unixepoch\x00utc\x00weekday \x00start of 
\x00month\x00year\x00day\x00%02d\x00%06.3f\x00%03d\x00%.16g\x00%lld\x00%04d\x00date\x00time\x00datetime\x00strftime\x00current_time\x00current_timestamp\x00current_date\x00failed to allocate %u bytes of memory\x00failed memory resize %u to %u bytes\x00out of memory\x000123456789ABCDEF0123456789abcdef\x00-x0\x00X0\x00%\x00NaN\x00Inf\x00\x00NULL\x00(NULL)\x00.\x00(join-%u)\x00(subquery-%u)\x00thstndrd\x00922337203685477580\x00API call with %s database connection pointer\x00unopened\x00invalid\x00Savepoint\x00AutoCommit\x00Transaction\x00Checkpoint\x00JournalMode\x00Vacuum\x00VFilter\x00VUpdate\x00Init\x00Goto\x00Gosub\x00InitCoroutine\x00Yield\x00MustBeInt\x00Jump\x00Once\x00If\x00IfNot\x00IsType\x00Not\x00IfNullRow\x00SeekLT\x00SeekLE\x00SeekGE\x00SeekGT\x00IfNotOpen\x00IfNoHope\x00NoConflict\x00NotFound\x00Found\x00SeekRowid\x00NotExists\x00Last\x00IfSmaller\x00SorterSort\x00Sort\x00Rewind\x00SorterNext\x00Prev\x00Next\x00IdxLE\x00IdxGT\x00IdxLT\x00Or\x00And\x00IdxGE\x00RowSetRead\x00RowSetTest\x00Program\x00FkIfZero\x00IsNull\x00NotNull\x00Ne\x00Eq\x00Gt\x00Le\x00Lt\x00Ge\x00ElseEq\x00IfPos\x00IfNotZero\x00DecrJumpZero\x00IncrVacuum\x00VNext\x00Filter\x00PureFunc\x00Function\x00Return\x00EndCoroutine\x00HaltIfNull\x00Halt\x00Integer\x00Int64\x00String\x00BeginSubrtn\x00Null\x00SoftNull\x00Blob\x00Variable\x00Move\x00Copy\x00SCopy\x00IntCopy\x00FkCheck\x00ResultRow\x00CollSeq\x00AddImm\x00RealAffinity\x00Cast\x00Permutation\x00Compare\x00IsTrue\x00ZeroOrNull\x00Offset\x00Column\x00TypeCheck\x00Affinity\x00MakeRecord\x00Count\x00ReadCookie\x00SetCookie\x00ReopenIdx\x00BitAnd\x00BitOr\x00ShiftLeft\x00ShiftRight\x00Add\x00Subtract\x00Multiply\x00Divide\x00Remainder\x00Concat\x00OpenRead\x00OpenWrite\x00BitNot\x00OpenDup\x00OpenAutoindex\x00String8\x00OpenEphemeral\x00SorterOpen\x00SequenceTest\x00OpenPseudo\x00Close\x00ColumnsUsed\x00SeekScan\x00SeekHit\x00Sequence\x00NewRowid\x00Insert\x00RowCell\x00Delete\x00ResetCount\x00SorterCompare\x00SorterData\x00RowData\x00Rowid\x00NullRow\x00SeekEnd\x00IdxInsert\x00SorterInsert\x00IdxDelete\x00DeferredSeek\x00IdxRowid\x00FinishSeek\x00Destroy\x00Clear\x00ResetSorter\x00CreateBtree\x00SqlExec\x00ParseSchema\x00LoadAnalysis\x00DropTable\x00DropIndex\x00Real\x00DropTrigger\x00IntegrityCk\x00RowSetAdd\x00Param\x00FkCounter\x00MemMax\x00OffsetLimit\x00AggInverse\x00AggStep\x00AggStep1\x00AggValue\x00AggFinal\x00Expire\x00CursorLock\x00CursorUnlock\x00TableLock\x00VBegin\x00VCreate\x00VDestroy\x00VOpen\x00VInitIn\x00VColumn\x00VRename\x00Pagecount\x00MaxPgcnt\x00ClrSubtype\x00FilterAdd\x00Trace\x00CursorHint\x00ReleaseReg\x00Noop\x00Explain\x00Abortable\x00open\x00close\x00access\x00getcwd\x00stat\x00fstat\x00ftruncate\x00fcntl\x00read\x00pread\x00pread64\x00write\x00pwrite\x00pwrite64\x00fchmod\x00fallocate\x00unlink\x00openDirectory\x00mkdir\x00rmdir\x00fchown\x00geteuid\x00mmap\x00munmap\x00mremap\x00getpagesize\x00readlink\x00lstat\x00ioctl\x00attempt to open \"%s\" as file descriptor %d\x00/dev/null\x00os_unix.c:%d: (%d) %s(%s) - %s\x00cannot fstat db file %s\x00file unlinked while open: %s\x00multiple links to file: %s\x00file renamed while open: %s\x00%s\x00full_fsync\x00%s-shm\x00readonly_shm\x00psow\x00unix-excl\x00%s.lock\x00/var/tmp\x00/usr/tmp\x00/tmp\x00SQLITE_TMPDIR\x00TMPDIR\x00%s/etilqs_%llx%c\x00modeof\x00fsync\x00/dev/urandom\x00unix\x00unix-none\x00unix-dotfile\x00memdb\x00memdb(%p,%lld)\x00PRAGMA \"%w\".page_count\x00ATTACH x AS %Q\x00recovered %d pages from %s\x00-journal\x00-wal\x00nolock\x00immutable\x00PRAGMA 
table_list\x00recovered %d frames from WAL file %s\x00cannot limit WAL size: %s\x00SQLite format 3\x00:memory:\x00@ \x00\n\x00invalid page number %d\x002nd reference to page %d\x00Failed to read ptrmap key=%d\x00Bad ptr map entry key=%d expected=(%d,%d) got=(%d,%d)\x00failed to get page %d\x00freelist leaf count too big on page %d\x00%s is %d but should be %d\x00size\x00overflow list length\x00Page %u: \x00unable to get the page. error code=%d\x00btreeInitPage() returns error code %d\x00free space corruption\x00On tree page %u cell %d: \x00On page %u at right child: \x00Offset %d out of range %d..%d\x00Extends off end of page\x00Rowid %lld out of order\x00Child page depth differs\x00Multiple uses for byte %u of page %u\x00Fragmentation of %d bytes reported as %d on page %u\x00Main freelist: \x00max rootpage (%d) disagrees with header (%d)\x00incremental_vacuum enabled with a max rootpage of zero\x00Page %d is never used\x00Pointer map page %d is referenced\x00unknown database %s\x00destination database is in use\x00source and destination must be distinct\x00%!.15g\x00-\x00%s%s\x00k(%d\x00B\x00,%s%s%s\x00N.\x00)\x00%.18s-%s\x00%s(%d)\x00%d\x00(blob)\x00vtab:%p\x00%c%u\x00]\x00program\x00?\x008\x0016LE\x0016BE\x00addr\x00opcode\x00p1\x00p2\x00p3\x00p4\x00p5\x00comment\x00id\x00parent\x00notused\x00detail\x00%.4c%s%.16c\x00MJ delete: %s\x00MJ collide: %s\x00-mj%06X9%02X\x00FOREIGN KEY constraint failed\x00a CHECK constraint\x00a generated column\x00an index\x00non-deterministic use of %s() in %s\x00API called with finalized prepared statement\x00API called with NULL prepared statement\x00string or blob too big\x00bind on a busy prepared statement: [%s]\x00-- \x00'%.*q'\x00zeroblob(%d)\x00x'\x00%02x\x00'\x00%s constraint failed\x00%z: %s\x00abort at %d in [%s]: %s\x00cannot store %s value in %s column %s.%s\x00cannot open savepoint - SQL statements in progress\x00no such savepoint: %s\x00cannot release savepoint - SQL statements in progress\x00cannot commit transaction - SQL statements in progress\x00cannot start a transaction within a transaction\x00cannot rollback - no transaction is active\x00cannot commit - no transaction is active\x00database schema has changed\x00index corruption\x00sqlite_master\x00SELECT*FROM\"%w\".%s WHERE %s ORDER BY rowid\x00too many levels of trigger recursion\x00cannot change %s wal mode from within a transaction\x00into\x00out of\x00database table is locked: %s\x00ValueList\x00-- %s\x00statement aborts at %d: [%s] %s\x00NOT NULL\x00UNIQUE\x00CHECK\x00FOREIGN KEY\x00cannot open value of type %s\x00null\x00real\x00integer\x00no such rowid: %lld\x00cannot open virtual table: %s\x00cannot open table without rowid: %s\x00cannot open view: %s\x00no such column: \"%s\"\x00foreign key\x00indexed\x00cannot open %s column for writing\x00sqlite_\x00sqlite_temp_master\x00sqlite_temp_schema\x00sqlite_schema\x00main\x00*\x00new\x00old\x00excluded\x00misuse of aliased aggregate %s\x00misuse of aliased window function %s\x00row value misused\x00double-quoted string literal: \"%w\"\x00coalesce\x00no such column\x00ambiguous column name\x00%s: %s.%s.%s\x00%s: %s.%s\x00%s: %s\x00partial index WHERE clauses\x00index expressions\x00CHECK constraints\x00generated columns\x00%s prohibited in %s\x00the \".\" operator\x00second argument to %#T() must be a constant between 0.0 and 1.0\x00not authorized to use function: %#T\x00non-deterministic functions\x00%#T() may not be used as a window function\x00window\x00aggregate\x00misuse of %s function %#T()\x00no such function: %#T\x00wrong 
number of arguments to function %#T()\x00FILTER may not be used with non-aggregate %#T()\x00subqueries\x00parameters\x00%r %s BY term out of range - should be between 1 and %d\x00too many terms in ORDER BY clause\x00ORDER\x00%r ORDER BY term does not match any column in the result set\x00too many terms in %s BY clause\x00HAVING clause on a non-aggregate query\x00GROUP\x00aggregate functions are not allowed in the GROUP BY clause\x00Expression tree is too large (maximum depth %d)\x00IN(...) element has %d term%s - expected %d\x00s\x000\x00too many arguments on function %T\x00unsafe use of %#T()\x00variable number must be between ?1 and ?%d\x00too many SQL variables\x00%d columns assigned %d values\x00too many columns in %s\x00true\x00false\x00_ROWID_\x00ROWID\x00OID\x00USING ROWID SEARCH ON TABLE %s FOR IN-OPERATOR\x00USING INDEX %s FOR IN-OPERATOR\x00sub-select returns %d columns - expected %d\x00REUSE LIST SUBQUERY %d\x00%sLIST SUBQUERY %d\x00CORRELATED \x00REUSE SUBQUERY %d\x00%sSCALAR SUBQUERY %d\x001\x000x\x00hex literal too big: %s%#T\x00generated column loop on \"%s\"\x00blob\x00text\x00numeric\x00flexnum\x00none\x00misuse of aggregate: %#T()\x00unknown function: %#T()\x00RAISE() may only be used within a trigger-program\x00B\x00C\x00D\x00E\x00F\x00table %s may not be altered\x00SELECT 1 FROM \"%w\".sqlite_master WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%' AND sqlite_rename_test(%Q, sql, type, name, %d, %Q, %d)=NULL \x00SELECT 1 FROM temp.sqlite_master WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%' AND sqlite_rename_test(%Q, sql, type, name, 1, %Q, %d)=NULL \x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_quotefix(%Q, sql)WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%'\x00UPDATE temp.sqlite_master SET sql = sqlite_rename_quotefix('temp', sql)WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%'\x00there is already another table or index with this name: %s\x00table\x00view %s may not be altered\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, %d) WHERE (type!='index' OR tbl_name=%Q COLLATE nocase)AND name NOT LIKE 'sqliteX_%%' ESCAPE 'X'\x00UPDATE %Q.sqlite_master SET tbl_name = %Q, name = CASE WHEN type='table' THEN %Q WHEN name LIKE 'sqliteX_autoindex%%' ESCAPE 'X' AND type='index' THEN 'sqlite_autoindex_' || %Q || substr(name,%d+18) ELSE name END WHERE tbl_name=%Q COLLATE nocase AND (type='table' OR type='index' OR type='trigger');\x00sqlite_sequence\x00UPDATE \"%w\".sqlite_sequence set name = %Q WHERE name = %Q\x00UPDATE sqlite_temp_schema SET sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, 1), tbl_name = CASE WHEN tbl_name=%Q COLLATE nocase AND sqlite_rename_test(%Q, sql, type, name, 1, 'after rename', 0) THEN %Q ELSE tbl_name END WHERE type IN ('view', 'trigger')\x00after rename\x00SELECT raise(ABORT,%Q) FROM \"%w\".\"%w\"\x00Cannot add a PRIMARY KEY column\x00Cannot add a UNIQUE column\x00Cannot add a REFERENCES column with non-NULL default value\x00Cannot add a NOT NULL column with default value NULL\x00Cannot add a column with non-constant default\x00cannot add a STORED column\x00UPDATE \"%w\".sqlite_master SET sql = printf('%%.%ds, ',sql) || %Q || substr(sql,1+length(printf('%%.%ds',sql))) WHERE type = 'table' AND name = %Q\x00SELECT CASE WHEN quick_check GLOB 'CHECK*' THEN raise(ABORT,'CHECK constraint failed') ELSE raise(ABORT,'NOT NULL constraint failed') END FROM pragma_quick_check(%Q,%Q) 
WHERE quick_check GLOB 'CHECK*' OR quick_check GLOB 'NULL*'\x00virtual tables may not be altered\x00Cannot add a column to a view\x00sqlite_altertab_%s\x00view\x00virtual table\x00cannot %s %s \"%s\"\x00drop column from\x00rename columns of\x00no such column: \"%T\"\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, %d) WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND (type != 'index' OR tbl_name = %Q)\x00UPDATE temp.sqlite_master SET sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, 1) WHERE type IN ('trigger', 'view')\x00error in %s %s%s%s: %s\x00 \x00CREATE \x00\"%w\" \x00%Q%s\x00%.*s%s\x00cannot drop %s column: \"%s\"\x00PRIMARY KEY\x00cannot drop column \"%s\": no other columns exist\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_drop_column(%d, sql, %d) WHERE (type=='table' AND tbl_name=%Q COLLATE nocase)\x00after drop column\x00sqlite_rename_column\x00sqlite_rename_table\x00sqlite_rename_test\x00sqlite_drop_column\x00sqlite_rename_quotefix\x00CREATE TABLE %Q.%s(%s)\x00DELETE FROM %Q.%s WHERE %s=%Q\x00DELETE FROM %Q.%s\x00sqlite_stat1\x00tbl,idx,stat\x00sqlite_stat4\x00tbl,idx,neq,nlt,ndlt,sample\x00sqlite_stat3\x00stat_init\x00stat_push\x00%llu\x00 %llu\x00%llu \x00stat_get\x00sqlite\\_%\x00BBB\x00idx\x00tbl\x00unordered*\x00sz=[0-9]*\x00noskipscan*\x00SELECT idx,count(*) FROM %Q.sqlite_stat4 GROUP BY idx\x00SELECT idx,neq,nlt,ndlt,sample FROM %Q.sqlite_stat4\x00SELECT tbl,idx,stat FROM %Q.sqlite_stat1\x00x\x00\x00too many attached databases - max %d\x00database %s is already in use\x00database is already attached\x00attached databases must use the same text encoding as main database\x00unable to open database: %s\x00no such database: %s\x00cannot detach database %s\x00database %s is locked\x00sqlite_detach\x00sqlite_attach\x00%s cannot use variables\x00%s %T cannot reference objects in database %s\x00authorizer malfunction\x00%s.%s\x00%s.%z\x00access to %z is prohibited\x00not authorized\x00pragma_\x00no such view\x00no such table\x00corrupt database\x00unknown database %T\x00object name reserved for internal use: %s\x00temporary table name must be unqualified\x00%s %T already exists\x00there is already an index named %s\x00sqlite_returning\x00cannot use RETURNING in a trigger\x00too many columns on %s\x00always\x00generated\x00duplicate column name: %s\x00default value of column [%s] is not constant\x00cannot use DEFAULT on a generated column\x00generated columns cannot be part of the PRIMARY KEY\x00table \"%s\" has more than one primary key\x00AUTOINCREMENT is only allowed on an INTEGER PRIMARY KEY\x00virtual tables cannot use computed columns\x00virtual\x00stored\x00error in generated column \"%s\"\x00,\x00\n \x00,\n \x00\n)\x00CREATE TABLE \x00 TEXT\x00 NUM\x00 INT\x00 REAL\x00unknown datatype for %s.%s: \"%s\"\x00missing datatype for %s.%s\x00AUTOINCREMENT not allowed on WITHOUT ROWID tables\x00PRIMARY KEY missing on table %s\x00must have at least one non-generated column\x00TABLE\x00VIEW\x00CREATE %s %.*s\x00UPDATE %Q.sqlite_master SET type='%s', name=%Q, tbl_name=%Q, rootpage=#%d, sql=%Q WHERE rowid=#%d\x00CREATE TABLE %Q.sqlite_sequence(name,seq)\x00tbl_name='%q' AND type!='trigger'\x00parameters are not allowed in views\x00view %s is circularly defined\x00corrupt schema\x00UPDATE %Q.sqlite_master SET rootpage=%d WHERE #%d AND rootpage=#%d\x00sqlite_stat%d\x00DELETE FROM %Q.sqlite_sequence WHERE name=%Q\x00DELETE FROM %Q.sqlite_master WHERE tbl_name=%Q and type!='trigger'\x00table %s may not be dropped\x00use 
DROP TABLE to delete table %s\x00use DROP VIEW to delete view %s\x00foreign key on %s should reference only one column of table %T\x00number of columns in foreign key does not match the number of columns in the referenced table\x00unknown column \"%s\" in foreign key definition\x00unsupported use of NULLS %s\x00FIRST\x00LAST\x00index\x00cannot create a TEMP index on non-TEMP table \"%s\"\x00table %s may not be indexed\x00views may not be indexed\x00virtual tables may not be indexed\x00there is already a table named %s\x00index %s already exists\x00sqlite_autoindex_%s_%d\x00expressions prohibited in PRIMARY KEY and UNIQUE constraints\x00conflicting ON CONFLICT clauses specified\x00invalid rootpage\x00CREATE%s INDEX %.*s\x00 UNIQUE\x00INSERT INTO %Q.sqlite_master VALUES('index',%Q,%Q,#%d,%Q);\x00name='%q' AND type='index'\x00no such index: %S\x00index associated with UNIQUE or PRIMARY KEY constraint cannot be dropped\x00DELETE FROM %Q.sqlite_master WHERE name=%Q AND type='index'\x00too many FROM clause terms, max: %d\x00a JOIN clause is required before %s\x00ON\x00USING\x00BEGIN\x00ROLLBACK\x00COMMIT\x00RELEASE\x00unable to open a temporary database file for storing temporary tables\x00index '%q'\x00, \x00%s.rowid\x00unable to identify the object to be reindexed\x00duplicate WITH table name: %s\x00no such collation sequence: %s\x00unsafe use of virtual table \"%s\"\x00table %s may not be modified\x00cannot modify %s because it is a view\x00rows deleted\x00integer overflow\x00%.*f\x00LIKE or GLOB pattern too complex\x00ESCAPE expression must be a single character\x00%!.20e\x00%Q\x00?000\x00MATCH\x00like\x00implies_nonnull_row\x00expr_compare\x00expr_implies_expr\x00affinity\x00soundex\x00load_extension\x00sqlite_compileoption_used\x00sqlite_compileoption_get\x00unlikely\x00likelihood\x00likely\x00sqlite_offset\x00ltrim\x00rtrim\x00trim\x00min\x00max\x00typeof\x00subtype\x00length\x00instr\x00printf\x00format\x00unicode\x00char\x00abs\x00round\x00upper\x00lower\x00hex\x00unhex\x00ifnull\x00random\x00randomblob\x00nullif\x00sqlite_version\x00sqlite_source_id\x00sqlite_log\x00quote\x00last_insert_rowid\x00changes\x00total_changes\x00replace\x00zeroblob\x00substr\x00substring\x00sum\x00total\x00avg\x00count\x00group_concat\x00glob\x00ceil\x00ceiling\x00floor\x00trunc\x00ln\x00log\x00log10\x00log2\x00exp\x00pow\x00power\x00mod\x00acos\x00asin\x00atan\x00atan2\x00cos\x00sin\x00tan\x00cosh\x00sinh\x00tanh\x00acosh\x00asinh\x00atanh\x00sqrt\x00radians\x00degrees\x00pi\x00sign\x00iif\x00foreign key mismatch - \"%w\" referencing \"%w\"\x00cannot INSERT into generated column \"%s\"\x00table %S has no column named %s\x00table %S has %d columns but %d values were supplied\x00%d values for %d columns\x00UPSERT not implemented for virtual table \"%s\"\x00cannot UPSERT a view\x00rows inserted\x00sqlite3_extension_init\x00sqlite3_\x00lib\x00_init\x00no entry point [%s] in shared library [%s]\x00error during initialization: %s\x00unable to open shared library [%.*s]\x00so\x00automatic extension loading failed: 
%s\x00seq\x00from\x00to\x00on_update\x00on_delete\x00match\x00cid\x00name\x00type\x00notnull\x00dflt_value\x00pk\x00hidden\x00schema\x00ncol\x00wr\x00strict\x00seqno\x00desc\x00coll\x00key\x00builtin\x00enc\x00narg\x00flags\x00wdth\x00hght\x00flgs\x00unique\x00origin\x00partial\x00rowid\x00fkid\x00file\x00busy\x00checkpointed\x00database\x00status\x00cache_size\x00timeout\x00analysis_limit\x00application_id\x00auto_vacuum\x00automatic_index\x00busy_timeout\x00cache_spill\x00case_sensitive_like\x00cell_size_check\x00checkpoint_fullfsync\x00collation_list\x00compile_options\x00count_changes\x00data_version\x00database_list\x00default_cache_size\x00defer_foreign_keys\x00empty_result_callbacks\x00encoding\x00foreign_key_check\x00foreign_key_list\x00foreign_keys\x00freelist_count\x00full_column_names\x00fullfsync\x00function_list\x00hard_heap_limit\x00ignore_check_constraints\x00incremental_vacuum\x00index_info\x00index_list\x00index_xinfo\x00integrity_check\x00journal_mode\x00journal_size_limit\x00legacy_alter_table\x00locking_mode\x00max_page_count\x00mmap_size\x00module_list\x00optimize\x00page_count\x00page_size\x00pragma_list\x00query_only\x00quick_check\x00read_uncommitted\x00recursive_triggers\x00reverse_unordered_selects\x00schema_version\x00secure_delete\x00short_column_names\x00shrink_memory\x00soft_heap_limit\x00synchronous\x00table_info\x00table_list\x00table_xinfo\x00temp_store\x00temp_store_directory\x00threads\x00trusted_schema\x00user_version\x00wal_autocheckpoint\x00wal_checkpoint\x00writable_schema\x00onoffalseyestruextrafull\x00exclusive\x00normal\x00full\x00incremental\x00memory\x00temporary storage cannot be changed from within a transaction\x00SET NULL\x00SET DEFAULT\x00CASCADE\x00RESTRICT\x00NO ACTION\x00delete\x00persist\x00off\x00truncate\x00wal\x00w\x00a\x00sissii\x00utf8\x00utf16le\x00utf16be\x00-%T\x00fast\x00not a writable directory\x00Safety level may not be changed inside a transaction\x00reset\x00issisii\x00issisi\x00SELECT*FROM\"%w\"\x00shadow\x00sssiii\x00iisX\x00isiX\x00c\x00u\x00isisi\x00iss\x00is\x00iissssss\x00NONE\x00siX\x00*** in database %s ***\n\x00row not in PRIMARY KEY order for %s\x00NULL value in %s.%s\x00non-%s value in %s.%s\x00NUMERIC value in %s.%s\x00C\x00TEXT value in %s.%s\x00CHECK constraint failed in %s\x00row \x00 missing from index \x00rowid not at end-of-record for row \x00 of index \x00 values differ from index \x00non-unique entry in index \x00wrong # of entries in index \x00ok\x00unsupported encoding: %s\x00restart\x00ANALYZE \"%w\".\"%w\"\x00UTF8\x00UTF-8\x00UTF-16le\x00UTF-16be\x00UTF16le\x00UTF16be\x00UTF-16\x00UTF16\x00CREATE TABLE x\x00%c\"%s\"\x00(\"%s\"\x00,arg HIDDEN\x00,schema HIDDEN\x00PRAGMA \x00%Q.\x00=%Q\x00error in %s %s after %s: %s\x00malformed database schema (%s)\x00%z - %s\x00rename\x00drop column\x00add column\x00orphan index\x00CREATE TABLE x(type text,name text,tbl_name text,rootpage int,sql text)\x00unsupported file format\x00SELECT*FROM\"%w\".%s ORDER BY rowid\x00database schema is locked: %s\x00statement too long\x00unknown join type: %T%s%T%s%T\x00naturaleftouterightfullinnercross\x00a NATURAL join may not have an ON or USING clause\x00cannot join using column %s - column not present in both tables\x00ambiguous reference to %s in USING()\x00UNION ALL\x00INTERSECT\x00EXCEPT\x00UNION\x00USE TEMP B-TREE FOR %s\x00USE TEMP B-TREE FOR %sORDER BY\x00RIGHT PART OF \x00column%d\x00%.*z:%u\x00NUM\x00cannot use window functions in recursive queries\x00recursive aggregate queries not supported\x00SETUP\x00RECURSIVE 
STEP\x00SCAN %d CONSTANT ROW%s\x00S\x00COMPOUND QUERY\x00LEFT-MOST SUBQUERY\x00%s USING TEMP B-TREE\x00all VALUES must have the same number of terms\x00SELECTs to the left and right of %s do not have the same number of result columns\x00MERGE (%s)\x00LEFT\x00RIGHT\x00no such index: %s\x00'%s' is not a function\x00no such index: \"%s\"\x00multiple references to recursive table: %s\x00circular reference: %s\x00table %s has %d values for %d columns\x00multiple recursive references: %s\x00recursive reference in a subquery: %s\x00%!S\x00too many references to \"%s\": max 65535\x00access to view \"%s\" prohibited\x00..%s\x00%s.%s.%s\x00no such table: %s\x00no tables specified\x00too many columns in result set\x00DISTINCT aggregates must have exactly one argument\x00USE TEMP B-TREE FOR %s(DISTINCT)\x00SCAN %s%s%s\x00 USING COVERING INDEX \x00target object/alias may not appear in FROM clause: %s\x00expected %d columns for '%s' but got %d\x00CO-ROUTINE %!S\x00MATERIALIZE %!S\x00DISTINCT\x00GROUP BY\x00sqlite3_get_table() called with two or more incompatible queries\x00temporary trigger may not have qualified name\x00trigger\x00cannot create triggers on virtual tables\x00trigger %T already exists\x00cannot create trigger on system table\x00cannot create %s trigger on view: %S\x00BEFORE\x00AFTER\x00cannot create INSTEAD OF trigger on table: %S\x00trigger \"%s\" may not write to shadow table \"%s\"\x00INSERT INTO %Q.sqlite_master VALUES('trigger',%Q,%Q,0,'CREATE TRIGGER %q')\x00type='trigger' AND name='%q'\x00no such trigger: %S\x00DELETE FROM %Q.sqlite_master WHERE name=%Q AND type='trigger'\x00%s RETURNING is not available on virtual tables\x00DELETE\x00UPDATE\x00RETURNING may not use \"TABLE.*\" wildcards\x00-- TRIGGER %s\x00cannot UPDATE generated column \"%s\"\x00no such column: %s\x00rows updated\x00%r \x00%sON CONFLICT clause does not match any PRIMARY KEY or UNIQUE constraint\x00CRE\x00INS\x00cannot VACUUM from within a transaction\x00cannot VACUUM - SQL statements in progress\x00non-text filename\x00ATTACH %Q AS vacuum_db\x00output file already exists\x00SELECT sql FROM \"%w\".sqlite_schema WHERE type='table'AND name<>'sqlite_sequence' AND coalesce(rootpage,1)>0\x00SELECT sql FROM \"%w\".sqlite_schema WHERE type='index'\x00SELECT'INSERT INTO vacuum_db.'||quote(name)||' SELECT*FROM\"%w\".'||quote(name)FROM vacuum_db.sqlite_schema WHERE type='table'AND coalesce(rootpage,1)>0\x00INSERT INTO vacuum_db.sqlite_schema SELECT*FROM \"%w\".sqlite_schema WHERE type IN('view','trigger') OR(type='table'AND rootpage=0)\x00CREATE VIRTUAL TABLE %T\x00UPDATE %Q.sqlite_master SET type='table', name=%Q, tbl_name=%Q, rootpage=0, sql=%Q WHERE rowid=#%d\x00name=%Q AND sql=%Q\x00vtable constructor called recursively: %s\x00vtable constructor failed: %s\x00vtable constructor did not declare schema: %s\x00no such module: %s\x00\x00 AND \x00(\x00 (\x00%s=?\x00ANY(%s)\x00>\x00<\x00%s %S\x00SEARCH\x00SCAN\x00AUTOMATIC PARTIAL COVERING INDEX\x00AUTOMATIC COVERING INDEX\x00COVERING INDEX %s\x00INDEX %s\x00 USING \x00 USING INTEGER PRIMARY KEY (%s\x00>? 
AND %s\x00%c?)\x00 VIRTUAL TABLE INDEX %d:%s\x00 LEFT-JOIN\x00BLOOM FILTER ON %S (\x00rowid=?\x00MULTI-INDEX OR\x00INDEX %d\x00RIGHT-JOIN %s\x00regexp\x00ON clause references tables to its right\x00NOCASE\x00too many arguments on %s() - max %d\x00automatic index on %s(%s)\x00auto-index\x00%s.xBestIndex malfunction\x00abbreviated query algorithm search\x00no query solution\x00at most %d tables in a join\x00SCAN CONSTANT ROW\x00second argument to nth_value must be a positive integer\x00argument of ntile must be a positive integer\x00row_number\x00dense_rank\x00rank\x00percent_rank\x00cume_dist\x00ntile\x00last_value\x00nth_value\x00first_value\x00lead\x00lag\x00no such window: %s\x00RANGE with offset PRECEDING/FOLLOWING requires one ORDER BY expression\x00FILTER clause may only be used with aggregate window functions\x00misuse of aggregate: %s()\x00unsupported frame specification\x00PARTITION clause\x00ORDER BY clause\x00frame specification\x00cannot override %s of window: %s\x00DISTINCT is not supported for window functions\x00frame starting offset must be a non-negative integer\x00frame ending offset must be a non-negative integer\x00frame starting offset must be a non-negative number\x00frame ending offset must be a non-negative number\x00%s clause should come after %s not before\x00ORDER BY\x00LIMIT\x00too many terms in compound SELECT\x00syntax error after column name \"%.*s\"\x00parser stack overflow\x00unknown table option: %.*s\x00set list\x00near \"%T\": syntax error\x00qualified table names are not allowed on INSERT, UPDATE, and DELETE statements within triggers\x00the INDEXED BY clause is not allowed on UPDATE or DELETE statements within triggers\x00the NOT INDEXED clause is not allowed on UPDATE or DELETE statements within triggers\x00incomplete input\x00unrecognized token: \"%T\"\x00%s in \"%s\"\x00create\x00temp\x00temporary\x00end\x00explain\x00unable to close due to unfinalized statements or unfinished backups\x00unknown error\x00abort due to ROLLBACK\x00another row available\x00no more rows available\x00not an error\x00SQL logic error\x00access permission denied\x00query aborted\x00database is locked\x00database table is locked\x00attempt to write a readonly database\x00interrupted\x00disk I/O error\x00database disk image is malformed\x00unknown operation\x00database or disk is full\x00unable to open database file\x00locking protocol\x00constraint failed\x00datatype mismatch\x00bad parameter or other API misuse\x00authorization denied\x00column index out of range\x00file is not a database\x00notification message\x00warning message\x00unable to delete/modify user-function due to active statements\x00unable to use function %s in the requested context\x00unknown database: %s\x00unable to delete/modify collation sequence due to active statements\x00file:\x00localhost\x00invalid uri authority: %.*s\x00vfs\x00cache\x00mode\x00no such %s mode: %s\x00%s mode not allowed: %s\x00no such vfs: %s\x00shared\x00private\x00ro\x00rw\x00rwc\x00RTRIM\x00\x00\x00\x00%s at line %d of [%.10s]\x00database corruption\x00misuse\x00cannot open file\x00no such table column: %s.%s\x00SQLITE_\x00database is deadlocked\x00array\x00object\x000123456789abcdef\x00JSON cannot hold BLOB values\x00malformed JSON\x00[0]\x00JSON path error near '%q'\x00json_%s() needs an odd number of arguments\x00$[\x00$.\x00json_object() requires an even number of arguments\x00json_object() labels must be TEXT\x00set\x00insert\x00[]\x00{}\x00CREATE TABLE x(key,value,type,atom,id,parent,fullkey,path,json HIDDEN,root 
HIDDEN)\x00.%.*s\x00[%d]\x00$\x00json\x00json_array\x00json_array_length\x00json_extract\x00->\x00->>\x00json_insert\x00json_object\x00json_patch\x00json_quote\x00json_remove\x00json_replace\x00json_set\x00json_type\x00json_valid\x00json_group_array\x00json_group_object\x00json_each\x00json_tree\x00%s_node\x00data\x00DROP TABLE '%q'.'%q_node';DROP TABLE '%q'.'%q_rowid';DROP TABLE '%q'.'%q_parent';\x00RtreeMatchArg\x00SELECT * FROM %Q.%Q\x00UNIQUE constraint failed: %s.%s\x00rtree constraint failed: %s.(%s<=%s)\x00ALTER TABLE %Q.'%q_node' RENAME TO \"%w_node\";ALTER TABLE %Q.'%q_parent' RENAME TO \"%w_parent\";ALTER TABLE %Q.'%q_rowid' RENAME TO \"%w_rowid\";\x00SELECT stat FROM %Q.sqlite_stat1 WHERE tbl = '%q_rowid'\x00node\x00CREATE TABLE \"%w\".\"%w_rowid\"(rowid INTEGER PRIMARY KEY,nodeno\x00,a%d\x00);CREATE TABLE \"%w\".\"%w_node\"(nodeno INTEGER PRIMARY KEY,data);\x00CREATE TABLE \"%w\".\"%w_parent\"(nodeno INTEGER PRIMARY KEY,parentnode);\x00INSERT INTO \"%w\".\"%w_node\"VALUES(1,zeroblob(%d))\x00INSERT INTO\"%w\".\"%w_rowid\"(rowid,nodeno)VALUES(?1,?2)ON CONFLICT(rowid)DO UPDATE SET nodeno=excluded.nodeno\x00SELECT * FROM \"%w\".\"%w_rowid\" WHERE rowid=?1\x00UPDATE \"%w\".\"%w_rowid\"SET \x00a%d=coalesce(?%d,a%d)\x00a%d=?%d\x00 WHERE rowid=?1\x00INSERT OR REPLACE INTO '%q'.'%q_node' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_node' WHERE nodeno = ?1\x00SELECT nodeno FROM '%q'.'%q_rowid' WHERE rowid = ?1\x00INSERT OR REPLACE INTO '%q'.'%q_rowid' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_rowid' WHERE rowid = ?1\x00SELECT parentnode FROM '%q'.'%q_parent' WHERE nodeno = ?1\x00INSERT OR REPLACE INTO '%q'.'%q_parent' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_parent' WHERE nodeno = ?1\x00PRAGMA %Q.page_size\x00SELECT length(data) FROM '%q'.'%q_node' WHERE nodeno = 1\x00undersize RTree blobs in \"%q_node\"\x00Wrong number of columns for an rtree table\x00Too few columns for an rtree table\x00Too many columns for an rtree table\x00Auxiliary rtree columns must be last\x00CREATE TABLE x(%.*s INT\x00,%.*s\x00);\x00,%.*s REAL\x00,%.*s INT\x00{%lld\x00 %g\x00}\x00Invalid argument to rtreedepth()\x00%z%s%z\x00SELECT data FROM %Q.'%q_node' WHERE nodeno=?\x00Node %lld missing from database\x00SELECT parentnode FROM %Q.'%q_parent' WHERE nodeno=?1\x00SELECT nodeno FROM %Q.'%q_rowid' WHERE rowid=?1\x00Mapping (%lld -> %lld) missing from %s table\x00%_rowid\x00%_parent\x00Found (%lld -> %lld) in %s table, expected (%lld -> %lld)\x00Dimension %d of cell %d on node %lld is corrupt\x00Dimension %d of cell %d on node %lld is corrupt relative to parent\x00Node %lld is too small (%d bytes)\x00Rtree depth out of range (%d)\x00Node %lld is too small for cell count of %d (%d bytes)\x00SELECT count(*) FROM %Q.'%q%s'\x00Wrong number of entries in %%%s table - expected %lld, actual %lld\x00SELECT * FROM %Q.'%q_rowid'\x00Schema corrupt or not an rtree\x00_rowid\x00_parent\x00END\x00wrong number of arguments to function rtreecheck()\x00[\x00[%!g,%!g],\x00[%!g,%!g]]\x00\x00CREATE TABLE x(_shape\x00,%s\x00rtree\x00fullscan\x00_shape does not contain a valid polygon\x00geopoly_overlap\x00geopoly_within\x00geopoly\x00geopoly_area\x00geopoly_blob\x00geopoly_json\x00geopoly_svg\x00geopoly_contains_point\x00geopoly_debug\x00geopoly_bbox\x00geopoly_xform\x00geopoly_regular\x00geopoly_ccw\x00geopoly_group_bbox\x00rtreenode\x00rtreedepth\x00rtreecheck\x00rtree_i32\x00corrupt fossil delta\x00DROP TRIGGER IF EXISTS temp.rbu_insert_tr;DROP TRIGGER IF EXISTS temp.rbu_update1_tr;DROP TRIGGER IF EXISTS temp.rbu_update2_tr;DROP TRIGGER IF 
EXISTS temp.rbu_delete_tr;\x00SELECT rbu_target_name(name, type='view') AS target, name FROM sqlite_schema WHERE type IN ('table', 'view') AND target IS NOT NULL %s ORDER BY name\x00AND rootpage!=0 AND rootpage IS NOT NULL\x00SELECT name, rootpage, sql IS NULL OR substr(8, 6)=='UNIQUE' FROM main.sqlite_schema WHERE type='index' AND tbl_name = ?\x00SELECT (sql COLLATE nocase BETWEEN 'CREATE VIRTUAL' AND 'CREATE VIRTUAM'), rootpage FROM sqlite_schema WHERE name=%Q\x00PRAGMA index_list=%Q\x00SELECT rootpage FROM sqlite_schema WHERE name = %Q\x00PRAGMA table_info=%Q\x00PRAGMA main.index_list = %Q\x00PRAGMA main.index_xinfo = %Q\x00SELECT * FROM '%q'\x00rbu_\x00rbu_rowid\x00table %q %s rbu_rowid column\x00may not have\x00requires\x00PRAGMA table_info(%Q)\x00column missing from %q: %s\x00%z%s\"%w\"\x00%z%s%s\"%w\"%s\x00SELECT max(_rowid_) FROM \"%s%w\"\x00 WHERE _rowid_ > %lld \x00 DESC\x00quote(\x00||','||\x00SELECT %s FROM \"%s%w\" ORDER BY %s LIMIT 1\x00 WHERE (%s) > (%s) \x00_rowid_\x00%z%s \"%w\" COLLATE %Q\x00%z%s \"rbu_imp_%d%w\" COLLATE %Q DESC\x00%z%s quote(\"rbu_imp_%d%w\")\x00SELECT %s FROM \"rbu_imp_%w\" ORDER BY %s LIMIT 1\x00%z%s%s\x00(%s) > (%s)\x00%z%s(%.*s) COLLATE %Q\x00%z%s\"%w\" COLLATE %Q\x00%z%s\"rbu_imp_%d%w\"%s\x00%z%s\"rbu_imp_%d%w\" %s COLLATE %Q\x00%z%s\"rbu_imp_%d%w\" IS ?\x00%z%s%s.\"%w\"\x00%z%sNULL\x00%z, %s._rowid_\x00_rowid_ = ?%d\x00%z%sc%d=?%d\x00_rowid_ = (SELECT id FROM rbu_imposter2 WHERE %z)\x00%z%s\"%w\"=?%d\x00invalid rbu_control value\x00%z%s\"%w\"=rbu_delta(\"%w\", ?%d)\x00%z%s\"%w\"=rbu_fossil_delta(\"%w\", ?%d)\x00PRIMARY KEY(\x00%z%s\"%w\"%s\x00%z)\x00SELECT name FROM sqlite_schema WHERE rootpage = ?\x00%z%sc%d %s COLLATE %Q\x00%z%sc%d%s\x00%z, id INTEGER\x00CREATE TABLE rbu_imposter2(%z, PRIMARY KEY(%z)) WITHOUT ROWID\x00PRIMARY KEY \x00%z%s\"%w\" %s %sCOLLATE %Q%s\x00 NOT NULL\x00%z, %z\x00CREATE TABLE \"rbu_imp_%w\"(%z)%s\x00 WITHOUT ROWID\x00INSERT INTO %s.'rbu_tmp_%q'(rbu_control,%s%s) VALUES(%z)\x00SELECT trim(sql) FROM sqlite_schema WHERE type='index' AND name=?\x00 LIMIT -1 OFFSET %d\x00CREATE TABLE \"rbu_imp_%w\"( %s, PRIMARY KEY( %s ) ) WITHOUT ROWID\x00INSERT INTO \"rbu_imp_%w\" VALUES(%s)\x00DELETE FROM \"rbu_imp_%w\" WHERE %s\x00SELECT %s, 0 AS rbu_control FROM '%q' %s %s %s ORDER BY %s%s\x00AND\x00WHERE\x00SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' %s ORDER BY %s%s\x00SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' %s UNION ALL SELECT %s, rbu_control FROM '%q' %s %s typeof(rbu_control)='integer' AND rbu_control!=1 ORDER BY %s%s\x00rbu_imp_\x00INSERT INTO \"%s%w\"(%s%s) VALUES(%s)\x00, _rowid_\x00DELETE FROM \"%s%w\" WHERE %s\x00, rbu_rowid\x00CREATE TABLE IF NOT EXISTS %s.'rbu_tmp_%q' AS SELECT *%s FROM '%q' WHERE 0;\x00, 0 AS rbu_rowid\x00CREATE TEMP TRIGGER rbu_delete_tr BEFORE DELETE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(3, %s);END;CREATE TEMP TRIGGER rbu_update1_tr BEFORE UPDATE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(3, %s);END;CREATE TEMP TRIGGER rbu_update2_tr AFTER UPDATE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(4, %s);END;\x00CREATE TEMP TRIGGER rbu_insert_tr AFTER INSERT ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(0, %s);END;\x00,_rowid_ \x00,rbu_rowid\x00SELECT %s,%s rbu_control%s FROM '%q'%s %s %s %s\x000 AS \x00UPDATE \"%s%w\" SET %s WHERE %s\x00SELECT k, v FROM %s.rbu_state\x00file:///%s-vacuum?modeof=%s\x00ATTACH %Q AS stat\x00CREATE TABLE IF NOT EXISTS %s.rbu_state(k INTEGER PRIMARY KEY, v)\x00cannot vacuum wal mode 
database\x00file:%s-vactmp?rbu_memory=1%s%s\x00&\x00rbu_tmp_insert\x00rbu_fossil_delta\x00rbu_target_name\x00SELECT * FROM sqlite_schema\x00rbu vfs not found\x00PRAGMA main.wal_checkpoint=restart\x00rbu_exclusive_checkpoint\x00%s-oal\x00%s-wal\x00PRAGMA schema_version\x00PRAGMA schema_version = %d\x00INSERT OR REPLACE INTO %s.rbu_state(k, v) VALUES (%d, %d), (%d, %Q), (%d, %Q), (%d, %d), (%d, %d), (%d, %lld), (%d, %lld), (%d, %lld), (%d, %lld), (%d, %Q) \x00PRAGMA main.%s\x00PRAGMA main.%s = %d\x00PRAGMA writable_schema=1\x00SELECT sql FROM sqlite_schema WHERE sql!='' AND rootpage!=0 AND name!='sqlite_sequence' ORDER BY type DESC\x00SELECT * FROM sqlite_schema WHERE rootpage=0 OR rootpage IS NULL\x00INSERT INTO sqlite_schema VALUES(?,?,?,?,?)\x00PRAGMA writable_schema=0\x00DELETE FROM %s.'rbu_tmp_%q'\x00rbu_state mismatch error\x00rbu_vfs_%d\x00SELECT count(*) FROM sqlite_schema WHERE type='index' AND tbl_name = %Q\x00rbu_index_cnt\x00SELECT 1 FROM sqlite_schema WHERE tbl_name = 'rbu_count'\x00SELECT sum(cnt * (1 + rbu_index_cnt(rbu_target_name(tbl))))FROM rbu_count\x00cannot update wal mode database\x00database modified during rbu %s\x00vacuum\x00update\x00BEGIN IMMEDIATE\x00PRAGMA journal_mode=off\x00-vactmp\x00DELETE FROM stat.rbu_state\x00rbu/zipvfs setup error\x00rbu(%s)/%z\x00rbu_memory\x00SELECT 0, 'tbl', '', 0, '', 1 UNION ALL SELECT 1, 'idx', '', 0, '', 2 UNION ALL SELECT 2, 'stat', '', 0, '', 0\x00PRAGMA '%q'.table_info('%q')\x00%z%s\"%w\".\"%w\".\"%w\"=\"%w\".\"%w\".\"%w\"\x00%z%s\"%w\".\"%w\".\"%w\" IS NOT \"%w\".\"%w\".\"%w\"\x00 OR \x00SELECT * FROM \"%w\".\"%w\" WHERE NOT EXISTS ( SELECT 1 FROM \"%w\".\"%w\" WHERE %s)\x00SELECT * FROM \"%w\".\"%w\", \"%w\".\"%w\" WHERE %s AND (%z)\x00table schemas do not match\x00SELECT tbl, ?2, stat FROM %Q.sqlite_stat1 WHERE tbl IS ?1 AND idx IS (CASE WHEN ?2=X'' THEN NULL ELSE ?2 END)\x00SELECT * FROM \x00 WHERE \x00 IS ?\x00SAVEPOINT changeset\x00RELEASE changeset\x00UPDATE main.\x00 SET \x00 = ?\x00idx IS CASE WHEN length(?4)=0 AND typeof(?4)='blob' THEN NULL ELSE ?4 END \x00DELETE FROM main.\x00 AND (?\x00AND \x00INSERT INTO main.\x00) VALUES(?\x00, ?\x00INSERT INTO main.sqlite_stat1 VALUES(?1, CASE WHEN length(?2)=0 AND typeof(?2)='blob' THEN NULL ELSE ?2 END, ?3)\x00DELETE FROM main.sqlite_stat1 WHERE tbl=?1 AND idx IS CASE WHEN length(?2)=0 AND typeof(?2)='blob' THEN NULL ELSE ?2 END AND (?4 OR stat IS ?3)\x00SAVEPOINT replace_op\x00RELEASE replace_op\x00SAVEPOINT changeset_apply\x00PRAGMA defer_foreign_keys = 1\x00sqlite3changeset_apply(): no such table: %s\x00sqlite3changeset_apply(): table %s has %d columns, expected %d or more\x00sqlite3changeset_apply(): primary key mismatch for table %s\x00PRAGMA defer_foreign_keys = 0\x00RELEASE changeset_apply\x00ROLLBACK TO changeset_apply\x00fts5: parser stack overflow\x00fts5: syntax error near \"%.*s\"\x00%z%.*s\x00wrong number of arguments to function highlight()\x00wrong number of arguments to function snippet()\x00snippet\x00highlight\x00bm25\x00prefix\x00malformed prefix=... directive\x00too many prefix indexes (max %d)\x00prefix length out of range (max 999)\x00tokenize\x00multiple tokenize=... directives\x00parse error in tokenize directive\x00content\x00multiple content=... directives\x00%Q.%Q\x00content_rowid\x00multiple content_rowid=... directives\x00columnsize\x00malformed columnsize=... directive\x00columns\x00malformed detail=... 
directive\x00unrecognized option: \"%.*s\"\x00reserved fts5 column name: %s\x00unindexed\x00unrecognized column option: %s\x00T.%Q\x00, T.%Q\x00, T.c%d\x00reserved fts5 table name: %s\x00parse error in \"%s\"\x00docsize\x00%Q.'%q_%s'\x00CREATE TABLE x(\x00%z%s%Q\x00%z, %Q HIDDEN, %s HIDDEN)\x00pgsz\x00hashsize\x00automerge\x00usermerge\x00crisismerge\x00SELECT k, v FROM %Q.'%q_config'\x00version\x00invalid fts5 file format (found %d, expected %d) - run 'rebuild'\x00unterminated string\x00fts5: syntax error near \"%.1s\"\x00OR\x00NOT\x00NEAR\x00expected integer, got \"%.*s\"\x00fts5: column queries are not supported (detail=none)\x00fts5: %s queries are not supported (detail!=full)\x00phrase\x00block\x00REPLACE INTO '%q'.'%q_data'(id, block) VALUES(?,?)\x00DELETE FROM '%q'.'%q_data' WHERE id>=? AND id<=?\x00DELETE FROM '%q'.'%q_idx' WHERE segid=?\x00PRAGMA %Q.data_version\x00SELECT pgno FROM '%q'.'%q_idx' WHERE segid=? AND term<=? ORDER BY term DESC LIMIT 1\x00INSERT INTO '%q'.'%q_idx'(segid,term,pgno) VALUES(?,?,?)\x00%s_data\x00id INTEGER PRIMARY KEY, block BLOB\x00segid, term, pgno, PRIMARY KEY(segid, term)\x00SELECT segid, term, (pgno>>1), (pgno&1) FROM %Q.'%q_idx' WHERE segid=%d ORDER BY 1, 2\x00\x00\x00\x00\x00\x00recursively defined fts5 content table\x00SELECT rowid, rank FROM %Q.%Q ORDER BY %s(\"%w\"%s%s) %s\x00DESC\x00ASC\x00reads\x00unknown special query: %.*s\x00SELECT %s\x00no such function: %s\x00parse error in rank function: %s\x00%s: table does not support scanning\x00delete-all\x00'delete-all' may only be used with a contentless or external content fts5 table\x00rebuild\x00'rebuild' may not be used with a contentless fts5 table\x00merge\x00integrity-check\x00cannot %s contentless fts5 table: %s\x00DELETE from\x00no such cursor: %lld\x00no such tokenizer: %s\x00error in tokenizer constructor\x00fts5_api_ptr\x00fts5: 2023-03-22 11:56:21 0d1fc92f94cb6b76bffe3ec34d69cffde2924203304e8ffc4155597af0c191da\x00config\x00fts5\x00fts5_source_id\x00SELECT %s FROM %s T WHERE T.%Q >= ? AND T.%Q <= ? ORDER BY T.%Q ASC\x00SELECT %s FROM %s T WHERE T.%Q <= ? AND T.%Q >= ? 
ORDER BY T.%Q DESC\x00SELECT %s FROM %s T WHERE T.%Q=?\x00INSERT INTO %Q.'%q_content' VALUES(%s)\x00REPLACE INTO %Q.'%q_content' VALUES(%s)\x00DELETE FROM %Q.'%q_content' WHERE id=?\x00REPLACE INTO %Q.'%q_docsize' VALUES(?,?)\x00DELETE FROM %Q.'%q_docsize' WHERE id=?\x00SELECT sz FROM %Q.'%q_docsize' WHERE id=?\x00REPLACE INTO %Q.'%q_config' VALUES(?,?)\x00SELECT %s FROM %s AS T\x00DROP TABLE IF EXISTS %Q.'%q_data';DROP TABLE IF EXISTS %Q.'%q_idx';DROP TABLE IF EXISTS %Q.'%q_config';\x00DROP TABLE IF EXISTS %Q.'%q_docsize';\x00DROP TABLE IF EXISTS %Q.'%q_content';\x00ALTER TABLE %Q.'%q_%s' RENAME TO '%q_%s';\x00CREATE TABLE %Q.'%q_%q'(%s)%s\x00fts5: error creating shadow table %q_%s: %s\x00id INTEGER PRIMARY KEY\x00, c%d\x00id INTEGER PRIMARY KEY, sz BLOB\x00k PRIMARY KEY, v\x00DELETE FROM %Q.'%q_data';DELETE FROM %Q.'%q_idx';\x00DELETE FROM %Q.'%q_docsize';\x00SELECT count(*) FROM %Q.'%q_%s'\x00tokenchars\x00separators\x00L* N* Co\x00categories\x00remove_diacritics\x00unicode61\x00al\x00ance\x00ence\x00er\x00ic\x00able\x00ible\x00ant\x00ement\x00ment\x00ent\x00ion\x00ou\x00ism\x00ate\x00iti\x00ous\x00ive\x00ize\x00at\x00bl\x00ble\x00iz\x00ational\x00tional\x00tion\x00enci\x00anci\x00izer\x00logi\x00bli\x00alli\x00entli\x00eli\x00e\x00ousli\x00ization\x00ation\x00ator\x00alism\x00iveness\x00fulness\x00ful\x00ousness\x00aliti\x00iviti\x00biliti\x00ical\x00ness\x00icate\x00iciti\x00ative\x00alize\x00eed\x00ee\x00ed\x00ing\x00case_sensitive\x00ascii\x00porter\x00trigram\x00col\x00row\x00instance\x00fts5vocab: unknown table type: %Q\x00CREATE TABlE vocab(term, col, doc, cnt)\x00CREATE TABlE vocab(term, doc, cnt)\x00CREATE TABlE vocab(term, doc, col, offset)\x00wrong number of vtable arguments\x00recursive definition for %s.%s\x00SELECT t.%Q FROM %Q.%Q AS t WHERE t.%Q MATCH '*id'\x00no such fts5 table: %s.%s\x00fts5vocab\x002023-03-22 11:56:21 0d1fc92f94cb6b76bffe3ec34d69cffde2924203304e8ffc4155597af0c191da\x00" var ts = (*reflect.StringHeader)(unsafe.Pointer(&ts1)).Data diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/sqlite/lib/sqlite_freebsd_amd64.go temporal-1.22.5/src/vendor/modernc.org/sqlite/lib/sqlite_freebsd_amd64.go --- temporal-1.21.5-1/src/vendor/modernc.org/sqlite/lib/sqlite_freebsd_amd64.go 2023-09-29 14:03:35.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/sqlite/lib/sqlite_freebsd_amd64.go 2024-02-23 09:46:16.000000000 +0000 @@ -1,4 +1,4 @@ -// Code generated by 'ccgo -DSQLITE_PRIVATE= -export-defines "" -export-enums "" -export-externs X -export-fields F -export-typedefs "" -ignore-unsupported-alignment -pkgname sqlite3 -volatile=sqlite3_io_error_pending,sqlite3_open_file_count,sqlite3_pager_readdb_count,sqlite3_pager_writedb_count,sqlite3_pager_writej_count,sqlite3_search_count,sqlite3_sort_count,saved_cnt,randomnessPid -o lib/sqlite_freebsd_amd64.go -trace-translation-units testdata/sqlite-amalgamation-3410000/sqlite3.c -full-path-comments -DNDEBUG -DHAVE_USLEEP -DLONGDOUBLE_TYPE=double -DSQLITE_CORE -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_FTS5 -DSQLITE_ENABLE_GEOPOLY -DSQLITE_ENABLE_MATH_FUNCTIONS -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_OFFSET_SQL_FUNC -DSQLITE_ENABLE_PREUPDATE_HOOK -DSQLITE_ENABLE_RBU -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_SESSION -DSQLITE_ENABLE_SNAPSHOT -DSQLITE_ENABLE_STAT4 -DSQLITE_ENABLE_UNLOCK_NOTIFY -DSQLITE_LIKE_DOESNT_MATCH_BLOBS -DSQLITE_MUTEX_APPDEF=1 -DSQLITE_MUTEX_NOOP -DSQLITE_SOUNDEX -DSQLITE_THREADSAFE=1 -DSQLITE_OS_UNIX=1', DO NOT EDIT. 
+// Code generated by 'ccgo -DSQLITE_PRIVATE= -export-defines "" -export-enums "" -export-externs X -export-fields F -export-typedefs "" -ignore-unsupported-alignment -pkgname sqlite3 -volatile=sqlite3_io_error_pending,sqlite3_open_file_count,sqlite3_pager_readdb_count,sqlite3_pager_writedb_count,sqlite3_pager_writej_count,sqlite3_search_count,sqlite3_sort_count,saved_cnt,randomnessPid -o lib/sqlite_freebsd_amd64.go -trace-translation-units testdata/sqlite-amalgamation-3410200/sqlite3.c -full-path-comments -DNDEBUG -DHAVE_USLEEP -DLONGDOUBLE_TYPE=double -DSQLITE_CORE -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_FTS5 -DSQLITE_ENABLE_GEOPOLY -DSQLITE_ENABLE_MATH_FUNCTIONS -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_OFFSET_SQL_FUNC -DSQLITE_ENABLE_PREUPDATE_HOOK -DSQLITE_ENABLE_RBU -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_SESSION -DSQLITE_ENABLE_SNAPSHOT -DSQLITE_ENABLE_STAT4 -DSQLITE_ENABLE_UNLOCK_NOTIFY -DSQLITE_LIKE_DOESNT_MATCH_BLOBS -DSQLITE_MUTEX_APPDEF=1 -DSQLITE_MUTEX_NOOP -DSQLITE_SOUNDEX -DSQLITE_THREADSAFE=1 -DSQLITE_OS_UNIX=1', DO NOT EDIT. package sqlite3 @@ -748,11 +748,11 @@ NC_OrderAgg = 0x8000000 NC_PartIdx = 0x000002 NC_SelfRef = 0x00002e + NC_Subquery = 0x000040 NC_UAggInfo = 0x000100 NC_UBaseReg = 0x000400 NC_UEList = 0x000080 NC_UUpsert = 0x000200 - NC_VarSelect = 0x000040 NDEBUG = 1 NETGRAPHDISC = 6 NN = 1 @@ -1958,7 +1958,7 @@ SQLITE_SHM_UNLOCK = 1 SQLITE_SORTER_PMASZ = 250 SQLITE_SOUNDEX = 1 - SQLITE_SOURCE_ID = "2023-02-21 18:09:37 05941c2a04037fc3ed2ffae11f5d2260706f89431f463518740f72ada350866d" + SQLITE_SOURCE_ID = "2023-03-22 11:56:21 0d1fc92f94cb6b76bffe3ec34d69cffde2924203304e8ffc4155597af0c191da" SQLITE_SO_ASC = 0 SQLITE_SO_DESC = 1 SQLITE_SO_UNDEFINED = -1 @@ -2066,8 +2066,8 @@ SQLITE_UTF8 = 1 SQLITE_VDBEINT_H = 0 SQLITE_VDBE_H = 0 - SQLITE_VERSION = "3.41.0" - SQLITE_VERSION_NUMBER = 3041000 + SQLITE_VERSION = "3.41.2" + SQLITE_VERSION_NUMBER = 3041002 SQLITE_VTABRISK_High = 2 SQLITE_VTABRISK_Low = 0 SQLITE_VTABRISK_Normal = 1 @@ -5299,7 +5299,8 @@ FiIdxCur int32 FiIdxCol int32 FbMaybeNullRow U8 - F__ccgo_pad1 [3]byte + Faff U8 + F__ccgo_pad1 [2]byte FpIENext uintptr } @@ -5941,17 +5942,18 @@ // Handle type for pages. type PgHdr2 = struct { - FpPage uintptr - FpData uintptr - FpExtra uintptr - FpCache uintptr - FpDirty uintptr - FpPager uintptr - Fpgno Pgno - Fflags U16 - FnRef I16 - FpDirtyNext uintptr - FpDirtyPrev uintptr + FpPage uintptr + FpData uintptr + FpExtra uintptr + FpCache uintptr + FpDirty uintptr + FpPager uintptr + Fpgno Pgno + Fflags U16 + F__ccgo_pad1 [2]byte + FnRef I64 + FpDirtyNext uintptr + FpDirtyPrev uintptr } // Handle type for pages. 
@@ -6172,14 +6174,14 @@ FpDirty uintptr FpDirtyTail uintptr FpSynced uintptr - FnRefSum int32 + FnRefSum I64 FszCache int32 FszSpill int32 FszPage int32 FszExtra int32 FbPurgeable U8 FeCreate U8 - F__ccgo_pad1 [2]byte + F__ccgo_pad1 [6]byte FxStress uintptr FpStress uintptr FpCache uintptr @@ -6986,7 +6988,7 @@ _ = pMutex if op < 0 || op >= int32(uint64(unsafe.Sizeof([10]Sqlite3StatValueType{}))/uint64(unsafe.Sizeof(Sqlite3StatValueType(0)))) { - return Xsqlite3MisuseError(tls, 23229) + return Xsqlite3MisuseError(tls, 23233) } if statMutex[op] != 0 { pMutex = Xsqlite3Pcache1Mutex(tls) @@ -15156,7 +15158,7 @@ for p = (*UnixInodeInfo)(unsafe.Pointer(pInode)).FpUnused; p != 0; p = pNext { pNext = (*UnixUnusedFd)(unsafe.Pointer(p)).FpNext - robust_close(tls, pFile, (*UnixUnusedFd)(unsafe.Pointer(p)).Ffd, 38271) + robust_close(tls, pFile, (*UnixUnusedFd)(unsafe.Pointer(p)).Ffd, 38275) Xsqlite3_free(tls, p) } (*UnixInodeInfo)(unsafe.Pointer(pInode)).FpUnused = uintptr(0) @@ -15633,7 +15635,7 @@ var pFile uintptr = id unixUnmapfile(tls, pFile) if (*UnixFile)(unsafe.Pointer(pFile)).Fh >= 0 { - robust_close(tls, pFile, (*UnixFile)(unsafe.Pointer(pFile)).Fh, 39055) + robust_close(tls, pFile, (*UnixFile)(unsafe.Pointer(pFile)).Fh, 39059) (*UnixFile)(unsafe.Pointer(pFile)).Fh = -1 } @@ -15924,7 +15926,7 @@ if fd >= 0 { return SQLITE_OK } - return unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 40676), ts+3371, bp+8, 40676) + return unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 40680), ts+3371, bp+8, 40680) } func unixSync(tls *libc.TLS, id uintptr, flags int32) int32 { @@ -15941,14 +15943,14 @@ if rc != 0 { storeLastErrno(tls, pFile, *(*int32)(unsafe.Pointer(libc.X__error(tls)))) - return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(4)<<8, ts+3659, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40717) + return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(4)<<8, ts+3659, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40721) } if int32((*UnixFile)(unsafe.Pointer(pFile)).FctrlFlags)&UNIXFILE_DIRSYNC != 0 { rc = (*(*func(*libc.TLS, uintptr, uintptr) int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 17*24 + 8)))(tls, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, bp) if rc == SQLITE_OK { full_fsync(tls, *(*int32)(unsafe.Pointer(bp)), 0, 0) - robust_close(tls, pFile, *(*int32)(unsafe.Pointer(bp)), 40731) + robust_close(tls, pFile, *(*int32)(unsafe.Pointer(bp)), 40735) } else { rc = SQLITE_OK } @@ -15968,7 +15970,7 @@ rc = robust_ftruncate(tls, (*UnixFile)(unsafe.Pointer(pFile)).Fh, nByte) if rc != 0 { storeLastErrno(tls, pFile, *(*int32)(unsafe.Pointer(libc.X__error(tls)))) - return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(6)<<8, ts+3290, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40762) + return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(6)<<8, ts+3290, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40766) } else { if nByte < (*UnixFile)(unsafe.Pointer(pFile)).FmmapSize { (*UnixFile)(unsafe.Pointer(pFile)).FmmapSize = nByte @@ -16036,7 +16038,7 @@ if (*UnixFile)(unsafe.Pointer(pFile)).FszChunk <= 0 { if robust_ftruncate(tls, (*UnixFile)(unsafe.Pointer(pFile)).Fh, nByte) != 0 { storeLastErrno(tls, pFile, *(*int32)(unsafe.Pointer(libc.X__error(tls)))) - return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(6)<<8, ts+3290, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40883) + return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(6)<<8, ts+3290, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40887) } } @@ -16263,7 +16265,7 @@ } Xsqlite3_free(tls, (*UnixShmNode)(unsafe.Pointer(p)).FapRegion) if 
(*UnixShmNode)(unsafe.Pointer(p)).FhShm >= 0 { - robust_close(tls, pFd, (*UnixShmNode)(unsafe.Pointer(p)).FhShm, 41442) + robust_close(tls, pFd, (*UnixShmNode)(unsafe.Pointer(p)).FhShm, 41446) (*UnixShmNode)(unsafe.Pointer(p)).FhShm = -1 } (*UnixInodeInfo)(unsafe.Pointer((*UnixShmNode)(unsafe.Pointer(p)).FpInode)).FpShmNode = uintptr(0) @@ -16291,7 +16293,7 @@ rc = unixShmSystemLock(tls, pDbFd, F_WRLCK, (22+SQLITE_SHM_NLOCK)*4+SQLITE_SHM_NLOCK, 1) if rc == SQLITE_OK && robust_ftruncate(tls, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FhShm, int64(3)) != 0 { - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(18)<<8, ts+3290, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename, 41499) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(18)<<8, ts+3290, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename, 41503) } } } else if int32((*flock)(unsafe.Pointer(bp+8)).Fl_type) == F_WRLCK { @@ -16390,7 +16392,7 @@ if !((*unixShmNode)(unsafe.Pointer(pShmNode)).FhShm < 0) { goto __10 } - rc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 41624), ts+3254, zShm, 41624) + rc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 41628), ts+3254, zShm, 41628) goto shm_open_err __10: ; @@ -16520,7 +16522,7 @@ goto __14 } zFile = (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(19)<<8, ts+3325, zFile, 41768) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(19)<<8, ts+3325, zFile, 41772) goto shmpage_out __14: ; @@ -16566,7 +16568,7 @@ if !(pMem == libc.UintptrFromInt32(-1)) { goto __20 } - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(21)<<8, ts+3412, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename, 41795) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(21)<<8, ts+3412, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename, 41799) goto shmpage_out __20: ; @@ -16805,7 +16807,7 @@ if pNew == libc.UintptrFromInt32(-1) { pNew = uintptr(0) nNew = int64(0) - unixLogErrorAtLine(tls, SQLITE_OK, zErr, (*UnixFile)(unsafe.Pointer(pFd)).FzPath, 42169) + unixLogErrorAtLine(tls, SQLITE_OK, zErr, (*UnixFile)(unsafe.Pointer(pFd)).FzPath, 42173) (*UnixFile)(unsafe.Pointer(pFd)).FmmapSizeMax = int64(0) } @@ -16939,7 +16941,7 @@ unixEnterMutex(tls) rc = findInodeInfo(tls, pNew, pNew+16) if rc != SQLITE_OK { - robust_close(tls, pNew, h, 42672) + robust_close(tls, pNew, h, 42676) h = -1 } unixLeaveMutex(tls) @@ -16960,7 +16962,7 @@ storeLastErrno(tls, pNew, 0) if rc != SQLITE_OK { if h >= 0 { - robust_close(tls, pNew, h, 42757) + robust_close(tls, pNew, h, 42761) } } else { (*Sqlite3_file)(unsafe.Pointer(pId)).FpMethods = pLockingStyle @@ -17276,7 +17278,7 @@ if !(fd < 0) { goto __19 } - rc2 = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43198), ts+3254, zName, 43198) + rc2 = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43202), ts+3254, zName, 43202) if !(rc == SQLITE_OK) { goto __20 } @@ -17367,7 +17369,7 @@ if *(*int32)(unsafe.Pointer(libc.X__error(tls))) == ENOENT { rc = SQLITE_IOERR | int32(23)<<8 } else { - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(10)<<8, ts+3364, zPath, 43337) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(10)<<8, ts+3364, zPath, 43341) } return rc } @@ -17375,9 +17377,9 @@ rc = (*(*func(*libc.TLS, uintptr, uintptr) int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 17*24 + 8)))(tls, zPath, bp) if rc == SQLITE_OK { if full_fsync(tls, *(*int32)(unsafe.Pointer(bp)), 0, 0) != 0 { - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(5)<<8, ts+3781, zPath, 43347) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(5)<<8, ts+3781, 
zPath, 43351) } - robust_close(tls, uintptr(0), *(*int32)(unsafe.Pointer(bp)), 43349) + robust_close(tls, uintptr(0), *(*int32)(unsafe.Pointer(bp)), 43353) } else { rc = SQLITE_OK } @@ -17441,18 +17443,18 @@ zIn = (*DbPath)(unsafe.Pointer(pPath)).FzOut if (*(*func(*libc.TLS, uintptr, uintptr) int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 27*24 + 8)))(tls, zIn, bp) != 0 { if *(*int32)(unsafe.Pointer(libc.X__error(tls))) != ENOENT { - (*DbPath)(unsafe.Pointer(pPath)).Frc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43443), ts+3452, zIn, 43443) + (*DbPath)(unsafe.Pointer(pPath)).Frc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43447), ts+3452, zIn, 43447) } } else if int32((*stat)(unsafe.Pointer(bp)).Fst_mode)&0170000 == 0120000 { var got Ssize_t if libc.PostIncInt32(&(*DbPath)(unsafe.Pointer(pPath)).FnSymlink, 1) > SQLITE_MAX_SYMLINK { - (*DbPath)(unsafe.Pointer(pPath)).Frc = Xsqlite3CantopenError(tls, 43449) + (*DbPath)(unsafe.Pointer(pPath)).Frc = Xsqlite3CantopenError(tls, 43453) return } got = (*(*func(*libc.TLS, uintptr, uintptr, Size_t) Ssize_t)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls, zIn, bp+224, uint64(unsafe.Sizeof([1026]int8{}))-uint64(2)) if got <= int64(0) || got >= Ssize_t(unsafe.Sizeof([1026]int8{}))-int64(2) { - (*DbPath)(unsafe.Pointer(pPath)).Frc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43454), ts+3443, zIn, 43454) + (*DbPath)(unsafe.Pointer(pPath)).Frc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43458), ts+3443, zIn, 43458) return } *(*int8)(unsafe.Pointer(bp + 224 + uintptr(got))) = int8(0) @@ -17492,14 +17494,14 @@ (*DbPath)(unsafe.Pointer(bp + 1032)).FzOut = zOut if int32(*(*int8)(unsafe.Pointer(zPath))) != '/' { if (*(*func(*libc.TLS, uintptr, Size_t) uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 3*24 + 8)))(tls, bp, uint64(unsafe.Sizeof([1026]int8{}))-uint64(2)) == uintptr(0) { - return unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43512), ts+3272, zPath, 43512) + return unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43516), ts+3272, zPath, 43516) } appendAllPathElements(tls, bp+1032, bp) } appendAllPathElements(tls, bp+1032, zPath) *(*int8)(unsafe.Pointer(zOut + uintptr((*DbPath)(unsafe.Pointer(bp+1032)).FnUsed))) = int8(0) if (*DbPath)(unsafe.Pointer(bp+1032)).Frc != 0 || (*DbPath)(unsafe.Pointer(bp+1032)).FnUsed < 2 { - return Xsqlite3CantopenError(tls, 43518) + return Xsqlite3CantopenError(tls, 43522) } if (*DbPath)(unsafe.Pointer(bp+1032)).FnSymlink != 0 { return SQLITE_OK | int32(2)<<8 @@ -17596,7 +17598,7 @@ for __ccgo := true; __ccgo; __ccgo = got < 0 && *(*int32)(unsafe.Pointer(libc.X__error(tls))) == EINTR { got = int32((*(*func(*libc.TLS, int32, uintptr, Size_t) Ssize_t)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 8*24 + 8)))(tls, fd, zBuf, uint64(nBuf))) } - robust_close(tls, uintptr(0), fd, 43619) + robust_close(tls, uintptr(0), fd, 43623) } } @@ -19029,7 +19031,7 @@ libc.Xmemset(tls, pPgHdr+32, 0, uint64(unsafe.Sizeof(PgHdr{}))-uint64(uintptr(0)+32)) (*PgHdr)(unsafe.Pointer(pPgHdr)).FpPage = pPage (*PgHdr)(unsafe.Pointer(pPgHdr)).FpData = (*Sqlite3_pcache_page)(unsafe.Pointer(pPage)).FpBuf - (*PgHdr)(unsafe.Pointer(pPgHdr)).FpExtra = pPgHdr + 1*72 + (*PgHdr)(unsafe.Pointer(pPgHdr)).FpExtra = pPgHdr + 1*80 libc.Xmemset(tls, (*PgHdr)(unsafe.Pointer(pPgHdr)).FpExtra, 0, uint64(8)) (*PgHdr)(unsafe.Pointer(pPgHdr)).FpCache = pCache (*PgHdr)(unsafe.Pointer(pPgHdr)).Fpgno = pgno @@ -19059,7 +19061,7 @@ // reference count drops to 0, then it is made 
eligible for recycling. func Xsqlite3PcacheRelease(tls *libc.TLS, p uintptr) { (*PCache)(unsafe.Pointer((*PgHdr)(unsafe.Pointer(p)).FpCache)).FnRefSum-- - if int32(libc.PreDecInt16(&(*PgHdr)(unsafe.Pointer(p)).FnRef, 1)) == 0 { + if libc.PreDecInt64(&(*PgHdr)(unsafe.Pointer(p)).FnRef, 1) == int64(0) { if int32((*PgHdr)(unsafe.Pointer(p)).Fflags)&PGHDR_CLEAN != 0 { pcacheUnpin(tls, p) } else { @@ -19110,7 +19112,7 @@ *(*U16)(unsafe.Pointer(p + 52)) &= libc.Uint16FromInt32(libc.CplInt32(PGHDR_DIRTY | PGHDR_NEED_SYNC | PGHDR_WRITEABLE)) *(*U16)(unsafe.Pointer(p + 52)) |= U16(PGHDR_CLEAN) - if int32((*PgHdr)(unsafe.Pointer(p)).FnRef) == 0 { + if (*PgHdr)(unsafe.Pointer(p)).FnRef == int64(0) { pcacheUnpin(tls, p) } } @@ -19214,8 +19216,8 @@ } func pcacheMergeDirtyList(tls *libc.TLS, pA uintptr, pB uintptr) uintptr { - bp := tls.Alloc(72) - defer tls.Free(72) + bp := tls.Alloc(80) + defer tls.Free(80) var pTail uintptr pTail = bp @@ -19293,13 +19295,13 @@ // // This is not the total number of pages referenced, but the sum of the // reference count for all pages. -func Xsqlite3PcacheRefCount(tls *libc.TLS, pCache uintptr) int32 { +func Xsqlite3PcacheRefCount(tls *libc.TLS, pCache uintptr) I64 { return (*PCache)(unsafe.Pointer(pCache)).FnRefSum } // Return the number of references to the page supplied as an argument. -func Xsqlite3PcachePageRefcount(tls *libc.TLS, p uintptr) int32 { - return int32((*PgHdr)(unsafe.Pointer(p)).FnRef) +func Xsqlite3PcachePageRefcount(tls *libc.TLS, p uintptr) I64 { + return (*PgHdr)(unsafe.Pointer(p)).FnRef } // Return the total number of pages in the cache. @@ -21593,7 +21595,7 @@ pPg = Xsqlite3PagerLookup(tls, pPager, iPg) if pPg != 0 { - if Xsqlite3PcachePageRefcount(tls, pPg) == 1 { + if Xsqlite3PcachePageRefcount(tls, pPg) == int64(1) { Xsqlite3PcacheDrop(tls, pPg) } else { rc = readDbPage(tls, pPg) @@ -22026,7 +22028,7 @@ var pageSize U32 = *(*U32)(unsafe.Pointer(pPageSize)) if (int32((*Pager)(unsafe.Pointer(pPager)).FmemDb) == 0 || (*Pager)(unsafe.Pointer(pPager)).FdbSize == Pgno(0)) && - Xsqlite3PcacheRefCount(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache) == 0 && + Xsqlite3PcacheRefCount(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache) == int64(0) && pageSize != 0 && pageSize != U32((*Pager)(unsafe.Pointer(pPager)).FpageSize) { var pNew uintptr = uintptr(0) *(*I64)(unsafe.Pointer(bp)) = int64(0) @@ -22178,9 +22180,9 @@ Xsqlite3OsUnfetch(tls, (*Pager)(unsafe.Pointer(pPager)).Ffd, I64(pgno-Pgno(1))*(*Pager)(unsafe.Pointer(pPager)).FpageSize, pData) return SQLITE_NOMEM } - (*PgHdr)(unsafe.Pointer(p)).FpExtra = p + 1*72 + (*PgHdr)(unsafe.Pointer(p)).FpExtra = p + 1*80 (*PgHdr)(unsafe.Pointer(p)).Fflags = U16(PGHDR_MMAP) - (*PgHdr)(unsafe.Pointer(p)).FnRef = int16(1) + (*PgHdr)(unsafe.Pointer(p)).FnRef = int64(1) (*PgHdr)(unsafe.Pointer(p)).FpPager = pPager } @@ -22512,7 +22514,7 @@ for rc == SQLITE_OK && pList != 0 { var pNext uintptr = (*PgHdr)(unsafe.Pointer(pList)).FpDirty - if int32((*PgHdr)(unsafe.Pointer(pList)).FnRef) == 0 { + if (*PgHdr)(unsafe.Pointer(pList)).FnRef == int64(0) { rc = pagerStress(tls, pPager, pList) } pList = pNext @@ -22662,7 +22664,7 @@ goto __12 } - rc = Xsqlite3CantopenError(tls, 60235) + rc = Xsqlite3CantopenError(tls, 60239) __12: ; if !(rc != SQLITE_OK) { @@ -23043,7 +23045,7 @@ if !(rc == SQLITE_OK && *(*int32)(unsafe.Pointer(bp + 8))&SQLITE_OPEN_READONLY != 0) { goto __10 } - rc = Xsqlite3CantopenError(tls, 60754) + rc = Xsqlite3CantopenError(tls, 60758) Xsqlite3OsClose(tls, (*Pager)(unsafe.Pointer(pPager)).Fjfd) __10: ; @@ 
-23149,7 +23151,7 @@ } func pagerUnlockIfUnused(tls *libc.TLS, pPager uintptr) { - if Xsqlite3PcacheRefCount(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache) == 0 { + if Xsqlite3PcacheRefCount(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache) == int64(0) { pagerUnlockAndRollback(tls, pPager) } } @@ -23167,7 +23169,7 @@ if !(pgno == Pgno(0)) { goto __1 } - return Xsqlite3CorruptError(tls, 60967) + return Xsqlite3CorruptError(tls, 60971) __1: ; *(*uintptr)(unsafe.Pointer(bp)) = Xsqlite3PcacheFetch(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache, pgno, 3) @@ -23206,7 +23208,7 @@ if !(pgno == (*Pager)(unsafe.Pointer(pPager)).FlckPgno) { goto __7 } - rc = Xsqlite3CorruptError(tls, 60999) + rc = Xsqlite3CorruptError(tls, 61003) goto pager_acquire_err __7: ; @@ -23283,7 +23285,7 @@ (int32((*Pager)(unsafe.Pointer(pPager)).FeState) == PAGER_READER || flags&PAGER_GET_READONLY != 0)) if pgno <= Pgno(1) && pgno == Pgno(0) { - return Xsqlite3CorruptError(tls, 61078) + return Xsqlite3CorruptError(tls, 61082) } if bMmapOk != 0 && (*Pager)(unsafe.Pointer(pPager)).FpWal != uintptr(0) { @@ -24041,7 +24043,7 @@ // Return the number of references to the specified page. func Xsqlite3PagerPageRefcount(tls *libc.TLS, pPage uintptr) int32 { - return Xsqlite3PcachePageRefcount(tls, pPage) + return int32(Xsqlite3PcachePageRefcount(tls, pPage)) } // Parameter eStat must be one of SQLITE_DBSTATUS_CACHE_HIT, _MISS, _WRITE, @@ -24284,9 +24286,9 @@ pPgOld = Xsqlite3PagerLookup(tls, pPager, pgno) if pPgOld != 0 { - if int32((*PgHdr)(unsafe.Pointer(pPgOld)).FnRef) > 1 { + if (*PgHdr)(unsafe.Pointer(pPgOld)).FnRef > int64(1) { Xsqlite3PagerUnrefNotNull(tls, pPgOld) - return Xsqlite3CorruptError(tls, 62623) + return Xsqlite3CorruptError(tls, 62627) } *(*U16)(unsafe.Pointer(pPg + 52)) |= U16(int32((*PgHdr)(unsafe.Pointer(pPgOld)).Fflags) & PGHDR_NEED_SYNC) if (*Pager)(unsafe.Pointer(pPager)).FtempFile != 0 { @@ -25043,7 +25045,7 @@ nCollide = idx for iKey = walHash(tls, iPage); *(*Ht_slot)(unsafe.Pointer((*WalHashLoc)(unsafe.Pointer(bp)).FaHash + uintptr(iKey)*2)) != 0; iKey = walNextHash(tls, iKey) { if libc.PostDecInt32(&nCollide, 1) == 0 { - return Xsqlite3CorruptError(tls, 64387) + return Xsqlite3CorruptError(tls, 64391) } } *(*U32)(unsafe.Pointer((*WalHashLoc)(unsafe.Pointer(bp)).FaPgno + uintptr(idx-1)*4)) = iPage @@ -25142,7 +25144,7 @@ if !(version != U32(WAL_MAX_VERSION)) { goto __7 } - rc = Xsqlite3CantopenError(tls, 64519) + rc = Xsqlite3CantopenError(tls, 64523) goto finished __7: ; @@ -25728,7 +25730,7 @@ goto __14 } - rc = Xsqlite3CorruptError(tls, 65333) + rc = Xsqlite3CorruptError(tls, 65337) goto __15 __14: Xsqlite3OsFileControlHint(tls, (*Wal)(unsafe.Pointer(pWal)).FpDbFd, SQLITE_FCNTL_SIZE_HINT, bp+16) @@ -26003,7 +26005,7 @@ } if badHdr == 0 && (*Wal)(unsafe.Pointer(pWal)).Fhdr.FiVersion != U32(WALINDEX_MAX_VERSION) { - rc = Xsqlite3CantopenError(tls, 65682) + rc = Xsqlite3CantopenError(tls, 65686) } if (*Wal)(unsafe.Pointer(pWal)).FbShmUnreliable != 0 { if rc != SQLITE_OK { @@ -26476,7 +26478,7 @@ iRead = iFrame } if libc.PostDecInt32(&nCollide, 1) == 0 { - return Xsqlite3CorruptError(tls, 66419) + return Xsqlite3CorruptError(tls, 66423) } iKey = walNextHash(tls, iKey) } @@ -26981,7 +26983,7 @@ if rc == SQLITE_OK { if (*Wal)(unsafe.Pointer(pWal)).Fhdr.FmxFrame != 0 && walPagesize(tls, pWal) != nBuf { - rc = Xsqlite3CorruptError(tls, 67138) + rc = Xsqlite3CorruptError(tls, 67142) } else { rc = walCheckpoint(tls, pWal, db, eMode2, xBusy2, pBusyArg, sync_flags, zBuf) } @@ -27639,7 +27641,7 @@ } 
Xsqlite3VdbeRecordUnpack(tls, pKeyInfo, int32(nKey), pKey, pIdxKey) if int32((*UnpackedRecord)(unsafe.Pointer(pIdxKey)).FnField) == 0 || int32((*UnpackedRecord)(unsafe.Pointer(pIdxKey)).FnField) > int32((*KeyInfo)(unsafe.Pointer(pKeyInfo)).FnAllField) { - rc = Xsqlite3CorruptError(tls, 69249) + rc = Xsqlite3CorruptError(tls, 69253) } else { rc = Xsqlite3BtreeIndexMoveto(tls, pCur, pIdxKey, pRes) } @@ -27776,7 +27778,7 @@ if !(key == Pgno(0)) { goto __2 } - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69430) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69434) return __2: ; @@ -27793,7 +27795,7 @@ goto __4 } - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69443) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69447) goto ptrmap_exit __4: ; @@ -27801,7 +27803,7 @@ if !(offset < 0) { goto __5 } - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69448) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69452) goto ptrmap_exit __5: ; @@ -27844,7 +27846,7 @@ offset = int32(Pgno(5) * (key - Pgno(iPtrmap) - Pgno(1))) if offset < 0 { Xsqlite3PagerUnref(tls, *(*uintptr)(unsafe.Pointer(bp))) - return Xsqlite3CorruptError(tls, 69493) + return Xsqlite3CorruptError(tls, 69497) } *(*U8)(unsafe.Pointer(pEType)) = *(*U8)(unsafe.Pointer(pPtrmap + uintptr(offset))) @@ -27854,7 +27856,7 @@ Xsqlite3PagerUnref(tls, *(*uintptr)(unsafe.Pointer(bp))) if int32(*(*U8)(unsafe.Pointer(pEType))) < 1 || int32(*(*U8)(unsafe.Pointer(pEType))) > 5 { - return Xsqlite3CorruptError(tls, 69501) + return Xsqlite3CorruptError(tls, 69505) } return SQLITE_OK } @@ -28104,7 +28106,7 @@ if U32((*CellInfo)(unsafe.Pointer(bp)).FnLocal) < (*CellInfo)(unsafe.Pointer(bp)).FnPayload { var ovfl Pgno if Uptr((*MemPage)(unsafe.Pointer(pSrc)).FaDataEnd) >= Uptr(pCell) && Uptr((*MemPage)(unsafe.Pointer(pSrc)).FaDataEnd) < Uptr(pCell+uintptr((*CellInfo)(unsafe.Pointer(bp)).FnLocal)) { - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69893) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69897) return } ovfl = Xsqlite3Get4byte(tls, pCell+uintptr(int32((*CellInfo)(unsafe.Pointer(bp)).FnSize)-4)) @@ -28151,7 +28153,7 @@ if !(iFree > usableSize-4) { goto __2 } - return Xsqlite3CorruptError(tls, 69951) + return Xsqlite3CorruptError(tls, 69955) __2: ; if !(iFree != 0) { @@ -28161,7 +28163,7 @@ if !(iFree2 > usableSize-4) { goto __4 } - return Xsqlite3CorruptError(tls, 69954) + return Xsqlite3CorruptError(tls, 69958) __4: ; if !(0 == iFree2 || int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree2)))) == 0 && int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree2+1)))) == 0) { @@ -28174,7 +28176,7 @@ if !(top >= iFree) { goto __6 } - return Xsqlite3CorruptError(tls, 69962) + return Xsqlite3CorruptError(tls, 69966) __6: ; if !(iFree2 != 0) { @@ -28183,14 +28185,14 @@ if !(iFree+sz > iFree2) { goto __9 } - return Xsqlite3CorruptError(tls, 69965) + return Xsqlite3CorruptError(tls, 69969) __9: ; sz2 = int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree2+2))))<<8 | int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree2+2) + 1))) if !(iFree2+sz2 > usableSize) { goto __10 } - return Xsqlite3CorruptError(tls, 69967) + return Xsqlite3CorruptError(tls, 69971) __10: ; libc.Xmemmove(tls, data+uintptr(iFree+sz+sz2), data+uintptr(iFree+sz), uint64(iFree2-(iFree+sz))) @@ -28200,7 +28202,7 @@ if !(iFree+sz > usableSize) { goto __11 } - return Xsqlite3CorruptError(tls, 69971) + return Xsqlite3CorruptError(tls, 69975) __11: ; __8: @@ -28264,7 +28266,7 @@ if !(pc < iCellStart || 
pc > iCellLast) { goto __22 } - return Xsqlite3CorruptError(tls, 70004) + return Xsqlite3CorruptError(tls, 70008) __22: ; size = int32((*struct { @@ -28274,7 +28276,7 @@ if !(cbrk < iCellStart || pc+size > usableSize) { goto __23 } - return Xsqlite3CorruptError(tls, 70010) + return Xsqlite3CorruptError(tls, 70014) __23: ; *(*U8)(unsafe.Pointer(pAddr1)) = U8(cbrk >> 8) @@ -28296,7 +28298,7 @@ if !(int32(*(*uint8)(unsafe.Pointer(data + uintptr(hdr+7))))+cbrk-iCellFirst != (*MemPage)(unsafe.Pointer(pPage)).FnFree) { goto __24 } - return Xsqlite3CorruptError(tls, 70024) + return Xsqlite3CorruptError(tls, 70028) __24: ; *(*uint8)(unsafe.Pointer(data + uintptr(hdr+5))) = U8(cbrk >> 8) @@ -28331,7 +28333,7 @@ *(*U8)(unsafe.Pointer(aData + uintptr(hdr+7))) += U8(int32(U8(x))) return aData + uintptr(pc) } else if x+pc > maxPC { - *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70081) + *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70085) return uintptr(0) } else { *(*U8)(unsafe.Pointer(aData + uintptr(pc+2))) = U8(x >> 8) @@ -28344,13 +28346,13 @@ pc = int32(*(*U8)(unsafe.Pointer(pTmp)))<<8 | int32(*(*U8)(unsafe.Pointer(pTmp + 1))) if pc <= iAddr { if pc != 0 { - *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70096) + *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70100) } return uintptr(0) } } if pc > maxPC+nByte-4 { - *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70103) + *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70107) } return uintptr(0) } @@ -28375,7 +28377,7 @@ if top == 0 && (*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize == U32(65536) { top = 65536 } else { - return Xsqlite3CorruptError(tls, 70152) + return Xsqlite3CorruptError(tls, 70156) } } @@ -28386,7 +28388,7 @@ *(*int32)(unsafe.Pointer(pIdx)) = libc.AssignInt32(&g2, int32((int64(pSpace)-int64(data))/1)) if g2 <= gap { - return Xsqlite3CorruptError(tls, 70170) + return Xsqlite3CorruptError(tls, 70174) } else { return SQLITE_OK } @@ -28438,22 +28440,22 @@ if int32(iFreeBlk) == 0 { break } - return Xsqlite3CorruptError(tls, 70249) + return Xsqlite3CorruptError(tls, 70253) } iPtr = iFreeBlk } if U32(iFreeBlk) > (*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize-U32(4) { - return Xsqlite3CorruptError(tls, 70254) + return Xsqlite3CorruptError(tls, 70258) } if iFreeBlk != 0 && iEnd+U32(3) >= U32(iFreeBlk) { nFrag = U8(U32(iFreeBlk) - iEnd) if iEnd > U32(iFreeBlk) { - return Xsqlite3CorruptError(tls, 70266) + return Xsqlite3CorruptError(tls, 70270) } iEnd = U32(int32(iFreeBlk) + (int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(iFreeBlk)+2))))<<8 | int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(iFreeBlk)+2) + 1))))) if iEnd > (*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize { - return Xsqlite3CorruptError(tls, 70269) + return Xsqlite3CorruptError(tls, 70273) } iSize = U16(iEnd - U32(iStart)) iFreeBlk = U16(int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFreeBlk))))<<8 | int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFreeBlk) + 1)))) @@ -28463,7 +28465,7 @@ var iPtrEnd int32 = int32(iPtr) + (int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(iPtr)+2))))<<8 | int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(iPtr)+2) + 1)))) if iPtrEnd+3 >= int32(iStart) { if iPtrEnd > int32(iStart) { - return Xsqlite3CorruptError(tls, 70282) + return Xsqlite3CorruptError(tls, 70286) } nFrag = U8(int32(nFrag) + (int32(iStart) - iPtrEnd)) iSize = U16(iEnd - U32(iPtr)) @@ -28471,7 
+28473,7 @@ } } if int32(nFrag) > int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+7)))) { - return Xsqlite3CorruptError(tls, 70288) + return Xsqlite3CorruptError(tls, 70292) } *(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+7))) -= uint8(int32(nFrag)) } @@ -28479,10 +28481,10 @@ x = U16(int32(*(*U8)(unsafe.Pointer(pTmp)))<<8 | int32(*(*U8)(unsafe.Pointer(pTmp + 1)))) if int32(iStart) <= int32(x) { if int32(iStart) < int32(x) { - return Xsqlite3CorruptError(tls, 70297) + return Xsqlite3CorruptError(tls, 70301) } if int32(iPtr) != int32(hdr)+1 { - return Xsqlite3CorruptError(tls, 70298) + return Xsqlite3CorruptError(tls, 70302) } *(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+1))) = U8(int32(iFreeBlk) >> 8) *(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+1) + 1)) = U8(iFreeBlk) @@ -28542,7 +28544,7 @@ (*MemPage)(unsafe.Pointer(pPage)).FxParseCell = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr) }{btreeParseCellPtrIndex})) - return Xsqlite3CorruptError(tls, 70357) + return Xsqlite3CorruptError(tls, 70361) } } else { (*MemPage)(unsafe.Pointer(pPage)).FchildPtrSize = U8(4) @@ -28578,7 +28580,7 @@ (*MemPage)(unsafe.Pointer(pPage)).FxParseCell = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr) }{btreeParseCellPtrIndex})) - return Xsqlite3CorruptError(tls, 70381) + return Xsqlite3CorruptError(tls, 70385) } } return SQLITE_OK @@ -28608,11 +28610,11 @@ var next U32 var size U32 if pc < top { - return Xsqlite3CorruptError(tls, 70432) + return Xsqlite3CorruptError(tls, 70436) } for 1 != 0 { if pc > iCellLast { - return Xsqlite3CorruptError(tls, 70437) + return Xsqlite3CorruptError(tls, 70441) } next = U32(int32(*(*U8)(unsafe.Pointer(data + uintptr(pc))))<<8 | int32(*(*U8)(unsafe.Pointer(data + uintptr(pc) + 1)))) size = U32(int32(*(*U8)(unsafe.Pointer(data + uintptr(pc+2))))<<8 | int32(*(*U8)(unsafe.Pointer(data + uintptr(pc+2) + 1)))) @@ -28623,15 +28625,15 @@ pc = int32(next) } if next > U32(0) { - return Xsqlite3CorruptError(tls, 70447) + return Xsqlite3CorruptError(tls, 70451) } if U32(pc)+size > uint32(usableSize) { - return Xsqlite3CorruptError(tls, 70451) + return Xsqlite3CorruptError(tls, 70455) } } if nFree > usableSize || nFree < iCellFirst { - return Xsqlite3CorruptError(tls, 70463) + return Xsqlite3CorruptError(tls, 70467) } (*MemPage)(unsafe.Pointer(pPage)).FnFree = int32(U16(nFree - iCellFirst)) return SQLITE_OK @@ -28659,14 +28661,14 @@ pc = int32(*(*U8)(unsafe.Pointer(data + uintptr(cellOffset+i*2))))<<8 | int32(*(*U8)(unsafe.Pointer(data + uintptr(cellOffset+i*2) + 1))) if pc < iCellFirst || pc > iCellLast { - return Xsqlite3CorruptError(tls, 70494) + return Xsqlite3CorruptError(tls, 70498) } sz = int32((*struct { f func(*libc.TLS, uintptr, uintptr) U16 })(unsafe.Pointer(&struct{ uintptr }{(*MemPage)(unsafe.Pointer(pPage)).FxCellSize})).f(tls, pPage, data+uintptr(pc))) if pc+sz > usableSize { - return Xsqlite3CorruptError(tls, 70499) + return Xsqlite3CorruptError(tls, 70503) } } return SQLITE_OK @@ -28680,7 +28682,7 @@ data = (*MemPage)(unsafe.Pointer(pPage)).FaData + uintptr((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset) if decodeFlags(tls, pPage, int32(*(*U8)(unsafe.Pointer(data)))) != 0 { - return Xsqlite3CorruptError(tls, 70531) + return Xsqlite3CorruptError(tls, 70535) } (*MemPage)(unsafe.Pointer(pPage)).FmaskPage = U16((*BtShared)(unsafe.Pointer(pBt)).FpageSize - U32(1)) @@ -28692,7 +28694,7 @@ (*MemPage)(unsafe.Pointer(pPage)).FnCell = U16(int32(*(*U8)(unsafe.Pointer(data + 3)))<<8 | 
int32(*(*U8)(unsafe.Pointer(data + 3 + 1)))) if U32((*MemPage)(unsafe.Pointer(pPage)).FnCell) > ((*BtShared)(unsafe.Pointer(pBt)).FpageSize-U32(8))/U32(6) { - return Xsqlite3CorruptError(tls, 70545) + return Xsqlite3CorruptError(tls, 70549) } (*MemPage)(unsafe.Pointer(pPage)).FnFree = -1 @@ -28795,7 +28797,7 @@ if !(pgno > btreePagecount(tls, pBt)) { goto __1 } - rc = Xsqlite3CorruptError(tls, 70700) + rc = Xsqlite3CorruptError(tls, 70704) goto getAndInitPage_error1 __1: ; @@ -28823,7 +28825,7 @@ if !(pCur != 0 && (int32((*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppPage)))).FnCell) < 1 || int32((*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppPage)))).FintKey) != int32((*BtCursor)(unsafe.Pointer(pCur)).FcurIntKey))) { goto __5 } - rc = Xsqlite3CorruptError(tls, 70721) + rc = Xsqlite3CorruptError(tls, 70725) goto getAndInitPage_error2 __5: ; @@ -28862,7 +28864,7 @@ if Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppPage)))).FpDbPage) > 1 { releasePage(tls, *(*uintptr)(unsafe.Pointer(ppPage))) *(*uintptr)(unsafe.Pointer(ppPage)) = uintptr(0) - return Xsqlite3CorruptError(tls, 70787) + return Xsqlite3CorruptError(tls, 70791) } (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppPage)))).FisInit = U8(0) } else { @@ -29745,7 +29747,7 @@ if !(Xsqlite3WritableSchema(tls, (*BtShared)(unsafe.Pointer(pBt)).Fdb) == 0) { goto __18 } - rc = Xsqlite3CorruptError(tls, 71722) + rc = Xsqlite3CorruptError(tls, 71726) goto page1_init_failed goto __19 __18: @@ -30160,7 +30162,7 @@ if int32(eType) == PTRMAP_OVERFLOW2 { if Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer(pPage)).FaData) != iFrom { - return Xsqlite3CorruptError(tls, 72143) + return Xsqlite3CorruptError(tls, 72147) } Xsqlite3Put4byte(tls, (*MemPage)(unsafe.Pointer(pPage)).FaData, iTo) } else { @@ -30186,7 +30188,7 @@ })(unsafe.Pointer(&struct{ uintptr }{(*MemPage)(unsafe.Pointer(pPage)).FxParseCell})).f(tls, pPage, pCell, bp) if U32((*CellInfo)(unsafe.Pointer(bp)).FnLocal) < (*CellInfo)(unsafe.Pointer(bp)).FnPayload { if pCell+uintptr((*CellInfo)(unsafe.Pointer(bp)).FnSize) > (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr((*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize) { - return Xsqlite3CorruptError(tls, 72162) + return Xsqlite3CorruptError(tls, 72166) } if iFrom == Xsqlite3Get4byte(tls, pCell+uintptr((*CellInfo)(unsafe.Pointer(bp)).FnSize)-uintptr(4)) { Xsqlite3Put4byte(tls, pCell+uintptr((*CellInfo)(unsafe.Pointer(bp)).FnSize)-uintptr(4), iTo) @@ -30195,7 +30197,7 @@ } } else { if pCell+uintptr(4) > (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr((*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize) { - return Xsqlite3CorruptError(tls, 72171) + return Xsqlite3CorruptError(tls, 72175) } if Xsqlite3Get4byte(tls, pCell) == iFrom { Xsqlite3Put4byte(tls, pCell, iTo) @@ -30206,7 +30208,7 @@ if i == nCell { if int32(eType) != PTRMAP_BTREE || Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr(int32((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset)+8)) != iFrom { - return Xsqlite3CorruptError(tls, 72183) + return Xsqlite3CorruptError(tls, 72187) } Xsqlite3Put4byte(tls, (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr(int32((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset)+8), iTo) } @@ -30222,7 +30224,7 @@ var pPager uintptr = (*BtShared)(unsafe.Pointer(pBt)).FpPager if iDbPage < Pgno(3) { - return Xsqlite3CorruptError(tls, 72218) + return Xsqlite3CorruptError(tls, 72222) } *(*int32)(unsafe.Pointer(bp)) = 
Xsqlite3PagerMovepage(tls, pPager, (*MemPage)(unsafe.Pointer(pDbPage)).FpDbPage, iFreePage, isCommit) @@ -30283,7 +30285,7 @@ return rc } if int32(*(*U8)(unsafe.Pointer(bp))) == PTRMAP_ROOTPAGE { - return Xsqlite3CorruptError(tls, 72316) + return Xsqlite3CorruptError(tls, 72320) } if int32(*(*U8)(unsafe.Pointer(bp))) == PTRMAP_FREEPAGE { @@ -30318,7 +30320,7 @@ releasePage(tls, *(*uintptr)(unsafe.Pointer(bp + 32))) if *(*Pgno)(unsafe.Pointer(bp + 40)) > dbSize { releasePage(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) - return Xsqlite3CorruptError(tls, 72368) + return Xsqlite3CorruptError(tls, 72372) } } @@ -30378,7 +30380,7 @@ var nFin Pgno = finalDbSize(tls, pBt, nOrig, nFree) if nOrig < nFin || nFree >= nOrig { - rc = Xsqlite3CorruptError(tls, 72436) + rc = Xsqlite3CorruptError(tls, 72440) } else if nFree > Pgno(0) { rc = saveAllCursors(tls, pBt, uint32(0), uintptr(0)) if rc == SQLITE_OK { @@ -30417,7 +30419,7 @@ nOrig = btreePagecount(tls, pBt) if ptrmapPageno(tls, pBt, nOrig) == nOrig || nOrig == U32(Xsqlite3PendingByte)/(*BtShared)(unsafe.Pointer(pBt)).FpageSize+U32(1) { - return Xsqlite3CorruptError(tls, 72487) + return Xsqlite3CorruptError(tls, 72491) } nFree = Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer((*BtShared)(unsafe.Pointer(pBt)).FpPage1)).FaData+36) @@ -30448,7 +30450,7 @@ } nFin = finalDbSize(tls, pBt, nOrig, nVac) if nFin > nOrig { - return Xsqlite3CorruptError(tls, 72514) + return Xsqlite3CorruptError(tls, 72518) } if nFin < nOrig { rc = saveAllCursors(tls, pBt, uint32(0), uintptr(0)) @@ -30789,7 +30791,7 @@ if iTable <= Pgno(1) { if iTable < Pgno(1) { - return Xsqlite3CorruptError(tls, 72978) + return Xsqlite3CorruptError(tls, 72982) } else if btreePagecount(tls, pBt) == Pgno(0) { iTable = Pgno(0) } @@ -31033,14 +31035,14 @@ var pBt uintptr = (*BtCursor)(unsafe.Pointer(pCur)).FpBt if int32((*BtCursor)(unsafe.Pointer(pCur)).Fix) >= int32((*MemPage)(unsafe.Pointer(pPage)).FnCell) { - return Xsqlite3CorruptError(tls, 73385) + return Xsqlite3CorruptError(tls, 73389) } getCellInfo(tls, pCur) aPayload = (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload if Uptr((int64(aPayload)-int64((*MemPage)(unsafe.Pointer(pPage)).FaData))/1) > Uptr((*BtShared)(unsafe.Pointer(pBt)).FusableSize-U32((*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal)) { - return Xsqlite3CorruptError(tls, 73400) + return Xsqlite3CorruptError(tls, 73404) } if offset < U32((*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal) { @@ -31085,7 +31087,7 @@ for *(*Pgno)(unsafe.Pointer(bp)) != 0 { if *(*Pgno)(unsafe.Pointer(bp)) > (*BtShared)(unsafe.Pointer(pBt)).FnPage { - return Xsqlite3CorruptError(tls, 73462) + return Xsqlite3CorruptError(tls, 73466) } *(*Pgno)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pCur)).FaOverflow + uintptr(iIdx)*4)) = *(*Pgno)(unsafe.Pointer(bp)) @@ -31134,7 +31136,7 @@ } if rc == SQLITE_OK && amt > U32(0) { - return Xsqlite3CorruptError(tls, 73547) + return Xsqlite3CorruptError(tls, 73551) } return rc } @@ -31214,7 +31216,7 @@ func moveToChild(tls *libc.TLS, pCur uintptr, newPgno U32) int32 { if int32((*BtCursor)(unsafe.Pointer(pCur)).FiPage) >= BTCURSOR_MAX_DEPTH-1 { - return Xsqlite3CorruptError(tls, 73684) + return Xsqlite3CorruptError(tls, 73688) } (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnSize = U16(0) *(*U8)(unsafe.Pointer(pCur + 1)) &= libc.Uint8FromInt32(libc.CplInt32(BTCF_ValidNKey | BTCF_ValidOvfl)) @@ -31305,7 +31307,7 @@ if !(int32((*MemPage)(unsafe.Pointer(pRoot)).FisInit) == 0 || libc.Bool32((*BtCursor)(unsafe.Pointer(pCur)).FpKeyInfo == uintptr(0)) != 
int32((*MemPage)(unsafe.Pointer(pRoot)).FintKey)) { goto __11 } - return Xsqlite3CorruptError(tls, 73823) + return Xsqlite3CorruptError(tls, 73827) __11: ; skip_init: @@ -31325,7 +31327,7 @@ if !((*MemPage)(unsafe.Pointer(pRoot)).Fpgno != Pgno(1)) { goto __16 } - return Xsqlite3CorruptError(tls, 73835) + return Xsqlite3CorruptError(tls, 73839) __16: ; subpage = Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer(pRoot)).FaData+uintptr(int32((*MemPage)(unsafe.Pointer(pRoot)).FhdrOffset)+8)) @@ -31535,7 +31537,7 @@ if !(pCell >= (*MemPage)(unsafe.Pointer(pPage)).FaDataEnd) { goto __21 } - return Xsqlite3CorruptError(tls, 74077) + return Xsqlite3CorruptError(tls, 74081) __21: ; goto __19 @@ -31739,7 +31741,7 @@ if !!(int32((*MemPage)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pCur)).FpPage)).FisInit) != 0) { goto __4 } - return Xsqlite3CorruptError(tls, 74273) + return Xsqlite3CorruptError(tls, 74277) __4: ; goto bypass_moveto_root @@ -31804,7 +31806,7 @@ if !(nCell < 2 || U32(nCell)/(*BtShared)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pCur)).FpBt)).FusableSize > (*BtShared)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pCur)).FpBt)).FnPage) { goto __17 } - rc = Xsqlite3CorruptError(tls, 74360) + rc = Xsqlite3CorruptError(tls, 74364) goto moveto_index_finish __17: ; @@ -31852,7 +31854,7 @@ if !((*UnpackedRecord)(unsafe.Pointer(pIdxKey)).FerrCode != 0) { goto __24 } - rc = Xsqlite3CorruptError(tls, 74392) + rc = Xsqlite3CorruptError(tls, 74396) __24: ; goto moveto_index_finish @@ -31971,7 +31973,7 @@ pPage = (*BtCursor)(unsafe.Pointer(pCur)).FpPage idx = int32(libc.PreIncUint16(&(*BtCursor)(unsafe.Pointer(pCur)).Fix, 1)) if !(int32((*MemPage)(unsafe.Pointer(pPage)).FisInit) != 0) || Xsqlite3FaultSim(tls, 412) != 0 { - return Xsqlite3CorruptError(tls, 74508) + return Xsqlite3CorruptError(tls, 74512) } if idx >= int32((*MemPage)(unsafe.Pointer(pPage)).FnCell) { @@ -32131,7 +32133,7 @@ if !(n >= mxPage) { goto __1 } - return Xsqlite3CorruptError(tls, 74688) + return Xsqlite3CorruptError(tls, 74692) __1: ; if !(n > U32(0)) { @@ -32196,7 +32198,7 @@ if !(iTrunk > mxPage || libc.PostIncUint32(&nSearch, 1) > n) { goto __16 } - rc = Xsqlite3CorruptError(tls, 74744) + rc = Xsqlite3CorruptError(tls, 74748) goto __17 __16: rc = btreeGetUnusedPage(tls, pBt, iTrunk, bp+8, 0) @@ -32232,7 +32234,7 @@ goto __22 } - rc = Xsqlite3CorruptError(tls, 74773) + rc = Xsqlite3CorruptError(tls, 74777) goto end_allocate_page goto __23 __22: @@ -32276,7 +32278,7 @@ if !(iNewTrunk > mxPage) { goto __32 } - rc = Xsqlite3CorruptError(tls, 74807) + rc = Xsqlite3CorruptError(tls, 74811) goto end_allocate_page __32: ; @@ -32388,7 +32390,7 @@ if !(iPage > mxPage || iPage < Pgno(2)) { goto __51 } - rc = Xsqlite3CorruptError(tls, 74872) + rc = Xsqlite3CorruptError(tls, 74876) goto end_allocate_page __51: ; @@ -32546,7 +32548,7 @@ if !(iPage < Pgno(2) || iPage > (*BtShared)(unsafe.Pointer(pBt)).FnPage) { goto __1 } - return Xsqlite3CorruptError(tls, 74999) + return Xsqlite3CorruptError(tls, 75003) __1: ; if !(pMemPage != 0) { @@ -32603,7 +32605,7 @@ if !(iTrunk > btreePagecount(tls, pBt)) { goto __10 } - *(*int32)(unsafe.Pointer(bp + 8)) = Xsqlite3CorruptError(tls, 75046) + *(*int32)(unsafe.Pointer(bp + 8)) = Xsqlite3CorruptError(tls, 75050) goto freepage_out __10: ; @@ -32619,7 +32621,7 @@ if !(nLeaf > (*BtShared)(unsafe.Pointer(pBt)).FusableSize/U32(4)-U32(2)) { goto __12 } - *(*int32)(unsafe.Pointer(bp + 8)) = Xsqlite3CorruptError(tls, 75057) + *(*int32)(unsafe.Pointer(bp + 8)) = Xsqlite3CorruptError(tls, 75061) goto freepage_out __12: 
; @@ -32693,7 +32695,7 @@ var ovflPageSize U32 if pCell+uintptr((*CellInfo)(unsafe.Pointer(pInfo)).FnSize) > (*MemPage)(unsafe.Pointer(pPage)).FaDataEnd { - return Xsqlite3CorruptError(tls, 75146) + return Xsqlite3CorruptError(tls, 75150) } ovflPgno = Xsqlite3Get4byte(tls, pCell+uintptr((*CellInfo)(unsafe.Pointer(pInfo)).FnSize)-uintptr(4)) pBt = (*MemPage)(unsafe.Pointer(pPage)).FpBt @@ -32705,7 +32707,7 @@ *(*Pgno)(unsafe.Pointer(bp + 8)) = Pgno(0) *(*uintptr)(unsafe.Pointer(bp)) = uintptr(0) if ovflPgno < Pgno(2) || ovflPgno > btreePagecount(tls, pBt) { - return Xsqlite3CorruptError(tls, 75163) + return Xsqlite3CorruptError(tls, 75167) } if nOvfl != 0 { rc = getOverflowPage(tls, pBt, ovflPgno, bp, bp+8) @@ -32716,7 +32718,7 @@ if (*(*uintptr)(unsafe.Pointer(bp)) != 0 || libc.AssignPtrUintptr(bp, btreePageLookup(tls, pBt, ovflPgno)) != uintptr(0)) && Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FpDbPage) != 1 { - rc = Xsqlite3CorruptError(tls, 75183) + rc = Xsqlite3CorruptError(tls, 75187) } else { rc = freePage2(tls, pBt, *(*uintptr)(unsafe.Pointer(bp)), ovflPgno) } @@ -32881,7 +32883,7 @@ hdr = int32((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset) if pc+U32(sz) > (*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize { - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 75436) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 75440) return } rc = freeSpace(tls, pPage, uint16(pc), uint16(sz)) @@ -33160,12 +33162,12 @@ if Uptr(pCell) >= Uptr(aData+uintptr(j)) && Uptr(pCell) < Uptr(pEnd) { if Uptr(pCell+uintptr(sz)) > Uptr(pEnd) { - return Xsqlite3CorruptError(tls, 75737) + return Xsqlite3CorruptError(tls, 75741) } pCell = pTmp + uintptr((int64(pCell)-int64(aData))/1) } else if Uptr(pCell+uintptr(sz)) > Uptr(pSrcEnd) && Uptr(pCell) < Uptr(pSrcEnd) { - return Xsqlite3CorruptError(tls, 75742) + return Xsqlite3CorruptError(tls, 75746) } pData -= uintptr(sz) @@ -33173,7 +33175,7 @@ *(*U8)(unsafe.Pointer(pCellptr + 1)) = U8((int64(pData) - int64(aData)) / 1) pCellptr += uintptr(2) if pData < pCellptr { - return Xsqlite3CorruptError(tls, 75748) + return Xsqlite3CorruptError(tls, 75752) } libc.Xmemmove(tls, pData, pCell, uint64(sz)) @@ -33233,7 +33235,7 @@ if Uptr(*(*uintptr)(unsafe.Pointer((*CellArray)(unsafe.Pointer(pCArray)).FapCell + uintptr(i)*8))+uintptr(sz)) > Uptr(pEnd) && Uptr(*(*uintptr)(unsafe.Pointer((*CellArray)(unsafe.Pointer(pCArray)).FapCell + uintptr(i)*8))) < Uptr(pEnd) { - Xsqlite3CorruptError(tls, 75833) + Xsqlite3CorruptError(tls, 75837) return 1 } libc.Xmemmove(tls, pSlot, *(*uintptr)(unsafe.Pointer((*CellArray)(unsafe.Pointer(pCArray)).FapCell + uintptr(i)*8)), uint64(sz)) @@ -33322,7 +33324,7 @@ if !(nShift > nCell) { goto __2 } - return Xsqlite3CorruptError(tls, 75947) + return Xsqlite3CorruptError(tls, 75951) __2: ; libc.Xmemmove(tls, (*MemPage)(unsafe.Pointer(pPg)).FaCellIdx, (*MemPage)(unsafe.Pointer(pPg)).FaCellIdx+uintptr(nShift*2), uint64(nCell*2)) @@ -33438,7 +33440,7 @@ var pBt uintptr = (*MemPage)(unsafe.Pointer(pPage)).FpBt if int32((*MemPage)(unsafe.Pointer(pPage)).FnCell) == 0 { - return Xsqlite3CorruptError(tls, 76060) + return Xsqlite3CorruptError(tls, 76064) } *(*int32)(unsafe.Pointer(bp + 136)) = allocateBtreePage(tls, pBt, bp, bp+8, uint32(0), uint8(0)) @@ -33758,7 +33760,7 @@ if !(int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pOld)).FaData))) != int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp + 112)))).FaData)))) { goto 
__25 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76481) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76485) goto balance_cleanup __25: ; @@ -33769,7 +33771,7 @@ if !(limit < int32(*(*U16)(unsafe.Pointer(pOld + 28)))) { goto __27 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76505) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76509) goto balance_cleanup __27: ; @@ -33927,7 +33929,7 @@ if !(k > NB+2) { goto __55 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76606) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76610) goto balance_cleanup __55: ; @@ -34001,7 +34003,7 @@ }()) { goto __67 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76639) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76643) goto balance_cleanup __67: ; @@ -34064,7 +34066,7 @@ }()) { goto __75 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76683) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76687) goto balance_cleanup __75: ; @@ -34092,7 +34094,7 @@ *(*int32)(unsafe.Pointer(bp + 172)) == SQLITE_OK) { goto __81 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76716) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76720) __81: ; if !(*(*int32)(unsafe.Pointer(bp + 172)) != 0) { @@ -34353,7 +34355,7 @@ if !(Uptr(pSrcEnd) >= Uptr(pCell1) && Uptr(pSrcEnd) < Uptr(pCell1+uintptr(sz2))) { goto __121 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76916) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76920) goto balance_cleanup __121: ; @@ -34545,7 +34547,7 @@ if pOther != pCur && int32((*BtCursor)(unsafe.Pointer(pOther)).FeState) == CURSOR_VALID && (*BtCursor)(unsafe.Pointer(pOther)).FpPage == (*BtCursor)(unsafe.Pointer(pCur)).FpPage { - return Xsqlite3CorruptError(tls, 77146) + return Xsqlite3CorruptError(tls, 77150) } } return SQLITE_OK @@ -34583,7 +34585,7 @@ break } } else if Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(pPage)).FpDbPage) > 1 { - rc = Xsqlite3CorruptError(tls, 77206) + rc = Xsqlite3CorruptError(tls, 77210) } else { var pParent uintptr = *(*uintptr)(unsafe.Pointer(pCur + 144 + uintptr(iPage-1)*8)) var iIdx int32 = int32(*(*U16)(unsafe.Pointer(pCur + 88 + uintptr(iPage-1)*2))) @@ -34689,7 +34691,7 @@ return rc } if Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FpDbPage) != 1 || (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FisInit != 0 { - rc = Xsqlite3CorruptError(tls, 77370) + rc = Xsqlite3CorruptError(tls, 77374) } else { if U32(iOffset)+ovflPageSize < U32(nTotal) { ovflPgno = Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FaData) @@ -34714,7 +34716,7 @@ if (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload+uintptr((*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal) > (*MemPage)(unsafe.Pointer(pPage)).FaDataEnd || (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload < (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr((*MemPage)(unsafe.Pointer(pPage)).FcellOffset) { - return Xsqlite3CorruptError(tls, 77398) + return Xsqlite3CorruptError(tls, 77402) } if int32((*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal) == nTotal { return btreeOverwriteContent(tls, pPage, (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload, pX, @@ -34784,7 +34786,7 @@ goto __3 } - return Xsqlite3CorruptError(tls, 77479) + return Xsqlite3CorruptError(tls, 77483) __3: ; 
__1: @@ -34897,7 +34899,7 @@ goto __21 } - *(*int32)(unsafe.Pointer(bp + 120)) = Xsqlite3CorruptError(tls, 77602) + *(*int32)(unsafe.Pointer(bp + 120)) = Xsqlite3CorruptError(tls, 77606) goto __22 __21: *(*int32)(unsafe.Pointer(bp + 120)) = btreeComputeFreeSpace(tls, pPage) @@ -34957,6 +34959,7 @@ __25: ; idx = int32((*BtCursor)(unsafe.Pointer(pCur)).Fix) + (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnSize = U16(0) if !(*(*int32)(unsafe.Pointer(bp)) == 0) { goto __31 } @@ -34964,7 +34967,7 @@ if !(idx >= int32((*MemPage)(unsafe.Pointer(pPage)).FnCell)) { goto __33 } - return Xsqlite3CorruptError(tls, 77640) + return Xsqlite3CorruptError(tls, 77645) __33: ; *(*int32)(unsafe.Pointer(bp + 120)) = Xsqlite3PagerWrite(tls, (*MemPage)(unsafe.Pointer(pPage)).FpDbPage) @@ -35002,13 +35005,13 @@ if !(oldCell < (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset)+uintptr(10)) { goto __39 } - return Xsqlite3CorruptError(tls, 77667) + return Xsqlite3CorruptError(tls, 77672) __39: ; if !(oldCell+uintptr(*(*int32)(unsafe.Pointer(bp + 124))) > (*MemPage)(unsafe.Pointer(pPage)).FaDataEnd) { goto __40 } - return Xsqlite3CorruptError(tls, 77670) + return Xsqlite3CorruptError(tls, 77675) __40: ; libc.Xmemcpy(tls, oldCell, newCell, uint64(*(*int32)(unsafe.Pointer(bp + 124)))) @@ -35039,7 +35042,6 @@ ; *(*int32)(unsafe.Pointer(bp + 120)) = insertCell(tls, pPage, idx, newCell, *(*int32)(unsafe.Pointer(bp + 124)), uintptr(0), uint32(0)) - (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnSize = U16(0) if !((*MemPage)(unsafe.Pointer(pPage)).FnOverflow != 0) { goto __44 } @@ -35114,7 +35116,7 @@ nIn = U32((*BtCursor)(unsafe.Pointer(pSrc)).Finfo.FnLocal) aIn = (*BtCursor)(unsafe.Pointer(pSrc)).Finfo.FpPayload if aIn+uintptr(nIn) > (*MemPage)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pSrc)).FpPage)).FaDataEnd { - return Xsqlite3CorruptError(tls, 77773) + return Xsqlite3CorruptError(tls, 77777) } nRem = (*BtCursor)(unsafe.Pointer(pSrc)).Finfo.FnPayload if nIn == nRem && nIn < U32((*MemPage)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pDest)).FpPage)).FmaxLocal) { @@ -35139,7 +35141,7 @@ if nRem > nIn { if aIn+uintptr(nIn)+uintptr(4) > (*MemPage)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pSrc)).FpPage)).FaDataEnd { - return Xsqlite3CorruptError(tls, 77798) + return Xsqlite3CorruptError(tls, 77802) } ovflIn = Xsqlite3Get4byte(tls, (*BtCursor)(unsafe.Pointer(pSrc)).Finfo.FpPayload+uintptr(nIn)) } @@ -35240,7 +35242,7 @@ return *(*int32)(unsafe.Pointer(bp + 24)) } } else { - return Xsqlite3CorruptError(tls, 77894) + return Xsqlite3CorruptError(tls, 77898) } } @@ -35248,11 +35250,11 @@ iCellIdx = int32((*BtCursor)(unsafe.Pointer(pCur)).Fix) pPage = (*BtCursor)(unsafe.Pointer(pCur)).FpPage if int32((*MemPage)(unsafe.Pointer(pPage)).FnCell) <= iCellIdx { - return Xsqlite3CorruptError(tls, 77903) + return Xsqlite3CorruptError(tls, 77907) } pCell = (*MemPage)(unsafe.Pointer(pPage)).FaData + uintptr(int32((*MemPage)(unsafe.Pointer(pPage)).FmaskPage)&(int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FaCellIdx + uintptr(2*iCellIdx))))<<8|int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FaCellIdx + uintptr(2*iCellIdx) + 1))))) if (*MemPage)(unsafe.Pointer(pPage)).FnFree < 0 && btreeComputeFreeSpace(tls, pPage) != 0 { - return Xsqlite3CorruptError(tls, 77907) + return Xsqlite3CorruptError(tls, 77911) } bPreserve = U8(libc.Bool32(int32(flags)&BTREE_SAVEPOSITION != 0)) @@ -35327,7 +35329,7 @@ } pCell = (*MemPage)(unsafe.Pointer(pLeaf)).FaData + 
uintptr(int32((*MemPage)(unsafe.Pointer(pLeaf)).FmaskPage)&(int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pLeaf)).FaCellIdx + uintptr(2*(int32((*MemPage)(unsafe.Pointer(pLeaf)).FnCell)-1)))))<<8|int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pLeaf)).FaCellIdx + uintptr(2*(int32((*MemPage)(unsafe.Pointer(pLeaf)).FnCell)-1)) + 1))))) if pCell < (*MemPage)(unsafe.Pointer(pLeaf)).FaData+4 { - return Xsqlite3CorruptError(tls, 77998) + return Xsqlite3CorruptError(tls, 78002) } nCell = int32((*struct { f func(*libc.TLS, uintptr, uintptr) U16 @@ -35396,7 +35398,7 @@ Xsqlite3BtreeGetMeta(tls, p, BTREE_LARGEST_ROOT_PAGE, bp) if *(*Pgno)(unsafe.Pointer(bp)) > btreePagecount(tls, pBt) { - return Xsqlite3CorruptError(tls, 78114) + return Xsqlite3CorruptError(tls, 78118) } *(*Pgno)(unsafe.Pointer(bp))++ @@ -35425,7 +35427,7 @@ } *(*int32)(unsafe.Pointer(bp + 40)) = ptrmapGet(tls, pBt, *(*Pgno)(unsafe.Pointer(bp)), bp+32, bp+36) if int32(*(*U8)(unsafe.Pointer(bp + 32))) == PTRMAP_ROOTPAGE || int32(*(*U8)(unsafe.Pointer(bp + 32))) == PTRMAP_FREEPAGE { - *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3CorruptError(tls, 78162) + *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3CorruptError(tls, 78166) } if *(*int32)(unsafe.Pointer(bp + 40)) != SQLITE_OK { releasePage(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) @@ -35501,7 +35503,7 @@ if !(pgno > btreePagecount(tls, pBt)) { goto __1 } - return Xsqlite3CorruptError(tls, 78252) + return Xsqlite3CorruptError(tls, 78256) __1: ; *(*int32)(unsafe.Pointer(bp + 32)) = getAndInitPage(tls, pBt, pgno, bp, uintptr(0), 0) @@ -35515,7 +35517,7 @@ Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FpDbPage) != 1+libc.Bool32(pgno == Pgno(1))) { goto __3 } - *(*int32)(unsafe.Pointer(bp + 32)) = Xsqlite3CorruptError(tls, 78259) + *(*int32)(unsafe.Pointer(bp + 32)) = Xsqlite3CorruptError(tls, 78263) goto cleardatabasepage_out __3: ; @@ -35649,7 +35651,7 @@ var pBt uintptr = (*Btree)(unsafe.Pointer(p)).FpBt if iTable > btreePagecount(tls, pBt) { - return Xsqlite3CorruptError(tls, 78363) + return Xsqlite3CorruptError(tls, 78367) } *(*int32)(unsafe.Pointer(bp + 12)) = Xsqlite3BtreeClearTable(tls, p, int32(iTable), uintptr(0)) @@ -38118,7 +38120,7 @@ var rc int32 (*Mem)(unsafe.Pointer(pMem)).Fflags = U16(MEM_Null) if Xsqlite3BtreeMaxRecordSize(tls, pCur) < Sqlite3_int64(offset+amt) { - return Xsqlite3CorruptError(tls, 81630) + return Xsqlite3CorruptError(tls, 81634) } if SQLITE_OK == libc.AssignInt32(&rc, Xsqlite3VdbeMemClearAndResize(tls, pMem, int32(amt+U32(1)))) { rc = Xsqlite3BtreePayload(tls, pCur, offset, amt, (*Mem)(unsafe.Pointer(pMem)).Fz) @@ -38767,7 +38769,7 @@ return Xsqlite3GetVarint32(tls, a, bp) }()) if *(*int32)(unsafe.Pointer(bp)) > nRec || iHdr >= *(*int32)(unsafe.Pointer(bp)) { - return Xsqlite3CorruptError(tls, 82270) + return Xsqlite3CorruptError(tls, 82274) } iField = *(*int32)(unsafe.Pointer(bp)) for i = 0; i <= iCol; i++ { @@ -38782,14 +38784,14 @@ }()) if iHdr > *(*int32)(unsafe.Pointer(bp)) { - return Xsqlite3CorruptError(tls, 82276) + return Xsqlite3CorruptError(tls, 82280) } szField = int32(Xsqlite3VdbeSerialTypeLen(tls, *(*U32)(unsafe.Pointer(bp + 4)))) iField = iField + szField } if iField > nRec { - return Xsqlite3CorruptError(tls, 82282) + return Xsqlite3CorruptError(tls, 82286) } if pMem == uintptr(0) { pMem = libc.AssignPtrUintptr(ppVal, Xsqlite3ValueNew(tls, db)) @@ -41093,7 +41095,7 @@ return rc } if *(*int32)(unsafe.Pointer(bp)) != 0 { - return Xsqlite3CorruptError(tls, 86058) + return 
Xsqlite3CorruptError(tls, 86062) } (*VdbeCursor)(unsafe.Pointer(p)).FdeferredMoveto = U8(0) (*VdbeCursor)(unsafe.Pointer(p)).FcacheStatus = U32(CACHE_STALE) @@ -41644,7 +41646,7 @@ i = 0 } if d1 > uint32(nKey1) { - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 86985)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 86989)) return 0 } @@ -41709,7 +41711,7 @@ if d1+U32((*Mem)(unsafe.Pointer(bp+8)).Fn) > uint32(nKey1) || int32((*KeyInfo)(unsafe.Pointer(libc.AssignUintptr(&pKeyInfo, (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FpKeyInfo))).FnAllField) <= i { - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87062)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87066)) return 0 } else if *(*uintptr)(unsafe.Pointer(pKeyInfo + 32 + uintptr(i)*8)) != 0 { (*Mem)(unsafe.Pointer(bp + 8)).Fenc = (*KeyInfo)(unsafe.Pointer(pKeyInfo)).Fenc @@ -41743,7 +41745,7 @@ var nStr int32 = int32((*(*U32)(unsafe.Pointer(bp + 64)) - U32(12)) / U32(2)) if d1+U32(nStr) > uint32(nKey1) { - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87092)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87096)) return 0 } else if int32((*Mem)(unsafe.Pointer(pRhs)).Fflags)&MEM_Zero != 0 { if !(isAllZero(tls, aKey1+uintptr(d1), nStr) != 0) { @@ -41793,7 +41795,7 @@ } idx1 = idx1 + U32(Xsqlite3VarintLen(tls, uint64(*(*U32)(unsafe.Pointer(bp + 64))))) if idx1 >= *(*U32)(unsafe.Pointer(bp + 4)) { - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87136)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87140)) return 0 } } @@ -41939,7 +41941,7 @@ if !(szHdr+nStr > nKey1) { goto __7 } - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87299)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87303)) return 0 __7: ; @@ -42110,7 +42112,7 @@ idx_rowid_corruption: ; Xsqlite3VdbeMemReleaseMalloc(tls, bp) - return Xsqlite3CorruptError(tls, 87457) + return Xsqlite3CorruptError(tls, 87461) } // Compare the key of the index entry that cursor pC is pointing to against @@ -42136,7 +42138,7 @@ if nCellKey <= int64(0) || nCellKey > int64(0x7fffffff) { *(*int32)(unsafe.Pointer(res)) = 0 - return Xsqlite3CorruptError(tls, 87490) + return Xsqlite3CorruptError(tls, 87494) } Xsqlite3VdbeMemInit(tls, bp, db, uint16(0)) rc = Xsqlite3VdbeMemFromBtreeZeroOffset(tls, pCur, U32(nCellKey), bp) @@ -42410,7 +42412,7 @@ var v uintptr = pStmt var db uintptr = (*Vdbe)(unsafe.Pointer(v)).Fdb if vdbeSafety(tls, v) != 0 { - return Xsqlite3MisuseError(tls, 87854) + return Xsqlite3MisuseError(tls, 87858) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) if (*Vdbe)(unsafe.Pointer(v)).FstartTime > int64(0) { @@ -43025,7 +43027,7 @@ var db uintptr if vdbeSafetyNotNull(tls, v) != 0 { - return Xsqlite3MisuseError(tls, 88544) + return Xsqlite3MisuseError(tls, 88548) } db = (*Vdbe)(unsafe.Pointer(v)).Fdb Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) @@ -43545,7 +43547,7 @@ var pVar uintptr if vdbeSafetyNotNull(tls, p) != 0 { - return Xsqlite3MisuseError(tls, 89208) + return Xsqlite3MisuseError(tls, 89212) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer((*Vdbe)(unsafe.Pointer(p)).Fdb)).Fmutex) if int32((*Vdbe)(unsafe.Pointer(p)).FeVdbeState) != VDBE_READY_STATE { @@ -43553,7 +43555,7 @@ 
Xsqlite3_mutex_leave(tls, (*Sqlite3)(unsafe.Pointer((*Vdbe)(unsafe.Pointer(p)).Fdb)).Fmutex) Xsqlite3_log(tls, SQLITE_MISUSE, ts+5350, libc.VaList(bp, (*Vdbe)(unsafe.Pointer(p)).FzSql)) - return Xsqlite3MisuseError(tls, 89216) + return Xsqlite3MisuseError(tls, 89220) } if i >= uint32((*Vdbe)(unsafe.Pointer(p)).FnVar) { Xsqlite3Error(tls, (*Vdbe)(unsafe.Pointer(p)).Fdb, SQLITE_RANGE) @@ -43958,7 +43960,7 @@ if !(!(p != 0) || (*PreUpdate)(unsafe.Pointer(p)).Fop == SQLITE_INSERT) { goto __1 } - rc = Xsqlite3MisuseError(tls, 89707) + rc = Xsqlite3MisuseError(tls, 89711) goto preupdate_old_out __1: ; @@ -44102,7 +44104,7 @@ if !(!(p != 0) || (*PreUpdate)(unsafe.Pointer(p)).Fop == SQLITE_DELETE) { goto __1 } - rc = Xsqlite3MisuseError(tls, 89809) + rc = Xsqlite3MisuseError(tls, 89813) goto preupdate_new_out __1: ; @@ -44546,10 +44548,6 @@ } else if int32((*Mem)(unsafe.Pointer(p)).Fflags)&MEM_Real != 0 { h = h + U64(Xsqlite3VdbeIntValue(tls, p)) } else if int32((*Mem)(unsafe.Pointer(p)).Fflags)&(MEM_Str|MEM_Blob) != 0 { - h = h + U64((*Mem)(unsafe.Pointer(p)).Fn) - if int32((*Mem)(unsafe.Pointer(p)).Fflags)&MEM_Zero != 0 { - h = h + U64(*(*int32)(unsafe.Pointer(p))) - } } } return h @@ -47198,7 +47196,7 @@ goto __9 goto __425 __424: - rc = Xsqlite3CorruptError(tls, 93317) + rc = Xsqlite3CorruptError(tls, 93320) goto abort_due_to_error __425: ; @@ -48958,7 +48956,7 @@ if !((*Op)(unsafe.Pointer(pOp)).Fp2 == 0) { goto __682 } - rc = Xsqlite3CorruptError(tls, 95560) + rc = Xsqlite3CorruptError(tls, 95563) goto __683 __682: goto jump_to_p2 @@ -49736,7 +49734,7 @@ if !((*Op)(unsafe.Pointer(pOp)).Fp5 != 0 && !(Xsqlite3WritableSchema(tls, db) != 0)) { goto __770 } - rc = Xsqlite3ReportError(tls, SQLITE_CORRUPT|int32(3)<<8, 96635, ts+5859) + rc = Xsqlite3ReportError(tls, SQLITE_CORRUPT|int32(3)<<8, 96638, ts+5859) goto abort_due_to_error __770: ; @@ -49846,7 +49844,7 @@ if !(nCellKey <= int64(0) || nCellKey > int64(0x7fffffff)) { goto __781 } - rc = Xsqlite3CorruptError(tls, 96840) + rc = Xsqlite3CorruptError(tls, 96843) goto abort_due_to_error __781: ; @@ -50040,7 +50038,7 @@ goto __803 } - rc = Xsqlite3CorruptError(tls, 97092) + rc = Xsqlite3CorruptError(tls, 97095) __803: ; Xsqlite3DbFreeNN(tls, db, zSql) @@ -51407,7 +51405,7 @@ if !(rc == SQLITE_IOERR|int32(33)<<8) { goto __957 } - rc = Xsqlite3CorruptError(tls, 99031) + rc = Xsqlite3CorruptError(tls, 99034) __957: ; __956: @@ -51927,7 +51925,7 @@ var db uintptr if p == uintptr(0) { - return Xsqlite3MisuseError(tls, 99516) + return Xsqlite3MisuseError(tls, 99519) } db = (*Incrblob)(unsafe.Pointer(p)).Fdb Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) @@ -52010,7 +52008,7 @@ var db uintptr if p == uintptr(0) { - return Xsqlite3MisuseError(tls, 99616) + return Xsqlite3MisuseError(tls, 99619) } db = (*Incrblob)(unsafe.Pointer(p)).Fdb Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) @@ -55450,14 +55448,10 @@ ; Xsqlite3WalkExpr(tls, pWalker, (*Expr)(unsafe.Pointer(pExpr)).FpLeft) if 0 == Xsqlite3ExprCanBeNull(tls, (*Expr)(unsafe.Pointer(pExpr)).FpLeft) && !(int32((*Parse)(unsafe.Pointer(pParse)).FeParseMode) >= PARSE_MODE_RENAME) { - if int32((*Expr)(unsafe.Pointer(pExpr)).Fop) == TK_NOTNULL { - *(*uintptr)(unsafe.Pointer(pExpr + 8)) = ts + 6757 - *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_IsTrue) - } else { - *(*uintptr)(unsafe.Pointer(pExpr + 8)) = ts + 6762 - *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_IsFalse) - } - (*Expr)(unsafe.Pointer(pExpr)).Fop = U8(TK_TRUEFALSE) + *(*int32)(unsafe.Pointer(pExpr + 8)) = 
libc.Bool32(int32((*Expr)(unsafe.Pointer(pExpr)).Fop) == TK_NOTNULL) + *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_IntValue) + (*Expr)(unsafe.Pointer(pExpr)).Fop = U8(TK_INTEGER) + i = 0 p = pNC __4: @@ -55501,7 +55495,7 @@ var pLeft uintptr = (*Expr)(unsafe.Pointer(pExpr)).FpLeft if (*NameContext)(unsafe.Pointer(pNC)).FncFlags&(NC_IdxExpr|NC_GenCol) != 0 { - notValidImpl(tls, pParse, pNC, ts+6768, uintptr(0), pExpr) + notValidImpl(tls, pParse, pNC, ts+6757, uintptr(0), pExpr) } pRight = (*Expr)(unsafe.Pointer(pExpr)).FpRight @@ -55565,7 +55559,7 @@ (*Expr)(unsafe.Pointer(pExpr)).FiTable = exprProbability(tls, (*ExprList_item)(unsafe.Pointer(pList+8+1*32)).FpExpr) if (*Expr)(unsafe.Pointer(pExpr)).FiTable < 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+6785, libc.VaList(bp, pExpr)) + ts+6774, libc.VaList(bp, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } } else { @@ -55581,7 +55575,7 @@ var auth int32 = Xsqlite3AuthCheck(tls, pParse, SQLITE_FUNCTION, uintptr(0), (*FuncDef)(unsafe.Pointer(pDef)).FzName, uintptr(0)) if auth != SQLITE_OK { if auth == SQLITE_DENY { - Xsqlite3ErrorMsg(tls, pParse, ts+6849, + Xsqlite3ErrorMsg(tls, pParse, ts+6838, libc.VaList(bp+8, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } @@ -55595,7 +55589,7 @@ } if (*FuncDef)(unsafe.Pointer(pDef)).FfuncFlags&U32(SQLITE_FUNC_CONSTANT) == U32(0) { if (*NameContext)(unsafe.Pointer(pNC)).FncFlags&(NC_IdxExpr|NC_PartIdx|NC_GenCol) != 0 { - notValidImpl(tls, pParse, pNC, ts+6885, uintptr(0), pExpr) + notValidImpl(tls, pParse, pNC, ts+6874, uintptr(0), pExpr) } } else { @@ -55618,30 +55612,30 @@ if 0 == libc.Bool32(int32((*Parse)(unsafe.Pointer(pParse)).FeParseMode) >= PARSE_MODE_RENAME) { if pDef != 0 && (*FuncDef)(unsafe.Pointer(pDef)).FxValue == uintptr(0) && pWin != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+6913, libc.VaList(bp+16, pExpr)) + ts+6902, libc.VaList(bp+16, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } else if is_agg != 0 && (*NameContext)(unsafe.Pointer(pNC)).FncFlags&NC_AllowAgg == 0 || is_agg != 0 && (*FuncDef)(unsafe.Pointer(pDef)).FfuncFlags&U32(SQLITE_FUNC_WINDOW) != 0 && !(pWin != 0) || is_agg != 0 && pWin != 0 && (*NameContext)(unsafe.Pointer(pNC)).FncFlags&NC_AllowWin == 0 { var zType uintptr if (*FuncDef)(unsafe.Pointer(pDef)).FfuncFlags&U32(SQLITE_FUNC_WINDOW) != 0 || pWin != 0 { - zType = ts + 6956 + zType = ts + 6945 } else { - zType = ts + 6963 + zType = ts + 6952 } - Xsqlite3ErrorMsg(tls, pParse, ts+6973, libc.VaList(bp+24, zType, pExpr)) + Xsqlite3ErrorMsg(tls, pParse, ts+6962, libc.VaList(bp+24, zType, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ is_agg = 0 } else if no_such_func != 0 && int32((*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).Finit.Fbusy) == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+7001, libc.VaList(bp+40, pExpr)) + Xsqlite3ErrorMsg(tls, pParse, ts+6990, libc.VaList(bp+40, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } else if wrong_num_args != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+7023, + Xsqlite3ErrorMsg(tls, pParse, ts+7012, libc.VaList(bp+48, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } else if is_agg == 0 && (*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_WinFunc) != U32(0) { Xsqlite3ErrorMsg(tls, pParse, - ts+7067, + ts+7056, libc.VaList(bp+56, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } @@ -55713,15 +55707,15 @@ var nRef int32 = (*NameContext)(unsafe.Pointer(pNC)).FnRef if (*NameContext)(unsafe.Pointer(pNC)).FncFlags&NC_SelfRef != 0 { - notValidImpl(tls, pParse, pNC, ts+7115, pExpr, pExpr) + 
notValidImpl(tls, pParse, pNC, ts+7104, pExpr, pExpr) } else { Xsqlite3WalkSelect(tls, pWalker, *(*uintptr)(unsafe.Pointer(pExpr + 32))) } if nRef != (*NameContext)(unsafe.Pointer(pNC)).FnRef { *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_VarSelect) - *(*int32)(unsafe.Pointer(pNC + 40)) |= NC_VarSelect } + *(*int32)(unsafe.Pointer(pNC + 40)) |= NC_Subquery } break @@ -55729,7 +55723,7 @@ case TK_VARIABLE: { if (*NameContext)(unsafe.Pointer(pNC)).FncFlags&(NC_IsCheck|NC_PartIdx|NC_IdxExpr|NC_GenCol) != 0 { - notValidImpl(tls, pParse, pNC, ts+7126, pExpr, pExpr) + notValidImpl(tls, pParse, pNC, ts+7115, pExpr, pExpr) } break @@ -55860,7 +55854,7 @@ defer tls.Free(24) Xsqlite3ErrorMsg(tls, pParse, - ts+7137, libc.VaList(bp, i, zType, mx)) + ts+7126, libc.VaList(bp, i, zType, mx)) Xsqlite3RecordErrorOffsetOfExpr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pError) } @@ -55880,7 +55874,7 @@ } db = (*Parse)(unsafe.Pointer(pParse)).Fdb if (*ExprList)(unsafe.Pointer(pOrderBy)).FnExpr > *(*int32)(unsafe.Pointer(db + 136 + 2*4)) { - Xsqlite3ErrorMsg(tls, pParse, ts+7193, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+7182, 0) return 1 } for i = 0; i < (*ExprList)(unsafe.Pointer(pOrderBy)).FnExpr; i++ { @@ -55915,7 +55909,7 @@ } if Xsqlite3ExprIsInteger(tls, pE, bp+8) != 0 { if *(*int32)(unsafe.Pointer(bp + 8)) <= 0 || *(*int32)(unsafe.Pointer(bp + 8)) > (*ExprList)(unsafe.Pointer(pEList)).FnExpr { - resolveOutOfRangeError(tls, pParse, ts+7227, i+1, (*ExprList)(unsafe.Pointer(pEList)).FnExpr, pE) + resolveOutOfRangeError(tls, pParse, ts+7216, i+1, (*ExprList)(unsafe.Pointer(pEList)).FnExpr, pE) return 1 } } else { @@ -55972,7 +55966,7 @@ for i = 0; i < (*ExprList)(unsafe.Pointer(pOrderBy)).FnExpr; i++ { if int32(*(*uint16)(unsafe.Pointer(pOrderBy + 8 + uintptr(i)*32 + 16 + 4))&0x4>>2) == 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+7233, libc.VaList(bp, i+1)) + ts+7222, libc.VaList(bp, i+1)) return 1 } } @@ -56000,7 +55994,7 @@ return 0 } if (*ExprList)(unsafe.Pointer(pOrderBy)).FnExpr > *(*int32)(unsafe.Pointer(db + 136 + 2*4)) { - Xsqlite3ErrorMsg(tls, pParse, ts+7294, libc.VaList(bp, zType)) + Xsqlite3ErrorMsg(tls, pParse, ts+7283, libc.VaList(bp, zType)) return 1 } pEList = (*Select)(unsafe.Pointer(pSelect)).FpEList @@ -56214,7 +56208,7 @@ *(*int32)(unsafe.Pointer(bp + 40)) |= NC_UEList if (*Select)(unsafe.Pointer(p)).FpHaving != 0 { if (*Select)(unsafe.Pointer(p)).FselFlags&U32(SF_Aggregate) == U32(0) { - Xsqlite3ErrorMsg(tls, pParse, ts+7325, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+7314, 0) return WRC_Abort } if Xsqlite3ResolveExprNames(tls, bp, (*Select)(unsafe.Pointer(p)).FpHaving) != 0 { @@ -56254,7 +56248,7 @@ if (*Select)(unsafe.Pointer(p)).FpOrderBy != uintptr(0) && isCompound <= nCompound && - resolveOrderGroupBy(tls, bp, p, (*Select)(unsafe.Pointer(p)).FpOrderBy, ts+7227) != 0 { + resolveOrderGroupBy(tls, bp, p, (*Select)(unsafe.Pointer(p)).FpOrderBy, ts+7216) != 0 { return WRC_Abort } if (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { @@ -56265,7 +56259,7 @@ if pGroupBy != 0 { var pItem uintptr - if resolveOrderGroupBy(tls, bp, p, pGroupBy, ts+7364) != 0 || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { + if resolveOrderGroupBy(tls, bp, p, pGroupBy, ts+7353) != 0 || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { return WRC_Abort } i = 0 @@ -56277,7 +56271,7 @@ { if (*Expr)(unsafe.Pointer((*ExprList_item)(unsafe.Pointer(pItem)).FpExpr)).Fflags&U32(EP_Agg) != U32(0) { Xsqlite3ErrorMsg(tls, pParse, - ts+7370, 0) + ts+7359, 0) return WRC_Abort } @@ -57141,7 +57135,7 @@ var mxHeight int32 = 
*(*int32)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb + 136 + 3*4)) if nHeight > mxHeight { Xsqlite3ErrorMsg(tls, pParse, - ts+7429, libc.VaList(bp, mxHeight)) + ts+7418, libc.VaList(bp, mxHeight)) rc = SQLITE_ERROR } return rc @@ -57390,10 +57384,10 @@ nExprElem = 1 } if nExprElem != nElem { - Xsqlite3ErrorMsg(tls, pParse, ts+7477, + Xsqlite3ErrorMsg(tls, pParse, ts+7466, libc.VaList(bp, nExprElem, func() uintptr { if nExprElem > 1 { - return ts + 7521 + return ts + 7510 } return ts + 1547 }(), nElem)) @@ -57434,7 +57428,7 @@ !(int32((*Parse)(unsafe.Pointer(pParse)).FeParseMode) >= PARSE_MODE_RENAME) { Xsqlite3ExprDeferredDelete(tls, pParse, pLeft) Xsqlite3ExprDeferredDelete(tls, pParse, pRight) - return Xsqlite3Expr(tls, db, TK_INTEGER, ts+7523) + return Xsqlite3Expr(tls, db, TK_INTEGER, ts+7512) } else { return Xsqlite3PExpr(tls, pParse, TK_AND, pLeft, pRight) } @@ -57460,7 +57454,7 @@ if pList != 0 && (*ExprList)(unsafe.Pointer(pList)).FnExpr > *(*int32)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb + 136 + 6*4)) && !(int32((*Parse)(unsafe.Pointer(pParse)).Fnested) != 0) { - Xsqlite3ErrorMsg(tls, pParse, ts+7525, libc.VaList(bp, pToken)) + Xsqlite3ErrorMsg(tls, pParse, ts+7514, libc.VaList(bp, pToken)) } *(*uintptr)(unsafe.Pointer(pNew + 32)) = pList *(*U32)(unsafe.Pointer(pNew + 4)) |= U32(EP_HasFunc) @@ -57488,7 +57482,7 @@ if (*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_FromDDL) != U32(0) { if (*FuncDef)(unsafe.Pointer(pDef)).FfuncFlags&U32(SQLITE_FUNC_DIRECT) != U32(0) || (*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).Fflags&uint64(SQLITE_TrustedSchema) == uint64(0) { - Xsqlite3ErrorMsg(tls, pParse, ts+7559, libc.VaList(bp, pExpr)) + Xsqlite3ErrorMsg(tls, pParse, ts+7548, libc.VaList(bp, pExpr)) } } } @@ -57535,7 +57529,7 @@ } if bOk == 0 || *(*I64)(unsafe.Pointer(bp + 8)) < int64(1) || *(*I64)(unsafe.Pointer(bp + 8)) > I64(*(*int32)(unsafe.Pointer(db + 136 + 9*4))) { - Xsqlite3ErrorMsg(tls, pParse, ts+7579, + Xsqlite3ErrorMsg(tls, pParse, ts+7568, libc.VaList(bp, *(*int32)(unsafe.Pointer(db + 136 + 9*4)))) Xsqlite3RecordErrorOffsetOfExpr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pExpr) return @@ -57560,7 +57554,7 @@ } (*Expr)(unsafe.Pointer(pExpr)).FiColumn = x if int32(x) > *(*int32)(unsafe.Pointer(db + 136 + 9*4)) { - Xsqlite3ErrorMsg(tls, pParse, ts+7622, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+7611, 0) Xsqlite3RecordErrorOffsetOfExpr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pExpr) } } @@ -58135,7 +58129,7 @@ if !(int32((*Expr)(unsafe.Pointer(pExpr)).Fop) != TK_SELECT && (*IdList)(unsafe.Pointer(pColumns)).FnId != libc.AssignInt32(&n, Xsqlite3ExprVectorSize(tls, pExpr))) { goto __3 } - Xsqlite3ErrorMsg(tls, pParse, ts+7645, + Xsqlite3ErrorMsg(tls, pParse, ts+7634, libc.VaList(bp, (*IdList)(unsafe.Pointer(pColumns)).FnId, n)) goto vector_append_error __3: @@ -58258,7 +58252,7 @@ var mx int32 = *(*int32)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb + 136 + 2*4)) if pEList != 0 && (*ExprList)(unsafe.Pointer(pEList)).FnExpr > mx { - Xsqlite3ErrorMsg(tls, pParse, ts+7675, libc.VaList(bp, zObject)) + Xsqlite3ErrorMsg(tls, pParse, ts+7664, libc.VaList(bp, zObject)) } } @@ -58314,10 +58308,10 @@ // "false" EP_IsFalse // anything else 0 func Xsqlite3IsTrueOrFalse(tls *libc.TLS, zIn uintptr) U32 { - if Xsqlite3StrICmp(tls, zIn, ts+6757) == 0 { + if Xsqlite3StrICmp(tls, zIn, ts+7687) == 0 { return U32(EP_IsTrue) } - if Xsqlite3StrICmp(tls, zIn, ts+6762) == 0 { + if Xsqlite3StrICmp(tls, zIn, ts+7692) == 0 { return U32(EP_IsFalse) } return U32(0) @@ 
-59392,7 +59386,7 @@ } if (*Select)(unsafe.Pointer(pSel)).FpLimit != 0 { var db uintptr = (*Parse)(unsafe.Pointer(pParse)).Fdb - pLimit = Xsqlite3Expr(tls, db, TK_INTEGER, ts+7523) + pLimit = Xsqlite3Expr(tls, db, TK_INTEGER, ts+7512) if pLimit != 0 { (*Expr)(unsafe.Pointer(pLimit)).FaffExpr = int8(SQLITE_AFF_NUMERIC) pLimit = Xsqlite3PExpr(tls, pParse, TK_NE, @@ -59830,6 +59824,7 @@ func Xsqlite3ExprCodeGeneratedColumn(tls *libc.TLS, pParse uintptr, pTab uintptr, pCol uintptr, regOut int32) { var iAddr int32 var v uintptr = (*Parse)(unsafe.Pointer(pParse)).FpVdbe + var nErr int32 = (*Parse)(unsafe.Pointer(pParse)).FnErr if (*Parse)(unsafe.Pointer(pParse)).FiSelfTab > 0 { iAddr = Xsqlite3VdbeAddOp3(tls, v, OP_IfNullRow, (*Parse)(unsafe.Pointer(pParse)).FiSelfTab-1, 0, regOut) @@ -59843,6 +59838,9 @@ if iAddr != 0 { Xsqlite3VdbeJumpHere(tls, v, iAddr) } + if (*Parse)(unsafe.Pointer(pParse)).FnErr > nErr { + (*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).FerrByteOffset = -1 + } } // Generate code to extract the value of the iCol-th column of a table. @@ -60061,6 +60059,7 @@ var p uintptr var v uintptr for p = (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr; p != 0; p = (*IndexedExpr)(unsafe.Pointer(p)).FpIENext { + var exprAff U8 var iDataCur int32 = (*IndexedExpr)(unsafe.Pointer(p)).FiDataCur if iDataCur < 0 { continue @@ -60074,6 +60073,14 @@ if Xsqlite3ExprCompare(tls, uintptr(0), pExpr, (*IndexedExpr)(unsafe.Pointer(p)).FpExpr, iDataCur) != 0 { continue } + + exprAff = U8(Xsqlite3ExprAffinity(tls, pExpr)) + if int32(exprAff) <= SQLITE_AFF_BLOB && int32((*IndexedExpr)(unsafe.Pointer(p)).Faff) != SQLITE_AFF_BLOB || + int32(exprAff) == SQLITE_AFF_TEXT && int32((*IndexedExpr)(unsafe.Pointer(p)).Faff) != SQLITE_AFF_TEXT || + int32(exprAff) >= SQLITE_AFF_NUMERIC && int32((*IndexedExpr)(unsafe.Pointer(p)).Faff) != SQLITE_AFF_NUMERIC { + continue + } + v = (*Parse)(unsafe.Pointer(pParse)).FpVdbe if (*IndexedExpr)(unsafe.Pointer(p)).FbMaybeNullRow != 0 { @@ -60847,7 +60854,7 @@ if !((*Expr)(unsafe.Pointer(pExpr)).FiTable != n1) { goto __122 } - Xsqlite3ErrorMsg(tls, pParse, ts+7645, + Xsqlite3ErrorMsg(tls, pParse, ts+7634, libc.VaList(bp+24, (*Expr)(unsafe.Pointer(pExpr)).FiTable, n1)) __122: ; @@ -60869,11 +60876,10 @@ return target __50: - if !(!((*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_Collate) != U32(0)) && - (*Expr)(unsafe.Pointer(pExpr)).FpLeft != 0 && - int32((*Expr)(unsafe.Pointer((*Expr)(unsafe.Pointer(pExpr)).FpLeft)).Fop) == TK_FUNCTION) { + if !!((*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_Collate) != U32(0)) { goto __123 } + inReg = Xsqlite3ExprCodeTarget(tls, pParse, (*Expr)(unsafe.Pointer(pExpr)).FpLeft, target) if !(inReg != target) { goto __125 @@ -60944,13 +60950,19 @@ ; __127: ; - addrINR = Xsqlite3VdbeAddOp1(tls, v, OP_IfNullRow, (*Expr)(unsafe.Pointer(pExpr)).FiTable) + addrINR = Xsqlite3VdbeAddOp3(tls, v, OP_IfNullRow, (*Expr)(unsafe.Pointer(pExpr)).FiTable, 0, target) (*Parse)(unsafe.Pointer(pParse)).FokConstFactor = U8(0) inReg = Xsqlite3ExprCodeTarget(tls, pParse, (*Expr)(unsafe.Pointer(pExpr)).FpLeft, target) (*Parse)(unsafe.Pointer(pParse)).FokConstFactor = okConstFactor + if !(inReg != target) { + goto __130 + } + Xsqlite3VdbeAddOp2(tls, v, OP_SCopy, inReg, target) + inReg = target +__130: + ; Xsqlite3VdbeJumpHere(tls, v, addrINR) - Xsqlite3VdbeChangeP3(tls, v, addrINR, inReg) goto __5 __56: @@ -60963,15 +60975,15 @@ nExpr = (*ExprList)(unsafe.Pointer(pEList)).FnExpr endLabel = Xsqlite3VdbeMakeLabel(tls, pParse) if !(libc.AssignUintptr(&pX, 
(*Expr)(unsafe.Pointer(pExpr)).FpLeft) != uintptr(0)) { - goto __130 + goto __131 } pDel = Xsqlite3ExprDup(tls, db1, pX, 0) if !((*Sqlite3)(unsafe.Pointer(db1)).FmallocFailed != 0) { - goto __131 + goto __132 } Xsqlite3ExprDelete(tls, db1, pDel) goto __5 -__131: +__132: ; exprToRegister(tls, pDel, exprCodeVector(tls, pParse, pDel, bp+40)) @@ -60981,22 +60993,22 @@ pTest = bp + 120 *(*int32)(unsafe.Pointer(bp + 40)) = 0 -__130: +__131: ; i1 = 0 -__132: +__133: if !(i1 < nExpr-1) { - goto __134 + goto __135 } if !(pX != 0) { - goto __135 + goto __136 } (*Expr)(unsafe.Pointer(bp + 120)).FpRight = (*ExprList_item)(unsafe.Pointer(aListelem + uintptr(i1)*32)).FpExpr - goto __136 -__135: - pTest = (*ExprList_item)(unsafe.Pointer(aListelem + uintptr(i1)*32)).FpExpr + goto __137 __136: + pTest = (*ExprList_item)(unsafe.Pointer(aListelem + uintptr(i1)*32)).FpExpr +__137: ; nextCase = Xsqlite3VdbeMakeLabel(tls, pParse) @@ -61005,21 +61017,21 @@ Xsqlite3ExprCode(tls, pParse, (*ExprList_item)(unsafe.Pointer(aListelem+uintptr(i1+1)*32)).FpExpr, target) Xsqlite3VdbeGoto(tls, v, endLabel) Xsqlite3VdbeResolveLabel(tls, v, nextCase) - goto __133 -__133: - i1 = i1 + 2 - goto __132 goto __134 __134: + i1 = i1 + 2 + goto __133 + goto __135 +__135: ; if !(nExpr&1 != 0) { - goto __137 + goto __138 } Xsqlite3ExprCode(tls, pParse, (*ExprList_item)(unsafe.Pointer(pEList+8+uintptr(nExpr-1)*32)).FpExpr, target) - goto __138 -__137: - Xsqlite3VdbeAddOp2(tls, v, OP_Null, 0, target) + goto __139 __138: + Xsqlite3VdbeAddOp2(tls, v, OP_Null, 0, target) +__139: ; Xsqlite3ExprDelete(tls, db1, pDel) setDoNotMergeFlagOnCopy(tls, v) @@ -61029,27 +61041,27 @@ __57: ; if !(!(int32((*Parse)(unsafe.Pointer(pParse)).FpTriggerTab) != 0) && !(int32((*Parse)(unsafe.Pointer(pParse)).Fnested) != 0)) { - goto __139 + goto __140 } Xsqlite3ErrorMsg(tls, pParse, ts+8075, 0) return 0 -__139: +__140: ; if !(int32((*Expr)(unsafe.Pointer(pExpr)).FaffExpr) == OE_Abort) { - goto __140 + goto __141 } Xsqlite3MayAbort(tls, pParse) -__140: +__141: ; if !(int32((*Expr)(unsafe.Pointer(pExpr)).FaffExpr) == OE_Ignore) { - goto __141 + goto __142 } Xsqlite3VdbeAddOp4(tls, v, OP_Halt, SQLITE_OK, OE_Ignore, 0, *(*uintptr)(unsafe.Pointer(pExpr + 8)), 0) - goto __142 -__141: + goto __143 +__142: Xsqlite3HaltConstraint(tls, pParse, func() int32 { if (*Parse)(unsafe.Pointer(pParse)).FpTriggerTab != 0 { @@ -61058,7 +61070,7 @@ return SQLITE_ERROR }(), int32((*Expr)(unsafe.Pointer(pExpr)).FaffExpr), *(*uintptr)(unsafe.Pointer(pExpr + 8)), int8(0), uint8(0)) -__142: +__143: ; goto __5 @@ -63729,7 +63741,7 @@ return SQLITE_NOMEM } if Xsqlite3_strnicmp(tls, zSql, ts+10915, 7) != 0 { - return Xsqlite3CorruptError(tls, 113494) + return Xsqlite3CorruptError(tls, 113516) } (*Sqlite3)(unsafe.Pointer(db)).Finit.FiDb = func() uint8 { if bTemp != 0 { @@ -63746,7 +63758,7 @@ } if rc == SQLITE_OK && ((*Parse)(unsafe.Pointer(p)).FpNewTable == uintptr(0) && (*Parse)(unsafe.Pointer(p)).FpNewIndex == uintptr(0) && (*Parse)(unsafe.Pointer(p)).FpNewTrigger == uintptr(0)) { - rc = Xsqlite3CorruptError(tls, 113505) + rc = Xsqlite3CorruptError(tls, 113527) } (*Sqlite3)(unsafe.Pointer(db)).Finit.FiDb = U8(0) @@ -64667,7 +64679,7 @@ goto __2 } - rc = Xsqlite3CorruptError(tls, 114441) + rc = Xsqlite3CorruptError(tls, 114463) goto drop_column_done __2: ; @@ -69031,6 +69043,12 @@ pExpr = Xsqlite3PExpr(tls, pParse, TK_UPLUS, pExpr, uintptr(0)) __11: ; + if !(pExpr != 0 && int32((*Expr)(unsafe.Pointer(pExpr)).Fop) != TK_RAISE) { + goto __12 + } + (*Expr)(unsafe.Pointer(pExpr)).FaffExpr = 
(*Column)(unsafe.Pointer(pCol)).Faffinity +__12: + ; Xsqlite3ColumnSetExpr(tls, pParse, pTab, pCol, pExpr) pExpr = uintptr(0) goto generated_done @@ -70195,7 +70213,7 @@ if Xsqlite3_strnicmp(tls, (*Table)(unsafe.Pointer(pTab)).FzName+uintptr(7), ts+3279, 4) == 0 { return 0 } - if Xsqlite3_strnicmp(tls, (*Table)(unsafe.Pointer(pTab)).FzName+uintptr(7), ts+7126, 10) == 0 { + if Xsqlite3_strnicmp(tls, (*Table)(unsafe.Pointer(pTab)).FzName+uintptr(7), ts+7115, 10) == 0 { return 0 } return 1 @@ -71441,7 +71459,7 @@ goto __101 } Xsqlite3ErrorMsg(tls, pParse, ts+14133, 0) - (*Parse)(unsafe.Pointer(pParse)).Frc = Xsqlite3CorruptError(tls, 121835) + (*Parse)(unsafe.Pointer(pParse)).Frc = Xsqlite3CorruptError(tls, 121859) goto exit_create_index __101: ; @@ -73486,7 +73504,7 @@ goto __16 __15: wcf = U16(WHERE_ONEPASS_DESIRED | WHERE_DUPLICATES_OK) - if !((*NameContext)(unsafe.Pointer(bp+16)).FncFlags&NC_VarSelect != 0) { + if !((*NameContext)(unsafe.Pointer(bp+16)).FncFlags&NC_Subquery != 0) { goto __23 } bComplex = 1 @@ -79952,7 +79970,7 @@ if !!(Xsqlite3SafetyCheckOk(tls, db) != 0) { goto __1 } - return Xsqlite3MisuseError(tls, 131895) + return Xsqlite3MisuseError(tls, 131931) __1: ; if !(zSql == uintptr(0)) { @@ -81351,7 +81369,7 @@ } else if (*FuncDef)(unsafe.Pointer(p)).FxFinalize != uintptr(0) { zType = ts + 17506 } else { - zType = ts + 7521 + zType = ts + 7510 } Xsqlite3VdbeMultiLoad(tls, v, 1, ts+17508, libc.VaList(bp, (*FuncDef)(unsafe.Pointer(p)).FzName, isBuiltin, @@ -81512,6 +81530,7 @@ var zErr2 uintptr var k3 int32 var pCheck uintptr + var jmp7 int32 var jmp6 int32 var iCol1 int32 var uniqOk int32 @@ -82830,7 +82849,7 @@ goto __217 } pMod = (*HashElem)(unsafe.Pointer(j1)).Fdata - Xsqlite3VdbeMultiLoad(tls, v, 1, ts+7521, libc.VaList(bp+264, (*Module)(unsafe.Pointer(pMod)).FzName)) + Xsqlite3VdbeMultiLoad(tls, v, 1, ts+7510, libc.VaList(bp+264, (*Module)(unsafe.Pointer(pMod)).FzName)) goto __216 __216: j1 = (*HashElem)(unsafe.Pointer(j1)).Fnext @@ -82846,7 +82865,7 @@ if !(i6 < int32(uint64(unsafe.Sizeof(aPragmaName))/uint64(unsafe.Sizeof(PragmaName{})))) { goto __220 } - Xsqlite3VdbeMultiLoad(tls, v, 1, ts+7521, libc.VaList(bp+272, aPragmaName[i6].FzName)) + Xsqlite3VdbeMultiLoad(tls, v, 1, ts+7510, libc.VaList(bp+272, aPragmaName[i6].FzName)) goto __219 __219: i6++ @@ -83651,80 +83670,94 @@ jmp4 = integrityCheckResultRow(tls, v) Xsqlite3VdbeJumpHere(tls, v, jmp21) + if !((*Table)(unsafe.Pointer(pTab9)).FtabFlags&U32(TF_WithoutRowid) == U32(0)) { + goto __345 + } + Xsqlite3VdbeAddOp2(tls, v, OP_IdxRowid, *(*int32)(unsafe.Pointer(bp + 616))+j4, 3) + jmp7 = Xsqlite3VdbeAddOp3(tls, v, OP_Eq, 3, 0, r1+int32((*Index)(unsafe.Pointer(pIdx5)).FnColumn)-1) + + Xsqlite3VdbeLoadString(tls, v, 3, + ts+17922) + Xsqlite3VdbeAddOp3(tls, v, OP_Concat, 7, 3, 3) + Xsqlite3VdbeLoadString(tls, v, 4, ts+17958) + Xsqlite3VdbeGoto(tls, v, jmp5-1) + Xsqlite3VdbeJumpHere(tls, v, jmp7) +__345: + ; label6 = 0 kk = 0 -__345: +__346: if !(kk < int32((*Index)(unsafe.Pointer(pIdx5)).FnKeyCol)) { - goto __347 + goto __348 } if !(*(*uintptr)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx5)).FazColl + uintptr(kk)*8)) == uintptr(unsafe.Pointer(&Xsqlite3StrBINARY))) { - goto __348 + goto __349 } - goto __346 -__348: + goto __347 +__349: ; if !(label6 == 0) { - goto __349 + goto __350 } label6 = Xsqlite3VdbeMakeLabel(tls, pParse) -__349: +__350: ; Xsqlite3VdbeAddOp3(tls, v, OP_Column, *(*int32)(unsafe.Pointer(bp + 616))+j4, kk, 3) Xsqlite3VdbeAddOp3(tls, v, OP_Ne, 3, label6, r1+kk) - goto __346 -__346: - kk++ - goto __345 
goto __347 __347: + kk++ + goto __346 + goto __348 +__348: ; if !(label6 != 0) { - goto __350 + goto __351 } jmp6 = Xsqlite3VdbeAddOp0(tls, v, OP_Goto) Xsqlite3VdbeResolveLabel(tls, v, label6) Xsqlite3VdbeLoadString(tls, v, 3, ts+17896) Xsqlite3VdbeAddOp3(tls, v, OP_Concat, 7, 3, 3) - Xsqlite3VdbeLoadString(tls, v, 4, ts+17922) + Xsqlite3VdbeLoadString(tls, v, 4, ts+17969) Xsqlite3VdbeGoto(tls, v, jmp5-1) Xsqlite3VdbeJumpHere(tls, v, jmp6) -__350: +__351: ; if !(int32((*Index)(unsafe.Pointer(pIdx5)).FonError) != OE_None) { - goto __351 + goto __352 } uniqOk = Xsqlite3VdbeMakeLabel(tls, pParse) kk = 0 -__352: +__353: if !(kk < int32((*Index)(unsafe.Pointer(pIdx5)).FnKeyCol)) { - goto __354 + goto __355 } iCol1 = int32(*(*I16)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx5)).FaiColumn + uintptr(kk)*2))) if !(iCol1 >= 0 && uint32(int32(*(*uint8)(unsafe.Pointer((*Table)(unsafe.Pointer(pTab9)).FaCol + uintptr(iCol1)*24 + 8))&0xf>>0)) != 0) { - goto __355 + goto __356 } - goto __353 -__355: + goto __354 +__356: ; Xsqlite3VdbeAddOp2(tls, v, OP_IsNull, r1+kk, uniqOk) - goto __353 -__353: - kk++ - goto __352 goto __354 __354: + kk++ + goto __353 + goto __355 +__355: ; jmp61 = Xsqlite3VdbeAddOp1(tls, v, OP_Next, *(*int32)(unsafe.Pointer(bp + 616))+j4) Xsqlite3VdbeGoto(tls, v, uniqOk) Xsqlite3VdbeJumpHere(tls, v, jmp61) Xsqlite3VdbeAddOp4Int(tls, v, OP_IdxGT, *(*int32)(unsafe.Pointer(bp + 616))+j4, uniqOk, r1, int32((*Index)(unsafe.Pointer(pIdx5)).FnKeyCol)) - Xsqlite3VdbeLoadString(tls, v, 3, ts+17949) + Xsqlite3VdbeLoadString(tls, v, 3, ts+17996) Xsqlite3VdbeGoto(tls, v, jmp5) Xsqlite3VdbeResolveLabel(tls, v, uniqOk) -__351: +__352: ; Xsqlite3VdbeJumpHere(tls, v, jmp4) Xsqlite3ResolvePartIdxLabel(tls, pParse, *(*int32)(unsafe.Pointer(bp + 632))) @@ -83741,20 +83774,20 @@ Xsqlite3VdbeAddOp2(tls, v, OP_Next, *(*int32)(unsafe.Pointer(bp + 612)), loopTop) Xsqlite3VdbeJumpHere(tls, v, loopTop-1) if !!(isQuick != 0) { - goto __356 + goto __357 } - Xsqlite3VdbeLoadString(tls, v, 2, ts+17976) + Xsqlite3VdbeLoadString(tls, v, 2, ts+18023) j4 = 0 pIdx5 = (*Table)(unsafe.Pointer(pTab9)).FpIndex -__357: +__358: if !(pIdx5 != 0) { - goto __359 + goto __360 } if !(pPk1 == pIdx5) { - goto __360 + goto __361 } - goto __358 -__360: + goto __359 +__361: ; Xsqlite3VdbeAddOp2(tls, v, OP_Count, *(*int32)(unsafe.Pointer(bp + 616))+j4, 3) addr1 = Xsqlite3VdbeAddOp3(tls, v, OP_Eq, 8+j4, 0, 3) @@ -83763,21 +83796,21 @@ Xsqlite3VdbeAddOp3(tls, v, OP_Concat, 4, 2, 3) integrityCheckResultRow(tls, v) Xsqlite3VdbeJumpHere(tls, v, addr1) - goto __358 -__358: - pIdx5 = (*Index)(unsafe.Pointer(pIdx5)).FpNext - j4++ - goto __357 goto __359 __359: + pIdx5 = (*Index)(unsafe.Pointer(pIdx5)).FpNext + j4++ + goto __358 + goto __360 +__360: ; if !(pPk1 != 0) { - goto __361 + goto __362 } Xsqlite3ReleaseTempRange(tls, pParse, r2, int32((*Index)(unsafe.Pointer(pPk1)).FnKeyCol)) -__361: +__362: ; -__356: +__357: ; goto __291 __291: @@ -83795,14 +83828,14 @@ ; aOp2 = Xsqlite3VdbeAddOpList(tls, v, int32(uint64(unsafe.Sizeof(endCode))/uint64(unsafe.Sizeof(VdbeOpList{}))), uintptr(unsafe.Pointer(&endCode)), iLn5) if !(aOp2 != 0) { - goto __362 + goto __363 } (*VdbeOp)(unsafe.Pointer(aOp2)).Fp2 = 1 - *(*int32)(unsafe.Pointer(bp + 608)) (*VdbeOp)(unsafe.Pointer(aOp2 + 2*24)).Fp4type = int8(-1) - *(*uintptr)(unsafe.Pointer(aOp2 + 2*24 + 16)) = ts + 18005 + *(*uintptr)(unsafe.Pointer(aOp2 + 2*24 + 16)) = ts + 18052 (*VdbeOp)(unsafe.Pointer(aOp2 + 5*24)).Fp4type = int8(-1) *(*uintptr)(unsafe.Pointer(aOp2 + 5*24 + 16)) = Xsqlite3ErrStr(tls, 
SQLITE_CORRUPT) -__362: +__363: ; Xsqlite3VdbeChangeP3(tls, v, 0, Xsqlite3VdbeCurrentAddr(tls, v)-2) @@ -83810,27 +83843,27 @@ __45: if !!(zRight != 0) { - goto __363 + goto __364 } if !(Xsqlite3ReadSchema(tls, pParse) != 0) { - goto __365 + goto __366 } goto pragma_out -__365: +__366: ; returnSingleText(tls, v, encnames1[(*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).Fenc].FzName) - goto __364 -__363: + goto __365 +__364: if !((*Sqlite3)(unsafe.Pointer(db)).FmDbFlags&U32(DBFLAG_EncodingFixed) == U32(0)) { - goto __366 + goto __367 } pEnc = uintptr(unsafe.Pointer(&encnames1)) -__367: +__368: if !((*EncName)(unsafe.Pointer(pEnc)).FzName != 0) { - goto __369 + goto __370 } if !(0 == Xsqlite3StrICmp(tls, zRight, (*EncName)(unsafe.Pointer(pEnc)).FzName)) { - goto __370 + goto __371 } if (*EncName)(unsafe.Pointer(pEnc)).Fenc != 0 { enc = (*EncName)(unsafe.Pointer(pEnc)).Fenc @@ -83839,25 +83872,25 @@ } (*Schema)(unsafe.Pointer((*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb)).FpSchema)).Fenc = enc Xsqlite3SetTextEncoding(tls, db, enc) - goto __369 -__370: + goto __370 +__371: ; - goto __368 -__368: - pEnc += 16 - goto __367 goto __369 __369: + pEnc += 16 + goto __368 + goto __370 +__370: ; if !!(int32((*EncName)(unsafe.Pointer(pEnc)).FzName) != 0) { - goto __371 + goto __372 } - Xsqlite3ErrorMsg(tls, pParse, ts+18008, libc.VaList(bp+456, zRight)) -__371: + Xsqlite3ErrorMsg(tls, pParse, ts+18055, libc.VaList(bp+456, zRight)) +__372: ; -__366: +__367: ; -__364: +__365: ; goto __15 @@ -83865,15 +83898,15 @@ iCookie = int32((*PragmaName)(unsafe.Pointer(pPragma)).FiArg) Xsqlite3VdbeUsesBtree(tls, v, iDb) if !(zRight != 0 && int32((*PragmaName)(unsafe.Pointer(pPragma)).FmPragFlg)&PragFlg_ReadOnly == 0) { - goto __372 + goto __373 } aOp3 = Xsqlite3VdbeAddOpList(tls, v, int32(uint64(unsafe.Sizeof(setCookie))/uint64(unsafe.Sizeof(VdbeOpList{}))), uintptr(unsafe.Pointer(&setCookie)), 0) if !(0 != 0) { - goto __374 + goto __375 } goto __15 -__374: +__375: ; (*VdbeOp)(unsafe.Pointer(aOp3)).Fp1 = iDb (*VdbeOp)(unsafe.Pointer(aOp3 + 1*24)).Fp1 = iDb @@ -83881,41 +83914,41 @@ (*VdbeOp)(unsafe.Pointer(aOp3 + 1*24)).Fp3 = Xsqlite3Atoi(tls, zRight) (*VdbeOp)(unsafe.Pointer(aOp3 + 1*24)).Fp5 = U16(1) if !(iCookie == BTREE_SCHEMA_VERSION && (*Sqlite3)(unsafe.Pointer(db)).Fflags&uint64(SQLITE_Defensive) != uint64(0)) { - goto __375 + goto __376 } (*VdbeOp)(unsafe.Pointer(aOp3 + 1*24)).Fopcode = U8(OP_Noop) -__375: +__376: ; - goto __373 -__372: + goto __374 +__373: ; aOp4 = Xsqlite3VdbeAddOpList(tls, v, int32(uint64(unsafe.Sizeof(readCookie))/uint64(unsafe.Sizeof(VdbeOpList{}))), uintptr(unsafe.Pointer(&readCookie)), 0) if !(0 != 0) { - goto __376 + goto __377 } goto __15 -__376: +__377: ; (*VdbeOp)(unsafe.Pointer(aOp4)).Fp1 = iDb (*VdbeOp)(unsafe.Pointer(aOp4 + 1*24)).Fp1 = iDb (*VdbeOp)(unsafe.Pointer(aOp4 + 1*24)).Fp3 = iCookie Xsqlite3VdbeReusable(tls, v) -__373: +__374: ; goto __15 __47: i10 = 0 (*Parse)(unsafe.Pointer(pParse)).FnMem = 1 -__377: +__378: if !(libc.AssignUintptr(&zOpt, Xsqlite3_compileoption_get(tls, libc.PostIncInt32(&i10, 1))) != uintptr(0)) { - goto __378 + goto __379 } Xsqlite3VdbeLoadString(tls, v, 1, zOpt) Xsqlite3VdbeAddOp2(tls, v, OP_ResultRow, 1, 1) - goto __377 -__378: + goto __378 +__379: ; Xsqlite3VdbeReusable(tls, v) @@ -83930,31 +83963,31 @@ }() eMode2 = SQLITE_CHECKPOINT_PASSIVE if !(zRight != 0) { - goto __379 + goto __380 } if !(Xsqlite3StrICmp(tls, zRight, ts+17338) == 0) { - goto __380 + goto __381 } eMode2 = SQLITE_CHECKPOINT_FULL - goto __381 
-__380: - if !(Xsqlite3StrICmp(tls, zRight, ts+18033) == 0) { - goto __382 + goto __382 +__381: + if !(Xsqlite3StrICmp(tls, zRight, ts+18080) == 0) { + goto __383 } eMode2 = SQLITE_CHECKPOINT_RESTART - goto __383 -__382: + goto __384 +__383: if !(Xsqlite3StrICmp(tls, zRight, ts+17491) == 0) { - goto __384 + goto __385 } eMode2 = SQLITE_CHECKPOINT_TRUNCATE -__384: +__385: ; -__383: +__384: ; -__381: +__382: ; -__379: +__380: ; (*Parse)(unsafe.Pointer(pParse)).FnMem = 3 Xsqlite3VdbeAddOp3(tls, v, OP_Checkpoint, iBt, eMode2, 1) @@ -83964,10 +83997,10 @@ __49: if !(zRight != 0) { - goto __385 + goto __386 } Xsqlite3_wal_autocheckpoint(tls, db, Xsqlite3Atoi(tls, zRight)) -__385: +__386: ; returnSingleInt(tls, v, func() int64 { @@ -83987,19 +84020,19 @@ __51: if !(zRight != 0) { - goto __386 + goto __387 } opMask = U32(Xsqlite3Atoi(tls, zRight)) if !(opMask&U32(0x02) == U32(0)) { - goto __388 + goto __389 } goto __15 -__388: +__389: ; - goto __387 -__386: - opMask = U32(0xfffe) + goto __388 __387: + opMask = U32(0xfffe) +__388: ; iTabCur = libc.PostIncInt32(&(*Parse)(unsafe.Pointer(pParse)).FnTab, 1) iDbLast = func() int32 { @@ -84008,86 +84041,86 @@ } return (*Sqlite3)(unsafe.Pointer(db)).FnDb - 1 }() -__389: +__390: if !(iDb <= iDbLast) { - goto __391 + goto __392 } if !(iDb == 1) { - goto __392 + goto __393 } - goto __390 -__392: + goto __391 +__393: ; Xsqlite3CodeVerifySchema(tls, pParse, iDb) pSchema = (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + uintptr(iDb)*32)).FpSchema k4 = (*Hash)(unsafe.Pointer(pSchema + 8)).Ffirst -__393: +__394: if !(k4 != 0) { - goto __395 + goto __396 } pTab10 = (*HashElem)(unsafe.Pointer(k4)).Fdata if !((*Table)(unsafe.Pointer(pTab10)).FtabFlags&U32(TF_StatsUsed) == U32(0)) { - goto __396 + goto __397 } - goto __394 -__396: + goto __395 +__397: ; szThreshold = LogEst(int32((*Table)(unsafe.Pointer(pTab10)).FnRowLogEst) + 46) pIdx6 = (*Table)(unsafe.Pointer(pTab10)).FpIndex -__397: +__398: if !(pIdx6 != 0) { - goto __399 + goto __400 } if !!(int32(*(*uint16)(unsafe.Pointer(pIdx6 + 100))&0x80>>7) != 0) { - goto __400 + goto __401 } szThreshold = int16(0) - goto __399 -__400: + goto __400 +__401: ; - goto __398 -__398: - pIdx6 = (*Index)(unsafe.Pointer(pIdx6)).FpNext - goto __397 goto __399 __399: + pIdx6 = (*Index)(unsafe.Pointer(pIdx6)).FpNext + goto __398 + goto __400 +__400: ; if !(szThreshold != 0) { - goto __401 + goto __402 } Xsqlite3OpenTable(tls, pParse, iTabCur, iDb, pTab10, OP_OpenRead) Xsqlite3VdbeAddOp3(tls, v, OP_IfSmaller, iTabCur, int32(U32(Xsqlite3VdbeCurrentAddr(tls, v)+2)+opMask&U32(1)), int32(szThreshold)) -__401: +__402: ; - zSubSql = Xsqlite3MPrintf(tls, db, ts+18041, + zSubSql = Xsqlite3MPrintf(tls, db, ts+18088, libc.VaList(bp+464, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName, (*Table)(unsafe.Pointer(pTab10)).FzName)) if !(opMask&U32(0x01) != 0) { - goto __402 + goto __403 } r11 = Xsqlite3GetTempReg(tls, pParse) Xsqlite3VdbeAddOp4(tls, v, OP_String8, 0, r11, 0, zSubSql, -6) Xsqlite3VdbeAddOp2(tls, v, OP_ResultRow, r11, 1) - goto __403 -__402: - Xsqlite3VdbeAddOp4(tls, v, OP_SqlExec, 0, 0, 0, zSubSql, -6) + goto __404 __403: + Xsqlite3VdbeAddOp4(tls, v, OP_SqlExec, 0, 0, 0, zSubSql, -6) +__404: ; - goto __394 -__394: - k4 = (*HashElem)(unsafe.Pointer(k4)).Fnext - goto __393 goto __395 __395: + k4 = (*HashElem)(unsafe.Pointer(k4)).Fnext + goto __394 + goto __396 +__396: ; - goto __390 -__390: - iDb++ - goto __389 goto __391 __391: + iDb++ + goto __390 + goto __392 +__392: ; 
Xsqlite3VdbeAddOp0(tls, v, OP_Expire) goto __15 @@ -84095,36 +84128,36 @@ __52: ; if !(zRight != 0) { - goto __404 + goto __405 } Xsqlite3_busy_timeout(tls, db, Xsqlite3Atoi(tls, zRight)) -__404: +__405: ; returnSingleInt(tls, v, int64((*Sqlite3)(unsafe.Pointer(db)).FbusyTimeout)) goto __15 __53: if !(zRight != 0 && Xsqlite3DecOrHexToI64(tls, zRight, bp+640) == SQLITE_OK) { - goto __405 + goto __406 } Xsqlite3_soft_heap_limit64(tls, *(*Sqlite3_int64)(unsafe.Pointer(bp + 640))) -__405: +__406: ; returnSingleInt(tls, v, Xsqlite3_soft_heap_limit64(tls, int64(-1))) goto __15 __54: if !(zRight != 0 && Xsqlite3DecOrHexToI64(tls, zRight, bp+648) == SQLITE_OK) { - goto __406 + goto __407 } iPrior = Xsqlite3_hard_heap_limit64(tls, int64(-1)) if !(*(*Sqlite3_int64)(unsafe.Pointer(bp + 648)) > int64(0) && (iPrior == int64(0) || iPrior > *(*Sqlite3_int64)(unsafe.Pointer(bp + 648)))) { - goto __407 + goto __408 } Xsqlite3_hard_heap_limit64(tls, *(*Sqlite3_int64)(unsafe.Pointer(bp + 648))) -__407: +__408: ; -__406: +__407: ; returnSingleInt(tls, v, Xsqlite3_hard_heap_limit64(tls, int64(-1))) goto __15 @@ -84133,10 +84166,10 @@ if !(zRight != 0 && Xsqlite3DecOrHexToI64(tls, zRight, bp+656) == SQLITE_OK && *(*Sqlite3_int64)(unsafe.Pointer(bp + 656)) >= int64(0)) { - goto __408 + goto __409 } Xsqlite3_limit(tls, db, SQLITE_LIMIT_WORKER_THREADS, int32(*(*Sqlite3_int64)(unsafe.Pointer(bp + 656))&int64(0x7fffffff))) -__408: +__409: ; returnSingleInt(tls, v, int64(Xsqlite3_limit(tls, db, SQLITE_LIMIT_WORKER_THREADS, -1))) goto __15 @@ -84145,10 +84178,10 @@ if !(zRight != 0 && Xsqlite3DecOrHexToI64(tls, zRight, bp+664) == SQLITE_OK && *(*Sqlite3_int64)(unsafe.Pointer(bp + 664)) >= int64(0)) { - goto __409 + goto __410 } (*Sqlite3)(unsafe.Pointer(db)).FnAnalysisLimit = int32(*(*Sqlite3_int64)(unsafe.Pointer(bp + 664)) & int64(0x7fffffff)) -__409: +__410: ; returnSingleInt(tls, v, int64((*Sqlite3)(unsafe.Pointer(db)).FnAnalysisLimit)) goto __15 @@ -84156,10 +84189,10 @@ __15: ; if !(int32((*PragmaName)(unsafe.Pointer(pPragma)).FmPragFlg)&PragFlg_NoColumns1 != 0 && zRight != 0) { - goto __410 + goto __411 } -__410: +__411: ; pragma_out: Xsqlite3DbFree(tls, db, zLeft) @@ -84211,14 +84244,14 @@ {Fopcode: U8(OP_Goto), Fp2: int8(3)}, } var encnames1 = [9]EncName{ - {FzName: ts + 18059, Fenc: U8(SQLITE_UTF8)}, - {FzName: ts + 18064, Fenc: U8(SQLITE_UTF8)}, - {FzName: ts + 18070, Fenc: U8(SQLITE_UTF16LE)}, - {FzName: ts + 18079, Fenc: U8(SQLITE_UTF16BE)}, - {FzName: ts + 18088, Fenc: U8(SQLITE_UTF16LE)}, - {FzName: ts + 18096, Fenc: U8(SQLITE_UTF16BE)}, - {FzName: ts + 18104}, - {FzName: ts + 18111}, + {FzName: ts + 18106, Fenc: U8(SQLITE_UTF8)}, + {FzName: ts + 18111, Fenc: U8(SQLITE_UTF8)}, + {FzName: ts + 18117, Fenc: U8(SQLITE_UTF16LE)}, + {FzName: ts + 18126, Fenc: U8(SQLITE_UTF16BE)}, + {FzName: ts + 18135, Fenc: U8(SQLITE_UTF16LE)}, + {FzName: ts + 18143, Fenc: U8(SQLITE_UTF16BE)}, + {FzName: ts + 18151}, + {FzName: ts + 18158}, {}, } var setCookie = [2]VdbeOpList{ @@ -84270,7 +84303,7 @@ _ = argc _ = argv Xsqlite3StrAccumInit(tls, bp+32, uintptr(0), bp+64, int32(unsafe.Sizeof([200]int8{})), 0) - Xsqlite3_str_appendall(tls, bp+32, ts+18117) + Xsqlite3_str_appendall(tls, bp+32, ts+18164) i = 0 j = int32((*PragmaName)(unsafe.Pointer(pPragma)).FiPragCName) __1: @@ -84278,7 +84311,7 @@ goto __3 } { - Xsqlite3_str_appendf(tls, bp+32, ts+18132, libc.VaList(bp, int32(cSep), pragCName[j])) + Xsqlite3_str_appendf(tls, bp+32, ts+18179, libc.VaList(bp, int32(cSep), pragCName[j])) cSep = int8(',') } @@ -84291,16 +84324,16 
@@ __3: ; if i == 0 { - Xsqlite3_str_appendf(tls, bp+32, ts+18139, libc.VaList(bp+16, (*PragmaName)(unsafe.Pointer(pPragma)).FzName)) + Xsqlite3_str_appendf(tls, bp+32, ts+18186, libc.VaList(bp+16, (*PragmaName)(unsafe.Pointer(pPragma)).FzName)) i++ } j = 0 if int32((*PragmaName)(unsafe.Pointer(pPragma)).FmPragFlg)&PragFlg_Result1 != 0 { - Xsqlite3_str_appendall(tls, bp+32, ts+18145) + Xsqlite3_str_appendall(tls, bp+32, ts+18192) j++ } if int32((*PragmaName)(unsafe.Pointer(pPragma)).FmPragFlg)&(PragFlg_SchemaOpt|PragFlg_SchemaReq) != 0 { - Xsqlite3_str_appendall(tls, bp+32, ts+18157) + Xsqlite3_str_appendall(tls, bp+32, ts+18204) j++ } Xsqlite3_str_append(tls, bp+32, ts+4950, 1) @@ -84483,13 +84516,13 @@ __3: ; Xsqlite3StrAccumInit(tls, bp+32, uintptr(0), uintptr(0), 0, *(*int32)(unsafe.Pointer((*PragmaVtab)(unsafe.Pointer(pTab)).Fdb + 136 + 1*4))) - Xsqlite3_str_appendall(tls, bp+32, ts+18172) + Xsqlite3_str_appendall(tls, bp+32, ts+18219) if *(*uintptr)(unsafe.Pointer(pCsr + 24 + 1*8)) != 0 { - Xsqlite3_str_appendf(tls, bp+32, ts+18180, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(pCsr + 24 + 1*8)))) + Xsqlite3_str_appendf(tls, bp+32, ts+18227, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(pCsr + 24 + 1*8)))) } Xsqlite3_str_appendall(tls, bp+32, (*PragmaName)(unsafe.Pointer((*PragmaVtab)(unsafe.Pointer(pTab)).FpName)).FzName) if *(*uintptr)(unsafe.Pointer(pCsr + 24)) != 0 { - Xsqlite3_str_appendf(tls, bp+32, ts+18184, libc.VaList(bp+16, *(*uintptr)(unsafe.Pointer(pCsr + 24)))) + Xsqlite3_str_appendf(tls, bp+32, ts+18231, libc.VaList(bp+16, *(*uintptr)(unsafe.Pointer(pCsr + 24)))) } zSql = Xsqlite3StrAccumFinish(tls, bp+32) if zSql == uintptr(0) { @@ -84566,12 +84599,12 @@ } else if *(*uintptr)(unsafe.Pointer((*InitData)(unsafe.Pointer(pData)).FpzErrMsg)) != uintptr(0) { } else if (*InitData)(unsafe.Pointer(pData)).FmInitFlags&U32(INITFLAG_AlterMask) != 0 { *(*uintptr)(unsafe.Pointer((*InitData)(unsafe.Pointer(pData)).FpzErrMsg)) = Xsqlite3MPrintf(tls, db, - ts+18188, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(azObj)), *(*uintptr)(unsafe.Pointer(azObj + 1*8)), + ts+18235, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(azObj)), *(*uintptr)(unsafe.Pointer(azObj + 1*8)), azAlterType[(*InitData)(unsafe.Pointer(pData)).FmInitFlags&U32(INITFLAG_AlterMask)-U32(1)], zExtra)) (*InitData)(unsafe.Pointer(pData)).Frc = SQLITE_ERROR } else if (*Sqlite3)(unsafe.Pointer(db)).Fflags&uint64(SQLITE_WriteSchema) != 0 { - (*InitData)(unsafe.Pointer(pData)).Frc = Xsqlite3CorruptError(tls, 137196) + (*InitData)(unsafe.Pointer(pData)).Frc = Xsqlite3CorruptError(tls, 137249) } else { var z uintptr var zObj uintptr @@ -84580,19 +84613,19 @@ } else { zObj = ts + 5001 } - z = Xsqlite3MPrintf(tls, db, ts+18216, libc.VaList(bp+32, zObj)) + z = Xsqlite3MPrintf(tls, db, ts+18263, libc.VaList(bp+32, zObj)) if zExtra != 0 && *(*int8)(unsafe.Pointer(zExtra)) != 0 { - z = Xsqlite3MPrintf(tls, db, ts+18247, libc.VaList(bp+40, z, zExtra)) + z = Xsqlite3MPrintf(tls, db, ts+18294, libc.VaList(bp+40, z, zExtra)) } *(*uintptr)(unsafe.Pointer((*InitData)(unsafe.Pointer(pData)).FpzErrMsg)) = z - (*InitData)(unsafe.Pointer(pData)).Frc = Xsqlite3CorruptError(tls, 137203) + (*InitData)(unsafe.Pointer(pData)).Frc = Xsqlite3CorruptError(tls, 137256) } } var azAlterType = [3]uintptr{ - ts + 18255, - ts + 18262, - ts + 18274, + ts + 18302, + ts + 18309, + ts + 18321, } // Check to see if any sibling index (another index on the same table) @@ -84684,7 +84717,7 @@ var pIndex uintptr pIndex = Xsqlite3FindIndex(tls, db, 
*(*uintptr)(unsafe.Pointer(argv + 1*8)), (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName) if pIndex == uintptr(0) { - corruptSchema(tls, pData, argv, ts+18285) + corruptSchema(tls, pData, argv, ts+18332) } else if Xsqlite3GetUInt32(tls, *(*uintptr)(unsafe.Pointer(argv + 3*8)), pIndex+88) == 0 || (*Index)(unsafe.Pointer(pIndex)).Ftnum < Pgno(2) || (*Index)(unsafe.Pointer(pIndex)).Ftnum > (*InitData)(unsafe.Pointer(pData)).FmxPage || @@ -84732,7 +84765,7 @@ }()) *(*uintptr)(unsafe.Pointer(bp + 16 + 2*8)) = *(*uintptr)(unsafe.Pointer(bp + 16 + 1*8)) *(*uintptr)(unsafe.Pointer(bp + 16 + 3*8)) = ts + 7931 - *(*uintptr)(unsafe.Pointer(bp + 16 + 4*8)) = ts + 18298 + *(*uintptr)(unsafe.Pointer(bp + 16 + 4*8)) = ts + 18345 *(*uintptr)(unsafe.Pointer(bp + 16 + 5*8)) = uintptr(0) (*InitData)(unsafe.Pointer(bp + 64)).Fdb = db (*InitData)(unsafe.Pointer(bp + 64)).FiDb = iDb @@ -84861,7 +84894,7 @@ if !(int32((*Schema)(unsafe.Pointer((*Db)(unsafe.Pointer(pDb)).FpSchema)).Ffile_format) > SQLITE_MAX_FILE_FORMAT) { goto __19 } - Xsqlite3SetString(tls, pzErrMsg, db, ts+18370) + Xsqlite3SetString(tls, pzErrMsg, db, ts+18417) rc = SQLITE_ERROR goto initone_error_out __19: @@ -84875,7 +84908,7 @@ (*InitData)(unsafe.Pointer(bp + 64)).FmxPage = Xsqlite3BtreeLastPage(tls, (*Db)(unsafe.Pointer(pDb)).FpBt) zSql = Xsqlite3MPrintf(tls, db, - ts+18394, + ts+18441, libc.VaList(bp, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName, zSchemaTabName)) xAuth = (*Sqlite3)(unsafe.Pointer(db)).FxAuth @@ -85207,7 +85240,7 @@ goto __8 } zDb = (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + uintptr(i)*32)).FzDbSName - Xsqlite3ErrorWithMsg(tls, db, rc, ts+18428, libc.VaList(bp, zDb)) + Xsqlite3ErrorWithMsg(tls, db, rc, ts+18475, libc.VaList(bp, zDb)) goto end_prepare __8: @@ -85237,7 +85270,7 @@ if !(nBytes > mxLen) { goto __12 } - Xsqlite3ErrorWithMsg(tls, db, SQLITE_TOOBIG, ts+18458, 0) + Xsqlite3ErrorWithMsg(tls, db, SQLITE_TOOBIG, ts+18505, 0) rc = Xsqlite3ApiExit(tls, db, SQLITE_TOOBIG) goto end_prepare __12: @@ -85333,7 +85366,7 @@ *(*uintptr)(unsafe.Pointer(ppStmt)) = uintptr(0) if !(Xsqlite3SafetyCheckOk(tls, db) != 0) || zSql == uintptr(0) { - return Xsqlite3MisuseError(tls, 137995) + return Xsqlite3MisuseError(tls, 138048) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) Xsqlite3BtreeEnterAll(tls, db) @@ -85432,7 +85465,7 @@ *(*uintptr)(unsafe.Pointer(ppStmt)) = uintptr(0) if !(Xsqlite3SafetyCheckOk(tls, db) != 0) || zSql == uintptr(0) { - return Xsqlite3MisuseError(tls, 138143) + return Xsqlite3MisuseError(tls, 138196) } if nBytes >= 0 { var sz int32 @@ -85759,13 +85792,13 @@ zSp2++ } Xsqlite3ErrorMsg(tls, pParse, - ts+18477, libc.VaList(bp, pA, zSp1, pB, zSp2, pC)) + ts+18524, libc.VaList(bp, pA, zSp1, pB, zSp2, pC)) jointype = JT_INNER } return jointype } -var zKeyText = *(*[34]int8)(unsafe.Pointer(ts + 18507)) +var zKeyText = *(*[34]int8)(unsafe.Pointer(ts + 18554)) var aKeyword = [7]struct { Fi U8 FnChar U8 @@ -85940,7 +85973,7 @@ var pUsing uintptr = uintptr(0) if uint32(int32(*(*uint16)(unsafe.Pointer(pRight + 60 + 4))&0x400>>10)) != 0 || *(*uintptr)(unsafe.Pointer(pRight + 72)) != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+18541, libc.VaList(bp, 0)) + ts+18588, libc.VaList(bp, 0)) return 1 } for j = 0; j < int32((*Table)(unsafe.Pointer(pRightTab)).FnCol); j++ { @@ -85985,7 +86018,7 @@ tableAndColumnIndex(tls, pSrc, 0, i, zName, bp+24, bp+28, int32(*(*uint16)(unsafe.Pointer(pRight + 60 + 4))&0x1000>>12)) == 0 { 
Xsqlite3ErrorMsg(tls, pParse, - ts+18591, libc.VaList(bp+8, zName)) + ts+18638, libc.VaList(bp+8, zName)) return 1 } pE1 = Xsqlite3CreateColumnExpr(tls, db, pSrc, *(*int32)(unsafe.Pointer(bp + 24)), *(*int32)(unsafe.Pointer(bp + 28))) @@ -85996,7 +86029,7 @@ int32(*(*uint16)(unsafe.Pointer(pRight + 60 + 4))&0x1000>>12)) != 0 { if int32(*(*uint16)(unsafe.Pointer(pSrc + 8 + uintptr(*(*int32)(unsafe.Pointer(bp + 24)))*104 + 60 + 4))&0x400>>10) == 0 || Xsqlite3IdListIndex(tls, *(*uintptr)(unsafe.Pointer(pSrc + 8 + uintptr(*(*int32)(unsafe.Pointer(bp + 24)))*104 + 72)), zName) < 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+18655, + Xsqlite3ErrorMsg(tls, pParse, ts+18702, libc.VaList(bp+16, zName)) break } @@ -86624,16 +86657,16 @@ var z uintptr switch id { case TK_ALL: - z = ts + 18692 + z = ts + 18739 break case TK_INTERSECT: - z = ts + 18702 + z = ts + 18749 break case TK_EXCEPT: - z = ts + 18712 + z = ts + 18759 break default: - z = ts + 18719 + z = ts + 18766 break } return z @@ -86643,7 +86676,7 @@ bp := tls.Alloc(8) defer tls.Free(8) - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18725, libc.VaList(bp, zUsage)) + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18772, libc.VaList(bp, zUsage)) } func generateSortTail(tls *libc.TLS, pParse uintptr, p uintptr, pSort uintptr, nColumn int32, pDest uintptr) { @@ -86669,9 +86702,9 @@ var nRefKey int32 = 0 var aOutEx uintptr = (*Select)(unsafe.Pointer(p)).FpEList + 8 - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18748, libc.VaList(bp, func() uintptr { + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18795, libc.VaList(bp, func() uintptr { if (*SortCtx)(unsafe.Pointer(pSort)).FnOBSat > 0 { - return ts + 18779 + return ts + 18826 } return ts + 1547 }())) @@ -87015,7 +87048,7 @@ } else { var z uintptr = (*ExprList_item)(unsafe.Pointer(pEList + 8 + uintptr(i)*32)).FzEName if z == uintptr(0) { - z = Xsqlite3MPrintf(tls, db, ts+18794, libc.VaList(bp+16, i+1)) + z = Xsqlite3MPrintf(tls, db, ts+18841, libc.VaList(bp+16, i+1)) } else { z = Xsqlite3DbStrDup(tls, db, z) } @@ -87115,7 +87148,7 @@ if zName != 0 && !(Xsqlite3IsTrueOrFalse(tls, zName) != 0) { zName = Xsqlite3DbStrDup(tls, db, zName) } else { - zName = Xsqlite3MPrintf(tls, db, ts+18794, libc.VaList(bp, i+1)) + zName = Xsqlite3MPrintf(tls, db, ts+18841, libc.VaList(bp, i+1)) } *(*U32)(unsafe.Pointer(bp + 56)) = U32(0) @@ -87131,7 +87164,7 @@ nName = j } } - zName = Xsqlite3MPrintf(tls, db, ts+18803, libc.VaList(bp+8, nName, zName, libc.PreIncUint32(&*(*U32)(unsafe.Pointer(bp + 56)), 1))) + zName = Xsqlite3MPrintf(tls, db, ts+18850, libc.VaList(bp+8, nName, zName, libc.PreIncUint32(&*(*U32)(unsafe.Pointer(bp + 56)), 1))) Xsqlite3ProgressCheck(tls, pParse) if *(*U32)(unsafe.Pointer(bp + 56)) > U32(3) { Xsqlite3_randomness(tls, int32(unsafe.Sizeof(U32(0))), bp+56) @@ -87214,8 +87247,6 @@ (*Column)(unsafe.Pointer(pCol)).Faffinity = Xsqlite3ExprAffinity(tls, p) if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) <= SQLITE_AFF_NONE { (*Column)(unsafe.Pointer(pCol)).Faffinity = aff - } else if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) >= SQLITE_AFF_NUMERIC && int32((*Expr)(unsafe.Pointer(p)).Fop) == TK_CAST { - (*Column)(unsafe.Pointer(pCol)).Faffinity = int8(SQLITE_AFF_FLEXNUM) } if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) >= SQLITE_AFF_TEXT && (*Select)(unsafe.Pointer(pSelect)).FpNext != 0 { var m int32 = 0 @@ -87230,12 +87261,15 @@ } else if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) >= SQLITE_AFF_NUMERIC && m&0x02 != 0 { (*Column)(unsafe.Pointer(pCol)).Faffinity = int8(SQLITE_AFF_BLOB) 
} + if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) >= SQLITE_AFF_NUMERIC && int32((*Expr)(unsafe.Pointer(p)).Fop) == TK_CAST { + (*Column)(unsafe.Pointer(pCol)).Faffinity = int8(SQLITE_AFF_FLEXNUM) + } } zType = columnTypeImpl(tls, bp, p, uintptr(0), uintptr(0), uintptr(0)) if zType == uintptr(0) || int32((*Column)(unsafe.Pointer(pCol)).Faffinity) != int32(Xsqlite3AffinityType(tls, zType, uintptr(0))) { if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) == SQLITE_AFF_NUMERIC || int32((*Column)(unsafe.Pointer(pCol)).Faffinity) == SQLITE_AFF_FLEXNUM { - zType = ts + 18811 + zType = ts + 18858 } else { zType = uintptr(0) for j = 1; j < SQLITE_N_STDTYPE; j++ { @@ -87451,7 +87485,7 @@ if !((*Select)(unsafe.Pointer(p)).FpWin != 0) { goto __1 } - Xsqlite3ErrorMsg(tls, pParse, ts+18815, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+18862, 0) return __1: ; @@ -87542,7 +87576,7 @@ if !((*Select)(unsafe.Pointer(pFirstRec)).FselFlags&U32(SF_Aggregate) != 0) { goto __15 } - Xsqlite3ErrorMsg(tls, pParse, ts+18864, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+18911, 0) goto end_of_recursive_query __15: ; @@ -87562,7 +87596,7 @@ ; pSetup = (*Select)(unsafe.Pointer(pFirstRec)).FpPrior (*Select)(unsafe.Pointer(pSetup)).FpNext = uintptr(0) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18906, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18953, 0) rc = Xsqlite3Select(tls, pParse, pSetup, bp) (*Select)(unsafe.Pointer(pSetup)).FpNext = p if !(rc != 0) { @@ -87599,7 +87633,7 @@ Xsqlite3VdbeResolveLabel(tls, v, addrCont) (*Select)(unsafe.Pointer(pFirstRec)).FpPrior = uintptr(0) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18912, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18959, 0) Xsqlite3Select(tls, pParse, p, bp) (*Select)(unsafe.Pointer(pFirstRec)).FpPrior = pSetup @@ -87633,11 +87667,11 @@ p = (*Select)(unsafe.Pointer(p)).FpPrior nRow = nRow + bShowAll } - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18927, libc.VaList(bp, nRow, func() uintptr { + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18974, libc.VaList(bp, nRow, func() uintptr { if nRow == 1 { return ts + 1547 } - return ts + 18950 + return ts + 18997 }())) for p != 0 { selectInnerLoop(tls, pParse, p, -1, uintptr(0), uintptr(0), pDest, 1, 1) @@ -87738,8 +87772,8 @@ if !((*Select)(unsafe.Pointer(pPrior)).FpPrior == uintptr(0)) { goto __8 } - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18952, 0) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18967, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18999, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19014, 0) __8: ; switch int32((*Select)(unsafe.Pointer(p)).Fop) { @@ -87786,7 +87820,7 @@ ; __15: ; - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18692, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18739, 0) rc = Xsqlite3Select(tls, pParse, p, bp+16) @@ -87853,7 +87887,7 @@ pLimit = (*Select)(unsafe.Pointer(p)).FpLimit (*Select)(unsafe.Pointer(p)).FpLimit = uintptr(0) (*SelectDest)(unsafe.Pointer(bp + 64)).FeDest = op - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18986, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19033, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) rc = Xsqlite3Select(tls, pParse, p, bp+64) @@ -87915,7 +87949,7 @@ pLimit1 = (*Select)(unsafe.Pointer(p)).FpLimit (*Select)(unsafe.Pointer(p)).FpLimit = uintptr(0) (*SelectDest)(unsafe.Pointer(bp + 104)).FiSDParm = tab2 - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18986, libc.VaList(bp+8, 
Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19033, libc.VaList(bp+8, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) rc = Xsqlite3Select(tls, pParse, p, bp+104) @@ -88068,10 +88102,10 @@ defer tls.Free(8) if (*Select)(unsafe.Pointer(p)).FselFlags&U32(SF_Values) != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+19007, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+19054, 0) } else { Xsqlite3ErrorMsg(tls, pParse, - ts+19053, + ts+19100, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) } } @@ -88325,8 +88359,8 @@ (*Select)(unsafe.Pointer(pPrior)).FpNext = uintptr(0) (*Select)(unsafe.Pointer(pPrior)).FpOrderBy = Xsqlite3ExprListDup(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pOrderBy, 0) - Xsqlite3ResolveOrderGroupBy(tls, pParse, p, (*Select)(unsafe.Pointer(p)).FpOrderBy, ts+7227) - Xsqlite3ResolveOrderGroupBy(tls, pParse, pPrior, (*Select)(unsafe.Pointer(pPrior)).FpOrderBy, ts+7227) + Xsqlite3ResolveOrderGroupBy(tls, pParse, p, (*Select)(unsafe.Pointer(p)).FpOrderBy, ts+7216) + Xsqlite3ResolveOrderGroupBy(tls, pParse, pPrior, (*Select)(unsafe.Pointer(pPrior)).FpOrderBy, ts+7216) computeLimitRegisters(tls, pParse, p, labelEnd) if (*Select)(unsafe.Pointer(p)).FiLimit != 0 && op == TK_ALL { @@ -88353,13 +88387,13 @@ Xsqlite3SelectDestInit(tls, bp+8, SRT_Coroutine, regAddrA) Xsqlite3SelectDestInit(tls, bp+48, SRT_Coroutine, regAddrB) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19135, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19182, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) addrSelectA = Xsqlite3VdbeCurrentAddr(tls, v) + 1 addr1 = Xsqlite3VdbeAddOp3(tls, v, OP_InitCoroutine, regAddrA, 0, addrSelectA) (*Select)(unsafe.Pointer(pPrior)).FiLimit = regLimitA - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19146, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19193, 0) Xsqlite3Select(tls, pParse, pPrior, bp+8) Xsqlite3VdbeEndCoroutine(tls, v, regAddrA) Xsqlite3VdbeJumpHere(tls, v, addr1) @@ -88371,7 +88405,7 @@ savedOffset = (*Select)(unsafe.Pointer(p)).FiOffset (*Select)(unsafe.Pointer(p)).FiLimit = regLimitB (*Select)(unsafe.Pointer(p)).FiOffset = 0 - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19151, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19198, 0) Xsqlite3Select(tls, pParse, p, bp+48) (*Select)(unsafe.Pointer(p)).FiLimit = savedLimit (*Select)(unsafe.Pointer(p)).FiOffset = savedOffset @@ -88559,7 +88593,8 @@ Xsqlite3VectorErrorMsg(tls, (*SubstContext)(unsafe.Pointer(pSubst)).FpParse, pCopy) } else { var db uintptr = (*Parse)(unsafe.Pointer((*SubstContext)(unsafe.Pointer(pSubst)).FpParse)).Fdb - if (*SubstContext)(unsafe.Pointer(pSubst)).FisOuterJoin != 0 && int32((*Expr)(unsafe.Pointer(pCopy)).Fop) != TK_COLUMN { + if (*SubstContext)(unsafe.Pointer(pSubst)).FisOuterJoin != 0 && + (int32((*Expr)(unsafe.Pointer(pCopy)).Fop) != TK_COLUMN || (*Expr)(unsafe.Pointer(pCopy)).FiTable != (*SubstContext)(unsafe.Pointer(pSubst)).FiNewTable) { libc.Xmemset(tls, bp, 0, uint64(unsafe.Sizeof(Expr{}))) (*Expr)(unsafe.Pointer(bp)).Fop = U8(TK_IF_NULL_ROW) (*Expr)(unsafe.Pointer(bp)).FpLeft = pCopy @@ -89458,7 +89493,7 @@ for pIdx = (*Table)(unsafe.Pointer(pTab)).FpIndex; pIdx != 0 && Xsqlite3StrICmp(tls, (*Index)(unsafe.Pointer(pIdx)).FzName, zIndexedBy) != 0; pIdx = (*Index)(unsafe.Pointer(pIdx)).FpNext { } if !(pIdx != 0) { - Xsqlite3ErrorMsg(tls, 
pParse, ts+19157, libc.VaList(bp, zIndexedBy, 0)) + Xsqlite3ErrorMsg(tls, pParse, ts+19204, libc.VaList(bp, zIndexedBy, 0)) (*Parse)(unsafe.Pointer(pParse)).FcheckSchema = U8(1) return SQLITE_ERROR } @@ -89541,7 +89576,7 @@ defer tls.Free(8) if uint32(int32(*(*uint16)(unsafe.Pointer(pFrom + 60 + 4))&0x4>>2)) != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+19175, libc.VaList(bp, (*SrcItem)(unsafe.Pointer(pFrom)).FzName)) + Xsqlite3ErrorMsg(tls, pParse, ts+19222, libc.VaList(bp, (*SrcItem)(unsafe.Pointer(pFrom)).FzName)) return 1 } return 0 @@ -89670,7 +89705,7 @@ *(*U32)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pFrom)).FpSelect + 4)) |= U32(SF_CopyCte) if uint32(int32(*(*uint16)(unsafe.Pointer(pFrom + 60 + 4))&0x2>>1)) != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+19198, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(pFrom + 88)))) + Xsqlite3ErrorMsg(tls, pParse, ts+19245, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(pFrom + 88)))) return 2 } libc.SetBitFieldPtr16Uint32(pFrom+60+4, uint32(1), 8, 0x100) @@ -89693,7 +89728,7 @@ libc.SetBitFieldPtr16Uint32(pItem+60+4, uint32(1), 6, 0x40) if (*Select)(unsafe.Pointer(pRecTerm)).FselFlags&U32(SF_Recursive) != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+19218, libc.VaList(bp+16, (*Cte)(unsafe.Pointer(pCte)).FzName)) + ts+19265, libc.VaList(bp+16, (*Cte)(unsafe.Pointer(pCte)).FzName)) return 2 } *(*U32)(unsafe.Pointer(pRecTerm + 4)) |= U32(SF_Recursive) @@ -89709,7 +89744,7 @@ pRecTerm = (*Select)(unsafe.Pointer(pRecTerm)).FpPrior } - (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19261 + (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19308 pSavedWith = (*Parse)(unsafe.Pointer(pParse)).FpWith (*Parse)(unsafe.Pointer(pParse)).FpWith = *(*uintptr)(unsafe.Pointer(bp + 48)) if (*Select)(unsafe.Pointer(pSel)).FselFlags&U32(SF_Recursive) != 0 { @@ -89735,7 +89770,7 @@ pEList = (*Select)(unsafe.Pointer(pLeft)).FpEList if (*Cte)(unsafe.Pointer(pCte)).FpCols != 0 { if pEList != 0 && (*ExprList)(unsafe.Pointer(pEList)).FnExpr != (*ExprList)(unsafe.Pointer((*Cte)(unsafe.Pointer(pCte)).FpCols)).FnExpr { - Xsqlite3ErrorMsg(tls, pParse, ts+19284, + Xsqlite3ErrorMsg(tls, pParse, ts+19331, libc.VaList(bp+24, (*Cte)(unsafe.Pointer(pCte)).FzName, (*ExprList)(unsafe.Pointer(pEList)).FnExpr, (*ExprList)(unsafe.Pointer((*Cte)(unsafe.Pointer(pCte)).FpCols)).FnExpr)) (*Parse)(unsafe.Pointer(pParse)).FpWith = pSavedWith return 2 @@ -89746,9 +89781,9 @@ Xsqlite3ColumnsFromExprList(tls, pParse, pEList, pTab+54, pTab+8) if bMayRecursive != 0 { if (*Select)(unsafe.Pointer(pSel)).FselFlags&U32(SF_Recursive) != 0 { - (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19322 + (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19369 } else { - (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19356 + (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19403 } Xsqlite3WalkSelect(tls, pWalker, pSel) } @@ -89795,7 +89830,7 @@ if (*SrcItem)(unsafe.Pointer(pFrom)).FzAlias != 0 { (*Table)(unsafe.Pointer(pTab)).FzName = Xsqlite3DbStrDup(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, (*SrcItem)(unsafe.Pointer(pFrom)).FzAlias) } else { - (*Table)(unsafe.Pointer(pTab)).FzName = Xsqlite3MPrintf(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, ts+19394, libc.VaList(bp, pFrom)) + (*Table)(unsafe.Pointer(pTab)).FzName = Xsqlite3MPrintf(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, ts+19441, libc.VaList(bp, pFrom)) } for (*Select)(unsafe.Pointer(pSel)).FpPrior != 0 { pSel = (*Select)(unsafe.Pointer(pSel)).FpPrior @@ -89907,7 +89942,7 @@ return WRC_Abort } if (*Table)(unsafe.Pointer(pTab)).FnTabRef >= U32(0xffff) { - Xsqlite3ErrorMsg(tls, pParse, 
ts+19398, + Xsqlite3ErrorMsg(tls, pParse, ts+19445, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName)) (*SrcItem)(unsafe.Pointer(pFrom)).FpTab = uintptr(0) return WRC_Abort @@ -89926,7 +89961,7 @@ if int32((*Table)(unsafe.Pointer(pTab)).FeTabType) == TABTYP_VIEW { if (*Sqlite3)(unsafe.Pointer(db)).Fflags&uint64(SQLITE_EnableView) == uint64(0) && (*Table)(unsafe.Pointer(pTab)).FpSchema != (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+1*32)).FpSchema { - Xsqlite3ErrorMsg(tls, pParse, ts+19437, + Xsqlite3ErrorMsg(tls, pParse, ts+19484, libc.VaList(bp+8, (*Table)(unsafe.Pointer(pTab)).FzName)) } (*SrcItem)(unsafe.Pointer(pFrom)).FpSelect = Xsqlite3SelectDup(tls, db, *(*uintptr)(unsafe.Pointer(pTab + 64)), 0) @@ -90050,7 +90085,7 @@ if pNew != 0 { var pX uintptr = pNew + 8 + uintptr((*ExprList)(unsafe.Pointer(pNew)).FnExpr-1)*32 - (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3MPrintf(tls, db, ts+19468, libc.VaList(bp+24, zUName)) + (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3MPrintf(tls, db, ts+19515, libc.VaList(bp+24, zUName)) libc.SetBitFieldPtr16Uint32(pX+16+4, uint32(ENAME_TAB), 0, 0x3) libc.SetBitFieldPtr16Uint32(pX+16+4, uint32(1), 7, 0x80) } @@ -90115,7 +90150,7 @@ (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3DbStrDup(tls, db, (*ExprList_item)(unsafe.Pointer(pNestedFrom+8+uintptr(j)*32)).FzEName) } else { - (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3MPrintf(tls, db, ts+19473, + (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3MPrintf(tls, db, ts+19520, libc.VaList(bp+32, zSchemaName, zTabName, zName)) } @@ -90146,9 +90181,9 @@ ; if !(tableSeen != 0) { if zTName != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+19482, libc.VaList(bp+72, zTName)) + Xsqlite3ErrorMsg(tls, pParse, ts+19529, libc.VaList(bp+72, zTName)) } else { - Xsqlite3ErrorMsg(tls, pParse, ts+19500, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+19547, 0) } } } @@ -90158,7 +90193,7 @@ } if (*Select)(unsafe.Pointer(p)).FpEList != 0 { if (*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer(p)).FpEList)).FnExpr > *(*int32)(unsafe.Pointer(db + 136 + 2*4)) { - Xsqlite3ErrorMsg(tls, pParse, ts+19520, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+19567, 0) return WRC_Abort } if elistFlags&U32(EP_HasFunc|EP_Subquery) != U32(0) { @@ -90296,7 +90331,7 @@ (*AggInfo)(unsafe.Pointer(pAggInfo)).FnColumn = (*AggInfo)(unsafe.Pointer(pAggInfo)).FnAccumulator if int32((*AggInfo)(unsafe.Pointer(pAggInfo)).FnSortingColumn) > 0 { if (*AggInfo)(unsafe.Pointer(pAggInfo)).FnColumn == 0 { - (*AggInfo)(unsafe.Pointer(pAggInfo)).FnSortingColumn = U16(0) + (*AggInfo)(unsafe.Pointer(pAggInfo)).FnSortingColumn = U16((*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer(pSelect)).FpGroupBy)).FnExpr) } else { (*AggInfo)(unsafe.Pointer(pAggInfo)).FnSortingColumn = U16(int32((*AggInfo_col)(unsafe.Pointer((*AggInfo)(unsafe.Pointer(pAggInfo)).FaCol+uintptr((*AggInfo)(unsafe.Pointer(pAggInfo)).FnColumn-1)*24)).FiSorterColumn) + 1) } @@ -90380,13 +90415,13 @@ if *(*uintptr)(unsafe.Pointer(pE + 32)) == uintptr(0) || (*ExprList)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pE + 32)))).FnExpr != 1 { Xsqlite3ErrorMsg(tls, pParse, - ts+19551, 0) + ts+19598, 0) (*AggInfo_func)(unsafe.Pointer(pFunc)).FiDistinct = -1 } else { var pKeyInfo uintptr = Xsqlite3KeyInfoFromExprList(tls, pParse, *(*uintptr)(unsafe.Pointer(pE + 32)), 0, 0) (*AggInfo_func)(unsafe.Pointer(pFunc)).FiDistAddr = Xsqlite3VdbeAddOp4(tls, v, OP_OpenEphemeral, (*AggInfo_func)(unsafe.Pointer(pFunc)).FiDistinct, 0, 0, pKeyInfo, -8) - Xsqlite3VdbeExplain(tls, pParse, 
uint8(0), ts+19602, libc.VaList(bp, (*FuncDef)(unsafe.Pointer((*AggInfo_func)(unsafe.Pointer(pFunc)).FpFunc)).FzName)) + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+19649, libc.VaList(bp, (*FuncDef)(unsafe.Pointer((*AggInfo_func)(unsafe.Pointer(pFunc)).FpFunc)).FzName)) } } @@ -90575,11 +90610,11 @@ if int32((*Parse)(unsafe.Pointer(pParse)).Fexplain) == 2 { var bCover int32 = libc.Bool32(pIdx != uintptr(0) && ((*Table)(unsafe.Pointer(pTab)).FtabFlags&U32(TF_WithoutRowid) == U32(0) || !(int32(*(*uint16)(unsafe.Pointer(pIdx + 100))&0x3>>0) == SQLITE_IDXTYPE_PRIMARYKEY))) - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+19635, + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+19682, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName, func() uintptr { if bCover != 0 { - return ts + 19647 + return ts + 19694 } return ts + 1547 }(), @@ -90907,7 +90942,7 @@ goto __7 } Xsqlite3ErrorMsg(tls, pParse, - ts+19670, + ts+19717, libc.VaList(bp, func() uintptr { if (*SrcItem)(unsafe.Pointer(p0)).FzAlias != 0 { return (*SrcItem)(unsafe.Pointer(p0)).FzAlias @@ -90968,7 +91003,7 @@ if !(int32((*Table)(unsafe.Pointer(pTab)).FnCol) != (*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer(pSub)).FpEList)).FnExpr) { goto __15 } - Xsqlite3ErrorMsg(tls, pParse, ts+19724, + Xsqlite3ErrorMsg(tls, pParse, ts+19771, libc.VaList(bp+8, int32((*Table)(unsafe.Pointer(pTab)).FnCol), (*Table)(unsafe.Pointer(pTab)).FzName, (*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer(pSub)).FpEList)).FnExpr)) goto select_end __15: @@ -91110,7 +91145,7 @@ (*SrcItem)(unsafe.Pointer(pItem1)).FaddrFillSub = addrTop Xsqlite3SelectDestInit(tls, bp+96, SRT_Coroutine, (*SrcItem)(unsafe.Pointer(pItem1)).FregReturn) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19764, libc.VaList(bp+32, pItem1)) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19811, libc.VaList(bp+32, pItem1)) Xsqlite3Select(tls, pParse, pSub1, bp+96) (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pItem1)).FpTab)).FnRowLogEst = (*Select)(unsafe.Pointer(pSub1)).FnSelectRow libc.SetBitFieldPtr16Uint32(pItem1+60+4, uint32(1), 5, 0x20) @@ -91169,7 +91204,7 @@ ; Xsqlite3SelectDestInit(tls, bp+96, SRT_EphemTab, (*SrcItem)(unsafe.Pointer(pItem1)).FiCursor) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19779, libc.VaList(bp+40, pItem1)) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19826, libc.VaList(bp+40, pItem1)) Xsqlite3Select(tls, pParse, pSub1, bp+96) (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pItem1)).FpTab)).FnRowLogEst = (*Select)(unsafe.Pointer(pSub1)).FnSelectRow if !(onceAddr != 0) { @@ -91640,9 +91675,9 @@ explainTempTable(tls, pParse, func() uintptr { if (*DistinctCtx)(unsafe.Pointer(bp+136)).FisTnct != 0 && (*Select)(unsafe.Pointer(p)).FselFlags&U32(SF_Distinct) == U32(0) { - return ts + 19795 + return ts + 19842 } - return ts + 19804 + return ts + 19851 }()) groupBySort = 1 @@ -91993,7 +92028,7 @@ if !(int32((*DistinctCtx)(unsafe.Pointer(bp+136)).FeTnctType) == WHERE_DISTINCT_UNORDERED) { goto __146 } - explainTempTable(tls, pParse, ts+19795) + explainTempTable(tls, pParse, ts+19842) __146: ; if !((*SortCtx)(unsafe.Pointer(bp+48)).FpOrderBy != 0) { @@ -92098,7 +92133,7 @@ } Xsqlite3_free(tls, (*TabResult)(unsafe.Pointer(p)).FzErrMsg) (*TabResult)(unsafe.Pointer(p)).FzErrMsg = Xsqlite3_mprintf(tls, - ts+19813, 0) + ts+19860, 0) (*TabResult)(unsafe.Pointer(p)).Frc = SQLITE_ERROR return 1 __11: @@ -92331,7 +92366,7 @@ if !((*Token)(unsafe.Pointer(pName2)).Fn > uint32(0)) { goto __3 } - Xsqlite3ErrorMsg(tls, pParse, ts+19878, 0) + Xsqlite3ErrorMsg(tls, 
pParse, ts+19925, 0) goto trigger_cleanup __3: ; @@ -92375,7 +92410,7 @@ goto trigger_cleanup __8: ; - Xsqlite3FixInit(tls, bp+40, pParse, iDb, ts+19924, *(*uintptr)(unsafe.Pointer(bp + 32))) + Xsqlite3FixInit(tls, bp+40, pParse, iDb, ts+19971, *(*uintptr)(unsafe.Pointer(bp + 32))) if !(Xsqlite3FixSrcList(tls, bp+40, pTableName) != 0) { goto __9 } @@ -92393,7 +92428,7 @@ if !(int32((*Table)(unsafe.Pointer(pTab)).FeTabType) == TABTYP_VTAB) { goto __11 } - Xsqlite3ErrorMsg(tls, pParse, ts+19932, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+19979, 0) goto trigger_orphan_error __11: ; @@ -92405,7 +92440,7 @@ goto trigger_cleanup __12: ; - if !(Xsqlite3CheckObjectName(tls, pParse, zName, ts+19924, (*Table)(unsafe.Pointer(pTab)).FzName) != 0) { + if !(Xsqlite3CheckObjectName(tls, pParse, zName, ts+19971, (*Table)(unsafe.Pointer(pTab)).FzName) != 0) { goto __13 } goto trigger_cleanup @@ -92420,11 +92455,12 @@ if !!(noErr != 0) { goto __16 } - Xsqlite3ErrorMsg(tls, pParse, ts+19973, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(bp + 32)))) + Xsqlite3ErrorMsg(tls, pParse, ts+20020, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(bp + 32)))) goto __17 __16: ; Xsqlite3CodeVerifySchema(tls, pParse, iDb) + __17: ; goto trigger_cleanup @@ -92435,19 +92471,19 @@ if !(Xsqlite3_strnicmp(tls, (*Table)(unsafe.Pointer(pTab)).FzName, ts+6374, 7) == 0) { goto __18 } - Xsqlite3ErrorMsg(tls, pParse, ts+19999, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+20046, 0) goto trigger_cleanup __18: ; if !(int32((*Table)(unsafe.Pointer(pTab)).FeTabType) == TABTYP_VIEW && tr_tm != TK_INSTEAD) { goto __19 } - Xsqlite3ErrorMsg(tls, pParse, ts+20037, + Xsqlite3ErrorMsg(tls, pParse, ts+20084, libc.VaList(bp+8, func() uintptr { if tr_tm == TK_BEFORE { - return ts + 20074 + return ts + 20121 } - return ts + 20081 + return ts + 20128 }(), pTableName+8)) goto trigger_orphan_error __19: @@ -92456,7 +92492,7 @@ goto __20 } Xsqlite3ErrorMsg(tls, pParse, - ts+20087, libc.VaList(bp+24, pTableName+8)) + ts+20134, libc.VaList(bp+24, pTableName+8)) goto trigger_orphan_error __20: ; @@ -92605,7 +92641,7 @@ __3: ; Xsqlite3TokenInit(tls, bp+56, (*Trigger)(unsafe.Pointer(pTrig)).FzName) - Xsqlite3FixInit(tls, bp+72, pParse, iDb, ts+19924, bp+56) + Xsqlite3FixInit(tls, bp+72, pParse, iDb, ts+19971, bp+56) if !(Xsqlite3FixTriggerStep(tls, bp+72, (*Trigger)(unsafe.Pointer(pTrig)).Fstep_list) != 0 || Xsqlite3FixExpr(tls, bp+72, (*Trigger)(unsafe.Pointer(pTrig)).FpWhen) != 0) { goto __4 @@ -92638,7 +92674,7 @@ goto __12 } Xsqlite3ErrorMsg(tls, pParse, - ts+20133, + ts+20180, libc.VaList(bp, (*Trigger)(unsafe.Pointer(pTrig)).FzName, (*TriggerStep)(unsafe.Pointer(pStep)).FzTarget)) goto triggerfinish_cleanup __12: @@ -92663,13 +92699,13 @@ z = Xsqlite3DbStrNDup(tls, db, (*Token)(unsafe.Pointer(pAll)).Fz, uint64((*Token)(unsafe.Pointer(pAll)).Fn)) Xsqlite3NestedParse(tls, pParse, - ts+20181, + ts+20228, libc.VaList(bp+16, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName, zName, (*Trigger)(unsafe.Pointer(pTrig)).Ftable, z)) Xsqlite3DbFree(tls, db, z) Xsqlite3ChangeCookie(tls, pParse, iDb) Xsqlite3VdbeAddParseSchemaOp(tls, v, iDb, - Xsqlite3MPrintf(tls, db, ts+20256, libc.VaList(bp+48, zName)), uint16(0)) + Xsqlite3MPrintf(tls, db, ts+20303, libc.VaList(bp+48, zName)), uint16(0)) __7: ; __6: @@ -92925,7 +92961,7 @@ if !!(noErr != 0) { goto __9 } - Xsqlite3ErrorMsg(tls, pParse, ts+20285, libc.VaList(bp, pName+8)) + Xsqlite3ErrorMsg(tls, pParse, ts+20332, libc.VaList(bp, pName+8)) goto __10 __9: Xsqlite3CodeVerifyNamedSchema(tls, pParse, 
zDb) @@ -92978,7 +93014,7 @@ if libc.AssignUintptr(&v, Xsqlite3GetVdbe(tls, pParse)) != uintptr(0) { Xsqlite3NestedParse(tls, pParse, - ts+20305, + ts+20352, libc.VaList(bp, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName, (*Trigger)(unsafe.Pointer(pTrigger)).FzName)) Xsqlite3ChangeCookie(tls, pParse, iDb) Xsqlite3VdbeAddOp4(tls, v, OP_DropTrigger, iDb, 0, 0, (*Trigger)(unsafe.Pointer(pTrigger)).FzName, 0) @@ -93092,12 +93128,12 @@ goto __15 } Xsqlite3ErrorMsg(tls, pParse, - ts+20367, + ts+20414, libc.VaList(bp, func() uintptr { if op == TK_DELETE { - return ts + 20415 + return ts + 20462 } - return ts + 20422 + return ts + 20469 }())) __15: ; @@ -93211,7 +93247,7 @@ if int32((*Expr)(unsafe.Pointer((*Expr)(unsafe.Pointer(pTerm)).FpRight)).Fop) != TK_ASTERISK { return 0 } - Xsqlite3ErrorMsg(tls, pParse, ts+20429, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+20476, 0) return 1 } @@ -93277,7 +93313,7 @@ } Xsqlite3ExprListDelete(tls, db, (*Select)(unsafe.Pointer(bp)).FpEList) pNew = sqlite3ExpandReturning(tls, pParse, (*Returning)(unsafe.Pointer(pReturning)).FpReturnEL, pTab) - if !(int32((*Sqlite3)(unsafe.Pointer(db)).FmallocFailed) != 0) { + if (*Parse)(unsafe.Pointer(pParse)).FnErr == 0 { libc.Xmemset(tls, bp+240, 0, uint64(unsafe.Sizeof(NameContext{}))) if (*Returning)(unsafe.Pointer(pReturning)).FnRetCol == 0 { (*Returning)(unsafe.Pointer(pReturning)).FnRetCol = (*ExprList)(unsafe.Pointer(pNew)).FnExpr @@ -93441,7 +93477,7 @@ if v != 0 { if (*Trigger)(unsafe.Pointer(pTrigger)).FzName != 0 { Xsqlite3VdbeChangeP4(tls, v, -1, - Xsqlite3MPrintf(tls, db, ts+20471, libc.VaList(bp, (*Trigger)(unsafe.Pointer(pTrigger)).FzName)), -6) + Xsqlite3MPrintf(tls, db, ts+20518, libc.VaList(bp, (*Trigger)(unsafe.Pointer(pTrigger)).FzName)), -6) } if (*Trigger)(unsafe.Pointer(pTrigger)).FpWhen != 0 { @@ -94034,7 +94070,7 @@ } Xsqlite3ErrorMsg(tls, pParse, - ts+20485, + ts+20532, libc.VaList(bp, (*Column)(unsafe.Pointer((*Table)(unsafe.Pointer(pTab)).FaCol+uintptr(j)*24)).FzCnName)) goto update_cleanup __27: @@ -94066,7 +94102,7 @@ iRowidExpr = i goto __30 __29: - Xsqlite3ErrorMsg(tls, pParse, ts+20521, libc.VaList(bp+8, (*ExprList_item)(unsafe.Pointer(pChanges+8+uintptr(i)*32)).FzEName)) + Xsqlite3ErrorMsg(tls, pParse, ts+20568, libc.VaList(bp+8, (*ExprList_item)(unsafe.Pointer(pChanges+8+uintptr(i)*32)).FzEName)) (*Parse)(unsafe.Pointer(pParse)).FcheckSchema = U8(1) goto update_cleanup __30: @@ -94392,7 +94428,12 @@ goto __77 __76: flags = WHERE_ONEPASS_DESIRED - if !(!(int32((*Parse)(unsafe.Pointer(pParse)).Fnested) != 0) && !(pTrigger != 0) && !(hasFK != 0) && !(chngKey != 0) && !(*(*int32)(unsafe.Pointer(bp + 104)) != 0)) { + if !(!(int32((*Parse)(unsafe.Pointer(pParse)).Fnested) != 0) && + !(pTrigger != 0) && + !(hasFK != 0) && + !(chngKey != 0) && + !(*(*int32)(unsafe.Pointer(bp + 104)) != 0) && + (*NameContext)(unsafe.Pointer(bp+40)).FncFlags&NC_Subquery == 0) { goto __78 } flags = flags | WHERE_ONEPASS_MULTIROW @@ -94946,7 +94987,7 @@ if !(regRowCount != 0) { goto __169 } - Xsqlite3CodeChangeCount(tls, v, regRowCount, ts+20540) + Xsqlite3CodeChangeCount(tls, v, regRowCount, ts+20587) __169: ; update_cleanup: @@ -95252,10 +95293,10 @@ if nClause == 0 && (*Upsert)(unsafe.Pointer(pUpsert)).FpNextUpsert == uintptr(0) { *(*int8)(unsafe.Pointer(bp + 216)) = int8(0) } else { - Xsqlite3_snprintf(tls, int32(unsafe.Sizeof([16]int8{})), bp+216, ts+20553, libc.VaList(bp, nClause+1)) + Xsqlite3_snprintf(tls, int32(unsafe.Sizeof([16]int8{})), bp+216, ts+20600, libc.VaList(bp, 
nClause+1)) } Xsqlite3ErrorMsg(tls, pParse, - ts+20557, libc.VaList(bp+8, bp+216)) + ts+20604, libc.VaList(bp+8, bp+216)) return SQLITE_ERROR } @@ -95378,7 +95419,7 @@ var zSubSql uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp)), 0) if zSubSql != 0 && - (libc.Xstrncmp(tls, zSubSql, ts+20630, uint64(3)) == 0 || libc.Xstrncmp(tls, zSubSql, ts+20634, uint64(3)) == 0) { + (libc.Xstrncmp(tls, zSubSql, ts+20677, uint64(3)) == 0 || libc.Xstrncmp(tls, zSubSql, ts+20681, uint64(3)) == 0) { rc = execSql(tls, db, pzErrMsg, zSubSql) if rc != SQLITE_OK { break @@ -95526,14 +95567,14 @@ if !!(int32((*Sqlite3)(unsafe.Pointer(db)).FautoCommit) != 0) { goto __1 } - Xsqlite3SetString(tls, pzErrMsg, db, ts+20638) + Xsqlite3SetString(tls, pzErrMsg, db, ts+20685) return SQLITE_ERROR __1: ; if !((*Sqlite3)(unsafe.Pointer(db)).FnVdbeActive > 1) { goto __2 } - Xsqlite3SetString(tls, pzErrMsg, db, ts+20678) + Xsqlite3SetString(tls, pzErrMsg, db, ts+20725) return SQLITE_ERROR __2: ; @@ -95544,7 +95585,7 @@ if !(Xsqlite3_value_type(tls, pOut) != SQLITE_TEXT) { goto __5 } - Xsqlite3SetString(tls, pzErrMsg, db, ts+20721) + Xsqlite3SetString(tls, pzErrMsg, db, ts+20768) return SQLITE_ERROR __5: ; @@ -95572,7 +95613,7 @@ isMemDb = Xsqlite3PagerIsMemdb(tls, Xsqlite3BtreePager(tls, pMain)) nDb = (*Sqlite3)(unsafe.Pointer(db)).FnDb - rc = execSqlF(tls, db, pzErrMsg, ts+20739, libc.VaList(bp, zOut)) + rc = execSqlF(tls, db, pzErrMsg, ts+20786, libc.VaList(bp, zOut)) (*Sqlite3)(unsafe.Pointer(db)).FopenFlags = saved_openFlags if !(rc != SQLITE_OK) { goto __6 @@ -95592,7 +95633,7 @@ goto __8 } rc = SQLITE_ERROR - Xsqlite3SetString(tls, pzErrMsg, db, ts+20762) + Xsqlite3SetString(tls, pzErrMsg, db, ts+20809) goto end_of_vacuum __8: ; @@ -95652,7 +95693,7 @@ (*Sqlite3)(unsafe.Pointer(db)).Finit.FiDb = U8(nDb) rc = execSqlF(tls, db, pzErrMsg, - ts+20789, + ts+20836, libc.VaList(bp+8, zDbMain)) if !(rc != SQLITE_OK) { goto __13 @@ -95661,7 +95702,7 @@ __13: ; rc = execSqlF(tls, db, pzErrMsg, - ts+20897, + ts+20944, libc.VaList(bp+16, zDbMain)) if !(rc != SQLITE_OK) { goto __14 @@ -95672,7 +95713,7 @@ (*Sqlite3)(unsafe.Pointer(db)).Finit.FiDb = U8(0) rc = execSqlF(tls, db, pzErrMsg, - ts+20951, + ts+20998, libc.VaList(bp+24, zDbMain)) *(*U32)(unsafe.Pointer(db + 44)) &= libc.Uint32FromInt32(libc.CplInt32(DBFLAG_Vacuum)) @@ -95683,7 +95724,7 @@ __15: ; rc = execSqlF(tls, db, pzErrMsg, - ts+21102, + ts+21149, libc.VaList(bp+32, zDbMain)) if !(rc != 0) { goto __16 @@ -96112,11 +96153,11 @@ if pEnd != 0 { (*Parse)(unsafe.Pointer(pParse)).FsNameToken.Fn = uint32(int32((int64((*Token)(unsafe.Pointer(pEnd)).Fz)-int64((*Parse)(unsafe.Pointer(pParse)).FsNameToken.Fz))/1)) + (*Token)(unsafe.Pointer(pEnd)).Fn } - zStmt = Xsqlite3MPrintf(tls, db, ts+21232, libc.VaList(bp, pParse+272)) + zStmt = Xsqlite3MPrintf(tls, db, ts+21279, libc.VaList(bp, pParse+272)) iDb = Xsqlite3SchemaToIndex(tls, db, (*Table)(unsafe.Pointer(pTab)).FpSchema) Xsqlite3NestedParse(tls, pParse, - ts+21256, + ts+21303, libc.VaList(bp+8, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName, (*Table)(unsafe.Pointer(pTab)).FzName, (*Table)(unsafe.Pointer(pTab)).FzName, @@ -96126,7 +96167,7 @@ Xsqlite3ChangeCookie(tls, pParse, iDb) Xsqlite3VdbeAddOp0(tls, v, OP_Expire) - zWhere = Xsqlite3MPrintf(tls, db, ts+21355, libc.VaList(bp+48, (*Table)(unsafe.Pointer(pTab)).FzName, zStmt)) + zWhere = Xsqlite3MPrintf(tls, db, ts+21402, libc.VaList(bp+48, (*Table)(unsafe.Pointer(pTab)).FzName, zStmt)) Xsqlite3VdbeAddParseSchemaOp(tls, v, 
iDb, zWhere, uint16(0)) Xsqlite3DbFree(tls, db, zStmt) @@ -96187,7 +96228,7 @@ for pCtx = (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx; pCtx != 0; pCtx = (*VtabCtx)(unsafe.Pointer(pCtx)).FpPrior { if (*VtabCtx)(unsafe.Pointer(pCtx)).FpTab == pTab { *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, - ts+21374, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName)) + ts+21421, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName)) return SQLITE_LOCKED } } @@ -96215,9 +96256,11 @@ (*VtabCtx)(unsafe.Pointer(bp + 32)).FpPrior = (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx (*VtabCtx)(unsafe.Pointer(bp + 32)).FbDeclared = 0 (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx = bp + 32 + (*Table)(unsafe.Pointer(pTab)).FnTabRef++ rc = (*struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, uintptr, uintptr) int32 })(unsafe.Pointer(&struct{ uintptr }{xConstruct})).f(tls, db, (*Module)(unsafe.Pointer(pMod)).FpAux, nArg, azArg, pVTable+16, bp+64) + Xsqlite3DeleteTable(tls, db, pTab) (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx = (*VtabCtx)(unsafe.Pointer(bp + 32)).FpPrior if rc == SQLITE_NOMEM { Xsqlite3OomFault(tls, db) @@ -96225,7 +96268,7 @@ if SQLITE_OK != rc { if *(*uintptr)(unsafe.Pointer(bp + 64)) == uintptr(0) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+21416, libc.VaList(bp+8, zModuleName)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+21463, libc.VaList(bp+8, zModuleName)) } else { *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+3656, libc.VaList(bp+16, *(*uintptr)(unsafe.Pointer(bp + 64)))) Xsqlite3_free(tls, *(*uintptr)(unsafe.Pointer(bp + 64))) @@ -96237,7 +96280,7 @@ (*Module)(unsafe.Pointer(pMod)).FnRefModule++ (*VTable)(unsafe.Pointer(pVTable)).FnRef = 1 if (*VtabCtx)(unsafe.Pointer(bp+32)).FbDeclared == 0 { - var zFormat uintptr = ts + 21446 + var zFormat uintptr = ts + 21493 *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, zFormat, libc.VaList(bp+24, (*Table)(unsafe.Pointer(pTab)).FzName)) Xsqlite3VtabUnlock(tls, pVTable) rc = SQLITE_ERROR @@ -96311,7 +96354,7 @@ if !(pMod != 0) { var zModule uintptr = *(*uintptr)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pTab + 64 + 8)))) - Xsqlite3ErrorMsg(tls, pParse, ts+21492, libc.VaList(bp, zModule)) + Xsqlite3ErrorMsg(tls, pParse, ts+21539, libc.VaList(bp, zModule)) rc = SQLITE_ERROR } else { *(*uintptr)(unsafe.Pointer(bp + 16)) = uintptr(0) @@ -96369,7 +96412,7 @@ pMod = Xsqlite3HashFind(tls, db+576, zMod) if pMod == uintptr(0) || (*Sqlite3_module)(unsafe.Pointer((*Module)(unsafe.Pointer(pMod)).FpModule)).FxCreate == uintptr(0) || (*Sqlite3_module)(unsafe.Pointer((*Module)(unsafe.Pointer(pMod)).FpModule)).FxDestroy == uintptr(0) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+21492, libc.VaList(bp, zMod)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+21539, libc.VaList(bp, zMod)) rc = SQLITE_ERROR } else { rc = vtabCallConstructor(tls, db, pTab, pMod, (*Sqlite3_module)(unsafe.Pointer((*Module)(unsafe.Pointer(pMod)).FpModule)).FxCreate, pzErr) @@ -96403,7 +96446,7 @@ if !(pCtx != 0) || (*VtabCtx)(unsafe.Pointer(pCtx)).FbDeclared != 0 { Xsqlite3Error(tls, db, SQLITE_MISUSE) Xsqlite3_mutex_leave(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) - return Xsqlite3MisuseError(tls, 151030) + return Xsqlite3MisuseError(tls, 151102) } pTab = (*VtabCtx)(unsafe.Pointer(pCtx)).FpTab @@ -96856,7 +96899,7 @@ Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) p = (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx if !(p != 0) { - rc = 
Xsqlite3MisuseError(tls, 151521) + rc = Xsqlite3MisuseError(tls, 151593) } else { ap = va switch op { @@ -96883,7 +96926,7 @@ fallthrough default: { - rc = Xsqlite3MisuseError(tls, 151539) + rc = Xsqlite3MisuseError(tls, 151611) break } @@ -97114,7 +97157,7 @@ func explainIndexColumnName(tls *libc.TLS, pIdx uintptr, i int32) uintptr { i = int32(*(*I16)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx)).FaiColumn + uintptr(i)*2))) if i == -2 { - return ts + 21511 + return ts + 21558 } if i == -1 { return ts + 16260 @@ -97126,11 +97169,11 @@ var i int32 if bAnd != 0 { - Xsqlite3_str_append(tls, pStr, ts+21518, 5) + Xsqlite3_str_append(tls, pStr, ts+21565, 5) } if nTerm > 1 { - Xsqlite3_str_append(tls, pStr, ts+21524, 1) + Xsqlite3_str_append(tls, pStr, ts+21571, 1) } for i = 0; i < nTerm; i++ { if i != 0 { @@ -97145,7 +97188,7 @@ Xsqlite3_str_append(tls, pStr, zOp, 1) if nTerm > 1 { - Xsqlite3_str_append(tls, pStr, ts+21524, 1) + Xsqlite3_str_append(tls, pStr, ts+21571, 1) } for i = 0; i < nTerm; i++ { if i != 0 { @@ -97171,27 +97214,27 @@ if int32(nEq) == 0 && (*WhereLoop)(unsafe.Pointer(pLoop)).FwsFlags&U32(WHERE_BTM_LIMIT|WHERE_TOP_LIMIT) == U32(0) { return } - Xsqlite3_str_append(tls, pStr, ts+21526, 2) + Xsqlite3_str_append(tls, pStr, ts+21573, 2) for i = 0; i < int32(nEq); i++ { var z uintptr = explainIndexColumnName(tls, pIndex, i) if i != 0 { - Xsqlite3_str_append(tls, pStr, ts+21518, 5) + Xsqlite3_str_append(tls, pStr, ts+21565, 5) } Xsqlite3_str_appendf(tls, pStr, func() uintptr { if i >= int32(nSkip) { - return ts + 21529 + return ts + 21576 } - return ts + 21534 + return ts + 21581 }(), libc.VaList(bp, z)) } j = i if (*WhereLoop)(unsafe.Pointer(pLoop)).FwsFlags&U32(WHERE_BTM_LIMIT) != 0 { - explainAppendTerm(tls, pStr, pIndex, int32(*(*U16)(unsafe.Pointer(pLoop + 24 + 2))), j, i, ts+21542) + explainAppendTerm(tls, pStr, pIndex, int32(*(*U16)(unsafe.Pointer(pLoop + 24 + 2))), j, i, ts+21589) i = 1 } if (*WhereLoop)(unsafe.Pointer(pLoop)).FwsFlags&U32(WHERE_TOP_LIMIT) != 0 { - explainAppendTerm(tls, pStr, pIndex, int32(*(*U16)(unsafe.Pointer(pLoop + 24 + 4))), j, i, ts+21544) + explainAppendTerm(tls, pStr, pIndex, int32(*(*U16)(unsafe.Pointer(pLoop + 24 + 4))), j, i, ts+21591) } Xsqlite3_str_append(tls, pStr, ts+4950, 1) } @@ -97234,11 +97277,11 @@ Xsqlite3StrAccumInit(tls, bp+64, db, bp+96, int32(unsafe.Sizeof([100]int8{})), SQLITE_MAX_LENGTH) (*StrAccum)(unsafe.Pointer(bp + 64)).FprintfFlags = U8(SQLITE_PRINTF_INTERNAL) - Xsqlite3_str_appendf(tls, bp+64, ts+21546, libc.VaList(bp, func() uintptr { + Xsqlite3_str_appendf(tls, bp+64, ts+21593, libc.VaList(bp, func() uintptr { if isSearch != 0 { - return ts + 21552 + return ts + 21599 } - return ts + 21559 + return ts + 21606 }(), pItem)) if flags&U32(WHERE_IPK|WHERE_VIRTUALTABLE) == U32(0) { var zFmt uintptr = uintptr(0) @@ -97251,40 +97294,40 @@ zFmt = ts + 10969 } } else if flags&U32(WHERE_PARTIALIDX) != 0 { - zFmt = ts + 21564 + zFmt = ts + 21611 } else if flags&U32(WHERE_AUTO_INDEX) != 0 { - zFmt = ts + 21597 + zFmt = ts + 21644 } else if flags&U32(WHERE_IDX_ONLY) != 0 { - zFmt = ts + 21622 + zFmt = ts + 21669 } else { - zFmt = ts + 21640 + zFmt = ts + 21687 } if zFmt != 0 { - Xsqlite3_str_append(tls, bp+64, ts+21649, 7) + Xsqlite3_str_append(tls, bp+64, ts+21696, 7) Xsqlite3_str_appendf(tls, bp+64, zFmt, libc.VaList(bp+16, (*Index)(unsafe.Pointer(pIdx)).FzName)) explainIndexRange(tls, bp+64, pLoop) } } else if flags&U32(WHERE_IPK) != U32(0) && flags&U32(WHERE_CONSTRAINT) != U32(0) { var cRangeOp int8 var zRowid uintptr = ts + 16260 
- Xsqlite3_str_appendf(tls, bp+64, ts+21657, libc.VaList(bp+24, zRowid)) + Xsqlite3_str_appendf(tls, bp+64, ts+21704, libc.VaList(bp+24, zRowid)) if flags&U32(WHERE_COLUMN_EQ|WHERE_COLUMN_IN) != 0 { cRangeOp = int8('=') } else if flags&U32(WHERE_BOTH_LIMIT) == U32(WHERE_BOTH_LIMIT) { - Xsqlite3_str_appendf(tls, bp+64, ts+21688, libc.VaList(bp+32, zRowid)) + Xsqlite3_str_appendf(tls, bp+64, ts+21735, libc.VaList(bp+32, zRowid)) cRangeOp = int8('<') } else if flags&U32(WHERE_BTM_LIMIT) != 0 { cRangeOp = int8('>') } else { cRangeOp = int8('<') } - Xsqlite3_str_appendf(tls, bp+64, ts+21698, libc.VaList(bp+40, int32(cRangeOp))) + Xsqlite3_str_appendf(tls, bp+64, ts+21745, libc.VaList(bp+40, int32(cRangeOp))) } else if flags&U32(WHERE_VIRTUALTABLE) != U32(0) { - Xsqlite3_str_appendf(tls, bp+64, ts+21703, + Xsqlite3_str_appendf(tls, bp+64, ts+21750, libc.VaList(bp+48, *(*int32)(unsafe.Pointer(pLoop + 24)), *(*uintptr)(unsafe.Pointer(pLoop + 24 + 16)))) } if int32((*SrcItem)(unsafe.Pointer(pItem)).Ffg.Fjointype)&JT_LEFT != 0 { - Xsqlite3_str_appendf(tls, bp+64, ts+21730, 0) + Xsqlite3_str_appendf(tls, bp+64, ts+21777, 0) } zMsg = Xsqlite3StrAccumFinish(tls, bp+64) @@ -97316,22 +97359,22 @@ Xsqlite3StrAccumInit(tls, bp+24, db, bp+56, int32(unsafe.Sizeof([100]int8{})), SQLITE_MAX_LENGTH) (*StrAccum)(unsafe.Pointer(bp + 24)).FprintfFlags = U8(SQLITE_PRINTF_INTERNAL) - Xsqlite3_str_appendf(tls, bp+24, ts+21741, libc.VaList(bp, pItem)) + Xsqlite3_str_appendf(tls, bp+24, ts+21788, libc.VaList(bp, pItem)) pLoop = (*WhereLevel)(unsafe.Pointer(pLevel)).FpWLoop if (*WhereLoop)(unsafe.Pointer(pLoop)).FwsFlags&U32(WHERE_IPK) != 0 { var pTab uintptr = (*SrcItem)(unsafe.Pointer(pItem)).FpTab if int32((*Table)(unsafe.Pointer(pTab)).FiPKey) >= 0 { - Xsqlite3_str_appendf(tls, bp+24, ts+21529, libc.VaList(bp+8, (*Column)(unsafe.Pointer((*Table)(unsafe.Pointer(pTab)).FaCol+uintptr((*Table)(unsafe.Pointer(pTab)).FiPKey)*24)).FzCnName)) + Xsqlite3_str_appendf(tls, bp+24, ts+21576, libc.VaList(bp+8, (*Column)(unsafe.Pointer((*Table)(unsafe.Pointer(pTab)).FaCol+uintptr((*Table)(unsafe.Pointer(pTab)).FiPKey)*24)).FzCnName)) } else { - Xsqlite3_str_appendf(tls, bp+24, ts+21762, 0) + Xsqlite3_str_appendf(tls, bp+24, ts+21809, 0) } } else { for i = int32((*WhereLoop)(unsafe.Pointer(pLoop)).FnSkip); i < int32(*(*U16)(unsafe.Pointer(pLoop + 24))); i++ { var z uintptr = explainIndexColumnName(tls, *(*uintptr)(unsafe.Pointer(pLoop + 24 + 8)), i) if i > int32((*WhereLoop)(unsafe.Pointer(pLoop)).FnSkip) { - Xsqlite3_str_append(tls, bp+24, ts+21518, 5) + Xsqlite3_str_append(tls, bp+24, ts+21565, 5) } - Xsqlite3_str_appendf(tls, bp+24, ts+21529, libc.VaList(bp+16, z)) + Xsqlite3_str_appendf(tls, bp+24, ts+21576, libc.VaList(bp+16, z)) } } Xsqlite3_str_append(tls, bp+24, ts+4950, 1) @@ -98928,7 +98971,7 @@ ; __126: ; - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21770, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21817, 0) ii = 0 __135: if !(ii < (*WhereClause)(unsafe.Pointer(pOrWc)).FnTerm) { @@ -98956,7 +98999,7 @@ pOrExpr = pAndExpr __140: ; - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21785, libc.VaList(bp, ii+1)) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21832, libc.VaList(bp, ii+1)) pSubWInfo = Xsqlite3WhereBegin(tls, pParse, pOrTab, pOrExpr, uintptr(0), uintptr(0), uintptr(0), uint16(WHERE_OR_SUBCLAUSE), iCovCur) @@ -99474,7 +99517,7 @@ var mAll Bitmask = uint64(0) var k int32 - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21794, libc.VaList(bp, 
(*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pTabItem)).FpTab)).FzName)) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21841, libc.VaList(bp, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pTabItem)).FpTab)).FzName)) for k = 0; k < iLevel; k++ { var iIdxCur int32 @@ -99835,7 +99878,7 @@ {FzOp: ts + 16109, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_MATCH)}, {FzOp: ts + 15440, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_GLOB)}, {FzOp: ts + 14960, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_LIKE)}, - {FzOp: ts + 21808, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_REGEXP)}, + {FzOp: ts + 21855, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_REGEXP)}, } func transferJoinMarkings(tls *libc.TLS, pDerived uintptr, pBase uintptr) { @@ -100325,12 +100368,12 @@ extraRight = x - uint64(1) if prereqAll>>1 >= x { - Xsqlite3ErrorMsg(tls, pParse, ts+21815, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+21862, 0) return } } else if prereqAll>>1 >= x { if (*SrcList)(unsafe.Pointer(pSrc)).FnSrc > 0 && int32((*SrcItem)(unsafe.Pointer(pSrc+8)).Ffg.Fjointype)&JT_LTORJ != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+21815, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+21862, 0) return } *(*U32)(unsafe.Pointer(pExpr + 4)) &= libc.Uint32FromInt32(libc.CplInt32(EP_InnerON)) @@ -100409,7 +100452,7 @@ !((*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_OuterON) != U32(0)) && 0 == Xsqlite3ExprCanBeNull(tls, pLeft) { (*Expr)(unsafe.Pointer(pExpr)).Fop = U8(TK_TRUEFALSE) - *(*uintptr)(unsafe.Pointer(pExpr + 8)) = ts + 6762 + *(*uintptr)(unsafe.Pointer(pExpr + 8)) = ts + 7692 *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_IsFalse) (*WhereTerm)(unsafe.Pointer(pTerm)).FprereqAll = uint64(0) (*WhereTerm)(unsafe.Pointer(pTerm)).FeOperator = U16(0) @@ -100503,7 +100546,7 @@ } zCollSeqName = func() uintptr { if *(*int32)(unsafe.Pointer(bp + 20)) != 0 { - return ts + 21856 + return ts + 21903 } return uintptr(unsafe.Pointer(&Xsqlite3StrBINARY)) }() @@ -100879,7 +100922,7 @@ k++ } if k >= int32((*Table)(unsafe.Pointer(pTab)).FnCol) { - Xsqlite3ErrorMsg(tls, pParse, ts+21863, + Xsqlite3ErrorMsg(tls, pParse, ts+21910, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName, j)) return } @@ -100895,7 +100938,7 @@ pRhs = Xsqlite3PExpr(tls, pParse, TK_UPLUS, Xsqlite3ExprDup(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, (*ExprList_item)(unsafe.Pointer(pArgs+8+uintptr(j)*32)).FpExpr, 0), uintptr(0)) pTerm = Xsqlite3PExpr(tls, pParse, TK_EQ, pColRef, pRhs) - if int32((*SrcItem)(unsafe.Pointer(pItem)).Ffg.Fjointype)&(JT_LEFT|JT_LTORJ) != 0 { + if int32((*SrcItem)(unsafe.Pointer(pItem)).Ffg.Fjointype)&(JT_LEFT|JT_LTORJ|JT_RIGHT) != 0 { joinType = U32(EP_OuterON) } else { joinType = U32(EP_InnerON) @@ -101613,7 +101656,7 @@ goto __6 } Xsqlite3_log(tls, SQLITE_WARNING|int32(1)<<8, - ts+21899, libc.VaList(bp, (*Table)(unsafe.Pointer(pTable)).FzName, + ts+21946, libc.VaList(bp, (*Table)(unsafe.Pointer(pTable)).FzName, (*Column)(unsafe.Pointer((*Table)(unsafe.Pointer(pTable)).FaCol+uintptr(iCol)*24)).FzCnName)) sentWarning = U8(1) __6: @@ -101684,7 +101727,7 @@ __14: ; *(*uintptr)(unsafe.Pointer(pLoop + 24 + 8)) = pIdx - (*Index)(unsafe.Pointer(pIdx)).FzName = ts + 21925 + (*Index)(unsafe.Pointer(pIdx)).FzName = ts + 21972 (*Index)(unsafe.Pointer(pIdx)).FpTable = pTable n = 0 idxCols = uint64(0) @@ -101858,6 +101901,10 @@ var v uintptr = (*Parse)(unsafe.Pointer(pParse)).FpVdbe var pLoop uintptr = (*WhereLevel)(unsafe.Pointer(pLevel)).FpWLoop var iCur int32 + var saved_pIdxEpr uintptr + + saved_pIdxEpr = (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr + (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr = uintptr(0) 
addrOnce = Xsqlite3VdbeAddOp0(tls, v, OP_Once) for __ccgo := true; __ccgo; __ccgo = iLevel < int32((*WhereInfo)(unsafe.Pointer(pWInfo)).FnLevel) { @@ -101901,9 +101948,7 @@ var r1 int32 = Xsqlite3GetTempRange(tls, pParse, n) var jj int32 for jj = 0; jj < n; jj++ { - var iCol int32 = int32(*(*I16)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx)).FaiColumn + uintptr(jj)*2))) - - Xsqlite3ExprCodeGetColumnOfTable(tls, v, (*Index)(unsafe.Pointer(pIdx)).FpTable, iCur, iCol, r1+jj) + Xsqlite3ExprCodeLoadIndexColumn(tls, pParse, pIdx, iCur, jj, r1+jj) } Xsqlite3VdbeAddOp4Int(tls, v, OP_FilterAdd, (*WhereLevel)(unsafe.Pointer(pLevel)).FregFilter, 0, r1, n) Xsqlite3ReleaseTempRange(tls, pParse, r1, n) @@ -101937,6 +101982,7 @@ } } Xsqlite3VdbeJumpHere(tls, v, addrOnce) + (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr = saved_pIdxEpr } func allocateIndexInfo(tls *libc.TLS, pWInfo uintptr, pWC uintptr, mUnusable Bitmask, pSrc uintptr, pmNoOmit uintptr) uintptr { @@ -102195,11 +102241,16 @@ _ = pParse + if !((*Table)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx)).FpTable)).FtabFlags&U32(TF_WithoutRowid) == U32(0)) && int32(*(*uint16)(unsafe.Pointer(pIdx + 100))&0x3>>0) == SQLITE_IDXTYPE_PRIMARYKEY { + nField = int32((*Index)(unsafe.Pointer(pIdx)).FnKeyCol) + } else { + nField = int32((*Index)(unsafe.Pointer(pIdx)).FnColumn) + } nField = func() int32 { - if int32((*UnpackedRecord)(unsafe.Pointer(pRec)).FnField) < (*Index)(unsafe.Pointer(pIdx)).FnSample { + if int32((*UnpackedRecord)(unsafe.Pointer(pRec)).FnField) < nField { return int32((*UnpackedRecord)(unsafe.Pointer(pRec)).FnField) } - return (*Index)(unsafe.Pointer(pIdx)).FnSample + return nField }() iCol = 0 iSample = (*Index)(unsafe.Pointer(pIdx)).FnSample * nField @@ -103780,7 +103831,7 @@ j >= (*WhereClause)(unsafe.Pointer(pWC)).FnTerm || *(*uintptr)(unsafe.Pointer((*WhereLoop)(unsafe.Pointer(pNew)).FaLTerm + uintptr(iTerm)*8)) != uintptr(0) || int32((*sqlite3_index_constraint)(unsafe.Pointer(pIdxCons)).Fusable) == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+21936, libc.VaList(bp, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pSrc)).FpTab)).FzName)) + Xsqlite3ErrorMsg(tls, pParse, ts+21983, libc.VaList(bp, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pSrc)).FpTab)).FzName)) return SQLITE_ERROR } @@ -103838,7 +103889,7 @@ (*WhereLoop)(unsafe.Pointer(pNew)).FnLTerm = U16(mxTerm + 1) for i = 0; i <= mxTerm; i++ { if *(*uintptr)(unsafe.Pointer((*WhereLoop)(unsafe.Pointer(pNew)).FaLTerm + uintptr(i)*8)) == uintptr(0) { - Xsqlite3ErrorMsg(tls, pParse, ts+21936, libc.VaList(bp+8, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pSrc)).FpTab)).FzName)) + Xsqlite3ErrorMsg(tls, pParse, ts+21983, libc.VaList(bp+8, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pSrc)).FpTab)).FzName)) return SQLITE_ERROR } @@ -104236,7 +104287,7 @@ mPrior = mPrior | (*WhereLoop)(unsafe.Pointer(pNew)).FmaskSelf if rc != 0 || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { if rc == SQLITE_DONE { - Xsqlite3_log(tls, SQLITE_WARNING, ts+21962, 0) + Xsqlite3_log(tls, SQLITE_WARNING, ts+22009, 0) rc = SQLITE_OK } else { goto __3 @@ -104843,7 +104894,7 @@ } if nFrom == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+21997, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+22044, 0) Xsqlite3DbFreeNN(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pSpace) return SQLITE_ERROR } @@ -104878,6 +104929,10 @@ if int32((*WherePath)(unsafe.Pointer(pFrom)).FisOrdered) == (*ExprList)(unsafe.Pointer((*WhereInfo)(unsafe.Pointer(pWInfo)).FpOrderBy)).FnExpr { (*WhereInfo)(unsafe.Pointer(pWInfo)).FeDistinct = 
U8(WHERE_DISTINCT_ORDERED) } + if (*Select)(unsafe.Pointer((*WhereInfo)(unsafe.Pointer(pWInfo)).FpSelect)).FpOrderBy != 0 && + int32((*WhereInfo)(unsafe.Pointer(pWInfo)).FnOBSat) > (*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer((*WhereInfo)(unsafe.Pointer(pWInfo)).FpSelect)).FpOrderBy)).FnExpr { + (*WhereInfo)(unsafe.Pointer(pWInfo)).FnOBSat = I8((*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer((*WhereInfo)(unsafe.Pointer(pWInfo)).FpSelect)).FpOrderBy)).FnExpr) + } } else { (*WhereInfo)(unsafe.Pointer(pWInfo)).FrevMask = (*WherePath)(unsafe.Pointer(pFrom)).FrevLoop if int32((*WhereInfo)(unsafe.Pointer(pWInfo)).FnOBSat) <= 0 { @@ -105172,6 +105227,9 @@ (*IndexedExpr)(unsafe.Pointer(p)).FiIdxCur = iIdxCur (*IndexedExpr)(unsafe.Pointer(p)).FiIdxCol = i (*IndexedExpr)(unsafe.Pointer(p)).FbMaybeNullRow = U8(bMaybeNullRow) + if Xsqlite3IndexAffinityStr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pIdx) != 0 { + (*IndexedExpr)(unsafe.Pointer(p)).Faff = U8(*(*int8)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx)).FzColAff + uintptr(i)))) + } (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr = p if (*IndexedExpr)(unsafe.Pointer(p)).FpIENext == uintptr(0) { Xsqlite3ParserAddCleanup(tls, pParse, *(*uintptr)(unsafe.Pointer(&struct { @@ -105324,7 +105382,7 @@ if !((*SrcList)(unsafe.Pointer(pTabList)).FnSrc > int32(uint64(unsafe.Sizeof(Bitmask(0)))*uint64(8))) { goto __2 } - Xsqlite3ErrorMsg(tls, pParse, ts+22015, libc.VaList(bp, int32(uint64(unsafe.Sizeof(Bitmask(0)))*uint64(8)))) + Xsqlite3ErrorMsg(tls, pParse, ts+22062, libc.VaList(bp, int32(uint64(unsafe.Sizeof(Bitmask(0)))*uint64(8)))) return uintptr(0) __2: ; @@ -105388,7 +105446,7 @@ (*WhereInfo)(unsafe.Pointer(pWInfo)).FeDistinct = U8(WHERE_DISTINCT_UNIQUE) __7: ; - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+22043, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+22090, 0) goto __5 __4: ii = 0 @@ -106270,7 +106328,7 @@ error_out: Xsqlite3_result_error(tls, - pCtx, ts+22061, -1) + pCtx, ts+22108, -1) } func nth_valueFinalizeFunc(tls *libc.TLS, pCtx uintptr) { @@ -106403,7 +106461,7 @@ (*NtileCtx)(unsafe.Pointer(p)).FnParam = Xsqlite3_value_int64(tls, *(*uintptr)(unsafe.Pointer(apArg))) if (*NtileCtx)(unsafe.Pointer(p)).FnParam <= int64(0) { Xsqlite3_result_error(tls, - pCtx, ts+22117, -1) + pCtx, ts+22164, -1) } } (*NtileCtx)(unsafe.Pointer(p)).FnTotal++ @@ -106493,17 +106551,17 @@ } } -var row_numberName = *(*[11]int8)(unsafe.Pointer(ts + 22162)) -var dense_rankName = *(*[11]int8)(unsafe.Pointer(ts + 22173)) -var rankName = *(*[5]int8)(unsafe.Pointer(ts + 22184)) -var percent_rankName = *(*[13]int8)(unsafe.Pointer(ts + 22189)) -var cume_distName = *(*[10]int8)(unsafe.Pointer(ts + 22202)) -var ntileName = *(*[6]int8)(unsafe.Pointer(ts + 22212)) -var last_valueName = *(*[11]int8)(unsafe.Pointer(ts + 22218)) -var nth_valueName = *(*[10]int8)(unsafe.Pointer(ts + 22229)) -var first_valueName = *(*[12]int8)(unsafe.Pointer(ts + 22239)) -var leadName = *(*[5]int8)(unsafe.Pointer(ts + 22251)) -var lagName = *(*[4]int8)(unsafe.Pointer(ts + 22256)) +var row_numberName = *(*[11]int8)(unsafe.Pointer(ts + 22209)) +var dense_rankName = *(*[11]int8)(unsafe.Pointer(ts + 22220)) +var rankName = *(*[5]int8)(unsafe.Pointer(ts + 22231)) +var percent_rankName = *(*[13]int8)(unsafe.Pointer(ts + 22236)) +var cume_distName = *(*[10]int8)(unsafe.Pointer(ts + 22249)) +var ntileName = *(*[6]int8)(unsafe.Pointer(ts + 22259)) +var last_valueName = *(*[11]int8)(unsafe.Pointer(ts + 22265)) +var nth_valueName = *(*[10]int8)(unsafe.Pointer(ts + 22276)) +var first_valueName = 
*(*[12]int8)(unsafe.Pointer(ts + 22286)) +var leadName = *(*[5]int8)(unsafe.Pointer(ts + 22298)) +var lagName = *(*[4]int8)(unsafe.Pointer(ts + 22303)) func noopStepFunc(tls *libc.TLS, p uintptr, n int32, a uintptr) { _ = p @@ -106549,7 +106607,7 @@ } } if p == uintptr(0) { - Xsqlite3ErrorMsg(tls, pParse, ts+22260, libc.VaList(bp, zName)) + Xsqlite3ErrorMsg(tls, pParse, ts+22307, libc.VaList(bp, zName)) } return p } @@ -106593,12 +106651,12 @@ ((*Window)(unsafe.Pointer(pWin)).FpStart != 0 || (*Window)(unsafe.Pointer(pWin)).FpEnd != 0) && ((*Window)(unsafe.Pointer(pWin)).FpOrderBy == uintptr(0) || (*ExprList)(unsafe.Pointer((*Window)(unsafe.Pointer(pWin)).FpOrderBy)).FnExpr != 1) { Xsqlite3ErrorMsg(tls, pParse, - ts+22279, 0) + ts+22326, 0) } else if (*FuncDef)(unsafe.Pointer(pFunc)).FfuncFlags&U32(SQLITE_FUNC_WINDOW) != 0 { var db uintptr = (*Parse)(unsafe.Pointer(pParse)).Fdb if (*Window)(unsafe.Pointer(pWin)).FpFilter != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+22350, 0) + ts+22397, 0) } else { *(*[8]WindowUpdate)(unsafe.Pointer(bp)) = [8]WindowUpdate{ {FzFunc: uintptr(unsafe.Pointer(&row_numberName)), FeFrmType: TK_ROWS, FeStart: TK_UNBOUNDED, FeEnd: TK_CURRENT}, @@ -106825,7 +106883,7 @@ if int32((*Expr)(unsafe.Pointer(pExpr)).Fop) == TK_AGG_FUNCTION && (*Expr)(unsafe.Pointer(pExpr)).FpAggInfo == uintptr(0) { Xsqlite3ErrorMsg(tls, (*Walker)(unsafe.Pointer(pWalker)).FpParse, - ts+22413, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(pExpr + 8)))) + ts+22460, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(pExpr + 8)))) } return WRC_Continue } @@ -106941,7 +106999,7 @@ if *(*uintptr)(unsafe.Pointer(bp + 48)) == uintptr(0) { *(*uintptr)(unsafe.Pointer(bp + 48)) = Xsqlite3ExprListAppend(tls, pParse, uintptr(0), - Xsqlite3Expr(tls, db, TK_INTEGER, ts+7523)) + Xsqlite3Expr(tls, db, TK_INTEGER, ts+7512)) } pSub = Xsqlite3SelectNew(tls, @@ -107056,7 +107114,7 @@ eStart == TK_FOLLOWING && (eEnd == TK_PRECEDING || eEnd == TK_CURRENT)) { goto __2 } - Xsqlite3ErrorMsg(tls, pParse, ts+22439, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+22486, 0) goto windowAllocErr __2: ; @@ -107121,15 +107179,15 @@ var zErr uintptr = uintptr(0) if (*Window)(unsafe.Pointer(pWin)).FpPartition != 0 { - zErr = ts + 22471 + zErr = ts + 22518 } else if (*Window)(unsafe.Pointer(pExist)).FpOrderBy != 0 && (*Window)(unsafe.Pointer(pWin)).FpOrderBy != 0 { - zErr = ts + 22488 + zErr = ts + 22535 } else if int32((*Window)(unsafe.Pointer(pExist)).FbImplicitFrame) == 0 { - zErr = ts + 22504 + zErr = ts + 22551 } if zErr != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+22524, libc.VaList(bp, zErr, (*Window)(unsafe.Pointer(pWin)).FzBase)) + ts+22571, libc.VaList(bp, zErr, (*Window)(unsafe.Pointer(pWin)).FzBase)) } else { (*Window)(unsafe.Pointer(pWin)).FpPartition = Xsqlite3ExprListDup(tls, db, (*Window)(unsafe.Pointer(pExist)).FpPartition, 0) if (*Window)(unsafe.Pointer(pExist)).FpOrderBy != 0 { @@ -107150,7 +107208,7 @@ (*Window)(unsafe.Pointer(pWin)).FpOwner = p if (*Expr)(unsafe.Pointer(p)).Fflags&U32(EP_Distinct) != 0 && int32((*Window)(unsafe.Pointer(pWin)).FeFrmType) != TK_FILTER { Xsqlite3ErrorMsg(tls, pParse, - ts+22557, 0) + ts+22604, 0) } } else { Xsqlite3WindowDelete(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pWin) @@ -107306,11 +107364,11 @@ } var azErr = [5]uintptr{ - ts + 22604, - ts + 22657, - ts + 22061, - ts + 22708, - ts + 22760, + ts + 22651, + ts + 22704, + ts + 22108, + ts + 22755, + ts + 22807, } var aOp1 = [5]int32{OP_Ge, OP_Ge, OP_Gt, OP_Ge, OP_Ge} @@ -108705,19 +108763,19 @@ } cnt++ if (*Select)(unsafe.Pointer(pLoop)).FpOrderBy 
!= 0 || (*Select)(unsafe.Pointer(pLoop)).FpLimit != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+22810, + Xsqlite3ErrorMsg(tls, pParse, ts+22857, libc.VaList(bp, func() uintptr { if (*Select)(unsafe.Pointer(pLoop)).FpOrderBy != uintptr(0) { - return ts + 22852 + return ts + 22899 } - return ts + 22861 + return ts + 22908 }(), Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(pNext)).Fop)))) break } } if (*Select)(unsafe.Pointer(p)).FselFlags&U32(SF_MultiValue) == U32(0) && libc.AssignInt32(&mxSelect, *(*int32)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb + 136 + 4*4))) > 0 && cnt > mxSelect { - Xsqlite3ErrorMsg(tls, pParse, ts+22867, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+22914, 0) } } @@ -108785,7 +108843,7 @@ var p uintptr = Xsqlite3ExprListAppend(tls, pParse, pPrior, uintptr(0)) if (hasCollate != 0 || sortOrder != -1) && int32((*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).Finit.Fbusy) == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+22901, + Xsqlite3ErrorMsg(tls, pParse, ts+22948, libc.VaList(bp, (*Token)(unsafe.Pointer(pIdToken)).Fn, (*Token)(unsafe.Pointer(pIdToken)).Fz)) } Xsqlite3ExprListSetName(tls, pParse, p, pIdToken, 1) @@ -109882,7 +109940,7 @@ yy_pop_parser_stack(tls, yypParser) } - Xsqlite3ErrorMsg(tls, pParse, ts+22939, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+22986, 0) (*YyParser)(unsafe.Pointer(yypParser)).FpParse = pParse } @@ -110861,7 +110919,7 @@ *(*U32)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-1)*24 + 8)) = U32(TF_WithoutRowid | TF_NoVisibleRowid) } else { *(*U32)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-1)*24 + 8)) = U32(0) - Xsqlite3ErrorMsg(tls, pParse, ts+22961, libc.VaList(bp, (*Token)(unsafe.Pointer(yymsp+8)).Fn, (*Token)(unsafe.Pointer(yymsp+8)).Fz)) + Xsqlite3ErrorMsg(tls, pParse, ts+23008, libc.VaList(bp, (*Token)(unsafe.Pointer(yymsp+8)).Fn, (*Token)(unsafe.Pointer(yymsp+8)).Fz)) } } break @@ -110871,7 +110929,7 @@ *(*U32)(unsafe.Pointer(bp + 40)) = U32(TF_Strict) } else { *(*U32)(unsafe.Pointer(bp + 40)) = U32(0) - Xsqlite3ErrorMsg(tls, pParse, ts+22961, libc.VaList(bp+16, (*Token)(unsafe.Pointer(yymsp+8)).Fn, (*Token)(unsafe.Pointer(yymsp+8)).Fz)) + Xsqlite3ErrorMsg(tls, pParse, ts+23008, libc.VaList(bp+16, (*Token)(unsafe.Pointer(yymsp+8)).Fn, (*Token)(unsafe.Pointer(yymsp+8)).Fz)) } } *(*U32)(unsafe.Pointer(yymsp + 8)) = *(*U32)(unsafe.Pointer(bp + 40)) @@ -111614,7 +111672,7 @@ case uint32(157): { Xsqlite3SrcListIndexedBy(tls, pParse, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-5)*24 + 8)), yymsp+libc.UintptrFromInt32(-4)*24+8) - Xsqlite3ExprListCheckLength(tls, pParse, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-2)*24 + 8)), ts+22988) + Xsqlite3ExprListCheckLength(tls, pParse, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-2)*24 + 8)), ts+23035) if *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-1)*24 + 8)) != 0 { var pFromClause uintptr = *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-1)*24 + 8)) if (*SrcList)(unsafe.Pointer(pFromClause)).FnSrc > 1 { @@ -111778,7 +111836,7 @@ *(*Token)(unsafe.Pointer(bp + 128)) = *(*Token)(unsafe.Pointer(yymsp + 8)) if int32((*Parse)(unsafe.Pointer(pParse)).Fnested) == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+22997, libc.VaList(bp+32, bp+128)) + Xsqlite3ErrorMsg(tls, pParse, ts+23044, libc.VaList(bp+32, bp+128)) *(*uintptr)(unsafe.Pointer(yymsp + 8)) = uintptr(0) } else { *(*uintptr)(unsafe.Pointer(yymsp + 8)) = Xsqlite3PExpr(tls, pParse, TK_REGISTER, uintptr(0), uintptr(0)) @@ -111995,9 +112053,9 @@ Xsqlite3ExprUnmapAndDelete(tls, 
pParse, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-4)*24 + 8))) *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-4)*24 + 8)) = Xsqlite3Expr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, TK_STRING, func() uintptr { if *(*int32)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-3)*24 + 8)) != 0 { - return ts + 6757 + return ts + 7687 } - return ts + 6762 + return ts + 7692 }()) if *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-4)*24 + 8)) != 0 { Xsqlite3ExprIdToTrueFalse(tls, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-4)*24 + 8))) @@ -112281,19 +112339,19 @@ { *(*Token)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-2)*24 + 8)) = *(*Token)(unsafe.Pointer(yymsp + 8)) Xsqlite3ErrorMsg(tls, pParse, - ts+23021, 0) + ts+23068, 0) } break case uint32(271): { Xsqlite3ErrorMsg(tls, pParse, - ts+23116, 0) + ts+23163, 0) } break case uint32(272): { Xsqlite3ErrorMsg(tls, pParse, - ts+23200, 0) + ts+23247, 0) } break case uint32(273): @@ -112672,9 +112730,9 @@ _ = yymajor if *(*int8)(unsafe.Pointer((*Token)(unsafe.Pointer(bp + 8)).Fz)) != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+22997, libc.VaList(bp, bp+8)) + Xsqlite3ErrorMsg(tls, pParse, ts+23044, libc.VaList(bp, bp+8)) } else { - Xsqlite3ErrorMsg(tls, pParse, ts+23285, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+23332, 0) } (*YyParser)(unsafe.Pointer(yypParser)).FpParse = pParse @@ -113442,7 +113500,7 @@ } else { (*Token)(unsafe.Pointer(bp + 2464)).Fz = zSql (*Token)(unsafe.Pointer(bp + 2464)).Fn = uint32(n) - Xsqlite3ErrorMsg(tls, pParse, ts+23302, libc.VaList(bp, bp+2464)) + Xsqlite3ErrorMsg(tls, pParse, ts+23349, libc.VaList(bp, bp+2464)) break } } @@ -113465,7 +113523,7 @@ if (*Parse)(unsafe.Pointer(pParse)).FzErrMsg == uintptr(0) { (*Parse)(unsafe.Pointer(pParse)).FzErrMsg = Xsqlite3MPrintf(tls, db, ts+3656, libc.VaList(bp+8, Xsqlite3ErrStr(tls, (*Parse)(unsafe.Pointer(pParse)).Frc))) } - Xsqlite3_log(tls, (*Parse)(unsafe.Pointer(pParse)).Frc, ts+23327, libc.VaList(bp+16, (*Parse)(unsafe.Pointer(pParse)).FzErrMsg, (*Parse)(unsafe.Pointer(pParse)).FzTail)) + Xsqlite3_log(tls, (*Parse)(unsafe.Pointer(pParse)).Frc, ts+23374, libc.VaList(bp+16, (*Parse)(unsafe.Pointer(pParse)).FzErrMsg, (*Parse)(unsafe.Pointer(pParse)).FzTail)) nErr++ } (*Parse)(unsafe.Pointer(pParse)).FzTail = zSql @@ -113638,7 +113696,7 @@ fallthrough case 'C': { - if nId == 6 && Xsqlite3_strnicmp(tls, zSql, ts+23338, 6) == 0 { + if nId == 6 && Xsqlite3_strnicmp(tls, zSql, ts+23385, 6) == 0 { token = U8(TkCREATE) } else { token = U8(TkOTHER) @@ -113651,11 +113709,11 @@ fallthrough case 'T': { - if nId == 7 && Xsqlite3_strnicmp(tls, zSql, ts+19924, 7) == 0 { + if nId == 7 && Xsqlite3_strnicmp(tls, zSql, ts+19971, 7) == 0 { token = U8(TkTRIGGER) - } else if nId == 4 && Xsqlite3_strnicmp(tls, zSql, ts+23345, 4) == 0 { + } else if nId == 4 && Xsqlite3_strnicmp(tls, zSql, ts+23392, 4) == 0 { token = U8(TkTEMP) - } else if nId == 9 && Xsqlite3_strnicmp(tls, zSql, ts+23350, 9) == 0 { + } else if nId == 9 && Xsqlite3_strnicmp(tls, zSql, ts+23397, 9) == 0 { token = U8(TkTEMP) } else { token = U8(TkOTHER) @@ -113668,9 +113726,9 @@ fallthrough case 'E': { - if nId == 3 && Xsqlite3_strnicmp(tls, zSql, ts+23360, 3) == 0 { + if nId == 3 && Xsqlite3_strnicmp(tls, zSql, ts+23407, 3) == 0 { token = U8(TkEND) - } else if nId == 7 && Xsqlite3_strnicmp(tls, zSql, ts+23364, 7) == 0 { + } else if nId == 7 && Xsqlite3_strnicmp(tls, zSql, ts+23411, 7) == 0 { token = U8(TkEXPLAIN) } else { token = U8(TkOTHER) @@ -113904,7 +113962,7 @@ var rc int32 = SQLITE_OK if 
Xsqlite3Config.FisInit != 0 { - return Xsqlite3MisuseError(tls, 174337) + return Xsqlite3MisuseError(tls, 174426) } ap = va @@ -114479,7 +114537,7 @@ return SQLITE_OK } if !(Xsqlite3SafetyCheckSickOrOk(tls, db) != 0) { - return Xsqlite3MisuseError(tls, 175111) + return Xsqlite3MisuseError(tls, 175200) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) if int32((*Sqlite3)(unsafe.Pointer(db)).FmTrace)&SQLITE_TRACE_CLOSE != 0 { @@ -114494,7 +114552,7 @@ if !(forceZombie != 0) && connectionIsBusy(tls, db) != 0 { Xsqlite3ErrorWithMsg(tls, db, SQLITE_BUSY, - ts+23372, 0) + ts+23419, 0) Xsqlite3_mutex_leave(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) return SQLITE_BUSY } @@ -114685,23 +114743,23 @@ // Return a static string that describes the kind of error specified in the // argument. func Xsqlite3ErrStr(tls *libc.TLS, rc int32) uintptr { - var zErr uintptr = ts + 23440 + var zErr uintptr = ts + 23487 switch rc { case SQLITE_ABORT | int32(2)<<8: { - zErr = ts + 23454 + zErr = ts + 23501 break } case SQLITE_ROW: { - zErr = ts + 23476 + zErr = ts + 23523 break } case SQLITE_DONE: { - zErr = ts + 23498 + zErr = ts + 23545 break } @@ -114719,35 +114777,35 @@ } var aMsg = [29]uintptr{ - ts + 23521, - ts + 23534, + ts + 23568, + ts + 23581, uintptr(0), - ts + 23550, - ts + 23575, - ts + 23589, - ts + 23608, + ts + 23597, + ts + 23622, + ts + 23636, + ts + 23655, ts + 1483, - ts + 23633, - ts + 23670, - ts + 23682, - ts + 23697, - ts + 23730, - ts + 23748, - ts + 23773, - ts + 23802, + ts + 23680, + ts + 23717, + ts + 23729, + ts + 23744, + ts + 23777, + ts + 23795, + ts + 23820, + ts + 23849, uintptr(0), ts + 5831, ts + 5327, - ts + 23819, - ts + 23837, - ts + 23855, - uintptr(0), - ts + 23889, + ts + 23866, + ts + 23884, + ts + 23902, uintptr(0), - ts + 23910, ts + 23936, - ts + 23959, - ts + 23980, + uintptr(0), + ts + 23957, + ts + 23983, + ts + 24006, + ts + 24027, } func sqliteDefaultBusyCallback(tls *libc.TLS, ptr uintptr, count int32) int32 { @@ -114868,7 +114926,7 @@ libc.Bool32(xValue == uintptr(0)) != libc.Bool32(xInverse == uintptr(0)) || (nArg < -1 || nArg > SQLITE_MAX_FUNCTION_ARG) || 255 < Xsqlite3Strlen30(tls, zFunctionName) { - return Xsqlite3MisuseError(tls, 175758) + return Xsqlite3MisuseError(tls, 175847) } extraFlags = enc & (SQLITE_DETERMINISTIC | SQLITE_DIRECTONLY | SQLITE_SUBTYPE | SQLITE_INNOCUOUS) @@ -114913,7 +114971,7 @@ if p != 0 && (*FuncDef)(unsafe.Pointer(p)).FfuncFlags&U32(SQLITE_FUNC_ENCMASK) == U32(enc) && int32((*FuncDef)(unsafe.Pointer(p)).FnArg) == nArg { if (*Sqlite3)(unsafe.Pointer(db)).FnVdbeActive != 0 { Xsqlite3ErrorWithMsg(tls, db, SQLITE_BUSY, - ts+23996, 0) + ts+24043, 0) return SQLITE_BUSY } else { @@ -115030,7 +115088,7 @@ _ = NotUsed _ = NotUsed2 zErr = Xsqlite3_mprintf(tls, - ts+24059, libc.VaList(bp, zName)) + ts+24106, libc.VaList(bp, zName)) Xsqlite3_result_error(tls, context, zErr, -1) Xsqlite3_free(tls, zErr) } @@ -115266,7 +115324,7 @@ } if iDb < 0 { rc = SQLITE_ERROR - Xsqlite3ErrorWithMsg(tls, db, SQLITE_ERROR, ts+24110, libc.VaList(bp, zDb)) + Xsqlite3ErrorWithMsg(tls, db, SQLITE_ERROR, ts+24157, libc.VaList(bp, zDb)) } else { (*Sqlite3)(unsafe.Pointer(db)).FbusyHandler.FnBusy = 0 rc = Xsqlite3Checkpoint(tls, db, iDb, eMode, pnLog, pnCkpt) @@ -115359,7 +115417,7 @@ return Xsqlite3ErrStr(tls, SQLITE_NOMEM) } if !(Xsqlite3SafetyCheckSickOrOk(tls, db) != 0) { - return Xsqlite3ErrStr(tls, Xsqlite3MisuseError(tls, 176503)) + return Xsqlite3ErrStr(tls, Xsqlite3MisuseError(tls, 176592)) } Xsqlite3_mutex_enter(tls, 
(*Sqlite3)(unsafe.Pointer(db)).Fmutex) if (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { @@ -115429,7 +115487,7 @@ // passed to this function, we assume a malloc() failed during sqlite3_open(). func Xsqlite3_errcode(tls *libc.TLS, db uintptr) int32 { if db != 0 && !(Xsqlite3SafetyCheckSickOrOk(tls, db) != 0) { - return Xsqlite3MisuseError(tls, 176582) + return Xsqlite3MisuseError(tls, 176671) } if !(db != 0) || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { return SQLITE_NOMEM @@ -115439,7 +115497,7 @@ func Xsqlite3_extended_errcode(tls *libc.TLS, db uintptr) int32 { if db != 0 && !(Xsqlite3SafetyCheckSickOrOk(tls, db) != 0) { - return Xsqlite3MisuseError(tls, 176591) + return Xsqlite3MisuseError(tls, 176680) } if !(db != 0) || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { return SQLITE_NOMEM @@ -115471,14 +115529,14 @@ enc2 = SQLITE_UTF16LE } if enc2 < SQLITE_UTF8 || enc2 > SQLITE_UTF16BE { - return Xsqlite3MisuseError(tls, 176639) + return Xsqlite3MisuseError(tls, 176728) } pColl = Xsqlite3FindCollSeq(tls, db, U8(enc2), zName, 0) if pColl != 0 && (*CollSeq)(unsafe.Pointer(pColl)).FxCmp != 0 { if (*Sqlite3)(unsafe.Pointer(db)).FnVdbeActive != 0 { Xsqlite3ErrorWithMsg(tls, db, SQLITE_BUSY, - ts+24131, 0) + ts+24178, 0) return SQLITE_BUSY } Xsqlite3ExpirePreparedStatements(tls, db, 0) @@ -115608,7 +115666,7 @@ if !((flags&uint32(SQLITE_OPEN_URI) != 0 || Xsqlite3Config.FbOpenUri != 0) && - nUri >= 5 && libc.Xmemcmp(tls, zUri, ts+24199, uint64(5)) == 0) { + nUri >= 5 && libc.Xmemcmp(tls, zUri, ts+24246, uint64(5)) == 0) { goto __1 } iOut = 0 @@ -115653,10 +115711,10 @@ goto __8 __9: ; - if !(iIn != 7 && (iIn != 16 || libc.Xmemcmp(tls, ts+24205, zUri+7, uint64(9)) != 0)) { + if !(iIn != 7 && (iIn != 16 || libc.Xmemcmp(tls, ts+24252, zUri+7, uint64(9)) != 0)) { goto __10 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24215, + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24262, libc.VaList(bp, iIn-7, zUri+7)) rc = SQLITE_ERROR goto parse_uri_out @@ -115761,7 +115819,7 @@ zVal = zOpt + uintptr(nOpt+1) nVal = Xsqlite3Strlen30(tls, zVal) - if !(nOpt == 3 && libc.Xmemcmp(tls, ts+24243, zOpt, uint64(3)) == 0) { + if !(nOpt == 3 && libc.Xmemcmp(tls, ts+24290, zOpt, uint64(3)) == 0) { goto __29 } zVfs = zVal @@ -115772,17 +115830,17 @@ mask = 0 limit = 0 - if !(nOpt == 5 && libc.Xmemcmp(tls, ts+24247, zOpt, uint64(5)) == 0) { + if !(nOpt == 5 && libc.Xmemcmp(tls, ts+24294, zOpt, uint64(5)) == 0) { goto __31 } mask = SQLITE_OPEN_SHAREDCACHE | SQLITE_OPEN_PRIVATECACHE aMode = uintptr(unsafe.Pointer(&aCacheMode)) limit = mask - zModeType = ts + 24247 + zModeType = ts + 24294 __31: ; - if !(nOpt == 4 && libc.Xmemcmp(tls, ts+24253, zOpt, uint64(4)) == 0) { + if !(nOpt == 4 && libc.Xmemcmp(tls, ts+24300, zOpt, uint64(4)) == 0) { goto __32 } @@ -115820,7 +115878,7 @@ if !(mode == 0) { goto __38 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24258, libc.VaList(bp+16, zModeType, zVal)) + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24305, libc.VaList(bp+16, zModeType, zVal)) rc = SQLITE_ERROR goto parse_uri_out __38: @@ -115828,7 +115886,7 @@ if !(mode&libc.CplInt32(SQLITE_OPEN_MEMORY) > limit) { goto __39 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24278, + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24325, libc.VaList(bp+32, zModeType, zVal)) rc = SQLITE_PERM goto parse_uri_out @@ -115868,7 +115926,7 @@ if !(*(*uintptr)(unsafe.Pointer(ppVfs)) == uintptr(0)) { goto 
__42 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24302, libc.VaList(bp+48, zVfs)) + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24349, libc.VaList(bp+48, zVfs)) rc = SQLITE_ERROR __42: ; @@ -115892,14 +115950,14 @@ } var aCacheMode = [3]OpenMode{ - {Fz: ts + 24318, Fmode: SQLITE_OPEN_SHAREDCACHE}, - {Fz: ts + 24325, Fmode: SQLITE_OPEN_PRIVATECACHE}, + {Fz: ts + 24365, Fmode: SQLITE_OPEN_SHAREDCACHE}, + {Fz: ts + 24372, Fmode: SQLITE_OPEN_PRIVATECACHE}, {}, } var aOpenMode = [5]OpenMode{ - {Fz: ts + 24333, Fmode: SQLITE_OPEN_READONLY}, - {Fz: ts + 24336, Fmode: SQLITE_OPEN_READWRITE}, - {Fz: ts + 24339, Fmode: SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE}, + {Fz: ts + 24380, Fmode: SQLITE_OPEN_READONLY}, + {Fz: ts + 24383, Fmode: SQLITE_OPEN_READWRITE}, + {Fz: ts + 24386, Fmode: SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE}, {Fz: ts + 17355, Fmode: SQLITE_OPEN_MEMORY}, {}, } @@ -116046,10 +116104,10 @@ createCollation(tls, db, uintptr(unsafe.Pointer(&Xsqlite3StrBINARY)), uint8(SQLITE_UTF16LE), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr, int32, uintptr) int32 }{binCollFunc})), uintptr(0)) - createCollation(tls, db, ts+21856, uint8(SQLITE_UTF8), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + createCollation(tls, db, ts+21903, uint8(SQLITE_UTF8), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr, int32, uintptr) int32 }{nocaseCollatingFunc})), uintptr(0)) - createCollation(tls, db, ts+24343, uint8(SQLITE_UTF8), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + createCollation(tls, db, ts+24390, uint8(SQLITE_UTF8), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr, int32, uintptr) int32 }{rtrimCollFunc})), uintptr(0)) if !((*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0) { @@ -116063,7 +116121,7 @@ if !(int32(1)<<(*(*uint32)(unsafe.Pointer(bp + 8))&uint32(7))&0x46 == 0) { goto __16 } - rc = Xsqlite3MisuseError(tls, 177308) + rc = Xsqlite3MisuseError(tls, 177397) goto __17 __16: rc = Xsqlite3ParseUri(tls, zVfs, zFilename, bp+8, db, bp+16, bp+24) @@ -116116,7 +116174,7 @@ (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb)).FzDbSName = ts + 6434 (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb)).Fsafety_level = U8(SQLITE_DEFAULT_SYNCHRONOUS + 1) - (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + 1*32)).FzDbSName = ts + 23345 + (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + 1*32)).FzDbSName = ts + 23392 (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + 1*32)).Fsafety_level = U8(PAGER_SYNCHRONOUS_OFF) (*Sqlite3)(unsafe.Pointer(db)).FeOpenState = U8(SQLITE_STATE_OPEN) @@ -116221,7 +116279,7 @@ return rc } if zFilename == uintptr(0) { - zFilename = ts + 24349 + zFilename = ts + 24396 } pVal = Xsqlite3ValueNew(tls, uintptr(0)) Xsqlite3ValueSetStr(tls, pVal, -1, zFilename, uint8(SQLITE_UTF16LE), uintptr(0)) @@ -116324,21 +116382,21 @@ bp := tls.Alloc(24) defer tls.Free(24) - Xsqlite3_log(tls, iErr, ts+24352, + Xsqlite3_log(tls, iErr, ts+24399, libc.VaList(bp, zType, lineno, uintptr(20)+Xsqlite3_sourceid(tls))) return iErr } func Xsqlite3CorruptError(tls *libc.TLS, lineno int32) int32 { - return Xsqlite3ReportError(tls, SQLITE_CORRUPT, lineno, ts+24377) + return Xsqlite3ReportError(tls, SQLITE_CORRUPT, lineno, ts+24424) } func Xsqlite3MisuseError(tls *libc.TLS, lineno int32) int32 { - return Xsqlite3ReportError(tls, SQLITE_MISUSE, lineno, ts+24397) + return Xsqlite3ReportError(tls, 
SQLITE_MISUSE, lineno, ts+24444) } func Xsqlite3CantopenError(tls *libc.TLS, lineno int32) int32 { - return Xsqlite3ReportError(tls, SQLITE_CANTOPEN, lineno, ts+24404) + return Xsqlite3ReportError(tls, SQLITE_CANTOPEN, lineno, ts+24451) } // This is a convenience routine that makes sure that all thread-specific @@ -116496,7 +116554,7 @@ goto __20 } Xsqlite3DbFree(tls, db, *(*uintptr)(unsafe.Pointer(bp + 24))) - *(*uintptr)(unsafe.Pointer(bp + 24)) = Xsqlite3MPrintf(tls, db, ts+24421, libc.VaList(bp, zTableName, + *(*uintptr)(unsafe.Pointer(bp + 24)) = Xsqlite3MPrintf(tls, db, ts+24468, libc.VaList(bp, zTableName, zColumnName)) rc = SQLITE_ERROR __20: @@ -117152,7 +117210,7 @@ azCompileOpt = Xsqlite3CompileOptions(tls, bp) - if Xsqlite3_strnicmp(tls, zOptName, ts+24449, 7) == 0 { + if Xsqlite3_strnicmp(tls, zOptName, ts+24496, 7) == 0 { zOptName += uintptr(7) } n = Xsqlite3Strlen30(tls, zOptName) @@ -117270,7 +117328,7 @@ Xsqlite3ErrorWithMsg(tls, db, rc, func() uintptr { if rc != 0 { - return ts + 24457 + return ts + 24504 } return uintptr(0) }(), 0) @@ -117448,7 +117506,7 @@ type JsonParse = JsonParse1 var jsonType = [8]uintptr{ - ts + 6174, ts + 6757, ts + 6762, ts + 6184, ts + 6179, ts + 7998, ts + 24480, ts + 24486, + ts + 6174, ts + 7687, ts + 7692, ts + 6184, ts + 6179, ts + 7998, ts + 24527, ts + 24533, } func jsonZero(tls *libc.TLS, p uintptr) { @@ -117601,7 +117659,7 @@ *(*int8)(unsafe.Pointer((*JsonString)(unsafe.Pointer(p)).FzBuf + uintptr(libc.PostIncUint64(&(*JsonString)(unsafe.Pointer(p)).FnUsed, 1)))) = int8('0') *(*int8)(unsafe.Pointer((*JsonString)(unsafe.Pointer(p)).FzBuf + uintptr(libc.PostIncUint64(&(*JsonString)(unsafe.Pointer(p)).FnUsed, 1)))) = int8('0') *(*int8)(unsafe.Pointer((*JsonString)(unsafe.Pointer(p)).FzBuf + uintptr(libc.PostIncUint64(&(*JsonString)(unsafe.Pointer(p)).FnUsed, 1)))) = int8('0' + int32(c)>>4) - c = uint8(*(*int8)(unsafe.Pointer(ts + 24493 + uintptr(int32(c)&0xf)))) + c = uint8(*(*int8)(unsafe.Pointer(ts + 24540 + uintptr(int32(c)&0xf)))) __8: ; __6: @@ -117656,7 +117714,7 @@ default: { if int32((*JsonString)(unsafe.Pointer(p)).FbErr) == 0 { - Xsqlite3_result_error(tls, (*JsonString)(unsafe.Pointer(p)).FpCtx, ts+24510, -1) + Xsqlite3_result_error(tls, (*JsonString)(unsafe.Pointer(p)).FpCtx, ts+24557, -1) (*JsonString)(unsafe.Pointer(p)).FbErr = U8(2) jsonReset(tls, p) } @@ -117720,13 +117778,13 @@ } case JSON_TRUE: { - jsonAppendRaw(tls, pOut, ts+6757, uint32(4)) + jsonAppendRaw(tls, pOut, ts+7687, uint32(4)) break } case JSON_FALSE: { - jsonAppendRaw(tls, pOut, ts+6762, uint32(5)) + jsonAppendRaw(tls, pOut, ts+7692, uint32(5)) break } @@ -118276,12 +118334,12 @@ jsonParseAddNode(tls, pParse, uint32(JSON_NULL), uint32(0), uintptr(0)) return int32(i + U32(4)) } else if int32(c) == 't' && - libc.Xstrncmp(tls, z+uintptr(i), ts+6757, uint64(4)) == 0 && + libc.Xstrncmp(tls, z+uintptr(i), ts+7687, uint64(4)) == 0 && !(int32(Xsqlite3CtypeMap[uint8(*(*int8)(unsafe.Pointer(z + uintptr(i+U32(4)))))])&0x06 != 0) { jsonParseAddNode(tls, pParse, uint32(JSON_TRUE), uint32(0), uintptr(0)) return int32(i + U32(4)) } else if int32(c) == 'f' && - libc.Xstrncmp(tls, z+uintptr(i), ts+6762, uint64(5)) == 0 && + libc.Xstrncmp(tls, z+uintptr(i), ts+7692, uint64(5)) == 0 && !(int32(Xsqlite3CtypeMap[uint8(*(*int8)(unsafe.Pointer(z + uintptr(i+U32(5)))))])&0x06 != 0) { jsonParseAddNode(tls, pParse, uint32(JSON_FALSE), uint32(0), uintptr(0)) return int32(i + U32(5)) @@ -118382,7 +118440,7 @@ if (*JsonParse)(unsafe.Pointer(pParse)).Foom != 0 { 
Xsqlite3_result_error_nomem(tls, pCtx) } else { - Xsqlite3_result_error(tls, pCtx, ts+24539, -1) + Xsqlite3_result_error(tls, pCtx, ts+24586, -1) } } jsonParseReset(tls, pParse) @@ -118688,7 +118746,7 @@ } if int32(*(*int8)(unsafe.Pointer(zPath))) == '.' { jsonParseAddNode(tls, pParse, uint32(JSON_OBJECT), uint32(0), uintptr(0)) - } else if libc.Xstrncmp(tls, zPath, ts+24554, uint64(3)) == 0 { + } else if libc.Xstrncmp(tls, zPath, ts+24601, uint64(3)) == 0 { jsonParseAddNode(tls, pParse, uint32(JSON_ARRAY), uint32(0), uintptr(0)) } else { return uintptr(0) @@ -118703,7 +118761,7 @@ bp := tls.Alloc(8) defer tls.Free(8) - return Xsqlite3_mprintf(tls, ts+24558, libc.VaList(bp, zErr)) + return Xsqlite3_mprintf(tls, ts+24605, libc.VaList(bp, zErr)) } func jsonLookup(tls *libc.TLS, pParse uintptr, zPath uintptr, pApnd uintptr, pCtx uintptr) uintptr { @@ -118757,7 +118815,7 @@ bp := tls.Alloc(8) defer tls.Free(8) - var zMsg uintptr = Xsqlite3_mprintf(tls, ts+24584, + var zMsg uintptr = Xsqlite3_mprintf(tls, ts+24631, libc.VaList(bp, zFuncName)) Xsqlite3_result_error(tls, pCtx, zMsg, -1) Xsqlite3_free(tls, zMsg) @@ -118862,11 +118920,11 @@ if int32(*(*int8)(unsafe.Pointer(zPath))) != '$' { jsonInit(tls, bp, ctx) if int32(Xsqlite3CtypeMap[uint8(*(*int8)(unsafe.Pointer(zPath)))])&0x04 != 0 { - jsonAppendRaw(tls, bp, ts+24627, uint32(2)) + jsonAppendRaw(tls, bp, ts+24674, uint32(2)) jsonAppendRaw(tls, bp, zPath, uint32(int32(libc.Xstrlen(tls, zPath)))) jsonAppendRaw(tls, bp, ts+4991, uint32(2)) } else { - jsonAppendRaw(tls, bp, ts+24630, uint32(1+libc.Bool32(int32(*(*int8)(unsafe.Pointer(zPath))) != '['))) + jsonAppendRaw(tls, bp, ts+24677, uint32(1+libc.Bool32(int32(*(*int8)(unsafe.Pointer(zPath))) != '['))) jsonAppendRaw(tls, bp, zPath, uint32(int32(libc.Xstrlen(tls, zPath)))) jsonAppendChar(tls, bp, int8(0)) } @@ -119023,14 +119081,14 @@ if argc&1 != 0 { Xsqlite3_result_error(tls, ctx, - ts+24633, -1) + ts+24680, -1) return } jsonInit(tls, bp, ctx) jsonAppendChar(tls, bp, int8('{')) for i = 0; i < argc; i = i + 2 { if Xsqlite3_value_type(tls, *(*uintptr)(unsafe.Pointer(argv + uintptr(i)*8))) != SQLITE_TEXT { - Xsqlite3_result_error(tls, ctx, ts+24684, -1) + Xsqlite3_result_error(tls, ctx, ts+24731, -1) jsonReset(tls, bp) return } @@ -119200,9 +119258,9 @@ } jsonWrongNumArgs(tls, ctx, func() uintptr { if bIsSet != 0 { - return ts + 24718 + return ts + 24765 } - return ts + 24722 + return ts + 24769 }()) return __2: @@ -119335,7 +119393,7 @@ (*JsonString)(unsafe.Pointer(pStr)).FnUsed-- } } else { - Xsqlite3_result_text(tls, ctx, ts+24729, 2, uintptr(0)) + Xsqlite3_result_text(tls, ctx, ts+24776, 2, uintptr(0)) } Xsqlite3_result_subtype(tls, ctx, uint32(JSON_SUBTYPE)) } @@ -119432,7 +119490,7 @@ (*JsonString)(unsafe.Pointer(pStr)).FnUsed-- } } else { - Xsqlite3_result_text(tls, ctx, ts+24732, 2, uintptr(0)) + Xsqlite3_result_text(tls, ctx, ts+24779, 2, uintptr(0)) } Xsqlite3_result_subtype(tls, ctx, uint32(JSON_SUBTYPE)) } @@ -119476,7 +119534,7 @@ _ = argc _ = pAux rc = Xsqlite3_declare_vtab(tls, db, - ts+24735) + ts+24782) if rc == SQLITE_OK { pNew = libc.AssignPtrUintptr(ppVtab, Xsqlite3_malloc(tls, int32(unsafe.Sizeof(Sqlite3_vtab{})))) if pNew == uintptr(0) { @@ -119607,7 +119665,7 @@ nn = nn - 2 } } - jsonPrintf(tls, nn+2, pStr, ts+24818, libc.VaList(bp, nn, z)) + jsonPrintf(tls, nn+2, pStr, ts+24865, libc.VaList(bp, nn, z)) } func jsonEachComputePath(tls *libc.TLS, p uintptr, pStr uintptr, i U32) { @@ -119626,7 +119684,7 @@ pNode = (*JsonEachCursor)(unsafe.Pointer(p)).FsParse.FaNode + 
uintptr(i)*16 pUp = (*JsonEachCursor)(unsafe.Pointer(p)).FsParse.FaNode + uintptr(iUp)*16 if int32((*JsonNode)(unsafe.Pointer(pUp)).FeType) == JSON_ARRAY { - jsonPrintf(tls, 30, pStr, ts+24824, libc.VaList(bp, *(*U32)(unsafe.Pointer(pUp + 8)))) + jsonPrintf(tls, 30, pStr, ts+24871, libc.VaList(bp, *(*U32)(unsafe.Pointer(pUp + 8)))) } else { if int32((*JsonNode)(unsafe.Pointer(pNode)).FjnFlags)&JNODE_LABEL == 0 { pNode -= 16 @@ -119722,7 +119780,7 @@ jsonAppendChar(tls, bp+8, int8('$')) } if int32((*JsonEachCursor)(unsafe.Pointer(p)).FeType) == JSON_ARRAY { - jsonPrintf(tls, 30, bp+8, ts+24824, libc.VaList(bp, (*JsonEachCursor)(unsafe.Pointer(p)).FiRowid)) + jsonPrintf(tls, 30, bp+8, ts+24871, libc.VaList(bp, (*JsonEachCursor)(unsafe.Pointer(p)).FiRowid)) } else if int32((*JsonEachCursor)(unsafe.Pointer(p)).FeType) == JSON_OBJECT { jsonAppendObjectPathElement(tls, bp+8, pThis) } @@ -119746,7 +119804,7 @@ { var zRoot uintptr = (*JsonEachCursor)(unsafe.Pointer(p)).FzRoot if zRoot == uintptr(0) { - zRoot = ts + 24829 + zRoot = ts + 24876 } Xsqlite3_result_text(tls, ctx, zRoot, -1, uintptr(0)) break @@ -119868,7 +119926,7 @@ var rc int32 = SQLITE_NOMEM if int32((*JsonEachCursor)(unsafe.Pointer(p)).FsParse.Foom) == 0 { Xsqlite3_free(tls, (*Sqlite3_vtab)(unsafe.Pointer((*Sqlite3_vtab_cursor)(unsafe.Pointer(cur)).FpVtab)).FzErrMsg) - (*Sqlite3_vtab)(unsafe.Pointer((*Sqlite3_vtab_cursor)(unsafe.Pointer(cur)).FpVtab)).FzErrMsg = Xsqlite3_mprintf(tls, ts+24539, 0) + (*Sqlite3_vtab)(unsafe.Pointer((*Sqlite3_vtab_cursor)(unsafe.Pointer(cur)).FpVtab)).FzErrMsg = Xsqlite3_mprintf(tls, ts+24586, 0) if (*Sqlite3_vtab)(unsafe.Pointer((*Sqlite3_vtab_cursor)(unsafe.Pointer(cur)).FpVtab)).FzErrMsg != 0 { rc = SQLITE_ERROR } @@ -119963,25 +120021,25 @@ } var aJsonFunc = [19]FuncDef{ - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24831}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24836}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24847}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24847}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24865}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_JSON)), FxSFunc: 0, FzName: ts + 24878}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_SQL)), FxSFunc: 0, FzName: ts + 24881}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24885}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24897}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24909}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24920}, - {FnArg: int8(-1), FfuncFlags: 
U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24931}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24943}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_ISSET)), FxSFunc: 0, FzName: ts + 24956}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24965}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24965}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24975}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_UTF8 | 0*SQLITE_FUNC_NEEDCOLL | SQLITE_SUBTYPE | SQLITE_UTF8 | SQLITE_DETERMINISTIC), FxSFunc: 0, FxFinalize: 0, FxValue: 0, FxInverse: 0, FzName: ts + 24986}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_UTF8 | 0*SQLITE_FUNC_NEEDCOLL | SQLITE_SUBTYPE | SQLITE_UTF8 | SQLITE_DETERMINISTIC), FxSFunc: 0, FxFinalize: 0, FxValue: 0, FxInverse: 0, FzName: ts + 25003}} + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24878}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24883}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24894}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24894}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24912}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_JSON)), FxSFunc: 0, FzName: ts + 24925}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_SQL)), FxSFunc: 0, FzName: ts + 24928}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24932}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24944}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24956}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24967}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24978}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24990}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_ISSET)), 
FxSFunc: 0, FzName: ts + 25003}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25012}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25012}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25022}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_UTF8 | 0*SQLITE_FUNC_NEEDCOLL | SQLITE_SUBTYPE | SQLITE_UTF8 | SQLITE_DETERMINISTIC), FxSFunc: 0, FxFinalize: 0, FxValue: 0, FxInverse: 0, FzName: ts + 25033}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_UTF8 | 0*SQLITE_FUNC_NEEDCOLL | SQLITE_SUBTYPE | SQLITE_UTF8 | SQLITE_DETERMINISTIC), FxSFunc: 0, FxFinalize: 0, FxValue: 0, FxInverse: 0, FzName: ts + 25050}} // Register the JSON table-valued functions func Xsqlite3JsonTableFunctions(tls *libc.TLS, db uintptr) int32 { @@ -120000,8 +120058,8 @@ FzName uintptr FpModule uintptr }{ - {FzName: ts + 25021, FpModule: 0}, - {FzName: ts + 25031, FpModule: 0}, + {FzName: ts + 25068, FpModule: 0}, + {FzName: ts + 25078, FpModule: 0}, } type Rtree1 = struct { @@ -120261,11 +120319,11 @@ } } if (*Rtree)(unsafe.Pointer(pRtree)).FpNodeBlob == uintptr(0) { - var zTab uintptr = Xsqlite3_mprintf(tls, ts+25041, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) + var zTab uintptr = Xsqlite3_mprintf(tls, ts+25088, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) if zTab == uintptr(0) { return SQLITE_NOMEM } - rc = Xsqlite3_blob_open(tls, (*Rtree)(unsafe.Pointer(pRtree)).Fdb, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, zTab, ts+25049, iNode, 0, + rc = Xsqlite3_blob_open(tls, (*Rtree)(unsafe.Pointer(pRtree)).Fdb, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, zTab, ts+25096, iNode, 0, pRtree+112) Xsqlite3_free(tls, zTab) } @@ -120476,7 +120534,7 @@ var pRtree uintptr = pVtab var rc int32 var zCreate uintptr = Xsqlite3_mprintf(tls, - ts+25054, + ts+25101, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) @@ -121179,7 +121237,7 @@ var pSrc uintptr var pInfo uintptr - pSrc = Xsqlite3_value_pointer(tls, pValue, ts+25136) + pSrc = Xsqlite3_value_pointer(tls, pValue, ts+25183) if pSrc == uintptr(0) { return SQLITE_ERROR } @@ -122520,7 +122578,7 @@ var zSql uintptr var rc int32 - zSql = Xsqlite3_mprintf(tls, ts+25150, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) + zSql = Xsqlite3_mprintf(tls, ts+25197, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) if zSql != 0 { rc = Xsqlite3_prepare_v2(tls, (*Rtree)(unsafe.Pointer(pRtree)).Fdb, zSql, -1, bp+56, uintptr(0)) } else { @@ -122532,12 +122590,12 @@ if iCol == 0 { var zCol uintptr = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), 0) (*Rtree)(unsafe.Pointer(pRtree)).Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+25170, libc.VaList(bp+16, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zCol)) + ts+25217, libc.VaList(bp+16, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zCol)) } else { var zCol1 uintptr = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), iCol) var zCol2 uintptr = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), 
iCol+1) (*Rtree)(unsafe.Pointer(pRtree)).Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+25202, libc.VaList(bp+32, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zCol1, zCol2)) + ts+25249, libc.VaList(bp+32, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zCol1, zCol2)) } } @@ -122763,7 +122821,7 @@ var pRtree uintptr = pVtab var rc int32 = SQLITE_NOMEM var zSql uintptr = Xsqlite3_mprintf(tls, - ts+25239, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName)) + ts+25286, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName)) if zSql != 0 { nodeBlobReset(tls, pRtree) rc = Xsqlite3_exec(tls, (*Rtree)(unsafe.Pointer(pRtree)).Fdb, zSql, uintptr(0), uintptr(0), uintptr(0)) @@ -122786,7 +122844,7 @@ bp := tls.Alloc(24) defer tls.Free(24) - var zFmt uintptr = ts + 25384 + var zFmt uintptr = ts + 25431 var zSql uintptr var rc int32 @@ -122834,7 +122892,7 @@ } var azName1 = [3]uintptr{ - ts + 25440, ts + 5053, ts + 16260, + ts + 25487, ts + 5053, ts + 16260, } var rtreeModule = Sqlite3_module{ @@ -122877,19 +122935,19 @@ var p uintptr = Xsqlite3_str_new(tls, db) var ii int32 Xsqlite3_str_appendf(tls, p, - ts+25445, + ts+25492, libc.VaList(bp, zDb, zPrefix)) for ii = 0; ii < int32((*Rtree)(unsafe.Pointer(pRtree)).FnAux); ii++ { - Xsqlite3_str_appendf(tls, p, ts+25507, libc.VaList(bp+16, ii)) + Xsqlite3_str_appendf(tls, p, ts+25554, libc.VaList(bp+16, ii)) } Xsqlite3_str_appendf(tls, p, - ts+25512, + ts+25559, libc.VaList(bp+24, zDb, zPrefix)) Xsqlite3_str_appendf(tls, p, - ts+25576, + ts+25623, libc.VaList(bp+40, zDb, zPrefix)) Xsqlite3_str_appendf(tls, p, - ts+25646, + ts+25693, libc.VaList(bp+56, zDb, zPrefix, (*Rtree)(unsafe.Pointer(pRtree)).FiNodeSize)) zCreate = Xsqlite3_str_finish(tls, p) if !(zCreate != 0) { @@ -122918,7 +122976,7 @@ if i != 3 || int32((*Rtree)(unsafe.Pointer(pRtree)).FnAux) == 0 { zFormat = azSql[i] } else { - zFormat = ts + 25695 + zFormat = ts + 25742 } zSql = Xsqlite3_mprintf(tls, zFormat, libc.VaList(bp+80, zDb, zPrefix)) if zSql != 0 { @@ -122930,7 +122988,7 @@ } if (*Rtree)(unsafe.Pointer(pRtree)).FnAux != 0 { (*Rtree)(unsafe.Pointer(pRtree)).FzReadAuxSql = Xsqlite3_mprintf(tls, - ts+25803, + ts+25850, libc.VaList(bp+96, zDb, zPrefix)) if (*Rtree)(unsafe.Pointer(pRtree)).FzReadAuxSql == uintptr(0) { rc = SQLITE_NOMEM @@ -122938,18 +122996,18 @@ var p uintptr = Xsqlite3_str_new(tls, db) var ii int32 var zSql uintptr - Xsqlite3_str_appendf(tls, p, ts+25848, libc.VaList(bp+112, zDb, zPrefix)) + Xsqlite3_str_appendf(tls, p, ts+25895, libc.VaList(bp+112, zDb, zPrefix)) for ii = 0; ii < int32((*Rtree)(unsafe.Pointer(pRtree)).FnAux); ii++ { if ii != 0 { Xsqlite3_str_append(tls, p, ts+12760, 1) } if ii < int32((*Rtree)(unsafe.Pointer(pRtree)).FnAuxNotNull) { - Xsqlite3_str_appendf(tls, p, ts+25875, libc.VaList(bp+128, ii, ii+2, ii)) + Xsqlite3_str_appendf(tls, p, ts+25922, libc.VaList(bp+128, ii, ii+2, ii)) } else { - Xsqlite3_str_appendf(tls, p, ts+25897, libc.VaList(bp+152, ii, ii+2)) + Xsqlite3_str_appendf(tls, p, ts+25944, libc.VaList(bp+152, ii, ii+2)) } } - Xsqlite3_str_appendf(tls, p, ts+25905, 0) + Xsqlite3_str_appendf(tls, p, 
ts+25952, 0) zSql = Xsqlite3_str_finish(tls, p) if zSql == uintptr(0) { rc = SQLITE_NOMEM @@ -122964,14 +123022,14 @@ } var azSql = [8]uintptr{ - ts + 25921, - ts + 25974, - ts + 26019, - ts + 26071, - ts + 26125, - ts + 26170, - ts + 26228, - ts + 26283, + ts + 25968, + ts + 26021, + ts + 26066, + ts + 26118, + ts + 26172, + ts + 26217, + ts + 26275, + ts + 26330, } func getIntFromStmt(tls *libc.TLS, db uintptr, zSql uintptr, piVal uintptr) int32 { @@ -123000,7 +123058,7 @@ var zSql uintptr if isCreate != 0 { *(*int32)(unsafe.Pointer(bp + 48)) = 0 - zSql = Xsqlite3_mprintf(tls, ts+26330, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb)) + zSql = Xsqlite3_mprintf(tls, ts+26377, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb)) rc = getIntFromStmt(tls, db, zSql, bp+48) if rc == SQLITE_OK { (*Rtree)(unsafe.Pointer(pRtree)).FiNodeSize = *(*int32)(unsafe.Pointer(bp + 48)) - 64 @@ -123012,7 +123070,7 @@ } } else { zSql = Xsqlite3_mprintf(tls, - ts+26350, + ts+26397, libc.VaList(bp+16, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) rc = getIntFromStmt(tls, db, zSql, pRtree+32) if rc != SQLITE_OK { @@ -123020,7 +123078,7 @@ } else if (*Rtree)(unsafe.Pointer(pRtree)).FiNodeSize < 512-64 { rc = SQLITE_CORRUPT | int32(1)<<8 - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+26407, + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+26454, libc.VaList(bp+40, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) } } @@ -123062,10 +123120,10 @@ ii = 4 *(*[5]uintptr)(unsafe.Pointer(bp + 96)) = [5]uintptr{ uintptr(0), - ts + 26442, - ts + 26485, - ts + 26520, - ts + 26556, + ts + 26489, + ts + 26532, + ts + 26567, + ts + 26603, } if !(argc < 6 || argc > RTREE_MAX_AUX_COLUMN+3) { @@ -123096,7 +123154,7 @@ libc.Xmemcpy(tls, (*Rtree)(unsafe.Pointer(pRtree)).FzName, *(*uintptr)(unsafe.Pointer(argv + 2*8)), uint64(nName)) pSql = Xsqlite3_str_new(tls, db) - Xsqlite3_str_appendf(tls, pSql, ts+26593, + Xsqlite3_str_appendf(tls, pSql, ts+26640, libc.VaList(bp+16, rtreeTokenLength(tls, *(*uintptr)(unsafe.Pointer(argv + 3*8))), *(*uintptr)(unsafe.Pointer(argv + 3*8)))) ii = 4 __3: @@ -123108,7 +123166,7 @@ goto __6 } (*Rtree)(unsafe.Pointer(pRtree)).FnAux++ - Xsqlite3_str_appendf(tls, pSql, ts+26617, libc.VaList(bp+32, rtreeTokenLength(tls, zArg+uintptr(1)), zArg+uintptr(1))) + Xsqlite3_str_appendf(tls, pSql, ts+26664, libc.VaList(bp+32, rtreeTokenLength(tls, zArg+uintptr(1)), zArg+uintptr(1))) goto __7 __6: if !(int32((*Rtree)(unsafe.Pointer(pRtree)).FnAux) > 0) { @@ -123131,7 +123189,7 @@ goto __5 __5: ; - Xsqlite3_str_appendf(tls, pSql, ts+26623, 0) + Xsqlite3_str_appendf(tls, pSql, ts+26670, 0) zSql = Xsqlite3_str_finish(tls, pSql) if !!(zSql != 0) { goto __10 @@ -123227,7 +123285,7 @@ return rc } -var azFormat = [2]uintptr{ts + 26626, ts + 26637} +var azFormat = [2]uintptr{ts + 26673, ts + 26684} func rtreenode(tls *libc.TLS, ctx uintptr, nArg int32, apArg uintptr) { bp := tls.Alloc(1072) @@ -123267,11 +123325,11 @@ if ii > 0 { Xsqlite3_str_append(tls, pOut, ts+10913, 1) } - Xsqlite3_str_appendf(tls, pOut, ts+26647, libc.VaList(bp, (*RtreeCell)(unsafe.Pointer(bp+1024)).FiRowid)) + Xsqlite3_str_appendf(tls, pOut, ts+26694, libc.VaList(bp, (*RtreeCell)(unsafe.Pointer(bp+1024)).FiRowid)) for jj = 0; jj < int32((*Rtree)(unsafe.Pointer(bp+56)).FnDim2); jj++ { - Xsqlite3_str_appendf(tls, pOut, ts+26653, libc.VaList(bp+8, float64(*(*RtreeValue)(unsafe.Pointer(bp + 1024 + 8 + uintptr(jj)*4))))) + Xsqlite3_str_appendf(tls, pOut, ts+26700, libc.VaList(bp+8, 
float64(*(*RtreeValue)(unsafe.Pointer(bp + 1024 + 8 + uintptr(jj)*4))))) } - Xsqlite3_str_append(tls, pOut, ts+26657, 1) + Xsqlite3_str_append(tls, pOut, ts+26704, 1) } errCode = Xsqlite3_str_errcode(tls, pOut) Xsqlite3_result_text(tls, ctx, Xsqlite3_str_finish(tls, pOut), -1, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{Xsqlite3_free}))) @@ -123282,7 +123340,7 @@ _ = nArg if Xsqlite3_value_type(tls, *(*uintptr)(unsafe.Pointer(apArg))) != SQLITE_BLOB || Xsqlite3_value_bytes(tls, *(*uintptr)(unsafe.Pointer(apArg))) < 2 { - Xsqlite3_result_error(tls, ctx, ts+26659, -1) + Xsqlite3_result_error(tls, ctx, ts+26706, -1) } else { var zBlob uintptr = Xsqlite3_value_blob(tls, *(*uintptr)(unsafe.Pointer(apArg))) if zBlob != 0 { @@ -123360,7 +123418,7 @@ if z == uintptr(0) { (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc = SQLITE_NOMEM } else { - (*RtreeCheck)(unsafe.Pointer(pCheck)).FzReport = Xsqlite3_mprintf(tls, ts+26692, + (*RtreeCheck)(unsafe.Pointer(pCheck)).FzReport = Xsqlite3_mprintf(tls, ts+26739, libc.VaList(bp, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzReport, func() uintptr { if (*RtreeCheck)(unsafe.Pointer(pCheck)).FzReport != 0 { return ts + 4055 @@ -123384,7 +123442,7 @@ if (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc == SQLITE_OK && (*RtreeCheck)(unsafe.Pointer(pCheck)).FpGetNode == uintptr(0) { (*RtreeCheck)(unsafe.Pointer(pCheck)).FpGetNode = rtreeCheckPrepare(tls, pCheck, - ts+26699, + ts+26746, libc.VaList(bp, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzDb, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzTab)) } @@ -123403,7 +123461,7 @@ } rtreeCheckReset(tls, pCheck, (*RtreeCheck)(unsafe.Pointer(pCheck)).FpGetNode) if (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc == SQLITE_OK && pRet == uintptr(0) { - rtreeCheckAppendMsg(tls, pCheck, ts+26744, libc.VaList(bp+16, iNode)) + rtreeCheckAppendMsg(tls, pCheck, ts+26791, libc.VaList(bp+16, iNode)) } } @@ -123417,8 +123475,8 @@ var rc int32 var pStmt uintptr *(*[2]uintptr)(unsafe.Pointer(bp + 80)) = [2]uintptr{ - ts + 26776, - ts + 26830, + ts + 26823, + ts + 26877, } if *(*uintptr)(unsafe.Pointer(pCheck + 40 + uintptr(bLeaf)*8)) == uintptr(0) { @@ -123433,23 +123491,23 @@ Xsqlite3_bind_int64(tls, pStmt, 1, iKey) rc = Xsqlite3_step(tls, pStmt) if rc == SQLITE_DONE { - rtreeCheckAppendMsg(tls, pCheck, ts+26878, + rtreeCheckAppendMsg(tls, pCheck, ts+26925, libc.VaList(bp+16, iKey, iVal, func() uintptr { if bLeaf != 0 { - return ts + 26923 + return ts + 26970 } - return ts + 26931 + return ts + 26978 }())) } else if rc == SQLITE_ROW { var ii I64 = Xsqlite3_column_int64(tls, pStmt, 0) if ii != iVal { rtreeCheckAppendMsg(tls, pCheck, - ts+26940, + ts+26987, libc.VaList(bp+40, iKey, ii, func() uintptr { if bLeaf != 0 { - return ts + 26923 + return ts + 26970 } - return ts + 26931 + return ts + 26978 }(), iKey, iVal)) } } @@ -123473,7 +123531,7 @@ return libc.Bool32(*(*RtreeValue)(unsafe.Pointer(bp + 48)) > *(*RtreeValue)(unsafe.Pointer(bp + 52))) }() != 0 { rtreeCheckAppendMsg(tls, pCheck, - ts+26998, libc.VaList(bp, i, iCell, iNode)) + ts+27045, libc.VaList(bp, i, iCell, iNode)) } if pParent != 0 { @@ -123493,7 +123551,7 @@ return libc.Bool32(*(*RtreeValue)(unsafe.Pointer(bp + 52)) > *(*RtreeValue)(unsafe.Pointer(bp + 60))) }() != 0 { rtreeCheckAppendMsg(tls, pCheck, - ts+27046, libc.VaList(bp+24, i, iCell, iNode)) + ts+27093, libc.VaList(bp+24, i, iCell, iNode)) } } } @@ -123510,14 +123568,14 @@ if aNode != 0 { if *(*int32)(unsafe.Pointer(bp + 48)) < 4 { rtreeCheckAppendMsg(tls, pCheck, - ts+27113, libc.VaList(bp, iNode, 
*(*int32)(unsafe.Pointer(bp + 48)))) + ts+27160, libc.VaList(bp, iNode, *(*int32)(unsafe.Pointer(bp + 48)))) } else { var nCell int32 var i int32 if aParent == uintptr(0) { iDepth = readInt16(tls, aNode) if iDepth > RTREE_MAX_DEPTH { - rtreeCheckAppendMsg(tls, pCheck, ts+27147, libc.VaList(bp+16, iDepth)) + rtreeCheckAppendMsg(tls, pCheck, ts+27194, libc.VaList(bp+16, iDepth)) Xsqlite3_free(tls, aNode) return } @@ -123525,7 +123583,7 @@ nCell = readInt16(tls, aNode+2) if 4+nCell*(8+(*RtreeCheck)(unsafe.Pointer(pCheck)).FnDim*2*4) > *(*int32)(unsafe.Pointer(bp + 48)) { rtreeCheckAppendMsg(tls, pCheck, - ts+27177, + ts+27224, libc.VaList(bp+24, iNode, nCell, *(*int32)(unsafe.Pointer(bp + 48)))) } else { for i = 0; i < nCell; i++ { @@ -123554,14 +123612,14 @@ if (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc == SQLITE_OK { var pCount uintptr - pCount = rtreeCheckPrepare(tls, pCheck, ts+27232, + pCount = rtreeCheckPrepare(tls, pCheck, ts+27279, libc.VaList(bp, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzDb, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzTab, zTbl)) if pCount != 0 { if Xsqlite3_step(tls, pCount) == SQLITE_ROW { var nActual I64 = Xsqlite3_column_int64(tls, pCount, 0) if nActual != nExpect { rtreeCheckAppendMsg(tls, pCheck, - ts+27263, libc.VaList(bp+24, zTbl, nExpect, nActual)) + ts+27310, libc.VaList(bp+24, zTbl, nExpect, nActual)) } } (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc = Xsqlite3_finalize(tls, pCount) @@ -123588,7 +123646,7 @@ } if (*RtreeCheck)(unsafe.Pointer(bp+32)).Frc == SQLITE_OK { - pStmt = rtreeCheckPrepare(tls, bp+32, ts+27330, libc.VaList(bp, zDb, zTab)) + pStmt = rtreeCheckPrepare(tls, bp+32, ts+27377, libc.VaList(bp, zDb, zTab)) if pStmt != 0 { nAux = Xsqlite3_column_count(tls, pStmt) - 2 Xsqlite3_finalize(tls, pStmt) @@ -123597,12 +123655,12 @@ } } - pStmt = rtreeCheckPrepare(tls, bp+32, ts+25150, libc.VaList(bp+16, zDb, zTab)) + pStmt = rtreeCheckPrepare(tls, bp+32, ts+25197, libc.VaList(bp+16, zDb, zTab)) if pStmt != 0 { var rc int32 (*RtreeCheck)(unsafe.Pointer(bp + 32)).FnDim = (Xsqlite3_column_count(tls, pStmt) - 1 - nAux) / 2 if (*RtreeCheck)(unsafe.Pointer(bp+32)).FnDim < 1 { - rtreeCheckAppendMsg(tls, bp+32, ts+27358, 0) + rtreeCheckAppendMsg(tls, bp+32, ts+27405, 0) } else if SQLITE_ROW == Xsqlite3_step(tls, pStmt) { (*RtreeCheck)(unsafe.Pointer(bp + 32)).FbInt = libc.Bool32(Xsqlite3_column_type(tls, pStmt, 1) == SQLITE_INTEGER) } @@ -123616,8 +123674,8 @@ if (*RtreeCheck)(unsafe.Pointer(bp+32)).Frc == SQLITE_OK { rtreeCheckNode(tls, bp+32, 0, uintptr(0), int64(1)) } - rtreeCheckCount(tls, bp+32, ts+27389, int64((*RtreeCheck)(unsafe.Pointer(bp+32)).FnLeaf)) - rtreeCheckCount(tls, bp+32, ts+27396, int64((*RtreeCheck)(unsafe.Pointer(bp+32)).FnNonLeaf)) + rtreeCheckCount(tls, bp+32, ts+27436, int64((*RtreeCheck)(unsafe.Pointer(bp+32)).FnLeaf)) + rtreeCheckCount(tls, bp+32, ts+27443, int64((*RtreeCheck)(unsafe.Pointer(bp+32)).FnNonLeaf)) } Xsqlite3_finalize(tls, (*RtreeCheck)(unsafe.Pointer(bp+32)).FpGetNode) @@ -123625,7 +123683,7 @@ Xsqlite3_finalize(tls, *(*uintptr)(unsafe.Pointer(bp + 32 + 40 + 1*8))) if bEnd != 0 { - var rc int32 = Xsqlite3_exec(tls, db, ts+27404, uintptr(0), uintptr(0), uintptr(0)) + var rc int32 = Xsqlite3_exec(tls, db, ts+27451, uintptr(0), uintptr(0), uintptr(0)) if (*RtreeCheck)(unsafe.Pointer(bp+32)).Frc == SQLITE_OK { (*RtreeCheck)(unsafe.Pointer(bp + 32)).Frc = rc } @@ -123640,7 +123698,7 @@ if nArg != 1 && nArg != 2 { Xsqlite3_result_error(tls, ctx, - ts+27408, -1) + ts+27455, -1) } else { var rc int32 *(*uintptr)(unsafe.Pointer(bp)) = 
uintptr(0) @@ -123658,7 +123716,7 @@ if *(*uintptr)(unsafe.Pointer(bp)) != 0 { return *(*uintptr)(unsafe.Pointer(bp)) } - return ts + 18005 + return ts + 18052 }(), -1, libc.UintptrFromInt32(-1)) } else { Xsqlite3_result_error_code(tls, ctx, rc) @@ -124029,11 +124087,11 @@ var db uintptr = Xsqlite3_context_db_handle(tls, context) var x uintptr = Xsqlite3_str_new(tls, db) var i int32 - Xsqlite3_str_append(tls, x, ts+27459, 1) + Xsqlite3_str_append(tls, x, ts+27506, 1) for i = 0; i < (*GeoPoly)(unsafe.Pointer(p)).FnVertex; i++ { - Xsqlite3_str_appendf(tls, x, ts+27461, libc.VaList(bp, float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2)*4))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2+1)*4))))) + Xsqlite3_str_appendf(tls, x, ts+27508, libc.VaList(bp, float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2)*4))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2+1)*4))))) } - Xsqlite3_str_appendf(tls, x, ts+27472, libc.VaList(bp+16, float64(*(*GeoCoord)(unsafe.Pointer(p + 8))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + 1*4))))) + Xsqlite3_str_appendf(tls, x, ts+27519, libc.VaList(bp+16, float64(*(*GeoCoord)(unsafe.Pointer(p + 8))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + 1*4))))) Xsqlite3_result_text(tls, context, Xsqlite3_str_finish(tls, x), -1, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{Xsqlite3_free}))) Xsqlite3_free(tls, p) } @@ -124053,19 +124111,19 @@ var x uintptr = Xsqlite3_str_new(tls, db) var i int32 var cSep int8 = int8('\'') - Xsqlite3_str_appendf(tls, x, ts+27483, 0) + Xsqlite3_str_appendf(tls, x, ts+27530, 0) for i = 0; i < (*GeoPoly)(unsafe.Pointer(p)).FnVertex; i++ { - Xsqlite3_str_appendf(tls, x, ts+27501, libc.VaList(bp, int32(cSep), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2)*4))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2+1)*4))))) + Xsqlite3_str_appendf(tls, x, ts+27548, libc.VaList(bp, int32(cSep), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2)*4))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2+1)*4))))) cSep = int8(' ') } - Xsqlite3_str_appendf(tls, x, ts+27509, libc.VaList(bp+24, float64(*(*GeoCoord)(unsafe.Pointer(p + 8))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + 1*4))))) + Xsqlite3_str_appendf(tls, x, ts+27556, libc.VaList(bp+24, float64(*(*GeoCoord)(unsafe.Pointer(p + 8))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + 1*4))))) for i = 1; i < argc; i++ { var z uintptr = Xsqlite3_value_text(tls, *(*uintptr)(unsafe.Pointer(argv + uintptr(i)*8))) if z != 0 && *(*int8)(unsafe.Pointer(z)) != 0 { - Xsqlite3_str_appendf(tls, x, ts+27517, libc.VaList(bp+40, z)) + Xsqlite3_str_appendf(tls, x, ts+27564, libc.VaList(bp+40, z)) } } - Xsqlite3_str_appendf(tls, x, ts+27521, 0) + Xsqlite3_str_appendf(tls, x, ts+27568, 0) Xsqlite3_result_text(tls, context, Xsqlite3_str_finish(tls, x), -1, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{Xsqlite3_free}))) Xsqlite3_free(tls, p) } @@ -124985,7 +125043,7 @@ libc.Xmemcpy(tls, (*Rtree)(unsafe.Pointer(pRtree)).FzName, *(*uintptr)(unsafe.Pointer(argv + 2*8)), uint64(nName)) pSql = Xsqlite3_str_new(tls, db) - Xsqlite3_str_appendf(tls, pSql, ts+27534, 0) + Xsqlite3_str_appendf(tls, pSql, ts+27581, 0) (*Rtree)(unsafe.Pointer(pRtree)).FnAux = U8(1) (*Rtree)(unsafe.Pointer(pRtree)).FnAuxNotNull = U8(1) ii = 3 @@ -124994,7 +125052,7 @@ goto __4 } (*Rtree)(unsafe.Pointer(pRtree)).FnAux++ - Xsqlite3_str_appendf(tls, pSql, ts+27556, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(argv + uintptr(ii)*8)))) + Xsqlite3_str_appendf(tls, 
pSql, ts+27603, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(argv + uintptr(ii)*8)))) goto __3 __3: ii++ @@ -125002,7 +125060,7 @@ goto __4 __4: ; - Xsqlite3_str_appendf(tls, pSql, ts+26623, 0) + Xsqlite3_str_appendf(tls, pSql, ts+26670, 0) zSql = Xsqlite3_str_finish(tls, pSql) if !!(zSql != 0) { goto __5 @@ -125239,7 +125297,7 @@ } if iFuncTerm >= 0 { (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxNum = idxNum - (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxStr = ts + 27560 + (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxStr = ts + 27607 (*sqlite3_index_constraint_usage)(unsafe.Pointer((*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FaConstraintUsage + uintptr(iFuncTerm)*8)).FargvIndex = 1 (*sqlite3_index_constraint_usage)(unsafe.Pointer((*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FaConstraintUsage + uintptr(iFuncTerm)*8)).Fomit = uint8(0) (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FestimatedCost = 300.0 @@ -125247,7 +125305,7 @@ return SQLITE_OK } (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxNum = 4 - (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxStr = ts + 27566 + (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxStr = ts + 27613 (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FestimatedCost = 3000000.0 (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FestimatedRows = int64(100000) return SQLITE_OK @@ -125359,7 +125417,7 @@ if !(*(*int32)(unsafe.Pointer(bp + 48)) == SQLITE_ERROR) { goto __4 } - (*Sqlite3_vtab)(unsafe.Pointer(pVtab)).FzErrMsg = Xsqlite3_mprintf(tls, ts+27575, 0) + (*Sqlite3_vtab)(unsafe.Pointer(pVtab)).FzErrMsg = Xsqlite3_mprintf(tls, ts+27622, 0) __4: ; goto geopoly_update_end @@ -125491,14 +125549,14 @@ func geopolyFindFunction(tls *libc.TLS, pVtab uintptr, nArg int32, zName uintptr, pxFunc uintptr, ppArg uintptr) int32 { _ = pVtab _ = nArg - if Xsqlite3_stricmp(tls, zName, ts+27615) == 0 { + if Xsqlite3_stricmp(tls, zName, ts+27662) == 0 { *(*uintptr)(unsafe.Pointer(pxFunc)) = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{geopolyOverlapFunc})) *(*uintptr)(unsafe.Pointer(ppArg)) = uintptr(0) return SQLITE_INDEX_CONSTRAINT_FUNCTION } - if Xsqlite3_stricmp(tls, zName, ts+27631) == 0 { + if Xsqlite3_stricmp(tls, zName, ts+27678) == 0 { *(*uintptr)(unsafe.Pointer(pxFunc)) = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{geopolyWithinFunc})) @@ -125563,7 +125621,7 @@ uintptr(0), aAgg[i].FxStep, aAgg[i].FxFinal) } if rc == SQLITE_OK { - rc = Xsqlite3_create_module_v2(tls, db, ts+27646, uintptr(unsafe.Pointer(&geopolyModule)), uintptr(0), uintptr(0)) + rc = Xsqlite3_create_module_v2(tls, db, ts+27693, uintptr(unsafe.Pointer(&geopolyModule)), uintptr(0), uintptr(0)) } return rc } @@ -125575,25 +125633,25 @@ F__ccgo_pad1 [6]byte FzName uintptr }{ - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27654}, - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27667}, - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27680}, - {FxFunc: 0, FnArg: int8(-1), FbPure: uint8(1), FzName: ts + 27693}, - {FxFunc: 0, FnArg: int8(2), FbPure: uint8(1), FzName: ts + 27631}, - {FxFunc: 0, FnArg: int8(3), FbPure: uint8(1), FzName: ts + 27705}, - {FxFunc: 0, FnArg: int8(2), FbPure: uint8(1), FzName: ts + 27615}, - {FxFunc: 0, FnArg: int8(1), FzName: ts + 27728}, - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27742}, - {FxFunc: 0, FnArg: int8(7), FbPure: uint8(1), FzName: ts + 27755}, - {FxFunc: 0, FnArg: int8(4), FbPure: uint8(1), FzName: ts + 
27769}, - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27785}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27701}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27714}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27727}, + {FxFunc: 0, FnArg: int8(-1), FbPure: uint8(1), FzName: ts + 27740}, + {FxFunc: 0, FnArg: int8(2), FbPure: uint8(1), FzName: ts + 27678}, + {FxFunc: 0, FnArg: int8(3), FbPure: uint8(1), FzName: ts + 27752}, + {FxFunc: 0, FnArg: int8(2), FbPure: uint8(1), FzName: ts + 27662}, + {FxFunc: 0, FnArg: int8(1), FzName: ts + 27775}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27789}, + {FxFunc: 0, FnArg: int8(7), FbPure: uint8(1), FzName: ts + 27802}, + {FxFunc: 0, FnArg: int8(4), FbPure: uint8(1), FzName: ts + 27816}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27832}, } var aAgg = [1]struct { FxStep uintptr FxFinal uintptr FzName uintptr }{ - {FxStep: 0, FxFinal: 0, FzName: ts + 27797}, + {FxStep: 0, FxFinal: 0, FzName: ts + 27844}, } // Register the r-tree module with database handle db. This creates the @@ -125603,26 +125661,26 @@ var utf8 int32 = SQLITE_UTF8 var rc int32 - rc = Xsqlite3_create_function(tls, db, ts+27816, 2, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + rc = Xsqlite3_create_function(tls, db, ts+27863, 2, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rtreenode})), uintptr(0), uintptr(0)) if rc == SQLITE_OK { - rc = Xsqlite3_create_function(tls, db, ts+27826, 1, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + rc = Xsqlite3_create_function(tls, db, ts+27873, 1, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rtreedepth})), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { - rc = Xsqlite3_create_function(tls, db, ts+27837, -1, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + rc = Xsqlite3_create_function(tls, db, ts+27884, -1, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rtreecheck})), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { var c uintptr = uintptr(RTREE_COORD_REAL32) - rc = Xsqlite3_create_module_v2(tls, db, ts+27560, uintptr(unsafe.Pointer(&rtreeModule)), c, uintptr(0)) + rc = Xsqlite3_create_module_v2(tls, db, ts+27607, uintptr(unsafe.Pointer(&rtreeModule)), c, uintptr(0)) } if rc == SQLITE_OK { var c uintptr = uintptr(RTREE_COORD_INT32) - rc = Xsqlite3_create_module_v2(tls, db, ts+27848, uintptr(unsafe.Pointer(&rtreeModule)), c, uintptr(0)) + rc = Xsqlite3_create_module_v2(tls, db, ts+27895, uintptr(unsafe.Pointer(&rtreeModule)), c, uintptr(0)) } if rc == SQLITE_OK { rc = sqlite3_geopoly_init(tls, db) @@ -125676,7 +125734,7 @@ Xsqlite3_result_error_nomem(tls, ctx) rtreeMatchArgFree(tls, pBlob) } else { - Xsqlite3_result_pointer(tls, ctx, pBlob, ts+25136, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{rtreeMatchArgFree}))) + Xsqlite3_result_pointer(tls, ctx, pBlob, ts+25183, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{rtreeMatchArgFree}))) } } } @@ -126003,7 +126061,7 @@ nOut = rbuDeltaOutputSize(tls, aDelta, nDelta) if nOut < 0 { - Xsqlite3_result_error(tls, context, ts+27858, -1) + Xsqlite3_result_error(tls, context, ts+27905, -1) return } @@ -126014,7 +126072,7 @@ nOut2 = rbuDeltaApply(tls, aOrig, nOrig, aDelta, nDelta, aOut) if nOut2 != nOut { Xsqlite3_free(tls, aOut) - Xsqlite3_result_error(tls, context, ts+27858, -1) + 
Xsqlite3_result_error(tls, context, ts+27905, -1) } else { Xsqlite3_result_blob(tls, context, aOut, nOut, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{Xsqlite3_free}))) } @@ -126115,7 +126173,7 @@ rbuObjIterClearStatements(tls, pIter) if (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx == uintptr(0) { rc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+27879, uintptr(0), uintptr(0), p+64) + ts+27926, uintptr(0), uintptr(0), p+64) } if rc == SQLITE_OK { @@ -126179,7 +126237,7 @@ Xsqlite3_result_text(tls, pCtx, zIn, -1, uintptr(0)) } } else { - if libc.Xstrlen(tls, zIn) > uint64(4) && libc.Xmemcmp(tls, ts+25049, zIn, uint64(4)) == 0 { + if libc.Xstrlen(tls, zIn) > uint64(4) && libc.Xmemcmp(tls, ts+25096, zIn, uint64(4)) == 0 { var i int32 for i = 4; int32(*(*int8)(unsafe.Pointer(zIn + uintptr(i)))) >= '0' && int32(*(*int8)(unsafe.Pointer(zIn + uintptr(i)))) <= '9'; i++ { } @@ -126200,16 +126258,16 @@ rc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, pIter, p+64, Xsqlite3_mprintf(tls, - ts+28050, libc.VaList(bp, func() uintptr { + ts+28097, libc.VaList(bp, func() uintptr { if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - return ts + 28200 + return ts + 28247 } return ts + 1547 }()))) if rc == SQLITE_OK { rc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+8, p+64, - ts+28241) + ts+28288) } (*RbuObjIter)(unsafe.Pointer(pIter)).FbCleanup = 1 @@ -126325,7 +126383,7 @@ (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+32, p+64, Xsqlite3_mprintf(tls, - ts+28366, libc.VaList(bp, zTab))) + ts+28413, libc.VaList(bp, zTab))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc != SQLITE_OK || Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 32))) != SQLITE_ROW) { goto __1 } @@ -126343,7 +126401,7 @@ *(*int32)(unsafe.Pointer(piTnum)) = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 32)), 1) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+32+1*8, p+64, - Xsqlite3_mprintf(tls, ts+28485, libc.VaList(bp+8, zTab))) + Xsqlite3_mprintf(tls, ts+28532, libc.VaList(bp+8, zTab))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc != 0) { goto __3 } @@ -126361,7 +126419,7 @@ } (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+32+2*8, p+64, Xsqlite3_mprintf(tls, - ts+28506, libc.VaList(bp+16, zIdx))) + ts+28553, libc.VaList(bp+16, zIdx))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK) { goto __7 } @@ -126384,7 +126442,7 @@ __5: ; (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+32+3*8, p+64, - Xsqlite3_mprintf(tls, ts+28557, libc.VaList(bp+24, zTab))) + Xsqlite3_mprintf(tls, ts+28604, libc.VaList(bp+24, zTab))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK) { goto __10 } @@ -126430,7 +126488,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { libc.Xmemcpy(tls, (*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed, (*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk, uint64(unsafe.Sizeof(U8(0)))*uint64((*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol)) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+16, p+64, - Xsqlite3_mprintf(tls, ts+28578, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) + Xsqlite3_mprintf(tls, ts+28625, libc.VaList(bp, 
(*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) } (*RbuObjIter)(unsafe.Pointer(pIter)).FnIndex = 0 @@ -126445,7 +126503,7 @@ libc.Xmemset(tls, (*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed, 0x01, uint64(unsafe.Sizeof(U8(0)))*uint64((*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol)) } (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+24, p+64, - Xsqlite3_mprintf(tls, ts+28606, libc.VaList(bp+8, zIdx))) + Xsqlite3_mprintf(tls, ts+28653, libc.VaList(bp+8, zIdx))) for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) { var iCid int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 24)), 1) if iCid >= 0 { @@ -126485,7 +126543,7 @@ rbuTableType(tls, p, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, pIter+72, bp+56, pIter+108) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NOTABLE { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+19482, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl)) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+19529, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl)) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc != 0 { return (*Sqlite3rbu)(unsafe.Pointer(p)).Frc @@ -126495,18 +126553,18 @@ } (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp+64, p+64, - Xsqlite3_mprintf(tls, ts+28635, libc.VaList(bp+8, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl))) + Xsqlite3_mprintf(tls, ts+28682, libc.VaList(bp+8, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl))) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { nCol = Xsqlite3_column_count(tls, *(*uintptr)(unsafe.Pointer(bp + 64))) rbuAllocateIterArrays(tls, p, pIter, nCol) } for i = 0; (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && i < nCol; i++ { var zName uintptr = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp + 64)), i) - if Xsqlite3_strnicmp(tls, ts+28654, zName, 4) != 0 { + if Xsqlite3_strnicmp(tls, ts+28701, zName, 4) != 0 { var zCopy uintptr = rbuStrndup(tls, zName, p+56) *(*int32)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FaiSrcOrder + uintptr((*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol)*4)) = (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(libc.PostIncInt32(&(*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol, 1))*8)) = zCopy - } else if 0 == Xsqlite3_stricmp(tls, ts+28659, zName) { + } else if 0 == Xsqlite3_stricmp(tls, ts+28706, zName) { bRbuRowid = 1 } } @@ -126518,18 +126576,18 @@ bRbuRowid != libc.Bool32((*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_VTAB || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE) { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, - ts+28669, libc.VaList(bp+16, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, + ts+28716, libc.VaList(bp+16, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, func() uintptr { if bRbuRowid != 0 { - return ts + 28698 + return ts + 28745 } - return ts + 28711 + return ts + 28758 }())) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+64, p+64, - Xsqlite3_mprintf(tls, ts+28720, 
libc.VaList(bp+32, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) + Xsqlite3_mprintf(tls, ts+28767, libc.VaList(bp+32, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) } for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 64))) { var zName uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 64)), 1) @@ -126543,7 +126601,7 @@ } if i == (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+28742, + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+28789, libc.VaList(bp+40, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zName)) } else { var iPk int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 64)), 5) @@ -126590,7 +126648,7 @@ var i int32 for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { var z uintptr = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) - zList = rbuMPrintf(tls, p, ts+28769, libc.VaList(bp, zList, zSep, z)) + zList = rbuMPrintf(tls, p, ts+28816, libc.VaList(bp, zList, zSep, z)) zSep = ts + 14607 } return zList @@ -126608,7 +126666,7 @@ for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { if int32(*(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(i)))) == iPk { var zCol uintptr = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) - zRet = rbuMPrintf(tls, p, ts+28778, libc.VaList(bp, zRet, zSep, zPre, zCol, zPost)) + zRet = rbuMPrintf(tls, p, ts+28825, libc.VaList(bp, zRet, zSep, zPre, zCol, zPost)) zSep = zSeparator break } @@ -126630,25 +126688,25 @@ if bRowid != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+72, p+64, Xsqlite3_mprintf(tls, - ts+28791, libc.VaList(bp, zWrite, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) + ts+28838, libc.VaList(bp, zWrite, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 72))) { var iMax Sqlite3_int64 = Xsqlite3_column_int64(tls, *(*uintptr)(unsafe.Pointer(bp + 72)), 0) - zRet = rbuMPrintf(tls, p, ts+28823, libc.VaList(bp+16, iMax)) + zRet = rbuMPrintf(tls, p, ts+28870, libc.VaList(bp+16, iMax)) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 72))) } else { - var zOrder uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+1547, ts+14607, ts+28846) - var zSelect uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+28852, ts+28859, ts+4950) + var zOrder uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+1547, ts+14607, ts+28893) + var zSelect uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+28899, ts+28906, ts+4950) var zList uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+1547, ts+14607, ts+1547) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+72, p+64, Xsqlite3_mprintf(tls, - ts+28867, + ts+28914, libc.VaList(bp+24, zSelect, zWrite, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zOrder))) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 72))) { var zVal uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 72)), 0) - zRet = rbuMPrintf(tls, p, ts+28909, libc.VaList(bp+56, zList, zVal)) + zRet 
= rbuMPrintf(tls, p, ts+28956, libc.VaList(bp+56, zList, zVal)) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 72))) } @@ -126690,7 +126748,7 @@ *(*uintptr)(unsafe.Pointer(bp + 176)) = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+176, p+64, - Xsqlite3_mprintf(tls, ts+28606, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx))) + Xsqlite3_mprintf(tls, ts+28653, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx))) __1: if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 176)))) { goto __2 @@ -126725,7 +126783,7 @@ zCol = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) goto __7 __6: - zCol = ts + 28929 + zCol = ts + 28976 __7: ; goto __5 @@ -126733,11 +126791,11 @@ zCol = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(iCid)*8)) __5: ; - zLhs = rbuMPrintf(tls, p, ts+28937, + zLhs = rbuMPrintf(tls, p, ts+28984, libc.VaList(bp+8, zLhs, zSep, zCol, zCollate)) - zOrder = rbuMPrintf(tls, p, ts+28958, + zOrder = rbuMPrintf(tls, p, ts+29005, libc.VaList(bp+40, zOrder, zSep, iCol, zCol, zCollate)) - zSelect = rbuMPrintf(tls, p, ts+28994, + zSelect = rbuMPrintf(tls, p, ts+29041, libc.VaList(bp+80, zSelect, zSep, iCol, zCol)) zSep = ts + 14607 iCol++ @@ -126757,7 +126815,7 @@ *(*uintptr)(unsafe.Pointer(bp + 184)) = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+184, p+64, - Xsqlite3_mprintf(tls, ts+29021, + Xsqlite3_mprintf(tls, ts+29068, libc.VaList(bp+112, zSelect, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zOrder))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 184)))) { goto __13 @@ -126784,7 +126842,7 @@ ; __18: ; - zVector = rbuMPrintf(tls, p, ts+29069, libc.VaList(bp+136, zVector, zSep, zQuoted)) + zVector = rbuMPrintf(tls, p, ts+29116, libc.VaList(bp+136, zVector, zSep, zQuoted)) zSep = ts + 14607 goto __15 __15: @@ -126796,7 +126854,7 @@ if !!(bFailed != 0) { goto __20 } - zRet = rbuMPrintf(tls, p, ts+29076, libc.VaList(bp+160, zLhs, zVector)) + zRet = rbuMPrintf(tls, p, ts+29123, libc.VaList(bp+160, zLhs, zVector)) __20: ; __13: @@ -126829,7 +126887,7 @@ if rc == SQLITE_OK { rc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+200, p+64, - Xsqlite3_mprintf(tls, ts+28606, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx))) + Xsqlite3_mprintf(tls, ts+28653, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx))) } for rc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 200))) { @@ -126841,7 +126899,7 @@ if iCid == -2 { var iSeq int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 200)), 0) - zRet = Xsqlite3_mprintf(tls, ts+29088, libc.VaList(bp+8, zRet, zCom, + zRet = Xsqlite3_mprintf(tls, ts+29135, libc.VaList(bp+8, zRet, zCom, (*RbuSpan)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FaIdxCol+uintptr(iSeq)*16)).FnSpan, (*RbuSpan)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FaIdxCol+uintptr(iSeq)*16)).FzSpan, zCollate)) zType = ts + 1547 } else { @@ -126853,37 +126911,37 @@ zCol = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) } else if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - zCol = ts + 28929 + zCol = ts + 28976 } else { - 
zCol = ts + 28659 + zCol = ts + 28706 } zType = ts + 1112 } else { zCol = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(iCid)*8)) zType = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblType + uintptr(iCid)*8)) } - zRet = Xsqlite3_mprintf(tls, ts+29110, libc.VaList(bp+48, zRet, zCom, zCol, zCollate)) + zRet = Xsqlite3_mprintf(tls, ts+29157, libc.VaList(bp+48, zRet, zCom, zCol, zCollate)) } if (*RbuObjIter)(unsafe.Pointer(pIter)).FbUnique == 0 || Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 200)), 5) != 0 { var zOrder uintptr = func() uintptr { if bDesc != 0 { - return ts + 28846 + return ts + 28893 } return ts + 1547 }() - zImpPK = Xsqlite3_mprintf(tls, ts+29130, + zImpPK = Xsqlite3_mprintf(tls, ts+29177, libc.VaList(bp+80, zImpPK, zCom, nBind, zCol, zOrder)) } - zImpCols = Xsqlite3_mprintf(tls, ts+29151, + zImpCols = Xsqlite3_mprintf(tls, ts+29198, libc.VaList(bp+120, zImpCols, zCom, nBind, zCol, zType, zCollate)) zWhere = Xsqlite3_mprintf(tls, - ts+29184, libc.VaList(bp+168, zWhere, zAnd, nBind, zCol)) + ts+29231, libc.VaList(bp+168, zWhere, zAnd, nBind, zCol)) if zRet == uintptr(0) || zImpPK == uintptr(0) || zImpCols == uintptr(0) || zWhere == uintptr(0) { rc = SQLITE_NOMEM } zCom = ts + 14607 - zAnd = ts + 21518 + zAnd = ts + 21565 nBind++ } @@ -126922,9 +126980,9 @@ for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { if *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed + uintptr(i))) != 0 { var zCol uintptr = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) - zList = Xsqlite3_mprintf(tls, ts+29208, libc.VaList(bp, zList, zS, zObj, zCol)) + zList = Xsqlite3_mprintf(tls, ts+29255, libc.VaList(bp, zList, zS, zObj, zCol)) } else { - zList = Xsqlite3_mprintf(tls, ts+29220, libc.VaList(bp+32, zList, zS)) + zList = Xsqlite3_mprintf(tls, ts+29267, libc.VaList(bp+32, zList, zS)) } zS = ts + 14607 if zList == uintptr(0) { @@ -126934,7 +126992,7 @@ } if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { - zList = rbuMPrintf(tls, p, ts+29229, libc.VaList(bp+48, zList, zObj)) + zList = rbuMPrintf(tls, p, ts+29276, libc.VaList(bp+48, zList, zObj)) } } return zList @@ -126946,18 +127004,18 @@ var zList uintptr = uintptr(0) if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_VTAB || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { - zList = rbuMPrintf(tls, p, ts+29244, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol+1)) + zList = rbuMPrintf(tls, p, ts+29291, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol+1)) } else if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL { var zSep uintptr = ts + 1547 var i int32 for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { if *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(i))) != 0 { - zList = rbuMPrintf(tls, p, ts+29258, libc.VaList(bp+8, zList, zSep, i, i+1)) - zSep = ts + 21518 + zList = rbuMPrintf(tls, p, ts+29305, libc.VaList(bp+8, zList, zSep, i, i+1)) + zSep = ts + 21565 } } zList = rbuMPrintf(tls, p, - ts+29270, libc.VaList(bp+40, zList)) + ts+29317, libc.VaList(bp+40, zList)) } else { var zSep uintptr = ts + 1547 @@ -126965,8 +127023,8 @@ for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { if *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(i))) != 0 { var zCol uintptr = 
*(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) - zList = rbuMPrintf(tls, p, ts+29320, libc.VaList(bp+48, zList, zSep, zCol, i+1)) - zSep = ts + 21518 + zList = rbuMPrintf(tls, p, ts+29367, libc.VaList(bp+48, zList, zSep, zCol, i+1)) + zSep = ts + 21565 } } } @@ -126975,7 +127033,7 @@ func rbuBadControlError(tls *libc.TLS, p uintptr) { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+29333, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+29380, 0) } func rbuObjIterGetSetlist(tls *libc.TLS, p uintptr, pIter uintptr, zMask uintptr) uintptr { @@ -126993,15 +127051,15 @@ for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { var c int8 = *(*int8)(unsafe.Pointer(zMask + uintptr(*(*int32)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FaiSrcOrder + uintptr(i)*4))))) if int32(c) == 'x' { - zList = rbuMPrintf(tls, p, ts+29320, + zList = rbuMPrintf(tls, p, ts+29367, libc.VaList(bp, zList, zSep, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)), i+1)) zSep = ts + 14607 } else if int32(c) == 'd' { - zList = rbuMPrintf(tls, p, ts+29359, + zList = rbuMPrintf(tls, p, ts+29406, libc.VaList(bp+32, zList, zSep, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)), *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)), i+1)) zSep = ts + 14607 } else if int32(c) == 'f' { - zList = rbuMPrintf(tls, p, ts+29389, + zList = rbuMPrintf(tls, p, ts+29436, libc.VaList(bp+72, zList, zSep, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)), *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)), i+1)) zSep = ts + 14607 } @@ -127038,19 +127096,19 @@ var z uintptr = uintptr(0) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - var zSep uintptr = ts + 29426 + var zSep uintptr = ts + 29473 *(*uintptr)(unsafe.Pointer(bp + 56)) = uintptr(0) *(*uintptr)(unsafe.Pointer(bp + 64)) = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+56, p+64, - Xsqlite3_mprintf(tls, ts+28578, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) + Xsqlite3_mprintf(tls, ts+28625, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 56))) { var zOrig uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), 3) if zOrig != 0 && libc.Xstrcmp(tls, zOrig, ts+16148) == 0 { var zIdx uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), 1) if zIdx != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+64, p+64, - Xsqlite3_mprintf(tls, ts+28606, libc.VaList(bp+8, zIdx))) + Xsqlite3_mprintf(tls, ts+28653, libc.VaList(bp+8, zIdx))) } break } @@ -127062,15 +127120,15 @@ var zCol uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 64)), 2) var zDesc uintptr if Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 64)), 3) != 0 { - zDesc = ts + 28846 + zDesc = ts + 28893 } else { zDesc = ts + 1547 } - z = rbuMPrintf(tls, p, ts+29439, libc.VaList(bp+16, z, zSep, zCol, zDesc)) + z = rbuMPrintf(tls, p, ts+29486, libc.VaList(bp+16, z, zSep, zCol, zDesc)) zSep = ts + 14607 } } - z 
= rbuMPrintf(tls, p, ts+29450, libc.VaList(bp+48, z)) + z = rbuMPrintf(tls, p, ts+29497, libc.VaList(bp+48, z)) rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 64))) } return z @@ -127090,7 +127148,7 @@ var zPk uintptr = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+168, p+64, - ts+29454) + ts+29501) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { Xsqlite3_bind_int(tls, *(*uintptr)(unsafe.Pointer(bp + 168)), 1, tnum) if SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 168))) { @@ -127099,7 +127157,7 @@ } if zIdx != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+176, p+64, - Xsqlite3_mprintf(tls, ts+28606, libc.VaList(bp, zIdx))) + Xsqlite3_mprintf(tls, ts+28653, libc.VaList(bp, zIdx))) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 168))) @@ -127109,23 +127167,23 @@ var iCid int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 176)), 1) var bDesc int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 176)), 3) var zCollate uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 176)), 4) - zCols = rbuMPrintf(tls, p, ts+29504, libc.VaList(bp+8, zCols, zComma, + zCols = rbuMPrintf(tls, p, ts+29551, libc.VaList(bp+8, zCols, zComma, iCid, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblType + uintptr(iCid)*8)), zCollate)) - zPk = rbuMPrintf(tls, p, ts+29526, libc.VaList(bp+48, zPk, zComma, iCid, func() uintptr { + zPk = rbuMPrintf(tls, p, ts+29573, libc.VaList(bp+48, zPk, zComma, iCid, func() uintptr { if bDesc != 0 { - return ts + 28846 + return ts + 28893 } return ts + 1547 }())) zComma = ts + 14607 } } - zCols = rbuMPrintf(tls, p, ts+29536, libc.VaList(bp+80, zCols)) + zCols = rbuMPrintf(tls, p, ts+29583, libc.VaList(bp+80, zCols)) rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 176))) Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+88, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6434, 1, tnum)) rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+29551, + ts+29598, libc.VaList(bp+120, zCols, zPk)) Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+136, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6434, 0, 0)) } @@ -127151,13 +127209,13 @@ (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6434, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zCol, uintptr(0), bp+192, uintptr(0), uintptr(0), uintptr(0)) if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_IPK && *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(iCol))) != 0 { - zPk = ts + 29613 + zPk = ts + 29660 } - zSql = rbuMPrintf(tls, p, ts+29626, + zSql = rbuMPrintf(tls, p, ts+29673, libc.VaList(bp+32, zSql, zComma, zCol, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblType + uintptr(iCol)*8)), zPk, *(*uintptr)(unsafe.Pointer(bp + 192)), func() uintptr { if *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabNotNull + uintptr(iCol))) != 0 { - return ts + 29653 + return ts + 29700 } return ts + 1547 }())) @@ -127167,16 +127225,16 @@ if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_WITHOUT_ROWID { var zPk uintptr = rbuWithoutRowidPK(tls, p, pIter) if zPk != 0 { - zSql = rbuMPrintf(tls, p, ts+29663, libc.VaList(bp+88, zSql, zPk)) + zSql = rbuMPrintf(tls, p, ts+29710, libc.VaList(bp+88, zSql, zPk)) } } Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+104, 
(*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6434, 1, tnum)) - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+29670, + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+29717, libc.VaList(bp+136, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zSql, func() uintptr { if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_WITHOUT_ROWID { - return ts + 29702 + return ts + 29749 } return ts + 1547 }())) @@ -127193,7 +127251,7 @@ if zBind != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, pIter+152, p+64, Xsqlite3_mprintf(tls, - ts+29717, + ts+29764, libc.VaList(bp, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zCollist, zRbuRowid, zBind))) } } @@ -127230,7 +127288,7 @@ if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { *(*int32)(unsafe.Pointer(bp + 8)) = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp, p+64, - ts+29774) + ts+29821) } if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { var rc2 int32 @@ -127335,7 +127393,7 @@ var zLimit uintptr = uintptr(0) if nOffset != 0 { - zLimit = Xsqlite3_mprintf(tls, ts+29840, libc.VaList(bp, nOffset)) + zLimit = Xsqlite3_mprintf(tls, ts+29887, libc.VaList(bp, nOffset)) if !(zLimit != 0) { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_NOMEM } @@ -127358,7 +127416,7 @@ Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+8, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6434, 0, 1)) Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+40, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6434, 1, tnum)) rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+29860, + ts+29907, libc.VaList(bp+72, zTbl, *(*uintptr)(unsafe.Pointer(bp + 600)), *(*uintptr)(unsafe.Pointer(bp + 608)))) Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+96, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6434, 0, 0)) @@ -127366,13 +127424,13 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+136, p+64, - Xsqlite3_mprintf(tls, ts+29925, libc.VaList(bp+128, zTbl, zBind))) + Xsqlite3_mprintf(tls, ts+29972, libc.VaList(bp+128, zTbl, zBind))) } if libc.Bool32((*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0)) == 0 && (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+144, p+64, - Xsqlite3_mprintf(tls, ts+29961, libc.VaList(bp+144, zTbl, *(*uintptr)(unsafe.Pointer(bp + 616))))) + Xsqlite3_mprintf(tls, ts+30008, libc.VaList(bp+144, zTbl, *(*uintptr)(unsafe.Pointer(bp + 616))))) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { @@ -127388,7 +127446,7 @@ } zSql = Xsqlite3_mprintf(tls, - ts+29995, + ts+30042, libc.VaList(bp+160, zCollist, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zPart, @@ -127396,9 +127454,9 @@ if zStart != 0 { return func() uintptr { if zPart != 0 { - return ts + 30056 + return ts + 30103 } - return ts + 30060 + return ts + 30107 }() } return ts + 1547 @@ -127407,20 +127465,20 @@ Xsqlite3_free(tls, zStart) } else if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { zSql = Xsqlite3_mprintf(tls, - ts+30066, + ts+30113, libc.VaList(bp+216, zCollist, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zPart, zCollist, zLimit)) } else { zSql = 
Xsqlite3_mprintf(tls, - ts+30127, + ts+30174, libc.VaList(bp+264, zCollist, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zPart, zCollist, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zPart, func() uintptr { if zPart != 0 { - return ts + 30056 + return ts + 30103 } - return ts + 30060 + return ts + 30107 }(), zCollist, zLimit)) } @@ -127457,16 +127515,16 @@ if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_VTAB { return ts + 1547 } - return ts + 30286 + return ts + 30333 }() if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+136, pz, Xsqlite3_mprintf(tls, - ts+30295, + ts+30342, libc.VaList(bp+344, zWrite, zTbl, zCollist, func() uintptr { if bRbuRowid != 0 { - return ts + 30331 + return ts + 30378 } return ts + 1547 }(), zBindings))) @@ -127475,32 +127533,32 @@ if libc.Bool32((*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0)) == 0 && (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+144, pz, Xsqlite3_mprintf(tls, - ts+30341, libc.VaList(bp+384, zWrite, zTbl, zWhere))) + ts+30388, libc.VaList(bp+384, zWrite, zTbl, zWhere))) } if libc.Bool32((*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0)) == 0 && (*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed != 0 { var zRbuRowid uintptr = ts + 1547 if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { - zRbuRowid = ts + 30369 + zRbuRowid = ts + 30416 } rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, - ts+30381, libc.VaList(bp+408, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, func() uintptr { + ts+30428, libc.VaList(bp+408, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, func() uintptr { if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL { - return ts + 30457 + return ts + 30504 } return ts + 1547 }(), (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl)) rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+30474, + ts+30521, libc.VaList(bp+440, zWrite, zTbl, zOldlist, zWrite, zTbl, zOldlist, zWrite, zTbl, zNewlist)) if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+30773, + ts+30820, libc.VaList(bp+512, zWrite, zTbl, zNewlist)) } @@ -127513,9 +127571,9 @@ var zOrder uintptr = uintptr(0) if bRbuRowid != 0 { if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - zRbuRowid = ts + 30872 + zRbuRowid = ts + 30919 } else { - zRbuRowid = ts + 30882 + zRbuRowid = ts + 30929 } } @@ -127528,7 +127586,7 @@ } } if bRbuRowid != 0 { - zOrder = rbuMPrintf(tls, p, ts+28929, 0) + zOrder = rbuMPrintf(tls, p, ts+28976, 0) } else { zOrder = rbuObjIterGetPkList(tls, p, pIter, ts+1547, ts+14607, ts+1547) } @@ -127537,11 +127595,11 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, pIter+128, pz, Xsqlite3_mprintf(tls, - ts+30893, + ts+30940, libc.VaList(bp+536, zCollist, func() uintptr { if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - return ts + 30941 + return ts + 30988 } return ts + 1547 }(), @@ -127554,7 +127612,7 @@ }(), func() uintptr { if zOrder != 0 { - return ts + 22852 + return ts 
+ 22899 } return ts + 1547 }(), zOrder, @@ -127622,9 +127680,9 @@ var zPrefix uintptr = ts + 1547 if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType != RBU_PK_VTAB { - zPrefix = ts + 30286 + zPrefix = ts + 30333 } - zUpdate = Xsqlite3_mprintf(tls, ts+30947, + zUpdate = Xsqlite3_mprintf(tls, ts+30994, libc.VaList(bp, zPrefix, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zSet, zWhere)) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pUp+8, p+64, zUpdate) @@ -127683,7 +127741,7 @@ } *(*int32)(unsafe.Pointer(bp + 16)) = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp+8, p+64, - Xsqlite3_mprintf(tls, ts+30977, libc.VaList(bp, p+48))) + Xsqlite3_mprintf(tls, ts+31024, libc.VaList(bp, p+48))) for *(*int32)(unsafe.Pointer(bp + 16)) == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 8))) { switch Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 8)), 0) { case RBU_STATE_STAGE: @@ -127756,18 +127814,18 @@ Xsqlite3_file_control(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+6434, SQLITE_FCNTL_RBUCNT, p) if (*Sqlite3rbu)(unsafe.Pointer(p)).FzState == uintptr(0) { var zFile uintptr = Xsqlite3_db_filename(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+6434) - (*Sqlite3rbu)(unsafe.Pointer(p)).FzState = rbuMPrintf(tls, p, ts+31007, libc.VaList(bp, zFile, zFile)) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzState = rbuMPrintf(tls, p, ts+31054, libc.VaList(bp, zFile, zFile)) } } if (*Sqlite3rbu)(unsafe.Pointer(p)).FzState != 0 { - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+31035, libc.VaList(bp+16, (*Sqlite3rbu)(unsafe.Pointer(p)).FzState)) + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+31082, libc.VaList(bp+16, (*Sqlite3rbu)(unsafe.Pointer(p)).FzState)) libc.Xmemcpy(tls, p+48, ts+3279, uint64(4)) } else { libc.Xmemcpy(tls, p+48, ts+6434, uint64(4)) } - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+31053, libc.VaList(bp+24, p+48)) + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+31100, libc.VaList(bp+24, p+48)) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { var bOpen int32 = 0 @@ -127807,11 +127865,11 @@ return } (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31119, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31166, 0) } else { var zTarget uintptr var zExtra uintptr = uintptr(0) - if libc.Xstrlen(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu) >= uint64(5) && 0 == libc.Xmemcmp(tls, ts+24199, (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu, uint64(5)) { + if libc.Xstrlen(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu) >= uint64(5) && 0 == libc.Xmemcmp(tls, ts+24246, (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu, uint64(5)) { zExtra = (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu + 5 for *(*int8)(unsafe.Pointer(zExtra)) != 0 { if int32(*(*int8)(unsafe.Pointer(libc.PostIncUintptr(&zExtra, 1)))) == '?' 
{ @@ -127823,13 +127881,13 @@ } } - zTarget = Xsqlite3_mprintf(tls, ts+31151, + zTarget = Xsqlite3_mprintf(tls, ts+31198, libc.VaList(bp+32, Xsqlite3_db_filename(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+6434), func() uintptr { if zExtra == uintptr(0) { return ts + 1547 } - return ts + 31183 + return ts + 31230 }(), func() uintptr { if zExtra == uintptr(0) { return ts + 1547 @@ -127848,21 +127906,21 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_create_function(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+31185, -1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { + ts+31232, -1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rbuTmpInsertFunc})), uintptr(0), uintptr(0)) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_create_function(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+31200, 2, SQLITE_UTF8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + ts+31247, 2, SQLITE_UTF8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rbuFossilDeltaFunc})), uintptr(0), uintptr(0)) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_create_function(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, - ts+31217, -1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { + ts+31264, -1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rbuTargetNameFunc})), uintptr(0), uintptr(0)) } @@ -127870,7 +127928,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_file_control(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6434, SQLITE_FCNTL_RBU, p) } - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31233, 0) + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31280, 0) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_file_control(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6434, SQLITE_FCNTL_RBU, p) @@ -127878,7 +127936,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_NOTFOUND { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31261, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31308, 0) } } @@ -127907,14 +127965,14 @@ if pState == uintptr(0) { (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage = 0 if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31233, uintptr(0), uintptr(0), uintptr(0)) + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31280, uintptr(0), uintptr(0), uintptr(0)) } } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { var rc2 int32 (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage = RBU_STAGE_CAPTURE - rc2 = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31279, uintptr(0), uintptr(0), uintptr(0)) + rc2 = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31326, uintptr(0), uintptr(0), uintptr(0)) if rc2 != SQLITE_NOTICE { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = rc2 } @@ -128040,7 +128098,7 @@ func rbuExclusiveCheckpoint(tls *libc.TLS, db uintptr) int32 { var zUri uintptr = Xsqlite3_db_filename(tls, db, uintptr(0)) - return 
Xsqlite3_uri_boolean(tls, zUri, ts+31314, 0) + return Xsqlite3_uri_boolean(tls, zUri, ts+31361, 0) } func rbuMoveOalFile(tls *libc.TLS, p uintptr) { @@ -128055,8 +128113,8 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { zMove = Xsqlite3_db_filename(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+6434) } - zOal = Xsqlite3_mprintf(tls, ts+31339, libc.VaList(bp, zMove)) - zWal = Xsqlite3_mprintf(tls, ts+31346, libc.VaList(bp+8, zMove)) + zOal = Xsqlite3_mprintf(tls, ts+31386, libc.VaList(bp, zMove)) + zWal = Xsqlite3_mprintf(tls, ts+31393, libc.VaList(bp+8, zMove)) if zWal == uintptr(0) || zOal == uintptr(0) { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_NOMEM @@ -128173,7 +128231,7 @@ (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx == uintptr(0) && (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_IPK && *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(i))) != 0 && Xsqlite3_column_type(tls, (*RbuObjIter)(unsafe.Pointer(pIter)).FpSelect, i) == SQLITE_NULL { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_MISMATCH - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+23837, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+23884, 0) return } @@ -128266,7 +128324,7 @@ var iCookie int32 = 1000000 (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, dbread, bp+8, p+64, - ts+31353) + ts+31400) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { if SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 8))) { iCookie = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 8)), 0) @@ -128274,7 +128332,7 @@ rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 8))) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31375, libc.VaList(bp, iCookie+1)) + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31422, libc.VaList(bp, iCookie+1)) } } } @@ -128295,7 +128353,7 @@ rc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp+168, p+64, Xsqlite3_mprintf(tls, - ts+31402, + ts+31449, libc.VaList(bp, p+48, RBU_STATE_STAGE, eStage, RBU_STATE_TBL, (*Sqlite3rbu)(unsafe.Pointer(p)).Fobjiter.FzTbl, @@ -128325,9 +128383,9 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { *(*uintptr)(unsafe.Pointer(bp + 24)) = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp+24, p+64, - Xsqlite3_mprintf(tls, ts+31560, libc.VaList(bp, zPragma))) + Xsqlite3_mprintf(tls, ts+31607, libc.VaList(bp, zPragma))) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) { - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31575, + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31622, libc.VaList(bp+8, zPragma, Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 24)), 0))) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 24))) @@ -128341,10 +128399,10 @@ *(*uintptr)(unsafe.Pointer(bp)) = uintptr(0) *(*uintptr)(unsafe.Pointer(bp + 8)) = uintptr(0) - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31595, uintptr(0), uintptr(0), p+64) + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31642, uintptr(0), uintptr(0), p+64) if 
(*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp, p+64, - ts+31620) + ts+31667) } for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp))) == SQLITE_ROW { @@ -128358,12 +128416,12 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp, p+64, - ts+31728) + ts+31775) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+8, p+64, - ts+31793) + ts+31840) } for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp))) == SQLITE_ROW { @@ -128375,7 +128433,7 @@ (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_reset(tls, *(*uintptr)(unsafe.Pointer(bp + 8))) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31837, uintptr(0), uintptr(0), p+64) + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31884, uintptr(0), uintptr(0), p+64) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp))) @@ -128403,7 +128461,7 @@ if (*RbuObjIter)(unsafe.Pointer(pIter)).FbCleanup != 0 { if libc.Bool32((*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0)) == 0 && (*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed != 0 { rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, - ts+31862, libc.VaList(bp, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl)) + ts+31909, libc.VaList(bp, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl)) } } else { rbuObjIterPrepareAll(tls, p, pIter, 0) @@ -128525,7 +128583,7 @@ if rc == SQLITE_OK && !(int32((*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl) != 0) { rc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31890, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31937, 0) } if rc == SQLITE_OK { @@ -128541,7 +128599,7 @@ bp := tls.Alloc(16) defer tls.Free(16) - var zOal uintptr = rbuMPrintf(tls, p, ts+31339, libc.VaList(bp, (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget)) + var zOal uintptr = rbuMPrintf(tls, p, ts+31386, libc.VaList(bp, (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget)) if zOal != 0 { *(*uintptr)(unsafe.Pointer(bp + 8)) = uintptr(0) Xsqlite3_file_control(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6434, SQLITE_FCNTL_VFS_POINTER, bp+8) @@ -128558,7 +128616,7 @@ defer tls.Free(76) Xsqlite3_randomness(tls, int32(unsafe.Sizeof(int32(0))), bp+8) - Xsqlite3_snprintf(tls, int32(unsafe.Sizeof([64]int8{})), bp+12, ts+31915, libc.VaList(bp, *(*int32)(unsafe.Pointer(bp + 8)))) + Xsqlite3_snprintf(tls, int32(unsafe.Sizeof([64]int8{})), bp+12, ts+31962, libc.VaList(bp, *(*int32)(unsafe.Pointer(bp + 8)))) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3rbu_create_vfs(tls, bp+12, uintptr(0)) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { var pVfs uintptr = Xsqlite3_vfs_find(tls, bp+12) @@ -128592,7 +128650,7 @@ rc = prepareFreeAndCollectError(tls, db, bp+8, bp+16, Xsqlite3_mprintf(tls, - ts+31926, libc.VaList(bp, Xsqlite3_value_text(tls, *(*uintptr)(unsafe.Pointer(apVal)))))) + ts+31973, libc.VaList(bp, Xsqlite3_value_text(tls, *(*uintptr)(unsafe.Pointer(apVal)))))) if rc != SQLITE_OK { Xsqlite3_result_error(tls, pCtx, 
*(*uintptr)(unsafe.Pointer(bp + 16)), -1) } else { @@ -128622,13 +128680,13 @@ (*Sqlite3rbu)(unsafe.Pointer(p)).FnPhaseOneStep = int64(-1) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_create_function(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, - ts+31998, 1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { + ts+32045, 1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rbuIndexCntFunc})), uintptr(0), uintptr(0)) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp, p+64, - ts+32012) + ts+32059) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { if SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp))) { @@ -128639,7 +128697,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && bExists != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp, p+64, - ts+32069) + ts+32116) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { if SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp))) { (*Sqlite3rbu)(unsafe.Pointer(p)).FnPhaseOneStep = Xsqlite3_column_int64(tls, *(*uintptr)(unsafe.Pointer(bp)), 0) @@ -128713,7 +128771,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && (*Rbu_file)(unsafe.Pointer((*Sqlite3rbu)(unsafe.Pointer(p)).FpTargetFd)).FpWalFd != 0 { if (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage == RBU_STAGE_OAL { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32143, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32190, 0) } else if (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage == RBU_STAGE_MOVE { (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage = RBU_STAGE_CKPT (*Sqlite3rbu)(unsafe.Pointer(p)).FnStep = 0 @@ -128731,12 +128789,12 @@ }() if (*Rbu_file)(unsafe.Pointer(pFd)).FiCookie != (*RbuState)(unsafe.Pointer(pState)).FiCookie { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_BUSY - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32175, + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32222, libc.VaList(bp+8, func() uintptr { if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - return ts + 32207 + return ts + 32254 } - return ts + 32214 + return ts + 32261 }())) } } @@ -128760,14 +128818,14 @@ } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, db, ts+32221, uintptr(0), uintptr(0), p+64) + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, db, ts+32268, uintptr(0), uintptr(0), p+64) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { var frc int32 = Xsqlite3_file_control(tls, db, ts+6434, SQLITE_FCNTL_ZIPVFS, uintptr(0)) if frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, - db, ts+32237, uintptr(0), uintptr(0), p+64) + db, ts+32284, uintptr(0), uintptr(0), p+64) } } @@ -128821,7 +128879,7 @@ } if zState != 0 { var n int32 = int32(libc.Xstrlen(tls, zState)) - if n >= 7 && 0 == libc.Xmemcmp(tls, ts+32261, zState+uintptr(n-7), uint64(7)) { + if n >= 7 && 0 == libc.Xmemcmp(tls, ts+32308, zState+uintptr(n-7), uint64(7)) { return rbuMisuseError(tls) } } @@ -128848,7 +128906,7 @@ var i uint32 var nErrmsg Size_t = libc.Xstrlen(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg) for i = uint32(0); Size_t(i) < nErrmsg-uint64(8); i++ { - if libc.Xmemcmp(tls, 
(*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg+uintptr(i), ts+30286, uint64(8)) == 0 { + if libc.Xmemcmp(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg+uintptr(i), ts+30333, uint64(8)) == 0 { var nDel int32 = 8 for int32(*(*int8)(unsafe.Pointer((*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg + uintptr(i+uint32(nDel))))) >= '0' && int32(*(*int8)(unsafe.Pointer((*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg + uintptr(i+uint32(nDel))))) <= '9' { nDel++ @@ -128884,7 +128942,7 @@ rbuObjIterFinalize(tls, p+80) if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) && (*Sqlite3rbu)(unsafe.Pointer(p)).Frc != SQLITE_OK && (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu != 0 { - var rc2 int32 = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+32269, uintptr(0), uintptr(0), uintptr(0)) + var rc2 int32 = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+32316, uintptr(0), uintptr(0), uintptr(0)) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_DONE && rc2 != SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = rc2 } @@ -129003,12 +129061,12 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { zBegin = ts + 14496 } else { - zBegin = ts + 32221 + zBegin = ts + 32268 } rc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, zBegin, uintptr(0), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { - rc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+32221, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+32268, uintptr(0), uintptr(0), uintptr(0)) } } @@ -129354,7 +129412,7 @@ })(unsafe.Pointer(&struct{ uintptr }{xControl})).f(tls, (*Rbu_file)(unsafe.Pointer(p)).FpReal, SQLITE_FCNTL_ZIPVFS, bp+16) if rc == SQLITE_OK { rc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(pRbu)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32296, 0) + (*Sqlite3rbu)(unsafe.Pointer(pRbu)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32343, 0) } else if rc == SQLITE_NOTFOUND { (*Sqlite3rbu)(unsafe.Pointer(pRbu)).FpTargetFd = p (*Rbu_file)(unsafe.Pointer(p)).FpRbu = pRbu @@ -129379,7 +129437,7 @@ if rc == SQLITE_OK && op == SQLITE_FCNTL_VFSNAME { var pRbuVfs uintptr = (*Rbu_file)(unsafe.Pointer(p)).FpRbuVfs var zIn uintptr = *(*uintptr)(unsafe.Pointer(pArg)) - var zOut uintptr = Xsqlite3_mprintf(tls, ts+32319, libc.VaList(bp, (*Rbu_vfs)(unsafe.Pointer(pRbuVfs)).Fbase.FzName, zIn)) + var zOut uintptr = Xsqlite3_mprintf(tls, ts+32366, libc.VaList(bp, (*Rbu_vfs)(unsafe.Pointer(pRbuVfs)).Fbase.FzName, zIn)) *(*uintptr)(unsafe.Pointer(pArg)) = zOut if zOut == uintptr(0) { rc = SQLITE_NOMEM @@ -129539,7 +129597,7 @@ } if oflags&SQLITE_OPEN_MAIN_DB != 0 && - Xsqlite3_uri_boolean(tls, zName, ts+32330, 0) != 0 { + Xsqlite3_uri_boolean(tls, zName, ts+32377, 0) != 0 { oflags = SQLITE_OPEN_TEMP_DB | SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE | SQLITE_OPEN_EXCLUSIVE | SQLITE_OPEN_DELETEONCLOSE zOpen = uintptr(0) } @@ -130368,7 +130426,7 @@ rc = Xsqlite3_table_column_metadata(tls, db, zDb, zThis, uintptr(0), uintptr(0), uintptr(0), uintptr(0), uintptr(0), uintptr(0)) if rc == SQLITE_OK { zPragma = Xsqlite3_mprintf(tls, - ts+32341, 0) + ts+32388, 0) } else if rc == SQLITE_ERROR { zPragma = Xsqlite3_mprintf(tls, ts+1547, 0) } else { @@ -130381,7 +130439,7 @@ return rc } } else { - zPragma = Xsqlite3_mprintf(tls, ts+32462, libc.VaList(bp, zDb, zThis)) + zPragma = Xsqlite3_mprintf(tls, ts+32509, libc.VaList(bp, zDb, zThis)) } if !(zPragma != 0) { *(*uintptr)(unsafe.Pointer(pazCol)) = uintptr(0) @@ -131061,9 +131119,9 @@ for i = 0; i < nCol; i++ { if 
*(*U8)(unsafe.Pointer(abPK + uintptr(i))) != 0 { - zRet = Xsqlite3_mprintf(tls, ts+32491, + zRet = Xsqlite3_mprintf(tls, ts+32538, libc.VaList(bp, zRet, zSep, zDb1, zTab, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*8)), zDb2, zTab, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*8)))) - zSep = ts + 21518 + zSep = ts + 21565 if zRet == uintptr(0) { break } @@ -131086,9 +131144,9 @@ if int32(*(*U8)(unsafe.Pointer(abPK + uintptr(i)))) == 0 { bHave = 1 zRet = Xsqlite3_mprintf(tls, - ts+32525, + ts+32572, libc.VaList(bp, zRet, zSep, zDb1, zTab, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*8)), zDb2, zTab, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*8)))) - zSep = ts + 32566 + zSep = ts + 32613 if zRet == uintptr(0) { break } @@ -131096,7 +131154,7 @@ } if bHave == 0 { - zRet = Xsqlite3_mprintf(tls, ts+7523, 0) + zRet = Xsqlite3_mprintf(tls, ts+7512, 0) } return zRet @@ -131107,7 +131165,7 @@ defer tls.Free(40) var zRet uintptr = Xsqlite3_mprintf(tls, - ts+32571, + ts+32618, libc.VaList(bp, zDb1, zTbl, zDb2, zTbl, zExpr)) return zRet } @@ -131150,7 +131208,7 @@ rc = SQLITE_NOMEM } else { var zStmt uintptr = Xsqlite3_mprintf(tls, - ts+32649, + ts+32696, libc.VaList(bp, (*Sqlite3_session)(unsafe.Pointer(pSession)).FzDb, (*SessionTable)(unsafe.Pointer(pTab)).FzName, zFrom, (*SessionTable)(unsafe.Pointer(pTab)).FzName, zExpr, zExpr2)) if zStmt == uintptr(0) { rc = SQLITE_NOMEM @@ -131277,7 +131335,7 @@ if !(pzErrMsg != 0) { goto __16 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+32702, 0) + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+32749, 0) __16: ; rc = SQLITE_SCHEMA @@ -131753,7 +131811,7 @@ if 0 == Xsqlite3_stricmp(tls, ts+11341, zTab) { zSql = Xsqlite3_mprintf(tls, - ts+32729, libc.VaList(bp, zDb)) + ts+32776, libc.VaList(bp, zDb)) if zSql == uintptr(0) { *(*int32)(unsafe.Pointer(bp + 24)) = SQLITE_NOMEM } @@ -131762,18 +131820,18 @@ var zSep uintptr = ts + 1547 *(*SessionBuffer)(unsafe.Pointer(bp + 8)) = SessionBuffer{} - sessionAppendStr(tls, bp+8, ts+32839, bp+24) + sessionAppendStr(tls, bp+8, ts+32886, bp+24) sessionAppendIdent(tls, bp+8, zDb, bp+24) sessionAppendStr(tls, bp+8, ts+1560, bp+24) sessionAppendIdent(tls, bp+8, zTab, bp+24) - sessionAppendStr(tls, bp+8, ts+32854, bp+24) + sessionAppendStr(tls, bp+8, ts+32901, bp+24) for i = 0; i < nCol; i++ { if *(*U8)(unsafe.Pointer(abPK + uintptr(i))) != 0 { sessionAppendStr(tls, bp+8, zSep, bp+24) sessionAppendIdent(tls, bp+8, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*8)), bp+24) - sessionAppendStr(tls, bp+8, ts+32862, bp+24) + sessionAppendStr(tls, bp+8, ts+32909, bp+24) sessionAppendInteger(tls, bp+8, i+1, bp+24) - zSep = ts + 21518 + zSep = ts + 21565 } } zSql = (*SessionBuffer)(unsafe.Pointer(bp + 8)).FaBuf @@ -131882,7 +131940,7 @@ if (*Sqlite3_session)(unsafe.Pointer(pSession)).Frc != 0 { return (*Sqlite3_session)(unsafe.Pointer(pSession)).Frc } - *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3_exec(tls, (*Sqlite3_session)(unsafe.Pointer(pSession)).Fdb, ts+32868, uintptr(0), uintptr(0), uintptr(0)) + *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3_exec(tls, (*Sqlite3_session)(unsafe.Pointer(pSession)).Fdb, ts+32915, uintptr(0), uintptr(0), uintptr(0)) if *(*int32)(unsafe.Pointer(bp + 40)) != SQLITE_OK { return *(*int32)(unsafe.Pointer(bp + 40)) } @@ -131974,7 +132032,7 @@ } Xsqlite3_free(tls, (*SessionBuffer)(unsafe.Pointer(bp+24)).FaBuf) - Xsqlite3_exec(tls, db, ts+32888, uintptr(0), uintptr(0), uintptr(0)) + Xsqlite3_exec(tls, db, ts+32935, uintptr(0), uintptr(0), uintptr(0)) 
Xsqlite3_mutex_leave(tls, Xsqlite3_db_mutex(tls, db)) return *(*int32)(unsafe.Pointer(bp + 40)) } @@ -132237,7 +132295,7 @@ rc = sessionInputBuffer(tls, pIn, 9) if rc == SQLITE_OK { if (*SessionInput)(unsafe.Pointer(pIn)).FiNext >= (*SessionInput)(unsafe.Pointer(pIn)).FnData { - rc = Xsqlite3CorruptError(tls, 219078) + rc = Xsqlite3CorruptError(tls, 219169) } else { eType = int32(*(*U8)(unsafe.Pointer((*SessionInput)(unsafe.Pointer(pIn)).FaData + uintptr(libc.PostIncInt32(&(*SessionInput)(unsafe.Pointer(pIn)).FiNext, 1))))) @@ -132260,7 +132318,7 @@ rc = sessionInputBuffer(tls, pIn, *(*int32)(unsafe.Pointer(bp))) if rc == SQLITE_OK { if *(*int32)(unsafe.Pointer(bp)) < 0 || *(*int32)(unsafe.Pointer(bp)) > (*SessionInput)(unsafe.Pointer(pIn)).FnData-(*SessionInput)(unsafe.Pointer(pIn)).FiNext { - rc = Xsqlite3CorruptError(tls, 219098) + rc = Xsqlite3CorruptError(tls, 219189) } else { var enc U8 = func() uint8 { if eType == SQLITE_TEXT { @@ -132302,7 +132360,7 @@ nRead = nRead + sessionVarintGet(tls, (*SessionInput)(unsafe.Pointer(pIn)).FaData+uintptr((*SessionInput)(unsafe.Pointer(pIn)).FiNext+nRead), bp) if *(*int32)(unsafe.Pointer(bp)) < 0 || *(*int32)(unsafe.Pointer(bp)) > 65536 { - rc = Xsqlite3CorruptError(tls, 219152) + rc = Xsqlite3CorruptError(tls, 219243) } else { rc = sessionInputBuffer(tls, pIn, nRead+*(*int32)(unsafe.Pointer(bp))+100) nRead = nRead + *(*int32)(unsafe.Pointer(bp)) @@ -132363,7 +132421,7 @@ (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Ftblhdr.FnBuf = 0 sessionBufferGrow(tls, p+72, int64(nByte), bp+4) } else { - *(*int32)(unsafe.Pointer(bp + 4)) = Xsqlite3CorruptError(tls, 219240) + *(*int32)(unsafe.Pointer(bp + 4)) = Xsqlite3CorruptError(tls, 219331) } } @@ -132437,13 +132495,13 @@ } if (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FzTab == uintptr(0) || (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FbPatchset != 0 && (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FbInvert != 0 { - return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219326)) + return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219417)) } (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fop = int32(op) (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FbIndirect = int32(*(*U8)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fin.FaData + uintptr(libc.PostIncInt32(&(*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fin.FiNext, 1))))) if (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fop != SQLITE_UPDATE && (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fop != SQLITE_DELETE && (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fop != SQLITE_INSERT { - return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219332)) + return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219423)) } if paRec != 0 { @@ -132505,7 +132563,7 @@ if *(*U8)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FabPK + uintptr(i))) != 0 { *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FapValue + uintptr(i)*8)) = *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FapValue + uintptr(i+(*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FnCol)*8)) if *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FapValue + uintptr(i)*8)) == uintptr(0) { - return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219376)) + return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219467)) } *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FapValue + uintptr(i+(*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FnCol)*8)) = uintptr(0) } @@ -132878,7 
+132936,7 @@ goto __6 __11: - *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3CorruptError(tls, 219741) + *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3CorruptError(tls, 219832) goto finished_invert __6: ; @@ -133057,34 +133115,34 @@ (*SessionUpdate)(unsafe.Pointer(pUp)).FaMask = pUp + 1*24 libc.Xmemcpy(tls, (*SessionUpdate)(unsafe.Pointer(pUp)).FaMask, (*SessionApplyCtx)(unsafe.Pointer(p)).FaUpdateMask, uint64(nU32)*uint64(unsafe.Sizeof(U32(0)))) - sessionAppendStr(tls, bp, ts+32906, bp+16) + sessionAppendStr(tls, bp, ts+32953, bp+16) sessionAppendIdent(tls, bp, (*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FzTab, bp+16) - sessionAppendStr(tls, bp, ts+32919, bp+16) + sessionAppendStr(tls, bp, ts+32966, bp+16) for ii = 0; ii < (*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FnCol; ii++ { if int32(*(*U8)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FabPK + uintptr(ii)))) == 0 && *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FapValue + uintptr((*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FnCol+ii)*8)) != 0 { sessionAppendStr(tls, bp, zSep, bp+16) sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(ii)*8)), bp+16) - sessionAppendStr(tls, bp, ts+32925, bp+16) + sessionAppendStr(tls, bp, ts+32972, bp+16) sessionAppendInteger(tls, bp, ii*2+1, bp+16) zSep = ts + 14607 } } zSep = ts + 1547 - sessionAppendStr(tls, bp, ts+32854, bp+16) + sessionAppendStr(tls, bp, ts+32901, bp+16) for ii = 0; ii < (*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FnCol; ii++ { if *(*U8)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FabPK + uintptr(ii))) != 0 || bPatchset == 0 && *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FapValue + uintptr(ii)*8)) != 0 { sessionAppendStr(tls, bp, zSep, bp+16) if bStat1 != 0 && ii == 1 { sessionAppendStr(tls, bp, - ts+32930, bp+16) + ts+32977, bp+16) } else { sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(ii)*8)), bp+16) - sessionAppendStr(tls, bp, ts+32862, bp+16) + sessionAppendStr(tls, bp, ts+32909, bp+16) sessionAppendInteger(tls, bp, ii*2+2, bp+16) } - zSep = ts + 21518 + zSep = ts + 21565 } } @@ -133136,34 +133194,34 @@ *(*SessionBuffer)(unsafe.Pointer(bp)) = SessionBuffer{} var nPk int32 = 0 - sessionAppendStr(tls, bp, ts+33005, bp+16) + sessionAppendStr(tls, bp, ts+33052, bp+16) sessionAppendIdent(tls, bp, zTab, bp+16) - sessionAppendStr(tls, bp, ts+32854, bp+16) + sessionAppendStr(tls, bp, ts+32901, bp+16) for i = 0; i < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol; i++ { if *(*U8)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FabPK + uintptr(i))) != 0 { nPk++ sessionAppendStr(tls, bp, zSep, bp+16) sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(i)*8)), bp+16) - sessionAppendStr(tls, bp, ts+32925, bp+16) + sessionAppendStr(tls, bp, ts+32972, bp+16) sessionAppendInteger(tls, bp, i+1, bp+16) - zSep = ts + 21518 + zSep = ts + 21565 } } if nPk < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol { - sessionAppendStr(tls, bp, ts+33023, bp+16) + sessionAppendStr(tls, bp, ts+33070, bp+16) sessionAppendInteger(tls, bp, (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol+1, bp+16) - sessionAppendStr(tls, bp, ts+32566, bp+16) + sessionAppendStr(tls, bp, ts+32613, bp+16) zSep = ts + 1547 for i = 0; i < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol; i++ { if !(int32(*(*U8)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FabPK + 
uintptr(i)))) != 0) { sessionAppendStr(tls, bp, zSep, bp+16) sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(i)*8)), bp+16) - sessionAppendStr(tls, bp, ts+32862, bp+16) + sessionAppendStr(tls, bp, ts+32909, bp+16) sessionAppendInteger(tls, bp, i+1, bp+16) - zSep = ts + 33031 + zSep = ts + 33078 } } sessionAppendStr(tls, bp, ts+4950, bp+16) @@ -133190,9 +133248,9 @@ var i int32 *(*SessionBuffer)(unsafe.Pointer(bp)) = SessionBuffer{} - sessionAppendStr(tls, bp, ts+33036, bp+16) + sessionAppendStr(tls, bp, ts+33083, bp+16) sessionAppendIdent(tls, bp, zTab, bp+16) - sessionAppendStr(tls, bp, ts+21524, bp+16) + sessionAppendStr(tls, bp, ts+21571, bp+16) for i = 0; i < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol; i++ { if i != 0 { sessionAppendStr(tls, bp, ts+14607, bp+16) @@ -133200,9 +133258,9 @@ sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(i)*8)), bp+16) } - sessionAppendStr(tls, bp, ts+33054, bp+16) + sessionAppendStr(tls, bp, ts+33101, bp+16) for i = 1; i < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol; i++ { - sessionAppendStr(tls, bp, ts+33065, bp+16) + sessionAppendStr(tls, bp, ts+33112, bp+16) } sessionAppendStr(tls, bp, ts+4950, bp+16) @@ -133221,11 +133279,11 @@ var rc int32 = sessionSelectRow(tls, db, ts+11341, p) if rc == SQLITE_OK { rc = sessionPrepare(tls, db, p+16, - ts+33069) + ts+33116) } if rc == SQLITE_OK { rc = sessionPrepare(tls, db, p+8, - ts+33182) + ts+33229) } return rc } @@ -133253,7 +133311,7 @@ f func(*libc.TLS, uintptr, int32, uintptr) int32 })(unsafe.Pointer(&struct{ uintptr }{xValue})).f(tls, pIter, i, bp) if *(*uintptr)(unsafe.Pointer(bp)) == uintptr(0) { - rc = Xsqlite3CorruptError(tls, 220219) + rc = Xsqlite3CorruptError(tls, 220310) } else { rc = sessionBindValue(tls, pStmt, i+1, *(*uintptr)(unsafe.Pointer(bp))) } @@ -133506,7 +133564,7 @@ if *(*int32)(unsafe.Pointer(bp + 4)) != 0 { rc = sessionApplyOneOp(tls, pIter, pApply, xConflict, pCtx, uintptr(0), uintptr(0)) } else if *(*int32)(unsafe.Pointer(bp)) != 0 { - rc = Xsqlite3_exec(tls, db, ts+33326, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33373, uintptr(0), uintptr(0), uintptr(0)) if rc == SQLITE_OK { rc = sessionBindRow(tls, pIter, *(*uintptr)(unsafe.Pointer(&struct { @@ -133522,7 +133580,7 @@ rc = sessionApplyOneOp(tls, pIter, pApply, xConflict, pCtx, uintptr(0), uintptr(0)) } if rc == SQLITE_OK { - rc = Xsqlite3_exec(tls, db, ts+33347, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33394, uintptr(0), uintptr(0), uintptr(0)) } } } @@ -133595,10 +133653,10 @@ (*SessionApplyCtx)(unsafe.Pointer(bp + 48)).FbInvertConstraints = libc.BoolInt32(!!(flags&SQLITE_CHANGESETAPPLY_INVERT != 0)) Xsqlite3_mutex_enter(tls, Xsqlite3_db_mutex(tls, db)) if flags&SQLITE_CHANGESETAPPLY_NOSAVEPOINT == 0 { - rc = Xsqlite3_exec(tls, db, ts+33366, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33413, uintptr(0), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { - rc = Xsqlite3_exec(tls, db, ts+33392, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33439, uintptr(0), uintptr(0), uintptr(0)) } for rc == SQLITE_OK && SQLITE_ROW == Xsqlite3changeset_next(tls, pIter) { Xsqlite3changeset_op(tls, pIter, bp+176, bp+184, bp+188, uintptr(0)) @@ -133657,16 +133715,16 @@ if (*SessionApplyCtx)(unsafe.Pointer(bp+48)).FnCol == 0 { schemaMismatch = 1 Xsqlite3_log(tls, SQLITE_SCHEMA, - ts+33422, libc.VaList(bp+8, 
*(*uintptr)(unsafe.Pointer(bp + 200)))) + ts+33469, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(bp + 200)))) } else if (*SessionApplyCtx)(unsafe.Pointer(bp+48)).FnCol < *(*int32)(unsafe.Pointer(bp + 184)) { schemaMismatch = 1 Xsqlite3_log(tls, SQLITE_SCHEMA, - ts+33466, + ts+33513, libc.VaList(bp+16, *(*uintptr)(unsafe.Pointer(bp + 200)), (*SessionApplyCtx)(unsafe.Pointer(bp+48)).FnCol, *(*int32)(unsafe.Pointer(bp + 184)))) } else if *(*int32)(unsafe.Pointer(bp + 184)) < nMinCol || libc.Xmemcmp(tls, (*SessionApplyCtx)(unsafe.Pointer(bp+48)).FabPK, *(*uintptr)(unsafe.Pointer(bp + 192)), uint64(*(*int32)(unsafe.Pointer(bp + 184)))) != 0 { schemaMismatch = 1 Xsqlite3_log(tls, SQLITE_SCHEMA, - ts+33537, libc.VaList(bp+40, *(*uintptr)(unsafe.Pointer(bp + 200)))) + ts+33584, libc.VaList(bp+40, *(*uintptr)(unsafe.Pointer(bp + 200)))) } else { (*SessionApplyCtx)(unsafe.Pointer(bp + 48)).FnCol = *(*int32)(unsafe.Pointer(bp + 184)) if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(bp + 200)), ts+11341) { @@ -133720,14 +133778,14 @@ } } } - Xsqlite3_exec(tls, db, ts+33597, uintptr(0), uintptr(0), uintptr(0)) + Xsqlite3_exec(tls, db, ts+33644, uintptr(0), uintptr(0), uintptr(0)) if flags&SQLITE_CHANGESETAPPLY_NOSAVEPOINT == 0 { if rc == SQLITE_OK { - rc = Xsqlite3_exec(tls, db, ts+33627, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33674, uintptr(0), uintptr(0), uintptr(0)) } else { - Xsqlite3_exec(tls, db, ts+33651, uintptr(0), uintptr(0), uintptr(0)) - Xsqlite3_exec(tls, db, ts+33627, uintptr(0), uintptr(0), uintptr(0)) + Xsqlite3_exec(tls, db, ts+33698, uintptr(0), uintptr(0), uintptr(0)) + Xsqlite3_exec(tls, db, ts+33674, uintptr(0), uintptr(0), uintptr(0)) } } @@ -134975,7 +135033,7 @@ fts5yy_pop_parser_stack(tls, fts5yypParser) } - sqlite3Fts5ParseError(tls, pParse, ts+33679, 0) + sqlite3Fts5ParseError(tls, pParse, ts+33726, 0) (*Fts5yyParser)(unsafe.Pointer(fts5yypParser)).FpParse = pParse @@ -135263,7 +135321,7 @@ _ = fts5yymajor sqlite3Fts5ParseError(tls, - pParse, ts+33707, libc.VaList(bp, fts5yyminor.Fn, fts5yyminor.Fp)) + pParse, ts+33754, libc.VaList(bp, fts5yyminor.Fn, fts5yyminor.Fp)) (*Fts5yyParser)(unsafe.Pointer(fts5yypParser)).FpParse = pParse @@ -135450,7 +135508,7 @@ if n < 0 { n = int32(libc.Xstrlen(tls, z)) } - (*HighlightContext)(unsafe.Pointer(p)).FzOut = Xsqlite3_mprintf(tls, ts+33738, libc.VaList(bp, (*HighlightContext)(unsafe.Pointer(p)).FzOut, n, z)) + (*HighlightContext)(unsafe.Pointer(p)).FzOut = Xsqlite3_mprintf(tls, ts+33785, libc.VaList(bp, (*HighlightContext)(unsafe.Pointer(p)).FzOut, n, z)) if (*HighlightContext)(unsafe.Pointer(p)).FzOut == uintptr(0) { *(*int32)(unsafe.Pointer(pRc)) = SQLITE_NOMEM } @@ -135518,7 +135576,7 @@ var iCol int32 if nVal != 3 { - var zErr uintptr = ts + 33745 + var zErr uintptr = ts + 33792 Xsqlite3_result_error(tls, pCtx, zErr, -1) return } @@ -135700,7 +135758,7 @@ var nCol int32 if nVal != 5 { - var zErr uintptr = ts + 33795 + var zErr uintptr = ts + 33842 Xsqlite3_result_error(tls, pCtx, zErr, -1) return } @@ -136024,13 +136082,13 @@ defer tls.Free(96) *(*[3]Builtin)(unsafe.Pointer(bp)) = [3]Builtin{ - {FzFunc: ts + 33843, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { + {FzFunc: ts + 33890, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr, int32, uintptr) }{fts5SnippetFunction}))}, - {FzFunc: ts + 33851, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { + {FzFunc: ts + 33898, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr, 
int32, uintptr) }{fts5HighlightFunction}))}, - {FzFunc: ts + 33861, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { + {FzFunc: ts + 33908, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr, int32, uintptr) }{fts5Bm25Function}))}, } @@ -136581,7 +136639,7 @@ *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_OK var nCmd int32 = int32(libc.Xstrlen(tls, zCmd)) - if Xsqlite3_strnicmp(tls, ts+33866, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+33913, zCmd, nCmd) == 0 { var nByte int32 = int32(uint64(unsafe.Sizeof(int32(0))) * uint64(FTS5_MAX_PREFIX_INDEXES)) var p uintptr var bFirst int32 = 1 @@ -136608,14 +136666,14 @@ break } if int32(*(*int8)(unsafe.Pointer(p))) < '0' || int32(*(*int8)(unsafe.Pointer(p))) > '9' { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+33873, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+33920, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR break } if (*Fts5Config)(unsafe.Pointer(pConfig)).FnPrefix == FTS5_MAX_PREFIX_INDEXES { *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, - ts+33904, libc.VaList(bp, FTS5_MAX_PREFIX_INDEXES)) + ts+33951, libc.VaList(bp, FTS5_MAX_PREFIX_INDEXES)) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR break } @@ -136626,7 +136684,7 @@ } if nPre <= 0 || nPre >= 1000 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+33937, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+33984, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR break } @@ -136639,7 +136697,7 @@ return *(*int32)(unsafe.Pointer(bp + 40)) } - if Xsqlite3_strnicmp(tls, ts+33974, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+34021, zCmd, nCmd) == 0 { var p uintptr = zArg var nArg Sqlite3_int64 = Sqlite3_int64(libc.Xstrlen(tls, zArg) + uint64(1)) var azArg uintptr = sqlite3Fts5MallocZero(tls, bp+40, int64(uint64(unsafe.Sizeof(uintptr(0)))*uint64(nArg))) @@ -136648,7 +136706,7 @@ if azArg != 0 && pSpace != 0 { if (*Fts5Config)(unsafe.Pointer(pConfig)).FpTok != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+33983, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34030, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { for nArg = int64(0); p != 0 && *(*int8)(unsafe.Pointer(p)) != 0; nArg++ { @@ -136667,7 +136725,7 @@ } } if p == uintptr(0) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34016, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34063, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { *(*int32)(unsafe.Pointer(bp + 40)) = sqlite3Fts5GetTokenizer(tls, pGlobal, @@ -136682,14 +136740,14 @@ return *(*int32)(unsafe.Pointer(bp + 40)) } - if Xsqlite3_strnicmp(tls, ts+34050, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+34097, zCmd, nCmd) == 0 { if (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent != FTS5_CONTENT_NORMAL { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34058, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34105, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { if *(*int8)(unsafe.Pointer(zArg)) != 0 { (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent = FTS5_CONTENT_EXTERNAL - (*Fts5Config)(unsafe.Pointer(pConfig)).FzContent = sqlite3Fts5Mprintf(tls, bp+40, ts+34090, libc.VaList(bp+8, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, zArg)) + (*Fts5Config)(unsafe.Pointer(pConfig)).FzContent = sqlite3Fts5Mprintf(tls, bp+40, ts+34137, libc.VaList(bp+8, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, 
zArg)) } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent = FTS5_CONTENT_NONE } @@ -136697,9 +136755,9 @@ return *(*int32)(unsafe.Pointer(bp + 40)) } - if Xsqlite3_strnicmp(tls, ts+34096, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+34143, zCmd, nCmd) == 0 { if (*Fts5Config)(unsafe.Pointer(pConfig)).FzContentRowid != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34110, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34157, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FzContentRowid = sqlite3Fts5Strndup(tls, bp+40, zArg, -1) @@ -136707,9 +136765,9 @@ return *(*int32)(unsafe.Pointer(bp + 40)) } - if Xsqlite3_strnicmp(tls, ts+34148, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+34195, zCmd, nCmd) == 0 { if int32(*(*int8)(unsafe.Pointer(zArg))) != '0' && int32(*(*int8)(unsafe.Pointer(zArg))) != '1' || int32(*(*int8)(unsafe.Pointer(zArg + 1))) != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34159, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34206, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize = libc.Bool32(int32(*(*int8)(unsafe.Pointer(zArg))) == '1') @@ -136721,17 +136779,17 @@ *(*[4]Fts5Enum)(unsafe.Pointer(bp + 48)) = [4]Fts5Enum{ {FzName: ts + 8019, FeVal: FTS5_DETAIL_NONE}, {FzName: ts + 17338}, - {FzName: ts + 34194, FeVal: FTS5_DETAIL_COLUMNS}, + {FzName: ts + 34241, FeVal: FTS5_DETAIL_COLUMNS}, {}, } if libc.AssignPtrInt32(bp+40, fts5ConfigSetEnum(tls, bp+48, zArg, pConfig+92)) != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34202, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34249, 0) } return *(*int32)(unsafe.Pointer(bp + 40)) } - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34233, libc.VaList(bp+24, nCmd, zCmd)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34280, libc.VaList(bp+24, nCmd, zCmd)) return SQLITE_ERROR } @@ -136778,15 +136836,15 @@ defer tls.Free(16) var rc int32 = SQLITE_OK - if 0 == Xsqlite3_stricmp(tls, zCol, ts+22184) || + if 0 == Xsqlite3_stricmp(tls, zCol, ts+22231) || 0 == Xsqlite3_stricmp(tls, zCol, ts+16260) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34261, libc.VaList(bp, zCol)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34308, libc.VaList(bp, zCol)) rc = SQLITE_ERROR } else if zArg != 0 { - if 0 == Xsqlite3_stricmp(tls, zArg, ts+34291) { + if 0 == Xsqlite3_stricmp(tls, zArg, ts+34338) { *(*U8)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(p)).FabUnindexed + uintptr((*Fts5Config)(unsafe.Pointer(p)).FnCol))) = U8(1) } else { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34301, libc.VaList(bp+8, zArg)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34348, libc.VaList(bp+8, zArg)) rc = SQLITE_ERROR } } @@ -136803,13 +136861,13 @@ *(*int32)(unsafe.Pointer(bp + 24)) = SQLITE_OK *(*Fts5Buffer)(unsafe.Pointer(bp + 32)) = Fts5Buffer{} - sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+34332, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(p)).FzContentRowid)) + sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+34379, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(p)).FzContentRowid)) if (*Fts5Config)(unsafe.Pointer(p)).FeContent != FTS5_CONTENT_NONE { for i = 0; i < (*Fts5Config)(unsafe.Pointer(p)).FnCol; i++ { if (*Fts5Config)(unsafe.Pointer(p)).FeContent == FTS5_CONTENT_EXTERNAL { - 
sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+34337, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(p)).FazCol + uintptr(i)*8)))) + sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+34384, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(p)).FazCol + uintptr(i)*8)))) } else { - sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+34344, libc.VaList(bp+16, i)) + sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+34391, libc.VaList(bp+16, i)) } } } @@ -136847,8 +136905,8 @@ (*Fts5Config)(unsafe.Pointer(pRet)).FzName = sqlite3Fts5Strndup(tls, bp+40, *(*uintptr)(unsafe.Pointer(azArg + 2*8)), -1) (*Fts5Config)(unsafe.Pointer(pRet)).FbColumnsize = 1 (*Fts5Config)(unsafe.Pointer(pRet)).FeDetail = FTS5_DETAIL_FULL - if *(*int32)(unsafe.Pointer(bp + 40)) == SQLITE_OK && Xsqlite3_stricmp(tls, (*Fts5Config)(unsafe.Pointer(pRet)).FzName, ts+22184) == 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34352, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pRet)).FzName)) + if *(*int32)(unsafe.Pointer(bp + 40)) == SQLITE_OK && Xsqlite3_stricmp(tls, (*Fts5Config)(unsafe.Pointer(pRet)).FzName, ts+22231) == 0 { + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34399, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pRet)).FzName)) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } @@ -136880,7 +136938,7 @@ if *(*int32)(unsafe.Pointer(bp + 40)) == SQLITE_OK { if z == uintptr(0) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34381, libc.VaList(bp+8, zOrig)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34428, libc.VaList(bp+8, zOrig)) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { if bOption != 0 { @@ -136917,14 +136975,14 @@ var zTail uintptr = uintptr(0) if (*Fts5Config)(unsafe.Pointer(pRet)).FeContent == FTS5_CONTENT_NORMAL { - zTail = ts + 34050 + zTail = ts + 34097 } else if (*Fts5Config)(unsafe.Pointer(pRet)).FbColumnsize != 0 { - zTail = ts + 34401 + zTail = ts + 34448 } if zTail != 0 { (*Fts5Config)(unsafe.Pointer(pRet)).FzContent = sqlite3Fts5Mprintf(tls, - bp+40, ts+34409, libc.VaList(bp+16, (*Fts5Config)(unsafe.Pointer(pRet)).FzDb, (*Fts5Config)(unsafe.Pointer(pRet)).FzName, zTail)) + bp+40, ts+34456, libc.VaList(bp+16, (*Fts5Config)(unsafe.Pointer(pRet)).FzDb, (*Fts5Config)(unsafe.Pointer(pRet)).FzName, zTail)) } } @@ -136973,7 +137031,7 @@ *(*int32)(unsafe.Pointer(bp + 48)) = SQLITE_OK var zSql uintptr - zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34420, 0) + zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34467, 0) for i = 0; zSql != 0 && i < (*Fts5Config)(unsafe.Pointer(pConfig)).FnCol; i++ { var zSep uintptr = func() uintptr { if i == 0 { @@ -136981,10 +137039,10 @@ } return ts + 14607 }() - zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34436, libc.VaList(bp, zSql, zSep, *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(pConfig)).FazCol + uintptr(i)*8)))) + zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34483, libc.VaList(bp, zSql, zSep, *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(pConfig)).FazCol + uintptr(i)*8)))) } - zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34443, - libc.VaList(bp+24, zSql, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, ts+22184)) + zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34490, + libc.VaList(bp+24, zSql, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, ts+22231)) if zSql != 0 { *(*int32)(unsafe.Pointer(bp + 48)) = Xsqlite3_declare_vtab(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, zSql) @@ -137094,7 +137152,7 @@ var rc int32 = SQLITE_OK - if 
0 == Xsqlite3_stricmp(tls, zKey, ts+34469) { + if 0 == Xsqlite3_stricmp(tls, zKey, ts+34516) { var pgsz int32 = 0 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { pgsz = Xsqlite3_value_int(tls, pVal) @@ -137104,7 +137162,7 @@ } else { (*Fts5Config)(unsafe.Pointer(pConfig)).Fpgsz = pgsz } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34474) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34521) { var nHashSize int32 = -1 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { nHashSize = Xsqlite3_value_int(tls, pVal) @@ -137114,7 +137172,7 @@ } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FnHashSize = nHashSize } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34483) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34530) { var nAutomerge int32 = -1 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { nAutomerge = Xsqlite3_value_int(tls, pVal) @@ -137127,7 +137185,7 @@ } (*Fts5Config)(unsafe.Pointer(pConfig)).FnAutomerge = nAutomerge } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34493) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34540) { var nUsermerge int32 = -1 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { nUsermerge = Xsqlite3_value_int(tls, pVal) @@ -137137,7 +137195,7 @@ } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FnUsermerge = nUsermerge } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34503) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34550) { var nCrisisMerge int32 = -1 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { nCrisisMerge = Xsqlite3_value_int(tls, pVal) @@ -137153,7 +137211,7 @@ } (*Fts5Config)(unsafe.Pointer(pConfig)).FnCrisisMerge = nCrisisMerge } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+22184) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+22231) { var zIn uintptr = Xsqlite3_value_text(tls, pVal) rc = sqlite3Fts5ConfigParseRank(tls, zIn, bp, bp+8) @@ -137176,7 +137234,7 @@ bp := tls.Alloc(52) defer tls.Free(52) - var zSelect uintptr = ts + 34515 + var zSelect uintptr = ts + 34562 var zSql uintptr *(*uintptr)(unsafe.Pointer(bp + 40)) = uintptr(0) *(*int32)(unsafe.Pointer(bp + 32)) = SQLITE_OK @@ -137198,7 +137256,7 @@ for SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 40))) { var zK uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 40)), 0) var pVal uintptr = Xsqlite3_column_value(tls, *(*uintptr)(unsafe.Pointer(bp + 40)), 1) - if 0 == Xsqlite3_stricmp(tls, zK, ts+34547) { + if 0 == Xsqlite3_stricmp(tls, zK, ts+34594) { iVersion = Xsqlite3_value_int(tls, pVal) } else { *(*int32)(unsafe.Pointer(bp + 48)) = 0 @@ -137212,7 +137270,7 @@ *(*int32)(unsafe.Pointer(bp + 32)) = SQLITE_ERROR if (*Fts5Config)(unsafe.Pointer(pConfig)).FpzErrmsg != 0 { *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(pConfig)).FpzErrmsg)) = Xsqlite3_mprintf(tls, - ts+34555, + ts+34602, libc.VaList(bp+16, iVersion, FTS5_CURRENT_VERSION)) } } @@ -137310,7 +137368,7 @@ } } if int32(*(*int8)(unsafe.Pointer(z2))) == 0 { - sqlite3Fts5ParseError(tls, pParse, ts+34620, 0) + sqlite3Fts5ParseError(tls, pParse, ts+34667, 0) return FTS5_EOF } } @@ -137323,20 +137381,20 @@ { var z2 uintptr if sqlite3Fts5IsBareword(tls, *(*int8)(unsafe.Pointer(z))) == 0 { - sqlite3Fts5ParseError(tls, pParse, ts+34640, libc.VaList(bp, z)) + sqlite3Fts5ParseError(tls, pParse, ts+34687, libc.VaList(bp, z)) return FTS5_EOF } tok = FTS5_STRING for z2 = z + 1; sqlite3Fts5IsBareword(tls, *(*int8)(unsafe.Pointer(z2))) != 0; z2++ { } (*Fts5Token)(unsafe.Pointer(pToken)).Fn = int32((int64(z2) - int64(z)) / 
1) - if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 2 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+34671, uint64(2)) == 0 { + if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 2 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+34718, uint64(2)) == 0 { tok = FTS5_OR } - if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 3 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+34674, uint64(3)) == 0 { + if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 3 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+34721, uint64(3)) == 0 { tok = FTS5_NOT } - if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 3 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+30056, uint64(3)) == 0 { + if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 3 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+30103, uint64(3)) == 0 { tok = FTS5_AND } break @@ -139114,9 +139172,9 @@ bp := tls.Alloc(16) defer tls.Free(16) - if (*Fts5Token)(unsafe.Pointer(pTok)).Fn != 4 || libc.Xmemcmp(tls, ts+34678, (*Fts5Token)(unsafe.Pointer(pTok)).Fp, uint64(4)) != 0 { + if (*Fts5Token)(unsafe.Pointer(pTok)).Fn != 4 || libc.Xmemcmp(tls, ts+34725, (*Fts5Token)(unsafe.Pointer(pTok)).Fp, uint64(4)) != 0 { sqlite3Fts5ParseError(tls, - pParse, ts+33707, libc.VaList(bp, (*Fts5Token)(unsafe.Pointer(pTok)).Fn, (*Fts5Token)(unsafe.Pointer(pTok)).Fp)) + pParse, ts+33754, libc.VaList(bp, (*Fts5Token)(unsafe.Pointer(pTok)).Fn, (*Fts5Token)(unsafe.Pointer(pTok)).Fp)) } } @@ -139132,7 +139190,7 @@ var c int8 = *(*int8)(unsafe.Pointer((*Fts5Token)(unsafe.Pointer(p)).Fp + uintptr(i))) if int32(c) < '0' || int32(c) > '9' { sqlite3Fts5ParseError(tls, - pParse, ts+34683, libc.VaList(bp, (*Fts5Token)(unsafe.Pointer(p)).Fn, (*Fts5Token)(unsafe.Pointer(p)).Fp)) + pParse, ts+34730, libc.VaList(bp, (*Fts5Token)(unsafe.Pointer(p)).Fn, (*Fts5Token)(unsafe.Pointer(p)).Fp)) return } nNear = nNear*10 + (int32(*(*int8)(unsafe.Pointer((*Fts5Token)(unsafe.Pointer(p)).Fp + uintptr(i)))) - '0') @@ -139219,7 +139277,7 @@ } } if iCol == (*Fts5Config)(unsafe.Pointer(pConfig)).FnCol { - sqlite3Fts5ParseError(tls, pParse, ts+20521, libc.VaList(bp, z)) + sqlite3Fts5ParseError(tls, pParse, ts+20568, libc.VaList(bp, z)) } else { pRet = fts5ParseColset(tls, pParse, pColset, iCol) } @@ -139300,7 +139358,7 @@ *(*uintptr)(unsafe.Pointer(bp)) = pColset if (*Fts5Config)(unsafe.Pointer((*Fts5Parse)(unsafe.Pointer(pParse)).FpConfig)).FeDetail == FTS5_DETAIL_NONE { sqlite3Fts5ParseError(tls, pParse, - ts+34712, 0) + ts+34759, 0) } else { fts5ParseSetColset(tls, pParse, pExpr, pColset, bp) } @@ -139470,12 +139528,12 @@ (*Fts5ExprPhrase)(unsafe.Pointer(pPhrase)).FnTerm > 1 || (*Fts5ExprPhrase)(unsafe.Pointer(pPhrase)).FnTerm > 0 && (*Fts5ExprTerm)(unsafe.Pointer(pPhrase+32)).FbFirst != 0 { sqlite3Fts5ParseError(tls, pParse, - ts+34765, + ts+34812, libc.VaList(bp, func() uintptr { if (*Fts5ExprNearset)(unsafe.Pointer(pNear)).FnPhrase == 1 { - return ts + 34815 + return ts + 34862 } - return ts + 34678 + return ts + 34725 }())) Xsqlite3_free(tls, pRet) pRet = uintptr(0) @@ -140418,7 +140476,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpReader == uintptr(0) && rc == SQLITE_OK { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig rc = Xsqlite3_blob_open(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, - (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl, ts+34822, iRowid, 0, p+56) + (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl, ts+34869, iRowid, 0, 
p+56) } if rc == SQLITE_ERROR { @@ -140497,7 +140555,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpWriter == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig fts5IndexPrepareStmt(tls, p, p+64, Xsqlite3_mprintf(tls, - ts+34828, + ts+34875, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName))) if (*Fts5Index)(unsafe.Pointer(p)).Frc != 0 { return @@ -140522,7 +140580,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpDeleter == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig var zSql uintptr = Xsqlite3_mprintf(tls, - ts+34879, + ts+34926, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) if fts5IndexPrepareStmt(tls, p, p+72, zSql) != 0 { return @@ -140545,7 +140603,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpIdxDeleter == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig fts5IndexPrepareStmt(tls, p, p+88, Xsqlite3_mprintf(tls, - ts+34928, + ts+34975, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName))) } if (*Fts5Index)(unsafe.Pointer(p)).Frc == SQLITE_OK { @@ -140784,7 +140842,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).Frc == SQLITE_OK { if (*Fts5Index)(unsafe.Pointer(p)).FpDataVersion == uintptr(0) { (*Fts5Index)(unsafe.Pointer(p)).Frc = fts5IndexPrepareStmt(tls, p, p+112, - Xsqlite3_mprintf(tls, ts+34968, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer((*Fts5Index)(unsafe.Pointer(p)).FpConfig)).FzDb))) + Xsqlite3_mprintf(tls, ts+35015, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer((*Fts5Index)(unsafe.Pointer(p)).FpConfig)).FzDb))) if (*Fts5Index)(unsafe.Pointer(p)).Frc != 0 { return int64(0) } @@ -141983,7 +142041,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpIdxSelect == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig fts5IndexPrepareStmt(tls, p, p+96, Xsqlite3_mprintf(tls, - ts+34991, + ts+35038, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName))) } return (*Fts5Index)(unsafe.Pointer(p)).FpIdxSelect @@ -143449,7 +143507,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpIdxWriter == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig fts5IndexPrepareStmt(tls, p, p+80, Xsqlite3_mprintf(tls, - ts+35075, + ts+35122, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName))) } @@ -144531,13 +144589,13 @@ if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { (*Fts5Index)(unsafe.Pointer(p)).FpConfig = pConfig (*Fts5Index)(unsafe.Pointer(p)).FnWorkUnit = FTS5_WORK_UNIT - (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl = sqlite3Fts5Mprintf(tls, bp+8, ts+35132, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) + (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl = sqlite3Fts5Mprintf(tls, bp+8, ts+35179, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) if (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl != 0 && bCreate != 0 { *(*int32)(unsafe.Pointer(bp + 8)) = sqlite3Fts5CreateTable(tls, - pConfig, ts+25049, ts+35140, 0, pzErr) + pConfig, ts+25096, ts+35187, 0, pzErr) if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { *(*int32)(unsafe.Pointer(bp + 8)) = sqlite3Fts5CreateTable(tls, pConfig, ts+11481, - ts+35175, + ts+35222, 1, pzErr) } if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { @@ -144790,7 +144848,7 @@ sqlite3Fts5Put32(tls, bp, iNew) rc = Xsqlite3_blob_open(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, 
(*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl, - ts+34822, int64(FTS5_STRUCTURE_ROWID), 1, bp+8) + ts+34869, int64(FTS5_STRUCTURE_ROWID), 1, bp+8) if rc == SQLITE_OK { Xsqlite3_blob_write(tls, *(*uintptr)(unsafe.Pointer(bp + 8)), bp, 4, 0) rc = Xsqlite3_blob_close(tls, *(*uintptr)(unsafe.Pointer(bp + 8))) @@ -144904,7 +144962,7 @@ } fts5IndexPrepareStmt(tls, p, bp+24, Xsqlite3_mprintf(tls, - ts+35219, + ts+35266, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, (*Fts5StructureSegment)(unsafe.Pointer(pSeg)).FiSegid))) for (*Fts5Index)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) { @@ -145074,7 +145132,7 @@ } else { (*Fts5Buffer)(unsafe.Pointer(bp + 16)).Fn = 0 fts5SegiterPoslist(tls, p, *(*uintptr)(unsafe.Pointer(bp))+96+uintptr((*Fts5CResult)(unsafe.Pointer((*Fts5Iter)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FaFirst+1*4)).FiFirst)*120, uintptr(0), bp+16) - sqlite3Fts5BufferAppendBlob(tls, p+52, bp+16, uint32(4), ts+35305) + sqlite3Fts5BufferAppendBlob(tls, p+52, bp+16, uint32(4), ts+35352) for 0 == sqlite3Fts5PoslistNext64(tls, (*Fts5Buffer)(unsafe.Pointer(bp+16)).Fp, (*Fts5Buffer)(unsafe.Pointer(bp+16)).Fn, bp+32, bp+40) { var iCol int32 = int32(*(*I64)(unsafe.Pointer(bp + 40)) >> 32) var iTokOff int32 = int32(*(*I64)(unsafe.Pointer(bp + 40)) & int64(0x7FFFFFFF)) @@ -145345,7 +145403,7 @@ if (*Fts5Config)(unsafe.Pointer(pConfig)).FbLock != 0 { (*Fts5Table)(unsafe.Pointer(pTab)).Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+35310, 0) + ts+35357, 0) return SQLITE_ERROR } @@ -145769,7 +145827,7 @@ (*Fts5Sorter)(unsafe.Pointer(pSorter)).FnIdx = nPhrase rc = fts5PrepareStatement(tls, pSorter, pConfig, - ts+35349, + ts+35396, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zRank, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, func() uintptr { if zRankArgs != 0 { @@ -145785,9 +145843,9 @@ }(), func() uintptr { if bDesc != 0 { - return ts + 35404 + return ts + 35451 } - return ts + 35409 + return ts + 35456 }())) (*Fts5Cursor)(unsafe.Pointer(pCsr)).FpSorter = pSorter @@ -145833,12 +145891,12 @@ (*Fts5Cursor)(unsafe.Pointer(pCsr)).FePlan = FTS5_PLAN_SPECIAL - if n == 5 && 0 == Xsqlite3_strnicmp(tls, ts+35413, z, n) { + if n == 5 && 0 == Xsqlite3_strnicmp(tls, ts+35460, z, n) { (*Fts5Cursor)(unsafe.Pointer(pCsr)).FiSpecial = I64(sqlite3Fts5IndexReads(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.FpIndex)) } else if n == 2 && 0 == Xsqlite3_strnicmp(tls, ts+5050, z, n) { (*Fts5Cursor)(unsafe.Pointer(pCsr)).FiSpecial = (*Fts5Cursor)(unsafe.Pointer(pCsr)).FiCsrId } else { - (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, ts+35419, libc.VaList(bp, n, z)) + (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, ts+35466, libc.VaList(bp, n, z)) rc = SQLITE_ERROR } @@ -145869,7 +145927,7 @@ var zRankArgs uintptr = (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRankArgs if zRankArgs != 0 { - var zSql uintptr = sqlite3Fts5Mprintf(tls, bp+16, ts+35447, libc.VaList(bp, zRankArgs)) + var zSql uintptr = sqlite3Fts5Mprintf(tls, bp+16, ts+35494, libc.VaList(bp, zRankArgs)) if zSql != 0 { *(*uintptr)(unsafe.Pointer(bp + 24)) = uintptr(0) *(*int32)(unsafe.Pointer(bp + 16)) = Xsqlite3_prepare_v3(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, zSql, -1, @@ -145900,7 +145958,7 @@ if *(*int32)(unsafe.Pointer(bp + 16)) == SQLITE_OK { pAux = 
fts5FindAuxiliary(tls, pTab, zRank) if pAux == uintptr(0) { - (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, ts+35457, libc.VaList(bp+8, zRank)) + (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, ts+35504, libc.VaList(bp+8, zRank)) *(*int32)(unsafe.Pointer(bp + 16)) = SQLITE_ERROR } } @@ -145932,14 +145990,14 @@ *(*int32)(unsafe.Pointer(pCsr + 80)) |= FTS5CSR_FREE_ZRANK } else if rc == SQLITE_ERROR { (*Sqlite3_vtab)(unsafe.Pointer((*Fts5Cursor)(unsafe.Pointer(pCsr)).Fbase.FpVtab)).FzErrMsg = Xsqlite3_mprintf(tls, - ts+35478, libc.VaList(bp, z)) + ts+35525, libc.VaList(bp, z)) } } else { if (*Fts5Config)(unsafe.Pointer(pConfig)).FzRank != 0 { (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRank = (*Fts5Config)(unsafe.Pointer(pConfig)).FzRank (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRankArgs = (*Fts5Config)(unsafe.Pointer(pConfig)).FzRankArgs } else { - (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRank = ts + 33861 + (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRank = ts + 33908 (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRankArgs = uintptr(0) } } @@ -145995,7 +146053,7 @@ goto __1 } (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+35310, 0) + ts+35357, 0) return SQLITE_ERROR __1: ; @@ -146212,7 +146270,7 @@ goto __40 } *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(pConfig)).FpzErrmsg)) = Xsqlite3_mprintf(tls, - ts+35511, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) + ts+35558, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) rc = SQLITE_ERROR goto __41 __40: @@ -146357,28 +146415,28 @@ var rc int32 = SQLITE_OK *(*int32)(unsafe.Pointer(bp)) = 0 - if 0 == Xsqlite3_stricmp(tls, ts+35547, zCmd) { + if 0 == Xsqlite3_stricmp(tls, ts+35594, zCmd) { if (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NORMAL { fts5SetVtabError(tls, pTab, - ts+35558, 0) + ts+35605, 0) rc = SQLITE_ERROR } else { rc = sqlite3Fts5StorageDeleteAll(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage) } - } else if 0 == Xsqlite3_stricmp(tls, ts+35638, zCmd) { + } else if 0 == Xsqlite3_stricmp(tls, ts+35685, zCmd) { if (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NONE { fts5SetVtabError(tls, pTab, - ts+35646, 0) + ts+35693, 0) rc = SQLITE_ERROR } else { rc = sqlite3Fts5StorageRebuild(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage) } } else if 0 == Xsqlite3_stricmp(tls, ts+16927, zCmd) { rc = sqlite3Fts5StorageOptimize(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage) - } else if 0 == Xsqlite3_stricmp(tls, ts+35702, zCmd) { + } else if 0 == Xsqlite3_stricmp(tls, ts+35749, zCmd) { var nMerge int32 = Xsqlite3_value_int(tls, pVal) rc = sqlite3Fts5StorageMerge(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage, nMerge) - } else if 0 == Xsqlite3_stricmp(tls, ts+35708, zCmd) { + } else if 0 == Xsqlite3_stricmp(tls, ts+35755, zCmd) { var iArg int32 = Xsqlite3_value_int(tls, pVal) rc = sqlite3Fts5StorageIntegrity(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage, iArg) } else { @@ -146449,12 +146507,12 @@ if eType0 == SQLITE_INTEGER && fts5IsContentless(tls, pTab) != 0 { (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+35724, + ts+35771, libc.VaList(bp, func() uintptr { if nArg > 1 { - return ts + 20422 + return ts + 20469 } - return ts + 35761 + return ts + 35808 }(), (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) *(*int32)(unsafe.Pointer(bp + 16)) = SQLITE_ERROR } else if nArg == 1 { @@ -147084,7 +147142,7 @@ pCsr = 
fts5CursorFromCsrid(tls, (*Fts5Auxiliary)(unsafe.Pointer(pAux)).FpGlobal, iCsrId) if pCsr == uintptr(0) || (*Fts5Cursor)(unsafe.Pointer(pCsr)).FePlan == 0 { - var zErr uintptr = Xsqlite3_mprintf(tls, ts+35773, libc.VaList(bp, iCsrId)) + var zErr uintptr = Xsqlite3_mprintf(tls, ts+35820, libc.VaList(bp, iCsrId)) Xsqlite3_result_error(tls, context, zErr, -1) Xsqlite3_free(tls, zErr) } else { @@ -147328,7 +147386,7 @@ }()) if pMod == uintptr(0) { rc = SQLITE_ERROR - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35794, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(azArg)))) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35841, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(azArg)))) } else { rc = (*struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 @@ -147347,7 +147405,7 @@ (*Fts5Config)(unsafe.Pointer(pConfig)).FpTokApi = pMod + 16 if rc != SQLITE_OK { if pzErr != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35816, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35863, 0) } } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FePattern = sqlite3Fts5TokenizerPattern(tls, @@ -147394,7 +147452,7 @@ var ppApi uintptr _ = nArg - ppApi = Xsqlite3_value_pointer(tls, *(*uintptr)(unsafe.Pointer(apArg)), ts+35847) + ppApi = Xsqlite3_value_pointer(tls, *(*uintptr)(unsafe.Pointer(apArg)), ts+35894) if ppApi != 0 { *(*uintptr)(unsafe.Pointer(ppApi)) = pGlobal } @@ -147403,7 +147461,7 @@ func fts5SourceIdFunc(tls *libc.TLS, pCtx uintptr, nArg int32, apUnused uintptr) { _ = nArg _ = apUnused - Xsqlite3_result_text(tls, pCtx, ts+35860, -1, libc.UintptrFromInt32(-1)) + Xsqlite3_result_text(tls, pCtx, ts+35907, -1, libc.UintptrFromInt32(-1)) } func fts5ShadowName(tls *libc.TLS, zName uintptr) int32 { @@ -147417,7 +147475,7 @@ } var azName2 = [5]uintptr{ - ts + 35951, ts + 34050, ts + 25049, ts + 34401, ts + 11481, + ts + 35998, ts + 34097, ts + 25096, ts + 34448, ts + 11481, } func fts5Init(tls *libc.TLS, db uintptr) int32 { @@ -147441,7 +147499,7 @@ (*Fts5Global)(unsafe.Pointer(pGlobal)).Fapi.FxFindTokenizer = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr, uintptr) int32 }{fts5FindTokenizer})) - rc = Xsqlite3_create_module_v2(tls, db, ts+35958, uintptr(unsafe.Pointer(&fts5Mod)), p, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5ModuleDestroy}))) + rc = Xsqlite3_create_module_v2(tls, db, ts+36005, uintptr(unsafe.Pointer(&fts5Mod)), p, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5ModuleDestroy}))) if rc == SQLITE_OK { rc = sqlite3Fts5IndexInit(tls, db) } @@ -147459,13 +147517,13 @@ } if rc == SQLITE_OK { rc = Xsqlite3_create_function(tls, - db, ts+35958, 1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { + db, ts+36005, 1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{fts5Fts5Func})), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { rc = Xsqlite3_create_function(tls, - db, ts+35963, 0, + db, ts+36010, 0, SQLITE_UTF8|SQLITE_DETERMINISTIC|SQLITE_INNOCUOUS, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) @@ -147522,17 +147580,17 @@ if *(*uintptr)(unsafe.Pointer(p + 40 + uintptr(eStmt)*8)) == uintptr(0) { *(*[11]uintptr)(unsafe.Pointer(bp + 128)) = [11]uintptr{ - ts + 35978, - ts + 36046, - ts + 36115, - ts + 36148, - ts + 36187, - ts + 36227, - ts + 36266, - ts + 36307, - ts + 36346, - ts + 36388, - ts + 36428, + ts + 36025, + ts + 36093, + ts + 36162, + ts + 36195, + 
ts + 36234, + ts + 36274, + ts + 36313, + ts + 36354, + ts + 36393, + ts + 36435, + ts + 36475, } var pC uintptr = (*Fts5Storage)(unsafe.Pointer(p)).FpConfig var zSql uintptr = uintptr(0) @@ -147634,18 +147692,18 @@ defer tls.Free(80) var rc int32 = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36451, + ts+36498, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36555, + ts+36602, libc.VaList(bp+48, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) } if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NORMAL { rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36593, + ts+36640, libc.VaList(bp+64, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) } return rc @@ -147657,7 +147715,7 @@ if *(*int32)(unsafe.Pointer(pRc)) == SQLITE_OK { *(*int32)(unsafe.Pointer(pRc)) = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36631, + ts+36678, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zTail, zName, zTail)) } } @@ -147669,14 +147727,14 @@ var pConfig uintptr = (*Fts5Storage)(unsafe.Pointer(pStorage)).FpConfig *(*int32)(unsafe.Pointer(bp)) = sqlite3Fts5StorageSync(tls, pStorage) - fts5StorageRenameOne(tls, pConfig, bp, ts+25049, zName) + fts5StorageRenameOne(tls, pConfig, bp, ts+25096, zName) fts5StorageRenameOne(tls, pConfig, bp, ts+11481, zName) - fts5StorageRenameOne(tls, pConfig, bp, ts+35951, zName) + fts5StorageRenameOne(tls, pConfig, bp, ts+35998, zName) if (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { - fts5StorageRenameOne(tls, pConfig, bp, ts+34401, zName) + fts5StorageRenameOne(tls, pConfig, bp, ts+34448, zName) } if (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NORMAL { - fts5StorageRenameOne(tls, pConfig, bp, ts+34050, zName) + fts5StorageRenameOne(tls, pConfig, bp, ts+34097, zName) } return *(*int32)(unsafe.Pointer(bp)) } @@ -147688,17 +147746,17 @@ var rc int32 *(*uintptr)(unsafe.Pointer(bp + 64)) = uintptr(0) - rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, bp+64, ts+36673, + rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, bp+64, ts+36720, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zPost, zDefn, func() uintptr { if bWithout != 0 { - return ts + 29702 + return ts + 29749 } return ts + 1547 }())) if *(*uintptr)(unsafe.Pointer(bp + 64)) != 0 { *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, - ts+36703, + ts+36750, libc.VaList(bp+40, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zPost, *(*uintptr)(unsafe.Pointer(bp + 64)))) Xsqlite3_free(tls, *(*uintptr)(unsafe.Pointer(bp + 64))) } @@ -147735,27 +147793,27 @@ } else { var i int32 var iOff int32 - Xsqlite3_snprintf(tls, nDefn, zDefn, ts+36747, 0) + Xsqlite3_snprintf(tls, nDefn, zDefn, ts+36794, 0) iOff = int32(libc.Xstrlen(tls, zDefn)) for i = 0; i < (*Fts5Config)(unsafe.Pointer(pConfig)).FnCol; i++ { - 
Xsqlite3_snprintf(tls, nDefn-iOff, zDefn+uintptr(iOff), ts+36770, libc.VaList(bp, i)) + Xsqlite3_snprintf(tls, nDefn-iOff, zDefn+uintptr(iOff), ts+36817, libc.VaList(bp, i)) iOff = iOff + int32(libc.Xstrlen(tls, zDefn+uintptr(iOff))) } - rc = sqlite3Fts5CreateTable(tls, pConfig, ts+34050, zDefn, 0, pzErr) + rc = sqlite3Fts5CreateTable(tls, pConfig, ts+34097, zDefn, 0, pzErr) } Xsqlite3_free(tls, zDefn) } if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { rc = sqlite3Fts5CreateTable(tls, - pConfig, ts+34401, ts+36776, 0, pzErr) + pConfig, ts+34448, ts+36823, 0, pzErr) } if rc == SQLITE_OK { rc = sqlite3Fts5CreateTable(tls, - pConfig, ts+35951, ts+36808, 1, pzErr) + pConfig, ts+35998, ts+36855, 1, pzErr) } if rc == SQLITE_OK { - rc = sqlite3Fts5StorageConfigValue(tls, p, ts+34547, uintptr(0), FTS5_CURRENT_VERSION) + rc = sqlite3Fts5StorageConfigValue(tls, p, ts+34594, uintptr(0), FTS5_CURRENT_VERSION) } } @@ -147961,12 +148019,12 @@ (*Fts5Storage)(unsafe.Pointer(p)).FbTotalsValid = 0 rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36825, + ts+36872, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36875, + ts+36922, libc.VaList(bp+32, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) } @@ -147974,7 +148032,7 @@ rc = sqlite3Fts5IndexReinit(tls, (*Fts5Storage)(unsafe.Pointer(p)).FpIndex) } if rc == SQLITE_OK { - rc = sqlite3Fts5StorageConfigValue(tls, p, ts+34547, uintptr(0), FTS5_CURRENT_VERSION) + rc = sqlite3Fts5StorageConfigValue(tls, p, ts+34594, uintptr(0), FTS5_CURRENT_VERSION) } return rc } @@ -148150,7 +148208,7 @@ var zSql uintptr var rc int32 - zSql = Xsqlite3_mprintf(tls, ts+36904, + zSql = Xsqlite3_mprintf(tls, ts+36951, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zSuffix)) if zSql == uintptr(0) { rc = SQLITE_NOMEM @@ -148332,14 +148390,14 @@ if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NORMAL { *(*I64)(unsafe.Pointer(bp + 48)) = int64(0) - rc = fts5StorageCount(tls, p, ts+34050, bp+48) + rc = fts5StorageCount(tls, p, ts+34097, bp+48) if rc == SQLITE_OK && *(*I64)(unsafe.Pointer(bp + 48)) != (*Fts5Storage)(unsafe.Pointer(p)).FnTotalRow { rc = SQLITE_CORRUPT | int32(1)<<8 } } if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { *(*I64)(unsafe.Pointer(bp + 56)) = int64(0) - rc = fts5StorageCount(tls, p, ts+34401, bp+56) + rc = fts5StorageCount(tls, p, ts+34448, bp+56) if rc == SQLITE_OK && *(*I64)(unsafe.Pointer(bp + 56)) != (*Fts5Storage)(unsafe.Pointer(p)).FnTotalRow { rc = SQLITE_CORRUPT | int32(1)<<8 } @@ -148534,9 +148592,9 @@ libc.Xmemcpy(tls, p, uintptr(unsafe.Pointer(&aAsciiTokenChar)), uint64(unsafe.Sizeof(aAsciiTokenChar))) for i = 0; rc == SQLITE_OK && i < nArg; i = i + 2 { var zArg uintptr = *(*uintptr)(unsafe.Pointer(azArg + uintptr(i+1)*8)) - if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36936) { + if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36983) { fts5AsciiAddExceptions(tls, p, zArg, 1) - } else if 0 == Xsqlite3_stricmp(tls, 
*(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36947) { + } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36994) { fts5AsciiAddExceptions(tls, p, zArg, 0) } else { rc = SQLITE_ERROR @@ -148751,7 +148809,7 @@ } else { p = Xsqlite3_malloc(tls, int32(unsafe.Sizeof(Unicode61Tokenizer{}))) if p != 0 { - var zCat uintptr = ts + 36958 + var zCat uintptr = ts + 37005 var i int32 libc.Xmemset(tls, p, 0, uint64(unsafe.Sizeof(Unicode61Tokenizer{}))) @@ -148763,7 +148821,7 @@ } for i = 0; rc == SQLITE_OK && i < nArg; i = i + 2 { - if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36967) { + if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37014) { zCat = *(*uintptr)(unsafe.Pointer(azArg + uintptr(i+1)*8)) } } @@ -148774,18 +148832,18 @@ for i = 0; rc == SQLITE_OK && i < nArg; i = i + 2 { var zArg uintptr = *(*uintptr)(unsafe.Pointer(azArg + uintptr(i+1)*8)) - if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36978) { + if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37025) { if int32(*(*int8)(unsafe.Pointer(zArg))) != '0' && int32(*(*int8)(unsafe.Pointer(zArg))) != '1' && int32(*(*int8)(unsafe.Pointer(zArg))) != '2' || *(*int8)(unsafe.Pointer(zArg + 1)) != 0 { rc = SQLITE_ERROR } else { (*Unicode61Tokenizer)(unsafe.Pointer(p)).FeRemoveDiacritic = int32(*(*int8)(unsafe.Pointer(zArg))) - '0' } - } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36936) { + } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36983) { rc = fts5UnicodeAddExceptions(tls, p, zArg, 1) - } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36947) { + } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36994) { rc = fts5UnicodeAddExceptions(tls, p, zArg, 0) - } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36967) { + } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37014) { } else { rc = SQLITE_ERROR } @@ -149061,7 +149119,7 @@ var rc int32 = SQLITE_OK var pRet uintptr *(*uintptr)(unsafe.Pointer(bp)) = uintptr(0) - var zBase uintptr = ts + 36996 + var zBase uintptr = ts + 37043 if nArg > 0 { zBase = *(*uintptr)(unsafe.Pointer(azArg)) @@ -149203,7 +149261,7 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*int8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'a': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37006, aBuf+uintptr(nBuf-2), uint64(2)) { + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37053, aBuf+uintptr(nBuf-2), uint64(2)) { if fts5Porter_MGt1(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 } @@ -149211,11 +149269,11 @@ break case 'c': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37009, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37056, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37014, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37061, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } @@ -149223,7 +149281,7 @@ break case 'e': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37019, 
aBuf+uintptr(nBuf-2), uint64(2)) { + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37066, aBuf+uintptr(nBuf-2), uint64(2)) { if fts5Porter_MGt1(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 } @@ -149231,7 +149289,7 @@ break case 'i': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37022, aBuf+uintptr(nBuf-2), uint64(2)) { + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37069, aBuf+uintptr(nBuf-2), uint64(2)) { if fts5Porter_MGt1(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 } @@ -149239,11 +149297,11 @@ break case 'l': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37025, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37072, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37030, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37077, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } @@ -149251,19 +149309,19 @@ break case 'n': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37035, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37082, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37039, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37086, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt1(tls, aBuf, nBuf-5) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37045, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37092, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } - } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37050, aBuf+uintptr(nBuf-3), uint64(3)) { + } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37097, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -149271,11 +149329,11 @@ break case 'o': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37054, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37101, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1_and_S_or_T(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } - } else if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37058, aBuf+uintptr(nBuf-2), uint64(2)) { + } else if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37105, aBuf+uintptr(nBuf-2), uint64(2)) { if fts5Porter_MGt1(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 } @@ -149283,7 +149341,7 @@ break case 's': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37061, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37108, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -149291,11 +149349,11 @@ break case 't': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37065, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37112, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } - } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37069, aBuf+uintptr(nBuf-3), uint64(3)) { + } else if nBuf > 3 && 0 == 
libc.Xmemcmp(tls, ts+37116, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -149303,7 +149361,7 @@ break case 'u': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37073, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37120, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -149311,7 +149369,7 @@ break case 'v': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37077, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37124, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -149319,7 +149377,7 @@ break case 'z': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37081, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37128, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -149335,24 +149393,24 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*int8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'a': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37085, aBuf+uintptr(nBuf-2), uint64(2)) { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37065, uint64(3)) + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37132, aBuf+uintptr(nBuf-2), uint64(2)) { + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37112, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 + 3 ret = 1 } break case 'b': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37088, aBuf+uintptr(nBuf-2), uint64(2)) { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37091, uint64(3)) + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37135, aBuf+uintptr(nBuf-2), uint64(2)) { + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37138, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 + 3 ret = 1 } break case 'i': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37095, aBuf+uintptr(nBuf-2), uint64(2)) { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37081, uint64(3)) + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37142, aBuf+uintptr(nBuf-2), uint64(2)) { + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37128, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 + 3 ret = 1 } @@ -149367,44 +149425,44 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*int8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'a': - if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37098, aBuf+uintptr(nBuf-7), uint64(7)) { + if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37145, aBuf+uintptr(nBuf-7), uint64(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37065, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37112, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } - } else if nBuf > 6 && 0 == libc.Xmemcmp(tls, ts+37106, aBuf+uintptr(nBuf-6), uint64(6)) { + } else if nBuf > 6 && 0 == libc.Xmemcmp(tls, ts+37153, aBuf+uintptr(nBuf-6), uint64(6)) { if fts5Porter_MGt0(tls, aBuf, nBuf-6) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-6), ts+37113, uint64(4)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-6), ts+37160, uint64(4)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 6 + 4 } } break case 'c': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37118, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37165, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37014, uint64(4)) 
+ libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37061, uint64(4)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 4 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37123, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37170, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37009, uint64(4)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37056, uint64(4)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 4 } } break case 'e': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37128, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37175, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37081, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37128, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 3 } } break case 'g': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37133, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37180, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+15473, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 3 @@ -149413,91 +149471,91 @@ break case 'l': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37138, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37185, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt0(tls, aBuf, nBuf-3) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37091, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37138, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 + 3 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37142, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37189, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37006, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37053, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 2 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37147, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37194, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37050, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37097, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 3 } - } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37153, aBuf+uintptr(nBuf-3), uint64(3)) { + } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37200, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt0(tls, aBuf, nBuf-3) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37157, uint64(1)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37204, uint64(1)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 + 1 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37159, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37206, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37073, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37120, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 3 } } break case 'o': - if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37165, aBuf+uintptr(nBuf-7), uint64(7)) { + if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37212, aBuf+uintptr(nBuf-7), uint64(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 
{ - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37081, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37128, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37173, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37220, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37065, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37112, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 3 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37179, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37226, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37065, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37112, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 3 } } break case 's': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37184, aBuf+uintptr(nBuf-5), uint64(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37231, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37006, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37053, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } - } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37190, aBuf+uintptr(nBuf-7), uint64(7)) { + } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37237, aBuf+uintptr(nBuf-7), uint64(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37077, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37124, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } - } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37198, aBuf+uintptr(nBuf-7), uint64(7)) { + } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37245, aBuf+uintptr(nBuf-7), uint64(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37206, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37253, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } - } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37210, aBuf+uintptr(nBuf-7), uint64(7)) { + } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37257, aBuf+uintptr(nBuf-7), uint64(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37073, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37120, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } } break case 't': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37218, aBuf+uintptr(nBuf-5), uint64(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37265, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37006, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37053, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37224, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37271, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37077, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37124, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 3 } - } else if nBuf > 6 && 0 == libc.Xmemcmp(tls, ts+37230, aBuf+uintptr(nBuf-6), uint64(6)) { + } else if nBuf > 6 && 0 == 
libc.Xmemcmp(tls, ts+37277, aBuf+uintptr(nBuf-6), uint64(6)) { if fts5Porter_MGt0(tls, aBuf, nBuf-6) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-6), ts+37091, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-6), ts+37138, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 6 + 3 } } @@ -149512,16 +149570,16 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*int8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'a': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37237, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37284, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37022, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37069, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 2 } } break case 's': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37242, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37289, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } @@ -149529,21 +149587,21 @@ break case 't': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37247, aBuf+uintptr(nBuf-5), uint64(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37294, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37022, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37069, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37253, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37300, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37022, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37069, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } } break case 'u': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37206, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37253, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt0(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -149551,7 +149609,7 @@ break case 'v': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37259, aBuf+uintptr(nBuf-5), uint64(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37306, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 } @@ -149559,9 +149617,9 @@ break case 'z': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37265, aBuf+uintptr(nBuf-5), uint64(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37312, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37006, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37053, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } } @@ -149576,12 +149634,12 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*int8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'e': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37271, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37318, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt0(tls, aBuf, nBuf-3) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37275, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37322, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 + 2 } - } else if nBuf > 2 && 0 == 
libc.Xmemcmp(tls, ts+37278, aBuf+uintptr(nBuf-2), uint64(2)) { + } else if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37325, aBuf+uintptr(nBuf-2), uint64(2)) { if fts5Porter_Vowel(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 ret = 1 @@ -149590,7 +149648,7 @@ break case 'n': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37281, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37328, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_Vowel(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 ret = 1 @@ -149746,7 +149804,7 @@ (*TrigramTokenizer)(unsafe.Pointer(pNew)).FbFold = 1 for i = 0; rc == SQLITE_OK && i < nArg; i = i + 2 { var zArg uintptr = *(*uintptr)(unsafe.Pointer(azArg + uintptr(i+1)*8)) - if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37285) { + if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37332) { if int32(*(*int8)(unsafe.Pointer(zArg))) != '0' && int32(*(*int8)(unsafe.Pointer(zArg))) != '1' || *(*int8)(unsafe.Pointer(zArg + 1)) != 0 { rc = SQLITE_ERROR } else { @@ -149926,22 +149984,22 @@ defer tls.Free(128) *(*[4]BuiltinTokenizer)(unsafe.Pointer(bp)) = [4]BuiltinTokenizer{ - {FzName: ts + 36996, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { + {FzName: ts + 37043, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 }{fts5UnicodeCreate})), FxDelete: *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5UnicodeDelete})), FxTokenize: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, int32, uintptr) int32 }{fts5UnicodeTokenize}))}}, - {FzName: ts + 37300, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { + {FzName: ts + 37347, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 }{fts5AsciiCreate})), FxDelete: *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5AsciiDelete})), FxTokenize: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, int32, uintptr) int32 }{fts5AsciiTokenize}))}}, - {FzName: ts + 37306, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { + {FzName: ts + 37353, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 }{fts5PorterCreate})), FxDelete: *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5PorterDelete})), FxTokenize: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, int32, uintptr) int32 }{fts5PorterTokenize}))}}, - {FzName: ts + 37313, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { + {FzName: ts + 37360, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 }{fts5TriCreate})), FxDelete: *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5TriDelete})), FxTokenize: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, int32, uintptr) int32 @@ -151084,14 +151142,14 @@ var zCopy uintptr = sqlite3Fts5Strndup(tls, bp+8, zType, -1) if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { sqlite3Fts5Dequote(tls, zCopy) - if Xsqlite3_stricmp(tls, zCopy, ts+37321) == 0 { + if Xsqlite3_stricmp(tls, zCopy, ts+37368) == 0 { *(*int32)(unsafe.Pointer(peType)) = FTS5_VOCAB_COL - } 
else if Xsqlite3_stricmp(tls, zCopy, ts+37325) == 0 { + } else if Xsqlite3_stricmp(tls, zCopy, ts+37372) == 0 { *(*int32)(unsafe.Pointer(peType)) = FTS5_VOCAB_ROW - } else if Xsqlite3_stricmp(tls, zCopy, ts+37329) == 0 { + } else if Xsqlite3_stricmp(tls, zCopy, ts+37376) == 0 { *(*int32)(unsafe.Pointer(peType)) = FTS5_VOCAB_INSTANCE } else { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+37338, libc.VaList(bp, zCopy)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+37385, libc.VaList(bp, zCopy)) *(*int32)(unsafe.Pointer(bp + 8)) = SQLITE_ERROR } Xsqlite3_free(tls, zCopy) @@ -151117,19 +151175,19 @@ defer tls.Free(36) *(*[3]uintptr)(unsafe.Pointer(bp + 8)) = [3]uintptr{ - ts + 37372, - ts + 37412, - ts + 37447, + ts + 37419, + ts + 37459, + ts + 37494, } var pRet uintptr = uintptr(0) *(*int32)(unsafe.Pointer(bp + 32)) = SQLITE_OK var bDb int32 - bDb = libc.Bool32(argc == 6 && libc.Xstrlen(tls, *(*uintptr)(unsafe.Pointer(argv + 1*8))) == uint64(4) && libc.Xmemcmp(tls, ts+23345, *(*uintptr)(unsafe.Pointer(argv + 1*8)), uint64(4)) == 0) + bDb = libc.Bool32(argc == 6 && libc.Xstrlen(tls, *(*uintptr)(unsafe.Pointer(argv + 1*8))) == uint64(4) && libc.Xmemcmp(tls, ts+23392, *(*uintptr)(unsafe.Pointer(argv + 1*8)), uint64(4)) == 0) if argc != 5 && bDb == 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+37490, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+37537, 0) *(*int32)(unsafe.Pointer(bp + 32)) = SQLITE_ERROR } else { var nByte int32 @@ -151262,11 +151320,11 @@ if (*Fts5VocabTable)(unsafe.Pointer(pTab)).FbBusy != 0 { (*Sqlite3_vtab)(unsafe.Pointer(pVTab)).FzErrMsg = Xsqlite3_mprintf(tls, - ts+37523, libc.VaList(bp, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) + ts+37570, libc.VaList(bp, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) return SQLITE_ERROR } zSql = sqlite3Fts5Mprintf(tls, bp+64, - ts+37554, + ts+37601, libc.VaList(bp+16, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) if zSql != 0 { *(*int32)(unsafe.Pointer(bp + 64)) = Xsqlite3_prepare_v2(tls, (*Fts5VocabTable)(unsafe.Pointer(pTab)).Fdb, zSql, -1, bp+72, uintptr(0)) @@ -151290,7 +151348,7 @@ *(*uintptr)(unsafe.Pointer(bp + 72)) = uintptr(0) if *(*int32)(unsafe.Pointer(bp + 64)) == SQLITE_OK { (*Sqlite3_vtab)(unsafe.Pointer(pVTab)).FzErrMsg = Xsqlite3_mprintf(tls, - ts+37605, libc.VaList(bp+48, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) + ts+37652, libc.VaList(bp+48, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) *(*int32)(unsafe.Pointer(bp + 64)) = SQLITE_ERROR } } else { @@ -151685,7 +151743,7 @@ func sqlite3Fts5VocabInit(tls *libc.TLS, pGlobal uintptr, db uintptr) int32 { var p uintptr = pGlobal - return Xsqlite3_create_module_v2(tls, db, ts+37631, uintptr(unsafe.Pointer(&fts5Vocab)), p, uintptr(0)) + return Xsqlite3_create_module_v2(tls, db, ts+37678, uintptr(unsafe.Pointer(&fts5Vocab)), p, uintptr(0)) } var fts5Vocab = Sqlite3_module{ @@ -151707,7 +151765,7 @@ // ************* End of stmt.c *********************************************** // Return the source-id for this library func Xsqlite3_sourceid(tls *libc.TLS) uintptr { - return ts + 37641 + return ts + 37688 } func init() { 
@@ -152681,5 +152739,5 @@ *(*func(*libc.TLS, uintptr, int32, uintptr) int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&vfs_template)) + 128)) = rbuVfsGetLastError } -var ts1 = "3.41.0\x00ATOMIC_INTRINSICS=0\x00COMPILER=clang-13.0.0\x00DEFAULT_AUTOVACUUM\x00DEFAULT_CACHE_SIZE=-2000\x00DEFAULT_FILE_FORMAT=4\x00DEFAULT_JOURNAL_SIZE_LIMIT=-1\x00DEFAULT_MEMSTATUS=0\x00DEFAULT_MMAP_SIZE=0\x00DEFAULT_PAGE_SIZE=4096\x00DEFAULT_PCACHE_INITSZ=20\x00DEFAULT_RECURSIVE_TRIGGERS\x00DEFAULT_SECTOR_SIZE=4096\x00DEFAULT_SYNCHRONOUS=2\x00DEFAULT_WAL_AUTOCHECKPOINT=1000\x00DEFAULT_WAL_SYNCHRONOUS=2\x00DEFAULT_WORKER_THREADS=0\x00ENABLE_COLUMN_METADATA\x00ENABLE_FTS5\x00ENABLE_GEOPOLY\x00ENABLE_MATH_FUNCTIONS\x00ENABLE_MEMORY_MANAGEMENT\x00ENABLE_OFFSET_SQL_FUNC\x00ENABLE_PREUPDATE_HOOK\x00ENABLE_RBU\x00ENABLE_RTREE\x00ENABLE_SESSION\x00ENABLE_SNAPSHOT\x00ENABLE_STAT4\x00ENABLE_UNLOCK_NOTIFY\x00LIKE_DOESNT_MATCH_BLOBS\x00MALLOC_SOFT_LIMIT=1024\x00MAX_ATTACHED=10\x00MAX_COLUMN=2000\x00MAX_COMPOUND_SELECT=500\x00MAX_DEFAULT_PAGE_SIZE=8192\x00MAX_EXPR_DEPTH=1000\x00MAX_FUNCTION_ARG=127\x00MAX_LENGTH=1000000000\x00MAX_LIKE_PATTERN_LENGTH=50000\x00MAX_MMAP_SIZE=0x7fff0000\x00MAX_PAGE_COUNT=1073741823\x00MAX_PAGE_SIZE=65536\x00MAX_SQL_LENGTH=1000000000\x00MAX_TRIGGER_DEPTH=1000\x00MAX_VARIABLE_NUMBER=32766\x00MAX_VDBE_OP=250000000\x00MAX_WORKER_THREADS=8\x00MUTEX_NOOP\x00SOUNDEX\x00SYSTEM_MALLOC\x00TEMP_STORE=1\x00THREADSAFE=1\x00BINARY\x00ANY\x00BLOB\x00INT\x00INTEGER\x00REAL\x00TEXT\x0020b:20e\x0020c:20e\x0020e\x0040f-21a-21d\x00now\x00local time unavailable\x00second\x00minute\x00hour\x00\x00\x00day\x00\x00\x00\x00month\x00\x00year\x00\x00\x00auto\x00julianday\x00localtime\x00unixepoch\x00utc\x00weekday \x00start of \x00month\x00year\x00day\x00%02d\x00%06.3f\x00%03d\x00%.16g\x00%lld\x00%04d\x00date\x00time\x00datetime\x00strftime\x00current_time\x00current_timestamp\x00current_date\x00failed to allocate %u bytes of memory\x00failed memory resize %u to %u bytes\x00out of memory\x000123456789ABCDEF0123456789abcdef\x00-x0\x00X0\x00%\x00NaN\x00Inf\x00\x00NULL\x00(NULL)\x00.\x00(join-%u)\x00(subquery-%u)\x00thstndrd\x00922337203685477580\x00API call with %s database connection 
pointer\x00unopened\x00invalid\x00Savepoint\x00AutoCommit\x00Transaction\x00Checkpoint\x00JournalMode\x00Vacuum\x00VFilter\x00VUpdate\x00Init\x00Goto\x00Gosub\x00InitCoroutine\x00Yield\x00MustBeInt\x00Jump\x00Once\x00If\x00IfNot\x00IsType\x00Not\x00IfNullRow\x00SeekLT\x00SeekLE\x00SeekGE\x00SeekGT\x00IfNotOpen\x00IfNoHope\x00NoConflict\x00NotFound\x00Found\x00SeekRowid\x00NotExists\x00Last\x00IfSmaller\x00SorterSort\x00Sort\x00Rewind\x00SorterNext\x00Prev\x00Next\x00IdxLE\x00IdxGT\x00IdxLT\x00Or\x00And\x00IdxGE\x00RowSetRead\x00RowSetTest\x00Program\x00FkIfZero\x00IsNull\x00NotNull\x00Ne\x00Eq\x00Gt\x00Le\x00Lt\x00Ge\x00ElseEq\x00IfPos\x00IfNotZero\x00DecrJumpZero\x00IncrVacuum\x00VNext\x00Filter\x00PureFunc\x00Function\x00Return\x00EndCoroutine\x00HaltIfNull\x00Halt\x00Integer\x00Int64\x00String\x00BeginSubrtn\x00Null\x00SoftNull\x00Blob\x00Variable\x00Move\x00Copy\x00SCopy\x00IntCopy\x00FkCheck\x00ResultRow\x00CollSeq\x00AddImm\x00RealAffinity\x00Cast\x00Permutation\x00Compare\x00IsTrue\x00ZeroOrNull\x00Offset\x00Column\x00TypeCheck\x00Affinity\x00MakeRecord\x00Count\x00ReadCookie\x00SetCookie\x00ReopenIdx\x00BitAnd\x00BitOr\x00ShiftLeft\x00ShiftRight\x00Add\x00Subtract\x00Multiply\x00Divide\x00Remainder\x00Concat\x00OpenRead\x00OpenWrite\x00BitNot\x00OpenDup\x00OpenAutoindex\x00String8\x00OpenEphemeral\x00SorterOpen\x00SequenceTest\x00OpenPseudo\x00Close\x00ColumnsUsed\x00SeekScan\x00SeekHit\x00Sequence\x00NewRowid\x00Insert\x00RowCell\x00Delete\x00ResetCount\x00SorterCompare\x00SorterData\x00RowData\x00Rowid\x00NullRow\x00SeekEnd\x00IdxInsert\x00SorterInsert\x00IdxDelete\x00DeferredSeek\x00IdxRowid\x00FinishSeek\x00Destroy\x00Clear\x00ResetSorter\x00CreateBtree\x00SqlExec\x00ParseSchema\x00LoadAnalysis\x00DropTable\x00DropIndex\x00Real\x00DropTrigger\x00IntegrityCk\x00RowSetAdd\x00Param\x00FkCounter\x00MemMax\x00OffsetLimit\x00AggInverse\x00AggStep\x00AggStep1\x00AggValue\x00AggFinal\x00Expire\x00CursorLock\x00CursorUnlock\x00TableLock\x00VBegin\x00VCreate\x00VDestroy\x00VOpen\x00VInitIn\x00VColumn\x00VRename\x00Pagecount\x00MaxPgcnt\x00ClrSubtype\x00FilterAdd\x00Trace\x00CursorHint\x00ReleaseReg\x00Noop\x00Explain\x00Abortable\x00open\x00close\x00access\x00getcwd\x00stat\x00fstat\x00ftruncate\x00fcntl\x00read\x00pread\x00pread64\x00write\x00pwrite\x00pwrite64\x00fchmod\x00fallocate\x00unlink\x00openDirectory\x00mkdir\x00rmdir\x00fchown\x00geteuid\x00mmap\x00munmap\x00mremap\x00getpagesize\x00readlink\x00lstat\x00ioctl\x00attempt to open \"%s\" as file descriptor %d\x00/dev/null\x00os_unix.c:%d: (%d) %s(%s) - %s\x00cannot fstat db file %s\x00file unlinked while open: %s\x00multiple links to file: %s\x00file renamed while open: %s\x00%s\x00full_fsync\x00%s-shm\x00readonly_shm\x00psow\x00unix-excl\x00%s.lock\x00/var/tmp\x00/usr/tmp\x00/tmp\x00SQLITE_TMPDIR\x00TMPDIR\x00%s/etilqs_%llx%c\x00modeof\x00fsync\x00/dev/urandom\x00unix\x00unix-none\x00unix-dotfile\x00memdb\x00memdb(%p,%lld)\x00PRAGMA \"%w\".page_count\x00ATTACH x AS %Q\x00recovered %d pages from %s\x00-journal\x00-wal\x00nolock\x00immutable\x00PRAGMA table_list\x00recovered %d frames from WAL file %s\x00cannot limit WAL size: %s\x00SQLite format 3\x00:memory:\x00@ \x00\n\x00invalid page number %d\x002nd reference to page %d\x00Failed to read ptrmap key=%d\x00Bad ptr map entry key=%d expected=(%d,%d) got=(%d,%d)\x00failed to get page %d\x00freelist leaf count too big on page %d\x00%s is %d but should be %d\x00size\x00overflow list length\x00Page %u: \x00unable to get the page. 
error code=%d\x00btreeInitPage() returns error code %d\x00free space corruption\x00On tree page %u cell %d: \x00On page %u at right child: \x00Offset %d out of range %d..%d\x00Extends off end of page\x00Rowid %lld out of order\x00Child page depth differs\x00Multiple uses for byte %u of page %u\x00Fragmentation of %d bytes reported as %d on page %u\x00Main freelist: \x00max rootpage (%d) disagrees with header (%d)\x00incremental_vacuum enabled with a max rootpage of zero\x00Page %d is never used\x00Pointer map page %d is referenced\x00unknown database %s\x00destination database is in use\x00source and destination must be distinct\x00%!.15g\x00-\x00%s%s\x00k(%d\x00B\x00,%s%s%s\x00N.\x00)\x00%.18s-%s\x00%s(%d)\x00%d\x00(blob)\x00vtab:%p\x00%c%u\x00]\x00program\x00?\x008\x0016LE\x0016BE\x00addr\x00opcode\x00p1\x00p2\x00p3\x00p4\x00p5\x00comment\x00id\x00parent\x00notused\x00detail\x00%.4c%s%.16c\x00MJ delete: %s\x00MJ collide: %s\x00-mj%06X9%02X\x00FOREIGN KEY constraint failed\x00a CHECK constraint\x00a generated column\x00an index\x00non-deterministic use of %s() in %s\x00API called with finalized prepared statement\x00API called with NULL prepared statement\x00string or blob too big\x00bind on a busy prepared statement: [%s]\x00-- \x00'%.*q'\x00zeroblob(%d)\x00x'\x00%02x\x00'\x00%s constraint failed\x00%z: %s\x00abort at %d in [%s]: %s\x00cannot store %s value in %s column %s.%s\x00cannot open savepoint - SQL statements in progress\x00no such savepoint: %s\x00cannot release savepoint - SQL statements in progress\x00cannot commit transaction - SQL statements in progress\x00cannot start a transaction within a transaction\x00cannot rollback - no transaction is active\x00cannot commit - no transaction is active\x00database schema has changed\x00index corruption\x00sqlite_master\x00SELECT*FROM\"%w\".%s WHERE %s ORDER BY rowid\x00too many levels of trigger recursion\x00cannot change %s wal mode from within a transaction\x00into\x00out of\x00database table is locked: %s\x00ValueList\x00-- %s\x00statement aborts at %d: [%s] %s\x00NOT NULL\x00UNIQUE\x00CHECK\x00FOREIGN KEY\x00cannot open value of type %s\x00null\x00real\x00integer\x00no such rowid: %lld\x00cannot open virtual table: %s\x00cannot open table without rowid: %s\x00cannot open view: %s\x00no such column: \"%s\"\x00foreign key\x00indexed\x00cannot open %s column for writing\x00sqlite_\x00sqlite_temp_master\x00sqlite_temp_schema\x00sqlite_schema\x00main\x00*\x00new\x00old\x00excluded\x00misuse of aliased aggregate %s\x00misuse of aliased window function %s\x00row value misused\x00double-quoted string literal: \"%w\"\x00coalesce\x00no such column\x00ambiguous column name\x00%s: %s.%s.%s\x00%s: %s.%s\x00%s: %s\x00partial index WHERE clauses\x00index expressions\x00CHECK constraints\x00generated columns\x00%s prohibited in %s\x00true\x00false\x00the \".\" operator\x00second argument to %#T() must be a constant between 0.0 and 1.0\x00not authorized to use function: %#T\x00non-deterministic functions\x00%#T() may not be used as a window function\x00window\x00aggregate\x00misuse of %s function %#T()\x00no such function: %#T\x00wrong number of arguments to function %#T()\x00FILTER may not be used with non-aggregate %#T()\x00subqueries\x00parameters\x00%r %s BY term out of range - should be between 1 and %d\x00too many terms in ORDER BY clause\x00ORDER\x00%r ORDER BY term does not match any column in the result set\x00too many terms in %s BY clause\x00HAVING clause on a non-aggregate query\x00GROUP\x00aggregate functions are not allowed in the 
GROUP BY clause\x00Expression tree is too large (maximum depth %d)\x00IN(...) element has %d term%s - expected %d\x00s\x000\x00too many arguments on function %T\x00unsafe use of %#T()\x00variable number must be between ?1 and ?%d\x00too many SQL variables\x00%d columns assigned %d values\x00too many columns in %s\x00_ROWID_\x00ROWID\x00OID\x00USING ROWID SEARCH ON TABLE %s FOR IN-OPERATOR\x00USING INDEX %s FOR IN-OPERATOR\x00sub-select returns %d columns - expected %d\x00REUSE LIST SUBQUERY %d\x00%sLIST SUBQUERY %d\x00CORRELATED \x00REUSE SUBQUERY %d\x00%sSCALAR SUBQUERY %d\x001\x000x\x00hex literal too big: %s%#T\x00generated column loop on \"%s\"\x00blob\x00text\x00numeric\x00flexnum\x00none\x00misuse of aggregate: %#T()\x00unknown function: %#T()\x00RAISE() may only be used within a trigger-program\x00B\x00C\x00D\x00E\x00F\x00table %s may not be altered\x00SELECT 1 FROM \"%w\".sqlite_master WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%' AND sqlite_rename_test(%Q, sql, type, name, %d, %Q, %d)=NULL \x00SELECT 1 FROM temp.sqlite_master WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%' AND sqlite_rename_test(%Q, sql, type, name, 1, %Q, %d)=NULL \x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_quotefix(%Q, sql)WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%'\x00UPDATE temp.sqlite_master SET sql = sqlite_rename_quotefix('temp', sql)WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%'\x00there is already another table or index with this name: %s\x00table\x00view %s may not be altered\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, %d) WHERE (type!='index' OR tbl_name=%Q COLLATE nocase)AND name NOT LIKE 'sqliteX_%%' ESCAPE 'X'\x00UPDATE %Q.sqlite_master SET tbl_name = %Q, name = CASE WHEN type='table' THEN %Q WHEN name LIKE 'sqliteX_autoindex%%' ESCAPE 'X' AND type='index' THEN 'sqlite_autoindex_' || %Q || substr(name,%d+18) ELSE name END WHERE tbl_name=%Q COLLATE nocase AND (type='table' OR type='index' OR type='trigger');\x00sqlite_sequence\x00UPDATE \"%w\".sqlite_sequence set name = %Q WHERE name = %Q\x00UPDATE sqlite_temp_schema SET sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, 1), tbl_name = CASE WHEN tbl_name=%Q COLLATE nocase AND sqlite_rename_test(%Q, sql, type, name, 1, 'after rename', 0) THEN %Q ELSE tbl_name END WHERE type IN ('view', 'trigger')\x00after rename\x00SELECT raise(ABORT,%Q) FROM \"%w\".\"%w\"\x00Cannot add a PRIMARY KEY column\x00Cannot add a UNIQUE column\x00Cannot add a REFERENCES column with non-NULL default value\x00Cannot add a NOT NULL column with default value NULL\x00Cannot add a column with non-constant default\x00cannot add a STORED column\x00UPDATE \"%w\".sqlite_master SET sql = printf('%%.%ds, ',sql) || %Q || substr(sql,1+length(printf('%%.%ds',sql))) WHERE type = 'table' AND name = %Q\x00SELECT CASE WHEN quick_check GLOB 'CHECK*' THEN raise(ABORT,'CHECK constraint failed') ELSE raise(ABORT,'NOT NULL constraint failed') END FROM pragma_quick_check(%Q,%Q) WHERE quick_check GLOB 'CHECK*' OR quick_check GLOB 'NULL*'\x00virtual tables may not be altered\x00Cannot add a column to a view\x00sqlite_altertab_%s\x00view\x00virtual table\x00cannot %s %s \"%s\"\x00drop column from\x00rename columns of\x00no such column: \"%T\"\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, %d) WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND (type != 'index' OR 
tbl_name = %Q)\x00UPDATE temp.sqlite_master SET sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, 1) WHERE type IN ('trigger', 'view')\x00error in %s %s%s%s: %s\x00 \x00CREATE \x00\"%w\" \x00%Q%s\x00%.*s%s\x00cannot drop %s column: \"%s\"\x00PRIMARY KEY\x00cannot drop column \"%s\": no other columns exist\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_drop_column(%d, sql, %d) WHERE (type=='table' AND tbl_name=%Q COLLATE nocase)\x00after drop column\x00sqlite_rename_column\x00sqlite_rename_table\x00sqlite_rename_test\x00sqlite_drop_column\x00sqlite_rename_quotefix\x00CREATE TABLE %Q.%s(%s)\x00DELETE FROM %Q.%s WHERE %s=%Q\x00DELETE FROM %Q.%s\x00sqlite_stat1\x00tbl,idx,stat\x00sqlite_stat4\x00tbl,idx,neq,nlt,ndlt,sample\x00sqlite_stat3\x00stat_init\x00stat_push\x00%llu\x00 %llu\x00%llu \x00stat_get\x00sqlite\\_%\x00BBB\x00idx\x00tbl\x00unordered*\x00sz=[0-9]*\x00noskipscan*\x00SELECT idx,count(*) FROM %Q.sqlite_stat4 GROUP BY idx\x00SELECT idx,neq,nlt,ndlt,sample FROM %Q.sqlite_stat4\x00SELECT tbl,idx,stat FROM %Q.sqlite_stat1\x00x\x00\x00too many attached databases - max %d\x00database %s is already in use\x00database is already attached\x00attached databases must use the same text encoding as main database\x00unable to open database: %s\x00no such database: %s\x00cannot detach database %s\x00database %s is locked\x00sqlite_detach\x00sqlite_attach\x00%s cannot use variables\x00%s %T cannot reference objects in database %s\x00authorizer malfunction\x00%s.%s\x00%s.%z\x00access to %z is prohibited\x00not authorized\x00pragma_\x00no such view\x00no such table\x00corrupt database\x00unknown database %T\x00object name reserved for internal use: %s\x00temporary table name must be unqualified\x00%s %T already exists\x00there is already an index named %s\x00sqlite_returning\x00cannot use RETURNING in a trigger\x00too many columns on %s\x00always\x00generated\x00duplicate column name: %s\x00default value of column [%s] is not constant\x00cannot use DEFAULT on a generated column\x00generated columns cannot be part of the PRIMARY KEY\x00table \"%s\" has more than one primary key\x00AUTOINCREMENT is only allowed on an INTEGER PRIMARY KEY\x00virtual tables cannot use computed columns\x00virtual\x00stored\x00error in generated column \"%s\"\x00,\x00\n \x00,\n \x00\n)\x00CREATE TABLE \x00 TEXT\x00 NUM\x00 INT\x00 REAL\x00unknown datatype for %s.%s: \"%s\"\x00missing datatype for %s.%s\x00AUTOINCREMENT not allowed on WITHOUT ROWID tables\x00PRIMARY KEY missing on table %s\x00must have at least one non-generated column\x00TABLE\x00VIEW\x00CREATE %s %.*s\x00UPDATE %Q.sqlite_master SET type='%s', name=%Q, tbl_name=%Q, rootpage=#%d, sql=%Q WHERE rowid=#%d\x00CREATE TABLE %Q.sqlite_sequence(name,seq)\x00tbl_name='%q' AND type!='trigger'\x00parameters are not allowed in views\x00view %s is circularly defined\x00corrupt schema\x00UPDATE %Q.sqlite_master SET rootpage=%d WHERE #%d AND rootpage=#%d\x00sqlite_stat%d\x00DELETE FROM %Q.sqlite_sequence WHERE name=%Q\x00DELETE FROM %Q.sqlite_master WHERE tbl_name=%Q and type!='trigger'\x00table %s may not be dropped\x00use DROP TABLE to delete table %s\x00use DROP VIEW to delete view %s\x00foreign key on %s should reference only one column of table %T\x00number of columns in foreign key does not match the number of columns in the referenced table\x00unknown column \"%s\" in foreign key definition\x00unsupported use of NULLS %s\x00FIRST\x00LAST\x00index\x00cannot create a TEMP index on non-TEMP table \"%s\"\x00table %s may not be indexed\x00views may not be 
indexed\x00virtual tables may not be indexed\x00there is already a table named %s\x00index %s already exists\x00sqlite_autoindex_%s_%d\x00expressions prohibited in PRIMARY KEY and UNIQUE constraints\x00conflicting ON CONFLICT clauses specified\x00invalid rootpage\x00CREATE%s INDEX %.*s\x00 UNIQUE\x00INSERT INTO %Q.sqlite_master VALUES('index',%Q,%Q,#%d,%Q);\x00name='%q' AND type='index'\x00no such index: %S\x00index associated with UNIQUE or PRIMARY KEY constraint cannot be dropped\x00DELETE FROM %Q.sqlite_master WHERE name=%Q AND type='index'\x00too many FROM clause terms, max: %d\x00a JOIN clause is required before %s\x00ON\x00USING\x00BEGIN\x00ROLLBACK\x00COMMIT\x00RELEASE\x00unable to open a temporary database file for storing temporary tables\x00index '%q'\x00, \x00%s.rowid\x00unable to identify the object to be reindexed\x00duplicate WITH table name: %s\x00no such collation sequence: %s\x00unsafe use of virtual table \"%s\"\x00table %s may not be modified\x00cannot modify %s because it is a view\x00rows deleted\x00integer overflow\x00%.*f\x00LIKE or GLOB pattern too complex\x00ESCAPE expression must be a single character\x00%!.20e\x00%Q\x00?000\x00MATCH\x00like\x00implies_nonnull_row\x00expr_compare\x00expr_implies_expr\x00affinity\x00soundex\x00load_extension\x00sqlite_compileoption_used\x00sqlite_compileoption_get\x00unlikely\x00likelihood\x00likely\x00sqlite_offset\x00ltrim\x00rtrim\x00trim\x00min\x00max\x00typeof\x00subtype\x00length\x00instr\x00printf\x00format\x00unicode\x00char\x00abs\x00round\x00upper\x00lower\x00hex\x00unhex\x00ifnull\x00random\x00randomblob\x00nullif\x00sqlite_version\x00sqlite_source_id\x00sqlite_log\x00quote\x00last_insert_rowid\x00changes\x00total_changes\x00replace\x00zeroblob\x00substr\x00substring\x00sum\x00total\x00avg\x00count\x00group_concat\x00glob\x00ceil\x00ceiling\x00floor\x00trunc\x00ln\x00log\x00log10\x00log2\x00exp\x00pow\x00power\x00mod\x00acos\x00asin\x00atan\x00atan2\x00cos\x00sin\x00tan\x00cosh\x00sinh\x00tanh\x00acosh\x00asinh\x00atanh\x00sqrt\x00radians\x00degrees\x00pi\x00sign\x00iif\x00foreign key mismatch - \"%w\" referencing \"%w\"\x00cannot INSERT into generated column \"%s\"\x00table %S has no column named %s\x00table %S has %d columns but %d values were supplied\x00%d values for %d columns\x00UPSERT not implemented for virtual table \"%s\"\x00cannot UPSERT a view\x00rows inserted\x00sqlite3_extension_init\x00sqlite3_\x00lib\x00_init\x00no entry point [%s] in shared library [%s]\x00error during initialization: %s\x00unable to open shared library [%.*s]\x00so\x00automatic extension loading failed: 
%s\x00seq\x00from\x00to\x00on_update\x00on_delete\x00match\x00cid\x00name\x00type\x00notnull\x00dflt_value\x00pk\x00hidden\x00schema\x00ncol\x00wr\x00strict\x00seqno\x00desc\x00coll\x00key\x00builtin\x00enc\x00narg\x00flags\x00wdth\x00hght\x00flgs\x00unique\x00origin\x00partial\x00rowid\x00fkid\x00file\x00busy\x00checkpointed\x00database\x00status\x00cache_size\x00timeout\x00analysis_limit\x00application_id\x00auto_vacuum\x00automatic_index\x00busy_timeout\x00cache_spill\x00case_sensitive_like\x00cell_size_check\x00checkpoint_fullfsync\x00collation_list\x00compile_options\x00count_changes\x00data_version\x00database_list\x00default_cache_size\x00defer_foreign_keys\x00empty_result_callbacks\x00encoding\x00foreign_key_check\x00foreign_key_list\x00foreign_keys\x00freelist_count\x00full_column_names\x00fullfsync\x00function_list\x00hard_heap_limit\x00ignore_check_constraints\x00incremental_vacuum\x00index_info\x00index_list\x00index_xinfo\x00integrity_check\x00journal_mode\x00journal_size_limit\x00legacy_alter_table\x00locking_mode\x00max_page_count\x00mmap_size\x00module_list\x00optimize\x00page_count\x00page_size\x00pragma_list\x00query_only\x00quick_check\x00read_uncommitted\x00recursive_triggers\x00reverse_unordered_selects\x00schema_version\x00secure_delete\x00short_column_names\x00shrink_memory\x00soft_heap_limit\x00synchronous\x00table_info\x00table_list\x00table_xinfo\x00temp_store\x00temp_store_directory\x00threads\x00trusted_schema\x00user_version\x00wal_autocheckpoint\x00wal_checkpoint\x00writable_schema\x00onoffalseyestruextrafull\x00exclusive\x00normal\x00full\x00incremental\x00memory\x00temporary storage cannot be changed from within a transaction\x00SET NULL\x00SET DEFAULT\x00CASCADE\x00RESTRICT\x00NO ACTION\x00delete\x00persist\x00off\x00truncate\x00wal\x00w\x00a\x00sissii\x00utf8\x00utf16le\x00utf16be\x00-%T\x00fast\x00not a writable directory\x00Safety level may not be changed inside a transaction\x00reset\x00issisii\x00issisi\x00SELECT*FROM\"%w\"\x00shadow\x00sssiii\x00iisX\x00isiX\x00c\x00u\x00isisi\x00iss\x00is\x00iissssss\x00NONE\x00siX\x00*** in database %s ***\n\x00row not in PRIMARY KEY order for %s\x00NULL value in %s.%s\x00non-%s value in %s.%s\x00NUMERIC value in %s.%s\x00C\x00TEXT value in %s.%s\x00CHECK constraint failed in %s\x00row \x00 missing from index \x00 values differ from index \x00non-unique entry in index \x00wrong # of entries in index \x00ok\x00unsupported encoding: %s\x00restart\x00ANALYZE \"%w\".\"%w\"\x00UTF8\x00UTF-8\x00UTF-16le\x00UTF-16be\x00UTF16le\x00UTF16be\x00UTF-16\x00UTF16\x00CREATE TABLE x\x00%c\"%s\"\x00(\"%s\"\x00,arg HIDDEN\x00,schema HIDDEN\x00PRAGMA \x00%Q.\x00=%Q\x00error in %s %s after %s: %s\x00malformed database schema (%s)\x00%z - %s\x00rename\x00drop column\x00add column\x00orphan index\x00CREATE TABLE x(type text,name text,tbl_name text,rootpage int,sql text)\x00unsupported file format\x00SELECT*FROM\"%w\".%s ORDER BY rowid\x00database schema is locked: %s\x00statement too long\x00unknown join type: %T%s%T%s%T\x00naturaleftouterightfullinnercross\x00a NATURAL join may not have an ON or USING clause\x00cannot join using column %s - column not present in both tables\x00ambiguous reference to %s in USING()\x00UNION ALL\x00INTERSECT\x00EXCEPT\x00UNION\x00USE TEMP B-TREE FOR %s\x00USE TEMP B-TREE FOR %sORDER BY\x00RIGHT PART OF \x00column%d\x00%.*z:%u\x00NUM\x00cannot use window functions in recursive queries\x00recursive aggregate queries not supported\x00SETUP\x00RECURSIVE STEP\x00SCAN %d CONSTANT ROW%s\x00S\x00COMPOUND 
QUERY\x00LEFT-MOST SUBQUERY\x00%s USING TEMP B-TREE\x00all VALUES must have the same number of terms\x00SELECTs to the left and right of %s do not have the same number of result columns\x00MERGE (%s)\x00LEFT\x00RIGHT\x00no such index: %s\x00'%s' is not a function\x00no such index: \"%s\"\x00multiple references to recursive table: %s\x00circular reference: %s\x00table %s has %d values for %d columns\x00multiple recursive references: %s\x00recursive reference in a subquery: %s\x00%!S\x00too many references to \"%s\": max 65535\x00access to view \"%s\" prohibited\x00..%s\x00%s.%s.%s\x00no such table: %s\x00no tables specified\x00too many columns in result set\x00DISTINCT aggregates must have exactly one argument\x00USE TEMP B-TREE FOR %s(DISTINCT)\x00SCAN %s%s%s\x00 USING COVERING INDEX \x00target object/alias may not appear in FROM clause: %s\x00expected %d columns for '%s' but got %d\x00CO-ROUTINE %!S\x00MATERIALIZE %!S\x00DISTINCT\x00GROUP BY\x00sqlite3_get_table() called with two or more incompatible queries\x00temporary trigger may not have qualified name\x00trigger\x00cannot create triggers on virtual tables\x00trigger %T already exists\x00cannot create trigger on system table\x00cannot create %s trigger on view: %S\x00BEFORE\x00AFTER\x00cannot create INSTEAD OF trigger on table: %S\x00trigger \"%s\" may not write to shadow table \"%s\"\x00INSERT INTO %Q.sqlite_master VALUES('trigger',%Q,%Q,0,'CREATE TRIGGER %q')\x00type='trigger' AND name='%q'\x00no such trigger: %S\x00DELETE FROM %Q.sqlite_master WHERE name=%Q AND type='trigger'\x00%s RETURNING is not available on virtual tables\x00DELETE\x00UPDATE\x00RETURNING may not use \"TABLE.*\" wildcards\x00-- TRIGGER %s\x00cannot UPDATE generated column \"%s\"\x00no such column: %s\x00rows updated\x00%r \x00%sON CONFLICT clause does not match any PRIMARY KEY or UNIQUE constraint\x00CRE\x00INS\x00cannot VACUUM from within a transaction\x00cannot VACUUM - SQL statements in progress\x00non-text filename\x00ATTACH %Q AS vacuum_db\x00output file already exists\x00SELECT sql FROM \"%w\".sqlite_schema WHERE type='table'AND name<>'sqlite_sequence' AND coalesce(rootpage,1)>0\x00SELECT sql FROM \"%w\".sqlite_schema WHERE type='index'\x00SELECT'INSERT INTO vacuum_db.'||quote(name)||' SELECT*FROM\"%w\".'||quote(name)FROM vacuum_db.sqlite_schema WHERE type='table'AND coalesce(rootpage,1)>0\x00INSERT INTO vacuum_db.sqlite_schema SELECT*FROM \"%w\".sqlite_schema WHERE type IN('view','trigger') OR(type='table'AND rootpage=0)\x00CREATE VIRTUAL TABLE %T\x00UPDATE %Q.sqlite_master SET type='table', name=%Q, tbl_name=%Q, rootpage=0, sql=%Q WHERE rowid=#%d\x00name=%Q AND sql=%Q\x00vtable constructor called recursively: %s\x00vtable constructor failed: %s\x00vtable constructor did not declare schema: %s\x00no such module: %s\x00\x00 AND \x00(\x00 (\x00%s=?\x00ANY(%s)\x00>\x00<\x00%s %S\x00SEARCH\x00SCAN\x00AUTOMATIC PARTIAL COVERING INDEX\x00AUTOMATIC COVERING INDEX\x00COVERING INDEX %s\x00INDEX %s\x00 USING \x00 USING INTEGER PRIMARY KEY (%s\x00>? 
AND %s\x00%c?)\x00 VIRTUAL TABLE INDEX %d:%s\x00 LEFT-JOIN\x00BLOOM FILTER ON %S (\x00rowid=?\x00MULTI-INDEX OR\x00INDEX %d\x00RIGHT-JOIN %s\x00regexp\x00ON clause references tables to its right\x00NOCASE\x00too many arguments on %s() - max %d\x00automatic index on %s(%s)\x00auto-index\x00%s.xBestIndex malfunction\x00abbreviated query algorithm search\x00no query solution\x00at most %d tables in a join\x00SCAN CONSTANT ROW\x00second argument to nth_value must be a positive integer\x00argument of ntile must be a positive integer\x00row_number\x00dense_rank\x00rank\x00percent_rank\x00cume_dist\x00ntile\x00last_value\x00nth_value\x00first_value\x00lead\x00lag\x00no such window: %s\x00RANGE with offset PRECEDING/FOLLOWING requires one ORDER BY expression\x00FILTER clause may only be used with aggregate window functions\x00misuse of aggregate: %s()\x00unsupported frame specification\x00PARTITION clause\x00ORDER BY clause\x00frame specification\x00cannot override %s of window: %s\x00DISTINCT is not supported for window functions\x00frame starting offset must be a non-negative integer\x00frame ending offset must be a non-negative integer\x00frame starting offset must be a non-negative number\x00frame ending offset must be a non-negative number\x00%s clause should come after %s not before\x00ORDER BY\x00LIMIT\x00too many terms in compound SELECT\x00syntax error after column name \"%.*s\"\x00parser stack overflow\x00unknown table option: %.*s\x00set list\x00near \"%T\": syntax error\x00qualified table names are not allowed on INSERT, UPDATE, and DELETE statements within triggers\x00the INDEXED BY clause is not allowed on UPDATE or DELETE statements within triggers\x00the NOT INDEXED clause is not allowed on UPDATE or DELETE statements within triggers\x00incomplete input\x00unrecognized token: \"%T\"\x00%s in \"%s\"\x00create\x00temp\x00temporary\x00end\x00explain\x00unable to close due to unfinalized statements or unfinished backups\x00unknown error\x00abort due to ROLLBACK\x00another row available\x00no more rows available\x00not an error\x00SQL logic error\x00access permission denied\x00query aborted\x00database is locked\x00database table is locked\x00attempt to write a readonly database\x00interrupted\x00disk I/O error\x00database disk image is malformed\x00unknown operation\x00database or disk is full\x00unable to open database file\x00locking protocol\x00constraint failed\x00datatype mismatch\x00bad parameter or other API misuse\x00authorization denied\x00column index out of range\x00file is not a database\x00notification message\x00warning message\x00unable to delete/modify user-function due to active statements\x00unable to use function %s in the requested context\x00unknown database: %s\x00unable to delete/modify collation sequence due to active statements\x00file:\x00localhost\x00invalid uri authority: %.*s\x00vfs\x00cache\x00mode\x00no such %s mode: %s\x00%s mode not allowed: %s\x00no such vfs: %s\x00shared\x00private\x00ro\x00rw\x00rwc\x00RTRIM\x00\x00\x00\x00%s at line %d of [%.10s]\x00database corruption\x00misuse\x00cannot open file\x00no such table column: %s.%s\x00SQLITE_\x00database is deadlocked\x00array\x00object\x000123456789abcdef\x00JSON cannot hold BLOB values\x00malformed JSON\x00[0]\x00JSON path error near '%q'\x00json_%s() needs an odd number of arguments\x00$[\x00$.\x00json_object() requires an even number of arguments\x00json_object() labels must be TEXT\x00set\x00insert\x00[]\x00{}\x00CREATE TABLE x(key,value,type,atom,id,parent,fullkey,path,json HIDDEN,root 
HIDDEN)\x00.%.*s\x00[%d]\x00$\x00json\x00json_array\x00json_array_length\x00json_extract\x00->\x00->>\x00json_insert\x00json_object\x00json_patch\x00json_quote\x00json_remove\x00json_replace\x00json_set\x00json_type\x00json_valid\x00json_group_array\x00json_group_object\x00json_each\x00json_tree\x00%s_node\x00data\x00DROP TABLE '%q'.'%q_node';DROP TABLE '%q'.'%q_rowid';DROP TABLE '%q'.'%q_parent';\x00RtreeMatchArg\x00SELECT * FROM %Q.%Q\x00UNIQUE constraint failed: %s.%s\x00rtree constraint failed: %s.(%s<=%s)\x00ALTER TABLE %Q.'%q_node' RENAME TO \"%w_node\";ALTER TABLE %Q.'%q_parent' RENAME TO \"%w_parent\";ALTER TABLE %Q.'%q_rowid' RENAME TO \"%w_rowid\";\x00SELECT stat FROM %Q.sqlite_stat1 WHERE tbl = '%q_rowid'\x00node\x00CREATE TABLE \"%w\".\"%w_rowid\"(rowid INTEGER PRIMARY KEY,nodeno\x00,a%d\x00);CREATE TABLE \"%w\".\"%w_node\"(nodeno INTEGER PRIMARY KEY,data);\x00CREATE TABLE \"%w\".\"%w_parent\"(nodeno INTEGER PRIMARY KEY,parentnode);\x00INSERT INTO \"%w\".\"%w_node\"VALUES(1,zeroblob(%d))\x00INSERT INTO\"%w\".\"%w_rowid\"(rowid,nodeno)VALUES(?1,?2)ON CONFLICT(rowid)DO UPDATE SET nodeno=excluded.nodeno\x00SELECT * FROM \"%w\".\"%w_rowid\" WHERE rowid=?1\x00UPDATE \"%w\".\"%w_rowid\"SET \x00a%d=coalesce(?%d,a%d)\x00a%d=?%d\x00 WHERE rowid=?1\x00INSERT OR REPLACE INTO '%q'.'%q_node' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_node' WHERE nodeno = ?1\x00SELECT nodeno FROM '%q'.'%q_rowid' WHERE rowid = ?1\x00INSERT OR REPLACE INTO '%q'.'%q_rowid' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_rowid' WHERE rowid = ?1\x00SELECT parentnode FROM '%q'.'%q_parent' WHERE nodeno = ?1\x00INSERT OR REPLACE INTO '%q'.'%q_parent' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_parent' WHERE nodeno = ?1\x00PRAGMA %Q.page_size\x00SELECT length(data) FROM '%q'.'%q_node' WHERE nodeno = 1\x00undersize RTree blobs in \"%q_node\"\x00Wrong number of columns for an rtree table\x00Too few columns for an rtree table\x00Too many columns for an rtree table\x00Auxiliary rtree columns must be last\x00CREATE TABLE x(%.*s INT\x00,%.*s\x00);\x00,%.*s REAL\x00,%.*s INT\x00{%lld\x00 %g\x00}\x00Invalid argument to rtreedepth()\x00%z%s%z\x00SELECT data FROM %Q.'%q_node' WHERE nodeno=?\x00Node %lld missing from database\x00SELECT parentnode FROM %Q.'%q_parent' WHERE nodeno=?1\x00SELECT nodeno FROM %Q.'%q_rowid' WHERE rowid=?1\x00Mapping (%lld -> %lld) missing from %s table\x00%_rowid\x00%_parent\x00Found (%lld -> %lld) in %s table, expected (%lld -> %lld)\x00Dimension %d of cell %d on node %lld is corrupt\x00Dimension %d of cell %d on node %lld is corrupt relative to parent\x00Node %lld is too small (%d bytes)\x00Rtree depth out of range (%d)\x00Node %lld is too small for cell count of %d (%d bytes)\x00SELECT count(*) FROM %Q.'%q%s'\x00Wrong number of entries in %%%s table - expected %lld, actual %lld\x00SELECT * FROM %Q.'%q_rowid'\x00Schema corrupt or not an rtree\x00_rowid\x00_parent\x00END\x00wrong number of arguments to function rtreecheck()\x00[\x00[%!g,%!g],\x00[%!g,%!g]]\x00\x00CREATE TABLE x(_shape\x00,%s\x00rtree\x00fullscan\x00_shape does not contain a valid polygon\x00geopoly_overlap\x00geopoly_within\x00geopoly\x00geopoly_area\x00geopoly_blob\x00geopoly_json\x00geopoly_svg\x00geopoly_contains_point\x00geopoly_debug\x00geopoly_bbox\x00geopoly_xform\x00geopoly_regular\x00geopoly_ccw\x00geopoly_group_bbox\x00rtreenode\x00rtreedepth\x00rtreecheck\x00rtree_i32\x00corrupt fossil delta\x00DROP TRIGGER IF EXISTS temp.rbu_insert_tr;DROP TRIGGER IF EXISTS temp.rbu_update1_tr;DROP TRIGGER IF EXISTS temp.rbu_update2_tr;DROP TRIGGER IF 
EXISTS temp.rbu_delete_tr;\x00SELECT rbu_target_name(name, type='view') AS target, name FROM sqlite_schema WHERE type IN ('table', 'view') AND target IS NOT NULL %s ORDER BY name\x00AND rootpage!=0 AND rootpage IS NOT NULL\x00SELECT name, rootpage, sql IS NULL OR substr(8, 6)=='UNIQUE' FROM main.sqlite_schema WHERE type='index' AND tbl_name = ?\x00SELECT (sql COLLATE nocase BETWEEN 'CREATE VIRTUAL' AND 'CREATE VIRTUAM'), rootpage FROM sqlite_schema WHERE name=%Q\x00PRAGMA index_list=%Q\x00SELECT rootpage FROM sqlite_schema WHERE name = %Q\x00PRAGMA table_info=%Q\x00PRAGMA main.index_list = %Q\x00PRAGMA main.index_xinfo = %Q\x00SELECT * FROM '%q'\x00rbu_\x00rbu_rowid\x00table %q %s rbu_rowid column\x00may not have\x00requires\x00PRAGMA table_info(%Q)\x00column missing from %q: %s\x00%z%s\"%w\"\x00%z%s%s\"%w\"%s\x00SELECT max(_rowid_) FROM \"%s%w\"\x00 WHERE _rowid_ > %lld \x00 DESC\x00quote(\x00||','||\x00SELECT %s FROM \"%s%w\" ORDER BY %s LIMIT 1\x00 WHERE (%s) > (%s) \x00_rowid_\x00%z%s \"%w\" COLLATE %Q\x00%z%s \"rbu_imp_%d%w\" COLLATE %Q DESC\x00%z%s quote(\"rbu_imp_%d%w\")\x00SELECT %s FROM \"rbu_imp_%w\" ORDER BY %s LIMIT 1\x00%z%s%s\x00(%s) > (%s)\x00%z%s(%.*s) COLLATE %Q\x00%z%s\"%w\" COLLATE %Q\x00%z%s\"rbu_imp_%d%w\"%s\x00%z%s\"rbu_imp_%d%w\" %s COLLATE %Q\x00%z%s\"rbu_imp_%d%w\" IS ?\x00%z%s%s.\"%w\"\x00%z%sNULL\x00%z, %s._rowid_\x00_rowid_ = ?%d\x00%z%sc%d=?%d\x00_rowid_ = (SELECT id FROM rbu_imposter2 WHERE %z)\x00%z%s\"%w\"=?%d\x00invalid rbu_control value\x00%z%s\"%w\"=rbu_delta(\"%w\", ?%d)\x00%z%s\"%w\"=rbu_fossil_delta(\"%w\", ?%d)\x00PRIMARY KEY(\x00%z%s\"%w\"%s\x00%z)\x00SELECT name FROM sqlite_schema WHERE rootpage = ?\x00%z%sc%d %s COLLATE %Q\x00%z%sc%d%s\x00%z, id INTEGER\x00CREATE TABLE rbu_imposter2(%z, PRIMARY KEY(%z)) WITHOUT ROWID\x00PRIMARY KEY \x00%z%s\"%w\" %s %sCOLLATE %Q%s\x00 NOT NULL\x00%z, %z\x00CREATE TABLE \"rbu_imp_%w\"(%z)%s\x00 WITHOUT ROWID\x00INSERT INTO %s.'rbu_tmp_%q'(rbu_control,%s%s) VALUES(%z)\x00SELECT trim(sql) FROM sqlite_schema WHERE type='index' AND name=?\x00 LIMIT -1 OFFSET %d\x00CREATE TABLE \"rbu_imp_%w\"( %s, PRIMARY KEY( %s ) ) WITHOUT ROWID\x00INSERT INTO \"rbu_imp_%w\" VALUES(%s)\x00DELETE FROM \"rbu_imp_%w\" WHERE %s\x00SELECT %s, 0 AS rbu_control FROM '%q' %s %s %s ORDER BY %s%s\x00AND\x00WHERE\x00SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' %s ORDER BY %s%s\x00SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' %s UNION ALL SELECT %s, rbu_control FROM '%q' %s %s typeof(rbu_control)='integer' AND rbu_control!=1 ORDER BY %s%s\x00rbu_imp_\x00INSERT INTO \"%s%w\"(%s%s) VALUES(%s)\x00, _rowid_\x00DELETE FROM \"%s%w\" WHERE %s\x00, rbu_rowid\x00CREATE TABLE IF NOT EXISTS %s.'rbu_tmp_%q' AS SELECT *%s FROM '%q' WHERE 0;\x00, 0 AS rbu_rowid\x00CREATE TEMP TRIGGER rbu_delete_tr BEFORE DELETE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(3, %s);END;CREATE TEMP TRIGGER rbu_update1_tr BEFORE UPDATE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(3, %s);END;CREATE TEMP TRIGGER rbu_update2_tr AFTER UPDATE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(4, %s);END;\x00CREATE TEMP TRIGGER rbu_insert_tr AFTER INSERT ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(0, %s);END;\x00,_rowid_ \x00,rbu_rowid\x00SELECT %s,%s rbu_control%s FROM '%q'%s %s %s %s\x000 AS \x00UPDATE \"%s%w\" SET %s WHERE %s\x00SELECT k, v FROM %s.rbu_state\x00file:///%s-vacuum?modeof=%s\x00ATTACH %Q AS stat\x00CREATE TABLE IF NOT EXISTS %s.rbu_state(k INTEGER PRIMARY KEY, v)\x00cannot vacuum wal mode 
database\x00file:%s-vactmp?rbu_memory=1%s%s\x00&\x00rbu_tmp_insert\x00rbu_fossil_delta\x00rbu_target_name\x00SELECT * FROM sqlite_schema\x00rbu vfs not found\x00PRAGMA main.wal_checkpoint=restart\x00rbu_exclusive_checkpoint\x00%s-oal\x00%s-wal\x00PRAGMA schema_version\x00PRAGMA schema_version = %d\x00INSERT OR REPLACE INTO %s.rbu_state(k, v) VALUES (%d, %d), (%d, %Q), (%d, %Q), (%d, %d), (%d, %d), (%d, %lld), (%d, %lld), (%d, %lld), (%d, %lld), (%d, %Q) \x00PRAGMA main.%s\x00PRAGMA main.%s = %d\x00PRAGMA writable_schema=1\x00SELECT sql FROM sqlite_schema WHERE sql!='' AND rootpage!=0 AND name!='sqlite_sequence' ORDER BY type DESC\x00SELECT * FROM sqlite_schema WHERE rootpage=0 OR rootpage IS NULL\x00INSERT INTO sqlite_schema VALUES(?,?,?,?,?)\x00PRAGMA writable_schema=0\x00DELETE FROM %s.'rbu_tmp_%q'\x00rbu_state mismatch error\x00rbu_vfs_%d\x00SELECT count(*) FROM sqlite_schema WHERE type='index' AND tbl_name = %Q\x00rbu_index_cnt\x00SELECT 1 FROM sqlite_schema WHERE tbl_name = 'rbu_count'\x00SELECT sum(cnt * (1 + rbu_index_cnt(rbu_target_name(tbl))))FROM rbu_count\x00cannot update wal mode database\x00database modified during rbu %s\x00vacuum\x00update\x00BEGIN IMMEDIATE\x00PRAGMA journal_mode=off\x00-vactmp\x00DELETE FROM stat.rbu_state\x00rbu/zipvfs setup error\x00rbu(%s)/%z\x00rbu_memory\x00SELECT 0, 'tbl', '', 0, '', 1 UNION ALL SELECT 1, 'idx', '', 0, '', 2 UNION ALL SELECT 2, 'stat', '', 0, '', 0\x00PRAGMA '%q'.table_info('%q')\x00%z%s\"%w\".\"%w\".\"%w\"=\"%w\".\"%w\".\"%w\"\x00%z%s\"%w\".\"%w\".\"%w\" IS NOT \"%w\".\"%w\".\"%w\"\x00 OR \x00SELECT * FROM \"%w\".\"%w\" WHERE NOT EXISTS ( SELECT 1 FROM \"%w\".\"%w\" WHERE %s)\x00SELECT * FROM \"%w\".\"%w\", \"%w\".\"%w\" WHERE %s AND (%z)\x00table schemas do not match\x00SELECT tbl, ?2, stat FROM %Q.sqlite_stat1 WHERE tbl IS ?1 AND idx IS (CASE WHEN ?2=X'' THEN NULL ELSE ?2 END)\x00SELECT * FROM \x00 WHERE \x00 IS ?\x00SAVEPOINT changeset\x00RELEASE changeset\x00UPDATE main.\x00 SET \x00 = ?\x00idx IS CASE WHEN length(?4)=0 AND typeof(?4)='blob' THEN NULL ELSE ?4 END \x00DELETE FROM main.\x00 AND (?\x00AND \x00INSERT INTO main.\x00) VALUES(?\x00, ?\x00INSERT INTO main.sqlite_stat1 VALUES(?1, CASE WHEN length(?2)=0 AND typeof(?2)='blob' THEN NULL ELSE ?2 END, ?3)\x00DELETE FROM main.sqlite_stat1 WHERE tbl=?1 AND idx IS CASE WHEN length(?2)=0 AND typeof(?2)='blob' THEN NULL ELSE ?2 END AND (?4 OR stat IS ?3)\x00SAVEPOINT replace_op\x00RELEASE replace_op\x00SAVEPOINT changeset_apply\x00PRAGMA defer_foreign_keys = 1\x00sqlite3changeset_apply(): no such table: %s\x00sqlite3changeset_apply(): table %s has %d columns, expected %d or more\x00sqlite3changeset_apply(): primary key mismatch for table %s\x00PRAGMA defer_foreign_keys = 0\x00RELEASE changeset_apply\x00ROLLBACK TO changeset_apply\x00fts5: parser stack overflow\x00fts5: syntax error near \"%.*s\"\x00%z%.*s\x00wrong number of arguments to function highlight()\x00wrong number of arguments to function snippet()\x00snippet\x00highlight\x00bm25\x00prefix\x00malformed prefix=... directive\x00too many prefix indexes (max %d)\x00prefix length out of range (max 999)\x00tokenize\x00multiple tokenize=... directives\x00parse error in tokenize directive\x00content\x00multiple content=... directives\x00%Q.%Q\x00content_rowid\x00multiple content_rowid=... directives\x00columnsize\x00malformed columnsize=... directive\x00columns\x00malformed detail=... 
directive\x00unrecognized option: \"%.*s\"\x00reserved fts5 column name: %s\x00unindexed\x00unrecognized column option: %s\x00T.%Q\x00, T.%Q\x00, T.c%d\x00reserved fts5 table name: %s\x00parse error in \"%s\"\x00docsize\x00%Q.'%q_%s'\x00CREATE TABLE x(\x00%z%s%Q\x00%z, %Q HIDDEN, %s HIDDEN)\x00pgsz\x00hashsize\x00automerge\x00usermerge\x00crisismerge\x00SELECT k, v FROM %Q.'%q_config'\x00version\x00invalid fts5 file format (found %d, expected %d) - run 'rebuild'\x00unterminated string\x00fts5: syntax error near \"%.1s\"\x00OR\x00NOT\x00NEAR\x00expected integer, got \"%.*s\"\x00fts5: column queries are not supported (detail=none)\x00fts5: %s queries are not supported (detail!=full)\x00phrase\x00block\x00REPLACE INTO '%q'.'%q_data'(id, block) VALUES(?,?)\x00DELETE FROM '%q'.'%q_data' WHERE id>=? AND id<=?\x00DELETE FROM '%q'.'%q_idx' WHERE segid=?\x00PRAGMA %Q.data_version\x00SELECT pgno FROM '%q'.'%q_idx' WHERE segid=? AND term<=? ORDER BY term DESC LIMIT 1\x00INSERT INTO '%q'.'%q_idx'(segid,term,pgno) VALUES(?,?,?)\x00%s_data\x00id INTEGER PRIMARY KEY, block BLOB\x00segid, term, pgno, PRIMARY KEY(segid, term)\x00SELECT segid, term, (pgno>>1), (pgno&1) FROM %Q.'%q_idx' WHERE segid=%d ORDER BY 1, 2\x00\x00\x00\x00\x00\x00recursively defined fts5 content table\x00SELECT rowid, rank FROM %Q.%Q ORDER BY %s(\"%w\"%s%s) %s\x00DESC\x00ASC\x00reads\x00unknown special query: %.*s\x00SELECT %s\x00no such function: %s\x00parse error in rank function: %s\x00%s: table does not support scanning\x00delete-all\x00'delete-all' may only be used with a contentless or external content fts5 table\x00rebuild\x00'rebuild' may not be used with a contentless fts5 table\x00merge\x00integrity-check\x00cannot %s contentless fts5 table: %s\x00DELETE from\x00no such cursor: %lld\x00no such tokenizer: %s\x00error in tokenizer constructor\x00fts5_api_ptr\x00fts5: 2023-02-21 18:09:37 05941c2a04037fc3ed2ffae11f5d2260706f89431f463518740f72ada350866d\x00config\x00fts5\x00fts5_source_id\x00SELECT %s FROM %s T WHERE T.%Q >= ? AND T.%Q <= ? ORDER BY T.%Q ASC\x00SELECT %s FROM %s T WHERE T.%Q <= ? AND T.%Q >= ? 
ORDER BY T.%Q DESC\x00SELECT %s FROM %s T WHERE T.%Q=?\x00INSERT INTO %Q.'%q_content' VALUES(%s)\x00REPLACE INTO %Q.'%q_content' VALUES(%s)\x00DELETE FROM %Q.'%q_content' WHERE id=?\x00REPLACE INTO %Q.'%q_docsize' VALUES(?,?)\x00DELETE FROM %Q.'%q_docsize' WHERE id=?\x00SELECT sz FROM %Q.'%q_docsize' WHERE id=?\x00REPLACE INTO %Q.'%q_config' VALUES(?,?)\x00SELECT %s FROM %s AS T\x00DROP TABLE IF EXISTS %Q.'%q_data';DROP TABLE IF EXISTS %Q.'%q_idx';DROP TABLE IF EXISTS %Q.'%q_config';\x00DROP TABLE IF EXISTS %Q.'%q_docsize';\x00DROP TABLE IF EXISTS %Q.'%q_content';\x00ALTER TABLE %Q.'%q_%s' RENAME TO '%q_%s';\x00CREATE TABLE %Q.'%q_%q'(%s)%s\x00fts5: error creating shadow table %q_%s: %s\x00id INTEGER PRIMARY KEY\x00, c%d\x00id INTEGER PRIMARY KEY, sz BLOB\x00k PRIMARY KEY, v\x00DELETE FROM %Q.'%q_data';DELETE FROM %Q.'%q_idx';\x00DELETE FROM %Q.'%q_docsize';\x00SELECT count(*) FROM %Q.'%q_%s'\x00tokenchars\x00separators\x00L* N* Co\x00categories\x00remove_diacritics\x00unicode61\x00al\x00ance\x00ence\x00er\x00ic\x00able\x00ible\x00ant\x00ement\x00ment\x00ent\x00ion\x00ou\x00ism\x00ate\x00iti\x00ous\x00ive\x00ize\x00at\x00bl\x00ble\x00iz\x00ational\x00tional\x00tion\x00enci\x00anci\x00izer\x00logi\x00bli\x00alli\x00entli\x00eli\x00e\x00ousli\x00ization\x00ation\x00ator\x00alism\x00iveness\x00fulness\x00ful\x00ousness\x00aliti\x00iviti\x00biliti\x00ical\x00ness\x00icate\x00iciti\x00ative\x00alize\x00eed\x00ee\x00ed\x00ing\x00case_sensitive\x00ascii\x00porter\x00trigram\x00col\x00row\x00instance\x00fts5vocab: unknown table type: %Q\x00CREATE TABlE vocab(term, col, doc, cnt)\x00CREATE TABlE vocab(term, doc, cnt)\x00CREATE TABlE vocab(term, doc, col, offset)\x00wrong number of vtable arguments\x00recursive definition for %s.%s\x00SELECT t.%Q FROM %Q.%Q AS t WHERE t.%Q MATCH '*id'\x00no such fts5 table: %s.%s\x00fts5vocab\x002023-02-21 18:09:37 05941c2a04037fc3ed2ffae11f5d2260706f89431f463518740f72ada350866d\x00" +var ts1 = "3.41.2\x00ATOMIC_INTRINSICS=0\x00COMPILER=clang-13.0.0\x00DEFAULT_AUTOVACUUM\x00DEFAULT_CACHE_SIZE=-2000\x00DEFAULT_FILE_FORMAT=4\x00DEFAULT_JOURNAL_SIZE_LIMIT=-1\x00DEFAULT_MEMSTATUS=0\x00DEFAULT_MMAP_SIZE=0\x00DEFAULT_PAGE_SIZE=4096\x00DEFAULT_PCACHE_INITSZ=20\x00DEFAULT_RECURSIVE_TRIGGERS\x00DEFAULT_SECTOR_SIZE=4096\x00DEFAULT_SYNCHRONOUS=2\x00DEFAULT_WAL_AUTOCHECKPOINT=1000\x00DEFAULT_WAL_SYNCHRONOUS=2\x00DEFAULT_WORKER_THREADS=0\x00ENABLE_COLUMN_METADATA\x00ENABLE_FTS5\x00ENABLE_GEOPOLY\x00ENABLE_MATH_FUNCTIONS\x00ENABLE_MEMORY_MANAGEMENT\x00ENABLE_OFFSET_SQL_FUNC\x00ENABLE_PREUPDATE_HOOK\x00ENABLE_RBU\x00ENABLE_RTREE\x00ENABLE_SESSION\x00ENABLE_SNAPSHOT\x00ENABLE_STAT4\x00ENABLE_UNLOCK_NOTIFY\x00LIKE_DOESNT_MATCH_BLOBS\x00MALLOC_SOFT_LIMIT=1024\x00MAX_ATTACHED=10\x00MAX_COLUMN=2000\x00MAX_COMPOUND_SELECT=500\x00MAX_DEFAULT_PAGE_SIZE=8192\x00MAX_EXPR_DEPTH=1000\x00MAX_FUNCTION_ARG=127\x00MAX_LENGTH=1000000000\x00MAX_LIKE_PATTERN_LENGTH=50000\x00MAX_MMAP_SIZE=0x7fff0000\x00MAX_PAGE_COUNT=1073741823\x00MAX_PAGE_SIZE=65536\x00MAX_SQL_LENGTH=1000000000\x00MAX_TRIGGER_DEPTH=1000\x00MAX_VARIABLE_NUMBER=32766\x00MAX_VDBE_OP=250000000\x00MAX_WORKER_THREADS=8\x00MUTEX_NOOP\x00SOUNDEX\x00SYSTEM_MALLOC\x00TEMP_STORE=1\x00THREADSAFE=1\x00BINARY\x00ANY\x00BLOB\x00INT\x00INTEGER\x00REAL\x00TEXT\x0020b:20e\x0020c:20e\x0020e\x0040f-21a-21d\x00now\x00local time unavailable\x00second\x00minute\x00hour\x00\x00\x00day\x00\x00\x00\x00month\x00\x00year\x00\x00\x00auto\x00julianday\x00localtime\x00unixepoch\x00utc\x00weekday \x00start of 
\x00month\x00year\x00day\x00%02d\x00%06.3f\x00%03d\x00%.16g\x00%lld\x00%04d\x00date\x00time\x00datetime\x00strftime\x00current_time\x00current_timestamp\x00current_date\x00failed to allocate %u bytes of memory\x00failed memory resize %u to %u bytes\x00out of memory\x000123456789ABCDEF0123456789abcdef\x00-x0\x00X0\x00%\x00NaN\x00Inf\x00\x00NULL\x00(NULL)\x00.\x00(join-%u)\x00(subquery-%u)\x00thstndrd\x00922337203685477580\x00API call with %s database connection pointer\x00unopened\x00invalid\x00Savepoint\x00AutoCommit\x00Transaction\x00Checkpoint\x00JournalMode\x00Vacuum\x00VFilter\x00VUpdate\x00Init\x00Goto\x00Gosub\x00InitCoroutine\x00Yield\x00MustBeInt\x00Jump\x00Once\x00If\x00IfNot\x00IsType\x00Not\x00IfNullRow\x00SeekLT\x00SeekLE\x00SeekGE\x00SeekGT\x00IfNotOpen\x00IfNoHope\x00NoConflict\x00NotFound\x00Found\x00SeekRowid\x00NotExists\x00Last\x00IfSmaller\x00SorterSort\x00Sort\x00Rewind\x00SorterNext\x00Prev\x00Next\x00IdxLE\x00IdxGT\x00IdxLT\x00Or\x00And\x00IdxGE\x00RowSetRead\x00RowSetTest\x00Program\x00FkIfZero\x00IsNull\x00NotNull\x00Ne\x00Eq\x00Gt\x00Le\x00Lt\x00Ge\x00ElseEq\x00IfPos\x00IfNotZero\x00DecrJumpZero\x00IncrVacuum\x00VNext\x00Filter\x00PureFunc\x00Function\x00Return\x00EndCoroutine\x00HaltIfNull\x00Halt\x00Integer\x00Int64\x00String\x00BeginSubrtn\x00Null\x00SoftNull\x00Blob\x00Variable\x00Move\x00Copy\x00SCopy\x00IntCopy\x00FkCheck\x00ResultRow\x00CollSeq\x00AddImm\x00RealAffinity\x00Cast\x00Permutation\x00Compare\x00IsTrue\x00ZeroOrNull\x00Offset\x00Column\x00TypeCheck\x00Affinity\x00MakeRecord\x00Count\x00ReadCookie\x00SetCookie\x00ReopenIdx\x00BitAnd\x00BitOr\x00ShiftLeft\x00ShiftRight\x00Add\x00Subtract\x00Multiply\x00Divide\x00Remainder\x00Concat\x00OpenRead\x00OpenWrite\x00BitNot\x00OpenDup\x00OpenAutoindex\x00String8\x00OpenEphemeral\x00SorterOpen\x00SequenceTest\x00OpenPseudo\x00Close\x00ColumnsUsed\x00SeekScan\x00SeekHit\x00Sequence\x00NewRowid\x00Insert\x00RowCell\x00Delete\x00ResetCount\x00SorterCompare\x00SorterData\x00RowData\x00Rowid\x00NullRow\x00SeekEnd\x00IdxInsert\x00SorterInsert\x00IdxDelete\x00DeferredSeek\x00IdxRowid\x00FinishSeek\x00Destroy\x00Clear\x00ResetSorter\x00CreateBtree\x00SqlExec\x00ParseSchema\x00LoadAnalysis\x00DropTable\x00DropIndex\x00Real\x00DropTrigger\x00IntegrityCk\x00RowSetAdd\x00Param\x00FkCounter\x00MemMax\x00OffsetLimit\x00AggInverse\x00AggStep\x00AggStep1\x00AggValue\x00AggFinal\x00Expire\x00CursorLock\x00CursorUnlock\x00TableLock\x00VBegin\x00VCreate\x00VDestroy\x00VOpen\x00VInitIn\x00VColumn\x00VRename\x00Pagecount\x00MaxPgcnt\x00ClrSubtype\x00FilterAdd\x00Trace\x00CursorHint\x00ReleaseReg\x00Noop\x00Explain\x00Abortable\x00open\x00close\x00access\x00getcwd\x00stat\x00fstat\x00ftruncate\x00fcntl\x00read\x00pread\x00pread64\x00write\x00pwrite\x00pwrite64\x00fchmod\x00fallocate\x00unlink\x00openDirectory\x00mkdir\x00rmdir\x00fchown\x00geteuid\x00mmap\x00munmap\x00mremap\x00getpagesize\x00readlink\x00lstat\x00ioctl\x00attempt to open \"%s\" as file descriptor %d\x00/dev/null\x00os_unix.c:%d: (%d) %s(%s) - %s\x00cannot fstat db file %s\x00file unlinked while open: %s\x00multiple links to file: %s\x00file renamed while open: %s\x00%s\x00full_fsync\x00%s-shm\x00readonly_shm\x00psow\x00unix-excl\x00%s.lock\x00/var/tmp\x00/usr/tmp\x00/tmp\x00SQLITE_TMPDIR\x00TMPDIR\x00%s/etilqs_%llx%c\x00modeof\x00fsync\x00/dev/urandom\x00unix\x00unix-none\x00unix-dotfile\x00memdb\x00memdb(%p,%lld)\x00PRAGMA \"%w\".page_count\x00ATTACH x AS %Q\x00recovered %d pages from %s\x00-journal\x00-wal\x00nolock\x00immutable\x00PRAGMA 
table_list\x00recovered %d frames from WAL file %s\x00cannot limit WAL size: %s\x00SQLite format 3\x00:memory:\x00@ \x00\n\x00invalid page number %d\x002nd reference to page %d\x00Failed to read ptrmap key=%d\x00Bad ptr map entry key=%d expected=(%d,%d) got=(%d,%d)\x00failed to get page %d\x00freelist leaf count too big on page %d\x00%s is %d but should be %d\x00size\x00overflow list length\x00Page %u: \x00unable to get the page. error code=%d\x00btreeInitPage() returns error code %d\x00free space corruption\x00On tree page %u cell %d: \x00On page %u at right child: \x00Offset %d out of range %d..%d\x00Extends off end of page\x00Rowid %lld out of order\x00Child page depth differs\x00Multiple uses for byte %u of page %u\x00Fragmentation of %d bytes reported as %d on page %u\x00Main freelist: \x00max rootpage (%d) disagrees with header (%d)\x00incremental_vacuum enabled with a max rootpage of zero\x00Page %d is never used\x00Pointer map page %d is referenced\x00unknown database %s\x00destination database is in use\x00source and destination must be distinct\x00%!.15g\x00-\x00%s%s\x00k(%d\x00B\x00,%s%s%s\x00N.\x00)\x00%.18s-%s\x00%s(%d)\x00%d\x00(blob)\x00vtab:%p\x00%c%u\x00]\x00program\x00?\x008\x0016LE\x0016BE\x00addr\x00opcode\x00p1\x00p2\x00p3\x00p4\x00p5\x00comment\x00id\x00parent\x00notused\x00detail\x00%.4c%s%.16c\x00MJ delete: %s\x00MJ collide: %s\x00-mj%06X9%02X\x00FOREIGN KEY constraint failed\x00a CHECK constraint\x00a generated column\x00an index\x00non-deterministic use of %s() in %s\x00API called with finalized prepared statement\x00API called with NULL prepared statement\x00string or blob too big\x00bind on a busy prepared statement: [%s]\x00-- \x00'%.*q'\x00zeroblob(%d)\x00x'\x00%02x\x00'\x00%s constraint failed\x00%z: %s\x00abort at %d in [%s]: %s\x00cannot store %s value in %s column %s.%s\x00cannot open savepoint - SQL statements in progress\x00no such savepoint: %s\x00cannot release savepoint - SQL statements in progress\x00cannot commit transaction - SQL statements in progress\x00cannot start a transaction within a transaction\x00cannot rollback - no transaction is active\x00cannot commit - no transaction is active\x00database schema has changed\x00index corruption\x00sqlite_master\x00SELECT*FROM\"%w\".%s WHERE %s ORDER BY rowid\x00too many levels of trigger recursion\x00cannot change %s wal mode from within a transaction\x00into\x00out of\x00database table is locked: %s\x00ValueList\x00-- %s\x00statement aborts at %d: [%s] %s\x00NOT NULL\x00UNIQUE\x00CHECK\x00FOREIGN KEY\x00cannot open value of type %s\x00null\x00real\x00integer\x00no such rowid: %lld\x00cannot open virtual table: %s\x00cannot open table without rowid: %s\x00cannot open view: %s\x00no such column: \"%s\"\x00foreign key\x00indexed\x00cannot open %s column for writing\x00sqlite_\x00sqlite_temp_master\x00sqlite_temp_schema\x00sqlite_schema\x00main\x00*\x00new\x00old\x00excluded\x00misuse of aliased aggregate %s\x00misuse of aliased window function %s\x00row value misused\x00double-quoted string literal: \"%w\"\x00coalesce\x00no such column\x00ambiguous column name\x00%s: %s.%s.%s\x00%s: %s.%s\x00%s: %s\x00partial index WHERE clauses\x00index expressions\x00CHECK constraints\x00generated columns\x00%s prohibited in %s\x00the \".\" operator\x00second argument to %#T() must be a constant between 0.0 and 1.0\x00not authorized to use function: %#T\x00non-deterministic functions\x00%#T() may not be used as a window function\x00window\x00aggregate\x00misuse of %s function %#T()\x00no such function: %#T\x00wrong 
number of arguments to function %#T()\x00FILTER may not be used with non-aggregate %#T()\x00subqueries\x00parameters\x00%r %s BY term out of range - should be between 1 and %d\x00too many terms in ORDER BY clause\x00ORDER\x00%r ORDER BY term does not match any column in the result set\x00too many terms in %s BY clause\x00HAVING clause on a non-aggregate query\x00GROUP\x00aggregate functions are not allowed in the GROUP BY clause\x00Expression tree is too large (maximum depth %d)\x00IN(...) element has %d term%s - expected %d\x00s\x000\x00too many arguments on function %T\x00unsafe use of %#T()\x00variable number must be between ?1 and ?%d\x00too many SQL variables\x00%d columns assigned %d values\x00too many columns in %s\x00true\x00false\x00_ROWID_\x00ROWID\x00OID\x00USING ROWID SEARCH ON TABLE %s FOR IN-OPERATOR\x00USING INDEX %s FOR IN-OPERATOR\x00sub-select returns %d columns - expected %d\x00REUSE LIST SUBQUERY %d\x00%sLIST SUBQUERY %d\x00CORRELATED \x00REUSE SUBQUERY %d\x00%sSCALAR SUBQUERY %d\x001\x000x\x00hex literal too big: %s%#T\x00generated column loop on \"%s\"\x00blob\x00text\x00numeric\x00flexnum\x00none\x00misuse of aggregate: %#T()\x00unknown function: %#T()\x00RAISE() may only be used within a trigger-program\x00B\x00C\x00D\x00E\x00F\x00table %s may not be altered\x00SELECT 1 FROM \"%w\".sqlite_master WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%' AND sqlite_rename_test(%Q, sql, type, name, %d, %Q, %d)=NULL \x00SELECT 1 FROM temp.sqlite_master WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%' AND sqlite_rename_test(%Q, sql, type, name, 1, %Q, %d)=NULL \x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_quotefix(%Q, sql)WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%'\x00UPDATE temp.sqlite_master SET sql = sqlite_rename_quotefix('temp', sql)WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%'\x00there is already another table or index with this name: %s\x00table\x00view %s may not be altered\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, %d) WHERE (type!='index' OR tbl_name=%Q COLLATE nocase)AND name NOT LIKE 'sqliteX_%%' ESCAPE 'X'\x00UPDATE %Q.sqlite_master SET tbl_name = %Q, name = CASE WHEN type='table' THEN %Q WHEN name LIKE 'sqliteX_autoindex%%' ESCAPE 'X' AND type='index' THEN 'sqlite_autoindex_' || %Q || substr(name,%d+18) ELSE name END WHERE tbl_name=%Q COLLATE nocase AND (type='table' OR type='index' OR type='trigger');\x00sqlite_sequence\x00UPDATE \"%w\".sqlite_sequence set name = %Q WHERE name = %Q\x00UPDATE sqlite_temp_schema SET sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, 1), tbl_name = CASE WHEN tbl_name=%Q COLLATE nocase AND sqlite_rename_test(%Q, sql, type, name, 1, 'after rename', 0) THEN %Q ELSE tbl_name END WHERE type IN ('view', 'trigger')\x00after rename\x00SELECT raise(ABORT,%Q) FROM \"%w\".\"%w\"\x00Cannot add a PRIMARY KEY column\x00Cannot add a UNIQUE column\x00Cannot add a REFERENCES column with non-NULL default value\x00Cannot add a NOT NULL column with default value NULL\x00Cannot add a column with non-constant default\x00cannot add a STORED column\x00UPDATE \"%w\".sqlite_master SET sql = printf('%%.%ds, ',sql) || %Q || substr(sql,1+length(printf('%%.%ds',sql))) WHERE type = 'table' AND name = %Q\x00SELECT CASE WHEN quick_check GLOB 'CHECK*' THEN raise(ABORT,'CHECK constraint failed') ELSE raise(ABORT,'NOT NULL constraint failed') END FROM pragma_quick_check(%Q,%Q) 
WHERE quick_check GLOB 'CHECK*' OR quick_check GLOB 'NULL*'\x00virtual tables may not be altered\x00Cannot add a column to a view\x00sqlite_altertab_%s\x00view\x00virtual table\x00cannot %s %s \"%s\"\x00drop column from\x00rename columns of\x00no such column: \"%T\"\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, %d) WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND (type != 'index' OR tbl_name = %Q)\x00UPDATE temp.sqlite_master SET sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, 1) WHERE type IN ('trigger', 'view')\x00error in %s %s%s%s: %s\x00 \x00CREATE \x00\"%w\" \x00%Q%s\x00%.*s%s\x00cannot drop %s column: \"%s\"\x00PRIMARY KEY\x00cannot drop column \"%s\": no other columns exist\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_drop_column(%d, sql, %d) WHERE (type=='table' AND tbl_name=%Q COLLATE nocase)\x00after drop column\x00sqlite_rename_column\x00sqlite_rename_table\x00sqlite_rename_test\x00sqlite_drop_column\x00sqlite_rename_quotefix\x00CREATE TABLE %Q.%s(%s)\x00DELETE FROM %Q.%s WHERE %s=%Q\x00DELETE FROM %Q.%s\x00sqlite_stat1\x00tbl,idx,stat\x00sqlite_stat4\x00tbl,idx,neq,nlt,ndlt,sample\x00sqlite_stat3\x00stat_init\x00stat_push\x00%llu\x00 %llu\x00%llu \x00stat_get\x00sqlite\\_%\x00BBB\x00idx\x00tbl\x00unordered*\x00sz=[0-9]*\x00noskipscan*\x00SELECT idx,count(*) FROM %Q.sqlite_stat4 GROUP BY idx\x00SELECT idx,neq,nlt,ndlt,sample FROM %Q.sqlite_stat4\x00SELECT tbl,idx,stat FROM %Q.sqlite_stat1\x00x\x00\x00too many attached databases - max %d\x00database %s is already in use\x00database is already attached\x00attached databases must use the same text encoding as main database\x00unable to open database: %s\x00no such database: %s\x00cannot detach database %s\x00database %s is locked\x00sqlite_detach\x00sqlite_attach\x00%s cannot use variables\x00%s %T cannot reference objects in database %s\x00authorizer malfunction\x00%s.%s\x00%s.%z\x00access to %z is prohibited\x00not authorized\x00pragma_\x00no such view\x00no such table\x00corrupt database\x00unknown database %T\x00object name reserved for internal use: %s\x00temporary table name must be unqualified\x00%s %T already exists\x00there is already an index named %s\x00sqlite_returning\x00cannot use RETURNING in a trigger\x00too many columns on %s\x00always\x00generated\x00duplicate column name: %s\x00default value of column [%s] is not constant\x00cannot use DEFAULT on a generated column\x00generated columns cannot be part of the PRIMARY KEY\x00table \"%s\" has more than one primary key\x00AUTOINCREMENT is only allowed on an INTEGER PRIMARY KEY\x00virtual tables cannot use computed columns\x00virtual\x00stored\x00error in generated column \"%s\"\x00,\x00\n \x00,\n \x00\n)\x00CREATE TABLE \x00 TEXT\x00 NUM\x00 INT\x00 REAL\x00unknown datatype for %s.%s: \"%s\"\x00missing datatype for %s.%s\x00AUTOINCREMENT not allowed on WITHOUT ROWID tables\x00PRIMARY KEY missing on table %s\x00must have at least one non-generated column\x00TABLE\x00VIEW\x00CREATE %s %.*s\x00UPDATE %Q.sqlite_master SET type='%s', name=%Q, tbl_name=%Q, rootpage=#%d, sql=%Q WHERE rowid=#%d\x00CREATE TABLE %Q.sqlite_sequence(name,seq)\x00tbl_name='%q' AND type!='trigger'\x00parameters are not allowed in views\x00view %s is circularly defined\x00corrupt schema\x00UPDATE %Q.sqlite_master SET rootpage=%d WHERE #%d AND rootpage=#%d\x00sqlite_stat%d\x00DELETE FROM %Q.sqlite_sequence WHERE name=%Q\x00DELETE FROM %Q.sqlite_master WHERE tbl_name=%Q and type!='trigger'\x00table %s may not be dropped\x00use 
DROP TABLE to delete table %s\x00use DROP VIEW to delete view %s\x00foreign key on %s should reference only one column of table %T\x00number of columns in foreign key does not match the number of columns in the referenced table\x00unknown column \"%s\" in foreign key definition\x00unsupported use of NULLS %s\x00FIRST\x00LAST\x00index\x00cannot create a TEMP index on non-TEMP table \"%s\"\x00table %s may not be indexed\x00views may not be indexed\x00virtual tables may not be indexed\x00there is already a table named %s\x00index %s already exists\x00sqlite_autoindex_%s_%d\x00expressions prohibited in PRIMARY KEY and UNIQUE constraints\x00conflicting ON CONFLICT clauses specified\x00invalid rootpage\x00CREATE%s INDEX %.*s\x00 UNIQUE\x00INSERT INTO %Q.sqlite_master VALUES('index',%Q,%Q,#%d,%Q);\x00name='%q' AND type='index'\x00no such index: %S\x00index associated with UNIQUE or PRIMARY KEY constraint cannot be dropped\x00DELETE FROM %Q.sqlite_master WHERE name=%Q AND type='index'\x00too many FROM clause terms, max: %d\x00a JOIN clause is required before %s\x00ON\x00USING\x00BEGIN\x00ROLLBACK\x00COMMIT\x00RELEASE\x00unable to open a temporary database file for storing temporary tables\x00index '%q'\x00, \x00%s.rowid\x00unable to identify the object to be reindexed\x00duplicate WITH table name: %s\x00no such collation sequence: %s\x00unsafe use of virtual table \"%s\"\x00table %s may not be modified\x00cannot modify %s because it is a view\x00rows deleted\x00integer overflow\x00%.*f\x00LIKE or GLOB pattern too complex\x00ESCAPE expression must be a single character\x00%!.20e\x00%Q\x00?000\x00MATCH\x00like\x00implies_nonnull_row\x00expr_compare\x00expr_implies_expr\x00affinity\x00soundex\x00load_extension\x00sqlite_compileoption_used\x00sqlite_compileoption_get\x00unlikely\x00likelihood\x00likely\x00sqlite_offset\x00ltrim\x00rtrim\x00trim\x00min\x00max\x00typeof\x00subtype\x00length\x00instr\x00printf\x00format\x00unicode\x00char\x00abs\x00round\x00upper\x00lower\x00hex\x00unhex\x00ifnull\x00random\x00randomblob\x00nullif\x00sqlite_version\x00sqlite_source_id\x00sqlite_log\x00quote\x00last_insert_rowid\x00changes\x00total_changes\x00replace\x00zeroblob\x00substr\x00substring\x00sum\x00total\x00avg\x00count\x00group_concat\x00glob\x00ceil\x00ceiling\x00floor\x00trunc\x00ln\x00log\x00log10\x00log2\x00exp\x00pow\x00power\x00mod\x00acos\x00asin\x00atan\x00atan2\x00cos\x00sin\x00tan\x00cosh\x00sinh\x00tanh\x00acosh\x00asinh\x00atanh\x00sqrt\x00radians\x00degrees\x00pi\x00sign\x00iif\x00foreign key mismatch - \"%w\" referencing \"%w\"\x00cannot INSERT into generated column \"%s\"\x00table %S has no column named %s\x00table %S has %d columns but %d values were supplied\x00%d values for %d columns\x00UPSERT not implemented for virtual table \"%s\"\x00cannot UPSERT a view\x00rows inserted\x00sqlite3_extension_init\x00sqlite3_\x00lib\x00_init\x00no entry point [%s] in shared library [%s]\x00error during initialization: %s\x00unable to open shared library [%.*s]\x00so\x00automatic extension loading failed: 
%s\x00seq\x00from\x00to\x00on_update\x00on_delete\x00match\x00cid\x00name\x00type\x00notnull\x00dflt_value\x00pk\x00hidden\x00schema\x00ncol\x00wr\x00strict\x00seqno\x00desc\x00coll\x00key\x00builtin\x00enc\x00narg\x00flags\x00wdth\x00hght\x00flgs\x00unique\x00origin\x00partial\x00rowid\x00fkid\x00file\x00busy\x00checkpointed\x00database\x00status\x00cache_size\x00timeout\x00analysis_limit\x00application_id\x00auto_vacuum\x00automatic_index\x00busy_timeout\x00cache_spill\x00case_sensitive_like\x00cell_size_check\x00checkpoint_fullfsync\x00collation_list\x00compile_options\x00count_changes\x00data_version\x00database_list\x00default_cache_size\x00defer_foreign_keys\x00empty_result_callbacks\x00encoding\x00foreign_key_check\x00foreign_key_list\x00foreign_keys\x00freelist_count\x00full_column_names\x00fullfsync\x00function_list\x00hard_heap_limit\x00ignore_check_constraints\x00incremental_vacuum\x00index_info\x00index_list\x00index_xinfo\x00integrity_check\x00journal_mode\x00journal_size_limit\x00legacy_alter_table\x00locking_mode\x00max_page_count\x00mmap_size\x00module_list\x00optimize\x00page_count\x00page_size\x00pragma_list\x00query_only\x00quick_check\x00read_uncommitted\x00recursive_triggers\x00reverse_unordered_selects\x00schema_version\x00secure_delete\x00short_column_names\x00shrink_memory\x00soft_heap_limit\x00synchronous\x00table_info\x00table_list\x00table_xinfo\x00temp_store\x00temp_store_directory\x00threads\x00trusted_schema\x00user_version\x00wal_autocheckpoint\x00wal_checkpoint\x00writable_schema\x00onoffalseyestruextrafull\x00exclusive\x00normal\x00full\x00incremental\x00memory\x00temporary storage cannot be changed from within a transaction\x00SET NULL\x00SET DEFAULT\x00CASCADE\x00RESTRICT\x00NO ACTION\x00delete\x00persist\x00off\x00truncate\x00wal\x00w\x00a\x00sissii\x00utf8\x00utf16le\x00utf16be\x00-%T\x00fast\x00not a writable directory\x00Safety level may not be changed inside a transaction\x00reset\x00issisii\x00issisi\x00SELECT*FROM\"%w\"\x00shadow\x00sssiii\x00iisX\x00isiX\x00c\x00u\x00isisi\x00iss\x00is\x00iissssss\x00NONE\x00siX\x00*** in database %s ***\n\x00row not in PRIMARY KEY order for %s\x00NULL value in %s.%s\x00non-%s value in %s.%s\x00NUMERIC value in %s.%s\x00C\x00TEXT value in %s.%s\x00CHECK constraint failed in %s\x00row \x00 missing from index \x00rowid not at end-of-record for row \x00 of index \x00 values differ from index \x00non-unique entry in index \x00wrong # of entries in index \x00ok\x00unsupported encoding: %s\x00restart\x00ANALYZE \"%w\".\"%w\"\x00UTF8\x00UTF-8\x00UTF-16le\x00UTF-16be\x00UTF16le\x00UTF16be\x00UTF-16\x00UTF16\x00CREATE TABLE x\x00%c\"%s\"\x00(\"%s\"\x00,arg HIDDEN\x00,schema HIDDEN\x00PRAGMA \x00%Q.\x00=%Q\x00error in %s %s after %s: %s\x00malformed database schema (%s)\x00%z - %s\x00rename\x00drop column\x00add column\x00orphan index\x00CREATE TABLE x(type text,name text,tbl_name text,rootpage int,sql text)\x00unsupported file format\x00SELECT*FROM\"%w\".%s ORDER BY rowid\x00database schema is locked: %s\x00statement too long\x00unknown join type: %T%s%T%s%T\x00naturaleftouterightfullinnercross\x00a NATURAL join may not have an ON or USING clause\x00cannot join using column %s - column not present in both tables\x00ambiguous reference to %s in USING()\x00UNION ALL\x00INTERSECT\x00EXCEPT\x00UNION\x00USE TEMP B-TREE FOR %s\x00USE TEMP B-TREE FOR %sORDER BY\x00RIGHT PART OF \x00column%d\x00%.*z:%u\x00NUM\x00cannot use window functions in recursive queries\x00recursive aggregate queries not supported\x00SETUP\x00RECURSIVE 
STEP\x00SCAN %d CONSTANT ROW%s\x00S\x00COMPOUND QUERY\x00LEFT-MOST SUBQUERY\x00%s USING TEMP B-TREE\x00all VALUES must have the same number of terms\x00SELECTs to the left and right of %s do not have the same number of result columns\x00MERGE (%s)\x00LEFT\x00RIGHT\x00no such index: %s\x00'%s' is not a function\x00no such index: \"%s\"\x00multiple references to recursive table: %s\x00circular reference: %s\x00table %s has %d values for %d columns\x00multiple recursive references: %s\x00recursive reference in a subquery: %s\x00%!S\x00too many references to \"%s\": max 65535\x00access to view \"%s\" prohibited\x00..%s\x00%s.%s.%s\x00no such table: %s\x00no tables specified\x00too many columns in result set\x00DISTINCT aggregates must have exactly one argument\x00USE TEMP B-TREE FOR %s(DISTINCT)\x00SCAN %s%s%s\x00 USING COVERING INDEX \x00target object/alias may not appear in FROM clause: %s\x00expected %d columns for '%s' but got %d\x00CO-ROUTINE %!S\x00MATERIALIZE %!S\x00DISTINCT\x00GROUP BY\x00sqlite3_get_table() called with two or more incompatible queries\x00temporary trigger may not have qualified name\x00trigger\x00cannot create triggers on virtual tables\x00trigger %T already exists\x00cannot create trigger on system table\x00cannot create %s trigger on view: %S\x00BEFORE\x00AFTER\x00cannot create INSTEAD OF trigger on table: %S\x00trigger \"%s\" may not write to shadow table \"%s\"\x00INSERT INTO %Q.sqlite_master VALUES('trigger',%Q,%Q,0,'CREATE TRIGGER %q')\x00type='trigger' AND name='%q'\x00no such trigger: %S\x00DELETE FROM %Q.sqlite_master WHERE name=%Q AND type='trigger'\x00%s RETURNING is not available on virtual tables\x00DELETE\x00UPDATE\x00RETURNING may not use \"TABLE.*\" wildcards\x00-- TRIGGER %s\x00cannot UPDATE generated column \"%s\"\x00no such column: %s\x00rows updated\x00%r \x00%sON CONFLICT clause does not match any PRIMARY KEY or UNIQUE constraint\x00CRE\x00INS\x00cannot VACUUM from within a transaction\x00cannot VACUUM - SQL statements in progress\x00non-text filename\x00ATTACH %Q AS vacuum_db\x00output file already exists\x00SELECT sql FROM \"%w\".sqlite_schema WHERE type='table'AND name<>'sqlite_sequence' AND coalesce(rootpage,1)>0\x00SELECT sql FROM \"%w\".sqlite_schema WHERE type='index'\x00SELECT'INSERT INTO vacuum_db.'||quote(name)||' SELECT*FROM\"%w\".'||quote(name)FROM vacuum_db.sqlite_schema WHERE type='table'AND coalesce(rootpage,1)>0\x00INSERT INTO vacuum_db.sqlite_schema SELECT*FROM \"%w\".sqlite_schema WHERE type IN('view','trigger') OR(type='table'AND rootpage=0)\x00CREATE VIRTUAL TABLE %T\x00UPDATE %Q.sqlite_master SET type='table', name=%Q, tbl_name=%Q, rootpage=0, sql=%Q WHERE rowid=#%d\x00name=%Q AND sql=%Q\x00vtable constructor called recursively: %s\x00vtable constructor failed: %s\x00vtable constructor did not declare schema: %s\x00no such module: %s\x00\x00 AND \x00(\x00 (\x00%s=?\x00ANY(%s)\x00>\x00<\x00%s %S\x00SEARCH\x00SCAN\x00AUTOMATIC PARTIAL COVERING INDEX\x00AUTOMATIC COVERING INDEX\x00COVERING INDEX %s\x00INDEX %s\x00 USING \x00 USING INTEGER PRIMARY KEY (%s\x00>? 
AND %s\x00%c?)\x00 VIRTUAL TABLE INDEX %d:%s\x00 LEFT-JOIN\x00BLOOM FILTER ON %S (\x00rowid=?\x00MULTI-INDEX OR\x00INDEX %d\x00RIGHT-JOIN %s\x00regexp\x00ON clause references tables to its right\x00NOCASE\x00too many arguments on %s() - max %d\x00automatic index on %s(%s)\x00auto-index\x00%s.xBestIndex malfunction\x00abbreviated query algorithm search\x00no query solution\x00at most %d tables in a join\x00SCAN CONSTANT ROW\x00second argument to nth_value must be a positive integer\x00argument of ntile must be a positive integer\x00row_number\x00dense_rank\x00rank\x00percent_rank\x00cume_dist\x00ntile\x00last_value\x00nth_value\x00first_value\x00lead\x00lag\x00no such window: %s\x00RANGE with offset PRECEDING/FOLLOWING requires one ORDER BY expression\x00FILTER clause may only be used with aggregate window functions\x00misuse of aggregate: %s()\x00unsupported frame specification\x00PARTITION clause\x00ORDER BY clause\x00frame specification\x00cannot override %s of window: %s\x00DISTINCT is not supported for window functions\x00frame starting offset must be a non-negative integer\x00frame ending offset must be a non-negative integer\x00frame starting offset must be a non-negative number\x00frame ending offset must be a non-negative number\x00%s clause should come after %s not before\x00ORDER BY\x00LIMIT\x00too many terms in compound SELECT\x00syntax error after column name \"%.*s\"\x00parser stack overflow\x00unknown table option: %.*s\x00set list\x00near \"%T\": syntax error\x00qualified table names are not allowed on INSERT, UPDATE, and DELETE statements within triggers\x00the INDEXED BY clause is not allowed on UPDATE or DELETE statements within triggers\x00the NOT INDEXED clause is not allowed on UPDATE or DELETE statements within triggers\x00incomplete input\x00unrecognized token: \"%T\"\x00%s in \"%s\"\x00create\x00temp\x00temporary\x00end\x00explain\x00unable to close due to unfinalized statements or unfinished backups\x00unknown error\x00abort due to ROLLBACK\x00another row available\x00no more rows available\x00not an error\x00SQL logic error\x00access permission denied\x00query aborted\x00database is locked\x00database table is locked\x00attempt to write a readonly database\x00interrupted\x00disk I/O error\x00database disk image is malformed\x00unknown operation\x00database or disk is full\x00unable to open database file\x00locking protocol\x00constraint failed\x00datatype mismatch\x00bad parameter or other API misuse\x00authorization denied\x00column index out of range\x00file is not a database\x00notification message\x00warning message\x00unable to delete/modify user-function due to active statements\x00unable to use function %s in the requested context\x00unknown database: %s\x00unable to delete/modify collation sequence due to active statements\x00file:\x00localhost\x00invalid uri authority: %.*s\x00vfs\x00cache\x00mode\x00no such %s mode: %s\x00%s mode not allowed: %s\x00no such vfs: %s\x00shared\x00private\x00ro\x00rw\x00rwc\x00RTRIM\x00\x00\x00\x00%s at line %d of [%.10s]\x00database corruption\x00misuse\x00cannot open file\x00no such table column: %s.%s\x00SQLITE_\x00database is deadlocked\x00array\x00object\x000123456789abcdef\x00JSON cannot hold BLOB values\x00malformed JSON\x00[0]\x00JSON path error near '%q'\x00json_%s() needs an odd number of arguments\x00$[\x00$.\x00json_object() requires an even number of arguments\x00json_object() labels must be TEXT\x00set\x00insert\x00[]\x00{}\x00CREATE TABLE x(key,value,type,atom,id,parent,fullkey,path,json HIDDEN,root 
HIDDEN)\x00.%.*s\x00[%d]\x00$\x00json\x00json_array\x00json_array_length\x00json_extract\x00->\x00->>\x00json_insert\x00json_object\x00json_patch\x00json_quote\x00json_remove\x00json_replace\x00json_set\x00json_type\x00json_valid\x00json_group_array\x00json_group_object\x00json_each\x00json_tree\x00%s_node\x00data\x00DROP TABLE '%q'.'%q_node';DROP TABLE '%q'.'%q_rowid';DROP TABLE '%q'.'%q_parent';\x00RtreeMatchArg\x00SELECT * FROM %Q.%Q\x00UNIQUE constraint failed: %s.%s\x00rtree constraint failed: %s.(%s<=%s)\x00ALTER TABLE %Q.'%q_node' RENAME TO \"%w_node\";ALTER TABLE %Q.'%q_parent' RENAME TO \"%w_parent\";ALTER TABLE %Q.'%q_rowid' RENAME TO \"%w_rowid\";\x00SELECT stat FROM %Q.sqlite_stat1 WHERE tbl = '%q_rowid'\x00node\x00CREATE TABLE \"%w\".\"%w_rowid\"(rowid INTEGER PRIMARY KEY,nodeno\x00,a%d\x00);CREATE TABLE \"%w\".\"%w_node\"(nodeno INTEGER PRIMARY KEY,data);\x00CREATE TABLE \"%w\".\"%w_parent\"(nodeno INTEGER PRIMARY KEY,parentnode);\x00INSERT INTO \"%w\".\"%w_node\"VALUES(1,zeroblob(%d))\x00INSERT INTO\"%w\".\"%w_rowid\"(rowid,nodeno)VALUES(?1,?2)ON CONFLICT(rowid)DO UPDATE SET nodeno=excluded.nodeno\x00SELECT * FROM \"%w\".\"%w_rowid\" WHERE rowid=?1\x00UPDATE \"%w\".\"%w_rowid\"SET \x00a%d=coalesce(?%d,a%d)\x00a%d=?%d\x00 WHERE rowid=?1\x00INSERT OR REPLACE INTO '%q'.'%q_node' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_node' WHERE nodeno = ?1\x00SELECT nodeno FROM '%q'.'%q_rowid' WHERE rowid = ?1\x00INSERT OR REPLACE INTO '%q'.'%q_rowid' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_rowid' WHERE rowid = ?1\x00SELECT parentnode FROM '%q'.'%q_parent' WHERE nodeno = ?1\x00INSERT OR REPLACE INTO '%q'.'%q_parent' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_parent' WHERE nodeno = ?1\x00PRAGMA %Q.page_size\x00SELECT length(data) FROM '%q'.'%q_node' WHERE nodeno = 1\x00undersize RTree blobs in \"%q_node\"\x00Wrong number of columns for an rtree table\x00Too few columns for an rtree table\x00Too many columns for an rtree table\x00Auxiliary rtree columns must be last\x00CREATE TABLE x(%.*s INT\x00,%.*s\x00);\x00,%.*s REAL\x00,%.*s INT\x00{%lld\x00 %g\x00}\x00Invalid argument to rtreedepth()\x00%z%s%z\x00SELECT data FROM %Q.'%q_node' WHERE nodeno=?\x00Node %lld missing from database\x00SELECT parentnode FROM %Q.'%q_parent' WHERE nodeno=?1\x00SELECT nodeno FROM %Q.'%q_rowid' WHERE rowid=?1\x00Mapping (%lld -> %lld) missing from %s table\x00%_rowid\x00%_parent\x00Found (%lld -> %lld) in %s table, expected (%lld -> %lld)\x00Dimension %d of cell %d on node %lld is corrupt\x00Dimension %d of cell %d on node %lld is corrupt relative to parent\x00Node %lld is too small (%d bytes)\x00Rtree depth out of range (%d)\x00Node %lld is too small for cell count of %d (%d bytes)\x00SELECT count(*) FROM %Q.'%q%s'\x00Wrong number of entries in %%%s table - expected %lld, actual %lld\x00SELECT * FROM %Q.'%q_rowid'\x00Schema corrupt or not an rtree\x00_rowid\x00_parent\x00END\x00wrong number of arguments to function rtreecheck()\x00[\x00[%!g,%!g],\x00[%!g,%!g]]\x00\x00CREATE TABLE x(_shape\x00,%s\x00rtree\x00fullscan\x00_shape does not contain a valid polygon\x00geopoly_overlap\x00geopoly_within\x00geopoly\x00geopoly_area\x00geopoly_blob\x00geopoly_json\x00geopoly_svg\x00geopoly_contains_point\x00geopoly_debug\x00geopoly_bbox\x00geopoly_xform\x00geopoly_regular\x00geopoly_ccw\x00geopoly_group_bbox\x00rtreenode\x00rtreedepth\x00rtreecheck\x00rtree_i32\x00corrupt fossil delta\x00DROP TRIGGER IF EXISTS temp.rbu_insert_tr;DROP TRIGGER IF EXISTS temp.rbu_update1_tr;DROP TRIGGER IF EXISTS temp.rbu_update2_tr;DROP TRIGGER IF 
EXISTS temp.rbu_delete_tr;\x00SELECT rbu_target_name(name, type='view') AS target, name FROM sqlite_schema WHERE type IN ('table', 'view') AND target IS NOT NULL %s ORDER BY name\x00AND rootpage!=0 AND rootpage IS NOT NULL\x00SELECT name, rootpage, sql IS NULL OR substr(8, 6)=='UNIQUE' FROM main.sqlite_schema WHERE type='index' AND tbl_name = ?\x00SELECT (sql COLLATE nocase BETWEEN 'CREATE VIRTUAL' AND 'CREATE VIRTUAM'), rootpage FROM sqlite_schema WHERE name=%Q\x00PRAGMA index_list=%Q\x00SELECT rootpage FROM sqlite_schema WHERE name = %Q\x00PRAGMA table_info=%Q\x00PRAGMA main.index_list = %Q\x00PRAGMA main.index_xinfo = %Q\x00SELECT * FROM '%q'\x00rbu_\x00rbu_rowid\x00table %q %s rbu_rowid column\x00may not have\x00requires\x00PRAGMA table_info(%Q)\x00column missing from %q: %s\x00%z%s\"%w\"\x00%z%s%s\"%w\"%s\x00SELECT max(_rowid_) FROM \"%s%w\"\x00 WHERE _rowid_ > %lld \x00 DESC\x00quote(\x00||','||\x00SELECT %s FROM \"%s%w\" ORDER BY %s LIMIT 1\x00 WHERE (%s) > (%s) \x00_rowid_\x00%z%s \"%w\" COLLATE %Q\x00%z%s \"rbu_imp_%d%w\" COLLATE %Q DESC\x00%z%s quote(\"rbu_imp_%d%w\")\x00SELECT %s FROM \"rbu_imp_%w\" ORDER BY %s LIMIT 1\x00%z%s%s\x00(%s) > (%s)\x00%z%s(%.*s) COLLATE %Q\x00%z%s\"%w\" COLLATE %Q\x00%z%s\"rbu_imp_%d%w\"%s\x00%z%s\"rbu_imp_%d%w\" %s COLLATE %Q\x00%z%s\"rbu_imp_%d%w\" IS ?\x00%z%s%s.\"%w\"\x00%z%sNULL\x00%z, %s._rowid_\x00_rowid_ = ?%d\x00%z%sc%d=?%d\x00_rowid_ = (SELECT id FROM rbu_imposter2 WHERE %z)\x00%z%s\"%w\"=?%d\x00invalid rbu_control value\x00%z%s\"%w\"=rbu_delta(\"%w\", ?%d)\x00%z%s\"%w\"=rbu_fossil_delta(\"%w\", ?%d)\x00PRIMARY KEY(\x00%z%s\"%w\"%s\x00%z)\x00SELECT name FROM sqlite_schema WHERE rootpage = ?\x00%z%sc%d %s COLLATE %Q\x00%z%sc%d%s\x00%z, id INTEGER\x00CREATE TABLE rbu_imposter2(%z, PRIMARY KEY(%z)) WITHOUT ROWID\x00PRIMARY KEY \x00%z%s\"%w\" %s %sCOLLATE %Q%s\x00 NOT NULL\x00%z, %z\x00CREATE TABLE \"rbu_imp_%w\"(%z)%s\x00 WITHOUT ROWID\x00INSERT INTO %s.'rbu_tmp_%q'(rbu_control,%s%s) VALUES(%z)\x00SELECT trim(sql) FROM sqlite_schema WHERE type='index' AND name=?\x00 LIMIT -1 OFFSET %d\x00CREATE TABLE \"rbu_imp_%w\"( %s, PRIMARY KEY( %s ) ) WITHOUT ROWID\x00INSERT INTO \"rbu_imp_%w\" VALUES(%s)\x00DELETE FROM \"rbu_imp_%w\" WHERE %s\x00SELECT %s, 0 AS rbu_control FROM '%q' %s %s %s ORDER BY %s%s\x00AND\x00WHERE\x00SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' %s ORDER BY %s%s\x00SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' %s UNION ALL SELECT %s, rbu_control FROM '%q' %s %s typeof(rbu_control)='integer' AND rbu_control!=1 ORDER BY %s%s\x00rbu_imp_\x00INSERT INTO \"%s%w\"(%s%s) VALUES(%s)\x00, _rowid_\x00DELETE FROM \"%s%w\" WHERE %s\x00, rbu_rowid\x00CREATE TABLE IF NOT EXISTS %s.'rbu_tmp_%q' AS SELECT *%s FROM '%q' WHERE 0;\x00, 0 AS rbu_rowid\x00CREATE TEMP TRIGGER rbu_delete_tr BEFORE DELETE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(3, %s);END;CREATE TEMP TRIGGER rbu_update1_tr BEFORE UPDATE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(3, %s);END;CREATE TEMP TRIGGER rbu_update2_tr AFTER UPDATE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(4, %s);END;\x00CREATE TEMP TRIGGER rbu_insert_tr AFTER INSERT ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(0, %s);END;\x00,_rowid_ \x00,rbu_rowid\x00SELECT %s,%s rbu_control%s FROM '%q'%s %s %s %s\x000 AS \x00UPDATE \"%s%w\" SET %s WHERE %s\x00SELECT k, v FROM %s.rbu_state\x00file:///%s-vacuum?modeof=%s\x00ATTACH %Q AS stat\x00CREATE TABLE IF NOT EXISTS %s.rbu_state(k INTEGER PRIMARY KEY, v)\x00cannot vacuum wal mode 
database\x00file:%s-vactmp?rbu_memory=1%s%s\x00&\x00rbu_tmp_insert\x00rbu_fossil_delta\x00rbu_target_name\x00SELECT * FROM sqlite_schema\x00rbu vfs not found\x00PRAGMA main.wal_checkpoint=restart\x00rbu_exclusive_checkpoint\x00%s-oal\x00%s-wal\x00PRAGMA schema_version\x00PRAGMA schema_version = %d\x00INSERT OR REPLACE INTO %s.rbu_state(k, v) VALUES (%d, %d), (%d, %Q), (%d, %Q), (%d, %d), (%d, %d), (%d, %lld), (%d, %lld), (%d, %lld), (%d, %lld), (%d, %Q) \x00PRAGMA main.%s\x00PRAGMA main.%s = %d\x00PRAGMA writable_schema=1\x00SELECT sql FROM sqlite_schema WHERE sql!='' AND rootpage!=0 AND name!='sqlite_sequence' ORDER BY type DESC\x00SELECT * FROM sqlite_schema WHERE rootpage=0 OR rootpage IS NULL\x00INSERT INTO sqlite_schema VALUES(?,?,?,?,?)\x00PRAGMA writable_schema=0\x00DELETE FROM %s.'rbu_tmp_%q'\x00rbu_state mismatch error\x00rbu_vfs_%d\x00SELECT count(*) FROM sqlite_schema WHERE type='index' AND tbl_name = %Q\x00rbu_index_cnt\x00SELECT 1 FROM sqlite_schema WHERE tbl_name = 'rbu_count'\x00SELECT sum(cnt * (1 + rbu_index_cnt(rbu_target_name(tbl))))FROM rbu_count\x00cannot update wal mode database\x00database modified during rbu %s\x00vacuum\x00update\x00BEGIN IMMEDIATE\x00PRAGMA journal_mode=off\x00-vactmp\x00DELETE FROM stat.rbu_state\x00rbu/zipvfs setup error\x00rbu(%s)/%z\x00rbu_memory\x00SELECT 0, 'tbl', '', 0, '', 1 UNION ALL SELECT 1, 'idx', '', 0, '', 2 UNION ALL SELECT 2, 'stat', '', 0, '', 0\x00PRAGMA '%q'.table_info('%q')\x00%z%s\"%w\".\"%w\".\"%w\"=\"%w\".\"%w\".\"%w\"\x00%z%s\"%w\".\"%w\".\"%w\" IS NOT \"%w\".\"%w\".\"%w\"\x00 OR \x00SELECT * FROM \"%w\".\"%w\" WHERE NOT EXISTS ( SELECT 1 FROM \"%w\".\"%w\" WHERE %s)\x00SELECT * FROM \"%w\".\"%w\", \"%w\".\"%w\" WHERE %s AND (%z)\x00table schemas do not match\x00SELECT tbl, ?2, stat FROM %Q.sqlite_stat1 WHERE tbl IS ?1 AND idx IS (CASE WHEN ?2=X'' THEN NULL ELSE ?2 END)\x00SELECT * FROM \x00 WHERE \x00 IS ?\x00SAVEPOINT changeset\x00RELEASE changeset\x00UPDATE main.\x00 SET \x00 = ?\x00idx IS CASE WHEN length(?4)=0 AND typeof(?4)='blob' THEN NULL ELSE ?4 END \x00DELETE FROM main.\x00 AND (?\x00AND \x00INSERT INTO main.\x00) VALUES(?\x00, ?\x00INSERT INTO main.sqlite_stat1 VALUES(?1, CASE WHEN length(?2)=0 AND typeof(?2)='blob' THEN NULL ELSE ?2 END, ?3)\x00DELETE FROM main.sqlite_stat1 WHERE tbl=?1 AND idx IS CASE WHEN length(?2)=0 AND typeof(?2)='blob' THEN NULL ELSE ?2 END AND (?4 OR stat IS ?3)\x00SAVEPOINT replace_op\x00RELEASE replace_op\x00SAVEPOINT changeset_apply\x00PRAGMA defer_foreign_keys = 1\x00sqlite3changeset_apply(): no such table: %s\x00sqlite3changeset_apply(): table %s has %d columns, expected %d or more\x00sqlite3changeset_apply(): primary key mismatch for table %s\x00PRAGMA defer_foreign_keys = 0\x00RELEASE changeset_apply\x00ROLLBACK TO changeset_apply\x00fts5: parser stack overflow\x00fts5: syntax error near \"%.*s\"\x00%z%.*s\x00wrong number of arguments to function highlight()\x00wrong number of arguments to function snippet()\x00snippet\x00highlight\x00bm25\x00prefix\x00malformed prefix=... directive\x00too many prefix indexes (max %d)\x00prefix length out of range (max 999)\x00tokenize\x00multiple tokenize=... directives\x00parse error in tokenize directive\x00content\x00multiple content=... directives\x00%Q.%Q\x00content_rowid\x00multiple content_rowid=... directives\x00columnsize\x00malformed columnsize=... directive\x00columns\x00malformed detail=... 
directive\x00unrecognized option: \"%.*s\"\x00reserved fts5 column name: %s\x00unindexed\x00unrecognized column option: %s\x00T.%Q\x00, T.%Q\x00, T.c%d\x00reserved fts5 table name: %s\x00parse error in \"%s\"\x00docsize\x00%Q.'%q_%s'\x00CREATE TABLE x(\x00%z%s%Q\x00%z, %Q HIDDEN, %s HIDDEN)\x00pgsz\x00hashsize\x00automerge\x00usermerge\x00crisismerge\x00SELECT k, v FROM %Q.'%q_config'\x00version\x00invalid fts5 file format (found %d, expected %d) - run 'rebuild'\x00unterminated string\x00fts5: syntax error near \"%.1s\"\x00OR\x00NOT\x00NEAR\x00expected integer, got \"%.*s\"\x00fts5: column queries are not supported (detail=none)\x00fts5: %s queries are not supported (detail!=full)\x00phrase\x00block\x00REPLACE INTO '%q'.'%q_data'(id, block) VALUES(?,?)\x00DELETE FROM '%q'.'%q_data' WHERE id>=? AND id<=?\x00DELETE FROM '%q'.'%q_idx' WHERE segid=?\x00PRAGMA %Q.data_version\x00SELECT pgno FROM '%q'.'%q_idx' WHERE segid=? AND term<=? ORDER BY term DESC LIMIT 1\x00INSERT INTO '%q'.'%q_idx'(segid,term,pgno) VALUES(?,?,?)\x00%s_data\x00id INTEGER PRIMARY KEY, block BLOB\x00segid, term, pgno, PRIMARY KEY(segid, term)\x00SELECT segid, term, (pgno>>1), (pgno&1) FROM %Q.'%q_idx' WHERE segid=%d ORDER BY 1, 2\x00\x00\x00\x00\x00\x00recursively defined fts5 content table\x00SELECT rowid, rank FROM %Q.%Q ORDER BY %s(\"%w\"%s%s) %s\x00DESC\x00ASC\x00reads\x00unknown special query: %.*s\x00SELECT %s\x00no such function: %s\x00parse error in rank function: %s\x00%s: table does not support scanning\x00delete-all\x00'delete-all' may only be used with a contentless or external content fts5 table\x00rebuild\x00'rebuild' may not be used with a contentless fts5 table\x00merge\x00integrity-check\x00cannot %s contentless fts5 table: %s\x00DELETE from\x00no such cursor: %lld\x00no such tokenizer: %s\x00error in tokenizer constructor\x00fts5_api_ptr\x00fts5: 2023-03-22 11:56:21 0d1fc92f94cb6b76bffe3ec34d69cffde2924203304e8ffc4155597af0c191da\x00config\x00fts5\x00fts5_source_id\x00SELECT %s FROM %s T WHERE T.%Q >= ? AND T.%Q <= ? ORDER BY T.%Q ASC\x00SELECT %s FROM %s T WHERE T.%Q <= ? AND T.%Q >= ? 
ORDER BY T.%Q DESC\x00SELECT %s FROM %s T WHERE T.%Q=?\x00INSERT INTO %Q.'%q_content' VALUES(%s)\x00REPLACE INTO %Q.'%q_content' VALUES(%s)\x00DELETE FROM %Q.'%q_content' WHERE id=?\x00REPLACE INTO %Q.'%q_docsize' VALUES(?,?)\x00DELETE FROM %Q.'%q_docsize' WHERE id=?\x00SELECT sz FROM %Q.'%q_docsize' WHERE id=?\x00REPLACE INTO %Q.'%q_config' VALUES(?,?)\x00SELECT %s FROM %s AS T\x00DROP TABLE IF EXISTS %Q.'%q_data';DROP TABLE IF EXISTS %Q.'%q_idx';DROP TABLE IF EXISTS %Q.'%q_config';\x00DROP TABLE IF EXISTS %Q.'%q_docsize';\x00DROP TABLE IF EXISTS %Q.'%q_content';\x00ALTER TABLE %Q.'%q_%s' RENAME TO '%q_%s';\x00CREATE TABLE %Q.'%q_%q'(%s)%s\x00fts5: error creating shadow table %q_%s: %s\x00id INTEGER PRIMARY KEY\x00, c%d\x00id INTEGER PRIMARY KEY, sz BLOB\x00k PRIMARY KEY, v\x00DELETE FROM %Q.'%q_data';DELETE FROM %Q.'%q_idx';\x00DELETE FROM %Q.'%q_docsize';\x00SELECT count(*) FROM %Q.'%q_%s'\x00tokenchars\x00separators\x00L* N* Co\x00categories\x00remove_diacritics\x00unicode61\x00al\x00ance\x00ence\x00er\x00ic\x00able\x00ible\x00ant\x00ement\x00ment\x00ent\x00ion\x00ou\x00ism\x00ate\x00iti\x00ous\x00ive\x00ize\x00at\x00bl\x00ble\x00iz\x00ational\x00tional\x00tion\x00enci\x00anci\x00izer\x00logi\x00bli\x00alli\x00entli\x00eli\x00e\x00ousli\x00ization\x00ation\x00ator\x00alism\x00iveness\x00fulness\x00ful\x00ousness\x00aliti\x00iviti\x00biliti\x00ical\x00ness\x00icate\x00iciti\x00ative\x00alize\x00eed\x00ee\x00ed\x00ing\x00case_sensitive\x00ascii\x00porter\x00trigram\x00col\x00row\x00instance\x00fts5vocab: unknown table type: %Q\x00CREATE TABlE vocab(term, col, doc, cnt)\x00CREATE TABlE vocab(term, doc, cnt)\x00CREATE TABlE vocab(term, doc, col, offset)\x00wrong number of vtable arguments\x00recursive definition for %s.%s\x00SELECT t.%Q FROM %Q.%Q AS t WHERE t.%Q MATCH '*id'\x00no such fts5 table: %s.%s\x00fts5vocab\x002023-03-22 11:56:21 0d1fc92f94cb6b76bffe3ec34d69cffde2924203304e8ffc4155597af0c191da\x00" var ts = (*reflect.StringHeader)(unsafe.Pointer(&ts1)).Data diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/sqlite/lib/sqlite_freebsd_arm.go temporal-1.22.5/src/vendor/modernc.org/sqlite/lib/sqlite_freebsd_arm.go --- temporal-1.21.5-1/src/vendor/modernc.org/sqlite/lib/sqlite_freebsd_arm.go 2023-09-29 14:03:35.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/sqlite/lib/sqlite_freebsd_arm.go 2024-02-23 09:46:16.000000000 +0000 @@ -1,4 +1,4 @@ -// Code generated by 'ccgo -DSQLITE_PRIVATE= -export-defines "" -export-enums "" -export-externs X -export-fields F -export-typedefs "" -ignore-unsupported-alignment -pkgname sqlite3 -volatile=sqlite3_io_error_pending,sqlite3_open_file_count,sqlite3_pager_readdb_count,sqlite3_pager_writedb_count,sqlite3_pager_writej_count,sqlite3_search_count,sqlite3_sort_count,saved_cnt,randomnessPid -o lib/sqlite_freebsd_arm.go -trace-translation-units testdata/sqlite-amalgamation-3410000/sqlite3.c -full-path-comments -DNDEBUG -DHAVE_USLEEP -DLONGDOUBLE_TYPE=double -DSQLITE_CORE -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_FTS5 -DSQLITE_ENABLE_GEOPOLY -DSQLITE_ENABLE_MATH_FUNCTIONS -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_OFFSET_SQL_FUNC -DSQLITE_ENABLE_PREUPDATE_HOOK -DSQLITE_ENABLE_RBU -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_SESSION -DSQLITE_ENABLE_SNAPSHOT -DSQLITE_ENABLE_STAT4 -DSQLITE_ENABLE_UNLOCK_NOTIFY -DSQLITE_LIKE_DOESNT_MATCH_BLOBS -DSQLITE_MUTEX_APPDEF=1 -DSQLITE_MUTEX_NOOP -DSQLITE_SOUNDEX -DSQLITE_THREADSAFE=1 -DSQLITE_OS_UNIX=1', DO NOT EDIT. 
+// Code generated by 'ccgo -DSQLITE_PRIVATE= -export-defines "" -export-enums "" -export-externs X -export-fields F -export-typedefs "" -ignore-unsupported-alignment -pkgname sqlite3 -volatile=sqlite3_io_error_pending,sqlite3_open_file_count,sqlite3_pager_readdb_count,sqlite3_pager_writedb_count,sqlite3_pager_writej_count,sqlite3_search_count,sqlite3_sort_count,saved_cnt,randomnessPid -o lib/sqlite_freebsd_arm.go -trace-translation-units testdata/sqlite-amalgamation-3410200/sqlite3.c -full-path-comments -DNDEBUG -DHAVE_USLEEP -DLONGDOUBLE_TYPE=double -DSQLITE_CORE -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_FTS5 -DSQLITE_ENABLE_GEOPOLY -DSQLITE_ENABLE_MATH_FUNCTIONS -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_OFFSET_SQL_FUNC -DSQLITE_ENABLE_PREUPDATE_HOOK -DSQLITE_ENABLE_RBU -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_SESSION -DSQLITE_ENABLE_SNAPSHOT -DSQLITE_ENABLE_STAT4 -DSQLITE_ENABLE_UNLOCK_NOTIFY -DSQLITE_LIKE_DOESNT_MATCH_BLOBS -DSQLITE_MUTEX_APPDEF=1 -DSQLITE_MUTEX_NOOP -DSQLITE_SOUNDEX -DSQLITE_THREADSAFE=1 -DSQLITE_OS_UNIX=1', DO NOT EDIT. package sqlite3 @@ -747,11 +747,11 @@ NC_OrderAgg = 0x8000000 NC_PartIdx = 0x000002 NC_SelfRef = 0x00002e + NC_Subquery = 0x000040 NC_UAggInfo = 0x000100 NC_UBaseReg = 0x000400 NC_UEList = 0x000080 NC_UUpsert = 0x000200 - NC_VarSelect = 0x000040 NDEBUG = 1 NETGRAPHDISC = 6 NN = 1 @@ -1957,7 +1957,7 @@ SQLITE_SHM_UNLOCK = 1 SQLITE_SORTER_PMASZ = 250 SQLITE_SOUNDEX = 1 - SQLITE_SOURCE_ID = "2023-02-21 18:09:37 05941c2a04037fc3ed2ffae11f5d2260706f89431f463518740f72ada350866d" + SQLITE_SOURCE_ID = "2023-03-22 11:56:21 0d1fc92f94cb6b76bffe3ec34d69cffde2924203304e8ffc4155597af0c191da" SQLITE_SO_ASC = 0 SQLITE_SO_DESC = 1 SQLITE_SO_UNDEFINED = -1 @@ -2065,8 +2065,8 @@ SQLITE_UTF8 = 1 SQLITE_VDBEINT_H = 0 SQLITE_VDBE_H = 0 - SQLITE_VERSION = "3.41.0" - SQLITE_VERSION_NUMBER = 3041000 + SQLITE_VERSION = "3.41.2" + SQLITE_VERSION_NUMBER = 3041002 SQLITE_VTABRISK_High = 2 SQLITE_VTABRISK_Low = 0 SQLITE_VTABRISK_Normal = 1 @@ -5268,7 +5268,8 @@ FiIdxCur int32 FiIdxCol int32 FbMaybeNullRow U8 - F__ccgo_pad1 [3]byte + Faff U8 + F__ccgo_pad1 [2]byte FpIENext uintptr } @@ -5897,17 +5898,18 @@ // Handle type for pages. type PgHdr2 = struct { - FpPage uintptr - FpData uintptr - FpExtra uintptr - FpCache uintptr - FpDirty uintptr - FpPager uintptr - Fpgno Pgno - Fflags U16 - FnRef I16 - FpDirtyNext uintptr - FpDirtyPrev uintptr + FpPage uintptr + FpData uintptr + FpExtra uintptr + FpCache uintptr + FpDirty uintptr + FpPager uintptr + Fpgno Pgno + Fflags U16 + F__ccgo_pad1 [2]byte + FnRef I64 + FpDirtyNext uintptr + FpDirtyPrev uintptr } // Handle type for pages. 
@@ -6120,14 +6122,15 @@ FpDirty uintptr FpDirtyTail uintptr FpSynced uintptr - FnRefSum int32 + F__ccgo_pad1 [4]byte + FnRefSum I64 FszCache int32 FszSpill int32 FszPage int32 FszExtra int32 FbPurgeable U8 FeCreate U8 - F__ccgo_pad1 [2]byte + F__ccgo_pad2 [2]byte FxStress uintptr FpStress uintptr FpCache uintptr @@ -6924,7 +6927,7 @@ _ = pMutex if op < 0 || op >= int32(uint32(unsafe.Sizeof([10]Sqlite3StatValueType{}))/uint32(unsafe.Sizeof(Sqlite3StatValueType(0)))) { - return Xsqlite3MisuseError(tls, 23229) + return Xsqlite3MisuseError(tls, 23233) } if statMutex[op] != 0 { pMutex = Xsqlite3Pcache1Mutex(tls) @@ -15109,7 +15112,7 @@ for p = (*UnixInodeInfo)(unsafe.Pointer(pInode)).FpUnused; p != 0; p = pNext { pNext = (*UnixUnusedFd)(unsafe.Pointer(p)).FpNext - robust_close(tls, pFile, (*UnixUnusedFd)(unsafe.Pointer(p)).Ffd, 38271) + robust_close(tls, pFile, (*UnixUnusedFd)(unsafe.Pointer(p)).Ffd, 38275) Xsqlite3_free(tls, p) } (*UnixInodeInfo)(unsafe.Pointer(pInode)).FpUnused = uintptr(0) @@ -15586,7 +15589,7 @@ var pFile uintptr = id unixUnmapfile(tls, pFile) if (*UnixFile)(unsafe.Pointer(pFile)).Fh >= 0 { - robust_close(tls, pFile, (*UnixFile)(unsafe.Pointer(pFile)).Fh, 39055) + robust_close(tls, pFile, (*UnixFile)(unsafe.Pointer(pFile)).Fh, 39059) (*UnixFile)(unsafe.Pointer(pFile)).Fh = -1 } @@ -15877,7 +15880,7 @@ if fd >= 0 { return SQLITE_OK } - return unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 40676), ts+3371, bp+8, 40676) + return unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 40680), ts+3371, bp+8, 40680) } func unixSync(tls *libc.TLS, id uintptr, flags int32) int32 { @@ -15894,14 +15897,14 @@ if rc != 0 { storeLastErrno(tls, pFile, *(*int32)(unsafe.Pointer(libc.X__error(tls)))) - return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(4)<<8, ts+3659, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40717) + return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(4)<<8, ts+3659, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40721) } if int32((*UnixFile)(unsafe.Pointer(pFile)).FctrlFlags)&UNIXFILE_DIRSYNC != 0 { rc = (*(*func(*libc.TLS, uintptr, uintptr) int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 17*12 + 4)))(tls, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, bp) if rc == SQLITE_OK { full_fsync(tls, *(*int32)(unsafe.Pointer(bp)), 0, 0) - robust_close(tls, pFile, *(*int32)(unsafe.Pointer(bp)), 40731) + robust_close(tls, pFile, *(*int32)(unsafe.Pointer(bp)), 40735) } else { rc = SQLITE_OK } @@ -15921,7 +15924,7 @@ rc = robust_ftruncate(tls, (*UnixFile)(unsafe.Pointer(pFile)).Fh, nByte) if rc != 0 { storeLastErrno(tls, pFile, *(*int32)(unsafe.Pointer(libc.X__error(tls)))) - return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(6)<<8, ts+3290, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40762) + return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(6)<<8, ts+3290, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40766) } else { if nByte < (*UnixFile)(unsafe.Pointer(pFile)).FmmapSize { (*UnixFile)(unsafe.Pointer(pFile)).FmmapSize = nByte @@ -15989,7 +15992,7 @@ if (*UnixFile)(unsafe.Pointer(pFile)).FszChunk <= 0 { if robust_ftruncate(tls, (*UnixFile)(unsafe.Pointer(pFile)).Fh, nByte) != 0 { storeLastErrno(tls, pFile, *(*int32)(unsafe.Pointer(libc.X__error(tls)))) - return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(6)<<8, ts+3290, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40883) + return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(6)<<8, ts+3290, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40887) } } @@ -16216,7 +16219,7 @@ } Xsqlite3_free(tls, (*UnixShmNode)(unsafe.Pointer(p)).FapRegion) if 
(*UnixShmNode)(unsafe.Pointer(p)).FhShm >= 0 { - robust_close(tls, pFd, (*UnixShmNode)(unsafe.Pointer(p)).FhShm, 41442) + robust_close(tls, pFd, (*UnixShmNode)(unsafe.Pointer(p)).FhShm, 41446) (*UnixShmNode)(unsafe.Pointer(p)).FhShm = -1 } (*UnixInodeInfo)(unsafe.Pointer((*UnixShmNode)(unsafe.Pointer(p)).FpInode)).FpShmNode = uintptr(0) @@ -16244,7 +16247,7 @@ rc = unixShmSystemLock(tls, pDbFd, F_WRLCK, (22+SQLITE_SHM_NLOCK)*4+SQLITE_SHM_NLOCK, 1) if rc == SQLITE_OK && robust_ftruncate(tls, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FhShm, int64(3)) != 0 { - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(18)<<8, ts+3290, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename, 41499) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(18)<<8, ts+3290, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename, 41503) } } } else if int32((*flock)(unsafe.Pointer(bp+8)).Fl_type) == F_WRLCK { @@ -16343,7 +16346,7 @@ if !((*unixShmNode)(unsafe.Pointer(pShmNode)).FhShm < 0) { goto __10 } - rc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 41624), ts+3254, zShm, 41624) + rc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 41628), ts+3254, zShm, 41628) goto shm_open_err __10: ; @@ -16473,7 +16476,7 @@ goto __14 } zFile = (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(19)<<8, ts+3325, zFile, 41768) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(19)<<8, ts+3325, zFile, 41772) goto shmpage_out __14: ; @@ -16519,7 +16522,7 @@ if !(pMem == libc.UintptrFromInt32(-1)) { goto __20 } - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(21)<<8, ts+3412, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename, 41795) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(21)<<8, ts+3412, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename, 41799) goto shmpage_out __20: ; @@ -16758,7 +16761,7 @@ if pNew == libc.UintptrFromInt32(-1) { pNew = uintptr(0) nNew = int64(0) - unixLogErrorAtLine(tls, SQLITE_OK, zErr, (*UnixFile)(unsafe.Pointer(pFd)).FzPath, 42169) + unixLogErrorAtLine(tls, SQLITE_OK, zErr, (*UnixFile)(unsafe.Pointer(pFd)).FzPath, 42173) (*UnixFile)(unsafe.Pointer(pFd)).FmmapSizeMax = int64(0) } @@ -16892,7 +16895,7 @@ unixEnterMutex(tls) rc = findInodeInfo(tls, pNew, pNew+8) if rc != SQLITE_OK { - robust_close(tls, pNew, h, 42672) + robust_close(tls, pNew, h, 42676) h = -1 } unixLeaveMutex(tls) @@ -16913,7 +16916,7 @@ storeLastErrno(tls, pNew, 0) if rc != SQLITE_OK { if h >= 0 { - robust_close(tls, pNew, h, 42757) + robust_close(tls, pNew, h, 42761) } } else { (*Sqlite3_file)(unsafe.Pointer(pId)).FpMethods = pLockingStyle @@ -17229,7 +17232,7 @@ if !(fd < 0) { goto __19 } - rc2 = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43198), ts+3254, zName, 43198) + rc2 = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43202), ts+3254, zName, 43202) if !(rc == SQLITE_OK) { goto __20 } @@ -17320,7 +17323,7 @@ if *(*int32)(unsafe.Pointer(libc.X__error(tls))) == ENOENT { rc = SQLITE_IOERR | int32(23)<<8 } else { - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(10)<<8, ts+3364, zPath, 43337) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(10)<<8, ts+3364, zPath, 43341) } return rc } @@ -17328,9 +17331,9 @@ rc = (*(*func(*libc.TLS, uintptr, uintptr) int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 17*12 + 4)))(tls, zPath, bp) if rc == SQLITE_OK { if full_fsync(tls, *(*int32)(unsafe.Pointer(bp)), 0, 0) != 0 { - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(5)<<8, ts+3781, zPath, 43347) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(5)<<8, ts+3781, 
zPath, 43351) } - robust_close(tls, uintptr(0), *(*int32)(unsafe.Pointer(bp)), 43349) + robust_close(tls, uintptr(0), *(*int32)(unsafe.Pointer(bp)), 43353) } else { rc = SQLITE_OK } @@ -17394,18 +17397,18 @@ zIn = (*DbPath)(unsafe.Pointer(pPath)).FzOut if (*(*func(*libc.TLS, uintptr, uintptr) int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 27*12 + 4)))(tls, zIn, bp) != 0 { if *(*int32)(unsafe.Pointer(libc.X__error(tls))) != ENOENT { - (*DbPath)(unsafe.Pointer(pPath)).Frc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43443), ts+3452, zIn, 43443) + (*DbPath)(unsafe.Pointer(pPath)).Frc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43447), ts+3452, zIn, 43447) } } else if int32((*stat)(unsafe.Pointer(bp)).Fst_mode)&0170000 == 0120000 { var got Ssize_t if libc.PostIncInt32(&(*DbPath)(unsafe.Pointer(pPath)).FnSymlink, 1) > SQLITE_MAX_SYMLINK { - (*DbPath)(unsafe.Pointer(pPath)).Frc = Xsqlite3CantopenError(tls, 43449) + (*DbPath)(unsafe.Pointer(pPath)).Frc = Xsqlite3CantopenError(tls, 43453) return } got = (*(*func(*libc.TLS, uintptr, uintptr, Size_t) Ssize_t)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*12 + 4)))(tls, zIn, bp+224, uint32(unsafe.Sizeof([1026]uint8{}))-uint32(2)) if got <= 0 || got >= Ssize_t(unsafe.Sizeof([1026]uint8{}))-2 { - (*DbPath)(unsafe.Pointer(pPath)).Frc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43454), ts+3443, zIn, 43454) + (*DbPath)(unsafe.Pointer(pPath)).Frc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43458), ts+3443, zIn, 43458) return } *(*uint8)(unsafe.Pointer(bp + 224 + uintptr(got))) = uint8(0) @@ -17445,14 +17448,14 @@ (*DbPath)(unsafe.Pointer(bp + 1028)).FzOut = zOut if int32(*(*uint8)(unsafe.Pointer(zPath))) != '/' { if (*(*func(*libc.TLS, uintptr, Size_t) uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 3*12 + 4)))(tls, bp, uint32(unsafe.Sizeof([1026]uint8{}))-uint32(2)) == uintptr(0) { - return unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43512), ts+3272, zPath, 43512) + return unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43516), ts+3272, zPath, 43516) } appendAllPathElements(tls, bp+1028, bp) } appendAllPathElements(tls, bp+1028, zPath) *(*uint8)(unsafe.Pointer(zOut + uintptr((*DbPath)(unsafe.Pointer(bp+1028)).FnUsed))) = uint8(0) if (*DbPath)(unsafe.Pointer(bp+1028)).Frc != 0 || (*DbPath)(unsafe.Pointer(bp+1028)).FnUsed < 2 { - return Xsqlite3CantopenError(tls, 43518) + return Xsqlite3CantopenError(tls, 43522) } if (*DbPath)(unsafe.Pointer(bp+1028)).FnSymlink != 0 { return SQLITE_OK | int32(2)<<8 @@ -17547,7 +17550,7 @@ for __ccgo := true; __ccgo; __ccgo = got < 0 && *(*int32)(unsafe.Pointer(libc.X__error(tls))) == EINTR { got = (*(*func(*libc.TLS, int32, uintptr, Size_t) Ssize_t)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 8*12 + 4)))(tls, fd, zBuf, uint32(nBuf)) } - robust_close(tls, uintptr(0), fd, 43619) + robust_close(tls, uintptr(0), fd, 43623) } } @@ -18977,7 +18980,7 @@ libc.Xmemset(tls, pPgHdr+16, 0, uint32(unsafe.Sizeof(PgHdr{}))-uint32(uintptr(0)+16)) (*PgHdr)(unsafe.Pointer(pPgHdr)).FpPage = pPage (*PgHdr)(unsafe.Pointer(pPgHdr)).FpData = (*Sqlite3_pcache_page)(unsafe.Pointer(pPage)).FpBuf - (*PgHdr)(unsafe.Pointer(pPgHdr)).FpExtra = pPgHdr + 1*40 + (*PgHdr)(unsafe.Pointer(pPgHdr)).FpExtra = pPgHdr + 1*48 libc.Xmemset(tls, (*PgHdr)(unsafe.Pointer(pPgHdr)).FpExtra, 0, uint32(8)) (*PgHdr)(unsafe.Pointer(pPgHdr)).FpCache = pCache (*PgHdr)(unsafe.Pointer(pPgHdr)).Fpgno = pgno @@ -19007,7 +19010,7 @@ // reference count drops to 0, then it is made eligible for 
recycling. func Xsqlite3PcacheRelease(tls *libc.TLS, p uintptr) { (*PCache)(unsafe.Pointer((*PgHdr)(unsafe.Pointer(p)).FpCache)).FnRefSum-- - if int32(libc.PreDecInt16(&(*PgHdr)(unsafe.Pointer(p)).FnRef, 1)) == 0 { + if libc.PreDecInt64(&(*PgHdr)(unsafe.Pointer(p)).FnRef, 1) == int64(0) { if int32((*PgHdr)(unsafe.Pointer(p)).Fflags)&PGHDR_CLEAN != 0 { pcacheUnpin(tls, p) } else { @@ -19058,7 +19061,7 @@ *(*U16)(unsafe.Pointer(p + 28)) &= libc.Uint16FromInt32(libc.CplInt32(PGHDR_DIRTY | PGHDR_NEED_SYNC | PGHDR_WRITEABLE)) *(*U16)(unsafe.Pointer(p + 28)) |= U16(PGHDR_CLEAN) - if int32((*PgHdr)(unsafe.Pointer(p)).FnRef) == 0 { + if (*PgHdr)(unsafe.Pointer(p)).FnRef == int64(0) { pcacheUnpin(tls, p) } } @@ -19162,8 +19165,8 @@ } func pcacheMergeDirtyList(tls *libc.TLS, pA uintptr, pB uintptr) uintptr { - bp := tls.Alloc(40) - defer tls.Free(40) + bp := tls.Alloc(48) + defer tls.Free(48) var pTail uintptr pTail = bp @@ -19241,13 +19244,13 @@ // // This is not the total number of pages referenced, but the sum of the // reference count for all pages. -func Xsqlite3PcacheRefCount(tls *libc.TLS, pCache uintptr) int32 { +func Xsqlite3PcacheRefCount(tls *libc.TLS, pCache uintptr) I64 { return (*PCache)(unsafe.Pointer(pCache)).FnRefSum } // Return the number of references to the page supplied as an argument. -func Xsqlite3PcachePageRefcount(tls *libc.TLS, p uintptr) int32 { - return int32((*PgHdr)(unsafe.Pointer(p)).FnRef) +func Xsqlite3PcachePageRefcount(tls *libc.TLS, p uintptr) I64 { + return (*PgHdr)(unsafe.Pointer(p)).FnRef } // Return the total number of pages in the cache. @@ -21539,7 +21542,7 @@ pPg = Xsqlite3PagerLookup(tls, pPager, iPg) if pPg != 0 { - if Xsqlite3PcachePageRefcount(tls, pPg) == 1 { + if Xsqlite3PcachePageRefcount(tls, pPg) == int64(1) { Xsqlite3PcacheDrop(tls, pPg) } else { rc = readDbPage(tls, pPg) @@ -21972,7 +21975,7 @@ var pageSize U32 = *(*U32)(unsafe.Pointer(pPageSize)) if (int32((*Pager)(unsafe.Pointer(pPager)).FmemDb) == 0 || (*Pager)(unsafe.Pointer(pPager)).FdbSize == Pgno(0)) && - Xsqlite3PcacheRefCount(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache) == 0 && + Xsqlite3PcacheRefCount(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache) == int64(0) && pageSize != 0 && pageSize != U32((*Pager)(unsafe.Pointer(pPager)).FpageSize) { var pNew uintptr = uintptr(0) *(*I64)(unsafe.Pointer(bp)) = int64(0) @@ -22124,9 +22127,9 @@ Xsqlite3OsUnfetch(tls, (*Pager)(unsafe.Pointer(pPager)).Ffd, I64(pgno-Pgno(1))*(*Pager)(unsafe.Pointer(pPager)).FpageSize, pData) return SQLITE_NOMEM } - (*PgHdr)(unsafe.Pointer(p)).FpExtra = p + 1*40 + (*PgHdr)(unsafe.Pointer(p)).FpExtra = p + 1*48 (*PgHdr)(unsafe.Pointer(p)).Fflags = U16(PGHDR_MMAP) - (*PgHdr)(unsafe.Pointer(p)).FnRef = int16(1) + (*PgHdr)(unsafe.Pointer(p)).FnRef = int64(1) (*PgHdr)(unsafe.Pointer(p)).FpPager = pPager } @@ -22458,7 +22461,7 @@ for rc == SQLITE_OK && pList != 0 { var pNext uintptr = (*PgHdr)(unsafe.Pointer(pList)).FpDirty - if int32((*PgHdr)(unsafe.Pointer(pList)).FnRef) == 0 { + if (*PgHdr)(unsafe.Pointer(pList)).FnRef == int64(0) { rc = pagerStress(tls, pPager, pList) } pList = pNext @@ -22608,7 +22611,7 @@ goto __12 } - rc = Xsqlite3CantopenError(tls, 60235) + rc = Xsqlite3CantopenError(tls, 60239) __12: ; if !(rc != SQLITE_OK) { @@ -22989,7 +22992,7 @@ if !(rc == SQLITE_OK && *(*int32)(unsafe.Pointer(bp + 8))&SQLITE_OPEN_READONLY != 0) { goto __10 } - rc = Xsqlite3CantopenError(tls, 60754) + rc = Xsqlite3CantopenError(tls, 60758) Xsqlite3OsClose(tls, (*Pager)(unsafe.Pointer(pPager)).Fjfd) __10: ; @@ -23095,7 +23098,7 
@@ } func pagerUnlockIfUnused(tls *libc.TLS, pPager uintptr) { - if Xsqlite3PcacheRefCount(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache) == 0 { + if Xsqlite3PcacheRefCount(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache) == int64(0) { pagerUnlockAndRollback(tls, pPager) } } @@ -23113,7 +23116,7 @@ if !(pgno == Pgno(0)) { goto __1 } - return Xsqlite3CorruptError(tls, 60967) + return Xsqlite3CorruptError(tls, 60971) __1: ; *(*uintptr)(unsafe.Pointer(bp)) = Xsqlite3PcacheFetch(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache, pgno, 3) @@ -23152,7 +23155,7 @@ if !(pgno == (*Pager)(unsafe.Pointer(pPager)).FlckPgno) { goto __7 } - rc = Xsqlite3CorruptError(tls, 60999) + rc = Xsqlite3CorruptError(tls, 61003) goto pager_acquire_err __7: ; @@ -23229,7 +23232,7 @@ (int32((*Pager)(unsafe.Pointer(pPager)).FeState) == PAGER_READER || flags&PAGER_GET_READONLY != 0)) if pgno <= Pgno(1) && pgno == Pgno(0) { - return Xsqlite3CorruptError(tls, 61078) + return Xsqlite3CorruptError(tls, 61082) } if bMmapOk != 0 && (*Pager)(unsafe.Pointer(pPager)).FpWal != uintptr(0) { @@ -23987,7 +23990,7 @@ // Return the number of references to the specified page. func Xsqlite3PagerPageRefcount(tls *libc.TLS, pPage uintptr) int32 { - return Xsqlite3PcachePageRefcount(tls, pPage) + return int32(Xsqlite3PcachePageRefcount(tls, pPage)) } // Parameter eStat must be one of SQLITE_DBSTATUS_CACHE_HIT, _MISS, _WRITE, @@ -24230,9 +24233,9 @@ pPgOld = Xsqlite3PagerLookup(tls, pPager, pgno) if pPgOld != 0 { - if int32((*PgHdr)(unsafe.Pointer(pPgOld)).FnRef) > 1 { + if (*PgHdr)(unsafe.Pointer(pPgOld)).FnRef > int64(1) { Xsqlite3PagerUnrefNotNull(tls, pPgOld) - return Xsqlite3CorruptError(tls, 62623) + return Xsqlite3CorruptError(tls, 62627) } *(*U16)(unsafe.Pointer(pPg + 28)) |= U16(int32((*PgHdr)(unsafe.Pointer(pPgOld)).Fflags) & PGHDR_NEED_SYNC) if (*Pager)(unsafe.Pointer(pPager)).FtempFile != 0 { @@ -24986,7 +24989,7 @@ nCollide = idx for iKey = walHash(tls, iPage); *(*Ht_slot)(unsafe.Pointer((*WalHashLoc)(unsafe.Pointer(bp)).FaHash + uintptr(iKey)*2)) != 0; iKey = walNextHash(tls, iKey) { if libc.PostDecInt32(&nCollide, 1) == 0 { - return Xsqlite3CorruptError(tls, 64387) + return Xsqlite3CorruptError(tls, 64391) } } *(*U32)(unsafe.Pointer((*WalHashLoc)(unsafe.Pointer(bp)).FaPgno + uintptr(idx-1)*4)) = iPage @@ -25085,7 +25088,7 @@ if !(version != U32(WAL_MAX_VERSION)) { goto __7 } - rc = Xsqlite3CantopenError(tls, 64519) + rc = Xsqlite3CantopenError(tls, 64523) goto finished __7: ; @@ -25670,7 +25673,7 @@ goto __14 } - rc = Xsqlite3CorruptError(tls, 65333) + rc = Xsqlite3CorruptError(tls, 65337) goto __15 __14: Xsqlite3OsFileControlHint(tls, (*Wal)(unsafe.Pointer(pWal)).FpDbFd, SQLITE_FCNTL_SIZE_HINT, bp+16) @@ -25945,7 +25948,7 @@ } if badHdr == 0 && (*Wal)(unsafe.Pointer(pWal)).Fhdr.FiVersion != U32(WALINDEX_MAX_VERSION) { - rc = Xsqlite3CantopenError(tls, 65682) + rc = Xsqlite3CantopenError(tls, 65686) } if (*Wal)(unsafe.Pointer(pWal)).FbShmUnreliable != 0 { if rc != SQLITE_OK { @@ -26418,7 +26421,7 @@ iRead = iFrame } if libc.PostDecInt32(&nCollide, 1) == 0 { - return Xsqlite3CorruptError(tls, 66419) + return Xsqlite3CorruptError(tls, 66423) } iKey = walNextHash(tls, iKey) } @@ -26923,7 +26926,7 @@ if rc == SQLITE_OK { if (*Wal)(unsafe.Pointer(pWal)).Fhdr.FmxFrame != 0 && walPagesize(tls, pWal) != nBuf { - rc = Xsqlite3CorruptError(tls, 67138) + rc = Xsqlite3CorruptError(tls, 67142) } else { rc = walCheckpoint(tls, pWal, db, eMode2, xBusy2, pBusyArg, sync_flags, zBuf) } @@ -27580,7 +27583,7 @@ } Xsqlite3VdbeRecordUnpack(tls, 
pKeyInfo, int32(nKey), pKey, pIdxKey) if int32((*UnpackedRecord)(unsafe.Pointer(pIdxKey)).FnField) == 0 || int32((*UnpackedRecord)(unsafe.Pointer(pIdxKey)).FnField) > int32((*KeyInfo)(unsafe.Pointer(pKeyInfo)).FnAllField) { - rc = Xsqlite3CorruptError(tls, 69249) + rc = Xsqlite3CorruptError(tls, 69253) } else { rc = Xsqlite3BtreeIndexMoveto(tls, pCur, pIdxKey, pRes) } @@ -27717,7 +27720,7 @@ if !(key == Pgno(0)) { goto __2 } - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69430) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69434) return __2: ; @@ -27734,7 +27737,7 @@ goto __4 } - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69443) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69447) goto ptrmap_exit __4: ; @@ -27742,7 +27745,7 @@ if !(offset < 0) { goto __5 } - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69448) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69452) goto ptrmap_exit __5: ; @@ -27785,7 +27788,7 @@ offset = int32(Pgno(5) * (key - Pgno(iPtrmap) - Pgno(1))) if offset < 0 { Xsqlite3PagerUnref(tls, *(*uintptr)(unsafe.Pointer(bp))) - return Xsqlite3CorruptError(tls, 69493) + return Xsqlite3CorruptError(tls, 69497) } *(*U8)(unsafe.Pointer(pEType)) = *(*U8)(unsafe.Pointer(pPtrmap + uintptr(offset))) @@ -27795,7 +27798,7 @@ Xsqlite3PagerUnref(tls, *(*uintptr)(unsafe.Pointer(bp))) if int32(*(*U8)(unsafe.Pointer(pEType))) < 1 || int32(*(*U8)(unsafe.Pointer(pEType))) > 5 { - return Xsqlite3CorruptError(tls, 69501) + return Xsqlite3CorruptError(tls, 69505) } return SQLITE_OK } @@ -28045,7 +28048,7 @@ if U32((*CellInfo)(unsafe.Pointer(bp)).FnLocal) < (*CellInfo)(unsafe.Pointer(bp)).FnPayload { var ovfl Pgno if Uptr((*MemPage)(unsafe.Pointer(pSrc)).FaDataEnd) >= Uptr(pCell) && Uptr((*MemPage)(unsafe.Pointer(pSrc)).FaDataEnd) < Uptr(pCell+uintptr((*CellInfo)(unsafe.Pointer(bp)).FnLocal)) { - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69893) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69897) return } ovfl = Xsqlite3Get4byte(tls, pCell+uintptr(int32((*CellInfo)(unsafe.Pointer(bp)).FnSize)-4)) @@ -28092,7 +28095,7 @@ if !(iFree > usableSize-4) { goto __2 } - return Xsqlite3CorruptError(tls, 69951) + return Xsqlite3CorruptError(tls, 69955) __2: ; if !(iFree != 0) { @@ -28102,7 +28105,7 @@ if !(iFree2 > usableSize-4) { goto __4 } - return Xsqlite3CorruptError(tls, 69954) + return Xsqlite3CorruptError(tls, 69958) __4: ; if !(0 == iFree2 || int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree2)))) == 0 && int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree2+1)))) == 0) { @@ -28115,7 +28118,7 @@ if !(top >= iFree) { goto __6 } - return Xsqlite3CorruptError(tls, 69962) + return Xsqlite3CorruptError(tls, 69966) __6: ; if !(iFree2 != 0) { @@ -28124,14 +28127,14 @@ if !(iFree+sz > iFree2) { goto __9 } - return Xsqlite3CorruptError(tls, 69965) + return Xsqlite3CorruptError(tls, 69969) __9: ; sz2 = int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree2+2))))<<8 | int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree2+2) + 1))) if !(iFree2+sz2 > usableSize) { goto __10 } - return Xsqlite3CorruptError(tls, 69967) + return Xsqlite3CorruptError(tls, 69971) __10: ; libc.Xmemmove(tls, data+uintptr(iFree+sz+sz2), data+uintptr(iFree+sz), uint32(iFree2-(iFree+sz))) @@ -28141,7 +28144,7 @@ if !(iFree+sz > usableSize) { goto __11 } - return Xsqlite3CorruptError(tls, 69971) + return Xsqlite3CorruptError(tls, 69975) __11: ; __8: @@ -28205,7 +28208,7 @@ if !(pc < iCellStart || pc > iCellLast) { goto __22 } 
- return Xsqlite3CorruptError(tls, 70004) + return Xsqlite3CorruptError(tls, 70008) __22: ; size = int32((*struct { @@ -28215,7 +28218,7 @@ if !(cbrk < iCellStart || pc+size > usableSize) { goto __23 } - return Xsqlite3CorruptError(tls, 70010) + return Xsqlite3CorruptError(tls, 70014) __23: ; *(*U8)(unsafe.Pointer(pAddr1)) = U8(cbrk >> 8) @@ -28237,7 +28240,7 @@ if !(int32(*(*uint8)(unsafe.Pointer(data + uintptr(hdr+7))))+cbrk-iCellFirst != (*MemPage)(unsafe.Pointer(pPage)).FnFree) { goto __24 } - return Xsqlite3CorruptError(tls, 70024) + return Xsqlite3CorruptError(tls, 70028) __24: ; *(*uint8)(unsafe.Pointer(data + uintptr(hdr+5))) = U8(cbrk >> 8) @@ -28272,7 +28275,7 @@ *(*U8)(unsafe.Pointer(aData + uintptr(hdr+7))) += U8(int32(U8(x))) return aData + uintptr(pc) } else if x+pc > maxPC { - *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70081) + *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70085) return uintptr(0) } else { *(*U8)(unsafe.Pointer(aData + uintptr(pc+2))) = U8(x >> 8) @@ -28285,13 +28288,13 @@ pc = int32(*(*U8)(unsafe.Pointer(pTmp)))<<8 | int32(*(*U8)(unsafe.Pointer(pTmp + 1))) if pc <= iAddr { if pc != 0 { - *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70096) + *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70100) } return uintptr(0) } } if pc > maxPC+nByte-4 { - *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70103) + *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70107) } return uintptr(0) } @@ -28316,7 +28319,7 @@ if top == 0 && (*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize == U32(65536) { top = 65536 } else { - return Xsqlite3CorruptError(tls, 70152) + return Xsqlite3CorruptError(tls, 70156) } } @@ -28327,7 +28330,7 @@ *(*int32)(unsafe.Pointer(pIdx)) = libc.AssignInt32(&g2, (int32(pSpace)-int32(data))/1) if g2 <= gap { - return Xsqlite3CorruptError(tls, 70170) + return Xsqlite3CorruptError(tls, 70174) } else { return SQLITE_OK } @@ -28379,22 +28382,22 @@ if int32(iFreeBlk) == 0 { break } - return Xsqlite3CorruptError(tls, 70249) + return Xsqlite3CorruptError(tls, 70253) } iPtr = iFreeBlk } if U32(iFreeBlk) > (*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize-U32(4) { - return Xsqlite3CorruptError(tls, 70254) + return Xsqlite3CorruptError(tls, 70258) } if iFreeBlk != 0 && iEnd+U32(3) >= U32(iFreeBlk) { nFrag = U8(U32(iFreeBlk) - iEnd) if iEnd > U32(iFreeBlk) { - return Xsqlite3CorruptError(tls, 70266) + return Xsqlite3CorruptError(tls, 70270) } iEnd = U32(int32(iFreeBlk) + (int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(iFreeBlk)+2))))<<8 | int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(iFreeBlk)+2) + 1))))) if iEnd > (*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize { - return Xsqlite3CorruptError(tls, 70269) + return Xsqlite3CorruptError(tls, 70273) } iSize = U16(iEnd - U32(iStart)) iFreeBlk = U16(int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFreeBlk))))<<8 | int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFreeBlk) + 1)))) @@ -28404,7 +28407,7 @@ var iPtrEnd int32 = int32(iPtr) + (int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(iPtr)+2))))<<8 | int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(iPtr)+2) + 1)))) if iPtrEnd+3 >= int32(iStart) { if iPtrEnd > int32(iStart) { - return Xsqlite3CorruptError(tls, 70282) + return Xsqlite3CorruptError(tls, 70286) } nFrag = U8(int32(nFrag) + (int32(iStart) - iPtrEnd)) iSize = U16(iEnd - U32(iPtr)) @@ -28412,7 +28415,7 @@ } } if int32(nFrag) > 
int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+7)))) { - return Xsqlite3CorruptError(tls, 70288) + return Xsqlite3CorruptError(tls, 70292) } *(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+7))) -= uint8(int32(nFrag)) } @@ -28420,10 +28423,10 @@ x = U16(int32(*(*U8)(unsafe.Pointer(pTmp)))<<8 | int32(*(*U8)(unsafe.Pointer(pTmp + 1)))) if int32(iStart) <= int32(x) { if int32(iStart) < int32(x) { - return Xsqlite3CorruptError(tls, 70297) + return Xsqlite3CorruptError(tls, 70301) } if int32(iPtr) != int32(hdr)+1 { - return Xsqlite3CorruptError(tls, 70298) + return Xsqlite3CorruptError(tls, 70302) } *(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+1))) = U8(int32(iFreeBlk) >> 8) *(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+1) + 1)) = U8(iFreeBlk) @@ -28483,7 +28486,7 @@ (*MemPage)(unsafe.Pointer(pPage)).FxParseCell = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr) }{btreeParseCellPtrIndex})) - return Xsqlite3CorruptError(tls, 70357) + return Xsqlite3CorruptError(tls, 70361) } } else { (*MemPage)(unsafe.Pointer(pPage)).FchildPtrSize = U8(4) @@ -28519,7 +28522,7 @@ (*MemPage)(unsafe.Pointer(pPage)).FxParseCell = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr) }{btreeParseCellPtrIndex})) - return Xsqlite3CorruptError(tls, 70381) + return Xsqlite3CorruptError(tls, 70385) } } return SQLITE_OK @@ -28549,11 +28552,11 @@ var next U32 var size U32 if pc < top { - return Xsqlite3CorruptError(tls, 70432) + return Xsqlite3CorruptError(tls, 70436) } for 1 != 0 { if pc > iCellLast { - return Xsqlite3CorruptError(tls, 70437) + return Xsqlite3CorruptError(tls, 70441) } next = U32(int32(*(*U8)(unsafe.Pointer(data + uintptr(pc))))<<8 | int32(*(*U8)(unsafe.Pointer(data + uintptr(pc) + 1)))) size = U32(int32(*(*U8)(unsafe.Pointer(data + uintptr(pc+2))))<<8 | int32(*(*U8)(unsafe.Pointer(data + uintptr(pc+2) + 1)))) @@ -28564,15 +28567,15 @@ pc = int32(next) } if next > U32(0) { - return Xsqlite3CorruptError(tls, 70447) + return Xsqlite3CorruptError(tls, 70451) } if U32(pc)+size > uint32(usableSize) { - return Xsqlite3CorruptError(tls, 70451) + return Xsqlite3CorruptError(tls, 70455) } } if nFree > usableSize || nFree < iCellFirst { - return Xsqlite3CorruptError(tls, 70463) + return Xsqlite3CorruptError(tls, 70467) } (*MemPage)(unsafe.Pointer(pPage)).FnFree = int32(U16(nFree - iCellFirst)) return SQLITE_OK @@ -28600,14 +28603,14 @@ pc = int32(*(*U8)(unsafe.Pointer(data + uintptr(cellOffset+i*2))))<<8 | int32(*(*U8)(unsafe.Pointer(data + uintptr(cellOffset+i*2) + 1))) if pc < iCellFirst || pc > iCellLast { - return Xsqlite3CorruptError(tls, 70494) + return Xsqlite3CorruptError(tls, 70498) } sz = int32((*struct { f func(*libc.TLS, uintptr, uintptr) U16 })(unsafe.Pointer(&struct{ uintptr }{(*MemPage)(unsafe.Pointer(pPage)).FxCellSize})).f(tls, pPage, data+uintptr(pc))) if pc+sz > usableSize { - return Xsqlite3CorruptError(tls, 70499) + return Xsqlite3CorruptError(tls, 70503) } } return SQLITE_OK @@ -28621,7 +28624,7 @@ data = (*MemPage)(unsafe.Pointer(pPage)).FaData + uintptr((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset) if decodeFlags(tls, pPage, int32(*(*U8)(unsafe.Pointer(data)))) != 0 { - return Xsqlite3CorruptError(tls, 70531) + return Xsqlite3CorruptError(tls, 70535) } (*MemPage)(unsafe.Pointer(pPage)).FmaskPage = U16((*BtShared)(unsafe.Pointer(pBt)).FpageSize - U32(1)) @@ -28633,7 +28636,7 @@ (*MemPage)(unsafe.Pointer(pPage)).FnCell = U16(int32(*(*U8)(unsafe.Pointer(data + 3)))<<8 | int32(*(*U8)(unsafe.Pointer(data + 3 + 
1)))) if U32((*MemPage)(unsafe.Pointer(pPage)).FnCell) > ((*BtShared)(unsafe.Pointer(pBt)).FpageSize-U32(8))/U32(6) { - return Xsqlite3CorruptError(tls, 70545) + return Xsqlite3CorruptError(tls, 70549) } (*MemPage)(unsafe.Pointer(pPage)).FnFree = -1 @@ -28736,7 +28739,7 @@ if !(pgno > btreePagecount(tls, pBt)) { goto __1 } - rc = Xsqlite3CorruptError(tls, 70700) + rc = Xsqlite3CorruptError(tls, 70704) goto getAndInitPage_error1 __1: ; @@ -28764,7 +28767,7 @@ if !(pCur != 0 && (int32((*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppPage)))).FnCell) < 1 || int32((*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppPage)))).FintKey) != int32((*BtCursor)(unsafe.Pointer(pCur)).FcurIntKey))) { goto __5 } - rc = Xsqlite3CorruptError(tls, 70721) + rc = Xsqlite3CorruptError(tls, 70725) goto getAndInitPage_error2 __5: ; @@ -28803,7 +28806,7 @@ if Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppPage)))).FpDbPage) > 1 { releasePage(tls, *(*uintptr)(unsafe.Pointer(ppPage))) *(*uintptr)(unsafe.Pointer(ppPage)) = uintptr(0) - return Xsqlite3CorruptError(tls, 70787) + return Xsqlite3CorruptError(tls, 70791) } (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppPage)))).FisInit = U8(0) } else { @@ -29686,7 +29689,7 @@ if !(Xsqlite3WritableSchema(tls, (*BtShared)(unsafe.Pointer(pBt)).Fdb) == 0) { goto __18 } - rc = Xsqlite3CorruptError(tls, 71722) + rc = Xsqlite3CorruptError(tls, 71726) goto page1_init_failed goto __19 __18: @@ -30101,7 +30104,7 @@ if int32(eType) == PTRMAP_OVERFLOW2 { if Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer(pPage)).FaData) != iFrom { - return Xsqlite3CorruptError(tls, 72143) + return Xsqlite3CorruptError(tls, 72147) } Xsqlite3Put4byte(tls, (*MemPage)(unsafe.Pointer(pPage)).FaData, iTo) } else { @@ -30127,7 +30130,7 @@ })(unsafe.Pointer(&struct{ uintptr }{(*MemPage)(unsafe.Pointer(pPage)).FxParseCell})).f(tls, pPage, pCell, bp) if U32((*CellInfo)(unsafe.Pointer(bp)).FnLocal) < (*CellInfo)(unsafe.Pointer(bp)).FnPayload { if pCell+uintptr((*CellInfo)(unsafe.Pointer(bp)).FnSize) > (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr((*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize) { - return Xsqlite3CorruptError(tls, 72162) + return Xsqlite3CorruptError(tls, 72166) } if iFrom == Xsqlite3Get4byte(tls, pCell+uintptr((*CellInfo)(unsafe.Pointer(bp)).FnSize)-uintptr(4)) { Xsqlite3Put4byte(tls, pCell+uintptr((*CellInfo)(unsafe.Pointer(bp)).FnSize)-uintptr(4), iTo) @@ -30136,7 +30139,7 @@ } } else { if pCell+uintptr(4) > (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr((*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize) { - return Xsqlite3CorruptError(tls, 72171) + return Xsqlite3CorruptError(tls, 72175) } if Xsqlite3Get4byte(tls, pCell) == iFrom { Xsqlite3Put4byte(tls, pCell, iTo) @@ -30147,7 +30150,7 @@ if i == nCell { if int32(eType) != PTRMAP_BTREE || Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr(int32((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset)+8)) != iFrom { - return Xsqlite3CorruptError(tls, 72183) + return Xsqlite3CorruptError(tls, 72187) } Xsqlite3Put4byte(tls, (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr(int32((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset)+8), iTo) } @@ -30163,7 +30166,7 @@ var pPager uintptr = (*BtShared)(unsafe.Pointer(pBt)).FpPager if iDbPage < Pgno(3) { - return Xsqlite3CorruptError(tls, 72218) + return Xsqlite3CorruptError(tls, 72222) } *(*int32)(unsafe.Pointer(bp)) = Xsqlite3PagerMovepage(tls, pPager, 
(*MemPage)(unsafe.Pointer(pDbPage)).FpDbPage, iFreePage, isCommit) @@ -30224,7 +30227,7 @@ return rc } if int32(*(*U8)(unsafe.Pointer(bp))) == PTRMAP_ROOTPAGE { - return Xsqlite3CorruptError(tls, 72316) + return Xsqlite3CorruptError(tls, 72320) } if int32(*(*U8)(unsafe.Pointer(bp))) == PTRMAP_FREEPAGE { @@ -30259,7 +30262,7 @@ releasePage(tls, *(*uintptr)(unsafe.Pointer(bp + 20))) if *(*Pgno)(unsafe.Pointer(bp + 24)) > dbSize { releasePage(tls, *(*uintptr)(unsafe.Pointer(bp + 16))) - return Xsqlite3CorruptError(tls, 72368) + return Xsqlite3CorruptError(tls, 72372) } } @@ -30319,7 +30322,7 @@ var nFin Pgno = finalDbSize(tls, pBt, nOrig, nFree) if nOrig < nFin || nFree >= nOrig { - rc = Xsqlite3CorruptError(tls, 72436) + rc = Xsqlite3CorruptError(tls, 72440) } else if nFree > Pgno(0) { rc = saveAllCursors(tls, pBt, uint32(0), uintptr(0)) if rc == SQLITE_OK { @@ -30358,7 +30361,7 @@ nOrig = btreePagecount(tls, pBt) if ptrmapPageno(tls, pBt, nOrig) == nOrig || nOrig == U32(Xsqlite3PendingByte)/(*BtShared)(unsafe.Pointer(pBt)).FpageSize+U32(1) { - return Xsqlite3CorruptError(tls, 72487) + return Xsqlite3CorruptError(tls, 72491) } nFree = Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer((*BtShared)(unsafe.Pointer(pBt)).FpPage1)).FaData+36) @@ -30389,7 +30392,7 @@ } nFin = finalDbSize(tls, pBt, nOrig, nVac) if nFin > nOrig { - return Xsqlite3CorruptError(tls, 72514) + return Xsqlite3CorruptError(tls, 72518) } if nFin < nOrig { rc = saveAllCursors(tls, pBt, uint32(0), uintptr(0)) @@ -30730,7 +30733,7 @@ if iTable <= Pgno(1) { if iTable < Pgno(1) { - return Xsqlite3CorruptError(tls, 72978) + return Xsqlite3CorruptError(tls, 72982) } else if btreePagecount(tls, pBt) == Pgno(0) { iTable = Pgno(0) } @@ -30974,14 +30977,14 @@ var pBt uintptr = (*BtCursor)(unsafe.Pointer(pCur)).FpBt if int32((*BtCursor)(unsafe.Pointer(pCur)).Fix) >= int32((*MemPage)(unsafe.Pointer(pPage)).FnCell) { - return Xsqlite3CorruptError(tls, 73385) + return Xsqlite3CorruptError(tls, 73389) } getCellInfo(tls, pCur) aPayload = (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload if Uptr((int32(aPayload)-int32((*MemPage)(unsafe.Pointer(pPage)).FaData))/1) > (*BtShared)(unsafe.Pointer(pBt)).FusableSize-U32((*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal) { - return Xsqlite3CorruptError(tls, 73400) + return Xsqlite3CorruptError(tls, 73404) } if offset < U32((*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal) { @@ -31026,7 +31029,7 @@ for *(*Pgno)(unsafe.Pointer(bp)) != 0 { if *(*Pgno)(unsafe.Pointer(bp)) > (*BtShared)(unsafe.Pointer(pBt)).FnPage { - return Xsqlite3CorruptError(tls, 73462) + return Xsqlite3CorruptError(tls, 73466) } *(*Pgno)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pCur)).FaOverflow + uintptr(iIdx)*4)) = *(*Pgno)(unsafe.Pointer(bp)) @@ -31075,7 +31078,7 @@ } if rc == SQLITE_OK && amt > U32(0) { - return Xsqlite3CorruptError(tls, 73547) + return Xsqlite3CorruptError(tls, 73551) } return rc } @@ -31155,7 +31158,7 @@ func moveToChild(tls *libc.TLS, pCur uintptr, newPgno U32) int32 { if int32((*BtCursor)(unsafe.Pointer(pCur)).FiPage) >= BTCURSOR_MAX_DEPTH-1 { - return Xsqlite3CorruptError(tls, 73684) + return Xsqlite3CorruptError(tls, 73688) } (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnSize = U16(0) *(*U8)(unsafe.Pointer(pCur + 1)) &= libc.Uint8FromInt32(libc.CplInt32(BTCF_ValidNKey | BTCF_ValidOvfl)) @@ -31246,7 +31249,7 @@ if !(int32((*MemPage)(unsafe.Pointer(pRoot)).FisInit) == 0 || libc.Bool32((*BtCursor)(unsafe.Pointer(pCur)).FpKeyInfo == uintptr(0)) != int32((*MemPage)(unsafe.Pointer(pRoot)).FintKey)) { goto __11 } - 
return Xsqlite3CorruptError(tls, 73823) + return Xsqlite3CorruptError(tls, 73827) __11: ; skip_init: @@ -31266,7 +31269,7 @@ if !((*MemPage)(unsafe.Pointer(pRoot)).Fpgno != Pgno(1)) { goto __16 } - return Xsqlite3CorruptError(tls, 73835) + return Xsqlite3CorruptError(tls, 73839) __16: ; subpage = Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer(pRoot)).FaData+uintptr(int32((*MemPage)(unsafe.Pointer(pRoot)).FhdrOffset)+8)) @@ -31476,7 +31479,7 @@ if !(pCell >= (*MemPage)(unsafe.Pointer(pPage)).FaDataEnd) { goto __21 } - return Xsqlite3CorruptError(tls, 74077) + return Xsqlite3CorruptError(tls, 74081) __21: ; goto __19 @@ -31680,7 +31683,7 @@ if !!(int32((*MemPage)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pCur)).FpPage)).FisInit) != 0) { goto __4 } - return Xsqlite3CorruptError(tls, 74273) + return Xsqlite3CorruptError(tls, 74277) __4: ; goto bypass_moveto_root @@ -31745,7 +31748,7 @@ if !(nCell < 2 || U32(nCell)/(*BtShared)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pCur)).FpBt)).FusableSize > (*BtShared)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pCur)).FpBt)).FnPage) { goto __17 } - rc = Xsqlite3CorruptError(tls, 74360) + rc = Xsqlite3CorruptError(tls, 74364) goto moveto_index_finish __17: ; @@ -31793,7 +31796,7 @@ if !((*UnpackedRecord)(unsafe.Pointer(pIdxKey)).FerrCode != 0) { goto __24 } - rc = Xsqlite3CorruptError(tls, 74392) + rc = Xsqlite3CorruptError(tls, 74396) __24: ; goto moveto_index_finish @@ -31912,7 +31915,7 @@ pPage = (*BtCursor)(unsafe.Pointer(pCur)).FpPage idx = int32(libc.PreIncUint16(&(*BtCursor)(unsafe.Pointer(pCur)).Fix, 1)) if !(int32((*MemPage)(unsafe.Pointer(pPage)).FisInit) != 0) || Xsqlite3FaultSim(tls, 412) != 0 { - return Xsqlite3CorruptError(tls, 74508) + return Xsqlite3CorruptError(tls, 74512) } if idx >= int32((*MemPage)(unsafe.Pointer(pPage)).FnCell) { @@ -32072,7 +32075,7 @@ if !(n >= mxPage) { goto __1 } - return Xsqlite3CorruptError(tls, 74688) + return Xsqlite3CorruptError(tls, 74692) __1: ; if !(n > U32(0)) { @@ -32137,7 +32140,7 @@ if !(iTrunk > mxPage || libc.PostIncUint32(&nSearch, 1) > n) { goto __16 } - rc = Xsqlite3CorruptError(tls, 74744) + rc = Xsqlite3CorruptError(tls, 74748) goto __17 __16: rc = btreeGetUnusedPage(tls, pBt, iTrunk, bp+4, 0) @@ -32173,7 +32176,7 @@ goto __22 } - rc = Xsqlite3CorruptError(tls, 74773) + rc = Xsqlite3CorruptError(tls, 74777) goto end_allocate_page goto __23 __22: @@ -32217,7 +32220,7 @@ if !(iNewTrunk > mxPage) { goto __32 } - rc = Xsqlite3CorruptError(tls, 74807) + rc = Xsqlite3CorruptError(tls, 74811) goto end_allocate_page __32: ; @@ -32329,7 +32332,7 @@ if !(iPage > mxPage || iPage < Pgno(2)) { goto __51 } - rc = Xsqlite3CorruptError(tls, 74872) + rc = Xsqlite3CorruptError(tls, 74876) goto end_allocate_page __51: ; @@ -32487,7 +32490,7 @@ if !(iPage < Pgno(2) || iPage > (*BtShared)(unsafe.Pointer(pBt)).FnPage) { goto __1 } - return Xsqlite3CorruptError(tls, 74999) + return Xsqlite3CorruptError(tls, 75003) __1: ; if !(pMemPage != 0) { @@ -32544,7 +32547,7 @@ if !(iTrunk > btreePagecount(tls, pBt)) { goto __10 } - *(*int32)(unsafe.Pointer(bp + 4)) = Xsqlite3CorruptError(tls, 75046) + *(*int32)(unsafe.Pointer(bp + 4)) = Xsqlite3CorruptError(tls, 75050) goto freepage_out __10: ; @@ -32560,7 +32563,7 @@ if !(nLeaf > (*BtShared)(unsafe.Pointer(pBt)).FusableSize/U32(4)-U32(2)) { goto __12 } - *(*int32)(unsafe.Pointer(bp + 4)) = Xsqlite3CorruptError(tls, 75057) + *(*int32)(unsafe.Pointer(bp + 4)) = Xsqlite3CorruptError(tls, 75061) goto freepage_out __12: ; @@ -32634,7 +32637,7 @@ var ovflPageSize U32 if 
pCell+uintptr((*CellInfo)(unsafe.Pointer(pInfo)).FnSize) > (*MemPage)(unsafe.Pointer(pPage)).FaDataEnd { - return Xsqlite3CorruptError(tls, 75146) + return Xsqlite3CorruptError(tls, 75150) } ovflPgno = Xsqlite3Get4byte(tls, pCell+uintptr((*CellInfo)(unsafe.Pointer(pInfo)).FnSize)-uintptr(4)) pBt = (*MemPage)(unsafe.Pointer(pPage)).FpBt @@ -32646,7 +32649,7 @@ *(*Pgno)(unsafe.Pointer(bp + 4)) = Pgno(0) *(*uintptr)(unsafe.Pointer(bp)) = uintptr(0) if ovflPgno < Pgno(2) || ovflPgno > btreePagecount(tls, pBt) { - return Xsqlite3CorruptError(tls, 75163) + return Xsqlite3CorruptError(tls, 75167) } if nOvfl != 0 { rc = getOverflowPage(tls, pBt, ovflPgno, bp, bp+4) @@ -32657,7 +32660,7 @@ if (*(*uintptr)(unsafe.Pointer(bp)) != 0 || libc.AssignPtrUintptr(bp, btreePageLookup(tls, pBt, ovflPgno)) != uintptr(0)) && Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FpDbPage) != 1 { - rc = Xsqlite3CorruptError(tls, 75183) + rc = Xsqlite3CorruptError(tls, 75187) } else { rc = freePage2(tls, pBt, *(*uintptr)(unsafe.Pointer(bp)), ovflPgno) } @@ -32822,7 +32825,7 @@ hdr = int32((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset) if pc+U32(sz) > (*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize { - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 75436) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 75440) return } rc = freeSpace(tls, pPage, uint16(pc), uint16(sz)) @@ -33100,12 +33103,12 @@ if Uptr(pCell) >= Uptr(aData+uintptr(j)) && Uptr(pCell) < Uptr(pEnd) { if Uptr(pCell+uintptr(sz)) > Uptr(pEnd) { - return Xsqlite3CorruptError(tls, 75737) + return Xsqlite3CorruptError(tls, 75741) } pCell = pTmp + uintptr((int32(pCell)-int32(aData))/1) } else if Uptr(pCell+uintptr(sz)) > Uptr(pSrcEnd) && Uptr(pCell) < Uptr(pSrcEnd) { - return Xsqlite3CorruptError(tls, 75742) + return Xsqlite3CorruptError(tls, 75746) } pData -= uintptr(sz) @@ -33113,7 +33116,7 @@ *(*U8)(unsafe.Pointer(pCellptr + 1)) = U8((int32(pData) - int32(aData)) / 1) pCellptr += uintptr(2) if pData < pCellptr { - return Xsqlite3CorruptError(tls, 75748) + return Xsqlite3CorruptError(tls, 75752) } libc.Xmemmove(tls, pData, pCell, uint32(sz)) @@ -33173,7 +33176,7 @@ if Uptr(*(*uintptr)(unsafe.Pointer((*CellArray)(unsafe.Pointer(pCArray)).FapCell + uintptr(i)*4))+uintptr(sz)) > Uptr(pEnd) && Uptr(*(*uintptr)(unsafe.Pointer((*CellArray)(unsafe.Pointer(pCArray)).FapCell + uintptr(i)*4))) < Uptr(pEnd) { - Xsqlite3CorruptError(tls, 75833) + Xsqlite3CorruptError(tls, 75837) return 1 } libc.Xmemmove(tls, pSlot, *(*uintptr)(unsafe.Pointer((*CellArray)(unsafe.Pointer(pCArray)).FapCell + uintptr(i)*4)), uint32(sz)) @@ -33262,7 +33265,7 @@ if !(nShift > nCell) { goto __2 } - return Xsqlite3CorruptError(tls, 75947) + return Xsqlite3CorruptError(tls, 75951) __2: ; libc.Xmemmove(tls, (*MemPage)(unsafe.Pointer(pPg)).FaCellIdx, (*MemPage)(unsafe.Pointer(pPg)).FaCellIdx+uintptr(nShift*2), uint32(nCell*2)) @@ -33378,7 +33381,7 @@ var pBt uintptr = (*MemPage)(unsafe.Pointer(pPage)).FpBt if int32((*MemPage)(unsafe.Pointer(pPage)).FnCell) == 0 { - return Xsqlite3CorruptError(tls, 76060) + return Xsqlite3CorruptError(tls, 76064) } *(*int32)(unsafe.Pointer(bp + 80)) = allocateBtreePage(tls, pBt, bp, bp+4, uint32(0), uint8(0)) @@ -33698,7 +33701,7 @@ if !(int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pOld)).FaData))) != int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp + 72)))).FaData)))) { goto __25 } - *(*int32)(unsafe.Pointer(bp + 112)) = 
Xsqlite3CorruptError(tls, 76481) + *(*int32)(unsafe.Pointer(bp + 112)) = Xsqlite3CorruptError(tls, 76485) goto balance_cleanup __25: ; @@ -33709,7 +33712,7 @@ if !(limit < int32(*(*U16)(unsafe.Pointer(pOld + 28)))) { goto __27 } - *(*int32)(unsafe.Pointer(bp + 112)) = Xsqlite3CorruptError(tls, 76505) + *(*int32)(unsafe.Pointer(bp + 112)) = Xsqlite3CorruptError(tls, 76509) goto balance_cleanup __27: ; @@ -33867,7 +33870,7 @@ if !(k > NB+2) { goto __55 } - *(*int32)(unsafe.Pointer(bp + 112)) = Xsqlite3CorruptError(tls, 76606) + *(*int32)(unsafe.Pointer(bp + 112)) = Xsqlite3CorruptError(tls, 76610) goto balance_cleanup __55: ; @@ -33941,7 +33944,7 @@ }()) { goto __67 } - *(*int32)(unsafe.Pointer(bp + 112)) = Xsqlite3CorruptError(tls, 76639) + *(*int32)(unsafe.Pointer(bp + 112)) = Xsqlite3CorruptError(tls, 76643) goto balance_cleanup __67: ; @@ -34004,7 +34007,7 @@ }()) { goto __75 } - *(*int32)(unsafe.Pointer(bp + 112)) = Xsqlite3CorruptError(tls, 76683) + *(*int32)(unsafe.Pointer(bp + 112)) = Xsqlite3CorruptError(tls, 76687) goto balance_cleanup __75: ; @@ -34032,7 +34035,7 @@ *(*int32)(unsafe.Pointer(bp + 112)) == SQLITE_OK) { goto __81 } - *(*int32)(unsafe.Pointer(bp + 112)) = Xsqlite3CorruptError(tls, 76716) + *(*int32)(unsafe.Pointer(bp + 112)) = Xsqlite3CorruptError(tls, 76720) __81: ; if !(*(*int32)(unsafe.Pointer(bp + 112)) != 0) { @@ -34293,7 +34296,7 @@ if !(Uptr(pSrcEnd) >= Uptr(pCell1) && Uptr(pSrcEnd) < Uptr(pCell1+uintptr(sz2))) { goto __121 } - *(*int32)(unsafe.Pointer(bp + 112)) = Xsqlite3CorruptError(tls, 76916) + *(*int32)(unsafe.Pointer(bp + 112)) = Xsqlite3CorruptError(tls, 76920) goto balance_cleanup __121: ; @@ -34485,7 +34488,7 @@ if pOther != pCur && int32((*BtCursor)(unsafe.Pointer(pOther)).FeState) == CURSOR_VALID && (*BtCursor)(unsafe.Pointer(pOther)).FpPage == (*BtCursor)(unsafe.Pointer(pCur)).FpPage { - return Xsqlite3CorruptError(tls, 77146) + return Xsqlite3CorruptError(tls, 77150) } } return SQLITE_OK @@ -34523,7 +34526,7 @@ break } } else if Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(pPage)).FpDbPage) > 1 { - rc = Xsqlite3CorruptError(tls, 77206) + rc = Xsqlite3CorruptError(tls, 77210) } else { var pParent uintptr = *(*uintptr)(unsafe.Pointer(pCur + 120 + uintptr(iPage-1)*4)) var iIdx int32 = int32(*(*U16)(unsafe.Pointer(pCur + 72 + uintptr(iPage-1)*2))) @@ -34629,7 +34632,7 @@ return rc } if Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FpDbPage) != 1 || (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FisInit != 0 { - rc = Xsqlite3CorruptError(tls, 77370) + rc = Xsqlite3CorruptError(tls, 77374) } else { if U32(iOffset)+ovflPageSize < U32(nTotal) { ovflPgno = Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FaData) @@ -34654,7 +34657,7 @@ if (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload+uintptr((*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal) > (*MemPage)(unsafe.Pointer(pPage)).FaDataEnd || (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload < (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr((*MemPage)(unsafe.Pointer(pPage)).FcellOffset) { - return Xsqlite3CorruptError(tls, 77398) + return Xsqlite3CorruptError(tls, 77402) } if int32((*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal) == nTotal { return btreeOverwriteContent(tls, pPage, (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload, pX, @@ -34724,7 +34727,7 @@ goto __3 } - return Xsqlite3CorruptError(tls, 77479) + return Xsqlite3CorruptError(tls, 77483) __3: ; __1: @@ -34837,7 +34840,7 @@ goto __21 } - 
*(*int32)(unsafe.Pointer(bp + 104)) = Xsqlite3CorruptError(tls, 77602) + *(*int32)(unsafe.Pointer(bp + 104)) = Xsqlite3CorruptError(tls, 77606) goto __22 __21: *(*int32)(unsafe.Pointer(bp + 104)) = btreeComputeFreeSpace(tls, pPage) @@ -34897,6 +34900,7 @@ __25: ; idx = int32((*BtCursor)(unsafe.Pointer(pCur)).Fix) + (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnSize = U16(0) if !(*(*int32)(unsafe.Pointer(bp)) == 0) { goto __31 } @@ -34904,7 +34908,7 @@ if !(idx >= int32((*MemPage)(unsafe.Pointer(pPage)).FnCell)) { goto __33 } - return Xsqlite3CorruptError(tls, 77640) + return Xsqlite3CorruptError(tls, 77645) __33: ; *(*int32)(unsafe.Pointer(bp + 104)) = Xsqlite3PagerWrite(tls, (*MemPage)(unsafe.Pointer(pPage)).FpDbPage) @@ -34942,13 +34946,13 @@ if !(oldCell < (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset)+uintptr(10)) { goto __39 } - return Xsqlite3CorruptError(tls, 77667) + return Xsqlite3CorruptError(tls, 77672) __39: ; if !(oldCell+uintptr(*(*int32)(unsafe.Pointer(bp + 108))) > (*MemPage)(unsafe.Pointer(pPage)).FaDataEnd) { goto __40 } - return Xsqlite3CorruptError(tls, 77670) + return Xsqlite3CorruptError(tls, 77675) __40: ; libc.Xmemcpy(tls, oldCell, newCell, uint32(*(*int32)(unsafe.Pointer(bp + 108)))) @@ -34979,7 +34983,6 @@ ; *(*int32)(unsafe.Pointer(bp + 104)) = insertCell(tls, pPage, idx, newCell, *(*int32)(unsafe.Pointer(bp + 108)), uintptr(0), uint32(0)) - (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnSize = U16(0) if !((*MemPage)(unsafe.Pointer(pPage)).FnOverflow != 0) { goto __44 } @@ -35054,7 +35057,7 @@ nIn = U32((*BtCursor)(unsafe.Pointer(pSrc)).Finfo.FnLocal) aIn = (*BtCursor)(unsafe.Pointer(pSrc)).Finfo.FpPayload if aIn+uintptr(nIn) > (*MemPage)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pSrc)).FpPage)).FaDataEnd { - return Xsqlite3CorruptError(tls, 77773) + return Xsqlite3CorruptError(tls, 77777) } nRem = (*BtCursor)(unsafe.Pointer(pSrc)).Finfo.FnPayload if nIn == nRem && nIn < U32((*MemPage)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pDest)).FpPage)).FmaxLocal) { @@ -35079,7 +35082,7 @@ if nRem > nIn { if aIn+uintptr(nIn)+uintptr(4) > (*MemPage)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pSrc)).FpPage)).FaDataEnd { - return Xsqlite3CorruptError(tls, 77798) + return Xsqlite3CorruptError(tls, 77802) } ovflIn = Xsqlite3Get4byte(tls, (*BtCursor)(unsafe.Pointer(pSrc)).Finfo.FpPayload+uintptr(nIn)) } @@ -35180,7 +35183,7 @@ return *(*int32)(unsafe.Pointer(bp + 24)) } } else { - return Xsqlite3CorruptError(tls, 77894) + return Xsqlite3CorruptError(tls, 77898) } } @@ -35188,11 +35191,11 @@ iCellIdx = int32((*BtCursor)(unsafe.Pointer(pCur)).Fix) pPage = (*BtCursor)(unsafe.Pointer(pCur)).FpPage if int32((*MemPage)(unsafe.Pointer(pPage)).FnCell) <= iCellIdx { - return Xsqlite3CorruptError(tls, 77903) + return Xsqlite3CorruptError(tls, 77907) } pCell = (*MemPage)(unsafe.Pointer(pPage)).FaData + uintptr(int32((*MemPage)(unsafe.Pointer(pPage)).FmaskPage)&(int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FaCellIdx + uintptr(2*iCellIdx))))<<8|int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FaCellIdx + uintptr(2*iCellIdx) + 1))))) if (*MemPage)(unsafe.Pointer(pPage)).FnFree < 0 && btreeComputeFreeSpace(tls, pPage) != 0 { - return Xsqlite3CorruptError(tls, 77907) + return Xsqlite3CorruptError(tls, 77911) } bPreserve = U8(libc.Bool32(int32(flags)&BTREE_SAVEPOSITION != 0)) @@ -35267,7 +35270,7 @@ } pCell = (*MemPage)(unsafe.Pointer(pLeaf)).FaData + 
uintptr(int32((*MemPage)(unsafe.Pointer(pLeaf)).FmaskPage)&(int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pLeaf)).FaCellIdx + uintptr(2*(int32((*MemPage)(unsafe.Pointer(pLeaf)).FnCell)-1)))))<<8|int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pLeaf)).FaCellIdx + uintptr(2*(int32((*MemPage)(unsafe.Pointer(pLeaf)).FnCell)-1)) + 1))))) if pCell < (*MemPage)(unsafe.Pointer(pLeaf)).FaData+4 { - return Xsqlite3CorruptError(tls, 77998) + return Xsqlite3CorruptError(tls, 78002) } nCell = int32((*struct { f func(*libc.TLS, uintptr, uintptr) U16 @@ -35336,7 +35339,7 @@ Xsqlite3BtreeGetMeta(tls, p, BTREE_LARGEST_ROOT_PAGE, bp) if *(*Pgno)(unsafe.Pointer(bp)) > btreePagecount(tls, pBt) { - return Xsqlite3CorruptError(tls, 78114) + return Xsqlite3CorruptError(tls, 78118) } *(*Pgno)(unsafe.Pointer(bp))++ @@ -35365,7 +35368,7 @@ } *(*int32)(unsafe.Pointer(bp + 24)) = ptrmapGet(tls, pBt, *(*Pgno)(unsafe.Pointer(bp)), bp+16, bp+20) if int32(*(*U8)(unsafe.Pointer(bp + 16))) == PTRMAP_ROOTPAGE || int32(*(*U8)(unsafe.Pointer(bp + 16))) == PTRMAP_FREEPAGE { - *(*int32)(unsafe.Pointer(bp + 24)) = Xsqlite3CorruptError(tls, 78162) + *(*int32)(unsafe.Pointer(bp + 24)) = Xsqlite3CorruptError(tls, 78166) } if *(*int32)(unsafe.Pointer(bp + 24)) != SQLITE_OK { releasePage(tls, *(*uintptr)(unsafe.Pointer(bp + 12))) @@ -35441,7 +35444,7 @@ if !(pgno > btreePagecount(tls, pBt)) { goto __1 } - return Xsqlite3CorruptError(tls, 78252) + return Xsqlite3CorruptError(tls, 78256) __1: ; *(*int32)(unsafe.Pointer(bp + 32)) = getAndInitPage(tls, pBt, pgno, bp, uintptr(0), 0) @@ -35455,7 +35458,7 @@ Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FpDbPage) != 1+libc.Bool32(pgno == Pgno(1))) { goto __3 } - *(*int32)(unsafe.Pointer(bp + 32)) = Xsqlite3CorruptError(tls, 78259) + *(*int32)(unsafe.Pointer(bp + 32)) = Xsqlite3CorruptError(tls, 78263) goto cleardatabasepage_out __3: ; @@ -35589,7 +35592,7 @@ var pBt uintptr = (*Btree)(unsafe.Pointer(p)).FpBt if iTable > btreePagecount(tls, pBt) { - return Xsqlite3CorruptError(tls, 78363) + return Xsqlite3CorruptError(tls, 78367) } *(*int32)(unsafe.Pointer(bp + 8)) = Xsqlite3BtreeClearTable(tls, p, int32(iTable), uintptr(0)) @@ -38058,7 +38061,7 @@ var rc int32 (*Mem)(unsafe.Pointer(pMem)).Fflags = U16(MEM_Null) if Xsqlite3BtreeMaxRecordSize(tls, pCur) < Sqlite3_int64(offset+amt) { - return Xsqlite3CorruptError(tls, 81630) + return Xsqlite3CorruptError(tls, 81634) } if SQLITE_OK == libc.AssignInt32(&rc, Xsqlite3VdbeMemClearAndResize(tls, pMem, int32(amt+U32(1)))) { rc = Xsqlite3BtreePayload(tls, pCur, offset, amt, (*Mem)(unsafe.Pointer(pMem)).Fz) @@ -38706,7 +38709,7 @@ return Xsqlite3GetVarint32(tls, a, bp) }()) if *(*int32)(unsafe.Pointer(bp)) > nRec || iHdr >= *(*int32)(unsafe.Pointer(bp)) { - return Xsqlite3CorruptError(tls, 82270) + return Xsqlite3CorruptError(tls, 82274) } iField = *(*int32)(unsafe.Pointer(bp)) for i = 0; i <= iCol; i++ { @@ -38721,14 +38724,14 @@ }()) if iHdr > *(*int32)(unsafe.Pointer(bp)) { - return Xsqlite3CorruptError(tls, 82276) + return Xsqlite3CorruptError(tls, 82280) } szField = int32(Xsqlite3VdbeSerialTypeLen(tls, *(*U32)(unsafe.Pointer(bp + 4)))) iField = iField + szField } if iField > nRec { - return Xsqlite3CorruptError(tls, 82282) + return Xsqlite3CorruptError(tls, 82286) } if pMem == uintptr(0) { pMem = libc.AssignPtrUintptr(ppVal, Xsqlite3ValueNew(tls, db)) @@ -41033,7 +41036,7 @@ return rc } if *(*int32)(unsafe.Pointer(bp)) != 0 { - return Xsqlite3CorruptError(tls, 86058) + return 
Xsqlite3CorruptError(tls, 86062) } (*VdbeCursor)(unsafe.Pointer(p)).FdeferredMoveto = U8(0) (*VdbeCursor)(unsafe.Pointer(p)).FcacheStatus = U32(CACHE_STALE) @@ -41584,7 +41587,7 @@ i = 0 } if d1 > uint32(nKey1) { - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 86985)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 86989)) return 0 } @@ -41649,7 +41652,7 @@ if d1+U32((*Mem)(unsafe.Pointer(bp+8)).Fn) > uint32(nKey1) || int32((*KeyInfo)(unsafe.Pointer(libc.AssignUintptr(&pKeyInfo, (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FpKeyInfo))).FnAllField) <= i { - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87062)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87066)) return 0 } else if *(*uintptr)(unsafe.Pointer(pKeyInfo + 20 + uintptr(i)*4)) != 0 { (*Mem)(unsafe.Pointer(bp + 8)).Fenc = (*KeyInfo)(unsafe.Pointer(pKeyInfo)).Fenc @@ -41683,7 +41686,7 @@ var nStr int32 = int32((*(*U32)(unsafe.Pointer(bp + 48)) - U32(12)) / U32(2)) if d1+U32(nStr) > uint32(nKey1) { - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87092)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87096)) return 0 } else if int32((*Mem)(unsafe.Pointer(pRhs)).Fflags)&MEM_Zero != 0 { if !(isAllZero(tls, aKey1+uintptr(d1), nStr) != 0) { @@ -41733,7 +41736,7 @@ } idx1 = idx1 + U32(Xsqlite3VarintLen(tls, uint64(*(*U32)(unsafe.Pointer(bp + 48))))) if idx1 >= *(*U32)(unsafe.Pointer(bp + 4)) { - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87136)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87140)) return 0 } } @@ -41879,7 +41882,7 @@ if !(szHdr+nStr > nKey1) { goto __7 } - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87299)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87303)) return 0 __7: ; @@ -42050,7 +42053,7 @@ idx_rowid_corruption: ; Xsqlite3VdbeMemReleaseMalloc(tls, bp) - return Xsqlite3CorruptError(tls, 87457) + return Xsqlite3CorruptError(tls, 87461) } // Compare the key of the index entry that cursor pC is pointing to against @@ -42076,7 +42079,7 @@ if nCellKey <= int64(0) || nCellKey > int64(0x7fffffff) { *(*int32)(unsafe.Pointer(res)) = 0 - return Xsqlite3CorruptError(tls, 87490) + return Xsqlite3CorruptError(tls, 87494) } Xsqlite3VdbeMemInit(tls, bp, db, uint16(0)) rc = Xsqlite3VdbeMemFromBtreeZeroOffset(tls, pCur, U32(nCellKey), bp) @@ -42350,7 +42353,7 @@ var v uintptr = pStmt var db uintptr = (*Vdbe)(unsafe.Pointer(v)).Fdb if vdbeSafety(tls, v) != 0 { - return Xsqlite3MisuseError(tls, 87854) + return Xsqlite3MisuseError(tls, 87858) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) if (*Vdbe)(unsafe.Pointer(v)).FstartTime > int64(0) { @@ -42965,7 +42968,7 @@ var db uintptr if vdbeSafetyNotNull(tls, v) != 0 { - return Xsqlite3MisuseError(tls, 88544) + return Xsqlite3MisuseError(tls, 88548) } db = (*Vdbe)(unsafe.Pointer(v)).Fdb Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) @@ -43485,7 +43488,7 @@ var pVar uintptr if vdbeSafetyNotNull(tls, p) != 0 { - return Xsqlite3MisuseError(tls, 89208) + return Xsqlite3MisuseError(tls, 89212) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer((*Vdbe)(unsafe.Pointer(p)).Fdb)).Fmutex) if int32((*Vdbe)(unsafe.Pointer(p)).FeVdbeState) != VDBE_READY_STATE { @@ -43493,7 +43496,7 @@ 
Xsqlite3_mutex_leave(tls, (*Sqlite3)(unsafe.Pointer((*Vdbe)(unsafe.Pointer(p)).Fdb)).Fmutex) Xsqlite3_log(tls, SQLITE_MISUSE, ts+5350, libc.VaList(bp, (*Vdbe)(unsafe.Pointer(p)).FzSql)) - return Xsqlite3MisuseError(tls, 89216) + return Xsqlite3MisuseError(tls, 89220) } if i >= uint32((*Vdbe)(unsafe.Pointer(p)).FnVar) { Xsqlite3Error(tls, (*Vdbe)(unsafe.Pointer(p)).Fdb, SQLITE_RANGE) @@ -43898,7 +43901,7 @@ if !(!(p != 0) || (*PreUpdate)(unsafe.Pointer(p)).Fop == SQLITE_INSERT) { goto __1 } - rc = Xsqlite3MisuseError(tls, 89707) + rc = Xsqlite3MisuseError(tls, 89711) goto preupdate_old_out __1: ; @@ -44042,7 +44045,7 @@ if !(!(p != 0) || (*PreUpdate)(unsafe.Pointer(p)).Fop == SQLITE_DELETE) { goto __1 } - rc = Xsqlite3MisuseError(tls, 89809) + rc = Xsqlite3MisuseError(tls, 89813) goto preupdate_new_out __1: ; @@ -44486,10 +44489,6 @@ } else if int32((*Mem)(unsafe.Pointer(p)).Fflags)&MEM_Real != 0 { h = h + U64(Xsqlite3VdbeIntValue(tls, p)) } else if int32((*Mem)(unsafe.Pointer(p)).Fflags)&(MEM_Str|MEM_Blob) != 0 { - h = h + U64((*Mem)(unsafe.Pointer(p)).Fn) - if int32((*Mem)(unsafe.Pointer(p)).Fflags)&MEM_Zero != 0 { - h = h + U64(*(*int32)(unsafe.Pointer(p))) - } } } return h @@ -47138,7 +47137,7 @@ goto __9 goto __425 __424: - rc = Xsqlite3CorruptError(tls, 93317) + rc = Xsqlite3CorruptError(tls, 93320) goto abort_due_to_error __425: ; @@ -48898,7 +48897,7 @@ if !((*Op)(unsafe.Pointer(pOp)).Fp2 == 0) { goto __682 } - rc = Xsqlite3CorruptError(tls, 95560) + rc = Xsqlite3CorruptError(tls, 95563) goto __683 __682: goto jump_to_p2 @@ -49676,7 +49675,7 @@ if !((*Op)(unsafe.Pointer(pOp)).Fp5 != 0 && !(Xsqlite3WritableSchema(tls, db) != 0)) { goto __770 } - rc = Xsqlite3ReportError(tls, SQLITE_CORRUPT|int32(3)<<8, 96635, ts+5859) + rc = Xsqlite3ReportError(tls, SQLITE_CORRUPT|int32(3)<<8, 96638, ts+5859) goto abort_due_to_error __770: ; @@ -49786,7 +49785,7 @@ if !(nCellKey <= int64(0) || nCellKey > int64(0x7fffffff)) { goto __781 } - rc = Xsqlite3CorruptError(tls, 96840) + rc = Xsqlite3CorruptError(tls, 96843) goto abort_due_to_error __781: ; @@ -49980,7 +49979,7 @@ goto __803 } - rc = Xsqlite3CorruptError(tls, 97092) + rc = Xsqlite3CorruptError(tls, 97095) __803: ; Xsqlite3DbFreeNN(tls, db, zSql) @@ -51347,7 +51346,7 @@ if !(rc == SQLITE_IOERR|int32(33)<<8) { goto __957 } - rc = Xsqlite3CorruptError(tls, 99031) + rc = Xsqlite3CorruptError(tls, 99034) __957: ; __956: @@ -51867,7 +51866,7 @@ var db uintptr if p == uintptr(0) { - return Xsqlite3MisuseError(tls, 99516) + return Xsqlite3MisuseError(tls, 99519) } db = (*Incrblob)(unsafe.Pointer(p)).Fdb Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) @@ -51950,7 +51949,7 @@ var db uintptr if p == uintptr(0) { - return Xsqlite3MisuseError(tls, 99616) + return Xsqlite3MisuseError(tls, 99619) } db = (*Incrblob)(unsafe.Pointer(p)).Fdb Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) @@ -55391,14 +55390,10 @@ ; Xsqlite3WalkExpr(tls, pWalker, (*Expr)(unsafe.Pointer(pExpr)).FpLeft) if 0 == Xsqlite3ExprCanBeNull(tls, (*Expr)(unsafe.Pointer(pExpr)).FpLeft) && !(int32((*Parse)(unsafe.Pointer(pParse)).FeParseMode) >= PARSE_MODE_RENAME) { - if int32((*Expr)(unsafe.Pointer(pExpr)).Fop) == TK_NOTNULL { - *(*uintptr)(unsafe.Pointer(pExpr + 8)) = ts + 6757 - *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_IsTrue) - } else { - *(*uintptr)(unsafe.Pointer(pExpr + 8)) = ts + 6762 - *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_IsFalse) - } - (*Expr)(unsafe.Pointer(pExpr)).Fop = U8(TK_TRUEFALSE) + *(*int32)(unsafe.Pointer(pExpr + 8)) = 
libc.Bool32(int32((*Expr)(unsafe.Pointer(pExpr)).Fop) == TK_NOTNULL) + *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_IntValue) + (*Expr)(unsafe.Pointer(pExpr)).Fop = U8(TK_INTEGER) + i = 0 p = pNC __4: @@ -55442,7 +55437,7 @@ var pLeft uintptr = (*Expr)(unsafe.Pointer(pExpr)).FpLeft if (*NameContext)(unsafe.Pointer(pNC)).FncFlags&(NC_IdxExpr|NC_GenCol) != 0 { - notValidImpl(tls, pParse, pNC, ts+6768, uintptr(0), pExpr) + notValidImpl(tls, pParse, pNC, ts+6757, uintptr(0), pExpr) } pRight = (*Expr)(unsafe.Pointer(pExpr)).FpRight @@ -55506,7 +55501,7 @@ (*Expr)(unsafe.Pointer(pExpr)).FiTable = exprProbability(tls, (*ExprList_item)(unsafe.Pointer(pList+8+1*20)).FpExpr) if (*Expr)(unsafe.Pointer(pExpr)).FiTable < 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+6785, libc.VaList(bp, pExpr)) + ts+6774, libc.VaList(bp, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } } else { @@ -55522,7 +55517,7 @@ var auth int32 = Xsqlite3AuthCheck(tls, pParse, SQLITE_FUNCTION, uintptr(0), (*FuncDef)(unsafe.Pointer(pDef)).FzName, uintptr(0)) if auth != SQLITE_OK { if auth == SQLITE_DENY { - Xsqlite3ErrorMsg(tls, pParse, ts+6849, + Xsqlite3ErrorMsg(tls, pParse, ts+6838, libc.VaList(bp+8, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } @@ -55536,7 +55531,7 @@ } if (*FuncDef)(unsafe.Pointer(pDef)).FfuncFlags&U32(SQLITE_FUNC_CONSTANT) == U32(0) { if (*NameContext)(unsafe.Pointer(pNC)).FncFlags&(NC_IdxExpr|NC_PartIdx|NC_GenCol) != 0 { - notValidImpl(tls, pParse, pNC, ts+6885, uintptr(0), pExpr) + notValidImpl(tls, pParse, pNC, ts+6874, uintptr(0), pExpr) } } else { @@ -55559,30 +55554,30 @@ if 0 == libc.Bool32(int32((*Parse)(unsafe.Pointer(pParse)).FeParseMode) >= PARSE_MODE_RENAME) { if pDef != 0 && (*FuncDef)(unsafe.Pointer(pDef)).FxValue == uintptr(0) && pWin != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+6913, libc.VaList(bp+16, pExpr)) + ts+6902, libc.VaList(bp+16, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } else if is_agg != 0 && (*NameContext)(unsafe.Pointer(pNC)).FncFlags&NC_AllowAgg == 0 || is_agg != 0 && (*FuncDef)(unsafe.Pointer(pDef)).FfuncFlags&U32(SQLITE_FUNC_WINDOW) != 0 && !(pWin != 0) || is_agg != 0 && pWin != 0 && (*NameContext)(unsafe.Pointer(pNC)).FncFlags&NC_AllowWin == 0 { var zType uintptr if (*FuncDef)(unsafe.Pointer(pDef)).FfuncFlags&U32(SQLITE_FUNC_WINDOW) != 0 || pWin != 0 { - zType = ts + 6956 + zType = ts + 6945 } else { - zType = ts + 6963 + zType = ts + 6952 } - Xsqlite3ErrorMsg(tls, pParse, ts+6973, libc.VaList(bp+24, zType, pExpr)) + Xsqlite3ErrorMsg(tls, pParse, ts+6962, libc.VaList(bp+24, zType, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ is_agg = 0 } else if no_such_func != 0 && int32((*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).Finit.Fbusy) == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+7001, libc.VaList(bp+40, pExpr)) + Xsqlite3ErrorMsg(tls, pParse, ts+6990, libc.VaList(bp+40, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } else if wrong_num_args != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+7023, + Xsqlite3ErrorMsg(tls, pParse, ts+7012, libc.VaList(bp+48, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } else if is_agg == 0 && (*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_WinFunc) != U32(0) { Xsqlite3ErrorMsg(tls, pParse, - ts+7067, + ts+7056, libc.VaList(bp+56, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } @@ -55654,15 +55649,15 @@ var nRef int32 = (*NameContext)(unsafe.Pointer(pNC)).FnRef if (*NameContext)(unsafe.Pointer(pNC)).FncFlags&NC_SelfRef != 0 { - notValidImpl(tls, pParse, pNC, ts+7115, pExpr, pExpr) + 
notValidImpl(tls, pParse, pNC, ts+7104, pExpr, pExpr) } else { Xsqlite3WalkSelect(tls, pWalker, *(*uintptr)(unsafe.Pointer(pExpr + 20))) } if nRef != (*NameContext)(unsafe.Pointer(pNC)).FnRef { *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_VarSelect) - *(*int32)(unsafe.Pointer(pNC + 24)) |= NC_VarSelect } + *(*int32)(unsafe.Pointer(pNC + 24)) |= NC_Subquery } break @@ -55670,7 +55665,7 @@ case TK_VARIABLE: { if (*NameContext)(unsafe.Pointer(pNC)).FncFlags&(NC_IsCheck|NC_PartIdx|NC_IdxExpr|NC_GenCol) != 0 { - notValidImpl(tls, pParse, pNC, ts+7126, pExpr, pExpr) + notValidImpl(tls, pParse, pNC, ts+7115, pExpr, pExpr) } break @@ -55801,7 +55796,7 @@ defer tls.Free(24) Xsqlite3ErrorMsg(tls, pParse, - ts+7137, libc.VaList(bp, i, zType, mx)) + ts+7126, libc.VaList(bp, i, zType, mx)) Xsqlite3RecordErrorOffsetOfExpr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pError) } @@ -55821,7 +55816,7 @@ } db = (*Parse)(unsafe.Pointer(pParse)).Fdb if (*ExprList)(unsafe.Pointer(pOrderBy)).FnExpr > *(*int32)(unsafe.Pointer(db + 120 + 2*4)) { - Xsqlite3ErrorMsg(tls, pParse, ts+7193, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+7182, 0) return 1 } for i = 0; i < (*ExprList)(unsafe.Pointer(pOrderBy)).FnExpr; i++ { @@ -55856,7 +55851,7 @@ } if Xsqlite3ExprIsInteger(tls, pE, bp+8) != 0 { if *(*int32)(unsafe.Pointer(bp + 8)) <= 0 || *(*int32)(unsafe.Pointer(bp + 8)) > (*ExprList)(unsafe.Pointer(pEList)).FnExpr { - resolveOutOfRangeError(tls, pParse, ts+7227, i+1, (*ExprList)(unsafe.Pointer(pEList)).FnExpr, pE) + resolveOutOfRangeError(tls, pParse, ts+7216, i+1, (*ExprList)(unsafe.Pointer(pEList)).FnExpr, pE) return 1 } } else { @@ -55913,7 +55908,7 @@ for i = 0; i < (*ExprList)(unsafe.Pointer(pOrderBy)).FnExpr; i++ { if int32(*(*uint16)(unsafe.Pointer(pOrderBy + 8 + uintptr(i)*20 + 8 + 4))&0x4>>2) == 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+7233, libc.VaList(bp, i+1)) + ts+7222, libc.VaList(bp, i+1)) return 1 } } @@ -55941,7 +55936,7 @@ return 0 } if (*ExprList)(unsafe.Pointer(pOrderBy)).FnExpr > *(*int32)(unsafe.Pointer(db + 120 + 2*4)) { - Xsqlite3ErrorMsg(tls, pParse, ts+7294, libc.VaList(bp, zType)) + Xsqlite3ErrorMsg(tls, pParse, ts+7283, libc.VaList(bp, zType)) return 1 } pEList = (*Select)(unsafe.Pointer(pSelect)).FpEList @@ -56155,7 +56150,7 @@ *(*int32)(unsafe.Pointer(bp + 24)) |= NC_UEList if (*Select)(unsafe.Pointer(p)).FpHaving != 0 { if (*Select)(unsafe.Pointer(p)).FselFlags&U32(SF_Aggregate) == U32(0) { - Xsqlite3ErrorMsg(tls, pParse, ts+7325, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+7314, 0) return WRC_Abort } if Xsqlite3ResolveExprNames(tls, bp, (*Select)(unsafe.Pointer(p)).FpHaving) != 0 { @@ -56195,7 +56190,7 @@ if (*Select)(unsafe.Pointer(p)).FpOrderBy != uintptr(0) && isCompound <= nCompound && - resolveOrderGroupBy(tls, bp, p, (*Select)(unsafe.Pointer(p)).FpOrderBy, ts+7227) != 0 { + resolveOrderGroupBy(tls, bp, p, (*Select)(unsafe.Pointer(p)).FpOrderBy, ts+7216) != 0 { return WRC_Abort } if (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { @@ -56206,7 +56201,7 @@ if pGroupBy != 0 { var pItem uintptr - if resolveOrderGroupBy(tls, bp, p, pGroupBy, ts+7364) != 0 || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { + if resolveOrderGroupBy(tls, bp, p, pGroupBy, ts+7353) != 0 || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { return WRC_Abort } i = 0 @@ -56218,7 +56213,7 @@ { if (*Expr)(unsafe.Pointer((*ExprList_item)(unsafe.Pointer(pItem)).FpExpr)).Fflags&U32(EP_Agg) != U32(0) { Xsqlite3ErrorMsg(tls, pParse, - ts+7370, 0) + ts+7359, 0) return WRC_Abort } @@ -57082,7 +57077,7 @@ var mxHeight int32 = 
*(*int32)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb + 120 + 3*4)) if nHeight > mxHeight { Xsqlite3ErrorMsg(tls, pParse, - ts+7429, libc.VaList(bp, mxHeight)) + ts+7418, libc.VaList(bp, mxHeight)) rc = SQLITE_ERROR } return rc @@ -57331,10 +57326,10 @@ nExprElem = 1 } if nExprElem != nElem { - Xsqlite3ErrorMsg(tls, pParse, ts+7477, + Xsqlite3ErrorMsg(tls, pParse, ts+7466, libc.VaList(bp, nExprElem, func() uintptr { if nExprElem > 1 { - return ts + 7521 + return ts + 7510 } return ts + 1547 }(), nElem)) @@ -57375,7 +57370,7 @@ !(int32((*Parse)(unsafe.Pointer(pParse)).FeParseMode) >= PARSE_MODE_RENAME) { Xsqlite3ExprDeferredDelete(tls, pParse, pLeft) Xsqlite3ExprDeferredDelete(tls, pParse, pRight) - return Xsqlite3Expr(tls, db, TK_INTEGER, ts+7523) + return Xsqlite3Expr(tls, db, TK_INTEGER, ts+7512) } else { return Xsqlite3PExpr(tls, pParse, TK_AND, pLeft, pRight) } @@ -57401,7 +57396,7 @@ if pList != 0 && (*ExprList)(unsafe.Pointer(pList)).FnExpr > *(*int32)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb + 120 + 6*4)) && !(int32((*Parse)(unsafe.Pointer(pParse)).Fnested) != 0) { - Xsqlite3ErrorMsg(tls, pParse, ts+7525, libc.VaList(bp, pToken)) + Xsqlite3ErrorMsg(tls, pParse, ts+7514, libc.VaList(bp, pToken)) } *(*uintptr)(unsafe.Pointer(pNew + 20)) = pList *(*U32)(unsafe.Pointer(pNew + 4)) |= U32(EP_HasFunc) @@ -57429,7 +57424,7 @@ if (*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_FromDDL) != U32(0) { if (*FuncDef)(unsafe.Pointer(pDef)).FfuncFlags&U32(SQLITE_FUNC_DIRECT) != U32(0) || (*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).Fflags&uint64(SQLITE_TrustedSchema) == uint64(0) { - Xsqlite3ErrorMsg(tls, pParse, ts+7559, libc.VaList(bp, pExpr)) + Xsqlite3ErrorMsg(tls, pParse, ts+7548, libc.VaList(bp, pExpr)) } } } @@ -57476,7 +57471,7 @@ } if bOk == 0 || *(*I64)(unsafe.Pointer(bp + 8)) < int64(1) || *(*I64)(unsafe.Pointer(bp + 8)) > I64(*(*int32)(unsafe.Pointer(db + 120 + 9*4))) { - Xsqlite3ErrorMsg(tls, pParse, ts+7579, + Xsqlite3ErrorMsg(tls, pParse, ts+7568, libc.VaList(bp, *(*int32)(unsafe.Pointer(db + 120 + 9*4)))) Xsqlite3RecordErrorOffsetOfExpr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pExpr) return @@ -57501,7 +57496,7 @@ } (*Expr)(unsafe.Pointer(pExpr)).FiColumn = x if int32(x) > *(*int32)(unsafe.Pointer(db + 120 + 9*4)) { - Xsqlite3ErrorMsg(tls, pParse, ts+7622, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+7611, 0) Xsqlite3RecordErrorOffsetOfExpr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pExpr) } } @@ -58076,7 +58071,7 @@ if !(int32((*Expr)(unsafe.Pointer(pExpr)).Fop) != TK_SELECT && (*IdList)(unsafe.Pointer(pColumns)).FnId != libc.AssignInt32(&n, Xsqlite3ExprVectorSize(tls, pExpr))) { goto __3 } - Xsqlite3ErrorMsg(tls, pParse, ts+7645, + Xsqlite3ErrorMsg(tls, pParse, ts+7634, libc.VaList(bp, (*IdList)(unsafe.Pointer(pColumns)).FnId, n)) goto vector_append_error __3: @@ -58199,7 +58194,7 @@ var mx int32 = *(*int32)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb + 120 + 2*4)) if pEList != 0 && (*ExprList)(unsafe.Pointer(pEList)).FnExpr > mx { - Xsqlite3ErrorMsg(tls, pParse, ts+7675, libc.VaList(bp, zObject)) + Xsqlite3ErrorMsg(tls, pParse, ts+7664, libc.VaList(bp, zObject)) } } @@ -58255,10 +58250,10 @@ // "false" EP_IsFalse // anything else 0 func Xsqlite3IsTrueOrFalse(tls *libc.TLS, zIn uintptr) U32 { - if Xsqlite3StrICmp(tls, zIn, ts+6757) == 0 { + if Xsqlite3StrICmp(tls, zIn, ts+7687) == 0 { return U32(EP_IsTrue) } - if Xsqlite3StrICmp(tls, zIn, ts+6762) == 0 { + if Xsqlite3StrICmp(tls, zIn, ts+7692) == 0 { return U32(EP_IsFalse) } return U32(0) @@ 
-59333,7 +59328,7 @@ } if (*Select)(unsafe.Pointer(pSel)).FpLimit != 0 { var db uintptr = (*Parse)(unsafe.Pointer(pParse)).Fdb - pLimit = Xsqlite3Expr(tls, db, TK_INTEGER, ts+7523) + pLimit = Xsqlite3Expr(tls, db, TK_INTEGER, ts+7512) if pLimit != 0 { (*Expr)(unsafe.Pointer(pLimit)).FaffExpr = uint8(SQLITE_AFF_NUMERIC) pLimit = Xsqlite3PExpr(tls, pParse, TK_NE, @@ -59771,6 +59766,7 @@ func Xsqlite3ExprCodeGeneratedColumn(tls *libc.TLS, pParse uintptr, pTab uintptr, pCol uintptr, regOut int32) { var iAddr int32 var v uintptr = (*Parse)(unsafe.Pointer(pParse)).FpVdbe + var nErr int32 = (*Parse)(unsafe.Pointer(pParse)).FnErr if (*Parse)(unsafe.Pointer(pParse)).FiSelfTab > 0 { iAddr = Xsqlite3VdbeAddOp3(tls, v, OP_IfNullRow, (*Parse)(unsafe.Pointer(pParse)).FiSelfTab-1, 0, regOut) @@ -59784,6 +59780,9 @@ if iAddr != 0 { Xsqlite3VdbeJumpHere(tls, v, iAddr) } + if (*Parse)(unsafe.Pointer(pParse)).FnErr > nErr { + (*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).FerrByteOffset = -1 + } } // Generate code to extract the value of the iCol-th column of a table. @@ -60002,6 +60001,7 @@ var p uintptr var v uintptr for p = (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr; p != 0; p = (*IndexedExpr)(unsafe.Pointer(p)).FpIENext { + var exprAff U8 var iDataCur int32 = (*IndexedExpr)(unsafe.Pointer(p)).FiDataCur if iDataCur < 0 { continue @@ -60015,6 +60015,14 @@ if Xsqlite3ExprCompare(tls, uintptr(0), pExpr, (*IndexedExpr)(unsafe.Pointer(p)).FpExpr, iDataCur) != 0 { continue } + + exprAff = Xsqlite3ExprAffinity(tls, pExpr) + if int32(exprAff) <= SQLITE_AFF_BLOB && int32((*IndexedExpr)(unsafe.Pointer(p)).Faff) != SQLITE_AFF_BLOB || + int32(exprAff) == SQLITE_AFF_TEXT && int32((*IndexedExpr)(unsafe.Pointer(p)).Faff) != SQLITE_AFF_TEXT || + int32(exprAff) >= SQLITE_AFF_NUMERIC && int32((*IndexedExpr)(unsafe.Pointer(p)).Faff) != SQLITE_AFF_NUMERIC { + continue + } + v = (*Parse)(unsafe.Pointer(pParse)).FpVdbe if (*IndexedExpr)(unsafe.Pointer(p)).FbMaybeNullRow != 0 { @@ -60788,7 +60796,7 @@ if !((*Expr)(unsafe.Pointer(pExpr)).FiTable != n1) { goto __122 } - Xsqlite3ErrorMsg(tls, pParse, ts+7645, + Xsqlite3ErrorMsg(tls, pParse, ts+7634, libc.VaList(bp+24, (*Expr)(unsafe.Pointer(pExpr)).FiTable, n1)) __122: ; @@ -60810,11 +60818,10 @@ return target __50: - if !(!((*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_Collate) != U32(0)) && - (*Expr)(unsafe.Pointer(pExpr)).FpLeft != 0 && - int32((*Expr)(unsafe.Pointer((*Expr)(unsafe.Pointer(pExpr)).FpLeft)).Fop) == TK_FUNCTION) { + if !!((*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_Collate) != U32(0)) { goto __123 } + inReg = Xsqlite3ExprCodeTarget(tls, pParse, (*Expr)(unsafe.Pointer(pExpr)).FpLeft, target) if !(inReg != target) { goto __125 @@ -60885,13 +60892,19 @@ ; __127: ; - addrINR = Xsqlite3VdbeAddOp1(tls, v, OP_IfNullRow, (*Expr)(unsafe.Pointer(pExpr)).FiTable) + addrINR = Xsqlite3VdbeAddOp3(tls, v, OP_IfNullRow, (*Expr)(unsafe.Pointer(pExpr)).FiTable, 0, target) (*Parse)(unsafe.Pointer(pParse)).FokConstFactor = U8(0) inReg = Xsqlite3ExprCodeTarget(tls, pParse, (*Expr)(unsafe.Pointer(pExpr)).FpLeft, target) (*Parse)(unsafe.Pointer(pParse)).FokConstFactor = okConstFactor + if !(inReg != target) { + goto __130 + } + Xsqlite3VdbeAddOp2(tls, v, OP_SCopy, inReg, target) + inReg = target +__130: + ; Xsqlite3VdbeJumpHere(tls, v, addrINR) - Xsqlite3VdbeChangeP3(tls, v, addrINR, inReg) goto __5 __56: @@ -60904,15 +60917,15 @@ nExpr = (*ExprList)(unsafe.Pointer(pEList)).FnExpr endLabel = Xsqlite3VdbeMakeLabel(tls, pParse) if !(libc.AssignUintptr(&pX, 
(*Expr)(unsafe.Pointer(pExpr)).FpLeft) != uintptr(0)) { - goto __130 + goto __131 } pDel = Xsqlite3ExprDup(tls, db1, pX, 0) if !((*Sqlite3)(unsafe.Pointer(db1)).FmallocFailed != 0) { - goto __131 + goto __132 } Xsqlite3ExprDelete(tls, db1, pDel) goto __5 -__131: +__132: ; exprToRegister(tls, pDel, exprCodeVector(tls, pParse, pDel, bp+40)) @@ -60922,22 +60935,22 @@ pTest = bp + 100 *(*int32)(unsafe.Pointer(bp + 40)) = 0 -__130: +__131: ; i1 = 0 -__132: +__133: if !(i1 < nExpr-1) { - goto __134 + goto __135 } if !(pX != 0) { - goto __135 + goto __136 } (*Expr)(unsafe.Pointer(bp + 100)).FpRight = (*ExprList_item)(unsafe.Pointer(aListelem + uintptr(i1)*20)).FpExpr - goto __136 -__135: - pTest = (*ExprList_item)(unsafe.Pointer(aListelem + uintptr(i1)*20)).FpExpr + goto __137 __136: + pTest = (*ExprList_item)(unsafe.Pointer(aListelem + uintptr(i1)*20)).FpExpr +__137: ; nextCase = Xsqlite3VdbeMakeLabel(tls, pParse) @@ -60946,21 +60959,21 @@ Xsqlite3ExprCode(tls, pParse, (*ExprList_item)(unsafe.Pointer(aListelem+uintptr(i1+1)*20)).FpExpr, target) Xsqlite3VdbeGoto(tls, v, endLabel) Xsqlite3VdbeResolveLabel(tls, v, nextCase) - goto __133 -__133: - i1 = i1 + 2 - goto __132 goto __134 __134: + i1 = i1 + 2 + goto __133 + goto __135 +__135: ; if !(nExpr&1 != 0) { - goto __137 + goto __138 } Xsqlite3ExprCode(tls, pParse, (*ExprList_item)(unsafe.Pointer(pEList+8+uintptr(nExpr-1)*20)).FpExpr, target) - goto __138 -__137: - Xsqlite3VdbeAddOp2(tls, v, OP_Null, 0, target) + goto __139 __138: + Xsqlite3VdbeAddOp2(tls, v, OP_Null, 0, target) +__139: ; Xsqlite3ExprDelete(tls, db1, pDel) setDoNotMergeFlagOnCopy(tls, v) @@ -60970,27 +60983,27 @@ __57: ; if !(!(int32((*Parse)(unsafe.Pointer(pParse)).FpTriggerTab) != 0) && !(int32((*Parse)(unsafe.Pointer(pParse)).Fnested) != 0)) { - goto __139 + goto __140 } Xsqlite3ErrorMsg(tls, pParse, ts+8075, 0) return 0 -__139: +__140: ; if !(int32((*Expr)(unsafe.Pointer(pExpr)).FaffExpr) == OE_Abort) { - goto __140 + goto __141 } Xsqlite3MayAbort(tls, pParse) -__140: +__141: ; if !(int32((*Expr)(unsafe.Pointer(pExpr)).FaffExpr) == OE_Ignore) { - goto __141 + goto __142 } Xsqlite3VdbeAddOp4(tls, v, OP_Halt, SQLITE_OK, OE_Ignore, 0, *(*uintptr)(unsafe.Pointer(pExpr + 8)), 0) - goto __142 -__141: + goto __143 +__142: Xsqlite3HaltConstraint(tls, pParse, func() int32 { if (*Parse)(unsafe.Pointer(pParse)).FpTriggerTab != 0 { @@ -60999,7 +61012,7 @@ return SQLITE_ERROR }(), int32((*Expr)(unsafe.Pointer(pExpr)).FaffExpr), *(*uintptr)(unsafe.Pointer(pExpr + 8)), int8(0), uint8(0)) -__142: +__143: ; goto __5 @@ -63670,7 +63683,7 @@ return SQLITE_NOMEM } if Xsqlite3_strnicmp(tls, zSql, ts+10915, 7) != 0 { - return Xsqlite3CorruptError(tls, 113494) + return Xsqlite3CorruptError(tls, 113516) } (*Sqlite3)(unsafe.Pointer(db)).Finit.FiDb = func() uint8 { if bTemp != 0 { @@ -63687,7 +63700,7 @@ } if rc == SQLITE_OK && ((*Parse)(unsafe.Pointer(p)).FpNewTable == uintptr(0) && (*Parse)(unsafe.Pointer(p)).FpNewIndex == uintptr(0) && (*Parse)(unsafe.Pointer(p)).FpNewTrigger == uintptr(0)) { - rc = Xsqlite3CorruptError(tls, 113505) + rc = Xsqlite3CorruptError(tls, 113527) } (*Sqlite3)(unsafe.Pointer(db)).Finit.FiDb = U8(0) @@ -64608,7 +64621,7 @@ goto __2 } - rc = Xsqlite3CorruptError(tls, 114441) + rc = Xsqlite3CorruptError(tls, 114463) goto drop_column_done __2: ; @@ -68974,6 +68987,12 @@ pExpr = Xsqlite3PExpr(tls, pParse, TK_UPLUS, pExpr, uintptr(0)) __11: ; + if !(pExpr != 0 && int32((*Expr)(unsafe.Pointer(pExpr)).Fop) != TK_RAISE) { + goto __12 + } + (*Expr)(unsafe.Pointer(pExpr)).FaffExpr = 
(*Column)(unsafe.Pointer(pCol)).Faffinity +__12: + ; Xsqlite3ColumnSetExpr(tls, pParse, pTab, pCol, pExpr) pExpr = uintptr(0) goto generated_done @@ -70138,7 +70157,7 @@ if Xsqlite3_strnicmp(tls, (*Table)(unsafe.Pointer(pTab)).FzName+uintptr(7), ts+3279, 4) == 0 { return 0 } - if Xsqlite3_strnicmp(tls, (*Table)(unsafe.Pointer(pTab)).FzName+uintptr(7), ts+7126, 10) == 0 { + if Xsqlite3_strnicmp(tls, (*Table)(unsafe.Pointer(pTab)).FzName+uintptr(7), ts+7115, 10) == 0 { return 0 } return 1 @@ -71384,7 +71403,7 @@ goto __101 } Xsqlite3ErrorMsg(tls, pParse, ts+14133, 0) - (*Parse)(unsafe.Pointer(pParse)).Frc = Xsqlite3CorruptError(tls, 121835) + (*Parse)(unsafe.Pointer(pParse)).Frc = Xsqlite3CorruptError(tls, 121859) goto exit_create_index __101: ; @@ -73429,7 +73448,7 @@ goto __16 __15: wcf = U16(WHERE_ONEPASS_DESIRED | WHERE_DUPLICATES_OK) - if !((*NameContext)(unsafe.Pointer(bp+8)).FncFlags&NC_VarSelect != 0) { + if !((*NameContext)(unsafe.Pointer(bp+8)).FncFlags&NC_Subquery != 0) { goto __23 } bComplex = 1 @@ -79894,7 +79913,7 @@ if !!(Xsqlite3SafetyCheckOk(tls, db) != 0) { goto __1 } - return Xsqlite3MisuseError(tls, 131895) + return Xsqlite3MisuseError(tls, 131931) __1: ; if !(zSql == uintptr(0)) { @@ -81291,7 +81310,7 @@ } else if (*FuncDef)(unsafe.Pointer(p)).FxFinalize != uintptr(0) { zType = ts + 17506 } else { - zType = ts + 7521 + zType = ts + 7510 } Xsqlite3VdbeMultiLoad(tls, v, 1, ts+17508, libc.VaList(bp, (*FuncDef)(unsafe.Pointer(p)).FzName, isBuiltin, @@ -81452,6 +81471,7 @@ var zErr2 uintptr var k3 int32 var pCheck uintptr + var jmp7 int32 var jmp6 int32 var iCol1 int32 var uniqOk int32 @@ -82770,7 +82790,7 @@ goto __217 } pMod = (*HashElem)(unsafe.Pointer(j1)).Fdata - Xsqlite3VdbeMultiLoad(tls, v, 1, ts+7521, libc.VaList(bp+264, (*Module)(unsafe.Pointer(pMod)).FzName)) + Xsqlite3VdbeMultiLoad(tls, v, 1, ts+7510, libc.VaList(bp+264, (*Module)(unsafe.Pointer(pMod)).FzName)) goto __216 __216: j1 = (*HashElem)(unsafe.Pointer(j1)).Fnext @@ -82786,7 +82806,7 @@ if !(i6 < int32(uint32(unsafe.Sizeof(aPragmaName))/uint32(unsafe.Sizeof(PragmaName{})))) { goto __220 } - Xsqlite3VdbeMultiLoad(tls, v, 1, ts+7521, libc.VaList(bp+272, aPragmaName[i6].FzName)) + Xsqlite3VdbeMultiLoad(tls, v, 1, ts+7510, libc.VaList(bp+272, aPragmaName[i6].FzName)) goto __219 __219: i6++ @@ -83591,80 +83611,94 @@ jmp4 = integrityCheckResultRow(tls, v) Xsqlite3VdbeJumpHere(tls, v, jmp21) + if !((*Table)(unsafe.Pointer(pTab9)).FtabFlags&U32(TF_WithoutRowid) == U32(0)) { + goto __345 + } + Xsqlite3VdbeAddOp2(tls, v, OP_IdxRowid, *(*int32)(unsafe.Pointer(bp + 572))+j4, 3) + jmp7 = Xsqlite3VdbeAddOp3(tls, v, OP_Eq, 3, 0, r1+int32((*Index)(unsafe.Pointer(pIdx5)).FnColumn)-1) + + Xsqlite3VdbeLoadString(tls, v, 3, + ts+17922) + Xsqlite3VdbeAddOp3(tls, v, OP_Concat, 7, 3, 3) + Xsqlite3VdbeLoadString(tls, v, 4, ts+17958) + Xsqlite3VdbeGoto(tls, v, jmp5-1) + Xsqlite3VdbeJumpHere(tls, v, jmp7) +__345: + ; label6 = 0 kk = 0 -__345: +__346: if !(kk < int32((*Index)(unsafe.Pointer(pIdx5)).FnKeyCol)) { - goto __347 + goto __348 } if !(*(*uintptr)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx5)).FazColl + uintptr(kk)*4)) == uintptr(unsafe.Pointer(&Xsqlite3StrBINARY))) { - goto __348 + goto __349 } - goto __346 -__348: + goto __347 +__349: ; if !(label6 == 0) { - goto __349 + goto __350 } label6 = Xsqlite3VdbeMakeLabel(tls, pParse) -__349: +__350: ; Xsqlite3VdbeAddOp3(tls, v, OP_Column, *(*int32)(unsafe.Pointer(bp + 572))+j4, kk, 3) Xsqlite3VdbeAddOp3(tls, v, OP_Ne, 3, label6, r1+kk) - goto __346 -__346: - kk++ - goto __345 goto 
__347 __347: + kk++ + goto __346 + goto __348 +__348: ; if !(label6 != 0) { - goto __350 + goto __351 } jmp6 = Xsqlite3VdbeAddOp0(tls, v, OP_Goto) Xsqlite3VdbeResolveLabel(tls, v, label6) Xsqlite3VdbeLoadString(tls, v, 3, ts+17896) Xsqlite3VdbeAddOp3(tls, v, OP_Concat, 7, 3, 3) - Xsqlite3VdbeLoadString(tls, v, 4, ts+17922) + Xsqlite3VdbeLoadString(tls, v, 4, ts+17969) Xsqlite3VdbeGoto(tls, v, jmp5-1) Xsqlite3VdbeJumpHere(tls, v, jmp6) -__350: +__351: ; if !(int32((*Index)(unsafe.Pointer(pIdx5)).FonError) != OE_None) { - goto __351 + goto __352 } uniqOk = Xsqlite3VdbeMakeLabel(tls, pParse) kk = 0 -__352: +__353: if !(kk < int32((*Index)(unsafe.Pointer(pIdx5)).FnKeyCol)) { - goto __354 + goto __355 } iCol1 = int32(*(*I16)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx5)).FaiColumn + uintptr(kk)*2))) if !(iCol1 >= 0 && uint32(int32(*(*uint8)(unsafe.Pointer((*Table)(unsafe.Pointer(pTab9)).FaCol + uintptr(iCol1)*16 + 4))&0xf>>0)) != 0) { - goto __355 + goto __356 } - goto __353 -__355: + goto __354 +__356: ; Xsqlite3VdbeAddOp2(tls, v, OP_IsNull, r1+kk, uniqOk) - goto __353 -__353: - kk++ - goto __352 goto __354 __354: + kk++ + goto __353 + goto __355 +__355: ; jmp61 = Xsqlite3VdbeAddOp1(tls, v, OP_Next, *(*int32)(unsafe.Pointer(bp + 572))+j4) Xsqlite3VdbeGoto(tls, v, uniqOk) Xsqlite3VdbeJumpHere(tls, v, jmp61) Xsqlite3VdbeAddOp4Int(tls, v, OP_IdxGT, *(*int32)(unsafe.Pointer(bp + 572))+j4, uniqOk, r1, int32((*Index)(unsafe.Pointer(pIdx5)).FnKeyCol)) - Xsqlite3VdbeLoadString(tls, v, 3, ts+17949) + Xsqlite3VdbeLoadString(tls, v, 3, ts+17996) Xsqlite3VdbeGoto(tls, v, jmp5) Xsqlite3VdbeResolveLabel(tls, v, uniqOk) -__351: +__352: ; Xsqlite3VdbeJumpHere(tls, v, jmp4) Xsqlite3ResolvePartIdxLabel(tls, pParse, *(*int32)(unsafe.Pointer(bp + 580))) @@ -83681,20 +83715,20 @@ Xsqlite3VdbeAddOp2(tls, v, OP_Next, *(*int32)(unsafe.Pointer(bp + 568)), loopTop) Xsqlite3VdbeJumpHere(tls, v, loopTop-1) if !!(isQuick != 0) { - goto __356 + goto __357 } - Xsqlite3VdbeLoadString(tls, v, 2, ts+17976) + Xsqlite3VdbeLoadString(tls, v, 2, ts+18023) j4 = 0 pIdx5 = (*Table)(unsafe.Pointer(pTab9)).FpIndex -__357: +__358: if !(pIdx5 != 0) { - goto __359 + goto __360 } if !(pPk1 == pIdx5) { - goto __360 + goto __361 } - goto __358 -__360: + goto __359 +__361: ; Xsqlite3VdbeAddOp2(tls, v, OP_Count, *(*int32)(unsafe.Pointer(bp + 572))+j4, 3) addr1 = Xsqlite3VdbeAddOp3(tls, v, OP_Eq, 8+j4, 0, 3) @@ -83703,21 +83737,21 @@ Xsqlite3VdbeAddOp3(tls, v, OP_Concat, 4, 2, 3) integrityCheckResultRow(tls, v) Xsqlite3VdbeJumpHere(tls, v, addr1) - goto __358 -__358: - pIdx5 = (*Index)(unsafe.Pointer(pIdx5)).FpNext - j4++ - goto __357 goto __359 __359: + pIdx5 = (*Index)(unsafe.Pointer(pIdx5)).FpNext + j4++ + goto __358 + goto __360 +__360: ; if !(pPk1 != 0) { - goto __361 + goto __362 } Xsqlite3ReleaseTempRange(tls, pParse, r2, int32((*Index)(unsafe.Pointer(pPk1)).FnKeyCol)) -__361: +__362: ; -__356: +__357: ; goto __291 __291: @@ -83735,14 +83769,14 @@ ; aOp2 = Xsqlite3VdbeAddOpList(tls, v, int32(uint32(unsafe.Sizeof(endCode))/uint32(unsafe.Sizeof(VdbeOpList{}))), uintptr(unsafe.Pointer(&endCode)), iLn5) if !(aOp2 != 0) { - goto __362 + goto __363 } (*VdbeOp)(unsafe.Pointer(aOp2)).Fp2 = 1 - *(*int32)(unsafe.Pointer(bp + 564)) (*VdbeOp)(unsafe.Pointer(aOp2 + 2*20)).Fp4type = int8(-1) - *(*uintptr)(unsafe.Pointer(aOp2 + 2*20 + 16)) = ts + 18005 + *(*uintptr)(unsafe.Pointer(aOp2 + 2*20 + 16)) = ts + 18052 (*VdbeOp)(unsafe.Pointer(aOp2 + 5*20)).Fp4type = int8(-1) *(*uintptr)(unsafe.Pointer(aOp2 + 5*20 + 16)) = Xsqlite3ErrStr(tls, SQLITE_CORRUPT) 
-__362: +__363: ; Xsqlite3VdbeChangeP3(tls, v, 0, Xsqlite3VdbeCurrentAddr(tls, v)-2) @@ -83750,27 +83784,27 @@ __45: if !!(zRight != 0) { - goto __363 + goto __364 } if !(Xsqlite3ReadSchema(tls, pParse) != 0) { - goto __365 + goto __366 } goto pragma_out -__365: +__366: ; returnSingleText(tls, v, encnames1[(*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).Fenc].FzName) - goto __364 -__363: + goto __365 +__364: if !((*Sqlite3)(unsafe.Pointer(db)).FmDbFlags&U32(DBFLAG_EncodingFixed) == U32(0)) { - goto __366 + goto __367 } pEnc = uintptr(unsafe.Pointer(&encnames1)) -__367: +__368: if !((*EncName)(unsafe.Pointer(pEnc)).FzName != 0) { - goto __369 + goto __370 } if !(0 == Xsqlite3StrICmp(tls, zRight, (*EncName)(unsafe.Pointer(pEnc)).FzName)) { - goto __370 + goto __371 } if (*EncName)(unsafe.Pointer(pEnc)).Fenc != 0 { enc = (*EncName)(unsafe.Pointer(pEnc)).Fenc @@ -83779,25 +83813,25 @@ } (*Schema)(unsafe.Pointer((*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb)).FpSchema)).Fenc = enc Xsqlite3SetTextEncoding(tls, db, enc) - goto __369 -__370: + goto __370 +__371: ; - goto __368 -__368: - pEnc += 8 - goto __367 goto __369 __369: + pEnc += 8 + goto __368 + goto __370 +__370: ; if !!(int32((*EncName)(unsafe.Pointer(pEnc)).FzName) != 0) { - goto __371 + goto __372 } - Xsqlite3ErrorMsg(tls, pParse, ts+18008, libc.VaList(bp+456, zRight)) -__371: + Xsqlite3ErrorMsg(tls, pParse, ts+18055, libc.VaList(bp+456, zRight)) +__372: ; -__366: +__367: ; -__364: +__365: ; goto __15 @@ -83805,15 +83839,15 @@ iCookie = int32((*PragmaName)(unsafe.Pointer(pPragma)).FiArg) Xsqlite3VdbeUsesBtree(tls, v, iDb) if !(zRight != 0 && int32((*PragmaName)(unsafe.Pointer(pPragma)).FmPragFlg)&PragFlg_ReadOnly == 0) { - goto __372 + goto __373 } aOp3 = Xsqlite3VdbeAddOpList(tls, v, int32(uint32(unsafe.Sizeof(setCookie))/uint32(unsafe.Sizeof(VdbeOpList{}))), uintptr(unsafe.Pointer(&setCookie)), 0) if !(0 != 0) { - goto __374 + goto __375 } goto __15 -__374: +__375: ; (*VdbeOp)(unsafe.Pointer(aOp3)).Fp1 = iDb (*VdbeOp)(unsafe.Pointer(aOp3 + 1*20)).Fp1 = iDb @@ -83821,41 +83855,41 @@ (*VdbeOp)(unsafe.Pointer(aOp3 + 1*20)).Fp3 = Xsqlite3Atoi(tls, zRight) (*VdbeOp)(unsafe.Pointer(aOp3 + 1*20)).Fp5 = U16(1) if !(iCookie == BTREE_SCHEMA_VERSION && (*Sqlite3)(unsafe.Pointer(db)).Fflags&uint64(SQLITE_Defensive) != uint64(0)) { - goto __375 + goto __376 } (*VdbeOp)(unsafe.Pointer(aOp3 + 1*20)).Fopcode = U8(OP_Noop) -__375: +__376: ; - goto __373 -__372: + goto __374 +__373: ; aOp4 = Xsqlite3VdbeAddOpList(tls, v, int32(uint32(unsafe.Sizeof(readCookie))/uint32(unsafe.Sizeof(VdbeOpList{}))), uintptr(unsafe.Pointer(&readCookie)), 0) if !(0 != 0) { - goto __376 + goto __377 } goto __15 -__376: +__377: ; (*VdbeOp)(unsafe.Pointer(aOp4)).Fp1 = iDb (*VdbeOp)(unsafe.Pointer(aOp4 + 1*20)).Fp1 = iDb (*VdbeOp)(unsafe.Pointer(aOp4 + 1*20)).Fp3 = iCookie Xsqlite3VdbeReusable(tls, v) -__373: +__374: ; goto __15 __47: i10 = 0 (*Parse)(unsafe.Pointer(pParse)).FnMem = 1 -__377: +__378: if !(libc.AssignUintptr(&zOpt, Xsqlite3_compileoption_get(tls, libc.PostIncInt32(&i10, 1))) != uintptr(0)) { - goto __378 + goto __379 } Xsqlite3VdbeLoadString(tls, v, 1, zOpt) Xsqlite3VdbeAddOp2(tls, v, OP_ResultRow, 1, 1) - goto __377 -__378: + goto __378 +__379: ; Xsqlite3VdbeReusable(tls, v) @@ -83870,31 +83904,31 @@ }() eMode2 = SQLITE_CHECKPOINT_PASSIVE if !(zRight != 0) { - goto __379 + goto __380 } if !(Xsqlite3StrICmp(tls, zRight, ts+17338) == 0) { - goto __380 + goto __381 } eMode2 = SQLITE_CHECKPOINT_FULL - goto __381 -__380: - if 
!(Xsqlite3StrICmp(tls, zRight, ts+18033) == 0) { - goto __382 + goto __382 +__381: + if !(Xsqlite3StrICmp(tls, zRight, ts+18080) == 0) { + goto __383 } eMode2 = SQLITE_CHECKPOINT_RESTART - goto __383 -__382: + goto __384 +__383: if !(Xsqlite3StrICmp(tls, zRight, ts+17491) == 0) { - goto __384 + goto __385 } eMode2 = SQLITE_CHECKPOINT_TRUNCATE -__384: +__385: ; -__383: +__384: ; -__381: +__382: ; -__379: +__380: ; (*Parse)(unsafe.Pointer(pParse)).FnMem = 3 Xsqlite3VdbeAddOp3(tls, v, OP_Checkpoint, iBt, eMode2, 1) @@ -83904,10 +83938,10 @@ __49: if !(zRight != 0) { - goto __385 + goto __386 } Xsqlite3_wal_autocheckpoint(tls, db, Xsqlite3Atoi(tls, zRight)) -__385: +__386: ; returnSingleInt(tls, v, func() int64 { @@ -83927,19 +83961,19 @@ __51: if !(zRight != 0) { - goto __386 + goto __387 } opMask = U32(Xsqlite3Atoi(tls, zRight)) if !(opMask&U32(0x02) == U32(0)) { - goto __388 + goto __389 } goto __15 -__388: +__389: ; - goto __387 -__386: - opMask = U32(0xfffe) + goto __388 __387: + opMask = U32(0xfffe) +__388: ; iTabCur = libc.PostIncInt32(&(*Parse)(unsafe.Pointer(pParse)).FnTab, 1) iDbLast = func() int32 { @@ -83948,86 +83982,86 @@ } return (*Sqlite3)(unsafe.Pointer(db)).FnDb - 1 }() -__389: +__390: if !(iDb <= iDbLast) { - goto __391 + goto __392 } if !(iDb == 1) { - goto __392 + goto __393 } - goto __390 -__392: + goto __391 +__393: ; Xsqlite3CodeVerifySchema(tls, pParse, iDb) pSchema = (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + uintptr(iDb)*16)).FpSchema k4 = (*Hash)(unsafe.Pointer(pSchema + 8)).Ffirst -__393: +__394: if !(k4 != 0) { - goto __395 + goto __396 } pTab10 = (*HashElem)(unsafe.Pointer(k4)).Fdata if !((*Table)(unsafe.Pointer(pTab10)).FtabFlags&U32(TF_StatsUsed) == U32(0)) { - goto __396 + goto __397 } - goto __394 -__396: + goto __395 +__397: ; szThreshold = LogEst(int32((*Table)(unsafe.Pointer(pTab10)).FnRowLogEst) + 46) pIdx6 = (*Table)(unsafe.Pointer(pTab10)).FpIndex -__397: +__398: if !(pIdx6 != 0) { - goto __399 + goto __400 } if !!(int32(*(*uint16)(unsafe.Pointer(pIdx6 + 56))&0x80>>7) != 0) { - goto __400 + goto __401 } szThreshold = int16(0) - goto __399 -__400: + goto __400 +__401: ; - goto __398 -__398: - pIdx6 = (*Index)(unsafe.Pointer(pIdx6)).FpNext - goto __397 goto __399 __399: + pIdx6 = (*Index)(unsafe.Pointer(pIdx6)).FpNext + goto __398 + goto __400 +__400: ; if !(szThreshold != 0) { - goto __401 + goto __402 } Xsqlite3OpenTable(tls, pParse, iTabCur, iDb, pTab10, OP_OpenRead) Xsqlite3VdbeAddOp3(tls, v, OP_IfSmaller, iTabCur, int32(U32(Xsqlite3VdbeCurrentAddr(tls, v)+2)+opMask&U32(1)), int32(szThreshold)) -__401: +__402: ; - zSubSql = Xsqlite3MPrintf(tls, db, ts+18041, + zSubSql = Xsqlite3MPrintf(tls, db, ts+18088, libc.VaList(bp+464, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*16)).FzDbSName, (*Table)(unsafe.Pointer(pTab10)).FzName)) if !(opMask&U32(0x01) != 0) { - goto __402 + goto __403 } r11 = Xsqlite3GetTempReg(tls, pParse) Xsqlite3VdbeAddOp4(tls, v, OP_String8, 0, r11, 0, zSubSql, -6) Xsqlite3VdbeAddOp2(tls, v, OP_ResultRow, r11, 1) - goto __403 -__402: - Xsqlite3VdbeAddOp4(tls, v, OP_SqlExec, 0, 0, 0, zSubSql, -6) + goto __404 __403: + Xsqlite3VdbeAddOp4(tls, v, OP_SqlExec, 0, 0, 0, zSubSql, -6) +__404: ; - goto __394 -__394: - k4 = (*HashElem)(unsafe.Pointer(k4)).Fnext - goto __393 goto __395 __395: + k4 = (*HashElem)(unsafe.Pointer(k4)).Fnext + goto __394 + goto __396 +__396: ; - goto __390 -__390: - iDb++ - goto __389 goto __391 __391: + iDb++ + goto __390 + goto __392 +__392: ; Xsqlite3VdbeAddOp0(tls, v, 
OP_Expire) goto __15 @@ -84035,36 +84069,36 @@ __52: ; if !(zRight != 0) { - goto __404 + goto __405 } Xsqlite3_busy_timeout(tls, db, Xsqlite3Atoi(tls, zRight)) -__404: +__405: ; returnSingleInt(tls, v, int64((*Sqlite3)(unsafe.Pointer(db)).FbusyTimeout)) goto __15 __53: if !(zRight != 0 && Xsqlite3DecOrHexToI64(tls, zRight, bp+584) == SQLITE_OK) { - goto __405 + goto __406 } Xsqlite3_soft_heap_limit64(tls, *(*Sqlite3_int64)(unsafe.Pointer(bp + 584))) -__405: +__406: ; returnSingleInt(tls, v, Xsqlite3_soft_heap_limit64(tls, int64(-1))) goto __15 __54: if !(zRight != 0 && Xsqlite3DecOrHexToI64(tls, zRight, bp+592) == SQLITE_OK) { - goto __406 + goto __407 } iPrior = Xsqlite3_hard_heap_limit64(tls, int64(-1)) if !(*(*Sqlite3_int64)(unsafe.Pointer(bp + 592)) > int64(0) && (iPrior == int64(0) || iPrior > *(*Sqlite3_int64)(unsafe.Pointer(bp + 592)))) { - goto __407 + goto __408 } Xsqlite3_hard_heap_limit64(tls, *(*Sqlite3_int64)(unsafe.Pointer(bp + 592))) -__407: +__408: ; -__406: +__407: ; returnSingleInt(tls, v, Xsqlite3_hard_heap_limit64(tls, int64(-1))) goto __15 @@ -84073,10 +84107,10 @@ if !(zRight != 0 && Xsqlite3DecOrHexToI64(tls, zRight, bp+600) == SQLITE_OK && *(*Sqlite3_int64)(unsafe.Pointer(bp + 600)) >= int64(0)) { - goto __408 + goto __409 } Xsqlite3_limit(tls, db, SQLITE_LIMIT_WORKER_THREADS, int32(*(*Sqlite3_int64)(unsafe.Pointer(bp + 600))&int64(0x7fffffff))) -__408: +__409: ; returnSingleInt(tls, v, int64(Xsqlite3_limit(tls, db, SQLITE_LIMIT_WORKER_THREADS, -1))) goto __15 @@ -84085,10 +84119,10 @@ if !(zRight != 0 && Xsqlite3DecOrHexToI64(tls, zRight, bp+608) == SQLITE_OK && *(*Sqlite3_int64)(unsafe.Pointer(bp + 608)) >= int64(0)) { - goto __409 + goto __410 } (*Sqlite3)(unsafe.Pointer(db)).FnAnalysisLimit = int32(*(*Sqlite3_int64)(unsafe.Pointer(bp + 608)) & int64(0x7fffffff)) -__409: +__410: ; returnSingleInt(tls, v, int64((*Sqlite3)(unsafe.Pointer(db)).FnAnalysisLimit)) goto __15 @@ -84096,10 +84130,10 @@ __15: ; if !(int32((*PragmaName)(unsafe.Pointer(pPragma)).FmPragFlg)&PragFlg_NoColumns1 != 0 && zRight != 0) { - goto __410 + goto __411 } -__410: +__411: ; pragma_out: Xsqlite3DbFree(tls, db, zLeft) @@ -84151,14 +84185,14 @@ {Fopcode: U8(OP_Goto), Fp2: int8(3)}, } var encnames1 = [9]EncName{ - {FzName: ts + 18059, Fenc: U8(SQLITE_UTF8)}, - {FzName: ts + 18064, Fenc: U8(SQLITE_UTF8)}, - {FzName: ts + 18070, Fenc: U8(SQLITE_UTF16LE)}, - {FzName: ts + 18079, Fenc: U8(SQLITE_UTF16BE)}, - {FzName: ts + 18088, Fenc: U8(SQLITE_UTF16LE)}, - {FzName: ts + 18096, Fenc: U8(SQLITE_UTF16BE)}, - {FzName: ts + 18104}, - {FzName: ts + 18111}, + {FzName: ts + 18106, Fenc: U8(SQLITE_UTF8)}, + {FzName: ts + 18111, Fenc: U8(SQLITE_UTF8)}, + {FzName: ts + 18117, Fenc: U8(SQLITE_UTF16LE)}, + {FzName: ts + 18126, Fenc: U8(SQLITE_UTF16BE)}, + {FzName: ts + 18135, Fenc: U8(SQLITE_UTF16LE)}, + {FzName: ts + 18143, Fenc: U8(SQLITE_UTF16BE)}, + {FzName: ts + 18151}, + {FzName: ts + 18158}, {}, } var setCookie = [2]VdbeOpList{ @@ -84210,7 +84244,7 @@ _ = argc _ = argv Xsqlite3StrAccumInit(tls, bp+32, uintptr(0), bp+56, int32(unsafe.Sizeof([200]uint8{})), 0) - Xsqlite3_str_appendall(tls, bp+32, ts+18117) + Xsqlite3_str_appendall(tls, bp+32, ts+18164) i = 0 j = int32((*PragmaName)(unsafe.Pointer(pPragma)).FiPragCName) __1: @@ -84218,7 +84252,7 @@ goto __3 } { - Xsqlite3_str_appendf(tls, bp+32, ts+18132, libc.VaList(bp, int32(cSep), pragCName[j])) + Xsqlite3_str_appendf(tls, bp+32, ts+18179, libc.VaList(bp, int32(cSep), pragCName[j])) cSep = uint8(',') } @@ -84231,16 +84265,16 @@ __3: ; if i == 0 { - 
Xsqlite3_str_appendf(tls, bp+32, ts+18139, libc.VaList(bp+16, (*PragmaName)(unsafe.Pointer(pPragma)).FzName)) + Xsqlite3_str_appendf(tls, bp+32, ts+18186, libc.VaList(bp+16, (*PragmaName)(unsafe.Pointer(pPragma)).FzName)) i++ } j = 0 if int32((*PragmaName)(unsafe.Pointer(pPragma)).FmPragFlg)&PragFlg_Result1 != 0 { - Xsqlite3_str_appendall(tls, bp+32, ts+18145) + Xsqlite3_str_appendall(tls, bp+32, ts+18192) j++ } if int32((*PragmaName)(unsafe.Pointer(pPragma)).FmPragFlg)&(PragFlg_SchemaOpt|PragFlg_SchemaReq) != 0 { - Xsqlite3_str_appendall(tls, bp+32, ts+18157) + Xsqlite3_str_appendall(tls, bp+32, ts+18204) j++ } Xsqlite3_str_append(tls, bp+32, ts+4950, 1) @@ -84423,13 +84457,13 @@ __3: ; Xsqlite3StrAccumInit(tls, bp+32, uintptr(0), uintptr(0), 0, *(*int32)(unsafe.Pointer((*PragmaVtab)(unsafe.Pointer(pTab)).Fdb + 120 + 1*4))) - Xsqlite3_str_appendall(tls, bp+32, ts+18172) + Xsqlite3_str_appendall(tls, bp+32, ts+18219) if *(*uintptr)(unsafe.Pointer(pCsr + 16 + 1*4)) != 0 { - Xsqlite3_str_appendf(tls, bp+32, ts+18180, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(pCsr + 16 + 1*4)))) + Xsqlite3_str_appendf(tls, bp+32, ts+18227, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(pCsr + 16 + 1*4)))) } Xsqlite3_str_appendall(tls, bp+32, (*PragmaName)(unsafe.Pointer((*PragmaVtab)(unsafe.Pointer(pTab)).FpName)).FzName) if *(*uintptr)(unsafe.Pointer(pCsr + 16)) != 0 { - Xsqlite3_str_appendf(tls, bp+32, ts+18184, libc.VaList(bp+16, *(*uintptr)(unsafe.Pointer(pCsr + 16)))) + Xsqlite3_str_appendf(tls, bp+32, ts+18231, libc.VaList(bp+16, *(*uintptr)(unsafe.Pointer(pCsr + 16)))) } zSql = Xsqlite3StrAccumFinish(tls, bp+32) if zSql == uintptr(0) { @@ -84506,12 +84540,12 @@ } else if *(*uintptr)(unsafe.Pointer((*InitData)(unsafe.Pointer(pData)).FpzErrMsg)) != uintptr(0) { } else if (*InitData)(unsafe.Pointer(pData)).FmInitFlags&U32(INITFLAG_AlterMask) != 0 { *(*uintptr)(unsafe.Pointer((*InitData)(unsafe.Pointer(pData)).FpzErrMsg)) = Xsqlite3MPrintf(tls, db, - ts+18188, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(azObj)), *(*uintptr)(unsafe.Pointer(azObj + 1*4)), + ts+18235, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(azObj)), *(*uintptr)(unsafe.Pointer(azObj + 1*4)), azAlterType[(*InitData)(unsafe.Pointer(pData)).FmInitFlags&U32(INITFLAG_AlterMask)-U32(1)], zExtra)) (*InitData)(unsafe.Pointer(pData)).Frc = SQLITE_ERROR } else if (*Sqlite3)(unsafe.Pointer(db)).Fflags&uint64(SQLITE_WriteSchema) != 0 { - (*InitData)(unsafe.Pointer(pData)).Frc = Xsqlite3CorruptError(tls, 137196) + (*InitData)(unsafe.Pointer(pData)).Frc = Xsqlite3CorruptError(tls, 137249) } else { var z uintptr var zObj uintptr @@ -84520,19 +84554,19 @@ } else { zObj = ts + 5001 } - z = Xsqlite3MPrintf(tls, db, ts+18216, libc.VaList(bp+32, zObj)) + z = Xsqlite3MPrintf(tls, db, ts+18263, libc.VaList(bp+32, zObj)) if zExtra != 0 && *(*uint8)(unsafe.Pointer(zExtra)) != 0 { - z = Xsqlite3MPrintf(tls, db, ts+18247, libc.VaList(bp+40, z, zExtra)) + z = Xsqlite3MPrintf(tls, db, ts+18294, libc.VaList(bp+40, z, zExtra)) } *(*uintptr)(unsafe.Pointer((*InitData)(unsafe.Pointer(pData)).FpzErrMsg)) = z - (*InitData)(unsafe.Pointer(pData)).Frc = Xsqlite3CorruptError(tls, 137203) + (*InitData)(unsafe.Pointer(pData)).Frc = Xsqlite3CorruptError(tls, 137256) } } var azAlterType = [3]uintptr{ - ts + 18255, - ts + 18262, - ts + 18274, + ts + 18302, + ts + 18309, + ts + 18321, } // Check to see if any sibling index (another index on the same table) @@ -84624,7 +84658,7 @@ var pIndex uintptr pIndex = Xsqlite3FindIndex(tls, db, *(*uintptr)(unsafe.Pointer(argv + 1*4)), 
(*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*16)).FzDbSName) if pIndex == uintptr(0) { - corruptSchema(tls, pData, argv, ts+18285) + corruptSchema(tls, pData, argv, ts+18332) } else if Xsqlite3GetUInt32(tls, *(*uintptr)(unsafe.Pointer(argv + 3*4)), pIndex+44) == 0 || (*Index)(unsafe.Pointer(pIndex)).Ftnum < Pgno(2) || (*Index)(unsafe.Pointer(pIndex)).Ftnum > (*InitData)(unsafe.Pointer(pData)).FmxPage || @@ -84672,7 +84706,7 @@ }()) *(*uintptr)(unsafe.Pointer(bp + 16 + 2*4)) = *(*uintptr)(unsafe.Pointer(bp + 16 + 1*4)) *(*uintptr)(unsafe.Pointer(bp + 16 + 3*4)) = ts + 7931 - *(*uintptr)(unsafe.Pointer(bp + 16 + 4*4)) = ts + 18298 + *(*uintptr)(unsafe.Pointer(bp + 16 + 4*4)) = ts + 18345 *(*uintptr)(unsafe.Pointer(bp + 16 + 5*4)) = uintptr(0) (*InitData)(unsafe.Pointer(bp + 40)).Fdb = db (*InitData)(unsafe.Pointer(bp + 40)).FiDb = iDb @@ -84801,7 +84835,7 @@ if !(int32((*Schema)(unsafe.Pointer((*Db)(unsafe.Pointer(pDb)).FpSchema)).Ffile_format) > SQLITE_MAX_FILE_FORMAT) { goto __19 } - Xsqlite3SetString(tls, pzErrMsg, db, ts+18370) + Xsqlite3SetString(tls, pzErrMsg, db, ts+18417) rc = SQLITE_ERROR goto initone_error_out __19: @@ -84815,7 +84849,7 @@ (*InitData)(unsafe.Pointer(bp + 40)).FmxPage = Xsqlite3BtreeLastPage(tls, (*Db)(unsafe.Pointer(pDb)).FpBt) zSql = Xsqlite3MPrintf(tls, db, - ts+18394, + ts+18441, libc.VaList(bp, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*16)).FzDbSName, zSchemaTabName)) xAuth = (*Sqlite3)(unsafe.Pointer(db)).FxAuth @@ -85147,7 +85181,7 @@ goto __8 } zDb = (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + uintptr(i)*16)).FzDbSName - Xsqlite3ErrorWithMsg(tls, db, rc, ts+18428, libc.VaList(bp, zDb)) + Xsqlite3ErrorWithMsg(tls, db, rc, ts+18475, libc.VaList(bp, zDb)) goto end_prepare __8: @@ -85177,7 +85211,7 @@ if !(nBytes > mxLen) { goto __12 } - Xsqlite3ErrorWithMsg(tls, db, SQLITE_TOOBIG, ts+18458, 0) + Xsqlite3ErrorWithMsg(tls, db, SQLITE_TOOBIG, ts+18505, 0) rc = Xsqlite3ApiExit(tls, db, SQLITE_TOOBIG) goto end_prepare __12: @@ -85273,7 +85307,7 @@ *(*uintptr)(unsafe.Pointer(ppStmt)) = uintptr(0) if !(Xsqlite3SafetyCheckOk(tls, db) != 0) || zSql == uintptr(0) { - return Xsqlite3MisuseError(tls, 137995) + return Xsqlite3MisuseError(tls, 138048) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) Xsqlite3BtreeEnterAll(tls, db) @@ -85372,7 +85406,7 @@ *(*uintptr)(unsafe.Pointer(ppStmt)) = uintptr(0) if !(Xsqlite3SafetyCheckOk(tls, db) != 0) || zSql == uintptr(0) { - return Xsqlite3MisuseError(tls, 138143) + return Xsqlite3MisuseError(tls, 138196) } if nBytes >= 0 { var sz int32 @@ -85699,13 +85733,13 @@ zSp2++ } Xsqlite3ErrorMsg(tls, pParse, - ts+18477, libc.VaList(bp, pA, zSp1, pB, zSp2, pC)) + ts+18524, libc.VaList(bp, pA, zSp1, pB, zSp2, pC)) jointype = JT_INNER } return jointype } -var zKeyText = *(*[34]uint8)(unsafe.Pointer(ts + 18507)) +var zKeyText = *(*[34]uint8)(unsafe.Pointer(ts + 18554)) var aKeyword = [7]struct { Fi U8 FnChar U8 @@ -85880,7 +85914,7 @@ var pUsing uintptr = uintptr(0) if uint32(int32(*(*uint16)(unsafe.Pointer(pRight + 36 + 4))&0x400>>10)) != 0 || *(*uintptr)(unsafe.Pointer(pRight + 48)) != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+18541, libc.VaList(bp, 0)) + ts+18588, libc.VaList(bp, 0)) return 1 } for j = 0; j < int32((*Table)(unsafe.Pointer(pRightTab)).FnCol); j++ { @@ -85925,7 +85959,7 @@ tableAndColumnIndex(tls, pSrc, 0, i, zName, bp+24, bp+28, int32(*(*uint16)(unsafe.Pointer(pRight + 36 + 4))&0x1000>>12)) == 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+18591, 
libc.VaList(bp+8, zName)) + ts+18638, libc.VaList(bp+8, zName)) return 1 } pE1 = Xsqlite3CreateColumnExpr(tls, db, pSrc, *(*int32)(unsafe.Pointer(bp + 24)), *(*int32)(unsafe.Pointer(bp + 28))) @@ -85936,7 +85970,7 @@ int32(*(*uint16)(unsafe.Pointer(pRight + 36 + 4))&0x1000>>12)) != 0 { if int32(*(*uint16)(unsafe.Pointer(pSrc + 8 + uintptr(*(*int32)(unsafe.Pointer(bp + 24)))*72 + 36 + 4))&0x400>>10) == 0 || Xsqlite3IdListIndex(tls, *(*uintptr)(unsafe.Pointer(pSrc + 8 + uintptr(*(*int32)(unsafe.Pointer(bp + 24)))*72 + 48)), zName) < 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+18655, + Xsqlite3ErrorMsg(tls, pParse, ts+18702, libc.VaList(bp+16, zName)) break } @@ -86564,16 +86598,16 @@ var z uintptr switch id { case TK_ALL: - z = ts + 18692 + z = ts + 18739 break case TK_INTERSECT: - z = ts + 18702 + z = ts + 18749 break case TK_EXCEPT: - z = ts + 18712 + z = ts + 18759 break default: - z = ts + 18719 + z = ts + 18766 break } return z @@ -86583,7 +86617,7 @@ bp := tls.Alloc(8) defer tls.Free(8) - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18725, libc.VaList(bp, zUsage)) + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18772, libc.VaList(bp, zUsage)) } func generateSortTail(tls *libc.TLS, pParse uintptr, p uintptr, pSort uintptr, nColumn int32, pDest uintptr) { @@ -86609,9 +86643,9 @@ var nRefKey int32 = 0 var aOutEx uintptr = (*Select)(unsafe.Pointer(p)).FpEList + 8 - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18748, libc.VaList(bp, func() uintptr { + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18795, libc.VaList(bp, func() uintptr { if (*SortCtx)(unsafe.Pointer(pSort)).FnOBSat > 0 { - return ts + 18779 + return ts + 18826 } return ts + 1547 }())) @@ -86955,7 +86989,7 @@ } else { var z uintptr = (*ExprList_item)(unsafe.Pointer(pEList + 8 + uintptr(i)*20)).FzEName if z == uintptr(0) { - z = Xsqlite3MPrintf(tls, db, ts+18794, libc.VaList(bp+16, i+1)) + z = Xsqlite3MPrintf(tls, db, ts+18841, libc.VaList(bp+16, i+1)) } else { z = Xsqlite3DbStrDup(tls, db, z) } @@ -87055,7 +87089,7 @@ if zName != 0 && !(Xsqlite3IsTrueOrFalse(tls, zName) != 0) { zName = Xsqlite3DbStrDup(tls, db, zName) } else { - zName = Xsqlite3MPrintf(tls, db, ts+18794, libc.VaList(bp, i+1)) + zName = Xsqlite3MPrintf(tls, db, ts+18841, libc.VaList(bp, i+1)) } *(*U32)(unsafe.Pointer(bp + 48)) = U32(0) @@ -87071,7 +87105,7 @@ nName = j } } - zName = Xsqlite3MPrintf(tls, db, ts+18803, libc.VaList(bp+8, nName, zName, libc.PreIncUint32(&*(*U32)(unsafe.Pointer(bp + 48)), 1))) + zName = Xsqlite3MPrintf(tls, db, ts+18850, libc.VaList(bp+8, nName, zName, libc.PreIncUint32(&*(*U32)(unsafe.Pointer(bp + 48)), 1))) Xsqlite3ProgressCheck(tls, pParse) if *(*U32)(unsafe.Pointer(bp + 48)) > U32(3) { Xsqlite3_randomness(tls, int32(unsafe.Sizeof(U32(0))), bp+48) @@ -87154,8 +87188,6 @@ (*Column)(unsafe.Pointer(pCol)).Faffinity = Xsqlite3ExprAffinity(tls, p) if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) <= SQLITE_AFF_NONE { (*Column)(unsafe.Pointer(pCol)).Faffinity = aff - } else if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) >= SQLITE_AFF_NUMERIC && int32((*Expr)(unsafe.Pointer(p)).Fop) == TK_CAST { - (*Column)(unsafe.Pointer(pCol)).Faffinity = uint8(SQLITE_AFF_FLEXNUM) } if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) >= SQLITE_AFF_TEXT && (*Select)(unsafe.Pointer(pSelect)).FpNext != 0 { var m int32 = 0 @@ -87170,12 +87202,15 @@ } else if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) >= SQLITE_AFF_NUMERIC && m&0x02 != 0 { (*Column)(unsafe.Pointer(pCol)).Faffinity = uint8(SQLITE_AFF_BLOB) } + if 
int32((*Column)(unsafe.Pointer(pCol)).Faffinity) >= SQLITE_AFF_NUMERIC && int32((*Expr)(unsafe.Pointer(p)).Fop) == TK_CAST { + (*Column)(unsafe.Pointer(pCol)).Faffinity = uint8(SQLITE_AFF_FLEXNUM) + } } zType = columnTypeImpl(tls, bp, p, uintptr(0), uintptr(0), uintptr(0)) if zType == uintptr(0) || int32((*Column)(unsafe.Pointer(pCol)).Faffinity) != int32(Xsqlite3AffinityType(tls, zType, uintptr(0))) { if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) == SQLITE_AFF_NUMERIC || int32((*Column)(unsafe.Pointer(pCol)).Faffinity) == SQLITE_AFF_FLEXNUM { - zType = ts + 18811 + zType = ts + 18858 } else { zType = uintptr(0) for j = 1; j < SQLITE_N_STDTYPE; j++ { @@ -87391,7 +87426,7 @@ if !((*Select)(unsafe.Pointer(p)).FpWin != 0) { goto __1 } - Xsqlite3ErrorMsg(tls, pParse, ts+18815, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+18862, 0) return __1: ; @@ -87482,7 +87517,7 @@ if !((*Select)(unsafe.Pointer(pFirstRec)).FselFlags&U32(SF_Aggregate) != 0) { goto __15 } - Xsqlite3ErrorMsg(tls, pParse, ts+18864, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+18911, 0) goto end_of_recursive_query __15: ; @@ -87502,7 +87537,7 @@ ; pSetup = (*Select)(unsafe.Pointer(pFirstRec)).FpPrior (*Select)(unsafe.Pointer(pSetup)).FpNext = uintptr(0) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18906, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18953, 0) rc = Xsqlite3Select(tls, pParse, pSetup, bp) (*Select)(unsafe.Pointer(pSetup)).FpNext = p if !(rc != 0) { @@ -87539,7 +87574,7 @@ Xsqlite3VdbeResolveLabel(tls, v, addrCont) (*Select)(unsafe.Pointer(pFirstRec)).FpPrior = uintptr(0) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18912, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18959, 0) Xsqlite3Select(tls, pParse, p, bp) (*Select)(unsafe.Pointer(pFirstRec)).FpPrior = pSetup @@ -87573,11 +87608,11 @@ p = (*Select)(unsafe.Pointer(p)).FpPrior nRow = nRow + bShowAll } - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18927, libc.VaList(bp, nRow, func() uintptr { + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18974, libc.VaList(bp, nRow, func() uintptr { if nRow == 1 { return ts + 1547 } - return ts + 18950 + return ts + 18997 }())) for p != 0 { selectInnerLoop(tls, pParse, p, -1, uintptr(0), uintptr(0), pDest, 1, 1) @@ -87678,8 +87713,8 @@ if !((*Select)(unsafe.Pointer(pPrior)).FpPrior == uintptr(0)) { goto __8 } - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18952, 0) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18967, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18999, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19014, 0) __8: ; switch int32((*Select)(unsafe.Pointer(p)).Fop) { @@ -87726,7 +87761,7 @@ ; __15: ; - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18692, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18739, 0) rc = Xsqlite3Select(tls, pParse, p, bp+16) @@ -87793,7 +87828,7 @@ pLimit = (*Select)(unsafe.Pointer(p)).FpLimit (*Select)(unsafe.Pointer(p)).FpLimit = uintptr(0) (*SelectDest)(unsafe.Pointer(bp + 48)).FeDest = op - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18986, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19033, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) rc = Xsqlite3Select(tls, pParse, p, bp+48) @@ -87855,7 +87890,7 @@ pLimit1 = (*Select)(unsafe.Pointer(p)).FpLimit (*Select)(unsafe.Pointer(p)).FpLimit = uintptr(0) (*SelectDest)(unsafe.Pointer(bp + 76)).FiSDParm = tab2 - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18986, libc.VaList(bp+8, 
Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19033, libc.VaList(bp+8, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) rc = Xsqlite3Select(tls, pParse, p, bp+76) @@ -88008,10 +88043,10 @@ defer tls.Free(8) if (*Select)(unsafe.Pointer(p)).FselFlags&U32(SF_Values) != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+19007, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+19054, 0) } else { Xsqlite3ErrorMsg(tls, pParse, - ts+19053, + ts+19100, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) } } @@ -88265,8 +88300,8 @@ (*Select)(unsafe.Pointer(pPrior)).FpNext = uintptr(0) (*Select)(unsafe.Pointer(pPrior)).FpOrderBy = Xsqlite3ExprListDup(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pOrderBy, 0) - Xsqlite3ResolveOrderGroupBy(tls, pParse, p, (*Select)(unsafe.Pointer(p)).FpOrderBy, ts+7227) - Xsqlite3ResolveOrderGroupBy(tls, pParse, pPrior, (*Select)(unsafe.Pointer(pPrior)).FpOrderBy, ts+7227) + Xsqlite3ResolveOrderGroupBy(tls, pParse, p, (*Select)(unsafe.Pointer(p)).FpOrderBy, ts+7216) + Xsqlite3ResolveOrderGroupBy(tls, pParse, pPrior, (*Select)(unsafe.Pointer(pPrior)).FpOrderBy, ts+7216) computeLimitRegisters(tls, pParse, p, labelEnd) if (*Select)(unsafe.Pointer(p)).FiLimit != 0 && op == TK_ALL { @@ -88293,13 +88328,13 @@ Xsqlite3SelectDestInit(tls, bp+8, SRT_Coroutine, regAddrA) Xsqlite3SelectDestInit(tls, bp+36, SRT_Coroutine, regAddrB) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19135, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19182, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) addrSelectA = Xsqlite3VdbeCurrentAddr(tls, v) + 1 addr1 = Xsqlite3VdbeAddOp3(tls, v, OP_InitCoroutine, regAddrA, 0, addrSelectA) (*Select)(unsafe.Pointer(pPrior)).FiLimit = regLimitA - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19146, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19193, 0) Xsqlite3Select(tls, pParse, pPrior, bp+8) Xsqlite3VdbeEndCoroutine(tls, v, regAddrA) Xsqlite3VdbeJumpHere(tls, v, addr1) @@ -88311,7 +88346,7 @@ savedOffset = (*Select)(unsafe.Pointer(p)).FiOffset (*Select)(unsafe.Pointer(p)).FiLimit = regLimitB (*Select)(unsafe.Pointer(p)).FiOffset = 0 - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19151, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19198, 0) Xsqlite3Select(tls, pParse, p, bp+36) (*Select)(unsafe.Pointer(p)).FiLimit = savedLimit (*Select)(unsafe.Pointer(p)).FiOffset = savedOffset @@ -88498,7 +88533,8 @@ Xsqlite3VectorErrorMsg(tls, (*SubstContext)(unsafe.Pointer(pSubst)).FpParse, pCopy) } else { var db uintptr = (*Parse)(unsafe.Pointer((*SubstContext)(unsafe.Pointer(pSubst)).FpParse)).Fdb - if (*SubstContext)(unsafe.Pointer(pSubst)).FisOuterJoin != 0 && int32((*Expr)(unsafe.Pointer(pCopy)).Fop) != TK_COLUMN { + if (*SubstContext)(unsafe.Pointer(pSubst)).FisOuterJoin != 0 && + (int32((*Expr)(unsafe.Pointer(pCopy)).Fop) != TK_COLUMN || (*Expr)(unsafe.Pointer(pCopy)).FiTable != (*SubstContext)(unsafe.Pointer(pSubst)).FiNewTable) { libc.Xmemset(tls, bp, 0, uint32(unsafe.Sizeof(Expr{}))) (*Expr)(unsafe.Pointer(bp)).Fop = U8(TK_IF_NULL_ROW) (*Expr)(unsafe.Pointer(bp)).FpLeft = pCopy @@ -89397,7 +89433,7 @@ for pIdx = (*Table)(unsafe.Pointer(pTab)).FpIndex; pIdx != 0 && Xsqlite3StrICmp(tls, (*Index)(unsafe.Pointer(pIdx)).FzName, zIndexedBy) != 0; pIdx = (*Index)(unsafe.Pointer(pIdx)).FpNext { } if !(pIdx != 0) { - Xsqlite3ErrorMsg(tls, 
pParse, ts+19157, libc.VaList(bp, zIndexedBy, 0)) + Xsqlite3ErrorMsg(tls, pParse, ts+19204, libc.VaList(bp, zIndexedBy, 0)) (*Parse)(unsafe.Pointer(pParse)).FcheckSchema = U8(1) return SQLITE_ERROR } @@ -89480,7 +89516,7 @@ defer tls.Free(8) if uint32(int32(*(*uint16)(unsafe.Pointer(pFrom + 36 + 4))&0x4>>2)) != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+19175, libc.VaList(bp, (*SrcItem)(unsafe.Pointer(pFrom)).FzName)) + Xsqlite3ErrorMsg(tls, pParse, ts+19222, libc.VaList(bp, (*SrcItem)(unsafe.Pointer(pFrom)).FzName)) return 1 } return 0 @@ -89609,7 +89645,7 @@ *(*U32)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pFrom)).FpSelect + 4)) |= U32(SF_CopyCte) if uint32(int32(*(*uint16)(unsafe.Pointer(pFrom + 36 + 4))&0x2>>1)) != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+19198, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(pFrom + 64)))) + Xsqlite3ErrorMsg(tls, pParse, ts+19245, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(pFrom + 64)))) return 2 } libc.SetBitFieldPtr16Uint32(pFrom+36+4, uint32(1), 8, 0x100) @@ -89632,7 +89668,7 @@ libc.SetBitFieldPtr16Uint32(pItem+36+4, uint32(1), 6, 0x40) if (*Select)(unsafe.Pointer(pRecTerm)).FselFlags&U32(SF_Recursive) != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+19218, libc.VaList(bp+16, (*Cte)(unsafe.Pointer(pCte)).FzName)) + ts+19265, libc.VaList(bp+16, (*Cte)(unsafe.Pointer(pCte)).FzName)) return 2 } *(*U32)(unsafe.Pointer(pRecTerm + 4)) |= U32(SF_Recursive) @@ -89648,7 +89684,7 @@ pRecTerm = (*Select)(unsafe.Pointer(pRecTerm)).FpPrior } - (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19261 + (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19308 pSavedWith = (*Parse)(unsafe.Pointer(pParse)).FpWith (*Parse)(unsafe.Pointer(pParse)).FpWith = *(*uintptr)(unsafe.Pointer(bp + 48)) if (*Select)(unsafe.Pointer(pSel)).FselFlags&U32(SF_Recursive) != 0 { @@ -89674,7 +89710,7 @@ pEList = (*Select)(unsafe.Pointer(pLeft)).FpEList if (*Cte)(unsafe.Pointer(pCte)).FpCols != 0 { if pEList != 0 && (*ExprList)(unsafe.Pointer(pEList)).FnExpr != (*ExprList)(unsafe.Pointer((*Cte)(unsafe.Pointer(pCte)).FpCols)).FnExpr { - Xsqlite3ErrorMsg(tls, pParse, ts+19284, + Xsqlite3ErrorMsg(tls, pParse, ts+19331, libc.VaList(bp+24, (*Cte)(unsafe.Pointer(pCte)).FzName, (*ExprList)(unsafe.Pointer(pEList)).FnExpr, (*ExprList)(unsafe.Pointer((*Cte)(unsafe.Pointer(pCte)).FpCols)).FnExpr)) (*Parse)(unsafe.Pointer(pParse)).FpWith = pSavedWith return 2 @@ -89685,9 +89721,9 @@ Xsqlite3ColumnsFromExprList(tls, pParse, pEList, pTab+34, pTab+4) if bMayRecursive != 0 { if (*Select)(unsafe.Pointer(pSel)).FselFlags&U32(SF_Recursive) != 0 { - (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19322 + (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19369 } else { - (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19356 + (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19403 } Xsqlite3WalkSelect(tls, pWalker, pSel) } @@ -89734,7 +89770,7 @@ if (*SrcItem)(unsafe.Pointer(pFrom)).FzAlias != 0 { (*Table)(unsafe.Pointer(pTab)).FzName = Xsqlite3DbStrDup(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, (*SrcItem)(unsafe.Pointer(pFrom)).FzAlias) } else { - (*Table)(unsafe.Pointer(pTab)).FzName = Xsqlite3MPrintf(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, ts+19394, libc.VaList(bp, pFrom)) + (*Table)(unsafe.Pointer(pTab)).FzName = Xsqlite3MPrintf(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, ts+19441, libc.VaList(bp, pFrom)) } for (*Select)(unsafe.Pointer(pSel)).FpPrior != 0 { pSel = (*Select)(unsafe.Pointer(pSel)).FpPrior @@ -89846,7 +89882,7 @@ return WRC_Abort } if (*Table)(unsafe.Pointer(pTab)).FnTabRef >= U32(0xffff) { - Xsqlite3ErrorMsg(tls, pParse, 
ts+19398, + Xsqlite3ErrorMsg(tls, pParse, ts+19445, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName)) (*SrcItem)(unsafe.Pointer(pFrom)).FpTab = uintptr(0) return WRC_Abort @@ -89865,7 +89901,7 @@ if int32((*Table)(unsafe.Pointer(pTab)).FeTabType) == TABTYP_VIEW { if (*Sqlite3)(unsafe.Pointer(db)).Fflags&uint64(SQLITE_EnableView) == uint64(0) && (*Table)(unsafe.Pointer(pTab)).FpSchema != (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+1*16)).FpSchema { - Xsqlite3ErrorMsg(tls, pParse, ts+19437, + Xsqlite3ErrorMsg(tls, pParse, ts+19484, libc.VaList(bp+8, (*Table)(unsafe.Pointer(pTab)).FzName)) } (*SrcItem)(unsafe.Pointer(pFrom)).FpSelect = Xsqlite3SelectDup(tls, db, *(*uintptr)(unsafe.Pointer(pTab + 44)), 0) @@ -89989,7 +90025,7 @@ if pNew != 0 { var pX uintptr = pNew + 8 + uintptr((*ExprList)(unsafe.Pointer(pNew)).FnExpr-1)*20 - (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3MPrintf(tls, db, ts+19468, libc.VaList(bp+24, zUName)) + (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3MPrintf(tls, db, ts+19515, libc.VaList(bp+24, zUName)) libc.SetBitFieldPtr16Uint32(pX+8+4, uint32(ENAME_TAB), 0, 0x3) libc.SetBitFieldPtr16Uint32(pX+8+4, uint32(1), 7, 0x80) } @@ -90054,7 +90090,7 @@ (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3DbStrDup(tls, db, (*ExprList_item)(unsafe.Pointer(pNestedFrom+8+uintptr(j)*20)).FzEName) } else { - (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3MPrintf(tls, db, ts+19473, + (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3MPrintf(tls, db, ts+19520, libc.VaList(bp+32, zSchemaName, zTabName, zName)) } @@ -90085,9 +90121,9 @@ ; if !(tableSeen != 0) { if zTName != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+19482, libc.VaList(bp+72, zTName)) + Xsqlite3ErrorMsg(tls, pParse, ts+19529, libc.VaList(bp+72, zTName)) } else { - Xsqlite3ErrorMsg(tls, pParse, ts+19500, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+19547, 0) } } } @@ -90097,7 +90133,7 @@ } if (*Select)(unsafe.Pointer(p)).FpEList != 0 { if (*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer(p)).FpEList)).FnExpr > *(*int32)(unsafe.Pointer(db + 120 + 2*4)) { - Xsqlite3ErrorMsg(tls, pParse, ts+19520, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+19567, 0) return WRC_Abort } if elistFlags&U32(EP_HasFunc|EP_Subquery) != U32(0) { @@ -90235,7 +90271,7 @@ (*AggInfo)(unsafe.Pointer(pAggInfo)).FnColumn = (*AggInfo)(unsafe.Pointer(pAggInfo)).FnAccumulator if int32((*AggInfo)(unsafe.Pointer(pAggInfo)).FnSortingColumn) > 0 { if (*AggInfo)(unsafe.Pointer(pAggInfo)).FnColumn == 0 { - (*AggInfo)(unsafe.Pointer(pAggInfo)).FnSortingColumn = U16(0) + (*AggInfo)(unsafe.Pointer(pAggInfo)).FnSortingColumn = U16((*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer(pSelect)).FpGroupBy)).FnExpr) } else { (*AggInfo)(unsafe.Pointer(pAggInfo)).FnSortingColumn = U16(int32((*AggInfo_col)(unsafe.Pointer((*AggInfo)(unsafe.Pointer(pAggInfo)).FaCol+uintptr((*AggInfo)(unsafe.Pointer(pAggInfo)).FnColumn-1)*16)).FiSorterColumn) + 1) } @@ -90319,13 +90355,13 @@ if *(*uintptr)(unsafe.Pointer(pE + 20)) == uintptr(0) || (*ExprList)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pE + 20)))).FnExpr != 1 { Xsqlite3ErrorMsg(tls, pParse, - ts+19551, 0) + ts+19598, 0) (*AggInfo_func)(unsafe.Pointer(pFunc)).FiDistinct = -1 } else { var pKeyInfo uintptr = Xsqlite3KeyInfoFromExprList(tls, pParse, *(*uintptr)(unsafe.Pointer(pE + 20)), 0, 0) (*AggInfo_func)(unsafe.Pointer(pFunc)).FiDistAddr = Xsqlite3VdbeAddOp4(tls, v, OP_OpenEphemeral, (*AggInfo_func)(unsafe.Pointer(pFunc)).FiDistinct, 0, 0, pKeyInfo, -8) - Xsqlite3VdbeExplain(tls, pParse, 
uint8(0), ts+19602, libc.VaList(bp, (*FuncDef)(unsafe.Pointer((*AggInfo_func)(unsafe.Pointer(pFunc)).FpFunc)).FzName)) + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+19649, libc.VaList(bp, (*FuncDef)(unsafe.Pointer((*AggInfo_func)(unsafe.Pointer(pFunc)).FpFunc)).FzName)) } } @@ -90514,11 +90550,11 @@ if int32((*Parse)(unsafe.Pointer(pParse)).Fexplain) == 2 { var bCover int32 = libc.Bool32(pIdx != uintptr(0) && ((*Table)(unsafe.Pointer(pTab)).FtabFlags&U32(TF_WithoutRowid) == U32(0) || !(int32(*(*uint16)(unsafe.Pointer(pIdx + 56))&0x3>>0) == SQLITE_IDXTYPE_PRIMARYKEY))) - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+19635, + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+19682, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName, func() uintptr { if bCover != 0 { - return ts + 19647 + return ts + 19694 } return ts + 1547 }(), @@ -90846,7 +90882,7 @@ goto __7 } Xsqlite3ErrorMsg(tls, pParse, - ts+19670, + ts+19717, libc.VaList(bp, func() uintptr { if (*SrcItem)(unsafe.Pointer(p0)).FzAlias != 0 { return (*SrcItem)(unsafe.Pointer(p0)).FzAlias @@ -90907,7 +90943,7 @@ if !(int32((*Table)(unsafe.Pointer(pTab)).FnCol) != (*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer(pSub)).FpEList)).FnExpr) { goto __15 } - Xsqlite3ErrorMsg(tls, pParse, ts+19724, + Xsqlite3ErrorMsg(tls, pParse, ts+19771, libc.VaList(bp+8, int32((*Table)(unsafe.Pointer(pTab)).FnCol), (*Table)(unsafe.Pointer(pTab)).FzName, (*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer(pSub)).FpEList)).FnExpr)) goto select_end __15: @@ -91049,7 +91085,7 @@ (*SrcItem)(unsafe.Pointer(pItem1)).FaddrFillSub = addrTop Xsqlite3SelectDestInit(tls, bp+88, SRT_Coroutine, (*SrcItem)(unsafe.Pointer(pItem1)).FregReturn) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19764, libc.VaList(bp+32, pItem1)) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19811, libc.VaList(bp+32, pItem1)) Xsqlite3Select(tls, pParse, pSub1, bp+88) (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pItem1)).FpTab)).FnRowLogEst = (*Select)(unsafe.Pointer(pSub1)).FnSelectRow libc.SetBitFieldPtr16Uint32(pItem1+36+4, uint32(1), 5, 0x20) @@ -91108,7 +91144,7 @@ ; Xsqlite3SelectDestInit(tls, bp+88, SRT_EphemTab, (*SrcItem)(unsafe.Pointer(pItem1)).FiCursor) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19779, libc.VaList(bp+40, pItem1)) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19826, libc.VaList(bp+40, pItem1)) Xsqlite3Select(tls, pParse, pSub1, bp+88) (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pItem1)).FpTab)).FnRowLogEst = (*Select)(unsafe.Pointer(pSub1)).FnSelectRow if !(onceAddr != 0) { @@ -91579,9 +91615,9 @@ explainTempTable(tls, pParse, func() uintptr { if (*DistinctCtx)(unsafe.Pointer(bp+116)).FisTnct != 0 && (*Select)(unsafe.Pointer(p)).FselFlags&U32(SF_Distinct) == U32(0) { - return ts + 19795 + return ts + 19842 } - return ts + 19804 + return ts + 19851 }()) groupBySort = 1 @@ -91932,7 +91968,7 @@ if !(int32((*DistinctCtx)(unsafe.Pointer(bp+116)).FeTnctType) == WHERE_DISTINCT_UNORDERED) { goto __146 } - explainTempTable(tls, pParse, ts+19795) + explainTempTable(tls, pParse, ts+19842) __146: ; if !((*SortCtx)(unsafe.Pointer(bp+48)).FpOrderBy != 0) { @@ -92036,7 +92072,7 @@ } Xsqlite3_free(tls, (*TabResult)(unsafe.Pointer(p)).FzErrMsg) (*TabResult)(unsafe.Pointer(p)).FzErrMsg = Xsqlite3_mprintf(tls, - ts+19813, 0) + ts+19860, 0) (*TabResult)(unsafe.Pointer(p)).Frc = SQLITE_ERROR return 1 __11: @@ -92269,7 +92305,7 @@ if !((*Token)(unsafe.Pointer(pName2)).Fn > uint32(0)) { goto __3 } - Xsqlite3ErrorMsg(tls, pParse, ts+19878, 0) + Xsqlite3ErrorMsg(tls, 
pParse, ts+19925, 0) goto trigger_cleanup __3: ; @@ -92313,7 +92349,7 @@ goto trigger_cleanup __8: ; - Xsqlite3FixInit(tls, bp+36, pParse, iDb, ts+19924, *(*uintptr)(unsafe.Pointer(bp + 32))) + Xsqlite3FixInit(tls, bp+36, pParse, iDb, ts+19971, *(*uintptr)(unsafe.Pointer(bp + 32))) if !(Xsqlite3FixSrcList(tls, bp+36, pTableName) != 0) { goto __9 } @@ -92331,7 +92367,7 @@ if !(int32((*Table)(unsafe.Pointer(pTab)).FeTabType) == TABTYP_VTAB) { goto __11 } - Xsqlite3ErrorMsg(tls, pParse, ts+19932, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+19979, 0) goto trigger_orphan_error __11: ; @@ -92343,7 +92379,7 @@ goto trigger_cleanup __12: ; - if !(Xsqlite3CheckObjectName(tls, pParse, zName, ts+19924, (*Table)(unsafe.Pointer(pTab)).FzName) != 0) { + if !(Xsqlite3CheckObjectName(tls, pParse, zName, ts+19971, (*Table)(unsafe.Pointer(pTab)).FzName) != 0) { goto __13 } goto trigger_cleanup @@ -92358,11 +92394,12 @@ if !!(noErr != 0) { goto __16 } - Xsqlite3ErrorMsg(tls, pParse, ts+19973, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(bp + 32)))) + Xsqlite3ErrorMsg(tls, pParse, ts+20020, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(bp + 32)))) goto __17 __16: ; Xsqlite3CodeVerifySchema(tls, pParse, iDb) + __17: ; goto trigger_cleanup @@ -92373,19 +92410,19 @@ if !(Xsqlite3_strnicmp(tls, (*Table)(unsafe.Pointer(pTab)).FzName, ts+6374, 7) == 0) { goto __18 } - Xsqlite3ErrorMsg(tls, pParse, ts+19999, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+20046, 0) goto trigger_cleanup __18: ; if !(int32((*Table)(unsafe.Pointer(pTab)).FeTabType) == TABTYP_VIEW && tr_tm != TK_INSTEAD) { goto __19 } - Xsqlite3ErrorMsg(tls, pParse, ts+20037, + Xsqlite3ErrorMsg(tls, pParse, ts+20084, libc.VaList(bp+8, func() uintptr { if tr_tm == TK_BEFORE { - return ts + 20074 + return ts + 20121 } - return ts + 20081 + return ts + 20128 }(), pTableName+8)) goto trigger_orphan_error __19: @@ -92394,7 +92431,7 @@ goto __20 } Xsqlite3ErrorMsg(tls, pParse, - ts+20087, libc.VaList(bp+24, pTableName+8)) + ts+20134, libc.VaList(bp+24, pTableName+8)) goto trigger_orphan_error __20: ; @@ -92543,7 +92580,7 @@ __3: ; Xsqlite3TokenInit(tls, bp+56, (*Trigger)(unsafe.Pointer(pTrig)).FzName) - Xsqlite3FixInit(tls, bp+64, pParse, iDb, ts+19924, bp+56) + Xsqlite3FixInit(tls, bp+64, pParse, iDb, ts+19971, bp+56) if !(Xsqlite3FixTriggerStep(tls, bp+64, (*Trigger)(unsafe.Pointer(pTrig)).Fstep_list) != 0 || Xsqlite3FixExpr(tls, bp+64, (*Trigger)(unsafe.Pointer(pTrig)).FpWhen) != 0) { goto __4 @@ -92576,7 +92613,7 @@ goto __12 } Xsqlite3ErrorMsg(tls, pParse, - ts+20133, + ts+20180, libc.VaList(bp, (*Trigger)(unsafe.Pointer(pTrig)).FzName, (*TriggerStep)(unsafe.Pointer(pStep)).FzTarget)) goto triggerfinish_cleanup __12: @@ -92601,13 +92638,13 @@ z = Xsqlite3DbStrNDup(tls, db, (*Token)(unsafe.Pointer(pAll)).Fz, uint64((*Token)(unsafe.Pointer(pAll)).Fn)) Xsqlite3NestedParse(tls, pParse, - ts+20181, + ts+20228, libc.VaList(bp+16, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*16)).FzDbSName, zName, (*Trigger)(unsafe.Pointer(pTrig)).Ftable, z)) Xsqlite3DbFree(tls, db, z) Xsqlite3ChangeCookie(tls, pParse, iDb) Xsqlite3VdbeAddParseSchemaOp(tls, v, iDb, - Xsqlite3MPrintf(tls, db, ts+20256, libc.VaList(bp+48, zName)), uint16(0)) + Xsqlite3MPrintf(tls, db, ts+20303, libc.VaList(bp+48, zName)), uint16(0)) __7: ; __6: @@ -92863,7 +92900,7 @@ if !!(noErr != 0) { goto __9 } - Xsqlite3ErrorMsg(tls, pParse, ts+20285, libc.VaList(bp, pName+8)) + Xsqlite3ErrorMsg(tls, pParse, ts+20332, libc.VaList(bp, pName+8)) goto __10 __9: Xsqlite3CodeVerifyNamedSchema(tls, pParse, 
zDb) @@ -92916,7 +92953,7 @@ if libc.AssignUintptr(&v, Xsqlite3GetVdbe(tls, pParse)) != uintptr(0) { Xsqlite3NestedParse(tls, pParse, - ts+20305, + ts+20352, libc.VaList(bp, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*16)).FzDbSName, (*Trigger)(unsafe.Pointer(pTrigger)).FzName)) Xsqlite3ChangeCookie(tls, pParse, iDb) Xsqlite3VdbeAddOp4(tls, v, OP_DropTrigger, iDb, 0, 0, (*Trigger)(unsafe.Pointer(pTrigger)).FzName, 0) @@ -93030,12 +93067,12 @@ goto __15 } Xsqlite3ErrorMsg(tls, pParse, - ts+20367, + ts+20414, libc.VaList(bp, func() uintptr { if op == TK_DELETE { - return ts + 20415 + return ts + 20462 } - return ts + 20422 + return ts + 20469 }())) __15: ; @@ -93149,7 +93186,7 @@ if int32((*Expr)(unsafe.Pointer((*Expr)(unsafe.Pointer(pTerm)).FpRight)).Fop) != TK_ASTERISK { return 0 } - Xsqlite3ErrorMsg(tls, pParse, ts+20429, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+20476, 0) return 1 } @@ -93215,7 +93252,7 @@ } Xsqlite3ExprListDelete(tls, db, (*Select)(unsafe.Pointer(bp)).FpEList) pNew = sqlite3ExpandReturning(tls, pParse, (*Returning)(unsafe.Pointer(pReturning)).FpReturnEL, pTab) - if !(int32((*Sqlite3)(unsafe.Pointer(db)).FmallocFailed) != 0) { + if (*Parse)(unsafe.Pointer(pParse)).FnErr == 0 { libc.Xmemset(tls, bp+160, 0, uint32(unsafe.Sizeof(NameContext{}))) if (*Returning)(unsafe.Pointer(pReturning)).FnRetCol == 0 { (*Returning)(unsafe.Pointer(pReturning)).FnRetCol = (*ExprList)(unsafe.Pointer(pNew)).FnExpr @@ -93379,7 +93416,7 @@ if v != 0 { if (*Trigger)(unsafe.Pointer(pTrigger)).FzName != 0 { Xsqlite3VdbeChangeP4(tls, v, -1, - Xsqlite3MPrintf(tls, db, ts+20471, libc.VaList(bp, (*Trigger)(unsafe.Pointer(pTrigger)).FzName)), -6) + Xsqlite3MPrintf(tls, db, ts+20518, libc.VaList(bp, (*Trigger)(unsafe.Pointer(pTrigger)).FzName)), -6) } if (*Trigger)(unsafe.Pointer(pTrigger)).FpWhen != 0 { @@ -93972,7 +94009,7 @@ } Xsqlite3ErrorMsg(tls, pParse, - ts+20485, + ts+20532, libc.VaList(bp, (*Column)(unsafe.Pointer((*Table)(unsafe.Pointer(pTab)).FaCol+uintptr(j)*16)).FzCnName)) goto update_cleanup __27: @@ -94004,7 +94041,7 @@ iRowidExpr = i goto __30 __29: - Xsqlite3ErrorMsg(tls, pParse, ts+20521, libc.VaList(bp+8, (*ExprList_item)(unsafe.Pointer(pChanges+8+uintptr(i)*20)).FzEName)) + Xsqlite3ErrorMsg(tls, pParse, ts+20568, libc.VaList(bp+8, (*ExprList_item)(unsafe.Pointer(pChanges+8+uintptr(i)*20)).FzEName)) (*Parse)(unsafe.Pointer(pParse)).FcheckSchema = U8(1) goto update_cleanup __30: @@ -94330,7 +94367,12 @@ goto __77 __76: flags = WHERE_ONEPASS_DESIRED - if !(!(int32((*Parse)(unsafe.Pointer(pParse)).Fnested) != 0) && !(pTrigger != 0) && !(hasFK != 0) && !(chngKey != 0) && !(*(*int32)(unsafe.Pointer(bp + 68)) != 0)) { + if !(!(int32((*Parse)(unsafe.Pointer(pParse)).Fnested) != 0) && + !(pTrigger != 0) && + !(hasFK != 0) && + !(chngKey != 0) && + !(*(*int32)(unsafe.Pointer(bp + 68)) != 0) && + (*NameContext)(unsafe.Pointer(bp+28)).FncFlags&NC_Subquery == 0) { goto __78 } flags = flags | WHERE_ONEPASS_MULTIROW @@ -94884,7 +94926,7 @@ if !(regRowCount != 0) { goto __169 } - Xsqlite3CodeChangeCount(tls, v, regRowCount, ts+20540) + Xsqlite3CodeChangeCount(tls, v, regRowCount, ts+20587) __169: ; update_cleanup: @@ -95190,10 +95232,10 @@ if nClause == 0 && (*Upsert)(unsafe.Pointer(pUpsert)).FpNextUpsert == uintptr(0) { *(*uint8)(unsafe.Pointer(bp + 152)) = uint8(0) } else { - Xsqlite3_snprintf(tls, int32(unsafe.Sizeof([16]uint8{})), bp+152, ts+20553, libc.VaList(bp, nClause+1)) + Xsqlite3_snprintf(tls, int32(unsafe.Sizeof([16]uint8{})), bp+152, ts+20600, libc.VaList(bp, 
nClause+1)) } Xsqlite3ErrorMsg(tls, pParse, - ts+20557, libc.VaList(bp+8, bp+152)) + ts+20604, libc.VaList(bp+8, bp+152)) return SQLITE_ERROR } @@ -95316,7 +95358,7 @@ var zSubSql uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp)), 0) if zSubSql != 0 && - (libc.Xstrncmp(tls, zSubSql, ts+20630, uint32(3)) == 0 || libc.Xstrncmp(tls, zSubSql, ts+20634, uint32(3)) == 0) { + (libc.Xstrncmp(tls, zSubSql, ts+20677, uint32(3)) == 0 || libc.Xstrncmp(tls, zSubSql, ts+20681, uint32(3)) == 0) { rc = execSql(tls, db, pzErrMsg, zSubSql) if rc != SQLITE_OK { break @@ -95464,14 +95506,14 @@ if !!(int32((*Sqlite3)(unsafe.Pointer(db)).FautoCommit) != 0) { goto __1 } - Xsqlite3SetString(tls, pzErrMsg, db, ts+20638) + Xsqlite3SetString(tls, pzErrMsg, db, ts+20685) return SQLITE_ERROR __1: ; if !((*Sqlite3)(unsafe.Pointer(db)).FnVdbeActive > 1) { goto __2 } - Xsqlite3SetString(tls, pzErrMsg, db, ts+20678) + Xsqlite3SetString(tls, pzErrMsg, db, ts+20725) return SQLITE_ERROR __2: ; @@ -95482,7 +95524,7 @@ if !(Xsqlite3_value_type(tls, pOut) != SQLITE_TEXT) { goto __5 } - Xsqlite3SetString(tls, pzErrMsg, db, ts+20721) + Xsqlite3SetString(tls, pzErrMsg, db, ts+20768) return SQLITE_ERROR __5: ; @@ -95510,7 +95552,7 @@ isMemDb = Xsqlite3PagerIsMemdb(tls, Xsqlite3BtreePager(tls, pMain)) nDb = (*Sqlite3)(unsafe.Pointer(db)).FnDb - rc = execSqlF(tls, db, pzErrMsg, ts+20739, libc.VaList(bp, zOut)) + rc = execSqlF(tls, db, pzErrMsg, ts+20786, libc.VaList(bp, zOut)) (*Sqlite3)(unsafe.Pointer(db)).FopenFlags = saved_openFlags if !(rc != SQLITE_OK) { goto __6 @@ -95530,7 +95572,7 @@ goto __8 } rc = SQLITE_ERROR - Xsqlite3SetString(tls, pzErrMsg, db, ts+20762) + Xsqlite3SetString(tls, pzErrMsg, db, ts+20809) goto end_of_vacuum __8: ; @@ -95590,7 +95632,7 @@ (*Sqlite3)(unsafe.Pointer(db)).Finit.FiDb = U8(nDb) rc = execSqlF(tls, db, pzErrMsg, - ts+20789, + ts+20836, libc.VaList(bp+8, zDbMain)) if !(rc != SQLITE_OK) { goto __13 @@ -95599,7 +95641,7 @@ __13: ; rc = execSqlF(tls, db, pzErrMsg, - ts+20897, + ts+20944, libc.VaList(bp+16, zDbMain)) if !(rc != SQLITE_OK) { goto __14 @@ -95610,7 +95652,7 @@ (*Sqlite3)(unsafe.Pointer(db)).Finit.FiDb = U8(0) rc = execSqlF(tls, db, pzErrMsg, - ts+20951, + ts+20998, libc.VaList(bp+24, zDbMain)) *(*U32)(unsafe.Pointer(db + 24)) &= libc.Uint32FromInt32(libc.CplInt32(DBFLAG_Vacuum)) @@ -95621,7 +95663,7 @@ __15: ; rc = execSqlF(tls, db, pzErrMsg, - ts+21102, + ts+21149, libc.VaList(bp+32, zDbMain)) if !(rc != 0) { goto __16 @@ -96050,11 +96092,11 @@ if pEnd != 0 { (*Parse)(unsafe.Pointer(pParse)).FsNameToken.Fn = uint32((int32((*Token)(unsafe.Pointer(pEnd)).Fz)-int32((*Parse)(unsafe.Pointer(pParse)).FsNameToken.Fz))/1) + (*Token)(unsafe.Pointer(pEnd)).Fn } - zStmt = Xsqlite3MPrintf(tls, db, ts+21232, libc.VaList(bp, pParse+196)) + zStmt = Xsqlite3MPrintf(tls, db, ts+21279, libc.VaList(bp, pParse+196)) iDb = Xsqlite3SchemaToIndex(tls, db, (*Table)(unsafe.Pointer(pTab)).FpSchema) Xsqlite3NestedParse(tls, pParse, - ts+21256, + ts+21303, libc.VaList(bp+8, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*16)).FzDbSName, (*Table)(unsafe.Pointer(pTab)).FzName, (*Table)(unsafe.Pointer(pTab)).FzName, @@ -96064,7 +96106,7 @@ Xsqlite3ChangeCookie(tls, pParse, iDb) Xsqlite3VdbeAddOp0(tls, v, OP_Expire) - zWhere = Xsqlite3MPrintf(tls, db, ts+21355, libc.VaList(bp+48, (*Table)(unsafe.Pointer(pTab)).FzName, zStmt)) + zWhere = Xsqlite3MPrintf(tls, db, ts+21402, libc.VaList(bp+48, (*Table)(unsafe.Pointer(pTab)).FzName, zStmt)) Xsqlite3VdbeAddParseSchemaOp(tls, v, iDb, 
zWhere, uint16(0)) Xsqlite3DbFree(tls, db, zStmt) @@ -96125,7 +96167,7 @@ for pCtx = (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx; pCtx != 0; pCtx = (*VtabCtx)(unsafe.Pointer(pCtx)).FpPrior { if (*VtabCtx)(unsafe.Pointer(pCtx)).FpTab == pTab { *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, - ts+21374, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName)) + ts+21421, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName)) return SQLITE_LOCKED } } @@ -96153,9 +96195,11 @@ (*VtabCtx)(unsafe.Pointer(bp + 32)).FpPrior = (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx (*VtabCtx)(unsafe.Pointer(bp + 32)).FbDeclared = 0 (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx = bp + 32 + (*Table)(unsafe.Pointer(pTab)).FnTabRef++ rc = (*struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, uintptr, uintptr) int32 })(unsafe.Pointer(&struct{ uintptr }{xConstruct})).f(tls, db, (*Module)(unsafe.Pointer(pMod)).FpAux, nArg, azArg, pVTable+8, bp+48) + Xsqlite3DeleteTable(tls, db, pTab) (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx = (*VtabCtx)(unsafe.Pointer(bp + 32)).FpPrior if rc == SQLITE_NOMEM { Xsqlite3OomFault(tls, db) @@ -96163,7 +96207,7 @@ if SQLITE_OK != rc { if *(*uintptr)(unsafe.Pointer(bp + 48)) == uintptr(0) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+21416, libc.VaList(bp+8, zModuleName)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+21463, libc.VaList(bp+8, zModuleName)) } else { *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+3656, libc.VaList(bp+16, *(*uintptr)(unsafe.Pointer(bp + 48)))) Xsqlite3_free(tls, *(*uintptr)(unsafe.Pointer(bp + 48))) @@ -96175,7 +96219,7 @@ (*Module)(unsafe.Pointer(pMod)).FnRefModule++ (*VTable)(unsafe.Pointer(pVTable)).FnRef = 1 if (*VtabCtx)(unsafe.Pointer(bp+32)).FbDeclared == 0 { - var zFormat uintptr = ts + 21446 + var zFormat uintptr = ts + 21493 *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, zFormat, libc.VaList(bp+24, (*Table)(unsafe.Pointer(pTab)).FzName)) Xsqlite3VtabUnlock(tls, pVTable) rc = SQLITE_ERROR @@ -96249,7 +96293,7 @@ if !(pMod != 0) { var zModule uintptr = *(*uintptr)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pTab + 44 + 4)))) - Xsqlite3ErrorMsg(tls, pParse, ts+21492, libc.VaList(bp, zModule)) + Xsqlite3ErrorMsg(tls, pParse, ts+21539, libc.VaList(bp, zModule)) rc = SQLITE_ERROR } else { *(*uintptr)(unsafe.Pointer(bp + 16)) = uintptr(0) @@ -96307,7 +96351,7 @@ pMod = Xsqlite3HashFind(tls, db+404, zMod) if pMod == uintptr(0) || (*Sqlite3_module)(unsafe.Pointer((*Module)(unsafe.Pointer(pMod)).FpModule)).FxCreate == uintptr(0) || (*Sqlite3_module)(unsafe.Pointer((*Module)(unsafe.Pointer(pMod)).FpModule)).FxDestroy == uintptr(0) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+21492, libc.VaList(bp, zMod)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+21539, libc.VaList(bp, zMod)) rc = SQLITE_ERROR } else { rc = vtabCallConstructor(tls, db, pTab, pMod, (*Sqlite3_module)(unsafe.Pointer((*Module)(unsafe.Pointer(pMod)).FpModule)).FxCreate, pzErr) @@ -96341,7 +96385,7 @@ if !(pCtx != 0) || (*VtabCtx)(unsafe.Pointer(pCtx)).FbDeclared != 0 { Xsqlite3Error(tls, db, SQLITE_MISUSE) Xsqlite3_mutex_leave(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) - return Xsqlite3MisuseError(tls, 151030) + return Xsqlite3MisuseError(tls, 151102) } pTab = (*VtabCtx)(unsafe.Pointer(pCtx)).FpTab @@ -96794,7 +96838,7 @@ Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) p = (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx if !(p != 0) { - rc = 
Xsqlite3MisuseError(tls, 151521) + rc = Xsqlite3MisuseError(tls, 151593) } else { ap = va switch op { @@ -96821,7 +96865,7 @@ fallthrough default: { - rc = Xsqlite3MisuseError(tls, 151539) + rc = Xsqlite3MisuseError(tls, 151611) break } @@ -97053,7 +97097,7 @@ func explainIndexColumnName(tls *libc.TLS, pIdx uintptr, i int32) uintptr { i = int32(*(*I16)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx)).FaiColumn + uintptr(i)*2))) if i == -2 { - return ts + 21511 + return ts + 21558 } if i == -1 { return ts + 16260 @@ -97065,11 +97109,11 @@ var i int32 if bAnd != 0 { - Xsqlite3_str_append(tls, pStr, ts+21518, 5) + Xsqlite3_str_append(tls, pStr, ts+21565, 5) } if nTerm > 1 { - Xsqlite3_str_append(tls, pStr, ts+21524, 1) + Xsqlite3_str_append(tls, pStr, ts+21571, 1) } for i = 0; i < nTerm; i++ { if i != 0 { @@ -97084,7 +97128,7 @@ Xsqlite3_str_append(tls, pStr, zOp, 1) if nTerm > 1 { - Xsqlite3_str_append(tls, pStr, ts+21524, 1) + Xsqlite3_str_append(tls, pStr, ts+21571, 1) } for i = 0; i < nTerm; i++ { if i != 0 { @@ -97110,27 +97154,27 @@ if int32(nEq) == 0 && (*WhereLoop)(unsafe.Pointer(pLoop)).FwsFlags&U32(WHERE_BTM_LIMIT|WHERE_TOP_LIMIT) == U32(0) { return } - Xsqlite3_str_append(tls, pStr, ts+21526, 2) + Xsqlite3_str_append(tls, pStr, ts+21573, 2) for i = 0; i < int32(nEq); i++ { var z uintptr = explainIndexColumnName(tls, pIndex, i) if i != 0 { - Xsqlite3_str_append(tls, pStr, ts+21518, 5) + Xsqlite3_str_append(tls, pStr, ts+21565, 5) } Xsqlite3_str_appendf(tls, pStr, func() uintptr { if i >= int32(nSkip) { - return ts + 21529 + return ts + 21576 } - return ts + 21534 + return ts + 21581 }(), libc.VaList(bp, z)) } j = i if (*WhereLoop)(unsafe.Pointer(pLoop)).FwsFlags&U32(WHERE_BTM_LIMIT) != 0 { - explainAppendTerm(tls, pStr, pIndex, int32(*(*U16)(unsafe.Pointer(pLoop + 24 + 2))), j, i, ts+21542) + explainAppendTerm(tls, pStr, pIndex, int32(*(*U16)(unsafe.Pointer(pLoop + 24 + 2))), j, i, ts+21589) i = 1 } if (*WhereLoop)(unsafe.Pointer(pLoop)).FwsFlags&U32(WHERE_TOP_LIMIT) != 0 { - explainAppendTerm(tls, pStr, pIndex, int32(*(*U16)(unsafe.Pointer(pLoop + 24 + 4))), j, i, ts+21544) + explainAppendTerm(tls, pStr, pIndex, int32(*(*U16)(unsafe.Pointer(pLoop + 24 + 4))), j, i, ts+21591) } Xsqlite3_str_append(tls, pStr, ts+4950, 1) } @@ -97173,11 +97217,11 @@ Xsqlite3StrAccumInit(tls, bp+64, db, bp+88, int32(unsafe.Sizeof([100]uint8{})), SQLITE_MAX_LENGTH) (*StrAccum)(unsafe.Pointer(bp + 64)).FprintfFlags = U8(SQLITE_PRINTF_INTERNAL) - Xsqlite3_str_appendf(tls, bp+64, ts+21546, libc.VaList(bp, func() uintptr { + Xsqlite3_str_appendf(tls, bp+64, ts+21593, libc.VaList(bp, func() uintptr { if isSearch != 0 { - return ts + 21552 + return ts + 21599 } - return ts + 21559 + return ts + 21606 }(), pItem)) if flags&U32(WHERE_IPK|WHERE_VIRTUALTABLE) == U32(0) { var zFmt uintptr = uintptr(0) @@ -97190,40 +97234,40 @@ zFmt = ts + 10969 } } else if flags&U32(WHERE_PARTIALIDX) != 0 { - zFmt = ts + 21564 + zFmt = ts + 21611 } else if flags&U32(WHERE_AUTO_INDEX) != 0 { - zFmt = ts + 21597 + zFmt = ts + 21644 } else if flags&U32(WHERE_IDX_ONLY) != 0 { - zFmt = ts + 21622 + zFmt = ts + 21669 } else { - zFmt = ts + 21640 + zFmt = ts + 21687 } if zFmt != 0 { - Xsqlite3_str_append(tls, bp+64, ts+21649, 7) + Xsqlite3_str_append(tls, bp+64, ts+21696, 7) Xsqlite3_str_appendf(tls, bp+64, zFmt, libc.VaList(bp+16, (*Index)(unsafe.Pointer(pIdx)).FzName)) explainIndexRange(tls, bp+64, pLoop) } } else if flags&U32(WHERE_IPK) != U32(0) && flags&U32(WHERE_CONSTRAINT) != U32(0) { var cRangeOp uint8 var zRowid uintptr = ts + 
16260 - Xsqlite3_str_appendf(tls, bp+64, ts+21657, libc.VaList(bp+24, zRowid)) + Xsqlite3_str_appendf(tls, bp+64, ts+21704, libc.VaList(bp+24, zRowid)) if flags&U32(WHERE_COLUMN_EQ|WHERE_COLUMN_IN) != 0 { cRangeOp = uint8('=') } else if flags&U32(WHERE_BOTH_LIMIT) == U32(WHERE_BOTH_LIMIT) { - Xsqlite3_str_appendf(tls, bp+64, ts+21688, libc.VaList(bp+32, zRowid)) + Xsqlite3_str_appendf(tls, bp+64, ts+21735, libc.VaList(bp+32, zRowid)) cRangeOp = uint8('<') } else if flags&U32(WHERE_BTM_LIMIT) != 0 { cRangeOp = uint8('>') } else { cRangeOp = uint8('<') } - Xsqlite3_str_appendf(tls, bp+64, ts+21698, libc.VaList(bp+40, int32(cRangeOp))) + Xsqlite3_str_appendf(tls, bp+64, ts+21745, libc.VaList(bp+40, int32(cRangeOp))) } else if flags&U32(WHERE_VIRTUALTABLE) != U32(0) { - Xsqlite3_str_appendf(tls, bp+64, ts+21703, + Xsqlite3_str_appendf(tls, bp+64, ts+21750, libc.VaList(bp+48, *(*int32)(unsafe.Pointer(pLoop + 24)), *(*uintptr)(unsafe.Pointer(pLoop + 24 + 12)))) } if int32((*SrcItem)(unsafe.Pointer(pItem)).Ffg.Fjointype)&JT_LEFT != 0 { - Xsqlite3_str_appendf(tls, bp+64, ts+21730, 0) + Xsqlite3_str_appendf(tls, bp+64, ts+21777, 0) } zMsg = Xsqlite3StrAccumFinish(tls, bp+64) @@ -97255,22 +97299,22 @@ Xsqlite3StrAccumInit(tls, bp+24, db, bp+48, int32(unsafe.Sizeof([100]uint8{})), SQLITE_MAX_LENGTH) (*StrAccum)(unsafe.Pointer(bp + 24)).FprintfFlags = U8(SQLITE_PRINTF_INTERNAL) - Xsqlite3_str_appendf(tls, bp+24, ts+21741, libc.VaList(bp, pItem)) + Xsqlite3_str_appendf(tls, bp+24, ts+21788, libc.VaList(bp, pItem)) pLoop = (*WhereLevel)(unsafe.Pointer(pLevel)).FpWLoop if (*WhereLoop)(unsafe.Pointer(pLoop)).FwsFlags&U32(WHERE_IPK) != 0 { var pTab uintptr = (*SrcItem)(unsafe.Pointer(pItem)).FpTab if int32((*Table)(unsafe.Pointer(pTab)).FiPKey) >= 0 { - Xsqlite3_str_appendf(tls, bp+24, ts+21529, libc.VaList(bp+8, (*Column)(unsafe.Pointer((*Table)(unsafe.Pointer(pTab)).FaCol+uintptr((*Table)(unsafe.Pointer(pTab)).FiPKey)*16)).FzCnName)) + Xsqlite3_str_appendf(tls, bp+24, ts+21576, libc.VaList(bp+8, (*Column)(unsafe.Pointer((*Table)(unsafe.Pointer(pTab)).FaCol+uintptr((*Table)(unsafe.Pointer(pTab)).FiPKey)*16)).FzCnName)) } else { - Xsqlite3_str_appendf(tls, bp+24, ts+21762, 0) + Xsqlite3_str_appendf(tls, bp+24, ts+21809, 0) } } else { for i = int32((*WhereLoop)(unsafe.Pointer(pLoop)).FnSkip); i < int32(*(*U16)(unsafe.Pointer(pLoop + 24))); i++ { var z uintptr = explainIndexColumnName(tls, *(*uintptr)(unsafe.Pointer(pLoop + 24 + 8)), i) if i > int32((*WhereLoop)(unsafe.Pointer(pLoop)).FnSkip) { - Xsqlite3_str_append(tls, bp+24, ts+21518, 5) + Xsqlite3_str_append(tls, bp+24, ts+21565, 5) } - Xsqlite3_str_appendf(tls, bp+24, ts+21529, libc.VaList(bp+16, z)) + Xsqlite3_str_appendf(tls, bp+24, ts+21576, libc.VaList(bp+16, z)) } } Xsqlite3_str_append(tls, bp+24, ts+4950, 1) @@ -98867,7 +98911,7 @@ ; __126: ; - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21770, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21817, 0) ii = 0 __135: if !(ii < (*WhereClause)(unsafe.Pointer(pOrWc)).FnTerm) { @@ -98895,7 +98939,7 @@ pOrExpr = pAndExpr __140: ; - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21785, libc.VaList(bp, ii+1)) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21832, libc.VaList(bp, ii+1)) pSubWInfo = Xsqlite3WhereBegin(tls, pParse, pOrTab, pOrExpr, uintptr(0), uintptr(0), uintptr(0), uint16(WHERE_OR_SUBCLAUSE), iCovCur) @@ -99413,7 +99457,7 @@ var mAll Bitmask = uint64(0) var k int32 - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21794, libc.VaList(bp, 
(*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pTabItem)).FpTab)).FzName)) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21841, libc.VaList(bp, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pTabItem)).FpTab)).FzName)) for k = 0; k < iLevel; k++ { var iIdxCur int32 @@ -99774,7 +99818,7 @@ {FzOp: ts + 16109, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_MATCH)}, {FzOp: ts + 15440, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_GLOB)}, {FzOp: ts + 14960, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_LIKE)}, - {FzOp: ts + 21808, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_REGEXP)}, + {FzOp: ts + 21855, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_REGEXP)}, } func transferJoinMarkings(tls *libc.TLS, pDerived uintptr, pBase uintptr) { @@ -100264,12 +100308,12 @@ extraRight = x - uint64(1) if prereqAll>>1 >= x { - Xsqlite3ErrorMsg(tls, pParse, ts+21815, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+21862, 0) return } } else if prereqAll>>1 >= x { if (*SrcList)(unsafe.Pointer(pSrc)).FnSrc > 0 && int32((*SrcItem)(unsafe.Pointer(pSrc+8)).Ffg.Fjointype)&JT_LTORJ != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+21815, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+21862, 0) return } *(*U32)(unsafe.Pointer(pExpr + 4)) &= libc.Uint32FromInt32(libc.CplInt32(EP_InnerON)) @@ -100348,7 +100392,7 @@ !((*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_OuterON) != U32(0)) && 0 == Xsqlite3ExprCanBeNull(tls, pLeft) { (*Expr)(unsafe.Pointer(pExpr)).Fop = U8(TK_TRUEFALSE) - *(*uintptr)(unsafe.Pointer(pExpr + 8)) = ts + 6762 + *(*uintptr)(unsafe.Pointer(pExpr + 8)) = ts + 7692 *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_IsFalse) (*WhereTerm)(unsafe.Pointer(pTerm)).FprereqAll = uint64(0) (*WhereTerm)(unsafe.Pointer(pTerm)).FeOperator = U16(0) @@ -100442,7 +100486,7 @@ } zCollSeqName = func() uintptr { if *(*int32)(unsafe.Pointer(bp + 16)) != 0 { - return ts + 21856 + return ts + 21903 } return uintptr(unsafe.Pointer(&Xsqlite3StrBINARY)) }() @@ -100818,7 +100862,7 @@ k++ } if k >= int32((*Table)(unsafe.Pointer(pTab)).FnCol) { - Xsqlite3ErrorMsg(tls, pParse, ts+21863, + Xsqlite3ErrorMsg(tls, pParse, ts+21910, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName, j)) return } @@ -100834,7 +100878,7 @@ pRhs = Xsqlite3PExpr(tls, pParse, TK_UPLUS, Xsqlite3ExprDup(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, (*ExprList_item)(unsafe.Pointer(pArgs+8+uintptr(j)*20)).FpExpr, 0), uintptr(0)) pTerm = Xsqlite3PExpr(tls, pParse, TK_EQ, pColRef, pRhs) - if int32((*SrcItem)(unsafe.Pointer(pItem)).Ffg.Fjointype)&(JT_LEFT|JT_LTORJ) != 0 { + if int32((*SrcItem)(unsafe.Pointer(pItem)).Ffg.Fjointype)&(JT_LEFT|JT_LTORJ|JT_RIGHT) != 0 { joinType = U32(EP_OuterON) } else { joinType = U32(EP_InnerON) @@ -101551,7 +101595,7 @@ goto __6 } Xsqlite3_log(tls, SQLITE_WARNING|int32(1)<<8, - ts+21899, libc.VaList(bp, (*Table)(unsafe.Pointer(pTable)).FzName, + ts+21946, libc.VaList(bp, (*Table)(unsafe.Pointer(pTable)).FzName, (*Column)(unsafe.Pointer((*Table)(unsafe.Pointer(pTable)).FaCol+uintptr(iCol)*16)).FzCnName)) sentWarning = U8(1) __6: @@ -101622,7 +101666,7 @@ __14: ; *(*uintptr)(unsafe.Pointer(pLoop + 24 + 8)) = pIdx - (*Index)(unsafe.Pointer(pIdx)).FzName = ts + 21925 + (*Index)(unsafe.Pointer(pIdx)).FzName = ts + 21972 (*Index)(unsafe.Pointer(pIdx)).FpTable = pTable n = 0 idxCols = uint64(0) @@ -101796,6 +101840,10 @@ var v uintptr = (*Parse)(unsafe.Pointer(pParse)).FpVdbe var pLoop uintptr = (*WhereLevel)(unsafe.Pointer(pLevel)).FpWLoop var iCur int32 + var saved_pIdxEpr uintptr + + saved_pIdxEpr = (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr + (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr = uintptr(0) 
addrOnce = Xsqlite3VdbeAddOp0(tls, v, OP_Once) for __ccgo := true; __ccgo; __ccgo = iLevel < int32((*WhereInfo)(unsafe.Pointer(pWInfo)).FnLevel) { @@ -101839,9 +101887,7 @@ var r1 int32 = Xsqlite3GetTempRange(tls, pParse, n) var jj int32 for jj = 0; jj < n; jj++ { - var iCol int32 = int32(*(*I16)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx)).FaiColumn + uintptr(jj)*2))) - - Xsqlite3ExprCodeGetColumnOfTable(tls, v, (*Index)(unsafe.Pointer(pIdx)).FpTable, iCur, iCol, r1+jj) + Xsqlite3ExprCodeLoadIndexColumn(tls, pParse, pIdx, iCur, jj, r1+jj) } Xsqlite3VdbeAddOp4Int(tls, v, OP_FilterAdd, (*WhereLevel)(unsafe.Pointer(pLevel)).FregFilter, 0, r1, n) Xsqlite3ReleaseTempRange(tls, pParse, r1, n) @@ -101875,6 +101921,7 @@ } } Xsqlite3VdbeJumpHere(tls, v, addrOnce) + (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr = saved_pIdxEpr } func allocateIndexInfo(tls *libc.TLS, pWInfo uintptr, pWC uintptr, mUnusable Bitmask, pSrc uintptr, pmNoOmit uintptr) uintptr { @@ -102133,11 +102180,16 @@ _ = pParse + if !((*Table)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx)).FpTable)).FtabFlags&U32(TF_WithoutRowid) == U32(0)) && int32(*(*uint16)(unsafe.Pointer(pIdx + 56))&0x3>>0) == SQLITE_IDXTYPE_PRIMARYKEY { + nField = int32((*Index)(unsafe.Pointer(pIdx)).FnKeyCol) + } else { + nField = int32((*Index)(unsafe.Pointer(pIdx)).FnColumn) + } nField = func() int32 { - if int32((*UnpackedRecord)(unsafe.Pointer(pRec)).FnField) < (*Index)(unsafe.Pointer(pIdx)).FnSample { + if int32((*UnpackedRecord)(unsafe.Pointer(pRec)).FnField) < nField { return int32((*UnpackedRecord)(unsafe.Pointer(pRec)).FnField) } - return (*Index)(unsafe.Pointer(pIdx)).FnSample + return nField }() iCol = 0 iSample = (*Index)(unsafe.Pointer(pIdx)).FnSample * nField @@ -103717,7 +103769,7 @@ j >= (*WhereClause)(unsafe.Pointer(pWC)).FnTerm || *(*uintptr)(unsafe.Pointer((*WhereLoop)(unsafe.Pointer(pNew)).FaLTerm + uintptr(iTerm)*4)) != uintptr(0) || int32((*sqlite3_index_constraint)(unsafe.Pointer(pIdxCons)).Fusable) == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+21936, libc.VaList(bp, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pSrc)).FpTab)).FzName)) + Xsqlite3ErrorMsg(tls, pParse, ts+21983, libc.VaList(bp, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pSrc)).FpTab)).FzName)) return SQLITE_ERROR } @@ -103775,7 +103827,7 @@ (*WhereLoop)(unsafe.Pointer(pNew)).FnLTerm = U16(mxTerm + 1) for i = 0; i <= mxTerm; i++ { if *(*uintptr)(unsafe.Pointer((*WhereLoop)(unsafe.Pointer(pNew)).FaLTerm + uintptr(i)*4)) == uintptr(0) { - Xsqlite3ErrorMsg(tls, pParse, ts+21936, libc.VaList(bp+8, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pSrc)).FpTab)).FzName)) + Xsqlite3ErrorMsg(tls, pParse, ts+21983, libc.VaList(bp+8, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pSrc)).FpTab)).FzName)) return SQLITE_ERROR } @@ -104173,7 +104225,7 @@ mPrior = mPrior | (*WhereLoop)(unsafe.Pointer(pNew)).FmaskSelf if rc != 0 || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { if rc == SQLITE_DONE { - Xsqlite3_log(tls, SQLITE_WARNING, ts+21962, 0) + Xsqlite3_log(tls, SQLITE_WARNING, ts+22009, 0) rc = SQLITE_OK } else { goto __3 @@ -104780,7 +104832,7 @@ } if nFrom == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+21997, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+22044, 0) Xsqlite3DbFreeNN(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pSpace) return SQLITE_ERROR } @@ -104815,6 +104867,10 @@ if int32((*WherePath)(unsafe.Pointer(pFrom)).FisOrdered) == (*ExprList)(unsafe.Pointer((*WhereInfo)(unsafe.Pointer(pWInfo)).FpOrderBy)).FnExpr { (*WhereInfo)(unsafe.Pointer(pWInfo)).FeDistinct = 
U8(WHERE_DISTINCT_ORDERED) } + if (*Select)(unsafe.Pointer((*WhereInfo)(unsafe.Pointer(pWInfo)).FpSelect)).FpOrderBy != 0 && + int32((*WhereInfo)(unsafe.Pointer(pWInfo)).FnOBSat) > (*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer((*WhereInfo)(unsafe.Pointer(pWInfo)).FpSelect)).FpOrderBy)).FnExpr { + (*WhereInfo)(unsafe.Pointer(pWInfo)).FnOBSat = I8((*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer((*WhereInfo)(unsafe.Pointer(pWInfo)).FpSelect)).FpOrderBy)).FnExpr) + } } else { (*WhereInfo)(unsafe.Pointer(pWInfo)).FrevMask = (*WherePath)(unsafe.Pointer(pFrom)).FrevLoop if int32((*WhereInfo)(unsafe.Pointer(pWInfo)).FnOBSat) <= 0 { @@ -105109,6 +105165,9 @@ (*IndexedExpr)(unsafe.Pointer(p)).FiIdxCur = iIdxCur (*IndexedExpr)(unsafe.Pointer(p)).FiIdxCol = i (*IndexedExpr)(unsafe.Pointer(p)).FbMaybeNullRow = U8(bMaybeNullRow) + if Xsqlite3IndexAffinityStr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pIdx) != 0 { + (*IndexedExpr)(unsafe.Pointer(p)).Faff = *(*uint8)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx)).FzColAff + uintptr(i))) + } (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr = p if (*IndexedExpr)(unsafe.Pointer(p)).FpIENext == uintptr(0) { Xsqlite3ParserAddCleanup(tls, pParse, *(*uintptr)(unsafe.Pointer(&struct { @@ -105261,7 +105320,7 @@ if !((*SrcList)(unsafe.Pointer(pTabList)).FnSrc > int32(uint32(unsafe.Sizeof(Bitmask(0)))*uint32(8))) { goto __2 } - Xsqlite3ErrorMsg(tls, pParse, ts+22015, libc.VaList(bp, int32(uint32(unsafe.Sizeof(Bitmask(0)))*uint32(8)))) + Xsqlite3ErrorMsg(tls, pParse, ts+22062, libc.VaList(bp, int32(uint32(unsafe.Sizeof(Bitmask(0)))*uint32(8)))) return uintptr(0) __2: ; @@ -105325,7 +105384,7 @@ (*WhereInfo)(unsafe.Pointer(pWInfo)).FeDistinct = U8(WHERE_DISTINCT_UNIQUE) __7: ; - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+22043, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+22090, 0) goto __5 __4: ii = 0 @@ -106208,7 +106267,7 @@ error_out: Xsqlite3_result_error(tls, - pCtx, ts+22061, -1) + pCtx, ts+22108, -1) } func nth_valueFinalizeFunc(tls *libc.TLS, pCtx uintptr) { @@ -106341,7 +106400,7 @@ (*NtileCtx)(unsafe.Pointer(p)).FnParam = Xsqlite3_value_int64(tls, *(*uintptr)(unsafe.Pointer(apArg))) if (*NtileCtx)(unsafe.Pointer(p)).FnParam <= int64(0) { Xsqlite3_result_error(tls, - pCtx, ts+22117, -1) + pCtx, ts+22164, -1) } } (*NtileCtx)(unsafe.Pointer(p)).FnTotal++ @@ -106430,17 +106489,17 @@ } } -var row_numberName = *(*[11]uint8)(unsafe.Pointer(ts + 22162)) -var dense_rankName = *(*[11]uint8)(unsafe.Pointer(ts + 22173)) -var rankName = *(*[5]uint8)(unsafe.Pointer(ts + 22184)) -var percent_rankName = *(*[13]uint8)(unsafe.Pointer(ts + 22189)) -var cume_distName = *(*[10]uint8)(unsafe.Pointer(ts + 22202)) -var ntileName = *(*[6]uint8)(unsafe.Pointer(ts + 22212)) -var last_valueName = *(*[11]uint8)(unsafe.Pointer(ts + 22218)) -var nth_valueName = *(*[10]uint8)(unsafe.Pointer(ts + 22229)) -var first_valueName = *(*[12]uint8)(unsafe.Pointer(ts + 22239)) -var leadName = *(*[5]uint8)(unsafe.Pointer(ts + 22251)) -var lagName = *(*[4]uint8)(unsafe.Pointer(ts + 22256)) +var row_numberName = *(*[11]uint8)(unsafe.Pointer(ts + 22209)) +var dense_rankName = *(*[11]uint8)(unsafe.Pointer(ts + 22220)) +var rankName = *(*[5]uint8)(unsafe.Pointer(ts + 22231)) +var percent_rankName = *(*[13]uint8)(unsafe.Pointer(ts + 22236)) +var cume_distName = *(*[10]uint8)(unsafe.Pointer(ts + 22249)) +var ntileName = *(*[6]uint8)(unsafe.Pointer(ts + 22259)) +var last_valueName = *(*[11]uint8)(unsafe.Pointer(ts + 22265)) +var nth_valueName = *(*[10]uint8)(unsafe.Pointer(ts + 22276)) +var 
first_valueName = *(*[12]uint8)(unsafe.Pointer(ts + 22286)) +var leadName = *(*[5]uint8)(unsafe.Pointer(ts + 22298)) +var lagName = *(*[4]uint8)(unsafe.Pointer(ts + 22303)) func noopStepFunc(tls *libc.TLS, p uintptr, n int32, a uintptr) { _ = p @@ -106486,7 +106545,7 @@ } } if p == uintptr(0) { - Xsqlite3ErrorMsg(tls, pParse, ts+22260, libc.VaList(bp, zName)) + Xsqlite3ErrorMsg(tls, pParse, ts+22307, libc.VaList(bp, zName)) } return p } @@ -106530,12 +106589,12 @@ ((*Window)(unsafe.Pointer(pWin)).FpStart != 0 || (*Window)(unsafe.Pointer(pWin)).FpEnd != 0) && ((*Window)(unsafe.Pointer(pWin)).FpOrderBy == uintptr(0) || (*ExprList)(unsafe.Pointer((*Window)(unsafe.Pointer(pWin)).FpOrderBy)).FnExpr != 1) { Xsqlite3ErrorMsg(tls, pParse, - ts+22279, 0) + ts+22326, 0) } else if (*FuncDef)(unsafe.Pointer(pFunc)).FfuncFlags&U32(SQLITE_FUNC_WINDOW) != 0 { var db uintptr = (*Parse)(unsafe.Pointer(pParse)).Fdb if (*Window)(unsafe.Pointer(pWin)).FpFilter != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+22350, 0) + ts+22397, 0) } else { *(*[8]WindowUpdate)(unsafe.Pointer(bp)) = [8]WindowUpdate{ {FzFunc: uintptr(unsafe.Pointer(&row_numberName)), FeFrmType: TK_ROWS, FeStart: TK_UNBOUNDED, FeEnd: TK_CURRENT}, @@ -106761,7 +106820,7 @@ if int32((*Expr)(unsafe.Pointer(pExpr)).Fop) == TK_AGG_FUNCTION && (*Expr)(unsafe.Pointer(pExpr)).FpAggInfo == uintptr(0) { Xsqlite3ErrorMsg(tls, (*Walker)(unsafe.Pointer(pWalker)).FpParse, - ts+22413, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(pExpr + 8)))) + ts+22460, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(pExpr + 8)))) } return WRC_Continue } @@ -106877,7 +106936,7 @@ if *(*uintptr)(unsafe.Pointer(bp + 28)) == uintptr(0) { *(*uintptr)(unsafe.Pointer(bp + 28)) = Xsqlite3ExprListAppend(tls, pParse, uintptr(0), - Xsqlite3Expr(tls, db, TK_INTEGER, ts+7523)) + Xsqlite3Expr(tls, db, TK_INTEGER, ts+7512)) } pSub = Xsqlite3SelectNew(tls, @@ -106992,7 +107051,7 @@ eStart == TK_FOLLOWING && (eEnd == TK_PRECEDING || eEnd == TK_CURRENT)) { goto __2 } - Xsqlite3ErrorMsg(tls, pParse, ts+22439, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+22486, 0) goto windowAllocErr __2: ; @@ -107057,15 +107116,15 @@ var zErr uintptr = uintptr(0) if (*Window)(unsafe.Pointer(pWin)).FpPartition != 0 { - zErr = ts + 22471 + zErr = ts + 22518 } else if (*Window)(unsafe.Pointer(pExist)).FpOrderBy != 0 && (*Window)(unsafe.Pointer(pWin)).FpOrderBy != 0 { - zErr = ts + 22488 + zErr = ts + 22535 } else if int32((*Window)(unsafe.Pointer(pExist)).FbImplicitFrame) == 0 { - zErr = ts + 22504 + zErr = ts + 22551 } if zErr != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+22524, libc.VaList(bp, zErr, (*Window)(unsafe.Pointer(pWin)).FzBase)) + ts+22571, libc.VaList(bp, zErr, (*Window)(unsafe.Pointer(pWin)).FzBase)) } else { (*Window)(unsafe.Pointer(pWin)).FpPartition = Xsqlite3ExprListDup(tls, db, (*Window)(unsafe.Pointer(pExist)).FpPartition, 0) if (*Window)(unsafe.Pointer(pExist)).FpOrderBy != 0 { @@ -107086,7 +107145,7 @@ (*Window)(unsafe.Pointer(pWin)).FpOwner = p if (*Expr)(unsafe.Pointer(p)).Fflags&U32(EP_Distinct) != 0 && int32((*Window)(unsafe.Pointer(pWin)).FeFrmType) != TK_FILTER { Xsqlite3ErrorMsg(tls, pParse, - ts+22557, 0) + ts+22604, 0) } } else { Xsqlite3WindowDelete(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pWin) @@ -107242,11 +107301,11 @@ } var azErr = [5]uintptr{ - ts + 22604, - ts + 22657, - ts + 22061, - ts + 22708, - ts + 22760, + ts + 22651, + ts + 22704, + ts + 22108, + ts + 22755, + ts + 22807, } var aOp1 = [5]int32{OP_Ge, OP_Ge, OP_Gt, OP_Ge, OP_Ge} @@ -108638,19 +108697,19 @@ } cnt++ if 
(*Select)(unsafe.Pointer(pLoop)).FpOrderBy != 0 || (*Select)(unsafe.Pointer(pLoop)).FpLimit != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+22810, + Xsqlite3ErrorMsg(tls, pParse, ts+22857, libc.VaList(bp, func() uintptr { if (*Select)(unsafe.Pointer(pLoop)).FpOrderBy != uintptr(0) { - return ts + 22852 + return ts + 22899 } - return ts + 22861 + return ts + 22908 }(), Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(pNext)).Fop)))) break } } if (*Select)(unsafe.Pointer(p)).FselFlags&U32(SF_MultiValue) == U32(0) && libc.AssignInt32(&mxSelect, *(*int32)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb + 120 + 4*4))) > 0 && cnt > mxSelect { - Xsqlite3ErrorMsg(tls, pParse, ts+22867, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+22914, 0) } } @@ -108721,7 +108780,7 @@ var p uintptr = Xsqlite3ExprListAppend(tls, pParse, pPrior, uintptr(0)) if (hasCollate != 0 || sortOrder != -1) && int32((*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).Finit.Fbusy) == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+22901, + Xsqlite3ErrorMsg(tls, pParse, ts+22948, libc.VaList(bp, (*Token)(unsafe.Pointer(pIdToken)).Fn, (*Token)(unsafe.Pointer(pIdToken)).Fz)) } Xsqlite3ExprListSetName(tls, pParse, p, pIdToken, 1) @@ -109816,7 +109875,7 @@ yy_pop_parser_stack(tls, yypParser) } - Xsqlite3ErrorMsg(tls, pParse, ts+22939, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+22986, 0) (*YyParser)(unsafe.Pointer(yypParser)).FpParse = pParse } @@ -110795,7 +110854,7 @@ *(*U32)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-1)*12 + 4)) = U32(TF_WithoutRowid | TF_NoVisibleRowid) } else { *(*U32)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-1)*12 + 4)) = U32(0) - Xsqlite3ErrorMsg(tls, pParse, ts+22961, libc.VaList(bp, (*Token)(unsafe.Pointer(yymsp+4)).Fn, (*Token)(unsafe.Pointer(yymsp+4)).Fz)) + Xsqlite3ErrorMsg(tls, pParse, ts+23008, libc.VaList(bp, (*Token)(unsafe.Pointer(yymsp+4)).Fn, (*Token)(unsafe.Pointer(yymsp+4)).Fz)) } } break @@ -110805,7 +110864,7 @@ *(*U32)(unsafe.Pointer(bp + 40)) = U32(TF_Strict) } else { *(*U32)(unsafe.Pointer(bp + 40)) = U32(0) - Xsqlite3ErrorMsg(tls, pParse, ts+22961, libc.VaList(bp+16, (*Token)(unsafe.Pointer(yymsp+4)).Fn, (*Token)(unsafe.Pointer(yymsp+4)).Fz)) + Xsqlite3ErrorMsg(tls, pParse, ts+23008, libc.VaList(bp+16, (*Token)(unsafe.Pointer(yymsp+4)).Fn, (*Token)(unsafe.Pointer(yymsp+4)).Fz)) } } *(*U32)(unsafe.Pointer(yymsp + 4)) = *(*U32)(unsafe.Pointer(bp + 40)) @@ -111548,7 +111607,7 @@ case uint32(157): { Xsqlite3SrcListIndexedBy(tls, pParse, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-5)*12 + 4)), yymsp+libc.UintptrFromInt32(-4)*12+4) - Xsqlite3ExprListCheckLength(tls, pParse, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-2)*12 + 4)), ts+22988) + Xsqlite3ExprListCheckLength(tls, pParse, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-2)*12 + 4)), ts+23035) if *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-1)*12 + 4)) != 0 { var pFromClause uintptr = *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-1)*12 + 4)) if (*SrcList)(unsafe.Pointer(pFromClause)).FnSrc > 1 { @@ -111712,7 +111771,7 @@ *(*Token)(unsafe.Pointer(bp + 92)) = *(*Token)(unsafe.Pointer(yymsp + 4)) if int32((*Parse)(unsafe.Pointer(pParse)).Fnested) == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+22997, libc.VaList(bp+32, bp+92)) + Xsqlite3ErrorMsg(tls, pParse, ts+23044, libc.VaList(bp+32, bp+92)) *(*uintptr)(unsafe.Pointer(yymsp + 4)) = uintptr(0) } else { *(*uintptr)(unsafe.Pointer(yymsp + 4)) = Xsqlite3PExpr(tls, pParse, TK_REGISTER, uintptr(0), uintptr(0)) @@ -111929,9 
+111988,9 @@ Xsqlite3ExprUnmapAndDelete(tls, pParse, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-4)*12 + 4))) *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-4)*12 + 4)) = Xsqlite3Expr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, TK_STRING, func() uintptr { if *(*int32)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-3)*12 + 4)) != 0 { - return ts + 6757 + return ts + 7687 } - return ts + 6762 + return ts + 7692 }()) if *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-4)*12 + 4)) != 0 { Xsqlite3ExprIdToTrueFalse(tls, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-4)*12 + 4))) @@ -112215,19 +112274,19 @@ { *(*Token)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-2)*12 + 4)) = *(*Token)(unsafe.Pointer(yymsp + 4)) Xsqlite3ErrorMsg(tls, pParse, - ts+23021, 0) + ts+23068, 0) } break case uint32(271): { Xsqlite3ErrorMsg(tls, pParse, - ts+23116, 0) + ts+23163, 0) } break case uint32(272): { Xsqlite3ErrorMsg(tls, pParse, - ts+23200, 0) + ts+23247, 0) } break case uint32(273): @@ -112606,9 +112665,9 @@ _ = yymajor if *(*uint8)(unsafe.Pointer((*Token)(unsafe.Pointer(bp + 8)).Fz)) != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+22997, libc.VaList(bp, bp+8)) + Xsqlite3ErrorMsg(tls, pParse, ts+23044, libc.VaList(bp, bp+8)) } else { - Xsqlite3ErrorMsg(tls, pParse, ts+23285, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+23332, 0) } (*YyParser)(unsafe.Pointer(yypParser)).FpParse = pParse @@ -113376,7 +113435,7 @@ } else { (*Token)(unsafe.Pointer(bp + 1248)).Fz = zSql (*Token)(unsafe.Pointer(bp + 1248)).Fn = uint32(n) - Xsqlite3ErrorMsg(tls, pParse, ts+23302, libc.VaList(bp, bp+1248)) + Xsqlite3ErrorMsg(tls, pParse, ts+23349, libc.VaList(bp, bp+1248)) break } } @@ -113399,7 +113458,7 @@ if (*Parse)(unsafe.Pointer(pParse)).FzErrMsg == uintptr(0) { (*Parse)(unsafe.Pointer(pParse)).FzErrMsg = Xsqlite3MPrintf(tls, db, ts+3656, libc.VaList(bp+8, Xsqlite3ErrStr(tls, (*Parse)(unsafe.Pointer(pParse)).Frc))) } - Xsqlite3_log(tls, (*Parse)(unsafe.Pointer(pParse)).Frc, ts+23327, libc.VaList(bp+16, (*Parse)(unsafe.Pointer(pParse)).FzErrMsg, (*Parse)(unsafe.Pointer(pParse)).FzTail)) + Xsqlite3_log(tls, (*Parse)(unsafe.Pointer(pParse)).Frc, ts+23374, libc.VaList(bp+16, (*Parse)(unsafe.Pointer(pParse)).FzErrMsg, (*Parse)(unsafe.Pointer(pParse)).FzTail)) nErr++ } (*Parse)(unsafe.Pointer(pParse)).FzTail = zSql @@ -113572,7 +113631,7 @@ fallthrough case 'C': { - if nId == 6 && Xsqlite3_strnicmp(tls, zSql, ts+23338, 6) == 0 { + if nId == 6 && Xsqlite3_strnicmp(tls, zSql, ts+23385, 6) == 0 { token = U8(TkCREATE) } else { token = U8(TkOTHER) @@ -113585,11 +113644,11 @@ fallthrough case 'T': { - if nId == 7 && Xsqlite3_strnicmp(tls, zSql, ts+19924, 7) == 0 { + if nId == 7 && Xsqlite3_strnicmp(tls, zSql, ts+19971, 7) == 0 { token = U8(TkTRIGGER) - } else if nId == 4 && Xsqlite3_strnicmp(tls, zSql, ts+23345, 4) == 0 { + } else if nId == 4 && Xsqlite3_strnicmp(tls, zSql, ts+23392, 4) == 0 { token = U8(TkTEMP) - } else if nId == 9 && Xsqlite3_strnicmp(tls, zSql, ts+23350, 9) == 0 { + } else if nId == 9 && Xsqlite3_strnicmp(tls, zSql, ts+23397, 9) == 0 { token = U8(TkTEMP) } else { token = U8(TkOTHER) @@ -113602,9 +113661,9 @@ fallthrough case 'E': { - if nId == 3 && Xsqlite3_strnicmp(tls, zSql, ts+23360, 3) == 0 { + if nId == 3 && Xsqlite3_strnicmp(tls, zSql, ts+23407, 3) == 0 { token = U8(TkEND) - } else if nId == 7 && Xsqlite3_strnicmp(tls, zSql, ts+23364, 7) == 0 { + } else if nId == 7 && Xsqlite3_strnicmp(tls, zSql, ts+23411, 7) == 0 { token = U8(TkEXPLAIN) } else { token = U8(TkOTHER) @@ -113838,7 
+113897,7 @@ var rc int32 = SQLITE_OK if Xsqlite3Config.FisInit != 0 { - return Xsqlite3MisuseError(tls, 174337) + return Xsqlite3MisuseError(tls, 174426) } ap = va @@ -114413,7 +114472,7 @@ return SQLITE_OK } if !(Xsqlite3SafetyCheckSickOrOk(tls, db) != 0) { - return Xsqlite3MisuseError(tls, 175111) + return Xsqlite3MisuseError(tls, 175200) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) if int32((*Sqlite3)(unsafe.Pointer(db)).FmTrace)&SQLITE_TRACE_CLOSE != 0 { @@ -114428,7 +114487,7 @@ if !(forceZombie != 0) && connectionIsBusy(tls, db) != 0 { Xsqlite3ErrorWithMsg(tls, db, SQLITE_BUSY, - ts+23372, 0) + ts+23419, 0) Xsqlite3_mutex_leave(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) return SQLITE_BUSY } @@ -114619,23 +114678,23 @@ // Return a static string that describes the kind of error specified in the // argument. func Xsqlite3ErrStr(tls *libc.TLS, rc int32) uintptr { - var zErr uintptr = ts + 23440 + var zErr uintptr = ts + 23487 switch rc { case SQLITE_ABORT | int32(2)<<8: { - zErr = ts + 23454 + zErr = ts + 23501 break } case SQLITE_ROW: { - zErr = ts + 23476 + zErr = ts + 23523 break } case SQLITE_DONE: { - zErr = ts + 23498 + zErr = ts + 23545 break } @@ -114653,35 +114712,35 @@ } var aMsg = [29]uintptr{ - ts + 23521, - ts + 23534, + ts + 23568, + ts + 23581, uintptr(0), - ts + 23550, - ts + 23575, - ts + 23589, - ts + 23608, + ts + 23597, + ts + 23622, + ts + 23636, + ts + 23655, ts + 1483, - ts + 23633, - ts + 23670, - ts + 23682, - ts + 23697, - ts + 23730, - ts + 23748, - ts + 23773, - ts + 23802, + ts + 23680, + ts + 23717, + ts + 23729, + ts + 23744, + ts + 23777, + ts + 23795, + ts + 23820, + ts + 23849, uintptr(0), ts + 5831, ts + 5327, - ts + 23819, - ts + 23837, - ts + 23855, - uintptr(0), - ts + 23889, + ts + 23866, + ts + 23884, + ts + 23902, uintptr(0), - ts + 23910, ts + 23936, - ts + 23959, - ts + 23980, + uintptr(0), + ts + 23957, + ts + 23983, + ts + 24006, + ts + 24027, } func sqliteDefaultBusyCallback(tls *libc.TLS, ptr uintptr, count int32) int32 { @@ -114802,7 +114861,7 @@ libc.Bool32(xValue == uintptr(0)) != libc.Bool32(xInverse == uintptr(0)) || (nArg < -1 || nArg > SQLITE_MAX_FUNCTION_ARG) || 255 < Xsqlite3Strlen30(tls, zFunctionName) { - return Xsqlite3MisuseError(tls, 175758) + return Xsqlite3MisuseError(tls, 175847) } extraFlags = enc & (SQLITE_DETERMINISTIC | SQLITE_DIRECTONLY | SQLITE_SUBTYPE | SQLITE_INNOCUOUS) @@ -114847,7 +114906,7 @@ if p != 0 && (*FuncDef)(unsafe.Pointer(p)).FfuncFlags&U32(SQLITE_FUNC_ENCMASK) == U32(enc) && int32((*FuncDef)(unsafe.Pointer(p)).FnArg) == nArg { if (*Sqlite3)(unsafe.Pointer(db)).FnVdbeActive != 0 { Xsqlite3ErrorWithMsg(tls, db, SQLITE_BUSY, - ts+23996, 0) + ts+24043, 0) return SQLITE_BUSY } else { @@ -114964,7 +115023,7 @@ _ = NotUsed _ = NotUsed2 zErr = Xsqlite3_mprintf(tls, - ts+24059, libc.VaList(bp, zName)) + ts+24106, libc.VaList(bp, zName)) Xsqlite3_result_error(tls, context, zErr, -1) Xsqlite3_free(tls, zErr) } @@ -115200,7 +115259,7 @@ } if iDb < 0 { rc = SQLITE_ERROR - Xsqlite3ErrorWithMsg(tls, db, SQLITE_ERROR, ts+24110, libc.VaList(bp, zDb)) + Xsqlite3ErrorWithMsg(tls, db, SQLITE_ERROR, ts+24157, libc.VaList(bp, zDb)) } else { (*Sqlite3)(unsafe.Pointer(db)).FbusyHandler.FnBusy = 0 rc = Xsqlite3Checkpoint(tls, db, iDb, eMode, pnLog, pnCkpt) @@ -115293,7 +115352,7 @@ return Xsqlite3ErrStr(tls, SQLITE_NOMEM) } if !(Xsqlite3SafetyCheckSickOrOk(tls, db) != 0) { - return Xsqlite3ErrStr(tls, Xsqlite3MisuseError(tls, 176503)) + return Xsqlite3ErrStr(tls, Xsqlite3MisuseError(tls, 176592)) } 
Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) if (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { @@ -115363,7 +115422,7 @@ // passed to this function, we assume a malloc() failed during sqlite3_open(). func Xsqlite3_errcode(tls *libc.TLS, db uintptr) int32 { if db != 0 && !(Xsqlite3SafetyCheckSickOrOk(tls, db) != 0) { - return Xsqlite3MisuseError(tls, 176582) + return Xsqlite3MisuseError(tls, 176671) } if !(db != 0) || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { return SQLITE_NOMEM @@ -115373,7 +115432,7 @@ func Xsqlite3_extended_errcode(tls *libc.TLS, db uintptr) int32 { if db != 0 && !(Xsqlite3SafetyCheckSickOrOk(tls, db) != 0) { - return Xsqlite3MisuseError(tls, 176591) + return Xsqlite3MisuseError(tls, 176680) } if !(db != 0) || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { return SQLITE_NOMEM @@ -115405,14 +115464,14 @@ enc2 = SQLITE_UTF16LE } if enc2 < SQLITE_UTF8 || enc2 > SQLITE_UTF16BE { - return Xsqlite3MisuseError(tls, 176639) + return Xsqlite3MisuseError(tls, 176728) } pColl = Xsqlite3FindCollSeq(tls, db, U8(enc2), zName, 0) if pColl != 0 && (*CollSeq)(unsafe.Pointer(pColl)).FxCmp != 0 { if (*Sqlite3)(unsafe.Pointer(db)).FnVdbeActive != 0 { Xsqlite3ErrorWithMsg(tls, db, SQLITE_BUSY, - ts+24131, 0) + ts+24178, 0) return SQLITE_BUSY } Xsqlite3ExpirePreparedStatements(tls, db, 0) @@ -115542,7 +115601,7 @@ if !((flags&uint32(SQLITE_OPEN_URI) != 0 || Xsqlite3Config.FbOpenUri != 0) && - nUri >= 5 && libc.Xmemcmp(tls, zUri, ts+24199, uint32(5)) == 0) { + nUri >= 5 && libc.Xmemcmp(tls, zUri, ts+24246, uint32(5)) == 0) { goto __1 } iOut = 0 @@ -115587,10 +115646,10 @@ goto __8 __9: ; - if !(iIn != 7 && (iIn != 16 || libc.Xmemcmp(tls, ts+24205, zUri+7, uint32(9)) != 0)) { + if !(iIn != 7 && (iIn != 16 || libc.Xmemcmp(tls, ts+24252, zUri+7, uint32(9)) != 0)) { goto __10 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24215, + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24262, libc.VaList(bp, iIn-7, zUri+7)) rc = SQLITE_ERROR goto parse_uri_out @@ -115695,7 +115754,7 @@ zVal = zOpt + uintptr(nOpt+1) nVal = Xsqlite3Strlen30(tls, zVal) - if !(nOpt == 3 && libc.Xmemcmp(tls, ts+24243, zOpt, uint32(3)) == 0) { + if !(nOpt == 3 && libc.Xmemcmp(tls, ts+24290, zOpt, uint32(3)) == 0) { goto __29 } zVfs = zVal @@ -115706,17 +115765,17 @@ mask = 0 limit = 0 - if !(nOpt == 5 && libc.Xmemcmp(tls, ts+24247, zOpt, uint32(5)) == 0) { + if !(nOpt == 5 && libc.Xmemcmp(tls, ts+24294, zOpt, uint32(5)) == 0) { goto __31 } mask = SQLITE_OPEN_SHAREDCACHE | SQLITE_OPEN_PRIVATECACHE aMode = uintptr(unsafe.Pointer(&aCacheMode)) limit = mask - zModeType = ts + 24247 + zModeType = ts + 24294 __31: ; - if !(nOpt == 4 && libc.Xmemcmp(tls, ts+24253, zOpt, uint32(4)) == 0) { + if !(nOpt == 4 && libc.Xmemcmp(tls, ts+24300, zOpt, uint32(4)) == 0) { goto __32 } @@ -115754,7 +115813,7 @@ if !(mode == 0) { goto __38 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24258, libc.VaList(bp+16, zModeType, zVal)) + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24305, libc.VaList(bp+16, zModeType, zVal)) rc = SQLITE_ERROR goto parse_uri_out __38: @@ -115762,7 +115821,7 @@ if !(mode&libc.CplInt32(SQLITE_OPEN_MEMORY) > limit) { goto __39 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24278, + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24325, libc.VaList(bp+32, zModeType, zVal)) rc = SQLITE_PERM goto parse_uri_out @@ -115802,7 +115861,7 @@ if 
!(*(*uintptr)(unsafe.Pointer(ppVfs)) == uintptr(0)) { goto __42 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24302, libc.VaList(bp+48, zVfs)) + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24349, libc.VaList(bp+48, zVfs)) rc = SQLITE_ERROR __42: ; @@ -115825,14 +115884,14 @@ } var aCacheMode = [3]OpenMode{ - {Fz: ts + 24318, Fmode: SQLITE_OPEN_SHAREDCACHE}, - {Fz: ts + 24325, Fmode: SQLITE_OPEN_PRIVATECACHE}, + {Fz: ts + 24365, Fmode: SQLITE_OPEN_SHAREDCACHE}, + {Fz: ts + 24372, Fmode: SQLITE_OPEN_PRIVATECACHE}, {}, } var aOpenMode = [5]OpenMode{ - {Fz: ts + 24333, Fmode: SQLITE_OPEN_READONLY}, - {Fz: ts + 24336, Fmode: SQLITE_OPEN_READWRITE}, - {Fz: ts + 24339, Fmode: SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE}, + {Fz: ts + 24380, Fmode: SQLITE_OPEN_READONLY}, + {Fz: ts + 24383, Fmode: SQLITE_OPEN_READWRITE}, + {Fz: ts + 24386, Fmode: SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE}, {Fz: ts + 17355, Fmode: SQLITE_OPEN_MEMORY}, {}, } @@ -115979,10 +116038,10 @@ createCollation(tls, db, uintptr(unsafe.Pointer(&Xsqlite3StrBINARY)), uint8(SQLITE_UTF16LE), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr, int32, uintptr) int32 }{binCollFunc})), uintptr(0)) - createCollation(tls, db, ts+21856, uint8(SQLITE_UTF8), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + createCollation(tls, db, ts+21903, uint8(SQLITE_UTF8), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr, int32, uintptr) int32 }{nocaseCollatingFunc})), uintptr(0)) - createCollation(tls, db, ts+24343, uint8(SQLITE_UTF8), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + createCollation(tls, db, ts+24390, uint8(SQLITE_UTF8), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr, int32, uintptr) int32 }{rtrimCollFunc})), uintptr(0)) if !((*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0) { @@ -115996,7 +116055,7 @@ if !(int32(1)<<(*(*uint32)(unsafe.Pointer(bp + 8))&uint32(7))&0x46 == 0) { goto __16 } - rc = Xsqlite3MisuseError(tls, 177308) + rc = Xsqlite3MisuseError(tls, 177397) goto __17 __16: rc = Xsqlite3ParseUri(tls, zVfs, zFilename, bp+8, db, bp+12, bp+16) @@ -116049,7 +116108,7 @@ (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb)).FzDbSName = ts + 6434 (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb)).Fsafety_level = U8(SQLITE_DEFAULT_SYNCHRONOUS + 1) - (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + 1*16)).FzDbSName = ts + 23345 + (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + 1*16)).FzDbSName = ts + 23392 (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + 1*16)).Fsafety_level = U8(PAGER_SYNCHRONOUS_OFF) (*Sqlite3)(unsafe.Pointer(db)).FeOpenState = U8(SQLITE_STATE_OPEN) @@ -116154,7 +116213,7 @@ return rc } if zFilename == uintptr(0) { - zFilename = ts + 24349 + zFilename = ts + 24396 } pVal = Xsqlite3ValueNew(tls, uintptr(0)) Xsqlite3ValueSetStr(tls, pVal, -1, zFilename, uint8(SQLITE_UTF16LE), uintptr(0)) @@ -116257,21 +116316,21 @@ bp := tls.Alloc(24) defer tls.Free(24) - Xsqlite3_log(tls, iErr, ts+24352, + Xsqlite3_log(tls, iErr, ts+24399, libc.VaList(bp, zType, lineno, uintptr(20)+Xsqlite3_sourceid(tls))) return iErr } func Xsqlite3CorruptError(tls *libc.TLS, lineno int32) int32 { - return Xsqlite3ReportError(tls, SQLITE_CORRUPT, lineno, ts+24377) + return Xsqlite3ReportError(tls, SQLITE_CORRUPT, lineno, ts+24424) } func Xsqlite3MisuseError(tls *libc.TLS, lineno int32) int32 { - return Xsqlite3ReportError(tls, 
SQLITE_MISUSE, lineno, ts+24397) + return Xsqlite3ReportError(tls, SQLITE_MISUSE, lineno, ts+24444) } func Xsqlite3CantopenError(tls *libc.TLS, lineno int32) int32 { - return Xsqlite3ReportError(tls, SQLITE_CANTOPEN, lineno, ts+24404) + return Xsqlite3ReportError(tls, SQLITE_CANTOPEN, lineno, ts+24451) } // This is a convenience routine that makes sure that all thread-specific @@ -116429,7 +116488,7 @@ goto __20 } Xsqlite3DbFree(tls, db, *(*uintptr)(unsafe.Pointer(bp + 24))) - *(*uintptr)(unsafe.Pointer(bp + 24)) = Xsqlite3MPrintf(tls, db, ts+24421, libc.VaList(bp, zTableName, + *(*uintptr)(unsafe.Pointer(bp + 24)) = Xsqlite3MPrintf(tls, db, ts+24468, libc.VaList(bp, zTableName, zColumnName)) rc = SQLITE_ERROR __20: @@ -117085,7 +117144,7 @@ azCompileOpt = Xsqlite3CompileOptions(tls, bp) - if Xsqlite3_strnicmp(tls, zOptName, ts+24449, 7) == 0 { + if Xsqlite3_strnicmp(tls, zOptName, ts+24496, 7) == 0 { zOptName += uintptr(7) } n = Xsqlite3Strlen30(tls, zOptName) @@ -117203,7 +117262,7 @@ Xsqlite3ErrorWithMsg(tls, db, rc, func() uintptr { if rc != 0 { - return ts + 24457 + return ts + 24504 } return uintptr(0) }(), 0) @@ -117380,7 +117439,7 @@ type JsonParse = JsonParse1 var jsonType = [8]uintptr{ - ts + 6174, ts + 6757, ts + 6762, ts + 6184, ts + 6179, ts + 7998, ts + 24480, ts + 24486, + ts + 6174, ts + 7687, ts + 7692, ts + 6184, ts + 6179, ts + 7998, ts + 24527, ts + 24533, } func jsonZero(tls *libc.TLS, p uintptr) { @@ -117533,7 +117592,7 @@ *(*uint8)(unsafe.Pointer((*JsonString)(unsafe.Pointer(p)).FzBuf + uintptr(libc.PostIncUint64(&(*JsonString)(unsafe.Pointer(p)).FnUsed, 1)))) = uint8('0') *(*uint8)(unsafe.Pointer((*JsonString)(unsafe.Pointer(p)).FzBuf + uintptr(libc.PostIncUint64(&(*JsonString)(unsafe.Pointer(p)).FnUsed, 1)))) = uint8('0') *(*uint8)(unsafe.Pointer((*JsonString)(unsafe.Pointer(p)).FzBuf + uintptr(libc.PostIncUint64(&(*JsonString)(unsafe.Pointer(p)).FnUsed, 1)))) = uint8('0' + int32(c)>>4) - c = *(*uint8)(unsafe.Pointer(ts + 24493 + uintptr(int32(c)&0xf))) + c = *(*uint8)(unsafe.Pointer(ts + 24540 + uintptr(int32(c)&0xf))) __8: ; __6: @@ -117588,7 +117647,7 @@ default: { if int32((*JsonString)(unsafe.Pointer(p)).FbErr) == 0 { - Xsqlite3_result_error(tls, (*JsonString)(unsafe.Pointer(p)).FpCtx, ts+24510, -1) + Xsqlite3_result_error(tls, (*JsonString)(unsafe.Pointer(p)).FpCtx, ts+24557, -1) (*JsonString)(unsafe.Pointer(p)).FbErr = U8(2) jsonReset(tls, p) } @@ -117652,13 +117711,13 @@ } case JSON_TRUE: { - jsonAppendRaw(tls, pOut, ts+6757, uint32(4)) + jsonAppendRaw(tls, pOut, ts+7687, uint32(4)) break } case JSON_FALSE: { - jsonAppendRaw(tls, pOut, ts+6762, uint32(5)) + jsonAppendRaw(tls, pOut, ts+7692, uint32(5)) break } @@ -118208,12 +118267,12 @@ jsonParseAddNode(tls, pParse, uint32(JSON_NULL), uint32(0), uintptr(0)) return int32(i + U32(4)) } else if int32(c) == 't' && - libc.Xstrncmp(tls, z+uintptr(i), ts+6757, uint32(4)) == 0 && + libc.Xstrncmp(tls, z+uintptr(i), ts+7687, uint32(4)) == 0 && !(int32(Xsqlite3CtypeMap[*(*uint8)(unsafe.Pointer(z + uintptr(i+U32(4))))])&0x06 != 0) { jsonParseAddNode(tls, pParse, uint32(JSON_TRUE), uint32(0), uintptr(0)) return int32(i + U32(4)) } else if int32(c) == 'f' && - libc.Xstrncmp(tls, z+uintptr(i), ts+6762, uint32(5)) == 0 && + libc.Xstrncmp(tls, z+uintptr(i), ts+7692, uint32(5)) == 0 && !(int32(Xsqlite3CtypeMap[*(*uint8)(unsafe.Pointer(z + uintptr(i+U32(5))))])&0x06 != 0) { jsonParseAddNode(tls, pParse, uint32(JSON_FALSE), uint32(0), uintptr(0)) return int32(i + U32(5)) @@ -118314,7 +118373,7 @@ if 
(*JsonParse)(unsafe.Pointer(pParse)).Foom != 0 { Xsqlite3_result_error_nomem(tls, pCtx) } else { - Xsqlite3_result_error(tls, pCtx, ts+24539, -1) + Xsqlite3_result_error(tls, pCtx, ts+24586, -1) } } jsonParseReset(tls, pParse) @@ -118620,7 +118679,7 @@ } if int32(*(*uint8)(unsafe.Pointer(zPath))) == '.' { jsonParseAddNode(tls, pParse, uint32(JSON_OBJECT), uint32(0), uintptr(0)) - } else if libc.Xstrncmp(tls, zPath, ts+24554, uint32(3)) == 0 { + } else if libc.Xstrncmp(tls, zPath, ts+24601, uint32(3)) == 0 { jsonParseAddNode(tls, pParse, uint32(JSON_ARRAY), uint32(0), uintptr(0)) } else { return uintptr(0) @@ -118635,7 +118694,7 @@ bp := tls.Alloc(8) defer tls.Free(8) - return Xsqlite3_mprintf(tls, ts+24558, libc.VaList(bp, zErr)) + return Xsqlite3_mprintf(tls, ts+24605, libc.VaList(bp, zErr)) } func jsonLookup(tls *libc.TLS, pParse uintptr, zPath uintptr, pApnd uintptr, pCtx uintptr) uintptr { @@ -118689,7 +118748,7 @@ bp := tls.Alloc(8) defer tls.Free(8) - var zMsg uintptr = Xsqlite3_mprintf(tls, ts+24584, + var zMsg uintptr = Xsqlite3_mprintf(tls, ts+24631, libc.VaList(bp, zFuncName)) Xsqlite3_result_error(tls, pCtx, zMsg, -1) Xsqlite3_free(tls, zMsg) @@ -118794,11 +118853,11 @@ if int32(*(*uint8)(unsafe.Pointer(zPath))) != '$' { jsonInit(tls, bp, ctx) if int32(Xsqlite3CtypeMap[*(*uint8)(unsafe.Pointer(zPath))])&0x04 != 0 { - jsonAppendRaw(tls, bp, ts+24627, uint32(2)) + jsonAppendRaw(tls, bp, ts+24674, uint32(2)) jsonAppendRaw(tls, bp, zPath, uint32(int32(libc.Xstrlen(tls, zPath)))) jsonAppendRaw(tls, bp, ts+4991, uint32(2)) } else { - jsonAppendRaw(tls, bp, ts+24630, uint32(1+libc.Bool32(int32(*(*uint8)(unsafe.Pointer(zPath))) != '['))) + jsonAppendRaw(tls, bp, ts+24677, uint32(1+libc.Bool32(int32(*(*uint8)(unsafe.Pointer(zPath))) != '['))) jsonAppendRaw(tls, bp, zPath, uint32(int32(libc.Xstrlen(tls, zPath)))) jsonAppendChar(tls, bp, uint8(0)) } @@ -118955,14 +119014,14 @@ if argc&1 != 0 { Xsqlite3_result_error(tls, ctx, - ts+24633, -1) + ts+24680, -1) return } jsonInit(tls, bp, ctx) jsonAppendChar(tls, bp, uint8('{')) for i = 0; i < argc; i = i + 2 { if Xsqlite3_value_type(tls, *(*uintptr)(unsafe.Pointer(argv + uintptr(i)*4))) != SQLITE_TEXT { - Xsqlite3_result_error(tls, ctx, ts+24684, -1) + Xsqlite3_result_error(tls, ctx, ts+24731, -1) jsonReset(tls, bp) return } @@ -119132,9 +119191,9 @@ } jsonWrongNumArgs(tls, ctx, func() uintptr { if bIsSet != 0 { - return ts + 24718 + return ts + 24765 } - return ts + 24722 + return ts + 24769 }()) return __2: @@ -119267,7 +119326,7 @@ (*JsonString)(unsafe.Pointer(pStr)).FnUsed-- } } else { - Xsqlite3_result_text(tls, ctx, ts+24729, 2, uintptr(0)) + Xsqlite3_result_text(tls, ctx, ts+24776, 2, uintptr(0)) } Xsqlite3_result_subtype(tls, ctx, uint32(JSON_SUBTYPE)) } @@ -119364,7 +119423,7 @@ (*JsonString)(unsafe.Pointer(pStr)).FnUsed-- } } else { - Xsqlite3_result_text(tls, ctx, ts+24732, 2, uintptr(0)) + Xsqlite3_result_text(tls, ctx, ts+24779, 2, uintptr(0)) } Xsqlite3_result_subtype(tls, ctx, uint32(JSON_SUBTYPE)) } @@ -119408,7 +119467,7 @@ _ = argc _ = pAux rc = Xsqlite3_declare_vtab(tls, db, - ts+24735) + ts+24782) if rc == SQLITE_OK { pNew = libc.AssignPtrUintptr(ppVtab, Xsqlite3_malloc(tls, int32(unsafe.Sizeof(Sqlite3_vtab{})))) if pNew == uintptr(0) { @@ -119539,7 +119598,7 @@ nn = nn - 2 } } - jsonPrintf(tls, nn+2, pStr, ts+24818, libc.VaList(bp, nn, z)) + jsonPrintf(tls, nn+2, pStr, ts+24865, libc.VaList(bp, nn, z)) } func jsonEachComputePath(tls *libc.TLS, p uintptr, pStr uintptr, i U32) { @@ -119558,7 +119617,7 @@ pNode = 
(*JsonEachCursor)(unsafe.Pointer(p)).FsParse.FaNode + uintptr(i)*12 pUp = (*JsonEachCursor)(unsafe.Pointer(p)).FsParse.FaNode + uintptr(iUp)*12 if int32((*JsonNode)(unsafe.Pointer(pUp)).FeType) == JSON_ARRAY { - jsonPrintf(tls, 30, pStr, ts+24824, libc.VaList(bp, *(*U32)(unsafe.Pointer(pUp + 8)))) + jsonPrintf(tls, 30, pStr, ts+24871, libc.VaList(bp, *(*U32)(unsafe.Pointer(pUp + 8)))) } else { if int32((*JsonNode)(unsafe.Pointer(pNode)).FjnFlags)&JNODE_LABEL == 0 { pNode -= 12 @@ -119654,7 +119713,7 @@ jsonAppendChar(tls, bp+8, uint8('$')) } if int32((*JsonEachCursor)(unsafe.Pointer(p)).FeType) == JSON_ARRAY { - jsonPrintf(tls, 30, bp+8, ts+24824, libc.VaList(bp, (*JsonEachCursor)(unsafe.Pointer(p)).FiRowid)) + jsonPrintf(tls, 30, bp+8, ts+24871, libc.VaList(bp, (*JsonEachCursor)(unsafe.Pointer(p)).FiRowid)) } else if int32((*JsonEachCursor)(unsafe.Pointer(p)).FeType) == JSON_OBJECT { jsonAppendObjectPathElement(tls, bp+8, pThis) } @@ -119678,7 +119737,7 @@ { var zRoot uintptr = (*JsonEachCursor)(unsafe.Pointer(p)).FzRoot if zRoot == uintptr(0) { - zRoot = ts + 24829 + zRoot = ts + 24876 } Xsqlite3_result_text(tls, ctx, zRoot, -1, uintptr(0)) break @@ -119800,7 +119859,7 @@ var rc int32 = SQLITE_NOMEM if int32((*JsonEachCursor)(unsafe.Pointer(p)).FsParse.Foom) == 0 { Xsqlite3_free(tls, (*Sqlite3_vtab)(unsafe.Pointer((*Sqlite3_vtab_cursor)(unsafe.Pointer(cur)).FpVtab)).FzErrMsg) - (*Sqlite3_vtab)(unsafe.Pointer((*Sqlite3_vtab_cursor)(unsafe.Pointer(cur)).FpVtab)).FzErrMsg = Xsqlite3_mprintf(tls, ts+24539, 0) + (*Sqlite3_vtab)(unsafe.Pointer((*Sqlite3_vtab_cursor)(unsafe.Pointer(cur)).FpVtab)).FzErrMsg = Xsqlite3_mprintf(tls, ts+24586, 0) if (*Sqlite3_vtab)(unsafe.Pointer((*Sqlite3_vtab_cursor)(unsafe.Pointer(cur)).FpVtab)).FzErrMsg != 0 { rc = SQLITE_ERROR } @@ -119895,25 +119954,25 @@ } var aJsonFunc = [19]FuncDef{ - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24831}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24836}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24847}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24847}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24865}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(JSON_JSON), FxSFunc: 0, FzName: ts + 24878}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(JSON_SQL), FxSFunc: 0, FzName: ts + 24881}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24885}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24897}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24909}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts 
+ 24920}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24931}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24943}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(JSON_ISSET), FxSFunc: 0, FzName: ts + 24956}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24965}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24965}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24975}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_UTF8 | 0*SQLITE_FUNC_NEEDCOLL | SQLITE_SUBTYPE | SQLITE_UTF8 | SQLITE_DETERMINISTIC), FxSFunc: 0, FxFinalize: 0, FxValue: 0, FxInverse: 0, FzName: ts + 24986}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_UTF8 | 0*SQLITE_FUNC_NEEDCOLL | SQLITE_SUBTYPE | SQLITE_UTF8 | SQLITE_DETERMINISTIC), FxSFunc: 0, FxFinalize: 0, FxValue: 0, FxInverse: 0, FzName: ts + 25003}} + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24878}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24883}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24894}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24894}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24912}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(JSON_JSON), FxSFunc: 0, FzName: ts + 24925}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(JSON_SQL), FxSFunc: 0, FzName: ts + 24928}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24932}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24944}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24956}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24967}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24978}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24990}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: 
uintptr(JSON_ISSET), FxSFunc: 0, FzName: ts + 25003}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25012}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25012}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25022}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_UTF8 | 0*SQLITE_FUNC_NEEDCOLL | SQLITE_SUBTYPE | SQLITE_UTF8 | SQLITE_DETERMINISTIC), FxSFunc: 0, FxFinalize: 0, FxValue: 0, FxInverse: 0, FzName: ts + 25033}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_UTF8 | 0*SQLITE_FUNC_NEEDCOLL | SQLITE_SUBTYPE | SQLITE_UTF8 | SQLITE_DETERMINISTIC), FxSFunc: 0, FxFinalize: 0, FxValue: 0, FxInverse: 0, FzName: ts + 25050}} // Register the JSON table-valued functions func Xsqlite3JsonTableFunctions(tls *libc.TLS, db uintptr) int32 { @@ -119932,8 +119991,8 @@ FzName uintptr FpModule uintptr }{ - {FzName: ts + 25021, FpModule: 0}, - {FzName: ts + 25031, FpModule: 0}, + {FzName: ts + 25068, FpModule: 0}, + {FzName: ts + 25078, FpModule: 0}, } type Rtree1 = struct { @@ -120190,11 +120249,11 @@ } } if (*Rtree)(unsafe.Pointer(pRtree)).FpNodeBlob == uintptr(0) { - var zTab uintptr = Xsqlite3_mprintf(tls, ts+25041, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) + var zTab uintptr = Xsqlite3_mprintf(tls, ts+25088, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) if zTab == uintptr(0) { return SQLITE_NOMEM } - rc = Xsqlite3_blob_open(tls, (*Rtree)(unsafe.Pointer(pRtree)).Fdb, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, zTab, ts+25049, iNode, 0, + rc = Xsqlite3_blob_open(tls, (*Rtree)(unsafe.Pointer(pRtree)).Fdb, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, zTab, ts+25096, iNode, 0, pRtree+76) Xsqlite3_free(tls, zTab) } @@ -120405,7 +120464,7 @@ var pRtree uintptr = pVtab var rc int32 var zCreate uintptr = Xsqlite3_mprintf(tls, - ts+25054, + ts+25101, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) @@ -121108,7 +121167,7 @@ var pSrc uintptr var pInfo uintptr - pSrc = Xsqlite3_value_pointer(tls, pValue, ts+25136) + pSrc = Xsqlite3_value_pointer(tls, pValue, ts+25183) if pSrc == uintptr(0) { return SQLITE_ERROR } @@ -122449,7 +122508,7 @@ var zSql uintptr var rc int32 - zSql = Xsqlite3_mprintf(tls, ts+25150, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) + zSql = Xsqlite3_mprintf(tls, ts+25197, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) if zSql != 0 { rc = Xsqlite3_prepare_v2(tls, (*Rtree)(unsafe.Pointer(pRtree)).Fdb, zSql, -1, bp+56, uintptr(0)) } else { @@ -122461,12 +122520,12 @@ if iCol == 0 { var zCol uintptr = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), 0) (*Rtree)(unsafe.Pointer(pRtree)).Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+25170, libc.VaList(bp+16, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zCol)) + ts+25217, libc.VaList(bp+16, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zCol)) } else { var zCol1 uintptr = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), iCol) var zCol2 uintptr = Xsqlite3_column_name(tls, 
*(*uintptr)(unsafe.Pointer(bp + 56)), iCol+1) (*Rtree)(unsafe.Pointer(pRtree)).Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+25202, libc.VaList(bp+32, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zCol1, zCol2)) + ts+25249, libc.VaList(bp+32, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zCol1, zCol2)) } } @@ -122692,7 +122751,7 @@ var pRtree uintptr = pVtab var rc int32 = SQLITE_NOMEM var zSql uintptr = Xsqlite3_mprintf(tls, - ts+25239, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName)) + ts+25286, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName)) if zSql != 0 { nodeBlobReset(tls, pRtree) rc = Xsqlite3_exec(tls, (*Rtree)(unsafe.Pointer(pRtree)).Fdb, zSql, uintptr(0), uintptr(0), uintptr(0)) @@ -122715,7 +122774,7 @@ bp := tls.Alloc(20) defer tls.Free(20) - var zFmt uintptr = ts + 25384 + var zFmt uintptr = ts + 25431 var zSql uintptr var rc int32 @@ -122763,7 +122822,7 @@ } var azName1 = [3]uintptr{ - ts + 25440, ts + 5053, ts + 16260, + ts + 25487, ts + 5053, ts + 16260, } var rtreeModule = Sqlite3_module{ @@ -122806,19 +122865,19 @@ var p uintptr = Xsqlite3_str_new(tls, db) var ii int32 Xsqlite3_str_appendf(tls, p, - ts+25445, + ts+25492, libc.VaList(bp, zDb, zPrefix)) for ii = 0; ii < int32((*Rtree)(unsafe.Pointer(pRtree)).FnAux); ii++ { - Xsqlite3_str_appendf(tls, p, ts+25507, libc.VaList(bp+16, ii)) + Xsqlite3_str_appendf(tls, p, ts+25554, libc.VaList(bp+16, ii)) } Xsqlite3_str_appendf(tls, p, - ts+25512, + ts+25559, libc.VaList(bp+24, zDb, zPrefix)) Xsqlite3_str_appendf(tls, p, - ts+25576, + ts+25623, libc.VaList(bp+40, zDb, zPrefix)) Xsqlite3_str_appendf(tls, p, - ts+25646, + ts+25693, libc.VaList(bp+56, zDb, zPrefix, (*Rtree)(unsafe.Pointer(pRtree)).FiNodeSize)) zCreate = Xsqlite3_str_finish(tls, p) if !(zCreate != 0) { @@ -122847,7 +122906,7 @@ if i != 3 || int32((*Rtree)(unsafe.Pointer(pRtree)).FnAux) == 0 { zFormat = azSql[i] } else { - zFormat = ts + 25695 + zFormat = ts + 25742 } zSql = Xsqlite3_mprintf(tls, zFormat, libc.VaList(bp+80, zDb, zPrefix)) if zSql != 0 { @@ -122859,7 +122918,7 @@ } if (*Rtree)(unsafe.Pointer(pRtree)).FnAux != 0 { (*Rtree)(unsafe.Pointer(pRtree)).FzReadAuxSql = Xsqlite3_mprintf(tls, - ts+25803, + ts+25850, libc.VaList(bp+96, zDb, zPrefix)) if (*Rtree)(unsafe.Pointer(pRtree)).FzReadAuxSql == uintptr(0) { rc = SQLITE_NOMEM @@ -122867,18 +122926,18 @@ var p uintptr = Xsqlite3_str_new(tls, db) var ii int32 var zSql uintptr - Xsqlite3_str_appendf(tls, p, ts+25848, libc.VaList(bp+112, zDb, zPrefix)) + Xsqlite3_str_appendf(tls, p, ts+25895, libc.VaList(bp+112, zDb, zPrefix)) for ii = 0; ii < int32((*Rtree)(unsafe.Pointer(pRtree)).FnAux); ii++ { if ii != 0 { Xsqlite3_str_append(tls, p, ts+12760, 1) } if ii < int32((*Rtree)(unsafe.Pointer(pRtree)).FnAuxNotNull) { - Xsqlite3_str_appendf(tls, p, ts+25875, libc.VaList(bp+128, ii, ii+2, ii)) + Xsqlite3_str_appendf(tls, p, ts+25922, libc.VaList(bp+128, ii, ii+2, ii)) } else { - Xsqlite3_str_appendf(tls, p, ts+25897, libc.VaList(bp+152, ii, ii+2)) + Xsqlite3_str_appendf(tls, p, ts+25944, libc.VaList(bp+152, ii, ii+2)) } } - Xsqlite3_str_appendf(tls, p, ts+25905, 
0) + Xsqlite3_str_appendf(tls, p, ts+25952, 0) zSql = Xsqlite3_str_finish(tls, p) if zSql == uintptr(0) { rc = SQLITE_NOMEM @@ -122893,14 +122952,14 @@ } var azSql = [8]uintptr{ - ts + 25921, - ts + 25974, - ts + 26019, - ts + 26071, - ts + 26125, - ts + 26170, - ts + 26228, - ts + 26283, + ts + 25968, + ts + 26021, + ts + 26066, + ts + 26118, + ts + 26172, + ts + 26217, + ts + 26275, + ts + 26330, } func getIntFromStmt(tls *libc.TLS, db uintptr, zSql uintptr, piVal uintptr) int32 { @@ -122929,7 +122988,7 @@ var zSql uintptr if isCreate != 0 { *(*int32)(unsafe.Pointer(bp + 48)) = 0 - zSql = Xsqlite3_mprintf(tls, ts+26330, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb)) + zSql = Xsqlite3_mprintf(tls, ts+26377, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb)) rc = getIntFromStmt(tls, db, zSql, bp+48) if rc == SQLITE_OK { (*Rtree)(unsafe.Pointer(pRtree)).FiNodeSize = *(*int32)(unsafe.Pointer(bp + 48)) - 64 @@ -122941,7 +123000,7 @@ } } else { zSql = Xsqlite3_mprintf(tls, - ts+26350, + ts+26397, libc.VaList(bp+16, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) rc = getIntFromStmt(tls, db, zSql, pRtree+16) if rc != SQLITE_OK { @@ -122949,7 +123008,7 @@ } else if (*Rtree)(unsafe.Pointer(pRtree)).FiNodeSize < 512-64 { rc = SQLITE_CORRUPT | int32(1)<<8 - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+26407, + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+26454, libc.VaList(bp+40, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) } } @@ -122991,10 +123050,10 @@ ii = 4 *(*[5]uintptr)(unsafe.Pointer(bp + 96)) = [5]uintptr{ uintptr(0), - ts + 26442, - ts + 26485, - ts + 26520, - ts + 26556, + ts + 26489, + ts + 26532, + ts + 26567, + ts + 26603, } if !(argc < 6 || argc > RTREE_MAX_AUX_COLUMN+3) { @@ -123025,7 +123084,7 @@ libc.Xmemcpy(tls, (*Rtree)(unsafe.Pointer(pRtree)).FzName, *(*uintptr)(unsafe.Pointer(argv + 2*4)), uint32(nName)) pSql = Xsqlite3_str_new(tls, db) - Xsqlite3_str_appendf(tls, pSql, ts+26593, + Xsqlite3_str_appendf(tls, pSql, ts+26640, libc.VaList(bp+16, rtreeTokenLength(tls, *(*uintptr)(unsafe.Pointer(argv + 3*4))), *(*uintptr)(unsafe.Pointer(argv + 3*4)))) ii = 4 __3: @@ -123037,7 +123096,7 @@ goto __6 } (*Rtree)(unsafe.Pointer(pRtree)).FnAux++ - Xsqlite3_str_appendf(tls, pSql, ts+26617, libc.VaList(bp+32, rtreeTokenLength(tls, zArg+uintptr(1)), zArg+uintptr(1))) + Xsqlite3_str_appendf(tls, pSql, ts+26664, libc.VaList(bp+32, rtreeTokenLength(tls, zArg+uintptr(1)), zArg+uintptr(1))) goto __7 __6: if !(int32((*Rtree)(unsafe.Pointer(pRtree)).FnAux) > 0) { @@ -123060,7 +123119,7 @@ goto __5 __5: ; - Xsqlite3_str_appendf(tls, pSql, ts+26623, 0) + Xsqlite3_str_appendf(tls, pSql, ts+26670, 0) zSql = Xsqlite3_str_finish(tls, pSql) if !!(zSql != 0) { goto __10 @@ -123156,7 +123215,7 @@ return rc } -var azFormat = [2]uintptr{ts + 26626, ts + 26637} +var azFormat = [2]uintptr{ts + 26673, ts + 26684} func rtreenode(tls *libc.TLS, ctx uintptr, nArg int32, apArg uintptr) { bp := tls.Alloc(600) @@ -123196,11 +123255,11 @@ if ii > 0 { Xsqlite3_str_append(tls, pOut, ts+10913, 1) } - Xsqlite3_str_appendf(tls, pOut, ts+26647, libc.VaList(bp, (*RtreeCell)(unsafe.Pointer(bp+552)).FiRowid)) + Xsqlite3_str_appendf(tls, pOut, ts+26694, libc.VaList(bp, (*RtreeCell)(unsafe.Pointer(bp+552)).FiRowid)) for jj = 0; jj < int32((*Rtree)(unsafe.Pointer(bp+48)).FnDim2); jj++ { - Xsqlite3_str_appendf(tls, pOut, ts+26653, libc.VaList(bp+8, float64(*(*RtreeValue)(unsafe.Pointer(bp + 552 + 8 + uintptr(jj)*4))))) + Xsqlite3_str_appendf(tls, pOut, 
ts+26700, libc.VaList(bp+8, float64(*(*RtreeValue)(unsafe.Pointer(bp + 552 + 8 + uintptr(jj)*4))))) } - Xsqlite3_str_append(tls, pOut, ts+26657, 1) + Xsqlite3_str_append(tls, pOut, ts+26704, 1) } errCode = Xsqlite3_str_errcode(tls, pOut) Xsqlite3_result_text(tls, ctx, Xsqlite3_str_finish(tls, pOut), -1, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{Xsqlite3_free}))) @@ -123211,7 +123270,7 @@ _ = nArg if Xsqlite3_value_type(tls, *(*uintptr)(unsafe.Pointer(apArg))) != SQLITE_BLOB || Xsqlite3_value_bytes(tls, *(*uintptr)(unsafe.Pointer(apArg))) < 2 { - Xsqlite3_result_error(tls, ctx, ts+26659, -1) + Xsqlite3_result_error(tls, ctx, ts+26706, -1) } else { var zBlob uintptr = Xsqlite3_value_blob(tls, *(*uintptr)(unsafe.Pointer(apArg))) if zBlob != 0 { @@ -123287,7 +123346,7 @@ if z == uintptr(0) { (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc = SQLITE_NOMEM } else { - (*RtreeCheck)(unsafe.Pointer(pCheck)).FzReport = Xsqlite3_mprintf(tls, ts+26692, + (*RtreeCheck)(unsafe.Pointer(pCheck)).FzReport = Xsqlite3_mprintf(tls, ts+26739, libc.VaList(bp, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzReport, func() uintptr { if (*RtreeCheck)(unsafe.Pointer(pCheck)).FzReport != 0 { return ts + 4055 @@ -123311,7 +123370,7 @@ if (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc == SQLITE_OK && (*RtreeCheck)(unsafe.Pointer(pCheck)).FpGetNode == uintptr(0) { (*RtreeCheck)(unsafe.Pointer(pCheck)).FpGetNode = rtreeCheckPrepare(tls, pCheck, - ts+26699, + ts+26746, libc.VaList(bp, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzDb, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzTab)) } @@ -123330,7 +123389,7 @@ } rtreeCheckReset(tls, pCheck, (*RtreeCheck)(unsafe.Pointer(pCheck)).FpGetNode) if (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc == SQLITE_OK && pRet == uintptr(0) { - rtreeCheckAppendMsg(tls, pCheck, ts+26744, libc.VaList(bp+16, iNode)) + rtreeCheckAppendMsg(tls, pCheck, ts+26791, libc.VaList(bp+16, iNode)) } } @@ -123344,8 +123403,8 @@ var rc int32 var pStmt uintptr *(*[2]uintptr)(unsafe.Pointer(bp + 80)) = [2]uintptr{ - ts + 26776, - ts + 26830, + ts + 26823, + ts + 26877, } if *(*uintptr)(unsafe.Pointer(pCheck + 24 + uintptr(bLeaf)*4)) == uintptr(0) { @@ -123360,23 +123419,23 @@ Xsqlite3_bind_int64(tls, pStmt, 1, iKey) rc = Xsqlite3_step(tls, pStmt) if rc == SQLITE_DONE { - rtreeCheckAppendMsg(tls, pCheck, ts+26878, + rtreeCheckAppendMsg(tls, pCheck, ts+26925, libc.VaList(bp+16, iKey, iVal, func() uintptr { if bLeaf != 0 { - return ts + 26923 + return ts + 26970 } - return ts + 26931 + return ts + 26978 }())) } else if rc == SQLITE_ROW { var ii I64 = Xsqlite3_column_int64(tls, pStmt, 0) if ii != iVal { rtreeCheckAppendMsg(tls, pCheck, - ts+26940, + ts+26987, libc.VaList(bp+40, iKey, ii, func() uintptr { if bLeaf != 0 { - return ts + 26923 + return ts + 26970 } - return ts + 26931 + return ts + 26978 }(), iKey, iVal)) } } @@ -123400,7 +123459,7 @@ return libc.Bool32(*(*RtreeValue)(unsafe.Pointer(bp + 48)) > *(*RtreeValue)(unsafe.Pointer(bp + 52))) }() != 0 { rtreeCheckAppendMsg(tls, pCheck, - ts+26998, libc.VaList(bp, i, iCell, iNode)) + ts+27045, libc.VaList(bp, i, iCell, iNode)) } if pParent != 0 { @@ -123420,7 +123479,7 @@ return libc.Bool32(*(*RtreeValue)(unsafe.Pointer(bp + 52)) > *(*RtreeValue)(unsafe.Pointer(bp + 60))) }() != 0 { rtreeCheckAppendMsg(tls, pCheck, - ts+27046, libc.VaList(bp+24, i, iCell, iNode)) + ts+27093, libc.VaList(bp+24, i, iCell, iNode)) } } } @@ -123437,14 +123496,14 @@ if aNode != 0 { if *(*int32)(unsafe.Pointer(bp + 48)) < 4 { rtreeCheckAppendMsg(tls, pCheck, - ts+27113, libc.VaList(bp, 
iNode, *(*int32)(unsafe.Pointer(bp + 48)))) + ts+27160, libc.VaList(bp, iNode, *(*int32)(unsafe.Pointer(bp + 48)))) } else { var nCell int32 var i int32 if aParent == uintptr(0) { iDepth = readInt16(tls, aNode) if iDepth > RTREE_MAX_DEPTH { - rtreeCheckAppendMsg(tls, pCheck, ts+27147, libc.VaList(bp+16, iDepth)) + rtreeCheckAppendMsg(tls, pCheck, ts+27194, libc.VaList(bp+16, iDepth)) Xsqlite3_free(tls, aNode) return } @@ -123452,7 +123511,7 @@ nCell = readInt16(tls, aNode+2) if 4+nCell*(8+(*RtreeCheck)(unsafe.Pointer(pCheck)).FnDim*2*4) > *(*int32)(unsafe.Pointer(bp + 48)) { rtreeCheckAppendMsg(tls, pCheck, - ts+27177, + ts+27224, libc.VaList(bp+24, iNode, nCell, *(*int32)(unsafe.Pointer(bp + 48)))) } else { for i = 0; i < nCell; i++ { @@ -123481,14 +123540,14 @@ if (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc == SQLITE_OK { var pCount uintptr - pCount = rtreeCheckPrepare(tls, pCheck, ts+27232, + pCount = rtreeCheckPrepare(tls, pCheck, ts+27279, libc.VaList(bp, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzDb, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzTab, zTbl)) if pCount != 0 { if Xsqlite3_step(tls, pCount) == SQLITE_ROW { var nActual I64 = Xsqlite3_column_int64(tls, pCount, 0) if nActual != nExpect { rtreeCheckAppendMsg(tls, pCheck, - ts+27263, libc.VaList(bp+24, zTbl, nExpect, nActual)) + ts+27310, libc.VaList(bp+24, zTbl, nExpect, nActual)) } } (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc = Xsqlite3_finalize(tls, pCount) @@ -123515,7 +123574,7 @@ } if (*RtreeCheck)(unsafe.Pointer(bp+32)).Frc == SQLITE_OK { - pStmt = rtreeCheckPrepare(tls, bp+32, ts+27330, libc.VaList(bp, zDb, zTab)) + pStmt = rtreeCheckPrepare(tls, bp+32, ts+27377, libc.VaList(bp, zDb, zTab)) if pStmt != 0 { nAux = Xsqlite3_column_count(tls, pStmt) - 2 Xsqlite3_finalize(tls, pStmt) @@ -123524,12 +123583,12 @@ } } - pStmt = rtreeCheckPrepare(tls, bp+32, ts+25150, libc.VaList(bp+16, zDb, zTab)) + pStmt = rtreeCheckPrepare(tls, bp+32, ts+25197, libc.VaList(bp+16, zDb, zTab)) if pStmt != 0 { var rc int32 (*RtreeCheck)(unsafe.Pointer(bp + 32)).FnDim = (Xsqlite3_column_count(tls, pStmt) - 1 - nAux) / 2 if (*RtreeCheck)(unsafe.Pointer(bp+32)).FnDim < 1 { - rtreeCheckAppendMsg(tls, bp+32, ts+27358, 0) + rtreeCheckAppendMsg(tls, bp+32, ts+27405, 0) } else if SQLITE_ROW == Xsqlite3_step(tls, pStmt) { (*RtreeCheck)(unsafe.Pointer(bp + 32)).FbInt = libc.Bool32(Xsqlite3_column_type(tls, pStmt, 1) == SQLITE_INTEGER) } @@ -123543,8 +123602,8 @@ if (*RtreeCheck)(unsafe.Pointer(bp+32)).Frc == SQLITE_OK { rtreeCheckNode(tls, bp+32, 0, uintptr(0), int64(1)) } - rtreeCheckCount(tls, bp+32, ts+27389, int64((*RtreeCheck)(unsafe.Pointer(bp+32)).FnLeaf)) - rtreeCheckCount(tls, bp+32, ts+27396, int64((*RtreeCheck)(unsafe.Pointer(bp+32)).FnNonLeaf)) + rtreeCheckCount(tls, bp+32, ts+27436, int64((*RtreeCheck)(unsafe.Pointer(bp+32)).FnLeaf)) + rtreeCheckCount(tls, bp+32, ts+27443, int64((*RtreeCheck)(unsafe.Pointer(bp+32)).FnNonLeaf)) } Xsqlite3_finalize(tls, (*RtreeCheck)(unsafe.Pointer(bp+32)).FpGetNode) @@ -123552,7 +123611,7 @@ Xsqlite3_finalize(tls, *(*uintptr)(unsafe.Pointer(bp + 32 + 24 + 1*4))) if bEnd != 0 { - var rc int32 = Xsqlite3_exec(tls, db, ts+27404, uintptr(0), uintptr(0), uintptr(0)) + var rc int32 = Xsqlite3_exec(tls, db, ts+27451, uintptr(0), uintptr(0), uintptr(0)) if (*RtreeCheck)(unsafe.Pointer(bp+32)).Frc == SQLITE_OK { (*RtreeCheck)(unsafe.Pointer(bp + 32)).Frc = rc } @@ -123567,7 +123626,7 @@ if nArg != 1 && nArg != 2 { Xsqlite3_result_error(tls, ctx, - ts+27408, -1) + ts+27455, -1) } else { var rc int32 
*(*uintptr)(unsafe.Pointer(bp)) = uintptr(0) @@ -123585,7 +123644,7 @@ if *(*uintptr)(unsafe.Pointer(bp)) != 0 { return *(*uintptr)(unsafe.Pointer(bp)) } - return ts + 18005 + return ts + 18052 }(), -1, libc.UintptrFromInt32(-1)) } else { Xsqlite3_result_error_code(tls, ctx, rc) @@ -123955,11 +124014,11 @@ var db uintptr = Xsqlite3_context_db_handle(tls, context) var x uintptr = Xsqlite3_str_new(tls, db) var i int32 - Xsqlite3_str_append(tls, x, ts+27459, 1) + Xsqlite3_str_append(tls, x, ts+27506, 1) for i = 0; i < (*GeoPoly)(unsafe.Pointer(p)).FnVertex; i++ { - Xsqlite3_str_appendf(tls, x, ts+27461, libc.VaList(bp, float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2)*4))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2+1)*4))))) + Xsqlite3_str_appendf(tls, x, ts+27508, libc.VaList(bp, float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2)*4))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2+1)*4))))) } - Xsqlite3_str_appendf(tls, x, ts+27472, libc.VaList(bp+16, float64(*(*GeoCoord)(unsafe.Pointer(p + 8))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + 1*4))))) + Xsqlite3_str_appendf(tls, x, ts+27519, libc.VaList(bp+16, float64(*(*GeoCoord)(unsafe.Pointer(p + 8))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + 1*4))))) Xsqlite3_result_text(tls, context, Xsqlite3_str_finish(tls, x), -1, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{Xsqlite3_free}))) Xsqlite3_free(tls, p) } @@ -123979,19 +124038,19 @@ var x uintptr = Xsqlite3_str_new(tls, db) var i int32 var cSep uint8 = uint8('\'') - Xsqlite3_str_appendf(tls, x, ts+27483, 0) + Xsqlite3_str_appendf(tls, x, ts+27530, 0) for i = 0; i < (*GeoPoly)(unsafe.Pointer(p)).FnVertex; i++ { - Xsqlite3_str_appendf(tls, x, ts+27501, libc.VaList(bp, int32(cSep), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2)*4))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2+1)*4))))) + Xsqlite3_str_appendf(tls, x, ts+27548, libc.VaList(bp, int32(cSep), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2)*4))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2+1)*4))))) cSep = uint8(' ') } - Xsqlite3_str_appendf(tls, x, ts+27509, libc.VaList(bp+24, float64(*(*GeoCoord)(unsafe.Pointer(p + 8))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + 1*4))))) + Xsqlite3_str_appendf(tls, x, ts+27556, libc.VaList(bp+24, float64(*(*GeoCoord)(unsafe.Pointer(p + 8))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + 1*4))))) for i = 1; i < argc; i++ { var z uintptr = Xsqlite3_value_text(tls, *(*uintptr)(unsafe.Pointer(argv + uintptr(i)*4))) if z != 0 && *(*uint8)(unsafe.Pointer(z)) != 0 { - Xsqlite3_str_appendf(tls, x, ts+27517, libc.VaList(bp+40, z)) + Xsqlite3_str_appendf(tls, x, ts+27564, libc.VaList(bp+40, z)) } } - Xsqlite3_str_appendf(tls, x, ts+27521, 0) + Xsqlite3_str_appendf(tls, x, ts+27568, 0) Xsqlite3_result_text(tls, context, Xsqlite3_str_finish(tls, x), -1, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{Xsqlite3_free}))) Xsqlite3_free(tls, p) } @@ -124910,7 +124969,7 @@ libc.Xmemcpy(tls, (*Rtree)(unsafe.Pointer(pRtree)).FzName, *(*uintptr)(unsafe.Pointer(argv + 2*4)), uint32(nName)) pSql = Xsqlite3_str_new(tls, db) - Xsqlite3_str_appendf(tls, pSql, ts+27534, 0) + Xsqlite3_str_appendf(tls, pSql, ts+27581, 0) (*Rtree)(unsafe.Pointer(pRtree)).FnAux = U8(1) (*Rtree)(unsafe.Pointer(pRtree)).FnAuxNotNull = U8(1) ii = 3 @@ -124919,7 +124978,7 @@ goto __4 } (*Rtree)(unsafe.Pointer(pRtree)).FnAux++ - Xsqlite3_str_appendf(tls, pSql, ts+27556, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(argv + 
uintptr(ii)*4)))) + Xsqlite3_str_appendf(tls, pSql, ts+27603, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(argv + uintptr(ii)*4)))) goto __3 __3: ii++ @@ -124927,7 +124986,7 @@ goto __4 __4: ; - Xsqlite3_str_appendf(tls, pSql, ts+26623, 0) + Xsqlite3_str_appendf(tls, pSql, ts+26670, 0) zSql = Xsqlite3_str_finish(tls, pSql) if !!(zSql != 0) { goto __5 @@ -125164,7 +125223,7 @@ } if iFuncTerm >= 0 { (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxNum = idxNum - (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxStr = ts + 27560 + (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxStr = ts + 27607 (*sqlite3_index_constraint_usage)(unsafe.Pointer((*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FaConstraintUsage + uintptr(iFuncTerm)*8)).FargvIndex = 1 (*sqlite3_index_constraint_usage)(unsafe.Pointer((*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FaConstraintUsage + uintptr(iFuncTerm)*8)).Fomit = uint8(0) (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FestimatedCost = 300.0 @@ -125172,7 +125231,7 @@ return SQLITE_OK } (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxNum = 4 - (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxStr = ts + 27566 + (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxStr = ts + 27613 (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FestimatedCost = 3000000.0 (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FestimatedRows = int64(100000) return SQLITE_OK @@ -125284,7 +125343,7 @@ if !(*(*int32)(unsafe.Pointer(bp + 48)) == SQLITE_ERROR) { goto __4 } - (*Sqlite3_vtab)(unsafe.Pointer(pVtab)).FzErrMsg = Xsqlite3_mprintf(tls, ts+27575, 0) + (*Sqlite3_vtab)(unsafe.Pointer(pVtab)).FzErrMsg = Xsqlite3_mprintf(tls, ts+27622, 0) __4: ; goto geopoly_update_end @@ -125416,14 +125475,14 @@ func geopolyFindFunction(tls *libc.TLS, pVtab uintptr, nArg int32, zName uintptr, pxFunc uintptr, ppArg uintptr) int32 { _ = pVtab _ = nArg - if Xsqlite3_stricmp(tls, zName, ts+27615) == 0 { + if Xsqlite3_stricmp(tls, zName, ts+27662) == 0 { *(*uintptr)(unsafe.Pointer(pxFunc)) = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{geopolyOverlapFunc})) *(*uintptr)(unsafe.Pointer(ppArg)) = uintptr(0) return SQLITE_INDEX_CONSTRAINT_FUNCTION } - if Xsqlite3_stricmp(tls, zName, ts+27631) == 0 { + if Xsqlite3_stricmp(tls, zName, ts+27678) == 0 { *(*uintptr)(unsafe.Pointer(pxFunc)) = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{geopolyWithinFunc})) @@ -125488,7 +125547,7 @@ uintptr(0), aAgg[i].FxStep, aAgg[i].FxFinal) } if rc == SQLITE_OK { - rc = Xsqlite3_create_module_v2(tls, db, ts+27646, uintptr(unsafe.Pointer(&geopolyModule)), uintptr(0), uintptr(0)) + rc = Xsqlite3_create_module_v2(tls, db, ts+27693, uintptr(unsafe.Pointer(&geopolyModule)), uintptr(0), uintptr(0)) } return rc } @@ -125500,25 +125559,25 @@ F__ccgo_pad1 [2]byte FzName uintptr }{ - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27654}, - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27667}, - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27680}, - {FxFunc: 0, FnArg: int8(-1), FbPure: uint8(1), FzName: ts + 27693}, - {FxFunc: 0, FnArg: int8(2), FbPure: uint8(1), FzName: ts + 27631}, - {FxFunc: 0, FnArg: int8(3), FbPure: uint8(1), FzName: ts + 27705}, - {FxFunc: 0, FnArg: int8(2), FbPure: uint8(1), FzName: ts + 27615}, - {FxFunc: 0, FnArg: int8(1), FzName: ts + 27728}, - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27742}, - {FxFunc: 0, FnArg: int8(7), FbPure: uint8(1), FzName: ts + 27755}, - {FxFunc: 0, 
FnArg: int8(4), FbPure: uint8(1), FzName: ts + 27769}, - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27785}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27701}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27714}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27727}, + {FxFunc: 0, FnArg: int8(-1), FbPure: uint8(1), FzName: ts + 27740}, + {FxFunc: 0, FnArg: int8(2), FbPure: uint8(1), FzName: ts + 27678}, + {FxFunc: 0, FnArg: int8(3), FbPure: uint8(1), FzName: ts + 27752}, + {FxFunc: 0, FnArg: int8(2), FbPure: uint8(1), FzName: ts + 27662}, + {FxFunc: 0, FnArg: int8(1), FzName: ts + 27775}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27789}, + {FxFunc: 0, FnArg: int8(7), FbPure: uint8(1), FzName: ts + 27802}, + {FxFunc: 0, FnArg: int8(4), FbPure: uint8(1), FzName: ts + 27816}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27832}, } var aAgg = [1]struct { FxStep uintptr FxFinal uintptr FzName uintptr }{ - {FxStep: 0, FxFinal: 0, FzName: ts + 27797}, + {FxStep: 0, FxFinal: 0, FzName: ts + 27844}, } // Register the r-tree module with database handle db. This creates the @@ -125528,26 +125587,26 @@ var utf8 int32 = SQLITE_UTF8 var rc int32 - rc = Xsqlite3_create_function(tls, db, ts+27816, 2, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + rc = Xsqlite3_create_function(tls, db, ts+27863, 2, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rtreenode})), uintptr(0), uintptr(0)) if rc == SQLITE_OK { - rc = Xsqlite3_create_function(tls, db, ts+27826, 1, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + rc = Xsqlite3_create_function(tls, db, ts+27873, 1, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rtreedepth})), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { - rc = Xsqlite3_create_function(tls, db, ts+27837, -1, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + rc = Xsqlite3_create_function(tls, db, ts+27884, -1, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rtreecheck})), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { var c uintptr = uintptr(RTREE_COORD_REAL32) - rc = Xsqlite3_create_module_v2(tls, db, ts+27560, uintptr(unsafe.Pointer(&rtreeModule)), c, uintptr(0)) + rc = Xsqlite3_create_module_v2(tls, db, ts+27607, uintptr(unsafe.Pointer(&rtreeModule)), c, uintptr(0)) } if rc == SQLITE_OK { var c uintptr = uintptr(RTREE_COORD_INT32) - rc = Xsqlite3_create_module_v2(tls, db, ts+27848, uintptr(unsafe.Pointer(&rtreeModule)), c, uintptr(0)) + rc = Xsqlite3_create_module_v2(tls, db, ts+27895, uintptr(unsafe.Pointer(&rtreeModule)), c, uintptr(0)) } if rc == SQLITE_OK { rc = sqlite3_geopoly_init(tls, db) @@ -125601,7 +125660,7 @@ Xsqlite3_result_error_nomem(tls, ctx) rtreeMatchArgFree(tls, pBlob) } else { - Xsqlite3_result_pointer(tls, ctx, pBlob, ts+25136, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{rtreeMatchArgFree}))) + Xsqlite3_result_pointer(tls, ctx, pBlob, ts+25183, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{rtreeMatchArgFree}))) } } } @@ -125919,7 +125978,7 @@ nOut = rbuDeltaOutputSize(tls, aDelta, nDelta) if nOut < 0 { - Xsqlite3_result_error(tls, context, ts+27858, -1) + Xsqlite3_result_error(tls, context, ts+27905, -1) return } @@ -125930,7 +125989,7 @@ nOut2 = rbuDeltaApply(tls, aOrig, nOrig, aDelta, nDelta, aOut) if nOut2 != nOut { Xsqlite3_free(tls, aOut) - 
Xsqlite3_result_error(tls, context, ts+27858, -1) + Xsqlite3_result_error(tls, context, ts+27905, -1) } else { Xsqlite3_result_blob(tls, context, aOut, nOut, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{Xsqlite3_free}))) } @@ -126031,7 +126090,7 @@ rbuObjIterClearStatements(tls, pIter) if (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx == uintptr(0) { rc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+27879, uintptr(0), uintptr(0), p+36) + ts+27926, uintptr(0), uintptr(0), p+36) } if rc == SQLITE_OK { @@ -126095,7 +126154,7 @@ Xsqlite3_result_text(tls, pCtx, zIn, -1, uintptr(0)) } } else { - if libc.Xstrlen(tls, zIn) > Size_t(4) && libc.Xmemcmp(tls, ts+25049, zIn, uint32(4)) == 0 { + if libc.Xstrlen(tls, zIn) > Size_t(4) && libc.Xmemcmp(tls, ts+25096, zIn, uint32(4)) == 0 { var i int32 for i = 4; int32(*(*uint8)(unsafe.Pointer(zIn + uintptr(i)))) >= '0' && int32(*(*uint8)(unsafe.Pointer(zIn + uintptr(i)))) <= '9'; i++ { } @@ -126116,16 +126175,16 @@ rc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, pIter, p+36, Xsqlite3_mprintf(tls, - ts+28050, libc.VaList(bp, func() uintptr { + ts+28097, libc.VaList(bp, func() uintptr { if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - return ts + 28200 + return ts + 28247 } return ts + 1547 }()))) if rc == SQLITE_OK { rc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+4, p+36, - ts+28241) + ts+28288) } (*RbuObjIter)(unsafe.Pointer(pIter)).FbCleanup = 1 @@ -126241,7 +126300,7 @@ (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+32, p+36, Xsqlite3_mprintf(tls, - ts+28366, libc.VaList(bp, zTab))) + ts+28413, libc.VaList(bp, zTab))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc != SQLITE_OK || Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 32))) != SQLITE_ROW) { goto __1 } @@ -126259,7 +126318,7 @@ *(*int32)(unsafe.Pointer(piTnum)) = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 32)), 1) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+32+1*4, p+36, - Xsqlite3_mprintf(tls, ts+28485, libc.VaList(bp+8, zTab))) + Xsqlite3_mprintf(tls, ts+28532, libc.VaList(bp+8, zTab))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc != 0) { goto __3 } @@ -126277,7 +126336,7 @@ } (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+32+2*4, p+36, Xsqlite3_mprintf(tls, - ts+28506, libc.VaList(bp+16, zIdx))) + ts+28553, libc.VaList(bp+16, zIdx))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK) { goto __7 } @@ -126300,7 +126359,7 @@ __5: ; (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+32+3*4, p+36, - Xsqlite3_mprintf(tls, ts+28557, libc.VaList(bp+24, zTab))) + Xsqlite3_mprintf(tls, ts+28604, libc.VaList(bp+24, zTab))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK) { goto __10 } @@ -126346,7 +126405,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { libc.Xmemcpy(tls, (*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed, (*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk, uint32(unsafe.Sizeof(U8(0)))*uint32((*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol)) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+16, p+36, - Xsqlite3_mprintf(tls, ts+28578, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) + Xsqlite3_mprintf(tls, 
ts+28625, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) } (*RbuObjIter)(unsafe.Pointer(pIter)).FnIndex = 0 @@ -126361,7 +126420,7 @@ libc.Xmemset(tls, (*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed, 0x01, uint32(unsafe.Sizeof(U8(0)))*uint32((*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol)) } (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+20, p+36, - Xsqlite3_mprintf(tls, ts+28606, libc.VaList(bp+8, zIdx))) + Xsqlite3_mprintf(tls, ts+28653, libc.VaList(bp+8, zIdx))) for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 20))) { var iCid int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 20)), 1) if iCid >= 0 { @@ -126401,7 +126460,7 @@ rbuTableType(tls, p, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, pIter+36, bp+56, pIter+60) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NOTABLE { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+19482, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl)) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+19529, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl)) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc != 0 { return (*Sqlite3rbu)(unsafe.Pointer(p)).Frc @@ -126411,18 +126470,18 @@ } (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp+60, p+36, - Xsqlite3_mprintf(tls, ts+28635, libc.VaList(bp+8, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl))) + Xsqlite3_mprintf(tls, ts+28682, libc.VaList(bp+8, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl))) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { nCol = Xsqlite3_column_count(tls, *(*uintptr)(unsafe.Pointer(bp + 60))) rbuAllocateIterArrays(tls, p, pIter, nCol) } for i = 0; (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && i < nCol; i++ { var zName uintptr = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp + 60)), i) - if Xsqlite3_strnicmp(tls, ts+28654, zName, 4) != 0 { + if Xsqlite3_strnicmp(tls, ts+28701, zName, 4) != 0 { var zCopy uintptr = rbuStrndup(tls, zName, p+32) *(*int32)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FaiSrcOrder + uintptr((*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol)*4)) = (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(libc.PostIncInt32(&(*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol, 1))*4)) = zCopy - } else if 0 == Xsqlite3_stricmp(tls, ts+28659, zName) { + } else if 0 == Xsqlite3_stricmp(tls, ts+28706, zName) { bRbuRowid = 1 } } @@ -126434,18 +126493,18 @@ bRbuRowid != libc.Bool32((*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_VTAB || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE) { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, - ts+28669, libc.VaList(bp+16, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, + ts+28716, libc.VaList(bp+16, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, func() uintptr { if bRbuRowid != 0 { - return ts + 28698 + return ts + 28745 } - return ts + 28711 + return ts + 28758 }())) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+60, p+36, - 
Xsqlite3_mprintf(tls, ts+28720, libc.VaList(bp+32, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) + Xsqlite3_mprintf(tls, ts+28767, libc.VaList(bp+32, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) } for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 60))) { var zName uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 60)), 1) @@ -126459,7 +126518,7 @@ } if i == (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+28742, + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+28789, libc.VaList(bp+40, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zName)) } else { var iPk int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 60)), 5) @@ -126506,7 +126565,7 @@ var i int32 for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { var z uintptr = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*4)) - zList = rbuMPrintf(tls, p, ts+28769, libc.VaList(bp, zList, zSep, z)) + zList = rbuMPrintf(tls, p, ts+28816, libc.VaList(bp, zList, zSep, z)) zSep = ts + 14607 } return zList @@ -126524,7 +126583,7 @@ for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { if int32(*(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(i)))) == iPk { var zCol uintptr = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*4)) - zRet = rbuMPrintf(tls, p, ts+28778, libc.VaList(bp, zRet, zSep, zPre, zCol, zPost)) + zRet = rbuMPrintf(tls, p, ts+28825, libc.VaList(bp, zRet, zSep, zPre, zCol, zPost)) zSep = zSeparator break } @@ -126546,25 +126605,25 @@ if bRowid != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+72, p+36, Xsqlite3_mprintf(tls, - ts+28791, libc.VaList(bp, zWrite, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) + ts+28838, libc.VaList(bp, zWrite, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 72))) { var iMax Sqlite3_int64 = Xsqlite3_column_int64(tls, *(*uintptr)(unsafe.Pointer(bp + 72)), 0) - zRet = rbuMPrintf(tls, p, ts+28823, libc.VaList(bp+16, iMax)) + zRet = rbuMPrintf(tls, p, ts+28870, libc.VaList(bp+16, iMax)) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 72))) } else { - var zOrder uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+1547, ts+14607, ts+28846) - var zSelect uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+28852, ts+28859, ts+4950) + var zOrder uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+1547, ts+14607, ts+28893) + var zSelect uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+28899, ts+28906, ts+4950) var zList uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+1547, ts+14607, ts+1547) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+72, p+36, Xsqlite3_mprintf(tls, - ts+28867, + ts+28914, libc.VaList(bp+24, zSelect, zWrite, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zOrder))) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 72))) { var zVal uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 72)), 0) - zRet = rbuMPrintf(tls, p, ts+28909, 
libc.VaList(bp+56, zList, zVal)) + zRet = rbuMPrintf(tls, p, ts+28956, libc.VaList(bp+56, zList, zVal)) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 72))) } @@ -126606,7 +126665,7 @@ *(*uintptr)(unsafe.Pointer(bp + 176)) = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+176, p+36, - Xsqlite3_mprintf(tls, ts+28606, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx))) + Xsqlite3_mprintf(tls, ts+28653, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx))) __1: if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 176)))) { goto __2 @@ -126641,7 +126700,7 @@ zCol = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*4)) goto __7 __6: - zCol = ts + 28929 + zCol = ts + 28976 __7: ; goto __5 @@ -126649,11 +126708,11 @@ zCol = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(iCid)*4)) __5: ; - zLhs = rbuMPrintf(tls, p, ts+28937, + zLhs = rbuMPrintf(tls, p, ts+28984, libc.VaList(bp+8, zLhs, zSep, zCol, zCollate)) - zOrder = rbuMPrintf(tls, p, ts+28958, + zOrder = rbuMPrintf(tls, p, ts+29005, libc.VaList(bp+40, zOrder, zSep, iCol, zCol, zCollate)) - zSelect = rbuMPrintf(tls, p, ts+28994, + zSelect = rbuMPrintf(tls, p, ts+29041, libc.VaList(bp+80, zSelect, zSep, iCol, zCol)) zSep = ts + 14607 iCol++ @@ -126673,7 +126732,7 @@ *(*uintptr)(unsafe.Pointer(bp + 180)) = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+180, p+36, - Xsqlite3_mprintf(tls, ts+29021, + Xsqlite3_mprintf(tls, ts+29068, libc.VaList(bp+112, zSelect, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zOrder))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 180)))) { goto __13 @@ -126700,7 +126759,7 @@ ; __18: ; - zVector = rbuMPrintf(tls, p, ts+29069, libc.VaList(bp+136, zVector, zSep, zQuoted)) + zVector = rbuMPrintf(tls, p, ts+29116, libc.VaList(bp+136, zVector, zSep, zQuoted)) zSep = ts + 14607 goto __15 __15: @@ -126712,7 +126771,7 @@ if !!(bFailed != 0) { goto __20 } - zRet = rbuMPrintf(tls, p, ts+29076, libc.VaList(bp+160, zLhs, zVector)) + zRet = rbuMPrintf(tls, p, ts+29123, libc.VaList(bp+160, zLhs, zVector)) __20: ; __13: @@ -126745,7 +126804,7 @@ if rc == SQLITE_OK { rc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+200, p+36, - Xsqlite3_mprintf(tls, ts+28606, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx))) + Xsqlite3_mprintf(tls, ts+28653, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx))) } for rc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 200))) { @@ -126757,7 +126816,7 @@ if iCid == -2 { var iSeq int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 200)), 0) - zRet = Xsqlite3_mprintf(tls, ts+29088, libc.VaList(bp+8, zRet, zCom, + zRet = Xsqlite3_mprintf(tls, ts+29135, libc.VaList(bp+8, zRet, zCom, (*RbuSpan)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FaIdxCol+uintptr(iSeq)*8)).FnSpan, (*RbuSpan)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FaIdxCol+uintptr(iSeq)*8)).FzSpan, zCollate)) zType = ts + 1547 } else { @@ -126769,37 +126828,37 @@ zCol = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*4)) } else if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - zCol = ts + 
28929 + zCol = ts + 28976 } else { - zCol = ts + 28659 + zCol = ts + 28706 } zType = ts + 1112 } else { zCol = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(iCid)*4)) zType = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblType + uintptr(iCid)*4)) } - zRet = Xsqlite3_mprintf(tls, ts+29110, libc.VaList(bp+48, zRet, zCom, zCol, zCollate)) + zRet = Xsqlite3_mprintf(tls, ts+29157, libc.VaList(bp+48, zRet, zCom, zCol, zCollate)) } if (*RbuObjIter)(unsafe.Pointer(pIter)).FbUnique == 0 || Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 200)), 5) != 0 { var zOrder uintptr = func() uintptr { if bDesc != 0 { - return ts + 28846 + return ts + 28893 } return ts + 1547 }() - zImpPK = Xsqlite3_mprintf(tls, ts+29130, + zImpPK = Xsqlite3_mprintf(tls, ts+29177, libc.VaList(bp+80, zImpPK, zCom, nBind, zCol, zOrder)) } - zImpCols = Xsqlite3_mprintf(tls, ts+29151, + zImpCols = Xsqlite3_mprintf(tls, ts+29198, libc.VaList(bp+120, zImpCols, zCom, nBind, zCol, zType, zCollate)) zWhere = Xsqlite3_mprintf(tls, - ts+29184, libc.VaList(bp+168, zWhere, zAnd, nBind, zCol)) + ts+29231, libc.VaList(bp+168, zWhere, zAnd, nBind, zCol)) if zRet == uintptr(0) || zImpPK == uintptr(0) || zImpCols == uintptr(0) || zWhere == uintptr(0) { rc = SQLITE_NOMEM } zCom = ts + 14607 - zAnd = ts + 21518 + zAnd = ts + 21565 nBind++ } @@ -126838,9 +126897,9 @@ for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { if *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed + uintptr(i))) != 0 { var zCol uintptr = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*4)) - zList = Xsqlite3_mprintf(tls, ts+29208, libc.VaList(bp, zList, zS, zObj, zCol)) + zList = Xsqlite3_mprintf(tls, ts+29255, libc.VaList(bp, zList, zS, zObj, zCol)) } else { - zList = Xsqlite3_mprintf(tls, ts+29220, libc.VaList(bp+32, zList, zS)) + zList = Xsqlite3_mprintf(tls, ts+29267, libc.VaList(bp+32, zList, zS)) } zS = ts + 14607 if zList == uintptr(0) { @@ -126850,7 +126909,7 @@ } if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { - zList = rbuMPrintf(tls, p, ts+29229, libc.VaList(bp+48, zList, zObj)) + zList = rbuMPrintf(tls, p, ts+29276, libc.VaList(bp+48, zList, zObj)) } } return zList @@ -126862,18 +126921,18 @@ var zList uintptr = uintptr(0) if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_VTAB || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { - zList = rbuMPrintf(tls, p, ts+29244, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol+1)) + zList = rbuMPrintf(tls, p, ts+29291, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol+1)) } else if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL { var zSep uintptr = ts + 1547 var i int32 for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { if *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(i))) != 0 { - zList = rbuMPrintf(tls, p, ts+29258, libc.VaList(bp+8, zList, zSep, i, i+1)) - zSep = ts + 21518 + zList = rbuMPrintf(tls, p, ts+29305, libc.VaList(bp+8, zList, zSep, i, i+1)) + zSep = ts + 21565 } } zList = rbuMPrintf(tls, p, - ts+29270, libc.VaList(bp+40, zList)) + ts+29317, libc.VaList(bp+40, zList)) } else { var zSep uintptr = ts + 1547 @@ -126881,8 +126940,8 @@ for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { if *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(i))) != 
0 { var zCol uintptr = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*4)) - zList = rbuMPrintf(tls, p, ts+29320, libc.VaList(bp+48, zList, zSep, zCol, i+1)) - zSep = ts + 21518 + zList = rbuMPrintf(tls, p, ts+29367, libc.VaList(bp+48, zList, zSep, zCol, i+1)) + zSep = ts + 21565 } } } @@ -126891,7 +126950,7 @@ func rbuBadControlError(tls *libc.TLS, p uintptr) { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+29333, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+29380, 0) } func rbuObjIterGetSetlist(tls *libc.TLS, p uintptr, pIter uintptr, zMask uintptr) uintptr { @@ -126909,15 +126968,15 @@ for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { var c uint8 = *(*uint8)(unsafe.Pointer(zMask + uintptr(*(*int32)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FaiSrcOrder + uintptr(i)*4))))) if int32(c) == 'x' { - zList = rbuMPrintf(tls, p, ts+29320, + zList = rbuMPrintf(tls, p, ts+29367, libc.VaList(bp, zList, zSep, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*4)), i+1)) zSep = ts + 14607 } else if int32(c) == 'd' { - zList = rbuMPrintf(tls, p, ts+29359, + zList = rbuMPrintf(tls, p, ts+29406, libc.VaList(bp+32, zList, zSep, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*4)), *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*4)), i+1)) zSep = ts + 14607 } else if int32(c) == 'f' { - zList = rbuMPrintf(tls, p, ts+29389, + zList = rbuMPrintf(tls, p, ts+29436, libc.VaList(bp+72, zList, zSep, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*4)), *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*4)), i+1)) zSep = ts + 14607 } @@ -126954,19 +127013,19 @@ var z uintptr = uintptr(0) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - var zSep uintptr = ts + 29426 + var zSep uintptr = ts + 29473 *(*uintptr)(unsafe.Pointer(bp + 56)) = uintptr(0) *(*uintptr)(unsafe.Pointer(bp + 60)) = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+56, p+36, - Xsqlite3_mprintf(tls, ts+28578, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) + Xsqlite3_mprintf(tls, ts+28625, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 56))) { var zOrig uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), 3) if zOrig != 0 && libc.Xstrcmp(tls, zOrig, ts+16148) == 0 { var zIdx uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), 1) if zIdx != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+60, p+36, - Xsqlite3_mprintf(tls, ts+28606, libc.VaList(bp+8, zIdx))) + Xsqlite3_mprintf(tls, ts+28653, libc.VaList(bp+8, zIdx))) } break } @@ -126978,15 +127037,15 @@ var zCol uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 60)), 2) var zDesc uintptr if Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 60)), 3) != 0 { - zDesc = ts + 28846 + zDesc = ts + 28893 } else { zDesc = ts + 1547 } - z = rbuMPrintf(tls, p, ts+29439, libc.VaList(bp+16, z, zSep, zCol, zDesc)) + z = rbuMPrintf(tls, p, ts+29486, libc.VaList(bp+16, z, zSep, zCol, zDesc)) 
zSep = ts + 14607 } } - z = rbuMPrintf(tls, p, ts+29450, libc.VaList(bp+48, z)) + z = rbuMPrintf(tls, p, ts+29497, libc.VaList(bp+48, z)) rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 60))) } return z @@ -127006,7 +127065,7 @@ var zPk uintptr = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+168, p+36, - ts+29454) + ts+29501) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { Xsqlite3_bind_int(tls, *(*uintptr)(unsafe.Pointer(bp + 168)), 1, tnum) if SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 168))) { @@ -127015,7 +127074,7 @@ } if zIdx != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+172, p+36, - Xsqlite3_mprintf(tls, ts+28606, libc.VaList(bp, zIdx))) + Xsqlite3_mprintf(tls, ts+28653, libc.VaList(bp, zIdx))) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 168))) @@ -127025,23 +127084,23 @@ var iCid int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 172)), 1) var bDesc int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 172)), 3) var zCollate uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 172)), 4) - zCols = rbuMPrintf(tls, p, ts+29504, libc.VaList(bp+8, zCols, zComma, + zCols = rbuMPrintf(tls, p, ts+29551, libc.VaList(bp+8, zCols, zComma, iCid, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblType + uintptr(iCid)*4)), zCollate)) - zPk = rbuMPrintf(tls, p, ts+29526, libc.VaList(bp+48, zPk, zComma, iCid, func() uintptr { + zPk = rbuMPrintf(tls, p, ts+29573, libc.VaList(bp+48, zPk, zComma, iCid, func() uintptr { if bDesc != 0 { - return ts + 28846 + return ts + 28893 } return ts + 1547 }())) zComma = ts + 14607 } } - zCols = rbuMPrintf(tls, p, ts+29536, libc.VaList(bp+80, zCols)) + zCols = rbuMPrintf(tls, p, ts+29583, libc.VaList(bp+80, zCols)) rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 172))) Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+88, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6434, 1, tnum)) rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+29551, + ts+29598, libc.VaList(bp+120, zCols, zPk)) Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+136, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6434, 0, 0)) } @@ -127067,13 +127126,13 @@ (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6434, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zCol, uintptr(0), bp+192, uintptr(0), uintptr(0), uintptr(0)) if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_IPK && *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(iCol))) != 0 { - zPk = ts + 29613 + zPk = ts + 29660 } - zSql = rbuMPrintf(tls, p, ts+29626, + zSql = rbuMPrintf(tls, p, ts+29673, libc.VaList(bp+32, zSql, zComma, zCol, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblType + uintptr(iCol)*4)), zPk, *(*uintptr)(unsafe.Pointer(bp + 192)), func() uintptr { if *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabNotNull + uintptr(iCol))) != 0 { - return ts + 29653 + return ts + 29700 } return ts + 1547 }())) @@ -127083,16 +127142,16 @@ if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_WITHOUT_ROWID { var zPk uintptr = rbuWithoutRowidPK(tls, p, pIter) if zPk != 0 { - zSql = rbuMPrintf(tls, p, ts+29663, libc.VaList(bp+88, zSql, zPk)) + zSql = rbuMPrintf(tls, p, ts+29710, libc.VaList(bp+88, zSql, zPk)) } } Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, 
libc.VaList(bp+104, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6434, 1, tnum)) - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+29670, + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+29717, libc.VaList(bp+136, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zSql, func() uintptr { if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_WITHOUT_ROWID { - return ts + 29702 + return ts + 29749 } return ts + 1547 }())) @@ -127109,7 +127168,7 @@ if zBind != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, pIter+88, p+36, Xsqlite3_mprintf(tls, - ts+29717, + ts+29764, libc.VaList(bp, p+24, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zCollist, zRbuRowid, zBind))) } } @@ -127146,7 +127205,7 @@ if *(*int32)(unsafe.Pointer(bp + 4)) == SQLITE_OK { *(*int32)(unsafe.Pointer(bp + 4)) = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp, p+36, - ts+29774) + ts+29821) } if *(*int32)(unsafe.Pointer(bp + 4)) == SQLITE_OK { var rc2 int32 @@ -127251,7 +127310,7 @@ var zLimit uintptr = uintptr(0) if nOffset != 0 { - zLimit = Xsqlite3_mprintf(tls, ts+29840, libc.VaList(bp, nOffset)) + zLimit = Xsqlite3_mprintf(tls, ts+29887, libc.VaList(bp, nOffset)) if !(zLimit != 0) { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_NOMEM } @@ -127274,7 +127333,7 @@ Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+8, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6434, 0, 1)) Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+40, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6434, 1, tnum)) rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+29860, + ts+29907, libc.VaList(bp+72, zTbl, *(*uintptr)(unsafe.Pointer(bp + 600)), *(*uintptr)(unsafe.Pointer(bp + 604)))) Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+96, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6434, 0, 0)) @@ -127282,13 +127341,13 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+80, p+36, - Xsqlite3_mprintf(tls, ts+29925, libc.VaList(bp+128, zTbl, zBind))) + Xsqlite3_mprintf(tls, ts+29972, libc.VaList(bp+128, zTbl, zBind))) } if libc.Bool32((*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0)) == 0 && (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+84, p+36, - Xsqlite3_mprintf(tls, ts+29961, libc.VaList(bp+144, zTbl, *(*uintptr)(unsafe.Pointer(bp + 608))))) + Xsqlite3_mprintf(tls, ts+30008, libc.VaList(bp+144, zTbl, *(*uintptr)(unsafe.Pointer(bp + 608))))) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { @@ -127304,7 +127363,7 @@ } zSql = Xsqlite3_mprintf(tls, - ts+29995, + ts+30042, libc.VaList(bp+160, zCollist, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zPart, @@ -127312,9 +127371,9 @@ if zStart != 0 { return func() uintptr { if zPart != 0 { - return ts + 30056 + return ts + 30103 } - return ts + 30060 + return ts + 30107 }() } return ts + 1547 @@ -127323,20 +127382,20 @@ Xsqlite3_free(tls, zStart) } else if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { zSql = Xsqlite3_mprintf(tls, - ts+30066, + ts+30113, libc.VaList(bp+216, zCollist, p+24, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zPart, zCollist, zLimit)) } else { 
zSql = Xsqlite3_mprintf(tls, - ts+30127, + ts+30174, libc.VaList(bp+264, zCollist, p+24, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zPart, zCollist, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zPart, func() uintptr { if zPart != 0 { - return ts + 30056 + return ts + 30103 } - return ts + 30060 + return ts + 30107 }(), zCollist, zLimit)) } @@ -127373,16 +127432,16 @@ if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_VTAB { return ts + 1547 } - return ts + 30286 + return ts + 30333 }() if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+80, pz, Xsqlite3_mprintf(tls, - ts+30295, + ts+30342, libc.VaList(bp+344, zWrite, zTbl, zCollist, func() uintptr { if bRbuRowid != 0 { - return ts + 30331 + return ts + 30378 } return ts + 1547 }(), zBindings))) @@ -127391,32 +127450,32 @@ if libc.Bool32((*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0)) == 0 && (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+84, pz, Xsqlite3_mprintf(tls, - ts+30341, libc.VaList(bp+384, zWrite, zTbl, zWhere))) + ts+30388, libc.VaList(bp+384, zWrite, zTbl, zWhere))) } if libc.Bool32((*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0)) == 0 && (*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed != 0 { var zRbuRowid uintptr = ts + 1547 if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { - zRbuRowid = ts + 30369 + zRbuRowid = ts + 30416 } rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, - ts+30381, libc.VaList(bp+408, p+24, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, func() uintptr { + ts+30428, libc.VaList(bp+408, p+24, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, func() uintptr { if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL { - return ts + 30457 + return ts + 30504 } return ts + 1547 }(), (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl)) rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+30474, + ts+30521, libc.VaList(bp+440, zWrite, zTbl, zOldlist, zWrite, zTbl, zOldlist, zWrite, zTbl, zNewlist)) if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+30773, + ts+30820, libc.VaList(bp+512, zWrite, zTbl, zNewlist)) } @@ -127429,9 +127488,9 @@ var zOrder uintptr = uintptr(0) if bRbuRowid != 0 { if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - zRbuRowid = ts + 30872 + zRbuRowid = ts + 30919 } else { - zRbuRowid = ts + 30882 + zRbuRowid = ts + 30929 } } @@ -127444,7 +127503,7 @@ } } if bRbuRowid != 0 { - zOrder = rbuMPrintf(tls, p, ts+28929, 0) + zOrder = rbuMPrintf(tls, p, ts+28976, 0) } else { zOrder = rbuObjIterGetPkList(tls, p, pIter, ts+1547, ts+14607, ts+1547) } @@ -127453,11 +127512,11 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, pIter+76, pz, Xsqlite3_mprintf(tls, - ts+30893, + ts+30940, libc.VaList(bp+536, zCollist, func() uintptr { if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - return ts + 30941 + return ts + 30988 } return ts + 1547 }(), @@ -127470,7 +127529,7 @@ }(), func() uintptr { if zOrder != 0 { - return ts + 22852 + 
return ts + 22899 } return ts + 1547 }(), zOrder, @@ -127538,9 +127597,9 @@ var zPrefix uintptr = ts + 1547 if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType != RBU_PK_VTAB { - zPrefix = ts + 30286 + zPrefix = ts + 30333 } - zUpdate = Xsqlite3_mprintf(tls, ts+30947, + zUpdate = Xsqlite3_mprintf(tls, ts+30994, libc.VaList(bp, zPrefix, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zSet, zWhere)) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pUp+4, p+36, zUpdate) @@ -127599,7 +127658,7 @@ } *(*int32)(unsafe.Pointer(bp + 12)) = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp+8, p+36, - Xsqlite3_mprintf(tls, ts+30977, libc.VaList(bp, p+24))) + Xsqlite3_mprintf(tls, ts+31024, libc.VaList(bp, p+24))) for *(*int32)(unsafe.Pointer(bp + 12)) == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 8))) { switch Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 8)), 0) { case RBU_STATE_STAGE: @@ -127672,18 +127731,18 @@ Xsqlite3_file_control(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+6434, SQLITE_FCNTL_RBUCNT, p) if (*Sqlite3rbu)(unsafe.Pointer(p)).FzState == uintptr(0) { var zFile uintptr = Xsqlite3_db_filename(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+6434) - (*Sqlite3rbu)(unsafe.Pointer(p)).FzState = rbuMPrintf(tls, p, ts+31007, libc.VaList(bp, zFile, zFile)) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzState = rbuMPrintf(tls, p, ts+31054, libc.VaList(bp, zFile, zFile)) } } if (*Sqlite3rbu)(unsafe.Pointer(p)).FzState != 0 { - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+31035, libc.VaList(bp+16, (*Sqlite3rbu)(unsafe.Pointer(p)).FzState)) + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+31082, libc.VaList(bp+16, (*Sqlite3rbu)(unsafe.Pointer(p)).FzState)) libc.Xmemcpy(tls, p+24, ts+3279, uint32(4)) } else { libc.Xmemcpy(tls, p+24, ts+6434, uint32(4)) } - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+31053, libc.VaList(bp+24, p+24)) + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+31100, libc.VaList(bp+24, p+24)) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { var bOpen int32 = 0 @@ -127723,11 +127782,11 @@ return } (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31119, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31166, 0) } else { var zTarget uintptr var zExtra uintptr = uintptr(0) - if libc.Xstrlen(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu) >= Size_t(5) && 0 == libc.Xmemcmp(tls, ts+24199, (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu, uint32(5)) { + if libc.Xstrlen(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu) >= Size_t(5) && 0 == libc.Xmemcmp(tls, ts+24246, (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu, uint32(5)) { zExtra = (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu + 5 for *(*uint8)(unsafe.Pointer(zExtra)) != 0 { if int32(*(*uint8)(unsafe.Pointer(libc.PostIncUintptr(&zExtra, 1)))) == '?' 
{ @@ -127739,13 +127798,13 @@ } } - zTarget = Xsqlite3_mprintf(tls, ts+31151, + zTarget = Xsqlite3_mprintf(tls, ts+31198, libc.VaList(bp+32, Xsqlite3_db_filename(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+6434), func() uintptr { if zExtra == uintptr(0) { return ts + 1547 } - return ts + 31183 + return ts + 31230 }(), func() uintptr { if zExtra == uintptr(0) { return ts + 1547 @@ -127764,21 +127823,21 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_create_function(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+31185, -1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { + ts+31232, -1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rbuTmpInsertFunc})), uintptr(0), uintptr(0)) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_create_function(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+31200, 2, SQLITE_UTF8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + ts+31247, 2, SQLITE_UTF8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rbuFossilDeltaFunc})), uintptr(0), uintptr(0)) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_create_function(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, - ts+31217, -1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { + ts+31264, -1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rbuTargetNameFunc})), uintptr(0), uintptr(0)) } @@ -127786,7 +127845,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_file_control(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6434, SQLITE_FCNTL_RBU, p) } - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31233, 0) + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31280, 0) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_file_control(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6434, SQLITE_FCNTL_RBU, p) @@ -127794,7 +127853,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_NOTFOUND { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31261, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31308, 0) } } @@ -127823,14 +127882,14 @@ if pState == uintptr(0) { (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage = 0 if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31233, uintptr(0), uintptr(0), uintptr(0)) + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31280, uintptr(0), uintptr(0), uintptr(0)) } } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { var rc2 int32 (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage = RBU_STAGE_CAPTURE - rc2 = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31279, uintptr(0), uintptr(0), uintptr(0)) + rc2 = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31326, uintptr(0), uintptr(0), uintptr(0)) if rc2 != SQLITE_NOTICE { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = rc2 } @@ -127956,7 +128015,7 @@ func rbuExclusiveCheckpoint(tls *libc.TLS, db uintptr) int32 { var zUri uintptr = Xsqlite3_db_filename(tls, db, uintptr(0)) - return 
Xsqlite3_uri_boolean(tls, zUri, ts+31314, 0) + return Xsqlite3_uri_boolean(tls, zUri, ts+31361, 0) } func rbuMoveOalFile(tls *libc.TLS, p uintptr) { @@ -127971,8 +128030,8 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { zMove = Xsqlite3_db_filename(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+6434) } - zOal = Xsqlite3_mprintf(tls, ts+31339, libc.VaList(bp, zMove)) - zWal = Xsqlite3_mprintf(tls, ts+31346, libc.VaList(bp+8, zMove)) + zOal = Xsqlite3_mprintf(tls, ts+31386, libc.VaList(bp, zMove)) + zWal = Xsqlite3_mprintf(tls, ts+31393, libc.VaList(bp+8, zMove)) if zWal == uintptr(0) || zOal == uintptr(0) { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_NOMEM @@ -128089,7 +128148,7 @@ (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx == uintptr(0) && (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_IPK && *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(i))) != 0 && Xsqlite3_column_type(tls, (*RbuObjIter)(unsafe.Pointer(pIter)).FpSelect, i) == SQLITE_NULL { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_MISMATCH - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+23837, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+23884, 0) return } @@ -128182,7 +128241,7 @@ var iCookie int32 = 1000000 (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, dbread, bp+8, p+36, - ts+31353) + ts+31400) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { if SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 8))) { iCookie = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 8)), 0) @@ -128190,7 +128249,7 @@ rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 8))) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31375, libc.VaList(bp, iCookie+1)) + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31422, libc.VaList(bp, iCookie+1)) } } } @@ -128211,7 +128270,7 @@ rc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp+168, p+36, Xsqlite3_mprintf(tls, - ts+31402, + ts+31449, libc.VaList(bp, p+24, RBU_STATE_STAGE, eStage, RBU_STATE_TBL, (*Sqlite3rbu)(unsafe.Pointer(p)).Fobjiter.FzTbl, @@ -128241,9 +128300,9 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { *(*uintptr)(unsafe.Pointer(bp + 24)) = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp+24, p+36, - Xsqlite3_mprintf(tls, ts+31560, libc.VaList(bp, zPragma))) + Xsqlite3_mprintf(tls, ts+31607, libc.VaList(bp, zPragma))) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) { - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31575, + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31622, libc.VaList(bp+8, zPragma, Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 24)), 0))) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 24))) @@ -128257,10 +128316,10 @@ *(*uintptr)(unsafe.Pointer(bp)) = uintptr(0) *(*uintptr)(unsafe.Pointer(bp + 4)) = uintptr(0) - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31595, uintptr(0), uintptr(0), p+36) + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31642, uintptr(0), uintptr(0), p+36) if 
(*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp, p+36, - ts+31620) + ts+31667) } for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp))) == SQLITE_ROW { @@ -128274,12 +128333,12 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp, p+36, - ts+31728) + ts+31775) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+4, p+36, - ts+31793) + ts+31840) } for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp))) == SQLITE_ROW { @@ -128291,7 +128350,7 @@ (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_reset(tls, *(*uintptr)(unsafe.Pointer(bp + 4))) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31837, uintptr(0), uintptr(0), p+36) + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31884, uintptr(0), uintptr(0), p+36) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp))) @@ -128319,7 +128378,7 @@ if (*RbuObjIter)(unsafe.Pointer(pIter)).FbCleanup != 0 { if libc.Bool32((*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0)) == 0 && (*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed != 0 { rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, - ts+31862, libc.VaList(bp, p+24, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl)) + ts+31909, libc.VaList(bp, p+24, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl)) } } else { rbuObjIterPrepareAll(tls, p, pIter, 0) @@ -128441,7 +128500,7 @@ if rc == SQLITE_OK && !(int32((*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl) != 0) { rc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31890, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31937, 0) } if rc == SQLITE_OK { @@ -128457,7 +128516,7 @@ bp := tls.Alloc(12) defer tls.Free(12) - var zOal uintptr = rbuMPrintf(tls, p, ts+31339, libc.VaList(bp, (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget)) + var zOal uintptr = rbuMPrintf(tls, p, ts+31386, libc.VaList(bp, (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget)) if zOal != 0 { *(*uintptr)(unsafe.Pointer(bp + 8)) = uintptr(0) Xsqlite3_file_control(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6434, SQLITE_FCNTL_VFS_POINTER, bp+8) @@ -128474,7 +128533,7 @@ defer tls.Free(76) Xsqlite3_randomness(tls, int32(unsafe.Sizeof(int32(0))), bp+8) - Xsqlite3_snprintf(tls, int32(unsafe.Sizeof([64]uint8{})), bp+12, ts+31915, libc.VaList(bp, *(*int32)(unsafe.Pointer(bp + 8)))) + Xsqlite3_snprintf(tls, int32(unsafe.Sizeof([64]uint8{})), bp+12, ts+31962, libc.VaList(bp, *(*int32)(unsafe.Pointer(bp + 8)))) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3rbu_create_vfs(tls, bp+12, uintptr(0)) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { var pVfs uintptr = Xsqlite3_vfs_find(tls, bp+12) @@ -128508,7 +128567,7 @@ rc = prepareFreeAndCollectError(tls, db, bp+8, bp+12, Xsqlite3_mprintf(tls, - ts+31926, libc.VaList(bp, Xsqlite3_value_text(tls, *(*uintptr)(unsafe.Pointer(apVal)))))) + ts+31973, libc.VaList(bp, Xsqlite3_value_text(tls, *(*uintptr)(unsafe.Pointer(apVal)))))) if rc != SQLITE_OK { Xsqlite3_result_error(tls, 
pCtx, *(*uintptr)(unsafe.Pointer(bp + 12)), -1) } else { @@ -128538,13 +128597,13 @@ (*Sqlite3rbu)(unsafe.Pointer(p)).FnPhaseOneStep = int64(-1) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_create_function(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, - ts+31998, 1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { + ts+32045, 1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rbuIndexCntFunc})), uintptr(0), uintptr(0)) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp, p+36, - ts+32012) + ts+32059) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { if SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp))) { @@ -128555,7 +128614,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && bExists != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp, p+36, - ts+32069) + ts+32116) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { if SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp))) { (*Sqlite3rbu)(unsafe.Pointer(p)).FnPhaseOneStep = Xsqlite3_column_int64(tls, *(*uintptr)(unsafe.Pointer(bp)), 0) @@ -128629,7 +128688,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && (*Rbu_file)(unsafe.Pointer((*Sqlite3rbu)(unsafe.Pointer(p)).FpTargetFd)).FpWalFd != 0 { if (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage == RBU_STAGE_OAL { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32143, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32190, 0) } else if (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage == RBU_STAGE_MOVE { (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage = RBU_STAGE_CKPT (*Sqlite3rbu)(unsafe.Pointer(p)).FnStep = 0 @@ -128647,12 +128706,12 @@ }() if (*Rbu_file)(unsafe.Pointer(pFd)).FiCookie != (*RbuState)(unsafe.Pointer(pState)).FiCookie { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_BUSY - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32175, + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32222, libc.VaList(bp+8, func() uintptr { if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - return ts + 32207 + return ts + 32254 } - return ts + 32214 + return ts + 32261 }())) } } @@ -128676,14 +128735,14 @@ } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, db, ts+32221, uintptr(0), uintptr(0), p+36) + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, db, ts+32268, uintptr(0), uintptr(0), p+36) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { var frc int32 = Xsqlite3_file_control(tls, db, ts+6434, SQLITE_FCNTL_ZIPVFS, uintptr(0)) if frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, - db, ts+32237, uintptr(0), uintptr(0), p+36) + db, ts+32284, uintptr(0), uintptr(0), p+36) } } @@ -128737,7 +128796,7 @@ } if zState != 0 { var n int32 = int32(libc.Xstrlen(tls, zState)) - if n >= 7 && 0 == libc.Xmemcmp(tls, ts+32261, zState+uintptr(n-7), uint32(7)) { + if n >= 7 && 0 == libc.Xmemcmp(tls, ts+32308, zState+uintptr(n-7), uint32(7)) { return rbuMisuseError(tls) } } @@ -128764,7 +128823,7 @@ var i uint32 var nErrmsg Size_t = libc.Xstrlen(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg) for i = uint32(0); i < nErrmsg-Size_t(8); i++ { - if libc.Xmemcmp(tls, 
(*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg+uintptr(i), ts+30286, uint32(8)) == 0 { + if libc.Xmemcmp(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg+uintptr(i), ts+30333, uint32(8)) == 0 { var nDel int32 = 8 for int32(*(*uint8)(unsafe.Pointer((*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg + uintptr(i+uint32(nDel))))) >= '0' && int32(*(*uint8)(unsafe.Pointer((*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg + uintptr(i+uint32(nDel))))) <= '9' { nDel++ @@ -128800,7 +128859,7 @@ rbuObjIterFinalize(tls, p+48) if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) && (*Sqlite3rbu)(unsafe.Pointer(p)).Frc != SQLITE_OK && (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu != 0 { - var rc2 int32 = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+32269, uintptr(0), uintptr(0), uintptr(0)) + var rc2 int32 = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+32316, uintptr(0), uintptr(0), uintptr(0)) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_DONE && rc2 != SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = rc2 } @@ -128919,12 +128978,12 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { zBegin = ts + 14496 } else { - zBegin = ts + 32221 + zBegin = ts + 32268 } rc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, zBegin, uintptr(0), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { - rc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+32221, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+32268, uintptr(0), uintptr(0), uintptr(0)) } } @@ -129270,7 +129329,7 @@ })(unsafe.Pointer(&struct{ uintptr }{xControl})).f(tls, (*Rbu_file)(unsafe.Pointer(p)).FpReal, SQLITE_FCNTL_ZIPVFS, bp+16) if rc == SQLITE_OK { rc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(pRbu)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32296, 0) + (*Sqlite3rbu)(unsafe.Pointer(pRbu)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32343, 0) } else if rc == SQLITE_NOTFOUND { (*Sqlite3rbu)(unsafe.Pointer(pRbu)).FpTargetFd = p (*Rbu_file)(unsafe.Pointer(p)).FpRbu = pRbu @@ -129295,7 +129354,7 @@ if rc == SQLITE_OK && op == SQLITE_FCNTL_VFSNAME { var pRbuVfs uintptr = (*Rbu_file)(unsafe.Pointer(p)).FpRbuVfs var zIn uintptr = *(*uintptr)(unsafe.Pointer(pArg)) - var zOut uintptr = Xsqlite3_mprintf(tls, ts+32319, libc.VaList(bp, (*Rbu_vfs)(unsafe.Pointer(pRbuVfs)).Fbase.FzName, zIn)) + var zOut uintptr = Xsqlite3_mprintf(tls, ts+32366, libc.VaList(bp, (*Rbu_vfs)(unsafe.Pointer(pRbuVfs)).Fbase.FzName, zIn)) *(*uintptr)(unsafe.Pointer(pArg)) = zOut if zOut == uintptr(0) { rc = SQLITE_NOMEM @@ -129455,7 +129514,7 @@ } if oflags&SQLITE_OPEN_MAIN_DB != 0 && - Xsqlite3_uri_boolean(tls, zName, ts+32330, 0) != 0 { + Xsqlite3_uri_boolean(tls, zName, ts+32377, 0) != 0 { oflags = SQLITE_OPEN_TEMP_DB | SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE | SQLITE_OPEN_EXCLUSIVE | SQLITE_OPEN_DELETEONCLOSE zOpen = uintptr(0) } @@ -130280,7 +130339,7 @@ rc = Xsqlite3_table_column_metadata(tls, db, zDb, zThis, uintptr(0), uintptr(0), uintptr(0), uintptr(0), uintptr(0), uintptr(0)) if rc == SQLITE_OK { zPragma = Xsqlite3_mprintf(tls, - ts+32341, 0) + ts+32388, 0) } else if rc == SQLITE_ERROR { zPragma = Xsqlite3_mprintf(tls, ts+1547, 0) } else { @@ -130293,7 +130352,7 @@ return rc } } else { - zPragma = Xsqlite3_mprintf(tls, ts+32462, libc.VaList(bp, zDb, zThis)) + zPragma = Xsqlite3_mprintf(tls, ts+32509, libc.VaList(bp, zDb, zThis)) } if !(zPragma != 0) { *(*uintptr)(unsafe.Pointer(pazCol)) = uintptr(0) @@ -130972,9 +131031,9 @@ for i = 0; i < nCol; i++ { if 
*(*U8)(unsafe.Pointer(abPK + uintptr(i))) != 0 { - zRet = Xsqlite3_mprintf(tls, ts+32491, + zRet = Xsqlite3_mprintf(tls, ts+32538, libc.VaList(bp, zRet, zSep, zDb1, zTab, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*4)), zDb2, zTab, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*4)))) - zSep = ts + 21518 + zSep = ts + 21565 if zRet == uintptr(0) { break } @@ -130997,9 +131056,9 @@ if int32(*(*U8)(unsafe.Pointer(abPK + uintptr(i)))) == 0 { bHave = 1 zRet = Xsqlite3_mprintf(tls, - ts+32525, + ts+32572, libc.VaList(bp, zRet, zSep, zDb1, zTab, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*4)), zDb2, zTab, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*4)))) - zSep = ts + 32566 + zSep = ts + 32613 if zRet == uintptr(0) { break } @@ -131007,7 +131066,7 @@ } if bHave == 0 { - zRet = Xsqlite3_mprintf(tls, ts+7523, 0) + zRet = Xsqlite3_mprintf(tls, ts+7512, 0) } return zRet @@ -131018,7 +131077,7 @@ defer tls.Free(40) var zRet uintptr = Xsqlite3_mprintf(tls, - ts+32571, + ts+32618, libc.VaList(bp, zDb1, zTbl, zDb2, zTbl, zExpr)) return zRet } @@ -131061,7 +131120,7 @@ rc = SQLITE_NOMEM } else { var zStmt uintptr = Xsqlite3_mprintf(tls, - ts+32649, + ts+32696, libc.VaList(bp, (*Sqlite3_session)(unsafe.Pointer(pSession)).FzDb, (*SessionTable)(unsafe.Pointer(pTab)).FzName, zFrom, (*SessionTable)(unsafe.Pointer(pTab)).FzName, zExpr, zExpr2)) if zStmt == uintptr(0) { rc = SQLITE_NOMEM @@ -131188,7 +131247,7 @@ if !(pzErrMsg != 0) { goto __16 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+32702, 0) + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+32749, 0) __16: ; rc = SQLITE_SCHEMA @@ -131664,7 +131723,7 @@ if 0 == Xsqlite3_stricmp(tls, ts+11341, zTab) { zSql = Xsqlite3_mprintf(tls, - ts+32729, libc.VaList(bp, zDb)) + ts+32776, libc.VaList(bp, zDb)) if zSql == uintptr(0) { *(*int32)(unsafe.Pointer(bp + 20)) = SQLITE_NOMEM } @@ -131673,18 +131732,18 @@ var zSep uintptr = ts + 1547 *(*SessionBuffer)(unsafe.Pointer(bp + 8)) = SessionBuffer{} - sessionAppendStr(tls, bp+8, ts+32839, bp+20) + sessionAppendStr(tls, bp+8, ts+32886, bp+20) sessionAppendIdent(tls, bp+8, zDb, bp+20) sessionAppendStr(tls, bp+8, ts+1560, bp+20) sessionAppendIdent(tls, bp+8, zTab, bp+20) - sessionAppendStr(tls, bp+8, ts+32854, bp+20) + sessionAppendStr(tls, bp+8, ts+32901, bp+20) for i = 0; i < nCol; i++ { if *(*U8)(unsafe.Pointer(abPK + uintptr(i))) != 0 { sessionAppendStr(tls, bp+8, zSep, bp+20) sessionAppendIdent(tls, bp+8, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*4)), bp+20) - sessionAppendStr(tls, bp+8, ts+32862, bp+20) + sessionAppendStr(tls, bp+8, ts+32909, bp+20) sessionAppendInteger(tls, bp+8, i+1, bp+20) - zSep = ts + 21518 + zSep = ts + 21565 } } zSql = (*SessionBuffer)(unsafe.Pointer(bp + 8)).FaBuf @@ -131793,7 +131852,7 @@ if (*Sqlite3_session)(unsafe.Pointer(pSession)).Frc != 0 { return (*Sqlite3_session)(unsafe.Pointer(pSession)).Frc } - *(*int32)(unsafe.Pointer(bp + 24)) = Xsqlite3_exec(tls, (*Sqlite3_session)(unsafe.Pointer(pSession)).Fdb, ts+32868, uintptr(0), uintptr(0), uintptr(0)) + *(*int32)(unsafe.Pointer(bp + 24)) = Xsqlite3_exec(tls, (*Sqlite3_session)(unsafe.Pointer(pSession)).Fdb, ts+32915, uintptr(0), uintptr(0), uintptr(0)) if *(*int32)(unsafe.Pointer(bp + 24)) != SQLITE_OK { return *(*int32)(unsafe.Pointer(bp + 24)) } @@ -131885,7 +131944,7 @@ } Xsqlite3_free(tls, (*SessionBuffer)(unsafe.Pointer(bp+12)).FaBuf) - Xsqlite3_exec(tls, db, ts+32888, uintptr(0), uintptr(0), uintptr(0)) + Xsqlite3_exec(tls, db, ts+32935, uintptr(0), uintptr(0), uintptr(0)) 
Xsqlite3_mutex_leave(tls, Xsqlite3_db_mutex(tls, db)) return *(*int32)(unsafe.Pointer(bp + 24)) } @@ -132148,7 +132207,7 @@ rc = sessionInputBuffer(tls, pIn, 9) if rc == SQLITE_OK { if (*SessionInput)(unsafe.Pointer(pIn)).FiNext >= (*SessionInput)(unsafe.Pointer(pIn)).FnData { - rc = Xsqlite3CorruptError(tls, 219078) + rc = Xsqlite3CorruptError(tls, 219169) } else { eType = int32(*(*U8)(unsafe.Pointer((*SessionInput)(unsafe.Pointer(pIn)).FaData + uintptr(libc.PostIncInt32(&(*SessionInput)(unsafe.Pointer(pIn)).FiNext, 1))))) @@ -132171,7 +132230,7 @@ rc = sessionInputBuffer(tls, pIn, *(*int32)(unsafe.Pointer(bp))) if rc == SQLITE_OK { if *(*int32)(unsafe.Pointer(bp)) < 0 || *(*int32)(unsafe.Pointer(bp)) > (*SessionInput)(unsafe.Pointer(pIn)).FnData-(*SessionInput)(unsafe.Pointer(pIn)).FiNext { - rc = Xsqlite3CorruptError(tls, 219098) + rc = Xsqlite3CorruptError(tls, 219189) } else { var enc U8 = func() uint8 { if eType == SQLITE_TEXT { @@ -132213,7 +132272,7 @@ nRead = nRead + sessionVarintGet(tls, (*SessionInput)(unsafe.Pointer(pIn)).FaData+uintptr((*SessionInput)(unsafe.Pointer(pIn)).FiNext+nRead), bp) if *(*int32)(unsafe.Pointer(bp)) < 0 || *(*int32)(unsafe.Pointer(bp)) > 65536 { - rc = Xsqlite3CorruptError(tls, 219152) + rc = Xsqlite3CorruptError(tls, 219243) } else { rc = sessionInputBuffer(tls, pIn, nRead+*(*int32)(unsafe.Pointer(bp))+100) nRead = nRead + *(*int32)(unsafe.Pointer(bp)) @@ -132274,7 +132333,7 @@ (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Ftblhdr.FnBuf = 0 sessionBufferGrow(tls, p+44, int64(nByte), bp+4) } else { - *(*int32)(unsafe.Pointer(bp + 4)) = Xsqlite3CorruptError(tls, 219240) + *(*int32)(unsafe.Pointer(bp + 4)) = Xsqlite3CorruptError(tls, 219331) } } @@ -132348,13 +132407,13 @@ } if (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FzTab == uintptr(0) || (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FbPatchset != 0 && (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FbInvert != 0 { - return libc.AssignPtrInt32(p+68, Xsqlite3CorruptError(tls, 219326)) + return libc.AssignPtrInt32(p+68, Xsqlite3CorruptError(tls, 219417)) } (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fop = int32(op) (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FbIndirect = int32(*(*U8)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fin.FaData + uintptr(libc.PostIncInt32(&(*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fin.FiNext, 1))))) if (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fop != SQLITE_UPDATE && (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fop != SQLITE_DELETE && (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fop != SQLITE_INSERT { - return libc.AssignPtrInt32(p+68, Xsqlite3CorruptError(tls, 219332)) + return libc.AssignPtrInt32(p+68, Xsqlite3CorruptError(tls, 219423)) } if paRec != 0 { @@ -132416,7 +132475,7 @@ if *(*U8)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FabPK + uintptr(i))) != 0 { *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FapValue + uintptr(i)*4)) = *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FapValue + uintptr(i+(*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FnCol)*4)) if *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FapValue + uintptr(i)*4)) == uintptr(0) { - return libc.AssignPtrInt32(p+68, Xsqlite3CorruptError(tls, 219376)) + return libc.AssignPtrInt32(p+68, Xsqlite3CorruptError(tls, 219467)) } *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FapValue + uintptr(i+(*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FnCol)*4)) = uintptr(0) } @@ -132789,7 
+132848,7 @@ goto __6 __11: - *(*int32)(unsafe.Pointer(bp + 32)) = Xsqlite3CorruptError(tls, 219741) + *(*int32)(unsafe.Pointer(bp + 32)) = Xsqlite3CorruptError(tls, 219832) goto finished_invert __6: ; @@ -132966,34 +133025,34 @@ (*SessionUpdate)(unsafe.Pointer(pUp)).FaMask = pUp + 1*12 libc.Xmemcpy(tls, (*SessionUpdate)(unsafe.Pointer(pUp)).FaMask, (*SessionApplyCtx)(unsafe.Pointer(p)).FaUpdateMask, uint32(nU32)*uint32(unsafe.Sizeof(U32(0)))) - sessionAppendStr(tls, bp, ts+32906, bp+12) + sessionAppendStr(tls, bp, ts+32953, bp+12) sessionAppendIdent(tls, bp, (*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FzTab, bp+12) - sessionAppendStr(tls, bp, ts+32919, bp+12) + sessionAppendStr(tls, bp, ts+32966, bp+12) for ii = 0; ii < (*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FnCol; ii++ { if int32(*(*U8)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FabPK + uintptr(ii)))) == 0 && *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FapValue + uintptr((*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FnCol+ii)*4)) != 0 { sessionAppendStr(tls, bp, zSep, bp+12) sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(ii)*4)), bp+12) - sessionAppendStr(tls, bp, ts+32925, bp+12) + sessionAppendStr(tls, bp, ts+32972, bp+12) sessionAppendInteger(tls, bp, ii*2+1, bp+12) zSep = ts + 14607 } } zSep = ts + 1547 - sessionAppendStr(tls, bp, ts+32854, bp+12) + sessionAppendStr(tls, bp, ts+32901, bp+12) for ii = 0; ii < (*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FnCol; ii++ { if *(*U8)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FabPK + uintptr(ii))) != 0 || bPatchset == 0 && *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FapValue + uintptr(ii)*4)) != 0 { sessionAppendStr(tls, bp, zSep, bp+12) if bStat1 != 0 && ii == 1 { sessionAppendStr(tls, bp, - ts+32930, bp+12) + ts+32977, bp+12) } else { sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(ii)*4)), bp+12) - sessionAppendStr(tls, bp, ts+32862, bp+12) + sessionAppendStr(tls, bp, ts+32909, bp+12) sessionAppendInteger(tls, bp, ii*2+2, bp+12) } - zSep = ts + 21518 + zSep = ts + 21565 } } @@ -133045,34 +133104,34 @@ *(*SessionBuffer)(unsafe.Pointer(bp)) = SessionBuffer{} var nPk int32 = 0 - sessionAppendStr(tls, bp, ts+33005, bp+12) + sessionAppendStr(tls, bp, ts+33052, bp+12) sessionAppendIdent(tls, bp, zTab, bp+12) - sessionAppendStr(tls, bp, ts+32854, bp+12) + sessionAppendStr(tls, bp, ts+32901, bp+12) for i = 0; i < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol; i++ { if *(*U8)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FabPK + uintptr(i))) != 0 { nPk++ sessionAppendStr(tls, bp, zSep, bp+12) sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(i)*4)), bp+12) - sessionAppendStr(tls, bp, ts+32925, bp+12) + sessionAppendStr(tls, bp, ts+32972, bp+12) sessionAppendInteger(tls, bp, i+1, bp+12) - zSep = ts + 21518 + zSep = ts + 21565 } } if nPk < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol { - sessionAppendStr(tls, bp, ts+33023, bp+12) + sessionAppendStr(tls, bp, ts+33070, bp+12) sessionAppendInteger(tls, bp, (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol+1, bp+12) - sessionAppendStr(tls, bp, ts+32566, bp+12) + sessionAppendStr(tls, bp, ts+32613, bp+12) zSep = ts + 1547 for i = 0; i < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol; i++ { if !(int32(*(*U8)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FabPK + 
uintptr(i)))) != 0) { sessionAppendStr(tls, bp, zSep, bp+12) sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(i)*4)), bp+12) - sessionAppendStr(tls, bp, ts+32862, bp+12) + sessionAppendStr(tls, bp, ts+32909, bp+12) sessionAppendInteger(tls, bp, i+1, bp+12) - zSep = ts + 33031 + zSep = ts + 33078 } } sessionAppendStr(tls, bp, ts+4950, bp+12) @@ -133099,9 +133158,9 @@ var i int32 *(*SessionBuffer)(unsafe.Pointer(bp)) = SessionBuffer{} - sessionAppendStr(tls, bp, ts+33036, bp+12) + sessionAppendStr(tls, bp, ts+33083, bp+12) sessionAppendIdent(tls, bp, zTab, bp+12) - sessionAppendStr(tls, bp, ts+21524, bp+12) + sessionAppendStr(tls, bp, ts+21571, bp+12) for i = 0; i < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol; i++ { if i != 0 { sessionAppendStr(tls, bp, ts+14607, bp+12) @@ -133109,9 +133168,9 @@ sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(i)*4)), bp+12) } - sessionAppendStr(tls, bp, ts+33054, bp+12) + sessionAppendStr(tls, bp, ts+33101, bp+12) for i = 1; i < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol; i++ { - sessionAppendStr(tls, bp, ts+33065, bp+12) + sessionAppendStr(tls, bp, ts+33112, bp+12) } sessionAppendStr(tls, bp, ts+4950, bp+12) @@ -133130,11 +133189,11 @@ var rc int32 = sessionSelectRow(tls, db, ts+11341, p) if rc == SQLITE_OK { rc = sessionPrepare(tls, db, p+8, - ts+33069) + ts+33116) } if rc == SQLITE_OK { rc = sessionPrepare(tls, db, p+4, - ts+33182) + ts+33229) } return rc } @@ -133162,7 +133221,7 @@ f func(*libc.TLS, uintptr, int32, uintptr) int32 })(unsafe.Pointer(&struct{ uintptr }{xValue})).f(tls, pIter, i, bp) if *(*uintptr)(unsafe.Pointer(bp)) == uintptr(0) { - rc = Xsqlite3CorruptError(tls, 220219) + rc = Xsqlite3CorruptError(tls, 220310) } else { rc = sessionBindValue(tls, pStmt, i+1, *(*uintptr)(unsafe.Pointer(bp))) } @@ -133415,7 +133474,7 @@ if *(*int32)(unsafe.Pointer(bp + 4)) != 0 { rc = sessionApplyOneOp(tls, pIter, pApply, xConflict, pCtx, uintptr(0), uintptr(0)) } else if *(*int32)(unsafe.Pointer(bp)) != 0 { - rc = Xsqlite3_exec(tls, db, ts+33326, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33373, uintptr(0), uintptr(0), uintptr(0)) if rc == SQLITE_OK { rc = sessionBindRow(tls, pIter, *(*uintptr)(unsafe.Pointer(&struct { @@ -133431,7 +133490,7 @@ rc = sessionApplyOneOp(tls, pIter, pApply, xConflict, pCtx, uintptr(0), uintptr(0)) } if rc == SQLITE_OK { - rc = Xsqlite3_exec(tls, db, ts+33347, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33394, uintptr(0), uintptr(0), uintptr(0)) } } } @@ -133504,10 +133563,10 @@ (*SessionApplyCtx)(unsafe.Pointer(bp + 48)).FbInvertConstraints = libc.BoolInt32(!!(flags&SQLITE_CHANGESETAPPLY_INVERT != 0)) Xsqlite3_mutex_enter(tls, Xsqlite3_db_mutex(tls, db)) if flags&SQLITE_CHANGESETAPPLY_NOSAVEPOINT == 0 { - rc = Xsqlite3_exec(tls, db, ts+33366, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33413, uintptr(0), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { - rc = Xsqlite3_exec(tls, db, ts+33392, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33439, uintptr(0), uintptr(0), uintptr(0)) } for rc == SQLITE_OK && SQLITE_ROW == Xsqlite3changeset_next(tls, pIter) { Xsqlite3changeset_op(tls, pIter, bp+124, bp+128, bp+132, uintptr(0)) @@ -133566,16 +133625,16 @@ if (*SessionApplyCtx)(unsafe.Pointer(bp+48)).FnCol == 0 { schemaMismatch = 1 Xsqlite3_log(tls, SQLITE_SCHEMA, - ts+33422, libc.VaList(bp+8, 
*(*uintptr)(unsafe.Pointer(bp + 140)))) + ts+33469, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(bp + 140)))) } else if (*SessionApplyCtx)(unsafe.Pointer(bp+48)).FnCol < *(*int32)(unsafe.Pointer(bp + 128)) { schemaMismatch = 1 Xsqlite3_log(tls, SQLITE_SCHEMA, - ts+33466, + ts+33513, libc.VaList(bp+16, *(*uintptr)(unsafe.Pointer(bp + 140)), (*SessionApplyCtx)(unsafe.Pointer(bp+48)).FnCol, *(*int32)(unsafe.Pointer(bp + 128)))) } else if *(*int32)(unsafe.Pointer(bp + 128)) < nMinCol || libc.Xmemcmp(tls, (*SessionApplyCtx)(unsafe.Pointer(bp+48)).FabPK, *(*uintptr)(unsafe.Pointer(bp + 136)), uint32(*(*int32)(unsafe.Pointer(bp + 128)))) != 0 { schemaMismatch = 1 Xsqlite3_log(tls, SQLITE_SCHEMA, - ts+33537, libc.VaList(bp+40, *(*uintptr)(unsafe.Pointer(bp + 140)))) + ts+33584, libc.VaList(bp+40, *(*uintptr)(unsafe.Pointer(bp + 140)))) } else { (*SessionApplyCtx)(unsafe.Pointer(bp + 48)).FnCol = *(*int32)(unsafe.Pointer(bp + 128)) if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(bp + 140)), ts+11341) { @@ -133629,14 +133688,14 @@ } } } - Xsqlite3_exec(tls, db, ts+33597, uintptr(0), uintptr(0), uintptr(0)) + Xsqlite3_exec(tls, db, ts+33644, uintptr(0), uintptr(0), uintptr(0)) if flags&SQLITE_CHANGESETAPPLY_NOSAVEPOINT == 0 { if rc == SQLITE_OK { - rc = Xsqlite3_exec(tls, db, ts+33627, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33674, uintptr(0), uintptr(0), uintptr(0)) } else { - Xsqlite3_exec(tls, db, ts+33651, uintptr(0), uintptr(0), uintptr(0)) - Xsqlite3_exec(tls, db, ts+33627, uintptr(0), uintptr(0), uintptr(0)) + Xsqlite3_exec(tls, db, ts+33698, uintptr(0), uintptr(0), uintptr(0)) + Xsqlite3_exec(tls, db, ts+33674, uintptr(0), uintptr(0), uintptr(0)) } } @@ -134873,7 +134932,7 @@ fts5yy_pop_parser_stack(tls, fts5yypParser) } - sqlite3Fts5ParseError(tls, pParse, ts+33679, 0) + sqlite3Fts5ParseError(tls, pParse, ts+33726, 0) (*Fts5yyParser)(unsafe.Pointer(fts5yypParser)).FpParse = pParse @@ -135161,7 +135220,7 @@ _ = fts5yymajor sqlite3Fts5ParseError(tls, - pParse, ts+33707, libc.VaList(bp, fts5yyminor.Fn, fts5yyminor.Fp)) + pParse, ts+33754, libc.VaList(bp, fts5yyminor.Fn, fts5yyminor.Fp)) (*Fts5yyParser)(unsafe.Pointer(fts5yypParser)).FpParse = pParse @@ -135346,7 +135405,7 @@ if n < 0 { n = int32(libc.Xstrlen(tls, z)) } - (*HighlightContext)(unsafe.Pointer(p)).FzOut = Xsqlite3_mprintf(tls, ts+33738, libc.VaList(bp, (*HighlightContext)(unsafe.Pointer(p)).FzOut, n, z)) + (*HighlightContext)(unsafe.Pointer(p)).FzOut = Xsqlite3_mprintf(tls, ts+33785, libc.VaList(bp, (*HighlightContext)(unsafe.Pointer(p)).FzOut, n, z)) if (*HighlightContext)(unsafe.Pointer(p)).FzOut == uintptr(0) { *(*int32)(unsafe.Pointer(pRc)) = SQLITE_NOMEM } @@ -135414,7 +135473,7 @@ var iCol int32 if nVal != 3 { - var zErr uintptr = ts + 33745 + var zErr uintptr = ts + 33792 Xsqlite3_result_error(tls, pCtx, zErr, -1) return } @@ -135595,7 +135654,7 @@ var nCol int32 if nVal != 5 { - var zErr uintptr = ts + 33795 + var zErr uintptr = ts + 33842 Xsqlite3_result_error(tls, pCtx, zErr, -1) return } @@ -135919,13 +135978,13 @@ defer tls.Free(48) *(*[3]Builtin)(unsafe.Pointer(bp)) = [3]Builtin{ - {FzFunc: ts + 33843, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { + {FzFunc: ts + 33890, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr, int32, uintptr) }{fts5SnippetFunction}))}, - {FzFunc: ts + 33851, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { + {FzFunc: ts + 33898, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr, 
int32, uintptr) }{fts5HighlightFunction}))}, - {FzFunc: ts + 33861, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { + {FzFunc: ts + 33908, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr, int32, uintptr) }{fts5Bm25Function}))}, } @@ -136475,7 +136534,7 @@ *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_OK var nCmd int32 = int32(libc.Xstrlen(tls, zCmd)) - if Xsqlite3_strnicmp(tls, ts+33866, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+33913, zCmd, nCmd) == 0 { var nByte int32 = int32(uint32(unsafe.Sizeof(int32(0))) * uint32(FTS5_MAX_PREFIX_INDEXES)) var p uintptr var bFirst int32 = 1 @@ -136502,14 +136561,14 @@ break } if int32(*(*uint8)(unsafe.Pointer(p))) < '0' || int32(*(*uint8)(unsafe.Pointer(p))) > '9' { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+33873, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+33920, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR break } if (*Fts5Config)(unsafe.Pointer(pConfig)).FnPrefix == FTS5_MAX_PREFIX_INDEXES { *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, - ts+33904, libc.VaList(bp, FTS5_MAX_PREFIX_INDEXES)) + ts+33951, libc.VaList(bp, FTS5_MAX_PREFIX_INDEXES)) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR break } @@ -136520,7 +136579,7 @@ } if nPre <= 0 || nPre >= 1000 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+33937, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+33984, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR break } @@ -136533,7 +136592,7 @@ return *(*int32)(unsafe.Pointer(bp + 40)) } - if Xsqlite3_strnicmp(tls, ts+33974, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+34021, zCmd, nCmd) == 0 { var p uintptr = zArg var nArg Sqlite3_int64 = Sqlite3_int64(libc.Xstrlen(tls, zArg) + Size_t(1)) var azArg uintptr = sqlite3Fts5MallocZero(tls, bp+40, Sqlite3_int64(unsafe.Sizeof(uintptr(0)))*nArg) @@ -136542,7 +136601,7 @@ if azArg != 0 && pSpace != 0 { if (*Fts5Config)(unsafe.Pointer(pConfig)).FpTok != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+33983, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34030, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { for nArg = int64(0); p != 0 && *(*uint8)(unsafe.Pointer(p)) != 0; nArg++ { @@ -136561,7 +136620,7 @@ } } if p == uintptr(0) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34016, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34063, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { *(*int32)(unsafe.Pointer(bp + 40)) = sqlite3Fts5GetTokenizer(tls, pGlobal, @@ -136576,14 +136635,14 @@ return *(*int32)(unsafe.Pointer(bp + 40)) } - if Xsqlite3_strnicmp(tls, ts+34050, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+34097, zCmd, nCmd) == 0 { if (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent != FTS5_CONTENT_NORMAL { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34058, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34105, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { if *(*uint8)(unsafe.Pointer(zArg)) != 0 { (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent = FTS5_CONTENT_EXTERNAL - (*Fts5Config)(unsafe.Pointer(pConfig)).FzContent = sqlite3Fts5Mprintf(tls, bp+40, ts+34090, libc.VaList(bp+8, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, zArg)) + (*Fts5Config)(unsafe.Pointer(pConfig)).FzContent = sqlite3Fts5Mprintf(tls, bp+40, ts+34137, libc.VaList(bp+8, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, zArg)) 
} else { (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent = FTS5_CONTENT_NONE } @@ -136591,9 +136650,9 @@ return *(*int32)(unsafe.Pointer(bp + 40)) } - if Xsqlite3_strnicmp(tls, ts+34096, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+34143, zCmd, nCmd) == 0 { if (*Fts5Config)(unsafe.Pointer(pConfig)).FzContentRowid != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34110, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34157, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FzContentRowid = sqlite3Fts5Strndup(tls, bp+40, zArg, -1) @@ -136601,9 +136660,9 @@ return *(*int32)(unsafe.Pointer(bp + 40)) } - if Xsqlite3_strnicmp(tls, ts+34148, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+34195, zCmd, nCmd) == 0 { if int32(*(*uint8)(unsafe.Pointer(zArg))) != '0' && int32(*(*uint8)(unsafe.Pointer(zArg))) != '1' || int32(*(*uint8)(unsafe.Pointer(zArg + 1))) != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34159, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34206, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize = libc.Bool32(int32(*(*uint8)(unsafe.Pointer(zArg))) == '1') @@ -136615,17 +136674,17 @@ *(*[4]Fts5Enum)(unsafe.Pointer(bp + 44)) = [4]Fts5Enum{ {FzName: ts + 8019, FeVal: FTS5_DETAIL_NONE}, {FzName: ts + 17338}, - {FzName: ts + 34194, FeVal: FTS5_DETAIL_COLUMNS}, + {FzName: ts + 34241, FeVal: FTS5_DETAIL_COLUMNS}, {}, } if libc.AssignPtrInt32(bp+40, fts5ConfigSetEnum(tls, bp+44, zArg, pConfig+48)) != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34202, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34249, 0) } return *(*int32)(unsafe.Pointer(bp + 40)) } - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34233, libc.VaList(bp+24, nCmd, zCmd)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34280, libc.VaList(bp+24, nCmd, zCmd)) return SQLITE_ERROR } @@ -136672,15 +136731,15 @@ defer tls.Free(16) var rc int32 = SQLITE_OK - if 0 == Xsqlite3_stricmp(tls, zCol, ts+22184) || + if 0 == Xsqlite3_stricmp(tls, zCol, ts+22231) || 0 == Xsqlite3_stricmp(tls, zCol, ts+16260) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34261, libc.VaList(bp, zCol)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34308, libc.VaList(bp, zCol)) rc = SQLITE_ERROR } else if zArg != 0 { - if 0 == Xsqlite3_stricmp(tls, zArg, ts+34291) { + if 0 == Xsqlite3_stricmp(tls, zArg, ts+34338) { *(*U8)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(p)).FabUnindexed + uintptr((*Fts5Config)(unsafe.Pointer(p)).FnCol))) = U8(1) } else { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34301, libc.VaList(bp+8, zArg)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34348, libc.VaList(bp+8, zArg)) rc = SQLITE_ERROR } } @@ -136697,13 +136756,13 @@ *(*int32)(unsafe.Pointer(bp + 24)) = SQLITE_OK *(*Fts5Buffer)(unsafe.Pointer(bp + 28)) = Fts5Buffer{} - sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+28, ts+34332, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(p)).FzContentRowid)) + sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+28, ts+34379, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(p)).FzContentRowid)) if (*Fts5Config)(unsafe.Pointer(p)).FeContent != FTS5_CONTENT_NONE { for i = 0; i < (*Fts5Config)(unsafe.Pointer(p)).FnCol; i++ { if (*Fts5Config)(unsafe.Pointer(p)).FeContent == FTS5_CONTENT_EXTERNAL { - 
sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+28, ts+34337, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(p)).FazCol + uintptr(i)*4)))) + sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+28, ts+34384, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(p)).FazCol + uintptr(i)*4)))) } else { - sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+28, ts+34344, libc.VaList(bp+16, i)) + sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+28, ts+34391, libc.VaList(bp+16, i)) } } } @@ -136741,8 +136800,8 @@ (*Fts5Config)(unsafe.Pointer(pRet)).FzName = sqlite3Fts5Strndup(tls, bp+40, *(*uintptr)(unsafe.Pointer(azArg + 2*4)), -1) (*Fts5Config)(unsafe.Pointer(pRet)).FbColumnsize = 1 (*Fts5Config)(unsafe.Pointer(pRet)).FeDetail = FTS5_DETAIL_FULL - if *(*int32)(unsafe.Pointer(bp + 40)) == SQLITE_OK && Xsqlite3_stricmp(tls, (*Fts5Config)(unsafe.Pointer(pRet)).FzName, ts+22184) == 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34352, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pRet)).FzName)) + if *(*int32)(unsafe.Pointer(bp + 40)) == SQLITE_OK && Xsqlite3_stricmp(tls, (*Fts5Config)(unsafe.Pointer(pRet)).FzName, ts+22231) == 0 { + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34399, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pRet)).FzName)) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } @@ -136774,7 +136833,7 @@ if *(*int32)(unsafe.Pointer(bp + 40)) == SQLITE_OK { if z == uintptr(0) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34381, libc.VaList(bp+8, zOrig)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34428, libc.VaList(bp+8, zOrig)) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { if bOption != 0 { @@ -136811,14 +136870,14 @@ var zTail uintptr = uintptr(0) if (*Fts5Config)(unsafe.Pointer(pRet)).FeContent == FTS5_CONTENT_NORMAL { - zTail = ts + 34050 + zTail = ts + 34097 } else if (*Fts5Config)(unsafe.Pointer(pRet)).FbColumnsize != 0 { - zTail = ts + 34401 + zTail = ts + 34448 } if zTail != 0 { (*Fts5Config)(unsafe.Pointer(pRet)).FzContent = sqlite3Fts5Mprintf(tls, - bp+40, ts+34409, libc.VaList(bp+16, (*Fts5Config)(unsafe.Pointer(pRet)).FzDb, (*Fts5Config)(unsafe.Pointer(pRet)).FzName, zTail)) + bp+40, ts+34456, libc.VaList(bp+16, (*Fts5Config)(unsafe.Pointer(pRet)).FzDb, (*Fts5Config)(unsafe.Pointer(pRet)).FzName, zTail)) } } @@ -136867,7 +136926,7 @@ *(*int32)(unsafe.Pointer(bp + 48)) = SQLITE_OK var zSql uintptr - zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34420, 0) + zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34467, 0) for i = 0; zSql != 0 && i < (*Fts5Config)(unsafe.Pointer(pConfig)).FnCol; i++ { var zSep uintptr = func() uintptr { if i == 0 { @@ -136875,10 +136934,10 @@ } return ts + 14607 }() - zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34436, libc.VaList(bp, zSql, zSep, *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(pConfig)).FazCol + uintptr(i)*4)))) + zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34483, libc.VaList(bp, zSql, zSep, *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(pConfig)).FazCol + uintptr(i)*4)))) } - zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34443, - libc.VaList(bp+24, zSql, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, ts+22184)) + zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34490, + libc.VaList(bp+24, zSql, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, ts+22231)) if zSql != 0 { *(*int32)(unsafe.Pointer(bp + 48)) = Xsqlite3_declare_vtab(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, zSql) @@ -136988,7 +137047,7 @@ var rc int32 = SQLITE_OK - if 
0 == Xsqlite3_stricmp(tls, zKey, ts+34469) { + if 0 == Xsqlite3_stricmp(tls, zKey, ts+34516) { var pgsz int32 = 0 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { pgsz = Xsqlite3_value_int(tls, pVal) @@ -136998,7 +137057,7 @@ } else { (*Fts5Config)(unsafe.Pointer(pConfig)).Fpgsz = pgsz } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34474) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34521) { var nHashSize int32 = -1 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { nHashSize = Xsqlite3_value_int(tls, pVal) @@ -137008,7 +137067,7 @@ } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FnHashSize = nHashSize } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34483) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34530) { var nAutomerge int32 = -1 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { nAutomerge = Xsqlite3_value_int(tls, pVal) @@ -137021,7 +137080,7 @@ } (*Fts5Config)(unsafe.Pointer(pConfig)).FnAutomerge = nAutomerge } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34493) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34540) { var nUsermerge int32 = -1 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { nUsermerge = Xsqlite3_value_int(tls, pVal) @@ -137031,7 +137090,7 @@ } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FnUsermerge = nUsermerge } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34503) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34550) { var nCrisisMerge int32 = -1 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { nCrisisMerge = Xsqlite3_value_int(tls, pVal) @@ -137047,7 +137106,7 @@ } (*Fts5Config)(unsafe.Pointer(pConfig)).FnCrisisMerge = nCrisisMerge } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+22184) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+22231) { var zIn uintptr = Xsqlite3_value_text(tls, pVal) rc = sqlite3Fts5ConfigParseRank(tls, zIn, bp, bp+4) @@ -137070,7 +137129,7 @@ bp := tls.Alloc(44) defer tls.Free(44) - var zSelect uintptr = ts + 34515 + var zSelect uintptr = ts + 34562 var zSql uintptr *(*uintptr)(unsafe.Pointer(bp + 36)) = uintptr(0) *(*int32)(unsafe.Pointer(bp + 32)) = SQLITE_OK @@ -137092,7 +137151,7 @@ for SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 36))) { var zK uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 36)), 0) var pVal uintptr = Xsqlite3_column_value(tls, *(*uintptr)(unsafe.Pointer(bp + 36)), 1) - if 0 == Xsqlite3_stricmp(tls, zK, ts+34547) { + if 0 == Xsqlite3_stricmp(tls, zK, ts+34594) { iVersion = Xsqlite3_value_int(tls, pVal) } else { *(*int32)(unsafe.Pointer(bp + 40)) = 0 @@ -137106,7 +137165,7 @@ *(*int32)(unsafe.Pointer(bp + 32)) = SQLITE_ERROR if (*Fts5Config)(unsafe.Pointer(pConfig)).FpzErrmsg != 0 { *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(pConfig)).FpzErrmsg)) = Xsqlite3_mprintf(tls, - ts+34555, + ts+34602, libc.VaList(bp+16, iVersion, FTS5_CURRENT_VERSION)) } } @@ -137204,7 +137263,7 @@ } } if int32(*(*uint8)(unsafe.Pointer(z2))) == 0 { - sqlite3Fts5ParseError(tls, pParse, ts+34620, 0) + sqlite3Fts5ParseError(tls, pParse, ts+34667, 0) return FTS5_EOF } } @@ -137217,20 +137276,20 @@ { var z2 uintptr if sqlite3Fts5IsBareword(tls, *(*uint8)(unsafe.Pointer(z))) == 0 { - sqlite3Fts5ParseError(tls, pParse, ts+34640, libc.VaList(bp, z)) + sqlite3Fts5ParseError(tls, pParse, ts+34687, libc.VaList(bp, z)) return FTS5_EOF } tok = FTS5_STRING for z2 = z + 1; sqlite3Fts5IsBareword(tls, *(*uint8)(unsafe.Pointer(z2))) != 0; z2++ { } (*Fts5Token)(unsafe.Pointer(pToken)).Fn = (int32(z2) - int32(z)) / 1 - 
if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 2 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+34671, uint32(2)) == 0 { + if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 2 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+34718, uint32(2)) == 0 { tok = FTS5_OR } - if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 3 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+34674, uint32(3)) == 0 { + if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 3 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+34721, uint32(3)) == 0 { tok = FTS5_NOT } - if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 3 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+30056, uint32(3)) == 0 { + if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 3 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+30103, uint32(3)) == 0 { tok = FTS5_AND } break @@ -139009,9 +139068,9 @@ bp := tls.Alloc(16) defer tls.Free(16) - if (*Fts5Token)(unsafe.Pointer(pTok)).Fn != 4 || libc.Xmemcmp(tls, ts+34678, (*Fts5Token)(unsafe.Pointer(pTok)).Fp, uint32(4)) != 0 { + if (*Fts5Token)(unsafe.Pointer(pTok)).Fn != 4 || libc.Xmemcmp(tls, ts+34725, (*Fts5Token)(unsafe.Pointer(pTok)).Fp, uint32(4)) != 0 { sqlite3Fts5ParseError(tls, - pParse, ts+33707, libc.VaList(bp, (*Fts5Token)(unsafe.Pointer(pTok)).Fn, (*Fts5Token)(unsafe.Pointer(pTok)).Fp)) + pParse, ts+33754, libc.VaList(bp, (*Fts5Token)(unsafe.Pointer(pTok)).Fn, (*Fts5Token)(unsafe.Pointer(pTok)).Fp)) } } @@ -139027,7 +139086,7 @@ var c uint8 = *(*uint8)(unsafe.Pointer((*Fts5Token)(unsafe.Pointer(p)).Fp + uintptr(i))) if int32(c) < '0' || int32(c) > '9' { sqlite3Fts5ParseError(tls, - pParse, ts+34683, libc.VaList(bp, (*Fts5Token)(unsafe.Pointer(p)).Fn, (*Fts5Token)(unsafe.Pointer(p)).Fp)) + pParse, ts+34730, libc.VaList(bp, (*Fts5Token)(unsafe.Pointer(p)).Fn, (*Fts5Token)(unsafe.Pointer(p)).Fp)) return } nNear = nNear*10 + (int32(*(*uint8)(unsafe.Pointer((*Fts5Token)(unsafe.Pointer(p)).Fp + uintptr(i)))) - '0') @@ -139114,7 +139173,7 @@ } } if iCol == (*Fts5Config)(unsafe.Pointer(pConfig)).FnCol { - sqlite3Fts5ParseError(tls, pParse, ts+20521, libc.VaList(bp, z)) + sqlite3Fts5ParseError(tls, pParse, ts+20568, libc.VaList(bp, z)) } else { pRet = fts5ParseColset(tls, pParse, pColset, iCol) } @@ -139195,7 +139254,7 @@ *(*uintptr)(unsafe.Pointer(bp)) = pColset if (*Fts5Config)(unsafe.Pointer((*Fts5Parse)(unsafe.Pointer(pParse)).FpConfig)).FeDetail == FTS5_DETAIL_NONE { sqlite3Fts5ParseError(tls, pParse, - ts+34712, 0) + ts+34759, 0) } else { fts5ParseSetColset(tls, pParse, pExpr, pColset, bp) } @@ -139365,12 +139424,12 @@ (*Fts5ExprPhrase)(unsafe.Pointer(pPhrase)).FnTerm > 1 || (*Fts5ExprPhrase)(unsafe.Pointer(pPhrase)).FnTerm > 0 && (*Fts5ExprTerm)(unsafe.Pointer(pPhrase+20)).FbFirst != 0 { sqlite3Fts5ParseError(tls, pParse, - ts+34765, + ts+34812, libc.VaList(bp, func() uintptr { if (*Fts5ExprNearset)(unsafe.Pointer(pNear)).FnPhrase == 1 { - return ts + 34815 + return ts + 34862 } - return ts + 34678 + return ts + 34725 }())) Xsqlite3_free(tls, pRet) pRet = uintptr(0) @@ -140319,7 +140378,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpReader == uintptr(0) && rc == SQLITE_OK { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig rc = Xsqlite3_blob_open(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, - (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl, ts+34822, iRowid, 0, p+40) + (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl, ts+34869, iRowid, 0, 
p+40) } if rc == SQLITE_ERROR { @@ -140398,7 +140457,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpWriter == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig fts5IndexPrepareStmt(tls, p, p+44, Xsqlite3_mprintf(tls, - ts+34828, + ts+34875, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName))) if (*Fts5Index)(unsafe.Pointer(p)).Frc != 0 { return @@ -140423,7 +140482,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpDeleter == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig var zSql uintptr = Xsqlite3_mprintf(tls, - ts+34879, + ts+34926, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) if fts5IndexPrepareStmt(tls, p, p+48, zSql) != 0 { return @@ -140446,7 +140505,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpIdxDeleter == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig fts5IndexPrepareStmt(tls, p, p+56, Xsqlite3_mprintf(tls, - ts+34928, + ts+34975, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName))) } if (*Fts5Index)(unsafe.Pointer(p)).Frc == SQLITE_OK { @@ -140685,7 +140744,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).Frc == SQLITE_OK { if (*Fts5Index)(unsafe.Pointer(p)).FpDataVersion == uintptr(0) { (*Fts5Index)(unsafe.Pointer(p)).Frc = fts5IndexPrepareStmt(tls, p, p+68, - Xsqlite3_mprintf(tls, ts+34968, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer((*Fts5Index)(unsafe.Pointer(p)).FpConfig)).FzDb))) + Xsqlite3_mprintf(tls, ts+35015, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer((*Fts5Index)(unsafe.Pointer(p)).FpConfig)).FzDb))) if (*Fts5Index)(unsafe.Pointer(p)).Frc != 0 { return int64(0) } @@ -141884,7 +141943,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpIdxSelect == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig fts5IndexPrepareStmt(tls, p, p+60, Xsqlite3_mprintf(tls, - ts+34991, + ts+35038, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName))) } return (*Fts5Index)(unsafe.Pointer(p)).FpIdxSelect @@ -143349,7 +143408,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpIdxWriter == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig fts5IndexPrepareStmt(tls, p, p+52, Xsqlite3_mprintf(tls, - ts+35075, + ts+35122, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName))) } @@ -144432,13 +144491,13 @@ if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { (*Fts5Index)(unsafe.Pointer(p)).FpConfig = pConfig (*Fts5Index)(unsafe.Pointer(p)).FnWorkUnit = FTS5_WORK_UNIT - (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl = sqlite3Fts5Mprintf(tls, bp+8, ts+35132, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) + (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl = sqlite3Fts5Mprintf(tls, bp+8, ts+35179, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) if (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl != 0 && bCreate != 0 { *(*int32)(unsafe.Pointer(bp + 8)) = sqlite3Fts5CreateTable(tls, - pConfig, ts+25049, ts+35140, 0, pzErr) + pConfig, ts+25096, ts+35187, 0, pzErr) if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { *(*int32)(unsafe.Pointer(bp + 8)) = sqlite3Fts5CreateTable(tls, pConfig, ts+11481, - ts+35175, + ts+35222, 1, pzErr) } if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { @@ -144691,7 +144750,7 @@ sqlite3Fts5Put32(tls, bp, iNew) rc = Xsqlite3_blob_open(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, 
(*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl, - ts+34822, int64(FTS5_STRUCTURE_ROWID), 1, bp+4) + ts+34869, int64(FTS5_STRUCTURE_ROWID), 1, bp+4) if rc == SQLITE_OK { Xsqlite3_blob_write(tls, *(*uintptr)(unsafe.Pointer(bp + 4)), bp, 4, 0) rc = Xsqlite3_blob_close(tls, *(*uintptr)(unsafe.Pointer(bp + 4))) @@ -144805,7 +144864,7 @@ } fts5IndexPrepareStmt(tls, p, bp+24, Xsqlite3_mprintf(tls, - ts+35219, + ts+35266, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, (*Fts5StructureSegment)(unsafe.Pointer(pSeg)).FiSegid))) for (*Fts5Index)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) { @@ -144975,7 +145034,7 @@ } else { (*Fts5Buffer)(unsafe.Pointer(bp + 8)).Fn = 0 fts5SegiterPoslist(tls, p, *(*uintptr)(unsafe.Pointer(bp))+80+uintptr((*Fts5CResult)(unsafe.Pointer((*Fts5Iter)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FaFirst+1*4)).FiFirst)*96, uintptr(0), bp+8) - sqlite3Fts5BufferAppendBlob(tls, p+36, bp+8, uint32(4), ts+35305) + sqlite3Fts5BufferAppendBlob(tls, p+36, bp+8, uint32(4), ts+35352) for 0 == sqlite3Fts5PoslistNext64(tls, (*Fts5Buffer)(unsafe.Pointer(bp+8)).Fp, (*Fts5Buffer)(unsafe.Pointer(bp+8)).Fn, bp+20, bp+24) { var iCol int32 = int32(*(*I64)(unsafe.Pointer(bp + 24)) >> 32) var iTokOff int32 = int32(*(*I64)(unsafe.Pointer(bp + 24)) & int64(0x7FFFFFFF)) @@ -145246,7 +145305,7 @@ if (*Fts5Config)(unsafe.Pointer(pConfig)).FbLock != 0 { (*Fts5Table)(unsafe.Pointer(pTab)).Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+35310, 0) + ts+35357, 0) return SQLITE_ERROR } @@ -145670,7 +145729,7 @@ (*Fts5Sorter)(unsafe.Pointer(pSorter)).FnIdx = nPhrase rc = fts5PrepareStatement(tls, pSorter, pConfig, - ts+35349, + ts+35396, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zRank, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, func() uintptr { if zRankArgs != 0 { @@ -145686,9 +145745,9 @@ }(), func() uintptr { if bDesc != 0 { - return ts + 35404 + return ts + 35451 } - return ts + 35409 + return ts + 35456 }())) (*Fts5Cursor)(unsafe.Pointer(pCsr)).FpSorter = pSorter @@ -145734,12 +145793,12 @@ (*Fts5Cursor)(unsafe.Pointer(pCsr)).FePlan = FTS5_PLAN_SPECIAL - if n == 5 && 0 == Xsqlite3_strnicmp(tls, ts+35413, z, n) { + if n == 5 && 0 == Xsqlite3_strnicmp(tls, ts+35460, z, n) { (*Fts5Cursor)(unsafe.Pointer(pCsr)).FiSpecial = I64(sqlite3Fts5IndexReads(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.FpIndex)) } else if n == 2 && 0 == Xsqlite3_strnicmp(tls, ts+5050, z, n) { (*Fts5Cursor)(unsafe.Pointer(pCsr)).FiSpecial = (*Fts5Cursor)(unsafe.Pointer(pCsr)).FiCsrId } else { - (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, ts+35419, libc.VaList(bp, n, z)) + (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, ts+35466, libc.VaList(bp, n, z)) rc = SQLITE_ERROR } @@ -145770,7 +145829,7 @@ var zRankArgs uintptr = (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRankArgs if zRankArgs != 0 { - var zSql uintptr = sqlite3Fts5Mprintf(tls, bp+16, ts+35447, libc.VaList(bp, zRankArgs)) + var zSql uintptr = sqlite3Fts5Mprintf(tls, bp+16, ts+35494, libc.VaList(bp, zRankArgs)) if zSql != 0 { *(*uintptr)(unsafe.Pointer(bp + 20)) = uintptr(0) *(*int32)(unsafe.Pointer(bp + 16)) = Xsqlite3_prepare_v3(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, zSql, -1, @@ -145801,7 +145860,7 @@ if *(*int32)(unsafe.Pointer(bp + 16)) == SQLITE_OK { pAux = 
fts5FindAuxiliary(tls, pTab, zRank) if pAux == uintptr(0) { - (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, ts+35457, libc.VaList(bp+8, zRank)) + (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, ts+35504, libc.VaList(bp+8, zRank)) *(*int32)(unsafe.Pointer(bp + 16)) = SQLITE_ERROR } } @@ -145833,14 +145892,14 @@ *(*int32)(unsafe.Pointer(pCsr + 60)) |= FTS5CSR_FREE_ZRANK } else if rc == SQLITE_ERROR { (*Sqlite3_vtab)(unsafe.Pointer((*Fts5Cursor)(unsafe.Pointer(pCsr)).Fbase.FpVtab)).FzErrMsg = Xsqlite3_mprintf(tls, - ts+35478, libc.VaList(bp, z)) + ts+35525, libc.VaList(bp, z)) } } else { if (*Fts5Config)(unsafe.Pointer(pConfig)).FzRank != 0 { (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRank = (*Fts5Config)(unsafe.Pointer(pConfig)).FzRank (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRankArgs = (*Fts5Config)(unsafe.Pointer(pConfig)).FzRankArgs } else { - (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRank = ts + 33861 + (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRank = ts + 33908 (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRankArgs = uintptr(0) } } @@ -145896,7 +145955,7 @@ goto __1 } (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+35310, 0) + ts+35357, 0) return SQLITE_ERROR __1: ; @@ -146113,7 +146172,7 @@ goto __40 } *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(pConfig)).FpzErrmsg)) = Xsqlite3_mprintf(tls, - ts+35511, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) + ts+35558, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) rc = SQLITE_ERROR goto __41 __40: @@ -146258,28 +146317,28 @@ var rc int32 = SQLITE_OK *(*int32)(unsafe.Pointer(bp)) = 0 - if 0 == Xsqlite3_stricmp(tls, ts+35547, zCmd) { + if 0 == Xsqlite3_stricmp(tls, ts+35594, zCmd) { if (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NORMAL { fts5SetVtabError(tls, pTab, - ts+35558, 0) + ts+35605, 0) rc = SQLITE_ERROR } else { rc = sqlite3Fts5StorageDeleteAll(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage) } - } else if 0 == Xsqlite3_stricmp(tls, ts+35638, zCmd) { + } else if 0 == Xsqlite3_stricmp(tls, ts+35685, zCmd) { if (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NONE { fts5SetVtabError(tls, pTab, - ts+35646, 0) + ts+35693, 0) rc = SQLITE_ERROR } else { rc = sqlite3Fts5StorageRebuild(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage) } } else if 0 == Xsqlite3_stricmp(tls, ts+16927, zCmd) { rc = sqlite3Fts5StorageOptimize(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage) - } else if 0 == Xsqlite3_stricmp(tls, ts+35702, zCmd) { + } else if 0 == Xsqlite3_stricmp(tls, ts+35749, zCmd) { var nMerge int32 = Xsqlite3_value_int(tls, pVal) rc = sqlite3Fts5StorageMerge(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage, nMerge) - } else if 0 == Xsqlite3_stricmp(tls, ts+35708, zCmd) { + } else if 0 == Xsqlite3_stricmp(tls, ts+35755, zCmd) { var iArg int32 = Xsqlite3_value_int(tls, pVal) rc = sqlite3Fts5StorageIntegrity(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage, iArg) } else { @@ -146350,12 +146409,12 @@ if eType0 == SQLITE_INTEGER && fts5IsContentless(tls, pTab) != 0 { (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+35724, + ts+35771, libc.VaList(bp, func() uintptr { if nArg > 1 { - return ts + 20422 + return ts + 20469 } - return ts + 35761 + return ts + 35808 }(), (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) *(*int32)(unsafe.Pointer(bp + 16)) = SQLITE_ERROR } else if nArg == 1 { @@ -146985,7 +147044,7 @@ pCsr = 
fts5CursorFromCsrid(tls, (*Fts5Auxiliary)(unsafe.Pointer(pAux)).FpGlobal, iCsrId) if pCsr == uintptr(0) || (*Fts5Cursor)(unsafe.Pointer(pCsr)).FePlan == 0 { - var zErr uintptr = Xsqlite3_mprintf(tls, ts+35773, libc.VaList(bp, iCsrId)) + var zErr uintptr = Xsqlite3_mprintf(tls, ts+35820, libc.VaList(bp, iCsrId)) Xsqlite3_result_error(tls, context, zErr, -1) Xsqlite3_free(tls, zErr) } else { @@ -147229,7 +147288,7 @@ }()) if pMod == uintptr(0) { rc = SQLITE_ERROR - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35794, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(azArg)))) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35841, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(azArg)))) } else { rc = (*struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 @@ -147248,7 +147307,7 @@ (*Fts5Config)(unsafe.Pointer(pConfig)).FpTokApi = pMod + 8 if rc != SQLITE_OK { if pzErr != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35816, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35863, 0) } } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FePattern = sqlite3Fts5TokenizerPattern(tls, @@ -147295,7 +147354,7 @@ var ppApi uintptr _ = nArg - ppApi = Xsqlite3_value_pointer(tls, *(*uintptr)(unsafe.Pointer(apArg)), ts+35847) + ppApi = Xsqlite3_value_pointer(tls, *(*uintptr)(unsafe.Pointer(apArg)), ts+35894) if ppApi != 0 { *(*uintptr)(unsafe.Pointer(ppApi)) = pGlobal } @@ -147304,7 +147363,7 @@ func fts5SourceIdFunc(tls *libc.TLS, pCtx uintptr, nArg int32, apUnused uintptr) { _ = nArg _ = apUnused - Xsqlite3_result_text(tls, pCtx, ts+35860, -1, libc.UintptrFromInt32(-1)) + Xsqlite3_result_text(tls, pCtx, ts+35907, -1, libc.UintptrFromInt32(-1)) } func fts5ShadowName(tls *libc.TLS, zName uintptr) int32 { @@ -147318,7 +147377,7 @@ } var azName2 = [5]uintptr{ - ts + 35951, ts + 34050, ts + 25049, ts + 34401, ts + 11481, + ts + 35998, ts + 34097, ts + 25096, ts + 34448, ts + 11481, } func fts5Init(tls *libc.TLS, db uintptr) int32 { @@ -147342,7 +147401,7 @@ (*Fts5Global)(unsafe.Pointer(pGlobal)).Fapi.FxFindTokenizer = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr, uintptr) int32 }{fts5FindTokenizer})) - rc = Xsqlite3_create_module_v2(tls, db, ts+35958, uintptr(unsafe.Pointer(&fts5Mod)), p, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5ModuleDestroy}))) + rc = Xsqlite3_create_module_v2(tls, db, ts+36005, uintptr(unsafe.Pointer(&fts5Mod)), p, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5ModuleDestroy}))) if rc == SQLITE_OK { rc = sqlite3Fts5IndexInit(tls, db) } @@ -147360,13 +147419,13 @@ } if rc == SQLITE_OK { rc = Xsqlite3_create_function(tls, - db, ts+35958, 1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { + db, ts+36005, 1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{fts5Fts5Func})), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { rc = Xsqlite3_create_function(tls, - db, ts+35963, 0, + db, ts+36010, 0, SQLITE_UTF8|SQLITE_DETERMINISTIC|SQLITE_INNOCUOUS, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) @@ -147423,17 +147482,17 @@ if *(*uintptr)(unsafe.Pointer(p + 28 + uintptr(eStmt)*4)) == uintptr(0) { *(*[11]uintptr)(unsafe.Pointer(bp + 128)) = [11]uintptr{ - ts + 35978, - ts + 36046, - ts + 36115, - ts + 36148, - ts + 36187, - ts + 36227, - ts + 36266, - ts + 36307, - ts + 36346, - ts + 36388, - ts + 36428, + ts + 36025, + ts + 36093, + ts + 36162, + ts + 36195, + 
ts + 36234, + ts + 36274, + ts + 36313, + ts + 36354, + ts + 36393, + ts + 36435, + ts + 36475, } var pC uintptr = (*Fts5Storage)(unsafe.Pointer(p)).FpConfig var zSql uintptr = uintptr(0) @@ -147535,18 +147594,18 @@ defer tls.Free(80) var rc int32 = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36451, + ts+36498, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36555, + ts+36602, libc.VaList(bp+48, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) } if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NORMAL { rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36593, + ts+36640, libc.VaList(bp+64, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) } return rc @@ -147558,7 +147617,7 @@ if *(*int32)(unsafe.Pointer(pRc)) == SQLITE_OK { *(*int32)(unsafe.Pointer(pRc)) = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36631, + ts+36678, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zTail, zName, zTail)) } } @@ -147570,14 +147629,14 @@ var pConfig uintptr = (*Fts5Storage)(unsafe.Pointer(pStorage)).FpConfig *(*int32)(unsafe.Pointer(bp)) = sqlite3Fts5StorageSync(tls, pStorage) - fts5StorageRenameOne(tls, pConfig, bp, ts+25049, zName) + fts5StorageRenameOne(tls, pConfig, bp, ts+25096, zName) fts5StorageRenameOne(tls, pConfig, bp, ts+11481, zName) - fts5StorageRenameOne(tls, pConfig, bp, ts+35951, zName) + fts5StorageRenameOne(tls, pConfig, bp, ts+35998, zName) if (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { - fts5StorageRenameOne(tls, pConfig, bp, ts+34401, zName) + fts5StorageRenameOne(tls, pConfig, bp, ts+34448, zName) } if (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NORMAL { - fts5StorageRenameOne(tls, pConfig, bp, ts+34050, zName) + fts5StorageRenameOne(tls, pConfig, bp, ts+34097, zName) } return *(*int32)(unsafe.Pointer(bp)) } @@ -147589,17 +147648,17 @@ var rc int32 *(*uintptr)(unsafe.Pointer(bp + 64)) = uintptr(0) - rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, bp+64, ts+36673, + rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, bp+64, ts+36720, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zPost, zDefn, func() uintptr { if bWithout != 0 { - return ts + 29702 + return ts + 29749 } return ts + 1547 }())) if *(*uintptr)(unsafe.Pointer(bp + 64)) != 0 { *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, - ts+36703, + ts+36750, libc.VaList(bp+40, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zPost, *(*uintptr)(unsafe.Pointer(bp + 64)))) Xsqlite3_free(tls, *(*uintptr)(unsafe.Pointer(bp + 64))) } @@ -147636,27 +147695,27 @@ } else { var i int32 var iOff int32 - Xsqlite3_snprintf(tls, nDefn, zDefn, ts+36747, 0) + Xsqlite3_snprintf(tls, nDefn, zDefn, ts+36794, 0) iOff = int32(libc.Xstrlen(tls, zDefn)) for i = 0; i < (*Fts5Config)(unsafe.Pointer(pConfig)).FnCol; i++ { - 
Xsqlite3_snprintf(tls, nDefn-iOff, zDefn+uintptr(iOff), ts+36770, libc.VaList(bp, i)) + Xsqlite3_snprintf(tls, nDefn-iOff, zDefn+uintptr(iOff), ts+36817, libc.VaList(bp, i)) iOff = iOff + int32(libc.Xstrlen(tls, zDefn+uintptr(iOff))) } - rc = sqlite3Fts5CreateTable(tls, pConfig, ts+34050, zDefn, 0, pzErr) + rc = sqlite3Fts5CreateTable(tls, pConfig, ts+34097, zDefn, 0, pzErr) } Xsqlite3_free(tls, zDefn) } if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { rc = sqlite3Fts5CreateTable(tls, - pConfig, ts+34401, ts+36776, 0, pzErr) + pConfig, ts+34448, ts+36823, 0, pzErr) } if rc == SQLITE_OK { rc = sqlite3Fts5CreateTable(tls, - pConfig, ts+35951, ts+36808, 1, pzErr) + pConfig, ts+35998, ts+36855, 1, pzErr) } if rc == SQLITE_OK { - rc = sqlite3Fts5StorageConfigValue(tls, p, ts+34547, uintptr(0), FTS5_CURRENT_VERSION) + rc = sqlite3Fts5StorageConfigValue(tls, p, ts+34594, uintptr(0), FTS5_CURRENT_VERSION) } } @@ -147862,12 +147921,12 @@ (*Fts5Storage)(unsafe.Pointer(p)).FbTotalsValid = 0 rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36825, + ts+36872, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36875, + ts+36922, libc.VaList(bp+32, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) } @@ -147875,7 +147934,7 @@ rc = sqlite3Fts5IndexReinit(tls, (*Fts5Storage)(unsafe.Pointer(p)).FpIndex) } if rc == SQLITE_OK { - rc = sqlite3Fts5StorageConfigValue(tls, p, ts+34547, uintptr(0), FTS5_CURRENT_VERSION) + rc = sqlite3Fts5StorageConfigValue(tls, p, ts+34594, uintptr(0), FTS5_CURRENT_VERSION) } return rc } @@ -148051,7 +148110,7 @@ var zSql uintptr var rc int32 - zSql = Xsqlite3_mprintf(tls, ts+36904, + zSql = Xsqlite3_mprintf(tls, ts+36951, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zSuffix)) if zSql == uintptr(0) { rc = SQLITE_NOMEM @@ -148233,14 +148292,14 @@ if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NORMAL { *(*I64)(unsafe.Pointer(bp + 40)) = int64(0) - rc = fts5StorageCount(tls, p, ts+34050, bp+40) + rc = fts5StorageCount(tls, p, ts+34097, bp+40) if rc == SQLITE_OK && *(*I64)(unsafe.Pointer(bp + 40)) != (*Fts5Storage)(unsafe.Pointer(p)).FnTotalRow { rc = SQLITE_CORRUPT | int32(1)<<8 } } if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { *(*I64)(unsafe.Pointer(bp + 48)) = int64(0) - rc = fts5StorageCount(tls, p, ts+34401, bp+48) + rc = fts5StorageCount(tls, p, ts+34448, bp+48) if rc == SQLITE_OK && *(*I64)(unsafe.Pointer(bp + 48)) != (*Fts5Storage)(unsafe.Pointer(p)).FnTotalRow { rc = SQLITE_CORRUPT | int32(1)<<8 } @@ -148435,9 +148494,9 @@ libc.Xmemcpy(tls, p, uintptr(unsafe.Pointer(&aAsciiTokenChar)), uint32(unsafe.Sizeof(aAsciiTokenChar))) for i = 0; rc == SQLITE_OK && i < nArg; i = i + 2 { var zArg uintptr = *(*uintptr)(unsafe.Pointer(azArg + uintptr(i+1)*4)) - if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*4)), ts+36936) { + if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*4)), ts+36983) { fts5AsciiAddExceptions(tls, p, zArg, 1) - } else if 0 == Xsqlite3_stricmp(tls, 
*(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*4)), ts+36947) { + } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*4)), ts+36994) { fts5AsciiAddExceptions(tls, p, zArg, 0) } else { rc = SQLITE_ERROR @@ -148651,7 +148710,7 @@ } else { p = Xsqlite3_malloc(tls, int32(unsafe.Sizeof(Unicode61Tokenizer{}))) if p != 0 { - var zCat uintptr = ts + 36958 + var zCat uintptr = ts + 37005 var i int32 libc.Xmemset(tls, p, 0, uint32(unsafe.Sizeof(Unicode61Tokenizer{}))) @@ -148663,7 +148722,7 @@ } for i = 0; rc == SQLITE_OK && i < nArg; i = i + 2 { - if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*4)), ts+36967) { + if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*4)), ts+37014) { zCat = *(*uintptr)(unsafe.Pointer(azArg + uintptr(i+1)*4)) } } @@ -148674,18 +148733,18 @@ for i = 0; rc == SQLITE_OK && i < nArg; i = i + 2 { var zArg uintptr = *(*uintptr)(unsafe.Pointer(azArg + uintptr(i+1)*4)) - if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*4)), ts+36978) { + if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*4)), ts+37025) { if int32(*(*uint8)(unsafe.Pointer(zArg))) != '0' && int32(*(*uint8)(unsafe.Pointer(zArg))) != '1' && int32(*(*uint8)(unsafe.Pointer(zArg))) != '2' || *(*uint8)(unsafe.Pointer(zArg + 1)) != 0 { rc = SQLITE_ERROR } else { (*Unicode61Tokenizer)(unsafe.Pointer(p)).FeRemoveDiacritic = int32(*(*uint8)(unsafe.Pointer(zArg))) - '0' } - } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*4)), ts+36936) { + } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*4)), ts+36983) { rc = fts5UnicodeAddExceptions(tls, p, zArg, 1) - } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*4)), ts+36947) { + } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*4)), ts+36994) { rc = fts5UnicodeAddExceptions(tls, p, zArg, 0) - } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*4)), ts+36967) { + } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*4)), ts+37014) { } else { rc = SQLITE_ERROR } @@ -148961,7 +149020,7 @@ var rc int32 = SQLITE_OK var pRet uintptr *(*uintptr)(unsafe.Pointer(bp)) = uintptr(0) - var zBase uintptr = ts + 36996 + var zBase uintptr = ts + 37043 if nArg > 0 { zBase = *(*uintptr)(unsafe.Pointer(azArg)) @@ -149101,7 +149160,7 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*uint8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'a': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37006, aBuf+uintptr(nBuf-2), uint32(2)) { + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37053, aBuf+uintptr(nBuf-2), uint32(2)) { if fts5Porter_MGt1(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 } @@ -149109,11 +149168,11 @@ break case 'c': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37009, aBuf+uintptr(nBuf-4), uint32(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37056, aBuf+uintptr(nBuf-4), uint32(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37014, aBuf+uintptr(nBuf-4), uint32(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37061, aBuf+uintptr(nBuf-4), uint32(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } @@ -149121,7 +149180,7 @@ break case 'e': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37019, 
aBuf+uintptr(nBuf-2), uint32(2)) { + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37066, aBuf+uintptr(nBuf-2), uint32(2)) { if fts5Porter_MGt1(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 } @@ -149129,7 +149188,7 @@ break case 'i': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37022, aBuf+uintptr(nBuf-2), uint32(2)) { + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37069, aBuf+uintptr(nBuf-2), uint32(2)) { if fts5Porter_MGt1(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 } @@ -149137,11 +149196,11 @@ break case 'l': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37025, aBuf+uintptr(nBuf-4), uint32(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37072, aBuf+uintptr(nBuf-4), uint32(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37030, aBuf+uintptr(nBuf-4), uint32(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37077, aBuf+uintptr(nBuf-4), uint32(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } @@ -149149,19 +149208,19 @@ break case 'n': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37035, aBuf+uintptr(nBuf-3), uint32(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37082, aBuf+uintptr(nBuf-3), uint32(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37039, aBuf+uintptr(nBuf-5), uint32(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37086, aBuf+uintptr(nBuf-5), uint32(5)) { if fts5Porter_MGt1(tls, aBuf, nBuf-5) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37045, aBuf+uintptr(nBuf-4), uint32(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37092, aBuf+uintptr(nBuf-4), uint32(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } - } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37050, aBuf+uintptr(nBuf-3), uint32(3)) { + } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37097, aBuf+uintptr(nBuf-3), uint32(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -149169,11 +149228,11 @@ break case 'o': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37054, aBuf+uintptr(nBuf-3), uint32(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37101, aBuf+uintptr(nBuf-3), uint32(3)) { if fts5Porter_MGt1_and_S_or_T(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } - } else if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37058, aBuf+uintptr(nBuf-2), uint32(2)) { + } else if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37105, aBuf+uintptr(nBuf-2), uint32(2)) { if fts5Porter_MGt1(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 } @@ -149181,7 +149240,7 @@ break case 's': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37061, aBuf+uintptr(nBuf-3), uint32(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37108, aBuf+uintptr(nBuf-3), uint32(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -149189,11 +149248,11 @@ break case 't': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37065, aBuf+uintptr(nBuf-3), uint32(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37112, aBuf+uintptr(nBuf-3), uint32(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } - } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37069, aBuf+uintptr(nBuf-3), uint32(3)) { + } else if nBuf > 3 && 0 == 
libc.Xmemcmp(tls, ts+37116, aBuf+uintptr(nBuf-3), uint32(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -149201,7 +149260,7 @@ break case 'u': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37073, aBuf+uintptr(nBuf-3), uint32(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37120, aBuf+uintptr(nBuf-3), uint32(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -149209,7 +149268,7 @@ break case 'v': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37077, aBuf+uintptr(nBuf-3), uint32(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37124, aBuf+uintptr(nBuf-3), uint32(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -149217,7 +149276,7 @@ break case 'z': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37081, aBuf+uintptr(nBuf-3), uint32(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37128, aBuf+uintptr(nBuf-3), uint32(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -149233,24 +149292,24 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*uint8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'a': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37085, aBuf+uintptr(nBuf-2), uint32(2)) { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37065, uint32(3)) + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37132, aBuf+uintptr(nBuf-2), uint32(2)) { + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37112, uint32(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 + 3 ret = 1 } break case 'b': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37088, aBuf+uintptr(nBuf-2), uint32(2)) { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37091, uint32(3)) + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37135, aBuf+uintptr(nBuf-2), uint32(2)) { + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37138, uint32(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 + 3 ret = 1 } break case 'i': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37095, aBuf+uintptr(nBuf-2), uint32(2)) { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37081, uint32(3)) + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37142, aBuf+uintptr(nBuf-2), uint32(2)) { + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37128, uint32(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 + 3 ret = 1 } @@ -149265,44 +149324,44 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*uint8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'a': - if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37098, aBuf+uintptr(nBuf-7), uint32(7)) { + if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37145, aBuf+uintptr(nBuf-7), uint32(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37065, uint32(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37112, uint32(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } - } else if nBuf > 6 && 0 == libc.Xmemcmp(tls, ts+37106, aBuf+uintptr(nBuf-6), uint32(6)) { + } else if nBuf > 6 && 0 == libc.Xmemcmp(tls, ts+37153, aBuf+uintptr(nBuf-6), uint32(6)) { if fts5Porter_MGt0(tls, aBuf, nBuf-6) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-6), ts+37113, uint32(4)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-6), ts+37160, uint32(4)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 6 + 4 } } break case 'c': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37118, aBuf+uintptr(nBuf-4), uint32(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37165, aBuf+uintptr(nBuf-4), uint32(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37014, 
uint32(4)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37061, uint32(4)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 4 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37123, aBuf+uintptr(nBuf-4), uint32(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37170, aBuf+uintptr(nBuf-4), uint32(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37009, uint32(4)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37056, uint32(4)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 4 } } break case 'e': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37128, aBuf+uintptr(nBuf-4), uint32(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37175, aBuf+uintptr(nBuf-4), uint32(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37081, uint32(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37128, uint32(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 3 } } break case 'g': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37133, aBuf+uintptr(nBuf-4), uint32(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37180, aBuf+uintptr(nBuf-4), uint32(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+15473, uint32(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 3 @@ -149311,91 +149370,91 @@ break case 'l': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37138, aBuf+uintptr(nBuf-3), uint32(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37185, aBuf+uintptr(nBuf-3), uint32(3)) { if fts5Porter_MGt0(tls, aBuf, nBuf-3) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37091, uint32(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37138, uint32(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 + 3 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37142, aBuf+uintptr(nBuf-4), uint32(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37189, aBuf+uintptr(nBuf-4), uint32(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37006, uint32(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37053, uint32(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 2 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37147, aBuf+uintptr(nBuf-5), uint32(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37194, aBuf+uintptr(nBuf-5), uint32(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37050, uint32(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37097, uint32(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 3 } - } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37153, aBuf+uintptr(nBuf-3), uint32(3)) { + } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37200, aBuf+uintptr(nBuf-3), uint32(3)) { if fts5Porter_MGt0(tls, aBuf, nBuf-3) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37157, uint32(1)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37204, uint32(1)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 + 1 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37159, aBuf+uintptr(nBuf-5), uint32(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37206, aBuf+uintptr(nBuf-5), uint32(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37073, uint32(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37120, uint32(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 3 } } break case 'o': - if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37165, aBuf+uintptr(nBuf-7), uint32(7)) { + if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37212, aBuf+uintptr(nBuf-7), uint32(7)) { if fts5Porter_MGt0(tls, aBuf, 
nBuf-7) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37081, uint32(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37128, uint32(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37173, aBuf+uintptr(nBuf-5), uint32(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37220, aBuf+uintptr(nBuf-5), uint32(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37065, uint32(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37112, uint32(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 3 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37179, aBuf+uintptr(nBuf-4), uint32(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37226, aBuf+uintptr(nBuf-4), uint32(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37065, uint32(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37112, uint32(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 3 } } break case 's': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37184, aBuf+uintptr(nBuf-5), uint32(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37231, aBuf+uintptr(nBuf-5), uint32(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37006, uint32(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37053, uint32(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } - } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37190, aBuf+uintptr(nBuf-7), uint32(7)) { + } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37237, aBuf+uintptr(nBuf-7), uint32(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37077, uint32(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37124, uint32(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } - } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37198, aBuf+uintptr(nBuf-7), uint32(7)) { + } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37245, aBuf+uintptr(nBuf-7), uint32(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37206, uint32(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37253, uint32(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } - } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37210, aBuf+uintptr(nBuf-7), uint32(7)) { + } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37257, aBuf+uintptr(nBuf-7), uint32(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37073, uint32(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37120, uint32(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } } break case 't': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37218, aBuf+uintptr(nBuf-5), uint32(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37265, aBuf+uintptr(nBuf-5), uint32(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37006, uint32(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37053, uint32(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37224, aBuf+uintptr(nBuf-5), uint32(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37271, aBuf+uintptr(nBuf-5), uint32(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37077, uint32(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37124, uint32(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 3 } - } else if nBuf > 6 && 0 == libc.Xmemcmp(tls, ts+37230, aBuf+uintptr(nBuf-6), uint32(6)) { + } else if nBuf > 6 && 
0 == libc.Xmemcmp(tls, ts+37277, aBuf+uintptr(nBuf-6), uint32(6)) { if fts5Porter_MGt0(tls, aBuf, nBuf-6) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-6), ts+37091, uint32(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-6), ts+37138, uint32(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 6 + 3 } } @@ -149410,16 +149469,16 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*uint8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'a': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37237, aBuf+uintptr(nBuf-4), uint32(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37284, aBuf+uintptr(nBuf-4), uint32(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37022, uint32(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37069, uint32(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 2 } } break case 's': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37242, aBuf+uintptr(nBuf-4), uint32(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37289, aBuf+uintptr(nBuf-4), uint32(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } @@ -149427,21 +149486,21 @@ break case 't': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37247, aBuf+uintptr(nBuf-5), uint32(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37294, aBuf+uintptr(nBuf-5), uint32(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37022, uint32(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37069, uint32(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37253, aBuf+uintptr(nBuf-5), uint32(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37300, aBuf+uintptr(nBuf-5), uint32(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37022, uint32(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37069, uint32(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } } break case 'u': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37206, aBuf+uintptr(nBuf-3), uint32(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37253, aBuf+uintptr(nBuf-3), uint32(3)) { if fts5Porter_MGt0(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -149449,7 +149508,7 @@ break case 'v': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37259, aBuf+uintptr(nBuf-5), uint32(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37306, aBuf+uintptr(nBuf-5), uint32(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 } @@ -149457,9 +149516,9 @@ break case 'z': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37265, aBuf+uintptr(nBuf-5), uint32(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37312, aBuf+uintptr(nBuf-5), uint32(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37006, uint32(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37053, uint32(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } } @@ -149474,12 +149533,12 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*uint8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'e': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37271, aBuf+uintptr(nBuf-3), uint32(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37318, aBuf+uintptr(nBuf-3), uint32(3)) { if fts5Porter_MGt0(tls, aBuf, nBuf-3) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37275, uint32(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37322, uint32(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 + 2 } - } else if nBuf > 2 && 0 == 
libc.Xmemcmp(tls, ts+37278, aBuf+uintptr(nBuf-2), uint32(2)) { + } else if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37325, aBuf+uintptr(nBuf-2), uint32(2)) { if fts5Porter_Vowel(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 ret = 1 @@ -149488,7 +149547,7 @@ break case 'n': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37281, aBuf+uintptr(nBuf-3), uint32(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37328, aBuf+uintptr(nBuf-3), uint32(3)) { if fts5Porter_Vowel(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 ret = 1 @@ -149644,7 +149703,7 @@ (*TrigramTokenizer)(unsafe.Pointer(pNew)).FbFold = 1 for i = 0; rc == SQLITE_OK && i < nArg; i = i + 2 { var zArg uintptr = *(*uintptr)(unsafe.Pointer(azArg + uintptr(i+1)*4)) - if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*4)), ts+37285) { + if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*4)), ts+37332) { if int32(*(*uint8)(unsafe.Pointer(zArg))) != '0' && int32(*(*uint8)(unsafe.Pointer(zArg))) != '1' || *(*uint8)(unsafe.Pointer(zArg + 1)) != 0 { rc = SQLITE_ERROR } else { @@ -149824,22 +149883,22 @@ defer tls.Free(64) *(*[4]BuiltinTokenizer)(unsafe.Pointer(bp)) = [4]BuiltinTokenizer{ - {FzName: ts + 36996, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { + {FzName: ts + 37043, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 }{fts5UnicodeCreate})), FxDelete: *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5UnicodeDelete})), FxTokenize: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, int32, uintptr) int32 }{fts5UnicodeTokenize}))}}, - {FzName: ts + 37300, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { + {FzName: ts + 37347, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 }{fts5AsciiCreate})), FxDelete: *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5AsciiDelete})), FxTokenize: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, int32, uintptr) int32 }{fts5AsciiTokenize}))}}, - {FzName: ts + 37306, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { + {FzName: ts + 37353, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 }{fts5PorterCreate})), FxDelete: *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5PorterDelete})), FxTokenize: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, int32, uintptr) int32 }{fts5PorterTokenize}))}}, - {FzName: ts + 37313, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { + {FzName: ts + 37360, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 }{fts5TriCreate})), FxDelete: *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5TriDelete})), FxTokenize: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, int32, uintptr) int32 @@ -150981,14 +151040,14 @@ var zCopy uintptr = sqlite3Fts5Strndup(tls, bp+8, zType, -1) if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { sqlite3Fts5Dequote(tls, zCopy) - if Xsqlite3_stricmp(tls, zCopy, ts+37321) == 0 { + if Xsqlite3_stricmp(tls, zCopy, ts+37368) == 0 { *(*int32)(unsafe.Pointer(peType)) = FTS5_VOCAB_COL - 
} else if Xsqlite3_stricmp(tls, zCopy, ts+37325) == 0 { + } else if Xsqlite3_stricmp(tls, zCopy, ts+37372) == 0 { *(*int32)(unsafe.Pointer(peType)) = FTS5_VOCAB_ROW - } else if Xsqlite3_stricmp(tls, zCopy, ts+37329) == 0 { + } else if Xsqlite3_stricmp(tls, zCopy, ts+37376) == 0 { *(*int32)(unsafe.Pointer(peType)) = FTS5_VOCAB_INSTANCE } else { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+37338, libc.VaList(bp, zCopy)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+37385, libc.VaList(bp, zCopy)) *(*int32)(unsafe.Pointer(bp + 8)) = SQLITE_ERROR } Xsqlite3_free(tls, zCopy) @@ -151014,19 +151073,19 @@ defer tls.Free(20) *(*[3]uintptr)(unsafe.Pointer(bp + 4)) = [3]uintptr{ - ts + 37372, - ts + 37412, - ts + 37447, + ts + 37419, + ts + 37459, + ts + 37494, } var pRet uintptr = uintptr(0) *(*int32)(unsafe.Pointer(bp + 16)) = SQLITE_OK var bDb int32 - bDb = libc.Bool32(argc == 6 && libc.Xstrlen(tls, *(*uintptr)(unsafe.Pointer(argv + 1*4))) == Size_t(4) && libc.Xmemcmp(tls, ts+23345, *(*uintptr)(unsafe.Pointer(argv + 1*4)), uint32(4)) == 0) + bDb = libc.Bool32(argc == 6 && libc.Xstrlen(tls, *(*uintptr)(unsafe.Pointer(argv + 1*4))) == Size_t(4) && libc.Xmemcmp(tls, ts+23392, *(*uintptr)(unsafe.Pointer(argv + 1*4)), uint32(4)) == 0) if argc != 5 && bDb == 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+37490, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+37537, 0) *(*int32)(unsafe.Pointer(bp + 16)) = SQLITE_ERROR } else { var nByte int32 @@ -151159,11 +151218,11 @@ if (*Fts5VocabTable)(unsafe.Pointer(pTab)).FbBusy != 0 { (*Sqlite3_vtab)(unsafe.Pointer(pVTab)).FzErrMsg = Xsqlite3_mprintf(tls, - ts+37523, libc.VaList(bp, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) + ts+37570, libc.VaList(bp, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) return SQLITE_ERROR } zSql = sqlite3Fts5Mprintf(tls, bp+64, - ts+37554, + ts+37601, libc.VaList(bp+16, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) if zSql != 0 { *(*int32)(unsafe.Pointer(bp + 64)) = Xsqlite3_prepare_v2(tls, (*Fts5VocabTable)(unsafe.Pointer(pTab)).Fdb, zSql, -1, bp+68, uintptr(0)) @@ -151187,7 +151246,7 @@ *(*uintptr)(unsafe.Pointer(bp + 68)) = uintptr(0) if *(*int32)(unsafe.Pointer(bp + 64)) == SQLITE_OK { (*Sqlite3_vtab)(unsafe.Pointer(pVTab)).FzErrMsg = Xsqlite3_mprintf(tls, - ts+37605, libc.VaList(bp+48, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) + ts+37652, libc.VaList(bp+48, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) *(*int32)(unsafe.Pointer(bp + 64)) = SQLITE_ERROR } } else { @@ -151582,7 +151641,7 @@ func sqlite3Fts5VocabInit(tls *libc.TLS, pGlobal uintptr, db uintptr) int32 { var p uintptr = pGlobal - return Xsqlite3_create_module_v2(tls, db, ts+37631, uintptr(unsafe.Pointer(&fts5Vocab)), p, uintptr(0)) + return Xsqlite3_create_module_v2(tls, db, ts+37678, uintptr(unsafe.Pointer(&fts5Vocab)), p, uintptr(0)) } var fts5Vocab = Sqlite3_module{ @@ -151604,7 +151663,7 @@ // ************* End of stmt.c *********************************************** // Return the source-id for this library func Xsqlite3_sourceid(tls *libc.TLS) uintptr { - return ts + 37641 + return ts + 37688 } func init() { 
@@ -152578,5 +152637,5 @@ *(*func(*libc.TLS, uintptr, int32, uintptr) int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&vfs_template)) + 68)) = rbuVfsGetLastError } -var ts1 = "3.41.0\x00ATOMIC_INTRINSICS=0\x00COMPILER=clang-13.0.0\x00DEFAULT_AUTOVACUUM\x00DEFAULT_CACHE_SIZE=-2000\x00DEFAULT_FILE_FORMAT=4\x00DEFAULT_JOURNAL_SIZE_LIMIT=-1\x00DEFAULT_MEMSTATUS=0\x00DEFAULT_MMAP_SIZE=0\x00DEFAULT_PAGE_SIZE=4096\x00DEFAULT_PCACHE_INITSZ=20\x00DEFAULT_RECURSIVE_TRIGGERS\x00DEFAULT_SECTOR_SIZE=4096\x00DEFAULT_SYNCHRONOUS=2\x00DEFAULT_WAL_AUTOCHECKPOINT=1000\x00DEFAULT_WAL_SYNCHRONOUS=2\x00DEFAULT_WORKER_THREADS=0\x00ENABLE_COLUMN_METADATA\x00ENABLE_FTS5\x00ENABLE_GEOPOLY\x00ENABLE_MATH_FUNCTIONS\x00ENABLE_MEMORY_MANAGEMENT\x00ENABLE_OFFSET_SQL_FUNC\x00ENABLE_PREUPDATE_HOOK\x00ENABLE_RBU\x00ENABLE_RTREE\x00ENABLE_SESSION\x00ENABLE_SNAPSHOT\x00ENABLE_STAT4\x00ENABLE_UNLOCK_NOTIFY\x00LIKE_DOESNT_MATCH_BLOBS\x00MALLOC_SOFT_LIMIT=1024\x00MAX_ATTACHED=10\x00MAX_COLUMN=2000\x00MAX_COMPOUND_SELECT=500\x00MAX_DEFAULT_PAGE_SIZE=8192\x00MAX_EXPR_DEPTH=1000\x00MAX_FUNCTION_ARG=127\x00MAX_LENGTH=1000000000\x00MAX_LIKE_PATTERN_LENGTH=50000\x00MAX_MMAP_SIZE=0x7fff0000\x00MAX_PAGE_COUNT=1073741823\x00MAX_PAGE_SIZE=65536\x00MAX_SQL_LENGTH=1000000000\x00MAX_TRIGGER_DEPTH=1000\x00MAX_VARIABLE_NUMBER=32766\x00MAX_VDBE_OP=250000000\x00MAX_WORKER_THREADS=8\x00MUTEX_NOOP\x00SOUNDEX\x00SYSTEM_MALLOC\x00TEMP_STORE=1\x00THREADSAFE=1\x00BINARY\x00ANY\x00BLOB\x00INT\x00INTEGER\x00REAL\x00TEXT\x0020b:20e\x0020c:20e\x0020e\x0040f-21a-21d\x00now\x00local time unavailable\x00second\x00minute\x00hour\x00\x00\x00day\x00\x00\x00\x00month\x00\x00year\x00\x00\x00auto\x00julianday\x00localtime\x00unixepoch\x00utc\x00weekday \x00start of \x00month\x00year\x00day\x00%02d\x00%06.3f\x00%03d\x00%.16g\x00%lld\x00%04d\x00date\x00time\x00datetime\x00strftime\x00current_time\x00current_timestamp\x00current_date\x00failed to allocate %u bytes of memory\x00failed memory resize %u to %u bytes\x00out of memory\x000123456789ABCDEF0123456789abcdef\x00-x0\x00X0\x00%\x00NaN\x00Inf\x00\x00NULL\x00(NULL)\x00.\x00(join-%u)\x00(subquery-%u)\x00thstndrd\x00922337203685477580\x00API call with %s database connection 
pointer\x00unopened\x00invalid\x00Savepoint\x00AutoCommit\x00Transaction\x00Checkpoint\x00JournalMode\x00Vacuum\x00VFilter\x00VUpdate\x00Init\x00Goto\x00Gosub\x00InitCoroutine\x00Yield\x00MustBeInt\x00Jump\x00Once\x00If\x00IfNot\x00IsType\x00Not\x00IfNullRow\x00SeekLT\x00SeekLE\x00SeekGE\x00SeekGT\x00IfNotOpen\x00IfNoHope\x00NoConflict\x00NotFound\x00Found\x00SeekRowid\x00NotExists\x00Last\x00IfSmaller\x00SorterSort\x00Sort\x00Rewind\x00SorterNext\x00Prev\x00Next\x00IdxLE\x00IdxGT\x00IdxLT\x00Or\x00And\x00IdxGE\x00RowSetRead\x00RowSetTest\x00Program\x00FkIfZero\x00IsNull\x00NotNull\x00Ne\x00Eq\x00Gt\x00Le\x00Lt\x00Ge\x00ElseEq\x00IfPos\x00IfNotZero\x00DecrJumpZero\x00IncrVacuum\x00VNext\x00Filter\x00PureFunc\x00Function\x00Return\x00EndCoroutine\x00HaltIfNull\x00Halt\x00Integer\x00Int64\x00String\x00BeginSubrtn\x00Null\x00SoftNull\x00Blob\x00Variable\x00Move\x00Copy\x00SCopy\x00IntCopy\x00FkCheck\x00ResultRow\x00CollSeq\x00AddImm\x00RealAffinity\x00Cast\x00Permutation\x00Compare\x00IsTrue\x00ZeroOrNull\x00Offset\x00Column\x00TypeCheck\x00Affinity\x00MakeRecord\x00Count\x00ReadCookie\x00SetCookie\x00ReopenIdx\x00BitAnd\x00BitOr\x00ShiftLeft\x00ShiftRight\x00Add\x00Subtract\x00Multiply\x00Divide\x00Remainder\x00Concat\x00OpenRead\x00OpenWrite\x00BitNot\x00OpenDup\x00OpenAutoindex\x00String8\x00OpenEphemeral\x00SorterOpen\x00SequenceTest\x00OpenPseudo\x00Close\x00ColumnsUsed\x00SeekScan\x00SeekHit\x00Sequence\x00NewRowid\x00Insert\x00RowCell\x00Delete\x00ResetCount\x00SorterCompare\x00SorterData\x00RowData\x00Rowid\x00NullRow\x00SeekEnd\x00IdxInsert\x00SorterInsert\x00IdxDelete\x00DeferredSeek\x00IdxRowid\x00FinishSeek\x00Destroy\x00Clear\x00ResetSorter\x00CreateBtree\x00SqlExec\x00ParseSchema\x00LoadAnalysis\x00DropTable\x00DropIndex\x00Real\x00DropTrigger\x00IntegrityCk\x00RowSetAdd\x00Param\x00FkCounter\x00MemMax\x00OffsetLimit\x00AggInverse\x00AggStep\x00AggStep1\x00AggValue\x00AggFinal\x00Expire\x00CursorLock\x00CursorUnlock\x00TableLock\x00VBegin\x00VCreate\x00VDestroy\x00VOpen\x00VInitIn\x00VColumn\x00VRename\x00Pagecount\x00MaxPgcnt\x00ClrSubtype\x00FilterAdd\x00Trace\x00CursorHint\x00ReleaseReg\x00Noop\x00Explain\x00Abortable\x00open\x00close\x00access\x00getcwd\x00stat\x00fstat\x00ftruncate\x00fcntl\x00read\x00pread\x00pread64\x00write\x00pwrite\x00pwrite64\x00fchmod\x00fallocate\x00unlink\x00openDirectory\x00mkdir\x00rmdir\x00fchown\x00geteuid\x00mmap\x00munmap\x00mremap\x00getpagesize\x00readlink\x00lstat\x00ioctl\x00attempt to open \"%s\" as file descriptor %d\x00/dev/null\x00os_unix.c:%d: (%d) %s(%s) - %s\x00cannot fstat db file %s\x00file unlinked while open: %s\x00multiple links to file: %s\x00file renamed while open: %s\x00%s\x00full_fsync\x00%s-shm\x00readonly_shm\x00psow\x00unix-excl\x00%s.lock\x00/var/tmp\x00/usr/tmp\x00/tmp\x00SQLITE_TMPDIR\x00TMPDIR\x00%s/etilqs_%llx%c\x00modeof\x00fsync\x00/dev/urandom\x00unix\x00unix-none\x00unix-dotfile\x00memdb\x00memdb(%p,%lld)\x00PRAGMA \"%w\".page_count\x00ATTACH x AS %Q\x00recovered %d pages from %s\x00-journal\x00-wal\x00nolock\x00immutable\x00PRAGMA table_list\x00recovered %d frames from WAL file %s\x00cannot limit WAL size: %s\x00SQLite format 3\x00:memory:\x00@ \x00\n\x00invalid page number %d\x002nd reference to page %d\x00Failed to read ptrmap key=%d\x00Bad ptr map entry key=%d expected=(%d,%d) got=(%d,%d)\x00failed to get page %d\x00freelist leaf count too big on page %d\x00%s is %d but should be %d\x00size\x00overflow list length\x00Page %u: \x00unable to get the page. 
error code=%d\x00btreeInitPage() returns error code %d\x00free space corruption\x00On tree page %u cell %d: \x00On page %u at right child: \x00Offset %d out of range %d..%d\x00Extends off end of page\x00Rowid %lld out of order\x00Child page depth differs\x00Multiple uses for byte %u of page %u\x00Fragmentation of %d bytes reported as %d on page %u\x00Main freelist: \x00max rootpage (%d) disagrees with header (%d)\x00incremental_vacuum enabled with a max rootpage of zero\x00Page %d is never used\x00Pointer map page %d is referenced\x00unknown database %s\x00destination database is in use\x00source and destination must be distinct\x00%!.15g\x00-\x00%s%s\x00k(%d\x00B\x00,%s%s%s\x00N.\x00)\x00%.18s-%s\x00%s(%d)\x00%d\x00(blob)\x00vtab:%p\x00%c%u\x00]\x00program\x00?\x008\x0016LE\x0016BE\x00addr\x00opcode\x00p1\x00p2\x00p3\x00p4\x00p5\x00comment\x00id\x00parent\x00notused\x00detail\x00%.4c%s%.16c\x00MJ delete: %s\x00MJ collide: %s\x00-mj%06X9%02X\x00FOREIGN KEY constraint failed\x00a CHECK constraint\x00a generated column\x00an index\x00non-deterministic use of %s() in %s\x00API called with finalized prepared statement\x00API called with NULL prepared statement\x00string or blob too big\x00bind on a busy prepared statement: [%s]\x00-- \x00'%.*q'\x00zeroblob(%d)\x00x'\x00%02x\x00'\x00%s constraint failed\x00%z: %s\x00abort at %d in [%s]: %s\x00cannot store %s value in %s column %s.%s\x00cannot open savepoint - SQL statements in progress\x00no such savepoint: %s\x00cannot release savepoint - SQL statements in progress\x00cannot commit transaction - SQL statements in progress\x00cannot start a transaction within a transaction\x00cannot rollback - no transaction is active\x00cannot commit - no transaction is active\x00database schema has changed\x00index corruption\x00sqlite_master\x00SELECT*FROM\"%w\".%s WHERE %s ORDER BY rowid\x00too many levels of trigger recursion\x00cannot change %s wal mode from within a transaction\x00into\x00out of\x00database table is locked: %s\x00ValueList\x00-- %s\x00statement aborts at %d: [%s] %s\x00NOT NULL\x00UNIQUE\x00CHECK\x00FOREIGN KEY\x00cannot open value of type %s\x00null\x00real\x00integer\x00no such rowid: %lld\x00cannot open virtual table: %s\x00cannot open table without rowid: %s\x00cannot open view: %s\x00no such column: \"%s\"\x00foreign key\x00indexed\x00cannot open %s column for writing\x00sqlite_\x00sqlite_temp_master\x00sqlite_temp_schema\x00sqlite_schema\x00main\x00*\x00new\x00old\x00excluded\x00misuse of aliased aggregate %s\x00misuse of aliased window function %s\x00row value misused\x00double-quoted string literal: \"%w\"\x00coalesce\x00no such column\x00ambiguous column name\x00%s: %s.%s.%s\x00%s: %s.%s\x00%s: %s\x00partial index WHERE clauses\x00index expressions\x00CHECK constraints\x00generated columns\x00%s prohibited in %s\x00true\x00false\x00the \".\" operator\x00second argument to %#T() must be a constant between 0.0 and 1.0\x00not authorized to use function: %#T\x00non-deterministic functions\x00%#T() may not be used as a window function\x00window\x00aggregate\x00misuse of %s function %#T()\x00no such function: %#T\x00wrong number of arguments to function %#T()\x00FILTER may not be used with non-aggregate %#T()\x00subqueries\x00parameters\x00%r %s BY term out of range - should be between 1 and %d\x00too many terms in ORDER BY clause\x00ORDER\x00%r ORDER BY term does not match any column in the result set\x00too many terms in %s BY clause\x00HAVING clause on a non-aggregate query\x00GROUP\x00aggregate functions are not allowed in the 
GROUP BY clause\x00Expression tree is too large (maximum depth %d)\x00IN(...) element has %d term%s - expected %d\x00s\x000\x00too many arguments on function %T\x00unsafe use of %#T()\x00variable number must be between ?1 and ?%d\x00too many SQL variables\x00%d columns assigned %d values\x00too many columns in %s\x00_ROWID_\x00ROWID\x00OID\x00USING ROWID SEARCH ON TABLE %s FOR IN-OPERATOR\x00USING INDEX %s FOR IN-OPERATOR\x00sub-select returns %d columns - expected %d\x00REUSE LIST SUBQUERY %d\x00%sLIST SUBQUERY %d\x00CORRELATED \x00REUSE SUBQUERY %d\x00%sSCALAR SUBQUERY %d\x001\x000x\x00hex literal too big: %s%#T\x00generated column loop on \"%s\"\x00blob\x00text\x00numeric\x00flexnum\x00none\x00misuse of aggregate: %#T()\x00unknown function: %#T()\x00RAISE() may only be used within a trigger-program\x00B\x00C\x00D\x00E\x00F\x00table %s may not be altered\x00SELECT 1 FROM \"%w\".sqlite_master WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%' AND sqlite_rename_test(%Q, sql, type, name, %d, %Q, %d)=NULL \x00SELECT 1 FROM temp.sqlite_master WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%' AND sqlite_rename_test(%Q, sql, type, name, 1, %Q, %d)=NULL \x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_quotefix(%Q, sql)WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%'\x00UPDATE temp.sqlite_master SET sql = sqlite_rename_quotefix('temp', sql)WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%'\x00there is already another table or index with this name: %s\x00table\x00view %s may not be altered\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, %d) WHERE (type!='index' OR tbl_name=%Q COLLATE nocase)AND name NOT LIKE 'sqliteX_%%' ESCAPE 'X'\x00UPDATE %Q.sqlite_master SET tbl_name = %Q, name = CASE WHEN type='table' THEN %Q WHEN name LIKE 'sqliteX_autoindex%%' ESCAPE 'X' AND type='index' THEN 'sqlite_autoindex_' || %Q || substr(name,%d+18) ELSE name END WHERE tbl_name=%Q COLLATE nocase AND (type='table' OR type='index' OR type='trigger');\x00sqlite_sequence\x00UPDATE \"%w\".sqlite_sequence set name = %Q WHERE name = %Q\x00UPDATE sqlite_temp_schema SET sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, 1), tbl_name = CASE WHEN tbl_name=%Q COLLATE nocase AND sqlite_rename_test(%Q, sql, type, name, 1, 'after rename', 0) THEN %Q ELSE tbl_name END WHERE type IN ('view', 'trigger')\x00after rename\x00SELECT raise(ABORT,%Q) FROM \"%w\".\"%w\"\x00Cannot add a PRIMARY KEY column\x00Cannot add a UNIQUE column\x00Cannot add a REFERENCES column with non-NULL default value\x00Cannot add a NOT NULL column with default value NULL\x00Cannot add a column with non-constant default\x00cannot add a STORED column\x00UPDATE \"%w\".sqlite_master SET sql = printf('%%.%ds, ',sql) || %Q || substr(sql,1+length(printf('%%.%ds',sql))) WHERE type = 'table' AND name = %Q\x00SELECT CASE WHEN quick_check GLOB 'CHECK*' THEN raise(ABORT,'CHECK constraint failed') ELSE raise(ABORT,'NOT NULL constraint failed') END FROM pragma_quick_check(%Q,%Q) WHERE quick_check GLOB 'CHECK*' OR quick_check GLOB 'NULL*'\x00virtual tables may not be altered\x00Cannot add a column to a view\x00sqlite_altertab_%s\x00view\x00virtual table\x00cannot %s %s \"%s\"\x00drop column from\x00rename columns of\x00no such column: \"%T\"\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, %d) WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND (type != 'index' OR 
tbl_name = %Q)\x00UPDATE temp.sqlite_master SET sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, 1) WHERE type IN ('trigger', 'view')\x00error in %s %s%s%s: %s\x00 \x00CREATE \x00\"%w\" \x00%Q%s\x00%.*s%s\x00cannot drop %s column: \"%s\"\x00PRIMARY KEY\x00cannot drop column \"%s\": no other columns exist\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_drop_column(%d, sql, %d) WHERE (type=='table' AND tbl_name=%Q COLLATE nocase)\x00after drop column\x00sqlite_rename_column\x00sqlite_rename_table\x00sqlite_rename_test\x00sqlite_drop_column\x00sqlite_rename_quotefix\x00CREATE TABLE %Q.%s(%s)\x00DELETE FROM %Q.%s WHERE %s=%Q\x00DELETE FROM %Q.%s\x00sqlite_stat1\x00tbl,idx,stat\x00sqlite_stat4\x00tbl,idx,neq,nlt,ndlt,sample\x00sqlite_stat3\x00stat_init\x00stat_push\x00%llu\x00 %llu\x00%llu \x00stat_get\x00sqlite\\_%\x00BBB\x00idx\x00tbl\x00unordered*\x00sz=[0-9]*\x00noskipscan*\x00SELECT idx,count(*) FROM %Q.sqlite_stat4 GROUP BY idx\x00SELECT idx,neq,nlt,ndlt,sample FROM %Q.sqlite_stat4\x00SELECT tbl,idx,stat FROM %Q.sqlite_stat1\x00x\x00\x00too many attached databases - max %d\x00database %s is already in use\x00database is already attached\x00attached databases must use the same text encoding as main database\x00unable to open database: %s\x00no such database: %s\x00cannot detach database %s\x00database %s is locked\x00sqlite_detach\x00sqlite_attach\x00%s cannot use variables\x00%s %T cannot reference objects in database %s\x00authorizer malfunction\x00%s.%s\x00%s.%z\x00access to %z is prohibited\x00not authorized\x00pragma_\x00no such view\x00no such table\x00corrupt database\x00unknown database %T\x00object name reserved for internal use: %s\x00temporary table name must be unqualified\x00%s %T already exists\x00there is already an index named %s\x00sqlite_returning\x00cannot use RETURNING in a trigger\x00too many columns on %s\x00always\x00generated\x00duplicate column name: %s\x00default value of column [%s] is not constant\x00cannot use DEFAULT on a generated column\x00generated columns cannot be part of the PRIMARY KEY\x00table \"%s\" has more than one primary key\x00AUTOINCREMENT is only allowed on an INTEGER PRIMARY KEY\x00virtual tables cannot use computed columns\x00virtual\x00stored\x00error in generated column \"%s\"\x00,\x00\n \x00,\n \x00\n)\x00CREATE TABLE \x00 TEXT\x00 NUM\x00 INT\x00 REAL\x00unknown datatype for %s.%s: \"%s\"\x00missing datatype for %s.%s\x00AUTOINCREMENT not allowed on WITHOUT ROWID tables\x00PRIMARY KEY missing on table %s\x00must have at least one non-generated column\x00TABLE\x00VIEW\x00CREATE %s %.*s\x00UPDATE %Q.sqlite_master SET type='%s', name=%Q, tbl_name=%Q, rootpage=#%d, sql=%Q WHERE rowid=#%d\x00CREATE TABLE %Q.sqlite_sequence(name,seq)\x00tbl_name='%q' AND type!='trigger'\x00parameters are not allowed in views\x00view %s is circularly defined\x00corrupt schema\x00UPDATE %Q.sqlite_master SET rootpage=%d WHERE #%d AND rootpage=#%d\x00sqlite_stat%d\x00DELETE FROM %Q.sqlite_sequence WHERE name=%Q\x00DELETE FROM %Q.sqlite_master WHERE tbl_name=%Q and type!='trigger'\x00table %s may not be dropped\x00use DROP TABLE to delete table %s\x00use DROP VIEW to delete view %s\x00foreign key on %s should reference only one column of table %T\x00number of columns in foreign key does not match the number of columns in the referenced table\x00unknown column \"%s\" in foreign key definition\x00unsupported use of NULLS %s\x00FIRST\x00LAST\x00index\x00cannot create a TEMP index on non-TEMP table \"%s\"\x00table %s may not be indexed\x00views may not be 
indexed\x00virtual tables may not be indexed\x00there is already a table named %s\x00index %s already exists\x00sqlite_autoindex_%s_%d\x00expressions prohibited in PRIMARY KEY and UNIQUE constraints\x00conflicting ON CONFLICT clauses specified\x00invalid rootpage\x00CREATE%s INDEX %.*s\x00 UNIQUE\x00INSERT INTO %Q.sqlite_master VALUES('index',%Q,%Q,#%d,%Q);\x00name='%q' AND type='index'\x00no such index: %S\x00index associated with UNIQUE or PRIMARY KEY constraint cannot be dropped\x00DELETE FROM %Q.sqlite_master WHERE name=%Q AND type='index'\x00too many FROM clause terms, max: %d\x00a JOIN clause is required before %s\x00ON\x00USING\x00BEGIN\x00ROLLBACK\x00COMMIT\x00RELEASE\x00unable to open a temporary database file for storing temporary tables\x00index '%q'\x00, \x00%s.rowid\x00unable to identify the object to be reindexed\x00duplicate WITH table name: %s\x00no such collation sequence: %s\x00unsafe use of virtual table \"%s\"\x00table %s may not be modified\x00cannot modify %s because it is a view\x00rows deleted\x00integer overflow\x00%.*f\x00LIKE or GLOB pattern too complex\x00ESCAPE expression must be a single character\x00%!.20e\x00%Q\x00?000\x00MATCH\x00like\x00implies_nonnull_row\x00expr_compare\x00expr_implies_expr\x00affinity\x00soundex\x00load_extension\x00sqlite_compileoption_used\x00sqlite_compileoption_get\x00unlikely\x00likelihood\x00likely\x00sqlite_offset\x00ltrim\x00rtrim\x00trim\x00min\x00max\x00typeof\x00subtype\x00length\x00instr\x00printf\x00format\x00unicode\x00char\x00abs\x00round\x00upper\x00lower\x00hex\x00unhex\x00ifnull\x00random\x00randomblob\x00nullif\x00sqlite_version\x00sqlite_source_id\x00sqlite_log\x00quote\x00last_insert_rowid\x00changes\x00total_changes\x00replace\x00zeroblob\x00substr\x00substring\x00sum\x00total\x00avg\x00count\x00group_concat\x00glob\x00ceil\x00ceiling\x00floor\x00trunc\x00ln\x00log\x00log10\x00log2\x00exp\x00pow\x00power\x00mod\x00acos\x00asin\x00atan\x00atan2\x00cos\x00sin\x00tan\x00cosh\x00sinh\x00tanh\x00acosh\x00asinh\x00atanh\x00sqrt\x00radians\x00degrees\x00pi\x00sign\x00iif\x00foreign key mismatch - \"%w\" referencing \"%w\"\x00cannot INSERT into generated column \"%s\"\x00table %S has no column named %s\x00table %S has %d columns but %d values were supplied\x00%d values for %d columns\x00UPSERT not implemented for virtual table \"%s\"\x00cannot UPSERT a view\x00rows inserted\x00sqlite3_extension_init\x00sqlite3_\x00lib\x00_init\x00no entry point [%s] in shared library [%s]\x00error during initialization: %s\x00unable to open shared library [%.*s]\x00so\x00automatic extension loading failed: 
%s\x00seq\x00from\x00to\x00on_update\x00on_delete\x00match\x00cid\x00name\x00type\x00notnull\x00dflt_value\x00pk\x00hidden\x00schema\x00ncol\x00wr\x00strict\x00seqno\x00desc\x00coll\x00key\x00builtin\x00enc\x00narg\x00flags\x00wdth\x00hght\x00flgs\x00unique\x00origin\x00partial\x00rowid\x00fkid\x00file\x00busy\x00checkpointed\x00database\x00status\x00cache_size\x00timeout\x00analysis_limit\x00application_id\x00auto_vacuum\x00automatic_index\x00busy_timeout\x00cache_spill\x00case_sensitive_like\x00cell_size_check\x00checkpoint_fullfsync\x00collation_list\x00compile_options\x00count_changes\x00data_version\x00database_list\x00default_cache_size\x00defer_foreign_keys\x00empty_result_callbacks\x00encoding\x00foreign_key_check\x00foreign_key_list\x00foreign_keys\x00freelist_count\x00full_column_names\x00fullfsync\x00function_list\x00hard_heap_limit\x00ignore_check_constraints\x00incremental_vacuum\x00index_info\x00index_list\x00index_xinfo\x00integrity_check\x00journal_mode\x00journal_size_limit\x00legacy_alter_table\x00locking_mode\x00max_page_count\x00mmap_size\x00module_list\x00optimize\x00page_count\x00page_size\x00pragma_list\x00query_only\x00quick_check\x00read_uncommitted\x00recursive_triggers\x00reverse_unordered_selects\x00schema_version\x00secure_delete\x00short_column_names\x00shrink_memory\x00soft_heap_limit\x00synchronous\x00table_info\x00table_list\x00table_xinfo\x00temp_store\x00temp_store_directory\x00threads\x00trusted_schema\x00user_version\x00wal_autocheckpoint\x00wal_checkpoint\x00writable_schema\x00onoffalseyestruextrafull\x00exclusive\x00normal\x00full\x00incremental\x00memory\x00temporary storage cannot be changed from within a transaction\x00SET NULL\x00SET DEFAULT\x00CASCADE\x00RESTRICT\x00NO ACTION\x00delete\x00persist\x00off\x00truncate\x00wal\x00w\x00a\x00sissii\x00utf8\x00utf16le\x00utf16be\x00-%T\x00fast\x00not a writable directory\x00Safety level may not be changed inside a transaction\x00reset\x00issisii\x00issisi\x00SELECT*FROM\"%w\"\x00shadow\x00sssiii\x00iisX\x00isiX\x00c\x00u\x00isisi\x00iss\x00is\x00iissssss\x00NONE\x00siX\x00*** in database %s ***\n\x00row not in PRIMARY KEY order for %s\x00NULL value in %s.%s\x00non-%s value in %s.%s\x00NUMERIC value in %s.%s\x00C\x00TEXT value in %s.%s\x00CHECK constraint failed in %s\x00row \x00 missing from index \x00 values differ from index \x00non-unique entry in index \x00wrong # of entries in index \x00ok\x00unsupported encoding: %s\x00restart\x00ANALYZE \"%w\".\"%w\"\x00UTF8\x00UTF-8\x00UTF-16le\x00UTF-16be\x00UTF16le\x00UTF16be\x00UTF-16\x00UTF16\x00CREATE TABLE x\x00%c\"%s\"\x00(\"%s\"\x00,arg HIDDEN\x00,schema HIDDEN\x00PRAGMA \x00%Q.\x00=%Q\x00error in %s %s after %s: %s\x00malformed database schema (%s)\x00%z - %s\x00rename\x00drop column\x00add column\x00orphan index\x00CREATE TABLE x(type text,name text,tbl_name text,rootpage int,sql text)\x00unsupported file format\x00SELECT*FROM\"%w\".%s ORDER BY rowid\x00database schema is locked: %s\x00statement too long\x00unknown join type: %T%s%T%s%T\x00naturaleftouterightfullinnercross\x00a NATURAL join may not have an ON or USING clause\x00cannot join using column %s - column not present in both tables\x00ambiguous reference to %s in USING()\x00UNION ALL\x00INTERSECT\x00EXCEPT\x00UNION\x00USE TEMP B-TREE FOR %s\x00USE TEMP B-TREE FOR %sORDER BY\x00RIGHT PART OF \x00column%d\x00%.*z:%u\x00NUM\x00cannot use window functions in recursive queries\x00recursive aggregate queries not supported\x00SETUP\x00RECURSIVE STEP\x00SCAN %d CONSTANT ROW%s\x00S\x00COMPOUND 
QUERY\x00LEFT-MOST SUBQUERY\x00%s USING TEMP B-TREE\x00all VALUES must have the same number of terms\x00SELECTs to the left and right of %s do not have the same number of result columns\x00MERGE (%s)\x00LEFT\x00RIGHT\x00no such index: %s\x00'%s' is not a function\x00no such index: \"%s\"\x00multiple references to recursive table: %s\x00circular reference: %s\x00table %s has %d values for %d columns\x00multiple recursive references: %s\x00recursive reference in a subquery: %s\x00%!S\x00too many references to \"%s\": max 65535\x00access to view \"%s\" prohibited\x00..%s\x00%s.%s.%s\x00no such table: %s\x00no tables specified\x00too many columns in result set\x00DISTINCT aggregates must have exactly one argument\x00USE TEMP B-TREE FOR %s(DISTINCT)\x00SCAN %s%s%s\x00 USING COVERING INDEX \x00target object/alias may not appear in FROM clause: %s\x00expected %d columns for '%s' but got %d\x00CO-ROUTINE %!S\x00MATERIALIZE %!S\x00DISTINCT\x00GROUP BY\x00sqlite3_get_table() called with two or more incompatible queries\x00temporary trigger may not have qualified name\x00trigger\x00cannot create triggers on virtual tables\x00trigger %T already exists\x00cannot create trigger on system table\x00cannot create %s trigger on view: %S\x00BEFORE\x00AFTER\x00cannot create INSTEAD OF trigger on table: %S\x00trigger \"%s\" may not write to shadow table \"%s\"\x00INSERT INTO %Q.sqlite_master VALUES('trigger',%Q,%Q,0,'CREATE TRIGGER %q')\x00type='trigger' AND name='%q'\x00no such trigger: %S\x00DELETE FROM %Q.sqlite_master WHERE name=%Q AND type='trigger'\x00%s RETURNING is not available on virtual tables\x00DELETE\x00UPDATE\x00RETURNING may not use \"TABLE.*\" wildcards\x00-- TRIGGER %s\x00cannot UPDATE generated column \"%s\"\x00no such column: %s\x00rows updated\x00%r \x00%sON CONFLICT clause does not match any PRIMARY KEY or UNIQUE constraint\x00CRE\x00INS\x00cannot VACUUM from within a transaction\x00cannot VACUUM - SQL statements in progress\x00non-text filename\x00ATTACH %Q AS vacuum_db\x00output file already exists\x00SELECT sql FROM \"%w\".sqlite_schema WHERE type='table'AND name<>'sqlite_sequence' AND coalesce(rootpage,1)>0\x00SELECT sql FROM \"%w\".sqlite_schema WHERE type='index'\x00SELECT'INSERT INTO vacuum_db.'||quote(name)||' SELECT*FROM\"%w\".'||quote(name)FROM vacuum_db.sqlite_schema WHERE type='table'AND coalesce(rootpage,1)>0\x00INSERT INTO vacuum_db.sqlite_schema SELECT*FROM \"%w\".sqlite_schema WHERE type IN('view','trigger') OR(type='table'AND rootpage=0)\x00CREATE VIRTUAL TABLE %T\x00UPDATE %Q.sqlite_master SET type='table', name=%Q, tbl_name=%Q, rootpage=0, sql=%Q WHERE rowid=#%d\x00name=%Q AND sql=%Q\x00vtable constructor called recursively: %s\x00vtable constructor failed: %s\x00vtable constructor did not declare schema: %s\x00no such module: %s\x00\x00 AND \x00(\x00 (\x00%s=?\x00ANY(%s)\x00>\x00<\x00%s %S\x00SEARCH\x00SCAN\x00AUTOMATIC PARTIAL COVERING INDEX\x00AUTOMATIC COVERING INDEX\x00COVERING INDEX %s\x00INDEX %s\x00 USING \x00 USING INTEGER PRIMARY KEY (%s\x00>? 
AND %s\x00%c?)\x00 VIRTUAL TABLE INDEX %d:%s\x00 LEFT-JOIN\x00BLOOM FILTER ON %S (\x00rowid=?\x00MULTI-INDEX OR\x00INDEX %d\x00RIGHT-JOIN %s\x00regexp\x00ON clause references tables to its right\x00NOCASE\x00too many arguments on %s() - max %d\x00automatic index on %s(%s)\x00auto-index\x00%s.xBestIndex malfunction\x00abbreviated query algorithm search\x00no query solution\x00at most %d tables in a join\x00SCAN CONSTANT ROW\x00second argument to nth_value must be a positive integer\x00argument of ntile must be a positive integer\x00row_number\x00dense_rank\x00rank\x00percent_rank\x00cume_dist\x00ntile\x00last_value\x00nth_value\x00first_value\x00lead\x00lag\x00no such window: %s\x00RANGE with offset PRECEDING/FOLLOWING requires one ORDER BY expression\x00FILTER clause may only be used with aggregate window functions\x00misuse of aggregate: %s()\x00unsupported frame specification\x00PARTITION clause\x00ORDER BY clause\x00frame specification\x00cannot override %s of window: %s\x00DISTINCT is not supported for window functions\x00frame starting offset must be a non-negative integer\x00frame ending offset must be a non-negative integer\x00frame starting offset must be a non-negative number\x00frame ending offset must be a non-negative number\x00%s clause should come after %s not before\x00ORDER BY\x00LIMIT\x00too many terms in compound SELECT\x00syntax error after column name \"%.*s\"\x00parser stack overflow\x00unknown table option: %.*s\x00set list\x00near \"%T\": syntax error\x00qualified table names are not allowed on INSERT, UPDATE, and DELETE statements within triggers\x00the INDEXED BY clause is not allowed on UPDATE or DELETE statements within triggers\x00the NOT INDEXED clause is not allowed on UPDATE or DELETE statements within triggers\x00incomplete input\x00unrecognized token: \"%T\"\x00%s in \"%s\"\x00create\x00temp\x00temporary\x00end\x00explain\x00unable to close due to unfinalized statements or unfinished backups\x00unknown error\x00abort due to ROLLBACK\x00another row available\x00no more rows available\x00not an error\x00SQL logic error\x00access permission denied\x00query aborted\x00database is locked\x00database table is locked\x00attempt to write a readonly database\x00interrupted\x00disk I/O error\x00database disk image is malformed\x00unknown operation\x00database or disk is full\x00unable to open database file\x00locking protocol\x00constraint failed\x00datatype mismatch\x00bad parameter or other API misuse\x00authorization denied\x00column index out of range\x00file is not a database\x00notification message\x00warning message\x00unable to delete/modify user-function due to active statements\x00unable to use function %s in the requested context\x00unknown database: %s\x00unable to delete/modify collation sequence due to active statements\x00file:\x00localhost\x00invalid uri authority: %.*s\x00vfs\x00cache\x00mode\x00no such %s mode: %s\x00%s mode not allowed: %s\x00no such vfs: %s\x00shared\x00private\x00ro\x00rw\x00rwc\x00RTRIM\x00\x00\x00\x00%s at line %d of [%.10s]\x00database corruption\x00misuse\x00cannot open file\x00no such table column: %s.%s\x00SQLITE_\x00database is deadlocked\x00array\x00object\x000123456789abcdef\x00JSON cannot hold BLOB values\x00malformed JSON\x00[0]\x00JSON path error near '%q'\x00json_%s() needs an odd number of arguments\x00$[\x00$.\x00json_object() requires an even number of arguments\x00json_object() labels must be TEXT\x00set\x00insert\x00[]\x00{}\x00CREATE TABLE x(key,value,type,atom,id,parent,fullkey,path,json HIDDEN,root 
HIDDEN)\x00.%.*s\x00[%d]\x00$\x00json\x00json_array\x00json_array_length\x00json_extract\x00->\x00->>\x00json_insert\x00json_object\x00json_patch\x00json_quote\x00json_remove\x00json_replace\x00json_set\x00json_type\x00json_valid\x00json_group_array\x00json_group_object\x00json_each\x00json_tree\x00%s_node\x00data\x00DROP TABLE '%q'.'%q_node';DROP TABLE '%q'.'%q_rowid';DROP TABLE '%q'.'%q_parent';\x00RtreeMatchArg\x00SELECT * FROM %Q.%Q\x00UNIQUE constraint failed: %s.%s\x00rtree constraint failed: %s.(%s<=%s)\x00ALTER TABLE %Q.'%q_node' RENAME TO \"%w_node\";ALTER TABLE %Q.'%q_parent' RENAME TO \"%w_parent\";ALTER TABLE %Q.'%q_rowid' RENAME TO \"%w_rowid\";\x00SELECT stat FROM %Q.sqlite_stat1 WHERE tbl = '%q_rowid'\x00node\x00CREATE TABLE \"%w\".\"%w_rowid\"(rowid INTEGER PRIMARY KEY,nodeno\x00,a%d\x00);CREATE TABLE \"%w\".\"%w_node\"(nodeno INTEGER PRIMARY KEY,data);\x00CREATE TABLE \"%w\".\"%w_parent\"(nodeno INTEGER PRIMARY KEY,parentnode);\x00INSERT INTO \"%w\".\"%w_node\"VALUES(1,zeroblob(%d))\x00INSERT INTO\"%w\".\"%w_rowid\"(rowid,nodeno)VALUES(?1,?2)ON CONFLICT(rowid)DO UPDATE SET nodeno=excluded.nodeno\x00SELECT * FROM \"%w\".\"%w_rowid\" WHERE rowid=?1\x00UPDATE \"%w\".\"%w_rowid\"SET \x00a%d=coalesce(?%d,a%d)\x00a%d=?%d\x00 WHERE rowid=?1\x00INSERT OR REPLACE INTO '%q'.'%q_node' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_node' WHERE nodeno = ?1\x00SELECT nodeno FROM '%q'.'%q_rowid' WHERE rowid = ?1\x00INSERT OR REPLACE INTO '%q'.'%q_rowid' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_rowid' WHERE rowid = ?1\x00SELECT parentnode FROM '%q'.'%q_parent' WHERE nodeno = ?1\x00INSERT OR REPLACE INTO '%q'.'%q_parent' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_parent' WHERE nodeno = ?1\x00PRAGMA %Q.page_size\x00SELECT length(data) FROM '%q'.'%q_node' WHERE nodeno = 1\x00undersize RTree blobs in \"%q_node\"\x00Wrong number of columns for an rtree table\x00Too few columns for an rtree table\x00Too many columns for an rtree table\x00Auxiliary rtree columns must be last\x00CREATE TABLE x(%.*s INT\x00,%.*s\x00);\x00,%.*s REAL\x00,%.*s INT\x00{%lld\x00 %g\x00}\x00Invalid argument to rtreedepth()\x00%z%s%z\x00SELECT data FROM %Q.'%q_node' WHERE nodeno=?\x00Node %lld missing from database\x00SELECT parentnode FROM %Q.'%q_parent' WHERE nodeno=?1\x00SELECT nodeno FROM %Q.'%q_rowid' WHERE rowid=?1\x00Mapping (%lld -> %lld) missing from %s table\x00%_rowid\x00%_parent\x00Found (%lld -> %lld) in %s table, expected (%lld -> %lld)\x00Dimension %d of cell %d on node %lld is corrupt\x00Dimension %d of cell %d on node %lld is corrupt relative to parent\x00Node %lld is too small (%d bytes)\x00Rtree depth out of range (%d)\x00Node %lld is too small for cell count of %d (%d bytes)\x00SELECT count(*) FROM %Q.'%q%s'\x00Wrong number of entries in %%%s table - expected %lld, actual %lld\x00SELECT * FROM %Q.'%q_rowid'\x00Schema corrupt or not an rtree\x00_rowid\x00_parent\x00END\x00wrong number of arguments to function rtreecheck()\x00[\x00[%!g,%!g],\x00[%!g,%!g]]\x00\x00CREATE TABLE x(_shape\x00,%s\x00rtree\x00fullscan\x00_shape does not contain a valid polygon\x00geopoly_overlap\x00geopoly_within\x00geopoly\x00geopoly_area\x00geopoly_blob\x00geopoly_json\x00geopoly_svg\x00geopoly_contains_point\x00geopoly_debug\x00geopoly_bbox\x00geopoly_xform\x00geopoly_regular\x00geopoly_ccw\x00geopoly_group_bbox\x00rtreenode\x00rtreedepth\x00rtreecheck\x00rtree_i32\x00corrupt fossil delta\x00DROP TRIGGER IF EXISTS temp.rbu_insert_tr;DROP TRIGGER IF EXISTS temp.rbu_update1_tr;DROP TRIGGER IF EXISTS temp.rbu_update2_tr;DROP TRIGGER IF 
EXISTS temp.rbu_delete_tr;\x00SELECT rbu_target_name(name, type='view') AS target, name FROM sqlite_schema WHERE type IN ('table', 'view') AND target IS NOT NULL %s ORDER BY name\x00AND rootpage!=0 AND rootpage IS NOT NULL\x00SELECT name, rootpage, sql IS NULL OR substr(8, 6)=='UNIQUE' FROM main.sqlite_schema WHERE type='index' AND tbl_name = ?\x00SELECT (sql COLLATE nocase BETWEEN 'CREATE VIRTUAL' AND 'CREATE VIRTUAM'), rootpage FROM sqlite_schema WHERE name=%Q\x00PRAGMA index_list=%Q\x00SELECT rootpage FROM sqlite_schema WHERE name = %Q\x00PRAGMA table_info=%Q\x00PRAGMA main.index_list = %Q\x00PRAGMA main.index_xinfo = %Q\x00SELECT * FROM '%q'\x00rbu_\x00rbu_rowid\x00table %q %s rbu_rowid column\x00may not have\x00requires\x00PRAGMA table_info(%Q)\x00column missing from %q: %s\x00%z%s\"%w\"\x00%z%s%s\"%w\"%s\x00SELECT max(_rowid_) FROM \"%s%w\"\x00 WHERE _rowid_ > %lld \x00 DESC\x00quote(\x00||','||\x00SELECT %s FROM \"%s%w\" ORDER BY %s LIMIT 1\x00 WHERE (%s) > (%s) \x00_rowid_\x00%z%s \"%w\" COLLATE %Q\x00%z%s \"rbu_imp_%d%w\" COLLATE %Q DESC\x00%z%s quote(\"rbu_imp_%d%w\")\x00SELECT %s FROM \"rbu_imp_%w\" ORDER BY %s LIMIT 1\x00%z%s%s\x00(%s) > (%s)\x00%z%s(%.*s) COLLATE %Q\x00%z%s\"%w\" COLLATE %Q\x00%z%s\"rbu_imp_%d%w\"%s\x00%z%s\"rbu_imp_%d%w\" %s COLLATE %Q\x00%z%s\"rbu_imp_%d%w\" IS ?\x00%z%s%s.\"%w\"\x00%z%sNULL\x00%z, %s._rowid_\x00_rowid_ = ?%d\x00%z%sc%d=?%d\x00_rowid_ = (SELECT id FROM rbu_imposter2 WHERE %z)\x00%z%s\"%w\"=?%d\x00invalid rbu_control value\x00%z%s\"%w\"=rbu_delta(\"%w\", ?%d)\x00%z%s\"%w\"=rbu_fossil_delta(\"%w\", ?%d)\x00PRIMARY KEY(\x00%z%s\"%w\"%s\x00%z)\x00SELECT name FROM sqlite_schema WHERE rootpage = ?\x00%z%sc%d %s COLLATE %Q\x00%z%sc%d%s\x00%z, id INTEGER\x00CREATE TABLE rbu_imposter2(%z, PRIMARY KEY(%z)) WITHOUT ROWID\x00PRIMARY KEY \x00%z%s\"%w\" %s %sCOLLATE %Q%s\x00 NOT NULL\x00%z, %z\x00CREATE TABLE \"rbu_imp_%w\"(%z)%s\x00 WITHOUT ROWID\x00INSERT INTO %s.'rbu_tmp_%q'(rbu_control,%s%s) VALUES(%z)\x00SELECT trim(sql) FROM sqlite_schema WHERE type='index' AND name=?\x00 LIMIT -1 OFFSET %d\x00CREATE TABLE \"rbu_imp_%w\"( %s, PRIMARY KEY( %s ) ) WITHOUT ROWID\x00INSERT INTO \"rbu_imp_%w\" VALUES(%s)\x00DELETE FROM \"rbu_imp_%w\" WHERE %s\x00SELECT %s, 0 AS rbu_control FROM '%q' %s %s %s ORDER BY %s%s\x00AND\x00WHERE\x00SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' %s ORDER BY %s%s\x00SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' %s UNION ALL SELECT %s, rbu_control FROM '%q' %s %s typeof(rbu_control)='integer' AND rbu_control!=1 ORDER BY %s%s\x00rbu_imp_\x00INSERT INTO \"%s%w\"(%s%s) VALUES(%s)\x00, _rowid_\x00DELETE FROM \"%s%w\" WHERE %s\x00, rbu_rowid\x00CREATE TABLE IF NOT EXISTS %s.'rbu_tmp_%q' AS SELECT *%s FROM '%q' WHERE 0;\x00, 0 AS rbu_rowid\x00CREATE TEMP TRIGGER rbu_delete_tr BEFORE DELETE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(3, %s);END;CREATE TEMP TRIGGER rbu_update1_tr BEFORE UPDATE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(3, %s);END;CREATE TEMP TRIGGER rbu_update2_tr AFTER UPDATE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(4, %s);END;\x00CREATE TEMP TRIGGER rbu_insert_tr AFTER INSERT ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(0, %s);END;\x00,_rowid_ \x00,rbu_rowid\x00SELECT %s,%s rbu_control%s FROM '%q'%s %s %s %s\x000 AS \x00UPDATE \"%s%w\" SET %s WHERE %s\x00SELECT k, v FROM %s.rbu_state\x00file:///%s-vacuum?modeof=%s\x00ATTACH %Q AS stat\x00CREATE TABLE IF NOT EXISTS %s.rbu_state(k INTEGER PRIMARY KEY, v)\x00cannot vacuum wal mode 
database\x00file:%s-vactmp?rbu_memory=1%s%s\x00&\x00rbu_tmp_insert\x00rbu_fossil_delta\x00rbu_target_name\x00SELECT * FROM sqlite_schema\x00rbu vfs not found\x00PRAGMA main.wal_checkpoint=restart\x00rbu_exclusive_checkpoint\x00%s-oal\x00%s-wal\x00PRAGMA schema_version\x00PRAGMA schema_version = %d\x00INSERT OR REPLACE INTO %s.rbu_state(k, v) VALUES (%d, %d), (%d, %Q), (%d, %Q), (%d, %d), (%d, %d), (%d, %lld), (%d, %lld), (%d, %lld), (%d, %lld), (%d, %Q) \x00PRAGMA main.%s\x00PRAGMA main.%s = %d\x00PRAGMA writable_schema=1\x00SELECT sql FROM sqlite_schema WHERE sql!='' AND rootpage!=0 AND name!='sqlite_sequence' ORDER BY type DESC\x00SELECT * FROM sqlite_schema WHERE rootpage=0 OR rootpage IS NULL\x00INSERT INTO sqlite_schema VALUES(?,?,?,?,?)\x00PRAGMA writable_schema=0\x00DELETE FROM %s.'rbu_tmp_%q'\x00rbu_state mismatch error\x00rbu_vfs_%d\x00SELECT count(*) FROM sqlite_schema WHERE type='index' AND tbl_name = %Q\x00rbu_index_cnt\x00SELECT 1 FROM sqlite_schema WHERE tbl_name = 'rbu_count'\x00SELECT sum(cnt * (1 + rbu_index_cnt(rbu_target_name(tbl))))FROM rbu_count\x00cannot update wal mode database\x00database modified during rbu %s\x00vacuum\x00update\x00BEGIN IMMEDIATE\x00PRAGMA journal_mode=off\x00-vactmp\x00DELETE FROM stat.rbu_state\x00rbu/zipvfs setup error\x00rbu(%s)/%z\x00rbu_memory\x00SELECT 0, 'tbl', '', 0, '', 1 UNION ALL SELECT 1, 'idx', '', 0, '', 2 UNION ALL SELECT 2, 'stat', '', 0, '', 0\x00PRAGMA '%q'.table_info('%q')\x00%z%s\"%w\".\"%w\".\"%w\"=\"%w\".\"%w\".\"%w\"\x00%z%s\"%w\".\"%w\".\"%w\" IS NOT \"%w\".\"%w\".\"%w\"\x00 OR \x00SELECT * FROM \"%w\".\"%w\" WHERE NOT EXISTS ( SELECT 1 FROM \"%w\".\"%w\" WHERE %s)\x00SELECT * FROM \"%w\".\"%w\", \"%w\".\"%w\" WHERE %s AND (%z)\x00table schemas do not match\x00SELECT tbl, ?2, stat FROM %Q.sqlite_stat1 WHERE tbl IS ?1 AND idx IS (CASE WHEN ?2=X'' THEN NULL ELSE ?2 END)\x00SELECT * FROM \x00 WHERE \x00 IS ?\x00SAVEPOINT changeset\x00RELEASE changeset\x00UPDATE main.\x00 SET \x00 = ?\x00idx IS CASE WHEN length(?4)=0 AND typeof(?4)='blob' THEN NULL ELSE ?4 END \x00DELETE FROM main.\x00 AND (?\x00AND \x00INSERT INTO main.\x00) VALUES(?\x00, ?\x00INSERT INTO main.sqlite_stat1 VALUES(?1, CASE WHEN length(?2)=0 AND typeof(?2)='blob' THEN NULL ELSE ?2 END, ?3)\x00DELETE FROM main.sqlite_stat1 WHERE tbl=?1 AND idx IS CASE WHEN length(?2)=0 AND typeof(?2)='blob' THEN NULL ELSE ?2 END AND (?4 OR stat IS ?3)\x00SAVEPOINT replace_op\x00RELEASE replace_op\x00SAVEPOINT changeset_apply\x00PRAGMA defer_foreign_keys = 1\x00sqlite3changeset_apply(): no such table: %s\x00sqlite3changeset_apply(): table %s has %d columns, expected %d or more\x00sqlite3changeset_apply(): primary key mismatch for table %s\x00PRAGMA defer_foreign_keys = 0\x00RELEASE changeset_apply\x00ROLLBACK TO changeset_apply\x00fts5: parser stack overflow\x00fts5: syntax error near \"%.*s\"\x00%z%.*s\x00wrong number of arguments to function highlight()\x00wrong number of arguments to function snippet()\x00snippet\x00highlight\x00bm25\x00prefix\x00malformed prefix=... directive\x00too many prefix indexes (max %d)\x00prefix length out of range (max 999)\x00tokenize\x00multiple tokenize=... directives\x00parse error in tokenize directive\x00content\x00multiple content=... directives\x00%Q.%Q\x00content_rowid\x00multiple content_rowid=... directives\x00columnsize\x00malformed columnsize=... directive\x00columns\x00malformed detail=... 
directive\x00unrecognized option: \"%.*s\"\x00reserved fts5 column name: %s\x00unindexed\x00unrecognized column option: %s\x00T.%Q\x00, T.%Q\x00, T.c%d\x00reserved fts5 table name: %s\x00parse error in \"%s\"\x00docsize\x00%Q.'%q_%s'\x00CREATE TABLE x(\x00%z%s%Q\x00%z, %Q HIDDEN, %s HIDDEN)\x00pgsz\x00hashsize\x00automerge\x00usermerge\x00crisismerge\x00SELECT k, v FROM %Q.'%q_config'\x00version\x00invalid fts5 file format (found %d, expected %d) - run 'rebuild'\x00unterminated string\x00fts5: syntax error near \"%.1s\"\x00OR\x00NOT\x00NEAR\x00expected integer, got \"%.*s\"\x00fts5: column queries are not supported (detail=none)\x00fts5: %s queries are not supported (detail!=full)\x00phrase\x00block\x00REPLACE INTO '%q'.'%q_data'(id, block) VALUES(?,?)\x00DELETE FROM '%q'.'%q_data' WHERE id>=? AND id<=?\x00DELETE FROM '%q'.'%q_idx' WHERE segid=?\x00PRAGMA %Q.data_version\x00SELECT pgno FROM '%q'.'%q_idx' WHERE segid=? AND term<=? ORDER BY term DESC LIMIT 1\x00INSERT INTO '%q'.'%q_idx'(segid,term,pgno) VALUES(?,?,?)\x00%s_data\x00id INTEGER PRIMARY KEY, block BLOB\x00segid, term, pgno, PRIMARY KEY(segid, term)\x00SELECT segid, term, (pgno>>1), (pgno&1) FROM %Q.'%q_idx' WHERE segid=%d ORDER BY 1, 2\x00\x00\x00\x00\x00\x00recursively defined fts5 content table\x00SELECT rowid, rank FROM %Q.%Q ORDER BY %s(\"%w\"%s%s) %s\x00DESC\x00ASC\x00reads\x00unknown special query: %.*s\x00SELECT %s\x00no such function: %s\x00parse error in rank function: %s\x00%s: table does not support scanning\x00delete-all\x00'delete-all' may only be used with a contentless or external content fts5 table\x00rebuild\x00'rebuild' may not be used with a contentless fts5 table\x00merge\x00integrity-check\x00cannot %s contentless fts5 table: %s\x00DELETE from\x00no such cursor: %lld\x00no such tokenizer: %s\x00error in tokenizer constructor\x00fts5_api_ptr\x00fts5: 2023-02-21 18:09:37 05941c2a04037fc3ed2ffae11f5d2260706f89431f463518740f72ada350866d\x00config\x00fts5\x00fts5_source_id\x00SELECT %s FROM %s T WHERE T.%Q >= ? AND T.%Q <= ? ORDER BY T.%Q ASC\x00SELECT %s FROM %s T WHERE T.%Q <= ? AND T.%Q >= ? 
ORDER BY T.%Q DESC\x00SELECT %s FROM %s T WHERE T.%Q=?\x00INSERT INTO %Q.'%q_content' VALUES(%s)\x00REPLACE INTO %Q.'%q_content' VALUES(%s)\x00DELETE FROM %Q.'%q_content' WHERE id=?\x00REPLACE INTO %Q.'%q_docsize' VALUES(?,?)\x00DELETE FROM %Q.'%q_docsize' WHERE id=?\x00SELECT sz FROM %Q.'%q_docsize' WHERE id=?\x00REPLACE INTO %Q.'%q_config' VALUES(?,?)\x00SELECT %s FROM %s AS T\x00DROP TABLE IF EXISTS %Q.'%q_data';DROP TABLE IF EXISTS %Q.'%q_idx';DROP TABLE IF EXISTS %Q.'%q_config';\x00DROP TABLE IF EXISTS %Q.'%q_docsize';\x00DROP TABLE IF EXISTS %Q.'%q_content';\x00ALTER TABLE %Q.'%q_%s' RENAME TO '%q_%s';\x00CREATE TABLE %Q.'%q_%q'(%s)%s\x00fts5: error creating shadow table %q_%s: %s\x00id INTEGER PRIMARY KEY\x00, c%d\x00id INTEGER PRIMARY KEY, sz BLOB\x00k PRIMARY KEY, v\x00DELETE FROM %Q.'%q_data';DELETE FROM %Q.'%q_idx';\x00DELETE FROM %Q.'%q_docsize';\x00SELECT count(*) FROM %Q.'%q_%s'\x00tokenchars\x00separators\x00L* N* Co\x00categories\x00remove_diacritics\x00unicode61\x00al\x00ance\x00ence\x00er\x00ic\x00able\x00ible\x00ant\x00ement\x00ment\x00ent\x00ion\x00ou\x00ism\x00ate\x00iti\x00ous\x00ive\x00ize\x00at\x00bl\x00ble\x00iz\x00ational\x00tional\x00tion\x00enci\x00anci\x00izer\x00logi\x00bli\x00alli\x00entli\x00eli\x00e\x00ousli\x00ization\x00ation\x00ator\x00alism\x00iveness\x00fulness\x00ful\x00ousness\x00aliti\x00iviti\x00biliti\x00ical\x00ness\x00icate\x00iciti\x00ative\x00alize\x00eed\x00ee\x00ed\x00ing\x00case_sensitive\x00ascii\x00porter\x00trigram\x00col\x00row\x00instance\x00fts5vocab: unknown table type: %Q\x00CREATE TABlE vocab(term, col, doc, cnt)\x00CREATE TABlE vocab(term, doc, cnt)\x00CREATE TABlE vocab(term, doc, col, offset)\x00wrong number of vtable arguments\x00recursive definition for %s.%s\x00SELECT t.%Q FROM %Q.%Q AS t WHERE t.%Q MATCH '*id'\x00no such fts5 table: %s.%s\x00fts5vocab\x002023-02-21 18:09:37 05941c2a04037fc3ed2ffae11f5d2260706f89431f463518740f72ada350866d\x00" +var ts1 = "3.41.2\x00ATOMIC_INTRINSICS=0\x00COMPILER=clang-13.0.0\x00DEFAULT_AUTOVACUUM\x00DEFAULT_CACHE_SIZE=-2000\x00DEFAULT_FILE_FORMAT=4\x00DEFAULT_JOURNAL_SIZE_LIMIT=-1\x00DEFAULT_MEMSTATUS=0\x00DEFAULT_MMAP_SIZE=0\x00DEFAULT_PAGE_SIZE=4096\x00DEFAULT_PCACHE_INITSZ=20\x00DEFAULT_RECURSIVE_TRIGGERS\x00DEFAULT_SECTOR_SIZE=4096\x00DEFAULT_SYNCHRONOUS=2\x00DEFAULT_WAL_AUTOCHECKPOINT=1000\x00DEFAULT_WAL_SYNCHRONOUS=2\x00DEFAULT_WORKER_THREADS=0\x00ENABLE_COLUMN_METADATA\x00ENABLE_FTS5\x00ENABLE_GEOPOLY\x00ENABLE_MATH_FUNCTIONS\x00ENABLE_MEMORY_MANAGEMENT\x00ENABLE_OFFSET_SQL_FUNC\x00ENABLE_PREUPDATE_HOOK\x00ENABLE_RBU\x00ENABLE_RTREE\x00ENABLE_SESSION\x00ENABLE_SNAPSHOT\x00ENABLE_STAT4\x00ENABLE_UNLOCK_NOTIFY\x00LIKE_DOESNT_MATCH_BLOBS\x00MALLOC_SOFT_LIMIT=1024\x00MAX_ATTACHED=10\x00MAX_COLUMN=2000\x00MAX_COMPOUND_SELECT=500\x00MAX_DEFAULT_PAGE_SIZE=8192\x00MAX_EXPR_DEPTH=1000\x00MAX_FUNCTION_ARG=127\x00MAX_LENGTH=1000000000\x00MAX_LIKE_PATTERN_LENGTH=50000\x00MAX_MMAP_SIZE=0x7fff0000\x00MAX_PAGE_COUNT=1073741823\x00MAX_PAGE_SIZE=65536\x00MAX_SQL_LENGTH=1000000000\x00MAX_TRIGGER_DEPTH=1000\x00MAX_VARIABLE_NUMBER=32766\x00MAX_VDBE_OP=250000000\x00MAX_WORKER_THREADS=8\x00MUTEX_NOOP\x00SOUNDEX\x00SYSTEM_MALLOC\x00TEMP_STORE=1\x00THREADSAFE=1\x00BINARY\x00ANY\x00BLOB\x00INT\x00INTEGER\x00REAL\x00TEXT\x0020b:20e\x0020c:20e\x0020e\x0040f-21a-21d\x00now\x00local time unavailable\x00second\x00minute\x00hour\x00\x00\x00day\x00\x00\x00\x00month\x00\x00year\x00\x00\x00auto\x00julianday\x00localtime\x00unixepoch\x00utc\x00weekday \x00start of 
\x00month\x00year\x00day\x00%02d\x00%06.3f\x00%03d\x00%.16g\x00%lld\x00%04d\x00date\x00time\x00datetime\x00strftime\x00current_time\x00current_timestamp\x00current_date\x00failed to allocate %u bytes of memory\x00failed memory resize %u to %u bytes\x00out of memory\x000123456789ABCDEF0123456789abcdef\x00-x0\x00X0\x00%\x00NaN\x00Inf\x00\x00NULL\x00(NULL)\x00.\x00(join-%u)\x00(subquery-%u)\x00thstndrd\x00922337203685477580\x00API call with %s database connection pointer\x00unopened\x00invalid\x00Savepoint\x00AutoCommit\x00Transaction\x00Checkpoint\x00JournalMode\x00Vacuum\x00VFilter\x00VUpdate\x00Init\x00Goto\x00Gosub\x00InitCoroutine\x00Yield\x00MustBeInt\x00Jump\x00Once\x00If\x00IfNot\x00IsType\x00Not\x00IfNullRow\x00SeekLT\x00SeekLE\x00SeekGE\x00SeekGT\x00IfNotOpen\x00IfNoHope\x00NoConflict\x00NotFound\x00Found\x00SeekRowid\x00NotExists\x00Last\x00IfSmaller\x00SorterSort\x00Sort\x00Rewind\x00SorterNext\x00Prev\x00Next\x00IdxLE\x00IdxGT\x00IdxLT\x00Or\x00And\x00IdxGE\x00RowSetRead\x00RowSetTest\x00Program\x00FkIfZero\x00IsNull\x00NotNull\x00Ne\x00Eq\x00Gt\x00Le\x00Lt\x00Ge\x00ElseEq\x00IfPos\x00IfNotZero\x00DecrJumpZero\x00IncrVacuum\x00VNext\x00Filter\x00PureFunc\x00Function\x00Return\x00EndCoroutine\x00HaltIfNull\x00Halt\x00Integer\x00Int64\x00String\x00BeginSubrtn\x00Null\x00SoftNull\x00Blob\x00Variable\x00Move\x00Copy\x00SCopy\x00IntCopy\x00FkCheck\x00ResultRow\x00CollSeq\x00AddImm\x00RealAffinity\x00Cast\x00Permutation\x00Compare\x00IsTrue\x00ZeroOrNull\x00Offset\x00Column\x00TypeCheck\x00Affinity\x00MakeRecord\x00Count\x00ReadCookie\x00SetCookie\x00ReopenIdx\x00BitAnd\x00BitOr\x00ShiftLeft\x00ShiftRight\x00Add\x00Subtract\x00Multiply\x00Divide\x00Remainder\x00Concat\x00OpenRead\x00OpenWrite\x00BitNot\x00OpenDup\x00OpenAutoindex\x00String8\x00OpenEphemeral\x00SorterOpen\x00SequenceTest\x00OpenPseudo\x00Close\x00ColumnsUsed\x00SeekScan\x00SeekHit\x00Sequence\x00NewRowid\x00Insert\x00RowCell\x00Delete\x00ResetCount\x00SorterCompare\x00SorterData\x00RowData\x00Rowid\x00NullRow\x00SeekEnd\x00IdxInsert\x00SorterInsert\x00IdxDelete\x00DeferredSeek\x00IdxRowid\x00FinishSeek\x00Destroy\x00Clear\x00ResetSorter\x00CreateBtree\x00SqlExec\x00ParseSchema\x00LoadAnalysis\x00DropTable\x00DropIndex\x00Real\x00DropTrigger\x00IntegrityCk\x00RowSetAdd\x00Param\x00FkCounter\x00MemMax\x00OffsetLimit\x00AggInverse\x00AggStep\x00AggStep1\x00AggValue\x00AggFinal\x00Expire\x00CursorLock\x00CursorUnlock\x00TableLock\x00VBegin\x00VCreate\x00VDestroy\x00VOpen\x00VInitIn\x00VColumn\x00VRename\x00Pagecount\x00MaxPgcnt\x00ClrSubtype\x00FilterAdd\x00Trace\x00CursorHint\x00ReleaseReg\x00Noop\x00Explain\x00Abortable\x00open\x00close\x00access\x00getcwd\x00stat\x00fstat\x00ftruncate\x00fcntl\x00read\x00pread\x00pread64\x00write\x00pwrite\x00pwrite64\x00fchmod\x00fallocate\x00unlink\x00openDirectory\x00mkdir\x00rmdir\x00fchown\x00geteuid\x00mmap\x00munmap\x00mremap\x00getpagesize\x00readlink\x00lstat\x00ioctl\x00attempt to open \"%s\" as file descriptor %d\x00/dev/null\x00os_unix.c:%d: (%d) %s(%s) - %s\x00cannot fstat db file %s\x00file unlinked while open: %s\x00multiple links to file: %s\x00file renamed while open: %s\x00%s\x00full_fsync\x00%s-shm\x00readonly_shm\x00psow\x00unix-excl\x00%s.lock\x00/var/tmp\x00/usr/tmp\x00/tmp\x00SQLITE_TMPDIR\x00TMPDIR\x00%s/etilqs_%llx%c\x00modeof\x00fsync\x00/dev/urandom\x00unix\x00unix-none\x00unix-dotfile\x00memdb\x00memdb(%p,%lld)\x00PRAGMA \"%w\".page_count\x00ATTACH x AS %Q\x00recovered %d pages from %s\x00-journal\x00-wal\x00nolock\x00immutable\x00PRAGMA 
table_list\x00recovered %d frames from WAL file %s\x00cannot limit WAL size: %s\x00SQLite format 3\x00:memory:\x00@ \x00\n\x00invalid page number %d\x002nd reference to page %d\x00Failed to read ptrmap key=%d\x00Bad ptr map entry key=%d expected=(%d,%d) got=(%d,%d)\x00failed to get page %d\x00freelist leaf count too big on page %d\x00%s is %d but should be %d\x00size\x00overflow list length\x00Page %u: \x00unable to get the page. error code=%d\x00btreeInitPage() returns error code %d\x00free space corruption\x00On tree page %u cell %d: \x00On page %u at right child: \x00Offset %d out of range %d..%d\x00Extends off end of page\x00Rowid %lld out of order\x00Child page depth differs\x00Multiple uses for byte %u of page %u\x00Fragmentation of %d bytes reported as %d on page %u\x00Main freelist: \x00max rootpage (%d) disagrees with header (%d)\x00incremental_vacuum enabled with a max rootpage of zero\x00Page %d is never used\x00Pointer map page %d is referenced\x00unknown database %s\x00destination database is in use\x00source and destination must be distinct\x00%!.15g\x00-\x00%s%s\x00k(%d\x00B\x00,%s%s%s\x00N.\x00)\x00%.18s-%s\x00%s(%d)\x00%d\x00(blob)\x00vtab:%p\x00%c%u\x00]\x00program\x00?\x008\x0016LE\x0016BE\x00addr\x00opcode\x00p1\x00p2\x00p3\x00p4\x00p5\x00comment\x00id\x00parent\x00notused\x00detail\x00%.4c%s%.16c\x00MJ delete: %s\x00MJ collide: %s\x00-mj%06X9%02X\x00FOREIGN KEY constraint failed\x00a CHECK constraint\x00a generated column\x00an index\x00non-deterministic use of %s() in %s\x00API called with finalized prepared statement\x00API called with NULL prepared statement\x00string or blob too big\x00bind on a busy prepared statement: [%s]\x00-- \x00'%.*q'\x00zeroblob(%d)\x00x'\x00%02x\x00'\x00%s constraint failed\x00%z: %s\x00abort at %d in [%s]: %s\x00cannot store %s value in %s column %s.%s\x00cannot open savepoint - SQL statements in progress\x00no such savepoint: %s\x00cannot release savepoint - SQL statements in progress\x00cannot commit transaction - SQL statements in progress\x00cannot start a transaction within a transaction\x00cannot rollback - no transaction is active\x00cannot commit - no transaction is active\x00database schema has changed\x00index corruption\x00sqlite_master\x00SELECT*FROM\"%w\".%s WHERE %s ORDER BY rowid\x00too many levels of trigger recursion\x00cannot change %s wal mode from within a transaction\x00into\x00out of\x00database table is locked: %s\x00ValueList\x00-- %s\x00statement aborts at %d: [%s] %s\x00NOT NULL\x00UNIQUE\x00CHECK\x00FOREIGN KEY\x00cannot open value of type %s\x00null\x00real\x00integer\x00no such rowid: %lld\x00cannot open virtual table: %s\x00cannot open table without rowid: %s\x00cannot open view: %s\x00no such column: \"%s\"\x00foreign key\x00indexed\x00cannot open %s column for writing\x00sqlite_\x00sqlite_temp_master\x00sqlite_temp_schema\x00sqlite_schema\x00main\x00*\x00new\x00old\x00excluded\x00misuse of aliased aggregate %s\x00misuse of aliased window function %s\x00row value misused\x00double-quoted string literal: \"%w\"\x00coalesce\x00no such column\x00ambiguous column name\x00%s: %s.%s.%s\x00%s: %s.%s\x00%s: %s\x00partial index WHERE clauses\x00index expressions\x00CHECK constraints\x00generated columns\x00%s prohibited in %s\x00the \".\" operator\x00second argument to %#T() must be a constant between 0.0 and 1.0\x00not authorized to use function: %#T\x00non-deterministic functions\x00%#T() may not be used as a window function\x00window\x00aggregate\x00misuse of %s function %#T()\x00no such function: %#T\x00wrong 
number of arguments to function %#T()\x00FILTER may not be used with non-aggregate %#T()\x00subqueries\x00parameters\x00%r %s BY term out of range - should be between 1 and %d\x00too many terms in ORDER BY clause\x00ORDER\x00%r ORDER BY term does not match any column in the result set\x00too many terms in %s BY clause\x00HAVING clause on a non-aggregate query\x00GROUP\x00aggregate functions are not allowed in the GROUP BY clause\x00Expression tree is too large (maximum depth %d)\x00IN(...) element has %d term%s - expected %d\x00s\x000\x00too many arguments on function %T\x00unsafe use of %#T()\x00variable number must be between ?1 and ?%d\x00too many SQL variables\x00%d columns assigned %d values\x00too many columns in %s\x00true\x00false\x00_ROWID_\x00ROWID\x00OID\x00USING ROWID SEARCH ON TABLE %s FOR IN-OPERATOR\x00USING INDEX %s FOR IN-OPERATOR\x00sub-select returns %d columns - expected %d\x00REUSE LIST SUBQUERY %d\x00%sLIST SUBQUERY %d\x00CORRELATED \x00REUSE SUBQUERY %d\x00%sSCALAR SUBQUERY %d\x001\x000x\x00hex literal too big: %s%#T\x00generated column loop on \"%s\"\x00blob\x00text\x00numeric\x00flexnum\x00none\x00misuse of aggregate: %#T()\x00unknown function: %#T()\x00RAISE() may only be used within a trigger-program\x00B\x00C\x00D\x00E\x00F\x00table %s may not be altered\x00SELECT 1 FROM \"%w\".sqlite_master WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%' AND sqlite_rename_test(%Q, sql, type, name, %d, %Q, %d)=NULL \x00SELECT 1 FROM temp.sqlite_master WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%' AND sqlite_rename_test(%Q, sql, type, name, 1, %Q, %d)=NULL \x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_quotefix(%Q, sql)WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%'\x00UPDATE temp.sqlite_master SET sql = sqlite_rename_quotefix('temp', sql)WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%'\x00there is already another table or index with this name: %s\x00table\x00view %s may not be altered\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, %d) WHERE (type!='index' OR tbl_name=%Q COLLATE nocase)AND name NOT LIKE 'sqliteX_%%' ESCAPE 'X'\x00UPDATE %Q.sqlite_master SET tbl_name = %Q, name = CASE WHEN type='table' THEN %Q WHEN name LIKE 'sqliteX_autoindex%%' ESCAPE 'X' AND type='index' THEN 'sqlite_autoindex_' || %Q || substr(name,%d+18) ELSE name END WHERE tbl_name=%Q COLLATE nocase AND (type='table' OR type='index' OR type='trigger');\x00sqlite_sequence\x00UPDATE \"%w\".sqlite_sequence set name = %Q WHERE name = %Q\x00UPDATE sqlite_temp_schema SET sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, 1), tbl_name = CASE WHEN tbl_name=%Q COLLATE nocase AND sqlite_rename_test(%Q, sql, type, name, 1, 'after rename', 0) THEN %Q ELSE tbl_name END WHERE type IN ('view', 'trigger')\x00after rename\x00SELECT raise(ABORT,%Q) FROM \"%w\".\"%w\"\x00Cannot add a PRIMARY KEY column\x00Cannot add a UNIQUE column\x00Cannot add a REFERENCES column with non-NULL default value\x00Cannot add a NOT NULL column with default value NULL\x00Cannot add a column with non-constant default\x00cannot add a STORED column\x00UPDATE \"%w\".sqlite_master SET sql = printf('%%.%ds, ',sql) || %Q || substr(sql,1+length(printf('%%.%ds',sql))) WHERE type = 'table' AND name = %Q\x00SELECT CASE WHEN quick_check GLOB 'CHECK*' THEN raise(ABORT,'CHECK constraint failed') ELSE raise(ABORT,'NOT NULL constraint failed') END FROM pragma_quick_check(%Q,%Q) 
WHERE quick_check GLOB 'CHECK*' OR quick_check GLOB 'NULL*'\x00virtual tables may not be altered\x00Cannot add a column to a view\x00sqlite_altertab_%s\x00view\x00virtual table\x00cannot %s %s \"%s\"\x00drop column from\x00rename columns of\x00no such column: \"%T\"\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, %d) WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND (type != 'index' OR tbl_name = %Q)\x00UPDATE temp.sqlite_master SET sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, 1) WHERE type IN ('trigger', 'view')\x00error in %s %s%s%s: %s\x00 \x00CREATE \x00\"%w\" \x00%Q%s\x00%.*s%s\x00cannot drop %s column: \"%s\"\x00PRIMARY KEY\x00cannot drop column \"%s\": no other columns exist\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_drop_column(%d, sql, %d) WHERE (type=='table' AND tbl_name=%Q COLLATE nocase)\x00after drop column\x00sqlite_rename_column\x00sqlite_rename_table\x00sqlite_rename_test\x00sqlite_drop_column\x00sqlite_rename_quotefix\x00CREATE TABLE %Q.%s(%s)\x00DELETE FROM %Q.%s WHERE %s=%Q\x00DELETE FROM %Q.%s\x00sqlite_stat1\x00tbl,idx,stat\x00sqlite_stat4\x00tbl,idx,neq,nlt,ndlt,sample\x00sqlite_stat3\x00stat_init\x00stat_push\x00%llu\x00 %llu\x00%llu \x00stat_get\x00sqlite\\_%\x00BBB\x00idx\x00tbl\x00unordered*\x00sz=[0-9]*\x00noskipscan*\x00SELECT idx,count(*) FROM %Q.sqlite_stat4 GROUP BY idx\x00SELECT idx,neq,nlt,ndlt,sample FROM %Q.sqlite_stat4\x00SELECT tbl,idx,stat FROM %Q.sqlite_stat1\x00x\x00\x00too many attached databases - max %d\x00database %s is already in use\x00database is already attached\x00attached databases must use the same text encoding as main database\x00unable to open database: %s\x00no such database: %s\x00cannot detach database %s\x00database %s is locked\x00sqlite_detach\x00sqlite_attach\x00%s cannot use variables\x00%s %T cannot reference objects in database %s\x00authorizer malfunction\x00%s.%s\x00%s.%z\x00access to %z is prohibited\x00not authorized\x00pragma_\x00no such view\x00no such table\x00corrupt database\x00unknown database %T\x00object name reserved for internal use: %s\x00temporary table name must be unqualified\x00%s %T already exists\x00there is already an index named %s\x00sqlite_returning\x00cannot use RETURNING in a trigger\x00too many columns on %s\x00always\x00generated\x00duplicate column name: %s\x00default value of column [%s] is not constant\x00cannot use DEFAULT on a generated column\x00generated columns cannot be part of the PRIMARY KEY\x00table \"%s\" has more than one primary key\x00AUTOINCREMENT is only allowed on an INTEGER PRIMARY KEY\x00virtual tables cannot use computed columns\x00virtual\x00stored\x00error in generated column \"%s\"\x00,\x00\n \x00,\n \x00\n)\x00CREATE TABLE \x00 TEXT\x00 NUM\x00 INT\x00 REAL\x00unknown datatype for %s.%s: \"%s\"\x00missing datatype for %s.%s\x00AUTOINCREMENT not allowed on WITHOUT ROWID tables\x00PRIMARY KEY missing on table %s\x00must have at least one non-generated column\x00TABLE\x00VIEW\x00CREATE %s %.*s\x00UPDATE %Q.sqlite_master SET type='%s', name=%Q, tbl_name=%Q, rootpage=#%d, sql=%Q WHERE rowid=#%d\x00CREATE TABLE %Q.sqlite_sequence(name,seq)\x00tbl_name='%q' AND type!='trigger'\x00parameters are not allowed in views\x00view %s is circularly defined\x00corrupt schema\x00UPDATE %Q.sqlite_master SET rootpage=%d WHERE #%d AND rootpage=#%d\x00sqlite_stat%d\x00DELETE FROM %Q.sqlite_sequence WHERE name=%Q\x00DELETE FROM %Q.sqlite_master WHERE tbl_name=%Q and type!='trigger'\x00table %s may not be dropped\x00use 
DROP TABLE to delete table %s\x00use DROP VIEW to delete view %s\x00foreign key on %s should reference only one column of table %T\x00number of columns in foreign key does not match the number of columns in the referenced table\x00unknown column \"%s\" in foreign key definition\x00unsupported use of NULLS %s\x00FIRST\x00LAST\x00index\x00cannot create a TEMP index on non-TEMP table \"%s\"\x00table %s may not be indexed\x00views may not be indexed\x00virtual tables may not be indexed\x00there is already a table named %s\x00index %s already exists\x00sqlite_autoindex_%s_%d\x00expressions prohibited in PRIMARY KEY and UNIQUE constraints\x00conflicting ON CONFLICT clauses specified\x00invalid rootpage\x00CREATE%s INDEX %.*s\x00 UNIQUE\x00INSERT INTO %Q.sqlite_master VALUES('index',%Q,%Q,#%d,%Q);\x00name='%q' AND type='index'\x00no such index: %S\x00index associated with UNIQUE or PRIMARY KEY constraint cannot be dropped\x00DELETE FROM %Q.sqlite_master WHERE name=%Q AND type='index'\x00too many FROM clause terms, max: %d\x00a JOIN clause is required before %s\x00ON\x00USING\x00BEGIN\x00ROLLBACK\x00COMMIT\x00RELEASE\x00unable to open a temporary database file for storing temporary tables\x00index '%q'\x00, \x00%s.rowid\x00unable to identify the object to be reindexed\x00duplicate WITH table name: %s\x00no such collation sequence: %s\x00unsafe use of virtual table \"%s\"\x00table %s may not be modified\x00cannot modify %s because it is a view\x00rows deleted\x00integer overflow\x00%.*f\x00LIKE or GLOB pattern too complex\x00ESCAPE expression must be a single character\x00%!.20e\x00%Q\x00?000\x00MATCH\x00like\x00implies_nonnull_row\x00expr_compare\x00expr_implies_expr\x00affinity\x00soundex\x00load_extension\x00sqlite_compileoption_used\x00sqlite_compileoption_get\x00unlikely\x00likelihood\x00likely\x00sqlite_offset\x00ltrim\x00rtrim\x00trim\x00min\x00max\x00typeof\x00subtype\x00length\x00instr\x00printf\x00format\x00unicode\x00char\x00abs\x00round\x00upper\x00lower\x00hex\x00unhex\x00ifnull\x00random\x00randomblob\x00nullif\x00sqlite_version\x00sqlite_source_id\x00sqlite_log\x00quote\x00last_insert_rowid\x00changes\x00total_changes\x00replace\x00zeroblob\x00substr\x00substring\x00sum\x00total\x00avg\x00count\x00group_concat\x00glob\x00ceil\x00ceiling\x00floor\x00trunc\x00ln\x00log\x00log10\x00log2\x00exp\x00pow\x00power\x00mod\x00acos\x00asin\x00atan\x00atan2\x00cos\x00sin\x00tan\x00cosh\x00sinh\x00tanh\x00acosh\x00asinh\x00atanh\x00sqrt\x00radians\x00degrees\x00pi\x00sign\x00iif\x00foreign key mismatch - \"%w\" referencing \"%w\"\x00cannot INSERT into generated column \"%s\"\x00table %S has no column named %s\x00table %S has %d columns but %d values were supplied\x00%d values for %d columns\x00UPSERT not implemented for virtual table \"%s\"\x00cannot UPSERT a view\x00rows inserted\x00sqlite3_extension_init\x00sqlite3_\x00lib\x00_init\x00no entry point [%s] in shared library [%s]\x00error during initialization: %s\x00unable to open shared library [%.*s]\x00so\x00automatic extension loading failed: 
%s\x00seq\x00from\x00to\x00on_update\x00on_delete\x00match\x00cid\x00name\x00type\x00notnull\x00dflt_value\x00pk\x00hidden\x00schema\x00ncol\x00wr\x00strict\x00seqno\x00desc\x00coll\x00key\x00builtin\x00enc\x00narg\x00flags\x00wdth\x00hght\x00flgs\x00unique\x00origin\x00partial\x00rowid\x00fkid\x00file\x00busy\x00checkpointed\x00database\x00status\x00cache_size\x00timeout\x00analysis_limit\x00application_id\x00auto_vacuum\x00automatic_index\x00busy_timeout\x00cache_spill\x00case_sensitive_like\x00cell_size_check\x00checkpoint_fullfsync\x00collation_list\x00compile_options\x00count_changes\x00data_version\x00database_list\x00default_cache_size\x00defer_foreign_keys\x00empty_result_callbacks\x00encoding\x00foreign_key_check\x00foreign_key_list\x00foreign_keys\x00freelist_count\x00full_column_names\x00fullfsync\x00function_list\x00hard_heap_limit\x00ignore_check_constraints\x00incremental_vacuum\x00index_info\x00index_list\x00index_xinfo\x00integrity_check\x00journal_mode\x00journal_size_limit\x00legacy_alter_table\x00locking_mode\x00max_page_count\x00mmap_size\x00module_list\x00optimize\x00page_count\x00page_size\x00pragma_list\x00query_only\x00quick_check\x00read_uncommitted\x00recursive_triggers\x00reverse_unordered_selects\x00schema_version\x00secure_delete\x00short_column_names\x00shrink_memory\x00soft_heap_limit\x00synchronous\x00table_info\x00table_list\x00table_xinfo\x00temp_store\x00temp_store_directory\x00threads\x00trusted_schema\x00user_version\x00wal_autocheckpoint\x00wal_checkpoint\x00writable_schema\x00onoffalseyestruextrafull\x00exclusive\x00normal\x00full\x00incremental\x00memory\x00temporary storage cannot be changed from within a transaction\x00SET NULL\x00SET DEFAULT\x00CASCADE\x00RESTRICT\x00NO ACTION\x00delete\x00persist\x00off\x00truncate\x00wal\x00w\x00a\x00sissii\x00utf8\x00utf16le\x00utf16be\x00-%T\x00fast\x00not a writable directory\x00Safety level may not be changed inside a transaction\x00reset\x00issisii\x00issisi\x00SELECT*FROM\"%w\"\x00shadow\x00sssiii\x00iisX\x00isiX\x00c\x00u\x00isisi\x00iss\x00is\x00iissssss\x00NONE\x00siX\x00*** in database %s ***\n\x00row not in PRIMARY KEY order for %s\x00NULL value in %s.%s\x00non-%s value in %s.%s\x00NUMERIC value in %s.%s\x00C\x00TEXT value in %s.%s\x00CHECK constraint failed in %s\x00row \x00 missing from index \x00rowid not at end-of-record for row \x00 of index \x00 values differ from index \x00non-unique entry in index \x00wrong # of entries in index \x00ok\x00unsupported encoding: %s\x00restart\x00ANALYZE \"%w\".\"%w\"\x00UTF8\x00UTF-8\x00UTF-16le\x00UTF-16be\x00UTF16le\x00UTF16be\x00UTF-16\x00UTF16\x00CREATE TABLE x\x00%c\"%s\"\x00(\"%s\"\x00,arg HIDDEN\x00,schema HIDDEN\x00PRAGMA \x00%Q.\x00=%Q\x00error in %s %s after %s: %s\x00malformed database schema (%s)\x00%z - %s\x00rename\x00drop column\x00add column\x00orphan index\x00CREATE TABLE x(type text,name text,tbl_name text,rootpage int,sql text)\x00unsupported file format\x00SELECT*FROM\"%w\".%s ORDER BY rowid\x00database schema is locked: %s\x00statement too long\x00unknown join type: %T%s%T%s%T\x00naturaleftouterightfullinnercross\x00a NATURAL join may not have an ON or USING clause\x00cannot join using column %s - column not present in both tables\x00ambiguous reference to %s in USING()\x00UNION ALL\x00INTERSECT\x00EXCEPT\x00UNION\x00USE TEMP B-TREE FOR %s\x00USE TEMP B-TREE FOR %sORDER BY\x00RIGHT PART OF \x00column%d\x00%.*z:%u\x00NUM\x00cannot use window functions in recursive queries\x00recursive aggregate queries not supported\x00SETUP\x00RECURSIVE 
STEP\x00SCAN %d CONSTANT ROW%s\x00S\x00COMPOUND QUERY\x00LEFT-MOST SUBQUERY\x00%s USING TEMP B-TREE\x00all VALUES must have the same number of terms\x00SELECTs to the left and right of %s do not have the same number of result columns\x00MERGE (%s)\x00LEFT\x00RIGHT\x00no such index: %s\x00'%s' is not a function\x00no such index: \"%s\"\x00multiple references to recursive table: %s\x00circular reference: %s\x00table %s has %d values for %d columns\x00multiple recursive references: %s\x00recursive reference in a subquery: %s\x00%!S\x00too many references to \"%s\": max 65535\x00access to view \"%s\" prohibited\x00..%s\x00%s.%s.%s\x00no such table: %s\x00no tables specified\x00too many columns in result set\x00DISTINCT aggregates must have exactly one argument\x00USE TEMP B-TREE FOR %s(DISTINCT)\x00SCAN %s%s%s\x00 USING COVERING INDEX \x00target object/alias may not appear in FROM clause: %s\x00expected %d columns for '%s' but got %d\x00CO-ROUTINE %!S\x00MATERIALIZE %!S\x00DISTINCT\x00GROUP BY\x00sqlite3_get_table() called with two or more incompatible queries\x00temporary trigger may not have qualified name\x00trigger\x00cannot create triggers on virtual tables\x00trigger %T already exists\x00cannot create trigger on system table\x00cannot create %s trigger on view: %S\x00BEFORE\x00AFTER\x00cannot create INSTEAD OF trigger on table: %S\x00trigger \"%s\" may not write to shadow table \"%s\"\x00INSERT INTO %Q.sqlite_master VALUES('trigger',%Q,%Q,0,'CREATE TRIGGER %q')\x00type='trigger' AND name='%q'\x00no such trigger: %S\x00DELETE FROM %Q.sqlite_master WHERE name=%Q AND type='trigger'\x00%s RETURNING is not available on virtual tables\x00DELETE\x00UPDATE\x00RETURNING may not use \"TABLE.*\" wildcards\x00-- TRIGGER %s\x00cannot UPDATE generated column \"%s\"\x00no such column: %s\x00rows updated\x00%r \x00%sON CONFLICT clause does not match any PRIMARY KEY or UNIQUE constraint\x00CRE\x00INS\x00cannot VACUUM from within a transaction\x00cannot VACUUM - SQL statements in progress\x00non-text filename\x00ATTACH %Q AS vacuum_db\x00output file already exists\x00SELECT sql FROM \"%w\".sqlite_schema WHERE type='table'AND name<>'sqlite_sequence' AND coalesce(rootpage,1)>0\x00SELECT sql FROM \"%w\".sqlite_schema WHERE type='index'\x00SELECT'INSERT INTO vacuum_db.'||quote(name)||' SELECT*FROM\"%w\".'||quote(name)FROM vacuum_db.sqlite_schema WHERE type='table'AND coalesce(rootpage,1)>0\x00INSERT INTO vacuum_db.sqlite_schema SELECT*FROM \"%w\".sqlite_schema WHERE type IN('view','trigger') OR(type='table'AND rootpage=0)\x00CREATE VIRTUAL TABLE %T\x00UPDATE %Q.sqlite_master SET type='table', name=%Q, tbl_name=%Q, rootpage=0, sql=%Q WHERE rowid=#%d\x00name=%Q AND sql=%Q\x00vtable constructor called recursively: %s\x00vtable constructor failed: %s\x00vtable constructor did not declare schema: %s\x00no such module: %s\x00\x00 AND \x00(\x00 (\x00%s=?\x00ANY(%s)\x00>\x00<\x00%s %S\x00SEARCH\x00SCAN\x00AUTOMATIC PARTIAL COVERING INDEX\x00AUTOMATIC COVERING INDEX\x00COVERING INDEX %s\x00INDEX %s\x00 USING \x00 USING INTEGER PRIMARY KEY (%s\x00>? 
AND %s\x00%c?)\x00 VIRTUAL TABLE INDEX %d:%s\x00 LEFT-JOIN\x00BLOOM FILTER ON %S (\x00rowid=?\x00MULTI-INDEX OR\x00INDEX %d\x00RIGHT-JOIN %s\x00regexp\x00ON clause references tables to its right\x00NOCASE\x00too many arguments on %s() - max %d\x00automatic index on %s(%s)\x00auto-index\x00%s.xBestIndex malfunction\x00abbreviated query algorithm search\x00no query solution\x00at most %d tables in a join\x00SCAN CONSTANT ROW\x00second argument to nth_value must be a positive integer\x00argument of ntile must be a positive integer\x00row_number\x00dense_rank\x00rank\x00percent_rank\x00cume_dist\x00ntile\x00last_value\x00nth_value\x00first_value\x00lead\x00lag\x00no such window: %s\x00RANGE with offset PRECEDING/FOLLOWING requires one ORDER BY expression\x00FILTER clause may only be used with aggregate window functions\x00misuse of aggregate: %s()\x00unsupported frame specification\x00PARTITION clause\x00ORDER BY clause\x00frame specification\x00cannot override %s of window: %s\x00DISTINCT is not supported for window functions\x00frame starting offset must be a non-negative integer\x00frame ending offset must be a non-negative integer\x00frame starting offset must be a non-negative number\x00frame ending offset must be a non-negative number\x00%s clause should come after %s not before\x00ORDER BY\x00LIMIT\x00too many terms in compound SELECT\x00syntax error after column name \"%.*s\"\x00parser stack overflow\x00unknown table option: %.*s\x00set list\x00near \"%T\": syntax error\x00qualified table names are not allowed on INSERT, UPDATE, and DELETE statements within triggers\x00the INDEXED BY clause is not allowed on UPDATE or DELETE statements within triggers\x00the NOT INDEXED clause is not allowed on UPDATE or DELETE statements within triggers\x00incomplete input\x00unrecognized token: \"%T\"\x00%s in \"%s\"\x00create\x00temp\x00temporary\x00end\x00explain\x00unable to close due to unfinalized statements or unfinished backups\x00unknown error\x00abort due to ROLLBACK\x00another row available\x00no more rows available\x00not an error\x00SQL logic error\x00access permission denied\x00query aborted\x00database is locked\x00database table is locked\x00attempt to write a readonly database\x00interrupted\x00disk I/O error\x00database disk image is malformed\x00unknown operation\x00database or disk is full\x00unable to open database file\x00locking protocol\x00constraint failed\x00datatype mismatch\x00bad parameter or other API misuse\x00authorization denied\x00column index out of range\x00file is not a database\x00notification message\x00warning message\x00unable to delete/modify user-function due to active statements\x00unable to use function %s in the requested context\x00unknown database: %s\x00unable to delete/modify collation sequence due to active statements\x00file:\x00localhost\x00invalid uri authority: %.*s\x00vfs\x00cache\x00mode\x00no such %s mode: %s\x00%s mode not allowed: %s\x00no such vfs: %s\x00shared\x00private\x00ro\x00rw\x00rwc\x00RTRIM\x00\x00\x00\x00%s at line %d of [%.10s]\x00database corruption\x00misuse\x00cannot open file\x00no such table column: %s.%s\x00SQLITE_\x00database is deadlocked\x00array\x00object\x000123456789abcdef\x00JSON cannot hold BLOB values\x00malformed JSON\x00[0]\x00JSON path error near '%q'\x00json_%s() needs an odd number of arguments\x00$[\x00$.\x00json_object() requires an even number of arguments\x00json_object() labels must be TEXT\x00set\x00insert\x00[]\x00{}\x00CREATE TABLE x(key,value,type,atom,id,parent,fullkey,path,json HIDDEN,root 
HIDDEN)\x00.%.*s\x00[%d]\x00$\x00json\x00json_array\x00json_array_length\x00json_extract\x00->\x00->>\x00json_insert\x00json_object\x00json_patch\x00json_quote\x00json_remove\x00json_replace\x00json_set\x00json_type\x00json_valid\x00json_group_array\x00json_group_object\x00json_each\x00json_tree\x00%s_node\x00data\x00DROP TABLE '%q'.'%q_node';DROP TABLE '%q'.'%q_rowid';DROP TABLE '%q'.'%q_parent';\x00RtreeMatchArg\x00SELECT * FROM %Q.%Q\x00UNIQUE constraint failed: %s.%s\x00rtree constraint failed: %s.(%s<=%s)\x00ALTER TABLE %Q.'%q_node' RENAME TO \"%w_node\";ALTER TABLE %Q.'%q_parent' RENAME TO \"%w_parent\";ALTER TABLE %Q.'%q_rowid' RENAME TO \"%w_rowid\";\x00SELECT stat FROM %Q.sqlite_stat1 WHERE tbl = '%q_rowid'\x00node\x00CREATE TABLE \"%w\".\"%w_rowid\"(rowid INTEGER PRIMARY KEY,nodeno\x00,a%d\x00);CREATE TABLE \"%w\".\"%w_node\"(nodeno INTEGER PRIMARY KEY,data);\x00CREATE TABLE \"%w\".\"%w_parent\"(nodeno INTEGER PRIMARY KEY,parentnode);\x00INSERT INTO \"%w\".\"%w_node\"VALUES(1,zeroblob(%d))\x00INSERT INTO\"%w\".\"%w_rowid\"(rowid,nodeno)VALUES(?1,?2)ON CONFLICT(rowid)DO UPDATE SET nodeno=excluded.nodeno\x00SELECT * FROM \"%w\".\"%w_rowid\" WHERE rowid=?1\x00UPDATE \"%w\".\"%w_rowid\"SET \x00a%d=coalesce(?%d,a%d)\x00a%d=?%d\x00 WHERE rowid=?1\x00INSERT OR REPLACE INTO '%q'.'%q_node' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_node' WHERE nodeno = ?1\x00SELECT nodeno FROM '%q'.'%q_rowid' WHERE rowid = ?1\x00INSERT OR REPLACE INTO '%q'.'%q_rowid' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_rowid' WHERE rowid = ?1\x00SELECT parentnode FROM '%q'.'%q_parent' WHERE nodeno = ?1\x00INSERT OR REPLACE INTO '%q'.'%q_parent' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_parent' WHERE nodeno = ?1\x00PRAGMA %Q.page_size\x00SELECT length(data) FROM '%q'.'%q_node' WHERE nodeno = 1\x00undersize RTree blobs in \"%q_node\"\x00Wrong number of columns for an rtree table\x00Too few columns for an rtree table\x00Too many columns for an rtree table\x00Auxiliary rtree columns must be last\x00CREATE TABLE x(%.*s INT\x00,%.*s\x00);\x00,%.*s REAL\x00,%.*s INT\x00{%lld\x00 %g\x00}\x00Invalid argument to rtreedepth()\x00%z%s%z\x00SELECT data FROM %Q.'%q_node' WHERE nodeno=?\x00Node %lld missing from database\x00SELECT parentnode FROM %Q.'%q_parent' WHERE nodeno=?1\x00SELECT nodeno FROM %Q.'%q_rowid' WHERE rowid=?1\x00Mapping (%lld -> %lld) missing from %s table\x00%_rowid\x00%_parent\x00Found (%lld -> %lld) in %s table, expected (%lld -> %lld)\x00Dimension %d of cell %d on node %lld is corrupt\x00Dimension %d of cell %d on node %lld is corrupt relative to parent\x00Node %lld is too small (%d bytes)\x00Rtree depth out of range (%d)\x00Node %lld is too small for cell count of %d (%d bytes)\x00SELECT count(*) FROM %Q.'%q%s'\x00Wrong number of entries in %%%s table - expected %lld, actual %lld\x00SELECT * FROM %Q.'%q_rowid'\x00Schema corrupt or not an rtree\x00_rowid\x00_parent\x00END\x00wrong number of arguments to function rtreecheck()\x00[\x00[%!g,%!g],\x00[%!g,%!g]]\x00\x00CREATE TABLE x(_shape\x00,%s\x00rtree\x00fullscan\x00_shape does not contain a valid polygon\x00geopoly_overlap\x00geopoly_within\x00geopoly\x00geopoly_area\x00geopoly_blob\x00geopoly_json\x00geopoly_svg\x00geopoly_contains_point\x00geopoly_debug\x00geopoly_bbox\x00geopoly_xform\x00geopoly_regular\x00geopoly_ccw\x00geopoly_group_bbox\x00rtreenode\x00rtreedepth\x00rtreecheck\x00rtree_i32\x00corrupt fossil delta\x00DROP TRIGGER IF EXISTS temp.rbu_insert_tr;DROP TRIGGER IF EXISTS temp.rbu_update1_tr;DROP TRIGGER IF EXISTS temp.rbu_update2_tr;DROP TRIGGER IF 
EXISTS temp.rbu_delete_tr;\x00SELECT rbu_target_name(name, type='view') AS target, name FROM sqlite_schema WHERE type IN ('table', 'view') AND target IS NOT NULL %s ORDER BY name\x00AND rootpage!=0 AND rootpage IS NOT NULL\x00SELECT name, rootpage, sql IS NULL OR substr(8, 6)=='UNIQUE' FROM main.sqlite_schema WHERE type='index' AND tbl_name = ?\x00SELECT (sql COLLATE nocase BETWEEN 'CREATE VIRTUAL' AND 'CREATE VIRTUAM'), rootpage FROM sqlite_schema WHERE name=%Q\x00PRAGMA index_list=%Q\x00SELECT rootpage FROM sqlite_schema WHERE name = %Q\x00PRAGMA table_info=%Q\x00PRAGMA main.index_list = %Q\x00PRAGMA main.index_xinfo = %Q\x00SELECT * FROM '%q'\x00rbu_\x00rbu_rowid\x00table %q %s rbu_rowid column\x00may not have\x00requires\x00PRAGMA table_info(%Q)\x00column missing from %q: %s\x00%z%s\"%w\"\x00%z%s%s\"%w\"%s\x00SELECT max(_rowid_) FROM \"%s%w\"\x00 WHERE _rowid_ > %lld \x00 DESC\x00quote(\x00||','||\x00SELECT %s FROM \"%s%w\" ORDER BY %s LIMIT 1\x00 WHERE (%s) > (%s) \x00_rowid_\x00%z%s \"%w\" COLLATE %Q\x00%z%s \"rbu_imp_%d%w\" COLLATE %Q DESC\x00%z%s quote(\"rbu_imp_%d%w\")\x00SELECT %s FROM \"rbu_imp_%w\" ORDER BY %s LIMIT 1\x00%z%s%s\x00(%s) > (%s)\x00%z%s(%.*s) COLLATE %Q\x00%z%s\"%w\" COLLATE %Q\x00%z%s\"rbu_imp_%d%w\"%s\x00%z%s\"rbu_imp_%d%w\" %s COLLATE %Q\x00%z%s\"rbu_imp_%d%w\" IS ?\x00%z%s%s.\"%w\"\x00%z%sNULL\x00%z, %s._rowid_\x00_rowid_ = ?%d\x00%z%sc%d=?%d\x00_rowid_ = (SELECT id FROM rbu_imposter2 WHERE %z)\x00%z%s\"%w\"=?%d\x00invalid rbu_control value\x00%z%s\"%w\"=rbu_delta(\"%w\", ?%d)\x00%z%s\"%w\"=rbu_fossil_delta(\"%w\", ?%d)\x00PRIMARY KEY(\x00%z%s\"%w\"%s\x00%z)\x00SELECT name FROM sqlite_schema WHERE rootpage = ?\x00%z%sc%d %s COLLATE %Q\x00%z%sc%d%s\x00%z, id INTEGER\x00CREATE TABLE rbu_imposter2(%z, PRIMARY KEY(%z)) WITHOUT ROWID\x00PRIMARY KEY \x00%z%s\"%w\" %s %sCOLLATE %Q%s\x00 NOT NULL\x00%z, %z\x00CREATE TABLE \"rbu_imp_%w\"(%z)%s\x00 WITHOUT ROWID\x00INSERT INTO %s.'rbu_tmp_%q'(rbu_control,%s%s) VALUES(%z)\x00SELECT trim(sql) FROM sqlite_schema WHERE type='index' AND name=?\x00 LIMIT -1 OFFSET %d\x00CREATE TABLE \"rbu_imp_%w\"( %s, PRIMARY KEY( %s ) ) WITHOUT ROWID\x00INSERT INTO \"rbu_imp_%w\" VALUES(%s)\x00DELETE FROM \"rbu_imp_%w\" WHERE %s\x00SELECT %s, 0 AS rbu_control FROM '%q' %s %s %s ORDER BY %s%s\x00AND\x00WHERE\x00SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' %s ORDER BY %s%s\x00SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' %s UNION ALL SELECT %s, rbu_control FROM '%q' %s %s typeof(rbu_control)='integer' AND rbu_control!=1 ORDER BY %s%s\x00rbu_imp_\x00INSERT INTO \"%s%w\"(%s%s) VALUES(%s)\x00, _rowid_\x00DELETE FROM \"%s%w\" WHERE %s\x00, rbu_rowid\x00CREATE TABLE IF NOT EXISTS %s.'rbu_tmp_%q' AS SELECT *%s FROM '%q' WHERE 0;\x00, 0 AS rbu_rowid\x00CREATE TEMP TRIGGER rbu_delete_tr BEFORE DELETE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(3, %s);END;CREATE TEMP TRIGGER rbu_update1_tr BEFORE UPDATE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(3, %s);END;CREATE TEMP TRIGGER rbu_update2_tr AFTER UPDATE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(4, %s);END;\x00CREATE TEMP TRIGGER rbu_insert_tr AFTER INSERT ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(0, %s);END;\x00,_rowid_ \x00,rbu_rowid\x00SELECT %s,%s rbu_control%s FROM '%q'%s %s %s %s\x000 AS \x00UPDATE \"%s%w\" SET %s WHERE %s\x00SELECT k, v FROM %s.rbu_state\x00file:///%s-vacuum?modeof=%s\x00ATTACH %Q AS stat\x00CREATE TABLE IF NOT EXISTS %s.rbu_state(k INTEGER PRIMARY KEY, v)\x00cannot vacuum wal mode 
database\x00file:%s-vactmp?rbu_memory=1%s%s\x00&\x00rbu_tmp_insert\x00rbu_fossil_delta\x00rbu_target_name\x00SELECT * FROM sqlite_schema\x00rbu vfs not found\x00PRAGMA main.wal_checkpoint=restart\x00rbu_exclusive_checkpoint\x00%s-oal\x00%s-wal\x00PRAGMA schema_version\x00PRAGMA schema_version = %d\x00INSERT OR REPLACE INTO %s.rbu_state(k, v) VALUES (%d, %d), (%d, %Q), (%d, %Q), (%d, %d), (%d, %d), (%d, %lld), (%d, %lld), (%d, %lld), (%d, %lld), (%d, %Q) \x00PRAGMA main.%s\x00PRAGMA main.%s = %d\x00PRAGMA writable_schema=1\x00SELECT sql FROM sqlite_schema WHERE sql!='' AND rootpage!=0 AND name!='sqlite_sequence' ORDER BY type DESC\x00SELECT * FROM sqlite_schema WHERE rootpage=0 OR rootpage IS NULL\x00INSERT INTO sqlite_schema VALUES(?,?,?,?,?)\x00PRAGMA writable_schema=0\x00DELETE FROM %s.'rbu_tmp_%q'\x00rbu_state mismatch error\x00rbu_vfs_%d\x00SELECT count(*) FROM sqlite_schema WHERE type='index' AND tbl_name = %Q\x00rbu_index_cnt\x00SELECT 1 FROM sqlite_schema WHERE tbl_name = 'rbu_count'\x00SELECT sum(cnt * (1 + rbu_index_cnt(rbu_target_name(tbl))))FROM rbu_count\x00cannot update wal mode database\x00database modified during rbu %s\x00vacuum\x00update\x00BEGIN IMMEDIATE\x00PRAGMA journal_mode=off\x00-vactmp\x00DELETE FROM stat.rbu_state\x00rbu/zipvfs setup error\x00rbu(%s)/%z\x00rbu_memory\x00SELECT 0, 'tbl', '', 0, '', 1 UNION ALL SELECT 1, 'idx', '', 0, '', 2 UNION ALL SELECT 2, 'stat', '', 0, '', 0\x00PRAGMA '%q'.table_info('%q')\x00%z%s\"%w\".\"%w\".\"%w\"=\"%w\".\"%w\".\"%w\"\x00%z%s\"%w\".\"%w\".\"%w\" IS NOT \"%w\".\"%w\".\"%w\"\x00 OR \x00SELECT * FROM \"%w\".\"%w\" WHERE NOT EXISTS ( SELECT 1 FROM \"%w\".\"%w\" WHERE %s)\x00SELECT * FROM \"%w\".\"%w\", \"%w\".\"%w\" WHERE %s AND (%z)\x00table schemas do not match\x00SELECT tbl, ?2, stat FROM %Q.sqlite_stat1 WHERE tbl IS ?1 AND idx IS (CASE WHEN ?2=X'' THEN NULL ELSE ?2 END)\x00SELECT * FROM \x00 WHERE \x00 IS ?\x00SAVEPOINT changeset\x00RELEASE changeset\x00UPDATE main.\x00 SET \x00 = ?\x00idx IS CASE WHEN length(?4)=0 AND typeof(?4)='blob' THEN NULL ELSE ?4 END \x00DELETE FROM main.\x00 AND (?\x00AND \x00INSERT INTO main.\x00) VALUES(?\x00, ?\x00INSERT INTO main.sqlite_stat1 VALUES(?1, CASE WHEN length(?2)=0 AND typeof(?2)='blob' THEN NULL ELSE ?2 END, ?3)\x00DELETE FROM main.sqlite_stat1 WHERE tbl=?1 AND idx IS CASE WHEN length(?2)=0 AND typeof(?2)='blob' THEN NULL ELSE ?2 END AND (?4 OR stat IS ?3)\x00SAVEPOINT replace_op\x00RELEASE replace_op\x00SAVEPOINT changeset_apply\x00PRAGMA defer_foreign_keys = 1\x00sqlite3changeset_apply(): no such table: %s\x00sqlite3changeset_apply(): table %s has %d columns, expected %d or more\x00sqlite3changeset_apply(): primary key mismatch for table %s\x00PRAGMA defer_foreign_keys = 0\x00RELEASE changeset_apply\x00ROLLBACK TO changeset_apply\x00fts5: parser stack overflow\x00fts5: syntax error near \"%.*s\"\x00%z%.*s\x00wrong number of arguments to function highlight()\x00wrong number of arguments to function snippet()\x00snippet\x00highlight\x00bm25\x00prefix\x00malformed prefix=... directive\x00too many prefix indexes (max %d)\x00prefix length out of range (max 999)\x00tokenize\x00multiple tokenize=... directives\x00parse error in tokenize directive\x00content\x00multiple content=... directives\x00%Q.%Q\x00content_rowid\x00multiple content_rowid=... directives\x00columnsize\x00malformed columnsize=... directive\x00columns\x00malformed detail=... 
directive\x00unrecognized option: \"%.*s\"\x00reserved fts5 column name: %s\x00unindexed\x00unrecognized column option: %s\x00T.%Q\x00, T.%Q\x00, T.c%d\x00reserved fts5 table name: %s\x00parse error in \"%s\"\x00docsize\x00%Q.'%q_%s'\x00CREATE TABLE x(\x00%z%s%Q\x00%z, %Q HIDDEN, %s HIDDEN)\x00pgsz\x00hashsize\x00automerge\x00usermerge\x00crisismerge\x00SELECT k, v FROM %Q.'%q_config'\x00version\x00invalid fts5 file format (found %d, expected %d) - run 'rebuild'\x00unterminated string\x00fts5: syntax error near \"%.1s\"\x00OR\x00NOT\x00NEAR\x00expected integer, got \"%.*s\"\x00fts5: column queries are not supported (detail=none)\x00fts5: %s queries are not supported (detail!=full)\x00phrase\x00block\x00REPLACE INTO '%q'.'%q_data'(id, block) VALUES(?,?)\x00DELETE FROM '%q'.'%q_data' WHERE id>=? AND id<=?\x00DELETE FROM '%q'.'%q_idx' WHERE segid=?\x00PRAGMA %Q.data_version\x00SELECT pgno FROM '%q'.'%q_idx' WHERE segid=? AND term<=? ORDER BY term DESC LIMIT 1\x00INSERT INTO '%q'.'%q_idx'(segid,term,pgno) VALUES(?,?,?)\x00%s_data\x00id INTEGER PRIMARY KEY, block BLOB\x00segid, term, pgno, PRIMARY KEY(segid, term)\x00SELECT segid, term, (pgno>>1), (pgno&1) FROM %Q.'%q_idx' WHERE segid=%d ORDER BY 1, 2\x00\x00\x00\x00\x00\x00recursively defined fts5 content table\x00SELECT rowid, rank FROM %Q.%Q ORDER BY %s(\"%w\"%s%s) %s\x00DESC\x00ASC\x00reads\x00unknown special query: %.*s\x00SELECT %s\x00no such function: %s\x00parse error in rank function: %s\x00%s: table does not support scanning\x00delete-all\x00'delete-all' may only be used with a contentless or external content fts5 table\x00rebuild\x00'rebuild' may not be used with a contentless fts5 table\x00merge\x00integrity-check\x00cannot %s contentless fts5 table: %s\x00DELETE from\x00no such cursor: %lld\x00no such tokenizer: %s\x00error in tokenizer constructor\x00fts5_api_ptr\x00fts5: 2023-03-22 11:56:21 0d1fc92f94cb6b76bffe3ec34d69cffde2924203304e8ffc4155597af0c191da\x00config\x00fts5\x00fts5_source_id\x00SELECT %s FROM %s T WHERE T.%Q >= ? AND T.%Q <= ? ORDER BY T.%Q ASC\x00SELECT %s FROM %s T WHERE T.%Q <= ? AND T.%Q >= ? 
ORDER BY T.%Q DESC\x00SELECT %s FROM %s T WHERE T.%Q=?\x00INSERT INTO %Q.'%q_content' VALUES(%s)\x00REPLACE INTO %Q.'%q_content' VALUES(%s)\x00DELETE FROM %Q.'%q_content' WHERE id=?\x00REPLACE INTO %Q.'%q_docsize' VALUES(?,?)\x00DELETE FROM %Q.'%q_docsize' WHERE id=?\x00SELECT sz FROM %Q.'%q_docsize' WHERE id=?\x00REPLACE INTO %Q.'%q_config' VALUES(?,?)\x00SELECT %s FROM %s AS T\x00DROP TABLE IF EXISTS %Q.'%q_data';DROP TABLE IF EXISTS %Q.'%q_idx';DROP TABLE IF EXISTS %Q.'%q_config';\x00DROP TABLE IF EXISTS %Q.'%q_docsize';\x00DROP TABLE IF EXISTS %Q.'%q_content';\x00ALTER TABLE %Q.'%q_%s' RENAME TO '%q_%s';\x00CREATE TABLE %Q.'%q_%q'(%s)%s\x00fts5: error creating shadow table %q_%s: %s\x00id INTEGER PRIMARY KEY\x00, c%d\x00id INTEGER PRIMARY KEY, sz BLOB\x00k PRIMARY KEY, v\x00DELETE FROM %Q.'%q_data';DELETE FROM %Q.'%q_idx';\x00DELETE FROM %Q.'%q_docsize';\x00SELECT count(*) FROM %Q.'%q_%s'\x00tokenchars\x00separators\x00L* N* Co\x00categories\x00remove_diacritics\x00unicode61\x00al\x00ance\x00ence\x00er\x00ic\x00able\x00ible\x00ant\x00ement\x00ment\x00ent\x00ion\x00ou\x00ism\x00ate\x00iti\x00ous\x00ive\x00ize\x00at\x00bl\x00ble\x00iz\x00ational\x00tional\x00tion\x00enci\x00anci\x00izer\x00logi\x00bli\x00alli\x00entli\x00eli\x00e\x00ousli\x00ization\x00ation\x00ator\x00alism\x00iveness\x00fulness\x00ful\x00ousness\x00aliti\x00iviti\x00biliti\x00ical\x00ness\x00icate\x00iciti\x00ative\x00alize\x00eed\x00ee\x00ed\x00ing\x00case_sensitive\x00ascii\x00porter\x00trigram\x00col\x00row\x00instance\x00fts5vocab: unknown table type: %Q\x00CREATE TABlE vocab(term, col, doc, cnt)\x00CREATE TABlE vocab(term, doc, cnt)\x00CREATE TABlE vocab(term, doc, col, offset)\x00wrong number of vtable arguments\x00recursive definition for %s.%s\x00SELECT t.%Q FROM %Q.%Q AS t WHERE t.%Q MATCH '*id'\x00no such fts5 table: %s.%s\x00fts5vocab\x002023-03-22 11:56:21 0d1fc92f94cb6b76bffe3ec34d69cffde2924203304e8ffc4155597af0c191da\x00" var ts = (*reflect.StringHeader)(unsafe.Pointer(&ts1)).Data diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/sqlite/lib/sqlite_freebsd_arm64.go temporal-1.22.5/src/vendor/modernc.org/sqlite/lib/sqlite_freebsd_arm64.go --- temporal-1.21.5-1/src/vendor/modernc.org/sqlite/lib/sqlite_freebsd_arm64.go 2023-09-29 14:03:35.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/sqlite/lib/sqlite_freebsd_arm64.go 2024-02-23 09:46:16.000000000 +0000 @@ -1,4 +1,4 @@ -// Code generated by 'ccgo -DSQLITE_PRIVATE= -export-defines "" -export-enums "" -export-externs X -export-fields F -export-typedefs "" -ignore-unsupported-alignment -pkgname sqlite3 -volatile=sqlite3_io_error_pending,sqlite3_open_file_count,sqlite3_pager_readdb_count,sqlite3_pager_writedb_count,sqlite3_pager_writej_count,sqlite3_search_count,sqlite3_sort_count,saved_cnt,randomnessPid -o lib/sqlite_freebsd_amd64.go -trace-translation-units testdata/sqlite-amalgamation-3410000/sqlite3.c -full-path-comments -DNDEBUG -DHAVE_USLEEP -DLONGDOUBLE_TYPE=double -DSQLITE_CORE -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_FTS5 -DSQLITE_ENABLE_GEOPOLY -DSQLITE_ENABLE_MATH_FUNCTIONS -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_OFFSET_SQL_FUNC -DSQLITE_ENABLE_PREUPDATE_HOOK -DSQLITE_ENABLE_RBU -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_SESSION -DSQLITE_ENABLE_SNAPSHOT -DSQLITE_ENABLE_STAT4 -DSQLITE_ENABLE_UNLOCK_NOTIFY -DSQLITE_LIKE_DOESNT_MATCH_BLOBS -DSQLITE_MUTEX_APPDEF=1 -DSQLITE_MUTEX_NOOP -DSQLITE_SOUNDEX -DSQLITE_THREADSAFE=1 -DSQLITE_OS_UNIX=1', DO NOT EDIT. 
+// Code generated by 'ccgo -DSQLITE_PRIVATE= -export-defines "" -export-enums "" -export-externs X -export-fields F -export-typedefs "" -ignore-unsupported-alignment -pkgname sqlite3 -volatile=sqlite3_io_error_pending,sqlite3_open_file_count,sqlite3_pager_readdb_count,sqlite3_pager_writedb_count,sqlite3_pager_writej_count,sqlite3_search_count,sqlite3_sort_count,saved_cnt,randomnessPid -o lib/sqlite_freebsd_amd64.go -trace-translation-units testdata/sqlite-amalgamation-3410200/sqlite3.c -full-path-comments -DNDEBUG -DHAVE_USLEEP -DLONGDOUBLE_TYPE=double -DSQLITE_CORE -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_FTS5 -DSQLITE_ENABLE_GEOPOLY -DSQLITE_ENABLE_MATH_FUNCTIONS -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_OFFSET_SQL_FUNC -DSQLITE_ENABLE_PREUPDATE_HOOK -DSQLITE_ENABLE_RBU -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_SESSION -DSQLITE_ENABLE_SNAPSHOT -DSQLITE_ENABLE_STAT4 -DSQLITE_ENABLE_UNLOCK_NOTIFY -DSQLITE_LIKE_DOESNT_MATCH_BLOBS -DSQLITE_MUTEX_APPDEF=1 -DSQLITE_MUTEX_NOOP -DSQLITE_SOUNDEX -DSQLITE_THREADSAFE=1 -DSQLITE_OS_UNIX=1', DO NOT EDIT. package sqlite3 @@ -748,11 +748,11 @@ NC_OrderAgg = 0x8000000 NC_PartIdx = 0x000002 NC_SelfRef = 0x00002e + NC_Subquery = 0x000040 NC_UAggInfo = 0x000100 NC_UBaseReg = 0x000400 NC_UEList = 0x000080 NC_UUpsert = 0x000200 - NC_VarSelect = 0x000040 NDEBUG = 1 NETGRAPHDISC = 6 NN = 1 @@ -1958,7 +1958,7 @@ SQLITE_SHM_UNLOCK = 1 SQLITE_SORTER_PMASZ = 250 SQLITE_SOUNDEX = 1 - SQLITE_SOURCE_ID = "2023-02-21 18:09:37 05941c2a04037fc3ed2ffae11f5d2260706f89431f463518740f72ada350866d" + SQLITE_SOURCE_ID = "2023-03-22 11:56:21 0d1fc92f94cb6b76bffe3ec34d69cffde2924203304e8ffc4155597af0c191da" SQLITE_SO_ASC = 0 SQLITE_SO_DESC = 1 SQLITE_SO_UNDEFINED = -1 @@ -2066,8 +2066,8 @@ SQLITE_UTF8 = 1 SQLITE_VDBEINT_H = 0 SQLITE_VDBE_H = 0 - SQLITE_VERSION = "3.41.0" - SQLITE_VERSION_NUMBER = 3041000 + SQLITE_VERSION = "3.41.2" + SQLITE_VERSION_NUMBER = 3041002 SQLITE_VTABRISK_High = 2 SQLITE_VTABRISK_Low = 0 SQLITE_VTABRISK_Normal = 1 @@ -5299,7 +5299,8 @@ FiIdxCur int32 FiIdxCol int32 FbMaybeNullRow U8 - F__ccgo_pad1 [3]byte + Faff U8 + F__ccgo_pad1 [2]byte FpIENext uintptr } @@ -5941,17 +5942,18 @@ // Handle type for pages. type PgHdr2 = struct { - FpPage uintptr - FpData uintptr - FpExtra uintptr - FpCache uintptr - FpDirty uintptr - FpPager uintptr - Fpgno Pgno - Fflags U16 - FnRef I16 - FpDirtyNext uintptr - FpDirtyPrev uintptr + FpPage uintptr + FpData uintptr + FpExtra uintptr + FpCache uintptr + FpDirty uintptr + FpPager uintptr + Fpgno Pgno + Fflags U16 + F__ccgo_pad1 [2]byte + FnRef I64 + FpDirtyNext uintptr + FpDirtyPrev uintptr } // Handle type for pages. 
@@ -6172,14 +6174,14 @@ FpDirty uintptr FpDirtyTail uintptr FpSynced uintptr - FnRefSum int32 + FnRefSum I64 FszCache int32 FszSpill int32 FszPage int32 FszExtra int32 FbPurgeable U8 FeCreate U8 - F__ccgo_pad1 [2]byte + F__ccgo_pad1 [6]byte FxStress uintptr FpStress uintptr FpCache uintptr @@ -6986,7 +6988,7 @@ _ = pMutex if op < 0 || op >= int32(uint64(unsafe.Sizeof([10]Sqlite3StatValueType{}))/uint64(unsafe.Sizeof(Sqlite3StatValueType(0)))) { - return Xsqlite3MisuseError(tls, 23229) + return Xsqlite3MisuseError(tls, 23233) } if statMutex[op] != 0 { pMutex = Xsqlite3Pcache1Mutex(tls) @@ -15156,7 +15158,7 @@ for p = (*UnixInodeInfo)(unsafe.Pointer(pInode)).FpUnused; p != 0; p = pNext { pNext = (*UnixUnusedFd)(unsafe.Pointer(p)).FpNext - robust_close(tls, pFile, (*UnixUnusedFd)(unsafe.Pointer(p)).Ffd, 38271) + robust_close(tls, pFile, (*UnixUnusedFd)(unsafe.Pointer(p)).Ffd, 38275) Xsqlite3_free(tls, p) } (*UnixInodeInfo)(unsafe.Pointer(pInode)).FpUnused = uintptr(0) @@ -15633,7 +15635,7 @@ var pFile uintptr = id unixUnmapfile(tls, pFile) if (*UnixFile)(unsafe.Pointer(pFile)).Fh >= 0 { - robust_close(tls, pFile, (*UnixFile)(unsafe.Pointer(pFile)).Fh, 39055) + robust_close(tls, pFile, (*UnixFile)(unsafe.Pointer(pFile)).Fh, 39059) (*UnixFile)(unsafe.Pointer(pFile)).Fh = -1 } @@ -15924,7 +15926,7 @@ if fd >= 0 { return SQLITE_OK } - return unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 40676), ts+3371, bp+8, 40676) + return unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 40680), ts+3371, bp+8, 40680) } func unixSync(tls *libc.TLS, id uintptr, flags int32) int32 { @@ -15941,14 +15943,14 @@ if rc != 0 { storeLastErrno(tls, pFile, *(*int32)(unsafe.Pointer(libc.X__error(tls)))) - return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(4)<<8, ts+3659, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40717) + return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(4)<<8, ts+3659, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40721) } if int32((*UnixFile)(unsafe.Pointer(pFile)).FctrlFlags)&UNIXFILE_DIRSYNC != 0 { rc = (*(*func(*libc.TLS, uintptr, uintptr) int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 17*24 + 8)))(tls, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, bp) if rc == SQLITE_OK { full_fsync(tls, *(*int32)(unsafe.Pointer(bp)), 0, 0) - robust_close(tls, pFile, *(*int32)(unsafe.Pointer(bp)), 40731) + robust_close(tls, pFile, *(*int32)(unsafe.Pointer(bp)), 40735) } else { rc = SQLITE_OK } @@ -15968,7 +15970,7 @@ rc = robust_ftruncate(tls, (*UnixFile)(unsafe.Pointer(pFile)).Fh, nByte) if rc != 0 { storeLastErrno(tls, pFile, *(*int32)(unsafe.Pointer(libc.X__error(tls)))) - return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(6)<<8, ts+3290, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40762) + return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(6)<<8, ts+3290, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40766) } else { if nByte < (*UnixFile)(unsafe.Pointer(pFile)).FmmapSize { (*UnixFile)(unsafe.Pointer(pFile)).FmmapSize = nByte @@ -16036,7 +16038,7 @@ if (*UnixFile)(unsafe.Pointer(pFile)).FszChunk <= 0 { if robust_ftruncate(tls, (*UnixFile)(unsafe.Pointer(pFile)).Fh, nByte) != 0 { storeLastErrno(tls, pFile, *(*int32)(unsafe.Pointer(libc.X__error(tls)))) - return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(6)<<8, ts+3290, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40883) + return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(6)<<8, ts+3290, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40887) } } @@ -16263,7 +16265,7 @@ } Xsqlite3_free(tls, (*UnixShmNode)(unsafe.Pointer(p)).FapRegion) if 
(*UnixShmNode)(unsafe.Pointer(p)).FhShm >= 0 { - robust_close(tls, pFd, (*UnixShmNode)(unsafe.Pointer(p)).FhShm, 41442) + robust_close(tls, pFd, (*UnixShmNode)(unsafe.Pointer(p)).FhShm, 41446) (*UnixShmNode)(unsafe.Pointer(p)).FhShm = -1 } (*UnixInodeInfo)(unsafe.Pointer((*UnixShmNode)(unsafe.Pointer(p)).FpInode)).FpShmNode = uintptr(0) @@ -16291,7 +16293,7 @@ rc = unixShmSystemLock(tls, pDbFd, F_WRLCK, (22+SQLITE_SHM_NLOCK)*4+SQLITE_SHM_NLOCK, 1) if rc == SQLITE_OK && robust_ftruncate(tls, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FhShm, int64(3)) != 0 { - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(18)<<8, ts+3290, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename, 41499) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(18)<<8, ts+3290, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename, 41503) } } } else if int32((*flock)(unsafe.Pointer(bp+8)).Fl_type) == F_WRLCK { @@ -16390,7 +16392,7 @@ if !((*unixShmNode)(unsafe.Pointer(pShmNode)).FhShm < 0) { goto __10 } - rc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 41624), ts+3254, zShm, 41624) + rc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 41628), ts+3254, zShm, 41628) goto shm_open_err __10: ; @@ -16520,7 +16522,7 @@ goto __14 } zFile = (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(19)<<8, ts+3325, zFile, 41768) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(19)<<8, ts+3325, zFile, 41772) goto shmpage_out __14: ; @@ -16566,7 +16568,7 @@ if !(pMem == libc.UintptrFromInt32(-1)) { goto __20 } - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(21)<<8, ts+3412, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename, 41795) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(21)<<8, ts+3412, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename, 41799) goto shmpage_out __20: ; @@ -16805,7 +16807,7 @@ if pNew == libc.UintptrFromInt32(-1) { pNew = uintptr(0) nNew = int64(0) - unixLogErrorAtLine(tls, SQLITE_OK, zErr, (*UnixFile)(unsafe.Pointer(pFd)).FzPath, 42169) + unixLogErrorAtLine(tls, SQLITE_OK, zErr, (*UnixFile)(unsafe.Pointer(pFd)).FzPath, 42173) (*UnixFile)(unsafe.Pointer(pFd)).FmmapSizeMax = int64(0) } @@ -16939,7 +16941,7 @@ unixEnterMutex(tls) rc = findInodeInfo(tls, pNew, pNew+16) if rc != SQLITE_OK { - robust_close(tls, pNew, h, 42672) + robust_close(tls, pNew, h, 42676) h = -1 } unixLeaveMutex(tls) @@ -16960,7 +16962,7 @@ storeLastErrno(tls, pNew, 0) if rc != SQLITE_OK { if h >= 0 { - robust_close(tls, pNew, h, 42757) + robust_close(tls, pNew, h, 42761) } } else { (*Sqlite3_file)(unsafe.Pointer(pId)).FpMethods = pLockingStyle @@ -17276,7 +17278,7 @@ if !(fd < 0) { goto __19 } - rc2 = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43198), ts+3254, zName, 43198) + rc2 = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43202), ts+3254, zName, 43202) if !(rc == SQLITE_OK) { goto __20 } @@ -17367,7 +17369,7 @@ if *(*int32)(unsafe.Pointer(libc.X__error(tls))) == ENOENT { rc = SQLITE_IOERR | int32(23)<<8 } else { - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(10)<<8, ts+3364, zPath, 43337) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(10)<<8, ts+3364, zPath, 43341) } return rc } @@ -17375,9 +17377,9 @@ rc = (*(*func(*libc.TLS, uintptr, uintptr) int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 17*24 + 8)))(tls, zPath, bp) if rc == SQLITE_OK { if full_fsync(tls, *(*int32)(unsafe.Pointer(bp)), 0, 0) != 0 { - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(5)<<8, ts+3781, zPath, 43347) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(5)<<8, ts+3781, 
zPath, 43351) } - robust_close(tls, uintptr(0), *(*int32)(unsafe.Pointer(bp)), 43349) + robust_close(tls, uintptr(0), *(*int32)(unsafe.Pointer(bp)), 43353) } else { rc = SQLITE_OK } @@ -17441,18 +17443,18 @@ zIn = (*DbPath)(unsafe.Pointer(pPath)).FzOut if (*(*func(*libc.TLS, uintptr, uintptr) int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 27*24 + 8)))(tls, zIn, bp) != 0 { if *(*int32)(unsafe.Pointer(libc.X__error(tls))) != ENOENT { - (*DbPath)(unsafe.Pointer(pPath)).Frc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43443), ts+3452, zIn, 43443) + (*DbPath)(unsafe.Pointer(pPath)).Frc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43447), ts+3452, zIn, 43447) } } else if int32((*stat)(unsafe.Pointer(bp)).Fst_mode)&0170000 == 0120000 { var got Ssize_t if libc.PostIncInt32(&(*DbPath)(unsafe.Pointer(pPath)).FnSymlink, 1) > SQLITE_MAX_SYMLINK { - (*DbPath)(unsafe.Pointer(pPath)).Frc = Xsqlite3CantopenError(tls, 43449) + (*DbPath)(unsafe.Pointer(pPath)).Frc = Xsqlite3CantopenError(tls, 43453) return } got = (*(*func(*libc.TLS, uintptr, uintptr, Size_t) Ssize_t)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls, zIn, bp+224, uint64(unsafe.Sizeof([1026]int8{}))-uint64(2)) if got <= int64(0) || got >= Ssize_t(unsafe.Sizeof([1026]int8{}))-int64(2) { - (*DbPath)(unsafe.Pointer(pPath)).Frc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43454), ts+3443, zIn, 43454) + (*DbPath)(unsafe.Pointer(pPath)).Frc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43458), ts+3443, zIn, 43458) return } *(*int8)(unsafe.Pointer(bp + 224 + uintptr(got))) = int8(0) @@ -17492,14 +17494,14 @@ (*DbPath)(unsafe.Pointer(bp + 1032)).FzOut = zOut if int32(*(*int8)(unsafe.Pointer(zPath))) != '/' { if (*(*func(*libc.TLS, uintptr, Size_t) uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 3*24 + 8)))(tls, bp, uint64(unsafe.Sizeof([1026]int8{}))-uint64(2)) == uintptr(0) { - return unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43512), ts+3272, zPath, 43512) + return unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43516), ts+3272, zPath, 43516) } appendAllPathElements(tls, bp+1032, bp) } appendAllPathElements(tls, bp+1032, zPath) *(*int8)(unsafe.Pointer(zOut + uintptr((*DbPath)(unsafe.Pointer(bp+1032)).FnUsed))) = int8(0) if (*DbPath)(unsafe.Pointer(bp+1032)).Frc != 0 || (*DbPath)(unsafe.Pointer(bp+1032)).FnUsed < 2 { - return Xsqlite3CantopenError(tls, 43518) + return Xsqlite3CantopenError(tls, 43522) } if (*DbPath)(unsafe.Pointer(bp+1032)).FnSymlink != 0 { return SQLITE_OK | int32(2)<<8 @@ -17596,7 +17598,7 @@ for __ccgo := true; __ccgo; __ccgo = got < 0 && *(*int32)(unsafe.Pointer(libc.X__error(tls))) == EINTR { got = int32((*(*func(*libc.TLS, int32, uintptr, Size_t) Ssize_t)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 8*24 + 8)))(tls, fd, zBuf, uint64(nBuf))) } - robust_close(tls, uintptr(0), fd, 43619) + robust_close(tls, uintptr(0), fd, 43623) } } @@ -19029,7 +19031,7 @@ libc.Xmemset(tls, pPgHdr+32, 0, uint64(unsafe.Sizeof(PgHdr{}))-uint64(uintptr(0)+32)) (*PgHdr)(unsafe.Pointer(pPgHdr)).FpPage = pPage (*PgHdr)(unsafe.Pointer(pPgHdr)).FpData = (*Sqlite3_pcache_page)(unsafe.Pointer(pPage)).FpBuf - (*PgHdr)(unsafe.Pointer(pPgHdr)).FpExtra = pPgHdr + 1*72 + (*PgHdr)(unsafe.Pointer(pPgHdr)).FpExtra = pPgHdr + 1*80 libc.Xmemset(tls, (*PgHdr)(unsafe.Pointer(pPgHdr)).FpExtra, 0, uint64(8)) (*PgHdr)(unsafe.Pointer(pPgHdr)).FpCache = pCache (*PgHdr)(unsafe.Pointer(pPgHdr)).Fpgno = pgno @@ -19059,7 +19061,7 @@ // reference count drops to 0, then it is made 
eligible for recycling. func Xsqlite3PcacheRelease(tls *libc.TLS, p uintptr) { (*PCache)(unsafe.Pointer((*PgHdr)(unsafe.Pointer(p)).FpCache)).FnRefSum-- - if int32(libc.PreDecInt16(&(*PgHdr)(unsafe.Pointer(p)).FnRef, 1)) == 0 { + if libc.PreDecInt64(&(*PgHdr)(unsafe.Pointer(p)).FnRef, 1) == int64(0) { if int32((*PgHdr)(unsafe.Pointer(p)).Fflags)&PGHDR_CLEAN != 0 { pcacheUnpin(tls, p) } else { @@ -19110,7 +19112,7 @@ *(*U16)(unsafe.Pointer(p + 52)) &= libc.Uint16FromInt32(libc.CplInt32(PGHDR_DIRTY | PGHDR_NEED_SYNC | PGHDR_WRITEABLE)) *(*U16)(unsafe.Pointer(p + 52)) |= U16(PGHDR_CLEAN) - if int32((*PgHdr)(unsafe.Pointer(p)).FnRef) == 0 { + if (*PgHdr)(unsafe.Pointer(p)).FnRef == int64(0) { pcacheUnpin(tls, p) } } @@ -19214,8 +19216,8 @@ } func pcacheMergeDirtyList(tls *libc.TLS, pA uintptr, pB uintptr) uintptr { - bp := tls.Alloc(72) - defer tls.Free(72) + bp := tls.Alloc(80) + defer tls.Free(80) var pTail uintptr pTail = bp @@ -19293,13 +19295,13 @@ // // This is not the total number of pages referenced, but the sum of the // reference count for all pages. -func Xsqlite3PcacheRefCount(tls *libc.TLS, pCache uintptr) int32 { +func Xsqlite3PcacheRefCount(tls *libc.TLS, pCache uintptr) I64 { return (*PCache)(unsafe.Pointer(pCache)).FnRefSum } // Return the number of references to the page supplied as an argument. -func Xsqlite3PcachePageRefcount(tls *libc.TLS, p uintptr) int32 { - return int32((*PgHdr)(unsafe.Pointer(p)).FnRef) +func Xsqlite3PcachePageRefcount(tls *libc.TLS, p uintptr) I64 { + return (*PgHdr)(unsafe.Pointer(p)).FnRef } // Return the total number of pages in the cache. @@ -21593,7 +21595,7 @@ pPg = Xsqlite3PagerLookup(tls, pPager, iPg) if pPg != 0 { - if Xsqlite3PcachePageRefcount(tls, pPg) == 1 { + if Xsqlite3PcachePageRefcount(tls, pPg) == int64(1) { Xsqlite3PcacheDrop(tls, pPg) } else { rc = readDbPage(tls, pPg) @@ -22026,7 +22028,7 @@ var pageSize U32 = *(*U32)(unsafe.Pointer(pPageSize)) if (int32((*Pager)(unsafe.Pointer(pPager)).FmemDb) == 0 || (*Pager)(unsafe.Pointer(pPager)).FdbSize == Pgno(0)) && - Xsqlite3PcacheRefCount(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache) == 0 && + Xsqlite3PcacheRefCount(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache) == int64(0) && pageSize != 0 && pageSize != U32((*Pager)(unsafe.Pointer(pPager)).FpageSize) { var pNew uintptr = uintptr(0) *(*I64)(unsafe.Pointer(bp)) = int64(0) @@ -22178,9 +22180,9 @@ Xsqlite3OsUnfetch(tls, (*Pager)(unsafe.Pointer(pPager)).Ffd, I64(pgno-Pgno(1))*(*Pager)(unsafe.Pointer(pPager)).FpageSize, pData) return SQLITE_NOMEM } - (*PgHdr)(unsafe.Pointer(p)).FpExtra = p + 1*72 + (*PgHdr)(unsafe.Pointer(p)).FpExtra = p + 1*80 (*PgHdr)(unsafe.Pointer(p)).Fflags = U16(PGHDR_MMAP) - (*PgHdr)(unsafe.Pointer(p)).FnRef = int16(1) + (*PgHdr)(unsafe.Pointer(p)).FnRef = int64(1) (*PgHdr)(unsafe.Pointer(p)).FpPager = pPager } @@ -22512,7 +22514,7 @@ for rc == SQLITE_OK && pList != 0 { var pNext uintptr = (*PgHdr)(unsafe.Pointer(pList)).FpDirty - if int32((*PgHdr)(unsafe.Pointer(pList)).FnRef) == 0 { + if (*PgHdr)(unsafe.Pointer(pList)).FnRef == int64(0) { rc = pagerStress(tls, pPager, pList) } pList = pNext @@ -22662,7 +22664,7 @@ goto __12 } - rc = Xsqlite3CantopenError(tls, 60235) + rc = Xsqlite3CantopenError(tls, 60239) __12: ; if !(rc != SQLITE_OK) { @@ -23043,7 +23045,7 @@ if !(rc == SQLITE_OK && *(*int32)(unsafe.Pointer(bp + 8))&SQLITE_OPEN_READONLY != 0) { goto __10 } - rc = Xsqlite3CantopenError(tls, 60754) + rc = Xsqlite3CantopenError(tls, 60758) Xsqlite3OsClose(tls, (*Pager)(unsafe.Pointer(pPager)).Fjfd) __10: ; @@ 
-23149,7 +23151,7 @@ } func pagerUnlockIfUnused(tls *libc.TLS, pPager uintptr) { - if Xsqlite3PcacheRefCount(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache) == 0 { + if Xsqlite3PcacheRefCount(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache) == int64(0) { pagerUnlockAndRollback(tls, pPager) } } @@ -23167,7 +23169,7 @@ if !(pgno == Pgno(0)) { goto __1 } - return Xsqlite3CorruptError(tls, 60967) + return Xsqlite3CorruptError(tls, 60971) __1: ; *(*uintptr)(unsafe.Pointer(bp)) = Xsqlite3PcacheFetch(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache, pgno, 3) @@ -23206,7 +23208,7 @@ if !(pgno == (*Pager)(unsafe.Pointer(pPager)).FlckPgno) { goto __7 } - rc = Xsqlite3CorruptError(tls, 60999) + rc = Xsqlite3CorruptError(tls, 61003) goto pager_acquire_err __7: ; @@ -23283,7 +23285,7 @@ (int32((*Pager)(unsafe.Pointer(pPager)).FeState) == PAGER_READER || flags&PAGER_GET_READONLY != 0)) if pgno <= Pgno(1) && pgno == Pgno(0) { - return Xsqlite3CorruptError(tls, 61078) + return Xsqlite3CorruptError(tls, 61082) } if bMmapOk != 0 && (*Pager)(unsafe.Pointer(pPager)).FpWal != uintptr(0) { @@ -24041,7 +24043,7 @@ // Return the number of references to the specified page. func Xsqlite3PagerPageRefcount(tls *libc.TLS, pPage uintptr) int32 { - return Xsqlite3PcachePageRefcount(tls, pPage) + return int32(Xsqlite3PcachePageRefcount(tls, pPage)) } // Parameter eStat must be one of SQLITE_DBSTATUS_CACHE_HIT, _MISS, _WRITE, @@ -24284,9 +24286,9 @@ pPgOld = Xsqlite3PagerLookup(tls, pPager, pgno) if pPgOld != 0 { - if int32((*PgHdr)(unsafe.Pointer(pPgOld)).FnRef) > 1 { + if (*PgHdr)(unsafe.Pointer(pPgOld)).FnRef > int64(1) { Xsqlite3PagerUnrefNotNull(tls, pPgOld) - return Xsqlite3CorruptError(tls, 62623) + return Xsqlite3CorruptError(tls, 62627) } *(*U16)(unsafe.Pointer(pPg + 52)) |= U16(int32((*PgHdr)(unsafe.Pointer(pPgOld)).Fflags) & PGHDR_NEED_SYNC) if (*Pager)(unsafe.Pointer(pPager)).FtempFile != 0 { @@ -25043,7 +25045,7 @@ nCollide = idx for iKey = walHash(tls, iPage); *(*Ht_slot)(unsafe.Pointer((*WalHashLoc)(unsafe.Pointer(bp)).FaHash + uintptr(iKey)*2)) != 0; iKey = walNextHash(tls, iKey) { if libc.PostDecInt32(&nCollide, 1) == 0 { - return Xsqlite3CorruptError(tls, 64387) + return Xsqlite3CorruptError(tls, 64391) } } *(*U32)(unsafe.Pointer((*WalHashLoc)(unsafe.Pointer(bp)).FaPgno + uintptr(idx-1)*4)) = iPage @@ -25142,7 +25144,7 @@ if !(version != U32(WAL_MAX_VERSION)) { goto __7 } - rc = Xsqlite3CantopenError(tls, 64519) + rc = Xsqlite3CantopenError(tls, 64523) goto finished __7: ; @@ -25728,7 +25730,7 @@ goto __14 } - rc = Xsqlite3CorruptError(tls, 65333) + rc = Xsqlite3CorruptError(tls, 65337) goto __15 __14: Xsqlite3OsFileControlHint(tls, (*Wal)(unsafe.Pointer(pWal)).FpDbFd, SQLITE_FCNTL_SIZE_HINT, bp+16) @@ -26003,7 +26005,7 @@ } if badHdr == 0 && (*Wal)(unsafe.Pointer(pWal)).Fhdr.FiVersion != U32(WALINDEX_MAX_VERSION) { - rc = Xsqlite3CantopenError(tls, 65682) + rc = Xsqlite3CantopenError(tls, 65686) } if (*Wal)(unsafe.Pointer(pWal)).FbShmUnreliable != 0 { if rc != SQLITE_OK { @@ -26476,7 +26478,7 @@ iRead = iFrame } if libc.PostDecInt32(&nCollide, 1) == 0 { - return Xsqlite3CorruptError(tls, 66419) + return Xsqlite3CorruptError(tls, 66423) } iKey = walNextHash(tls, iKey) } @@ -26981,7 +26983,7 @@ if rc == SQLITE_OK { if (*Wal)(unsafe.Pointer(pWal)).Fhdr.FmxFrame != 0 && walPagesize(tls, pWal) != nBuf { - rc = Xsqlite3CorruptError(tls, 67138) + rc = Xsqlite3CorruptError(tls, 67142) } else { rc = walCheckpoint(tls, pWal, db, eMode2, xBusy2, pBusyArg, sync_flags, zBuf) } @@ -27639,7 +27641,7 @@ } 
Xsqlite3VdbeRecordUnpack(tls, pKeyInfo, int32(nKey), pKey, pIdxKey) if int32((*UnpackedRecord)(unsafe.Pointer(pIdxKey)).FnField) == 0 || int32((*UnpackedRecord)(unsafe.Pointer(pIdxKey)).FnField) > int32((*KeyInfo)(unsafe.Pointer(pKeyInfo)).FnAllField) { - rc = Xsqlite3CorruptError(tls, 69249) + rc = Xsqlite3CorruptError(tls, 69253) } else { rc = Xsqlite3BtreeIndexMoveto(tls, pCur, pIdxKey, pRes) } @@ -27776,7 +27778,7 @@ if !(key == Pgno(0)) { goto __2 } - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69430) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69434) return __2: ; @@ -27793,7 +27795,7 @@ goto __4 } - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69443) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69447) goto ptrmap_exit __4: ; @@ -27801,7 +27803,7 @@ if !(offset < 0) { goto __5 } - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69448) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69452) goto ptrmap_exit __5: ; @@ -27844,7 +27846,7 @@ offset = int32(Pgno(5) * (key - Pgno(iPtrmap) - Pgno(1))) if offset < 0 { Xsqlite3PagerUnref(tls, *(*uintptr)(unsafe.Pointer(bp))) - return Xsqlite3CorruptError(tls, 69493) + return Xsqlite3CorruptError(tls, 69497) } *(*U8)(unsafe.Pointer(pEType)) = *(*U8)(unsafe.Pointer(pPtrmap + uintptr(offset))) @@ -27854,7 +27856,7 @@ Xsqlite3PagerUnref(tls, *(*uintptr)(unsafe.Pointer(bp))) if int32(*(*U8)(unsafe.Pointer(pEType))) < 1 || int32(*(*U8)(unsafe.Pointer(pEType))) > 5 { - return Xsqlite3CorruptError(tls, 69501) + return Xsqlite3CorruptError(tls, 69505) } return SQLITE_OK } @@ -28104,7 +28106,7 @@ if U32((*CellInfo)(unsafe.Pointer(bp)).FnLocal) < (*CellInfo)(unsafe.Pointer(bp)).FnPayload { var ovfl Pgno if Uptr((*MemPage)(unsafe.Pointer(pSrc)).FaDataEnd) >= Uptr(pCell) && Uptr((*MemPage)(unsafe.Pointer(pSrc)).FaDataEnd) < Uptr(pCell+uintptr((*CellInfo)(unsafe.Pointer(bp)).FnLocal)) { - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69893) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69897) return } ovfl = Xsqlite3Get4byte(tls, pCell+uintptr(int32((*CellInfo)(unsafe.Pointer(bp)).FnSize)-4)) @@ -28151,7 +28153,7 @@ if !(iFree > usableSize-4) { goto __2 } - return Xsqlite3CorruptError(tls, 69951) + return Xsqlite3CorruptError(tls, 69955) __2: ; if !(iFree != 0) { @@ -28161,7 +28163,7 @@ if !(iFree2 > usableSize-4) { goto __4 } - return Xsqlite3CorruptError(tls, 69954) + return Xsqlite3CorruptError(tls, 69958) __4: ; if !(0 == iFree2 || int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree2)))) == 0 && int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree2+1)))) == 0) { @@ -28174,7 +28176,7 @@ if !(top >= iFree) { goto __6 } - return Xsqlite3CorruptError(tls, 69962) + return Xsqlite3CorruptError(tls, 69966) __6: ; if !(iFree2 != 0) { @@ -28183,14 +28185,14 @@ if !(iFree+sz > iFree2) { goto __9 } - return Xsqlite3CorruptError(tls, 69965) + return Xsqlite3CorruptError(tls, 69969) __9: ; sz2 = int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree2+2))))<<8 | int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree2+2) + 1))) if !(iFree2+sz2 > usableSize) { goto __10 } - return Xsqlite3CorruptError(tls, 69967) + return Xsqlite3CorruptError(tls, 69971) __10: ; libc.Xmemmove(tls, data+uintptr(iFree+sz+sz2), data+uintptr(iFree+sz), uint64(iFree2-(iFree+sz))) @@ -28200,7 +28202,7 @@ if !(iFree+sz > usableSize) { goto __11 } - return Xsqlite3CorruptError(tls, 69971) + return Xsqlite3CorruptError(tls, 69975) __11: ; __8: @@ -28264,7 +28266,7 @@ if !(pc < iCellStart || 
pc > iCellLast) { goto __22 } - return Xsqlite3CorruptError(tls, 70004) + return Xsqlite3CorruptError(tls, 70008) __22: ; size = int32((*struct { @@ -28274,7 +28276,7 @@ if !(cbrk < iCellStart || pc+size > usableSize) { goto __23 } - return Xsqlite3CorruptError(tls, 70010) + return Xsqlite3CorruptError(tls, 70014) __23: ; *(*U8)(unsafe.Pointer(pAddr1)) = U8(cbrk >> 8) @@ -28296,7 +28298,7 @@ if !(int32(*(*uint8)(unsafe.Pointer(data + uintptr(hdr+7))))+cbrk-iCellFirst != (*MemPage)(unsafe.Pointer(pPage)).FnFree) { goto __24 } - return Xsqlite3CorruptError(tls, 70024) + return Xsqlite3CorruptError(tls, 70028) __24: ; *(*uint8)(unsafe.Pointer(data + uintptr(hdr+5))) = U8(cbrk >> 8) @@ -28331,7 +28333,7 @@ *(*U8)(unsafe.Pointer(aData + uintptr(hdr+7))) += U8(int32(U8(x))) return aData + uintptr(pc) } else if x+pc > maxPC { - *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70081) + *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70085) return uintptr(0) } else { *(*U8)(unsafe.Pointer(aData + uintptr(pc+2))) = U8(x >> 8) @@ -28344,13 +28346,13 @@ pc = int32(*(*U8)(unsafe.Pointer(pTmp)))<<8 | int32(*(*U8)(unsafe.Pointer(pTmp + 1))) if pc <= iAddr { if pc != 0 { - *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70096) + *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70100) } return uintptr(0) } } if pc > maxPC+nByte-4 { - *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70103) + *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70107) } return uintptr(0) } @@ -28375,7 +28377,7 @@ if top == 0 && (*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize == U32(65536) { top = 65536 } else { - return Xsqlite3CorruptError(tls, 70152) + return Xsqlite3CorruptError(tls, 70156) } } @@ -28386,7 +28388,7 @@ *(*int32)(unsafe.Pointer(pIdx)) = libc.AssignInt32(&g2, int32((int64(pSpace)-int64(data))/1)) if g2 <= gap { - return Xsqlite3CorruptError(tls, 70170) + return Xsqlite3CorruptError(tls, 70174) } else { return SQLITE_OK } @@ -28438,22 +28440,22 @@ if int32(iFreeBlk) == 0 { break } - return Xsqlite3CorruptError(tls, 70249) + return Xsqlite3CorruptError(tls, 70253) } iPtr = iFreeBlk } if U32(iFreeBlk) > (*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize-U32(4) { - return Xsqlite3CorruptError(tls, 70254) + return Xsqlite3CorruptError(tls, 70258) } if iFreeBlk != 0 && iEnd+U32(3) >= U32(iFreeBlk) { nFrag = U8(U32(iFreeBlk) - iEnd) if iEnd > U32(iFreeBlk) { - return Xsqlite3CorruptError(tls, 70266) + return Xsqlite3CorruptError(tls, 70270) } iEnd = U32(int32(iFreeBlk) + (int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(iFreeBlk)+2))))<<8 | int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(iFreeBlk)+2) + 1))))) if iEnd > (*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize { - return Xsqlite3CorruptError(tls, 70269) + return Xsqlite3CorruptError(tls, 70273) } iSize = U16(iEnd - U32(iStart)) iFreeBlk = U16(int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFreeBlk))))<<8 | int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFreeBlk) + 1)))) @@ -28463,7 +28465,7 @@ var iPtrEnd int32 = int32(iPtr) + (int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(iPtr)+2))))<<8 | int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(iPtr)+2) + 1)))) if iPtrEnd+3 >= int32(iStart) { if iPtrEnd > int32(iStart) { - return Xsqlite3CorruptError(tls, 70282) + return Xsqlite3CorruptError(tls, 70286) } nFrag = U8(int32(nFrag) + (int32(iStart) - iPtrEnd)) iSize = U16(iEnd - U32(iPtr)) @@ -28471,7 
+28473,7 @@ } } if int32(nFrag) > int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+7)))) { - return Xsqlite3CorruptError(tls, 70288) + return Xsqlite3CorruptError(tls, 70292) } *(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+7))) -= uint8(int32(nFrag)) } @@ -28479,10 +28481,10 @@ x = U16(int32(*(*U8)(unsafe.Pointer(pTmp)))<<8 | int32(*(*U8)(unsafe.Pointer(pTmp + 1)))) if int32(iStart) <= int32(x) { if int32(iStart) < int32(x) { - return Xsqlite3CorruptError(tls, 70297) + return Xsqlite3CorruptError(tls, 70301) } if int32(iPtr) != int32(hdr)+1 { - return Xsqlite3CorruptError(tls, 70298) + return Xsqlite3CorruptError(tls, 70302) } *(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+1))) = U8(int32(iFreeBlk) >> 8) *(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+1) + 1)) = U8(iFreeBlk) @@ -28542,7 +28544,7 @@ (*MemPage)(unsafe.Pointer(pPage)).FxParseCell = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr) }{btreeParseCellPtrIndex})) - return Xsqlite3CorruptError(tls, 70357) + return Xsqlite3CorruptError(tls, 70361) } } else { (*MemPage)(unsafe.Pointer(pPage)).FchildPtrSize = U8(4) @@ -28578,7 +28580,7 @@ (*MemPage)(unsafe.Pointer(pPage)).FxParseCell = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr) }{btreeParseCellPtrIndex})) - return Xsqlite3CorruptError(tls, 70381) + return Xsqlite3CorruptError(tls, 70385) } } return SQLITE_OK @@ -28608,11 +28610,11 @@ var next U32 var size U32 if pc < top { - return Xsqlite3CorruptError(tls, 70432) + return Xsqlite3CorruptError(tls, 70436) } for 1 != 0 { if pc > iCellLast { - return Xsqlite3CorruptError(tls, 70437) + return Xsqlite3CorruptError(tls, 70441) } next = U32(int32(*(*U8)(unsafe.Pointer(data + uintptr(pc))))<<8 | int32(*(*U8)(unsafe.Pointer(data + uintptr(pc) + 1)))) size = U32(int32(*(*U8)(unsafe.Pointer(data + uintptr(pc+2))))<<8 | int32(*(*U8)(unsafe.Pointer(data + uintptr(pc+2) + 1)))) @@ -28623,15 +28625,15 @@ pc = int32(next) } if next > U32(0) { - return Xsqlite3CorruptError(tls, 70447) + return Xsqlite3CorruptError(tls, 70451) } if U32(pc)+size > uint32(usableSize) { - return Xsqlite3CorruptError(tls, 70451) + return Xsqlite3CorruptError(tls, 70455) } } if nFree > usableSize || nFree < iCellFirst { - return Xsqlite3CorruptError(tls, 70463) + return Xsqlite3CorruptError(tls, 70467) } (*MemPage)(unsafe.Pointer(pPage)).FnFree = int32(U16(nFree - iCellFirst)) return SQLITE_OK @@ -28659,14 +28661,14 @@ pc = int32(*(*U8)(unsafe.Pointer(data + uintptr(cellOffset+i*2))))<<8 | int32(*(*U8)(unsafe.Pointer(data + uintptr(cellOffset+i*2) + 1))) if pc < iCellFirst || pc > iCellLast { - return Xsqlite3CorruptError(tls, 70494) + return Xsqlite3CorruptError(tls, 70498) } sz = int32((*struct { f func(*libc.TLS, uintptr, uintptr) U16 })(unsafe.Pointer(&struct{ uintptr }{(*MemPage)(unsafe.Pointer(pPage)).FxCellSize})).f(tls, pPage, data+uintptr(pc))) if pc+sz > usableSize { - return Xsqlite3CorruptError(tls, 70499) + return Xsqlite3CorruptError(tls, 70503) } } return SQLITE_OK @@ -28680,7 +28682,7 @@ data = (*MemPage)(unsafe.Pointer(pPage)).FaData + uintptr((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset) if decodeFlags(tls, pPage, int32(*(*U8)(unsafe.Pointer(data)))) != 0 { - return Xsqlite3CorruptError(tls, 70531) + return Xsqlite3CorruptError(tls, 70535) } (*MemPage)(unsafe.Pointer(pPage)).FmaskPage = U16((*BtShared)(unsafe.Pointer(pBt)).FpageSize - U32(1)) @@ -28692,7 +28694,7 @@ (*MemPage)(unsafe.Pointer(pPage)).FnCell = U16(int32(*(*U8)(unsafe.Pointer(data + 3)))<<8 | 
int32(*(*U8)(unsafe.Pointer(data + 3 + 1)))) if U32((*MemPage)(unsafe.Pointer(pPage)).FnCell) > ((*BtShared)(unsafe.Pointer(pBt)).FpageSize-U32(8))/U32(6) { - return Xsqlite3CorruptError(tls, 70545) + return Xsqlite3CorruptError(tls, 70549) } (*MemPage)(unsafe.Pointer(pPage)).FnFree = -1 @@ -28795,7 +28797,7 @@ if !(pgno > btreePagecount(tls, pBt)) { goto __1 } - rc = Xsqlite3CorruptError(tls, 70700) + rc = Xsqlite3CorruptError(tls, 70704) goto getAndInitPage_error1 __1: ; @@ -28823,7 +28825,7 @@ if !(pCur != 0 && (int32((*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppPage)))).FnCell) < 1 || int32((*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppPage)))).FintKey) != int32((*BtCursor)(unsafe.Pointer(pCur)).FcurIntKey))) { goto __5 } - rc = Xsqlite3CorruptError(tls, 70721) + rc = Xsqlite3CorruptError(tls, 70725) goto getAndInitPage_error2 __5: ; @@ -28862,7 +28864,7 @@ if Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppPage)))).FpDbPage) > 1 { releasePage(tls, *(*uintptr)(unsafe.Pointer(ppPage))) *(*uintptr)(unsafe.Pointer(ppPage)) = uintptr(0) - return Xsqlite3CorruptError(tls, 70787) + return Xsqlite3CorruptError(tls, 70791) } (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppPage)))).FisInit = U8(0) } else { @@ -29745,7 +29747,7 @@ if !(Xsqlite3WritableSchema(tls, (*BtShared)(unsafe.Pointer(pBt)).Fdb) == 0) { goto __18 } - rc = Xsqlite3CorruptError(tls, 71722) + rc = Xsqlite3CorruptError(tls, 71726) goto page1_init_failed goto __19 __18: @@ -30160,7 +30162,7 @@ if int32(eType) == PTRMAP_OVERFLOW2 { if Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer(pPage)).FaData) != iFrom { - return Xsqlite3CorruptError(tls, 72143) + return Xsqlite3CorruptError(tls, 72147) } Xsqlite3Put4byte(tls, (*MemPage)(unsafe.Pointer(pPage)).FaData, iTo) } else { @@ -30186,7 +30188,7 @@ })(unsafe.Pointer(&struct{ uintptr }{(*MemPage)(unsafe.Pointer(pPage)).FxParseCell})).f(tls, pPage, pCell, bp) if U32((*CellInfo)(unsafe.Pointer(bp)).FnLocal) < (*CellInfo)(unsafe.Pointer(bp)).FnPayload { if pCell+uintptr((*CellInfo)(unsafe.Pointer(bp)).FnSize) > (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr((*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize) { - return Xsqlite3CorruptError(tls, 72162) + return Xsqlite3CorruptError(tls, 72166) } if iFrom == Xsqlite3Get4byte(tls, pCell+uintptr((*CellInfo)(unsafe.Pointer(bp)).FnSize)-uintptr(4)) { Xsqlite3Put4byte(tls, pCell+uintptr((*CellInfo)(unsafe.Pointer(bp)).FnSize)-uintptr(4), iTo) @@ -30195,7 +30197,7 @@ } } else { if pCell+uintptr(4) > (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr((*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize) { - return Xsqlite3CorruptError(tls, 72171) + return Xsqlite3CorruptError(tls, 72175) } if Xsqlite3Get4byte(tls, pCell) == iFrom { Xsqlite3Put4byte(tls, pCell, iTo) @@ -30206,7 +30208,7 @@ if i == nCell { if int32(eType) != PTRMAP_BTREE || Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr(int32((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset)+8)) != iFrom { - return Xsqlite3CorruptError(tls, 72183) + return Xsqlite3CorruptError(tls, 72187) } Xsqlite3Put4byte(tls, (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr(int32((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset)+8), iTo) } @@ -30222,7 +30224,7 @@ var pPager uintptr = (*BtShared)(unsafe.Pointer(pBt)).FpPager if iDbPage < Pgno(3) { - return Xsqlite3CorruptError(tls, 72218) + return Xsqlite3CorruptError(tls, 72222) } *(*int32)(unsafe.Pointer(bp)) = 
Xsqlite3PagerMovepage(tls, pPager, (*MemPage)(unsafe.Pointer(pDbPage)).FpDbPage, iFreePage, isCommit) @@ -30283,7 +30285,7 @@ return rc } if int32(*(*U8)(unsafe.Pointer(bp))) == PTRMAP_ROOTPAGE { - return Xsqlite3CorruptError(tls, 72316) + return Xsqlite3CorruptError(tls, 72320) } if int32(*(*U8)(unsafe.Pointer(bp))) == PTRMAP_FREEPAGE { @@ -30318,7 +30320,7 @@ releasePage(tls, *(*uintptr)(unsafe.Pointer(bp + 32))) if *(*Pgno)(unsafe.Pointer(bp + 40)) > dbSize { releasePage(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) - return Xsqlite3CorruptError(tls, 72368) + return Xsqlite3CorruptError(tls, 72372) } } @@ -30378,7 +30380,7 @@ var nFin Pgno = finalDbSize(tls, pBt, nOrig, nFree) if nOrig < nFin || nFree >= nOrig { - rc = Xsqlite3CorruptError(tls, 72436) + rc = Xsqlite3CorruptError(tls, 72440) } else if nFree > Pgno(0) { rc = saveAllCursors(tls, pBt, uint32(0), uintptr(0)) if rc == SQLITE_OK { @@ -30417,7 +30419,7 @@ nOrig = btreePagecount(tls, pBt) if ptrmapPageno(tls, pBt, nOrig) == nOrig || nOrig == U32(Xsqlite3PendingByte)/(*BtShared)(unsafe.Pointer(pBt)).FpageSize+U32(1) { - return Xsqlite3CorruptError(tls, 72487) + return Xsqlite3CorruptError(tls, 72491) } nFree = Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer((*BtShared)(unsafe.Pointer(pBt)).FpPage1)).FaData+36) @@ -30448,7 +30450,7 @@ } nFin = finalDbSize(tls, pBt, nOrig, nVac) if nFin > nOrig { - return Xsqlite3CorruptError(tls, 72514) + return Xsqlite3CorruptError(tls, 72518) } if nFin < nOrig { rc = saveAllCursors(tls, pBt, uint32(0), uintptr(0)) @@ -30789,7 +30791,7 @@ if iTable <= Pgno(1) { if iTable < Pgno(1) { - return Xsqlite3CorruptError(tls, 72978) + return Xsqlite3CorruptError(tls, 72982) } else if btreePagecount(tls, pBt) == Pgno(0) { iTable = Pgno(0) } @@ -31033,14 +31035,14 @@ var pBt uintptr = (*BtCursor)(unsafe.Pointer(pCur)).FpBt if int32((*BtCursor)(unsafe.Pointer(pCur)).Fix) >= int32((*MemPage)(unsafe.Pointer(pPage)).FnCell) { - return Xsqlite3CorruptError(tls, 73385) + return Xsqlite3CorruptError(tls, 73389) } getCellInfo(tls, pCur) aPayload = (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload if Uptr((int64(aPayload)-int64((*MemPage)(unsafe.Pointer(pPage)).FaData))/1) > Uptr((*BtShared)(unsafe.Pointer(pBt)).FusableSize-U32((*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal)) { - return Xsqlite3CorruptError(tls, 73400) + return Xsqlite3CorruptError(tls, 73404) } if offset < U32((*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal) { @@ -31085,7 +31087,7 @@ for *(*Pgno)(unsafe.Pointer(bp)) != 0 { if *(*Pgno)(unsafe.Pointer(bp)) > (*BtShared)(unsafe.Pointer(pBt)).FnPage { - return Xsqlite3CorruptError(tls, 73462) + return Xsqlite3CorruptError(tls, 73466) } *(*Pgno)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pCur)).FaOverflow + uintptr(iIdx)*4)) = *(*Pgno)(unsafe.Pointer(bp)) @@ -31134,7 +31136,7 @@ } if rc == SQLITE_OK && amt > U32(0) { - return Xsqlite3CorruptError(tls, 73547) + return Xsqlite3CorruptError(tls, 73551) } return rc } @@ -31214,7 +31216,7 @@ func moveToChild(tls *libc.TLS, pCur uintptr, newPgno U32) int32 { if int32((*BtCursor)(unsafe.Pointer(pCur)).FiPage) >= BTCURSOR_MAX_DEPTH-1 { - return Xsqlite3CorruptError(tls, 73684) + return Xsqlite3CorruptError(tls, 73688) } (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnSize = U16(0) *(*U8)(unsafe.Pointer(pCur + 1)) &= libc.Uint8FromInt32(libc.CplInt32(BTCF_ValidNKey | BTCF_ValidOvfl)) @@ -31305,7 +31307,7 @@ if !(int32((*MemPage)(unsafe.Pointer(pRoot)).FisInit) == 0 || libc.Bool32((*BtCursor)(unsafe.Pointer(pCur)).FpKeyInfo == uintptr(0)) != 
int32((*MemPage)(unsafe.Pointer(pRoot)).FintKey)) { goto __11 } - return Xsqlite3CorruptError(tls, 73823) + return Xsqlite3CorruptError(tls, 73827) __11: ; skip_init: @@ -31325,7 +31327,7 @@ if !((*MemPage)(unsafe.Pointer(pRoot)).Fpgno != Pgno(1)) { goto __16 } - return Xsqlite3CorruptError(tls, 73835) + return Xsqlite3CorruptError(tls, 73839) __16: ; subpage = Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer(pRoot)).FaData+uintptr(int32((*MemPage)(unsafe.Pointer(pRoot)).FhdrOffset)+8)) @@ -31535,7 +31537,7 @@ if !(pCell >= (*MemPage)(unsafe.Pointer(pPage)).FaDataEnd) { goto __21 } - return Xsqlite3CorruptError(tls, 74077) + return Xsqlite3CorruptError(tls, 74081) __21: ; goto __19 @@ -31739,7 +31741,7 @@ if !!(int32((*MemPage)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pCur)).FpPage)).FisInit) != 0) { goto __4 } - return Xsqlite3CorruptError(tls, 74273) + return Xsqlite3CorruptError(tls, 74277) __4: ; goto bypass_moveto_root @@ -31804,7 +31806,7 @@ if !(nCell < 2 || U32(nCell)/(*BtShared)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pCur)).FpBt)).FusableSize > (*BtShared)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pCur)).FpBt)).FnPage) { goto __17 } - rc = Xsqlite3CorruptError(tls, 74360) + rc = Xsqlite3CorruptError(tls, 74364) goto moveto_index_finish __17: ; @@ -31852,7 +31854,7 @@ if !((*UnpackedRecord)(unsafe.Pointer(pIdxKey)).FerrCode != 0) { goto __24 } - rc = Xsqlite3CorruptError(tls, 74392) + rc = Xsqlite3CorruptError(tls, 74396) __24: ; goto moveto_index_finish @@ -31971,7 +31973,7 @@ pPage = (*BtCursor)(unsafe.Pointer(pCur)).FpPage idx = int32(libc.PreIncUint16(&(*BtCursor)(unsafe.Pointer(pCur)).Fix, 1)) if !(int32((*MemPage)(unsafe.Pointer(pPage)).FisInit) != 0) || Xsqlite3FaultSim(tls, 412) != 0 { - return Xsqlite3CorruptError(tls, 74508) + return Xsqlite3CorruptError(tls, 74512) } if idx >= int32((*MemPage)(unsafe.Pointer(pPage)).FnCell) { @@ -32131,7 +32133,7 @@ if !(n >= mxPage) { goto __1 } - return Xsqlite3CorruptError(tls, 74688) + return Xsqlite3CorruptError(tls, 74692) __1: ; if !(n > U32(0)) { @@ -32196,7 +32198,7 @@ if !(iTrunk > mxPage || libc.PostIncUint32(&nSearch, 1) > n) { goto __16 } - rc = Xsqlite3CorruptError(tls, 74744) + rc = Xsqlite3CorruptError(tls, 74748) goto __17 __16: rc = btreeGetUnusedPage(tls, pBt, iTrunk, bp+8, 0) @@ -32232,7 +32234,7 @@ goto __22 } - rc = Xsqlite3CorruptError(tls, 74773) + rc = Xsqlite3CorruptError(tls, 74777) goto end_allocate_page goto __23 __22: @@ -32276,7 +32278,7 @@ if !(iNewTrunk > mxPage) { goto __32 } - rc = Xsqlite3CorruptError(tls, 74807) + rc = Xsqlite3CorruptError(tls, 74811) goto end_allocate_page __32: ; @@ -32388,7 +32390,7 @@ if !(iPage > mxPage || iPage < Pgno(2)) { goto __51 } - rc = Xsqlite3CorruptError(tls, 74872) + rc = Xsqlite3CorruptError(tls, 74876) goto end_allocate_page __51: ; @@ -32546,7 +32548,7 @@ if !(iPage < Pgno(2) || iPage > (*BtShared)(unsafe.Pointer(pBt)).FnPage) { goto __1 } - return Xsqlite3CorruptError(tls, 74999) + return Xsqlite3CorruptError(tls, 75003) __1: ; if !(pMemPage != 0) { @@ -32603,7 +32605,7 @@ if !(iTrunk > btreePagecount(tls, pBt)) { goto __10 } - *(*int32)(unsafe.Pointer(bp + 8)) = Xsqlite3CorruptError(tls, 75046) + *(*int32)(unsafe.Pointer(bp + 8)) = Xsqlite3CorruptError(tls, 75050) goto freepage_out __10: ; @@ -32619,7 +32621,7 @@ if !(nLeaf > (*BtShared)(unsafe.Pointer(pBt)).FusableSize/U32(4)-U32(2)) { goto __12 } - *(*int32)(unsafe.Pointer(bp + 8)) = Xsqlite3CorruptError(tls, 75057) + *(*int32)(unsafe.Pointer(bp + 8)) = Xsqlite3CorruptError(tls, 75061) goto freepage_out __12: 
; @@ -32693,7 +32695,7 @@ var ovflPageSize U32 if pCell+uintptr((*CellInfo)(unsafe.Pointer(pInfo)).FnSize) > (*MemPage)(unsafe.Pointer(pPage)).FaDataEnd { - return Xsqlite3CorruptError(tls, 75146) + return Xsqlite3CorruptError(tls, 75150) } ovflPgno = Xsqlite3Get4byte(tls, pCell+uintptr((*CellInfo)(unsafe.Pointer(pInfo)).FnSize)-uintptr(4)) pBt = (*MemPage)(unsafe.Pointer(pPage)).FpBt @@ -32705,7 +32707,7 @@ *(*Pgno)(unsafe.Pointer(bp + 8)) = Pgno(0) *(*uintptr)(unsafe.Pointer(bp)) = uintptr(0) if ovflPgno < Pgno(2) || ovflPgno > btreePagecount(tls, pBt) { - return Xsqlite3CorruptError(tls, 75163) + return Xsqlite3CorruptError(tls, 75167) } if nOvfl != 0 { rc = getOverflowPage(tls, pBt, ovflPgno, bp, bp+8) @@ -32716,7 +32718,7 @@ if (*(*uintptr)(unsafe.Pointer(bp)) != 0 || libc.AssignPtrUintptr(bp, btreePageLookup(tls, pBt, ovflPgno)) != uintptr(0)) && Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FpDbPage) != 1 { - rc = Xsqlite3CorruptError(tls, 75183) + rc = Xsqlite3CorruptError(tls, 75187) } else { rc = freePage2(tls, pBt, *(*uintptr)(unsafe.Pointer(bp)), ovflPgno) } @@ -32881,7 +32883,7 @@ hdr = int32((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset) if pc+U32(sz) > (*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize { - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 75436) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 75440) return } rc = freeSpace(tls, pPage, uint16(pc), uint16(sz)) @@ -33160,12 +33162,12 @@ if Uptr(pCell) >= Uptr(aData+uintptr(j)) && Uptr(pCell) < Uptr(pEnd) { if Uptr(pCell+uintptr(sz)) > Uptr(pEnd) { - return Xsqlite3CorruptError(tls, 75737) + return Xsqlite3CorruptError(tls, 75741) } pCell = pTmp + uintptr((int64(pCell)-int64(aData))/1) } else if Uptr(pCell+uintptr(sz)) > Uptr(pSrcEnd) && Uptr(pCell) < Uptr(pSrcEnd) { - return Xsqlite3CorruptError(tls, 75742) + return Xsqlite3CorruptError(tls, 75746) } pData -= uintptr(sz) @@ -33173,7 +33175,7 @@ *(*U8)(unsafe.Pointer(pCellptr + 1)) = U8((int64(pData) - int64(aData)) / 1) pCellptr += uintptr(2) if pData < pCellptr { - return Xsqlite3CorruptError(tls, 75748) + return Xsqlite3CorruptError(tls, 75752) } libc.Xmemmove(tls, pData, pCell, uint64(sz)) @@ -33233,7 +33235,7 @@ if Uptr(*(*uintptr)(unsafe.Pointer((*CellArray)(unsafe.Pointer(pCArray)).FapCell + uintptr(i)*8))+uintptr(sz)) > Uptr(pEnd) && Uptr(*(*uintptr)(unsafe.Pointer((*CellArray)(unsafe.Pointer(pCArray)).FapCell + uintptr(i)*8))) < Uptr(pEnd) { - Xsqlite3CorruptError(tls, 75833) + Xsqlite3CorruptError(tls, 75837) return 1 } libc.Xmemmove(tls, pSlot, *(*uintptr)(unsafe.Pointer((*CellArray)(unsafe.Pointer(pCArray)).FapCell + uintptr(i)*8)), uint64(sz)) @@ -33322,7 +33324,7 @@ if !(nShift > nCell) { goto __2 } - return Xsqlite3CorruptError(tls, 75947) + return Xsqlite3CorruptError(tls, 75951) __2: ; libc.Xmemmove(tls, (*MemPage)(unsafe.Pointer(pPg)).FaCellIdx, (*MemPage)(unsafe.Pointer(pPg)).FaCellIdx+uintptr(nShift*2), uint64(nCell*2)) @@ -33438,7 +33440,7 @@ var pBt uintptr = (*MemPage)(unsafe.Pointer(pPage)).FpBt if int32((*MemPage)(unsafe.Pointer(pPage)).FnCell) == 0 { - return Xsqlite3CorruptError(tls, 76060) + return Xsqlite3CorruptError(tls, 76064) } *(*int32)(unsafe.Pointer(bp + 136)) = allocateBtreePage(tls, pBt, bp, bp+8, uint32(0), uint8(0)) @@ -33758,7 +33760,7 @@ if !(int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pOld)).FaData))) != int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp + 112)))).FaData)))) { goto 
__25 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76481) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76485) goto balance_cleanup __25: ; @@ -33769,7 +33771,7 @@ if !(limit < int32(*(*U16)(unsafe.Pointer(pOld + 28)))) { goto __27 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76505) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76509) goto balance_cleanup __27: ; @@ -33927,7 +33929,7 @@ if !(k > NB+2) { goto __55 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76606) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76610) goto balance_cleanup __55: ; @@ -34001,7 +34003,7 @@ }()) { goto __67 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76639) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76643) goto balance_cleanup __67: ; @@ -34064,7 +34066,7 @@ }()) { goto __75 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76683) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76687) goto balance_cleanup __75: ; @@ -34092,7 +34094,7 @@ *(*int32)(unsafe.Pointer(bp + 172)) == SQLITE_OK) { goto __81 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76716) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76720) __81: ; if !(*(*int32)(unsafe.Pointer(bp + 172)) != 0) { @@ -34353,7 +34355,7 @@ if !(Uptr(pSrcEnd) >= Uptr(pCell1) && Uptr(pSrcEnd) < Uptr(pCell1+uintptr(sz2))) { goto __121 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76916) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76920) goto balance_cleanup __121: ; @@ -34545,7 +34547,7 @@ if pOther != pCur && int32((*BtCursor)(unsafe.Pointer(pOther)).FeState) == CURSOR_VALID && (*BtCursor)(unsafe.Pointer(pOther)).FpPage == (*BtCursor)(unsafe.Pointer(pCur)).FpPage { - return Xsqlite3CorruptError(tls, 77146) + return Xsqlite3CorruptError(tls, 77150) } } return SQLITE_OK @@ -34583,7 +34585,7 @@ break } } else if Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(pPage)).FpDbPage) > 1 { - rc = Xsqlite3CorruptError(tls, 77206) + rc = Xsqlite3CorruptError(tls, 77210) } else { var pParent uintptr = *(*uintptr)(unsafe.Pointer(pCur + 144 + uintptr(iPage-1)*8)) var iIdx int32 = int32(*(*U16)(unsafe.Pointer(pCur + 88 + uintptr(iPage-1)*2))) @@ -34689,7 +34691,7 @@ return rc } if Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FpDbPage) != 1 || (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FisInit != 0 { - rc = Xsqlite3CorruptError(tls, 77370) + rc = Xsqlite3CorruptError(tls, 77374) } else { if U32(iOffset)+ovflPageSize < U32(nTotal) { ovflPgno = Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FaData) @@ -34714,7 +34716,7 @@ if (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload+uintptr((*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal) > (*MemPage)(unsafe.Pointer(pPage)).FaDataEnd || (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload < (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr((*MemPage)(unsafe.Pointer(pPage)).FcellOffset) { - return Xsqlite3CorruptError(tls, 77398) + return Xsqlite3CorruptError(tls, 77402) } if int32((*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal) == nTotal { return btreeOverwriteContent(tls, pPage, (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload, pX, @@ -34784,7 +34786,7 @@ goto __3 } - return Xsqlite3CorruptError(tls, 77479) + return Xsqlite3CorruptError(tls, 77483) __3: ; 
__1: @@ -34897,7 +34899,7 @@ goto __21 } - *(*int32)(unsafe.Pointer(bp + 120)) = Xsqlite3CorruptError(tls, 77602) + *(*int32)(unsafe.Pointer(bp + 120)) = Xsqlite3CorruptError(tls, 77606) goto __22 __21: *(*int32)(unsafe.Pointer(bp + 120)) = btreeComputeFreeSpace(tls, pPage) @@ -34957,6 +34959,7 @@ __25: ; idx = int32((*BtCursor)(unsafe.Pointer(pCur)).Fix) + (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnSize = U16(0) if !(*(*int32)(unsafe.Pointer(bp)) == 0) { goto __31 } @@ -34964,7 +34967,7 @@ if !(idx >= int32((*MemPage)(unsafe.Pointer(pPage)).FnCell)) { goto __33 } - return Xsqlite3CorruptError(tls, 77640) + return Xsqlite3CorruptError(tls, 77645) __33: ; *(*int32)(unsafe.Pointer(bp + 120)) = Xsqlite3PagerWrite(tls, (*MemPage)(unsafe.Pointer(pPage)).FpDbPage) @@ -35002,13 +35005,13 @@ if !(oldCell < (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset)+uintptr(10)) { goto __39 } - return Xsqlite3CorruptError(tls, 77667) + return Xsqlite3CorruptError(tls, 77672) __39: ; if !(oldCell+uintptr(*(*int32)(unsafe.Pointer(bp + 124))) > (*MemPage)(unsafe.Pointer(pPage)).FaDataEnd) { goto __40 } - return Xsqlite3CorruptError(tls, 77670) + return Xsqlite3CorruptError(tls, 77675) __40: ; libc.Xmemcpy(tls, oldCell, newCell, uint64(*(*int32)(unsafe.Pointer(bp + 124)))) @@ -35039,7 +35042,6 @@ ; *(*int32)(unsafe.Pointer(bp + 120)) = insertCell(tls, pPage, idx, newCell, *(*int32)(unsafe.Pointer(bp + 124)), uintptr(0), uint32(0)) - (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnSize = U16(0) if !((*MemPage)(unsafe.Pointer(pPage)).FnOverflow != 0) { goto __44 } @@ -35114,7 +35116,7 @@ nIn = U32((*BtCursor)(unsafe.Pointer(pSrc)).Finfo.FnLocal) aIn = (*BtCursor)(unsafe.Pointer(pSrc)).Finfo.FpPayload if aIn+uintptr(nIn) > (*MemPage)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pSrc)).FpPage)).FaDataEnd { - return Xsqlite3CorruptError(tls, 77773) + return Xsqlite3CorruptError(tls, 77777) } nRem = (*BtCursor)(unsafe.Pointer(pSrc)).Finfo.FnPayload if nIn == nRem && nIn < U32((*MemPage)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pDest)).FpPage)).FmaxLocal) { @@ -35139,7 +35141,7 @@ if nRem > nIn { if aIn+uintptr(nIn)+uintptr(4) > (*MemPage)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pSrc)).FpPage)).FaDataEnd { - return Xsqlite3CorruptError(tls, 77798) + return Xsqlite3CorruptError(tls, 77802) } ovflIn = Xsqlite3Get4byte(tls, (*BtCursor)(unsafe.Pointer(pSrc)).Finfo.FpPayload+uintptr(nIn)) } @@ -35240,7 +35242,7 @@ return *(*int32)(unsafe.Pointer(bp + 24)) } } else { - return Xsqlite3CorruptError(tls, 77894) + return Xsqlite3CorruptError(tls, 77898) } } @@ -35248,11 +35250,11 @@ iCellIdx = int32((*BtCursor)(unsafe.Pointer(pCur)).Fix) pPage = (*BtCursor)(unsafe.Pointer(pCur)).FpPage if int32((*MemPage)(unsafe.Pointer(pPage)).FnCell) <= iCellIdx { - return Xsqlite3CorruptError(tls, 77903) + return Xsqlite3CorruptError(tls, 77907) } pCell = (*MemPage)(unsafe.Pointer(pPage)).FaData + uintptr(int32((*MemPage)(unsafe.Pointer(pPage)).FmaskPage)&(int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FaCellIdx + uintptr(2*iCellIdx))))<<8|int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FaCellIdx + uintptr(2*iCellIdx) + 1))))) if (*MemPage)(unsafe.Pointer(pPage)).FnFree < 0 && btreeComputeFreeSpace(tls, pPage) != 0 { - return Xsqlite3CorruptError(tls, 77907) + return Xsqlite3CorruptError(tls, 77911) } bPreserve = U8(libc.Bool32(int32(flags)&BTREE_SAVEPOSITION != 0)) @@ -35327,7 +35329,7 @@ } pCell = (*MemPage)(unsafe.Pointer(pLeaf)).FaData + 
uintptr(int32((*MemPage)(unsafe.Pointer(pLeaf)).FmaskPage)&(int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pLeaf)).FaCellIdx + uintptr(2*(int32((*MemPage)(unsafe.Pointer(pLeaf)).FnCell)-1)))))<<8|int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pLeaf)).FaCellIdx + uintptr(2*(int32((*MemPage)(unsafe.Pointer(pLeaf)).FnCell)-1)) + 1))))) if pCell < (*MemPage)(unsafe.Pointer(pLeaf)).FaData+4 { - return Xsqlite3CorruptError(tls, 77998) + return Xsqlite3CorruptError(tls, 78002) } nCell = int32((*struct { f func(*libc.TLS, uintptr, uintptr) U16 @@ -35396,7 +35398,7 @@ Xsqlite3BtreeGetMeta(tls, p, BTREE_LARGEST_ROOT_PAGE, bp) if *(*Pgno)(unsafe.Pointer(bp)) > btreePagecount(tls, pBt) { - return Xsqlite3CorruptError(tls, 78114) + return Xsqlite3CorruptError(tls, 78118) } *(*Pgno)(unsafe.Pointer(bp))++ @@ -35425,7 +35427,7 @@ } *(*int32)(unsafe.Pointer(bp + 40)) = ptrmapGet(tls, pBt, *(*Pgno)(unsafe.Pointer(bp)), bp+32, bp+36) if int32(*(*U8)(unsafe.Pointer(bp + 32))) == PTRMAP_ROOTPAGE || int32(*(*U8)(unsafe.Pointer(bp + 32))) == PTRMAP_FREEPAGE { - *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3CorruptError(tls, 78162) + *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3CorruptError(tls, 78166) } if *(*int32)(unsafe.Pointer(bp + 40)) != SQLITE_OK { releasePage(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) @@ -35501,7 +35503,7 @@ if !(pgno > btreePagecount(tls, pBt)) { goto __1 } - return Xsqlite3CorruptError(tls, 78252) + return Xsqlite3CorruptError(tls, 78256) __1: ; *(*int32)(unsafe.Pointer(bp + 32)) = getAndInitPage(tls, pBt, pgno, bp, uintptr(0), 0) @@ -35515,7 +35517,7 @@ Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FpDbPage) != 1+libc.Bool32(pgno == Pgno(1))) { goto __3 } - *(*int32)(unsafe.Pointer(bp + 32)) = Xsqlite3CorruptError(tls, 78259) + *(*int32)(unsafe.Pointer(bp + 32)) = Xsqlite3CorruptError(tls, 78263) goto cleardatabasepage_out __3: ; @@ -35649,7 +35651,7 @@ var pBt uintptr = (*Btree)(unsafe.Pointer(p)).FpBt if iTable > btreePagecount(tls, pBt) { - return Xsqlite3CorruptError(tls, 78363) + return Xsqlite3CorruptError(tls, 78367) } *(*int32)(unsafe.Pointer(bp + 12)) = Xsqlite3BtreeClearTable(tls, p, int32(iTable), uintptr(0)) @@ -38118,7 +38120,7 @@ var rc int32 (*Mem)(unsafe.Pointer(pMem)).Fflags = U16(MEM_Null) if Xsqlite3BtreeMaxRecordSize(tls, pCur) < Sqlite3_int64(offset+amt) { - return Xsqlite3CorruptError(tls, 81630) + return Xsqlite3CorruptError(tls, 81634) } if SQLITE_OK == libc.AssignInt32(&rc, Xsqlite3VdbeMemClearAndResize(tls, pMem, int32(amt+U32(1)))) { rc = Xsqlite3BtreePayload(tls, pCur, offset, amt, (*Mem)(unsafe.Pointer(pMem)).Fz) @@ -38767,7 +38769,7 @@ return Xsqlite3GetVarint32(tls, a, bp) }()) if *(*int32)(unsafe.Pointer(bp)) > nRec || iHdr >= *(*int32)(unsafe.Pointer(bp)) { - return Xsqlite3CorruptError(tls, 82270) + return Xsqlite3CorruptError(tls, 82274) } iField = *(*int32)(unsafe.Pointer(bp)) for i = 0; i <= iCol; i++ { @@ -38782,14 +38784,14 @@ }()) if iHdr > *(*int32)(unsafe.Pointer(bp)) { - return Xsqlite3CorruptError(tls, 82276) + return Xsqlite3CorruptError(tls, 82280) } szField = int32(Xsqlite3VdbeSerialTypeLen(tls, *(*U32)(unsafe.Pointer(bp + 4)))) iField = iField + szField } if iField > nRec { - return Xsqlite3CorruptError(tls, 82282) + return Xsqlite3CorruptError(tls, 82286) } if pMem == uintptr(0) { pMem = libc.AssignPtrUintptr(ppVal, Xsqlite3ValueNew(tls, db)) @@ -41093,7 +41095,7 @@ return rc } if *(*int32)(unsafe.Pointer(bp)) != 0 { - return Xsqlite3CorruptError(tls, 86058) + return 
Xsqlite3CorruptError(tls, 86062) } (*VdbeCursor)(unsafe.Pointer(p)).FdeferredMoveto = U8(0) (*VdbeCursor)(unsafe.Pointer(p)).FcacheStatus = U32(CACHE_STALE) @@ -41644,7 +41646,7 @@ i = 0 } if d1 > uint32(nKey1) { - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 86985)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 86989)) return 0 } @@ -41709,7 +41711,7 @@ if d1+U32((*Mem)(unsafe.Pointer(bp+8)).Fn) > uint32(nKey1) || int32((*KeyInfo)(unsafe.Pointer(libc.AssignUintptr(&pKeyInfo, (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FpKeyInfo))).FnAllField) <= i { - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87062)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87066)) return 0 } else if *(*uintptr)(unsafe.Pointer(pKeyInfo + 32 + uintptr(i)*8)) != 0 { (*Mem)(unsafe.Pointer(bp + 8)).Fenc = (*KeyInfo)(unsafe.Pointer(pKeyInfo)).Fenc @@ -41743,7 +41745,7 @@ var nStr int32 = int32((*(*U32)(unsafe.Pointer(bp + 64)) - U32(12)) / U32(2)) if d1+U32(nStr) > uint32(nKey1) { - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87092)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87096)) return 0 } else if int32((*Mem)(unsafe.Pointer(pRhs)).Fflags)&MEM_Zero != 0 { if !(isAllZero(tls, aKey1+uintptr(d1), nStr) != 0) { @@ -41793,7 +41795,7 @@ } idx1 = idx1 + U32(Xsqlite3VarintLen(tls, uint64(*(*U32)(unsafe.Pointer(bp + 64))))) if idx1 >= *(*U32)(unsafe.Pointer(bp + 4)) { - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87136)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87140)) return 0 } } @@ -41939,7 +41941,7 @@ if !(szHdr+nStr > nKey1) { goto __7 } - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87299)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87303)) return 0 __7: ; @@ -42110,7 +42112,7 @@ idx_rowid_corruption: ; Xsqlite3VdbeMemReleaseMalloc(tls, bp) - return Xsqlite3CorruptError(tls, 87457) + return Xsqlite3CorruptError(tls, 87461) } // Compare the key of the index entry that cursor pC is pointing to against @@ -42136,7 +42138,7 @@ if nCellKey <= int64(0) || nCellKey > int64(0x7fffffff) { *(*int32)(unsafe.Pointer(res)) = 0 - return Xsqlite3CorruptError(tls, 87490) + return Xsqlite3CorruptError(tls, 87494) } Xsqlite3VdbeMemInit(tls, bp, db, uint16(0)) rc = Xsqlite3VdbeMemFromBtreeZeroOffset(tls, pCur, U32(nCellKey), bp) @@ -42410,7 +42412,7 @@ var v uintptr = pStmt var db uintptr = (*Vdbe)(unsafe.Pointer(v)).Fdb if vdbeSafety(tls, v) != 0 { - return Xsqlite3MisuseError(tls, 87854) + return Xsqlite3MisuseError(tls, 87858) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) if (*Vdbe)(unsafe.Pointer(v)).FstartTime > int64(0) { @@ -43025,7 +43027,7 @@ var db uintptr if vdbeSafetyNotNull(tls, v) != 0 { - return Xsqlite3MisuseError(tls, 88544) + return Xsqlite3MisuseError(tls, 88548) } db = (*Vdbe)(unsafe.Pointer(v)).Fdb Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) @@ -43545,7 +43547,7 @@ var pVar uintptr if vdbeSafetyNotNull(tls, p) != 0 { - return Xsqlite3MisuseError(tls, 89208) + return Xsqlite3MisuseError(tls, 89212) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer((*Vdbe)(unsafe.Pointer(p)).Fdb)).Fmutex) if int32((*Vdbe)(unsafe.Pointer(p)).FeVdbeState) != VDBE_READY_STATE { @@ -43553,7 +43555,7 @@ 
Xsqlite3_mutex_leave(tls, (*Sqlite3)(unsafe.Pointer((*Vdbe)(unsafe.Pointer(p)).Fdb)).Fmutex) Xsqlite3_log(tls, SQLITE_MISUSE, ts+5350, libc.VaList(bp, (*Vdbe)(unsafe.Pointer(p)).FzSql)) - return Xsqlite3MisuseError(tls, 89216) + return Xsqlite3MisuseError(tls, 89220) } if i >= uint32((*Vdbe)(unsafe.Pointer(p)).FnVar) { Xsqlite3Error(tls, (*Vdbe)(unsafe.Pointer(p)).Fdb, SQLITE_RANGE) @@ -43958,7 +43960,7 @@ if !(!(p != 0) || (*PreUpdate)(unsafe.Pointer(p)).Fop == SQLITE_INSERT) { goto __1 } - rc = Xsqlite3MisuseError(tls, 89707) + rc = Xsqlite3MisuseError(tls, 89711) goto preupdate_old_out __1: ; @@ -44102,7 +44104,7 @@ if !(!(p != 0) || (*PreUpdate)(unsafe.Pointer(p)).Fop == SQLITE_DELETE) { goto __1 } - rc = Xsqlite3MisuseError(tls, 89809) + rc = Xsqlite3MisuseError(tls, 89813) goto preupdate_new_out __1: ; @@ -44546,10 +44548,6 @@ } else if int32((*Mem)(unsafe.Pointer(p)).Fflags)&MEM_Real != 0 { h = h + U64(Xsqlite3VdbeIntValue(tls, p)) } else if int32((*Mem)(unsafe.Pointer(p)).Fflags)&(MEM_Str|MEM_Blob) != 0 { - h = h + U64((*Mem)(unsafe.Pointer(p)).Fn) - if int32((*Mem)(unsafe.Pointer(p)).Fflags)&MEM_Zero != 0 { - h = h + U64(*(*int32)(unsafe.Pointer(p))) - } } } return h @@ -47198,7 +47196,7 @@ goto __9 goto __425 __424: - rc = Xsqlite3CorruptError(tls, 93317) + rc = Xsqlite3CorruptError(tls, 93320) goto abort_due_to_error __425: ; @@ -48958,7 +48956,7 @@ if !((*Op)(unsafe.Pointer(pOp)).Fp2 == 0) { goto __682 } - rc = Xsqlite3CorruptError(tls, 95560) + rc = Xsqlite3CorruptError(tls, 95563) goto __683 __682: goto jump_to_p2 @@ -49736,7 +49734,7 @@ if !((*Op)(unsafe.Pointer(pOp)).Fp5 != 0 && !(Xsqlite3WritableSchema(tls, db) != 0)) { goto __770 } - rc = Xsqlite3ReportError(tls, SQLITE_CORRUPT|int32(3)<<8, 96635, ts+5859) + rc = Xsqlite3ReportError(tls, SQLITE_CORRUPT|int32(3)<<8, 96638, ts+5859) goto abort_due_to_error __770: ; @@ -49846,7 +49844,7 @@ if !(nCellKey <= int64(0) || nCellKey > int64(0x7fffffff)) { goto __781 } - rc = Xsqlite3CorruptError(tls, 96840) + rc = Xsqlite3CorruptError(tls, 96843) goto abort_due_to_error __781: ; @@ -50040,7 +50038,7 @@ goto __803 } - rc = Xsqlite3CorruptError(tls, 97092) + rc = Xsqlite3CorruptError(tls, 97095) __803: ; Xsqlite3DbFreeNN(tls, db, zSql) @@ -51407,7 +51405,7 @@ if !(rc == SQLITE_IOERR|int32(33)<<8) { goto __957 } - rc = Xsqlite3CorruptError(tls, 99031) + rc = Xsqlite3CorruptError(tls, 99034) __957: ; __956: @@ -51927,7 +51925,7 @@ var db uintptr if p == uintptr(0) { - return Xsqlite3MisuseError(tls, 99516) + return Xsqlite3MisuseError(tls, 99519) } db = (*Incrblob)(unsafe.Pointer(p)).Fdb Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) @@ -52010,7 +52008,7 @@ var db uintptr if p == uintptr(0) { - return Xsqlite3MisuseError(tls, 99616) + return Xsqlite3MisuseError(tls, 99619) } db = (*Incrblob)(unsafe.Pointer(p)).Fdb Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) @@ -55450,14 +55448,10 @@ ; Xsqlite3WalkExpr(tls, pWalker, (*Expr)(unsafe.Pointer(pExpr)).FpLeft) if 0 == Xsqlite3ExprCanBeNull(tls, (*Expr)(unsafe.Pointer(pExpr)).FpLeft) && !(int32((*Parse)(unsafe.Pointer(pParse)).FeParseMode) >= PARSE_MODE_RENAME) { - if int32((*Expr)(unsafe.Pointer(pExpr)).Fop) == TK_NOTNULL { - *(*uintptr)(unsafe.Pointer(pExpr + 8)) = ts + 6757 - *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_IsTrue) - } else { - *(*uintptr)(unsafe.Pointer(pExpr + 8)) = ts + 6762 - *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_IsFalse) - } - (*Expr)(unsafe.Pointer(pExpr)).Fop = U8(TK_TRUEFALSE) + *(*int32)(unsafe.Pointer(pExpr + 8)) = 
libc.Bool32(int32((*Expr)(unsafe.Pointer(pExpr)).Fop) == TK_NOTNULL) + *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_IntValue) + (*Expr)(unsafe.Pointer(pExpr)).Fop = U8(TK_INTEGER) + i = 0 p = pNC __4: @@ -55501,7 +55495,7 @@ var pLeft uintptr = (*Expr)(unsafe.Pointer(pExpr)).FpLeft if (*NameContext)(unsafe.Pointer(pNC)).FncFlags&(NC_IdxExpr|NC_GenCol) != 0 { - notValidImpl(tls, pParse, pNC, ts+6768, uintptr(0), pExpr) + notValidImpl(tls, pParse, pNC, ts+6757, uintptr(0), pExpr) } pRight = (*Expr)(unsafe.Pointer(pExpr)).FpRight @@ -55565,7 +55559,7 @@ (*Expr)(unsafe.Pointer(pExpr)).FiTable = exprProbability(tls, (*ExprList_item)(unsafe.Pointer(pList+8+1*32)).FpExpr) if (*Expr)(unsafe.Pointer(pExpr)).FiTable < 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+6785, libc.VaList(bp, pExpr)) + ts+6774, libc.VaList(bp, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } } else { @@ -55581,7 +55575,7 @@ var auth int32 = Xsqlite3AuthCheck(tls, pParse, SQLITE_FUNCTION, uintptr(0), (*FuncDef)(unsafe.Pointer(pDef)).FzName, uintptr(0)) if auth != SQLITE_OK { if auth == SQLITE_DENY { - Xsqlite3ErrorMsg(tls, pParse, ts+6849, + Xsqlite3ErrorMsg(tls, pParse, ts+6838, libc.VaList(bp+8, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } @@ -55595,7 +55589,7 @@ } if (*FuncDef)(unsafe.Pointer(pDef)).FfuncFlags&U32(SQLITE_FUNC_CONSTANT) == U32(0) { if (*NameContext)(unsafe.Pointer(pNC)).FncFlags&(NC_IdxExpr|NC_PartIdx|NC_GenCol) != 0 { - notValidImpl(tls, pParse, pNC, ts+6885, uintptr(0), pExpr) + notValidImpl(tls, pParse, pNC, ts+6874, uintptr(0), pExpr) } } else { @@ -55618,30 +55612,30 @@ if 0 == libc.Bool32(int32((*Parse)(unsafe.Pointer(pParse)).FeParseMode) >= PARSE_MODE_RENAME) { if pDef != 0 && (*FuncDef)(unsafe.Pointer(pDef)).FxValue == uintptr(0) && pWin != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+6913, libc.VaList(bp+16, pExpr)) + ts+6902, libc.VaList(bp+16, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } else if is_agg != 0 && (*NameContext)(unsafe.Pointer(pNC)).FncFlags&NC_AllowAgg == 0 || is_agg != 0 && (*FuncDef)(unsafe.Pointer(pDef)).FfuncFlags&U32(SQLITE_FUNC_WINDOW) != 0 && !(pWin != 0) || is_agg != 0 && pWin != 0 && (*NameContext)(unsafe.Pointer(pNC)).FncFlags&NC_AllowWin == 0 { var zType uintptr if (*FuncDef)(unsafe.Pointer(pDef)).FfuncFlags&U32(SQLITE_FUNC_WINDOW) != 0 || pWin != 0 { - zType = ts + 6956 + zType = ts + 6945 } else { - zType = ts + 6963 + zType = ts + 6952 } - Xsqlite3ErrorMsg(tls, pParse, ts+6973, libc.VaList(bp+24, zType, pExpr)) + Xsqlite3ErrorMsg(tls, pParse, ts+6962, libc.VaList(bp+24, zType, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ is_agg = 0 } else if no_such_func != 0 && int32((*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).Finit.Fbusy) == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+7001, libc.VaList(bp+40, pExpr)) + Xsqlite3ErrorMsg(tls, pParse, ts+6990, libc.VaList(bp+40, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } else if wrong_num_args != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+7023, + Xsqlite3ErrorMsg(tls, pParse, ts+7012, libc.VaList(bp+48, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } else if is_agg == 0 && (*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_WinFunc) != U32(0) { Xsqlite3ErrorMsg(tls, pParse, - ts+7067, + ts+7056, libc.VaList(bp+56, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } @@ -55713,15 +55707,15 @@ var nRef int32 = (*NameContext)(unsafe.Pointer(pNC)).FnRef if (*NameContext)(unsafe.Pointer(pNC)).FncFlags&NC_SelfRef != 0 { - notValidImpl(tls, pParse, pNC, ts+7115, pExpr, pExpr) + 
notValidImpl(tls, pParse, pNC, ts+7104, pExpr, pExpr) } else { Xsqlite3WalkSelect(tls, pWalker, *(*uintptr)(unsafe.Pointer(pExpr + 32))) } if nRef != (*NameContext)(unsafe.Pointer(pNC)).FnRef { *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_VarSelect) - *(*int32)(unsafe.Pointer(pNC + 40)) |= NC_VarSelect } + *(*int32)(unsafe.Pointer(pNC + 40)) |= NC_Subquery } break @@ -55729,7 +55723,7 @@ case TK_VARIABLE: { if (*NameContext)(unsafe.Pointer(pNC)).FncFlags&(NC_IsCheck|NC_PartIdx|NC_IdxExpr|NC_GenCol) != 0 { - notValidImpl(tls, pParse, pNC, ts+7126, pExpr, pExpr) + notValidImpl(tls, pParse, pNC, ts+7115, pExpr, pExpr) } break @@ -55860,7 +55854,7 @@ defer tls.Free(24) Xsqlite3ErrorMsg(tls, pParse, - ts+7137, libc.VaList(bp, i, zType, mx)) + ts+7126, libc.VaList(bp, i, zType, mx)) Xsqlite3RecordErrorOffsetOfExpr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pError) } @@ -55880,7 +55874,7 @@ } db = (*Parse)(unsafe.Pointer(pParse)).Fdb if (*ExprList)(unsafe.Pointer(pOrderBy)).FnExpr > *(*int32)(unsafe.Pointer(db + 136 + 2*4)) { - Xsqlite3ErrorMsg(tls, pParse, ts+7193, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+7182, 0) return 1 } for i = 0; i < (*ExprList)(unsafe.Pointer(pOrderBy)).FnExpr; i++ { @@ -55915,7 +55909,7 @@ } if Xsqlite3ExprIsInteger(tls, pE, bp+8) != 0 { if *(*int32)(unsafe.Pointer(bp + 8)) <= 0 || *(*int32)(unsafe.Pointer(bp + 8)) > (*ExprList)(unsafe.Pointer(pEList)).FnExpr { - resolveOutOfRangeError(tls, pParse, ts+7227, i+1, (*ExprList)(unsafe.Pointer(pEList)).FnExpr, pE) + resolveOutOfRangeError(tls, pParse, ts+7216, i+1, (*ExprList)(unsafe.Pointer(pEList)).FnExpr, pE) return 1 } } else { @@ -55972,7 +55966,7 @@ for i = 0; i < (*ExprList)(unsafe.Pointer(pOrderBy)).FnExpr; i++ { if int32(*(*uint16)(unsafe.Pointer(pOrderBy + 8 + uintptr(i)*32 + 16 + 4))&0x4>>2) == 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+7233, libc.VaList(bp, i+1)) + ts+7222, libc.VaList(bp, i+1)) return 1 } } @@ -56000,7 +55994,7 @@ return 0 } if (*ExprList)(unsafe.Pointer(pOrderBy)).FnExpr > *(*int32)(unsafe.Pointer(db + 136 + 2*4)) { - Xsqlite3ErrorMsg(tls, pParse, ts+7294, libc.VaList(bp, zType)) + Xsqlite3ErrorMsg(tls, pParse, ts+7283, libc.VaList(bp, zType)) return 1 } pEList = (*Select)(unsafe.Pointer(pSelect)).FpEList @@ -56214,7 +56208,7 @@ *(*int32)(unsafe.Pointer(bp + 40)) |= NC_UEList if (*Select)(unsafe.Pointer(p)).FpHaving != 0 { if (*Select)(unsafe.Pointer(p)).FselFlags&U32(SF_Aggregate) == U32(0) { - Xsqlite3ErrorMsg(tls, pParse, ts+7325, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+7314, 0) return WRC_Abort } if Xsqlite3ResolveExprNames(tls, bp, (*Select)(unsafe.Pointer(p)).FpHaving) != 0 { @@ -56254,7 +56248,7 @@ if (*Select)(unsafe.Pointer(p)).FpOrderBy != uintptr(0) && isCompound <= nCompound && - resolveOrderGroupBy(tls, bp, p, (*Select)(unsafe.Pointer(p)).FpOrderBy, ts+7227) != 0 { + resolveOrderGroupBy(tls, bp, p, (*Select)(unsafe.Pointer(p)).FpOrderBy, ts+7216) != 0 { return WRC_Abort } if (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { @@ -56265,7 +56259,7 @@ if pGroupBy != 0 { var pItem uintptr - if resolveOrderGroupBy(tls, bp, p, pGroupBy, ts+7364) != 0 || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { + if resolveOrderGroupBy(tls, bp, p, pGroupBy, ts+7353) != 0 || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { return WRC_Abort } i = 0 @@ -56277,7 +56271,7 @@ { if (*Expr)(unsafe.Pointer((*ExprList_item)(unsafe.Pointer(pItem)).FpExpr)).Fflags&U32(EP_Agg) != U32(0) { Xsqlite3ErrorMsg(tls, pParse, - ts+7370, 0) + ts+7359, 0) return WRC_Abort } @@ -57141,7 +57135,7 @@ var mxHeight int32 = 
*(*int32)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb + 136 + 3*4)) if nHeight > mxHeight { Xsqlite3ErrorMsg(tls, pParse, - ts+7429, libc.VaList(bp, mxHeight)) + ts+7418, libc.VaList(bp, mxHeight)) rc = SQLITE_ERROR } return rc @@ -57390,10 +57384,10 @@ nExprElem = 1 } if nExprElem != nElem { - Xsqlite3ErrorMsg(tls, pParse, ts+7477, + Xsqlite3ErrorMsg(tls, pParse, ts+7466, libc.VaList(bp, nExprElem, func() uintptr { if nExprElem > 1 { - return ts + 7521 + return ts + 7510 } return ts + 1547 }(), nElem)) @@ -57434,7 +57428,7 @@ !(int32((*Parse)(unsafe.Pointer(pParse)).FeParseMode) >= PARSE_MODE_RENAME) { Xsqlite3ExprDeferredDelete(tls, pParse, pLeft) Xsqlite3ExprDeferredDelete(tls, pParse, pRight) - return Xsqlite3Expr(tls, db, TK_INTEGER, ts+7523) + return Xsqlite3Expr(tls, db, TK_INTEGER, ts+7512) } else { return Xsqlite3PExpr(tls, pParse, TK_AND, pLeft, pRight) } @@ -57460,7 +57454,7 @@ if pList != 0 && (*ExprList)(unsafe.Pointer(pList)).FnExpr > *(*int32)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb + 136 + 6*4)) && !(int32((*Parse)(unsafe.Pointer(pParse)).Fnested) != 0) { - Xsqlite3ErrorMsg(tls, pParse, ts+7525, libc.VaList(bp, pToken)) + Xsqlite3ErrorMsg(tls, pParse, ts+7514, libc.VaList(bp, pToken)) } *(*uintptr)(unsafe.Pointer(pNew + 32)) = pList *(*U32)(unsafe.Pointer(pNew + 4)) |= U32(EP_HasFunc) @@ -57488,7 +57482,7 @@ if (*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_FromDDL) != U32(0) { if (*FuncDef)(unsafe.Pointer(pDef)).FfuncFlags&U32(SQLITE_FUNC_DIRECT) != U32(0) || (*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).Fflags&uint64(SQLITE_TrustedSchema) == uint64(0) { - Xsqlite3ErrorMsg(tls, pParse, ts+7559, libc.VaList(bp, pExpr)) + Xsqlite3ErrorMsg(tls, pParse, ts+7548, libc.VaList(bp, pExpr)) } } } @@ -57535,7 +57529,7 @@ } if bOk == 0 || *(*I64)(unsafe.Pointer(bp + 8)) < int64(1) || *(*I64)(unsafe.Pointer(bp + 8)) > I64(*(*int32)(unsafe.Pointer(db + 136 + 9*4))) { - Xsqlite3ErrorMsg(tls, pParse, ts+7579, + Xsqlite3ErrorMsg(tls, pParse, ts+7568, libc.VaList(bp, *(*int32)(unsafe.Pointer(db + 136 + 9*4)))) Xsqlite3RecordErrorOffsetOfExpr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pExpr) return @@ -57560,7 +57554,7 @@ } (*Expr)(unsafe.Pointer(pExpr)).FiColumn = x if int32(x) > *(*int32)(unsafe.Pointer(db + 136 + 9*4)) { - Xsqlite3ErrorMsg(tls, pParse, ts+7622, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+7611, 0) Xsqlite3RecordErrorOffsetOfExpr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pExpr) } } @@ -58135,7 +58129,7 @@ if !(int32((*Expr)(unsafe.Pointer(pExpr)).Fop) != TK_SELECT && (*IdList)(unsafe.Pointer(pColumns)).FnId != libc.AssignInt32(&n, Xsqlite3ExprVectorSize(tls, pExpr))) { goto __3 } - Xsqlite3ErrorMsg(tls, pParse, ts+7645, + Xsqlite3ErrorMsg(tls, pParse, ts+7634, libc.VaList(bp, (*IdList)(unsafe.Pointer(pColumns)).FnId, n)) goto vector_append_error __3: @@ -58258,7 +58252,7 @@ var mx int32 = *(*int32)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb + 136 + 2*4)) if pEList != 0 && (*ExprList)(unsafe.Pointer(pEList)).FnExpr > mx { - Xsqlite3ErrorMsg(tls, pParse, ts+7675, libc.VaList(bp, zObject)) + Xsqlite3ErrorMsg(tls, pParse, ts+7664, libc.VaList(bp, zObject)) } } @@ -58314,10 +58308,10 @@ // "false" EP_IsFalse // anything else 0 func Xsqlite3IsTrueOrFalse(tls *libc.TLS, zIn uintptr) U32 { - if Xsqlite3StrICmp(tls, zIn, ts+6757) == 0 { + if Xsqlite3StrICmp(tls, zIn, ts+7687) == 0 { return U32(EP_IsTrue) } - if Xsqlite3StrICmp(tls, zIn, ts+6762) == 0 { + if Xsqlite3StrICmp(tls, zIn, ts+7692) == 0 { return U32(EP_IsFalse) } return U32(0) @@ 
-59392,7 +59386,7 @@ } if (*Select)(unsafe.Pointer(pSel)).FpLimit != 0 { var db uintptr = (*Parse)(unsafe.Pointer(pParse)).Fdb - pLimit = Xsqlite3Expr(tls, db, TK_INTEGER, ts+7523) + pLimit = Xsqlite3Expr(tls, db, TK_INTEGER, ts+7512) if pLimit != 0 { (*Expr)(unsafe.Pointer(pLimit)).FaffExpr = int8(SQLITE_AFF_NUMERIC) pLimit = Xsqlite3PExpr(tls, pParse, TK_NE, @@ -59830,6 +59824,7 @@ func Xsqlite3ExprCodeGeneratedColumn(tls *libc.TLS, pParse uintptr, pTab uintptr, pCol uintptr, regOut int32) { var iAddr int32 var v uintptr = (*Parse)(unsafe.Pointer(pParse)).FpVdbe + var nErr int32 = (*Parse)(unsafe.Pointer(pParse)).FnErr if (*Parse)(unsafe.Pointer(pParse)).FiSelfTab > 0 { iAddr = Xsqlite3VdbeAddOp3(tls, v, OP_IfNullRow, (*Parse)(unsafe.Pointer(pParse)).FiSelfTab-1, 0, regOut) @@ -59843,6 +59838,9 @@ if iAddr != 0 { Xsqlite3VdbeJumpHere(tls, v, iAddr) } + if (*Parse)(unsafe.Pointer(pParse)).FnErr > nErr { + (*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).FerrByteOffset = -1 + } } // Generate code to extract the value of the iCol-th column of a table. @@ -60061,6 +60059,7 @@ var p uintptr var v uintptr for p = (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr; p != 0; p = (*IndexedExpr)(unsafe.Pointer(p)).FpIENext { + var exprAff U8 var iDataCur int32 = (*IndexedExpr)(unsafe.Pointer(p)).FiDataCur if iDataCur < 0 { continue @@ -60074,6 +60073,14 @@ if Xsqlite3ExprCompare(tls, uintptr(0), pExpr, (*IndexedExpr)(unsafe.Pointer(p)).FpExpr, iDataCur) != 0 { continue } + + exprAff = U8(Xsqlite3ExprAffinity(tls, pExpr)) + if int32(exprAff) <= SQLITE_AFF_BLOB && int32((*IndexedExpr)(unsafe.Pointer(p)).Faff) != SQLITE_AFF_BLOB || + int32(exprAff) == SQLITE_AFF_TEXT && int32((*IndexedExpr)(unsafe.Pointer(p)).Faff) != SQLITE_AFF_TEXT || + int32(exprAff) >= SQLITE_AFF_NUMERIC && int32((*IndexedExpr)(unsafe.Pointer(p)).Faff) != SQLITE_AFF_NUMERIC { + continue + } + v = (*Parse)(unsafe.Pointer(pParse)).FpVdbe if (*IndexedExpr)(unsafe.Pointer(p)).FbMaybeNullRow != 0 { @@ -60847,7 +60854,7 @@ if !((*Expr)(unsafe.Pointer(pExpr)).FiTable != n1) { goto __122 } - Xsqlite3ErrorMsg(tls, pParse, ts+7645, + Xsqlite3ErrorMsg(tls, pParse, ts+7634, libc.VaList(bp+24, (*Expr)(unsafe.Pointer(pExpr)).FiTable, n1)) __122: ; @@ -60869,11 +60876,10 @@ return target __50: - if !(!((*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_Collate) != U32(0)) && - (*Expr)(unsafe.Pointer(pExpr)).FpLeft != 0 && - int32((*Expr)(unsafe.Pointer((*Expr)(unsafe.Pointer(pExpr)).FpLeft)).Fop) == TK_FUNCTION) { + if !!((*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_Collate) != U32(0)) { goto __123 } + inReg = Xsqlite3ExprCodeTarget(tls, pParse, (*Expr)(unsafe.Pointer(pExpr)).FpLeft, target) if !(inReg != target) { goto __125 @@ -60944,13 +60950,19 @@ ; __127: ; - addrINR = Xsqlite3VdbeAddOp1(tls, v, OP_IfNullRow, (*Expr)(unsafe.Pointer(pExpr)).FiTable) + addrINR = Xsqlite3VdbeAddOp3(tls, v, OP_IfNullRow, (*Expr)(unsafe.Pointer(pExpr)).FiTable, 0, target) (*Parse)(unsafe.Pointer(pParse)).FokConstFactor = U8(0) inReg = Xsqlite3ExprCodeTarget(tls, pParse, (*Expr)(unsafe.Pointer(pExpr)).FpLeft, target) (*Parse)(unsafe.Pointer(pParse)).FokConstFactor = okConstFactor + if !(inReg != target) { + goto __130 + } + Xsqlite3VdbeAddOp2(tls, v, OP_SCopy, inReg, target) + inReg = target +__130: + ; Xsqlite3VdbeJumpHere(tls, v, addrINR) - Xsqlite3VdbeChangeP3(tls, v, addrINR, inReg) goto __5 __56: @@ -60963,15 +60975,15 @@ nExpr = (*ExprList)(unsafe.Pointer(pEList)).FnExpr endLabel = Xsqlite3VdbeMakeLabel(tls, pParse) if !(libc.AssignUintptr(&pX, 
(*Expr)(unsafe.Pointer(pExpr)).FpLeft) != uintptr(0)) { - goto __130 + goto __131 } pDel = Xsqlite3ExprDup(tls, db1, pX, 0) if !((*Sqlite3)(unsafe.Pointer(db1)).FmallocFailed != 0) { - goto __131 + goto __132 } Xsqlite3ExprDelete(tls, db1, pDel) goto __5 -__131: +__132: ; exprToRegister(tls, pDel, exprCodeVector(tls, pParse, pDel, bp+40)) @@ -60981,22 +60993,22 @@ pTest = bp + 120 *(*int32)(unsafe.Pointer(bp + 40)) = 0 -__130: +__131: ; i1 = 0 -__132: +__133: if !(i1 < nExpr-1) { - goto __134 + goto __135 } if !(pX != 0) { - goto __135 + goto __136 } (*Expr)(unsafe.Pointer(bp + 120)).FpRight = (*ExprList_item)(unsafe.Pointer(aListelem + uintptr(i1)*32)).FpExpr - goto __136 -__135: - pTest = (*ExprList_item)(unsafe.Pointer(aListelem + uintptr(i1)*32)).FpExpr + goto __137 __136: + pTest = (*ExprList_item)(unsafe.Pointer(aListelem + uintptr(i1)*32)).FpExpr +__137: ; nextCase = Xsqlite3VdbeMakeLabel(tls, pParse) @@ -61005,21 +61017,21 @@ Xsqlite3ExprCode(tls, pParse, (*ExprList_item)(unsafe.Pointer(aListelem+uintptr(i1+1)*32)).FpExpr, target) Xsqlite3VdbeGoto(tls, v, endLabel) Xsqlite3VdbeResolveLabel(tls, v, nextCase) - goto __133 -__133: - i1 = i1 + 2 - goto __132 goto __134 __134: + i1 = i1 + 2 + goto __133 + goto __135 +__135: ; if !(nExpr&1 != 0) { - goto __137 + goto __138 } Xsqlite3ExprCode(tls, pParse, (*ExprList_item)(unsafe.Pointer(pEList+8+uintptr(nExpr-1)*32)).FpExpr, target) - goto __138 -__137: - Xsqlite3VdbeAddOp2(tls, v, OP_Null, 0, target) + goto __139 __138: + Xsqlite3VdbeAddOp2(tls, v, OP_Null, 0, target) +__139: ; Xsqlite3ExprDelete(tls, db1, pDel) setDoNotMergeFlagOnCopy(tls, v) @@ -61029,27 +61041,27 @@ __57: ; if !(!(int32((*Parse)(unsafe.Pointer(pParse)).FpTriggerTab) != 0) && !(int32((*Parse)(unsafe.Pointer(pParse)).Fnested) != 0)) { - goto __139 + goto __140 } Xsqlite3ErrorMsg(tls, pParse, ts+8075, 0) return 0 -__139: +__140: ; if !(int32((*Expr)(unsafe.Pointer(pExpr)).FaffExpr) == OE_Abort) { - goto __140 + goto __141 } Xsqlite3MayAbort(tls, pParse) -__140: +__141: ; if !(int32((*Expr)(unsafe.Pointer(pExpr)).FaffExpr) == OE_Ignore) { - goto __141 + goto __142 } Xsqlite3VdbeAddOp4(tls, v, OP_Halt, SQLITE_OK, OE_Ignore, 0, *(*uintptr)(unsafe.Pointer(pExpr + 8)), 0) - goto __142 -__141: + goto __143 +__142: Xsqlite3HaltConstraint(tls, pParse, func() int32 { if (*Parse)(unsafe.Pointer(pParse)).FpTriggerTab != 0 { @@ -61058,7 +61070,7 @@ return SQLITE_ERROR }(), int32((*Expr)(unsafe.Pointer(pExpr)).FaffExpr), *(*uintptr)(unsafe.Pointer(pExpr + 8)), int8(0), uint8(0)) -__142: +__143: ; goto __5 @@ -63729,7 +63741,7 @@ return SQLITE_NOMEM } if Xsqlite3_strnicmp(tls, zSql, ts+10915, 7) != 0 { - return Xsqlite3CorruptError(tls, 113494) + return Xsqlite3CorruptError(tls, 113516) } (*Sqlite3)(unsafe.Pointer(db)).Finit.FiDb = func() uint8 { if bTemp != 0 { @@ -63746,7 +63758,7 @@ } if rc == SQLITE_OK && ((*Parse)(unsafe.Pointer(p)).FpNewTable == uintptr(0) && (*Parse)(unsafe.Pointer(p)).FpNewIndex == uintptr(0) && (*Parse)(unsafe.Pointer(p)).FpNewTrigger == uintptr(0)) { - rc = Xsqlite3CorruptError(tls, 113505) + rc = Xsqlite3CorruptError(tls, 113527) } (*Sqlite3)(unsafe.Pointer(db)).Finit.FiDb = U8(0) @@ -64667,7 +64679,7 @@ goto __2 } - rc = Xsqlite3CorruptError(tls, 114441) + rc = Xsqlite3CorruptError(tls, 114463) goto drop_column_done __2: ; @@ -69031,6 +69043,12 @@ pExpr = Xsqlite3PExpr(tls, pParse, TK_UPLUS, pExpr, uintptr(0)) __11: ; + if !(pExpr != 0 && int32((*Expr)(unsafe.Pointer(pExpr)).Fop) != TK_RAISE) { + goto __12 + } + (*Expr)(unsafe.Pointer(pExpr)).FaffExpr = 
(*Column)(unsafe.Pointer(pCol)).Faffinity +__12: + ; Xsqlite3ColumnSetExpr(tls, pParse, pTab, pCol, pExpr) pExpr = uintptr(0) goto generated_done @@ -70195,7 +70213,7 @@ if Xsqlite3_strnicmp(tls, (*Table)(unsafe.Pointer(pTab)).FzName+uintptr(7), ts+3279, 4) == 0 { return 0 } - if Xsqlite3_strnicmp(tls, (*Table)(unsafe.Pointer(pTab)).FzName+uintptr(7), ts+7126, 10) == 0 { + if Xsqlite3_strnicmp(tls, (*Table)(unsafe.Pointer(pTab)).FzName+uintptr(7), ts+7115, 10) == 0 { return 0 } return 1 @@ -71441,7 +71459,7 @@ goto __101 } Xsqlite3ErrorMsg(tls, pParse, ts+14133, 0) - (*Parse)(unsafe.Pointer(pParse)).Frc = Xsqlite3CorruptError(tls, 121835) + (*Parse)(unsafe.Pointer(pParse)).Frc = Xsqlite3CorruptError(tls, 121859) goto exit_create_index __101: ; @@ -73486,7 +73504,7 @@ goto __16 __15: wcf = U16(WHERE_ONEPASS_DESIRED | WHERE_DUPLICATES_OK) - if !((*NameContext)(unsafe.Pointer(bp+16)).FncFlags&NC_VarSelect != 0) { + if !((*NameContext)(unsafe.Pointer(bp+16)).FncFlags&NC_Subquery != 0) { goto __23 } bComplex = 1 @@ -79952,7 +79970,7 @@ if !!(Xsqlite3SafetyCheckOk(tls, db) != 0) { goto __1 } - return Xsqlite3MisuseError(tls, 131895) + return Xsqlite3MisuseError(tls, 131931) __1: ; if !(zSql == uintptr(0)) { @@ -81351,7 +81369,7 @@ } else if (*FuncDef)(unsafe.Pointer(p)).FxFinalize != uintptr(0) { zType = ts + 17506 } else { - zType = ts + 7521 + zType = ts + 7510 } Xsqlite3VdbeMultiLoad(tls, v, 1, ts+17508, libc.VaList(bp, (*FuncDef)(unsafe.Pointer(p)).FzName, isBuiltin, @@ -81512,6 +81530,7 @@ var zErr2 uintptr var k3 int32 var pCheck uintptr + var jmp7 int32 var jmp6 int32 var iCol1 int32 var uniqOk int32 @@ -82830,7 +82849,7 @@ goto __217 } pMod = (*HashElem)(unsafe.Pointer(j1)).Fdata - Xsqlite3VdbeMultiLoad(tls, v, 1, ts+7521, libc.VaList(bp+264, (*Module)(unsafe.Pointer(pMod)).FzName)) + Xsqlite3VdbeMultiLoad(tls, v, 1, ts+7510, libc.VaList(bp+264, (*Module)(unsafe.Pointer(pMod)).FzName)) goto __216 __216: j1 = (*HashElem)(unsafe.Pointer(j1)).Fnext @@ -82846,7 +82865,7 @@ if !(i6 < int32(uint64(unsafe.Sizeof(aPragmaName))/uint64(unsafe.Sizeof(PragmaName{})))) { goto __220 } - Xsqlite3VdbeMultiLoad(tls, v, 1, ts+7521, libc.VaList(bp+272, aPragmaName[i6].FzName)) + Xsqlite3VdbeMultiLoad(tls, v, 1, ts+7510, libc.VaList(bp+272, aPragmaName[i6].FzName)) goto __219 __219: i6++ @@ -83651,80 +83670,94 @@ jmp4 = integrityCheckResultRow(tls, v) Xsqlite3VdbeJumpHere(tls, v, jmp21) + if !((*Table)(unsafe.Pointer(pTab9)).FtabFlags&U32(TF_WithoutRowid) == U32(0)) { + goto __345 + } + Xsqlite3VdbeAddOp2(tls, v, OP_IdxRowid, *(*int32)(unsafe.Pointer(bp + 616))+j4, 3) + jmp7 = Xsqlite3VdbeAddOp3(tls, v, OP_Eq, 3, 0, r1+int32((*Index)(unsafe.Pointer(pIdx5)).FnColumn)-1) + + Xsqlite3VdbeLoadString(tls, v, 3, + ts+17922) + Xsqlite3VdbeAddOp3(tls, v, OP_Concat, 7, 3, 3) + Xsqlite3VdbeLoadString(tls, v, 4, ts+17958) + Xsqlite3VdbeGoto(tls, v, jmp5-1) + Xsqlite3VdbeJumpHere(tls, v, jmp7) +__345: + ; label6 = 0 kk = 0 -__345: +__346: if !(kk < int32((*Index)(unsafe.Pointer(pIdx5)).FnKeyCol)) { - goto __347 + goto __348 } if !(*(*uintptr)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx5)).FazColl + uintptr(kk)*8)) == uintptr(unsafe.Pointer(&Xsqlite3StrBINARY))) { - goto __348 + goto __349 } - goto __346 -__348: + goto __347 +__349: ; if !(label6 == 0) { - goto __349 + goto __350 } label6 = Xsqlite3VdbeMakeLabel(tls, pParse) -__349: +__350: ; Xsqlite3VdbeAddOp3(tls, v, OP_Column, *(*int32)(unsafe.Pointer(bp + 616))+j4, kk, 3) Xsqlite3VdbeAddOp3(tls, v, OP_Ne, 3, label6, r1+kk) - goto __346 -__346: - kk++ - goto __345 
goto __347 __347: + kk++ + goto __346 + goto __348 +__348: ; if !(label6 != 0) { - goto __350 + goto __351 } jmp6 = Xsqlite3VdbeAddOp0(tls, v, OP_Goto) Xsqlite3VdbeResolveLabel(tls, v, label6) Xsqlite3VdbeLoadString(tls, v, 3, ts+17896) Xsqlite3VdbeAddOp3(tls, v, OP_Concat, 7, 3, 3) - Xsqlite3VdbeLoadString(tls, v, 4, ts+17922) + Xsqlite3VdbeLoadString(tls, v, 4, ts+17969) Xsqlite3VdbeGoto(tls, v, jmp5-1) Xsqlite3VdbeJumpHere(tls, v, jmp6) -__350: +__351: ; if !(int32((*Index)(unsafe.Pointer(pIdx5)).FonError) != OE_None) { - goto __351 + goto __352 } uniqOk = Xsqlite3VdbeMakeLabel(tls, pParse) kk = 0 -__352: +__353: if !(kk < int32((*Index)(unsafe.Pointer(pIdx5)).FnKeyCol)) { - goto __354 + goto __355 } iCol1 = int32(*(*I16)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx5)).FaiColumn + uintptr(kk)*2))) if !(iCol1 >= 0 && uint32(int32(*(*uint8)(unsafe.Pointer((*Table)(unsafe.Pointer(pTab9)).FaCol + uintptr(iCol1)*24 + 8))&0xf>>0)) != 0) { - goto __355 + goto __356 } - goto __353 -__355: + goto __354 +__356: ; Xsqlite3VdbeAddOp2(tls, v, OP_IsNull, r1+kk, uniqOk) - goto __353 -__353: - kk++ - goto __352 goto __354 __354: + kk++ + goto __353 + goto __355 +__355: ; jmp61 = Xsqlite3VdbeAddOp1(tls, v, OP_Next, *(*int32)(unsafe.Pointer(bp + 616))+j4) Xsqlite3VdbeGoto(tls, v, uniqOk) Xsqlite3VdbeJumpHere(tls, v, jmp61) Xsqlite3VdbeAddOp4Int(tls, v, OP_IdxGT, *(*int32)(unsafe.Pointer(bp + 616))+j4, uniqOk, r1, int32((*Index)(unsafe.Pointer(pIdx5)).FnKeyCol)) - Xsqlite3VdbeLoadString(tls, v, 3, ts+17949) + Xsqlite3VdbeLoadString(tls, v, 3, ts+17996) Xsqlite3VdbeGoto(tls, v, jmp5) Xsqlite3VdbeResolveLabel(tls, v, uniqOk) -__351: +__352: ; Xsqlite3VdbeJumpHere(tls, v, jmp4) Xsqlite3ResolvePartIdxLabel(tls, pParse, *(*int32)(unsafe.Pointer(bp + 632))) @@ -83741,20 +83774,20 @@ Xsqlite3VdbeAddOp2(tls, v, OP_Next, *(*int32)(unsafe.Pointer(bp + 612)), loopTop) Xsqlite3VdbeJumpHere(tls, v, loopTop-1) if !!(isQuick != 0) { - goto __356 + goto __357 } - Xsqlite3VdbeLoadString(tls, v, 2, ts+17976) + Xsqlite3VdbeLoadString(tls, v, 2, ts+18023) j4 = 0 pIdx5 = (*Table)(unsafe.Pointer(pTab9)).FpIndex -__357: +__358: if !(pIdx5 != 0) { - goto __359 + goto __360 } if !(pPk1 == pIdx5) { - goto __360 + goto __361 } - goto __358 -__360: + goto __359 +__361: ; Xsqlite3VdbeAddOp2(tls, v, OP_Count, *(*int32)(unsafe.Pointer(bp + 616))+j4, 3) addr1 = Xsqlite3VdbeAddOp3(tls, v, OP_Eq, 8+j4, 0, 3) @@ -83763,21 +83796,21 @@ Xsqlite3VdbeAddOp3(tls, v, OP_Concat, 4, 2, 3) integrityCheckResultRow(tls, v) Xsqlite3VdbeJumpHere(tls, v, addr1) - goto __358 -__358: - pIdx5 = (*Index)(unsafe.Pointer(pIdx5)).FpNext - j4++ - goto __357 goto __359 __359: + pIdx5 = (*Index)(unsafe.Pointer(pIdx5)).FpNext + j4++ + goto __358 + goto __360 +__360: ; if !(pPk1 != 0) { - goto __361 + goto __362 } Xsqlite3ReleaseTempRange(tls, pParse, r2, int32((*Index)(unsafe.Pointer(pPk1)).FnKeyCol)) -__361: +__362: ; -__356: +__357: ; goto __291 __291: @@ -83795,14 +83828,14 @@ ; aOp2 = Xsqlite3VdbeAddOpList(tls, v, int32(uint64(unsafe.Sizeof(endCode))/uint64(unsafe.Sizeof(VdbeOpList{}))), uintptr(unsafe.Pointer(&endCode)), iLn5) if !(aOp2 != 0) { - goto __362 + goto __363 } (*VdbeOp)(unsafe.Pointer(aOp2)).Fp2 = 1 - *(*int32)(unsafe.Pointer(bp + 608)) (*VdbeOp)(unsafe.Pointer(aOp2 + 2*24)).Fp4type = int8(-1) - *(*uintptr)(unsafe.Pointer(aOp2 + 2*24 + 16)) = ts + 18005 + *(*uintptr)(unsafe.Pointer(aOp2 + 2*24 + 16)) = ts + 18052 (*VdbeOp)(unsafe.Pointer(aOp2 + 5*24)).Fp4type = int8(-1) *(*uintptr)(unsafe.Pointer(aOp2 + 5*24 + 16)) = Xsqlite3ErrStr(tls, 
SQLITE_CORRUPT) -__362: +__363: ; Xsqlite3VdbeChangeP3(tls, v, 0, Xsqlite3VdbeCurrentAddr(tls, v)-2) @@ -83810,27 +83843,27 @@ __45: if !!(zRight != 0) { - goto __363 + goto __364 } if !(Xsqlite3ReadSchema(tls, pParse) != 0) { - goto __365 + goto __366 } goto pragma_out -__365: +__366: ; returnSingleText(tls, v, encnames1[(*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).Fenc].FzName) - goto __364 -__363: + goto __365 +__364: if !((*Sqlite3)(unsafe.Pointer(db)).FmDbFlags&U32(DBFLAG_EncodingFixed) == U32(0)) { - goto __366 + goto __367 } pEnc = uintptr(unsafe.Pointer(&encnames1)) -__367: +__368: if !((*EncName)(unsafe.Pointer(pEnc)).FzName != 0) { - goto __369 + goto __370 } if !(0 == Xsqlite3StrICmp(tls, zRight, (*EncName)(unsafe.Pointer(pEnc)).FzName)) { - goto __370 + goto __371 } if (*EncName)(unsafe.Pointer(pEnc)).Fenc != 0 { enc = (*EncName)(unsafe.Pointer(pEnc)).Fenc @@ -83839,25 +83872,25 @@ } (*Schema)(unsafe.Pointer((*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb)).FpSchema)).Fenc = enc Xsqlite3SetTextEncoding(tls, db, enc) - goto __369 -__370: + goto __370 +__371: ; - goto __368 -__368: - pEnc += 16 - goto __367 goto __369 __369: + pEnc += 16 + goto __368 + goto __370 +__370: ; if !!(int32((*EncName)(unsafe.Pointer(pEnc)).FzName) != 0) { - goto __371 + goto __372 } - Xsqlite3ErrorMsg(tls, pParse, ts+18008, libc.VaList(bp+456, zRight)) -__371: + Xsqlite3ErrorMsg(tls, pParse, ts+18055, libc.VaList(bp+456, zRight)) +__372: ; -__366: +__367: ; -__364: +__365: ; goto __15 @@ -83865,15 +83898,15 @@ iCookie = int32((*PragmaName)(unsafe.Pointer(pPragma)).FiArg) Xsqlite3VdbeUsesBtree(tls, v, iDb) if !(zRight != 0 && int32((*PragmaName)(unsafe.Pointer(pPragma)).FmPragFlg)&PragFlg_ReadOnly == 0) { - goto __372 + goto __373 } aOp3 = Xsqlite3VdbeAddOpList(tls, v, int32(uint64(unsafe.Sizeof(setCookie))/uint64(unsafe.Sizeof(VdbeOpList{}))), uintptr(unsafe.Pointer(&setCookie)), 0) if !(0 != 0) { - goto __374 + goto __375 } goto __15 -__374: +__375: ; (*VdbeOp)(unsafe.Pointer(aOp3)).Fp1 = iDb (*VdbeOp)(unsafe.Pointer(aOp3 + 1*24)).Fp1 = iDb @@ -83881,41 +83914,41 @@ (*VdbeOp)(unsafe.Pointer(aOp3 + 1*24)).Fp3 = Xsqlite3Atoi(tls, zRight) (*VdbeOp)(unsafe.Pointer(aOp3 + 1*24)).Fp5 = U16(1) if !(iCookie == BTREE_SCHEMA_VERSION && (*Sqlite3)(unsafe.Pointer(db)).Fflags&uint64(SQLITE_Defensive) != uint64(0)) { - goto __375 + goto __376 } (*VdbeOp)(unsafe.Pointer(aOp3 + 1*24)).Fopcode = U8(OP_Noop) -__375: +__376: ; - goto __373 -__372: + goto __374 +__373: ; aOp4 = Xsqlite3VdbeAddOpList(tls, v, int32(uint64(unsafe.Sizeof(readCookie))/uint64(unsafe.Sizeof(VdbeOpList{}))), uintptr(unsafe.Pointer(&readCookie)), 0) if !(0 != 0) { - goto __376 + goto __377 } goto __15 -__376: +__377: ; (*VdbeOp)(unsafe.Pointer(aOp4)).Fp1 = iDb (*VdbeOp)(unsafe.Pointer(aOp4 + 1*24)).Fp1 = iDb (*VdbeOp)(unsafe.Pointer(aOp4 + 1*24)).Fp3 = iCookie Xsqlite3VdbeReusable(tls, v) -__373: +__374: ; goto __15 __47: i10 = 0 (*Parse)(unsafe.Pointer(pParse)).FnMem = 1 -__377: +__378: if !(libc.AssignUintptr(&zOpt, Xsqlite3_compileoption_get(tls, libc.PostIncInt32(&i10, 1))) != uintptr(0)) { - goto __378 + goto __379 } Xsqlite3VdbeLoadString(tls, v, 1, zOpt) Xsqlite3VdbeAddOp2(tls, v, OP_ResultRow, 1, 1) - goto __377 -__378: + goto __378 +__379: ; Xsqlite3VdbeReusable(tls, v) @@ -83930,31 +83963,31 @@ }() eMode2 = SQLITE_CHECKPOINT_PASSIVE if !(zRight != 0) { - goto __379 + goto __380 } if !(Xsqlite3StrICmp(tls, zRight, ts+17338) == 0) { - goto __380 + goto __381 } eMode2 = SQLITE_CHECKPOINT_FULL - goto __381 
-__380: - if !(Xsqlite3StrICmp(tls, zRight, ts+18033) == 0) { - goto __382 + goto __382 +__381: + if !(Xsqlite3StrICmp(tls, zRight, ts+18080) == 0) { + goto __383 } eMode2 = SQLITE_CHECKPOINT_RESTART - goto __383 -__382: + goto __384 +__383: if !(Xsqlite3StrICmp(tls, zRight, ts+17491) == 0) { - goto __384 + goto __385 } eMode2 = SQLITE_CHECKPOINT_TRUNCATE -__384: +__385: ; -__383: +__384: ; -__381: +__382: ; -__379: +__380: ; (*Parse)(unsafe.Pointer(pParse)).FnMem = 3 Xsqlite3VdbeAddOp3(tls, v, OP_Checkpoint, iBt, eMode2, 1) @@ -83964,10 +83997,10 @@ __49: if !(zRight != 0) { - goto __385 + goto __386 } Xsqlite3_wal_autocheckpoint(tls, db, Xsqlite3Atoi(tls, zRight)) -__385: +__386: ; returnSingleInt(tls, v, func() int64 { @@ -83987,19 +84020,19 @@ __51: if !(zRight != 0) { - goto __386 + goto __387 } opMask = U32(Xsqlite3Atoi(tls, zRight)) if !(opMask&U32(0x02) == U32(0)) { - goto __388 + goto __389 } goto __15 -__388: +__389: ; - goto __387 -__386: - opMask = U32(0xfffe) + goto __388 __387: + opMask = U32(0xfffe) +__388: ; iTabCur = libc.PostIncInt32(&(*Parse)(unsafe.Pointer(pParse)).FnTab, 1) iDbLast = func() int32 { @@ -84008,86 +84041,86 @@ } return (*Sqlite3)(unsafe.Pointer(db)).FnDb - 1 }() -__389: +__390: if !(iDb <= iDbLast) { - goto __391 + goto __392 } if !(iDb == 1) { - goto __392 + goto __393 } - goto __390 -__392: + goto __391 +__393: ; Xsqlite3CodeVerifySchema(tls, pParse, iDb) pSchema = (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + uintptr(iDb)*32)).FpSchema k4 = (*Hash)(unsafe.Pointer(pSchema + 8)).Ffirst -__393: +__394: if !(k4 != 0) { - goto __395 + goto __396 } pTab10 = (*HashElem)(unsafe.Pointer(k4)).Fdata if !((*Table)(unsafe.Pointer(pTab10)).FtabFlags&U32(TF_StatsUsed) == U32(0)) { - goto __396 + goto __397 } - goto __394 -__396: + goto __395 +__397: ; szThreshold = LogEst(int32((*Table)(unsafe.Pointer(pTab10)).FnRowLogEst) + 46) pIdx6 = (*Table)(unsafe.Pointer(pTab10)).FpIndex -__397: +__398: if !(pIdx6 != 0) { - goto __399 + goto __400 } if !!(int32(*(*uint16)(unsafe.Pointer(pIdx6 + 100))&0x80>>7) != 0) { - goto __400 + goto __401 } szThreshold = int16(0) - goto __399 -__400: + goto __400 +__401: ; - goto __398 -__398: - pIdx6 = (*Index)(unsafe.Pointer(pIdx6)).FpNext - goto __397 goto __399 __399: + pIdx6 = (*Index)(unsafe.Pointer(pIdx6)).FpNext + goto __398 + goto __400 +__400: ; if !(szThreshold != 0) { - goto __401 + goto __402 } Xsqlite3OpenTable(tls, pParse, iTabCur, iDb, pTab10, OP_OpenRead) Xsqlite3VdbeAddOp3(tls, v, OP_IfSmaller, iTabCur, int32(U32(Xsqlite3VdbeCurrentAddr(tls, v)+2)+opMask&U32(1)), int32(szThreshold)) -__401: +__402: ; - zSubSql = Xsqlite3MPrintf(tls, db, ts+18041, + zSubSql = Xsqlite3MPrintf(tls, db, ts+18088, libc.VaList(bp+464, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName, (*Table)(unsafe.Pointer(pTab10)).FzName)) if !(opMask&U32(0x01) != 0) { - goto __402 + goto __403 } r11 = Xsqlite3GetTempReg(tls, pParse) Xsqlite3VdbeAddOp4(tls, v, OP_String8, 0, r11, 0, zSubSql, -6) Xsqlite3VdbeAddOp2(tls, v, OP_ResultRow, r11, 1) - goto __403 -__402: - Xsqlite3VdbeAddOp4(tls, v, OP_SqlExec, 0, 0, 0, zSubSql, -6) + goto __404 __403: + Xsqlite3VdbeAddOp4(tls, v, OP_SqlExec, 0, 0, 0, zSubSql, -6) +__404: ; - goto __394 -__394: - k4 = (*HashElem)(unsafe.Pointer(k4)).Fnext - goto __393 goto __395 __395: + k4 = (*HashElem)(unsafe.Pointer(k4)).Fnext + goto __394 + goto __396 +__396: ; - goto __390 -__390: - iDb++ - goto __389 goto __391 __391: + iDb++ + goto __390 + goto __392 +__392: ; 
Xsqlite3VdbeAddOp0(tls, v, OP_Expire) goto __15 @@ -84095,36 +84128,36 @@ __52: ; if !(zRight != 0) { - goto __404 + goto __405 } Xsqlite3_busy_timeout(tls, db, Xsqlite3Atoi(tls, zRight)) -__404: +__405: ; returnSingleInt(tls, v, int64((*Sqlite3)(unsafe.Pointer(db)).FbusyTimeout)) goto __15 __53: if !(zRight != 0 && Xsqlite3DecOrHexToI64(tls, zRight, bp+640) == SQLITE_OK) { - goto __405 + goto __406 } Xsqlite3_soft_heap_limit64(tls, *(*Sqlite3_int64)(unsafe.Pointer(bp + 640))) -__405: +__406: ; returnSingleInt(tls, v, Xsqlite3_soft_heap_limit64(tls, int64(-1))) goto __15 __54: if !(zRight != 0 && Xsqlite3DecOrHexToI64(tls, zRight, bp+648) == SQLITE_OK) { - goto __406 + goto __407 } iPrior = Xsqlite3_hard_heap_limit64(tls, int64(-1)) if !(*(*Sqlite3_int64)(unsafe.Pointer(bp + 648)) > int64(0) && (iPrior == int64(0) || iPrior > *(*Sqlite3_int64)(unsafe.Pointer(bp + 648)))) { - goto __407 + goto __408 } Xsqlite3_hard_heap_limit64(tls, *(*Sqlite3_int64)(unsafe.Pointer(bp + 648))) -__407: +__408: ; -__406: +__407: ; returnSingleInt(tls, v, Xsqlite3_hard_heap_limit64(tls, int64(-1))) goto __15 @@ -84133,10 +84166,10 @@ if !(zRight != 0 && Xsqlite3DecOrHexToI64(tls, zRight, bp+656) == SQLITE_OK && *(*Sqlite3_int64)(unsafe.Pointer(bp + 656)) >= int64(0)) { - goto __408 + goto __409 } Xsqlite3_limit(tls, db, SQLITE_LIMIT_WORKER_THREADS, int32(*(*Sqlite3_int64)(unsafe.Pointer(bp + 656))&int64(0x7fffffff))) -__408: +__409: ; returnSingleInt(tls, v, int64(Xsqlite3_limit(tls, db, SQLITE_LIMIT_WORKER_THREADS, -1))) goto __15 @@ -84145,10 +84178,10 @@ if !(zRight != 0 && Xsqlite3DecOrHexToI64(tls, zRight, bp+664) == SQLITE_OK && *(*Sqlite3_int64)(unsafe.Pointer(bp + 664)) >= int64(0)) { - goto __409 + goto __410 } (*Sqlite3)(unsafe.Pointer(db)).FnAnalysisLimit = int32(*(*Sqlite3_int64)(unsafe.Pointer(bp + 664)) & int64(0x7fffffff)) -__409: +__410: ; returnSingleInt(tls, v, int64((*Sqlite3)(unsafe.Pointer(db)).FnAnalysisLimit)) goto __15 @@ -84156,10 +84189,10 @@ __15: ; if !(int32((*PragmaName)(unsafe.Pointer(pPragma)).FmPragFlg)&PragFlg_NoColumns1 != 0 && zRight != 0) { - goto __410 + goto __411 } -__410: +__411: ; pragma_out: Xsqlite3DbFree(tls, db, zLeft) @@ -84211,14 +84244,14 @@ {Fopcode: U8(OP_Goto), Fp2: int8(3)}, } var encnames1 = [9]EncName{ - {FzName: ts + 18059, Fenc: U8(SQLITE_UTF8)}, - {FzName: ts + 18064, Fenc: U8(SQLITE_UTF8)}, - {FzName: ts + 18070, Fenc: U8(SQLITE_UTF16LE)}, - {FzName: ts + 18079, Fenc: U8(SQLITE_UTF16BE)}, - {FzName: ts + 18088, Fenc: U8(SQLITE_UTF16LE)}, - {FzName: ts + 18096, Fenc: U8(SQLITE_UTF16BE)}, - {FzName: ts + 18104}, - {FzName: ts + 18111}, + {FzName: ts + 18106, Fenc: U8(SQLITE_UTF8)}, + {FzName: ts + 18111, Fenc: U8(SQLITE_UTF8)}, + {FzName: ts + 18117, Fenc: U8(SQLITE_UTF16LE)}, + {FzName: ts + 18126, Fenc: U8(SQLITE_UTF16BE)}, + {FzName: ts + 18135, Fenc: U8(SQLITE_UTF16LE)}, + {FzName: ts + 18143, Fenc: U8(SQLITE_UTF16BE)}, + {FzName: ts + 18151}, + {FzName: ts + 18158}, {}, } var setCookie = [2]VdbeOpList{ @@ -84270,7 +84303,7 @@ _ = argc _ = argv Xsqlite3StrAccumInit(tls, bp+32, uintptr(0), bp+64, int32(unsafe.Sizeof([200]int8{})), 0) - Xsqlite3_str_appendall(tls, bp+32, ts+18117) + Xsqlite3_str_appendall(tls, bp+32, ts+18164) i = 0 j = int32((*PragmaName)(unsafe.Pointer(pPragma)).FiPragCName) __1: @@ -84278,7 +84311,7 @@ goto __3 } { - Xsqlite3_str_appendf(tls, bp+32, ts+18132, libc.VaList(bp, int32(cSep), pragCName[j])) + Xsqlite3_str_appendf(tls, bp+32, ts+18179, libc.VaList(bp, int32(cSep), pragCName[j])) cSep = int8(',') } @@ -84291,16 +84324,16 
@@ __3: ; if i == 0 { - Xsqlite3_str_appendf(tls, bp+32, ts+18139, libc.VaList(bp+16, (*PragmaName)(unsafe.Pointer(pPragma)).FzName)) + Xsqlite3_str_appendf(tls, bp+32, ts+18186, libc.VaList(bp+16, (*PragmaName)(unsafe.Pointer(pPragma)).FzName)) i++ } j = 0 if int32((*PragmaName)(unsafe.Pointer(pPragma)).FmPragFlg)&PragFlg_Result1 != 0 { - Xsqlite3_str_appendall(tls, bp+32, ts+18145) + Xsqlite3_str_appendall(tls, bp+32, ts+18192) j++ } if int32((*PragmaName)(unsafe.Pointer(pPragma)).FmPragFlg)&(PragFlg_SchemaOpt|PragFlg_SchemaReq) != 0 { - Xsqlite3_str_appendall(tls, bp+32, ts+18157) + Xsqlite3_str_appendall(tls, bp+32, ts+18204) j++ } Xsqlite3_str_append(tls, bp+32, ts+4950, 1) @@ -84483,13 +84516,13 @@ __3: ; Xsqlite3StrAccumInit(tls, bp+32, uintptr(0), uintptr(0), 0, *(*int32)(unsafe.Pointer((*PragmaVtab)(unsafe.Pointer(pTab)).Fdb + 136 + 1*4))) - Xsqlite3_str_appendall(tls, bp+32, ts+18172) + Xsqlite3_str_appendall(tls, bp+32, ts+18219) if *(*uintptr)(unsafe.Pointer(pCsr + 24 + 1*8)) != 0 { - Xsqlite3_str_appendf(tls, bp+32, ts+18180, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(pCsr + 24 + 1*8)))) + Xsqlite3_str_appendf(tls, bp+32, ts+18227, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(pCsr + 24 + 1*8)))) } Xsqlite3_str_appendall(tls, bp+32, (*PragmaName)(unsafe.Pointer((*PragmaVtab)(unsafe.Pointer(pTab)).FpName)).FzName) if *(*uintptr)(unsafe.Pointer(pCsr + 24)) != 0 { - Xsqlite3_str_appendf(tls, bp+32, ts+18184, libc.VaList(bp+16, *(*uintptr)(unsafe.Pointer(pCsr + 24)))) + Xsqlite3_str_appendf(tls, bp+32, ts+18231, libc.VaList(bp+16, *(*uintptr)(unsafe.Pointer(pCsr + 24)))) } zSql = Xsqlite3StrAccumFinish(tls, bp+32) if zSql == uintptr(0) { @@ -84566,12 +84599,12 @@ } else if *(*uintptr)(unsafe.Pointer((*InitData)(unsafe.Pointer(pData)).FpzErrMsg)) != uintptr(0) { } else if (*InitData)(unsafe.Pointer(pData)).FmInitFlags&U32(INITFLAG_AlterMask) != 0 { *(*uintptr)(unsafe.Pointer((*InitData)(unsafe.Pointer(pData)).FpzErrMsg)) = Xsqlite3MPrintf(tls, db, - ts+18188, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(azObj)), *(*uintptr)(unsafe.Pointer(azObj + 1*8)), + ts+18235, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(azObj)), *(*uintptr)(unsafe.Pointer(azObj + 1*8)), azAlterType[(*InitData)(unsafe.Pointer(pData)).FmInitFlags&U32(INITFLAG_AlterMask)-U32(1)], zExtra)) (*InitData)(unsafe.Pointer(pData)).Frc = SQLITE_ERROR } else if (*Sqlite3)(unsafe.Pointer(db)).Fflags&uint64(SQLITE_WriteSchema) != 0 { - (*InitData)(unsafe.Pointer(pData)).Frc = Xsqlite3CorruptError(tls, 137196) + (*InitData)(unsafe.Pointer(pData)).Frc = Xsqlite3CorruptError(tls, 137249) } else { var z uintptr var zObj uintptr @@ -84580,19 +84613,19 @@ } else { zObj = ts + 5001 } - z = Xsqlite3MPrintf(tls, db, ts+18216, libc.VaList(bp+32, zObj)) + z = Xsqlite3MPrintf(tls, db, ts+18263, libc.VaList(bp+32, zObj)) if zExtra != 0 && *(*int8)(unsafe.Pointer(zExtra)) != 0 { - z = Xsqlite3MPrintf(tls, db, ts+18247, libc.VaList(bp+40, z, zExtra)) + z = Xsqlite3MPrintf(tls, db, ts+18294, libc.VaList(bp+40, z, zExtra)) } *(*uintptr)(unsafe.Pointer((*InitData)(unsafe.Pointer(pData)).FpzErrMsg)) = z - (*InitData)(unsafe.Pointer(pData)).Frc = Xsqlite3CorruptError(tls, 137203) + (*InitData)(unsafe.Pointer(pData)).Frc = Xsqlite3CorruptError(tls, 137256) } } var azAlterType = [3]uintptr{ - ts + 18255, - ts + 18262, - ts + 18274, + ts + 18302, + ts + 18309, + ts + 18321, } // Check to see if any sibling index (another index on the same table) @@ -84684,7 +84717,7 @@ var pIndex uintptr pIndex = Xsqlite3FindIndex(tls, db, 
*(*uintptr)(unsafe.Pointer(argv + 1*8)), (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName) if pIndex == uintptr(0) { - corruptSchema(tls, pData, argv, ts+18285) + corruptSchema(tls, pData, argv, ts+18332) } else if Xsqlite3GetUInt32(tls, *(*uintptr)(unsafe.Pointer(argv + 3*8)), pIndex+88) == 0 || (*Index)(unsafe.Pointer(pIndex)).Ftnum < Pgno(2) || (*Index)(unsafe.Pointer(pIndex)).Ftnum > (*InitData)(unsafe.Pointer(pData)).FmxPage || @@ -84732,7 +84765,7 @@ }()) *(*uintptr)(unsafe.Pointer(bp + 16 + 2*8)) = *(*uintptr)(unsafe.Pointer(bp + 16 + 1*8)) *(*uintptr)(unsafe.Pointer(bp + 16 + 3*8)) = ts + 7931 - *(*uintptr)(unsafe.Pointer(bp + 16 + 4*8)) = ts + 18298 + *(*uintptr)(unsafe.Pointer(bp + 16 + 4*8)) = ts + 18345 *(*uintptr)(unsafe.Pointer(bp + 16 + 5*8)) = uintptr(0) (*InitData)(unsafe.Pointer(bp + 64)).Fdb = db (*InitData)(unsafe.Pointer(bp + 64)).FiDb = iDb @@ -84861,7 +84894,7 @@ if !(int32((*Schema)(unsafe.Pointer((*Db)(unsafe.Pointer(pDb)).FpSchema)).Ffile_format) > SQLITE_MAX_FILE_FORMAT) { goto __19 } - Xsqlite3SetString(tls, pzErrMsg, db, ts+18370) + Xsqlite3SetString(tls, pzErrMsg, db, ts+18417) rc = SQLITE_ERROR goto initone_error_out __19: @@ -84875,7 +84908,7 @@ (*InitData)(unsafe.Pointer(bp + 64)).FmxPage = Xsqlite3BtreeLastPage(tls, (*Db)(unsafe.Pointer(pDb)).FpBt) zSql = Xsqlite3MPrintf(tls, db, - ts+18394, + ts+18441, libc.VaList(bp, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName, zSchemaTabName)) xAuth = (*Sqlite3)(unsafe.Pointer(db)).FxAuth @@ -85207,7 +85240,7 @@ goto __8 } zDb = (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + uintptr(i)*32)).FzDbSName - Xsqlite3ErrorWithMsg(tls, db, rc, ts+18428, libc.VaList(bp, zDb)) + Xsqlite3ErrorWithMsg(tls, db, rc, ts+18475, libc.VaList(bp, zDb)) goto end_prepare __8: @@ -85237,7 +85270,7 @@ if !(nBytes > mxLen) { goto __12 } - Xsqlite3ErrorWithMsg(tls, db, SQLITE_TOOBIG, ts+18458, 0) + Xsqlite3ErrorWithMsg(tls, db, SQLITE_TOOBIG, ts+18505, 0) rc = Xsqlite3ApiExit(tls, db, SQLITE_TOOBIG) goto end_prepare __12: @@ -85333,7 +85366,7 @@ *(*uintptr)(unsafe.Pointer(ppStmt)) = uintptr(0) if !(Xsqlite3SafetyCheckOk(tls, db) != 0) || zSql == uintptr(0) { - return Xsqlite3MisuseError(tls, 137995) + return Xsqlite3MisuseError(tls, 138048) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) Xsqlite3BtreeEnterAll(tls, db) @@ -85432,7 +85465,7 @@ *(*uintptr)(unsafe.Pointer(ppStmt)) = uintptr(0) if !(Xsqlite3SafetyCheckOk(tls, db) != 0) || zSql == uintptr(0) { - return Xsqlite3MisuseError(tls, 138143) + return Xsqlite3MisuseError(tls, 138196) } if nBytes >= 0 { var sz int32 @@ -85759,13 +85792,13 @@ zSp2++ } Xsqlite3ErrorMsg(tls, pParse, - ts+18477, libc.VaList(bp, pA, zSp1, pB, zSp2, pC)) + ts+18524, libc.VaList(bp, pA, zSp1, pB, zSp2, pC)) jointype = JT_INNER } return jointype } -var zKeyText = *(*[34]int8)(unsafe.Pointer(ts + 18507)) +var zKeyText = *(*[34]int8)(unsafe.Pointer(ts + 18554)) var aKeyword = [7]struct { Fi U8 FnChar U8 @@ -85940,7 +85973,7 @@ var pUsing uintptr = uintptr(0) if uint32(int32(*(*uint16)(unsafe.Pointer(pRight + 60 + 4))&0x400>>10)) != 0 || *(*uintptr)(unsafe.Pointer(pRight + 72)) != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+18541, libc.VaList(bp, 0)) + ts+18588, libc.VaList(bp, 0)) return 1 } for j = 0; j < int32((*Table)(unsafe.Pointer(pRightTab)).FnCol); j++ { @@ -85985,7 +86018,7 @@ tableAndColumnIndex(tls, pSrc, 0, i, zName, bp+24, bp+28, int32(*(*uint16)(unsafe.Pointer(pRight + 60 + 4))&0x1000>>12)) == 0 { 
Xsqlite3ErrorMsg(tls, pParse, - ts+18591, libc.VaList(bp+8, zName)) + ts+18638, libc.VaList(bp+8, zName)) return 1 } pE1 = Xsqlite3CreateColumnExpr(tls, db, pSrc, *(*int32)(unsafe.Pointer(bp + 24)), *(*int32)(unsafe.Pointer(bp + 28))) @@ -85996,7 +86029,7 @@ int32(*(*uint16)(unsafe.Pointer(pRight + 60 + 4))&0x1000>>12)) != 0 { if int32(*(*uint16)(unsafe.Pointer(pSrc + 8 + uintptr(*(*int32)(unsafe.Pointer(bp + 24)))*104 + 60 + 4))&0x400>>10) == 0 || Xsqlite3IdListIndex(tls, *(*uintptr)(unsafe.Pointer(pSrc + 8 + uintptr(*(*int32)(unsafe.Pointer(bp + 24)))*104 + 72)), zName) < 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+18655, + Xsqlite3ErrorMsg(tls, pParse, ts+18702, libc.VaList(bp+16, zName)) break } @@ -86624,16 +86657,16 @@ var z uintptr switch id { case TK_ALL: - z = ts + 18692 + z = ts + 18739 break case TK_INTERSECT: - z = ts + 18702 + z = ts + 18749 break case TK_EXCEPT: - z = ts + 18712 + z = ts + 18759 break default: - z = ts + 18719 + z = ts + 18766 break } return z @@ -86643,7 +86676,7 @@ bp := tls.Alloc(8) defer tls.Free(8) - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18725, libc.VaList(bp, zUsage)) + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18772, libc.VaList(bp, zUsage)) } func generateSortTail(tls *libc.TLS, pParse uintptr, p uintptr, pSort uintptr, nColumn int32, pDest uintptr) { @@ -86669,9 +86702,9 @@ var nRefKey int32 = 0 var aOutEx uintptr = (*Select)(unsafe.Pointer(p)).FpEList + 8 - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18748, libc.VaList(bp, func() uintptr { + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18795, libc.VaList(bp, func() uintptr { if (*SortCtx)(unsafe.Pointer(pSort)).FnOBSat > 0 { - return ts + 18779 + return ts + 18826 } return ts + 1547 }())) @@ -87015,7 +87048,7 @@ } else { var z uintptr = (*ExprList_item)(unsafe.Pointer(pEList + 8 + uintptr(i)*32)).FzEName if z == uintptr(0) { - z = Xsqlite3MPrintf(tls, db, ts+18794, libc.VaList(bp+16, i+1)) + z = Xsqlite3MPrintf(tls, db, ts+18841, libc.VaList(bp+16, i+1)) } else { z = Xsqlite3DbStrDup(tls, db, z) } @@ -87115,7 +87148,7 @@ if zName != 0 && !(Xsqlite3IsTrueOrFalse(tls, zName) != 0) { zName = Xsqlite3DbStrDup(tls, db, zName) } else { - zName = Xsqlite3MPrintf(tls, db, ts+18794, libc.VaList(bp, i+1)) + zName = Xsqlite3MPrintf(tls, db, ts+18841, libc.VaList(bp, i+1)) } *(*U32)(unsafe.Pointer(bp + 56)) = U32(0) @@ -87131,7 +87164,7 @@ nName = j } } - zName = Xsqlite3MPrintf(tls, db, ts+18803, libc.VaList(bp+8, nName, zName, libc.PreIncUint32(&*(*U32)(unsafe.Pointer(bp + 56)), 1))) + zName = Xsqlite3MPrintf(tls, db, ts+18850, libc.VaList(bp+8, nName, zName, libc.PreIncUint32(&*(*U32)(unsafe.Pointer(bp + 56)), 1))) Xsqlite3ProgressCheck(tls, pParse) if *(*U32)(unsafe.Pointer(bp + 56)) > U32(3) { Xsqlite3_randomness(tls, int32(unsafe.Sizeof(U32(0))), bp+56) @@ -87214,8 +87247,6 @@ (*Column)(unsafe.Pointer(pCol)).Faffinity = Xsqlite3ExprAffinity(tls, p) if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) <= SQLITE_AFF_NONE { (*Column)(unsafe.Pointer(pCol)).Faffinity = aff - } else if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) >= SQLITE_AFF_NUMERIC && int32((*Expr)(unsafe.Pointer(p)).Fop) == TK_CAST { - (*Column)(unsafe.Pointer(pCol)).Faffinity = int8(SQLITE_AFF_FLEXNUM) } if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) >= SQLITE_AFF_TEXT && (*Select)(unsafe.Pointer(pSelect)).FpNext != 0 { var m int32 = 0 @@ -87230,12 +87261,15 @@ } else if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) >= SQLITE_AFF_NUMERIC && m&0x02 != 0 { (*Column)(unsafe.Pointer(pCol)).Faffinity = int8(SQLITE_AFF_BLOB) 
} + if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) >= SQLITE_AFF_NUMERIC && int32((*Expr)(unsafe.Pointer(p)).Fop) == TK_CAST { + (*Column)(unsafe.Pointer(pCol)).Faffinity = int8(SQLITE_AFF_FLEXNUM) + } } zType = columnTypeImpl(tls, bp, p, uintptr(0), uintptr(0), uintptr(0)) if zType == uintptr(0) || int32((*Column)(unsafe.Pointer(pCol)).Faffinity) != int32(Xsqlite3AffinityType(tls, zType, uintptr(0))) { if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) == SQLITE_AFF_NUMERIC || int32((*Column)(unsafe.Pointer(pCol)).Faffinity) == SQLITE_AFF_FLEXNUM { - zType = ts + 18811 + zType = ts + 18858 } else { zType = uintptr(0) for j = 1; j < SQLITE_N_STDTYPE; j++ { @@ -87451,7 +87485,7 @@ if !((*Select)(unsafe.Pointer(p)).FpWin != 0) { goto __1 } - Xsqlite3ErrorMsg(tls, pParse, ts+18815, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+18862, 0) return __1: ; @@ -87542,7 +87576,7 @@ if !((*Select)(unsafe.Pointer(pFirstRec)).FselFlags&U32(SF_Aggregate) != 0) { goto __15 } - Xsqlite3ErrorMsg(tls, pParse, ts+18864, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+18911, 0) goto end_of_recursive_query __15: ; @@ -87562,7 +87596,7 @@ ; pSetup = (*Select)(unsafe.Pointer(pFirstRec)).FpPrior (*Select)(unsafe.Pointer(pSetup)).FpNext = uintptr(0) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18906, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18953, 0) rc = Xsqlite3Select(tls, pParse, pSetup, bp) (*Select)(unsafe.Pointer(pSetup)).FpNext = p if !(rc != 0) { @@ -87599,7 +87633,7 @@ Xsqlite3VdbeResolveLabel(tls, v, addrCont) (*Select)(unsafe.Pointer(pFirstRec)).FpPrior = uintptr(0) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18912, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18959, 0) Xsqlite3Select(tls, pParse, p, bp) (*Select)(unsafe.Pointer(pFirstRec)).FpPrior = pSetup @@ -87633,11 +87667,11 @@ p = (*Select)(unsafe.Pointer(p)).FpPrior nRow = nRow + bShowAll } - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18927, libc.VaList(bp, nRow, func() uintptr { + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18974, libc.VaList(bp, nRow, func() uintptr { if nRow == 1 { return ts + 1547 } - return ts + 18950 + return ts + 18997 }())) for p != 0 { selectInnerLoop(tls, pParse, p, -1, uintptr(0), uintptr(0), pDest, 1, 1) @@ -87738,8 +87772,8 @@ if !((*Select)(unsafe.Pointer(pPrior)).FpPrior == uintptr(0)) { goto __8 } - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18952, 0) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18967, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18999, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19014, 0) __8: ; switch int32((*Select)(unsafe.Pointer(p)).Fop) { @@ -87786,7 +87820,7 @@ ; __15: ; - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18692, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18739, 0) rc = Xsqlite3Select(tls, pParse, p, bp+16) @@ -87853,7 +87887,7 @@ pLimit = (*Select)(unsafe.Pointer(p)).FpLimit (*Select)(unsafe.Pointer(p)).FpLimit = uintptr(0) (*SelectDest)(unsafe.Pointer(bp + 64)).FeDest = op - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18986, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19033, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) rc = Xsqlite3Select(tls, pParse, p, bp+64) @@ -87915,7 +87949,7 @@ pLimit1 = (*Select)(unsafe.Pointer(p)).FpLimit (*Select)(unsafe.Pointer(p)).FpLimit = uintptr(0) (*SelectDest)(unsafe.Pointer(bp + 104)).FiSDParm = tab2 - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18986, libc.VaList(bp+8, 
Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19033, libc.VaList(bp+8, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) rc = Xsqlite3Select(tls, pParse, p, bp+104) @@ -88068,10 +88102,10 @@ defer tls.Free(8) if (*Select)(unsafe.Pointer(p)).FselFlags&U32(SF_Values) != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+19007, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+19054, 0) } else { Xsqlite3ErrorMsg(tls, pParse, - ts+19053, + ts+19100, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) } } @@ -88325,8 +88359,8 @@ (*Select)(unsafe.Pointer(pPrior)).FpNext = uintptr(0) (*Select)(unsafe.Pointer(pPrior)).FpOrderBy = Xsqlite3ExprListDup(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pOrderBy, 0) - Xsqlite3ResolveOrderGroupBy(tls, pParse, p, (*Select)(unsafe.Pointer(p)).FpOrderBy, ts+7227) - Xsqlite3ResolveOrderGroupBy(tls, pParse, pPrior, (*Select)(unsafe.Pointer(pPrior)).FpOrderBy, ts+7227) + Xsqlite3ResolveOrderGroupBy(tls, pParse, p, (*Select)(unsafe.Pointer(p)).FpOrderBy, ts+7216) + Xsqlite3ResolveOrderGroupBy(tls, pParse, pPrior, (*Select)(unsafe.Pointer(pPrior)).FpOrderBy, ts+7216) computeLimitRegisters(tls, pParse, p, labelEnd) if (*Select)(unsafe.Pointer(p)).FiLimit != 0 && op == TK_ALL { @@ -88353,13 +88387,13 @@ Xsqlite3SelectDestInit(tls, bp+8, SRT_Coroutine, regAddrA) Xsqlite3SelectDestInit(tls, bp+48, SRT_Coroutine, regAddrB) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19135, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19182, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) addrSelectA = Xsqlite3VdbeCurrentAddr(tls, v) + 1 addr1 = Xsqlite3VdbeAddOp3(tls, v, OP_InitCoroutine, regAddrA, 0, addrSelectA) (*Select)(unsafe.Pointer(pPrior)).FiLimit = regLimitA - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19146, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19193, 0) Xsqlite3Select(tls, pParse, pPrior, bp+8) Xsqlite3VdbeEndCoroutine(tls, v, regAddrA) Xsqlite3VdbeJumpHere(tls, v, addr1) @@ -88371,7 +88405,7 @@ savedOffset = (*Select)(unsafe.Pointer(p)).FiOffset (*Select)(unsafe.Pointer(p)).FiLimit = regLimitB (*Select)(unsafe.Pointer(p)).FiOffset = 0 - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19151, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19198, 0) Xsqlite3Select(tls, pParse, p, bp+48) (*Select)(unsafe.Pointer(p)).FiLimit = savedLimit (*Select)(unsafe.Pointer(p)).FiOffset = savedOffset @@ -88559,7 +88593,8 @@ Xsqlite3VectorErrorMsg(tls, (*SubstContext)(unsafe.Pointer(pSubst)).FpParse, pCopy) } else { var db uintptr = (*Parse)(unsafe.Pointer((*SubstContext)(unsafe.Pointer(pSubst)).FpParse)).Fdb - if (*SubstContext)(unsafe.Pointer(pSubst)).FisOuterJoin != 0 && int32((*Expr)(unsafe.Pointer(pCopy)).Fop) != TK_COLUMN { + if (*SubstContext)(unsafe.Pointer(pSubst)).FisOuterJoin != 0 && + (int32((*Expr)(unsafe.Pointer(pCopy)).Fop) != TK_COLUMN || (*Expr)(unsafe.Pointer(pCopy)).FiTable != (*SubstContext)(unsafe.Pointer(pSubst)).FiNewTable) { libc.Xmemset(tls, bp, 0, uint64(unsafe.Sizeof(Expr{}))) (*Expr)(unsafe.Pointer(bp)).Fop = U8(TK_IF_NULL_ROW) (*Expr)(unsafe.Pointer(bp)).FpLeft = pCopy @@ -89458,7 +89493,7 @@ for pIdx = (*Table)(unsafe.Pointer(pTab)).FpIndex; pIdx != 0 && Xsqlite3StrICmp(tls, (*Index)(unsafe.Pointer(pIdx)).FzName, zIndexedBy) != 0; pIdx = (*Index)(unsafe.Pointer(pIdx)).FpNext { } if !(pIdx != 0) { - Xsqlite3ErrorMsg(tls, 
pParse, ts+19157, libc.VaList(bp, zIndexedBy, 0)) + Xsqlite3ErrorMsg(tls, pParse, ts+19204, libc.VaList(bp, zIndexedBy, 0)) (*Parse)(unsafe.Pointer(pParse)).FcheckSchema = U8(1) return SQLITE_ERROR } @@ -89541,7 +89576,7 @@ defer tls.Free(8) if uint32(int32(*(*uint16)(unsafe.Pointer(pFrom + 60 + 4))&0x4>>2)) != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+19175, libc.VaList(bp, (*SrcItem)(unsafe.Pointer(pFrom)).FzName)) + Xsqlite3ErrorMsg(tls, pParse, ts+19222, libc.VaList(bp, (*SrcItem)(unsafe.Pointer(pFrom)).FzName)) return 1 } return 0 @@ -89670,7 +89705,7 @@ *(*U32)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pFrom)).FpSelect + 4)) |= U32(SF_CopyCte) if uint32(int32(*(*uint16)(unsafe.Pointer(pFrom + 60 + 4))&0x2>>1)) != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+19198, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(pFrom + 88)))) + Xsqlite3ErrorMsg(tls, pParse, ts+19245, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(pFrom + 88)))) return 2 } libc.SetBitFieldPtr16Uint32(pFrom+60+4, uint32(1), 8, 0x100) @@ -89693,7 +89728,7 @@ libc.SetBitFieldPtr16Uint32(pItem+60+4, uint32(1), 6, 0x40) if (*Select)(unsafe.Pointer(pRecTerm)).FselFlags&U32(SF_Recursive) != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+19218, libc.VaList(bp+16, (*Cte)(unsafe.Pointer(pCte)).FzName)) + ts+19265, libc.VaList(bp+16, (*Cte)(unsafe.Pointer(pCte)).FzName)) return 2 } *(*U32)(unsafe.Pointer(pRecTerm + 4)) |= U32(SF_Recursive) @@ -89709,7 +89744,7 @@ pRecTerm = (*Select)(unsafe.Pointer(pRecTerm)).FpPrior } - (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19261 + (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19308 pSavedWith = (*Parse)(unsafe.Pointer(pParse)).FpWith (*Parse)(unsafe.Pointer(pParse)).FpWith = *(*uintptr)(unsafe.Pointer(bp + 48)) if (*Select)(unsafe.Pointer(pSel)).FselFlags&U32(SF_Recursive) != 0 { @@ -89735,7 +89770,7 @@ pEList = (*Select)(unsafe.Pointer(pLeft)).FpEList if (*Cte)(unsafe.Pointer(pCte)).FpCols != 0 { if pEList != 0 && (*ExprList)(unsafe.Pointer(pEList)).FnExpr != (*ExprList)(unsafe.Pointer((*Cte)(unsafe.Pointer(pCte)).FpCols)).FnExpr { - Xsqlite3ErrorMsg(tls, pParse, ts+19284, + Xsqlite3ErrorMsg(tls, pParse, ts+19331, libc.VaList(bp+24, (*Cte)(unsafe.Pointer(pCte)).FzName, (*ExprList)(unsafe.Pointer(pEList)).FnExpr, (*ExprList)(unsafe.Pointer((*Cte)(unsafe.Pointer(pCte)).FpCols)).FnExpr)) (*Parse)(unsafe.Pointer(pParse)).FpWith = pSavedWith return 2 @@ -89746,9 +89781,9 @@ Xsqlite3ColumnsFromExprList(tls, pParse, pEList, pTab+54, pTab+8) if bMayRecursive != 0 { if (*Select)(unsafe.Pointer(pSel)).FselFlags&U32(SF_Recursive) != 0 { - (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19322 + (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19369 } else { - (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19356 + (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19403 } Xsqlite3WalkSelect(tls, pWalker, pSel) } @@ -89795,7 +89830,7 @@ if (*SrcItem)(unsafe.Pointer(pFrom)).FzAlias != 0 { (*Table)(unsafe.Pointer(pTab)).FzName = Xsqlite3DbStrDup(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, (*SrcItem)(unsafe.Pointer(pFrom)).FzAlias) } else { - (*Table)(unsafe.Pointer(pTab)).FzName = Xsqlite3MPrintf(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, ts+19394, libc.VaList(bp, pFrom)) + (*Table)(unsafe.Pointer(pTab)).FzName = Xsqlite3MPrintf(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, ts+19441, libc.VaList(bp, pFrom)) } for (*Select)(unsafe.Pointer(pSel)).FpPrior != 0 { pSel = (*Select)(unsafe.Pointer(pSel)).FpPrior @@ -89907,7 +89942,7 @@ return WRC_Abort } if (*Table)(unsafe.Pointer(pTab)).FnTabRef >= U32(0xffff) { - Xsqlite3ErrorMsg(tls, pParse, 
ts+19398, + Xsqlite3ErrorMsg(tls, pParse, ts+19445, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName)) (*SrcItem)(unsafe.Pointer(pFrom)).FpTab = uintptr(0) return WRC_Abort @@ -89926,7 +89961,7 @@ if int32((*Table)(unsafe.Pointer(pTab)).FeTabType) == TABTYP_VIEW { if (*Sqlite3)(unsafe.Pointer(db)).Fflags&uint64(SQLITE_EnableView) == uint64(0) && (*Table)(unsafe.Pointer(pTab)).FpSchema != (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+1*32)).FpSchema { - Xsqlite3ErrorMsg(tls, pParse, ts+19437, + Xsqlite3ErrorMsg(tls, pParse, ts+19484, libc.VaList(bp+8, (*Table)(unsafe.Pointer(pTab)).FzName)) } (*SrcItem)(unsafe.Pointer(pFrom)).FpSelect = Xsqlite3SelectDup(tls, db, *(*uintptr)(unsafe.Pointer(pTab + 64)), 0) @@ -90050,7 +90085,7 @@ if pNew != 0 { var pX uintptr = pNew + 8 + uintptr((*ExprList)(unsafe.Pointer(pNew)).FnExpr-1)*32 - (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3MPrintf(tls, db, ts+19468, libc.VaList(bp+24, zUName)) + (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3MPrintf(tls, db, ts+19515, libc.VaList(bp+24, zUName)) libc.SetBitFieldPtr16Uint32(pX+16+4, uint32(ENAME_TAB), 0, 0x3) libc.SetBitFieldPtr16Uint32(pX+16+4, uint32(1), 7, 0x80) } @@ -90115,7 +90150,7 @@ (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3DbStrDup(tls, db, (*ExprList_item)(unsafe.Pointer(pNestedFrom+8+uintptr(j)*32)).FzEName) } else { - (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3MPrintf(tls, db, ts+19473, + (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3MPrintf(tls, db, ts+19520, libc.VaList(bp+32, zSchemaName, zTabName, zName)) } @@ -90146,9 +90181,9 @@ ; if !(tableSeen != 0) { if zTName != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+19482, libc.VaList(bp+72, zTName)) + Xsqlite3ErrorMsg(tls, pParse, ts+19529, libc.VaList(bp+72, zTName)) } else { - Xsqlite3ErrorMsg(tls, pParse, ts+19500, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+19547, 0) } } } @@ -90158,7 +90193,7 @@ } if (*Select)(unsafe.Pointer(p)).FpEList != 0 { if (*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer(p)).FpEList)).FnExpr > *(*int32)(unsafe.Pointer(db + 136 + 2*4)) { - Xsqlite3ErrorMsg(tls, pParse, ts+19520, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+19567, 0) return WRC_Abort } if elistFlags&U32(EP_HasFunc|EP_Subquery) != U32(0) { @@ -90296,7 +90331,7 @@ (*AggInfo)(unsafe.Pointer(pAggInfo)).FnColumn = (*AggInfo)(unsafe.Pointer(pAggInfo)).FnAccumulator if int32((*AggInfo)(unsafe.Pointer(pAggInfo)).FnSortingColumn) > 0 { if (*AggInfo)(unsafe.Pointer(pAggInfo)).FnColumn == 0 { - (*AggInfo)(unsafe.Pointer(pAggInfo)).FnSortingColumn = U16(0) + (*AggInfo)(unsafe.Pointer(pAggInfo)).FnSortingColumn = U16((*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer(pSelect)).FpGroupBy)).FnExpr) } else { (*AggInfo)(unsafe.Pointer(pAggInfo)).FnSortingColumn = U16(int32((*AggInfo_col)(unsafe.Pointer((*AggInfo)(unsafe.Pointer(pAggInfo)).FaCol+uintptr((*AggInfo)(unsafe.Pointer(pAggInfo)).FnColumn-1)*24)).FiSorterColumn) + 1) } @@ -90380,13 +90415,13 @@ if *(*uintptr)(unsafe.Pointer(pE + 32)) == uintptr(0) || (*ExprList)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pE + 32)))).FnExpr != 1 { Xsqlite3ErrorMsg(tls, pParse, - ts+19551, 0) + ts+19598, 0) (*AggInfo_func)(unsafe.Pointer(pFunc)).FiDistinct = -1 } else { var pKeyInfo uintptr = Xsqlite3KeyInfoFromExprList(tls, pParse, *(*uintptr)(unsafe.Pointer(pE + 32)), 0, 0) (*AggInfo_func)(unsafe.Pointer(pFunc)).FiDistAddr = Xsqlite3VdbeAddOp4(tls, v, OP_OpenEphemeral, (*AggInfo_func)(unsafe.Pointer(pFunc)).FiDistinct, 0, 0, pKeyInfo, -8) - Xsqlite3VdbeExplain(tls, pParse, 
uint8(0), ts+19602, libc.VaList(bp, (*FuncDef)(unsafe.Pointer((*AggInfo_func)(unsafe.Pointer(pFunc)).FpFunc)).FzName)) + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+19649, libc.VaList(bp, (*FuncDef)(unsafe.Pointer((*AggInfo_func)(unsafe.Pointer(pFunc)).FpFunc)).FzName)) } } @@ -90575,11 +90610,11 @@ if int32((*Parse)(unsafe.Pointer(pParse)).Fexplain) == 2 { var bCover int32 = libc.Bool32(pIdx != uintptr(0) && ((*Table)(unsafe.Pointer(pTab)).FtabFlags&U32(TF_WithoutRowid) == U32(0) || !(int32(*(*uint16)(unsafe.Pointer(pIdx + 100))&0x3>>0) == SQLITE_IDXTYPE_PRIMARYKEY))) - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+19635, + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+19682, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName, func() uintptr { if bCover != 0 { - return ts + 19647 + return ts + 19694 } return ts + 1547 }(), @@ -90907,7 +90942,7 @@ goto __7 } Xsqlite3ErrorMsg(tls, pParse, - ts+19670, + ts+19717, libc.VaList(bp, func() uintptr { if (*SrcItem)(unsafe.Pointer(p0)).FzAlias != 0 { return (*SrcItem)(unsafe.Pointer(p0)).FzAlias @@ -90968,7 +91003,7 @@ if !(int32((*Table)(unsafe.Pointer(pTab)).FnCol) != (*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer(pSub)).FpEList)).FnExpr) { goto __15 } - Xsqlite3ErrorMsg(tls, pParse, ts+19724, + Xsqlite3ErrorMsg(tls, pParse, ts+19771, libc.VaList(bp+8, int32((*Table)(unsafe.Pointer(pTab)).FnCol), (*Table)(unsafe.Pointer(pTab)).FzName, (*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer(pSub)).FpEList)).FnExpr)) goto select_end __15: @@ -91110,7 +91145,7 @@ (*SrcItem)(unsafe.Pointer(pItem1)).FaddrFillSub = addrTop Xsqlite3SelectDestInit(tls, bp+96, SRT_Coroutine, (*SrcItem)(unsafe.Pointer(pItem1)).FregReturn) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19764, libc.VaList(bp+32, pItem1)) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19811, libc.VaList(bp+32, pItem1)) Xsqlite3Select(tls, pParse, pSub1, bp+96) (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pItem1)).FpTab)).FnRowLogEst = (*Select)(unsafe.Pointer(pSub1)).FnSelectRow libc.SetBitFieldPtr16Uint32(pItem1+60+4, uint32(1), 5, 0x20) @@ -91169,7 +91204,7 @@ ; Xsqlite3SelectDestInit(tls, bp+96, SRT_EphemTab, (*SrcItem)(unsafe.Pointer(pItem1)).FiCursor) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19779, libc.VaList(bp+40, pItem1)) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19826, libc.VaList(bp+40, pItem1)) Xsqlite3Select(tls, pParse, pSub1, bp+96) (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pItem1)).FpTab)).FnRowLogEst = (*Select)(unsafe.Pointer(pSub1)).FnSelectRow if !(onceAddr != 0) { @@ -91640,9 +91675,9 @@ explainTempTable(tls, pParse, func() uintptr { if (*DistinctCtx)(unsafe.Pointer(bp+136)).FisTnct != 0 && (*Select)(unsafe.Pointer(p)).FselFlags&U32(SF_Distinct) == U32(0) { - return ts + 19795 + return ts + 19842 } - return ts + 19804 + return ts + 19851 }()) groupBySort = 1 @@ -91993,7 +92028,7 @@ if !(int32((*DistinctCtx)(unsafe.Pointer(bp+136)).FeTnctType) == WHERE_DISTINCT_UNORDERED) { goto __146 } - explainTempTable(tls, pParse, ts+19795) + explainTempTable(tls, pParse, ts+19842) __146: ; if !((*SortCtx)(unsafe.Pointer(bp+48)).FpOrderBy != 0) { @@ -92098,7 +92133,7 @@ } Xsqlite3_free(tls, (*TabResult)(unsafe.Pointer(p)).FzErrMsg) (*TabResult)(unsafe.Pointer(p)).FzErrMsg = Xsqlite3_mprintf(tls, - ts+19813, 0) + ts+19860, 0) (*TabResult)(unsafe.Pointer(p)).Frc = SQLITE_ERROR return 1 __11: @@ -92331,7 +92366,7 @@ if !((*Token)(unsafe.Pointer(pName2)).Fn > uint32(0)) { goto __3 } - Xsqlite3ErrorMsg(tls, pParse, ts+19878, 0) + Xsqlite3ErrorMsg(tls, 
pParse, ts+19925, 0) goto trigger_cleanup __3: ; @@ -92375,7 +92410,7 @@ goto trigger_cleanup __8: ; - Xsqlite3FixInit(tls, bp+40, pParse, iDb, ts+19924, *(*uintptr)(unsafe.Pointer(bp + 32))) + Xsqlite3FixInit(tls, bp+40, pParse, iDb, ts+19971, *(*uintptr)(unsafe.Pointer(bp + 32))) if !(Xsqlite3FixSrcList(tls, bp+40, pTableName) != 0) { goto __9 } @@ -92393,7 +92428,7 @@ if !(int32((*Table)(unsafe.Pointer(pTab)).FeTabType) == TABTYP_VTAB) { goto __11 } - Xsqlite3ErrorMsg(tls, pParse, ts+19932, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+19979, 0) goto trigger_orphan_error __11: ; @@ -92405,7 +92440,7 @@ goto trigger_cleanup __12: ; - if !(Xsqlite3CheckObjectName(tls, pParse, zName, ts+19924, (*Table)(unsafe.Pointer(pTab)).FzName) != 0) { + if !(Xsqlite3CheckObjectName(tls, pParse, zName, ts+19971, (*Table)(unsafe.Pointer(pTab)).FzName) != 0) { goto __13 } goto trigger_cleanup @@ -92420,11 +92455,12 @@ if !!(noErr != 0) { goto __16 } - Xsqlite3ErrorMsg(tls, pParse, ts+19973, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(bp + 32)))) + Xsqlite3ErrorMsg(tls, pParse, ts+20020, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(bp + 32)))) goto __17 __16: ; Xsqlite3CodeVerifySchema(tls, pParse, iDb) + __17: ; goto trigger_cleanup @@ -92435,19 +92471,19 @@ if !(Xsqlite3_strnicmp(tls, (*Table)(unsafe.Pointer(pTab)).FzName, ts+6374, 7) == 0) { goto __18 } - Xsqlite3ErrorMsg(tls, pParse, ts+19999, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+20046, 0) goto trigger_cleanup __18: ; if !(int32((*Table)(unsafe.Pointer(pTab)).FeTabType) == TABTYP_VIEW && tr_tm != TK_INSTEAD) { goto __19 } - Xsqlite3ErrorMsg(tls, pParse, ts+20037, + Xsqlite3ErrorMsg(tls, pParse, ts+20084, libc.VaList(bp+8, func() uintptr { if tr_tm == TK_BEFORE { - return ts + 20074 + return ts + 20121 } - return ts + 20081 + return ts + 20128 }(), pTableName+8)) goto trigger_orphan_error __19: @@ -92456,7 +92492,7 @@ goto __20 } Xsqlite3ErrorMsg(tls, pParse, - ts+20087, libc.VaList(bp+24, pTableName+8)) + ts+20134, libc.VaList(bp+24, pTableName+8)) goto trigger_orphan_error __20: ; @@ -92605,7 +92641,7 @@ __3: ; Xsqlite3TokenInit(tls, bp+56, (*Trigger)(unsafe.Pointer(pTrig)).FzName) - Xsqlite3FixInit(tls, bp+72, pParse, iDb, ts+19924, bp+56) + Xsqlite3FixInit(tls, bp+72, pParse, iDb, ts+19971, bp+56) if !(Xsqlite3FixTriggerStep(tls, bp+72, (*Trigger)(unsafe.Pointer(pTrig)).Fstep_list) != 0 || Xsqlite3FixExpr(tls, bp+72, (*Trigger)(unsafe.Pointer(pTrig)).FpWhen) != 0) { goto __4 @@ -92638,7 +92674,7 @@ goto __12 } Xsqlite3ErrorMsg(tls, pParse, - ts+20133, + ts+20180, libc.VaList(bp, (*Trigger)(unsafe.Pointer(pTrig)).FzName, (*TriggerStep)(unsafe.Pointer(pStep)).FzTarget)) goto triggerfinish_cleanup __12: @@ -92663,13 +92699,13 @@ z = Xsqlite3DbStrNDup(tls, db, (*Token)(unsafe.Pointer(pAll)).Fz, uint64((*Token)(unsafe.Pointer(pAll)).Fn)) Xsqlite3NestedParse(tls, pParse, - ts+20181, + ts+20228, libc.VaList(bp+16, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName, zName, (*Trigger)(unsafe.Pointer(pTrig)).Ftable, z)) Xsqlite3DbFree(tls, db, z) Xsqlite3ChangeCookie(tls, pParse, iDb) Xsqlite3VdbeAddParseSchemaOp(tls, v, iDb, - Xsqlite3MPrintf(tls, db, ts+20256, libc.VaList(bp+48, zName)), uint16(0)) + Xsqlite3MPrintf(tls, db, ts+20303, libc.VaList(bp+48, zName)), uint16(0)) __7: ; __6: @@ -92925,7 +92961,7 @@ if !!(noErr != 0) { goto __9 } - Xsqlite3ErrorMsg(tls, pParse, ts+20285, libc.VaList(bp, pName+8)) + Xsqlite3ErrorMsg(tls, pParse, ts+20332, libc.VaList(bp, pName+8)) goto __10 __9: Xsqlite3CodeVerifyNamedSchema(tls, pParse, 
zDb) @@ -92978,7 +93014,7 @@ if libc.AssignUintptr(&v, Xsqlite3GetVdbe(tls, pParse)) != uintptr(0) { Xsqlite3NestedParse(tls, pParse, - ts+20305, + ts+20352, libc.VaList(bp, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName, (*Trigger)(unsafe.Pointer(pTrigger)).FzName)) Xsqlite3ChangeCookie(tls, pParse, iDb) Xsqlite3VdbeAddOp4(tls, v, OP_DropTrigger, iDb, 0, 0, (*Trigger)(unsafe.Pointer(pTrigger)).FzName, 0) @@ -93092,12 +93128,12 @@ goto __15 } Xsqlite3ErrorMsg(tls, pParse, - ts+20367, + ts+20414, libc.VaList(bp, func() uintptr { if op == TK_DELETE { - return ts + 20415 + return ts + 20462 } - return ts + 20422 + return ts + 20469 }())) __15: ; @@ -93211,7 +93247,7 @@ if int32((*Expr)(unsafe.Pointer((*Expr)(unsafe.Pointer(pTerm)).FpRight)).Fop) != TK_ASTERISK { return 0 } - Xsqlite3ErrorMsg(tls, pParse, ts+20429, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+20476, 0) return 1 } @@ -93277,7 +93313,7 @@ } Xsqlite3ExprListDelete(tls, db, (*Select)(unsafe.Pointer(bp)).FpEList) pNew = sqlite3ExpandReturning(tls, pParse, (*Returning)(unsafe.Pointer(pReturning)).FpReturnEL, pTab) - if !(int32((*Sqlite3)(unsafe.Pointer(db)).FmallocFailed) != 0) { + if (*Parse)(unsafe.Pointer(pParse)).FnErr == 0 { libc.Xmemset(tls, bp+240, 0, uint64(unsafe.Sizeof(NameContext{}))) if (*Returning)(unsafe.Pointer(pReturning)).FnRetCol == 0 { (*Returning)(unsafe.Pointer(pReturning)).FnRetCol = (*ExprList)(unsafe.Pointer(pNew)).FnExpr @@ -93441,7 +93477,7 @@ if v != 0 { if (*Trigger)(unsafe.Pointer(pTrigger)).FzName != 0 { Xsqlite3VdbeChangeP4(tls, v, -1, - Xsqlite3MPrintf(tls, db, ts+20471, libc.VaList(bp, (*Trigger)(unsafe.Pointer(pTrigger)).FzName)), -6) + Xsqlite3MPrintf(tls, db, ts+20518, libc.VaList(bp, (*Trigger)(unsafe.Pointer(pTrigger)).FzName)), -6) } if (*Trigger)(unsafe.Pointer(pTrigger)).FpWhen != 0 { @@ -94034,7 +94070,7 @@ } Xsqlite3ErrorMsg(tls, pParse, - ts+20485, + ts+20532, libc.VaList(bp, (*Column)(unsafe.Pointer((*Table)(unsafe.Pointer(pTab)).FaCol+uintptr(j)*24)).FzCnName)) goto update_cleanup __27: @@ -94066,7 +94102,7 @@ iRowidExpr = i goto __30 __29: - Xsqlite3ErrorMsg(tls, pParse, ts+20521, libc.VaList(bp+8, (*ExprList_item)(unsafe.Pointer(pChanges+8+uintptr(i)*32)).FzEName)) + Xsqlite3ErrorMsg(tls, pParse, ts+20568, libc.VaList(bp+8, (*ExprList_item)(unsafe.Pointer(pChanges+8+uintptr(i)*32)).FzEName)) (*Parse)(unsafe.Pointer(pParse)).FcheckSchema = U8(1) goto update_cleanup __30: @@ -94392,7 +94428,12 @@ goto __77 __76: flags = WHERE_ONEPASS_DESIRED - if !(!(int32((*Parse)(unsafe.Pointer(pParse)).Fnested) != 0) && !(pTrigger != 0) && !(hasFK != 0) && !(chngKey != 0) && !(*(*int32)(unsafe.Pointer(bp + 104)) != 0)) { + if !(!(int32((*Parse)(unsafe.Pointer(pParse)).Fnested) != 0) && + !(pTrigger != 0) && + !(hasFK != 0) && + !(chngKey != 0) && + !(*(*int32)(unsafe.Pointer(bp + 104)) != 0) && + (*NameContext)(unsafe.Pointer(bp+40)).FncFlags&NC_Subquery == 0) { goto __78 } flags = flags | WHERE_ONEPASS_MULTIROW @@ -94946,7 +94987,7 @@ if !(regRowCount != 0) { goto __169 } - Xsqlite3CodeChangeCount(tls, v, regRowCount, ts+20540) + Xsqlite3CodeChangeCount(tls, v, regRowCount, ts+20587) __169: ; update_cleanup: @@ -95252,10 +95293,10 @@ if nClause == 0 && (*Upsert)(unsafe.Pointer(pUpsert)).FpNextUpsert == uintptr(0) { *(*int8)(unsafe.Pointer(bp + 216)) = int8(0) } else { - Xsqlite3_snprintf(tls, int32(unsafe.Sizeof([16]int8{})), bp+216, ts+20553, libc.VaList(bp, nClause+1)) + Xsqlite3_snprintf(tls, int32(unsafe.Sizeof([16]int8{})), bp+216, ts+20600, libc.VaList(bp, 
nClause+1)) } Xsqlite3ErrorMsg(tls, pParse, - ts+20557, libc.VaList(bp+8, bp+216)) + ts+20604, libc.VaList(bp+8, bp+216)) return SQLITE_ERROR } @@ -95378,7 +95419,7 @@ var zSubSql uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp)), 0) if zSubSql != 0 && - (libc.Xstrncmp(tls, zSubSql, ts+20630, uint64(3)) == 0 || libc.Xstrncmp(tls, zSubSql, ts+20634, uint64(3)) == 0) { + (libc.Xstrncmp(tls, zSubSql, ts+20677, uint64(3)) == 0 || libc.Xstrncmp(tls, zSubSql, ts+20681, uint64(3)) == 0) { rc = execSql(tls, db, pzErrMsg, zSubSql) if rc != SQLITE_OK { break @@ -95526,14 +95567,14 @@ if !!(int32((*Sqlite3)(unsafe.Pointer(db)).FautoCommit) != 0) { goto __1 } - Xsqlite3SetString(tls, pzErrMsg, db, ts+20638) + Xsqlite3SetString(tls, pzErrMsg, db, ts+20685) return SQLITE_ERROR __1: ; if !((*Sqlite3)(unsafe.Pointer(db)).FnVdbeActive > 1) { goto __2 } - Xsqlite3SetString(tls, pzErrMsg, db, ts+20678) + Xsqlite3SetString(tls, pzErrMsg, db, ts+20725) return SQLITE_ERROR __2: ; @@ -95544,7 +95585,7 @@ if !(Xsqlite3_value_type(tls, pOut) != SQLITE_TEXT) { goto __5 } - Xsqlite3SetString(tls, pzErrMsg, db, ts+20721) + Xsqlite3SetString(tls, pzErrMsg, db, ts+20768) return SQLITE_ERROR __5: ; @@ -95572,7 +95613,7 @@ isMemDb = Xsqlite3PagerIsMemdb(tls, Xsqlite3BtreePager(tls, pMain)) nDb = (*Sqlite3)(unsafe.Pointer(db)).FnDb - rc = execSqlF(tls, db, pzErrMsg, ts+20739, libc.VaList(bp, zOut)) + rc = execSqlF(tls, db, pzErrMsg, ts+20786, libc.VaList(bp, zOut)) (*Sqlite3)(unsafe.Pointer(db)).FopenFlags = saved_openFlags if !(rc != SQLITE_OK) { goto __6 @@ -95592,7 +95633,7 @@ goto __8 } rc = SQLITE_ERROR - Xsqlite3SetString(tls, pzErrMsg, db, ts+20762) + Xsqlite3SetString(tls, pzErrMsg, db, ts+20809) goto end_of_vacuum __8: ; @@ -95652,7 +95693,7 @@ (*Sqlite3)(unsafe.Pointer(db)).Finit.FiDb = U8(nDb) rc = execSqlF(tls, db, pzErrMsg, - ts+20789, + ts+20836, libc.VaList(bp+8, zDbMain)) if !(rc != SQLITE_OK) { goto __13 @@ -95661,7 +95702,7 @@ __13: ; rc = execSqlF(tls, db, pzErrMsg, - ts+20897, + ts+20944, libc.VaList(bp+16, zDbMain)) if !(rc != SQLITE_OK) { goto __14 @@ -95672,7 +95713,7 @@ (*Sqlite3)(unsafe.Pointer(db)).Finit.FiDb = U8(0) rc = execSqlF(tls, db, pzErrMsg, - ts+20951, + ts+20998, libc.VaList(bp+24, zDbMain)) *(*U32)(unsafe.Pointer(db + 44)) &= libc.Uint32FromInt32(libc.CplInt32(DBFLAG_Vacuum)) @@ -95683,7 +95724,7 @@ __15: ; rc = execSqlF(tls, db, pzErrMsg, - ts+21102, + ts+21149, libc.VaList(bp+32, zDbMain)) if !(rc != 0) { goto __16 @@ -96112,11 +96153,11 @@ if pEnd != 0 { (*Parse)(unsafe.Pointer(pParse)).FsNameToken.Fn = uint32(int32((int64((*Token)(unsafe.Pointer(pEnd)).Fz)-int64((*Parse)(unsafe.Pointer(pParse)).FsNameToken.Fz))/1)) + (*Token)(unsafe.Pointer(pEnd)).Fn } - zStmt = Xsqlite3MPrintf(tls, db, ts+21232, libc.VaList(bp, pParse+272)) + zStmt = Xsqlite3MPrintf(tls, db, ts+21279, libc.VaList(bp, pParse+272)) iDb = Xsqlite3SchemaToIndex(tls, db, (*Table)(unsafe.Pointer(pTab)).FpSchema) Xsqlite3NestedParse(tls, pParse, - ts+21256, + ts+21303, libc.VaList(bp+8, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName, (*Table)(unsafe.Pointer(pTab)).FzName, (*Table)(unsafe.Pointer(pTab)).FzName, @@ -96126,7 +96167,7 @@ Xsqlite3ChangeCookie(tls, pParse, iDb) Xsqlite3VdbeAddOp0(tls, v, OP_Expire) - zWhere = Xsqlite3MPrintf(tls, db, ts+21355, libc.VaList(bp+48, (*Table)(unsafe.Pointer(pTab)).FzName, zStmt)) + zWhere = Xsqlite3MPrintf(tls, db, ts+21402, libc.VaList(bp+48, (*Table)(unsafe.Pointer(pTab)).FzName, zStmt)) Xsqlite3VdbeAddParseSchemaOp(tls, v, 
iDb, zWhere, uint16(0)) Xsqlite3DbFree(tls, db, zStmt) @@ -96187,7 +96228,7 @@ for pCtx = (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx; pCtx != 0; pCtx = (*VtabCtx)(unsafe.Pointer(pCtx)).FpPrior { if (*VtabCtx)(unsafe.Pointer(pCtx)).FpTab == pTab { *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, - ts+21374, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName)) + ts+21421, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName)) return SQLITE_LOCKED } } @@ -96215,9 +96256,11 @@ (*VtabCtx)(unsafe.Pointer(bp + 32)).FpPrior = (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx (*VtabCtx)(unsafe.Pointer(bp + 32)).FbDeclared = 0 (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx = bp + 32 + (*Table)(unsafe.Pointer(pTab)).FnTabRef++ rc = (*struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, uintptr, uintptr) int32 })(unsafe.Pointer(&struct{ uintptr }{xConstruct})).f(tls, db, (*Module)(unsafe.Pointer(pMod)).FpAux, nArg, azArg, pVTable+16, bp+64) + Xsqlite3DeleteTable(tls, db, pTab) (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx = (*VtabCtx)(unsafe.Pointer(bp + 32)).FpPrior if rc == SQLITE_NOMEM { Xsqlite3OomFault(tls, db) @@ -96225,7 +96268,7 @@ if SQLITE_OK != rc { if *(*uintptr)(unsafe.Pointer(bp + 64)) == uintptr(0) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+21416, libc.VaList(bp+8, zModuleName)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+21463, libc.VaList(bp+8, zModuleName)) } else { *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+3656, libc.VaList(bp+16, *(*uintptr)(unsafe.Pointer(bp + 64)))) Xsqlite3_free(tls, *(*uintptr)(unsafe.Pointer(bp + 64))) @@ -96237,7 +96280,7 @@ (*Module)(unsafe.Pointer(pMod)).FnRefModule++ (*VTable)(unsafe.Pointer(pVTable)).FnRef = 1 if (*VtabCtx)(unsafe.Pointer(bp+32)).FbDeclared == 0 { - var zFormat uintptr = ts + 21446 + var zFormat uintptr = ts + 21493 *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, zFormat, libc.VaList(bp+24, (*Table)(unsafe.Pointer(pTab)).FzName)) Xsqlite3VtabUnlock(tls, pVTable) rc = SQLITE_ERROR @@ -96311,7 +96354,7 @@ if !(pMod != 0) { var zModule uintptr = *(*uintptr)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pTab + 64 + 8)))) - Xsqlite3ErrorMsg(tls, pParse, ts+21492, libc.VaList(bp, zModule)) + Xsqlite3ErrorMsg(tls, pParse, ts+21539, libc.VaList(bp, zModule)) rc = SQLITE_ERROR } else { *(*uintptr)(unsafe.Pointer(bp + 16)) = uintptr(0) @@ -96369,7 +96412,7 @@ pMod = Xsqlite3HashFind(tls, db+576, zMod) if pMod == uintptr(0) || (*Sqlite3_module)(unsafe.Pointer((*Module)(unsafe.Pointer(pMod)).FpModule)).FxCreate == uintptr(0) || (*Sqlite3_module)(unsafe.Pointer((*Module)(unsafe.Pointer(pMod)).FpModule)).FxDestroy == uintptr(0) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+21492, libc.VaList(bp, zMod)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+21539, libc.VaList(bp, zMod)) rc = SQLITE_ERROR } else { rc = vtabCallConstructor(tls, db, pTab, pMod, (*Sqlite3_module)(unsafe.Pointer((*Module)(unsafe.Pointer(pMod)).FpModule)).FxCreate, pzErr) @@ -96403,7 +96446,7 @@ if !(pCtx != 0) || (*VtabCtx)(unsafe.Pointer(pCtx)).FbDeclared != 0 { Xsqlite3Error(tls, db, SQLITE_MISUSE) Xsqlite3_mutex_leave(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) - return Xsqlite3MisuseError(tls, 151030) + return Xsqlite3MisuseError(tls, 151102) } pTab = (*VtabCtx)(unsafe.Pointer(pCtx)).FpTab @@ -96856,7 +96899,7 @@ Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) p = (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx if !(p != 0) { - rc = 
Xsqlite3MisuseError(tls, 151521) + rc = Xsqlite3MisuseError(tls, 151593) } else { ap = va switch op { @@ -96883,7 +96926,7 @@ fallthrough default: { - rc = Xsqlite3MisuseError(tls, 151539) + rc = Xsqlite3MisuseError(tls, 151611) break } @@ -97114,7 +97157,7 @@ func explainIndexColumnName(tls *libc.TLS, pIdx uintptr, i int32) uintptr { i = int32(*(*I16)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx)).FaiColumn + uintptr(i)*2))) if i == -2 { - return ts + 21511 + return ts + 21558 } if i == -1 { return ts + 16260 @@ -97126,11 +97169,11 @@ var i int32 if bAnd != 0 { - Xsqlite3_str_append(tls, pStr, ts+21518, 5) + Xsqlite3_str_append(tls, pStr, ts+21565, 5) } if nTerm > 1 { - Xsqlite3_str_append(tls, pStr, ts+21524, 1) + Xsqlite3_str_append(tls, pStr, ts+21571, 1) } for i = 0; i < nTerm; i++ { if i != 0 { @@ -97145,7 +97188,7 @@ Xsqlite3_str_append(tls, pStr, zOp, 1) if nTerm > 1 { - Xsqlite3_str_append(tls, pStr, ts+21524, 1) + Xsqlite3_str_append(tls, pStr, ts+21571, 1) } for i = 0; i < nTerm; i++ { if i != 0 { @@ -97171,27 +97214,27 @@ if int32(nEq) == 0 && (*WhereLoop)(unsafe.Pointer(pLoop)).FwsFlags&U32(WHERE_BTM_LIMIT|WHERE_TOP_LIMIT) == U32(0) { return } - Xsqlite3_str_append(tls, pStr, ts+21526, 2) + Xsqlite3_str_append(tls, pStr, ts+21573, 2) for i = 0; i < int32(nEq); i++ { var z uintptr = explainIndexColumnName(tls, pIndex, i) if i != 0 { - Xsqlite3_str_append(tls, pStr, ts+21518, 5) + Xsqlite3_str_append(tls, pStr, ts+21565, 5) } Xsqlite3_str_appendf(tls, pStr, func() uintptr { if i >= int32(nSkip) { - return ts + 21529 + return ts + 21576 } - return ts + 21534 + return ts + 21581 }(), libc.VaList(bp, z)) } j = i if (*WhereLoop)(unsafe.Pointer(pLoop)).FwsFlags&U32(WHERE_BTM_LIMIT) != 0 { - explainAppendTerm(tls, pStr, pIndex, int32(*(*U16)(unsafe.Pointer(pLoop + 24 + 2))), j, i, ts+21542) + explainAppendTerm(tls, pStr, pIndex, int32(*(*U16)(unsafe.Pointer(pLoop + 24 + 2))), j, i, ts+21589) i = 1 } if (*WhereLoop)(unsafe.Pointer(pLoop)).FwsFlags&U32(WHERE_TOP_LIMIT) != 0 { - explainAppendTerm(tls, pStr, pIndex, int32(*(*U16)(unsafe.Pointer(pLoop + 24 + 4))), j, i, ts+21544) + explainAppendTerm(tls, pStr, pIndex, int32(*(*U16)(unsafe.Pointer(pLoop + 24 + 4))), j, i, ts+21591) } Xsqlite3_str_append(tls, pStr, ts+4950, 1) } @@ -97234,11 +97277,11 @@ Xsqlite3StrAccumInit(tls, bp+64, db, bp+96, int32(unsafe.Sizeof([100]int8{})), SQLITE_MAX_LENGTH) (*StrAccum)(unsafe.Pointer(bp + 64)).FprintfFlags = U8(SQLITE_PRINTF_INTERNAL) - Xsqlite3_str_appendf(tls, bp+64, ts+21546, libc.VaList(bp, func() uintptr { + Xsqlite3_str_appendf(tls, bp+64, ts+21593, libc.VaList(bp, func() uintptr { if isSearch != 0 { - return ts + 21552 + return ts + 21599 } - return ts + 21559 + return ts + 21606 }(), pItem)) if flags&U32(WHERE_IPK|WHERE_VIRTUALTABLE) == U32(0) { var zFmt uintptr = uintptr(0) @@ -97251,40 +97294,40 @@ zFmt = ts + 10969 } } else if flags&U32(WHERE_PARTIALIDX) != 0 { - zFmt = ts + 21564 + zFmt = ts + 21611 } else if flags&U32(WHERE_AUTO_INDEX) != 0 { - zFmt = ts + 21597 + zFmt = ts + 21644 } else if flags&U32(WHERE_IDX_ONLY) != 0 { - zFmt = ts + 21622 + zFmt = ts + 21669 } else { - zFmt = ts + 21640 + zFmt = ts + 21687 } if zFmt != 0 { - Xsqlite3_str_append(tls, bp+64, ts+21649, 7) + Xsqlite3_str_append(tls, bp+64, ts+21696, 7) Xsqlite3_str_appendf(tls, bp+64, zFmt, libc.VaList(bp+16, (*Index)(unsafe.Pointer(pIdx)).FzName)) explainIndexRange(tls, bp+64, pLoop) } } else if flags&U32(WHERE_IPK) != U32(0) && flags&U32(WHERE_CONSTRAINT) != U32(0) { var cRangeOp int8 var zRowid uintptr = ts + 16260 
- Xsqlite3_str_appendf(tls, bp+64, ts+21657, libc.VaList(bp+24, zRowid)) + Xsqlite3_str_appendf(tls, bp+64, ts+21704, libc.VaList(bp+24, zRowid)) if flags&U32(WHERE_COLUMN_EQ|WHERE_COLUMN_IN) != 0 { cRangeOp = int8('=') } else if flags&U32(WHERE_BOTH_LIMIT) == U32(WHERE_BOTH_LIMIT) { - Xsqlite3_str_appendf(tls, bp+64, ts+21688, libc.VaList(bp+32, zRowid)) + Xsqlite3_str_appendf(tls, bp+64, ts+21735, libc.VaList(bp+32, zRowid)) cRangeOp = int8('<') } else if flags&U32(WHERE_BTM_LIMIT) != 0 { cRangeOp = int8('>') } else { cRangeOp = int8('<') } - Xsqlite3_str_appendf(tls, bp+64, ts+21698, libc.VaList(bp+40, int32(cRangeOp))) + Xsqlite3_str_appendf(tls, bp+64, ts+21745, libc.VaList(bp+40, int32(cRangeOp))) } else if flags&U32(WHERE_VIRTUALTABLE) != U32(0) { - Xsqlite3_str_appendf(tls, bp+64, ts+21703, + Xsqlite3_str_appendf(tls, bp+64, ts+21750, libc.VaList(bp+48, *(*int32)(unsafe.Pointer(pLoop + 24)), *(*uintptr)(unsafe.Pointer(pLoop + 24 + 16)))) } if int32((*SrcItem)(unsafe.Pointer(pItem)).Ffg.Fjointype)&JT_LEFT != 0 { - Xsqlite3_str_appendf(tls, bp+64, ts+21730, 0) + Xsqlite3_str_appendf(tls, bp+64, ts+21777, 0) } zMsg = Xsqlite3StrAccumFinish(tls, bp+64) @@ -97316,22 +97359,22 @@ Xsqlite3StrAccumInit(tls, bp+24, db, bp+56, int32(unsafe.Sizeof([100]int8{})), SQLITE_MAX_LENGTH) (*StrAccum)(unsafe.Pointer(bp + 24)).FprintfFlags = U8(SQLITE_PRINTF_INTERNAL) - Xsqlite3_str_appendf(tls, bp+24, ts+21741, libc.VaList(bp, pItem)) + Xsqlite3_str_appendf(tls, bp+24, ts+21788, libc.VaList(bp, pItem)) pLoop = (*WhereLevel)(unsafe.Pointer(pLevel)).FpWLoop if (*WhereLoop)(unsafe.Pointer(pLoop)).FwsFlags&U32(WHERE_IPK) != 0 { var pTab uintptr = (*SrcItem)(unsafe.Pointer(pItem)).FpTab if int32((*Table)(unsafe.Pointer(pTab)).FiPKey) >= 0 { - Xsqlite3_str_appendf(tls, bp+24, ts+21529, libc.VaList(bp+8, (*Column)(unsafe.Pointer((*Table)(unsafe.Pointer(pTab)).FaCol+uintptr((*Table)(unsafe.Pointer(pTab)).FiPKey)*24)).FzCnName)) + Xsqlite3_str_appendf(tls, bp+24, ts+21576, libc.VaList(bp+8, (*Column)(unsafe.Pointer((*Table)(unsafe.Pointer(pTab)).FaCol+uintptr((*Table)(unsafe.Pointer(pTab)).FiPKey)*24)).FzCnName)) } else { - Xsqlite3_str_appendf(tls, bp+24, ts+21762, 0) + Xsqlite3_str_appendf(tls, bp+24, ts+21809, 0) } } else { for i = int32((*WhereLoop)(unsafe.Pointer(pLoop)).FnSkip); i < int32(*(*U16)(unsafe.Pointer(pLoop + 24))); i++ { var z uintptr = explainIndexColumnName(tls, *(*uintptr)(unsafe.Pointer(pLoop + 24 + 8)), i) if i > int32((*WhereLoop)(unsafe.Pointer(pLoop)).FnSkip) { - Xsqlite3_str_append(tls, bp+24, ts+21518, 5) + Xsqlite3_str_append(tls, bp+24, ts+21565, 5) } - Xsqlite3_str_appendf(tls, bp+24, ts+21529, libc.VaList(bp+16, z)) + Xsqlite3_str_appendf(tls, bp+24, ts+21576, libc.VaList(bp+16, z)) } } Xsqlite3_str_append(tls, bp+24, ts+4950, 1) @@ -98928,7 +98971,7 @@ ; __126: ; - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21770, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21817, 0) ii = 0 __135: if !(ii < (*WhereClause)(unsafe.Pointer(pOrWc)).FnTerm) { @@ -98956,7 +98999,7 @@ pOrExpr = pAndExpr __140: ; - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21785, libc.VaList(bp, ii+1)) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21832, libc.VaList(bp, ii+1)) pSubWInfo = Xsqlite3WhereBegin(tls, pParse, pOrTab, pOrExpr, uintptr(0), uintptr(0), uintptr(0), uint16(WHERE_OR_SUBCLAUSE), iCovCur) @@ -99474,7 +99517,7 @@ var mAll Bitmask = uint64(0) var k int32 - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21794, libc.VaList(bp, 
(*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pTabItem)).FpTab)).FzName)) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21841, libc.VaList(bp, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pTabItem)).FpTab)).FzName)) for k = 0; k < iLevel; k++ { var iIdxCur int32 @@ -99835,7 +99878,7 @@ {FzOp: ts + 16109, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_MATCH)}, {FzOp: ts + 15440, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_GLOB)}, {FzOp: ts + 14960, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_LIKE)}, - {FzOp: ts + 21808, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_REGEXP)}, + {FzOp: ts + 21855, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_REGEXP)}, } func transferJoinMarkings(tls *libc.TLS, pDerived uintptr, pBase uintptr) { @@ -100325,12 +100368,12 @@ extraRight = x - uint64(1) if prereqAll>>1 >= x { - Xsqlite3ErrorMsg(tls, pParse, ts+21815, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+21862, 0) return } } else if prereqAll>>1 >= x { if (*SrcList)(unsafe.Pointer(pSrc)).FnSrc > 0 && int32((*SrcItem)(unsafe.Pointer(pSrc+8)).Ffg.Fjointype)&JT_LTORJ != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+21815, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+21862, 0) return } *(*U32)(unsafe.Pointer(pExpr + 4)) &= libc.Uint32FromInt32(libc.CplInt32(EP_InnerON)) @@ -100409,7 +100452,7 @@ !((*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_OuterON) != U32(0)) && 0 == Xsqlite3ExprCanBeNull(tls, pLeft) { (*Expr)(unsafe.Pointer(pExpr)).Fop = U8(TK_TRUEFALSE) - *(*uintptr)(unsafe.Pointer(pExpr + 8)) = ts + 6762 + *(*uintptr)(unsafe.Pointer(pExpr + 8)) = ts + 7692 *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_IsFalse) (*WhereTerm)(unsafe.Pointer(pTerm)).FprereqAll = uint64(0) (*WhereTerm)(unsafe.Pointer(pTerm)).FeOperator = U16(0) @@ -100503,7 +100546,7 @@ } zCollSeqName = func() uintptr { if *(*int32)(unsafe.Pointer(bp + 20)) != 0 { - return ts + 21856 + return ts + 21903 } return uintptr(unsafe.Pointer(&Xsqlite3StrBINARY)) }() @@ -100879,7 +100922,7 @@ k++ } if k >= int32((*Table)(unsafe.Pointer(pTab)).FnCol) { - Xsqlite3ErrorMsg(tls, pParse, ts+21863, + Xsqlite3ErrorMsg(tls, pParse, ts+21910, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName, j)) return } @@ -100895,7 +100938,7 @@ pRhs = Xsqlite3PExpr(tls, pParse, TK_UPLUS, Xsqlite3ExprDup(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, (*ExprList_item)(unsafe.Pointer(pArgs+8+uintptr(j)*32)).FpExpr, 0), uintptr(0)) pTerm = Xsqlite3PExpr(tls, pParse, TK_EQ, pColRef, pRhs) - if int32((*SrcItem)(unsafe.Pointer(pItem)).Ffg.Fjointype)&(JT_LEFT|JT_LTORJ) != 0 { + if int32((*SrcItem)(unsafe.Pointer(pItem)).Ffg.Fjointype)&(JT_LEFT|JT_LTORJ|JT_RIGHT) != 0 { joinType = U32(EP_OuterON) } else { joinType = U32(EP_InnerON) @@ -101613,7 +101656,7 @@ goto __6 } Xsqlite3_log(tls, SQLITE_WARNING|int32(1)<<8, - ts+21899, libc.VaList(bp, (*Table)(unsafe.Pointer(pTable)).FzName, + ts+21946, libc.VaList(bp, (*Table)(unsafe.Pointer(pTable)).FzName, (*Column)(unsafe.Pointer((*Table)(unsafe.Pointer(pTable)).FaCol+uintptr(iCol)*24)).FzCnName)) sentWarning = U8(1) __6: @@ -101684,7 +101727,7 @@ __14: ; *(*uintptr)(unsafe.Pointer(pLoop + 24 + 8)) = pIdx - (*Index)(unsafe.Pointer(pIdx)).FzName = ts + 21925 + (*Index)(unsafe.Pointer(pIdx)).FzName = ts + 21972 (*Index)(unsafe.Pointer(pIdx)).FpTable = pTable n = 0 idxCols = uint64(0) @@ -101858,6 +101901,10 @@ var v uintptr = (*Parse)(unsafe.Pointer(pParse)).FpVdbe var pLoop uintptr = (*WhereLevel)(unsafe.Pointer(pLevel)).FpWLoop var iCur int32 + var saved_pIdxEpr uintptr + + saved_pIdxEpr = (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr + (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr = uintptr(0) 
addrOnce = Xsqlite3VdbeAddOp0(tls, v, OP_Once) for __ccgo := true; __ccgo; __ccgo = iLevel < int32((*WhereInfo)(unsafe.Pointer(pWInfo)).FnLevel) { @@ -101901,9 +101948,7 @@ var r1 int32 = Xsqlite3GetTempRange(tls, pParse, n) var jj int32 for jj = 0; jj < n; jj++ { - var iCol int32 = int32(*(*I16)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx)).FaiColumn + uintptr(jj)*2))) - - Xsqlite3ExprCodeGetColumnOfTable(tls, v, (*Index)(unsafe.Pointer(pIdx)).FpTable, iCur, iCol, r1+jj) + Xsqlite3ExprCodeLoadIndexColumn(tls, pParse, pIdx, iCur, jj, r1+jj) } Xsqlite3VdbeAddOp4Int(tls, v, OP_FilterAdd, (*WhereLevel)(unsafe.Pointer(pLevel)).FregFilter, 0, r1, n) Xsqlite3ReleaseTempRange(tls, pParse, r1, n) @@ -101937,6 +101982,7 @@ } } Xsqlite3VdbeJumpHere(tls, v, addrOnce) + (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr = saved_pIdxEpr } func allocateIndexInfo(tls *libc.TLS, pWInfo uintptr, pWC uintptr, mUnusable Bitmask, pSrc uintptr, pmNoOmit uintptr) uintptr { @@ -102195,11 +102241,16 @@ _ = pParse + if !((*Table)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx)).FpTable)).FtabFlags&U32(TF_WithoutRowid) == U32(0)) && int32(*(*uint16)(unsafe.Pointer(pIdx + 100))&0x3>>0) == SQLITE_IDXTYPE_PRIMARYKEY { + nField = int32((*Index)(unsafe.Pointer(pIdx)).FnKeyCol) + } else { + nField = int32((*Index)(unsafe.Pointer(pIdx)).FnColumn) + } nField = func() int32 { - if int32((*UnpackedRecord)(unsafe.Pointer(pRec)).FnField) < (*Index)(unsafe.Pointer(pIdx)).FnSample { + if int32((*UnpackedRecord)(unsafe.Pointer(pRec)).FnField) < nField { return int32((*UnpackedRecord)(unsafe.Pointer(pRec)).FnField) } - return (*Index)(unsafe.Pointer(pIdx)).FnSample + return nField }() iCol = 0 iSample = (*Index)(unsafe.Pointer(pIdx)).FnSample * nField @@ -103780,7 +103831,7 @@ j >= (*WhereClause)(unsafe.Pointer(pWC)).FnTerm || *(*uintptr)(unsafe.Pointer((*WhereLoop)(unsafe.Pointer(pNew)).FaLTerm + uintptr(iTerm)*8)) != uintptr(0) || int32((*sqlite3_index_constraint)(unsafe.Pointer(pIdxCons)).Fusable) == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+21936, libc.VaList(bp, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pSrc)).FpTab)).FzName)) + Xsqlite3ErrorMsg(tls, pParse, ts+21983, libc.VaList(bp, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pSrc)).FpTab)).FzName)) return SQLITE_ERROR } @@ -103838,7 +103889,7 @@ (*WhereLoop)(unsafe.Pointer(pNew)).FnLTerm = U16(mxTerm + 1) for i = 0; i <= mxTerm; i++ { if *(*uintptr)(unsafe.Pointer((*WhereLoop)(unsafe.Pointer(pNew)).FaLTerm + uintptr(i)*8)) == uintptr(0) { - Xsqlite3ErrorMsg(tls, pParse, ts+21936, libc.VaList(bp+8, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pSrc)).FpTab)).FzName)) + Xsqlite3ErrorMsg(tls, pParse, ts+21983, libc.VaList(bp+8, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pSrc)).FpTab)).FzName)) return SQLITE_ERROR } @@ -104236,7 +104287,7 @@ mPrior = mPrior | (*WhereLoop)(unsafe.Pointer(pNew)).FmaskSelf if rc != 0 || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { if rc == SQLITE_DONE { - Xsqlite3_log(tls, SQLITE_WARNING, ts+21962, 0) + Xsqlite3_log(tls, SQLITE_WARNING, ts+22009, 0) rc = SQLITE_OK } else { goto __3 @@ -104843,7 +104894,7 @@ } if nFrom == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+21997, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+22044, 0) Xsqlite3DbFreeNN(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pSpace) return SQLITE_ERROR } @@ -104878,6 +104929,10 @@ if int32((*WherePath)(unsafe.Pointer(pFrom)).FisOrdered) == (*ExprList)(unsafe.Pointer((*WhereInfo)(unsafe.Pointer(pWInfo)).FpOrderBy)).FnExpr { (*WhereInfo)(unsafe.Pointer(pWInfo)).FeDistinct = 
U8(WHERE_DISTINCT_ORDERED) } + if (*Select)(unsafe.Pointer((*WhereInfo)(unsafe.Pointer(pWInfo)).FpSelect)).FpOrderBy != 0 && + int32((*WhereInfo)(unsafe.Pointer(pWInfo)).FnOBSat) > (*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer((*WhereInfo)(unsafe.Pointer(pWInfo)).FpSelect)).FpOrderBy)).FnExpr { + (*WhereInfo)(unsafe.Pointer(pWInfo)).FnOBSat = I8((*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer((*WhereInfo)(unsafe.Pointer(pWInfo)).FpSelect)).FpOrderBy)).FnExpr) + } } else { (*WhereInfo)(unsafe.Pointer(pWInfo)).FrevMask = (*WherePath)(unsafe.Pointer(pFrom)).FrevLoop if int32((*WhereInfo)(unsafe.Pointer(pWInfo)).FnOBSat) <= 0 { @@ -105172,6 +105227,9 @@ (*IndexedExpr)(unsafe.Pointer(p)).FiIdxCur = iIdxCur (*IndexedExpr)(unsafe.Pointer(p)).FiIdxCol = i (*IndexedExpr)(unsafe.Pointer(p)).FbMaybeNullRow = U8(bMaybeNullRow) + if Xsqlite3IndexAffinityStr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pIdx) != 0 { + (*IndexedExpr)(unsafe.Pointer(p)).Faff = U8(*(*int8)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx)).FzColAff + uintptr(i)))) + } (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr = p if (*IndexedExpr)(unsafe.Pointer(p)).FpIENext == uintptr(0) { Xsqlite3ParserAddCleanup(tls, pParse, *(*uintptr)(unsafe.Pointer(&struct { @@ -105324,7 +105382,7 @@ if !((*SrcList)(unsafe.Pointer(pTabList)).FnSrc > int32(uint64(unsafe.Sizeof(Bitmask(0)))*uint64(8))) { goto __2 } - Xsqlite3ErrorMsg(tls, pParse, ts+22015, libc.VaList(bp, int32(uint64(unsafe.Sizeof(Bitmask(0)))*uint64(8)))) + Xsqlite3ErrorMsg(tls, pParse, ts+22062, libc.VaList(bp, int32(uint64(unsafe.Sizeof(Bitmask(0)))*uint64(8)))) return uintptr(0) __2: ; @@ -105388,7 +105446,7 @@ (*WhereInfo)(unsafe.Pointer(pWInfo)).FeDistinct = U8(WHERE_DISTINCT_UNIQUE) __7: ; - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+22043, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+22090, 0) goto __5 __4: ii = 0 @@ -106270,7 +106328,7 @@ error_out: Xsqlite3_result_error(tls, - pCtx, ts+22061, -1) + pCtx, ts+22108, -1) } func nth_valueFinalizeFunc(tls *libc.TLS, pCtx uintptr) { @@ -106403,7 +106461,7 @@ (*NtileCtx)(unsafe.Pointer(p)).FnParam = Xsqlite3_value_int64(tls, *(*uintptr)(unsafe.Pointer(apArg))) if (*NtileCtx)(unsafe.Pointer(p)).FnParam <= int64(0) { Xsqlite3_result_error(tls, - pCtx, ts+22117, -1) + pCtx, ts+22164, -1) } } (*NtileCtx)(unsafe.Pointer(p)).FnTotal++ @@ -106493,17 +106551,17 @@ } } -var row_numberName = *(*[11]int8)(unsafe.Pointer(ts + 22162)) -var dense_rankName = *(*[11]int8)(unsafe.Pointer(ts + 22173)) -var rankName = *(*[5]int8)(unsafe.Pointer(ts + 22184)) -var percent_rankName = *(*[13]int8)(unsafe.Pointer(ts + 22189)) -var cume_distName = *(*[10]int8)(unsafe.Pointer(ts + 22202)) -var ntileName = *(*[6]int8)(unsafe.Pointer(ts + 22212)) -var last_valueName = *(*[11]int8)(unsafe.Pointer(ts + 22218)) -var nth_valueName = *(*[10]int8)(unsafe.Pointer(ts + 22229)) -var first_valueName = *(*[12]int8)(unsafe.Pointer(ts + 22239)) -var leadName = *(*[5]int8)(unsafe.Pointer(ts + 22251)) -var lagName = *(*[4]int8)(unsafe.Pointer(ts + 22256)) +var row_numberName = *(*[11]int8)(unsafe.Pointer(ts + 22209)) +var dense_rankName = *(*[11]int8)(unsafe.Pointer(ts + 22220)) +var rankName = *(*[5]int8)(unsafe.Pointer(ts + 22231)) +var percent_rankName = *(*[13]int8)(unsafe.Pointer(ts + 22236)) +var cume_distName = *(*[10]int8)(unsafe.Pointer(ts + 22249)) +var ntileName = *(*[6]int8)(unsafe.Pointer(ts + 22259)) +var last_valueName = *(*[11]int8)(unsafe.Pointer(ts + 22265)) +var nth_valueName = *(*[10]int8)(unsafe.Pointer(ts + 22276)) +var first_valueName = 
*(*[12]int8)(unsafe.Pointer(ts + 22286)) +var leadName = *(*[5]int8)(unsafe.Pointer(ts + 22298)) +var lagName = *(*[4]int8)(unsafe.Pointer(ts + 22303)) func noopStepFunc(tls *libc.TLS, p uintptr, n int32, a uintptr) { _ = p @@ -106549,7 +106607,7 @@ } } if p == uintptr(0) { - Xsqlite3ErrorMsg(tls, pParse, ts+22260, libc.VaList(bp, zName)) + Xsqlite3ErrorMsg(tls, pParse, ts+22307, libc.VaList(bp, zName)) } return p } @@ -106593,12 +106651,12 @@ ((*Window)(unsafe.Pointer(pWin)).FpStart != 0 || (*Window)(unsafe.Pointer(pWin)).FpEnd != 0) && ((*Window)(unsafe.Pointer(pWin)).FpOrderBy == uintptr(0) || (*ExprList)(unsafe.Pointer((*Window)(unsafe.Pointer(pWin)).FpOrderBy)).FnExpr != 1) { Xsqlite3ErrorMsg(tls, pParse, - ts+22279, 0) + ts+22326, 0) } else if (*FuncDef)(unsafe.Pointer(pFunc)).FfuncFlags&U32(SQLITE_FUNC_WINDOW) != 0 { var db uintptr = (*Parse)(unsafe.Pointer(pParse)).Fdb if (*Window)(unsafe.Pointer(pWin)).FpFilter != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+22350, 0) + ts+22397, 0) } else { *(*[8]WindowUpdate)(unsafe.Pointer(bp)) = [8]WindowUpdate{ {FzFunc: uintptr(unsafe.Pointer(&row_numberName)), FeFrmType: TK_ROWS, FeStart: TK_UNBOUNDED, FeEnd: TK_CURRENT}, @@ -106825,7 +106883,7 @@ if int32((*Expr)(unsafe.Pointer(pExpr)).Fop) == TK_AGG_FUNCTION && (*Expr)(unsafe.Pointer(pExpr)).FpAggInfo == uintptr(0) { Xsqlite3ErrorMsg(tls, (*Walker)(unsafe.Pointer(pWalker)).FpParse, - ts+22413, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(pExpr + 8)))) + ts+22460, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(pExpr + 8)))) } return WRC_Continue } @@ -106941,7 +106999,7 @@ if *(*uintptr)(unsafe.Pointer(bp + 48)) == uintptr(0) { *(*uintptr)(unsafe.Pointer(bp + 48)) = Xsqlite3ExprListAppend(tls, pParse, uintptr(0), - Xsqlite3Expr(tls, db, TK_INTEGER, ts+7523)) + Xsqlite3Expr(tls, db, TK_INTEGER, ts+7512)) } pSub = Xsqlite3SelectNew(tls, @@ -107056,7 +107114,7 @@ eStart == TK_FOLLOWING && (eEnd == TK_PRECEDING || eEnd == TK_CURRENT)) { goto __2 } - Xsqlite3ErrorMsg(tls, pParse, ts+22439, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+22486, 0) goto windowAllocErr __2: ; @@ -107121,15 +107179,15 @@ var zErr uintptr = uintptr(0) if (*Window)(unsafe.Pointer(pWin)).FpPartition != 0 { - zErr = ts + 22471 + zErr = ts + 22518 } else if (*Window)(unsafe.Pointer(pExist)).FpOrderBy != 0 && (*Window)(unsafe.Pointer(pWin)).FpOrderBy != 0 { - zErr = ts + 22488 + zErr = ts + 22535 } else if int32((*Window)(unsafe.Pointer(pExist)).FbImplicitFrame) == 0 { - zErr = ts + 22504 + zErr = ts + 22551 } if zErr != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+22524, libc.VaList(bp, zErr, (*Window)(unsafe.Pointer(pWin)).FzBase)) + ts+22571, libc.VaList(bp, zErr, (*Window)(unsafe.Pointer(pWin)).FzBase)) } else { (*Window)(unsafe.Pointer(pWin)).FpPartition = Xsqlite3ExprListDup(tls, db, (*Window)(unsafe.Pointer(pExist)).FpPartition, 0) if (*Window)(unsafe.Pointer(pExist)).FpOrderBy != 0 { @@ -107150,7 +107208,7 @@ (*Window)(unsafe.Pointer(pWin)).FpOwner = p if (*Expr)(unsafe.Pointer(p)).Fflags&U32(EP_Distinct) != 0 && int32((*Window)(unsafe.Pointer(pWin)).FeFrmType) != TK_FILTER { Xsqlite3ErrorMsg(tls, pParse, - ts+22557, 0) + ts+22604, 0) } } else { Xsqlite3WindowDelete(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pWin) @@ -107306,11 +107364,11 @@ } var azErr = [5]uintptr{ - ts + 22604, - ts + 22657, - ts + 22061, - ts + 22708, - ts + 22760, + ts + 22651, + ts + 22704, + ts + 22108, + ts + 22755, + ts + 22807, } var aOp1 = [5]int32{OP_Ge, OP_Ge, OP_Gt, OP_Ge, OP_Ge} @@ -108705,19 +108763,19 @@ } cnt++ if (*Select)(unsafe.Pointer(pLoop)).FpOrderBy 
!= 0 || (*Select)(unsafe.Pointer(pLoop)).FpLimit != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+22810, + Xsqlite3ErrorMsg(tls, pParse, ts+22857, libc.VaList(bp, func() uintptr { if (*Select)(unsafe.Pointer(pLoop)).FpOrderBy != uintptr(0) { - return ts + 22852 + return ts + 22899 } - return ts + 22861 + return ts + 22908 }(), Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(pNext)).Fop)))) break } } if (*Select)(unsafe.Pointer(p)).FselFlags&U32(SF_MultiValue) == U32(0) && libc.AssignInt32(&mxSelect, *(*int32)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb + 136 + 4*4))) > 0 && cnt > mxSelect { - Xsqlite3ErrorMsg(tls, pParse, ts+22867, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+22914, 0) } } @@ -108785,7 +108843,7 @@ var p uintptr = Xsqlite3ExprListAppend(tls, pParse, pPrior, uintptr(0)) if (hasCollate != 0 || sortOrder != -1) && int32((*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).Finit.Fbusy) == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+22901, + Xsqlite3ErrorMsg(tls, pParse, ts+22948, libc.VaList(bp, (*Token)(unsafe.Pointer(pIdToken)).Fn, (*Token)(unsafe.Pointer(pIdToken)).Fz)) } Xsqlite3ExprListSetName(tls, pParse, p, pIdToken, 1) @@ -109882,7 +109940,7 @@ yy_pop_parser_stack(tls, yypParser) } - Xsqlite3ErrorMsg(tls, pParse, ts+22939, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+22986, 0) (*YyParser)(unsafe.Pointer(yypParser)).FpParse = pParse } @@ -110861,7 +110919,7 @@ *(*U32)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-1)*24 + 8)) = U32(TF_WithoutRowid | TF_NoVisibleRowid) } else { *(*U32)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-1)*24 + 8)) = U32(0) - Xsqlite3ErrorMsg(tls, pParse, ts+22961, libc.VaList(bp, (*Token)(unsafe.Pointer(yymsp+8)).Fn, (*Token)(unsafe.Pointer(yymsp+8)).Fz)) + Xsqlite3ErrorMsg(tls, pParse, ts+23008, libc.VaList(bp, (*Token)(unsafe.Pointer(yymsp+8)).Fn, (*Token)(unsafe.Pointer(yymsp+8)).Fz)) } } break @@ -110871,7 +110929,7 @@ *(*U32)(unsafe.Pointer(bp + 40)) = U32(TF_Strict) } else { *(*U32)(unsafe.Pointer(bp + 40)) = U32(0) - Xsqlite3ErrorMsg(tls, pParse, ts+22961, libc.VaList(bp+16, (*Token)(unsafe.Pointer(yymsp+8)).Fn, (*Token)(unsafe.Pointer(yymsp+8)).Fz)) + Xsqlite3ErrorMsg(tls, pParse, ts+23008, libc.VaList(bp+16, (*Token)(unsafe.Pointer(yymsp+8)).Fn, (*Token)(unsafe.Pointer(yymsp+8)).Fz)) } } *(*U32)(unsafe.Pointer(yymsp + 8)) = *(*U32)(unsafe.Pointer(bp + 40)) @@ -111614,7 +111672,7 @@ case uint32(157): { Xsqlite3SrcListIndexedBy(tls, pParse, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-5)*24 + 8)), yymsp+libc.UintptrFromInt32(-4)*24+8) - Xsqlite3ExprListCheckLength(tls, pParse, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-2)*24 + 8)), ts+22988) + Xsqlite3ExprListCheckLength(tls, pParse, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-2)*24 + 8)), ts+23035) if *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-1)*24 + 8)) != 0 { var pFromClause uintptr = *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-1)*24 + 8)) if (*SrcList)(unsafe.Pointer(pFromClause)).FnSrc > 1 { @@ -111778,7 +111836,7 @@ *(*Token)(unsafe.Pointer(bp + 128)) = *(*Token)(unsafe.Pointer(yymsp + 8)) if int32((*Parse)(unsafe.Pointer(pParse)).Fnested) == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+22997, libc.VaList(bp+32, bp+128)) + Xsqlite3ErrorMsg(tls, pParse, ts+23044, libc.VaList(bp+32, bp+128)) *(*uintptr)(unsafe.Pointer(yymsp + 8)) = uintptr(0) } else { *(*uintptr)(unsafe.Pointer(yymsp + 8)) = Xsqlite3PExpr(tls, pParse, TK_REGISTER, uintptr(0), uintptr(0)) @@ -111995,9 +112053,9 @@ Xsqlite3ExprUnmapAndDelete(tls, 
pParse, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-4)*24 + 8))) *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-4)*24 + 8)) = Xsqlite3Expr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, TK_STRING, func() uintptr { if *(*int32)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-3)*24 + 8)) != 0 { - return ts + 6757 + return ts + 7687 } - return ts + 6762 + return ts + 7692 }()) if *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-4)*24 + 8)) != 0 { Xsqlite3ExprIdToTrueFalse(tls, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-4)*24 + 8))) @@ -112281,19 +112339,19 @@ { *(*Token)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-2)*24 + 8)) = *(*Token)(unsafe.Pointer(yymsp + 8)) Xsqlite3ErrorMsg(tls, pParse, - ts+23021, 0) + ts+23068, 0) } break case uint32(271): { Xsqlite3ErrorMsg(tls, pParse, - ts+23116, 0) + ts+23163, 0) } break case uint32(272): { Xsqlite3ErrorMsg(tls, pParse, - ts+23200, 0) + ts+23247, 0) } break case uint32(273): @@ -112672,9 +112730,9 @@ _ = yymajor if *(*int8)(unsafe.Pointer((*Token)(unsafe.Pointer(bp + 8)).Fz)) != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+22997, libc.VaList(bp, bp+8)) + Xsqlite3ErrorMsg(tls, pParse, ts+23044, libc.VaList(bp, bp+8)) } else { - Xsqlite3ErrorMsg(tls, pParse, ts+23285, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+23332, 0) } (*YyParser)(unsafe.Pointer(yypParser)).FpParse = pParse @@ -113442,7 +113500,7 @@ } else { (*Token)(unsafe.Pointer(bp + 2464)).Fz = zSql (*Token)(unsafe.Pointer(bp + 2464)).Fn = uint32(n) - Xsqlite3ErrorMsg(tls, pParse, ts+23302, libc.VaList(bp, bp+2464)) + Xsqlite3ErrorMsg(tls, pParse, ts+23349, libc.VaList(bp, bp+2464)) break } } @@ -113465,7 +113523,7 @@ if (*Parse)(unsafe.Pointer(pParse)).FzErrMsg == uintptr(0) { (*Parse)(unsafe.Pointer(pParse)).FzErrMsg = Xsqlite3MPrintf(tls, db, ts+3656, libc.VaList(bp+8, Xsqlite3ErrStr(tls, (*Parse)(unsafe.Pointer(pParse)).Frc))) } - Xsqlite3_log(tls, (*Parse)(unsafe.Pointer(pParse)).Frc, ts+23327, libc.VaList(bp+16, (*Parse)(unsafe.Pointer(pParse)).FzErrMsg, (*Parse)(unsafe.Pointer(pParse)).FzTail)) + Xsqlite3_log(tls, (*Parse)(unsafe.Pointer(pParse)).Frc, ts+23374, libc.VaList(bp+16, (*Parse)(unsafe.Pointer(pParse)).FzErrMsg, (*Parse)(unsafe.Pointer(pParse)).FzTail)) nErr++ } (*Parse)(unsafe.Pointer(pParse)).FzTail = zSql @@ -113638,7 +113696,7 @@ fallthrough case 'C': { - if nId == 6 && Xsqlite3_strnicmp(tls, zSql, ts+23338, 6) == 0 { + if nId == 6 && Xsqlite3_strnicmp(tls, zSql, ts+23385, 6) == 0 { token = U8(TkCREATE) } else { token = U8(TkOTHER) @@ -113651,11 +113709,11 @@ fallthrough case 'T': { - if nId == 7 && Xsqlite3_strnicmp(tls, zSql, ts+19924, 7) == 0 { + if nId == 7 && Xsqlite3_strnicmp(tls, zSql, ts+19971, 7) == 0 { token = U8(TkTRIGGER) - } else if nId == 4 && Xsqlite3_strnicmp(tls, zSql, ts+23345, 4) == 0 { + } else if nId == 4 && Xsqlite3_strnicmp(tls, zSql, ts+23392, 4) == 0 { token = U8(TkTEMP) - } else if nId == 9 && Xsqlite3_strnicmp(tls, zSql, ts+23350, 9) == 0 { + } else if nId == 9 && Xsqlite3_strnicmp(tls, zSql, ts+23397, 9) == 0 { token = U8(TkTEMP) } else { token = U8(TkOTHER) @@ -113668,9 +113726,9 @@ fallthrough case 'E': { - if nId == 3 && Xsqlite3_strnicmp(tls, zSql, ts+23360, 3) == 0 { + if nId == 3 && Xsqlite3_strnicmp(tls, zSql, ts+23407, 3) == 0 { token = U8(TkEND) - } else if nId == 7 && Xsqlite3_strnicmp(tls, zSql, ts+23364, 7) == 0 { + } else if nId == 7 && Xsqlite3_strnicmp(tls, zSql, ts+23411, 7) == 0 { token = U8(TkEXPLAIN) } else { token = U8(TkOTHER) @@ -113904,7 +113962,7 @@ var rc int32 = SQLITE_OK if 
Xsqlite3Config.FisInit != 0 { - return Xsqlite3MisuseError(tls, 174337) + return Xsqlite3MisuseError(tls, 174426) } ap = va @@ -114479,7 +114537,7 @@ return SQLITE_OK } if !(Xsqlite3SafetyCheckSickOrOk(tls, db) != 0) { - return Xsqlite3MisuseError(tls, 175111) + return Xsqlite3MisuseError(tls, 175200) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) if int32((*Sqlite3)(unsafe.Pointer(db)).FmTrace)&SQLITE_TRACE_CLOSE != 0 { @@ -114494,7 +114552,7 @@ if !(forceZombie != 0) && connectionIsBusy(tls, db) != 0 { Xsqlite3ErrorWithMsg(tls, db, SQLITE_BUSY, - ts+23372, 0) + ts+23419, 0) Xsqlite3_mutex_leave(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) return SQLITE_BUSY } @@ -114685,23 +114743,23 @@ // Return a static string that describes the kind of error specified in the // argument. func Xsqlite3ErrStr(tls *libc.TLS, rc int32) uintptr { - var zErr uintptr = ts + 23440 + var zErr uintptr = ts + 23487 switch rc { case SQLITE_ABORT | int32(2)<<8: { - zErr = ts + 23454 + zErr = ts + 23501 break } case SQLITE_ROW: { - zErr = ts + 23476 + zErr = ts + 23523 break } case SQLITE_DONE: { - zErr = ts + 23498 + zErr = ts + 23545 break } @@ -114719,35 +114777,35 @@ } var aMsg = [29]uintptr{ - ts + 23521, - ts + 23534, + ts + 23568, + ts + 23581, uintptr(0), - ts + 23550, - ts + 23575, - ts + 23589, - ts + 23608, + ts + 23597, + ts + 23622, + ts + 23636, + ts + 23655, ts + 1483, - ts + 23633, - ts + 23670, - ts + 23682, - ts + 23697, - ts + 23730, - ts + 23748, - ts + 23773, - ts + 23802, + ts + 23680, + ts + 23717, + ts + 23729, + ts + 23744, + ts + 23777, + ts + 23795, + ts + 23820, + ts + 23849, uintptr(0), ts + 5831, ts + 5327, - ts + 23819, - ts + 23837, - ts + 23855, - uintptr(0), - ts + 23889, + ts + 23866, + ts + 23884, + ts + 23902, uintptr(0), - ts + 23910, ts + 23936, - ts + 23959, - ts + 23980, + uintptr(0), + ts + 23957, + ts + 23983, + ts + 24006, + ts + 24027, } func sqliteDefaultBusyCallback(tls *libc.TLS, ptr uintptr, count int32) int32 { @@ -114868,7 +114926,7 @@ libc.Bool32(xValue == uintptr(0)) != libc.Bool32(xInverse == uintptr(0)) || (nArg < -1 || nArg > SQLITE_MAX_FUNCTION_ARG) || 255 < Xsqlite3Strlen30(tls, zFunctionName) { - return Xsqlite3MisuseError(tls, 175758) + return Xsqlite3MisuseError(tls, 175847) } extraFlags = enc & (SQLITE_DETERMINISTIC | SQLITE_DIRECTONLY | SQLITE_SUBTYPE | SQLITE_INNOCUOUS) @@ -114913,7 +114971,7 @@ if p != 0 && (*FuncDef)(unsafe.Pointer(p)).FfuncFlags&U32(SQLITE_FUNC_ENCMASK) == U32(enc) && int32((*FuncDef)(unsafe.Pointer(p)).FnArg) == nArg { if (*Sqlite3)(unsafe.Pointer(db)).FnVdbeActive != 0 { Xsqlite3ErrorWithMsg(tls, db, SQLITE_BUSY, - ts+23996, 0) + ts+24043, 0) return SQLITE_BUSY } else { @@ -115030,7 +115088,7 @@ _ = NotUsed _ = NotUsed2 zErr = Xsqlite3_mprintf(tls, - ts+24059, libc.VaList(bp, zName)) + ts+24106, libc.VaList(bp, zName)) Xsqlite3_result_error(tls, context, zErr, -1) Xsqlite3_free(tls, zErr) } @@ -115266,7 +115324,7 @@ } if iDb < 0 { rc = SQLITE_ERROR - Xsqlite3ErrorWithMsg(tls, db, SQLITE_ERROR, ts+24110, libc.VaList(bp, zDb)) + Xsqlite3ErrorWithMsg(tls, db, SQLITE_ERROR, ts+24157, libc.VaList(bp, zDb)) } else { (*Sqlite3)(unsafe.Pointer(db)).FbusyHandler.FnBusy = 0 rc = Xsqlite3Checkpoint(tls, db, iDb, eMode, pnLog, pnCkpt) @@ -115359,7 +115417,7 @@ return Xsqlite3ErrStr(tls, SQLITE_NOMEM) } if !(Xsqlite3SafetyCheckSickOrOk(tls, db) != 0) { - return Xsqlite3ErrStr(tls, Xsqlite3MisuseError(tls, 176503)) + return Xsqlite3ErrStr(tls, Xsqlite3MisuseError(tls, 176592)) } Xsqlite3_mutex_enter(tls, 
(*Sqlite3)(unsafe.Pointer(db)).Fmutex) if (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { @@ -115429,7 +115487,7 @@ // passed to this function, we assume a malloc() failed during sqlite3_open(). func Xsqlite3_errcode(tls *libc.TLS, db uintptr) int32 { if db != 0 && !(Xsqlite3SafetyCheckSickOrOk(tls, db) != 0) { - return Xsqlite3MisuseError(tls, 176582) + return Xsqlite3MisuseError(tls, 176671) } if !(db != 0) || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { return SQLITE_NOMEM @@ -115439,7 +115497,7 @@ func Xsqlite3_extended_errcode(tls *libc.TLS, db uintptr) int32 { if db != 0 && !(Xsqlite3SafetyCheckSickOrOk(tls, db) != 0) { - return Xsqlite3MisuseError(tls, 176591) + return Xsqlite3MisuseError(tls, 176680) } if !(db != 0) || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { return SQLITE_NOMEM @@ -115471,14 +115529,14 @@ enc2 = SQLITE_UTF16LE } if enc2 < SQLITE_UTF8 || enc2 > SQLITE_UTF16BE { - return Xsqlite3MisuseError(tls, 176639) + return Xsqlite3MisuseError(tls, 176728) } pColl = Xsqlite3FindCollSeq(tls, db, U8(enc2), zName, 0) if pColl != 0 && (*CollSeq)(unsafe.Pointer(pColl)).FxCmp != 0 { if (*Sqlite3)(unsafe.Pointer(db)).FnVdbeActive != 0 { Xsqlite3ErrorWithMsg(tls, db, SQLITE_BUSY, - ts+24131, 0) + ts+24178, 0) return SQLITE_BUSY } Xsqlite3ExpirePreparedStatements(tls, db, 0) @@ -115608,7 +115666,7 @@ if !((flags&uint32(SQLITE_OPEN_URI) != 0 || Xsqlite3Config.FbOpenUri != 0) && - nUri >= 5 && libc.Xmemcmp(tls, zUri, ts+24199, uint64(5)) == 0) { + nUri >= 5 && libc.Xmemcmp(tls, zUri, ts+24246, uint64(5)) == 0) { goto __1 } iOut = 0 @@ -115653,10 +115711,10 @@ goto __8 __9: ; - if !(iIn != 7 && (iIn != 16 || libc.Xmemcmp(tls, ts+24205, zUri+7, uint64(9)) != 0)) { + if !(iIn != 7 && (iIn != 16 || libc.Xmemcmp(tls, ts+24252, zUri+7, uint64(9)) != 0)) { goto __10 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24215, + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24262, libc.VaList(bp, iIn-7, zUri+7)) rc = SQLITE_ERROR goto parse_uri_out @@ -115761,7 +115819,7 @@ zVal = zOpt + uintptr(nOpt+1) nVal = Xsqlite3Strlen30(tls, zVal) - if !(nOpt == 3 && libc.Xmemcmp(tls, ts+24243, zOpt, uint64(3)) == 0) { + if !(nOpt == 3 && libc.Xmemcmp(tls, ts+24290, zOpt, uint64(3)) == 0) { goto __29 } zVfs = zVal @@ -115772,17 +115830,17 @@ mask = 0 limit = 0 - if !(nOpt == 5 && libc.Xmemcmp(tls, ts+24247, zOpt, uint64(5)) == 0) { + if !(nOpt == 5 && libc.Xmemcmp(tls, ts+24294, zOpt, uint64(5)) == 0) { goto __31 } mask = SQLITE_OPEN_SHAREDCACHE | SQLITE_OPEN_PRIVATECACHE aMode = uintptr(unsafe.Pointer(&aCacheMode)) limit = mask - zModeType = ts + 24247 + zModeType = ts + 24294 __31: ; - if !(nOpt == 4 && libc.Xmemcmp(tls, ts+24253, zOpt, uint64(4)) == 0) { + if !(nOpt == 4 && libc.Xmemcmp(tls, ts+24300, zOpt, uint64(4)) == 0) { goto __32 } @@ -115820,7 +115878,7 @@ if !(mode == 0) { goto __38 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24258, libc.VaList(bp+16, zModeType, zVal)) + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24305, libc.VaList(bp+16, zModeType, zVal)) rc = SQLITE_ERROR goto parse_uri_out __38: @@ -115828,7 +115886,7 @@ if !(mode&libc.CplInt32(SQLITE_OPEN_MEMORY) > limit) { goto __39 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24278, + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24325, libc.VaList(bp+32, zModeType, zVal)) rc = SQLITE_PERM goto parse_uri_out @@ -115868,7 +115926,7 @@ if !(*(*uintptr)(unsafe.Pointer(ppVfs)) == uintptr(0)) { goto 
__42 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24302, libc.VaList(bp+48, zVfs)) + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24349, libc.VaList(bp+48, zVfs)) rc = SQLITE_ERROR __42: ; @@ -115892,14 +115950,14 @@ } var aCacheMode = [3]OpenMode{ - {Fz: ts + 24318, Fmode: SQLITE_OPEN_SHAREDCACHE}, - {Fz: ts + 24325, Fmode: SQLITE_OPEN_PRIVATECACHE}, + {Fz: ts + 24365, Fmode: SQLITE_OPEN_SHAREDCACHE}, + {Fz: ts + 24372, Fmode: SQLITE_OPEN_PRIVATECACHE}, {}, } var aOpenMode = [5]OpenMode{ - {Fz: ts + 24333, Fmode: SQLITE_OPEN_READONLY}, - {Fz: ts + 24336, Fmode: SQLITE_OPEN_READWRITE}, - {Fz: ts + 24339, Fmode: SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE}, + {Fz: ts + 24380, Fmode: SQLITE_OPEN_READONLY}, + {Fz: ts + 24383, Fmode: SQLITE_OPEN_READWRITE}, + {Fz: ts + 24386, Fmode: SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE}, {Fz: ts + 17355, Fmode: SQLITE_OPEN_MEMORY}, {}, } @@ -116046,10 +116104,10 @@ createCollation(tls, db, uintptr(unsafe.Pointer(&Xsqlite3StrBINARY)), uint8(SQLITE_UTF16LE), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr, int32, uintptr) int32 }{binCollFunc})), uintptr(0)) - createCollation(tls, db, ts+21856, uint8(SQLITE_UTF8), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + createCollation(tls, db, ts+21903, uint8(SQLITE_UTF8), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr, int32, uintptr) int32 }{nocaseCollatingFunc})), uintptr(0)) - createCollation(tls, db, ts+24343, uint8(SQLITE_UTF8), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + createCollation(tls, db, ts+24390, uint8(SQLITE_UTF8), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr, int32, uintptr) int32 }{rtrimCollFunc})), uintptr(0)) if !((*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0) { @@ -116063,7 +116121,7 @@ if !(int32(1)<<(*(*uint32)(unsafe.Pointer(bp + 8))&uint32(7))&0x46 == 0) { goto __16 } - rc = Xsqlite3MisuseError(tls, 177308) + rc = Xsqlite3MisuseError(tls, 177397) goto __17 __16: rc = Xsqlite3ParseUri(tls, zVfs, zFilename, bp+8, db, bp+16, bp+24) @@ -116116,7 +116174,7 @@ (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb)).FzDbSName = ts + 6434 (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb)).Fsafety_level = U8(SQLITE_DEFAULT_SYNCHRONOUS + 1) - (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + 1*32)).FzDbSName = ts + 23345 + (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + 1*32)).FzDbSName = ts + 23392 (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + 1*32)).Fsafety_level = U8(PAGER_SYNCHRONOUS_OFF) (*Sqlite3)(unsafe.Pointer(db)).FeOpenState = U8(SQLITE_STATE_OPEN) @@ -116221,7 +116279,7 @@ return rc } if zFilename == uintptr(0) { - zFilename = ts + 24349 + zFilename = ts + 24396 } pVal = Xsqlite3ValueNew(tls, uintptr(0)) Xsqlite3ValueSetStr(tls, pVal, -1, zFilename, uint8(SQLITE_UTF16LE), uintptr(0)) @@ -116324,21 +116382,21 @@ bp := tls.Alloc(24) defer tls.Free(24) - Xsqlite3_log(tls, iErr, ts+24352, + Xsqlite3_log(tls, iErr, ts+24399, libc.VaList(bp, zType, lineno, uintptr(20)+Xsqlite3_sourceid(tls))) return iErr } func Xsqlite3CorruptError(tls *libc.TLS, lineno int32) int32 { - return Xsqlite3ReportError(tls, SQLITE_CORRUPT, lineno, ts+24377) + return Xsqlite3ReportError(tls, SQLITE_CORRUPT, lineno, ts+24424) } func Xsqlite3MisuseError(tls *libc.TLS, lineno int32) int32 { - return Xsqlite3ReportError(tls, SQLITE_MISUSE, lineno, ts+24397) + return Xsqlite3ReportError(tls, 
SQLITE_MISUSE, lineno, ts+24444) } func Xsqlite3CantopenError(tls *libc.TLS, lineno int32) int32 { - return Xsqlite3ReportError(tls, SQLITE_CANTOPEN, lineno, ts+24404) + return Xsqlite3ReportError(tls, SQLITE_CANTOPEN, lineno, ts+24451) } // This is a convenience routine that makes sure that all thread-specific @@ -116496,7 +116554,7 @@ goto __20 } Xsqlite3DbFree(tls, db, *(*uintptr)(unsafe.Pointer(bp + 24))) - *(*uintptr)(unsafe.Pointer(bp + 24)) = Xsqlite3MPrintf(tls, db, ts+24421, libc.VaList(bp, zTableName, + *(*uintptr)(unsafe.Pointer(bp + 24)) = Xsqlite3MPrintf(tls, db, ts+24468, libc.VaList(bp, zTableName, zColumnName)) rc = SQLITE_ERROR __20: @@ -117152,7 +117210,7 @@ azCompileOpt = Xsqlite3CompileOptions(tls, bp) - if Xsqlite3_strnicmp(tls, zOptName, ts+24449, 7) == 0 { + if Xsqlite3_strnicmp(tls, zOptName, ts+24496, 7) == 0 { zOptName += uintptr(7) } n = Xsqlite3Strlen30(tls, zOptName) @@ -117270,7 +117328,7 @@ Xsqlite3ErrorWithMsg(tls, db, rc, func() uintptr { if rc != 0 { - return ts + 24457 + return ts + 24504 } return uintptr(0) }(), 0) @@ -117448,7 +117506,7 @@ type JsonParse = JsonParse1 var jsonType = [8]uintptr{ - ts + 6174, ts + 6757, ts + 6762, ts + 6184, ts + 6179, ts + 7998, ts + 24480, ts + 24486, + ts + 6174, ts + 7687, ts + 7692, ts + 6184, ts + 6179, ts + 7998, ts + 24527, ts + 24533, } func jsonZero(tls *libc.TLS, p uintptr) { @@ -117601,7 +117659,7 @@ *(*int8)(unsafe.Pointer((*JsonString)(unsafe.Pointer(p)).FzBuf + uintptr(libc.PostIncUint64(&(*JsonString)(unsafe.Pointer(p)).FnUsed, 1)))) = int8('0') *(*int8)(unsafe.Pointer((*JsonString)(unsafe.Pointer(p)).FzBuf + uintptr(libc.PostIncUint64(&(*JsonString)(unsafe.Pointer(p)).FnUsed, 1)))) = int8('0') *(*int8)(unsafe.Pointer((*JsonString)(unsafe.Pointer(p)).FzBuf + uintptr(libc.PostIncUint64(&(*JsonString)(unsafe.Pointer(p)).FnUsed, 1)))) = int8('0' + int32(c)>>4) - c = uint8(*(*int8)(unsafe.Pointer(ts + 24493 + uintptr(int32(c)&0xf)))) + c = uint8(*(*int8)(unsafe.Pointer(ts + 24540 + uintptr(int32(c)&0xf)))) __8: ; __6: @@ -117656,7 +117714,7 @@ default: { if int32((*JsonString)(unsafe.Pointer(p)).FbErr) == 0 { - Xsqlite3_result_error(tls, (*JsonString)(unsafe.Pointer(p)).FpCtx, ts+24510, -1) + Xsqlite3_result_error(tls, (*JsonString)(unsafe.Pointer(p)).FpCtx, ts+24557, -1) (*JsonString)(unsafe.Pointer(p)).FbErr = U8(2) jsonReset(tls, p) } @@ -117720,13 +117778,13 @@ } case JSON_TRUE: { - jsonAppendRaw(tls, pOut, ts+6757, uint32(4)) + jsonAppendRaw(tls, pOut, ts+7687, uint32(4)) break } case JSON_FALSE: { - jsonAppendRaw(tls, pOut, ts+6762, uint32(5)) + jsonAppendRaw(tls, pOut, ts+7692, uint32(5)) break } @@ -118276,12 +118334,12 @@ jsonParseAddNode(tls, pParse, uint32(JSON_NULL), uint32(0), uintptr(0)) return int32(i + U32(4)) } else if int32(c) == 't' && - libc.Xstrncmp(tls, z+uintptr(i), ts+6757, uint64(4)) == 0 && + libc.Xstrncmp(tls, z+uintptr(i), ts+7687, uint64(4)) == 0 && !(int32(Xsqlite3CtypeMap[uint8(*(*int8)(unsafe.Pointer(z + uintptr(i+U32(4)))))])&0x06 != 0) { jsonParseAddNode(tls, pParse, uint32(JSON_TRUE), uint32(0), uintptr(0)) return int32(i + U32(4)) } else if int32(c) == 'f' && - libc.Xstrncmp(tls, z+uintptr(i), ts+6762, uint64(5)) == 0 && + libc.Xstrncmp(tls, z+uintptr(i), ts+7692, uint64(5)) == 0 && !(int32(Xsqlite3CtypeMap[uint8(*(*int8)(unsafe.Pointer(z + uintptr(i+U32(5)))))])&0x06 != 0) { jsonParseAddNode(tls, pParse, uint32(JSON_FALSE), uint32(0), uintptr(0)) return int32(i + U32(5)) @@ -118382,7 +118440,7 @@ if (*JsonParse)(unsafe.Pointer(pParse)).Foom != 0 { 
Xsqlite3_result_error_nomem(tls, pCtx) } else { - Xsqlite3_result_error(tls, pCtx, ts+24539, -1) + Xsqlite3_result_error(tls, pCtx, ts+24586, -1) } } jsonParseReset(tls, pParse) @@ -118688,7 +118746,7 @@ } if int32(*(*int8)(unsafe.Pointer(zPath))) == '.' { jsonParseAddNode(tls, pParse, uint32(JSON_OBJECT), uint32(0), uintptr(0)) - } else if libc.Xstrncmp(tls, zPath, ts+24554, uint64(3)) == 0 { + } else if libc.Xstrncmp(tls, zPath, ts+24601, uint64(3)) == 0 { jsonParseAddNode(tls, pParse, uint32(JSON_ARRAY), uint32(0), uintptr(0)) } else { return uintptr(0) @@ -118703,7 +118761,7 @@ bp := tls.Alloc(8) defer tls.Free(8) - return Xsqlite3_mprintf(tls, ts+24558, libc.VaList(bp, zErr)) + return Xsqlite3_mprintf(tls, ts+24605, libc.VaList(bp, zErr)) } func jsonLookup(tls *libc.TLS, pParse uintptr, zPath uintptr, pApnd uintptr, pCtx uintptr) uintptr { @@ -118757,7 +118815,7 @@ bp := tls.Alloc(8) defer tls.Free(8) - var zMsg uintptr = Xsqlite3_mprintf(tls, ts+24584, + var zMsg uintptr = Xsqlite3_mprintf(tls, ts+24631, libc.VaList(bp, zFuncName)) Xsqlite3_result_error(tls, pCtx, zMsg, -1) Xsqlite3_free(tls, zMsg) @@ -118862,11 +118920,11 @@ if int32(*(*int8)(unsafe.Pointer(zPath))) != '$' { jsonInit(tls, bp, ctx) if int32(Xsqlite3CtypeMap[uint8(*(*int8)(unsafe.Pointer(zPath)))])&0x04 != 0 { - jsonAppendRaw(tls, bp, ts+24627, uint32(2)) + jsonAppendRaw(tls, bp, ts+24674, uint32(2)) jsonAppendRaw(tls, bp, zPath, uint32(int32(libc.Xstrlen(tls, zPath)))) jsonAppendRaw(tls, bp, ts+4991, uint32(2)) } else { - jsonAppendRaw(tls, bp, ts+24630, uint32(1+libc.Bool32(int32(*(*int8)(unsafe.Pointer(zPath))) != '['))) + jsonAppendRaw(tls, bp, ts+24677, uint32(1+libc.Bool32(int32(*(*int8)(unsafe.Pointer(zPath))) != '['))) jsonAppendRaw(tls, bp, zPath, uint32(int32(libc.Xstrlen(tls, zPath)))) jsonAppendChar(tls, bp, int8(0)) } @@ -119023,14 +119081,14 @@ if argc&1 != 0 { Xsqlite3_result_error(tls, ctx, - ts+24633, -1) + ts+24680, -1) return } jsonInit(tls, bp, ctx) jsonAppendChar(tls, bp, int8('{')) for i = 0; i < argc; i = i + 2 { if Xsqlite3_value_type(tls, *(*uintptr)(unsafe.Pointer(argv + uintptr(i)*8))) != SQLITE_TEXT { - Xsqlite3_result_error(tls, ctx, ts+24684, -1) + Xsqlite3_result_error(tls, ctx, ts+24731, -1) jsonReset(tls, bp) return } @@ -119200,9 +119258,9 @@ } jsonWrongNumArgs(tls, ctx, func() uintptr { if bIsSet != 0 { - return ts + 24718 + return ts + 24765 } - return ts + 24722 + return ts + 24769 }()) return __2: @@ -119335,7 +119393,7 @@ (*JsonString)(unsafe.Pointer(pStr)).FnUsed-- } } else { - Xsqlite3_result_text(tls, ctx, ts+24729, 2, uintptr(0)) + Xsqlite3_result_text(tls, ctx, ts+24776, 2, uintptr(0)) } Xsqlite3_result_subtype(tls, ctx, uint32(JSON_SUBTYPE)) } @@ -119432,7 +119490,7 @@ (*JsonString)(unsafe.Pointer(pStr)).FnUsed-- } } else { - Xsqlite3_result_text(tls, ctx, ts+24732, 2, uintptr(0)) + Xsqlite3_result_text(tls, ctx, ts+24779, 2, uintptr(0)) } Xsqlite3_result_subtype(tls, ctx, uint32(JSON_SUBTYPE)) } @@ -119476,7 +119534,7 @@ _ = argc _ = pAux rc = Xsqlite3_declare_vtab(tls, db, - ts+24735) + ts+24782) if rc == SQLITE_OK { pNew = libc.AssignPtrUintptr(ppVtab, Xsqlite3_malloc(tls, int32(unsafe.Sizeof(Sqlite3_vtab{})))) if pNew == uintptr(0) { @@ -119607,7 +119665,7 @@ nn = nn - 2 } } - jsonPrintf(tls, nn+2, pStr, ts+24818, libc.VaList(bp, nn, z)) + jsonPrintf(tls, nn+2, pStr, ts+24865, libc.VaList(bp, nn, z)) } func jsonEachComputePath(tls *libc.TLS, p uintptr, pStr uintptr, i U32) { @@ -119626,7 +119684,7 @@ pNode = (*JsonEachCursor)(unsafe.Pointer(p)).FsParse.FaNode + 
uintptr(i)*16 pUp = (*JsonEachCursor)(unsafe.Pointer(p)).FsParse.FaNode + uintptr(iUp)*16 if int32((*JsonNode)(unsafe.Pointer(pUp)).FeType) == JSON_ARRAY { - jsonPrintf(tls, 30, pStr, ts+24824, libc.VaList(bp, *(*U32)(unsafe.Pointer(pUp + 8)))) + jsonPrintf(tls, 30, pStr, ts+24871, libc.VaList(bp, *(*U32)(unsafe.Pointer(pUp + 8)))) } else { if int32((*JsonNode)(unsafe.Pointer(pNode)).FjnFlags)&JNODE_LABEL == 0 { pNode -= 16 @@ -119722,7 +119780,7 @@ jsonAppendChar(tls, bp+8, int8('$')) } if int32((*JsonEachCursor)(unsafe.Pointer(p)).FeType) == JSON_ARRAY { - jsonPrintf(tls, 30, bp+8, ts+24824, libc.VaList(bp, (*JsonEachCursor)(unsafe.Pointer(p)).FiRowid)) + jsonPrintf(tls, 30, bp+8, ts+24871, libc.VaList(bp, (*JsonEachCursor)(unsafe.Pointer(p)).FiRowid)) } else if int32((*JsonEachCursor)(unsafe.Pointer(p)).FeType) == JSON_OBJECT { jsonAppendObjectPathElement(tls, bp+8, pThis) } @@ -119746,7 +119804,7 @@ { var zRoot uintptr = (*JsonEachCursor)(unsafe.Pointer(p)).FzRoot if zRoot == uintptr(0) { - zRoot = ts + 24829 + zRoot = ts + 24876 } Xsqlite3_result_text(tls, ctx, zRoot, -1, uintptr(0)) break @@ -119868,7 +119926,7 @@ var rc int32 = SQLITE_NOMEM if int32((*JsonEachCursor)(unsafe.Pointer(p)).FsParse.Foom) == 0 { Xsqlite3_free(tls, (*Sqlite3_vtab)(unsafe.Pointer((*Sqlite3_vtab_cursor)(unsafe.Pointer(cur)).FpVtab)).FzErrMsg) - (*Sqlite3_vtab)(unsafe.Pointer((*Sqlite3_vtab_cursor)(unsafe.Pointer(cur)).FpVtab)).FzErrMsg = Xsqlite3_mprintf(tls, ts+24539, 0) + (*Sqlite3_vtab)(unsafe.Pointer((*Sqlite3_vtab_cursor)(unsafe.Pointer(cur)).FpVtab)).FzErrMsg = Xsqlite3_mprintf(tls, ts+24586, 0) if (*Sqlite3_vtab)(unsafe.Pointer((*Sqlite3_vtab_cursor)(unsafe.Pointer(cur)).FpVtab)).FzErrMsg != 0 { rc = SQLITE_ERROR } @@ -119963,25 +120021,25 @@ } var aJsonFunc = [19]FuncDef{ - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24831}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24836}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24847}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24847}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24865}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_JSON)), FxSFunc: 0, FzName: ts + 24878}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_SQL)), FxSFunc: 0, FzName: ts + 24881}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24885}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24897}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24909}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24920}, - {FnArg: int8(-1), FfuncFlags: 
U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24931}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24943}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_ISSET)), FxSFunc: 0, FzName: ts + 24956}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24965}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24965}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24975}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_UTF8 | 0*SQLITE_FUNC_NEEDCOLL | SQLITE_SUBTYPE | SQLITE_UTF8 | SQLITE_DETERMINISTIC), FxSFunc: 0, FxFinalize: 0, FxValue: 0, FxInverse: 0, FzName: ts + 24986}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_UTF8 | 0*SQLITE_FUNC_NEEDCOLL | SQLITE_SUBTYPE | SQLITE_UTF8 | SQLITE_DETERMINISTIC), FxSFunc: 0, FxFinalize: 0, FxValue: 0, FxInverse: 0, FzName: ts + 25003}} + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24878}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24883}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24894}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24894}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24912}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_JSON)), FxSFunc: 0, FzName: ts + 24925}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_SQL)), FxSFunc: 0, FzName: ts + 24928}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24932}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24944}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24956}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24967}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24978}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24990}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_ISSET)), 
FxSFunc: 0, FzName: ts + 25003}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25012}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25012}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25022}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_UTF8 | 0*SQLITE_FUNC_NEEDCOLL | SQLITE_SUBTYPE | SQLITE_UTF8 | SQLITE_DETERMINISTIC), FxSFunc: 0, FxFinalize: 0, FxValue: 0, FxInverse: 0, FzName: ts + 25033}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_UTF8 | 0*SQLITE_FUNC_NEEDCOLL | SQLITE_SUBTYPE | SQLITE_UTF8 | SQLITE_DETERMINISTIC), FxSFunc: 0, FxFinalize: 0, FxValue: 0, FxInverse: 0, FzName: ts + 25050}} // Register the JSON table-valued functions func Xsqlite3JsonTableFunctions(tls *libc.TLS, db uintptr) int32 { @@ -120000,8 +120058,8 @@ FzName uintptr FpModule uintptr }{ - {FzName: ts + 25021, FpModule: 0}, - {FzName: ts + 25031, FpModule: 0}, + {FzName: ts + 25068, FpModule: 0}, + {FzName: ts + 25078, FpModule: 0}, } type Rtree1 = struct { @@ -120261,11 +120319,11 @@ } } if (*Rtree)(unsafe.Pointer(pRtree)).FpNodeBlob == uintptr(0) { - var zTab uintptr = Xsqlite3_mprintf(tls, ts+25041, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) + var zTab uintptr = Xsqlite3_mprintf(tls, ts+25088, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) if zTab == uintptr(0) { return SQLITE_NOMEM } - rc = Xsqlite3_blob_open(tls, (*Rtree)(unsafe.Pointer(pRtree)).Fdb, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, zTab, ts+25049, iNode, 0, + rc = Xsqlite3_blob_open(tls, (*Rtree)(unsafe.Pointer(pRtree)).Fdb, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, zTab, ts+25096, iNode, 0, pRtree+112) Xsqlite3_free(tls, zTab) } @@ -120476,7 +120534,7 @@ var pRtree uintptr = pVtab var rc int32 var zCreate uintptr = Xsqlite3_mprintf(tls, - ts+25054, + ts+25101, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) @@ -121179,7 +121237,7 @@ var pSrc uintptr var pInfo uintptr - pSrc = Xsqlite3_value_pointer(tls, pValue, ts+25136) + pSrc = Xsqlite3_value_pointer(tls, pValue, ts+25183) if pSrc == uintptr(0) { return SQLITE_ERROR } @@ -122520,7 +122578,7 @@ var zSql uintptr var rc int32 - zSql = Xsqlite3_mprintf(tls, ts+25150, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) + zSql = Xsqlite3_mprintf(tls, ts+25197, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) if zSql != 0 { rc = Xsqlite3_prepare_v2(tls, (*Rtree)(unsafe.Pointer(pRtree)).Fdb, zSql, -1, bp+56, uintptr(0)) } else { @@ -122532,12 +122590,12 @@ if iCol == 0 { var zCol uintptr = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), 0) (*Rtree)(unsafe.Pointer(pRtree)).Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+25170, libc.VaList(bp+16, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zCol)) + ts+25217, libc.VaList(bp+16, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zCol)) } else { var zCol1 uintptr = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), iCol) var zCol2 uintptr = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), 
iCol+1) (*Rtree)(unsafe.Pointer(pRtree)).Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+25202, libc.VaList(bp+32, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zCol1, zCol2)) + ts+25249, libc.VaList(bp+32, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zCol1, zCol2)) } } @@ -122763,7 +122821,7 @@ var pRtree uintptr = pVtab var rc int32 = SQLITE_NOMEM var zSql uintptr = Xsqlite3_mprintf(tls, - ts+25239, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName)) + ts+25286, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName)) if zSql != 0 { nodeBlobReset(tls, pRtree) rc = Xsqlite3_exec(tls, (*Rtree)(unsafe.Pointer(pRtree)).Fdb, zSql, uintptr(0), uintptr(0), uintptr(0)) @@ -122786,7 +122844,7 @@ bp := tls.Alloc(24) defer tls.Free(24) - var zFmt uintptr = ts + 25384 + var zFmt uintptr = ts + 25431 var zSql uintptr var rc int32 @@ -122834,7 +122892,7 @@ } var azName1 = [3]uintptr{ - ts + 25440, ts + 5053, ts + 16260, + ts + 25487, ts + 5053, ts + 16260, } var rtreeModule = Sqlite3_module{ @@ -122877,19 +122935,19 @@ var p uintptr = Xsqlite3_str_new(tls, db) var ii int32 Xsqlite3_str_appendf(tls, p, - ts+25445, + ts+25492, libc.VaList(bp, zDb, zPrefix)) for ii = 0; ii < int32((*Rtree)(unsafe.Pointer(pRtree)).FnAux); ii++ { - Xsqlite3_str_appendf(tls, p, ts+25507, libc.VaList(bp+16, ii)) + Xsqlite3_str_appendf(tls, p, ts+25554, libc.VaList(bp+16, ii)) } Xsqlite3_str_appendf(tls, p, - ts+25512, + ts+25559, libc.VaList(bp+24, zDb, zPrefix)) Xsqlite3_str_appendf(tls, p, - ts+25576, + ts+25623, libc.VaList(bp+40, zDb, zPrefix)) Xsqlite3_str_appendf(tls, p, - ts+25646, + ts+25693, libc.VaList(bp+56, zDb, zPrefix, (*Rtree)(unsafe.Pointer(pRtree)).FiNodeSize)) zCreate = Xsqlite3_str_finish(tls, p) if !(zCreate != 0) { @@ -122918,7 +122976,7 @@ if i != 3 || int32((*Rtree)(unsafe.Pointer(pRtree)).FnAux) == 0 { zFormat = azSql[i] } else { - zFormat = ts + 25695 + zFormat = ts + 25742 } zSql = Xsqlite3_mprintf(tls, zFormat, libc.VaList(bp+80, zDb, zPrefix)) if zSql != 0 { @@ -122930,7 +122988,7 @@ } if (*Rtree)(unsafe.Pointer(pRtree)).FnAux != 0 { (*Rtree)(unsafe.Pointer(pRtree)).FzReadAuxSql = Xsqlite3_mprintf(tls, - ts+25803, + ts+25850, libc.VaList(bp+96, zDb, zPrefix)) if (*Rtree)(unsafe.Pointer(pRtree)).FzReadAuxSql == uintptr(0) { rc = SQLITE_NOMEM @@ -122938,18 +122996,18 @@ var p uintptr = Xsqlite3_str_new(tls, db) var ii int32 var zSql uintptr - Xsqlite3_str_appendf(tls, p, ts+25848, libc.VaList(bp+112, zDb, zPrefix)) + Xsqlite3_str_appendf(tls, p, ts+25895, libc.VaList(bp+112, zDb, zPrefix)) for ii = 0; ii < int32((*Rtree)(unsafe.Pointer(pRtree)).FnAux); ii++ { if ii != 0 { Xsqlite3_str_append(tls, p, ts+12760, 1) } if ii < int32((*Rtree)(unsafe.Pointer(pRtree)).FnAuxNotNull) { - Xsqlite3_str_appendf(tls, p, ts+25875, libc.VaList(bp+128, ii, ii+2, ii)) + Xsqlite3_str_appendf(tls, p, ts+25922, libc.VaList(bp+128, ii, ii+2, ii)) } else { - Xsqlite3_str_appendf(tls, p, ts+25897, libc.VaList(bp+152, ii, ii+2)) + Xsqlite3_str_appendf(tls, p, ts+25944, libc.VaList(bp+152, ii, ii+2)) } } - Xsqlite3_str_appendf(tls, p, ts+25905, 0) + Xsqlite3_str_appendf(tls, p, 
ts+25952, 0) zSql = Xsqlite3_str_finish(tls, p) if zSql == uintptr(0) { rc = SQLITE_NOMEM @@ -122964,14 +123022,14 @@ } var azSql = [8]uintptr{ - ts + 25921, - ts + 25974, - ts + 26019, - ts + 26071, - ts + 26125, - ts + 26170, - ts + 26228, - ts + 26283, + ts + 25968, + ts + 26021, + ts + 26066, + ts + 26118, + ts + 26172, + ts + 26217, + ts + 26275, + ts + 26330, } func getIntFromStmt(tls *libc.TLS, db uintptr, zSql uintptr, piVal uintptr) int32 { @@ -123000,7 +123058,7 @@ var zSql uintptr if isCreate != 0 { *(*int32)(unsafe.Pointer(bp + 48)) = 0 - zSql = Xsqlite3_mprintf(tls, ts+26330, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb)) + zSql = Xsqlite3_mprintf(tls, ts+26377, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb)) rc = getIntFromStmt(tls, db, zSql, bp+48) if rc == SQLITE_OK { (*Rtree)(unsafe.Pointer(pRtree)).FiNodeSize = *(*int32)(unsafe.Pointer(bp + 48)) - 64 @@ -123012,7 +123070,7 @@ } } else { zSql = Xsqlite3_mprintf(tls, - ts+26350, + ts+26397, libc.VaList(bp+16, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) rc = getIntFromStmt(tls, db, zSql, pRtree+32) if rc != SQLITE_OK { @@ -123020,7 +123078,7 @@ } else if (*Rtree)(unsafe.Pointer(pRtree)).FiNodeSize < 512-64 { rc = SQLITE_CORRUPT | int32(1)<<8 - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+26407, + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+26454, libc.VaList(bp+40, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) } } @@ -123062,10 +123120,10 @@ ii = 4 *(*[5]uintptr)(unsafe.Pointer(bp + 96)) = [5]uintptr{ uintptr(0), - ts + 26442, - ts + 26485, - ts + 26520, - ts + 26556, + ts + 26489, + ts + 26532, + ts + 26567, + ts + 26603, } if !(argc < 6 || argc > RTREE_MAX_AUX_COLUMN+3) { @@ -123096,7 +123154,7 @@ libc.Xmemcpy(tls, (*Rtree)(unsafe.Pointer(pRtree)).FzName, *(*uintptr)(unsafe.Pointer(argv + 2*8)), uint64(nName)) pSql = Xsqlite3_str_new(tls, db) - Xsqlite3_str_appendf(tls, pSql, ts+26593, + Xsqlite3_str_appendf(tls, pSql, ts+26640, libc.VaList(bp+16, rtreeTokenLength(tls, *(*uintptr)(unsafe.Pointer(argv + 3*8))), *(*uintptr)(unsafe.Pointer(argv + 3*8)))) ii = 4 __3: @@ -123108,7 +123166,7 @@ goto __6 } (*Rtree)(unsafe.Pointer(pRtree)).FnAux++ - Xsqlite3_str_appendf(tls, pSql, ts+26617, libc.VaList(bp+32, rtreeTokenLength(tls, zArg+uintptr(1)), zArg+uintptr(1))) + Xsqlite3_str_appendf(tls, pSql, ts+26664, libc.VaList(bp+32, rtreeTokenLength(tls, zArg+uintptr(1)), zArg+uintptr(1))) goto __7 __6: if !(int32((*Rtree)(unsafe.Pointer(pRtree)).FnAux) > 0) { @@ -123131,7 +123189,7 @@ goto __5 __5: ; - Xsqlite3_str_appendf(tls, pSql, ts+26623, 0) + Xsqlite3_str_appendf(tls, pSql, ts+26670, 0) zSql = Xsqlite3_str_finish(tls, pSql) if !!(zSql != 0) { goto __10 @@ -123227,7 +123285,7 @@ return rc } -var azFormat = [2]uintptr{ts + 26626, ts + 26637} +var azFormat = [2]uintptr{ts + 26673, ts + 26684} func rtreenode(tls *libc.TLS, ctx uintptr, nArg int32, apArg uintptr) { bp := tls.Alloc(1072) @@ -123267,11 +123325,11 @@ if ii > 0 { Xsqlite3_str_append(tls, pOut, ts+10913, 1) } - Xsqlite3_str_appendf(tls, pOut, ts+26647, libc.VaList(bp, (*RtreeCell)(unsafe.Pointer(bp+1024)).FiRowid)) + Xsqlite3_str_appendf(tls, pOut, ts+26694, libc.VaList(bp, (*RtreeCell)(unsafe.Pointer(bp+1024)).FiRowid)) for jj = 0; jj < int32((*Rtree)(unsafe.Pointer(bp+56)).FnDim2); jj++ { - Xsqlite3_str_appendf(tls, pOut, ts+26653, libc.VaList(bp+8, float64(*(*RtreeValue)(unsafe.Pointer(bp + 1024 + 8 + uintptr(jj)*4))))) + Xsqlite3_str_appendf(tls, pOut, ts+26700, libc.VaList(bp+8, 
float64(*(*RtreeValue)(unsafe.Pointer(bp + 1024 + 8 + uintptr(jj)*4))))) } - Xsqlite3_str_append(tls, pOut, ts+26657, 1) + Xsqlite3_str_append(tls, pOut, ts+26704, 1) } errCode = Xsqlite3_str_errcode(tls, pOut) Xsqlite3_result_text(tls, ctx, Xsqlite3_str_finish(tls, pOut), -1, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{Xsqlite3_free}))) @@ -123282,7 +123340,7 @@ _ = nArg if Xsqlite3_value_type(tls, *(*uintptr)(unsafe.Pointer(apArg))) != SQLITE_BLOB || Xsqlite3_value_bytes(tls, *(*uintptr)(unsafe.Pointer(apArg))) < 2 { - Xsqlite3_result_error(tls, ctx, ts+26659, -1) + Xsqlite3_result_error(tls, ctx, ts+26706, -1) } else { var zBlob uintptr = Xsqlite3_value_blob(tls, *(*uintptr)(unsafe.Pointer(apArg))) if zBlob != 0 { @@ -123360,7 +123418,7 @@ if z == uintptr(0) { (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc = SQLITE_NOMEM } else { - (*RtreeCheck)(unsafe.Pointer(pCheck)).FzReport = Xsqlite3_mprintf(tls, ts+26692, + (*RtreeCheck)(unsafe.Pointer(pCheck)).FzReport = Xsqlite3_mprintf(tls, ts+26739, libc.VaList(bp, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzReport, func() uintptr { if (*RtreeCheck)(unsafe.Pointer(pCheck)).FzReport != 0 { return ts + 4055 @@ -123384,7 +123442,7 @@ if (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc == SQLITE_OK && (*RtreeCheck)(unsafe.Pointer(pCheck)).FpGetNode == uintptr(0) { (*RtreeCheck)(unsafe.Pointer(pCheck)).FpGetNode = rtreeCheckPrepare(tls, pCheck, - ts+26699, + ts+26746, libc.VaList(bp, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzDb, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzTab)) } @@ -123403,7 +123461,7 @@ } rtreeCheckReset(tls, pCheck, (*RtreeCheck)(unsafe.Pointer(pCheck)).FpGetNode) if (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc == SQLITE_OK && pRet == uintptr(0) { - rtreeCheckAppendMsg(tls, pCheck, ts+26744, libc.VaList(bp+16, iNode)) + rtreeCheckAppendMsg(tls, pCheck, ts+26791, libc.VaList(bp+16, iNode)) } } @@ -123417,8 +123475,8 @@ var rc int32 var pStmt uintptr *(*[2]uintptr)(unsafe.Pointer(bp + 80)) = [2]uintptr{ - ts + 26776, - ts + 26830, + ts + 26823, + ts + 26877, } if *(*uintptr)(unsafe.Pointer(pCheck + 40 + uintptr(bLeaf)*8)) == uintptr(0) { @@ -123433,23 +123491,23 @@ Xsqlite3_bind_int64(tls, pStmt, 1, iKey) rc = Xsqlite3_step(tls, pStmt) if rc == SQLITE_DONE { - rtreeCheckAppendMsg(tls, pCheck, ts+26878, + rtreeCheckAppendMsg(tls, pCheck, ts+26925, libc.VaList(bp+16, iKey, iVal, func() uintptr { if bLeaf != 0 { - return ts + 26923 + return ts + 26970 } - return ts + 26931 + return ts + 26978 }())) } else if rc == SQLITE_ROW { var ii I64 = Xsqlite3_column_int64(tls, pStmt, 0) if ii != iVal { rtreeCheckAppendMsg(tls, pCheck, - ts+26940, + ts+26987, libc.VaList(bp+40, iKey, ii, func() uintptr { if bLeaf != 0 { - return ts + 26923 + return ts + 26970 } - return ts + 26931 + return ts + 26978 }(), iKey, iVal)) } } @@ -123473,7 +123531,7 @@ return libc.Bool32(*(*RtreeValue)(unsafe.Pointer(bp + 48)) > *(*RtreeValue)(unsafe.Pointer(bp + 52))) }() != 0 { rtreeCheckAppendMsg(tls, pCheck, - ts+26998, libc.VaList(bp, i, iCell, iNode)) + ts+27045, libc.VaList(bp, i, iCell, iNode)) } if pParent != 0 { @@ -123493,7 +123551,7 @@ return libc.Bool32(*(*RtreeValue)(unsafe.Pointer(bp + 52)) > *(*RtreeValue)(unsafe.Pointer(bp + 60))) }() != 0 { rtreeCheckAppendMsg(tls, pCheck, - ts+27046, libc.VaList(bp+24, i, iCell, iNode)) + ts+27093, libc.VaList(bp+24, i, iCell, iNode)) } } } @@ -123510,14 +123568,14 @@ if aNode != 0 { if *(*int32)(unsafe.Pointer(bp + 48)) < 4 { rtreeCheckAppendMsg(tls, pCheck, - ts+27113, libc.VaList(bp, iNode, 
*(*int32)(unsafe.Pointer(bp + 48)))) + ts+27160, libc.VaList(bp, iNode, *(*int32)(unsafe.Pointer(bp + 48)))) } else { var nCell int32 var i int32 if aParent == uintptr(0) { iDepth = readInt16(tls, aNode) if iDepth > RTREE_MAX_DEPTH { - rtreeCheckAppendMsg(tls, pCheck, ts+27147, libc.VaList(bp+16, iDepth)) + rtreeCheckAppendMsg(tls, pCheck, ts+27194, libc.VaList(bp+16, iDepth)) Xsqlite3_free(tls, aNode) return } @@ -123525,7 +123583,7 @@ nCell = readInt16(tls, aNode+2) if 4+nCell*(8+(*RtreeCheck)(unsafe.Pointer(pCheck)).FnDim*2*4) > *(*int32)(unsafe.Pointer(bp + 48)) { rtreeCheckAppendMsg(tls, pCheck, - ts+27177, + ts+27224, libc.VaList(bp+24, iNode, nCell, *(*int32)(unsafe.Pointer(bp + 48)))) } else { for i = 0; i < nCell; i++ { @@ -123554,14 +123612,14 @@ if (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc == SQLITE_OK { var pCount uintptr - pCount = rtreeCheckPrepare(tls, pCheck, ts+27232, + pCount = rtreeCheckPrepare(tls, pCheck, ts+27279, libc.VaList(bp, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzDb, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzTab, zTbl)) if pCount != 0 { if Xsqlite3_step(tls, pCount) == SQLITE_ROW { var nActual I64 = Xsqlite3_column_int64(tls, pCount, 0) if nActual != nExpect { rtreeCheckAppendMsg(tls, pCheck, - ts+27263, libc.VaList(bp+24, zTbl, nExpect, nActual)) + ts+27310, libc.VaList(bp+24, zTbl, nExpect, nActual)) } } (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc = Xsqlite3_finalize(tls, pCount) @@ -123588,7 +123646,7 @@ } if (*RtreeCheck)(unsafe.Pointer(bp+32)).Frc == SQLITE_OK { - pStmt = rtreeCheckPrepare(tls, bp+32, ts+27330, libc.VaList(bp, zDb, zTab)) + pStmt = rtreeCheckPrepare(tls, bp+32, ts+27377, libc.VaList(bp, zDb, zTab)) if pStmt != 0 { nAux = Xsqlite3_column_count(tls, pStmt) - 2 Xsqlite3_finalize(tls, pStmt) @@ -123597,12 +123655,12 @@ } } - pStmt = rtreeCheckPrepare(tls, bp+32, ts+25150, libc.VaList(bp+16, zDb, zTab)) + pStmt = rtreeCheckPrepare(tls, bp+32, ts+25197, libc.VaList(bp+16, zDb, zTab)) if pStmt != 0 { var rc int32 (*RtreeCheck)(unsafe.Pointer(bp + 32)).FnDim = (Xsqlite3_column_count(tls, pStmt) - 1 - nAux) / 2 if (*RtreeCheck)(unsafe.Pointer(bp+32)).FnDim < 1 { - rtreeCheckAppendMsg(tls, bp+32, ts+27358, 0) + rtreeCheckAppendMsg(tls, bp+32, ts+27405, 0) } else if SQLITE_ROW == Xsqlite3_step(tls, pStmt) { (*RtreeCheck)(unsafe.Pointer(bp + 32)).FbInt = libc.Bool32(Xsqlite3_column_type(tls, pStmt, 1) == SQLITE_INTEGER) } @@ -123616,8 +123674,8 @@ if (*RtreeCheck)(unsafe.Pointer(bp+32)).Frc == SQLITE_OK { rtreeCheckNode(tls, bp+32, 0, uintptr(0), int64(1)) } - rtreeCheckCount(tls, bp+32, ts+27389, int64((*RtreeCheck)(unsafe.Pointer(bp+32)).FnLeaf)) - rtreeCheckCount(tls, bp+32, ts+27396, int64((*RtreeCheck)(unsafe.Pointer(bp+32)).FnNonLeaf)) + rtreeCheckCount(tls, bp+32, ts+27436, int64((*RtreeCheck)(unsafe.Pointer(bp+32)).FnLeaf)) + rtreeCheckCount(tls, bp+32, ts+27443, int64((*RtreeCheck)(unsafe.Pointer(bp+32)).FnNonLeaf)) } Xsqlite3_finalize(tls, (*RtreeCheck)(unsafe.Pointer(bp+32)).FpGetNode) @@ -123625,7 +123683,7 @@ Xsqlite3_finalize(tls, *(*uintptr)(unsafe.Pointer(bp + 32 + 40 + 1*8))) if bEnd != 0 { - var rc int32 = Xsqlite3_exec(tls, db, ts+27404, uintptr(0), uintptr(0), uintptr(0)) + var rc int32 = Xsqlite3_exec(tls, db, ts+27451, uintptr(0), uintptr(0), uintptr(0)) if (*RtreeCheck)(unsafe.Pointer(bp+32)).Frc == SQLITE_OK { (*RtreeCheck)(unsafe.Pointer(bp + 32)).Frc = rc } @@ -123640,7 +123698,7 @@ if nArg != 1 && nArg != 2 { Xsqlite3_result_error(tls, ctx, - ts+27408, -1) + ts+27455, -1) } else { var rc int32 *(*uintptr)(unsafe.Pointer(bp)) = 
uintptr(0) @@ -123658,7 +123716,7 @@ if *(*uintptr)(unsafe.Pointer(bp)) != 0 { return *(*uintptr)(unsafe.Pointer(bp)) } - return ts + 18005 + return ts + 18052 }(), -1, libc.UintptrFromInt32(-1)) } else { Xsqlite3_result_error_code(tls, ctx, rc) @@ -124029,11 +124087,11 @@ var db uintptr = Xsqlite3_context_db_handle(tls, context) var x uintptr = Xsqlite3_str_new(tls, db) var i int32 - Xsqlite3_str_append(tls, x, ts+27459, 1) + Xsqlite3_str_append(tls, x, ts+27506, 1) for i = 0; i < (*GeoPoly)(unsafe.Pointer(p)).FnVertex; i++ { - Xsqlite3_str_appendf(tls, x, ts+27461, libc.VaList(bp, float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2)*4))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2+1)*4))))) + Xsqlite3_str_appendf(tls, x, ts+27508, libc.VaList(bp, float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2)*4))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2+1)*4))))) } - Xsqlite3_str_appendf(tls, x, ts+27472, libc.VaList(bp+16, float64(*(*GeoCoord)(unsafe.Pointer(p + 8))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + 1*4))))) + Xsqlite3_str_appendf(tls, x, ts+27519, libc.VaList(bp+16, float64(*(*GeoCoord)(unsafe.Pointer(p + 8))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + 1*4))))) Xsqlite3_result_text(tls, context, Xsqlite3_str_finish(tls, x), -1, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{Xsqlite3_free}))) Xsqlite3_free(tls, p) } @@ -124053,19 +124111,19 @@ var x uintptr = Xsqlite3_str_new(tls, db) var i int32 var cSep int8 = int8('\'') - Xsqlite3_str_appendf(tls, x, ts+27483, 0) + Xsqlite3_str_appendf(tls, x, ts+27530, 0) for i = 0; i < (*GeoPoly)(unsafe.Pointer(p)).FnVertex; i++ { - Xsqlite3_str_appendf(tls, x, ts+27501, libc.VaList(bp, int32(cSep), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2)*4))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2+1)*4))))) + Xsqlite3_str_appendf(tls, x, ts+27548, libc.VaList(bp, int32(cSep), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2)*4))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2+1)*4))))) cSep = int8(' ') } - Xsqlite3_str_appendf(tls, x, ts+27509, libc.VaList(bp+24, float64(*(*GeoCoord)(unsafe.Pointer(p + 8))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + 1*4))))) + Xsqlite3_str_appendf(tls, x, ts+27556, libc.VaList(bp+24, float64(*(*GeoCoord)(unsafe.Pointer(p + 8))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + 1*4))))) for i = 1; i < argc; i++ { var z uintptr = Xsqlite3_value_text(tls, *(*uintptr)(unsafe.Pointer(argv + uintptr(i)*8))) if z != 0 && *(*int8)(unsafe.Pointer(z)) != 0 { - Xsqlite3_str_appendf(tls, x, ts+27517, libc.VaList(bp+40, z)) + Xsqlite3_str_appendf(tls, x, ts+27564, libc.VaList(bp+40, z)) } } - Xsqlite3_str_appendf(tls, x, ts+27521, 0) + Xsqlite3_str_appendf(tls, x, ts+27568, 0) Xsqlite3_result_text(tls, context, Xsqlite3_str_finish(tls, x), -1, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{Xsqlite3_free}))) Xsqlite3_free(tls, p) } @@ -124985,7 +125043,7 @@ libc.Xmemcpy(tls, (*Rtree)(unsafe.Pointer(pRtree)).FzName, *(*uintptr)(unsafe.Pointer(argv + 2*8)), uint64(nName)) pSql = Xsqlite3_str_new(tls, db) - Xsqlite3_str_appendf(tls, pSql, ts+27534, 0) + Xsqlite3_str_appendf(tls, pSql, ts+27581, 0) (*Rtree)(unsafe.Pointer(pRtree)).FnAux = U8(1) (*Rtree)(unsafe.Pointer(pRtree)).FnAuxNotNull = U8(1) ii = 3 @@ -124994,7 +125052,7 @@ goto __4 } (*Rtree)(unsafe.Pointer(pRtree)).FnAux++ - Xsqlite3_str_appendf(tls, pSql, ts+27556, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(argv + uintptr(ii)*8)))) + Xsqlite3_str_appendf(tls, 
pSql, ts+27603, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(argv + uintptr(ii)*8)))) goto __3 __3: ii++ @@ -125002,7 +125060,7 @@ goto __4 __4: ; - Xsqlite3_str_appendf(tls, pSql, ts+26623, 0) + Xsqlite3_str_appendf(tls, pSql, ts+26670, 0) zSql = Xsqlite3_str_finish(tls, pSql) if !!(zSql != 0) { goto __5 @@ -125239,7 +125297,7 @@ } if iFuncTerm >= 0 { (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxNum = idxNum - (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxStr = ts + 27560 + (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxStr = ts + 27607 (*sqlite3_index_constraint_usage)(unsafe.Pointer((*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FaConstraintUsage + uintptr(iFuncTerm)*8)).FargvIndex = 1 (*sqlite3_index_constraint_usage)(unsafe.Pointer((*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FaConstraintUsage + uintptr(iFuncTerm)*8)).Fomit = uint8(0) (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FestimatedCost = 300.0 @@ -125247,7 +125305,7 @@ return SQLITE_OK } (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxNum = 4 - (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxStr = ts + 27566 + (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxStr = ts + 27613 (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FestimatedCost = 3000000.0 (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FestimatedRows = int64(100000) return SQLITE_OK @@ -125359,7 +125417,7 @@ if !(*(*int32)(unsafe.Pointer(bp + 48)) == SQLITE_ERROR) { goto __4 } - (*Sqlite3_vtab)(unsafe.Pointer(pVtab)).FzErrMsg = Xsqlite3_mprintf(tls, ts+27575, 0) + (*Sqlite3_vtab)(unsafe.Pointer(pVtab)).FzErrMsg = Xsqlite3_mprintf(tls, ts+27622, 0) __4: ; goto geopoly_update_end @@ -125491,14 +125549,14 @@ func geopolyFindFunction(tls *libc.TLS, pVtab uintptr, nArg int32, zName uintptr, pxFunc uintptr, ppArg uintptr) int32 { _ = pVtab _ = nArg - if Xsqlite3_stricmp(tls, zName, ts+27615) == 0 { + if Xsqlite3_stricmp(tls, zName, ts+27662) == 0 { *(*uintptr)(unsafe.Pointer(pxFunc)) = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{geopolyOverlapFunc})) *(*uintptr)(unsafe.Pointer(ppArg)) = uintptr(0) return SQLITE_INDEX_CONSTRAINT_FUNCTION } - if Xsqlite3_stricmp(tls, zName, ts+27631) == 0 { + if Xsqlite3_stricmp(tls, zName, ts+27678) == 0 { *(*uintptr)(unsafe.Pointer(pxFunc)) = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{geopolyWithinFunc})) @@ -125563,7 +125621,7 @@ uintptr(0), aAgg[i].FxStep, aAgg[i].FxFinal) } if rc == SQLITE_OK { - rc = Xsqlite3_create_module_v2(tls, db, ts+27646, uintptr(unsafe.Pointer(&geopolyModule)), uintptr(0), uintptr(0)) + rc = Xsqlite3_create_module_v2(tls, db, ts+27693, uintptr(unsafe.Pointer(&geopolyModule)), uintptr(0), uintptr(0)) } return rc } @@ -125575,25 +125633,25 @@ F__ccgo_pad1 [6]byte FzName uintptr }{ - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27654}, - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27667}, - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27680}, - {FxFunc: 0, FnArg: int8(-1), FbPure: uint8(1), FzName: ts + 27693}, - {FxFunc: 0, FnArg: int8(2), FbPure: uint8(1), FzName: ts + 27631}, - {FxFunc: 0, FnArg: int8(3), FbPure: uint8(1), FzName: ts + 27705}, - {FxFunc: 0, FnArg: int8(2), FbPure: uint8(1), FzName: ts + 27615}, - {FxFunc: 0, FnArg: int8(1), FzName: ts + 27728}, - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27742}, - {FxFunc: 0, FnArg: int8(7), FbPure: uint8(1), FzName: ts + 27755}, - {FxFunc: 0, FnArg: int8(4), FbPure: uint8(1), FzName: ts + 
27769}, - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27785}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27701}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27714}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27727}, + {FxFunc: 0, FnArg: int8(-1), FbPure: uint8(1), FzName: ts + 27740}, + {FxFunc: 0, FnArg: int8(2), FbPure: uint8(1), FzName: ts + 27678}, + {FxFunc: 0, FnArg: int8(3), FbPure: uint8(1), FzName: ts + 27752}, + {FxFunc: 0, FnArg: int8(2), FbPure: uint8(1), FzName: ts + 27662}, + {FxFunc: 0, FnArg: int8(1), FzName: ts + 27775}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27789}, + {FxFunc: 0, FnArg: int8(7), FbPure: uint8(1), FzName: ts + 27802}, + {FxFunc: 0, FnArg: int8(4), FbPure: uint8(1), FzName: ts + 27816}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27832}, } var aAgg = [1]struct { FxStep uintptr FxFinal uintptr FzName uintptr }{ - {FxStep: 0, FxFinal: 0, FzName: ts + 27797}, + {FxStep: 0, FxFinal: 0, FzName: ts + 27844}, } // Register the r-tree module with database handle db. This creates the @@ -125603,26 +125661,26 @@ var utf8 int32 = SQLITE_UTF8 var rc int32 - rc = Xsqlite3_create_function(tls, db, ts+27816, 2, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + rc = Xsqlite3_create_function(tls, db, ts+27863, 2, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rtreenode})), uintptr(0), uintptr(0)) if rc == SQLITE_OK { - rc = Xsqlite3_create_function(tls, db, ts+27826, 1, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + rc = Xsqlite3_create_function(tls, db, ts+27873, 1, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rtreedepth})), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { - rc = Xsqlite3_create_function(tls, db, ts+27837, -1, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + rc = Xsqlite3_create_function(tls, db, ts+27884, -1, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rtreecheck})), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { var c uintptr = uintptr(RTREE_COORD_REAL32) - rc = Xsqlite3_create_module_v2(tls, db, ts+27560, uintptr(unsafe.Pointer(&rtreeModule)), c, uintptr(0)) + rc = Xsqlite3_create_module_v2(tls, db, ts+27607, uintptr(unsafe.Pointer(&rtreeModule)), c, uintptr(0)) } if rc == SQLITE_OK { var c uintptr = uintptr(RTREE_COORD_INT32) - rc = Xsqlite3_create_module_v2(tls, db, ts+27848, uintptr(unsafe.Pointer(&rtreeModule)), c, uintptr(0)) + rc = Xsqlite3_create_module_v2(tls, db, ts+27895, uintptr(unsafe.Pointer(&rtreeModule)), c, uintptr(0)) } if rc == SQLITE_OK { rc = sqlite3_geopoly_init(tls, db) @@ -125676,7 +125734,7 @@ Xsqlite3_result_error_nomem(tls, ctx) rtreeMatchArgFree(tls, pBlob) } else { - Xsqlite3_result_pointer(tls, ctx, pBlob, ts+25136, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{rtreeMatchArgFree}))) + Xsqlite3_result_pointer(tls, ctx, pBlob, ts+25183, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{rtreeMatchArgFree}))) } } } @@ -126003,7 +126061,7 @@ nOut = rbuDeltaOutputSize(tls, aDelta, nDelta) if nOut < 0 { - Xsqlite3_result_error(tls, context, ts+27858, -1) + Xsqlite3_result_error(tls, context, ts+27905, -1) return } @@ -126014,7 +126072,7 @@ nOut2 = rbuDeltaApply(tls, aOrig, nOrig, aDelta, nDelta, aOut) if nOut2 != nOut { Xsqlite3_free(tls, aOut) - Xsqlite3_result_error(tls, context, ts+27858, -1) + 
Xsqlite3_result_error(tls, context, ts+27905, -1) } else { Xsqlite3_result_blob(tls, context, aOut, nOut, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{Xsqlite3_free}))) } @@ -126115,7 +126173,7 @@ rbuObjIterClearStatements(tls, pIter) if (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx == uintptr(0) { rc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+27879, uintptr(0), uintptr(0), p+64) + ts+27926, uintptr(0), uintptr(0), p+64) } if rc == SQLITE_OK { @@ -126179,7 +126237,7 @@ Xsqlite3_result_text(tls, pCtx, zIn, -1, uintptr(0)) } } else { - if libc.Xstrlen(tls, zIn) > uint64(4) && libc.Xmemcmp(tls, ts+25049, zIn, uint64(4)) == 0 { + if libc.Xstrlen(tls, zIn) > uint64(4) && libc.Xmemcmp(tls, ts+25096, zIn, uint64(4)) == 0 { var i int32 for i = 4; int32(*(*int8)(unsafe.Pointer(zIn + uintptr(i)))) >= '0' && int32(*(*int8)(unsafe.Pointer(zIn + uintptr(i)))) <= '9'; i++ { } @@ -126200,16 +126258,16 @@ rc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, pIter, p+64, Xsqlite3_mprintf(tls, - ts+28050, libc.VaList(bp, func() uintptr { + ts+28097, libc.VaList(bp, func() uintptr { if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - return ts + 28200 + return ts + 28247 } return ts + 1547 }()))) if rc == SQLITE_OK { rc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+8, p+64, - ts+28241) + ts+28288) } (*RbuObjIter)(unsafe.Pointer(pIter)).FbCleanup = 1 @@ -126325,7 +126383,7 @@ (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+32, p+64, Xsqlite3_mprintf(tls, - ts+28366, libc.VaList(bp, zTab))) + ts+28413, libc.VaList(bp, zTab))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc != SQLITE_OK || Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 32))) != SQLITE_ROW) { goto __1 } @@ -126343,7 +126401,7 @@ *(*int32)(unsafe.Pointer(piTnum)) = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 32)), 1) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+32+1*8, p+64, - Xsqlite3_mprintf(tls, ts+28485, libc.VaList(bp+8, zTab))) + Xsqlite3_mprintf(tls, ts+28532, libc.VaList(bp+8, zTab))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc != 0) { goto __3 } @@ -126361,7 +126419,7 @@ } (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+32+2*8, p+64, Xsqlite3_mprintf(tls, - ts+28506, libc.VaList(bp+16, zIdx))) + ts+28553, libc.VaList(bp+16, zIdx))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK) { goto __7 } @@ -126384,7 +126442,7 @@ __5: ; (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+32+3*8, p+64, - Xsqlite3_mprintf(tls, ts+28557, libc.VaList(bp+24, zTab))) + Xsqlite3_mprintf(tls, ts+28604, libc.VaList(bp+24, zTab))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK) { goto __10 } @@ -126430,7 +126488,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { libc.Xmemcpy(tls, (*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed, (*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk, uint64(unsafe.Sizeof(U8(0)))*uint64((*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol)) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+16, p+64, - Xsqlite3_mprintf(tls, ts+28578, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) + Xsqlite3_mprintf(tls, ts+28625, libc.VaList(bp, 
(*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) } (*RbuObjIter)(unsafe.Pointer(pIter)).FnIndex = 0 @@ -126445,7 +126503,7 @@ libc.Xmemset(tls, (*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed, 0x01, uint64(unsafe.Sizeof(U8(0)))*uint64((*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol)) } (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+24, p+64, - Xsqlite3_mprintf(tls, ts+28606, libc.VaList(bp+8, zIdx))) + Xsqlite3_mprintf(tls, ts+28653, libc.VaList(bp+8, zIdx))) for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) { var iCid int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 24)), 1) if iCid >= 0 { @@ -126485,7 +126543,7 @@ rbuTableType(tls, p, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, pIter+72, bp+56, pIter+108) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NOTABLE { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+19482, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl)) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+19529, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl)) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc != 0 { return (*Sqlite3rbu)(unsafe.Pointer(p)).Frc @@ -126495,18 +126553,18 @@ } (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp+64, p+64, - Xsqlite3_mprintf(tls, ts+28635, libc.VaList(bp+8, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl))) + Xsqlite3_mprintf(tls, ts+28682, libc.VaList(bp+8, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl))) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { nCol = Xsqlite3_column_count(tls, *(*uintptr)(unsafe.Pointer(bp + 64))) rbuAllocateIterArrays(tls, p, pIter, nCol) } for i = 0; (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && i < nCol; i++ { var zName uintptr = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp + 64)), i) - if Xsqlite3_strnicmp(tls, ts+28654, zName, 4) != 0 { + if Xsqlite3_strnicmp(tls, ts+28701, zName, 4) != 0 { var zCopy uintptr = rbuStrndup(tls, zName, p+56) *(*int32)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FaiSrcOrder + uintptr((*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol)*4)) = (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(libc.PostIncInt32(&(*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol, 1))*8)) = zCopy - } else if 0 == Xsqlite3_stricmp(tls, ts+28659, zName) { + } else if 0 == Xsqlite3_stricmp(tls, ts+28706, zName) { bRbuRowid = 1 } } @@ -126518,18 +126576,18 @@ bRbuRowid != libc.Bool32((*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_VTAB || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE) { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, - ts+28669, libc.VaList(bp+16, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, + ts+28716, libc.VaList(bp+16, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, func() uintptr { if bRbuRowid != 0 { - return ts + 28698 + return ts + 28745 } - return ts + 28711 + return ts + 28758 }())) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+64, p+64, - Xsqlite3_mprintf(tls, ts+28720, 
libc.VaList(bp+32, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) + Xsqlite3_mprintf(tls, ts+28767, libc.VaList(bp+32, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) } for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 64))) { var zName uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 64)), 1) @@ -126543,7 +126601,7 @@ } if i == (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+28742, + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+28789, libc.VaList(bp+40, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zName)) } else { var iPk int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 64)), 5) @@ -126590,7 +126648,7 @@ var i int32 for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { var z uintptr = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) - zList = rbuMPrintf(tls, p, ts+28769, libc.VaList(bp, zList, zSep, z)) + zList = rbuMPrintf(tls, p, ts+28816, libc.VaList(bp, zList, zSep, z)) zSep = ts + 14607 } return zList @@ -126608,7 +126666,7 @@ for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { if int32(*(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(i)))) == iPk { var zCol uintptr = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) - zRet = rbuMPrintf(tls, p, ts+28778, libc.VaList(bp, zRet, zSep, zPre, zCol, zPost)) + zRet = rbuMPrintf(tls, p, ts+28825, libc.VaList(bp, zRet, zSep, zPre, zCol, zPost)) zSep = zSeparator break } @@ -126630,25 +126688,25 @@ if bRowid != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+72, p+64, Xsqlite3_mprintf(tls, - ts+28791, libc.VaList(bp, zWrite, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) + ts+28838, libc.VaList(bp, zWrite, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 72))) { var iMax Sqlite3_int64 = Xsqlite3_column_int64(tls, *(*uintptr)(unsafe.Pointer(bp + 72)), 0) - zRet = rbuMPrintf(tls, p, ts+28823, libc.VaList(bp+16, iMax)) + zRet = rbuMPrintf(tls, p, ts+28870, libc.VaList(bp+16, iMax)) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 72))) } else { - var zOrder uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+1547, ts+14607, ts+28846) - var zSelect uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+28852, ts+28859, ts+4950) + var zOrder uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+1547, ts+14607, ts+28893) + var zSelect uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+28899, ts+28906, ts+4950) var zList uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+1547, ts+14607, ts+1547) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+72, p+64, Xsqlite3_mprintf(tls, - ts+28867, + ts+28914, libc.VaList(bp+24, zSelect, zWrite, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zOrder))) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 72))) { var zVal uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 72)), 0) - zRet = rbuMPrintf(tls, p, ts+28909, libc.VaList(bp+56, zList, zVal)) + zRet 
= rbuMPrintf(tls, p, ts+28956, libc.VaList(bp+56, zList, zVal)) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 72))) } @@ -126690,7 +126748,7 @@ *(*uintptr)(unsafe.Pointer(bp + 176)) = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+176, p+64, - Xsqlite3_mprintf(tls, ts+28606, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx))) + Xsqlite3_mprintf(tls, ts+28653, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx))) __1: if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 176)))) { goto __2 @@ -126725,7 +126783,7 @@ zCol = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) goto __7 __6: - zCol = ts + 28929 + zCol = ts + 28976 __7: ; goto __5 @@ -126733,11 +126791,11 @@ zCol = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(iCid)*8)) __5: ; - zLhs = rbuMPrintf(tls, p, ts+28937, + zLhs = rbuMPrintf(tls, p, ts+28984, libc.VaList(bp+8, zLhs, zSep, zCol, zCollate)) - zOrder = rbuMPrintf(tls, p, ts+28958, + zOrder = rbuMPrintf(tls, p, ts+29005, libc.VaList(bp+40, zOrder, zSep, iCol, zCol, zCollate)) - zSelect = rbuMPrintf(tls, p, ts+28994, + zSelect = rbuMPrintf(tls, p, ts+29041, libc.VaList(bp+80, zSelect, zSep, iCol, zCol)) zSep = ts + 14607 iCol++ @@ -126757,7 +126815,7 @@ *(*uintptr)(unsafe.Pointer(bp + 184)) = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+184, p+64, - Xsqlite3_mprintf(tls, ts+29021, + Xsqlite3_mprintf(tls, ts+29068, libc.VaList(bp+112, zSelect, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zOrder))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 184)))) { goto __13 @@ -126784,7 +126842,7 @@ ; __18: ; - zVector = rbuMPrintf(tls, p, ts+29069, libc.VaList(bp+136, zVector, zSep, zQuoted)) + zVector = rbuMPrintf(tls, p, ts+29116, libc.VaList(bp+136, zVector, zSep, zQuoted)) zSep = ts + 14607 goto __15 __15: @@ -126796,7 +126854,7 @@ if !!(bFailed != 0) { goto __20 } - zRet = rbuMPrintf(tls, p, ts+29076, libc.VaList(bp+160, zLhs, zVector)) + zRet = rbuMPrintf(tls, p, ts+29123, libc.VaList(bp+160, zLhs, zVector)) __20: ; __13: @@ -126829,7 +126887,7 @@ if rc == SQLITE_OK { rc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+200, p+64, - Xsqlite3_mprintf(tls, ts+28606, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx))) + Xsqlite3_mprintf(tls, ts+28653, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx))) } for rc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 200))) { @@ -126841,7 +126899,7 @@ if iCid == -2 { var iSeq int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 200)), 0) - zRet = Xsqlite3_mprintf(tls, ts+29088, libc.VaList(bp+8, zRet, zCom, + zRet = Xsqlite3_mprintf(tls, ts+29135, libc.VaList(bp+8, zRet, zCom, (*RbuSpan)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FaIdxCol+uintptr(iSeq)*16)).FnSpan, (*RbuSpan)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FaIdxCol+uintptr(iSeq)*16)).FzSpan, zCollate)) zType = ts + 1547 } else { @@ -126853,37 +126911,37 @@ zCol = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) } else if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - zCol = ts + 28929 + zCol = ts + 28976 } else { - 
zCol = ts + 28659 + zCol = ts + 28706 } zType = ts + 1112 } else { zCol = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(iCid)*8)) zType = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblType + uintptr(iCid)*8)) } - zRet = Xsqlite3_mprintf(tls, ts+29110, libc.VaList(bp+48, zRet, zCom, zCol, zCollate)) + zRet = Xsqlite3_mprintf(tls, ts+29157, libc.VaList(bp+48, zRet, zCom, zCol, zCollate)) } if (*RbuObjIter)(unsafe.Pointer(pIter)).FbUnique == 0 || Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 200)), 5) != 0 { var zOrder uintptr = func() uintptr { if bDesc != 0 { - return ts + 28846 + return ts + 28893 } return ts + 1547 }() - zImpPK = Xsqlite3_mprintf(tls, ts+29130, + zImpPK = Xsqlite3_mprintf(tls, ts+29177, libc.VaList(bp+80, zImpPK, zCom, nBind, zCol, zOrder)) } - zImpCols = Xsqlite3_mprintf(tls, ts+29151, + zImpCols = Xsqlite3_mprintf(tls, ts+29198, libc.VaList(bp+120, zImpCols, zCom, nBind, zCol, zType, zCollate)) zWhere = Xsqlite3_mprintf(tls, - ts+29184, libc.VaList(bp+168, zWhere, zAnd, nBind, zCol)) + ts+29231, libc.VaList(bp+168, zWhere, zAnd, nBind, zCol)) if zRet == uintptr(0) || zImpPK == uintptr(0) || zImpCols == uintptr(0) || zWhere == uintptr(0) { rc = SQLITE_NOMEM } zCom = ts + 14607 - zAnd = ts + 21518 + zAnd = ts + 21565 nBind++ } @@ -126922,9 +126980,9 @@ for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { if *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed + uintptr(i))) != 0 { var zCol uintptr = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) - zList = Xsqlite3_mprintf(tls, ts+29208, libc.VaList(bp, zList, zS, zObj, zCol)) + zList = Xsqlite3_mprintf(tls, ts+29255, libc.VaList(bp, zList, zS, zObj, zCol)) } else { - zList = Xsqlite3_mprintf(tls, ts+29220, libc.VaList(bp+32, zList, zS)) + zList = Xsqlite3_mprintf(tls, ts+29267, libc.VaList(bp+32, zList, zS)) } zS = ts + 14607 if zList == uintptr(0) { @@ -126934,7 +126992,7 @@ } if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { - zList = rbuMPrintf(tls, p, ts+29229, libc.VaList(bp+48, zList, zObj)) + zList = rbuMPrintf(tls, p, ts+29276, libc.VaList(bp+48, zList, zObj)) } } return zList @@ -126946,18 +127004,18 @@ var zList uintptr = uintptr(0) if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_VTAB || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { - zList = rbuMPrintf(tls, p, ts+29244, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol+1)) + zList = rbuMPrintf(tls, p, ts+29291, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol+1)) } else if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL { var zSep uintptr = ts + 1547 var i int32 for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { if *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(i))) != 0 { - zList = rbuMPrintf(tls, p, ts+29258, libc.VaList(bp+8, zList, zSep, i, i+1)) - zSep = ts + 21518 + zList = rbuMPrintf(tls, p, ts+29305, libc.VaList(bp+8, zList, zSep, i, i+1)) + zSep = ts + 21565 } } zList = rbuMPrintf(tls, p, - ts+29270, libc.VaList(bp+40, zList)) + ts+29317, libc.VaList(bp+40, zList)) } else { var zSep uintptr = ts + 1547 @@ -126965,8 +127023,8 @@ for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { if *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(i))) != 0 { var zCol uintptr = 
*(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) - zList = rbuMPrintf(tls, p, ts+29320, libc.VaList(bp+48, zList, zSep, zCol, i+1)) - zSep = ts + 21518 + zList = rbuMPrintf(tls, p, ts+29367, libc.VaList(bp+48, zList, zSep, zCol, i+1)) + zSep = ts + 21565 } } } @@ -126975,7 +127033,7 @@ func rbuBadControlError(tls *libc.TLS, p uintptr) { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+29333, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+29380, 0) } func rbuObjIterGetSetlist(tls *libc.TLS, p uintptr, pIter uintptr, zMask uintptr) uintptr { @@ -126993,15 +127051,15 @@ for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { var c int8 = *(*int8)(unsafe.Pointer(zMask + uintptr(*(*int32)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FaiSrcOrder + uintptr(i)*4))))) if int32(c) == 'x' { - zList = rbuMPrintf(tls, p, ts+29320, + zList = rbuMPrintf(tls, p, ts+29367, libc.VaList(bp, zList, zSep, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)), i+1)) zSep = ts + 14607 } else if int32(c) == 'd' { - zList = rbuMPrintf(tls, p, ts+29359, + zList = rbuMPrintf(tls, p, ts+29406, libc.VaList(bp+32, zList, zSep, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)), *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)), i+1)) zSep = ts + 14607 } else if int32(c) == 'f' { - zList = rbuMPrintf(tls, p, ts+29389, + zList = rbuMPrintf(tls, p, ts+29436, libc.VaList(bp+72, zList, zSep, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)), *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)), i+1)) zSep = ts + 14607 } @@ -127038,19 +127096,19 @@ var z uintptr = uintptr(0) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - var zSep uintptr = ts + 29426 + var zSep uintptr = ts + 29473 *(*uintptr)(unsafe.Pointer(bp + 56)) = uintptr(0) *(*uintptr)(unsafe.Pointer(bp + 64)) = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+56, p+64, - Xsqlite3_mprintf(tls, ts+28578, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) + Xsqlite3_mprintf(tls, ts+28625, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 56))) { var zOrig uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), 3) if zOrig != 0 && libc.Xstrcmp(tls, zOrig, ts+16148) == 0 { var zIdx uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), 1) if zIdx != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+64, p+64, - Xsqlite3_mprintf(tls, ts+28606, libc.VaList(bp+8, zIdx))) + Xsqlite3_mprintf(tls, ts+28653, libc.VaList(bp+8, zIdx))) } break } @@ -127062,15 +127120,15 @@ var zCol uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 64)), 2) var zDesc uintptr if Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 64)), 3) != 0 { - zDesc = ts + 28846 + zDesc = ts + 28893 } else { zDesc = ts + 1547 } - z = rbuMPrintf(tls, p, ts+29439, libc.VaList(bp+16, z, zSep, zCol, zDesc)) + z = rbuMPrintf(tls, p, ts+29486, libc.VaList(bp+16, z, zSep, zCol, zDesc)) zSep = ts + 14607 } } - z 
= rbuMPrintf(tls, p, ts+29450, libc.VaList(bp+48, z)) + z = rbuMPrintf(tls, p, ts+29497, libc.VaList(bp+48, z)) rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 64))) } return z @@ -127090,7 +127148,7 @@ var zPk uintptr = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+168, p+64, - ts+29454) + ts+29501) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { Xsqlite3_bind_int(tls, *(*uintptr)(unsafe.Pointer(bp + 168)), 1, tnum) if SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 168))) { @@ -127099,7 +127157,7 @@ } if zIdx != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+176, p+64, - Xsqlite3_mprintf(tls, ts+28606, libc.VaList(bp, zIdx))) + Xsqlite3_mprintf(tls, ts+28653, libc.VaList(bp, zIdx))) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 168))) @@ -127109,23 +127167,23 @@ var iCid int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 176)), 1) var bDesc int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 176)), 3) var zCollate uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 176)), 4) - zCols = rbuMPrintf(tls, p, ts+29504, libc.VaList(bp+8, zCols, zComma, + zCols = rbuMPrintf(tls, p, ts+29551, libc.VaList(bp+8, zCols, zComma, iCid, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblType + uintptr(iCid)*8)), zCollate)) - zPk = rbuMPrintf(tls, p, ts+29526, libc.VaList(bp+48, zPk, zComma, iCid, func() uintptr { + zPk = rbuMPrintf(tls, p, ts+29573, libc.VaList(bp+48, zPk, zComma, iCid, func() uintptr { if bDesc != 0 { - return ts + 28846 + return ts + 28893 } return ts + 1547 }())) zComma = ts + 14607 } } - zCols = rbuMPrintf(tls, p, ts+29536, libc.VaList(bp+80, zCols)) + zCols = rbuMPrintf(tls, p, ts+29583, libc.VaList(bp+80, zCols)) rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 176))) Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+88, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6434, 1, tnum)) rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+29551, + ts+29598, libc.VaList(bp+120, zCols, zPk)) Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+136, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6434, 0, 0)) } @@ -127151,13 +127209,13 @@ (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6434, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zCol, uintptr(0), bp+192, uintptr(0), uintptr(0), uintptr(0)) if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_IPK && *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(iCol))) != 0 { - zPk = ts + 29613 + zPk = ts + 29660 } - zSql = rbuMPrintf(tls, p, ts+29626, + zSql = rbuMPrintf(tls, p, ts+29673, libc.VaList(bp+32, zSql, zComma, zCol, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblType + uintptr(iCol)*8)), zPk, *(*uintptr)(unsafe.Pointer(bp + 192)), func() uintptr { if *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabNotNull + uintptr(iCol))) != 0 { - return ts + 29653 + return ts + 29700 } return ts + 1547 }())) @@ -127167,16 +127225,16 @@ if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_WITHOUT_ROWID { var zPk uintptr = rbuWithoutRowidPK(tls, p, pIter) if zPk != 0 { - zSql = rbuMPrintf(tls, p, ts+29663, libc.VaList(bp+88, zSql, zPk)) + zSql = rbuMPrintf(tls, p, ts+29710, libc.VaList(bp+88, zSql, zPk)) } } Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+104, 
(*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6434, 1, tnum)) - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+29670, + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+29717, libc.VaList(bp+136, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zSql, func() uintptr { if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_WITHOUT_ROWID { - return ts + 29702 + return ts + 29749 } return ts + 1547 }())) @@ -127193,7 +127251,7 @@ if zBind != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, pIter+152, p+64, Xsqlite3_mprintf(tls, - ts+29717, + ts+29764, libc.VaList(bp, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zCollist, zRbuRowid, zBind))) } } @@ -127230,7 +127288,7 @@ if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { *(*int32)(unsafe.Pointer(bp + 8)) = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp, p+64, - ts+29774) + ts+29821) } if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { var rc2 int32 @@ -127335,7 +127393,7 @@ var zLimit uintptr = uintptr(0) if nOffset != 0 { - zLimit = Xsqlite3_mprintf(tls, ts+29840, libc.VaList(bp, nOffset)) + zLimit = Xsqlite3_mprintf(tls, ts+29887, libc.VaList(bp, nOffset)) if !(zLimit != 0) { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_NOMEM } @@ -127358,7 +127416,7 @@ Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+8, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6434, 0, 1)) Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+40, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6434, 1, tnum)) rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+29860, + ts+29907, libc.VaList(bp+72, zTbl, *(*uintptr)(unsafe.Pointer(bp + 600)), *(*uintptr)(unsafe.Pointer(bp + 608)))) Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+96, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6434, 0, 0)) @@ -127366,13 +127424,13 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+136, p+64, - Xsqlite3_mprintf(tls, ts+29925, libc.VaList(bp+128, zTbl, zBind))) + Xsqlite3_mprintf(tls, ts+29972, libc.VaList(bp+128, zTbl, zBind))) } if libc.Bool32((*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0)) == 0 && (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+144, p+64, - Xsqlite3_mprintf(tls, ts+29961, libc.VaList(bp+144, zTbl, *(*uintptr)(unsafe.Pointer(bp + 616))))) + Xsqlite3_mprintf(tls, ts+30008, libc.VaList(bp+144, zTbl, *(*uintptr)(unsafe.Pointer(bp + 616))))) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { @@ -127388,7 +127446,7 @@ } zSql = Xsqlite3_mprintf(tls, - ts+29995, + ts+30042, libc.VaList(bp+160, zCollist, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zPart, @@ -127396,9 +127454,9 @@ if zStart != 0 { return func() uintptr { if zPart != 0 { - return ts + 30056 + return ts + 30103 } - return ts + 30060 + return ts + 30107 }() } return ts + 1547 @@ -127407,20 +127465,20 @@ Xsqlite3_free(tls, zStart) } else if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { zSql = Xsqlite3_mprintf(tls, - ts+30066, + ts+30113, libc.VaList(bp+216, zCollist, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zPart, zCollist, zLimit)) } else { zSql = 
Xsqlite3_mprintf(tls, - ts+30127, + ts+30174, libc.VaList(bp+264, zCollist, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zPart, zCollist, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zPart, func() uintptr { if zPart != 0 { - return ts + 30056 + return ts + 30103 } - return ts + 30060 + return ts + 30107 }(), zCollist, zLimit)) } @@ -127457,16 +127515,16 @@ if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_VTAB { return ts + 1547 } - return ts + 30286 + return ts + 30333 }() if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+136, pz, Xsqlite3_mprintf(tls, - ts+30295, + ts+30342, libc.VaList(bp+344, zWrite, zTbl, zCollist, func() uintptr { if bRbuRowid != 0 { - return ts + 30331 + return ts + 30378 } return ts + 1547 }(), zBindings))) @@ -127475,32 +127533,32 @@ if libc.Bool32((*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0)) == 0 && (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+144, pz, Xsqlite3_mprintf(tls, - ts+30341, libc.VaList(bp+384, zWrite, zTbl, zWhere))) + ts+30388, libc.VaList(bp+384, zWrite, zTbl, zWhere))) } if libc.Bool32((*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0)) == 0 && (*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed != 0 { var zRbuRowid uintptr = ts + 1547 if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { - zRbuRowid = ts + 30369 + zRbuRowid = ts + 30416 } rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, - ts+30381, libc.VaList(bp+408, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, func() uintptr { + ts+30428, libc.VaList(bp+408, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, func() uintptr { if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL { - return ts + 30457 + return ts + 30504 } return ts + 1547 }(), (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl)) rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+30474, + ts+30521, libc.VaList(bp+440, zWrite, zTbl, zOldlist, zWrite, zTbl, zOldlist, zWrite, zTbl, zNewlist)) if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+30773, + ts+30820, libc.VaList(bp+512, zWrite, zTbl, zNewlist)) } @@ -127513,9 +127571,9 @@ var zOrder uintptr = uintptr(0) if bRbuRowid != 0 { if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - zRbuRowid = ts + 30872 + zRbuRowid = ts + 30919 } else { - zRbuRowid = ts + 30882 + zRbuRowid = ts + 30929 } } @@ -127528,7 +127586,7 @@ } } if bRbuRowid != 0 { - zOrder = rbuMPrintf(tls, p, ts+28929, 0) + zOrder = rbuMPrintf(tls, p, ts+28976, 0) } else { zOrder = rbuObjIterGetPkList(tls, p, pIter, ts+1547, ts+14607, ts+1547) } @@ -127537,11 +127595,11 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, pIter+128, pz, Xsqlite3_mprintf(tls, - ts+30893, + ts+30940, libc.VaList(bp+536, zCollist, func() uintptr { if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - return ts + 30941 + return ts + 30988 } return ts + 1547 }(), @@ -127554,7 +127612,7 @@ }(), func() uintptr { if zOrder != 0 { - return ts + 22852 + return ts 
+ 22899 } return ts + 1547 }(), zOrder, @@ -127622,9 +127680,9 @@ var zPrefix uintptr = ts + 1547 if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType != RBU_PK_VTAB { - zPrefix = ts + 30286 + zPrefix = ts + 30333 } - zUpdate = Xsqlite3_mprintf(tls, ts+30947, + zUpdate = Xsqlite3_mprintf(tls, ts+30994, libc.VaList(bp, zPrefix, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zSet, zWhere)) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pUp+8, p+64, zUpdate) @@ -127683,7 +127741,7 @@ } *(*int32)(unsafe.Pointer(bp + 16)) = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp+8, p+64, - Xsqlite3_mprintf(tls, ts+30977, libc.VaList(bp, p+48))) + Xsqlite3_mprintf(tls, ts+31024, libc.VaList(bp, p+48))) for *(*int32)(unsafe.Pointer(bp + 16)) == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 8))) { switch Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 8)), 0) { case RBU_STATE_STAGE: @@ -127756,18 +127814,18 @@ Xsqlite3_file_control(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+6434, SQLITE_FCNTL_RBUCNT, p) if (*Sqlite3rbu)(unsafe.Pointer(p)).FzState == uintptr(0) { var zFile uintptr = Xsqlite3_db_filename(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+6434) - (*Sqlite3rbu)(unsafe.Pointer(p)).FzState = rbuMPrintf(tls, p, ts+31007, libc.VaList(bp, zFile, zFile)) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzState = rbuMPrintf(tls, p, ts+31054, libc.VaList(bp, zFile, zFile)) } } if (*Sqlite3rbu)(unsafe.Pointer(p)).FzState != 0 { - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+31035, libc.VaList(bp+16, (*Sqlite3rbu)(unsafe.Pointer(p)).FzState)) + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+31082, libc.VaList(bp+16, (*Sqlite3rbu)(unsafe.Pointer(p)).FzState)) libc.Xmemcpy(tls, p+48, ts+3279, uint64(4)) } else { libc.Xmemcpy(tls, p+48, ts+6434, uint64(4)) } - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+31053, libc.VaList(bp+24, p+48)) + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+31100, libc.VaList(bp+24, p+48)) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { var bOpen int32 = 0 @@ -127807,11 +127865,11 @@ return } (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31119, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31166, 0) } else { var zTarget uintptr var zExtra uintptr = uintptr(0) - if libc.Xstrlen(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu) >= uint64(5) && 0 == libc.Xmemcmp(tls, ts+24199, (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu, uint64(5)) { + if libc.Xstrlen(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu) >= uint64(5) && 0 == libc.Xmemcmp(tls, ts+24246, (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu, uint64(5)) { zExtra = (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu + 5 for *(*int8)(unsafe.Pointer(zExtra)) != 0 { if int32(*(*int8)(unsafe.Pointer(libc.PostIncUintptr(&zExtra, 1)))) == '?' 
{ @@ -127823,13 +127881,13 @@ } } - zTarget = Xsqlite3_mprintf(tls, ts+31151, + zTarget = Xsqlite3_mprintf(tls, ts+31198, libc.VaList(bp+32, Xsqlite3_db_filename(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+6434), func() uintptr { if zExtra == uintptr(0) { return ts + 1547 } - return ts + 31183 + return ts + 31230 }(), func() uintptr { if zExtra == uintptr(0) { return ts + 1547 @@ -127848,21 +127906,21 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_create_function(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+31185, -1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { + ts+31232, -1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rbuTmpInsertFunc})), uintptr(0), uintptr(0)) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_create_function(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+31200, 2, SQLITE_UTF8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + ts+31247, 2, SQLITE_UTF8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rbuFossilDeltaFunc})), uintptr(0), uintptr(0)) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_create_function(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, - ts+31217, -1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { + ts+31264, -1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rbuTargetNameFunc})), uintptr(0), uintptr(0)) } @@ -127870,7 +127928,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_file_control(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6434, SQLITE_FCNTL_RBU, p) } - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31233, 0) + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31280, 0) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_file_control(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6434, SQLITE_FCNTL_RBU, p) @@ -127878,7 +127936,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_NOTFOUND { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31261, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31308, 0) } } @@ -127907,14 +127965,14 @@ if pState == uintptr(0) { (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage = 0 if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31233, uintptr(0), uintptr(0), uintptr(0)) + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31280, uintptr(0), uintptr(0), uintptr(0)) } } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { var rc2 int32 (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage = RBU_STAGE_CAPTURE - rc2 = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31279, uintptr(0), uintptr(0), uintptr(0)) + rc2 = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31326, uintptr(0), uintptr(0), uintptr(0)) if rc2 != SQLITE_NOTICE { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = rc2 } @@ -128040,7 +128098,7 @@ func rbuExclusiveCheckpoint(tls *libc.TLS, db uintptr) int32 { var zUri uintptr = Xsqlite3_db_filename(tls, db, uintptr(0)) - return 
Xsqlite3_uri_boolean(tls, zUri, ts+31314, 0) + return Xsqlite3_uri_boolean(tls, zUri, ts+31361, 0) } func rbuMoveOalFile(tls *libc.TLS, p uintptr) { @@ -128055,8 +128113,8 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { zMove = Xsqlite3_db_filename(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+6434) } - zOal = Xsqlite3_mprintf(tls, ts+31339, libc.VaList(bp, zMove)) - zWal = Xsqlite3_mprintf(tls, ts+31346, libc.VaList(bp+8, zMove)) + zOal = Xsqlite3_mprintf(tls, ts+31386, libc.VaList(bp, zMove)) + zWal = Xsqlite3_mprintf(tls, ts+31393, libc.VaList(bp+8, zMove)) if zWal == uintptr(0) || zOal == uintptr(0) { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_NOMEM @@ -128173,7 +128231,7 @@ (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx == uintptr(0) && (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_IPK && *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(i))) != 0 && Xsqlite3_column_type(tls, (*RbuObjIter)(unsafe.Pointer(pIter)).FpSelect, i) == SQLITE_NULL { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_MISMATCH - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+23837, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+23884, 0) return } @@ -128266,7 +128324,7 @@ var iCookie int32 = 1000000 (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, dbread, bp+8, p+64, - ts+31353) + ts+31400) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { if SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 8))) { iCookie = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 8)), 0) @@ -128274,7 +128332,7 @@ rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 8))) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31375, libc.VaList(bp, iCookie+1)) + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31422, libc.VaList(bp, iCookie+1)) } } } @@ -128295,7 +128353,7 @@ rc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp+168, p+64, Xsqlite3_mprintf(tls, - ts+31402, + ts+31449, libc.VaList(bp, p+48, RBU_STATE_STAGE, eStage, RBU_STATE_TBL, (*Sqlite3rbu)(unsafe.Pointer(p)).Fobjiter.FzTbl, @@ -128325,9 +128383,9 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { *(*uintptr)(unsafe.Pointer(bp + 24)) = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp+24, p+64, - Xsqlite3_mprintf(tls, ts+31560, libc.VaList(bp, zPragma))) + Xsqlite3_mprintf(tls, ts+31607, libc.VaList(bp, zPragma))) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) { - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31575, + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31622, libc.VaList(bp+8, zPragma, Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 24)), 0))) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 24))) @@ -128341,10 +128399,10 @@ *(*uintptr)(unsafe.Pointer(bp)) = uintptr(0) *(*uintptr)(unsafe.Pointer(bp + 8)) = uintptr(0) - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31595, uintptr(0), uintptr(0), p+64) + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31642, uintptr(0), uintptr(0), p+64) if 
(*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp, p+64, - ts+31620) + ts+31667) } for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp))) == SQLITE_ROW { @@ -128358,12 +128416,12 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp, p+64, - ts+31728) + ts+31775) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+8, p+64, - ts+31793) + ts+31840) } for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp))) == SQLITE_ROW { @@ -128375,7 +128433,7 @@ (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_reset(tls, *(*uintptr)(unsafe.Pointer(bp + 8))) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31837, uintptr(0), uintptr(0), p+64) + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31884, uintptr(0), uintptr(0), p+64) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp))) @@ -128403,7 +128461,7 @@ if (*RbuObjIter)(unsafe.Pointer(pIter)).FbCleanup != 0 { if libc.Bool32((*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0)) == 0 && (*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed != 0 { rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, - ts+31862, libc.VaList(bp, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl)) + ts+31909, libc.VaList(bp, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl)) } } else { rbuObjIterPrepareAll(tls, p, pIter, 0) @@ -128525,7 +128583,7 @@ if rc == SQLITE_OK && !(int32((*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl) != 0) { rc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31890, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31937, 0) } if rc == SQLITE_OK { @@ -128541,7 +128599,7 @@ bp := tls.Alloc(16) defer tls.Free(16) - var zOal uintptr = rbuMPrintf(tls, p, ts+31339, libc.VaList(bp, (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget)) + var zOal uintptr = rbuMPrintf(tls, p, ts+31386, libc.VaList(bp, (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget)) if zOal != 0 { *(*uintptr)(unsafe.Pointer(bp + 8)) = uintptr(0) Xsqlite3_file_control(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6434, SQLITE_FCNTL_VFS_POINTER, bp+8) @@ -128558,7 +128616,7 @@ defer tls.Free(76) Xsqlite3_randomness(tls, int32(unsafe.Sizeof(int32(0))), bp+8) - Xsqlite3_snprintf(tls, int32(unsafe.Sizeof([64]int8{})), bp+12, ts+31915, libc.VaList(bp, *(*int32)(unsafe.Pointer(bp + 8)))) + Xsqlite3_snprintf(tls, int32(unsafe.Sizeof([64]int8{})), bp+12, ts+31962, libc.VaList(bp, *(*int32)(unsafe.Pointer(bp + 8)))) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3rbu_create_vfs(tls, bp+12, uintptr(0)) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { var pVfs uintptr = Xsqlite3_vfs_find(tls, bp+12) @@ -128592,7 +128650,7 @@ rc = prepareFreeAndCollectError(tls, db, bp+8, bp+16, Xsqlite3_mprintf(tls, - ts+31926, libc.VaList(bp, Xsqlite3_value_text(tls, *(*uintptr)(unsafe.Pointer(apVal)))))) + ts+31973, libc.VaList(bp, Xsqlite3_value_text(tls, *(*uintptr)(unsafe.Pointer(apVal)))))) if rc != SQLITE_OK { Xsqlite3_result_error(tls, pCtx, 
*(*uintptr)(unsafe.Pointer(bp + 16)), -1) } else { @@ -128622,13 +128680,13 @@ (*Sqlite3rbu)(unsafe.Pointer(p)).FnPhaseOneStep = int64(-1) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_create_function(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, - ts+31998, 1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { + ts+32045, 1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rbuIndexCntFunc})), uintptr(0), uintptr(0)) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp, p+64, - ts+32012) + ts+32059) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { if SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp))) { @@ -128639,7 +128697,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && bExists != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp, p+64, - ts+32069) + ts+32116) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { if SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp))) { (*Sqlite3rbu)(unsafe.Pointer(p)).FnPhaseOneStep = Xsqlite3_column_int64(tls, *(*uintptr)(unsafe.Pointer(bp)), 0) @@ -128713,7 +128771,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && (*Rbu_file)(unsafe.Pointer((*Sqlite3rbu)(unsafe.Pointer(p)).FpTargetFd)).FpWalFd != 0 { if (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage == RBU_STAGE_OAL { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32143, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32190, 0) } else if (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage == RBU_STAGE_MOVE { (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage = RBU_STAGE_CKPT (*Sqlite3rbu)(unsafe.Pointer(p)).FnStep = 0 @@ -128731,12 +128789,12 @@ }() if (*Rbu_file)(unsafe.Pointer(pFd)).FiCookie != (*RbuState)(unsafe.Pointer(pState)).FiCookie { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_BUSY - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32175, + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32222, libc.VaList(bp+8, func() uintptr { if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - return ts + 32207 + return ts + 32254 } - return ts + 32214 + return ts + 32261 }())) } } @@ -128760,14 +128818,14 @@ } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, db, ts+32221, uintptr(0), uintptr(0), p+64) + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, db, ts+32268, uintptr(0), uintptr(0), p+64) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { var frc int32 = Xsqlite3_file_control(tls, db, ts+6434, SQLITE_FCNTL_ZIPVFS, uintptr(0)) if frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, - db, ts+32237, uintptr(0), uintptr(0), p+64) + db, ts+32284, uintptr(0), uintptr(0), p+64) } } @@ -128821,7 +128879,7 @@ } if zState != 0 { var n int32 = int32(libc.Xstrlen(tls, zState)) - if n >= 7 && 0 == libc.Xmemcmp(tls, ts+32261, zState+uintptr(n-7), uint64(7)) { + if n >= 7 && 0 == libc.Xmemcmp(tls, ts+32308, zState+uintptr(n-7), uint64(7)) { return rbuMisuseError(tls) } } @@ -128848,7 +128906,7 @@ var i uint32 var nErrmsg Size_t = libc.Xstrlen(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg) for i = uint32(0); Size_t(i) < nErrmsg-uint64(8); i++ { - if libc.Xmemcmp(tls, 
(*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg+uintptr(i), ts+30286, uint64(8)) == 0 { + if libc.Xmemcmp(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg+uintptr(i), ts+30333, uint64(8)) == 0 { var nDel int32 = 8 for int32(*(*int8)(unsafe.Pointer((*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg + uintptr(i+uint32(nDel))))) >= '0' && int32(*(*int8)(unsafe.Pointer((*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg + uintptr(i+uint32(nDel))))) <= '9' { nDel++ @@ -128884,7 +128942,7 @@ rbuObjIterFinalize(tls, p+80) if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) && (*Sqlite3rbu)(unsafe.Pointer(p)).Frc != SQLITE_OK && (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu != 0 { - var rc2 int32 = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+32269, uintptr(0), uintptr(0), uintptr(0)) + var rc2 int32 = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+32316, uintptr(0), uintptr(0), uintptr(0)) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_DONE && rc2 != SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = rc2 } @@ -129003,12 +129061,12 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { zBegin = ts + 14496 } else { - zBegin = ts + 32221 + zBegin = ts + 32268 } rc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, zBegin, uintptr(0), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { - rc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+32221, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+32268, uintptr(0), uintptr(0), uintptr(0)) } } @@ -129354,7 +129412,7 @@ })(unsafe.Pointer(&struct{ uintptr }{xControl})).f(tls, (*Rbu_file)(unsafe.Pointer(p)).FpReal, SQLITE_FCNTL_ZIPVFS, bp+16) if rc == SQLITE_OK { rc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(pRbu)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32296, 0) + (*Sqlite3rbu)(unsafe.Pointer(pRbu)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32343, 0) } else if rc == SQLITE_NOTFOUND { (*Sqlite3rbu)(unsafe.Pointer(pRbu)).FpTargetFd = p (*Rbu_file)(unsafe.Pointer(p)).FpRbu = pRbu @@ -129379,7 +129437,7 @@ if rc == SQLITE_OK && op == SQLITE_FCNTL_VFSNAME { var pRbuVfs uintptr = (*Rbu_file)(unsafe.Pointer(p)).FpRbuVfs var zIn uintptr = *(*uintptr)(unsafe.Pointer(pArg)) - var zOut uintptr = Xsqlite3_mprintf(tls, ts+32319, libc.VaList(bp, (*Rbu_vfs)(unsafe.Pointer(pRbuVfs)).Fbase.FzName, zIn)) + var zOut uintptr = Xsqlite3_mprintf(tls, ts+32366, libc.VaList(bp, (*Rbu_vfs)(unsafe.Pointer(pRbuVfs)).Fbase.FzName, zIn)) *(*uintptr)(unsafe.Pointer(pArg)) = zOut if zOut == uintptr(0) { rc = SQLITE_NOMEM @@ -129539,7 +129597,7 @@ } if oflags&SQLITE_OPEN_MAIN_DB != 0 && - Xsqlite3_uri_boolean(tls, zName, ts+32330, 0) != 0 { + Xsqlite3_uri_boolean(tls, zName, ts+32377, 0) != 0 { oflags = SQLITE_OPEN_TEMP_DB | SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE | SQLITE_OPEN_EXCLUSIVE | SQLITE_OPEN_DELETEONCLOSE zOpen = uintptr(0) } @@ -130368,7 +130426,7 @@ rc = Xsqlite3_table_column_metadata(tls, db, zDb, zThis, uintptr(0), uintptr(0), uintptr(0), uintptr(0), uintptr(0), uintptr(0)) if rc == SQLITE_OK { zPragma = Xsqlite3_mprintf(tls, - ts+32341, 0) + ts+32388, 0) } else if rc == SQLITE_ERROR { zPragma = Xsqlite3_mprintf(tls, ts+1547, 0) } else { @@ -130381,7 +130439,7 @@ return rc } } else { - zPragma = Xsqlite3_mprintf(tls, ts+32462, libc.VaList(bp, zDb, zThis)) + zPragma = Xsqlite3_mprintf(tls, ts+32509, libc.VaList(bp, zDb, zThis)) } if !(zPragma != 0) { *(*uintptr)(unsafe.Pointer(pazCol)) = uintptr(0) @@ -131061,9 +131119,9 @@ for i = 0; i < nCol; i++ { if 
*(*U8)(unsafe.Pointer(abPK + uintptr(i))) != 0 { - zRet = Xsqlite3_mprintf(tls, ts+32491, + zRet = Xsqlite3_mprintf(tls, ts+32538, libc.VaList(bp, zRet, zSep, zDb1, zTab, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*8)), zDb2, zTab, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*8)))) - zSep = ts + 21518 + zSep = ts + 21565 if zRet == uintptr(0) { break } @@ -131086,9 +131144,9 @@ if int32(*(*U8)(unsafe.Pointer(abPK + uintptr(i)))) == 0 { bHave = 1 zRet = Xsqlite3_mprintf(tls, - ts+32525, + ts+32572, libc.VaList(bp, zRet, zSep, zDb1, zTab, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*8)), zDb2, zTab, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*8)))) - zSep = ts + 32566 + zSep = ts + 32613 if zRet == uintptr(0) { break } @@ -131096,7 +131154,7 @@ } if bHave == 0 { - zRet = Xsqlite3_mprintf(tls, ts+7523, 0) + zRet = Xsqlite3_mprintf(tls, ts+7512, 0) } return zRet @@ -131107,7 +131165,7 @@ defer tls.Free(40) var zRet uintptr = Xsqlite3_mprintf(tls, - ts+32571, + ts+32618, libc.VaList(bp, zDb1, zTbl, zDb2, zTbl, zExpr)) return zRet } @@ -131150,7 +131208,7 @@ rc = SQLITE_NOMEM } else { var zStmt uintptr = Xsqlite3_mprintf(tls, - ts+32649, + ts+32696, libc.VaList(bp, (*Sqlite3_session)(unsafe.Pointer(pSession)).FzDb, (*SessionTable)(unsafe.Pointer(pTab)).FzName, zFrom, (*SessionTable)(unsafe.Pointer(pTab)).FzName, zExpr, zExpr2)) if zStmt == uintptr(0) { rc = SQLITE_NOMEM @@ -131277,7 +131335,7 @@ if !(pzErrMsg != 0) { goto __16 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+32702, 0) + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+32749, 0) __16: ; rc = SQLITE_SCHEMA @@ -131753,7 +131811,7 @@ if 0 == Xsqlite3_stricmp(tls, ts+11341, zTab) { zSql = Xsqlite3_mprintf(tls, - ts+32729, libc.VaList(bp, zDb)) + ts+32776, libc.VaList(bp, zDb)) if zSql == uintptr(0) { *(*int32)(unsafe.Pointer(bp + 24)) = SQLITE_NOMEM } @@ -131762,18 +131820,18 @@ var zSep uintptr = ts + 1547 *(*SessionBuffer)(unsafe.Pointer(bp + 8)) = SessionBuffer{} - sessionAppendStr(tls, bp+8, ts+32839, bp+24) + sessionAppendStr(tls, bp+8, ts+32886, bp+24) sessionAppendIdent(tls, bp+8, zDb, bp+24) sessionAppendStr(tls, bp+8, ts+1560, bp+24) sessionAppendIdent(tls, bp+8, zTab, bp+24) - sessionAppendStr(tls, bp+8, ts+32854, bp+24) + sessionAppendStr(tls, bp+8, ts+32901, bp+24) for i = 0; i < nCol; i++ { if *(*U8)(unsafe.Pointer(abPK + uintptr(i))) != 0 { sessionAppendStr(tls, bp+8, zSep, bp+24) sessionAppendIdent(tls, bp+8, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*8)), bp+24) - sessionAppendStr(tls, bp+8, ts+32862, bp+24) + sessionAppendStr(tls, bp+8, ts+32909, bp+24) sessionAppendInteger(tls, bp+8, i+1, bp+24) - zSep = ts + 21518 + zSep = ts + 21565 } } zSql = (*SessionBuffer)(unsafe.Pointer(bp + 8)).FaBuf @@ -131882,7 +131940,7 @@ if (*Sqlite3_session)(unsafe.Pointer(pSession)).Frc != 0 { return (*Sqlite3_session)(unsafe.Pointer(pSession)).Frc } - *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3_exec(tls, (*Sqlite3_session)(unsafe.Pointer(pSession)).Fdb, ts+32868, uintptr(0), uintptr(0), uintptr(0)) + *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3_exec(tls, (*Sqlite3_session)(unsafe.Pointer(pSession)).Fdb, ts+32915, uintptr(0), uintptr(0), uintptr(0)) if *(*int32)(unsafe.Pointer(bp + 40)) != SQLITE_OK { return *(*int32)(unsafe.Pointer(bp + 40)) } @@ -131974,7 +132032,7 @@ } Xsqlite3_free(tls, (*SessionBuffer)(unsafe.Pointer(bp+24)).FaBuf) - Xsqlite3_exec(tls, db, ts+32888, uintptr(0), uintptr(0), uintptr(0)) + Xsqlite3_exec(tls, db, ts+32935, uintptr(0), uintptr(0), uintptr(0)) 
Xsqlite3_mutex_leave(tls, Xsqlite3_db_mutex(tls, db)) return *(*int32)(unsafe.Pointer(bp + 40)) } @@ -132237,7 +132295,7 @@ rc = sessionInputBuffer(tls, pIn, 9) if rc == SQLITE_OK { if (*SessionInput)(unsafe.Pointer(pIn)).FiNext >= (*SessionInput)(unsafe.Pointer(pIn)).FnData { - rc = Xsqlite3CorruptError(tls, 219078) + rc = Xsqlite3CorruptError(tls, 219169) } else { eType = int32(*(*U8)(unsafe.Pointer((*SessionInput)(unsafe.Pointer(pIn)).FaData + uintptr(libc.PostIncInt32(&(*SessionInput)(unsafe.Pointer(pIn)).FiNext, 1))))) @@ -132260,7 +132318,7 @@ rc = sessionInputBuffer(tls, pIn, *(*int32)(unsafe.Pointer(bp))) if rc == SQLITE_OK { if *(*int32)(unsafe.Pointer(bp)) < 0 || *(*int32)(unsafe.Pointer(bp)) > (*SessionInput)(unsafe.Pointer(pIn)).FnData-(*SessionInput)(unsafe.Pointer(pIn)).FiNext { - rc = Xsqlite3CorruptError(tls, 219098) + rc = Xsqlite3CorruptError(tls, 219189) } else { var enc U8 = func() uint8 { if eType == SQLITE_TEXT { @@ -132302,7 +132360,7 @@ nRead = nRead + sessionVarintGet(tls, (*SessionInput)(unsafe.Pointer(pIn)).FaData+uintptr((*SessionInput)(unsafe.Pointer(pIn)).FiNext+nRead), bp) if *(*int32)(unsafe.Pointer(bp)) < 0 || *(*int32)(unsafe.Pointer(bp)) > 65536 { - rc = Xsqlite3CorruptError(tls, 219152) + rc = Xsqlite3CorruptError(tls, 219243) } else { rc = sessionInputBuffer(tls, pIn, nRead+*(*int32)(unsafe.Pointer(bp))+100) nRead = nRead + *(*int32)(unsafe.Pointer(bp)) @@ -132363,7 +132421,7 @@ (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Ftblhdr.FnBuf = 0 sessionBufferGrow(tls, p+72, int64(nByte), bp+4) } else { - *(*int32)(unsafe.Pointer(bp + 4)) = Xsqlite3CorruptError(tls, 219240) + *(*int32)(unsafe.Pointer(bp + 4)) = Xsqlite3CorruptError(tls, 219331) } } @@ -132437,13 +132495,13 @@ } if (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FzTab == uintptr(0) || (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FbPatchset != 0 && (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FbInvert != 0 { - return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219326)) + return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219417)) } (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fop = int32(op) (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FbIndirect = int32(*(*U8)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fin.FaData + uintptr(libc.PostIncInt32(&(*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fin.FiNext, 1))))) if (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fop != SQLITE_UPDATE && (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fop != SQLITE_DELETE && (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fop != SQLITE_INSERT { - return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219332)) + return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219423)) } if paRec != 0 { @@ -132505,7 +132563,7 @@ if *(*U8)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FabPK + uintptr(i))) != 0 { *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FapValue + uintptr(i)*8)) = *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FapValue + uintptr(i+(*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FnCol)*8)) if *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FapValue + uintptr(i)*8)) == uintptr(0) { - return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219376)) + return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219467)) } *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FapValue + uintptr(i+(*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FnCol)*8)) = uintptr(0) } @@ -132878,7 
+132936,7 @@ goto __6 __11: - *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3CorruptError(tls, 219741) + *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3CorruptError(tls, 219832) goto finished_invert __6: ; @@ -133057,34 +133115,34 @@ (*SessionUpdate)(unsafe.Pointer(pUp)).FaMask = pUp + 1*24 libc.Xmemcpy(tls, (*SessionUpdate)(unsafe.Pointer(pUp)).FaMask, (*SessionApplyCtx)(unsafe.Pointer(p)).FaUpdateMask, uint64(nU32)*uint64(unsafe.Sizeof(U32(0)))) - sessionAppendStr(tls, bp, ts+32906, bp+16) + sessionAppendStr(tls, bp, ts+32953, bp+16) sessionAppendIdent(tls, bp, (*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FzTab, bp+16) - sessionAppendStr(tls, bp, ts+32919, bp+16) + sessionAppendStr(tls, bp, ts+32966, bp+16) for ii = 0; ii < (*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FnCol; ii++ { if int32(*(*U8)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FabPK + uintptr(ii)))) == 0 && *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FapValue + uintptr((*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FnCol+ii)*8)) != 0 { sessionAppendStr(tls, bp, zSep, bp+16) sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(ii)*8)), bp+16) - sessionAppendStr(tls, bp, ts+32925, bp+16) + sessionAppendStr(tls, bp, ts+32972, bp+16) sessionAppendInteger(tls, bp, ii*2+1, bp+16) zSep = ts + 14607 } } zSep = ts + 1547 - sessionAppendStr(tls, bp, ts+32854, bp+16) + sessionAppendStr(tls, bp, ts+32901, bp+16) for ii = 0; ii < (*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FnCol; ii++ { if *(*U8)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FabPK + uintptr(ii))) != 0 || bPatchset == 0 && *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FapValue + uintptr(ii)*8)) != 0 { sessionAppendStr(tls, bp, zSep, bp+16) if bStat1 != 0 && ii == 1 { sessionAppendStr(tls, bp, - ts+32930, bp+16) + ts+32977, bp+16) } else { sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(ii)*8)), bp+16) - sessionAppendStr(tls, bp, ts+32862, bp+16) + sessionAppendStr(tls, bp, ts+32909, bp+16) sessionAppendInteger(tls, bp, ii*2+2, bp+16) } - zSep = ts + 21518 + zSep = ts + 21565 } } @@ -133136,34 +133194,34 @@ *(*SessionBuffer)(unsafe.Pointer(bp)) = SessionBuffer{} var nPk int32 = 0 - sessionAppendStr(tls, bp, ts+33005, bp+16) + sessionAppendStr(tls, bp, ts+33052, bp+16) sessionAppendIdent(tls, bp, zTab, bp+16) - sessionAppendStr(tls, bp, ts+32854, bp+16) + sessionAppendStr(tls, bp, ts+32901, bp+16) for i = 0; i < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol; i++ { if *(*U8)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FabPK + uintptr(i))) != 0 { nPk++ sessionAppendStr(tls, bp, zSep, bp+16) sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(i)*8)), bp+16) - sessionAppendStr(tls, bp, ts+32925, bp+16) + sessionAppendStr(tls, bp, ts+32972, bp+16) sessionAppendInteger(tls, bp, i+1, bp+16) - zSep = ts + 21518 + zSep = ts + 21565 } } if nPk < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol { - sessionAppendStr(tls, bp, ts+33023, bp+16) + sessionAppendStr(tls, bp, ts+33070, bp+16) sessionAppendInteger(tls, bp, (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol+1, bp+16) - sessionAppendStr(tls, bp, ts+32566, bp+16) + sessionAppendStr(tls, bp, ts+32613, bp+16) zSep = ts + 1547 for i = 0; i < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol; i++ { if !(int32(*(*U8)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FabPK + 
uintptr(i)))) != 0) { sessionAppendStr(tls, bp, zSep, bp+16) sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(i)*8)), bp+16) - sessionAppendStr(tls, bp, ts+32862, bp+16) + sessionAppendStr(tls, bp, ts+32909, bp+16) sessionAppendInteger(tls, bp, i+1, bp+16) - zSep = ts + 33031 + zSep = ts + 33078 } } sessionAppendStr(tls, bp, ts+4950, bp+16) @@ -133190,9 +133248,9 @@ var i int32 *(*SessionBuffer)(unsafe.Pointer(bp)) = SessionBuffer{} - sessionAppendStr(tls, bp, ts+33036, bp+16) + sessionAppendStr(tls, bp, ts+33083, bp+16) sessionAppendIdent(tls, bp, zTab, bp+16) - sessionAppendStr(tls, bp, ts+21524, bp+16) + sessionAppendStr(tls, bp, ts+21571, bp+16) for i = 0; i < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol; i++ { if i != 0 { sessionAppendStr(tls, bp, ts+14607, bp+16) @@ -133200,9 +133258,9 @@ sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(i)*8)), bp+16) } - sessionAppendStr(tls, bp, ts+33054, bp+16) + sessionAppendStr(tls, bp, ts+33101, bp+16) for i = 1; i < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol; i++ { - sessionAppendStr(tls, bp, ts+33065, bp+16) + sessionAppendStr(tls, bp, ts+33112, bp+16) } sessionAppendStr(tls, bp, ts+4950, bp+16) @@ -133221,11 +133279,11 @@ var rc int32 = sessionSelectRow(tls, db, ts+11341, p) if rc == SQLITE_OK { rc = sessionPrepare(tls, db, p+16, - ts+33069) + ts+33116) } if rc == SQLITE_OK { rc = sessionPrepare(tls, db, p+8, - ts+33182) + ts+33229) } return rc } @@ -133253,7 +133311,7 @@ f func(*libc.TLS, uintptr, int32, uintptr) int32 })(unsafe.Pointer(&struct{ uintptr }{xValue})).f(tls, pIter, i, bp) if *(*uintptr)(unsafe.Pointer(bp)) == uintptr(0) { - rc = Xsqlite3CorruptError(tls, 220219) + rc = Xsqlite3CorruptError(tls, 220310) } else { rc = sessionBindValue(tls, pStmt, i+1, *(*uintptr)(unsafe.Pointer(bp))) } @@ -133506,7 +133564,7 @@ if *(*int32)(unsafe.Pointer(bp + 4)) != 0 { rc = sessionApplyOneOp(tls, pIter, pApply, xConflict, pCtx, uintptr(0), uintptr(0)) } else if *(*int32)(unsafe.Pointer(bp)) != 0 { - rc = Xsqlite3_exec(tls, db, ts+33326, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33373, uintptr(0), uintptr(0), uintptr(0)) if rc == SQLITE_OK { rc = sessionBindRow(tls, pIter, *(*uintptr)(unsafe.Pointer(&struct { @@ -133522,7 +133580,7 @@ rc = sessionApplyOneOp(tls, pIter, pApply, xConflict, pCtx, uintptr(0), uintptr(0)) } if rc == SQLITE_OK { - rc = Xsqlite3_exec(tls, db, ts+33347, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33394, uintptr(0), uintptr(0), uintptr(0)) } } } @@ -133595,10 +133653,10 @@ (*SessionApplyCtx)(unsafe.Pointer(bp + 48)).FbInvertConstraints = libc.BoolInt32(!!(flags&SQLITE_CHANGESETAPPLY_INVERT != 0)) Xsqlite3_mutex_enter(tls, Xsqlite3_db_mutex(tls, db)) if flags&SQLITE_CHANGESETAPPLY_NOSAVEPOINT == 0 { - rc = Xsqlite3_exec(tls, db, ts+33366, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33413, uintptr(0), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { - rc = Xsqlite3_exec(tls, db, ts+33392, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33439, uintptr(0), uintptr(0), uintptr(0)) } for rc == SQLITE_OK && SQLITE_ROW == Xsqlite3changeset_next(tls, pIter) { Xsqlite3changeset_op(tls, pIter, bp+176, bp+184, bp+188, uintptr(0)) @@ -133657,16 +133715,16 @@ if (*SessionApplyCtx)(unsafe.Pointer(bp+48)).FnCol == 0 { schemaMismatch = 1 Xsqlite3_log(tls, SQLITE_SCHEMA, - ts+33422, libc.VaList(bp+8, 
*(*uintptr)(unsafe.Pointer(bp + 200)))) + ts+33469, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(bp + 200)))) } else if (*SessionApplyCtx)(unsafe.Pointer(bp+48)).FnCol < *(*int32)(unsafe.Pointer(bp + 184)) { schemaMismatch = 1 Xsqlite3_log(tls, SQLITE_SCHEMA, - ts+33466, + ts+33513, libc.VaList(bp+16, *(*uintptr)(unsafe.Pointer(bp + 200)), (*SessionApplyCtx)(unsafe.Pointer(bp+48)).FnCol, *(*int32)(unsafe.Pointer(bp + 184)))) } else if *(*int32)(unsafe.Pointer(bp + 184)) < nMinCol || libc.Xmemcmp(tls, (*SessionApplyCtx)(unsafe.Pointer(bp+48)).FabPK, *(*uintptr)(unsafe.Pointer(bp + 192)), uint64(*(*int32)(unsafe.Pointer(bp + 184)))) != 0 { schemaMismatch = 1 Xsqlite3_log(tls, SQLITE_SCHEMA, - ts+33537, libc.VaList(bp+40, *(*uintptr)(unsafe.Pointer(bp + 200)))) + ts+33584, libc.VaList(bp+40, *(*uintptr)(unsafe.Pointer(bp + 200)))) } else { (*SessionApplyCtx)(unsafe.Pointer(bp + 48)).FnCol = *(*int32)(unsafe.Pointer(bp + 184)) if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(bp + 200)), ts+11341) { @@ -133720,14 +133778,14 @@ } } } - Xsqlite3_exec(tls, db, ts+33597, uintptr(0), uintptr(0), uintptr(0)) + Xsqlite3_exec(tls, db, ts+33644, uintptr(0), uintptr(0), uintptr(0)) if flags&SQLITE_CHANGESETAPPLY_NOSAVEPOINT == 0 { if rc == SQLITE_OK { - rc = Xsqlite3_exec(tls, db, ts+33627, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33674, uintptr(0), uintptr(0), uintptr(0)) } else { - Xsqlite3_exec(tls, db, ts+33651, uintptr(0), uintptr(0), uintptr(0)) - Xsqlite3_exec(tls, db, ts+33627, uintptr(0), uintptr(0), uintptr(0)) + Xsqlite3_exec(tls, db, ts+33698, uintptr(0), uintptr(0), uintptr(0)) + Xsqlite3_exec(tls, db, ts+33674, uintptr(0), uintptr(0), uintptr(0)) } } @@ -134975,7 +135033,7 @@ fts5yy_pop_parser_stack(tls, fts5yypParser) } - sqlite3Fts5ParseError(tls, pParse, ts+33679, 0) + sqlite3Fts5ParseError(tls, pParse, ts+33726, 0) (*Fts5yyParser)(unsafe.Pointer(fts5yypParser)).FpParse = pParse @@ -135263,7 +135321,7 @@ _ = fts5yymajor sqlite3Fts5ParseError(tls, - pParse, ts+33707, libc.VaList(bp, fts5yyminor.Fn, fts5yyminor.Fp)) + pParse, ts+33754, libc.VaList(bp, fts5yyminor.Fn, fts5yyminor.Fp)) (*Fts5yyParser)(unsafe.Pointer(fts5yypParser)).FpParse = pParse @@ -135450,7 +135508,7 @@ if n < 0 { n = int32(libc.Xstrlen(tls, z)) } - (*HighlightContext)(unsafe.Pointer(p)).FzOut = Xsqlite3_mprintf(tls, ts+33738, libc.VaList(bp, (*HighlightContext)(unsafe.Pointer(p)).FzOut, n, z)) + (*HighlightContext)(unsafe.Pointer(p)).FzOut = Xsqlite3_mprintf(tls, ts+33785, libc.VaList(bp, (*HighlightContext)(unsafe.Pointer(p)).FzOut, n, z)) if (*HighlightContext)(unsafe.Pointer(p)).FzOut == uintptr(0) { *(*int32)(unsafe.Pointer(pRc)) = SQLITE_NOMEM } @@ -135518,7 +135576,7 @@ var iCol int32 if nVal != 3 { - var zErr uintptr = ts + 33745 + var zErr uintptr = ts + 33792 Xsqlite3_result_error(tls, pCtx, zErr, -1) return } @@ -135700,7 +135758,7 @@ var nCol int32 if nVal != 5 { - var zErr uintptr = ts + 33795 + var zErr uintptr = ts + 33842 Xsqlite3_result_error(tls, pCtx, zErr, -1) return } @@ -136024,13 +136082,13 @@ defer tls.Free(96) *(*[3]Builtin)(unsafe.Pointer(bp)) = [3]Builtin{ - {FzFunc: ts + 33843, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { + {FzFunc: ts + 33890, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr, int32, uintptr) }{fts5SnippetFunction}))}, - {FzFunc: ts + 33851, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { + {FzFunc: ts + 33898, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr, 
int32, uintptr) }{fts5HighlightFunction}))}, - {FzFunc: ts + 33861, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { + {FzFunc: ts + 33908, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr, int32, uintptr) }{fts5Bm25Function}))}, } @@ -136581,7 +136639,7 @@ *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_OK var nCmd int32 = int32(libc.Xstrlen(tls, zCmd)) - if Xsqlite3_strnicmp(tls, ts+33866, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+33913, zCmd, nCmd) == 0 { var nByte int32 = int32(uint64(unsafe.Sizeof(int32(0))) * uint64(FTS5_MAX_PREFIX_INDEXES)) var p uintptr var bFirst int32 = 1 @@ -136608,14 +136666,14 @@ break } if int32(*(*int8)(unsafe.Pointer(p))) < '0' || int32(*(*int8)(unsafe.Pointer(p))) > '9' { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+33873, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+33920, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR break } if (*Fts5Config)(unsafe.Pointer(pConfig)).FnPrefix == FTS5_MAX_PREFIX_INDEXES { *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, - ts+33904, libc.VaList(bp, FTS5_MAX_PREFIX_INDEXES)) + ts+33951, libc.VaList(bp, FTS5_MAX_PREFIX_INDEXES)) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR break } @@ -136626,7 +136684,7 @@ } if nPre <= 0 || nPre >= 1000 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+33937, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+33984, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR break } @@ -136639,7 +136697,7 @@ return *(*int32)(unsafe.Pointer(bp + 40)) } - if Xsqlite3_strnicmp(tls, ts+33974, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+34021, zCmd, nCmd) == 0 { var p uintptr = zArg var nArg Sqlite3_int64 = Sqlite3_int64(libc.Xstrlen(tls, zArg) + uint64(1)) var azArg uintptr = sqlite3Fts5MallocZero(tls, bp+40, int64(uint64(unsafe.Sizeof(uintptr(0)))*uint64(nArg))) @@ -136648,7 +136706,7 @@ if azArg != 0 && pSpace != 0 { if (*Fts5Config)(unsafe.Pointer(pConfig)).FpTok != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+33983, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34030, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { for nArg = int64(0); p != 0 && *(*int8)(unsafe.Pointer(p)) != 0; nArg++ { @@ -136667,7 +136725,7 @@ } } if p == uintptr(0) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34016, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34063, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { *(*int32)(unsafe.Pointer(bp + 40)) = sqlite3Fts5GetTokenizer(tls, pGlobal, @@ -136682,14 +136740,14 @@ return *(*int32)(unsafe.Pointer(bp + 40)) } - if Xsqlite3_strnicmp(tls, ts+34050, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+34097, zCmd, nCmd) == 0 { if (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent != FTS5_CONTENT_NORMAL { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34058, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34105, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { if *(*int8)(unsafe.Pointer(zArg)) != 0 { (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent = FTS5_CONTENT_EXTERNAL - (*Fts5Config)(unsafe.Pointer(pConfig)).FzContent = sqlite3Fts5Mprintf(tls, bp+40, ts+34090, libc.VaList(bp+8, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, zArg)) + (*Fts5Config)(unsafe.Pointer(pConfig)).FzContent = sqlite3Fts5Mprintf(tls, bp+40, ts+34137, libc.VaList(bp+8, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, 
zArg)) } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent = FTS5_CONTENT_NONE } @@ -136697,9 +136755,9 @@ return *(*int32)(unsafe.Pointer(bp + 40)) } - if Xsqlite3_strnicmp(tls, ts+34096, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+34143, zCmd, nCmd) == 0 { if (*Fts5Config)(unsafe.Pointer(pConfig)).FzContentRowid != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34110, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34157, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FzContentRowid = sqlite3Fts5Strndup(tls, bp+40, zArg, -1) @@ -136707,9 +136765,9 @@ return *(*int32)(unsafe.Pointer(bp + 40)) } - if Xsqlite3_strnicmp(tls, ts+34148, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+34195, zCmd, nCmd) == 0 { if int32(*(*int8)(unsafe.Pointer(zArg))) != '0' && int32(*(*int8)(unsafe.Pointer(zArg))) != '1' || int32(*(*int8)(unsafe.Pointer(zArg + 1))) != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34159, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34206, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize = libc.Bool32(int32(*(*int8)(unsafe.Pointer(zArg))) == '1') @@ -136721,17 +136779,17 @@ *(*[4]Fts5Enum)(unsafe.Pointer(bp + 48)) = [4]Fts5Enum{ {FzName: ts + 8019, FeVal: FTS5_DETAIL_NONE}, {FzName: ts + 17338}, - {FzName: ts + 34194, FeVal: FTS5_DETAIL_COLUMNS}, + {FzName: ts + 34241, FeVal: FTS5_DETAIL_COLUMNS}, {}, } if libc.AssignPtrInt32(bp+40, fts5ConfigSetEnum(tls, bp+48, zArg, pConfig+92)) != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34202, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34249, 0) } return *(*int32)(unsafe.Pointer(bp + 40)) } - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34233, libc.VaList(bp+24, nCmd, zCmd)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34280, libc.VaList(bp+24, nCmd, zCmd)) return SQLITE_ERROR } @@ -136778,15 +136836,15 @@ defer tls.Free(16) var rc int32 = SQLITE_OK - if 0 == Xsqlite3_stricmp(tls, zCol, ts+22184) || + if 0 == Xsqlite3_stricmp(tls, zCol, ts+22231) || 0 == Xsqlite3_stricmp(tls, zCol, ts+16260) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34261, libc.VaList(bp, zCol)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34308, libc.VaList(bp, zCol)) rc = SQLITE_ERROR } else if zArg != 0 { - if 0 == Xsqlite3_stricmp(tls, zArg, ts+34291) { + if 0 == Xsqlite3_stricmp(tls, zArg, ts+34338) { *(*U8)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(p)).FabUnindexed + uintptr((*Fts5Config)(unsafe.Pointer(p)).FnCol))) = U8(1) } else { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34301, libc.VaList(bp+8, zArg)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34348, libc.VaList(bp+8, zArg)) rc = SQLITE_ERROR } } @@ -136803,13 +136861,13 @@ *(*int32)(unsafe.Pointer(bp + 24)) = SQLITE_OK *(*Fts5Buffer)(unsafe.Pointer(bp + 32)) = Fts5Buffer{} - sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+34332, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(p)).FzContentRowid)) + sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+34379, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(p)).FzContentRowid)) if (*Fts5Config)(unsafe.Pointer(p)).FeContent != FTS5_CONTENT_NONE { for i = 0; i < (*Fts5Config)(unsafe.Pointer(p)).FnCol; i++ { if (*Fts5Config)(unsafe.Pointer(p)).FeContent == FTS5_CONTENT_EXTERNAL { - 
sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+34337, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(p)).FazCol + uintptr(i)*8)))) + sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+34384, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(p)).FazCol + uintptr(i)*8)))) } else { - sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+34344, libc.VaList(bp+16, i)) + sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+34391, libc.VaList(bp+16, i)) } } } @@ -136847,8 +136905,8 @@ (*Fts5Config)(unsafe.Pointer(pRet)).FzName = sqlite3Fts5Strndup(tls, bp+40, *(*uintptr)(unsafe.Pointer(azArg + 2*8)), -1) (*Fts5Config)(unsafe.Pointer(pRet)).FbColumnsize = 1 (*Fts5Config)(unsafe.Pointer(pRet)).FeDetail = FTS5_DETAIL_FULL - if *(*int32)(unsafe.Pointer(bp + 40)) == SQLITE_OK && Xsqlite3_stricmp(tls, (*Fts5Config)(unsafe.Pointer(pRet)).FzName, ts+22184) == 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34352, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pRet)).FzName)) + if *(*int32)(unsafe.Pointer(bp + 40)) == SQLITE_OK && Xsqlite3_stricmp(tls, (*Fts5Config)(unsafe.Pointer(pRet)).FzName, ts+22231) == 0 { + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34399, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pRet)).FzName)) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } @@ -136880,7 +136938,7 @@ if *(*int32)(unsafe.Pointer(bp + 40)) == SQLITE_OK { if z == uintptr(0) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34381, libc.VaList(bp+8, zOrig)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34428, libc.VaList(bp+8, zOrig)) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { if bOption != 0 { @@ -136917,14 +136975,14 @@ var zTail uintptr = uintptr(0) if (*Fts5Config)(unsafe.Pointer(pRet)).FeContent == FTS5_CONTENT_NORMAL { - zTail = ts + 34050 + zTail = ts + 34097 } else if (*Fts5Config)(unsafe.Pointer(pRet)).FbColumnsize != 0 { - zTail = ts + 34401 + zTail = ts + 34448 } if zTail != 0 { (*Fts5Config)(unsafe.Pointer(pRet)).FzContent = sqlite3Fts5Mprintf(tls, - bp+40, ts+34409, libc.VaList(bp+16, (*Fts5Config)(unsafe.Pointer(pRet)).FzDb, (*Fts5Config)(unsafe.Pointer(pRet)).FzName, zTail)) + bp+40, ts+34456, libc.VaList(bp+16, (*Fts5Config)(unsafe.Pointer(pRet)).FzDb, (*Fts5Config)(unsafe.Pointer(pRet)).FzName, zTail)) } } @@ -136973,7 +137031,7 @@ *(*int32)(unsafe.Pointer(bp + 48)) = SQLITE_OK var zSql uintptr - zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34420, 0) + zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34467, 0) for i = 0; zSql != 0 && i < (*Fts5Config)(unsafe.Pointer(pConfig)).FnCol; i++ { var zSep uintptr = func() uintptr { if i == 0 { @@ -136981,10 +137039,10 @@ } return ts + 14607 }() - zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34436, libc.VaList(bp, zSql, zSep, *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(pConfig)).FazCol + uintptr(i)*8)))) + zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34483, libc.VaList(bp, zSql, zSep, *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(pConfig)).FazCol + uintptr(i)*8)))) } - zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34443, - libc.VaList(bp+24, zSql, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, ts+22184)) + zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34490, + libc.VaList(bp+24, zSql, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, ts+22231)) if zSql != 0 { *(*int32)(unsafe.Pointer(bp + 48)) = Xsqlite3_declare_vtab(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, zSql) @@ -137094,7 +137152,7 @@ var rc int32 = SQLITE_OK - if 
0 == Xsqlite3_stricmp(tls, zKey, ts+34469) { + if 0 == Xsqlite3_stricmp(tls, zKey, ts+34516) { var pgsz int32 = 0 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { pgsz = Xsqlite3_value_int(tls, pVal) @@ -137104,7 +137162,7 @@ } else { (*Fts5Config)(unsafe.Pointer(pConfig)).Fpgsz = pgsz } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34474) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34521) { var nHashSize int32 = -1 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { nHashSize = Xsqlite3_value_int(tls, pVal) @@ -137114,7 +137172,7 @@ } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FnHashSize = nHashSize } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34483) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34530) { var nAutomerge int32 = -1 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { nAutomerge = Xsqlite3_value_int(tls, pVal) @@ -137127,7 +137185,7 @@ } (*Fts5Config)(unsafe.Pointer(pConfig)).FnAutomerge = nAutomerge } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34493) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34540) { var nUsermerge int32 = -1 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { nUsermerge = Xsqlite3_value_int(tls, pVal) @@ -137137,7 +137195,7 @@ } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FnUsermerge = nUsermerge } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34503) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34550) { var nCrisisMerge int32 = -1 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { nCrisisMerge = Xsqlite3_value_int(tls, pVal) @@ -137153,7 +137211,7 @@ } (*Fts5Config)(unsafe.Pointer(pConfig)).FnCrisisMerge = nCrisisMerge } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+22184) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+22231) { var zIn uintptr = Xsqlite3_value_text(tls, pVal) rc = sqlite3Fts5ConfigParseRank(tls, zIn, bp, bp+8) @@ -137176,7 +137234,7 @@ bp := tls.Alloc(52) defer tls.Free(52) - var zSelect uintptr = ts + 34515 + var zSelect uintptr = ts + 34562 var zSql uintptr *(*uintptr)(unsafe.Pointer(bp + 40)) = uintptr(0) *(*int32)(unsafe.Pointer(bp + 32)) = SQLITE_OK @@ -137198,7 +137256,7 @@ for SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 40))) { var zK uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 40)), 0) var pVal uintptr = Xsqlite3_column_value(tls, *(*uintptr)(unsafe.Pointer(bp + 40)), 1) - if 0 == Xsqlite3_stricmp(tls, zK, ts+34547) { + if 0 == Xsqlite3_stricmp(tls, zK, ts+34594) { iVersion = Xsqlite3_value_int(tls, pVal) } else { *(*int32)(unsafe.Pointer(bp + 48)) = 0 @@ -137212,7 +137270,7 @@ *(*int32)(unsafe.Pointer(bp + 32)) = SQLITE_ERROR if (*Fts5Config)(unsafe.Pointer(pConfig)).FpzErrmsg != 0 { *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(pConfig)).FpzErrmsg)) = Xsqlite3_mprintf(tls, - ts+34555, + ts+34602, libc.VaList(bp+16, iVersion, FTS5_CURRENT_VERSION)) } } @@ -137310,7 +137368,7 @@ } } if int32(*(*int8)(unsafe.Pointer(z2))) == 0 { - sqlite3Fts5ParseError(tls, pParse, ts+34620, 0) + sqlite3Fts5ParseError(tls, pParse, ts+34667, 0) return FTS5_EOF } } @@ -137323,20 +137381,20 @@ { var z2 uintptr if sqlite3Fts5IsBareword(tls, *(*int8)(unsafe.Pointer(z))) == 0 { - sqlite3Fts5ParseError(tls, pParse, ts+34640, libc.VaList(bp, z)) + sqlite3Fts5ParseError(tls, pParse, ts+34687, libc.VaList(bp, z)) return FTS5_EOF } tok = FTS5_STRING for z2 = z + 1; sqlite3Fts5IsBareword(tls, *(*int8)(unsafe.Pointer(z2))) != 0; z2++ { } (*Fts5Token)(unsafe.Pointer(pToken)).Fn = int32((int64(z2) - int64(z)) / 
1) - if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 2 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+34671, uint64(2)) == 0 { + if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 2 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+34718, uint64(2)) == 0 { tok = FTS5_OR } - if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 3 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+34674, uint64(3)) == 0 { + if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 3 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+34721, uint64(3)) == 0 { tok = FTS5_NOT } - if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 3 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+30056, uint64(3)) == 0 { + if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 3 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+30103, uint64(3)) == 0 { tok = FTS5_AND } break @@ -139114,9 +139172,9 @@ bp := tls.Alloc(16) defer tls.Free(16) - if (*Fts5Token)(unsafe.Pointer(pTok)).Fn != 4 || libc.Xmemcmp(tls, ts+34678, (*Fts5Token)(unsafe.Pointer(pTok)).Fp, uint64(4)) != 0 { + if (*Fts5Token)(unsafe.Pointer(pTok)).Fn != 4 || libc.Xmemcmp(tls, ts+34725, (*Fts5Token)(unsafe.Pointer(pTok)).Fp, uint64(4)) != 0 { sqlite3Fts5ParseError(tls, - pParse, ts+33707, libc.VaList(bp, (*Fts5Token)(unsafe.Pointer(pTok)).Fn, (*Fts5Token)(unsafe.Pointer(pTok)).Fp)) + pParse, ts+33754, libc.VaList(bp, (*Fts5Token)(unsafe.Pointer(pTok)).Fn, (*Fts5Token)(unsafe.Pointer(pTok)).Fp)) } } @@ -139132,7 +139190,7 @@ var c int8 = *(*int8)(unsafe.Pointer((*Fts5Token)(unsafe.Pointer(p)).Fp + uintptr(i))) if int32(c) < '0' || int32(c) > '9' { sqlite3Fts5ParseError(tls, - pParse, ts+34683, libc.VaList(bp, (*Fts5Token)(unsafe.Pointer(p)).Fn, (*Fts5Token)(unsafe.Pointer(p)).Fp)) + pParse, ts+34730, libc.VaList(bp, (*Fts5Token)(unsafe.Pointer(p)).Fn, (*Fts5Token)(unsafe.Pointer(p)).Fp)) return } nNear = nNear*10 + (int32(*(*int8)(unsafe.Pointer((*Fts5Token)(unsafe.Pointer(p)).Fp + uintptr(i)))) - '0') @@ -139219,7 +139277,7 @@ } } if iCol == (*Fts5Config)(unsafe.Pointer(pConfig)).FnCol { - sqlite3Fts5ParseError(tls, pParse, ts+20521, libc.VaList(bp, z)) + sqlite3Fts5ParseError(tls, pParse, ts+20568, libc.VaList(bp, z)) } else { pRet = fts5ParseColset(tls, pParse, pColset, iCol) } @@ -139300,7 +139358,7 @@ *(*uintptr)(unsafe.Pointer(bp)) = pColset if (*Fts5Config)(unsafe.Pointer((*Fts5Parse)(unsafe.Pointer(pParse)).FpConfig)).FeDetail == FTS5_DETAIL_NONE { sqlite3Fts5ParseError(tls, pParse, - ts+34712, 0) + ts+34759, 0) } else { fts5ParseSetColset(tls, pParse, pExpr, pColset, bp) } @@ -139470,12 +139528,12 @@ (*Fts5ExprPhrase)(unsafe.Pointer(pPhrase)).FnTerm > 1 || (*Fts5ExprPhrase)(unsafe.Pointer(pPhrase)).FnTerm > 0 && (*Fts5ExprTerm)(unsafe.Pointer(pPhrase+32)).FbFirst != 0 { sqlite3Fts5ParseError(tls, pParse, - ts+34765, + ts+34812, libc.VaList(bp, func() uintptr { if (*Fts5ExprNearset)(unsafe.Pointer(pNear)).FnPhrase == 1 { - return ts + 34815 + return ts + 34862 } - return ts + 34678 + return ts + 34725 }())) Xsqlite3_free(tls, pRet) pRet = uintptr(0) @@ -140418,7 +140476,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpReader == uintptr(0) && rc == SQLITE_OK { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig rc = Xsqlite3_blob_open(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, - (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl, ts+34822, iRowid, 0, p+56) + (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl, ts+34869, iRowid, 0, 
p+56) } if rc == SQLITE_ERROR { @@ -140497,7 +140555,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpWriter == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig fts5IndexPrepareStmt(tls, p, p+64, Xsqlite3_mprintf(tls, - ts+34828, + ts+34875, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName))) if (*Fts5Index)(unsafe.Pointer(p)).Frc != 0 { return @@ -140522,7 +140580,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpDeleter == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig var zSql uintptr = Xsqlite3_mprintf(tls, - ts+34879, + ts+34926, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) if fts5IndexPrepareStmt(tls, p, p+72, zSql) != 0 { return @@ -140545,7 +140603,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpIdxDeleter == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig fts5IndexPrepareStmt(tls, p, p+88, Xsqlite3_mprintf(tls, - ts+34928, + ts+34975, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName))) } if (*Fts5Index)(unsafe.Pointer(p)).Frc == SQLITE_OK { @@ -140784,7 +140842,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).Frc == SQLITE_OK { if (*Fts5Index)(unsafe.Pointer(p)).FpDataVersion == uintptr(0) { (*Fts5Index)(unsafe.Pointer(p)).Frc = fts5IndexPrepareStmt(tls, p, p+112, - Xsqlite3_mprintf(tls, ts+34968, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer((*Fts5Index)(unsafe.Pointer(p)).FpConfig)).FzDb))) + Xsqlite3_mprintf(tls, ts+35015, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer((*Fts5Index)(unsafe.Pointer(p)).FpConfig)).FzDb))) if (*Fts5Index)(unsafe.Pointer(p)).Frc != 0 { return int64(0) } @@ -141983,7 +142041,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpIdxSelect == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig fts5IndexPrepareStmt(tls, p, p+96, Xsqlite3_mprintf(tls, - ts+34991, + ts+35038, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName))) } return (*Fts5Index)(unsafe.Pointer(p)).FpIdxSelect @@ -143449,7 +143507,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpIdxWriter == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig fts5IndexPrepareStmt(tls, p, p+80, Xsqlite3_mprintf(tls, - ts+35075, + ts+35122, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName))) } @@ -144531,13 +144589,13 @@ if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { (*Fts5Index)(unsafe.Pointer(p)).FpConfig = pConfig (*Fts5Index)(unsafe.Pointer(p)).FnWorkUnit = FTS5_WORK_UNIT - (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl = sqlite3Fts5Mprintf(tls, bp+8, ts+35132, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) + (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl = sqlite3Fts5Mprintf(tls, bp+8, ts+35179, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) if (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl != 0 && bCreate != 0 { *(*int32)(unsafe.Pointer(bp + 8)) = sqlite3Fts5CreateTable(tls, - pConfig, ts+25049, ts+35140, 0, pzErr) + pConfig, ts+25096, ts+35187, 0, pzErr) if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { *(*int32)(unsafe.Pointer(bp + 8)) = sqlite3Fts5CreateTable(tls, pConfig, ts+11481, - ts+35175, + ts+35222, 1, pzErr) } if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { @@ -144790,7 +144848,7 @@ sqlite3Fts5Put32(tls, bp, iNew) rc = Xsqlite3_blob_open(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, 
(*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl, - ts+34822, int64(FTS5_STRUCTURE_ROWID), 1, bp+8) + ts+34869, int64(FTS5_STRUCTURE_ROWID), 1, bp+8) if rc == SQLITE_OK { Xsqlite3_blob_write(tls, *(*uintptr)(unsafe.Pointer(bp + 8)), bp, 4, 0) rc = Xsqlite3_blob_close(tls, *(*uintptr)(unsafe.Pointer(bp + 8))) @@ -144904,7 +144962,7 @@ } fts5IndexPrepareStmt(tls, p, bp+24, Xsqlite3_mprintf(tls, - ts+35219, + ts+35266, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, (*Fts5StructureSegment)(unsafe.Pointer(pSeg)).FiSegid))) for (*Fts5Index)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) { @@ -145074,7 +145132,7 @@ } else { (*Fts5Buffer)(unsafe.Pointer(bp + 16)).Fn = 0 fts5SegiterPoslist(tls, p, *(*uintptr)(unsafe.Pointer(bp))+96+uintptr((*Fts5CResult)(unsafe.Pointer((*Fts5Iter)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FaFirst+1*4)).FiFirst)*120, uintptr(0), bp+16) - sqlite3Fts5BufferAppendBlob(tls, p+52, bp+16, uint32(4), ts+35305) + sqlite3Fts5BufferAppendBlob(tls, p+52, bp+16, uint32(4), ts+35352) for 0 == sqlite3Fts5PoslistNext64(tls, (*Fts5Buffer)(unsafe.Pointer(bp+16)).Fp, (*Fts5Buffer)(unsafe.Pointer(bp+16)).Fn, bp+32, bp+40) { var iCol int32 = int32(*(*I64)(unsafe.Pointer(bp + 40)) >> 32) var iTokOff int32 = int32(*(*I64)(unsafe.Pointer(bp + 40)) & int64(0x7FFFFFFF)) @@ -145345,7 +145403,7 @@ if (*Fts5Config)(unsafe.Pointer(pConfig)).FbLock != 0 { (*Fts5Table)(unsafe.Pointer(pTab)).Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+35310, 0) + ts+35357, 0) return SQLITE_ERROR } @@ -145769,7 +145827,7 @@ (*Fts5Sorter)(unsafe.Pointer(pSorter)).FnIdx = nPhrase rc = fts5PrepareStatement(tls, pSorter, pConfig, - ts+35349, + ts+35396, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zRank, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, func() uintptr { if zRankArgs != 0 { @@ -145785,9 +145843,9 @@ }(), func() uintptr { if bDesc != 0 { - return ts + 35404 + return ts + 35451 } - return ts + 35409 + return ts + 35456 }())) (*Fts5Cursor)(unsafe.Pointer(pCsr)).FpSorter = pSorter @@ -145833,12 +145891,12 @@ (*Fts5Cursor)(unsafe.Pointer(pCsr)).FePlan = FTS5_PLAN_SPECIAL - if n == 5 && 0 == Xsqlite3_strnicmp(tls, ts+35413, z, n) { + if n == 5 && 0 == Xsqlite3_strnicmp(tls, ts+35460, z, n) { (*Fts5Cursor)(unsafe.Pointer(pCsr)).FiSpecial = I64(sqlite3Fts5IndexReads(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.FpIndex)) } else if n == 2 && 0 == Xsqlite3_strnicmp(tls, ts+5050, z, n) { (*Fts5Cursor)(unsafe.Pointer(pCsr)).FiSpecial = (*Fts5Cursor)(unsafe.Pointer(pCsr)).FiCsrId } else { - (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, ts+35419, libc.VaList(bp, n, z)) + (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, ts+35466, libc.VaList(bp, n, z)) rc = SQLITE_ERROR } @@ -145869,7 +145927,7 @@ var zRankArgs uintptr = (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRankArgs if zRankArgs != 0 { - var zSql uintptr = sqlite3Fts5Mprintf(tls, bp+16, ts+35447, libc.VaList(bp, zRankArgs)) + var zSql uintptr = sqlite3Fts5Mprintf(tls, bp+16, ts+35494, libc.VaList(bp, zRankArgs)) if zSql != 0 { *(*uintptr)(unsafe.Pointer(bp + 24)) = uintptr(0) *(*int32)(unsafe.Pointer(bp + 16)) = Xsqlite3_prepare_v3(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, zSql, -1, @@ -145900,7 +145958,7 @@ if *(*int32)(unsafe.Pointer(bp + 16)) == SQLITE_OK { pAux = 
fts5FindAuxiliary(tls, pTab, zRank) if pAux == uintptr(0) { - (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, ts+35457, libc.VaList(bp+8, zRank)) + (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, ts+35504, libc.VaList(bp+8, zRank)) *(*int32)(unsafe.Pointer(bp + 16)) = SQLITE_ERROR } } @@ -145932,14 +145990,14 @@ *(*int32)(unsafe.Pointer(pCsr + 80)) |= FTS5CSR_FREE_ZRANK } else if rc == SQLITE_ERROR { (*Sqlite3_vtab)(unsafe.Pointer((*Fts5Cursor)(unsafe.Pointer(pCsr)).Fbase.FpVtab)).FzErrMsg = Xsqlite3_mprintf(tls, - ts+35478, libc.VaList(bp, z)) + ts+35525, libc.VaList(bp, z)) } } else { if (*Fts5Config)(unsafe.Pointer(pConfig)).FzRank != 0 { (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRank = (*Fts5Config)(unsafe.Pointer(pConfig)).FzRank (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRankArgs = (*Fts5Config)(unsafe.Pointer(pConfig)).FzRankArgs } else { - (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRank = ts + 33861 + (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRank = ts + 33908 (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRankArgs = uintptr(0) } } @@ -145995,7 +146053,7 @@ goto __1 } (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+35310, 0) + ts+35357, 0) return SQLITE_ERROR __1: ; @@ -146212,7 +146270,7 @@ goto __40 } *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(pConfig)).FpzErrmsg)) = Xsqlite3_mprintf(tls, - ts+35511, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) + ts+35558, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) rc = SQLITE_ERROR goto __41 __40: @@ -146357,28 +146415,28 @@ var rc int32 = SQLITE_OK *(*int32)(unsafe.Pointer(bp)) = 0 - if 0 == Xsqlite3_stricmp(tls, ts+35547, zCmd) { + if 0 == Xsqlite3_stricmp(tls, ts+35594, zCmd) { if (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NORMAL { fts5SetVtabError(tls, pTab, - ts+35558, 0) + ts+35605, 0) rc = SQLITE_ERROR } else { rc = sqlite3Fts5StorageDeleteAll(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage) } - } else if 0 == Xsqlite3_stricmp(tls, ts+35638, zCmd) { + } else if 0 == Xsqlite3_stricmp(tls, ts+35685, zCmd) { if (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NONE { fts5SetVtabError(tls, pTab, - ts+35646, 0) + ts+35693, 0) rc = SQLITE_ERROR } else { rc = sqlite3Fts5StorageRebuild(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage) } } else if 0 == Xsqlite3_stricmp(tls, ts+16927, zCmd) { rc = sqlite3Fts5StorageOptimize(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage) - } else if 0 == Xsqlite3_stricmp(tls, ts+35702, zCmd) { + } else if 0 == Xsqlite3_stricmp(tls, ts+35749, zCmd) { var nMerge int32 = Xsqlite3_value_int(tls, pVal) rc = sqlite3Fts5StorageMerge(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage, nMerge) - } else if 0 == Xsqlite3_stricmp(tls, ts+35708, zCmd) { + } else if 0 == Xsqlite3_stricmp(tls, ts+35755, zCmd) { var iArg int32 = Xsqlite3_value_int(tls, pVal) rc = sqlite3Fts5StorageIntegrity(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage, iArg) } else { @@ -146449,12 +146507,12 @@ if eType0 == SQLITE_INTEGER && fts5IsContentless(tls, pTab) != 0 { (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+35724, + ts+35771, libc.VaList(bp, func() uintptr { if nArg > 1 { - return ts + 20422 + return ts + 20469 } - return ts + 35761 + return ts + 35808 }(), (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) *(*int32)(unsafe.Pointer(bp + 16)) = SQLITE_ERROR } else if nArg == 1 { @@ -147084,7 +147142,7 @@ pCsr = 
fts5CursorFromCsrid(tls, (*Fts5Auxiliary)(unsafe.Pointer(pAux)).FpGlobal, iCsrId) if pCsr == uintptr(0) || (*Fts5Cursor)(unsafe.Pointer(pCsr)).FePlan == 0 { - var zErr uintptr = Xsqlite3_mprintf(tls, ts+35773, libc.VaList(bp, iCsrId)) + var zErr uintptr = Xsqlite3_mprintf(tls, ts+35820, libc.VaList(bp, iCsrId)) Xsqlite3_result_error(tls, context, zErr, -1) Xsqlite3_free(tls, zErr) } else { @@ -147328,7 +147386,7 @@ }()) if pMod == uintptr(0) { rc = SQLITE_ERROR - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35794, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(azArg)))) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35841, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(azArg)))) } else { rc = (*struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 @@ -147347,7 +147405,7 @@ (*Fts5Config)(unsafe.Pointer(pConfig)).FpTokApi = pMod + 16 if rc != SQLITE_OK { if pzErr != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35816, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35863, 0) } } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FePattern = sqlite3Fts5TokenizerPattern(tls, @@ -147394,7 +147452,7 @@ var ppApi uintptr _ = nArg - ppApi = Xsqlite3_value_pointer(tls, *(*uintptr)(unsafe.Pointer(apArg)), ts+35847) + ppApi = Xsqlite3_value_pointer(tls, *(*uintptr)(unsafe.Pointer(apArg)), ts+35894) if ppApi != 0 { *(*uintptr)(unsafe.Pointer(ppApi)) = pGlobal } @@ -147403,7 +147461,7 @@ func fts5SourceIdFunc(tls *libc.TLS, pCtx uintptr, nArg int32, apUnused uintptr) { _ = nArg _ = apUnused - Xsqlite3_result_text(tls, pCtx, ts+35860, -1, libc.UintptrFromInt32(-1)) + Xsqlite3_result_text(tls, pCtx, ts+35907, -1, libc.UintptrFromInt32(-1)) } func fts5ShadowName(tls *libc.TLS, zName uintptr) int32 { @@ -147417,7 +147475,7 @@ } var azName2 = [5]uintptr{ - ts + 35951, ts + 34050, ts + 25049, ts + 34401, ts + 11481, + ts + 35998, ts + 34097, ts + 25096, ts + 34448, ts + 11481, } func fts5Init(tls *libc.TLS, db uintptr) int32 { @@ -147441,7 +147499,7 @@ (*Fts5Global)(unsafe.Pointer(pGlobal)).Fapi.FxFindTokenizer = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr, uintptr) int32 }{fts5FindTokenizer})) - rc = Xsqlite3_create_module_v2(tls, db, ts+35958, uintptr(unsafe.Pointer(&fts5Mod)), p, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5ModuleDestroy}))) + rc = Xsqlite3_create_module_v2(tls, db, ts+36005, uintptr(unsafe.Pointer(&fts5Mod)), p, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5ModuleDestroy}))) if rc == SQLITE_OK { rc = sqlite3Fts5IndexInit(tls, db) } @@ -147459,13 +147517,13 @@ } if rc == SQLITE_OK { rc = Xsqlite3_create_function(tls, - db, ts+35958, 1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { + db, ts+36005, 1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{fts5Fts5Func})), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { rc = Xsqlite3_create_function(tls, - db, ts+35963, 0, + db, ts+36010, 0, SQLITE_UTF8|SQLITE_DETERMINISTIC|SQLITE_INNOCUOUS, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) @@ -147522,17 +147580,17 @@ if *(*uintptr)(unsafe.Pointer(p + 40 + uintptr(eStmt)*8)) == uintptr(0) { *(*[11]uintptr)(unsafe.Pointer(bp + 128)) = [11]uintptr{ - ts + 35978, - ts + 36046, - ts + 36115, - ts + 36148, - ts + 36187, - ts + 36227, - ts + 36266, - ts + 36307, - ts + 36346, - ts + 36388, - ts + 36428, + ts + 36025, + ts + 36093, + ts + 36162, + ts + 36195, + 
ts + 36234, + ts + 36274, + ts + 36313, + ts + 36354, + ts + 36393, + ts + 36435, + ts + 36475, } var pC uintptr = (*Fts5Storage)(unsafe.Pointer(p)).FpConfig var zSql uintptr = uintptr(0) @@ -147634,18 +147692,18 @@ defer tls.Free(80) var rc int32 = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36451, + ts+36498, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36555, + ts+36602, libc.VaList(bp+48, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) } if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NORMAL { rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36593, + ts+36640, libc.VaList(bp+64, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) } return rc @@ -147657,7 +147715,7 @@ if *(*int32)(unsafe.Pointer(pRc)) == SQLITE_OK { *(*int32)(unsafe.Pointer(pRc)) = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36631, + ts+36678, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zTail, zName, zTail)) } } @@ -147669,14 +147727,14 @@ var pConfig uintptr = (*Fts5Storage)(unsafe.Pointer(pStorage)).FpConfig *(*int32)(unsafe.Pointer(bp)) = sqlite3Fts5StorageSync(tls, pStorage) - fts5StorageRenameOne(tls, pConfig, bp, ts+25049, zName) + fts5StorageRenameOne(tls, pConfig, bp, ts+25096, zName) fts5StorageRenameOne(tls, pConfig, bp, ts+11481, zName) - fts5StorageRenameOne(tls, pConfig, bp, ts+35951, zName) + fts5StorageRenameOne(tls, pConfig, bp, ts+35998, zName) if (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { - fts5StorageRenameOne(tls, pConfig, bp, ts+34401, zName) + fts5StorageRenameOne(tls, pConfig, bp, ts+34448, zName) } if (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NORMAL { - fts5StorageRenameOne(tls, pConfig, bp, ts+34050, zName) + fts5StorageRenameOne(tls, pConfig, bp, ts+34097, zName) } return *(*int32)(unsafe.Pointer(bp)) } @@ -147688,17 +147746,17 @@ var rc int32 *(*uintptr)(unsafe.Pointer(bp + 64)) = uintptr(0) - rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, bp+64, ts+36673, + rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, bp+64, ts+36720, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zPost, zDefn, func() uintptr { if bWithout != 0 { - return ts + 29702 + return ts + 29749 } return ts + 1547 }())) if *(*uintptr)(unsafe.Pointer(bp + 64)) != 0 { *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, - ts+36703, + ts+36750, libc.VaList(bp+40, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zPost, *(*uintptr)(unsafe.Pointer(bp + 64)))) Xsqlite3_free(tls, *(*uintptr)(unsafe.Pointer(bp + 64))) } @@ -147735,27 +147793,27 @@ } else { var i int32 var iOff int32 - Xsqlite3_snprintf(tls, nDefn, zDefn, ts+36747, 0) + Xsqlite3_snprintf(tls, nDefn, zDefn, ts+36794, 0) iOff = int32(libc.Xstrlen(tls, zDefn)) for i = 0; i < (*Fts5Config)(unsafe.Pointer(pConfig)).FnCol; i++ { - 
Xsqlite3_snprintf(tls, nDefn-iOff, zDefn+uintptr(iOff), ts+36770, libc.VaList(bp, i)) + Xsqlite3_snprintf(tls, nDefn-iOff, zDefn+uintptr(iOff), ts+36817, libc.VaList(bp, i)) iOff = iOff + int32(libc.Xstrlen(tls, zDefn+uintptr(iOff))) } - rc = sqlite3Fts5CreateTable(tls, pConfig, ts+34050, zDefn, 0, pzErr) + rc = sqlite3Fts5CreateTable(tls, pConfig, ts+34097, zDefn, 0, pzErr) } Xsqlite3_free(tls, zDefn) } if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { rc = sqlite3Fts5CreateTable(tls, - pConfig, ts+34401, ts+36776, 0, pzErr) + pConfig, ts+34448, ts+36823, 0, pzErr) } if rc == SQLITE_OK { rc = sqlite3Fts5CreateTable(tls, - pConfig, ts+35951, ts+36808, 1, pzErr) + pConfig, ts+35998, ts+36855, 1, pzErr) } if rc == SQLITE_OK { - rc = sqlite3Fts5StorageConfigValue(tls, p, ts+34547, uintptr(0), FTS5_CURRENT_VERSION) + rc = sqlite3Fts5StorageConfigValue(tls, p, ts+34594, uintptr(0), FTS5_CURRENT_VERSION) } } @@ -147961,12 +148019,12 @@ (*Fts5Storage)(unsafe.Pointer(p)).FbTotalsValid = 0 rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36825, + ts+36872, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36875, + ts+36922, libc.VaList(bp+32, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) } @@ -147974,7 +148032,7 @@ rc = sqlite3Fts5IndexReinit(tls, (*Fts5Storage)(unsafe.Pointer(p)).FpIndex) } if rc == SQLITE_OK { - rc = sqlite3Fts5StorageConfigValue(tls, p, ts+34547, uintptr(0), FTS5_CURRENT_VERSION) + rc = sqlite3Fts5StorageConfigValue(tls, p, ts+34594, uintptr(0), FTS5_CURRENT_VERSION) } return rc } @@ -148150,7 +148208,7 @@ var zSql uintptr var rc int32 - zSql = Xsqlite3_mprintf(tls, ts+36904, + zSql = Xsqlite3_mprintf(tls, ts+36951, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zSuffix)) if zSql == uintptr(0) { rc = SQLITE_NOMEM @@ -148332,14 +148390,14 @@ if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NORMAL { *(*I64)(unsafe.Pointer(bp + 48)) = int64(0) - rc = fts5StorageCount(tls, p, ts+34050, bp+48) + rc = fts5StorageCount(tls, p, ts+34097, bp+48) if rc == SQLITE_OK && *(*I64)(unsafe.Pointer(bp + 48)) != (*Fts5Storage)(unsafe.Pointer(p)).FnTotalRow { rc = SQLITE_CORRUPT | int32(1)<<8 } } if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { *(*I64)(unsafe.Pointer(bp + 56)) = int64(0) - rc = fts5StorageCount(tls, p, ts+34401, bp+56) + rc = fts5StorageCount(tls, p, ts+34448, bp+56) if rc == SQLITE_OK && *(*I64)(unsafe.Pointer(bp + 56)) != (*Fts5Storage)(unsafe.Pointer(p)).FnTotalRow { rc = SQLITE_CORRUPT | int32(1)<<8 } @@ -148534,9 +148592,9 @@ libc.Xmemcpy(tls, p, uintptr(unsafe.Pointer(&aAsciiTokenChar)), uint64(unsafe.Sizeof(aAsciiTokenChar))) for i = 0; rc == SQLITE_OK && i < nArg; i = i + 2 { var zArg uintptr = *(*uintptr)(unsafe.Pointer(azArg + uintptr(i+1)*8)) - if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36936) { + if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36983) { fts5AsciiAddExceptions(tls, p, zArg, 1) - } else if 0 == Xsqlite3_stricmp(tls, 
*(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36947) { + } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36994) { fts5AsciiAddExceptions(tls, p, zArg, 0) } else { rc = SQLITE_ERROR @@ -148751,7 +148809,7 @@ } else { p = Xsqlite3_malloc(tls, int32(unsafe.Sizeof(Unicode61Tokenizer{}))) if p != 0 { - var zCat uintptr = ts + 36958 + var zCat uintptr = ts + 37005 var i int32 libc.Xmemset(tls, p, 0, uint64(unsafe.Sizeof(Unicode61Tokenizer{}))) @@ -148763,7 +148821,7 @@ } for i = 0; rc == SQLITE_OK && i < nArg; i = i + 2 { - if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36967) { + if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37014) { zCat = *(*uintptr)(unsafe.Pointer(azArg + uintptr(i+1)*8)) } } @@ -148774,18 +148832,18 @@ for i = 0; rc == SQLITE_OK && i < nArg; i = i + 2 { var zArg uintptr = *(*uintptr)(unsafe.Pointer(azArg + uintptr(i+1)*8)) - if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36978) { + if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37025) { if int32(*(*int8)(unsafe.Pointer(zArg))) != '0' && int32(*(*int8)(unsafe.Pointer(zArg))) != '1' && int32(*(*int8)(unsafe.Pointer(zArg))) != '2' || *(*int8)(unsafe.Pointer(zArg + 1)) != 0 { rc = SQLITE_ERROR } else { (*Unicode61Tokenizer)(unsafe.Pointer(p)).FeRemoveDiacritic = int32(*(*int8)(unsafe.Pointer(zArg))) - '0' } - } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36936) { + } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36983) { rc = fts5UnicodeAddExceptions(tls, p, zArg, 1) - } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36947) { + } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36994) { rc = fts5UnicodeAddExceptions(tls, p, zArg, 0) - } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36967) { + } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37014) { } else { rc = SQLITE_ERROR } @@ -149061,7 +149119,7 @@ var rc int32 = SQLITE_OK var pRet uintptr *(*uintptr)(unsafe.Pointer(bp)) = uintptr(0) - var zBase uintptr = ts + 36996 + var zBase uintptr = ts + 37043 if nArg > 0 { zBase = *(*uintptr)(unsafe.Pointer(azArg)) @@ -149203,7 +149261,7 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*int8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'a': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37006, aBuf+uintptr(nBuf-2), uint64(2)) { + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37053, aBuf+uintptr(nBuf-2), uint64(2)) { if fts5Porter_MGt1(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 } @@ -149211,11 +149269,11 @@ break case 'c': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37009, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37056, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37014, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37061, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } @@ -149223,7 +149281,7 @@ break case 'e': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37019, 
aBuf+uintptr(nBuf-2), uint64(2)) { + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37066, aBuf+uintptr(nBuf-2), uint64(2)) { if fts5Porter_MGt1(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 } @@ -149231,7 +149289,7 @@ break case 'i': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37022, aBuf+uintptr(nBuf-2), uint64(2)) { + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37069, aBuf+uintptr(nBuf-2), uint64(2)) { if fts5Porter_MGt1(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 } @@ -149239,11 +149297,11 @@ break case 'l': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37025, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37072, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37030, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37077, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } @@ -149251,19 +149309,19 @@ break case 'n': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37035, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37082, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37039, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37086, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt1(tls, aBuf, nBuf-5) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37045, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37092, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } - } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37050, aBuf+uintptr(nBuf-3), uint64(3)) { + } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37097, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -149271,11 +149329,11 @@ break case 'o': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37054, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37101, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1_and_S_or_T(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } - } else if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37058, aBuf+uintptr(nBuf-2), uint64(2)) { + } else if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37105, aBuf+uintptr(nBuf-2), uint64(2)) { if fts5Porter_MGt1(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 } @@ -149283,7 +149341,7 @@ break case 's': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37061, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37108, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -149291,11 +149349,11 @@ break case 't': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37065, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37112, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } - } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37069, aBuf+uintptr(nBuf-3), uint64(3)) { + } else if nBuf > 3 && 0 == 
libc.Xmemcmp(tls, ts+37116, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -149303,7 +149361,7 @@ break case 'u': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37073, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37120, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -149311,7 +149369,7 @@ break case 'v': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37077, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37124, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -149319,7 +149377,7 @@ break case 'z': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37081, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37128, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -149335,24 +149393,24 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*int8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'a': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37085, aBuf+uintptr(nBuf-2), uint64(2)) { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37065, uint64(3)) + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37132, aBuf+uintptr(nBuf-2), uint64(2)) { + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37112, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 + 3 ret = 1 } break case 'b': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37088, aBuf+uintptr(nBuf-2), uint64(2)) { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37091, uint64(3)) + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37135, aBuf+uintptr(nBuf-2), uint64(2)) { + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37138, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 + 3 ret = 1 } break case 'i': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37095, aBuf+uintptr(nBuf-2), uint64(2)) { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37081, uint64(3)) + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37142, aBuf+uintptr(nBuf-2), uint64(2)) { + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37128, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 + 3 ret = 1 } @@ -149367,44 +149425,44 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*int8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'a': - if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37098, aBuf+uintptr(nBuf-7), uint64(7)) { + if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37145, aBuf+uintptr(nBuf-7), uint64(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37065, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37112, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } - } else if nBuf > 6 && 0 == libc.Xmemcmp(tls, ts+37106, aBuf+uintptr(nBuf-6), uint64(6)) { + } else if nBuf > 6 && 0 == libc.Xmemcmp(tls, ts+37153, aBuf+uintptr(nBuf-6), uint64(6)) { if fts5Porter_MGt0(tls, aBuf, nBuf-6) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-6), ts+37113, uint64(4)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-6), ts+37160, uint64(4)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 6 + 4 } } break case 'c': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37118, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37165, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37014, uint64(4)) 
+ libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37061, uint64(4)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 4 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37123, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37170, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37009, uint64(4)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37056, uint64(4)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 4 } } break case 'e': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37128, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37175, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37081, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37128, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 3 } } break case 'g': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37133, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37180, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+15473, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 3 @@ -149413,91 +149471,91 @@ break case 'l': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37138, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37185, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt0(tls, aBuf, nBuf-3) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37091, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37138, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 + 3 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37142, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37189, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37006, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37053, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 2 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37147, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37194, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37050, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37097, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 3 } - } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37153, aBuf+uintptr(nBuf-3), uint64(3)) { + } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37200, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt0(tls, aBuf, nBuf-3) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37157, uint64(1)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37204, uint64(1)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 + 1 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37159, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37206, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37073, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37120, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 3 } } break case 'o': - if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37165, aBuf+uintptr(nBuf-7), uint64(7)) { + if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37212, aBuf+uintptr(nBuf-7), uint64(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 
{ - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37081, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37128, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37173, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37220, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37065, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37112, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 3 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37179, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37226, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37065, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37112, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 3 } } break case 's': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37184, aBuf+uintptr(nBuf-5), uint64(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37231, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37006, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37053, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } - } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37190, aBuf+uintptr(nBuf-7), uint64(7)) { + } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37237, aBuf+uintptr(nBuf-7), uint64(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37077, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37124, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } - } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37198, aBuf+uintptr(nBuf-7), uint64(7)) { + } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37245, aBuf+uintptr(nBuf-7), uint64(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37206, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37253, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } - } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37210, aBuf+uintptr(nBuf-7), uint64(7)) { + } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37257, aBuf+uintptr(nBuf-7), uint64(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37073, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37120, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } } break case 't': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37218, aBuf+uintptr(nBuf-5), uint64(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37265, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37006, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37053, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37224, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37271, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37077, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37124, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 3 } - } else if nBuf > 6 && 0 == libc.Xmemcmp(tls, ts+37230, aBuf+uintptr(nBuf-6), uint64(6)) { + } else if nBuf > 6 && 0 == 
libc.Xmemcmp(tls, ts+37277, aBuf+uintptr(nBuf-6), uint64(6)) { if fts5Porter_MGt0(tls, aBuf, nBuf-6) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-6), ts+37091, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-6), ts+37138, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 6 + 3 } } @@ -149512,16 +149570,16 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*int8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'a': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37237, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37284, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37022, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37069, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 2 } } break case 's': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37242, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37289, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } @@ -149529,21 +149587,21 @@ break case 't': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37247, aBuf+uintptr(nBuf-5), uint64(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37294, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37022, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37069, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37253, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37300, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37022, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37069, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } } break case 'u': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37206, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37253, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt0(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -149551,7 +149609,7 @@ break case 'v': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37259, aBuf+uintptr(nBuf-5), uint64(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37306, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 } @@ -149559,9 +149617,9 @@ break case 'z': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37265, aBuf+uintptr(nBuf-5), uint64(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37312, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37006, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37053, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } } @@ -149576,12 +149634,12 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*int8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'e': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37271, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37318, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt0(tls, aBuf, nBuf-3) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37275, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37322, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 + 2 } - } else if nBuf > 2 && 0 == 
libc.Xmemcmp(tls, ts+37278, aBuf+uintptr(nBuf-2), uint64(2)) { + } else if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37325, aBuf+uintptr(nBuf-2), uint64(2)) { if fts5Porter_Vowel(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 ret = 1 @@ -149590,7 +149648,7 @@ break case 'n': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37281, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37328, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_Vowel(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 ret = 1 @@ -149746,7 +149804,7 @@ (*TrigramTokenizer)(unsafe.Pointer(pNew)).FbFold = 1 for i = 0; rc == SQLITE_OK && i < nArg; i = i + 2 { var zArg uintptr = *(*uintptr)(unsafe.Pointer(azArg + uintptr(i+1)*8)) - if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37285) { + if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37332) { if int32(*(*int8)(unsafe.Pointer(zArg))) != '0' && int32(*(*int8)(unsafe.Pointer(zArg))) != '1' || *(*int8)(unsafe.Pointer(zArg + 1)) != 0 { rc = SQLITE_ERROR } else { @@ -149926,22 +149984,22 @@ defer tls.Free(128) *(*[4]BuiltinTokenizer)(unsafe.Pointer(bp)) = [4]BuiltinTokenizer{ - {FzName: ts + 36996, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { + {FzName: ts + 37043, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 }{fts5UnicodeCreate})), FxDelete: *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5UnicodeDelete})), FxTokenize: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, int32, uintptr) int32 }{fts5UnicodeTokenize}))}}, - {FzName: ts + 37300, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { + {FzName: ts + 37347, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 }{fts5AsciiCreate})), FxDelete: *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5AsciiDelete})), FxTokenize: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, int32, uintptr) int32 }{fts5AsciiTokenize}))}}, - {FzName: ts + 37306, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { + {FzName: ts + 37353, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 }{fts5PorterCreate})), FxDelete: *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5PorterDelete})), FxTokenize: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, int32, uintptr) int32 }{fts5PorterTokenize}))}}, - {FzName: ts + 37313, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { + {FzName: ts + 37360, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 }{fts5TriCreate})), FxDelete: *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5TriDelete})), FxTokenize: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, int32, uintptr) int32 @@ -151084,14 +151142,14 @@ var zCopy uintptr = sqlite3Fts5Strndup(tls, bp+8, zType, -1) if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { sqlite3Fts5Dequote(tls, zCopy) - if Xsqlite3_stricmp(tls, zCopy, ts+37321) == 0 { + if Xsqlite3_stricmp(tls, zCopy, ts+37368) == 0 { *(*int32)(unsafe.Pointer(peType)) = FTS5_VOCAB_COL - } 
else if Xsqlite3_stricmp(tls, zCopy, ts+37325) == 0 { + } else if Xsqlite3_stricmp(tls, zCopy, ts+37372) == 0 { *(*int32)(unsafe.Pointer(peType)) = FTS5_VOCAB_ROW - } else if Xsqlite3_stricmp(tls, zCopy, ts+37329) == 0 { + } else if Xsqlite3_stricmp(tls, zCopy, ts+37376) == 0 { *(*int32)(unsafe.Pointer(peType)) = FTS5_VOCAB_INSTANCE } else { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+37338, libc.VaList(bp, zCopy)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+37385, libc.VaList(bp, zCopy)) *(*int32)(unsafe.Pointer(bp + 8)) = SQLITE_ERROR } Xsqlite3_free(tls, zCopy) @@ -151117,19 +151175,19 @@ defer tls.Free(36) *(*[3]uintptr)(unsafe.Pointer(bp + 8)) = [3]uintptr{ - ts + 37372, - ts + 37412, - ts + 37447, + ts + 37419, + ts + 37459, + ts + 37494, } var pRet uintptr = uintptr(0) *(*int32)(unsafe.Pointer(bp + 32)) = SQLITE_OK var bDb int32 - bDb = libc.Bool32(argc == 6 && libc.Xstrlen(tls, *(*uintptr)(unsafe.Pointer(argv + 1*8))) == uint64(4) && libc.Xmemcmp(tls, ts+23345, *(*uintptr)(unsafe.Pointer(argv + 1*8)), uint64(4)) == 0) + bDb = libc.Bool32(argc == 6 && libc.Xstrlen(tls, *(*uintptr)(unsafe.Pointer(argv + 1*8))) == uint64(4) && libc.Xmemcmp(tls, ts+23392, *(*uintptr)(unsafe.Pointer(argv + 1*8)), uint64(4)) == 0) if argc != 5 && bDb == 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+37490, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+37537, 0) *(*int32)(unsafe.Pointer(bp + 32)) = SQLITE_ERROR } else { var nByte int32 @@ -151262,11 +151320,11 @@ if (*Fts5VocabTable)(unsafe.Pointer(pTab)).FbBusy != 0 { (*Sqlite3_vtab)(unsafe.Pointer(pVTab)).FzErrMsg = Xsqlite3_mprintf(tls, - ts+37523, libc.VaList(bp, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) + ts+37570, libc.VaList(bp, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) return SQLITE_ERROR } zSql = sqlite3Fts5Mprintf(tls, bp+64, - ts+37554, + ts+37601, libc.VaList(bp+16, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) if zSql != 0 { *(*int32)(unsafe.Pointer(bp + 64)) = Xsqlite3_prepare_v2(tls, (*Fts5VocabTable)(unsafe.Pointer(pTab)).Fdb, zSql, -1, bp+72, uintptr(0)) @@ -151290,7 +151348,7 @@ *(*uintptr)(unsafe.Pointer(bp + 72)) = uintptr(0) if *(*int32)(unsafe.Pointer(bp + 64)) == SQLITE_OK { (*Sqlite3_vtab)(unsafe.Pointer(pVTab)).FzErrMsg = Xsqlite3_mprintf(tls, - ts+37605, libc.VaList(bp+48, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) + ts+37652, libc.VaList(bp+48, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) *(*int32)(unsafe.Pointer(bp + 64)) = SQLITE_ERROR } } else { @@ -151685,7 +151743,7 @@ func sqlite3Fts5VocabInit(tls *libc.TLS, pGlobal uintptr, db uintptr) int32 { var p uintptr = pGlobal - return Xsqlite3_create_module_v2(tls, db, ts+37631, uintptr(unsafe.Pointer(&fts5Vocab)), p, uintptr(0)) + return Xsqlite3_create_module_v2(tls, db, ts+37678, uintptr(unsafe.Pointer(&fts5Vocab)), p, uintptr(0)) } var fts5Vocab = Sqlite3_module{ @@ -151707,7 +151765,7 @@ // ************* End of stmt.c *********************************************** // Return the source-id for this library func Xsqlite3_sourceid(tls *libc.TLS) uintptr { - return ts + 37641 + return ts + 37688 } func init() { 
@@ -152681,5 +152739,5 @@ *(*func(*libc.TLS, uintptr, int32, uintptr) int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&vfs_template)) + 128)) = rbuVfsGetLastError } -var ts1 = "3.41.0\x00ATOMIC_INTRINSICS=0\x00COMPILER=clang-13.0.0\x00DEFAULT_AUTOVACUUM\x00DEFAULT_CACHE_SIZE=-2000\x00DEFAULT_FILE_FORMAT=4\x00DEFAULT_JOURNAL_SIZE_LIMIT=-1\x00DEFAULT_MEMSTATUS=0\x00DEFAULT_MMAP_SIZE=0\x00DEFAULT_PAGE_SIZE=4096\x00DEFAULT_PCACHE_INITSZ=20\x00DEFAULT_RECURSIVE_TRIGGERS\x00DEFAULT_SECTOR_SIZE=4096\x00DEFAULT_SYNCHRONOUS=2\x00DEFAULT_WAL_AUTOCHECKPOINT=1000\x00DEFAULT_WAL_SYNCHRONOUS=2\x00DEFAULT_WORKER_THREADS=0\x00ENABLE_COLUMN_METADATA\x00ENABLE_FTS5\x00ENABLE_GEOPOLY\x00ENABLE_MATH_FUNCTIONS\x00ENABLE_MEMORY_MANAGEMENT\x00ENABLE_OFFSET_SQL_FUNC\x00ENABLE_PREUPDATE_HOOK\x00ENABLE_RBU\x00ENABLE_RTREE\x00ENABLE_SESSION\x00ENABLE_SNAPSHOT\x00ENABLE_STAT4\x00ENABLE_UNLOCK_NOTIFY\x00LIKE_DOESNT_MATCH_BLOBS\x00MALLOC_SOFT_LIMIT=1024\x00MAX_ATTACHED=10\x00MAX_COLUMN=2000\x00MAX_COMPOUND_SELECT=500\x00MAX_DEFAULT_PAGE_SIZE=8192\x00MAX_EXPR_DEPTH=1000\x00MAX_FUNCTION_ARG=127\x00MAX_LENGTH=1000000000\x00MAX_LIKE_PATTERN_LENGTH=50000\x00MAX_MMAP_SIZE=0x7fff0000\x00MAX_PAGE_COUNT=1073741823\x00MAX_PAGE_SIZE=65536\x00MAX_SQL_LENGTH=1000000000\x00MAX_TRIGGER_DEPTH=1000\x00MAX_VARIABLE_NUMBER=32766\x00MAX_VDBE_OP=250000000\x00MAX_WORKER_THREADS=8\x00MUTEX_NOOP\x00SOUNDEX\x00SYSTEM_MALLOC\x00TEMP_STORE=1\x00THREADSAFE=1\x00BINARY\x00ANY\x00BLOB\x00INT\x00INTEGER\x00REAL\x00TEXT\x0020b:20e\x0020c:20e\x0020e\x0040f-21a-21d\x00now\x00local time unavailable\x00second\x00minute\x00hour\x00\x00\x00day\x00\x00\x00\x00month\x00\x00year\x00\x00\x00auto\x00julianday\x00localtime\x00unixepoch\x00utc\x00weekday \x00start of \x00month\x00year\x00day\x00%02d\x00%06.3f\x00%03d\x00%.16g\x00%lld\x00%04d\x00date\x00time\x00datetime\x00strftime\x00current_time\x00current_timestamp\x00current_date\x00failed to allocate %u bytes of memory\x00failed memory resize %u to %u bytes\x00out of memory\x000123456789ABCDEF0123456789abcdef\x00-x0\x00X0\x00%\x00NaN\x00Inf\x00\x00NULL\x00(NULL)\x00.\x00(join-%u)\x00(subquery-%u)\x00thstndrd\x00922337203685477580\x00API call with %s database connection 
pointer\x00unopened\x00invalid\x00Savepoint\x00AutoCommit\x00Transaction\x00Checkpoint\x00JournalMode\x00Vacuum\x00VFilter\x00VUpdate\x00Init\x00Goto\x00Gosub\x00InitCoroutine\x00Yield\x00MustBeInt\x00Jump\x00Once\x00If\x00IfNot\x00IsType\x00Not\x00IfNullRow\x00SeekLT\x00SeekLE\x00SeekGE\x00SeekGT\x00IfNotOpen\x00IfNoHope\x00NoConflict\x00NotFound\x00Found\x00SeekRowid\x00NotExists\x00Last\x00IfSmaller\x00SorterSort\x00Sort\x00Rewind\x00SorterNext\x00Prev\x00Next\x00IdxLE\x00IdxGT\x00IdxLT\x00Or\x00And\x00IdxGE\x00RowSetRead\x00RowSetTest\x00Program\x00FkIfZero\x00IsNull\x00NotNull\x00Ne\x00Eq\x00Gt\x00Le\x00Lt\x00Ge\x00ElseEq\x00IfPos\x00IfNotZero\x00DecrJumpZero\x00IncrVacuum\x00VNext\x00Filter\x00PureFunc\x00Function\x00Return\x00EndCoroutine\x00HaltIfNull\x00Halt\x00Integer\x00Int64\x00String\x00BeginSubrtn\x00Null\x00SoftNull\x00Blob\x00Variable\x00Move\x00Copy\x00SCopy\x00IntCopy\x00FkCheck\x00ResultRow\x00CollSeq\x00AddImm\x00RealAffinity\x00Cast\x00Permutation\x00Compare\x00IsTrue\x00ZeroOrNull\x00Offset\x00Column\x00TypeCheck\x00Affinity\x00MakeRecord\x00Count\x00ReadCookie\x00SetCookie\x00ReopenIdx\x00BitAnd\x00BitOr\x00ShiftLeft\x00ShiftRight\x00Add\x00Subtract\x00Multiply\x00Divide\x00Remainder\x00Concat\x00OpenRead\x00OpenWrite\x00BitNot\x00OpenDup\x00OpenAutoindex\x00String8\x00OpenEphemeral\x00SorterOpen\x00SequenceTest\x00OpenPseudo\x00Close\x00ColumnsUsed\x00SeekScan\x00SeekHit\x00Sequence\x00NewRowid\x00Insert\x00RowCell\x00Delete\x00ResetCount\x00SorterCompare\x00SorterData\x00RowData\x00Rowid\x00NullRow\x00SeekEnd\x00IdxInsert\x00SorterInsert\x00IdxDelete\x00DeferredSeek\x00IdxRowid\x00FinishSeek\x00Destroy\x00Clear\x00ResetSorter\x00CreateBtree\x00SqlExec\x00ParseSchema\x00LoadAnalysis\x00DropTable\x00DropIndex\x00Real\x00DropTrigger\x00IntegrityCk\x00RowSetAdd\x00Param\x00FkCounter\x00MemMax\x00OffsetLimit\x00AggInverse\x00AggStep\x00AggStep1\x00AggValue\x00AggFinal\x00Expire\x00CursorLock\x00CursorUnlock\x00TableLock\x00VBegin\x00VCreate\x00VDestroy\x00VOpen\x00VInitIn\x00VColumn\x00VRename\x00Pagecount\x00MaxPgcnt\x00ClrSubtype\x00FilterAdd\x00Trace\x00CursorHint\x00ReleaseReg\x00Noop\x00Explain\x00Abortable\x00open\x00close\x00access\x00getcwd\x00stat\x00fstat\x00ftruncate\x00fcntl\x00read\x00pread\x00pread64\x00write\x00pwrite\x00pwrite64\x00fchmod\x00fallocate\x00unlink\x00openDirectory\x00mkdir\x00rmdir\x00fchown\x00geteuid\x00mmap\x00munmap\x00mremap\x00getpagesize\x00readlink\x00lstat\x00ioctl\x00attempt to open \"%s\" as file descriptor %d\x00/dev/null\x00os_unix.c:%d: (%d) %s(%s) - %s\x00cannot fstat db file %s\x00file unlinked while open: %s\x00multiple links to file: %s\x00file renamed while open: %s\x00%s\x00full_fsync\x00%s-shm\x00readonly_shm\x00psow\x00unix-excl\x00%s.lock\x00/var/tmp\x00/usr/tmp\x00/tmp\x00SQLITE_TMPDIR\x00TMPDIR\x00%s/etilqs_%llx%c\x00modeof\x00fsync\x00/dev/urandom\x00unix\x00unix-none\x00unix-dotfile\x00memdb\x00memdb(%p,%lld)\x00PRAGMA \"%w\".page_count\x00ATTACH x AS %Q\x00recovered %d pages from %s\x00-journal\x00-wal\x00nolock\x00immutable\x00PRAGMA table_list\x00recovered %d frames from WAL file %s\x00cannot limit WAL size: %s\x00SQLite format 3\x00:memory:\x00@ \x00\n\x00invalid page number %d\x002nd reference to page %d\x00Failed to read ptrmap key=%d\x00Bad ptr map entry key=%d expected=(%d,%d) got=(%d,%d)\x00failed to get page %d\x00freelist leaf count too big on page %d\x00%s is %d but should be %d\x00size\x00overflow list length\x00Page %u: \x00unable to get the page. 
error code=%d\x00btreeInitPage() returns error code %d\x00free space corruption\x00On tree page %u cell %d: \x00On page %u at right child: \x00Offset %d out of range %d..%d\x00Extends off end of page\x00Rowid %lld out of order\x00Child page depth differs\x00Multiple uses for byte %u of page %u\x00Fragmentation of %d bytes reported as %d on page %u\x00Main freelist: \x00max rootpage (%d) disagrees with header (%d)\x00incremental_vacuum enabled with a max rootpage of zero\x00Page %d is never used\x00Pointer map page %d is referenced\x00unknown database %s\x00destination database is in use\x00source and destination must be distinct\x00%!.15g\x00-\x00%s%s\x00k(%d\x00B\x00,%s%s%s\x00N.\x00)\x00%.18s-%s\x00%s(%d)\x00%d\x00(blob)\x00vtab:%p\x00%c%u\x00]\x00program\x00?\x008\x0016LE\x0016BE\x00addr\x00opcode\x00p1\x00p2\x00p3\x00p4\x00p5\x00comment\x00id\x00parent\x00notused\x00detail\x00%.4c%s%.16c\x00MJ delete: %s\x00MJ collide: %s\x00-mj%06X9%02X\x00FOREIGN KEY constraint failed\x00a CHECK constraint\x00a generated column\x00an index\x00non-deterministic use of %s() in %s\x00API called with finalized prepared statement\x00API called with NULL prepared statement\x00string or blob too big\x00bind on a busy prepared statement: [%s]\x00-- \x00'%.*q'\x00zeroblob(%d)\x00x'\x00%02x\x00'\x00%s constraint failed\x00%z: %s\x00abort at %d in [%s]: %s\x00cannot store %s value in %s column %s.%s\x00cannot open savepoint - SQL statements in progress\x00no such savepoint: %s\x00cannot release savepoint - SQL statements in progress\x00cannot commit transaction - SQL statements in progress\x00cannot start a transaction within a transaction\x00cannot rollback - no transaction is active\x00cannot commit - no transaction is active\x00database schema has changed\x00index corruption\x00sqlite_master\x00SELECT*FROM\"%w\".%s WHERE %s ORDER BY rowid\x00too many levels of trigger recursion\x00cannot change %s wal mode from within a transaction\x00into\x00out of\x00database table is locked: %s\x00ValueList\x00-- %s\x00statement aborts at %d: [%s] %s\x00NOT NULL\x00UNIQUE\x00CHECK\x00FOREIGN KEY\x00cannot open value of type %s\x00null\x00real\x00integer\x00no such rowid: %lld\x00cannot open virtual table: %s\x00cannot open table without rowid: %s\x00cannot open view: %s\x00no such column: \"%s\"\x00foreign key\x00indexed\x00cannot open %s column for writing\x00sqlite_\x00sqlite_temp_master\x00sqlite_temp_schema\x00sqlite_schema\x00main\x00*\x00new\x00old\x00excluded\x00misuse of aliased aggregate %s\x00misuse of aliased window function %s\x00row value misused\x00double-quoted string literal: \"%w\"\x00coalesce\x00no such column\x00ambiguous column name\x00%s: %s.%s.%s\x00%s: %s.%s\x00%s: %s\x00partial index WHERE clauses\x00index expressions\x00CHECK constraints\x00generated columns\x00%s prohibited in %s\x00true\x00false\x00the \".\" operator\x00second argument to %#T() must be a constant between 0.0 and 1.0\x00not authorized to use function: %#T\x00non-deterministic functions\x00%#T() may not be used as a window function\x00window\x00aggregate\x00misuse of %s function %#T()\x00no such function: %#T\x00wrong number of arguments to function %#T()\x00FILTER may not be used with non-aggregate %#T()\x00subqueries\x00parameters\x00%r %s BY term out of range - should be between 1 and %d\x00too many terms in ORDER BY clause\x00ORDER\x00%r ORDER BY term does not match any column in the result set\x00too many terms in %s BY clause\x00HAVING clause on a non-aggregate query\x00GROUP\x00aggregate functions are not allowed in the 
GROUP BY clause\x00Expression tree is too large (maximum depth %d)\x00IN(...) element has %d term%s - expected %d\x00s\x000\x00too many arguments on function %T\x00unsafe use of %#T()\x00variable number must be between ?1 and ?%d\x00too many SQL variables\x00%d columns assigned %d values\x00too many columns in %s\x00_ROWID_\x00ROWID\x00OID\x00USING ROWID SEARCH ON TABLE %s FOR IN-OPERATOR\x00USING INDEX %s FOR IN-OPERATOR\x00sub-select returns %d columns - expected %d\x00REUSE LIST SUBQUERY %d\x00%sLIST SUBQUERY %d\x00CORRELATED \x00REUSE SUBQUERY %d\x00%sSCALAR SUBQUERY %d\x001\x000x\x00hex literal too big: %s%#T\x00generated column loop on \"%s\"\x00blob\x00text\x00numeric\x00flexnum\x00none\x00misuse of aggregate: %#T()\x00unknown function: %#T()\x00RAISE() may only be used within a trigger-program\x00B\x00C\x00D\x00E\x00F\x00table %s may not be altered\x00SELECT 1 FROM \"%w\".sqlite_master WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%' AND sqlite_rename_test(%Q, sql, type, name, %d, %Q, %d)=NULL \x00SELECT 1 FROM temp.sqlite_master WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%' AND sqlite_rename_test(%Q, sql, type, name, 1, %Q, %d)=NULL \x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_quotefix(%Q, sql)WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%'\x00UPDATE temp.sqlite_master SET sql = sqlite_rename_quotefix('temp', sql)WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%'\x00there is already another table or index with this name: %s\x00table\x00view %s may not be altered\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, %d) WHERE (type!='index' OR tbl_name=%Q COLLATE nocase)AND name NOT LIKE 'sqliteX_%%' ESCAPE 'X'\x00UPDATE %Q.sqlite_master SET tbl_name = %Q, name = CASE WHEN type='table' THEN %Q WHEN name LIKE 'sqliteX_autoindex%%' ESCAPE 'X' AND type='index' THEN 'sqlite_autoindex_' || %Q || substr(name,%d+18) ELSE name END WHERE tbl_name=%Q COLLATE nocase AND (type='table' OR type='index' OR type='trigger');\x00sqlite_sequence\x00UPDATE \"%w\".sqlite_sequence set name = %Q WHERE name = %Q\x00UPDATE sqlite_temp_schema SET sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, 1), tbl_name = CASE WHEN tbl_name=%Q COLLATE nocase AND sqlite_rename_test(%Q, sql, type, name, 1, 'after rename', 0) THEN %Q ELSE tbl_name END WHERE type IN ('view', 'trigger')\x00after rename\x00SELECT raise(ABORT,%Q) FROM \"%w\".\"%w\"\x00Cannot add a PRIMARY KEY column\x00Cannot add a UNIQUE column\x00Cannot add a REFERENCES column with non-NULL default value\x00Cannot add a NOT NULL column with default value NULL\x00Cannot add a column with non-constant default\x00cannot add a STORED column\x00UPDATE \"%w\".sqlite_master SET sql = printf('%%.%ds, ',sql) || %Q || substr(sql,1+length(printf('%%.%ds',sql))) WHERE type = 'table' AND name = %Q\x00SELECT CASE WHEN quick_check GLOB 'CHECK*' THEN raise(ABORT,'CHECK constraint failed') ELSE raise(ABORT,'NOT NULL constraint failed') END FROM pragma_quick_check(%Q,%Q) WHERE quick_check GLOB 'CHECK*' OR quick_check GLOB 'NULL*'\x00virtual tables may not be altered\x00Cannot add a column to a view\x00sqlite_altertab_%s\x00view\x00virtual table\x00cannot %s %s \"%s\"\x00drop column from\x00rename columns of\x00no such column: \"%T\"\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, %d) WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND (type != 'index' OR 
tbl_name = %Q)\x00UPDATE temp.sqlite_master SET sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, 1) WHERE type IN ('trigger', 'view')\x00error in %s %s%s%s: %s\x00 \x00CREATE \x00\"%w\" \x00%Q%s\x00%.*s%s\x00cannot drop %s column: \"%s\"\x00PRIMARY KEY\x00cannot drop column \"%s\": no other columns exist\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_drop_column(%d, sql, %d) WHERE (type=='table' AND tbl_name=%Q COLLATE nocase)\x00after drop column\x00sqlite_rename_column\x00sqlite_rename_table\x00sqlite_rename_test\x00sqlite_drop_column\x00sqlite_rename_quotefix\x00CREATE TABLE %Q.%s(%s)\x00DELETE FROM %Q.%s WHERE %s=%Q\x00DELETE FROM %Q.%s\x00sqlite_stat1\x00tbl,idx,stat\x00sqlite_stat4\x00tbl,idx,neq,nlt,ndlt,sample\x00sqlite_stat3\x00stat_init\x00stat_push\x00%llu\x00 %llu\x00%llu \x00stat_get\x00sqlite\\_%\x00BBB\x00idx\x00tbl\x00unordered*\x00sz=[0-9]*\x00noskipscan*\x00SELECT idx,count(*) FROM %Q.sqlite_stat4 GROUP BY idx\x00SELECT idx,neq,nlt,ndlt,sample FROM %Q.sqlite_stat4\x00SELECT tbl,idx,stat FROM %Q.sqlite_stat1\x00x\x00\x00too many attached databases - max %d\x00database %s is already in use\x00database is already attached\x00attached databases must use the same text encoding as main database\x00unable to open database: %s\x00no such database: %s\x00cannot detach database %s\x00database %s is locked\x00sqlite_detach\x00sqlite_attach\x00%s cannot use variables\x00%s %T cannot reference objects in database %s\x00authorizer malfunction\x00%s.%s\x00%s.%z\x00access to %z is prohibited\x00not authorized\x00pragma_\x00no such view\x00no such table\x00corrupt database\x00unknown database %T\x00object name reserved for internal use: %s\x00temporary table name must be unqualified\x00%s %T already exists\x00there is already an index named %s\x00sqlite_returning\x00cannot use RETURNING in a trigger\x00too many columns on %s\x00always\x00generated\x00duplicate column name: %s\x00default value of column [%s] is not constant\x00cannot use DEFAULT on a generated column\x00generated columns cannot be part of the PRIMARY KEY\x00table \"%s\" has more than one primary key\x00AUTOINCREMENT is only allowed on an INTEGER PRIMARY KEY\x00virtual tables cannot use computed columns\x00virtual\x00stored\x00error in generated column \"%s\"\x00,\x00\n \x00,\n \x00\n)\x00CREATE TABLE \x00 TEXT\x00 NUM\x00 INT\x00 REAL\x00unknown datatype for %s.%s: \"%s\"\x00missing datatype for %s.%s\x00AUTOINCREMENT not allowed on WITHOUT ROWID tables\x00PRIMARY KEY missing on table %s\x00must have at least one non-generated column\x00TABLE\x00VIEW\x00CREATE %s %.*s\x00UPDATE %Q.sqlite_master SET type='%s', name=%Q, tbl_name=%Q, rootpage=#%d, sql=%Q WHERE rowid=#%d\x00CREATE TABLE %Q.sqlite_sequence(name,seq)\x00tbl_name='%q' AND type!='trigger'\x00parameters are not allowed in views\x00view %s is circularly defined\x00corrupt schema\x00UPDATE %Q.sqlite_master SET rootpage=%d WHERE #%d AND rootpage=#%d\x00sqlite_stat%d\x00DELETE FROM %Q.sqlite_sequence WHERE name=%Q\x00DELETE FROM %Q.sqlite_master WHERE tbl_name=%Q and type!='trigger'\x00table %s may not be dropped\x00use DROP TABLE to delete table %s\x00use DROP VIEW to delete view %s\x00foreign key on %s should reference only one column of table %T\x00number of columns in foreign key does not match the number of columns in the referenced table\x00unknown column \"%s\" in foreign key definition\x00unsupported use of NULLS %s\x00FIRST\x00LAST\x00index\x00cannot create a TEMP index on non-TEMP table \"%s\"\x00table %s may not be indexed\x00views may not be 
indexed\x00virtual tables may not be indexed\x00there is already a table named %s\x00index %s already exists\x00sqlite_autoindex_%s_%d\x00expressions prohibited in PRIMARY KEY and UNIQUE constraints\x00conflicting ON CONFLICT clauses specified\x00invalid rootpage\x00CREATE%s INDEX %.*s\x00 UNIQUE\x00INSERT INTO %Q.sqlite_master VALUES('index',%Q,%Q,#%d,%Q);\x00name='%q' AND type='index'\x00no such index: %S\x00index associated with UNIQUE or PRIMARY KEY constraint cannot be dropped\x00DELETE FROM %Q.sqlite_master WHERE name=%Q AND type='index'\x00too many FROM clause terms, max: %d\x00a JOIN clause is required before %s\x00ON\x00USING\x00BEGIN\x00ROLLBACK\x00COMMIT\x00RELEASE\x00unable to open a temporary database file for storing temporary tables\x00index '%q'\x00, \x00%s.rowid\x00unable to identify the object to be reindexed\x00duplicate WITH table name: %s\x00no such collation sequence: %s\x00unsafe use of virtual table \"%s\"\x00table %s may not be modified\x00cannot modify %s because it is a view\x00rows deleted\x00integer overflow\x00%.*f\x00LIKE or GLOB pattern too complex\x00ESCAPE expression must be a single character\x00%!.20e\x00%Q\x00?000\x00MATCH\x00like\x00implies_nonnull_row\x00expr_compare\x00expr_implies_expr\x00affinity\x00soundex\x00load_extension\x00sqlite_compileoption_used\x00sqlite_compileoption_get\x00unlikely\x00likelihood\x00likely\x00sqlite_offset\x00ltrim\x00rtrim\x00trim\x00min\x00max\x00typeof\x00subtype\x00length\x00instr\x00printf\x00format\x00unicode\x00char\x00abs\x00round\x00upper\x00lower\x00hex\x00unhex\x00ifnull\x00random\x00randomblob\x00nullif\x00sqlite_version\x00sqlite_source_id\x00sqlite_log\x00quote\x00last_insert_rowid\x00changes\x00total_changes\x00replace\x00zeroblob\x00substr\x00substring\x00sum\x00total\x00avg\x00count\x00group_concat\x00glob\x00ceil\x00ceiling\x00floor\x00trunc\x00ln\x00log\x00log10\x00log2\x00exp\x00pow\x00power\x00mod\x00acos\x00asin\x00atan\x00atan2\x00cos\x00sin\x00tan\x00cosh\x00sinh\x00tanh\x00acosh\x00asinh\x00atanh\x00sqrt\x00radians\x00degrees\x00pi\x00sign\x00iif\x00foreign key mismatch - \"%w\" referencing \"%w\"\x00cannot INSERT into generated column \"%s\"\x00table %S has no column named %s\x00table %S has %d columns but %d values were supplied\x00%d values for %d columns\x00UPSERT not implemented for virtual table \"%s\"\x00cannot UPSERT a view\x00rows inserted\x00sqlite3_extension_init\x00sqlite3_\x00lib\x00_init\x00no entry point [%s] in shared library [%s]\x00error during initialization: %s\x00unable to open shared library [%.*s]\x00so\x00automatic extension loading failed: 
%s\x00seq\x00from\x00to\x00on_update\x00on_delete\x00match\x00cid\x00name\x00type\x00notnull\x00dflt_value\x00pk\x00hidden\x00schema\x00ncol\x00wr\x00strict\x00seqno\x00desc\x00coll\x00key\x00builtin\x00enc\x00narg\x00flags\x00wdth\x00hght\x00flgs\x00unique\x00origin\x00partial\x00rowid\x00fkid\x00file\x00busy\x00checkpointed\x00database\x00status\x00cache_size\x00timeout\x00analysis_limit\x00application_id\x00auto_vacuum\x00automatic_index\x00busy_timeout\x00cache_spill\x00case_sensitive_like\x00cell_size_check\x00checkpoint_fullfsync\x00collation_list\x00compile_options\x00count_changes\x00data_version\x00database_list\x00default_cache_size\x00defer_foreign_keys\x00empty_result_callbacks\x00encoding\x00foreign_key_check\x00foreign_key_list\x00foreign_keys\x00freelist_count\x00full_column_names\x00fullfsync\x00function_list\x00hard_heap_limit\x00ignore_check_constraints\x00incremental_vacuum\x00index_info\x00index_list\x00index_xinfo\x00integrity_check\x00journal_mode\x00journal_size_limit\x00legacy_alter_table\x00locking_mode\x00max_page_count\x00mmap_size\x00module_list\x00optimize\x00page_count\x00page_size\x00pragma_list\x00query_only\x00quick_check\x00read_uncommitted\x00recursive_triggers\x00reverse_unordered_selects\x00schema_version\x00secure_delete\x00short_column_names\x00shrink_memory\x00soft_heap_limit\x00synchronous\x00table_info\x00table_list\x00table_xinfo\x00temp_store\x00temp_store_directory\x00threads\x00trusted_schema\x00user_version\x00wal_autocheckpoint\x00wal_checkpoint\x00writable_schema\x00onoffalseyestruextrafull\x00exclusive\x00normal\x00full\x00incremental\x00memory\x00temporary storage cannot be changed from within a transaction\x00SET NULL\x00SET DEFAULT\x00CASCADE\x00RESTRICT\x00NO ACTION\x00delete\x00persist\x00off\x00truncate\x00wal\x00w\x00a\x00sissii\x00utf8\x00utf16le\x00utf16be\x00-%T\x00fast\x00not a writable directory\x00Safety level may not be changed inside a transaction\x00reset\x00issisii\x00issisi\x00SELECT*FROM\"%w\"\x00shadow\x00sssiii\x00iisX\x00isiX\x00c\x00u\x00isisi\x00iss\x00is\x00iissssss\x00NONE\x00siX\x00*** in database %s ***\n\x00row not in PRIMARY KEY order for %s\x00NULL value in %s.%s\x00non-%s value in %s.%s\x00NUMERIC value in %s.%s\x00C\x00TEXT value in %s.%s\x00CHECK constraint failed in %s\x00row \x00 missing from index \x00 values differ from index \x00non-unique entry in index \x00wrong # of entries in index \x00ok\x00unsupported encoding: %s\x00restart\x00ANALYZE \"%w\".\"%w\"\x00UTF8\x00UTF-8\x00UTF-16le\x00UTF-16be\x00UTF16le\x00UTF16be\x00UTF-16\x00UTF16\x00CREATE TABLE x\x00%c\"%s\"\x00(\"%s\"\x00,arg HIDDEN\x00,schema HIDDEN\x00PRAGMA \x00%Q.\x00=%Q\x00error in %s %s after %s: %s\x00malformed database schema (%s)\x00%z - %s\x00rename\x00drop column\x00add column\x00orphan index\x00CREATE TABLE x(type text,name text,tbl_name text,rootpage int,sql text)\x00unsupported file format\x00SELECT*FROM\"%w\".%s ORDER BY rowid\x00database schema is locked: %s\x00statement too long\x00unknown join type: %T%s%T%s%T\x00naturaleftouterightfullinnercross\x00a NATURAL join may not have an ON or USING clause\x00cannot join using column %s - column not present in both tables\x00ambiguous reference to %s in USING()\x00UNION ALL\x00INTERSECT\x00EXCEPT\x00UNION\x00USE TEMP B-TREE FOR %s\x00USE TEMP B-TREE FOR %sORDER BY\x00RIGHT PART OF \x00column%d\x00%.*z:%u\x00NUM\x00cannot use window functions in recursive queries\x00recursive aggregate queries not supported\x00SETUP\x00RECURSIVE STEP\x00SCAN %d CONSTANT ROW%s\x00S\x00COMPOUND 
QUERY\x00LEFT-MOST SUBQUERY\x00%s USING TEMP B-TREE\x00all VALUES must have the same number of terms\x00SELECTs to the left and right of %s do not have the same number of result columns\x00MERGE (%s)\x00LEFT\x00RIGHT\x00no such index: %s\x00'%s' is not a function\x00no such index: \"%s\"\x00multiple references to recursive table: %s\x00circular reference: %s\x00table %s has %d values for %d columns\x00multiple recursive references: %s\x00recursive reference in a subquery: %s\x00%!S\x00too many references to \"%s\": max 65535\x00access to view \"%s\" prohibited\x00..%s\x00%s.%s.%s\x00no such table: %s\x00no tables specified\x00too many columns in result set\x00DISTINCT aggregates must have exactly one argument\x00USE TEMP B-TREE FOR %s(DISTINCT)\x00SCAN %s%s%s\x00 USING COVERING INDEX \x00target object/alias may not appear in FROM clause: %s\x00expected %d columns for '%s' but got %d\x00CO-ROUTINE %!S\x00MATERIALIZE %!S\x00DISTINCT\x00GROUP BY\x00sqlite3_get_table() called with two or more incompatible queries\x00temporary trigger may not have qualified name\x00trigger\x00cannot create triggers on virtual tables\x00trigger %T already exists\x00cannot create trigger on system table\x00cannot create %s trigger on view: %S\x00BEFORE\x00AFTER\x00cannot create INSTEAD OF trigger on table: %S\x00trigger \"%s\" may not write to shadow table \"%s\"\x00INSERT INTO %Q.sqlite_master VALUES('trigger',%Q,%Q,0,'CREATE TRIGGER %q')\x00type='trigger' AND name='%q'\x00no such trigger: %S\x00DELETE FROM %Q.sqlite_master WHERE name=%Q AND type='trigger'\x00%s RETURNING is not available on virtual tables\x00DELETE\x00UPDATE\x00RETURNING may not use \"TABLE.*\" wildcards\x00-- TRIGGER %s\x00cannot UPDATE generated column \"%s\"\x00no such column: %s\x00rows updated\x00%r \x00%sON CONFLICT clause does not match any PRIMARY KEY or UNIQUE constraint\x00CRE\x00INS\x00cannot VACUUM from within a transaction\x00cannot VACUUM - SQL statements in progress\x00non-text filename\x00ATTACH %Q AS vacuum_db\x00output file already exists\x00SELECT sql FROM \"%w\".sqlite_schema WHERE type='table'AND name<>'sqlite_sequence' AND coalesce(rootpage,1)>0\x00SELECT sql FROM \"%w\".sqlite_schema WHERE type='index'\x00SELECT'INSERT INTO vacuum_db.'||quote(name)||' SELECT*FROM\"%w\".'||quote(name)FROM vacuum_db.sqlite_schema WHERE type='table'AND coalesce(rootpage,1)>0\x00INSERT INTO vacuum_db.sqlite_schema SELECT*FROM \"%w\".sqlite_schema WHERE type IN('view','trigger') OR(type='table'AND rootpage=0)\x00CREATE VIRTUAL TABLE %T\x00UPDATE %Q.sqlite_master SET type='table', name=%Q, tbl_name=%Q, rootpage=0, sql=%Q WHERE rowid=#%d\x00name=%Q AND sql=%Q\x00vtable constructor called recursively: %s\x00vtable constructor failed: %s\x00vtable constructor did not declare schema: %s\x00no such module: %s\x00\x00 AND \x00(\x00 (\x00%s=?\x00ANY(%s)\x00>\x00<\x00%s %S\x00SEARCH\x00SCAN\x00AUTOMATIC PARTIAL COVERING INDEX\x00AUTOMATIC COVERING INDEX\x00COVERING INDEX %s\x00INDEX %s\x00 USING \x00 USING INTEGER PRIMARY KEY (%s\x00>? 
AND %s\x00%c?)\x00 VIRTUAL TABLE INDEX %d:%s\x00 LEFT-JOIN\x00BLOOM FILTER ON %S (\x00rowid=?\x00MULTI-INDEX OR\x00INDEX %d\x00RIGHT-JOIN %s\x00regexp\x00ON clause references tables to its right\x00NOCASE\x00too many arguments on %s() - max %d\x00automatic index on %s(%s)\x00auto-index\x00%s.xBestIndex malfunction\x00abbreviated query algorithm search\x00no query solution\x00at most %d tables in a join\x00SCAN CONSTANT ROW\x00second argument to nth_value must be a positive integer\x00argument of ntile must be a positive integer\x00row_number\x00dense_rank\x00rank\x00percent_rank\x00cume_dist\x00ntile\x00last_value\x00nth_value\x00first_value\x00lead\x00lag\x00no such window: %s\x00RANGE with offset PRECEDING/FOLLOWING requires one ORDER BY expression\x00FILTER clause may only be used with aggregate window functions\x00misuse of aggregate: %s()\x00unsupported frame specification\x00PARTITION clause\x00ORDER BY clause\x00frame specification\x00cannot override %s of window: %s\x00DISTINCT is not supported for window functions\x00frame starting offset must be a non-negative integer\x00frame ending offset must be a non-negative integer\x00frame starting offset must be a non-negative number\x00frame ending offset must be a non-negative number\x00%s clause should come after %s not before\x00ORDER BY\x00LIMIT\x00too many terms in compound SELECT\x00syntax error after column name \"%.*s\"\x00parser stack overflow\x00unknown table option: %.*s\x00set list\x00near \"%T\": syntax error\x00qualified table names are not allowed on INSERT, UPDATE, and DELETE statements within triggers\x00the INDEXED BY clause is not allowed on UPDATE or DELETE statements within triggers\x00the NOT INDEXED clause is not allowed on UPDATE or DELETE statements within triggers\x00incomplete input\x00unrecognized token: \"%T\"\x00%s in \"%s\"\x00create\x00temp\x00temporary\x00end\x00explain\x00unable to close due to unfinalized statements or unfinished backups\x00unknown error\x00abort due to ROLLBACK\x00another row available\x00no more rows available\x00not an error\x00SQL logic error\x00access permission denied\x00query aborted\x00database is locked\x00database table is locked\x00attempt to write a readonly database\x00interrupted\x00disk I/O error\x00database disk image is malformed\x00unknown operation\x00database or disk is full\x00unable to open database file\x00locking protocol\x00constraint failed\x00datatype mismatch\x00bad parameter or other API misuse\x00authorization denied\x00column index out of range\x00file is not a database\x00notification message\x00warning message\x00unable to delete/modify user-function due to active statements\x00unable to use function %s in the requested context\x00unknown database: %s\x00unable to delete/modify collation sequence due to active statements\x00file:\x00localhost\x00invalid uri authority: %.*s\x00vfs\x00cache\x00mode\x00no such %s mode: %s\x00%s mode not allowed: %s\x00no such vfs: %s\x00shared\x00private\x00ro\x00rw\x00rwc\x00RTRIM\x00\x00\x00\x00%s at line %d of [%.10s]\x00database corruption\x00misuse\x00cannot open file\x00no such table column: %s.%s\x00SQLITE_\x00database is deadlocked\x00array\x00object\x000123456789abcdef\x00JSON cannot hold BLOB values\x00malformed JSON\x00[0]\x00JSON path error near '%q'\x00json_%s() needs an odd number of arguments\x00$[\x00$.\x00json_object() requires an even number of arguments\x00json_object() labels must be TEXT\x00set\x00insert\x00[]\x00{}\x00CREATE TABLE x(key,value,type,atom,id,parent,fullkey,path,json HIDDEN,root 
HIDDEN)\x00.%.*s\x00[%d]\x00$\x00json\x00json_array\x00json_array_length\x00json_extract\x00->\x00->>\x00json_insert\x00json_object\x00json_patch\x00json_quote\x00json_remove\x00json_replace\x00json_set\x00json_type\x00json_valid\x00json_group_array\x00json_group_object\x00json_each\x00json_tree\x00%s_node\x00data\x00DROP TABLE '%q'.'%q_node';DROP TABLE '%q'.'%q_rowid';DROP TABLE '%q'.'%q_parent';\x00RtreeMatchArg\x00SELECT * FROM %Q.%Q\x00UNIQUE constraint failed: %s.%s\x00rtree constraint failed: %s.(%s<=%s)\x00ALTER TABLE %Q.'%q_node' RENAME TO \"%w_node\";ALTER TABLE %Q.'%q_parent' RENAME TO \"%w_parent\";ALTER TABLE %Q.'%q_rowid' RENAME TO \"%w_rowid\";\x00SELECT stat FROM %Q.sqlite_stat1 WHERE tbl = '%q_rowid'\x00node\x00CREATE TABLE \"%w\".\"%w_rowid\"(rowid INTEGER PRIMARY KEY,nodeno\x00,a%d\x00);CREATE TABLE \"%w\".\"%w_node\"(nodeno INTEGER PRIMARY KEY,data);\x00CREATE TABLE \"%w\".\"%w_parent\"(nodeno INTEGER PRIMARY KEY,parentnode);\x00INSERT INTO \"%w\".\"%w_node\"VALUES(1,zeroblob(%d))\x00INSERT INTO\"%w\".\"%w_rowid\"(rowid,nodeno)VALUES(?1,?2)ON CONFLICT(rowid)DO UPDATE SET nodeno=excluded.nodeno\x00SELECT * FROM \"%w\".\"%w_rowid\" WHERE rowid=?1\x00UPDATE \"%w\".\"%w_rowid\"SET \x00a%d=coalesce(?%d,a%d)\x00a%d=?%d\x00 WHERE rowid=?1\x00INSERT OR REPLACE INTO '%q'.'%q_node' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_node' WHERE nodeno = ?1\x00SELECT nodeno FROM '%q'.'%q_rowid' WHERE rowid = ?1\x00INSERT OR REPLACE INTO '%q'.'%q_rowid' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_rowid' WHERE rowid = ?1\x00SELECT parentnode FROM '%q'.'%q_parent' WHERE nodeno = ?1\x00INSERT OR REPLACE INTO '%q'.'%q_parent' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_parent' WHERE nodeno = ?1\x00PRAGMA %Q.page_size\x00SELECT length(data) FROM '%q'.'%q_node' WHERE nodeno = 1\x00undersize RTree blobs in \"%q_node\"\x00Wrong number of columns for an rtree table\x00Too few columns for an rtree table\x00Too many columns for an rtree table\x00Auxiliary rtree columns must be last\x00CREATE TABLE x(%.*s INT\x00,%.*s\x00);\x00,%.*s REAL\x00,%.*s INT\x00{%lld\x00 %g\x00}\x00Invalid argument to rtreedepth()\x00%z%s%z\x00SELECT data FROM %Q.'%q_node' WHERE nodeno=?\x00Node %lld missing from database\x00SELECT parentnode FROM %Q.'%q_parent' WHERE nodeno=?1\x00SELECT nodeno FROM %Q.'%q_rowid' WHERE rowid=?1\x00Mapping (%lld -> %lld) missing from %s table\x00%_rowid\x00%_parent\x00Found (%lld -> %lld) in %s table, expected (%lld -> %lld)\x00Dimension %d of cell %d on node %lld is corrupt\x00Dimension %d of cell %d on node %lld is corrupt relative to parent\x00Node %lld is too small (%d bytes)\x00Rtree depth out of range (%d)\x00Node %lld is too small for cell count of %d (%d bytes)\x00SELECT count(*) FROM %Q.'%q%s'\x00Wrong number of entries in %%%s table - expected %lld, actual %lld\x00SELECT * FROM %Q.'%q_rowid'\x00Schema corrupt or not an rtree\x00_rowid\x00_parent\x00END\x00wrong number of arguments to function rtreecheck()\x00[\x00[%!g,%!g],\x00[%!g,%!g]]\x00\x00CREATE TABLE x(_shape\x00,%s\x00rtree\x00fullscan\x00_shape does not contain a valid polygon\x00geopoly_overlap\x00geopoly_within\x00geopoly\x00geopoly_area\x00geopoly_blob\x00geopoly_json\x00geopoly_svg\x00geopoly_contains_point\x00geopoly_debug\x00geopoly_bbox\x00geopoly_xform\x00geopoly_regular\x00geopoly_ccw\x00geopoly_group_bbox\x00rtreenode\x00rtreedepth\x00rtreecheck\x00rtree_i32\x00corrupt fossil delta\x00DROP TRIGGER IF EXISTS temp.rbu_insert_tr;DROP TRIGGER IF EXISTS temp.rbu_update1_tr;DROP TRIGGER IF EXISTS temp.rbu_update2_tr;DROP TRIGGER IF 
EXISTS temp.rbu_delete_tr;\x00SELECT rbu_target_name(name, type='view') AS target, name FROM sqlite_schema WHERE type IN ('table', 'view') AND target IS NOT NULL %s ORDER BY name\x00AND rootpage!=0 AND rootpage IS NOT NULL\x00SELECT name, rootpage, sql IS NULL OR substr(8, 6)=='UNIQUE' FROM main.sqlite_schema WHERE type='index' AND tbl_name = ?\x00SELECT (sql COLLATE nocase BETWEEN 'CREATE VIRTUAL' AND 'CREATE VIRTUAM'), rootpage FROM sqlite_schema WHERE name=%Q\x00PRAGMA index_list=%Q\x00SELECT rootpage FROM sqlite_schema WHERE name = %Q\x00PRAGMA table_info=%Q\x00PRAGMA main.index_list = %Q\x00PRAGMA main.index_xinfo = %Q\x00SELECT * FROM '%q'\x00rbu_\x00rbu_rowid\x00table %q %s rbu_rowid column\x00may not have\x00requires\x00PRAGMA table_info(%Q)\x00column missing from %q: %s\x00%z%s\"%w\"\x00%z%s%s\"%w\"%s\x00SELECT max(_rowid_) FROM \"%s%w\"\x00 WHERE _rowid_ > %lld \x00 DESC\x00quote(\x00||','||\x00SELECT %s FROM \"%s%w\" ORDER BY %s LIMIT 1\x00 WHERE (%s) > (%s) \x00_rowid_\x00%z%s \"%w\" COLLATE %Q\x00%z%s \"rbu_imp_%d%w\" COLLATE %Q DESC\x00%z%s quote(\"rbu_imp_%d%w\")\x00SELECT %s FROM \"rbu_imp_%w\" ORDER BY %s LIMIT 1\x00%z%s%s\x00(%s) > (%s)\x00%z%s(%.*s) COLLATE %Q\x00%z%s\"%w\" COLLATE %Q\x00%z%s\"rbu_imp_%d%w\"%s\x00%z%s\"rbu_imp_%d%w\" %s COLLATE %Q\x00%z%s\"rbu_imp_%d%w\" IS ?\x00%z%s%s.\"%w\"\x00%z%sNULL\x00%z, %s._rowid_\x00_rowid_ = ?%d\x00%z%sc%d=?%d\x00_rowid_ = (SELECT id FROM rbu_imposter2 WHERE %z)\x00%z%s\"%w\"=?%d\x00invalid rbu_control value\x00%z%s\"%w\"=rbu_delta(\"%w\", ?%d)\x00%z%s\"%w\"=rbu_fossil_delta(\"%w\", ?%d)\x00PRIMARY KEY(\x00%z%s\"%w\"%s\x00%z)\x00SELECT name FROM sqlite_schema WHERE rootpage = ?\x00%z%sc%d %s COLLATE %Q\x00%z%sc%d%s\x00%z, id INTEGER\x00CREATE TABLE rbu_imposter2(%z, PRIMARY KEY(%z)) WITHOUT ROWID\x00PRIMARY KEY \x00%z%s\"%w\" %s %sCOLLATE %Q%s\x00 NOT NULL\x00%z, %z\x00CREATE TABLE \"rbu_imp_%w\"(%z)%s\x00 WITHOUT ROWID\x00INSERT INTO %s.'rbu_tmp_%q'(rbu_control,%s%s) VALUES(%z)\x00SELECT trim(sql) FROM sqlite_schema WHERE type='index' AND name=?\x00 LIMIT -1 OFFSET %d\x00CREATE TABLE \"rbu_imp_%w\"( %s, PRIMARY KEY( %s ) ) WITHOUT ROWID\x00INSERT INTO \"rbu_imp_%w\" VALUES(%s)\x00DELETE FROM \"rbu_imp_%w\" WHERE %s\x00SELECT %s, 0 AS rbu_control FROM '%q' %s %s %s ORDER BY %s%s\x00AND\x00WHERE\x00SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' %s ORDER BY %s%s\x00SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' %s UNION ALL SELECT %s, rbu_control FROM '%q' %s %s typeof(rbu_control)='integer' AND rbu_control!=1 ORDER BY %s%s\x00rbu_imp_\x00INSERT INTO \"%s%w\"(%s%s) VALUES(%s)\x00, _rowid_\x00DELETE FROM \"%s%w\" WHERE %s\x00, rbu_rowid\x00CREATE TABLE IF NOT EXISTS %s.'rbu_tmp_%q' AS SELECT *%s FROM '%q' WHERE 0;\x00, 0 AS rbu_rowid\x00CREATE TEMP TRIGGER rbu_delete_tr BEFORE DELETE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(3, %s);END;CREATE TEMP TRIGGER rbu_update1_tr BEFORE UPDATE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(3, %s);END;CREATE TEMP TRIGGER rbu_update2_tr AFTER UPDATE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(4, %s);END;\x00CREATE TEMP TRIGGER rbu_insert_tr AFTER INSERT ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(0, %s);END;\x00,_rowid_ \x00,rbu_rowid\x00SELECT %s,%s rbu_control%s FROM '%q'%s %s %s %s\x000 AS \x00UPDATE \"%s%w\" SET %s WHERE %s\x00SELECT k, v FROM %s.rbu_state\x00file:///%s-vacuum?modeof=%s\x00ATTACH %Q AS stat\x00CREATE TABLE IF NOT EXISTS %s.rbu_state(k INTEGER PRIMARY KEY, v)\x00cannot vacuum wal mode 
database\x00file:%s-vactmp?rbu_memory=1%s%s\x00&\x00rbu_tmp_insert\x00rbu_fossil_delta\x00rbu_target_name\x00SELECT * FROM sqlite_schema\x00rbu vfs not found\x00PRAGMA main.wal_checkpoint=restart\x00rbu_exclusive_checkpoint\x00%s-oal\x00%s-wal\x00PRAGMA schema_version\x00PRAGMA schema_version = %d\x00INSERT OR REPLACE INTO %s.rbu_state(k, v) VALUES (%d, %d), (%d, %Q), (%d, %Q), (%d, %d), (%d, %d), (%d, %lld), (%d, %lld), (%d, %lld), (%d, %lld), (%d, %Q) \x00PRAGMA main.%s\x00PRAGMA main.%s = %d\x00PRAGMA writable_schema=1\x00SELECT sql FROM sqlite_schema WHERE sql!='' AND rootpage!=0 AND name!='sqlite_sequence' ORDER BY type DESC\x00SELECT * FROM sqlite_schema WHERE rootpage=0 OR rootpage IS NULL\x00INSERT INTO sqlite_schema VALUES(?,?,?,?,?)\x00PRAGMA writable_schema=0\x00DELETE FROM %s.'rbu_tmp_%q'\x00rbu_state mismatch error\x00rbu_vfs_%d\x00SELECT count(*) FROM sqlite_schema WHERE type='index' AND tbl_name = %Q\x00rbu_index_cnt\x00SELECT 1 FROM sqlite_schema WHERE tbl_name = 'rbu_count'\x00SELECT sum(cnt * (1 + rbu_index_cnt(rbu_target_name(tbl))))FROM rbu_count\x00cannot update wal mode database\x00database modified during rbu %s\x00vacuum\x00update\x00BEGIN IMMEDIATE\x00PRAGMA journal_mode=off\x00-vactmp\x00DELETE FROM stat.rbu_state\x00rbu/zipvfs setup error\x00rbu(%s)/%z\x00rbu_memory\x00SELECT 0, 'tbl', '', 0, '', 1 UNION ALL SELECT 1, 'idx', '', 0, '', 2 UNION ALL SELECT 2, 'stat', '', 0, '', 0\x00PRAGMA '%q'.table_info('%q')\x00%z%s\"%w\".\"%w\".\"%w\"=\"%w\".\"%w\".\"%w\"\x00%z%s\"%w\".\"%w\".\"%w\" IS NOT \"%w\".\"%w\".\"%w\"\x00 OR \x00SELECT * FROM \"%w\".\"%w\" WHERE NOT EXISTS ( SELECT 1 FROM \"%w\".\"%w\" WHERE %s)\x00SELECT * FROM \"%w\".\"%w\", \"%w\".\"%w\" WHERE %s AND (%z)\x00table schemas do not match\x00SELECT tbl, ?2, stat FROM %Q.sqlite_stat1 WHERE tbl IS ?1 AND idx IS (CASE WHEN ?2=X'' THEN NULL ELSE ?2 END)\x00SELECT * FROM \x00 WHERE \x00 IS ?\x00SAVEPOINT changeset\x00RELEASE changeset\x00UPDATE main.\x00 SET \x00 = ?\x00idx IS CASE WHEN length(?4)=0 AND typeof(?4)='blob' THEN NULL ELSE ?4 END \x00DELETE FROM main.\x00 AND (?\x00AND \x00INSERT INTO main.\x00) VALUES(?\x00, ?\x00INSERT INTO main.sqlite_stat1 VALUES(?1, CASE WHEN length(?2)=0 AND typeof(?2)='blob' THEN NULL ELSE ?2 END, ?3)\x00DELETE FROM main.sqlite_stat1 WHERE tbl=?1 AND idx IS CASE WHEN length(?2)=0 AND typeof(?2)='blob' THEN NULL ELSE ?2 END AND (?4 OR stat IS ?3)\x00SAVEPOINT replace_op\x00RELEASE replace_op\x00SAVEPOINT changeset_apply\x00PRAGMA defer_foreign_keys = 1\x00sqlite3changeset_apply(): no such table: %s\x00sqlite3changeset_apply(): table %s has %d columns, expected %d or more\x00sqlite3changeset_apply(): primary key mismatch for table %s\x00PRAGMA defer_foreign_keys = 0\x00RELEASE changeset_apply\x00ROLLBACK TO changeset_apply\x00fts5: parser stack overflow\x00fts5: syntax error near \"%.*s\"\x00%z%.*s\x00wrong number of arguments to function highlight()\x00wrong number of arguments to function snippet()\x00snippet\x00highlight\x00bm25\x00prefix\x00malformed prefix=... directive\x00too many prefix indexes (max %d)\x00prefix length out of range (max 999)\x00tokenize\x00multiple tokenize=... directives\x00parse error in tokenize directive\x00content\x00multiple content=... directives\x00%Q.%Q\x00content_rowid\x00multiple content_rowid=... directives\x00columnsize\x00malformed columnsize=... directive\x00columns\x00malformed detail=... 
directive\x00unrecognized option: \"%.*s\"\x00reserved fts5 column name: %s\x00unindexed\x00unrecognized column option: %s\x00T.%Q\x00, T.%Q\x00, T.c%d\x00reserved fts5 table name: %s\x00parse error in \"%s\"\x00docsize\x00%Q.'%q_%s'\x00CREATE TABLE x(\x00%z%s%Q\x00%z, %Q HIDDEN, %s HIDDEN)\x00pgsz\x00hashsize\x00automerge\x00usermerge\x00crisismerge\x00SELECT k, v FROM %Q.'%q_config'\x00version\x00invalid fts5 file format (found %d, expected %d) - run 'rebuild'\x00unterminated string\x00fts5: syntax error near \"%.1s\"\x00OR\x00NOT\x00NEAR\x00expected integer, got \"%.*s\"\x00fts5: column queries are not supported (detail=none)\x00fts5: %s queries are not supported (detail!=full)\x00phrase\x00block\x00REPLACE INTO '%q'.'%q_data'(id, block) VALUES(?,?)\x00DELETE FROM '%q'.'%q_data' WHERE id>=? AND id<=?\x00DELETE FROM '%q'.'%q_idx' WHERE segid=?\x00PRAGMA %Q.data_version\x00SELECT pgno FROM '%q'.'%q_idx' WHERE segid=? AND term<=? ORDER BY term DESC LIMIT 1\x00INSERT INTO '%q'.'%q_idx'(segid,term,pgno) VALUES(?,?,?)\x00%s_data\x00id INTEGER PRIMARY KEY, block BLOB\x00segid, term, pgno, PRIMARY KEY(segid, term)\x00SELECT segid, term, (pgno>>1), (pgno&1) FROM %Q.'%q_idx' WHERE segid=%d ORDER BY 1, 2\x00\x00\x00\x00\x00\x00recursively defined fts5 content table\x00SELECT rowid, rank FROM %Q.%Q ORDER BY %s(\"%w\"%s%s) %s\x00DESC\x00ASC\x00reads\x00unknown special query: %.*s\x00SELECT %s\x00no such function: %s\x00parse error in rank function: %s\x00%s: table does not support scanning\x00delete-all\x00'delete-all' may only be used with a contentless or external content fts5 table\x00rebuild\x00'rebuild' may not be used with a contentless fts5 table\x00merge\x00integrity-check\x00cannot %s contentless fts5 table: %s\x00DELETE from\x00no such cursor: %lld\x00no such tokenizer: %s\x00error in tokenizer constructor\x00fts5_api_ptr\x00fts5: 2023-02-21 18:09:37 05941c2a04037fc3ed2ffae11f5d2260706f89431f463518740f72ada350866d\x00config\x00fts5\x00fts5_source_id\x00SELECT %s FROM %s T WHERE T.%Q >= ? AND T.%Q <= ? ORDER BY T.%Q ASC\x00SELECT %s FROM %s T WHERE T.%Q <= ? AND T.%Q >= ? 
ORDER BY T.%Q DESC\x00SELECT %s FROM %s T WHERE T.%Q=?\x00INSERT INTO %Q.'%q_content' VALUES(%s)\x00REPLACE INTO %Q.'%q_content' VALUES(%s)\x00DELETE FROM %Q.'%q_content' WHERE id=?\x00REPLACE INTO %Q.'%q_docsize' VALUES(?,?)\x00DELETE FROM %Q.'%q_docsize' WHERE id=?\x00SELECT sz FROM %Q.'%q_docsize' WHERE id=?\x00REPLACE INTO %Q.'%q_config' VALUES(?,?)\x00SELECT %s FROM %s AS T\x00DROP TABLE IF EXISTS %Q.'%q_data';DROP TABLE IF EXISTS %Q.'%q_idx';DROP TABLE IF EXISTS %Q.'%q_config';\x00DROP TABLE IF EXISTS %Q.'%q_docsize';\x00DROP TABLE IF EXISTS %Q.'%q_content';\x00ALTER TABLE %Q.'%q_%s' RENAME TO '%q_%s';\x00CREATE TABLE %Q.'%q_%q'(%s)%s\x00fts5: error creating shadow table %q_%s: %s\x00id INTEGER PRIMARY KEY\x00, c%d\x00id INTEGER PRIMARY KEY, sz BLOB\x00k PRIMARY KEY, v\x00DELETE FROM %Q.'%q_data';DELETE FROM %Q.'%q_idx';\x00DELETE FROM %Q.'%q_docsize';\x00SELECT count(*) FROM %Q.'%q_%s'\x00tokenchars\x00separators\x00L* N* Co\x00categories\x00remove_diacritics\x00unicode61\x00al\x00ance\x00ence\x00er\x00ic\x00able\x00ible\x00ant\x00ement\x00ment\x00ent\x00ion\x00ou\x00ism\x00ate\x00iti\x00ous\x00ive\x00ize\x00at\x00bl\x00ble\x00iz\x00ational\x00tional\x00tion\x00enci\x00anci\x00izer\x00logi\x00bli\x00alli\x00entli\x00eli\x00e\x00ousli\x00ization\x00ation\x00ator\x00alism\x00iveness\x00fulness\x00ful\x00ousness\x00aliti\x00iviti\x00biliti\x00ical\x00ness\x00icate\x00iciti\x00ative\x00alize\x00eed\x00ee\x00ed\x00ing\x00case_sensitive\x00ascii\x00porter\x00trigram\x00col\x00row\x00instance\x00fts5vocab: unknown table type: %Q\x00CREATE TABlE vocab(term, col, doc, cnt)\x00CREATE TABlE vocab(term, doc, cnt)\x00CREATE TABlE vocab(term, doc, col, offset)\x00wrong number of vtable arguments\x00recursive definition for %s.%s\x00SELECT t.%Q FROM %Q.%Q AS t WHERE t.%Q MATCH '*id'\x00no such fts5 table: %s.%s\x00fts5vocab\x002023-02-21 18:09:37 05941c2a04037fc3ed2ffae11f5d2260706f89431f463518740f72ada350866d\x00" +var ts1 = "3.41.2\x00ATOMIC_INTRINSICS=0\x00COMPILER=clang-13.0.0\x00DEFAULT_AUTOVACUUM\x00DEFAULT_CACHE_SIZE=-2000\x00DEFAULT_FILE_FORMAT=4\x00DEFAULT_JOURNAL_SIZE_LIMIT=-1\x00DEFAULT_MEMSTATUS=0\x00DEFAULT_MMAP_SIZE=0\x00DEFAULT_PAGE_SIZE=4096\x00DEFAULT_PCACHE_INITSZ=20\x00DEFAULT_RECURSIVE_TRIGGERS\x00DEFAULT_SECTOR_SIZE=4096\x00DEFAULT_SYNCHRONOUS=2\x00DEFAULT_WAL_AUTOCHECKPOINT=1000\x00DEFAULT_WAL_SYNCHRONOUS=2\x00DEFAULT_WORKER_THREADS=0\x00ENABLE_COLUMN_METADATA\x00ENABLE_FTS5\x00ENABLE_GEOPOLY\x00ENABLE_MATH_FUNCTIONS\x00ENABLE_MEMORY_MANAGEMENT\x00ENABLE_OFFSET_SQL_FUNC\x00ENABLE_PREUPDATE_HOOK\x00ENABLE_RBU\x00ENABLE_RTREE\x00ENABLE_SESSION\x00ENABLE_SNAPSHOT\x00ENABLE_STAT4\x00ENABLE_UNLOCK_NOTIFY\x00LIKE_DOESNT_MATCH_BLOBS\x00MALLOC_SOFT_LIMIT=1024\x00MAX_ATTACHED=10\x00MAX_COLUMN=2000\x00MAX_COMPOUND_SELECT=500\x00MAX_DEFAULT_PAGE_SIZE=8192\x00MAX_EXPR_DEPTH=1000\x00MAX_FUNCTION_ARG=127\x00MAX_LENGTH=1000000000\x00MAX_LIKE_PATTERN_LENGTH=50000\x00MAX_MMAP_SIZE=0x7fff0000\x00MAX_PAGE_COUNT=1073741823\x00MAX_PAGE_SIZE=65536\x00MAX_SQL_LENGTH=1000000000\x00MAX_TRIGGER_DEPTH=1000\x00MAX_VARIABLE_NUMBER=32766\x00MAX_VDBE_OP=250000000\x00MAX_WORKER_THREADS=8\x00MUTEX_NOOP\x00SOUNDEX\x00SYSTEM_MALLOC\x00TEMP_STORE=1\x00THREADSAFE=1\x00BINARY\x00ANY\x00BLOB\x00INT\x00INTEGER\x00REAL\x00TEXT\x0020b:20e\x0020c:20e\x0020e\x0040f-21a-21d\x00now\x00local time unavailable\x00second\x00minute\x00hour\x00\x00\x00day\x00\x00\x00\x00month\x00\x00year\x00\x00\x00auto\x00julianday\x00localtime\x00unixepoch\x00utc\x00weekday \x00start of 
\x00month\x00year\x00day\x00%02d\x00%06.3f\x00%03d\x00%.16g\x00%lld\x00%04d\x00date\x00time\x00datetime\x00strftime\x00current_time\x00current_timestamp\x00current_date\x00failed to allocate %u bytes of memory\x00failed memory resize %u to %u bytes\x00out of memory\x000123456789ABCDEF0123456789abcdef\x00-x0\x00X0\x00%\x00NaN\x00Inf\x00\x00NULL\x00(NULL)\x00.\x00(join-%u)\x00(subquery-%u)\x00thstndrd\x00922337203685477580\x00API call with %s database connection pointer\x00unopened\x00invalid\x00Savepoint\x00AutoCommit\x00Transaction\x00Checkpoint\x00JournalMode\x00Vacuum\x00VFilter\x00VUpdate\x00Init\x00Goto\x00Gosub\x00InitCoroutine\x00Yield\x00MustBeInt\x00Jump\x00Once\x00If\x00IfNot\x00IsType\x00Not\x00IfNullRow\x00SeekLT\x00SeekLE\x00SeekGE\x00SeekGT\x00IfNotOpen\x00IfNoHope\x00NoConflict\x00NotFound\x00Found\x00SeekRowid\x00NotExists\x00Last\x00IfSmaller\x00SorterSort\x00Sort\x00Rewind\x00SorterNext\x00Prev\x00Next\x00IdxLE\x00IdxGT\x00IdxLT\x00Or\x00And\x00IdxGE\x00RowSetRead\x00RowSetTest\x00Program\x00FkIfZero\x00IsNull\x00NotNull\x00Ne\x00Eq\x00Gt\x00Le\x00Lt\x00Ge\x00ElseEq\x00IfPos\x00IfNotZero\x00DecrJumpZero\x00IncrVacuum\x00VNext\x00Filter\x00PureFunc\x00Function\x00Return\x00EndCoroutine\x00HaltIfNull\x00Halt\x00Integer\x00Int64\x00String\x00BeginSubrtn\x00Null\x00SoftNull\x00Blob\x00Variable\x00Move\x00Copy\x00SCopy\x00IntCopy\x00FkCheck\x00ResultRow\x00CollSeq\x00AddImm\x00RealAffinity\x00Cast\x00Permutation\x00Compare\x00IsTrue\x00ZeroOrNull\x00Offset\x00Column\x00TypeCheck\x00Affinity\x00MakeRecord\x00Count\x00ReadCookie\x00SetCookie\x00ReopenIdx\x00BitAnd\x00BitOr\x00ShiftLeft\x00ShiftRight\x00Add\x00Subtract\x00Multiply\x00Divide\x00Remainder\x00Concat\x00OpenRead\x00OpenWrite\x00BitNot\x00OpenDup\x00OpenAutoindex\x00String8\x00OpenEphemeral\x00SorterOpen\x00SequenceTest\x00OpenPseudo\x00Close\x00ColumnsUsed\x00SeekScan\x00SeekHit\x00Sequence\x00NewRowid\x00Insert\x00RowCell\x00Delete\x00ResetCount\x00SorterCompare\x00SorterData\x00RowData\x00Rowid\x00NullRow\x00SeekEnd\x00IdxInsert\x00SorterInsert\x00IdxDelete\x00DeferredSeek\x00IdxRowid\x00FinishSeek\x00Destroy\x00Clear\x00ResetSorter\x00CreateBtree\x00SqlExec\x00ParseSchema\x00LoadAnalysis\x00DropTable\x00DropIndex\x00Real\x00DropTrigger\x00IntegrityCk\x00RowSetAdd\x00Param\x00FkCounter\x00MemMax\x00OffsetLimit\x00AggInverse\x00AggStep\x00AggStep1\x00AggValue\x00AggFinal\x00Expire\x00CursorLock\x00CursorUnlock\x00TableLock\x00VBegin\x00VCreate\x00VDestroy\x00VOpen\x00VInitIn\x00VColumn\x00VRename\x00Pagecount\x00MaxPgcnt\x00ClrSubtype\x00FilterAdd\x00Trace\x00CursorHint\x00ReleaseReg\x00Noop\x00Explain\x00Abortable\x00open\x00close\x00access\x00getcwd\x00stat\x00fstat\x00ftruncate\x00fcntl\x00read\x00pread\x00pread64\x00write\x00pwrite\x00pwrite64\x00fchmod\x00fallocate\x00unlink\x00openDirectory\x00mkdir\x00rmdir\x00fchown\x00geteuid\x00mmap\x00munmap\x00mremap\x00getpagesize\x00readlink\x00lstat\x00ioctl\x00attempt to open \"%s\" as file descriptor %d\x00/dev/null\x00os_unix.c:%d: (%d) %s(%s) - %s\x00cannot fstat db file %s\x00file unlinked while open: %s\x00multiple links to file: %s\x00file renamed while open: %s\x00%s\x00full_fsync\x00%s-shm\x00readonly_shm\x00psow\x00unix-excl\x00%s.lock\x00/var/tmp\x00/usr/tmp\x00/tmp\x00SQLITE_TMPDIR\x00TMPDIR\x00%s/etilqs_%llx%c\x00modeof\x00fsync\x00/dev/urandom\x00unix\x00unix-none\x00unix-dotfile\x00memdb\x00memdb(%p,%lld)\x00PRAGMA \"%w\".page_count\x00ATTACH x AS %Q\x00recovered %d pages from %s\x00-journal\x00-wal\x00nolock\x00immutable\x00PRAGMA 
table_list\x00recovered %d frames from WAL file %s\x00cannot limit WAL size: %s\x00SQLite format 3\x00:memory:\x00@ \x00\n\x00invalid page number %d\x002nd reference to page %d\x00Failed to read ptrmap key=%d\x00Bad ptr map entry key=%d expected=(%d,%d) got=(%d,%d)\x00failed to get page %d\x00freelist leaf count too big on page %d\x00%s is %d but should be %d\x00size\x00overflow list length\x00Page %u: \x00unable to get the page. error code=%d\x00btreeInitPage() returns error code %d\x00free space corruption\x00On tree page %u cell %d: \x00On page %u at right child: \x00Offset %d out of range %d..%d\x00Extends off end of page\x00Rowid %lld out of order\x00Child page depth differs\x00Multiple uses for byte %u of page %u\x00Fragmentation of %d bytes reported as %d on page %u\x00Main freelist: \x00max rootpage (%d) disagrees with header (%d)\x00incremental_vacuum enabled with a max rootpage of zero\x00Page %d is never used\x00Pointer map page %d is referenced\x00unknown database %s\x00destination database is in use\x00source and destination must be distinct\x00%!.15g\x00-\x00%s%s\x00k(%d\x00B\x00,%s%s%s\x00N.\x00)\x00%.18s-%s\x00%s(%d)\x00%d\x00(blob)\x00vtab:%p\x00%c%u\x00]\x00program\x00?\x008\x0016LE\x0016BE\x00addr\x00opcode\x00p1\x00p2\x00p3\x00p4\x00p5\x00comment\x00id\x00parent\x00notused\x00detail\x00%.4c%s%.16c\x00MJ delete: %s\x00MJ collide: %s\x00-mj%06X9%02X\x00FOREIGN KEY constraint failed\x00a CHECK constraint\x00a generated column\x00an index\x00non-deterministic use of %s() in %s\x00API called with finalized prepared statement\x00API called with NULL prepared statement\x00string or blob too big\x00bind on a busy prepared statement: [%s]\x00-- \x00'%.*q'\x00zeroblob(%d)\x00x'\x00%02x\x00'\x00%s constraint failed\x00%z: %s\x00abort at %d in [%s]: %s\x00cannot store %s value in %s column %s.%s\x00cannot open savepoint - SQL statements in progress\x00no such savepoint: %s\x00cannot release savepoint - SQL statements in progress\x00cannot commit transaction - SQL statements in progress\x00cannot start a transaction within a transaction\x00cannot rollback - no transaction is active\x00cannot commit - no transaction is active\x00database schema has changed\x00index corruption\x00sqlite_master\x00SELECT*FROM\"%w\".%s WHERE %s ORDER BY rowid\x00too many levels of trigger recursion\x00cannot change %s wal mode from within a transaction\x00into\x00out of\x00database table is locked: %s\x00ValueList\x00-- %s\x00statement aborts at %d: [%s] %s\x00NOT NULL\x00UNIQUE\x00CHECK\x00FOREIGN KEY\x00cannot open value of type %s\x00null\x00real\x00integer\x00no such rowid: %lld\x00cannot open virtual table: %s\x00cannot open table without rowid: %s\x00cannot open view: %s\x00no such column: \"%s\"\x00foreign key\x00indexed\x00cannot open %s column for writing\x00sqlite_\x00sqlite_temp_master\x00sqlite_temp_schema\x00sqlite_schema\x00main\x00*\x00new\x00old\x00excluded\x00misuse of aliased aggregate %s\x00misuse of aliased window function %s\x00row value misused\x00double-quoted string literal: \"%w\"\x00coalesce\x00no such column\x00ambiguous column name\x00%s: %s.%s.%s\x00%s: %s.%s\x00%s: %s\x00partial index WHERE clauses\x00index expressions\x00CHECK constraints\x00generated columns\x00%s prohibited in %s\x00the \".\" operator\x00second argument to %#T() must be a constant between 0.0 and 1.0\x00not authorized to use function: %#T\x00non-deterministic functions\x00%#T() may not be used as a window function\x00window\x00aggregate\x00misuse of %s function %#T()\x00no such function: %#T\x00wrong 
number of arguments to function %#T()\x00FILTER may not be used with non-aggregate %#T()\x00subqueries\x00parameters\x00%r %s BY term out of range - should be between 1 and %d\x00too many terms in ORDER BY clause\x00ORDER\x00%r ORDER BY term does not match any column in the result set\x00too many terms in %s BY clause\x00HAVING clause on a non-aggregate query\x00GROUP\x00aggregate functions are not allowed in the GROUP BY clause\x00Expression tree is too large (maximum depth %d)\x00IN(...) element has %d term%s - expected %d\x00s\x000\x00too many arguments on function %T\x00unsafe use of %#T()\x00variable number must be between ?1 and ?%d\x00too many SQL variables\x00%d columns assigned %d values\x00too many columns in %s\x00true\x00false\x00_ROWID_\x00ROWID\x00OID\x00USING ROWID SEARCH ON TABLE %s FOR IN-OPERATOR\x00USING INDEX %s FOR IN-OPERATOR\x00sub-select returns %d columns - expected %d\x00REUSE LIST SUBQUERY %d\x00%sLIST SUBQUERY %d\x00CORRELATED \x00REUSE SUBQUERY %d\x00%sSCALAR SUBQUERY %d\x001\x000x\x00hex literal too big: %s%#T\x00generated column loop on \"%s\"\x00blob\x00text\x00numeric\x00flexnum\x00none\x00misuse of aggregate: %#T()\x00unknown function: %#T()\x00RAISE() may only be used within a trigger-program\x00B\x00C\x00D\x00E\x00F\x00table %s may not be altered\x00SELECT 1 FROM \"%w\".sqlite_master WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%' AND sqlite_rename_test(%Q, sql, type, name, %d, %Q, %d)=NULL \x00SELECT 1 FROM temp.sqlite_master WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%' AND sqlite_rename_test(%Q, sql, type, name, 1, %Q, %d)=NULL \x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_quotefix(%Q, sql)WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%'\x00UPDATE temp.sqlite_master SET sql = sqlite_rename_quotefix('temp', sql)WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%'\x00there is already another table or index with this name: %s\x00table\x00view %s may not be altered\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, %d) WHERE (type!='index' OR tbl_name=%Q COLLATE nocase)AND name NOT LIKE 'sqliteX_%%' ESCAPE 'X'\x00UPDATE %Q.sqlite_master SET tbl_name = %Q, name = CASE WHEN type='table' THEN %Q WHEN name LIKE 'sqliteX_autoindex%%' ESCAPE 'X' AND type='index' THEN 'sqlite_autoindex_' || %Q || substr(name,%d+18) ELSE name END WHERE tbl_name=%Q COLLATE nocase AND (type='table' OR type='index' OR type='trigger');\x00sqlite_sequence\x00UPDATE \"%w\".sqlite_sequence set name = %Q WHERE name = %Q\x00UPDATE sqlite_temp_schema SET sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, 1), tbl_name = CASE WHEN tbl_name=%Q COLLATE nocase AND sqlite_rename_test(%Q, sql, type, name, 1, 'after rename', 0) THEN %Q ELSE tbl_name END WHERE type IN ('view', 'trigger')\x00after rename\x00SELECT raise(ABORT,%Q) FROM \"%w\".\"%w\"\x00Cannot add a PRIMARY KEY column\x00Cannot add a UNIQUE column\x00Cannot add a REFERENCES column with non-NULL default value\x00Cannot add a NOT NULL column with default value NULL\x00Cannot add a column with non-constant default\x00cannot add a STORED column\x00UPDATE \"%w\".sqlite_master SET sql = printf('%%.%ds, ',sql) || %Q || substr(sql,1+length(printf('%%.%ds',sql))) WHERE type = 'table' AND name = %Q\x00SELECT CASE WHEN quick_check GLOB 'CHECK*' THEN raise(ABORT,'CHECK constraint failed') ELSE raise(ABORT,'NOT NULL constraint failed') END FROM pragma_quick_check(%Q,%Q) 
WHERE quick_check GLOB 'CHECK*' OR quick_check GLOB 'NULL*'\x00virtual tables may not be altered\x00Cannot add a column to a view\x00sqlite_altertab_%s\x00view\x00virtual table\x00cannot %s %s \"%s\"\x00drop column from\x00rename columns of\x00no such column: \"%T\"\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, %d) WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND (type != 'index' OR tbl_name = %Q)\x00UPDATE temp.sqlite_master SET sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, 1) WHERE type IN ('trigger', 'view')\x00error in %s %s%s%s: %s\x00 \x00CREATE \x00\"%w\" \x00%Q%s\x00%.*s%s\x00cannot drop %s column: \"%s\"\x00PRIMARY KEY\x00cannot drop column \"%s\": no other columns exist\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_drop_column(%d, sql, %d) WHERE (type=='table' AND tbl_name=%Q COLLATE nocase)\x00after drop column\x00sqlite_rename_column\x00sqlite_rename_table\x00sqlite_rename_test\x00sqlite_drop_column\x00sqlite_rename_quotefix\x00CREATE TABLE %Q.%s(%s)\x00DELETE FROM %Q.%s WHERE %s=%Q\x00DELETE FROM %Q.%s\x00sqlite_stat1\x00tbl,idx,stat\x00sqlite_stat4\x00tbl,idx,neq,nlt,ndlt,sample\x00sqlite_stat3\x00stat_init\x00stat_push\x00%llu\x00 %llu\x00%llu \x00stat_get\x00sqlite\\_%\x00BBB\x00idx\x00tbl\x00unordered*\x00sz=[0-9]*\x00noskipscan*\x00SELECT idx,count(*) FROM %Q.sqlite_stat4 GROUP BY idx\x00SELECT idx,neq,nlt,ndlt,sample FROM %Q.sqlite_stat4\x00SELECT tbl,idx,stat FROM %Q.sqlite_stat1\x00x\x00\x00too many attached databases - max %d\x00database %s is already in use\x00database is already attached\x00attached databases must use the same text encoding as main database\x00unable to open database: %s\x00no such database: %s\x00cannot detach database %s\x00database %s is locked\x00sqlite_detach\x00sqlite_attach\x00%s cannot use variables\x00%s %T cannot reference objects in database %s\x00authorizer malfunction\x00%s.%s\x00%s.%z\x00access to %z is prohibited\x00not authorized\x00pragma_\x00no such view\x00no such table\x00corrupt database\x00unknown database %T\x00object name reserved for internal use: %s\x00temporary table name must be unqualified\x00%s %T already exists\x00there is already an index named %s\x00sqlite_returning\x00cannot use RETURNING in a trigger\x00too many columns on %s\x00always\x00generated\x00duplicate column name: %s\x00default value of column [%s] is not constant\x00cannot use DEFAULT on a generated column\x00generated columns cannot be part of the PRIMARY KEY\x00table \"%s\" has more than one primary key\x00AUTOINCREMENT is only allowed on an INTEGER PRIMARY KEY\x00virtual tables cannot use computed columns\x00virtual\x00stored\x00error in generated column \"%s\"\x00,\x00\n \x00,\n \x00\n)\x00CREATE TABLE \x00 TEXT\x00 NUM\x00 INT\x00 REAL\x00unknown datatype for %s.%s: \"%s\"\x00missing datatype for %s.%s\x00AUTOINCREMENT not allowed on WITHOUT ROWID tables\x00PRIMARY KEY missing on table %s\x00must have at least one non-generated column\x00TABLE\x00VIEW\x00CREATE %s %.*s\x00UPDATE %Q.sqlite_master SET type='%s', name=%Q, tbl_name=%Q, rootpage=#%d, sql=%Q WHERE rowid=#%d\x00CREATE TABLE %Q.sqlite_sequence(name,seq)\x00tbl_name='%q' AND type!='trigger'\x00parameters are not allowed in views\x00view %s is circularly defined\x00corrupt schema\x00UPDATE %Q.sqlite_master SET rootpage=%d WHERE #%d AND rootpage=#%d\x00sqlite_stat%d\x00DELETE FROM %Q.sqlite_sequence WHERE name=%Q\x00DELETE FROM %Q.sqlite_master WHERE tbl_name=%Q and type!='trigger'\x00table %s may not be dropped\x00use 
DROP TABLE to delete table %s\x00use DROP VIEW to delete view %s\x00foreign key on %s should reference only one column of table %T\x00number of columns in foreign key does not match the number of columns in the referenced table\x00unknown column \"%s\" in foreign key definition\x00unsupported use of NULLS %s\x00FIRST\x00LAST\x00index\x00cannot create a TEMP index on non-TEMP table \"%s\"\x00table %s may not be indexed\x00views may not be indexed\x00virtual tables may not be indexed\x00there is already a table named %s\x00index %s already exists\x00sqlite_autoindex_%s_%d\x00expressions prohibited in PRIMARY KEY and UNIQUE constraints\x00conflicting ON CONFLICT clauses specified\x00invalid rootpage\x00CREATE%s INDEX %.*s\x00 UNIQUE\x00INSERT INTO %Q.sqlite_master VALUES('index',%Q,%Q,#%d,%Q);\x00name='%q' AND type='index'\x00no such index: %S\x00index associated with UNIQUE or PRIMARY KEY constraint cannot be dropped\x00DELETE FROM %Q.sqlite_master WHERE name=%Q AND type='index'\x00too many FROM clause terms, max: %d\x00a JOIN clause is required before %s\x00ON\x00USING\x00BEGIN\x00ROLLBACK\x00COMMIT\x00RELEASE\x00unable to open a temporary database file for storing temporary tables\x00index '%q'\x00, \x00%s.rowid\x00unable to identify the object to be reindexed\x00duplicate WITH table name: %s\x00no such collation sequence: %s\x00unsafe use of virtual table \"%s\"\x00table %s may not be modified\x00cannot modify %s because it is a view\x00rows deleted\x00integer overflow\x00%.*f\x00LIKE or GLOB pattern too complex\x00ESCAPE expression must be a single character\x00%!.20e\x00%Q\x00?000\x00MATCH\x00like\x00implies_nonnull_row\x00expr_compare\x00expr_implies_expr\x00affinity\x00soundex\x00load_extension\x00sqlite_compileoption_used\x00sqlite_compileoption_get\x00unlikely\x00likelihood\x00likely\x00sqlite_offset\x00ltrim\x00rtrim\x00trim\x00min\x00max\x00typeof\x00subtype\x00length\x00instr\x00printf\x00format\x00unicode\x00char\x00abs\x00round\x00upper\x00lower\x00hex\x00unhex\x00ifnull\x00random\x00randomblob\x00nullif\x00sqlite_version\x00sqlite_source_id\x00sqlite_log\x00quote\x00last_insert_rowid\x00changes\x00total_changes\x00replace\x00zeroblob\x00substr\x00substring\x00sum\x00total\x00avg\x00count\x00group_concat\x00glob\x00ceil\x00ceiling\x00floor\x00trunc\x00ln\x00log\x00log10\x00log2\x00exp\x00pow\x00power\x00mod\x00acos\x00asin\x00atan\x00atan2\x00cos\x00sin\x00tan\x00cosh\x00sinh\x00tanh\x00acosh\x00asinh\x00atanh\x00sqrt\x00radians\x00degrees\x00pi\x00sign\x00iif\x00foreign key mismatch - \"%w\" referencing \"%w\"\x00cannot INSERT into generated column \"%s\"\x00table %S has no column named %s\x00table %S has %d columns but %d values were supplied\x00%d values for %d columns\x00UPSERT not implemented for virtual table \"%s\"\x00cannot UPSERT a view\x00rows inserted\x00sqlite3_extension_init\x00sqlite3_\x00lib\x00_init\x00no entry point [%s] in shared library [%s]\x00error during initialization: %s\x00unable to open shared library [%.*s]\x00so\x00automatic extension loading failed: 
%s\x00seq\x00from\x00to\x00on_update\x00on_delete\x00match\x00cid\x00name\x00type\x00notnull\x00dflt_value\x00pk\x00hidden\x00schema\x00ncol\x00wr\x00strict\x00seqno\x00desc\x00coll\x00key\x00builtin\x00enc\x00narg\x00flags\x00wdth\x00hght\x00flgs\x00unique\x00origin\x00partial\x00rowid\x00fkid\x00file\x00busy\x00checkpointed\x00database\x00status\x00cache_size\x00timeout\x00analysis_limit\x00application_id\x00auto_vacuum\x00automatic_index\x00busy_timeout\x00cache_spill\x00case_sensitive_like\x00cell_size_check\x00checkpoint_fullfsync\x00collation_list\x00compile_options\x00count_changes\x00data_version\x00database_list\x00default_cache_size\x00defer_foreign_keys\x00empty_result_callbacks\x00encoding\x00foreign_key_check\x00foreign_key_list\x00foreign_keys\x00freelist_count\x00full_column_names\x00fullfsync\x00function_list\x00hard_heap_limit\x00ignore_check_constraints\x00incremental_vacuum\x00index_info\x00index_list\x00index_xinfo\x00integrity_check\x00journal_mode\x00journal_size_limit\x00legacy_alter_table\x00locking_mode\x00max_page_count\x00mmap_size\x00module_list\x00optimize\x00page_count\x00page_size\x00pragma_list\x00query_only\x00quick_check\x00read_uncommitted\x00recursive_triggers\x00reverse_unordered_selects\x00schema_version\x00secure_delete\x00short_column_names\x00shrink_memory\x00soft_heap_limit\x00synchronous\x00table_info\x00table_list\x00table_xinfo\x00temp_store\x00temp_store_directory\x00threads\x00trusted_schema\x00user_version\x00wal_autocheckpoint\x00wal_checkpoint\x00writable_schema\x00onoffalseyestruextrafull\x00exclusive\x00normal\x00full\x00incremental\x00memory\x00temporary storage cannot be changed from within a transaction\x00SET NULL\x00SET DEFAULT\x00CASCADE\x00RESTRICT\x00NO ACTION\x00delete\x00persist\x00off\x00truncate\x00wal\x00w\x00a\x00sissii\x00utf8\x00utf16le\x00utf16be\x00-%T\x00fast\x00not a writable directory\x00Safety level may not be changed inside a transaction\x00reset\x00issisii\x00issisi\x00SELECT*FROM\"%w\"\x00shadow\x00sssiii\x00iisX\x00isiX\x00c\x00u\x00isisi\x00iss\x00is\x00iissssss\x00NONE\x00siX\x00*** in database %s ***\n\x00row not in PRIMARY KEY order for %s\x00NULL value in %s.%s\x00non-%s value in %s.%s\x00NUMERIC value in %s.%s\x00C\x00TEXT value in %s.%s\x00CHECK constraint failed in %s\x00row \x00 missing from index \x00rowid not at end-of-record for row \x00 of index \x00 values differ from index \x00non-unique entry in index \x00wrong # of entries in index \x00ok\x00unsupported encoding: %s\x00restart\x00ANALYZE \"%w\".\"%w\"\x00UTF8\x00UTF-8\x00UTF-16le\x00UTF-16be\x00UTF16le\x00UTF16be\x00UTF-16\x00UTF16\x00CREATE TABLE x\x00%c\"%s\"\x00(\"%s\"\x00,arg HIDDEN\x00,schema HIDDEN\x00PRAGMA \x00%Q.\x00=%Q\x00error in %s %s after %s: %s\x00malformed database schema (%s)\x00%z - %s\x00rename\x00drop column\x00add column\x00orphan index\x00CREATE TABLE x(type text,name text,tbl_name text,rootpage int,sql text)\x00unsupported file format\x00SELECT*FROM\"%w\".%s ORDER BY rowid\x00database schema is locked: %s\x00statement too long\x00unknown join type: %T%s%T%s%T\x00naturaleftouterightfullinnercross\x00a NATURAL join may not have an ON or USING clause\x00cannot join using column %s - column not present in both tables\x00ambiguous reference to %s in USING()\x00UNION ALL\x00INTERSECT\x00EXCEPT\x00UNION\x00USE TEMP B-TREE FOR %s\x00USE TEMP B-TREE FOR %sORDER BY\x00RIGHT PART OF \x00column%d\x00%.*z:%u\x00NUM\x00cannot use window functions in recursive queries\x00recursive aggregate queries not supported\x00SETUP\x00RECURSIVE 
STEP\x00SCAN %d CONSTANT ROW%s\x00S\x00COMPOUND QUERY\x00LEFT-MOST SUBQUERY\x00%s USING TEMP B-TREE\x00all VALUES must have the same number of terms\x00SELECTs to the left and right of %s do not have the same number of result columns\x00MERGE (%s)\x00LEFT\x00RIGHT\x00no such index: %s\x00'%s' is not a function\x00no such index: \"%s\"\x00multiple references to recursive table: %s\x00circular reference: %s\x00table %s has %d values for %d columns\x00multiple recursive references: %s\x00recursive reference in a subquery: %s\x00%!S\x00too many references to \"%s\": max 65535\x00access to view \"%s\" prohibited\x00..%s\x00%s.%s.%s\x00no such table: %s\x00no tables specified\x00too many columns in result set\x00DISTINCT aggregates must have exactly one argument\x00USE TEMP B-TREE FOR %s(DISTINCT)\x00SCAN %s%s%s\x00 USING COVERING INDEX \x00target object/alias may not appear in FROM clause: %s\x00expected %d columns for '%s' but got %d\x00CO-ROUTINE %!S\x00MATERIALIZE %!S\x00DISTINCT\x00GROUP BY\x00sqlite3_get_table() called with two or more incompatible queries\x00temporary trigger may not have qualified name\x00trigger\x00cannot create triggers on virtual tables\x00trigger %T already exists\x00cannot create trigger on system table\x00cannot create %s trigger on view: %S\x00BEFORE\x00AFTER\x00cannot create INSTEAD OF trigger on table: %S\x00trigger \"%s\" may not write to shadow table \"%s\"\x00INSERT INTO %Q.sqlite_master VALUES('trigger',%Q,%Q,0,'CREATE TRIGGER %q')\x00type='trigger' AND name='%q'\x00no such trigger: %S\x00DELETE FROM %Q.sqlite_master WHERE name=%Q AND type='trigger'\x00%s RETURNING is not available on virtual tables\x00DELETE\x00UPDATE\x00RETURNING may not use \"TABLE.*\" wildcards\x00-- TRIGGER %s\x00cannot UPDATE generated column \"%s\"\x00no such column: %s\x00rows updated\x00%r \x00%sON CONFLICT clause does not match any PRIMARY KEY or UNIQUE constraint\x00CRE\x00INS\x00cannot VACUUM from within a transaction\x00cannot VACUUM - SQL statements in progress\x00non-text filename\x00ATTACH %Q AS vacuum_db\x00output file already exists\x00SELECT sql FROM \"%w\".sqlite_schema WHERE type='table'AND name<>'sqlite_sequence' AND coalesce(rootpage,1)>0\x00SELECT sql FROM \"%w\".sqlite_schema WHERE type='index'\x00SELECT'INSERT INTO vacuum_db.'||quote(name)||' SELECT*FROM\"%w\".'||quote(name)FROM vacuum_db.sqlite_schema WHERE type='table'AND coalesce(rootpage,1)>0\x00INSERT INTO vacuum_db.sqlite_schema SELECT*FROM \"%w\".sqlite_schema WHERE type IN('view','trigger') OR(type='table'AND rootpage=0)\x00CREATE VIRTUAL TABLE %T\x00UPDATE %Q.sqlite_master SET type='table', name=%Q, tbl_name=%Q, rootpage=0, sql=%Q WHERE rowid=#%d\x00name=%Q AND sql=%Q\x00vtable constructor called recursively: %s\x00vtable constructor failed: %s\x00vtable constructor did not declare schema: %s\x00no such module: %s\x00\x00 AND \x00(\x00 (\x00%s=?\x00ANY(%s)\x00>\x00<\x00%s %S\x00SEARCH\x00SCAN\x00AUTOMATIC PARTIAL COVERING INDEX\x00AUTOMATIC COVERING INDEX\x00COVERING INDEX %s\x00INDEX %s\x00 USING \x00 USING INTEGER PRIMARY KEY (%s\x00>? 
AND %s\x00%c?)\x00 VIRTUAL TABLE INDEX %d:%s\x00 LEFT-JOIN\x00BLOOM FILTER ON %S (\x00rowid=?\x00MULTI-INDEX OR\x00INDEX %d\x00RIGHT-JOIN %s\x00regexp\x00ON clause references tables to its right\x00NOCASE\x00too many arguments on %s() - max %d\x00automatic index on %s(%s)\x00auto-index\x00%s.xBestIndex malfunction\x00abbreviated query algorithm search\x00no query solution\x00at most %d tables in a join\x00SCAN CONSTANT ROW\x00second argument to nth_value must be a positive integer\x00argument of ntile must be a positive integer\x00row_number\x00dense_rank\x00rank\x00percent_rank\x00cume_dist\x00ntile\x00last_value\x00nth_value\x00first_value\x00lead\x00lag\x00no such window: %s\x00RANGE with offset PRECEDING/FOLLOWING requires one ORDER BY expression\x00FILTER clause may only be used with aggregate window functions\x00misuse of aggregate: %s()\x00unsupported frame specification\x00PARTITION clause\x00ORDER BY clause\x00frame specification\x00cannot override %s of window: %s\x00DISTINCT is not supported for window functions\x00frame starting offset must be a non-negative integer\x00frame ending offset must be a non-negative integer\x00frame starting offset must be a non-negative number\x00frame ending offset must be a non-negative number\x00%s clause should come after %s not before\x00ORDER BY\x00LIMIT\x00too many terms in compound SELECT\x00syntax error after column name \"%.*s\"\x00parser stack overflow\x00unknown table option: %.*s\x00set list\x00near \"%T\": syntax error\x00qualified table names are not allowed on INSERT, UPDATE, and DELETE statements within triggers\x00the INDEXED BY clause is not allowed on UPDATE or DELETE statements within triggers\x00the NOT INDEXED clause is not allowed on UPDATE or DELETE statements within triggers\x00incomplete input\x00unrecognized token: \"%T\"\x00%s in \"%s\"\x00create\x00temp\x00temporary\x00end\x00explain\x00unable to close due to unfinalized statements or unfinished backups\x00unknown error\x00abort due to ROLLBACK\x00another row available\x00no more rows available\x00not an error\x00SQL logic error\x00access permission denied\x00query aborted\x00database is locked\x00database table is locked\x00attempt to write a readonly database\x00interrupted\x00disk I/O error\x00database disk image is malformed\x00unknown operation\x00database or disk is full\x00unable to open database file\x00locking protocol\x00constraint failed\x00datatype mismatch\x00bad parameter or other API misuse\x00authorization denied\x00column index out of range\x00file is not a database\x00notification message\x00warning message\x00unable to delete/modify user-function due to active statements\x00unable to use function %s in the requested context\x00unknown database: %s\x00unable to delete/modify collation sequence due to active statements\x00file:\x00localhost\x00invalid uri authority: %.*s\x00vfs\x00cache\x00mode\x00no such %s mode: %s\x00%s mode not allowed: %s\x00no such vfs: %s\x00shared\x00private\x00ro\x00rw\x00rwc\x00RTRIM\x00\x00\x00\x00%s at line %d of [%.10s]\x00database corruption\x00misuse\x00cannot open file\x00no such table column: %s.%s\x00SQLITE_\x00database is deadlocked\x00array\x00object\x000123456789abcdef\x00JSON cannot hold BLOB values\x00malformed JSON\x00[0]\x00JSON path error near '%q'\x00json_%s() needs an odd number of arguments\x00$[\x00$.\x00json_object() requires an even number of arguments\x00json_object() labels must be TEXT\x00set\x00insert\x00[]\x00{}\x00CREATE TABLE x(key,value,type,atom,id,parent,fullkey,path,json HIDDEN,root 
HIDDEN)\x00.%.*s\x00[%d]\x00$\x00json\x00json_array\x00json_array_length\x00json_extract\x00->\x00->>\x00json_insert\x00json_object\x00json_patch\x00json_quote\x00json_remove\x00json_replace\x00json_set\x00json_type\x00json_valid\x00json_group_array\x00json_group_object\x00json_each\x00json_tree\x00%s_node\x00data\x00DROP TABLE '%q'.'%q_node';DROP TABLE '%q'.'%q_rowid';DROP TABLE '%q'.'%q_parent';\x00RtreeMatchArg\x00SELECT * FROM %Q.%Q\x00UNIQUE constraint failed: %s.%s\x00rtree constraint failed: %s.(%s<=%s)\x00ALTER TABLE %Q.'%q_node' RENAME TO \"%w_node\";ALTER TABLE %Q.'%q_parent' RENAME TO \"%w_parent\";ALTER TABLE %Q.'%q_rowid' RENAME TO \"%w_rowid\";\x00SELECT stat FROM %Q.sqlite_stat1 WHERE tbl = '%q_rowid'\x00node\x00CREATE TABLE \"%w\".\"%w_rowid\"(rowid INTEGER PRIMARY KEY,nodeno\x00,a%d\x00);CREATE TABLE \"%w\".\"%w_node\"(nodeno INTEGER PRIMARY KEY,data);\x00CREATE TABLE \"%w\".\"%w_parent\"(nodeno INTEGER PRIMARY KEY,parentnode);\x00INSERT INTO \"%w\".\"%w_node\"VALUES(1,zeroblob(%d))\x00INSERT INTO\"%w\".\"%w_rowid\"(rowid,nodeno)VALUES(?1,?2)ON CONFLICT(rowid)DO UPDATE SET nodeno=excluded.nodeno\x00SELECT * FROM \"%w\".\"%w_rowid\" WHERE rowid=?1\x00UPDATE \"%w\".\"%w_rowid\"SET \x00a%d=coalesce(?%d,a%d)\x00a%d=?%d\x00 WHERE rowid=?1\x00INSERT OR REPLACE INTO '%q'.'%q_node' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_node' WHERE nodeno = ?1\x00SELECT nodeno FROM '%q'.'%q_rowid' WHERE rowid = ?1\x00INSERT OR REPLACE INTO '%q'.'%q_rowid' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_rowid' WHERE rowid = ?1\x00SELECT parentnode FROM '%q'.'%q_parent' WHERE nodeno = ?1\x00INSERT OR REPLACE INTO '%q'.'%q_parent' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_parent' WHERE nodeno = ?1\x00PRAGMA %Q.page_size\x00SELECT length(data) FROM '%q'.'%q_node' WHERE nodeno = 1\x00undersize RTree blobs in \"%q_node\"\x00Wrong number of columns for an rtree table\x00Too few columns for an rtree table\x00Too many columns for an rtree table\x00Auxiliary rtree columns must be last\x00CREATE TABLE x(%.*s INT\x00,%.*s\x00);\x00,%.*s REAL\x00,%.*s INT\x00{%lld\x00 %g\x00}\x00Invalid argument to rtreedepth()\x00%z%s%z\x00SELECT data FROM %Q.'%q_node' WHERE nodeno=?\x00Node %lld missing from database\x00SELECT parentnode FROM %Q.'%q_parent' WHERE nodeno=?1\x00SELECT nodeno FROM %Q.'%q_rowid' WHERE rowid=?1\x00Mapping (%lld -> %lld) missing from %s table\x00%_rowid\x00%_parent\x00Found (%lld -> %lld) in %s table, expected (%lld -> %lld)\x00Dimension %d of cell %d on node %lld is corrupt\x00Dimension %d of cell %d on node %lld is corrupt relative to parent\x00Node %lld is too small (%d bytes)\x00Rtree depth out of range (%d)\x00Node %lld is too small for cell count of %d (%d bytes)\x00SELECT count(*) FROM %Q.'%q%s'\x00Wrong number of entries in %%%s table - expected %lld, actual %lld\x00SELECT * FROM %Q.'%q_rowid'\x00Schema corrupt or not an rtree\x00_rowid\x00_parent\x00END\x00wrong number of arguments to function rtreecheck()\x00[\x00[%!g,%!g],\x00[%!g,%!g]]\x00\x00CREATE TABLE x(_shape\x00,%s\x00rtree\x00fullscan\x00_shape does not contain a valid polygon\x00geopoly_overlap\x00geopoly_within\x00geopoly\x00geopoly_area\x00geopoly_blob\x00geopoly_json\x00geopoly_svg\x00geopoly_contains_point\x00geopoly_debug\x00geopoly_bbox\x00geopoly_xform\x00geopoly_regular\x00geopoly_ccw\x00geopoly_group_bbox\x00rtreenode\x00rtreedepth\x00rtreecheck\x00rtree_i32\x00corrupt fossil delta\x00DROP TRIGGER IF EXISTS temp.rbu_insert_tr;DROP TRIGGER IF EXISTS temp.rbu_update1_tr;DROP TRIGGER IF EXISTS temp.rbu_update2_tr;DROP TRIGGER IF 
EXISTS temp.rbu_delete_tr;\x00SELECT rbu_target_name(name, type='view') AS target, name FROM sqlite_schema WHERE type IN ('table', 'view') AND target IS NOT NULL %s ORDER BY name\x00AND rootpage!=0 AND rootpage IS NOT NULL\x00SELECT name, rootpage, sql IS NULL OR substr(8, 6)=='UNIQUE' FROM main.sqlite_schema WHERE type='index' AND tbl_name = ?\x00SELECT (sql COLLATE nocase BETWEEN 'CREATE VIRTUAL' AND 'CREATE VIRTUAM'), rootpage FROM sqlite_schema WHERE name=%Q\x00PRAGMA index_list=%Q\x00SELECT rootpage FROM sqlite_schema WHERE name = %Q\x00PRAGMA table_info=%Q\x00PRAGMA main.index_list = %Q\x00PRAGMA main.index_xinfo = %Q\x00SELECT * FROM '%q'\x00rbu_\x00rbu_rowid\x00table %q %s rbu_rowid column\x00may not have\x00requires\x00PRAGMA table_info(%Q)\x00column missing from %q: %s\x00%z%s\"%w\"\x00%z%s%s\"%w\"%s\x00SELECT max(_rowid_) FROM \"%s%w\"\x00 WHERE _rowid_ > %lld \x00 DESC\x00quote(\x00||','||\x00SELECT %s FROM \"%s%w\" ORDER BY %s LIMIT 1\x00 WHERE (%s) > (%s) \x00_rowid_\x00%z%s \"%w\" COLLATE %Q\x00%z%s \"rbu_imp_%d%w\" COLLATE %Q DESC\x00%z%s quote(\"rbu_imp_%d%w\")\x00SELECT %s FROM \"rbu_imp_%w\" ORDER BY %s LIMIT 1\x00%z%s%s\x00(%s) > (%s)\x00%z%s(%.*s) COLLATE %Q\x00%z%s\"%w\" COLLATE %Q\x00%z%s\"rbu_imp_%d%w\"%s\x00%z%s\"rbu_imp_%d%w\" %s COLLATE %Q\x00%z%s\"rbu_imp_%d%w\" IS ?\x00%z%s%s.\"%w\"\x00%z%sNULL\x00%z, %s._rowid_\x00_rowid_ = ?%d\x00%z%sc%d=?%d\x00_rowid_ = (SELECT id FROM rbu_imposter2 WHERE %z)\x00%z%s\"%w\"=?%d\x00invalid rbu_control value\x00%z%s\"%w\"=rbu_delta(\"%w\", ?%d)\x00%z%s\"%w\"=rbu_fossil_delta(\"%w\", ?%d)\x00PRIMARY KEY(\x00%z%s\"%w\"%s\x00%z)\x00SELECT name FROM sqlite_schema WHERE rootpage = ?\x00%z%sc%d %s COLLATE %Q\x00%z%sc%d%s\x00%z, id INTEGER\x00CREATE TABLE rbu_imposter2(%z, PRIMARY KEY(%z)) WITHOUT ROWID\x00PRIMARY KEY \x00%z%s\"%w\" %s %sCOLLATE %Q%s\x00 NOT NULL\x00%z, %z\x00CREATE TABLE \"rbu_imp_%w\"(%z)%s\x00 WITHOUT ROWID\x00INSERT INTO %s.'rbu_tmp_%q'(rbu_control,%s%s) VALUES(%z)\x00SELECT trim(sql) FROM sqlite_schema WHERE type='index' AND name=?\x00 LIMIT -1 OFFSET %d\x00CREATE TABLE \"rbu_imp_%w\"( %s, PRIMARY KEY( %s ) ) WITHOUT ROWID\x00INSERT INTO \"rbu_imp_%w\" VALUES(%s)\x00DELETE FROM \"rbu_imp_%w\" WHERE %s\x00SELECT %s, 0 AS rbu_control FROM '%q' %s %s %s ORDER BY %s%s\x00AND\x00WHERE\x00SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' %s ORDER BY %s%s\x00SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' %s UNION ALL SELECT %s, rbu_control FROM '%q' %s %s typeof(rbu_control)='integer' AND rbu_control!=1 ORDER BY %s%s\x00rbu_imp_\x00INSERT INTO \"%s%w\"(%s%s) VALUES(%s)\x00, _rowid_\x00DELETE FROM \"%s%w\" WHERE %s\x00, rbu_rowid\x00CREATE TABLE IF NOT EXISTS %s.'rbu_tmp_%q' AS SELECT *%s FROM '%q' WHERE 0;\x00, 0 AS rbu_rowid\x00CREATE TEMP TRIGGER rbu_delete_tr BEFORE DELETE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(3, %s);END;CREATE TEMP TRIGGER rbu_update1_tr BEFORE UPDATE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(3, %s);END;CREATE TEMP TRIGGER rbu_update2_tr AFTER UPDATE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(4, %s);END;\x00CREATE TEMP TRIGGER rbu_insert_tr AFTER INSERT ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(0, %s);END;\x00,_rowid_ \x00,rbu_rowid\x00SELECT %s,%s rbu_control%s FROM '%q'%s %s %s %s\x000 AS \x00UPDATE \"%s%w\" SET %s WHERE %s\x00SELECT k, v FROM %s.rbu_state\x00file:///%s-vacuum?modeof=%s\x00ATTACH %Q AS stat\x00CREATE TABLE IF NOT EXISTS %s.rbu_state(k INTEGER PRIMARY KEY, v)\x00cannot vacuum wal mode 
database\x00file:%s-vactmp?rbu_memory=1%s%s\x00&\x00rbu_tmp_insert\x00rbu_fossil_delta\x00rbu_target_name\x00SELECT * FROM sqlite_schema\x00rbu vfs not found\x00PRAGMA main.wal_checkpoint=restart\x00rbu_exclusive_checkpoint\x00%s-oal\x00%s-wal\x00PRAGMA schema_version\x00PRAGMA schema_version = %d\x00INSERT OR REPLACE INTO %s.rbu_state(k, v) VALUES (%d, %d), (%d, %Q), (%d, %Q), (%d, %d), (%d, %d), (%d, %lld), (%d, %lld), (%d, %lld), (%d, %lld), (%d, %Q) \x00PRAGMA main.%s\x00PRAGMA main.%s = %d\x00PRAGMA writable_schema=1\x00SELECT sql FROM sqlite_schema WHERE sql!='' AND rootpage!=0 AND name!='sqlite_sequence' ORDER BY type DESC\x00SELECT * FROM sqlite_schema WHERE rootpage=0 OR rootpage IS NULL\x00INSERT INTO sqlite_schema VALUES(?,?,?,?,?)\x00PRAGMA writable_schema=0\x00DELETE FROM %s.'rbu_tmp_%q'\x00rbu_state mismatch error\x00rbu_vfs_%d\x00SELECT count(*) FROM sqlite_schema WHERE type='index' AND tbl_name = %Q\x00rbu_index_cnt\x00SELECT 1 FROM sqlite_schema WHERE tbl_name = 'rbu_count'\x00SELECT sum(cnt * (1 + rbu_index_cnt(rbu_target_name(tbl))))FROM rbu_count\x00cannot update wal mode database\x00database modified during rbu %s\x00vacuum\x00update\x00BEGIN IMMEDIATE\x00PRAGMA journal_mode=off\x00-vactmp\x00DELETE FROM stat.rbu_state\x00rbu/zipvfs setup error\x00rbu(%s)/%z\x00rbu_memory\x00SELECT 0, 'tbl', '', 0, '', 1 UNION ALL SELECT 1, 'idx', '', 0, '', 2 UNION ALL SELECT 2, 'stat', '', 0, '', 0\x00PRAGMA '%q'.table_info('%q')\x00%z%s\"%w\".\"%w\".\"%w\"=\"%w\".\"%w\".\"%w\"\x00%z%s\"%w\".\"%w\".\"%w\" IS NOT \"%w\".\"%w\".\"%w\"\x00 OR \x00SELECT * FROM \"%w\".\"%w\" WHERE NOT EXISTS ( SELECT 1 FROM \"%w\".\"%w\" WHERE %s)\x00SELECT * FROM \"%w\".\"%w\", \"%w\".\"%w\" WHERE %s AND (%z)\x00table schemas do not match\x00SELECT tbl, ?2, stat FROM %Q.sqlite_stat1 WHERE tbl IS ?1 AND idx IS (CASE WHEN ?2=X'' THEN NULL ELSE ?2 END)\x00SELECT * FROM \x00 WHERE \x00 IS ?\x00SAVEPOINT changeset\x00RELEASE changeset\x00UPDATE main.\x00 SET \x00 = ?\x00idx IS CASE WHEN length(?4)=0 AND typeof(?4)='blob' THEN NULL ELSE ?4 END \x00DELETE FROM main.\x00 AND (?\x00AND \x00INSERT INTO main.\x00) VALUES(?\x00, ?\x00INSERT INTO main.sqlite_stat1 VALUES(?1, CASE WHEN length(?2)=0 AND typeof(?2)='blob' THEN NULL ELSE ?2 END, ?3)\x00DELETE FROM main.sqlite_stat1 WHERE tbl=?1 AND idx IS CASE WHEN length(?2)=0 AND typeof(?2)='blob' THEN NULL ELSE ?2 END AND (?4 OR stat IS ?3)\x00SAVEPOINT replace_op\x00RELEASE replace_op\x00SAVEPOINT changeset_apply\x00PRAGMA defer_foreign_keys = 1\x00sqlite3changeset_apply(): no such table: %s\x00sqlite3changeset_apply(): table %s has %d columns, expected %d or more\x00sqlite3changeset_apply(): primary key mismatch for table %s\x00PRAGMA defer_foreign_keys = 0\x00RELEASE changeset_apply\x00ROLLBACK TO changeset_apply\x00fts5: parser stack overflow\x00fts5: syntax error near \"%.*s\"\x00%z%.*s\x00wrong number of arguments to function highlight()\x00wrong number of arguments to function snippet()\x00snippet\x00highlight\x00bm25\x00prefix\x00malformed prefix=... directive\x00too many prefix indexes (max %d)\x00prefix length out of range (max 999)\x00tokenize\x00multiple tokenize=... directives\x00parse error in tokenize directive\x00content\x00multiple content=... directives\x00%Q.%Q\x00content_rowid\x00multiple content_rowid=... directives\x00columnsize\x00malformed columnsize=... directive\x00columns\x00malformed detail=... 
directive\x00unrecognized option: \"%.*s\"\x00reserved fts5 column name: %s\x00unindexed\x00unrecognized column option: %s\x00T.%Q\x00, T.%Q\x00, T.c%d\x00reserved fts5 table name: %s\x00parse error in \"%s\"\x00docsize\x00%Q.'%q_%s'\x00CREATE TABLE x(\x00%z%s%Q\x00%z, %Q HIDDEN, %s HIDDEN)\x00pgsz\x00hashsize\x00automerge\x00usermerge\x00crisismerge\x00SELECT k, v FROM %Q.'%q_config'\x00version\x00invalid fts5 file format (found %d, expected %d) - run 'rebuild'\x00unterminated string\x00fts5: syntax error near \"%.1s\"\x00OR\x00NOT\x00NEAR\x00expected integer, got \"%.*s\"\x00fts5: column queries are not supported (detail=none)\x00fts5: %s queries are not supported (detail!=full)\x00phrase\x00block\x00REPLACE INTO '%q'.'%q_data'(id, block) VALUES(?,?)\x00DELETE FROM '%q'.'%q_data' WHERE id>=? AND id<=?\x00DELETE FROM '%q'.'%q_idx' WHERE segid=?\x00PRAGMA %Q.data_version\x00SELECT pgno FROM '%q'.'%q_idx' WHERE segid=? AND term<=? ORDER BY term DESC LIMIT 1\x00INSERT INTO '%q'.'%q_idx'(segid,term,pgno) VALUES(?,?,?)\x00%s_data\x00id INTEGER PRIMARY KEY, block BLOB\x00segid, term, pgno, PRIMARY KEY(segid, term)\x00SELECT segid, term, (pgno>>1), (pgno&1) FROM %Q.'%q_idx' WHERE segid=%d ORDER BY 1, 2\x00\x00\x00\x00\x00\x00recursively defined fts5 content table\x00SELECT rowid, rank FROM %Q.%Q ORDER BY %s(\"%w\"%s%s) %s\x00DESC\x00ASC\x00reads\x00unknown special query: %.*s\x00SELECT %s\x00no such function: %s\x00parse error in rank function: %s\x00%s: table does not support scanning\x00delete-all\x00'delete-all' may only be used with a contentless or external content fts5 table\x00rebuild\x00'rebuild' may not be used with a contentless fts5 table\x00merge\x00integrity-check\x00cannot %s contentless fts5 table: %s\x00DELETE from\x00no such cursor: %lld\x00no such tokenizer: %s\x00error in tokenizer constructor\x00fts5_api_ptr\x00fts5: 2023-03-22 11:56:21 0d1fc92f94cb6b76bffe3ec34d69cffde2924203304e8ffc4155597af0c191da\x00config\x00fts5\x00fts5_source_id\x00SELECT %s FROM %s T WHERE T.%Q >= ? AND T.%Q <= ? ORDER BY T.%Q ASC\x00SELECT %s FROM %s T WHERE T.%Q <= ? AND T.%Q >= ? 
ORDER BY T.%Q DESC\x00SELECT %s FROM %s T WHERE T.%Q=?\x00INSERT INTO %Q.'%q_content' VALUES(%s)\x00REPLACE INTO %Q.'%q_content' VALUES(%s)\x00DELETE FROM %Q.'%q_content' WHERE id=?\x00REPLACE INTO %Q.'%q_docsize' VALUES(?,?)\x00DELETE FROM %Q.'%q_docsize' WHERE id=?\x00SELECT sz FROM %Q.'%q_docsize' WHERE id=?\x00REPLACE INTO %Q.'%q_config' VALUES(?,?)\x00SELECT %s FROM %s AS T\x00DROP TABLE IF EXISTS %Q.'%q_data';DROP TABLE IF EXISTS %Q.'%q_idx';DROP TABLE IF EXISTS %Q.'%q_config';\x00DROP TABLE IF EXISTS %Q.'%q_docsize';\x00DROP TABLE IF EXISTS %Q.'%q_content';\x00ALTER TABLE %Q.'%q_%s' RENAME TO '%q_%s';\x00CREATE TABLE %Q.'%q_%q'(%s)%s\x00fts5: error creating shadow table %q_%s: %s\x00id INTEGER PRIMARY KEY\x00, c%d\x00id INTEGER PRIMARY KEY, sz BLOB\x00k PRIMARY KEY, v\x00DELETE FROM %Q.'%q_data';DELETE FROM %Q.'%q_idx';\x00DELETE FROM %Q.'%q_docsize';\x00SELECT count(*) FROM %Q.'%q_%s'\x00tokenchars\x00separators\x00L* N* Co\x00categories\x00remove_diacritics\x00unicode61\x00al\x00ance\x00ence\x00er\x00ic\x00able\x00ible\x00ant\x00ement\x00ment\x00ent\x00ion\x00ou\x00ism\x00ate\x00iti\x00ous\x00ive\x00ize\x00at\x00bl\x00ble\x00iz\x00ational\x00tional\x00tion\x00enci\x00anci\x00izer\x00logi\x00bli\x00alli\x00entli\x00eli\x00e\x00ousli\x00ization\x00ation\x00ator\x00alism\x00iveness\x00fulness\x00ful\x00ousness\x00aliti\x00iviti\x00biliti\x00ical\x00ness\x00icate\x00iciti\x00ative\x00alize\x00eed\x00ee\x00ed\x00ing\x00case_sensitive\x00ascii\x00porter\x00trigram\x00col\x00row\x00instance\x00fts5vocab: unknown table type: %Q\x00CREATE TABlE vocab(term, col, doc, cnt)\x00CREATE TABlE vocab(term, doc, cnt)\x00CREATE TABlE vocab(term, doc, col, offset)\x00wrong number of vtable arguments\x00recursive definition for %s.%s\x00SELECT t.%Q FROM %Q.%Q AS t WHERE t.%Q MATCH '*id'\x00no such fts5 table: %s.%s\x00fts5vocab\x002023-03-22 11:56:21 0d1fc92f94cb6b76bffe3ec34d69cffde2924203304e8ffc4155597af0c191da\x00" var ts = (*reflect.StringHeader)(unsafe.Pointer(&ts1)).Data diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/sqlite/lib/sqlite_linux_386.go temporal-1.22.5/src/vendor/modernc.org/sqlite/lib/sqlite_linux_386.go --- temporal-1.21.5-1/src/vendor/modernc.org/sqlite/lib/sqlite_linux_386.go 2023-09-29 14:03:35.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/sqlite/lib/sqlite_linux_386.go 2024-02-23 09:46:16.000000000 +0000 @@ -1,4 +1,4 @@ -// Code generated by 'ccgo -DSQLITE_PRIVATE= -export-defines "" -export-enums "" -export-externs X -export-fields F -export-typedefs "" -ignore-unsupported-alignment -pkgname sqlite3 -volatile=sqlite3_io_error_pending,sqlite3_open_file_count,sqlite3_pager_readdb_count,sqlite3_pager_writedb_count,sqlite3_pager_writej_count,sqlite3_search_count,sqlite3_sort_count,saved_cnt,randomnessPid -o lib/sqlite_linux_386.go -trace-translation-units testdata/sqlite-amalgamation-3410000/sqlite3.c -full-path-comments -DNDEBUG -DHAVE_USLEEP -DLONGDOUBLE_TYPE=double -DSQLITE_CORE -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_FTS5 -DSQLITE_ENABLE_GEOPOLY -DSQLITE_ENABLE_MATH_FUNCTIONS -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_OFFSET_SQL_FUNC -DSQLITE_ENABLE_PREUPDATE_HOOK -DSQLITE_ENABLE_RBU -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_SESSION -DSQLITE_ENABLE_SNAPSHOT -DSQLITE_ENABLE_STAT4 -DSQLITE_ENABLE_UNLOCK_NOTIFY -DSQLITE_LIKE_DOESNT_MATCH_BLOBS -DSQLITE_MUTEX_APPDEF=1 -DSQLITE_MUTEX_NOOP -DSQLITE_SOUNDEX -DSQLITE_THREADSAFE=1 -DSQLITE_OS_UNIX=1', DO NOT EDIT. 
+// Code generated by 'ccgo -DSQLITE_PRIVATE= -export-defines "" -export-enums "" -export-externs X -export-fields F -export-typedefs "" -ignore-unsupported-alignment -pkgname sqlite3 -volatile=sqlite3_io_error_pending,sqlite3_open_file_count,sqlite3_pager_readdb_count,sqlite3_pager_writedb_count,sqlite3_pager_writej_count,sqlite3_search_count,sqlite3_sort_count,saved_cnt,randomnessPid -o lib/sqlite_linux_386.go -trace-translation-units testdata/sqlite-amalgamation-3410200/sqlite3.c -full-path-comments -DNDEBUG -DHAVE_USLEEP -DLONGDOUBLE_TYPE=double -DSQLITE_CORE -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_FTS5 -DSQLITE_ENABLE_GEOPOLY -DSQLITE_ENABLE_MATH_FUNCTIONS -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_OFFSET_SQL_FUNC -DSQLITE_ENABLE_PREUPDATE_HOOK -DSQLITE_ENABLE_RBU -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_SESSION -DSQLITE_ENABLE_SNAPSHOT -DSQLITE_ENABLE_STAT4 -DSQLITE_ENABLE_UNLOCK_NOTIFY -DSQLITE_LIKE_DOESNT_MATCH_BLOBS -DSQLITE_MUTEX_APPDEF=1 -DSQLITE_MUTEX_NOOP -DSQLITE_SOUNDEX -DSQLITE_THREADSAFE=1 -DSQLITE_OS_UNIX=1', DO NOT EDIT. package sqlite3 @@ -920,11 +920,11 @@ NC_OrderAgg = 0x8000000 NC_PartIdx = 0x000002 NC_SelfRef = 0x00002e + NC_Subquery = 0x000040 NC_UAggInfo = 0x000100 NC_UBaseReg = 0x000400 NC_UEList = 0x000080 NC_UUpsert = 0x000200 - NC_VarSelect = 0x000040 NDEBUG = 1 NN = 1 NOT_WITHIN = 0 @@ -2166,7 +2166,7 @@ SQLITE_SHM_UNLOCK = 1 SQLITE_SORTER_PMASZ = 250 SQLITE_SOUNDEX = 1 - SQLITE_SOURCE_ID = "2023-02-21 18:09:37 05941c2a04037fc3ed2ffae11f5d2260706f89431f463518740f72ada350866d" + SQLITE_SOURCE_ID = "2023-03-22 11:56:21 0d1fc92f94cb6b76bffe3ec34d69cffde2924203304e8ffc4155597af0c191da" SQLITE_SO_ASC = 0 SQLITE_SO_DESC = 1 SQLITE_SO_UNDEFINED = -1 @@ -2274,8 +2274,8 @@ SQLITE_UTF8 = 1 SQLITE_VDBEINT_H = 0 SQLITE_VDBE_H = 0 - SQLITE_VERSION = "3.41.0" - SQLITE_VERSION_NUMBER = 3041000 + SQLITE_VERSION = "3.41.2" + SQLITE_VERSION_NUMBER = 3041002 SQLITE_VTABRISK_High = 2 SQLITE_VTABRISK_Low = 0 SQLITE_VTABRISK_Normal = 1 @@ -6087,7 +6087,8 @@ FiIdxCur int32 FiIdxCol int32 FbMaybeNullRow U8 - F__ccgo_pad1 [3]byte + Faff U8 + F__ccgo_pad1 [2]byte FpIENext uintptr } @@ -6708,17 +6709,18 @@ // Handle type for pages. type PgHdr2 = struct { - FpPage uintptr - FpData uintptr - FpExtra uintptr - FpCache uintptr - FpDirty uintptr - FpPager uintptr - Fpgno Pgno - Fflags U16 - FnRef I16 - FpDirtyNext uintptr - FpDirtyPrev uintptr + FpPage uintptr + FpData uintptr + FpExtra uintptr + FpCache uintptr + FpDirty uintptr + FpPager uintptr + Fpgno Pgno + Fflags U16 + F__ccgo_pad1 [2]byte + FnRef I64 + FpDirtyNext uintptr + FpDirtyPrev uintptr } // Handle type for pages. 
@@ -6925,7 +6927,7 @@ FpDirty uintptr FpDirtyTail uintptr FpSynced uintptr - FnRefSum int32 + FnRefSum I64 FszCache int32 FszSpill int32 FszPage int32 @@ -7724,7 +7726,7 @@ _ = pMutex if op < 0 || op >= int32(uint32(unsafe.Sizeof([10]Sqlite3StatValueType{}))/uint32(unsafe.Sizeof(Sqlite3StatValueType(0)))) { - return Xsqlite3MisuseError(tls, 23229) + return Xsqlite3MisuseError(tls, 23233) } if statMutex[op] != 0 { pMutex = Xsqlite3Pcache1Mutex(tls) @@ -15798,7 +15800,7 @@ for p = (*UnixInodeInfo)(unsafe.Pointer(pInode)).FpUnused; p != 0; p = pNext { pNext = (*UnixUnusedFd)(unsafe.Pointer(p)).FpNext - robust_close(tls, pFile, (*UnixUnusedFd)(unsafe.Pointer(p)).Ffd, 38271) + robust_close(tls, pFile, (*UnixUnusedFd)(unsafe.Pointer(p)).Ffd, 38275) Xsqlite3_free(tls, p) } (*UnixInodeInfo)(unsafe.Pointer(pInode)).FpUnused = uintptr(0) @@ -16275,7 +16277,7 @@ var pFile uintptr = id unixUnmapfile(tls, pFile) if (*UnixFile)(unsafe.Pointer(pFile)).Fh >= 0 { - robust_close(tls, pFile, (*UnixFile)(unsafe.Pointer(pFile)).Fh, 39055) + robust_close(tls, pFile, (*UnixFile)(unsafe.Pointer(pFile)).Fh, 39059) (*UnixFile)(unsafe.Pointer(pFile)).Fh = -1 } @@ -16566,7 +16568,7 @@ if fd >= 0 { return SQLITE_OK } - return unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 40676), ts+3378, bp+8, 40676) + return unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 40680), ts+3378, bp+8, 40680) } func unixSync(tls *libc.TLS, id uintptr, flags int32) int32 { @@ -16583,14 +16585,14 @@ if rc != 0 { storeLastErrno(tls, pFile, *(*int32)(unsafe.Pointer(libc.X__errno_location(tls)))) - return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(4)<<8, ts+3666, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40717) + return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(4)<<8, ts+3666, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40721) } if int32((*UnixFile)(unsafe.Pointer(pFile)).FctrlFlags)&UNIXFILE_DIRSYNC != 0 { rc = (*(*func(*libc.TLS, uintptr, uintptr) int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 17*12 + 4)))(tls, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, bp) if rc == SQLITE_OK { full_fsync(tls, *(*int32)(unsafe.Pointer(bp)), 0, 0) - robust_close(tls, pFile, *(*int32)(unsafe.Pointer(bp)), 40731) + robust_close(tls, pFile, *(*int32)(unsafe.Pointer(bp)), 40735) } else { rc = SQLITE_OK } @@ -16610,7 +16612,7 @@ rc = robust_ftruncate(tls, (*UnixFile)(unsafe.Pointer(pFile)).Fh, nByte) if rc != 0 { storeLastErrno(tls, pFile, *(*int32)(unsafe.Pointer(libc.X__errno_location(tls)))) - return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(6)<<8, ts+3297, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40762) + return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(6)<<8, ts+3297, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40766) } else { if nByte < (*UnixFile)(unsafe.Pointer(pFile)).FmmapSize { (*UnixFile)(unsafe.Pointer(pFile)).FmmapSize = nByte @@ -16678,7 +16680,7 @@ if (*UnixFile)(unsafe.Pointer(pFile)).FszChunk <= 0 { if robust_ftruncate(tls, (*UnixFile)(unsafe.Pointer(pFile)).Fh, nByte) != 0 { storeLastErrno(tls, pFile, *(*int32)(unsafe.Pointer(libc.X__errno_location(tls)))) - return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(6)<<8, ts+3297, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40883) + return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(6)<<8, ts+3297, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40887) } } @@ -16905,7 +16907,7 @@ } Xsqlite3_free(tls, (*UnixShmNode)(unsafe.Pointer(p)).FapRegion) if (*UnixShmNode)(unsafe.Pointer(p)).FhShm >= 0 { - robust_close(tls, pFd, (*UnixShmNode)(unsafe.Pointer(p)).FhShm, 41442) + robust_close(tls, pFd, 
(*UnixShmNode)(unsafe.Pointer(p)).FhShm, 41446) (*UnixShmNode)(unsafe.Pointer(p)).FhShm = -1 } (*UnixInodeInfo)(unsafe.Pointer((*UnixShmNode)(unsafe.Pointer(p)).FpInode)).FpShmNode = uintptr(0) @@ -16933,7 +16935,7 @@ rc = unixShmSystemLock(tls, pDbFd, F_WRLCK, (22+SQLITE_SHM_NLOCK)*4+SQLITE_SHM_NLOCK, 1) if rc == SQLITE_OK && robust_ftruncate(tls, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FhShm, int64(3)) != 0 { - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(18)<<8, ts+3297, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename, 41499) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(18)<<8, ts+3297, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename, 41503) } } } else if int32((*flock)(unsafe.Pointer(bp+8)).Fl_type) == F_WRLCK { @@ -17032,7 +17034,7 @@ if !((*unixShmNode)(unsafe.Pointer(pShmNode)).FhShm < 0) { goto __10 } - rc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 41624), ts+3261, zShm, 41624) + rc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 41628), ts+3261, zShm, 41628) goto shm_open_err __10: ; @@ -17162,7 +17164,7 @@ goto __14 } zFile = (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(19)<<8, ts+3332, zFile, 41768) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(19)<<8, ts+3332, zFile, 41772) goto shmpage_out __14: ; @@ -17208,7 +17210,7 @@ if !(pMem == libc.UintptrFromInt32(-1)) { goto __20 } - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(21)<<8, ts+3419, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename, 41795) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(21)<<8, ts+3419, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename, 41799) goto shmpage_out __20: ; @@ -17439,7 +17441,7 @@ if pNew == libc.UintptrFromInt32(-1) { pNew = uintptr(0) nNew = int64(0) - unixLogErrorAtLine(tls, SQLITE_OK, zErr, (*UnixFile)(unsafe.Pointer(pFd)).FzPath, 42169) + unixLogErrorAtLine(tls, SQLITE_OK, zErr, (*UnixFile)(unsafe.Pointer(pFd)).FzPath, 42173) (*UnixFile)(unsafe.Pointer(pFd)).FmmapSizeMax = int64(0) } @@ -17573,7 +17575,7 @@ unixEnterMutex(tls) rc = findInodeInfo(tls, pNew, pNew+8) if rc != SQLITE_OK { - robust_close(tls, pNew, h, 42672) + robust_close(tls, pNew, h, 42676) h = -1 } unixLeaveMutex(tls) @@ -17594,7 +17596,7 @@ storeLastErrno(tls, pNew, 0) if rc != SQLITE_OK { if h >= 0 { - robust_close(tls, pNew, h, 42757) + robust_close(tls, pNew, h, 42761) } } else { (*Sqlite3_file)(unsafe.Pointer(pId)).FpMethods = pLockingStyle @@ -17910,7 +17912,7 @@ if !(fd < 0) { goto __19 } - rc2 = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43198), ts+3261, zName, 43198) + rc2 = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43202), ts+3261, zName, 43202) if !(rc == SQLITE_OK) { goto __20 } @@ -18001,7 +18003,7 @@ if *(*int32)(unsafe.Pointer(libc.X__errno_location(tls))) == ENOENT { rc = SQLITE_IOERR | int32(23)<<8 } else { - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(10)<<8, ts+3371, zPath, 43337) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(10)<<8, ts+3371, zPath, 43341) } return rc } @@ -18009,9 +18011,9 @@ rc = (*(*func(*libc.TLS, uintptr, uintptr) int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 17*12 + 4)))(tls, zPath, bp) if rc == SQLITE_OK { if full_fsync(tls, *(*int32)(unsafe.Pointer(bp)), 0, 0) != 0 { - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(5)<<8, ts+3788, zPath, 43347) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(5)<<8, ts+3788, zPath, 43351) } - robust_close(tls, uintptr(0), *(*int32)(unsafe.Pointer(bp)), 43349) + robust_close(tls, uintptr(0), 
*(*int32)(unsafe.Pointer(bp)), 43353) } else { rc = SQLITE_OK } @@ -18075,18 +18077,18 @@ zIn = (*DbPath)(unsafe.Pointer(pPath)).FzOut if (*(*func(*libc.TLS, uintptr, uintptr) int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 27*12 + 4)))(tls, zIn, bp) != 0 { if *(*int32)(unsafe.Pointer(libc.X__errno_location(tls))) != ENOENT { - (*DbPath)(unsafe.Pointer(pPath)).Frc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43443), ts+3459, zIn, 43443) + (*DbPath)(unsafe.Pointer(pPath)).Frc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43447), ts+3459, zIn, 43447) } } else if (*stat)(unsafe.Pointer(bp)).Fst_mode&X__mode_t(0170000) == X__mode_t(0120000) { var got Ssize_t if libc.PostIncInt32(&(*DbPath)(unsafe.Pointer(pPath)).FnSymlink, 1) > SQLITE_MAX_SYMLINK { - (*DbPath)(unsafe.Pointer(pPath)).Frc = Xsqlite3CantopenError(tls, 43449) + (*DbPath)(unsafe.Pointer(pPath)).Frc = Xsqlite3CantopenError(tls, 43453) return } got = (*(*func(*libc.TLS, uintptr, uintptr, Size_t) Ssize_t)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*12 + 4)))(tls, zIn, bp+96, uint32(unsafe.Sizeof([4098]int8{}))-uint32(2)) if got <= 0 || got >= Ssize_t(unsafe.Sizeof([4098]int8{}))-2 { - (*DbPath)(unsafe.Pointer(pPath)).Frc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43454), ts+3450, zIn, 43454) + (*DbPath)(unsafe.Pointer(pPath)).Frc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43458), ts+3450, zIn, 43458) return } *(*int8)(unsafe.Pointer(bp + 96 + uintptr(got))) = int8(0) @@ -18126,14 +18128,14 @@ (*DbPath)(unsafe.Pointer(bp + 4100)).FzOut = zOut if int32(*(*int8)(unsafe.Pointer(zPath))) != '/' { if (*(*func(*libc.TLS, uintptr, Size_t) uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 3*12 + 4)))(tls, bp, uint32(unsafe.Sizeof([4098]int8{}))-uint32(2)) == uintptr(0) { - return unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43512), ts+3279, zPath, 43512) + return unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43516), ts+3279, zPath, 43516) } appendAllPathElements(tls, bp+4100, bp) } appendAllPathElements(tls, bp+4100, zPath) *(*int8)(unsafe.Pointer(zOut + uintptr((*DbPath)(unsafe.Pointer(bp+4100)).FnUsed))) = int8(0) if (*DbPath)(unsafe.Pointer(bp+4100)).Frc != 0 || (*DbPath)(unsafe.Pointer(bp+4100)).FnUsed < 2 { - return Xsqlite3CantopenError(tls, 43518) + return Xsqlite3CantopenError(tls, 43522) } if (*DbPath)(unsafe.Pointer(bp+4100)).FnSymlink != 0 { return SQLITE_OK | int32(2)<<8 @@ -18232,7 +18234,7 @@ for __ccgo := true; __ccgo; __ccgo = got < 0 && *(*int32)(unsafe.Pointer(libc.X__errno_location(tls))) == EINTR { got = (*(*func(*libc.TLS, int32, uintptr, Size_t) Ssize_t)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 8*12 + 4)))(tls, fd, zBuf, uint32(nBuf)) } - robust_close(tls, uintptr(0), fd, 43619) + robust_close(tls, uintptr(0), fd, 43623) } } @@ -19662,7 +19664,7 @@ libc.Xmemset(tls, pPgHdr+16, 0, uint32(unsafe.Sizeof(PgHdr{}))-uint32(uintptr(0)+16)) (*PgHdr)(unsafe.Pointer(pPgHdr)).FpPage = pPage (*PgHdr)(unsafe.Pointer(pPgHdr)).FpData = (*Sqlite3_pcache_page)(unsafe.Pointer(pPage)).FpBuf - (*PgHdr)(unsafe.Pointer(pPgHdr)).FpExtra = pPgHdr + 1*40 + (*PgHdr)(unsafe.Pointer(pPgHdr)).FpExtra = pPgHdr + 1*48 libc.Xmemset(tls, (*PgHdr)(unsafe.Pointer(pPgHdr)).FpExtra, 0, uint32(8)) (*PgHdr)(unsafe.Pointer(pPgHdr)).FpCache = pCache (*PgHdr)(unsafe.Pointer(pPgHdr)).Fpgno = pgno @@ -19692,7 +19694,7 @@ // reference count drops to 0, then it is made eligible for recycling. 
func Xsqlite3PcacheRelease(tls *libc.TLS, p uintptr) { (*PCache)(unsafe.Pointer((*PgHdr)(unsafe.Pointer(p)).FpCache)).FnRefSum-- - if int32(libc.PreDecInt16(&(*PgHdr)(unsafe.Pointer(p)).FnRef, 1)) == 0 { + if libc.PreDecInt64(&(*PgHdr)(unsafe.Pointer(p)).FnRef, 1) == int64(0) { if int32((*PgHdr)(unsafe.Pointer(p)).Fflags)&PGHDR_CLEAN != 0 { pcacheUnpin(tls, p) } else { @@ -19743,7 +19745,7 @@ *(*U16)(unsafe.Pointer(p + 28)) &= libc.Uint16FromInt32(libc.CplInt32(PGHDR_DIRTY | PGHDR_NEED_SYNC | PGHDR_WRITEABLE)) *(*U16)(unsafe.Pointer(p + 28)) |= U16(PGHDR_CLEAN) - if int32((*PgHdr)(unsafe.Pointer(p)).FnRef) == 0 { + if (*PgHdr)(unsafe.Pointer(p)).FnRef == int64(0) { pcacheUnpin(tls, p) } } @@ -19847,8 +19849,8 @@ } func pcacheMergeDirtyList(tls *libc.TLS, pA uintptr, pB uintptr) uintptr { - bp := tls.Alloc(40) - defer tls.Free(40) + bp := tls.Alloc(48) + defer tls.Free(48) var pTail uintptr pTail = bp @@ -19926,13 +19928,13 @@ // // This is not the total number of pages referenced, but the sum of the // reference count for all pages. -func Xsqlite3PcacheRefCount(tls *libc.TLS, pCache uintptr) int32 { +func Xsqlite3PcacheRefCount(tls *libc.TLS, pCache uintptr) I64 { return (*PCache)(unsafe.Pointer(pCache)).FnRefSum } // Return the number of references to the page supplied as an argument. -func Xsqlite3PcachePageRefcount(tls *libc.TLS, p uintptr) int32 { - return int32((*PgHdr)(unsafe.Pointer(p)).FnRef) +func Xsqlite3PcachePageRefcount(tls *libc.TLS, p uintptr) I64 { + return (*PgHdr)(unsafe.Pointer(p)).FnRef } // Return the total number of pages in the cache. @@ -22223,7 +22225,7 @@ pPg = Xsqlite3PagerLookup(tls, pPager, iPg) if pPg != 0 { - if Xsqlite3PcachePageRefcount(tls, pPg) == 1 { + if Xsqlite3PcachePageRefcount(tls, pPg) == int64(1) { Xsqlite3PcacheDrop(tls, pPg) } else { rc = readDbPage(tls, pPg) @@ -22656,7 +22658,7 @@ var pageSize U32 = *(*U32)(unsafe.Pointer(pPageSize)) if (int32((*Pager)(unsafe.Pointer(pPager)).FmemDb) == 0 || (*Pager)(unsafe.Pointer(pPager)).FdbSize == Pgno(0)) && - Xsqlite3PcacheRefCount(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache) == 0 && + Xsqlite3PcacheRefCount(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache) == int64(0) && pageSize != 0 && pageSize != U32((*Pager)(unsafe.Pointer(pPager)).FpageSize) { var pNew uintptr = uintptr(0) *(*I64)(unsafe.Pointer(bp)) = int64(0) @@ -22808,9 +22810,9 @@ Xsqlite3OsUnfetch(tls, (*Pager)(unsafe.Pointer(pPager)).Ffd, I64(pgno-Pgno(1))*(*Pager)(unsafe.Pointer(pPager)).FpageSize, pData) return SQLITE_NOMEM } - (*PgHdr)(unsafe.Pointer(p)).FpExtra = p + 1*40 + (*PgHdr)(unsafe.Pointer(p)).FpExtra = p + 1*48 (*PgHdr)(unsafe.Pointer(p)).Fflags = U16(PGHDR_MMAP) - (*PgHdr)(unsafe.Pointer(p)).FnRef = int16(1) + (*PgHdr)(unsafe.Pointer(p)).FnRef = int64(1) (*PgHdr)(unsafe.Pointer(p)).FpPager = pPager } @@ -23142,7 +23144,7 @@ for rc == SQLITE_OK && pList != 0 { var pNext uintptr = (*PgHdr)(unsafe.Pointer(pList)).FpDirty - if int32((*PgHdr)(unsafe.Pointer(pList)).FnRef) == 0 { + if (*PgHdr)(unsafe.Pointer(pList)).FnRef == int64(0) { rc = pagerStress(tls, pPager, pList) } pList = pNext @@ -23292,7 +23294,7 @@ goto __12 } - rc = Xsqlite3CantopenError(tls, 60235) + rc = Xsqlite3CantopenError(tls, 60239) __12: ; if !(rc != SQLITE_OK) { @@ -23673,7 +23675,7 @@ if !(rc == SQLITE_OK && *(*int32)(unsafe.Pointer(bp + 8))&SQLITE_OPEN_READONLY != 0) { goto __10 } - rc = Xsqlite3CantopenError(tls, 60754) + rc = Xsqlite3CantopenError(tls, 60758) Xsqlite3OsClose(tls, (*Pager)(unsafe.Pointer(pPager)).Fjfd) __10: ; @@ -23779,7 +23781,7 @@ } func 
pagerUnlockIfUnused(tls *libc.TLS, pPager uintptr) { - if Xsqlite3PcacheRefCount(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache) == 0 { + if Xsqlite3PcacheRefCount(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache) == int64(0) { pagerUnlockAndRollback(tls, pPager) } } @@ -23797,7 +23799,7 @@ if !(pgno == Pgno(0)) { goto __1 } - return Xsqlite3CorruptError(tls, 60967) + return Xsqlite3CorruptError(tls, 60971) __1: ; *(*uintptr)(unsafe.Pointer(bp)) = Xsqlite3PcacheFetch(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache, pgno, 3) @@ -23836,7 +23838,7 @@ if !(pgno == (*Pager)(unsafe.Pointer(pPager)).FlckPgno) { goto __7 } - rc = Xsqlite3CorruptError(tls, 60999) + rc = Xsqlite3CorruptError(tls, 61003) goto pager_acquire_err __7: ; @@ -23913,7 +23915,7 @@ (int32((*Pager)(unsafe.Pointer(pPager)).FeState) == PAGER_READER || flags&PAGER_GET_READONLY != 0)) if pgno <= Pgno(1) && pgno == Pgno(0) { - return Xsqlite3CorruptError(tls, 61078) + return Xsqlite3CorruptError(tls, 61082) } if bMmapOk != 0 && (*Pager)(unsafe.Pointer(pPager)).FpWal != uintptr(0) { @@ -24671,7 +24673,7 @@ // Return the number of references to the specified page. func Xsqlite3PagerPageRefcount(tls *libc.TLS, pPage uintptr) int32 { - return Xsqlite3PcachePageRefcount(tls, pPage) + return int32(Xsqlite3PcachePageRefcount(tls, pPage)) } // Parameter eStat must be one of SQLITE_DBSTATUS_CACHE_HIT, _MISS, _WRITE, @@ -24914,9 +24916,9 @@ pPgOld = Xsqlite3PagerLookup(tls, pPager, pgno) if pPgOld != 0 { - if int32((*PgHdr)(unsafe.Pointer(pPgOld)).FnRef) > 1 { + if (*PgHdr)(unsafe.Pointer(pPgOld)).FnRef > int64(1) { Xsqlite3PagerUnrefNotNull(tls, pPgOld) - return Xsqlite3CorruptError(tls, 62623) + return Xsqlite3CorruptError(tls, 62627) } *(*U16)(unsafe.Pointer(pPg + 28)) |= U16(int32((*PgHdr)(unsafe.Pointer(pPgOld)).Fflags) & PGHDR_NEED_SYNC) if (*Pager)(unsafe.Pointer(pPager)).FtempFile != 0 { @@ -25670,7 +25672,7 @@ nCollide = idx for iKey = walHash(tls, iPage); *(*Ht_slot)(unsafe.Pointer((*WalHashLoc)(unsafe.Pointer(bp)).FaHash + uintptr(iKey)*2)) != 0; iKey = walNextHash(tls, iKey) { if libc.PostDecInt32(&nCollide, 1) == 0 { - return Xsqlite3CorruptError(tls, 64387) + return Xsqlite3CorruptError(tls, 64391) } } *(*U32)(unsafe.Pointer((*WalHashLoc)(unsafe.Pointer(bp)).FaPgno + uintptr(idx-1)*4)) = iPage @@ -25769,7 +25771,7 @@ if !(version != U32(WAL_MAX_VERSION)) { goto __7 } - rc = Xsqlite3CantopenError(tls, 64519) + rc = Xsqlite3CantopenError(tls, 64523) goto finished __7: ; @@ -26354,7 +26356,7 @@ goto __14 } - rc = Xsqlite3CorruptError(tls, 65333) + rc = Xsqlite3CorruptError(tls, 65337) goto __15 __14: Xsqlite3OsFileControlHint(tls, (*Wal)(unsafe.Pointer(pWal)).FpDbFd, SQLITE_FCNTL_SIZE_HINT, bp+12) @@ -26629,7 +26631,7 @@ } if badHdr == 0 && (*Wal)(unsafe.Pointer(pWal)).Fhdr.FiVersion != U32(WALINDEX_MAX_VERSION) { - rc = Xsqlite3CantopenError(tls, 65682) + rc = Xsqlite3CantopenError(tls, 65686) } if (*Wal)(unsafe.Pointer(pWal)).FbShmUnreliable != 0 { if rc != SQLITE_OK { @@ -27102,7 +27104,7 @@ iRead = iFrame } if libc.PostDecInt32(&nCollide, 1) == 0 { - return Xsqlite3CorruptError(tls, 66419) + return Xsqlite3CorruptError(tls, 66423) } iKey = walNextHash(tls, iKey) } @@ -27607,7 +27609,7 @@ if rc == SQLITE_OK { if (*Wal)(unsafe.Pointer(pWal)).Fhdr.FmxFrame != 0 && walPagesize(tls, pWal) != nBuf { - rc = Xsqlite3CorruptError(tls, 67138) + rc = Xsqlite3CorruptError(tls, 67142) } else { rc = walCheckpoint(tls, pWal, db, eMode2, xBusy2, pBusyArg, sync_flags, zBuf) } @@ -28263,7 +28265,7 @@ } Xsqlite3VdbeRecordUnpack(tls, pKeyInfo, 
int32(nKey), pKey, pIdxKey) if int32((*UnpackedRecord)(unsafe.Pointer(pIdxKey)).FnField) == 0 || int32((*UnpackedRecord)(unsafe.Pointer(pIdxKey)).FnField) > int32((*KeyInfo)(unsafe.Pointer(pKeyInfo)).FnAllField) { - rc = Xsqlite3CorruptError(tls, 69249) + rc = Xsqlite3CorruptError(tls, 69253) } else { rc = Xsqlite3BtreeIndexMoveto(tls, pCur, pIdxKey, pRes) } @@ -28400,7 +28402,7 @@ if !(key == Pgno(0)) { goto __2 } - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69430) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69434) return __2: ; @@ -28417,7 +28419,7 @@ goto __4 } - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69443) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69447) goto ptrmap_exit __4: ; @@ -28425,7 +28427,7 @@ if !(offset < 0) { goto __5 } - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69448) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69452) goto ptrmap_exit __5: ; @@ -28468,7 +28470,7 @@ offset = int32(Pgno(5) * (key - Pgno(iPtrmap) - Pgno(1))) if offset < 0 { Xsqlite3PagerUnref(tls, *(*uintptr)(unsafe.Pointer(bp))) - return Xsqlite3CorruptError(tls, 69493) + return Xsqlite3CorruptError(tls, 69497) } *(*U8)(unsafe.Pointer(pEType)) = *(*U8)(unsafe.Pointer(pPtrmap + uintptr(offset))) @@ -28478,7 +28480,7 @@ Xsqlite3PagerUnref(tls, *(*uintptr)(unsafe.Pointer(bp))) if int32(*(*U8)(unsafe.Pointer(pEType))) < 1 || int32(*(*U8)(unsafe.Pointer(pEType))) > 5 { - return Xsqlite3CorruptError(tls, 69501) + return Xsqlite3CorruptError(tls, 69505) } return SQLITE_OK } @@ -28728,7 +28730,7 @@ if U32((*CellInfo)(unsafe.Pointer(bp)).FnLocal) < (*CellInfo)(unsafe.Pointer(bp)).FnPayload { var ovfl Pgno if Uptr((*MemPage)(unsafe.Pointer(pSrc)).FaDataEnd) >= Uptr(pCell) && Uptr((*MemPage)(unsafe.Pointer(pSrc)).FaDataEnd) < Uptr(pCell+uintptr((*CellInfo)(unsafe.Pointer(bp)).FnLocal)) { - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69893) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69897) return } ovfl = Xsqlite3Get4byte(tls, pCell+uintptr(int32((*CellInfo)(unsafe.Pointer(bp)).FnSize)-4)) @@ -28775,7 +28777,7 @@ if !(iFree > usableSize-4) { goto __2 } - return Xsqlite3CorruptError(tls, 69951) + return Xsqlite3CorruptError(tls, 69955) __2: ; if !(iFree != 0) { @@ -28785,7 +28787,7 @@ if !(iFree2 > usableSize-4) { goto __4 } - return Xsqlite3CorruptError(tls, 69954) + return Xsqlite3CorruptError(tls, 69958) __4: ; if !(0 == iFree2 || int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree2)))) == 0 && int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree2+1)))) == 0) { @@ -28798,7 +28800,7 @@ if !(top >= iFree) { goto __6 } - return Xsqlite3CorruptError(tls, 69962) + return Xsqlite3CorruptError(tls, 69966) __6: ; if !(iFree2 != 0) { @@ -28807,14 +28809,14 @@ if !(iFree+sz > iFree2) { goto __9 } - return Xsqlite3CorruptError(tls, 69965) + return Xsqlite3CorruptError(tls, 69969) __9: ; sz2 = int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree2+2))))<<8 | int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree2+2) + 1))) if !(iFree2+sz2 > usableSize) { goto __10 } - return Xsqlite3CorruptError(tls, 69967) + return Xsqlite3CorruptError(tls, 69971) __10: ; libc.Xmemmove(tls, data+uintptr(iFree+sz+sz2), data+uintptr(iFree+sz), uint32(iFree2-(iFree+sz))) @@ -28824,7 +28826,7 @@ if !(iFree+sz > usableSize) { goto __11 } - return Xsqlite3CorruptError(tls, 69971) + return Xsqlite3CorruptError(tls, 69975) __11: ; __8: @@ -28888,7 +28890,7 @@ if !(pc < iCellStart || pc > iCellLast) { goto __22 } - return 
Xsqlite3CorruptError(tls, 70004) + return Xsqlite3CorruptError(tls, 70008) __22: ; size = int32((*struct { @@ -28898,7 +28900,7 @@ if !(cbrk < iCellStart || pc+size > usableSize) { goto __23 } - return Xsqlite3CorruptError(tls, 70010) + return Xsqlite3CorruptError(tls, 70014) __23: ; *(*U8)(unsafe.Pointer(pAddr1)) = U8(cbrk >> 8) @@ -28920,7 +28922,7 @@ if !(int32(*(*uint8)(unsafe.Pointer(data + uintptr(hdr+7))))+cbrk-iCellFirst != (*MemPage)(unsafe.Pointer(pPage)).FnFree) { goto __24 } - return Xsqlite3CorruptError(tls, 70024) + return Xsqlite3CorruptError(tls, 70028) __24: ; *(*uint8)(unsafe.Pointer(data + uintptr(hdr+5))) = U8(cbrk >> 8) @@ -28955,7 +28957,7 @@ *(*U8)(unsafe.Pointer(aData + uintptr(hdr+7))) += U8(int32(U8(x))) return aData + uintptr(pc) } else if x+pc > maxPC { - *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70081) + *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70085) return uintptr(0) } else { *(*U8)(unsafe.Pointer(aData + uintptr(pc+2))) = U8(x >> 8) @@ -28968,13 +28970,13 @@ pc = int32(*(*U8)(unsafe.Pointer(pTmp)))<<8 | int32(*(*U8)(unsafe.Pointer(pTmp + 1))) if pc <= iAddr { if pc != 0 { - *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70096) + *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70100) } return uintptr(0) } } if pc > maxPC+nByte-4 { - *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70103) + *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70107) } return uintptr(0) } @@ -28999,7 +29001,7 @@ if top == 0 && (*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize == U32(65536) { top = 65536 } else { - return Xsqlite3CorruptError(tls, 70152) + return Xsqlite3CorruptError(tls, 70156) } } @@ -29010,7 +29012,7 @@ *(*int32)(unsafe.Pointer(pIdx)) = libc.AssignInt32(&g2, (int32(pSpace)-int32(data))/1) if g2 <= gap { - return Xsqlite3CorruptError(tls, 70170) + return Xsqlite3CorruptError(tls, 70174) } else { return SQLITE_OK } @@ -29062,22 +29064,22 @@ if int32(iFreeBlk) == 0 { break } - return Xsqlite3CorruptError(tls, 70249) + return Xsqlite3CorruptError(tls, 70253) } iPtr = iFreeBlk } if U32(iFreeBlk) > (*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize-U32(4) { - return Xsqlite3CorruptError(tls, 70254) + return Xsqlite3CorruptError(tls, 70258) } if iFreeBlk != 0 && iEnd+U32(3) >= U32(iFreeBlk) { nFrag = U8(U32(iFreeBlk) - iEnd) if iEnd > U32(iFreeBlk) { - return Xsqlite3CorruptError(tls, 70266) + return Xsqlite3CorruptError(tls, 70270) } iEnd = U32(int32(iFreeBlk) + (int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(iFreeBlk)+2))))<<8 | int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(iFreeBlk)+2) + 1))))) if iEnd > (*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize { - return Xsqlite3CorruptError(tls, 70269) + return Xsqlite3CorruptError(tls, 70273) } iSize = U16(iEnd - U32(iStart)) iFreeBlk = U16(int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFreeBlk))))<<8 | int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFreeBlk) + 1)))) @@ -29087,7 +29089,7 @@ var iPtrEnd int32 = int32(iPtr) + (int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(iPtr)+2))))<<8 | int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(iPtr)+2) + 1)))) if iPtrEnd+3 >= int32(iStart) { if iPtrEnd > int32(iStart) { - return Xsqlite3CorruptError(tls, 70282) + return Xsqlite3CorruptError(tls, 70286) } nFrag = U8(int32(nFrag) + (int32(iStart) - iPtrEnd)) iSize = U16(iEnd - U32(iPtr)) @@ -29095,7 +29097,7 @@ } } if int32(nFrag) > 
int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+7)))) { - return Xsqlite3CorruptError(tls, 70288) + return Xsqlite3CorruptError(tls, 70292) } *(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+7))) -= uint8(int32(nFrag)) } @@ -29103,10 +29105,10 @@ x = U16(int32(*(*U8)(unsafe.Pointer(pTmp)))<<8 | int32(*(*U8)(unsafe.Pointer(pTmp + 1)))) if int32(iStart) <= int32(x) { if int32(iStart) < int32(x) { - return Xsqlite3CorruptError(tls, 70297) + return Xsqlite3CorruptError(tls, 70301) } if int32(iPtr) != int32(hdr)+1 { - return Xsqlite3CorruptError(tls, 70298) + return Xsqlite3CorruptError(tls, 70302) } *(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+1))) = U8(int32(iFreeBlk) >> 8) *(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+1) + 1)) = U8(iFreeBlk) @@ -29166,7 +29168,7 @@ (*MemPage)(unsafe.Pointer(pPage)).FxParseCell = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr) }{btreeParseCellPtrIndex})) - return Xsqlite3CorruptError(tls, 70357) + return Xsqlite3CorruptError(tls, 70361) } } else { (*MemPage)(unsafe.Pointer(pPage)).FchildPtrSize = U8(4) @@ -29202,7 +29204,7 @@ (*MemPage)(unsafe.Pointer(pPage)).FxParseCell = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr) }{btreeParseCellPtrIndex})) - return Xsqlite3CorruptError(tls, 70381) + return Xsqlite3CorruptError(tls, 70385) } } return SQLITE_OK @@ -29232,11 +29234,11 @@ var next U32 var size U32 if pc < top { - return Xsqlite3CorruptError(tls, 70432) + return Xsqlite3CorruptError(tls, 70436) } for 1 != 0 { if pc > iCellLast { - return Xsqlite3CorruptError(tls, 70437) + return Xsqlite3CorruptError(tls, 70441) } next = U32(int32(*(*U8)(unsafe.Pointer(data + uintptr(pc))))<<8 | int32(*(*U8)(unsafe.Pointer(data + uintptr(pc) + 1)))) size = U32(int32(*(*U8)(unsafe.Pointer(data + uintptr(pc+2))))<<8 | int32(*(*U8)(unsafe.Pointer(data + uintptr(pc+2) + 1)))) @@ -29247,15 +29249,15 @@ pc = int32(next) } if next > U32(0) { - return Xsqlite3CorruptError(tls, 70447) + return Xsqlite3CorruptError(tls, 70451) } if U32(pc)+size > uint32(usableSize) { - return Xsqlite3CorruptError(tls, 70451) + return Xsqlite3CorruptError(tls, 70455) } } if nFree > usableSize || nFree < iCellFirst { - return Xsqlite3CorruptError(tls, 70463) + return Xsqlite3CorruptError(tls, 70467) } (*MemPage)(unsafe.Pointer(pPage)).FnFree = int32(U16(nFree - iCellFirst)) return SQLITE_OK @@ -29283,14 +29285,14 @@ pc = int32(libc.X__builtin_bswap16(tls, *(*U16)(unsafe.Pointer(data + uintptr(cellOffset+i*2))))) if pc < iCellFirst || pc > iCellLast { - return Xsqlite3CorruptError(tls, 70494) + return Xsqlite3CorruptError(tls, 70498) } sz = int32((*struct { f func(*libc.TLS, uintptr, uintptr) U16 })(unsafe.Pointer(&struct{ uintptr }{(*MemPage)(unsafe.Pointer(pPage)).FxCellSize})).f(tls, pPage, data+uintptr(pc))) if pc+sz > usableSize { - return Xsqlite3CorruptError(tls, 70499) + return Xsqlite3CorruptError(tls, 70503) } } return SQLITE_OK @@ -29304,7 +29306,7 @@ data = (*MemPage)(unsafe.Pointer(pPage)).FaData + uintptr((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset) if decodeFlags(tls, pPage, int32(*(*U8)(unsafe.Pointer(data)))) != 0 { - return Xsqlite3CorruptError(tls, 70531) + return Xsqlite3CorruptError(tls, 70535) } (*MemPage)(unsafe.Pointer(pPage)).FmaskPage = U16((*BtShared)(unsafe.Pointer(pBt)).FpageSize - U32(1)) @@ -29316,7 +29318,7 @@ (*MemPage)(unsafe.Pointer(pPage)).FnCell = U16(int32(*(*U8)(unsafe.Pointer(data + 3)))<<8 | int32(*(*U8)(unsafe.Pointer(data + 3 + 1)))) if 
U32((*MemPage)(unsafe.Pointer(pPage)).FnCell) > ((*BtShared)(unsafe.Pointer(pBt)).FpageSize-U32(8))/U32(6) { - return Xsqlite3CorruptError(tls, 70545) + return Xsqlite3CorruptError(tls, 70549) } (*MemPage)(unsafe.Pointer(pPage)).FnFree = -1 @@ -29419,7 +29421,7 @@ if !(pgno > btreePagecount(tls, pBt)) { goto __1 } - rc = Xsqlite3CorruptError(tls, 70700) + rc = Xsqlite3CorruptError(tls, 70704) goto getAndInitPage_error1 __1: ; @@ -29447,7 +29449,7 @@ if !(pCur != 0 && (int32((*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppPage)))).FnCell) < 1 || int32((*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppPage)))).FintKey) != int32((*BtCursor)(unsafe.Pointer(pCur)).FcurIntKey))) { goto __5 } - rc = Xsqlite3CorruptError(tls, 70721) + rc = Xsqlite3CorruptError(tls, 70725) goto getAndInitPage_error2 __5: ; @@ -29486,7 +29488,7 @@ if Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppPage)))).FpDbPage) > 1 { releasePage(tls, *(*uintptr)(unsafe.Pointer(ppPage))) *(*uintptr)(unsafe.Pointer(ppPage)) = uintptr(0) - return Xsqlite3CorruptError(tls, 70787) + return Xsqlite3CorruptError(tls, 70791) } (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppPage)))).FisInit = U8(0) } else { @@ -30369,7 +30371,7 @@ if !(Xsqlite3WritableSchema(tls, (*BtShared)(unsafe.Pointer(pBt)).Fdb) == 0) { goto __18 } - rc = Xsqlite3CorruptError(tls, 71722) + rc = Xsqlite3CorruptError(tls, 71726) goto page1_init_failed goto __19 __18: @@ -30784,7 +30786,7 @@ if int32(eType) == PTRMAP_OVERFLOW2 { if Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer(pPage)).FaData) != iFrom { - return Xsqlite3CorruptError(tls, 72143) + return Xsqlite3CorruptError(tls, 72147) } Xsqlite3Put4byte(tls, (*MemPage)(unsafe.Pointer(pPage)).FaData, iTo) } else { @@ -30810,7 +30812,7 @@ })(unsafe.Pointer(&struct{ uintptr }{(*MemPage)(unsafe.Pointer(pPage)).FxParseCell})).f(tls, pPage, pCell, bp) if U32((*CellInfo)(unsafe.Pointer(bp)).FnLocal) < (*CellInfo)(unsafe.Pointer(bp)).FnPayload { if pCell+uintptr((*CellInfo)(unsafe.Pointer(bp)).FnSize) > (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr((*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize) { - return Xsqlite3CorruptError(tls, 72162) + return Xsqlite3CorruptError(tls, 72166) } if iFrom == Xsqlite3Get4byte(tls, pCell+uintptr((*CellInfo)(unsafe.Pointer(bp)).FnSize)-uintptr(4)) { Xsqlite3Put4byte(tls, pCell+uintptr((*CellInfo)(unsafe.Pointer(bp)).FnSize)-uintptr(4), iTo) @@ -30819,7 +30821,7 @@ } } else { if pCell+uintptr(4) > (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr((*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize) { - return Xsqlite3CorruptError(tls, 72171) + return Xsqlite3CorruptError(tls, 72175) } if Xsqlite3Get4byte(tls, pCell) == iFrom { Xsqlite3Put4byte(tls, pCell, iTo) @@ -30830,7 +30832,7 @@ if i == nCell { if int32(eType) != PTRMAP_BTREE || Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr(int32((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset)+8)) != iFrom { - return Xsqlite3CorruptError(tls, 72183) + return Xsqlite3CorruptError(tls, 72187) } Xsqlite3Put4byte(tls, (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr(int32((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset)+8), iTo) } @@ -30846,7 +30848,7 @@ var pPager uintptr = (*BtShared)(unsafe.Pointer(pBt)).FpPager if iDbPage < Pgno(3) { - return Xsqlite3CorruptError(tls, 72218) + return Xsqlite3CorruptError(tls, 72222) } *(*int32)(unsafe.Pointer(bp)) = Xsqlite3PagerMovepage(tls, pPager, 
(*MemPage)(unsafe.Pointer(pDbPage)).FpDbPage, iFreePage, isCommit) @@ -30907,7 +30909,7 @@ return rc } if int32(*(*U8)(unsafe.Pointer(bp))) == PTRMAP_ROOTPAGE { - return Xsqlite3CorruptError(tls, 72316) + return Xsqlite3CorruptError(tls, 72320) } if int32(*(*U8)(unsafe.Pointer(bp))) == PTRMAP_FREEPAGE { @@ -30942,7 +30944,7 @@ releasePage(tls, *(*uintptr)(unsafe.Pointer(bp + 20))) if *(*Pgno)(unsafe.Pointer(bp + 24)) > dbSize { releasePage(tls, *(*uintptr)(unsafe.Pointer(bp + 16))) - return Xsqlite3CorruptError(tls, 72368) + return Xsqlite3CorruptError(tls, 72372) } } @@ -31002,7 +31004,7 @@ var nFin Pgno = finalDbSize(tls, pBt, nOrig, nFree) if nOrig < nFin || nFree >= nOrig { - rc = Xsqlite3CorruptError(tls, 72436) + rc = Xsqlite3CorruptError(tls, 72440) } else if nFree > Pgno(0) { rc = saveAllCursors(tls, pBt, uint32(0), uintptr(0)) if rc == SQLITE_OK { @@ -31041,7 +31043,7 @@ nOrig = btreePagecount(tls, pBt) if ptrmapPageno(tls, pBt, nOrig) == nOrig || nOrig == U32(Xsqlite3PendingByte)/(*BtShared)(unsafe.Pointer(pBt)).FpageSize+U32(1) { - return Xsqlite3CorruptError(tls, 72487) + return Xsqlite3CorruptError(tls, 72491) } nFree = Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer((*BtShared)(unsafe.Pointer(pBt)).FpPage1)).FaData+36) @@ -31072,7 +31074,7 @@ } nFin = finalDbSize(tls, pBt, nOrig, nVac) if nFin > nOrig { - return Xsqlite3CorruptError(tls, 72514) + return Xsqlite3CorruptError(tls, 72518) } if nFin < nOrig { rc = saveAllCursors(tls, pBt, uint32(0), uintptr(0)) @@ -31413,7 +31415,7 @@ if iTable <= Pgno(1) { if iTable < Pgno(1) { - return Xsqlite3CorruptError(tls, 72978) + return Xsqlite3CorruptError(tls, 72982) } else if btreePagecount(tls, pBt) == Pgno(0) { iTable = Pgno(0) } @@ -31657,14 +31659,14 @@ var pBt uintptr = (*BtCursor)(unsafe.Pointer(pCur)).FpBt if int32((*BtCursor)(unsafe.Pointer(pCur)).Fix) >= int32((*MemPage)(unsafe.Pointer(pPage)).FnCell) { - return Xsqlite3CorruptError(tls, 73385) + return Xsqlite3CorruptError(tls, 73389) } getCellInfo(tls, pCur) aPayload = (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload if Uptr((int32(aPayload)-int32((*MemPage)(unsafe.Pointer(pPage)).FaData))/1) > (*BtShared)(unsafe.Pointer(pBt)).FusableSize-U32((*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal) { - return Xsqlite3CorruptError(tls, 73400) + return Xsqlite3CorruptError(tls, 73404) } if offset < U32((*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal) { @@ -31709,7 +31711,7 @@ for *(*Pgno)(unsafe.Pointer(bp)) != 0 { if *(*Pgno)(unsafe.Pointer(bp)) > (*BtShared)(unsafe.Pointer(pBt)).FnPage { - return Xsqlite3CorruptError(tls, 73462) + return Xsqlite3CorruptError(tls, 73466) } *(*Pgno)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pCur)).FaOverflow + uintptr(iIdx)*4)) = *(*Pgno)(unsafe.Pointer(bp)) @@ -31758,7 +31760,7 @@ } if rc == SQLITE_OK && amt > U32(0) { - return Xsqlite3CorruptError(tls, 73547) + return Xsqlite3CorruptError(tls, 73551) } return rc } @@ -31838,7 +31840,7 @@ func moveToChild(tls *libc.TLS, pCur uintptr, newPgno U32) int32 { if int32((*BtCursor)(unsafe.Pointer(pCur)).FiPage) >= BTCURSOR_MAX_DEPTH-1 { - return Xsqlite3CorruptError(tls, 73684) + return Xsqlite3CorruptError(tls, 73688) } (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnSize = U16(0) *(*U8)(unsafe.Pointer(pCur + 1)) &= libc.Uint8FromInt32(libc.CplInt32(BTCF_ValidNKey | BTCF_ValidOvfl)) @@ -31929,7 +31931,7 @@ if !(int32((*MemPage)(unsafe.Pointer(pRoot)).FisInit) == 0 || libc.Bool32((*BtCursor)(unsafe.Pointer(pCur)).FpKeyInfo == uintptr(0)) != int32((*MemPage)(unsafe.Pointer(pRoot)).FintKey)) { goto __11 } - 
return Xsqlite3CorruptError(tls, 73823) + return Xsqlite3CorruptError(tls, 73827) __11: ; skip_init: @@ -31949,7 +31951,7 @@ if !((*MemPage)(unsafe.Pointer(pRoot)).Fpgno != Pgno(1)) { goto __16 } - return Xsqlite3CorruptError(tls, 73835) + return Xsqlite3CorruptError(tls, 73839) __16: ; subpage = Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer(pRoot)).FaData+uintptr(int32((*MemPage)(unsafe.Pointer(pRoot)).FhdrOffset)+8)) @@ -32159,7 +32161,7 @@ if !(pCell >= (*MemPage)(unsafe.Pointer(pPage)).FaDataEnd) { goto __21 } - return Xsqlite3CorruptError(tls, 74077) + return Xsqlite3CorruptError(tls, 74081) __21: ; goto __19 @@ -32363,7 +32365,7 @@ if !!(int32((*MemPage)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pCur)).FpPage)).FisInit) != 0) { goto __4 } - return Xsqlite3CorruptError(tls, 74273) + return Xsqlite3CorruptError(tls, 74277) __4: ; goto bypass_moveto_root @@ -32428,7 +32430,7 @@ if !(nCell < 2 || U32(nCell)/(*BtShared)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pCur)).FpBt)).FusableSize > (*BtShared)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pCur)).FpBt)).FnPage) { goto __17 } - rc = Xsqlite3CorruptError(tls, 74360) + rc = Xsqlite3CorruptError(tls, 74364) goto moveto_index_finish __17: ; @@ -32476,7 +32478,7 @@ if !((*UnpackedRecord)(unsafe.Pointer(pIdxKey)).FerrCode != 0) { goto __24 } - rc = Xsqlite3CorruptError(tls, 74392) + rc = Xsqlite3CorruptError(tls, 74396) __24: ; goto moveto_index_finish @@ -32595,7 +32597,7 @@ pPage = (*BtCursor)(unsafe.Pointer(pCur)).FpPage idx = int32(libc.PreIncUint16(&(*BtCursor)(unsafe.Pointer(pCur)).Fix, 1)) if !(int32((*MemPage)(unsafe.Pointer(pPage)).FisInit) != 0) || Xsqlite3FaultSim(tls, 412) != 0 { - return Xsqlite3CorruptError(tls, 74508) + return Xsqlite3CorruptError(tls, 74512) } if idx >= int32((*MemPage)(unsafe.Pointer(pPage)).FnCell) { @@ -32755,7 +32757,7 @@ if !(n >= mxPage) { goto __1 } - return Xsqlite3CorruptError(tls, 74688) + return Xsqlite3CorruptError(tls, 74692) __1: ; if !(n > U32(0)) { @@ -32820,7 +32822,7 @@ if !(iTrunk > mxPage || libc.PostIncUint32(&nSearch, 1) > n) { goto __16 } - rc = Xsqlite3CorruptError(tls, 74744) + rc = Xsqlite3CorruptError(tls, 74748) goto __17 __16: rc = btreeGetUnusedPage(tls, pBt, iTrunk, bp+4, 0) @@ -32856,7 +32858,7 @@ goto __22 } - rc = Xsqlite3CorruptError(tls, 74773) + rc = Xsqlite3CorruptError(tls, 74777) goto end_allocate_page goto __23 __22: @@ -32900,7 +32902,7 @@ if !(iNewTrunk > mxPage) { goto __32 } - rc = Xsqlite3CorruptError(tls, 74807) + rc = Xsqlite3CorruptError(tls, 74811) goto end_allocate_page __32: ; @@ -33012,7 +33014,7 @@ if !(iPage > mxPage || iPage < Pgno(2)) { goto __51 } - rc = Xsqlite3CorruptError(tls, 74872) + rc = Xsqlite3CorruptError(tls, 74876) goto end_allocate_page __51: ; @@ -33170,7 +33172,7 @@ if !(iPage < Pgno(2) || iPage > (*BtShared)(unsafe.Pointer(pBt)).FnPage) { goto __1 } - return Xsqlite3CorruptError(tls, 74999) + return Xsqlite3CorruptError(tls, 75003) __1: ; if !(pMemPage != 0) { @@ -33227,7 +33229,7 @@ if !(iTrunk > btreePagecount(tls, pBt)) { goto __10 } - *(*int32)(unsafe.Pointer(bp + 4)) = Xsqlite3CorruptError(tls, 75046) + *(*int32)(unsafe.Pointer(bp + 4)) = Xsqlite3CorruptError(tls, 75050) goto freepage_out __10: ; @@ -33243,7 +33245,7 @@ if !(nLeaf > (*BtShared)(unsafe.Pointer(pBt)).FusableSize/U32(4)-U32(2)) { goto __12 } - *(*int32)(unsafe.Pointer(bp + 4)) = Xsqlite3CorruptError(tls, 75057) + *(*int32)(unsafe.Pointer(bp + 4)) = Xsqlite3CorruptError(tls, 75061) goto freepage_out __12: ; @@ -33317,7 +33319,7 @@ var ovflPageSize U32 if 
pCell+uintptr((*CellInfo)(unsafe.Pointer(pInfo)).FnSize) > (*MemPage)(unsafe.Pointer(pPage)).FaDataEnd { - return Xsqlite3CorruptError(tls, 75146) + return Xsqlite3CorruptError(tls, 75150) } ovflPgno = Xsqlite3Get4byte(tls, pCell+uintptr((*CellInfo)(unsafe.Pointer(pInfo)).FnSize)-uintptr(4)) pBt = (*MemPage)(unsafe.Pointer(pPage)).FpBt @@ -33329,7 +33331,7 @@ *(*Pgno)(unsafe.Pointer(bp + 4)) = Pgno(0) *(*uintptr)(unsafe.Pointer(bp)) = uintptr(0) if ovflPgno < Pgno(2) || ovflPgno > btreePagecount(tls, pBt) { - return Xsqlite3CorruptError(tls, 75163) + return Xsqlite3CorruptError(tls, 75167) } if nOvfl != 0 { rc = getOverflowPage(tls, pBt, ovflPgno, bp, bp+4) @@ -33340,7 +33342,7 @@ if (*(*uintptr)(unsafe.Pointer(bp)) != 0 || libc.AssignPtrUintptr(bp, btreePageLookup(tls, pBt, ovflPgno)) != uintptr(0)) && Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FpDbPage) != 1 { - rc = Xsqlite3CorruptError(tls, 75183) + rc = Xsqlite3CorruptError(tls, 75187) } else { rc = freePage2(tls, pBt, *(*uintptr)(unsafe.Pointer(bp)), ovflPgno) } @@ -33505,7 +33507,7 @@ hdr = int32((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset) if pc+U32(sz) > (*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize { - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 75436) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 75440) return } rc = freeSpace(tls, pPage, uint16(pc), uint16(sz)) @@ -33783,12 +33785,12 @@ if Uptr(pCell) >= Uptr(aData+uintptr(j)) && Uptr(pCell) < Uptr(pEnd) { if Uptr(pCell+uintptr(sz)) > Uptr(pEnd) { - return Xsqlite3CorruptError(tls, 75737) + return Xsqlite3CorruptError(tls, 75741) } pCell = pTmp + uintptr((int32(pCell)-int32(aData))/1) } else if Uptr(pCell+uintptr(sz)) > Uptr(pSrcEnd) && Uptr(pCell) < Uptr(pSrcEnd) { - return Xsqlite3CorruptError(tls, 75742) + return Xsqlite3CorruptError(tls, 75746) } pData -= uintptr(sz) @@ -33796,7 +33798,7 @@ *(*U8)(unsafe.Pointer(pCellptr + 1)) = U8((int32(pData) - int32(aData)) / 1) pCellptr += uintptr(2) if pData < pCellptr { - return Xsqlite3CorruptError(tls, 75748) + return Xsqlite3CorruptError(tls, 75752) } libc.Xmemmove(tls, pData, pCell, uint32(sz)) @@ -33856,7 +33858,7 @@ if Uptr(*(*uintptr)(unsafe.Pointer((*CellArray)(unsafe.Pointer(pCArray)).FapCell + uintptr(i)*4))+uintptr(sz)) > Uptr(pEnd) && Uptr(*(*uintptr)(unsafe.Pointer((*CellArray)(unsafe.Pointer(pCArray)).FapCell + uintptr(i)*4))) < Uptr(pEnd) { - Xsqlite3CorruptError(tls, 75833) + Xsqlite3CorruptError(tls, 75837) return 1 } libc.Xmemmove(tls, pSlot, *(*uintptr)(unsafe.Pointer((*CellArray)(unsafe.Pointer(pCArray)).FapCell + uintptr(i)*4)), uint32(sz)) @@ -33945,7 +33947,7 @@ if !(nShift > nCell) { goto __2 } - return Xsqlite3CorruptError(tls, 75947) + return Xsqlite3CorruptError(tls, 75951) __2: ; libc.Xmemmove(tls, (*MemPage)(unsafe.Pointer(pPg)).FaCellIdx, (*MemPage)(unsafe.Pointer(pPg)).FaCellIdx+uintptr(nShift*2), uint32(nCell*2)) @@ -34061,7 +34063,7 @@ var pBt uintptr = (*MemPage)(unsafe.Pointer(pPage)).FpBt if int32((*MemPage)(unsafe.Pointer(pPage)).FnCell) == 0 { - return Xsqlite3CorruptError(tls, 76060) + return Xsqlite3CorruptError(tls, 76064) } *(*int32)(unsafe.Pointer(bp + 80)) = allocateBtreePage(tls, pBt, bp, bp+4, uint32(0), uint8(0)) @@ -34381,7 +34383,7 @@ if !(int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pOld)).FaData))) != int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp + 72)))).FaData)))) { goto __25 } - *(*int32)(unsafe.Pointer(bp + 112)) = 
Xsqlite3CorruptError(tls, 76481) + *(*int32)(unsafe.Pointer(bp + 112)) = Xsqlite3CorruptError(tls, 76485) goto balance_cleanup __25: ; @@ -34392,7 +34394,7 @@ if !(limit < int32(*(*U16)(unsafe.Pointer(pOld + 28)))) { goto __27 } - *(*int32)(unsafe.Pointer(bp + 112)) = Xsqlite3CorruptError(tls, 76505) + *(*int32)(unsafe.Pointer(bp + 112)) = Xsqlite3CorruptError(tls, 76509) goto balance_cleanup __27: ; @@ -34550,7 +34552,7 @@ if !(k > NB+2) { goto __55 } - *(*int32)(unsafe.Pointer(bp + 112)) = Xsqlite3CorruptError(tls, 76606) + *(*int32)(unsafe.Pointer(bp + 112)) = Xsqlite3CorruptError(tls, 76610) goto balance_cleanup __55: ; @@ -34624,7 +34626,7 @@ }()) { goto __67 } - *(*int32)(unsafe.Pointer(bp + 112)) = Xsqlite3CorruptError(tls, 76639) + *(*int32)(unsafe.Pointer(bp + 112)) = Xsqlite3CorruptError(tls, 76643) goto balance_cleanup __67: ; @@ -34687,7 +34689,7 @@ }()) { goto __75 } - *(*int32)(unsafe.Pointer(bp + 112)) = Xsqlite3CorruptError(tls, 76683) + *(*int32)(unsafe.Pointer(bp + 112)) = Xsqlite3CorruptError(tls, 76687) goto balance_cleanup __75: ; @@ -34715,7 +34717,7 @@ *(*int32)(unsafe.Pointer(bp + 112)) == SQLITE_OK) { goto __81 } - *(*int32)(unsafe.Pointer(bp + 112)) = Xsqlite3CorruptError(tls, 76716) + *(*int32)(unsafe.Pointer(bp + 112)) = Xsqlite3CorruptError(tls, 76720) __81: ; if !(*(*int32)(unsafe.Pointer(bp + 112)) != 0) { @@ -34976,7 +34978,7 @@ if !(Uptr(pSrcEnd) >= Uptr(pCell1) && Uptr(pSrcEnd) < Uptr(pCell1+uintptr(sz2))) { goto __121 } - *(*int32)(unsafe.Pointer(bp + 112)) = Xsqlite3CorruptError(tls, 76916) + *(*int32)(unsafe.Pointer(bp + 112)) = Xsqlite3CorruptError(tls, 76920) goto balance_cleanup __121: ; @@ -35168,7 +35170,7 @@ if pOther != pCur && int32((*BtCursor)(unsafe.Pointer(pOther)).FeState) == CURSOR_VALID && (*BtCursor)(unsafe.Pointer(pOther)).FpPage == (*BtCursor)(unsafe.Pointer(pCur)).FpPage { - return Xsqlite3CorruptError(tls, 77146) + return Xsqlite3CorruptError(tls, 77150) } } return SQLITE_OK @@ -35206,7 +35208,7 @@ break } } else if Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(pPage)).FpDbPage) > 1 { - rc = Xsqlite3CorruptError(tls, 77206) + rc = Xsqlite3CorruptError(tls, 77210) } else { var pParent uintptr = *(*uintptr)(unsafe.Pointer(pCur + 112 + uintptr(iPage-1)*4)) var iIdx int32 = int32(*(*U16)(unsafe.Pointer(pCur + 64 + uintptr(iPage-1)*2))) @@ -35312,7 +35314,7 @@ return rc } if Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FpDbPage) != 1 || (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FisInit != 0 { - rc = Xsqlite3CorruptError(tls, 77370) + rc = Xsqlite3CorruptError(tls, 77374) } else { if U32(iOffset)+ovflPageSize < U32(nTotal) { ovflPgno = Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FaData) @@ -35337,7 +35339,7 @@ if (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload+uintptr((*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal) > (*MemPage)(unsafe.Pointer(pPage)).FaDataEnd || (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload < (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr((*MemPage)(unsafe.Pointer(pPage)).FcellOffset) { - return Xsqlite3CorruptError(tls, 77398) + return Xsqlite3CorruptError(tls, 77402) } if int32((*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal) == nTotal { return btreeOverwriteContent(tls, pPage, (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload, pX, @@ -35407,7 +35409,7 @@ goto __3 } - return Xsqlite3CorruptError(tls, 77479) + return Xsqlite3CorruptError(tls, 77483) __3: ; __1: @@ -35520,7 +35522,7 @@ goto __21 } - 
*(*int32)(unsafe.Pointer(bp + 84)) = Xsqlite3CorruptError(tls, 77602) + *(*int32)(unsafe.Pointer(bp + 84)) = Xsqlite3CorruptError(tls, 77606) goto __22 __21: *(*int32)(unsafe.Pointer(bp + 84)) = btreeComputeFreeSpace(tls, pPage) @@ -35580,6 +35582,7 @@ __25: ; idx = int32((*BtCursor)(unsafe.Pointer(pCur)).Fix) + (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnSize = U16(0) if !(*(*int32)(unsafe.Pointer(bp)) == 0) { goto __31 } @@ -35587,7 +35590,7 @@ if !(idx >= int32((*MemPage)(unsafe.Pointer(pPage)).FnCell)) { goto __33 } - return Xsqlite3CorruptError(tls, 77640) + return Xsqlite3CorruptError(tls, 77645) __33: ; *(*int32)(unsafe.Pointer(bp + 84)) = Xsqlite3PagerWrite(tls, (*MemPage)(unsafe.Pointer(pPage)).FpDbPage) @@ -35625,13 +35628,13 @@ if !(oldCell < (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset)+uintptr(10)) { goto __39 } - return Xsqlite3CorruptError(tls, 77667) + return Xsqlite3CorruptError(tls, 77672) __39: ; if !(oldCell+uintptr(*(*int32)(unsafe.Pointer(bp + 88))) > (*MemPage)(unsafe.Pointer(pPage)).FaDataEnd) { goto __40 } - return Xsqlite3CorruptError(tls, 77670) + return Xsqlite3CorruptError(tls, 77675) __40: ; libc.Xmemcpy(tls, oldCell, newCell, uint32(*(*int32)(unsafe.Pointer(bp + 88)))) @@ -35662,7 +35665,6 @@ ; *(*int32)(unsafe.Pointer(bp + 84)) = insertCell(tls, pPage, idx, newCell, *(*int32)(unsafe.Pointer(bp + 88)), uintptr(0), uint32(0)) - (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnSize = U16(0) if !((*MemPage)(unsafe.Pointer(pPage)).FnOverflow != 0) { goto __44 } @@ -35737,7 +35739,7 @@ nIn = U32((*BtCursor)(unsafe.Pointer(pSrc)).Finfo.FnLocal) aIn = (*BtCursor)(unsafe.Pointer(pSrc)).Finfo.FpPayload if aIn+uintptr(nIn) > (*MemPage)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pSrc)).FpPage)).FaDataEnd { - return Xsqlite3CorruptError(tls, 77773) + return Xsqlite3CorruptError(tls, 77777) } nRem = (*BtCursor)(unsafe.Pointer(pSrc)).Finfo.FnPayload if nIn == nRem && nIn < U32((*MemPage)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pDest)).FpPage)).FmaxLocal) { @@ -35762,7 +35764,7 @@ if nRem > nIn { if aIn+uintptr(nIn)+uintptr(4) > (*MemPage)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pSrc)).FpPage)).FaDataEnd { - return Xsqlite3CorruptError(tls, 77798) + return Xsqlite3CorruptError(tls, 77802) } ovflIn = Xsqlite3Get4byte(tls, (*BtCursor)(unsafe.Pointer(pSrc)).Finfo.FpPayload+uintptr(nIn)) } @@ -35863,7 +35865,7 @@ return *(*int32)(unsafe.Pointer(bp + 20)) } } else { - return Xsqlite3CorruptError(tls, 77894) + return Xsqlite3CorruptError(tls, 77898) } } @@ -35871,11 +35873,11 @@ iCellIdx = int32((*BtCursor)(unsafe.Pointer(pCur)).Fix) pPage = (*BtCursor)(unsafe.Pointer(pCur)).FpPage if int32((*MemPage)(unsafe.Pointer(pPage)).FnCell) <= iCellIdx { - return Xsqlite3CorruptError(tls, 77903) + return Xsqlite3CorruptError(tls, 77907) } pCell = (*MemPage)(unsafe.Pointer(pPage)).FaData + uintptr(int32((*MemPage)(unsafe.Pointer(pPage)).FmaskPage)&int32(libc.X__builtin_bswap16(tls, *(*U16)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FaCellIdx + uintptr(2*iCellIdx)))))) if (*MemPage)(unsafe.Pointer(pPage)).FnFree < 0 && btreeComputeFreeSpace(tls, pPage) != 0 { - return Xsqlite3CorruptError(tls, 77907) + return Xsqlite3CorruptError(tls, 77911) } bPreserve = U8(libc.Bool32(int32(flags)&BTREE_SAVEPOSITION != 0)) @@ -35950,7 +35952,7 @@ } pCell = (*MemPage)(unsafe.Pointer(pLeaf)).FaData + uintptr(int32((*MemPage)(unsafe.Pointer(pLeaf)).FmaskPage)&int32(libc.X__builtin_bswap16(tls, *(*U16)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pLeaf)).FaCellIdx 
+ uintptr(2*(int32((*MemPage)(unsafe.Pointer(pLeaf)).FnCell)-1))))))) if pCell < (*MemPage)(unsafe.Pointer(pLeaf)).FaData+4 { - return Xsqlite3CorruptError(tls, 77998) + return Xsqlite3CorruptError(tls, 78002) } nCell = int32((*struct { f func(*libc.TLS, uintptr, uintptr) U16 @@ -36019,7 +36021,7 @@ Xsqlite3BtreeGetMeta(tls, p, BTREE_LARGEST_ROOT_PAGE, bp) if *(*Pgno)(unsafe.Pointer(bp)) > btreePagecount(tls, pBt) { - return Xsqlite3CorruptError(tls, 78114) + return Xsqlite3CorruptError(tls, 78118) } *(*Pgno)(unsafe.Pointer(bp))++ @@ -36048,7 +36050,7 @@ } *(*int32)(unsafe.Pointer(bp + 24)) = ptrmapGet(tls, pBt, *(*Pgno)(unsafe.Pointer(bp)), bp+16, bp+20) if int32(*(*U8)(unsafe.Pointer(bp + 16))) == PTRMAP_ROOTPAGE || int32(*(*U8)(unsafe.Pointer(bp + 16))) == PTRMAP_FREEPAGE { - *(*int32)(unsafe.Pointer(bp + 24)) = Xsqlite3CorruptError(tls, 78162) + *(*int32)(unsafe.Pointer(bp + 24)) = Xsqlite3CorruptError(tls, 78166) } if *(*int32)(unsafe.Pointer(bp + 24)) != SQLITE_OK { releasePage(tls, *(*uintptr)(unsafe.Pointer(bp + 12))) @@ -36124,7 +36126,7 @@ if !(pgno > btreePagecount(tls, pBt)) { goto __1 } - return Xsqlite3CorruptError(tls, 78252) + return Xsqlite3CorruptError(tls, 78256) __1: ; *(*int32)(unsafe.Pointer(bp + 24)) = getAndInitPage(tls, pBt, pgno, bp, uintptr(0), 0) @@ -36138,7 +36140,7 @@ Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FpDbPage) != 1+libc.Bool32(pgno == Pgno(1))) { goto __3 } - *(*int32)(unsafe.Pointer(bp + 24)) = Xsqlite3CorruptError(tls, 78259) + *(*int32)(unsafe.Pointer(bp + 24)) = Xsqlite3CorruptError(tls, 78263) goto cleardatabasepage_out __3: ; @@ -36272,7 +36274,7 @@ var pBt uintptr = (*Btree)(unsafe.Pointer(p)).FpBt if iTable > btreePagecount(tls, pBt) { - return Xsqlite3CorruptError(tls, 78363) + return Xsqlite3CorruptError(tls, 78367) } *(*int32)(unsafe.Pointer(bp + 8)) = Xsqlite3BtreeClearTable(tls, p, int32(iTable), uintptr(0)) @@ -38742,7 +38744,7 @@ var rc int32 (*Mem)(unsafe.Pointer(pMem)).Fflags = U16(MEM_Null) if Xsqlite3BtreeMaxRecordSize(tls, pCur) < Sqlite3_int64(offset+amt) { - return Xsqlite3CorruptError(tls, 81630) + return Xsqlite3CorruptError(tls, 81634) } if SQLITE_OK == libc.AssignInt32(&rc, Xsqlite3VdbeMemClearAndResize(tls, pMem, int32(amt+U32(1)))) { rc = Xsqlite3BtreePayload(tls, pCur, offset, amt, (*Mem)(unsafe.Pointer(pMem)).Fz) @@ -39390,7 +39392,7 @@ return Xsqlite3GetVarint32(tls, a, bp) }()) if *(*int32)(unsafe.Pointer(bp)) > nRec || iHdr >= *(*int32)(unsafe.Pointer(bp)) { - return Xsqlite3CorruptError(tls, 82270) + return Xsqlite3CorruptError(tls, 82274) } iField = *(*int32)(unsafe.Pointer(bp)) for i = 0; i <= iCol; i++ { @@ -39405,14 +39407,14 @@ }()) if iHdr > *(*int32)(unsafe.Pointer(bp)) { - return Xsqlite3CorruptError(tls, 82276) + return Xsqlite3CorruptError(tls, 82280) } szField = int32(Xsqlite3VdbeSerialTypeLen(tls, *(*U32)(unsafe.Pointer(bp + 4)))) iField = iField + szField } if iField > nRec { - return Xsqlite3CorruptError(tls, 82282) + return Xsqlite3CorruptError(tls, 82286) } if pMem == uintptr(0) { pMem = libc.AssignPtrUintptr(ppVal, Xsqlite3ValueNew(tls, db)) @@ -41716,7 +41718,7 @@ return rc } if *(*int32)(unsafe.Pointer(bp)) != 0 { - return Xsqlite3CorruptError(tls, 86058) + return Xsqlite3CorruptError(tls, 86062) } (*VdbeCursor)(unsafe.Pointer(p)).FdeferredMoveto = U8(0) (*VdbeCursor)(unsafe.Pointer(p)).FcacheStatus = U32(CACHE_STALE) @@ -42267,7 +42269,7 @@ i = 0 } if d1 > uint32(nKey1) { - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = 
U8(Xsqlite3CorruptError(tls, 86985)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 86989)) return 0 } @@ -42332,7 +42334,7 @@ if d1+U32((*Mem)(unsafe.Pointer(bp+8)).Fn) > uint32(nKey1) || int32((*KeyInfo)(unsafe.Pointer(libc.AssignUintptr(&pKeyInfo, (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FpKeyInfo))).FnAllField) <= i { - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87062)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87066)) return 0 } else if *(*uintptr)(unsafe.Pointer(pKeyInfo + 20 + uintptr(i)*4)) != 0 { (*Mem)(unsafe.Pointer(bp + 8)).Fenc = (*KeyInfo)(unsafe.Pointer(pKeyInfo)).Fenc @@ -42366,7 +42368,7 @@ var nStr int32 = int32((*(*U32)(unsafe.Pointer(bp + 48)) - U32(12)) / U32(2)) if d1+U32(nStr) > uint32(nKey1) { - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87092)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87096)) return 0 } else if int32((*Mem)(unsafe.Pointer(pRhs)).Fflags)&MEM_Zero != 0 { if !(isAllZero(tls, aKey1+uintptr(d1), nStr) != 0) { @@ -42416,7 +42418,7 @@ } idx1 = idx1 + U32(Xsqlite3VarintLen(tls, uint64(*(*U32)(unsafe.Pointer(bp + 48))))) if idx1 >= *(*U32)(unsafe.Pointer(bp + 4)) { - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87136)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87140)) return 0 } } @@ -42562,7 +42564,7 @@ if !(szHdr+nStr > nKey1) { goto __7 } - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87299)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87303)) return 0 __7: ; @@ -42733,7 +42735,7 @@ idx_rowid_corruption: ; Xsqlite3VdbeMemReleaseMalloc(tls, bp) - return Xsqlite3CorruptError(tls, 87457) + return Xsqlite3CorruptError(tls, 87461) } // Compare the key of the index entry that cursor pC is pointing to against @@ -42759,7 +42761,7 @@ if nCellKey <= int64(0) || nCellKey > int64(0x7fffffff) { *(*int32)(unsafe.Pointer(res)) = 0 - return Xsqlite3CorruptError(tls, 87490) + return Xsqlite3CorruptError(tls, 87494) } Xsqlite3VdbeMemInit(tls, bp, db, uint16(0)) rc = Xsqlite3VdbeMemFromBtreeZeroOffset(tls, pCur, U32(nCellKey), bp) @@ -43033,7 +43035,7 @@ var v uintptr = pStmt var db uintptr = (*Vdbe)(unsafe.Pointer(v)).Fdb if vdbeSafety(tls, v) != 0 { - return Xsqlite3MisuseError(tls, 87854) + return Xsqlite3MisuseError(tls, 87858) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) if (*Vdbe)(unsafe.Pointer(v)).FstartTime > int64(0) { @@ -43648,7 +43650,7 @@ var db uintptr if vdbeSafetyNotNull(tls, v) != 0 { - return Xsqlite3MisuseError(tls, 88544) + return Xsqlite3MisuseError(tls, 88548) } db = (*Vdbe)(unsafe.Pointer(v)).Fdb Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) @@ -44168,7 +44170,7 @@ var pVar uintptr if vdbeSafetyNotNull(tls, p) != 0 { - return Xsqlite3MisuseError(tls, 89208) + return Xsqlite3MisuseError(tls, 89212) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer((*Vdbe)(unsafe.Pointer(p)).Fdb)).Fmutex) if int32((*Vdbe)(unsafe.Pointer(p)).FeVdbeState) != VDBE_READY_STATE { @@ -44176,7 +44178,7 @@ Xsqlite3_mutex_leave(tls, (*Sqlite3)(unsafe.Pointer((*Vdbe)(unsafe.Pointer(p)).Fdb)).Fmutex) Xsqlite3_log(tls, SQLITE_MISUSE, ts+5357, libc.VaList(bp, (*Vdbe)(unsafe.Pointer(p)).FzSql)) - return Xsqlite3MisuseError(tls, 89216) + return Xsqlite3MisuseError(tls, 89220) } if i >= 
uint32((*Vdbe)(unsafe.Pointer(p)).FnVar) { Xsqlite3Error(tls, (*Vdbe)(unsafe.Pointer(p)).Fdb, SQLITE_RANGE) @@ -44581,7 +44583,7 @@ if !(!(p != 0) || (*PreUpdate)(unsafe.Pointer(p)).Fop == SQLITE_INSERT) { goto __1 } - rc = Xsqlite3MisuseError(tls, 89707) + rc = Xsqlite3MisuseError(tls, 89711) goto preupdate_old_out __1: ; @@ -44725,7 +44727,7 @@ if !(!(p != 0) || (*PreUpdate)(unsafe.Pointer(p)).Fop == SQLITE_DELETE) { goto __1 } - rc = Xsqlite3MisuseError(tls, 89809) + rc = Xsqlite3MisuseError(tls, 89813) goto preupdate_new_out __1: ; @@ -45169,10 +45171,6 @@ } else if int32((*Mem)(unsafe.Pointer(p)).Fflags)&MEM_Real != 0 { h = h + U64(Xsqlite3VdbeIntValue(tls, p)) } else if int32((*Mem)(unsafe.Pointer(p)).Fflags)&(MEM_Str|MEM_Blob) != 0 { - h = h + U64((*Mem)(unsafe.Pointer(p)).Fn) - if int32((*Mem)(unsafe.Pointer(p)).Fflags)&MEM_Zero != 0 { - h = h + U64(*(*int32)(unsafe.Pointer(p))) - } } } return h @@ -47820,7 +47818,7 @@ goto __9 goto __425 __424: - rc = Xsqlite3CorruptError(tls, 93317) + rc = Xsqlite3CorruptError(tls, 93320) goto abort_due_to_error __425: ; @@ -49580,7 +49578,7 @@ if !((*Op)(unsafe.Pointer(pOp)).Fp2 == 0) { goto __682 } - rc = Xsqlite3CorruptError(tls, 95560) + rc = Xsqlite3CorruptError(tls, 95563) goto __683 __682: goto jump_to_p2 @@ -50358,7 +50356,7 @@ if !((*Op)(unsafe.Pointer(pOp)).Fp5 != 0 && !(Xsqlite3WritableSchema(tls, db) != 0)) { goto __770 } - rc = Xsqlite3ReportError(tls, SQLITE_CORRUPT|int32(3)<<8, 96635, ts+5866) + rc = Xsqlite3ReportError(tls, SQLITE_CORRUPT|int32(3)<<8, 96638, ts+5866) goto abort_due_to_error __770: ; @@ -50468,7 +50466,7 @@ if !(nCellKey <= int64(0) || nCellKey > int64(0x7fffffff)) { goto __781 } - rc = Xsqlite3CorruptError(tls, 96840) + rc = Xsqlite3CorruptError(tls, 96843) goto abort_due_to_error __781: ; @@ -50662,7 +50660,7 @@ goto __803 } - rc = Xsqlite3CorruptError(tls, 97092) + rc = Xsqlite3CorruptError(tls, 97095) __803: ; Xsqlite3DbFreeNN(tls, db, zSql) @@ -52029,7 +52027,7 @@ if !(rc == SQLITE_IOERR|int32(33)<<8) { goto __957 } - rc = Xsqlite3CorruptError(tls, 99031) + rc = Xsqlite3CorruptError(tls, 99034) __957: ; __956: @@ -52549,7 +52547,7 @@ var db uintptr if p == uintptr(0) { - return Xsqlite3MisuseError(tls, 99516) + return Xsqlite3MisuseError(tls, 99519) } db = (*Incrblob)(unsafe.Pointer(p)).Fdb Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) @@ -52632,7 +52630,7 @@ var db uintptr if p == uintptr(0) { - return Xsqlite3MisuseError(tls, 99616) + return Xsqlite3MisuseError(tls, 99619) } db = (*Incrblob)(unsafe.Pointer(p)).Fdb Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) @@ -56065,14 +56063,10 @@ ; Xsqlite3WalkExpr(tls, pWalker, (*Expr)(unsafe.Pointer(pExpr)).FpLeft) if 0 == Xsqlite3ExprCanBeNull(tls, (*Expr)(unsafe.Pointer(pExpr)).FpLeft) && !(int32((*Parse)(unsafe.Pointer(pParse)).FeParseMode) >= PARSE_MODE_RENAME) { - if int32((*Expr)(unsafe.Pointer(pExpr)).Fop) == TK_NOTNULL { - *(*uintptr)(unsafe.Pointer(pExpr + 8)) = ts + 6764 - *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_IsTrue) - } else { - *(*uintptr)(unsafe.Pointer(pExpr + 8)) = ts + 6769 - *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_IsFalse) - } - (*Expr)(unsafe.Pointer(pExpr)).Fop = U8(TK_TRUEFALSE) + *(*int32)(unsafe.Pointer(pExpr + 8)) = libc.Bool32(int32((*Expr)(unsafe.Pointer(pExpr)).Fop) == TK_NOTNULL) + *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_IntValue) + (*Expr)(unsafe.Pointer(pExpr)).Fop = U8(TK_INTEGER) + i = 0 p = pNC __4: @@ -56116,7 +56110,7 @@ var pLeft uintptr = (*Expr)(unsafe.Pointer(pExpr)).FpLeft 
if (*NameContext)(unsafe.Pointer(pNC)).FncFlags&(NC_IdxExpr|NC_GenCol) != 0 { - notValidImpl(tls, pParse, pNC, ts+6775, uintptr(0), pExpr) + notValidImpl(tls, pParse, pNC, ts+6764, uintptr(0), pExpr) } pRight = (*Expr)(unsafe.Pointer(pExpr)).FpRight @@ -56180,7 +56174,7 @@ (*Expr)(unsafe.Pointer(pExpr)).FiTable = exprProbability(tls, (*ExprList_item)(unsafe.Pointer(pList+8+1*20)).FpExpr) if (*Expr)(unsafe.Pointer(pExpr)).FiTable < 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+6792, libc.VaList(bp, pExpr)) + ts+6781, libc.VaList(bp, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } } else { @@ -56196,7 +56190,7 @@ var auth int32 = Xsqlite3AuthCheck(tls, pParse, SQLITE_FUNCTION, uintptr(0), (*FuncDef)(unsafe.Pointer(pDef)).FzName, uintptr(0)) if auth != SQLITE_OK { if auth == SQLITE_DENY { - Xsqlite3ErrorMsg(tls, pParse, ts+6856, + Xsqlite3ErrorMsg(tls, pParse, ts+6845, libc.VaList(bp+8, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } @@ -56210,7 +56204,7 @@ } if (*FuncDef)(unsafe.Pointer(pDef)).FfuncFlags&U32(SQLITE_FUNC_CONSTANT) == U32(0) { if (*NameContext)(unsafe.Pointer(pNC)).FncFlags&(NC_IdxExpr|NC_PartIdx|NC_GenCol) != 0 { - notValidImpl(tls, pParse, pNC, ts+6892, uintptr(0), pExpr) + notValidImpl(tls, pParse, pNC, ts+6881, uintptr(0), pExpr) } } else { @@ -56233,30 +56227,30 @@ if 0 == libc.Bool32(int32((*Parse)(unsafe.Pointer(pParse)).FeParseMode) >= PARSE_MODE_RENAME) { if pDef != 0 && (*FuncDef)(unsafe.Pointer(pDef)).FxValue == uintptr(0) && pWin != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+6920, libc.VaList(bp+16, pExpr)) + ts+6909, libc.VaList(bp+16, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } else if is_agg != 0 && (*NameContext)(unsafe.Pointer(pNC)).FncFlags&NC_AllowAgg == 0 || is_agg != 0 && (*FuncDef)(unsafe.Pointer(pDef)).FfuncFlags&U32(SQLITE_FUNC_WINDOW) != 0 && !(pWin != 0) || is_agg != 0 && pWin != 0 && (*NameContext)(unsafe.Pointer(pNC)).FncFlags&NC_AllowWin == 0 { var zType uintptr if (*FuncDef)(unsafe.Pointer(pDef)).FfuncFlags&U32(SQLITE_FUNC_WINDOW) != 0 || pWin != 0 { - zType = ts + 6963 + zType = ts + 6952 } else { - zType = ts + 6970 + zType = ts + 6959 } - Xsqlite3ErrorMsg(tls, pParse, ts+6980, libc.VaList(bp+24, zType, pExpr)) + Xsqlite3ErrorMsg(tls, pParse, ts+6969, libc.VaList(bp+24, zType, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ is_agg = 0 } else if no_such_func != 0 && int32((*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).Finit.Fbusy) == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+7008, libc.VaList(bp+40, pExpr)) + Xsqlite3ErrorMsg(tls, pParse, ts+6997, libc.VaList(bp+40, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } else if wrong_num_args != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+7030, + Xsqlite3ErrorMsg(tls, pParse, ts+7019, libc.VaList(bp+48, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } else if is_agg == 0 && (*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_WinFunc) != U32(0) { Xsqlite3ErrorMsg(tls, pParse, - ts+7074, + ts+7063, libc.VaList(bp+56, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } @@ -56328,15 +56322,15 @@ var nRef int32 = (*NameContext)(unsafe.Pointer(pNC)).FnRef if (*NameContext)(unsafe.Pointer(pNC)).FncFlags&NC_SelfRef != 0 { - notValidImpl(tls, pParse, pNC, ts+7122, pExpr, pExpr) + notValidImpl(tls, pParse, pNC, ts+7111, pExpr, pExpr) } else { Xsqlite3WalkSelect(tls, pWalker, *(*uintptr)(unsafe.Pointer(pExpr + 20))) } if nRef != (*NameContext)(unsafe.Pointer(pNC)).FnRef { *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_VarSelect) - *(*int32)(unsafe.Pointer(pNC + 24)) |= 
NC_VarSelect } + *(*int32)(unsafe.Pointer(pNC + 24)) |= NC_Subquery } break @@ -56344,7 +56338,7 @@ case TK_VARIABLE: { if (*NameContext)(unsafe.Pointer(pNC)).FncFlags&(NC_IsCheck|NC_PartIdx|NC_IdxExpr|NC_GenCol) != 0 { - notValidImpl(tls, pParse, pNC, ts+7133, pExpr, pExpr) + notValidImpl(tls, pParse, pNC, ts+7122, pExpr, pExpr) } break @@ -56475,7 +56469,7 @@ defer tls.Free(24) Xsqlite3ErrorMsg(tls, pParse, - ts+7144, libc.VaList(bp, i, zType, mx)) + ts+7133, libc.VaList(bp, i, zType, mx)) Xsqlite3RecordErrorOffsetOfExpr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pError) } @@ -56495,7 +56489,7 @@ } db = (*Parse)(unsafe.Pointer(pParse)).Fdb if (*ExprList)(unsafe.Pointer(pOrderBy)).FnExpr > *(*int32)(unsafe.Pointer(db + 116 + 2*4)) { - Xsqlite3ErrorMsg(tls, pParse, ts+7200, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+7189, 0) return 1 } for i = 0; i < (*ExprList)(unsafe.Pointer(pOrderBy)).FnExpr; i++ { @@ -56530,7 +56524,7 @@ } if Xsqlite3ExprIsInteger(tls, pE, bp+8) != 0 { if *(*int32)(unsafe.Pointer(bp + 8)) <= 0 || *(*int32)(unsafe.Pointer(bp + 8)) > (*ExprList)(unsafe.Pointer(pEList)).FnExpr { - resolveOutOfRangeError(tls, pParse, ts+7234, i+1, (*ExprList)(unsafe.Pointer(pEList)).FnExpr, pE) + resolveOutOfRangeError(tls, pParse, ts+7223, i+1, (*ExprList)(unsafe.Pointer(pEList)).FnExpr, pE) return 1 } } else { @@ -56587,7 +56581,7 @@ for i = 0; i < (*ExprList)(unsafe.Pointer(pOrderBy)).FnExpr; i++ { if int32(*(*uint16)(unsafe.Pointer(pOrderBy + 8 + uintptr(i)*20 + 8 + 4))&0x4>>2) == 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+7240, libc.VaList(bp, i+1)) + ts+7229, libc.VaList(bp, i+1)) return 1 } } @@ -56615,7 +56609,7 @@ return 0 } if (*ExprList)(unsafe.Pointer(pOrderBy)).FnExpr > *(*int32)(unsafe.Pointer(db + 116 + 2*4)) { - Xsqlite3ErrorMsg(tls, pParse, ts+7301, libc.VaList(bp, zType)) + Xsqlite3ErrorMsg(tls, pParse, ts+7290, libc.VaList(bp, zType)) return 1 } pEList = (*Select)(unsafe.Pointer(pSelect)).FpEList @@ -56829,7 +56823,7 @@ *(*int32)(unsafe.Pointer(bp + 24)) |= NC_UEList if (*Select)(unsafe.Pointer(p)).FpHaving != 0 { if (*Select)(unsafe.Pointer(p)).FselFlags&U32(SF_Aggregate) == U32(0) { - Xsqlite3ErrorMsg(tls, pParse, ts+7332, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+7321, 0) return WRC_Abort } if Xsqlite3ResolveExprNames(tls, bp, (*Select)(unsafe.Pointer(p)).FpHaving) != 0 { @@ -56869,7 +56863,7 @@ if (*Select)(unsafe.Pointer(p)).FpOrderBy != uintptr(0) && isCompound <= nCompound && - resolveOrderGroupBy(tls, bp, p, (*Select)(unsafe.Pointer(p)).FpOrderBy, ts+7234) != 0 { + resolveOrderGroupBy(tls, bp, p, (*Select)(unsafe.Pointer(p)).FpOrderBy, ts+7223) != 0 { return WRC_Abort } if (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { @@ -56880,7 +56874,7 @@ if pGroupBy != 0 { var pItem uintptr - if resolveOrderGroupBy(tls, bp, p, pGroupBy, ts+7371) != 0 || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { + if resolveOrderGroupBy(tls, bp, p, pGroupBy, ts+7360) != 0 || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { return WRC_Abort } i = 0 @@ -56892,7 +56886,7 @@ { if (*Expr)(unsafe.Pointer((*ExprList_item)(unsafe.Pointer(pItem)).FpExpr)).Fflags&U32(EP_Agg) != U32(0) { Xsqlite3ErrorMsg(tls, pParse, - ts+7377, 0) + ts+7366, 0) return WRC_Abort } @@ -57756,7 +57750,7 @@ var mxHeight int32 = *(*int32)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb + 116 + 3*4)) if nHeight > mxHeight { Xsqlite3ErrorMsg(tls, pParse, - ts+7436, libc.VaList(bp, mxHeight)) + ts+7425, libc.VaList(bp, mxHeight)) rc = SQLITE_ERROR } return rc @@ -58005,10 +57999,10 @@ nExprElem = 1 } if nExprElem != 
nElem { - Xsqlite3ErrorMsg(tls, pParse, ts+7484, + Xsqlite3ErrorMsg(tls, pParse, ts+7473, libc.VaList(bp, nExprElem, func() uintptr { if nExprElem > 1 { - return ts + 7528 + return ts + 7517 } return ts + 1554 }(), nElem)) @@ -58049,7 +58043,7 @@ !(int32((*Parse)(unsafe.Pointer(pParse)).FeParseMode) >= PARSE_MODE_RENAME) { Xsqlite3ExprDeferredDelete(tls, pParse, pLeft) Xsqlite3ExprDeferredDelete(tls, pParse, pRight) - return Xsqlite3Expr(tls, db, TK_INTEGER, ts+7530) + return Xsqlite3Expr(tls, db, TK_INTEGER, ts+7519) } else { return Xsqlite3PExpr(tls, pParse, TK_AND, pLeft, pRight) } @@ -58075,7 +58069,7 @@ if pList != 0 && (*ExprList)(unsafe.Pointer(pList)).FnExpr > *(*int32)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb + 116 + 6*4)) && !(int32((*Parse)(unsafe.Pointer(pParse)).Fnested) != 0) { - Xsqlite3ErrorMsg(tls, pParse, ts+7532, libc.VaList(bp, pToken)) + Xsqlite3ErrorMsg(tls, pParse, ts+7521, libc.VaList(bp, pToken)) } *(*uintptr)(unsafe.Pointer(pNew + 20)) = pList *(*U32)(unsafe.Pointer(pNew + 4)) |= U32(EP_HasFunc) @@ -58103,7 +58097,7 @@ if (*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_FromDDL) != U32(0) { if (*FuncDef)(unsafe.Pointer(pDef)).FfuncFlags&U32(SQLITE_FUNC_DIRECT) != U32(0) || (*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).Fflags&uint64(SQLITE_TrustedSchema) == uint64(0) { - Xsqlite3ErrorMsg(tls, pParse, ts+7566, libc.VaList(bp, pExpr)) + Xsqlite3ErrorMsg(tls, pParse, ts+7555, libc.VaList(bp, pExpr)) } } } @@ -58150,7 +58144,7 @@ } if bOk == 0 || *(*I64)(unsafe.Pointer(bp + 8)) < int64(1) || *(*I64)(unsafe.Pointer(bp + 8)) > I64(*(*int32)(unsafe.Pointer(db + 116 + 9*4))) { - Xsqlite3ErrorMsg(tls, pParse, ts+7586, + Xsqlite3ErrorMsg(tls, pParse, ts+7575, libc.VaList(bp, *(*int32)(unsafe.Pointer(db + 116 + 9*4)))) Xsqlite3RecordErrorOffsetOfExpr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pExpr) return @@ -58175,7 +58169,7 @@ } (*Expr)(unsafe.Pointer(pExpr)).FiColumn = x if int32(x) > *(*int32)(unsafe.Pointer(db + 116 + 9*4)) { - Xsqlite3ErrorMsg(tls, pParse, ts+7629, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+7618, 0) Xsqlite3RecordErrorOffsetOfExpr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pExpr) } } @@ -58750,7 +58744,7 @@ if !(int32((*Expr)(unsafe.Pointer(pExpr)).Fop) != TK_SELECT && (*IdList)(unsafe.Pointer(pColumns)).FnId != libc.AssignInt32(&n, Xsqlite3ExprVectorSize(tls, pExpr))) { goto __3 } - Xsqlite3ErrorMsg(tls, pParse, ts+7652, + Xsqlite3ErrorMsg(tls, pParse, ts+7641, libc.VaList(bp, (*IdList)(unsafe.Pointer(pColumns)).FnId, n)) goto vector_append_error __3: @@ -58873,7 +58867,7 @@ var mx int32 = *(*int32)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb + 116 + 2*4)) if pEList != 0 && (*ExprList)(unsafe.Pointer(pEList)).FnExpr > mx { - Xsqlite3ErrorMsg(tls, pParse, ts+7682, libc.VaList(bp, zObject)) + Xsqlite3ErrorMsg(tls, pParse, ts+7671, libc.VaList(bp, zObject)) } } @@ -58929,10 +58923,10 @@ // "false" EP_IsFalse // anything else 0 func Xsqlite3IsTrueOrFalse(tls *libc.TLS, zIn uintptr) U32 { - if Xsqlite3StrICmp(tls, zIn, ts+6764) == 0 { + if Xsqlite3StrICmp(tls, zIn, ts+7694) == 0 { return U32(EP_IsTrue) } - if Xsqlite3StrICmp(tls, zIn, ts+6769) == 0 { + if Xsqlite3StrICmp(tls, zIn, ts+7699) == 0 { return U32(EP_IsFalse) } return U32(0) @@ -60006,7 +60000,7 @@ } if (*Select)(unsafe.Pointer(pSel)).FpLimit != 0 { var db uintptr = (*Parse)(unsafe.Pointer(pParse)).Fdb - pLimit = Xsqlite3Expr(tls, db, TK_INTEGER, ts+7530) + pLimit = Xsqlite3Expr(tls, db, TK_INTEGER, ts+7519) if pLimit != 0 { (*Expr)(unsafe.Pointer(pLimit)).FaffExpr = 
int8(SQLITE_AFF_NUMERIC) pLimit = Xsqlite3PExpr(tls, pParse, TK_NE, @@ -60444,6 +60438,7 @@ func Xsqlite3ExprCodeGeneratedColumn(tls *libc.TLS, pParse uintptr, pTab uintptr, pCol uintptr, regOut int32) { var iAddr int32 var v uintptr = (*Parse)(unsafe.Pointer(pParse)).FpVdbe + var nErr int32 = (*Parse)(unsafe.Pointer(pParse)).FnErr if (*Parse)(unsafe.Pointer(pParse)).FiSelfTab > 0 { iAddr = Xsqlite3VdbeAddOp3(tls, v, OP_IfNullRow, (*Parse)(unsafe.Pointer(pParse)).FiSelfTab-1, 0, regOut) @@ -60457,6 +60452,9 @@ if iAddr != 0 { Xsqlite3VdbeJumpHere(tls, v, iAddr) } + if (*Parse)(unsafe.Pointer(pParse)).FnErr > nErr { + (*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).FerrByteOffset = -1 + } } // Generate code to extract the value of the iCol-th column of a table. @@ -60675,6 +60673,7 @@ var p uintptr var v uintptr for p = (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr; p != 0; p = (*IndexedExpr)(unsafe.Pointer(p)).FpIENext { + var exprAff U8 var iDataCur int32 = (*IndexedExpr)(unsafe.Pointer(p)).FiDataCur if iDataCur < 0 { continue @@ -60688,6 +60687,14 @@ if Xsqlite3ExprCompare(tls, uintptr(0), pExpr, (*IndexedExpr)(unsafe.Pointer(p)).FpExpr, iDataCur) != 0 { continue } + + exprAff = U8(Xsqlite3ExprAffinity(tls, pExpr)) + if int32(exprAff) <= SQLITE_AFF_BLOB && int32((*IndexedExpr)(unsafe.Pointer(p)).Faff) != SQLITE_AFF_BLOB || + int32(exprAff) == SQLITE_AFF_TEXT && int32((*IndexedExpr)(unsafe.Pointer(p)).Faff) != SQLITE_AFF_TEXT || + int32(exprAff) >= SQLITE_AFF_NUMERIC && int32((*IndexedExpr)(unsafe.Pointer(p)).Faff) != SQLITE_AFF_NUMERIC { + continue + } + v = (*Parse)(unsafe.Pointer(pParse)).FpVdbe if (*IndexedExpr)(unsafe.Pointer(p)).FbMaybeNullRow != 0 { @@ -61461,7 +61468,7 @@ if !((*Expr)(unsafe.Pointer(pExpr)).FiTable != n1) { goto __122 } - Xsqlite3ErrorMsg(tls, pParse, ts+7652, + Xsqlite3ErrorMsg(tls, pParse, ts+7641, libc.VaList(bp+24, (*Expr)(unsafe.Pointer(pExpr)).FiTable, n1)) __122: ; @@ -61483,11 +61490,10 @@ return target __50: - if !(!((*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_Collate) != U32(0)) && - (*Expr)(unsafe.Pointer(pExpr)).FpLeft != 0 && - int32((*Expr)(unsafe.Pointer((*Expr)(unsafe.Pointer(pExpr)).FpLeft)).Fop) == TK_FUNCTION) { + if !!((*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_Collate) != U32(0)) { goto __123 } + inReg = Xsqlite3ExprCodeTarget(tls, pParse, (*Expr)(unsafe.Pointer(pExpr)).FpLeft, target) if !(inReg != target) { goto __125 @@ -61558,13 +61564,19 @@ ; __127: ; - addrINR = Xsqlite3VdbeAddOp1(tls, v, OP_IfNullRow, (*Expr)(unsafe.Pointer(pExpr)).FiTable) + addrINR = Xsqlite3VdbeAddOp3(tls, v, OP_IfNullRow, (*Expr)(unsafe.Pointer(pExpr)).FiTable, 0, target) (*Parse)(unsafe.Pointer(pParse)).FokConstFactor = U8(0) inReg = Xsqlite3ExprCodeTarget(tls, pParse, (*Expr)(unsafe.Pointer(pExpr)).FpLeft, target) (*Parse)(unsafe.Pointer(pParse)).FokConstFactor = okConstFactor + if !(inReg != target) { + goto __130 + } + Xsqlite3VdbeAddOp2(tls, v, OP_SCopy, inReg, target) + inReg = target +__130: + ; Xsqlite3VdbeJumpHere(tls, v, addrINR) - Xsqlite3VdbeChangeP3(tls, v, addrINR, inReg) goto __5 __56: @@ -61577,15 +61589,15 @@ nExpr = (*ExprList)(unsafe.Pointer(pEList)).FnExpr endLabel = Xsqlite3VdbeMakeLabel(tls, pParse) if !(libc.AssignUintptr(&pX, (*Expr)(unsafe.Pointer(pExpr)).FpLeft) != uintptr(0)) { - goto __130 + goto __131 } pDel = Xsqlite3ExprDup(tls, db1, pX, 0) if !((*Sqlite3)(unsafe.Pointer(db1)).FmallocFailed != 0) { - goto __131 + goto __132 } Xsqlite3ExprDelete(tls, db1, pDel) goto __5 -__131: +__132: ; exprToRegister(tls, pDel, 
exprCodeVector(tls, pParse, pDel, bp+40)) @@ -61595,22 +61607,22 @@ pTest = bp + 100 *(*int32)(unsafe.Pointer(bp + 40)) = 0 -__130: +__131: ; i1 = 0 -__132: +__133: if !(i1 < nExpr-1) { - goto __134 + goto __135 } if !(pX != 0) { - goto __135 + goto __136 } (*Expr)(unsafe.Pointer(bp + 100)).FpRight = (*ExprList_item)(unsafe.Pointer(aListelem + uintptr(i1)*20)).FpExpr - goto __136 -__135: - pTest = (*ExprList_item)(unsafe.Pointer(aListelem + uintptr(i1)*20)).FpExpr + goto __137 __136: + pTest = (*ExprList_item)(unsafe.Pointer(aListelem + uintptr(i1)*20)).FpExpr +__137: ; nextCase = Xsqlite3VdbeMakeLabel(tls, pParse) @@ -61619,21 +61631,21 @@ Xsqlite3ExprCode(tls, pParse, (*ExprList_item)(unsafe.Pointer(aListelem+uintptr(i1+1)*20)).FpExpr, target) Xsqlite3VdbeGoto(tls, v, endLabel) Xsqlite3VdbeResolveLabel(tls, v, nextCase) - goto __133 -__133: - i1 = i1 + 2 - goto __132 goto __134 __134: + i1 = i1 + 2 + goto __133 + goto __135 +__135: ; if !(nExpr&1 != 0) { - goto __137 + goto __138 } Xsqlite3ExprCode(tls, pParse, (*ExprList_item)(unsafe.Pointer(pEList+8+uintptr(nExpr-1)*20)).FpExpr, target) - goto __138 -__137: - Xsqlite3VdbeAddOp2(tls, v, OP_Null, 0, target) + goto __139 __138: + Xsqlite3VdbeAddOp2(tls, v, OP_Null, 0, target) +__139: ; Xsqlite3ExprDelete(tls, db1, pDel) setDoNotMergeFlagOnCopy(tls, v) @@ -61643,27 +61655,27 @@ __57: ; if !(!(int32((*Parse)(unsafe.Pointer(pParse)).FpTriggerTab) != 0) && !(int32((*Parse)(unsafe.Pointer(pParse)).Fnested) != 0)) { - goto __139 + goto __140 } Xsqlite3ErrorMsg(tls, pParse, ts+8082, 0) return 0 -__139: +__140: ; if !(int32((*Expr)(unsafe.Pointer(pExpr)).FaffExpr) == OE_Abort) { - goto __140 + goto __141 } Xsqlite3MayAbort(tls, pParse) -__140: +__141: ; if !(int32((*Expr)(unsafe.Pointer(pExpr)).FaffExpr) == OE_Ignore) { - goto __141 + goto __142 } Xsqlite3VdbeAddOp4(tls, v, OP_Halt, SQLITE_OK, OE_Ignore, 0, *(*uintptr)(unsafe.Pointer(pExpr + 8)), 0) - goto __142 -__141: + goto __143 +__142: Xsqlite3HaltConstraint(tls, pParse, func() int32 { if (*Parse)(unsafe.Pointer(pParse)).FpTriggerTab != 0 { @@ -61672,7 +61684,7 @@ return SQLITE_ERROR }(), int32((*Expr)(unsafe.Pointer(pExpr)).FaffExpr), *(*uintptr)(unsafe.Pointer(pExpr + 8)), int8(0), uint8(0)) -__142: +__143: ; goto __5 @@ -64340,7 +64352,7 @@ return SQLITE_NOMEM } if Xsqlite3_strnicmp(tls, zSql, ts+10922, 7) != 0 { - return Xsqlite3CorruptError(tls, 113494) + return Xsqlite3CorruptError(tls, 113516) } (*Sqlite3)(unsafe.Pointer(db)).Finit.FiDb = func() uint8 { if bTemp != 0 { @@ -64357,7 +64369,7 @@ } if rc == SQLITE_OK && ((*Parse)(unsafe.Pointer(p)).FpNewTable == uintptr(0) && (*Parse)(unsafe.Pointer(p)).FpNewIndex == uintptr(0) && (*Parse)(unsafe.Pointer(p)).FpNewTrigger == uintptr(0)) { - rc = Xsqlite3CorruptError(tls, 113505) + rc = Xsqlite3CorruptError(tls, 113527) } (*Sqlite3)(unsafe.Pointer(db)).Finit.FiDb = U8(0) @@ -65278,7 +65290,7 @@ goto __2 } - rc = Xsqlite3CorruptError(tls, 114441) + rc = Xsqlite3CorruptError(tls, 114463) goto drop_column_done __2: ; @@ -69642,6 +69654,12 @@ pExpr = Xsqlite3PExpr(tls, pParse, TK_UPLUS, pExpr, uintptr(0)) __11: ; + if !(pExpr != 0 && int32((*Expr)(unsafe.Pointer(pExpr)).Fop) != TK_RAISE) { + goto __12 + } + (*Expr)(unsafe.Pointer(pExpr)).FaffExpr = (*Column)(unsafe.Pointer(pCol)).Faffinity +__12: + ; Xsqlite3ColumnSetExpr(tls, pParse, pTab, pCol, pExpr) pExpr = uintptr(0) goto generated_done @@ -70806,7 +70824,7 @@ if Xsqlite3_strnicmp(tls, (*Table)(unsafe.Pointer(pTab)).FzName+uintptr(7), ts+3286, 4) == 0 { return 0 } - if 
Xsqlite3_strnicmp(tls, (*Table)(unsafe.Pointer(pTab)).FzName+uintptr(7), ts+7133, 10) == 0 { + if Xsqlite3_strnicmp(tls, (*Table)(unsafe.Pointer(pTab)).FzName+uintptr(7), ts+7122, 10) == 0 { return 0 } return 1 @@ -72052,7 +72070,7 @@ goto __101 } Xsqlite3ErrorMsg(tls, pParse, ts+14140, 0) - (*Parse)(unsafe.Pointer(pParse)).Frc = Xsqlite3CorruptError(tls, 121835) + (*Parse)(unsafe.Pointer(pParse)).Frc = Xsqlite3CorruptError(tls, 121859) goto exit_create_index __101: ; @@ -74097,7 +74115,7 @@ goto __16 __15: wcf = U16(WHERE_ONEPASS_DESIRED | WHERE_DUPLICATES_OK) - if !((*NameContext)(unsafe.Pointer(bp+8)).FncFlags&NC_VarSelect != 0) { + if !((*NameContext)(unsafe.Pointer(bp+8)).FncFlags&NC_Subquery != 0) { goto __23 } bComplex = 1 @@ -80564,7 +80582,7 @@ if !!(Xsqlite3SafetyCheckOk(tls, db) != 0) { goto __1 } - return Xsqlite3MisuseError(tls, 131895) + return Xsqlite3MisuseError(tls, 131931) __1: ; if !(zSql == uintptr(0)) { @@ -81961,7 +81979,7 @@ } else if (*FuncDef)(unsafe.Pointer(p)).FxFinalize != uintptr(0) { zType = ts + 17513 } else { - zType = ts + 7528 + zType = ts + 7517 } Xsqlite3VdbeMultiLoad(tls, v, 1, ts+17515, libc.VaList(bp, (*FuncDef)(unsafe.Pointer(p)).FzName, isBuiltin, @@ -82122,6 +82140,7 @@ var zErr2 uintptr var k3 int32 var pCheck uintptr + var jmp7 int32 var jmp6 int32 var iCol1 int32 var uniqOk int32 @@ -83440,7 +83459,7 @@ goto __217 } pMod = (*HashElem)(unsafe.Pointer(j1)).Fdata - Xsqlite3VdbeMultiLoad(tls, v, 1, ts+7528, libc.VaList(bp+264, (*Module)(unsafe.Pointer(pMod)).FzName)) + Xsqlite3VdbeMultiLoad(tls, v, 1, ts+7517, libc.VaList(bp+264, (*Module)(unsafe.Pointer(pMod)).FzName)) goto __216 __216: j1 = (*HashElem)(unsafe.Pointer(j1)).Fnext @@ -83456,7 +83475,7 @@ if !(i6 < int32(uint32(unsafe.Sizeof(aPragmaName))/uint32(unsafe.Sizeof(PragmaName{})))) { goto __220 } - Xsqlite3VdbeMultiLoad(tls, v, 1, ts+7528, libc.VaList(bp+272, aPragmaName[i6].FzName)) + Xsqlite3VdbeMultiLoad(tls, v, 1, ts+7517, libc.VaList(bp+272, aPragmaName[i6].FzName)) goto __219 __219: i6++ @@ -84261,80 +84280,94 @@ jmp4 = integrityCheckResultRow(tls, v) Xsqlite3VdbeJumpHere(tls, v, jmp21) + if !((*Table)(unsafe.Pointer(pTab9)).FtabFlags&U32(TF_WithoutRowid) == U32(0)) { + goto __345 + } + Xsqlite3VdbeAddOp2(tls, v, OP_IdxRowid, *(*int32)(unsafe.Pointer(bp + 568))+j4, 3) + jmp7 = Xsqlite3VdbeAddOp3(tls, v, OP_Eq, 3, 0, r1+int32((*Index)(unsafe.Pointer(pIdx5)).FnColumn)-1) + + Xsqlite3VdbeLoadString(tls, v, 3, + ts+17929) + Xsqlite3VdbeAddOp3(tls, v, OP_Concat, 7, 3, 3) + Xsqlite3VdbeLoadString(tls, v, 4, ts+17965) + Xsqlite3VdbeGoto(tls, v, jmp5-1) + Xsqlite3VdbeJumpHere(tls, v, jmp7) +__345: + ; label6 = 0 kk = 0 -__345: +__346: if !(kk < int32((*Index)(unsafe.Pointer(pIdx5)).FnKeyCol)) { - goto __347 + goto __348 } if !(*(*uintptr)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx5)).FazColl + uintptr(kk)*4)) == uintptr(unsafe.Pointer(&Xsqlite3StrBINARY))) { - goto __348 + goto __349 } - goto __346 -__348: + goto __347 +__349: ; if !(label6 == 0) { - goto __349 + goto __350 } label6 = Xsqlite3VdbeMakeLabel(tls, pParse) -__349: +__350: ; Xsqlite3VdbeAddOp3(tls, v, OP_Column, *(*int32)(unsafe.Pointer(bp + 568))+j4, kk, 3) Xsqlite3VdbeAddOp3(tls, v, OP_Ne, 3, label6, r1+kk) - goto __346 -__346: - kk++ - goto __345 goto __347 __347: + kk++ + goto __346 + goto __348 +__348: ; if !(label6 != 0) { - goto __350 + goto __351 } jmp6 = Xsqlite3VdbeAddOp0(tls, v, OP_Goto) Xsqlite3VdbeResolveLabel(tls, v, label6) Xsqlite3VdbeLoadString(tls, v, 3, ts+17903) Xsqlite3VdbeAddOp3(tls, v, OP_Concat, 7, 3, 3) - 
Xsqlite3VdbeLoadString(tls, v, 4, ts+17929) + Xsqlite3VdbeLoadString(tls, v, 4, ts+17976) Xsqlite3VdbeGoto(tls, v, jmp5-1) Xsqlite3VdbeJumpHere(tls, v, jmp6) -__350: +__351: ; if !(int32((*Index)(unsafe.Pointer(pIdx5)).FonError) != OE_None) { - goto __351 + goto __352 } uniqOk = Xsqlite3VdbeMakeLabel(tls, pParse) kk = 0 -__352: +__353: if !(kk < int32((*Index)(unsafe.Pointer(pIdx5)).FnKeyCol)) { - goto __354 + goto __355 } iCol1 = int32(*(*I16)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx5)).FaiColumn + uintptr(kk)*2))) if !(iCol1 >= 0 && uint32(int32(*(*uint8)(unsafe.Pointer((*Table)(unsafe.Pointer(pTab9)).FaCol + uintptr(iCol1)*16 + 4))&0xf>>0)) != 0) { - goto __355 + goto __356 } - goto __353 -__355: + goto __354 +__356: ; Xsqlite3VdbeAddOp2(tls, v, OP_IsNull, r1+kk, uniqOk) - goto __353 -__353: - kk++ - goto __352 goto __354 __354: + kk++ + goto __353 + goto __355 +__355: ; jmp61 = Xsqlite3VdbeAddOp1(tls, v, OP_Next, *(*int32)(unsafe.Pointer(bp + 568))+j4) Xsqlite3VdbeGoto(tls, v, uniqOk) Xsqlite3VdbeJumpHere(tls, v, jmp61) Xsqlite3VdbeAddOp4Int(tls, v, OP_IdxGT, *(*int32)(unsafe.Pointer(bp + 568))+j4, uniqOk, r1, int32((*Index)(unsafe.Pointer(pIdx5)).FnKeyCol)) - Xsqlite3VdbeLoadString(tls, v, 3, ts+17956) + Xsqlite3VdbeLoadString(tls, v, 3, ts+18003) Xsqlite3VdbeGoto(tls, v, jmp5) Xsqlite3VdbeResolveLabel(tls, v, uniqOk) -__351: +__352: ; Xsqlite3VdbeJumpHere(tls, v, jmp4) Xsqlite3ResolvePartIdxLabel(tls, pParse, *(*int32)(unsafe.Pointer(bp + 576))) @@ -84351,20 +84384,20 @@ Xsqlite3VdbeAddOp2(tls, v, OP_Next, *(*int32)(unsafe.Pointer(bp + 564)), loopTop) Xsqlite3VdbeJumpHere(tls, v, loopTop-1) if !!(isQuick != 0) { - goto __356 + goto __357 } - Xsqlite3VdbeLoadString(tls, v, 2, ts+17983) + Xsqlite3VdbeLoadString(tls, v, 2, ts+18030) j4 = 0 pIdx5 = (*Table)(unsafe.Pointer(pTab9)).FpIndex -__357: +__358: if !(pIdx5 != 0) { - goto __359 + goto __360 } if !(pPk1 == pIdx5) { - goto __360 + goto __361 } - goto __358 -__360: + goto __359 +__361: ; Xsqlite3VdbeAddOp2(tls, v, OP_Count, *(*int32)(unsafe.Pointer(bp + 568))+j4, 3) addr1 = Xsqlite3VdbeAddOp3(tls, v, OP_Eq, 8+j4, 0, 3) @@ -84373,21 +84406,21 @@ Xsqlite3VdbeAddOp3(tls, v, OP_Concat, 4, 2, 3) integrityCheckResultRow(tls, v) Xsqlite3VdbeJumpHere(tls, v, addr1) - goto __358 -__358: - pIdx5 = (*Index)(unsafe.Pointer(pIdx5)).FpNext - j4++ - goto __357 goto __359 __359: + pIdx5 = (*Index)(unsafe.Pointer(pIdx5)).FpNext + j4++ + goto __358 + goto __360 +__360: ; if !(pPk1 != 0) { - goto __361 + goto __362 } Xsqlite3ReleaseTempRange(tls, pParse, r2, int32((*Index)(unsafe.Pointer(pPk1)).FnKeyCol)) -__361: +__362: ; -__356: +__357: ; goto __291 __291: @@ -84405,14 +84438,14 @@ ; aOp2 = Xsqlite3VdbeAddOpList(tls, v, int32(uint32(unsafe.Sizeof(endCode))/uint32(unsafe.Sizeof(VdbeOpList{}))), uintptr(unsafe.Pointer(&endCode)), iLn5) if !(aOp2 != 0) { - goto __362 + goto __363 } (*VdbeOp)(unsafe.Pointer(aOp2)).Fp2 = 1 - *(*int32)(unsafe.Pointer(bp + 560)) (*VdbeOp)(unsafe.Pointer(aOp2 + 2*20)).Fp4type = int8(-1) - *(*uintptr)(unsafe.Pointer(aOp2 + 2*20 + 16)) = ts + 18012 + *(*uintptr)(unsafe.Pointer(aOp2 + 2*20 + 16)) = ts + 18059 (*VdbeOp)(unsafe.Pointer(aOp2 + 5*20)).Fp4type = int8(-1) *(*uintptr)(unsafe.Pointer(aOp2 + 5*20 + 16)) = Xsqlite3ErrStr(tls, SQLITE_CORRUPT) -__362: +__363: ; Xsqlite3VdbeChangeP3(tls, v, 0, Xsqlite3VdbeCurrentAddr(tls, v)-2) @@ -84420,27 +84453,27 @@ __45: if !!(zRight != 0) { - goto __363 + goto __364 } if !(Xsqlite3ReadSchema(tls, pParse) != 0) { - goto __365 + goto __366 } goto pragma_out -__365: +__366: ; 
returnSingleText(tls, v, encnames1[(*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).Fenc].FzName) - goto __364 -__363: + goto __365 +__364: if !((*Sqlite3)(unsafe.Pointer(db)).FmDbFlags&U32(DBFLAG_EncodingFixed) == U32(0)) { - goto __366 + goto __367 } pEnc = uintptr(unsafe.Pointer(&encnames1)) -__367: +__368: if !((*EncName)(unsafe.Pointer(pEnc)).FzName != 0) { - goto __369 + goto __370 } if !(0 == Xsqlite3StrICmp(tls, zRight, (*EncName)(unsafe.Pointer(pEnc)).FzName)) { - goto __370 + goto __371 } if (*EncName)(unsafe.Pointer(pEnc)).Fenc != 0 { enc = (*EncName)(unsafe.Pointer(pEnc)).Fenc @@ -84449,25 +84482,25 @@ } (*Schema)(unsafe.Pointer((*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb)).FpSchema)).Fenc = enc Xsqlite3SetTextEncoding(tls, db, enc) - goto __369 -__370: + goto __370 +__371: ; - goto __368 -__368: - pEnc += 8 - goto __367 goto __369 __369: + pEnc += 8 + goto __368 + goto __370 +__370: ; if !!(int32((*EncName)(unsafe.Pointer(pEnc)).FzName) != 0) { - goto __371 + goto __372 } - Xsqlite3ErrorMsg(tls, pParse, ts+18015, libc.VaList(bp+456, zRight)) -__371: + Xsqlite3ErrorMsg(tls, pParse, ts+18062, libc.VaList(bp+456, zRight)) +__372: ; -__366: +__367: ; -__364: +__365: ; goto __15 @@ -84475,15 +84508,15 @@ iCookie = int32((*PragmaName)(unsafe.Pointer(pPragma)).FiArg) Xsqlite3VdbeUsesBtree(tls, v, iDb) if !(zRight != 0 && int32((*PragmaName)(unsafe.Pointer(pPragma)).FmPragFlg)&PragFlg_ReadOnly == 0) { - goto __372 + goto __373 } aOp3 = Xsqlite3VdbeAddOpList(tls, v, int32(uint32(unsafe.Sizeof(setCookie))/uint32(unsafe.Sizeof(VdbeOpList{}))), uintptr(unsafe.Pointer(&setCookie)), 0) if !(0 != 0) { - goto __374 + goto __375 } goto __15 -__374: +__375: ; (*VdbeOp)(unsafe.Pointer(aOp3)).Fp1 = iDb (*VdbeOp)(unsafe.Pointer(aOp3 + 1*20)).Fp1 = iDb @@ -84491,41 +84524,41 @@ (*VdbeOp)(unsafe.Pointer(aOp3 + 1*20)).Fp3 = Xsqlite3Atoi(tls, zRight) (*VdbeOp)(unsafe.Pointer(aOp3 + 1*20)).Fp5 = U16(1) if !(iCookie == BTREE_SCHEMA_VERSION && (*Sqlite3)(unsafe.Pointer(db)).Fflags&uint64(SQLITE_Defensive) != uint64(0)) { - goto __375 + goto __376 } (*VdbeOp)(unsafe.Pointer(aOp3 + 1*20)).Fopcode = U8(OP_Noop) -__375: +__376: ; - goto __373 -__372: + goto __374 +__373: ; aOp4 = Xsqlite3VdbeAddOpList(tls, v, int32(uint32(unsafe.Sizeof(readCookie))/uint32(unsafe.Sizeof(VdbeOpList{}))), uintptr(unsafe.Pointer(&readCookie)), 0) if !(0 != 0) { - goto __376 + goto __377 } goto __15 -__376: +__377: ; (*VdbeOp)(unsafe.Pointer(aOp4)).Fp1 = iDb (*VdbeOp)(unsafe.Pointer(aOp4 + 1*20)).Fp1 = iDb (*VdbeOp)(unsafe.Pointer(aOp4 + 1*20)).Fp3 = iCookie Xsqlite3VdbeReusable(tls, v) -__373: +__374: ; goto __15 __47: i10 = 0 (*Parse)(unsafe.Pointer(pParse)).FnMem = 1 -__377: +__378: if !(libc.AssignUintptr(&zOpt, Xsqlite3_compileoption_get(tls, libc.PostIncInt32(&i10, 1))) != uintptr(0)) { - goto __378 + goto __379 } Xsqlite3VdbeLoadString(tls, v, 1, zOpt) Xsqlite3VdbeAddOp2(tls, v, OP_ResultRow, 1, 1) - goto __377 -__378: + goto __378 +__379: ; Xsqlite3VdbeReusable(tls, v) @@ -84540,31 +84573,31 @@ }() eMode2 = SQLITE_CHECKPOINT_PASSIVE if !(zRight != 0) { - goto __379 + goto __380 } if !(Xsqlite3StrICmp(tls, zRight, ts+17345) == 0) { - goto __380 + goto __381 } eMode2 = SQLITE_CHECKPOINT_FULL - goto __381 -__380: - if !(Xsqlite3StrICmp(tls, zRight, ts+18040) == 0) { - goto __382 + goto __382 +__381: + if !(Xsqlite3StrICmp(tls, zRight, ts+18087) == 0) { + goto __383 } eMode2 = SQLITE_CHECKPOINT_RESTART - goto __383 -__382: + goto __384 +__383: if !(Xsqlite3StrICmp(tls, zRight, ts+17498) == 0) { - 
goto __384 + goto __385 } eMode2 = SQLITE_CHECKPOINT_TRUNCATE -__384: +__385: ; -__383: +__384: ; -__381: +__382: ; -__379: +__380: ; (*Parse)(unsafe.Pointer(pParse)).FnMem = 3 Xsqlite3VdbeAddOp3(tls, v, OP_Checkpoint, iBt, eMode2, 1) @@ -84574,10 +84607,10 @@ __49: if !(zRight != 0) { - goto __385 + goto __386 } Xsqlite3_wal_autocheckpoint(tls, db, Xsqlite3Atoi(tls, zRight)) -__385: +__386: ; returnSingleInt(tls, v, func() int64 { @@ -84597,19 +84630,19 @@ __51: if !(zRight != 0) { - goto __386 + goto __387 } opMask = U32(Xsqlite3Atoi(tls, zRight)) if !(opMask&U32(0x02) == U32(0)) { - goto __388 + goto __389 } goto __15 -__388: +__389: ; - goto __387 -__386: - opMask = U32(0xfffe) + goto __388 __387: + opMask = U32(0xfffe) +__388: ; iTabCur = libc.PostIncInt32(&(*Parse)(unsafe.Pointer(pParse)).FnTab, 1) iDbLast = func() int32 { @@ -84618,86 +84651,86 @@ } return (*Sqlite3)(unsafe.Pointer(db)).FnDb - 1 }() -__389: +__390: if !(iDb <= iDbLast) { - goto __391 + goto __392 } if !(iDb == 1) { - goto __392 + goto __393 } - goto __390 -__392: + goto __391 +__393: ; Xsqlite3CodeVerifySchema(tls, pParse, iDb) pSchema = (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + uintptr(iDb)*16)).FpSchema k4 = (*Hash)(unsafe.Pointer(pSchema + 8)).Ffirst -__393: +__394: if !(k4 != 0) { - goto __395 + goto __396 } pTab10 = (*HashElem)(unsafe.Pointer(k4)).Fdata if !((*Table)(unsafe.Pointer(pTab10)).FtabFlags&U32(TF_StatsUsed) == U32(0)) { - goto __396 + goto __397 } - goto __394 -__396: + goto __395 +__397: ; szThreshold = LogEst(int32((*Table)(unsafe.Pointer(pTab10)).FnRowLogEst) + 46) pIdx6 = (*Table)(unsafe.Pointer(pTab10)).FpIndex -__397: +__398: if !(pIdx6 != 0) { - goto __399 + goto __400 } if !!(int32(*(*uint16)(unsafe.Pointer(pIdx6 + 56))&0x80>>7) != 0) { - goto __400 + goto __401 } szThreshold = int16(0) - goto __399 -__400: + goto __400 +__401: ; - goto __398 -__398: - pIdx6 = (*Index)(unsafe.Pointer(pIdx6)).FpNext - goto __397 goto __399 __399: + pIdx6 = (*Index)(unsafe.Pointer(pIdx6)).FpNext + goto __398 + goto __400 +__400: ; if !(szThreshold != 0) { - goto __401 + goto __402 } Xsqlite3OpenTable(tls, pParse, iTabCur, iDb, pTab10, OP_OpenRead) Xsqlite3VdbeAddOp3(tls, v, OP_IfSmaller, iTabCur, int32(U32(Xsqlite3VdbeCurrentAddr(tls, v)+2)+opMask&U32(1)), int32(szThreshold)) -__401: +__402: ; - zSubSql = Xsqlite3MPrintf(tls, db, ts+18048, + zSubSql = Xsqlite3MPrintf(tls, db, ts+18095, libc.VaList(bp+464, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*16)).FzDbSName, (*Table)(unsafe.Pointer(pTab10)).FzName)) if !(opMask&U32(0x01) != 0) { - goto __402 + goto __403 } r11 = Xsqlite3GetTempReg(tls, pParse) Xsqlite3VdbeAddOp4(tls, v, OP_String8, 0, r11, 0, zSubSql, -6) Xsqlite3VdbeAddOp2(tls, v, OP_ResultRow, r11, 1) - goto __403 -__402: - Xsqlite3VdbeAddOp4(tls, v, OP_SqlExec, 0, 0, 0, zSubSql, -6) + goto __404 __403: + Xsqlite3VdbeAddOp4(tls, v, OP_SqlExec, 0, 0, 0, zSubSql, -6) +__404: ; - goto __394 -__394: - k4 = (*HashElem)(unsafe.Pointer(k4)).Fnext - goto __393 goto __395 __395: + k4 = (*HashElem)(unsafe.Pointer(k4)).Fnext + goto __394 + goto __396 +__396: ; - goto __390 -__390: - iDb++ - goto __389 goto __391 __391: + iDb++ + goto __390 + goto __392 +__392: ; Xsqlite3VdbeAddOp0(tls, v, OP_Expire) goto __15 @@ -84705,36 +84738,36 @@ __52: ; if !(zRight != 0) { - goto __404 + goto __405 } Xsqlite3_busy_timeout(tls, db, Xsqlite3Atoi(tls, zRight)) -__404: +__405: ; returnSingleInt(tls, v, int64((*Sqlite3)(unsafe.Pointer(db)).FbusyTimeout)) goto __15 __53: if !(zRight != 0 
&& Xsqlite3DecOrHexToI64(tls, zRight, bp+580) == SQLITE_OK) { - goto __405 + goto __406 } Xsqlite3_soft_heap_limit64(tls, *(*Sqlite3_int64)(unsafe.Pointer(bp + 580))) -__405: +__406: ; returnSingleInt(tls, v, Xsqlite3_soft_heap_limit64(tls, int64(-1))) goto __15 __54: if !(zRight != 0 && Xsqlite3DecOrHexToI64(tls, zRight, bp+588) == SQLITE_OK) { - goto __406 + goto __407 } iPrior = Xsqlite3_hard_heap_limit64(tls, int64(-1)) if !(*(*Sqlite3_int64)(unsafe.Pointer(bp + 588)) > int64(0) && (iPrior == int64(0) || iPrior > *(*Sqlite3_int64)(unsafe.Pointer(bp + 588)))) { - goto __407 + goto __408 } Xsqlite3_hard_heap_limit64(tls, *(*Sqlite3_int64)(unsafe.Pointer(bp + 588))) -__407: +__408: ; -__406: +__407: ; returnSingleInt(tls, v, Xsqlite3_hard_heap_limit64(tls, int64(-1))) goto __15 @@ -84743,10 +84776,10 @@ if !(zRight != 0 && Xsqlite3DecOrHexToI64(tls, zRight, bp+596) == SQLITE_OK && *(*Sqlite3_int64)(unsafe.Pointer(bp + 596)) >= int64(0)) { - goto __408 + goto __409 } Xsqlite3_limit(tls, db, SQLITE_LIMIT_WORKER_THREADS, int32(*(*Sqlite3_int64)(unsafe.Pointer(bp + 596))&int64(0x7fffffff))) -__408: +__409: ; returnSingleInt(tls, v, int64(Xsqlite3_limit(tls, db, SQLITE_LIMIT_WORKER_THREADS, -1))) goto __15 @@ -84755,10 +84788,10 @@ if !(zRight != 0 && Xsqlite3DecOrHexToI64(tls, zRight, bp+604) == SQLITE_OK && *(*Sqlite3_int64)(unsafe.Pointer(bp + 604)) >= int64(0)) { - goto __409 + goto __410 } (*Sqlite3)(unsafe.Pointer(db)).FnAnalysisLimit = int32(*(*Sqlite3_int64)(unsafe.Pointer(bp + 604)) & int64(0x7fffffff)) -__409: +__410: ; returnSingleInt(tls, v, int64((*Sqlite3)(unsafe.Pointer(db)).FnAnalysisLimit)) goto __15 @@ -84766,10 +84799,10 @@ __15: ; if !(int32((*PragmaName)(unsafe.Pointer(pPragma)).FmPragFlg)&PragFlg_NoColumns1 != 0 && zRight != 0) { - goto __410 + goto __411 } -__410: +__411: ; pragma_out: Xsqlite3DbFree(tls, db, zLeft) @@ -84821,14 +84854,14 @@ {Fopcode: U8(OP_Goto), Fp2: int8(3)}, } var encnames1 = [9]EncName{ - {FzName: ts + 18066, Fenc: U8(SQLITE_UTF8)}, - {FzName: ts + 18071, Fenc: U8(SQLITE_UTF8)}, - {FzName: ts + 18077, Fenc: U8(SQLITE_UTF16LE)}, - {FzName: ts + 18086, Fenc: U8(SQLITE_UTF16BE)}, - {FzName: ts + 18095, Fenc: U8(SQLITE_UTF16LE)}, - {FzName: ts + 18103, Fenc: U8(SQLITE_UTF16BE)}, - {FzName: ts + 18111}, - {FzName: ts + 18118}, + {FzName: ts + 18113, Fenc: U8(SQLITE_UTF8)}, + {FzName: ts + 18118, Fenc: U8(SQLITE_UTF8)}, + {FzName: ts + 18124, Fenc: U8(SQLITE_UTF16LE)}, + {FzName: ts + 18133, Fenc: U8(SQLITE_UTF16BE)}, + {FzName: ts + 18142, Fenc: U8(SQLITE_UTF16LE)}, + {FzName: ts + 18150, Fenc: U8(SQLITE_UTF16BE)}, + {FzName: ts + 18158}, + {FzName: ts + 18165}, {}, } var setCookie = [2]VdbeOpList{ @@ -84880,7 +84913,7 @@ _ = argc _ = argv Xsqlite3StrAccumInit(tls, bp+32, uintptr(0), bp+56, int32(unsafe.Sizeof([200]int8{})), 0) - Xsqlite3_str_appendall(tls, bp+32, ts+18124) + Xsqlite3_str_appendall(tls, bp+32, ts+18171) i = 0 j = int32((*PragmaName)(unsafe.Pointer(pPragma)).FiPragCName) __1: @@ -84888,7 +84921,7 @@ goto __3 } { - Xsqlite3_str_appendf(tls, bp+32, ts+18139, libc.VaList(bp, int32(cSep), pragCName[j])) + Xsqlite3_str_appendf(tls, bp+32, ts+18186, libc.VaList(bp, int32(cSep), pragCName[j])) cSep = int8(',') } @@ -84901,16 +84934,16 @@ __3: ; if i == 0 { - Xsqlite3_str_appendf(tls, bp+32, ts+18146, libc.VaList(bp+16, (*PragmaName)(unsafe.Pointer(pPragma)).FzName)) + Xsqlite3_str_appendf(tls, bp+32, ts+18193, libc.VaList(bp+16, (*PragmaName)(unsafe.Pointer(pPragma)).FzName)) i++ } j = 0 if 
int32((*PragmaName)(unsafe.Pointer(pPragma)).FmPragFlg)&PragFlg_Result1 != 0 { - Xsqlite3_str_appendall(tls, bp+32, ts+18152) + Xsqlite3_str_appendall(tls, bp+32, ts+18199) j++ } if int32((*PragmaName)(unsafe.Pointer(pPragma)).FmPragFlg)&(PragFlg_SchemaOpt|PragFlg_SchemaReq) != 0 { - Xsqlite3_str_appendall(tls, bp+32, ts+18164) + Xsqlite3_str_appendall(tls, bp+32, ts+18211) j++ } Xsqlite3_str_append(tls, bp+32, ts+4957, 1) @@ -85093,13 +85126,13 @@ __3: ; Xsqlite3StrAccumInit(tls, bp+32, uintptr(0), uintptr(0), 0, *(*int32)(unsafe.Pointer((*PragmaVtab)(unsafe.Pointer(pTab)).Fdb + 116 + 1*4))) - Xsqlite3_str_appendall(tls, bp+32, ts+18179) + Xsqlite3_str_appendall(tls, bp+32, ts+18226) if *(*uintptr)(unsafe.Pointer(pCsr + 16 + 1*4)) != 0 { - Xsqlite3_str_appendf(tls, bp+32, ts+18187, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(pCsr + 16 + 1*4)))) + Xsqlite3_str_appendf(tls, bp+32, ts+18234, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(pCsr + 16 + 1*4)))) } Xsqlite3_str_appendall(tls, bp+32, (*PragmaName)(unsafe.Pointer((*PragmaVtab)(unsafe.Pointer(pTab)).FpName)).FzName) if *(*uintptr)(unsafe.Pointer(pCsr + 16)) != 0 { - Xsqlite3_str_appendf(tls, bp+32, ts+18191, libc.VaList(bp+16, *(*uintptr)(unsafe.Pointer(pCsr + 16)))) + Xsqlite3_str_appendf(tls, bp+32, ts+18238, libc.VaList(bp+16, *(*uintptr)(unsafe.Pointer(pCsr + 16)))) } zSql = Xsqlite3StrAccumFinish(tls, bp+32) if zSql == uintptr(0) { @@ -85176,12 +85209,12 @@ } else if *(*uintptr)(unsafe.Pointer((*InitData)(unsafe.Pointer(pData)).FpzErrMsg)) != uintptr(0) { } else if (*InitData)(unsafe.Pointer(pData)).FmInitFlags&U32(INITFLAG_AlterMask) != 0 { *(*uintptr)(unsafe.Pointer((*InitData)(unsafe.Pointer(pData)).FpzErrMsg)) = Xsqlite3MPrintf(tls, db, - ts+18195, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(azObj)), *(*uintptr)(unsafe.Pointer(azObj + 1*4)), + ts+18242, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(azObj)), *(*uintptr)(unsafe.Pointer(azObj + 1*4)), azAlterType[(*InitData)(unsafe.Pointer(pData)).FmInitFlags&U32(INITFLAG_AlterMask)-U32(1)], zExtra)) (*InitData)(unsafe.Pointer(pData)).Frc = SQLITE_ERROR } else if (*Sqlite3)(unsafe.Pointer(db)).Fflags&uint64(SQLITE_WriteSchema) != 0 { - (*InitData)(unsafe.Pointer(pData)).Frc = Xsqlite3CorruptError(tls, 137196) + (*InitData)(unsafe.Pointer(pData)).Frc = Xsqlite3CorruptError(tls, 137249) } else { var z uintptr var zObj uintptr @@ -85190,19 +85223,19 @@ } else { zObj = ts + 5008 } - z = Xsqlite3MPrintf(tls, db, ts+18223, libc.VaList(bp+32, zObj)) + z = Xsqlite3MPrintf(tls, db, ts+18270, libc.VaList(bp+32, zObj)) if zExtra != 0 && *(*int8)(unsafe.Pointer(zExtra)) != 0 { - z = Xsqlite3MPrintf(tls, db, ts+18254, libc.VaList(bp+40, z, zExtra)) + z = Xsqlite3MPrintf(tls, db, ts+18301, libc.VaList(bp+40, z, zExtra)) } *(*uintptr)(unsafe.Pointer((*InitData)(unsafe.Pointer(pData)).FpzErrMsg)) = z - (*InitData)(unsafe.Pointer(pData)).Frc = Xsqlite3CorruptError(tls, 137203) + (*InitData)(unsafe.Pointer(pData)).Frc = Xsqlite3CorruptError(tls, 137256) } } var azAlterType = [3]uintptr{ - ts + 18262, - ts + 18269, - ts + 18281, + ts + 18309, + ts + 18316, + ts + 18328, } // Check to see if any sibling index (another index on the same table) @@ -85294,7 +85327,7 @@ var pIndex uintptr pIndex = Xsqlite3FindIndex(tls, db, *(*uintptr)(unsafe.Pointer(argv + 1*4)), (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*16)).FzDbSName) if pIndex == uintptr(0) { - corruptSchema(tls, pData, argv, ts+18292) + corruptSchema(tls, pData, argv, ts+18339) } else if Xsqlite3GetUInt32(tls, 
*(*uintptr)(unsafe.Pointer(argv + 3*4)), pIndex+44) == 0 || (*Index)(unsafe.Pointer(pIndex)).Ftnum < Pgno(2) || (*Index)(unsafe.Pointer(pIndex)).Ftnum > (*InitData)(unsafe.Pointer(pData)).FmxPage || @@ -85342,7 +85375,7 @@ }()) *(*uintptr)(unsafe.Pointer(bp + 16 + 2*4)) = *(*uintptr)(unsafe.Pointer(bp + 16 + 1*4)) *(*uintptr)(unsafe.Pointer(bp + 16 + 3*4)) = ts + 7938 - *(*uintptr)(unsafe.Pointer(bp + 16 + 4*4)) = ts + 18305 + *(*uintptr)(unsafe.Pointer(bp + 16 + 4*4)) = ts + 18352 *(*uintptr)(unsafe.Pointer(bp + 16 + 5*4)) = uintptr(0) (*InitData)(unsafe.Pointer(bp + 40)).Fdb = db (*InitData)(unsafe.Pointer(bp + 40)).FiDb = iDb @@ -85471,7 +85504,7 @@ if !(int32((*Schema)(unsafe.Pointer((*Db)(unsafe.Pointer(pDb)).FpSchema)).Ffile_format) > SQLITE_MAX_FILE_FORMAT) { goto __19 } - Xsqlite3SetString(tls, pzErrMsg, db, ts+18377) + Xsqlite3SetString(tls, pzErrMsg, db, ts+18424) rc = SQLITE_ERROR goto initone_error_out __19: @@ -85485,7 +85518,7 @@ (*InitData)(unsafe.Pointer(bp + 40)).FmxPage = Xsqlite3BtreeLastPage(tls, (*Db)(unsafe.Pointer(pDb)).FpBt) zSql = Xsqlite3MPrintf(tls, db, - ts+18401, + ts+18448, libc.VaList(bp, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*16)).FzDbSName, zSchemaTabName)) xAuth = (*Sqlite3)(unsafe.Pointer(db)).FxAuth @@ -85817,7 +85850,7 @@ goto __8 } zDb = (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + uintptr(i)*16)).FzDbSName - Xsqlite3ErrorWithMsg(tls, db, rc, ts+18435, libc.VaList(bp, zDb)) + Xsqlite3ErrorWithMsg(tls, db, rc, ts+18482, libc.VaList(bp, zDb)) goto end_prepare __8: @@ -85847,7 +85880,7 @@ if !(nBytes > mxLen) { goto __12 } - Xsqlite3ErrorWithMsg(tls, db, SQLITE_TOOBIG, ts+18465, 0) + Xsqlite3ErrorWithMsg(tls, db, SQLITE_TOOBIG, ts+18512, 0) rc = Xsqlite3ApiExit(tls, db, SQLITE_TOOBIG) goto end_prepare __12: @@ -85943,7 +85976,7 @@ *(*uintptr)(unsafe.Pointer(ppStmt)) = uintptr(0) if !(Xsqlite3SafetyCheckOk(tls, db) != 0) || zSql == uintptr(0) { - return Xsqlite3MisuseError(tls, 137995) + return Xsqlite3MisuseError(tls, 138048) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) Xsqlite3BtreeEnterAll(tls, db) @@ -86042,7 +86075,7 @@ *(*uintptr)(unsafe.Pointer(ppStmt)) = uintptr(0) if !(Xsqlite3SafetyCheckOk(tls, db) != 0) || zSql == uintptr(0) { - return Xsqlite3MisuseError(tls, 138143) + return Xsqlite3MisuseError(tls, 138196) } if nBytes >= 0 { var sz int32 @@ -86369,13 +86402,13 @@ zSp2++ } Xsqlite3ErrorMsg(tls, pParse, - ts+18484, libc.VaList(bp, pA, zSp1, pB, zSp2, pC)) + ts+18531, libc.VaList(bp, pA, zSp1, pB, zSp2, pC)) jointype = JT_INNER } return jointype } -var zKeyText = *(*[34]int8)(unsafe.Pointer(ts + 18514)) +var zKeyText = *(*[34]int8)(unsafe.Pointer(ts + 18561)) var aKeyword = [7]struct { Fi U8 FnChar U8 @@ -86550,7 +86583,7 @@ var pUsing uintptr = uintptr(0) if uint32(int32(*(*uint16)(unsafe.Pointer(pRight + 36 + 4))&0x400>>10)) != 0 || *(*uintptr)(unsafe.Pointer(pRight + 48)) != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+18548, libc.VaList(bp, 0)) + ts+18595, libc.VaList(bp, 0)) return 1 } for j = 0; j < int32((*Table)(unsafe.Pointer(pRightTab)).FnCol); j++ { @@ -86595,7 +86628,7 @@ tableAndColumnIndex(tls, pSrc, 0, i, zName, bp+24, bp+28, int32(*(*uint16)(unsafe.Pointer(pRight + 36 + 4))&0x1000>>12)) == 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+18598, libc.VaList(bp+8, zName)) + ts+18645, libc.VaList(bp+8, zName)) return 1 } pE1 = Xsqlite3CreateColumnExpr(tls, db, pSrc, *(*int32)(unsafe.Pointer(bp + 24)), *(*int32)(unsafe.Pointer(bp + 28))) @@ -86606,7 +86639,7 @@ 
int32(*(*uint16)(unsafe.Pointer(pRight + 36 + 4))&0x1000>>12)) != 0 { if int32(*(*uint16)(unsafe.Pointer(pSrc + 8 + uintptr(*(*int32)(unsafe.Pointer(bp + 24)))*68 + 36 + 4))&0x400>>10) == 0 || Xsqlite3IdListIndex(tls, *(*uintptr)(unsafe.Pointer(pSrc + 8 + uintptr(*(*int32)(unsafe.Pointer(bp + 24)))*68 + 48)), zName) < 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+18662, + Xsqlite3ErrorMsg(tls, pParse, ts+18709, libc.VaList(bp+16, zName)) break } @@ -87234,16 +87267,16 @@ var z uintptr switch id { case TK_ALL: - z = ts + 18699 + z = ts + 18746 break case TK_INTERSECT: - z = ts + 18709 + z = ts + 18756 break case TK_EXCEPT: - z = ts + 18719 + z = ts + 18766 break default: - z = ts + 18726 + z = ts + 18773 break } return z @@ -87253,7 +87286,7 @@ bp := tls.Alloc(8) defer tls.Free(8) - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18732, libc.VaList(bp, zUsage)) + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18779, libc.VaList(bp, zUsage)) } func generateSortTail(tls *libc.TLS, pParse uintptr, p uintptr, pSort uintptr, nColumn int32, pDest uintptr) { @@ -87279,9 +87312,9 @@ var nRefKey int32 = 0 var aOutEx uintptr = (*Select)(unsafe.Pointer(p)).FpEList + 8 - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18755, libc.VaList(bp, func() uintptr { + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18802, libc.VaList(bp, func() uintptr { if (*SortCtx)(unsafe.Pointer(pSort)).FnOBSat > 0 { - return ts + 18786 + return ts + 18833 } return ts + 1554 }())) @@ -87625,7 +87658,7 @@ } else { var z uintptr = (*ExprList_item)(unsafe.Pointer(pEList + 8 + uintptr(i)*20)).FzEName if z == uintptr(0) { - z = Xsqlite3MPrintf(tls, db, ts+18801, libc.VaList(bp+16, i+1)) + z = Xsqlite3MPrintf(tls, db, ts+18848, libc.VaList(bp+16, i+1)) } else { z = Xsqlite3DbStrDup(tls, db, z) } @@ -87725,7 +87758,7 @@ if zName != 0 && !(Xsqlite3IsTrueOrFalse(tls, zName) != 0) { zName = Xsqlite3DbStrDup(tls, db, zName) } else { - zName = Xsqlite3MPrintf(tls, db, ts+18801, libc.VaList(bp, i+1)) + zName = Xsqlite3MPrintf(tls, db, ts+18848, libc.VaList(bp, i+1)) } *(*U32)(unsafe.Pointer(bp + 48)) = U32(0) @@ -87741,7 +87774,7 @@ nName = j } } - zName = Xsqlite3MPrintf(tls, db, ts+18810, libc.VaList(bp+8, nName, zName, libc.PreIncUint32(&*(*U32)(unsafe.Pointer(bp + 48)), 1))) + zName = Xsqlite3MPrintf(tls, db, ts+18857, libc.VaList(bp+8, nName, zName, libc.PreIncUint32(&*(*U32)(unsafe.Pointer(bp + 48)), 1))) Xsqlite3ProgressCheck(tls, pParse) if *(*U32)(unsafe.Pointer(bp + 48)) > U32(3) { Xsqlite3_randomness(tls, int32(unsafe.Sizeof(U32(0))), bp+48) @@ -87824,8 +87857,6 @@ (*Column)(unsafe.Pointer(pCol)).Faffinity = Xsqlite3ExprAffinity(tls, p) if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) <= SQLITE_AFF_NONE { (*Column)(unsafe.Pointer(pCol)).Faffinity = aff - } else if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) >= SQLITE_AFF_NUMERIC && int32((*Expr)(unsafe.Pointer(p)).Fop) == TK_CAST { - (*Column)(unsafe.Pointer(pCol)).Faffinity = int8(SQLITE_AFF_FLEXNUM) } if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) >= SQLITE_AFF_TEXT && (*Select)(unsafe.Pointer(pSelect)).FpNext != 0 { var m int32 = 0 @@ -87840,12 +87871,15 @@ } else if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) >= SQLITE_AFF_NUMERIC && m&0x02 != 0 { (*Column)(unsafe.Pointer(pCol)).Faffinity = int8(SQLITE_AFF_BLOB) } + if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) >= SQLITE_AFF_NUMERIC && int32((*Expr)(unsafe.Pointer(p)).Fop) == TK_CAST { + (*Column)(unsafe.Pointer(pCol)).Faffinity = int8(SQLITE_AFF_FLEXNUM) + } } zType = columnTypeImpl(tls, bp, p, uintptr(0), 
uintptr(0), uintptr(0)) if zType == uintptr(0) || int32((*Column)(unsafe.Pointer(pCol)).Faffinity) != int32(Xsqlite3AffinityType(tls, zType, uintptr(0))) { if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) == SQLITE_AFF_NUMERIC || int32((*Column)(unsafe.Pointer(pCol)).Faffinity) == SQLITE_AFF_FLEXNUM { - zType = ts + 18818 + zType = ts + 18865 } else { zType = uintptr(0) for j = 1; j < SQLITE_N_STDTYPE; j++ { @@ -88061,7 +88095,7 @@ if !((*Select)(unsafe.Pointer(p)).FpWin != 0) { goto __1 } - Xsqlite3ErrorMsg(tls, pParse, ts+18822, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+18869, 0) return __1: ; @@ -88152,7 +88186,7 @@ if !((*Select)(unsafe.Pointer(pFirstRec)).FselFlags&U32(SF_Aggregate) != 0) { goto __15 } - Xsqlite3ErrorMsg(tls, pParse, ts+18871, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+18918, 0) goto end_of_recursive_query __15: ; @@ -88172,7 +88206,7 @@ ; pSetup = (*Select)(unsafe.Pointer(pFirstRec)).FpPrior (*Select)(unsafe.Pointer(pSetup)).FpNext = uintptr(0) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18913, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18960, 0) rc = Xsqlite3Select(tls, pParse, pSetup, bp) (*Select)(unsafe.Pointer(pSetup)).FpNext = p if !(rc != 0) { @@ -88209,7 +88243,7 @@ Xsqlite3VdbeResolveLabel(tls, v, addrCont) (*Select)(unsafe.Pointer(pFirstRec)).FpPrior = uintptr(0) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18919, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18966, 0) Xsqlite3Select(tls, pParse, p, bp) (*Select)(unsafe.Pointer(pFirstRec)).FpPrior = pSetup @@ -88243,11 +88277,11 @@ p = (*Select)(unsafe.Pointer(p)).FpPrior nRow = nRow + bShowAll } - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18934, libc.VaList(bp, nRow, func() uintptr { + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18981, libc.VaList(bp, nRow, func() uintptr { if nRow == 1 { return ts + 1554 } - return ts + 18957 + return ts + 19004 }())) for p != 0 { selectInnerLoop(tls, pParse, p, -1, uintptr(0), uintptr(0), pDest, 1, 1) @@ -88348,8 +88382,8 @@ if !((*Select)(unsafe.Pointer(pPrior)).FpPrior == uintptr(0)) { goto __8 } - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18959, 0) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18974, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19006, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19021, 0) __8: ; switch int32((*Select)(unsafe.Pointer(p)).Fop) { @@ -88396,7 +88430,7 @@ ; __15: ; - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18699, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18746, 0) rc = Xsqlite3Select(tls, pParse, p, bp+16) @@ -88463,7 +88497,7 @@ pLimit = (*Select)(unsafe.Pointer(p)).FpLimit (*Select)(unsafe.Pointer(p)).FpLimit = uintptr(0) (*SelectDest)(unsafe.Pointer(bp + 48)).FeDest = op - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18993, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19040, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) rc = Xsqlite3Select(tls, pParse, p, bp+48) @@ -88525,7 +88559,7 @@ pLimit1 = (*Select)(unsafe.Pointer(p)).FpLimit (*Select)(unsafe.Pointer(p)).FpLimit = uintptr(0) (*SelectDest)(unsafe.Pointer(bp + 76)).FiSDParm = tab2 - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18993, libc.VaList(bp+8, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19040, libc.VaList(bp+8, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) rc = Xsqlite3Select(tls, pParse, p, bp+76) @@ 
-88678,10 +88712,10 @@ defer tls.Free(8) if (*Select)(unsafe.Pointer(p)).FselFlags&U32(SF_Values) != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+19014, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+19061, 0) } else { Xsqlite3ErrorMsg(tls, pParse, - ts+19060, + ts+19107, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) } } @@ -88935,8 +88969,8 @@ (*Select)(unsafe.Pointer(pPrior)).FpNext = uintptr(0) (*Select)(unsafe.Pointer(pPrior)).FpOrderBy = Xsqlite3ExprListDup(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pOrderBy, 0) - Xsqlite3ResolveOrderGroupBy(tls, pParse, p, (*Select)(unsafe.Pointer(p)).FpOrderBy, ts+7234) - Xsqlite3ResolveOrderGroupBy(tls, pParse, pPrior, (*Select)(unsafe.Pointer(pPrior)).FpOrderBy, ts+7234) + Xsqlite3ResolveOrderGroupBy(tls, pParse, p, (*Select)(unsafe.Pointer(p)).FpOrderBy, ts+7223) + Xsqlite3ResolveOrderGroupBy(tls, pParse, pPrior, (*Select)(unsafe.Pointer(pPrior)).FpOrderBy, ts+7223) computeLimitRegisters(tls, pParse, p, labelEnd) if (*Select)(unsafe.Pointer(p)).FiLimit != 0 && op == TK_ALL { @@ -88963,13 +88997,13 @@ Xsqlite3SelectDestInit(tls, bp+8, SRT_Coroutine, regAddrA) Xsqlite3SelectDestInit(tls, bp+36, SRT_Coroutine, regAddrB) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19142, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19189, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) addrSelectA = Xsqlite3VdbeCurrentAddr(tls, v) + 1 addr1 = Xsqlite3VdbeAddOp3(tls, v, OP_InitCoroutine, regAddrA, 0, addrSelectA) (*Select)(unsafe.Pointer(pPrior)).FiLimit = regLimitA - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19153, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19200, 0) Xsqlite3Select(tls, pParse, pPrior, bp+8) Xsqlite3VdbeEndCoroutine(tls, v, regAddrA) Xsqlite3VdbeJumpHere(tls, v, addr1) @@ -88981,7 +89015,7 @@ savedOffset = (*Select)(unsafe.Pointer(p)).FiOffset (*Select)(unsafe.Pointer(p)).FiLimit = regLimitB (*Select)(unsafe.Pointer(p)).FiOffset = 0 - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19158, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19205, 0) Xsqlite3Select(tls, pParse, p, bp+36) (*Select)(unsafe.Pointer(p)).FiLimit = savedLimit (*Select)(unsafe.Pointer(p)).FiOffset = savedOffset @@ -89168,7 +89202,8 @@ Xsqlite3VectorErrorMsg(tls, (*SubstContext)(unsafe.Pointer(pSubst)).FpParse, pCopy) } else { var db uintptr = (*Parse)(unsafe.Pointer((*SubstContext)(unsafe.Pointer(pSubst)).FpParse)).Fdb - if (*SubstContext)(unsafe.Pointer(pSubst)).FisOuterJoin != 0 && int32((*Expr)(unsafe.Pointer(pCopy)).Fop) != TK_COLUMN { + if (*SubstContext)(unsafe.Pointer(pSubst)).FisOuterJoin != 0 && + (int32((*Expr)(unsafe.Pointer(pCopy)).Fop) != TK_COLUMN || (*Expr)(unsafe.Pointer(pCopy)).FiTable != (*SubstContext)(unsafe.Pointer(pSubst)).FiNewTable) { libc.Xmemset(tls, bp, 0, uint32(unsafe.Sizeof(Expr{}))) (*Expr)(unsafe.Pointer(bp)).Fop = U8(TK_IF_NULL_ROW) (*Expr)(unsafe.Pointer(bp)).FpLeft = pCopy @@ -90067,7 +90102,7 @@ for pIdx = (*Table)(unsafe.Pointer(pTab)).FpIndex; pIdx != 0 && Xsqlite3StrICmp(tls, (*Index)(unsafe.Pointer(pIdx)).FzName, zIndexedBy) != 0; pIdx = (*Index)(unsafe.Pointer(pIdx)).FpNext { } if !(pIdx != 0) { - Xsqlite3ErrorMsg(tls, pParse, ts+19164, libc.VaList(bp, zIndexedBy, 0)) + Xsqlite3ErrorMsg(tls, pParse, ts+19211, libc.VaList(bp, zIndexedBy, 0)) (*Parse)(unsafe.Pointer(pParse)).FcheckSchema = U8(1) return SQLITE_ERROR } @@ -90150,7 +90185,7 @@ defer tls.Free(8) if 
uint32(int32(*(*uint16)(unsafe.Pointer(pFrom + 36 + 4))&0x4>>2)) != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+19182, libc.VaList(bp, (*SrcItem)(unsafe.Pointer(pFrom)).FzName)) + Xsqlite3ErrorMsg(tls, pParse, ts+19229, libc.VaList(bp, (*SrcItem)(unsafe.Pointer(pFrom)).FzName)) return 1 } return 0 @@ -90279,7 +90314,7 @@ *(*U32)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pFrom)).FpSelect + 4)) |= U32(SF_CopyCte) if uint32(int32(*(*uint16)(unsafe.Pointer(pFrom + 36 + 4))&0x2>>1)) != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+19205, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(pFrom + 60)))) + Xsqlite3ErrorMsg(tls, pParse, ts+19252, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(pFrom + 60)))) return 2 } libc.SetBitFieldPtr16Uint32(pFrom+36+4, uint32(1), 8, 0x100) @@ -90302,7 +90337,7 @@ libc.SetBitFieldPtr16Uint32(pItem+36+4, uint32(1), 6, 0x40) if (*Select)(unsafe.Pointer(pRecTerm)).FselFlags&U32(SF_Recursive) != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+19225, libc.VaList(bp+16, (*Cte)(unsafe.Pointer(pCte)).FzName)) + ts+19272, libc.VaList(bp+16, (*Cte)(unsafe.Pointer(pCte)).FzName)) return 2 } *(*U32)(unsafe.Pointer(pRecTerm + 4)) |= U32(SF_Recursive) @@ -90318,7 +90353,7 @@ pRecTerm = (*Select)(unsafe.Pointer(pRecTerm)).FpPrior } - (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19268 + (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19315 pSavedWith = (*Parse)(unsafe.Pointer(pParse)).FpWith (*Parse)(unsafe.Pointer(pParse)).FpWith = *(*uintptr)(unsafe.Pointer(bp + 48)) if (*Select)(unsafe.Pointer(pSel)).FselFlags&U32(SF_Recursive) != 0 { @@ -90344,7 +90379,7 @@ pEList = (*Select)(unsafe.Pointer(pLeft)).FpEList if (*Cte)(unsafe.Pointer(pCte)).FpCols != 0 { if pEList != 0 && (*ExprList)(unsafe.Pointer(pEList)).FnExpr != (*ExprList)(unsafe.Pointer((*Cte)(unsafe.Pointer(pCte)).FpCols)).FnExpr { - Xsqlite3ErrorMsg(tls, pParse, ts+19291, + Xsqlite3ErrorMsg(tls, pParse, ts+19338, libc.VaList(bp+24, (*Cte)(unsafe.Pointer(pCte)).FzName, (*ExprList)(unsafe.Pointer(pEList)).FnExpr, (*ExprList)(unsafe.Pointer((*Cte)(unsafe.Pointer(pCte)).FpCols)).FnExpr)) (*Parse)(unsafe.Pointer(pParse)).FpWith = pSavedWith return 2 @@ -90355,9 +90390,9 @@ Xsqlite3ColumnsFromExprList(tls, pParse, pEList, pTab+34, pTab+4) if bMayRecursive != 0 { if (*Select)(unsafe.Pointer(pSel)).FselFlags&U32(SF_Recursive) != 0 { - (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19329 + (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19376 } else { - (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19363 + (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19410 } Xsqlite3WalkSelect(tls, pWalker, pSel) } @@ -90404,7 +90439,7 @@ if (*SrcItem)(unsafe.Pointer(pFrom)).FzAlias != 0 { (*Table)(unsafe.Pointer(pTab)).FzName = Xsqlite3DbStrDup(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, (*SrcItem)(unsafe.Pointer(pFrom)).FzAlias) } else { - (*Table)(unsafe.Pointer(pTab)).FzName = Xsqlite3MPrintf(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, ts+19401, libc.VaList(bp, pFrom)) + (*Table)(unsafe.Pointer(pTab)).FzName = Xsqlite3MPrintf(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, ts+19448, libc.VaList(bp, pFrom)) } for (*Select)(unsafe.Pointer(pSel)).FpPrior != 0 { pSel = (*Select)(unsafe.Pointer(pSel)).FpPrior @@ -90516,7 +90551,7 @@ return WRC_Abort } if (*Table)(unsafe.Pointer(pTab)).FnTabRef >= U32(0xffff) { - Xsqlite3ErrorMsg(tls, pParse, ts+19405, + Xsqlite3ErrorMsg(tls, pParse, ts+19452, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName)) (*SrcItem)(unsafe.Pointer(pFrom)).FpTab = uintptr(0) return WRC_Abort @@ -90535,7 +90570,7 @@ if 
int32((*Table)(unsafe.Pointer(pTab)).FeTabType) == TABTYP_VIEW { if (*Sqlite3)(unsafe.Pointer(db)).Fflags&uint64(SQLITE_EnableView) == uint64(0) && (*Table)(unsafe.Pointer(pTab)).FpSchema != (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+1*16)).FpSchema { - Xsqlite3ErrorMsg(tls, pParse, ts+19444, + Xsqlite3ErrorMsg(tls, pParse, ts+19491, libc.VaList(bp+8, (*Table)(unsafe.Pointer(pTab)).FzName)) } (*SrcItem)(unsafe.Pointer(pFrom)).FpSelect = Xsqlite3SelectDup(tls, db, *(*uintptr)(unsafe.Pointer(pTab + 44)), 0) @@ -90659,7 +90694,7 @@ if pNew != 0 { var pX uintptr = pNew + 8 + uintptr((*ExprList)(unsafe.Pointer(pNew)).FnExpr-1)*20 - (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3MPrintf(tls, db, ts+19475, libc.VaList(bp+24, zUName)) + (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3MPrintf(tls, db, ts+19522, libc.VaList(bp+24, zUName)) libc.SetBitFieldPtr16Uint32(pX+8+4, uint32(ENAME_TAB), 0, 0x3) libc.SetBitFieldPtr16Uint32(pX+8+4, uint32(1), 7, 0x80) } @@ -90724,7 +90759,7 @@ (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3DbStrDup(tls, db, (*ExprList_item)(unsafe.Pointer(pNestedFrom+8+uintptr(j)*20)).FzEName) } else { - (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3MPrintf(tls, db, ts+19480, + (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3MPrintf(tls, db, ts+19527, libc.VaList(bp+32, zSchemaName, zTabName, zName)) } @@ -90755,9 +90790,9 @@ ; if !(tableSeen != 0) { if zTName != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+19489, libc.VaList(bp+72, zTName)) + Xsqlite3ErrorMsg(tls, pParse, ts+19536, libc.VaList(bp+72, zTName)) } else { - Xsqlite3ErrorMsg(tls, pParse, ts+19507, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+19554, 0) } } } @@ -90767,7 +90802,7 @@ } if (*Select)(unsafe.Pointer(p)).FpEList != 0 { if (*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer(p)).FpEList)).FnExpr > *(*int32)(unsafe.Pointer(db + 116 + 2*4)) { - Xsqlite3ErrorMsg(tls, pParse, ts+19527, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+19574, 0) return WRC_Abort } if elistFlags&U32(EP_HasFunc|EP_Subquery) != U32(0) { @@ -90905,7 +90940,7 @@ (*AggInfo)(unsafe.Pointer(pAggInfo)).FnColumn = (*AggInfo)(unsafe.Pointer(pAggInfo)).FnAccumulator if int32((*AggInfo)(unsafe.Pointer(pAggInfo)).FnSortingColumn) > 0 { if (*AggInfo)(unsafe.Pointer(pAggInfo)).FnColumn == 0 { - (*AggInfo)(unsafe.Pointer(pAggInfo)).FnSortingColumn = U16(0) + (*AggInfo)(unsafe.Pointer(pAggInfo)).FnSortingColumn = U16((*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer(pSelect)).FpGroupBy)).FnExpr) } else { (*AggInfo)(unsafe.Pointer(pAggInfo)).FnSortingColumn = U16(int32((*AggInfo_col)(unsafe.Pointer((*AggInfo)(unsafe.Pointer(pAggInfo)).FaCol+uintptr((*AggInfo)(unsafe.Pointer(pAggInfo)).FnColumn-1)*16)).FiSorterColumn) + 1) } @@ -90989,13 +91024,13 @@ if *(*uintptr)(unsafe.Pointer(pE + 20)) == uintptr(0) || (*ExprList)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pE + 20)))).FnExpr != 1 { Xsqlite3ErrorMsg(tls, pParse, - ts+19558, 0) + ts+19605, 0) (*AggInfo_func)(unsafe.Pointer(pFunc)).FiDistinct = -1 } else { var pKeyInfo uintptr = Xsqlite3KeyInfoFromExprList(tls, pParse, *(*uintptr)(unsafe.Pointer(pE + 20)), 0, 0) (*AggInfo_func)(unsafe.Pointer(pFunc)).FiDistAddr = Xsqlite3VdbeAddOp4(tls, v, OP_OpenEphemeral, (*AggInfo_func)(unsafe.Pointer(pFunc)).FiDistinct, 0, 0, pKeyInfo, -8) - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+19609, libc.VaList(bp, (*FuncDef)(unsafe.Pointer((*AggInfo_func)(unsafe.Pointer(pFunc)).FpFunc)).FzName)) + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+19656, libc.VaList(bp, 
(*FuncDef)(unsafe.Pointer((*AggInfo_func)(unsafe.Pointer(pFunc)).FpFunc)).FzName)) } } @@ -91184,11 +91219,11 @@ if int32((*Parse)(unsafe.Pointer(pParse)).Fexplain) == 2 { var bCover int32 = libc.Bool32(pIdx != uintptr(0) && ((*Table)(unsafe.Pointer(pTab)).FtabFlags&U32(TF_WithoutRowid) == U32(0) || !(int32(*(*uint16)(unsafe.Pointer(pIdx + 56))&0x3>>0) == SQLITE_IDXTYPE_PRIMARYKEY))) - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+19642, + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+19689, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName, func() uintptr { if bCover != 0 { - return ts + 19654 + return ts + 19701 } return ts + 1554 }(), @@ -91516,7 +91551,7 @@ goto __7 } Xsqlite3ErrorMsg(tls, pParse, - ts+19677, + ts+19724, libc.VaList(bp, func() uintptr { if (*SrcItem)(unsafe.Pointer(p0)).FzAlias != 0 { return (*SrcItem)(unsafe.Pointer(p0)).FzAlias @@ -91577,7 +91612,7 @@ if !(int32((*Table)(unsafe.Pointer(pTab)).FnCol) != (*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer(pSub)).FpEList)).FnExpr) { goto __15 } - Xsqlite3ErrorMsg(tls, pParse, ts+19731, + Xsqlite3ErrorMsg(tls, pParse, ts+19778, libc.VaList(bp+8, int32((*Table)(unsafe.Pointer(pTab)).FnCol), (*Table)(unsafe.Pointer(pTab)).FzName, (*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer(pSub)).FpEList)).FnExpr)) goto select_end __15: @@ -91719,7 +91754,7 @@ (*SrcItem)(unsafe.Pointer(pItem1)).FaddrFillSub = addrTop Xsqlite3SelectDestInit(tls, bp+88, SRT_Coroutine, (*SrcItem)(unsafe.Pointer(pItem1)).FregReturn) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19771, libc.VaList(bp+32, pItem1)) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19818, libc.VaList(bp+32, pItem1)) Xsqlite3Select(tls, pParse, pSub1, bp+88) (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pItem1)).FpTab)).FnRowLogEst = (*Select)(unsafe.Pointer(pSub1)).FnSelectRow libc.SetBitFieldPtr16Uint32(pItem1+36+4, uint32(1), 5, 0x20) @@ -91778,7 +91813,7 @@ ; Xsqlite3SelectDestInit(tls, bp+88, SRT_EphemTab, (*SrcItem)(unsafe.Pointer(pItem1)).FiCursor) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19786, libc.VaList(bp+40, pItem1)) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19833, libc.VaList(bp+40, pItem1)) Xsqlite3Select(tls, pParse, pSub1, bp+88) (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pItem1)).FpTab)).FnRowLogEst = (*Select)(unsafe.Pointer(pSub1)).FnSelectRow if !(onceAddr != 0) { @@ -92249,9 +92284,9 @@ explainTempTable(tls, pParse, func() uintptr { if (*DistinctCtx)(unsafe.Pointer(bp+116)).FisTnct != 0 && (*Select)(unsafe.Pointer(p)).FselFlags&U32(SF_Distinct) == U32(0) { - return ts + 19802 + return ts + 19849 } - return ts + 19811 + return ts + 19858 }()) groupBySort = 1 @@ -92602,7 +92637,7 @@ if !(int32((*DistinctCtx)(unsafe.Pointer(bp+116)).FeTnctType) == WHERE_DISTINCT_UNORDERED) { goto __146 } - explainTempTable(tls, pParse, ts+19802) + explainTempTable(tls, pParse, ts+19849) __146: ; if !((*SortCtx)(unsafe.Pointer(bp+48)).FpOrderBy != 0) { @@ -92706,7 +92741,7 @@ } Xsqlite3_free(tls, (*TabResult)(unsafe.Pointer(p)).FzErrMsg) (*TabResult)(unsafe.Pointer(p)).FzErrMsg = Xsqlite3_mprintf(tls, - ts+19820, 0) + ts+19867, 0) (*TabResult)(unsafe.Pointer(p)).Frc = SQLITE_ERROR return 1 __11: @@ -92939,7 +92974,7 @@ if !((*Token)(unsafe.Pointer(pName2)).Fn > uint32(0)) { goto __3 } - Xsqlite3ErrorMsg(tls, pParse, ts+19885, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+19932, 0) goto trigger_cleanup __3: ; @@ -92983,7 +93018,7 @@ goto trigger_cleanup __8: ; - Xsqlite3FixInit(tls, bp+36, pParse, iDb, ts+19931, *(*uintptr)(unsafe.Pointer(bp + 32))) + 
Xsqlite3FixInit(tls, bp+36, pParse, iDb, ts+19978, *(*uintptr)(unsafe.Pointer(bp + 32))) if !(Xsqlite3FixSrcList(tls, bp+36, pTableName) != 0) { goto __9 } @@ -93001,7 +93036,7 @@ if !(int32((*Table)(unsafe.Pointer(pTab)).FeTabType) == TABTYP_VTAB) { goto __11 } - Xsqlite3ErrorMsg(tls, pParse, ts+19939, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+19986, 0) goto trigger_orphan_error __11: ; @@ -93013,7 +93048,7 @@ goto trigger_cleanup __12: ; - if !(Xsqlite3CheckObjectName(tls, pParse, zName, ts+19931, (*Table)(unsafe.Pointer(pTab)).FzName) != 0) { + if !(Xsqlite3CheckObjectName(tls, pParse, zName, ts+19978, (*Table)(unsafe.Pointer(pTab)).FzName) != 0) { goto __13 } goto trigger_cleanup @@ -93028,11 +93063,12 @@ if !!(noErr != 0) { goto __16 } - Xsqlite3ErrorMsg(tls, pParse, ts+19980, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(bp + 32)))) + Xsqlite3ErrorMsg(tls, pParse, ts+20027, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(bp + 32)))) goto __17 __16: ; Xsqlite3CodeVerifySchema(tls, pParse, iDb) + __17: ; goto trigger_cleanup @@ -93043,19 +93079,19 @@ if !(Xsqlite3_strnicmp(tls, (*Table)(unsafe.Pointer(pTab)).FzName, ts+6381, 7) == 0) { goto __18 } - Xsqlite3ErrorMsg(tls, pParse, ts+20006, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+20053, 0) goto trigger_cleanup __18: ; if !(int32((*Table)(unsafe.Pointer(pTab)).FeTabType) == TABTYP_VIEW && tr_tm != TK_INSTEAD) { goto __19 } - Xsqlite3ErrorMsg(tls, pParse, ts+20044, + Xsqlite3ErrorMsg(tls, pParse, ts+20091, libc.VaList(bp+8, func() uintptr { if tr_tm == TK_BEFORE { - return ts + 20081 + return ts + 20128 } - return ts + 20088 + return ts + 20135 }(), pTableName+8)) goto trigger_orphan_error __19: @@ -93064,7 +93100,7 @@ goto __20 } Xsqlite3ErrorMsg(tls, pParse, - ts+20094, libc.VaList(bp+24, pTableName+8)) + ts+20141, libc.VaList(bp+24, pTableName+8)) goto trigger_orphan_error __20: ; @@ -93213,7 +93249,7 @@ __3: ; Xsqlite3TokenInit(tls, bp+56, (*Trigger)(unsafe.Pointer(pTrig)).FzName) - Xsqlite3FixInit(tls, bp+64, pParse, iDb, ts+19931, bp+56) + Xsqlite3FixInit(tls, bp+64, pParse, iDb, ts+19978, bp+56) if !(Xsqlite3FixTriggerStep(tls, bp+64, (*Trigger)(unsafe.Pointer(pTrig)).Fstep_list) != 0 || Xsqlite3FixExpr(tls, bp+64, (*Trigger)(unsafe.Pointer(pTrig)).FpWhen) != 0) { goto __4 @@ -93246,7 +93282,7 @@ goto __12 } Xsqlite3ErrorMsg(tls, pParse, - ts+20140, + ts+20187, libc.VaList(bp, (*Trigger)(unsafe.Pointer(pTrig)).FzName, (*TriggerStep)(unsafe.Pointer(pStep)).FzTarget)) goto triggerfinish_cleanup __12: @@ -93271,13 +93307,13 @@ z = Xsqlite3DbStrNDup(tls, db, (*Token)(unsafe.Pointer(pAll)).Fz, uint64((*Token)(unsafe.Pointer(pAll)).Fn)) Xsqlite3NestedParse(tls, pParse, - ts+20188, + ts+20235, libc.VaList(bp+16, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*16)).FzDbSName, zName, (*Trigger)(unsafe.Pointer(pTrig)).Ftable, z)) Xsqlite3DbFree(tls, db, z) Xsqlite3ChangeCookie(tls, pParse, iDb) Xsqlite3VdbeAddParseSchemaOp(tls, v, iDb, - Xsqlite3MPrintf(tls, db, ts+20263, libc.VaList(bp+48, zName)), uint16(0)) + Xsqlite3MPrintf(tls, db, ts+20310, libc.VaList(bp+48, zName)), uint16(0)) __7: ; __6: @@ -93533,7 +93569,7 @@ if !!(noErr != 0) { goto __9 } - Xsqlite3ErrorMsg(tls, pParse, ts+20292, libc.VaList(bp, pName+8)) + Xsqlite3ErrorMsg(tls, pParse, ts+20339, libc.VaList(bp, pName+8)) goto __10 __9: Xsqlite3CodeVerifyNamedSchema(tls, pParse, zDb) @@ -93586,7 +93622,7 @@ if libc.AssignUintptr(&v, Xsqlite3GetVdbe(tls, pParse)) != uintptr(0) { Xsqlite3NestedParse(tls, pParse, - ts+20312, + ts+20359, libc.VaList(bp, 
(*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*16)).FzDbSName, (*Trigger)(unsafe.Pointer(pTrigger)).FzName)) Xsqlite3ChangeCookie(tls, pParse, iDb) Xsqlite3VdbeAddOp4(tls, v, OP_DropTrigger, iDb, 0, 0, (*Trigger)(unsafe.Pointer(pTrigger)).FzName, 0) @@ -93700,12 +93736,12 @@ goto __15 } Xsqlite3ErrorMsg(tls, pParse, - ts+20374, + ts+20421, libc.VaList(bp, func() uintptr { if op == TK_DELETE { - return ts + 20422 + return ts + 20469 } - return ts + 20429 + return ts + 20476 }())) __15: ; @@ -93819,7 +93855,7 @@ if int32((*Expr)(unsafe.Pointer((*Expr)(unsafe.Pointer(pTerm)).FpRight)).Fop) != TK_ASTERISK { return 0 } - Xsqlite3ErrorMsg(tls, pParse, ts+20436, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+20483, 0) return 1 } @@ -93885,7 +93921,7 @@ } Xsqlite3ExprListDelete(tls, db, (*Select)(unsafe.Pointer(bp)).FpEList) pNew = sqlite3ExpandReturning(tls, pParse, (*Returning)(unsafe.Pointer(pReturning)).FpReturnEL, pTab) - if !(int32((*Sqlite3)(unsafe.Pointer(db)).FmallocFailed) != 0) { + if (*Parse)(unsafe.Pointer(pParse)).FnErr == 0 { libc.Xmemset(tls, bp+152, 0, uint32(unsafe.Sizeof(NameContext{}))) if (*Returning)(unsafe.Pointer(pReturning)).FnRetCol == 0 { (*Returning)(unsafe.Pointer(pReturning)).FnRetCol = (*ExprList)(unsafe.Pointer(pNew)).FnExpr @@ -94049,7 +94085,7 @@ if v != 0 { if (*Trigger)(unsafe.Pointer(pTrigger)).FzName != 0 { Xsqlite3VdbeChangeP4(tls, v, -1, - Xsqlite3MPrintf(tls, db, ts+20478, libc.VaList(bp, (*Trigger)(unsafe.Pointer(pTrigger)).FzName)), -6) + Xsqlite3MPrintf(tls, db, ts+20525, libc.VaList(bp, (*Trigger)(unsafe.Pointer(pTrigger)).FzName)), -6) } if (*Trigger)(unsafe.Pointer(pTrigger)).FpWhen != 0 { @@ -94642,7 +94678,7 @@ } Xsqlite3ErrorMsg(tls, pParse, - ts+20492, + ts+20539, libc.VaList(bp, (*Column)(unsafe.Pointer((*Table)(unsafe.Pointer(pTab)).FaCol+uintptr(j)*16)).FzCnName)) goto update_cleanup __27: @@ -94674,7 +94710,7 @@ iRowidExpr = i goto __30 __29: - Xsqlite3ErrorMsg(tls, pParse, ts+20528, libc.VaList(bp+8, (*ExprList_item)(unsafe.Pointer(pChanges+8+uintptr(i)*20)).FzEName)) + Xsqlite3ErrorMsg(tls, pParse, ts+20575, libc.VaList(bp+8, (*ExprList_item)(unsafe.Pointer(pChanges+8+uintptr(i)*20)).FzEName)) (*Parse)(unsafe.Pointer(pParse)).FcheckSchema = U8(1) goto update_cleanup __30: @@ -95000,7 +95036,12 @@ goto __77 __76: flags = WHERE_ONEPASS_DESIRED - if !(!(int32((*Parse)(unsafe.Pointer(pParse)).Fnested) != 0) && !(pTrigger != 0) && !(hasFK != 0) && !(chngKey != 0) && !(*(*int32)(unsafe.Pointer(bp + 68)) != 0)) { + if !(!(int32((*Parse)(unsafe.Pointer(pParse)).Fnested) != 0) && + !(pTrigger != 0) && + !(hasFK != 0) && + !(chngKey != 0) && + !(*(*int32)(unsafe.Pointer(bp + 68)) != 0) && + (*NameContext)(unsafe.Pointer(bp+28)).FncFlags&NC_Subquery == 0) { goto __78 } flags = flags | WHERE_ONEPASS_MULTIROW @@ -95554,7 +95595,7 @@ if !(regRowCount != 0) { goto __169 } - Xsqlite3CodeChangeCount(tls, v, regRowCount, ts+20547) + Xsqlite3CodeChangeCount(tls, v, regRowCount, ts+20594) __169: ; update_cleanup: @@ -95860,10 +95901,10 @@ if nClause == 0 && (*Upsert)(unsafe.Pointer(pUpsert)).FpNextUpsert == uintptr(0) { *(*int8)(unsafe.Pointer(bp + 152)) = int8(0) } else { - Xsqlite3_snprintf(tls, int32(unsafe.Sizeof([16]int8{})), bp+152, ts+20560, libc.VaList(bp, nClause+1)) + Xsqlite3_snprintf(tls, int32(unsafe.Sizeof([16]int8{})), bp+152, ts+20607, libc.VaList(bp, nClause+1)) } Xsqlite3ErrorMsg(tls, pParse, - ts+20564, libc.VaList(bp+8, bp+152)) + ts+20611, libc.VaList(bp+8, bp+152)) return SQLITE_ERROR } @@ -95986,7 +96027,7 @@ var zSubSql 
uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp)), 0) if zSubSql != 0 && - (libc.Xstrncmp(tls, zSubSql, ts+20637, uint32(3)) == 0 || libc.Xstrncmp(tls, zSubSql, ts+20641, uint32(3)) == 0) { + (libc.Xstrncmp(tls, zSubSql, ts+20684, uint32(3)) == 0 || libc.Xstrncmp(tls, zSubSql, ts+20688, uint32(3)) == 0) { rc = execSql(tls, db, pzErrMsg, zSubSql) if rc != SQLITE_OK { break @@ -96134,14 +96175,14 @@ if !!(int32((*Sqlite3)(unsafe.Pointer(db)).FautoCommit) != 0) { goto __1 } - Xsqlite3SetString(tls, pzErrMsg, db, ts+20645) + Xsqlite3SetString(tls, pzErrMsg, db, ts+20692) return SQLITE_ERROR __1: ; if !((*Sqlite3)(unsafe.Pointer(db)).FnVdbeActive > 1) { goto __2 } - Xsqlite3SetString(tls, pzErrMsg, db, ts+20685) + Xsqlite3SetString(tls, pzErrMsg, db, ts+20732) return SQLITE_ERROR __2: ; @@ -96152,7 +96193,7 @@ if !(Xsqlite3_value_type(tls, pOut) != SQLITE_TEXT) { goto __5 } - Xsqlite3SetString(tls, pzErrMsg, db, ts+20728) + Xsqlite3SetString(tls, pzErrMsg, db, ts+20775) return SQLITE_ERROR __5: ; @@ -96180,7 +96221,7 @@ isMemDb = Xsqlite3PagerIsMemdb(tls, Xsqlite3BtreePager(tls, pMain)) nDb = (*Sqlite3)(unsafe.Pointer(db)).FnDb - rc = execSqlF(tls, db, pzErrMsg, ts+20746, libc.VaList(bp, zOut)) + rc = execSqlF(tls, db, pzErrMsg, ts+20793, libc.VaList(bp, zOut)) (*Sqlite3)(unsafe.Pointer(db)).FopenFlags = saved_openFlags if !(rc != SQLITE_OK) { goto __6 @@ -96200,7 +96241,7 @@ goto __8 } rc = SQLITE_ERROR - Xsqlite3SetString(tls, pzErrMsg, db, ts+20769) + Xsqlite3SetString(tls, pzErrMsg, db, ts+20816) goto end_of_vacuum __8: ; @@ -96260,7 +96301,7 @@ (*Sqlite3)(unsafe.Pointer(db)).Finit.FiDb = U8(nDb) rc = execSqlF(tls, db, pzErrMsg, - ts+20796, + ts+20843, libc.VaList(bp+8, zDbMain)) if !(rc != SQLITE_OK) { goto __13 @@ -96269,7 +96310,7 @@ __13: ; rc = execSqlF(tls, db, pzErrMsg, - ts+20904, + ts+20951, libc.VaList(bp+16, zDbMain)) if !(rc != SQLITE_OK) { goto __14 @@ -96280,7 +96321,7 @@ (*Sqlite3)(unsafe.Pointer(db)).Finit.FiDb = U8(0) rc = execSqlF(tls, db, pzErrMsg, - ts+20958, + ts+21005, libc.VaList(bp+24, zDbMain)) *(*U32)(unsafe.Pointer(db + 24)) &= libc.Uint32FromInt32(libc.CplInt32(DBFLAG_Vacuum)) @@ -96291,7 +96332,7 @@ __15: ; rc = execSqlF(tls, db, pzErrMsg, - ts+21109, + ts+21156, libc.VaList(bp+32, zDbMain)) if !(rc != 0) { goto __16 @@ -96720,11 +96761,11 @@ if pEnd != 0 { (*Parse)(unsafe.Pointer(pParse)).FsNameToken.Fn = uint32((int32((*Token)(unsafe.Pointer(pEnd)).Fz)-int32((*Parse)(unsafe.Pointer(pParse)).FsNameToken.Fz))/1) + (*Token)(unsafe.Pointer(pEnd)).Fn } - zStmt = Xsqlite3MPrintf(tls, db, ts+21239, libc.VaList(bp, pParse+196)) + zStmt = Xsqlite3MPrintf(tls, db, ts+21286, libc.VaList(bp, pParse+196)) iDb = Xsqlite3SchemaToIndex(tls, db, (*Table)(unsafe.Pointer(pTab)).FpSchema) Xsqlite3NestedParse(tls, pParse, - ts+21263, + ts+21310, libc.VaList(bp+8, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*16)).FzDbSName, (*Table)(unsafe.Pointer(pTab)).FzName, (*Table)(unsafe.Pointer(pTab)).FzName, @@ -96734,7 +96775,7 @@ Xsqlite3ChangeCookie(tls, pParse, iDb) Xsqlite3VdbeAddOp0(tls, v, OP_Expire) - zWhere = Xsqlite3MPrintf(tls, db, ts+21362, libc.VaList(bp+48, (*Table)(unsafe.Pointer(pTab)).FzName, zStmt)) + zWhere = Xsqlite3MPrintf(tls, db, ts+21409, libc.VaList(bp+48, (*Table)(unsafe.Pointer(pTab)).FzName, zStmt)) Xsqlite3VdbeAddParseSchemaOp(tls, v, iDb, zWhere, uint16(0)) Xsqlite3DbFree(tls, db, zStmt) @@ -96795,7 +96836,7 @@ for pCtx = (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx; pCtx != 0; pCtx = 
(*VtabCtx)(unsafe.Pointer(pCtx)).FpPrior { if (*VtabCtx)(unsafe.Pointer(pCtx)).FpTab == pTab { *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, - ts+21381, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName)) + ts+21428, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName)) return SQLITE_LOCKED } } @@ -96823,9 +96864,11 @@ (*VtabCtx)(unsafe.Pointer(bp + 32)).FpPrior = (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx (*VtabCtx)(unsafe.Pointer(bp + 32)).FbDeclared = 0 (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx = bp + 32 + (*Table)(unsafe.Pointer(pTab)).FnTabRef++ rc = (*struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, uintptr, uintptr) int32 })(unsafe.Pointer(&struct{ uintptr }{xConstruct})).f(tls, db, (*Module)(unsafe.Pointer(pMod)).FpAux, nArg, azArg, pVTable+8, bp+48) + Xsqlite3DeleteTable(tls, db, pTab) (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx = (*VtabCtx)(unsafe.Pointer(bp + 32)).FpPrior if rc == SQLITE_NOMEM { Xsqlite3OomFault(tls, db) @@ -96833,7 +96876,7 @@ if SQLITE_OK != rc { if *(*uintptr)(unsafe.Pointer(bp + 48)) == uintptr(0) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+21423, libc.VaList(bp+8, zModuleName)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+21470, libc.VaList(bp+8, zModuleName)) } else { *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+3663, libc.VaList(bp+16, *(*uintptr)(unsafe.Pointer(bp + 48)))) Xsqlite3_free(tls, *(*uintptr)(unsafe.Pointer(bp + 48))) @@ -96845,7 +96888,7 @@ (*Module)(unsafe.Pointer(pMod)).FnRefModule++ (*VTable)(unsafe.Pointer(pVTable)).FnRef = 1 if (*VtabCtx)(unsafe.Pointer(bp+32)).FbDeclared == 0 { - var zFormat uintptr = ts + 21453 + var zFormat uintptr = ts + 21500 *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, zFormat, libc.VaList(bp+24, (*Table)(unsafe.Pointer(pTab)).FzName)) Xsqlite3VtabUnlock(tls, pVTable) rc = SQLITE_ERROR @@ -96919,7 +96962,7 @@ if !(pMod != 0) { var zModule uintptr = *(*uintptr)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pTab + 44 + 4)))) - Xsqlite3ErrorMsg(tls, pParse, ts+21499, libc.VaList(bp, zModule)) + Xsqlite3ErrorMsg(tls, pParse, ts+21546, libc.VaList(bp, zModule)) rc = SQLITE_ERROR } else { *(*uintptr)(unsafe.Pointer(bp + 16)) = uintptr(0) @@ -96977,7 +97020,7 @@ pMod = Xsqlite3HashFind(tls, db+396, zMod) if pMod == uintptr(0) || (*Sqlite3_module)(unsafe.Pointer((*Module)(unsafe.Pointer(pMod)).FpModule)).FxCreate == uintptr(0) || (*Sqlite3_module)(unsafe.Pointer((*Module)(unsafe.Pointer(pMod)).FpModule)).FxDestroy == uintptr(0) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+21499, libc.VaList(bp, zMod)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+21546, libc.VaList(bp, zMod)) rc = SQLITE_ERROR } else { rc = vtabCallConstructor(tls, db, pTab, pMod, (*Sqlite3_module)(unsafe.Pointer((*Module)(unsafe.Pointer(pMod)).FpModule)).FxCreate, pzErr) @@ -97011,7 +97054,7 @@ if !(pCtx != 0) || (*VtabCtx)(unsafe.Pointer(pCtx)).FbDeclared != 0 { Xsqlite3Error(tls, db, SQLITE_MISUSE) Xsqlite3_mutex_leave(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) - return Xsqlite3MisuseError(tls, 151030) + return Xsqlite3MisuseError(tls, 151102) } pTab = (*VtabCtx)(unsafe.Pointer(pCtx)).FpTab @@ -97464,7 +97507,7 @@ Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) p = (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx if !(p != 0) { - rc = Xsqlite3MisuseError(tls, 151521) + rc = Xsqlite3MisuseError(tls, 151593) } else { ap = va switch op { @@ -97491,7 +97534,7 @@ fallthrough default: { - rc = 
Xsqlite3MisuseError(tls, 151539) + rc = Xsqlite3MisuseError(tls, 151611) break } @@ -97717,7 +97760,7 @@ func explainIndexColumnName(tls *libc.TLS, pIdx uintptr, i int32) uintptr { i = int32(*(*I16)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx)).FaiColumn + uintptr(i)*2))) if i == -2 { - return ts + 21518 + return ts + 21565 } if i == -1 { return ts + 16267 @@ -97729,11 +97772,11 @@ var i int32 if bAnd != 0 { - Xsqlite3_str_append(tls, pStr, ts+21525, 5) + Xsqlite3_str_append(tls, pStr, ts+21572, 5) } if nTerm > 1 { - Xsqlite3_str_append(tls, pStr, ts+21531, 1) + Xsqlite3_str_append(tls, pStr, ts+21578, 1) } for i = 0; i < nTerm; i++ { if i != 0 { @@ -97748,7 +97791,7 @@ Xsqlite3_str_append(tls, pStr, zOp, 1) if nTerm > 1 { - Xsqlite3_str_append(tls, pStr, ts+21531, 1) + Xsqlite3_str_append(tls, pStr, ts+21578, 1) } for i = 0; i < nTerm; i++ { if i != 0 { @@ -97774,27 +97817,27 @@ if int32(nEq) == 0 && (*WhereLoop)(unsafe.Pointer(pLoop)).FwsFlags&U32(WHERE_BTM_LIMIT|WHERE_TOP_LIMIT) == U32(0) { return } - Xsqlite3_str_append(tls, pStr, ts+21533, 2) + Xsqlite3_str_append(tls, pStr, ts+21580, 2) for i = 0; i < int32(nEq); i++ { var z uintptr = explainIndexColumnName(tls, pIndex, i) if i != 0 { - Xsqlite3_str_append(tls, pStr, ts+21525, 5) + Xsqlite3_str_append(tls, pStr, ts+21572, 5) } Xsqlite3_str_appendf(tls, pStr, func() uintptr { if i >= int32(nSkip) { - return ts + 21536 + return ts + 21583 } - return ts + 21541 + return ts + 21588 }(), libc.VaList(bp, z)) } j = i if (*WhereLoop)(unsafe.Pointer(pLoop)).FwsFlags&U32(WHERE_BTM_LIMIT) != 0 { - explainAppendTerm(tls, pStr, pIndex, int32(*(*U16)(unsafe.Pointer(pLoop + 24 + 2))), j, i, ts+21549) + explainAppendTerm(tls, pStr, pIndex, int32(*(*U16)(unsafe.Pointer(pLoop + 24 + 2))), j, i, ts+21596) i = 1 } if (*WhereLoop)(unsafe.Pointer(pLoop)).FwsFlags&U32(WHERE_TOP_LIMIT) != 0 { - explainAppendTerm(tls, pStr, pIndex, int32(*(*U16)(unsafe.Pointer(pLoop + 24 + 4))), j, i, ts+21551) + explainAppendTerm(tls, pStr, pIndex, int32(*(*U16)(unsafe.Pointer(pLoop + 24 + 4))), j, i, ts+21598) } Xsqlite3_str_append(tls, pStr, ts+4957, 1) } @@ -97837,11 +97880,11 @@ Xsqlite3StrAccumInit(tls, bp+64, db, bp+88, int32(unsafe.Sizeof([100]int8{})), SQLITE_MAX_LENGTH) (*StrAccum)(unsafe.Pointer(bp + 64)).FprintfFlags = U8(SQLITE_PRINTF_INTERNAL) - Xsqlite3_str_appendf(tls, bp+64, ts+21553, libc.VaList(bp, func() uintptr { + Xsqlite3_str_appendf(tls, bp+64, ts+21600, libc.VaList(bp, func() uintptr { if isSearch != 0 { - return ts + 21559 + return ts + 21606 } - return ts + 21566 + return ts + 21613 }(), pItem)) if flags&U32(WHERE_IPK|WHERE_VIRTUALTABLE) == U32(0) { var zFmt uintptr = uintptr(0) @@ -97854,40 +97897,40 @@ zFmt = ts + 10976 } } else if flags&U32(WHERE_PARTIALIDX) != 0 { - zFmt = ts + 21571 + zFmt = ts + 21618 } else if flags&U32(WHERE_AUTO_INDEX) != 0 { - zFmt = ts + 21604 + zFmt = ts + 21651 } else if flags&U32(WHERE_IDX_ONLY) != 0 { - zFmt = ts + 21629 + zFmt = ts + 21676 } else { - zFmt = ts + 21647 + zFmt = ts + 21694 } if zFmt != 0 { - Xsqlite3_str_append(tls, bp+64, ts+21656, 7) + Xsqlite3_str_append(tls, bp+64, ts+21703, 7) Xsqlite3_str_appendf(tls, bp+64, zFmt, libc.VaList(bp+16, (*Index)(unsafe.Pointer(pIdx)).FzName)) explainIndexRange(tls, bp+64, pLoop) } } else if flags&U32(WHERE_IPK) != U32(0) && flags&U32(WHERE_CONSTRAINT) != U32(0) { var cRangeOp int8 var zRowid uintptr = ts + 16267 - Xsqlite3_str_appendf(tls, bp+64, ts+21664, libc.VaList(bp+24, zRowid)) + Xsqlite3_str_appendf(tls, bp+64, ts+21711, libc.VaList(bp+24, zRowid)) if 
flags&U32(WHERE_COLUMN_EQ|WHERE_COLUMN_IN) != 0 { cRangeOp = int8('=') } else if flags&U32(WHERE_BOTH_LIMIT) == U32(WHERE_BOTH_LIMIT) { - Xsqlite3_str_appendf(tls, bp+64, ts+21695, libc.VaList(bp+32, zRowid)) + Xsqlite3_str_appendf(tls, bp+64, ts+21742, libc.VaList(bp+32, zRowid)) cRangeOp = int8('<') } else if flags&U32(WHERE_BTM_LIMIT) != 0 { cRangeOp = int8('>') } else { cRangeOp = int8('<') } - Xsqlite3_str_appendf(tls, bp+64, ts+21705, libc.VaList(bp+40, int32(cRangeOp))) + Xsqlite3_str_appendf(tls, bp+64, ts+21752, libc.VaList(bp+40, int32(cRangeOp))) } else if flags&U32(WHERE_VIRTUALTABLE) != U32(0) { - Xsqlite3_str_appendf(tls, bp+64, ts+21710, + Xsqlite3_str_appendf(tls, bp+64, ts+21757, libc.VaList(bp+48, *(*int32)(unsafe.Pointer(pLoop + 24)), *(*uintptr)(unsafe.Pointer(pLoop + 24 + 12)))) } if int32((*SrcItem)(unsafe.Pointer(pItem)).Ffg.Fjointype)&JT_LEFT != 0 { - Xsqlite3_str_appendf(tls, bp+64, ts+21737, 0) + Xsqlite3_str_appendf(tls, bp+64, ts+21784, 0) } zMsg = Xsqlite3StrAccumFinish(tls, bp+64) @@ -97919,22 +97962,22 @@ Xsqlite3StrAccumInit(tls, bp+24, db, bp+48, int32(unsafe.Sizeof([100]int8{})), SQLITE_MAX_LENGTH) (*StrAccum)(unsafe.Pointer(bp + 24)).FprintfFlags = U8(SQLITE_PRINTF_INTERNAL) - Xsqlite3_str_appendf(tls, bp+24, ts+21748, libc.VaList(bp, pItem)) + Xsqlite3_str_appendf(tls, bp+24, ts+21795, libc.VaList(bp, pItem)) pLoop = (*WhereLevel)(unsafe.Pointer(pLevel)).FpWLoop if (*WhereLoop)(unsafe.Pointer(pLoop)).FwsFlags&U32(WHERE_IPK) != 0 { var pTab uintptr = (*SrcItem)(unsafe.Pointer(pItem)).FpTab if int32((*Table)(unsafe.Pointer(pTab)).FiPKey) >= 0 { - Xsqlite3_str_appendf(tls, bp+24, ts+21536, libc.VaList(bp+8, (*Column)(unsafe.Pointer((*Table)(unsafe.Pointer(pTab)).FaCol+uintptr((*Table)(unsafe.Pointer(pTab)).FiPKey)*16)).FzCnName)) + Xsqlite3_str_appendf(tls, bp+24, ts+21583, libc.VaList(bp+8, (*Column)(unsafe.Pointer((*Table)(unsafe.Pointer(pTab)).FaCol+uintptr((*Table)(unsafe.Pointer(pTab)).FiPKey)*16)).FzCnName)) } else { - Xsqlite3_str_appendf(tls, bp+24, ts+21769, 0) + Xsqlite3_str_appendf(tls, bp+24, ts+21816, 0) } } else { for i = int32((*WhereLoop)(unsafe.Pointer(pLoop)).FnSkip); i < int32(*(*U16)(unsafe.Pointer(pLoop + 24))); i++ { var z uintptr = explainIndexColumnName(tls, *(*uintptr)(unsafe.Pointer(pLoop + 24 + 8)), i) if i > int32((*WhereLoop)(unsafe.Pointer(pLoop)).FnSkip) { - Xsqlite3_str_append(tls, bp+24, ts+21525, 5) + Xsqlite3_str_append(tls, bp+24, ts+21572, 5) } - Xsqlite3_str_appendf(tls, bp+24, ts+21536, libc.VaList(bp+16, z)) + Xsqlite3_str_appendf(tls, bp+24, ts+21583, libc.VaList(bp+16, z)) } } Xsqlite3_str_append(tls, bp+24, ts+4957, 1) @@ -99531,7 +99574,7 @@ ; __126: ; - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21777, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21824, 0) ii = 0 __135: if !(ii < (*WhereClause)(unsafe.Pointer(pOrWc)).FnTerm) { @@ -99559,7 +99602,7 @@ pOrExpr = pAndExpr __140: ; - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21792, libc.VaList(bp, ii+1)) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21839, libc.VaList(bp, ii+1)) pSubWInfo = Xsqlite3WhereBegin(tls, pParse, pOrTab, pOrExpr, uintptr(0), uintptr(0), uintptr(0), uint16(WHERE_OR_SUBCLAUSE), iCovCur) @@ -100077,7 +100120,7 @@ var mAll Bitmask = uint64(0) var k int32 - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21801, libc.VaList(bp, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pTabItem)).FpTab)).FzName)) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21848, libc.VaList(bp, 
(*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pTabItem)).FpTab)).FzName)) for k = 0; k < iLevel; k++ { var iIdxCur int32 @@ -100438,7 +100481,7 @@ {FzOp: ts + 16116, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_MATCH)}, {FzOp: ts + 15447, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_GLOB)}, {FzOp: ts + 14967, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_LIKE)}, - {FzOp: ts + 21815, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_REGEXP)}, + {FzOp: ts + 21862, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_REGEXP)}, } func transferJoinMarkings(tls *libc.TLS, pDerived uintptr, pBase uintptr) { @@ -100928,12 +100971,12 @@ extraRight = x - uint64(1) if prereqAll>>1 >= x { - Xsqlite3ErrorMsg(tls, pParse, ts+21822, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+21869, 0) return } } else if prereqAll>>1 >= x { if (*SrcList)(unsafe.Pointer(pSrc)).FnSrc > 0 && int32((*SrcItem)(unsafe.Pointer(pSrc+8)).Ffg.Fjointype)&JT_LTORJ != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+21822, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+21869, 0) return } *(*U32)(unsafe.Pointer(pExpr + 4)) &= libc.Uint32FromInt32(libc.CplInt32(EP_InnerON)) @@ -101012,7 +101055,7 @@ !((*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_OuterON) != U32(0)) && 0 == Xsqlite3ExprCanBeNull(tls, pLeft) { (*Expr)(unsafe.Pointer(pExpr)).Fop = U8(TK_TRUEFALSE) - *(*uintptr)(unsafe.Pointer(pExpr + 8)) = ts + 6769 + *(*uintptr)(unsafe.Pointer(pExpr + 8)) = ts + 7699 *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_IsFalse) (*WhereTerm)(unsafe.Pointer(pTerm)).FprereqAll = uint64(0) (*WhereTerm)(unsafe.Pointer(pTerm)).FeOperator = U16(0) @@ -101106,7 +101149,7 @@ } zCollSeqName = func() uintptr { if *(*int32)(unsafe.Pointer(bp + 16)) != 0 { - return ts + 21863 + return ts + 21910 } return uintptr(unsafe.Pointer(&Xsqlite3StrBINARY)) }() @@ -101482,7 +101525,7 @@ k++ } if k >= int32((*Table)(unsafe.Pointer(pTab)).FnCol) { - Xsqlite3ErrorMsg(tls, pParse, ts+21870, + Xsqlite3ErrorMsg(tls, pParse, ts+21917, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName, j)) return } @@ -101498,7 +101541,7 @@ pRhs = Xsqlite3PExpr(tls, pParse, TK_UPLUS, Xsqlite3ExprDup(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, (*ExprList_item)(unsafe.Pointer(pArgs+8+uintptr(j)*20)).FpExpr, 0), uintptr(0)) pTerm = Xsqlite3PExpr(tls, pParse, TK_EQ, pColRef, pRhs) - if int32((*SrcItem)(unsafe.Pointer(pItem)).Ffg.Fjointype)&(JT_LEFT|JT_LTORJ) != 0 { + if int32((*SrcItem)(unsafe.Pointer(pItem)).Ffg.Fjointype)&(JT_LEFT|JT_LTORJ|JT_RIGHT) != 0 { joinType = U32(EP_OuterON) } else { joinType = U32(EP_InnerON) @@ -102215,7 +102258,7 @@ goto __6 } Xsqlite3_log(tls, SQLITE_WARNING|int32(1)<<8, - ts+21906, libc.VaList(bp, (*Table)(unsafe.Pointer(pTable)).FzName, + ts+21953, libc.VaList(bp, (*Table)(unsafe.Pointer(pTable)).FzName, (*Column)(unsafe.Pointer((*Table)(unsafe.Pointer(pTable)).FaCol+uintptr(iCol)*16)).FzCnName)) sentWarning = U8(1) __6: @@ -102286,7 +102329,7 @@ __14: ; *(*uintptr)(unsafe.Pointer(pLoop + 24 + 8)) = pIdx - (*Index)(unsafe.Pointer(pIdx)).FzName = ts + 21932 + (*Index)(unsafe.Pointer(pIdx)).FzName = ts + 21979 (*Index)(unsafe.Pointer(pIdx)).FpTable = pTable n = 0 idxCols = uint64(0) @@ -102460,6 +102503,10 @@ var v uintptr = (*Parse)(unsafe.Pointer(pParse)).FpVdbe var pLoop uintptr = (*WhereLevel)(unsafe.Pointer(pLevel)).FpWLoop var iCur int32 + var saved_pIdxEpr uintptr + + saved_pIdxEpr = (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr + (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr = uintptr(0) addrOnce = Xsqlite3VdbeAddOp0(tls, v, OP_Once) for __ccgo := true; __ccgo; __ccgo = iLevel < int32((*WhereInfo)(unsafe.Pointer(pWInfo)).FnLevel) { @@ 
-102503,9 +102550,7 @@ var r1 int32 = Xsqlite3GetTempRange(tls, pParse, n) var jj int32 for jj = 0; jj < n; jj++ { - var iCol int32 = int32(*(*I16)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx)).FaiColumn + uintptr(jj)*2))) - - Xsqlite3ExprCodeGetColumnOfTable(tls, v, (*Index)(unsafe.Pointer(pIdx)).FpTable, iCur, iCol, r1+jj) + Xsqlite3ExprCodeLoadIndexColumn(tls, pParse, pIdx, iCur, jj, r1+jj) } Xsqlite3VdbeAddOp4Int(tls, v, OP_FilterAdd, (*WhereLevel)(unsafe.Pointer(pLevel)).FregFilter, 0, r1, n) Xsqlite3ReleaseTempRange(tls, pParse, r1, n) @@ -102539,6 +102584,7 @@ } } Xsqlite3VdbeJumpHere(tls, v, addrOnce) + (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr = saved_pIdxEpr } func allocateIndexInfo(tls *libc.TLS, pWInfo uintptr, pWC uintptr, mUnusable Bitmask, pSrc uintptr, pmNoOmit uintptr) uintptr { @@ -102797,11 +102843,16 @@ _ = pParse + if !((*Table)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx)).FpTable)).FtabFlags&U32(TF_WithoutRowid) == U32(0)) && int32(*(*uint16)(unsafe.Pointer(pIdx + 56))&0x3>>0) == SQLITE_IDXTYPE_PRIMARYKEY { + nField = int32((*Index)(unsafe.Pointer(pIdx)).FnKeyCol) + } else { + nField = int32((*Index)(unsafe.Pointer(pIdx)).FnColumn) + } nField = func() int32 { - if int32((*UnpackedRecord)(unsafe.Pointer(pRec)).FnField) < (*Index)(unsafe.Pointer(pIdx)).FnSample { + if int32((*UnpackedRecord)(unsafe.Pointer(pRec)).FnField) < nField { return int32((*UnpackedRecord)(unsafe.Pointer(pRec)).FnField) } - return (*Index)(unsafe.Pointer(pIdx)).FnSample + return nField }() iCol = 0 iSample = (*Index)(unsafe.Pointer(pIdx)).FnSample * nField @@ -104381,7 +104432,7 @@ j >= (*WhereClause)(unsafe.Pointer(pWC)).FnTerm || *(*uintptr)(unsafe.Pointer((*WhereLoop)(unsafe.Pointer(pNew)).FaLTerm + uintptr(iTerm)*4)) != uintptr(0) || int32((*sqlite3_index_constraint)(unsafe.Pointer(pIdxCons)).Fusable) == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+21943, libc.VaList(bp, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pSrc)).FpTab)).FzName)) + Xsqlite3ErrorMsg(tls, pParse, ts+21990, libc.VaList(bp, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pSrc)).FpTab)).FzName)) return SQLITE_ERROR } @@ -104439,7 +104490,7 @@ (*WhereLoop)(unsafe.Pointer(pNew)).FnLTerm = U16(mxTerm + 1) for i = 0; i <= mxTerm; i++ { if *(*uintptr)(unsafe.Pointer((*WhereLoop)(unsafe.Pointer(pNew)).FaLTerm + uintptr(i)*4)) == uintptr(0) { - Xsqlite3ErrorMsg(tls, pParse, ts+21943, libc.VaList(bp+8, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pSrc)).FpTab)).FzName)) + Xsqlite3ErrorMsg(tls, pParse, ts+21990, libc.VaList(bp+8, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pSrc)).FpTab)).FzName)) return SQLITE_ERROR } @@ -104837,7 +104888,7 @@ mPrior = mPrior | (*WhereLoop)(unsafe.Pointer(pNew)).FmaskSelf if rc != 0 || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { if rc == SQLITE_DONE { - Xsqlite3_log(tls, SQLITE_WARNING, ts+21969, 0) + Xsqlite3_log(tls, SQLITE_WARNING, ts+22016, 0) rc = SQLITE_OK } else { goto __3 @@ -105444,7 +105495,7 @@ } if nFrom == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+22004, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+22051, 0) Xsqlite3DbFreeNN(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pSpace) return SQLITE_ERROR } @@ -105479,6 +105530,10 @@ if int32((*WherePath)(unsafe.Pointer(pFrom)).FisOrdered) == (*ExprList)(unsafe.Pointer((*WhereInfo)(unsafe.Pointer(pWInfo)).FpOrderBy)).FnExpr { (*WhereInfo)(unsafe.Pointer(pWInfo)).FeDistinct = U8(WHERE_DISTINCT_ORDERED) } + if (*Select)(unsafe.Pointer((*WhereInfo)(unsafe.Pointer(pWInfo)).FpSelect)).FpOrderBy != 0 && + 
int32((*WhereInfo)(unsafe.Pointer(pWInfo)).FnOBSat) > (*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer((*WhereInfo)(unsafe.Pointer(pWInfo)).FpSelect)).FpOrderBy)).FnExpr { + (*WhereInfo)(unsafe.Pointer(pWInfo)).FnOBSat = I8((*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer((*WhereInfo)(unsafe.Pointer(pWInfo)).FpSelect)).FpOrderBy)).FnExpr) + } } else { (*WhereInfo)(unsafe.Pointer(pWInfo)).FrevMask = (*WherePath)(unsafe.Pointer(pFrom)).FrevLoop if int32((*WhereInfo)(unsafe.Pointer(pWInfo)).FnOBSat) <= 0 { @@ -105773,6 +105828,9 @@ (*IndexedExpr)(unsafe.Pointer(p)).FiIdxCur = iIdxCur (*IndexedExpr)(unsafe.Pointer(p)).FiIdxCol = i (*IndexedExpr)(unsafe.Pointer(p)).FbMaybeNullRow = U8(bMaybeNullRow) + if Xsqlite3IndexAffinityStr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pIdx) != 0 { + (*IndexedExpr)(unsafe.Pointer(p)).Faff = U8(*(*int8)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx)).FzColAff + uintptr(i)))) + } (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr = p if (*IndexedExpr)(unsafe.Pointer(p)).FpIENext == uintptr(0) { Xsqlite3ParserAddCleanup(tls, pParse, *(*uintptr)(unsafe.Pointer(&struct { @@ -105925,7 +105983,7 @@ if !((*SrcList)(unsafe.Pointer(pTabList)).FnSrc > int32(uint32(unsafe.Sizeof(Bitmask(0)))*uint32(8))) { goto __2 } - Xsqlite3ErrorMsg(tls, pParse, ts+22022, libc.VaList(bp, int32(uint32(unsafe.Sizeof(Bitmask(0)))*uint32(8)))) + Xsqlite3ErrorMsg(tls, pParse, ts+22069, libc.VaList(bp, int32(uint32(unsafe.Sizeof(Bitmask(0)))*uint32(8)))) return uintptr(0) __2: ; @@ -105989,7 +106047,7 @@ (*WhereInfo)(unsafe.Pointer(pWInfo)).FeDistinct = U8(WHERE_DISTINCT_UNIQUE) __7: ; - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+22050, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+22097, 0) goto __5 __4: ii = 0 @@ -106871,7 +106929,7 @@ error_out: Xsqlite3_result_error(tls, - pCtx, ts+22068, -1) + pCtx, ts+22115, -1) } func nth_valueFinalizeFunc(tls *libc.TLS, pCtx uintptr) { @@ -107004,7 +107062,7 @@ (*NtileCtx)(unsafe.Pointer(p)).FnParam = Xsqlite3_value_int64(tls, *(*uintptr)(unsafe.Pointer(apArg))) if (*NtileCtx)(unsafe.Pointer(p)).FnParam <= int64(0) { Xsqlite3_result_error(tls, - pCtx, ts+22124, -1) + pCtx, ts+22171, -1) } } (*NtileCtx)(unsafe.Pointer(p)).FnTotal++ @@ -107093,17 +107151,17 @@ } } -var row_numberName = *(*[11]int8)(unsafe.Pointer(ts + 22169)) -var dense_rankName = *(*[11]int8)(unsafe.Pointer(ts + 22180)) -var rankName = *(*[5]int8)(unsafe.Pointer(ts + 22191)) -var percent_rankName = *(*[13]int8)(unsafe.Pointer(ts + 22196)) -var cume_distName = *(*[10]int8)(unsafe.Pointer(ts + 22209)) -var ntileName = *(*[6]int8)(unsafe.Pointer(ts + 22219)) -var last_valueName = *(*[11]int8)(unsafe.Pointer(ts + 22225)) -var nth_valueName = *(*[10]int8)(unsafe.Pointer(ts + 22236)) -var first_valueName = *(*[12]int8)(unsafe.Pointer(ts + 22246)) -var leadName = *(*[5]int8)(unsafe.Pointer(ts + 22258)) -var lagName = *(*[4]int8)(unsafe.Pointer(ts + 22263)) +var row_numberName = *(*[11]int8)(unsafe.Pointer(ts + 22216)) +var dense_rankName = *(*[11]int8)(unsafe.Pointer(ts + 22227)) +var rankName = *(*[5]int8)(unsafe.Pointer(ts + 22238)) +var percent_rankName = *(*[13]int8)(unsafe.Pointer(ts + 22243)) +var cume_distName = *(*[10]int8)(unsafe.Pointer(ts + 22256)) +var ntileName = *(*[6]int8)(unsafe.Pointer(ts + 22266)) +var last_valueName = *(*[11]int8)(unsafe.Pointer(ts + 22272)) +var nth_valueName = *(*[10]int8)(unsafe.Pointer(ts + 22283)) +var first_valueName = *(*[12]int8)(unsafe.Pointer(ts + 22293)) +var leadName = *(*[5]int8)(unsafe.Pointer(ts + 22305)) +var lagName = 
*(*[4]int8)(unsafe.Pointer(ts + 22310)) func noopStepFunc(tls *libc.TLS, p uintptr, n int32, a uintptr) { _ = p @@ -107149,7 +107207,7 @@ } } if p == uintptr(0) { - Xsqlite3ErrorMsg(tls, pParse, ts+22267, libc.VaList(bp, zName)) + Xsqlite3ErrorMsg(tls, pParse, ts+22314, libc.VaList(bp, zName)) } return p } @@ -107193,12 +107251,12 @@ ((*Window)(unsafe.Pointer(pWin)).FpStart != 0 || (*Window)(unsafe.Pointer(pWin)).FpEnd != 0) && ((*Window)(unsafe.Pointer(pWin)).FpOrderBy == uintptr(0) || (*ExprList)(unsafe.Pointer((*Window)(unsafe.Pointer(pWin)).FpOrderBy)).FnExpr != 1) { Xsqlite3ErrorMsg(tls, pParse, - ts+22286, 0) + ts+22333, 0) } else if (*FuncDef)(unsafe.Pointer(pFunc)).FfuncFlags&U32(SQLITE_FUNC_WINDOW) != 0 { var db uintptr = (*Parse)(unsafe.Pointer(pParse)).Fdb if (*Window)(unsafe.Pointer(pWin)).FpFilter != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+22357, 0) + ts+22404, 0) } else { *(*[8]WindowUpdate)(unsafe.Pointer(bp)) = [8]WindowUpdate{ {FzFunc: uintptr(unsafe.Pointer(&row_numberName)), FeFrmType: TK_ROWS, FeStart: TK_UNBOUNDED, FeEnd: TK_CURRENT}, @@ -107426,7 +107484,7 @@ if int32((*Expr)(unsafe.Pointer(pExpr)).Fop) == TK_AGG_FUNCTION && (*Expr)(unsafe.Pointer(pExpr)).FpAggInfo == uintptr(0) { Xsqlite3ErrorMsg(tls, (*Walker)(unsafe.Pointer(pWalker)).FpParse, - ts+22420, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(pExpr + 8)))) + ts+22467, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(pExpr + 8)))) } return WRC_Continue } @@ -107542,7 +107600,7 @@ if *(*uintptr)(unsafe.Pointer(bp + 28)) == uintptr(0) { *(*uintptr)(unsafe.Pointer(bp + 28)) = Xsqlite3ExprListAppend(tls, pParse, uintptr(0), - Xsqlite3Expr(tls, db, TK_INTEGER, ts+7530)) + Xsqlite3Expr(tls, db, TK_INTEGER, ts+7519)) } pSub = Xsqlite3SelectNew(tls, @@ -107657,7 +107715,7 @@ eStart == TK_FOLLOWING && (eEnd == TK_PRECEDING || eEnd == TK_CURRENT)) { goto __2 } - Xsqlite3ErrorMsg(tls, pParse, ts+22446, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+22493, 0) goto windowAllocErr __2: ; @@ -107722,15 +107780,15 @@ var zErr uintptr = uintptr(0) if (*Window)(unsafe.Pointer(pWin)).FpPartition != 0 { - zErr = ts + 22478 + zErr = ts + 22525 } else if (*Window)(unsafe.Pointer(pExist)).FpOrderBy != 0 && (*Window)(unsafe.Pointer(pWin)).FpOrderBy != 0 { - zErr = ts + 22495 + zErr = ts + 22542 } else if int32((*Window)(unsafe.Pointer(pExist)).FbImplicitFrame) == 0 { - zErr = ts + 22511 + zErr = ts + 22558 } if zErr != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+22531, libc.VaList(bp, zErr, (*Window)(unsafe.Pointer(pWin)).FzBase)) + ts+22578, libc.VaList(bp, zErr, (*Window)(unsafe.Pointer(pWin)).FzBase)) } else { (*Window)(unsafe.Pointer(pWin)).FpPartition = Xsqlite3ExprListDup(tls, db, (*Window)(unsafe.Pointer(pExist)).FpPartition, 0) if (*Window)(unsafe.Pointer(pExist)).FpOrderBy != 0 { @@ -107751,7 +107809,7 @@ (*Window)(unsafe.Pointer(pWin)).FpOwner = p if (*Expr)(unsafe.Pointer(p)).Fflags&U32(EP_Distinct) != 0 && int32((*Window)(unsafe.Pointer(pWin)).FeFrmType) != TK_FILTER { Xsqlite3ErrorMsg(tls, pParse, - ts+22564, 0) + ts+22611, 0) } } else { Xsqlite3WindowDelete(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pWin) @@ -107907,11 +107965,11 @@ } var azErr = [5]uintptr{ - ts + 22611, - ts + 22664, - ts + 22068, - ts + 22715, - ts + 22767, + ts + 22658, + ts + 22711, + ts + 22115, + ts + 22762, + ts + 22814, } var aOp1 = [5]int32{OP_Ge, OP_Ge, OP_Gt, OP_Ge, OP_Ge} @@ -109303,19 +109361,19 @@ } cnt++ if (*Select)(unsafe.Pointer(pLoop)).FpOrderBy != 0 || (*Select)(unsafe.Pointer(pLoop)).FpLimit != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+22817, + 
Xsqlite3ErrorMsg(tls, pParse, ts+22864, libc.VaList(bp, func() uintptr { if (*Select)(unsafe.Pointer(pLoop)).FpOrderBy != uintptr(0) { - return ts + 22859 + return ts + 22906 } - return ts + 22868 + return ts + 22915 }(), Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(pNext)).Fop)))) break } } if (*Select)(unsafe.Pointer(p)).FselFlags&U32(SF_MultiValue) == U32(0) && libc.AssignInt32(&mxSelect, *(*int32)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb + 116 + 4*4))) > 0 && cnt > mxSelect { - Xsqlite3ErrorMsg(tls, pParse, ts+22874, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+22921, 0) } } @@ -109386,7 +109444,7 @@ var p uintptr = Xsqlite3ExprListAppend(tls, pParse, pPrior, uintptr(0)) if (hasCollate != 0 || sortOrder != -1) && int32((*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).Finit.Fbusy) == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+22908, + Xsqlite3ErrorMsg(tls, pParse, ts+22955, libc.VaList(bp, (*Token)(unsafe.Pointer(pIdToken)).Fn, (*Token)(unsafe.Pointer(pIdToken)).Fz)) } Xsqlite3ExprListSetName(tls, pParse, p, pIdToken, 1) @@ -110481,7 +110539,7 @@ yy_pop_parser_stack(tls, yypParser) } - Xsqlite3ErrorMsg(tls, pParse, ts+22946, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+22993, 0) (*YyParser)(unsafe.Pointer(yypParser)).FpParse = pParse } @@ -111460,7 +111518,7 @@ *(*U32)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-1)*12 + 4)) = U32(TF_WithoutRowid | TF_NoVisibleRowid) } else { *(*U32)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-1)*12 + 4)) = U32(0) - Xsqlite3ErrorMsg(tls, pParse, ts+22968, libc.VaList(bp, (*Token)(unsafe.Pointer(yymsp+4)).Fn, (*Token)(unsafe.Pointer(yymsp+4)).Fz)) + Xsqlite3ErrorMsg(tls, pParse, ts+23015, libc.VaList(bp, (*Token)(unsafe.Pointer(yymsp+4)).Fn, (*Token)(unsafe.Pointer(yymsp+4)).Fz)) } } break @@ -111470,7 +111528,7 @@ *(*U32)(unsafe.Pointer(bp + 40)) = U32(TF_Strict) } else { *(*U32)(unsafe.Pointer(bp + 40)) = U32(0) - Xsqlite3ErrorMsg(tls, pParse, ts+22968, libc.VaList(bp+16, (*Token)(unsafe.Pointer(yymsp+4)).Fn, (*Token)(unsafe.Pointer(yymsp+4)).Fz)) + Xsqlite3ErrorMsg(tls, pParse, ts+23015, libc.VaList(bp+16, (*Token)(unsafe.Pointer(yymsp+4)).Fn, (*Token)(unsafe.Pointer(yymsp+4)).Fz)) } } *(*U32)(unsafe.Pointer(yymsp + 4)) = *(*U32)(unsafe.Pointer(bp + 40)) @@ -112213,7 +112271,7 @@ case uint32(157): { Xsqlite3SrcListIndexedBy(tls, pParse, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-5)*12 + 4)), yymsp+libc.UintptrFromInt32(-4)*12+4) - Xsqlite3ExprListCheckLength(tls, pParse, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-2)*12 + 4)), ts+22995) + Xsqlite3ExprListCheckLength(tls, pParse, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-2)*12 + 4)), ts+23042) if *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-1)*12 + 4)) != 0 { var pFromClause uintptr = *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-1)*12 + 4)) if (*SrcList)(unsafe.Pointer(pFromClause)).FnSrc > 1 { @@ -112377,7 +112435,7 @@ *(*Token)(unsafe.Pointer(bp + 92)) = *(*Token)(unsafe.Pointer(yymsp + 4)) if int32((*Parse)(unsafe.Pointer(pParse)).Fnested) == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+23004, libc.VaList(bp+32, bp+92)) + Xsqlite3ErrorMsg(tls, pParse, ts+23051, libc.VaList(bp+32, bp+92)) *(*uintptr)(unsafe.Pointer(yymsp + 4)) = uintptr(0) } else { *(*uintptr)(unsafe.Pointer(yymsp + 4)) = Xsqlite3PExpr(tls, pParse, TK_REGISTER, uintptr(0), uintptr(0)) @@ -112594,9 +112652,9 @@ Xsqlite3ExprUnmapAndDelete(tls, pParse, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-4)*12 + 4))) 
*(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-4)*12 + 4)) = Xsqlite3Expr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, TK_STRING, func() uintptr { if *(*int32)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-3)*12 + 4)) != 0 { - return ts + 6764 + return ts + 7694 } - return ts + 6769 + return ts + 7699 }()) if *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-4)*12 + 4)) != 0 { Xsqlite3ExprIdToTrueFalse(tls, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-4)*12 + 4))) @@ -112880,19 +112938,19 @@ { *(*Token)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-2)*12 + 4)) = *(*Token)(unsafe.Pointer(yymsp + 4)) Xsqlite3ErrorMsg(tls, pParse, - ts+23028, 0) + ts+23075, 0) } break case uint32(271): { Xsqlite3ErrorMsg(tls, pParse, - ts+23123, 0) + ts+23170, 0) } break case uint32(272): { Xsqlite3ErrorMsg(tls, pParse, - ts+23207, 0) + ts+23254, 0) } break case uint32(273): @@ -113271,9 +113329,9 @@ _ = yymajor if *(*int8)(unsafe.Pointer((*Token)(unsafe.Pointer(bp + 8)).Fz)) != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+23004, libc.VaList(bp, bp+8)) + Xsqlite3ErrorMsg(tls, pParse, ts+23051, libc.VaList(bp, bp+8)) } else { - Xsqlite3ErrorMsg(tls, pParse, ts+23292, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+23339, 0) } (*YyParser)(unsafe.Pointer(yypParser)).FpParse = pParse @@ -114041,7 +114099,7 @@ } else { (*Token)(unsafe.Pointer(bp + 1248)).Fz = zSql (*Token)(unsafe.Pointer(bp + 1248)).Fn = uint32(n) - Xsqlite3ErrorMsg(tls, pParse, ts+23309, libc.VaList(bp, bp+1248)) + Xsqlite3ErrorMsg(tls, pParse, ts+23356, libc.VaList(bp, bp+1248)) break } } @@ -114064,7 +114122,7 @@ if (*Parse)(unsafe.Pointer(pParse)).FzErrMsg == uintptr(0) { (*Parse)(unsafe.Pointer(pParse)).FzErrMsg = Xsqlite3MPrintf(tls, db, ts+3663, libc.VaList(bp+8, Xsqlite3ErrStr(tls, (*Parse)(unsafe.Pointer(pParse)).Frc))) } - Xsqlite3_log(tls, (*Parse)(unsafe.Pointer(pParse)).Frc, ts+23334, libc.VaList(bp+16, (*Parse)(unsafe.Pointer(pParse)).FzErrMsg, (*Parse)(unsafe.Pointer(pParse)).FzTail)) + Xsqlite3_log(tls, (*Parse)(unsafe.Pointer(pParse)).Frc, ts+23381, libc.VaList(bp+16, (*Parse)(unsafe.Pointer(pParse)).FzErrMsg, (*Parse)(unsafe.Pointer(pParse)).FzTail)) nErr++ } (*Parse)(unsafe.Pointer(pParse)).FzTail = zSql @@ -114237,7 +114295,7 @@ fallthrough case 'C': { - if nId == 6 && Xsqlite3_strnicmp(tls, zSql, ts+23345, 6) == 0 { + if nId == 6 && Xsqlite3_strnicmp(tls, zSql, ts+23392, 6) == 0 { token = U8(TkCREATE) } else { token = U8(TkOTHER) @@ -114250,11 +114308,11 @@ fallthrough case 'T': { - if nId == 7 && Xsqlite3_strnicmp(tls, zSql, ts+19931, 7) == 0 { + if nId == 7 && Xsqlite3_strnicmp(tls, zSql, ts+19978, 7) == 0 { token = U8(TkTRIGGER) - } else if nId == 4 && Xsqlite3_strnicmp(tls, zSql, ts+23352, 4) == 0 { + } else if nId == 4 && Xsqlite3_strnicmp(tls, zSql, ts+23399, 4) == 0 { token = U8(TkTEMP) - } else if nId == 9 && Xsqlite3_strnicmp(tls, zSql, ts+23357, 9) == 0 { + } else if nId == 9 && Xsqlite3_strnicmp(tls, zSql, ts+23404, 9) == 0 { token = U8(TkTEMP) } else { token = U8(TkOTHER) @@ -114267,9 +114325,9 @@ fallthrough case 'E': { - if nId == 3 && Xsqlite3_strnicmp(tls, zSql, ts+23367, 3) == 0 { + if nId == 3 && Xsqlite3_strnicmp(tls, zSql, ts+23414, 3) == 0 { token = U8(TkEND) - } else if nId == 7 && Xsqlite3_strnicmp(tls, zSql, ts+23371, 7) == 0 { + } else if nId == 7 && Xsqlite3_strnicmp(tls, zSql, ts+23418, 7) == 0 { token = U8(TkEXPLAIN) } else { token = U8(TkOTHER) @@ -114503,7 +114561,7 @@ var rc int32 = SQLITE_OK if Xsqlite3Config.FisInit != 0 { - return Xsqlite3MisuseError(tls, 174337) + return 
Xsqlite3MisuseError(tls, 174426) } ap = va @@ -115078,7 +115136,7 @@ return SQLITE_OK } if !(Xsqlite3SafetyCheckSickOrOk(tls, db) != 0) { - return Xsqlite3MisuseError(tls, 175111) + return Xsqlite3MisuseError(tls, 175200) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) if int32((*Sqlite3)(unsafe.Pointer(db)).FmTrace)&SQLITE_TRACE_CLOSE != 0 { @@ -115093,7 +115151,7 @@ if !(forceZombie != 0) && connectionIsBusy(tls, db) != 0 { Xsqlite3ErrorWithMsg(tls, db, SQLITE_BUSY, - ts+23379, 0) + ts+23426, 0) Xsqlite3_mutex_leave(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) return SQLITE_BUSY } @@ -115284,23 +115342,23 @@ // Return a static string that describes the kind of error specified in the // argument. func Xsqlite3ErrStr(tls *libc.TLS, rc int32) uintptr { - var zErr uintptr = ts + 23447 + var zErr uintptr = ts + 23494 switch rc { case SQLITE_ABORT | int32(2)<<8: { - zErr = ts + 23461 + zErr = ts + 23508 break } case SQLITE_ROW: { - zErr = ts + 23483 + zErr = ts + 23530 break } case SQLITE_DONE: { - zErr = ts + 23505 + zErr = ts + 23552 break } @@ -115318,35 +115376,35 @@ } var aMsg = [29]uintptr{ - ts + 23528, - ts + 23541, + ts + 23575, + ts + 23588, uintptr(0), - ts + 23557, - ts + 23582, - ts + 23596, - ts + 23615, + ts + 23604, + ts + 23629, + ts + 23643, + ts + 23662, ts + 1490, - ts + 23640, - ts + 23677, - ts + 23689, - ts + 23704, - ts + 23737, - ts + 23755, - ts + 23780, - ts + 23809, + ts + 23687, + ts + 23724, + ts + 23736, + ts + 23751, + ts + 23784, + ts + 23802, + ts + 23827, + ts + 23856, uintptr(0), ts + 5838, ts + 5334, - ts + 23826, - ts + 23844, - ts + 23862, - uintptr(0), - ts + 23896, + ts + 23873, + ts + 23891, + ts + 23909, uintptr(0), - ts + 23917, ts + 23943, - ts + 23966, - ts + 23987, + uintptr(0), + ts + 23964, + ts + 23990, + ts + 24013, + ts + 24034, } func sqliteDefaultBusyCallback(tls *libc.TLS, ptr uintptr, count int32) int32 { @@ -115467,7 +115525,7 @@ libc.Bool32(xValue == uintptr(0)) != libc.Bool32(xInverse == uintptr(0)) || (nArg < -1 || nArg > SQLITE_MAX_FUNCTION_ARG) || 255 < Xsqlite3Strlen30(tls, zFunctionName) { - return Xsqlite3MisuseError(tls, 175758) + return Xsqlite3MisuseError(tls, 175847) } extraFlags = enc & (SQLITE_DETERMINISTIC | SQLITE_DIRECTONLY | SQLITE_SUBTYPE | SQLITE_INNOCUOUS) @@ -115512,7 +115570,7 @@ if p != 0 && (*FuncDef)(unsafe.Pointer(p)).FfuncFlags&U32(SQLITE_FUNC_ENCMASK) == U32(enc) && int32((*FuncDef)(unsafe.Pointer(p)).FnArg) == nArg { if (*Sqlite3)(unsafe.Pointer(db)).FnVdbeActive != 0 { Xsqlite3ErrorWithMsg(tls, db, SQLITE_BUSY, - ts+24003, 0) + ts+24050, 0) return SQLITE_BUSY } else { @@ -115629,7 +115687,7 @@ _ = NotUsed _ = NotUsed2 zErr = Xsqlite3_mprintf(tls, - ts+24066, libc.VaList(bp, zName)) + ts+24113, libc.VaList(bp, zName)) Xsqlite3_result_error(tls, context, zErr, -1) Xsqlite3_free(tls, zErr) } @@ -115865,7 +115923,7 @@ } if iDb < 0 { rc = SQLITE_ERROR - Xsqlite3ErrorWithMsg(tls, db, SQLITE_ERROR, ts+24117, libc.VaList(bp, zDb)) + Xsqlite3ErrorWithMsg(tls, db, SQLITE_ERROR, ts+24164, libc.VaList(bp, zDb)) } else { (*Sqlite3)(unsafe.Pointer(db)).FbusyHandler.FnBusy = 0 rc = Xsqlite3Checkpoint(tls, db, iDb, eMode, pnLog, pnCkpt) @@ -115958,7 +116016,7 @@ return Xsqlite3ErrStr(tls, SQLITE_NOMEM) } if !(Xsqlite3SafetyCheckSickOrOk(tls, db) != 0) { - return Xsqlite3ErrStr(tls, Xsqlite3MisuseError(tls, 176503)) + return Xsqlite3ErrStr(tls, Xsqlite3MisuseError(tls, 176592)) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) if (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { @@ 
-116028,7 +116086,7 @@ // passed to this function, we assume a malloc() failed during sqlite3_open(). func Xsqlite3_errcode(tls *libc.TLS, db uintptr) int32 { if db != 0 && !(Xsqlite3SafetyCheckSickOrOk(tls, db) != 0) { - return Xsqlite3MisuseError(tls, 176582) + return Xsqlite3MisuseError(tls, 176671) } if !(db != 0) || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { return SQLITE_NOMEM @@ -116038,7 +116096,7 @@ func Xsqlite3_extended_errcode(tls *libc.TLS, db uintptr) int32 { if db != 0 && !(Xsqlite3SafetyCheckSickOrOk(tls, db) != 0) { - return Xsqlite3MisuseError(tls, 176591) + return Xsqlite3MisuseError(tls, 176680) } if !(db != 0) || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { return SQLITE_NOMEM @@ -116070,14 +116128,14 @@ enc2 = SQLITE_UTF16LE } if enc2 < SQLITE_UTF8 || enc2 > SQLITE_UTF16BE { - return Xsqlite3MisuseError(tls, 176639) + return Xsqlite3MisuseError(tls, 176728) } pColl = Xsqlite3FindCollSeq(tls, db, U8(enc2), zName, 0) if pColl != 0 && (*CollSeq)(unsafe.Pointer(pColl)).FxCmp != 0 { if (*Sqlite3)(unsafe.Pointer(db)).FnVdbeActive != 0 { Xsqlite3ErrorWithMsg(tls, db, SQLITE_BUSY, - ts+24138, 0) + ts+24185, 0) return SQLITE_BUSY } Xsqlite3ExpirePreparedStatements(tls, db, 0) @@ -116207,7 +116265,7 @@ if !((flags&uint32(SQLITE_OPEN_URI) != 0 || Xsqlite3Config.FbOpenUri != 0) && - nUri >= 5 && libc.Xmemcmp(tls, zUri, ts+24206, uint32(5)) == 0) { + nUri >= 5 && libc.Xmemcmp(tls, zUri, ts+24253, uint32(5)) == 0) { goto __1 } iOut = 0 @@ -116252,10 +116310,10 @@ goto __8 __9: ; - if !(iIn != 7 && (iIn != 16 || libc.Xmemcmp(tls, ts+24212, zUri+7, uint32(9)) != 0)) { + if !(iIn != 7 && (iIn != 16 || libc.Xmemcmp(tls, ts+24259, zUri+7, uint32(9)) != 0)) { goto __10 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24222, + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24269, libc.VaList(bp, iIn-7, zUri+7)) rc = SQLITE_ERROR goto parse_uri_out @@ -116360,7 +116418,7 @@ zVal = zOpt + uintptr(nOpt+1) nVal = Xsqlite3Strlen30(tls, zVal) - if !(nOpt == 3 && libc.Xmemcmp(tls, ts+24250, zOpt, uint32(3)) == 0) { + if !(nOpt == 3 && libc.Xmemcmp(tls, ts+24297, zOpt, uint32(3)) == 0) { goto __29 } zVfs = zVal @@ -116371,17 +116429,17 @@ mask = 0 limit = 0 - if !(nOpt == 5 && libc.Xmemcmp(tls, ts+24254, zOpt, uint32(5)) == 0) { + if !(nOpt == 5 && libc.Xmemcmp(tls, ts+24301, zOpt, uint32(5)) == 0) { goto __31 } mask = SQLITE_OPEN_SHAREDCACHE | SQLITE_OPEN_PRIVATECACHE aMode = uintptr(unsafe.Pointer(&aCacheMode)) limit = mask - zModeType = ts + 24254 + zModeType = ts + 24301 __31: ; - if !(nOpt == 4 && libc.Xmemcmp(tls, ts+24260, zOpt, uint32(4)) == 0) { + if !(nOpt == 4 && libc.Xmemcmp(tls, ts+24307, zOpt, uint32(4)) == 0) { goto __32 } @@ -116419,7 +116477,7 @@ if !(mode == 0) { goto __38 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24265, libc.VaList(bp+16, zModeType, zVal)) + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24312, libc.VaList(bp+16, zModeType, zVal)) rc = SQLITE_ERROR goto parse_uri_out __38: @@ -116427,7 +116485,7 @@ if !(mode&libc.CplInt32(SQLITE_OPEN_MEMORY) > limit) { goto __39 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24285, + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24332, libc.VaList(bp+32, zModeType, zVal)) rc = SQLITE_PERM goto parse_uri_out @@ -116467,7 +116525,7 @@ if !(*(*uintptr)(unsafe.Pointer(ppVfs)) == uintptr(0)) { goto __42 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24309, 
libc.VaList(bp+48, zVfs)) + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24356, libc.VaList(bp+48, zVfs)) rc = SQLITE_ERROR __42: ; @@ -116490,14 +116548,14 @@ } var aCacheMode = [3]OpenMode{ - {Fz: ts + 24325, Fmode: SQLITE_OPEN_SHAREDCACHE}, - {Fz: ts + 24332, Fmode: SQLITE_OPEN_PRIVATECACHE}, + {Fz: ts + 24372, Fmode: SQLITE_OPEN_SHAREDCACHE}, + {Fz: ts + 24379, Fmode: SQLITE_OPEN_PRIVATECACHE}, {}, } var aOpenMode = [5]OpenMode{ - {Fz: ts + 24340, Fmode: SQLITE_OPEN_READONLY}, - {Fz: ts + 24343, Fmode: SQLITE_OPEN_READWRITE}, - {Fz: ts + 24346, Fmode: SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE}, + {Fz: ts + 24387, Fmode: SQLITE_OPEN_READONLY}, + {Fz: ts + 24390, Fmode: SQLITE_OPEN_READWRITE}, + {Fz: ts + 24393, Fmode: SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE}, {Fz: ts + 17362, Fmode: SQLITE_OPEN_MEMORY}, {}, } @@ -116644,10 +116702,10 @@ createCollation(tls, db, uintptr(unsafe.Pointer(&Xsqlite3StrBINARY)), uint8(SQLITE_UTF16LE), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr, int32, uintptr) int32 }{binCollFunc})), uintptr(0)) - createCollation(tls, db, ts+21863, uint8(SQLITE_UTF8), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + createCollation(tls, db, ts+21910, uint8(SQLITE_UTF8), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr, int32, uintptr) int32 }{nocaseCollatingFunc})), uintptr(0)) - createCollation(tls, db, ts+24350, uint8(SQLITE_UTF8), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + createCollation(tls, db, ts+24397, uint8(SQLITE_UTF8), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr, int32, uintptr) int32 }{rtrimCollFunc})), uintptr(0)) if !((*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0) { @@ -116661,7 +116719,7 @@ if !(int32(1)<<(*(*uint32)(unsafe.Pointer(bp + 8))&uint32(7))&0x46 == 0) { goto __16 } - rc = Xsqlite3MisuseError(tls, 177308) + rc = Xsqlite3MisuseError(tls, 177397) goto __17 __16: rc = Xsqlite3ParseUri(tls, zVfs, zFilename, bp+8, db, bp+12, bp+16) @@ -116714,7 +116772,7 @@ (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb)).FzDbSName = ts + 6441 (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb)).Fsafety_level = U8(SQLITE_DEFAULT_SYNCHRONOUS + 1) - (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + 1*16)).FzDbSName = ts + 23352 + (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + 1*16)).FzDbSName = ts + 23399 (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + 1*16)).Fsafety_level = U8(PAGER_SYNCHRONOUS_OFF) (*Sqlite3)(unsafe.Pointer(db)).FeOpenState = U8(SQLITE_STATE_OPEN) @@ -116819,7 +116877,7 @@ return rc } if zFilename == uintptr(0) { - zFilename = ts + 24356 + zFilename = ts + 24403 } pVal = Xsqlite3ValueNew(tls, uintptr(0)) Xsqlite3ValueSetStr(tls, pVal, -1, zFilename, uint8(SQLITE_UTF16LE), uintptr(0)) @@ -116922,21 +116980,21 @@ bp := tls.Alloc(24) defer tls.Free(24) - Xsqlite3_log(tls, iErr, ts+24359, + Xsqlite3_log(tls, iErr, ts+24406, libc.VaList(bp, zType, lineno, uintptr(20)+Xsqlite3_sourceid(tls))) return iErr } func Xsqlite3CorruptError(tls *libc.TLS, lineno int32) int32 { - return Xsqlite3ReportError(tls, SQLITE_CORRUPT, lineno, ts+24384) + return Xsqlite3ReportError(tls, SQLITE_CORRUPT, lineno, ts+24431) } func Xsqlite3MisuseError(tls *libc.TLS, lineno int32) int32 { - return Xsqlite3ReportError(tls, SQLITE_MISUSE, lineno, ts+24404) + return Xsqlite3ReportError(tls, SQLITE_MISUSE, lineno, ts+24451) } func Xsqlite3CantopenError(tls *libc.TLS, 
lineno int32) int32 { - return Xsqlite3ReportError(tls, SQLITE_CANTOPEN, lineno, ts+24411) + return Xsqlite3ReportError(tls, SQLITE_CANTOPEN, lineno, ts+24458) } // This is a convenience routine that makes sure that all thread-specific @@ -117094,7 +117152,7 @@ goto __20 } Xsqlite3DbFree(tls, db, *(*uintptr)(unsafe.Pointer(bp + 24))) - *(*uintptr)(unsafe.Pointer(bp + 24)) = Xsqlite3MPrintf(tls, db, ts+24428, libc.VaList(bp, zTableName, + *(*uintptr)(unsafe.Pointer(bp + 24)) = Xsqlite3MPrintf(tls, db, ts+24475, libc.VaList(bp, zTableName, zColumnName)) rc = SQLITE_ERROR __20: @@ -117750,7 +117808,7 @@ azCompileOpt = Xsqlite3CompileOptions(tls, bp) - if Xsqlite3_strnicmp(tls, zOptName, ts+24456, 7) == 0 { + if Xsqlite3_strnicmp(tls, zOptName, ts+24503, 7) == 0 { zOptName += uintptr(7) } n = Xsqlite3Strlen30(tls, zOptName) @@ -117868,7 +117926,7 @@ Xsqlite3ErrorWithMsg(tls, db, rc, func() uintptr { if rc != 0 { - return ts + 24464 + return ts + 24511 } return uintptr(0) }(), 0) @@ -118045,7 +118103,7 @@ type JsonParse = JsonParse1 var jsonType = [8]uintptr{ - ts + 6181, ts + 6764, ts + 6769, ts + 6191, ts + 6186, ts + 8005, ts + 24487, ts + 24493, + ts + 6181, ts + 7694, ts + 7699, ts + 6191, ts + 6186, ts + 8005, ts + 24534, ts + 24540, } func jsonZero(tls *libc.TLS, p uintptr) { @@ -118198,7 +118256,7 @@ *(*int8)(unsafe.Pointer((*JsonString)(unsafe.Pointer(p)).FzBuf + uintptr(libc.PostIncUint64(&(*JsonString)(unsafe.Pointer(p)).FnUsed, 1)))) = int8('0') *(*int8)(unsafe.Pointer((*JsonString)(unsafe.Pointer(p)).FzBuf + uintptr(libc.PostIncUint64(&(*JsonString)(unsafe.Pointer(p)).FnUsed, 1)))) = int8('0') *(*int8)(unsafe.Pointer((*JsonString)(unsafe.Pointer(p)).FzBuf + uintptr(libc.PostIncUint64(&(*JsonString)(unsafe.Pointer(p)).FnUsed, 1)))) = int8('0' + int32(c)>>4) - c = uint8(*(*int8)(unsafe.Pointer(ts + 24500 + uintptr(int32(c)&0xf)))) + c = uint8(*(*int8)(unsafe.Pointer(ts + 24547 + uintptr(int32(c)&0xf)))) __8: ; __6: @@ -118253,7 +118311,7 @@ default: { if int32((*JsonString)(unsafe.Pointer(p)).FbErr) == 0 { - Xsqlite3_result_error(tls, (*JsonString)(unsafe.Pointer(p)).FpCtx, ts+24517, -1) + Xsqlite3_result_error(tls, (*JsonString)(unsafe.Pointer(p)).FpCtx, ts+24564, -1) (*JsonString)(unsafe.Pointer(p)).FbErr = U8(2) jsonReset(tls, p) } @@ -118317,13 +118375,13 @@ } case JSON_TRUE: { - jsonAppendRaw(tls, pOut, ts+6764, uint32(4)) + jsonAppendRaw(tls, pOut, ts+7694, uint32(4)) break } case JSON_FALSE: { - jsonAppendRaw(tls, pOut, ts+6769, uint32(5)) + jsonAppendRaw(tls, pOut, ts+7699, uint32(5)) break } @@ -118873,12 +118931,12 @@ jsonParseAddNode(tls, pParse, uint32(JSON_NULL), uint32(0), uintptr(0)) return int32(i + U32(4)) } else if int32(c) == 't' && - libc.Xstrncmp(tls, z+uintptr(i), ts+6764, uint32(4)) == 0 && + libc.Xstrncmp(tls, z+uintptr(i), ts+7694, uint32(4)) == 0 && !(int32(Xsqlite3CtypeMap[uint8(*(*int8)(unsafe.Pointer(z + uintptr(i+U32(4)))))])&0x06 != 0) { jsonParseAddNode(tls, pParse, uint32(JSON_TRUE), uint32(0), uintptr(0)) return int32(i + U32(4)) } else if int32(c) == 'f' && - libc.Xstrncmp(tls, z+uintptr(i), ts+6769, uint32(5)) == 0 && + libc.Xstrncmp(tls, z+uintptr(i), ts+7699, uint32(5)) == 0 && !(int32(Xsqlite3CtypeMap[uint8(*(*int8)(unsafe.Pointer(z + uintptr(i+U32(5)))))])&0x06 != 0) { jsonParseAddNode(tls, pParse, uint32(JSON_FALSE), uint32(0), uintptr(0)) return int32(i + U32(5)) @@ -118979,7 +119037,7 @@ if (*JsonParse)(unsafe.Pointer(pParse)).Foom != 0 { Xsqlite3_result_error_nomem(tls, pCtx) } else { - Xsqlite3_result_error(tls, pCtx, ts+24546, -1) + 
Xsqlite3_result_error(tls, pCtx, ts+24593, -1) } } jsonParseReset(tls, pParse) @@ -119285,7 +119343,7 @@ } if int32(*(*int8)(unsafe.Pointer(zPath))) == '.' { jsonParseAddNode(tls, pParse, uint32(JSON_OBJECT), uint32(0), uintptr(0)) - } else if libc.Xstrncmp(tls, zPath, ts+24561, uint32(3)) == 0 { + } else if libc.Xstrncmp(tls, zPath, ts+24608, uint32(3)) == 0 { jsonParseAddNode(tls, pParse, uint32(JSON_ARRAY), uint32(0), uintptr(0)) } else { return uintptr(0) @@ -119300,7 +119358,7 @@ bp := tls.Alloc(8) defer tls.Free(8) - return Xsqlite3_mprintf(tls, ts+24565, libc.VaList(bp, zErr)) + return Xsqlite3_mprintf(tls, ts+24612, libc.VaList(bp, zErr)) } func jsonLookup(tls *libc.TLS, pParse uintptr, zPath uintptr, pApnd uintptr, pCtx uintptr) uintptr { @@ -119354,7 +119412,7 @@ bp := tls.Alloc(8) defer tls.Free(8) - var zMsg uintptr = Xsqlite3_mprintf(tls, ts+24591, + var zMsg uintptr = Xsqlite3_mprintf(tls, ts+24638, libc.VaList(bp, zFuncName)) Xsqlite3_result_error(tls, pCtx, zMsg, -1) Xsqlite3_free(tls, zMsg) @@ -119459,11 +119517,11 @@ if int32(*(*int8)(unsafe.Pointer(zPath))) != '$' { jsonInit(tls, bp, ctx) if int32(Xsqlite3CtypeMap[uint8(*(*int8)(unsafe.Pointer(zPath)))])&0x04 != 0 { - jsonAppendRaw(tls, bp, ts+24634, uint32(2)) + jsonAppendRaw(tls, bp, ts+24681, uint32(2)) jsonAppendRaw(tls, bp, zPath, uint32(int32(libc.Xstrlen(tls, zPath)))) jsonAppendRaw(tls, bp, ts+4998, uint32(2)) } else { - jsonAppendRaw(tls, bp, ts+24637, uint32(1+libc.Bool32(int32(*(*int8)(unsafe.Pointer(zPath))) != '['))) + jsonAppendRaw(tls, bp, ts+24684, uint32(1+libc.Bool32(int32(*(*int8)(unsafe.Pointer(zPath))) != '['))) jsonAppendRaw(tls, bp, zPath, uint32(int32(libc.Xstrlen(tls, zPath)))) jsonAppendChar(tls, bp, int8(0)) } @@ -119620,14 +119678,14 @@ if argc&1 != 0 { Xsqlite3_result_error(tls, ctx, - ts+24640, -1) + ts+24687, -1) return } jsonInit(tls, bp, ctx) jsonAppendChar(tls, bp, int8('{')) for i = 0; i < argc; i = i + 2 { if Xsqlite3_value_type(tls, *(*uintptr)(unsafe.Pointer(argv + uintptr(i)*4))) != SQLITE_TEXT { - Xsqlite3_result_error(tls, ctx, ts+24691, -1) + Xsqlite3_result_error(tls, ctx, ts+24738, -1) jsonReset(tls, bp) return } @@ -119797,9 +119855,9 @@ } jsonWrongNumArgs(tls, ctx, func() uintptr { if bIsSet != 0 { - return ts + 24725 + return ts + 24772 } - return ts + 24729 + return ts + 24776 }()) return __2: @@ -119932,7 +119990,7 @@ (*JsonString)(unsafe.Pointer(pStr)).FnUsed-- } } else { - Xsqlite3_result_text(tls, ctx, ts+24736, 2, uintptr(0)) + Xsqlite3_result_text(tls, ctx, ts+24783, 2, uintptr(0)) } Xsqlite3_result_subtype(tls, ctx, uint32(JSON_SUBTYPE)) } @@ -120029,7 +120087,7 @@ (*JsonString)(unsafe.Pointer(pStr)).FnUsed-- } } else { - Xsqlite3_result_text(tls, ctx, ts+24739, 2, uintptr(0)) + Xsqlite3_result_text(tls, ctx, ts+24786, 2, uintptr(0)) } Xsqlite3_result_subtype(tls, ctx, uint32(JSON_SUBTYPE)) } @@ -120073,7 +120131,7 @@ _ = argc _ = pAux rc = Xsqlite3_declare_vtab(tls, db, - ts+24742) + ts+24789) if rc == SQLITE_OK { pNew = libc.AssignPtrUintptr(ppVtab, Xsqlite3_malloc(tls, int32(unsafe.Sizeof(Sqlite3_vtab{})))) if pNew == uintptr(0) { @@ -120204,7 +120262,7 @@ nn = nn - 2 } } - jsonPrintf(tls, nn+2, pStr, ts+24825, libc.VaList(bp, nn, z)) + jsonPrintf(tls, nn+2, pStr, ts+24872, libc.VaList(bp, nn, z)) } func jsonEachComputePath(tls *libc.TLS, p uintptr, pStr uintptr, i U32) { @@ -120223,7 +120281,7 @@ pNode = (*JsonEachCursor)(unsafe.Pointer(p)).FsParse.FaNode + uintptr(i)*12 pUp = (*JsonEachCursor)(unsafe.Pointer(p)).FsParse.FaNode + uintptr(iUp)*12 if 
int32((*JsonNode)(unsafe.Pointer(pUp)).FeType) == JSON_ARRAY { - jsonPrintf(tls, 30, pStr, ts+24831, libc.VaList(bp, *(*U32)(unsafe.Pointer(pUp + 8)))) + jsonPrintf(tls, 30, pStr, ts+24878, libc.VaList(bp, *(*U32)(unsafe.Pointer(pUp + 8)))) } else { if int32((*JsonNode)(unsafe.Pointer(pNode)).FjnFlags)&JNODE_LABEL == 0 { pNode -= 12 @@ -120319,7 +120377,7 @@ jsonAppendChar(tls, bp+8, int8('$')) } if int32((*JsonEachCursor)(unsafe.Pointer(p)).FeType) == JSON_ARRAY { - jsonPrintf(tls, 30, bp+8, ts+24831, libc.VaList(bp, (*JsonEachCursor)(unsafe.Pointer(p)).FiRowid)) + jsonPrintf(tls, 30, bp+8, ts+24878, libc.VaList(bp, (*JsonEachCursor)(unsafe.Pointer(p)).FiRowid)) } else if int32((*JsonEachCursor)(unsafe.Pointer(p)).FeType) == JSON_OBJECT { jsonAppendObjectPathElement(tls, bp+8, pThis) } @@ -120343,7 +120401,7 @@ { var zRoot uintptr = (*JsonEachCursor)(unsafe.Pointer(p)).FzRoot if zRoot == uintptr(0) { - zRoot = ts + 24836 + zRoot = ts + 24883 } Xsqlite3_result_text(tls, ctx, zRoot, -1, uintptr(0)) break @@ -120465,7 +120523,7 @@ var rc int32 = SQLITE_NOMEM if int32((*JsonEachCursor)(unsafe.Pointer(p)).FsParse.Foom) == 0 { Xsqlite3_free(tls, (*Sqlite3_vtab)(unsafe.Pointer((*Sqlite3_vtab_cursor)(unsafe.Pointer(cur)).FpVtab)).FzErrMsg) - (*Sqlite3_vtab)(unsafe.Pointer((*Sqlite3_vtab_cursor)(unsafe.Pointer(cur)).FpVtab)).FzErrMsg = Xsqlite3_mprintf(tls, ts+24546, 0) + (*Sqlite3_vtab)(unsafe.Pointer((*Sqlite3_vtab_cursor)(unsafe.Pointer(cur)).FpVtab)).FzErrMsg = Xsqlite3_mprintf(tls, ts+24593, 0) if (*Sqlite3_vtab)(unsafe.Pointer((*Sqlite3_vtab_cursor)(unsafe.Pointer(cur)).FpVtab)).FzErrMsg != 0 { rc = SQLITE_ERROR } @@ -120560,25 +120618,25 @@ } var aJsonFunc = [19]FuncDef{ - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24838}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24843}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24854}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24854}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24872}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(JSON_JSON), FxSFunc: 0, FzName: ts + 24885}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(JSON_SQL), FxSFunc: 0, FzName: ts + 24888}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24892}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24904}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24916}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24927}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: 
ts + 24938}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24950}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(JSON_ISSET), FxSFunc: 0, FzName: ts + 24963}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24972}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24972}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24982}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_UTF8 | 0*SQLITE_FUNC_NEEDCOLL | SQLITE_SUBTYPE | SQLITE_UTF8 | SQLITE_DETERMINISTIC), FxSFunc: 0, FxFinalize: 0, FxValue: 0, FxInverse: 0, FzName: ts + 24993}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_UTF8 | 0*SQLITE_FUNC_NEEDCOLL | SQLITE_SUBTYPE | SQLITE_UTF8 | SQLITE_DETERMINISTIC), FxSFunc: 0, FxFinalize: 0, FxValue: 0, FxInverse: 0, FzName: ts + 25010}} + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24885}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24890}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24901}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24901}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24919}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(JSON_JSON), FxSFunc: 0, FzName: ts + 24932}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(JSON_SQL), FxSFunc: 0, FzName: ts + 24935}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24939}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24951}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24963}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24974}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24985}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24997}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(JSON_ISSET), FxSFunc: 0, FzName: ts + 25010}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | 
SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25019}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25019}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25029}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_UTF8 | 0*SQLITE_FUNC_NEEDCOLL | SQLITE_SUBTYPE | SQLITE_UTF8 | SQLITE_DETERMINISTIC), FxSFunc: 0, FxFinalize: 0, FxValue: 0, FxInverse: 0, FzName: ts + 25040}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_UTF8 | 0*SQLITE_FUNC_NEEDCOLL | SQLITE_SUBTYPE | SQLITE_UTF8 | SQLITE_DETERMINISTIC), FxSFunc: 0, FxFinalize: 0, FxValue: 0, FxInverse: 0, FzName: ts + 25057}} // Register the JSON table-valued functions func Xsqlite3JsonTableFunctions(tls *libc.TLS, db uintptr) int32 { @@ -120597,8 +120655,8 @@ FzName uintptr FpModule uintptr }{ - {FzName: ts + 25028, FpModule: 0}, - {FzName: ts + 25038, FpModule: 0}, + {FzName: ts + 25075, FpModule: 0}, + {FzName: ts + 25085, FpModule: 0}, } type Rtree1 = struct { @@ -120851,11 +120909,11 @@ } } if (*Rtree)(unsafe.Pointer(pRtree)).FpNodeBlob == uintptr(0) { - var zTab uintptr = Xsqlite3_mprintf(tls, ts+25048, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) + var zTab uintptr = Xsqlite3_mprintf(tls, ts+25095, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) if zTab == uintptr(0) { return SQLITE_NOMEM } - rc = Xsqlite3_blob_open(tls, (*Rtree)(unsafe.Pointer(pRtree)).Fdb, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, zTab, ts+25056, iNode, 0, + rc = Xsqlite3_blob_open(tls, (*Rtree)(unsafe.Pointer(pRtree)).Fdb, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, zTab, ts+25103, iNode, 0, pRtree+72) Xsqlite3_free(tls, zTab) } @@ -121066,7 +121124,7 @@ var pRtree uintptr = pVtab var rc int32 var zCreate uintptr = Xsqlite3_mprintf(tls, - ts+25061, + ts+25108, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) @@ -121764,7 +121822,7 @@ var pSrc uintptr var pInfo uintptr - pSrc = Xsqlite3_value_pointer(tls, pValue, ts+25143) + pSrc = Xsqlite3_value_pointer(tls, pValue, ts+25190) if pSrc == uintptr(0) { return SQLITE_ERROR } @@ -123105,7 +123163,7 @@ var zSql uintptr var rc int32 - zSql = Xsqlite3_mprintf(tls, ts+25157, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) + zSql = Xsqlite3_mprintf(tls, ts+25204, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) if zSql != 0 { rc = Xsqlite3_prepare_v2(tls, (*Rtree)(unsafe.Pointer(pRtree)).Fdb, zSql, -1, bp+56, uintptr(0)) } else { @@ -123117,12 +123175,12 @@ if iCol == 0 { var zCol uintptr = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), 0) (*Rtree)(unsafe.Pointer(pRtree)).Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+25177, libc.VaList(bp+16, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zCol)) + ts+25224, libc.VaList(bp+16, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zCol)) } else { var zCol1 uintptr = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), iCol) var zCol2 uintptr = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), iCol+1) (*Rtree)(unsafe.Pointer(pRtree)).Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+25209, libc.VaList(bp+32, 
(*Rtree)(unsafe.Pointer(pRtree)).FzName, zCol1, zCol2)) + ts+25256, libc.VaList(bp+32, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zCol1, zCol2)) } } @@ -123348,7 +123406,7 @@ var pRtree uintptr = pVtab var rc int32 = SQLITE_NOMEM var zSql uintptr = Xsqlite3_mprintf(tls, - ts+25246, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName)) + ts+25293, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName)) if zSql != 0 { nodeBlobReset(tls, pRtree) rc = Xsqlite3_exec(tls, (*Rtree)(unsafe.Pointer(pRtree)).Fdb, zSql, uintptr(0), uintptr(0), uintptr(0)) @@ -123371,7 +123429,7 @@ bp := tls.Alloc(20) defer tls.Free(20) - var zFmt uintptr = ts + 25391 + var zFmt uintptr = ts + 25438 var zSql uintptr var rc int32 @@ -123419,7 +123477,7 @@ } var azName1 = [3]uintptr{ - ts + 25447, ts + 5060, ts + 16267, + ts + 25494, ts + 5060, ts + 16267, } var rtreeModule = Sqlite3_module{ @@ -123462,19 +123520,19 @@ var p uintptr = Xsqlite3_str_new(tls, db) var ii int32 Xsqlite3_str_appendf(tls, p, - ts+25452, + ts+25499, libc.VaList(bp, zDb, zPrefix)) for ii = 0; ii < int32((*Rtree)(unsafe.Pointer(pRtree)).FnAux); ii++ { - Xsqlite3_str_appendf(tls, p, ts+25514, libc.VaList(bp+16, ii)) + Xsqlite3_str_appendf(tls, p, ts+25561, libc.VaList(bp+16, ii)) } Xsqlite3_str_appendf(tls, p, - ts+25519, + ts+25566, libc.VaList(bp+24, zDb, zPrefix)) Xsqlite3_str_appendf(tls, p, - ts+25583, + ts+25630, libc.VaList(bp+40, zDb, zPrefix)) Xsqlite3_str_appendf(tls, p, - ts+25653, + ts+25700, libc.VaList(bp+56, zDb, zPrefix, (*Rtree)(unsafe.Pointer(pRtree)).FiNodeSize)) zCreate = Xsqlite3_str_finish(tls, p) if !(zCreate != 0) { @@ -123503,7 +123561,7 @@ if i != 3 || int32((*Rtree)(unsafe.Pointer(pRtree)).FnAux) == 0 { zFormat = azSql[i] } else { - zFormat = ts + 25702 + zFormat = ts + 25749 } zSql = Xsqlite3_mprintf(tls, zFormat, libc.VaList(bp+80, zDb, zPrefix)) if zSql != 0 { @@ -123515,7 +123573,7 @@ } if (*Rtree)(unsafe.Pointer(pRtree)).FnAux != 0 { (*Rtree)(unsafe.Pointer(pRtree)).FzReadAuxSql = Xsqlite3_mprintf(tls, - ts+25810, + ts+25857, libc.VaList(bp+96, zDb, zPrefix)) if (*Rtree)(unsafe.Pointer(pRtree)).FzReadAuxSql == uintptr(0) { rc = SQLITE_NOMEM @@ -123523,18 +123581,18 @@ var p uintptr = Xsqlite3_str_new(tls, db) var ii int32 var zSql uintptr - Xsqlite3_str_appendf(tls, p, ts+25855, libc.VaList(bp+112, zDb, zPrefix)) + Xsqlite3_str_appendf(tls, p, ts+25902, libc.VaList(bp+112, zDb, zPrefix)) for ii = 0; ii < int32((*Rtree)(unsafe.Pointer(pRtree)).FnAux); ii++ { if ii != 0 { Xsqlite3_str_append(tls, p, ts+12767, 1) } if ii < int32((*Rtree)(unsafe.Pointer(pRtree)).FnAuxNotNull) { - Xsqlite3_str_appendf(tls, p, ts+25882, libc.VaList(bp+128, ii, ii+2, ii)) + Xsqlite3_str_appendf(tls, p, ts+25929, libc.VaList(bp+128, ii, ii+2, ii)) } else { - Xsqlite3_str_appendf(tls, p, ts+25904, libc.VaList(bp+152, ii, ii+2)) + Xsqlite3_str_appendf(tls, p, ts+25951, libc.VaList(bp+152, ii, ii+2)) } } - Xsqlite3_str_appendf(tls, p, ts+25912, 0) + Xsqlite3_str_appendf(tls, p, ts+25959, 0) zSql = Xsqlite3_str_finish(tls, p) if zSql == uintptr(0) { rc = SQLITE_NOMEM @@ -123549,14 +123607,14 
@@ } var azSql = [8]uintptr{ - ts + 25928, - ts + 25981, - ts + 26026, - ts + 26078, - ts + 26132, - ts + 26177, - ts + 26235, - ts + 26290, + ts + 25975, + ts + 26028, + ts + 26073, + ts + 26125, + ts + 26179, + ts + 26224, + ts + 26282, + ts + 26337, } func getIntFromStmt(tls *libc.TLS, db uintptr, zSql uintptr, piVal uintptr) int32 { @@ -123585,7 +123643,7 @@ var zSql uintptr if isCreate != 0 { *(*int32)(unsafe.Pointer(bp + 48)) = 0 - zSql = Xsqlite3_mprintf(tls, ts+26337, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb)) + zSql = Xsqlite3_mprintf(tls, ts+26384, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb)) rc = getIntFromStmt(tls, db, zSql, bp+48) if rc == SQLITE_OK { (*Rtree)(unsafe.Pointer(pRtree)).FiNodeSize = *(*int32)(unsafe.Pointer(bp + 48)) - 64 @@ -123597,7 +123655,7 @@ } } else { zSql = Xsqlite3_mprintf(tls, - ts+26357, + ts+26404, libc.VaList(bp+16, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) rc = getIntFromStmt(tls, db, zSql, pRtree+16) if rc != SQLITE_OK { @@ -123605,7 +123663,7 @@ } else if (*Rtree)(unsafe.Pointer(pRtree)).FiNodeSize < 512-64 { rc = SQLITE_CORRUPT | int32(1)<<8 - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+26414, + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+26461, libc.VaList(bp+40, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) } } @@ -123647,10 +123705,10 @@ ii = 4 *(*[5]uintptr)(unsafe.Pointer(bp + 96)) = [5]uintptr{ uintptr(0), - ts + 26449, - ts + 26492, - ts + 26527, - ts + 26563, + ts + 26496, + ts + 26539, + ts + 26574, + ts + 26610, } if !(argc < 6 || argc > RTREE_MAX_AUX_COLUMN+3) { @@ -123681,7 +123739,7 @@ libc.Xmemcpy(tls, (*Rtree)(unsafe.Pointer(pRtree)).FzName, *(*uintptr)(unsafe.Pointer(argv + 2*4)), uint32(nName)) pSql = Xsqlite3_str_new(tls, db) - Xsqlite3_str_appendf(tls, pSql, ts+26600, + Xsqlite3_str_appendf(tls, pSql, ts+26647, libc.VaList(bp+16, rtreeTokenLength(tls, *(*uintptr)(unsafe.Pointer(argv + 3*4))), *(*uintptr)(unsafe.Pointer(argv + 3*4)))) ii = 4 __3: @@ -123693,7 +123751,7 @@ goto __6 } (*Rtree)(unsafe.Pointer(pRtree)).FnAux++ - Xsqlite3_str_appendf(tls, pSql, ts+26624, libc.VaList(bp+32, rtreeTokenLength(tls, zArg+uintptr(1)), zArg+uintptr(1))) + Xsqlite3_str_appendf(tls, pSql, ts+26671, libc.VaList(bp+32, rtreeTokenLength(tls, zArg+uintptr(1)), zArg+uintptr(1))) goto __7 __6: if !(int32((*Rtree)(unsafe.Pointer(pRtree)).FnAux) > 0) { @@ -123716,7 +123774,7 @@ goto __5 __5: ; - Xsqlite3_str_appendf(tls, pSql, ts+26630, 0) + Xsqlite3_str_appendf(tls, pSql, ts+26677, 0) zSql = Xsqlite3_str_finish(tls, pSql) if !!(zSql != 0) { goto __10 @@ -123812,7 +123870,7 @@ return rc } -var azFormat = [2]uintptr{ts + 26633, ts + 26644} +var azFormat = [2]uintptr{ts + 26680, ts + 26691} func rtreenode(tls *libc.TLS, ctx uintptr, nArg int32, apArg uintptr) { bp := tls.Alloc(592) @@ -123852,11 +123910,11 @@ if ii > 0 { Xsqlite3_str_append(tls, pOut, ts+10920, 1) } - Xsqlite3_str_appendf(tls, pOut, ts+26654, libc.VaList(bp, (*RtreeCell)(unsafe.Pointer(bp+544)).FiRowid)) + Xsqlite3_str_appendf(tls, pOut, ts+26701, libc.VaList(bp, (*RtreeCell)(unsafe.Pointer(bp+544)).FiRowid)) for jj = 0; jj < int32((*Rtree)(unsafe.Pointer(bp+44)).FnDim2); jj++ { - Xsqlite3_str_appendf(tls, pOut, ts+26660, libc.VaList(bp+8, float64(*(*RtreeValue)(unsafe.Pointer(bp + 544 + 8 + uintptr(jj)*4))))) + Xsqlite3_str_appendf(tls, pOut, ts+26707, libc.VaList(bp+8, float64(*(*RtreeValue)(unsafe.Pointer(bp + 544 + 8 + uintptr(jj)*4))))) } - Xsqlite3_str_append(tls, pOut, ts+26664, 1) + 
Xsqlite3_str_append(tls, pOut, ts+26711, 1) } errCode = Xsqlite3_str_errcode(tls, pOut) Xsqlite3_result_text(tls, ctx, Xsqlite3_str_finish(tls, pOut), -1, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{Xsqlite3_free}))) @@ -123867,7 +123925,7 @@ _ = nArg if Xsqlite3_value_type(tls, *(*uintptr)(unsafe.Pointer(apArg))) != SQLITE_BLOB || Xsqlite3_value_bytes(tls, *(*uintptr)(unsafe.Pointer(apArg))) < 2 { - Xsqlite3_result_error(tls, ctx, ts+26666, -1) + Xsqlite3_result_error(tls, ctx, ts+26713, -1) } else { var zBlob uintptr = Xsqlite3_value_blob(tls, *(*uintptr)(unsafe.Pointer(apArg))) if zBlob != 0 { @@ -123943,7 +124001,7 @@ if z == uintptr(0) { (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc = SQLITE_NOMEM } else { - (*RtreeCheck)(unsafe.Pointer(pCheck)).FzReport = Xsqlite3_mprintf(tls, ts+26699, + (*RtreeCheck)(unsafe.Pointer(pCheck)).FzReport = Xsqlite3_mprintf(tls, ts+26746, libc.VaList(bp, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzReport, func() uintptr { if (*RtreeCheck)(unsafe.Pointer(pCheck)).FzReport != 0 { return ts + 4062 @@ -123967,7 +124025,7 @@ if (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc == SQLITE_OK && (*RtreeCheck)(unsafe.Pointer(pCheck)).FpGetNode == uintptr(0) { (*RtreeCheck)(unsafe.Pointer(pCheck)).FpGetNode = rtreeCheckPrepare(tls, pCheck, - ts+26706, + ts+26753, libc.VaList(bp, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzDb, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzTab)) } @@ -123986,7 +124044,7 @@ } rtreeCheckReset(tls, pCheck, (*RtreeCheck)(unsafe.Pointer(pCheck)).FpGetNode) if (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc == SQLITE_OK && pRet == uintptr(0) { - rtreeCheckAppendMsg(tls, pCheck, ts+26751, libc.VaList(bp+16, iNode)) + rtreeCheckAppendMsg(tls, pCheck, ts+26798, libc.VaList(bp+16, iNode)) } } @@ -124000,8 +124058,8 @@ var rc int32 var pStmt uintptr *(*[2]uintptr)(unsafe.Pointer(bp + 80)) = [2]uintptr{ - ts + 26783, - ts + 26837, + ts + 26830, + ts + 26884, } if *(*uintptr)(unsafe.Pointer(pCheck + 24 + uintptr(bLeaf)*4)) == uintptr(0) { @@ -124016,23 +124074,23 @@ Xsqlite3_bind_int64(tls, pStmt, 1, iKey) rc = Xsqlite3_step(tls, pStmt) if rc == SQLITE_DONE { - rtreeCheckAppendMsg(tls, pCheck, ts+26885, + rtreeCheckAppendMsg(tls, pCheck, ts+26932, libc.VaList(bp+16, iKey, iVal, func() uintptr { if bLeaf != 0 { - return ts + 26930 + return ts + 26977 } - return ts + 26938 + return ts + 26985 }())) } else if rc == SQLITE_ROW { var ii I64 = Xsqlite3_column_int64(tls, pStmt, 0) if ii != iVal { rtreeCheckAppendMsg(tls, pCheck, - ts+26947, + ts+26994, libc.VaList(bp+40, iKey, ii, func() uintptr { if bLeaf != 0 { - return ts + 26930 + return ts + 26977 } - return ts + 26938 + return ts + 26985 }(), iKey, iVal)) } } @@ -124056,7 +124114,7 @@ return libc.Bool32(*(*RtreeValue)(unsafe.Pointer(bp + 48)) > *(*RtreeValue)(unsafe.Pointer(bp + 52))) }() != 0 { rtreeCheckAppendMsg(tls, pCheck, - ts+27005, libc.VaList(bp, i, iCell, iNode)) + ts+27052, libc.VaList(bp, i, iCell, iNode)) } if pParent != 0 { @@ -124076,7 +124134,7 @@ return libc.Bool32(*(*RtreeValue)(unsafe.Pointer(bp + 52)) > *(*RtreeValue)(unsafe.Pointer(bp + 60))) }() != 0 { rtreeCheckAppendMsg(tls, pCheck, - ts+27053, libc.VaList(bp+24, i, iCell, iNode)) + ts+27100, libc.VaList(bp+24, i, iCell, iNode)) } } } @@ -124093,14 +124151,14 @@ if aNode != 0 { if *(*int32)(unsafe.Pointer(bp + 48)) < 4 { rtreeCheckAppendMsg(tls, pCheck, - ts+27120, libc.VaList(bp, iNode, *(*int32)(unsafe.Pointer(bp + 48)))) + ts+27167, libc.VaList(bp, iNode, *(*int32)(unsafe.Pointer(bp + 48)))) } else { var nCell int32 var i int32 if 
aParent == uintptr(0) { iDepth = readInt16(tls, aNode) if iDepth > RTREE_MAX_DEPTH { - rtreeCheckAppendMsg(tls, pCheck, ts+27154, libc.VaList(bp+16, iDepth)) + rtreeCheckAppendMsg(tls, pCheck, ts+27201, libc.VaList(bp+16, iDepth)) Xsqlite3_free(tls, aNode) return } @@ -124108,7 +124166,7 @@ nCell = readInt16(tls, aNode+2) if 4+nCell*(8+(*RtreeCheck)(unsafe.Pointer(pCheck)).FnDim*2*4) > *(*int32)(unsafe.Pointer(bp + 48)) { rtreeCheckAppendMsg(tls, pCheck, - ts+27184, + ts+27231, libc.VaList(bp+24, iNode, nCell, *(*int32)(unsafe.Pointer(bp + 48)))) } else { for i = 0; i < nCell; i++ { @@ -124137,14 +124195,14 @@ if (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc == SQLITE_OK { var pCount uintptr - pCount = rtreeCheckPrepare(tls, pCheck, ts+27239, + pCount = rtreeCheckPrepare(tls, pCheck, ts+27286, libc.VaList(bp, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzDb, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzTab, zTbl)) if pCount != 0 { if Xsqlite3_step(tls, pCount) == SQLITE_ROW { var nActual I64 = Xsqlite3_column_int64(tls, pCount, 0) if nActual != nExpect { rtreeCheckAppendMsg(tls, pCheck, - ts+27270, libc.VaList(bp+24, zTbl, nExpect, nActual)) + ts+27317, libc.VaList(bp+24, zTbl, nExpect, nActual)) } } (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc = Xsqlite3_finalize(tls, pCount) @@ -124171,7 +124229,7 @@ } if (*RtreeCheck)(unsafe.Pointer(bp+32)).Frc == SQLITE_OK { - pStmt = rtreeCheckPrepare(tls, bp+32, ts+27337, libc.VaList(bp, zDb, zTab)) + pStmt = rtreeCheckPrepare(tls, bp+32, ts+27384, libc.VaList(bp, zDb, zTab)) if pStmt != 0 { nAux = Xsqlite3_column_count(tls, pStmt) - 2 Xsqlite3_finalize(tls, pStmt) @@ -124180,12 +124238,12 @@ } } - pStmt = rtreeCheckPrepare(tls, bp+32, ts+25157, libc.VaList(bp+16, zDb, zTab)) + pStmt = rtreeCheckPrepare(tls, bp+32, ts+25204, libc.VaList(bp+16, zDb, zTab)) if pStmt != 0 { var rc int32 (*RtreeCheck)(unsafe.Pointer(bp + 32)).FnDim = (Xsqlite3_column_count(tls, pStmt) - 1 - nAux) / 2 if (*RtreeCheck)(unsafe.Pointer(bp+32)).FnDim < 1 { - rtreeCheckAppendMsg(tls, bp+32, ts+27365, 0) + rtreeCheckAppendMsg(tls, bp+32, ts+27412, 0) } else if SQLITE_ROW == Xsqlite3_step(tls, pStmt) { (*RtreeCheck)(unsafe.Pointer(bp + 32)).FbInt = libc.Bool32(Xsqlite3_column_type(tls, pStmt, 1) == SQLITE_INTEGER) } @@ -124199,8 +124257,8 @@ if (*RtreeCheck)(unsafe.Pointer(bp+32)).Frc == SQLITE_OK { rtreeCheckNode(tls, bp+32, 0, uintptr(0), int64(1)) } - rtreeCheckCount(tls, bp+32, ts+27396, int64((*RtreeCheck)(unsafe.Pointer(bp+32)).FnLeaf)) - rtreeCheckCount(tls, bp+32, ts+27403, int64((*RtreeCheck)(unsafe.Pointer(bp+32)).FnNonLeaf)) + rtreeCheckCount(tls, bp+32, ts+27443, int64((*RtreeCheck)(unsafe.Pointer(bp+32)).FnLeaf)) + rtreeCheckCount(tls, bp+32, ts+27450, int64((*RtreeCheck)(unsafe.Pointer(bp+32)).FnNonLeaf)) } Xsqlite3_finalize(tls, (*RtreeCheck)(unsafe.Pointer(bp+32)).FpGetNode) @@ -124208,7 +124266,7 @@ Xsqlite3_finalize(tls, *(*uintptr)(unsafe.Pointer(bp + 32 + 24 + 1*4))) if bEnd != 0 { - var rc int32 = Xsqlite3_exec(tls, db, ts+27411, uintptr(0), uintptr(0), uintptr(0)) + var rc int32 = Xsqlite3_exec(tls, db, ts+27458, uintptr(0), uintptr(0), uintptr(0)) if (*RtreeCheck)(unsafe.Pointer(bp+32)).Frc == SQLITE_OK { (*RtreeCheck)(unsafe.Pointer(bp + 32)).Frc = rc } @@ -124223,7 +124281,7 @@ if nArg != 1 && nArg != 2 { Xsqlite3_result_error(tls, ctx, - ts+27415, -1) + ts+27462, -1) } else { var rc int32 *(*uintptr)(unsafe.Pointer(bp)) = uintptr(0) @@ -124241,7 +124299,7 @@ if *(*uintptr)(unsafe.Pointer(bp)) != 0 { return *(*uintptr)(unsafe.Pointer(bp)) } - return ts + 18012 + return ts 
+ 18059 }(), -1, libc.UintptrFromInt32(-1)) } else { Xsqlite3_result_error_code(tls, ctx, rc) @@ -124611,11 +124669,11 @@ var db uintptr = Xsqlite3_context_db_handle(tls, context) var x uintptr = Xsqlite3_str_new(tls, db) var i int32 - Xsqlite3_str_append(tls, x, ts+27466, 1) + Xsqlite3_str_append(tls, x, ts+27513, 1) for i = 0; i < (*GeoPoly)(unsafe.Pointer(p)).FnVertex; i++ { - Xsqlite3_str_appendf(tls, x, ts+27468, libc.VaList(bp, float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2)*4))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2+1)*4))))) + Xsqlite3_str_appendf(tls, x, ts+27515, libc.VaList(bp, float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2)*4))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2+1)*4))))) } - Xsqlite3_str_appendf(tls, x, ts+27479, libc.VaList(bp+16, float64(*(*GeoCoord)(unsafe.Pointer(p + 8))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + 1*4))))) + Xsqlite3_str_appendf(tls, x, ts+27526, libc.VaList(bp+16, float64(*(*GeoCoord)(unsafe.Pointer(p + 8))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + 1*4))))) Xsqlite3_result_text(tls, context, Xsqlite3_str_finish(tls, x), -1, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{Xsqlite3_free}))) Xsqlite3_free(tls, p) } @@ -124635,19 +124693,19 @@ var x uintptr = Xsqlite3_str_new(tls, db) var i int32 var cSep int8 = int8('\'') - Xsqlite3_str_appendf(tls, x, ts+27490, 0) + Xsqlite3_str_appendf(tls, x, ts+27537, 0) for i = 0; i < (*GeoPoly)(unsafe.Pointer(p)).FnVertex; i++ { - Xsqlite3_str_appendf(tls, x, ts+27508, libc.VaList(bp, int32(cSep), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2)*4))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2+1)*4))))) + Xsqlite3_str_appendf(tls, x, ts+27555, libc.VaList(bp, int32(cSep), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2)*4))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2+1)*4))))) cSep = int8(' ') } - Xsqlite3_str_appendf(tls, x, ts+27516, libc.VaList(bp+24, float64(*(*GeoCoord)(unsafe.Pointer(p + 8))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + 1*4))))) + Xsqlite3_str_appendf(tls, x, ts+27563, libc.VaList(bp+24, float64(*(*GeoCoord)(unsafe.Pointer(p + 8))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + 1*4))))) for i = 1; i < argc; i++ { var z uintptr = Xsqlite3_value_text(tls, *(*uintptr)(unsafe.Pointer(argv + uintptr(i)*4))) if z != 0 && *(*int8)(unsafe.Pointer(z)) != 0 { - Xsqlite3_str_appendf(tls, x, ts+27524, libc.VaList(bp+40, z)) + Xsqlite3_str_appendf(tls, x, ts+27571, libc.VaList(bp+40, z)) } } - Xsqlite3_str_appendf(tls, x, ts+27528, 0) + Xsqlite3_str_appendf(tls, x, ts+27575, 0) Xsqlite3_result_text(tls, context, Xsqlite3_str_finish(tls, x), -1, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{Xsqlite3_free}))) Xsqlite3_free(tls, p) } @@ -125565,7 +125623,7 @@ libc.Xmemcpy(tls, (*Rtree)(unsafe.Pointer(pRtree)).FzName, *(*uintptr)(unsafe.Pointer(argv + 2*4)), uint32(nName)) pSql = Xsqlite3_str_new(tls, db) - Xsqlite3_str_appendf(tls, pSql, ts+27541, 0) + Xsqlite3_str_appendf(tls, pSql, ts+27588, 0) (*Rtree)(unsafe.Pointer(pRtree)).FnAux = U8(1) (*Rtree)(unsafe.Pointer(pRtree)).FnAuxNotNull = U8(1) ii = 3 @@ -125574,7 +125632,7 @@ goto __4 } (*Rtree)(unsafe.Pointer(pRtree)).FnAux++ - Xsqlite3_str_appendf(tls, pSql, ts+27563, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(argv + uintptr(ii)*4)))) + Xsqlite3_str_appendf(tls, pSql, ts+27610, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(argv + uintptr(ii)*4)))) goto __3 __3: ii++ @@ -125582,7 +125640,7 @@ goto __4 __4: ; - 
Xsqlite3_str_appendf(tls, pSql, ts+26630, 0) + Xsqlite3_str_appendf(tls, pSql, ts+26677, 0) zSql = Xsqlite3_str_finish(tls, pSql) if !!(zSql != 0) { goto __5 @@ -125819,7 +125877,7 @@ } if iFuncTerm >= 0 { (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxNum = idxNum - (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxStr = ts + 27567 + (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxStr = ts + 27614 (*sqlite3_index_constraint_usage)(unsafe.Pointer((*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FaConstraintUsage + uintptr(iFuncTerm)*8)).FargvIndex = 1 (*sqlite3_index_constraint_usage)(unsafe.Pointer((*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FaConstraintUsage + uintptr(iFuncTerm)*8)).Fomit = uint8(0) (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FestimatedCost = 300.0 @@ -125827,7 +125885,7 @@ return SQLITE_OK } (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxNum = 4 - (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxStr = ts + 27573 + (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxStr = ts + 27620 (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FestimatedCost = 3000000.0 (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FestimatedRows = int64(100000) return SQLITE_OK @@ -125939,7 +125997,7 @@ if !(*(*int32)(unsafe.Pointer(bp + 48)) == SQLITE_ERROR) { goto __4 } - (*Sqlite3_vtab)(unsafe.Pointer(pVtab)).FzErrMsg = Xsqlite3_mprintf(tls, ts+27582, 0) + (*Sqlite3_vtab)(unsafe.Pointer(pVtab)).FzErrMsg = Xsqlite3_mprintf(tls, ts+27629, 0) __4: ; goto geopoly_update_end @@ -126071,14 +126129,14 @@ func geopolyFindFunction(tls *libc.TLS, pVtab uintptr, nArg int32, zName uintptr, pxFunc uintptr, ppArg uintptr) int32 { _ = pVtab _ = nArg - if Xsqlite3_stricmp(tls, zName, ts+27622) == 0 { + if Xsqlite3_stricmp(tls, zName, ts+27669) == 0 { *(*uintptr)(unsafe.Pointer(pxFunc)) = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{geopolyOverlapFunc})) *(*uintptr)(unsafe.Pointer(ppArg)) = uintptr(0) return SQLITE_INDEX_CONSTRAINT_FUNCTION } - if Xsqlite3_stricmp(tls, zName, ts+27638) == 0 { + if Xsqlite3_stricmp(tls, zName, ts+27685) == 0 { *(*uintptr)(unsafe.Pointer(pxFunc)) = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{geopolyWithinFunc})) @@ -126143,7 +126201,7 @@ uintptr(0), aAgg[i].FxStep, aAgg[i].FxFinal) } if rc == SQLITE_OK { - rc = Xsqlite3_create_module_v2(tls, db, ts+27653, uintptr(unsafe.Pointer(&geopolyModule)), uintptr(0), uintptr(0)) + rc = Xsqlite3_create_module_v2(tls, db, ts+27700, uintptr(unsafe.Pointer(&geopolyModule)), uintptr(0), uintptr(0)) } return rc } @@ -126155,25 +126213,25 @@ F__ccgo_pad1 [2]byte FzName uintptr }{ - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27661}, - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27674}, - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27687}, - {FxFunc: 0, FnArg: int8(-1), FbPure: uint8(1), FzName: ts + 27700}, - {FxFunc: 0, FnArg: int8(2), FbPure: uint8(1), FzName: ts + 27638}, - {FxFunc: 0, FnArg: int8(3), FbPure: uint8(1), FzName: ts + 27712}, - {FxFunc: 0, FnArg: int8(2), FbPure: uint8(1), FzName: ts + 27622}, - {FxFunc: 0, FnArg: int8(1), FzName: ts + 27735}, - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27749}, - {FxFunc: 0, FnArg: int8(7), FbPure: uint8(1), FzName: ts + 27762}, - {FxFunc: 0, FnArg: int8(4), FbPure: uint8(1), FzName: ts + 27776}, - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27792}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27708}, + 
{FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27721}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27734}, + {FxFunc: 0, FnArg: int8(-1), FbPure: uint8(1), FzName: ts + 27747}, + {FxFunc: 0, FnArg: int8(2), FbPure: uint8(1), FzName: ts + 27685}, + {FxFunc: 0, FnArg: int8(3), FbPure: uint8(1), FzName: ts + 27759}, + {FxFunc: 0, FnArg: int8(2), FbPure: uint8(1), FzName: ts + 27669}, + {FxFunc: 0, FnArg: int8(1), FzName: ts + 27782}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27796}, + {FxFunc: 0, FnArg: int8(7), FbPure: uint8(1), FzName: ts + 27809}, + {FxFunc: 0, FnArg: int8(4), FbPure: uint8(1), FzName: ts + 27823}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27839}, } var aAgg = [1]struct { FxStep uintptr FxFinal uintptr FzName uintptr }{ - {FxStep: 0, FxFinal: 0, FzName: ts + 27804}, + {FxStep: 0, FxFinal: 0, FzName: ts + 27851}, } // Register the r-tree module with database handle db. This creates the @@ -126183,26 +126241,26 @@ var utf8 int32 = SQLITE_UTF8 var rc int32 - rc = Xsqlite3_create_function(tls, db, ts+27823, 2, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + rc = Xsqlite3_create_function(tls, db, ts+27870, 2, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rtreenode})), uintptr(0), uintptr(0)) if rc == SQLITE_OK { - rc = Xsqlite3_create_function(tls, db, ts+27833, 1, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + rc = Xsqlite3_create_function(tls, db, ts+27880, 1, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rtreedepth})), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { - rc = Xsqlite3_create_function(tls, db, ts+27844, -1, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + rc = Xsqlite3_create_function(tls, db, ts+27891, -1, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rtreecheck})), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { var c uintptr = uintptr(RTREE_COORD_REAL32) - rc = Xsqlite3_create_module_v2(tls, db, ts+27567, uintptr(unsafe.Pointer(&rtreeModule)), c, uintptr(0)) + rc = Xsqlite3_create_module_v2(tls, db, ts+27614, uintptr(unsafe.Pointer(&rtreeModule)), c, uintptr(0)) } if rc == SQLITE_OK { var c uintptr = uintptr(RTREE_COORD_INT32) - rc = Xsqlite3_create_module_v2(tls, db, ts+27855, uintptr(unsafe.Pointer(&rtreeModule)), c, uintptr(0)) + rc = Xsqlite3_create_module_v2(tls, db, ts+27902, uintptr(unsafe.Pointer(&rtreeModule)), c, uintptr(0)) } if rc == SQLITE_OK { rc = sqlite3_geopoly_init(tls, db) @@ -126256,7 +126314,7 @@ Xsqlite3_result_error_nomem(tls, ctx) rtreeMatchArgFree(tls, pBlob) } else { - Xsqlite3_result_pointer(tls, ctx, pBlob, ts+25143, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{rtreeMatchArgFree}))) + Xsqlite3_result_pointer(tls, ctx, pBlob, ts+25190, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{rtreeMatchArgFree}))) } } } @@ -126571,7 +126629,7 @@ nOut = rbuDeltaOutputSize(tls, aDelta, nDelta) if nOut < 0 { - Xsqlite3_result_error(tls, context, ts+27865, -1) + Xsqlite3_result_error(tls, context, ts+27912, -1) return } @@ -126582,7 +126640,7 @@ nOut2 = rbuDeltaApply(tls, aOrig, nOrig, aDelta, nDelta, aOut) if nOut2 != nOut { Xsqlite3_free(tls, aOut) - Xsqlite3_result_error(tls, context, ts+27865, -1) + Xsqlite3_result_error(tls, context, ts+27912, -1) } else { Xsqlite3_result_blob(tls, context, aOut, nOut, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, 
uintptr) }{Xsqlite3_free}))) } @@ -126683,7 +126741,7 @@ rbuObjIterClearStatements(tls, pIter) if (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx == uintptr(0) { rc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+27886, uintptr(0), uintptr(0), p+36) + ts+27933, uintptr(0), uintptr(0), p+36) } if rc == SQLITE_OK { @@ -126747,7 +126805,7 @@ Xsqlite3_result_text(tls, pCtx, zIn, -1, uintptr(0)) } } else { - if libc.Xstrlen(tls, zIn) > Size_t(4) && libc.Xmemcmp(tls, ts+25056, zIn, uint32(4)) == 0 { + if libc.Xstrlen(tls, zIn) > Size_t(4) && libc.Xmemcmp(tls, ts+25103, zIn, uint32(4)) == 0 { var i int32 for i = 4; int32(*(*int8)(unsafe.Pointer(zIn + uintptr(i)))) >= '0' && int32(*(*int8)(unsafe.Pointer(zIn + uintptr(i)))) <= '9'; i++ { } @@ -126768,16 +126826,16 @@ rc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, pIter, p+36, Xsqlite3_mprintf(tls, - ts+28057, libc.VaList(bp, func() uintptr { + ts+28104, libc.VaList(bp, func() uintptr { if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - return ts + 28207 + return ts + 28254 } return ts + 1554 }()))) if rc == SQLITE_OK { rc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+4, p+36, - ts+28248) + ts+28295) } (*RbuObjIter)(unsafe.Pointer(pIter)).FbCleanup = 1 @@ -126893,7 +126951,7 @@ (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+32, p+36, Xsqlite3_mprintf(tls, - ts+28373, libc.VaList(bp, zTab))) + ts+28420, libc.VaList(bp, zTab))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc != SQLITE_OK || Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 32))) != SQLITE_ROW) { goto __1 } @@ -126911,7 +126969,7 @@ *(*int32)(unsafe.Pointer(piTnum)) = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 32)), 1) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+32+1*4, p+36, - Xsqlite3_mprintf(tls, ts+28492, libc.VaList(bp+8, zTab))) + Xsqlite3_mprintf(tls, ts+28539, libc.VaList(bp+8, zTab))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc != 0) { goto __3 } @@ -126929,7 +126987,7 @@ } (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+32+2*4, p+36, Xsqlite3_mprintf(tls, - ts+28513, libc.VaList(bp+16, zIdx))) + ts+28560, libc.VaList(bp+16, zIdx))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK) { goto __7 } @@ -126952,7 +127010,7 @@ __5: ; (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+32+3*4, p+36, - Xsqlite3_mprintf(tls, ts+28564, libc.VaList(bp+24, zTab))) + Xsqlite3_mprintf(tls, ts+28611, libc.VaList(bp+24, zTab))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK) { goto __10 } @@ -126998,7 +127056,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { libc.Xmemcpy(tls, (*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed, (*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk, uint32(unsafe.Sizeof(U8(0)))*uint32((*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol)) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+16, p+36, - Xsqlite3_mprintf(tls, ts+28585, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) + Xsqlite3_mprintf(tls, ts+28632, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) } (*RbuObjIter)(unsafe.Pointer(pIter)).FnIndex = 0 @@ -127013,7 +127071,7 @@ libc.Xmemset(tls, (*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed, 
0x01, uint32(unsafe.Sizeof(U8(0)))*uint32((*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol)) } (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+20, p+36, - Xsqlite3_mprintf(tls, ts+28613, libc.VaList(bp+8, zIdx))) + Xsqlite3_mprintf(tls, ts+28660, libc.VaList(bp+8, zIdx))) for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 20))) { var iCid int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 20)), 1) if iCid >= 0 { @@ -127053,7 +127111,7 @@ rbuTableType(tls, p, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, pIter+36, bp+56, pIter+60) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NOTABLE { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+19489, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl)) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+19536, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl)) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc != 0 { return (*Sqlite3rbu)(unsafe.Pointer(p)).Frc @@ -127063,18 +127121,18 @@ } (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp+60, p+36, - Xsqlite3_mprintf(tls, ts+28642, libc.VaList(bp+8, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl))) + Xsqlite3_mprintf(tls, ts+28689, libc.VaList(bp+8, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl))) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { nCol = Xsqlite3_column_count(tls, *(*uintptr)(unsafe.Pointer(bp + 60))) rbuAllocateIterArrays(tls, p, pIter, nCol) } for i = 0; (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && i < nCol; i++ { var zName uintptr = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp + 60)), i) - if Xsqlite3_strnicmp(tls, ts+28661, zName, 4) != 0 { + if Xsqlite3_strnicmp(tls, ts+28708, zName, 4) != 0 { var zCopy uintptr = rbuStrndup(tls, zName, p+32) *(*int32)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FaiSrcOrder + uintptr((*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol)*4)) = (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(libc.PostIncInt32(&(*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol, 1))*4)) = zCopy - } else if 0 == Xsqlite3_stricmp(tls, ts+28666, zName) { + } else if 0 == Xsqlite3_stricmp(tls, ts+28713, zName) { bRbuRowid = 1 } } @@ -127086,18 +127144,18 @@ bRbuRowid != libc.Bool32((*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_VTAB || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE) { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, - ts+28676, libc.VaList(bp+16, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, + ts+28723, libc.VaList(bp+16, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, func() uintptr { if bRbuRowid != 0 { - return ts + 28705 + return ts + 28752 } - return ts + 28718 + return ts + 28765 }())) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+60, p+36, - Xsqlite3_mprintf(tls, ts+28727, libc.VaList(bp+32, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) + Xsqlite3_mprintf(tls, ts+28774, libc.VaList(bp+32, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) } for 
(*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 60))) { var zName uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 60)), 1) @@ -127111,7 +127169,7 @@ } if i == (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+28749, + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+28796, libc.VaList(bp+40, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zName)) } else { var iPk int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 60)), 5) @@ -127158,7 +127216,7 @@ var i int32 for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { var z uintptr = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*4)) - zList = rbuMPrintf(tls, p, ts+28776, libc.VaList(bp, zList, zSep, z)) + zList = rbuMPrintf(tls, p, ts+28823, libc.VaList(bp, zList, zSep, z)) zSep = ts + 14614 } return zList @@ -127176,7 +127234,7 @@ for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { if int32(*(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(i)))) == iPk { var zCol uintptr = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*4)) - zRet = rbuMPrintf(tls, p, ts+28785, libc.VaList(bp, zRet, zSep, zPre, zCol, zPost)) + zRet = rbuMPrintf(tls, p, ts+28832, libc.VaList(bp, zRet, zSep, zPre, zCol, zPost)) zSep = zSeparator break } @@ -127198,25 +127256,25 @@ if bRowid != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+72, p+36, Xsqlite3_mprintf(tls, - ts+28798, libc.VaList(bp, zWrite, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) + ts+28845, libc.VaList(bp, zWrite, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 72))) { var iMax Sqlite3_int64 = Xsqlite3_column_int64(tls, *(*uintptr)(unsafe.Pointer(bp + 72)), 0) - zRet = rbuMPrintf(tls, p, ts+28830, libc.VaList(bp+16, iMax)) + zRet = rbuMPrintf(tls, p, ts+28877, libc.VaList(bp+16, iMax)) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 72))) } else { - var zOrder uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+1554, ts+14614, ts+28853) - var zSelect uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+28859, ts+28866, ts+4957) + var zOrder uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+1554, ts+14614, ts+28900) + var zSelect uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+28906, ts+28913, ts+4957) var zList uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+1554, ts+14614, ts+1554) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+72, p+36, Xsqlite3_mprintf(tls, - ts+28874, + ts+28921, libc.VaList(bp+24, zSelect, zWrite, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zOrder))) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 72))) { var zVal uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 72)), 0) - zRet = rbuMPrintf(tls, p, ts+28916, libc.VaList(bp+56, zList, zVal)) + zRet = rbuMPrintf(tls, p, ts+28963, libc.VaList(bp+56, zList, zVal)) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 72))) } @@ -127258,7 +127316,7 @@ 
*(*uintptr)(unsafe.Pointer(bp + 176)) = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+176, p+36, - Xsqlite3_mprintf(tls, ts+28613, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx))) + Xsqlite3_mprintf(tls, ts+28660, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx))) __1: if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 176)))) { goto __2 @@ -127293,7 +127351,7 @@ zCol = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*4)) goto __7 __6: - zCol = ts + 28936 + zCol = ts + 28983 __7: ; goto __5 @@ -127301,11 +127359,11 @@ zCol = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(iCid)*4)) __5: ; - zLhs = rbuMPrintf(tls, p, ts+28944, + zLhs = rbuMPrintf(tls, p, ts+28991, libc.VaList(bp+8, zLhs, zSep, zCol, zCollate)) - zOrder = rbuMPrintf(tls, p, ts+28965, + zOrder = rbuMPrintf(tls, p, ts+29012, libc.VaList(bp+40, zOrder, zSep, iCol, zCol, zCollate)) - zSelect = rbuMPrintf(tls, p, ts+29001, + zSelect = rbuMPrintf(tls, p, ts+29048, libc.VaList(bp+80, zSelect, zSep, iCol, zCol)) zSep = ts + 14614 iCol++ @@ -127325,7 +127383,7 @@ *(*uintptr)(unsafe.Pointer(bp + 180)) = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+180, p+36, - Xsqlite3_mprintf(tls, ts+29028, + Xsqlite3_mprintf(tls, ts+29075, libc.VaList(bp+112, zSelect, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zOrder))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 180)))) { goto __13 @@ -127352,7 +127410,7 @@ ; __18: ; - zVector = rbuMPrintf(tls, p, ts+29076, libc.VaList(bp+136, zVector, zSep, zQuoted)) + zVector = rbuMPrintf(tls, p, ts+29123, libc.VaList(bp+136, zVector, zSep, zQuoted)) zSep = ts + 14614 goto __15 __15: @@ -127364,7 +127422,7 @@ if !!(bFailed != 0) { goto __20 } - zRet = rbuMPrintf(tls, p, ts+29083, libc.VaList(bp+160, zLhs, zVector)) + zRet = rbuMPrintf(tls, p, ts+29130, libc.VaList(bp+160, zLhs, zVector)) __20: ; __13: @@ -127397,7 +127455,7 @@ if rc == SQLITE_OK { rc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+200, p+36, - Xsqlite3_mprintf(tls, ts+28613, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx))) + Xsqlite3_mprintf(tls, ts+28660, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx))) } for rc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 200))) { @@ -127409,7 +127467,7 @@ if iCid == -2 { var iSeq int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 200)), 0) - zRet = Xsqlite3_mprintf(tls, ts+29095, libc.VaList(bp+8, zRet, zCom, + zRet = Xsqlite3_mprintf(tls, ts+29142, libc.VaList(bp+8, zRet, zCom, (*RbuSpan)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FaIdxCol+uintptr(iSeq)*8)).FnSpan, (*RbuSpan)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FaIdxCol+uintptr(iSeq)*8)).FzSpan, zCollate)) zType = ts + 1554 } else { @@ -127421,37 +127479,37 @@ zCol = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*4)) } else if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - zCol = ts + 28936 + zCol = ts + 28983 } else { - zCol = ts + 28666 + zCol = ts + 28713 } zType = ts + 1119 } else { zCol = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + 
uintptr(iCid)*4)) zType = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblType + uintptr(iCid)*4)) } - zRet = Xsqlite3_mprintf(tls, ts+29117, libc.VaList(bp+48, zRet, zCom, zCol, zCollate)) + zRet = Xsqlite3_mprintf(tls, ts+29164, libc.VaList(bp+48, zRet, zCom, zCol, zCollate)) } if (*RbuObjIter)(unsafe.Pointer(pIter)).FbUnique == 0 || Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 200)), 5) != 0 { var zOrder uintptr = func() uintptr { if bDesc != 0 { - return ts + 28853 + return ts + 28900 } return ts + 1554 }() - zImpPK = Xsqlite3_mprintf(tls, ts+29137, + zImpPK = Xsqlite3_mprintf(tls, ts+29184, libc.VaList(bp+80, zImpPK, zCom, nBind, zCol, zOrder)) } - zImpCols = Xsqlite3_mprintf(tls, ts+29158, + zImpCols = Xsqlite3_mprintf(tls, ts+29205, libc.VaList(bp+120, zImpCols, zCom, nBind, zCol, zType, zCollate)) zWhere = Xsqlite3_mprintf(tls, - ts+29191, libc.VaList(bp+168, zWhere, zAnd, nBind, zCol)) + ts+29238, libc.VaList(bp+168, zWhere, zAnd, nBind, zCol)) if zRet == uintptr(0) || zImpPK == uintptr(0) || zImpCols == uintptr(0) || zWhere == uintptr(0) { rc = SQLITE_NOMEM } zCom = ts + 14614 - zAnd = ts + 21525 + zAnd = ts + 21572 nBind++ } @@ -127490,9 +127548,9 @@ for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { if *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed + uintptr(i))) != 0 { var zCol uintptr = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*4)) - zList = Xsqlite3_mprintf(tls, ts+29215, libc.VaList(bp, zList, zS, zObj, zCol)) + zList = Xsqlite3_mprintf(tls, ts+29262, libc.VaList(bp, zList, zS, zObj, zCol)) } else { - zList = Xsqlite3_mprintf(tls, ts+29227, libc.VaList(bp+32, zList, zS)) + zList = Xsqlite3_mprintf(tls, ts+29274, libc.VaList(bp+32, zList, zS)) } zS = ts + 14614 if zList == uintptr(0) { @@ -127502,7 +127560,7 @@ } if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { - zList = rbuMPrintf(tls, p, ts+29236, libc.VaList(bp+48, zList, zObj)) + zList = rbuMPrintf(tls, p, ts+29283, libc.VaList(bp+48, zList, zObj)) } } return zList @@ -127514,18 +127572,18 @@ var zList uintptr = uintptr(0) if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_VTAB || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { - zList = rbuMPrintf(tls, p, ts+29251, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol+1)) + zList = rbuMPrintf(tls, p, ts+29298, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol+1)) } else if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL { var zSep uintptr = ts + 1554 var i int32 for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { if *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(i))) != 0 { - zList = rbuMPrintf(tls, p, ts+29265, libc.VaList(bp+8, zList, zSep, i, i+1)) - zSep = ts + 21525 + zList = rbuMPrintf(tls, p, ts+29312, libc.VaList(bp+8, zList, zSep, i, i+1)) + zSep = ts + 21572 } } zList = rbuMPrintf(tls, p, - ts+29277, libc.VaList(bp+40, zList)) + ts+29324, libc.VaList(bp+40, zList)) } else { var zSep uintptr = ts + 1554 @@ -127533,8 +127591,8 @@ for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { if *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(i))) != 0 { var zCol uintptr = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*4)) - zList = rbuMPrintf(tls, p, ts+29327, libc.VaList(bp+48, zList, zSep, 
zCol, i+1)) - zSep = ts + 21525 + zList = rbuMPrintf(tls, p, ts+29374, libc.VaList(bp+48, zList, zSep, zCol, i+1)) + zSep = ts + 21572 } } } @@ -127543,7 +127601,7 @@ func rbuBadControlError(tls *libc.TLS, p uintptr) { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+29340, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+29387, 0) } func rbuObjIterGetSetlist(tls *libc.TLS, p uintptr, pIter uintptr, zMask uintptr) uintptr { @@ -127561,15 +127619,15 @@ for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { var c int8 = *(*int8)(unsafe.Pointer(zMask + uintptr(*(*int32)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FaiSrcOrder + uintptr(i)*4))))) if int32(c) == 'x' { - zList = rbuMPrintf(tls, p, ts+29327, + zList = rbuMPrintf(tls, p, ts+29374, libc.VaList(bp, zList, zSep, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*4)), i+1)) zSep = ts + 14614 } else if int32(c) == 'd' { - zList = rbuMPrintf(tls, p, ts+29366, + zList = rbuMPrintf(tls, p, ts+29413, libc.VaList(bp+32, zList, zSep, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*4)), *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*4)), i+1)) zSep = ts + 14614 } else if int32(c) == 'f' { - zList = rbuMPrintf(tls, p, ts+29396, + zList = rbuMPrintf(tls, p, ts+29443, libc.VaList(bp+72, zList, zSep, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*4)), *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*4)), i+1)) zSep = ts + 14614 } @@ -127606,19 +127664,19 @@ var z uintptr = uintptr(0) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - var zSep uintptr = ts + 29433 + var zSep uintptr = ts + 29480 *(*uintptr)(unsafe.Pointer(bp + 56)) = uintptr(0) *(*uintptr)(unsafe.Pointer(bp + 60)) = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+56, p+36, - Xsqlite3_mprintf(tls, ts+28585, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) + Xsqlite3_mprintf(tls, ts+28632, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 56))) { var zOrig uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), 3) if zOrig != 0 && libc.Xstrcmp(tls, zOrig, ts+16155) == 0 { var zIdx uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), 1) if zIdx != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+60, p+36, - Xsqlite3_mprintf(tls, ts+28613, libc.VaList(bp+8, zIdx))) + Xsqlite3_mprintf(tls, ts+28660, libc.VaList(bp+8, zIdx))) } break } @@ -127630,15 +127688,15 @@ var zCol uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 60)), 2) var zDesc uintptr if Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 60)), 3) != 0 { - zDesc = ts + 28853 + zDesc = ts + 28900 } else { zDesc = ts + 1554 } - z = rbuMPrintf(tls, p, ts+29446, libc.VaList(bp+16, z, zSep, zCol, zDesc)) + z = rbuMPrintf(tls, p, ts+29493, libc.VaList(bp+16, z, zSep, zCol, zDesc)) zSep = ts + 14614 } } - z = rbuMPrintf(tls, p, ts+29457, libc.VaList(bp+48, z)) + z = rbuMPrintf(tls, p, ts+29504, libc.VaList(bp+48, z)) rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp 
+ 60))) } return z @@ -127658,7 +127716,7 @@ var zPk uintptr = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+168, p+36, - ts+29461) + ts+29508) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { Xsqlite3_bind_int(tls, *(*uintptr)(unsafe.Pointer(bp + 168)), 1, tnum) if SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 168))) { @@ -127667,7 +127725,7 @@ } if zIdx != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+172, p+36, - Xsqlite3_mprintf(tls, ts+28613, libc.VaList(bp, zIdx))) + Xsqlite3_mprintf(tls, ts+28660, libc.VaList(bp, zIdx))) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 168))) @@ -127677,23 +127735,23 @@ var iCid int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 172)), 1) var bDesc int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 172)), 3) var zCollate uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 172)), 4) - zCols = rbuMPrintf(tls, p, ts+29511, libc.VaList(bp+8, zCols, zComma, + zCols = rbuMPrintf(tls, p, ts+29558, libc.VaList(bp+8, zCols, zComma, iCid, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblType + uintptr(iCid)*4)), zCollate)) - zPk = rbuMPrintf(tls, p, ts+29533, libc.VaList(bp+48, zPk, zComma, iCid, func() uintptr { + zPk = rbuMPrintf(tls, p, ts+29580, libc.VaList(bp+48, zPk, zComma, iCid, func() uintptr { if bDesc != 0 { - return ts + 28853 + return ts + 28900 } return ts + 1554 }())) zComma = ts + 14614 } } - zCols = rbuMPrintf(tls, p, ts+29543, libc.VaList(bp+80, zCols)) + zCols = rbuMPrintf(tls, p, ts+29590, libc.VaList(bp+80, zCols)) rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 172))) Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+88, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6441, 1, tnum)) rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+29558, + ts+29605, libc.VaList(bp+120, zCols, zPk)) Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+136, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6441, 0, 0)) } @@ -127719,13 +127777,13 @@ (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6441, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zCol, uintptr(0), bp+192, uintptr(0), uintptr(0), uintptr(0)) if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_IPK && *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(iCol))) != 0 { - zPk = ts + 29620 + zPk = ts + 29667 } - zSql = rbuMPrintf(tls, p, ts+29633, + zSql = rbuMPrintf(tls, p, ts+29680, libc.VaList(bp+32, zSql, zComma, zCol, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblType + uintptr(iCol)*4)), zPk, *(*uintptr)(unsafe.Pointer(bp + 192)), func() uintptr { if *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabNotNull + uintptr(iCol))) != 0 { - return ts + 29660 + return ts + 29707 } return ts + 1554 }())) @@ -127735,16 +127793,16 @@ if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_WITHOUT_ROWID { var zPk uintptr = rbuWithoutRowidPK(tls, p, pIter) if zPk != 0 { - zSql = rbuMPrintf(tls, p, ts+29670, libc.VaList(bp+88, zSql, zPk)) + zSql = rbuMPrintf(tls, p, ts+29717, libc.VaList(bp+88, zSql, zPk)) } } Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+104, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6441, 1, tnum)) - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+29677, + rbuMPrintfExec(tls, p, 
(*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+29724, libc.VaList(bp+136, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zSql, func() uintptr { if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_WITHOUT_ROWID { - return ts + 29709 + return ts + 29756 } return ts + 1554 }())) @@ -127761,7 +127819,7 @@ if zBind != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, pIter+88, p+36, Xsqlite3_mprintf(tls, - ts+29724, + ts+29771, libc.VaList(bp, p+24, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zCollist, zRbuRowid, zBind))) } } @@ -127798,7 +127856,7 @@ if *(*int32)(unsafe.Pointer(bp + 4)) == SQLITE_OK { *(*int32)(unsafe.Pointer(bp + 4)) = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp, p+36, - ts+29781) + ts+29828) } if *(*int32)(unsafe.Pointer(bp + 4)) == SQLITE_OK { var rc2 int32 @@ -127903,7 +127961,7 @@ var zLimit uintptr = uintptr(0) if nOffset != 0 { - zLimit = Xsqlite3_mprintf(tls, ts+29847, libc.VaList(bp, nOffset)) + zLimit = Xsqlite3_mprintf(tls, ts+29894, libc.VaList(bp, nOffset)) if !(zLimit != 0) { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_NOMEM } @@ -127926,7 +127984,7 @@ Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+8, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6441, 0, 1)) Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+40, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6441, 1, tnum)) rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+29867, + ts+29914, libc.VaList(bp+72, zTbl, *(*uintptr)(unsafe.Pointer(bp + 600)), *(*uintptr)(unsafe.Pointer(bp + 604)))) Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+96, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6441, 0, 0)) @@ -127934,13 +127992,13 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+80, p+36, - Xsqlite3_mprintf(tls, ts+29932, libc.VaList(bp+128, zTbl, zBind))) + Xsqlite3_mprintf(tls, ts+29979, libc.VaList(bp+128, zTbl, zBind))) } if libc.Bool32((*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0)) == 0 && (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+84, p+36, - Xsqlite3_mprintf(tls, ts+29968, libc.VaList(bp+144, zTbl, *(*uintptr)(unsafe.Pointer(bp + 608))))) + Xsqlite3_mprintf(tls, ts+30015, libc.VaList(bp+144, zTbl, *(*uintptr)(unsafe.Pointer(bp + 608))))) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { @@ -127956,7 +128014,7 @@ } zSql = Xsqlite3_mprintf(tls, - ts+30002, + ts+30049, libc.VaList(bp+160, zCollist, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zPart, @@ -127964,9 +128022,9 @@ if zStart != 0 { return func() uintptr { if zPart != 0 { - return ts + 30063 + return ts + 30110 } - return ts + 30067 + return ts + 30114 }() } return ts + 1554 @@ -127975,20 +128033,20 @@ Xsqlite3_free(tls, zStart) } else if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { zSql = Xsqlite3_mprintf(tls, - ts+30073, + ts+30120, libc.VaList(bp+216, zCollist, p+24, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zPart, zCollist, zLimit)) } else { zSql = Xsqlite3_mprintf(tls, - ts+30134, + ts+30181, libc.VaList(bp+264, zCollist, p+24, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zPart, zCollist, 
(*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zPart, func() uintptr { if zPart != 0 { - return ts + 30063 + return ts + 30110 } - return ts + 30067 + return ts + 30114 }(), zCollist, zLimit)) } @@ -128025,16 +128083,16 @@ if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_VTAB { return ts + 1554 } - return ts + 30293 + return ts + 30340 }() if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+80, pz, Xsqlite3_mprintf(tls, - ts+30302, + ts+30349, libc.VaList(bp+344, zWrite, zTbl, zCollist, func() uintptr { if bRbuRowid != 0 { - return ts + 30338 + return ts + 30385 } return ts + 1554 }(), zBindings))) @@ -128043,32 +128101,32 @@ if libc.Bool32((*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0)) == 0 && (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+84, pz, Xsqlite3_mprintf(tls, - ts+30348, libc.VaList(bp+384, zWrite, zTbl, zWhere))) + ts+30395, libc.VaList(bp+384, zWrite, zTbl, zWhere))) } if libc.Bool32((*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0)) == 0 && (*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed != 0 { var zRbuRowid uintptr = ts + 1554 if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { - zRbuRowid = ts + 30376 + zRbuRowid = ts + 30423 } rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, - ts+30388, libc.VaList(bp+408, p+24, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, func() uintptr { + ts+30435, libc.VaList(bp+408, p+24, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, func() uintptr { if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL { - return ts + 30464 + return ts + 30511 } return ts + 1554 }(), (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl)) rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+30481, + ts+30528, libc.VaList(bp+440, zWrite, zTbl, zOldlist, zWrite, zTbl, zOldlist, zWrite, zTbl, zNewlist)) if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+30780, + ts+30827, libc.VaList(bp+512, zWrite, zTbl, zNewlist)) } @@ -128081,9 +128139,9 @@ var zOrder uintptr = uintptr(0) if bRbuRowid != 0 { if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - zRbuRowid = ts + 30879 + zRbuRowid = ts + 30926 } else { - zRbuRowid = ts + 30889 + zRbuRowid = ts + 30936 } } @@ -128096,7 +128154,7 @@ } } if bRbuRowid != 0 { - zOrder = rbuMPrintf(tls, p, ts+28936, 0) + zOrder = rbuMPrintf(tls, p, ts+28983, 0) } else { zOrder = rbuObjIterGetPkList(tls, p, pIter, ts+1554, ts+14614, ts+1554) } @@ -128105,11 +128163,11 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, pIter+76, pz, Xsqlite3_mprintf(tls, - ts+30900, + ts+30947, libc.VaList(bp+536, zCollist, func() uintptr { if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - return ts + 30948 + return ts + 30995 } return ts + 1554 }(), @@ -128122,7 +128180,7 @@ }(), func() uintptr { if zOrder != 0 { - return ts + 22859 + return ts + 22906 } return ts + 1554 }(), zOrder, @@ -128190,9 +128248,9 @@ var zPrefix uintptr = ts + 1554 if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType != 
RBU_PK_VTAB { - zPrefix = ts + 30293 + zPrefix = ts + 30340 } - zUpdate = Xsqlite3_mprintf(tls, ts+30954, + zUpdate = Xsqlite3_mprintf(tls, ts+31001, libc.VaList(bp, zPrefix, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zSet, zWhere)) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pUp+4, p+36, zUpdate) @@ -128251,7 +128309,7 @@ } *(*int32)(unsafe.Pointer(bp + 12)) = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp+8, p+36, - Xsqlite3_mprintf(tls, ts+30984, libc.VaList(bp, p+24))) + Xsqlite3_mprintf(tls, ts+31031, libc.VaList(bp, p+24))) for *(*int32)(unsafe.Pointer(bp + 12)) == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 8))) { switch Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 8)), 0) { case RBU_STATE_STAGE: @@ -128324,18 +128382,18 @@ Xsqlite3_file_control(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+6441, SQLITE_FCNTL_RBUCNT, p) if (*Sqlite3rbu)(unsafe.Pointer(p)).FzState == uintptr(0) { var zFile uintptr = Xsqlite3_db_filename(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+6441) - (*Sqlite3rbu)(unsafe.Pointer(p)).FzState = rbuMPrintf(tls, p, ts+31014, libc.VaList(bp, zFile, zFile)) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzState = rbuMPrintf(tls, p, ts+31061, libc.VaList(bp, zFile, zFile)) } } if (*Sqlite3rbu)(unsafe.Pointer(p)).FzState != 0 { - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+31042, libc.VaList(bp+16, (*Sqlite3rbu)(unsafe.Pointer(p)).FzState)) + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+31089, libc.VaList(bp+16, (*Sqlite3rbu)(unsafe.Pointer(p)).FzState)) libc.Xmemcpy(tls, p+24, ts+3286, uint32(4)) } else { libc.Xmemcpy(tls, p+24, ts+6441, uint32(4)) } - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+31060, libc.VaList(bp+24, p+24)) + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+31107, libc.VaList(bp+24, p+24)) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { var bOpen int32 = 0 @@ -128375,11 +128433,11 @@ return } (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31126, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31173, 0) } else { var zTarget uintptr var zExtra uintptr = uintptr(0) - if libc.Xstrlen(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu) >= Size_t(5) && 0 == libc.Xmemcmp(tls, ts+24206, (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu, uint32(5)) { + if libc.Xstrlen(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu) >= Size_t(5) && 0 == libc.Xmemcmp(tls, ts+24253, (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu, uint32(5)) { zExtra = (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu + 5 for *(*int8)(unsafe.Pointer(zExtra)) != 0 { if int32(*(*int8)(unsafe.Pointer(libc.PostIncUintptr(&zExtra, 1)))) == '?' 
{ @@ -128391,13 +128449,13 @@ } } - zTarget = Xsqlite3_mprintf(tls, ts+31158, + zTarget = Xsqlite3_mprintf(tls, ts+31205, libc.VaList(bp+32, Xsqlite3_db_filename(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+6441), func() uintptr { if zExtra == uintptr(0) { return ts + 1554 } - return ts + 31190 + return ts + 31237 }(), func() uintptr { if zExtra == uintptr(0) { return ts + 1554 @@ -128416,21 +128474,21 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_create_function(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+31192, -1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { + ts+31239, -1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rbuTmpInsertFunc})), uintptr(0), uintptr(0)) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_create_function(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+31207, 2, SQLITE_UTF8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + ts+31254, 2, SQLITE_UTF8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rbuFossilDeltaFunc})), uintptr(0), uintptr(0)) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_create_function(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, - ts+31224, -1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { + ts+31271, -1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rbuTargetNameFunc})), uintptr(0), uintptr(0)) } @@ -128438,7 +128496,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_file_control(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6441, SQLITE_FCNTL_RBU, p) } - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31240, 0) + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31287, 0) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_file_control(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6441, SQLITE_FCNTL_RBU, p) @@ -128446,7 +128504,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_NOTFOUND { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31268, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31315, 0) } } @@ -128475,14 +128533,14 @@ if pState == uintptr(0) { (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage = 0 if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31240, uintptr(0), uintptr(0), uintptr(0)) + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31287, uintptr(0), uintptr(0), uintptr(0)) } } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { var rc2 int32 (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage = RBU_STAGE_CAPTURE - rc2 = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31286, uintptr(0), uintptr(0), uintptr(0)) + rc2 = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31333, uintptr(0), uintptr(0), uintptr(0)) if rc2 != SQLITE_NOTICE { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = rc2 } @@ -128608,7 +128666,7 @@ func rbuExclusiveCheckpoint(tls *libc.TLS, db uintptr) int32 { var zUri uintptr = Xsqlite3_db_filename(tls, db, uintptr(0)) - return 
Xsqlite3_uri_boolean(tls, zUri, ts+31321, 0) + return Xsqlite3_uri_boolean(tls, zUri, ts+31368, 0) } func rbuMoveOalFile(tls *libc.TLS, p uintptr) { @@ -128623,8 +128681,8 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { zMove = Xsqlite3_db_filename(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+6441) } - zOal = Xsqlite3_mprintf(tls, ts+31346, libc.VaList(bp, zMove)) - zWal = Xsqlite3_mprintf(tls, ts+31353, libc.VaList(bp+8, zMove)) + zOal = Xsqlite3_mprintf(tls, ts+31393, libc.VaList(bp, zMove)) + zWal = Xsqlite3_mprintf(tls, ts+31400, libc.VaList(bp+8, zMove)) if zWal == uintptr(0) || zOal == uintptr(0) { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_NOMEM @@ -128741,7 +128799,7 @@ (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx == uintptr(0) && (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_IPK && *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(i))) != 0 && Xsqlite3_column_type(tls, (*RbuObjIter)(unsafe.Pointer(pIter)).FpSelect, i) == SQLITE_NULL { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_MISMATCH - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+23844, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+23891, 0) return } @@ -128834,7 +128892,7 @@ var iCookie int32 = 1000000 (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, dbread, bp+8, p+36, - ts+31360) + ts+31407) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { if SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 8))) { iCookie = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 8)), 0) @@ -128842,7 +128900,7 @@ rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 8))) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31382, libc.VaList(bp, iCookie+1)) + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31429, libc.VaList(bp, iCookie+1)) } } } @@ -128863,7 +128921,7 @@ rc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp+168, p+36, Xsqlite3_mprintf(tls, - ts+31409, + ts+31456, libc.VaList(bp, p+24, RBU_STATE_STAGE, eStage, RBU_STATE_TBL, (*Sqlite3rbu)(unsafe.Pointer(p)).Fobjiter.FzTbl, @@ -128893,9 +128951,9 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { *(*uintptr)(unsafe.Pointer(bp + 24)) = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp+24, p+36, - Xsqlite3_mprintf(tls, ts+31567, libc.VaList(bp, zPragma))) + Xsqlite3_mprintf(tls, ts+31614, libc.VaList(bp, zPragma))) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) { - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31582, + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31629, libc.VaList(bp+8, zPragma, Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 24)), 0))) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 24))) @@ -128909,10 +128967,10 @@ *(*uintptr)(unsafe.Pointer(bp)) = uintptr(0) *(*uintptr)(unsafe.Pointer(bp + 4)) = uintptr(0) - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31602, uintptr(0), uintptr(0), p+36) + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31649, uintptr(0), uintptr(0), p+36) if 
(*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp, p+36, - ts+31627) + ts+31674) } for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp))) == SQLITE_ROW { @@ -128926,12 +128984,12 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp, p+36, - ts+31735) + ts+31782) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+4, p+36, - ts+31800) + ts+31847) } for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp))) == SQLITE_ROW { @@ -128943,7 +129001,7 @@ (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_reset(tls, *(*uintptr)(unsafe.Pointer(bp + 4))) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31844, uintptr(0), uintptr(0), p+36) + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31891, uintptr(0), uintptr(0), p+36) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp))) @@ -128971,7 +129029,7 @@ if (*RbuObjIter)(unsafe.Pointer(pIter)).FbCleanup != 0 { if libc.Bool32((*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0)) == 0 && (*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed != 0 { rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, - ts+31869, libc.VaList(bp, p+24, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl)) + ts+31916, libc.VaList(bp, p+24, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl)) } } else { rbuObjIterPrepareAll(tls, p, pIter, 0) @@ -129093,7 +129151,7 @@ if rc == SQLITE_OK && !(int32((*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl) != 0) { rc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31897, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31944, 0) } if rc == SQLITE_OK { @@ -129109,7 +129167,7 @@ bp := tls.Alloc(12) defer tls.Free(12) - var zOal uintptr = rbuMPrintf(tls, p, ts+31346, libc.VaList(bp, (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget)) + var zOal uintptr = rbuMPrintf(tls, p, ts+31393, libc.VaList(bp, (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget)) if zOal != 0 { *(*uintptr)(unsafe.Pointer(bp + 8)) = uintptr(0) Xsqlite3_file_control(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6441, SQLITE_FCNTL_VFS_POINTER, bp+8) @@ -129126,7 +129184,7 @@ defer tls.Free(76) Xsqlite3_randomness(tls, int32(unsafe.Sizeof(int32(0))), bp+8) - Xsqlite3_snprintf(tls, int32(unsafe.Sizeof([64]int8{})), bp+12, ts+31922, libc.VaList(bp, *(*int32)(unsafe.Pointer(bp + 8)))) + Xsqlite3_snprintf(tls, int32(unsafe.Sizeof([64]int8{})), bp+12, ts+31969, libc.VaList(bp, *(*int32)(unsafe.Pointer(bp + 8)))) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3rbu_create_vfs(tls, bp+12, uintptr(0)) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { var pVfs uintptr = Xsqlite3_vfs_find(tls, bp+12) @@ -129160,7 +129218,7 @@ rc = prepareFreeAndCollectError(tls, db, bp+8, bp+12, Xsqlite3_mprintf(tls, - ts+31933, libc.VaList(bp, Xsqlite3_value_text(tls, *(*uintptr)(unsafe.Pointer(apVal)))))) + ts+31980, libc.VaList(bp, Xsqlite3_value_text(tls, *(*uintptr)(unsafe.Pointer(apVal)))))) if rc != SQLITE_OK { Xsqlite3_result_error(tls, pCtx, 
*(*uintptr)(unsafe.Pointer(bp + 12)), -1) } else { @@ -129190,13 +129248,13 @@ (*Sqlite3rbu)(unsafe.Pointer(p)).FnPhaseOneStep = int64(-1) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_create_function(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, - ts+32005, 1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { + ts+32052, 1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rbuIndexCntFunc})), uintptr(0), uintptr(0)) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp, p+36, - ts+32019) + ts+32066) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { if SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp))) { @@ -129207,7 +129265,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && bExists != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp, p+36, - ts+32076) + ts+32123) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { if SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp))) { (*Sqlite3rbu)(unsafe.Pointer(p)).FnPhaseOneStep = Xsqlite3_column_int64(tls, *(*uintptr)(unsafe.Pointer(bp)), 0) @@ -129281,7 +129339,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && (*Rbu_file)(unsafe.Pointer((*Sqlite3rbu)(unsafe.Pointer(p)).FpTargetFd)).FpWalFd != 0 { if (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage == RBU_STAGE_OAL { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32150, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32197, 0) } else if (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage == RBU_STAGE_MOVE { (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage = RBU_STAGE_CKPT (*Sqlite3rbu)(unsafe.Pointer(p)).FnStep = 0 @@ -129299,12 +129357,12 @@ }() if (*Rbu_file)(unsafe.Pointer(pFd)).FiCookie != (*RbuState)(unsafe.Pointer(pState)).FiCookie { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_BUSY - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32182, + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32229, libc.VaList(bp+8, func() uintptr { if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - return ts + 32214 + return ts + 32261 } - return ts + 32221 + return ts + 32268 }())) } } @@ -129328,14 +129386,14 @@ } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, db, ts+32228, uintptr(0), uintptr(0), p+36) + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, db, ts+32275, uintptr(0), uintptr(0), p+36) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { var frc int32 = Xsqlite3_file_control(tls, db, ts+6441, SQLITE_FCNTL_ZIPVFS, uintptr(0)) if frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, - db, ts+32244, uintptr(0), uintptr(0), p+36) + db, ts+32291, uintptr(0), uintptr(0), p+36) } } @@ -129389,7 +129447,7 @@ } if zState != 0 { var n int32 = int32(libc.Xstrlen(tls, zState)) - if n >= 7 && 0 == libc.Xmemcmp(tls, ts+32268, zState+uintptr(n-7), uint32(7)) { + if n >= 7 && 0 == libc.Xmemcmp(tls, ts+32315, zState+uintptr(n-7), uint32(7)) { return rbuMisuseError(tls) } } @@ -129416,7 +129474,7 @@ var i uint32 var nErrmsg Size_t = libc.Xstrlen(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg) for i = uint32(0); i < nErrmsg-Size_t(8); i++ { - if libc.Xmemcmp(tls, 
(*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg+uintptr(i), ts+30293, uint32(8)) == 0 { + if libc.Xmemcmp(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg+uintptr(i), ts+30340, uint32(8)) == 0 { var nDel int32 = 8 for int32(*(*int8)(unsafe.Pointer((*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg + uintptr(i+uint32(nDel))))) >= '0' && int32(*(*int8)(unsafe.Pointer((*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg + uintptr(i+uint32(nDel))))) <= '9' { nDel++ @@ -129452,7 +129510,7 @@ rbuObjIterFinalize(tls, p+48) if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) && (*Sqlite3rbu)(unsafe.Pointer(p)).Frc != SQLITE_OK && (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu != 0 { - var rc2 int32 = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+32276, uintptr(0), uintptr(0), uintptr(0)) + var rc2 int32 = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+32323, uintptr(0), uintptr(0), uintptr(0)) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_DONE && rc2 != SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = rc2 } @@ -129571,12 +129629,12 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { zBegin = ts + 14503 } else { - zBegin = ts + 32228 + zBegin = ts + 32275 } rc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, zBegin, uintptr(0), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { - rc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+32228, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+32275, uintptr(0), uintptr(0), uintptr(0)) } } @@ -129922,7 +129980,7 @@ })(unsafe.Pointer(&struct{ uintptr }{xControl})).f(tls, (*Rbu_file)(unsafe.Pointer(p)).FpReal, SQLITE_FCNTL_ZIPVFS, bp+16) if rc == SQLITE_OK { rc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(pRbu)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32303, 0) + (*Sqlite3rbu)(unsafe.Pointer(pRbu)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32350, 0) } else if rc == SQLITE_NOTFOUND { (*Sqlite3rbu)(unsafe.Pointer(pRbu)).FpTargetFd = p (*Rbu_file)(unsafe.Pointer(p)).FpRbu = pRbu @@ -129947,7 +130005,7 @@ if rc == SQLITE_OK && op == SQLITE_FCNTL_VFSNAME { var pRbuVfs uintptr = (*Rbu_file)(unsafe.Pointer(p)).FpRbuVfs var zIn uintptr = *(*uintptr)(unsafe.Pointer(pArg)) - var zOut uintptr = Xsqlite3_mprintf(tls, ts+32326, libc.VaList(bp, (*Rbu_vfs)(unsafe.Pointer(pRbuVfs)).Fbase.FzName, zIn)) + var zOut uintptr = Xsqlite3_mprintf(tls, ts+32373, libc.VaList(bp, (*Rbu_vfs)(unsafe.Pointer(pRbuVfs)).Fbase.FzName, zIn)) *(*uintptr)(unsafe.Pointer(pArg)) = zOut if zOut == uintptr(0) { rc = SQLITE_NOMEM @@ -130107,7 +130165,7 @@ } if oflags&SQLITE_OPEN_MAIN_DB != 0 && - Xsqlite3_uri_boolean(tls, zName, ts+32337, 0) != 0 { + Xsqlite3_uri_boolean(tls, zName, ts+32384, 0) != 0 { oflags = SQLITE_OPEN_TEMP_DB | SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE | SQLITE_OPEN_EXCLUSIVE | SQLITE_OPEN_DELETEONCLOSE zOpen = uintptr(0) } @@ -130932,7 +130990,7 @@ rc = Xsqlite3_table_column_metadata(tls, db, zDb, zThis, uintptr(0), uintptr(0), uintptr(0), uintptr(0), uintptr(0), uintptr(0)) if rc == SQLITE_OK { zPragma = Xsqlite3_mprintf(tls, - ts+32348, 0) + ts+32395, 0) } else if rc == SQLITE_ERROR { zPragma = Xsqlite3_mprintf(tls, ts+1554, 0) } else { @@ -130945,7 +131003,7 @@ return rc } } else { - zPragma = Xsqlite3_mprintf(tls, ts+32469, libc.VaList(bp, zDb, zThis)) + zPragma = Xsqlite3_mprintf(tls, ts+32516, libc.VaList(bp, zDb, zThis)) } if !(zPragma != 0) { *(*uintptr)(unsafe.Pointer(pazCol)) = uintptr(0) @@ -131624,9 +131682,9 @@ for i = 0; i < nCol; i++ { if 
*(*U8)(unsafe.Pointer(abPK + uintptr(i))) != 0 { - zRet = Xsqlite3_mprintf(tls, ts+32498, + zRet = Xsqlite3_mprintf(tls, ts+32545, libc.VaList(bp, zRet, zSep, zDb1, zTab, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*4)), zDb2, zTab, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*4)))) - zSep = ts + 21525 + zSep = ts + 21572 if zRet == uintptr(0) { break } @@ -131649,9 +131707,9 @@ if int32(*(*U8)(unsafe.Pointer(abPK + uintptr(i)))) == 0 { bHave = 1 zRet = Xsqlite3_mprintf(tls, - ts+32532, + ts+32579, libc.VaList(bp, zRet, zSep, zDb1, zTab, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*4)), zDb2, zTab, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*4)))) - zSep = ts + 32573 + zSep = ts + 32620 if zRet == uintptr(0) { break } @@ -131659,7 +131717,7 @@ } if bHave == 0 { - zRet = Xsqlite3_mprintf(tls, ts+7530, 0) + zRet = Xsqlite3_mprintf(tls, ts+7519, 0) } return zRet @@ -131670,7 +131728,7 @@ defer tls.Free(40) var zRet uintptr = Xsqlite3_mprintf(tls, - ts+32578, + ts+32625, libc.VaList(bp, zDb1, zTbl, zDb2, zTbl, zExpr)) return zRet } @@ -131713,7 +131771,7 @@ rc = SQLITE_NOMEM } else { var zStmt uintptr = Xsqlite3_mprintf(tls, - ts+32656, + ts+32703, libc.VaList(bp, (*Sqlite3_session)(unsafe.Pointer(pSession)).FzDb, (*SessionTable)(unsafe.Pointer(pTab)).FzName, zFrom, (*SessionTable)(unsafe.Pointer(pTab)).FzName, zExpr, zExpr2)) if zStmt == uintptr(0) { rc = SQLITE_NOMEM @@ -131840,7 +131898,7 @@ if !(pzErrMsg != 0) { goto __16 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+32709, 0) + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+32756, 0) __16: ; rc = SQLITE_SCHEMA @@ -132316,7 +132374,7 @@ if 0 == Xsqlite3_stricmp(tls, ts+11348, zTab) { zSql = Xsqlite3_mprintf(tls, - ts+32736, libc.VaList(bp, zDb)) + ts+32783, libc.VaList(bp, zDb)) if zSql == uintptr(0) { *(*int32)(unsafe.Pointer(bp + 20)) = SQLITE_NOMEM } @@ -132325,18 +132383,18 @@ var zSep uintptr = ts + 1554 *(*SessionBuffer)(unsafe.Pointer(bp + 8)) = SessionBuffer{} - sessionAppendStr(tls, bp+8, ts+32846, bp+20) + sessionAppendStr(tls, bp+8, ts+32893, bp+20) sessionAppendIdent(tls, bp+8, zDb, bp+20) sessionAppendStr(tls, bp+8, ts+1567, bp+20) sessionAppendIdent(tls, bp+8, zTab, bp+20) - sessionAppendStr(tls, bp+8, ts+32861, bp+20) + sessionAppendStr(tls, bp+8, ts+32908, bp+20) for i = 0; i < nCol; i++ { if *(*U8)(unsafe.Pointer(abPK + uintptr(i))) != 0 { sessionAppendStr(tls, bp+8, zSep, bp+20) sessionAppendIdent(tls, bp+8, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*4)), bp+20) - sessionAppendStr(tls, bp+8, ts+32869, bp+20) + sessionAppendStr(tls, bp+8, ts+32916, bp+20) sessionAppendInteger(tls, bp+8, i+1, bp+20) - zSep = ts + 21525 + zSep = ts + 21572 } } zSql = (*SessionBuffer)(unsafe.Pointer(bp + 8)).FaBuf @@ -132445,7 +132503,7 @@ if (*Sqlite3_session)(unsafe.Pointer(pSession)).Frc != 0 { return (*Sqlite3_session)(unsafe.Pointer(pSession)).Frc } - *(*int32)(unsafe.Pointer(bp + 24)) = Xsqlite3_exec(tls, (*Sqlite3_session)(unsafe.Pointer(pSession)).Fdb, ts+32875, uintptr(0), uintptr(0), uintptr(0)) + *(*int32)(unsafe.Pointer(bp + 24)) = Xsqlite3_exec(tls, (*Sqlite3_session)(unsafe.Pointer(pSession)).Fdb, ts+32922, uintptr(0), uintptr(0), uintptr(0)) if *(*int32)(unsafe.Pointer(bp + 24)) != SQLITE_OK { return *(*int32)(unsafe.Pointer(bp + 24)) } @@ -132537,7 +132595,7 @@ } Xsqlite3_free(tls, (*SessionBuffer)(unsafe.Pointer(bp+12)).FaBuf) - Xsqlite3_exec(tls, db, ts+32895, uintptr(0), uintptr(0), uintptr(0)) + Xsqlite3_exec(tls, db, ts+32942, uintptr(0), uintptr(0), uintptr(0)) 
Xsqlite3_mutex_leave(tls, Xsqlite3_db_mutex(tls, db)) return *(*int32)(unsafe.Pointer(bp + 24)) } @@ -132800,7 +132858,7 @@ rc = sessionInputBuffer(tls, pIn, 9) if rc == SQLITE_OK { if (*SessionInput)(unsafe.Pointer(pIn)).FiNext >= (*SessionInput)(unsafe.Pointer(pIn)).FnData { - rc = Xsqlite3CorruptError(tls, 219078) + rc = Xsqlite3CorruptError(tls, 219169) } else { eType = int32(*(*U8)(unsafe.Pointer((*SessionInput)(unsafe.Pointer(pIn)).FaData + uintptr(libc.PostIncInt32(&(*SessionInput)(unsafe.Pointer(pIn)).FiNext, 1))))) @@ -132823,7 +132881,7 @@ rc = sessionInputBuffer(tls, pIn, *(*int32)(unsafe.Pointer(bp))) if rc == SQLITE_OK { if *(*int32)(unsafe.Pointer(bp)) < 0 || *(*int32)(unsafe.Pointer(bp)) > (*SessionInput)(unsafe.Pointer(pIn)).FnData-(*SessionInput)(unsafe.Pointer(pIn)).FiNext { - rc = Xsqlite3CorruptError(tls, 219098) + rc = Xsqlite3CorruptError(tls, 219189) } else { var enc U8 = func() uint8 { if eType == SQLITE_TEXT { @@ -132865,7 +132923,7 @@ nRead = nRead + sessionVarintGet(tls, (*SessionInput)(unsafe.Pointer(pIn)).FaData+uintptr((*SessionInput)(unsafe.Pointer(pIn)).FiNext+nRead), bp) if *(*int32)(unsafe.Pointer(bp)) < 0 || *(*int32)(unsafe.Pointer(bp)) > 65536 { - rc = Xsqlite3CorruptError(tls, 219152) + rc = Xsqlite3CorruptError(tls, 219243) } else { rc = sessionInputBuffer(tls, pIn, nRead+*(*int32)(unsafe.Pointer(bp))+100) nRead = nRead + *(*int32)(unsafe.Pointer(bp)) @@ -132926,7 +132984,7 @@ (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Ftblhdr.FnBuf = 0 sessionBufferGrow(tls, p+44, int64(nByte), bp+4) } else { - *(*int32)(unsafe.Pointer(bp + 4)) = Xsqlite3CorruptError(tls, 219240) + *(*int32)(unsafe.Pointer(bp + 4)) = Xsqlite3CorruptError(tls, 219331) } } @@ -133000,13 +133058,13 @@ } if (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FzTab == uintptr(0) || (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FbPatchset != 0 && (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FbInvert != 0 { - return libc.AssignPtrInt32(p+68, Xsqlite3CorruptError(tls, 219326)) + return libc.AssignPtrInt32(p+68, Xsqlite3CorruptError(tls, 219417)) } (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fop = int32(op) (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FbIndirect = int32(*(*U8)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fin.FaData + uintptr(libc.PostIncInt32(&(*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fin.FiNext, 1))))) if (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fop != SQLITE_UPDATE && (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fop != SQLITE_DELETE && (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fop != SQLITE_INSERT { - return libc.AssignPtrInt32(p+68, Xsqlite3CorruptError(tls, 219332)) + return libc.AssignPtrInt32(p+68, Xsqlite3CorruptError(tls, 219423)) } if paRec != 0 { @@ -133068,7 +133126,7 @@ if *(*U8)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FabPK + uintptr(i))) != 0 { *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FapValue + uintptr(i)*4)) = *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FapValue + uintptr(i+(*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FnCol)*4)) if *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FapValue + uintptr(i)*4)) == uintptr(0) { - return libc.AssignPtrInt32(p+68, Xsqlite3CorruptError(tls, 219376)) + return libc.AssignPtrInt32(p+68, Xsqlite3CorruptError(tls, 219467)) } *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FapValue + uintptr(i+(*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FnCol)*4)) = uintptr(0) } @@ -133441,7 
+133499,7 @@ goto __6 __11: - *(*int32)(unsafe.Pointer(bp + 32)) = Xsqlite3CorruptError(tls, 219741) + *(*int32)(unsafe.Pointer(bp + 32)) = Xsqlite3CorruptError(tls, 219832) goto finished_invert __6: ; @@ -133618,34 +133676,34 @@ (*SessionUpdate)(unsafe.Pointer(pUp)).FaMask = pUp + 1*12 libc.Xmemcpy(tls, (*SessionUpdate)(unsafe.Pointer(pUp)).FaMask, (*SessionApplyCtx)(unsafe.Pointer(p)).FaUpdateMask, uint32(nU32)*uint32(unsafe.Sizeof(U32(0)))) - sessionAppendStr(tls, bp, ts+32913, bp+12) + sessionAppendStr(tls, bp, ts+32960, bp+12) sessionAppendIdent(tls, bp, (*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FzTab, bp+12) - sessionAppendStr(tls, bp, ts+32926, bp+12) + sessionAppendStr(tls, bp, ts+32973, bp+12) for ii = 0; ii < (*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FnCol; ii++ { if int32(*(*U8)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FabPK + uintptr(ii)))) == 0 && *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FapValue + uintptr((*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FnCol+ii)*4)) != 0 { sessionAppendStr(tls, bp, zSep, bp+12) sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(ii)*4)), bp+12) - sessionAppendStr(tls, bp, ts+32932, bp+12) + sessionAppendStr(tls, bp, ts+32979, bp+12) sessionAppendInteger(tls, bp, ii*2+1, bp+12) zSep = ts + 14614 } } zSep = ts + 1554 - sessionAppendStr(tls, bp, ts+32861, bp+12) + sessionAppendStr(tls, bp, ts+32908, bp+12) for ii = 0; ii < (*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FnCol; ii++ { if *(*U8)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FabPK + uintptr(ii))) != 0 || bPatchset == 0 && *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FapValue + uintptr(ii)*4)) != 0 { sessionAppendStr(tls, bp, zSep, bp+12) if bStat1 != 0 && ii == 1 { sessionAppendStr(tls, bp, - ts+32937, bp+12) + ts+32984, bp+12) } else { sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(ii)*4)), bp+12) - sessionAppendStr(tls, bp, ts+32869, bp+12) + sessionAppendStr(tls, bp, ts+32916, bp+12) sessionAppendInteger(tls, bp, ii*2+2, bp+12) } - zSep = ts + 21525 + zSep = ts + 21572 } } @@ -133697,34 +133755,34 @@ *(*SessionBuffer)(unsafe.Pointer(bp)) = SessionBuffer{} var nPk int32 = 0 - sessionAppendStr(tls, bp, ts+33012, bp+12) + sessionAppendStr(tls, bp, ts+33059, bp+12) sessionAppendIdent(tls, bp, zTab, bp+12) - sessionAppendStr(tls, bp, ts+32861, bp+12) + sessionAppendStr(tls, bp, ts+32908, bp+12) for i = 0; i < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol; i++ { if *(*U8)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FabPK + uintptr(i))) != 0 { nPk++ sessionAppendStr(tls, bp, zSep, bp+12) sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(i)*4)), bp+12) - sessionAppendStr(tls, bp, ts+32932, bp+12) + sessionAppendStr(tls, bp, ts+32979, bp+12) sessionAppendInteger(tls, bp, i+1, bp+12) - zSep = ts + 21525 + zSep = ts + 21572 } } if nPk < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol { - sessionAppendStr(tls, bp, ts+33030, bp+12) + sessionAppendStr(tls, bp, ts+33077, bp+12) sessionAppendInteger(tls, bp, (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol+1, bp+12) - sessionAppendStr(tls, bp, ts+32573, bp+12) + sessionAppendStr(tls, bp, ts+32620, bp+12) zSep = ts + 1554 for i = 0; i < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol; i++ { if !(int32(*(*U8)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FabPK + 
uintptr(i)))) != 0) { sessionAppendStr(tls, bp, zSep, bp+12) sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(i)*4)), bp+12) - sessionAppendStr(tls, bp, ts+32869, bp+12) + sessionAppendStr(tls, bp, ts+32916, bp+12) sessionAppendInteger(tls, bp, i+1, bp+12) - zSep = ts + 33038 + zSep = ts + 33085 } } sessionAppendStr(tls, bp, ts+4957, bp+12) @@ -133751,9 +133809,9 @@ var i int32 *(*SessionBuffer)(unsafe.Pointer(bp)) = SessionBuffer{} - sessionAppendStr(tls, bp, ts+33043, bp+12) + sessionAppendStr(tls, bp, ts+33090, bp+12) sessionAppendIdent(tls, bp, zTab, bp+12) - sessionAppendStr(tls, bp, ts+21531, bp+12) + sessionAppendStr(tls, bp, ts+21578, bp+12) for i = 0; i < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol; i++ { if i != 0 { sessionAppendStr(tls, bp, ts+14614, bp+12) @@ -133761,9 +133819,9 @@ sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(i)*4)), bp+12) } - sessionAppendStr(tls, bp, ts+33061, bp+12) + sessionAppendStr(tls, bp, ts+33108, bp+12) for i = 1; i < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol; i++ { - sessionAppendStr(tls, bp, ts+33072, bp+12) + sessionAppendStr(tls, bp, ts+33119, bp+12) } sessionAppendStr(tls, bp, ts+4957, bp+12) @@ -133782,11 +133840,11 @@ var rc int32 = sessionSelectRow(tls, db, ts+11348, p) if rc == SQLITE_OK { rc = sessionPrepare(tls, db, p+8, - ts+33076) + ts+33123) } if rc == SQLITE_OK { rc = sessionPrepare(tls, db, p+4, - ts+33189) + ts+33236) } return rc } @@ -133814,7 +133872,7 @@ f func(*libc.TLS, uintptr, int32, uintptr) int32 })(unsafe.Pointer(&struct{ uintptr }{xValue})).f(tls, pIter, i, bp) if *(*uintptr)(unsafe.Pointer(bp)) == uintptr(0) { - rc = Xsqlite3CorruptError(tls, 220219) + rc = Xsqlite3CorruptError(tls, 220310) } else { rc = sessionBindValue(tls, pStmt, i+1, *(*uintptr)(unsafe.Pointer(bp))) } @@ -134067,7 +134125,7 @@ if *(*int32)(unsafe.Pointer(bp + 4)) != 0 { rc = sessionApplyOneOp(tls, pIter, pApply, xConflict, pCtx, uintptr(0), uintptr(0)) } else if *(*int32)(unsafe.Pointer(bp)) != 0 { - rc = Xsqlite3_exec(tls, db, ts+33333, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33380, uintptr(0), uintptr(0), uintptr(0)) if rc == SQLITE_OK { rc = sessionBindRow(tls, pIter, *(*uintptr)(unsafe.Pointer(&struct { @@ -134083,7 +134141,7 @@ rc = sessionApplyOneOp(tls, pIter, pApply, xConflict, pCtx, uintptr(0), uintptr(0)) } if rc == SQLITE_OK { - rc = Xsqlite3_exec(tls, db, ts+33354, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33401, uintptr(0), uintptr(0), uintptr(0)) } } } @@ -134156,10 +134214,10 @@ (*SessionApplyCtx)(unsafe.Pointer(bp + 48)).FbInvertConstraints = libc.BoolInt32(!!(flags&SQLITE_CHANGESETAPPLY_INVERT != 0)) Xsqlite3_mutex_enter(tls, Xsqlite3_db_mutex(tls, db)) if flags&SQLITE_CHANGESETAPPLY_NOSAVEPOINT == 0 { - rc = Xsqlite3_exec(tls, db, ts+33373, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33420, uintptr(0), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { - rc = Xsqlite3_exec(tls, db, ts+33399, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33446, uintptr(0), uintptr(0), uintptr(0)) } for rc == SQLITE_OK && SQLITE_ROW == Xsqlite3changeset_next(tls, pIter) { Xsqlite3changeset_op(tls, pIter, bp+124, bp+128, bp+132, uintptr(0)) @@ -134218,16 +134276,16 @@ if (*SessionApplyCtx)(unsafe.Pointer(bp+48)).FnCol == 0 { schemaMismatch = 1 Xsqlite3_log(tls, SQLITE_SCHEMA, - ts+33429, libc.VaList(bp+8, 
*(*uintptr)(unsafe.Pointer(bp + 140)))) + ts+33476, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(bp + 140)))) } else if (*SessionApplyCtx)(unsafe.Pointer(bp+48)).FnCol < *(*int32)(unsafe.Pointer(bp + 128)) { schemaMismatch = 1 Xsqlite3_log(tls, SQLITE_SCHEMA, - ts+33473, + ts+33520, libc.VaList(bp+16, *(*uintptr)(unsafe.Pointer(bp + 140)), (*SessionApplyCtx)(unsafe.Pointer(bp+48)).FnCol, *(*int32)(unsafe.Pointer(bp + 128)))) } else if *(*int32)(unsafe.Pointer(bp + 128)) < nMinCol || libc.Xmemcmp(tls, (*SessionApplyCtx)(unsafe.Pointer(bp+48)).FabPK, *(*uintptr)(unsafe.Pointer(bp + 136)), uint32(*(*int32)(unsafe.Pointer(bp + 128)))) != 0 { schemaMismatch = 1 Xsqlite3_log(tls, SQLITE_SCHEMA, - ts+33544, libc.VaList(bp+40, *(*uintptr)(unsafe.Pointer(bp + 140)))) + ts+33591, libc.VaList(bp+40, *(*uintptr)(unsafe.Pointer(bp + 140)))) } else { (*SessionApplyCtx)(unsafe.Pointer(bp + 48)).FnCol = *(*int32)(unsafe.Pointer(bp + 128)) if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(bp + 140)), ts+11348) { @@ -134281,14 +134339,14 @@ } } } - Xsqlite3_exec(tls, db, ts+33604, uintptr(0), uintptr(0), uintptr(0)) + Xsqlite3_exec(tls, db, ts+33651, uintptr(0), uintptr(0), uintptr(0)) if flags&SQLITE_CHANGESETAPPLY_NOSAVEPOINT == 0 { if rc == SQLITE_OK { - rc = Xsqlite3_exec(tls, db, ts+33634, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33681, uintptr(0), uintptr(0), uintptr(0)) } else { - Xsqlite3_exec(tls, db, ts+33658, uintptr(0), uintptr(0), uintptr(0)) - Xsqlite3_exec(tls, db, ts+33634, uintptr(0), uintptr(0), uintptr(0)) + Xsqlite3_exec(tls, db, ts+33705, uintptr(0), uintptr(0), uintptr(0)) + Xsqlite3_exec(tls, db, ts+33681, uintptr(0), uintptr(0), uintptr(0)) } } @@ -135520,7 +135578,7 @@ fts5yy_pop_parser_stack(tls, fts5yypParser) } - sqlite3Fts5ParseError(tls, pParse, ts+33686, 0) + sqlite3Fts5ParseError(tls, pParse, ts+33733, 0) (*Fts5yyParser)(unsafe.Pointer(fts5yypParser)).FpParse = pParse @@ -135808,7 +135866,7 @@ _ = fts5yymajor sqlite3Fts5ParseError(tls, - pParse, ts+33714, libc.VaList(bp, fts5yyminor.Fn, fts5yyminor.Fp)) + pParse, ts+33761, libc.VaList(bp, fts5yyminor.Fn, fts5yyminor.Fp)) (*Fts5yyParser)(unsafe.Pointer(fts5yypParser)).FpParse = pParse @@ -135993,7 +136051,7 @@ if n < 0 { n = int32(libc.Xstrlen(tls, z)) } - (*HighlightContext)(unsafe.Pointer(p)).FzOut = Xsqlite3_mprintf(tls, ts+33745, libc.VaList(bp, (*HighlightContext)(unsafe.Pointer(p)).FzOut, n, z)) + (*HighlightContext)(unsafe.Pointer(p)).FzOut = Xsqlite3_mprintf(tls, ts+33792, libc.VaList(bp, (*HighlightContext)(unsafe.Pointer(p)).FzOut, n, z)) if (*HighlightContext)(unsafe.Pointer(p)).FzOut == uintptr(0) { *(*int32)(unsafe.Pointer(pRc)) = SQLITE_NOMEM } @@ -136061,7 +136119,7 @@ var iCol int32 if nVal != 3 { - var zErr uintptr = ts + 33752 + var zErr uintptr = ts + 33799 Xsqlite3_result_error(tls, pCtx, zErr, -1) return } @@ -136242,7 +136300,7 @@ var nCol int32 if nVal != 5 { - var zErr uintptr = ts + 33802 + var zErr uintptr = ts + 33849 Xsqlite3_result_error(tls, pCtx, zErr, -1) return } @@ -136565,13 +136623,13 @@ defer tls.Free(48) *(*[3]Builtin)(unsafe.Pointer(bp)) = [3]Builtin{ - {FzFunc: ts + 33850, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { + {FzFunc: ts + 33897, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr, int32, uintptr) }{fts5SnippetFunction}))}, - {FzFunc: ts + 33858, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { + {FzFunc: ts + 33905, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr, 
int32, uintptr) }{fts5HighlightFunction}))}, - {FzFunc: ts + 33868, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { + {FzFunc: ts + 33915, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr, int32, uintptr) }{fts5Bm25Function}))}, } @@ -137121,7 +137179,7 @@ *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_OK var nCmd int32 = int32(libc.Xstrlen(tls, zCmd)) - if Xsqlite3_strnicmp(tls, ts+33873, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+33920, zCmd, nCmd) == 0 { var nByte int32 = int32(uint32(unsafe.Sizeof(int32(0))) * uint32(FTS5_MAX_PREFIX_INDEXES)) var p uintptr var bFirst int32 = 1 @@ -137148,14 +137206,14 @@ break } if int32(*(*int8)(unsafe.Pointer(p))) < '0' || int32(*(*int8)(unsafe.Pointer(p))) > '9' { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+33880, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+33927, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR break } if (*Fts5Config)(unsafe.Pointer(pConfig)).FnPrefix == FTS5_MAX_PREFIX_INDEXES { *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, - ts+33911, libc.VaList(bp, FTS5_MAX_PREFIX_INDEXES)) + ts+33958, libc.VaList(bp, FTS5_MAX_PREFIX_INDEXES)) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR break } @@ -137166,7 +137224,7 @@ } if nPre <= 0 || nPre >= 1000 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+33944, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+33991, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR break } @@ -137179,7 +137237,7 @@ return *(*int32)(unsafe.Pointer(bp + 40)) } - if Xsqlite3_strnicmp(tls, ts+33981, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+34028, zCmd, nCmd) == 0 { var p uintptr = zArg var nArg Sqlite3_int64 = Sqlite3_int64(libc.Xstrlen(tls, zArg) + Size_t(1)) var azArg uintptr = sqlite3Fts5MallocZero(tls, bp+40, Sqlite3_int64(unsafe.Sizeof(uintptr(0)))*nArg) @@ -137188,7 +137246,7 @@ if azArg != 0 && pSpace != 0 { if (*Fts5Config)(unsafe.Pointer(pConfig)).FpTok != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+33990, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34037, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { for nArg = int64(0); p != 0 && *(*int8)(unsafe.Pointer(p)) != 0; nArg++ { @@ -137207,7 +137265,7 @@ } } if p == uintptr(0) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34023, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34070, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { *(*int32)(unsafe.Pointer(bp + 40)) = sqlite3Fts5GetTokenizer(tls, pGlobal, @@ -137222,14 +137280,14 @@ return *(*int32)(unsafe.Pointer(bp + 40)) } - if Xsqlite3_strnicmp(tls, ts+34057, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+34104, zCmd, nCmd) == 0 { if (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent != FTS5_CONTENT_NORMAL { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34065, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34112, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { if *(*int8)(unsafe.Pointer(zArg)) != 0 { (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent = FTS5_CONTENT_EXTERNAL - (*Fts5Config)(unsafe.Pointer(pConfig)).FzContent = sqlite3Fts5Mprintf(tls, bp+40, ts+34097, libc.VaList(bp+8, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, zArg)) + (*Fts5Config)(unsafe.Pointer(pConfig)).FzContent = sqlite3Fts5Mprintf(tls, bp+40, ts+34144, libc.VaList(bp+8, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, zArg)) } 
else { (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent = FTS5_CONTENT_NONE } @@ -137237,9 +137295,9 @@ return *(*int32)(unsafe.Pointer(bp + 40)) } - if Xsqlite3_strnicmp(tls, ts+34103, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+34150, zCmd, nCmd) == 0 { if (*Fts5Config)(unsafe.Pointer(pConfig)).FzContentRowid != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34117, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34164, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FzContentRowid = sqlite3Fts5Strndup(tls, bp+40, zArg, -1) @@ -137247,9 +137305,9 @@ return *(*int32)(unsafe.Pointer(bp + 40)) } - if Xsqlite3_strnicmp(tls, ts+34155, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+34202, zCmd, nCmd) == 0 { if int32(*(*int8)(unsafe.Pointer(zArg))) != '0' && int32(*(*int8)(unsafe.Pointer(zArg))) != '1' || int32(*(*int8)(unsafe.Pointer(zArg + 1))) != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34166, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34213, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize = libc.Bool32(int32(*(*int8)(unsafe.Pointer(zArg))) == '1') @@ -137261,17 +137319,17 @@ *(*[4]Fts5Enum)(unsafe.Pointer(bp + 44)) = [4]Fts5Enum{ {FzName: ts + 8026, FeVal: FTS5_DETAIL_NONE}, {FzName: ts + 17345}, - {FzName: ts + 34201, FeVal: FTS5_DETAIL_COLUMNS}, + {FzName: ts + 34248, FeVal: FTS5_DETAIL_COLUMNS}, {}, } if libc.AssignPtrInt32(bp+40, fts5ConfigSetEnum(tls, bp+44, zArg, pConfig+48)) != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34209, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34256, 0) } return *(*int32)(unsafe.Pointer(bp + 40)) } - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34240, libc.VaList(bp+24, nCmd, zCmd)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34287, libc.VaList(bp+24, nCmd, zCmd)) return SQLITE_ERROR } @@ -137318,15 +137376,15 @@ defer tls.Free(16) var rc int32 = SQLITE_OK - if 0 == Xsqlite3_stricmp(tls, zCol, ts+22191) || + if 0 == Xsqlite3_stricmp(tls, zCol, ts+22238) || 0 == Xsqlite3_stricmp(tls, zCol, ts+16267) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34268, libc.VaList(bp, zCol)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34315, libc.VaList(bp, zCol)) rc = SQLITE_ERROR } else if zArg != 0 { - if 0 == Xsqlite3_stricmp(tls, zArg, ts+34298) { + if 0 == Xsqlite3_stricmp(tls, zArg, ts+34345) { *(*U8)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(p)).FabUnindexed + uintptr((*Fts5Config)(unsafe.Pointer(p)).FnCol))) = U8(1) } else { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34308, libc.VaList(bp+8, zArg)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34355, libc.VaList(bp+8, zArg)) rc = SQLITE_ERROR } } @@ -137343,13 +137401,13 @@ *(*int32)(unsafe.Pointer(bp + 24)) = SQLITE_OK *(*Fts5Buffer)(unsafe.Pointer(bp + 28)) = Fts5Buffer{} - sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+28, ts+34339, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(p)).FzContentRowid)) + sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+28, ts+34386, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(p)).FzContentRowid)) if (*Fts5Config)(unsafe.Pointer(p)).FeContent != FTS5_CONTENT_NONE { for i = 0; i < (*Fts5Config)(unsafe.Pointer(p)).FnCol; i++ { if (*Fts5Config)(unsafe.Pointer(p)).FeContent == FTS5_CONTENT_EXTERNAL { - 
sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+28, ts+34344, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(p)).FazCol + uintptr(i)*4)))) + sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+28, ts+34391, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(p)).FazCol + uintptr(i)*4)))) } else { - sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+28, ts+34351, libc.VaList(bp+16, i)) + sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+28, ts+34398, libc.VaList(bp+16, i)) } } } @@ -137387,8 +137445,8 @@ (*Fts5Config)(unsafe.Pointer(pRet)).FzName = sqlite3Fts5Strndup(tls, bp+40, *(*uintptr)(unsafe.Pointer(azArg + 2*4)), -1) (*Fts5Config)(unsafe.Pointer(pRet)).FbColumnsize = 1 (*Fts5Config)(unsafe.Pointer(pRet)).FeDetail = FTS5_DETAIL_FULL - if *(*int32)(unsafe.Pointer(bp + 40)) == SQLITE_OK && Xsqlite3_stricmp(tls, (*Fts5Config)(unsafe.Pointer(pRet)).FzName, ts+22191) == 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34359, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pRet)).FzName)) + if *(*int32)(unsafe.Pointer(bp + 40)) == SQLITE_OK && Xsqlite3_stricmp(tls, (*Fts5Config)(unsafe.Pointer(pRet)).FzName, ts+22238) == 0 { + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34406, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pRet)).FzName)) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } @@ -137420,7 +137478,7 @@ if *(*int32)(unsafe.Pointer(bp + 40)) == SQLITE_OK { if z == uintptr(0) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34388, libc.VaList(bp+8, zOrig)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34435, libc.VaList(bp+8, zOrig)) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { if bOption != 0 { @@ -137457,14 +137515,14 @@ var zTail uintptr = uintptr(0) if (*Fts5Config)(unsafe.Pointer(pRet)).FeContent == FTS5_CONTENT_NORMAL { - zTail = ts + 34057 + zTail = ts + 34104 } else if (*Fts5Config)(unsafe.Pointer(pRet)).FbColumnsize != 0 { - zTail = ts + 34408 + zTail = ts + 34455 } if zTail != 0 { (*Fts5Config)(unsafe.Pointer(pRet)).FzContent = sqlite3Fts5Mprintf(tls, - bp+40, ts+34416, libc.VaList(bp+16, (*Fts5Config)(unsafe.Pointer(pRet)).FzDb, (*Fts5Config)(unsafe.Pointer(pRet)).FzName, zTail)) + bp+40, ts+34463, libc.VaList(bp+16, (*Fts5Config)(unsafe.Pointer(pRet)).FzDb, (*Fts5Config)(unsafe.Pointer(pRet)).FzName, zTail)) } } @@ -137513,7 +137571,7 @@ *(*int32)(unsafe.Pointer(bp + 48)) = SQLITE_OK var zSql uintptr - zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34427, 0) + zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34474, 0) for i = 0; zSql != 0 && i < (*Fts5Config)(unsafe.Pointer(pConfig)).FnCol; i++ { var zSep uintptr = func() uintptr { if i == 0 { @@ -137521,10 +137579,10 @@ } return ts + 14614 }() - zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34443, libc.VaList(bp, zSql, zSep, *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(pConfig)).FazCol + uintptr(i)*4)))) + zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34490, libc.VaList(bp, zSql, zSep, *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(pConfig)).FazCol + uintptr(i)*4)))) } - zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34450, - libc.VaList(bp+24, zSql, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, ts+22191)) + zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34497, + libc.VaList(bp+24, zSql, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, ts+22238)) if zSql != 0 { *(*int32)(unsafe.Pointer(bp + 48)) = Xsqlite3_declare_vtab(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, zSql) @@ -137634,7 +137692,7 @@ var rc int32 = SQLITE_OK - if 
0 == Xsqlite3_stricmp(tls, zKey, ts+34476) { + if 0 == Xsqlite3_stricmp(tls, zKey, ts+34523) { var pgsz int32 = 0 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { pgsz = Xsqlite3_value_int(tls, pVal) @@ -137644,7 +137702,7 @@ } else { (*Fts5Config)(unsafe.Pointer(pConfig)).Fpgsz = pgsz } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34481) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34528) { var nHashSize int32 = -1 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { nHashSize = Xsqlite3_value_int(tls, pVal) @@ -137654,7 +137712,7 @@ } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FnHashSize = nHashSize } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34490) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34537) { var nAutomerge int32 = -1 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { nAutomerge = Xsqlite3_value_int(tls, pVal) @@ -137667,7 +137725,7 @@ } (*Fts5Config)(unsafe.Pointer(pConfig)).FnAutomerge = nAutomerge } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34500) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34547) { var nUsermerge int32 = -1 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { nUsermerge = Xsqlite3_value_int(tls, pVal) @@ -137677,7 +137735,7 @@ } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FnUsermerge = nUsermerge } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34510) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34557) { var nCrisisMerge int32 = -1 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { nCrisisMerge = Xsqlite3_value_int(tls, pVal) @@ -137693,7 +137751,7 @@ } (*Fts5Config)(unsafe.Pointer(pConfig)).FnCrisisMerge = nCrisisMerge } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+22191) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+22238) { var zIn uintptr = Xsqlite3_value_text(tls, pVal) rc = sqlite3Fts5ConfigParseRank(tls, zIn, bp, bp+4) @@ -137716,7 +137774,7 @@ bp := tls.Alloc(44) defer tls.Free(44) - var zSelect uintptr = ts + 34522 + var zSelect uintptr = ts + 34569 var zSql uintptr *(*uintptr)(unsafe.Pointer(bp + 36)) = uintptr(0) *(*int32)(unsafe.Pointer(bp + 32)) = SQLITE_OK @@ -137738,7 +137796,7 @@ for SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 36))) { var zK uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 36)), 0) var pVal uintptr = Xsqlite3_column_value(tls, *(*uintptr)(unsafe.Pointer(bp + 36)), 1) - if 0 == Xsqlite3_stricmp(tls, zK, ts+34554) { + if 0 == Xsqlite3_stricmp(tls, zK, ts+34601) { iVersion = Xsqlite3_value_int(tls, pVal) } else { *(*int32)(unsafe.Pointer(bp + 40)) = 0 @@ -137752,7 +137810,7 @@ *(*int32)(unsafe.Pointer(bp + 32)) = SQLITE_ERROR if (*Fts5Config)(unsafe.Pointer(pConfig)).FpzErrmsg != 0 { *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(pConfig)).FpzErrmsg)) = Xsqlite3_mprintf(tls, - ts+34562, + ts+34609, libc.VaList(bp+16, iVersion, FTS5_CURRENT_VERSION)) } } @@ -137850,7 +137908,7 @@ } } if int32(*(*int8)(unsafe.Pointer(z2))) == 0 { - sqlite3Fts5ParseError(tls, pParse, ts+34627, 0) + sqlite3Fts5ParseError(tls, pParse, ts+34674, 0) return FTS5_EOF } } @@ -137863,20 +137921,20 @@ { var z2 uintptr if sqlite3Fts5IsBareword(tls, *(*int8)(unsafe.Pointer(z))) == 0 { - sqlite3Fts5ParseError(tls, pParse, ts+34647, libc.VaList(bp, z)) + sqlite3Fts5ParseError(tls, pParse, ts+34694, libc.VaList(bp, z)) return FTS5_EOF } tok = FTS5_STRING for z2 = z + 1; sqlite3Fts5IsBareword(tls, *(*int8)(unsafe.Pointer(z2))) != 0; z2++ { } (*Fts5Token)(unsafe.Pointer(pToken)).Fn = (int32(z2) - int32(z)) / 1 - if 
(*Fts5Token)(unsafe.Pointer(pToken)).Fn == 2 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+34678, uint32(2)) == 0 { + if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 2 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+34725, uint32(2)) == 0 { tok = FTS5_OR } - if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 3 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+34681, uint32(3)) == 0 { + if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 3 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+34728, uint32(3)) == 0 { tok = FTS5_NOT } - if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 3 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+30063, uint32(3)) == 0 { + if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 3 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+30110, uint32(3)) == 0 { tok = FTS5_AND } break @@ -139653,9 +139711,9 @@ bp := tls.Alloc(16) defer tls.Free(16) - if (*Fts5Token)(unsafe.Pointer(pTok)).Fn != 4 || libc.Xmemcmp(tls, ts+34685, (*Fts5Token)(unsafe.Pointer(pTok)).Fp, uint32(4)) != 0 { + if (*Fts5Token)(unsafe.Pointer(pTok)).Fn != 4 || libc.Xmemcmp(tls, ts+34732, (*Fts5Token)(unsafe.Pointer(pTok)).Fp, uint32(4)) != 0 { sqlite3Fts5ParseError(tls, - pParse, ts+33714, libc.VaList(bp, (*Fts5Token)(unsafe.Pointer(pTok)).Fn, (*Fts5Token)(unsafe.Pointer(pTok)).Fp)) + pParse, ts+33761, libc.VaList(bp, (*Fts5Token)(unsafe.Pointer(pTok)).Fn, (*Fts5Token)(unsafe.Pointer(pTok)).Fp)) } } @@ -139671,7 +139729,7 @@ var c int8 = *(*int8)(unsafe.Pointer((*Fts5Token)(unsafe.Pointer(p)).Fp + uintptr(i))) if int32(c) < '0' || int32(c) > '9' { sqlite3Fts5ParseError(tls, - pParse, ts+34690, libc.VaList(bp, (*Fts5Token)(unsafe.Pointer(p)).Fn, (*Fts5Token)(unsafe.Pointer(p)).Fp)) + pParse, ts+34737, libc.VaList(bp, (*Fts5Token)(unsafe.Pointer(p)).Fn, (*Fts5Token)(unsafe.Pointer(p)).Fp)) return } nNear = nNear*10 + (int32(*(*int8)(unsafe.Pointer((*Fts5Token)(unsafe.Pointer(p)).Fp + uintptr(i)))) - '0') @@ -139758,7 +139816,7 @@ } } if iCol == (*Fts5Config)(unsafe.Pointer(pConfig)).FnCol { - sqlite3Fts5ParseError(tls, pParse, ts+20528, libc.VaList(bp, z)) + sqlite3Fts5ParseError(tls, pParse, ts+20575, libc.VaList(bp, z)) } else { pRet = fts5ParseColset(tls, pParse, pColset, iCol) } @@ -139839,7 +139897,7 @@ *(*uintptr)(unsafe.Pointer(bp)) = pColset if (*Fts5Config)(unsafe.Pointer((*Fts5Parse)(unsafe.Pointer(pParse)).FpConfig)).FeDetail == FTS5_DETAIL_NONE { sqlite3Fts5ParseError(tls, pParse, - ts+34719, 0) + ts+34766, 0) } else { fts5ParseSetColset(tls, pParse, pExpr, pColset, bp) } @@ -140009,12 +140067,12 @@ (*Fts5ExprPhrase)(unsafe.Pointer(pPhrase)).FnTerm > 1 || (*Fts5ExprPhrase)(unsafe.Pointer(pPhrase)).FnTerm > 0 && (*Fts5ExprTerm)(unsafe.Pointer(pPhrase+20)).FbFirst != 0 { sqlite3Fts5ParseError(tls, pParse, - ts+34772, + ts+34819, libc.VaList(bp, func() uintptr { if (*Fts5ExprNearset)(unsafe.Pointer(pNear)).FnPhrase == 1 { - return ts + 34822 + return ts + 34869 } - return ts + 34685 + return ts + 34732 }())) Xsqlite3_free(tls, pRet) pRet = uintptr(0) @@ -140954,7 +141012,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpReader == uintptr(0) && rc == SQLITE_OK { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig rc = Xsqlite3_blob_open(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, - (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl, ts+34829, iRowid, 0, p+36) + (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl, ts+34876, iRowid, 0, p+36) } 
if rc == SQLITE_ERROR { @@ -141033,7 +141091,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpWriter == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig fts5IndexPrepareStmt(tls, p, p+40, Xsqlite3_mprintf(tls, - ts+34835, + ts+34882, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName))) if (*Fts5Index)(unsafe.Pointer(p)).Frc != 0 { return @@ -141058,7 +141116,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpDeleter == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig var zSql uintptr = Xsqlite3_mprintf(tls, - ts+34886, + ts+34933, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) if fts5IndexPrepareStmt(tls, p, p+44, zSql) != 0 { return @@ -141081,7 +141139,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpIdxDeleter == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig fts5IndexPrepareStmt(tls, p, p+52, Xsqlite3_mprintf(tls, - ts+34935, + ts+34982, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName))) } if (*Fts5Index)(unsafe.Pointer(p)).Frc == SQLITE_OK { @@ -141320,7 +141378,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).Frc == SQLITE_OK { if (*Fts5Index)(unsafe.Pointer(p)).FpDataVersion == uintptr(0) { (*Fts5Index)(unsafe.Pointer(p)).Frc = fts5IndexPrepareStmt(tls, p, p+64, - Xsqlite3_mprintf(tls, ts+34975, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer((*Fts5Index)(unsafe.Pointer(p)).FpConfig)).FzDb))) + Xsqlite3_mprintf(tls, ts+35022, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer((*Fts5Index)(unsafe.Pointer(p)).FpConfig)).FzDb))) if (*Fts5Index)(unsafe.Pointer(p)).Frc != 0 { return int64(0) } @@ -142519,7 +142577,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpIdxSelect == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig fts5IndexPrepareStmt(tls, p, p+56, Xsqlite3_mprintf(tls, - ts+34998, + ts+35045, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName))) } return (*Fts5Index)(unsafe.Pointer(p)).FpIdxSelect @@ -143984,7 +144042,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpIdxWriter == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig fts5IndexPrepareStmt(tls, p, p+48, Xsqlite3_mprintf(tls, - ts+35082, + ts+35129, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName))) } @@ -145065,13 +145123,13 @@ if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { (*Fts5Index)(unsafe.Pointer(p)).FpConfig = pConfig (*Fts5Index)(unsafe.Pointer(p)).FnWorkUnit = FTS5_WORK_UNIT - (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl = sqlite3Fts5Mprintf(tls, bp+8, ts+35139, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) + (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl = sqlite3Fts5Mprintf(tls, bp+8, ts+35186, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) if (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl != 0 && bCreate != 0 { *(*int32)(unsafe.Pointer(bp + 8)) = sqlite3Fts5CreateTable(tls, - pConfig, ts+25056, ts+35147, 0, pzErr) + pConfig, ts+25103, ts+35194, 0, pzErr) if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { *(*int32)(unsafe.Pointer(bp + 8)) = sqlite3Fts5CreateTable(tls, pConfig, ts+11488, - ts+35182, + ts+35229, 1, pzErr) } if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { @@ -145324,7 +145382,7 @@ sqlite3Fts5Put32(tls, bp, iNew) rc = Xsqlite3_blob_open(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, 
(*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl, - ts+34829, int64(FTS5_STRUCTURE_ROWID), 1, bp+4) + ts+34876, int64(FTS5_STRUCTURE_ROWID), 1, bp+4) if rc == SQLITE_OK { Xsqlite3_blob_write(tls, *(*uintptr)(unsafe.Pointer(bp + 4)), bp, 4, 0) rc = Xsqlite3_blob_close(tls, *(*uintptr)(unsafe.Pointer(bp + 4))) @@ -145438,7 +145496,7 @@ } fts5IndexPrepareStmt(tls, p, bp+24, Xsqlite3_mprintf(tls, - ts+35226, + ts+35273, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, (*Fts5StructureSegment)(unsafe.Pointer(pSeg)).FiSegid))) for (*Fts5Index)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) { @@ -145608,7 +145666,7 @@ } else { (*Fts5Buffer)(unsafe.Pointer(bp + 8)).Fn = 0 fts5SegiterPoslist(tls, p, *(*uintptr)(unsafe.Pointer(bp))+68+uintptr((*Fts5CResult)(unsafe.Pointer((*Fts5Iter)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FaFirst+1*4)).FiFirst)*92, uintptr(0), bp+8) - sqlite3Fts5BufferAppendBlob(tls, p+32, bp+8, uint32(4), ts+35312) + sqlite3Fts5BufferAppendBlob(tls, p+32, bp+8, uint32(4), ts+35359) for 0 == sqlite3Fts5PoslistNext64(tls, (*Fts5Buffer)(unsafe.Pointer(bp+8)).Fp, (*Fts5Buffer)(unsafe.Pointer(bp+8)).Fn, bp+20, bp+24) { var iCol int32 = int32(*(*I64)(unsafe.Pointer(bp + 24)) >> 32) var iTokOff int32 = int32(*(*I64)(unsafe.Pointer(bp + 24)) & int64(0x7FFFFFFF)) @@ -145876,7 +145934,7 @@ if (*Fts5Config)(unsafe.Pointer(pConfig)).FbLock != 0 { (*Fts5Table)(unsafe.Pointer(pTab)).Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+35317, 0) + ts+35364, 0) return SQLITE_ERROR } @@ -146300,7 +146358,7 @@ (*Fts5Sorter)(unsafe.Pointer(pSorter)).FnIdx = nPhrase rc = fts5PrepareStatement(tls, pSorter, pConfig, - ts+35356, + ts+35403, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zRank, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, func() uintptr { if zRankArgs != 0 { @@ -146316,9 +146374,9 @@ }(), func() uintptr { if bDesc != 0 { - return ts + 35411 + return ts + 35458 } - return ts + 35416 + return ts + 35463 }())) (*Fts5Cursor)(unsafe.Pointer(pCsr)).FpSorter = pSorter @@ -146364,12 +146422,12 @@ (*Fts5Cursor)(unsafe.Pointer(pCsr)).FePlan = FTS5_PLAN_SPECIAL - if n == 5 && 0 == Xsqlite3_strnicmp(tls, ts+35420, z, n) { + if n == 5 && 0 == Xsqlite3_strnicmp(tls, ts+35467, z, n) { (*Fts5Cursor)(unsafe.Pointer(pCsr)).FiSpecial = I64(sqlite3Fts5IndexReads(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.FpIndex)) } else if n == 2 && 0 == Xsqlite3_strnicmp(tls, ts+5057, z, n) { (*Fts5Cursor)(unsafe.Pointer(pCsr)).FiSpecial = (*Fts5Cursor)(unsafe.Pointer(pCsr)).FiCsrId } else { - (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, ts+35426, libc.VaList(bp, n, z)) + (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, ts+35473, libc.VaList(bp, n, z)) rc = SQLITE_ERROR } @@ -146400,7 +146458,7 @@ var zRankArgs uintptr = (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRankArgs if zRankArgs != 0 { - var zSql uintptr = sqlite3Fts5Mprintf(tls, bp+16, ts+35454, libc.VaList(bp, zRankArgs)) + var zSql uintptr = sqlite3Fts5Mprintf(tls, bp+16, ts+35501, libc.VaList(bp, zRankArgs)) if zSql != 0 { *(*uintptr)(unsafe.Pointer(bp + 20)) = uintptr(0) *(*int32)(unsafe.Pointer(bp + 16)) = Xsqlite3_prepare_v3(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, zSql, -1, @@ -146431,7 +146489,7 @@ if *(*int32)(unsafe.Pointer(bp + 16)) == SQLITE_OK { pAux = 
fts5FindAuxiliary(tls, pTab, zRank) if pAux == uintptr(0) { - (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, ts+35464, libc.VaList(bp+8, zRank)) + (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, ts+35511, libc.VaList(bp+8, zRank)) *(*int32)(unsafe.Pointer(bp + 16)) = SQLITE_ERROR } } @@ -146463,14 +146521,14 @@ *(*int32)(unsafe.Pointer(pCsr + 56)) |= FTS5CSR_FREE_ZRANK } else if rc == SQLITE_ERROR { (*Sqlite3_vtab)(unsafe.Pointer((*Fts5Cursor)(unsafe.Pointer(pCsr)).Fbase.FpVtab)).FzErrMsg = Xsqlite3_mprintf(tls, - ts+35485, libc.VaList(bp, z)) + ts+35532, libc.VaList(bp, z)) } } else { if (*Fts5Config)(unsafe.Pointer(pConfig)).FzRank != 0 { (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRank = (*Fts5Config)(unsafe.Pointer(pConfig)).FzRank (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRankArgs = (*Fts5Config)(unsafe.Pointer(pConfig)).FzRankArgs } else { - (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRank = ts + 33868 + (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRank = ts + 33915 (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRankArgs = uintptr(0) } } @@ -146526,7 +146584,7 @@ goto __1 } (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+35317, 0) + ts+35364, 0) return SQLITE_ERROR __1: ; @@ -146743,7 +146801,7 @@ goto __40 } *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(pConfig)).FpzErrmsg)) = Xsqlite3_mprintf(tls, - ts+35518, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) + ts+35565, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) rc = SQLITE_ERROR goto __41 __40: @@ -146888,28 +146946,28 @@ var rc int32 = SQLITE_OK *(*int32)(unsafe.Pointer(bp)) = 0 - if 0 == Xsqlite3_stricmp(tls, ts+35554, zCmd) { + if 0 == Xsqlite3_stricmp(tls, ts+35601, zCmd) { if (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NORMAL { fts5SetVtabError(tls, pTab, - ts+35565, 0) + ts+35612, 0) rc = SQLITE_ERROR } else { rc = sqlite3Fts5StorageDeleteAll(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage) } - } else if 0 == Xsqlite3_stricmp(tls, ts+35645, zCmd) { + } else if 0 == Xsqlite3_stricmp(tls, ts+35692, zCmd) { if (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NONE { fts5SetVtabError(tls, pTab, - ts+35653, 0) + ts+35700, 0) rc = SQLITE_ERROR } else { rc = sqlite3Fts5StorageRebuild(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage) } } else if 0 == Xsqlite3_stricmp(tls, ts+16934, zCmd) { rc = sqlite3Fts5StorageOptimize(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage) - } else if 0 == Xsqlite3_stricmp(tls, ts+35709, zCmd) { + } else if 0 == Xsqlite3_stricmp(tls, ts+35756, zCmd) { var nMerge int32 = Xsqlite3_value_int(tls, pVal) rc = sqlite3Fts5StorageMerge(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage, nMerge) - } else if 0 == Xsqlite3_stricmp(tls, ts+35715, zCmd) { + } else if 0 == Xsqlite3_stricmp(tls, ts+35762, zCmd) { var iArg int32 = Xsqlite3_value_int(tls, pVal) rc = sqlite3Fts5StorageIntegrity(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage, iArg) } else { @@ -146980,12 +147038,12 @@ if eType0 == SQLITE_INTEGER && fts5IsContentless(tls, pTab) != 0 { (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+35731, + ts+35778, libc.VaList(bp, func() uintptr { if nArg > 1 { - return ts + 20429 + return ts + 20476 } - return ts + 35768 + return ts + 35815 }(), (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) *(*int32)(unsafe.Pointer(bp + 16)) = SQLITE_ERROR } else if nArg == 1 { @@ -147615,7 +147673,7 @@ pCsr = 
fts5CursorFromCsrid(tls, (*Fts5Auxiliary)(unsafe.Pointer(pAux)).FpGlobal, iCsrId) if pCsr == uintptr(0) || (*Fts5Cursor)(unsafe.Pointer(pCsr)).FePlan == 0 { - var zErr uintptr = Xsqlite3_mprintf(tls, ts+35780, libc.VaList(bp, iCsrId)) + var zErr uintptr = Xsqlite3_mprintf(tls, ts+35827, libc.VaList(bp, iCsrId)) Xsqlite3_result_error(tls, context, zErr, -1) Xsqlite3_free(tls, zErr) } else { @@ -147859,7 +147917,7 @@ }()) if pMod == uintptr(0) { rc = SQLITE_ERROR - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35801, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(azArg)))) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35848, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(azArg)))) } else { rc = (*struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 @@ -147878,7 +147936,7 @@ (*Fts5Config)(unsafe.Pointer(pConfig)).FpTokApi = pMod + 8 if rc != SQLITE_OK { if pzErr != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35823, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35870, 0) } } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FePattern = sqlite3Fts5TokenizerPattern(tls, @@ -147925,7 +147983,7 @@ var ppApi uintptr _ = nArg - ppApi = Xsqlite3_value_pointer(tls, *(*uintptr)(unsafe.Pointer(apArg)), ts+35854) + ppApi = Xsqlite3_value_pointer(tls, *(*uintptr)(unsafe.Pointer(apArg)), ts+35901) if ppApi != 0 { *(*uintptr)(unsafe.Pointer(ppApi)) = pGlobal } @@ -147934,7 +147992,7 @@ func fts5SourceIdFunc(tls *libc.TLS, pCtx uintptr, nArg int32, apUnused uintptr) { _ = nArg _ = apUnused - Xsqlite3_result_text(tls, pCtx, ts+35867, -1, libc.UintptrFromInt32(-1)) + Xsqlite3_result_text(tls, pCtx, ts+35914, -1, libc.UintptrFromInt32(-1)) } func fts5ShadowName(tls *libc.TLS, zName uintptr) int32 { @@ -147948,7 +148006,7 @@ } var azName2 = [5]uintptr{ - ts + 35958, ts + 34057, ts + 25056, ts + 34408, ts + 11488, + ts + 36005, ts + 34104, ts + 25103, ts + 34455, ts + 11488, } func fts5Init(tls *libc.TLS, db uintptr) int32 { @@ -147972,7 +148030,7 @@ (*Fts5Global)(unsafe.Pointer(pGlobal)).Fapi.FxFindTokenizer = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr, uintptr) int32 }{fts5FindTokenizer})) - rc = Xsqlite3_create_module_v2(tls, db, ts+35965, uintptr(unsafe.Pointer(&fts5Mod)), p, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5ModuleDestroy}))) + rc = Xsqlite3_create_module_v2(tls, db, ts+36012, uintptr(unsafe.Pointer(&fts5Mod)), p, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5ModuleDestroy}))) if rc == SQLITE_OK { rc = sqlite3Fts5IndexInit(tls, db) } @@ -147990,13 +148048,13 @@ } if rc == SQLITE_OK { rc = Xsqlite3_create_function(tls, - db, ts+35965, 1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { + db, ts+36012, 1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{fts5Fts5Func})), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { rc = Xsqlite3_create_function(tls, - db, ts+35970, 0, + db, ts+36017, 0, SQLITE_UTF8|SQLITE_DETERMINISTIC|SQLITE_INNOCUOUS, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) @@ -148053,17 +148111,17 @@ if *(*uintptr)(unsafe.Pointer(p + 24 + uintptr(eStmt)*4)) == uintptr(0) { *(*[11]uintptr)(unsafe.Pointer(bp + 128)) = [11]uintptr{ - ts + 35985, - ts + 36053, - ts + 36122, - ts + 36155, - ts + 36194, - ts + 36234, - ts + 36273, - ts + 36314, - ts + 36353, - ts + 36395, - ts + 36435, + ts + 36032, + ts + 36100, + ts + 36169, + ts + 36202, + 
ts + 36241, + ts + 36281, + ts + 36320, + ts + 36361, + ts + 36400, + ts + 36442, + ts + 36482, } var pC uintptr = (*Fts5Storage)(unsafe.Pointer(p)).FpConfig var zSql uintptr = uintptr(0) @@ -148165,18 +148223,18 @@ defer tls.Free(80) var rc int32 = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36458, + ts+36505, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36562, + ts+36609, libc.VaList(bp+48, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) } if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NORMAL { rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36600, + ts+36647, libc.VaList(bp+64, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) } return rc @@ -148188,7 +148246,7 @@ if *(*int32)(unsafe.Pointer(pRc)) == SQLITE_OK { *(*int32)(unsafe.Pointer(pRc)) = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36638, + ts+36685, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zTail, zName, zTail)) } } @@ -148200,14 +148258,14 @@ var pConfig uintptr = (*Fts5Storage)(unsafe.Pointer(pStorage)).FpConfig *(*int32)(unsafe.Pointer(bp)) = sqlite3Fts5StorageSync(tls, pStorage) - fts5StorageRenameOne(tls, pConfig, bp, ts+25056, zName) + fts5StorageRenameOne(tls, pConfig, bp, ts+25103, zName) fts5StorageRenameOne(tls, pConfig, bp, ts+11488, zName) - fts5StorageRenameOne(tls, pConfig, bp, ts+35958, zName) + fts5StorageRenameOne(tls, pConfig, bp, ts+36005, zName) if (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { - fts5StorageRenameOne(tls, pConfig, bp, ts+34408, zName) + fts5StorageRenameOne(tls, pConfig, bp, ts+34455, zName) } if (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NORMAL { - fts5StorageRenameOne(tls, pConfig, bp, ts+34057, zName) + fts5StorageRenameOne(tls, pConfig, bp, ts+34104, zName) } return *(*int32)(unsafe.Pointer(bp)) } @@ -148219,17 +148277,17 @@ var rc int32 *(*uintptr)(unsafe.Pointer(bp + 64)) = uintptr(0) - rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, bp+64, ts+36680, + rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, bp+64, ts+36727, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zPost, zDefn, func() uintptr { if bWithout != 0 { - return ts + 29709 + return ts + 29756 } return ts + 1554 }())) if *(*uintptr)(unsafe.Pointer(bp + 64)) != 0 { *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, - ts+36710, + ts+36757, libc.VaList(bp+40, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zPost, *(*uintptr)(unsafe.Pointer(bp + 64)))) Xsqlite3_free(tls, *(*uintptr)(unsafe.Pointer(bp + 64))) } @@ -148266,27 +148324,27 @@ } else { var i int32 var iOff int32 - Xsqlite3_snprintf(tls, nDefn, zDefn, ts+36754, 0) + Xsqlite3_snprintf(tls, nDefn, zDefn, ts+36801, 0) iOff = int32(libc.Xstrlen(tls, zDefn)) for i = 0; i < (*Fts5Config)(unsafe.Pointer(pConfig)).FnCol; i++ { - 
Xsqlite3_snprintf(tls, nDefn-iOff, zDefn+uintptr(iOff), ts+36777, libc.VaList(bp, i)) + Xsqlite3_snprintf(tls, nDefn-iOff, zDefn+uintptr(iOff), ts+36824, libc.VaList(bp, i)) iOff = iOff + int32(libc.Xstrlen(tls, zDefn+uintptr(iOff))) } - rc = sqlite3Fts5CreateTable(tls, pConfig, ts+34057, zDefn, 0, pzErr) + rc = sqlite3Fts5CreateTable(tls, pConfig, ts+34104, zDefn, 0, pzErr) } Xsqlite3_free(tls, zDefn) } if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { rc = sqlite3Fts5CreateTable(tls, - pConfig, ts+34408, ts+36783, 0, pzErr) + pConfig, ts+34455, ts+36830, 0, pzErr) } if rc == SQLITE_OK { rc = sqlite3Fts5CreateTable(tls, - pConfig, ts+35958, ts+36815, 1, pzErr) + pConfig, ts+36005, ts+36862, 1, pzErr) } if rc == SQLITE_OK { - rc = sqlite3Fts5StorageConfigValue(tls, p, ts+34554, uintptr(0), FTS5_CURRENT_VERSION) + rc = sqlite3Fts5StorageConfigValue(tls, p, ts+34601, uintptr(0), FTS5_CURRENT_VERSION) } } @@ -148492,12 +148550,12 @@ (*Fts5Storage)(unsafe.Pointer(p)).FbTotalsValid = 0 rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36832, + ts+36879, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36882, + ts+36929, libc.VaList(bp+32, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) } @@ -148505,7 +148563,7 @@ rc = sqlite3Fts5IndexReinit(tls, (*Fts5Storage)(unsafe.Pointer(p)).FpIndex) } if rc == SQLITE_OK { - rc = sqlite3Fts5StorageConfigValue(tls, p, ts+34554, uintptr(0), FTS5_CURRENT_VERSION) + rc = sqlite3Fts5StorageConfigValue(tls, p, ts+34601, uintptr(0), FTS5_CURRENT_VERSION) } return rc } @@ -148681,7 +148739,7 @@ var zSql uintptr var rc int32 - zSql = Xsqlite3_mprintf(tls, ts+36911, + zSql = Xsqlite3_mprintf(tls, ts+36958, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zSuffix)) if zSql == uintptr(0) { rc = SQLITE_NOMEM @@ -148863,14 +148921,14 @@ if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NORMAL { *(*I64)(unsafe.Pointer(bp + 36)) = int64(0) - rc = fts5StorageCount(tls, p, ts+34057, bp+36) + rc = fts5StorageCount(tls, p, ts+34104, bp+36) if rc == SQLITE_OK && *(*I64)(unsafe.Pointer(bp + 36)) != (*Fts5Storage)(unsafe.Pointer(p)).FnTotalRow { rc = SQLITE_CORRUPT | int32(1)<<8 } } if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { *(*I64)(unsafe.Pointer(bp + 44)) = int64(0) - rc = fts5StorageCount(tls, p, ts+34408, bp+44) + rc = fts5StorageCount(tls, p, ts+34455, bp+44) if rc == SQLITE_OK && *(*I64)(unsafe.Pointer(bp + 44)) != (*Fts5Storage)(unsafe.Pointer(p)).FnTotalRow { rc = SQLITE_CORRUPT | int32(1)<<8 } @@ -149065,9 +149123,9 @@ libc.Xmemcpy(tls, p, uintptr(unsafe.Pointer(&aAsciiTokenChar)), uint32(unsafe.Sizeof(aAsciiTokenChar))) for i = 0; rc == SQLITE_OK && i < nArg; i = i + 2 { var zArg uintptr = *(*uintptr)(unsafe.Pointer(azArg + uintptr(i+1)*4)) - if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*4)), ts+36943) { + if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*4)), ts+36990) { fts5AsciiAddExceptions(tls, p, zArg, 1) - } else if 0 == Xsqlite3_stricmp(tls, 
*(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*4)), ts+36954) { + } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*4)), ts+37001) { fts5AsciiAddExceptions(tls, p, zArg, 0) } else { rc = SQLITE_ERROR @@ -149281,7 +149339,7 @@ } else { p = Xsqlite3_malloc(tls, int32(unsafe.Sizeof(Unicode61Tokenizer{}))) if p != 0 { - var zCat uintptr = ts + 36965 + var zCat uintptr = ts + 37012 var i int32 libc.Xmemset(tls, p, 0, uint32(unsafe.Sizeof(Unicode61Tokenizer{}))) @@ -149293,7 +149351,7 @@ } for i = 0; rc == SQLITE_OK && i < nArg; i = i + 2 { - if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*4)), ts+36974) { + if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*4)), ts+37021) { zCat = *(*uintptr)(unsafe.Pointer(azArg + uintptr(i+1)*4)) } } @@ -149304,18 +149362,18 @@ for i = 0; rc == SQLITE_OK && i < nArg; i = i + 2 { var zArg uintptr = *(*uintptr)(unsafe.Pointer(azArg + uintptr(i+1)*4)) - if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*4)), ts+36985) { + if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*4)), ts+37032) { if int32(*(*int8)(unsafe.Pointer(zArg))) != '0' && int32(*(*int8)(unsafe.Pointer(zArg))) != '1' && int32(*(*int8)(unsafe.Pointer(zArg))) != '2' || *(*int8)(unsafe.Pointer(zArg + 1)) != 0 { rc = SQLITE_ERROR } else { (*Unicode61Tokenizer)(unsafe.Pointer(p)).FeRemoveDiacritic = int32(*(*int8)(unsafe.Pointer(zArg))) - '0' } - } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*4)), ts+36943) { + } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*4)), ts+36990) { rc = fts5UnicodeAddExceptions(tls, p, zArg, 1) - } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*4)), ts+36954) { + } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*4)), ts+37001) { rc = fts5UnicodeAddExceptions(tls, p, zArg, 0) - } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*4)), ts+36974) { + } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*4)), ts+37021) { } else { rc = SQLITE_ERROR } @@ -149591,7 +149649,7 @@ var rc int32 = SQLITE_OK var pRet uintptr *(*uintptr)(unsafe.Pointer(bp)) = uintptr(0) - var zBase uintptr = ts + 37003 + var zBase uintptr = ts + 37050 if nArg > 0 { zBase = *(*uintptr)(unsafe.Pointer(azArg)) @@ -149731,7 +149789,7 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*int8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'a': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37013, aBuf+uintptr(nBuf-2), uint32(2)) { + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37060, aBuf+uintptr(nBuf-2), uint32(2)) { if fts5Porter_MGt1(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 } @@ -149739,11 +149797,11 @@ break case 'c': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37016, aBuf+uintptr(nBuf-4), uint32(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37063, aBuf+uintptr(nBuf-4), uint32(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37021, aBuf+uintptr(nBuf-4), uint32(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37068, aBuf+uintptr(nBuf-4), uint32(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } @@ -149751,7 +149809,7 @@ break case 'e': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37026, 
aBuf+uintptr(nBuf-2), uint32(2)) { + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37073, aBuf+uintptr(nBuf-2), uint32(2)) { if fts5Porter_MGt1(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 } @@ -149759,7 +149817,7 @@ break case 'i': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37029, aBuf+uintptr(nBuf-2), uint32(2)) { + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37076, aBuf+uintptr(nBuf-2), uint32(2)) { if fts5Porter_MGt1(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 } @@ -149767,11 +149825,11 @@ break case 'l': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37032, aBuf+uintptr(nBuf-4), uint32(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37079, aBuf+uintptr(nBuf-4), uint32(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37037, aBuf+uintptr(nBuf-4), uint32(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37084, aBuf+uintptr(nBuf-4), uint32(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } @@ -149779,19 +149837,19 @@ break case 'n': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37042, aBuf+uintptr(nBuf-3), uint32(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37089, aBuf+uintptr(nBuf-3), uint32(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37046, aBuf+uintptr(nBuf-5), uint32(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37093, aBuf+uintptr(nBuf-5), uint32(5)) { if fts5Porter_MGt1(tls, aBuf, nBuf-5) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37052, aBuf+uintptr(nBuf-4), uint32(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37099, aBuf+uintptr(nBuf-4), uint32(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } - } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37057, aBuf+uintptr(nBuf-3), uint32(3)) { + } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37104, aBuf+uintptr(nBuf-3), uint32(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -149799,11 +149857,11 @@ break case 'o': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37061, aBuf+uintptr(nBuf-3), uint32(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37108, aBuf+uintptr(nBuf-3), uint32(3)) { if fts5Porter_MGt1_and_S_or_T(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } - } else if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37065, aBuf+uintptr(nBuf-2), uint32(2)) { + } else if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37112, aBuf+uintptr(nBuf-2), uint32(2)) { if fts5Porter_MGt1(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 } @@ -149811,7 +149869,7 @@ break case 's': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37068, aBuf+uintptr(nBuf-3), uint32(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37115, aBuf+uintptr(nBuf-3), uint32(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -149819,11 +149877,11 @@ break case 't': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37072, aBuf+uintptr(nBuf-3), uint32(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37119, aBuf+uintptr(nBuf-3), uint32(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } - } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37076, aBuf+uintptr(nBuf-3), uint32(3)) { + } else if nBuf > 3 && 0 == 
libc.Xmemcmp(tls, ts+37123, aBuf+uintptr(nBuf-3), uint32(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -149831,7 +149889,7 @@ break case 'u': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37080, aBuf+uintptr(nBuf-3), uint32(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37127, aBuf+uintptr(nBuf-3), uint32(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -149839,7 +149897,7 @@ break case 'v': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37084, aBuf+uintptr(nBuf-3), uint32(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37131, aBuf+uintptr(nBuf-3), uint32(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -149847,7 +149905,7 @@ break case 'z': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37088, aBuf+uintptr(nBuf-3), uint32(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37135, aBuf+uintptr(nBuf-3), uint32(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -149863,24 +149921,24 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*int8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'a': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37092, aBuf+uintptr(nBuf-2), uint32(2)) { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37072, uint32(3)) + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37139, aBuf+uintptr(nBuf-2), uint32(2)) { + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37119, uint32(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 + 3 ret = 1 } break case 'b': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37095, aBuf+uintptr(nBuf-2), uint32(2)) { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37098, uint32(3)) + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37142, aBuf+uintptr(nBuf-2), uint32(2)) { + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37145, uint32(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 + 3 ret = 1 } break case 'i': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37102, aBuf+uintptr(nBuf-2), uint32(2)) { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37088, uint32(3)) + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37149, aBuf+uintptr(nBuf-2), uint32(2)) { + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37135, uint32(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 + 3 ret = 1 } @@ -149895,44 +149953,44 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*int8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'a': - if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37105, aBuf+uintptr(nBuf-7), uint32(7)) { + if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37152, aBuf+uintptr(nBuf-7), uint32(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37072, uint32(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37119, uint32(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } - } else if nBuf > 6 && 0 == libc.Xmemcmp(tls, ts+37113, aBuf+uintptr(nBuf-6), uint32(6)) { + } else if nBuf > 6 && 0 == libc.Xmemcmp(tls, ts+37160, aBuf+uintptr(nBuf-6), uint32(6)) { if fts5Porter_MGt0(tls, aBuf, nBuf-6) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-6), ts+37120, uint32(4)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-6), ts+37167, uint32(4)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 6 + 4 } } break case 'c': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37125, aBuf+uintptr(nBuf-4), uint32(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37172, aBuf+uintptr(nBuf-4), uint32(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37021, uint32(4)) 
+ libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37068, uint32(4)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 4 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37130, aBuf+uintptr(nBuf-4), uint32(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37177, aBuf+uintptr(nBuf-4), uint32(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37016, uint32(4)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37063, uint32(4)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 4 } } break case 'e': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37135, aBuf+uintptr(nBuf-4), uint32(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37182, aBuf+uintptr(nBuf-4), uint32(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37088, uint32(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37135, uint32(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 3 } } break case 'g': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37140, aBuf+uintptr(nBuf-4), uint32(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37187, aBuf+uintptr(nBuf-4), uint32(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+15480, uint32(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 3 @@ -149941,91 +149999,91 @@ break case 'l': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37145, aBuf+uintptr(nBuf-3), uint32(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37192, aBuf+uintptr(nBuf-3), uint32(3)) { if fts5Porter_MGt0(tls, aBuf, nBuf-3) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37098, uint32(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37145, uint32(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 + 3 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37149, aBuf+uintptr(nBuf-4), uint32(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37196, aBuf+uintptr(nBuf-4), uint32(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37013, uint32(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37060, uint32(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 2 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37154, aBuf+uintptr(nBuf-5), uint32(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37201, aBuf+uintptr(nBuf-5), uint32(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37057, uint32(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37104, uint32(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 3 } - } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37160, aBuf+uintptr(nBuf-3), uint32(3)) { + } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37207, aBuf+uintptr(nBuf-3), uint32(3)) { if fts5Porter_MGt0(tls, aBuf, nBuf-3) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37164, uint32(1)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37211, uint32(1)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 + 1 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37166, aBuf+uintptr(nBuf-5), uint32(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37213, aBuf+uintptr(nBuf-5), uint32(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37080, uint32(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37127, uint32(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 3 } } break case 'o': - if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37172, aBuf+uintptr(nBuf-7), uint32(7)) { + if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37219, aBuf+uintptr(nBuf-7), uint32(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 
{ - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37088, uint32(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37135, uint32(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37180, aBuf+uintptr(nBuf-5), uint32(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37227, aBuf+uintptr(nBuf-5), uint32(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37072, uint32(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37119, uint32(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 3 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37186, aBuf+uintptr(nBuf-4), uint32(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37233, aBuf+uintptr(nBuf-4), uint32(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37072, uint32(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37119, uint32(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 3 } } break case 's': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37191, aBuf+uintptr(nBuf-5), uint32(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37238, aBuf+uintptr(nBuf-5), uint32(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37013, uint32(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37060, uint32(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } - } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37197, aBuf+uintptr(nBuf-7), uint32(7)) { + } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37244, aBuf+uintptr(nBuf-7), uint32(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37084, uint32(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37131, uint32(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } - } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37205, aBuf+uintptr(nBuf-7), uint32(7)) { + } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37252, aBuf+uintptr(nBuf-7), uint32(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37213, uint32(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37260, uint32(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } - } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37217, aBuf+uintptr(nBuf-7), uint32(7)) { + } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37264, aBuf+uintptr(nBuf-7), uint32(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37080, uint32(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37127, uint32(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } } break case 't': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37225, aBuf+uintptr(nBuf-5), uint32(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37272, aBuf+uintptr(nBuf-5), uint32(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37013, uint32(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37060, uint32(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37231, aBuf+uintptr(nBuf-5), uint32(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37278, aBuf+uintptr(nBuf-5), uint32(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37084, uint32(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37131, uint32(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 3 } - } else if nBuf > 6 && 0 == libc.Xmemcmp(tls, ts+37237, aBuf+uintptr(nBuf-6), uint32(6)) { + } else if nBuf > 6 && 0 == 
libc.Xmemcmp(tls, ts+37284, aBuf+uintptr(nBuf-6), uint32(6)) { if fts5Porter_MGt0(tls, aBuf, nBuf-6) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-6), ts+37098, uint32(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-6), ts+37145, uint32(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 6 + 3 } } @@ -150040,16 +150098,16 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*int8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'a': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37244, aBuf+uintptr(nBuf-4), uint32(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37291, aBuf+uintptr(nBuf-4), uint32(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37029, uint32(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37076, uint32(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 2 } } break case 's': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37249, aBuf+uintptr(nBuf-4), uint32(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37296, aBuf+uintptr(nBuf-4), uint32(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } @@ -150057,21 +150115,21 @@ break case 't': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37254, aBuf+uintptr(nBuf-5), uint32(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37301, aBuf+uintptr(nBuf-5), uint32(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37029, uint32(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37076, uint32(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37260, aBuf+uintptr(nBuf-5), uint32(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37307, aBuf+uintptr(nBuf-5), uint32(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37029, uint32(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37076, uint32(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } } break case 'u': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37213, aBuf+uintptr(nBuf-3), uint32(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37260, aBuf+uintptr(nBuf-3), uint32(3)) { if fts5Porter_MGt0(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -150079,7 +150137,7 @@ break case 'v': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37266, aBuf+uintptr(nBuf-5), uint32(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37313, aBuf+uintptr(nBuf-5), uint32(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 } @@ -150087,9 +150145,9 @@ break case 'z': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37272, aBuf+uintptr(nBuf-5), uint32(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37319, aBuf+uintptr(nBuf-5), uint32(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37013, uint32(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37060, uint32(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } } @@ -150104,12 +150162,12 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*int8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'e': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37278, aBuf+uintptr(nBuf-3), uint32(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37325, aBuf+uintptr(nBuf-3), uint32(3)) { if fts5Porter_MGt0(tls, aBuf, nBuf-3) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37282, uint32(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37329, uint32(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 + 2 } - } else if nBuf > 2 && 0 == 
libc.Xmemcmp(tls, ts+37285, aBuf+uintptr(nBuf-2), uint32(2)) { + } else if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37332, aBuf+uintptr(nBuf-2), uint32(2)) { if fts5Porter_Vowel(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 ret = 1 @@ -150118,7 +150176,7 @@ break case 'n': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37288, aBuf+uintptr(nBuf-3), uint32(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37335, aBuf+uintptr(nBuf-3), uint32(3)) { if fts5Porter_Vowel(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 ret = 1 @@ -150274,7 +150332,7 @@ (*TrigramTokenizer)(unsafe.Pointer(pNew)).FbFold = 1 for i = 0; rc == SQLITE_OK && i < nArg; i = i + 2 { var zArg uintptr = *(*uintptr)(unsafe.Pointer(azArg + uintptr(i+1)*4)) - if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*4)), ts+37292) { + if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*4)), ts+37339) { if int32(*(*int8)(unsafe.Pointer(zArg))) != '0' && int32(*(*int8)(unsafe.Pointer(zArg))) != '1' || *(*int8)(unsafe.Pointer(zArg + 1)) != 0 { rc = SQLITE_ERROR } else { @@ -150454,22 +150512,22 @@ defer tls.Free(64) *(*[4]BuiltinTokenizer)(unsafe.Pointer(bp)) = [4]BuiltinTokenizer{ - {FzName: ts + 37003, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { + {FzName: ts + 37050, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 }{fts5UnicodeCreate})), FxDelete: *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5UnicodeDelete})), FxTokenize: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, int32, uintptr) int32 }{fts5UnicodeTokenize}))}}, - {FzName: ts + 37307, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { + {FzName: ts + 37354, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 }{fts5AsciiCreate})), FxDelete: *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5AsciiDelete})), FxTokenize: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, int32, uintptr) int32 }{fts5AsciiTokenize}))}}, - {FzName: ts + 37313, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { + {FzName: ts + 37360, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 }{fts5PorterCreate})), FxDelete: *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5PorterDelete})), FxTokenize: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, int32, uintptr) int32 }{fts5PorterTokenize}))}}, - {FzName: ts + 37320, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { + {FzName: ts + 37367, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 }{fts5TriCreate})), FxDelete: *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5TriDelete})), FxTokenize: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, int32, uintptr) int32 @@ -151608,14 +151666,14 @@ var zCopy uintptr = sqlite3Fts5Strndup(tls, bp+8, zType, -1) if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { sqlite3Fts5Dequote(tls, zCopy) - if Xsqlite3_stricmp(tls, zCopy, ts+37328) == 0 { + if Xsqlite3_stricmp(tls, zCopy, ts+37375) == 0 { *(*int32)(unsafe.Pointer(peType)) = FTS5_VOCAB_COL - } 
else if Xsqlite3_stricmp(tls, zCopy, ts+37332) == 0 { + } else if Xsqlite3_stricmp(tls, zCopy, ts+37379) == 0 { *(*int32)(unsafe.Pointer(peType)) = FTS5_VOCAB_ROW - } else if Xsqlite3_stricmp(tls, zCopy, ts+37336) == 0 { + } else if Xsqlite3_stricmp(tls, zCopy, ts+37383) == 0 { *(*int32)(unsafe.Pointer(peType)) = FTS5_VOCAB_INSTANCE } else { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+37345, libc.VaList(bp, zCopy)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+37392, libc.VaList(bp, zCopy)) *(*int32)(unsafe.Pointer(bp + 8)) = SQLITE_ERROR } Xsqlite3_free(tls, zCopy) @@ -151641,19 +151699,19 @@ defer tls.Free(20) *(*[3]uintptr)(unsafe.Pointer(bp + 4)) = [3]uintptr{ - ts + 37379, - ts + 37419, - ts + 37454, + ts + 37426, + ts + 37466, + ts + 37501, } var pRet uintptr = uintptr(0) *(*int32)(unsafe.Pointer(bp + 16)) = SQLITE_OK var bDb int32 - bDb = libc.Bool32(argc == 6 && libc.Xstrlen(tls, *(*uintptr)(unsafe.Pointer(argv + 1*4))) == Size_t(4) && libc.Xmemcmp(tls, ts+23352, *(*uintptr)(unsafe.Pointer(argv + 1*4)), uint32(4)) == 0) + bDb = libc.Bool32(argc == 6 && libc.Xstrlen(tls, *(*uintptr)(unsafe.Pointer(argv + 1*4))) == Size_t(4) && libc.Xmemcmp(tls, ts+23399, *(*uintptr)(unsafe.Pointer(argv + 1*4)), uint32(4)) == 0) if argc != 5 && bDb == 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+37497, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+37544, 0) *(*int32)(unsafe.Pointer(bp + 16)) = SQLITE_ERROR } else { var nByte int32 @@ -151786,11 +151844,11 @@ if (*Fts5VocabTable)(unsafe.Pointer(pTab)).FbBusy != 0 { (*Sqlite3_vtab)(unsafe.Pointer(pVTab)).FzErrMsg = Xsqlite3_mprintf(tls, - ts+37530, libc.VaList(bp, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) + ts+37577, libc.VaList(bp, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) return SQLITE_ERROR } zSql = sqlite3Fts5Mprintf(tls, bp+64, - ts+37561, + ts+37608, libc.VaList(bp+16, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) if zSql != 0 { *(*int32)(unsafe.Pointer(bp + 64)) = Xsqlite3_prepare_v2(tls, (*Fts5VocabTable)(unsafe.Pointer(pTab)).Fdb, zSql, -1, bp+68, uintptr(0)) @@ -151814,7 +151872,7 @@ *(*uintptr)(unsafe.Pointer(bp + 68)) = uintptr(0) if *(*int32)(unsafe.Pointer(bp + 64)) == SQLITE_OK { (*Sqlite3_vtab)(unsafe.Pointer(pVTab)).FzErrMsg = Xsqlite3_mprintf(tls, - ts+37612, libc.VaList(bp+48, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) + ts+37659, libc.VaList(bp+48, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) *(*int32)(unsafe.Pointer(bp + 64)) = SQLITE_ERROR } } else { @@ -152209,7 +152267,7 @@ func sqlite3Fts5VocabInit(tls *libc.TLS, pGlobal uintptr, db uintptr) int32 { var p uintptr = pGlobal - return Xsqlite3_create_module_v2(tls, db, ts+37638, uintptr(unsafe.Pointer(&fts5Vocab)), p, uintptr(0)) + return Xsqlite3_create_module_v2(tls, db, ts+37685, uintptr(unsafe.Pointer(&fts5Vocab)), p, uintptr(0)) } var fts5Vocab = Sqlite3_module{ @@ -152231,7 +152289,7 @@ // ************* End of stmt.c *********************************************** // Return the source-id for this library func Xsqlite3_sourceid(tls *libc.TLS) uintptr { - return ts + 37648 + return ts + 37695 } func init() { 
@@ -153208,5 +153266,5 @@ *(*func(*libc.TLS, uintptr, int32, uintptr) int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&vfs_template)) + 68)) = rbuVfsGetLastError } -var ts1 = "3.41.0\x00ATOMIC_INTRINSICS=1\x00COMPILER=gcc-10.2.1 20210110\x00DEFAULT_AUTOVACUUM\x00DEFAULT_CACHE_SIZE=-2000\x00DEFAULT_FILE_FORMAT=4\x00DEFAULT_JOURNAL_SIZE_LIMIT=-1\x00DEFAULT_MEMSTATUS=0\x00DEFAULT_MMAP_SIZE=0\x00DEFAULT_PAGE_SIZE=4096\x00DEFAULT_PCACHE_INITSZ=20\x00DEFAULT_RECURSIVE_TRIGGERS\x00DEFAULT_SECTOR_SIZE=4096\x00DEFAULT_SYNCHRONOUS=2\x00DEFAULT_WAL_AUTOCHECKPOINT=1000\x00DEFAULT_WAL_SYNCHRONOUS=2\x00DEFAULT_WORKER_THREADS=0\x00ENABLE_COLUMN_METADATA\x00ENABLE_FTS5\x00ENABLE_GEOPOLY\x00ENABLE_MATH_FUNCTIONS\x00ENABLE_MEMORY_MANAGEMENT\x00ENABLE_OFFSET_SQL_FUNC\x00ENABLE_PREUPDATE_HOOK\x00ENABLE_RBU\x00ENABLE_RTREE\x00ENABLE_SESSION\x00ENABLE_SNAPSHOT\x00ENABLE_STAT4\x00ENABLE_UNLOCK_NOTIFY\x00LIKE_DOESNT_MATCH_BLOBS\x00MALLOC_SOFT_LIMIT=1024\x00MAX_ATTACHED=10\x00MAX_COLUMN=2000\x00MAX_COMPOUND_SELECT=500\x00MAX_DEFAULT_PAGE_SIZE=8192\x00MAX_EXPR_DEPTH=1000\x00MAX_FUNCTION_ARG=127\x00MAX_LENGTH=1000000000\x00MAX_LIKE_PATTERN_LENGTH=50000\x00MAX_MMAP_SIZE=0x7fff0000\x00MAX_PAGE_COUNT=1073741823\x00MAX_PAGE_SIZE=65536\x00MAX_SQL_LENGTH=1000000000\x00MAX_TRIGGER_DEPTH=1000\x00MAX_VARIABLE_NUMBER=32766\x00MAX_VDBE_OP=250000000\x00MAX_WORKER_THREADS=8\x00MUTEX_NOOP\x00SOUNDEX\x00SYSTEM_MALLOC\x00TEMP_STORE=1\x00THREADSAFE=1\x00BINARY\x00ANY\x00BLOB\x00INT\x00INTEGER\x00REAL\x00TEXT\x0020b:20e\x0020c:20e\x0020e\x0040f-21a-21d\x00now\x00local time unavailable\x00second\x00minute\x00hour\x00\x00\x00day\x00\x00\x00\x00month\x00\x00year\x00\x00\x00auto\x00julianday\x00localtime\x00unixepoch\x00utc\x00weekday \x00start of \x00month\x00year\x00day\x00%02d\x00%06.3f\x00%03d\x00%.16g\x00%lld\x00%04d\x00date\x00time\x00datetime\x00strftime\x00current_time\x00current_timestamp\x00current_date\x00failed to allocate %u bytes of memory\x00failed memory resize %u to %u bytes\x00out of memory\x000123456789ABCDEF0123456789abcdef\x00-x0\x00X0\x00%\x00NaN\x00Inf\x00\x00NULL\x00(NULL)\x00.\x00(join-%u)\x00(subquery-%u)\x00thstndrd\x00922337203685477580\x00API call with %s database connection 
pointer\x00unopened\x00invalid\x00Savepoint\x00AutoCommit\x00Transaction\x00Checkpoint\x00JournalMode\x00Vacuum\x00VFilter\x00VUpdate\x00Init\x00Goto\x00Gosub\x00InitCoroutine\x00Yield\x00MustBeInt\x00Jump\x00Once\x00If\x00IfNot\x00IsType\x00Not\x00IfNullRow\x00SeekLT\x00SeekLE\x00SeekGE\x00SeekGT\x00IfNotOpen\x00IfNoHope\x00NoConflict\x00NotFound\x00Found\x00SeekRowid\x00NotExists\x00Last\x00IfSmaller\x00SorterSort\x00Sort\x00Rewind\x00SorterNext\x00Prev\x00Next\x00IdxLE\x00IdxGT\x00IdxLT\x00Or\x00And\x00IdxGE\x00RowSetRead\x00RowSetTest\x00Program\x00FkIfZero\x00IsNull\x00NotNull\x00Ne\x00Eq\x00Gt\x00Le\x00Lt\x00Ge\x00ElseEq\x00IfPos\x00IfNotZero\x00DecrJumpZero\x00IncrVacuum\x00VNext\x00Filter\x00PureFunc\x00Function\x00Return\x00EndCoroutine\x00HaltIfNull\x00Halt\x00Integer\x00Int64\x00String\x00BeginSubrtn\x00Null\x00SoftNull\x00Blob\x00Variable\x00Move\x00Copy\x00SCopy\x00IntCopy\x00FkCheck\x00ResultRow\x00CollSeq\x00AddImm\x00RealAffinity\x00Cast\x00Permutation\x00Compare\x00IsTrue\x00ZeroOrNull\x00Offset\x00Column\x00TypeCheck\x00Affinity\x00MakeRecord\x00Count\x00ReadCookie\x00SetCookie\x00ReopenIdx\x00BitAnd\x00BitOr\x00ShiftLeft\x00ShiftRight\x00Add\x00Subtract\x00Multiply\x00Divide\x00Remainder\x00Concat\x00OpenRead\x00OpenWrite\x00BitNot\x00OpenDup\x00OpenAutoindex\x00String8\x00OpenEphemeral\x00SorterOpen\x00SequenceTest\x00OpenPseudo\x00Close\x00ColumnsUsed\x00SeekScan\x00SeekHit\x00Sequence\x00NewRowid\x00Insert\x00RowCell\x00Delete\x00ResetCount\x00SorterCompare\x00SorterData\x00RowData\x00Rowid\x00NullRow\x00SeekEnd\x00IdxInsert\x00SorterInsert\x00IdxDelete\x00DeferredSeek\x00IdxRowid\x00FinishSeek\x00Destroy\x00Clear\x00ResetSorter\x00CreateBtree\x00SqlExec\x00ParseSchema\x00LoadAnalysis\x00DropTable\x00DropIndex\x00Real\x00DropTrigger\x00IntegrityCk\x00RowSetAdd\x00Param\x00FkCounter\x00MemMax\x00OffsetLimit\x00AggInverse\x00AggStep\x00AggStep1\x00AggValue\x00AggFinal\x00Expire\x00CursorLock\x00CursorUnlock\x00TableLock\x00VBegin\x00VCreate\x00VDestroy\x00VOpen\x00VInitIn\x00VColumn\x00VRename\x00Pagecount\x00MaxPgcnt\x00ClrSubtype\x00FilterAdd\x00Trace\x00CursorHint\x00ReleaseReg\x00Noop\x00Explain\x00Abortable\x00open\x00close\x00access\x00getcwd\x00stat\x00fstat\x00ftruncate\x00fcntl\x00read\x00pread\x00pread64\x00write\x00pwrite\x00pwrite64\x00fchmod\x00fallocate\x00unlink\x00openDirectory\x00mkdir\x00rmdir\x00fchown\x00geteuid\x00mmap\x00munmap\x00mremap\x00getpagesize\x00readlink\x00lstat\x00ioctl\x00attempt to open \"%s\" as file descriptor %d\x00/dev/null\x00os_unix.c:%d: (%d) %s(%s) - %s\x00cannot fstat db file %s\x00file unlinked while open: %s\x00multiple links to file: %s\x00file renamed while open: %s\x00%s\x00full_fsync\x00%s-shm\x00readonly_shm\x00psow\x00unix-excl\x00%s.lock\x00/var/tmp\x00/usr/tmp\x00/tmp\x00SQLITE_TMPDIR\x00TMPDIR\x00%s/etilqs_%llx%c\x00modeof\x00fsync\x00/dev/urandom\x00unix\x00unix-none\x00unix-dotfile\x00memdb\x00memdb(%p,%lld)\x00PRAGMA \"%w\".page_count\x00ATTACH x AS %Q\x00recovered %d pages from %s\x00-journal\x00-wal\x00nolock\x00immutable\x00PRAGMA table_list\x00recovered %d frames from WAL file %s\x00cannot limit WAL size: %s\x00SQLite format 3\x00:memory:\x00@ \x00\n\x00invalid page number %d\x002nd reference to page %d\x00Failed to read ptrmap key=%d\x00Bad ptr map entry key=%d expected=(%d,%d) got=(%d,%d)\x00failed to get page %d\x00freelist leaf count too big on page %d\x00%s is %d but should be %d\x00size\x00overflow list length\x00Page %u: \x00unable to get the page. 
error code=%d\x00btreeInitPage() returns error code %d\x00free space corruption\x00On tree page %u cell %d: \x00On page %u at right child: \x00Offset %d out of range %d..%d\x00Extends off end of page\x00Rowid %lld out of order\x00Child page depth differs\x00Multiple uses for byte %u of page %u\x00Fragmentation of %d bytes reported as %d on page %u\x00Main freelist: \x00max rootpage (%d) disagrees with header (%d)\x00incremental_vacuum enabled with a max rootpage of zero\x00Page %d is never used\x00Pointer map page %d is referenced\x00unknown database %s\x00destination database is in use\x00source and destination must be distinct\x00%!.15g\x00-\x00%s%s\x00k(%d\x00B\x00,%s%s%s\x00N.\x00)\x00%.18s-%s\x00%s(%d)\x00%d\x00(blob)\x00vtab:%p\x00%c%u\x00]\x00program\x00?\x008\x0016LE\x0016BE\x00addr\x00opcode\x00p1\x00p2\x00p3\x00p4\x00p5\x00comment\x00id\x00parent\x00notused\x00detail\x00%.4c%s%.16c\x00MJ delete: %s\x00MJ collide: %s\x00-mj%06X9%02X\x00FOREIGN KEY constraint failed\x00a CHECK constraint\x00a generated column\x00an index\x00non-deterministic use of %s() in %s\x00API called with finalized prepared statement\x00API called with NULL prepared statement\x00string or blob too big\x00bind on a busy prepared statement: [%s]\x00-- \x00'%.*q'\x00zeroblob(%d)\x00x'\x00%02x\x00'\x00%s constraint failed\x00%z: %s\x00abort at %d in [%s]: %s\x00cannot store %s value in %s column %s.%s\x00cannot open savepoint - SQL statements in progress\x00no such savepoint: %s\x00cannot release savepoint - SQL statements in progress\x00cannot commit transaction - SQL statements in progress\x00cannot start a transaction within a transaction\x00cannot rollback - no transaction is active\x00cannot commit - no transaction is active\x00database schema has changed\x00index corruption\x00sqlite_master\x00SELECT*FROM\"%w\".%s WHERE %s ORDER BY rowid\x00too many levels of trigger recursion\x00cannot change %s wal mode from within a transaction\x00into\x00out of\x00database table is locked: %s\x00ValueList\x00-- %s\x00statement aborts at %d: [%s] %s\x00NOT NULL\x00UNIQUE\x00CHECK\x00FOREIGN KEY\x00cannot open value of type %s\x00null\x00real\x00integer\x00no such rowid: %lld\x00cannot open virtual table: %s\x00cannot open table without rowid: %s\x00cannot open view: %s\x00no such column: \"%s\"\x00foreign key\x00indexed\x00cannot open %s column for writing\x00sqlite_\x00sqlite_temp_master\x00sqlite_temp_schema\x00sqlite_schema\x00main\x00*\x00new\x00old\x00excluded\x00misuse of aliased aggregate %s\x00misuse of aliased window function %s\x00row value misused\x00double-quoted string literal: \"%w\"\x00coalesce\x00no such column\x00ambiguous column name\x00%s: %s.%s.%s\x00%s: %s.%s\x00%s: %s\x00partial index WHERE clauses\x00index expressions\x00CHECK constraints\x00generated columns\x00%s prohibited in %s\x00true\x00false\x00the \".\" operator\x00second argument to %#T() must be a constant between 0.0 and 1.0\x00not authorized to use function: %#T\x00non-deterministic functions\x00%#T() may not be used as a window function\x00window\x00aggregate\x00misuse of %s function %#T()\x00no such function: %#T\x00wrong number of arguments to function %#T()\x00FILTER may not be used with non-aggregate %#T()\x00subqueries\x00parameters\x00%r %s BY term out of range - should be between 1 and %d\x00too many terms in ORDER BY clause\x00ORDER\x00%r ORDER BY term does not match any column in the result set\x00too many terms in %s BY clause\x00HAVING clause on a non-aggregate query\x00GROUP\x00aggregate functions are not allowed in the 
GROUP BY clause\x00Expression tree is too large (maximum depth %d)\x00IN(...) element has %d term%s - expected %d\x00s\x000\x00too many arguments on function %T\x00unsafe use of %#T()\x00variable number must be between ?1 and ?%d\x00too many SQL variables\x00%d columns assigned %d values\x00too many columns in %s\x00_ROWID_\x00ROWID\x00OID\x00USING ROWID SEARCH ON TABLE %s FOR IN-OPERATOR\x00USING INDEX %s FOR IN-OPERATOR\x00sub-select returns %d columns - expected %d\x00REUSE LIST SUBQUERY %d\x00%sLIST SUBQUERY %d\x00CORRELATED \x00REUSE SUBQUERY %d\x00%sSCALAR SUBQUERY %d\x001\x000x\x00hex literal too big: %s%#T\x00generated column loop on \"%s\"\x00blob\x00text\x00numeric\x00flexnum\x00none\x00misuse of aggregate: %#T()\x00unknown function: %#T()\x00RAISE() may only be used within a trigger-program\x00B\x00C\x00D\x00E\x00F\x00table %s may not be altered\x00SELECT 1 FROM \"%w\".sqlite_master WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%' AND sqlite_rename_test(%Q, sql, type, name, %d, %Q, %d)=NULL \x00SELECT 1 FROM temp.sqlite_master WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%' AND sqlite_rename_test(%Q, sql, type, name, 1, %Q, %d)=NULL \x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_quotefix(%Q, sql)WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%'\x00UPDATE temp.sqlite_master SET sql = sqlite_rename_quotefix('temp', sql)WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%'\x00there is already another table or index with this name: %s\x00table\x00view %s may not be altered\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, %d) WHERE (type!='index' OR tbl_name=%Q COLLATE nocase)AND name NOT LIKE 'sqliteX_%%' ESCAPE 'X'\x00UPDATE %Q.sqlite_master SET tbl_name = %Q, name = CASE WHEN type='table' THEN %Q WHEN name LIKE 'sqliteX_autoindex%%' ESCAPE 'X' AND type='index' THEN 'sqlite_autoindex_' || %Q || substr(name,%d+18) ELSE name END WHERE tbl_name=%Q COLLATE nocase AND (type='table' OR type='index' OR type='trigger');\x00sqlite_sequence\x00UPDATE \"%w\".sqlite_sequence set name = %Q WHERE name = %Q\x00UPDATE sqlite_temp_schema SET sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, 1), tbl_name = CASE WHEN tbl_name=%Q COLLATE nocase AND sqlite_rename_test(%Q, sql, type, name, 1, 'after rename', 0) THEN %Q ELSE tbl_name END WHERE type IN ('view', 'trigger')\x00after rename\x00SELECT raise(ABORT,%Q) FROM \"%w\".\"%w\"\x00Cannot add a PRIMARY KEY column\x00Cannot add a UNIQUE column\x00Cannot add a REFERENCES column with non-NULL default value\x00Cannot add a NOT NULL column with default value NULL\x00Cannot add a column with non-constant default\x00cannot add a STORED column\x00UPDATE \"%w\".sqlite_master SET sql = printf('%%.%ds, ',sql) || %Q || substr(sql,1+length(printf('%%.%ds',sql))) WHERE type = 'table' AND name = %Q\x00SELECT CASE WHEN quick_check GLOB 'CHECK*' THEN raise(ABORT,'CHECK constraint failed') ELSE raise(ABORT,'NOT NULL constraint failed') END FROM pragma_quick_check(%Q,%Q) WHERE quick_check GLOB 'CHECK*' OR quick_check GLOB 'NULL*'\x00virtual tables may not be altered\x00Cannot add a column to a view\x00sqlite_altertab_%s\x00view\x00virtual table\x00cannot %s %s \"%s\"\x00drop column from\x00rename columns of\x00no such column: \"%T\"\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, %d) WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND (type != 'index' OR 
tbl_name = %Q)\x00UPDATE temp.sqlite_master SET sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, 1) WHERE type IN ('trigger', 'view')\x00error in %s %s%s%s: %s\x00 \x00CREATE \x00\"%w\" \x00%Q%s\x00%.*s%s\x00cannot drop %s column: \"%s\"\x00PRIMARY KEY\x00cannot drop column \"%s\": no other columns exist\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_drop_column(%d, sql, %d) WHERE (type=='table' AND tbl_name=%Q COLLATE nocase)\x00after drop column\x00sqlite_rename_column\x00sqlite_rename_table\x00sqlite_rename_test\x00sqlite_drop_column\x00sqlite_rename_quotefix\x00CREATE TABLE %Q.%s(%s)\x00DELETE FROM %Q.%s WHERE %s=%Q\x00DELETE FROM %Q.%s\x00sqlite_stat1\x00tbl,idx,stat\x00sqlite_stat4\x00tbl,idx,neq,nlt,ndlt,sample\x00sqlite_stat3\x00stat_init\x00stat_push\x00%llu\x00 %llu\x00%llu \x00stat_get\x00sqlite\\_%\x00BBB\x00idx\x00tbl\x00unordered*\x00sz=[0-9]*\x00noskipscan*\x00SELECT idx,count(*) FROM %Q.sqlite_stat4 GROUP BY idx\x00SELECT idx,neq,nlt,ndlt,sample FROM %Q.sqlite_stat4\x00SELECT tbl,idx,stat FROM %Q.sqlite_stat1\x00x\x00\x00too many attached databases - max %d\x00database %s is already in use\x00database is already attached\x00attached databases must use the same text encoding as main database\x00unable to open database: %s\x00no such database: %s\x00cannot detach database %s\x00database %s is locked\x00sqlite_detach\x00sqlite_attach\x00%s cannot use variables\x00%s %T cannot reference objects in database %s\x00authorizer malfunction\x00%s.%s\x00%s.%z\x00access to %z is prohibited\x00not authorized\x00pragma_\x00no such view\x00no such table\x00corrupt database\x00unknown database %T\x00object name reserved for internal use: %s\x00temporary table name must be unqualified\x00%s %T already exists\x00there is already an index named %s\x00sqlite_returning\x00cannot use RETURNING in a trigger\x00too many columns on %s\x00always\x00generated\x00duplicate column name: %s\x00default value of column [%s] is not constant\x00cannot use DEFAULT on a generated column\x00generated columns cannot be part of the PRIMARY KEY\x00table \"%s\" has more than one primary key\x00AUTOINCREMENT is only allowed on an INTEGER PRIMARY KEY\x00virtual tables cannot use computed columns\x00virtual\x00stored\x00error in generated column \"%s\"\x00,\x00\n \x00,\n \x00\n)\x00CREATE TABLE \x00 TEXT\x00 NUM\x00 INT\x00 REAL\x00unknown datatype for %s.%s: \"%s\"\x00missing datatype for %s.%s\x00AUTOINCREMENT not allowed on WITHOUT ROWID tables\x00PRIMARY KEY missing on table %s\x00must have at least one non-generated column\x00TABLE\x00VIEW\x00CREATE %s %.*s\x00UPDATE %Q.sqlite_master SET type='%s', name=%Q, tbl_name=%Q, rootpage=#%d, sql=%Q WHERE rowid=#%d\x00CREATE TABLE %Q.sqlite_sequence(name,seq)\x00tbl_name='%q' AND type!='trigger'\x00parameters are not allowed in views\x00view %s is circularly defined\x00corrupt schema\x00UPDATE %Q.sqlite_master SET rootpage=%d WHERE #%d AND rootpage=#%d\x00sqlite_stat%d\x00DELETE FROM %Q.sqlite_sequence WHERE name=%Q\x00DELETE FROM %Q.sqlite_master WHERE tbl_name=%Q and type!='trigger'\x00table %s may not be dropped\x00use DROP TABLE to delete table %s\x00use DROP VIEW to delete view %s\x00foreign key on %s should reference only one column of table %T\x00number of columns in foreign key does not match the number of columns in the referenced table\x00unknown column \"%s\" in foreign key definition\x00unsupported use of NULLS %s\x00FIRST\x00LAST\x00index\x00cannot create a TEMP index on non-TEMP table \"%s\"\x00table %s may not be indexed\x00views may not be 
indexed\x00virtual tables may not be indexed\x00there is already a table named %s\x00index %s already exists\x00sqlite_autoindex_%s_%d\x00expressions prohibited in PRIMARY KEY and UNIQUE constraints\x00conflicting ON CONFLICT clauses specified\x00invalid rootpage\x00CREATE%s INDEX %.*s\x00 UNIQUE\x00INSERT INTO %Q.sqlite_master VALUES('index',%Q,%Q,#%d,%Q);\x00name='%q' AND type='index'\x00no such index: %S\x00index associated with UNIQUE or PRIMARY KEY constraint cannot be dropped\x00DELETE FROM %Q.sqlite_master WHERE name=%Q AND type='index'\x00too many FROM clause terms, max: %d\x00a JOIN clause is required before %s\x00ON\x00USING\x00BEGIN\x00ROLLBACK\x00COMMIT\x00RELEASE\x00unable to open a temporary database file for storing temporary tables\x00index '%q'\x00, \x00%s.rowid\x00unable to identify the object to be reindexed\x00duplicate WITH table name: %s\x00no such collation sequence: %s\x00unsafe use of virtual table \"%s\"\x00table %s may not be modified\x00cannot modify %s because it is a view\x00rows deleted\x00integer overflow\x00%.*f\x00LIKE or GLOB pattern too complex\x00ESCAPE expression must be a single character\x00%!.20e\x00%Q\x00?000\x00MATCH\x00like\x00implies_nonnull_row\x00expr_compare\x00expr_implies_expr\x00affinity\x00soundex\x00load_extension\x00sqlite_compileoption_used\x00sqlite_compileoption_get\x00unlikely\x00likelihood\x00likely\x00sqlite_offset\x00ltrim\x00rtrim\x00trim\x00min\x00max\x00typeof\x00subtype\x00length\x00instr\x00printf\x00format\x00unicode\x00char\x00abs\x00round\x00upper\x00lower\x00hex\x00unhex\x00ifnull\x00random\x00randomblob\x00nullif\x00sqlite_version\x00sqlite_source_id\x00sqlite_log\x00quote\x00last_insert_rowid\x00changes\x00total_changes\x00replace\x00zeroblob\x00substr\x00substring\x00sum\x00total\x00avg\x00count\x00group_concat\x00glob\x00ceil\x00ceiling\x00floor\x00trunc\x00ln\x00log\x00log10\x00log2\x00exp\x00pow\x00power\x00mod\x00acos\x00asin\x00atan\x00atan2\x00cos\x00sin\x00tan\x00cosh\x00sinh\x00tanh\x00acosh\x00asinh\x00atanh\x00sqrt\x00radians\x00degrees\x00pi\x00sign\x00iif\x00foreign key mismatch - \"%w\" referencing \"%w\"\x00cannot INSERT into generated column \"%s\"\x00table %S has no column named %s\x00table %S has %d columns but %d values were supplied\x00%d values for %d columns\x00UPSERT not implemented for virtual table \"%s\"\x00cannot UPSERT a view\x00rows inserted\x00sqlite3_extension_init\x00sqlite3_\x00lib\x00_init\x00no entry point [%s] in shared library [%s]\x00error during initialization: %s\x00unable to open shared library [%.*s]\x00so\x00automatic extension loading failed: 
%s\x00seq\x00from\x00to\x00on_update\x00on_delete\x00match\x00cid\x00name\x00type\x00notnull\x00dflt_value\x00pk\x00hidden\x00schema\x00ncol\x00wr\x00strict\x00seqno\x00desc\x00coll\x00key\x00builtin\x00enc\x00narg\x00flags\x00wdth\x00hght\x00flgs\x00unique\x00origin\x00partial\x00rowid\x00fkid\x00file\x00busy\x00checkpointed\x00database\x00status\x00cache_size\x00timeout\x00analysis_limit\x00application_id\x00auto_vacuum\x00automatic_index\x00busy_timeout\x00cache_spill\x00case_sensitive_like\x00cell_size_check\x00checkpoint_fullfsync\x00collation_list\x00compile_options\x00count_changes\x00data_version\x00database_list\x00default_cache_size\x00defer_foreign_keys\x00empty_result_callbacks\x00encoding\x00foreign_key_check\x00foreign_key_list\x00foreign_keys\x00freelist_count\x00full_column_names\x00fullfsync\x00function_list\x00hard_heap_limit\x00ignore_check_constraints\x00incremental_vacuum\x00index_info\x00index_list\x00index_xinfo\x00integrity_check\x00journal_mode\x00journal_size_limit\x00legacy_alter_table\x00locking_mode\x00max_page_count\x00mmap_size\x00module_list\x00optimize\x00page_count\x00page_size\x00pragma_list\x00query_only\x00quick_check\x00read_uncommitted\x00recursive_triggers\x00reverse_unordered_selects\x00schema_version\x00secure_delete\x00short_column_names\x00shrink_memory\x00soft_heap_limit\x00synchronous\x00table_info\x00table_list\x00table_xinfo\x00temp_store\x00temp_store_directory\x00threads\x00trusted_schema\x00user_version\x00wal_autocheckpoint\x00wal_checkpoint\x00writable_schema\x00onoffalseyestruextrafull\x00exclusive\x00normal\x00full\x00incremental\x00memory\x00temporary storage cannot be changed from within a transaction\x00SET NULL\x00SET DEFAULT\x00CASCADE\x00RESTRICT\x00NO ACTION\x00delete\x00persist\x00off\x00truncate\x00wal\x00w\x00a\x00sissii\x00utf8\x00utf16le\x00utf16be\x00-%T\x00fast\x00not a writable directory\x00Safety level may not be changed inside a transaction\x00reset\x00issisii\x00issisi\x00SELECT*FROM\"%w\"\x00shadow\x00sssiii\x00iisX\x00isiX\x00c\x00u\x00isisi\x00iss\x00is\x00iissssss\x00NONE\x00siX\x00*** in database %s ***\n\x00row not in PRIMARY KEY order for %s\x00NULL value in %s.%s\x00non-%s value in %s.%s\x00NUMERIC value in %s.%s\x00C\x00TEXT value in %s.%s\x00CHECK constraint failed in %s\x00row \x00 missing from index \x00 values differ from index \x00non-unique entry in index \x00wrong # of entries in index \x00ok\x00unsupported encoding: %s\x00restart\x00ANALYZE \"%w\".\"%w\"\x00UTF8\x00UTF-8\x00UTF-16le\x00UTF-16be\x00UTF16le\x00UTF16be\x00UTF-16\x00UTF16\x00CREATE TABLE x\x00%c\"%s\"\x00(\"%s\"\x00,arg HIDDEN\x00,schema HIDDEN\x00PRAGMA \x00%Q.\x00=%Q\x00error in %s %s after %s: %s\x00malformed database schema (%s)\x00%z - %s\x00rename\x00drop column\x00add column\x00orphan index\x00CREATE TABLE x(type text,name text,tbl_name text,rootpage int,sql text)\x00unsupported file format\x00SELECT*FROM\"%w\".%s ORDER BY rowid\x00database schema is locked: %s\x00statement too long\x00unknown join type: %T%s%T%s%T\x00naturaleftouterightfullinnercross\x00a NATURAL join may not have an ON or USING clause\x00cannot join using column %s - column not present in both tables\x00ambiguous reference to %s in USING()\x00UNION ALL\x00INTERSECT\x00EXCEPT\x00UNION\x00USE TEMP B-TREE FOR %s\x00USE TEMP B-TREE FOR %sORDER BY\x00RIGHT PART OF \x00column%d\x00%.*z:%u\x00NUM\x00cannot use window functions in recursive queries\x00recursive aggregate queries not supported\x00SETUP\x00RECURSIVE STEP\x00SCAN %d CONSTANT ROW%s\x00S\x00COMPOUND 
QUERY\x00LEFT-MOST SUBQUERY\x00%s USING TEMP B-TREE\x00all VALUES must have the same number of terms\x00SELECTs to the left and right of %s do not have the same number of result columns\x00MERGE (%s)\x00LEFT\x00RIGHT\x00no such index: %s\x00'%s' is not a function\x00no such index: \"%s\"\x00multiple references to recursive table: %s\x00circular reference: %s\x00table %s has %d values for %d columns\x00multiple recursive references: %s\x00recursive reference in a subquery: %s\x00%!S\x00too many references to \"%s\": max 65535\x00access to view \"%s\" prohibited\x00..%s\x00%s.%s.%s\x00no such table: %s\x00no tables specified\x00too many columns in result set\x00DISTINCT aggregates must have exactly one argument\x00USE TEMP B-TREE FOR %s(DISTINCT)\x00SCAN %s%s%s\x00 USING COVERING INDEX \x00target object/alias may not appear in FROM clause: %s\x00expected %d columns for '%s' but got %d\x00CO-ROUTINE %!S\x00MATERIALIZE %!S\x00DISTINCT\x00GROUP BY\x00sqlite3_get_table() called with two or more incompatible queries\x00temporary trigger may not have qualified name\x00trigger\x00cannot create triggers on virtual tables\x00trigger %T already exists\x00cannot create trigger on system table\x00cannot create %s trigger on view: %S\x00BEFORE\x00AFTER\x00cannot create INSTEAD OF trigger on table: %S\x00trigger \"%s\" may not write to shadow table \"%s\"\x00INSERT INTO %Q.sqlite_master VALUES('trigger',%Q,%Q,0,'CREATE TRIGGER %q')\x00type='trigger' AND name='%q'\x00no such trigger: %S\x00DELETE FROM %Q.sqlite_master WHERE name=%Q AND type='trigger'\x00%s RETURNING is not available on virtual tables\x00DELETE\x00UPDATE\x00RETURNING may not use \"TABLE.*\" wildcards\x00-- TRIGGER %s\x00cannot UPDATE generated column \"%s\"\x00no such column: %s\x00rows updated\x00%r \x00%sON CONFLICT clause does not match any PRIMARY KEY or UNIQUE constraint\x00CRE\x00INS\x00cannot VACUUM from within a transaction\x00cannot VACUUM - SQL statements in progress\x00non-text filename\x00ATTACH %Q AS vacuum_db\x00output file already exists\x00SELECT sql FROM \"%w\".sqlite_schema WHERE type='table'AND name<>'sqlite_sequence' AND coalesce(rootpage,1)>0\x00SELECT sql FROM \"%w\".sqlite_schema WHERE type='index'\x00SELECT'INSERT INTO vacuum_db.'||quote(name)||' SELECT*FROM\"%w\".'||quote(name)FROM vacuum_db.sqlite_schema WHERE type='table'AND coalesce(rootpage,1)>0\x00INSERT INTO vacuum_db.sqlite_schema SELECT*FROM \"%w\".sqlite_schema WHERE type IN('view','trigger') OR(type='table'AND rootpage=0)\x00CREATE VIRTUAL TABLE %T\x00UPDATE %Q.sqlite_master SET type='table', name=%Q, tbl_name=%Q, rootpage=0, sql=%Q WHERE rowid=#%d\x00name=%Q AND sql=%Q\x00vtable constructor called recursively: %s\x00vtable constructor failed: %s\x00vtable constructor did not declare schema: %s\x00no such module: %s\x00\x00 AND \x00(\x00 (\x00%s=?\x00ANY(%s)\x00>\x00<\x00%s %S\x00SEARCH\x00SCAN\x00AUTOMATIC PARTIAL COVERING INDEX\x00AUTOMATIC COVERING INDEX\x00COVERING INDEX %s\x00INDEX %s\x00 USING \x00 USING INTEGER PRIMARY KEY (%s\x00>? 
AND %s\x00%c?)\x00 VIRTUAL TABLE INDEX %d:%s\x00 LEFT-JOIN\x00BLOOM FILTER ON %S (\x00rowid=?\x00MULTI-INDEX OR\x00INDEX %d\x00RIGHT-JOIN %s\x00regexp\x00ON clause references tables to its right\x00NOCASE\x00too many arguments on %s() - max %d\x00automatic index on %s(%s)\x00auto-index\x00%s.xBestIndex malfunction\x00abbreviated query algorithm search\x00no query solution\x00at most %d tables in a join\x00SCAN CONSTANT ROW\x00second argument to nth_value must be a positive integer\x00argument of ntile must be a positive integer\x00row_number\x00dense_rank\x00rank\x00percent_rank\x00cume_dist\x00ntile\x00last_value\x00nth_value\x00first_value\x00lead\x00lag\x00no such window: %s\x00RANGE with offset PRECEDING/FOLLOWING requires one ORDER BY expression\x00FILTER clause may only be used with aggregate window functions\x00misuse of aggregate: %s()\x00unsupported frame specification\x00PARTITION clause\x00ORDER BY clause\x00frame specification\x00cannot override %s of window: %s\x00DISTINCT is not supported for window functions\x00frame starting offset must be a non-negative integer\x00frame ending offset must be a non-negative integer\x00frame starting offset must be a non-negative number\x00frame ending offset must be a non-negative number\x00%s clause should come after %s not before\x00ORDER BY\x00LIMIT\x00too many terms in compound SELECT\x00syntax error after column name \"%.*s\"\x00parser stack overflow\x00unknown table option: %.*s\x00set list\x00near \"%T\": syntax error\x00qualified table names are not allowed on INSERT, UPDATE, and DELETE statements within triggers\x00the INDEXED BY clause is not allowed on UPDATE or DELETE statements within triggers\x00the NOT INDEXED clause is not allowed on UPDATE or DELETE statements within triggers\x00incomplete input\x00unrecognized token: \"%T\"\x00%s in \"%s\"\x00create\x00temp\x00temporary\x00end\x00explain\x00unable to close due to unfinalized statements or unfinished backups\x00unknown error\x00abort due to ROLLBACK\x00another row available\x00no more rows available\x00not an error\x00SQL logic error\x00access permission denied\x00query aborted\x00database is locked\x00database table is locked\x00attempt to write a readonly database\x00interrupted\x00disk I/O error\x00database disk image is malformed\x00unknown operation\x00database or disk is full\x00unable to open database file\x00locking protocol\x00constraint failed\x00datatype mismatch\x00bad parameter or other API misuse\x00authorization denied\x00column index out of range\x00file is not a database\x00notification message\x00warning message\x00unable to delete/modify user-function due to active statements\x00unable to use function %s in the requested context\x00unknown database: %s\x00unable to delete/modify collation sequence due to active statements\x00file:\x00localhost\x00invalid uri authority: %.*s\x00vfs\x00cache\x00mode\x00no such %s mode: %s\x00%s mode not allowed: %s\x00no such vfs: %s\x00shared\x00private\x00ro\x00rw\x00rwc\x00RTRIM\x00\x00\x00\x00%s at line %d of [%.10s]\x00database corruption\x00misuse\x00cannot open file\x00no such table column: %s.%s\x00SQLITE_\x00database is deadlocked\x00array\x00object\x000123456789abcdef\x00JSON cannot hold BLOB values\x00malformed JSON\x00[0]\x00JSON path error near '%q'\x00json_%s() needs an odd number of arguments\x00$[\x00$.\x00json_object() requires an even number of arguments\x00json_object() labels must be TEXT\x00set\x00insert\x00[]\x00{}\x00CREATE TABLE x(key,value,type,atom,id,parent,fullkey,path,json HIDDEN,root 
HIDDEN)\x00.%.*s\x00[%d]\x00$\x00json\x00json_array\x00json_array_length\x00json_extract\x00->\x00->>\x00json_insert\x00json_object\x00json_patch\x00json_quote\x00json_remove\x00json_replace\x00json_set\x00json_type\x00json_valid\x00json_group_array\x00json_group_object\x00json_each\x00json_tree\x00%s_node\x00data\x00DROP TABLE '%q'.'%q_node';DROP TABLE '%q'.'%q_rowid';DROP TABLE '%q'.'%q_parent';\x00RtreeMatchArg\x00SELECT * FROM %Q.%Q\x00UNIQUE constraint failed: %s.%s\x00rtree constraint failed: %s.(%s<=%s)\x00ALTER TABLE %Q.'%q_node' RENAME TO \"%w_node\";ALTER TABLE %Q.'%q_parent' RENAME TO \"%w_parent\";ALTER TABLE %Q.'%q_rowid' RENAME TO \"%w_rowid\";\x00SELECT stat FROM %Q.sqlite_stat1 WHERE tbl = '%q_rowid'\x00node\x00CREATE TABLE \"%w\".\"%w_rowid\"(rowid INTEGER PRIMARY KEY,nodeno\x00,a%d\x00);CREATE TABLE \"%w\".\"%w_node\"(nodeno INTEGER PRIMARY KEY,data);\x00CREATE TABLE \"%w\".\"%w_parent\"(nodeno INTEGER PRIMARY KEY,parentnode);\x00INSERT INTO \"%w\".\"%w_node\"VALUES(1,zeroblob(%d))\x00INSERT INTO\"%w\".\"%w_rowid\"(rowid,nodeno)VALUES(?1,?2)ON CONFLICT(rowid)DO UPDATE SET nodeno=excluded.nodeno\x00SELECT * FROM \"%w\".\"%w_rowid\" WHERE rowid=?1\x00UPDATE \"%w\".\"%w_rowid\"SET \x00a%d=coalesce(?%d,a%d)\x00a%d=?%d\x00 WHERE rowid=?1\x00INSERT OR REPLACE INTO '%q'.'%q_node' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_node' WHERE nodeno = ?1\x00SELECT nodeno FROM '%q'.'%q_rowid' WHERE rowid = ?1\x00INSERT OR REPLACE INTO '%q'.'%q_rowid' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_rowid' WHERE rowid = ?1\x00SELECT parentnode FROM '%q'.'%q_parent' WHERE nodeno = ?1\x00INSERT OR REPLACE INTO '%q'.'%q_parent' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_parent' WHERE nodeno = ?1\x00PRAGMA %Q.page_size\x00SELECT length(data) FROM '%q'.'%q_node' WHERE nodeno = 1\x00undersize RTree blobs in \"%q_node\"\x00Wrong number of columns for an rtree table\x00Too few columns for an rtree table\x00Too many columns for an rtree table\x00Auxiliary rtree columns must be last\x00CREATE TABLE x(%.*s INT\x00,%.*s\x00);\x00,%.*s REAL\x00,%.*s INT\x00{%lld\x00 %g\x00}\x00Invalid argument to rtreedepth()\x00%z%s%z\x00SELECT data FROM %Q.'%q_node' WHERE nodeno=?\x00Node %lld missing from database\x00SELECT parentnode FROM %Q.'%q_parent' WHERE nodeno=?1\x00SELECT nodeno FROM %Q.'%q_rowid' WHERE rowid=?1\x00Mapping (%lld -> %lld) missing from %s table\x00%_rowid\x00%_parent\x00Found (%lld -> %lld) in %s table, expected (%lld -> %lld)\x00Dimension %d of cell %d on node %lld is corrupt\x00Dimension %d of cell %d on node %lld is corrupt relative to parent\x00Node %lld is too small (%d bytes)\x00Rtree depth out of range (%d)\x00Node %lld is too small for cell count of %d (%d bytes)\x00SELECT count(*) FROM %Q.'%q%s'\x00Wrong number of entries in %%%s table - expected %lld, actual %lld\x00SELECT * FROM %Q.'%q_rowid'\x00Schema corrupt or not an rtree\x00_rowid\x00_parent\x00END\x00wrong number of arguments to function rtreecheck()\x00[\x00[%!g,%!g],\x00[%!g,%!g]]\x00\x00CREATE TABLE x(_shape\x00,%s\x00rtree\x00fullscan\x00_shape does not contain a valid polygon\x00geopoly_overlap\x00geopoly_within\x00geopoly\x00geopoly_area\x00geopoly_blob\x00geopoly_json\x00geopoly_svg\x00geopoly_contains_point\x00geopoly_debug\x00geopoly_bbox\x00geopoly_xform\x00geopoly_regular\x00geopoly_ccw\x00geopoly_group_bbox\x00rtreenode\x00rtreedepth\x00rtreecheck\x00rtree_i32\x00corrupt fossil delta\x00DROP TRIGGER IF EXISTS temp.rbu_insert_tr;DROP TRIGGER IF EXISTS temp.rbu_update1_tr;DROP TRIGGER IF EXISTS temp.rbu_update2_tr;DROP TRIGGER IF 
EXISTS temp.rbu_delete_tr;\x00SELECT rbu_target_name(name, type='view') AS target, name FROM sqlite_schema WHERE type IN ('table', 'view') AND target IS NOT NULL %s ORDER BY name\x00AND rootpage!=0 AND rootpage IS NOT NULL\x00SELECT name, rootpage, sql IS NULL OR substr(8, 6)=='UNIQUE' FROM main.sqlite_schema WHERE type='index' AND tbl_name = ?\x00SELECT (sql COLLATE nocase BETWEEN 'CREATE VIRTUAL' AND 'CREATE VIRTUAM'), rootpage FROM sqlite_schema WHERE name=%Q\x00PRAGMA index_list=%Q\x00SELECT rootpage FROM sqlite_schema WHERE name = %Q\x00PRAGMA table_info=%Q\x00PRAGMA main.index_list = %Q\x00PRAGMA main.index_xinfo = %Q\x00SELECT * FROM '%q'\x00rbu_\x00rbu_rowid\x00table %q %s rbu_rowid column\x00may not have\x00requires\x00PRAGMA table_info(%Q)\x00column missing from %q: %s\x00%z%s\"%w\"\x00%z%s%s\"%w\"%s\x00SELECT max(_rowid_) FROM \"%s%w\"\x00 WHERE _rowid_ > %lld \x00 DESC\x00quote(\x00||','||\x00SELECT %s FROM \"%s%w\" ORDER BY %s LIMIT 1\x00 WHERE (%s) > (%s) \x00_rowid_\x00%z%s \"%w\" COLLATE %Q\x00%z%s \"rbu_imp_%d%w\" COLLATE %Q DESC\x00%z%s quote(\"rbu_imp_%d%w\")\x00SELECT %s FROM \"rbu_imp_%w\" ORDER BY %s LIMIT 1\x00%z%s%s\x00(%s) > (%s)\x00%z%s(%.*s) COLLATE %Q\x00%z%s\"%w\" COLLATE %Q\x00%z%s\"rbu_imp_%d%w\"%s\x00%z%s\"rbu_imp_%d%w\" %s COLLATE %Q\x00%z%s\"rbu_imp_%d%w\" IS ?\x00%z%s%s.\"%w\"\x00%z%sNULL\x00%z, %s._rowid_\x00_rowid_ = ?%d\x00%z%sc%d=?%d\x00_rowid_ = (SELECT id FROM rbu_imposter2 WHERE %z)\x00%z%s\"%w\"=?%d\x00invalid rbu_control value\x00%z%s\"%w\"=rbu_delta(\"%w\", ?%d)\x00%z%s\"%w\"=rbu_fossil_delta(\"%w\", ?%d)\x00PRIMARY KEY(\x00%z%s\"%w\"%s\x00%z)\x00SELECT name FROM sqlite_schema WHERE rootpage = ?\x00%z%sc%d %s COLLATE %Q\x00%z%sc%d%s\x00%z, id INTEGER\x00CREATE TABLE rbu_imposter2(%z, PRIMARY KEY(%z)) WITHOUT ROWID\x00PRIMARY KEY \x00%z%s\"%w\" %s %sCOLLATE %Q%s\x00 NOT NULL\x00%z, %z\x00CREATE TABLE \"rbu_imp_%w\"(%z)%s\x00 WITHOUT ROWID\x00INSERT INTO %s.'rbu_tmp_%q'(rbu_control,%s%s) VALUES(%z)\x00SELECT trim(sql) FROM sqlite_schema WHERE type='index' AND name=?\x00 LIMIT -1 OFFSET %d\x00CREATE TABLE \"rbu_imp_%w\"( %s, PRIMARY KEY( %s ) ) WITHOUT ROWID\x00INSERT INTO \"rbu_imp_%w\" VALUES(%s)\x00DELETE FROM \"rbu_imp_%w\" WHERE %s\x00SELECT %s, 0 AS rbu_control FROM '%q' %s %s %s ORDER BY %s%s\x00AND\x00WHERE\x00SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' %s ORDER BY %s%s\x00SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' %s UNION ALL SELECT %s, rbu_control FROM '%q' %s %s typeof(rbu_control)='integer' AND rbu_control!=1 ORDER BY %s%s\x00rbu_imp_\x00INSERT INTO \"%s%w\"(%s%s) VALUES(%s)\x00, _rowid_\x00DELETE FROM \"%s%w\" WHERE %s\x00, rbu_rowid\x00CREATE TABLE IF NOT EXISTS %s.'rbu_tmp_%q' AS SELECT *%s FROM '%q' WHERE 0;\x00, 0 AS rbu_rowid\x00CREATE TEMP TRIGGER rbu_delete_tr BEFORE DELETE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(3, %s);END;CREATE TEMP TRIGGER rbu_update1_tr BEFORE UPDATE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(3, %s);END;CREATE TEMP TRIGGER rbu_update2_tr AFTER UPDATE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(4, %s);END;\x00CREATE TEMP TRIGGER rbu_insert_tr AFTER INSERT ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(0, %s);END;\x00,_rowid_ \x00,rbu_rowid\x00SELECT %s,%s rbu_control%s FROM '%q'%s %s %s %s\x000 AS \x00UPDATE \"%s%w\" SET %s WHERE %s\x00SELECT k, v FROM %s.rbu_state\x00file:///%s-vacuum?modeof=%s\x00ATTACH %Q AS stat\x00CREATE TABLE IF NOT EXISTS %s.rbu_state(k INTEGER PRIMARY KEY, v)\x00cannot vacuum wal mode 
database\x00file:%s-vactmp?rbu_memory=1%s%s\x00&\x00rbu_tmp_insert\x00rbu_fossil_delta\x00rbu_target_name\x00SELECT * FROM sqlite_schema\x00rbu vfs not found\x00PRAGMA main.wal_checkpoint=restart\x00rbu_exclusive_checkpoint\x00%s-oal\x00%s-wal\x00PRAGMA schema_version\x00PRAGMA schema_version = %d\x00INSERT OR REPLACE INTO %s.rbu_state(k, v) VALUES (%d, %d), (%d, %Q), (%d, %Q), (%d, %d), (%d, %d), (%d, %lld), (%d, %lld), (%d, %lld), (%d, %lld), (%d, %Q) \x00PRAGMA main.%s\x00PRAGMA main.%s = %d\x00PRAGMA writable_schema=1\x00SELECT sql FROM sqlite_schema WHERE sql!='' AND rootpage!=0 AND name!='sqlite_sequence' ORDER BY type DESC\x00SELECT * FROM sqlite_schema WHERE rootpage=0 OR rootpage IS NULL\x00INSERT INTO sqlite_schema VALUES(?,?,?,?,?)\x00PRAGMA writable_schema=0\x00DELETE FROM %s.'rbu_tmp_%q'\x00rbu_state mismatch error\x00rbu_vfs_%d\x00SELECT count(*) FROM sqlite_schema WHERE type='index' AND tbl_name = %Q\x00rbu_index_cnt\x00SELECT 1 FROM sqlite_schema WHERE tbl_name = 'rbu_count'\x00SELECT sum(cnt * (1 + rbu_index_cnt(rbu_target_name(tbl))))FROM rbu_count\x00cannot update wal mode database\x00database modified during rbu %s\x00vacuum\x00update\x00BEGIN IMMEDIATE\x00PRAGMA journal_mode=off\x00-vactmp\x00DELETE FROM stat.rbu_state\x00rbu/zipvfs setup error\x00rbu(%s)/%z\x00rbu_memory\x00SELECT 0, 'tbl', '', 0, '', 1 UNION ALL SELECT 1, 'idx', '', 0, '', 2 UNION ALL SELECT 2, 'stat', '', 0, '', 0\x00PRAGMA '%q'.table_info('%q')\x00%z%s\"%w\".\"%w\".\"%w\"=\"%w\".\"%w\".\"%w\"\x00%z%s\"%w\".\"%w\".\"%w\" IS NOT \"%w\".\"%w\".\"%w\"\x00 OR \x00SELECT * FROM \"%w\".\"%w\" WHERE NOT EXISTS ( SELECT 1 FROM \"%w\".\"%w\" WHERE %s)\x00SELECT * FROM \"%w\".\"%w\", \"%w\".\"%w\" WHERE %s AND (%z)\x00table schemas do not match\x00SELECT tbl, ?2, stat FROM %Q.sqlite_stat1 WHERE tbl IS ?1 AND idx IS (CASE WHEN ?2=X'' THEN NULL ELSE ?2 END)\x00SELECT * FROM \x00 WHERE \x00 IS ?\x00SAVEPOINT changeset\x00RELEASE changeset\x00UPDATE main.\x00 SET \x00 = ?\x00idx IS CASE WHEN length(?4)=0 AND typeof(?4)='blob' THEN NULL ELSE ?4 END \x00DELETE FROM main.\x00 AND (?\x00AND \x00INSERT INTO main.\x00) VALUES(?\x00, ?\x00INSERT INTO main.sqlite_stat1 VALUES(?1, CASE WHEN length(?2)=0 AND typeof(?2)='blob' THEN NULL ELSE ?2 END, ?3)\x00DELETE FROM main.sqlite_stat1 WHERE tbl=?1 AND idx IS CASE WHEN length(?2)=0 AND typeof(?2)='blob' THEN NULL ELSE ?2 END AND (?4 OR stat IS ?3)\x00SAVEPOINT replace_op\x00RELEASE replace_op\x00SAVEPOINT changeset_apply\x00PRAGMA defer_foreign_keys = 1\x00sqlite3changeset_apply(): no such table: %s\x00sqlite3changeset_apply(): table %s has %d columns, expected %d or more\x00sqlite3changeset_apply(): primary key mismatch for table %s\x00PRAGMA defer_foreign_keys = 0\x00RELEASE changeset_apply\x00ROLLBACK TO changeset_apply\x00fts5: parser stack overflow\x00fts5: syntax error near \"%.*s\"\x00%z%.*s\x00wrong number of arguments to function highlight()\x00wrong number of arguments to function snippet()\x00snippet\x00highlight\x00bm25\x00prefix\x00malformed prefix=... directive\x00too many prefix indexes (max %d)\x00prefix length out of range (max 999)\x00tokenize\x00multiple tokenize=... directives\x00parse error in tokenize directive\x00content\x00multiple content=... directives\x00%Q.%Q\x00content_rowid\x00multiple content_rowid=... directives\x00columnsize\x00malformed columnsize=... directive\x00columns\x00malformed detail=... 
directive\x00unrecognized option: \"%.*s\"\x00reserved fts5 column name: %s\x00unindexed\x00unrecognized column option: %s\x00T.%Q\x00, T.%Q\x00, T.c%d\x00reserved fts5 table name: %s\x00parse error in \"%s\"\x00docsize\x00%Q.'%q_%s'\x00CREATE TABLE x(\x00%z%s%Q\x00%z, %Q HIDDEN, %s HIDDEN)\x00pgsz\x00hashsize\x00automerge\x00usermerge\x00crisismerge\x00SELECT k, v FROM %Q.'%q_config'\x00version\x00invalid fts5 file format (found %d, expected %d) - run 'rebuild'\x00unterminated string\x00fts5: syntax error near \"%.1s\"\x00OR\x00NOT\x00NEAR\x00expected integer, got \"%.*s\"\x00fts5: column queries are not supported (detail=none)\x00fts5: %s queries are not supported (detail!=full)\x00phrase\x00block\x00REPLACE INTO '%q'.'%q_data'(id, block) VALUES(?,?)\x00DELETE FROM '%q'.'%q_data' WHERE id>=? AND id<=?\x00DELETE FROM '%q'.'%q_idx' WHERE segid=?\x00PRAGMA %Q.data_version\x00SELECT pgno FROM '%q'.'%q_idx' WHERE segid=? AND term<=? ORDER BY term DESC LIMIT 1\x00INSERT INTO '%q'.'%q_idx'(segid,term,pgno) VALUES(?,?,?)\x00%s_data\x00id INTEGER PRIMARY KEY, block BLOB\x00segid, term, pgno, PRIMARY KEY(segid, term)\x00SELECT segid, term, (pgno>>1), (pgno&1) FROM %Q.'%q_idx' WHERE segid=%d ORDER BY 1, 2\x00\x00\x00\x00\x00\x00recursively defined fts5 content table\x00SELECT rowid, rank FROM %Q.%Q ORDER BY %s(\"%w\"%s%s) %s\x00DESC\x00ASC\x00reads\x00unknown special query: %.*s\x00SELECT %s\x00no such function: %s\x00parse error in rank function: %s\x00%s: table does not support scanning\x00delete-all\x00'delete-all' may only be used with a contentless or external content fts5 table\x00rebuild\x00'rebuild' may not be used with a contentless fts5 table\x00merge\x00integrity-check\x00cannot %s contentless fts5 table: %s\x00DELETE from\x00no such cursor: %lld\x00no such tokenizer: %s\x00error in tokenizer constructor\x00fts5_api_ptr\x00fts5: 2023-02-21 18:09:37 05941c2a04037fc3ed2ffae11f5d2260706f89431f463518740f72ada350866d\x00config\x00fts5\x00fts5_source_id\x00SELECT %s FROM %s T WHERE T.%Q >= ? AND T.%Q <= ? ORDER BY T.%Q ASC\x00SELECT %s FROM %s T WHERE T.%Q <= ? AND T.%Q >= ? 
ORDER BY T.%Q DESC\x00SELECT %s FROM %s T WHERE T.%Q=?\x00INSERT INTO %Q.'%q_content' VALUES(%s)\x00REPLACE INTO %Q.'%q_content' VALUES(%s)\x00DELETE FROM %Q.'%q_content' WHERE id=?\x00REPLACE INTO %Q.'%q_docsize' VALUES(?,?)\x00DELETE FROM %Q.'%q_docsize' WHERE id=?\x00SELECT sz FROM %Q.'%q_docsize' WHERE id=?\x00REPLACE INTO %Q.'%q_config' VALUES(?,?)\x00SELECT %s FROM %s AS T\x00DROP TABLE IF EXISTS %Q.'%q_data';DROP TABLE IF EXISTS %Q.'%q_idx';DROP TABLE IF EXISTS %Q.'%q_config';\x00DROP TABLE IF EXISTS %Q.'%q_docsize';\x00DROP TABLE IF EXISTS %Q.'%q_content';\x00ALTER TABLE %Q.'%q_%s' RENAME TO '%q_%s';\x00CREATE TABLE %Q.'%q_%q'(%s)%s\x00fts5: error creating shadow table %q_%s: %s\x00id INTEGER PRIMARY KEY\x00, c%d\x00id INTEGER PRIMARY KEY, sz BLOB\x00k PRIMARY KEY, v\x00DELETE FROM %Q.'%q_data';DELETE FROM %Q.'%q_idx';\x00DELETE FROM %Q.'%q_docsize';\x00SELECT count(*) FROM %Q.'%q_%s'\x00tokenchars\x00separators\x00L* N* Co\x00categories\x00remove_diacritics\x00unicode61\x00al\x00ance\x00ence\x00er\x00ic\x00able\x00ible\x00ant\x00ement\x00ment\x00ent\x00ion\x00ou\x00ism\x00ate\x00iti\x00ous\x00ive\x00ize\x00at\x00bl\x00ble\x00iz\x00ational\x00tional\x00tion\x00enci\x00anci\x00izer\x00logi\x00bli\x00alli\x00entli\x00eli\x00e\x00ousli\x00ization\x00ation\x00ator\x00alism\x00iveness\x00fulness\x00ful\x00ousness\x00aliti\x00iviti\x00biliti\x00ical\x00ness\x00icate\x00iciti\x00ative\x00alize\x00eed\x00ee\x00ed\x00ing\x00case_sensitive\x00ascii\x00porter\x00trigram\x00col\x00row\x00instance\x00fts5vocab: unknown table type: %Q\x00CREATE TABlE vocab(term, col, doc, cnt)\x00CREATE TABlE vocab(term, doc, cnt)\x00CREATE TABlE vocab(term, doc, col, offset)\x00wrong number of vtable arguments\x00recursive definition for %s.%s\x00SELECT t.%Q FROM %Q.%Q AS t WHERE t.%Q MATCH '*id'\x00no such fts5 table: %s.%s\x00fts5vocab\x002023-02-21 18:09:37 05941c2a04037fc3ed2ffae11f5d2260706f89431f463518740f72ada350866d\x00" +var ts1 = "3.41.2\x00ATOMIC_INTRINSICS=1\x00COMPILER=gcc-10.2.1 20210110\x00DEFAULT_AUTOVACUUM\x00DEFAULT_CACHE_SIZE=-2000\x00DEFAULT_FILE_FORMAT=4\x00DEFAULT_JOURNAL_SIZE_LIMIT=-1\x00DEFAULT_MEMSTATUS=0\x00DEFAULT_MMAP_SIZE=0\x00DEFAULT_PAGE_SIZE=4096\x00DEFAULT_PCACHE_INITSZ=20\x00DEFAULT_RECURSIVE_TRIGGERS\x00DEFAULT_SECTOR_SIZE=4096\x00DEFAULT_SYNCHRONOUS=2\x00DEFAULT_WAL_AUTOCHECKPOINT=1000\x00DEFAULT_WAL_SYNCHRONOUS=2\x00DEFAULT_WORKER_THREADS=0\x00ENABLE_COLUMN_METADATA\x00ENABLE_FTS5\x00ENABLE_GEOPOLY\x00ENABLE_MATH_FUNCTIONS\x00ENABLE_MEMORY_MANAGEMENT\x00ENABLE_OFFSET_SQL_FUNC\x00ENABLE_PREUPDATE_HOOK\x00ENABLE_RBU\x00ENABLE_RTREE\x00ENABLE_SESSION\x00ENABLE_SNAPSHOT\x00ENABLE_STAT4\x00ENABLE_UNLOCK_NOTIFY\x00LIKE_DOESNT_MATCH_BLOBS\x00MALLOC_SOFT_LIMIT=1024\x00MAX_ATTACHED=10\x00MAX_COLUMN=2000\x00MAX_COMPOUND_SELECT=500\x00MAX_DEFAULT_PAGE_SIZE=8192\x00MAX_EXPR_DEPTH=1000\x00MAX_FUNCTION_ARG=127\x00MAX_LENGTH=1000000000\x00MAX_LIKE_PATTERN_LENGTH=50000\x00MAX_MMAP_SIZE=0x7fff0000\x00MAX_PAGE_COUNT=1073741823\x00MAX_PAGE_SIZE=65536\x00MAX_SQL_LENGTH=1000000000\x00MAX_TRIGGER_DEPTH=1000\x00MAX_VARIABLE_NUMBER=32766\x00MAX_VDBE_OP=250000000\x00MAX_WORKER_THREADS=8\x00MUTEX_NOOP\x00SOUNDEX\x00SYSTEM_MALLOC\x00TEMP_STORE=1\x00THREADSAFE=1\x00BINARY\x00ANY\x00BLOB\x00INT\x00INTEGER\x00REAL\x00TEXT\x0020b:20e\x0020c:20e\x0020e\x0040f-21a-21d\x00now\x00local time unavailable\x00second\x00minute\x00hour\x00\x00\x00day\x00\x00\x00\x00month\x00\x00year\x00\x00\x00auto\x00julianday\x00localtime\x00unixepoch\x00utc\x00weekday \x00start of 
\x00month\x00year\x00day\x00%02d\x00%06.3f\x00%03d\x00%.16g\x00%lld\x00%04d\x00date\x00time\x00datetime\x00strftime\x00current_time\x00current_timestamp\x00current_date\x00failed to allocate %u bytes of memory\x00failed memory resize %u to %u bytes\x00out of memory\x000123456789ABCDEF0123456789abcdef\x00-x0\x00X0\x00%\x00NaN\x00Inf\x00\x00NULL\x00(NULL)\x00.\x00(join-%u)\x00(subquery-%u)\x00thstndrd\x00922337203685477580\x00API call with %s database connection pointer\x00unopened\x00invalid\x00Savepoint\x00AutoCommit\x00Transaction\x00Checkpoint\x00JournalMode\x00Vacuum\x00VFilter\x00VUpdate\x00Init\x00Goto\x00Gosub\x00InitCoroutine\x00Yield\x00MustBeInt\x00Jump\x00Once\x00If\x00IfNot\x00IsType\x00Not\x00IfNullRow\x00SeekLT\x00SeekLE\x00SeekGE\x00SeekGT\x00IfNotOpen\x00IfNoHope\x00NoConflict\x00NotFound\x00Found\x00SeekRowid\x00NotExists\x00Last\x00IfSmaller\x00SorterSort\x00Sort\x00Rewind\x00SorterNext\x00Prev\x00Next\x00IdxLE\x00IdxGT\x00IdxLT\x00Or\x00And\x00IdxGE\x00RowSetRead\x00RowSetTest\x00Program\x00FkIfZero\x00IsNull\x00NotNull\x00Ne\x00Eq\x00Gt\x00Le\x00Lt\x00Ge\x00ElseEq\x00IfPos\x00IfNotZero\x00DecrJumpZero\x00IncrVacuum\x00VNext\x00Filter\x00PureFunc\x00Function\x00Return\x00EndCoroutine\x00HaltIfNull\x00Halt\x00Integer\x00Int64\x00String\x00BeginSubrtn\x00Null\x00SoftNull\x00Blob\x00Variable\x00Move\x00Copy\x00SCopy\x00IntCopy\x00FkCheck\x00ResultRow\x00CollSeq\x00AddImm\x00RealAffinity\x00Cast\x00Permutation\x00Compare\x00IsTrue\x00ZeroOrNull\x00Offset\x00Column\x00TypeCheck\x00Affinity\x00MakeRecord\x00Count\x00ReadCookie\x00SetCookie\x00ReopenIdx\x00BitAnd\x00BitOr\x00ShiftLeft\x00ShiftRight\x00Add\x00Subtract\x00Multiply\x00Divide\x00Remainder\x00Concat\x00OpenRead\x00OpenWrite\x00BitNot\x00OpenDup\x00OpenAutoindex\x00String8\x00OpenEphemeral\x00SorterOpen\x00SequenceTest\x00OpenPseudo\x00Close\x00ColumnsUsed\x00SeekScan\x00SeekHit\x00Sequence\x00NewRowid\x00Insert\x00RowCell\x00Delete\x00ResetCount\x00SorterCompare\x00SorterData\x00RowData\x00Rowid\x00NullRow\x00SeekEnd\x00IdxInsert\x00SorterInsert\x00IdxDelete\x00DeferredSeek\x00IdxRowid\x00FinishSeek\x00Destroy\x00Clear\x00ResetSorter\x00CreateBtree\x00SqlExec\x00ParseSchema\x00LoadAnalysis\x00DropTable\x00DropIndex\x00Real\x00DropTrigger\x00IntegrityCk\x00RowSetAdd\x00Param\x00FkCounter\x00MemMax\x00OffsetLimit\x00AggInverse\x00AggStep\x00AggStep1\x00AggValue\x00AggFinal\x00Expire\x00CursorLock\x00CursorUnlock\x00TableLock\x00VBegin\x00VCreate\x00VDestroy\x00VOpen\x00VInitIn\x00VColumn\x00VRename\x00Pagecount\x00MaxPgcnt\x00ClrSubtype\x00FilterAdd\x00Trace\x00CursorHint\x00ReleaseReg\x00Noop\x00Explain\x00Abortable\x00open\x00close\x00access\x00getcwd\x00stat\x00fstat\x00ftruncate\x00fcntl\x00read\x00pread\x00pread64\x00write\x00pwrite\x00pwrite64\x00fchmod\x00fallocate\x00unlink\x00openDirectory\x00mkdir\x00rmdir\x00fchown\x00geteuid\x00mmap\x00munmap\x00mremap\x00getpagesize\x00readlink\x00lstat\x00ioctl\x00attempt to open \"%s\" as file descriptor %d\x00/dev/null\x00os_unix.c:%d: (%d) %s(%s) - %s\x00cannot fstat db file %s\x00file unlinked while open: %s\x00multiple links to file: %s\x00file renamed while open: %s\x00%s\x00full_fsync\x00%s-shm\x00readonly_shm\x00psow\x00unix-excl\x00%s.lock\x00/var/tmp\x00/usr/tmp\x00/tmp\x00SQLITE_TMPDIR\x00TMPDIR\x00%s/etilqs_%llx%c\x00modeof\x00fsync\x00/dev/urandom\x00unix\x00unix-none\x00unix-dotfile\x00memdb\x00memdb(%p,%lld)\x00PRAGMA \"%w\".page_count\x00ATTACH x AS %Q\x00recovered %d pages from %s\x00-journal\x00-wal\x00nolock\x00immutable\x00PRAGMA 
table_list\x00recovered %d frames from WAL file %s\x00cannot limit WAL size: %s\x00SQLite format 3\x00:memory:\x00@ \x00\n\x00invalid page number %d\x002nd reference to page %d\x00Failed to read ptrmap key=%d\x00Bad ptr map entry key=%d expected=(%d,%d) got=(%d,%d)\x00failed to get page %d\x00freelist leaf count too big on page %d\x00%s is %d but should be %d\x00size\x00overflow list length\x00Page %u: \x00unable to get the page. error code=%d\x00btreeInitPage() returns error code %d\x00free space corruption\x00On tree page %u cell %d: \x00On page %u at right child: \x00Offset %d out of range %d..%d\x00Extends off end of page\x00Rowid %lld out of order\x00Child page depth differs\x00Multiple uses for byte %u of page %u\x00Fragmentation of %d bytes reported as %d on page %u\x00Main freelist: \x00max rootpage (%d) disagrees with header (%d)\x00incremental_vacuum enabled with a max rootpage of zero\x00Page %d is never used\x00Pointer map page %d is referenced\x00unknown database %s\x00destination database is in use\x00source and destination must be distinct\x00%!.15g\x00-\x00%s%s\x00k(%d\x00B\x00,%s%s%s\x00N.\x00)\x00%.18s-%s\x00%s(%d)\x00%d\x00(blob)\x00vtab:%p\x00%c%u\x00]\x00program\x00?\x008\x0016LE\x0016BE\x00addr\x00opcode\x00p1\x00p2\x00p3\x00p4\x00p5\x00comment\x00id\x00parent\x00notused\x00detail\x00%.4c%s%.16c\x00MJ delete: %s\x00MJ collide: %s\x00-mj%06X9%02X\x00FOREIGN KEY constraint failed\x00a CHECK constraint\x00a generated column\x00an index\x00non-deterministic use of %s() in %s\x00API called with finalized prepared statement\x00API called with NULL prepared statement\x00string or blob too big\x00bind on a busy prepared statement: [%s]\x00-- \x00'%.*q'\x00zeroblob(%d)\x00x'\x00%02x\x00'\x00%s constraint failed\x00%z: %s\x00abort at %d in [%s]: %s\x00cannot store %s value in %s column %s.%s\x00cannot open savepoint - SQL statements in progress\x00no such savepoint: %s\x00cannot release savepoint - SQL statements in progress\x00cannot commit transaction - SQL statements in progress\x00cannot start a transaction within a transaction\x00cannot rollback - no transaction is active\x00cannot commit - no transaction is active\x00database schema has changed\x00index corruption\x00sqlite_master\x00SELECT*FROM\"%w\".%s WHERE %s ORDER BY rowid\x00too many levels of trigger recursion\x00cannot change %s wal mode from within a transaction\x00into\x00out of\x00database table is locked: %s\x00ValueList\x00-- %s\x00statement aborts at %d: [%s] %s\x00NOT NULL\x00UNIQUE\x00CHECK\x00FOREIGN KEY\x00cannot open value of type %s\x00null\x00real\x00integer\x00no such rowid: %lld\x00cannot open virtual table: %s\x00cannot open table without rowid: %s\x00cannot open view: %s\x00no such column: \"%s\"\x00foreign key\x00indexed\x00cannot open %s column for writing\x00sqlite_\x00sqlite_temp_master\x00sqlite_temp_schema\x00sqlite_schema\x00main\x00*\x00new\x00old\x00excluded\x00misuse of aliased aggregate %s\x00misuse of aliased window function %s\x00row value misused\x00double-quoted string literal: \"%w\"\x00coalesce\x00no such column\x00ambiguous column name\x00%s: %s.%s.%s\x00%s: %s.%s\x00%s: %s\x00partial index WHERE clauses\x00index expressions\x00CHECK constraints\x00generated columns\x00%s prohibited in %s\x00the \".\" operator\x00second argument to %#T() must be a constant between 0.0 and 1.0\x00not authorized to use function: %#T\x00non-deterministic functions\x00%#T() may not be used as a window function\x00window\x00aggregate\x00misuse of %s function %#T()\x00no such function: %#T\x00wrong 
number of arguments to function %#T()\x00FILTER may not be used with non-aggregate %#T()\x00subqueries\x00parameters\x00%r %s BY term out of range - should be between 1 and %d\x00too many terms in ORDER BY clause\x00ORDER\x00%r ORDER BY term does not match any column in the result set\x00too many terms in %s BY clause\x00HAVING clause on a non-aggregate query\x00GROUP\x00aggregate functions are not allowed in the GROUP BY clause\x00Expression tree is too large (maximum depth %d)\x00IN(...) element has %d term%s - expected %d\x00s\x000\x00too many arguments on function %T\x00unsafe use of %#T()\x00variable number must be between ?1 and ?%d\x00too many SQL variables\x00%d columns assigned %d values\x00too many columns in %s\x00true\x00false\x00_ROWID_\x00ROWID\x00OID\x00USING ROWID SEARCH ON TABLE %s FOR IN-OPERATOR\x00USING INDEX %s FOR IN-OPERATOR\x00sub-select returns %d columns - expected %d\x00REUSE LIST SUBQUERY %d\x00%sLIST SUBQUERY %d\x00CORRELATED \x00REUSE SUBQUERY %d\x00%sSCALAR SUBQUERY %d\x001\x000x\x00hex literal too big: %s%#T\x00generated column loop on \"%s\"\x00blob\x00text\x00numeric\x00flexnum\x00none\x00misuse of aggregate: %#T()\x00unknown function: %#T()\x00RAISE() may only be used within a trigger-program\x00B\x00C\x00D\x00E\x00F\x00table %s may not be altered\x00SELECT 1 FROM \"%w\".sqlite_master WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%' AND sqlite_rename_test(%Q, sql, type, name, %d, %Q, %d)=NULL \x00SELECT 1 FROM temp.sqlite_master WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%' AND sqlite_rename_test(%Q, sql, type, name, 1, %Q, %d)=NULL \x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_quotefix(%Q, sql)WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%'\x00UPDATE temp.sqlite_master SET sql = sqlite_rename_quotefix('temp', sql)WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%'\x00there is already another table or index with this name: %s\x00table\x00view %s may not be altered\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, %d) WHERE (type!='index' OR tbl_name=%Q COLLATE nocase)AND name NOT LIKE 'sqliteX_%%' ESCAPE 'X'\x00UPDATE %Q.sqlite_master SET tbl_name = %Q, name = CASE WHEN type='table' THEN %Q WHEN name LIKE 'sqliteX_autoindex%%' ESCAPE 'X' AND type='index' THEN 'sqlite_autoindex_' || %Q || substr(name,%d+18) ELSE name END WHERE tbl_name=%Q COLLATE nocase AND (type='table' OR type='index' OR type='trigger');\x00sqlite_sequence\x00UPDATE \"%w\".sqlite_sequence set name = %Q WHERE name = %Q\x00UPDATE sqlite_temp_schema SET sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, 1), tbl_name = CASE WHEN tbl_name=%Q COLLATE nocase AND sqlite_rename_test(%Q, sql, type, name, 1, 'after rename', 0) THEN %Q ELSE tbl_name END WHERE type IN ('view', 'trigger')\x00after rename\x00SELECT raise(ABORT,%Q) FROM \"%w\".\"%w\"\x00Cannot add a PRIMARY KEY column\x00Cannot add a UNIQUE column\x00Cannot add a REFERENCES column with non-NULL default value\x00Cannot add a NOT NULL column with default value NULL\x00Cannot add a column with non-constant default\x00cannot add a STORED column\x00UPDATE \"%w\".sqlite_master SET sql = printf('%%.%ds, ',sql) || %Q || substr(sql,1+length(printf('%%.%ds',sql))) WHERE type = 'table' AND name = %Q\x00SELECT CASE WHEN quick_check GLOB 'CHECK*' THEN raise(ABORT,'CHECK constraint failed') ELSE raise(ABORT,'NOT NULL constraint failed') END FROM pragma_quick_check(%Q,%Q) 
WHERE quick_check GLOB 'CHECK*' OR quick_check GLOB 'NULL*'\x00virtual tables may not be altered\x00Cannot add a column to a view\x00sqlite_altertab_%s\x00view\x00virtual table\x00cannot %s %s \"%s\"\x00drop column from\x00rename columns of\x00no such column: \"%T\"\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, %d) WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND (type != 'index' OR tbl_name = %Q)\x00UPDATE temp.sqlite_master SET sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, 1) WHERE type IN ('trigger', 'view')\x00error in %s %s%s%s: %s\x00 \x00CREATE \x00\"%w\" \x00%Q%s\x00%.*s%s\x00cannot drop %s column: \"%s\"\x00PRIMARY KEY\x00cannot drop column \"%s\": no other columns exist\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_drop_column(%d, sql, %d) WHERE (type=='table' AND tbl_name=%Q COLLATE nocase)\x00after drop column\x00sqlite_rename_column\x00sqlite_rename_table\x00sqlite_rename_test\x00sqlite_drop_column\x00sqlite_rename_quotefix\x00CREATE TABLE %Q.%s(%s)\x00DELETE FROM %Q.%s WHERE %s=%Q\x00DELETE FROM %Q.%s\x00sqlite_stat1\x00tbl,idx,stat\x00sqlite_stat4\x00tbl,idx,neq,nlt,ndlt,sample\x00sqlite_stat3\x00stat_init\x00stat_push\x00%llu\x00 %llu\x00%llu \x00stat_get\x00sqlite\\_%\x00BBB\x00idx\x00tbl\x00unordered*\x00sz=[0-9]*\x00noskipscan*\x00SELECT idx,count(*) FROM %Q.sqlite_stat4 GROUP BY idx\x00SELECT idx,neq,nlt,ndlt,sample FROM %Q.sqlite_stat4\x00SELECT tbl,idx,stat FROM %Q.sqlite_stat1\x00x\x00\x00too many attached databases - max %d\x00database %s is already in use\x00database is already attached\x00attached databases must use the same text encoding as main database\x00unable to open database: %s\x00no such database: %s\x00cannot detach database %s\x00database %s is locked\x00sqlite_detach\x00sqlite_attach\x00%s cannot use variables\x00%s %T cannot reference objects in database %s\x00authorizer malfunction\x00%s.%s\x00%s.%z\x00access to %z is prohibited\x00not authorized\x00pragma_\x00no such view\x00no such table\x00corrupt database\x00unknown database %T\x00object name reserved for internal use: %s\x00temporary table name must be unqualified\x00%s %T already exists\x00there is already an index named %s\x00sqlite_returning\x00cannot use RETURNING in a trigger\x00too many columns on %s\x00always\x00generated\x00duplicate column name: %s\x00default value of column [%s] is not constant\x00cannot use DEFAULT on a generated column\x00generated columns cannot be part of the PRIMARY KEY\x00table \"%s\" has more than one primary key\x00AUTOINCREMENT is only allowed on an INTEGER PRIMARY KEY\x00virtual tables cannot use computed columns\x00virtual\x00stored\x00error in generated column \"%s\"\x00,\x00\n \x00,\n \x00\n)\x00CREATE TABLE \x00 TEXT\x00 NUM\x00 INT\x00 REAL\x00unknown datatype for %s.%s: \"%s\"\x00missing datatype for %s.%s\x00AUTOINCREMENT not allowed on WITHOUT ROWID tables\x00PRIMARY KEY missing on table %s\x00must have at least one non-generated column\x00TABLE\x00VIEW\x00CREATE %s %.*s\x00UPDATE %Q.sqlite_master SET type='%s', name=%Q, tbl_name=%Q, rootpage=#%d, sql=%Q WHERE rowid=#%d\x00CREATE TABLE %Q.sqlite_sequence(name,seq)\x00tbl_name='%q' AND type!='trigger'\x00parameters are not allowed in views\x00view %s is circularly defined\x00corrupt schema\x00UPDATE %Q.sqlite_master SET rootpage=%d WHERE #%d AND rootpage=#%d\x00sqlite_stat%d\x00DELETE FROM %Q.sqlite_sequence WHERE name=%Q\x00DELETE FROM %Q.sqlite_master WHERE tbl_name=%Q and type!='trigger'\x00table %s may not be dropped\x00use 
DROP TABLE to delete table %s\x00use DROP VIEW to delete view %s\x00foreign key on %s should reference only one column of table %T\x00number of columns in foreign key does not match the number of columns in the referenced table\x00unknown column \"%s\" in foreign key definition\x00unsupported use of NULLS %s\x00FIRST\x00LAST\x00index\x00cannot create a TEMP index on non-TEMP table \"%s\"\x00table %s may not be indexed\x00views may not be indexed\x00virtual tables may not be indexed\x00there is already a table named %s\x00index %s already exists\x00sqlite_autoindex_%s_%d\x00expressions prohibited in PRIMARY KEY and UNIQUE constraints\x00conflicting ON CONFLICT clauses specified\x00invalid rootpage\x00CREATE%s INDEX %.*s\x00 UNIQUE\x00INSERT INTO %Q.sqlite_master VALUES('index',%Q,%Q,#%d,%Q);\x00name='%q' AND type='index'\x00no such index: %S\x00index associated with UNIQUE or PRIMARY KEY constraint cannot be dropped\x00DELETE FROM %Q.sqlite_master WHERE name=%Q AND type='index'\x00too many FROM clause terms, max: %d\x00a JOIN clause is required before %s\x00ON\x00USING\x00BEGIN\x00ROLLBACK\x00COMMIT\x00RELEASE\x00unable to open a temporary database file for storing temporary tables\x00index '%q'\x00, \x00%s.rowid\x00unable to identify the object to be reindexed\x00duplicate WITH table name: %s\x00no such collation sequence: %s\x00unsafe use of virtual table \"%s\"\x00table %s may not be modified\x00cannot modify %s because it is a view\x00rows deleted\x00integer overflow\x00%.*f\x00LIKE or GLOB pattern too complex\x00ESCAPE expression must be a single character\x00%!.20e\x00%Q\x00?000\x00MATCH\x00like\x00implies_nonnull_row\x00expr_compare\x00expr_implies_expr\x00affinity\x00soundex\x00load_extension\x00sqlite_compileoption_used\x00sqlite_compileoption_get\x00unlikely\x00likelihood\x00likely\x00sqlite_offset\x00ltrim\x00rtrim\x00trim\x00min\x00max\x00typeof\x00subtype\x00length\x00instr\x00printf\x00format\x00unicode\x00char\x00abs\x00round\x00upper\x00lower\x00hex\x00unhex\x00ifnull\x00random\x00randomblob\x00nullif\x00sqlite_version\x00sqlite_source_id\x00sqlite_log\x00quote\x00last_insert_rowid\x00changes\x00total_changes\x00replace\x00zeroblob\x00substr\x00substring\x00sum\x00total\x00avg\x00count\x00group_concat\x00glob\x00ceil\x00ceiling\x00floor\x00trunc\x00ln\x00log\x00log10\x00log2\x00exp\x00pow\x00power\x00mod\x00acos\x00asin\x00atan\x00atan2\x00cos\x00sin\x00tan\x00cosh\x00sinh\x00tanh\x00acosh\x00asinh\x00atanh\x00sqrt\x00radians\x00degrees\x00pi\x00sign\x00iif\x00foreign key mismatch - \"%w\" referencing \"%w\"\x00cannot INSERT into generated column \"%s\"\x00table %S has no column named %s\x00table %S has %d columns but %d values were supplied\x00%d values for %d columns\x00UPSERT not implemented for virtual table \"%s\"\x00cannot UPSERT a view\x00rows inserted\x00sqlite3_extension_init\x00sqlite3_\x00lib\x00_init\x00no entry point [%s] in shared library [%s]\x00error during initialization: %s\x00unable to open shared library [%.*s]\x00so\x00automatic extension loading failed: 
%s\x00seq\x00from\x00to\x00on_update\x00on_delete\x00match\x00cid\x00name\x00type\x00notnull\x00dflt_value\x00pk\x00hidden\x00schema\x00ncol\x00wr\x00strict\x00seqno\x00desc\x00coll\x00key\x00builtin\x00enc\x00narg\x00flags\x00wdth\x00hght\x00flgs\x00unique\x00origin\x00partial\x00rowid\x00fkid\x00file\x00busy\x00checkpointed\x00database\x00status\x00cache_size\x00timeout\x00analysis_limit\x00application_id\x00auto_vacuum\x00automatic_index\x00busy_timeout\x00cache_spill\x00case_sensitive_like\x00cell_size_check\x00checkpoint_fullfsync\x00collation_list\x00compile_options\x00count_changes\x00data_version\x00database_list\x00default_cache_size\x00defer_foreign_keys\x00empty_result_callbacks\x00encoding\x00foreign_key_check\x00foreign_key_list\x00foreign_keys\x00freelist_count\x00full_column_names\x00fullfsync\x00function_list\x00hard_heap_limit\x00ignore_check_constraints\x00incremental_vacuum\x00index_info\x00index_list\x00index_xinfo\x00integrity_check\x00journal_mode\x00journal_size_limit\x00legacy_alter_table\x00locking_mode\x00max_page_count\x00mmap_size\x00module_list\x00optimize\x00page_count\x00page_size\x00pragma_list\x00query_only\x00quick_check\x00read_uncommitted\x00recursive_triggers\x00reverse_unordered_selects\x00schema_version\x00secure_delete\x00short_column_names\x00shrink_memory\x00soft_heap_limit\x00synchronous\x00table_info\x00table_list\x00table_xinfo\x00temp_store\x00temp_store_directory\x00threads\x00trusted_schema\x00user_version\x00wal_autocheckpoint\x00wal_checkpoint\x00writable_schema\x00onoffalseyestruextrafull\x00exclusive\x00normal\x00full\x00incremental\x00memory\x00temporary storage cannot be changed from within a transaction\x00SET NULL\x00SET DEFAULT\x00CASCADE\x00RESTRICT\x00NO ACTION\x00delete\x00persist\x00off\x00truncate\x00wal\x00w\x00a\x00sissii\x00utf8\x00utf16le\x00utf16be\x00-%T\x00fast\x00not a writable directory\x00Safety level may not be changed inside a transaction\x00reset\x00issisii\x00issisi\x00SELECT*FROM\"%w\"\x00shadow\x00sssiii\x00iisX\x00isiX\x00c\x00u\x00isisi\x00iss\x00is\x00iissssss\x00NONE\x00siX\x00*** in database %s ***\n\x00row not in PRIMARY KEY order for %s\x00NULL value in %s.%s\x00non-%s value in %s.%s\x00NUMERIC value in %s.%s\x00C\x00TEXT value in %s.%s\x00CHECK constraint failed in %s\x00row \x00 missing from index \x00rowid not at end-of-record for row \x00 of index \x00 values differ from index \x00non-unique entry in index \x00wrong # of entries in index \x00ok\x00unsupported encoding: %s\x00restart\x00ANALYZE \"%w\".\"%w\"\x00UTF8\x00UTF-8\x00UTF-16le\x00UTF-16be\x00UTF16le\x00UTF16be\x00UTF-16\x00UTF16\x00CREATE TABLE x\x00%c\"%s\"\x00(\"%s\"\x00,arg HIDDEN\x00,schema HIDDEN\x00PRAGMA \x00%Q.\x00=%Q\x00error in %s %s after %s: %s\x00malformed database schema (%s)\x00%z - %s\x00rename\x00drop column\x00add column\x00orphan index\x00CREATE TABLE x(type text,name text,tbl_name text,rootpage int,sql text)\x00unsupported file format\x00SELECT*FROM\"%w\".%s ORDER BY rowid\x00database schema is locked: %s\x00statement too long\x00unknown join type: %T%s%T%s%T\x00naturaleftouterightfullinnercross\x00a NATURAL join may not have an ON or USING clause\x00cannot join using column %s - column not present in both tables\x00ambiguous reference to %s in USING()\x00UNION ALL\x00INTERSECT\x00EXCEPT\x00UNION\x00USE TEMP B-TREE FOR %s\x00USE TEMP B-TREE FOR %sORDER BY\x00RIGHT PART OF \x00column%d\x00%.*z:%u\x00NUM\x00cannot use window functions in recursive queries\x00recursive aggregate queries not supported\x00SETUP\x00RECURSIVE 
STEP\x00SCAN %d CONSTANT ROW%s\x00S\x00COMPOUND QUERY\x00LEFT-MOST SUBQUERY\x00%s USING TEMP B-TREE\x00all VALUES must have the same number of terms\x00SELECTs to the left and right of %s do not have the same number of result columns\x00MERGE (%s)\x00LEFT\x00RIGHT\x00no such index: %s\x00'%s' is not a function\x00no such index: \"%s\"\x00multiple references to recursive table: %s\x00circular reference: %s\x00table %s has %d values for %d columns\x00multiple recursive references: %s\x00recursive reference in a subquery: %s\x00%!S\x00too many references to \"%s\": max 65535\x00access to view \"%s\" prohibited\x00..%s\x00%s.%s.%s\x00no such table: %s\x00no tables specified\x00too many columns in result set\x00DISTINCT aggregates must have exactly one argument\x00USE TEMP B-TREE FOR %s(DISTINCT)\x00SCAN %s%s%s\x00 USING COVERING INDEX \x00target object/alias may not appear in FROM clause: %s\x00expected %d columns for '%s' but got %d\x00CO-ROUTINE %!S\x00MATERIALIZE %!S\x00DISTINCT\x00GROUP BY\x00sqlite3_get_table() called with two or more incompatible queries\x00temporary trigger may not have qualified name\x00trigger\x00cannot create triggers on virtual tables\x00trigger %T already exists\x00cannot create trigger on system table\x00cannot create %s trigger on view: %S\x00BEFORE\x00AFTER\x00cannot create INSTEAD OF trigger on table: %S\x00trigger \"%s\" may not write to shadow table \"%s\"\x00INSERT INTO %Q.sqlite_master VALUES('trigger',%Q,%Q,0,'CREATE TRIGGER %q')\x00type='trigger' AND name='%q'\x00no such trigger: %S\x00DELETE FROM %Q.sqlite_master WHERE name=%Q AND type='trigger'\x00%s RETURNING is not available on virtual tables\x00DELETE\x00UPDATE\x00RETURNING may not use \"TABLE.*\" wildcards\x00-- TRIGGER %s\x00cannot UPDATE generated column \"%s\"\x00no such column: %s\x00rows updated\x00%r \x00%sON CONFLICT clause does not match any PRIMARY KEY or UNIQUE constraint\x00CRE\x00INS\x00cannot VACUUM from within a transaction\x00cannot VACUUM - SQL statements in progress\x00non-text filename\x00ATTACH %Q AS vacuum_db\x00output file already exists\x00SELECT sql FROM \"%w\".sqlite_schema WHERE type='table'AND name<>'sqlite_sequence' AND coalesce(rootpage,1)>0\x00SELECT sql FROM \"%w\".sqlite_schema WHERE type='index'\x00SELECT'INSERT INTO vacuum_db.'||quote(name)||' SELECT*FROM\"%w\".'||quote(name)FROM vacuum_db.sqlite_schema WHERE type='table'AND coalesce(rootpage,1)>0\x00INSERT INTO vacuum_db.sqlite_schema SELECT*FROM \"%w\".sqlite_schema WHERE type IN('view','trigger') OR(type='table'AND rootpage=0)\x00CREATE VIRTUAL TABLE %T\x00UPDATE %Q.sqlite_master SET type='table', name=%Q, tbl_name=%Q, rootpage=0, sql=%Q WHERE rowid=#%d\x00name=%Q AND sql=%Q\x00vtable constructor called recursively: %s\x00vtable constructor failed: %s\x00vtable constructor did not declare schema: %s\x00no such module: %s\x00\x00 AND \x00(\x00 (\x00%s=?\x00ANY(%s)\x00>\x00<\x00%s %S\x00SEARCH\x00SCAN\x00AUTOMATIC PARTIAL COVERING INDEX\x00AUTOMATIC COVERING INDEX\x00COVERING INDEX %s\x00INDEX %s\x00 USING \x00 USING INTEGER PRIMARY KEY (%s\x00>? 
AND %s\x00%c?)\x00 VIRTUAL TABLE INDEX %d:%s\x00 LEFT-JOIN\x00BLOOM FILTER ON %S (\x00rowid=?\x00MULTI-INDEX OR\x00INDEX %d\x00RIGHT-JOIN %s\x00regexp\x00ON clause references tables to its right\x00NOCASE\x00too many arguments on %s() - max %d\x00automatic index on %s(%s)\x00auto-index\x00%s.xBestIndex malfunction\x00abbreviated query algorithm search\x00no query solution\x00at most %d tables in a join\x00SCAN CONSTANT ROW\x00second argument to nth_value must be a positive integer\x00argument of ntile must be a positive integer\x00row_number\x00dense_rank\x00rank\x00percent_rank\x00cume_dist\x00ntile\x00last_value\x00nth_value\x00first_value\x00lead\x00lag\x00no such window: %s\x00RANGE with offset PRECEDING/FOLLOWING requires one ORDER BY expression\x00FILTER clause may only be used with aggregate window functions\x00misuse of aggregate: %s()\x00unsupported frame specification\x00PARTITION clause\x00ORDER BY clause\x00frame specification\x00cannot override %s of window: %s\x00DISTINCT is not supported for window functions\x00frame starting offset must be a non-negative integer\x00frame ending offset must be a non-negative integer\x00frame starting offset must be a non-negative number\x00frame ending offset must be a non-negative number\x00%s clause should come after %s not before\x00ORDER BY\x00LIMIT\x00too many terms in compound SELECT\x00syntax error after column name \"%.*s\"\x00parser stack overflow\x00unknown table option: %.*s\x00set list\x00near \"%T\": syntax error\x00qualified table names are not allowed on INSERT, UPDATE, and DELETE statements within triggers\x00the INDEXED BY clause is not allowed on UPDATE or DELETE statements within triggers\x00the NOT INDEXED clause is not allowed on UPDATE or DELETE statements within triggers\x00incomplete input\x00unrecognized token: \"%T\"\x00%s in \"%s\"\x00create\x00temp\x00temporary\x00end\x00explain\x00unable to close due to unfinalized statements or unfinished backups\x00unknown error\x00abort due to ROLLBACK\x00another row available\x00no more rows available\x00not an error\x00SQL logic error\x00access permission denied\x00query aborted\x00database is locked\x00database table is locked\x00attempt to write a readonly database\x00interrupted\x00disk I/O error\x00database disk image is malformed\x00unknown operation\x00database or disk is full\x00unable to open database file\x00locking protocol\x00constraint failed\x00datatype mismatch\x00bad parameter or other API misuse\x00authorization denied\x00column index out of range\x00file is not a database\x00notification message\x00warning message\x00unable to delete/modify user-function due to active statements\x00unable to use function %s in the requested context\x00unknown database: %s\x00unable to delete/modify collation sequence due to active statements\x00file:\x00localhost\x00invalid uri authority: %.*s\x00vfs\x00cache\x00mode\x00no such %s mode: %s\x00%s mode not allowed: %s\x00no such vfs: %s\x00shared\x00private\x00ro\x00rw\x00rwc\x00RTRIM\x00\x00\x00\x00%s at line %d of [%.10s]\x00database corruption\x00misuse\x00cannot open file\x00no such table column: %s.%s\x00SQLITE_\x00database is deadlocked\x00array\x00object\x000123456789abcdef\x00JSON cannot hold BLOB values\x00malformed JSON\x00[0]\x00JSON path error near '%q'\x00json_%s() needs an odd number of arguments\x00$[\x00$.\x00json_object() requires an even number of arguments\x00json_object() labels must be TEXT\x00set\x00insert\x00[]\x00{}\x00CREATE TABLE x(key,value,type,atom,id,parent,fullkey,path,json HIDDEN,root 
HIDDEN)\x00.%.*s\x00[%d]\x00$\x00json\x00json_array\x00json_array_length\x00json_extract\x00->\x00->>\x00json_insert\x00json_object\x00json_patch\x00json_quote\x00json_remove\x00json_replace\x00json_set\x00json_type\x00json_valid\x00json_group_array\x00json_group_object\x00json_each\x00json_tree\x00%s_node\x00data\x00DROP TABLE '%q'.'%q_node';DROP TABLE '%q'.'%q_rowid';DROP TABLE '%q'.'%q_parent';\x00RtreeMatchArg\x00SELECT * FROM %Q.%Q\x00UNIQUE constraint failed: %s.%s\x00rtree constraint failed: %s.(%s<=%s)\x00ALTER TABLE %Q.'%q_node' RENAME TO \"%w_node\";ALTER TABLE %Q.'%q_parent' RENAME TO \"%w_parent\";ALTER TABLE %Q.'%q_rowid' RENAME TO \"%w_rowid\";\x00SELECT stat FROM %Q.sqlite_stat1 WHERE tbl = '%q_rowid'\x00node\x00CREATE TABLE \"%w\".\"%w_rowid\"(rowid INTEGER PRIMARY KEY,nodeno\x00,a%d\x00);CREATE TABLE \"%w\".\"%w_node\"(nodeno INTEGER PRIMARY KEY,data);\x00CREATE TABLE \"%w\".\"%w_parent\"(nodeno INTEGER PRIMARY KEY,parentnode);\x00INSERT INTO \"%w\".\"%w_node\"VALUES(1,zeroblob(%d))\x00INSERT INTO\"%w\".\"%w_rowid\"(rowid,nodeno)VALUES(?1,?2)ON CONFLICT(rowid)DO UPDATE SET nodeno=excluded.nodeno\x00SELECT * FROM \"%w\".\"%w_rowid\" WHERE rowid=?1\x00UPDATE \"%w\".\"%w_rowid\"SET \x00a%d=coalesce(?%d,a%d)\x00a%d=?%d\x00 WHERE rowid=?1\x00INSERT OR REPLACE INTO '%q'.'%q_node' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_node' WHERE nodeno = ?1\x00SELECT nodeno FROM '%q'.'%q_rowid' WHERE rowid = ?1\x00INSERT OR REPLACE INTO '%q'.'%q_rowid' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_rowid' WHERE rowid = ?1\x00SELECT parentnode FROM '%q'.'%q_parent' WHERE nodeno = ?1\x00INSERT OR REPLACE INTO '%q'.'%q_parent' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_parent' WHERE nodeno = ?1\x00PRAGMA %Q.page_size\x00SELECT length(data) FROM '%q'.'%q_node' WHERE nodeno = 1\x00undersize RTree blobs in \"%q_node\"\x00Wrong number of columns for an rtree table\x00Too few columns for an rtree table\x00Too many columns for an rtree table\x00Auxiliary rtree columns must be last\x00CREATE TABLE x(%.*s INT\x00,%.*s\x00);\x00,%.*s REAL\x00,%.*s INT\x00{%lld\x00 %g\x00}\x00Invalid argument to rtreedepth()\x00%z%s%z\x00SELECT data FROM %Q.'%q_node' WHERE nodeno=?\x00Node %lld missing from database\x00SELECT parentnode FROM %Q.'%q_parent' WHERE nodeno=?1\x00SELECT nodeno FROM %Q.'%q_rowid' WHERE rowid=?1\x00Mapping (%lld -> %lld) missing from %s table\x00%_rowid\x00%_parent\x00Found (%lld -> %lld) in %s table, expected (%lld -> %lld)\x00Dimension %d of cell %d on node %lld is corrupt\x00Dimension %d of cell %d on node %lld is corrupt relative to parent\x00Node %lld is too small (%d bytes)\x00Rtree depth out of range (%d)\x00Node %lld is too small for cell count of %d (%d bytes)\x00SELECT count(*) FROM %Q.'%q%s'\x00Wrong number of entries in %%%s table - expected %lld, actual %lld\x00SELECT * FROM %Q.'%q_rowid'\x00Schema corrupt or not an rtree\x00_rowid\x00_parent\x00END\x00wrong number of arguments to function rtreecheck()\x00[\x00[%!g,%!g],\x00[%!g,%!g]]\x00\x00CREATE TABLE x(_shape\x00,%s\x00rtree\x00fullscan\x00_shape does not contain a valid polygon\x00geopoly_overlap\x00geopoly_within\x00geopoly\x00geopoly_area\x00geopoly_blob\x00geopoly_json\x00geopoly_svg\x00geopoly_contains_point\x00geopoly_debug\x00geopoly_bbox\x00geopoly_xform\x00geopoly_regular\x00geopoly_ccw\x00geopoly_group_bbox\x00rtreenode\x00rtreedepth\x00rtreecheck\x00rtree_i32\x00corrupt fossil delta\x00DROP TRIGGER IF EXISTS temp.rbu_insert_tr;DROP TRIGGER IF EXISTS temp.rbu_update1_tr;DROP TRIGGER IF EXISTS temp.rbu_update2_tr;DROP TRIGGER IF 
EXISTS temp.rbu_delete_tr;\x00SELECT rbu_target_name(name, type='view') AS target, name FROM sqlite_schema WHERE type IN ('table', 'view') AND target IS NOT NULL %s ORDER BY name\x00AND rootpage!=0 AND rootpage IS NOT NULL\x00SELECT name, rootpage, sql IS NULL OR substr(8, 6)=='UNIQUE' FROM main.sqlite_schema WHERE type='index' AND tbl_name = ?\x00SELECT (sql COLLATE nocase BETWEEN 'CREATE VIRTUAL' AND 'CREATE VIRTUAM'), rootpage FROM sqlite_schema WHERE name=%Q\x00PRAGMA index_list=%Q\x00SELECT rootpage FROM sqlite_schema WHERE name = %Q\x00PRAGMA table_info=%Q\x00PRAGMA main.index_list = %Q\x00PRAGMA main.index_xinfo = %Q\x00SELECT * FROM '%q'\x00rbu_\x00rbu_rowid\x00table %q %s rbu_rowid column\x00may not have\x00requires\x00PRAGMA table_info(%Q)\x00column missing from %q: %s\x00%z%s\"%w\"\x00%z%s%s\"%w\"%s\x00SELECT max(_rowid_) FROM \"%s%w\"\x00 WHERE _rowid_ > %lld \x00 DESC\x00quote(\x00||','||\x00SELECT %s FROM \"%s%w\" ORDER BY %s LIMIT 1\x00 WHERE (%s) > (%s) \x00_rowid_\x00%z%s \"%w\" COLLATE %Q\x00%z%s \"rbu_imp_%d%w\" COLLATE %Q DESC\x00%z%s quote(\"rbu_imp_%d%w\")\x00SELECT %s FROM \"rbu_imp_%w\" ORDER BY %s LIMIT 1\x00%z%s%s\x00(%s) > (%s)\x00%z%s(%.*s) COLLATE %Q\x00%z%s\"%w\" COLLATE %Q\x00%z%s\"rbu_imp_%d%w\"%s\x00%z%s\"rbu_imp_%d%w\" %s COLLATE %Q\x00%z%s\"rbu_imp_%d%w\" IS ?\x00%z%s%s.\"%w\"\x00%z%sNULL\x00%z, %s._rowid_\x00_rowid_ = ?%d\x00%z%sc%d=?%d\x00_rowid_ = (SELECT id FROM rbu_imposter2 WHERE %z)\x00%z%s\"%w\"=?%d\x00invalid rbu_control value\x00%z%s\"%w\"=rbu_delta(\"%w\", ?%d)\x00%z%s\"%w\"=rbu_fossil_delta(\"%w\", ?%d)\x00PRIMARY KEY(\x00%z%s\"%w\"%s\x00%z)\x00SELECT name FROM sqlite_schema WHERE rootpage = ?\x00%z%sc%d %s COLLATE %Q\x00%z%sc%d%s\x00%z, id INTEGER\x00CREATE TABLE rbu_imposter2(%z, PRIMARY KEY(%z)) WITHOUT ROWID\x00PRIMARY KEY \x00%z%s\"%w\" %s %sCOLLATE %Q%s\x00 NOT NULL\x00%z, %z\x00CREATE TABLE \"rbu_imp_%w\"(%z)%s\x00 WITHOUT ROWID\x00INSERT INTO %s.'rbu_tmp_%q'(rbu_control,%s%s) VALUES(%z)\x00SELECT trim(sql) FROM sqlite_schema WHERE type='index' AND name=?\x00 LIMIT -1 OFFSET %d\x00CREATE TABLE \"rbu_imp_%w\"( %s, PRIMARY KEY( %s ) ) WITHOUT ROWID\x00INSERT INTO \"rbu_imp_%w\" VALUES(%s)\x00DELETE FROM \"rbu_imp_%w\" WHERE %s\x00SELECT %s, 0 AS rbu_control FROM '%q' %s %s %s ORDER BY %s%s\x00AND\x00WHERE\x00SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' %s ORDER BY %s%s\x00SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' %s UNION ALL SELECT %s, rbu_control FROM '%q' %s %s typeof(rbu_control)='integer' AND rbu_control!=1 ORDER BY %s%s\x00rbu_imp_\x00INSERT INTO \"%s%w\"(%s%s) VALUES(%s)\x00, _rowid_\x00DELETE FROM \"%s%w\" WHERE %s\x00, rbu_rowid\x00CREATE TABLE IF NOT EXISTS %s.'rbu_tmp_%q' AS SELECT *%s FROM '%q' WHERE 0;\x00, 0 AS rbu_rowid\x00CREATE TEMP TRIGGER rbu_delete_tr BEFORE DELETE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(3, %s);END;CREATE TEMP TRIGGER rbu_update1_tr BEFORE UPDATE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(3, %s);END;CREATE TEMP TRIGGER rbu_update2_tr AFTER UPDATE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(4, %s);END;\x00CREATE TEMP TRIGGER rbu_insert_tr AFTER INSERT ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(0, %s);END;\x00,_rowid_ \x00,rbu_rowid\x00SELECT %s,%s rbu_control%s FROM '%q'%s %s %s %s\x000 AS \x00UPDATE \"%s%w\" SET %s WHERE %s\x00SELECT k, v FROM %s.rbu_state\x00file:///%s-vacuum?modeof=%s\x00ATTACH %Q AS stat\x00CREATE TABLE IF NOT EXISTS %s.rbu_state(k INTEGER PRIMARY KEY, v)\x00cannot vacuum wal mode 
database\x00file:%s-vactmp?rbu_memory=1%s%s\x00&\x00rbu_tmp_insert\x00rbu_fossil_delta\x00rbu_target_name\x00SELECT * FROM sqlite_schema\x00rbu vfs not found\x00PRAGMA main.wal_checkpoint=restart\x00rbu_exclusive_checkpoint\x00%s-oal\x00%s-wal\x00PRAGMA schema_version\x00PRAGMA schema_version = %d\x00INSERT OR REPLACE INTO %s.rbu_state(k, v) VALUES (%d, %d), (%d, %Q), (%d, %Q), (%d, %d), (%d, %d), (%d, %lld), (%d, %lld), (%d, %lld), (%d, %lld), (%d, %Q) \x00PRAGMA main.%s\x00PRAGMA main.%s = %d\x00PRAGMA writable_schema=1\x00SELECT sql FROM sqlite_schema WHERE sql!='' AND rootpage!=0 AND name!='sqlite_sequence' ORDER BY type DESC\x00SELECT * FROM sqlite_schema WHERE rootpage=0 OR rootpage IS NULL\x00INSERT INTO sqlite_schema VALUES(?,?,?,?,?)\x00PRAGMA writable_schema=0\x00DELETE FROM %s.'rbu_tmp_%q'\x00rbu_state mismatch error\x00rbu_vfs_%d\x00SELECT count(*) FROM sqlite_schema WHERE type='index' AND tbl_name = %Q\x00rbu_index_cnt\x00SELECT 1 FROM sqlite_schema WHERE tbl_name = 'rbu_count'\x00SELECT sum(cnt * (1 + rbu_index_cnt(rbu_target_name(tbl))))FROM rbu_count\x00cannot update wal mode database\x00database modified during rbu %s\x00vacuum\x00update\x00BEGIN IMMEDIATE\x00PRAGMA journal_mode=off\x00-vactmp\x00DELETE FROM stat.rbu_state\x00rbu/zipvfs setup error\x00rbu(%s)/%z\x00rbu_memory\x00SELECT 0, 'tbl', '', 0, '', 1 UNION ALL SELECT 1, 'idx', '', 0, '', 2 UNION ALL SELECT 2, 'stat', '', 0, '', 0\x00PRAGMA '%q'.table_info('%q')\x00%z%s\"%w\".\"%w\".\"%w\"=\"%w\".\"%w\".\"%w\"\x00%z%s\"%w\".\"%w\".\"%w\" IS NOT \"%w\".\"%w\".\"%w\"\x00 OR \x00SELECT * FROM \"%w\".\"%w\" WHERE NOT EXISTS ( SELECT 1 FROM \"%w\".\"%w\" WHERE %s)\x00SELECT * FROM \"%w\".\"%w\", \"%w\".\"%w\" WHERE %s AND (%z)\x00table schemas do not match\x00SELECT tbl, ?2, stat FROM %Q.sqlite_stat1 WHERE tbl IS ?1 AND idx IS (CASE WHEN ?2=X'' THEN NULL ELSE ?2 END)\x00SELECT * FROM \x00 WHERE \x00 IS ?\x00SAVEPOINT changeset\x00RELEASE changeset\x00UPDATE main.\x00 SET \x00 = ?\x00idx IS CASE WHEN length(?4)=0 AND typeof(?4)='blob' THEN NULL ELSE ?4 END \x00DELETE FROM main.\x00 AND (?\x00AND \x00INSERT INTO main.\x00) VALUES(?\x00, ?\x00INSERT INTO main.sqlite_stat1 VALUES(?1, CASE WHEN length(?2)=0 AND typeof(?2)='blob' THEN NULL ELSE ?2 END, ?3)\x00DELETE FROM main.sqlite_stat1 WHERE tbl=?1 AND idx IS CASE WHEN length(?2)=0 AND typeof(?2)='blob' THEN NULL ELSE ?2 END AND (?4 OR stat IS ?3)\x00SAVEPOINT replace_op\x00RELEASE replace_op\x00SAVEPOINT changeset_apply\x00PRAGMA defer_foreign_keys = 1\x00sqlite3changeset_apply(): no such table: %s\x00sqlite3changeset_apply(): table %s has %d columns, expected %d or more\x00sqlite3changeset_apply(): primary key mismatch for table %s\x00PRAGMA defer_foreign_keys = 0\x00RELEASE changeset_apply\x00ROLLBACK TO changeset_apply\x00fts5: parser stack overflow\x00fts5: syntax error near \"%.*s\"\x00%z%.*s\x00wrong number of arguments to function highlight()\x00wrong number of arguments to function snippet()\x00snippet\x00highlight\x00bm25\x00prefix\x00malformed prefix=... directive\x00too many prefix indexes (max %d)\x00prefix length out of range (max 999)\x00tokenize\x00multiple tokenize=... directives\x00parse error in tokenize directive\x00content\x00multiple content=... directives\x00%Q.%Q\x00content_rowid\x00multiple content_rowid=... directives\x00columnsize\x00malformed columnsize=... directive\x00columns\x00malformed detail=... 
directive\x00unrecognized option: \"%.*s\"\x00reserved fts5 column name: %s\x00unindexed\x00unrecognized column option: %s\x00T.%Q\x00, T.%Q\x00, T.c%d\x00reserved fts5 table name: %s\x00parse error in \"%s\"\x00docsize\x00%Q.'%q_%s'\x00CREATE TABLE x(\x00%z%s%Q\x00%z, %Q HIDDEN, %s HIDDEN)\x00pgsz\x00hashsize\x00automerge\x00usermerge\x00crisismerge\x00SELECT k, v FROM %Q.'%q_config'\x00version\x00invalid fts5 file format (found %d, expected %d) - run 'rebuild'\x00unterminated string\x00fts5: syntax error near \"%.1s\"\x00OR\x00NOT\x00NEAR\x00expected integer, got \"%.*s\"\x00fts5: column queries are not supported (detail=none)\x00fts5: %s queries are not supported (detail!=full)\x00phrase\x00block\x00REPLACE INTO '%q'.'%q_data'(id, block) VALUES(?,?)\x00DELETE FROM '%q'.'%q_data' WHERE id>=? AND id<=?\x00DELETE FROM '%q'.'%q_idx' WHERE segid=?\x00PRAGMA %Q.data_version\x00SELECT pgno FROM '%q'.'%q_idx' WHERE segid=? AND term<=? ORDER BY term DESC LIMIT 1\x00INSERT INTO '%q'.'%q_idx'(segid,term,pgno) VALUES(?,?,?)\x00%s_data\x00id INTEGER PRIMARY KEY, block BLOB\x00segid, term, pgno, PRIMARY KEY(segid, term)\x00SELECT segid, term, (pgno>>1), (pgno&1) FROM %Q.'%q_idx' WHERE segid=%d ORDER BY 1, 2\x00\x00\x00\x00\x00\x00recursively defined fts5 content table\x00SELECT rowid, rank FROM %Q.%Q ORDER BY %s(\"%w\"%s%s) %s\x00DESC\x00ASC\x00reads\x00unknown special query: %.*s\x00SELECT %s\x00no such function: %s\x00parse error in rank function: %s\x00%s: table does not support scanning\x00delete-all\x00'delete-all' may only be used with a contentless or external content fts5 table\x00rebuild\x00'rebuild' may not be used with a contentless fts5 table\x00merge\x00integrity-check\x00cannot %s contentless fts5 table: %s\x00DELETE from\x00no such cursor: %lld\x00no such tokenizer: %s\x00error in tokenizer constructor\x00fts5_api_ptr\x00fts5: 2023-03-22 11:56:21 0d1fc92f94cb6b76bffe3ec34d69cffde2924203304e8ffc4155597af0c191da\x00config\x00fts5\x00fts5_source_id\x00SELECT %s FROM %s T WHERE T.%Q >= ? AND T.%Q <= ? ORDER BY T.%Q ASC\x00SELECT %s FROM %s T WHERE T.%Q <= ? AND T.%Q >= ? 
ORDER BY T.%Q DESC\x00SELECT %s FROM %s T WHERE T.%Q=?\x00INSERT INTO %Q.'%q_content' VALUES(%s)\x00REPLACE INTO %Q.'%q_content' VALUES(%s)\x00DELETE FROM %Q.'%q_content' WHERE id=?\x00REPLACE INTO %Q.'%q_docsize' VALUES(?,?)\x00DELETE FROM %Q.'%q_docsize' WHERE id=?\x00SELECT sz FROM %Q.'%q_docsize' WHERE id=?\x00REPLACE INTO %Q.'%q_config' VALUES(?,?)\x00SELECT %s FROM %s AS T\x00DROP TABLE IF EXISTS %Q.'%q_data';DROP TABLE IF EXISTS %Q.'%q_idx';DROP TABLE IF EXISTS %Q.'%q_config';\x00DROP TABLE IF EXISTS %Q.'%q_docsize';\x00DROP TABLE IF EXISTS %Q.'%q_content';\x00ALTER TABLE %Q.'%q_%s' RENAME TO '%q_%s';\x00CREATE TABLE %Q.'%q_%q'(%s)%s\x00fts5: error creating shadow table %q_%s: %s\x00id INTEGER PRIMARY KEY\x00, c%d\x00id INTEGER PRIMARY KEY, sz BLOB\x00k PRIMARY KEY, v\x00DELETE FROM %Q.'%q_data';DELETE FROM %Q.'%q_idx';\x00DELETE FROM %Q.'%q_docsize';\x00SELECT count(*) FROM %Q.'%q_%s'\x00tokenchars\x00separators\x00L* N* Co\x00categories\x00remove_diacritics\x00unicode61\x00al\x00ance\x00ence\x00er\x00ic\x00able\x00ible\x00ant\x00ement\x00ment\x00ent\x00ion\x00ou\x00ism\x00ate\x00iti\x00ous\x00ive\x00ize\x00at\x00bl\x00ble\x00iz\x00ational\x00tional\x00tion\x00enci\x00anci\x00izer\x00logi\x00bli\x00alli\x00entli\x00eli\x00e\x00ousli\x00ization\x00ation\x00ator\x00alism\x00iveness\x00fulness\x00ful\x00ousness\x00aliti\x00iviti\x00biliti\x00ical\x00ness\x00icate\x00iciti\x00ative\x00alize\x00eed\x00ee\x00ed\x00ing\x00case_sensitive\x00ascii\x00porter\x00trigram\x00col\x00row\x00instance\x00fts5vocab: unknown table type: %Q\x00CREATE TABlE vocab(term, col, doc, cnt)\x00CREATE TABlE vocab(term, doc, cnt)\x00CREATE TABlE vocab(term, doc, col, offset)\x00wrong number of vtable arguments\x00recursive definition for %s.%s\x00SELECT t.%Q FROM %Q.%Q AS t WHERE t.%Q MATCH '*id'\x00no such fts5 table: %s.%s\x00fts5vocab\x002023-03-22 11:56:21 0d1fc92f94cb6b76bffe3ec34d69cffde2924203304e8ffc4155597af0c191da\x00" var ts = (*reflect.StringHeader)(unsafe.Pointer(&ts1)).Data diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/sqlite/lib/sqlite_linux_amd64.go temporal-1.22.5/src/vendor/modernc.org/sqlite/lib/sqlite_linux_amd64.go --- temporal-1.21.5-1/src/vendor/modernc.org/sqlite/lib/sqlite_linux_amd64.go 2023-09-29 14:03:35.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/sqlite/lib/sqlite_linux_amd64.go 2024-02-23 09:46:16.000000000 +0000 @@ -1,4 +1,4 @@ -// Code generated by 'ccgo -DSQLITE_PRIVATE= -export-defines "" -export-enums "" -export-externs X -export-fields F -export-typedefs "" -ignore-unsupported-alignment -pkgname sqlite3 -volatile=sqlite3_io_error_pending,sqlite3_open_file_count,sqlite3_pager_readdb_count,sqlite3_pager_writedb_count,sqlite3_pager_writej_count,sqlite3_search_count,sqlite3_sort_count,saved_cnt,randomnessPid -o lib/sqlite_linux_amd64.go -trace-translation-units testdata/sqlite-amalgamation-3410000/sqlite3.c -full-path-comments -DNDEBUG -DHAVE_USLEEP -DLONGDOUBLE_TYPE=double -DSQLITE_CORE -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_FTS5 -DSQLITE_ENABLE_GEOPOLY -DSQLITE_ENABLE_MATH_FUNCTIONS -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_OFFSET_SQL_FUNC -DSQLITE_ENABLE_PREUPDATE_HOOK -DSQLITE_ENABLE_RBU -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_SESSION -DSQLITE_ENABLE_SNAPSHOT -DSQLITE_ENABLE_STAT4 -DSQLITE_ENABLE_UNLOCK_NOTIFY -DSQLITE_LIKE_DOESNT_MATCH_BLOBS -DSQLITE_MUTEX_APPDEF=1 -DSQLITE_MUTEX_NOOP -DSQLITE_SOUNDEX -DSQLITE_THREADSAFE=1 -DSQLITE_OS_UNIX=1', DO NOT EDIT. 
+// Code generated by 'ccgo -DSQLITE_PRIVATE= -export-defines "" -export-enums "" -export-externs X -export-fields F -export-typedefs "" -ignore-unsupported-alignment -pkgname sqlite3 -volatile=sqlite3_io_error_pending,sqlite3_open_file_count,sqlite3_pager_readdb_count,sqlite3_pager_writedb_count,sqlite3_pager_writej_count,sqlite3_search_count,sqlite3_sort_count,saved_cnt,randomnessPid -o lib/sqlite_linux_amd64.go -trace-translation-units testdata/sqlite-amalgamation-3410200/sqlite3.c -full-path-comments -DNDEBUG -DHAVE_USLEEP -DLONGDOUBLE_TYPE=double -DSQLITE_CORE -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_FTS5 -DSQLITE_ENABLE_GEOPOLY -DSQLITE_ENABLE_MATH_FUNCTIONS -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_OFFSET_SQL_FUNC -DSQLITE_ENABLE_PREUPDATE_HOOK -DSQLITE_ENABLE_RBU -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_SESSION -DSQLITE_ENABLE_SNAPSHOT -DSQLITE_ENABLE_STAT4 -DSQLITE_ENABLE_UNLOCK_NOTIFY -DSQLITE_LIKE_DOESNT_MATCH_BLOBS -DSQLITE_MUTEX_APPDEF=1 -DSQLITE_MUTEX_NOOP -DSQLITE_SOUNDEX -DSQLITE_THREADSAFE=1 -DSQLITE_OS_UNIX=1', DO NOT EDIT. package sqlite3 @@ -920,11 +920,11 @@ NC_OrderAgg = 0x8000000 NC_PartIdx = 0x000002 NC_SelfRef = 0x00002e + NC_Subquery = 0x000040 NC_UAggInfo = 0x000100 NC_UBaseReg = 0x000400 NC_UEList = 0x000080 NC_UUpsert = 0x000200 - NC_VarSelect = 0x000040 NDEBUG = 1 NN = 1 NOT_WITHIN = 0 @@ -2166,7 +2166,7 @@ SQLITE_SHM_UNLOCK = 1 SQLITE_SORTER_PMASZ = 250 SQLITE_SOUNDEX = 1 - SQLITE_SOURCE_ID = "2023-02-21 18:09:37 05941c2a04037fc3ed2ffae11f5d2260706f89431f463518740f72ada350866d" + SQLITE_SOURCE_ID = "2023-03-22 11:56:21 0d1fc92f94cb6b76bffe3ec34d69cffde2924203304e8ffc4155597af0c191da" SQLITE_SO_ASC = 0 SQLITE_SO_DESC = 1 SQLITE_SO_UNDEFINED = -1 @@ -2274,8 +2274,8 @@ SQLITE_UTF8 = 1 SQLITE_VDBEINT_H = 0 SQLITE_VDBE_H = 0 - SQLITE_VERSION = "3.41.0" - SQLITE_VERSION_NUMBER = 3041000 + SQLITE_VERSION = "3.41.2" + SQLITE_VERSION_NUMBER = 3041002 SQLITE_VTABRISK_High = 2 SQLITE_VTABRISK_Low = 0 SQLITE_VTABRISK_Normal = 1 @@ -6113,7 +6113,8 @@ FiIdxCur int32 FiIdxCol int32 FbMaybeNullRow U8 - F__ccgo_pad1 [3]byte + Faff U8 + F__ccgo_pad1 [2]byte FpIENext uintptr } @@ -6755,17 +6756,18 @@ // Handle type for pages. type PgHdr2 = struct { - FpPage uintptr - FpData uintptr - FpExtra uintptr - FpCache uintptr - FpDirty uintptr - FpPager uintptr - Fpgno Pgno - Fflags U16 - FnRef I16 - FpDirtyNext uintptr - FpDirtyPrev uintptr + FpPage uintptr + FpData uintptr + FpExtra uintptr + FpCache uintptr + FpDirty uintptr + FpPager uintptr + Fpgno Pgno + Fflags U16 + F__ccgo_pad1 [2]byte + FnRef I64 + FpDirtyNext uintptr + FpDirtyPrev uintptr } // Handle type for pages. 
@@ -6986,14 +6988,14 @@ FpDirty uintptr FpDirtyTail uintptr FpSynced uintptr - FnRefSum int32 + FnRefSum I64 FszCache int32 FszSpill int32 FszPage int32 FszExtra int32 FbPurgeable U8 FeCreate U8 - F__ccgo_pad1 [2]byte + F__ccgo_pad1 [6]byte FxStress uintptr FpStress uintptr FpCache uintptr @@ -7800,7 +7802,7 @@ _ = pMutex if op < 0 || op >= int32(uint64(unsafe.Sizeof([10]Sqlite3StatValueType{}))/uint64(unsafe.Sizeof(Sqlite3StatValueType(0)))) { - return Xsqlite3MisuseError(tls, 23229) + return Xsqlite3MisuseError(tls, 23233) } if statMutex[op] != 0 { pMutex = Xsqlite3Pcache1Mutex(tls) @@ -15883,7 +15885,7 @@ for p = (*UnixInodeInfo)(unsafe.Pointer(pInode)).FpUnused; p != 0; p = pNext { pNext = (*UnixUnusedFd)(unsafe.Pointer(p)).FpNext - robust_close(tls, pFile, (*UnixUnusedFd)(unsafe.Pointer(p)).Ffd, 38271) + robust_close(tls, pFile, (*UnixUnusedFd)(unsafe.Pointer(p)).Ffd, 38275) Xsqlite3_free(tls, p) } (*UnixInodeInfo)(unsafe.Pointer(pInode)).FpUnused = uintptr(0) @@ -16360,7 +16362,7 @@ var pFile uintptr = id unixUnmapfile(tls, pFile) if (*UnixFile)(unsafe.Pointer(pFile)).Fh >= 0 { - robust_close(tls, pFile, (*UnixFile)(unsafe.Pointer(pFile)).Fh, 39055) + robust_close(tls, pFile, (*UnixFile)(unsafe.Pointer(pFile)).Fh, 39059) (*UnixFile)(unsafe.Pointer(pFile)).Fh = -1 } @@ -16651,7 +16653,7 @@ if fd >= 0 { return SQLITE_OK } - return unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 40676), ts+3378, bp+8, 40676) + return unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 40680), ts+3378, bp+8, 40680) } func unixSync(tls *libc.TLS, id uintptr, flags int32) int32 { @@ -16668,14 +16670,14 @@ if rc != 0 { storeLastErrno(tls, pFile, *(*int32)(unsafe.Pointer(libc.X__errno_location(tls)))) - return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(4)<<8, ts+3666, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40717) + return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(4)<<8, ts+3666, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40721) } if int32((*UnixFile)(unsafe.Pointer(pFile)).FctrlFlags)&UNIXFILE_DIRSYNC != 0 { rc = (*(*func(*libc.TLS, uintptr, uintptr) int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 17*24 + 8)))(tls, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, bp) if rc == SQLITE_OK { full_fsync(tls, *(*int32)(unsafe.Pointer(bp)), 0, 0) - robust_close(tls, pFile, *(*int32)(unsafe.Pointer(bp)), 40731) + robust_close(tls, pFile, *(*int32)(unsafe.Pointer(bp)), 40735) } else { rc = SQLITE_OK } @@ -16695,7 +16697,7 @@ rc = robust_ftruncate(tls, (*UnixFile)(unsafe.Pointer(pFile)).Fh, nByte) if rc != 0 { storeLastErrno(tls, pFile, *(*int32)(unsafe.Pointer(libc.X__errno_location(tls)))) - return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(6)<<8, ts+3297, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40762) + return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(6)<<8, ts+3297, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40766) } else { if nByte < (*UnixFile)(unsafe.Pointer(pFile)).FmmapSize { (*UnixFile)(unsafe.Pointer(pFile)).FmmapSize = nByte @@ -16763,7 +16765,7 @@ if (*UnixFile)(unsafe.Pointer(pFile)).FszChunk <= 0 { if robust_ftruncate(tls, (*UnixFile)(unsafe.Pointer(pFile)).Fh, nByte) != 0 { storeLastErrno(tls, pFile, *(*int32)(unsafe.Pointer(libc.X__errno_location(tls)))) - return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(6)<<8, ts+3297, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40883) + return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(6)<<8, ts+3297, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40887) } } @@ -16990,7 +16992,7 @@ } Xsqlite3_free(tls, (*UnixShmNode)(unsafe.Pointer(p)).FapRegion) if 
(*UnixShmNode)(unsafe.Pointer(p)).FhShm >= 0 { - robust_close(tls, pFd, (*UnixShmNode)(unsafe.Pointer(p)).FhShm, 41442) + robust_close(tls, pFd, (*UnixShmNode)(unsafe.Pointer(p)).FhShm, 41446) (*UnixShmNode)(unsafe.Pointer(p)).FhShm = -1 } (*UnixInodeInfo)(unsafe.Pointer((*UnixShmNode)(unsafe.Pointer(p)).FpInode)).FpShmNode = uintptr(0) @@ -17018,7 +17020,7 @@ rc = unixShmSystemLock(tls, pDbFd, F_WRLCK, (22+SQLITE_SHM_NLOCK)*4+SQLITE_SHM_NLOCK, 1) if rc == SQLITE_OK && robust_ftruncate(tls, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FhShm, int64(3)) != 0 { - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(18)<<8, ts+3297, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename, 41499) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(18)<<8, ts+3297, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename, 41503) } } } else if int32((*flock)(unsafe.Pointer(bp+8)).Fl_type) == F_WRLCK { @@ -17117,7 +17119,7 @@ if !((*unixShmNode)(unsafe.Pointer(pShmNode)).FhShm < 0) { goto __10 } - rc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 41624), ts+3261, zShm, 41624) + rc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 41628), ts+3261, zShm, 41628) goto shm_open_err __10: ; @@ -17247,7 +17249,7 @@ goto __14 } zFile = (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(19)<<8, ts+3332, zFile, 41768) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(19)<<8, ts+3332, zFile, 41772) goto shmpage_out __14: ; @@ -17293,7 +17295,7 @@ if !(pMem == libc.UintptrFromInt32(-1)) { goto __20 } - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(21)<<8, ts+3419, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename, 41795) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(21)<<8, ts+3419, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename, 41799) goto shmpage_out __20: ; @@ -17524,7 +17526,7 @@ if pNew == libc.UintptrFromInt32(-1) { pNew = uintptr(0) nNew = int64(0) - unixLogErrorAtLine(tls, SQLITE_OK, zErr, (*UnixFile)(unsafe.Pointer(pFd)).FzPath, 42169) + unixLogErrorAtLine(tls, SQLITE_OK, zErr, (*UnixFile)(unsafe.Pointer(pFd)).FzPath, 42173) (*UnixFile)(unsafe.Pointer(pFd)).FmmapSizeMax = int64(0) } @@ -17658,7 +17660,7 @@ unixEnterMutex(tls) rc = findInodeInfo(tls, pNew, pNew+16) if rc != SQLITE_OK { - robust_close(tls, pNew, h, 42672) + robust_close(tls, pNew, h, 42676) h = -1 } unixLeaveMutex(tls) @@ -17679,7 +17681,7 @@ storeLastErrno(tls, pNew, 0) if rc != SQLITE_OK { if h >= 0 { - robust_close(tls, pNew, h, 42757) + robust_close(tls, pNew, h, 42761) } } else { (*Sqlite3_file)(unsafe.Pointer(pId)).FpMethods = pLockingStyle @@ -17995,7 +17997,7 @@ if !(fd < 0) { goto __19 } - rc2 = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43198), ts+3261, zName, 43198) + rc2 = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43202), ts+3261, zName, 43202) if !(rc == SQLITE_OK) { goto __20 } @@ -18086,7 +18088,7 @@ if *(*int32)(unsafe.Pointer(libc.X__errno_location(tls))) == ENOENT { rc = SQLITE_IOERR | int32(23)<<8 } else { - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(10)<<8, ts+3371, zPath, 43337) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(10)<<8, ts+3371, zPath, 43341) } return rc } @@ -18094,9 +18096,9 @@ rc = (*(*func(*libc.TLS, uintptr, uintptr) int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 17*24 + 8)))(tls, zPath, bp) if rc == SQLITE_OK { if full_fsync(tls, *(*int32)(unsafe.Pointer(bp)), 0, 0) != 0 { - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(5)<<8, ts+3788, zPath, 43347) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(5)<<8, 
ts+3788, zPath, 43351) } - robust_close(tls, uintptr(0), *(*int32)(unsafe.Pointer(bp)), 43349) + robust_close(tls, uintptr(0), *(*int32)(unsafe.Pointer(bp)), 43353) } else { rc = SQLITE_OK } @@ -18160,18 +18162,18 @@ zIn = (*DbPath)(unsafe.Pointer(pPath)).FzOut if (*(*func(*libc.TLS, uintptr, uintptr) int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 27*24 + 8)))(tls, zIn, bp) != 0 { if *(*int32)(unsafe.Pointer(libc.X__errno_location(tls))) != ENOENT { - (*DbPath)(unsafe.Pointer(pPath)).Frc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43443), ts+3459, zIn, 43443) + (*DbPath)(unsafe.Pointer(pPath)).Frc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43447), ts+3459, zIn, 43447) } } else if (*stat)(unsafe.Pointer(bp)).Fst_mode&X__mode_t(0170000) == X__mode_t(0120000) { var got Ssize_t if libc.PostIncInt32(&(*DbPath)(unsafe.Pointer(pPath)).FnSymlink, 1) > SQLITE_MAX_SYMLINK { - (*DbPath)(unsafe.Pointer(pPath)).Frc = Xsqlite3CantopenError(tls, 43449) + (*DbPath)(unsafe.Pointer(pPath)).Frc = Xsqlite3CantopenError(tls, 43453) return } got = (*(*func(*libc.TLS, uintptr, uintptr, Size_t) Ssize_t)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls, zIn, bp+144, uint64(unsafe.Sizeof([4098]int8{}))-uint64(2)) if got <= int64(0) || got >= Ssize_t(unsafe.Sizeof([4098]int8{}))-int64(2) { - (*DbPath)(unsafe.Pointer(pPath)).Frc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43454), ts+3450, zIn, 43454) + (*DbPath)(unsafe.Pointer(pPath)).Frc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43458), ts+3450, zIn, 43458) return } *(*int8)(unsafe.Pointer(bp + 144 + uintptr(got))) = int8(0) @@ -18211,14 +18213,14 @@ (*DbPath)(unsafe.Pointer(bp + 4104)).FzOut = zOut if int32(*(*int8)(unsafe.Pointer(zPath))) != '/' { if (*(*func(*libc.TLS, uintptr, Size_t) uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 3*24 + 8)))(tls, bp, uint64(unsafe.Sizeof([4098]int8{}))-uint64(2)) == uintptr(0) { - return unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43512), ts+3279, zPath, 43512) + return unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43516), ts+3279, zPath, 43516) } appendAllPathElements(tls, bp+4104, bp) } appendAllPathElements(tls, bp+4104, zPath) *(*int8)(unsafe.Pointer(zOut + uintptr((*DbPath)(unsafe.Pointer(bp+4104)).FnUsed))) = int8(0) if (*DbPath)(unsafe.Pointer(bp+4104)).Frc != 0 || (*DbPath)(unsafe.Pointer(bp+4104)).FnUsed < 2 { - return Xsqlite3CantopenError(tls, 43518) + return Xsqlite3CantopenError(tls, 43522) } if (*DbPath)(unsafe.Pointer(bp+4104)).FnSymlink != 0 { return SQLITE_OK | int32(2)<<8 @@ -18319,7 +18321,7 @@ for __ccgo := true; __ccgo; __ccgo = got < 0 && *(*int32)(unsafe.Pointer(libc.X__errno_location(tls))) == EINTR { got = int32((*(*func(*libc.TLS, int32, uintptr, Size_t) Ssize_t)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 8*24 + 8)))(tls, fd, zBuf, uint64(nBuf))) } - robust_close(tls, uintptr(0), fd, 43619) + robust_close(tls, uintptr(0), fd, 43623) } } @@ -19752,7 +19754,7 @@ libc.Xmemset(tls, pPgHdr+32, 0, uint64(unsafe.Sizeof(PgHdr{}))-uint64(uintptr(0)+32)) (*PgHdr)(unsafe.Pointer(pPgHdr)).FpPage = pPage (*PgHdr)(unsafe.Pointer(pPgHdr)).FpData = (*Sqlite3_pcache_page)(unsafe.Pointer(pPage)).FpBuf - (*PgHdr)(unsafe.Pointer(pPgHdr)).FpExtra = pPgHdr + 1*72 + (*PgHdr)(unsafe.Pointer(pPgHdr)).FpExtra = pPgHdr + 1*80 libc.Xmemset(tls, (*PgHdr)(unsafe.Pointer(pPgHdr)).FpExtra, 0, uint64(8)) (*PgHdr)(unsafe.Pointer(pPgHdr)).FpCache = pCache (*PgHdr)(unsafe.Pointer(pPgHdr)).Fpgno = pgno @@ -19782,7 +19784,7 @@ // 
reference count drops to 0, then it is made eligible for recycling. func Xsqlite3PcacheRelease(tls *libc.TLS, p uintptr) { (*PCache)(unsafe.Pointer((*PgHdr)(unsafe.Pointer(p)).FpCache)).FnRefSum-- - if int32(libc.PreDecInt16(&(*PgHdr)(unsafe.Pointer(p)).FnRef, 1)) == 0 { + if libc.PreDecInt64(&(*PgHdr)(unsafe.Pointer(p)).FnRef, 1) == int64(0) { if int32((*PgHdr)(unsafe.Pointer(p)).Fflags)&PGHDR_CLEAN != 0 { pcacheUnpin(tls, p) } else { @@ -19833,7 +19835,7 @@ *(*U16)(unsafe.Pointer(p + 52)) &= libc.Uint16FromInt32(libc.CplInt32(PGHDR_DIRTY | PGHDR_NEED_SYNC | PGHDR_WRITEABLE)) *(*U16)(unsafe.Pointer(p + 52)) |= U16(PGHDR_CLEAN) - if int32((*PgHdr)(unsafe.Pointer(p)).FnRef) == 0 { + if (*PgHdr)(unsafe.Pointer(p)).FnRef == int64(0) { pcacheUnpin(tls, p) } } @@ -19937,8 +19939,8 @@ } func pcacheMergeDirtyList(tls *libc.TLS, pA uintptr, pB uintptr) uintptr { - bp := tls.Alloc(72) - defer tls.Free(72) + bp := tls.Alloc(80) + defer tls.Free(80) var pTail uintptr pTail = bp @@ -20016,13 +20018,13 @@ // // This is not the total number of pages referenced, but the sum of the // reference count for all pages. -func Xsqlite3PcacheRefCount(tls *libc.TLS, pCache uintptr) int32 { +func Xsqlite3PcacheRefCount(tls *libc.TLS, pCache uintptr) I64 { return (*PCache)(unsafe.Pointer(pCache)).FnRefSum } // Return the number of references to the page supplied as an argument. -func Xsqlite3PcachePageRefcount(tls *libc.TLS, p uintptr) int32 { - return int32((*PgHdr)(unsafe.Pointer(p)).FnRef) +func Xsqlite3PcachePageRefcount(tls *libc.TLS, p uintptr) I64 { + return (*PgHdr)(unsafe.Pointer(p)).FnRef } // Return the total number of pages in the cache. @@ -22316,7 +22318,7 @@ pPg = Xsqlite3PagerLookup(tls, pPager, iPg) if pPg != 0 { - if Xsqlite3PcachePageRefcount(tls, pPg) == 1 { + if Xsqlite3PcachePageRefcount(tls, pPg) == int64(1) { Xsqlite3PcacheDrop(tls, pPg) } else { rc = readDbPage(tls, pPg) @@ -22749,7 +22751,7 @@ var pageSize U32 = *(*U32)(unsafe.Pointer(pPageSize)) if (int32((*Pager)(unsafe.Pointer(pPager)).FmemDb) == 0 || (*Pager)(unsafe.Pointer(pPager)).FdbSize == Pgno(0)) && - Xsqlite3PcacheRefCount(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache) == 0 && + Xsqlite3PcacheRefCount(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache) == int64(0) && pageSize != 0 && pageSize != U32((*Pager)(unsafe.Pointer(pPager)).FpageSize) { var pNew uintptr = uintptr(0) *(*I64)(unsafe.Pointer(bp)) = int64(0) @@ -22901,9 +22903,9 @@ Xsqlite3OsUnfetch(tls, (*Pager)(unsafe.Pointer(pPager)).Ffd, I64(pgno-Pgno(1))*(*Pager)(unsafe.Pointer(pPager)).FpageSize, pData) return SQLITE_NOMEM } - (*PgHdr)(unsafe.Pointer(p)).FpExtra = p + 1*72 + (*PgHdr)(unsafe.Pointer(p)).FpExtra = p + 1*80 (*PgHdr)(unsafe.Pointer(p)).Fflags = U16(PGHDR_MMAP) - (*PgHdr)(unsafe.Pointer(p)).FnRef = int16(1) + (*PgHdr)(unsafe.Pointer(p)).FnRef = int64(1) (*PgHdr)(unsafe.Pointer(p)).FpPager = pPager } @@ -23235,7 +23237,7 @@ for rc == SQLITE_OK && pList != 0 { var pNext uintptr = (*PgHdr)(unsafe.Pointer(pList)).FpDirty - if int32((*PgHdr)(unsafe.Pointer(pList)).FnRef) == 0 { + if (*PgHdr)(unsafe.Pointer(pList)).FnRef == int64(0) { rc = pagerStress(tls, pPager, pList) } pList = pNext @@ -23385,7 +23387,7 @@ goto __12 } - rc = Xsqlite3CantopenError(tls, 60235) + rc = Xsqlite3CantopenError(tls, 60239) __12: ; if !(rc != SQLITE_OK) { @@ -23766,7 +23768,7 @@ if !(rc == SQLITE_OK && *(*int32)(unsafe.Pointer(bp + 8))&SQLITE_OPEN_READONLY != 0) { goto __10 } - rc = Xsqlite3CantopenError(tls, 60754) + rc = Xsqlite3CantopenError(tls, 60758) Xsqlite3OsClose(tls, 
(*Pager)(unsafe.Pointer(pPager)).Fjfd) __10: ; @@ -23872,7 +23874,7 @@ } func pagerUnlockIfUnused(tls *libc.TLS, pPager uintptr) { - if Xsqlite3PcacheRefCount(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache) == 0 { + if Xsqlite3PcacheRefCount(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache) == int64(0) { pagerUnlockAndRollback(tls, pPager) } } @@ -23890,7 +23892,7 @@ if !(pgno == Pgno(0)) { goto __1 } - return Xsqlite3CorruptError(tls, 60967) + return Xsqlite3CorruptError(tls, 60971) __1: ; *(*uintptr)(unsafe.Pointer(bp)) = Xsqlite3PcacheFetch(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache, pgno, 3) @@ -23929,7 +23931,7 @@ if !(pgno == (*Pager)(unsafe.Pointer(pPager)).FlckPgno) { goto __7 } - rc = Xsqlite3CorruptError(tls, 60999) + rc = Xsqlite3CorruptError(tls, 61003) goto pager_acquire_err __7: ; @@ -24006,7 +24008,7 @@ (int32((*Pager)(unsafe.Pointer(pPager)).FeState) == PAGER_READER || flags&PAGER_GET_READONLY != 0)) if pgno <= Pgno(1) && pgno == Pgno(0) { - return Xsqlite3CorruptError(tls, 61078) + return Xsqlite3CorruptError(tls, 61082) } if bMmapOk != 0 && (*Pager)(unsafe.Pointer(pPager)).FpWal != uintptr(0) { @@ -24764,7 +24766,7 @@ // Return the number of references to the specified page. func Xsqlite3PagerPageRefcount(tls *libc.TLS, pPage uintptr) int32 { - return Xsqlite3PcachePageRefcount(tls, pPage) + return int32(Xsqlite3PcachePageRefcount(tls, pPage)) } // Parameter eStat must be one of SQLITE_DBSTATUS_CACHE_HIT, _MISS, _WRITE, @@ -25007,9 +25009,9 @@ pPgOld = Xsqlite3PagerLookup(tls, pPager, pgno) if pPgOld != 0 { - if int32((*PgHdr)(unsafe.Pointer(pPgOld)).FnRef) > 1 { + if (*PgHdr)(unsafe.Pointer(pPgOld)).FnRef > int64(1) { Xsqlite3PagerUnrefNotNull(tls, pPgOld) - return Xsqlite3CorruptError(tls, 62623) + return Xsqlite3CorruptError(tls, 62627) } *(*U16)(unsafe.Pointer(pPg + 52)) |= U16(int32((*PgHdr)(unsafe.Pointer(pPgOld)).Fflags) & PGHDR_NEED_SYNC) if (*Pager)(unsafe.Pointer(pPager)).FtempFile != 0 { @@ -25766,7 +25768,7 @@ nCollide = idx for iKey = walHash(tls, iPage); *(*Ht_slot)(unsafe.Pointer((*WalHashLoc)(unsafe.Pointer(bp)).FaHash + uintptr(iKey)*2)) != 0; iKey = walNextHash(tls, iKey) { if libc.PostDecInt32(&nCollide, 1) == 0 { - return Xsqlite3CorruptError(tls, 64387) + return Xsqlite3CorruptError(tls, 64391) } } *(*U32)(unsafe.Pointer((*WalHashLoc)(unsafe.Pointer(bp)).FaPgno + uintptr(idx-1)*4)) = iPage @@ -25865,7 +25867,7 @@ if !(version != U32(WAL_MAX_VERSION)) { goto __7 } - rc = Xsqlite3CantopenError(tls, 64519) + rc = Xsqlite3CantopenError(tls, 64523) goto finished __7: ; @@ -26451,7 +26453,7 @@ goto __14 } - rc = Xsqlite3CorruptError(tls, 65333) + rc = Xsqlite3CorruptError(tls, 65337) goto __15 __14: Xsqlite3OsFileControlHint(tls, (*Wal)(unsafe.Pointer(pWal)).FpDbFd, SQLITE_FCNTL_SIZE_HINT, bp+16) @@ -26726,7 +26728,7 @@ } if badHdr == 0 && (*Wal)(unsafe.Pointer(pWal)).Fhdr.FiVersion != U32(WALINDEX_MAX_VERSION) { - rc = Xsqlite3CantopenError(tls, 65682) + rc = Xsqlite3CantopenError(tls, 65686) } if (*Wal)(unsafe.Pointer(pWal)).FbShmUnreliable != 0 { if rc != SQLITE_OK { @@ -27199,7 +27201,7 @@ iRead = iFrame } if libc.PostDecInt32(&nCollide, 1) == 0 { - return Xsqlite3CorruptError(tls, 66419) + return Xsqlite3CorruptError(tls, 66423) } iKey = walNextHash(tls, iKey) } @@ -27704,7 +27706,7 @@ if rc == SQLITE_OK { if (*Wal)(unsafe.Pointer(pWal)).Fhdr.FmxFrame != 0 && walPagesize(tls, pWal) != nBuf { - rc = Xsqlite3CorruptError(tls, 67138) + rc = Xsqlite3CorruptError(tls, 67142) } else { rc = walCheckpoint(tls, pWal, db, eMode2, xBusy2, pBusyArg, sync_flags, 
zBuf) } @@ -28362,7 +28364,7 @@ } Xsqlite3VdbeRecordUnpack(tls, pKeyInfo, int32(nKey), pKey, pIdxKey) if int32((*UnpackedRecord)(unsafe.Pointer(pIdxKey)).FnField) == 0 || int32((*UnpackedRecord)(unsafe.Pointer(pIdxKey)).FnField) > int32((*KeyInfo)(unsafe.Pointer(pKeyInfo)).FnAllField) { - rc = Xsqlite3CorruptError(tls, 69249) + rc = Xsqlite3CorruptError(tls, 69253) } else { rc = Xsqlite3BtreeIndexMoveto(tls, pCur, pIdxKey, pRes) } @@ -28499,7 +28501,7 @@ if !(key == Pgno(0)) { goto __2 } - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69430) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69434) return __2: ; @@ -28516,7 +28518,7 @@ goto __4 } - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69443) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69447) goto ptrmap_exit __4: ; @@ -28524,7 +28526,7 @@ if !(offset < 0) { goto __5 } - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69448) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69452) goto ptrmap_exit __5: ; @@ -28567,7 +28569,7 @@ offset = int32(Pgno(5) * (key - Pgno(iPtrmap) - Pgno(1))) if offset < 0 { Xsqlite3PagerUnref(tls, *(*uintptr)(unsafe.Pointer(bp))) - return Xsqlite3CorruptError(tls, 69493) + return Xsqlite3CorruptError(tls, 69497) } *(*U8)(unsafe.Pointer(pEType)) = *(*U8)(unsafe.Pointer(pPtrmap + uintptr(offset))) @@ -28577,7 +28579,7 @@ Xsqlite3PagerUnref(tls, *(*uintptr)(unsafe.Pointer(bp))) if int32(*(*U8)(unsafe.Pointer(pEType))) < 1 || int32(*(*U8)(unsafe.Pointer(pEType))) > 5 { - return Xsqlite3CorruptError(tls, 69501) + return Xsqlite3CorruptError(tls, 69505) } return SQLITE_OK } @@ -28827,7 +28829,7 @@ if U32((*CellInfo)(unsafe.Pointer(bp)).FnLocal) < (*CellInfo)(unsafe.Pointer(bp)).FnPayload { var ovfl Pgno if Uptr((*MemPage)(unsafe.Pointer(pSrc)).FaDataEnd) >= Uptr(pCell) && Uptr((*MemPage)(unsafe.Pointer(pSrc)).FaDataEnd) < Uptr(pCell+uintptr((*CellInfo)(unsafe.Pointer(bp)).FnLocal)) { - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69893) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69897) return } ovfl = Xsqlite3Get4byte(tls, pCell+uintptr(int32((*CellInfo)(unsafe.Pointer(bp)).FnSize)-4)) @@ -28874,7 +28876,7 @@ if !(iFree > usableSize-4) { goto __2 } - return Xsqlite3CorruptError(tls, 69951) + return Xsqlite3CorruptError(tls, 69955) __2: ; if !(iFree != 0) { @@ -28884,7 +28886,7 @@ if !(iFree2 > usableSize-4) { goto __4 } - return Xsqlite3CorruptError(tls, 69954) + return Xsqlite3CorruptError(tls, 69958) __4: ; if !(0 == iFree2 || int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree2)))) == 0 && int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree2+1)))) == 0) { @@ -28897,7 +28899,7 @@ if !(top >= iFree) { goto __6 } - return Xsqlite3CorruptError(tls, 69962) + return Xsqlite3CorruptError(tls, 69966) __6: ; if !(iFree2 != 0) { @@ -28906,14 +28908,14 @@ if !(iFree+sz > iFree2) { goto __9 } - return Xsqlite3CorruptError(tls, 69965) + return Xsqlite3CorruptError(tls, 69969) __9: ; sz2 = int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree2+2))))<<8 | int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree2+2) + 1))) if !(iFree2+sz2 > usableSize) { goto __10 } - return Xsqlite3CorruptError(tls, 69967) + return Xsqlite3CorruptError(tls, 69971) __10: ; libc.Xmemmove(tls, data+uintptr(iFree+sz+sz2), data+uintptr(iFree+sz), uint64(iFree2-(iFree+sz))) @@ -28923,7 +28925,7 @@ if !(iFree+sz > usableSize) { goto __11 } - return Xsqlite3CorruptError(tls, 69971) + return Xsqlite3CorruptError(tls, 69975) __11: ; __8: @@ -28987,7 
+28989,7 @@ if !(pc < iCellStart || pc > iCellLast) { goto __22 } - return Xsqlite3CorruptError(tls, 70004) + return Xsqlite3CorruptError(tls, 70008) __22: ; size = int32((*struct { @@ -28997,7 +28999,7 @@ if !(cbrk < iCellStart || pc+size > usableSize) { goto __23 } - return Xsqlite3CorruptError(tls, 70010) + return Xsqlite3CorruptError(tls, 70014) __23: ; *(*U8)(unsafe.Pointer(pAddr1)) = U8(cbrk >> 8) @@ -29019,7 +29021,7 @@ if !(int32(*(*uint8)(unsafe.Pointer(data + uintptr(hdr+7))))+cbrk-iCellFirst != (*MemPage)(unsafe.Pointer(pPage)).FnFree) { goto __24 } - return Xsqlite3CorruptError(tls, 70024) + return Xsqlite3CorruptError(tls, 70028) __24: ; *(*uint8)(unsafe.Pointer(data + uintptr(hdr+5))) = U8(cbrk >> 8) @@ -29054,7 +29056,7 @@ *(*U8)(unsafe.Pointer(aData + uintptr(hdr+7))) += U8(int32(U8(x))) return aData + uintptr(pc) } else if x+pc > maxPC { - *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70081) + *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70085) return uintptr(0) } else { *(*U8)(unsafe.Pointer(aData + uintptr(pc+2))) = U8(x >> 8) @@ -29067,13 +29069,13 @@ pc = int32(*(*U8)(unsafe.Pointer(pTmp)))<<8 | int32(*(*U8)(unsafe.Pointer(pTmp + 1))) if pc <= iAddr { if pc != 0 { - *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70096) + *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70100) } return uintptr(0) } } if pc > maxPC+nByte-4 { - *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70103) + *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70107) } return uintptr(0) } @@ -29098,7 +29100,7 @@ if top == 0 && (*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize == U32(65536) { top = 65536 } else { - return Xsqlite3CorruptError(tls, 70152) + return Xsqlite3CorruptError(tls, 70156) } } @@ -29109,7 +29111,7 @@ *(*int32)(unsafe.Pointer(pIdx)) = libc.AssignInt32(&g2, int32((int64(pSpace)-int64(data))/1)) if g2 <= gap { - return Xsqlite3CorruptError(tls, 70170) + return Xsqlite3CorruptError(tls, 70174) } else { return SQLITE_OK } @@ -29161,22 +29163,22 @@ if int32(iFreeBlk) == 0 { break } - return Xsqlite3CorruptError(tls, 70249) + return Xsqlite3CorruptError(tls, 70253) } iPtr = iFreeBlk } if U32(iFreeBlk) > (*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize-U32(4) { - return Xsqlite3CorruptError(tls, 70254) + return Xsqlite3CorruptError(tls, 70258) } if iFreeBlk != 0 && iEnd+U32(3) >= U32(iFreeBlk) { nFrag = U8(U32(iFreeBlk) - iEnd) if iEnd > U32(iFreeBlk) { - return Xsqlite3CorruptError(tls, 70266) + return Xsqlite3CorruptError(tls, 70270) } iEnd = U32(int32(iFreeBlk) + (int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(iFreeBlk)+2))))<<8 | int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(iFreeBlk)+2) + 1))))) if iEnd > (*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize { - return Xsqlite3CorruptError(tls, 70269) + return Xsqlite3CorruptError(tls, 70273) } iSize = U16(iEnd - U32(iStart)) iFreeBlk = U16(int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFreeBlk))))<<8 | int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFreeBlk) + 1)))) @@ -29186,7 +29188,7 @@ var iPtrEnd int32 = int32(iPtr) + (int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(iPtr)+2))))<<8 | int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(iPtr)+2) + 1)))) if iPtrEnd+3 >= int32(iStart) { if iPtrEnd > int32(iStart) { - return Xsqlite3CorruptError(tls, 70282) + return Xsqlite3CorruptError(tls, 70286) } nFrag = U8(int32(nFrag) + (int32(iStart) - iPtrEnd)) iSize = 
U16(iEnd - U32(iPtr)) @@ -29194,7 +29196,7 @@ } } if int32(nFrag) > int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+7)))) { - return Xsqlite3CorruptError(tls, 70288) + return Xsqlite3CorruptError(tls, 70292) } *(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+7))) -= uint8(int32(nFrag)) } @@ -29202,10 +29204,10 @@ x = U16(int32(*(*U8)(unsafe.Pointer(pTmp)))<<8 | int32(*(*U8)(unsafe.Pointer(pTmp + 1)))) if int32(iStart) <= int32(x) { if int32(iStart) < int32(x) { - return Xsqlite3CorruptError(tls, 70297) + return Xsqlite3CorruptError(tls, 70301) } if int32(iPtr) != int32(hdr)+1 { - return Xsqlite3CorruptError(tls, 70298) + return Xsqlite3CorruptError(tls, 70302) } *(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+1))) = U8(int32(iFreeBlk) >> 8) *(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+1) + 1)) = U8(iFreeBlk) @@ -29265,7 +29267,7 @@ (*MemPage)(unsafe.Pointer(pPage)).FxParseCell = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr) }{btreeParseCellPtrIndex})) - return Xsqlite3CorruptError(tls, 70357) + return Xsqlite3CorruptError(tls, 70361) } } else { (*MemPage)(unsafe.Pointer(pPage)).FchildPtrSize = U8(4) @@ -29301,7 +29303,7 @@ (*MemPage)(unsafe.Pointer(pPage)).FxParseCell = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr) }{btreeParseCellPtrIndex})) - return Xsqlite3CorruptError(tls, 70381) + return Xsqlite3CorruptError(tls, 70385) } } return SQLITE_OK @@ -29331,11 +29333,11 @@ var next U32 var size U32 if pc < top { - return Xsqlite3CorruptError(tls, 70432) + return Xsqlite3CorruptError(tls, 70436) } for 1 != 0 { if pc > iCellLast { - return Xsqlite3CorruptError(tls, 70437) + return Xsqlite3CorruptError(tls, 70441) } next = U32(int32(*(*U8)(unsafe.Pointer(data + uintptr(pc))))<<8 | int32(*(*U8)(unsafe.Pointer(data + uintptr(pc) + 1)))) size = U32(int32(*(*U8)(unsafe.Pointer(data + uintptr(pc+2))))<<8 | int32(*(*U8)(unsafe.Pointer(data + uintptr(pc+2) + 1)))) @@ -29346,15 +29348,15 @@ pc = int32(next) } if next > U32(0) { - return Xsqlite3CorruptError(tls, 70447) + return Xsqlite3CorruptError(tls, 70451) } if U32(pc)+size > uint32(usableSize) { - return Xsqlite3CorruptError(tls, 70451) + return Xsqlite3CorruptError(tls, 70455) } } if nFree > usableSize || nFree < iCellFirst { - return Xsqlite3CorruptError(tls, 70463) + return Xsqlite3CorruptError(tls, 70467) } (*MemPage)(unsafe.Pointer(pPage)).FnFree = int32(U16(nFree - iCellFirst)) return SQLITE_OK @@ -29382,14 +29384,14 @@ pc = int32(libc.X__builtin_bswap16(tls, *(*U16)(unsafe.Pointer(data + uintptr(cellOffset+i*2))))) if pc < iCellFirst || pc > iCellLast { - return Xsqlite3CorruptError(tls, 70494) + return Xsqlite3CorruptError(tls, 70498) } sz = int32((*struct { f func(*libc.TLS, uintptr, uintptr) U16 })(unsafe.Pointer(&struct{ uintptr }{(*MemPage)(unsafe.Pointer(pPage)).FxCellSize})).f(tls, pPage, data+uintptr(pc))) if pc+sz > usableSize { - return Xsqlite3CorruptError(tls, 70499) + return Xsqlite3CorruptError(tls, 70503) } } return SQLITE_OK @@ -29403,7 +29405,7 @@ data = (*MemPage)(unsafe.Pointer(pPage)).FaData + uintptr((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset) if decodeFlags(tls, pPage, int32(*(*U8)(unsafe.Pointer(data)))) != 0 { - return Xsqlite3CorruptError(tls, 70531) + return Xsqlite3CorruptError(tls, 70535) } (*MemPage)(unsafe.Pointer(pPage)).FmaskPage = U16((*BtShared)(unsafe.Pointer(pBt)).FpageSize - U32(1)) @@ -29415,7 +29417,7 @@ (*MemPage)(unsafe.Pointer(pPage)).FnCell = U16(int32(*(*U8)(unsafe.Pointer(data + 3)))<<8 | 
int32(*(*U8)(unsafe.Pointer(data + 3 + 1)))) if U32((*MemPage)(unsafe.Pointer(pPage)).FnCell) > ((*BtShared)(unsafe.Pointer(pBt)).FpageSize-U32(8))/U32(6) { - return Xsqlite3CorruptError(tls, 70545) + return Xsqlite3CorruptError(tls, 70549) } (*MemPage)(unsafe.Pointer(pPage)).FnFree = -1 @@ -29518,7 +29520,7 @@ if !(pgno > btreePagecount(tls, pBt)) { goto __1 } - rc = Xsqlite3CorruptError(tls, 70700) + rc = Xsqlite3CorruptError(tls, 70704) goto getAndInitPage_error1 __1: ; @@ -29546,7 +29548,7 @@ if !(pCur != 0 && (int32((*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppPage)))).FnCell) < 1 || int32((*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppPage)))).FintKey) != int32((*BtCursor)(unsafe.Pointer(pCur)).FcurIntKey))) { goto __5 } - rc = Xsqlite3CorruptError(tls, 70721) + rc = Xsqlite3CorruptError(tls, 70725) goto getAndInitPage_error2 __5: ; @@ -29585,7 +29587,7 @@ if Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppPage)))).FpDbPage) > 1 { releasePage(tls, *(*uintptr)(unsafe.Pointer(ppPage))) *(*uintptr)(unsafe.Pointer(ppPage)) = uintptr(0) - return Xsqlite3CorruptError(tls, 70787) + return Xsqlite3CorruptError(tls, 70791) } (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppPage)))).FisInit = U8(0) } else { @@ -30468,7 +30470,7 @@ if !(Xsqlite3WritableSchema(tls, (*BtShared)(unsafe.Pointer(pBt)).Fdb) == 0) { goto __18 } - rc = Xsqlite3CorruptError(tls, 71722) + rc = Xsqlite3CorruptError(tls, 71726) goto page1_init_failed goto __19 __18: @@ -30883,7 +30885,7 @@ if int32(eType) == PTRMAP_OVERFLOW2 { if Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer(pPage)).FaData) != iFrom { - return Xsqlite3CorruptError(tls, 72143) + return Xsqlite3CorruptError(tls, 72147) } Xsqlite3Put4byte(tls, (*MemPage)(unsafe.Pointer(pPage)).FaData, iTo) } else { @@ -30909,7 +30911,7 @@ })(unsafe.Pointer(&struct{ uintptr }{(*MemPage)(unsafe.Pointer(pPage)).FxParseCell})).f(tls, pPage, pCell, bp) if U32((*CellInfo)(unsafe.Pointer(bp)).FnLocal) < (*CellInfo)(unsafe.Pointer(bp)).FnPayload { if pCell+uintptr((*CellInfo)(unsafe.Pointer(bp)).FnSize) > (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr((*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize) { - return Xsqlite3CorruptError(tls, 72162) + return Xsqlite3CorruptError(tls, 72166) } if iFrom == Xsqlite3Get4byte(tls, pCell+uintptr((*CellInfo)(unsafe.Pointer(bp)).FnSize)-uintptr(4)) { Xsqlite3Put4byte(tls, pCell+uintptr((*CellInfo)(unsafe.Pointer(bp)).FnSize)-uintptr(4), iTo) @@ -30918,7 +30920,7 @@ } } else { if pCell+uintptr(4) > (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr((*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize) { - return Xsqlite3CorruptError(tls, 72171) + return Xsqlite3CorruptError(tls, 72175) } if Xsqlite3Get4byte(tls, pCell) == iFrom { Xsqlite3Put4byte(tls, pCell, iTo) @@ -30929,7 +30931,7 @@ if i == nCell { if int32(eType) != PTRMAP_BTREE || Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr(int32((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset)+8)) != iFrom { - return Xsqlite3CorruptError(tls, 72183) + return Xsqlite3CorruptError(tls, 72187) } Xsqlite3Put4byte(tls, (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr(int32((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset)+8), iTo) } @@ -30945,7 +30947,7 @@ var pPager uintptr = (*BtShared)(unsafe.Pointer(pBt)).FpPager if iDbPage < Pgno(3) { - return Xsqlite3CorruptError(tls, 72218) + return Xsqlite3CorruptError(tls, 72222) } *(*int32)(unsafe.Pointer(bp)) = 
Xsqlite3PagerMovepage(tls, pPager, (*MemPage)(unsafe.Pointer(pDbPage)).FpDbPage, iFreePage, isCommit) @@ -31006,7 +31008,7 @@ return rc } if int32(*(*U8)(unsafe.Pointer(bp))) == PTRMAP_ROOTPAGE { - return Xsqlite3CorruptError(tls, 72316) + return Xsqlite3CorruptError(tls, 72320) } if int32(*(*U8)(unsafe.Pointer(bp))) == PTRMAP_FREEPAGE { @@ -31041,7 +31043,7 @@ releasePage(tls, *(*uintptr)(unsafe.Pointer(bp + 32))) if *(*Pgno)(unsafe.Pointer(bp + 40)) > dbSize { releasePage(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) - return Xsqlite3CorruptError(tls, 72368) + return Xsqlite3CorruptError(tls, 72372) } } @@ -31101,7 +31103,7 @@ var nFin Pgno = finalDbSize(tls, pBt, nOrig, nFree) if nOrig < nFin || nFree >= nOrig { - rc = Xsqlite3CorruptError(tls, 72436) + rc = Xsqlite3CorruptError(tls, 72440) } else if nFree > Pgno(0) { rc = saveAllCursors(tls, pBt, uint32(0), uintptr(0)) if rc == SQLITE_OK { @@ -31140,7 +31142,7 @@ nOrig = btreePagecount(tls, pBt) if ptrmapPageno(tls, pBt, nOrig) == nOrig || nOrig == U32(Xsqlite3PendingByte)/(*BtShared)(unsafe.Pointer(pBt)).FpageSize+U32(1) { - return Xsqlite3CorruptError(tls, 72487) + return Xsqlite3CorruptError(tls, 72491) } nFree = Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer((*BtShared)(unsafe.Pointer(pBt)).FpPage1)).FaData+36) @@ -31171,7 +31173,7 @@ } nFin = finalDbSize(tls, pBt, nOrig, nVac) if nFin > nOrig { - return Xsqlite3CorruptError(tls, 72514) + return Xsqlite3CorruptError(tls, 72518) } if nFin < nOrig { rc = saveAllCursors(tls, pBt, uint32(0), uintptr(0)) @@ -31512,7 +31514,7 @@ if iTable <= Pgno(1) { if iTable < Pgno(1) { - return Xsqlite3CorruptError(tls, 72978) + return Xsqlite3CorruptError(tls, 72982) } else if btreePagecount(tls, pBt) == Pgno(0) { iTable = Pgno(0) } @@ -31756,14 +31758,14 @@ var pBt uintptr = (*BtCursor)(unsafe.Pointer(pCur)).FpBt if int32((*BtCursor)(unsafe.Pointer(pCur)).Fix) >= int32((*MemPage)(unsafe.Pointer(pPage)).FnCell) { - return Xsqlite3CorruptError(tls, 73385) + return Xsqlite3CorruptError(tls, 73389) } getCellInfo(tls, pCur) aPayload = (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload if Uptr((int64(aPayload)-int64((*MemPage)(unsafe.Pointer(pPage)).FaData))/1) > Uptr((*BtShared)(unsafe.Pointer(pBt)).FusableSize-U32((*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal)) { - return Xsqlite3CorruptError(tls, 73400) + return Xsqlite3CorruptError(tls, 73404) } if offset < U32((*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal) { @@ -31808,7 +31810,7 @@ for *(*Pgno)(unsafe.Pointer(bp)) != 0 { if *(*Pgno)(unsafe.Pointer(bp)) > (*BtShared)(unsafe.Pointer(pBt)).FnPage { - return Xsqlite3CorruptError(tls, 73462) + return Xsqlite3CorruptError(tls, 73466) } *(*Pgno)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pCur)).FaOverflow + uintptr(iIdx)*4)) = *(*Pgno)(unsafe.Pointer(bp)) @@ -31857,7 +31859,7 @@ } if rc == SQLITE_OK && amt > U32(0) { - return Xsqlite3CorruptError(tls, 73547) + return Xsqlite3CorruptError(tls, 73551) } return rc } @@ -31937,7 +31939,7 @@ func moveToChild(tls *libc.TLS, pCur uintptr, newPgno U32) int32 { if int32((*BtCursor)(unsafe.Pointer(pCur)).FiPage) >= BTCURSOR_MAX_DEPTH-1 { - return Xsqlite3CorruptError(tls, 73684) + return Xsqlite3CorruptError(tls, 73688) } (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnSize = U16(0) *(*U8)(unsafe.Pointer(pCur + 1)) &= libc.Uint8FromInt32(libc.CplInt32(BTCF_ValidNKey | BTCF_ValidOvfl)) @@ -32028,7 +32030,7 @@ if !(int32((*MemPage)(unsafe.Pointer(pRoot)).FisInit) == 0 || libc.Bool32((*BtCursor)(unsafe.Pointer(pCur)).FpKeyInfo == uintptr(0)) != 
int32((*MemPage)(unsafe.Pointer(pRoot)).FintKey)) { goto __11 } - return Xsqlite3CorruptError(tls, 73823) + return Xsqlite3CorruptError(tls, 73827) __11: ; skip_init: @@ -32048,7 +32050,7 @@ if !((*MemPage)(unsafe.Pointer(pRoot)).Fpgno != Pgno(1)) { goto __16 } - return Xsqlite3CorruptError(tls, 73835) + return Xsqlite3CorruptError(tls, 73839) __16: ; subpage = Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer(pRoot)).FaData+uintptr(int32((*MemPage)(unsafe.Pointer(pRoot)).FhdrOffset)+8)) @@ -32258,7 +32260,7 @@ if !(pCell >= (*MemPage)(unsafe.Pointer(pPage)).FaDataEnd) { goto __21 } - return Xsqlite3CorruptError(tls, 74077) + return Xsqlite3CorruptError(tls, 74081) __21: ; goto __19 @@ -32462,7 +32464,7 @@ if !!(int32((*MemPage)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pCur)).FpPage)).FisInit) != 0) { goto __4 } - return Xsqlite3CorruptError(tls, 74273) + return Xsqlite3CorruptError(tls, 74277) __4: ; goto bypass_moveto_root @@ -32527,7 +32529,7 @@ if !(nCell < 2 || U32(nCell)/(*BtShared)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pCur)).FpBt)).FusableSize > (*BtShared)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pCur)).FpBt)).FnPage) { goto __17 } - rc = Xsqlite3CorruptError(tls, 74360) + rc = Xsqlite3CorruptError(tls, 74364) goto moveto_index_finish __17: ; @@ -32575,7 +32577,7 @@ if !((*UnpackedRecord)(unsafe.Pointer(pIdxKey)).FerrCode != 0) { goto __24 } - rc = Xsqlite3CorruptError(tls, 74392) + rc = Xsqlite3CorruptError(tls, 74396) __24: ; goto moveto_index_finish @@ -32694,7 +32696,7 @@ pPage = (*BtCursor)(unsafe.Pointer(pCur)).FpPage idx = int32(libc.PreIncUint16(&(*BtCursor)(unsafe.Pointer(pCur)).Fix, 1)) if !(int32((*MemPage)(unsafe.Pointer(pPage)).FisInit) != 0) || Xsqlite3FaultSim(tls, 412) != 0 { - return Xsqlite3CorruptError(tls, 74508) + return Xsqlite3CorruptError(tls, 74512) } if idx >= int32((*MemPage)(unsafe.Pointer(pPage)).FnCell) { @@ -32854,7 +32856,7 @@ if !(n >= mxPage) { goto __1 } - return Xsqlite3CorruptError(tls, 74688) + return Xsqlite3CorruptError(tls, 74692) __1: ; if !(n > U32(0)) { @@ -32919,7 +32921,7 @@ if !(iTrunk > mxPage || libc.PostIncUint32(&nSearch, 1) > n) { goto __16 } - rc = Xsqlite3CorruptError(tls, 74744) + rc = Xsqlite3CorruptError(tls, 74748) goto __17 __16: rc = btreeGetUnusedPage(tls, pBt, iTrunk, bp+8, 0) @@ -32955,7 +32957,7 @@ goto __22 } - rc = Xsqlite3CorruptError(tls, 74773) + rc = Xsqlite3CorruptError(tls, 74777) goto end_allocate_page goto __23 __22: @@ -32999,7 +33001,7 @@ if !(iNewTrunk > mxPage) { goto __32 } - rc = Xsqlite3CorruptError(tls, 74807) + rc = Xsqlite3CorruptError(tls, 74811) goto end_allocate_page __32: ; @@ -33111,7 +33113,7 @@ if !(iPage > mxPage || iPage < Pgno(2)) { goto __51 } - rc = Xsqlite3CorruptError(tls, 74872) + rc = Xsqlite3CorruptError(tls, 74876) goto end_allocate_page __51: ; @@ -33269,7 +33271,7 @@ if !(iPage < Pgno(2) || iPage > (*BtShared)(unsafe.Pointer(pBt)).FnPage) { goto __1 } - return Xsqlite3CorruptError(tls, 74999) + return Xsqlite3CorruptError(tls, 75003) __1: ; if !(pMemPage != 0) { @@ -33326,7 +33328,7 @@ if !(iTrunk > btreePagecount(tls, pBt)) { goto __10 } - *(*int32)(unsafe.Pointer(bp + 8)) = Xsqlite3CorruptError(tls, 75046) + *(*int32)(unsafe.Pointer(bp + 8)) = Xsqlite3CorruptError(tls, 75050) goto freepage_out __10: ; @@ -33342,7 +33344,7 @@ if !(nLeaf > (*BtShared)(unsafe.Pointer(pBt)).FusableSize/U32(4)-U32(2)) { goto __12 } - *(*int32)(unsafe.Pointer(bp + 8)) = Xsqlite3CorruptError(tls, 75057) + *(*int32)(unsafe.Pointer(bp + 8)) = Xsqlite3CorruptError(tls, 75061) goto freepage_out __12: 
; @@ -33416,7 +33418,7 @@ var ovflPageSize U32 if pCell+uintptr((*CellInfo)(unsafe.Pointer(pInfo)).FnSize) > (*MemPage)(unsafe.Pointer(pPage)).FaDataEnd { - return Xsqlite3CorruptError(tls, 75146) + return Xsqlite3CorruptError(tls, 75150) } ovflPgno = Xsqlite3Get4byte(tls, pCell+uintptr((*CellInfo)(unsafe.Pointer(pInfo)).FnSize)-uintptr(4)) pBt = (*MemPage)(unsafe.Pointer(pPage)).FpBt @@ -33428,7 +33430,7 @@ *(*Pgno)(unsafe.Pointer(bp + 8)) = Pgno(0) *(*uintptr)(unsafe.Pointer(bp)) = uintptr(0) if ovflPgno < Pgno(2) || ovflPgno > btreePagecount(tls, pBt) { - return Xsqlite3CorruptError(tls, 75163) + return Xsqlite3CorruptError(tls, 75167) } if nOvfl != 0 { rc = getOverflowPage(tls, pBt, ovflPgno, bp, bp+8) @@ -33439,7 +33441,7 @@ if (*(*uintptr)(unsafe.Pointer(bp)) != 0 || libc.AssignPtrUintptr(bp, btreePageLookup(tls, pBt, ovflPgno)) != uintptr(0)) && Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FpDbPage) != 1 { - rc = Xsqlite3CorruptError(tls, 75183) + rc = Xsqlite3CorruptError(tls, 75187) } else { rc = freePage2(tls, pBt, *(*uintptr)(unsafe.Pointer(bp)), ovflPgno) } @@ -33604,7 +33606,7 @@ hdr = int32((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset) if pc+U32(sz) > (*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize { - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 75436) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 75440) return } rc = freeSpace(tls, pPage, uint16(pc), uint16(sz)) @@ -33883,12 +33885,12 @@ if Uptr(pCell) >= Uptr(aData+uintptr(j)) && Uptr(pCell) < Uptr(pEnd) { if Uptr(pCell+uintptr(sz)) > Uptr(pEnd) { - return Xsqlite3CorruptError(tls, 75737) + return Xsqlite3CorruptError(tls, 75741) } pCell = pTmp + uintptr((int64(pCell)-int64(aData))/1) } else if Uptr(pCell+uintptr(sz)) > Uptr(pSrcEnd) && Uptr(pCell) < Uptr(pSrcEnd) { - return Xsqlite3CorruptError(tls, 75742) + return Xsqlite3CorruptError(tls, 75746) } pData -= uintptr(sz) @@ -33896,7 +33898,7 @@ *(*U8)(unsafe.Pointer(pCellptr + 1)) = U8((int64(pData) - int64(aData)) / 1) pCellptr += uintptr(2) if pData < pCellptr { - return Xsqlite3CorruptError(tls, 75748) + return Xsqlite3CorruptError(tls, 75752) } libc.Xmemmove(tls, pData, pCell, uint64(sz)) @@ -33956,7 +33958,7 @@ if Uptr(*(*uintptr)(unsafe.Pointer((*CellArray)(unsafe.Pointer(pCArray)).FapCell + uintptr(i)*8))+uintptr(sz)) > Uptr(pEnd) && Uptr(*(*uintptr)(unsafe.Pointer((*CellArray)(unsafe.Pointer(pCArray)).FapCell + uintptr(i)*8))) < Uptr(pEnd) { - Xsqlite3CorruptError(tls, 75833) + Xsqlite3CorruptError(tls, 75837) return 1 } libc.Xmemmove(tls, pSlot, *(*uintptr)(unsafe.Pointer((*CellArray)(unsafe.Pointer(pCArray)).FapCell + uintptr(i)*8)), uint64(sz)) @@ -34045,7 +34047,7 @@ if !(nShift > nCell) { goto __2 } - return Xsqlite3CorruptError(tls, 75947) + return Xsqlite3CorruptError(tls, 75951) __2: ; libc.Xmemmove(tls, (*MemPage)(unsafe.Pointer(pPg)).FaCellIdx, (*MemPage)(unsafe.Pointer(pPg)).FaCellIdx+uintptr(nShift*2), uint64(nCell*2)) @@ -34161,7 +34163,7 @@ var pBt uintptr = (*MemPage)(unsafe.Pointer(pPage)).FpBt if int32((*MemPage)(unsafe.Pointer(pPage)).FnCell) == 0 { - return Xsqlite3CorruptError(tls, 76060) + return Xsqlite3CorruptError(tls, 76064) } *(*int32)(unsafe.Pointer(bp + 136)) = allocateBtreePage(tls, pBt, bp, bp+8, uint32(0), uint8(0)) @@ -34481,7 +34483,7 @@ if !(int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pOld)).FaData))) != int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp + 112)))).FaData)))) { goto 
__25 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76481) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76485) goto balance_cleanup __25: ; @@ -34492,7 +34494,7 @@ if !(limit < int32(*(*U16)(unsafe.Pointer(pOld + 28)))) { goto __27 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76505) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76509) goto balance_cleanup __27: ; @@ -34650,7 +34652,7 @@ if !(k > NB+2) { goto __55 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76606) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76610) goto balance_cleanup __55: ; @@ -34724,7 +34726,7 @@ }()) { goto __67 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76639) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76643) goto balance_cleanup __67: ; @@ -34787,7 +34789,7 @@ }()) { goto __75 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76683) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76687) goto balance_cleanup __75: ; @@ -34815,7 +34817,7 @@ *(*int32)(unsafe.Pointer(bp + 172)) == SQLITE_OK) { goto __81 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76716) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76720) __81: ; if !(*(*int32)(unsafe.Pointer(bp + 172)) != 0) { @@ -35076,7 +35078,7 @@ if !(Uptr(pSrcEnd) >= Uptr(pCell1) && Uptr(pSrcEnd) < Uptr(pCell1+uintptr(sz2))) { goto __121 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76916) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76920) goto balance_cleanup __121: ; @@ -35268,7 +35270,7 @@ if pOther != pCur && int32((*BtCursor)(unsafe.Pointer(pOther)).FeState) == CURSOR_VALID && (*BtCursor)(unsafe.Pointer(pOther)).FpPage == (*BtCursor)(unsafe.Pointer(pCur)).FpPage { - return Xsqlite3CorruptError(tls, 77146) + return Xsqlite3CorruptError(tls, 77150) } } return SQLITE_OK @@ -35306,7 +35308,7 @@ break } } else if Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(pPage)).FpDbPage) > 1 { - rc = Xsqlite3CorruptError(tls, 77206) + rc = Xsqlite3CorruptError(tls, 77210) } else { var pParent uintptr = *(*uintptr)(unsafe.Pointer(pCur + 144 + uintptr(iPage-1)*8)) var iIdx int32 = int32(*(*U16)(unsafe.Pointer(pCur + 88 + uintptr(iPage-1)*2))) @@ -35412,7 +35414,7 @@ return rc } if Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FpDbPage) != 1 || (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FisInit != 0 { - rc = Xsqlite3CorruptError(tls, 77370) + rc = Xsqlite3CorruptError(tls, 77374) } else { if U32(iOffset)+ovflPageSize < U32(nTotal) { ovflPgno = Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FaData) @@ -35437,7 +35439,7 @@ if (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload+uintptr((*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal) > (*MemPage)(unsafe.Pointer(pPage)).FaDataEnd || (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload < (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr((*MemPage)(unsafe.Pointer(pPage)).FcellOffset) { - return Xsqlite3CorruptError(tls, 77398) + return Xsqlite3CorruptError(tls, 77402) } if int32((*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal) == nTotal { return btreeOverwriteContent(tls, pPage, (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload, pX, @@ -35507,7 +35509,7 @@ goto __3 } - return Xsqlite3CorruptError(tls, 77479) + return Xsqlite3CorruptError(tls, 77483) __3: ; 
__1: @@ -35620,7 +35622,7 @@ goto __21 } - *(*int32)(unsafe.Pointer(bp + 120)) = Xsqlite3CorruptError(tls, 77602) + *(*int32)(unsafe.Pointer(bp + 120)) = Xsqlite3CorruptError(tls, 77606) goto __22 __21: *(*int32)(unsafe.Pointer(bp + 120)) = btreeComputeFreeSpace(tls, pPage) @@ -35680,6 +35682,7 @@ __25: ; idx = int32((*BtCursor)(unsafe.Pointer(pCur)).Fix) + (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnSize = U16(0) if !(*(*int32)(unsafe.Pointer(bp)) == 0) { goto __31 } @@ -35687,7 +35690,7 @@ if !(idx >= int32((*MemPage)(unsafe.Pointer(pPage)).FnCell)) { goto __33 } - return Xsqlite3CorruptError(tls, 77640) + return Xsqlite3CorruptError(tls, 77645) __33: ; *(*int32)(unsafe.Pointer(bp + 120)) = Xsqlite3PagerWrite(tls, (*MemPage)(unsafe.Pointer(pPage)).FpDbPage) @@ -35725,13 +35728,13 @@ if !(oldCell < (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset)+uintptr(10)) { goto __39 } - return Xsqlite3CorruptError(tls, 77667) + return Xsqlite3CorruptError(tls, 77672) __39: ; if !(oldCell+uintptr(*(*int32)(unsafe.Pointer(bp + 124))) > (*MemPage)(unsafe.Pointer(pPage)).FaDataEnd) { goto __40 } - return Xsqlite3CorruptError(tls, 77670) + return Xsqlite3CorruptError(tls, 77675) __40: ; libc.Xmemcpy(tls, oldCell, newCell, uint64(*(*int32)(unsafe.Pointer(bp + 124)))) @@ -35762,7 +35765,6 @@ ; *(*int32)(unsafe.Pointer(bp + 120)) = insertCell(tls, pPage, idx, newCell, *(*int32)(unsafe.Pointer(bp + 124)), uintptr(0), uint32(0)) - (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnSize = U16(0) if !((*MemPage)(unsafe.Pointer(pPage)).FnOverflow != 0) { goto __44 } @@ -35837,7 +35839,7 @@ nIn = U32((*BtCursor)(unsafe.Pointer(pSrc)).Finfo.FnLocal) aIn = (*BtCursor)(unsafe.Pointer(pSrc)).Finfo.FpPayload if aIn+uintptr(nIn) > (*MemPage)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pSrc)).FpPage)).FaDataEnd { - return Xsqlite3CorruptError(tls, 77773) + return Xsqlite3CorruptError(tls, 77777) } nRem = (*BtCursor)(unsafe.Pointer(pSrc)).Finfo.FnPayload if nIn == nRem && nIn < U32((*MemPage)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pDest)).FpPage)).FmaxLocal) { @@ -35862,7 +35864,7 @@ if nRem > nIn { if aIn+uintptr(nIn)+uintptr(4) > (*MemPage)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pSrc)).FpPage)).FaDataEnd { - return Xsqlite3CorruptError(tls, 77798) + return Xsqlite3CorruptError(tls, 77802) } ovflIn = Xsqlite3Get4byte(tls, (*BtCursor)(unsafe.Pointer(pSrc)).Finfo.FpPayload+uintptr(nIn)) } @@ -35963,7 +35965,7 @@ return *(*int32)(unsafe.Pointer(bp + 24)) } } else { - return Xsqlite3CorruptError(tls, 77894) + return Xsqlite3CorruptError(tls, 77898) } } @@ -35971,11 +35973,11 @@ iCellIdx = int32((*BtCursor)(unsafe.Pointer(pCur)).Fix) pPage = (*BtCursor)(unsafe.Pointer(pCur)).FpPage if int32((*MemPage)(unsafe.Pointer(pPage)).FnCell) <= iCellIdx { - return Xsqlite3CorruptError(tls, 77903) + return Xsqlite3CorruptError(tls, 77907) } pCell = (*MemPage)(unsafe.Pointer(pPage)).FaData + uintptr(int32((*MemPage)(unsafe.Pointer(pPage)).FmaskPage)&int32(libc.X__builtin_bswap16(tls, *(*U16)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FaCellIdx + uintptr(2*iCellIdx)))))) if (*MemPage)(unsafe.Pointer(pPage)).FnFree < 0 && btreeComputeFreeSpace(tls, pPage) != 0 { - return Xsqlite3CorruptError(tls, 77907) + return Xsqlite3CorruptError(tls, 77911) } bPreserve = U8(libc.Bool32(int32(flags)&BTREE_SAVEPOSITION != 0)) @@ -36050,7 +36052,7 @@ } pCell = (*MemPage)(unsafe.Pointer(pLeaf)).FaData + uintptr(int32((*MemPage)(unsafe.Pointer(pLeaf)).FmaskPage)&int32(libc.X__builtin_bswap16(tls, 
*(*U16)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pLeaf)).FaCellIdx + uintptr(2*(int32((*MemPage)(unsafe.Pointer(pLeaf)).FnCell)-1))))))) if pCell < (*MemPage)(unsafe.Pointer(pLeaf)).FaData+4 { - return Xsqlite3CorruptError(tls, 77998) + return Xsqlite3CorruptError(tls, 78002) } nCell = int32((*struct { f func(*libc.TLS, uintptr, uintptr) U16 @@ -36119,7 +36121,7 @@ Xsqlite3BtreeGetMeta(tls, p, BTREE_LARGEST_ROOT_PAGE, bp) if *(*Pgno)(unsafe.Pointer(bp)) > btreePagecount(tls, pBt) { - return Xsqlite3CorruptError(tls, 78114) + return Xsqlite3CorruptError(tls, 78118) } *(*Pgno)(unsafe.Pointer(bp))++ @@ -36148,7 +36150,7 @@ } *(*int32)(unsafe.Pointer(bp + 40)) = ptrmapGet(tls, pBt, *(*Pgno)(unsafe.Pointer(bp)), bp+32, bp+36) if int32(*(*U8)(unsafe.Pointer(bp + 32))) == PTRMAP_ROOTPAGE || int32(*(*U8)(unsafe.Pointer(bp + 32))) == PTRMAP_FREEPAGE { - *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3CorruptError(tls, 78162) + *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3CorruptError(tls, 78166) } if *(*int32)(unsafe.Pointer(bp + 40)) != SQLITE_OK { releasePage(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) @@ -36224,7 +36226,7 @@ if !(pgno > btreePagecount(tls, pBt)) { goto __1 } - return Xsqlite3CorruptError(tls, 78252) + return Xsqlite3CorruptError(tls, 78256) __1: ; *(*int32)(unsafe.Pointer(bp + 32)) = getAndInitPage(tls, pBt, pgno, bp, uintptr(0), 0) @@ -36238,7 +36240,7 @@ Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FpDbPage) != 1+libc.Bool32(pgno == Pgno(1))) { goto __3 } - *(*int32)(unsafe.Pointer(bp + 32)) = Xsqlite3CorruptError(tls, 78259) + *(*int32)(unsafe.Pointer(bp + 32)) = Xsqlite3CorruptError(tls, 78263) goto cleardatabasepage_out __3: ; @@ -36372,7 +36374,7 @@ var pBt uintptr = (*Btree)(unsafe.Pointer(p)).FpBt if iTable > btreePagecount(tls, pBt) { - return Xsqlite3CorruptError(tls, 78363) + return Xsqlite3CorruptError(tls, 78367) } *(*int32)(unsafe.Pointer(bp + 12)) = Xsqlite3BtreeClearTable(tls, p, int32(iTable), uintptr(0)) @@ -38842,7 +38844,7 @@ var rc int32 (*Mem)(unsafe.Pointer(pMem)).Fflags = U16(MEM_Null) if Xsqlite3BtreeMaxRecordSize(tls, pCur) < Sqlite3_int64(offset+amt) { - return Xsqlite3CorruptError(tls, 81630) + return Xsqlite3CorruptError(tls, 81634) } if SQLITE_OK == libc.AssignInt32(&rc, Xsqlite3VdbeMemClearAndResize(tls, pMem, int32(amt+U32(1)))) { rc = Xsqlite3BtreePayload(tls, pCur, offset, amt, (*Mem)(unsafe.Pointer(pMem)).Fz) @@ -39491,7 +39493,7 @@ return Xsqlite3GetVarint32(tls, a, bp) }()) if *(*int32)(unsafe.Pointer(bp)) > nRec || iHdr >= *(*int32)(unsafe.Pointer(bp)) { - return Xsqlite3CorruptError(tls, 82270) + return Xsqlite3CorruptError(tls, 82274) } iField = *(*int32)(unsafe.Pointer(bp)) for i = 0; i <= iCol; i++ { @@ -39506,14 +39508,14 @@ }()) if iHdr > *(*int32)(unsafe.Pointer(bp)) { - return Xsqlite3CorruptError(tls, 82276) + return Xsqlite3CorruptError(tls, 82280) } szField = int32(Xsqlite3VdbeSerialTypeLen(tls, *(*U32)(unsafe.Pointer(bp + 4)))) iField = iField + szField } if iField > nRec { - return Xsqlite3CorruptError(tls, 82282) + return Xsqlite3CorruptError(tls, 82286) } if pMem == uintptr(0) { pMem = libc.AssignPtrUintptr(ppVal, Xsqlite3ValueNew(tls, db)) @@ -41817,7 +41819,7 @@ return rc } if *(*int32)(unsafe.Pointer(bp)) != 0 { - return Xsqlite3CorruptError(tls, 86058) + return Xsqlite3CorruptError(tls, 86062) } (*VdbeCursor)(unsafe.Pointer(p)).FdeferredMoveto = U8(0) (*VdbeCursor)(unsafe.Pointer(p)).FcacheStatus = U32(CACHE_STALE) @@ -42368,7 +42370,7 @@ i = 0 } if d1 > uint32(nKey1) { - 
(*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 86985)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 86989)) return 0 } @@ -42433,7 +42435,7 @@ if d1+U32((*Mem)(unsafe.Pointer(bp+8)).Fn) > uint32(nKey1) || int32((*KeyInfo)(unsafe.Pointer(libc.AssignUintptr(&pKeyInfo, (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FpKeyInfo))).FnAllField) <= i { - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87062)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87066)) return 0 } else if *(*uintptr)(unsafe.Pointer(pKeyInfo + 32 + uintptr(i)*8)) != 0 { (*Mem)(unsafe.Pointer(bp + 8)).Fenc = (*KeyInfo)(unsafe.Pointer(pKeyInfo)).Fenc @@ -42467,7 +42469,7 @@ var nStr int32 = int32((*(*U32)(unsafe.Pointer(bp + 64)) - U32(12)) / U32(2)) if d1+U32(nStr) > uint32(nKey1) { - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87092)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87096)) return 0 } else if int32((*Mem)(unsafe.Pointer(pRhs)).Fflags)&MEM_Zero != 0 { if !(isAllZero(tls, aKey1+uintptr(d1), nStr) != 0) { @@ -42517,7 +42519,7 @@ } idx1 = idx1 + U32(Xsqlite3VarintLen(tls, uint64(*(*U32)(unsafe.Pointer(bp + 64))))) if idx1 >= *(*U32)(unsafe.Pointer(bp + 4)) { - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87136)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87140)) return 0 } } @@ -42663,7 +42665,7 @@ if !(szHdr+nStr > nKey1) { goto __7 } - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87299)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87303)) return 0 __7: ; @@ -42834,7 +42836,7 @@ idx_rowid_corruption: ; Xsqlite3VdbeMemReleaseMalloc(tls, bp) - return Xsqlite3CorruptError(tls, 87457) + return Xsqlite3CorruptError(tls, 87461) } // Compare the key of the index entry that cursor pC is pointing to against @@ -42860,7 +42862,7 @@ if nCellKey <= int64(0) || nCellKey > int64(0x7fffffff) { *(*int32)(unsafe.Pointer(res)) = 0 - return Xsqlite3CorruptError(tls, 87490) + return Xsqlite3CorruptError(tls, 87494) } Xsqlite3VdbeMemInit(tls, bp, db, uint16(0)) rc = Xsqlite3VdbeMemFromBtreeZeroOffset(tls, pCur, U32(nCellKey), bp) @@ -43134,7 +43136,7 @@ var v uintptr = pStmt var db uintptr = (*Vdbe)(unsafe.Pointer(v)).Fdb if vdbeSafety(tls, v) != 0 { - return Xsqlite3MisuseError(tls, 87854) + return Xsqlite3MisuseError(tls, 87858) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) if (*Vdbe)(unsafe.Pointer(v)).FstartTime > int64(0) { @@ -43749,7 +43751,7 @@ var db uintptr if vdbeSafetyNotNull(tls, v) != 0 { - return Xsqlite3MisuseError(tls, 88544) + return Xsqlite3MisuseError(tls, 88548) } db = (*Vdbe)(unsafe.Pointer(v)).Fdb Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) @@ -44269,7 +44271,7 @@ var pVar uintptr if vdbeSafetyNotNull(tls, p) != 0 { - return Xsqlite3MisuseError(tls, 89208) + return Xsqlite3MisuseError(tls, 89212) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer((*Vdbe)(unsafe.Pointer(p)).Fdb)).Fmutex) if int32((*Vdbe)(unsafe.Pointer(p)).FeVdbeState) != VDBE_READY_STATE { @@ -44277,7 +44279,7 @@ Xsqlite3_mutex_leave(tls, (*Sqlite3)(unsafe.Pointer((*Vdbe)(unsafe.Pointer(p)).Fdb)).Fmutex) Xsqlite3_log(tls, SQLITE_MISUSE, ts+5357, libc.VaList(bp, (*Vdbe)(unsafe.Pointer(p)).FzSql)) - return Xsqlite3MisuseError(tls, 89216) 
+ return Xsqlite3MisuseError(tls, 89220) } if i >= uint32((*Vdbe)(unsafe.Pointer(p)).FnVar) { Xsqlite3Error(tls, (*Vdbe)(unsafe.Pointer(p)).Fdb, SQLITE_RANGE) @@ -44682,7 +44684,7 @@ if !(!(p != 0) || (*PreUpdate)(unsafe.Pointer(p)).Fop == SQLITE_INSERT) { goto __1 } - rc = Xsqlite3MisuseError(tls, 89707) + rc = Xsqlite3MisuseError(tls, 89711) goto preupdate_old_out __1: ; @@ -44826,7 +44828,7 @@ if !(!(p != 0) || (*PreUpdate)(unsafe.Pointer(p)).Fop == SQLITE_DELETE) { goto __1 } - rc = Xsqlite3MisuseError(tls, 89809) + rc = Xsqlite3MisuseError(tls, 89813) goto preupdate_new_out __1: ; @@ -45270,10 +45272,6 @@ } else if int32((*Mem)(unsafe.Pointer(p)).Fflags)&MEM_Real != 0 { h = h + U64(Xsqlite3VdbeIntValue(tls, p)) } else if int32((*Mem)(unsafe.Pointer(p)).Fflags)&(MEM_Str|MEM_Blob) != 0 { - h = h + U64((*Mem)(unsafe.Pointer(p)).Fn) - if int32((*Mem)(unsafe.Pointer(p)).Fflags)&MEM_Zero != 0 { - h = h + U64(*(*int32)(unsafe.Pointer(p))) - } } } return h @@ -47921,7 +47919,7 @@ goto __9 goto __425 __424: - rc = Xsqlite3CorruptError(tls, 93317) + rc = Xsqlite3CorruptError(tls, 93320) goto abort_due_to_error __425: ; @@ -49681,7 +49679,7 @@ if !((*Op)(unsafe.Pointer(pOp)).Fp2 == 0) { goto __682 } - rc = Xsqlite3CorruptError(tls, 95560) + rc = Xsqlite3CorruptError(tls, 95563) goto __683 __682: goto jump_to_p2 @@ -50459,7 +50457,7 @@ if !((*Op)(unsafe.Pointer(pOp)).Fp5 != 0 && !(Xsqlite3WritableSchema(tls, db) != 0)) { goto __770 } - rc = Xsqlite3ReportError(tls, SQLITE_CORRUPT|int32(3)<<8, 96635, ts+5866) + rc = Xsqlite3ReportError(tls, SQLITE_CORRUPT|int32(3)<<8, 96638, ts+5866) goto abort_due_to_error __770: ; @@ -50569,7 +50567,7 @@ if !(nCellKey <= int64(0) || nCellKey > int64(0x7fffffff)) { goto __781 } - rc = Xsqlite3CorruptError(tls, 96840) + rc = Xsqlite3CorruptError(tls, 96843) goto abort_due_to_error __781: ; @@ -50763,7 +50761,7 @@ goto __803 } - rc = Xsqlite3CorruptError(tls, 97092) + rc = Xsqlite3CorruptError(tls, 97095) __803: ; Xsqlite3DbFreeNN(tls, db, zSql) @@ -52130,7 +52128,7 @@ if !(rc == SQLITE_IOERR|int32(33)<<8) { goto __957 } - rc = Xsqlite3CorruptError(tls, 99031) + rc = Xsqlite3CorruptError(tls, 99034) __957: ; __956: @@ -52650,7 +52648,7 @@ var db uintptr if p == uintptr(0) { - return Xsqlite3MisuseError(tls, 99516) + return Xsqlite3MisuseError(tls, 99519) } db = (*Incrblob)(unsafe.Pointer(p)).Fdb Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) @@ -52733,7 +52731,7 @@ var db uintptr if p == uintptr(0) { - return Xsqlite3MisuseError(tls, 99616) + return Xsqlite3MisuseError(tls, 99619) } db = (*Incrblob)(unsafe.Pointer(p)).Fdb Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) @@ -56173,14 +56171,10 @@ ; Xsqlite3WalkExpr(tls, pWalker, (*Expr)(unsafe.Pointer(pExpr)).FpLeft) if 0 == Xsqlite3ExprCanBeNull(tls, (*Expr)(unsafe.Pointer(pExpr)).FpLeft) && !(int32((*Parse)(unsafe.Pointer(pParse)).FeParseMode) >= PARSE_MODE_RENAME) { - if int32((*Expr)(unsafe.Pointer(pExpr)).Fop) == TK_NOTNULL { - *(*uintptr)(unsafe.Pointer(pExpr + 8)) = ts + 6764 - *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_IsTrue) - } else { - *(*uintptr)(unsafe.Pointer(pExpr + 8)) = ts + 6769 - *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_IsFalse) - } - (*Expr)(unsafe.Pointer(pExpr)).Fop = U8(TK_TRUEFALSE) + *(*int32)(unsafe.Pointer(pExpr + 8)) = libc.Bool32(int32((*Expr)(unsafe.Pointer(pExpr)).Fop) == TK_NOTNULL) + *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_IntValue) + (*Expr)(unsafe.Pointer(pExpr)).Fop = U8(TK_INTEGER) + i = 0 p = pNC __4: @@ -56224,7 +56218,7 @@ var 
pLeft uintptr = (*Expr)(unsafe.Pointer(pExpr)).FpLeft if (*NameContext)(unsafe.Pointer(pNC)).FncFlags&(NC_IdxExpr|NC_GenCol) != 0 { - notValidImpl(tls, pParse, pNC, ts+6775, uintptr(0), pExpr) + notValidImpl(tls, pParse, pNC, ts+6764, uintptr(0), pExpr) } pRight = (*Expr)(unsafe.Pointer(pExpr)).FpRight @@ -56288,7 +56282,7 @@ (*Expr)(unsafe.Pointer(pExpr)).FiTable = exprProbability(tls, (*ExprList_item)(unsafe.Pointer(pList+8+1*32)).FpExpr) if (*Expr)(unsafe.Pointer(pExpr)).FiTable < 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+6792, libc.VaList(bp, pExpr)) + ts+6781, libc.VaList(bp, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } } else { @@ -56304,7 +56298,7 @@ var auth int32 = Xsqlite3AuthCheck(tls, pParse, SQLITE_FUNCTION, uintptr(0), (*FuncDef)(unsafe.Pointer(pDef)).FzName, uintptr(0)) if auth != SQLITE_OK { if auth == SQLITE_DENY { - Xsqlite3ErrorMsg(tls, pParse, ts+6856, + Xsqlite3ErrorMsg(tls, pParse, ts+6845, libc.VaList(bp+8, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } @@ -56318,7 +56312,7 @@ } if (*FuncDef)(unsafe.Pointer(pDef)).FfuncFlags&U32(SQLITE_FUNC_CONSTANT) == U32(0) { if (*NameContext)(unsafe.Pointer(pNC)).FncFlags&(NC_IdxExpr|NC_PartIdx|NC_GenCol) != 0 { - notValidImpl(tls, pParse, pNC, ts+6892, uintptr(0), pExpr) + notValidImpl(tls, pParse, pNC, ts+6881, uintptr(0), pExpr) } } else { @@ -56341,30 +56335,30 @@ if 0 == libc.Bool32(int32((*Parse)(unsafe.Pointer(pParse)).FeParseMode) >= PARSE_MODE_RENAME) { if pDef != 0 && (*FuncDef)(unsafe.Pointer(pDef)).FxValue == uintptr(0) && pWin != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+6920, libc.VaList(bp+16, pExpr)) + ts+6909, libc.VaList(bp+16, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } else if is_agg != 0 && (*NameContext)(unsafe.Pointer(pNC)).FncFlags&NC_AllowAgg == 0 || is_agg != 0 && (*FuncDef)(unsafe.Pointer(pDef)).FfuncFlags&U32(SQLITE_FUNC_WINDOW) != 0 && !(pWin != 0) || is_agg != 0 && pWin != 0 && (*NameContext)(unsafe.Pointer(pNC)).FncFlags&NC_AllowWin == 0 { var zType uintptr if (*FuncDef)(unsafe.Pointer(pDef)).FfuncFlags&U32(SQLITE_FUNC_WINDOW) != 0 || pWin != 0 { - zType = ts + 6963 + zType = ts + 6952 } else { - zType = ts + 6970 + zType = ts + 6959 } - Xsqlite3ErrorMsg(tls, pParse, ts+6980, libc.VaList(bp+24, zType, pExpr)) + Xsqlite3ErrorMsg(tls, pParse, ts+6969, libc.VaList(bp+24, zType, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ is_agg = 0 } else if no_such_func != 0 && int32((*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).Finit.Fbusy) == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+7008, libc.VaList(bp+40, pExpr)) + Xsqlite3ErrorMsg(tls, pParse, ts+6997, libc.VaList(bp+40, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } else if wrong_num_args != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+7030, + Xsqlite3ErrorMsg(tls, pParse, ts+7019, libc.VaList(bp+48, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } else if is_agg == 0 && (*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_WinFunc) != U32(0) { Xsqlite3ErrorMsg(tls, pParse, - ts+7074, + ts+7063, libc.VaList(bp+56, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } @@ -56436,15 +56430,15 @@ var nRef int32 = (*NameContext)(unsafe.Pointer(pNC)).FnRef if (*NameContext)(unsafe.Pointer(pNC)).FncFlags&NC_SelfRef != 0 { - notValidImpl(tls, pParse, pNC, ts+7122, pExpr, pExpr) + notValidImpl(tls, pParse, pNC, ts+7111, pExpr, pExpr) } else { Xsqlite3WalkSelect(tls, pWalker, *(*uintptr)(unsafe.Pointer(pExpr + 32))) } if nRef != (*NameContext)(unsafe.Pointer(pNC)).FnRef { *(*U32)(unsafe.Pointer(pExpr + 4)) |= 
U32(EP_VarSelect) - *(*int32)(unsafe.Pointer(pNC + 40)) |= NC_VarSelect } + *(*int32)(unsafe.Pointer(pNC + 40)) |= NC_Subquery } break @@ -56452,7 +56446,7 @@ case TK_VARIABLE: { if (*NameContext)(unsafe.Pointer(pNC)).FncFlags&(NC_IsCheck|NC_PartIdx|NC_IdxExpr|NC_GenCol) != 0 { - notValidImpl(tls, pParse, pNC, ts+7133, pExpr, pExpr) + notValidImpl(tls, pParse, pNC, ts+7122, pExpr, pExpr) } break @@ -56583,7 +56577,7 @@ defer tls.Free(24) Xsqlite3ErrorMsg(tls, pParse, - ts+7144, libc.VaList(bp, i, zType, mx)) + ts+7133, libc.VaList(bp, i, zType, mx)) Xsqlite3RecordErrorOffsetOfExpr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pError) } @@ -56603,7 +56597,7 @@ } db = (*Parse)(unsafe.Pointer(pParse)).Fdb if (*ExprList)(unsafe.Pointer(pOrderBy)).FnExpr > *(*int32)(unsafe.Pointer(db + 136 + 2*4)) { - Xsqlite3ErrorMsg(tls, pParse, ts+7200, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+7189, 0) return 1 } for i = 0; i < (*ExprList)(unsafe.Pointer(pOrderBy)).FnExpr; i++ { @@ -56638,7 +56632,7 @@ } if Xsqlite3ExprIsInteger(tls, pE, bp+8) != 0 { if *(*int32)(unsafe.Pointer(bp + 8)) <= 0 || *(*int32)(unsafe.Pointer(bp + 8)) > (*ExprList)(unsafe.Pointer(pEList)).FnExpr { - resolveOutOfRangeError(tls, pParse, ts+7234, i+1, (*ExprList)(unsafe.Pointer(pEList)).FnExpr, pE) + resolveOutOfRangeError(tls, pParse, ts+7223, i+1, (*ExprList)(unsafe.Pointer(pEList)).FnExpr, pE) return 1 } } else { @@ -56695,7 +56689,7 @@ for i = 0; i < (*ExprList)(unsafe.Pointer(pOrderBy)).FnExpr; i++ { if int32(*(*uint16)(unsafe.Pointer(pOrderBy + 8 + uintptr(i)*32 + 16 + 4))&0x4>>2) == 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+7240, libc.VaList(bp, i+1)) + ts+7229, libc.VaList(bp, i+1)) return 1 } } @@ -56723,7 +56717,7 @@ return 0 } if (*ExprList)(unsafe.Pointer(pOrderBy)).FnExpr > *(*int32)(unsafe.Pointer(db + 136 + 2*4)) { - Xsqlite3ErrorMsg(tls, pParse, ts+7301, libc.VaList(bp, zType)) + Xsqlite3ErrorMsg(tls, pParse, ts+7290, libc.VaList(bp, zType)) return 1 } pEList = (*Select)(unsafe.Pointer(pSelect)).FpEList @@ -56937,7 +56931,7 @@ *(*int32)(unsafe.Pointer(bp + 40)) |= NC_UEList if (*Select)(unsafe.Pointer(p)).FpHaving != 0 { if (*Select)(unsafe.Pointer(p)).FselFlags&U32(SF_Aggregate) == U32(0) { - Xsqlite3ErrorMsg(tls, pParse, ts+7332, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+7321, 0) return WRC_Abort } if Xsqlite3ResolveExprNames(tls, bp, (*Select)(unsafe.Pointer(p)).FpHaving) != 0 { @@ -56977,7 +56971,7 @@ if (*Select)(unsafe.Pointer(p)).FpOrderBy != uintptr(0) && isCompound <= nCompound && - resolveOrderGroupBy(tls, bp, p, (*Select)(unsafe.Pointer(p)).FpOrderBy, ts+7234) != 0 { + resolveOrderGroupBy(tls, bp, p, (*Select)(unsafe.Pointer(p)).FpOrderBy, ts+7223) != 0 { return WRC_Abort } if (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { @@ -56988,7 +56982,7 @@ if pGroupBy != 0 { var pItem uintptr - if resolveOrderGroupBy(tls, bp, p, pGroupBy, ts+7371) != 0 || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { + if resolveOrderGroupBy(tls, bp, p, pGroupBy, ts+7360) != 0 || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { return WRC_Abort } i = 0 @@ -57000,7 +56994,7 @@ { if (*Expr)(unsafe.Pointer((*ExprList_item)(unsafe.Pointer(pItem)).FpExpr)).Fflags&U32(EP_Agg) != U32(0) { Xsqlite3ErrorMsg(tls, pParse, - ts+7377, 0) + ts+7366, 0) return WRC_Abort } @@ -57864,7 +57858,7 @@ var mxHeight int32 = *(*int32)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb + 136 + 3*4)) if nHeight > mxHeight { Xsqlite3ErrorMsg(tls, pParse, - ts+7436, libc.VaList(bp, mxHeight)) + ts+7425, libc.VaList(bp, mxHeight)) rc = SQLITE_ERROR } return rc 
@@ -58113,10 +58107,10 @@ nExprElem = 1 } if nExprElem != nElem { - Xsqlite3ErrorMsg(tls, pParse, ts+7484, + Xsqlite3ErrorMsg(tls, pParse, ts+7473, libc.VaList(bp, nExprElem, func() uintptr { if nExprElem > 1 { - return ts + 7528 + return ts + 7517 } return ts + 1554 }(), nElem)) @@ -58157,7 +58151,7 @@ !(int32((*Parse)(unsafe.Pointer(pParse)).FeParseMode) >= PARSE_MODE_RENAME) { Xsqlite3ExprDeferredDelete(tls, pParse, pLeft) Xsqlite3ExprDeferredDelete(tls, pParse, pRight) - return Xsqlite3Expr(tls, db, TK_INTEGER, ts+7530) + return Xsqlite3Expr(tls, db, TK_INTEGER, ts+7519) } else { return Xsqlite3PExpr(tls, pParse, TK_AND, pLeft, pRight) } @@ -58183,7 +58177,7 @@ if pList != 0 && (*ExprList)(unsafe.Pointer(pList)).FnExpr > *(*int32)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb + 136 + 6*4)) && !(int32((*Parse)(unsafe.Pointer(pParse)).Fnested) != 0) { - Xsqlite3ErrorMsg(tls, pParse, ts+7532, libc.VaList(bp, pToken)) + Xsqlite3ErrorMsg(tls, pParse, ts+7521, libc.VaList(bp, pToken)) } *(*uintptr)(unsafe.Pointer(pNew + 32)) = pList *(*U32)(unsafe.Pointer(pNew + 4)) |= U32(EP_HasFunc) @@ -58211,7 +58205,7 @@ if (*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_FromDDL) != U32(0) { if (*FuncDef)(unsafe.Pointer(pDef)).FfuncFlags&U32(SQLITE_FUNC_DIRECT) != U32(0) || (*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).Fflags&uint64(SQLITE_TrustedSchema) == uint64(0) { - Xsqlite3ErrorMsg(tls, pParse, ts+7566, libc.VaList(bp, pExpr)) + Xsqlite3ErrorMsg(tls, pParse, ts+7555, libc.VaList(bp, pExpr)) } } } @@ -58258,7 +58252,7 @@ } if bOk == 0 || *(*I64)(unsafe.Pointer(bp + 8)) < int64(1) || *(*I64)(unsafe.Pointer(bp + 8)) > I64(*(*int32)(unsafe.Pointer(db + 136 + 9*4))) { - Xsqlite3ErrorMsg(tls, pParse, ts+7586, + Xsqlite3ErrorMsg(tls, pParse, ts+7575, libc.VaList(bp, *(*int32)(unsafe.Pointer(db + 136 + 9*4)))) Xsqlite3RecordErrorOffsetOfExpr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pExpr) return @@ -58283,7 +58277,7 @@ } (*Expr)(unsafe.Pointer(pExpr)).FiColumn = x if int32(x) > *(*int32)(unsafe.Pointer(db + 136 + 9*4)) { - Xsqlite3ErrorMsg(tls, pParse, ts+7629, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+7618, 0) Xsqlite3RecordErrorOffsetOfExpr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pExpr) } } @@ -58858,7 +58852,7 @@ if !(int32((*Expr)(unsafe.Pointer(pExpr)).Fop) != TK_SELECT && (*IdList)(unsafe.Pointer(pColumns)).FnId != libc.AssignInt32(&n, Xsqlite3ExprVectorSize(tls, pExpr))) { goto __3 } - Xsqlite3ErrorMsg(tls, pParse, ts+7652, + Xsqlite3ErrorMsg(tls, pParse, ts+7641, libc.VaList(bp, (*IdList)(unsafe.Pointer(pColumns)).FnId, n)) goto vector_append_error __3: @@ -58981,7 +58975,7 @@ var mx int32 = *(*int32)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb + 136 + 2*4)) if pEList != 0 && (*ExprList)(unsafe.Pointer(pEList)).FnExpr > mx { - Xsqlite3ErrorMsg(tls, pParse, ts+7682, libc.VaList(bp, zObject)) + Xsqlite3ErrorMsg(tls, pParse, ts+7671, libc.VaList(bp, zObject)) } } @@ -59037,10 +59031,10 @@ // "false" EP_IsFalse // anything else 0 func Xsqlite3IsTrueOrFalse(tls *libc.TLS, zIn uintptr) U32 { - if Xsqlite3StrICmp(tls, zIn, ts+6764) == 0 { + if Xsqlite3StrICmp(tls, zIn, ts+7694) == 0 { return U32(EP_IsTrue) } - if Xsqlite3StrICmp(tls, zIn, ts+6769) == 0 { + if Xsqlite3StrICmp(tls, zIn, ts+7699) == 0 { return U32(EP_IsFalse) } return U32(0) @@ -60114,7 +60108,7 @@ } if (*Select)(unsafe.Pointer(pSel)).FpLimit != 0 { var db uintptr = (*Parse)(unsafe.Pointer(pParse)).Fdb - pLimit = Xsqlite3Expr(tls, db, TK_INTEGER, ts+7530) + pLimit = Xsqlite3Expr(tls, db, TK_INTEGER, ts+7519) if 
pLimit != 0 { (*Expr)(unsafe.Pointer(pLimit)).FaffExpr = int8(SQLITE_AFF_NUMERIC) pLimit = Xsqlite3PExpr(tls, pParse, TK_NE, @@ -60552,6 +60546,7 @@ func Xsqlite3ExprCodeGeneratedColumn(tls *libc.TLS, pParse uintptr, pTab uintptr, pCol uintptr, regOut int32) { var iAddr int32 var v uintptr = (*Parse)(unsafe.Pointer(pParse)).FpVdbe + var nErr int32 = (*Parse)(unsafe.Pointer(pParse)).FnErr if (*Parse)(unsafe.Pointer(pParse)).FiSelfTab > 0 { iAddr = Xsqlite3VdbeAddOp3(tls, v, OP_IfNullRow, (*Parse)(unsafe.Pointer(pParse)).FiSelfTab-1, 0, regOut) @@ -60565,6 +60560,9 @@ if iAddr != 0 { Xsqlite3VdbeJumpHere(tls, v, iAddr) } + if (*Parse)(unsafe.Pointer(pParse)).FnErr > nErr { + (*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).FerrByteOffset = -1 + } } // Generate code to extract the value of the iCol-th column of a table. @@ -60783,6 +60781,7 @@ var p uintptr var v uintptr for p = (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr; p != 0; p = (*IndexedExpr)(unsafe.Pointer(p)).FpIENext { + var exprAff U8 var iDataCur int32 = (*IndexedExpr)(unsafe.Pointer(p)).FiDataCur if iDataCur < 0 { continue @@ -60796,6 +60795,14 @@ if Xsqlite3ExprCompare(tls, uintptr(0), pExpr, (*IndexedExpr)(unsafe.Pointer(p)).FpExpr, iDataCur) != 0 { continue } + + exprAff = U8(Xsqlite3ExprAffinity(tls, pExpr)) + if int32(exprAff) <= SQLITE_AFF_BLOB && int32((*IndexedExpr)(unsafe.Pointer(p)).Faff) != SQLITE_AFF_BLOB || + int32(exprAff) == SQLITE_AFF_TEXT && int32((*IndexedExpr)(unsafe.Pointer(p)).Faff) != SQLITE_AFF_TEXT || + int32(exprAff) >= SQLITE_AFF_NUMERIC && int32((*IndexedExpr)(unsafe.Pointer(p)).Faff) != SQLITE_AFF_NUMERIC { + continue + } + v = (*Parse)(unsafe.Pointer(pParse)).FpVdbe if (*IndexedExpr)(unsafe.Pointer(p)).FbMaybeNullRow != 0 { @@ -61569,7 +61576,7 @@ if !((*Expr)(unsafe.Pointer(pExpr)).FiTable != n1) { goto __122 } - Xsqlite3ErrorMsg(tls, pParse, ts+7652, + Xsqlite3ErrorMsg(tls, pParse, ts+7641, libc.VaList(bp+24, (*Expr)(unsafe.Pointer(pExpr)).FiTable, n1)) __122: ; @@ -61591,11 +61598,10 @@ return target __50: - if !(!((*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_Collate) != U32(0)) && - (*Expr)(unsafe.Pointer(pExpr)).FpLeft != 0 && - int32((*Expr)(unsafe.Pointer((*Expr)(unsafe.Pointer(pExpr)).FpLeft)).Fop) == TK_FUNCTION) { + if !!((*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_Collate) != U32(0)) { goto __123 } + inReg = Xsqlite3ExprCodeTarget(tls, pParse, (*Expr)(unsafe.Pointer(pExpr)).FpLeft, target) if !(inReg != target) { goto __125 @@ -61666,13 +61672,19 @@ ; __127: ; - addrINR = Xsqlite3VdbeAddOp1(tls, v, OP_IfNullRow, (*Expr)(unsafe.Pointer(pExpr)).FiTable) + addrINR = Xsqlite3VdbeAddOp3(tls, v, OP_IfNullRow, (*Expr)(unsafe.Pointer(pExpr)).FiTable, 0, target) (*Parse)(unsafe.Pointer(pParse)).FokConstFactor = U8(0) inReg = Xsqlite3ExprCodeTarget(tls, pParse, (*Expr)(unsafe.Pointer(pExpr)).FpLeft, target) (*Parse)(unsafe.Pointer(pParse)).FokConstFactor = okConstFactor + if !(inReg != target) { + goto __130 + } + Xsqlite3VdbeAddOp2(tls, v, OP_SCopy, inReg, target) + inReg = target +__130: + ; Xsqlite3VdbeJumpHere(tls, v, addrINR) - Xsqlite3VdbeChangeP3(tls, v, addrINR, inReg) goto __5 __56: @@ -61685,15 +61697,15 @@ nExpr = (*ExprList)(unsafe.Pointer(pEList)).FnExpr endLabel = Xsqlite3VdbeMakeLabel(tls, pParse) if !(libc.AssignUintptr(&pX, (*Expr)(unsafe.Pointer(pExpr)).FpLeft) != uintptr(0)) { - goto __130 + goto __131 } pDel = Xsqlite3ExprDup(tls, db1, pX, 0) if !((*Sqlite3)(unsafe.Pointer(db1)).FmallocFailed != 0) { - goto __131 + goto __132 } Xsqlite3ExprDelete(tls, db1, pDel) 
goto __5 -__131: +__132: ; exprToRegister(tls, pDel, exprCodeVector(tls, pParse, pDel, bp+40)) @@ -61703,22 +61715,22 @@ pTest = bp + 120 *(*int32)(unsafe.Pointer(bp + 40)) = 0 -__130: +__131: ; i1 = 0 -__132: +__133: if !(i1 < nExpr-1) { - goto __134 + goto __135 } if !(pX != 0) { - goto __135 + goto __136 } (*Expr)(unsafe.Pointer(bp + 120)).FpRight = (*ExprList_item)(unsafe.Pointer(aListelem + uintptr(i1)*32)).FpExpr - goto __136 -__135: - pTest = (*ExprList_item)(unsafe.Pointer(aListelem + uintptr(i1)*32)).FpExpr + goto __137 __136: + pTest = (*ExprList_item)(unsafe.Pointer(aListelem + uintptr(i1)*32)).FpExpr +__137: ; nextCase = Xsqlite3VdbeMakeLabel(tls, pParse) @@ -61727,21 +61739,21 @@ Xsqlite3ExprCode(tls, pParse, (*ExprList_item)(unsafe.Pointer(aListelem+uintptr(i1+1)*32)).FpExpr, target) Xsqlite3VdbeGoto(tls, v, endLabel) Xsqlite3VdbeResolveLabel(tls, v, nextCase) - goto __133 -__133: - i1 = i1 + 2 - goto __132 goto __134 __134: + i1 = i1 + 2 + goto __133 + goto __135 +__135: ; if !(nExpr&1 != 0) { - goto __137 + goto __138 } Xsqlite3ExprCode(tls, pParse, (*ExprList_item)(unsafe.Pointer(pEList+8+uintptr(nExpr-1)*32)).FpExpr, target) - goto __138 -__137: - Xsqlite3VdbeAddOp2(tls, v, OP_Null, 0, target) + goto __139 __138: + Xsqlite3VdbeAddOp2(tls, v, OP_Null, 0, target) +__139: ; Xsqlite3ExprDelete(tls, db1, pDel) setDoNotMergeFlagOnCopy(tls, v) @@ -61751,27 +61763,27 @@ __57: ; if !(!(int32((*Parse)(unsafe.Pointer(pParse)).FpTriggerTab) != 0) && !(int32((*Parse)(unsafe.Pointer(pParse)).Fnested) != 0)) { - goto __139 + goto __140 } Xsqlite3ErrorMsg(tls, pParse, ts+8082, 0) return 0 -__139: +__140: ; if !(int32((*Expr)(unsafe.Pointer(pExpr)).FaffExpr) == OE_Abort) { - goto __140 + goto __141 } Xsqlite3MayAbort(tls, pParse) -__140: +__141: ; if !(int32((*Expr)(unsafe.Pointer(pExpr)).FaffExpr) == OE_Ignore) { - goto __141 + goto __142 } Xsqlite3VdbeAddOp4(tls, v, OP_Halt, SQLITE_OK, OE_Ignore, 0, *(*uintptr)(unsafe.Pointer(pExpr + 8)), 0) - goto __142 -__141: + goto __143 +__142: Xsqlite3HaltConstraint(tls, pParse, func() int32 { if (*Parse)(unsafe.Pointer(pParse)).FpTriggerTab != 0 { @@ -61780,7 +61792,7 @@ return SQLITE_ERROR }(), int32((*Expr)(unsafe.Pointer(pExpr)).FaffExpr), *(*uintptr)(unsafe.Pointer(pExpr + 8)), int8(0), uint8(0)) -__142: +__143: ; goto __5 @@ -64448,7 +64460,7 @@ return SQLITE_NOMEM } if Xsqlite3_strnicmp(tls, zSql, ts+10922, 7) != 0 { - return Xsqlite3CorruptError(tls, 113494) + return Xsqlite3CorruptError(tls, 113516) } (*Sqlite3)(unsafe.Pointer(db)).Finit.FiDb = func() uint8 { if bTemp != 0 { @@ -64465,7 +64477,7 @@ } if rc == SQLITE_OK && ((*Parse)(unsafe.Pointer(p)).FpNewTable == uintptr(0) && (*Parse)(unsafe.Pointer(p)).FpNewIndex == uintptr(0) && (*Parse)(unsafe.Pointer(p)).FpNewTrigger == uintptr(0)) { - rc = Xsqlite3CorruptError(tls, 113505) + rc = Xsqlite3CorruptError(tls, 113527) } (*Sqlite3)(unsafe.Pointer(db)).Finit.FiDb = U8(0) @@ -65386,7 +65398,7 @@ goto __2 } - rc = Xsqlite3CorruptError(tls, 114441) + rc = Xsqlite3CorruptError(tls, 114463) goto drop_column_done __2: ; @@ -69750,6 +69762,12 @@ pExpr = Xsqlite3PExpr(tls, pParse, TK_UPLUS, pExpr, uintptr(0)) __11: ; + if !(pExpr != 0 && int32((*Expr)(unsafe.Pointer(pExpr)).Fop) != TK_RAISE) { + goto __12 + } + (*Expr)(unsafe.Pointer(pExpr)).FaffExpr = (*Column)(unsafe.Pointer(pCol)).Faffinity +__12: + ; Xsqlite3ColumnSetExpr(tls, pParse, pTab, pCol, pExpr) pExpr = uintptr(0) goto generated_done @@ -70914,7 +70932,7 @@ if Xsqlite3_strnicmp(tls, (*Table)(unsafe.Pointer(pTab)).FzName+uintptr(7), 
ts+3286, 4) == 0 { return 0 } - if Xsqlite3_strnicmp(tls, (*Table)(unsafe.Pointer(pTab)).FzName+uintptr(7), ts+7133, 10) == 0 { + if Xsqlite3_strnicmp(tls, (*Table)(unsafe.Pointer(pTab)).FzName+uintptr(7), ts+7122, 10) == 0 { return 0 } return 1 @@ -72160,7 +72178,7 @@ goto __101 } Xsqlite3ErrorMsg(tls, pParse, ts+14140, 0) - (*Parse)(unsafe.Pointer(pParse)).Frc = Xsqlite3CorruptError(tls, 121835) + (*Parse)(unsafe.Pointer(pParse)).Frc = Xsqlite3CorruptError(tls, 121859) goto exit_create_index __101: ; @@ -74205,7 +74223,7 @@ goto __16 __15: wcf = U16(WHERE_ONEPASS_DESIRED | WHERE_DUPLICATES_OK) - if !((*NameContext)(unsafe.Pointer(bp+16)).FncFlags&NC_VarSelect != 0) { + if !((*NameContext)(unsafe.Pointer(bp+16)).FncFlags&NC_Subquery != 0) { goto __23 } bComplex = 1 @@ -80673,7 +80691,7 @@ if !!(Xsqlite3SafetyCheckOk(tls, db) != 0) { goto __1 } - return Xsqlite3MisuseError(tls, 131895) + return Xsqlite3MisuseError(tls, 131931) __1: ; if !(zSql == uintptr(0)) { @@ -82072,7 +82090,7 @@ } else if (*FuncDef)(unsafe.Pointer(p)).FxFinalize != uintptr(0) { zType = ts + 17513 } else { - zType = ts + 7528 + zType = ts + 7517 } Xsqlite3VdbeMultiLoad(tls, v, 1, ts+17515, libc.VaList(bp, (*FuncDef)(unsafe.Pointer(p)).FzName, isBuiltin, @@ -82233,6 +82251,7 @@ var zErr2 uintptr var k3 int32 var pCheck uintptr + var jmp7 int32 var jmp6 int32 var iCol1 int32 var uniqOk int32 @@ -83551,7 +83570,7 @@ goto __217 } pMod = (*HashElem)(unsafe.Pointer(j1)).Fdata - Xsqlite3VdbeMultiLoad(tls, v, 1, ts+7528, libc.VaList(bp+264, (*Module)(unsafe.Pointer(pMod)).FzName)) + Xsqlite3VdbeMultiLoad(tls, v, 1, ts+7517, libc.VaList(bp+264, (*Module)(unsafe.Pointer(pMod)).FzName)) goto __216 __216: j1 = (*HashElem)(unsafe.Pointer(j1)).Fnext @@ -83567,7 +83586,7 @@ if !(i6 < int32(uint64(unsafe.Sizeof(aPragmaName))/uint64(unsafe.Sizeof(PragmaName{})))) { goto __220 } - Xsqlite3VdbeMultiLoad(tls, v, 1, ts+7528, libc.VaList(bp+272, aPragmaName[i6].FzName)) + Xsqlite3VdbeMultiLoad(tls, v, 1, ts+7517, libc.VaList(bp+272, aPragmaName[i6].FzName)) goto __219 __219: i6++ @@ -84372,80 +84391,94 @@ jmp4 = integrityCheckResultRow(tls, v) Xsqlite3VdbeJumpHere(tls, v, jmp21) + if !((*Table)(unsafe.Pointer(pTab9)).FtabFlags&U32(TF_WithoutRowid) == U32(0)) { + goto __345 + } + Xsqlite3VdbeAddOp2(tls, v, OP_IdxRowid, *(*int32)(unsafe.Pointer(bp + 616))+j4, 3) + jmp7 = Xsqlite3VdbeAddOp3(tls, v, OP_Eq, 3, 0, r1+int32((*Index)(unsafe.Pointer(pIdx5)).FnColumn)-1) + + Xsqlite3VdbeLoadString(tls, v, 3, + ts+17929) + Xsqlite3VdbeAddOp3(tls, v, OP_Concat, 7, 3, 3) + Xsqlite3VdbeLoadString(tls, v, 4, ts+17965) + Xsqlite3VdbeGoto(tls, v, jmp5-1) + Xsqlite3VdbeJumpHere(tls, v, jmp7) +__345: + ; label6 = 0 kk = 0 -__345: +__346: if !(kk < int32((*Index)(unsafe.Pointer(pIdx5)).FnKeyCol)) { - goto __347 + goto __348 } if !(*(*uintptr)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx5)).FazColl + uintptr(kk)*8)) == uintptr(unsafe.Pointer(&Xsqlite3StrBINARY))) { - goto __348 + goto __349 } - goto __346 -__348: + goto __347 +__349: ; if !(label6 == 0) { - goto __349 + goto __350 } label6 = Xsqlite3VdbeMakeLabel(tls, pParse) -__349: +__350: ; Xsqlite3VdbeAddOp3(tls, v, OP_Column, *(*int32)(unsafe.Pointer(bp + 616))+j4, kk, 3) Xsqlite3VdbeAddOp3(tls, v, OP_Ne, 3, label6, r1+kk) - goto __346 -__346: - kk++ - goto __345 goto __347 __347: + kk++ + goto __346 + goto __348 +__348: ; if !(label6 != 0) { - goto __350 + goto __351 } jmp6 = Xsqlite3VdbeAddOp0(tls, v, OP_Goto) Xsqlite3VdbeResolveLabel(tls, v, label6) Xsqlite3VdbeLoadString(tls, v, 3, ts+17903) 
Xsqlite3VdbeAddOp3(tls, v, OP_Concat, 7, 3, 3) - Xsqlite3VdbeLoadString(tls, v, 4, ts+17929) + Xsqlite3VdbeLoadString(tls, v, 4, ts+17976) Xsqlite3VdbeGoto(tls, v, jmp5-1) Xsqlite3VdbeJumpHere(tls, v, jmp6) -__350: +__351: ; if !(int32((*Index)(unsafe.Pointer(pIdx5)).FonError) != OE_None) { - goto __351 + goto __352 } uniqOk = Xsqlite3VdbeMakeLabel(tls, pParse) kk = 0 -__352: +__353: if !(kk < int32((*Index)(unsafe.Pointer(pIdx5)).FnKeyCol)) { - goto __354 + goto __355 } iCol1 = int32(*(*I16)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx5)).FaiColumn + uintptr(kk)*2))) if !(iCol1 >= 0 && uint32(int32(*(*uint8)(unsafe.Pointer((*Table)(unsafe.Pointer(pTab9)).FaCol + uintptr(iCol1)*24 + 8))&0xf>>0)) != 0) { - goto __355 + goto __356 } - goto __353 -__355: + goto __354 +__356: ; Xsqlite3VdbeAddOp2(tls, v, OP_IsNull, r1+kk, uniqOk) - goto __353 -__353: - kk++ - goto __352 goto __354 __354: + kk++ + goto __353 + goto __355 +__355: ; jmp61 = Xsqlite3VdbeAddOp1(tls, v, OP_Next, *(*int32)(unsafe.Pointer(bp + 616))+j4) Xsqlite3VdbeGoto(tls, v, uniqOk) Xsqlite3VdbeJumpHere(tls, v, jmp61) Xsqlite3VdbeAddOp4Int(tls, v, OP_IdxGT, *(*int32)(unsafe.Pointer(bp + 616))+j4, uniqOk, r1, int32((*Index)(unsafe.Pointer(pIdx5)).FnKeyCol)) - Xsqlite3VdbeLoadString(tls, v, 3, ts+17956) + Xsqlite3VdbeLoadString(tls, v, 3, ts+18003) Xsqlite3VdbeGoto(tls, v, jmp5) Xsqlite3VdbeResolveLabel(tls, v, uniqOk) -__351: +__352: ; Xsqlite3VdbeJumpHere(tls, v, jmp4) Xsqlite3ResolvePartIdxLabel(tls, pParse, *(*int32)(unsafe.Pointer(bp + 632))) @@ -84462,20 +84495,20 @@ Xsqlite3VdbeAddOp2(tls, v, OP_Next, *(*int32)(unsafe.Pointer(bp + 612)), loopTop) Xsqlite3VdbeJumpHere(tls, v, loopTop-1) if !!(isQuick != 0) { - goto __356 + goto __357 } - Xsqlite3VdbeLoadString(tls, v, 2, ts+17983) + Xsqlite3VdbeLoadString(tls, v, 2, ts+18030) j4 = 0 pIdx5 = (*Table)(unsafe.Pointer(pTab9)).FpIndex -__357: +__358: if !(pIdx5 != 0) { - goto __359 + goto __360 } if !(pPk1 == pIdx5) { - goto __360 + goto __361 } - goto __358 -__360: + goto __359 +__361: ; Xsqlite3VdbeAddOp2(tls, v, OP_Count, *(*int32)(unsafe.Pointer(bp + 616))+j4, 3) addr1 = Xsqlite3VdbeAddOp3(tls, v, OP_Eq, 8+j4, 0, 3) @@ -84484,21 +84517,21 @@ Xsqlite3VdbeAddOp3(tls, v, OP_Concat, 4, 2, 3) integrityCheckResultRow(tls, v) Xsqlite3VdbeJumpHere(tls, v, addr1) - goto __358 -__358: - pIdx5 = (*Index)(unsafe.Pointer(pIdx5)).FpNext - j4++ - goto __357 goto __359 __359: + pIdx5 = (*Index)(unsafe.Pointer(pIdx5)).FpNext + j4++ + goto __358 + goto __360 +__360: ; if !(pPk1 != 0) { - goto __361 + goto __362 } Xsqlite3ReleaseTempRange(tls, pParse, r2, int32((*Index)(unsafe.Pointer(pPk1)).FnKeyCol)) -__361: +__362: ; -__356: +__357: ; goto __291 __291: @@ -84516,14 +84549,14 @@ ; aOp2 = Xsqlite3VdbeAddOpList(tls, v, int32(uint64(unsafe.Sizeof(endCode))/uint64(unsafe.Sizeof(VdbeOpList{}))), uintptr(unsafe.Pointer(&endCode)), iLn5) if !(aOp2 != 0) { - goto __362 + goto __363 } (*VdbeOp)(unsafe.Pointer(aOp2)).Fp2 = 1 - *(*int32)(unsafe.Pointer(bp + 608)) (*VdbeOp)(unsafe.Pointer(aOp2 + 2*24)).Fp4type = int8(-1) - *(*uintptr)(unsafe.Pointer(aOp2 + 2*24 + 16)) = ts + 18012 + *(*uintptr)(unsafe.Pointer(aOp2 + 2*24 + 16)) = ts + 18059 (*VdbeOp)(unsafe.Pointer(aOp2 + 5*24)).Fp4type = int8(-1) *(*uintptr)(unsafe.Pointer(aOp2 + 5*24 + 16)) = Xsqlite3ErrStr(tls, SQLITE_CORRUPT) -__362: +__363: ; Xsqlite3VdbeChangeP3(tls, v, 0, Xsqlite3VdbeCurrentAddr(tls, v)-2) @@ -84531,27 +84564,27 @@ __45: if !!(zRight != 0) { - goto __363 + goto __364 } if !(Xsqlite3ReadSchema(tls, pParse) != 0) { - goto __365 + goto 
__366 } goto pragma_out -__365: +__366: ; returnSingleText(tls, v, encnames1[(*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).Fenc].FzName) - goto __364 -__363: + goto __365 +__364: if !((*Sqlite3)(unsafe.Pointer(db)).FmDbFlags&U32(DBFLAG_EncodingFixed) == U32(0)) { - goto __366 + goto __367 } pEnc = uintptr(unsafe.Pointer(&encnames1)) -__367: +__368: if !((*EncName)(unsafe.Pointer(pEnc)).FzName != 0) { - goto __369 + goto __370 } if !(0 == Xsqlite3StrICmp(tls, zRight, (*EncName)(unsafe.Pointer(pEnc)).FzName)) { - goto __370 + goto __371 } if (*EncName)(unsafe.Pointer(pEnc)).Fenc != 0 { enc = (*EncName)(unsafe.Pointer(pEnc)).Fenc @@ -84560,25 +84593,25 @@ } (*Schema)(unsafe.Pointer((*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb)).FpSchema)).Fenc = enc Xsqlite3SetTextEncoding(tls, db, enc) - goto __369 -__370: + goto __370 +__371: ; - goto __368 -__368: - pEnc += 16 - goto __367 goto __369 __369: + pEnc += 16 + goto __368 + goto __370 +__370: ; if !!(int32((*EncName)(unsafe.Pointer(pEnc)).FzName) != 0) { - goto __371 + goto __372 } - Xsqlite3ErrorMsg(tls, pParse, ts+18015, libc.VaList(bp+456, zRight)) -__371: + Xsqlite3ErrorMsg(tls, pParse, ts+18062, libc.VaList(bp+456, zRight)) +__372: ; -__366: +__367: ; -__364: +__365: ; goto __15 @@ -84586,15 +84619,15 @@ iCookie = int32((*PragmaName)(unsafe.Pointer(pPragma)).FiArg) Xsqlite3VdbeUsesBtree(tls, v, iDb) if !(zRight != 0 && int32((*PragmaName)(unsafe.Pointer(pPragma)).FmPragFlg)&PragFlg_ReadOnly == 0) { - goto __372 + goto __373 } aOp3 = Xsqlite3VdbeAddOpList(tls, v, int32(uint64(unsafe.Sizeof(setCookie))/uint64(unsafe.Sizeof(VdbeOpList{}))), uintptr(unsafe.Pointer(&setCookie)), 0) if !(0 != 0) { - goto __374 + goto __375 } goto __15 -__374: +__375: ; (*VdbeOp)(unsafe.Pointer(aOp3)).Fp1 = iDb (*VdbeOp)(unsafe.Pointer(aOp3 + 1*24)).Fp1 = iDb @@ -84602,41 +84635,41 @@ (*VdbeOp)(unsafe.Pointer(aOp3 + 1*24)).Fp3 = Xsqlite3Atoi(tls, zRight) (*VdbeOp)(unsafe.Pointer(aOp3 + 1*24)).Fp5 = U16(1) if !(iCookie == BTREE_SCHEMA_VERSION && (*Sqlite3)(unsafe.Pointer(db)).Fflags&uint64(SQLITE_Defensive) != uint64(0)) { - goto __375 + goto __376 } (*VdbeOp)(unsafe.Pointer(aOp3 + 1*24)).Fopcode = U8(OP_Noop) -__375: +__376: ; - goto __373 -__372: + goto __374 +__373: ; aOp4 = Xsqlite3VdbeAddOpList(tls, v, int32(uint64(unsafe.Sizeof(readCookie))/uint64(unsafe.Sizeof(VdbeOpList{}))), uintptr(unsafe.Pointer(&readCookie)), 0) if !(0 != 0) { - goto __376 + goto __377 } goto __15 -__376: +__377: ; (*VdbeOp)(unsafe.Pointer(aOp4)).Fp1 = iDb (*VdbeOp)(unsafe.Pointer(aOp4 + 1*24)).Fp1 = iDb (*VdbeOp)(unsafe.Pointer(aOp4 + 1*24)).Fp3 = iCookie Xsqlite3VdbeReusable(tls, v) -__373: +__374: ; goto __15 __47: i10 = 0 (*Parse)(unsafe.Pointer(pParse)).FnMem = 1 -__377: +__378: if !(libc.AssignUintptr(&zOpt, Xsqlite3_compileoption_get(tls, libc.PostIncInt32(&i10, 1))) != uintptr(0)) { - goto __378 + goto __379 } Xsqlite3VdbeLoadString(tls, v, 1, zOpt) Xsqlite3VdbeAddOp2(tls, v, OP_ResultRow, 1, 1) - goto __377 -__378: + goto __378 +__379: ; Xsqlite3VdbeReusable(tls, v) @@ -84651,31 +84684,31 @@ }() eMode2 = SQLITE_CHECKPOINT_PASSIVE if !(zRight != 0) { - goto __379 + goto __380 } if !(Xsqlite3StrICmp(tls, zRight, ts+17345) == 0) { - goto __380 + goto __381 } eMode2 = SQLITE_CHECKPOINT_FULL - goto __381 -__380: - if !(Xsqlite3StrICmp(tls, zRight, ts+18040) == 0) { - goto __382 + goto __382 +__381: + if !(Xsqlite3StrICmp(tls, zRight, ts+18087) == 0) { + goto __383 } eMode2 = SQLITE_CHECKPOINT_RESTART - goto __383 -__382: + goto __384 +__383: if 
!(Xsqlite3StrICmp(tls, zRight, ts+17498) == 0) { - goto __384 + goto __385 } eMode2 = SQLITE_CHECKPOINT_TRUNCATE -__384: +__385: ; -__383: +__384: ; -__381: +__382: ; -__379: +__380: ; (*Parse)(unsafe.Pointer(pParse)).FnMem = 3 Xsqlite3VdbeAddOp3(tls, v, OP_Checkpoint, iBt, eMode2, 1) @@ -84685,10 +84718,10 @@ __49: if !(zRight != 0) { - goto __385 + goto __386 } Xsqlite3_wal_autocheckpoint(tls, db, Xsqlite3Atoi(tls, zRight)) -__385: +__386: ; returnSingleInt(tls, v, func() int64 { @@ -84708,19 +84741,19 @@ __51: if !(zRight != 0) { - goto __386 + goto __387 } opMask = U32(Xsqlite3Atoi(tls, zRight)) if !(opMask&U32(0x02) == U32(0)) { - goto __388 + goto __389 } goto __15 -__388: +__389: ; - goto __387 -__386: - opMask = U32(0xfffe) + goto __388 __387: + opMask = U32(0xfffe) +__388: ; iTabCur = libc.PostIncInt32(&(*Parse)(unsafe.Pointer(pParse)).FnTab, 1) iDbLast = func() int32 { @@ -84729,86 +84762,86 @@ } return (*Sqlite3)(unsafe.Pointer(db)).FnDb - 1 }() -__389: +__390: if !(iDb <= iDbLast) { - goto __391 + goto __392 } if !(iDb == 1) { - goto __392 + goto __393 } - goto __390 -__392: + goto __391 +__393: ; Xsqlite3CodeVerifySchema(tls, pParse, iDb) pSchema = (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + uintptr(iDb)*32)).FpSchema k4 = (*Hash)(unsafe.Pointer(pSchema + 8)).Ffirst -__393: +__394: if !(k4 != 0) { - goto __395 + goto __396 } pTab10 = (*HashElem)(unsafe.Pointer(k4)).Fdata if !((*Table)(unsafe.Pointer(pTab10)).FtabFlags&U32(TF_StatsUsed) == U32(0)) { - goto __396 + goto __397 } - goto __394 -__396: + goto __395 +__397: ; szThreshold = LogEst(int32((*Table)(unsafe.Pointer(pTab10)).FnRowLogEst) + 46) pIdx6 = (*Table)(unsafe.Pointer(pTab10)).FpIndex -__397: +__398: if !(pIdx6 != 0) { - goto __399 + goto __400 } if !!(int32(*(*uint16)(unsafe.Pointer(pIdx6 + 100))&0x80>>7) != 0) { - goto __400 + goto __401 } szThreshold = int16(0) - goto __399 -__400: + goto __400 +__401: ; - goto __398 -__398: - pIdx6 = (*Index)(unsafe.Pointer(pIdx6)).FpNext - goto __397 goto __399 __399: + pIdx6 = (*Index)(unsafe.Pointer(pIdx6)).FpNext + goto __398 + goto __400 +__400: ; if !(szThreshold != 0) { - goto __401 + goto __402 } Xsqlite3OpenTable(tls, pParse, iTabCur, iDb, pTab10, OP_OpenRead) Xsqlite3VdbeAddOp3(tls, v, OP_IfSmaller, iTabCur, int32(U32(Xsqlite3VdbeCurrentAddr(tls, v)+2)+opMask&U32(1)), int32(szThreshold)) -__401: +__402: ; - zSubSql = Xsqlite3MPrintf(tls, db, ts+18048, + zSubSql = Xsqlite3MPrintf(tls, db, ts+18095, libc.VaList(bp+464, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName, (*Table)(unsafe.Pointer(pTab10)).FzName)) if !(opMask&U32(0x01) != 0) { - goto __402 + goto __403 } r11 = Xsqlite3GetTempReg(tls, pParse) Xsqlite3VdbeAddOp4(tls, v, OP_String8, 0, r11, 0, zSubSql, -6) Xsqlite3VdbeAddOp2(tls, v, OP_ResultRow, r11, 1) - goto __403 -__402: - Xsqlite3VdbeAddOp4(tls, v, OP_SqlExec, 0, 0, 0, zSubSql, -6) + goto __404 __403: + Xsqlite3VdbeAddOp4(tls, v, OP_SqlExec, 0, 0, 0, zSubSql, -6) +__404: ; - goto __394 -__394: - k4 = (*HashElem)(unsafe.Pointer(k4)).Fnext - goto __393 goto __395 __395: + k4 = (*HashElem)(unsafe.Pointer(k4)).Fnext + goto __394 + goto __396 +__396: ; - goto __390 -__390: - iDb++ - goto __389 goto __391 __391: + iDb++ + goto __390 + goto __392 +__392: ; Xsqlite3VdbeAddOp0(tls, v, OP_Expire) goto __15 @@ -84816,36 +84849,36 @@ __52: ; if !(zRight != 0) { - goto __404 + goto __405 } Xsqlite3_busy_timeout(tls, db, Xsqlite3Atoi(tls, zRight)) -__404: +__405: ; returnSingleInt(tls, v, 
int64((*Sqlite3)(unsafe.Pointer(db)).FbusyTimeout)) goto __15 __53: if !(zRight != 0 && Xsqlite3DecOrHexToI64(tls, zRight, bp+640) == SQLITE_OK) { - goto __405 + goto __406 } Xsqlite3_soft_heap_limit64(tls, *(*Sqlite3_int64)(unsafe.Pointer(bp + 640))) -__405: +__406: ; returnSingleInt(tls, v, Xsqlite3_soft_heap_limit64(tls, int64(-1))) goto __15 __54: if !(zRight != 0 && Xsqlite3DecOrHexToI64(tls, zRight, bp+648) == SQLITE_OK) { - goto __406 + goto __407 } iPrior = Xsqlite3_hard_heap_limit64(tls, int64(-1)) if !(*(*Sqlite3_int64)(unsafe.Pointer(bp + 648)) > int64(0) && (iPrior == int64(0) || iPrior > *(*Sqlite3_int64)(unsafe.Pointer(bp + 648)))) { - goto __407 + goto __408 } Xsqlite3_hard_heap_limit64(tls, *(*Sqlite3_int64)(unsafe.Pointer(bp + 648))) -__407: +__408: ; -__406: +__407: ; returnSingleInt(tls, v, Xsqlite3_hard_heap_limit64(tls, int64(-1))) goto __15 @@ -84854,10 +84887,10 @@ if !(zRight != 0 && Xsqlite3DecOrHexToI64(tls, zRight, bp+656) == SQLITE_OK && *(*Sqlite3_int64)(unsafe.Pointer(bp + 656)) >= int64(0)) { - goto __408 + goto __409 } Xsqlite3_limit(tls, db, SQLITE_LIMIT_WORKER_THREADS, int32(*(*Sqlite3_int64)(unsafe.Pointer(bp + 656))&int64(0x7fffffff))) -__408: +__409: ; returnSingleInt(tls, v, int64(Xsqlite3_limit(tls, db, SQLITE_LIMIT_WORKER_THREADS, -1))) goto __15 @@ -84866,10 +84899,10 @@ if !(zRight != 0 && Xsqlite3DecOrHexToI64(tls, zRight, bp+664) == SQLITE_OK && *(*Sqlite3_int64)(unsafe.Pointer(bp + 664)) >= int64(0)) { - goto __409 + goto __410 } (*Sqlite3)(unsafe.Pointer(db)).FnAnalysisLimit = int32(*(*Sqlite3_int64)(unsafe.Pointer(bp + 664)) & int64(0x7fffffff)) -__409: +__410: ; returnSingleInt(tls, v, int64((*Sqlite3)(unsafe.Pointer(db)).FnAnalysisLimit)) goto __15 @@ -84877,10 +84910,10 @@ __15: ; if !(int32((*PragmaName)(unsafe.Pointer(pPragma)).FmPragFlg)&PragFlg_NoColumns1 != 0 && zRight != 0) { - goto __410 + goto __411 } -__410: +__411: ; pragma_out: Xsqlite3DbFree(tls, db, zLeft) @@ -84932,14 +84965,14 @@ {Fopcode: U8(OP_Goto), Fp2: int8(3)}, } var encnames1 = [9]EncName{ - {FzName: ts + 18066, Fenc: U8(SQLITE_UTF8)}, - {FzName: ts + 18071, Fenc: U8(SQLITE_UTF8)}, - {FzName: ts + 18077, Fenc: U8(SQLITE_UTF16LE)}, - {FzName: ts + 18086, Fenc: U8(SQLITE_UTF16BE)}, - {FzName: ts + 18095, Fenc: U8(SQLITE_UTF16LE)}, - {FzName: ts + 18103, Fenc: U8(SQLITE_UTF16BE)}, - {FzName: ts + 18111}, - {FzName: ts + 18118}, + {FzName: ts + 18113, Fenc: U8(SQLITE_UTF8)}, + {FzName: ts + 18118, Fenc: U8(SQLITE_UTF8)}, + {FzName: ts + 18124, Fenc: U8(SQLITE_UTF16LE)}, + {FzName: ts + 18133, Fenc: U8(SQLITE_UTF16BE)}, + {FzName: ts + 18142, Fenc: U8(SQLITE_UTF16LE)}, + {FzName: ts + 18150, Fenc: U8(SQLITE_UTF16BE)}, + {FzName: ts + 18158}, + {FzName: ts + 18165}, {}, } var setCookie = [2]VdbeOpList{ @@ -84991,7 +85024,7 @@ _ = argc _ = argv Xsqlite3StrAccumInit(tls, bp+32, uintptr(0), bp+64, int32(unsafe.Sizeof([200]int8{})), 0) - Xsqlite3_str_appendall(tls, bp+32, ts+18124) + Xsqlite3_str_appendall(tls, bp+32, ts+18171) i = 0 j = int32((*PragmaName)(unsafe.Pointer(pPragma)).FiPragCName) __1: @@ -84999,7 +85032,7 @@ goto __3 } { - Xsqlite3_str_appendf(tls, bp+32, ts+18139, libc.VaList(bp, int32(cSep), pragCName[j])) + Xsqlite3_str_appendf(tls, bp+32, ts+18186, libc.VaList(bp, int32(cSep), pragCName[j])) cSep = int8(',') } @@ -85012,16 +85045,16 @@ __3: ; if i == 0 { - Xsqlite3_str_appendf(tls, bp+32, ts+18146, libc.VaList(bp+16, (*PragmaName)(unsafe.Pointer(pPragma)).FzName)) + Xsqlite3_str_appendf(tls, bp+32, ts+18193, libc.VaList(bp+16, 
(*PragmaName)(unsafe.Pointer(pPragma)).FzName)) i++ } j = 0 if int32((*PragmaName)(unsafe.Pointer(pPragma)).FmPragFlg)&PragFlg_Result1 != 0 { - Xsqlite3_str_appendall(tls, bp+32, ts+18152) + Xsqlite3_str_appendall(tls, bp+32, ts+18199) j++ } if int32((*PragmaName)(unsafe.Pointer(pPragma)).FmPragFlg)&(PragFlg_SchemaOpt|PragFlg_SchemaReq) != 0 { - Xsqlite3_str_appendall(tls, bp+32, ts+18164) + Xsqlite3_str_appendall(tls, bp+32, ts+18211) j++ } Xsqlite3_str_append(tls, bp+32, ts+4957, 1) @@ -85204,13 +85237,13 @@ __3: ; Xsqlite3StrAccumInit(tls, bp+32, uintptr(0), uintptr(0), 0, *(*int32)(unsafe.Pointer((*PragmaVtab)(unsafe.Pointer(pTab)).Fdb + 136 + 1*4))) - Xsqlite3_str_appendall(tls, bp+32, ts+18179) + Xsqlite3_str_appendall(tls, bp+32, ts+18226) if *(*uintptr)(unsafe.Pointer(pCsr + 24 + 1*8)) != 0 { - Xsqlite3_str_appendf(tls, bp+32, ts+18187, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(pCsr + 24 + 1*8)))) + Xsqlite3_str_appendf(tls, bp+32, ts+18234, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(pCsr + 24 + 1*8)))) } Xsqlite3_str_appendall(tls, bp+32, (*PragmaName)(unsafe.Pointer((*PragmaVtab)(unsafe.Pointer(pTab)).FpName)).FzName) if *(*uintptr)(unsafe.Pointer(pCsr + 24)) != 0 { - Xsqlite3_str_appendf(tls, bp+32, ts+18191, libc.VaList(bp+16, *(*uintptr)(unsafe.Pointer(pCsr + 24)))) + Xsqlite3_str_appendf(tls, bp+32, ts+18238, libc.VaList(bp+16, *(*uintptr)(unsafe.Pointer(pCsr + 24)))) } zSql = Xsqlite3StrAccumFinish(tls, bp+32) if zSql == uintptr(0) { @@ -85287,12 +85320,12 @@ } else if *(*uintptr)(unsafe.Pointer((*InitData)(unsafe.Pointer(pData)).FpzErrMsg)) != uintptr(0) { } else if (*InitData)(unsafe.Pointer(pData)).FmInitFlags&U32(INITFLAG_AlterMask) != 0 { *(*uintptr)(unsafe.Pointer((*InitData)(unsafe.Pointer(pData)).FpzErrMsg)) = Xsqlite3MPrintf(tls, db, - ts+18195, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(azObj)), *(*uintptr)(unsafe.Pointer(azObj + 1*8)), + ts+18242, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(azObj)), *(*uintptr)(unsafe.Pointer(azObj + 1*8)), azAlterType[(*InitData)(unsafe.Pointer(pData)).FmInitFlags&U32(INITFLAG_AlterMask)-U32(1)], zExtra)) (*InitData)(unsafe.Pointer(pData)).Frc = SQLITE_ERROR } else if (*Sqlite3)(unsafe.Pointer(db)).Fflags&uint64(SQLITE_WriteSchema) != 0 { - (*InitData)(unsafe.Pointer(pData)).Frc = Xsqlite3CorruptError(tls, 137196) + (*InitData)(unsafe.Pointer(pData)).Frc = Xsqlite3CorruptError(tls, 137249) } else { var z uintptr var zObj uintptr @@ -85301,19 +85334,19 @@ } else { zObj = ts + 5008 } - z = Xsqlite3MPrintf(tls, db, ts+18223, libc.VaList(bp+32, zObj)) + z = Xsqlite3MPrintf(tls, db, ts+18270, libc.VaList(bp+32, zObj)) if zExtra != 0 && *(*int8)(unsafe.Pointer(zExtra)) != 0 { - z = Xsqlite3MPrintf(tls, db, ts+18254, libc.VaList(bp+40, z, zExtra)) + z = Xsqlite3MPrintf(tls, db, ts+18301, libc.VaList(bp+40, z, zExtra)) } *(*uintptr)(unsafe.Pointer((*InitData)(unsafe.Pointer(pData)).FpzErrMsg)) = z - (*InitData)(unsafe.Pointer(pData)).Frc = Xsqlite3CorruptError(tls, 137203) + (*InitData)(unsafe.Pointer(pData)).Frc = Xsqlite3CorruptError(tls, 137256) } } var azAlterType = [3]uintptr{ - ts + 18262, - ts + 18269, - ts + 18281, + ts + 18309, + ts + 18316, + ts + 18328, } // Check to see if any sibling index (another index on the same table) @@ -85405,7 +85438,7 @@ var pIndex uintptr pIndex = Xsqlite3FindIndex(tls, db, *(*uintptr)(unsafe.Pointer(argv + 1*8)), (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName) if pIndex == uintptr(0) { - corruptSchema(tls, pData, argv, ts+18292) + corruptSchema(tls, pData, 
argv, ts+18339) } else if Xsqlite3GetUInt32(tls, *(*uintptr)(unsafe.Pointer(argv + 3*8)), pIndex+88) == 0 || (*Index)(unsafe.Pointer(pIndex)).Ftnum < Pgno(2) || (*Index)(unsafe.Pointer(pIndex)).Ftnum > (*InitData)(unsafe.Pointer(pData)).FmxPage || @@ -85453,7 +85486,7 @@ }()) *(*uintptr)(unsafe.Pointer(bp + 16 + 2*8)) = *(*uintptr)(unsafe.Pointer(bp + 16 + 1*8)) *(*uintptr)(unsafe.Pointer(bp + 16 + 3*8)) = ts + 7938 - *(*uintptr)(unsafe.Pointer(bp + 16 + 4*8)) = ts + 18305 + *(*uintptr)(unsafe.Pointer(bp + 16 + 4*8)) = ts + 18352 *(*uintptr)(unsafe.Pointer(bp + 16 + 5*8)) = uintptr(0) (*InitData)(unsafe.Pointer(bp + 64)).Fdb = db (*InitData)(unsafe.Pointer(bp + 64)).FiDb = iDb @@ -85582,7 +85615,7 @@ if !(int32((*Schema)(unsafe.Pointer((*Db)(unsafe.Pointer(pDb)).FpSchema)).Ffile_format) > SQLITE_MAX_FILE_FORMAT) { goto __19 } - Xsqlite3SetString(tls, pzErrMsg, db, ts+18377) + Xsqlite3SetString(tls, pzErrMsg, db, ts+18424) rc = SQLITE_ERROR goto initone_error_out __19: @@ -85596,7 +85629,7 @@ (*InitData)(unsafe.Pointer(bp + 64)).FmxPage = Xsqlite3BtreeLastPage(tls, (*Db)(unsafe.Pointer(pDb)).FpBt) zSql = Xsqlite3MPrintf(tls, db, - ts+18401, + ts+18448, libc.VaList(bp, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName, zSchemaTabName)) xAuth = (*Sqlite3)(unsafe.Pointer(db)).FxAuth @@ -85928,7 +85961,7 @@ goto __8 } zDb = (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + uintptr(i)*32)).FzDbSName - Xsqlite3ErrorWithMsg(tls, db, rc, ts+18435, libc.VaList(bp, zDb)) + Xsqlite3ErrorWithMsg(tls, db, rc, ts+18482, libc.VaList(bp, zDb)) goto end_prepare __8: @@ -85958,7 +85991,7 @@ if !(nBytes > mxLen) { goto __12 } - Xsqlite3ErrorWithMsg(tls, db, SQLITE_TOOBIG, ts+18465, 0) + Xsqlite3ErrorWithMsg(tls, db, SQLITE_TOOBIG, ts+18512, 0) rc = Xsqlite3ApiExit(tls, db, SQLITE_TOOBIG) goto end_prepare __12: @@ -86054,7 +86087,7 @@ *(*uintptr)(unsafe.Pointer(ppStmt)) = uintptr(0) if !(Xsqlite3SafetyCheckOk(tls, db) != 0) || zSql == uintptr(0) { - return Xsqlite3MisuseError(tls, 137995) + return Xsqlite3MisuseError(tls, 138048) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) Xsqlite3BtreeEnterAll(tls, db) @@ -86153,7 +86186,7 @@ *(*uintptr)(unsafe.Pointer(ppStmt)) = uintptr(0) if !(Xsqlite3SafetyCheckOk(tls, db) != 0) || zSql == uintptr(0) { - return Xsqlite3MisuseError(tls, 138143) + return Xsqlite3MisuseError(tls, 138196) } if nBytes >= 0 { var sz int32 @@ -86480,13 +86513,13 @@ zSp2++ } Xsqlite3ErrorMsg(tls, pParse, - ts+18484, libc.VaList(bp, pA, zSp1, pB, zSp2, pC)) + ts+18531, libc.VaList(bp, pA, zSp1, pB, zSp2, pC)) jointype = JT_INNER } return jointype } -var zKeyText = *(*[34]int8)(unsafe.Pointer(ts + 18514)) +var zKeyText = *(*[34]int8)(unsafe.Pointer(ts + 18561)) var aKeyword = [7]struct { Fi U8 FnChar U8 @@ -86661,7 +86694,7 @@ var pUsing uintptr = uintptr(0) if uint32(int32(*(*uint16)(unsafe.Pointer(pRight + 60 + 4))&0x400>>10)) != 0 || *(*uintptr)(unsafe.Pointer(pRight + 72)) != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+18548, libc.VaList(bp, 0)) + ts+18595, libc.VaList(bp, 0)) return 1 } for j = 0; j < int32((*Table)(unsafe.Pointer(pRightTab)).FnCol); j++ { @@ -86706,7 +86739,7 @@ tableAndColumnIndex(tls, pSrc, 0, i, zName, bp+24, bp+28, int32(*(*uint16)(unsafe.Pointer(pRight + 60 + 4))&0x1000>>12)) == 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+18598, libc.VaList(bp+8, zName)) + ts+18645, libc.VaList(bp+8, zName)) return 1 } pE1 = Xsqlite3CreateColumnExpr(tls, db, pSrc, *(*int32)(unsafe.Pointer(bp + 24)), *(*int32)(unsafe.Pointer(bp + 
28))) @@ -86717,7 +86750,7 @@ int32(*(*uint16)(unsafe.Pointer(pRight + 60 + 4))&0x1000>>12)) != 0 { if int32(*(*uint16)(unsafe.Pointer(pSrc + 8 + uintptr(*(*int32)(unsafe.Pointer(bp + 24)))*104 + 60 + 4))&0x400>>10) == 0 || Xsqlite3IdListIndex(tls, *(*uintptr)(unsafe.Pointer(pSrc + 8 + uintptr(*(*int32)(unsafe.Pointer(bp + 24)))*104 + 72)), zName) < 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+18662, + Xsqlite3ErrorMsg(tls, pParse, ts+18709, libc.VaList(bp+16, zName)) break } @@ -87345,16 +87378,16 @@ var z uintptr switch id { case TK_ALL: - z = ts + 18699 + z = ts + 18746 break case TK_INTERSECT: - z = ts + 18709 + z = ts + 18756 break case TK_EXCEPT: - z = ts + 18719 + z = ts + 18766 break default: - z = ts + 18726 + z = ts + 18773 break } return z @@ -87364,7 +87397,7 @@ bp := tls.Alloc(8) defer tls.Free(8) - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18732, libc.VaList(bp, zUsage)) + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18779, libc.VaList(bp, zUsage)) } func generateSortTail(tls *libc.TLS, pParse uintptr, p uintptr, pSort uintptr, nColumn int32, pDest uintptr) { @@ -87390,9 +87423,9 @@ var nRefKey int32 = 0 var aOutEx uintptr = (*Select)(unsafe.Pointer(p)).FpEList + 8 - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18755, libc.VaList(bp, func() uintptr { + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18802, libc.VaList(bp, func() uintptr { if (*SortCtx)(unsafe.Pointer(pSort)).FnOBSat > 0 { - return ts + 18786 + return ts + 18833 } return ts + 1554 }())) @@ -87736,7 +87769,7 @@ } else { var z uintptr = (*ExprList_item)(unsafe.Pointer(pEList + 8 + uintptr(i)*32)).FzEName if z == uintptr(0) { - z = Xsqlite3MPrintf(tls, db, ts+18801, libc.VaList(bp+16, i+1)) + z = Xsqlite3MPrintf(tls, db, ts+18848, libc.VaList(bp+16, i+1)) } else { z = Xsqlite3DbStrDup(tls, db, z) } @@ -87836,7 +87869,7 @@ if zName != 0 && !(Xsqlite3IsTrueOrFalse(tls, zName) != 0) { zName = Xsqlite3DbStrDup(tls, db, zName) } else { - zName = Xsqlite3MPrintf(tls, db, ts+18801, libc.VaList(bp, i+1)) + zName = Xsqlite3MPrintf(tls, db, ts+18848, libc.VaList(bp, i+1)) } *(*U32)(unsafe.Pointer(bp + 56)) = U32(0) @@ -87852,7 +87885,7 @@ nName = j } } - zName = Xsqlite3MPrintf(tls, db, ts+18810, libc.VaList(bp+8, nName, zName, libc.PreIncUint32(&*(*U32)(unsafe.Pointer(bp + 56)), 1))) + zName = Xsqlite3MPrintf(tls, db, ts+18857, libc.VaList(bp+8, nName, zName, libc.PreIncUint32(&*(*U32)(unsafe.Pointer(bp + 56)), 1))) Xsqlite3ProgressCheck(tls, pParse) if *(*U32)(unsafe.Pointer(bp + 56)) > U32(3) { Xsqlite3_randomness(tls, int32(unsafe.Sizeof(U32(0))), bp+56) @@ -87935,8 +87968,6 @@ (*Column)(unsafe.Pointer(pCol)).Faffinity = Xsqlite3ExprAffinity(tls, p) if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) <= SQLITE_AFF_NONE { (*Column)(unsafe.Pointer(pCol)).Faffinity = aff - } else if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) >= SQLITE_AFF_NUMERIC && int32((*Expr)(unsafe.Pointer(p)).Fop) == TK_CAST { - (*Column)(unsafe.Pointer(pCol)).Faffinity = int8(SQLITE_AFF_FLEXNUM) } if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) >= SQLITE_AFF_TEXT && (*Select)(unsafe.Pointer(pSelect)).FpNext != 0 { var m int32 = 0 @@ -87951,12 +87982,15 @@ } else if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) >= SQLITE_AFF_NUMERIC && m&0x02 != 0 { (*Column)(unsafe.Pointer(pCol)).Faffinity = int8(SQLITE_AFF_BLOB) } + if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) >= SQLITE_AFF_NUMERIC && int32((*Expr)(unsafe.Pointer(p)).Fop) == TK_CAST { + (*Column)(unsafe.Pointer(pCol)).Faffinity = int8(SQLITE_AFF_FLEXNUM) + } } zType = 
columnTypeImpl(tls, bp, p, uintptr(0), uintptr(0), uintptr(0)) if zType == uintptr(0) || int32((*Column)(unsafe.Pointer(pCol)).Faffinity) != int32(Xsqlite3AffinityType(tls, zType, uintptr(0))) { if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) == SQLITE_AFF_NUMERIC || int32((*Column)(unsafe.Pointer(pCol)).Faffinity) == SQLITE_AFF_FLEXNUM { - zType = ts + 18818 + zType = ts + 18865 } else { zType = uintptr(0) for j = 1; j < SQLITE_N_STDTYPE; j++ { @@ -88172,7 +88206,7 @@ if !((*Select)(unsafe.Pointer(p)).FpWin != 0) { goto __1 } - Xsqlite3ErrorMsg(tls, pParse, ts+18822, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+18869, 0) return __1: ; @@ -88263,7 +88297,7 @@ if !((*Select)(unsafe.Pointer(pFirstRec)).FselFlags&U32(SF_Aggregate) != 0) { goto __15 } - Xsqlite3ErrorMsg(tls, pParse, ts+18871, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+18918, 0) goto end_of_recursive_query __15: ; @@ -88283,7 +88317,7 @@ ; pSetup = (*Select)(unsafe.Pointer(pFirstRec)).FpPrior (*Select)(unsafe.Pointer(pSetup)).FpNext = uintptr(0) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18913, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18960, 0) rc = Xsqlite3Select(tls, pParse, pSetup, bp) (*Select)(unsafe.Pointer(pSetup)).FpNext = p if !(rc != 0) { @@ -88320,7 +88354,7 @@ Xsqlite3VdbeResolveLabel(tls, v, addrCont) (*Select)(unsafe.Pointer(pFirstRec)).FpPrior = uintptr(0) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18919, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18966, 0) Xsqlite3Select(tls, pParse, p, bp) (*Select)(unsafe.Pointer(pFirstRec)).FpPrior = pSetup @@ -88354,11 +88388,11 @@ p = (*Select)(unsafe.Pointer(p)).FpPrior nRow = nRow + bShowAll } - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18934, libc.VaList(bp, nRow, func() uintptr { + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18981, libc.VaList(bp, nRow, func() uintptr { if nRow == 1 { return ts + 1554 } - return ts + 18957 + return ts + 19004 }())) for p != 0 { selectInnerLoop(tls, pParse, p, -1, uintptr(0), uintptr(0), pDest, 1, 1) @@ -88459,8 +88493,8 @@ if !((*Select)(unsafe.Pointer(pPrior)).FpPrior == uintptr(0)) { goto __8 } - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18959, 0) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18974, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19006, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19021, 0) __8: ; switch int32((*Select)(unsafe.Pointer(p)).Fop) { @@ -88507,7 +88541,7 @@ ; __15: ; - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18699, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18746, 0) rc = Xsqlite3Select(tls, pParse, p, bp+16) @@ -88574,7 +88608,7 @@ pLimit = (*Select)(unsafe.Pointer(p)).FpLimit (*Select)(unsafe.Pointer(p)).FpLimit = uintptr(0) (*SelectDest)(unsafe.Pointer(bp + 64)).FeDest = op - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18993, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19040, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) rc = Xsqlite3Select(tls, pParse, p, bp+64) @@ -88636,7 +88670,7 @@ pLimit1 = (*Select)(unsafe.Pointer(p)).FpLimit (*Select)(unsafe.Pointer(p)).FpLimit = uintptr(0) (*SelectDest)(unsafe.Pointer(bp + 104)).FiSDParm = tab2 - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18993, libc.VaList(bp+8, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19040, libc.VaList(bp+8, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) rc = 
Xsqlite3Select(tls, pParse, p, bp+104) @@ -88789,10 +88823,10 @@ defer tls.Free(8) if (*Select)(unsafe.Pointer(p)).FselFlags&U32(SF_Values) != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+19014, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+19061, 0) } else { Xsqlite3ErrorMsg(tls, pParse, - ts+19060, + ts+19107, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) } } @@ -89046,8 +89080,8 @@ (*Select)(unsafe.Pointer(pPrior)).FpNext = uintptr(0) (*Select)(unsafe.Pointer(pPrior)).FpOrderBy = Xsqlite3ExprListDup(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pOrderBy, 0) - Xsqlite3ResolveOrderGroupBy(tls, pParse, p, (*Select)(unsafe.Pointer(p)).FpOrderBy, ts+7234) - Xsqlite3ResolveOrderGroupBy(tls, pParse, pPrior, (*Select)(unsafe.Pointer(pPrior)).FpOrderBy, ts+7234) + Xsqlite3ResolveOrderGroupBy(tls, pParse, p, (*Select)(unsafe.Pointer(p)).FpOrderBy, ts+7223) + Xsqlite3ResolveOrderGroupBy(tls, pParse, pPrior, (*Select)(unsafe.Pointer(pPrior)).FpOrderBy, ts+7223) computeLimitRegisters(tls, pParse, p, labelEnd) if (*Select)(unsafe.Pointer(p)).FiLimit != 0 && op == TK_ALL { @@ -89074,13 +89108,13 @@ Xsqlite3SelectDestInit(tls, bp+8, SRT_Coroutine, regAddrA) Xsqlite3SelectDestInit(tls, bp+48, SRT_Coroutine, regAddrB) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19142, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19189, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) addrSelectA = Xsqlite3VdbeCurrentAddr(tls, v) + 1 addr1 = Xsqlite3VdbeAddOp3(tls, v, OP_InitCoroutine, regAddrA, 0, addrSelectA) (*Select)(unsafe.Pointer(pPrior)).FiLimit = regLimitA - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19153, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19200, 0) Xsqlite3Select(tls, pParse, pPrior, bp+8) Xsqlite3VdbeEndCoroutine(tls, v, regAddrA) Xsqlite3VdbeJumpHere(tls, v, addr1) @@ -89092,7 +89126,7 @@ savedOffset = (*Select)(unsafe.Pointer(p)).FiOffset (*Select)(unsafe.Pointer(p)).FiLimit = regLimitB (*Select)(unsafe.Pointer(p)).FiOffset = 0 - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19158, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19205, 0) Xsqlite3Select(tls, pParse, p, bp+48) (*Select)(unsafe.Pointer(p)).FiLimit = savedLimit (*Select)(unsafe.Pointer(p)).FiOffset = savedOffset @@ -89280,7 +89314,8 @@ Xsqlite3VectorErrorMsg(tls, (*SubstContext)(unsafe.Pointer(pSubst)).FpParse, pCopy) } else { var db uintptr = (*Parse)(unsafe.Pointer((*SubstContext)(unsafe.Pointer(pSubst)).FpParse)).Fdb - if (*SubstContext)(unsafe.Pointer(pSubst)).FisOuterJoin != 0 && int32((*Expr)(unsafe.Pointer(pCopy)).Fop) != TK_COLUMN { + if (*SubstContext)(unsafe.Pointer(pSubst)).FisOuterJoin != 0 && + (int32((*Expr)(unsafe.Pointer(pCopy)).Fop) != TK_COLUMN || (*Expr)(unsafe.Pointer(pCopy)).FiTable != (*SubstContext)(unsafe.Pointer(pSubst)).FiNewTable) { libc.Xmemset(tls, bp, 0, uint64(unsafe.Sizeof(Expr{}))) (*Expr)(unsafe.Pointer(bp)).Fop = U8(TK_IF_NULL_ROW) (*Expr)(unsafe.Pointer(bp)).FpLeft = pCopy @@ -90179,7 +90214,7 @@ for pIdx = (*Table)(unsafe.Pointer(pTab)).FpIndex; pIdx != 0 && Xsqlite3StrICmp(tls, (*Index)(unsafe.Pointer(pIdx)).FzName, zIndexedBy) != 0; pIdx = (*Index)(unsafe.Pointer(pIdx)).FpNext { } if !(pIdx != 0) { - Xsqlite3ErrorMsg(tls, pParse, ts+19164, libc.VaList(bp, zIndexedBy, 0)) + Xsqlite3ErrorMsg(tls, pParse, ts+19211, libc.VaList(bp, zIndexedBy, 0)) (*Parse)(unsafe.Pointer(pParse)).FcheckSchema = U8(1) return SQLITE_ERROR } @@ -90262,7 
+90297,7 @@ defer tls.Free(8) if uint32(int32(*(*uint16)(unsafe.Pointer(pFrom + 60 + 4))&0x4>>2)) != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+19182, libc.VaList(bp, (*SrcItem)(unsafe.Pointer(pFrom)).FzName)) + Xsqlite3ErrorMsg(tls, pParse, ts+19229, libc.VaList(bp, (*SrcItem)(unsafe.Pointer(pFrom)).FzName)) return 1 } return 0 @@ -90391,7 +90426,7 @@ *(*U32)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pFrom)).FpSelect + 4)) |= U32(SF_CopyCte) if uint32(int32(*(*uint16)(unsafe.Pointer(pFrom + 60 + 4))&0x2>>1)) != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+19205, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(pFrom + 88)))) + Xsqlite3ErrorMsg(tls, pParse, ts+19252, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(pFrom + 88)))) return 2 } libc.SetBitFieldPtr16Uint32(pFrom+60+4, uint32(1), 8, 0x100) @@ -90414,7 +90449,7 @@ libc.SetBitFieldPtr16Uint32(pItem+60+4, uint32(1), 6, 0x40) if (*Select)(unsafe.Pointer(pRecTerm)).FselFlags&U32(SF_Recursive) != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+19225, libc.VaList(bp+16, (*Cte)(unsafe.Pointer(pCte)).FzName)) + ts+19272, libc.VaList(bp+16, (*Cte)(unsafe.Pointer(pCte)).FzName)) return 2 } *(*U32)(unsafe.Pointer(pRecTerm + 4)) |= U32(SF_Recursive) @@ -90430,7 +90465,7 @@ pRecTerm = (*Select)(unsafe.Pointer(pRecTerm)).FpPrior } - (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19268 + (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19315 pSavedWith = (*Parse)(unsafe.Pointer(pParse)).FpWith (*Parse)(unsafe.Pointer(pParse)).FpWith = *(*uintptr)(unsafe.Pointer(bp + 48)) if (*Select)(unsafe.Pointer(pSel)).FselFlags&U32(SF_Recursive) != 0 { @@ -90456,7 +90491,7 @@ pEList = (*Select)(unsafe.Pointer(pLeft)).FpEList if (*Cte)(unsafe.Pointer(pCte)).FpCols != 0 { if pEList != 0 && (*ExprList)(unsafe.Pointer(pEList)).FnExpr != (*ExprList)(unsafe.Pointer((*Cte)(unsafe.Pointer(pCte)).FpCols)).FnExpr { - Xsqlite3ErrorMsg(tls, pParse, ts+19291, + Xsqlite3ErrorMsg(tls, pParse, ts+19338, libc.VaList(bp+24, (*Cte)(unsafe.Pointer(pCte)).FzName, (*ExprList)(unsafe.Pointer(pEList)).FnExpr, (*ExprList)(unsafe.Pointer((*Cte)(unsafe.Pointer(pCte)).FpCols)).FnExpr)) (*Parse)(unsafe.Pointer(pParse)).FpWith = pSavedWith return 2 @@ -90467,9 +90502,9 @@ Xsqlite3ColumnsFromExprList(tls, pParse, pEList, pTab+54, pTab+8) if bMayRecursive != 0 { if (*Select)(unsafe.Pointer(pSel)).FselFlags&U32(SF_Recursive) != 0 { - (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19329 + (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19376 } else { - (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19363 + (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19410 } Xsqlite3WalkSelect(tls, pWalker, pSel) } @@ -90516,7 +90551,7 @@ if (*SrcItem)(unsafe.Pointer(pFrom)).FzAlias != 0 { (*Table)(unsafe.Pointer(pTab)).FzName = Xsqlite3DbStrDup(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, (*SrcItem)(unsafe.Pointer(pFrom)).FzAlias) } else { - (*Table)(unsafe.Pointer(pTab)).FzName = Xsqlite3MPrintf(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, ts+19401, libc.VaList(bp, pFrom)) + (*Table)(unsafe.Pointer(pTab)).FzName = Xsqlite3MPrintf(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, ts+19448, libc.VaList(bp, pFrom)) } for (*Select)(unsafe.Pointer(pSel)).FpPrior != 0 { pSel = (*Select)(unsafe.Pointer(pSel)).FpPrior @@ -90628,7 +90663,7 @@ return WRC_Abort } if (*Table)(unsafe.Pointer(pTab)).FnTabRef >= U32(0xffff) { - Xsqlite3ErrorMsg(tls, pParse, ts+19405, + Xsqlite3ErrorMsg(tls, pParse, ts+19452, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName)) (*SrcItem)(unsafe.Pointer(pFrom)).FpTab = uintptr(0) return WRC_Abort @@ -90647,7 +90682,7 @@ if 
int32((*Table)(unsafe.Pointer(pTab)).FeTabType) == TABTYP_VIEW { if (*Sqlite3)(unsafe.Pointer(db)).Fflags&uint64(SQLITE_EnableView) == uint64(0) && (*Table)(unsafe.Pointer(pTab)).FpSchema != (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+1*32)).FpSchema { - Xsqlite3ErrorMsg(tls, pParse, ts+19444, + Xsqlite3ErrorMsg(tls, pParse, ts+19491, libc.VaList(bp+8, (*Table)(unsafe.Pointer(pTab)).FzName)) } (*SrcItem)(unsafe.Pointer(pFrom)).FpSelect = Xsqlite3SelectDup(tls, db, *(*uintptr)(unsafe.Pointer(pTab + 64)), 0) @@ -90771,7 +90806,7 @@ if pNew != 0 { var pX uintptr = pNew + 8 + uintptr((*ExprList)(unsafe.Pointer(pNew)).FnExpr-1)*32 - (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3MPrintf(tls, db, ts+19475, libc.VaList(bp+24, zUName)) + (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3MPrintf(tls, db, ts+19522, libc.VaList(bp+24, zUName)) libc.SetBitFieldPtr16Uint32(pX+16+4, uint32(ENAME_TAB), 0, 0x3) libc.SetBitFieldPtr16Uint32(pX+16+4, uint32(1), 7, 0x80) } @@ -90836,7 +90871,7 @@ (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3DbStrDup(tls, db, (*ExprList_item)(unsafe.Pointer(pNestedFrom+8+uintptr(j)*32)).FzEName) } else { - (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3MPrintf(tls, db, ts+19480, + (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3MPrintf(tls, db, ts+19527, libc.VaList(bp+32, zSchemaName, zTabName, zName)) } @@ -90867,9 +90902,9 @@ ; if !(tableSeen != 0) { if zTName != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+19489, libc.VaList(bp+72, zTName)) + Xsqlite3ErrorMsg(tls, pParse, ts+19536, libc.VaList(bp+72, zTName)) } else { - Xsqlite3ErrorMsg(tls, pParse, ts+19507, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+19554, 0) } } } @@ -90879,7 +90914,7 @@ } if (*Select)(unsafe.Pointer(p)).FpEList != 0 { if (*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer(p)).FpEList)).FnExpr > *(*int32)(unsafe.Pointer(db + 136 + 2*4)) { - Xsqlite3ErrorMsg(tls, pParse, ts+19527, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+19574, 0) return WRC_Abort } if elistFlags&U32(EP_HasFunc|EP_Subquery) != U32(0) { @@ -91017,7 +91052,7 @@ (*AggInfo)(unsafe.Pointer(pAggInfo)).FnColumn = (*AggInfo)(unsafe.Pointer(pAggInfo)).FnAccumulator if int32((*AggInfo)(unsafe.Pointer(pAggInfo)).FnSortingColumn) > 0 { if (*AggInfo)(unsafe.Pointer(pAggInfo)).FnColumn == 0 { - (*AggInfo)(unsafe.Pointer(pAggInfo)).FnSortingColumn = U16(0) + (*AggInfo)(unsafe.Pointer(pAggInfo)).FnSortingColumn = U16((*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer(pSelect)).FpGroupBy)).FnExpr) } else { (*AggInfo)(unsafe.Pointer(pAggInfo)).FnSortingColumn = U16(int32((*AggInfo_col)(unsafe.Pointer((*AggInfo)(unsafe.Pointer(pAggInfo)).FaCol+uintptr((*AggInfo)(unsafe.Pointer(pAggInfo)).FnColumn-1)*24)).FiSorterColumn) + 1) } @@ -91101,13 +91136,13 @@ if *(*uintptr)(unsafe.Pointer(pE + 32)) == uintptr(0) || (*ExprList)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pE + 32)))).FnExpr != 1 { Xsqlite3ErrorMsg(tls, pParse, - ts+19558, 0) + ts+19605, 0) (*AggInfo_func)(unsafe.Pointer(pFunc)).FiDistinct = -1 } else { var pKeyInfo uintptr = Xsqlite3KeyInfoFromExprList(tls, pParse, *(*uintptr)(unsafe.Pointer(pE + 32)), 0, 0) (*AggInfo_func)(unsafe.Pointer(pFunc)).FiDistAddr = Xsqlite3VdbeAddOp4(tls, v, OP_OpenEphemeral, (*AggInfo_func)(unsafe.Pointer(pFunc)).FiDistinct, 0, 0, pKeyInfo, -8) - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+19609, libc.VaList(bp, (*FuncDef)(unsafe.Pointer((*AggInfo_func)(unsafe.Pointer(pFunc)).FpFunc)).FzName)) + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+19656, libc.VaList(bp, 
(*FuncDef)(unsafe.Pointer((*AggInfo_func)(unsafe.Pointer(pFunc)).FpFunc)).FzName)) } } @@ -91296,11 +91331,11 @@ if int32((*Parse)(unsafe.Pointer(pParse)).Fexplain) == 2 { var bCover int32 = libc.Bool32(pIdx != uintptr(0) && ((*Table)(unsafe.Pointer(pTab)).FtabFlags&U32(TF_WithoutRowid) == U32(0) || !(int32(*(*uint16)(unsafe.Pointer(pIdx + 100))&0x3>>0) == SQLITE_IDXTYPE_PRIMARYKEY))) - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+19642, + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+19689, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName, func() uintptr { if bCover != 0 { - return ts + 19654 + return ts + 19701 } return ts + 1554 }(), @@ -91628,7 +91663,7 @@ goto __7 } Xsqlite3ErrorMsg(tls, pParse, - ts+19677, + ts+19724, libc.VaList(bp, func() uintptr { if (*SrcItem)(unsafe.Pointer(p0)).FzAlias != 0 { return (*SrcItem)(unsafe.Pointer(p0)).FzAlias @@ -91689,7 +91724,7 @@ if !(int32((*Table)(unsafe.Pointer(pTab)).FnCol) != (*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer(pSub)).FpEList)).FnExpr) { goto __15 } - Xsqlite3ErrorMsg(tls, pParse, ts+19731, + Xsqlite3ErrorMsg(tls, pParse, ts+19778, libc.VaList(bp+8, int32((*Table)(unsafe.Pointer(pTab)).FnCol), (*Table)(unsafe.Pointer(pTab)).FzName, (*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer(pSub)).FpEList)).FnExpr)) goto select_end __15: @@ -91831,7 +91866,7 @@ (*SrcItem)(unsafe.Pointer(pItem1)).FaddrFillSub = addrTop Xsqlite3SelectDestInit(tls, bp+96, SRT_Coroutine, (*SrcItem)(unsafe.Pointer(pItem1)).FregReturn) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19771, libc.VaList(bp+32, pItem1)) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19818, libc.VaList(bp+32, pItem1)) Xsqlite3Select(tls, pParse, pSub1, bp+96) (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pItem1)).FpTab)).FnRowLogEst = (*Select)(unsafe.Pointer(pSub1)).FnSelectRow libc.SetBitFieldPtr16Uint32(pItem1+60+4, uint32(1), 5, 0x20) @@ -91890,7 +91925,7 @@ ; Xsqlite3SelectDestInit(tls, bp+96, SRT_EphemTab, (*SrcItem)(unsafe.Pointer(pItem1)).FiCursor) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19786, libc.VaList(bp+40, pItem1)) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19833, libc.VaList(bp+40, pItem1)) Xsqlite3Select(tls, pParse, pSub1, bp+96) (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pItem1)).FpTab)).FnRowLogEst = (*Select)(unsafe.Pointer(pSub1)).FnSelectRow if !(onceAddr != 0) { @@ -92361,9 +92396,9 @@ explainTempTable(tls, pParse, func() uintptr { if (*DistinctCtx)(unsafe.Pointer(bp+136)).FisTnct != 0 && (*Select)(unsafe.Pointer(p)).FselFlags&U32(SF_Distinct) == U32(0) { - return ts + 19802 + return ts + 19849 } - return ts + 19811 + return ts + 19858 }()) groupBySort = 1 @@ -92714,7 +92749,7 @@ if !(int32((*DistinctCtx)(unsafe.Pointer(bp+136)).FeTnctType) == WHERE_DISTINCT_UNORDERED) { goto __146 } - explainTempTable(tls, pParse, ts+19802) + explainTempTable(tls, pParse, ts+19849) __146: ; if !((*SortCtx)(unsafe.Pointer(bp+48)).FpOrderBy != 0) { @@ -92819,7 +92854,7 @@ } Xsqlite3_free(tls, (*TabResult)(unsafe.Pointer(p)).FzErrMsg) (*TabResult)(unsafe.Pointer(p)).FzErrMsg = Xsqlite3_mprintf(tls, - ts+19820, 0) + ts+19867, 0) (*TabResult)(unsafe.Pointer(p)).Frc = SQLITE_ERROR return 1 __11: @@ -93052,7 +93087,7 @@ if !((*Token)(unsafe.Pointer(pName2)).Fn > uint32(0)) { goto __3 } - Xsqlite3ErrorMsg(tls, pParse, ts+19885, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+19932, 0) goto trigger_cleanup __3: ; @@ -93096,7 +93131,7 @@ goto trigger_cleanup __8: ; - Xsqlite3FixInit(tls, bp+40, pParse, iDb, ts+19931, *(*uintptr)(unsafe.Pointer(bp + 32))) + 
Xsqlite3FixInit(tls, bp+40, pParse, iDb, ts+19978, *(*uintptr)(unsafe.Pointer(bp + 32))) if !(Xsqlite3FixSrcList(tls, bp+40, pTableName) != 0) { goto __9 } @@ -93114,7 +93149,7 @@ if !(int32((*Table)(unsafe.Pointer(pTab)).FeTabType) == TABTYP_VTAB) { goto __11 } - Xsqlite3ErrorMsg(tls, pParse, ts+19939, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+19986, 0) goto trigger_orphan_error __11: ; @@ -93126,7 +93161,7 @@ goto trigger_cleanup __12: ; - if !(Xsqlite3CheckObjectName(tls, pParse, zName, ts+19931, (*Table)(unsafe.Pointer(pTab)).FzName) != 0) { + if !(Xsqlite3CheckObjectName(tls, pParse, zName, ts+19978, (*Table)(unsafe.Pointer(pTab)).FzName) != 0) { goto __13 } goto trigger_cleanup @@ -93141,11 +93176,12 @@ if !!(noErr != 0) { goto __16 } - Xsqlite3ErrorMsg(tls, pParse, ts+19980, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(bp + 32)))) + Xsqlite3ErrorMsg(tls, pParse, ts+20027, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(bp + 32)))) goto __17 __16: ; Xsqlite3CodeVerifySchema(tls, pParse, iDb) + __17: ; goto trigger_cleanup @@ -93156,19 +93192,19 @@ if !(Xsqlite3_strnicmp(tls, (*Table)(unsafe.Pointer(pTab)).FzName, ts+6381, 7) == 0) { goto __18 } - Xsqlite3ErrorMsg(tls, pParse, ts+20006, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+20053, 0) goto trigger_cleanup __18: ; if !(int32((*Table)(unsafe.Pointer(pTab)).FeTabType) == TABTYP_VIEW && tr_tm != TK_INSTEAD) { goto __19 } - Xsqlite3ErrorMsg(tls, pParse, ts+20044, + Xsqlite3ErrorMsg(tls, pParse, ts+20091, libc.VaList(bp+8, func() uintptr { if tr_tm == TK_BEFORE { - return ts + 20081 + return ts + 20128 } - return ts + 20088 + return ts + 20135 }(), pTableName+8)) goto trigger_orphan_error __19: @@ -93177,7 +93213,7 @@ goto __20 } Xsqlite3ErrorMsg(tls, pParse, - ts+20094, libc.VaList(bp+24, pTableName+8)) + ts+20141, libc.VaList(bp+24, pTableName+8)) goto trigger_orphan_error __20: ; @@ -93326,7 +93362,7 @@ __3: ; Xsqlite3TokenInit(tls, bp+56, (*Trigger)(unsafe.Pointer(pTrig)).FzName) - Xsqlite3FixInit(tls, bp+72, pParse, iDb, ts+19931, bp+56) + Xsqlite3FixInit(tls, bp+72, pParse, iDb, ts+19978, bp+56) if !(Xsqlite3FixTriggerStep(tls, bp+72, (*Trigger)(unsafe.Pointer(pTrig)).Fstep_list) != 0 || Xsqlite3FixExpr(tls, bp+72, (*Trigger)(unsafe.Pointer(pTrig)).FpWhen) != 0) { goto __4 @@ -93359,7 +93395,7 @@ goto __12 } Xsqlite3ErrorMsg(tls, pParse, - ts+20140, + ts+20187, libc.VaList(bp, (*Trigger)(unsafe.Pointer(pTrig)).FzName, (*TriggerStep)(unsafe.Pointer(pStep)).FzTarget)) goto triggerfinish_cleanup __12: @@ -93384,13 +93420,13 @@ z = Xsqlite3DbStrNDup(tls, db, (*Token)(unsafe.Pointer(pAll)).Fz, uint64((*Token)(unsafe.Pointer(pAll)).Fn)) Xsqlite3NestedParse(tls, pParse, - ts+20188, + ts+20235, libc.VaList(bp+16, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName, zName, (*Trigger)(unsafe.Pointer(pTrig)).Ftable, z)) Xsqlite3DbFree(tls, db, z) Xsqlite3ChangeCookie(tls, pParse, iDb) Xsqlite3VdbeAddParseSchemaOp(tls, v, iDb, - Xsqlite3MPrintf(tls, db, ts+20263, libc.VaList(bp+48, zName)), uint16(0)) + Xsqlite3MPrintf(tls, db, ts+20310, libc.VaList(bp+48, zName)), uint16(0)) __7: ; __6: @@ -93646,7 +93682,7 @@ if !!(noErr != 0) { goto __9 } - Xsqlite3ErrorMsg(tls, pParse, ts+20292, libc.VaList(bp, pName+8)) + Xsqlite3ErrorMsg(tls, pParse, ts+20339, libc.VaList(bp, pName+8)) goto __10 __9: Xsqlite3CodeVerifyNamedSchema(tls, pParse, zDb) @@ -93699,7 +93735,7 @@ if libc.AssignUintptr(&v, Xsqlite3GetVdbe(tls, pParse)) != uintptr(0) { Xsqlite3NestedParse(tls, pParse, - ts+20312, + ts+20359, libc.VaList(bp, 
(*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName, (*Trigger)(unsafe.Pointer(pTrigger)).FzName)) Xsqlite3ChangeCookie(tls, pParse, iDb) Xsqlite3VdbeAddOp4(tls, v, OP_DropTrigger, iDb, 0, 0, (*Trigger)(unsafe.Pointer(pTrigger)).FzName, 0) @@ -93813,12 +93849,12 @@ goto __15 } Xsqlite3ErrorMsg(tls, pParse, - ts+20374, + ts+20421, libc.VaList(bp, func() uintptr { if op == TK_DELETE { - return ts + 20422 + return ts + 20469 } - return ts + 20429 + return ts + 20476 }())) __15: ; @@ -93932,7 +93968,7 @@ if int32((*Expr)(unsafe.Pointer((*Expr)(unsafe.Pointer(pTerm)).FpRight)).Fop) != TK_ASTERISK { return 0 } - Xsqlite3ErrorMsg(tls, pParse, ts+20436, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+20483, 0) return 1 } @@ -93998,7 +94034,7 @@ } Xsqlite3ExprListDelete(tls, db, (*Select)(unsafe.Pointer(bp)).FpEList) pNew = sqlite3ExpandReturning(tls, pParse, (*Returning)(unsafe.Pointer(pReturning)).FpReturnEL, pTab) - if !(int32((*Sqlite3)(unsafe.Pointer(db)).FmallocFailed) != 0) { + if (*Parse)(unsafe.Pointer(pParse)).FnErr == 0 { libc.Xmemset(tls, bp+240, 0, uint64(unsafe.Sizeof(NameContext{}))) if (*Returning)(unsafe.Pointer(pReturning)).FnRetCol == 0 { (*Returning)(unsafe.Pointer(pReturning)).FnRetCol = (*ExprList)(unsafe.Pointer(pNew)).FnExpr @@ -94162,7 +94198,7 @@ if v != 0 { if (*Trigger)(unsafe.Pointer(pTrigger)).FzName != 0 { Xsqlite3VdbeChangeP4(tls, v, -1, - Xsqlite3MPrintf(tls, db, ts+20478, libc.VaList(bp, (*Trigger)(unsafe.Pointer(pTrigger)).FzName)), -6) + Xsqlite3MPrintf(tls, db, ts+20525, libc.VaList(bp, (*Trigger)(unsafe.Pointer(pTrigger)).FzName)), -6) } if (*Trigger)(unsafe.Pointer(pTrigger)).FpWhen != 0 { @@ -94755,7 +94791,7 @@ } Xsqlite3ErrorMsg(tls, pParse, - ts+20492, + ts+20539, libc.VaList(bp, (*Column)(unsafe.Pointer((*Table)(unsafe.Pointer(pTab)).FaCol+uintptr(j)*24)).FzCnName)) goto update_cleanup __27: @@ -94787,7 +94823,7 @@ iRowidExpr = i goto __30 __29: - Xsqlite3ErrorMsg(tls, pParse, ts+20528, libc.VaList(bp+8, (*ExprList_item)(unsafe.Pointer(pChanges+8+uintptr(i)*32)).FzEName)) + Xsqlite3ErrorMsg(tls, pParse, ts+20575, libc.VaList(bp+8, (*ExprList_item)(unsafe.Pointer(pChanges+8+uintptr(i)*32)).FzEName)) (*Parse)(unsafe.Pointer(pParse)).FcheckSchema = U8(1) goto update_cleanup __30: @@ -95113,7 +95149,12 @@ goto __77 __76: flags = WHERE_ONEPASS_DESIRED - if !(!(int32((*Parse)(unsafe.Pointer(pParse)).Fnested) != 0) && !(pTrigger != 0) && !(hasFK != 0) && !(chngKey != 0) && !(*(*int32)(unsafe.Pointer(bp + 104)) != 0)) { + if !(!(int32((*Parse)(unsafe.Pointer(pParse)).Fnested) != 0) && + !(pTrigger != 0) && + !(hasFK != 0) && + !(chngKey != 0) && + !(*(*int32)(unsafe.Pointer(bp + 104)) != 0) && + (*NameContext)(unsafe.Pointer(bp+40)).FncFlags&NC_Subquery == 0) { goto __78 } flags = flags | WHERE_ONEPASS_MULTIROW @@ -95667,7 +95708,7 @@ if !(regRowCount != 0) { goto __169 } - Xsqlite3CodeChangeCount(tls, v, regRowCount, ts+20547) + Xsqlite3CodeChangeCount(tls, v, regRowCount, ts+20594) __169: ; update_cleanup: @@ -95973,10 +96014,10 @@ if nClause == 0 && (*Upsert)(unsafe.Pointer(pUpsert)).FpNextUpsert == uintptr(0) { *(*int8)(unsafe.Pointer(bp + 216)) = int8(0) } else { - Xsqlite3_snprintf(tls, int32(unsafe.Sizeof([16]int8{})), bp+216, ts+20560, libc.VaList(bp, nClause+1)) + Xsqlite3_snprintf(tls, int32(unsafe.Sizeof([16]int8{})), bp+216, ts+20607, libc.VaList(bp, nClause+1)) } Xsqlite3ErrorMsg(tls, pParse, - ts+20564, libc.VaList(bp+8, bp+216)) + ts+20611, libc.VaList(bp+8, bp+216)) return SQLITE_ERROR } @@ -96099,7 +96140,7 @@ var zSubSql 
uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp)), 0) if zSubSql != 0 && - (libc.Xstrncmp(tls, zSubSql, ts+20637, uint64(3)) == 0 || libc.Xstrncmp(tls, zSubSql, ts+20641, uint64(3)) == 0) { + (libc.Xstrncmp(tls, zSubSql, ts+20684, uint64(3)) == 0 || libc.Xstrncmp(tls, zSubSql, ts+20688, uint64(3)) == 0) { rc = execSql(tls, db, pzErrMsg, zSubSql) if rc != SQLITE_OK { break @@ -96247,14 +96288,14 @@ if !!(int32((*Sqlite3)(unsafe.Pointer(db)).FautoCommit) != 0) { goto __1 } - Xsqlite3SetString(tls, pzErrMsg, db, ts+20645) + Xsqlite3SetString(tls, pzErrMsg, db, ts+20692) return SQLITE_ERROR __1: ; if !((*Sqlite3)(unsafe.Pointer(db)).FnVdbeActive > 1) { goto __2 } - Xsqlite3SetString(tls, pzErrMsg, db, ts+20685) + Xsqlite3SetString(tls, pzErrMsg, db, ts+20732) return SQLITE_ERROR __2: ; @@ -96265,7 +96306,7 @@ if !(Xsqlite3_value_type(tls, pOut) != SQLITE_TEXT) { goto __5 } - Xsqlite3SetString(tls, pzErrMsg, db, ts+20728) + Xsqlite3SetString(tls, pzErrMsg, db, ts+20775) return SQLITE_ERROR __5: ; @@ -96293,7 +96334,7 @@ isMemDb = Xsqlite3PagerIsMemdb(tls, Xsqlite3BtreePager(tls, pMain)) nDb = (*Sqlite3)(unsafe.Pointer(db)).FnDb - rc = execSqlF(tls, db, pzErrMsg, ts+20746, libc.VaList(bp, zOut)) + rc = execSqlF(tls, db, pzErrMsg, ts+20793, libc.VaList(bp, zOut)) (*Sqlite3)(unsafe.Pointer(db)).FopenFlags = saved_openFlags if !(rc != SQLITE_OK) { goto __6 @@ -96313,7 +96354,7 @@ goto __8 } rc = SQLITE_ERROR - Xsqlite3SetString(tls, pzErrMsg, db, ts+20769) + Xsqlite3SetString(tls, pzErrMsg, db, ts+20816) goto end_of_vacuum __8: ; @@ -96373,7 +96414,7 @@ (*Sqlite3)(unsafe.Pointer(db)).Finit.FiDb = U8(nDb) rc = execSqlF(tls, db, pzErrMsg, - ts+20796, + ts+20843, libc.VaList(bp+8, zDbMain)) if !(rc != SQLITE_OK) { goto __13 @@ -96382,7 +96423,7 @@ __13: ; rc = execSqlF(tls, db, pzErrMsg, - ts+20904, + ts+20951, libc.VaList(bp+16, zDbMain)) if !(rc != SQLITE_OK) { goto __14 @@ -96393,7 +96434,7 @@ (*Sqlite3)(unsafe.Pointer(db)).Finit.FiDb = U8(0) rc = execSqlF(tls, db, pzErrMsg, - ts+20958, + ts+21005, libc.VaList(bp+24, zDbMain)) *(*U32)(unsafe.Pointer(db + 44)) &= libc.Uint32FromInt32(libc.CplInt32(DBFLAG_Vacuum)) @@ -96404,7 +96445,7 @@ __15: ; rc = execSqlF(tls, db, pzErrMsg, - ts+21109, + ts+21156, libc.VaList(bp+32, zDbMain)) if !(rc != 0) { goto __16 @@ -96833,11 +96874,11 @@ if pEnd != 0 { (*Parse)(unsafe.Pointer(pParse)).FsNameToken.Fn = uint32(int32((int64((*Token)(unsafe.Pointer(pEnd)).Fz)-int64((*Parse)(unsafe.Pointer(pParse)).FsNameToken.Fz))/1)) + (*Token)(unsafe.Pointer(pEnd)).Fn } - zStmt = Xsqlite3MPrintf(tls, db, ts+21239, libc.VaList(bp, pParse+272)) + zStmt = Xsqlite3MPrintf(tls, db, ts+21286, libc.VaList(bp, pParse+272)) iDb = Xsqlite3SchemaToIndex(tls, db, (*Table)(unsafe.Pointer(pTab)).FpSchema) Xsqlite3NestedParse(tls, pParse, - ts+21263, + ts+21310, libc.VaList(bp+8, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName, (*Table)(unsafe.Pointer(pTab)).FzName, (*Table)(unsafe.Pointer(pTab)).FzName, @@ -96847,7 +96888,7 @@ Xsqlite3ChangeCookie(tls, pParse, iDb) Xsqlite3VdbeAddOp0(tls, v, OP_Expire) - zWhere = Xsqlite3MPrintf(tls, db, ts+21362, libc.VaList(bp+48, (*Table)(unsafe.Pointer(pTab)).FzName, zStmt)) + zWhere = Xsqlite3MPrintf(tls, db, ts+21409, libc.VaList(bp+48, (*Table)(unsafe.Pointer(pTab)).FzName, zStmt)) Xsqlite3VdbeAddParseSchemaOp(tls, v, iDb, zWhere, uint16(0)) Xsqlite3DbFree(tls, db, zStmt) @@ -96908,7 +96949,7 @@ for pCtx = (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx; pCtx != 0; pCtx = 
(*VtabCtx)(unsafe.Pointer(pCtx)).FpPrior { if (*VtabCtx)(unsafe.Pointer(pCtx)).FpTab == pTab { *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, - ts+21381, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName)) + ts+21428, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName)) return SQLITE_LOCKED } } @@ -96936,9 +96977,11 @@ (*VtabCtx)(unsafe.Pointer(bp + 32)).FpPrior = (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx (*VtabCtx)(unsafe.Pointer(bp + 32)).FbDeclared = 0 (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx = bp + 32 + (*Table)(unsafe.Pointer(pTab)).FnTabRef++ rc = (*struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, uintptr, uintptr) int32 })(unsafe.Pointer(&struct{ uintptr }{xConstruct})).f(tls, db, (*Module)(unsafe.Pointer(pMod)).FpAux, nArg, azArg, pVTable+16, bp+64) + Xsqlite3DeleteTable(tls, db, pTab) (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx = (*VtabCtx)(unsafe.Pointer(bp + 32)).FpPrior if rc == SQLITE_NOMEM { Xsqlite3OomFault(tls, db) @@ -96946,7 +96989,7 @@ if SQLITE_OK != rc { if *(*uintptr)(unsafe.Pointer(bp + 64)) == uintptr(0) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+21423, libc.VaList(bp+8, zModuleName)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+21470, libc.VaList(bp+8, zModuleName)) } else { *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+3663, libc.VaList(bp+16, *(*uintptr)(unsafe.Pointer(bp + 64)))) Xsqlite3_free(tls, *(*uintptr)(unsafe.Pointer(bp + 64))) @@ -96958,7 +97001,7 @@ (*Module)(unsafe.Pointer(pMod)).FnRefModule++ (*VTable)(unsafe.Pointer(pVTable)).FnRef = 1 if (*VtabCtx)(unsafe.Pointer(bp+32)).FbDeclared == 0 { - var zFormat uintptr = ts + 21453 + var zFormat uintptr = ts + 21500 *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, zFormat, libc.VaList(bp+24, (*Table)(unsafe.Pointer(pTab)).FzName)) Xsqlite3VtabUnlock(tls, pVTable) rc = SQLITE_ERROR @@ -97032,7 +97075,7 @@ if !(pMod != 0) { var zModule uintptr = *(*uintptr)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pTab + 64 + 8)))) - Xsqlite3ErrorMsg(tls, pParse, ts+21499, libc.VaList(bp, zModule)) + Xsqlite3ErrorMsg(tls, pParse, ts+21546, libc.VaList(bp, zModule)) rc = SQLITE_ERROR } else { *(*uintptr)(unsafe.Pointer(bp + 16)) = uintptr(0) @@ -97090,7 +97133,7 @@ pMod = Xsqlite3HashFind(tls, db+576, zMod) if pMod == uintptr(0) || (*Sqlite3_module)(unsafe.Pointer((*Module)(unsafe.Pointer(pMod)).FpModule)).FxCreate == uintptr(0) || (*Sqlite3_module)(unsafe.Pointer((*Module)(unsafe.Pointer(pMod)).FpModule)).FxDestroy == uintptr(0) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+21499, libc.VaList(bp, zMod)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+21546, libc.VaList(bp, zMod)) rc = SQLITE_ERROR } else { rc = vtabCallConstructor(tls, db, pTab, pMod, (*Sqlite3_module)(unsafe.Pointer((*Module)(unsafe.Pointer(pMod)).FpModule)).FxCreate, pzErr) @@ -97124,7 +97167,7 @@ if !(pCtx != 0) || (*VtabCtx)(unsafe.Pointer(pCtx)).FbDeclared != 0 { Xsqlite3Error(tls, db, SQLITE_MISUSE) Xsqlite3_mutex_leave(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) - return Xsqlite3MisuseError(tls, 151030) + return Xsqlite3MisuseError(tls, 151102) } pTab = (*VtabCtx)(unsafe.Pointer(pCtx)).FpTab @@ -97577,7 +97620,7 @@ Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) p = (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx if !(p != 0) { - rc = Xsqlite3MisuseError(tls, 151521) + rc = Xsqlite3MisuseError(tls, 151593) } else { ap = va switch op { @@ -97604,7 +97647,7 @@ fallthrough default: { - rc = 
Xsqlite3MisuseError(tls, 151539) + rc = Xsqlite3MisuseError(tls, 151611) break } @@ -97835,7 +97878,7 @@ func explainIndexColumnName(tls *libc.TLS, pIdx uintptr, i int32) uintptr { i = int32(*(*I16)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx)).FaiColumn + uintptr(i)*2))) if i == -2 { - return ts + 21518 + return ts + 21565 } if i == -1 { return ts + 16267 @@ -97847,11 +97890,11 @@ var i int32 if bAnd != 0 { - Xsqlite3_str_append(tls, pStr, ts+21525, 5) + Xsqlite3_str_append(tls, pStr, ts+21572, 5) } if nTerm > 1 { - Xsqlite3_str_append(tls, pStr, ts+21531, 1) + Xsqlite3_str_append(tls, pStr, ts+21578, 1) } for i = 0; i < nTerm; i++ { if i != 0 { @@ -97866,7 +97909,7 @@ Xsqlite3_str_append(tls, pStr, zOp, 1) if nTerm > 1 { - Xsqlite3_str_append(tls, pStr, ts+21531, 1) + Xsqlite3_str_append(tls, pStr, ts+21578, 1) } for i = 0; i < nTerm; i++ { if i != 0 { @@ -97892,27 +97935,27 @@ if int32(nEq) == 0 && (*WhereLoop)(unsafe.Pointer(pLoop)).FwsFlags&U32(WHERE_BTM_LIMIT|WHERE_TOP_LIMIT) == U32(0) { return } - Xsqlite3_str_append(tls, pStr, ts+21533, 2) + Xsqlite3_str_append(tls, pStr, ts+21580, 2) for i = 0; i < int32(nEq); i++ { var z uintptr = explainIndexColumnName(tls, pIndex, i) if i != 0 { - Xsqlite3_str_append(tls, pStr, ts+21525, 5) + Xsqlite3_str_append(tls, pStr, ts+21572, 5) } Xsqlite3_str_appendf(tls, pStr, func() uintptr { if i >= int32(nSkip) { - return ts + 21536 + return ts + 21583 } - return ts + 21541 + return ts + 21588 }(), libc.VaList(bp, z)) } j = i if (*WhereLoop)(unsafe.Pointer(pLoop)).FwsFlags&U32(WHERE_BTM_LIMIT) != 0 { - explainAppendTerm(tls, pStr, pIndex, int32(*(*U16)(unsafe.Pointer(pLoop + 24 + 2))), j, i, ts+21549) + explainAppendTerm(tls, pStr, pIndex, int32(*(*U16)(unsafe.Pointer(pLoop + 24 + 2))), j, i, ts+21596) i = 1 } if (*WhereLoop)(unsafe.Pointer(pLoop)).FwsFlags&U32(WHERE_TOP_LIMIT) != 0 { - explainAppendTerm(tls, pStr, pIndex, int32(*(*U16)(unsafe.Pointer(pLoop + 24 + 4))), j, i, ts+21551) + explainAppendTerm(tls, pStr, pIndex, int32(*(*U16)(unsafe.Pointer(pLoop + 24 + 4))), j, i, ts+21598) } Xsqlite3_str_append(tls, pStr, ts+4957, 1) } @@ -97955,11 +97998,11 @@ Xsqlite3StrAccumInit(tls, bp+64, db, bp+96, int32(unsafe.Sizeof([100]int8{})), SQLITE_MAX_LENGTH) (*StrAccum)(unsafe.Pointer(bp + 64)).FprintfFlags = U8(SQLITE_PRINTF_INTERNAL) - Xsqlite3_str_appendf(tls, bp+64, ts+21553, libc.VaList(bp, func() uintptr { + Xsqlite3_str_appendf(tls, bp+64, ts+21600, libc.VaList(bp, func() uintptr { if isSearch != 0 { - return ts + 21559 + return ts + 21606 } - return ts + 21566 + return ts + 21613 }(), pItem)) if flags&U32(WHERE_IPK|WHERE_VIRTUALTABLE) == U32(0) { var zFmt uintptr = uintptr(0) @@ -97972,40 +98015,40 @@ zFmt = ts + 10976 } } else if flags&U32(WHERE_PARTIALIDX) != 0 { - zFmt = ts + 21571 + zFmt = ts + 21618 } else if flags&U32(WHERE_AUTO_INDEX) != 0 { - zFmt = ts + 21604 + zFmt = ts + 21651 } else if flags&U32(WHERE_IDX_ONLY) != 0 { - zFmt = ts + 21629 + zFmt = ts + 21676 } else { - zFmt = ts + 21647 + zFmt = ts + 21694 } if zFmt != 0 { - Xsqlite3_str_append(tls, bp+64, ts+21656, 7) + Xsqlite3_str_append(tls, bp+64, ts+21703, 7) Xsqlite3_str_appendf(tls, bp+64, zFmt, libc.VaList(bp+16, (*Index)(unsafe.Pointer(pIdx)).FzName)) explainIndexRange(tls, bp+64, pLoop) } } else if flags&U32(WHERE_IPK) != U32(0) && flags&U32(WHERE_CONSTRAINT) != U32(0) { var cRangeOp int8 var zRowid uintptr = ts + 16267 - Xsqlite3_str_appendf(tls, bp+64, ts+21664, libc.VaList(bp+24, zRowid)) + Xsqlite3_str_appendf(tls, bp+64, ts+21711, libc.VaList(bp+24, zRowid)) if 
flags&U32(WHERE_COLUMN_EQ|WHERE_COLUMN_IN) != 0 { cRangeOp = int8('=') } else if flags&U32(WHERE_BOTH_LIMIT) == U32(WHERE_BOTH_LIMIT) { - Xsqlite3_str_appendf(tls, bp+64, ts+21695, libc.VaList(bp+32, zRowid)) + Xsqlite3_str_appendf(tls, bp+64, ts+21742, libc.VaList(bp+32, zRowid)) cRangeOp = int8('<') } else if flags&U32(WHERE_BTM_LIMIT) != 0 { cRangeOp = int8('>') } else { cRangeOp = int8('<') } - Xsqlite3_str_appendf(tls, bp+64, ts+21705, libc.VaList(bp+40, int32(cRangeOp))) + Xsqlite3_str_appendf(tls, bp+64, ts+21752, libc.VaList(bp+40, int32(cRangeOp))) } else if flags&U32(WHERE_VIRTUALTABLE) != U32(0) { - Xsqlite3_str_appendf(tls, bp+64, ts+21710, + Xsqlite3_str_appendf(tls, bp+64, ts+21757, libc.VaList(bp+48, *(*int32)(unsafe.Pointer(pLoop + 24)), *(*uintptr)(unsafe.Pointer(pLoop + 24 + 16)))) } if int32((*SrcItem)(unsafe.Pointer(pItem)).Ffg.Fjointype)&JT_LEFT != 0 { - Xsqlite3_str_appendf(tls, bp+64, ts+21737, 0) + Xsqlite3_str_appendf(tls, bp+64, ts+21784, 0) } zMsg = Xsqlite3StrAccumFinish(tls, bp+64) @@ -98037,22 +98080,22 @@ Xsqlite3StrAccumInit(tls, bp+24, db, bp+56, int32(unsafe.Sizeof([100]int8{})), SQLITE_MAX_LENGTH) (*StrAccum)(unsafe.Pointer(bp + 24)).FprintfFlags = U8(SQLITE_PRINTF_INTERNAL) - Xsqlite3_str_appendf(tls, bp+24, ts+21748, libc.VaList(bp, pItem)) + Xsqlite3_str_appendf(tls, bp+24, ts+21795, libc.VaList(bp, pItem)) pLoop = (*WhereLevel)(unsafe.Pointer(pLevel)).FpWLoop if (*WhereLoop)(unsafe.Pointer(pLoop)).FwsFlags&U32(WHERE_IPK) != 0 { var pTab uintptr = (*SrcItem)(unsafe.Pointer(pItem)).FpTab if int32((*Table)(unsafe.Pointer(pTab)).FiPKey) >= 0 { - Xsqlite3_str_appendf(tls, bp+24, ts+21536, libc.VaList(bp+8, (*Column)(unsafe.Pointer((*Table)(unsafe.Pointer(pTab)).FaCol+uintptr((*Table)(unsafe.Pointer(pTab)).FiPKey)*24)).FzCnName)) + Xsqlite3_str_appendf(tls, bp+24, ts+21583, libc.VaList(bp+8, (*Column)(unsafe.Pointer((*Table)(unsafe.Pointer(pTab)).FaCol+uintptr((*Table)(unsafe.Pointer(pTab)).FiPKey)*24)).FzCnName)) } else { - Xsqlite3_str_appendf(tls, bp+24, ts+21769, 0) + Xsqlite3_str_appendf(tls, bp+24, ts+21816, 0) } } else { for i = int32((*WhereLoop)(unsafe.Pointer(pLoop)).FnSkip); i < int32(*(*U16)(unsafe.Pointer(pLoop + 24))); i++ { var z uintptr = explainIndexColumnName(tls, *(*uintptr)(unsafe.Pointer(pLoop + 24 + 8)), i) if i > int32((*WhereLoop)(unsafe.Pointer(pLoop)).FnSkip) { - Xsqlite3_str_append(tls, bp+24, ts+21525, 5) + Xsqlite3_str_append(tls, bp+24, ts+21572, 5) } - Xsqlite3_str_appendf(tls, bp+24, ts+21536, libc.VaList(bp+16, z)) + Xsqlite3_str_appendf(tls, bp+24, ts+21583, libc.VaList(bp+16, z)) } } Xsqlite3_str_append(tls, bp+24, ts+4957, 1) @@ -99649,7 +99692,7 @@ ; __126: ; - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21777, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21824, 0) ii = 0 __135: if !(ii < (*WhereClause)(unsafe.Pointer(pOrWc)).FnTerm) { @@ -99677,7 +99720,7 @@ pOrExpr = pAndExpr __140: ; - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21792, libc.VaList(bp, ii+1)) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21839, libc.VaList(bp, ii+1)) pSubWInfo = Xsqlite3WhereBegin(tls, pParse, pOrTab, pOrExpr, uintptr(0), uintptr(0), uintptr(0), uint16(WHERE_OR_SUBCLAUSE), iCovCur) @@ -100195,7 +100238,7 @@ var mAll Bitmask = uint64(0) var k int32 - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21801, libc.VaList(bp, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pTabItem)).FpTab)).FzName)) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21848, libc.VaList(bp, 
(*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pTabItem)).FpTab)).FzName)) for k = 0; k < iLevel; k++ { var iIdxCur int32 @@ -100556,7 +100599,7 @@ {FzOp: ts + 16116, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_MATCH)}, {FzOp: ts + 15447, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_GLOB)}, {FzOp: ts + 14967, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_LIKE)}, - {FzOp: ts + 21815, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_REGEXP)}, + {FzOp: ts + 21862, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_REGEXP)}, } func transferJoinMarkings(tls *libc.TLS, pDerived uintptr, pBase uintptr) { @@ -101046,12 +101089,12 @@ extraRight = x - uint64(1) if prereqAll>>1 >= x { - Xsqlite3ErrorMsg(tls, pParse, ts+21822, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+21869, 0) return } } else if prereqAll>>1 >= x { if (*SrcList)(unsafe.Pointer(pSrc)).FnSrc > 0 && int32((*SrcItem)(unsafe.Pointer(pSrc+8)).Ffg.Fjointype)&JT_LTORJ != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+21822, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+21869, 0) return } *(*U32)(unsafe.Pointer(pExpr + 4)) &= libc.Uint32FromInt32(libc.CplInt32(EP_InnerON)) @@ -101130,7 +101173,7 @@ !((*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_OuterON) != U32(0)) && 0 == Xsqlite3ExprCanBeNull(tls, pLeft) { (*Expr)(unsafe.Pointer(pExpr)).Fop = U8(TK_TRUEFALSE) - *(*uintptr)(unsafe.Pointer(pExpr + 8)) = ts + 6769 + *(*uintptr)(unsafe.Pointer(pExpr + 8)) = ts + 7699 *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_IsFalse) (*WhereTerm)(unsafe.Pointer(pTerm)).FprereqAll = uint64(0) (*WhereTerm)(unsafe.Pointer(pTerm)).FeOperator = U16(0) @@ -101224,7 +101267,7 @@ } zCollSeqName = func() uintptr { if *(*int32)(unsafe.Pointer(bp + 20)) != 0 { - return ts + 21863 + return ts + 21910 } return uintptr(unsafe.Pointer(&Xsqlite3StrBINARY)) }() @@ -101600,7 +101643,7 @@ k++ } if k >= int32((*Table)(unsafe.Pointer(pTab)).FnCol) { - Xsqlite3ErrorMsg(tls, pParse, ts+21870, + Xsqlite3ErrorMsg(tls, pParse, ts+21917, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName, j)) return } @@ -101616,7 +101659,7 @@ pRhs = Xsqlite3PExpr(tls, pParse, TK_UPLUS, Xsqlite3ExprDup(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, (*ExprList_item)(unsafe.Pointer(pArgs+8+uintptr(j)*32)).FpExpr, 0), uintptr(0)) pTerm = Xsqlite3PExpr(tls, pParse, TK_EQ, pColRef, pRhs) - if int32((*SrcItem)(unsafe.Pointer(pItem)).Ffg.Fjointype)&(JT_LEFT|JT_LTORJ) != 0 { + if int32((*SrcItem)(unsafe.Pointer(pItem)).Ffg.Fjointype)&(JT_LEFT|JT_LTORJ|JT_RIGHT) != 0 { joinType = U32(EP_OuterON) } else { joinType = U32(EP_InnerON) @@ -102334,7 +102377,7 @@ goto __6 } Xsqlite3_log(tls, SQLITE_WARNING|int32(1)<<8, - ts+21906, libc.VaList(bp, (*Table)(unsafe.Pointer(pTable)).FzName, + ts+21953, libc.VaList(bp, (*Table)(unsafe.Pointer(pTable)).FzName, (*Column)(unsafe.Pointer((*Table)(unsafe.Pointer(pTable)).FaCol+uintptr(iCol)*24)).FzCnName)) sentWarning = U8(1) __6: @@ -102405,7 +102448,7 @@ __14: ; *(*uintptr)(unsafe.Pointer(pLoop + 24 + 8)) = pIdx - (*Index)(unsafe.Pointer(pIdx)).FzName = ts + 21932 + (*Index)(unsafe.Pointer(pIdx)).FzName = ts + 21979 (*Index)(unsafe.Pointer(pIdx)).FpTable = pTable n = 0 idxCols = uint64(0) @@ -102579,6 +102622,10 @@ var v uintptr = (*Parse)(unsafe.Pointer(pParse)).FpVdbe var pLoop uintptr = (*WhereLevel)(unsafe.Pointer(pLevel)).FpWLoop var iCur int32 + var saved_pIdxEpr uintptr + + saved_pIdxEpr = (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr + (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr = uintptr(0) addrOnce = Xsqlite3VdbeAddOp0(tls, v, OP_Once) for __ccgo := true; __ccgo; __ccgo = iLevel < int32((*WhereInfo)(unsafe.Pointer(pWInfo)).FnLevel) { @@ 
-102622,9 +102669,7 @@ var r1 int32 = Xsqlite3GetTempRange(tls, pParse, n) var jj int32 for jj = 0; jj < n; jj++ { - var iCol int32 = int32(*(*I16)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx)).FaiColumn + uintptr(jj)*2))) - - Xsqlite3ExprCodeGetColumnOfTable(tls, v, (*Index)(unsafe.Pointer(pIdx)).FpTable, iCur, iCol, r1+jj) + Xsqlite3ExprCodeLoadIndexColumn(tls, pParse, pIdx, iCur, jj, r1+jj) } Xsqlite3VdbeAddOp4Int(tls, v, OP_FilterAdd, (*WhereLevel)(unsafe.Pointer(pLevel)).FregFilter, 0, r1, n) Xsqlite3ReleaseTempRange(tls, pParse, r1, n) @@ -102658,6 +102703,7 @@ } } Xsqlite3VdbeJumpHere(tls, v, addrOnce) + (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr = saved_pIdxEpr } func allocateIndexInfo(tls *libc.TLS, pWInfo uintptr, pWC uintptr, mUnusable Bitmask, pSrc uintptr, pmNoOmit uintptr) uintptr { @@ -102916,11 +102962,16 @@ _ = pParse + if !((*Table)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx)).FpTable)).FtabFlags&U32(TF_WithoutRowid) == U32(0)) && int32(*(*uint16)(unsafe.Pointer(pIdx + 100))&0x3>>0) == SQLITE_IDXTYPE_PRIMARYKEY { + nField = int32((*Index)(unsafe.Pointer(pIdx)).FnKeyCol) + } else { + nField = int32((*Index)(unsafe.Pointer(pIdx)).FnColumn) + } nField = func() int32 { - if int32((*UnpackedRecord)(unsafe.Pointer(pRec)).FnField) < (*Index)(unsafe.Pointer(pIdx)).FnSample { + if int32((*UnpackedRecord)(unsafe.Pointer(pRec)).FnField) < nField { return int32((*UnpackedRecord)(unsafe.Pointer(pRec)).FnField) } - return (*Index)(unsafe.Pointer(pIdx)).FnSample + return nField }() iCol = 0 iSample = (*Index)(unsafe.Pointer(pIdx)).FnSample * nField @@ -104501,7 +104552,7 @@ j >= (*WhereClause)(unsafe.Pointer(pWC)).FnTerm || *(*uintptr)(unsafe.Pointer((*WhereLoop)(unsafe.Pointer(pNew)).FaLTerm + uintptr(iTerm)*8)) != uintptr(0) || int32((*sqlite3_index_constraint)(unsafe.Pointer(pIdxCons)).Fusable) == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+21943, libc.VaList(bp, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pSrc)).FpTab)).FzName)) + Xsqlite3ErrorMsg(tls, pParse, ts+21990, libc.VaList(bp, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pSrc)).FpTab)).FzName)) return SQLITE_ERROR } @@ -104559,7 +104610,7 @@ (*WhereLoop)(unsafe.Pointer(pNew)).FnLTerm = U16(mxTerm + 1) for i = 0; i <= mxTerm; i++ { if *(*uintptr)(unsafe.Pointer((*WhereLoop)(unsafe.Pointer(pNew)).FaLTerm + uintptr(i)*8)) == uintptr(0) { - Xsqlite3ErrorMsg(tls, pParse, ts+21943, libc.VaList(bp+8, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pSrc)).FpTab)).FzName)) + Xsqlite3ErrorMsg(tls, pParse, ts+21990, libc.VaList(bp+8, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pSrc)).FpTab)).FzName)) return SQLITE_ERROR } @@ -104957,7 +105008,7 @@ mPrior = mPrior | (*WhereLoop)(unsafe.Pointer(pNew)).FmaskSelf if rc != 0 || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { if rc == SQLITE_DONE { - Xsqlite3_log(tls, SQLITE_WARNING, ts+21969, 0) + Xsqlite3_log(tls, SQLITE_WARNING, ts+22016, 0) rc = SQLITE_OK } else { goto __3 @@ -105564,7 +105615,7 @@ } if nFrom == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+22004, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+22051, 0) Xsqlite3DbFreeNN(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pSpace) return SQLITE_ERROR } @@ -105599,6 +105650,10 @@ if int32((*WherePath)(unsafe.Pointer(pFrom)).FisOrdered) == (*ExprList)(unsafe.Pointer((*WhereInfo)(unsafe.Pointer(pWInfo)).FpOrderBy)).FnExpr { (*WhereInfo)(unsafe.Pointer(pWInfo)).FeDistinct = U8(WHERE_DISTINCT_ORDERED) } + if (*Select)(unsafe.Pointer((*WhereInfo)(unsafe.Pointer(pWInfo)).FpSelect)).FpOrderBy != 0 && + 
int32((*WhereInfo)(unsafe.Pointer(pWInfo)).FnOBSat) > (*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer((*WhereInfo)(unsafe.Pointer(pWInfo)).FpSelect)).FpOrderBy)).FnExpr { + (*WhereInfo)(unsafe.Pointer(pWInfo)).FnOBSat = I8((*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer((*WhereInfo)(unsafe.Pointer(pWInfo)).FpSelect)).FpOrderBy)).FnExpr) + } } else { (*WhereInfo)(unsafe.Pointer(pWInfo)).FrevMask = (*WherePath)(unsafe.Pointer(pFrom)).FrevLoop if int32((*WhereInfo)(unsafe.Pointer(pWInfo)).FnOBSat) <= 0 { @@ -105893,6 +105948,9 @@ (*IndexedExpr)(unsafe.Pointer(p)).FiIdxCur = iIdxCur (*IndexedExpr)(unsafe.Pointer(p)).FiIdxCol = i (*IndexedExpr)(unsafe.Pointer(p)).FbMaybeNullRow = U8(bMaybeNullRow) + if Xsqlite3IndexAffinityStr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pIdx) != 0 { + (*IndexedExpr)(unsafe.Pointer(p)).Faff = U8(*(*int8)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx)).FzColAff + uintptr(i)))) + } (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr = p if (*IndexedExpr)(unsafe.Pointer(p)).FpIENext == uintptr(0) { Xsqlite3ParserAddCleanup(tls, pParse, *(*uintptr)(unsafe.Pointer(&struct { @@ -106045,7 +106103,7 @@ if !((*SrcList)(unsafe.Pointer(pTabList)).FnSrc > int32(uint64(unsafe.Sizeof(Bitmask(0)))*uint64(8))) { goto __2 } - Xsqlite3ErrorMsg(tls, pParse, ts+22022, libc.VaList(bp, int32(uint64(unsafe.Sizeof(Bitmask(0)))*uint64(8)))) + Xsqlite3ErrorMsg(tls, pParse, ts+22069, libc.VaList(bp, int32(uint64(unsafe.Sizeof(Bitmask(0)))*uint64(8)))) return uintptr(0) __2: ; @@ -106109,7 +106167,7 @@ (*WhereInfo)(unsafe.Pointer(pWInfo)).FeDistinct = U8(WHERE_DISTINCT_UNIQUE) __7: ; - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+22050, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+22097, 0) goto __5 __4: ii = 0 @@ -106991,7 +107049,7 @@ error_out: Xsqlite3_result_error(tls, - pCtx, ts+22068, -1) + pCtx, ts+22115, -1) } func nth_valueFinalizeFunc(tls *libc.TLS, pCtx uintptr) { @@ -107124,7 +107182,7 @@ (*NtileCtx)(unsafe.Pointer(p)).FnParam = Xsqlite3_value_int64(tls, *(*uintptr)(unsafe.Pointer(apArg))) if (*NtileCtx)(unsafe.Pointer(p)).FnParam <= int64(0) { Xsqlite3_result_error(tls, - pCtx, ts+22124, -1) + pCtx, ts+22171, -1) } } (*NtileCtx)(unsafe.Pointer(p)).FnTotal++ @@ -107214,17 +107272,17 @@ } } -var row_numberName = *(*[11]int8)(unsafe.Pointer(ts + 22169)) -var dense_rankName = *(*[11]int8)(unsafe.Pointer(ts + 22180)) -var rankName = *(*[5]int8)(unsafe.Pointer(ts + 22191)) -var percent_rankName = *(*[13]int8)(unsafe.Pointer(ts + 22196)) -var cume_distName = *(*[10]int8)(unsafe.Pointer(ts + 22209)) -var ntileName = *(*[6]int8)(unsafe.Pointer(ts + 22219)) -var last_valueName = *(*[11]int8)(unsafe.Pointer(ts + 22225)) -var nth_valueName = *(*[10]int8)(unsafe.Pointer(ts + 22236)) -var first_valueName = *(*[12]int8)(unsafe.Pointer(ts + 22246)) -var leadName = *(*[5]int8)(unsafe.Pointer(ts + 22258)) -var lagName = *(*[4]int8)(unsafe.Pointer(ts + 22263)) +var row_numberName = *(*[11]int8)(unsafe.Pointer(ts + 22216)) +var dense_rankName = *(*[11]int8)(unsafe.Pointer(ts + 22227)) +var rankName = *(*[5]int8)(unsafe.Pointer(ts + 22238)) +var percent_rankName = *(*[13]int8)(unsafe.Pointer(ts + 22243)) +var cume_distName = *(*[10]int8)(unsafe.Pointer(ts + 22256)) +var ntileName = *(*[6]int8)(unsafe.Pointer(ts + 22266)) +var last_valueName = *(*[11]int8)(unsafe.Pointer(ts + 22272)) +var nth_valueName = *(*[10]int8)(unsafe.Pointer(ts + 22283)) +var first_valueName = *(*[12]int8)(unsafe.Pointer(ts + 22293)) +var leadName = *(*[5]int8)(unsafe.Pointer(ts + 22305)) +var lagName = 
*(*[4]int8)(unsafe.Pointer(ts + 22310)) func noopStepFunc(tls *libc.TLS, p uintptr, n int32, a uintptr) { _ = p @@ -107270,7 +107328,7 @@ } } if p == uintptr(0) { - Xsqlite3ErrorMsg(tls, pParse, ts+22267, libc.VaList(bp, zName)) + Xsqlite3ErrorMsg(tls, pParse, ts+22314, libc.VaList(bp, zName)) } return p } @@ -107314,12 +107372,12 @@ ((*Window)(unsafe.Pointer(pWin)).FpStart != 0 || (*Window)(unsafe.Pointer(pWin)).FpEnd != 0) && ((*Window)(unsafe.Pointer(pWin)).FpOrderBy == uintptr(0) || (*ExprList)(unsafe.Pointer((*Window)(unsafe.Pointer(pWin)).FpOrderBy)).FnExpr != 1) { Xsqlite3ErrorMsg(tls, pParse, - ts+22286, 0) + ts+22333, 0) } else if (*FuncDef)(unsafe.Pointer(pFunc)).FfuncFlags&U32(SQLITE_FUNC_WINDOW) != 0 { var db uintptr = (*Parse)(unsafe.Pointer(pParse)).Fdb if (*Window)(unsafe.Pointer(pWin)).FpFilter != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+22357, 0) + ts+22404, 0) } else { *(*[8]WindowUpdate)(unsafe.Pointer(bp)) = [8]WindowUpdate{ {FzFunc: uintptr(unsafe.Pointer(&row_numberName)), FeFrmType: TK_ROWS, FeStart: TK_UNBOUNDED, FeEnd: TK_CURRENT}, @@ -107548,7 +107606,7 @@ if int32((*Expr)(unsafe.Pointer(pExpr)).Fop) == TK_AGG_FUNCTION && (*Expr)(unsafe.Pointer(pExpr)).FpAggInfo == uintptr(0) { Xsqlite3ErrorMsg(tls, (*Walker)(unsafe.Pointer(pWalker)).FpParse, - ts+22420, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(pExpr + 8)))) + ts+22467, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(pExpr + 8)))) } return WRC_Continue } @@ -107664,7 +107722,7 @@ if *(*uintptr)(unsafe.Pointer(bp + 48)) == uintptr(0) { *(*uintptr)(unsafe.Pointer(bp + 48)) = Xsqlite3ExprListAppend(tls, pParse, uintptr(0), - Xsqlite3Expr(tls, db, TK_INTEGER, ts+7530)) + Xsqlite3Expr(tls, db, TK_INTEGER, ts+7519)) } pSub = Xsqlite3SelectNew(tls, @@ -107779,7 +107837,7 @@ eStart == TK_FOLLOWING && (eEnd == TK_PRECEDING || eEnd == TK_CURRENT)) { goto __2 } - Xsqlite3ErrorMsg(tls, pParse, ts+22446, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+22493, 0) goto windowAllocErr __2: ; @@ -107844,15 +107902,15 @@ var zErr uintptr = uintptr(0) if (*Window)(unsafe.Pointer(pWin)).FpPartition != 0 { - zErr = ts + 22478 + zErr = ts + 22525 } else if (*Window)(unsafe.Pointer(pExist)).FpOrderBy != 0 && (*Window)(unsafe.Pointer(pWin)).FpOrderBy != 0 { - zErr = ts + 22495 + zErr = ts + 22542 } else if int32((*Window)(unsafe.Pointer(pExist)).FbImplicitFrame) == 0 { - zErr = ts + 22511 + zErr = ts + 22558 } if zErr != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+22531, libc.VaList(bp, zErr, (*Window)(unsafe.Pointer(pWin)).FzBase)) + ts+22578, libc.VaList(bp, zErr, (*Window)(unsafe.Pointer(pWin)).FzBase)) } else { (*Window)(unsafe.Pointer(pWin)).FpPartition = Xsqlite3ExprListDup(tls, db, (*Window)(unsafe.Pointer(pExist)).FpPartition, 0) if (*Window)(unsafe.Pointer(pExist)).FpOrderBy != 0 { @@ -107873,7 +107931,7 @@ (*Window)(unsafe.Pointer(pWin)).FpOwner = p if (*Expr)(unsafe.Pointer(p)).Fflags&U32(EP_Distinct) != 0 && int32((*Window)(unsafe.Pointer(pWin)).FeFrmType) != TK_FILTER { Xsqlite3ErrorMsg(tls, pParse, - ts+22564, 0) + ts+22611, 0) } } else { Xsqlite3WindowDelete(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pWin) @@ -108029,11 +108087,11 @@ } var azErr = [5]uintptr{ - ts + 22611, - ts + 22664, - ts + 22068, - ts + 22715, - ts + 22767, + ts + 22658, + ts + 22711, + ts + 22115, + ts + 22762, + ts + 22814, } var aOp1 = [5]int32{OP_Ge, OP_Ge, OP_Gt, OP_Ge, OP_Ge} @@ -109428,19 +109486,19 @@ } cnt++ if (*Select)(unsafe.Pointer(pLoop)).FpOrderBy != 0 || (*Select)(unsafe.Pointer(pLoop)).FpLimit != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+22817, + 
Xsqlite3ErrorMsg(tls, pParse, ts+22864, libc.VaList(bp, func() uintptr { if (*Select)(unsafe.Pointer(pLoop)).FpOrderBy != uintptr(0) { - return ts + 22859 + return ts + 22906 } - return ts + 22868 + return ts + 22915 }(), Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(pNext)).Fop)))) break } } if (*Select)(unsafe.Pointer(p)).FselFlags&U32(SF_MultiValue) == U32(0) && libc.AssignInt32(&mxSelect, *(*int32)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb + 136 + 4*4))) > 0 && cnt > mxSelect { - Xsqlite3ErrorMsg(tls, pParse, ts+22874, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+22921, 0) } } @@ -109508,7 +109566,7 @@ var p uintptr = Xsqlite3ExprListAppend(tls, pParse, pPrior, uintptr(0)) if (hasCollate != 0 || sortOrder != -1) && int32((*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).Finit.Fbusy) == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+22908, + Xsqlite3ErrorMsg(tls, pParse, ts+22955, libc.VaList(bp, (*Token)(unsafe.Pointer(pIdToken)).Fn, (*Token)(unsafe.Pointer(pIdToken)).Fz)) } Xsqlite3ExprListSetName(tls, pParse, p, pIdToken, 1) @@ -110605,7 +110663,7 @@ yy_pop_parser_stack(tls, yypParser) } - Xsqlite3ErrorMsg(tls, pParse, ts+22946, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+22993, 0) (*YyParser)(unsafe.Pointer(yypParser)).FpParse = pParse } @@ -111584,7 +111642,7 @@ *(*U32)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-1)*24 + 8)) = U32(TF_WithoutRowid | TF_NoVisibleRowid) } else { *(*U32)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-1)*24 + 8)) = U32(0) - Xsqlite3ErrorMsg(tls, pParse, ts+22968, libc.VaList(bp, (*Token)(unsafe.Pointer(yymsp+8)).Fn, (*Token)(unsafe.Pointer(yymsp+8)).Fz)) + Xsqlite3ErrorMsg(tls, pParse, ts+23015, libc.VaList(bp, (*Token)(unsafe.Pointer(yymsp+8)).Fn, (*Token)(unsafe.Pointer(yymsp+8)).Fz)) } } break @@ -111594,7 +111652,7 @@ *(*U32)(unsafe.Pointer(bp + 40)) = U32(TF_Strict) } else { *(*U32)(unsafe.Pointer(bp + 40)) = U32(0) - Xsqlite3ErrorMsg(tls, pParse, ts+22968, libc.VaList(bp+16, (*Token)(unsafe.Pointer(yymsp+8)).Fn, (*Token)(unsafe.Pointer(yymsp+8)).Fz)) + Xsqlite3ErrorMsg(tls, pParse, ts+23015, libc.VaList(bp+16, (*Token)(unsafe.Pointer(yymsp+8)).Fn, (*Token)(unsafe.Pointer(yymsp+8)).Fz)) } } *(*U32)(unsafe.Pointer(yymsp + 8)) = *(*U32)(unsafe.Pointer(bp + 40)) @@ -112337,7 +112395,7 @@ case uint32(157): { Xsqlite3SrcListIndexedBy(tls, pParse, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-5)*24 + 8)), yymsp+libc.UintptrFromInt32(-4)*24+8) - Xsqlite3ExprListCheckLength(tls, pParse, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-2)*24 + 8)), ts+22995) + Xsqlite3ExprListCheckLength(tls, pParse, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-2)*24 + 8)), ts+23042) if *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-1)*24 + 8)) != 0 { var pFromClause uintptr = *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-1)*24 + 8)) if (*SrcList)(unsafe.Pointer(pFromClause)).FnSrc > 1 { @@ -112501,7 +112559,7 @@ *(*Token)(unsafe.Pointer(bp + 128)) = *(*Token)(unsafe.Pointer(yymsp + 8)) if int32((*Parse)(unsafe.Pointer(pParse)).Fnested) == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+23004, libc.VaList(bp+32, bp+128)) + Xsqlite3ErrorMsg(tls, pParse, ts+23051, libc.VaList(bp+32, bp+128)) *(*uintptr)(unsafe.Pointer(yymsp + 8)) = uintptr(0) } else { *(*uintptr)(unsafe.Pointer(yymsp + 8)) = Xsqlite3PExpr(tls, pParse, TK_REGISTER, uintptr(0), uintptr(0)) @@ -112718,9 +112776,9 @@ Xsqlite3ExprUnmapAndDelete(tls, pParse, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-4)*24 + 8))) 
*(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-4)*24 + 8)) = Xsqlite3Expr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, TK_STRING, func() uintptr { if *(*int32)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-3)*24 + 8)) != 0 { - return ts + 6764 + return ts + 7694 } - return ts + 6769 + return ts + 7699 }()) if *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-4)*24 + 8)) != 0 { Xsqlite3ExprIdToTrueFalse(tls, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-4)*24 + 8))) @@ -113004,19 +113062,19 @@ { *(*Token)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-2)*24 + 8)) = *(*Token)(unsafe.Pointer(yymsp + 8)) Xsqlite3ErrorMsg(tls, pParse, - ts+23028, 0) + ts+23075, 0) } break case uint32(271): { Xsqlite3ErrorMsg(tls, pParse, - ts+23123, 0) + ts+23170, 0) } break case uint32(272): { Xsqlite3ErrorMsg(tls, pParse, - ts+23207, 0) + ts+23254, 0) } break case uint32(273): @@ -113395,9 +113453,9 @@ _ = yymajor if *(*int8)(unsafe.Pointer((*Token)(unsafe.Pointer(bp + 8)).Fz)) != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+23004, libc.VaList(bp, bp+8)) + Xsqlite3ErrorMsg(tls, pParse, ts+23051, libc.VaList(bp, bp+8)) } else { - Xsqlite3ErrorMsg(tls, pParse, ts+23292, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+23339, 0) } (*YyParser)(unsafe.Pointer(yypParser)).FpParse = pParse @@ -114165,7 +114223,7 @@ } else { (*Token)(unsafe.Pointer(bp + 2464)).Fz = zSql (*Token)(unsafe.Pointer(bp + 2464)).Fn = uint32(n) - Xsqlite3ErrorMsg(tls, pParse, ts+23309, libc.VaList(bp, bp+2464)) + Xsqlite3ErrorMsg(tls, pParse, ts+23356, libc.VaList(bp, bp+2464)) break } } @@ -114188,7 +114246,7 @@ if (*Parse)(unsafe.Pointer(pParse)).FzErrMsg == uintptr(0) { (*Parse)(unsafe.Pointer(pParse)).FzErrMsg = Xsqlite3MPrintf(tls, db, ts+3663, libc.VaList(bp+8, Xsqlite3ErrStr(tls, (*Parse)(unsafe.Pointer(pParse)).Frc))) } - Xsqlite3_log(tls, (*Parse)(unsafe.Pointer(pParse)).Frc, ts+23334, libc.VaList(bp+16, (*Parse)(unsafe.Pointer(pParse)).FzErrMsg, (*Parse)(unsafe.Pointer(pParse)).FzTail)) + Xsqlite3_log(tls, (*Parse)(unsafe.Pointer(pParse)).Frc, ts+23381, libc.VaList(bp+16, (*Parse)(unsafe.Pointer(pParse)).FzErrMsg, (*Parse)(unsafe.Pointer(pParse)).FzTail)) nErr++ } (*Parse)(unsafe.Pointer(pParse)).FzTail = zSql @@ -114361,7 +114419,7 @@ fallthrough case 'C': { - if nId == 6 && Xsqlite3_strnicmp(tls, zSql, ts+23345, 6) == 0 { + if nId == 6 && Xsqlite3_strnicmp(tls, zSql, ts+23392, 6) == 0 { token = U8(TkCREATE) } else { token = U8(TkOTHER) @@ -114374,11 +114432,11 @@ fallthrough case 'T': { - if nId == 7 && Xsqlite3_strnicmp(tls, zSql, ts+19931, 7) == 0 { + if nId == 7 && Xsqlite3_strnicmp(tls, zSql, ts+19978, 7) == 0 { token = U8(TkTRIGGER) - } else if nId == 4 && Xsqlite3_strnicmp(tls, zSql, ts+23352, 4) == 0 { + } else if nId == 4 && Xsqlite3_strnicmp(tls, zSql, ts+23399, 4) == 0 { token = U8(TkTEMP) - } else if nId == 9 && Xsqlite3_strnicmp(tls, zSql, ts+23357, 9) == 0 { + } else if nId == 9 && Xsqlite3_strnicmp(tls, zSql, ts+23404, 9) == 0 { token = U8(TkTEMP) } else { token = U8(TkOTHER) @@ -114391,9 +114449,9 @@ fallthrough case 'E': { - if nId == 3 && Xsqlite3_strnicmp(tls, zSql, ts+23367, 3) == 0 { + if nId == 3 && Xsqlite3_strnicmp(tls, zSql, ts+23414, 3) == 0 { token = U8(TkEND) - } else if nId == 7 && Xsqlite3_strnicmp(tls, zSql, ts+23371, 7) == 0 { + } else if nId == 7 && Xsqlite3_strnicmp(tls, zSql, ts+23418, 7) == 0 { token = U8(TkEXPLAIN) } else { token = U8(TkOTHER) @@ -114627,7 +114685,7 @@ var rc int32 = SQLITE_OK if Xsqlite3Config.FisInit != 0 { - return Xsqlite3MisuseError(tls, 174337) + return 
Xsqlite3MisuseError(tls, 174426) } ap = va @@ -115202,7 +115260,7 @@ return SQLITE_OK } if !(Xsqlite3SafetyCheckSickOrOk(tls, db) != 0) { - return Xsqlite3MisuseError(tls, 175111) + return Xsqlite3MisuseError(tls, 175200) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) if int32((*Sqlite3)(unsafe.Pointer(db)).FmTrace)&SQLITE_TRACE_CLOSE != 0 { @@ -115217,7 +115275,7 @@ if !(forceZombie != 0) && connectionIsBusy(tls, db) != 0 { Xsqlite3ErrorWithMsg(tls, db, SQLITE_BUSY, - ts+23379, 0) + ts+23426, 0) Xsqlite3_mutex_leave(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) return SQLITE_BUSY } @@ -115408,23 +115466,23 @@ // Return a static string that describes the kind of error specified in the // argument. func Xsqlite3ErrStr(tls *libc.TLS, rc int32) uintptr { - var zErr uintptr = ts + 23447 + var zErr uintptr = ts + 23494 switch rc { case SQLITE_ABORT | int32(2)<<8: { - zErr = ts + 23461 + zErr = ts + 23508 break } case SQLITE_ROW: { - zErr = ts + 23483 + zErr = ts + 23530 break } case SQLITE_DONE: { - zErr = ts + 23505 + zErr = ts + 23552 break } @@ -115442,35 +115500,35 @@ } var aMsg = [29]uintptr{ - ts + 23528, - ts + 23541, + ts + 23575, + ts + 23588, uintptr(0), - ts + 23557, - ts + 23582, - ts + 23596, - ts + 23615, + ts + 23604, + ts + 23629, + ts + 23643, + ts + 23662, ts + 1490, - ts + 23640, - ts + 23677, - ts + 23689, - ts + 23704, - ts + 23737, - ts + 23755, - ts + 23780, - ts + 23809, + ts + 23687, + ts + 23724, + ts + 23736, + ts + 23751, + ts + 23784, + ts + 23802, + ts + 23827, + ts + 23856, uintptr(0), ts + 5838, ts + 5334, - ts + 23826, - ts + 23844, - ts + 23862, - uintptr(0), - ts + 23896, + ts + 23873, + ts + 23891, + ts + 23909, uintptr(0), - ts + 23917, ts + 23943, - ts + 23966, - ts + 23987, + uintptr(0), + ts + 23964, + ts + 23990, + ts + 24013, + ts + 24034, } func sqliteDefaultBusyCallback(tls *libc.TLS, ptr uintptr, count int32) int32 { @@ -115591,7 +115649,7 @@ libc.Bool32(xValue == uintptr(0)) != libc.Bool32(xInverse == uintptr(0)) || (nArg < -1 || nArg > SQLITE_MAX_FUNCTION_ARG) || 255 < Xsqlite3Strlen30(tls, zFunctionName) { - return Xsqlite3MisuseError(tls, 175758) + return Xsqlite3MisuseError(tls, 175847) } extraFlags = enc & (SQLITE_DETERMINISTIC | SQLITE_DIRECTONLY | SQLITE_SUBTYPE | SQLITE_INNOCUOUS) @@ -115636,7 +115694,7 @@ if p != 0 && (*FuncDef)(unsafe.Pointer(p)).FfuncFlags&U32(SQLITE_FUNC_ENCMASK) == U32(enc) && int32((*FuncDef)(unsafe.Pointer(p)).FnArg) == nArg { if (*Sqlite3)(unsafe.Pointer(db)).FnVdbeActive != 0 { Xsqlite3ErrorWithMsg(tls, db, SQLITE_BUSY, - ts+24003, 0) + ts+24050, 0) return SQLITE_BUSY } else { @@ -115753,7 +115811,7 @@ _ = NotUsed _ = NotUsed2 zErr = Xsqlite3_mprintf(tls, - ts+24066, libc.VaList(bp, zName)) + ts+24113, libc.VaList(bp, zName)) Xsqlite3_result_error(tls, context, zErr, -1) Xsqlite3_free(tls, zErr) } @@ -115989,7 +116047,7 @@ } if iDb < 0 { rc = SQLITE_ERROR - Xsqlite3ErrorWithMsg(tls, db, SQLITE_ERROR, ts+24117, libc.VaList(bp, zDb)) + Xsqlite3ErrorWithMsg(tls, db, SQLITE_ERROR, ts+24164, libc.VaList(bp, zDb)) } else { (*Sqlite3)(unsafe.Pointer(db)).FbusyHandler.FnBusy = 0 rc = Xsqlite3Checkpoint(tls, db, iDb, eMode, pnLog, pnCkpt) @@ -116082,7 +116140,7 @@ return Xsqlite3ErrStr(tls, SQLITE_NOMEM) } if !(Xsqlite3SafetyCheckSickOrOk(tls, db) != 0) { - return Xsqlite3ErrStr(tls, Xsqlite3MisuseError(tls, 176503)) + return Xsqlite3ErrStr(tls, Xsqlite3MisuseError(tls, 176592)) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) if (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { @@ 
-116152,7 +116210,7 @@ // passed to this function, we assume a malloc() failed during sqlite3_open(). func Xsqlite3_errcode(tls *libc.TLS, db uintptr) int32 { if db != 0 && !(Xsqlite3SafetyCheckSickOrOk(tls, db) != 0) { - return Xsqlite3MisuseError(tls, 176582) + return Xsqlite3MisuseError(tls, 176671) } if !(db != 0) || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { return SQLITE_NOMEM @@ -116162,7 +116220,7 @@ func Xsqlite3_extended_errcode(tls *libc.TLS, db uintptr) int32 { if db != 0 && !(Xsqlite3SafetyCheckSickOrOk(tls, db) != 0) { - return Xsqlite3MisuseError(tls, 176591) + return Xsqlite3MisuseError(tls, 176680) } if !(db != 0) || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { return SQLITE_NOMEM @@ -116194,14 +116252,14 @@ enc2 = SQLITE_UTF16LE } if enc2 < SQLITE_UTF8 || enc2 > SQLITE_UTF16BE { - return Xsqlite3MisuseError(tls, 176639) + return Xsqlite3MisuseError(tls, 176728) } pColl = Xsqlite3FindCollSeq(tls, db, U8(enc2), zName, 0) if pColl != 0 && (*CollSeq)(unsafe.Pointer(pColl)).FxCmp != 0 { if (*Sqlite3)(unsafe.Pointer(db)).FnVdbeActive != 0 { Xsqlite3ErrorWithMsg(tls, db, SQLITE_BUSY, - ts+24138, 0) + ts+24185, 0) return SQLITE_BUSY } Xsqlite3ExpirePreparedStatements(tls, db, 0) @@ -116331,7 +116389,7 @@ if !((flags&uint32(SQLITE_OPEN_URI) != 0 || Xsqlite3Config.FbOpenUri != 0) && - nUri >= 5 && libc.Xmemcmp(tls, zUri, ts+24206, uint64(5)) == 0) { + nUri >= 5 && libc.Xmemcmp(tls, zUri, ts+24253, uint64(5)) == 0) { goto __1 } iOut = 0 @@ -116376,10 +116434,10 @@ goto __8 __9: ; - if !(iIn != 7 && (iIn != 16 || libc.Xmemcmp(tls, ts+24212, zUri+7, uint64(9)) != 0)) { + if !(iIn != 7 && (iIn != 16 || libc.Xmemcmp(tls, ts+24259, zUri+7, uint64(9)) != 0)) { goto __10 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24222, + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24269, libc.VaList(bp, iIn-7, zUri+7)) rc = SQLITE_ERROR goto parse_uri_out @@ -116484,7 +116542,7 @@ zVal = zOpt + uintptr(nOpt+1) nVal = Xsqlite3Strlen30(tls, zVal) - if !(nOpt == 3 && libc.Xmemcmp(tls, ts+24250, zOpt, uint64(3)) == 0) { + if !(nOpt == 3 && libc.Xmemcmp(tls, ts+24297, zOpt, uint64(3)) == 0) { goto __29 } zVfs = zVal @@ -116495,17 +116553,17 @@ mask = 0 limit = 0 - if !(nOpt == 5 && libc.Xmemcmp(tls, ts+24254, zOpt, uint64(5)) == 0) { + if !(nOpt == 5 && libc.Xmemcmp(tls, ts+24301, zOpt, uint64(5)) == 0) { goto __31 } mask = SQLITE_OPEN_SHAREDCACHE | SQLITE_OPEN_PRIVATECACHE aMode = uintptr(unsafe.Pointer(&aCacheMode)) limit = mask - zModeType = ts + 24254 + zModeType = ts + 24301 __31: ; - if !(nOpt == 4 && libc.Xmemcmp(tls, ts+24260, zOpt, uint64(4)) == 0) { + if !(nOpt == 4 && libc.Xmemcmp(tls, ts+24307, zOpt, uint64(4)) == 0) { goto __32 } @@ -116543,7 +116601,7 @@ if !(mode == 0) { goto __38 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24265, libc.VaList(bp+16, zModeType, zVal)) + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24312, libc.VaList(bp+16, zModeType, zVal)) rc = SQLITE_ERROR goto parse_uri_out __38: @@ -116551,7 +116609,7 @@ if !(mode&libc.CplInt32(SQLITE_OPEN_MEMORY) > limit) { goto __39 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24285, + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24332, libc.VaList(bp+32, zModeType, zVal)) rc = SQLITE_PERM goto parse_uri_out @@ -116591,7 +116649,7 @@ if !(*(*uintptr)(unsafe.Pointer(ppVfs)) == uintptr(0)) { goto __42 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24309, 
libc.VaList(bp+48, zVfs)) + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24356, libc.VaList(bp+48, zVfs)) rc = SQLITE_ERROR __42: ; @@ -116615,14 +116673,14 @@ } var aCacheMode = [3]OpenMode{ - {Fz: ts + 24325, Fmode: SQLITE_OPEN_SHAREDCACHE}, - {Fz: ts + 24332, Fmode: SQLITE_OPEN_PRIVATECACHE}, + {Fz: ts + 24372, Fmode: SQLITE_OPEN_SHAREDCACHE}, + {Fz: ts + 24379, Fmode: SQLITE_OPEN_PRIVATECACHE}, {}, } var aOpenMode = [5]OpenMode{ - {Fz: ts + 24340, Fmode: SQLITE_OPEN_READONLY}, - {Fz: ts + 24343, Fmode: SQLITE_OPEN_READWRITE}, - {Fz: ts + 24346, Fmode: SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE}, + {Fz: ts + 24387, Fmode: SQLITE_OPEN_READONLY}, + {Fz: ts + 24390, Fmode: SQLITE_OPEN_READWRITE}, + {Fz: ts + 24393, Fmode: SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE}, {Fz: ts + 17362, Fmode: SQLITE_OPEN_MEMORY}, {}, } @@ -116769,10 +116827,10 @@ createCollation(tls, db, uintptr(unsafe.Pointer(&Xsqlite3StrBINARY)), uint8(SQLITE_UTF16LE), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr, int32, uintptr) int32 }{binCollFunc})), uintptr(0)) - createCollation(tls, db, ts+21863, uint8(SQLITE_UTF8), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + createCollation(tls, db, ts+21910, uint8(SQLITE_UTF8), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr, int32, uintptr) int32 }{nocaseCollatingFunc})), uintptr(0)) - createCollation(tls, db, ts+24350, uint8(SQLITE_UTF8), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + createCollation(tls, db, ts+24397, uint8(SQLITE_UTF8), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr, int32, uintptr) int32 }{rtrimCollFunc})), uintptr(0)) if !((*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0) { @@ -116786,7 +116844,7 @@ if !(int32(1)<<(*(*uint32)(unsafe.Pointer(bp + 8))&uint32(7))&0x46 == 0) { goto __16 } - rc = Xsqlite3MisuseError(tls, 177308) + rc = Xsqlite3MisuseError(tls, 177397) goto __17 __16: rc = Xsqlite3ParseUri(tls, zVfs, zFilename, bp+8, db, bp+16, bp+24) @@ -116839,7 +116897,7 @@ (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb)).FzDbSName = ts + 6441 (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb)).Fsafety_level = U8(SQLITE_DEFAULT_SYNCHRONOUS + 1) - (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + 1*32)).FzDbSName = ts + 23352 + (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + 1*32)).FzDbSName = ts + 23399 (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + 1*32)).Fsafety_level = U8(PAGER_SYNCHRONOUS_OFF) (*Sqlite3)(unsafe.Pointer(db)).FeOpenState = U8(SQLITE_STATE_OPEN) @@ -116944,7 +117002,7 @@ return rc } if zFilename == uintptr(0) { - zFilename = ts + 24356 + zFilename = ts + 24403 } pVal = Xsqlite3ValueNew(tls, uintptr(0)) Xsqlite3ValueSetStr(tls, pVal, -1, zFilename, uint8(SQLITE_UTF16LE), uintptr(0)) @@ -117047,21 +117105,21 @@ bp := tls.Alloc(24) defer tls.Free(24) - Xsqlite3_log(tls, iErr, ts+24359, + Xsqlite3_log(tls, iErr, ts+24406, libc.VaList(bp, zType, lineno, uintptr(20)+Xsqlite3_sourceid(tls))) return iErr } func Xsqlite3CorruptError(tls *libc.TLS, lineno int32) int32 { - return Xsqlite3ReportError(tls, SQLITE_CORRUPT, lineno, ts+24384) + return Xsqlite3ReportError(tls, SQLITE_CORRUPT, lineno, ts+24431) } func Xsqlite3MisuseError(tls *libc.TLS, lineno int32) int32 { - return Xsqlite3ReportError(tls, SQLITE_MISUSE, lineno, ts+24404) + return Xsqlite3ReportError(tls, SQLITE_MISUSE, lineno, ts+24451) } func Xsqlite3CantopenError(tls *libc.TLS, 
lineno int32) int32 { - return Xsqlite3ReportError(tls, SQLITE_CANTOPEN, lineno, ts+24411) + return Xsqlite3ReportError(tls, SQLITE_CANTOPEN, lineno, ts+24458) } // This is a convenience routine that makes sure that all thread-specific @@ -117219,7 +117277,7 @@ goto __20 } Xsqlite3DbFree(tls, db, *(*uintptr)(unsafe.Pointer(bp + 24))) - *(*uintptr)(unsafe.Pointer(bp + 24)) = Xsqlite3MPrintf(tls, db, ts+24428, libc.VaList(bp, zTableName, + *(*uintptr)(unsafe.Pointer(bp + 24)) = Xsqlite3MPrintf(tls, db, ts+24475, libc.VaList(bp, zTableName, zColumnName)) rc = SQLITE_ERROR __20: @@ -117875,7 +117933,7 @@ azCompileOpt = Xsqlite3CompileOptions(tls, bp) - if Xsqlite3_strnicmp(tls, zOptName, ts+24456, 7) == 0 { + if Xsqlite3_strnicmp(tls, zOptName, ts+24503, 7) == 0 { zOptName += uintptr(7) } n = Xsqlite3Strlen30(tls, zOptName) @@ -117993,7 +118051,7 @@ Xsqlite3ErrorWithMsg(tls, db, rc, func() uintptr { if rc != 0 { - return ts + 24464 + return ts + 24511 } return uintptr(0) }(), 0) @@ -118171,7 +118229,7 @@ type JsonParse = JsonParse1 var jsonType = [8]uintptr{ - ts + 6181, ts + 6764, ts + 6769, ts + 6191, ts + 6186, ts + 8005, ts + 24487, ts + 24493, + ts + 6181, ts + 7694, ts + 7699, ts + 6191, ts + 6186, ts + 8005, ts + 24534, ts + 24540, } func jsonZero(tls *libc.TLS, p uintptr) { @@ -118324,7 +118382,7 @@ *(*int8)(unsafe.Pointer((*JsonString)(unsafe.Pointer(p)).FzBuf + uintptr(libc.PostIncUint64(&(*JsonString)(unsafe.Pointer(p)).FnUsed, 1)))) = int8('0') *(*int8)(unsafe.Pointer((*JsonString)(unsafe.Pointer(p)).FzBuf + uintptr(libc.PostIncUint64(&(*JsonString)(unsafe.Pointer(p)).FnUsed, 1)))) = int8('0') *(*int8)(unsafe.Pointer((*JsonString)(unsafe.Pointer(p)).FzBuf + uintptr(libc.PostIncUint64(&(*JsonString)(unsafe.Pointer(p)).FnUsed, 1)))) = int8('0' + int32(c)>>4) - c = uint8(*(*int8)(unsafe.Pointer(ts + 24500 + uintptr(int32(c)&0xf)))) + c = uint8(*(*int8)(unsafe.Pointer(ts + 24547 + uintptr(int32(c)&0xf)))) __8: ; __6: @@ -118379,7 +118437,7 @@ default: { if int32((*JsonString)(unsafe.Pointer(p)).FbErr) == 0 { - Xsqlite3_result_error(tls, (*JsonString)(unsafe.Pointer(p)).FpCtx, ts+24517, -1) + Xsqlite3_result_error(tls, (*JsonString)(unsafe.Pointer(p)).FpCtx, ts+24564, -1) (*JsonString)(unsafe.Pointer(p)).FbErr = U8(2) jsonReset(tls, p) } @@ -118443,13 +118501,13 @@ } case JSON_TRUE: { - jsonAppendRaw(tls, pOut, ts+6764, uint32(4)) + jsonAppendRaw(tls, pOut, ts+7694, uint32(4)) break } case JSON_FALSE: { - jsonAppendRaw(tls, pOut, ts+6769, uint32(5)) + jsonAppendRaw(tls, pOut, ts+7699, uint32(5)) break } @@ -118999,12 +119057,12 @@ jsonParseAddNode(tls, pParse, uint32(JSON_NULL), uint32(0), uintptr(0)) return int32(i + U32(4)) } else if int32(c) == 't' && - libc.Xstrncmp(tls, z+uintptr(i), ts+6764, uint64(4)) == 0 && + libc.Xstrncmp(tls, z+uintptr(i), ts+7694, uint64(4)) == 0 && !(int32(Xsqlite3CtypeMap[uint8(*(*int8)(unsafe.Pointer(z + uintptr(i+U32(4)))))])&0x06 != 0) { jsonParseAddNode(tls, pParse, uint32(JSON_TRUE), uint32(0), uintptr(0)) return int32(i + U32(4)) } else if int32(c) == 'f' && - libc.Xstrncmp(tls, z+uintptr(i), ts+6769, uint64(5)) == 0 && + libc.Xstrncmp(tls, z+uintptr(i), ts+7699, uint64(5)) == 0 && !(int32(Xsqlite3CtypeMap[uint8(*(*int8)(unsafe.Pointer(z + uintptr(i+U32(5)))))])&0x06 != 0) { jsonParseAddNode(tls, pParse, uint32(JSON_FALSE), uint32(0), uintptr(0)) return int32(i + U32(5)) @@ -119105,7 +119163,7 @@ if (*JsonParse)(unsafe.Pointer(pParse)).Foom != 0 { Xsqlite3_result_error_nomem(tls, pCtx) } else { - Xsqlite3_result_error(tls, pCtx, ts+24546, -1) + 
Xsqlite3_result_error(tls, pCtx, ts+24593, -1) } } jsonParseReset(tls, pParse) @@ -119411,7 +119469,7 @@ } if int32(*(*int8)(unsafe.Pointer(zPath))) == '.' { jsonParseAddNode(tls, pParse, uint32(JSON_OBJECT), uint32(0), uintptr(0)) - } else if libc.Xstrncmp(tls, zPath, ts+24561, uint64(3)) == 0 { + } else if libc.Xstrncmp(tls, zPath, ts+24608, uint64(3)) == 0 { jsonParseAddNode(tls, pParse, uint32(JSON_ARRAY), uint32(0), uintptr(0)) } else { return uintptr(0) @@ -119426,7 +119484,7 @@ bp := tls.Alloc(8) defer tls.Free(8) - return Xsqlite3_mprintf(tls, ts+24565, libc.VaList(bp, zErr)) + return Xsqlite3_mprintf(tls, ts+24612, libc.VaList(bp, zErr)) } func jsonLookup(tls *libc.TLS, pParse uintptr, zPath uintptr, pApnd uintptr, pCtx uintptr) uintptr { @@ -119480,7 +119538,7 @@ bp := tls.Alloc(8) defer tls.Free(8) - var zMsg uintptr = Xsqlite3_mprintf(tls, ts+24591, + var zMsg uintptr = Xsqlite3_mprintf(tls, ts+24638, libc.VaList(bp, zFuncName)) Xsqlite3_result_error(tls, pCtx, zMsg, -1) Xsqlite3_free(tls, zMsg) @@ -119585,11 +119643,11 @@ if int32(*(*int8)(unsafe.Pointer(zPath))) != '$' { jsonInit(tls, bp, ctx) if int32(Xsqlite3CtypeMap[uint8(*(*int8)(unsafe.Pointer(zPath)))])&0x04 != 0 { - jsonAppendRaw(tls, bp, ts+24634, uint32(2)) + jsonAppendRaw(tls, bp, ts+24681, uint32(2)) jsonAppendRaw(tls, bp, zPath, uint32(int32(libc.Xstrlen(tls, zPath)))) jsonAppendRaw(tls, bp, ts+4998, uint32(2)) } else { - jsonAppendRaw(tls, bp, ts+24637, uint32(1+libc.Bool32(int32(*(*int8)(unsafe.Pointer(zPath))) != '['))) + jsonAppendRaw(tls, bp, ts+24684, uint32(1+libc.Bool32(int32(*(*int8)(unsafe.Pointer(zPath))) != '['))) jsonAppendRaw(tls, bp, zPath, uint32(int32(libc.Xstrlen(tls, zPath)))) jsonAppendChar(tls, bp, int8(0)) } @@ -119746,14 +119804,14 @@ if argc&1 != 0 { Xsqlite3_result_error(tls, ctx, - ts+24640, -1) + ts+24687, -1) return } jsonInit(tls, bp, ctx) jsonAppendChar(tls, bp, int8('{')) for i = 0; i < argc; i = i + 2 { if Xsqlite3_value_type(tls, *(*uintptr)(unsafe.Pointer(argv + uintptr(i)*8))) != SQLITE_TEXT { - Xsqlite3_result_error(tls, ctx, ts+24691, -1) + Xsqlite3_result_error(tls, ctx, ts+24738, -1) jsonReset(tls, bp) return } @@ -119923,9 +119981,9 @@ } jsonWrongNumArgs(tls, ctx, func() uintptr { if bIsSet != 0 { - return ts + 24725 + return ts + 24772 } - return ts + 24729 + return ts + 24776 }()) return __2: @@ -120058,7 +120116,7 @@ (*JsonString)(unsafe.Pointer(pStr)).FnUsed-- } } else { - Xsqlite3_result_text(tls, ctx, ts+24736, 2, uintptr(0)) + Xsqlite3_result_text(tls, ctx, ts+24783, 2, uintptr(0)) } Xsqlite3_result_subtype(tls, ctx, uint32(JSON_SUBTYPE)) } @@ -120155,7 +120213,7 @@ (*JsonString)(unsafe.Pointer(pStr)).FnUsed-- } } else { - Xsqlite3_result_text(tls, ctx, ts+24739, 2, uintptr(0)) + Xsqlite3_result_text(tls, ctx, ts+24786, 2, uintptr(0)) } Xsqlite3_result_subtype(tls, ctx, uint32(JSON_SUBTYPE)) } @@ -120199,7 +120257,7 @@ _ = argc _ = pAux rc = Xsqlite3_declare_vtab(tls, db, - ts+24742) + ts+24789) if rc == SQLITE_OK { pNew = libc.AssignPtrUintptr(ppVtab, Xsqlite3_malloc(tls, int32(unsafe.Sizeof(Sqlite3_vtab{})))) if pNew == uintptr(0) { @@ -120330,7 +120388,7 @@ nn = nn - 2 } } - jsonPrintf(tls, nn+2, pStr, ts+24825, libc.VaList(bp, nn, z)) + jsonPrintf(tls, nn+2, pStr, ts+24872, libc.VaList(bp, nn, z)) } func jsonEachComputePath(tls *libc.TLS, p uintptr, pStr uintptr, i U32) { @@ -120349,7 +120407,7 @@ pNode = (*JsonEachCursor)(unsafe.Pointer(p)).FsParse.FaNode + uintptr(i)*16 pUp = (*JsonEachCursor)(unsafe.Pointer(p)).FsParse.FaNode + uintptr(iUp)*16 if 
int32((*JsonNode)(unsafe.Pointer(pUp)).FeType) == JSON_ARRAY { - jsonPrintf(tls, 30, pStr, ts+24831, libc.VaList(bp, *(*U32)(unsafe.Pointer(pUp + 8)))) + jsonPrintf(tls, 30, pStr, ts+24878, libc.VaList(bp, *(*U32)(unsafe.Pointer(pUp + 8)))) } else { if int32((*JsonNode)(unsafe.Pointer(pNode)).FjnFlags)&JNODE_LABEL == 0 { pNode -= 16 @@ -120445,7 +120503,7 @@ jsonAppendChar(tls, bp+8, int8('$')) } if int32((*JsonEachCursor)(unsafe.Pointer(p)).FeType) == JSON_ARRAY { - jsonPrintf(tls, 30, bp+8, ts+24831, libc.VaList(bp, (*JsonEachCursor)(unsafe.Pointer(p)).FiRowid)) + jsonPrintf(tls, 30, bp+8, ts+24878, libc.VaList(bp, (*JsonEachCursor)(unsafe.Pointer(p)).FiRowid)) } else if int32((*JsonEachCursor)(unsafe.Pointer(p)).FeType) == JSON_OBJECT { jsonAppendObjectPathElement(tls, bp+8, pThis) } @@ -120469,7 +120527,7 @@ { var zRoot uintptr = (*JsonEachCursor)(unsafe.Pointer(p)).FzRoot if zRoot == uintptr(0) { - zRoot = ts + 24836 + zRoot = ts + 24883 } Xsqlite3_result_text(tls, ctx, zRoot, -1, uintptr(0)) break @@ -120591,7 +120649,7 @@ var rc int32 = SQLITE_NOMEM if int32((*JsonEachCursor)(unsafe.Pointer(p)).FsParse.Foom) == 0 { Xsqlite3_free(tls, (*Sqlite3_vtab)(unsafe.Pointer((*Sqlite3_vtab_cursor)(unsafe.Pointer(cur)).FpVtab)).FzErrMsg) - (*Sqlite3_vtab)(unsafe.Pointer((*Sqlite3_vtab_cursor)(unsafe.Pointer(cur)).FpVtab)).FzErrMsg = Xsqlite3_mprintf(tls, ts+24546, 0) + (*Sqlite3_vtab)(unsafe.Pointer((*Sqlite3_vtab_cursor)(unsafe.Pointer(cur)).FpVtab)).FzErrMsg = Xsqlite3_mprintf(tls, ts+24593, 0) if (*Sqlite3_vtab)(unsafe.Pointer((*Sqlite3_vtab_cursor)(unsafe.Pointer(cur)).FpVtab)).FzErrMsg != 0 { rc = SQLITE_ERROR } @@ -120686,25 +120744,25 @@ } var aJsonFunc = [19]FuncDef{ - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24838}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24843}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24854}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24854}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24872}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_JSON)), FxSFunc: 0, FzName: ts + 24885}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_SQL)), FxSFunc: 0, FzName: ts + 24888}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24892}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24904}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24916}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24927}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), 
FxSFunc: 0, FzName: ts + 24938}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24950}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_ISSET)), FxSFunc: 0, FzName: ts + 24963}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24972}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24972}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24982}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_UTF8 | 0*SQLITE_FUNC_NEEDCOLL | SQLITE_SUBTYPE | SQLITE_UTF8 | SQLITE_DETERMINISTIC), FxSFunc: 0, FxFinalize: 0, FxValue: 0, FxInverse: 0, FzName: ts + 24993}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_UTF8 | 0*SQLITE_FUNC_NEEDCOLL | SQLITE_SUBTYPE | SQLITE_UTF8 | SQLITE_DETERMINISTIC), FxSFunc: 0, FxFinalize: 0, FxValue: 0, FxInverse: 0, FzName: ts + 25010}} + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24885}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24890}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24901}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24901}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24919}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_JSON)), FxSFunc: 0, FzName: ts + 24932}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_SQL)), FxSFunc: 0, FzName: ts + 24935}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24939}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24951}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24963}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24974}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24985}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24997}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_ISSET)), FxSFunc: 0, FzName: ts + 25010}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | 
SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25019}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25019}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25029}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_UTF8 | 0*SQLITE_FUNC_NEEDCOLL | SQLITE_SUBTYPE | SQLITE_UTF8 | SQLITE_DETERMINISTIC), FxSFunc: 0, FxFinalize: 0, FxValue: 0, FxInverse: 0, FzName: ts + 25040}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_UTF8 | 0*SQLITE_FUNC_NEEDCOLL | SQLITE_SUBTYPE | SQLITE_UTF8 | SQLITE_DETERMINISTIC), FxSFunc: 0, FxFinalize: 0, FxValue: 0, FxInverse: 0, FzName: ts + 25057}} // Register the JSON table-valued functions func Xsqlite3JsonTableFunctions(tls *libc.TLS, db uintptr) int32 { @@ -120723,8 +120781,8 @@ FzName uintptr FpModule uintptr }{ - {FzName: ts + 25028, FpModule: 0}, - {FzName: ts + 25038, FpModule: 0}, + {FzName: ts + 25075, FpModule: 0}, + {FzName: ts + 25085, FpModule: 0}, } type Rtree1 = struct { @@ -120984,11 +121042,11 @@ } } if (*Rtree)(unsafe.Pointer(pRtree)).FpNodeBlob == uintptr(0) { - var zTab uintptr = Xsqlite3_mprintf(tls, ts+25048, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) + var zTab uintptr = Xsqlite3_mprintf(tls, ts+25095, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) if zTab == uintptr(0) { return SQLITE_NOMEM } - rc = Xsqlite3_blob_open(tls, (*Rtree)(unsafe.Pointer(pRtree)).Fdb, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, zTab, ts+25056, iNode, 0, + rc = Xsqlite3_blob_open(tls, (*Rtree)(unsafe.Pointer(pRtree)).Fdb, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, zTab, ts+25103, iNode, 0, pRtree+112) Xsqlite3_free(tls, zTab) } @@ -121199,7 +121257,7 @@ var pRtree uintptr = pVtab var rc int32 var zCreate uintptr = Xsqlite3_mprintf(tls, - ts+25061, + ts+25108, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) @@ -121897,7 +121955,7 @@ var pSrc uintptr var pInfo uintptr - pSrc = Xsqlite3_value_pointer(tls, pValue, ts+25143) + pSrc = Xsqlite3_value_pointer(tls, pValue, ts+25190) if pSrc == uintptr(0) { return SQLITE_ERROR } @@ -123238,7 +123296,7 @@ var zSql uintptr var rc int32 - zSql = Xsqlite3_mprintf(tls, ts+25157, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) + zSql = Xsqlite3_mprintf(tls, ts+25204, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) if zSql != 0 { rc = Xsqlite3_prepare_v2(tls, (*Rtree)(unsafe.Pointer(pRtree)).Fdb, zSql, -1, bp+56, uintptr(0)) } else { @@ -123250,12 +123308,12 @@ if iCol == 0 { var zCol uintptr = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), 0) (*Rtree)(unsafe.Pointer(pRtree)).Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+25177, libc.VaList(bp+16, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zCol)) + ts+25224, libc.VaList(bp+16, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zCol)) } else { var zCol1 uintptr = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), iCol) var zCol2 uintptr = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), iCol+1) (*Rtree)(unsafe.Pointer(pRtree)).Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+25209, 
libc.VaList(bp+32, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zCol1, zCol2)) + ts+25256, libc.VaList(bp+32, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zCol1, zCol2)) } } @@ -123481,7 +123539,7 @@ var pRtree uintptr = pVtab var rc int32 = SQLITE_NOMEM var zSql uintptr = Xsqlite3_mprintf(tls, - ts+25246, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName)) + ts+25293, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName)) if zSql != 0 { nodeBlobReset(tls, pRtree) rc = Xsqlite3_exec(tls, (*Rtree)(unsafe.Pointer(pRtree)).Fdb, zSql, uintptr(0), uintptr(0), uintptr(0)) @@ -123504,7 +123562,7 @@ bp := tls.Alloc(24) defer tls.Free(24) - var zFmt uintptr = ts + 25391 + var zFmt uintptr = ts + 25438 var zSql uintptr var rc int32 @@ -123552,7 +123610,7 @@ } var azName1 = [3]uintptr{ - ts + 25447, ts + 5060, ts + 16267, + ts + 25494, ts + 5060, ts + 16267, } var rtreeModule = Sqlite3_module{ @@ -123595,19 +123653,19 @@ var p uintptr = Xsqlite3_str_new(tls, db) var ii int32 Xsqlite3_str_appendf(tls, p, - ts+25452, + ts+25499, libc.VaList(bp, zDb, zPrefix)) for ii = 0; ii < int32((*Rtree)(unsafe.Pointer(pRtree)).FnAux); ii++ { - Xsqlite3_str_appendf(tls, p, ts+25514, libc.VaList(bp+16, ii)) + Xsqlite3_str_appendf(tls, p, ts+25561, libc.VaList(bp+16, ii)) } Xsqlite3_str_appendf(tls, p, - ts+25519, + ts+25566, libc.VaList(bp+24, zDb, zPrefix)) Xsqlite3_str_appendf(tls, p, - ts+25583, + ts+25630, libc.VaList(bp+40, zDb, zPrefix)) Xsqlite3_str_appendf(tls, p, - ts+25653, + ts+25700, libc.VaList(bp+56, zDb, zPrefix, (*Rtree)(unsafe.Pointer(pRtree)).FiNodeSize)) zCreate = Xsqlite3_str_finish(tls, p) if !(zCreate != 0) { @@ -123636,7 +123694,7 @@ if i != 3 || int32((*Rtree)(unsafe.Pointer(pRtree)).FnAux) == 0 { zFormat = azSql[i] } else { - zFormat = ts + 25702 + zFormat = ts + 25749 } zSql = Xsqlite3_mprintf(tls, zFormat, libc.VaList(bp+80, zDb, zPrefix)) if zSql != 0 { @@ -123648,7 +123706,7 @@ } if (*Rtree)(unsafe.Pointer(pRtree)).FnAux != 0 { (*Rtree)(unsafe.Pointer(pRtree)).FzReadAuxSql = Xsqlite3_mprintf(tls, - ts+25810, + ts+25857, libc.VaList(bp+96, zDb, zPrefix)) if (*Rtree)(unsafe.Pointer(pRtree)).FzReadAuxSql == uintptr(0) { rc = SQLITE_NOMEM @@ -123656,18 +123714,18 @@ var p uintptr = Xsqlite3_str_new(tls, db) var ii int32 var zSql uintptr - Xsqlite3_str_appendf(tls, p, ts+25855, libc.VaList(bp+112, zDb, zPrefix)) + Xsqlite3_str_appendf(tls, p, ts+25902, libc.VaList(bp+112, zDb, zPrefix)) for ii = 0; ii < int32((*Rtree)(unsafe.Pointer(pRtree)).FnAux); ii++ { if ii != 0 { Xsqlite3_str_append(tls, p, ts+12767, 1) } if ii < int32((*Rtree)(unsafe.Pointer(pRtree)).FnAuxNotNull) { - Xsqlite3_str_appendf(tls, p, ts+25882, libc.VaList(bp+128, ii, ii+2, ii)) + Xsqlite3_str_appendf(tls, p, ts+25929, libc.VaList(bp+128, ii, ii+2, ii)) } else { - Xsqlite3_str_appendf(tls, p, ts+25904, libc.VaList(bp+152, ii, ii+2)) + Xsqlite3_str_appendf(tls, p, ts+25951, libc.VaList(bp+152, ii, ii+2)) } } - Xsqlite3_str_appendf(tls, p, ts+25912, 0) + Xsqlite3_str_appendf(tls, p, ts+25959, 0) zSql = Xsqlite3_str_finish(tls, p) if zSql == uintptr(0) { rc = SQLITE_NOMEM @@ 
-123682,14 +123740,14 @@ } var azSql = [8]uintptr{ - ts + 25928, - ts + 25981, - ts + 26026, - ts + 26078, - ts + 26132, - ts + 26177, - ts + 26235, - ts + 26290, + ts + 25975, + ts + 26028, + ts + 26073, + ts + 26125, + ts + 26179, + ts + 26224, + ts + 26282, + ts + 26337, } func getIntFromStmt(tls *libc.TLS, db uintptr, zSql uintptr, piVal uintptr) int32 { @@ -123718,7 +123776,7 @@ var zSql uintptr if isCreate != 0 { *(*int32)(unsafe.Pointer(bp + 48)) = 0 - zSql = Xsqlite3_mprintf(tls, ts+26337, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb)) + zSql = Xsqlite3_mprintf(tls, ts+26384, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb)) rc = getIntFromStmt(tls, db, zSql, bp+48) if rc == SQLITE_OK { (*Rtree)(unsafe.Pointer(pRtree)).FiNodeSize = *(*int32)(unsafe.Pointer(bp + 48)) - 64 @@ -123730,7 +123788,7 @@ } } else { zSql = Xsqlite3_mprintf(tls, - ts+26357, + ts+26404, libc.VaList(bp+16, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) rc = getIntFromStmt(tls, db, zSql, pRtree+32) if rc != SQLITE_OK { @@ -123738,7 +123796,7 @@ } else if (*Rtree)(unsafe.Pointer(pRtree)).FiNodeSize < 512-64 { rc = SQLITE_CORRUPT | int32(1)<<8 - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+26414, + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+26461, libc.VaList(bp+40, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) } } @@ -123780,10 +123838,10 @@ ii = 4 *(*[5]uintptr)(unsafe.Pointer(bp + 96)) = [5]uintptr{ uintptr(0), - ts + 26449, - ts + 26492, - ts + 26527, - ts + 26563, + ts + 26496, + ts + 26539, + ts + 26574, + ts + 26610, } if !(argc < 6 || argc > RTREE_MAX_AUX_COLUMN+3) { @@ -123814,7 +123872,7 @@ libc.Xmemcpy(tls, (*Rtree)(unsafe.Pointer(pRtree)).FzName, *(*uintptr)(unsafe.Pointer(argv + 2*8)), uint64(nName)) pSql = Xsqlite3_str_new(tls, db) - Xsqlite3_str_appendf(tls, pSql, ts+26600, + Xsqlite3_str_appendf(tls, pSql, ts+26647, libc.VaList(bp+16, rtreeTokenLength(tls, *(*uintptr)(unsafe.Pointer(argv + 3*8))), *(*uintptr)(unsafe.Pointer(argv + 3*8)))) ii = 4 __3: @@ -123826,7 +123884,7 @@ goto __6 } (*Rtree)(unsafe.Pointer(pRtree)).FnAux++ - Xsqlite3_str_appendf(tls, pSql, ts+26624, libc.VaList(bp+32, rtreeTokenLength(tls, zArg+uintptr(1)), zArg+uintptr(1))) + Xsqlite3_str_appendf(tls, pSql, ts+26671, libc.VaList(bp+32, rtreeTokenLength(tls, zArg+uintptr(1)), zArg+uintptr(1))) goto __7 __6: if !(int32((*Rtree)(unsafe.Pointer(pRtree)).FnAux) > 0) { @@ -123849,7 +123907,7 @@ goto __5 __5: ; - Xsqlite3_str_appendf(tls, pSql, ts+26630, 0) + Xsqlite3_str_appendf(tls, pSql, ts+26677, 0) zSql = Xsqlite3_str_finish(tls, pSql) if !!(zSql != 0) { goto __10 @@ -123945,7 +124003,7 @@ return rc } -var azFormat = [2]uintptr{ts + 26633, ts + 26644} +var azFormat = [2]uintptr{ts + 26680, ts + 26691} func rtreenode(tls *libc.TLS, ctx uintptr, nArg int32, apArg uintptr) { bp := tls.Alloc(1072) @@ -123985,11 +124043,11 @@ if ii > 0 { Xsqlite3_str_append(tls, pOut, ts+10920, 1) } - Xsqlite3_str_appendf(tls, pOut, ts+26654, libc.VaList(bp, (*RtreeCell)(unsafe.Pointer(bp+1024)).FiRowid)) + Xsqlite3_str_appendf(tls, pOut, ts+26701, libc.VaList(bp, (*RtreeCell)(unsafe.Pointer(bp+1024)).FiRowid)) for jj = 0; jj < int32((*Rtree)(unsafe.Pointer(bp+56)).FnDim2); jj++ { - Xsqlite3_str_appendf(tls, pOut, ts+26660, libc.VaList(bp+8, float64(*(*RtreeValue)(unsafe.Pointer(bp + 1024 + 8 + uintptr(jj)*4))))) + Xsqlite3_str_appendf(tls, pOut, ts+26707, libc.VaList(bp+8, float64(*(*RtreeValue)(unsafe.Pointer(bp + 1024 + 8 + uintptr(jj)*4))))) } - 
Xsqlite3_str_append(tls, pOut, ts+26664, 1) + Xsqlite3_str_append(tls, pOut, ts+26711, 1) } errCode = Xsqlite3_str_errcode(tls, pOut) Xsqlite3_result_text(tls, ctx, Xsqlite3_str_finish(tls, pOut), -1, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{Xsqlite3_free}))) @@ -124000,7 +124058,7 @@ _ = nArg if Xsqlite3_value_type(tls, *(*uintptr)(unsafe.Pointer(apArg))) != SQLITE_BLOB || Xsqlite3_value_bytes(tls, *(*uintptr)(unsafe.Pointer(apArg))) < 2 { - Xsqlite3_result_error(tls, ctx, ts+26666, -1) + Xsqlite3_result_error(tls, ctx, ts+26713, -1) } else { var zBlob uintptr = Xsqlite3_value_blob(tls, *(*uintptr)(unsafe.Pointer(apArg))) if zBlob != 0 { @@ -124078,7 +124136,7 @@ if z == uintptr(0) { (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc = SQLITE_NOMEM } else { - (*RtreeCheck)(unsafe.Pointer(pCheck)).FzReport = Xsqlite3_mprintf(tls, ts+26699, + (*RtreeCheck)(unsafe.Pointer(pCheck)).FzReport = Xsqlite3_mprintf(tls, ts+26746, libc.VaList(bp, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzReport, func() uintptr { if (*RtreeCheck)(unsafe.Pointer(pCheck)).FzReport != 0 { return ts + 4062 @@ -124102,7 +124160,7 @@ if (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc == SQLITE_OK && (*RtreeCheck)(unsafe.Pointer(pCheck)).FpGetNode == uintptr(0) { (*RtreeCheck)(unsafe.Pointer(pCheck)).FpGetNode = rtreeCheckPrepare(tls, pCheck, - ts+26706, + ts+26753, libc.VaList(bp, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzDb, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzTab)) } @@ -124121,7 +124179,7 @@ } rtreeCheckReset(tls, pCheck, (*RtreeCheck)(unsafe.Pointer(pCheck)).FpGetNode) if (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc == SQLITE_OK && pRet == uintptr(0) { - rtreeCheckAppendMsg(tls, pCheck, ts+26751, libc.VaList(bp+16, iNode)) + rtreeCheckAppendMsg(tls, pCheck, ts+26798, libc.VaList(bp+16, iNode)) } } @@ -124135,8 +124193,8 @@ var rc int32 var pStmt uintptr *(*[2]uintptr)(unsafe.Pointer(bp + 80)) = [2]uintptr{ - ts + 26783, - ts + 26837, + ts + 26830, + ts + 26884, } if *(*uintptr)(unsafe.Pointer(pCheck + 40 + uintptr(bLeaf)*8)) == uintptr(0) { @@ -124151,23 +124209,23 @@ Xsqlite3_bind_int64(tls, pStmt, 1, iKey) rc = Xsqlite3_step(tls, pStmt) if rc == SQLITE_DONE { - rtreeCheckAppendMsg(tls, pCheck, ts+26885, + rtreeCheckAppendMsg(tls, pCheck, ts+26932, libc.VaList(bp+16, iKey, iVal, func() uintptr { if bLeaf != 0 { - return ts + 26930 + return ts + 26977 } - return ts + 26938 + return ts + 26985 }())) } else if rc == SQLITE_ROW { var ii I64 = Xsqlite3_column_int64(tls, pStmt, 0) if ii != iVal { rtreeCheckAppendMsg(tls, pCheck, - ts+26947, + ts+26994, libc.VaList(bp+40, iKey, ii, func() uintptr { if bLeaf != 0 { - return ts + 26930 + return ts + 26977 } - return ts + 26938 + return ts + 26985 }(), iKey, iVal)) } } @@ -124191,7 +124249,7 @@ return libc.Bool32(*(*RtreeValue)(unsafe.Pointer(bp + 48)) > *(*RtreeValue)(unsafe.Pointer(bp + 52))) }() != 0 { rtreeCheckAppendMsg(tls, pCheck, - ts+27005, libc.VaList(bp, i, iCell, iNode)) + ts+27052, libc.VaList(bp, i, iCell, iNode)) } if pParent != 0 { @@ -124211,7 +124269,7 @@ return libc.Bool32(*(*RtreeValue)(unsafe.Pointer(bp + 52)) > *(*RtreeValue)(unsafe.Pointer(bp + 60))) }() != 0 { rtreeCheckAppendMsg(tls, pCheck, - ts+27053, libc.VaList(bp+24, i, iCell, iNode)) + ts+27100, libc.VaList(bp+24, i, iCell, iNode)) } } } @@ -124228,14 +124286,14 @@ if aNode != 0 { if *(*int32)(unsafe.Pointer(bp + 48)) < 4 { rtreeCheckAppendMsg(tls, pCheck, - ts+27120, libc.VaList(bp, iNode, *(*int32)(unsafe.Pointer(bp + 48)))) + ts+27167, libc.VaList(bp, iNode, *(*int32)(unsafe.Pointer(bp + 
48)))) } else { var nCell int32 var i int32 if aParent == uintptr(0) { iDepth = readInt16(tls, aNode) if iDepth > RTREE_MAX_DEPTH { - rtreeCheckAppendMsg(tls, pCheck, ts+27154, libc.VaList(bp+16, iDepth)) + rtreeCheckAppendMsg(tls, pCheck, ts+27201, libc.VaList(bp+16, iDepth)) Xsqlite3_free(tls, aNode) return } @@ -124243,7 +124301,7 @@ nCell = readInt16(tls, aNode+2) if 4+nCell*(8+(*RtreeCheck)(unsafe.Pointer(pCheck)).FnDim*2*4) > *(*int32)(unsafe.Pointer(bp + 48)) { rtreeCheckAppendMsg(tls, pCheck, - ts+27184, + ts+27231, libc.VaList(bp+24, iNode, nCell, *(*int32)(unsafe.Pointer(bp + 48)))) } else { for i = 0; i < nCell; i++ { @@ -124272,14 +124330,14 @@ if (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc == SQLITE_OK { var pCount uintptr - pCount = rtreeCheckPrepare(tls, pCheck, ts+27239, + pCount = rtreeCheckPrepare(tls, pCheck, ts+27286, libc.VaList(bp, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzDb, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzTab, zTbl)) if pCount != 0 { if Xsqlite3_step(tls, pCount) == SQLITE_ROW { var nActual I64 = Xsqlite3_column_int64(tls, pCount, 0) if nActual != nExpect { rtreeCheckAppendMsg(tls, pCheck, - ts+27270, libc.VaList(bp+24, zTbl, nExpect, nActual)) + ts+27317, libc.VaList(bp+24, zTbl, nExpect, nActual)) } } (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc = Xsqlite3_finalize(tls, pCount) @@ -124306,7 +124364,7 @@ } if (*RtreeCheck)(unsafe.Pointer(bp+32)).Frc == SQLITE_OK { - pStmt = rtreeCheckPrepare(tls, bp+32, ts+27337, libc.VaList(bp, zDb, zTab)) + pStmt = rtreeCheckPrepare(tls, bp+32, ts+27384, libc.VaList(bp, zDb, zTab)) if pStmt != 0 { nAux = Xsqlite3_column_count(tls, pStmt) - 2 Xsqlite3_finalize(tls, pStmt) @@ -124315,12 +124373,12 @@ } } - pStmt = rtreeCheckPrepare(tls, bp+32, ts+25157, libc.VaList(bp+16, zDb, zTab)) + pStmt = rtreeCheckPrepare(tls, bp+32, ts+25204, libc.VaList(bp+16, zDb, zTab)) if pStmt != 0 { var rc int32 (*RtreeCheck)(unsafe.Pointer(bp + 32)).FnDim = (Xsqlite3_column_count(tls, pStmt) - 1 - nAux) / 2 if (*RtreeCheck)(unsafe.Pointer(bp+32)).FnDim < 1 { - rtreeCheckAppendMsg(tls, bp+32, ts+27365, 0) + rtreeCheckAppendMsg(tls, bp+32, ts+27412, 0) } else if SQLITE_ROW == Xsqlite3_step(tls, pStmt) { (*RtreeCheck)(unsafe.Pointer(bp + 32)).FbInt = libc.Bool32(Xsqlite3_column_type(tls, pStmt, 1) == SQLITE_INTEGER) } @@ -124334,8 +124392,8 @@ if (*RtreeCheck)(unsafe.Pointer(bp+32)).Frc == SQLITE_OK { rtreeCheckNode(tls, bp+32, 0, uintptr(0), int64(1)) } - rtreeCheckCount(tls, bp+32, ts+27396, int64((*RtreeCheck)(unsafe.Pointer(bp+32)).FnLeaf)) - rtreeCheckCount(tls, bp+32, ts+27403, int64((*RtreeCheck)(unsafe.Pointer(bp+32)).FnNonLeaf)) + rtreeCheckCount(tls, bp+32, ts+27443, int64((*RtreeCheck)(unsafe.Pointer(bp+32)).FnLeaf)) + rtreeCheckCount(tls, bp+32, ts+27450, int64((*RtreeCheck)(unsafe.Pointer(bp+32)).FnNonLeaf)) } Xsqlite3_finalize(tls, (*RtreeCheck)(unsafe.Pointer(bp+32)).FpGetNode) @@ -124343,7 +124401,7 @@ Xsqlite3_finalize(tls, *(*uintptr)(unsafe.Pointer(bp + 32 + 40 + 1*8))) if bEnd != 0 { - var rc int32 = Xsqlite3_exec(tls, db, ts+27411, uintptr(0), uintptr(0), uintptr(0)) + var rc int32 = Xsqlite3_exec(tls, db, ts+27458, uintptr(0), uintptr(0), uintptr(0)) if (*RtreeCheck)(unsafe.Pointer(bp+32)).Frc == SQLITE_OK { (*RtreeCheck)(unsafe.Pointer(bp + 32)).Frc = rc } @@ -124358,7 +124416,7 @@ if nArg != 1 && nArg != 2 { Xsqlite3_result_error(tls, ctx, - ts+27415, -1) + ts+27462, -1) } else { var rc int32 *(*uintptr)(unsafe.Pointer(bp)) = uintptr(0) @@ -124376,7 +124434,7 @@ if *(*uintptr)(unsafe.Pointer(bp)) != 0 { return 
*(*uintptr)(unsafe.Pointer(bp)) } - return ts + 18012 + return ts + 18059 }(), -1, libc.UintptrFromInt32(-1)) } else { Xsqlite3_result_error_code(tls, ctx, rc) @@ -124747,11 +124805,11 @@ var db uintptr = Xsqlite3_context_db_handle(tls, context) var x uintptr = Xsqlite3_str_new(tls, db) var i int32 - Xsqlite3_str_append(tls, x, ts+27466, 1) + Xsqlite3_str_append(tls, x, ts+27513, 1) for i = 0; i < (*GeoPoly)(unsafe.Pointer(p)).FnVertex; i++ { - Xsqlite3_str_appendf(tls, x, ts+27468, libc.VaList(bp, float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2)*4))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2+1)*4))))) + Xsqlite3_str_appendf(tls, x, ts+27515, libc.VaList(bp, float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2)*4))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2+1)*4))))) } - Xsqlite3_str_appendf(tls, x, ts+27479, libc.VaList(bp+16, float64(*(*GeoCoord)(unsafe.Pointer(p + 8))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + 1*4))))) + Xsqlite3_str_appendf(tls, x, ts+27526, libc.VaList(bp+16, float64(*(*GeoCoord)(unsafe.Pointer(p + 8))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + 1*4))))) Xsqlite3_result_text(tls, context, Xsqlite3_str_finish(tls, x), -1, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{Xsqlite3_free}))) Xsqlite3_free(tls, p) } @@ -124771,19 +124829,19 @@ var x uintptr = Xsqlite3_str_new(tls, db) var i int32 var cSep int8 = int8('\'') - Xsqlite3_str_appendf(tls, x, ts+27490, 0) + Xsqlite3_str_appendf(tls, x, ts+27537, 0) for i = 0; i < (*GeoPoly)(unsafe.Pointer(p)).FnVertex; i++ { - Xsqlite3_str_appendf(tls, x, ts+27508, libc.VaList(bp, int32(cSep), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2)*4))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2+1)*4))))) + Xsqlite3_str_appendf(tls, x, ts+27555, libc.VaList(bp, int32(cSep), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2)*4))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2+1)*4))))) cSep = int8(' ') } - Xsqlite3_str_appendf(tls, x, ts+27516, libc.VaList(bp+24, float64(*(*GeoCoord)(unsafe.Pointer(p + 8))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + 1*4))))) + Xsqlite3_str_appendf(tls, x, ts+27563, libc.VaList(bp+24, float64(*(*GeoCoord)(unsafe.Pointer(p + 8))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + 1*4))))) for i = 1; i < argc; i++ { var z uintptr = Xsqlite3_value_text(tls, *(*uintptr)(unsafe.Pointer(argv + uintptr(i)*8))) if z != 0 && *(*int8)(unsafe.Pointer(z)) != 0 { - Xsqlite3_str_appendf(tls, x, ts+27524, libc.VaList(bp+40, z)) + Xsqlite3_str_appendf(tls, x, ts+27571, libc.VaList(bp+40, z)) } } - Xsqlite3_str_appendf(tls, x, ts+27528, 0) + Xsqlite3_str_appendf(tls, x, ts+27575, 0) Xsqlite3_result_text(tls, context, Xsqlite3_str_finish(tls, x), -1, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{Xsqlite3_free}))) Xsqlite3_free(tls, p) } @@ -125703,7 +125761,7 @@ libc.Xmemcpy(tls, (*Rtree)(unsafe.Pointer(pRtree)).FzName, *(*uintptr)(unsafe.Pointer(argv + 2*8)), uint64(nName)) pSql = Xsqlite3_str_new(tls, db) - Xsqlite3_str_appendf(tls, pSql, ts+27541, 0) + Xsqlite3_str_appendf(tls, pSql, ts+27588, 0) (*Rtree)(unsafe.Pointer(pRtree)).FnAux = U8(1) (*Rtree)(unsafe.Pointer(pRtree)).FnAuxNotNull = U8(1) ii = 3 @@ -125712,7 +125770,7 @@ goto __4 } (*Rtree)(unsafe.Pointer(pRtree)).FnAux++ - Xsqlite3_str_appendf(tls, pSql, ts+27563, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(argv + uintptr(ii)*8)))) + Xsqlite3_str_appendf(tls, pSql, ts+27610, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(argv + uintptr(ii)*8)))) goto 
__3 __3: ii++ @@ -125720,7 +125778,7 @@ goto __4 __4: ; - Xsqlite3_str_appendf(tls, pSql, ts+26630, 0) + Xsqlite3_str_appendf(tls, pSql, ts+26677, 0) zSql = Xsqlite3_str_finish(tls, pSql) if !!(zSql != 0) { goto __5 @@ -125957,7 +126015,7 @@ } if iFuncTerm >= 0 { (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxNum = idxNum - (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxStr = ts + 27567 + (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxStr = ts + 27614 (*sqlite3_index_constraint_usage)(unsafe.Pointer((*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FaConstraintUsage + uintptr(iFuncTerm)*8)).FargvIndex = 1 (*sqlite3_index_constraint_usage)(unsafe.Pointer((*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FaConstraintUsage + uintptr(iFuncTerm)*8)).Fomit = uint8(0) (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FestimatedCost = 300.0 @@ -125965,7 +126023,7 @@ return SQLITE_OK } (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxNum = 4 - (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxStr = ts + 27573 + (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxStr = ts + 27620 (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FestimatedCost = 3000000.0 (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FestimatedRows = int64(100000) return SQLITE_OK @@ -126077,7 +126135,7 @@ if !(*(*int32)(unsafe.Pointer(bp + 48)) == SQLITE_ERROR) { goto __4 } - (*Sqlite3_vtab)(unsafe.Pointer(pVtab)).FzErrMsg = Xsqlite3_mprintf(tls, ts+27582, 0) + (*Sqlite3_vtab)(unsafe.Pointer(pVtab)).FzErrMsg = Xsqlite3_mprintf(tls, ts+27629, 0) __4: ; goto geopoly_update_end @@ -126209,14 +126267,14 @@ func geopolyFindFunction(tls *libc.TLS, pVtab uintptr, nArg int32, zName uintptr, pxFunc uintptr, ppArg uintptr) int32 { _ = pVtab _ = nArg - if Xsqlite3_stricmp(tls, zName, ts+27622) == 0 { + if Xsqlite3_stricmp(tls, zName, ts+27669) == 0 { *(*uintptr)(unsafe.Pointer(pxFunc)) = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{geopolyOverlapFunc})) *(*uintptr)(unsafe.Pointer(ppArg)) = uintptr(0) return SQLITE_INDEX_CONSTRAINT_FUNCTION } - if Xsqlite3_stricmp(tls, zName, ts+27638) == 0 { + if Xsqlite3_stricmp(tls, zName, ts+27685) == 0 { *(*uintptr)(unsafe.Pointer(pxFunc)) = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{geopolyWithinFunc})) @@ -126281,7 +126339,7 @@ uintptr(0), aAgg[i].FxStep, aAgg[i].FxFinal) } if rc == SQLITE_OK { - rc = Xsqlite3_create_module_v2(tls, db, ts+27653, uintptr(unsafe.Pointer(&geopolyModule)), uintptr(0), uintptr(0)) + rc = Xsqlite3_create_module_v2(tls, db, ts+27700, uintptr(unsafe.Pointer(&geopolyModule)), uintptr(0), uintptr(0)) } return rc } @@ -126293,25 +126351,25 @@ F__ccgo_pad1 [6]byte FzName uintptr }{ - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27661}, - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27674}, - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27687}, - {FxFunc: 0, FnArg: int8(-1), FbPure: uint8(1), FzName: ts + 27700}, - {FxFunc: 0, FnArg: int8(2), FbPure: uint8(1), FzName: ts + 27638}, - {FxFunc: 0, FnArg: int8(3), FbPure: uint8(1), FzName: ts + 27712}, - {FxFunc: 0, FnArg: int8(2), FbPure: uint8(1), FzName: ts + 27622}, - {FxFunc: 0, FnArg: int8(1), FzName: ts + 27735}, - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27749}, - {FxFunc: 0, FnArg: int8(7), FbPure: uint8(1), FzName: ts + 27762}, - {FxFunc: 0, FnArg: int8(4), FbPure: uint8(1), FzName: ts + 27776}, - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27792}, + {FxFunc: 0, 
FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27708}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27721}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27734}, + {FxFunc: 0, FnArg: int8(-1), FbPure: uint8(1), FzName: ts + 27747}, + {FxFunc: 0, FnArg: int8(2), FbPure: uint8(1), FzName: ts + 27685}, + {FxFunc: 0, FnArg: int8(3), FbPure: uint8(1), FzName: ts + 27759}, + {FxFunc: 0, FnArg: int8(2), FbPure: uint8(1), FzName: ts + 27669}, + {FxFunc: 0, FnArg: int8(1), FzName: ts + 27782}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27796}, + {FxFunc: 0, FnArg: int8(7), FbPure: uint8(1), FzName: ts + 27809}, + {FxFunc: 0, FnArg: int8(4), FbPure: uint8(1), FzName: ts + 27823}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27839}, } var aAgg = [1]struct { FxStep uintptr FxFinal uintptr FzName uintptr }{ - {FxStep: 0, FxFinal: 0, FzName: ts + 27804}, + {FxStep: 0, FxFinal: 0, FzName: ts + 27851}, } // Register the r-tree module with database handle db. This creates the @@ -126321,26 +126379,26 @@ var utf8 int32 = SQLITE_UTF8 var rc int32 - rc = Xsqlite3_create_function(tls, db, ts+27823, 2, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + rc = Xsqlite3_create_function(tls, db, ts+27870, 2, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rtreenode})), uintptr(0), uintptr(0)) if rc == SQLITE_OK { - rc = Xsqlite3_create_function(tls, db, ts+27833, 1, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + rc = Xsqlite3_create_function(tls, db, ts+27880, 1, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rtreedepth})), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { - rc = Xsqlite3_create_function(tls, db, ts+27844, -1, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + rc = Xsqlite3_create_function(tls, db, ts+27891, -1, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rtreecheck})), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { var c uintptr = uintptr(RTREE_COORD_REAL32) - rc = Xsqlite3_create_module_v2(tls, db, ts+27567, uintptr(unsafe.Pointer(&rtreeModule)), c, uintptr(0)) + rc = Xsqlite3_create_module_v2(tls, db, ts+27614, uintptr(unsafe.Pointer(&rtreeModule)), c, uintptr(0)) } if rc == SQLITE_OK { var c uintptr = uintptr(RTREE_COORD_INT32) - rc = Xsqlite3_create_module_v2(tls, db, ts+27855, uintptr(unsafe.Pointer(&rtreeModule)), c, uintptr(0)) + rc = Xsqlite3_create_module_v2(tls, db, ts+27902, uintptr(unsafe.Pointer(&rtreeModule)), c, uintptr(0)) } if rc == SQLITE_OK { rc = sqlite3_geopoly_init(tls, db) @@ -126394,7 +126452,7 @@ Xsqlite3_result_error_nomem(tls, ctx) rtreeMatchArgFree(tls, pBlob) } else { - Xsqlite3_result_pointer(tls, ctx, pBlob, ts+25143, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{rtreeMatchArgFree}))) + Xsqlite3_result_pointer(tls, ctx, pBlob, ts+25190, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{rtreeMatchArgFree}))) } } } @@ -126721,7 +126779,7 @@ nOut = rbuDeltaOutputSize(tls, aDelta, nDelta) if nOut < 0 { - Xsqlite3_result_error(tls, context, ts+27865, -1) + Xsqlite3_result_error(tls, context, ts+27912, -1) return } @@ -126732,7 +126790,7 @@ nOut2 = rbuDeltaApply(tls, aOrig, nOrig, aDelta, nDelta, aOut) if nOut2 != nOut { Xsqlite3_free(tls, aOut) - Xsqlite3_result_error(tls, context, ts+27865, -1) + Xsqlite3_result_error(tls, context, ts+27912, -1) } else { Xsqlite3_result_blob(tls, context, aOut, 
nOut, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{Xsqlite3_free}))) } @@ -126833,7 +126891,7 @@ rbuObjIterClearStatements(tls, pIter) if (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx == uintptr(0) { rc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+27886, uintptr(0), uintptr(0), p+64) + ts+27933, uintptr(0), uintptr(0), p+64) } if rc == SQLITE_OK { @@ -126897,7 +126955,7 @@ Xsqlite3_result_text(tls, pCtx, zIn, -1, uintptr(0)) } } else { - if libc.Xstrlen(tls, zIn) > uint64(4) && libc.Xmemcmp(tls, ts+25056, zIn, uint64(4)) == 0 { + if libc.Xstrlen(tls, zIn) > uint64(4) && libc.Xmemcmp(tls, ts+25103, zIn, uint64(4)) == 0 { var i int32 for i = 4; int32(*(*int8)(unsafe.Pointer(zIn + uintptr(i)))) >= '0' && int32(*(*int8)(unsafe.Pointer(zIn + uintptr(i)))) <= '9'; i++ { } @@ -126918,16 +126976,16 @@ rc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, pIter, p+64, Xsqlite3_mprintf(tls, - ts+28057, libc.VaList(bp, func() uintptr { + ts+28104, libc.VaList(bp, func() uintptr { if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - return ts + 28207 + return ts + 28254 } return ts + 1554 }()))) if rc == SQLITE_OK { rc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+8, p+64, - ts+28248) + ts+28295) } (*RbuObjIter)(unsafe.Pointer(pIter)).FbCleanup = 1 @@ -127043,7 +127101,7 @@ (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+32, p+64, Xsqlite3_mprintf(tls, - ts+28373, libc.VaList(bp, zTab))) + ts+28420, libc.VaList(bp, zTab))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc != SQLITE_OK || Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 32))) != SQLITE_ROW) { goto __1 } @@ -127061,7 +127119,7 @@ *(*int32)(unsafe.Pointer(piTnum)) = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 32)), 1) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+32+1*8, p+64, - Xsqlite3_mprintf(tls, ts+28492, libc.VaList(bp+8, zTab))) + Xsqlite3_mprintf(tls, ts+28539, libc.VaList(bp+8, zTab))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc != 0) { goto __3 } @@ -127079,7 +127137,7 @@ } (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+32+2*8, p+64, Xsqlite3_mprintf(tls, - ts+28513, libc.VaList(bp+16, zIdx))) + ts+28560, libc.VaList(bp+16, zIdx))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK) { goto __7 } @@ -127102,7 +127160,7 @@ __5: ; (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+32+3*8, p+64, - Xsqlite3_mprintf(tls, ts+28564, libc.VaList(bp+24, zTab))) + Xsqlite3_mprintf(tls, ts+28611, libc.VaList(bp+24, zTab))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK) { goto __10 } @@ -127148,7 +127206,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { libc.Xmemcpy(tls, (*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed, (*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk, uint64(unsafe.Sizeof(U8(0)))*uint64((*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol)) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+16, p+64, - Xsqlite3_mprintf(tls, ts+28585, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) + Xsqlite3_mprintf(tls, ts+28632, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) } (*RbuObjIter)(unsafe.Pointer(pIter)).FnIndex = 0 @@ -127163,7 +127221,7 @@ 
libc.Xmemset(tls, (*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed, 0x01, uint64(unsafe.Sizeof(U8(0)))*uint64((*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol)) } (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+24, p+64, - Xsqlite3_mprintf(tls, ts+28613, libc.VaList(bp+8, zIdx))) + Xsqlite3_mprintf(tls, ts+28660, libc.VaList(bp+8, zIdx))) for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) { var iCid int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 24)), 1) if iCid >= 0 { @@ -127203,7 +127261,7 @@ rbuTableType(tls, p, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, pIter+72, bp+56, pIter+108) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NOTABLE { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+19489, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl)) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+19536, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl)) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc != 0 { return (*Sqlite3rbu)(unsafe.Pointer(p)).Frc @@ -127213,18 +127271,18 @@ } (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp+64, p+64, - Xsqlite3_mprintf(tls, ts+28642, libc.VaList(bp+8, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl))) + Xsqlite3_mprintf(tls, ts+28689, libc.VaList(bp+8, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl))) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { nCol = Xsqlite3_column_count(tls, *(*uintptr)(unsafe.Pointer(bp + 64))) rbuAllocateIterArrays(tls, p, pIter, nCol) } for i = 0; (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && i < nCol; i++ { var zName uintptr = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp + 64)), i) - if Xsqlite3_strnicmp(tls, ts+28661, zName, 4) != 0 { + if Xsqlite3_strnicmp(tls, ts+28708, zName, 4) != 0 { var zCopy uintptr = rbuStrndup(tls, zName, p+56) *(*int32)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FaiSrcOrder + uintptr((*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol)*4)) = (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(libc.PostIncInt32(&(*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol, 1))*8)) = zCopy - } else if 0 == Xsqlite3_stricmp(tls, ts+28666, zName) { + } else if 0 == Xsqlite3_stricmp(tls, ts+28713, zName) { bRbuRowid = 1 } } @@ -127236,18 +127294,18 @@ bRbuRowid != libc.Bool32((*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_VTAB || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE) { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, - ts+28676, libc.VaList(bp+16, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, + ts+28723, libc.VaList(bp+16, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, func() uintptr { if bRbuRowid != 0 { - return ts + 28705 + return ts + 28752 } - return ts + 28718 + return ts + 28765 }())) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+64, p+64, - Xsqlite3_mprintf(tls, ts+28727, libc.VaList(bp+32, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) + Xsqlite3_mprintf(tls, ts+28774, libc.VaList(bp+32, 
(*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) } for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 64))) { var zName uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 64)), 1) @@ -127261,7 +127319,7 @@ } if i == (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+28749, + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+28796, libc.VaList(bp+40, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zName)) } else { var iPk int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 64)), 5) @@ -127308,7 +127366,7 @@ var i int32 for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { var z uintptr = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) - zList = rbuMPrintf(tls, p, ts+28776, libc.VaList(bp, zList, zSep, z)) + zList = rbuMPrintf(tls, p, ts+28823, libc.VaList(bp, zList, zSep, z)) zSep = ts + 14614 } return zList @@ -127326,7 +127384,7 @@ for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { if int32(*(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(i)))) == iPk { var zCol uintptr = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) - zRet = rbuMPrintf(tls, p, ts+28785, libc.VaList(bp, zRet, zSep, zPre, zCol, zPost)) + zRet = rbuMPrintf(tls, p, ts+28832, libc.VaList(bp, zRet, zSep, zPre, zCol, zPost)) zSep = zSeparator break } @@ -127348,25 +127406,25 @@ if bRowid != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+72, p+64, Xsqlite3_mprintf(tls, - ts+28798, libc.VaList(bp, zWrite, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) + ts+28845, libc.VaList(bp, zWrite, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 72))) { var iMax Sqlite3_int64 = Xsqlite3_column_int64(tls, *(*uintptr)(unsafe.Pointer(bp + 72)), 0) - zRet = rbuMPrintf(tls, p, ts+28830, libc.VaList(bp+16, iMax)) + zRet = rbuMPrintf(tls, p, ts+28877, libc.VaList(bp+16, iMax)) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 72))) } else { - var zOrder uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+1554, ts+14614, ts+28853) - var zSelect uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+28859, ts+28866, ts+4957) + var zOrder uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+1554, ts+14614, ts+28900) + var zSelect uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+28906, ts+28913, ts+4957) var zList uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+1554, ts+14614, ts+1554) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+72, p+64, Xsqlite3_mprintf(tls, - ts+28874, + ts+28921, libc.VaList(bp+24, zSelect, zWrite, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zOrder))) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 72))) { var zVal uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 72)), 0) - zRet = rbuMPrintf(tls, p, ts+28916, libc.VaList(bp+56, zList, zVal)) + zRet = rbuMPrintf(tls, p, ts+28963, libc.VaList(bp+56, zList, zVal)) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 
72))) } @@ -127408,7 +127466,7 @@ *(*uintptr)(unsafe.Pointer(bp + 176)) = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+176, p+64, - Xsqlite3_mprintf(tls, ts+28613, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx))) + Xsqlite3_mprintf(tls, ts+28660, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx))) __1: if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 176)))) { goto __2 @@ -127443,7 +127501,7 @@ zCol = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) goto __7 __6: - zCol = ts + 28936 + zCol = ts + 28983 __7: ; goto __5 @@ -127451,11 +127509,11 @@ zCol = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(iCid)*8)) __5: ; - zLhs = rbuMPrintf(tls, p, ts+28944, + zLhs = rbuMPrintf(tls, p, ts+28991, libc.VaList(bp+8, zLhs, zSep, zCol, zCollate)) - zOrder = rbuMPrintf(tls, p, ts+28965, + zOrder = rbuMPrintf(tls, p, ts+29012, libc.VaList(bp+40, zOrder, zSep, iCol, zCol, zCollate)) - zSelect = rbuMPrintf(tls, p, ts+29001, + zSelect = rbuMPrintf(tls, p, ts+29048, libc.VaList(bp+80, zSelect, zSep, iCol, zCol)) zSep = ts + 14614 iCol++ @@ -127475,7 +127533,7 @@ *(*uintptr)(unsafe.Pointer(bp + 184)) = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+184, p+64, - Xsqlite3_mprintf(tls, ts+29028, + Xsqlite3_mprintf(tls, ts+29075, libc.VaList(bp+112, zSelect, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zOrder))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 184)))) { goto __13 @@ -127502,7 +127560,7 @@ ; __18: ; - zVector = rbuMPrintf(tls, p, ts+29076, libc.VaList(bp+136, zVector, zSep, zQuoted)) + zVector = rbuMPrintf(tls, p, ts+29123, libc.VaList(bp+136, zVector, zSep, zQuoted)) zSep = ts + 14614 goto __15 __15: @@ -127514,7 +127572,7 @@ if !!(bFailed != 0) { goto __20 } - zRet = rbuMPrintf(tls, p, ts+29083, libc.VaList(bp+160, zLhs, zVector)) + zRet = rbuMPrintf(tls, p, ts+29130, libc.VaList(bp+160, zLhs, zVector)) __20: ; __13: @@ -127547,7 +127605,7 @@ if rc == SQLITE_OK { rc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+200, p+64, - Xsqlite3_mprintf(tls, ts+28613, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx))) + Xsqlite3_mprintf(tls, ts+28660, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx))) } for rc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 200))) { @@ -127559,7 +127617,7 @@ if iCid == -2 { var iSeq int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 200)), 0) - zRet = Xsqlite3_mprintf(tls, ts+29095, libc.VaList(bp+8, zRet, zCom, + zRet = Xsqlite3_mprintf(tls, ts+29142, libc.VaList(bp+8, zRet, zCom, (*RbuSpan)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FaIdxCol+uintptr(iSeq)*16)).FnSpan, (*RbuSpan)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FaIdxCol+uintptr(iSeq)*16)).FzSpan, zCollate)) zType = ts + 1554 } else { @@ -127571,37 +127629,37 @@ zCol = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) } else if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - zCol = ts + 28936 + zCol = ts + 28983 } else { - zCol = ts + 28666 + zCol = ts + 28713 } zType = ts + 1119 } else { zCol = 
*(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(iCid)*8)) zType = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblType + uintptr(iCid)*8)) } - zRet = Xsqlite3_mprintf(tls, ts+29117, libc.VaList(bp+48, zRet, zCom, zCol, zCollate)) + zRet = Xsqlite3_mprintf(tls, ts+29164, libc.VaList(bp+48, zRet, zCom, zCol, zCollate)) } if (*RbuObjIter)(unsafe.Pointer(pIter)).FbUnique == 0 || Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 200)), 5) != 0 { var zOrder uintptr = func() uintptr { if bDesc != 0 { - return ts + 28853 + return ts + 28900 } return ts + 1554 }() - zImpPK = Xsqlite3_mprintf(tls, ts+29137, + zImpPK = Xsqlite3_mprintf(tls, ts+29184, libc.VaList(bp+80, zImpPK, zCom, nBind, zCol, zOrder)) } - zImpCols = Xsqlite3_mprintf(tls, ts+29158, + zImpCols = Xsqlite3_mprintf(tls, ts+29205, libc.VaList(bp+120, zImpCols, zCom, nBind, zCol, zType, zCollate)) zWhere = Xsqlite3_mprintf(tls, - ts+29191, libc.VaList(bp+168, zWhere, zAnd, nBind, zCol)) + ts+29238, libc.VaList(bp+168, zWhere, zAnd, nBind, zCol)) if zRet == uintptr(0) || zImpPK == uintptr(0) || zImpCols == uintptr(0) || zWhere == uintptr(0) { rc = SQLITE_NOMEM } zCom = ts + 14614 - zAnd = ts + 21525 + zAnd = ts + 21572 nBind++ } @@ -127640,9 +127698,9 @@ for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { if *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed + uintptr(i))) != 0 { var zCol uintptr = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) - zList = Xsqlite3_mprintf(tls, ts+29215, libc.VaList(bp, zList, zS, zObj, zCol)) + zList = Xsqlite3_mprintf(tls, ts+29262, libc.VaList(bp, zList, zS, zObj, zCol)) } else { - zList = Xsqlite3_mprintf(tls, ts+29227, libc.VaList(bp+32, zList, zS)) + zList = Xsqlite3_mprintf(tls, ts+29274, libc.VaList(bp+32, zList, zS)) } zS = ts + 14614 if zList == uintptr(0) { @@ -127652,7 +127710,7 @@ } if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { - zList = rbuMPrintf(tls, p, ts+29236, libc.VaList(bp+48, zList, zObj)) + zList = rbuMPrintf(tls, p, ts+29283, libc.VaList(bp+48, zList, zObj)) } } return zList @@ -127664,18 +127722,18 @@ var zList uintptr = uintptr(0) if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_VTAB || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { - zList = rbuMPrintf(tls, p, ts+29251, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol+1)) + zList = rbuMPrintf(tls, p, ts+29298, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol+1)) } else if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL { var zSep uintptr = ts + 1554 var i int32 for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { if *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(i))) != 0 { - zList = rbuMPrintf(tls, p, ts+29265, libc.VaList(bp+8, zList, zSep, i, i+1)) - zSep = ts + 21525 + zList = rbuMPrintf(tls, p, ts+29312, libc.VaList(bp+8, zList, zSep, i, i+1)) + zSep = ts + 21572 } } zList = rbuMPrintf(tls, p, - ts+29277, libc.VaList(bp+40, zList)) + ts+29324, libc.VaList(bp+40, zList)) } else { var zSep uintptr = ts + 1554 @@ -127683,8 +127741,8 @@ for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { if *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(i))) != 0 { var zCol uintptr = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + 
uintptr(i)*8)) - zList = rbuMPrintf(tls, p, ts+29327, libc.VaList(bp+48, zList, zSep, zCol, i+1)) - zSep = ts + 21525 + zList = rbuMPrintf(tls, p, ts+29374, libc.VaList(bp+48, zList, zSep, zCol, i+1)) + zSep = ts + 21572 } } } @@ -127693,7 +127751,7 @@ func rbuBadControlError(tls *libc.TLS, p uintptr) { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+29340, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+29387, 0) } func rbuObjIterGetSetlist(tls *libc.TLS, p uintptr, pIter uintptr, zMask uintptr) uintptr { @@ -127711,15 +127769,15 @@ for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { var c int8 = *(*int8)(unsafe.Pointer(zMask + uintptr(*(*int32)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FaiSrcOrder + uintptr(i)*4))))) if int32(c) == 'x' { - zList = rbuMPrintf(tls, p, ts+29327, + zList = rbuMPrintf(tls, p, ts+29374, libc.VaList(bp, zList, zSep, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)), i+1)) zSep = ts + 14614 } else if int32(c) == 'd' { - zList = rbuMPrintf(tls, p, ts+29366, + zList = rbuMPrintf(tls, p, ts+29413, libc.VaList(bp+32, zList, zSep, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)), *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)), i+1)) zSep = ts + 14614 } else if int32(c) == 'f' { - zList = rbuMPrintf(tls, p, ts+29396, + zList = rbuMPrintf(tls, p, ts+29443, libc.VaList(bp+72, zList, zSep, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)), *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)), i+1)) zSep = ts + 14614 } @@ -127756,19 +127814,19 @@ var z uintptr = uintptr(0) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - var zSep uintptr = ts + 29433 + var zSep uintptr = ts + 29480 *(*uintptr)(unsafe.Pointer(bp + 56)) = uintptr(0) *(*uintptr)(unsafe.Pointer(bp + 64)) = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+56, p+64, - Xsqlite3_mprintf(tls, ts+28585, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) + Xsqlite3_mprintf(tls, ts+28632, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 56))) { var zOrig uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), 3) if zOrig != 0 && libc.Xstrcmp(tls, zOrig, ts+16155) == 0 { var zIdx uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), 1) if zIdx != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+64, p+64, - Xsqlite3_mprintf(tls, ts+28613, libc.VaList(bp+8, zIdx))) + Xsqlite3_mprintf(tls, ts+28660, libc.VaList(bp+8, zIdx))) } break } @@ -127780,15 +127838,15 @@ var zCol uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 64)), 2) var zDesc uintptr if Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 64)), 3) != 0 { - zDesc = ts + 28853 + zDesc = ts + 28900 } else { zDesc = ts + 1554 } - z = rbuMPrintf(tls, p, ts+29446, libc.VaList(bp+16, z, zSep, zCol, zDesc)) + z = rbuMPrintf(tls, p, ts+29493, libc.VaList(bp+16, z, zSep, zCol, zDesc)) zSep = ts + 14614 } } - z = rbuMPrintf(tls, p, ts+29457, libc.VaList(bp+48, z)) + z = rbuMPrintf(tls, 
p, ts+29504, libc.VaList(bp+48, z)) rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 64))) } return z @@ -127808,7 +127866,7 @@ var zPk uintptr = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+168, p+64, - ts+29461) + ts+29508) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { Xsqlite3_bind_int(tls, *(*uintptr)(unsafe.Pointer(bp + 168)), 1, tnum) if SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 168))) { @@ -127817,7 +127875,7 @@ } if zIdx != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+176, p+64, - Xsqlite3_mprintf(tls, ts+28613, libc.VaList(bp, zIdx))) + Xsqlite3_mprintf(tls, ts+28660, libc.VaList(bp, zIdx))) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 168))) @@ -127827,23 +127885,23 @@ var iCid int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 176)), 1) var bDesc int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 176)), 3) var zCollate uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 176)), 4) - zCols = rbuMPrintf(tls, p, ts+29511, libc.VaList(bp+8, zCols, zComma, + zCols = rbuMPrintf(tls, p, ts+29558, libc.VaList(bp+8, zCols, zComma, iCid, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblType + uintptr(iCid)*8)), zCollate)) - zPk = rbuMPrintf(tls, p, ts+29533, libc.VaList(bp+48, zPk, zComma, iCid, func() uintptr { + zPk = rbuMPrintf(tls, p, ts+29580, libc.VaList(bp+48, zPk, zComma, iCid, func() uintptr { if bDesc != 0 { - return ts + 28853 + return ts + 28900 } return ts + 1554 }())) zComma = ts + 14614 } } - zCols = rbuMPrintf(tls, p, ts+29543, libc.VaList(bp+80, zCols)) + zCols = rbuMPrintf(tls, p, ts+29590, libc.VaList(bp+80, zCols)) rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 176))) Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+88, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6441, 1, tnum)) rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+29558, + ts+29605, libc.VaList(bp+120, zCols, zPk)) Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+136, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6441, 0, 0)) } @@ -127869,13 +127927,13 @@ (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6441, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zCol, uintptr(0), bp+192, uintptr(0), uintptr(0), uintptr(0)) if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_IPK && *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(iCol))) != 0 { - zPk = ts + 29620 + zPk = ts + 29667 } - zSql = rbuMPrintf(tls, p, ts+29633, + zSql = rbuMPrintf(tls, p, ts+29680, libc.VaList(bp+32, zSql, zComma, zCol, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblType + uintptr(iCol)*8)), zPk, *(*uintptr)(unsafe.Pointer(bp + 192)), func() uintptr { if *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabNotNull + uintptr(iCol))) != 0 { - return ts + 29660 + return ts + 29707 } return ts + 1554 }())) @@ -127885,16 +127943,16 @@ if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_WITHOUT_ROWID { var zPk uintptr = rbuWithoutRowidPK(tls, p, pIter) if zPk != 0 { - zSql = rbuMPrintf(tls, p, ts+29670, libc.VaList(bp+88, zSql, zPk)) + zSql = rbuMPrintf(tls, p, ts+29717, libc.VaList(bp+88, zSql, zPk)) } } Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+104, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6441, 1, tnum)) - rbuMPrintfExec(tls, p, 
(*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+29677, + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+29724, libc.VaList(bp+136, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zSql, func() uintptr { if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_WITHOUT_ROWID { - return ts + 29709 + return ts + 29756 } return ts + 1554 }())) @@ -127911,7 +127969,7 @@ if zBind != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, pIter+152, p+64, Xsqlite3_mprintf(tls, - ts+29724, + ts+29771, libc.VaList(bp, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zCollist, zRbuRowid, zBind))) } } @@ -127948,7 +128006,7 @@ if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { *(*int32)(unsafe.Pointer(bp + 8)) = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp, p+64, - ts+29781) + ts+29828) } if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { var rc2 int32 @@ -128053,7 +128111,7 @@ var zLimit uintptr = uintptr(0) if nOffset != 0 { - zLimit = Xsqlite3_mprintf(tls, ts+29847, libc.VaList(bp, nOffset)) + zLimit = Xsqlite3_mprintf(tls, ts+29894, libc.VaList(bp, nOffset)) if !(zLimit != 0) { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_NOMEM } @@ -128076,7 +128134,7 @@ Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+8, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6441, 0, 1)) Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+40, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6441, 1, tnum)) rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+29867, + ts+29914, libc.VaList(bp+72, zTbl, *(*uintptr)(unsafe.Pointer(bp + 600)), *(*uintptr)(unsafe.Pointer(bp + 608)))) Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+96, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6441, 0, 0)) @@ -128084,13 +128142,13 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+136, p+64, - Xsqlite3_mprintf(tls, ts+29932, libc.VaList(bp+128, zTbl, zBind))) + Xsqlite3_mprintf(tls, ts+29979, libc.VaList(bp+128, zTbl, zBind))) } if libc.Bool32((*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0)) == 0 && (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+144, p+64, - Xsqlite3_mprintf(tls, ts+29968, libc.VaList(bp+144, zTbl, *(*uintptr)(unsafe.Pointer(bp + 616))))) + Xsqlite3_mprintf(tls, ts+30015, libc.VaList(bp+144, zTbl, *(*uintptr)(unsafe.Pointer(bp + 616))))) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { @@ -128106,7 +128164,7 @@ } zSql = Xsqlite3_mprintf(tls, - ts+30002, + ts+30049, libc.VaList(bp+160, zCollist, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zPart, @@ -128114,9 +128172,9 @@ if zStart != 0 { return func() uintptr { if zPart != 0 { - return ts + 30063 + return ts + 30110 } - return ts + 30067 + return ts + 30114 }() } return ts + 1554 @@ -128125,20 +128183,20 @@ Xsqlite3_free(tls, zStart) } else if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { zSql = Xsqlite3_mprintf(tls, - ts+30073, + ts+30120, libc.VaList(bp+216, zCollist, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zPart, zCollist, zLimit)) } else { zSql = Xsqlite3_mprintf(tls, - ts+30134, + ts+30181, libc.VaList(bp+264, zCollist, p+48, 
(*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zPart, zCollist, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zPart, func() uintptr { if zPart != 0 { - return ts + 30063 + return ts + 30110 } - return ts + 30067 + return ts + 30114 }(), zCollist, zLimit)) } @@ -128175,16 +128233,16 @@ if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_VTAB { return ts + 1554 } - return ts + 30293 + return ts + 30340 }() if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+136, pz, Xsqlite3_mprintf(tls, - ts+30302, + ts+30349, libc.VaList(bp+344, zWrite, zTbl, zCollist, func() uintptr { if bRbuRowid != 0 { - return ts + 30338 + return ts + 30385 } return ts + 1554 }(), zBindings))) @@ -128193,32 +128251,32 @@ if libc.Bool32((*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0)) == 0 && (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+144, pz, Xsqlite3_mprintf(tls, - ts+30348, libc.VaList(bp+384, zWrite, zTbl, zWhere))) + ts+30395, libc.VaList(bp+384, zWrite, zTbl, zWhere))) } if libc.Bool32((*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0)) == 0 && (*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed != 0 { var zRbuRowid uintptr = ts + 1554 if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { - zRbuRowid = ts + 30376 + zRbuRowid = ts + 30423 } rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, - ts+30388, libc.VaList(bp+408, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, func() uintptr { + ts+30435, libc.VaList(bp+408, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, func() uintptr { if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL { - return ts + 30464 + return ts + 30511 } return ts + 1554 }(), (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl)) rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+30481, + ts+30528, libc.VaList(bp+440, zWrite, zTbl, zOldlist, zWrite, zTbl, zOldlist, zWrite, zTbl, zNewlist)) if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+30780, + ts+30827, libc.VaList(bp+512, zWrite, zTbl, zNewlist)) } @@ -128231,9 +128289,9 @@ var zOrder uintptr = uintptr(0) if bRbuRowid != 0 { if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - zRbuRowid = ts + 30879 + zRbuRowid = ts + 30926 } else { - zRbuRowid = ts + 30889 + zRbuRowid = ts + 30936 } } @@ -128246,7 +128304,7 @@ } } if bRbuRowid != 0 { - zOrder = rbuMPrintf(tls, p, ts+28936, 0) + zOrder = rbuMPrintf(tls, p, ts+28983, 0) } else { zOrder = rbuObjIterGetPkList(tls, p, pIter, ts+1554, ts+14614, ts+1554) } @@ -128255,11 +128313,11 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, pIter+128, pz, Xsqlite3_mprintf(tls, - ts+30900, + ts+30947, libc.VaList(bp+536, zCollist, func() uintptr { if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - return ts + 30948 + return ts + 30995 } return ts + 1554 }(), @@ -128272,7 +128330,7 @@ }(), func() uintptr { if zOrder != 0 { - return ts + 22859 + return ts + 22906 } return ts + 1554 }(), zOrder, @@ -128340,9 +128398,9 @@ var zPrefix 
uintptr = ts + 1554 if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType != RBU_PK_VTAB { - zPrefix = ts + 30293 + zPrefix = ts + 30340 } - zUpdate = Xsqlite3_mprintf(tls, ts+30954, + zUpdate = Xsqlite3_mprintf(tls, ts+31001, libc.VaList(bp, zPrefix, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zSet, zWhere)) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pUp+8, p+64, zUpdate) @@ -128401,7 +128459,7 @@ } *(*int32)(unsafe.Pointer(bp + 16)) = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp+8, p+64, - Xsqlite3_mprintf(tls, ts+30984, libc.VaList(bp, p+48))) + Xsqlite3_mprintf(tls, ts+31031, libc.VaList(bp, p+48))) for *(*int32)(unsafe.Pointer(bp + 16)) == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 8))) { switch Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 8)), 0) { case RBU_STATE_STAGE: @@ -128474,18 +128532,18 @@ Xsqlite3_file_control(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+6441, SQLITE_FCNTL_RBUCNT, p) if (*Sqlite3rbu)(unsafe.Pointer(p)).FzState == uintptr(0) { var zFile uintptr = Xsqlite3_db_filename(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+6441) - (*Sqlite3rbu)(unsafe.Pointer(p)).FzState = rbuMPrintf(tls, p, ts+31014, libc.VaList(bp, zFile, zFile)) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzState = rbuMPrintf(tls, p, ts+31061, libc.VaList(bp, zFile, zFile)) } } if (*Sqlite3rbu)(unsafe.Pointer(p)).FzState != 0 { - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+31042, libc.VaList(bp+16, (*Sqlite3rbu)(unsafe.Pointer(p)).FzState)) + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+31089, libc.VaList(bp+16, (*Sqlite3rbu)(unsafe.Pointer(p)).FzState)) libc.Xmemcpy(tls, p+48, ts+3286, uint64(4)) } else { libc.Xmemcpy(tls, p+48, ts+6441, uint64(4)) } - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+31060, libc.VaList(bp+24, p+48)) + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+31107, libc.VaList(bp+24, p+48)) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { var bOpen int32 = 0 @@ -128525,11 +128583,11 @@ return } (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31126, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31173, 0) } else { var zTarget uintptr var zExtra uintptr = uintptr(0) - if libc.Xstrlen(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu) >= uint64(5) && 0 == libc.Xmemcmp(tls, ts+24206, (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu, uint64(5)) { + if libc.Xstrlen(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu) >= uint64(5) && 0 == libc.Xmemcmp(tls, ts+24253, (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu, uint64(5)) { zExtra = (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu + 5 for *(*int8)(unsafe.Pointer(zExtra)) != 0 { if int32(*(*int8)(unsafe.Pointer(libc.PostIncUintptr(&zExtra, 1)))) == '?' 
{ @@ -128541,13 +128599,13 @@ } } - zTarget = Xsqlite3_mprintf(tls, ts+31158, + zTarget = Xsqlite3_mprintf(tls, ts+31205, libc.VaList(bp+32, Xsqlite3_db_filename(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+6441), func() uintptr { if zExtra == uintptr(0) { return ts + 1554 } - return ts + 31190 + return ts + 31237 }(), func() uintptr { if zExtra == uintptr(0) { return ts + 1554 @@ -128566,21 +128624,21 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_create_function(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+31192, -1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { + ts+31239, -1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rbuTmpInsertFunc})), uintptr(0), uintptr(0)) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_create_function(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+31207, 2, SQLITE_UTF8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + ts+31254, 2, SQLITE_UTF8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rbuFossilDeltaFunc})), uintptr(0), uintptr(0)) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_create_function(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, - ts+31224, -1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { + ts+31271, -1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rbuTargetNameFunc})), uintptr(0), uintptr(0)) } @@ -128588,7 +128646,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_file_control(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6441, SQLITE_FCNTL_RBU, p) } - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31240, 0) + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31287, 0) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_file_control(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6441, SQLITE_FCNTL_RBU, p) @@ -128596,7 +128654,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_NOTFOUND { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31268, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31315, 0) } } @@ -128625,14 +128683,14 @@ if pState == uintptr(0) { (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage = 0 if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31240, uintptr(0), uintptr(0), uintptr(0)) + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31287, uintptr(0), uintptr(0), uintptr(0)) } } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { var rc2 int32 (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage = RBU_STAGE_CAPTURE - rc2 = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31286, uintptr(0), uintptr(0), uintptr(0)) + rc2 = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31333, uintptr(0), uintptr(0), uintptr(0)) if rc2 != SQLITE_NOTICE { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = rc2 } @@ -128758,7 +128816,7 @@ func rbuExclusiveCheckpoint(tls *libc.TLS, db uintptr) int32 { var zUri uintptr = Xsqlite3_db_filename(tls, db, uintptr(0)) - return 
Xsqlite3_uri_boolean(tls, zUri, ts+31321, 0) + return Xsqlite3_uri_boolean(tls, zUri, ts+31368, 0) } func rbuMoveOalFile(tls *libc.TLS, p uintptr) { @@ -128773,8 +128831,8 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { zMove = Xsqlite3_db_filename(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+6441) } - zOal = Xsqlite3_mprintf(tls, ts+31346, libc.VaList(bp, zMove)) - zWal = Xsqlite3_mprintf(tls, ts+31353, libc.VaList(bp+8, zMove)) + zOal = Xsqlite3_mprintf(tls, ts+31393, libc.VaList(bp, zMove)) + zWal = Xsqlite3_mprintf(tls, ts+31400, libc.VaList(bp+8, zMove)) if zWal == uintptr(0) || zOal == uintptr(0) { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_NOMEM @@ -128891,7 +128949,7 @@ (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx == uintptr(0) && (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_IPK && *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(i))) != 0 && Xsqlite3_column_type(tls, (*RbuObjIter)(unsafe.Pointer(pIter)).FpSelect, i) == SQLITE_NULL { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_MISMATCH - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+23844, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+23891, 0) return } @@ -128984,7 +129042,7 @@ var iCookie int32 = 1000000 (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, dbread, bp+8, p+64, - ts+31360) + ts+31407) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { if SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 8))) { iCookie = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 8)), 0) @@ -128992,7 +129050,7 @@ rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 8))) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31382, libc.VaList(bp, iCookie+1)) + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31429, libc.VaList(bp, iCookie+1)) } } } @@ -129013,7 +129071,7 @@ rc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp+168, p+64, Xsqlite3_mprintf(tls, - ts+31409, + ts+31456, libc.VaList(bp, p+48, RBU_STATE_STAGE, eStage, RBU_STATE_TBL, (*Sqlite3rbu)(unsafe.Pointer(p)).Fobjiter.FzTbl, @@ -129043,9 +129101,9 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { *(*uintptr)(unsafe.Pointer(bp + 24)) = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp+24, p+64, - Xsqlite3_mprintf(tls, ts+31567, libc.VaList(bp, zPragma))) + Xsqlite3_mprintf(tls, ts+31614, libc.VaList(bp, zPragma))) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) { - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31582, + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31629, libc.VaList(bp+8, zPragma, Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 24)), 0))) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 24))) @@ -129059,10 +129117,10 @@ *(*uintptr)(unsafe.Pointer(bp)) = uintptr(0) *(*uintptr)(unsafe.Pointer(bp + 8)) = uintptr(0) - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31602, uintptr(0), uintptr(0), p+64) + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31649, uintptr(0), uintptr(0), p+64) if 
(*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp, p+64, - ts+31627) + ts+31674) } for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp))) == SQLITE_ROW { @@ -129076,12 +129134,12 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp, p+64, - ts+31735) + ts+31782) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+8, p+64, - ts+31800) + ts+31847) } for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp))) == SQLITE_ROW { @@ -129093,7 +129151,7 @@ (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_reset(tls, *(*uintptr)(unsafe.Pointer(bp + 8))) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31844, uintptr(0), uintptr(0), p+64) + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31891, uintptr(0), uintptr(0), p+64) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp))) @@ -129121,7 +129179,7 @@ if (*RbuObjIter)(unsafe.Pointer(pIter)).FbCleanup != 0 { if libc.Bool32((*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0)) == 0 && (*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed != 0 { rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, - ts+31869, libc.VaList(bp, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl)) + ts+31916, libc.VaList(bp, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl)) } } else { rbuObjIterPrepareAll(tls, p, pIter, 0) @@ -129243,7 +129301,7 @@ if rc == SQLITE_OK && !(int32((*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl) != 0) { rc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31897, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31944, 0) } if rc == SQLITE_OK { @@ -129259,7 +129317,7 @@ bp := tls.Alloc(16) defer tls.Free(16) - var zOal uintptr = rbuMPrintf(tls, p, ts+31346, libc.VaList(bp, (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget)) + var zOal uintptr = rbuMPrintf(tls, p, ts+31393, libc.VaList(bp, (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget)) if zOal != 0 { *(*uintptr)(unsafe.Pointer(bp + 8)) = uintptr(0) Xsqlite3_file_control(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6441, SQLITE_FCNTL_VFS_POINTER, bp+8) @@ -129276,7 +129334,7 @@ defer tls.Free(76) Xsqlite3_randomness(tls, int32(unsafe.Sizeof(int32(0))), bp+8) - Xsqlite3_snprintf(tls, int32(unsafe.Sizeof([64]int8{})), bp+12, ts+31922, libc.VaList(bp, *(*int32)(unsafe.Pointer(bp + 8)))) + Xsqlite3_snprintf(tls, int32(unsafe.Sizeof([64]int8{})), bp+12, ts+31969, libc.VaList(bp, *(*int32)(unsafe.Pointer(bp + 8)))) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3rbu_create_vfs(tls, bp+12, uintptr(0)) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { var pVfs uintptr = Xsqlite3_vfs_find(tls, bp+12) @@ -129310,7 +129368,7 @@ rc = prepareFreeAndCollectError(tls, db, bp+8, bp+16, Xsqlite3_mprintf(tls, - ts+31933, libc.VaList(bp, Xsqlite3_value_text(tls, *(*uintptr)(unsafe.Pointer(apVal)))))) + ts+31980, libc.VaList(bp, Xsqlite3_value_text(tls, *(*uintptr)(unsafe.Pointer(apVal)))))) if rc != SQLITE_OK { Xsqlite3_result_error(tls, pCtx, 
*(*uintptr)(unsafe.Pointer(bp + 16)), -1) } else { @@ -129340,13 +129398,13 @@ (*Sqlite3rbu)(unsafe.Pointer(p)).FnPhaseOneStep = int64(-1) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_create_function(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, - ts+32005, 1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { + ts+32052, 1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rbuIndexCntFunc})), uintptr(0), uintptr(0)) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp, p+64, - ts+32019) + ts+32066) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { if SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp))) { @@ -129357,7 +129415,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && bExists != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp, p+64, - ts+32076) + ts+32123) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { if SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp))) { (*Sqlite3rbu)(unsafe.Pointer(p)).FnPhaseOneStep = Xsqlite3_column_int64(tls, *(*uintptr)(unsafe.Pointer(bp)), 0) @@ -129431,7 +129489,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && (*Rbu_file)(unsafe.Pointer((*Sqlite3rbu)(unsafe.Pointer(p)).FpTargetFd)).FpWalFd != 0 { if (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage == RBU_STAGE_OAL { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32150, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32197, 0) } else if (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage == RBU_STAGE_MOVE { (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage = RBU_STAGE_CKPT (*Sqlite3rbu)(unsafe.Pointer(p)).FnStep = 0 @@ -129449,12 +129507,12 @@ }() if (*Rbu_file)(unsafe.Pointer(pFd)).FiCookie != (*RbuState)(unsafe.Pointer(pState)).FiCookie { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_BUSY - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32182, + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32229, libc.VaList(bp+8, func() uintptr { if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - return ts + 32214 + return ts + 32261 } - return ts + 32221 + return ts + 32268 }())) } } @@ -129478,14 +129536,14 @@ } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, db, ts+32228, uintptr(0), uintptr(0), p+64) + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, db, ts+32275, uintptr(0), uintptr(0), p+64) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { var frc int32 = Xsqlite3_file_control(tls, db, ts+6441, SQLITE_FCNTL_ZIPVFS, uintptr(0)) if frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, - db, ts+32244, uintptr(0), uintptr(0), p+64) + db, ts+32291, uintptr(0), uintptr(0), p+64) } } @@ -129539,7 +129597,7 @@ } if zState != 0 { var n int32 = int32(libc.Xstrlen(tls, zState)) - if n >= 7 && 0 == libc.Xmemcmp(tls, ts+32268, zState+uintptr(n-7), uint64(7)) { + if n >= 7 && 0 == libc.Xmemcmp(tls, ts+32315, zState+uintptr(n-7), uint64(7)) { return rbuMisuseError(tls) } } @@ -129566,7 +129624,7 @@ var i uint32 var nErrmsg Size_t = libc.Xstrlen(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg) for i = uint32(0); Size_t(i) < nErrmsg-uint64(8); i++ { - if libc.Xmemcmp(tls, 
(*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg+uintptr(i), ts+30293, uint64(8)) == 0 { + if libc.Xmemcmp(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg+uintptr(i), ts+30340, uint64(8)) == 0 { var nDel int32 = 8 for int32(*(*int8)(unsafe.Pointer((*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg + uintptr(i+uint32(nDel))))) >= '0' && int32(*(*int8)(unsafe.Pointer((*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg + uintptr(i+uint32(nDel))))) <= '9' { nDel++ @@ -129602,7 +129660,7 @@ rbuObjIterFinalize(tls, p+80) if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) && (*Sqlite3rbu)(unsafe.Pointer(p)).Frc != SQLITE_OK && (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu != 0 { - var rc2 int32 = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+32276, uintptr(0), uintptr(0), uintptr(0)) + var rc2 int32 = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+32323, uintptr(0), uintptr(0), uintptr(0)) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_DONE && rc2 != SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = rc2 } @@ -129721,12 +129779,12 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { zBegin = ts + 14503 } else { - zBegin = ts + 32228 + zBegin = ts + 32275 } rc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, zBegin, uintptr(0), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { - rc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+32228, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+32275, uintptr(0), uintptr(0), uintptr(0)) } } @@ -130072,7 +130130,7 @@ })(unsafe.Pointer(&struct{ uintptr }{xControl})).f(tls, (*Rbu_file)(unsafe.Pointer(p)).FpReal, SQLITE_FCNTL_ZIPVFS, bp+16) if rc == SQLITE_OK { rc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(pRbu)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32303, 0) + (*Sqlite3rbu)(unsafe.Pointer(pRbu)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32350, 0) } else if rc == SQLITE_NOTFOUND { (*Sqlite3rbu)(unsafe.Pointer(pRbu)).FpTargetFd = p (*Rbu_file)(unsafe.Pointer(p)).FpRbu = pRbu @@ -130097,7 +130155,7 @@ if rc == SQLITE_OK && op == SQLITE_FCNTL_VFSNAME { var pRbuVfs uintptr = (*Rbu_file)(unsafe.Pointer(p)).FpRbuVfs var zIn uintptr = *(*uintptr)(unsafe.Pointer(pArg)) - var zOut uintptr = Xsqlite3_mprintf(tls, ts+32326, libc.VaList(bp, (*Rbu_vfs)(unsafe.Pointer(pRbuVfs)).Fbase.FzName, zIn)) + var zOut uintptr = Xsqlite3_mprintf(tls, ts+32373, libc.VaList(bp, (*Rbu_vfs)(unsafe.Pointer(pRbuVfs)).Fbase.FzName, zIn)) *(*uintptr)(unsafe.Pointer(pArg)) = zOut if zOut == uintptr(0) { rc = SQLITE_NOMEM @@ -130257,7 +130315,7 @@ } if oflags&SQLITE_OPEN_MAIN_DB != 0 && - Xsqlite3_uri_boolean(tls, zName, ts+32337, 0) != 0 { + Xsqlite3_uri_boolean(tls, zName, ts+32384, 0) != 0 { oflags = SQLITE_OPEN_TEMP_DB | SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE | SQLITE_OPEN_EXCLUSIVE | SQLITE_OPEN_DELETEONCLOSE zOpen = uintptr(0) } @@ -131086,7 +131144,7 @@ rc = Xsqlite3_table_column_metadata(tls, db, zDb, zThis, uintptr(0), uintptr(0), uintptr(0), uintptr(0), uintptr(0), uintptr(0)) if rc == SQLITE_OK { zPragma = Xsqlite3_mprintf(tls, - ts+32348, 0) + ts+32395, 0) } else if rc == SQLITE_ERROR { zPragma = Xsqlite3_mprintf(tls, ts+1554, 0) } else { @@ -131099,7 +131157,7 @@ return rc } } else { - zPragma = Xsqlite3_mprintf(tls, ts+32469, libc.VaList(bp, zDb, zThis)) + zPragma = Xsqlite3_mprintf(tls, ts+32516, libc.VaList(bp, zDb, zThis)) } if !(zPragma != 0) { *(*uintptr)(unsafe.Pointer(pazCol)) = uintptr(0) @@ -131779,9 +131837,9 @@ for i = 0; i < nCol; i++ { if 
*(*U8)(unsafe.Pointer(abPK + uintptr(i))) != 0 { - zRet = Xsqlite3_mprintf(tls, ts+32498, + zRet = Xsqlite3_mprintf(tls, ts+32545, libc.VaList(bp, zRet, zSep, zDb1, zTab, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*8)), zDb2, zTab, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*8)))) - zSep = ts + 21525 + zSep = ts + 21572 if zRet == uintptr(0) { break } @@ -131804,9 +131862,9 @@ if int32(*(*U8)(unsafe.Pointer(abPK + uintptr(i)))) == 0 { bHave = 1 zRet = Xsqlite3_mprintf(tls, - ts+32532, + ts+32579, libc.VaList(bp, zRet, zSep, zDb1, zTab, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*8)), zDb2, zTab, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*8)))) - zSep = ts + 32573 + zSep = ts + 32620 if zRet == uintptr(0) { break } @@ -131814,7 +131872,7 @@ } if bHave == 0 { - zRet = Xsqlite3_mprintf(tls, ts+7530, 0) + zRet = Xsqlite3_mprintf(tls, ts+7519, 0) } return zRet @@ -131825,7 +131883,7 @@ defer tls.Free(40) var zRet uintptr = Xsqlite3_mprintf(tls, - ts+32578, + ts+32625, libc.VaList(bp, zDb1, zTbl, zDb2, zTbl, zExpr)) return zRet } @@ -131868,7 +131926,7 @@ rc = SQLITE_NOMEM } else { var zStmt uintptr = Xsqlite3_mprintf(tls, - ts+32656, + ts+32703, libc.VaList(bp, (*Sqlite3_session)(unsafe.Pointer(pSession)).FzDb, (*SessionTable)(unsafe.Pointer(pTab)).FzName, zFrom, (*SessionTable)(unsafe.Pointer(pTab)).FzName, zExpr, zExpr2)) if zStmt == uintptr(0) { rc = SQLITE_NOMEM @@ -131995,7 +132053,7 @@ if !(pzErrMsg != 0) { goto __16 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+32709, 0) + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+32756, 0) __16: ; rc = SQLITE_SCHEMA @@ -132471,7 +132529,7 @@ if 0 == Xsqlite3_stricmp(tls, ts+11348, zTab) { zSql = Xsqlite3_mprintf(tls, - ts+32736, libc.VaList(bp, zDb)) + ts+32783, libc.VaList(bp, zDb)) if zSql == uintptr(0) { *(*int32)(unsafe.Pointer(bp + 24)) = SQLITE_NOMEM } @@ -132480,18 +132538,18 @@ var zSep uintptr = ts + 1554 *(*SessionBuffer)(unsafe.Pointer(bp + 8)) = SessionBuffer{} - sessionAppendStr(tls, bp+8, ts+32846, bp+24) + sessionAppendStr(tls, bp+8, ts+32893, bp+24) sessionAppendIdent(tls, bp+8, zDb, bp+24) sessionAppendStr(tls, bp+8, ts+1567, bp+24) sessionAppendIdent(tls, bp+8, zTab, bp+24) - sessionAppendStr(tls, bp+8, ts+32861, bp+24) + sessionAppendStr(tls, bp+8, ts+32908, bp+24) for i = 0; i < nCol; i++ { if *(*U8)(unsafe.Pointer(abPK + uintptr(i))) != 0 { sessionAppendStr(tls, bp+8, zSep, bp+24) sessionAppendIdent(tls, bp+8, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*8)), bp+24) - sessionAppendStr(tls, bp+8, ts+32869, bp+24) + sessionAppendStr(tls, bp+8, ts+32916, bp+24) sessionAppendInteger(tls, bp+8, i+1, bp+24) - zSep = ts + 21525 + zSep = ts + 21572 } } zSql = (*SessionBuffer)(unsafe.Pointer(bp + 8)).FaBuf @@ -132600,7 +132658,7 @@ if (*Sqlite3_session)(unsafe.Pointer(pSession)).Frc != 0 { return (*Sqlite3_session)(unsafe.Pointer(pSession)).Frc } - *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3_exec(tls, (*Sqlite3_session)(unsafe.Pointer(pSession)).Fdb, ts+32875, uintptr(0), uintptr(0), uintptr(0)) + *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3_exec(tls, (*Sqlite3_session)(unsafe.Pointer(pSession)).Fdb, ts+32922, uintptr(0), uintptr(0), uintptr(0)) if *(*int32)(unsafe.Pointer(bp + 40)) != SQLITE_OK { return *(*int32)(unsafe.Pointer(bp + 40)) } @@ -132692,7 +132750,7 @@ } Xsqlite3_free(tls, (*SessionBuffer)(unsafe.Pointer(bp+24)).FaBuf) - Xsqlite3_exec(tls, db, ts+32895, uintptr(0), uintptr(0), uintptr(0)) + Xsqlite3_exec(tls, db, ts+32942, uintptr(0), uintptr(0), uintptr(0)) 
Xsqlite3_mutex_leave(tls, Xsqlite3_db_mutex(tls, db)) return *(*int32)(unsafe.Pointer(bp + 40)) } @@ -132955,7 +133013,7 @@ rc = sessionInputBuffer(tls, pIn, 9) if rc == SQLITE_OK { if (*SessionInput)(unsafe.Pointer(pIn)).FiNext >= (*SessionInput)(unsafe.Pointer(pIn)).FnData { - rc = Xsqlite3CorruptError(tls, 219078) + rc = Xsqlite3CorruptError(tls, 219169) } else { eType = int32(*(*U8)(unsafe.Pointer((*SessionInput)(unsafe.Pointer(pIn)).FaData + uintptr(libc.PostIncInt32(&(*SessionInput)(unsafe.Pointer(pIn)).FiNext, 1))))) @@ -132978,7 +133036,7 @@ rc = sessionInputBuffer(tls, pIn, *(*int32)(unsafe.Pointer(bp))) if rc == SQLITE_OK { if *(*int32)(unsafe.Pointer(bp)) < 0 || *(*int32)(unsafe.Pointer(bp)) > (*SessionInput)(unsafe.Pointer(pIn)).FnData-(*SessionInput)(unsafe.Pointer(pIn)).FiNext { - rc = Xsqlite3CorruptError(tls, 219098) + rc = Xsqlite3CorruptError(tls, 219189) } else { var enc U8 = func() uint8 { if eType == SQLITE_TEXT { @@ -133020,7 +133078,7 @@ nRead = nRead + sessionVarintGet(tls, (*SessionInput)(unsafe.Pointer(pIn)).FaData+uintptr((*SessionInput)(unsafe.Pointer(pIn)).FiNext+nRead), bp) if *(*int32)(unsafe.Pointer(bp)) < 0 || *(*int32)(unsafe.Pointer(bp)) > 65536 { - rc = Xsqlite3CorruptError(tls, 219152) + rc = Xsqlite3CorruptError(tls, 219243) } else { rc = sessionInputBuffer(tls, pIn, nRead+*(*int32)(unsafe.Pointer(bp))+100) nRead = nRead + *(*int32)(unsafe.Pointer(bp)) @@ -133081,7 +133139,7 @@ (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Ftblhdr.FnBuf = 0 sessionBufferGrow(tls, p+72, int64(nByte), bp+4) } else { - *(*int32)(unsafe.Pointer(bp + 4)) = Xsqlite3CorruptError(tls, 219240) + *(*int32)(unsafe.Pointer(bp + 4)) = Xsqlite3CorruptError(tls, 219331) } } @@ -133155,13 +133213,13 @@ } if (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FzTab == uintptr(0) || (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FbPatchset != 0 && (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FbInvert != 0 { - return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219326)) + return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219417)) } (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fop = int32(op) (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FbIndirect = int32(*(*U8)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fin.FaData + uintptr(libc.PostIncInt32(&(*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fin.FiNext, 1))))) if (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fop != SQLITE_UPDATE && (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fop != SQLITE_DELETE && (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fop != SQLITE_INSERT { - return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219332)) + return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219423)) } if paRec != 0 { @@ -133223,7 +133281,7 @@ if *(*U8)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FabPK + uintptr(i))) != 0 { *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FapValue + uintptr(i)*8)) = *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FapValue + uintptr(i+(*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FnCol)*8)) if *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FapValue + uintptr(i)*8)) == uintptr(0) { - return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219376)) + return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219467)) } *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FapValue + uintptr(i+(*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FnCol)*8)) = uintptr(0) } @@ -133596,7 
+133654,7 @@ goto __6 __11: - *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3CorruptError(tls, 219741) + *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3CorruptError(tls, 219832) goto finished_invert __6: ; @@ -133775,34 +133833,34 @@ (*SessionUpdate)(unsafe.Pointer(pUp)).FaMask = pUp + 1*24 libc.Xmemcpy(tls, (*SessionUpdate)(unsafe.Pointer(pUp)).FaMask, (*SessionApplyCtx)(unsafe.Pointer(p)).FaUpdateMask, uint64(nU32)*uint64(unsafe.Sizeof(U32(0)))) - sessionAppendStr(tls, bp, ts+32913, bp+16) + sessionAppendStr(tls, bp, ts+32960, bp+16) sessionAppendIdent(tls, bp, (*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FzTab, bp+16) - sessionAppendStr(tls, bp, ts+32926, bp+16) + sessionAppendStr(tls, bp, ts+32973, bp+16) for ii = 0; ii < (*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FnCol; ii++ { if int32(*(*U8)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FabPK + uintptr(ii)))) == 0 && *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FapValue + uintptr((*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FnCol+ii)*8)) != 0 { sessionAppendStr(tls, bp, zSep, bp+16) sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(ii)*8)), bp+16) - sessionAppendStr(tls, bp, ts+32932, bp+16) + sessionAppendStr(tls, bp, ts+32979, bp+16) sessionAppendInteger(tls, bp, ii*2+1, bp+16) zSep = ts + 14614 } } zSep = ts + 1554 - sessionAppendStr(tls, bp, ts+32861, bp+16) + sessionAppendStr(tls, bp, ts+32908, bp+16) for ii = 0; ii < (*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FnCol; ii++ { if *(*U8)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FabPK + uintptr(ii))) != 0 || bPatchset == 0 && *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FapValue + uintptr(ii)*8)) != 0 { sessionAppendStr(tls, bp, zSep, bp+16) if bStat1 != 0 && ii == 1 { sessionAppendStr(tls, bp, - ts+32937, bp+16) + ts+32984, bp+16) } else { sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(ii)*8)), bp+16) - sessionAppendStr(tls, bp, ts+32869, bp+16) + sessionAppendStr(tls, bp, ts+32916, bp+16) sessionAppendInteger(tls, bp, ii*2+2, bp+16) } - zSep = ts + 21525 + zSep = ts + 21572 } } @@ -133854,34 +133912,34 @@ *(*SessionBuffer)(unsafe.Pointer(bp)) = SessionBuffer{} var nPk int32 = 0 - sessionAppendStr(tls, bp, ts+33012, bp+16) + sessionAppendStr(tls, bp, ts+33059, bp+16) sessionAppendIdent(tls, bp, zTab, bp+16) - sessionAppendStr(tls, bp, ts+32861, bp+16) + sessionAppendStr(tls, bp, ts+32908, bp+16) for i = 0; i < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol; i++ { if *(*U8)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FabPK + uintptr(i))) != 0 { nPk++ sessionAppendStr(tls, bp, zSep, bp+16) sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(i)*8)), bp+16) - sessionAppendStr(tls, bp, ts+32932, bp+16) + sessionAppendStr(tls, bp, ts+32979, bp+16) sessionAppendInteger(tls, bp, i+1, bp+16) - zSep = ts + 21525 + zSep = ts + 21572 } } if nPk < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol { - sessionAppendStr(tls, bp, ts+33030, bp+16) + sessionAppendStr(tls, bp, ts+33077, bp+16) sessionAppendInteger(tls, bp, (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol+1, bp+16) - sessionAppendStr(tls, bp, ts+32573, bp+16) + sessionAppendStr(tls, bp, ts+32620, bp+16) zSep = ts + 1554 for i = 0; i < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol; i++ { if !(int32(*(*U8)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FabPK + 
uintptr(i)))) != 0) { sessionAppendStr(tls, bp, zSep, bp+16) sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(i)*8)), bp+16) - sessionAppendStr(tls, bp, ts+32869, bp+16) + sessionAppendStr(tls, bp, ts+32916, bp+16) sessionAppendInteger(tls, bp, i+1, bp+16) - zSep = ts + 33038 + zSep = ts + 33085 } } sessionAppendStr(tls, bp, ts+4957, bp+16) @@ -133908,9 +133966,9 @@ var i int32 *(*SessionBuffer)(unsafe.Pointer(bp)) = SessionBuffer{} - sessionAppendStr(tls, bp, ts+33043, bp+16) + sessionAppendStr(tls, bp, ts+33090, bp+16) sessionAppendIdent(tls, bp, zTab, bp+16) - sessionAppendStr(tls, bp, ts+21531, bp+16) + sessionAppendStr(tls, bp, ts+21578, bp+16) for i = 0; i < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol; i++ { if i != 0 { sessionAppendStr(tls, bp, ts+14614, bp+16) @@ -133918,9 +133976,9 @@ sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(i)*8)), bp+16) } - sessionAppendStr(tls, bp, ts+33061, bp+16) + sessionAppendStr(tls, bp, ts+33108, bp+16) for i = 1; i < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol; i++ { - sessionAppendStr(tls, bp, ts+33072, bp+16) + sessionAppendStr(tls, bp, ts+33119, bp+16) } sessionAppendStr(tls, bp, ts+4957, bp+16) @@ -133939,11 +133997,11 @@ var rc int32 = sessionSelectRow(tls, db, ts+11348, p) if rc == SQLITE_OK { rc = sessionPrepare(tls, db, p+16, - ts+33076) + ts+33123) } if rc == SQLITE_OK { rc = sessionPrepare(tls, db, p+8, - ts+33189) + ts+33236) } return rc } @@ -133971,7 +134029,7 @@ f func(*libc.TLS, uintptr, int32, uintptr) int32 })(unsafe.Pointer(&struct{ uintptr }{xValue})).f(tls, pIter, i, bp) if *(*uintptr)(unsafe.Pointer(bp)) == uintptr(0) { - rc = Xsqlite3CorruptError(tls, 220219) + rc = Xsqlite3CorruptError(tls, 220310) } else { rc = sessionBindValue(tls, pStmt, i+1, *(*uintptr)(unsafe.Pointer(bp))) } @@ -134224,7 +134282,7 @@ if *(*int32)(unsafe.Pointer(bp + 4)) != 0 { rc = sessionApplyOneOp(tls, pIter, pApply, xConflict, pCtx, uintptr(0), uintptr(0)) } else if *(*int32)(unsafe.Pointer(bp)) != 0 { - rc = Xsqlite3_exec(tls, db, ts+33333, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33380, uintptr(0), uintptr(0), uintptr(0)) if rc == SQLITE_OK { rc = sessionBindRow(tls, pIter, *(*uintptr)(unsafe.Pointer(&struct { @@ -134240,7 +134298,7 @@ rc = sessionApplyOneOp(tls, pIter, pApply, xConflict, pCtx, uintptr(0), uintptr(0)) } if rc == SQLITE_OK { - rc = Xsqlite3_exec(tls, db, ts+33354, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33401, uintptr(0), uintptr(0), uintptr(0)) } } } @@ -134313,10 +134371,10 @@ (*SessionApplyCtx)(unsafe.Pointer(bp + 48)).FbInvertConstraints = libc.BoolInt32(!!(flags&SQLITE_CHANGESETAPPLY_INVERT != 0)) Xsqlite3_mutex_enter(tls, Xsqlite3_db_mutex(tls, db)) if flags&SQLITE_CHANGESETAPPLY_NOSAVEPOINT == 0 { - rc = Xsqlite3_exec(tls, db, ts+33373, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33420, uintptr(0), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { - rc = Xsqlite3_exec(tls, db, ts+33399, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33446, uintptr(0), uintptr(0), uintptr(0)) } for rc == SQLITE_OK && SQLITE_ROW == Xsqlite3changeset_next(tls, pIter) { Xsqlite3changeset_op(tls, pIter, bp+176, bp+184, bp+188, uintptr(0)) @@ -134375,16 +134433,16 @@ if (*SessionApplyCtx)(unsafe.Pointer(bp+48)).FnCol == 0 { schemaMismatch = 1 Xsqlite3_log(tls, SQLITE_SCHEMA, - ts+33429, libc.VaList(bp+8, 
*(*uintptr)(unsafe.Pointer(bp + 200)))) + ts+33476, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(bp + 200)))) } else if (*SessionApplyCtx)(unsafe.Pointer(bp+48)).FnCol < *(*int32)(unsafe.Pointer(bp + 184)) { schemaMismatch = 1 Xsqlite3_log(tls, SQLITE_SCHEMA, - ts+33473, + ts+33520, libc.VaList(bp+16, *(*uintptr)(unsafe.Pointer(bp + 200)), (*SessionApplyCtx)(unsafe.Pointer(bp+48)).FnCol, *(*int32)(unsafe.Pointer(bp + 184)))) } else if *(*int32)(unsafe.Pointer(bp + 184)) < nMinCol || libc.Xmemcmp(tls, (*SessionApplyCtx)(unsafe.Pointer(bp+48)).FabPK, *(*uintptr)(unsafe.Pointer(bp + 192)), uint64(*(*int32)(unsafe.Pointer(bp + 184)))) != 0 { schemaMismatch = 1 Xsqlite3_log(tls, SQLITE_SCHEMA, - ts+33544, libc.VaList(bp+40, *(*uintptr)(unsafe.Pointer(bp + 200)))) + ts+33591, libc.VaList(bp+40, *(*uintptr)(unsafe.Pointer(bp + 200)))) } else { (*SessionApplyCtx)(unsafe.Pointer(bp + 48)).FnCol = *(*int32)(unsafe.Pointer(bp + 184)) if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(bp + 200)), ts+11348) { @@ -134438,14 +134496,14 @@ } } } - Xsqlite3_exec(tls, db, ts+33604, uintptr(0), uintptr(0), uintptr(0)) + Xsqlite3_exec(tls, db, ts+33651, uintptr(0), uintptr(0), uintptr(0)) if flags&SQLITE_CHANGESETAPPLY_NOSAVEPOINT == 0 { if rc == SQLITE_OK { - rc = Xsqlite3_exec(tls, db, ts+33634, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33681, uintptr(0), uintptr(0), uintptr(0)) } else { - Xsqlite3_exec(tls, db, ts+33658, uintptr(0), uintptr(0), uintptr(0)) - Xsqlite3_exec(tls, db, ts+33634, uintptr(0), uintptr(0), uintptr(0)) + Xsqlite3_exec(tls, db, ts+33705, uintptr(0), uintptr(0), uintptr(0)) + Xsqlite3_exec(tls, db, ts+33681, uintptr(0), uintptr(0), uintptr(0)) } } @@ -135693,7 +135751,7 @@ fts5yy_pop_parser_stack(tls, fts5yypParser) } - sqlite3Fts5ParseError(tls, pParse, ts+33686, 0) + sqlite3Fts5ParseError(tls, pParse, ts+33733, 0) (*Fts5yyParser)(unsafe.Pointer(fts5yypParser)).FpParse = pParse @@ -135981,7 +136039,7 @@ _ = fts5yymajor sqlite3Fts5ParseError(tls, - pParse, ts+33714, libc.VaList(bp, fts5yyminor.Fn, fts5yyminor.Fp)) + pParse, ts+33761, libc.VaList(bp, fts5yyminor.Fn, fts5yyminor.Fp)) (*Fts5yyParser)(unsafe.Pointer(fts5yypParser)).FpParse = pParse @@ -136168,7 +136226,7 @@ if n < 0 { n = int32(libc.Xstrlen(tls, z)) } - (*HighlightContext)(unsafe.Pointer(p)).FzOut = Xsqlite3_mprintf(tls, ts+33745, libc.VaList(bp, (*HighlightContext)(unsafe.Pointer(p)).FzOut, n, z)) + (*HighlightContext)(unsafe.Pointer(p)).FzOut = Xsqlite3_mprintf(tls, ts+33792, libc.VaList(bp, (*HighlightContext)(unsafe.Pointer(p)).FzOut, n, z)) if (*HighlightContext)(unsafe.Pointer(p)).FzOut == uintptr(0) { *(*int32)(unsafe.Pointer(pRc)) = SQLITE_NOMEM } @@ -136236,7 +136294,7 @@ var iCol int32 if nVal != 3 { - var zErr uintptr = ts + 33752 + var zErr uintptr = ts + 33799 Xsqlite3_result_error(tls, pCtx, zErr, -1) return } @@ -136418,7 +136476,7 @@ var nCol int32 if nVal != 5 { - var zErr uintptr = ts + 33802 + var zErr uintptr = ts + 33849 Xsqlite3_result_error(tls, pCtx, zErr, -1) return } @@ -136742,13 +136800,13 @@ defer tls.Free(96) *(*[3]Builtin)(unsafe.Pointer(bp)) = [3]Builtin{ - {FzFunc: ts + 33850, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { + {FzFunc: ts + 33897, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr, int32, uintptr) }{fts5SnippetFunction}))}, - {FzFunc: ts + 33858, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { + {FzFunc: ts + 33905, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr, 
int32, uintptr) }{fts5HighlightFunction}))}, - {FzFunc: ts + 33868, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { + {FzFunc: ts + 33915, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr, int32, uintptr) }{fts5Bm25Function}))}, } @@ -137299,7 +137357,7 @@ *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_OK var nCmd int32 = int32(libc.Xstrlen(tls, zCmd)) - if Xsqlite3_strnicmp(tls, ts+33873, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+33920, zCmd, nCmd) == 0 { var nByte int32 = int32(uint64(unsafe.Sizeof(int32(0))) * uint64(FTS5_MAX_PREFIX_INDEXES)) var p uintptr var bFirst int32 = 1 @@ -137326,14 +137384,14 @@ break } if int32(*(*int8)(unsafe.Pointer(p))) < '0' || int32(*(*int8)(unsafe.Pointer(p))) > '9' { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+33880, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+33927, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR break } if (*Fts5Config)(unsafe.Pointer(pConfig)).FnPrefix == FTS5_MAX_PREFIX_INDEXES { *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, - ts+33911, libc.VaList(bp, FTS5_MAX_PREFIX_INDEXES)) + ts+33958, libc.VaList(bp, FTS5_MAX_PREFIX_INDEXES)) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR break } @@ -137344,7 +137402,7 @@ } if nPre <= 0 || nPre >= 1000 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+33944, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+33991, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR break } @@ -137357,7 +137415,7 @@ return *(*int32)(unsafe.Pointer(bp + 40)) } - if Xsqlite3_strnicmp(tls, ts+33981, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+34028, zCmd, nCmd) == 0 { var p uintptr = zArg var nArg Sqlite3_int64 = Sqlite3_int64(libc.Xstrlen(tls, zArg) + uint64(1)) var azArg uintptr = sqlite3Fts5MallocZero(tls, bp+40, int64(uint64(unsafe.Sizeof(uintptr(0)))*uint64(nArg))) @@ -137366,7 +137424,7 @@ if azArg != 0 && pSpace != 0 { if (*Fts5Config)(unsafe.Pointer(pConfig)).FpTok != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+33990, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34037, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { for nArg = int64(0); p != 0 && *(*int8)(unsafe.Pointer(p)) != 0; nArg++ { @@ -137385,7 +137443,7 @@ } } if p == uintptr(0) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34023, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34070, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { *(*int32)(unsafe.Pointer(bp + 40)) = sqlite3Fts5GetTokenizer(tls, pGlobal, @@ -137400,14 +137458,14 @@ return *(*int32)(unsafe.Pointer(bp + 40)) } - if Xsqlite3_strnicmp(tls, ts+34057, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+34104, zCmd, nCmd) == 0 { if (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent != FTS5_CONTENT_NORMAL { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34065, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34112, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { if *(*int8)(unsafe.Pointer(zArg)) != 0 { (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent = FTS5_CONTENT_EXTERNAL - (*Fts5Config)(unsafe.Pointer(pConfig)).FzContent = sqlite3Fts5Mprintf(tls, bp+40, ts+34097, libc.VaList(bp+8, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, zArg)) + (*Fts5Config)(unsafe.Pointer(pConfig)).FzContent = sqlite3Fts5Mprintf(tls, bp+40, ts+34144, libc.VaList(bp+8, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, 
zArg)) } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent = FTS5_CONTENT_NONE } @@ -137415,9 +137473,9 @@ return *(*int32)(unsafe.Pointer(bp + 40)) } - if Xsqlite3_strnicmp(tls, ts+34103, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+34150, zCmd, nCmd) == 0 { if (*Fts5Config)(unsafe.Pointer(pConfig)).FzContentRowid != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34117, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34164, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FzContentRowid = sqlite3Fts5Strndup(tls, bp+40, zArg, -1) @@ -137425,9 +137483,9 @@ return *(*int32)(unsafe.Pointer(bp + 40)) } - if Xsqlite3_strnicmp(tls, ts+34155, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+34202, zCmd, nCmd) == 0 { if int32(*(*int8)(unsafe.Pointer(zArg))) != '0' && int32(*(*int8)(unsafe.Pointer(zArg))) != '1' || int32(*(*int8)(unsafe.Pointer(zArg + 1))) != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34166, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34213, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize = libc.Bool32(int32(*(*int8)(unsafe.Pointer(zArg))) == '1') @@ -137439,17 +137497,17 @@ *(*[4]Fts5Enum)(unsafe.Pointer(bp + 48)) = [4]Fts5Enum{ {FzName: ts + 8026, FeVal: FTS5_DETAIL_NONE}, {FzName: ts + 17345}, - {FzName: ts + 34201, FeVal: FTS5_DETAIL_COLUMNS}, + {FzName: ts + 34248, FeVal: FTS5_DETAIL_COLUMNS}, {}, } if libc.AssignPtrInt32(bp+40, fts5ConfigSetEnum(tls, bp+48, zArg, pConfig+92)) != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34209, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34256, 0) } return *(*int32)(unsafe.Pointer(bp + 40)) } - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34240, libc.VaList(bp+24, nCmd, zCmd)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34287, libc.VaList(bp+24, nCmd, zCmd)) return SQLITE_ERROR } @@ -137496,15 +137554,15 @@ defer tls.Free(16) var rc int32 = SQLITE_OK - if 0 == Xsqlite3_stricmp(tls, zCol, ts+22191) || + if 0 == Xsqlite3_stricmp(tls, zCol, ts+22238) || 0 == Xsqlite3_stricmp(tls, zCol, ts+16267) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34268, libc.VaList(bp, zCol)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34315, libc.VaList(bp, zCol)) rc = SQLITE_ERROR } else if zArg != 0 { - if 0 == Xsqlite3_stricmp(tls, zArg, ts+34298) { + if 0 == Xsqlite3_stricmp(tls, zArg, ts+34345) { *(*U8)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(p)).FabUnindexed + uintptr((*Fts5Config)(unsafe.Pointer(p)).FnCol))) = U8(1) } else { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34308, libc.VaList(bp+8, zArg)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34355, libc.VaList(bp+8, zArg)) rc = SQLITE_ERROR } } @@ -137521,13 +137579,13 @@ *(*int32)(unsafe.Pointer(bp + 24)) = SQLITE_OK *(*Fts5Buffer)(unsafe.Pointer(bp + 32)) = Fts5Buffer{} - sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+34339, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(p)).FzContentRowid)) + sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+34386, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(p)).FzContentRowid)) if (*Fts5Config)(unsafe.Pointer(p)).FeContent != FTS5_CONTENT_NONE { for i = 0; i < (*Fts5Config)(unsafe.Pointer(p)).FnCol; i++ { if (*Fts5Config)(unsafe.Pointer(p)).FeContent == FTS5_CONTENT_EXTERNAL { - 
sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+34344, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(p)).FazCol + uintptr(i)*8)))) + sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+34391, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(p)).FazCol + uintptr(i)*8)))) } else { - sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+34351, libc.VaList(bp+16, i)) + sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+34398, libc.VaList(bp+16, i)) } } } @@ -137565,8 +137623,8 @@ (*Fts5Config)(unsafe.Pointer(pRet)).FzName = sqlite3Fts5Strndup(tls, bp+40, *(*uintptr)(unsafe.Pointer(azArg + 2*8)), -1) (*Fts5Config)(unsafe.Pointer(pRet)).FbColumnsize = 1 (*Fts5Config)(unsafe.Pointer(pRet)).FeDetail = FTS5_DETAIL_FULL - if *(*int32)(unsafe.Pointer(bp + 40)) == SQLITE_OK && Xsqlite3_stricmp(tls, (*Fts5Config)(unsafe.Pointer(pRet)).FzName, ts+22191) == 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34359, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pRet)).FzName)) + if *(*int32)(unsafe.Pointer(bp + 40)) == SQLITE_OK && Xsqlite3_stricmp(tls, (*Fts5Config)(unsafe.Pointer(pRet)).FzName, ts+22238) == 0 { + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34406, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pRet)).FzName)) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } @@ -137598,7 +137656,7 @@ if *(*int32)(unsafe.Pointer(bp + 40)) == SQLITE_OK { if z == uintptr(0) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34388, libc.VaList(bp+8, zOrig)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34435, libc.VaList(bp+8, zOrig)) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { if bOption != 0 { @@ -137635,14 +137693,14 @@ var zTail uintptr = uintptr(0) if (*Fts5Config)(unsafe.Pointer(pRet)).FeContent == FTS5_CONTENT_NORMAL { - zTail = ts + 34057 + zTail = ts + 34104 } else if (*Fts5Config)(unsafe.Pointer(pRet)).FbColumnsize != 0 { - zTail = ts + 34408 + zTail = ts + 34455 } if zTail != 0 { (*Fts5Config)(unsafe.Pointer(pRet)).FzContent = sqlite3Fts5Mprintf(tls, - bp+40, ts+34416, libc.VaList(bp+16, (*Fts5Config)(unsafe.Pointer(pRet)).FzDb, (*Fts5Config)(unsafe.Pointer(pRet)).FzName, zTail)) + bp+40, ts+34463, libc.VaList(bp+16, (*Fts5Config)(unsafe.Pointer(pRet)).FzDb, (*Fts5Config)(unsafe.Pointer(pRet)).FzName, zTail)) } } @@ -137691,7 +137749,7 @@ *(*int32)(unsafe.Pointer(bp + 48)) = SQLITE_OK var zSql uintptr - zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34427, 0) + zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34474, 0) for i = 0; zSql != 0 && i < (*Fts5Config)(unsafe.Pointer(pConfig)).FnCol; i++ { var zSep uintptr = func() uintptr { if i == 0 { @@ -137699,10 +137757,10 @@ } return ts + 14614 }() - zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34443, libc.VaList(bp, zSql, zSep, *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(pConfig)).FazCol + uintptr(i)*8)))) + zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34490, libc.VaList(bp, zSql, zSep, *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(pConfig)).FazCol + uintptr(i)*8)))) } - zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34450, - libc.VaList(bp+24, zSql, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, ts+22191)) + zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34497, + libc.VaList(bp+24, zSql, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, ts+22238)) if zSql != 0 { *(*int32)(unsafe.Pointer(bp + 48)) = Xsqlite3_declare_vtab(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, zSql) @@ -137812,7 +137870,7 @@ var rc int32 = SQLITE_OK - if 
0 == Xsqlite3_stricmp(tls, zKey, ts+34476) { + if 0 == Xsqlite3_stricmp(tls, zKey, ts+34523) { var pgsz int32 = 0 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { pgsz = Xsqlite3_value_int(tls, pVal) @@ -137822,7 +137880,7 @@ } else { (*Fts5Config)(unsafe.Pointer(pConfig)).Fpgsz = pgsz } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34481) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34528) { var nHashSize int32 = -1 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { nHashSize = Xsqlite3_value_int(tls, pVal) @@ -137832,7 +137890,7 @@ } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FnHashSize = nHashSize } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34490) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34537) { var nAutomerge int32 = -1 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { nAutomerge = Xsqlite3_value_int(tls, pVal) @@ -137845,7 +137903,7 @@ } (*Fts5Config)(unsafe.Pointer(pConfig)).FnAutomerge = nAutomerge } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34500) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34547) { var nUsermerge int32 = -1 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { nUsermerge = Xsqlite3_value_int(tls, pVal) @@ -137855,7 +137913,7 @@ } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FnUsermerge = nUsermerge } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34510) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34557) { var nCrisisMerge int32 = -1 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { nCrisisMerge = Xsqlite3_value_int(tls, pVal) @@ -137871,7 +137929,7 @@ } (*Fts5Config)(unsafe.Pointer(pConfig)).FnCrisisMerge = nCrisisMerge } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+22191) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+22238) { var zIn uintptr = Xsqlite3_value_text(tls, pVal) rc = sqlite3Fts5ConfigParseRank(tls, zIn, bp, bp+8) @@ -137894,7 +137952,7 @@ bp := tls.Alloc(52) defer tls.Free(52) - var zSelect uintptr = ts + 34522 + var zSelect uintptr = ts + 34569 var zSql uintptr *(*uintptr)(unsafe.Pointer(bp + 40)) = uintptr(0) *(*int32)(unsafe.Pointer(bp + 32)) = SQLITE_OK @@ -137916,7 +137974,7 @@ for SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 40))) { var zK uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 40)), 0) var pVal uintptr = Xsqlite3_column_value(tls, *(*uintptr)(unsafe.Pointer(bp + 40)), 1) - if 0 == Xsqlite3_stricmp(tls, zK, ts+34554) { + if 0 == Xsqlite3_stricmp(tls, zK, ts+34601) { iVersion = Xsqlite3_value_int(tls, pVal) } else { *(*int32)(unsafe.Pointer(bp + 48)) = 0 @@ -137930,7 +137988,7 @@ *(*int32)(unsafe.Pointer(bp + 32)) = SQLITE_ERROR if (*Fts5Config)(unsafe.Pointer(pConfig)).FpzErrmsg != 0 { *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(pConfig)).FpzErrmsg)) = Xsqlite3_mprintf(tls, - ts+34562, + ts+34609, libc.VaList(bp+16, iVersion, FTS5_CURRENT_VERSION)) } } @@ -138028,7 +138086,7 @@ } } if int32(*(*int8)(unsafe.Pointer(z2))) == 0 { - sqlite3Fts5ParseError(tls, pParse, ts+34627, 0) + sqlite3Fts5ParseError(tls, pParse, ts+34674, 0) return FTS5_EOF } } @@ -138041,20 +138099,20 @@ { var z2 uintptr if sqlite3Fts5IsBareword(tls, *(*int8)(unsafe.Pointer(z))) == 0 { - sqlite3Fts5ParseError(tls, pParse, ts+34647, libc.VaList(bp, z)) + sqlite3Fts5ParseError(tls, pParse, ts+34694, libc.VaList(bp, z)) return FTS5_EOF } tok = FTS5_STRING for z2 = z + 1; sqlite3Fts5IsBareword(tls, *(*int8)(unsafe.Pointer(z2))) != 0; z2++ { } (*Fts5Token)(unsafe.Pointer(pToken)).Fn = int32((int64(z2) - int64(z)) / 
1) - if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 2 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+34678, uint64(2)) == 0 { + if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 2 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+34725, uint64(2)) == 0 { tok = FTS5_OR } - if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 3 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+34681, uint64(3)) == 0 { + if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 3 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+34728, uint64(3)) == 0 { tok = FTS5_NOT } - if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 3 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+30063, uint64(3)) == 0 { + if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 3 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+30110, uint64(3)) == 0 { tok = FTS5_AND } break @@ -139832,9 +139890,9 @@ bp := tls.Alloc(16) defer tls.Free(16) - if (*Fts5Token)(unsafe.Pointer(pTok)).Fn != 4 || libc.Xmemcmp(tls, ts+34685, (*Fts5Token)(unsafe.Pointer(pTok)).Fp, uint64(4)) != 0 { + if (*Fts5Token)(unsafe.Pointer(pTok)).Fn != 4 || libc.Xmemcmp(tls, ts+34732, (*Fts5Token)(unsafe.Pointer(pTok)).Fp, uint64(4)) != 0 { sqlite3Fts5ParseError(tls, - pParse, ts+33714, libc.VaList(bp, (*Fts5Token)(unsafe.Pointer(pTok)).Fn, (*Fts5Token)(unsafe.Pointer(pTok)).Fp)) + pParse, ts+33761, libc.VaList(bp, (*Fts5Token)(unsafe.Pointer(pTok)).Fn, (*Fts5Token)(unsafe.Pointer(pTok)).Fp)) } } @@ -139850,7 +139908,7 @@ var c int8 = *(*int8)(unsafe.Pointer((*Fts5Token)(unsafe.Pointer(p)).Fp + uintptr(i))) if int32(c) < '0' || int32(c) > '9' { sqlite3Fts5ParseError(tls, - pParse, ts+34690, libc.VaList(bp, (*Fts5Token)(unsafe.Pointer(p)).Fn, (*Fts5Token)(unsafe.Pointer(p)).Fp)) + pParse, ts+34737, libc.VaList(bp, (*Fts5Token)(unsafe.Pointer(p)).Fn, (*Fts5Token)(unsafe.Pointer(p)).Fp)) return } nNear = nNear*10 + (int32(*(*int8)(unsafe.Pointer((*Fts5Token)(unsafe.Pointer(p)).Fp + uintptr(i)))) - '0') @@ -139937,7 +139995,7 @@ } } if iCol == (*Fts5Config)(unsafe.Pointer(pConfig)).FnCol { - sqlite3Fts5ParseError(tls, pParse, ts+20528, libc.VaList(bp, z)) + sqlite3Fts5ParseError(tls, pParse, ts+20575, libc.VaList(bp, z)) } else { pRet = fts5ParseColset(tls, pParse, pColset, iCol) } @@ -140018,7 +140076,7 @@ *(*uintptr)(unsafe.Pointer(bp)) = pColset if (*Fts5Config)(unsafe.Pointer((*Fts5Parse)(unsafe.Pointer(pParse)).FpConfig)).FeDetail == FTS5_DETAIL_NONE { sqlite3Fts5ParseError(tls, pParse, - ts+34719, 0) + ts+34766, 0) } else { fts5ParseSetColset(tls, pParse, pExpr, pColset, bp) } @@ -140188,12 +140246,12 @@ (*Fts5ExprPhrase)(unsafe.Pointer(pPhrase)).FnTerm > 1 || (*Fts5ExprPhrase)(unsafe.Pointer(pPhrase)).FnTerm > 0 && (*Fts5ExprTerm)(unsafe.Pointer(pPhrase+32)).FbFirst != 0 { sqlite3Fts5ParseError(tls, pParse, - ts+34772, + ts+34819, libc.VaList(bp, func() uintptr { if (*Fts5ExprNearset)(unsafe.Pointer(pNear)).FnPhrase == 1 { - return ts + 34822 + return ts + 34869 } - return ts + 34685 + return ts + 34732 }())) Xsqlite3_free(tls, pRet) pRet = uintptr(0) @@ -141136,7 +141194,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpReader == uintptr(0) && rc == SQLITE_OK { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig rc = Xsqlite3_blob_open(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, - (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl, ts+34829, iRowid, 0, p+56) + (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl, ts+34876, iRowid, 0, 
p+56) } if rc == SQLITE_ERROR { @@ -141215,7 +141273,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpWriter == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig fts5IndexPrepareStmt(tls, p, p+64, Xsqlite3_mprintf(tls, - ts+34835, + ts+34882, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName))) if (*Fts5Index)(unsafe.Pointer(p)).Frc != 0 { return @@ -141240,7 +141298,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpDeleter == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig var zSql uintptr = Xsqlite3_mprintf(tls, - ts+34886, + ts+34933, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) if fts5IndexPrepareStmt(tls, p, p+72, zSql) != 0 { return @@ -141263,7 +141321,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpIdxDeleter == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig fts5IndexPrepareStmt(tls, p, p+88, Xsqlite3_mprintf(tls, - ts+34935, + ts+34982, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName))) } if (*Fts5Index)(unsafe.Pointer(p)).Frc == SQLITE_OK { @@ -141502,7 +141560,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).Frc == SQLITE_OK { if (*Fts5Index)(unsafe.Pointer(p)).FpDataVersion == uintptr(0) { (*Fts5Index)(unsafe.Pointer(p)).Frc = fts5IndexPrepareStmt(tls, p, p+112, - Xsqlite3_mprintf(tls, ts+34975, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer((*Fts5Index)(unsafe.Pointer(p)).FpConfig)).FzDb))) + Xsqlite3_mprintf(tls, ts+35022, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer((*Fts5Index)(unsafe.Pointer(p)).FpConfig)).FzDb))) if (*Fts5Index)(unsafe.Pointer(p)).Frc != 0 { return int64(0) } @@ -142701,7 +142759,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpIdxSelect == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig fts5IndexPrepareStmt(tls, p, p+96, Xsqlite3_mprintf(tls, - ts+34998, + ts+35045, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName))) } return (*Fts5Index)(unsafe.Pointer(p)).FpIdxSelect @@ -144167,7 +144225,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpIdxWriter == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig fts5IndexPrepareStmt(tls, p, p+80, Xsqlite3_mprintf(tls, - ts+35082, + ts+35129, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName))) } @@ -145249,13 +145307,13 @@ if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { (*Fts5Index)(unsafe.Pointer(p)).FpConfig = pConfig (*Fts5Index)(unsafe.Pointer(p)).FnWorkUnit = FTS5_WORK_UNIT - (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl = sqlite3Fts5Mprintf(tls, bp+8, ts+35139, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) + (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl = sqlite3Fts5Mprintf(tls, bp+8, ts+35186, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) if (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl != 0 && bCreate != 0 { *(*int32)(unsafe.Pointer(bp + 8)) = sqlite3Fts5CreateTable(tls, - pConfig, ts+25056, ts+35147, 0, pzErr) + pConfig, ts+25103, ts+35194, 0, pzErr) if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { *(*int32)(unsafe.Pointer(bp + 8)) = sqlite3Fts5CreateTable(tls, pConfig, ts+11488, - ts+35182, + ts+35229, 1, pzErr) } if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { @@ -145508,7 +145566,7 @@ sqlite3Fts5Put32(tls, bp, iNew) rc = Xsqlite3_blob_open(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, 
(*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl, - ts+34829, int64(FTS5_STRUCTURE_ROWID), 1, bp+8) + ts+34876, int64(FTS5_STRUCTURE_ROWID), 1, bp+8) if rc == SQLITE_OK { Xsqlite3_blob_write(tls, *(*uintptr)(unsafe.Pointer(bp + 8)), bp, 4, 0) rc = Xsqlite3_blob_close(tls, *(*uintptr)(unsafe.Pointer(bp + 8))) @@ -145622,7 +145680,7 @@ } fts5IndexPrepareStmt(tls, p, bp+24, Xsqlite3_mprintf(tls, - ts+35226, + ts+35273, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, (*Fts5StructureSegment)(unsafe.Pointer(pSeg)).FiSegid))) for (*Fts5Index)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) { @@ -145792,7 +145850,7 @@ } else { (*Fts5Buffer)(unsafe.Pointer(bp + 16)).Fn = 0 fts5SegiterPoslist(tls, p, *(*uintptr)(unsafe.Pointer(bp))+96+uintptr((*Fts5CResult)(unsafe.Pointer((*Fts5Iter)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FaFirst+1*4)).FiFirst)*120, uintptr(0), bp+16) - sqlite3Fts5BufferAppendBlob(tls, p+52, bp+16, uint32(4), ts+35312) + sqlite3Fts5BufferAppendBlob(tls, p+52, bp+16, uint32(4), ts+35359) for 0 == sqlite3Fts5PoslistNext64(tls, (*Fts5Buffer)(unsafe.Pointer(bp+16)).Fp, (*Fts5Buffer)(unsafe.Pointer(bp+16)).Fn, bp+32, bp+40) { var iCol int32 = int32(*(*I64)(unsafe.Pointer(bp + 40)) >> 32) var iTokOff int32 = int32(*(*I64)(unsafe.Pointer(bp + 40)) & int64(0x7FFFFFFF)) @@ -146063,7 +146121,7 @@ if (*Fts5Config)(unsafe.Pointer(pConfig)).FbLock != 0 { (*Fts5Table)(unsafe.Pointer(pTab)).Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+35317, 0) + ts+35364, 0) return SQLITE_ERROR } @@ -146487,7 +146545,7 @@ (*Fts5Sorter)(unsafe.Pointer(pSorter)).FnIdx = nPhrase rc = fts5PrepareStatement(tls, pSorter, pConfig, - ts+35356, + ts+35403, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zRank, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, func() uintptr { if zRankArgs != 0 { @@ -146503,9 +146561,9 @@ }(), func() uintptr { if bDesc != 0 { - return ts + 35411 + return ts + 35458 } - return ts + 35416 + return ts + 35463 }())) (*Fts5Cursor)(unsafe.Pointer(pCsr)).FpSorter = pSorter @@ -146551,12 +146609,12 @@ (*Fts5Cursor)(unsafe.Pointer(pCsr)).FePlan = FTS5_PLAN_SPECIAL - if n == 5 && 0 == Xsqlite3_strnicmp(tls, ts+35420, z, n) { + if n == 5 && 0 == Xsqlite3_strnicmp(tls, ts+35467, z, n) { (*Fts5Cursor)(unsafe.Pointer(pCsr)).FiSpecial = I64(sqlite3Fts5IndexReads(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.FpIndex)) } else if n == 2 && 0 == Xsqlite3_strnicmp(tls, ts+5057, z, n) { (*Fts5Cursor)(unsafe.Pointer(pCsr)).FiSpecial = (*Fts5Cursor)(unsafe.Pointer(pCsr)).FiCsrId } else { - (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, ts+35426, libc.VaList(bp, n, z)) + (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, ts+35473, libc.VaList(bp, n, z)) rc = SQLITE_ERROR } @@ -146587,7 +146645,7 @@ var zRankArgs uintptr = (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRankArgs if zRankArgs != 0 { - var zSql uintptr = sqlite3Fts5Mprintf(tls, bp+16, ts+35454, libc.VaList(bp, zRankArgs)) + var zSql uintptr = sqlite3Fts5Mprintf(tls, bp+16, ts+35501, libc.VaList(bp, zRankArgs)) if zSql != 0 { *(*uintptr)(unsafe.Pointer(bp + 24)) = uintptr(0) *(*int32)(unsafe.Pointer(bp + 16)) = Xsqlite3_prepare_v3(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, zSql, -1, @@ -146618,7 +146676,7 @@ if *(*int32)(unsafe.Pointer(bp + 16)) == SQLITE_OK { pAux = 
fts5FindAuxiliary(tls, pTab, zRank) if pAux == uintptr(0) { - (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, ts+35464, libc.VaList(bp+8, zRank)) + (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, ts+35511, libc.VaList(bp+8, zRank)) *(*int32)(unsafe.Pointer(bp + 16)) = SQLITE_ERROR } } @@ -146650,14 +146708,14 @@ *(*int32)(unsafe.Pointer(pCsr + 80)) |= FTS5CSR_FREE_ZRANK } else if rc == SQLITE_ERROR { (*Sqlite3_vtab)(unsafe.Pointer((*Fts5Cursor)(unsafe.Pointer(pCsr)).Fbase.FpVtab)).FzErrMsg = Xsqlite3_mprintf(tls, - ts+35485, libc.VaList(bp, z)) + ts+35532, libc.VaList(bp, z)) } } else { if (*Fts5Config)(unsafe.Pointer(pConfig)).FzRank != 0 { (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRank = (*Fts5Config)(unsafe.Pointer(pConfig)).FzRank (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRankArgs = (*Fts5Config)(unsafe.Pointer(pConfig)).FzRankArgs } else { - (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRank = ts + 33868 + (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRank = ts + 33915 (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRankArgs = uintptr(0) } } @@ -146713,7 +146771,7 @@ goto __1 } (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+35317, 0) + ts+35364, 0) return SQLITE_ERROR __1: ; @@ -146930,7 +146988,7 @@ goto __40 } *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(pConfig)).FpzErrmsg)) = Xsqlite3_mprintf(tls, - ts+35518, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) + ts+35565, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) rc = SQLITE_ERROR goto __41 __40: @@ -147075,28 +147133,28 @@ var rc int32 = SQLITE_OK *(*int32)(unsafe.Pointer(bp)) = 0 - if 0 == Xsqlite3_stricmp(tls, ts+35554, zCmd) { + if 0 == Xsqlite3_stricmp(tls, ts+35601, zCmd) { if (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NORMAL { fts5SetVtabError(tls, pTab, - ts+35565, 0) + ts+35612, 0) rc = SQLITE_ERROR } else { rc = sqlite3Fts5StorageDeleteAll(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage) } - } else if 0 == Xsqlite3_stricmp(tls, ts+35645, zCmd) { + } else if 0 == Xsqlite3_stricmp(tls, ts+35692, zCmd) { if (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NONE { fts5SetVtabError(tls, pTab, - ts+35653, 0) + ts+35700, 0) rc = SQLITE_ERROR } else { rc = sqlite3Fts5StorageRebuild(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage) } } else if 0 == Xsqlite3_stricmp(tls, ts+16934, zCmd) { rc = sqlite3Fts5StorageOptimize(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage) - } else if 0 == Xsqlite3_stricmp(tls, ts+35709, zCmd) { + } else if 0 == Xsqlite3_stricmp(tls, ts+35756, zCmd) { var nMerge int32 = Xsqlite3_value_int(tls, pVal) rc = sqlite3Fts5StorageMerge(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage, nMerge) - } else if 0 == Xsqlite3_stricmp(tls, ts+35715, zCmd) { + } else if 0 == Xsqlite3_stricmp(tls, ts+35762, zCmd) { var iArg int32 = Xsqlite3_value_int(tls, pVal) rc = sqlite3Fts5StorageIntegrity(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage, iArg) } else { @@ -147167,12 +147225,12 @@ if eType0 == SQLITE_INTEGER && fts5IsContentless(tls, pTab) != 0 { (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+35731, + ts+35778, libc.VaList(bp, func() uintptr { if nArg > 1 { - return ts + 20429 + return ts + 20476 } - return ts + 35768 + return ts + 35815 }(), (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) *(*int32)(unsafe.Pointer(bp + 16)) = SQLITE_ERROR } else if nArg == 1 { @@ -147802,7 +147860,7 @@ pCsr = 
fts5CursorFromCsrid(tls, (*Fts5Auxiliary)(unsafe.Pointer(pAux)).FpGlobal, iCsrId) if pCsr == uintptr(0) || (*Fts5Cursor)(unsafe.Pointer(pCsr)).FePlan == 0 { - var zErr uintptr = Xsqlite3_mprintf(tls, ts+35780, libc.VaList(bp, iCsrId)) + var zErr uintptr = Xsqlite3_mprintf(tls, ts+35827, libc.VaList(bp, iCsrId)) Xsqlite3_result_error(tls, context, zErr, -1) Xsqlite3_free(tls, zErr) } else { @@ -148046,7 +148104,7 @@ }()) if pMod == uintptr(0) { rc = SQLITE_ERROR - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35801, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(azArg)))) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35848, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(azArg)))) } else { rc = (*struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 @@ -148065,7 +148123,7 @@ (*Fts5Config)(unsafe.Pointer(pConfig)).FpTokApi = pMod + 16 if rc != SQLITE_OK { if pzErr != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35823, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35870, 0) } } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FePattern = sqlite3Fts5TokenizerPattern(tls, @@ -148112,7 +148170,7 @@ var ppApi uintptr _ = nArg - ppApi = Xsqlite3_value_pointer(tls, *(*uintptr)(unsafe.Pointer(apArg)), ts+35854) + ppApi = Xsqlite3_value_pointer(tls, *(*uintptr)(unsafe.Pointer(apArg)), ts+35901) if ppApi != 0 { *(*uintptr)(unsafe.Pointer(ppApi)) = pGlobal } @@ -148121,7 +148179,7 @@ func fts5SourceIdFunc(tls *libc.TLS, pCtx uintptr, nArg int32, apUnused uintptr) { _ = nArg _ = apUnused - Xsqlite3_result_text(tls, pCtx, ts+35867, -1, libc.UintptrFromInt32(-1)) + Xsqlite3_result_text(tls, pCtx, ts+35914, -1, libc.UintptrFromInt32(-1)) } func fts5ShadowName(tls *libc.TLS, zName uintptr) int32 { @@ -148135,7 +148193,7 @@ } var azName2 = [5]uintptr{ - ts + 35958, ts + 34057, ts + 25056, ts + 34408, ts + 11488, + ts + 36005, ts + 34104, ts + 25103, ts + 34455, ts + 11488, } func fts5Init(tls *libc.TLS, db uintptr) int32 { @@ -148159,7 +148217,7 @@ (*Fts5Global)(unsafe.Pointer(pGlobal)).Fapi.FxFindTokenizer = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr, uintptr) int32 }{fts5FindTokenizer})) - rc = Xsqlite3_create_module_v2(tls, db, ts+35965, uintptr(unsafe.Pointer(&fts5Mod)), p, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5ModuleDestroy}))) + rc = Xsqlite3_create_module_v2(tls, db, ts+36012, uintptr(unsafe.Pointer(&fts5Mod)), p, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5ModuleDestroy}))) if rc == SQLITE_OK { rc = sqlite3Fts5IndexInit(tls, db) } @@ -148177,13 +148235,13 @@ } if rc == SQLITE_OK { rc = Xsqlite3_create_function(tls, - db, ts+35965, 1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { + db, ts+36012, 1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{fts5Fts5Func})), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { rc = Xsqlite3_create_function(tls, - db, ts+35970, 0, + db, ts+36017, 0, SQLITE_UTF8|SQLITE_DETERMINISTIC|SQLITE_INNOCUOUS, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) @@ -148240,17 +148298,17 @@ if *(*uintptr)(unsafe.Pointer(p + 40 + uintptr(eStmt)*8)) == uintptr(0) { *(*[11]uintptr)(unsafe.Pointer(bp + 128)) = [11]uintptr{ - ts + 35985, - ts + 36053, - ts + 36122, - ts + 36155, - ts + 36194, - ts + 36234, - ts + 36273, - ts + 36314, - ts + 36353, - ts + 36395, - ts + 36435, + ts + 36032, + ts + 36100, + ts + 36169, + ts + 36202, + 
ts + 36241, + ts + 36281, + ts + 36320, + ts + 36361, + ts + 36400, + ts + 36442, + ts + 36482, } var pC uintptr = (*Fts5Storage)(unsafe.Pointer(p)).FpConfig var zSql uintptr = uintptr(0) @@ -148352,18 +148410,18 @@ defer tls.Free(80) var rc int32 = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36458, + ts+36505, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36562, + ts+36609, libc.VaList(bp+48, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) } if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NORMAL { rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36600, + ts+36647, libc.VaList(bp+64, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) } return rc @@ -148375,7 +148433,7 @@ if *(*int32)(unsafe.Pointer(pRc)) == SQLITE_OK { *(*int32)(unsafe.Pointer(pRc)) = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36638, + ts+36685, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zTail, zName, zTail)) } } @@ -148387,14 +148445,14 @@ var pConfig uintptr = (*Fts5Storage)(unsafe.Pointer(pStorage)).FpConfig *(*int32)(unsafe.Pointer(bp)) = sqlite3Fts5StorageSync(tls, pStorage) - fts5StorageRenameOne(tls, pConfig, bp, ts+25056, zName) + fts5StorageRenameOne(tls, pConfig, bp, ts+25103, zName) fts5StorageRenameOne(tls, pConfig, bp, ts+11488, zName) - fts5StorageRenameOne(tls, pConfig, bp, ts+35958, zName) + fts5StorageRenameOne(tls, pConfig, bp, ts+36005, zName) if (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { - fts5StorageRenameOne(tls, pConfig, bp, ts+34408, zName) + fts5StorageRenameOne(tls, pConfig, bp, ts+34455, zName) } if (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NORMAL { - fts5StorageRenameOne(tls, pConfig, bp, ts+34057, zName) + fts5StorageRenameOne(tls, pConfig, bp, ts+34104, zName) } return *(*int32)(unsafe.Pointer(bp)) } @@ -148406,17 +148464,17 @@ var rc int32 *(*uintptr)(unsafe.Pointer(bp + 64)) = uintptr(0) - rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, bp+64, ts+36680, + rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, bp+64, ts+36727, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zPost, zDefn, func() uintptr { if bWithout != 0 { - return ts + 29709 + return ts + 29756 } return ts + 1554 }())) if *(*uintptr)(unsafe.Pointer(bp + 64)) != 0 { *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, - ts+36710, + ts+36757, libc.VaList(bp+40, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zPost, *(*uintptr)(unsafe.Pointer(bp + 64)))) Xsqlite3_free(tls, *(*uintptr)(unsafe.Pointer(bp + 64))) } @@ -148453,27 +148511,27 @@ } else { var i int32 var iOff int32 - Xsqlite3_snprintf(tls, nDefn, zDefn, ts+36754, 0) + Xsqlite3_snprintf(tls, nDefn, zDefn, ts+36801, 0) iOff = int32(libc.Xstrlen(tls, zDefn)) for i = 0; i < (*Fts5Config)(unsafe.Pointer(pConfig)).FnCol; i++ { - 
Xsqlite3_snprintf(tls, nDefn-iOff, zDefn+uintptr(iOff), ts+36777, libc.VaList(bp, i)) + Xsqlite3_snprintf(tls, nDefn-iOff, zDefn+uintptr(iOff), ts+36824, libc.VaList(bp, i)) iOff = iOff + int32(libc.Xstrlen(tls, zDefn+uintptr(iOff))) } - rc = sqlite3Fts5CreateTable(tls, pConfig, ts+34057, zDefn, 0, pzErr) + rc = sqlite3Fts5CreateTable(tls, pConfig, ts+34104, zDefn, 0, pzErr) } Xsqlite3_free(tls, zDefn) } if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { rc = sqlite3Fts5CreateTable(tls, - pConfig, ts+34408, ts+36783, 0, pzErr) + pConfig, ts+34455, ts+36830, 0, pzErr) } if rc == SQLITE_OK { rc = sqlite3Fts5CreateTable(tls, - pConfig, ts+35958, ts+36815, 1, pzErr) + pConfig, ts+36005, ts+36862, 1, pzErr) } if rc == SQLITE_OK { - rc = sqlite3Fts5StorageConfigValue(tls, p, ts+34554, uintptr(0), FTS5_CURRENT_VERSION) + rc = sqlite3Fts5StorageConfigValue(tls, p, ts+34601, uintptr(0), FTS5_CURRENT_VERSION) } } @@ -148679,12 +148737,12 @@ (*Fts5Storage)(unsafe.Pointer(p)).FbTotalsValid = 0 rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36832, + ts+36879, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36882, + ts+36929, libc.VaList(bp+32, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) } @@ -148692,7 +148750,7 @@ rc = sqlite3Fts5IndexReinit(tls, (*Fts5Storage)(unsafe.Pointer(p)).FpIndex) } if rc == SQLITE_OK { - rc = sqlite3Fts5StorageConfigValue(tls, p, ts+34554, uintptr(0), FTS5_CURRENT_VERSION) + rc = sqlite3Fts5StorageConfigValue(tls, p, ts+34601, uintptr(0), FTS5_CURRENT_VERSION) } return rc } @@ -148868,7 +148926,7 @@ var zSql uintptr var rc int32 - zSql = Xsqlite3_mprintf(tls, ts+36911, + zSql = Xsqlite3_mprintf(tls, ts+36958, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zSuffix)) if zSql == uintptr(0) { rc = SQLITE_NOMEM @@ -149050,14 +149108,14 @@ if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NORMAL { *(*I64)(unsafe.Pointer(bp + 48)) = int64(0) - rc = fts5StorageCount(tls, p, ts+34057, bp+48) + rc = fts5StorageCount(tls, p, ts+34104, bp+48) if rc == SQLITE_OK && *(*I64)(unsafe.Pointer(bp + 48)) != (*Fts5Storage)(unsafe.Pointer(p)).FnTotalRow { rc = SQLITE_CORRUPT | int32(1)<<8 } } if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { *(*I64)(unsafe.Pointer(bp + 56)) = int64(0) - rc = fts5StorageCount(tls, p, ts+34408, bp+56) + rc = fts5StorageCount(tls, p, ts+34455, bp+56) if rc == SQLITE_OK && *(*I64)(unsafe.Pointer(bp + 56)) != (*Fts5Storage)(unsafe.Pointer(p)).FnTotalRow { rc = SQLITE_CORRUPT | int32(1)<<8 } @@ -149252,9 +149310,9 @@ libc.Xmemcpy(tls, p, uintptr(unsafe.Pointer(&aAsciiTokenChar)), uint64(unsafe.Sizeof(aAsciiTokenChar))) for i = 0; rc == SQLITE_OK && i < nArg; i = i + 2 { var zArg uintptr = *(*uintptr)(unsafe.Pointer(azArg + uintptr(i+1)*8)) - if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36943) { + if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36990) { fts5AsciiAddExceptions(tls, p, zArg, 1) - } else if 0 == Xsqlite3_stricmp(tls, 
*(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36954) { + } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37001) { fts5AsciiAddExceptions(tls, p, zArg, 0) } else { rc = SQLITE_ERROR @@ -149469,7 +149527,7 @@ } else { p = Xsqlite3_malloc(tls, int32(unsafe.Sizeof(Unicode61Tokenizer{}))) if p != 0 { - var zCat uintptr = ts + 36965 + var zCat uintptr = ts + 37012 var i int32 libc.Xmemset(tls, p, 0, uint64(unsafe.Sizeof(Unicode61Tokenizer{}))) @@ -149481,7 +149539,7 @@ } for i = 0; rc == SQLITE_OK && i < nArg; i = i + 2 { - if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36974) { + if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37021) { zCat = *(*uintptr)(unsafe.Pointer(azArg + uintptr(i+1)*8)) } } @@ -149492,18 +149550,18 @@ for i = 0; rc == SQLITE_OK && i < nArg; i = i + 2 { var zArg uintptr = *(*uintptr)(unsafe.Pointer(azArg + uintptr(i+1)*8)) - if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36985) { + if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37032) { if int32(*(*int8)(unsafe.Pointer(zArg))) != '0' && int32(*(*int8)(unsafe.Pointer(zArg))) != '1' && int32(*(*int8)(unsafe.Pointer(zArg))) != '2' || *(*int8)(unsafe.Pointer(zArg + 1)) != 0 { rc = SQLITE_ERROR } else { (*Unicode61Tokenizer)(unsafe.Pointer(p)).FeRemoveDiacritic = int32(*(*int8)(unsafe.Pointer(zArg))) - '0' } - } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36943) { + } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36990) { rc = fts5UnicodeAddExceptions(tls, p, zArg, 1) - } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36954) { + } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37001) { rc = fts5UnicodeAddExceptions(tls, p, zArg, 0) - } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36974) { + } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37021) { } else { rc = SQLITE_ERROR } @@ -149779,7 +149837,7 @@ var rc int32 = SQLITE_OK var pRet uintptr *(*uintptr)(unsafe.Pointer(bp)) = uintptr(0) - var zBase uintptr = ts + 37003 + var zBase uintptr = ts + 37050 if nArg > 0 { zBase = *(*uintptr)(unsafe.Pointer(azArg)) @@ -149921,7 +149979,7 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*int8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'a': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37013, aBuf+uintptr(nBuf-2), uint64(2)) { + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37060, aBuf+uintptr(nBuf-2), uint64(2)) { if fts5Porter_MGt1(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 } @@ -149929,11 +149987,11 @@ break case 'c': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37016, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37063, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37021, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37068, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } @@ -149941,7 +149999,7 @@ break case 'e': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37026, 
aBuf+uintptr(nBuf-2), uint64(2)) { + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37073, aBuf+uintptr(nBuf-2), uint64(2)) { if fts5Porter_MGt1(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 } @@ -149949,7 +150007,7 @@ break case 'i': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37029, aBuf+uintptr(nBuf-2), uint64(2)) { + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37076, aBuf+uintptr(nBuf-2), uint64(2)) { if fts5Porter_MGt1(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 } @@ -149957,11 +150015,11 @@ break case 'l': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37032, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37079, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37037, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37084, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } @@ -149969,19 +150027,19 @@ break case 'n': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37042, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37089, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37046, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37093, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt1(tls, aBuf, nBuf-5) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37052, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37099, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } - } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37057, aBuf+uintptr(nBuf-3), uint64(3)) { + } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37104, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -149989,11 +150047,11 @@ break case 'o': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37061, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37108, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1_and_S_or_T(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } - } else if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37065, aBuf+uintptr(nBuf-2), uint64(2)) { + } else if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37112, aBuf+uintptr(nBuf-2), uint64(2)) { if fts5Porter_MGt1(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 } @@ -150001,7 +150059,7 @@ break case 's': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37068, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37115, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -150009,11 +150067,11 @@ break case 't': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37072, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37119, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } - } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37076, aBuf+uintptr(nBuf-3), uint64(3)) { + } else if nBuf > 3 && 0 == 
libc.Xmemcmp(tls, ts+37123, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -150021,7 +150079,7 @@ break case 'u': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37080, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37127, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -150029,7 +150087,7 @@ break case 'v': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37084, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37131, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -150037,7 +150095,7 @@ break case 'z': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37088, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37135, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -150053,24 +150111,24 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*int8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'a': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37092, aBuf+uintptr(nBuf-2), uint64(2)) { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37072, uint64(3)) + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37139, aBuf+uintptr(nBuf-2), uint64(2)) { + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37119, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 + 3 ret = 1 } break case 'b': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37095, aBuf+uintptr(nBuf-2), uint64(2)) { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37098, uint64(3)) + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37142, aBuf+uintptr(nBuf-2), uint64(2)) { + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37145, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 + 3 ret = 1 } break case 'i': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37102, aBuf+uintptr(nBuf-2), uint64(2)) { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37088, uint64(3)) + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37149, aBuf+uintptr(nBuf-2), uint64(2)) { + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37135, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 + 3 ret = 1 } @@ -150085,44 +150143,44 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*int8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'a': - if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37105, aBuf+uintptr(nBuf-7), uint64(7)) { + if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37152, aBuf+uintptr(nBuf-7), uint64(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37072, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37119, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } - } else if nBuf > 6 && 0 == libc.Xmemcmp(tls, ts+37113, aBuf+uintptr(nBuf-6), uint64(6)) { + } else if nBuf > 6 && 0 == libc.Xmemcmp(tls, ts+37160, aBuf+uintptr(nBuf-6), uint64(6)) { if fts5Porter_MGt0(tls, aBuf, nBuf-6) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-6), ts+37120, uint64(4)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-6), ts+37167, uint64(4)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 6 + 4 } } break case 'c': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37125, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37172, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37021, uint64(4)) 
+ libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37068, uint64(4)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 4 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37130, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37177, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37016, uint64(4)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37063, uint64(4)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 4 } } break case 'e': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37135, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37182, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37088, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37135, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 3 } } break case 'g': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37140, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37187, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+15480, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 3 @@ -150131,91 +150189,91 @@ break case 'l': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37145, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37192, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt0(tls, aBuf, nBuf-3) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37098, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37145, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 + 3 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37149, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37196, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37013, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37060, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 2 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37154, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37201, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37057, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37104, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 3 } - } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37160, aBuf+uintptr(nBuf-3), uint64(3)) { + } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37207, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt0(tls, aBuf, nBuf-3) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37164, uint64(1)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37211, uint64(1)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 + 1 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37166, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37213, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37080, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37127, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 3 } } break case 'o': - if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37172, aBuf+uintptr(nBuf-7), uint64(7)) { + if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37219, aBuf+uintptr(nBuf-7), uint64(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 
{ - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37088, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37135, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37180, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37227, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37072, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37119, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 3 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37186, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37233, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37072, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37119, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 3 } } break case 's': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37191, aBuf+uintptr(nBuf-5), uint64(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37238, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37013, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37060, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } - } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37197, aBuf+uintptr(nBuf-7), uint64(7)) { + } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37244, aBuf+uintptr(nBuf-7), uint64(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37084, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37131, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } - } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37205, aBuf+uintptr(nBuf-7), uint64(7)) { + } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37252, aBuf+uintptr(nBuf-7), uint64(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37213, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37260, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } - } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37217, aBuf+uintptr(nBuf-7), uint64(7)) { + } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37264, aBuf+uintptr(nBuf-7), uint64(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37080, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37127, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } } break case 't': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37225, aBuf+uintptr(nBuf-5), uint64(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37272, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37013, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37060, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37231, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37278, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37084, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37131, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 3 } - } else if nBuf > 6 && 0 == libc.Xmemcmp(tls, ts+37237, aBuf+uintptr(nBuf-6), uint64(6)) { + } else if nBuf > 6 && 0 == 
libc.Xmemcmp(tls, ts+37284, aBuf+uintptr(nBuf-6), uint64(6)) { if fts5Porter_MGt0(tls, aBuf, nBuf-6) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-6), ts+37098, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-6), ts+37145, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 6 + 3 } } @@ -150230,16 +150288,16 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*int8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'a': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37244, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37291, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37029, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37076, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 2 } } break case 's': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37249, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37296, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } @@ -150247,21 +150305,21 @@ break case 't': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37254, aBuf+uintptr(nBuf-5), uint64(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37301, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37029, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37076, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37260, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37307, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37029, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37076, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } } break case 'u': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37213, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37260, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt0(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -150269,7 +150327,7 @@ break case 'v': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37266, aBuf+uintptr(nBuf-5), uint64(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37313, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 } @@ -150277,9 +150335,9 @@ break case 'z': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37272, aBuf+uintptr(nBuf-5), uint64(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37319, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37013, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37060, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } } @@ -150294,12 +150352,12 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*int8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'e': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37278, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37325, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt0(tls, aBuf, nBuf-3) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37282, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37329, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 + 2 } - } else if nBuf > 2 && 0 == 
libc.Xmemcmp(tls, ts+37285, aBuf+uintptr(nBuf-2), uint64(2)) { + } else if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37332, aBuf+uintptr(nBuf-2), uint64(2)) { if fts5Porter_Vowel(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 ret = 1 @@ -150308,7 +150366,7 @@ break case 'n': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37288, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37335, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_Vowel(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 ret = 1 @@ -150464,7 +150522,7 @@ (*TrigramTokenizer)(unsafe.Pointer(pNew)).FbFold = 1 for i = 0; rc == SQLITE_OK && i < nArg; i = i + 2 { var zArg uintptr = *(*uintptr)(unsafe.Pointer(azArg + uintptr(i+1)*8)) - if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37292) { + if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37339) { if int32(*(*int8)(unsafe.Pointer(zArg))) != '0' && int32(*(*int8)(unsafe.Pointer(zArg))) != '1' || *(*int8)(unsafe.Pointer(zArg + 1)) != 0 { rc = SQLITE_ERROR } else { @@ -150644,22 +150702,22 @@ defer tls.Free(128) *(*[4]BuiltinTokenizer)(unsafe.Pointer(bp)) = [4]BuiltinTokenizer{ - {FzName: ts + 37003, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { + {FzName: ts + 37050, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 }{fts5UnicodeCreate})), FxDelete: *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5UnicodeDelete})), FxTokenize: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, int32, uintptr) int32 }{fts5UnicodeTokenize}))}}, - {FzName: ts + 37307, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { + {FzName: ts + 37354, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 }{fts5AsciiCreate})), FxDelete: *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5AsciiDelete})), FxTokenize: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, int32, uintptr) int32 }{fts5AsciiTokenize}))}}, - {FzName: ts + 37313, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { + {FzName: ts + 37360, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 }{fts5PorterCreate})), FxDelete: *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5PorterDelete})), FxTokenize: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, int32, uintptr) int32 }{fts5PorterTokenize}))}}, - {FzName: ts + 37320, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { + {FzName: ts + 37367, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 }{fts5TriCreate})), FxDelete: *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5TriDelete})), FxTokenize: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, int32, uintptr) int32 @@ -151802,14 +151860,14 @@ var zCopy uintptr = sqlite3Fts5Strndup(tls, bp+8, zType, -1) if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { sqlite3Fts5Dequote(tls, zCopy) - if Xsqlite3_stricmp(tls, zCopy, ts+37328) == 0 { + if Xsqlite3_stricmp(tls, zCopy, ts+37375) == 0 { *(*int32)(unsafe.Pointer(peType)) = FTS5_VOCAB_COL - } 
else if Xsqlite3_stricmp(tls, zCopy, ts+37332) == 0 { + } else if Xsqlite3_stricmp(tls, zCopy, ts+37379) == 0 { *(*int32)(unsafe.Pointer(peType)) = FTS5_VOCAB_ROW - } else if Xsqlite3_stricmp(tls, zCopy, ts+37336) == 0 { + } else if Xsqlite3_stricmp(tls, zCopy, ts+37383) == 0 { *(*int32)(unsafe.Pointer(peType)) = FTS5_VOCAB_INSTANCE } else { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+37345, libc.VaList(bp, zCopy)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+37392, libc.VaList(bp, zCopy)) *(*int32)(unsafe.Pointer(bp + 8)) = SQLITE_ERROR } Xsqlite3_free(tls, zCopy) @@ -151835,19 +151893,19 @@ defer tls.Free(36) *(*[3]uintptr)(unsafe.Pointer(bp + 8)) = [3]uintptr{ - ts + 37379, - ts + 37419, - ts + 37454, + ts + 37426, + ts + 37466, + ts + 37501, } var pRet uintptr = uintptr(0) *(*int32)(unsafe.Pointer(bp + 32)) = SQLITE_OK var bDb int32 - bDb = libc.Bool32(argc == 6 && libc.Xstrlen(tls, *(*uintptr)(unsafe.Pointer(argv + 1*8))) == uint64(4) && libc.Xmemcmp(tls, ts+23352, *(*uintptr)(unsafe.Pointer(argv + 1*8)), uint64(4)) == 0) + bDb = libc.Bool32(argc == 6 && libc.Xstrlen(tls, *(*uintptr)(unsafe.Pointer(argv + 1*8))) == uint64(4) && libc.Xmemcmp(tls, ts+23399, *(*uintptr)(unsafe.Pointer(argv + 1*8)), uint64(4)) == 0) if argc != 5 && bDb == 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+37497, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+37544, 0) *(*int32)(unsafe.Pointer(bp + 32)) = SQLITE_ERROR } else { var nByte int32 @@ -151980,11 +152038,11 @@ if (*Fts5VocabTable)(unsafe.Pointer(pTab)).FbBusy != 0 { (*Sqlite3_vtab)(unsafe.Pointer(pVTab)).FzErrMsg = Xsqlite3_mprintf(tls, - ts+37530, libc.VaList(bp, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) + ts+37577, libc.VaList(bp, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) return SQLITE_ERROR } zSql = sqlite3Fts5Mprintf(tls, bp+64, - ts+37561, + ts+37608, libc.VaList(bp+16, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) if zSql != 0 { *(*int32)(unsafe.Pointer(bp + 64)) = Xsqlite3_prepare_v2(tls, (*Fts5VocabTable)(unsafe.Pointer(pTab)).Fdb, zSql, -1, bp+72, uintptr(0)) @@ -152008,7 +152066,7 @@ *(*uintptr)(unsafe.Pointer(bp + 72)) = uintptr(0) if *(*int32)(unsafe.Pointer(bp + 64)) == SQLITE_OK { (*Sqlite3_vtab)(unsafe.Pointer(pVTab)).FzErrMsg = Xsqlite3_mprintf(tls, - ts+37612, libc.VaList(bp+48, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) + ts+37659, libc.VaList(bp+48, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) *(*int32)(unsafe.Pointer(bp + 64)) = SQLITE_ERROR } } else { @@ -152403,7 +152461,7 @@ func sqlite3Fts5VocabInit(tls *libc.TLS, pGlobal uintptr, db uintptr) int32 { var p uintptr = pGlobal - return Xsqlite3_create_module_v2(tls, db, ts+37638, uintptr(unsafe.Pointer(&fts5Vocab)), p, uintptr(0)) + return Xsqlite3_create_module_v2(tls, db, ts+37685, uintptr(unsafe.Pointer(&fts5Vocab)), p, uintptr(0)) } var fts5Vocab = Sqlite3_module{ @@ -152425,7 +152483,7 @@ // ************* End of stmt.c *********************************************** // Return the source-id for this library func Xsqlite3_sourceid(tls *libc.TLS) uintptr { - return ts + 37648 + return ts + 37695 } func init() { 
@@ -153402,5 +153460,5 @@ *(*func(*libc.TLS, uintptr, int32, uintptr) int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&vfs_template)) + 128)) = rbuVfsGetLastError } -var ts1 = "3.41.0\x00ATOMIC_INTRINSICS=1\x00COMPILER=gcc-10.2.1 20210110\x00DEFAULT_AUTOVACUUM\x00DEFAULT_CACHE_SIZE=-2000\x00DEFAULT_FILE_FORMAT=4\x00DEFAULT_JOURNAL_SIZE_LIMIT=-1\x00DEFAULT_MEMSTATUS=0\x00DEFAULT_MMAP_SIZE=0\x00DEFAULT_PAGE_SIZE=4096\x00DEFAULT_PCACHE_INITSZ=20\x00DEFAULT_RECURSIVE_TRIGGERS\x00DEFAULT_SECTOR_SIZE=4096\x00DEFAULT_SYNCHRONOUS=2\x00DEFAULT_WAL_AUTOCHECKPOINT=1000\x00DEFAULT_WAL_SYNCHRONOUS=2\x00DEFAULT_WORKER_THREADS=0\x00ENABLE_COLUMN_METADATA\x00ENABLE_FTS5\x00ENABLE_GEOPOLY\x00ENABLE_MATH_FUNCTIONS\x00ENABLE_MEMORY_MANAGEMENT\x00ENABLE_OFFSET_SQL_FUNC\x00ENABLE_PREUPDATE_HOOK\x00ENABLE_RBU\x00ENABLE_RTREE\x00ENABLE_SESSION\x00ENABLE_SNAPSHOT\x00ENABLE_STAT4\x00ENABLE_UNLOCK_NOTIFY\x00LIKE_DOESNT_MATCH_BLOBS\x00MALLOC_SOFT_LIMIT=1024\x00MAX_ATTACHED=10\x00MAX_COLUMN=2000\x00MAX_COMPOUND_SELECT=500\x00MAX_DEFAULT_PAGE_SIZE=8192\x00MAX_EXPR_DEPTH=1000\x00MAX_FUNCTION_ARG=127\x00MAX_LENGTH=1000000000\x00MAX_LIKE_PATTERN_LENGTH=50000\x00MAX_MMAP_SIZE=0x7fff0000\x00MAX_PAGE_COUNT=1073741823\x00MAX_PAGE_SIZE=65536\x00MAX_SQL_LENGTH=1000000000\x00MAX_TRIGGER_DEPTH=1000\x00MAX_VARIABLE_NUMBER=32766\x00MAX_VDBE_OP=250000000\x00MAX_WORKER_THREADS=8\x00MUTEX_NOOP\x00SOUNDEX\x00SYSTEM_MALLOC\x00TEMP_STORE=1\x00THREADSAFE=1\x00BINARY\x00ANY\x00BLOB\x00INT\x00INTEGER\x00REAL\x00TEXT\x0020b:20e\x0020c:20e\x0020e\x0040f-21a-21d\x00now\x00local time unavailable\x00second\x00minute\x00hour\x00\x00\x00day\x00\x00\x00\x00month\x00\x00year\x00\x00\x00auto\x00julianday\x00localtime\x00unixepoch\x00utc\x00weekday \x00start of \x00month\x00year\x00day\x00%02d\x00%06.3f\x00%03d\x00%.16g\x00%lld\x00%04d\x00date\x00time\x00datetime\x00strftime\x00current_time\x00current_timestamp\x00current_date\x00failed to allocate %u bytes of memory\x00failed memory resize %u to %u bytes\x00out of memory\x000123456789ABCDEF0123456789abcdef\x00-x0\x00X0\x00%\x00NaN\x00Inf\x00\x00NULL\x00(NULL)\x00.\x00(join-%u)\x00(subquery-%u)\x00thstndrd\x00922337203685477580\x00API call with %s database connection 
pointer\x00unopened\x00invalid\x00Savepoint\x00AutoCommit\x00Transaction\x00Checkpoint\x00JournalMode\x00Vacuum\x00VFilter\x00VUpdate\x00Init\x00Goto\x00Gosub\x00InitCoroutine\x00Yield\x00MustBeInt\x00Jump\x00Once\x00If\x00IfNot\x00IsType\x00Not\x00IfNullRow\x00SeekLT\x00SeekLE\x00SeekGE\x00SeekGT\x00IfNotOpen\x00IfNoHope\x00NoConflict\x00NotFound\x00Found\x00SeekRowid\x00NotExists\x00Last\x00IfSmaller\x00SorterSort\x00Sort\x00Rewind\x00SorterNext\x00Prev\x00Next\x00IdxLE\x00IdxGT\x00IdxLT\x00Or\x00And\x00IdxGE\x00RowSetRead\x00RowSetTest\x00Program\x00FkIfZero\x00IsNull\x00NotNull\x00Ne\x00Eq\x00Gt\x00Le\x00Lt\x00Ge\x00ElseEq\x00IfPos\x00IfNotZero\x00DecrJumpZero\x00IncrVacuum\x00VNext\x00Filter\x00PureFunc\x00Function\x00Return\x00EndCoroutine\x00HaltIfNull\x00Halt\x00Integer\x00Int64\x00String\x00BeginSubrtn\x00Null\x00SoftNull\x00Blob\x00Variable\x00Move\x00Copy\x00SCopy\x00IntCopy\x00FkCheck\x00ResultRow\x00CollSeq\x00AddImm\x00RealAffinity\x00Cast\x00Permutation\x00Compare\x00IsTrue\x00ZeroOrNull\x00Offset\x00Column\x00TypeCheck\x00Affinity\x00MakeRecord\x00Count\x00ReadCookie\x00SetCookie\x00ReopenIdx\x00BitAnd\x00BitOr\x00ShiftLeft\x00ShiftRight\x00Add\x00Subtract\x00Multiply\x00Divide\x00Remainder\x00Concat\x00OpenRead\x00OpenWrite\x00BitNot\x00OpenDup\x00OpenAutoindex\x00String8\x00OpenEphemeral\x00SorterOpen\x00SequenceTest\x00OpenPseudo\x00Close\x00ColumnsUsed\x00SeekScan\x00SeekHit\x00Sequence\x00NewRowid\x00Insert\x00RowCell\x00Delete\x00ResetCount\x00SorterCompare\x00SorterData\x00RowData\x00Rowid\x00NullRow\x00SeekEnd\x00IdxInsert\x00SorterInsert\x00IdxDelete\x00DeferredSeek\x00IdxRowid\x00FinishSeek\x00Destroy\x00Clear\x00ResetSorter\x00CreateBtree\x00SqlExec\x00ParseSchema\x00LoadAnalysis\x00DropTable\x00DropIndex\x00Real\x00DropTrigger\x00IntegrityCk\x00RowSetAdd\x00Param\x00FkCounter\x00MemMax\x00OffsetLimit\x00AggInverse\x00AggStep\x00AggStep1\x00AggValue\x00AggFinal\x00Expire\x00CursorLock\x00CursorUnlock\x00TableLock\x00VBegin\x00VCreate\x00VDestroy\x00VOpen\x00VInitIn\x00VColumn\x00VRename\x00Pagecount\x00MaxPgcnt\x00ClrSubtype\x00FilterAdd\x00Trace\x00CursorHint\x00ReleaseReg\x00Noop\x00Explain\x00Abortable\x00open\x00close\x00access\x00getcwd\x00stat\x00fstat\x00ftruncate\x00fcntl\x00read\x00pread\x00pread64\x00write\x00pwrite\x00pwrite64\x00fchmod\x00fallocate\x00unlink\x00openDirectory\x00mkdir\x00rmdir\x00fchown\x00geteuid\x00mmap\x00munmap\x00mremap\x00getpagesize\x00readlink\x00lstat\x00ioctl\x00attempt to open \"%s\" as file descriptor %d\x00/dev/null\x00os_unix.c:%d: (%d) %s(%s) - %s\x00cannot fstat db file %s\x00file unlinked while open: %s\x00multiple links to file: %s\x00file renamed while open: %s\x00%s\x00full_fsync\x00%s-shm\x00readonly_shm\x00psow\x00unix-excl\x00%s.lock\x00/var/tmp\x00/usr/tmp\x00/tmp\x00SQLITE_TMPDIR\x00TMPDIR\x00%s/etilqs_%llx%c\x00modeof\x00fsync\x00/dev/urandom\x00unix\x00unix-none\x00unix-dotfile\x00memdb\x00memdb(%p,%lld)\x00PRAGMA \"%w\".page_count\x00ATTACH x AS %Q\x00recovered %d pages from %s\x00-journal\x00-wal\x00nolock\x00immutable\x00PRAGMA table_list\x00recovered %d frames from WAL file %s\x00cannot limit WAL size: %s\x00SQLite format 3\x00:memory:\x00@ \x00\n\x00invalid page number %d\x002nd reference to page %d\x00Failed to read ptrmap key=%d\x00Bad ptr map entry key=%d expected=(%d,%d) got=(%d,%d)\x00failed to get page %d\x00freelist leaf count too big on page %d\x00%s is %d but should be %d\x00size\x00overflow list length\x00Page %u: \x00unable to get the page. 
error code=%d\x00btreeInitPage() returns error code %d\x00free space corruption\x00On tree page %u cell %d: \x00On page %u at right child: \x00Offset %d out of range %d..%d\x00Extends off end of page\x00Rowid %lld out of order\x00Child page depth differs\x00Multiple uses for byte %u of page %u\x00Fragmentation of %d bytes reported as %d on page %u\x00Main freelist: \x00max rootpage (%d) disagrees with header (%d)\x00incremental_vacuum enabled with a max rootpage of zero\x00Page %d is never used\x00Pointer map page %d is referenced\x00unknown database %s\x00destination database is in use\x00source and destination must be distinct\x00%!.15g\x00-\x00%s%s\x00k(%d\x00B\x00,%s%s%s\x00N.\x00)\x00%.18s-%s\x00%s(%d)\x00%d\x00(blob)\x00vtab:%p\x00%c%u\x00]\x00program\x00?\x008\x0016LE\x0016BE\x00addr\x00opcode\x00p1\x00p2\x00p3\x00p4\x00p5\x00comment\x00id\x00parent\x00notused\x00detail\x00%.4c%s%.16c\x00MJ delete: %s\x00MJ collide: %s\x00-mj%06X9%02X\x00FOREIGN KEY constraint failed\x00a CHECK constraint\x00a generated column\x00an index\x00non-deterministic use of %s() in %s\x00API called with finalized prepared statement\x00API called with NULL prepared statement\x00string or blob too big\x00bind on a busy prepared statement: [%s]\x00-- \x00'%.*q'\x00zeroblob(%d)\x00x'\x00%02x\x00'\x00%s constraint failed\x00%z: %s\x00abort at %d in [%s]: %s\x00cannot store %s value in %s column %s.%s\x00cannot open savepoint - SQL statements in progress\x00no such savepoint: %s\x00cannot release savepoint - SQL statements in progress\x00cannot commit transaction - SQL statements in progress\x00cannot start a transaction within a transaction\x00cannot rollback - no transaction is active\x00cannot commit - no transaction is active\x00database schema has changed\x00index corruption\x00sqlite_master\x00SELECT*FROM\"%w\".%s WHERE %s ORDER BY rowid\x00too many levels of trigger recursion\x00cannot change %s wal mode from within a transaction\x00into\x00out of\x00database table is locked: %s\x00ValueList\x00-- %s\x00statement aborts at %d: [%s] %s\x00NOT NULL\x00UNIQUE\x00CHECK\x00FOREIGN KEY\x00cannot open value of type %s\x00null\x00real\x00integer\x00no such rowid: %lld\x00cannot open virtual table: %s\x00cannot open table without rowid: %s\x00cannot open view: %s\x00no such column: \"%s\"\x00foreign key\x00indexed\x00cannot open %s column for writing\x00sqlite_\x00sqlite_temp_master\x00sqlite_temp_schema\x00sqlite_schema\x00main\x00*\x00new\x00old\x00excluded\x00misuse of aliased aggregate %s\x00misuse of aliased window function %s\x00row value misused\x00double-quoted string literal: \"%w\"\x00coalesce\x00no such column\x00ambiguous column name\x00%s: %s.%s.%s\x00%s: %s.%s\x00%s: %s\x00partial index WHERE clauses\x00index expressions\x00CHECK constraints\x00generated columns\x00%s prohibited in %s\x00true\x00false\x00the \".\" operator\x00second argument to %#T() must be a constant between 0.0 and 1.0\x00not authorized to use function: %#T\x00non-deterministic functions\x00%#T() may not be used as a window function\x00window\x00aggregate\x00misuse of %s function %#T()\x00no such function: %#T\x00wrong number of arguments to function %#T()\x00FILTER may not be used with non-aggregate %#T()\x00subqueries\x00parameters\x00%r %s BY term out of range - should be between 1 and %d\x00too many terms in ORDER BY clause\x00ORDER\x00%r ORDER BY term does not match any column in the result set\x00too many terms in %s BY clause\x00HAVING clause on a non-aggregate query\x00GROUP\x00aggregate functions are not allowed in the 
GROUP BY clause\x00Expression tree is too large (maximum depth %d)\x00IN(...) element has %d term%s - expected %d\x00s\x000\x00too many arguments on function %T\x00unsafe use of %#T()\x00variable number must be between ?1 and ?%d\x00too many SQL variables\x00%d columns assigned %d values\x00too many columns in %s\x00_ROWID_\x00ROWID\x00OID\x00USING ROWID SEARCH ON TABLE %s FOR IN-OPERATOR\x00USING INDEX %s FOR IN-OPERATOR\x00sub-select returns %d columns - expected %d\x00REUSE LIST SUBQUERY %d\x00%sLIST SUBQUERY %d\x00CORRELATED \x00REUSE SUBQUERY %d\x00%sSCALAR SUBQUERY %d\x001\x000x\x00hex literal too big: %s%#T\x00generated column loop on \"%s\"\x00blob\x00text\x00numeric\x00flexnum\x00none\x00misuse of aggregate: %#T()\x00unknown function: %#T()\x00RAISE() may only be used within a trigger-program\x00B\x00C\x00D\x00E\x00F\x00table %s may not be altered\x00SELECT 1 FROM \"%w\".sqlite_master WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%' AND sqlite_rename_test(%Q, sql, type, name, %d, %Q, %d)=NULL \x00SELECT 1 FROM temp.sqlite_master WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%' AND sqlite_rename_test(%Q, sql, type, name, 1, %Q, %d)=NULL \x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_quotefix(%Q, sql)WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%'\x00UPDATE temp.sqlite_master SET sql = sqlite_rename_quotefix('temp', sql)WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%'\x00there is already another table or index with this name: %s\x00table\x00view %s may not be altered\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, %d) WHERE (type!='index' OR tbl_name=%Q COLLATE nocase)AND name NOT LIKE 'sqliteX_%%' ESCAPE 'X'\x00UPDATE %Q.sqlite_master SET tbl_name = %Q, name = CASE WHEN type='table' THEN %Q WHEN name LIKE 'sqliteX_autoindex%%' ESCAPE 'X' AND type='index' THEN 'sqlite_autoindex_' || %Q || substr(name,%d+18) ELSE name END WHERE tbl_name=%Q COLLATE nocase AND (type='table' OR type='index' OR type='trigger');\x00sqlite_sequence\x00UPDATE \"%w\".sqlite_sequence set name = %Q WHERE name = %Q\x00UPDATE sqlite_temp_schema SET sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, 1), tbl_name = CASE WHEN tbl_name=%Q COLLATE nocase AND sqlite_rename_test(%Q, sql, type, name, 1, 'after rename', 0) THEN %Q ELSE tbl_name END WHERE type IN ('view', 'trigger')\x00after rename\x00SELECT raise(ABORT,%Q) FROM \"%w\".\"%w\"\x00Cannot add a PRIMARY KEY column\x00Cannot add a UNIQUE column\x00Cannot add a REFERENCES column with non-NULL default value\x00Cannot add a NOT NULL column with default value NULL\x00Cannot add a column with non-constant default\x00cannot add a STORED column\x00UPDATE \"%w\".sqlite_master SET sql = printf('%%.%ds, ',sql) || %Q || substr(sql,1+length(printf('%%.%ds',sql))) WHERE type = 'table' AND name = %Q\x00SELECT CASE WHEN quick_check GLOB 'CHECK*' THEN raise(ABORT,'CHECK constraint failed') ELSE raise(ABORT,'NOT NULL constraint failed') END FROM pragma_quick_check(%Q,%Q) WHERE quick_check GLOB 'CHECK*' OR quick_check GLOB 'NULL*'\x00virtual tables may not be altered\x00Cannot add a column to a view\x00sqlite_altertab_%s\x00view\x00virtual table\x00cannot %s %s \"%s\"\x00drop column from\x00rename columns of\x00no such column: \"%T\"\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, %d) WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND (type != 'index' OR 
tbl_name = %Q)\x00UPDATE temp.sqlite_master SET sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, 1) WHERE type IN ('trigger', 'view')\x00error in %s %s%s%s: %s\x00 \x00CREATE \x00\"%w\" \x00%Q%s\x00%.*s%s\x00cannot drop %s column: \"%s\"\x00PRIMARY KEY\x00cannot drop column \"%s\": no other columns exist\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_drop_column(%d, sql, %d) WHERE (type=='table' AND tbl_name=%Q COLLATE nocase)\x00after drop column\x00sqlite_rename_column\x00sqlite_rename_table\x00sqlite_rename_test\x00sqlite_drop_column\x00sqlite_rename_quotefix\x00CREATE TABLE %Q.%s(%s)\x00DELETE FROM %Q.%s WHERE %s=%Q\x00DELETE FROM %Q.%s\x00sqlite_stat1\x00tbl,idx,stat\x00sqlite_stat4\x00tbl,idx,neq,nlt,ndlt,sample\x00sqlite_stat3\x00stat_init\x00stat_push\x00%llu\x00 %llu\x00%llu \x00stat_get\x00sqlite\\_%\x00BBB\x00idx\x00tbl\x00unordered*\x00sz=[0-9]*\x00noskipscan*\x00SELECT idx,count(*) FROM %Q.sqlite_stat4 GROUP BY idx\x00SELECT idx,neq,nlt,ndlt,sample FROM %Q.sqlite_stat4\x00SELECT tbl,idx,stat FROM %Q.sqlite_stat1\x00x\x00\x00too many attached databases - max %d\x00database %s is already in use\x00database is already attached\x00attached databases must use the same text encoding as main database\x00unable to open database: %s\x00no such database: %s\x00cannot detach database %s\x00database %s is locked\x00sqlite_detach\x00sqlite_attach\x00%s cannot use variables\x00%s %T cannot reference objects in database %s\x00authorizer malfunction\x00%s.%s\x00%s.%z\x00access to %z is prohibited\x00not authorized\x00pragma_\x00no such view\x00no such table\x00corrupt database\x00unknown database %T\x00object name reserved for internal use: %s\x00temporary table name must be unqualified\x00%s %T already exists\x00there is already an index named %s\x00sqlite_returning\x00cannot use RETURNING in a trigger\x00too many columns on %s\x00always\x00generated\x00duplicate column name: %s\x00default value of column [%s] is not constant\x00cannot use DEFAULT on a generated column\x00generated columns cannot be part of the PRIMARY KEY\x00table \"%s\" has more than one primary key\x00AUTOINCREMENT is only allowed on an INTEGER PRIMARY KEY\x00virtual tables cannot use computed columns\x00virtual\x00stored\x00error in generated column \"%s\"\x00,\x00\n \x00,\n \x00\n)\x00CREATE TABLE \x00 TEXT\x00 NUM\x00 INT\x00 REAL\x00unknown datatype for %s.%s: \"%s\"\x00missing datatype for %s.%s\x00AUTOINCREMENT not allowed on WITHOUT ROWID tables\x00PRIMARY KEY missing on table %s\x00must have at least one non-generated column\x00TABLE\x00VIEW\x00CREATE %s %.*s\x00UPDATE %Q.sqlite_master SET type='%s', name=%Q, tbl_name=%Q, rootpage=#%d, sql=%Q WHERE rowid=#%d\x00CREATE TABLE %Q.sqlite_sequence(name,seq)\x00tbl_name='%q' AND type!='trigger'\x00parameters are not allowed in views\x00view %s is circularly defined\x00corrupt schema\x00UPDATE %Q.sqlite_master SET rootpage=%d WHERE #%d AND rootpage=#%d\x00sqlite_stat%d\x00DELETE FROM %Q.sqlite_sequence WHERE name=%Q\x00DELETE FROM %Q.sqlite_master WHERE tbl_name=%Q and type!='trigger'\x00table %s may not be dropped\x00use DROP TABLE to delete table %s\x00use DROP VIEW to delete view %s\x00foreign key on %s should reference only one column of table %T\x00number of columns in foreign key does not match the number of columns in the referenced table\x00unknown column \"%s\" in foreign key definition\x00unsupported use of NULLS %s\x00FIRST\x00LAST\x00index\x00cannot create a TEMP index on non-TEMP table \"%s\"\x00table %s may not be indexed\x00views may not be 
indexed\x00virtual tables may not be indexed\x00there is already a table named %s\x00index %s already exists\x00sqlite_autoindex_%s_%d\x00expressions prohibited in PRIMARY KEY and UNIQUE constraints\x00conflicting ON CONFLICT clauses specified\x00invalid rootpage\x00CREATE%s INDEX %.*s\x00 UNIQUE\x00INSERT INTO %Q.sqlite_master VALUES('index',%Q,%Q,#%d,%Q);\x00name='%q' AND type='index'\x00no such index: %S\x00index associated with UNIQUE or PRIMARY KEY constraint cannot be dropped\x00DELETE FROM %Q.sqlite_master WHERE name=%Q AND type='index'\x00too many FROM clause terms, max: %d\x00a JOIN clause is required before %s\x00ON\x00USING\x00BEGIN\x00ROLLBACK\x00COMMIT\x00RELEASE\x00unable to open a temporary database file for storing temporary tables\x00index '%q'\x00, \x00%s.rowid\x00unable to identify the object to be reindexed\x00duplicate WITH table name: %s\x00no such collation sequence: %s\x00unsafe use of virtual table \"%s\"\x00table %s may not be modified\x00cannot modify %s because it is a view\x00rows deleted\x00integer overflow\x00%.*f\x00LIKE or GLOB pattern too complex\x00ESCAPE expression must be a single character\x00%!.20e\x00%Q\x00?000\x00MATCH\x00like\x00implies_nonnull_row\x00expr_compare\x00expr_implies_expr\x00affinity\x00soundex\x00load_extension\x00sqlite_compileoption_used\x00sqlite_compileoption_get\x00unlikely\x00likelihood\x00likely\x00sqlite_offset\x00ltrim\x00rtrim\x00trim\x00min\x00max\x00typeof\x00subtype\x00length\x00instr\x00printf\x00format\x00unicode\x00char\x00abs\x00round\x00upper\x00lower\x00hex\x00unhex\x00ifnull\x00random\x00randomblob\x00nullif\x00sqlite_version\x00sqlite_source_id\x00sqlite_log\x00quote\x00last_insert_rowid\x00changes\x00total_changes\x00replace\x00zeroblob\x00substr\x00substring\x00sum\x00total\x00avg\x00count\x00group_concat\x00glob\x00ceil\x00ceiling\x00floor\x00trunc\x00ln\x00log\x00log10\x00log2\x00exp\x00pow\x00power\x00mod\x00acos\x00asin\x00atan\x00atan2\x00cos\x00sin\x00tan\x00cosh\x00sinh\x00tanh\x00acosh\x00asinh\x00atanh\x00sqrt\x00radians\x00degrees\x00pi\x00sign\x00iif\x00foreign key mismatch - \"%w\" referencing \"%w\"\x00cannot INSERT into generated column \"%s\"\x00table %S has no column named %s\x00table %S has %d columns but %d values were supplied\x00%d values for %d columns\x00UPSERT not implemented for virtual table \"%s\"\x00cannot UPSERT a view\x00rows inserted\x00sqlite3_extension_init\x00sqlite3_\x00lib\x00_init\x00no entry point [%s] in shared library [%s]\x00error during initialization: %s\x00unable to open shared library [%.*s]\x00so\x00automatic extension loading failed: 
%s\x00seq\x00from\x00to\x00on_update\x00on_delete\x00match\x00cid\x00name\x00type\x00notnull\x00dflt_value\x00pk\x00hidden\x00schema\x00ncol\x00wr\x00strict\x00seqno\x00desc\x00coll\x00key\x00builtin\x00enc\x00narg\x00flags\x00wdth\x00hght\x00flgs\x00unique\x00origin\x00partial\x00rowid\x00fkid\x00file\x00busy\x00checkpointed\x00database\x00status\x00cache_size\x00timeout\x00analysis_limit\x00application_id\x00auto_vacuum\x00automatic_index\x00busy_timeout\x00cache_spill\x00case_sensitive_like\x00cell_size_check\x00checkpoint_fullfsync\x00collation_list\x00compile_options\x00count_changes\x00data_version\x00database_list\x00default_cache_size\x00defer_foreign_keys\x00empty_result_callbacks\x00encoding\x00foreign_key_check\x00foreign_key_list\x00foreign_keys\x00freelist_count\x00full_column_names\x00fullfsync\x00function_list\x00hard_heap_limit\x00ignore_check_constraints\x00incremental_vacuum\x00index_info\x00index_list\x00index_xinfo\x00integrity_check\x00journal_mode\x00journal_size_limit\x00legacy_alter_table\x00locking_mode\x00max_page_count\x00mmap_size\x00module_list\x00optimize\x00page_count\x00page_size\x00pragma_list\x00query_only\x00quick_check\x00read_uncommitted\x00recursive_triggers\x00reverse_unordered_selects\x00schema_version\x00secure_delete\x00short_column_names\x00shrink_memory\x00soft_heap_limit\x00synchronous\x00table_info\x00table_list\x00table_xinfo\x00temp_store\x00temp_store_directory\x00threads\x00trusted_schema\x00user_version\x00wal_autocheckpoint\x00wal_checkpoint\x00writable_schema\x00onoffalseyestruextrafull\x00exclusive\x00normal\x00full\x00incremental\x00memory\x00temporary storage cannot be changed from within a transaction\x00SET NULL\x00SET DEFAULT\x00CASCADE\x00RESTRICT\x00NO ACTION\x00delete\x00persist\x00off\x00truncate\x00wal\x00w\x00a\x00sissii\x00utf8\x00utf16le\x00utf16be\x00-%T\x00fast\x00not a writable directory\x00Safety level may not be changed inside a transaction\x00reset\x00issisii\x00issisi\x00SELECT*FROM\"%w\"\x00shadow\x00sssiii\x00iisX\x00isiX\x00c\x00u\x00isisi\x00iss\x00is\x00iissssss\x00NONE\x00siX\x00*** in database %s ***\n\x00row not in PRIMARY KEY order for %s\x00NULL value in %s.%s\x00non-%s value in %s.%s\x00NUMERIC value in %s.%s\x00C\x00TEXT value in %s.%s\x00CHECK constraint failed in %s\x00row \x00 missing from index \x00 values differ from index \x00non-unique entry in index \x00wrong # of entries in index \x00ok\x00unsupported encoding: %s\x00restart\x00ANALYZE \"%w\".\"%w\"\x00UTF8\x00UTF-8\x00UTF-16le\x00UTF-16be\x00UTF16le\x00UTF16be\x00UTF-16\x00UTF16\x00CREATE TABLE x\x00%c\"%s\"\x00(\"%s\"\x00,arg HIDDEN\x00,schema HIDDEN\x00PRAGMA \x00%Q.\x00=%Q\x00error in %s %s after %s: %s\x00malformed database schema (%s)\x00%z - %s\x00rename\x00drop column\x00add column\x00orphan index\x00CREATE TABLE x(type text,name text,tbl_name text,rootpage int,sql text)\x00unsupported file format\x00SELECT*FROM\"%w\".%s ORDER BY rowid\x00database schema is locked: %s\x00statement too long\x00unknown join type: %T%s%T%s%T\x00naturaleftouterightfullinnercross\x00a NATURAL join may not have an ON or USING clause\x00cannot join using column %s - column not present in both tables\x00ambiguous reference to %s in USING()\x00UNION ALL\x00INTERSECT\x00EXCEPT\x00UNION\x00USE TEMP B-TREE FOR %s\x00USE TEMP B-TREE FOR %sORDER BY\x00RIGHT PART OF \x00column%d\x00%.*z:%u\x00NUM\x00cannot use window functions in recursive queries\x00recursive aggregate queries not supported\x00SETUP\x00RECURSIVE STEP\x00SCAN %d CONSTANT ROW%s\x00S\x00COMPOUND 
QUERY\x00LEFT-MOST SUBQUERY\x00%s USING TEMP B-TREE\x00all VALUES must have the same number of terms\x00SELECTs to the left and right of %s do not have the same number of result columns\x00MERGE (%s)\x00LEFT\x00RIGHT\x00no such index: %s\x00'%s' is not a function\x00no such index: \"%s\"\x00multiple references to recursive table: %s\x00circular reference: %s\x00table %s has %d values for %d columns\x00multiple recursive references: %s\x00recursive reference in a subquery: %s\x00%!S\x00too many references to \"%s\": max 65535\x00access to view \"%s\" prohibited\x00..%s\x00%s.%s.%s\x00no such table: %s\x00no tables specified\x00too many columns in result set\x00DISTINCT aggregates must have exactly one argument\x00USE TEMP B-TREE FOR %s(DISTINCT)\x00SCAN %s%s%s\x00 USING COVERING INDEX \x00target object/alias may not appear in FROM clause: %s\x00expected %d columns for '%s' but got %d\x00CO-ROUTINE %!S\x00MATERIALIZE %!S\x00DISTINCT\x00GROUP BY\x00sqlite3_get_table() called with two or more incompatible queries\x00temporary trigger may not have qualified name\x00trigger\x00cannot create triggers on virtual tables\x00trigger %T already exists\x00cannot create trigger on system table\x00cannot create %s trigger on view: %S\x00BEFORE\x00AFTER\x00cannot create INSTEAD OF trigger on table: %S\x00trigger \"%s\" may not write to shadow table \"%s\"\x00INSERT INTO %Q.sqlite_master VALUES('trigger',%Q,%Q,0,'CREATE TRIGGER %q')\x00type='trigger' AND name='%q'\x00no such trigger: %S\x00DELETE FROM %Q.sqlite_master WHERE name=%Q AND type='trigger'\x00%s RETURNING is not available on virtual tables\x00DELETE\x00UPDATE\x00RETURNING may not use \"TABLE.*\" wildcards\x00-- TRIGGER %s\x00cannot UPDATE generated column \"%s\"\x00no such column: %s\x00rows updated\x00%r \x00%sON CONFLICT clause does not match any PRIMARY KEY or UNIQUE constraint\x00CRE\x00INS\x00cannot VACUUM from within a transaction\x00cannot VACUUM - SQL statements in progress\x00non-text filename\x00ATTACH %Q AS vacuum_db\x00output file already exists\x00SELECT sql FROM \"%w\".sqlite_schema WHERE type='table'AND name<>'sqlite_sequence' AND coalesce(rootpage,1)>0\x00SELECT sql FROM \"%w\".sqlite_schema WHERE type='index'\x00SELECT'INSERT INTO vacuum_db.'||quote(name)||' SELECT*FROM\"%w\".'||quote(name)FROM vacuum_db.sqlite_schema WHERE type='table'AND coalesce(rootpage,1)>0\x00INSERT INTO vacuum_db.sqlite_schema SELECT*FROM \"%w\".sqlite_schema WHERE type IN('view','trigger') OR(type='table'AND rootpage=0)\x00CREATE VIRTUAL TABLE %T\x00UPDATE %Q.sqlite_master SET type='table', name=%Q, tbl_name=%Q, rootpage=0, sql=%Q WHERE rowid=#%d\x00name=%Q AND sql=%Q\x00vtable constructor called recursively: %s\x00vtable constructor failed: %s\x00vtable constructor did not declare schema: %s\x00no such module: %s\x00\x00 AND \x00(\x00 (\x00%s=?\x00ANY(%s)\x00>\x00<\x00%s %S\x00SEARCH\x00SCAN\x00AUTOMATIC PARTIAL COVERING INDEX\x00AUTOMATIC COVERING INDEX\x00COVERING INDEX %s\x00INDEX %s\x00 USING \x00 USING INTEGER PRIMARY KEY (%s\x00>? 
AND %s\x00%c?)\x00 VIRTUAL TABLE INDEX %d:%s\x00 LEFT-JOIN\x00BLOOM FILTER ON %S (\x00rowid=?\x00MULTI-INDEX OR\x00INDEX %d\x00RIGHT-JOIN %s\x00regexp\x00ON clause references tables to its right\x00NOCASE\x00too many arguments on %s() - max %d\x00automatic index on %s(%s)\x00auto-index\x00%s.xBestIndex malfunction\x00abbreviated query algorithm search\x00no query solution\x00at most %d tables in a join\x00SCAN CONSTANT ROW\x00second argument to nth_value must be a positive integer\x00argument of ntile must be a positive integer\x00row_number\x00dense_rank\x00rank\x00percent_rank\x00cume_dist\x00ntile\x00last_value\x00nth_value\x00first_value\x00lead\x00lag\x00no such window: %s\x00RANGE with offset PRECEDING/FOLLOWING requires one ORDER BY expression\x00FILTER clause may only be used with aggregate window functions\x00misuse of aggregate: %s()\x00unsupported frame specification\x00PARTITION clause\x00ORDER BY clause\x00frame specification\x00cannot override %s of window: %s\x00DISTINCT is not supported for window functions\x00frame starting offset must be a non-negative integer\x00frame ending offset must be a non-negative integer\x00frame starting offset must be a non-negative number\x00frame ending offset must be a non-negative number\x00%s clause should come after %s not before\x00ORDER BY\x00LIMIT\x00too many terms in compound SELECT\x00syntax error after column name \"%.*s\"\x00parser stack overflow\x00unknown table option: %.*s\x00set list\x00near \"%T\": syntax error\x00qualified table names are not allowed on INSERT, UPDATE, and DELETE statements within triggers\x00the INDEXED BY clause is not allowed on UPDATE or DELETE statements within triggers\x00the NOT INDEXED clause is not allowed on UPDATE or DELETE statements within triggers\x00incomplete input\x00unrecognized token: \"%T\"\x00%s in \"%s\"\x00create\x00temp\x00temporary\x00end\x00explain\x00unable to close due to unfinalized statements or unfinished backups\x00unknown error\x00abort due to ROLLBACK\x00another row available\x00no more rows available\x00not an error\x00SQL logic error\x00access permission denied\x00query aborted\x00database is locked\x00database table is locked\x00attempt to write a readonly database\x00interrupted\x00disk I/O error\x00database disk image is malformed\x00unknown operation\x00database or disk is full\x00unable to open database file\x00locking protocol\x00constraint failed\x00datatype mismatch\x00bad parameter or other API misuse\x00authorization denied\x00column index out of range\x00file is not a database\x00notification message\x00warning message\x00unable to delete/modify user-function due to active statements\x00unable to use function %s in the requested context\x00unknown database: %s\x00unable to delete/modify collation sequence due to active statements\x00file:\x00localhost\x00invalid uri authority: %.*s\x00vfs\x00cache\x00mode\x00no such %s mode: %s\x00%s mode not allowed: %s\x00no such vfs: %s\x00shared\x00private\x00ro\x00rw\x00rwc\x00RTRIM\x00\x00\x00\x00%s at line %d of [%.10s]\x00database corruption\x00misuse\x00cannot open file\x00no such table column: %s.%s\x00SQLITE_\x00database is deadlocked\x00array\x00object\x000123456789abcdef\x00JSON cannot hold BLOB values\x00malformed JSON\x00[0]\x00JSON path error near '%q'\x00json_%s() needs an odd number of arguments\x00$[\x00$.\x00json_object() requires an even number of arguments\x00json_object() labels must be TEXT\x00set\x00insert\x00[]\x00{}\x00CREATE TABLE x(key,value,type,atom,id,parent,fullkey,path,json HIDDEN,root 
HIDDEN)\x00.%.*s\x00[%d]\x00$\x00json\x00json_array\x00json_array_length\x00json_extract\x00->\x00->>\x00json_insert\x00json_object\x00json_patch\x00json_quote\x00json_remove\x00json_replace\x00json_set\x00json_type\x00json_valid\x00json_group_array\x00json_group_object\x00json_each\x00json_tree\x00%s_node\x00data\x00DROP TABLE '%q'.'%q_node';DROP TABLE '%q'.'%q_rowid';DROP TABLE '%q'.'%q_parent';\x00RtreeMatchArg\x00SELECT * FROM %Q.%Q\x00UNIQUE constraint failed: %s.%s\x00rtree constraint failed: %s.(%s<=%s)\x00ALTER TABLE %Q.'%q_node' RENAME TO \"%w_node\";ALTER TABLE %Q.'%q_parent' RENAME TO \"%w_parent\";ALTER TABLE %Q.'%q_rowid' RENAME TO \"%w_rowid\";\x00SELECT stat FROM %Q.sqlite_stat1 WHERE tbl = '%q_rowid'\x00node\x00CREATE TABLE \"%w\".\"%w_rowid\"(rowid INTEGER PRIMARY KEY,nodeno\x00,a%d\x00);CREATE TABLE \"%w\".\"%w_node\"(nodeno INTEGER PRIMARY KEY,data);\x00CREATE TABLE \"%w\".\"%w_parent\"(nodeno INTEGER PRIMARY KEY,parentnode);\x00INSERT INTO \"%w\".\"%w_node\"VALUES(1,zeroblob(%d))\x00INSERT INTO\"%w\".\"%w_rowid\"(rowid,nodeno)VALUES(?1,?2)ON CONFLICT(rowid)DO UPDATE SET nodeno=excluded.nodeno\x00SELECT * FROM \"%w\".\"%w_rowid\" WHERE rowid=?1\x00UPDATE \"%w\".\"%w_rowid\"SET \x00a%d=coalesce(?%d,a%d)\x00a%d=?%d\x00 WHERE rowid=?1\x00INSERT OR REPLACE INTO '%q'.'%q_node' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_node' WHERE nodeno = ?1\x00SELECT nodeno FROM '%q'.'%q_rowid' WHERE rowid = ?1\x00INSERT OR REPLACE INTO '%q'.'%q_rowid' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_rowid' WHERE rowid = ?1\x00SELECT parentnode FROM '%q'.'%q_parent' WHERE nodeno = ?1\x00INSERT OR REPLACE INTO '%q'.'%q_parent' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_parent' WHERE nodeno = ?1\x00PRAGMA %Q.page_size\x00SELECT length(data) FROM '%q'.'%q_node' WHERE nodeno = 1\x00undersize RTree blobs in \"%q_node\"\x00Wrong number of columns for an rtree table\x00Too few columns for an rtree table\x00Too many columns for an rtree table\x00Auxiliary rtree columns must be last\x00CREATE TABLE x(%.*s INT\x00,%.*s\x00);\x00,%.*s REAL\x00,%.*s INT\x00{%lld\x00 %g\x00}\x00Invalid argument to rtreedepth()\x00%z%s%z\x00SELECT data FROM %Q.'%q_node' WHERE nodeno=?\x00Node %lld missing from database\x00SELECT parentnode FROM %Q.'%q_parent' WHERE nodeno=?1\x00SELECT nodeno FROM %Q.'%q_rowid' WHERE rowid=?1\x00Mapping (%lld -> %lld) missing from %s table\x00%_rowid\x00%_parent\x00Found (%lld -> %lld) in %s table, expected (%lld -> %lld)\x00Dimension %d of cell %d on node %lld is corrupt\x00Dimension %d of cell %d on node %lld is corrupt relative to parent\x00Node %lld is too small (%d bytes)\x00Rtree depth out of range (%d)\x00Node %lld is too small for cell count of %d (%d bytes)\x00SELECT count(*) FROM %Q.'%q%s'\x00Wrong number of entries in %%%s table - expected %lld, actual %lld\x00SELECT * FROM %Q.'%q_rowid'\x00Schema corrupt or not an rtree\x00_rowid\x00_parent\x00END\x00wrong number of arguments to function rtreecheck()\x00[\x00[%!g,%!g],\x00[%!g,%!g]]\x00\x00CREATE TABLE x(_shape\x00,%s\x00rtree\x00fullscan\x00_shape does not contain a valid polygon\x00geopoly_overlap\x00geopoly_within\x00geopoly\x00geopoly_area\x00geopoly_blob\x00geopoly_json\x00geopoly_svg\x00geopoly_contains_point\x00geopoly_debug\x00geopoly_bbox\x00geopoly_xform\x00geopoly_regular\x00geopoly_ccw\x00geopoly_group_bbox\x00rtreenode\x00rtreedepth\x00rtreecheck\x00rtree_i32\x00corrupt fossil delta\x00DROP TRIGGER IF EXISTS temp.rbu_insert_tr;DROP TRIGGER IF EXISTS temp.rbu_update1_tr;DROP TRIGGER IF EXISTS temp.rbu_update2_tr;DROP TRIGGER IF 
EXISTS temp.rbu_delete_tr;\x00SELECT rbu_target_name(name, type='view') AS target, name FROM sqlite_schema WHERE type IN ('table', 'view') AND target IS NOT NULL %s ORDER BY name\x00AND rootpage!=0 AND rootpage IS NOT NULL\x00SELECT name, rootpage, sql IS NULL OR substr(8, 6)=='UNIQUE' FROM main.sqlite_schema WHERE type='index' AND tbl_name = ?\x00SELECT (sql COLLATE nocase BETWEEN 'CREATE VIRTUAL' AND 'CREATE VIRTUAM'), rootpage FROM sqlite_schema WHERE name=%Q\x00PRAGMA index_list=%Q\x00SELECT rootpage FROM sqlite_schema WHERE name = %Q\x00PRAGMA table_info=%Q\x00PRAGMA main.index_list = %Q\x00PRAGMA main.index_xinfo = %Q\x00SELECT * FROM '%q'\x00rbu_\x00rbu_rowid\x00table %q %s rbu_rowid column\x00may not have\x00requires\x00PRAGMA table_info(%Q)\x00column missing from %q: %s\x00%z%s\"%w\"\x00%z%s%s\"%w\"%s\x00SELECT max(_rowid_) FROM \"%s%w\"\x00 WHERE _rowid_ > %lld \x00 DESC\x00quote(\x00||','||\x00SELECT %s FROM \"%s%w\" ORDER BY %s LIMIT 1\x00 WHERE (%s) > (%s) \x00_rowid_\x00%z%s \"%w\" COLLATE %Q\x00%z%s \"rbu_imp_%d%w\" COLLATE %Q DESC\x00%z%s quote(\"rbu_imp_%d%w\")\x00SELECT %s FROM \"rbu_imp_%w\" ORDER BY %s LIMIT 1\x00%z%s%s\x00(%s) > (%s)\x00%z%s(%.*s) COLLATE %Q\x00%z%s\"%w\" COLLATE %Q\x00%z%s\"rbu_imp_%d%w\"%s\x00%z%s\"rbu_imp_%d%w\" %s COLLATE %Q\x00%z%s\"rbu_imp_%d%w\" IS ?\x00%z%s%s.\"%w\"\x00%z%sNULL\x00%z, %s._rowid_\x00_rowid_ = ?%d\x00%z%sc%d=?%d\x00_rowid_ = (SELECT id FROM rbu_imposter2 WHERE %z)\x00%z%s\"%w\"=?%d\x00invalid rbu_control value\x00%z%s\"%w\"=rbu_delta(\"%w\", ?%d)\x00%z%s\"%w\"=rbu_fossil_delta(\"%w\", ?%d)\x00PRIMARY KEY(\x00%z%s\"%w\"%s\x00%z)\x00SELECT name FROM sqlite_schema WHERE rootpage = ?\x00%z%sc%d %s COLLATE %Q\x00%z%sc%d%s\x00%z, id INTEGER\x00CREATE TABLE rbu_imposter2(%z, PRIMARY KEY(%z)) WITHOUT ROWID\x00PRIMARY KEY \x00%z%s\"%w\" %s %sCOLLATE %Q%s\x00 NOT NULL\x00%z, %z\x00CREATE TABLE \"rbu_imp_%w\"(%z)%s\x00 WITHOUT ROWID\x00INSERT INTO %s.'rbu_tmp_%q'(rbu_control,%s%s) VALUES(%z)\x00SELECT trim(sql) FROM sqlite_schema WHERE type='index' AND name=?\x00 LIMIT -1 OFFSET %d\x00CREATE TABLE \"rbu_imp_%w\"( %s, PRIMARY KEY( %s ) ) WITHOUT ROWID\x00INSERT INTO \"rbu_imp_%w\" VALUES(%s)\x00DELETE FROM \"rbu_imp_%w\" WHERE %s\x00SELECT %s, 0 AS rbu_control FROM '%q' %s %s %s ORDER BY %s%s\x00AND\x00WHERE\x00SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' %s ORDER BY %s%s\x00SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' %s UNION ALL SELECT %s, rbu_control FROM '%q' %s %s typeof(rbu_control)='integer' AND rbu_control!=1 ORDER BY %s%s\x00rbu_imp_\x00INSERT INTO \"%s%w\"(%s%s) VALUES(%s)\x00, _rowid_\x00DELETE FROM \"%s%w\" WHERE %s\x00, rbu_rowid\x00CREATE TABLE IF NOT EXISTS %s.'rbu_tmp_%q' AS SELECT *%s FROM '%q' WHERE 0;\x00, 0 AS rbu_rowid\x00CREATE TEMP TRIGGER rbu_delete_tr BEFORE DELETE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(3, %s);END;CREATE TEMP TRIGGER rbu_update1_tr BEFORE UPDATE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(3, %s);END;CREATE TEMP TRIGGER rbu_update2_tr AFTER UPDATE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(4, %s);END;\x00CREATE TEMP TRIGGER rbu_insert_tr AFTER INSERT ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(0, %s);END;\x00,_rowid_ \x00,rbu_rowid\x00SELECT %s,%s rbu_control%s FROM '%q'%s %s %s %s\x000 AS \x00UPDATE \"%s%w\" SET %s WHERE %s\x00SELECT k, v FROM %s.rbu_state\x00file:///%s-vacuum?modeof=%s\x00ATTACH %Q AS stat\x00CREATE TABLE IF NOT EXISTS %s.rbu_state(k INTEGER PRIMARY KEY, v)\x00cannot vacuum wal mode 
database\x00file:%s-vactmp?rbu_memory=1%s%s\x00&\x00rbu_tmp_insert\x00rbu_fossil_delta\x00rbu_target_name\x00SELECT * FROM sqlite_schema\x00rbu vfs not found\x00PRAGMA main.wal_checkpoint=restart\x00rbu_exclusive_checkpoint\x00%s-oal\x00%s-wal\x00PRAGMA schema_version\x00PRAGMA schema_version = %d\x00INSERT OR REPLACE INTO %s.rbu_state(k, v) VALUES (%d, %d), (%d, %Q), (%d, %Q), (%d, %d), (%d, %d), (%d, %lld), (%d, %lld), (%d, %lld), (%d, %lld), (%d, %Q) \x00PRAGMA main.%s\x00PRAGMA main.%s = %d\x00PRAGMA writable_schema=1\x00SELECT sql FROM sqlite_schema WHERE sql!='' AND rootpage!=0 AND name!='sqlite_sequence' ORDER BY type DESC\x00SELECT * FROM sqlite_schema WHERE rootpage=0 OR rootpage IS NULL\x00INSERT INTO sqlite_schema VALUES(?,?,?,?,?)\x00PRAGMA writable_schema=0\x00DELETE FROM %s.'rbu_tmp_%q'\x00rbu_state mismatch error\x00rbu_vfs_%d\x00SELECT count(*) FROM sqlite_schema WHERE type='index' AND tbl_name = %Q\x00rbu_index_cnt\x00SELECT 1 FROM sqlite_schema WHERE tbl_name = 'rbu_count'\x00SELECT sum(cnt * (1 + rbu_index_cnt(rbu_target_name(tbl))))FROM rbu_count\x00cannot update wal mode database\x00database modified during rbu %s\x00vacuum\x00update\x00BEGIN IMMEDIATE\x00PRAGMA journal_mode=off\x00-vactmp\x00DELETE FROM stat.rbu_state\x00rbu/zipvfs setup error\x00rbu(%s)/%z\x00rbu_memory\x00SELECT 0, 'tbl', '', 0, '', 1 UNION ALL SELECT 1, 'idx', '', 0, '', 2 UNION ALL SELECT 2, 'stat', '', 0, '', 0\x00PRAGMA '%q'.table_info('%q')\x00%z%s\"%w\".\"%w\".\"%w\"=\"%w\".\"%w\".\"%w\"\x00%z%s\"%w\".\"%w\".\"%w\" IS NOT \"%w\".\"%w\".\"%w\"\x00 OR \x00SELECT * FROM \"%w\".\"%w\" WHERE NOT EXISTS ( SELECT 1 FROM \"%w\".\"%w\" WHERE %s)\x00SELECT * FROM \"%w\".\"%w\", \"%w\".\"%w\" WHERE %s AND (%z)\x00table schemas do not match\x00SELECT tbl, ?2, stat FROM %Q.sqlite_stat1 WHERE tbl IS ?1 AND idx IS (CASE WHEN ?2=X'' THEN NULL ELSE ?2 END)\x00SELECT * FROM \x00 WHERE \x00 IS ?\x00SAVEPOINT changeset\x00RELEASE changeset\x00UPDATE main.\x00 SET \x00 = ?\x00idx IS CASE WHEN length(?4)=0 AND typeof(?4)='blob' THEN NULL ELSE ?4 END \x00DELETE FROM main.\x00 AND (?\x00AND \x00INSERT INTO main.\x00) VALUES(?\x00, ?\x00INSERT INTO main.sqlite_stat1 VALUES(?1, CASE WHEN length(?2)=0 AND typeof(?2)='blob' THEN NULL ELSE ?2 END, ?3)\x00DELETE FROM main.sqlite_stat1 WHERE tbl=?1 AND idx IS CASE WHEN length(?2)=0 AND typeof(?2)='blob' THEN NULL ELSE ?2 END AND (?4 OR stat IS ?3)\x00SAVEPOINT replace_op\x00RELEASE replace_op\x00SAVEPOINT changeset_apply\x00PRAGMA defer_foreign_keys = 1\x00sqlite3changeset_apply(): no such table: %s\x00sqlite3changeset_apply(): table %s has %d columns, expected %d or more\x00sqlite3changeset_apply(): primary key mismatch for table %s\x00PRAGMA defer_foreign_keys = 0\x00RELEASE changeset_apply\x00ROLLBACK TO changeset_apply\x00fts5: parser stack overflow\x00fts5: syntax error near \"%.*s\"\x00%z%.*s\x00wrong number of arguments to function highlight()\x00wrong number of arguments to function snippet()\x00snippet\x00highlight\x00bm25\x00prefix\x00malformed prefix=... directive\x00too many prefix indexes (max %d)\x00prefix length out of range (max 999)\x00tokenize\x00multiple tokenize=... directives\x00parse error in tokenize directive\x00content\x00multiple content=... directives\x00%Q.%Q\x00content_rowid\x00multiple content_rowid=... directives\x00columnsize\x00malformed columnsize=... directive\x00columns\x00malformed detail=... 
directive\x00unrecognized option: \"%.*s\"\x00reserved fts5 column name: %s\x00unindexed\x00unrecognized column option: %s\x00T.%Q\x00, T.%Q\x00, T.c%d\x00reserved fts5 table name: %s\x00parse error in \"%s\"\x00docsize\x00%Q.'%q_%s'\x00CREATE TABLE x(\x00%z%s%Q\x00%z, %Q HIDDEN, %s HIDDEN)\x00pgsz\x00hashsize\x00automerge\x00usermerge\x00crisismerge\x00SELECT k, v FROM %Q.'%q_config'\x00version\x00invalid fts5 file format (found %d, expected %d) - run 'rebuild'\x00unterminated string\x00fts5: syntax error near \"%.1s\"\x00OR\x00NOT\x00NEAR\x00expected integer, got \"%.*s\"\x00fts5: column queries are not supported (detail=none)\x00fts5: %s queries are not supported (detail!=full)\x00phrase\x00block\x00REPLACE INTO '%q'.'%q_data'(id, block) VALUES(?,?)\x00DELETE FROM '%q'.'%q_data' WHERE id>=? AND id<=?\x00DELETE FROM '%q'.'%q_idx' WHERE segid=?\x00PRAGMA %Q.data_version\x00SELECT pgno FROM '%q'.'%q_idx' WHERE segid=? AND term<=? ORDER BY term DESC LIMIT 1\x00INSERT INTO '%q'.'%q_idx'(segid,term,pgno) VALUES(?,?,?)\x00%s_data\x00id INTEGER PRIMARY KEY, block BLOB\x00segid, term, pgno, PRIMARY KEY(segid, term)\x00SELECT segid, term, (pgno>>1), (pgno&1) FROM %Q.'%q_idx' WHERE segid=%d ORDER BY 1, 2\x00\x00\x00\x00\x00\x00recursively defined fts5 content table\x00SELECT rowid, rank FROM %Q.%Q ORDER BY %s(\"%w\"%s%s) %s\x00DESC\x00ASC\x00reads\x00unknown special query: %.*s\x00SELECT %s\x00no such function: %s\x00parse error in rank function: %s\x00%s: table does not support scanning\x00delete-all\x00'delete-all' may only be used with a contentless or external content fts5 table\x00rebuild\x00'rebuild' may not be used with a contentless fts5 table\x00merge\x00integrity-check\x00cannot %s contentless fts5 table: %s\x00DELETE from\x00no such cursor: %lld\x00no such tokenizer: %s\x00error in tokenizer constructor\x00fts5_api_ptr\x00fts5: 2023-02-21 18:09:37 05941c2a04037fc3ed2ffae11f5d2260706f89431f463518740f72ada350866d\x00config\x00fts5\x00fts5_source_id\x00SELECT %s FROM %s T WHERE T.%Q >= ? AND T.%Q <= ? ORDER BY T.%Q ASC\x00SELECT %s FROM %s T WHERE T.%Q <= ? AND T.%Q >= ? 
ORDER BY T.%Q DESC\x00SELECT %s FROM %s T WHERE T.%Q=?\x00INSERT INTO %Q.'%q_content' VALUES(%s)\x00REPLACE INTO %Q.'%q_content' VALUES(%s)\x00DELETE FROM %Q.'%q_content' WHERE id=?\x00REPLACE INTO %Q.'%q_docsize' VALUES(?,?)\x00DELETE FROM %Q.'%q_docsize' WHERE id=?\x00SELECT sz FROM %Q.'%q_docsize' WHERE id=?\x00REPLACE INTO %Q.'%q_config' VALUES(?,?)\x00SELECT %s FROM %s AS T\x00DROP TABLE IF EXISTS %Q.'%q_data';DROP TABLE IF EXISTS %Q.'%q_idx';DROP TABLE IF EXISTS %Q.'%q_config';\x00DROP TABLE IF EXISTS %Q.'%q_docsize';\x00DROP TABLE IF EXISTS %Q.'%q_content';\x00ALTER TABLE %Q.'%q_%s' RENAME TO '%q_%s';\x00CREATE TABLE %Q.'%q_%q'(%s)%s\x00fts5: error creating shadow table %q_%s: %s\x00id INTEGER PRIMARY KEY\x00, c%d\x00id INTEGER PRIMARY KEY, sz BLOB\x00k PRIMARY KEY, v\x00DELETE FROM %Q.'%q_data';DELETE FROM %Q.'%q_idx';\x00DELETE FROM %Q.'%q_docsize';\x00SELECT count(*) FROM %Q.'%q_%s'\x00tokenchars\x00separators\x00L* N* Co\x00categories\x00remove_diacritics\x00unicode61\x00al\x00ance\x00ence\x00er\x00ic\x00able\x00ible\x00ant\x00ement\x00ment\x00ent\x00ion\x00ou\x00ism\x00ate\x00iti\x00ous\x00ive\x00ize\x00at\x00bl\x00ble\x00iz\x00ational\x00tional\x00tion\x00enci\x00anci\x00izer\x00logi\x00bli\x00alli\x00entli\x00eli\x00e\x00ousli\x00ization\x00ation\x00ator\x00alism\x00iveness\x00fulness\x00ful\x00ousness\x00aliti\x00iviti\x00biliti\x00ical\x00ness\x00icate\x00iciti\x00ative\x00alize\x00eed\x00ee\x00ed\x00ing\x00case_sensitive\x00ascii\x00porter\x00trigram\x00col\x00row\x00instance\x00fts5vocab: unknown table type: %Q\x00CREATE TABlE vocab(term, col, doc, cnt)\x00CREATE TABlE vocab(term, doc, cnt)\x00CREATE TABlE vocab(term, doc, col, offset)\x00wrong number of vtable arguments\x00recursive definition for %s.%s\x00SELECT t.%Q FROM %Q.%Q AS t WHERE t.%Q MATCH '*id'\x00no such fts5 table: %s.%s\x00fts5vocab\x002023-02-21 18:09:37 05941c2a04037fc3ed2ffae11f5d2260706f89431f463518740f72ada350866d\x00" +var ts1 = "3.41.2\x00ATOMIC_INTRINSICS=1\x00COMPILER=gcc-10.2.1 20210110\x00DEFAULT_AUTOVACUUM\x00DEFAULT_CACHE_SIZE=-2000\x00DEFAULT_FILE_FORMAT=4\x00DEFAULT_JOURNAL_SIZE_LIMIT=-1\x00DEFAULT_MEMSTATUS=0\x00DEFAULT_MMAP_SIZE=0\x00DEFAULT_PAGE_SIZE=4096\x00DEFAULT_PCACHE_INITSZ=20\x00DEFAULT_RECURSIVE_TRIGGERS\x00DEFAULT_SECTOR_SIZE=4096\x00DEFAULT_SYNCHRONOUS=2\x00DEFAULT_WAL_AUTOCHECKPOINT=1000\x00DEFAULT_WAL_SYNCHRONOUS=2\x00DEFAULT_WORKER_THREADS=0\x00ENABLE_COLUMN_METADATA\x00ENABLE_FTS5\x00ENABLE_GEOPOLY\x00ENABLE_MATH_FUNCTIONS\x00ENABLE_MEMORY_MANAGEMENT\x00ENABLE_OFFSET_SQL_FUNC\x00ENABLE_PREUPDATE_HOOK\x00ENABLE_RBU\x00ENABLE_RTREE\x00ENABLE_SESSION\x00ENABLE_SNAPSHOT\x00ENABLE_STAT4\x00ENABLE_UNLOCK_NOTIFY\x00LIKE_DOESNT_MATCH_BLOBS\x00MALLOC_SOFT_LIMIT=1024\x00MAX_ATTACHED=10\x00MAX_COLUMN=2000\x00MAX_COMPOUND_SELECT=500\x00MAX_DEFAULT_PAGE_SIZE=8192\x00MAX_EXPR_DEPTH=1000\x00MAX_FUNCTION_ARG=127\x00MAX_LENGTH=1000000000\x00MAX_LIKE_PATTERN_LENGTH=50000\x00MAX_MMAP_SIZE=0x7fff0000\x00MAX_PAGE_COUNT=1073741823\x00MAX_PAGE_SIZE=65536\x00MAX_SQL_LENGTH=1000000000\x00MAX_TRIGGER_DEPTH=1000\x00MAX_VARIABLE_NUMBER=32766\x00MAX_VDBE_OP=250000000\x00MAX_WORKER_THREADS=8\x00MUTEX_NOOP\x00SOUNDEX\x00SYSTEM_MALLOC\x00TEMP_STORE=1\x00THREADSAFE=1\x00BINARY\x00ANY\x00BLOB\x00INT\x00INTEGER\x00REAL\x00TEXT\x0020b:20e\x0020c:20e\x0020e\x0040f-21a-21d\x00now\x00local time unavailable\x00second\x00minute\x00hour\x00\x00\x00day\x00\x00\x00\x00month\x00\x00year\x00\x00\x00auto\x00julianday\x00localtime\x00unixepoch\x00utc\x00weekday \x00start of 
\x00month\x00year\x00day\x00%02d\x00%06.3f\x00%03d\x00%.16g\x00%lld\x00%04d\x00date\x00time\x00datetime\x00strftime\x00current_time\x00current_timestamp\x00current_date\x00failed to allocate %u bytes of memory\x00failed memory resize %u to %u bytes\x00out of memory\x000123456789ABCDEF0123456789abcdef\x00-x0\x00X0\x00%\x00NaN\x00Inf\x00\x00NULL\x00(NULL)\x00.\x00(join-%u)\x00(subquery-%u)\x00thstndrd\x00922337203685477580\x00API call with %s database connection pointer\x00unopened\x00invalid\x00Savepoint\x00AutoCommit\x00Transaction\x00Checkpoint\x00JournalMode\x00Vacuum\x00VFilter\x00VUpdate\x00Init\x00Goto\x00Gosub\x00InitCoroutine\x00Yield\x00MustBeInt\x00Jump\x00Once\x00If\x00IfNot\x00IsType\x00Not\x00IfNullRow\x00SeekLT\x00SeekLE\x00SeekGE\x00SeekGT\x00IfNotOpen\x00IfNoHope\x00NoConflict\x00NotFound\x00Found\x00SeekRowid\x00NotExists\x00Last\x00IfSmaller\x00SorterSort\x00Sort\x00Rewind\x00SorterNext\x00Prev\x00Next\x00IdxLE\x00IdxGT\x00IdxLT\x00Or\x00And\x00IdxGE\x00RowSetRead\x00RowSetTest\x00Program\x00FkIfZero\x00IsNull\x00NotNull\x00Ne\x00Eq\x00Gt\x00Le\x00Lt\x00Ge\x00ElseEq\x00IfPos\x00IfNotZero\x00DecrJumpZero\x00IncrVacuum\x00VNext\x00Filter\x00PureFunc\x00Function\x00Return\x00EndCoroutine\x00HaltIfNull\x00Halt\x00Integer\x00Int64\x00String\x00BeginSubrtn\x00Null\x00SoftNull\x00Blob\x00Variable\x00Move\x00Copy\x00SCopy\x00IntCopy\x00FkCheck\x00ResultRow\x00CollSeq\x00AddImm\x00RealAffinity\x00Cast\x00Permutation\x00Compare\x00IsTrue\x00ZeroOrNull\x00Offset\x00Column\x00TypeCheck\x00Affinity\x00MakeRecord\x00Count\x00ReadCookie\x00SetCookie\x00ReopenIdx\x00BitAnd\x00BitOr\x00ShiftLeft\x00ShiftRight\x00Add\x00Subtract\x00Multiply\x00Divide\x00Remainder\x00Concat\x00OpenRead\x00OpenWrite\x00BitNot\x00OpenDup\x00OpenAutoindex\x00String8\x00OpenEphemeral\x00SorterOpen\x00SequenceTest\x00OpenPseudo\x00Close\x00ColumnsUsed\x00SeekScan\x00SeekHit\x00Sequence\x00NewRowid\x00Insert\x00RowCell\x00Delete\x00ResetCount\x00SorterCompare\x00SorterData\x00RowData\x00Rowid\x00NullRow\x00SeekEnd\x00IdxInsert\x00SorterInsert\x00IdxDelete\x00DeferredSeek\x00IdxRowid\x00FinishSeek\x00Destroy\x00Clear\x00ResetSorter\x00CreateBtree\x00SqlExec\x00ParseSchema\x00LoadAnalysis\x00DropTable\x00DropIndex\x00Real\x00DropTrigger\x00IntegrityCk\x00RowSetAdd\x00Param\x00FkCounter\x00MemMax\x00OffsetLimit\x00AggInverse\x00AggStep\x00AggStep1\x00AggValue\x00AggFinal\x00Expire\x00CursorLock\x00CursorUnlock\x00TableLock\x00VBegin\x00VCreate\x00VDestroy\x00VOpen\x00VInitIn\x00VColumn\x00VRename\x00Pagecount\x00MaxPgcnt\x00ClrSubtype\x00FilterAdd\x00Trace\x00CursorHint\x00ReleaseReg\x00Noop\x00Explain\x00Abortable\x00open\x00close\x00access\x00getcwd\x00stat\x00fstat\x00ftruncate\x00fcntl\x00read\x00pread\x00pread64\x00write\x00pwrite\x00pwrite64\x00fchmod\x00fallocate\x00unlink\x00openDirectory\x00mkdir\x00rmdir\x00fchown\x00geteuid\x00mmap\x00munmap\x00mremap\x00getpagesize\x00readlink\x00lstat\x00ioctl\x00attempt to open \"%s\" as file descriptor %d\x00/dev/null\x00os_unix.c:%d: (%d) %s(%s) - %s\x00cannot fstat db file %s\x00file unlinked while open: %s\x00multiple links to file: %s\x00file renamed while open: %s\x00%s\x00full_fsync\x00%s-shm\x00readonly_shm\x00psow\x00unix-excl\x00%s.lock\x00/var/tmp\x00/usr/tmp\x00/tmp\x00SQLITE_TMPDIR\x00TMPDIR\x00%s/etilqs_%llx%c\x00modeof\x00fsync\x00/dev/urandom\x00unix\x00unix-none\x00unix-dotfile\x00memdb\x00memdb(%p,%lld)\x00PRAGMA \"%w\".page_count\x00ATTACH x AS %Q\x00recovered %d pages from %s\x00-journal\x00-wal\x00nolock\x00immutable\x00PRAGMA 
table_list\x00recovered %d frames from WAL file %s\x00cannot limit WAL size: %s\x00SQLite format 3\x00:memory:\x00@ \x00\n\x00invalid page number %d\x002nd reference to page %d\x00Failed to read ptrmap key=%d\x00Bad ptr map entry key=%d expected=(%d,%d) got=(%d,%d)\x00failed to get page %d\x00freelist leaf count too big on page %d\x00%s is %d but should be %d\x00size\x00overflow list length\x00Page %u: \x00unable to get the page. error code=%d\x00btreeInitPage() returns error code %d\x00free space corruption\x00On tree page %u cell %d: \x00On page %u at right child: \x00Offset %d out of range %d..%d\x00Extends off end of page\x00Rowid %lld out of order\x00Child page depth differs\x00Multiple uses for byte %u of page %u\x00Fragmentation of %d bytes reported as %d on page %u\x00Main freelist: \x00max rootpage (%d) disagrees with header (%d)\x00incremental_vacuum enabled with a max rootpage of zero\x00Page %d is never used\x00Pointer map page %d is referenced\x00unknown database %s\x00destination database is in use\x00source and destination must be distinct\x00%!.15g\x00-\x00%s%s\x00k(%d\x00B\x00,%s%s%s\x00N.\x00)\x00%.18s-%s\x00%s(%d)\x00%d\x00(blob)\x00vtab:%p\x00%c%u\x00]\x00program\x00?\x008\x0016LE\x0016BE\x00addr\x00opcode\x00p1\x00p2\x00p3\x00p4\x00p5\x00comment\x00id\x00parent\x00notused\x00detail\x00%.4c%s%.16c\x00MJ delete: %s\x00MJ collide: %s\x00-mj%06X9%02X\x00FOREIGN KEY constraint failed\x00a CHECK constraint\x00a generated column\x00an index\x00non-deterministic use of %s() in %s\x00API called with finalized prepared statement\x00API called with NULL prepared statement\x00string or blob too big\x00bind on a busy prepared statement: [%s]\x00-- \x00'%.*q'\x00zeroblob(%d)\x00x'\x00%02x\x00'\x00%s constraint failed\x00%z: %s\x00abort at %d in [%s]: %s\x00cannot store %s value in %s column %s.%s\x00cannot open savepoint - SQL statements in progress\x00no such savepoint: %s\x00cannot release savepoint - SQL statements in progress\x00cannot commit transaction - SQL statements in progress\x00cannot start a transaction within a transaction\x00cannot rollback - no transaction is active\x00cannot commit - no transaction is active\x00database schema has changed\x00index corruption\x00sqlite_master\x00SELECT*FROM\"%w\".%s WHERE %s ORDER BY rowid\x00too many levels of trigger recursion\x00cannot change %s wal mode from within a transaction\x00into\x00out of\x00database table is locked: %s\x00ValueList\x00-- %s\x00statement aborts at %d: [%s] %s\x00NOT NULL\x00UNIQUE\x00CHECK\x00FOREIGN KEY\x00cannot open value of type %s\x00null\x00real\x00integer\x00no such rowid: %lld\x00cannot open virtual table: %s\x00cannot open table without rowid: %s\x00cannot open view: %s\x00no such column: \"%s\"\x00foreign key\x00indexed\x00cannot open %s column for writing\x00sqlite_\x00sqlite_temp_master\x00sqlite_temp_schema\x00sqlite_schema\x00main\x00*\x00new\x00old\x00excluded\x00misuse of aliased aggregate %s\x00misuse of aliased window function %s\x00row value misused\x00double-quoted string literal: \"%w\"\x00coalesce\x00no such column\x00ambiguous column name\x00%s: %s.%s.%s\x00%s: %s.%s\x00%s: %s\x00partial index WHERE clauses\x00index expressions\x00CHECK constraints\x00generated columns\x00%s prohibited in %s\x00the \".\" operator\x00second argument to %#T() must be a constant between 0.0 and 1.0\x00not authorized to use function: %#T\x00non-deterministic functions\x00%#T() may not be used as a window function\x00window\x00aggregate\x00misuse of %s function %#T()\x00no such function: %#T\x00wrong 
number of arguments to function %#T()\x00FILTER may not be used with non-aggregate %#T()\x00subqueries\x00parameters\x00%r %s BY term out of range - should be between 1 and %d\x00too many terms in ORDER BY clause\x00ORDER\x00%r ORDER BY term does not match any column in the result set\x00too many terms in %s BY clause\x00HAVING clause on a non-aggregate query\x00GROUP\x00aggregate functions are not allowed in the GROUP BY clause\x00Expression tree is too large (maximum depth %d)\x00IN(...) element has %d term%s - expected %d\x00s\x000\x00too many arguments on function %T\x00unsafe use of %#T()\x00variable number must be between ?1 and ?%d\x00too many SQL variables\x00%d columns assigned %d values\x00too many columns in %s\x00true\x00false\x00_ROWID_\x00ROWID\x00OID\x00USING ROWID SEARCH ON TABLE %s FOR IN-OPERATOR\x00USING INDEX %s FOR IN-OPERATOR\x00sub-select returns %d columns - expected %d\x00REUSE LIST SUBQUERY %d\x00%sLIST SUBQUERY %d\x00CORRELATED \x00REUSE SUBQUERY %d\x00%sSCALAR SUBQUERY %d\x001\x000x\x00hex literal too big: %s%#T\x00generated column loop on \"%s\"\x00blob\x00text\x00numeric\x00flexnum\x00none\x00misuse of aggregate: %#T()\x00unknown function: %#T()\x00RAISE() may only be used within a trigger-program\x00B\x00C\x00D\x00E\x00F\x00table %s may not be altered\x00SELECT 1 FROM \"%w\".sqlite_master WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%' AND sqlite_rename_test(%Q, sql, type, name, %d, %Q, %d)=NULL \x00SELECT 1 FROM temp.sqlite_master WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%' AND sqlite_rename_test(%Q, sql, type, name, 1, %Q, %d)=NULL \x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_quotefix(%Q, sql)WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%'\x00UPDATE temp.sqlite_master SET sql = sqlite_rename_quotefix('temp', sql)WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%'\x00there is already another table or index with this name: %s\x00table\x00view %s may not be altered\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, %d) WHERE (type!='index' OR tbl_name=%Q COLLATE nocase)AND name NOT LIKE 'sqliteX_%%' ESCAPE 'X'\x00UPDATE %Q.sqlite_master SET tbl_name = %Q, name = CASE WHEN type='table' THEN %Q WHEN name LIKE 'sqliteX_autoindex%%' ESCAPE 'X' AND type='index' THEN 'sqlite_autoindex_' || %Q || substr(name,%d+18) ELSE name END WHERE tbl_name=%Q COLLATE nocase AND (type='table' OR type='index' OR type='trigger');\x00sqlite_sequence\x00UPDATE \"%w\".sqlite_sequence set name = %Q WHERE name = %Q\x00UPDATE sqlite_temp_schema SET sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, 1), tbl_name = CASE WHEN tbl_name=%Q COLLATE nocase AND sqlite_rename_test(%Q, sql, type, name, 1, 'after rename', 0) THEN %Q ELSE tbl_name END WHERE type IN ('view', 'trigger')\x00after rename\x00SELECT raise(ABORT,%Q) FROM \"%w\".\"%w\"\x00Cannot add a PRIMARY KEY column\x00Cannot add a UNIQUE column\x00Cannot add a REFERENCES column with non-NULL default value\x00Cannot add a NOT NULL column with default value NULL\x00Cannot add a column with non-constant default\x00cannot add a STORED column\x00UPDATE \"%w\".sqlite_master SET sql = printf('%%.%ds, ',sql) || %Q || substr(sql,1+length(printf('%%.%ds',sql))) WHERE type = 'table' AND name = %Q\x00SELECT CASE WHEN quick_check GLOB 'CHECK*' THEN raise(ABORT,'CHECK constraint failed') ELSE raise(ABORT,'NOT NULL constraint failed') END FROM pragma_quick_check(%Q,%Q) 
WHERE quick_check GLOB 'CHECK*' OR quick_check GLOB 'NULL*'\x00virtual tables may not be altered\x00Cannot add a column to a view\x00sqlite_altertab_%s\x00view\x00virtual table\x00cannot %s %s \"%s\"\x00drop column from\x00rename columns of\x00no such column: \"%T\"\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, %d) WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND (type != 'index' OR tbl_name = %Q)\x00UPDATE temp.sqlite_master SET sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, 1) WHERE type IN ('trigger', 'view')\x00error in %s %s%s%s: %s\x00 \x00CREATE \x00\"%w\" \x00%Q%s\x00%.*s%s\x00cannot drop %s column: \"%s\"\x00PRIMARY KEY\x00cannot drop column \"%s\": no other columns exist\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_drop_column(%d, sql, %d) WHERE (type=='table' AND tbl_name=%Q COLLATE nocase)\x00after drop column\x00sqlite_rename_column\x00sqlite_rename_table\x00sqlite_rename_test\x00sqlite_drop_column\x00sqlite_rename_quotefix\x00CREATE TABLE %Q.%s(%s)\x00DELETE FROM %Q.%s WHERE %s=%Q\x00DELETE FROM %Q.%s\x00sqlite_stat1\x00tbl,idx,stat\x00sqlite_stat4\x00tbl,idx,neq,nlt,ndlt,sample\x00sqlite_stat3\x00stat_init\x00stat_push\x00%llu\x00 %llu\x00%llu \x00stat_get\x00sqlite\\_%\x00BBB\x00idx\x00tbl\x00unordered*\x00sz=[0-9]*\x00noskipscan*\x00SELECT idx,count(*) FROM %Q.sqlite_stat4 GROUP BY idx\x00SELECT idx,neq,nlt,ndlt,sample FROM %Q.sqlite_stat4\x00SELECT tbl,idx,stat FROM %Q.sqlite_stat1\x00x\x00\x00too many attached databases - max %d\x00database %s is already in use\x00database is already attached\x00attached databases must use the same text encoding as main database\x00unable to open database: %s\x00no such database: %s\x00cannot detach database %s\x00database %s is locked\x00sqlite_detach\x00sqlite_attach\x00%s cannot use variables\x00%s %T cannot reference objects in database %s\x00authorizer malfunction\x00%s.%s\x00%s.%z\x00access to %z is prohibited\x00not authorized\x00pragma_\x00no such view\x00no such table\x00corrupt database\x00unknown database %T\x00object name reserved for internal use: %s\x00temporary table name must be unqualified\x00%s %T already exists\x00there is already an index named %s\x00sqlite_returning\x00cannot use RETURNING in a trigger\x00too many columns on %s\x00always\x00generated\x00duplicate column name: %s\x00default value of column [%s] is not constant\x00cannot use DEFAULT on a generated column\x00generated columns cannot be part of the PRIMARY KEY\x00table \"%s\" has more than one primary key\x00AUTOINCREMENT is only allowed on an INTEGER PRIMARY KEY\x00virtual tables cannot use computed columns\x00virtual\x00stored\x00error in generated column \"%s\"\x00,\x00\n \x00,\n \x00\n)\x00CREATE TABLE \x00 TEXT\x00 NUM\x00 INT\x00 REAL\x00unknown datatype for %s.%s: \"%s\"\x00missing datatype for %s.%s\x00AUTOINCREMENT not allowed on WITHOUT ROWID tables\x00PRIMARY KEY missing on table %s\x00must have at least one non-generated column\x00TABLE\x00VIEW\x00CREATE %s %.*s\x00UPDATE %Q.sqlite_master SET type='%s', name=%Q, tbl_name=%Q, rootpage=#%d, sql=%Q WHERE rowid=#%d\x00CREATE TABLE %Q.sqlite_sequence(name,seq)\x00tbl_name='%q' AND type!='trigger'\x00parameters are not allowed in views\x00view %s is circularly defined\x00corrupt schema\x00UPDATE %Q.sqlite_master SET rootpage=%d WHERE #%d AND rootpage=#%d\x00sqlite_stat%d\x00DELETE FROM %Q.sqlite_sequence WHERE name=%Q\x00DELETE FROM %Q.sqlite_master WHERE tbl_name=%Q and type!='trigger'\x00table %s may not be dropped\x00use 
DROP TABLE to delete table %s\x00use DROP VIEW to delete view %s\x00foreign key on %s should reference only one column of table %T\x00number of columns in foreign key does not match the number of columns in the referenced table\x00unknown column \"%s\" in foreign key definition\x00unsupported use of NULLS %s\x00FIRST\x00LAST\x00index\x00cannot create a TEMP index on non-TEMP table \"%s\"\x00table %s may not be indexed\x00views may not be indexed\x00virtual tables may not be indexed\x00there is already a table named %s\x00index %s already exists\x00sqlite_autoindex_%s_%d\x00expressions prohibited in PRIMARY KEY and UNIQUE constraints\x00conflicting ON CONFLICT clauses specified\x00invalid rootpage\x00CREATE%s INDEX %.*s\x00 UNIQUE\x00INSERT INTO %Q.sqlite_master VALUES('index',%Q,%Q,#%d,%Q);\x00name='%q' AND type='index'\x00no such index: %S\x00index associated with UNIQUE or PRIMARY KEY constraint cannot be dropped\x00DELETE FROM %Q.sqlite_master WHERE name=%Q AND type='index'\x00too many FROM clause terms, max: %d\x00a JOIN clause is required before %s\x00ON\x00USING\x00BEGIN\x00ROLLBACK\x00COMMIT\x00RELEASE\x00unable to open a temporary database file for storing temporary tables\x00index '%q'\x00, \x00%s.rowid\x00unable to identify the object to be reindexed\x00duplicate WITH table name: %s\x00no such collation sequence: %s\x00unsafe use of virtual table \"%s\"\x00table %s may not be modified\x00cannot modify %s because it is a view\x00rows deleted\x00integer overflow\x00%.*f\x00LIKE or GLOB pattern too complex\x00ESCAPE expression must be a single character\x00%!.20e\x00%Q\x00?000\x00MATCH\x00like\x00implies_nonnull_row\x00expr_compare\x00expr_implies_expr\x00affinity\x00soundex\x00load_extension\x00sqlite_compileoption_used\x00sqlite_compileoption_get\x00unlikely\x00likelihood\x00likely\x00sqlite_offset\x00ltrim\x00rtrim\x00trim\x00min\x00max\x00typeof\x00subtype\x00length\x00instr\x00printf\x00format\x00unicode\x00char\x00abs\x00round\x00upper\x00lower\x00hex\x00unhex\x00ifnull\x00random\x00randomblob\x00nullif\x00sqlite_version\x00sqlite_source_id\x00sqlite_log\x00quote\x00last_insert_rowid\x00changes\x00total_changes\x00replace\x00zeroblob\x00substr\x00substring\x00sum\x00total\x00avg\x00count\x00group_concat\x00glob\x00ceil\x00ceiling\x00floor\x00trunc\x00ln\x00log\x00log10\x00log2\x00exp\x00pow\x00power\x00mod\x00acos\x00asin\x00atan\x00atan2\x00cos\x00sin\x00tan\x00cosh\x00sinh\x00tanh\x00acosh\x00asinh\x00atanh\x00sqrt\x00radians\x00degrees\x00pi\x00sign\x00iif\x00foreign key mismatch - \"%w\" referencing \"%w\"\x00cannot INSERT into generated column \"%s\"\x00table %S has no column named %s\x00table %S has %d columns but %d values were supplied\x00%d values for %d columns\x00UPSERT not implemented for virtual table \"%s\"\x00cannot UPSERT a view\x00rows inserted\x00sqlite3_extension_init\x00sqlite3_\x00lib\x00_init\x00no entry point [%s] in shared library [%s]\x00error during initialization: %s\x00unable to open shared library [%.*s]\x00so\x00automatic extension loading failed: 
%s\x00seq\x00from\x00to\x00on_update\x00on_delete\x00match\x00cid\x00name\x00type\x00notnull\x00dflt_value\x00pk\x00hidden\x00schema\x00ncol\x00wr\x00strict\x00seqno\x00desc\x00coll\x00key\x00builtin\x00enc\x00narg\x00flags\x00wdth\x00hght\x00flgs\x00unique\x00origin\x00partial\x00rowid\x00fkid\x00file\x00busy\x00checkpointed\x00database\x00status\x00cache_size\x00timeout\x00analysis_limit\x00application_id\x00auto_vacuum\x00automatic_index\x00busy_timeout\x00cache_spill\x00case_sensitive_like\x00cell_size_check\x00checkpoint_fullfsync\x00collation_list\x00compile_options\x00count_changes\x00data_version\x00database_list\x00default_cache_size\x00defer_foreign_keys\x00empty_result_callbacks\x00encoding\x00foreign_key_check\x00foreign_key_list\x00foreign_keys\x00freelist_count\x00full_column_names\x00fullfsync\x00function_list\x00hard_heap_limit\x00ignore_check_constraints\x00incremental_vacuum\x00index_info\x00index_list\x00index_xinfo\x00integrity_check\x00journal_mode\x00journal_size_limit\x00legacy_alter_table\x00locking_mode\x00max_page_count\x00mmap_size\x00module_list\x00optimize\x00page_count\x00page_size\x00pragma_list\x00query_only\x00quick_check\x00read_uncommitted\x00recursive_triggers\x00reverse_unordered_selects\x00schema_version\x00secure_delete\x00short_column_names\x00shrink_memory\x00soft_heap_limit\x00synchronous\x00table_info\x00table_list\x00table_xinfo\x00temp_store\x00temp_store_directory\x00threads\x00trusted_schema\x00user_version\x00wal_autocheckpoint\x00wal_checkpoint\x00writable_schema\x00onoffalseyestruextrafull\x00exclusive\x00normal\x00full\x00incremental\x00memory\x00temporary storage cannot be changed from within a transaction\x00SET NULL\x00SET DEFAULT\x00CASCADE\x00RESTRICT\x00NO ACTION\x00delete\x00persist\x00off\x00truncate\x00wal\x00w\x00a\x00sissii\x00utf8\x00utf16le\x00utf16be\x00-%T\x00fast\x00not a writable directory\x00Safety level may not be changed inside a transaction\x00reset\x00issisii\x00issisi\x00SELECT*FROM\"%w\"\x00shadow\x00sssiii\x00iisX\x00isiX\x00c\x00u\x00isisi\x00iss\x00is\x00iissssss\x00NONE\x00siX\x00*** in database %s ***\n\x00row not in PRIMARY KEY order for %s\x00NULL value in %s.%s\x00non-%s value in %s.%s\x00NUMERIC value in %s.%s\x00C\x00TEXT value in %s.%s\x00CHECK constraint failed in %s\x00row \x00 missing from index \x00rowid not at end-of-record for row \x00 of index \x00 values differ from index \x00non-unique entry in index \x00wrong # of entries in index \x00ok\x00unsupported encoding: %s\x00restart\x00ANALYZE \"%w\".\"%w\"\x00UTF8\x00UTF-8\x00UTF-16le\x00UTF-16be\x00UTF16le\x00UTF16be\x00UTF-16\x00UTF16\x00CREATE TABLE x\x00%c\"%s\"\x00(\"%s\"\x00,arg HIDDEN\x00,schema HIDDEN\x00PRAGMA \x00%Q.\x00=%Q\x00error in %s %s after %s: %s\x00malformed database schema (%s)\x00%z - %s\x00rename\x00drop column\x00add column\x00orphan index\x00CREATE TABLE x(type text,name text,tbl_name text,rootpage int,sql text)\x00unsupported file format\x00SELECT*FROM\"%w\".%s ORDER BY rowid\x00database schema is locked: %s\x00statement too long\x00unknown join type: %T%s%T%s%T\x00naturaleftouterightfullinnercross\x00a NATURAL join may not have an ON or USING clause\x00cannot join using column %s - column not present in both tables\x00ambiguous reference to %s in USING()\x00UNION ALL\x00INTERSECT\x00EXCEPT\x00UNION\x00USE TEMP B-TREE FOR %s\x00USE TEMP B-TREE FOR %sORDER BY\x00RIGHT PART OF \x00column%d\x00%.*z:%u\x00NUM\x00cannot use window functions in recursive queries\x00recursive aggregate queries not supported\x00SETUP\x00RECURSIVE 
STEP\x00SCAN %d CONSTANT ROW%s\x00S\x00COMPOUND QUERY\x00LEFT-MOST SUBQUERY\x00%s USING TEMP B-TREE\x00all VALUES must have the same number of terms\x00SELECTs to the left and right of %s do not have the same number of result columns\x00MERGE (%s)\x00LEFT\x00RIGHT\x00no such index: %s\x00'%s' is not a function\x00no such index: \"%s\"\x00multiple references to recursive table: %s\x00circular reference: %s\x00table %s has %d values for %d columns\x00multiple recursive references: %s\x00recursive reference in a subquery: %s\x00%!S\x00too many references to \"%s\": max 65535\x00access to view \"%s\" prohibited\x00..%s\x00%s.%s.%s\x00no such table: %s\x00no tables specified\x00too many columns in result set\x00DISTINCT aggregates must have exactly one argument\x00USE TEMP B-TREE FOR %s(DISTINCT)\x00SCAN %s%s%s\x00 USING COVERING INDEX \x00target object/alias may not appear in FROM clause: %s\x00expected %d columns for '%s' but got %d\x00CO-ROUTINE %!S\x00MATERIALIZE %!S\x00DISTINCT\x00GROUP BY\x00sqlite3_get_table() called with two or more incompatible queries\x00temporary trigger may not have qualified name\x00trigger\x00cannot create triggers on virtual tables\x00trigger %T already exists\x00cannot create trigger on system table\x00cannot create %s trigger on view: %S\x00BEFORE\x00AFTER\x00cannot create INSTEAD OF trigger on table: %S\x00trigger \"%s\" may not write to shadow table \"%s\"\x00INSERT INTO %Q.sqlite_master VALUES('trigger',%Q,%Q,0,'CREATE TRIGGER %q')\x00type='trigger' AND name='%q'\x00no such trigger: %S\x00DELETE FROM %Q.sqlite_master WHERE name=%Q AND type='trigger'\x00%s RETURNING is not available on virtual tables\x00DELETE\x00UPDATE\x00RETURNING may not use \"TABLE.*\" wildcards\x00-- TRIGGER %s\x00cannot UPDATE generated column \"%s\"\x00no such column: %s\x00rows updated\x00%r \x00%sON CONFLICT clause does not match any PRIMARY KEY or UNIQUE constraint\x00CRE\x00INS\x00cannot VACUUM from within a transaction\x00cannot VACUUM - SQL statements in progress\x00non-text filename\x00ATTACH %Q AS vacuum_db\x00output file already exists\x00SELECT sql FROM \"%w\".sqlite_schema WHERE type='table'AND name<>'sqlite_sequence' AND coalesce(rootpage,1)>0\x00SELECT sql FROM \"%w\".sqlite_schema WHERE type='index'\x00SELECT'INSERT INTO vacuum_db.'||quote(name)||' SELECT*FROM\"%w\".'||quote(name)FROM vacuum_db.sqlite_schema WHERE type='table'AND coalesce(rootpage,1)>0\x00INSERT INTO vacuum_db.sqlite_schema SELECT*FROM \"%w\".sqlite_schema WHERE type IN('view','trigger') OR(type='table'AND rootpage=0)\x00CREATE VIRTUAL TABLE %T\x00UPDATE %Q.sqlite_master SET type='table', name=%Q, tbl_name=%Q, rootpage=0, sql=%Q WHERE rowid=#%d\x00name=%Q AND sql=%Q\x00vtable constructor called recursively: %s\x00vtable constructor failed: %s\x00vtable constructor did not declare schema: %s\x00no such module: %s\x00\x00 AND \x00(\x00 (\x00%s=?\x00ANY(%s)\x00>\x00<\x00%s %S\x00SEARCH\x00SCAN\x00AUTOMATIC PARTIAL COVERING INDEX\x00AUTOMATIC COVERING INDEX\x00COVERING INDEX %s\x00INDEX %s\x00 USING \x00 USING INTEGER PRIMARY KEY (%s\x00>? 
AND %s\x00%c?)\x00 VIRTUAL TABLE INDEX %d:%s\x00 LEFT-JOIN\x00BLOOM FILTER ON %S (\x00rowid=?\x00MULTI-INDEX OR\x00INDEX %d\x00RIGHT-JOIN %s\x00regexp\x00ON clause references tables to its right\x00NOCASE\x00too many arguments on %s() - max %d\x00automatic index on %s(%s)\x00auto-index\x00%s.xBestIndex malfunction\x00abbreviated query algorithm search\x00no query solution\x00at most %d tables in a join\x00SCAN CONSTANT ROW\x00second argument to nth_value must be a positive integer\x00argument of ntile must be a positive integer\x00row_number\x00dense_rank\x00rank\x00percent_rank\x00cume_dist\x00ntile\x00last_value\x00nth_value\x00first_value\x00lead\x00lag\x00no such window: %s\x00RANGE with offset PRECEDING/FOLLOWING requires one ORDER BY expression\x00FILTER clause may only be used with aggregate window functions\x00misuse of aggregate: %s()\x00unsupported frame specification\x00PARTITION clause\x00ORDER BY clause\x00frame specification\x00cannot override %s of window: %s\x00DISTINCT is not supported for window functions\x00frame starting offset must be a non-negative integer\x00frame ending offset must be a non-negative integer\x00frame starting offset must be a non-negative number\x00frame ending offset must be a non-negative number\x00%s clause should come after %s not before\x00ORDER BY\x00LIMIT\x00too many terms in compound SELECT\x00syntax error after column name \"%.*s\"\x00parser stack overflow\x00unknown table option: %.*s\x00set list\x00near \"%T\": syntax error\x00qualified table names are not allowed on INSERT, UPDATE, and DELETE statements within triggers\x00the INDEXED BY clause is not allowed on UPDATE or DELETE statements within triggers\x00the NOT INDEXED clause is not allowed on UPDATE or DELETE statements within triggers\x00incomplete input\x00unrecognized token: \"%T\"\x00%s in \"%s\"\x00create\x00temp\x00temporary\x00end\x00explain\x00unable to close due to unfinalized statements or unfinished backups\x00unknown error\x00abort due to ROLLBACK\x00another row available\x00no more rows available\x00not an error\x00SQL logic error\x00access permission denied\x00query aborted\x00database is locked\x00database table is locked\x00attempt to write a readonly database\x00interrupted\x00disk I/O error\x00database disk image is malformed\x00unknown operation\x00database or disk is full\x00unable to open database file\x00locking protocol\x00constraint failed\x00datatype mismatch\x00bad parameter or other API misuse\x00authorization denied\x00column index out of range\x00file is not a database\x00notification message\x00warning message\x00unable to delete/modify user-function due to active statements\x00unable to use function %s in the requested context\x00unknown database: %s\x00unable to delete/modify collation sequence due to active statements\x00file:\x00localhost\x00invalid uri authority: %.*s\x00vfs\x00cache\x00mode\x00no such %s mode: %s\x00%s mode not allowed: %s\x00no such vfs: %s\x00shared\x00private\x00ro\x00rw\x00rwc\x00RTRIM\x00\x00\x00\x00%s at line %d of [%.10s]\x00database corruption\x00misuse\x00cannot open file\x00no such table column: %s.%s\x00SQLITE_\x00database is deadlocked\x00array\x00object\x000123456789abcdef\x00JSON cannot hold BLOB values\x00malformed JSON\x00[0]\x00JSON path error near '%q'\x00json_%s() needs an odd number of arguments\x00$[\x00$.\x00json_object() requires an even number of arguments\x00json_object() labels must be TEXT\x00set\x00insert\x00[]\x00{}\x00CREATE TABLE x(key,value,type,atom,id,parent,fullkey,path,json HIDDEN,root 
HIDDEN)\x00.%.*s\x00[%d]\x00$\x00json\x00json_array\x00json_array_length\x00json_extract\x00->\x00->>\x00json_insert\x00json_object\x00json_patch\x00json_quote\x00json_remove\x00json_replace\x00json_set\x00json_type\x00json_valid\x00json_group_array\x00json_group_object\x00json_each\x00json_tree\x00%s_node\x00data\x00DROP TABLE '%q'.'%q_node';DROP TABLE '%q'.'%q_rowid';DROP TABLE '%q'.'%q_parent';\x00RtreeMatchArg\x00SELECT * FROM %Q.%Q\x00UNIQUE constraint failed: %s.%s\x00rtree constraint failed: %s.(%s<=%s)\x00ALTER TABLE %Q.'%q_node' RENAME TO \"%w_node\";ALTER TABLE %Q.'%q_parent' RENAME TO \"%w_parent\";ALTER TABLE %Q.'%q_rowid' RENAME TO \"%w_rowid\";\x00SELECT stat FROM %Q.sqlite_stat1 WHERE tbl = '%q_rowid'\x00node\x00CREATE TABLE \"%w\".\"%w_rowid\"(rowid INTEGER PRIMARY KEY,nodeno\x00,a%d\x00);CREATE TABLE \"%w\".\"%w_node\"(nodeno INTEGER PRIMARY KEY,data);\x00CREATE TABLE \"%w\".\"%w_parent\"(nodeno INTEGER PRIMARY KEY,parentnode);\x00INSERT INTO \"%w\".\"%w_node\"VALUES(1,zeroblob(%d))\x00INSERT INTO\"%w\".\"%w_rowid\"(rowid,nodeno)VALUES(?1,?2)ON CONFLICT(rowid)DO UPDATE SET nodeno=excluded.nodeno\x00SELECT * FROM \"%w\".\"%w_rowid\" WHERE rowid=?1\x00UPDATE \"%w\".\"%w_rowid\"SET \x00a%d=coalesce(?%d,a%d)\x00a%d=?%d\x00 WHERE rowid=?1\x00INSERT OR REPLACE INTO '%q'.'%q_node' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_node' WHERE nodeno = ?1\x00SELECT nodeno FROM '%q'.'%q_rowid' WHERE rowid = ?1\x00INSERT OR REPLACE INTO '%q'.'%q_rowid' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_rowid' WHERE rowid = ?1\x00SELECT parentnode FROM '%q'.'%q_parent' WHERE nodeno = ?1\x00INSERT OR REPLACE INTO '%q'.'%q_parent' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_parent' WHERE nodeno = ?1\x00PRAGMA %Q.page_size\x00SELECT length(data) FROM '%q'.'%q_node' WHERE nodeno = 1\x00undersize RTree blobs in \"%q_node\"\x00Wrong number of columns for an rtree table\x00Too few columns for an rtree table\x00Too many columns for an rtree table\x00Auxiliary rtree columns must be last\x00CREATE TABLE x(%.*s INT\x00,%.*s\x00);\x00,%.*s REAL\x00,%.*s INT\x00{%lld\x00 %g\x00}\x00Invalid argument to rtreedepth()\x00%z%s%z\x00SELECT data FROM %Q.'%q_node' WHERE nodeno=?\x00Node %lld missing from database\x00SELECT parentnode FROM %Q.'%q_parent' WHERE nodeno=?1\x00SELECT nodeno FROM %Q.'%q_rowid' WHERE rowid=?1\x00Mapping (%lld -> %lld) missing from %s table\x00%_rowid\x00%_parent\x00Found (%lld -> %lld) in %s table, expected (%lld -> %lld)\x00Dimension %d of cell %d on node %lld is corrupt\x00Dimension %d of cell %d on node %lld is corrupt relative to parent\x00Node %lld is too small (%d bytes)\x00Rtree depth out of range (%d)\x00Node %lld is too small for cell count of %d (%d bytes)\x00SELECT count(*) FROM %Q.'%q%s'\x00Wrong number of entries in %%%s table - expected %lld, actual %lld\x00SELECT * FROM %Q.'%q_rowid'\x00Schema corrupt or not an rtree\x00_rowid\x00_parent\x00END\x00wrong number of arguments to function rtreecheck()\x00[\x00[%!g,%!g],\x00[%!g,%!g]]\x00\x00CREATE TABLE x(_shape\x00,%s\x00rtree\x00fullscan\x00_shape does not contain a valid polygon\x00geopoly_overlap\x00geopoly_within\x00geopoly\x00geopoly_area\x00geopoly_blob\x00geopoly_json\x00geopoly_svg\x00geopoly_contains_point\x00geopoly_debug\x00geopoly_bbox\x00geopoly_xform\x00geopoly_regular\x00geopoly_ccw\x00geopoly_group_bbox\x00rtreenode\x00rtreedepth\x00rtreecheck\x00rtree_i32\x00corrupt fossil delta\x00DROP TRIGGER IF EXISTS temp.rbu_insert_tr;DROP TRIGGER IF EXISTS temp.rbu_update1_tr;DROP TRIGGER IF EXISTS temp.rbu_update2_tr;DROP TRIGGER IF 
EXISTS temp.rbu_delete_tr;\x00SELECT rbu_target_name(name, type='view') AS target, name FROM sqlite_schema WHERE type IN ('table', 'view') AND target IS NOT NULL %s ORDER BY name\x00AND rootpage!=0 AND rootpage IS NOT NULL\x00SELECT name, rootpage, sql IS NULL OR substr(8, 6)=='UNIQUE' FROM main.sqlite_schema WHERE type='index' AND tbl_name = ?\x00SELECT (sql COLLATE nocase BETWEEN 'CREATE VIRTUAL' AND 'CREATE VIRTUAM'), rootpage FROM sqlite_schema WHERE name=%Q\x00PRAGMA index_list=%Q\x00SELECT rootpage FROM sqlite_schema WHERE name = %Q\x00PRAGMA table_info=%Q\x00PRAGMA main.index_list = %Q\x00PRAGMA main.index_xinfo = %Q\x00SELECT * FROM '%q'\x00rbu_\x00rbu_rowid\x00table %q %s rbu_rowid column\x00may not have\x00requires\x00PRAGMA table_info(%Q)\x00column missing from %q: %s\x00%z%s\"%w\"\x00%z%s%s\"%w\"%s\x00SELECT max(_rowid_) FROM \"%s%w\"\x00 WHERE _rowid_ > %lld \x00 DESC\x00quote(\x00||','||\x00SELECT %s FROM \"%s%w\" ORDER BY %s LIMIT 1\x00 WHERE (%s) > (%s) \x00_rowid_\x00%z%s \"%w\" COLLATE %Q\x00%z%s \"rbu_imp_%d%w\" COLLATE %Q DESC\x00%z%s quote(\"rbu_imp_%d%w\")\x00SELECT %s FROM \"rbu_imp_%w\" ORDER BY %s LIMIT 1\x00%z%s%s\x00(%s) > (%s)\x00%z%s(%.*s) COLLATE %Q\x00%z%s\"%w\" COLLATE %Q\x00%z%s\"rbu_imp_%d%w\"%s\x00%z%s\"rbu_imp_%d%w\" %s COLLATE %Q\x00%z%s\"rbu_imp_%d%w\" IS ?\x00%z%s%s.\"%w\"\x00%z%sNULL\x00%z, %s._rowid_\x00_rowid_ = ?%d\x00%z%sc%d=?%d\x00_rowid_ = (SELECT id FROM rbu_imposter2 WHERE %z)\x00%z%s\"%w\"=?%d\x00invalid rbu_control value\x00%z%s\"%w\"=rbu_delta(\"%w\", ?%d)\x00%z%s\"%w\"=rbu_fossil_delta(\"%w\", ?%d)\x00PRIMARY KEY(\x00%z%s\"%w\"%s\x00%z)\x00SELECT name FROM sqlite_schema WHERE rootpage = ?\x00%z%sc%d %s COLLATE %Q\x00%z%sc%d%s\x00%z, id INTEGER\x00CREATE TABLE rbu_imposter2(%z, PRIMARY KEY(%z)) WITHOUT ROWID\x00PRIMARY KEY \x00%z%s\"%w\" %s %sCOLLATE %Q%s\x00 NOT NULL\x00%z, %z\x00CREATE TABLE \"rbu_imp_%w\"(%z)%s\x00 WITHOUT ROWID\x00INSERT INTO %s.'rbu_tmp_%q'(rbu_control,%s%s) VALUES(%z)\x00SELECT trim(sql) FROM sqlite_schema WHERE type='index' AND name=?\x00 LIMIT -1 OFFSET %d\x00CREATE TABLE \"rbu_imp_%w\"( %s, PRIMARY KEY( %s ) ) WITHOUT ROWID\x00INSERT INTO \"rbu_imp_%w\" VALUES(%s)\x00DELETE FROM \"rbu_imp_%w\" WHERE %s\x00SELECT %s, 0 AS rbu_control FROM '%q' %s %s %s ORDER BY %s%s\x00AND\x00WHERE\x00SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' %s ORDER BY %s%s\x00SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' %s UNION ALL SELECT %s, rbu_control FROM '%q' %s %s typeof(rbu_control)='integer' AND rbu_control!=1 ORDER BY %s%s\x00rbu_imp_\x00INSERT INTO \"%s%w\"(%s%s) VALUES(%s)\x00, _rowid_\x00DELETE FROM \"%s%w\" WHERE %s\x00, rbu_rowid\x00CREATE TABLE IF NOT EXISTS %s.'rbu_tmp_%q' AS SELECT *%s FROM '%q' WHERE 0;\x00, 0 AS rbu_rowid\x00CREATE TEMP TRIGGER rbu_delete_tr BEFORE DELETE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(3, %s);END;CREATE TEMP TRIGGER rbu_update1_tr BEFORE UPDATE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(3, %s);END;CREATE TEMP TRIGGER rbu_update2_tr AFTER UPDATE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(4, %s);END;\x00CREATE TEMP TRIGGER rbu_insert_tr AFTER INSERT ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(0, %s);END;\x00,_rowid_ \x00,rbu_rowid\x00SELECT %s,%s rbu_control%s FROM '%q'%s %s %s %s\x000 AS \x00UPDATE \"%s%w\" SET %s WHERE %s\x00SELECT k, v FROM %s.rbu_state\x00file:///%s-vacuum?modeof=%s\x00ATTACH %Q AS stat\x00CREATE TABLE IF NOT EXISTS %s.rbu_state(k INTEGER PRIMARY KEY, v)\x00cannot vacuum wal mode 
database\x00file:%s-vactmp?rbu_memory=1%s%s\x00&\x00rbu_tmp_insert\x00rbu_fossil_delta\x00rbu_target_name\x00SELECT * FROM sqlite_schema\x00rbu vfs not found\x00PRAGMA main.wal_checkpoint=restart\x00rbu_exclusive_checkpoint\x00%s-oal\x00%s-wal\x00PRAGMA schema_version\x00PRAGMA schema_version = %d\x00INSERT OR REPLACE INTO %s.rbu_state(k, v) VALUES (%d, %d), (%d, %Q), (%d, %Q), (%d, %d), (%d, %d), (%d, %lld), (%d, %lld), (%d, %lld), (%d, %lld), (%d, %Q) \x00PRAGMA main.%s\x00PRAGMA main.%s = %d\x00PRAGMA writable_schema=1\x00SELECT sql FROM sqlite_schema WHERE sql!='' AND rootpage!=0 AND name!='sqlite_sequence' ORDER BY type DESC\x00SELECT * FROM sqlite_schema WHERE rootpage=0 OR rootpage IS NULL\x00INSERT INTO sqlite_schema VALUES(?,?,?,?,?)\x00PRAGMA writable_schema=0\x00DELETE FROM %s.'rbu_tmp_%q'\x00rbu_state mismatch error\x00rbu_vfs_%d\x00SELECT count(*) FROM sqlite_schema WHERE type='index' AND tbl_name = %Q\x00rbu_index_cnt\x00SELECT 1 FROM sqlite_schema WHERE tbl_name = 'rbu_count'\x00SELECT sum(cnt * (1 + rbu_index_cnt(rbu_target_name(tbl))))FROM rbu_count\x00cannot update wal mode database\x00database modified during rbu %s\x00vacuum\x00update\x00BEGIN IMMEDIATE\x00PRAGMA journal_mode=off\x00-vactmp\x00DELETE FROM stat.rbu_state\x00rbu/zipvfs setup error\x00rbu(%s)/%z\x00rbu_memory\x00SELECT 0, 'tbl', '', 0, '', 1 UNION ALL SELECT 1, 'idx', '', 0, '', 2 UNION ALL SELECT 2, 'stat', '', 0, '', 0\x00PRAGMA '%q'.table_info('%q')\x00%z%s\"%w\".\"%w\".\"%w\"=\"%w\".\"%w\".\"%w\"\x00%z%s\"%w\".\"%w\".\"%w\" IS NOT \"%w\".\"%w\".\"%w\"\x00 OR \x00SELECT * FROM \"%w\".\"%w\" WHERE NOT EXISTS ( SELECT 1 FROM \"%w\".\"%w\" WHERE %s)\x00SELECT * FROM \"%w\".\"%w\", \"%w\".\"%w\" WHERE %s AND (%z)\x00table schemas do not match\x00SELECT tbl, ?2, stat FROM %Q.sqlite_stat1 WHERE tbl IS ?1 AND idx IS (CASE WHEN ?2=X'' THEN NULL ELSE ?2 END)\x00SELECT * FROM \x00 WHERE \x00 IS ?\x00SAVEPOINT changeset\x00RELEASE changeset\x00UPDATE main.\x00 SET \x00 = ?\x00idx IS CASE WHEN length(?4)=0 AND typeof(?4)='blob' THEN NULL ELSE ?4 END \x00DELETE FROM main.\x00 AND (?\x00AND \x00INSERT INTO main.\x00) VALUES(?\x00, ?\x00INSERT INTO main.sqlite_stat1 VALUES(?1, CASE WHEN length(?2)=0 AND typeof(?2)='blob' THEN NULL ELSE ?2 END, ?3)\x00DELETE FROM main.sqlite_stat1 WHERE tbl=?1 AND idx IS CASE WHEN length(?2)=0 AND typeof(?2)='blob' THEN NULL ELSE ?2 END AND (?4 OR stat IS ?3)\x00SAVEPOINT replace_op\x00RELEASE replace_op\x00SAVEPOINT changeset_apply\x00PRAGMA defer_foreign_keys = 1\x00sqlite3changeset_apply(): no such table: %s\x00sqlite3changeset_apply(): table %s has %d columns, expected %d or more\x00sqlite3changeset_apply(): primary key mismatch for table %s\x00PRAGMA defer_foreign_keys = 0\x00RELEASE changeset_apply\x00ROLLBACK TO changeset_apply\x00fts5: parser stack overflow\x00fts5: syntax error near \"%.*s\"\x00%z%.*s\x00wrong number of arguments to function highlight()\x00wrong number of arguments to function snippet()\x00snippet\x00highlight\x00bm25\x00prefix\x00malformed prefix=... directive\x00too many prefix indexes (max %d)\x00prefix length out of range (max 999)\x00tokenize\x00multiple tokenize=... directives\x00parse error in tokenize directive\x00content\x00multiple content=... directives\x00%Q.%Q\x00content_rowid\x00multiple content_rowid=... directives\x00columnsize\x00malformed columnsize=... directive\x00columns\x00malformed detail=... 
directive\x00unrecognized option: \"%.*s\"\x00reserved fts5 column name: %s\x00unindexed\x00unrecognized column option: %s\x00T.%Q\x00, T.%Q\x00, T.c%d\x00reserved fts5 table name: %s\x00parse error in \"%s\"\x00docsize\x00%Q.'%q_%s'\x00CREATE TABLE x(\x00%z%s%Q\x00%z, %Q HIDDEN, %s HIDDEN)\x00pgsz\x00hashsize\x00automerge\x00usermerge\x00crisismerge\x00SELECT k, v FROM %Q.'%q_config'\x00version\x00invalid fts5 file format (found %d, expected %d) - run 'rebuild'\x00unterminated string\x00fts5: syntax error near \"%.1s\"\x00OR\x00NOT\x00NEAR\x00expected integer, got \"%.*s\"\x00fts5: column queries are not supported (detail=none)\x00fts5: %s queries are not supported (detail!=full)\x00phrase\x00block\x00REPLACE INTO '%q'.'%q_data'(id, block) VALUES(?,?)\x00DELETE FROM '%q'.'%q_data' WHERE id>=? AND id<=?\x00DELETE FROM '%q'.'%q_idx' WHERE segid=?\x00PRAGMA %Q.data_version\x00SELECT pgno FROM '%q'.'%q_idx' WHERE segid=? AND term<=? ORDER BY term DESC LIMIT 1\x00INSERT INTO '%q'.'%q_idx'(segid,term,pgno) VALUES(?,?,?)\x00%s_data\x00id INTEGER PRIMARY KEY, block BLOB\x00segid, term, pgno, PRIMARY KEY(segid, term)\x00SELECT segid, term, (pgno>>1), (pgno&1) FROM %Q.'%q_idx' WHERE segid=%d ORDER BY 1, 2\x00\x00\x00\x00\x00\x00recursively defined fts5 content table\x00SELECT rowid, rank FROM %Q.%Q ORDER BY %s(\"%w\"%s%s) %s\x00DESC\x00ASC\x00reads\x00unknown special query: %.*s\x00SELECT %s\x00no such function: %s\x00parse error in rank function: %s\x00%s: table does not support scanning\x00delete-all\x00'delete-all' may only be used with a contentless or external content fts5 table\x00rebuild\x00'rebuild' may not be used with a contentless fts5 table\x00merge\x00integrity-check\x00cannot %s contentless fts5 table: %s\x00DELETE from\x00no such cursor: %lld\x00no such tokenizer: %s\x00error in tokenizer constructor\x00fts5_api_ptr\x00fts5: 2023-03-22 11:56:21 0d1fc92f94cb6b76bffe3ec34d69cffde2924203304e8ffc4155597af0c191da\x00config\x00fts5\x00fts5_source_id\x00SELECT %s FROM %s T WHERE T.%Q >= ? AND T.%Q <= ? ORDER BY T.%Q ASC\x00SELECT %s FROM %s T WHERE T.%Q <= ? AND T.%Q >= ? 
ORDER BY T.%Q DESC\x00SELECT %s FROM %s T WHERE T.%Q=?\x00INSERT INTO %Q.'%q_content' VALUES(%s)\x00REPLACE INTO %Q.'%q_content' VALUES(%s)\x00DELETE FROM %Q.'%q_content' WHERE id=?\x00REPLACE INTO %Q.'%q_docsize' VALUES(?,?)\x00DELETE FROM %Q.'%q_docsize' WHERE id=?\x00SELECT sz FROM %Q.'%q_docsize' WHERE id=?\x00REPLACE INTO %Q.'%q_config' VALUES(?,?)\x00SELECT %s FROM %s AS T\x00DROP TABLE IF EXISTS %Q.'%q_data';DROP TABLE IF EXISTS %Q.'%q_idx';DROP TABLE IF EXISTS %Q.'%q_config';\x00DROP TABLE IF EXISTS %Q.'%q_docsize';\x00DROP TABLE IF EXISTS %Q.'%q_content';\x00ALTER TABLE %Q.'%q_%s' RENAME TO '%q_%s';\x00CREATE TABLE %Q.'%q_%q'(%s)%s\x00fts5: error creating shadow table %q_%s: %s\x00id INTEGER PRIMARY KEY\x00, c%d\x00id INTEGER PRIMARY KEY, sz BLOB\x00k PRIMARY KEY, v\x00DELETE FROM %Q.'%q_data';DELETE FROM %Q.'%q_idx';\x00DELETE FROM %Q.'%q_docsize';\x00SELECT count(*) FROM %Q.'%q_%s'\x00tokenchars\x00separators\x00L* N* Co\x00categories\x00remove_diacritics\x00unicode61\x00al\x00ance\x00ence\x00er\x00ic\x00able\x00ible\x00ant\x00ement\x00ment\x00ent\x00ion\x00ou\x00ism\x00ate\x00iti\x00ous\x00ive\x00ize\x00at\x00bl\x00ble\x00iz\x00ational\x00tional\x00tion\x00enci\x00anci\x00izer\x00logi\x00bli\x00alli\x00entli\x00eli\x00e\x00ousli\x00ization\x00ation\x00ator\x00alism\x00iveness\x00fulness\x00ful\x00ousness\x00aliti\x00iviti\x00biliti\x00ical\x00ness\x00icate\x00iciti\x00ative\x00alize\x00eed\x00ee\x00ed\x00ing\x00case_sensitive\x00ascii\x00porter\x00trigram\x00col\x00row\x00instance\x00fts5vocab: unknown table type: %Q\x00CREATE TABlE vocab(term, col, doc, cnt)\x00CREATE TABlE vocab(term, doc, cnt)\x00CREATE TABlE vocab(term, doc, col, offset)\x00wrong number of vtable arguments\x00recursive definition for %s.%s\x00SELECT t.%Q FROM %Q.%Q AS t WHERE t.%Q MATCH '*id'\x00no such fts5 table: %s.%s\x00fts5vocab\x002023-03-22 11:56:21 0d1fc92f94cb6b76bffe3ec34d69cffde2924203304e8ffc4155597af0c191da\x00" var ts = (*reflect.StringHeader)(unsafe.Pointer(&ts1)).Data diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/sqlite/lib/sqlite_linux_arm.go temporal-1.22.5/src/vendor/modernc.org/sqlite/lib/sqlite_linux_arm.go --- temporal-1.21.5-1/src/vendor/modernc.org/sqlite/lib/sqlite_linux_arm.go 2023-09-29 14:03:35.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/sqlite/lib/sqlite_linux_arm.go 2024-02-23 09:46:16.000000000 +0000 @@ -1,4 +1,4 @@ -// Code generated by 'ccgo -DSQLITE_PRIVATE= -export-defines "" -export-enums "" -export-externs X -export-fields F -export-typedefs "" -ignore-unsupported-alignment -pkgname sqlite3 -volatile=sqlite3_io_error_pending,sqlite3_open_file_count,sqlite3_pager_readdb_count,sqlite3_pager_writedb_count,sqlite3_pager_writej_count,sqlite3_search_count,sqlite3_sort_count,saved_cnt,randomnessPid -o lib/sqlite_linux_arm.go -trace-translation-units testdata/sqlite-amalgamation-3410000/sqlite3.c -full-path-comments -DNDEBUG -DHAVE_USLEEP -DLONGDOUBLE_TYPE=double -DSQLITE_CORE -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_FTS5 -DSQLITE_ENABLE_GEOPOLY -DSQLITE_ENABLE_MATH_FUNCTIONS -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_OFFSET_SQL_FUNC -DSQLITE_ENABLE_PREUPDATE_HOOK -DSQLITE_ENABLE_RBU -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_SESSION -DSQLITE_ENABLE_SNAPSHOT -DSQLITE_ENABLE_STAT4 -DSQLITE_ENABLE_UNLOCK_NOTIFY -DSQLITE_LIKE_DOESNT_MATCH_BLOBS -DSQLITE_MUTEX_APPDEF=1 -DSQLITE_MUTEX_NOOP -DSQLITE_SOUNDEX -DSQLITE_THREADSAFE=1 -DSQLITE_OS_UNIX=1', DO NOT EDIT. 
+// Code generated by 'ccgo -DSQLITE_PRIVATE= -export-defines "" -export-enums "" -export-externs X -export-fields F -export-typedefs "" -ignore-unsupported-alignment -pkgname sqlite3 -volatile=sqlite3_io_error_pending,sqlite3_open_file_count,sqlite3_pager_readdb_count,sqlite3_pager_writedb_count,sqlite3_pager_writej_count,sqlite3_search_count,sqlite3_sort_count,saved_cnt,randomnessPid -o lib/sqlite_linux_arm.go -trace-translation-units testdata/sqlite-amalgamation-3410200/sqlite3.c -full-path-comments -DNDEBUG -DHAVE_USLEEP -DLONGDOUBLE_TYPE=double -DSQLITE_CORE -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_FTS5 -DSQLITE_ENABLE_GEOPOLY -DSQLITE_ENABLE_MATH_FUNCTIONS -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_OFFSET_SQL_FUNC -DSQLITE_ENABLE_PREUPDATE_HOOK -DSQLITE_ENABLE_RBU -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_SESSION -DSQLITE_ENABLE_SNAPSHOT -DSQLITE_ENABLE_STAT4 -DSQLITE_ENABLE_UNLOCK_NOTIFY -DSQLITE_LIKE_DOESNT_MATCH_BLOBS -DSQLITE_MUTEX_APPDEF=1 -DSQLITE_MUTEX_NOOP -DSQLITE_SOUNDEX -DSQLITE_THREADSAFE=1 -DSQLITE_OS_UNIX=1', DO NOT EDIT. package sqlite3 @@ -893,11 +893,11 @@ NC_OrderAgg = 0x8000000 NC_PartIdx = 0x000002 NC_SelfRef = 0x00002e + NC_Subquery = 0x000040 NC_UAggInfo = 0x000100 NC_UBaseReg = 0x000400 NC_UEList = 0x000080 NC_UUpsert = 0x000200 - NC_VarSelect = 0x000040 NDEBUG = 1 NN = 1 NOT_WITHIN = 0 @@ -2139,7 +2139,7 @@ SQLITE_SHM_UNLOCK = 1 SQLITE_SORTER_PMASZ = 250 SQLITE_SOUNDEX = 1 - SQLITE_SOURCE_ID = "2023-02-21 18:09:37 05941c2a04037fc3ed2ffae11f5d2260706f89431f463518740f72ada350866d" + SQLITE_SOURCE_ID = "2023-03-22 11:56:21 0d1fc92f94cb6b76bffe3ec34d69cffde2924203304e8ffc4155597af0c191da" SQLITE_SO_ASC = 0 SQLITE_SO_DESC = 1 SQLITE_SO_UNDEFINED = -1 @@ -2247,8 +2247,8 @@ SQLITE_UTF8 = 1 SQLITE_VDBEINT_H = 0 SQLITE_VDBE_H = 0 - SQLITE_VERSION = "3.41.0" - SQLITE_VERSION_NUMBER = 3041000 + SQLITE_VERSION = "3.41.2" + SQLITE_VERSION_NUMBER = 3041002 SQLITE_VTABRISK_High = 2 SQLITE_VTABRISK_Low = 0 SQLITE_VTABRISK_Normal = 1 @@ -6062,7 +6062,8 @@ FiIdxCur int32 FiIdxCol int32 FbMaybeNullRow U8 - F__ccgo_pad1 [3]byte + Faff U8 + F__ccgo_pad1 [2]byte FpIENext uintptr } @@ -6691,17 +6692,18 @@ // Handle type for pages. type PgHdr2 = struct { - FpPage uintptr - FpData uintptr - FpExtra uintptr - FpCache uintptr - FpDirty uintptr - FpPager uintptr - Fpgno Pgno - Fflags U16 - FnRef I16 - FpDirtyNext uintptr - FpDirtyPrev uintptr + FpPage uintptr + FpData uintptr + FpExtra uintptr + FpCache uintptr + FpDirty uintptr + FpPager uintptr + Fpgno Pgno + Fflags U16 + F__ccgo_pad1 [2]byte + FnRef I64 + FpDirtyNext uintptr + FpDirtyPrev uintptr } // Handle type for pages. 
@@ -6914,14 +6916,15 @@ FpDirty uintptr FpDirtyTail uintptr FpSynced uintptr - FnRefSum int32 + F__ccgo_pad1 [4]byte + FnRefSum I64 FszCache int32 FszSpill int32 FszPage int32 FszExtra int32 FbPurgeable U8 FeCreate U8 - F__ccgo_pad1 [2]byte + F__ccgo_pad2 [2]byte FxStress uintptr FpStress uintptr FpCache uintptr @@ -7718,7 +7721,7 @@ _ = pMutex if op < 0 || op >= int32(uint32(unsafe.Sizeof([10]Sqlite3StatValueType{}))/uint32(unsafe.Sizeof(Sqlite3StatValueType(0)))) { - return Xsqlite3MisuseError(tls, 23229) + return Xsqlite3MisuseError(tls, 23233) } if statMutex[op] != 0 { pMutex = Xsqlite3Pcache1Mutex(tls) @@ -15803,7 +15806,7 @@ for p = (*UnixInodeInfo)(unsafe.Pointer(pInode)).FpUnused; p != 0; p = pNext { pNext = (*UnixUnusedFd)(unsafe.Pointer(p)).FpNext - robust_close(tls, pFile, (*UnixUnusedFd)(unsafe.Pointer(p)).Ffd, 38271) + robust_close(tls, pFile, (*UnixUnusedFd)(unsafe.Pointer(p)).Ffd, 38275) Xsqlite3_free(tls, p) } (*UnixInodeInfo)(unsafe.Pointer(pInode)).FpUnused = uintptr(0) @@ -16280,7 +16283,7 @@ var pFile uintptr = id unixUnmapfile(tls, pFile) if (*UnixFile)(unsafe.Pointer(pFile)).Fh >= 0 { - robust_close(tls, pFile, (*UnixFile)(unsafe.Pointer(pFile)).Fh, 39055) + robust_close(tls, pFile, (*UnixFile)(unsafe.Pointer(pFile)).Fh, 39059) (*UnixFile)(unsafe.Pointer(pFile)).Fh = -1 } @@ -16571,7 +16574,7 @@ if fd >= 0 { return SQLITE_OK } - return unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 40676), ts+3378, bp+8, 40676) + return unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 40680), ts+3378, bp+8, 40680) } func unixSync(tls *libc.TLS, id uintptr, flags int32) int32 { @@ -16588,14 +16591,14 @@ if rc != 0 { storeLastErrno(tls, pFile, *(*int32)(unsafe.Pointer(libc.X__errno_location(tls)))) - return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(4)<<8, ts+3666, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40717) + return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(4)<<8, ts+3666, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40721) } if int32((*UnixFile)(unsafe.Pointer(pFile)).FctrlFlags)&UNIXFILE_DIRSYNC != 0 { rc = (*(*func(*libc.TLS, uintptr, uintptr) int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 17*12 + 4)))(tls, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, bp) if rc == SQLITE_OK { full_fsync(tls, *(*int32)(unsafe.Pointer(bp)), 0, 0) - robust_close(tls, pFile, *(*int32)(unsafe.Pointer(bp)), 40731) + robust_close(tls, pFile, *(*int32)(unsafe.Pointer(bp)), 40735) } else { rc = SQLITE_OK } @@ -16615,7 +16618,7 @@ rc = robust_ftruncate(tls, (*UnixFile)(unsafe.Pointer(pFile)).Fh, nByte) if rc != 0 { storeLastErrno(tls, pFile, *(*int32)(unsafe.Pointer(libc.X__errno_location(tls)))) - return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(6)<<8, ts+3297, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40762) + return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(6)<<8, ts+3297, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40766) } else { if nByte < (*UnixFile)(unsafe.Pointer(pFile)).FmmapSize { (*UnixFile)(unsafe.Pointer(pFile)).FmmapSize = nByte @@ -16683,7 +16686,7 @@ if (*UnixFile)(unsafe.Pointer(pFile)).FszChunk <= 0 { if robust_ftruncate(tls, (*UnixFile)(unsafe.Pointer(pFile)).Fh, nByte) != 0 { storeLastErrno(tls, pFile, *(*int32)(unsafe.Pointer(libc.X__errno_location(tls)))) - return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(6)<<8, ts+3297, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40883) + return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(6)<<8, ts+3297, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40887) } } @@ -16910,7 +16913,7 @@ } Xsqlite3_free(tls, 
(*UnixShmNode)(unsafe.Pointer(p)).FapRegion) if (*UnixShmNode)(unsafe.Pointer(p)).FhShm >= 0 { - robust_close(tls, pFd, (*UnixShmNode)(unsafe.Pointer(p)).FhShm, 41442) + robust_close(tls, pFd, (*UnixShmNode)(unsafe.Pointer(p)).FhShm, 41446) (*UnixShmNode)(unsafe.Pointer(p)).FhShm = -1 } (*UnixInodeInfo)(unsafe.Pointer((*UnixShmNode)(unsafe.Pointer(p)).FpInode)).FpShmNode = uintptr(0) @@ -16938,7 +16941,7 @@ rc = unixShmSystemLock(tls, pDbFd, F_WRLCK, (22+SQLITE_SHM_NLOCK)*4+SQLITE_SHM_NLOCK, 1) if rc == SQLITE_OK && robust_ftruncate(tls, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FhShm, int64(3)) != 0 { - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(18)<<8, ts+3297, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename, 41499) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(18)<<8, ts+3297, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename, 41503) } } } else if int32((*flock)(unsafe.Pointer(bp+8)).Fl_type) == F_WRLCK { @@ -17037,7 +17040,7 @@ if !((*unixShmNode)(unsafe.Pointer(pShmNode)).FhShm < 0) { goto __10 } - rc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 41624), ts+3261, zShm, 41624) + rc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 41628), ts+3261, zShm, 41628) goto shm_open_err __10: ; @@ -17167,7 +17170,7 @@ goto __14 } zFile = (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(19)<<8, ts+3332, zFile, 41768) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(19)<<8, ts+3332, zFile, 41772) goto shmpage_out __14: ; @@ -17213,7 +17216,7 @@ if !(pMem == libc.UintptrFromInt32(-1)) { goto __20 } - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(21)<<8, ts+3419, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename, 41795) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(21)<<8, ts+3419, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename, 41799) goto shmpage_out __20: ; @@ -17444,7 +17447,7 @@ if pNew == libc.UintptrFromInt32(-1) { pNew = uintptr(0) nNew = int64(0) - unixLogErrorAtLine(tls, SQLITE_OK, zErr, (*UnixFile)(unsafe.Pointer(pFd)).FzPath, 42169) + unixLogErrorAtLine(tls, SQLITE_OK, zErr, (*UnixFile)(unsafe.Pointer(pFd)).FzPath, 42173) (*UnixFile)(unsafe.Pointer(pFd)).FmmapSizeMax = int64(0) } @@ -17578,7 +17581,7 @@ unixEnterMutex(tls) rc = findInodeInfo(tls, pNew, pNew+8) if rc != SQLITE_OK { - robust_close(tls, pNew, h, 42672) + robust_close(tls, pNew, h, 42676) h = -1 } unixLeaveMutex(tls) @@ -17599,7 +17602,7 @@ storeLastErrno(tls, pNew, 0) if rc != SQLITE_OK { if h >= 0 { - robust_close(tls, pNew, h, 42757) + robust_close(tls, pNew, h, 42761) } } else { (*Sqlite3_file)(unsafe.Pointer(pId)).FpMethods = pLockingStyle @@ -17915,7 +17918,7 @@ if !(fd < 0) { goto __19 } - rc2 = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43198), ts+3261, zName, 43198) + rc2 = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43202), ts+3261, zName, 43202) if !(rc == SQLITE_OK) { goto __20 } @@ -18006,7 +18009,7 @@ if *(*int32)(unsafe.Pointer(libc.X__errno_location(tls))) == ENOENT { rc = SQLITE_IOERR | int32(23)<<8 } else { - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(10)<<8, ts+3371, zPath, 43337) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(10)<<8, ts+3371, zPath, 43341) } return rc } @@ -18014,9 +18017,9 @@ rc = (*(*func(*libc.TLS, uintptr, uintptr) int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 17*12 + 4)))(tls, zPath, bp) if rc == SQLITE_OK { if full_fsync(tls, *(*int32)(unsafe.Pointer(bp)), 0, 0) != 0 { - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(5)<<8, ts+3788, zPath, 43347) + rc = 
unixLogErrorAtLine(tls, SQLITE_IOERR|int32(5)<<8, ts+3788, zPath, 43351) } - robust_close(tls, uintptr(0), *(*int32)(unsafe.Pointer(bp)), 43349) + robust_close(tls, uintptr(0), *(*int32)(unsafe.Pointer(bp)), 43353) } else { rc = SQLITE_OK } @@ -18080,18 +18083,18 @@ zIn = (*DbPath)(unsafe.Pointer(pPath)).FzOut if (*(*func(*libc.TLS, uintptr, uintptr) int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 27*12 + 4)))(tls, zIn, bp) != 0 { if *(*int32)(unsafe.Pointer(libc.X__errno_location(tls))) != ENOENT { - (*DbPath)(unsafe.Pointer(pPath)).Frc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43443), ts+3459, zIn, 43443) + (*DbPath)(unsafe.Pointer(pPath)).Frc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43447), ts+3459, zIn, 43447) } } else if (*stat)(unsafe.Pointer(bp)).Fst_mode&X__mode_t(0170000) == X__mode_t(0120000) { var got Ssize_t if libc.PostIncInt32(&(*DbPath)(unsafe.Pointer(pPath)).FnSymlink, 1) > SQLITE_MAX_SYMLINK { - (*DbPath)(unsafe.Pointer(pPath)).Frc = Xsqlite3CantopenError(tls, 43449) + (*DbPath)(unsafe.Pointer(pPath)).Frc = Xsqlite3CantopenError(tls, 43453) return } got = (*(*func(*libc.TLS, uintptr, uintptr, Size_t) Ssize_t)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*12 + 4)))(tls, zIn, bp+104, uint32(unsafe.Sizeof([4098]uint8{}))-uint32(2)) if got <= 0 || got >= Ssize_t(unsafe.Sizeof([4098]uint8{}))-2 { - (*DbPath)(unsafe.Pointer(pPath)).Frc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43454), ts+3450, zIn, 43454) + (*DbPath)(unsafe.Pointer(pPath)).Frc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43458), ts+3450, zIn, 43458) return } *(*uint8)(unsafe.Pointer(bp + 104 + uintptr(got))) = uint8(0) @@ -18131,14 +18134,14 @@ (*DbPath)(unsafe.Pointer(bp + 4100)).FzOut = zOut if int32(*(*uint8)(unsafe.Pointer(zPath))) != '/' { if (*(*func(*libc.TLS, uintptr, Size_t) uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 3*12 + 4)))(tls, bp, uint32(unsafe.Sizeof([4098]uint8{}))-uint32(2)) == uintptr(0) { - return unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43512), ts+3279, zPath, 43512) + return unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43516), ts+3279, zPath, 43516) } appendAllPathElements(tls, bp+4100, bp) } appendAllPathElements(tls, bp+4100, zPath) *(*uint8)(unsafe.Pointer(zOut + uintptr((*DbPath)(unsafe.Pointer(bp+4100)).FnUsed))) = uint8(0) if (*DbPath)(unsafe.Pointer(bp+4100)).Frc != 0 || (*DbPath)(unsafe.Pointer(bp+4100)).FnUsed < 2 { - return Xsqlite3CantopenError(tls, 43518) + return Xsqlite3CantopenError(tls, 43522) } if (*DbPath)(unsafe.Pointer(bp+4100)).FnSymlink != 0 { return SQLITE_OK | int32(2)<<8 @@ -18237,7 +18240,7 @@ for __ccgo := true; __ccgo; __ccgo = got < 0 && *(*int32)(unsafe.Pointer(libc.X__errno_location(tls))) == EINTR { got = (*(*func(*libc.TLS, int32, uintptr, Size_t) Ssize_t)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 8*12 + 4)))(tls, fd, zBuf, uint32(nBuf)) } - robust_close(tls, uintptr(0), fd, 43619) + robust_close(tls, uintptr(0), fd, 43623) } } @@ -19667,7 +19670,7 @@ libc.Xmemset(tls, pPgHdr+16, 0, uint32(unsafe.Sizeof(PgHdr{}))-uint32(uintptr(0)+16)) (*PgHdr)(unsafe.Pointer(pPgHdr)).FpPage = pPage (*PgHdr)(unsafe.Pointer(pPgHdr)).FpData = (*Sqlite3_pcache_page)(unsafe.Pointer(pPage)).FpBuf - (*PgHdr)(unsafe.Pointer(pPgHdr)).FpExtra = pPgHdr + 1*40 + (*PgHdr)(unsafe.Pointer(pPgHdr)).FpExtra = pPgHdr + 1*48 libc.Xmemset(tls, (*PgHdr)(unsafe.Pointer(pPgHdr)).FpExtra, 0, uint32(8)) (*PgHdr)(unsafe.Pointer(pPgHdr)).FpCache = pCache (*PgHdr)(unsafe.Pointer(pPgHdr)).Fpgno = 
pgno @@ -19697,7 +19700,7 @@ // reference count drops to 0, then it is made eligible for recycling. func Xsqlite3PcacheRelease(tls *libc.TLS, p uintptr) { (*PCache)(unsafe.Pointer((*PgHdr)(unsafe.Pointer(p)).FpCache)).FnRefSum-- - if int32(libc.PreDecInt16(&(*PgHdr)(unsafe.Pointer(p)).FnRef, 1)) == 0 { + if libc.PreDecInt64(&(*PgHdr)(unsafe.Pointer(p)).FnRef, 1) == int64(0) { if int32((*PgHdr)(unsafe.Pointer(p)).Fflags)&PGHDR_CLEAN != 0 { pcacheUnpin(tls, p) } else { @@ -19748,7 +19751,7 @@ *(*U16)(unsafe.Pointer(p + 28)) &= libc.Uint16FromInt32(libc.CplInt32(PGHDR_DIRTY | PGHDR_NEED_SYNC | PGHDR_WRITEABLE)) *(*U16)(unsafe.Pointer(p + 28)) |= U16(PGHDR_CLEAN) - if int32((*PgHdr)(unsafe.Pointer(p)).FnRef) == 0 { + if (*PgHdr)(unsafe.Pointer(p)).FnRef == int64(0) { pcacheUnpin(tls, p) } } @@ -19852,8 +19855,8 @@ } func pcacheMergeDirtyList(tls *libc.TLS, pA uintptr, pB uintptr) uintptr { - bp := tls.Alloc(40) - defer tls.Free(40) + bp := tls.Alloc(48) + defer tls.Free(48) var pTail uintptr pTail = bp @@ -19931,13 +19934,13 @@ // // This is not the total number of pages referenced, but the sum of the // reference count for all pages. -func Xsqlite3PcacheRefCount(tls *libc.TLS, pCache uintptr) int32 { +func Xsqlite3PcacheRefCount(tls *libc.TLS, pCache uintptr) I64 { return (*PCache)(unsafe.Pointer(pCache)).FnRefSum } // Return the number of references to the page supplied as an argument. -func Xsqlite3PcachePageRefcount(tls *libc.TLS, p uintptr) int32 { - return int32((*PgHdr)(unsafe.Pointer(p)).FnRef) +func Xsqlite3PcachePageRefcount(tls *libc.TLS, p uintptr) I64 { + return (*PgHdr)(unsafe.Pointer(p)).FnRef } // Return the total number of pages in the cache. @@ -22229,7 +22232,7 @@ pPg = Xsqlite3PagerLookup(tls, pPager, iPg) if pPg != 0 { - if Xsqlite3PcachePageRefcount(tls, pPg) == 1 { + if Xsqlite3PcachePageRefcount(tls, pPg) == int64(1) { Xsqlite3PcacheDrop(tls, pPg) } else { rc = readDbPage(tls, pPg) @@ -22662,7 +22665,7 @@ var pageSize U32 = *(*U32)(unsafe.Pointer(pPageSize)) if (int32((*Pager)(unsafe.Pointer(pPager)).FmemDb) == 0 || (*Pager)(unsafe.Pointer(pPager)).FdbSize == Pgno(0)) && - Xsqlite3PcacheRefCount(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache) == 0 && + Xsqlite3PcacheRefCount(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache) == int64(0) && pageSize != 0 && pageSize != U32((*Pager)(unsafe.Pointer(pPager)).FpageSize) { var pNew uintptr = uintptr(0) *(*I64)(unsafe.Pointer(bp)) = int64(0) @@ -22814,9 +22817,9 @@ Xsqlite3OsUnfetch(tls, (*Pager)(unsafe.Pointer(pPager)).Ffd, I64(pgno-Pgno(1))*(*Pager)(unsafe.Pointer(pPager)).FpageSize, pData) return SQLITE_NOMEM } - (*PgHdr)(unsafe.Pointer(p)).FpExtra = p + 1*40 + (*PgHdr)(unsafe.Pointer(p)).FpExtra = p + 1*48 (*PgHdr)(unsafe.Pointer(p)).Fflags = U16(PGHDR_MMAP) - (*PgHdr)(unsafe.Pointer(p)).FnRef = int16(1) + (*PgHdr)(unsafe.Pointer(p)).FnRef = int64(1) (*PgHdr)(unsafe.Pointer(p)).FpPager = pPager } @@ -23148,7 +23151,7 @@ for rc == SQLITE_OK && pList != 0 { var pNext uintptr = (*PgHdr)(unsafe.Pointer(pList)).FpDirty - if int32((*PgHdr)(unsafe.Pointer(pList)).FnRef) == 0 { + if (*PgHdr)(unsafe.Pointer(pList)).FnRef == int64(0) { rc = pagerStress(tls, pPager, pList) } pList = pNext @@ -23298,7 +23301,7 @@ goto __12 } - rc = Xsqlite3CantopenError(tls, 60235) + rc = Xsqlite3CantopenError(tls, 60239) __12: ; if !(rc != SQLITE_OK) { @@ -23679,7 +23682,7 @@ if !(rc == SQLITE_OK && *(*int32)(unsafe.Pointer(bp + 8))&SQLITE_OPEN_READONLY != 0) { goto __10 } - rc = Xsqlite3CantopenError(tls, 60754) + rc = Xsqlite3CantopenError(tls, 60758) 
Xsqlite3OsClose(tls, (*Pager)(unsafe.Pointer(pPager)).Fjfd) __10: ; @@ -23785,7 +23788,7 @@ } func pagerUnlockIfUnused(tls *libc.TLS, pPager uintptr) { - if Xsqlite3PcacheRefCount(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache) == 0 { + if Xsqlite3PcacheRefCount(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache) == int64(0) { pagerUnlockAndRollback(tls, pPager) } } @@ -23803,7 +23806,7 @@ if !(pgno == Pgno(0)) { goto __1 } - return Xsqlite3CorruptError(tls, 60967) + return Xsqlite3CorruptError(tls, 60971) __1: ; *(*uintptr)(unsafe.Pointer(bp)) = Xsqlite3PcacheFetch(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache, pgno, 3) @@ -23842,7 +23845,7 @@ if !(pgno == (*Pager)(unsafe.Pointer(pPager)).FlckPgno) { goto __7 } - rc = Xsqlite3CorruptError(tls, 60999) + rc = Xsqlite3CorruptError(tls, 61003) goto pager_acquire_err __7: ; @@ -23919,7 +23922,7 @@ (int32((*Pager)(unsafe.Pointer(pPager)).FeState) == PAGER_READER || flags&PAGER_GET_READONLY != 0)) if pgno <= Pgno(1) && pgno == Pgno(0) { - return Xsqlite3CorruptError(tls, 61078) + return Xsqlite3CorruptError(tls, 61082) } if bMmapOk != 0 && (*Pager)(unsafe.Pointer(pPager)).FpWal != uintptr(0) { @@ -24677,7 +24680,7 @@ // Return the number of references to the specified page. func Xsqlite3PagerPageRefcount(tls *libc.TLS, pPage uintptr) int32 { - return Xsqlite3PcachePageRefcount(tls, pPage) + return int32(Xsqlite3PcachePageRefcount(tls, pPage)) } // Parameter eStat must be one of SQLITE_DBSTATUS_CACHE_HIT, _MISS, _WRITE, @@ -24920,9 +24923,9 @@ pPgOld = Xsqlite3PagerLookup(tls, pPager, pgno) if pPgOld != 0 { - if int32((*PgHdr)(unsafe.Pointer(pPgOld)).FnRef) > 1 { + if (*PgHdr)(unsafe.Pointer(pPgOld)).FnRef > int64(1) { Xsqlite3PagerUnrefNotNull(tls, pPgOld) - return Xsqlite3CorruptError(tls, 62623) + return Xsqlite3CorruptError(tls, 62627) } *(*U16)(unsafe.Pointer(pPg + 28)) |= U16(int32((*PgHdr)(unsafe.Pointer(pPgOld)).Fflags) & PGHDR_NEED_SYNC) if (*Pager)(unsafe.Pointer(pPager)).FtempFile != 0 { @@ -25676,7 +25679,7 @@ nCollide = idx for iKey = walHash(tls, iPage); *(*Ht_slot)(unsafe.Pointer((*WalHashLoc)(unsafe.Pointer(bp)).FaHash + uintptr(iKey)*2)) != 0; iKey = walNextHash(tls, iKey) { if libc.PostDecInt32(&nCollide, 1) == 0 { - return Xsqlite3CorruptError(tls, 64387) + return Xsqlite3CorruptError(tls, 64391) } } *(*U32)(unsafe.Pointer((*WalHashLoc)(unsafe.Pointer(bp)).FaPgno + uintptr(idx-1)*4)) = iPage @@ -25775,7 +25778,7 @@ if !(version != U32(WAL_MAX_VERSION)) { goto __7 } - rc = Xsqlite3CantopenError(tls, 64519) + rc = Xsqlite3CantopenError(tls, 64523) goto finished __7: ; @@ -26360,7 +26363,7 @@ goto __14 } - rc = Xsqlite3CorruptError(tls, 65333) + rc = Xsqlite3CorruptError(tls, 65337) goto __15 __14: Xsqlite3OsFileControlHint(tls, (*Wal)(unsafe.Pointer(pWal)).FpDbFd, SQLITE_FCNTL_SIZE_HINT, bp+16) @@ -26635,7 +26638,7 @@ } if badHdr == 0 && (*Wal)(unsafe.Pointer(pWal)).Fhdr.FiVersion != U32(WALINDEX_MAX_VERSION) { - rc = Xsqlite3CantopenError(tls, 65682) + rc = Xsqlite3CantopenError(tls, 65686) } if (*Wal)(unsafe.Pointer(pWal)).FbShmUnreliable != 0 { if rc != SQLITE_OK { @@ -27108,7 +27111,7 @@ iRead = iFrame } if libc.PostDecInt32(&nCollide, 1) == 0 { - return Xsqlite3CorruptError(tls, 66419) + return Xsqlite3CorruptError(tls, 66423) } iKey = walNextHash(tls, iKey) } @@ -27613,7 +27616,7 @@ if rc == SQLITE_OK { if (*Wal)(unsafe.Pointer(pWal)).Fhdr.FmxFrame != 0 && walPagesize(tls, pWal) != nBuf { - rc = Xsqlite3CorruptError(tls, 67138) + rc = Xsqlite3CorruptError(tls, 67142) } else { rc = walCheckpoint(tls, pWal, db, eMode2, xBusy2, 
pBusyArg, sync_flags, zBuf) } @@ -28270,7 +28273,7 @@ } Xsqlite3VdbeRecordUnpack(tls, pKeyInfo, int32(nKey), pKey, pIdxKey) if int32((*UnpackedRecord)(unsafe.Pointer(pIdxKey)).FnField) == 0 || int32((*UnpackedRecord)(unsafe.Pointer(pIdxKey)).FnField) > int32((*KeyInfo)(unsafe.Pointer(pKeyInfo)).FnAllField) { - rc = Xsqlite3CorruptError(tls, 69249) + rc = Xsqlite3CorruptError(tls, 69253) } else { rc = Xsqlite3BtreeIndexMoveto(tls, pCur, pIdxKey, pRes) } @@ -28407,7 +28410,7 @@ if !(key == Pgno(0)) { goto __2 } - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69430) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69434) return __2: ; @@ -28424,7 +28427,7 @@ goto __4 } - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69443) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69447) goto ptrmap_exit __4: ; @@ -28432,7 +28435,7 @@ if !(offset < 0) { goto __5 } - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69448) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69452) goto ptrmap_exit __5: ; @@ -28475,7 +28478,7 @@ offset = int32(Pgno(5) * (key - Pgno(iPtrmap) - Pgno(1))) if offset < 0 { Xsqlite3PagerUnref(tls, *(*uintptr)(unsafe.Pointer(bp))) - return Xsqlite3CorruptError(tls, 69493) + return Xsqlite3CorruptError(tls, 69497) } *(*U8)(unsafe.Pointer(pEType)) = *(*U8)(unsafe.Pointer(pPtrmap + uintptr(offset))) @@ -28485,7 +28488,7 @@ Xsqlite3PagerUnref(tls, *(*uintptr)(unsafe.Pointer(bp))) if int32(*(*U8)(unsafe.Pointer(pEType))) < 1 || int32(*(*U8)(unsafe.Pointer(pEType))) > 5 { - return Xsqlite3CorruptError(tls, 69501) + return Xsqlite3CorruptError(tls, 69505) } return SQLITE_OK } @@ -28735,7 +28738,7 @@ if U32((*CellInfo)(unsafe.Pointer(bp)).FnLocal) < (*CellInfo)(unsafe.Pointer(bp)).FnPayload { var ovfl Pgno if Uptr((*MemPage)(unsafe.Pointer(pSrc)).FaDataEnd) >= Uptr(pCell) && Uptr((*MemPage)(unsafe.Pointer(pSrc)).FaDataEnd) < Uptr(pCell+uintptr((*CellInfo)(unsafe.Pointer(bp)).FnLocal)) { - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69893) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69897) return } ovfl = Xsqlite3Get4byte(tls, pCell+uintptr(int32((*CellInfo)(unsafe.Pointer(bp)).FnSize)-4)) @@ -28782,7 +28785,7 @@ if !(iFree > usableSize-4) { goto __2 } - return Xsqlite3CorruptError(tls, 69951) + return Xsqlite3CorruptError(tls, 69955) __2: ; if !(iFree != 0) { @@ -28792,7 +28795,7 @@ if !(iFree2 > usableSize-4) { goto __4 } - return Xsqlite3CorruptError(tls, 69954) + return Xsqlite3CorruptError(tls, 69958) __4: ; if !(0 == iFree2 || int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree2)))) == 0 && int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree2+1)))) == 0) { @@ -28805,7 +28808,7 @@ if !(top >= iFree) { goto __6 } - return Xsqlite3CorruptError(tls, 69962) + return Xsqlite3CorruptError(tls, 69966) __6: ; if !(iFree2 != 0) { @@ -28814,14 +28817,14 @@ if !(iFree+sz > iFree2) { goto __9 } - return Xsqlite3CorruptError(tls, 69965) + return Xsqlite3CorruptError(tls, 69969) __9: ; sz2 = int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree2+2))))<<8 | int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree2+2) + 1))) if !(iFree2+sz2 > usableSize) { goto __10 } - return Xsqlite3CorruptError(tls, 69967) + return Xsqlite3CorruptError(tls, 69971) __10: ; libc.Xmemmove(tls, data+uintptr(iFree+sz+sz2), data+uintptr(iFree+sz), uint32(iFree2-(iFree+sz))) @@ -28831,7 +28834,7 @@ if !(iFree+sz > usableSize) { goto __11 } - return Xsqlite3CorruptError(tls, 69971) + return Xsqlite3CorruptError(tls, 69975) 
__11: ; __8: @@ -28895,7 +28898,7 @@ if !(pc < iCellStart || pc > iCellLast) { goto __22 } - return Xsqlite3CorruptError(tls, 70004) + return Xsqlite3CorruptError(tls, 70008) __22: ; size = int32((*struct { @@ -28905,7 +28908,7 @@ if !(cbrk < iCellStart || pc+size > usableSize) { goto __23 } - return Xsqlite3CorruptError(tls, 70010) + return Xsqlite3CorruptError(tls, 70014) __23: ; *(*U8)(unsafe.Pointer(pAddr1)) = U8(cbrk >> 8) @@ -28927,7 +28930,7 @@ if !(int32(*(*uint8)(unsafe.Pointer(data + uintptr(hdr+7))))+cbrk-iCellFirst != (*MemPage)(unsafe.Pointer(pPage)).FnFree) { goto __24 } - return Xsqlite3CorruptError(tls, 70024) + return Xsqlite3CorruptError(tls, 70028) __24: ; *(*uint8)(unsafe.Pointer(data + uintptr(hdr+5))) = U8(cbrk >> 8) @@ -28962,7 +28965,7 @@ *(*U8)(unsafe.Pointer(aData + uintptr(hdr+7))) += U8(int32(U8(x))) return aData + uintptr(pc) } else if x+pc > maxPC { - *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70081) + *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70085) return uintptr(0) } else { *(*U8)(unsafe.Pointer(aData + uintptr(pc+2))) = U8(x >> 8) @@ -28975,13 +28978,13 @@ pc = int32(*(*U8)(unsafe.Pointer(pTmp)))<<8 | int32(*(*U8)(unsafe.Pointer(pTmp + 1))) if pc <= iAddr { if pc != 0 { - *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70096) + *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70100) } return uintptr(0) } } if pc > maxPC+nByte-4 { - *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70103) + *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70107) } return uintptr(0) } @@ -29006,7 +29009,7 @@ if top == 0 && (*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize == U32(65536) { top = 65536 } else { - return Xsqlite3CorruptError(tls, 70152) + return Xsqlite3CorruptError(tls, 70156) } } @@ -29017,7 +29020,7 @@ *(*int32)(unsafe.Pointer(pIdx)) = libc.AssignInt32(&g2, (int32(pSpace)-int32(data))/1) if g2 <= gap { - return Xsqlite3CorruptError(tls, 70170) + return Xsqlite3CorruptError(tls, 70174) } else { return SQLITE_OK } @@ -29069,22 +29072,22 @@ if int32(iFreeBlk) == 0 { break } - return Xsqlite3CorruptError(tls, 70249) + return Xsqlite3CorruptError(tls, 70253) } iPtr = iFreeBlk } if U32(iFreeBlk) > (*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize-U32(4) { - return Xsqlite3CorruptError(tls, 70254) + return Xsqlite3CorruptError(tls, 70258) } if iFreeBlk != 0 && iEnd+U32(3) >= U32(iFreeBlk) { nFrag = U8(U32(iFreeBlk) - iEnd) if iEnd > U32(iFreeBlk) { - return Xsqlite3CorruptError(tls, 70266) + return Xsqlite3CorruptError(tls, 70270) } iEnd = U32(int32(iFreeBlk) + (int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(iFreeBlk)+2))))<<8 | int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(iFreeBlk)+2) + 1))))) if iEnd > (*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize { - return Xsqlite3CorruptError(tls, 70269) + return Xsqlite3CorruptError(tls, 70273) } iSize = U16(iEnd - U32(iStart)) iFreeBlk = U16(int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFreeBlk))))<<8 | int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFreeBlk) + 1)))) @@ -29094,7 +29097,7 @@ var iPtrEnd int32 = int32(iPtr) + (int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(iPtr)+2))))<<8 | int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(iPtr)+2) + 1)))) if iPtrEnd+3 >= int32(iStart) { if iPtrEnd > int32(iStart) { - return Xsqlite3CorruptError(tls, 70282) + return Xsqlite3CorruptError(tls, 70286) } nFrag = U8(int32(nFrag) + (int32(iStart) - 
iPtrEnd)) iSize = U16(iEnd - U32(iPtr)) @@ -29102,7 +29105,7 @@ } } if int32(nFrag) > int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+7)))) { - return Xsqlite3CorruptError(tls, 70288) + return Xsqlite3CorruptError(tls, 70292) } *(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+7))) -= uint8(int32(nFrag)) } @@ -29110,10 +29113,10 @@ x = U16(int32(*(*U8)(unsafe.Pointer(pTmp)))<<8 | int32(*(*U8)(unsafe.Pointer(pTmp + 1)))) if int32(iStart) <= int32(x) { if int32(iStart) < int32(x) { - return Xsqlite3CorruptError(tls, 70297) + return Xsqlite3CorruptError(tls, 70301) } if int32(iPtr) != int32(hdr)+1 { - return Xsqlite3CorruptError(tls, 70298) + return Xsqlite3CorruptError(tls, 70302) } *(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+1))) = U8(int32(iFreeBlk) >> 8) *(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+1) + 1)) = U8(iFreeBlk) @@ -29173,7 +29176,7 @@ (*MemPage)(unsafe.Pointer(pPage)).FxParseCell = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr) }{btreeParseCellPtrIndex})) - return Xsqlite3CorruptError(tls, 70357) + return Xsqlite3CorruptError(tls, 70361) } } else { (*MemPage)(unsafe.Pointer(pPage)).FchildPtrSize = U8(4) @@ -29209,7 +29212,7 @@ (*MemPage)(unsafe.Pointer(pPage)).FxParseCell = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr) }{btreeParseCellPtrIndex})) - return Xsqlite3CorruptError(tls, 70381) + return Xsqlite3CorruptError(tls, 70385) } } return SQLITE_OK @@ -29239,11 +29242,11 @@ var next U32 var size U32 if pc < top { - return Xsqlite3CorruptError(tls, 70432) + return Xsqlite3CorruptError(tls, 70436) } for 1 != 0 { if pc > iCellLast { - return Xsqlite3CorruptError(tls, 70437) + return Xsqlite3CorruptError(tls, 70441) } next = U32(int32(*(*U8)(unsafe.Pointer(data + uintptr(pc))))<<8 | int32(*(*U8)(unsafe.Pointer(data + uintptr(pc) + 1)))) size = U32(int32(*(*U8)(unsafe.Pointer(data + uintptr(pc+2))))<<8 | int32(*(*U8)(unsafe.Pointer(data + uintptr(pc+2) + 1)))) @@ -29254,15 +29257,15 @@ pc = int32(next) } if next > U32(0) { - return Xsqlite3CorruptError(tls, 70447) + return Xsqlite3CorruptError(tls, 70451) } if U32(pc)+size > uint32(usableSize) { - return Xsqlite3CorruptError(tls, 70451) + return Xsqlite3CorruptError(tls, 70455) } } if nFree > usableSize || nFree < iCellFirst { - return Xsqlite3CorruptError(tls, 70463) + return Xsqlite3CorruptError(tls, 70467) } (*MemPage)(unsafe.Pointer(pPage)).FnFree = int32(U16(nFree - iCellFirst)) return SQLITE_OK @@ -29290,14 +29293,14 @@ pc = int32(libc.X__builtin_bswap16(tls, *(*U16)(unsafe.Pointer(data + uintptr(cellOffset+i*2))))) if pc < iCellFirst || pc > iCellLast { - return Xsqlite3CorruptError(tls, 70494) + return Xsqlite3CorruptError(tls, 70498) } sz = int32((*struct { f func(*libc.TLS, uintptr, uintptr) U16 })(unsafe.Pointer(&struct{ uintptr }{(*MemPage)(unsafe.Pointer(pPage)).FxCellSize})).f(tls, pPage, data+uintptr(pc))) if pc+sz > usableSize { - return Xsqlite3CorruptError(tls, 70499) + return Xsqlite3CorruptError(tls, 70503) } } return SQLITE_OK @@ -29311,7 +29314,7 @@ data = (*MemPage)(unsafe.Pointer(pPage)).FaData + uintptr((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset) if decodeFlags(tls, pPage, int32(*(*U8)(unsafe.Pointer(data)))) != 0 { - return Xsqlite3CorruptError(tls, 70531) + return Xsqlite3CorruptError(tls, 70535) } (*MemPage)(unsafe.Pointer(pPage)).FmaskPage = U16((*BtShared)(unsafe.Pointer(pBt)).FpageSize - U32(1)) @@ -29323,7 +29326,7 @@ (*MemPage)(unsafe.Pointer(pPage)).FnCell = U16(int32(*(*U8)(unsafe.Pointer(data + 3)))<<8 
| int32(*(*U8)(unsafe.Pointer(data + 3 + 1)))) if U32((*MemPage)(unsafe.Pointer(pPage)).FnCell) > ((*BtShared)(unsafe.Pointer(pBt)).FpageSize-U32(8))/U32(6) { - return Xsqlite3CorruptError(tls, 70545) + return Xsqlite3CorruptError(tls, 70549) } (*MemPage)(unsafe.Pointer(pPage)).FnFree = -1 @@ -29426,7 +29429,7 @@ if !(pgno > btreePagecount(tls, pBt)) { goto __1 } - rc = Xsqlite3CorruptError(tls, 70700) + rc = Xsqlite3CorruptError(tls, 70704) goto getAndInitPage_error1 __1: ; @@ -29454,7 +29457,7 @@ if !(pCur != 0 && (int32((*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppPage)))).FnCell) < 1 || int32((*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppPage)))).FintKey) != int32((*BtCursor)(unsafe.Pointer(pCur)).FcurIntKey))) { goto __5 } - rc = Xsqlite3CorruptError(tls, 70721) + rc = Xsqlite3CorruptError(tls, 70725) goto getAndInitPage_error2 __5: ; @@ -29493,7 +29496,7 @@ if Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppPage)))).FpDbPage) > 1 { releasePage(tls, *(*uintptr)(unsafe.Pointer(ppPage))) *(*uintptr)(unsafe.Pointer(ppPage)) = uintptr(0) - return Xsqlite3CorruptError(tls, 70787) + return Xsqlite3CorruptError(tls, 70791) } (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppPage)))).FisInit = U8(0) } else { @@ -30376,7 +30379,7 @@ if !(Xsqlite3WritableSchema(tls, (*BtShared)(unsafe.Pointer(pBt)).Fdb) == 0) { goto __18 } - rc = Xsqlite3CorruptError(tls, 71722) + rc = Xsqlite3CorruptError(tls, 71726) goto page1_init_failed goto __19 __18: @@ -30791,7 +30794,7 @@ if int32(eType) == PTRMAP_OVERFLOW2 { if Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer(pPage)).FaData) != iFrom { - return Xsqlite3CorruptError(tls, 72143) + return Xsqlite3CorruptError(tls, 72147) } Xsqlite3Put4byte(tls, (*MemPage)(unsafe.Pointer(pPage)).FaData, iTo) } else { @@ -30817,7 +30820,7 @@ })(unsafe.Pointer(&struct{ uintptr }{(*MemPage)(unsafe.Pointer(pPage)).FxParseCell})).f(tls, pPage, pCell, bp) if U32((*CellInfo)(unsafe.Pointer(bp)).FnLocal) < (*CellInfo)(unsafe.Pointer(bp)).FnPayload { if pCell+uintptr((*CellInfo)(unsafe.Pointer(bp)).FnSize) > (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr((*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize) { - return Xsqlite3CorruptError(tls, 72162) + return Xsqlite3CorruptError(tls, 72166) } if iFrom == Xsqlite3Get4byte(tls, pCell+uintptr((*CellInfo)(unsafe.Pointer(bp)).FnSize)-uintptr(4)) { Xsqlite3Put4byte(tls, pCell+uintptr((*CellInfo)(unsafe.Pointer(bp)).FnSize)-uintptr(4), iTo) @@ -30826,7 +30829,7 @@ } } else { if pCell+uintptr(4) > (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr((*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize) { - return Xsqlite3CorruptError(tls, 72171) + return Xsqlite3CorruptError(tls, 72175) } if Xsqlite3Get4byte(tls, pCell) == iFrom { Xsqlite3Put4byte(tls, pCell, iTo) @@ -30837,7 +30840,7 @@ if i == nCell { if int32(eType) != PTRMAP_BTREE || Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr(int32((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset)+8)) != iFrom { - return Xsqlite3CorruptError(tls, 72183) + return Xsqlite3CorruptError(tls, 72187) } Xsqlite3Put4byte(tls, (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr(int32((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset)+8), iTo) } @@ -30853,7 +30856,7 @@ var pPager uintptr = (*BtShared)(unsafe.Pointer(pBt)).FpPager if iDbPage < Pgno(3) { - return Xsqlite3CorruptError(tls, 72218) + return Xsqlite3CorruptError(tls, 72222) } *(*int32)(unsafe.Pointer(bp)) = 
Xsqlite3PagerMovepage(tls, pPager, (*MemPage)(unsafe.Pointer(pDbPage)).FpDbPage, iFreePage, isCommit) @@ -30914,7 +30917,7 @@ return rc } if int32(*(*U8)(unsafe.Pointer(bp))) == PTRMAP_ROOTPAGE { - return Xsqlite3CorruptError(tls, 72316) + return Xsqlite3CorruptError(tls, 72320) } if int32(*(*U8)(unsafe.Pointer(bp))) == PTRMAP_FREEPAGE { @@ -30949,7 +30952,7 @@ releasePage(tls, *(*uintptr)(unsafe.Pointer(bp + 20))) if *(*Pgno)(unsafe.Pointer(bp + 24)) > dbSize { releasePage(tls, *(*uintptr)(unsafe.Pointer(bp + 16))) - return Xsqlite3CorruptError(tls, 72368) + return Xsqlite3CorruptError(tls, 72372) } } @@ -31009,7 +31012,7 @@ var nFin Pgno = finalDbSize(tls, pBt, nOrig, nFree) if nOrig < nFin || nFree >= nOrig { - rc = Xsqlite3CorruptError(tls, 72436) + rc = Xsqlite3CorruptError(tls, 72440) } else if nFree > Pgno(0) { rc = saveAllCursors(tls, pBt, uint32(0), uintptr(0)) if rc == SQLITE_OK { @@ -31048,7 +31051,7 @@ nOrig = btreePagecount(tls, pBt) if ptrmapPageno(tls, pBt, nOrig) == nOrig || nOrig == U32(Xsqlite3PendingByte)/(*BtShared)(unsafe.Pointer(pBt)).FpageSize+U32(1) { - return Xsqlite3CorruptError(tls, 72487) + return Xsqlite3CorruptError(tls, 72491) } nFree = Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer((*BtShared)(unsafe.Pointer(pBt)).FpPage1)).FaData+36) @@ -31079,7 +31082,7 @@ } nFin = finalDbSize(tls, pBt, nOrig, nVac) if nFin > nOrig { - return Xsqlite3CorruptError(tls, 72514) + return Xsqlite3CorruptError(tls, 72518) } if nFin < nOrig { rc = saveAllCursors(tls, pBt, uint32(0), uintptr(0)) @@ -31420,7 +31423,7 @@ if iTable <= Pgno(1) { if iTable < Pgno(1) { - return Xsqlite3CorruptError(tls, 72978) + return Xsqlite3CorruptError(tls, 72982) } else if btreePagecount(tls, pBt) == Pgno(0) { iTable = Pgno(0) } @@ -31664,14 +31667,14 @@ var pBt uintptr = (*BtCursor)(unsafe.Pointer(pCur)).FpBt if int32((*BtCursor)(unsafe.Pointer(pCur)).Fix) >= int32((*MemPage)(unsafe.Pointer(pPage)).FnCell) { - return Xsqlite3CorruptError(tls, 73385) + return Xsqlite3CorruptError(tls, 73389) } getCellInfo(tls, pCur) aPayload = (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload if Uptr((int32(aPayload)-int32((*MemPage)(unsafe.Pointer(pPage)).FaData))/1) > (*BtShared)(unsafe.Pointer(pBt)).FusableSize-U32((*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal) { - return Xsqlite3CorruptError(tls, 73400) + return Xsqlite3CorruptError(tls, 73404) } if offset < U32((*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal) { @@ -31716,7 +31719,7 @@ for *(*Pgno)(unsafe.Pointer(bp)) != 0 { if *(*Pgno)(unsafe.Pointer(bp)) > (*BtShared)(unsafe.Pointer(pBt)).FnPage { - return Xsqlite3CorruptError(tls, 73462) + return Xsqlite3CorruptError(tls, 73466) } *(*Pgno)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pCur)).FaOverflow + uintptr(iIdx)*4)) = *(*Pgno)(unsafe.Pointer(bp)) @@ -31765,7 +31768,7 @@ } if rc == SQLITE_OK && amt > U32(0) { - return Xsqlite3CorruptError(tls, 73547) + return Xsqlite3CorruptError(tls, 73551) } return rc } @@ -31845,7 +31848,7 @@ func moveToChild(tls *libc.TLS, pCur uintptr, newPgno U32) int32 { if int32((*BtCursor)(unsafe.Pointer(pCur)).FiPage) >= BTCURSOR_MAX_DEPTH-1 { - return Xsqlite3CorruptError(tls, 73684) + return Xsqlite3CorruptError(tls, 73688) } (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnSize = U16(0) *(*U8)(unsafe.Pointer(pCur + 1)) &= libc.Uint8FromInt32(libc.CplInt32(BTCF_ValidNKey | BTCF_ValidOvfl)) @@ -31936,7 +31939,7 @@ if !(int32((*MemPage)(unsafe.Pointer(pRoot)).FisInit) == 0 || libc.Bool32((*BtCursor)(unsafe.Pointer(pCur)).FpKeyInfo == uintptr(0)) != 
int32((*MemPage)(unsafe.Pointer(pRoot)).FintKey)) { goto __11 } - return Xsqlite3CorruptError(tls, 73823) + return Xsqlite3CorruptError(tls, 73827) __11: ; skip_init: @@ -31956,7 +31959,7 @@ if !((*MemPage)(unsafe.Pointer(pRoot)).Fpgno != Pgno(1)) { goto __16 } - return Xsqlite3CorruptError(tls, 73835) + return Xsqlite3CorruptError(tls, 73839) __16: ; subpage = Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer(pRoot)).FaData+uintptr(int32((*MemPage)(unsafe.Pointer(pRoot)).FhdrOffset)+8)) @@ -32166,7 +32169,7 @@ if !(pCell >= (*MemPage)(unsafe.Pointer(pPage)).FaDataEnd) { goto __21 } - return Xsqlite3CorruptError(tls, 74077) + return Xsqlite3CorruptError(tls, 74081) __21: ; goto __19 @@ -32370,7 +32373,7 @@ if !!(int32((*MemPage)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pCur)).FpPage)).FisInit) != 0) { goto __4 } - return Xsqlite3CorruptError(tls, 74273) + return Xsqlite3CorruptError(tls, 74277) __4: ; goto bypass_moveto_root @@ -32435,7 +32438,7 @@ if !(nCell < 2 || U32(nCell)/(*BtShared)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pCur)).FpBt)).FusableSize > (*BtShared)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pCur)).FpBt)).FnPage) { goto __17 } - rc = Xsqlite3CorruptError(tls, 74360) + rc = Xsqlite3CorruptError(tls, 74364) goto moveto_index_finish __17: ; @@ -32483,7 +32486,7 @@ if !((*UnpackedRecord)(unsafe.Pointer(pIdxKey)).FerrCode != 0) { goto __24 } - rc = Xsqlite3CorruptError(tls, 74392) + rc = Xsqlite3CorruptError(tls, 74396) __24: ; goto moveto_index_finish @@ -32602,7 +32605,7 @@ pPage = (*BtCursor)(unsafe.Pointer(pCur)).FpPage idx = int32(libc.PreIncUint16(&(*BtCursor)(unsafe.Pointer(pCur)).Fix, 1)) if !(int32((*MemPage)(unsafe.Pointer(pPage)).FisInit) != 0) || Xsqlite3FaultSim(tls, 412) != 0 { - return Xsqlite3CorruptError(tls, 74508) + return Xsqlite3CorruptError(tls, 74512) } if idx >= int32((*MemPage)(unsafe.Pointer(pPage)).FnCell) { @@ -32762,7 +32765,7 @@ if !(n >= mxPage) { goto __1 } - return Xsqlite3CorruptError(tls, 74688) + return Xsqlite3CorruptError(tls, 74692) __1: ; if !(n > U32(0)) { @@ -32827,7 +32830,7 @@ if !(iTrunk > mxPage || libc.PostIncUint32(&nSearch, 1) > n) { goto __16 } - rc = Xsqlite3CorruptError(tls, 74744) + rc = Xsqlite3CorruptError(tls, 74748) goto __17 __16: rc = btreeGetUnusedPage(tls, pBt, iTrunk, bp+4, 0) @@ -32863,7 +32866,7 @@ goto __22 } - rc = Xsqlite3CorruptError(tls, 74773) + rc = Xsqlite3CorruptError(tls, 74777) goto end_allocate_page goto __23 __22: @@ -32907,7 +32910,7 @@ if !(iNewTrunk > mxPage) { goto __32 } - rc = Xsqlite3CorruptError(tls, 74807) + rc = Xsqlite3CorruptError(tls, 74811) goto end_allocate_page __32: ; @@ -33019,7 +33022,7 @@ if !(iPage > mxPage || iPage < Pgno(2)) { goto __51 } - rc = Xsqlite3CorruptError(tls, 74872) + rc = Xsqlite3CorruptError(tls, 74876) goto end_allocate_page __51: ; @@ -33177,7 +33180,7 @@ if !(iPage < Pgno(2) || iPage > (*BtShared)(unsafe.Pointer(pBt)).FnPage) { goto __1 } - return Xsqlite3CorruptError(tls, 74999) + return Xsqlite3CorruptError(tls, 75003) __1: ; if !(pMemPage != 0) { @@ -33234,7 +33237,7 @@ if !(iTrunk > btreePagecount(tls, pBt)) { goto __10 } - *(*int32)(unsafe.Pointer(bp + 4)) = Xsqlite3CorruptError(tls, 75046) + *(*int32)(unsafe.Pointer(bp + 4)) = Xsqlite3CorruptError(tls, 75050) goto freepage_out __10: ; @@ -33250,7 +33253,7 @@ if !(nLeaf > (*BtShared)(unsafe.Pointer(pBt)).FusableSize/U32(4)-U32(2)) { goto __12 } - *(*int32)(unsafe.Pointer(bp + 4)) = Xsqlite3CorruptError(tls, 75057) + *(*int32)(unsafe.Pointer(bp + 4)) = Xsqlite3CorruptError(tls, 75061) goto freepage_out __12: 
; @@ -33324,7 +33327,7 @@ var ovflPageSize U32 if pCell+uintptr((*CellInfo)(unsafe.Pointer(pInfo)).FnSize) > (*MemPage)(unsafe.Pointer(pPage)).FaDataEnd { - return Xsqlite3CorruptError(tls, 75146) + return Xsqlite3CorruptError(tls, 75150) } ovflPgno = Xsqlite3Get4byte(tls, pCell+uintptr((*CellInfo)(unsafe.Pointer(pInfo)).FnSize)-uintptr(4)) pBt = (*MemPage)(unsafe.Pointer(pPage)).FpBt @@ -33336,7 +33339,7 @@ *(*Pgno)(unsafe.Pointer(bp + 4)) = Pgno(0) *(*uintptr)(unsafe.Pointer(bp)) = uintptr(0) if ovflPgno < Pgno(2) || ovflPgno > btreePagecount(tls, pBt) { - return Xsqlite3CorruptError(tls, 75163) + return Xsqlite3CorruptError(tls, 75167) } if nOvfl != 0 { rc = getOverflowPage(tls, pBt, ovflPgno, bp, bp+4) @@ -33347,7 +33350,7 @@ if (*(*uintptr)(unsafe.Pointer(bp)) != 0 || libc.AssignPtrUintptr(bp, btreePageLookup(tls, pBt, ovflPgno)) != uintptr(0)) && Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FpDbPage) != 1 { - rc = Xsqlite3CorruptError(tls, 75183) + rc = Xsqlite3CorruptError(tls, 75187) } else { rc = freePage2(tls, pBt, *(*uintptr)(unsafe.Pointer(bp)), ovflPgno) } @@ -33512,7 +33515,7 @@ hdr = int32((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset) if pc+U32(sz) > (*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize { - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 75436) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 75440) return } rc = freeSpace(tls, pPage, uint16(pc), uint16(sz)) @@ -33790,12 +33793,12 @@ if Uptr(pCell) >= Uptr(aData+uintptr(j)) && Uptr(pCell) < Uptr(pEnd) { if Uptr(pCell+uintptr(sz)) > Uptr(pEnd) { - return Xsqlite3CorruptError(tls, 75737) + return Xsqlite3CorruptError(tls, 75741) } pCell = pTmp + uintptr((int32(pCell)-int32(aData))/1) } else if Uptr(pCell+uintptr(sz)) > Uptr(pSrcEnd) && Uptr(pCell) < Uptr(pSrcEnd) { - return Xsqlite3CorruptError(tls, 75742) + return Xsqlite3CorruptError(tls, 75746) } pData -= uintptr(sz) @@ -33803,7 +33806,7 @@ *(*U8)(unsafe.Pointer(pCellptr + 1)) = U8((int32(pData) - int32(aData)) / 1) pCellptr += uintptr(2) if pData < pCellptr { - return Xsqlite3CorruptError(tls, 75748) + return Xsqlite3CorruptError(tls, 75752) } libc.Xmemmove(tls, pData, pCell, uint32(sz)) @@ -33863,7 +33866,7 @@ if Uptr(*(*uintptr)(unsafe.Pointer((*CellArray)(unsafe.Pointer(pCArray)).FapCell + uintptr(i)*4))+uintptr(sz)) > Uptr(pEnd) && Uptr(*(*uintptr)(unsafe.Pointer((*CellArray)(unsafe.Pointer(pCArray)).FapCell + uintptr(i)*4))) < Uptr(pEnd) { - Xsqlite3CorruptError(tls, 75833) + Xsqlite3CorruptError(tls, 75837) return 1 } libc.Xmemmove(tls, pSlot, *(*uintptr)(unsafe.Pointer((*CellArray)(unsafe.Pointer(pCArray)).FapCell + uintptr(i)*4)), uint32(sz)) @@ -33952,7 +33955,7 @@ if !(nShift > nCell) { goto __2 } - return Xsqlite3CorruptError(tls, 75947) + return Xsqlite3CorruptError(tls, 75951) __2: ; libc.Xmemmove(tls, (*MemPage)(unsafe.Pointer(pPg)).FaCellIdx, (*MemPage)(unsafe.Pointer(pPg)).FaCellIdx+uintptr(nShift*2), uint32(nCell*2)) @@ -34068,7 +34071,7 @@ var pBt uintptr = (*MemPage)(unsafe.Pointer(pPage)).FpBt if int32((*MemPage)(unsafe.Pointer(pPage)).FnCell) == 0 { - return Xsqlite3CorruptError(tls, 76060) + return Xsqlite3CorruptError(tls, 76064) } *(*int32)(unsafe.Pointer(bp + 80)) = allocateBtreePage(tls, pBt, bp, bp+4, uint32(0), uint8(0)) @@ -34388,7 +34391,7 @@ if !(int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pOld)).FaData))) != int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp + 72)))).FaData)))) { goto __25 
} - *(*int32)(unsafe.Pointer(bp + 112)) = Xsqlite3CorruptError(tls, 76481) + *(*int32)(unsafe.Pointer(bp + 112)) = Xsqlite3CorruptError(tls, 76485) goto balance_cleanup __25: ; @@ -34399,7 +34402,7 @@ if !(limit < int32(*(*U16)(unsafe.Pointer(pOld + 28)))) { goto __27 } - *(*int32)(unsafe.Pointer(bp + 112)) = Xsqlite3CorruptError(tls, 76505) + *(*int32)(unsafe.Pointer(bp + 112)) = Xsqlite3CorruptError(tls, 76509) goto balance_cleanup __27: ; @@ -34557,7 +34560,7 @@ if !(k > NB+2) { goto __55 } - *(*int32)(unsafe.Pointer(bp + 112)) = Xsqlite3CorruptError(tls, 76606) + *(*int32)(unsafe.Pointer(bp + 112)) = Xsqlite3CorruptError(tls, 76610) goto balance_cleanup __55: ; @@ -34631,7 +34634,7 @@ }()) { goto __67 } - *(*int32)(unsafe.Pointer(bp + 112)) = Xsqlite3CorruptError(tls, 76639) + *(*int32)(unsafe.Pointer(bp + 112)) = Xsqlite3CorruptError(tls, 76643) goto balance_cleanup __67: ; @@ -34694,7 +34697,7 @@ }()) { goto __75 } - *(*int32)(unsafe.Pointer(bp + 112)) = Xsqlite3CorruptError(tls, 76683) + *(*int32)(unsafe.Pointer(bp + 112)) = Xsqlite3CorruptError(tls, 76687) goto balance_cleanup __75: ; @@ -34722,7 +34725,7 @@ *(*int32)(unsafe.Pointer(bp + 112)) == SQLITE_OK) { goto __81 } - *(*int32)(unsafe.Pointer(bp + 112)) = Xsqlite3CorruptError(tls, 76716) + *(*int32)(unsafe.Pointer(bp + 112)) = Xsqlite3CorruptError(tls, 76720) __81: ; if !(*(*int32)(unsafe.Pointer(bp + 112)) != 0) { @@ -34983,7 +34986,7 @@ if !(Uptr(pSrcEnd) >= Uptr(pCell1) && Uptr(pSrcEnd) < Uptr(pCell1+uintptr(sz2))) { goto __121 } - *(*int32)(unsafe.Pointer(bp + 112)) = Xsqlite3CorruptError(tls, 76916) + *(*int32)(unsafe.Pointer(bp + 112)) = Xsqlite3CorruptError(tls, 76920) goto balance_cleanup __121: ; @@ -35175,7 +35178,7 @@ if pOther != pCur && int32((*BtCursor)(unsafe.Pointer(pOther)).FeState) == CURSOR_VALID && (*BtCursor)(unsafe.Pointer(pOther)).FpPage == (*BtCursor)(unsafe.Pointer(pCur)).FpPage { - return Xsqlite3CorruptError(tls, 77146) + return Xsqlite3CorruptError(tls, 77150) } } return SQLITE_OK @@ -35213,7 +35216,7 @@ break } } else if Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(pPage)).FpDbPage) > 1 { - rc = Xsqlite3CorruptError(tls, 77206) + rc = Xsqlite3CorruptError(tls, 77210) } else { var pParent uintptr = *(*uintptr)(unsafe.Pointer(pCur + 120 + uintptr(iPage-1)*4)) var iIdx int32 = int32(*(*U16)(unsafe.Pointer(pCur + 72 + uintptr(iPage-1)*2))) @@ -35319,7 +35322,7 @@ return rc } if Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FpDbPage) != 1 || (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FisInit != 0 { - rc = Xsqlite3CorruptError(tls, 77370) + rc = Xsqlite3CorruptError(tls, 77374) } else { if U32(iOffset)+ovflPageSize < U32(nTotal) { ovflPgno = Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FaData) @@ -35344,7 +35347,7 @@ if (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload+uintptr((*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal) > (*MemPage)(unsafe.Pointer(pPage)).FaDataEnd || (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload < (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr((*MemPage)(unsafe.Pointer(pPage)).FcellOffset) { - return Xsqlite3CorruptError(tls, 77398) + return Xsqlite3CorruptError(tls, 77402) } if int32((*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal) == nTotal { return btreeOverwriteContent(tls, pPage, (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload, pX, @@ -35414,7 +35417,7 @@ goto __3 } - return Xsqlite3CorruptError(tls, 77479) + return Xsqlite3CorruptError(tls, 77483) __3: ; __1: 
@@ -35527,7 +35530,7 @@ goto __21 } - *(*int32)(unsafe.Pointer(bp + 104)) = Xsqlite3CorruptError(tls, 77602) + *(*int32)(unsafe.Pointer(bp + 104)) = Xsqlite3CorruptError(tls, 77606) goto __22 __21: *(*int32)(unsafe.Pointer(bp + 104)) = btreeComputeFreeSpace(tls, pPage) @@ -35587,6 +35590,7 @@ __25: ; idx = int32((*BtCursor)(unsafe.Pointer(pCur)).Fix) + (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnSize = U16(0) if !(*(*int32)(unsafe.Pointer(bp)) == 0) { goto __31 } @@ -35594,7 +35598,7 @@ if !(idx >= int32((*MemPage)(unsafe.Pointer(pPage)).FnCell)) { goto __33 } - return Xsqlite3CorruptError(tls, 77640) + return Xsqlite3CorruptError(tls, 77645) __33: ; *(*int32)(unsafe.Pointer(bp + 104)) = Xsqlite3PagerWrite(tls, (*MemPage)(unsafe.Pointer(pPage)).FpDbPage) @@ -35632,13 +35636,13 @@ if !(oldCell < (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset)+uintptr(10)) { goto __39 } - return Xsqlite3CorruptError(tls, 77667) + return Xsqlite3CorruptError(tls, 77672) __39: ; if !(oldCell+uintptr(*(*int32)(unsafe.Pointer(bp + 108))) > (*MemPage)(unsafe.Pointer(pPage)).FaDataEnd) { goto __40 } - return Xsqlite3CorruptError(tls, 77670) + return Xsqlite3CorruptError(tls, 77675) __40: ; libc.Xmemcpy(tls, oldCell, newCell, uint32(*(*int32)(unsafe.Pointer(bp + 108)))) @@ -35669,7 +35673,6 @@ ; *(*int32)(unsafe.Pointer(bp + 104)) = insertCell(tls, pPage, idx, newCell, *(*int32)(unsafe.Pointer(bp + 108)), uintptr(0), uint32(0)) - (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnSize = U16(0) if !((*MemPage)(unsafe.Pointer(pPage)).FnOverflow != 0) { goto __44 } @@ -35744,7 +35747,7 @@ nIn = U32((*BtCursor)(unsafe.Pointer(pSrc)).Finfo.FnLocal) aIn = (*BtCursor)(unsafe.Pointer(pSrc)).Finfo.FpPayload if aIn+uintptr(nIn) > (*MemPage)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pSrc)).FpPage)).FaDataEnd { - return Xsqlite3CorruptError(tls, 77773) + return Xsqlite3CorruptError(tls, 77777) } nRem = (*BtCursor)(unsafe.Pointer(pSrc)).Finfo.FnPayload if nIn == nRem && nIn < U32((*MemPage)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pDest)).FpPage)).FmaxLocal) { @@ -35769,7 +35772,7 @@ if nRem > nIn { if aIn+uintptr(nIn)+uintptr(4) > (*MemPage)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pSrc)).FpPage)).FaDataEnd { - return Xsqlite3CorruptError(tls, 77798) + return Xsqlite3CorruptError(tls, 77802) } ovflIn = Xsqlite3Get4byte(tls, (*BtCursor)(unsafe.Pointer(pSrc)).Finfo.FpPayload+uintptr(nIn)) } @@ -35870,7 +35873,7 @@ return *(*int32)(unsafe.Pointer(bp + 24)) } } else { - return Xsqlite3CorruptError(tls, 77894) + return Xsqlite3CorruptError(tls, 77898) } } @@ -35878,11 +35881,11 @@ iCellIdx = int32((*BtCursor)(unsafe.Pointer(pCur)).Fix) pPage = (*BtCursor)(unsafe.Pointer(pCur)).FpPage if int32((*MemPage)(unsafe.Pointer(pPage)).FnCell) <= iCellIdx { - return Xsqlite3CorruptError(tls, 77903) + return Xsqlite3CorruptError(tls, 77907) } pCell = (*MemPage)(unsafe.Pointer(pPage)).FaData + uintptr(int32((*MemPage)(unsafe.Pointer(pPage)).FmaskPage)&int32(libc.X__builtin_bswap16(tls, *(*U16)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FaCellIdx + uintptr(2*iCellIdx)))))) if (*MemPage)(unsafe.Pointer(pPage)).FnFree < 0 && btreeComputeFreeSpace(tls, pPage) != 0 { - return Xsqlite3CorruptError(tls, 77907) + return Xsqlite3CorruptError(tls, 77911) } bPreserve = U8(libc.Bool32(int32(flags)&BTREE_SAVEPOSITION != 0)) @@ -35957,7 +35960,7 @@ } pCell = (*MemPage)(unsafe.Pointer(pLeaf)).FaData + uintptr(int32((*MemPage)(unsafe.Pointer(pLeaf)).FmaskPage)&int32(libc.X__builtin_bswap16(tls, 
*(*U16)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pLeaf)).FaCellIdx + uintptr(2*(int32((*MemPage)(unsafe.Pointer(pLeaf)).FnCell)-1))))))) if pCell < (*MemPage)(unsafe.Pointer(pLeaf)).FaData+4 { - return Xsqlite3CorruptError(tls, 77998) + return Xsqlite3CorruptError(tls, 78002) } nCell = int32((*struct { f func(*libc.TLS, uintptr, uintptr) U16 @@ -36026,7 +36029,7 @@ Xsqlite3BtreeGetMeta(tls, p, BTREE_LARGEST_ROOT_PAGE, bp) if *(*Pgno)(unsafe.Pointer(bp)) > btreePagecount(tls, pBt) { - return Xsqlite3CorruptError(tls, 78114) + return Xsqlite3CorruptError(tls, 78118) } *(*Pgno)(unsafe.Pointer(bp))++ @@ -36055,7 +36058,7 @@ } *(*int32)(unsafe.Pointer(bp + 24)) = ptrmapGet(tls, pBt, *(*Pgno)(unsafe.Pointer(bp)), bp+16, bp+20) if int32(*(*U8)(unsafe.Pointer(bp + 16))) == PTRMAP_ROOTPAGE || int32(*(*U8)(unsafe.Pointer(bp + 16))) == PTRMAP_FREEPAGE { - *(*int32)(unsafe.Pointer(bp + 24)) = Xsqlite3CorruptError(tls, 78162) + *(*int32)(unsafe.Pointer(bp + 24)) = Xsqlite3CorruptError(tls, 78166) } if *(*int32)(unsafe.Pointer(bp + 24)) != SQLITE_OK { releasePage(tls, *(*uintptr)(unsafe.Pointer(bp + 12))) @@ -36131,7 +36134,7 @@ if !(pgno > btreePagecount(tls, pBt)) { goto __1 } - return Xsqlite3CorruptError(tls, 78252) + return Xsqlite3CorruptError(tls, 78256) __1: ; *(*int32)(unsafe.Pointer(bp + 32)) = getAndInitPage(tls, pBt, pgno, bp, uintptr(0), 0) @@ -36145,7 +36148,7 @@ Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FpDbPage) != 1+libc.Bool32(pgno == Pgno(1))) { goto __3 } - *(*int32)(unsafe.Pointer(bp + 32)) = Xsqlite3CorruptError(tls, 78259) + *(*int32)(unsafe.Pointer(bp + 32)) = Xsqlite3CorruptError(tls, 78263) goto cleardatabasepage_out __3: ; @@ -36279,7 +36282,7 @@ var pBt uintptr = (*Btree)(unsafe.Pointer(p)).FpBt if iTable > btreePagecount(tls, pBt) { - return Xsqlite3CorruptError(tls, 78363) + return Xsqlite3CorruptError(tls, 78367) } *(*int32)(unsafe.Pointer(bp + 8)) = Xsqlite3BtreeClearTable(tls, p, int32(iTable), uintptr(0)) @@ -38749,7 +38752,7 @@ var rc int32 (*Mem)(unsafe.Pointer(pMem)).Fflags = U16(MEM_Null) if Xsqlite3BtreeMaxRecordSize(tls, pCur) < Sqlite3_int64(offset+amt) { - return Xsqlite3CorruptError(tls, 81630) + return Xsqlite3CorruptError(tls, 81634) } if SQLITE_OK == libc.AssignInt32(&rc, Xsqlite3VdbeMemClearAndResize(tls, pMem, int32(amt+U32(1)))) { rc = Xsqlite3BtreePayload(tls, pCur, offset, amt, (*Mem)(unsafe.Pointer(pMem)).Fz) @@ -39397,7 +39400,7 @@ return Xsqlite3GetVarint32(tls, a, bp) }()) if *(*int32)(unsafe.Pointer(bp)) > nRec || iHdr >= *(*int32)(unsafe.Pointer(bp)) { - return Xsqlite3CorruptError(tls, 82270) + return Xsqlite3CorruptError(tls, 82274) } iField = *(*int32)(unsafe.Pointer(bp)) for i = 0; i <= iCol; i++ { @@ -39412,14 +39415,14 @@ }()) if iHdr > *(*int32)(unsafe.Pointer(bp)) { - return Xsqlite3CorruptError(tls, 82276) + return Xsqlite3CorruptError(tls, 82280) } szField = int32(Xsqlite3VdbeSerialTypeLen(tls, *(*U32)(unsafe.Pointer(bp + 4)))) iField = iField + szField } if iField > nRec { - return Xsqlite3CorruptError(tls, 82282) + return Xsqlite3CorruptError(tls, 82286) } if pMem == uintptr(0) { pMem = libc.AssignPtrUintptr(ppVal, Xsqlite3ValueNew(tls, db)) @@ -41724,7 +41727,7 @@ return rc } if *(*int32)(unsafe.Pointer(bp)) != 0 { - return Xsqlite3CorruptError(tls, 86058) + return Xsqlite3CorruptError(tls, 86062) } (*VdbeCursor)(unsafe.Pointer(p)).FdeferredMoveto = U8(0) (*VdbeCursor)(unsafe.Pointer(p)).FcacheStatus = U32(CACHE_STALE) @@ -42275,7 +42278,7 @@ i = 0 } if d1 > uint32(nKey1) { - 
(*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 86985)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 86989)) return 0 } @@ -42340,7 +42343,7 @@ if d1+U32((*Mem)(unsafe.Pointer(bp+8)).Fn) > uint32(nKey1) || int32((*KeyInfo)(unsafe.Pointer(libc.AssignUintptr(&pKeyInfo, (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FpKeyInfo))).FnAllField) <= i { - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87062)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87066)) return 0 } else if *(*uintptr)(unsafe.Pointer(pKeyInfo + 20 + uintptr(i)*4)) != 0 { (*Mem)(unsafe.Pointer(bp + 8)).Fenc = (*KeyInfo)(unsafe.Pointer(pKeyInfo)).Fenc @@ -42374,7 +42377,7 @@ var nStr int32 = int32((*(*U32)(unsafe.Pointer(bp + 48)) - U32(12)) / U32(2)) if d1+U32(nStr) > uint32(nKey1) { - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87092)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87096)) return 0 } else if int32((*Mem)(unsafe.Pointer(pRhs)).Fflags)&MEM_Zero != 0 { if !(isAllZero(tls, aKey1+uintptr(d1), nStr) != 0) { @@ -42424,7 +42427,7 @@ } idx1 = idx1 + U32(Xsqlite3VarintLen(tls, uint64(*(*U32)(unsafe.Pointer(bp + 48))))) if idx1 >= *(*U32)(unsafe.Pointer(bp + 4)) { - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87136)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87140)) return 0 } } @@ -42570,7 +42573,7 @@ if !(szHdr+nStr > nKey1) { goto __7 } - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87299)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87303)) return 0 __7: ; @@ -42741,7 +42744,7 @@ idx_rowid_corruption: ; Xsqlite3VdbeMemReleaseMalloc(tls, bp) - return Xsqlite3CorruptError(tls, 87457) + return Xsqlite3CorruptError(tls, 87461) } // Compare the key of the index entry that cursor pC is pointing to against @@ -42767,7 +42770,7 @@ if nCellKey <= int64(0) || nCellKey > int64(0x7fffffff) { *(*int32)(unsafe.Pointer(res)) = 0 - return Xsqlite3CorruptError(tls, 87490) + return Xsqlite3CorruptError(tls, 87494) } Xsqlite3VdbeMemInit(tls, bp, db, uint16(0)) rc = Xsqlite3VdbeMemFromBtreeZeroOffset(tls, pCur, U32(nCellKey), bp) @@ -43041,7 +43044,7 @@ var v uintptr = pStmt var db uintptr = (*Vdbe)(unsafe.Pointer(v)).Fdb if vdbeSafety(tls, v) != 0 { - return Xsqlite3MisuseError(tls, 87854) + return Xsqlite3MisuseError(tls, 87858) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) if (*Vdbe)(unsafe.Pointer(v)).FstartTime > int64(0) { @@ -43656,7 +43659,7 @@ var db uintptr if vdbeSafetyNotNull(tls, v) != 0 { - return Xsqlite3MisuseError(tls, 88544) + return Xsqlite3MisuseError(tls, 88548) } db = (*Vdbe)(unsafe.Pointer(v)).Fdb Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) @@ -44176,7 +44179,7 @@ var pVar uintptr if vdbeSafetyNotNull(tls, p) != 0 { - return Xsqlite3MisuseError(tls, 89208) + return Xsqlite3MisuseError(tls, 89212) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer((*Vdbe)(unsafe.Pointer(p)).Fdb)).Fmutex) if int32((*Vdbe)(unsafe.Pointer(p)).FeVdbeState) != VDBE_READY_STATE { @@ -44184,7 +44187,7 @@ Xsqlite3_mutex_leave(tls, (*Sqlite3)(unsafe.Pointer((*Vdbe)(unsafe.Pointer(p)).Fdb)).Fmutex) Xsqlite3_log(tls, SQLITE_MISUSE, ts+5357, libc.VaList(bp, (*Vdbe)(unsafe.Pointer(p)).FzSql)) - return Xsqlite3MisuseError(tls, 89216) 
+ return Xsqlite3MisuseError(tls, 89220) } if i >= uint32((*Vdbe)(unsafe.Pointer(p)).FnVar) { Xsqlite3Error(tls, (*Vdbe)(unsafe.Pointer(p)).Fdb, SQLITE_RANGE) @@ -44589,7 +44592,7 @@ if !(!(p != 0) || (*PreUpdate)(unsafe.Pointer(p)).Fop == SQLITE_INSERT) { goto __1 } - rc = Xsqlite3MisuseError(tls, 89707) + rc = Xsqlite3MisuseError(tls, 89711) goto preupdate_old_out __1: ; @@ -44733,7 +44736,7 @@ if !(!(p != 0) || (*PreUpdate)(unsafe.Pointer(p)).Fop == SQLITE_DELETE) { goto __1 } - rc = Xsqlite3MisuseError(tls, 89809) + rc = Xsqlite3MisuseError(tls, 89813) goto preupdate_new_out __1: ; @@ -45177,10 +45180,6 @@ } else if int32((*Mem)(unsafe.Pointer(p)).Fflags)&MEM_Real != 0 { h = h + U64(Xsqlite3VdbeIntValue(tls, p)) } else if int32((*Mem)(unsafe.Pointer(p)).Fflags)&(MEM_Str|MEM_Blob) != 0 { - h = h + U64((*Mem)(unsafe.Pointer(p)).Fn) - if int32((*Mem)(unsafe.Pointer(p)).Fflags)&MEM_Zero != 0 { - h = h + U64(*(*int32)(unsafe.Pointer(p))) - } } } return h @@ -47828,7 +47827,7 @@ goto __9 goto __425 __424: - rc = Xsqlite3CorruptError(tls, 93317) + rc = Xsqlite3CorruptError(tls, 93320) goto abort_due_to_error __425: ; @@ -49588,7 +49587,7 @@ if !((*Op)(unsafe.Pointer(pOp)).Fp2 == 0) { goto __682 } - rc = Xsqlite3CorruptError(tls, 95560) + rc = Xsqlite3CorruptError(tls, 95563) goto __683 __682: goto jump_to_p2 @@ -50366,7 +50365,7 @@ if !((*Op)(unsafe.Pointer(pOp)).Fp5 != 0 && !(Xsqlite3WritableSchema(tls, db) != 0)) { goto __770 } - rc = Xsqlite3ReportError(tls, SQLITE_CORRUPT|int32(3)<<8, 96635, ts+5866) + rc = Xsqlite3ReportError(tls, SQLITE_CORRUPT|int32(3)<<8, 96638, ts+5866) goto abort_due_to_error __770: ; @@ -50476,7 +50475,7 @@ if !(nCellKey <= int64(0) || nCellKey > int64(0x7fffffff)) { goto __781 } - rc = Xsqlite3CorruptError(tls, 96840) + rc = Xsqlite3CorruptError(tls, 96843) goto abort_due_to_error __781: ; @@ -50670,7 +50669,7 @@ goto __803 } - rc = Xsqlite3CorruptError(tls, 97092) + rc = Xsqlite3CorruptError(tls, 97095) __803: ; Xsqlite3DbFreeNN(tls, db, zSql) @@ -52037,7 +52036,7 @@ if !(rc == SQLITE_IOERR|int32(33)<<8) { goto __957 } - rc = Xsqlite3CorruptError(tls, 99031) + rc = Xsqlite3CorruptError(tls, 99034) __957: ; __956: @@ -52557,7 +52556,7 @@ var db uintptr if p == uintptr(0) { - return Xsqlite3MisuseError(tls, 99516) + return Xsqlite3MisuseError(tls, 99519) } db = (*Incrblob)(unsafe.Pointer(p)).Fdb Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) @@ -52640,7 +52639,7 @@ var db uintptr if p == uintptr(0) { - return Xsqlite3MisuseError(tls, 99616) + return Xsqlite3MisuseError(tls, 99619) } db = (*Incrblob)(unsafe.Pointer(p)).Fdb Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) @@ -56081,14 +56080,10 @@ ; Xsqlite3WalkExpr(tls, pWalker, (*Expr)(unsafe.Pointer(pExpr)).FpLeft) if 0 == Xsqlite3ExprCanBeNull(tls, (*Expr)(unsafe.Pointer(pExpr)).FpLeft) && !(int32((*Parse)(unsafe.Pointer(pParse)).FeParseMode) >= PARSE_MODE_RENAME) { - if int32((*Expr)(unsafe.Pointer(pExpr)).Fop) == TK_NOTNULL { - *(*uintptr)(unsafe.Pointer(pExpr + 8)) = ts + 6764 - *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_IsTrue) - } else { - *(*uintptr)(unsafe.Pointer(pExpr + 8)) = ts + 6769 - *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_IsFalse) - } - (*Expr)(unsafe.Pointer(pExpr)).Fop = U8(TK_TRUEFALSE) + *(*int32)(unsafe.Pointer(pExpr + 8)) = libc.Bool32(int32((*Expr)(unsafe.Pointer(pExpr)).Fop) == TK_NOTNULL) + *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_IntValue) + (*Expr)(unsafe.Pointer(pExpr)).Fop = U8(TK_INTEGER) + i = 0 p = pNC __4: @@ -56132,7 +56127,7 @@ var 
pLeft uintptr = (*Expr)(unsafe.Pointer(pExpr)).FpLeft if (*NameContext)(unsafe.Pointer(pNC)).FncFlags&(NC_IdxExpr|NC_GenCol) != 0 { - notValidImpl(tls, pParse, pNC, ts+6775, uintptr(0), pExpr) + notValidImpl(tls, pParse, pNC, ts+6764, uintptr(0), pExpr) } pRight = (*Expr)(unsafe.Pointer(pExpr)).FpRight @@ -56196,7 +56191,7 @@ (*Expr)(unsafe.Pointer(pExpr)).FiTable = exprProbability(tls, (*ExprList_item)(unsafe.Pointer(pList+8+1*20)).FpExpr) if (*Expr)(unsafe.Pointer(pExpr)).FiTable < 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+6792, libc.VaList(bp, pExpr)) + ts+6781, libc.VaList(bp, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } } else { @@ -56212,7 +56207,7 @@ var auth int32 = Xsqlite3AuthCheck(tls, pParse, SQLITE_FUNCTION, uintptr(0), (*FuncDef)(unsafe.Pointer(pDef)).FzName, uintptr(0)) if auth != SQLITE_OK { if auth == SQLITE_DENY { - Xsqlite3ErrorMsg(tls, pParse, ts+6856, + Xsqlite3ErrorMsg(tls, pParse, ts+6845, libc.VaList(bp+8, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } @@ -56226,7 +56221,7 @@ } if (*FuncDef)(unsafe.Pointer(pDef)).FfuncFlags&U32(SQLITE_FUNC_CONSTANT) == U32(0) { if (*NameContext)(unsafe.Pointer(pNC)).FncFlags&(NC_IdxExpr|NC_PartIdx|NC_GenCol) != 0 { - notValidImpl(tls, pParse, pNC, ts+6892, uintptr(0), pExpr) + notValidImpl(tls, pParse, pNC, ts+6881, uintptr(0), pExpr) } } else { @@ -56249,30 +56244,30 @@ if 0 == libc.Bool32(int32((*Parse)(unsafe.Pointer(pParse)).FeParseMode) >= PARSE_MODE_RENAME) { if pDef != 0 && (*FuncDef)(unsafe.Pointer(pDef)).FxValue == uintptr(0) && pWin != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+6920, libc.VaList(bp+16, pExpr)) + ts+6909, libc.VaList(bp+16, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } else if is_agg != 0 && (*NameContext)(unsafe.Pointer(pNC)).FncFlags&NC_AllowAgg == 0 || is_agg != 0 && (*FuncDef)(unsafe.Pointer(pDef)).FfuncFlags&U32(SQLITE_FUNC_WINDOW) != 0 && !(pWin != 0) || is_agg != 0 && pWin != 0 && (*NameContext)(unsafe.Pointer(pNC)).FncFlags&NC_AllowWin == 0 { var zType uintptr if (*FuncDef)(unsafe.Pointer(pDef)).FfuncFlags&U32(SQLITE_FUNC_WINDOW) != 0 || pWin != 0 { - zType = ts + 6963 + zType = ts + 6952 } else { - zType = ts + 6970 + zType = ts + 6959 } - Xsqlite3ErrorMsg(tls, pParse, ts+6980, libc.VaList(bp+24, zType, pExpr)) + Xsqlite3ErrorMsg(tls, pParse, ts+6969, libc.VaList(bp+24, zType, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ is_agg = 0 } else if no_such_func != 0 && int32((*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).Finit.Fbusy) == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+7008, libc.VaList(bp+40, pExpr)) + Xsqlite3ErrorMsg(tls, pParse, ts+6997, libc.VaList(bp+40, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } else if wrong_num_args != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+7030, + Xsqlite3ErrorMsg(tls, pParse, ts+7019, libc.VaList(bp+48, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } else if is_agg == 0 && (*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_WinFunc) != U32(0) { Xsqlite3ErrorMsg(tls, pParse, - ts+7074, + ts+7063, libc.VaList(bp+56, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } @@ -56344,15 +56339,15 @@ var nRef int32 = (*NameContext)(unsafe.Pointer(pNC)).FnRef if (*NameContext)(unsafe.Pointer(pNC)).FncFlags&NC_SelfRef != 0 { - notValidImpl(tls, pParse, pNC, ts+7122, pExpr, pExpr) + notValidImpl(tls, pParse, pNC, ts+7111, pExpr, pExpr) } else { Xsqlite3WalkSelect(tls, pWalker, *(*uintptr)(unsafe.Pointer(pExpr + 20))) } if nRef != (*NameContext)(unsafe.Pointer(pNC)).FnRef { *(*U32)(unsafe.Pointer(pExpr + 4)) |= 
U32(EP_VarSelect) - *(*int32)(unsafe.Pointer(pNC + 24)) |= NC_VarSelect } + *(*int32)(unsafe.Pointer(pNC + 24)) |= NC_Subquery } break @@ -56360,7 +56355,7 @@ case TK_VARIABLE: { if (*NameContext)(unsafe.Pointer(pNC)).FncFlags&(NC_IsCheck|NC_PartIdx|NC_IdxExpr|NC_GenCol) != 0 { - notValidImpl(tls, pParse, pNC, ts+7133, pExpr, pExpr) + notValidImpl(tls, pParse, pNC, ts+7122, pExpr, pExpr) } break @@ -56491,7 +56486,7 @@ defer tls.Free(24) Xsqlite3ErrorMsg(tls, pParse, - ts+7144, libc.VaList(bp, i, zType, mx)) + ts+7133, libc.VaList(bp, i, zType, mx)) Xsqlite3RecordErrorOffsetOfExpr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pError) } @@ -56511,7 +56506,7 @@ } db = (*Parse)(unsafe.Pointer(pParse)).Fdb if (*ExprList)(unsafe.Pointer(pOrderBy)).FnExpr > *(*int32)(unsafe.Pointer(db + 120 + 2*4)) { - Xsqlite3ErrorMsg(tls, pParse, ts+7200, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+7189, 0) return 1 } for i = 0; i < (*ExprList)(unsafe.Pointer(pOrderBy)).FnExpr; i++ { @@ -56546,7 +56541,7 @@ } if Xsqlite3ExprIsInteger(tls, pE, bp+8) != 0 { if *(*int32)(unsafe.Pointer(bp + 8)) <= 0 || *(*int32)(unsafe.Pointer(bp + 8)) > (*ExprList)(unsafe.Pointer(pEList)).FnExpr { - resolveOutOfRangeError(tls, pParse, ts+7234, i+1, (*ExprList)(unsafe.Pointer(pEList)).FnExpr, pE) + resolveOutOfRangeError(tls, pParse, ts+7223, i+1, (*ExprList)(unsafe.Pointer(pEList)).FnExpr, pE) return 1 } } else { @@ -56603,7 +56598,7 @@ for i = 0; i < (*ExprList)(unsafe.Pointer(pOrderBy)).FnExpr; i++ { if int32(*(*uint16)(unsafe.Pointer(pOrderBy + 8 + uintptr(i)*20 + 8 + 4))&0x4>>2) == 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+7240, libc.VaList(bp, i+1)) + ts+7229, libc.VaList(bp, i+1)) return 1 } } @@ -56631,7 +56626,7 @@ return 0 } if (*ExprList)(unsafe.Pointer(pOrderBy)).FnExpr > *(*int32)(unsafe.Pointer(db + 120 + 2*4)) { - Xsqlite3ErrorMsg(tls, pParse, ts+7301, libc.VaList(bp, zType)) + Xsqlite3ErrorMsg(tls, pParse, ts+7290, libc.VaList(bp, zType)) return 1 } pEList = (*Select)(unsafe.Pointer(pSelect)).FpEList @@ -56845,7 +56840,7 @@ *(*int32)(unsafe.Pointer(bp + 24)) |= NC_UEList if (*Select)(unsafe.Pointer(p)).FpHaving != 0 { if (*Select)(unsafe.Pointer(p)).FselFlags&U32(SF_Aggregate) == U32(0) { - Xsqlite3ErrorMsg(tls, pParse, ts+7332, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+7321, 0) return WRC_Abort } if Xsqlite3ResolveExprNames(tls, bp, (*Select)(unsafe.Pointer(p)).FpHaving) != 0 { @@ -56885,7 +56880,7 @@ if (*Select)(unsafe.Pointer(p)).FpOrderBy != uintptr(0) && isCompound <= nCompound && - resolveOrderGroupBy(tls, bp, p, (*Select)(unsafe.Pointer(p)).FpOrderBy, ts+7234) != 0 { + resolveOrderGroupBy(tls, bp, p, (*Select)(unsafe.Pointer(p)).FpOrderBy, ts+7223) != 0 { return WRC_Abort } if (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { @@ -56896,7 +56891,7 @@ if pGroupBy != 0 { var pItem uintptr - if resolveOrderGroupBy(tls, bp, p, pGroupBy, ts+7371) != 0 || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { + if resolveOrderGroupBy(tls, bp, p, pGroupBy, ts+7360) != 0 || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { return WRC_Abort } i = 0 @@ -56908,7 +56903,7 @@ { if (*Expr)(unsafe.Pointer((*ExprList_item)(unsafe.Pointer(pItem)).FpExpr)).Fflags&U32(EP_Agg) != U32(0) { Xsqlite3ErrorMsg(tls, pParse, - ts+7377, 0) + ts+7366, 0) return WRC_Abort } @@ -57772,7 +57767,7 @@ var mxHeight int32 = *(*int32)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb + 120 + 3*4)) if nHeight > mxHeight { Xsqlite3ErrorMsg(tls, pParse, - ts+7436, libc.VaList(bp, mxHeight)) + ts+7425, libc.VaList(bp, mxHeight)) rc = SQLITE_ERROR } return rc 
@@ -58021,10 +58016,10 @@ nExprElem = 1 } if nExprElem != nElem { - Xsqlite3ErrorMsg(tls, pParse, ts+7484, + Xsqlite3ErrorMsg(tls, pParse, ts+7473, libc.VaList(bp, nExprElem, func() uintptr { if nExprElem > 1 { - return ts + 7528 + return ts + 7517 } return ts + 1554 }(), nElem)) @@ -58065,7 +58060,7 @@ !(int32((*Parse)(unsafe.Pointer(pParse)).FeParseMode) >= PARSE_MODE_RENAME) { Xsqlite3ExprDeferredDelete(tls, pParse, pLeft) Xsqlite3ExprDeferredDelete(tls, pParse, pRight) - return Xsqlite3Expr(tls, db, TK_INTEGER, ts+7530) + return Xsqlite3Expr(tls, db, TK_INTEGER, ts+7519) } else { return Xsqlite3PExpr(tls, pParse, TK_AND, pLeft, pRight) } @@ -58091,7 +58086,7 @@ if pList != 0 && (*ExprList)(unsafe.Pointer(pList)).FnExpr > *(*int32)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb + 120 + 6*4)) && !(int32((*Parse)(unsafe.Pointer(pParse)).Fnested) != 0) { - Xsqlite3ErrorMsg(tls, pParse, ts+7532, libc.VaList(bp, pToken)) + Xsqlite3ErrorMsg(tls, pParse, ts+7521, libc.VaList(bp, pToken)) } *(*uintptr)(unsafe.Pointer(pNew + 20)) = pList *(*U32)(unsafe.Pointer(pNew + 4)) |= U32(EP_HasFunc) @@ -58119,7 +58114,7 @@ if (*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_FromDDL) != U32(0) { if (*FuncDef)(unsafe.Pointer(pDef)).FfuncFlags&U32(SQLITE_FUNC_DIRECT) != U32(0) || (*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).Fflags&uint64(SQLITE_TrustedSchema) == uint64(0) { - Xsqlite3ErrorMsg(tls, pParse, ts+7566, libc.VaList(bp, pExpr)) + Xsqlite3ErrorMsg(tls, pParse, ts+7555, libc.VaList(bp, pExpr)) } } } @@ -58166,7 +58161,7 @@ } if bOk == 0 || *(*I64)(unsafe.Pointer(bp + 8)) < int64(1) || *(*I64)(unsafe.Pointer(bp + 8)) > I64(*(*int32)(unsafe.Pointer(db + 120 + 9*4))) { - Xsqlite3ErrorMsg(tls, pParse, ts+7586, + Xsqlite3ErrorMsg(tls, pParse, ts+7575, libc.VaList(bp, *(*int32)(unsafe.Pointer(db + 120 + 9*4)))) Xsqlite3RecordErrorOffsetOfExpr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pExpr) return @@ -58191,7 +58186,7 @@ } (*Expr)(unsafe.Pointer(pExpr)).FiColumn = x if int32(x) > *(*int32)(unsafe.Pointer(db + 120 + 9*4)) { - Xsqlite3ErrorMsg(tls, pParse, ts+7629, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+7618, 0) Xsqlite3RecordErrorOffsetOfExpr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pExpr) } } @@ -58766,7 +58761,7 @@ if !(int32((*Expr)(unsafe.Pointer(pExpr)).Fop) != TK_SELECT && (*IdList)(unsafe.Pointer(pColumns)).FnId != libc.AssignInt32(&n, Xsqlite3ExprVectorSize(tls, pExpr))) { goto __3 } - Xsqlite3ErrorMsg(tls, pParse, ts+7652, + Xsqlite3ErrorMsg(tls, pParse, ts+7641, libc.VaList(bp, (*IdList)(unsafe.Pointer(pColumns)).FnId, n)) goto vector_append_error __3: @@ -58889,7 +58884,7 @@ var mx int32 = *(*int32)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb + 120 + 2*4)) if pEList != 0 && (*ExprList)(unsafe.Pointer(pEList)).FnExpr > mx { - Xsqlite3ErrorMsg(tls, pParse, ts+7682, libc.VaList(bp, zObject)) + Xsqlite3ErrorMsg(tls, pParse, ts+7671, libc.VaList(bp, zObject)) } } @@ -58945,10 +58940,10 @@ // "false" EP_IsFalse // anything else 0 func Xsqlite3IsTrueOrFalse(tls *libc.TLS, zIn uintptr) U32 { - if Xsqlite3StrICmp(tls, zIn, ts+6764) == 0 { + if Xsqlite3StrICmp(tls, zIn, ts+7694) == 0 { return U32(EP_IsTrue) } - if Xsqlite3StrICmp(tls, zIn, ts+6769) == 0 { + if Xsqlite3StrICmp(tls, zIn, ts+7699) == 0 { return U32(EP_IsFalse) } return U32(0) @@ -60022,7 +60017,7 @@ } if (*Select)(unsafe.Pointer(pSel)).FpLimit != 0 { var db uintptr = (*Parse)(unsafe.Pointer(pParse)).Fdb - pLimit = Xsqlite3Expr(tls, db, TK_INTEGER, ts+7530) + pLimit = Xsqlite3Expr(tls, db, TK_INTEGER, ts+7519) if 
pLimit != 0 { (*Expr)(unsafe.Pointer(pLimit)).FaffExpr = uint8(SQLITE_AFF_NUMERIC) pLimit = Xsqlite3PExpr(tls, pParse, TK_NE, @@ -60460,6 +60455,7 @@ func Xsqlite3ExprCodeGeneratedColumn(tls *libc.TLS, pParse uintptr, pTab uintptr, pCol uintptr, regOut int32) { var iAddr int32 var v uintptr = (*Parse)(unsafe.Pointer(pParse)).FpVdbe + var nErr int32 = (*Parse)(unsafe.Pointer(pParse)).FnErr if (*Parse)(unsafe.Pointer(pParse)).FiSelfTab > 0 { iAddr = Xsqlite3VdbeAddOp3(tls, v, OP_IfNullRow, (*Parse)(unsafe.Pointer(pParse)).FiSelfTab-1, 0, regOut) @@ -60473,6 +60469,9 @@ if iAddr != 0 { Xsqlite3VdbeJumpHere(tls, v, iAddr) } + if (*Parse)(unsafe.Pointer(pParse)).FnErr > nErr { + (*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).FerrByteOffset = -1 + } } // Generate code to extract the value of the iCol-th column of a table. @@ -60691,6 +60690,7 @@ var p uintptr var v uintptr for p = (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr; p != 0; p = (*IndexedExpr)(unsafe.Pointer(p)).FpIENext { + var exprAff U8 var iDataCur int32 = (*IndexedExpr)(unsafe.Pointer(p)).FiDataCur if iDataCur < 0 { continue @@ -60704,6 +60704,14 @@ if Xsqlite3ExprCompare(tls, uintptr(0), pExpr, (*IndexedExpr)(unsafe.Pointer(p)).FpExpr, iDataCur) != 0 { continue } + + exprAff = Xsqlite3ExprAffinity(tls, pExpr) + if int32(exprAff) <= SQLITE_AFF_BLOB && int32((*IndexedExpr)(unsafe.Pointer(p)).Faff) != SQLITE_AFF_BLOB || + int32(exprAff) == SQLITE_AFF_TEXT && int32((*IndexedExpr)(unsafe.Pointer(p)).Faff) != SQLITE_AFF_TEXT || + int32(exprAff) >= SQLITE_AFF_NUMERIC && int32((*IndexedExpr)(unsafe.Pointer(p)).Faff) != SQLITE_AFF_NUMERIC { + continue + } + v = (*Parse)(unsafe.Pointer(pParse)).FpVdbe if (*IndexedExpr)(unsafe.Pointer(p)).FbMaybeNullRow != 0 { @@ -61477,7 +61485,7 @@ if !((*Expr)(unsafe.Pointer(pExpr)).FiTable != n1) { goto __122 } - Xsqlite3ErrorMsg(tls, pParse, ts+7652, + Xsqlite3ErrorMsg(tls, pParse, ts+7641, libc.VaList(bp+24, (*Expr)(unsafe.Pointer(pExpr)).FiTable, n1)) __122: ; @@ -61499,11 +61507,10 @@ return target __50: - if !(!((*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_Collate) != U32(0)) && - (*Expr)(unsafe.Pointer(pExpr)).FpLeft != 0 && - int32((*Expr)(unsafe.Pointer((*Expr)(unsafe.Pointer(pExpr)).FpLeft)).Fop) == TK_FUNCTION) { + if !!((*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_Collate) != U32(0)) { goto __123 } + inReg = Xsqlite3ExprCodeTarget(tls, pParse, (*Expr)(unsafe.Pointer(pExpr)).FpLeft, target) if !(inReg != target) { goto __125 @@ -61574,13 +61581,19 @@ ; __127: ; - addrINR = Xsqlite3VdbeAddOp1(tls, v, OP_IfNullRow, (*Expr)(unsafe.Pointer(pExpr)).FiTable) + addrINR = Xsqlite3VdbeAddOp3(tls, v, OP_IfNullRow, (*Expr)(unsafe.Pointer(pExpr)).FiTable, 0, target) (*Parse)(unsafe.Pointer(pParse)).FokConstFactor = U8(0) inReg = Xsqlite3ExprCodeTarget(tls, pParse, (*Expr)(unsafe.Pointer(pExpr)).FpLeft, target) (*Parse)(unsafe.Pointer(pParse)).FokConstFactor = okConstFactor + if !(inReg != target) { + goto __130 + } + Xsqlite3VdbeAddOp2(tls, v, OP_SCopy, inReg, target) + inReg = target +__130: + ; Xsqlite3VdbeJumpHere(tls, v, addrINR) - Xsqlite3VdbeChangeP3(tls, v, addrINR, inReg) goto __5 __56: @@ -61593,15 +61606,15 @@ nExpr = (*ExprList)(unsafe.Pointer(pEList)).FnExpr endLabel = Xsqlite3VdbeMakeLabel(tls, pParse) if !(libc.AssignUintptr(&pX, (*Expr)(unsafe.Pointer(pExpr)).FpLeft) != uintptr(0)) { - goto __130 + goto __131 } pDel = Xsqlite3ExprDup(tls, db1, pX, 0) if !((*Sqlite3)(unsafe.Pointer(db1)).FmallocFailed != 0) { - goto __131 + goto __132 } Xsqlite3ExprDelete(tls, db1, pDel) goto 
__5 -__131: +__132: ; exprToRegister(tls, pDel, exprCodeVector(tls, pParse, pDel, bp+40)) @@ -61611,22 +61624,22 @@ pTest = bp + 100 *(*int32)(unsafe.Pointer(bp + 40)) = 0 -__130: +__131: ; i1 = 0 -__132: +__133: if !(i1 < nExpr-1) { - goto __134 + goto __135 } if !(pX != 0) { - goto __135 + goto __136 } (*Expr)(unsafe.Pointer(bp + 100)).FpRight = (*ExprList_item)(unsafe.Pointer(aListelem + uintptr(i1)*20)).FpExpr - goto __136 -__135: - pTest = (*ExprList_item)(unsafe.Pointer(aListelem + uintptr(i1)*20)).FpExpr + goto __137 __136: + pTest = (*ExprList_item)(unsafe.Pointer(aListelem + uintptr(i1)*20)).FpExpr +__137: ; nextCase = Xsqlite3VdbeMakeLabel(tls, pParse) @@ -61635,21 +61648,21 @@ Xsqlite3ExprCode(tls, pParse, (*ExprList_item)(unsafe.Pointer(aListelem+uintptr(i1+1)*20)).FpExpr, target) Xsqlite3VdbeGoto(tls, v, endLabel) Xsqlite3VdbeResolveLabel(tls, v, nextCase) - goto __133 -__133: - i1 = i1 + 2 - goto __132 goto __134 __134: + i1 = i1 + 2 + goto __133 + goto __135 +__135: ; if !(nExpr&1 != 0) { - goto __137 + goto __138 } Xsqlite3ExprCode(tls, pParse, (*ExprList_item)(unsafe.Pointer(pEList+8+uintptr(nExpr-1)*20)).FpExpr, target) - goto __138 -__137: - Xsqlite3VdbeAddOp2(tls, v, OP_Null, 0, target) + goto __139 __138: + Xsqlite3VdbeAddOp2(tls, v, OP_Null, 0, target) +__139: ; Xsqlite3ExprDelete(tls, db1, pDel) setDoNotMergeFlagOnCopy(tls, v) @@ -61659,27 +61672,27 @@ __57: ; if !(!(int32((*Parse)(unsafe.Pointer(pParse)).FpTriggerTab) != 0) && !(int32((*Parse)(unsafe.Pointer(pParse)).Fnested) != 0)) { - goto __139 + goto __140 } Xsqlite3ErrorMsg(tls, pParse, ts+8082, 0) return 0 -__139: +__140: ; if !(int32((*Expr)(unsafe.Pointer(pExpr)).FaffExpr) == OE_Abort) { - goto __140 + goto __141 } Xsqlite3MayAbort(tls, pParse) -__140: +__141: ; if !(int32((*Expr)(unsafe.Pointer(pExpr)).FaffExpr) == OE_Ignore) { - goto __141 + goto __142 } Xsqlite3VdbeAddOp4(tls, v, OP_Halt, SQLITE_OK, OE_Ignore, 0, *(*uintptr)(unsafe.Pointer(pExpr + 8)), 0) - goto __142 -__141: + goto __143 +__142: Xsqlite3HaltConstraint(tls, pParse, func() int32 { if (*Parse)(unsafe.Pointer(pParse)).FpTriggerTab != 0 { @@ -61688,7 +61701,7 @@ return SQLITE_ERROR }(), int32((*Expr)(unsafe.Pointer(pExpr)).FaffExpr), *(*uintptr)(unsafe.Pointer(pExpr + 8)), int8(0), uint8(0)) -__142: +__143: ; goto __5 @@ -64356,7 +64369,7 @@ return SQLITE_NOMEM } if Xsqlite3_strnicmp(tls, zSql, ts+10922, 7) != 0 { - return Xsqlite3CorruptError(tls, 113494) + return Xsqlite3CorruptError(tls, 113516) } (*Sqlite3)(unsafe.Pointer(db)).Finit.FiDb = func() uint8 { if bTemp != 0 { @@ -64373,7 +64386,7 @@ } if rc == SQLITE_OK && ((*Parse)(unsafe.Pointer(p)).FpNewTable == uintptr(0) && (*Parse)(unsafe.Pointer(p)).FpNewIndex == uintptr(0) && (*Parse)(unsafe.Pointer(p)).FpNewTrigger == uintptr(0)) { - rc = Xsqlite3CorruptError(tls, 113505) + rc = Xsqlite3CorruptError(tls, 113527) } (*Sqlite3)(unsafe.Pointer(db)).Finit.FiDb = U8(0) @@ -65294,7 +65307,7 @@ goto __2 } - rc = Xsqlite3CorruptError(tls, 114441) + rc = Xsqlite3CorruptError(tls, 114463) goto drop_column_done __2: ; @@ -69660,6 +69673,12 @@ pExpr = Xsqlite3PExpr(tls, pParse, TK_UPLUS, pExpr, uintptr(0)) __11: ; + if !(pExpr != 0 && int32((*Expr)(unsafe.Pointer(pExpr)).Fop) != TK_RAISE) { + goto __12 + } + (*Expr)(unsafe.Pointer(pExpr)).FaffExpr = (*Column)(unsafe.Pointer(pCol)).Faffinity +__12: + ; Xsqlite3ColumnSetExpr(tls, pParse, pTab, pCol, pExpr) pExpr = uintptr(0) goto generated_done @@ -70824,7 +70843,7 @@ if Xsqlite3_strnicmp(tls, (*Table)(unsafe.Pointer(pTab)).FzName+uintptr(7), 
ts+3286, 4) == 0 { return 0 } - if Xsqlite3_strnicmp(tls, (*Table)(unsafe.Pointer(pTab)).FzName+uintptr(7), ts+7133, 10) == 0 { + if Xsqlite3_strnicmp(tls, (*Table)(unsafe.Pointer(pTab)).FzName+uintptr(7), ts+7122, 10) == 0 { return 0 } return 1 @@ -72070,7 +72089,7 @@ goto __101 } Xsqlite3ErrorMsg(tls, pParse, ts+14140, 0) - (*Parse)(unsafe.Pointer(pParse)).Frc = Xsqlite3CorruptError(tls, 121835) + (*Parse)(unsafe.Pointer(pParse)).Frc = Xsqlite3CorruptError(tls, 121859) goto exit_create_index __101: ; @@ -74115,7 +74134,7 @@ goto __16 __15: wcf = U16(WHERE_ONEPASS_DESIRED | WHERE_DUPLICATES_OK) - if !((*NameContext)(unsafe.Pointer(bp+8)).FncFlags&NC_VarSelect != 0) { + if !((*NameContext)(unsafe.Pointer(bp+8)).FncFlags&NC_Subquery != 0) { goto __23 } bComplex = 1 @@ -80582,7 +80601,7 @@ if !!(Xsqlite3SafetyCheckOk(tls, db) != 0) { goto __1 } - return Xsqlite3MisuseError(tls, 131895) + return Xsqlite3MisuseError(tls, 131931) __1: ; if !(zSql == uintptr(0)) { @@ -81979,7 +81998,7 @@ } else if (*FuncDef)(unsafe.Pointer(p)).FxFinalize != uintptr(0) { zType = ts + 17513 } else { - zType = ts + 7528 + zType = ts + 7517 } Xsqlite3VdbeMultiLoad(tls, v, 1, ts+17515, libc.VaList(bp, (*FuncDef)(unsafe.Pointer(p)).FzName, isBuiltin, @@ -82140,6 +82159,7 @@ var zErr2 uintptr var k3 int32 var pCheck uintptr + var jmp7 int32 var jmp6 int32 var iCol1 int32 var uniqOk int32 @@ -83458,7 +83478,7 @@ goto __217 } pMod = (*HashElem)(unsafe.Pointer(j1)).Fdata - Xsqlite3VdbeMultiLoad(tls, v, 1, ts+7528, libc.VaList(bp+264, (*Module)(unsafe.Pointer(pMod)).FzName)) + Xsqlite3VdbeMultiLoad(tls, v, 1, ts+7517, libc.VaList(bp+264, (*Module)(unsafe.Pointer(pMod)).FzName)) goto __216 __216: j1 = (*HashElem)(unsafe.Pointer(j1)).Fnext @@ -83474,7 +83494,7 @@ if !(i6 < int32(uint32(unsafe.Sizeof(aPragmaName))/uint32(unsafe.Sizeof(PragmaName{})))) { goto __220 } - Xsqlite3VdbeMultiLoad(tls, v, 1, ts+7528, libc.VaList(bp+272, aPragmaName[i6].FzName)) + Xsqlite3VdbeMultiLoad(tls, v, 1, ts+7517, libc.VaList(bp+272, aPragmaName[i6].FzName)) goto __219 __219: i6++ @@ -84279,80 +84299,94 @@ jmp4 = integrityCheckResultRow(tls, v) Xsqlite3VdbeJumpHere(tls, v, jmp21) + if !((*Table)(unsafe.Pointer(pTab9)).FtabFlags&U32(TF_WithoutRowid) == U32(0)) { + goto __345 + } + Xsqlite3VdbeAddOp2(tls, v, OP_IdxRowid, *(*int32)(unsafe.Pointer(bp + 572))+j4, 3) + jmp7 = Xsqlite3VdbeAddOp3(tls, v, OP_Eq, 3, 0, r1+int32((*Index)(unsafe.Pointer(pIdx5)).FnColumn)-1) + + Xsqlite3VdbeLoadString(tls, v, 3, + ts+17929) + Xsqlite3VdbeAddOp3(tls, v, OP_Concat, 7, 3, 3) + Xsqlite3VdbeLoadString(tls, v, 4, ts+17965) + Xsqlite3VdbeGoto(tls, v, jmp5-1) + Xsqlite3VdbeJumpHere(tls, v, jmp7) +__345: + ; label6 = 0 kk = 0 -__345: +__346: if !(kk < int32((*Index)(unsafe.Pointer(pIdx5)).FnKeyCol)) { - goto __347 + goto __348 } if !(*(*uintptr)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx5)).FazColl + uintptr(kk)*4)) == uintptr(unsafe.Pointer(&Xsqlite3StrBINARY))) { - goto __348 + goto __349 } - goto __346 -__348: + goto __347 +__349: ; if !(label6 == 0) { - goto __349 + goto __350 } label6 = Xsqlite3VdbeMakeLabel(tls, pParse) -__349: +__350: ; Xsqlite3VdbeAddOp3(tls, v, OP_Column, *(*int32)(unsafe.Pointer(bp + 572))+j4, kk, 3) Xsqlite3VdbeAddOp3(tls, v, OP_Ne, 3, label6, r1+kk) - goto __346 -__346: - kk++ - goto __345 goto __347 __347: + kk++ + goto __346 + goto __348 +__348: ; if !(label6 != 0) { - goto __350 + goto __351 } jmp6 = Xsqlite3VdbeAddOp0(tls, v, OP_Goto) Xsqlite3VdbeResolveLabel(tls, v, label6) Xsqlite3VdbeLoadString(tls, v, 3, ts+17903) 
Xsqlite3VdbeAddOp3(tls, v, OP_Concat, 7, 3, 3) - Xsqlite3VdbeLoadString(tls, v, 4, ts+17929) + Xsqlite3VdbeLoadString(tls, v, 4, ts+17976) Xsqlite3VdbeGoto(tls, v, jmp5-1) Xsqlite3VdbeJumpHere(tls, v, jmp6) -__350: +__351: ; if !(int32((*Index)(unsafe.Pointer(pIdx5)).FonError) != OE_None) { - goto __351 + goto __352 } uniqOk = Xsqlite3VdbeMakeLabel(tls, pParse) kk = 0 -__352: +__353: if !(kk < int32((*Index)(unsafe.Pointer(pIdx5)).FnKeyCol)) { - goto __354 + goto __355 } iCol1 = int32(*(*I16)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx5)).FaiColumn + uintptr(kk)*2))) if !(iCol1 >= 0 && uint32(int32(*(*uint8)(unsafe.Pointer((*Table)(unsafe.Pointer(pTab9)).FaCol + uintptr(iCol1)*16 + 4))&0xf>>0)) != 0) { - goto __355 + goto __356 } - goto __353 -__355: + goto __354 +__356: ; Xsqlite3VdbeAddOp2(tls, v, OP_IsNull, r1+kk, uniqOk) - goto __353 -__353: - kk++ - goto __352 goto __354 __354: + kk++ + goto __353 + goto __355 +__355: ; jmp61 = Xsqlite3VdbeAddOp1(tls, v, OP_Next, *(*int32)(unsafe.Pointer(bp + 572))+j4) Xsqlite3VdbeGoto(tls, v, uniqOk) Xsqlite3VdbeJumpHere(tls, v, jmp61) Xsqlite3VdbeAddOp4Int(tls, v, OP_IdxGT, *(*int32)(unsafe.Pointer(bp + 572))+j4, uniqOk, r1, int32((*Index)(unsafe.Pointer(pIdx5)).FnKeyCol)) - Xsqlite3VdbeLoadString(tls, v, 3, ts+17956) + Xsqlite3VdbeLoadString(tls, v, 3, ts+18003) Xsqlite3VdbeGoto(tls, v, jmp5) Xsqlite3VdbeResolveLabel(tls, v, uniqOk) -__351: +__352: ; Xsqlite3VdbeJumpHere(tls, v, jmp4) Xsqlite3ResolvePartIdxLabel(tls, pParse, *(*int32)(unsafe.Pointer(bp + 580))) @@ -84369,20 +84403,20 @@ Xsqlite3VdbeAddOp2(tls, v, OP_Next, *(*int32)(unsafe.Pointer(bp + 568)), loopTop) Xsqlite3VdbeJumpHere(tls, v, loopTop-1) if !!(isQuick != 0) { - goto __356 + goto __357 } - Xsqlite3VdbeLoadString(tls, v, 2, ts+17983) + Xsqlite3VdbeLoadString(tls, v, 2, ts+18030) j4 = 0 pIdx5 = (*Table)(unsafe.Pointer(pTab9)).FpIndex -__357: +__358: if !(pIdx5 != 0) { - goto __359 + goto __360 } if !(pPk1 == pIdx5) { - goto __360 + goto __361 } - goto __358 -__360: + goto __359 +__361: ; Xsqlite3VdbeAddOp2(tls, v, OP_Count, *(*int32)(unsafe.Pointer(bp + 572))+j4, 3) addr1 = Xsqlite3VdbeAddOp3(tls, v, OP_Eq, 8+j4, 0, 3) @@ -84391,21 +84425,21 @@ Xsqlite3VdbeAddOp3(tls, v, OP_Concat, 4, 2, 3) integrityCheckResultRow(tls, v) Xsqlite3VdbeJumpHere(tls, v, addr1) - goto __358 -__358: - pIdx5 = (*Index)(unsafe.Pointer(pIdx5)).FpNext - j4++ - goto __357 goto __359 __359: + pIdx5 = (*Index)(unsafe.Pointer(pIdx5)).FpNext + j4++ + goto __358 + goto __360 +__360: ; if !(pPk1 != 0) { - goto __361 + goto __362 } Xsqlite3ReleaseTempRange(tls, pParse, r2, int32((*Index)(unsafe.Pointer(pPk1)).FnKeyCol)) -__361: +__362: ; -__356: +__357: ; goto __291 __291: @@ -84423,14 +84457,14 @@ ; aOp2 = Xsqlite3VdbeAddOpList(tls, v, int32(uint32(unsafe.Sizeof(endCode))/uint32(unsafe.Sizeof(VdbeOpList{}))), uintptr(unsafe.Pointer(&endCode)), iLn5) if !(aOp2 != 0) { - goto __362 + goto __363 } (*VdbeOp)(unsafe.Pointer(aOp2)).Fp2 = 1 - *(*int32)(unsafe.Pointer(bp + 564)) (*VdbeOp)(unsafe.Pointer(aOp2 + 2*20)).Fp4type = int8(-1) - *(*uintptr)(unsafe.Pointer(aOp2 + 2*20 + 16)) = ts + 18012 + *(*uintptr)(unsafe.Pointer(aOp2 + 2*20 + 16)) = ts + 18059 (*VdbeOp)(unsafe.Pointer(aOp2 + 5*20)).Fp4type = int8(-1) *(*uintptr)(unsafe.Pointer(aOp2 + 5*20 + 16)) = Xsqlite3ErrStr(tls, SQLITE_CORRUPT) -__362: +__363: ; Xsqlite3VdbeChangeP3(tls, v, 0, Xsqlite3VdbeCurrentAddr(tls, v)-2) @@ -84438,27 +84472,27 @@ __45: if !!(zRight != 0) { - goto __363 + goto __364 } if !(Xsqlite3ReadSchema(tls, pParse) != 0) { - goto __365 + goto 
__366 } goto pragma_out -__365: +__366: ; returnSingleText(tls, v, encnames1[(*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).Fenc].FzName) - goto __364 -__363: + goto __365 +__364: if !((*Sqlite3)(unsafe.Pointer(db)).FmDbFlags&U32(DBFLAG_EncodingFixed) == U32(0)) { - goto __366 + goto __367 } pEnc = uintptr(unsafe.Pointer(&encnames1)) -__367: +__368: if !((*EncName)(unsafe.Pointer(pEnc)).FzName != 0) { - goto __369 + goto __370 } if !(0 == Xsqlite3StrICmp(tls, zRight, (*EncName)(unsafe.Pointer(pEnc)).FzName)) { - goto __370 + goto __371 } if (*EncName)(unsafe.Pointer(pEnc)).Fenc != 0 { enc = (*EncName)(unsafe.Pointer(pEnc)).Fenc @@ -84467,25 +84501,25 @@ } (*Schema)(unsafe.Pointer((*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb)).FpSchema)).Fenc = enc Xsqlite3SetTextEncoding(tls, db, enc) - goto __369 -__370: + goto __370 +__371: ; - goto __368 -__368: - pEnc += 8 - goto __367 goto __369 __369: + pEnc += 8 + goto __368 + goto __370 +__370: ; if !!(int32((*EncName)(unsafe.Pointer(pEnc)).FzName) != 0) { - goto __371 + goto __372 } - Xsqlite3ErrorMsg(tls, pParse, ts+18015, libc.VaList(bp+456, zRight)) -__371: + Xsqlite3ErrorMsg(tls, pParse, ts+18062, libc.VaList(bp+456, zRight)) +__372: ; -__366: +__367: ; -__364: +__365: ; goto __15 @@ -84493,15 +84527,15 @@ iCookie = int32((*PragmaName)(unsafe.Pointer(pPragma)).FiArg) Xsqlite3VdbeUsesBtree(tls, v, iDb) if !(zRight != 0 && int32((*PragmaName)(unsafe.Pointer(pPragma)).FmPragFlg)&PragFlg_ReadOnly == 0) { - goto __372 + goto __373 } aOp3 = Xsqlite3VdbeAddOpList(tls, v, int32(uint32(unsafe.Sizeof(setCookie))/uint32(unsafe.Sizeof(VdbeOpList{}))), uintptr(unsafe.Pointer(&setCookie)), 0) if !(0 != 0) { - goto __374 + goto __375 } goto __15 -__374: +__375: ; (*VdbeOp)(unsafe.Pointer(aOp3)).Fp1 = iDb (*VdbeOp)(unsafe.Pointer(aOp3 + 1*20)).Fp1 = iDb @@ -84509,41 +84543,41 @@ (*VdbeOp)(unsafe.Pointer(aOp3 + 1*20)).Fp3 = Xsqlite3Atoi(tls, zRight) (*VdbeOp)(unsafe.Pointer(aOp3 + 1*20)).Fp5 = U16(1) if !(iCookie == BTREE_SCHEMA_VERSION && (*Sqlite3)(unsafe.Pointer(db)).Fflags&uint64(SQLITE_Defensive) != uint64(0)) { - goto __375 + goto __376 } (*VdbeOp)(unsafe.Pointer(aOp3 + 1*20)).Fopcode = U8(OP_Noop) -__375: +__376: ; - goto __373 -__372: + goto __374 +__373: ; aOp4 = Xsqlite3VdbeAddOpList(tls, v, int32(uint32(unsafe.Sizeof(readCookie))/uint32(unsafe.Sizeof(VdbeOpList{}))), uintptr(unsafe.Pointer(&readCookie)), 0) if !(0 != 0) { - goto __376 + goto __377 } goto __15 -__376: +__377: ; (*VdbeOp)(unsafe.Pointer(aOp4)).Fp1 = iDb (*VdbeOp)(unsafe.Pointer(aOp4 + 1*20)).Fp1 = iDb (*VdbeOp)(unsafe.Pointer(aOp4 + 1*20)).Fp3 = iCookie Xsqlite3VdbeReusable(tls, v) -__373: +__374: ; goto __15 __47: i10 = 0 (*Parse)(unsafe.Pointer(pParse)).FnMem = 1 -__377: +__378: if !(libc.AssignUintptr(&zOpt, Xsqlite3_compileoption_get(tls, libc.PostIncInt32(&i10, 1))) != uintptr(0)) { - goto __378 + goto __379 } Xsqlite3VdbeLoadString(tls, v, 1, zOpt) Xsqlite3VdbeAddOp2(tls, v, OP_ResultRow, 1, 1) - goto __377 -__378: + goto __378 +__379: ; Xsqlite3VdbeReusable(tls, v) @@ -84558,31 +84592,31 @@ }() eMode2 = SQLITE_CHECKPOINT_PASSIVE if !(zRight != 0) { - goto __379 + goto __380 } if !(Xsqlite3StrICmp(tls, zRight, ts+17345) == 0) { - goto __380 + goto __381 } eMode2 = SQLITE_CHECKPOINT_FULL - goto __381 -__380: - if !(Xsqlite3StrICmp(tls, zRight, ts+18040) == 0) { - goto __382 + goto __382 +__381: + if !(Xsqlite3StrICmp(tls, zRight, ts+18087) == 0) { + goto __383 } eMode2 = SQLITE_CHECKPOINT_RESTART - goto __383 -__382: + goto __384 +__383: if 
!(Xsqlite3StrICmp(tls, zRight, ts+17498) == 0) { - goto __384 + goto __385 } eMode2 = SQLITE_CHECKPOINT_TRUNCATE -__384: +__385: ; -__383: +__384: ; -__381: +__382: ; -__379: +__380: ; (*Parse)(unsafe.Pointer(pParse)).FnMem = 3 Xsqlite3VdbeAddOp3(tls, v, OP_Checkpoint, iBt, eMode2, 1) @@ -84592,10 +84626,10 @@ __49: if !(zRight != 0) { - goto __385 + goto __386 } Xsqlite3_wal_autocheckpoint(tls, db, Xsqlite3Atoi(tls, zRight)) -__385: +__386: ; returnSingleInt(tls, v, func() int64 { @@ -84615,19 +84649,19 @@ __51: if !(zRight != 0) { - goto __386 + goto __387 } opMask = U32(Xsqlite3Atoi(tls, zRight)) if !(opMask&U32(0x02) == U32(0)) { - goto __388 + goto __389 } goto __15 -__388: +__389: ; - goto __387 -__386: - opMask = U32(0xfffe) + goto __388 __387: + opMask = U32(0xfffe) +__388: ; iTabCur = libc.PostIncInt32(&(*Parse)(unsafe.Pointer(pParse)).FnTab, 1) iDbLast = func() int32 { @@ -84636,86 +84670,86 @@ } return (*Sqlite3)(unsafe.Pointer(db)).FnDb - 1 }() -__389: +__390: if !(iDb <= iDbLast) { - goto __391 + goto __392 } if !(iDb == 1) { - goto __392 + goto __393 } - goto __390 -__392: + goto __391 +__393: ; Xsqlite3CodeVerifySchema(tls, pParse, iDb) pSchema = (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + uintptr(iDb)*16)).FpSchema k4 = (*Hash)(unsafe.Pointer(pSchema + 8)).Ffirst -__393: +__394: if !(k4 != 0) { - goto __395 + goto __396 } pTab10 = (*HashElem)(unsafe.Pointer(k4)).Fdata if !((*Table)(unsafe.Pointer(pTab10)).FtabFlags&U32(TF_StatsUsed) == U32(0)) { - goto __396 + goto __397 } - goto __394 -__396: + goto __395 +__397: ; szThreshold = LogEst(int32((*Table)(unsafe.Pointer(pTab10)).FnRowLogEst) + 46) pIdx6 = (*Table)(unsafe.Pointer(pTab10)).FpIndex -__397: +__398: if !(pIdx6 != 0) { - goto __399 + goto __400 } if !!(int32(*(*uint16)(unsafe.Pointer(pIdx6 + 56))&0x80>>7) != 0) { - goto __400 + goto __401 } szThreshold = int16(0) - goto __399 -__400: + goto __400 +__401: ; - goto __398 -__398: - pIdx6 = (*Index)(unsafe.Pointer(pIdx6)).FpNext - goto __397 goto __399 __399: + pIdx6 = (*Index)(unsafe.Pointer(pIdx6)).FpNext + goto __398 + goto __400 +__400: ; if !(szThreshold != 0) { - goto __401 + goto __402 } Xsqlite3OpenTable(tls, pParse, iTabCur, iDb, pTab10, OP_OpenRead) Xsqlite3VdbeAddOp3(tls, v, OP_IfSmaller, iTabCur, int32(U32(Xsqlite3VdbeCurrentAddr(tls, v)+2)+opMask&U32(1)), int32(szThreshold)) -__401: +__402: ; - zSubSql = Xsqlite3MPrintf(tls, db, ts+18048, + zSubSql = Xsqlite3MPrintf(tls, db, ts+18095, libc.VaList(bp+464, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*16)).FzDbSName, (*Table)(unsafe.Pointer(pTab10)).FzName)) if !(opMask&U32(0x01) != 0) { - goto __402 + goto __403 } r11 = Xsqlite3GetTempReg(tls, pParse) Xsqlite3VdbeAddOp4(tls, v, OP_String8, 0, r11, 0, zSubSql, -6) Xsqlite3VdbeAddOp2(tls, v, OP_ResultRow, r11, 1) - goto __403 -__402: - Xsqlite3VdbeAddOp4(tls, v, OP_SqlExec, 0, 0, 0, zSubSql, -6) + goto __404 __403: + Xsqlite3VdbeAddOp4(tls, v, OP_SqlExec, 0, 0, 0, zSubSql, -6) +__404: ; - goto __394 -__394: - k4 = (*HashElem)(unsafe.Pointer(k4)).Fnext - goto __393 goto __395 __395: + k4 = (*HashElem)(unsafe.Pointer(k4)).Fnext + goto __394 + goto __396 +__396: ; - goto __390 -__390: - iDb++ - goto __389 goto __391 __391: + iDb++ + goto __390 + goto __392 +__392: ; Xsqlite3VdbeAddOp0(tls, v, OP_Expire) goto __15 @@ -84723,36 +84757,36 @@ __52: ; if !(zRight != 0) { - goto __404 + goto __405 } Xsqlite3_busy_timeout(tls, db, Xsqlite3Atoi(tls, zRight)) -__404: +__405: ; returnSingleInt(tls, v, 
int64((*Sqlite3)(unsafe.Pointer(db)).FbusyTimeout)) goto __15 __53: if !(zRight != 0 && Xsqlite3DecOrHexToI64(tls, zRight, bp+584) == SQLITE_OK) { - goto __405 + goto __406 } Xsqlite3_soft_heap_limit64(tls, *(*Sqlite3_int64)(unsafe.Pointer(bp + 584))) -__405: +__406: ; returnSingleInt(tls, v, Xsqlite3_soft_heap_limit64(tls, int64(-1))) goto __15 __54: if !(zRight != 0 && Xsqlite3DecOrHexToI64(tls, zRight, bp+592) == SQLITE_OK) { - goto __406 + goto __407 } iPrior = Xsqlite3_hard_heap_limit64(tls, int64(-1)) if !(*(*Sqlite3_int64)(unsafe.Pointer(bp + 592)) > int64(0) && (iPrior == int64(0) || iPrior > *(*Sqlite3_int64)(unsafe.Pointer(bp + 592)))) { - goto __407 + goto __408 } Xsqlite3_hard_heap_limit64(tls, *(*Sqlite3_int64)(unsafe.Pointer(bp + 592))) -__407: +__408: ; -__406: +__407: ; returnSingleInt(tls, v, Xsqlite3_hard_heap_limit64(tls, int64(-1))) goto __15 @@ -84761,10 +84795,10 @@ if !(zRight != 0 && Xsqlite3DecOrHexToI64(tls, zRight, bp+600) == SQLITE_OK && *(*Sqlite3_int64)(unsafe.Pointer(bp + 600)) >= int64(0)) { - goto __408 + goto __409 } Xsqlite3_limit(tls, db, SQLITE_LIMIT_WORKER_THREADS, int32(*(*Sqlite3_int64)(unsafe.Pointer(bp + 600))&int64(0x7fffffff))) -__408: +__409: ; returnSingleInt(tls, v, int64(Xsqlite3_limit(tls, db, SQLITE_LIMIT_WORKER_THREADS, -1))) goto __15 @@ -84773,10 +84807,10 @@ if !(zRight != 0 && Xsqlite3DecOrHexToI64(tls, zRight, bp+608) == SQLITE_OK && *(*Sqlite3_int64)(unsafe.Pointer(bp + 608)) >= int64(0)) { - goto __409 + goto __410 } (*Sqlite3)(unsafe.Pointer(db)).FnAnalysisLimit = int32(*(*Sqlite3_int64)(unsafe.Pointer(bp + 608)) & int64(0x7fffffff)) -__409: +__410: ; returnSingleInt(tls, v, int64((*Sqlite3)(unsafe.Pointer(db)).FnAnalysisLimit)) goto __15 @@ -84784,10 +84818,10 @@ __15: ; if !(int32((*PragmaName)(unsafe.Pointer(pPragma)).FmPragFlg)&PragFlg_NoColumns1 != 0 && zRight != 0) { - goto __410 + goto __411 } -__410: +__411: ; pragma_out: Xsqlite3DbFree(tls, db, zLeft) @@ -84839,14 +84873,14 @@ {Fopcode: U8(OP_Goto), Fp2: int8(3)}, } var encnames1 = [9]EncName{ - {FzName: ts + 18066, Fenc: U8(SQLITE_UTF8)}, - {FzName: ts + 18071, Fenc: U8(SQLITE_UTF8)}, - {FzName: ts + 18077, Fenc: U8(SQLITE_UTF16LE)}, - {FzName: ts + 18086, Fenc: U8(SQLITE_UTF16BE)}, - {FzName: ts + 18095, Fenc: U8(SQLITE_UTF16LE)}, - {FzName: ts + 18103, Fenc: U8(SQLITE_UTF16BE)}, - {FzName: ts + 18111}, - {FzName: ts + 18118}, + {FzName: ts + 18113, Fenc: U8(SQLITE_UTF8)}, + {FzName: ts + 18118, Fenc: U8(SQLITE_UTF8)}, + {FzName: ts + 18124, Fenc: U8(SQLITE_UTF16LE)}, + {FzName: ts + 18133, Fenc: U8(SQLITE_UTF16BE)}, + {FzName: ts + 18142, Fenc: U8(SQLITE_UTF16LE)}, + {FzName: ts + 18150, Fenc: U8(SQLITE_UTF16BE)}, + {FzName: ts + 18158}, + {FzName: ts + 18165}, {}, } var setCookie = [2]VdbeOpList{ @@ -84898,7 +84932,7 @@ _ = argc _ = argv Xsqlite3StrAccumInit(tls, bp+32, uintptr(0), bp+56, int32(unsafe.Sizeof([200]uint8{})), 0) - Xsqlite3_str_appendall(tls, bp+32, ts+18124) + Xsqlite3_str_appendall(tls, bp+32, ts+18171) i = 0 j = int32((*PragmaName)(unsafe.Pointer(pPragma)).FiPragCName) __1: @@ -84906,7 +84940,7 @@ goto __3 } { - Xsqlite3_str_appendf(tls, bp+32, ts+18139, libc.VaList(bp, int32(cSep), pragCName[j])) + Xsqlite3_str_appendf(tls, bp+32, ts+18186, libc.VaList(bp, int32(cSep), pragCName[j])) cSep = uint8(',') } @@ -84919,16 +84953,16 @@ __3: ; if i == 0 { - Xsqlite3_str_appendf(tls, bp+32, ts+18146, libc.VaList(bp+16, (*PragmaName)(unsafe.Pointer(pPragma)).FzName)) + Xsqlite3_str_appendf(tls, bp+32, ts+18193, libc.VaList(bp+16, 
(*PragmaName)(unsafe.Pointer(pPragma)).FzName)) i++ } j = 0 if int32((*PragmaName)(unsafe.Pointer(pPragma)).FmPragFlg)&PragFlg_Result1 != 0 { - Xsqlite3_str_appendall(tls, bp+32, ts+18152) + Xsqlite3_str_appendall(tls, bp+32, ts+18199) j++ } if int32((*PragmaName)(unsafe.Pointer(pPragma)).FmPragFlg)&(PragFlg_SchemaOpt|PragFlg_SchemaReq) != 0 { - Xsqlite3_str_appendall(tls, bp+32, ts+18164) + Xsqlite3_str_appendall(tls, bp+32, ts+18211) j++ } Xsqlite3_str_append(tls, bp+32, ts+4957, 1) @@ -85111,13 +85145,13 @@ __3: ; Xsqlite3StrAccumInit(tls, bp+32, uintptr(0), uintptr(0), 0, *(*int32)(unsafe.Pointer((*PragmaVtab)(unsafe.Pointer(pTab)).Fdb + 120 + 1*4))) - Xsqlite3_str_appendall(tls, bp+32, ts+18179) + Xsqlite3_str_appendall(tls, bp+32, ts+18226) if *(*uintptr)(unsafe.Pointer(pCsr + 16 + 1*4)) != 0 { - Xsqlite3_str_appendf(tls, bp+32, ts+18187, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(pCsr + 16 + 1*4)))) + Xsqlite3_str_appendf(tls, bp+32, ts+18234, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(pCsr + 16 + 1*4)))) } Xsqlite3_str_appendall(tls, bp+32, (*PragmaName)(unsafe.Pointer((*PragmaVtab)(unsafe.Pointer(pTab)).FpName)).FzName) if *(*uintptr)(unsafe.Pointer(pCsr + 16)) != 0 { - Xsqlite3_str_appendf(tls, bp+32, ts+18191, libc.VaList(bp+16, *(*uintptr)(unsafe.Pointer(pCsr + 16)))) + Xsqlite3_str_appendf(tls, bp+32, ts+18238, libc.VaList(bp+16, *(*uintptr)(unsafe.Pointer(pCsr + 16)))) } zSql = Xsqlite3StrAccumFinish(tls, bp+32) if zSql == uintptr(0) { @@ -85194,12 +85228,12 @@ } else if *(*uintptr)(unsafe.Pointer((*InitData)(unsafe.Pointer(pData)).FpzErrMsg)) != uintptr(0) { } else if (*InitData)(unsafe.Pointer(pData)).FmInitFlags&U32(INITFLAG_AlterMask) != 0 { *(*uintptr)(unsafe.Pointer((*InitData)(unsafe.Pointer(pData)).FpzErrMsg)) = Xsqlite3MPrintf(tls, db, - ts+18195, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(azObj)), *(*uintptr)(unsafe.Pointer(azObj + 1*4)), + ts+18242, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(azObj)), *(*uintptr)(unsafe.Pointer(azObj + 1*4)), azAlterType[(*InitData)(unsafe.Pointer(pData)).FmInitFlags&U32(INITFLAG_AlterMask)-U32(1)], zExtra)) (*InitData)(unsafe.Pointer(pData)).Frc = SQLITE_ERROR } else if (*Sqlite3)(unsafe.Pointer(db)).Fflags&uint64(SQLITE_WriteSchema) != 0 { - (*InitData)(unsafe.Pointer(pData)).Frc = Xsqlite3CorruptError(tls, 137196) + (*InitData)(unsafe.Pointer(pData)).Frc = Xsqlite3CorruptError(tls, 137249) } else { var z uintptr var zObj uintptr @@ -85208,19 +85242,19 @@ } else { zObj = ts + 5008 } - z = Xsqlite3MPrintf(tls, db, ts+18223, libc.VaList(bp+32, zObj)) + z = Xsqlite3MPrintf(tls, db, ts+18270, libc.VaList(bp+32, zObj)) if zExtra != 0 && *(*uint8)(unsafe.Pointer(zExtra)) != 0 { - z = Xsqlite3MPrintf(tls, db, ts+18254, libc.VaList(bp+40, z, zExtra)) + z = Xsqlite3MPrintf(tls, db, ts+18301, libc.VaList(bp+40, z, zExtra)) } *(*uintptr)(unsafe.Pointer((*InitData)(unsafe.Pointer(pData)).FpzErrMsg)) = z - (*InitData)(unsafe.Pointer(pData)).Frc = Xsqlite3CorruptError(tls, 137203) + (*InitData)(unsafe.Pointer(pData)).Frc = Xsqlite3CorruptError(tls, 137256) } } var azAlterType = [3]uintptr{ - ts + 18262, - ts + 18269, - ts + 18281, + ts + 18309, + ts + 18316, + ts + 18328, } // Check to see if any sibling index (another index on the same table) @@ -85312,7 +85346,7 @@ var pIndex uintptr pIndex = Xsqlite3FindIndex(tls, db, *(*uintptr)(unsafe.Pointer(argv + 1*4)), (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*16)).FzDbSName) if pIndex == uintptr(0) { - corruptSchema(tls, pData, argv, ts+18292) + corruptSchema(tls, 
pData, argv, ts+18339) } else if Xsqlite3GetUInt32(tls, *(*uintptr)(unsafe.Pointer(argv + 3*4)), pIndex+44) == 0 || (*Index)(unsafe.Pointer(pIndex)).Ftnum < Pgno(2) || (*Index)(unsafe.Pointer(pIndex)).Ftnum > (*InitData)(unsafe.Pointer(pData)).FmxPage || @@ -85360,7 +85394,7 @@ }()) *(*uintptr)(unsafe.Pointer(bp + 16 + 2*4)) = *(*uintptr)(unsafe.Pointer(bp + 16 + 1*4)) *(*uintptr)(unsafe.Pointer(bp + 16 + 3*4)) = ts + 7938 - *(*uintptr)(unsafe.Pointer(bp + 16 + 4*4)) = ts + 18305 + *(*uintptr)(unsafe.Pointer(bp + 16 + 4*4)) = ts + 18352 *(*uintptr)(unsafe.Pointer(bp + 16 + 5*4)) = uintptr(0) (*InitData)(unsafe.Pointer(bp + 40)).Fdb = db (*InitData)(unsafe.Pointer(bp + 40)).FiDb = iDb @@ -85489,7 +85523,7 @@ if !(int32((*Schema)(unsafe.Pointer((*Db)(unsafe.Pointer(pDb)).FpSchema)).Ffile_format) > SQLITE_MAX_FILE_FORMAT) { goto __19 } - Xsqlite3SetString(tls, pzErrMsg, db, ts+18377) + Xsqlite3SetString(tls, pzErrMsg, db, ts+18424) rc = SQLITE_ERROR goto initone_error_out __19: @@ -85503,7 +85537,7 @@ (*InitData)(unsafe.Pointer(bp + 40)).FmxPage = Xsqlite3BtreeLastPage(tls, (*Db)(unsafe.Pointer(pDb)).FpBt) zSql = Xsqlite3MPrintf(tls, db, - ts+18401, + ts+18448, libc.VaList(bp, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*16)).FzDbSName, zSchemaTabName)) xAuth = (*Sqlite3)(unsafe.Pointer(db)).FxAuth @@ -85835,7 +85869,7 @@ goto __8 } zDb = (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + uintptr(i)*16)).FzDbSName - Xsqlite3ErrorWithMsg(tls, db, rc, ts+18435, libc.VaList(bp, zDb)) + Xsqlite3ErrorWithMsg(tls, db, rc, ts+18482, libc.VaList(bp, zDb)) goto end_prepare __8: @@ -85865,7 +85899,7 @@ if !(nBytes > mxLen) { goto __12 } - Xsqlite3ErrorWithMsg(tls, db, SQLITE_TOOBIG, ts+18465, 0) + Xsqlite3ErrorWithMsg(tls, db, SQLITE_TOOBIG, ts+18512, 0) rc = Xsqlite3ApiExit(tls, db, SQLITE_TOOBIG) goto end_prepare __12: @@ -85961,7 +85995,7 @@ *(*uintptr)(unsafe.Pointer(ppStmt)) = uintptr(0) if !(Xsqlite3SafetyCheckOk(tls, db) != 0) || zSql == uintptr(0) { - return Xsqlite3MisuseError(tls, 137995) + return Xsqlite3MisuseError(tls, 138048) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) Xsqlite3BtreeEnterAll(tls, db) @@ -86060,7 +86094,7 @@ *(*uintptr)(unsafe.Pointer(ppStmt)) = uintptr(0) if !(Xsqlite3SafetyCheckOk(tls, db) != 0) || zSql == uintptr(0) { - return Xsqlite3MisuseError(tls, 138143) + return Xsqlite3MisuseError(tls, 138196) } if nBytes >= 0 { var sz int32 @@ -86387,13 +86421,13 @@ zSp2++ } Xsqlite3ErrorMsg(tls, pParse, - ts+18484, libc.VaList(bp, pA, zSp1, pB, zSp2, pC)) + ts+18531, libc.VaList(bp, pA, zSp1, pB, zSp2, pC)) jointype = JT_INNER } return jointype } -var zKeyText = *(*[34]uint8)(unsafe.Pointer(ts + 18514)) +var zKeyText = *(*[34]uint8)(unsafe.Pointer(ts + 18561)) var aKeyword = [7]struct { Fi U8 FnChar U8 @@ -86568,7 +86602,7 @@ var pUsing uintptr = uintptr(0) if uint32(int32(*(*uint16)(unsafe.Pointer(pRight + 36 + 4))&0x400>>10)) != 0 || *(*uintptr)(unsafe.Pointer(pRight + 48)) != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+18548, libc.VaList(bp, 0)) + ts+18595, libc.VaList(bp, 0)) return 1 } for j = 0; j < int32((*Table)(unsafe.Pointer(pRightTab)).FnCol); j++ { @@ -86613,7 +86647,7 @@ tableAndColumnIndex(tls, pSrc, 0, i, zName, bp+24, bp+28, int32(*(*uint16)(unsafe.Pointer(pRight + 36 + 4))&0x1000>>12)) == 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+18598, libc.VaList(bp+8, zName)) + ts+18645, libc.VaList(bp+8, zName)) return 1 } pE1 = Xsqlite3CreateColumnExpr(tls, db, pSrc, *(*int32)(unsafe.Pointer(bp + 24)), 
*(*int32)(unsafe.Pointer(bp + 28))) @@ -86624,7 +86658,7 @@ int32(*(*uint16)(unsafe.Pointer(pRight + 36 + 4))&0x1000>>12)) != 0 { if int32(*(*uint16)(unsafe.Pointer(pSrc + 8 + uintptr(*(*int32)(unsafe.Pointer(bp + 24)))*72 + 36 + 4))&0x400>>10) == 0 || Xsqlite3IdListIndex(tls, *(*uintptr)(unsafe.Pointer(pSrc + 8 + uintptr(*(*int32)(unsafe.Pointer(bp + 24)))*72 + 48)), zName) < 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+18662, + Xsqlite3ErrorMsg(tls, pParse, ts+18709, libc.VaList(bp+16, zName)) break } @@ -87252,16 +87286,16 @@ var z uintptr switch id { case TK_ALL: - z = ts + 18699 + z = ts + 18746 break case TK_INTERSECT: - z = ts + 18709 + z = ts + 18756 break case TK_EXCEPT: - z = ts + 18719 + z = ts + 18766 break default: - z = ts + 18726 + z = ts + 18773 break } return z @@ -87271,7 +87305,7 @@ bp := tls.Alloc(8) defer tls.Free(8) - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18732, libc.VaList(bp, zUsage)) + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18779, libc.VaList(bp, zUsage)) } func generateSortTail(tls *libc.TLS, pParse uintptr, p uintptr, pSort uintptr, nColumn int32, pDest uintptr) { @@ -87297,9 +87331,9 @@ var nRefKey int32 = 0 var aOutEx uintptr = (*Select)(unsafe.Pointer(p)).FpEList + 8 - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18755, libc.VaList(bp, func() uintptr { + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18802, libc.VaList(bp, func() uintptr { if (*SortCtx)(unsafe.Pointer(pSort)).FnOBSat > 0 { - return ts + 18786 + return ts + 18833 } return ts + 1554 }())) @@ -87643,7 +87677,7 @@ } else { var z uintptr = (*ExprList_item)(unsafe.Pointer(pEList + 8 + uintptr(i)*20)).FzEName if z == uintptr(0) { - z = Xsqlite3MPrintf(tls, db, ts+18801, libc.VaList(bp+16, i+1)) + z = Xsqlite3MPrintf(tls, db, ts+18848, libc.VaList(bp+16, i+1)) } else { z = Xsqlite3DbStrDup(tls, db, z) } @@ -87743,7 +87777,7 @@ if zName != 0 && !(Xsqlite3IsTrueOrFalse(tls, zName) != 0) { zName = Xsqlite3DbStrDup(tls, db, zName) } else { - zName = Xsqlite3MPrintf(tls, db, ts+18801, libc.VaList(bp, i+1)) + zName = Xsqlite3MPrintf(tls, db, ts+18848, libc.VaList(bp, i+1)) } *(*U32)(unsafe.Pointer(bp + 48)) = U32(0) @@ -87759,7 +87793,7 @@ nName = j } } - zName = Xsqlite3MPrintf(tls, db, ts+18810, libc.VaList(bp+8, nName, zName, libc.PreIncUint32(&*(*U32)(unsafe.Pointer(bp + 48)), 1))) + zName = Xsqlite3MPrintf(tls, db, ts+18857, libc.VaList(bp+8, nName, zName, libc.PreIncUint32(&*(*U32)(unsafe.Pointer(bp + 48)), 1))) Xsqlite3ProgressCheck(tls, pParse) if *(*U32)(unsafe.Pointer(bp + 48)) > U32(3) { Xsqlite3_randomness(tls, int32(unsafe.Sizeof(U32(0))), bp+48) @@ -87842,8 +87876,6 @@ (*Column)(unsafe.Pointer(pCol)).Faffinity = Xsqlite3ExprAffinity(tls, p) if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) <= SQLITE_AFF_NONE { (*Column)(unsafe.Pointer(pCol)).Faffinity = aff - } else if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) >= SQLITE_AFF_NUMERIC && int32((*Expr)(unsafe.Pointer(p)).Fop) == TK_CAST { - (*Column)(unsafe.Pointer(pCol)).Faffinity = uint8(SQLITE_AFF_FLEXNUM) } if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) >= SQLITE_AFF_TEXT && (*Select)(unsafe.Pointer(pSelect)).FpNext != 0 { var m int32 = 0 @@ -87858,12 +87890,15 @@ } else if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) >= SQLITE_AFF_NUMERIC && m&0x02 != 0 { (*Column)(unsafe.Pointer(pCol)).Faffinity = uint8(SQLITE_AFF_BLOB) } + if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) >= SQLITE_AFF_NUMERIC && int32((*Expr)(unsafe.Pointer(p)).Fop) == TK_CAST { + (*Column)(unsafe.Pointer(pCol)).Faffinity = 
uint8(SQLITE_AFF_FLEXNUM) + } } zType = columnTypeImpl(tls, bp, p, uintptr(0), uintptr(0), uintptr(0)) if zType == uintptr(0) || int32((*Column)(unsafe.Pointer(pCol)).Faffinity) != int32(Xsqlite3AffinityType(tls, zType, uintptr(0))) { if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) == SQLITE_AFF_NUMERIC || int32((*Column)(unsafe.Pointer(pCol)).Faffinity) == SQLITE_AFF_FLEXNUM { - zType = ts + 18818 + zType = ts + 18865 } else { zType = uintptr(0) for j = 1; j < SQLITE_N_STDTYPE; j++ { @@ -88079,7 +88114,7 @@ if !((*Select)(unsafe.Pointer(p)).FpWin != 0) { goto __1 } - Xsqlite3ErrorMsg(tls, pParse, ts+18822, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+18869, 0) return __1: ; @@ -88170,7 +88205,7 @@ if !((*Select)(unsafe.Pointer(pFirstRec)).FselFlags&U32(SF_Aggregate) != 0) { goto __15 } - Xsqlite3ErrorMsg(tls, pParse, ts+18871, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+18918, 0) goto end_of_recursive_query __15: ; @@ -88190,7 +88225,7 @@ ; pSetup = (*Select)(unsafe.Pointer(pFirstRec)).FpPrior (*Select)(unsafe.Pointer(pSetup)).FpNext = uintptr(0) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18913, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18960, 0) rc = Xsqlite3Select(tls, pParse, pSetup, bp) (*Select)(unsafe.Pointer(pSetup)).FpNext = p if !(rc != 0) { @@ -88227,7 +88262,7 @@ Xsqlite3VdbeResolveLabel(tls, v, addrCont) (*Select)(unsafe.Pointer(pFirstRec)).FpPrior = uintptr(0) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18919, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18966, 0) Xsqlite3Select(tls, pParse, p, bp) (*Select)(unsafe.Pointer(pFirstRec)).FpPrior = pSetup @@ -88261,11 +88296,11 @@ p = (*Select)(unsafe.Pointer(p)).FpPrior nRow = nRow + bShowAll } - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18934, libc.VaList(bp, nRow, func() uintptr { + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18981, libc.VaList(bp, nRow, func() uintptr { if nRow == 1 { return ts + 1554 } - return ts + 18957 + return ts + 19004 }())) for p != 0 { selectInnerLoop(tls, pParse, p, -1, uintptr(0), uintptr(0), pDest, 1, 1) @@ -88366,8 +88401,8 @@ if !((*Select)(unsafe.Pointer(pPrior)).FpPrior == uintptr(0)) { goto __8 } - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18959, 0) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18974, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19006, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19021, 0) __8: ; switch int32((*Select)(unsafe.Pointer(p)).Fop) { @@ -88414,7 +88449,7 @@ ; __15: ; - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18699, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18746, 0) rc = Xsqlite3Select(tls, pParse, p, bp+16) @@ -88481,7 +88516,7 @@ pLimit = (*Select)(unsafe.Pointer(p)).FpLimit (*Select)(unsafe.Pointer(p)).FpLimit = uintptr(0) (*SelectDest)(unsafe.Pointer(bp + 48)).FeDest = op - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18993, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19040, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) rc = Xsqlite3Select(tls, pParse, p, bp+48) @@ -88543,7 +88578,7 @@ pLimit1 = (*Select)(unsafe.Pointer(p)).FpLimit (*Select)(unsafe.Pointer(p)).FpLimit = uintptr(0) (*SelectDest)(unsafe.Pointer(bp + 76)).FiSDParm = tab2 - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18993, libc.VaList(bp+8, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19040, libc.VaList(bp+8, Xsqlite3SelectOpName(tls, 
int32((*Select)(unsafe.Pointer(p)).Fop)))) rc = Xsqlite3Select(tls, pParse, p, bp+76) @@ -88696,10 +88731,10 @@ defer tls.Free(8) if (*Select)(unsafe.Pointer(p)).FselFlags&U32(SF_Values) != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+19014, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+19061, 0) } else { Xsqlite3ErrorMsg(tls, pParse, - ts+19060, + ts+19107, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) } } @@ -88953,8 +88988,8 @@ (*Select)(unsafe.Pointer(pPrior)).FpNext = uintptr(0) (*Select)(unsafe.Pointer(pPrior)).FpOrderBy = Xsqlite3ExprListDup(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pOrderBy, 0) - Xsqlite3ResolveOrderGroupBy(tls, pParse, p, (*Select)(unsafe.Pointer(p)).FpOrderBy, ts+7234) - Xsqlite3ResolveOrderGroupBy(tls, pParse, pPrior, (*Select)(unsafe.Pointer(pPrior)).FpOrderBy, ts+7234) + Xsqlite3ResolveOrderGroupBy(tls, pParse, p, (*Select)(unsafe.Pointer(p)).FpOrderBy, ts+7223) + Xsqlite3ResolveOrderGroupBy(tls, pParse, pPrior, (*Select)(unsafe.Pointer(pPrior)).FpOrderBy, ts+7223) computeLimitRegisters(tls, pParse, p, labelEnd) if (*Select)(unsafe.Pointer(p)).FiLimit != 0 && op == TK_ALL { @@ -88981,13 +89016,13 @@ Xsqlite3SelectDestInit(tls, bp+8, SRT_Coroutine, regAddrA) Xsqlite3SelectDestInit(tls, bp+36, SRT_Coroutine, regAddrB) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19142, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19189, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) addrSelectA = Xsqlite3VdbeCurrentAddr(tls, v) + 1 addr1 = Xsqlite3VdbeAddOp3(tls, v, OP_InitCoroutine, regAddrA, 0, addrSelectA) (*Select)(unsafe.Pointer(pPrior)).FiLimit = regLimitA - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19153, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19200, 0) Xsqlite3Select(tls, pParse, pPrior, bp+8) Xsqlite3VdbeEndCoroutine(tls, v, regAddrA) Xsqlite3VdbeJumpHere(tls, v, addr1) @@ -88999,7 +89034,7 @@ savedOffset = (*Select)(unsafe.Pointer(p)).FiOffset (*Select)(unsafe.Pointer(p)).FiLimit = regLimitB (*Select)(unsafe.Pointer(p)).FiOffset = 0 - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19158, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19205, 0) Xsqlite3Select(tls, pParse, p, bp+36) (*Select)(unsafe.Pointer(p)).FiLimit = savedLimit (*Select)(unsafe.Pointer(p)).FiOffset = savedOffset @@ -89186,7 +89221,8 @@ Xsqlite3VectorErrorMsg(tls, (*SubstContext)(unsafe.Pointer(pSubst)).FpParse, pCopy) } else { var db uintptr = (*Parse)(unsafe.Pointer((*SubstContext)(unsafe.Pointer(pSubst)).FpParse)).Fdb - if (*SubstContext)(unsafe.Pointer(pSubst)).FisOuterJoin != 0 && int32((*Expr)(unsafe.Pointer(pCopy)).Fop) != TK_COLUMN { + if (*SubstContext)(unsafe.Pointer(pSubst)).FisOuterJoin != 0 && + (int32((*Expr)(unsafe.Pointer(pCopy)).Fop) != TK_COLUMN || (*Expr)(unsafe.Pointer(pCopy)).FiTable != (*SubstContext)(unsafe.Pointer(pSubst)).FiNewTable) { libc.Xmemset(tls, bp, 0, uint32(unsafe.Sizeof(Expr{}))) (*Expr)(unsafe.Pointer(bp)).Fop = U8(TK_IF_NULL_ROW) (*Expr)(unsafe.Pointer(bp)).FpLeft = pCopy @@ -90085,7 +90121,7 @@ for pIdx = (*Table)(unsafe.Pointer(pTab)).FpIndex; pIdx != 0 && Xsqlite3StrICmp(tls, (*Index)(unsafe.Pointer(pIdx)).FzName, zIndexedBy) != 0; pIdx = (*Index)(unsafe.Pointer(pIdx)).FpNext { } if !(pIdx != 0) { - Xsqlite3ErrorMsg(tls, pParse, ts+19164, libc.VaList(bp, zIndexedBy, 0)) + Xsqlite3ErrorMsg(tls, pParse, ts+19211, libc.VaList(bp, zIndexedBy, 0)) (*Parse)(unsafe.Pointer(pParse)).FcheckSchema = 
U8(1) return SQLITE_ERROR } @@ -90168,7 +90204,7 @@ defer tls.Free(8) if uint32(int32(*(*uint16)(unsafe.Pointer(pFrom + 36 + 4))&0x4>>2)) != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+19182, libc.VaList(bp, (*SrcItem)(unsafe.Pointer(pFrom)).FzName)) + Xsqlite3ErrorMsg(tls, pParse, ts+19229, libc.VaList(bp, (*SrcItem)(unsafe.Pointer(pFrom)).FzName)) return 1 } return 0 @@ -90297,7 +90333,7 @@ *(*U32)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pFrom)).FpSelect + 4)) |= U32(SF_CopyCte) if uint32(int32(*(*uint16)(unsafe.Pointer(pFrom + 36 + 4))&0x2>>1)) != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+19205, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(pFrom + 64)))) + Xsqlite3ErrorMsg(tls, pParse, ts+19252, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(pFrom + 64)))) return 2 } libc.SetBitFieldPtr16Uint32(pFrom+36+4, uint32(1), 8, 0x100) @@ -90320,7 +90356,7 @@ libc.SetBitFieldPtr16Uint32(pItem+36+4, uint32(1), 6, 0x40) if (*Select)(unsafe.Pointer(pRecTerm)).FselFlags&U32(SF_Recursive) != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+19225, libc.VaList(bp+16, (*Cte)(unsafe.Pointer(pCte)).FzName)) + ts+19272, libc.VaList(bp+16, (*Cte)(unsafe.Pointer(pCte)).FzName)) return 2 } *(*U32)(unsafe.Pointer(pRecTerm + 4)) |= U32(SF_Recursive) @@ -90336,7 +90372,7 @@ pRecTerm = (*Select)(unsafe.Pointer(pRecTerm)).FpPrior } - (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19268 + (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19315 pSavedWith = (*Parse)(unsafe.Pointer(pParse)).FpWith (*Parse)(unsafe.Pointer(pParse)).FpWith = *(*uintptr)(unsafe.Pointer(bp + 48)) if (*Select)(unsafe.Pointer(pSel)).FselFlags&U32(SF_Recursive) != 0 { @@ -90362,7 +90398,7 @@ pEList = (*Select)(unsafe.Pointer(pLeft)).FpEList if (*Cte)(unsafe.Pointer(pCte)).FpCols != 0 { if pEList != 0 && (*ExprList)(unsafe.Pointer(pEList)).FnExpr != (*ExprList)(unsafe.Pointer((*Cte)(unsafe.Pointer(pCte)).FpCols)).FnExpr { - Xsqlite3ErrorMsg(tls, pParse, ts+19291, + Xsqlite3ErrorMsg(tls, pParse, ts+19338, libc.VaList(bp+24, (*Cte)(unsafe.Pointer(pCte)).FzName, (*ExprList)(unsafe.Pointer(pEList)).FnExpr, (*ExprList)(unsafe.Pointer((*Cte)(unsafe.Pointer(pCte)).FpCols)).FnExpr)) (*Parse)(unsafe.Pointer(pParse)).FpWith = pSavedWith return 2 @@ -90373,9 +90409,9 @@ Xsqlite3ColumnsFromExprList(tls, pParse, pEList, pTab+34, pTab+4) if bMayRecursive != 0 { if (*Select)(unsafe.Pointer(pSel)).FselFlags&U32(SF_Recursive) != 0 { - (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19329 + (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19376 } else { - (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19363 + (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19410 } Xsqlite3WalkSelect(tls, pWalker, pSel) } @@ -90422,7 +90458,7 @@ if (*SrcItem)(unsafe.Pointer(pFrom)).FzAlias != 0 { (*Table)(unsafe.Pointer(pTab)).FzName = Xsqlite3DbStrDup(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, (*SrcItem)(unsafe.Pointer(pFrom)).FzAlias) } else { - (*Table)(unsafe.Pointer(pTab)).FzName = Xsqlite3MPrintf(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, ts+19401, libc.VaList(bp, pFrom)) + (*Table)(unsafe.Pointer(pTab)).FzName = Xsqlite3MPrintf(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, ts+19448, libc.VaList(bp, pFrom)) } for (*Select)(unsafe.Pointer(pSel)).FpPrior != 0 { pSel = (*Select)(unsafe.Pointer(pSel)).FpPrior @@ -90534,7 +90570,7 @@ return WRC_Abort } if (*Table)(unsafe.Pointer(pTab)).FnTabRef >= U32(0xffff) { - Xsqlite3ErrorMsg(tls, pParse, ts+19405, + Xsqlite3ErrorMsg(tls, pParse, ts+19452, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName)) (*SrcItem)(unsafe.Pointer(pFrom)).FpTab = uintptr(0) return WRC_Abort 
@@ -90553,7 +90589,7 @@ if int32((*Table)(unsafe.Pointer(pTab)).FeTabType) == TABTYP_VIEW { if (*Sqlite3)(unsafe.Pointer(db)).Fflags&uint64(SQLITE_EnableView) == uint64(0) && (*Table)(unsafe.Pointer(pTab)).FpSchema != (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+1*16)).FpSchema { - Xsqlite3ErrorMsg(tls, pParse, ts+19444, + Xsqlite3ErrorMsg(tls, pParse, ts+19491, libc.VaList(bp+8, (*Table)(unsafe.Pointer(pTab)).FzName)) } (*SrcItem)(unsafe.Pointer(pFrom)).FpSelect = Xsqlite3SelectDup(tls, db, *(*uintptr)(unsafe.Pointer(pTab + 44)), 0) @@ -90677,7 +90713,7 @@ if pNew != 0 { var pX uintptr = pNew + 8 + uintptr((*ExprList)(unsafe.Pointer(pNew)).FnExpr-1)*20 - (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3MPrintf(tls, db, ts+19475, libc.VaList(bp+24, zUName)) + (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3MPrintf(tls, db, ts+19522, libc.VaList(bp+24, zUName)) libc.SetBitFieldPtr16Uint32(pX+8+4, uint32(ENAME_TAB), 0, 0x3) libc.SetBitFieldPtr16Uint32(pX+8+4, uint32(1), 7, 0x80) } @@ -90742,7 +90778,7 @@ (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3DbStrDup(tls, db, (*ExprList_item)(unsafe.Pointer(pNestedFrom+8+uintptr(j)*20)).FzEName) } else { - (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3MPrintf(tls, db, ts+19480, + (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3MPrintf(tls, db, ts+19527, libc.VaList(bp+32, zSchemaName, zTabName, zName)) } @@ -90773,9 +90809,9 @@ ; if !(tableSeen != 0) { if zTName != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+19489, libc.VaList(bp+72, zTName)) + Xsqlite3ErrorMsg(tls, pParse, ts+19536, libc.VaList(bp+72, zTName)) } else { - Xsqlite3ErrorMsg(tls, pParse, ts+19507, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+19554, 0) } } } @@ -90785,7 +90821,7 @@ } if (*Select)(unsafe.Pointer(p)).FpEList != 0 { if (*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer(p)).FpEList)).FnExpr > *(*int32)(unsafe.Pointer(db + 120 + 2*4)) { - Xsqlite3ErrorMsg(tls, pParse, ts+19527, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+19574, 0) return WRC_Abort } if elistFlags&U32(EP_HasFunc|EP_Subquery) != U32(0) { @@ -90923,7 +90959,7 @@ (*AggInfo)(unsafe.Pointer(pAggInfo)).FnColumn = (*AggInfo)(unsafe.Pointer(pAggInfo)).FnAccumulator if int32((*AggInfo)(unsafe.Pointer(pAggInfo)).FnSortingColumn) > 0 { if (*AggInfo)(unsafe.Pointer(pAggInfo)).FnColumn == 0 { - (*AggInfo)(unsafe.Pointer(pAggInfo)).FnSortingColumn = U16(0) + (*AggInfo)(unsafe.Pointer(pAggInfo)).FnSortingColumn = U16((*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer(pSelect)).FpGroupBy)).FnExpr) } else { (*AggInfo)(unsafe.Pointer(pAggInfo)).FnSortingColumn = U16(int32((*AggInfo_col)(unsafe.Pointer((*AggInfo)(unsafe.Pointer(pAggInfo)).FaCol+uintptr((*AggInfo)(unsafe.Pointer(pAggInfo)).FnColumn-1)*16)).FiSorterColumn) + 1) } @@ -91007,13 +91043,13 @@ if *(*uintptr)(unsafe.Pointer(pE + 20)) == uintptr(0) || (*ExprList)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pE + 20)))).FnExpr != 1 { Xsqlite3ErrorMsg(tls, pParse, - ts+19558, 0) + ts+19605, 0) (*AggInfo_func)(unsafe.Pointer(pFunc)).FiDistinct = -1 } else { var pKeyInfo uintptr = Xsqlite3KeyInfoFromExprList(tls, pParse, *(*uintptr)(unsafe.Pointer(pE + 20)), 0, 0) (*AggInfo_func)(unsafe.Pointer(pFunc)).FiDistAddr = Xsqlite3VdbeAddOp4(tls, v, OP_OpenEphemeral, (*AggInfo_func)(unsafe.Pointer(pFunc)).FiDistinct, 0, 0, pKeyInfo, -8) - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+19609, libc.VaList(bp, (*FuncDef)(unsafe.Pointer((*AggInfo_func)(unsafe.Pointer(pFunc)).FpFunc)).FzName)) + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+19656, 
libc.VaList(bp, (*FuncDef)(unsafe.Pointer((*AggInfo_func)(unsafe.Pointer(pFunc)).FpFunc)).FzName)) } } @@ -91202,11 +91238,11 @@ if int32((*Parse)(unsafe.Pointer(pParse)).Fexplain) == 2 { var bCover int32 = libc.Bool32(pIdx != uintptr(0) && ((*Table)(unsafe.Pointer(pTab)).FtabFlags&U32(TF_WithoutRowid) == U32(0) || !(int32(*(*uint16)(unsafe.Pointer(pIdx + 56))&0x3>>0) == SQLITE_IDXTYPE_PRIMARYKEY))) - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+19642, + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+19689, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName, func() uintptr { if bCover != 0 { - return ts + 19654 + return ts + 19701 } return ts + 1554 }(), @@ -91534,7 +91570,7 @@ goto __7 } Xsqlite3ErrorMsg(tls, pParse, - ts+19677, + ts+19724, libc.VaList(bp, func() uintptr { if (*SrcItem)(unsafe.Pointer(p0)).FzAlias != 0 { return (*SrcItem)(unsafe.Pointer(p0)).FzAlias @@ -91595,7 +91631,7 @@ if !(int32((*Table)(unsafe.Pointer(pTab)).FnCol) != (*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer(pSub)).FpEList)).FnExpr) { goto __15 } - Xsqlite3ErrorMsg(tls, pParse, ts+19731, + Xsqlite3ErrorMsg(tls, pParse, ts+19778, libc.VaList(bp+8, int32((*Table)(unsafe.Pointer(pTab)).FnCol), (*Table)(unsafe.Pointer(pTab)).FzName, (*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer(pSub)).FpEList)).FnExpr)) goto select_end __15: @@ -91737,7 +91773,7 @@ (*SrcItem)(unsafe.Pointer(pItem1)).FaddrFillSub = addrTop Xsqlite3SelectDestInit(tls, bp+88, SRT_Coroutine, (*SrcItem)(unsafe.Pointer(pItem1)).FregReturn) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19771, libc.VaList(bp+32, pItem1)) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19818, libc.VaList(bp+32, pItem1)) Xsqlite3Select(tls, pParse, pSub1, bp+88) (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pItem1)).FpTab)).FnRowLogEst = (*Select)(unsafe.Pointer(pSub1)).FnSelectRow libc.SetBitFieldPtr16Uint32(pItem1+36+4, uint32(1), 5, 0x20) @@ -91796,7 +91832,7 @@ ; Xsqlite3SelectDestInit(tls, bp+88, SRT_EphemTab, (*SrcItem)(unsafe.Pointer(pItem1)).FiCursor) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19786, libc.VaList(bp+40, pItem1)) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19833, libc.VaList(bp+40, pItem1)) Xsqlite3Select(tls, pParse, pSub1, bp+88) (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pItem1)).FpTab)).FnRowLogEst = (*Select)(unsafe.Pointer(pSub1)).FnSelectRow if !(onceAddr != 0) { @@ -92267,9 +92303,9 @@ explainTempTable(tls, pParse, func() uintptr { if (*DistinctCtx)(unsafe.Pointer(bp+116)).FisTnct != 0 && (*Select)(unsafe.Pointer(p)).FselFlags&U32(SF_Distinct) == U32(0) { - return ts + 19802 + return ts + 19849 } - return ts + 19811 + return ts + 19858 }()) groupBySort = 1 @@ -92620,7 +92656,7 @@ if !(int32((*DistinctCtx)(unsafe.Pointer(bp+116)).FeTnctType) == WHERE_DISTINCT_UNORDERED) { goto __146 } - explainTempTable(tls, pParse, ts+19802) + explainTempTable(tls, pParse, ts+19849) __146: ; if !((*SortCtx)(unsafe.Pointer(bp+48)).FpOrderBy != 0) { @@ -92724,7 +92760,7 @@ } Xsqlite3_free(tls, (*TabResult)(unsafe.Pointer(p)).FzErrMsg) (*TabResult)(unsafe.Pointer(p)).FzErrMsg = Xsqlite3_mprintf(tls, - ts+19820, 0) + ts+19867, 0) (*TabResult)(unsafe.Pointer(p)).Frc = SQLITE_ERROR return 1 __11: @@ -92957,7 +92993,7 @@ if !((*Token)(unsafe.Pointer(pName2)).Fn > uint32(0)) { goto __3 } - Xsqlite3ErrorMsg(tls, pParse, ts+19885, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+19932, 0) goto trigger_cleanup __3: ; @@ -93001,7 +93037,7 @@ goto trigger_cleanup __8: ; - Xsqlite3FixInit(tls, bp+36, pParse, iDb, ts+19931, 
*(*uintptr)(unsafe.Pointer(bp + 32))) + Xsqlite3FixInit(tls, bp+36, pParse, iDb, ts+19978, *(*uintptr)(unsafe.Pointer(bp + 32))) if !(Xsqlite3FixSrcList(tls, bp+36, pTableName) != 0) { goto __9 } @@ -93019,7 +93055,7 @@ if !(int32((*Table)(unsafe.Pointer(pTab)).FeTabType) == TABTYP_VTAB) { goto __11 } - Xsqlite3ErrorMsg(tls, pParse, ts+19939, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+19986, 0) goto trigger_orphan_error __11: ; @@ -93031,7 +93067,7 @@ goto trigger_cleanup __12: ; - if !(Xsqlite3CheckObjectName(tls, pParse, zName, ts+19931, (*Table)(unsafe.Pointer(pTab)).FzName) != 0) { + if !(Xsqlite3CheckObjectName(tls, pParse, zName, ts+19978, (*Table)(unsafe.Pointer(pTab)).FzName) != 0) { goto __13 } goto trigger_cleanup @@ -93046,11 +93082,12 @@ if !!(noErr != 0) { goto __16 } - Xsqlite3ErrorMsg(tls, pParse, ts+19980, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(bp + 32)))) + Xsqlite3ErrorMsg(tls, pParse, ts+20027, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(bp + 32)))) goto __17 __16: ; Xsqlite3CodeVerifySchema(tls, pParse, iDb) + __17: ; goto trigger_cleanup @@ -93061,19 +93098,19 @@ if !(Xsqlite3_strnicmp(tls, (*Table)(unsafe.Pointer(pTab)).FzName, ts+6381, 7) == 0) { goto __18 } - Xsqlite3ErrorMsg(tls, pParse, ts+20006, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+20053, 0) goto trigger_cleanup __18: ; if !(int32((*Table)(unsafe.Pointer(pTab)).FeTabType) == TABTYP_VIEW && tr_tm != TK_INSTEAD) { goto __19 } - Xsqlite3ErrorMsg(tls, pParse, ts+20044, + Xsqlite3ErrorMsg(tls, pParse, ts+20091, libc.VaList(bp+8, func() uintptr { if tr_tm == TK_BEFORE { - return ts + 20081 + return ts + 20128 } - return ts + 20088 + return ts + 20135 }(), pTableName+8)) goto trigger_orphan_error __19: @@ -93082,7 +93119,7 @@ goto __20 } Xsqlite3ErrorMsg(tls, pParse, - ts+20094, libc.VaList(bp+24, pTableName+8)) + ts+20141, libc.VaList(bp+24, pTableName+8)) goto trigger_orphan_error __20: ; @@ -93231,7 +93268,7 @@ __3: ; Xsqlite3TokenInit(tls, bp+56, (*Trigger)(unsafe.Pointer(pTrig)).FzName) - Xsqlite3FixInit(tls, bp+64, pParse, iDb, ts+19931, bp+56) + Xsqlite3FixInit(tls, bp+64, pParse, iDb, ts+19978, bp+56) if !(Xsqlite3FixTriggerStep(tls, bp+64, (*Trigger)(unsafe.Pointer(pTrig)).Fstep_list) != 0 || Xsqlite3FixExpr(tls, bp+64, (*Trigger)(unsafe.Pointer(pTrig)).FpWhen) != 0) { goto __4 @@ -93264,7 +93301,7 @@ goto __12 } Xsqlite3ErrorMsg(tls, pParse, - ts+20140, + ts+20187, libc.VaList(bp, (*Trigger)(unsafe.Pointer(pTrig)).FzName, (*TriggerStep)(unsafe.Pointer(pStep)).FzTarget)) goto triggerfinish_cleanup __12: @@ -93289,13 +93326,13 @@ z = Xsqlite3DbStrNDup(tls, db, (*Token)(unsafe.Pointer(pAll)).Fz, uint64((*Token)(unsafe.Pointer(pAll)).Fn)) Xsqlite3NestedParse(tls, pParse, - ts+20188, + ts+20235, libc.VaList(bp+16, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*16)).FzDbSName, zName, (*Trigger)(unsafe.Pointer(pTrig)).Ftable, z)) Xsqlite3DbFree(tls, db, z) Xsqlite3ChangeCookie(tls, pParse, iDb) Xsqlite3VdbeAddParseSchemaOp(tls, v, iDb, - Xsqlite3MPrintf(tls, db, ts+20263, libc.VaList(bp+48, zName)), uint16(0)) + Xsqlite3MPrintf(tls, db, ts+20310, libc.VaList(bp+48, zName)), uint16(0)) __7: ; __6: @@ -93551,7 +93588,7 @@ if !!(noErr != 0) { goto __9 } - Xsqlite3ErrorMsg(tls, pParse, ts+20292, libc.VaList(bp, pName+8)) + Xsqlite3ErrorMsg(tls, pParse, ts+20339, libc.VaList(bp, pName+8)) goto __10 __9: Xsqlite3CodeVerifyNamedSchema(tls, pParse, zDb) @@ -93604,7 +93641,7 @@ if libc.AssignUintptr(&v, Xsqlite3GetVdbe(tls, pParse)) != uintptr(0) { Xsqlite3NestedParse(tls, pParse, - ts+20312, + ts+20359, 
libc.VaList(bp, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*16)).FzDbSName, (*Trigger)(unsafe.Pointer(pTrigger)).FzName)) Xsqlite3ChangeCookie(tls, pParse, iDb) Xsqlite3VdbeAddOp4(tls, v, OP_DropTrigger, iDb, 0, 0, (*Trigger)(unsafe.Pointer(pTrigger)).FzName, 0) @@ -93718,12 +93755,12 @@ goto __15 } Xsqlite3ErrorMsg(tls, pParse, - ts+20374, + ts+20421, libc.VaList(bp, func() uintptr { if op == TK_DELETE { - return ts + 20422 + return ts + 20469 } - return ts + 20429 + return ts + 20476 }())) __15: ; @@ -93837,7 +93874,7 @@ if int32((*Expr)(unsafe.Pointer((*Expr)(unsafe.Pointer(pTerm)).FpRight)).Fop) != TK_ASTERISK { return 0 } - Xsqlite3ErrorMsg(tls, pParse, ts+20436, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+20483, 0) return 1 } @@ -93903,7 +93940,7 @@ } Xsqlite3ExprListDelete(tls, db, (*Select)(unsafe.Pointer(bp)).FpEList) pNew = sqlite3ExpandReturning(tls, pParse, (*Returning)(unsafe.Pointer(pReturning)).FpReturnEL, pTab) - if !(int32((*Sqlite3)(unsafe.Pointer(db)).FmallocFailed) != 0) { + if (*Parse)(unsafe.Pointer(pParse)).FnErr == 0 { libc.Xmemset(tls, bp+160, 0, uint32(unsafe.Sizeof(NameContext{}))) if (*Returning)(unsafe.Pointer(pReturning)).FnRetCol == 0 { (*Returning)(unsafe.Pointer(pReturning)).FnRetCol = (*ExprList)(unsafe.Pointer(pNew)).FnExpr @@ -94067,7 +94104,7 @@ if v != 0 { if (*Trigger)(unsafe.Pointer(pTrigger)).FzName != 0 { Xsqlite3VdbeChangeP4(tls, v, -1, - Xsqlite3MPrintf(tls, db, ts+20478, libc.VaList(bp, (*Trigger)(unsafe.Pointer(pTrigger)).FzName)), -6) + Xsqlite3MPrintf(tls, db, ts+20525, libc.VaList(bp, (*Trigger)(unsafe.Pointer(pTrigger)).FzName)), -6) } if (*Trigger)(unsafe.Pointer(pTrigger)).FpWhen != 0 { @@ -94660,7 +94697,7 @@ } Xsqlite3ErrorMsg(tls, pParse, - ts+20492, + ts+20539, libc.VaList(bp, (*Column)(unsafe.Pointer((*Table)(unsafe.Pointer(pTab)).FaCol+uintptr(j)*16)).FzCnName)) goto update_cleanup __27: @@ -94692,7 +94729,7 @@ iRowidExpr = i goto __30 __29: - Xsqlite3ErrorMsg(tls, pParse, ts+20528, libc.VaList(bp+8, (*ExprList_item)(unsafe.Pointer(pChanges+8+uintptr(i)*20)).FzEName)) + Xsqlite3ErrorMsg(tls, pParse, ts+20575, libc.VaList(bp+8, (*ExprList_item)(unsafe.Pointer(pChanges+8+uintptr(i)*20)).FzEName)) (*Parse)(unsafe.Pointer(pParse)).FcheckSchema = U8(1) goto update_cleanup __30: @@ -95018,7 +95055,12 @@ goto __77 __76: flags = WHERE_ONEPASS_DESIRED - if !(!(int32((*Parse)(unsafe.Pointer(pParse)).Fnested) != 0) && !(pTrigger != 0) && !(hasFK != 0) && !(chngKey != 0) && !(*(*int32)(unsafe.Pointer(bp + 68)) != 0)) { + if !(!(int32((*Parse)(unsafe.Pointer(pParse)).Fnested) != 0) && + !(pTrigger != 0) && + !(hasFK != 0) && + !(chngKey != 0) && + !(*(*int32)(unsafe.Pointer(bp + 68)) != 0) && + (*NameContext)(unsafe.Pointer(bp+28)).FncFlags&NC_Subquery == 0) { goto __78 } flags = flags | WHERE_ONEPASS_MULTIROW @@ -95572,7 +95614,7 @@ if !(regRowCount != 0) { goto __169 } - Xsqlite3CodeChangeCount(tls, v, regRowCount, ts+20547) + Xsqlite3CodeChangeCount(tls, v, regRowCount, ts+20594) __169: ; update_cleanup: @@ -95878,10 +95920,10 @@ if nClause == 0 && (*Upsert)(unsafe.Pointer(pUpsert)).FpNextUpsert == uintptr(0) { *(*uint8)(unsafe.Pointer(bp + 152)) = uint8(0) } else { - Xsqlite3_snprintf(tls, int32(unsafe.Sizeof([16]uint8{})), bp+152, ts+20560, libc.VaList(bp, nClause+1)) + Xsqlite3_snprintf(tls, int32(unsafe.Sizeof([16]uint8{})), bp+152, ts+20607, libc.VaList(bp, nClause+1)) } Xsqlite3ErrorMsg(tls, pParse, - ts+20564, libc.VaList(bp+8, bp+152)) + ts+20611, libc.VaList(bp+8, bp+152)) return SQLITE_ERROR } @@ -96004,7 
+96046,7 @@ var zSubSql uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp)), 0) if zSubSql != 0 && - (libc.Xstrncmp(tls, zSubSql, ts+20637, uint32(3)) == 0 || libc.Xstrncmp(tls, zSubSql, ts+20641, uint32(3)) == 0) { + (libc.Xstrncmp(tls, zSubSql, ts+20684, uint32(3)) == 0 || libc.Xstrncmp(tls, zSubSql, ts+20688, uint32(3)) == 0) { rc = execSql(tls, db, pzErrMsg, zSubSql) if rc != SQLITE_OK { break @@ -96152,14 +96194,14 @@ if !!(int32((*Sqlite3)(unsafe.Pointer(db)).FautoCommit) != 0) { goto __1 } - Xsqlite3SetString(tls, pzErrMsg, db, ts+20645) + Xsqlite3SetString(tls, pzErrMsg, db, ts+20692) return SQLITE_ERROR __1: ; if !((*Sqlite3)(unsafe.Pointer(db)).FnVdbeActive > 1) { goto __2 } - Xsqlite3SetString(tls, pzErrMsg, db, ts+20685) + Xsqlite3SetString(tls, pzErrMsg, db, ts+20732) return SQLITE_ERROR __2: ; @@ -96170,7 +96212,7 @@ if !(Xsqlite3_value_type(tls, pOut) != SQLITE_TEXT) { goto __5 } - Xsqlite3SetString(tls, pzErrMsg, db, ts+20728) + Xsqlite3SetString(tls, pzErrMsg, db, ts+20775) return SQLITE_ERROR __5: ; @@ -96198,7 +96240,7 @@ isMemDb = Xsqlite3PagerIsMemdb(tls, Xsqlite3BtreePager(tls, pMain)) nDb = (*Sqlite3)(unsafe.Pointer(db)).FnDb - rc = execSqlF(tls, db, pzErrMsg, ts+20746, libc.VaList(bp, zOut)) + rc = execSqlF(tls, db, pzErrMsg, ts+20793, libc.VaList(bp, zOut)) (*Sqlite3)(unsafe.Pointer(db)).FopenFlags = saved_openFlags if !(rc != SQLITE_OK) { goto __6 @@ -96218,7 +96260,7 @@ goto __8 } rc = SQLITE_ERROR - Xsqlite3SetString(tls, pzErrMsg, db, ts+20769) + Xsqlite3SetString(tls, pzErrMsg, db, ts+20816) goto end_of_vacuum __8: ; @@ -96278,7 +96320,7 @@ (*Sqlite3)(unsafe.Pointer(db)).Finit.FiDb = U8(nDb) rc = execSqlF(tls, db, pzErrMsg, - ts+20796, + ts+20843, libc.VaList(bp+8, zDbMain)) if !(rc != SQLITE_OK) { goto __13 @@ -96287,7 +96329,7 @@ __13: ; rc = execSqlF(tls, db, pzErrMsg, - ts+20904, + ts+20951, libc.VaList(bp+16, zDbMain)) if !(rc != SQLITE_OK) { goto __14 @@ -96298,7 +96340,7 @@ (*Sqlite3)(unsafe.Pointer(db)).Finit.FiDb = U8(0) rc = execSqlF(tls, db, pzErrMsg, - ts+20958, + ts+21005, libc.VaList(bp+24, zDbMain)) *(*U32)(unsafe.Pointer(db + 24)) &= libc.Uint32FromInt32(libc.CplInt32(DBFLAG_Vacuum)) @@ -96309,7 +96351,7 @@ __15: ; rc = execSqlF(tls, db, pzErrMsg, - ts+21109, + ts+21156, libc.VaList(bp+32, zDbMain)) if !(rc != 0) { goto __16 @@ -96738,11 +96780,11 @@ if pEnd != 0 { (*Parse)(unsafe.Pointer(pParse)).FsNameToken.Fn = uint32((int32((*Token)(unsafe.Pointer(pEnd)).Fz)-int32((*Parse)(unsafe.Pointer(pParse)).FsNameToken.Fz))/1) + (*Token)(unsafe.Pointer(pEnd)).Fn } - zStmt = Xsqlite3MPrintf(tls, db, ts+21239, libc.VaList(bp, pParse+196)) + zStmt = Xsqlite3MPrintf(tls, db, ts+21286, libc.VaList(bp, pParse+196)) iDb = Xsqlite3SchemaToIndex(tls, db, (*Table)(unsafe.Pointer(pTab)).FpSchema) Xsqlite3NestedParse(tls, pParse, - ts+21263, + ts+21310, libc.VaList(bp+8, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*16)).FzDbSName, (*Table)(unsafe.Pointer(pTab)).FzName, (*Table)(unsafe.Pointer(pTab)).FzName, @@ -96752,7 +96794,7 @@ Xsqlite3ChangeCookie(tls, pParse, iDb) Xsqlite3VdbeAddOp0(tls, v, OP_Expire) - zWhere = Xsqlite3MPrintf(tls, db, ts+21362, libc.VaList(bp+48, (*Table)(unsafe.Pointer(pTab)).FzName, zStmt)) + zWhere = Xsqlite3MPrintf(tls, db, ts+21409, libc.VaList(bp+48, (*Table)(unsafe.Pointer(pTab)).FzName, zStmt)) Xsqlite3VdbeAddParseSchemaOp(tls, v, iDb, zWhere, uint16(0)) Xsqlite3DbFree(tls, db, zStmt) @@ -96813,7 +96855,7 @@ for pCtx = (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx; pCtx != 0; pCtx = 
(*VtabCtx)(unsafe.Pointer(pCtx)).FpPrior { if (*VtabCtx)(unsafe.Pointer(pCtx)).FpTab == pTab { *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, - ts+21381, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName)) + ts+21428, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName)) return SQLITE_LOCKED } } @@ -96841,9 +96883,11 @@ (*VtabCtx)(unsafe.Pointer(bp + 32)).FpPrior = (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx (*VtabCtx)(unsafe.Pointer(bp + 32)).FbDeclared = 0 (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx = bp + 32 + (*Table)(unsafe.Pointer(pTab)).FnTabRef++ rc = (*struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, uintptr, uintptr) int32 })(unsafe.Pointer(&struct{ uintptr }{xConstruct})).f(tls, db, (*Module)(unsafe.Pointer(pMod)).FpAux, nArg, azArg, pVTable+8, bp+48) + Xsqlite3DeleteTable(tls, db, pTab) (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx = (*VtabCtx)(unsafe.Pointer(bp + 32)).FpPrior if rc == SQLITE_NOMEM { Xsqlite3OomFault(tls, db) @@ -96851,7 +96895,7 @@ if SQLITE_OK != rc { if *(*uintptr)(unsafe.Pointer(bp + 48)) == uintptr(0) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+21423, libc.VaList(bp+8, zModuleName)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+21470, libc.VaList(bp+8, zModuleName)) } else { *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+3663, libc.VaList(bp+16, *(*uintptr)(unsafe.Pointer(bp + 48)))) Xsqlite3_free(tls, *(*uintptr)(unsafe.Pointer(bp + 48))) @@ -96863,7 +96907,7 @@ (*Module)(unsafe.Pointer(pMod)).FnRefModule++ (*VTable)(unsafe.Pointer(pVTable)).FnRef = 1 if (*VtabCtx)(unsafe.Pointer(bp+32)).FbDeclared == 0 { - var zFormat uintptr = ts + 21453 + var zFormat uintptr = ts + 21500 *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, zFormat, libc.VaList(bp+24, (*Table)(unsafe.Pointer(pTab)).FzName)) Xsqlite3VtabUnlock(tls, pVTable) rc = SQLITE_ERROR @@ -96937,7 +96981,7 @@ if !(pMod != 0) { var zModule uintptr = *(*uintptr)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pTab + 44 + 4)))) - Xsqlite3ErrorMsg(tls, pParse, ts+21499, libc.VaList(bp, zModule)) + Xsqlite3ErrorMsg(tls, pParse, ts+21546, libc.VaList(bp, zModule)) rc = SQLITE_ERROR } else { *(*uintptr)(unsafe.Pointer(bp + 16)) = uintptr(0) @@ -96995,7 +97039,7 @@ pMod = Xsqlite3HashFind(tls, db+404, zMod) if pMod == uintptr(0) || (*Sqlite3_module)(unsafe.Pointer((*Module)(unsafe.Pointer(pMod)).FpModule)).FxCreate == uintptr(0) || (*Sqlite3_module)(unsafe.Pointer((*Module)(unsafe.Pointer(pMod)).FpModule)).FxDestroy == uintptr(0) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+21499, libc.VaList(bp, zMod)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+21546, libc.VaList(bp, zMod)) rc = SQLITE_ERROR } else { rc = vtabCallConstructor(tls, db, pTab, pMod, (*Sqlite3_module)(unsafe.Pointer((*Module)(unsafe.Pointer(pMod)).FpModule)).FxCreate, pzErr) @@ -97029,7 +97073,7 @@ if !(pCtx != 0) || (*VtabCtx)(unsafe.Pointer(pCtx)).FbDeclared != 0 { Xsqlite3Error(tls, db, SQLITE_MISUSE) Xsqlite3_mutex_leave(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) - return Xsqlite3MisuseError(tls, 151030) + return Xsqlite3MisuseError(tls, 151102) } pTab = (*VtabCtx)(unsafe.Pointer(pCtx)).FpTab @@ -97482,7 +97526,7 @@ Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) p = (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx if !(p != 0) { - rc = Xsqlite3MisuseError(tls, 151521) + rc = Xsqlite3MisuseError(tls, 151593) } else { ap = va switch op { @@ -97509,7 +97553,7 @@ fallthrough default: { - rc = 
Xsqlite3MisuseError(tls, 151539) + rc = Xsqlite3MisuseError(tls, 151611) break } @@ -97741,7 +97785,7 @@ func explainIndexColumnName(tls *libc.TLS, pIdx uintptr, i int32) uintptr { i = int32(*(*I16)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx)).FaiColumn + uintptr(i)*2))) if i == -2 { - return ts + 21518 + return ts + 21565 } if i == -1 { return ts + 16267 @@ -97753,11 +97797,11 @@ var i int32 if bAnd != 0 { - Xsqlite3_str_append(tls, pStr, ts+21525, 5) + Xsqlite3_str_append(tls, pStr, ts+21572, 5) } if nTerm > 1 { - Xsqlite3_str_append(tls, pStr, ts+21531, 1) + Xsqlite3_str_append(tls, pStr, ts+21578, 1) } for i = 0; i < nTerm; i++ { if i != 0 { @@ -97772,7 +97816,7 @@ Xsqlite3_str_append(tls, pStr, zOp, 1) if nTerm > 1 { - Xsqlite3_str_append(tls, pStr, ts+21531, 1) + Xsqlite3_str_append(tls, pStr, ts+21578, 1) } for i = 0; i < nTerm; i++ { if i != 0 { @@ -97798,27 +97842,27 @@ if int32(nEq) == 0 && (*WhereLoop)(unsafe.Pointer(pLoop)).FwsFlags&U32(WHERE_BTM_LIMIT|WHERE_TOP_LIMIT) == U32(0) { return } - Xsqlite3_str_append(tls, pStr, ts+21533, 2) + Xsqlite3_str_append(tls, pStr, ts+21580, 2) for i = 0; i < int32(nEq); i++ { var z uintptr = explainIndexColumnName(tls, pIndex, i) if i != 0 { - Xsqlite3_str_append(tls, pStr, ts+21525, 5) + Xsqlite3_str_append(tls, pStr, ts+21572, 5) } Xsqlite3_str_appendf(tls, pStr, func() uintptr { if i >= int32(nSkip) { - return ts + 21536 + return ts + 21583 } - return ts + 21541 + return ts + 21588 }(), libc.VaList(bp, z)) } j = i if (*WhereLoop)(unsafe.Pointer(pLoop)).FwsFlags&U32(WHERE_BTM_LIMIT) != 0 { - explainAppendTerm(tls, pStr, pIndex, int32(*(*U16)(unsafe.Pointer(pLoop + 24 + 2))), j, i, ts+21549) + explainAppendTerm(tls, pStr, pIndex, int32(*(*U16)(unsafe.Pointer(pLoop + 24 + 2))), j, i, ts+21596) i = 1 } if (*WhereLoop)(unsafe.Pointer(pLoop)).FwsFlags&U32(WHERE_TOP_LIMIT) != 0 { - explainAppendTerm(tls, pStr, pIndex, int32(*(*U16)(unsafe.Pointer(pLoop + 24 + 4))), j, i, ts+21551) + explainAppendTerm(tls, pStr, pIndex, int32(*(*U16)(unsafe.Pointer(pLoop + 24 + 4))), j, i, ts+21598) } Xsqlite3_str_append(tls, pStr, ts+4957, 1) } @@ -97861,11 +97905,11 @@ Xsqlite3StrAccumInit(tls, bp+64, db, bp+88, int32(unsafe.Sizeof([100]uint8{})), SQLITE_MAX_LENGTH) (*StrAccum)(unsafe.Pointer(bp + 64)).FprintfFlags = U8(SQLITE_PRINTF_INTERNAL) - Xsqlite3_str_appendf(tls, bp+64, ts+21553, libc.VaList(bp, func() uintptr { + Xsqlite3_str_appendf(tls, bp+64, ts+21600, libc.VaList(bp, func() uintptr { if isSearch != 0 { - return ts + 21559 + return ts + 21606 } - return ts + 21566 + return ts + 21613 }(), pItem)) if flags&U32(WHERE_IPK|WHERE_VIRTUALTABLE) == U32(0) { var zFmt uintptr = uintptr(0) @@ -97878,40 +97922,40 @@ zFmt = ts + 10976 } } else if flags&U32(WHERE_PARTIALIDX) != 0 { - zFmt = ts + 21571 + zFmt = ts + 21618 } else if flags&U32(WHERE_AUTO_INDEX) != 0 { - zFmt = ts + 21604 + zFmt = ts + 21651 } else if flags&U32(WHERE_IDX_ONLY) != 0 { - zFmt = ts + 21629 + zFmt = ts + 21676 } else { - zFmt = ts + 21647 + zFmt = ts + 21694 } if zFmt != 0 { - Xsqlite3_str_append(tls, bp+64, ts+21656, 7) + Xsqlite3_str_append(tls, bp+64, ts+21703, 7) Xsqlite3_str_appendf(tls, bp+64, zFmt, libc.VaList(bp+16, (*Index)(unsafe.Pointer(pIdx)).FzName)) explainIndexRange(tls, bp+64, pLoop) } } else if flags&U32(WHERE_IPK) != U32(0) && flags&U32(WHERE_CONSTRAINT) != U32(0) { var cRangeOp uint8 var zRowid uintptr = ts + 16267 - Xsqlite3_str_appendf(tls, bp+64, ts+21664, libc.VaList(bp+24, zRowid)) + Xsqlite3_str_appendf(tls, bp+64, ts+21711, libc.VaList(bp+24, zRowid)) if 
flags&U32(WHERE_COLUMN_EQ|WHERE_COLUMN_IN) != 0 { cRangeOp = uint8('=') } else if flags&U32(WHERE_BOTH_LIMIT) == U32(WHERE_BOTH_LIMIT) { - Xsqlite3_str_appendf(tls, bp+64, ts+21695, libc.VaList(bp+32, zRowid)) + Xsqlite3_str_appendf(tls, bp+64, ts+21742, libc.VaList(bp+32, zRowid)) cRangeOp = uint8('<') } else if flags&U32(WHERE_BTM_LIMIT) != 0 { cRangeOp = uint8('>') } else { cRangeOp = uint8('<') } - Xsqlite3_str_appendf(tls, bp+64, ts+21705, libc.VaList(bp+40, int32(cRangeOp))) + Xsqlite3_str_appendf(tls, bp+64, ts+21752, libc.VaList(bp+40, int32(cRangeOp))) } else if flags&U32(WHERE_VIRTUALTABLE) != U32(0) { - Xsqlite3_str_appendf(tls, bp+64, ts+21710, + Xsqlite3_str_appendf(tls, bp+64, ts+21757, libc.VaList(bp+48, *(*int32)(unsafe.Pointer(pLoop + 24)), *(*uintptr)(unsafe.Pointer(pLoop + 24 + 12)))) } if int32((*SrcItem)(unsafe.Pointer(pItem)).Ffg.Fjointype)&JT_LEFT != 0 { - Xsqlite3_str_appendf(tls, bp+64, ts+21737, 0) + Xsqlite3_str_appendf(tls, bp+64, ts+21784, 0) } zMsg = Xsqlite3StrAccumFinish(tls, bp+64) @@ -97943,22 +97987,22 @@ Xsqlite3StrAccumInit(tls, bp+24, db, bp+48, int32(unsafe.Sizeof([100]uint8{})), SQLITE_MAX_LENGTH) (*StrAccum)(unsafe.Pointer(bp + 24)).FprintfFlags = U8(SQLITE_PRINTF_INTERNAL) - Xsqlite3_str_appendf(tls, bp+24, ts+21748, libc.VaList(bp, pItem)) + Xsqlite3_str_appendf(tls, bp+24, ts+21795, libc.VaList(bp, pItem)) pLoop = (*WhereLevel)(unsafe.Pointer(pLevel)).FpWLoop if (*WhereLoop)(unsafe.Pointer(pLoop)).FwsFlags&U32(WHERE_IPK) != 0 { var pTab uintptr = (*SrcItem)(unsafe.Pointer(pItem)).FpTab if int32((*Table)(unsafe.Pointer(pTab)).FiPKey) >= 0 { - Xsqlite3_str_appendf(tls, bp+24, ts+21536, libc.VaList(bp+8, (*Column)(unsafe.Pointer((*Table)(unsafe.Pointer(pTab)).FaCol+uintptr((*Table)(unsafe.Pointer(pTab)).FiPKey)*16)).FzCnName)) + Xsqlite3_str_appendf(tls, bp+24, ts+21583, libc.VaList(bp+8, (*Column)(unsafe.Pointer((*Table)(unsafe.Pointer(pTab)).FaCol+uintptr((*Table)(unsafe.Pointer(pTab)).FiPKey)*16)).FzCnName)) } else { - Xsqlite3_str_appendf(tls, bp+24, ts+21769, 0) + Xsqlite3_str_appendf(tls, bp+24, ts+21816, 0) } } else { for i = int32((*WhereLoop)(unsafe.Pointer(pLoop)).FnSkip); i < int32(*(*U16)(unsafe.Pointer(pLoop + 24))); i++ { var z uintptr = explainIndexColumnName(tls, *(*uintptr)(unsafe.Pointer(pLoop + 24 + 8)), i) if i > int32((*WhereLoop)(unsafe.Pointer(pLoop)).FnSkip) { - Xsqlite3_str_append(tls, bp+24, ts+21525, 5) + Xsqlite3_str_append(tls, bp+24, ts+21572, 5) } - Xsqlite3_str_appendf(tls, bp+24, ts+21536, libc.VaList(bp+16, z)) + Xsqlite3_str_appendf(tls, bp+24, ts+21583, libc.VaList(bp+16, z)) } } Xsqlite3_str_append(tls, bp+24, ts+4957, 1) @@ -99555,7 +99599,7 @@ ; __126: ; - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21777, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21824, 0) ii = 0 __135: if !(ii < (*WhereClause)(unsafe.Pointer(pOrWc)).FnTerm) { @@ -99583,7 +99627,7 @@ pOrExpr = pAndExpr __140: ; - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21792, libc.VaList(bp, ii+1)) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21839, libc.VaList(bp, ii+1)) pSubWInfo = Xsqlite3WhereBegin(tls, pParse, pOrTab, pOrExpr, uintptr(0), uintptr(0), uintptr(0), uint16(WHERE_OR_SUBCLAUSE), iCovCur) @@ -100101,7 +100145,7 @@ var mAll Bitmask = uint64(0) var k int32 - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21801, libc.VaList(bp, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pTabItem)).FpTab)).FzName)) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21848, libc.VaList(bp, 
(*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pTabItem)).FpTab)).FzName)) for k = 0; k < iLevel; k++ { var iIdxCur int32 @@ -100462,7 +100506,7 @@ {FzOp: ts + 16116, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_MATCH)}, {FzOp: ts + 15447, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_GLOB)}, {FzOp: ts + 14967, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_LIKE)}, - {FzOp: ts + 21815, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_REGEXP)}, + {FzOp: ts + 21862, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_REGEXP)}, } func transferJoinMarkings(tls *libc.TLS, pDerived uintptr, pBase uintptr) { @@ -100952,12 +100996,12 @@ extraRight = x - uint64(1) if prereqAll>>1 >= x { - Xsqlite3ErrorMsg(tls, pParse, ts+21822, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+21869, 0) return } } else if prereqAll>>1 >= x { if (*SrcList)(unsafe.Pointer(pSrc)).FnSrc > 0 && int32((*SrcItem)(unsafe.Pointer(pSrc+8)).Ffg.Fjointype)&JT_LTORJ != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+21822, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+21869, 0) return } *(*U32)(unsafe.Pointer(pExpr + 4)) &= libc.Uint32FromInt32(libc.CplInt32(EP_InnerON)) @@ -101036,7 +101080,7 @@ !((*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_OuterON) != U32(0)) && 0 == Xsqlite3ExprCanBeNull(tls, pLeft) { (*Expr)(unsafe.Pointer(pExpr)).Fop = U8(TK_TRUEFALSE) - *(*uintptr)(unsafe.Pointer(pExpr + 8)) = ts + 6769 + *(*uintptr)(unsafe.Pointer(pExpr + 8)) = ts + 7699 *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_IsFalse) (*WhereTerm)(unsafe.Pointer(pTerm)).FprereqAll = uint64(0) (*WhereTerm)(unsafe.Pointer(pTerm)).FeOperator = U16(0) @@ -101130,7 +101174,7 @@ } zCollSeqName = func() uintptr { if *(*int32)(unsafe.Pointer(bp + 16)) != 0 { - return ts + 21863 + return ts + 21910 } return uintptr(unsafe.Pointer(&Xsqlite3StrBINARY)) }() @@ -101506,7 +101550,7 @@ k++ } if k >= int32((*Table)(unsafe.Pointer(pTab)).FnCol) { - Xsqlite3ErrorMsg(tls, pParse, ts+21870, + Xsqlite3ErrorMsg(tls, pParse, ts+21917, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName, j)) return } @@ -101522,7 +101566,7 @@ pRhs = Xsqlite3PExpr(tls, pParse, TK_UPLUS, Xsqlite3ExprDup(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, (*ExprList_item)(unsafe.Pointer(pArgs+8+uintptr(j)*20)).FpExpr, 0), uintptr(0)) pTerm = Xsqlite3PExpr(tls, pParse, TK_EQ, pColRef, pRhs) - if int32((*SrcItem)(unsafe.Pointer(pItem)).Ffg.Fjointype)&(JT_LEFT|JT_LTORJ) != 0 { + if int32((*SrcItem)(unsafe.Pointer(pItem)).Ffg.Fjointype)&(JT_LEFT|JT_LTORJ|JT_RIGHT) != 0 { joinType = U32(EP_OuterON) } else { joinType = U32(EP_InnerON) @@ -102239,7 +102283,7 @@ goto __6 } Xsqlite3_log(tls, SQLITE_WARNING|int32(1)<<8, - ts+21906, libc.VaList(bp, (*Table)(unsafe.Pointer(pTable)).FzName, + ts+21953, libc.VaList(bp, (*Table)(unsafe.Pointer(pTable)).FzName, (*Column)(unsafe.Pointer((*Table)(unsafe.Pointer(pTable)).FaCol+uintptr(iCol)*16)).FzCnName)) sentWarning = U8(1) __6: @@ -102310,7 +102354,7 @@ __14: ; *(*uintptr)(unsafe.Pointer(pLoop + 24 + 8)) = pIdx - (*Index)(unsafe.Pointer(pIdx)).FzName = ts + 21932 + (*Index)(unsafe.Pointer(pIdx)).FzName = ts + 21979 (*Index)(unsafe.Pointer(pIdx)).FpTable = pTable n = 0 idxCols = uint64(0) @@ -102484,6 +102528,10 @@ var v uintptr = (*Parse)(unsafe.Pointer(pParse)).FpVdbe var pLoop uintptr = (*WhereLevel)(unsafe.Pointer(pLevel)).FpWLoop var iCur int32 + var saved_pIdxEpr uintptr + + saved_pIdxEpr = (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr + (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr = uintptr(0) addrOnce = Xsqlite3VdbeAddOp0(tls, v, OP_Once) for __ccgo := true; __ccgo; __ccgo = iLevel < int32((*WhereInfo)(unsafe.Pointer(pWInfo)).FnLevel) { @@ 
-102527,9 +102575,7 @@ var r1 int32 = Xsqlite3GetTempRange(tls, pParse, n) var jj int32 for jj = 0; jj < n; jj++ { - var iCol int32 = int32(*(*I16)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx)).FaiColumn + uintptr(jj)*2))) - - Xsqlite3ExprCodeGetColumnOfTable(tls, v, (*Index)(unsafe.Pointer(pIdx)).FpTable, iCur, iCol, r1+jj) + Xsqlite3ExprCodeLoadIndexColumn(tls, pParse, pIdx, iCur, jj, r1+jj) } Xsqlite3VdbeAddOp4Int(tls, v, OP_FilterAdd, (*WhereLevel)(unsafe.Pointer(pLevel)).FregFilter, 0, r1, n) Xsqlite3ReleaseTempRange(tls, pParse, r1, n) @@ -102563,6 +102609,7 @@ } } Xsqlite3VdbeJumpHere(tls, v, addrOnce) + (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr = saved_pIdxEpr } func allocateIndexInfo(tls *libc.TLS, pWInfo uintptr, pWC uintptr, mUnusable Bitmask, pSrc uintptr, pmNoOmit uintptr) uintptr { @@ -102821,11 +102868,16 @@ _ = pParse + if !((*Table)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx)).FpTable)).FtabFlags&U32(TF_WithoutRowid) == U32(0)) && int32(*(*uint16)(unsafe.Pointer(pIdx + 56))&0x3>>0) == SQLITE_IDXTYPE_PRIMARYKEY { + nField = int32((*Index)(unsafe.Pointer(pIdx)).FnKeyCol) + } else { + nField = int32((*Index)(unsafe.Pointer(pIdx)).FnColumn) + } nField = func() int32 { - if int32((*UnpackedRecord)(unsafe.Pointer(pRec)).FnField) < (*Index)(unsafe.Pointer(pIdx)).FnSample { + if int32((*UnpackedRecord)(unsafe.Pointer(pRec)).FnField) < nField { return int32((*UnpackedRecord)(unsafe.Pointer(pRec)).FnField) } - return (*Index)(unsafe.Pointer(pIdx)).FnSample + return nField }() iCol = 0 iSample = (*Index)(unsafe.Pointer(pIdx)).FnSample * nField @@ -104405,7 +104457,7 @@ j >= (*WhereClause)(unsafe.Pointer(pWC)).FnTerm || *(*uintptr)(unsafe.Pointer((*WhereLoop)(unsafe.Pointer(pNew)).FaLTerm + uintptr(iTerm)*4)) != uintptr(0) || int32((*sqlite3_index_constraint)(unsafe.Pointer(pIdxCons)).Fusable) == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+21943, libc.VaList(bp, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pSrc)).FpTab)).FzName)) + Xsqlite3ErrorMsg(tls, pParse, ts+21990, libc.VaList(bp, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pSrc)).FpTab)).FzName)) return SQLITE_ERROR } @@ -104463,7 +104515,7 @@ (*WhereLoop)(unsafe.Pointer(pNew)).FnLTerm = U16(mxTerm + 1) for i = 0; i <= mxTerm; i++ { if *(*uintptr)(unsafe.Pointer((*WhereLoop)(unsafe.Pointer(pNew)).FaLTerm + uintptr(i)*4)) == uintptr(0) { - Xsqlite3ErrorMsg(tls, pParse, ts+21943, libc.VaList(bp+8, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pSrc)).FpTab)).FzName)) + Xsqlite3ErrorMsg(tls, pParse, ts+21990, libc.VaList(bp+8, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pSrc)).FpTab)).FzName)) return SQLITE_ERROR } @@ -104861,7 +104913,7 @@ mPrior = mPrior | (*WhereLoop)(unsafe.Pointer(pNew)).FmaskSelf if rc != 0 || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { if rc == SQLITE_DONE { - Xsqlite3_log(tls, SQLITE_WARNING, ts+21969, 0) + Xsqlite3_log(tls, SQLITE_WARNING, ts+22016, 0) rc = SQLITE_OK } else { goto __3 @@ -105468,7 +105520,7 @@ } if nFrom == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+22004, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+22051, 0) Xsqlite3DbFreeNN(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pSpace) return SQLITE_ERROR } @@ -105503,6 +105555,10 @@ if int32((*WherePath)(unsafe.Pointer(pFrom)).FisOrdered) == (*ExprList)(unsafe.Pointer((*WhereInfo)(unsafe.Pointer(pWInfo)).FpOrderBy)).FnExpr { (*WhereInfo)(unsafe.Pointer(pWInfo)).FeDistinct = U8(WHERE_DISTINCT_ORDERED) } + if (*Select)(unsafe.Pointer((*WhereInfo)(unsafe.Pointer(pWInfo)).FpSelect)).FpOrderBy != 0 && + 
int32((*WhereInfo)(unsafe.Pointer(pWInfo)).FnOBSat) > (*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer((*WhereInfo)(unsafe.Pointer(pWInfo)).FpSelect)).FpOrderBy)).FnExpr { + (*WhereInfo)(unsafe.Pointer(pWInfo)).FnOBSat = I8((*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer((*WhereInfo)(unsafe.Pointer(pWInfo)).FpSelect)).FpOrderBy)).FnExpr) + } } else { (*WhereInfo)(unsafe.Pointer(pWInfo)).FrevMask = (*WherePath)(unsafe.Pointer(pFrom)).FrevLoop if int32((*WhereInfo)(unsafe.Pointer(pWInfo)).FnOBSat) <= 0 { @@ -105797,6 +105853,9 @@ (*IndexedExpr)(unsafe.Pointer(p)).FiIdxCur = iIdxCur (*IndexedExpr)(unsafe.Pointer(p)).FiIdxCol = i (*IndexedExpr)(unsafe.Pointer(p)).FbMaybeNullRow = U8(bMaybeNullRow) + if Xsqlite3IndexAffinityStr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pIdx) != 0 { + (*IndexedExpr)(unsafe.Pointer(p)).Faff = *(*uint8)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx)).FzColAff + uintptr(i))) + } (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr = p if (*IndexedExpr)(unsafe.Pointer(p)).FpIENext == uintptr(0) { Xsqlite3ParserAddCleanup(tls, pParse, *(*uintptr)(unsafe.Pointer(&struct { @@ -105949,7 +106008,7 @@ if !((*SrcList)(unsafe.Pointer(pTabList)).FnSrc > int32(uint32(unsafe.Sizeof(Bitmask(0)))*uint32(8))) { goto __2 } - Xsqlite3ErrorMsg(tls, pParse, ts+22022, libc.VaList(bp, int32(uint32(unsafe.Sizeof(Bitmask(0)))*uint32(8)))) + Xsqlite3ErrorMsg(tls, pParse, ts+22069, libc.VaList(bp, int32(uint32(unsafe.Sizeof(Bitmask(0)))*uint32(8)))) return uintptr(0) __2: ; @@ -106013,7 +106072,7 @@ (*WhereInfo)(unsafe.Pointer(pWInfo)).FeDistinct = U8(WHERE_DISTINCT_UNIQUE) __7: ; - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+22050, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+22097, 0) goto __5 __4: ii = 0 @@ -106896,7 +106955,7 @@ error_out: Xsqlite3_result_error(tls, - pCtx, ts+22068, -1) + pCtx, ts+22115, -1) } func nth_valueFinalizeFunc(tls *libc.TLS, pCtx uintptr) { @@ -107029,7 +107088,7 @@ (*NtileCtx)(unsafe.Pointer(p)).FnParam = Xsqlite3_value_int64(tls, *(*uintptr)(unsafe.Pointer(apArg))) if (*NtileCtx)(unsafe.Pointer(p)).FnParam <= int64(0) { Xsqlite3_result_error(tls, - pCtx, ts+22124, -1) + pCtx, ts+22171, -1) } } (*NtileCtx)(unsafe.Pointer(p)).FnTotal++ @@ -107118,17 +107177,17 @@ } } -var row_numberName = *(*[11]uint8)(unsafe.Pointer(ts + 22169)) -var dense_rankName = *(*[11]uint8)(unsafe.Pointer(ts + 22180)) -var rankName = *(*[5]uint8)(unsafe.Pointer(ts + 22191)) -var percent_rankName = *(*[13]uint8)(unsafe.Pointer(ts + 22196)) -var cume_distName = *(*[10]uint8)(unsafe.Pointer(ts + 22209)) -var ntileName = *(*[6]uint8)(unsafe.Pointer(ts + 22219)) -var last_valueName = *(*[11]uint8)(unsafe.Pointer(ts + 22225)) -var nth_valueName = *(*[10]uint8)(unsafe.Pointer(ts + 22236)) -var first_valueName = *(*[12]uint8)(unsafe.Pointer(ts + 22246)) -var leadName = *(*[5]uint8)(unsafe.Pointer(ts + 22258)) -var lagName = *(*[4]uint8)(unsafe.Pointer(ts + 22263)) +var row_numberName = *(*[11]uint8)(unsafe.Pointer(ts + 22216)) +var dense_rankName = *(*[11]uint8)(unsafe.Pointer(ts + 22227)) +var rankName = *(*[5]uint8)(unsafe.Pointer(ts + 22238)) +var percent_rankName = *(*[13]uint8)(unsafe.Pointer(ts + 22243)) +var cume_distName = *(*[10]uint8)(unsafe.Pointer(ts + 22256)) +var ntileName = *(*[6]uint8)(unsafe.Pointer(ts + 22266)) +var last_valueName = *(*[11]uint8)(unsafe.Pointer(ts + 22272)) +var nth_valueName = *(*[10]uint8)(unsafe.Pointer(ts + 22283)) +var first_valueName = *(*[12]uint8)(unsafe.Pointer(ts + 22293)) +var leadName = *(*[5]uint8)(unsafe.Pointer(ts + 22305)) +var lagName = 
*(*[4]uint8)(unsafe.Pointer(ts + 22310)) func noopStepFunc(tls *libc.TLS, p uintptr, n int32, a uintptr) { _ = p @@ -107174,7 +107233,7 @@ } } if p == uintptr(0) { - Xsqlite3ErrorMsg(tls, pParse, ts+22267, libc.VaList(bp, zName)) + Xsqlite3ErrorMsg(tls, pParse, ts+22314, libc.VaList(bp, zName)) } return p } @@ -107218,12 +107277,12 @@ ((*Window)(unsafe.Pointer(pWin)).FpStart != 0 || (*Window)(unsafe.Pointer(pWin)).FpEnd != 0) && ((*Window)(unsafe.Pointer(pWin)).FpOrderBy == uintptr(0) || (*ExprList)(unsafe.Pointer((*Window)(unsafe.Pointer(pWin)).FpOrderBy)).FnExpr != 1) { Xsqlite3ErrorMsg(tls, pParse, - ts+22286, 0) + ts+22333, 0) } else if (*FuncDef)(unsafe.Pointer(pFunc)).FfuncFlags&U32(SQLITE_FUNC_WINDOW) != 0 { var db uintptr = (*Parse)(unsafe.Pointer(pParse)).Fdb if (*Window)(unsafe.Pointer(pWin)).FpFilter != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+22357, 0) + ts+22404, 0) } else { *(*[8]WindowUpdate)(unsafe.Pointer(bp)) = [8]WindowUpdate{ {FzFunc: uintptr(unsafe.Pointer(&row_numberName)), FeFrmType: TK_ROWS, FeStart: TK_UNBOUNDED, FeEnd: TK_CURRENT}, @@ -107451,7 +107510,7 @@ if int32((*Expr)(unsafe.Pointer(pExpr)).Fop) == TK_AGG_FUNCTION && (*Expr)(unsafe.Pointer(pExpr)).FpAggInfo == uintptr(0) { Xsqlite3ErrorMsg(tls, (*Walker)(unsafe.Pointer(pWalker)).FpParse, - ts+22420, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(pExpr + 8)))) + ts+22467, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(pExpr + 8)))) } return WRC_Continue } @@ -107567,7 +107626,7 @@ if *(*uintptr)(unsafe.Pointer(bp + 28)) == uintptr(0) { *(*uintptr)(unsafe.Pointer(bp + 28)) = Xsqlite3ExprListAppend(tls, pParse, uintptr(0), - Xsqlite3Expr(tls, db, TK_INTEGER, ts+7530)) + Xsqlite3Expr(tls, db, TK_INTEGER, ts+7519)) } pSub = Xsqlite3SelectNew(tls, @@ -107682,7 +107741,7 @@ eStart == TK_FOLLOWING && (eEnd == TK_PRECEDING || eEnd == TK_CURRENT)) { goto __2 } - Xsqlite3ErrorMsg(tls, pParse, ts+22446, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+22493, 0) goto windowAllocErr __2: ; @@ -107747,15 +107806,15 @@ var zErr uintptr = uintptr(0) if (*Window)(unsafe.Pointer(pWin)).FpPartition != 0 { - zErr = ts + 22478 + zErr = ts + 22525 } else if (*Window)(unsafe.Pointer(pExist)).FpOrderBy != 0 && (*Window)(unsafe.Pointer(pWin)).FpOrderBy != 0 { - zErr = ts + 22495 + zErr = ts + 22542 } else if int32((*Window)(unsafe.Pointer(pExist)).FbImplicitFrame) == 0 { - zErr = ts + 22511 + zErr = ts + 22558 } if zErr != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+22531, libc.VaList(bp, zErr, (*Window)(unsafe.Pointer(pWin)).FzBase)) + ts+22578, libc.VaList(bp, zErr, (*Window)(unsafe.Pointer(pWin)).FzBase)) } else { (*Window)(unsafe.Pointer(pWin)).FpPartition = Xsqlite3ExprListDup(tls, db, (*Window)(unsafe.Pointer(pExist)).FpPartition, 0) if (*Window)(unsafe.Pointer(pExist)).FpOrderBy != 0 { @@ -107776,7 +107835,7 @@ (*Window)(unsafe.Pointer(pWin)).FpOwner = p if (*Expr)(unsafe.Pointer(p)).Fflags&U32(EP_Distinct) != 0 && int32((*Window)(unsafe.Pointer(pWin)).FeFrmType) != TK_FILTER { Xsqlite3ErrorMsg(tls, pParse, - ts+22564, 0) + ts+22611, 0) } } else { Xsqlite3WindowDelete(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pWin) @@ -107932,11 +107991,11 @@ } var azErr = [5]uintptr{ - ts + 22611, - ts + 22664, - ts + 22068, - ts + 22715, - ts + 22767, + ts + 22658, + ts + 22711, + ts + 22115, + ts + 22762, + ts + 22814, } var aOp1 = [5]int32{OP_Ge, OP_Ge, OP_Gt, OP_Ge, OP_Ge} @@ -109328,19 +109387,19 @@ } cnt++ if (*Select)(unsafe.Pointer(pLoop)).FpOrderBy != 0 || (*Select)(unsafe.Pointer(pLoop)).FpLimit != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+22817, + 
Xsqlite3ErrorMsg(tls, pParse, ts+22864, libc.VaList(bp, func() uintptr { if (*Select)(unsafe.Pointer(pLoop)).FpOrderBy != uintptr(0) { - return ts + 22859 + return ts + 22906 } - return ts + 22868 + return ts + 22915 }(), Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(pNext)).Fop)))) break } } if (*Select)(unsafe.Pointer(p)).FselFlags&U32(SF_MultiValue) == U32(0) && libc.AssignInt32(&mxSelect, *(*int32)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb + 120 + 4*4))) > 0 && cnt > mxSelect { - Xsqlite3ErrorMsg(tls, pParse, ts+22874, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+22921, 0) } } @@ -109411,7 +109470,7 @@ var p uintptr = Xsqlite3ExprListAppend(tls, pParse, pPrior, uintptr(0)) if (hasCollate != 0 || sortOrder != -1) && int32((*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).Finit.Fbusy) == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+22908, + Xsqlite3ErrorMsg(tls, pParse, ts+22955, libc.VaList(bp, (*Token)(unsafe.Pointer(pIdToken)).Fn, (*Token)(unsafe.Pointer(pIdToken)).Fz)) } Xsqlite3ExprListSetName(tls, pParse, p, pIdToken, 1) @@ -110506,7 +110565,7 @@ yy_pop_parser_stack(tls, yypParser) } - Xsqlite3ErrorMsg(tls, pParse, ts+22946, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+22993, 0) (*YyParser)(unsafe.Pointer(yypParser)).FpParse = pParse } @@ -111485,7 +111544,7 @@ *(*U32)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-1)*12 + 4)) = U32(TF_WithoutRowid | TF_NoVisibleRowid) } else { *(*U32)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-1)*12 + 4)) = U32(0) - Xsqlite3ErrorMsg(tls, pParse, ts+22968, libc.VaList(bp, (*Token)(unsafe.Pointer(yymsp+4)).Fn, (*Token)(unsafe.Pointer(yymsp+4)).Fz)) + Xsqlite3ErrorMsg(tls, pParse, ts+23015, libc.VaList(bp, (*Token)(unsafe.Pointer(yymsp+4)).Fn, (*Token)(unsafe.Pointer(yymsp+4)).Fz)) } } break @@ -111495,7 +111554,7 @@ *(*U32)(unsafe.Pointer(bp + 40)) = U32(TF_Strict) } else { *(*U32)(unsafe.Pointer(bp + 40)) = U32(0) - Xsqlite3ErrorMsg(tls, pParse, ts+22968, libc.VaList(bp+16, (*Token)(unsafe.Pointer(yymsp+4)).Fn, (*Token)(unsafe.Pointer(yymsp+4)).Fz)) + Xsqlite3ErrorMsg(tls, pParse, ts+23015, libc.VaList(bp+16, (*Token)(unsafe.Pointer(yymsp+4)).Fn, (*Token)(unsafe.Pointer(yymsp+4)).Fz)) } } *(*U32)(unsafe.Pointer(yymsp + 4)) = *(*U32)(unsafe.Pointer(bp + 40)) @@ -112238,7 +112297,7 @@ case uint32(157): { Xsqlite3SrcListIndexedBy(tls, pParse, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-5)*12 + 4)), yymsp+libc.UintptrFromInt32(-4)*12+4) - Xsqlite3ExprListCheckLength(tls, pParse, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-2)*12 + 4)), ts+22995) + Xsqlite3ExprListCheckLength(tls, pParse, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-2)*12 + 4)), ts+23042) if *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-1)*12 + 4)) != 0 { var pFromClause uintptr = *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-1)*12 + 4)) if (*SrcList)(unsafe.Pointer(pFromClause)).FnSrc > 1 { @@ -112402,7 +112461,7 @@ *(*Token)(unsafe.Pointer(bp + 92)) = *(*Token)(unsafe.Pointer(yymsp + 4)) if int32((*Parse)(unsafe.Pointer(pParse)).Fnested) == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+23004, libc.VaList(bp+32, bp+92)) + Xsqlite3ErrorMsg(tls, pParse, ts+23051, libc.VaList(bp+32, bp+92)) *(*uintptr)(unsafe.Pointer(yymsp + 4)) = uintptr(0) } else { *(*uintptr)(unsafe.Pointer(yymsp + 4)) = Xsqlite3PExpr(tls, pParse, TK_REGISTER, uintptr(0), uintptr(0)) @@ -112619,9 +112678,9 @@ Xsqlite3ExprUnmapAndDelete(tls, pParse, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-4)*12 + 4))) 
*(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-4)*12 + 4)) = Xsqlite3Expr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, TK_STRING, func() uintptr { if *(*int32)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-3)*12 + 4)) != 0 { - return ts + 6764 + return ts + 7694 } - return ts + 6769 + return ts + 7699 }()) if *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-4)*12 + 4)) != 0 { Xsqlite3ExprIdToTrueFalse(tls, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-4)*12 + 4))) @@ -112905,19 +112964,19 @@ { *(*Token)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-2)*12 + 4)) = *(*Token)(unsafe.Pointer(yymsp + 4)) Xsqlite3ErrorMsg(tls, pParse, - ts+23028, 0) + ts+23075, 0) } break case uint32(271): { Xsqlite3ErrorMsg(tls, pParse, - ts+23123, 0) + ts+23170, 0) } break case uint32(272): { Xsqlite3ErrorMsg(tls, pParse, - ts+23207, 0) + ts+23254, 0) } break case uint32(273): @@ -113296,9 +113355,9 @@ _ = yymajor if *(*uint8)(unsafe.Pointer((*Token)(unsafe.Pointer(bp + 8)).Fz)) != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+23004, libc.VaList(bp, bp+8)) + Xsqlite3ErrorMsg(tls, pParse, ts+23051, libc.VaList(bp, bp+8)) } else { - Xsqlite3ErrorMsg(tls, pParse, ts+23292, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+23339, 0) } (*YyParser)(unsafe.Pointer(yypParser)).FpParse = pParse @@ -114066,7 +114125,7 @@ } else { (*Token)(unsafe.Pointer(bp + 1248)).Fz = zSql (*Token)(unsafe.Pointer(bp + 1248)).Fn = uint32(n) - Xsqlite3ErrorMsg(tls, pParse, ts+23309, libc.VaList(bp, bp+1248)) + Xsqlite3ErrorMsg(tls, pParse, ts+23356, libc.VaList(bp, bp+1248)) break } } @@ -114089,7 +114148,7 @@ if (*Parse)(unsafe.Pointer(pParse)).FzErrMsg == uintptr(0) { (*Parse)(unsafe.Pointer(pParse)).FzErrMsg = Xsqlite3MPrintf(tls, db, ts+3663, libc.VaList(bp+8, Xsqlite3ErrStr(tls, (*Parse)(unsafe.Pointer(pParse)).Frc))) } - Xsqlite3_log(tls, (*Parse)(unsafe.Pointer(pParse)).Frc, ts+23334, libc.VaList(bp+16, (*Parse)(unsafe.Pointer(pParse)).FzErrMsg, (*Parse)(unsafe.Pointer(pParse)).FzTail)) + Xsqlite3_log(tls, (*Parse)(unsafe.Pointer(pParse)).Frc, ts+23381, libc.VaList(bp+16, (*Parse)(unsafe.Pointer(pParse)).FzErrMsg, (*Parse)(unsafe.Pointer(pParse)).FzTail)) nErr++ } (*Parse)(unsafe.Pointer(pParse)).FzTail = zSql @@ -114262,7 +114321,7 @@ fallthrough case 'C': { - if nId == 6 && Xsqlite3_strnicmp(tls, zSql, ts+23345, 6) == 0 { + if nId == 6 && Xsqlite3_strnicmp(tls, zSql, ts+23392, 6) == 0 { token = U8(TkCREATE) } else { token = U8(TkOTHER) @@ -114275,11 +114334,11 @@ fallthrough case 'T': { - if nId == 7 && Xsqlite3_strnicmp(tls, zSql, ts+19931, 7) == 0 { + if nId == 7 && Xsqlite3_strnicmp(tls, zSql, ts+19978, 7) == 0 { token = U8(TkTRIGGER) - } else if nId == 4 && Xsqlite3_strnicmp(tls, zSql, ts+23352, 4) == 0 { + } else if nId == 4 && Xsqlite3_strnicmp(tls, zSql, ts+23399, 4) == 0 { token = U8(TkTEMP) - } else if nId == 9 && Xsqlite3_strnicmp(tls, zSql, ts+23357, 9) == 0 { + } else if nId == 9 && Xsqlite3_strnicmp(tls, zSql, ts+23404, 9) == 0 { token = U8(TkTEMP) } else { token = U8(TkOTHER) @@ -114292,9 +114351,9 @@ fallthrough case 'E': { - if nId == 3 && Xsqlite3_strnicmp(tls, zSql, ts+23367, 3) == 0 { + if nId == 3 && Xsqlite3_strnicmp(tls, zSql, ts+23414, 3) == 0 { token = U8(TkEND) - } else if nId == 7 && Xsqlite3_strnicmp(tls, zSql, ts+23371, 7) == 0 { + } else if nId == 7 && Xsqlite3_strnicmp(tls, zSql, ts+23418, 7) == 0 { token = U8(TkEXPLAIN) } else { token = U8(TkOTHER) @@ -114528,7 +114587,7 @@ var rc int32 = SQLITE_OK if Xsqlite3Config.FisInit != 0 { - return Xsqlite3MisuseError(tls, 174337) + return 
Xsqlite3MisuseError(tls, 174426) } ap = va @@ -115103,7 +115162,7 @@ return SQLITE_OK } if !(Xsqlite3SafetyCheckSickOrOk(tls, db) != 0) { - return Xsqlite3MisuseError(tls, 175111) + return Xsqlite3MisuseError(tls, 175200) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) if int32((*Sqlite3)(unsafe.Pointer(db)).FmTrace)&SQLITE_TRACE_CLOSE != 0 { @@ -115118,7 +115177,7 @@ if !(forceZombie != 0) && connectionIsBusy(tls, db) != 0 { Xsqlite3ErrorWithMsg(tls, db, SQLITE_BUSY, - ts+23379, 0) + ts+23426, 0) Xsqlite3_mutex_leave(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) return SQLITE_BUSY } @@ -115309,23 +115368,23 @@ // Return a static string that describes the kind of error specified in the // argument. func Xsqlite3ErrStr(tls *libc.TLS, rc int32) uintptr { - var zErr uintptr = ts + 23447 + var zErr uintptr = ts + 23494 switch rc { case SQLITE_ABORT | int32(2)<<8: { - zErr = ts + 23461 + zErr = ts + 23508 break } case SQLITE_ROW: { - zErr = ts + 23483 + zErr = ts + 23530 break } case SQLITE_DONE: { - zErr = ts + 23505 + zErr = ts + 23552 break } @@ -115343,35 +115402,35 @@ } var aMsg = [29]uintptr{ - ts + 23528, - ts + 23541, + ts + 23575, + ts + 23588, uintptr(0), - ts + 23557, - ts + 23582, - ts + 23596, - ts + 23615, + ts + 23604, + ts + 23629, + ts + 23643, + ts + 23662, ts + 1490, - ts + 23640, - ts + 23677, - ts + 23689, - ts + 23704, - ts + 23737, - ts + 23755, - ts + 23780, - ts + 23809, + ts + 23687, + ts + 23724, + ts + 23736, + ts + 23751, + ts + 23784, + ts + 23802, + ts + 23827, + ts + 23856, uintptr(0), ts + 5838, ts + 5334, - ts + 23826, - ts + 23844, - ts + 23862, - uintptr(0), - ts + 23896, + ts + 23873, + ts + 23891, + ts + 23909, uintptr(0), - ts + 23917, ts + 23943, - ts + 23966, - ts + 23987, + uintptr(0), + ts + 23964, + ts + 23990, + ts + 24013, + ts + 24034, } func sqliteDefaultBusyCallback(tls *libc.TLS, ptr uintptr, count int32) int32 { @@ -115492,7 +115551,7 @@ libc.Bool32(xValue == uintptr(0)) != libc.Bool32(xInverse == uintptr(0)) || (nArg < -1 || nArg > SQLITE_MAX_FUNCTION_ARG) || 255 < Xsqlite3Strlen30(tls, zFunctionName) { - return Xsqlite3MisuseError(tls, 175758) + return Xsqlite3MisuseError(tls, 175847) } extraFlags = enc & (SQLITE_DETERMINISTIC | SQLITE_DIRECTONLY | SQLITE_SUBTYPE | SQLITE_INNOCUOUS) @@ -115537,7 +115596,7 @@ if p != 0 && (*FuncDef)(unsafe.Pointer(p)).FfuncFlags&U32(SQLITE_FUNC_ENCMASK) == U32(enc) && int32((*FuncDef)(unsafe.Pointer(p)).FnArg) == nArg { if (*Sqlite3)(unsafe.Pointer(db)).FnVdbeActive != 0 { Xsqlite3ErrorWithMsg(tls, db, SQLITE_BUSY, - ts+24003, 0) + ts+24050, 0) return SQLITE_BUSY } else { @@ -115654,7 +115713,7 @@ _ = NotUsed _ = NotUsed2 zErr = Xsqlite3_mprintf(tls, - ts+24066, libc.VaList(bp, zName)) + ts+24113, libc.VaList(bp, zName)) Xsqlite3_result_error(tls, context, zErr, -1) Xsqlite3_free(tls, zErr) } @@ -115890,7 +115949,7 @@ } if iDb < 0 { rc = SQLITE_ERROR - Xsqlite3ErrorWithMsg(tls, db, SQLITE_ERROR, ts+24117, libc.VaList(bp, zDb)) + Xsqlite3ErrorWithMsg(tls, db, SQLITE_ERROR, ts+24164, libc.VaList(bp, zDb)) } else { (*Sqlite3)(unsafe.Pointer(db)).FbusyHandler.FnBusy = 0 rc = Xsqlite3Checkpoint(tls, db, iDb, eMode, pnLog, pnCkpt) @@ -115983,7 +116042,7 @@ return Xsqlite3ErrStr(tls, SQLITE_NOMEM) } if !(Xsqlite3SafetyCheckSickOrOk(tls, db) != 0) { - return Xsqlite3ErrStr(tls, Xsqlite3MisuseError(tls, 176503)) + return Xsqlite3ErrStr(tls, Xsqlite3MisuseError(tls, 176592)) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) if (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { @@ 
-116053,7 +116112,7 @@ // passed to this function, we assume a malloc() failed during sqlite3_open(). func Xsqlite3_errcode(tls *libc.TLS, db uintptr) int32 { if db != 0 && !(Xsqlite3SafetyCheckSickOrOk(tls, db) != 0) { - return Xsqlite3MisuseError(tls, 176582) + return Xsqlite3MisuseError(tls, 176671) } if !(db != 0) || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { return SQLITE_NOMEM @@ -116063,7 +116122,7 @@ func Xsqlite3_extended_errcode(tls *libc.TLS, db uintptr) int32 { if db != 0 && !(Xsqlite3SafetyCheckSickOrOk(tls, db) != 0) { - return Xsqlite3MisuseError(tls, 176591) + return Xsqlite3MisuseError(tls, 176680) } if !(db != 0) || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { return SQLITE_NOMEM @@ -116095,14 +116154,14 @@ enc2 = SQLITE_UTF16LE } if enc2 < SQLITE_UTF8 || enc2 > SQLITE_UTF16BE { - return Xsqlite3MisuseError(tls, 176639) + return Xsqlite3MisuseError(tls, 176728) } pColl = Xsqlite3FindCollSeq(tls, db, U8(enc2), zName, 0) if pColl != 0 && (*CollSeq)(unsafe.Pointer(pColl)).FxCmp != 0 { if (*Sqlite3)(unsafe.Pointer(db)).FnVdbeActive != 0 { Xsqlite3ErrorWithMsg(tls, db, SQLITE_BUSY, - ts+24138, 0) + ts+24185, 0) return SQLITE_BUSY } Xsqlite3ExpirePreparedStatements(tls, db, 0) @@ -116232,7 +116291,7 @@ if !((flags&uint32(SQLITE_OPEN_URI) != 0 || Xsqlite3Config.FbOpenUri != 0) && - nUri >= 5 && libc.Xmemcmp(tls, zUri, ts+24206, uint32(5)) == 0) { + nUri >= 5 && libc.Xmemcmp(tls, zUri, ts+24253, uint32(5)) == 0) { goto __1 } iOut = 0 @@ -116277,10 +116336,10 @@ goto __8 __9: ; - if !(iIn != 7 && (iIn != 16 || libc.Xmemcmp(tls, ts+24212, zUri+7, uint32(9)) != 0)) { + if !(iIn != 7 && (iIn != 16 || libc.Xmemcmp(tls, ts+24259, zUri+7, uint32(9)) != 0)) { goto __10 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24222, + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24269, libc.VaList(bp, iIn-7, zUri+7)) rc = SQLITE_ERROR goto parse_uri_out @@ -116385,7 +116444,7 @@ zVal = zOpt + uintptr(nOpt+1) nVal = Xsqlite3Strlen30(tls, zVal) - if !(nOpt == 3 && libc.Xmemcmp(tls, ts+24250, zOpt, uint32(3)) == 0) { + if !(nOpt == 3 && libc.Xmemcmp(tls, ts+24297, zOpt, uint32(3)) == 0) { goto __29 } zVfs = zVal @@ -116396,17 +116455,17 @@ mask = 0 limit = 0 - if !(nOpt == 5 && libc.Xmemcmp(tls, ts+24254, zOpt, uint32(5)) == 0) { + if !(nOpt == 5 && libc.Xmemcmp(tls, ts+24301, zOpt, uint32(5)) == 0) { goto __31 } mask = SQLITE_OPEN_SHAREDCACHE | SQLITE_OPEN_PRIVATECACHE aMode = uintptr(unsafe.Pointer(&aCacheMode)) limit = mask - zModeType = ts + 24254 + zModeType = ts + 24301 __31: ; - if !(nOpt == 4 && libc.Xmemcmp(tls, ts+24260, zOpt, uint32(4)) == 0) { + if !(nOpt == 4 && libc.Xmemcmp(tls, ts+24307, zOpt, uint32(4)) == 0) { goto __32 } @@ -116444,7 +116503,7 @@ if !(mode == 0) { goto __38 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24265, libc.VaList(bp+16, zModeType, zVal)) + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24312, libc.VaList(bp+16, zModeType, zVal)) rc = SQLITE_ERROR goto parse_uri_out __38: @@ -116452,7 +116511,7 @@ if !(mode&libc.CplInt32(SQLITE_OPEN_MEMORY) > limit) { goto __39 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24285, + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24332, libc.VaList(bp+32, zModeType, zVal)) rc = SQLITE_PERM goto parse_uri_out @@ -116492,7 +116551,7 @@ if !(*(*uintptr)(unsafe.Pointer(ppVfs)) == uintptr(0)) { goto __42 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24309, 
libc.VaList(bp+48, zVfs)) + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24356, libc.VaList(bp+48, zVfs)) rc = SQLITE_ERROR __42: ; @@ -116515,14 +116574,14 @@ } var aCacheMode = [3]OpenMode{ - {Fz: ts + 24325, Fmode: SQLITE_OPEN_SHAREDCACHE}, - {Fz: ts + 24332, Fmode: SQLITE_OPEN_PRIVATECACHE}, + {Fz: ts + 24372, Fmode: SQLITE_OPEN_SHAREDCACHE}, + {Fz: ts + 24379, Fmode: SQLITE_OPEN_PRIVATECACHE}, {}, } var aOpenMode = [5]OpenMode{ - {Fz: ts + 24340, Fmode: SQLITE_OPEN_READONLY}, - {Fz: ts + 24343, Fmode: SQLITE_OPEN_READWRITE}, - {Fz: ts + 24346, Fmode: SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE}, + {Fz: ts + 24387, Fmode: SQLITE_OPEN_READONLY}, + {Fz: ts + 24390, Fmode: SQLITE_OPEN_READWRITE}, + {Fz: ts + 24393, Fmode: SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE}, {Fz: ts + 17362, Fmode: SQLITE_OPEN_MEMORY}, {}, } @@ -116669,10 +116728,10 @@ createCollation(tls, db, uintptr(unsafe.Pointer(&Xsqlite3StrBINARY)), uint8(SQLITE_UTF16LE), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr, int32, uintptr) int32 }{binCollFunc})), uintptr(0)) - createCollation(tls, db, ts+21863, uint8(SQLITE_UTF8), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + createCollation(tls, db, ts+21910, uint8(SQLITE_UTF8), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr, int32, uintptr) int32 }{nocaseCollatingFunc})), uintptr(0)) - createCollation(tls, db, ts+24350, uint8(SQLITE_UTF8), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + createCollation(tls, db, ts+24397, uint8(SQLITE_UTF8), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr, int32, uintptr) int32 }{rtrimCollFunc})), uintptr(0)) if !((*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0) { @@ -116686,7 +116745,7 @@ if !(int32(1)<<(*(*uint32)(unsafe.Pointer(bp + 8))&uint32(7))&0x46 == 0) { goto __16 } - rc = Xsqlite3MisuseError(tls, 177308) + rc = Xsqlite3MisuseError(tls, 177397) goto __17 __16: rc = Xsqlite3ParseUri(tls, zVfs, zFilename, bp+8, db, bp+12, bp+16) @@ -116739,7 +116798,7 @@ (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb)).FzDbSName = ts + 6441 (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb)).Fsafety_level = U8(SQLITE_DEFAULT_SYNCHRONOUS + 1) - (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + 1*16)).FzDbSName = ts + 23352 + (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + 1*16)).FzDbSName = ts + 23399 (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + 1*16)).Fsafety_level = U8(PAGER_SYNCHRONOUS_OFF) (*Sqlite3)(unsafe.Pointer(db)).FeOpenState = U8(SQLITE_STATE_OPEN) @@ -116844,7 +116903,7 @@ return rc } if zFilename == uintptr(0) { - zFilename = ts + 24356 + zFilename = ts + 24403 } pVal = Xsqlite3ValueNew(tls, uintptr(0)) Xsqlite3ValueSetStr(tls, pVal, -1, zFilename, uint8(SQLITE_UTF16LE), uintptr(0)) @@ -116947,21 +117006,21 @@ bp := tls.Alloc(24) defer tls.Free(24) - Xsqlite3_log(tls, iErr, ts+24359, + Xsqlite3_log(tls, iErr, ts+24406, libc.VaList(bp, zType, lineno, uintptr(20)+Xsqlite3_sourceid(tls))) return iErr } func Xsqlite3CorruptError(tls *libc.TLS, lineno int32) int32 { - return Xsqlite3ReportError(tls, SQLITE_CORRUPT, lineno, ts+24384) + return Xsqlite3ReportError(tls, SQLITE_CORRUPT, lineno, ts+24431) } func Xsqlite3MisuseError(tls *libc.TLS, lineno int32) int32 { - return Xsqlite3ReportError(tls, SQLITE_MISUSE, lineno, ts+24404) + return Xsqlite3ReportError(tls, SQLITE_MISUSE, lineno, ts+24451) } func Xsqlite3CantopenError(tls *libc.TLS, 
lineno int32) int32 { - return Xsqlite3ReportError(tls, SQLITE_CANTOPEN, lineno, ts+24411) + return Xsqlite3ReportError(tls, SQLITE_CANTOPEN, lineno, ts+24458) } // This is a convenience routine that makes sure that all thread-specific @@ -117119,7 +117178,7 @@ goto __20 } Xsqlite3DbFree(tls, db, *(*uintptr)(unsafe.Pointer(bp + 24))) - *(*uintptr)(unsafe.Pointer(bp + 24)) = Xsqlite3MPrintf(tls, db, ts+24428, libc.VaList(bp, zTableName, + *(*uintptr)(unsafe.Pointer(bp + 24)) = Xsqlite3MPrintf(tls, db, ts+24475, libc.VaList(bp, zTableName, zColumnName)) rc = SQLITE_ERROR __20: @@ -117775,7 +117834,7 @@ azCompileOpt = Xsqlite3CompileOptions(tls, bp) - if Xsqlite3_strnicmp(tls, zOptName, ts+24456, 7) == 0 { + if Xsqlite3_strnicmp(tls, zOptName, ts+24503, 7) == 0 { zOptName += uintptr(7) } n = Xsqlite3Strlen30(tls, zOptName) @@ -117893,7 +117952,7 @@ Xsqlite3ErrorWithMsg(tls, db, rc, func() uintptr { if rc != 0 { - return ts + 24464 + return ts + 24511 } return uintptr(0) }(), 0) @@ -118070,7 +118129,7 @@ type JsonParse = JsonParse1 var jsonType = [8]uintptr{ - ts + 6181, ts + 6764, ts + 6769, ts + 6191, ts + 6186, ts + 8005, ts + 24487, ts + 24493, + ts + 6181, ts + 7694, ts + 7699, ts + 6191, ts + 6186, ts + 8005, ts + 24534, ts + 24540, } func jsonZero(tls *libc.TLS, p uintptr) { @@ -118223,7 +118282,7 @@ *(*uint8)(unsafe.Pointer((*JsonString)(unsafe.Pointer(p)).FzBuf + uintptr(libc.PostIncUint64(&(*JsonString)(unsafe.Pointer(p)).FnUsed, 1)))) = uint8('0') *(*uint8)(unsafe.Pointer((*JsonString)(unsafe.Pointer(p)).FzBuf + uintptr(libc.PostIncUint64(&(*JsonString)(unsafe.Pointer(p)).FnUsed, 1)))) = uint8('0') *(*uint8)(unsafe.Pointer((*JsonString)(unsafe.Pointer(p)).FzBuf + uintptr(libc.PostIncUint64(&(*JsonString)(unsafe.Pointer(p)).FnUsed, 1)))) = uint8('0' + int32(c)>>4) - c = *(*uint8)(unsafe.Pointer(ts + 24500 + uintptr(int32(c)&0xf))) + c = *(*uint8)(unsafe.Pointer(ts + 24547 + uintptr(int32(c)&0xf))) __8: ; __6: @@ -118278,7 +118337,7 @@ default: { if int32((*JsonString)(unsafe.Pointer(p)).FbErr) == 0 { - Xsqlite3_result_error(tls, (*JsonString)(unsafe.Pointer(p)).FpCtx, ts+24517, -1) + Xsqlite3_result_error(tls, (*JsonString)(unsafe.Pointer(p)).FpCtx, ts+24564, -1) (*JsonString)(unsafe.Pointer(p)).FbErr = U8(2) jsonReset(tls, p) } @@ -118342,13 +118401,13 @@ } case JSON_TRUE: { - jsonAppendRaw(tls, pOut, ts+6764, uint32(4)) + jsonAppendRaw(tls, pOut, ts+7694, uint32(4)) break } case JSON_FALSE: { - jsonAppendRaw(tls, pOut, ts+6769, uint32(5)) + jsonAppendRaw(tls, pOut, ts+7699, uint32(5)) break } @@ -118898,12 +118957,12 @@ jsonParseAddNode(tls, pParse, uint32(JSON_NULL), uint32(0), uintptr(0)) return int32(i + U32(4)) } else if int32(c) == 't' && - libc.Xstrncmp(tls, z+uintptr(i), ts+6764, uint32(4)) == 0 && + libc.Xstrncmp(tls, z+uintptr(i), ts+7694, uint32(4)) == 0 && !(int32(Xsqlite3CtypeMap[*(*uint8)(unsafe.Pointer(z + uintptr(i+U32(4))))])&0x06 != 0) { jsonParseAddNode(tls, pParse, uint32(JSON_TRUE), uint32(0), uintptr(0)) return int32(i + U32(4)) } else if int32(c) == 'f' && - libc.Xstrncmp(tls, z+uintptr(i), ts+6769, uint32(5)) == 0 && + libc.Xstrncmp(tls, z+uintptr(i), ts+7699, uint32(5)) == 0 && !(int32(Xsqlite3CtypeMap[*(*uint8)(unsafe.Pointer(z + uintptr(i+U32(5))))])&0x06 != 0) { jsonParseAddNode(tls, pParse, uint32(JSON_FALSE), uint32(0), uintptr(0)) return int32(i + U32(5)) @@ -119004,7 +119063,7 @@ if (*JsonParse)(unsafe.Pointer(pParse)).Foom != 0 { Xsqlite3_result_error_nomem(tls, pCtx) } else { - Xsqlite3_result_error(tls, pCtx, ts+24546, -1) + 
Xsqlite3_result_error(tls, pCtx, ts+24593, -1) } } jsonParseReset(tls, pParse) @@ -119310,7 +119369,7 @@ } if int32(*(*uint8)(unsafe.Pointer(zPath))) == '.' { jsonParseAddNode(tls, pParse, uint32(JSON_OBJECT), uint32(0), uintptr(0)) - } else if libc.Xstrncmp(tls, zPath, ts+24561, uint32(3)) == 0 { + } else if libc.Xstrncmp(tls, zPath, ts+24608, uint32(3)) == 0 { jsonParseAddNode(tls, pParse, uint32(JSON_ARRAY), uint32(0), uintptr(0)) } else { return uintptr(0) @@ -119325,7 +119384,7 @@ bp := tls.Alloc(8) defer tls.Free(8) - return Xsqlite3_mprintf(tls, ts+24565, libc.VaList(bp, zErr)) + return Xsqlite3_mprintf(tls, ts+24612, libc.VaList(bp, zErr)) } func jsonLookup(tls *libc.TLS, pParse uintptr, zPath uintptr, pApnd uintptr, pCtx uintptr) uintptr { @@ -119379,7 +119438,7 @@ bp := tls.Alloc(8) defer tls.Free(8) - var zMsg uintptr = Xsqlite3_mprintf(tls, ts+24591, + var zMsg uintptr = Xsqlite3_mprintf(tls, ts+24638, libc.VaList(bp, zFuncName)) Xsqlite3_result_error(tls, pCtx, zMsg, -1) Xsqlite3_free(tls, zMsg) @@ -119484,11 +119543,11 @@ if int32(*(*uint8)(unsafe.Pointer(zPath))) != '$' { jsonInit(tls, bp, ctx) if int32(Xsqlite3CtypeMap[*(*uint8)(unsafe.Pointer(zPath))])&0x04 != 0 { - jsonAppendRaw(tls, bp, ts+24634, uint32(2)) + jsonAppendRaw(tls, bp, ts+24681, uint32(2)) jsonAppendRaw(tls, bp, zPath, uint32(int32(libc.Xstrlen(tls, zPath)))) jsonAppendRaw(tls, bp, ts+4998, uint32(2)) } else { - jsonAppendRaw(tls, bp, ts+24637, uint32(1+libc.Bool32(int32(*(*uint8)(unsafe.Pointer(zPath))) != '['))) + jsonAppendRaw(tls, bp, ts+24684, uint32(1+libc.Bool32(int32(*(*uint8)(unsafe.Pointer(zPath))) != '['))) jsonAppendRaw(tls, bp, zPath, uint32(int32(libc.Xstrlen(tls, zPath)))) jsonAppendChar(tls, bp, uint8(0)) } @@ -119645,14 +119704,14 @@ if argc&1 != 0 { Xsqlite3_result_error(tls, ctx, - ts+24640, -1) + ts+24687, -1) return } jsonInit(tls, bp, ctx) jsonAppendChar(tls, bp, uint8('{')) for i = 0; i < argc; i = i + 2 { if Xsqlite3_value_type(tls, *(*uintptr)(unsafe.Pointer(argv + uintptr(i)*4))) != SQLITE_TEXT { - Xsqlite3_result_error(tls, ctx, ts+24691, -1) + Xsqlite3_result_error(tls, ctx, ts+24738, -1) jsonReset(tls, bp) return } @@ -119822,9 +119881,9 @@ } jsonWrongNumArgs(tls, ctx, func() uintptr { if bIsSet != 0 { - return ts + 24725 + return ts + 24772 } - return ts + 24729 + return ts + 24776 }()) return __2: @@ -119957,7 +120016,7 @@ (*JsonString)(unsafe.Pointer(pStr)).FnUsed-- } } else { - Xsqlite3_result_text(tls, ctx, ts+24736, 2, uintptr(0)) + Xsqlite3_result_text(tls, ctx, ts+24783, 2, uintptr(0)) } Xsqlite3_result_subtype(tls, ctx, uint32(JSON_SUBTYPE)) } @@ -120054,7 +120113,7 @@ (*JsonString)(unsafe.Pointer(pStr)).FnUsed-- } } else { - Xsqlite3_result_text(tls, ctx, ts+24739, 2, uintptr(0)) + Xsqlite3_result_text(tls, ctx, ts+24786, 2, uintptr(0)) } Xsqlite3_result_subtype(tls, ctx, uint32(JSON_SUBTYPE)) } @@ -120098,7 +120157,7 @@ _ = argc _ = pAux rc = Xsqlite3_declare_vtab(tls, db, - ts+24742) + ts+24789) if rc == SQLITE_OK { pNew = libc.AssignPtrUintptr(ppVtab, Xsqlite3_malloc(tls, int32(unsafe.Sizeof(Sqlite3_vtab{})))) if pNew == uintptr(0) { @@ -120229,7 +120288,7 @@ nn = nn - 2 } } - jsonPrintf(tls, nn+2, pStr, ts+24825, libc.VaList(bp, nn, z)) + jsonPrintf(tls, nn+2, pStr, ts+24872, libc.VaList(bp, nn, z)) } func jsonEachComputePath(tls *libc.TLS, p uintptr, pStr uintptr, i U32) { @@ -120248,7 +120307,7 @@ pNode = (*JsonEachCursor)(unsafe.Pointer(p)).FsParse.FaNode + uintptr(i)*12 pUp = (*JsonEachCursor)(unsafe.Pointer(p)).FsParse.FaNode + uintptr(iUp)*12 if 
int32((*JsonNode)(unsafe.Pointer(pUp)).FeType) == JSON_ARRAY { - jsonPrintf(tls, 30, pStr, ts+24831, libc.VaList(bp, *(*U32)(unsafe.Pointer(pUp + 8)))) + jsonPrintf(tls, 30, pStr, ts+24878, libc.VaList(bp, *(*U32)(unsafe.Pointer(pUp + 8)))) } else { if int32((*JsonNode)(unsafe.Pointer(pNode)).FjnFlags)&JNODE_LABEL == 0 { pNode -= 12 @@ -120344,7 +120403,7 @@ jsonAppendChar(tls, bp+8, uint8('$')) } if int32((*JsonEachCursor)(unsafe.Pointer(p)).FeType) == JSON_ARRAY { - jsonPrintf(tls, 30, bp+8, ts+24831, libc.VaList(bp, (*JsonEachCursor)(unsafe.Pointer(p)).FiRowid)) + jsonPrintf(tls, 30, bp+8, ts+24878, libc.VaList(bp, (*JsonEachCursor)(unsafe.Pointer(p)).FiRowid)) } else if int32((*JsonEachCursor)(unsafe.Pointer(p)).FeType) == JSON_OBJECT { jsonAppendObjectPathElement(tls, bp+8, pThis) } @@ -120368,7 +120427,7 @@ { var zRoot uintptr = (*JsonEachCursor)(unsafe.Pointer(p)).FzRoot if zRoot == uintptr(0) { - zRoot = ts + 24836 + zRoot = ts + 24883 } Xsqlite3_result_text(tls, ctx, zRoot, -1, uintptr(0)) break @@ -120490,7 +120549,7 @@ var rc int32 = SQLITE_NOMEM if int32((*JsonEachCursor)(unsafe.Pointer(p)).FsParse.Foom) == 0 { Xsqlite3_free(tls, (*Sqlite3_vtab)(unsafe.Pointer((*Sqlite3_vtab_cursor)(unsafe.Pointer(cur)).FpVtab)).FzErrMsg) - (*Sqlite3_vtab)(unsafe.Pointer((*Sqlite3_vtab_cursor)(unsafe.Pointer(cur)).FpVtab)).FzErrMsg = Xsqlite3_mprintf(tls, ts+24546, 0) + (*Sqlite3_vtab)(unsafe.Pointer((*Sqlite3_vtab_cursor)(unsafe.Pointer(cur)).FpVtab)).FzErrMsg = Xsqlite3_mprintf(tls, ts+24593, 0) if (*Sqlite3_vtab)(unsafe.Pointer((*Sqlite3_vtab_cursor)(unsafe.Pointer(cur)).FpVtab)).FzErrMsg != 0 { rc = SQLITE_ERROR } @@ -120585,25 +120644,25 @@ } var aJsonFunc = [19]FuncDef{ - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24838}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24843}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24854}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24854}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24872}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(JSON_JSON), FxSFunc: 0, FzName: ts + 24885}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(JSON_SQL), FxSFunc: 0, FzName: ts + 24888}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24892}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24904}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24916}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24927}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: 
ts + 24938}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24950}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(JSON_ISSET), FxSFunc: 0, FzName: ts + 24963}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24972}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24972}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24982}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_UTF8 | 0*SQLITE_FUNC_NEEDCOLL | SQLITE_SUBTYPE | SQLITE_UTF8 | SQLITE_DETERMINISTIC), FxSFunc: 0, FxFinalize: 0, FxValue: 0, FxInverse: 0, FzName: ts + 24993}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_UTF8 | 0*SQLITE_FUNC_NEEDCOLL | SQLITE_SUBTYPE | SQLITE_UTF8 | SQLITE_DETERMINISTIC), FxSFunc: 0, FxFinalize: 0, FxValue: 0, FxInverse: 0, FzName: ts + 25010}} + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24885}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24890}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24901}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24901}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24919}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(JSON_JSON), FxSFunc: 0, FzName: ts + 24932}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(JSON_SQL), FxSFunc: 0, FzName: ts + 24935}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24939}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24951}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24963}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24974}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24985}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24997}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(JSON_ISSET), FxSFunc: 0, FzName: ts + 25010}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | 
SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25019}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25019}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25029}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_UTF8 | 0*SQLITE_FUNC_NEEDCOLL | SQLITE_SUBTYPE | SQLITE_UTF8 | SQLITE_DETERMINISTIC), FxSFunc: 0, FxFinalize: 0, FxValue: 0, FxInverse: 0, FzName: ts + 25040}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_UTF8 | 0*SQLITE_FUNC_NEEDCOLL | SQLITE_SUBTYPE | SQLITE_UTF8 | SQLITE_DETERMINISTIC), FxSFunc: 0, FxFinalize: 0, FxValue: 0, FxInverse: 0, FzName: ts + 25057}} // Register the JSON table-valued functions func Xsqlite3JsonTableFunctions(tls *libc.TLS, db uintptr) int32 { @@ -120622,8 +120681,8 @@ FzName uintptr FpModule uintptr }{ - {FzName: ts + 25028, FpModule: 0}, - {FzName: ts + 25038, FpModule: 0}, + {FzName: ts + 25075, FpModule: 0}, + {FzName: ts + 25085, FpModule: 0}, } type Rtree1 = struct { @@ -120880,11 +120939,11 @@ } } if (*Rtree)(unsafe.Pointer(pRtree)).FpNodeBlob == uintptr(0) { - var zTab uintptr = Xsqlite3_mprintf(tls, ts+25048, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) + var zTab uintptr = Xsqlite3_mprintf(tls, ts+25095, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) if zTab == uintptr(0) { return SQLITE_NOMEM } - rc = Xsqlite3_blob_open(tls, (*Rtree)(unsafe.Pointer(pRtree)).Fdb, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, zTab, ts+25056, iNode, 0, + rc = Xsqlite3_blob_open(tls, (*Rtree)(unsafe.Pointer(pRtree)).Fdb, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, zTab, ts+25103, iNode, 0, pRtree+76) Xsqlite3_free(tls, zTab) } @@ -121095,7 +121154,7 @@ var pRtree uintptr = pVtab var rc int32 var zCreate uintptr = Xsqlite3_mprintf(tls, - ts+25061, + ts+25108, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) @@ -121793,7 +121852,7 @@ var pSrc uintptr var pInfo uintptr - pSrc = Xsqlite3_value_pointer(tls, pValue, ts+25143) + pSrc = Xsqlite3_value_pointer(tls, pValue, ts+25190) if pSrc == uintptr(0) { return SQLITE_ERROR } @@ -123134,7 +123193,7 @@ var zSql uintptr var rc int32 - zSql = Xsqlite3_mprintf(tls, ts+25157, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) + zSql = Xsqlite3_mprintf(tls, ts+25204, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) if zSql != 0 { rc = Xsqlite3_prepare_v2(tls, (*Rtree)(unsafe.Pointer(pRtree)).Fdb, zSql, -1, bp+56, uintptr(0)) } else { @@ -123146,12 +123205,12 @@ if iCol == 0 { var zCol uintptr = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), 0) (*Rtree)(unsafe.Pointer(pRtree)).Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+25177, libc.VaList(bp+16, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zCol)) + ts+25224, libc.VaList(bp+16, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zCol)) } else { var zCol1 uintptr = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), iCol) var zCol2 uintptr = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), iCol+1) (*Rtree)(unsafe.Pointer(pRtree)).Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+25209, libc.VaList(bp+32, 
(*Rtree)(unsafe.Pointer(pRtree)).FzName, zCol1, zCol2)) + ts+25256, libc.VaList(bp+32, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zCol1, zCol2)) } } @@ -123377,7 +123436,7 @@ var pRtree uintptr = pVtab var rc int32 = SQLITE_NOMEM var zSql uintptr = Xsqlite3_mprintf(tls, - ts+25246, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName)) + ts+25293, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName)) if zSql != 0 { nodeBlobReset(tls, pRtree) rc = Xsqlite3_exec(tls, (*Rtree)(unsafe.Pointer(pRtree)).Fdb, zSql, uintptr(0), uintptr(0), uintptr(0)) @@ -123400,7 +123459,7 @@ bp := tls.Alloc(20) defer tls.Free(20) - var zFmt uintptr = ts + 25391 + var zFmt uintptr = ts + 25438 var zSql uintptr var rc int32 @@ -123448,7 +123507,7 @@ } var azName1 = [3]uintptr{ - ts + 25447, ts + 5060, ts + 16267, + ts + 25494, ts + 5060, ts + 16267, } var rtreeModule = Sqlite3_module{ @@ -123491,19 +123550,19 @@ var p uintptr = Xsqlite3_str_new(tls, db) var ii int32 Xsqlite3_str_appendf(tls, p, - ts+25452, + ts+25499, libc.VaList(bp, zDb, zPrefix)) for ii = 0; ii < int32((*Rtree)(unsafe.Pointer(pRtree)).FnAux); ii++ { - Xsqlite3_str_appendf(tls, p, ts+25514, libc.VaList(bp+16, ii)) + Xsqlite3_str_appendf(tls, p, ts+25561, libc.VaList(bp+16, ii)) } Xsqlite3_str_appendf(tls, p, - ts+25519, + ts+25566, libc.VaList(bp+24, zDb, zPrefix)) Xsqlite3_str_appendf(tls, p, - ts+25583, + ts+25630, libc.VaList(bp+40, zDb, zPrefix)) Xsqlite3_str_appendf(tls, p, - ts+25653, + ts+25700, libc.VaList(bp+56, zDb, zPrefix, (*Rtree)(unsafe.Pointer(pRtree)).FiNodeSize)) zCreate = Xsqlite3_str_finish(tls, p) if !(zCreate != 0) { @@ -123532,7 +123591,7 @@ if i != 3 || int32((*Rtree)(unsafe.Pointer(pRtree)).FnAux) == 0 { zFormat = azSql[i] } else { - zFormat = ts + 25702 + zFormat = ts + 25749 } zSql = Xsqlite3_mprintf(tls, zFormat, libc.VaList(bp+80, zDb, zPrefix)) if zSql != 0 { @@ -123544,7 +123603,7 @@ } if (*Rtree)(unsafe.Pointer(pRtree)).FnAux != 0 { (*Rtree)(unsafe.Pointer(pRtree)).FzReadAuxSql = Xsqlite3_mprintf(tls, - ts+25810, + ts+25857, libc.VaList(bp+96, zDb, zPrefix)) if (*Rtree)(unsafe.Pointer(pRtree)).FzReadAuxSql == uintptr(0) { rc = SQLITE_NOMEM @@ -123552,18 +123611,18 @@ var p uintptr = Xsqlite3_str_new(tls, db) var ii int32 var zSql uintptr - Xsqlite3_str_appendf(tls, p, ts+25855, libc.VaList(bp+112, zDb, zPrefix)) + Xsqlite3_str_appendf(tls, p, ts+25902, libc.VaList(bp+112, zDb, zPrefix)) for ii = 0; ii < int32((*Rtree)(unsafe.Pointer(pRtree)).FnAux); ii++ { if ii != 0 { Xsqlite3_str_append(tls, p, ts+12767, 1) } if ii < int32((*Rtree)(unsafe.Pointer(pRtree)).FnAuxNotNull) { - Xsqlite3_str_appendf(tls, p, ts+25882, libc.VaList(bp+128, ii, ii+2, ii)) + Xsqlite3_str_appendf(tls, p, ts+25929, libc.VaList(bp+128, ii, ii+2, ii)) } else { - Xsqlite3_str_appendf(tls, p, ts+25904, libc.VaList(bp+152, ii, ii+2)) + Xsqlite3_str_appendf(tls, p, ts+25951, libc.VaList(bp+152, ii, ii+2)) } } - Xsqlite3_str_appendf(tls, p, ts+25912, 0) + Xsqlite3_str_appendf(tls, p, ts+25959, 0) zSql = Xsqlite3_str_finish(tls, p) if zSql == uintptr(0) { rc = SQLITE_NOMEM @@ -123578,14 +123637,14 
@@ } var azSql = [8]uintptr{ - ts + 25928, - ts + 25981, - ts + 26026, - ts + 26078, - ts + 26132, - ts + 26177, - ts + 26235, - ts + 26290, + ts + 25975, + ts + 26028, + ts + 26073, + ts + 26125, + ts + 26179, + ts + 26224, + ts + 26282, + ts + 26337, } func getIntFromStmt(tls *libc.TLS, db uintptr, zSql uintptr, piVal uintptr) int32 { @@ -123614,7 +123673,7 @@ var zSql uintptr if isCreate != 0 { *(*int32)(unsafe.Pointer(bp + 48)) = 0 - zSql = Xsqlite3_mprintf(tls, ts+26337, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb)) + zSql = Xsqlite3_mprintf(tls, ts+26384, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb)) rc = getIntFromStmt(tls, db, zSql, bp+48) if rc == SQLITE_OK { (*Rtree)(unsafe.Pointer(pRtree)).FiNodeSize = *(*int32)(unsafe.Pointer(bp + 48)) - 64 @@ -123626,7 +123685,7 @@ } } else { zSql = Xsqlite3_mprintf(tls, - ts+26357, + ts+26404, libc.VaList(bp+16, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) rc = getIntFromStmt(tls, db, zSql, pRtree+16) if rc != SQLITE_OK { @@ -123634,7 +123693,7 @@ } else if (*Rtree)(unsafe.Pointer(pRtree)).FiNodeSize < 512-64 { rc = SQLITE_CORRUPT | int32(1)<<8 - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+26414, + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+26461, libc.VaList(bp+40, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) } } @@ -123676,10 +123735,10 @@ ii = 4 *(*[5]uintptr)(unsafe.Pointer(bp + 96)) = [5]uintptr{ uintptr(0), - ts + 26449, - ts + 26492, - ts + 26527, - ts + 26563, + ts + 26496, + ts + 26539, + ts + 26574, + ts + 26610, } if !(argc < 6 || argc > RTREE_MAX_AUX_COLUMN+3) { @@ -123710,7 +123769,7 @@ libc.Xmemcpy(tls, (*Rtree)(unsafe.Pointer(pRtree)).FzName, *(*uintptr)(unsafe.Pointer(argv + 2*4)), uint32(nName)) pSql = Xsqlite3_str_new(tls, db) - Xsqlite3_str_appendf(tls, pSql, ts+26600, + Xsqlite3_str_appendf(tls, pSql, ts+26647, libc.VaList(bp+16, rtreeTokenLength(tls, *(*uintptr)(unsafe.Pointer(argv + 3*4))), *(*uintptr)(unsafe.Pointer(argv + 3*4)))) ii = 4 __3: @@ -123722,7 +123781,7 @@ goto __6 } (*Rtree)(unsafe.Pointer(pRtree)).FnAux++ - Xsqlite3_str_appendf(tls, pSql, ts+26624, libc.VaList(bp+32, rtreeTokenLength(tls, zArg+uintptr(1)), zArg+uintptr(1))) + Xsqlite3_str_appendf(tls, pSql, ts+26671, libc.VaList(bp+32, rtreeTokenLength(tls, zArg+uintptr(1)), zArg+uintptr(1))) goto __7 __6: if !(int32((*Rtree)(unsafe.Pointer(pRtree)).FnAux) > 0) { @@ -123745,7 +123804,7 @@ goto __5 __5: ; - Xsqlite3_str_appendf(tls, pSql, ts+26630, 0) + Xsqlite3_str_appendf(tls, pSql, ts+26677, 0) zSql = Xsqlite3_str_finish(tls, pSql) if !!(zSql != 0) { goto __10 @@ -123841,7 +123900,7 @@ return rc } -var azFormat = [2]uintptr{ts + 26633, ts + 26644} +var azFormat = [2]uintptr{ts + 26680, ts + 26691} func rtreenode(tls *libc.TLS, ctx uintptr, nArg int32, apArg uintptr) { bp := tls.Alloc(600) @@ -123881,11 +123940,11 @@ if ii > 0 { Xsqlite3_str_append(tls, pOut, ts+10920, 1) } - Xsqlite3_str_appendf(tls, pOut, ts+26654, libc.VaList(bp, (*RtreeCell)(unsafe.Pointer(bp+552)).FiRowid)) + Xsqlite3_str_appendf(tls, pOut, ts+26701, libc.VaList(bp, (*RtreeCell)(unsafe.Pointer(bp+552)).FiRowid)) for jj = 0; jj < int32((*Rtree)(unsafe.Pointer(bp+48)).FnDim2); jj++ { - Xsqlite3_str_appendf(tls, pOut, ts+26660, libc.VaList(bp+8, float64(*(*RtreeValue)(unsafe.Pointer(bp + 552 + 8 + uintptr(jj)*4))))) + Xsqlite3_str_appendf(tls, pOut, ts+26707, libc.VaList(bp+8, float64(*(*RtreeValue)(unsafe.Pointer(bp + 552 + 8 + uintptr(jj)*4))))) } - Xsqlite3_str_append(tls, pOut, ts+26664, 1) + 
Xsqlite3_str_append(tls, pOut, ts+26711, 1) } errCode = Xsqlite3_str_errcode(tls, pOut) Xsqlite3_result_text(tls, ctx, Xsqlite3_str_finish(tls, pOut), -1, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{Xsqlite3_free}))) @@ -123896,7 +123955,7 @@ _ = nArg if Xsqlite3_value_type(tls, *(*uintptr)(unsafe.Pointer(apArg))) != SQLITE_BLOB || Xsqlite3_value_bytes(tls, *(*uintptr)(unsafe.Pointer(apArg))) < 2 { - Xsqlite3_result_error(tls, ctx, ts+26666, -1) + Xsqlite3_result_error(tls, ctx, ts+26713, -1) } else { var zBlob uintptr = Xsqlite3_value_blob(tls, *(*uintptr)(unsafe.Pointer(apArg))) if zBlob != 0 { @@ -123972,7 +124031,7 @@ if z == uintptr(0) { (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc = SQLITE_NOMEM } else { - (*RtreeCheck)(unsafe.Pointer(pCheck)).FzReport = Xsqlite3_mprintf(tls, ts+26699, + (*RtreeCheck)(unsafe.Pointer(pCheck)).FzReport = Xsqlite3_mprintf(tls, ts+26746, libc.VaList(bp, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzReport, func() uintptr { if (*RtreeCheck)(unsafe.Pointer(pCheck)).FzReport != 0 { return ts + 4062 @@ -123996,7 +124055,7 @@ if (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc == SQLITE_OK && (*RtreeCheck)(unsafe.Pointer(pCheck)).FpGetNode == uintptr(0) { (*RtreeCheck)(unsafe.Pointer(pCheck)).FpGetNode = rtreeCheckPrepare(tls, pCheck, - ts+26706, + ts+26753, libc.VaList(bp, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzDb, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzTab)) } @@ -124015,7 +124074,7 @@ } rtreeCheckReset(tls, pCheck, (*RtreeCheck)(unsafe.Pointer(pCheck)).FpGetNode) if (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc == SQLITE_OK && pRet == uintptr(0) { - rtreeCheckAppendMsg(tls, pCheck, ts+26751, libc.VaList(bp+16, iNode)) + rtreeCheckAppendMsg(tls, pCheck, ts+26798, libc.VaList(bp+16, iNode)) } } @@ -124029,8 +124088,8 @@ var rc int32 var pStmt uintptr *(*[2]uintptr)(unsafe.Pointer(bp + 80)) = [2]uintptr{ - ts + 26783, - ts + 26837, + ts + 26830, + ts + 26884, } if *(*uintptr)(unsafe.Pointer(pCheck + 24 + uintptr(bLeaf)*4)) == uintptr(0) { @@ -124045,23 +124104,23 @@ Xsqlite3_bind_int64(tls, pStmt, 1, iKey) rc = Xsqlite3_step(tls, pStmt) if rc == SQLITE_DONE { - rtreeCheckAppendMsg(tls, pCheck, ts+26885, + rtreeCheckAppendMsg(tls, pCheck, ts+26932, libc.VaList(bp+16, iKey, iVal, func() uintptr { if bLeaf != 0 { - return ts + 26930 + return ts + 26977 } - return ts + 26938 + return ts + 26985 }())) } else if rc == SQLITE_ROW { var ii I64 = Xsqlite3_column_int64(tls, pStmt, 0) if ii != iVal { rtreeCheckAppendMsg(tls, pCheck, - ts+26947, + ts+26994, libc.VaList(bp+40, iKey, ii, func() uintptr { if bLeaf != 0 { - return ts + 26930 + return ts + 26977 } - return ts + 26938 + return ts + 26985 }(), iKey, iVal)) } } @@ -124085,7 +124144,7 @@ return libc.Bool32(*(*RtreeValue)(unsafe.Pointer(bp + 48)) > *(*RtreeValue)(unsafe.Pointer(bp + 52))) }() != 0 { rtreeCheckAppendMsg(tls, pCheck, - ts+27005, libc.VaList(bp, i, iCell, iNode)) + ts+27052, libc.VaList(bp, i, iCell, iNode)) } if pParent != 0 { @@ -124105,7 +124164,7 @@ return libc.Bool32(*(*RtreeValue)(unsafe.Pointer(bp + 52)) > *(*RtreeValue)(unsafe.Pointer(bp + 60))) }() != 0 { rtreeCheckAppendMsg(tls, pCheck, - ts+27053, libc.VaList(bp+24, i, iCell, iNode)) + ts+27100, libc.VaList(bp+24, i, iCell, iNode)) } } } @@ -124122,14 +124181,14 @@ if aNode != 0 { if *(*int32)(unsafe.Pointer(bp + 48)) < 4 { rtreeCheckAppendMsg(tls, pCheck, - ts+27120, libc.VaList(bp, iNode, *(*int32)(unsafe.Pointer(bp + 48)))) + ts+27167, libc.VaList(bp, iNode, *(*int32)(unsafe.Pointer(bp + 48)))) } else { var nCell int32 var i int32 if 
aParent == uintptr(0) { iDepth = readInt16(tls, aNode) if iDepth > RTREE_MAX_DEPTH { - rtreeCheckAppendMsg(tls, pCheck, ts+27154, libc.VaList(bp+16, iDepth)) + rtreeCheckAppendMsg(tls, pCheck, ts+27201, libc.VaList(bp+16, iDepth)) Xsqlite3_free(tls, aNode) return } @@ -124137,7 +124196,7 @@ nCell = readInt16(tls, aNode+2) if 4+nCell*(8+(*RtreeCheck)(unsafe.Pointer(pCheck)).FnDim*2*4) > *(*int32)(unsafe.Pointer(bp + 48)) { rtreeCheckAppendMsg(tls, pCheck, - ts+27184, + ts+27231, libc.VaList(bp+24, iNode, nCell, *(*int32)(unsafe.Pointer(bp + 48)))) } else { for i = 0; i < nCell; i++ { @@ -124166,14 +124225,14 @@ if (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc == SQLITE_OK { var pCount uintptr - pCount = rtreeCheckPrepare(tls, pCheck, ts+27239, + pCount = rtreeCheckPrepare(tls, pCheck, ts+27286, libc.VaList(bp, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzDb, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzTab, zTbl)) if pCount != 0 { if Xsqlite3_step(tls, pCount) == SQLITE_ROW { var nActual I64 = Xsqlite3_column_int64(tls, pCount, 0) if nActual != nExpect { rtreeCheckAppendMsg(tls, pCheck, - ts+27270, libc.VaList(bp+24, zTbl, nExpect, nActual)) + ts+27317, libc.VaList(bp+24, zTbl, nExpect, nActual)) } } (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc = Xsqlite3_finalize(tls, pCount) @@ -124200,7 +124259,7 @@ } if (*RtreeCheck)(unsafe.Pointer(bp+32)).Frc == SQLITE_OK { - pStmt = rtreeCheckPrepare(tls, bp+32, ts+27337, libc.VaList(bp, zDb, zTab)) + pStmt = rtreeCheckPrepare(tls, bp+32, ts+27384, libc.VaList(bp, zDb, zTab)) if pStmt != 0 { nAux = Xsqlite3_column_count(tls, pStmt) - 2 Xsqlite3_finalize(tls, pStmt) @@ -124209,12 +124268,12 @@ } } - pStmt = rtreeCheckPrepare(tls, bp+32, ts+25157, libc.VaList(bp+16, zDb, zTab)) + pStmt = rtreeCheckPrepare(tls, bp+32, ts+25204, libc.VaList(bp+16, zDb, zTab)) if pStmt != 0 { var rc int32 (*RtreeCheck)(unsafe.Pointer(bp + 32)).FnDim = (Xsqlite3_column_count(tls, pStmt) - 1 - nAux) / 2 if (*RtreeCheck)(unsafe.Pointer(bp+32)).FnDim < 1 { - rtreeCheckAppendMsg(tls, bp+32, ts+27365, 0) + rtreeCheckAppendMsg(tls, bp+32, ts+27412, 0) } else if SQLITE_ROW == Xsqlite3_step(tls, pStmt) { (*RtreeCheck)(unsafe.Pointer(bp + 32)).FbInt = libc.Bool32(Xsqlite3_column_type(tls, pStmt, 1) == SQLITE_INTEGER) } @@ -124228,8 +124287,8 @@ if (*RtreeCheck)(unsafe.Pointer(bp+32)).Frc == SQLITE_OK { rtreeCheckNode(tls, bp+32, 0, uintptr(0), int64(1)) } - rtreeCheckCount(tls, bp+32, ts+27396, int64((*RtreeCheck)(unsafe.Pointer(bp+32)).FnLeaf)) - rtreeCheckCount(tls, bp+32, ts+27403, int64((*RtreeCheck)(unsafe.Pointer(bp+32)).FnNonLeaf)) + rtreeCheckCount(tls, bp+32, ts+27443, int64((*RtreeCheck)(unsafe.Pointer(bp+32)).FnLeaf)) + rtreeCheckCount(tls, bp+32, ts+27450, int64((*RtreeCheck)(unsafe.Pointer(bp+32)).FnNonLeaf)) } Xsqlite3_finalize(tls, (*RtreeCheck)(unsafe.Pointer(bp+32)).FpGetNode) @@ -124237,7 +124296,7 @@ Xsqlite3_finalize(tls, *(*uintptr)(unsafe.Pointer(bp + 32 + 24 + 1*4))) if bEnd != 0 { - var rc int32 = Xsqlite3_exec(tls, db, ts+27411, uintptr(0), uintptr(0), uintptr(0)) + var rc int32 = Xsqlite3_exec(tls, db, ts+27458, uintptr(0), uintptr(0), uintptr(0)) if (*RtreeCheck)(unsafe.Pointer(bp+32)).Frc == SQLITE_OK { (*RtreeCheck)(unsafe.Pointer(bp + 32)).Frc = rc } @@ -124252,7 +124311,7 @@ if nArg != 1 && nArg != 2 { Xsqlite3_result_error(tls, ctx, - ts+27415, -1) + ts+27462, -1) } else { var rc int32 *(*uintptr)(unsafe.Pointer(bp)) = uintptr(0) @@ -124270,7 +124329,7 @@ if *(*uintptr)(unsafe.Pointer(bp)) != 0 { return *(*uintptr)(unsafe.Pointer(bp)) } - return ts + 18012 + return ts 
+ 18059 }(), -1, libc.UintptrFromInt32(-1)) } else { Xsqlite3_result_error_code(tls, ctx, rc) @@ -124640,11 +124699,11 @@ var db uintptr = Xsqlite3_context_db_handle(tls, context) var x uintptr = Xsqlite3_str_new(tls, db) var i int32 - Xsqlite3_str_append(tls, x, ts+27466, 1) + Xsqlite3_str_append(tls, x, ts+27513, 1) for i = 0; i < (*GeoPoly)(unsafe.Pointer(p)).FnVertex; i++ { - Xsqlite3_str_appendf(tls, x, ts+27468, libc.VaList(bp, float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2)*4))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2+1)*4))))) + Xsqlite3_str_appendf(tls, x, ts+27515, libc.VaList(bp, float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2)*4))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2+1)*4))))) } - Xsqlite3_str_appendf(tls, x, ts+27479, libc.VaList(bp+16, float64(*(*GeoCoord)(unsafe.Pointer(p + 8))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + 1*4))))) + Xsqlite3_str_appendf(tls, x, ts+27526, libc.VaList(bp+16, float64(*(*GeoCoord)(unsafe.Pointer(p + 8))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + 1*4))))) Xsqlite3_result_text(tls, context, Xsqlite3_str_finish(tls, x), -1, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{Xsqlite3_free}))) Xsqlite3_free(tls, p) } @@ -124664,19 +124723,19 @@ var x uintptr = Xsqlite3_str_new(tls, db) var i int32 var cSep uint8 = uint8('\'') - Xsqlite3_str_appendf(tls, x, ts+27490, 0) + Xsqlite3_str_appendf(tls, x, ts+27537, 0) for i = 0; i < (*GeoPoly)(unsafe.Pointer(p)).FnVertex; i++ { - Xsqlite3_str_appendf(tls, x, ts+27508, libc.VaList(bp, int32(cSep), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2)*4))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2+1)*4))))) + Xsqlite3_str_appendf(tls, x, ts+27555, libc.VaList(bp, int32(cSep), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2)*4))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2+1)*4))))) cSep = uint8(' ') } - Xsqlite3_str_appendf(tls, x, ts+27516, libc.VaList(bp+24, float64(*(*GeoCoord)(unsafe.Pointer(p + 8))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + 1*4))))) + Xsqlite3_str_appendf(tls, x, ts+27563, libc.VaList(bp+24, float64(*(*GeoCoord)(unsafe.Pointer(p + 8))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + 1*4))))) for i = 1; i < argc; i++ { var z uintptr = Xsqlite3_value_text(tls, *(*uintptr)(unsafe.Pointer(argv + uintptr(i)*4))) if z != 0 && *(*uint8)(unsafe.Pointer(z)) != 0 { - Xsqlite3_str_appendf(tls, x, ts+27524, libc.VaList(bp+40, z)) + Xsqlite3_str_appendf(tls, x, ts+27571, libc.VaList(bp+40, z)) } } - Xsqlite3_str_appendf(tls, x, ts+27528, 0) + Xsqlite3_str_appendf(tls, x, ts+27575, 0) Xsqlite3_result_text(tls, context, Xsqlite3_str_finish(tls, x), -1, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{Xsqlite3_free}))) Xsqlite3_free(tls, p) } @@ -125595,7 +125654,7 @@ libc.Xmemcpy(tls, (*Rtree)(unsafe.Pointer(pRtree)).FzName, *(*uintptr)(unsafe.Pointer(argv + 2*4)), uint32(nName)) pSql = Xsqlite3_str_new(tls, db) - Xsqlite3_str_appendf(tls, pSql, ts+27541, 0) + Xsqlite3_str_appendf(tls, pSql, ts+27588, 0) (*Rtree)(unsafe.Pointer(pRtree)).FnAux = U8(1) (*Rtree)(unsafe.Pointer(pRtree)).FnAuxNotNull = U8(1) ii = 3 @@ -125604,7 +125663,7 @@ goto __4 } (*Rtree)(unsafe.Pointer(pRtree)).FnAux++ - Xsqlite3_str_appendf(tls, pSql, ts+27563, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(argv + uintptr(ii)*4)))) + Xsqlite3_str_appendf(tls, pSql, ts+27610, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(argv + uintptr(ii)*4)))) goto __3 __3: ii++ @@ -125612,7 +125671,7 @@ goto __4 __4: ; - 
Xsqlite3_str_appendf(tls, pSql, ts+26630, 0) + Xsqlite3_str_appendf(tls, pSql, ts+26677, 0) zSql = Xsqlite3_str_finish(tls, pSql) if !!(zSql != 0) { goto __5 @@ -125849,7 +125908,7 @@ } if iFuncTerm >= 0 { (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxNum = idxNum - (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxStr = ts + 27567 + (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxStr = ts + 27614 (*sqlite3_index_constraint_usage)(unsafe.Pointer((*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FaConstraintUsage + uintptr(iFuncTerm)*8)).FargvIndex = 1 (*sqlite3_index_constraint_usage)(unsafe.Pointer((*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FaConstraintUsage + uintptr(iFuncTerm)*8)).Fomit = uint8(0) (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FestimatedCost = 300.0 @@ -125857,7 +125916,7 @@ return SQLITE_OK } (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxNum = 4 - (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxStr = ts + 27573 + (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxStr = ts + 27620 (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FestimatedCost = 3000000.0 (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FestimatedRows = int64(100000) return SQLITE_OK @@ -125969,7 +126028,7 @@ if !(*(*int32)(unsafe.Pointer(bp + 48)) == SQLITE_ERROR) { goto __4 } - (*Sqlite3_vtab)(unsafe.Pointer(pVtab)).FzErrMsg = Xsqlite3_mprintf(tls, ts+27582, 0) + (*Sqlite3_vtab)(unsafe.Pointer(pVtab)).FzErrMsg = Xsqlite3_mprintf(tls, ts+27629, 0) __4: ; goto geopoly_update_end @@ -126101,14 +126160,14 @@ func geopolyFindFunction(tls *libc.TLS, pVtab uintptr, nArg int32, zName uintptr, pxFunc uintptr, ppArg uintptr) int32 { _ = pVtab _ = nArg - if Xsqlite3_stricmp(tls, zName, ts+27622) == 0 { + if Xsqlite3_stricmp(tls, zName, ts+27669) == 0 { *(*uintptr)(unsafe.Pointer(pxFunc)) = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{geopolyOverlapFunc})) *(*uintptr)(unsafe.Pointer(ppArg)) = uintptr(0) return SQLITE_INDEX_CONSTRAINT_FUNCTION } - if Xsqlite3_stricmp(tls, zName, ts+27638) == 0 { + if Xsqlite3_stricmp(tls, zName, ts+27685) == 0 { *(*uintptr)(unsafe.Pointer(pxFunc)) = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{geopolyWithinFunc})) @@ -126173,7 +126232,7 @@ uintptr(0), aAgg[i].FxStep, aAgg[i].FxFinal) } if rc == SQLITE_OK { - rc = Xsqlite3_create_module_v2(tls, db, ts+27653, uintptr(unsafe.Pointer(&geopolyModule)), uintptr(0), uintptr(0)) + rc = Xsqlite3_create_module_v2(tls, db, ts+27700, uintptr(unsafe.Pointer(&geopolyModule)), uintptr(0), uintptr(0)) } return rc } @@ -126185,25 +126244,25 @@ F__ccgo_pad1 [2]byte FzName uintptr }{ - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27661}, - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27674}, - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27687}, - {FxFunc: 0, FnArg: int8(-1), FbPure: uint8(1), FzName: ts + 27700}, - {FxFunc: 0, FnArg: int8(2), FbPure: uint8(1), FzName: ts + 27638}, - {FxFunc: 0, FnArg: int8(3), FbPure: uint8(1), FzName: ts + 27712}, - {FxFunc: 0, FnArg: int8(2), FbPure: uint8(1), FzName: ts + 27622}, - {FxFunc: 0, FnArg: int8(1), FzName: ts + 27735}, - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27749}, - {FxFunc: 0, FnArg: int8(7), FbPure: uint8(1), FzName: ts + 27762}, - {FxFunc: 0, FnArg: int8(4), FbPure: uint8(1), FzName: ts + 27776}, - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27792}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27708}, + 
{FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27721}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27734}, + {FxFunc: 0, FnArg: int8(-1), FbPure: uint8(1), FzName: ts + 27747}, + {FxFunc: 0, FnArg: int8(2), FbPure: uint8(1), FzName: ts + 27685}, + {FxFunc: 0, FnArg: int8(3), FbPure: uint8(1), FzName: ts + 27759}, + {FxFunc: 0, FnArg: int8(2), FbPure: uint8(1), FzName: ts + 27669}, + {FxFunc: 0, FnArg: int8(1), FzName: ts + 27782}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27796}, + {FxFunc: 0, FnArg: int8(7), FbPure: uint8(1), FzName: ts + 27809}, + {FxFunc: 0, FnArg: int8(4), FbPure: uint8(1), FzName: ts + 27823}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27839}, } var aAgg = [1]struct { FxStep uintptr FxFinal uintptr FzName uintptr }{ - {FxStep: 0, FxFinal: 0, FzName: ts + 27804}, + {FxStep: 0, FxFinal: 0, FzName: ts + 27851}, } // Register the r-tree module with database handle db. This creates the @@ -126213,26 +126272,26 @@ var utf8 int32 = SQLITE_UTF8 var rc int32 - rc = Xsqlite3_create_function(tls, db, ts+27823, 2, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + rc = Xsqlite3_create_function(tls, db, ts+27870, 2, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rtreenode})), uintptr(0), uintptr(0)) if rc == SQLITE_OK { - rc = Xsqlite3_create_function(tls, db, ts+27833, 1, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + rc = Xsqlite3_create_function(tls, db, ts+27880, 1, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rtreedepth})), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { - rc = Xsqlite3_create_function(tls, db, ts+27844, -1, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + rc = Xsqlite3_create_function(tls, db, ts+27891, -1, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rtreecheck})), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { var c uintptr = uintptr(RTREE_COORD_REAL32) - rc = Xsqlite3_create_module_v2(tls, db, ts+27567, uintptr(unsafe.Pointer(&rtreeModule)), c, uintptr(0)) + rc = Xsqlite3_create_module_v2(tls, db, ts+27614, uintptr(unsafe.Pointer(&rtreeModule)), c, uintptr(0)) } if rc == SQLITE_OK { var c uintptr = uintptr(RTREE_COORD_INT32) - rc = Xsqlite3_create_module_v2(tls, db, ts+27855, uintptr(unsafe.Pointer(&rtreeModule)), c, uintptr(0)) + rc = Xsqlite3_create_module_v2(tls, db, ts+27902, uintptr(unsafe.Pointer(&rtreeModule)), c, uintptr(0)) } if rc == SQLITE_OK { rc = sqlite3_geopoly_init(tls, db) @@ -126286,7 +126345,7 @@ Xsqlite3_result_error_nomem(tls, ctx) rtreeMatchArgFree(tls, pBlob) } else { - Xsqlite3_result_pointer(tls, ctx, pBlob, ts+25143, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{rtreeMatchArgFree}))) + Xsqlite3_result_pointer(tls, ctx, pBlob, ts+25190, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{rtreeMatchArgFree}))) } } } @@ -126604,7 +126663,7 @@ nOut = rbuDeltaOutputSize(tls, aDelta, nDelta) if nOut < 0 { - Xsqlite3_result_error(tls, context, ts+27865, -1) + Xsqlite3_result_error(tls, context, ts+27912, -1) return } @@ -126615,7 +126674,7 @@ nOut2 = rbuDeltaApply(tls, aOrig, nOrig, aDelta, nDelta, aOut) if nOut2 != nOut { Xsqlite3_free(tls, aOut) - Xsqlite3_result_error(tls, context, ts+27865, -1) + Xsqlite3_result_error(tls, context, ts+27912, -1) } else { Xsqlite3_result_blob(tls, context, aOut, nOut, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, 
uintptr) }{Xsqlite3_free}))) } @@ -126716,7 +126775,7 @@ rbuObjIterClearStatements(tls, pIter) if (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx == uintptr(0) { rc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+27886, uintptr(0), uintptr(0), p+36) + ts+27933, uintptr(0), uintptr(0), p+36) } if rc == SQLITE_OK { @@ -126780,7 +126839,7 @@ Xsqlite3_result_text(tls, pCtx, zIn, -1, uintptr(0)) } } else { - if libc.Xstrlen(tls, zIn) > Size_t(4) && libc.Xmemcmp(tls, ts+25056, zIn, uint32(4)) == 0 { + if libc.Xstrlen(tls, zIn) > Size_t(4) && libc.Xmemcmp(tls, ts+25103, zIn, uint32(4)) == 0 { var i int32 for i = 4; int32(*(*uint8)(unsafe.Pointer(zIn + uintptr(i)))) >= '0' && int32(*(*uint8)(unsafe.Pointer(zIn + uintptr(i)))) <= '9'; i++ { } @@ -126801,16 +126860,16 @@ rc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, pIter, p+36, Xsqlite3_mprintf(tls, - ts+28057, libc.VaList(bp, func() uintptr { + ts+28104, libc.VaList(bp, func() uintptr { if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - return ts + 28207 + return ts + 28254 } return ts + 1554 }()))) if rc == SQLITE_OK { rc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+4, p+36, - ts+28248) + ts+28295) } (*RbuObjIter)(unsafe.Pointer(pIter)).FbCleanup = 1 @@ -126926,7 +126985,7 @@ (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+32, p+36, Xsqlite3_mprintf(tls, - ts+28373, libc.VaList(bp, zTab))) + ts+28420, libc.VaList(bp, zTab))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc != SQLITE_OK || Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 32))) != SQLITE_ROW) { goto __1 } @@ -126944,7 +127003,7 @@ *(*int32)(unsafe.Pointer(piTnum)) = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 32)), 1) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+32+1*4, p+36, - Xsqlite3_mprintf(tls, ts+28492, libc.VaList(bp+8, zTab))) + Xsqlite3_mprintf(tls, ts+28539, libc.VaList(bp+8, zTab))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc != 0) { goto __3 } @@ -126962,7 +127021,7 @@ } (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+32+2*4, p+36, Xsqlite3_mprintf(tls, - ts+28513, libc.VaList(bp+16, zIdx))) + ts+28560, libc.VaList(bp+16, zIdx))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK) { goto __7 } @@ -126985,7 +127044,7 @@ __5: ; (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+32+3*4, p+36, - Xsqlite3_mprintf(tls, ts+28564, libc.VaList(bp+24, zTab))) + Xsqlite3_mprintf(tls, ts+28611, libc.VaList(bp+24, zTab))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK) { goto __10 } @@ -127031,7 +127090,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { libc.Xmemcpy(tls, (*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed, (*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk, uint32(unsafe.Sizeof(U8(0)))*uint32((*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol)) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+16, p+36, - Xsqlite3_mprintf(tls, ts+28585, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) + Xsqlite3_mprintf(tls, ts+28632, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) } (*RbuObjIter)(unsafe.Pointer(pIter)).FnIndex = 0 @@ -127046,7 +127105,7 @@ libc.Xmemset(tls, 
(*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed, 0x01, uint32(unsafe.Sizeof(U8(0)))*uint32((*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol)) } (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+20, p+36, - Xsqlite3_mprintf(tls, ts+28613, libc.VaList(bp+8, zIdx))) + Xsqlite3_mprintf(tls, ts+28660, libc.VaList(bp+8, zIdx))) for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 20))) { var iCid int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 20)), 1) if iCid >= 0 { @@ -127086,7 +127145,7 @@ rbuTableType(tls, p, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, pIter+36, bp+56, pIter+60) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NOTABLE { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+19489, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl)) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+19536, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl)) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc != 0 { return (*Sqlite3rbu)(unsafe.Pointer(p)).Frc @@ -127096,18 +127155,18 @@ } (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp+60, p+36, - Xsqlite3_mprintf(tls, ts+28642, libc.VaList(bp+8, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl))) + Xsqlite3_mprintf(tls, ts+28689, libc.VaList(bp+8, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl))) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { nCol = Xsqlite3_column_count(tls, *(*uintptr)(unsafe.Pointer(bp + 60))) rbuAllocateIterArrays(tls, p, pIter, nCol) } for i = 0; (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && i < nCol; i++ { var zName uintptr = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp + 60)), i) - if Xsqlite3_strnicmp(tls, ts+28661, zName, 4) != 0 { + if Xsqlite3_strnicmp(tls, ts+28708, zName, 4) != 0 { var zCopy uintptr = rbuStrndup(tls, zName, p+32) *(*int32)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FaiSrcOrder + uintptr((*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol)*4)) = (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(libc.PostIncInt32(&(*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol, 1))*4)) = zCopy - } else if 0 == Xsqlite3_stricmp(tls, ts+28666, zName) { + } else if 0 == Xsqlite3_stricmp(tls, ts+28713, zName) { bRbuRowid = 1 } } @@ -127119,18 +127178,18 @@ bRbuRowid != libc.Bool32((*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_VTAB || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE) { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, - ts+28676, libc.VaList(bp+16, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, + ts+28723, libc.VaList(bp+16, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, func() uintptr { if bRbuRowid != 0 { - return ts + 28705 + return ts + 28752 } - return ts + 28718 + return ts + 28765 }())) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+60, p+36, - Xsqlite3_mprintf(tls, ts+28727, libc.VaList(bp+32, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) + Xsqlite3_mprintf(tls, ts+28774, libc.VaList(bp+32, 
(*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) } for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 60))) { var zName uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 60)), 1) @@ -127144,7 +127203,7 @@ } if i == (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+28749, + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+28796, libc.VaList(bp+40, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zName)) } else { var iPk int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 60)), 5) @@ -127191,7 +127250,7 @@ var i int32 for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { var z uintptr = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*4)) - zList = rbuMPrintf(tls, p, ts+28776, libc.VaList(bp, zList, zSep, z)) + zList = rbuMPrintf(tls, p, ts+28823, libc.VaList(bp, zList, zSep, z)) zSep = ts + 14614 } return zList @@ -127209,7 +127268,7 @@ for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { if int32(*(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(i)))) == iPk { var zCol uintptr = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*4)) - zRet = rbuMPrintf(tls, p, ts+28785, libc.VaList(bp, zRet, zSep, zPre, zCol, zPost)) + zRet = rbuMPrintf(tls, p, ts+28832, libc.VaList(bp, zRet, zSep, zPre, zCol, zPost)) zSep = zSeparator break } @@ -127231,25 +127290,25 @@ if bRowid != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+72, p+36, Xsqlite3_mprintf(tls, - ts+28798, libc.VaList(bp, zWrite, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) + ts+28845, libc.VaList(bp, zWrite, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 72))) { var iMax Sqlite3_int64 = Xsqlite3_column_int64(tls, *(*uintptr)(unsafe.Pointer(bp + 72)), 0) - zRet = rbuMPrintf(tls, p, ts+28830, libc.VaList(bp+16, iMax)) + zRet = rbuMPrintf(tls, p, ts+28877, libc.VaList(bp+16, iMax)) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 72))) } else { - var zOrder uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+1554, ts+14614, ts+28853) - var zSelect uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+28859, ts+28866, ts+4957) + var zOrder uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+1554, ts+14614, ts+28900) + var zSelect uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+28906, ts+28913, ts+4957) var zList uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+1554, ts+14614, ts+1554) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+72, p+36, Xsqlite3_mprintf(tls, - ts+28874, + ts+28921, libc.VaList(bp+24, zSelect, zWrite, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zOrder))) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 72))) { var zVal uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 72)), 0) - zRet = rbuMPrintf(tls, p, ts+28916, libc.VaList(bp+56, zList, zVal)) + zRet = rbuMPrintf(tls, p, ts+28963, libc.VaList(bp+56, zList, zVal)) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 
72))) } @@ -127291,7 +127350,7 @@ *(*uintptr)(unsafe.Pointer(bp + 176)) = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+176, p+36, - Xsqlite3_mprintf(tls, ts+28613, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx))) + Xsqlite3_mprintf(tls, ts+28660, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx))) __1: if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 176)))) { goto __2 @@ -127326,7 +127385,7 @@ zCol = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*4)) goto __7 __6: - zCol = ts + 28936 + zCol = ts + 28983 __7: ; goto __5 @@ -127334,11 +127393,11 @@ zCol = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(iCid)*4)) __5: ; - zLhs = rbuMPrintf(tls, p, ts+28944, + zLhs = rbuMPrintf(tls, p, ts+28991, libc.VaList(bp+8, zLhs, zSep, zCol, zCollate)) - zOrder = rbuMPrintf(tls, p, ts+28965, + zOrder = rbuMPrintf(tls, p, ts+29012, libc.VaList(bp+40, zOrder, zSep, iCol, zCol, zCollate)) - zSelect = rbuMPrintf(tls, p, ts+29001, + zSelect = rbuMPrintf(tls, p, ts+29048, libc.VaList(bp+80, zSelect, zSep, iCol, zCol)) zSep = ts + 14614 iCol++ @@ -127358,7 +127417,7 @@ *(*uintptr)(unsafe.Pointer(bp + 180)) = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+180, p+36, - Xsqlite3_mprintf(tls, ts+29028, + Xsqlite3_mprintf(tls, ts+29075, libc.VaList(bp+112, zSelect, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zOrder))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 180)))) { goto __13 @@ -127385,7 +127444,7 @@ ; __18: ; - zVector = rbuMPrintf(tls, p, ts+29076, libc.VaList(bp+136, zVector, zSep, zQuoted)) + zVector = rbuMPrintf(tls, p, ts+29123, libc.VaList(bp+136, zVector, zSep, zQuoted)) zSep = ts + 14614 goto __15 __15: @@ -127397,7 +127456,7 @@ if !!(bFailed != 0) { goto __20 } - zRet = rbuMPrintf(tls, p, ts+29083, libc.VaList(bp+160, zLhs, zVector)) + zRet = rbuMPrintf(tls, p, ts+29130, libc.VaList(bp+160, zLhs, zVector)) __20: ; __13: @@ -127430,7 +127489,7 @@ if rc == SQLITE_OK { rc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+200, p+36, - Xsqlite3_mprintf(tls, ts+28613, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx))) + Xsqlite3_mprintf(tls, ts+28660, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx))) } for rc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 200))) { @@ -127442,7 +127501,7 @@ if iCid == -2 { var iSeq int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 200)), 0) - zRet = Xsqlite3_mprintf(tls, ts+29095, libc.VaList(bp+8, zRet, zCom, + zRet = Xsqlite3_mprintf(tls, ts+29142, libc.VaList(bp+8, zRet, zCom, (*RbuSpan)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FaIdxCol+uintptr(iSeq)*8)).FnSpan, (*RbuSpan)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FaIdxCol+uintptr(iSeq)*8)).FzSpan, zCollate)) zType = ts + 1554 } else { @@ -127454,37 +127513,37 @@ zCol = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*4)) } else if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - zCol = ts + 28936 + zCol = ts + 28983 } else { - zCol = ts + 28666 + zCol = ts + 28713 } zType = ts + 1119 } else { zCol = 
*(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(iCid)*4)) zType = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblType + uintptr(iCid)*4)) } - zRet = Xsqlite3_mprintf(tls, ts+29117, libc.VaList(bp+48, zRet, zCom, zCol, zCollate)) + zRet = Xsqlite3_mprintf(tls, ts+29164, libc.VaList(bp+48, zRet, zCom, zCol, zCollate)) } if (*RbuObjIter)(unsafe.Pointer(pIter)).FbUnique == 0 || Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 200)), 5) != 0 { var zOrder uintptr = func() uintptr { if bDesc != 0 { - return ts + 28853 + return ts + 28900 } return ts + 1554 }() - zImpPK = Xsqlite3_mprintf(tls, ts+29137, + zImpPK = Xsqlite3_mprintf(tls, ts+29184, libc.VaList(bp+80, zImpPK, zCom, nBind, zCol, zOrder)) } - zImpCols = Xsqlite3_mprintf(tls, ts+29158, + zImpCols = Xsqlite3_mprintf(tls, ts+29205, libc.VaList(bp+120, zImpCols, zCom, nBind, zCol, zType, zCollate)) zWhere = Xsqlite3_mprintf(tls, - ts+29191, libc.VaList(bp+168, zWhere, zAnd, nBind, zCol)) + ts+29238, libc.VaList(bp+168, zWhere, zAnd, nBind, zCol)) if zRet == uintptr(0) || zImpPK == uintptr(0) || zImpCols == uintptr(0) || zWhere == uintptr(0) { rc = SQLITE_NOMEM } zCom = ts + 14614 - zAnd = ts + 21525 + zAnd = ts + 21572 nBind++ } @@ -127523,9 +127582,9 @@ for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { if *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed + uintptr(i))) != 0 { var zCol uintptr = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*4)) - zList = Xsqlite3_mprintf(tls, ts+29215, libc.VaList(bp, zList, zS, zObj, zCol)) + zList = Xsqlite3_mprintf(tls, ts+29262, libc.VaList(bp, zList, zS, zObj, zCol)) } else { - zList = Xsqlite3_mprintf(tls, ts+29227, libc.VaList(bp+32, zList, zS)) + zList = Xsqlite3_mprintf(tls, ts+29274, libc.VaList(bp+32, zList, zS)) } zS = ts + 14614 if zList == uintptr(0) { @@ -127535,7 +127594,7 @@ } if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { - zList = rbuMPrintf(tls, p, ts+29236, libc.VaList(bp+48, zList, zObj)) + zList = rbuMPrintf(tls, p, ts+29283, libc.VaList(bp+48, zList, zObj)) } } return zList @@ -127547,18 +127606,18 @@ var zList uintptr = uintptr(0) if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_VTAB || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { - zList = rbuMPrintf(tls, p, ts+29251, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol+1)) + zList = rbuMPrintf(tls, p, ts+29298, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol+1)) } else if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL { var zSep uintptr = ts + 1554 var i int32 for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { if *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(i))) != 0 { - zList = rbuMPrintf(tls, p, ts+29265, libc.VaList(bp+8, zList, zSep, i, i+1)) - zSep = ts + 21525 + zList = rbuMPrintf(tls, p, ts+29312, libc.VaList(bp+8, zList, zSep, i, i+1)) + zSep = ts + 21572 } } zList = rbuMPrintf(tls, p, - ts+29277, libc.VaList(bp+40, zList)) + ts+29324, libc.VaList(bp+40, zList)) } else { var zSep uintptr = ts + 1554 @@ -127566,8 +127625,8 @@ for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { if *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(i))) != 0 { var zCol uintptr = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + 
uintptr(i)*4)) - zList = rbuMPrintf(tls, p, ts+29327, libc.VaList(bp+48, zList, zSep, zCol, i+1)) - zSep = ts + 21525 + zList = rbuMPrintf(tls, p, ts+29374, libc.VaList(bp+48, zList, zSep, zCol, i+1)) + zSep = ts + 21572 } } } @@ -127576,7 +127635,7 @@ func rbuBadControlError(tls *libc.TLS, p uintptr) { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+29340, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+29387, 0) } func rbuObjIterGetSetlist(tls *libc.TLS, p uintptr, pIter uintptr, zMask uintptr) uintptr { @@ -127594,15 +127653,15 @@ for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { var c uint8 = *(*uint8)(unsafe.Pointer(zMask + uintptr(*(*int32)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FaiSrcOrder + uintptr(i)*4))))) if int32(c) == 'x' { - zList = rbuMPrintf(tls, p, ts+29327, + zList = rbuMPrintf(tls, p, ts+29374, libc.VaList(bp, zList, zSep, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*4)), i+1)) zSep = ts + 14614 } else if int32(c) == 'd' { - zList = rbuMPrintf(tls, p, ts+29366, + zList = rbuMPrintf(tls, p, ts+29413, libc.VaList(bp+32, zList, zSep, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*4)), *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*4)), i+1)) zSep = ts + 14614 } else if int32(c) == 'f' { - zList = rbuMPrintf(tls, p, ts+29396, + zList = rbuMPrintf(tls, p, ts+29443, libc.VaList(bp+72, zList, zSep, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*4)), *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*4)), i+1)) zSep = ts + 14614 } @@ -127639,19 +127698,19 @@ var z uintptr = uintptr(0) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - var zSep uintptr = ts + 29433 + var zSep uintptr = ts + 29480 *(*uintptr)(unsafe.Pointer(bp + 56)) = uintptr(0) *(*uintptr)(unsafe.Pointer(bp + 60)) = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+56, p+36, - Xsqlite3_mprintf(tls, ts+28585, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) + Xsqlite3_mprintf(tls, ts+28632, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 56))) { var zOrig uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), 3) if zOrig != 0 && libc.Xstrcmp(tls, zOrig, ts+16155) == 0 { var zIdx uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), 1) if zIdx != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+60, p+36, - Xsqlite3_mprintf(tls, ts+28613, libc.VaList(bp+8, zIdx))) + Xsqlite3_mprintf(tls, ts+28660, libc.VaList(bp+8, zIdx))) } break } @@ -127663,15 +127722,15 @@ var zCol uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 60)), 2) var zDesc uintptr if Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 60)), 3) != 0 { - zDesc = ts + 28853 + zDesc = ts + 28900 } else { zDesc = ts + 1554 } - z = rbuMPrintf(tls, p, ts+29446, libc.VaList(bp+16, z, zSep, zCol, zDesc)) + z = rbuMPrintf(tls, p, ts+29493, libc.VaList(bp+16, z, zSep, zCol, zDesc)) zSep = ts + 14614 } } - z = rbuMPrintf(tls, p, ts+29457, libc.VaList(bp+48, z)) + z = 
rbuMPrintf(tls, p, ts+29504, libc.VaList(bp+48, z)) rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 60))) } return z @@ -127691,7 +127750,7 @@ var zPk uintptr = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+168, p+36, - ts+29461) + ts+29508) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { Xsqlite3_bind_int(tls, *(*uintptr)(unsafe.Pointer(bp + 168)), 1, tnum) if SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 168))) { @@ -127700,7 +127759,7 @@ } if zIdx != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+172, p+36, - Xsqlite3_mprintf(tls, ts+28613, libc.VaList(bp, zIdx))) + Xsqlite3_mprintf(tls, ts+28660, libc.VaList(bp, zIdx))) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 168))) @@ -127710,23 +127769,23 @@ var iCid int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 172)), 1) var bDesc int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 172)), 3) var zCollate uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 172)), 4) - zCols = rbuMPrintf(tls, p, ts+29511, libc.VaList(bp+8, zCols, zComma, + zCols = rbuMPrintf(tls, p, ts+29558, libc.VaList(bp+8, zCols, zComma, iCid, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblType + uintptr(iCid)*4)), zCollate)) - zPk = rbuMPrintf(tls, p, ts+29533, libc.VaList(bp+48, zPk, zComma, iCid, func() uintptr { + zPk = rbuMPrintf(tls, p, ts+29580, libc.VaList(bp+48, zPk, zComma, iCid, func() uintptr { if bDesc != 0 { - return ts + 28853 + return ts + 28900 } return ts + 1554 }())) zComma = ts + 14614 } } - zCols = rbuMPrintf(tls, p, ts+29543, libc.VaList(bp+80, zCols)) + zCols = rbuMPrintf(tls, p, ts+29590, libc.VaList(bp+80, zCols)) rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 172))) Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+88, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6441, 1, tnum)) rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+29558, + ts+29605, libc.VaList(bp+120, zCols, zPk)) Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+136, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6441, 0, 0)) } @@ -127752,13 +127811,13 @@ (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6441, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zCol, uintptr(0), bp+192, uintptr(0), uintptr(0), uintptr(0)) if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_IPK && *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(iCol))) != 0 { - zPk = ts + 29620 + zPk = ts + 29667 } - zSql = rbuMPrintf(tls, p, ts+29633, + zSql = rbuMPrintf(tls, p, ts+29680, libc.VaList(bp+32, zSql, zComma, zCol, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblType + uintptr(iCol)*4)), zPk, *(*uintptr)(unsafe.Pointer(bp + 192)), func() uintptr { if *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabNotNull + uintptr(iCol))) != 0 { - return ts + 29660 + return ts + 29707 } return ts + 1554 }())) @@ -127768,16 +127827,16 @@ if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_WITHOUT_ROWID { var zPk uintptr = rbuWithoutRowidPK(tls, p, pIter) if zPk != 0 { - zSql = rbuMPrintf(tls, p, ts+29670, libc.VaList(bp+88, zSql, zPk)) + zSql = rbuMPrintf(tls, p, ts+29717, libc.VaList(bp+88, zSql, zPk)) } } Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+104, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6441, 1, tnum)) - 
rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+29677, + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+29724, libc.VaList(bp+136, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zSql, func() uintptr { if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_WITHOUT_ROWID { - return ts + 29709 + return ts + 29756 } return ts + 1554 }())) @@ -127794,7 +127853,7 @@ if zBind != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, pIter+88, p+36, Xsqlite3_mprintf(tls, - ts+29724, + ts+29771, libc.VaList(bp, p+24, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zCollist, zRbuRowid, zBind))) } } @@ -127831,7 +127890,7 @@ if *(*int32)(unsafe.Pointer(bp + 4)) == SQLITE_OK { *(*int32)(unsafe.Pointer(bp + 4)) = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp, p+36, - ts+29781) + ts+29828) } if *(*int32)(unsafe.Pointer(bp + 4)) == SQLITE_OK { var rc2 int32 @@ -127936,7 +127995,7 @@ var zLimit uintptr = uintptr(0) if nOffset != 0 { - zLimit = Xsqlite3_mprintf(tls, ts+29847, libc.VaList(bp, nOffset)) + zLimit = Xsqlite3_mprintf(tls, ts+29894, libc.VaList(bp, nOffset)) if !(zLimit != 0) { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_NOMEM } @@ -127959,7 +128018,7 @@ Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+8, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6441, 0, 1)) Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+40, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6441, 1, tnum)) rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+29867, + ts+29914, libc.VaList(bp+72, zTbl, *(*uintptr)(unsafe.Pointer(bp + 600)), *(*uintptr)(unsafe.Pointer(bp + 604)))) Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+96, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6441, 0, 0)) @@ -127967,13 +128026,13 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+80, p+36, - Xsqlite3_mprintf(tls, ts+29932, libc.VaList(bp+128, zTbl, zBind))) + Xsqlite3_mprintf(tls, ts+29979, libc.VaList(bp+128, zTbl, zBind))) } if libc.Bool32((*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0)) == 0 && (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+84, p+36, - Xsqlite3_mprintf(tls, ts+29968, libc.VaList(bp+144, zTbl, *(*uintptr)(unsafe.Pointer(bp + 608))))) + Xsqlite3_mprintf(tls, ts+30015, libc.VaList(bp+144, zTbl, *(*uintptr)(unsafe.Pointer(bp + 608))))) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { @@ -127989,7 +128048,7 @@ } zSql = Xsqlite3_mprintf(tls, - ts+30002, + ts+30049, libc.VaList(bp+160, zCollist, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zPart, @@ -127997,9 +128056,9 @@ if zStart != 0 { return func() uintptr { if zPart != 0 { - return ts + 30063 + return ts + 30110 } - return ts + 30067 + return ts + 30114 }() } return ts + 1554 @@ -128008,20 +128067,20 @@ Xsqlite3_free(tls, zStart) } else if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { zSql = Xsqlite3_mprintf(tls, - ts+30073, + ts+30120, libc.VaList(bp+216, zCollist, p+24, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zPart, zCollist, zLimit)) } else { zSql = Xsqlite3_mprintf(tls, - ts+30134, + ts+30181, libc.VaList(bp+264, zCollist, 
p+24, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zPart, zCollist, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zPart, func() uintptr { if zPart != 0 { - return ts + 30063 + return ts + 30110 } - return ts + 30067 + return ts + 30114 }(), zCollist, zLimit)) } @@ -128058,16 +128117,16 @@ if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_VTAB { return ts + 1554 } - return ts + 30293 + return ts + 30340 }() if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+80, pz, Xsqlite3_mprintf(tls, - ts+30302, + ts+30349, libc.VaList(bp+344, zWrite, zTbl, zCollist, func() uintptr { if bRbuRowid != 0 { - return ts + 30338 + return ts + 30385 } return ts + 1554 }(), zBindings))) @@ -128076,32 +128135,32 @@ if libc.Bool32((*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0)) == 0 && (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+84, pz, Xsqlite3_mprintf(tls, - ts+30348, libc.VaList(bp+384, zWrite, zTbl, zWhere))) + ts+30395, libc.VaList(bp+384, zWrite, zTbl, zWhere))) } if libc.Bool32((*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0)) == 0 && (*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed != 0 { var zRbuRowid uintptr = ts + 1554 if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { - zRbuRowid = ts + 30376 + zRbuRowid = ts + 30423 } rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, - ts+30388, libc.VaList(bp+408, p+24, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, func() uintptr { + ts+30435, libc.VaList(bp+408, p+24, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, func() uintptr { if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL { - return ts + 30464 + return ts + 30511 } return ts + 1554 }(), (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl)) rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+30481, + ts+30528, libc.VaList(bp+440, zWrite, zTbl, zOldlist, zWrite, zTbl, zOldlist, zWrite, zTbl, zNewlist)) if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+30780, + ts+30827, libc.VaList(bp+512, zWrite, zTbl, zNewlist)) } @@ -128114,9 +128173,9 @@ var zOrder uintptr = uintptr(0) if bRbuRowid != 0 { if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - zRbuRowid = ts + 30879 + zRbuRowid = ts + 30926 } else { - zRbuRowid = ts + 30889 + zRbuRowid = ts + 30936 } } @@ -128129,7 +128188,7 @@ } } if bRbuRowid != 0 { - zOrder = rbuMPrintf(tls, p, ts+28936, 0) + zOrder = rbuMPrintf(tls, p, ts+28983, 0) } else { zOrder = rbuObjIterGetPkList(tls, p, pIter, ts+1554, ts+14614, ts+1554) } @@ -128138,11 +128197,11 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, pIter+76, pz, Xsqlite3_mprintf(tls, - ts+30900, + ts+30947, libc.VaList(bp+536, zCollist, func() uintptr { if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - return ts + 30948 + return ts + 30995 } return ts + 1554 }(), @@ -128155,7 +128214,7 @@ }(), func() uintptr { if zOrder != 0 { - return ts + 22859 + return ts + 22906 } return ts + 1554 }(), zOrder, @@ -128223,9 +128282,9 @@ var zPrefix 
uintptr = ts + 1554 if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType != RBU_PK_VTAB { - zPrefix = ts + 30293 + zPrefix = ts + 30340 } - zUpdate = Xsqlite3_mprintf(tls, ts+30954, + zUpdate = Xsqlite3_mprintf(tls, ts+31001, libc.VaList(bp, zPrefix, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zSet, zWhere)) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pUp+4, p+36, zUpdate) @@ -128284,7 +128343,7 @@ } *(*int32)(unsafe.Pointer(bp + 12)) = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp+8, p+36, - Xsqlite3_mprintf(tls, ts+30984, libc.VaList(bp, p+24))) + Xsqlite3_mprintf(tls, ts+31031, libc.VaList(bp, p+24))) for *(*int32)(unsafe.Pointer(bp + 12)) == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 8))) { switch Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 8)), 0) { case RBU_STATE_STAGE: @@ -128357,18 +128416,18 @@ Xsqlite3_file_control(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+6441, SQLITE_FCNTL_RBUCNT, p) if (*Sqlite3rbu)(unsafe.Pointer(p)).FzState == uintptr(0) { var zFile uintptr = Xsqlite3_db_filename(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+6441) - (*Sqlite3rbu)(unsafe.Pointer(p)).FzState = rbuMPrintf(tls, p, ts+31014, libc.VaList(bp, zFile, zFile)) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzState = rbuMPrintf(tls, p, ts+31061, libc.VaList(bp, zFile, zFile)) } } if (*Sqlite3rbu)(unsafe.Pointer(p)).FzState != 0 { - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+31042, libc.VaList(bp+16, (*Sqlite3rbu)(unsafe.Pointer(p)).FzState)) + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+31089, libc.VaList(bp+16, (*Sqlite3rbu)(unsafe.Pointer(p)).FzState)) libc.Xmemcpy(tls, p+24, ts+3286, uint32(4)) } else { libc.Xmemcpy(tls, p+24, ts+6441, uint32(4)) } - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+31060, libc.VaList(bp+24, p+24)) + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+31107, libc.VaList(bp+24, p+24)) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { var bOpen int32 = 0 @@ -128408,11 +128467,11 @@ return } (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31126, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31173, 0) } else { var zTarget uintptr var zExtra uintptr = uintptr(0) - if libc.Xstrlen(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu) >= Size_t(5) && 0 == libc.Xmemcmp(tls, ts+24206, (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu, uint32(5)) { + if libc.Xstrlen(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu) >= Size_t(5) && 0 == libc.Xmemcmp(tls, ts+24253, (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu, uint32(5)) { zExtra = (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu + 5 for *(*uint8)(unsafe.Pointer(zExtra)) != 0 { if int32(*(*uint8)(unsafe.Pointer(libc.PostIncUintptr(&zExtra, 1)))) == '?' 
{ @@ -128424,13 +128483,13 @@ } } - zTarget = Xsqlite3_mprintf(tls, ts+31158, + zTarget = Xsqlite3_mprintf(tls, ts+31205, libc.VaList(bp+32, Xsqlite3_db_filename(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+6441), func() uintptr { if zExtra == uintptr(0) { return ts + 1554 } - return ts + 31190 + return ts + 31237 }(), func() uintptr { if zExtra == uintptr(0) { return ts + 1554 @@ -128449,21 +128508,21 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_create_function(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+31192, -1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { + ts+31239, -1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rbuTmpInsertFunc})), uintptr(0), uintptr(0)) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_create_function(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+31207, 2, SQLITE_UTF8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + ts+31254, 2, SQLITE_UTF8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rbuFossilDeltaFunc})), uintptr(0), uintptr(0)) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_create_function(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, - ts+31224, -1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { + ts+31271, -1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rbuTargetNameFunc})), uintptr(0), uintptr(0)) } @@ -128471,7 +128530,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_file_control(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6441, SQLITE_FCNTL_RBU, p) } - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31240, 0) + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31287, 0) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_file_control(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6441, SQLITE_FCNTL_RBU, p) @@ -128479,7 +128538,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_NOTFOUND { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31268, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31315, 0) } } @@ -128508,14 +128567,14 @@ if pState == uintptr(0) { (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage = 0 if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31240, uintptr(0), uintptr(0), uintptr(0)) + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31287, uintptr(0), uintptr(0), uintptr(0)) } } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { var rc2 int32 (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage = RBU_STAGE_CAPTURE - rc2 = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31286, uintptr(0), uintptr(0), uintptr(0)) + rc2 = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31333, uintptr(0), uintptr(0), uintptr(0)) if rc2 != SQLITE_NOTICE { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = rc2 } @@ -128641,7 +128700,7 @@ func rbuExclusiveCheckpoint(tls *libc.TLS, db uintptr) int32 { var zUri uintptr = Xsqlite3_db_filename(tls, db, uintptr(0)) - return 
Xsqlite3_uri_boolean(tls, zUri, ts+31321, 0) + return Xsqlite3_uri_boolean(tls, zUri, ts+31368, 0) } func rbuMoveOalFile(tls *libc.TLS, p uintptr) { @@ -128656,8 +128715,8 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { zMove = Xsqlite3_db_filename(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+6441) } - zOal = Xsqlite3_mprintf(tls, ts+31346, libc.VaList(bp, zMove)) - zWal = Xsqlite3_mprintf(tls, ts+31353, libc.VaList(bp+8, zMove)) + zOal = Xsqlite3_mprintf(tls, ts+31393, libc.VaList(bp, zMove)) + zWal = Xsqlite3_mprintf(tls, ts+31400, libc.VaList(bp+8, zMove)) if zWal == uintptr(0) || zOal == uintptr(0) { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_NOMEM @@ -128774,7 +128833,7 @@ (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx == uintptr(0) && (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_IPK && *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(i))) != 0 && Xsqlite3_column_type(tls, (*RbuObjIter)(unsafe.Pointer(pIter)).FpSelect, i) == SQLITE_NULL { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_MISMATCH - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+23844, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+23891, 0) return } @@ -128867,7 +128926,7 @@ var iCookie int32 = 1000000 (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, dbread, bp+8, p+36, - ts+31360) + ts+31407) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { if SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 8))) { iCookie = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 8)), 0) @@ -128875,7 +128934,7 @@ rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 8))) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31382, libc.VaList(bp, iCookie+1)) + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31429, libc.VaList(bp, iCookie+1)) } } } @@ -128896,7 +128955,7 @@ rc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp+168, p+36, Xsqlite3_mprintf(tls, - ts+31409, + ts+31456, libc.VaList(bp, p+24, RBU_STATE_STAGE, eStage, RBU_STATE_TBL, (*Sqlite3rbu)(unsafe.Pointer(p)).Fobjiter.FzTbl, @@ -128926,9 +128985,9 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { *(*uintptr)(unsafe.Pointer(bp + 24)) = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp+24, p+36, - Xsqlite3_mprintf(tls, ts+31567, libc.VaList(bp, zPragma))) + Xsqlite3_mprintf(tls, ts+31614, libc.VaList(bp, zPragma))) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) { - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31582, + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31629, libc.VaList(bp+8, zPragma, Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 24)), 0))) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 24))) @@ -128942,10 +129001,10 @@ *(*uintptr)(unsafe.Pointer(bp)) = uintptr(0) *(*uintptr)(unsafe.Pointer(bp + 4)) = uintptr(0) - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31602, uintptr(0), uintptr(0), p+36) + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31649, uintptr(0), uintptr(0), p+36) if 
(*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp, p+36, - ts+31627) + ts+31674) } for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp))) == SQLITE_ROW { @@ -128959,12 +129018,12 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp, p+36, - ts+31735) + ts+31782) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+4, p+36, - ts+31800) + ts+31847) } for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp))) == SQLITE_ROW { @@ -128976,7 +129035,7 @@ (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_reset(tls, *(*uintptr)(unsafe.Pointer(bp + 4))) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31844, uintptr(0), uintptr(0), p+36) + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31891, uintptr(0), uintptr(0), p+36) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp))) @@ -129004,7 +129063,7 @@ if (*RbuObjIter)(unsafe.Pointer(pIter)).FbCleanup != 0 { if libc.Bool32((*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0)) == 0 && (*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed != 0 { rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, - ts+31869, libc.VaList(bp, p+24, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl)) + ts+31916, libc.VaList(bp, p+24, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl)) } } else { rbuObjIterPrepareAll(tls, p, pIter, 0) @@ -129126,7 +129185,7 @@ if rc == SQLITE_OK && !(int32((*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl) != 0) { rc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31897, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31944, 0) } if rc == SQLITE_OK { @@ -129142,7 +129201,7 @@ bp := tls.Alloc(12) defer tls.Free(12) - var zOal uintptr = rbuMPrintf(tls, p, ts+31346, libc.VaList(bp, (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget)) + var zOal uintptr = rbuMPrintf(tls, p, ts+31393, libc.VaList(bp, (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget)) if zOal != 0 { *(*uintptr)(unsafe.Pointer(bp + 8)) = uintptr(0) Xsqlite3_file_control(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6441, SQLITE_FCNTL_VFS_POINTER, bp+8) @@ -129159,7 +129218,7 @@ defer tls.Free(76) Xsqlite3_randomness(tls, int32(unsafe.Sizeof(int32(0))), bp+8) - Xsqlite3_snprintf(tls, int32(unsafe.Sizeof([64]uint8{})), bp+12, ts+31922, libc.VaList(bp, *(*int32)(unsafe.Pointer(bp + 8)))) + Xsqlite3_snprintf(tls, int32(unsafe.Sizeof([64]uint8{})), bp+12, ts+31969, libc.VaList(bp, *(*int32)(unsafe.Pointer(bp + 8)))) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3rbu_create_vfs(tls, bp+12, uintptr(0)) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { var pVfs uintptr = Xsqlite3_vfs_find(tls, bp+12) @@ -129193,7 +129252,7 @@ rc = prepareFreeAndCollectError(tls, db, bp+8, bp+12, Xsqlite3_mprintf(tls, - ts+31933, libc.VaList(bp, Xsqlite3_value_text(tls, *(*uintptr)(unsafe.Pointer(apVal)))))) + ts+31980, libc.VaList(bp, Xsqlite3_value_text(tls, *(*uintptr)(unsafe.Pointer(apVal)))))) if rc != SQLITE_OK { Xsqlite3_result_error(tls, 
pCtx, *(*uintptr)(unsafe.Pointer(bp + 12)), -1) } else { @@ -129223,13 +129282,13 @@ (*Sqlite3rbu)(unsafe.Pointer(p)).FnPhaseOneStep = int64(-1) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_create_function(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, - ts+32005, 1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { + ts+32052, 1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rbuIndexCntFunc})), uintptr(0), uintptr(0)) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp, p+36, - ts+32019) + ts+32066) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { if SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp))) { @@ -129240,7 +129299,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && bExists != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp, p+36, - ts+32076) + ts+32123) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { if SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp))) { (*Sqlite3rbu)(unsafe.Pointer(p)).FnPhaseOneStep = Xsqlite3_column_int64(tls, *(*uintptr)(unsafe.Pointer(bp)), 0) @@ -129314,7 +129373,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && (*Rbu_file)(unsafe.Pointer((*Sqlite3rbu)(unsafe.Pointer(p)).FpTargetFd)).FpWalFd != 0 { if (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage == RBU_STAGE_OAL { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32150, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32197, 0) } else if (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage == RBU_STAGE_MOVE { (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage = RBU_STAGE_CKPT (*Sqlite3rbu)(unsafe.Pointer(p)).FnStep = 0 @@ -129332,12 +129391,12 @@ }() if (*Rbu_file)(unsafe.Pointer(pFd)).FiCookie != (*RbuState)(unsafe.Pointer(pState)).FiCookie { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_BUSY - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32182, + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32229, libc.VaList(bp+8, func() uintptr { if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - return ts + 32214 + return ts + 32261 } - return ts + 32221 + return ts + 32268 }())) } } @@ -129361,14 +129420,14 @@ } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, db, ts+32228, uintptr(0), uintptr(0), p+36) + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, db, ts+32275, uintptr(0), uintptr(0), p+36) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { var frc int32 = Xsqlite3_file_control(tls, db, ts+6441, SQLITE_FCNTL_ZIPVFS, uintptr(0)) if frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, - db, ts+32244, uintptr(0), uintptr(0), p+36) + db, ts+32291, uintptr(0), uintptr(0), p+36) } } @@ -129422,7 +129481,7 @@ } if zState != 0 { var n int32 = int32(libc.Xstrlen(tls, zState)) - if n >= 7 && 0 == libc.Xmemcmp(tls, ts+32268, zState+uintptr(n-7), uint32(7)) { + if n >= 7 && 0 == libc.Xmemcmp(tls, ts+32315, zState+uintptr(n-7), uint32(7)) { return rbuMisuseError(tls) } } @@ -129449,7 +129508,7 @@ var i uint32 var nErrmsg Size_t = libc.Xstrlen(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg) for i = uint32(0); i < nErrmsg-Size_t(8); i++ { - if libc.Xmemcmp(tls, 
(*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg+uintptr(i), ts+30293, uint32(8)) == 0 { + if libc.Xmemcmp(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg+uintptr(i), ts+30340, uint32(8)) == 0 { var nDel int32 = 8 for int32(*(*uint8)(unsafe.Pointer((*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg + uintptr(i+uint32(nDel))))) >= '0' && int32(*(*uint8)(unsafe.Pointer((*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg + uintptr(i+uint32(nDel))))) <= '9' { nDel++ @@ -129485,7 +129544,7 @@ rbuObjIterFinalize(tls, p+48) if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) && (*Sqlite3rbu)(unsafe.Pointer(p)).Frc != SQLITE_OK && (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu != 0 { - var rc2 int32 = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+32276, uintptr(0), uintptr(0), uintptr(0)) + var rc2 int32 = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+32323, uintptr(0), uintptr(0), uintptr(0)) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_DONE && rc2 != SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = rc2 } @@ -129604,12 +129663,12 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { zBegin = ts + 14503 } else { - zBegin = ts + 32228 + zBegin = ts + 32275 } rc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, zBegin, uintptr(0), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { - rc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+32228, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+32275, uintptr(0), uintptr(0), uintptr(0)) } } @@ -129955,7 +130014,7 @@ })(unsafe.Pointer(&struct{ uintptr }{xControl})).f(tls, (*Rbu_file)(unsafe.Pointer(p)).FpReal, SQLITE_FCNTL_ZIPVFS, bp+16) if rc == SQLITE_OK { rc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(pRbu)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32303, 0) + (*Sqlite3rbu)(unsafe.Pointer(pRbu)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32350, 0) } else if rc == SQLITE_NOTFOUND { (*Sqlite3rbu)(unsafe.Pointer(pRbu)).FpTargetFd = p (*Rbu_file)(unsafe.Pointer(p)).FpRbu = pRbu @@ -129980,7 +130039,7 @@ if rc == SQLITE_OK && op == SQLITE_FCNTL_VFSNAME { var pRbuVfs uintptr = (*Rbu_file)(unsafe.Pointer(p)).FpRbuVfs var zIn uintptr = *(*uintptr)(unsafe.Pointer(pArg)) - var zOut uintptr = Xsqlite3_mprintf(tls, ts+32326, libc.VaList(bp, (*Rbu_vfs)(unsafe.Pointer(pRbuVfs)).Fbase.FzName, zIn)) + var zOut uintptr = Xsqlite3_mprintf(tls, ts+32373, libc.VaList(bp, (*Rbu_vfs)(unsafe.Pointer(pRbuVfs)).Fbase.FzName, zIn)) *(*uintptr)(unsafe.Pointer(pArg)) = zOut if zOut == uintptr(0) { rc = SQLITE_NOMEM @@ -130140,7 +130199,7 @@ } if oflags&SQLITE_OPEN_MAIN_DB != 0 && - Xsqlite3_uri_boolean(tls, zName, ts+32337, 0) != 0 { + Xsqlite3_uri_boolean(tls, zName, ts+32384, 0) != 0 { oflags = SQLITE_OPEN_TEMP_DB | SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE | SQLITE_OPEN_EXCLUSIVE | SQLITE_OPEN_DELETEONCLOSE zOpen = uintptr(0) } @@ -130965,7 +131024,7 @@ rc = Xsqlite3_table_column_metadata(tls, db, zDb, zThis, uintptr(0), uintptr(0), uintptr(0), uintptr(0), uintptr(0), uintptr(0)) if rc == SQLITE_OK { zPragma = Xsqlite3_mprintf(tls, - ts+32348, 0) + ts+32395, 0) } else if rc == SQLITE_ERROR { zPragma = Xsqlite3_mprintf(tls, ts+1554, 0) } else { @@ -130978,7 +131037,7 @@ return rc } } else { - zPragma = Xsqlite3_mprintf(tls, ts+32469, libc.VaList(bp, zDb, zThis)) + zPragma = Xsqlite3_mprintf(tls, ts+32516, libc.VaList(bp, zDb, zThis)) } if !(zPragma != 0) { *(*uintptr)(unsafe.Pointer(pazCol)) = uintptr(0) @@ -131657,9 +131716,9 @@ for i = 0; i < nCol; i++ { if 
*(*U8)(unsafe.Pointer(abPK + uintptr(i))) != 0 { - zRet = Xsqlite3_mprintf(tls, ts+32498, + zRet = Xsqlite3_mprintf(tls, ts+32545, libc.VaList(bp, zRet, zSep, zDb1, zTab, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*4)), zDb2, zTab, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*4)))) - zSep = ts + 21525 + zSep = ts + 21572 if zRet == uintptr(0) { break } @@ -131682,9 +131741,9 @@ if int32(*(*U8)(unsafe.Pointer(abPK + uintptr(i)))) == 0 { bHave = 1 zRet = Xsqlite3_mprintf(tls, - ts+32532, + ts+32579, libc.VaList(bp, zRet, zSep, zDb1, zTab, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*4)), zDb2, zTab, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*4)))) - zSep = ts + 32573 + zSep = ts + 32620 if zRet == uintptr(0) { break } @@ -131692,7 +131751,7 @@ } if bHave == 0 { - zRet = Xsqlite3_mprintf(tls, ts+7530, 0) + zRet = Xsqlite3_mprintf(tls, ts+7519, 0) } return zRet @@ -131703,7 +131762,7 @@ defer tls.Free(40) var zRet uintptr = Xsqlite3_mprintf(tls, - ts+32578, + ts+32625, libc.VaList(bp, zDb1, zTbl, zDb2, zTbl, zExpr)) return zRet } @@ -131746,7 +131805,7 @@ rc = SQLITE_NOMEM } else { var zStmt uintptr = Xsqlite3_mprintf(tls, - ts+32656, + ts+32703, libc.VaList(bp, (*Sqlite3_session)(unsafe.Pointer(pSession)).FzDb, (*SessionTable)(unsafe.Pointer(pTab)).FzName, zFrom, (*SessionTable)(unsafe.Pointer(pTab)).FzName, zExpr, zExpr2)) if zStmt == uintptr(0) { rc = SQLITE_NOMEM @@ -131873,7 +131932,7 @@ if !(pzErrMsg != 0) { goto __16 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+32709, 0) + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+32756, 0) __16: ; rc = SQLITE_SCHEMA @@ -132349,7 +132408,7 @@ if 0 == Xsqlite3_stricmp(tls, ts+11348, zTab) { zSql = Xsqlite3_mprintf(tls, - ts+32736, libc.VaList(bp, zDb)) + ts+32783, libc.VaList(bp, zDb)) if zSql == uintptr(0) { *(*int32)(unsafe.Pointer(bp + 20)) = SQLITE_NOMEM } @@ -132358,18 +132417,18 @@ var zSep uintptr = ts + 1554 *(*SessionBuffer)(unsafe.Pointer(bp + 8)) = SessionBuffer{} - sessionAppendStr(tls, bp+8, ts+32846, bp+20) + sessionAppendStr(tls, bp+8, ts+32893, bp+20) sessionAppendIdent(tls, bp+8, zDb, bp+20) sessionAppendStr(tls, bp+8, ts+1567, bp+20) sessionAppendIdent(tls, bp+8, zTab, bp+20) - sessionAppendStr(tls, bp+8, ts+32861, bp+20) + sessionAppendStr(tls, bp+8, ts+32908, bp+20) for i = 0; i < nCol; i++ { if *(*U8)(unsafe.Pointer(abPK + uintptr(i))) != 0 { sessionAppendStr(tls, bp+8, zSep, bp+20) sessionAppendIdent(tls, bp+8, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*4)), bp+20) - sessionAppendStr(tls, bp+8, ts+32869, bp+20) + sessionAppendStr(tls, bp+8, ts+32916, bp+20) sessionAppendInteger(tls, bp+8, i+1, bp+20) - zSep = ts + 21525 + zSep = ts + 21572 } } zSql = (*SessionBuffer)(unsafe.Pointer(bp + 8)).FaBuf @@ -132478,7 +132537,7 @@ if (*Sqlite3_session)(unsafe.Pointer(pSession)).Frc != 0 { return (*Sqlite3_session)(unsafe.Pointer(pSession)).Frc } - *(*int32)(unsafe.Pointer(bp + 24)) = Xsqlite3_exec(tls, (*Sqlite3_session)(unsafe.Pointer(pSession)).Fdb, ts+32875, uintptr(0), uintptr(0), uintptr(0)) + *(*int32)(unsafe.Pointer(bp + 24)) = Xsqlite3_exec(tls, (*Sqlite3_session)(unsafe.Pointer(pSession)).Fdb, ts+32922, uintptr(0), uintptr(0), uintptr(0)) if *(*int32)(unsafe.Pointer(bp + 24)) != SQLITE_OK { return *(*int32)(unsafe.Pointer(bp + 24)) } @@ -132570,7 +132629,7 @@ } Xsqlite3_free(tls, (*SessionBuffer)(unsafe.Pointer(bp+12)).FaBuf) - Xsqlite3_exec(tls, db, ts+32895, uintptr(0), uintptr(0), uintptr(0)) + Xsqlite3_exec(tls, db, ts+32942, uintptr(0), uintptr(0), uintptr(0)) 
Xsqlite3_mutex_leave(tls, Xsqlite3_db_mutex(tls, db)) return *(*int32)(unsafe.Pointer(bp + 24)) } @@ -132833,7 +132892,7 @@ rc = sessionInputBuffer(tls, pIn, 9) if rc == SQLITE_OK { if (*SessionInput)(unsafe.Pointer(pIn)).FiNext >= (*SessionInput)(unsafe.Pointer(pIn)).FnData { - rc = Xsqlite3CorruptError(tls, 219078) + rc = Xsqlite3CorruptError(tls, 219169) } else { eType = int32(*(*U8)(unsafe.Pointer((*SessionInput)(unsafe.Pointer(pIn)).FaData + uintptr(libc.PostIncInt32(&(*SessionInput)(unsafe.Pointer(pIn)).FiNext, 1))))) @@ -132856,7 +132915,7 @@ rc = sessionInputBuffer(tls, pIn, *(*int32)(unsafe.Pointer(bp))) if rc == SQLITE_OK { if *(*int32)(unsafe.Pointer(bp)) < 0 || *(*int32)(unsafe.Pointer(bp)) > (*SessionInput)(unsafe.Pointer(pIn)).FnData-(*SessionInput)(unsafe.Pointer(pIn)).FiNext { - rc = Xsqlite3CorruptError(tls, 219098) + rc = Xsqlite3CorruptError(tls, 219189) } else { var enc U8 = func() uint8 { if eType == SQLITE_TEXT { @@ -132898,7 +132957,7 @@ nRead = nRead + sessionVarintGet(tls, (*SessionInput)(unsafe.Pointer(pIn)).FaData+uintptr((*SessionInput)(unsafe.Pointer(pIn)).FiNext+nRead), bp) if *(*int32)(unsafe.Pointer(bp)) < 0 || *(*int32)(unsafe.Pointer(bp)) > 65536 { - rc = Xsqlite3CorruptError(tls, 219152) + rc = Xsqlite3CorruptError(tls, 219243) } else { rc = sessionInputBuffer(tls, pIn, nRead+*(*int32)(unsafe.Pointer(bp))+100) nRead = nRead + *(*int32)(unsafe.Pointer(bp)) @@ -132959,7 +133018,7 @@ (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Ftblhdr.FnBuf = 0 sessionBufferGrow(tls, p+44, int64(nByte), bp+4) } else { - *(*int32)(unsafe.Pointer(bp + 4)) = Xsqlite3CorruptError(tls, 219240) + *(*int32)(unsafe.Pointer(bp + 4)) = Xsqlite3CorruptError(tls, 219331) } } @@ -133033,13 +133092,13 @@ } if (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FzTab == uintptr(0) || (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FbPatchset != 0 && (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FbInvert != 0 { - return libc.AssignPtrInt32(p+68, Xsqlite3CorruptError(tls, 219326)) + return libc.AssignPtrInt32(p+68, Xsqlite3CorruptError(tls, 219417)) } (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fop = int32(op) (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FbIndirect = int32(*(*U8)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fin.FaData + uintptr(libc.PostIncInt32(&(*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fin.FiNext, 1))))) if (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fop != SQLITE_UPDATE && (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fop != SQLITE_DELETE && (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fop != SQLITE_INSERT { - return libc.AssignPtrInt32(p+68, Xsqlite3CorruptError(tls, 219332)) + return libc.AssignPtrInt32(p+68, Xsqlite3CorruptError(tls, 219423)) } if paRec != 0 { @@ -133101,7 +133160,7 @@ if *(*U8)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FabPK + uintptr(i))) != 0 { *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FapValue + uintptr(i)*4)) = *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FapValue + uintptr(i+(*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FnCol)*4)) if *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FapValue + uintptr(i)*4)) == uintptr(0) { - return libc.AssignPtrInt32(p+68, Xsqlite3CorruptError(tls, 219376)) + return libc.AssignPtrInt32(p+68, Xsqlite3CorruptError(tls, 219467)) } *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FapValue + uintptr(i+(*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FnCol)*4)) = uintptr(0) } @@ -133474,7 
+133533,7 @@ goto __6 __11: - *(*int32)(unsafe.Pointer(bp + 32)) = Xsqlite3CorruptError(tls, 219741) + *(*int32)(unsafe.Pointer(bp + 32)) = Xsqlite3CorruptError(tls, 219832) goto finished_invert __6: ; @@ -133651,34 +133710,34 @@ (*SessionUpdate)(unsafe.Pointer(pUp)).FaMask = pUp + 1*12 libc.Xmemcpy(tls, (*SessionUpdate)(unsafe.Pointer(pUp)).FaMask, (*SessionApplyCtx)(unsafe.Pointer(p)).FaUpdateMask, uint32(nU32)*uint32(unsafe.Sizeof(U32(0)))) - sessionAppendStr(tls, bp, ts+32913, bp+12) + sessionAppendStr(tls, bp, ts+32960, bp+12) sessionAppendIdent(tls, bp, (*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FzTab, bp+12) - sessionAppendStr(tls, bp, ts+32926, bp+12) + sessionAppendStr(tls, bp, ts+32973, bp+12) for ii = 0; ii < (*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FnCol; ii++ { if int32(*(*U8)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FabPK + uintptr(ii)))) == 0 && *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FapValue + uintptr((*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FnCol+ii)*4)) != 0 { sessionAppendStr(tls, bp, zSep, bp+12) sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(ii)*4)), bp+12) - sessionAppendStr(tls, bp, ts+32932, bp+12) + sessionAppendStr(tls, bp, ts+32979, bp+12) sessionAppendInteger(tls, bp, ii*2+1, bp+12) zSep = ts + 14614 } } zSep = ts + 1554 - sessionAppendStr(tls, bp, ts+32861, bp+12) + sessionAppendStr(tls, bp, ts+32908, bp+12) for ii = 0; ii < (*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FnCol; ii++ { if *(*U8)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FabPK + uintptr(ii))) != 0 || bPatchset == 0 && *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FapValue + uintptr(ii)*4)) != 0 { sessionAppendStr(tls, bp, zSep, bp+12) if bStat1 != 0 && ii == 1 { sessionAppendStr(tls, bp, - ts+32937, bp+12) + ts+32984, bp+12) } else { sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(ii)*4)), bp+12) - sessionAppendStr(tls, bp, ts+32869, bp+12) + sessionAppendStr(tls, bp, ts+32916, bp+12) sessionAppendInteger(tls, bp, ii*2+2, bp+12) } - zSep = ts + 21525 + zSep = ts + 21572 } } @@ -133730,34 +133789,34 @@ *(*SessionBuffer)(unsafe.Pointer(bp)) = SessionBuffer{} var nPk int32 = 0 - sessionAppendStr(tls, bp, ts+33012, bp+12) + sessionAppendStr(tls, bp, ts+33059, bp+12) sessionAppendIdent(tls, bp, zTab, bp+12) - sessionAppendStr(tls, bp, ts+32861, bp+12) + sessionAppendStr(tls, bp, ts+32908, bp+12) for i = 0; i < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol; i++ { if *(*U8)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FabPK + uintptr(i))) != 0 { nPk++ sessionAppendStr(tls, bp, zSep, bp+12) sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(i)*4)), bp+12) - sessionAppendStr(tls, bp, ts+32932, bp+12) + sessionAppendStr(tls, bp, ts+32979, bp+12) sessionAppendInteger(tls, bp, i+1, bp+12) - zSep = ts + 21525 + zSep = ts + 21572 } } if nPk < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol { - sessionAppendStr(tls, bp, ts+33030, bp+12) + sessionAppendStr(tls, bp, ts+33077, bp+12) sessionAppendInteger(tls, bp, (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol+1, bp+12) - sessionAppendStr(tls, bp, ts+32573, bp+12) + sessionAppendStr(tls, bp, ts+32620, bp+12) zSep = ts + 1554 for i = 0; i < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol; i++ { if !(int32(*(*U8)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FabPK + 
uintptr(i)))) != 0) { sessionAppendStr(tls, bp, zSep, bp+12) sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(i)*4)), bp+12) - sessionAppendStr(tls, bp, ts+32869, bp+12) + sessionAppendStr(tls, bp, ts+32916, bp+12) sessionAppendInteger(tls, bp, i+1, bp+12) - zSep = ts + 33038 + zSep = ts + 33085 } } sessionAppendStr(tls, bp, ts+4957, bp+12) @@ -133784,9 +133843,9 @@ var i int32 *(*SessionBuffer)(unsafe.Pointer(bp)) = SessionBuffer{} - sessionAppendStr(tls, bp, ts+33043, bp+12) + sessionAppendStr(tls, bp, ts+33090, bp+12) sessionAppendIdent(tls, bp, zTab, bp+12) - sessionAppendStr(tls, bp, ts+21531, bp+12) + sessionAppendStr(tls, bp, ts+21578, bp+12) for i = 0; i < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol; i++ { if i != 0 { sessionAppendStr(tls, bp, ts+14614, bp+12) @@ -133794,9 +133853,9 @@ sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(i)*4)), bp+12) } - sessionAppendStr(tls, bp, ts+33061, bp+12) + sessionAppendStr(tls, bp, ts+33108, bp+12) for i = 1; i < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol; i++ { - sessionAppendStr(tls, bp, ts+33072, bp+12) + sessionAppendStr(tls, bp, ts+33119, bp+12) } sessionAppendStr(tls, bp, ts+4957, bp+12) @@ -133815,11 +133874,11 @@ var rc int32 = sessionSelectRow(tls, db, ts+11348, p) if rc == SQLITE_OK { rc = sessionPrepare(tls, db, p+8, - ts+33076) + ts+33123) } if rc == SQLITE_OK { rc = sessionPrepare(tls, db, p+4, - ts+33189) + ts+33236) } return rc } @@ -133847,7 +133906,7 @@ f func(*libc.TLS, uintptr, int32, uintptr) int32 })(unsafe.Pointer(&struct{ uintptr }{xValue})).f(tls, pIter, i, bp) if *(*uintptr)(unsafe.Pointer(bp)) == uintptr(0) { - rc = Xsqlite3CorruptError(tls, 220219) + rc = Xsqlite3CorruptError(tls, 220310) } else { rc = sessionBindValue(tls, pStmt, i+1, *(*uintptr)(unsafe.Pointer(bp))) } @@ -134100,7 +134159,7 @@ if *(*int32)(unsafe.Pointer(bp + 4)) != 0 { rc = sessionApplyOneOp(tls, pIter, pApply, xConflict, pCtx, uintptr(0), uintptr(0)) } else if *(*int32)(unsafe.Pointer(bp)) != 0 { - rc = Xsqlite3_exec(tls, db, ts+33333, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33380, uintptr(0), uintptr(0), uintptr(0)) if rc == SQLITE_OK { rc = sessionBindRow(tls, pIter, *(*uintptr)(unsafe.Pointer(&struct { @@ -134116,7 +134175,7 @@ rc = sessionApplyOneOp(tls, pIter, pApply, xConflict, pCtx, uintptr(0), uintptr(0)) } if rc == SQLITE_OK { - rc = Xsqlite3_exec(tls, db, ts+33354, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33401, uintptr(0), uintptr(0), uintptr(0)) } } } @@ -134189,10 +134248,10 @@ (*SessionApplyCtx)(unsafe.Pointer(bp + 48)).FbInvertConstraints = libc.BoolInt32(!!(flags&SQLITE_CHANGESETAPPLY_INVERT != 0)) Xsqlite3_mutex_enter(tls, Xsqlite3_db_mutex(tls, db)) if flags&SQLITE_CHANGESETAPPLY_NOSAVEPOINT == 0 { - rc = Xsqlite3_exec(tls, db, ts+33373, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33420, uintptr(0), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { - rc = Xsqlite3_exec(tls, db, ts+33399, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33446, uintptr(0), uintptr(0), uintptr(0)) } for rc == SQLITE_OK && SQLITE_ROW == Xsqlite3changeset_next(tls, pIter) { Xsqlite3changeset_op(tls, pIter, bp+124, bp+128, bp+132, uintptr(0)) @@ -134251,16 +134310,16 @@ if (*SessionApplyCtx)(unsafe.Pointer(bp+48)).FnCol == 0 { schemaMismatch = 1 Xsqlite3_log(tls, SQLITE_SCHEMA, - ts+33429, libc.VaList(bp+8, 
*(*uintptr)(unsafe.Pointer(bp + 140)))) + ts+33476, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(bp + 140)))) } else if (*SessionApplyCtx)(unsafe.Pointer(bp+48)).FnCol < *(*int32)(unsafe.Pointer(bp + 128)) { schemaMismatch = 1 Xsqlite3_log(tls, SQLITE_SCHEMA, - ts+33473, + ts+33520, libc.VaList(bp+16, *(*uintptr)(unsafe.Pointer(bp + 140)), (*SessionApplyCtx)(unsafe.Pointer(bp+48)).FnCol, *(*int32)(unsafe.Pointer(bp + 128)))) } else if *(*int32)(unsafe.Pointer(bp + 128)) < nMinCol || libc.Xmemcmp(tls, (*SessionApplyCtx)(unsafe.Pointer(bp+48)).FabPK, *(*uintptr)(unsafe.Pointer(bp + 136)), uint32(*(*int32)(unsafe.Pointer(bp + 128)))) != 0 { schemaMismatch = 1 Xsqlite3_log(tls, SQLITE_SCHEMA, - ts+33544, libc.VaList(bp+40, *(*uintptr)(unsafe.Pointer(bp + 140)))) + ts+33591, libc.VaList(bp+40, *(*uintptr)(unsafe.Pointer(bp + 140)))) } else { (*SessionApplyCtx)(unsafe.Pointer(bp + 48)).FnCol = *(*int32)(unsafe.Pointer(bp + 128)) if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(bp + 140)), ts+11348) { @@ -134314,14 +134373,14 @@ } } } - Xsqlite3_exec(tls, db, ts+33604, uintptr(0), uintptr(0), uintptr(0)) + Xsqlite3_exec(tls, db, ts+33651, uintptr(0), uintptr(0), uintptr(0)) if flags&SQLITE_CHANGESETAPPLY_NOSAVEPOINT == 0 { if rc == SQLITE_OK { - rc = Xsqlite3_exec(tls, db, ts+33634, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33681, uintptr(0), uintptr(0), uintptr(0)) } else { - Xsqlite3_exec(tls, db, ts+33658, uintptr(0), uintptr(0), uintptr(0)) - Xsqlite3_exec(tls, db, ts+33634, uintptr(0), uintptr(0), uintptr(0)) + Xsqlite3_exec(tls, db, ts+33705, uintptr(0), uintptr(0), uintptr(0)) + Xsqlite3_exec(tls, db, ts+33681, uintptr(0), uintptr(0), uintptr(0)) } } @@ -135558,7 +135617,7 @@ fts5yy_pop_parser_stack(tls, fts5yypParser) } - sqlite3Fts5ParseError(tls, pParse, ts+33686, 0) + sqlite3Fts5ParseError(tls, pParse, ts+33733, 0) (*Fts5yyParser)(unsafe.Pointer(fts5yypParser)).FpParse = pParse @@ -135846,7 +135905,7 @@ _ = fts5yymajor sqlite3Fts5ParseError(tls, - pParse, ts+33714, libc.VaList(bp, fts5yyminor.Fn, fts5yyminor.Fp)) + pParse, ts+33761, libc.VaList(bp, fts5yyminor.Fn, fts5yyminor.Fp)) (*Fts5yyParser)(unsafe.Pointer(fts5yypParser)).FpParse = pParse @@ -136031,7 +136090,7 @@ if n < 0 { n = int32(libc.Xstrlen(tls, z)) } - (*HighlightContext)(unsafe.Pointer(p)).FzOut = Xsqlite3_mprintf(tls, ts+33745, libc.VaList(bp, (*HighlightContext)(unsafe.Pointer(p)).FzOut, n, z)) + (*HighlightContext)(unsafe.Pointer(p)).FzOut = Xsqlite3_mprintf(tls, ts+33792, libc.VaList(bp, (*HighlightContext)(unsafe.Pointer(p)).FzOut, n, z)) if (*HighlightContext)(unsafe.Pointer(p)).FzOut == uintptr(0) { *(*int32)(unsafe.Pointer(pRc)) = SQLITE_NOMEM } @@ -136099,7 +136158,7 @@ var iCol int32 if nVal != 3 { - var zErr uintptr = ts + 33752 + var zErr uintptr = ts + 33799 Xsqlite3_result_error(tls, pCtx, zErr, -1) return } @@ -136280,7 +136339,7 @@ var nCol int32 if nVal != 5 { - var zErr uintptr = ts + 33802 + var zErr uintptr = ts + 33849 Xsqlite3_result_error(tls, pCtx, zErr, -1) return } @@ -136604,13 +136663,13 @@ defer tls.Free(48) *(*[3]Builtin)(unsafe.Pointer(bp)) = [3]Builtin{ - {FzFunc: ts + 33850, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { + {FzFunc: ts + 33897, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr, int32, uintptr) }{fts5SnippetFunction}))}, - {FzFunc: ts + 33858, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { + {FzFunc: ts + 33905, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr, 
int32, uintptr) }{fts5HighlightFunction}))}, - {FzFunc: ts + 33868, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { + {FzFunc: ts + 33915, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr, int32, uintptr) }{fts5Bm25Function}))}, } @@ -137160,7 +137219,7 @@ *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_OK var nCmd int32 = int32(libc.Xstrlen(tls, zCmd)) - if Xsqlite3_strnicmp(tls, ts+33873, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+33920, zCmd, nCmd) == 0 { var nByte int32 = int32(uint32(unsafe.Sizeof(int32(0))) * uint32(FTS5_MAX_PREFIX_INDEXES)) var p uintptr var bFirst int32 = 1 @@ -137187,14 +137246,14 @@ break } if int32(*(*uint8)(unsafe.Pointer(p))) < '0' || int32(*(*uint8)(unsafe.Pointer(p))) > '9' { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+33880, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+33927, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR break } if (*Fts5Config)(unsafe.Pointer(pConfig)).FnPrefix == FTS5_MAX_PREFIX_INDEXES { *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, - ts+33911, libc.VaList(bp, FTS5_MAX_PREFIX_INDEXES)) + ts+33958, libc.VaList(bp, FTS5_MAX_PREFIX_INDEXES)) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR break } @@ -137205,7 +137264,7 @@ } if nPre <= 0 || nPre >= 1000 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+33944, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+33991, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR break } @@ -137218,7 +137277,7 @@ return *(*int32)(unsafe.Pointer(bp + 40)) } - if Xsqlite3_strnicmp(tls, ts+33981, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+34028, zCmd, nCmd) == 0 { var p uintptr = zArg var nArg Sqlite3_int64 = Sqlite3_int64(libc.Xstrlen(tls, zArg) + Size_t(1)) var azArg uintptr = sqlite3Fts5MallocZero(tls, bp+40, Sqlite3_int64(unsafe.Sizeof(uintptr(0)))*nArg) @@ -137227,7 +137286,7 @@ if azArg != 0 && pSpace != 0 { if (*Fts5Config)(unsafe.Pointer(pConfig)).FpTok != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+33990, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34037, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { for nArg = int64(0); p != 0 && *(*uint8)(unsafe.Pointer(p)) != 0; nArg++ { @@ -137246,7 +137305,7 @@ } } if p == uintptr(0) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34023, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34070, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { *(*int32)(unsafe.Pointer(bp + 40)) = sqlite3Fts5GetTokenizer(tls, pGlobal, @@ -137261,14 +137320,14 @@ return *(*int32)(unsafe.Pointer(bp + 40)) } - if Xsqlite3_strnicmp(tls, ts+34057, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+34104, zCmd, nCmd) == 0 { if (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent != FTS5_CONTENT_NORMAL { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34065, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34112, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { if *(*uint8)(unsafe.Pointer(zArg)) != 0 { (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent = FTS5_CONTENT_EXTERNAL - (*Fts5Config)(unsafe.Pointer(pConfig)).FzContent = sqlite3Fts5Mprintf(tls, bp+40, ts+34097, libc.VaList(bp+8, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, zArg)) + (*Fts5Config)(unsafe.Pointer(pConfig)).FzContent = sqlite3Fts5Mprintf(tls, bp+40, ts+34144, libc.VaList(bp+8, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, zArg)) 
} else { (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent = FTS5_CONTENT_NONE } @@ -137276,9 +137335,9 @@ return *(*int32)(unsafe.Pointer(bp + 40)) } - if Xsqlite3_strnicmp(tls, ts+34103, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+34150, zCmd, nCmd) == 0 { if (*Fts5Config)(unsafe.Pointer(pConfig)).FzContentRowid != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34117, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34164, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FzContentRowid = sqlite3Fts5Strndup(tls, bp+40, zArg, -1) @@ -137286,9 +137345,9 @@ return *(*int32)(unsafe.Pointer(bp + 40)) } - if Xsqlite3_strnicmp(tls, ts+34155, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+34202, zCmd, nCmd) == 0 { if int32(*(*uint8)(unsafe.Pointer(zArg))) != '0' && int32(*(*uint8)(unsafe.Pointer(zArg))) != '1' || int32(*(*uint8)(unsafe.Pointer(zArg + 1))) != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34166, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34213, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize = libc.Bool32(int32(*(*uint8)(unsafe.Pointer(zArg))) == '1') @@ -137300,17 +137359,17 @@ *(*[4]Fts5Enum)(unsafe.Pointer(bp + 44)) = [4]Fts5Enum{ {FzName: ts + 8026, FeVal: FTS5_DETAIL_NONE}, {FzName: ts + 17345}, - {FzName: ts + 34201, FeVal: FTS5_DETAIL_COLUMNS}, + {FzName: ts + 34248, FeVal: FTS5_DETAIL_COLUMNS}, {}, } if libc.AssignPtrInt32(bp+40, fts5ConfigSetEnum(tls, bp+44, zArg, pConfig+48)) != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34209, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34256, 0) } return *(*int32)(unsafe.Pointer(bp + 40)) } - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34240, libc.VaList(bp+24, nCmd, zCmd)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34287, libc.VaList(bp+24, nCmd, zCmd)) return SQLITE_ERROR } @@ -137357,15 +137416,15 @@ defer tls.Free(16) var rc int32 = SQLITE_OK - if 0 == Xsqlite3_stricmp(tls, zCol, ts+22191) || + if 0 == Xsqlite3_stricmp(tls, zCol, ts+22238) || 0 == Xsqlite3_stricmp(tls, zCol, ts+16267) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34268, libc.VaList(bp, zCol)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34315, libc.VaList(bp, zCol)) rc = SQLITE_ERROR } else if zArg != 0 { - if 0 == Xsqlite3_stricmp(tls, zArg, ts+34298) { + if 0 == Xsqlite3_stricmp(tls, zArg, ts+34345) { *(*U8)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(p)).FabUnindexed + uintptr((*Fts5Config)(unsafe.Pointer(p)).FnCol))) = U8(1) } else { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34308, libc.VaList(bp+8, zArg)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34355, libc.VaList(bp+8, zArg)) rc = SQLITE_ERROR } } @@ -137382,13 +137441,13 @@ *(*int32)(unsafe.Pointer(bp + 24)) = SQLITE_OK *(*Fts5Buffer)(unsafe.Pointer(bp + 28)) = Fts5Buffer{} - sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+28, ts+34339, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(p)).FzContentRowid)) + sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+28, ts+34386, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(p)).FzContentRowid)) if (*Fts5Config)(unsafe.Pointer(p)).FeContent != FTS5_CONTENT_NONE { for i = 0; i < (*Fts5Config)(unsafe.Pointer(p)).FnCol; i++ { if (*Fts5Config)(unsafe.Pointer(p)).FeContent == FTS5_CONTENT_EXTERNAL { - 
sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+28, ts+34344, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(p)).FazCol + uintptr(i)*4)))) + sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+28, ts+34391, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(p)).FazCol + uintptr(i)*4)))) } else { - sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+28, ts+34351, libc.VaList(bp+16, i)) + sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+28, ts+34398, libc.VaList(bp+16, i)) } } } @@ -137426,8 +137485,8 @@ (*Fts5Config)(unsafe.Pointer(pRet)).FzName = sqlite3Fts5Strndup(tls, bp+40, *(*uintptr)(unsafe.Pointer(azArg + 2*4)), -1) (*Fts5Config)(unsafe.Pointer(pRet)).FbColumnsize = 1 (*Fts5Config)(unsafe.Pointer(pRet)).FeDetail = FTS5_DETAIL_FULL - if *(*int32)(unsafe.Pointer(bp + 40)) == SQLITE_OK && Xsqlite3_stricmp(tls, (*Fts5Config)(unsafe.Pointer(pRet)).FzName, ts+22191) == 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34359, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pRet)).FzName)) + if *(*int32)(unsafe.Pointer(bp + 40)) == SQLITE_OK && Xsqlite3_stricmp(tls, (*Fts5Config)(unsafe.Pointer(pRet)).FzName, ts+22238) == 0 { + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34406, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pRet)).FzName)) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } @@ -137459,7 +137518,7 @@ if *(*int32)(unsafe.Pointer(bp + 40)) == SQLITE_OK { if z == uintptr(0) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34388, libc.VaList(bp+8, zOrig)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34435, libc.VaList(bp+8, zOrig)) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { if bOption != 0 { @@ -137496,14 +137555,14 @@ var zTail uintptr = uintptr(0) if (*Fts5Config)(unsafe.Pointer(pRet)).FeContent == FTS5_CONTENT_NORMAL { - zTail = ts + 34057 + zTail = ts + 34104 } else if (*Fts5Config)(unsafe.Pointer(pRet)).FbColumnsize != 0 { - zTail = ts + 34408 + zTail = ts + 34455 } if zTail != 0 { (*Fts5Config)(unsafe.Pointer(pRet)).FzContent = sqlite3Fts5Mprintf(tls, - bp+40, ts+34416, libc.VaList(bp+16, (*Fts5Config)(unsafe.Pointer(pRet)).FzDb, (*Fts5Config)(unsafe.Pointer(pRet)).FzName, zTail)) + bp+40, ts+34463, libc.VaList(bp+16, (*Fts5Config)(unsafe.Pointer(pRet)).FzDb, (*Fts5Config)(unsafe.Pointer(pRet)).FzName, zTail)) } } @@ -137552,7 +137611,7 @@ *(*int32)(unsafe.Pointer(bp + 48)) = SQLITE_OK var zSql uintptr - zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34427, 0) + zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34474, 0) for i = 0; zSql != 0 && i < (*Fts5Config)(unsafe.Pointer(pConfig)).FnCol; i++ { var zSep uintptr = func() uintptr { if i == 0 { @@ -137560,10 +137619,10 @@ } return ts + 14614 }() - zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34443, libc.VaList(bp, zSql, zSep, *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(pConfig)).FazCol + uintptr(i)*4)))) + zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34490, libc.VaList(bp, zSql, zSep, *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(pConfig)).FazCol + uintptr(i)*4)))) } - zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34450, - libc.VaList(bp+24, zSql, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, ts+22191)) + zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34497, + libc.VaList(bp+24, zSql, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, ts+22238)) if zSql != 0 { *(*int32)(unsafe.Pointer(bp + 48)) = Xsqlite3_declare_vtab(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, zSql) @@ -137673,7 +137732,7 @@ var rc int32 = SQLITE_OK - if 
0 == Xsqlite3_stricmp(tls, zKey, ts+34476) { + if 0 == Xsqlite3_stricmp(tls, zKey, ts+34523) { var pgsz int32 = 0 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { pgsz = Xsqlite3_value_int(tls, pVal) @@ -137683,7 +137742,7 @@ } else { (*Fts5Config)(unsafe.Pointer(pConfig)).Fpgsz = pgsz } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34481) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34528) { var nHashSize int32 = -1 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { nHashSize = Xsqlite3_value_int(tls, pVal) @@ -137693,7 +137752,7 @@ } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FnHashSize = nHashSize } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34490) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34537) { var nAutomerge int32 = -1 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { nAutomerge = Xsqlite3_value_int(tls, pVal) @@ -137706,7 +137765,7 @@ } (*Fts5Config)(unsafe.Pointer(pConfig)).FnAutomerge = nAutomerge } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34500) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34547) { var nUsermerge int32 = -1 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { nUsermerge = Xsqlite3_value_int(tls, pVal) @@ -137716,7 +137775,7 @@ } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FnUsermerge = nUsermerge } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34510) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34557) { var nCrisisMerge int32 = -1 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { nCrisisMerge = Xsqlite3_value_int(tls, pVal) @@ -137732,7 +137791,7 @@ } (*Fts5Config)(unsafe.Pointer(pConfig)).FnCrisisMerge = nCrisisMerge } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+22191) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+22238) { var zIn uintptr = Xsqlite3_value_text(tls, pVal) rc = sqlite3Fts5ConfigParseRank(tls, zIn, bp, bp+4) @@ -137755,7 +137814,7 @@ bp := tls.Alloc(44) defer tls.Free(44) - var zSelect uintptr = ts + 34522 + var zSelect uintptr = ts + 34569 var zSql uintptr *(*uintptr)(unsafe.Pointer(bp + 36)) = uintptr(0) *(*int32)(unsafe.Pointer(bp + 32)) = SQLITE_OK @@ -137777,7 +137836,7 @@ for SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 36))) { var zK uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 36)), 0) var pVal uintptr = Xsqlite3_column_value(tls, *(*uintptr)(unsafe.Pointer(bp + 36)), 1) - if 0 == Xsqlite3_stricmp(tls, zK, ts+34554) { + if 0 == Xsqlite3_stricmp(tls, zK, ts+34601) { iVersion = Xsqlite3_value_int(tls, pVal) } else { *(*int32)(unsafe.Pointer(bp + 40)) = 0 @@ -137791,7 +137850,7 @@ *(*int32)(unsafe.Pointer(bp + 32)) = SQLITE_ERROR if (*Fts5Config)(unsafe.Pointer(pConfig)).FpzErrmsg != 0 { *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(pConfig)).FpzErrmsg)) = Xsqlite3_mprintf(tls, - ts+34562, + ts+34609, libc.VaList(bp+16, iVersion, FTS5_CURRENT_VERSION)) } } @@ -137889,7 +137948,7 @@ } } if int32(*(*uint8)(unsafe.Pointer(z2))) == 0 { - sqlite3Fts5ParseError(tls, pParse, ts+34627, 0) + sqlite3Fts5ParseError(tls, pParse, ts+34674, 0) return FTS5_EOF } } @@ -137902,20 +137961,20 @@ { var z2 uintptr if sqlite3Fts5IsBareword(tls, *(*uint8)(unsafe.Pointer(z))) == 0 { - sqlite3Fts5ParseError(tls, pParse, ts+34647, libc.VaList(bp, z)) + sqlite3Fts5ParseError(tls, pParse, ts+34694, libc.VaList(bp, z)) return FTS5_EOF } tok = FTS5_STRING for z2 = z + 1; sqlite3Fts5IsBareword(tls, *(*uint8)(unsafe.Pointer(z2))) != 0; z2++ { } (*Fts5Token)(unsafe.Pointer(pToken)).Fn = (int32(z2) - int32(z)) / 1 - 
if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 2 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+34678, uint32(2)) == 0 { + if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 2 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+34725, uint32(2)) == 0 { tok = FTS5_OR } - if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 3 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+34681, uint32(3)) == 0 { + if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 3 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+34728, uint32(3)) == 0 { tok = FTS5_NOT } - if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 3 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+30063, uint32(3)) == 0 { + if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 3 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+30110, uint32(3)) == 0 { tok = FTS5_AND } break @@ -139694,9 +139753,9 @@ bp := tls.Alloc(16) defer tls.Free(16) - if (*Fts5Token)(unsafe.Pointer(pTok)).Fn != 4 || libc.Xmemcmp(tls, ts+34685, (*Fts5Token)(unsafe.Pointer(pTok)).Fp, uint32(4)) != 0 { + if (*Fts5Token)(unsafe.Pointer(pTok)).Fn != 4 || libc.Xmemcmp(tls, ts+34732, (*Fts5Token)(unsafe.Pointer(pTok)).Fp, uint32(4)) != 0 { sqlite3Fts5ParseError(tls, - pParse, ts+33714, libc.VaList(bp, (*Fts5Token)(unsafe.Pointer(pTok)).Fn, (*Fts5Token)(unsafe.Pointer(pTok)).Fp)) + pParse, ts+33761, libc.VaList(bp, (*Fts5Token)(unsafe.Pointer(pTok)).Fn, (*Fts5Token)(unsafe.Pointer(pTok)).Fp)) } } @@ -139712,7 +139771,7 @@ var c uint8 = *(*uint8)(unsafe.Pointer((*Fts5Token)(unsafe.Pointer(p)).Fp + uintptr(i))) if int32(c) < '0' || int32(c) > '9' { sqlite3Fts5ParseError(tls, - pParse, ts+34690, libc.VaList(bp, (*Fts5Token)(unsafe.Pointer(p)).Fn, (*Fts5Token)(unsafe.Pointer(p)).Fp)) + pParse, ts+34737, libc.VaList(bp, (*Fts5Token)(unsafe.Pointer(p)).Fn, (*Fts5Token)(unsafe.Pointer(p)).Fp)) return } nNear = nNear*10 + (int32(*(*uint8)(unsafe.Pointer((*Fts5Token)(unsafe.Pointer(p)).Fp + uintptr(i)))) - '0') @@ -139799,7 +139858,7 @@ } } if iCol == (*Fts5Config)(unsafe.Pointer(pConfig)).FnCol { - sqlite3Fts5ParseError(tls, pParse, ts+20528, libc.VaList(bp, z)) + sqlite3Fts5ParseError(tls, pParse, ts+20575, libc.VaList(bp, z)) } else { pRet = fts5ParseColset(tls, pParse, pColset, iCol) } @@ -139880,7 +139939,7 @@ *(*uintptr)(unsafe.Pointer(bp)) = pColset if (*Fts5Config)(unsafe.Pointer((*Fts5Parse)(unsafe.Pointer(pParse)).FpConfig)).FeDetail == FTS5_DETAIL_NONE { sqlite3Fts5ParseError(tls, pParse, - ts+34719, 0) + ts+34766, 0) } else { fts5ParseSetColset(tls, pParse, pExpr, pColset, bp) } @@ -140050,12 +140109,12 @@ (*Fts5ExprPhrase)(unsafe.Pointer(pPhrase)).FnTerm > 1 || (*Fts5ExprPhrase)(unsafe.Pointer(pPhrase)).FnTerm > 0 && (*Fts5ExprTerm)(unsafe.Pointer(pPhrase+20)).FbFirst != 0 { sqlite3Fts5ParseError(tls, pParse, - ts+34772, + ts+34819, libc.VaList(bp, func() uintptr { if (*Fts5ExprNearset)(unsafe.Pointer(pNear)).FnPhrase == 1 { - return ts + 34822 + return ts + 34869 } - return ts + 34685 + return ts + 34732 }())) Xsqlite3_free(tls, pRet) pRet = uintptr(0) @@ -141004,7 +141063,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpReader == uintptr(0) && rc == SQLITE_OK { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig rc = Xsqlite3_blob_open(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, - (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl, ts+34829, iRowid, 0, p+40) + (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl, ts+34876, iRowid, 0, 
p+40) } if rc == SQLITE_ERROR { @@ -141083,7 +141142,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpWriter == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig fts5IndexPrepareStmt(tls, p, p+44, Xsqlite3_mprintf(tls, - ts+34835, + ts+34882, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName))) if (*Fts5Index)(unsafe.Pointer(p)).Frc != 0 { return @@ -141108,7 +141167,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpDeleter == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig var zSql uintptr = Xsqlite3_mprintf(tls, - ts+34886, + ts+34933, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) if fts5IndexPrepareStmt(tls, p, p+48, zSql) != 0 { return @@ -141131,7 +141190,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpIdxDeleter == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig fts5IndexPrepareStmt(tls, p, p+56, Xsqlite3_mprintf(tls, - ts+34935, + ts+34982, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName))) } if (*Fts5Index)(unsafe.Pointer(p)).Frc == SQLITE_OK { @@ -141370,7 +141429,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).Frc == SQLITE_OK { if (*Fts5Index)(unsafe.Pointer(p)).FpDataVersion == uintptr(0) { (*Fts5Index)(unsafe.Pointer(p)).Frc = fts5IndexPrepareStmt(tls, p, p+68, - Xsqlite3_mprintf(tls, ts+34975, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer((*Fts5Index)(unsafe.Pointer(p)).FpConfig)).FzDb))) + Xsqlite3_mprintf(tls, ts+35022, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer((*Fts5Index)(unsafe.Pointer(p)).FpConfig)).FzDb))) if (*Fts5Index)(unsafe.Pointer(p)).Frc != 0 { return int64(0) } @@ -142569,7 +142628,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpIdxSelect == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig fts5IndexPrepareStmt(tls, p, p+60, Xsqlite3_mprintf(tls, - ts+34998, + ts+35045, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName))) } return (*Fts5Index)(unsafe.Pointer(p)).FpIdxSelect @@ -144034,7 +144093,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpIdxWriter == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig fts5IndexPrepareStmt(tls, p, p+52, Xsqlite3_mprintf(tls, - ts+35082, + ts+35129, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName))) } @@ -145117,13 +145176,13 @@ if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { (*Fts5Index)(unsafe.Pointer(p)).FpConfig = pConfig (*Fts5Index)(unsafe.Pointer(p)).FnWorkUnit = FTS5_WORK_UNIT - (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl = sqlite3Fts5Mprintf(tls, bp+8, ts+35139, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) + (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl = sqlite3Fts5Mprintf(tls, bp+8, ts+35186, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) if (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl != 0 && bCreate != 0 { *(*int32)(unsafe.Pointer(bp + 8)) = sqlite3Fts5CreateTable(tls, - pConfig, ts+25056, ts+35147, 0, pzErr) + pConfig, ts+25103, ts+35194, 0, pzErr) if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { *(*int32)(unsafe.Pointer(bp + 8)) = sqlite3Fts5CreateTable(tls, pConfig, ts+11488, - ts+35182, + ts+35229, 1, pzErr) } if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { @@ -145376,7 +145435,7 @@ sqlite3Fts5Put32(tls, bp, iNew) rc = Xsqlite3_blob_open(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, 
(*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl, - ts+34829, int64(FTS5_STRUCTURE_ROWID), 1, bp+4) + ts+34876, int64(FTS5_STRUCTURE_ROWID), 1, bp+4) if rc == SQLITE_OK { Xsqlite3_blob_write(tls, *(*uintptr)(unsafe.Pointer(bp + 4)), bp, 4, 0) rc = Xsqlite3_blob_close(tls, *(*uintptr)(unsafe.Pointer(bp + 4))) @@ -145490,7 +145549,7 @@ } fts5IndexPrepareStmt(tls, p, bp+24, Xsqlite3_mprintf(tls, - ts+35226, + ts+35273, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, (*Fts5StructureSegment)(unsafe.Pointer(pSeg)).FiSegid))) for (*Fts5Index)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) { @@ -145660,7 +145719,7 @@ } else { (*Fts5Buffer)(unsafe.Pointer(bp + 8)).Fn = 0 fts5SegiterPoslist(tls, p, *(*uintptr)(unsafe.Pointer(bp))+80+uintptr((*Fts5CResult)(unsafe.Pointer((*Fts5Iter)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FaFirst+1*4)).FiFirst)*96, uintptr(0), bp+8) - sqlite3Fts5BufferAppendBlob(tls, p+36, bp+8, uint32(4), ts+35312) + sqlite3Fts5BufferAppendBlob(tls, p+36, bp+8, uint32(4), ts+35359) for 0 == sqlite3Fts5PoslistNext64(tls, (*Fts5Buffer)(unsafe.Pointer(bp+8)).Fp, (*Fts5Buffer)(unsafe.Pointer(bp+8)).Fn, bp+20, bp+24) { var iCol int32 = int32(*(*I64)(unsafe.Pointer(bp + 24)) >> 32) var iTokOff int32 = int32(*(*I64)(unsafe.Pointer(bp + 24)) & int64(0x7FFFFFFF)) @@ -145931,7 +145990,7 @@ if (*Fts5Config)(unsafe.Pointer(pConfig)).FbLock != 0 { (*Fts5Table)(unsafe.Pointer(pTab)).Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+35317, 0) + ts+35364, 0) return SQLITE_ERROR } @@ -146355,7 +146414,7 @@ (*Fts5Sorter)(unsafe.Pointer(pSorter)).FnIdx = nPhrase rc = fts5PrepareStatement(tls, pSorter, pConfig, - ts+35356, + ts+35403, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zRank, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, func() uintptr { if zRankArgs != 0 { @@ -146371,9 +146430,9 @@ }(), func() uintptr { if bDesc != 0 { - return ts + 35411 + return ts + 35458 } - return ts + 35416 + return ts + 35463 }())) (*Fts5Cursor)(unsafe.Pointer(pCsr)).FpSorter = pSorter @@ -146419,12 +146478,12 @@ (*Fts5Cursor)(unsafe.Pointer(pCsr)).FePlan = FTS5_PLAN_SPECIAL - if n == 5 && 0 == Xsqlite3_strnicmp(tls, ts+35420, z, n) { + if n == 5 && 0 == Xsqlite3_strnicmp(tls, ts+35467, z, n) { (*Fts5Cursor)(unsafe.Pointer(pCsr)).FiSpecial = I64(sqlite3Fts5IndexReads(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.FpIndex)) } else if n == 2 && 0 == Xsqlite3_strnicmp(tls, ts+5057, z, n) { (*Fts5Cursor)(unsafe.Pointer(pCsr)).FiSpecial = (*Fts5Cursor)(unsafe.Pointer(pCsr)).FiCsrId } else { - (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, ts+35426, libc.VaList(bp, n, z)) + (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, ts+35473, libc.VaList(bp, n, z)) rc = SQLITE_ERROR } @@ -146455,7 +146514,7 @@ var zRankArgs uintptr = (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRankArgs if zRankArgs != 0 { - var zSql uintptr = sqlite3Fts5Mprintf(tls, bp+16, ts+35454, libc.VaList(bp, zRankArgs)) + var zSql uintptr = sqlite3Fts5Mprintf(tls, bp+16, ts+35501, libc.VaList(bp, zRankArgs)) if zSql != 0 { *(*uintptr)(unsafe.Pointer(bp + 20)) = uintptr(0) *(*int32)(unsafe.Pointer(bp + 16)) = Xsqlite3_prepare_v3(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, zSql, -1, @@ -146486,7 +146545,7 @@ if *(*int32)(unsafe.Pointer(bp + 16)) == SQLITE_OK { pAux = 
fts5FindAuxiliary(tls, pTab, zRank) if pAux == uintptr(0) { - (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, ts+35464, libc.VaList(bp+8, zRank)) + (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, ts+35511, libc.VaList(bp+8, zRank)) *(*int32)(unsafe.Pointer(bp + 16)) = SQLITE_ERROR } } @@ -146518,14 +146577,14 @@ *(*int32)(unsafe.Pointer(pCsr + 60)) |= FTS5CSR_FREE_ZRANK } else if rc == SQLITE_ERROR { (*Sqlite3_vtab)(unsafe.Pointer((*Fts5Cursor)(unsafe.Pointer(pCsr)).Fbase.FpVtab)).FzErrMsg = Xsqlite3_mprintf(tls, - ts+35485, libc.VaList(bp, z)) + ts+35532, libc.VaList(bp, z)) } } else { if (*Fts5Config)(unsafe.Pointer(pConfig)).FzRank != 0 { (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRank = (*Fts5Config)(unsafe.Pointer(pConfig)).FzRank (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRankArgs = (*Fts5Config)(unsafe.Pointer(pConfig)).FzRankArgs } else { - (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRank = ts + 33868 + (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRank = ts + 33915 (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRankArgs = uintptr(0) } } @@ -146581,7 +146640,7 @@ goto __1 } (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+35317, 0) + ts+35364, 0) return SQLITE_ERROR __1: ; @@ -146798,7 +146857,7 @@ goto __40 } *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(pConfig)).FpzErrmsg)) = Xsqlite3_mprintf(tls, - ts+35518, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) + ts+35565, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) rc = SQLITE_ERROR goto __41 __40: @@ -146943,28 +147002,28 @@ var rc int32 = SQLITE_OK *(*int32)(unsafe.Pointer(bp)) = 0 - if 0 == Xsqlite3_stricmp(tls, ts+35554, zCmd) { + if 0 == Xsqlite3_stricmp(tls, ts+35601, zCmd) { if (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NORMAL { fts5SetVtabError(tls, pTab, - ts+35565, 0) + ts+35612, 0) rc = SQLITE_ERROR } else { rc = sqlite3Fts5StorageDeleteAll(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage) } - } else if 0 == Xsqlite3_stricmp(tls, ts+35645, zCmd) { + } else if 0 == Xsqlite3_stricmp(tls, ts+35692, zCmd) { if (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NONE { fts5SetVtabError(tls, pTab, - ts+35653, 0) + ts+35700, 0) rc = SQLITE_ERROR } else { rc = sqlite3Fts5StorageRebuild(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage) } } else if 0 == Xsqlite3_stricmp(tls, ts+16934, zCmd) { rc = sqlite3Fts5StorageOptimize(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage) - } else if 0 == Xsqlite3_stricmp(tls, ts+35709, zCmd) { + } else if 0 == Xsqlite3_stricmp(tls, ts+35756, zCmd) { var nMerge int32 = Xsqlite3_value_int(tls, pVal) rc = sqlite3Fts5StorageMerge(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage, nMerge) - } else if 0 == Xsqlite3_stricmp(tls, ts+35715, zCmd) { + } else if 0 == Xsqlite3_stricmp(tls, ts+35762, zCmd) { var iArg int32 = Xsqlite3_value_int(tls, pVal) rc = sqlite3Fts5StorageIntegrity(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage, iArg) } else { @@ -147035,12 +147094,12 @@ if eType0 == SQLITE_INTEGER && fts5IsContentless(tls, pTab) != 0 { (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+35731, + ts+35778, libc.VaList(bp, func() uintptr { if nArg > 1 { - return ts + 20429 + return ts + 20476 } - return ts + 35768 + return ts + 35815 }(), (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) *(*int32)(unsafe.Pointer(bp + 16)) = SQLITE_ERROR } else if nArg == 1 { @@ -147670,7 +147729,7 @@ pCsr = 
fts5CursorFromCsrid(tls, (*Fts5Auxiliary)(unsafe.Pointer(pAux)).FpGlobal, iCsrId) if pCsr == uintptr(0) || (*Fts5Cursor)(unsafe.Pointer(pCsr)).FePlan == 0 { - var zErr uintptr = Xsqlite3_mprintf(tls, ts+35780, libc.VaList(bp, iCsrId)) + var zErr uintptr = Xsqlite3_mprintf(tls, ts+35827, libc.VaList(bp, iCsrId)) Xsqlite3_result_error(tls, context, zErr, -1) Xsqlite3_free(tls, zErr) } else { @@ -147914,7 +147973,7 @@ }()) if pMod == uintptr(0) { rc = SQLITE_ERROR - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35801, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(azArg)))) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35848, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(azArg)))) } else { rc = (*struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 @@ -147933,7 +147992,7 @@ (*Fts5Config)(unsafe.Pointer(pConfig)).FpTokApi = pMod + 8 if rc != SQLITE_OK { if pzErr != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35823, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35870, 0) } } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FePattern = sqlite3Fts5TokenizerPattern(tls, @@ -147980,7 +148039,7 @@ var ppApi uintptr _ = nArg - ppApi = Xsqlite3_value_pointer(tls, *(*uintptr)(unsafe.Pointer(apArg)), ts+35854) + ppApi = Xsqlite3_value_pointer(tls, *(*uintptr)(unsafe.Pointer(apArg)), ts+35901) if ppApi != 0 { *(*uintptr)(unsafe.Pointer(ppApi)) = pGlobal } @@ -147989,7 +148048,7 @@ func fts5SourceIdFunc(tls *libc.TLS, pCtx uintptr, nArg int32, apUnused uintptr) { _ = nArg _ = apUnused - Xsqlite3_result_text(tls, pCtx, ts+35867, -1, libc.UintptrFromInt32(-1)) + Xsqlite3_result_text(tls, pCtx, ts+35914, -1, libc.UintptrFromInt32(-1)) } func fts5ShadowName(tls *libc.TLS, zName uintptr) int32 { @@ -148003,7 +148062,7 @@ } var azName2 = [5]uintptr{ - ts + 35958, ts + 34057, ts + 25056, ts + 34408, ts + 11488, + ts + 36005, ts + 34104, ts + 25103, ts + 34455, ts + 11488, } func fts5Init(tls *libc.TLS, db uintptr) int32 { @@ -148027,7 +148086,7 @@ (*Fts5Global)(unsafe.Pointer(pGlobal)).Fapi.FxFindTokenizer = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr, uintptr) int32 }{fts5FindTokenizer})) - rc = Xsqlite3_create_module_v2(tls, db, ts+35965, uintptr(unsafe.Pointer(&fts5Mod)), p, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5ModuleDestroy}))) + rc = Xsqlite3_create_module_v2(tls, db, ts+36012, uintptr(unsafe.Pointer(&fts5Mod)), p, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5ModuleDestroy}))) if rc == SQLITE_OK { rc = sqlite3Fts5IndexInit(tls, db) } @@ -148045,13 +148104,13 @@ } if rc == SQLITE_OK { rc = Xsqlite3_create_function(tls, - db, ts+35965, 1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { + db, ts+36012, 1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{fts5Fts5Func})), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { rc = Xsqlite3_create_function(tls, - db, ts+35970, 0, + db, ts+36017, 0, SQLITE_UTF8|SQLITE_DETERMINISTIC|SQLITE_INNOCUOUS, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) @@ -148108,17 +148167,17 @@ if *(*uintptr)(unsafe.Pointer(p + 28 + uintptr(eStmt)*4)) == uintptr(0) { *(*[11]uintptr)(unsafe.Pointer(bp + 128)) = [11]uintptr{ - ts + 35985, - ts + 36053, - ts + 36122, - ts + 36155, - ts + 36194, - ts + 36234, - ts + 36273, - ts + 36314, - ts + 36353, - ts + 36395, - ts + 36435, + ts + 36032, + ts + 36100, + ts + 36169, + ts + 36202, + 
ts + 36241, + ts + 36281, + ts + 36320, + ts + 36361, + ts + 36400, + ts + 36442, + ts + 36482, } var pC uintptr = (*Fts5Storage)(unsafe.Pointer(p)).FpConfig var zSql uintptr = uintptr(0) @@ -148220,18 +148279,18 @@ defer tls.Free(80) var rc int32 = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36458, + ts+36505, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36562, + ts+36609, libc.VaList(bp+48, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) } if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NORMAL { rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36600, + ts+36647, libc.VaList(bp+64, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) } return rc @@ -148243,7 +148302,7 @@ if *(*int32)(unsafe.Pointer(pRc)) == SQLITE_OK { *(*int32)(unsafe.Pointer(pRc)) = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36638, + ts+36685, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zTail, zName, zTail)) } } @@ -148255,14 +148314,14 @@ var pConfig uintptr = (*Fts5Storage)(unsafe.Pointer(pStorage)).FpConfig *(*int32)(unsafe.Pointer(bp)) = sqlite3Fts5StorageSync(tls, pStorage) - fts5StorageRenameOne(tls, pConfig, bp, ts+25056, zName) + fts5StorageRenameOne(tls, pConfig, bp, ts+25103, zName) fts5StorageRenameOne(tls, pConfig, bp, ts+11488, zName) - fts5StorageRenameOne(tls, pConfig, bp, ts+35958, zName) + fts5StorageRenameOne(tls, pConfig, bp, ts+36005, zName) if (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { - fts5StorageRenameOne(tls, pConfig, bp, ts+34408, zName) + fts5StorageRenameOne(tls, pConfig, bp, ts+34455, zName) } if (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NORMAL { - fts5StorageRenameOne(tls, pConfig, bp, ts+34057, zName) + fts5StorageRenameOne(tls, pConfig, bp, ts+34104, zName) } return *(*int32)(unsafe.Pointer(bp)) } @@ -148274,17 +148333,17 @@ var rc int32 *(*uintptr)(unsafe.Pointer(bp + 64)) = uintptr(0) - rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, bp+64, ts+36680, + rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, bp+64, ts+36727, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zPost, zDefn, func() uintptr { if bWithout != 0 { - return ts + 29709 + return ts + 29756 } return ts + 1554 }())) if *(*uintptr)(unsafe.Pointer(bp + 64)) != 0 { *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, - ts+36710, + ts+36757, libc.VaList(bp+40, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zPost, *(*uintptr)(unsafe.Pointer(bp + 64)))) Xsqlite3_free(tls, *(*uintptr)(unsafe.Pointer(bp + 64))) } @@ -148321,27 +148380,27 @@ } else { var i int32 var iOff int32 - Xsqlite3_snprintf(tls, nDefn, zDefn, ts+36754, 0) + Xsqlite3_snprintf(tls, nDefn, zDefn, ts+36801, 0) iOff = int32(libc.Xstrlen(tls, zDefn)) for i = 0; i < (*Fts5Config)(unsafe.Pointer(pConfig)).FnCol; i++ { - 
Xsqlite3_snprintf(tls, nDefn-iOff, zDefn+uintptr(iOff), ts+36777, libc.VaList(bp, i)) + Xsqlite3_snprintf(tls, nDefn-iOff, zDefn+uintptr(iOff), ts+36824, libc.VaList(bp, i)) iOff = iOff + int32(libc.Xstrlen(tls, zDefn+uintptr(iOff))) } - rc = sqlite3Fts5CreateTable(tls, pConfig, ts+34057, zDefn, 0, pzErr) + rc = sqlite3Fts5CreateTable(tls, pConfig, ts+34104, zDefn, 0, pzErr) } Xsqlite3_free(tls, zDefn) } if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { rc = sqlite3Fts5CreateTable(tls, - pConfig, ts+34408, ts+36783, 0, pzErr) + pConfig, ts+34455, ts+36830, 0, pzErr) } if rc == SQLITE_OK { rc = sqlite3Fts5CreateTable(tls, - pConfig, ts+35958, ts+36815, 1, pzErr) + pConfig, ts+36005, ts+36862, 1, pzErr) } if rc == SQLITE_OK { - rc = sqlite3Fts5StorageConfigValue(tls, p, ts+34554, uintptr(0), FTS5_CURRENT_VERSION) + rc = sqlite3Fts5StorageConfigValue(tls, p, ts+34601, uintptr(0), FTS5_CURRENT_VERSION) } } @@ -148547,12 +148606,12 @@ (*Fts5Storage)(unsafe.Pointer(p)).FbTotalsValid = 0 rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36832, + ts+36879, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36882, + ts+36929, libc.VaList(bp+32, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) } @@ -148560,7 +148619,7 @@ rc = sqlite3Fts5IndexReinit(tls, (*Fts5Storage)(unsafe.Pointer(p)).FpIndex) } if rc == SQLITE_OK { - rc = sqlite3Fts5StorageConfigValue(tls, p, ts+34554, uintptr(0), FTS5_CURRENT_VERSION) + rc = sqlite3Fts5StorageConfigValue(tls, p, ts+34601, uintptr(0), FTS5_CURRENT_VERSION) } return rc } @@ -148736,7 +148795,7 @@ var zSql uintptr var rc int32 - zSql = Xsqlite3_mprintf(tls, ts+36911, + zSql = Xsqlite3_mprintf(tls, ts+36958, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zSuffix)) if zSql == uintptr(0) { rc = SQLITE_NOMEM @@ -148918,14 +148977,14 @@ if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NORMAL { *(*I64)(unsafe.Pointer(bp + 40)) = int64(0) - rc = fts5StorageCount(tls, p, ts+34057, bp+40) + rc = fts5StorageCount(tls, p, ts+34104, bp+40) if rc == SQLITE_OK && *(*I64)(unsafe.Pointer(bp + 40)) != (*Fts5Storage)(unsafe.Pointer(p)).FnTotalRow { rc = SQLITE_CORRUPT | int32(1)<<8 } } if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { *(*I64)(unsafe.Pointer(bp + 48)) = int64(0) - rc = fts5StorageCount(tls, p, ts+34408, bp+48) + rc = fts5StorageCount(tls, p, ts+34455, bp+48) if rc == SQLITE_OK && *(*I64)(unsafe.Pointer(bp + 48)) != (*Fts5Storage)(unsafe.Pointer(p)).FnTotalRow { rc = SQLITE_CORRUPT | int32(1)<<8 } @@ -149120,9 +149179,9 @@ libc.Xmemcpy(tls, p, uintptr(unsafe.Pointer(&aAsciiTokenChar)), uint32(unsafe.Sizeof(aAsciiTokenChar))) for i = 0; rc == SQLITE_OK && i < nArg; i = i + 2 { var zArg uintptr = *(*uintptr)(unsafe.Pointer(azArg + uintptr(i+1)*4)) - if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*4)), ts+36943) { + if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*4)), ts+36990) { fts5AsciiAddExceptions(tls, p, zArg, 1) - } else if 0 == Xsqlite3_stricmp(tls, 
*(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*4)), ts+36954) { + } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*4)), ts+37001) { fts5AsciiAddExceptions(tls, p, zArg, 0) } else { rc = SQLITE_ERROR @@ -149336,7 +149395,7 @@ } else { p = Xsqlite3_malloc(tls, int32(unsafe.Sizeof(Unicode61Tokenizer{}))) if p != 0 { - var zCat uintptr = ts + 36965 + var zCat uintptr = ts + 37012 var i int32 libc.Xmemset(tls, p, 0, uint32(unsafe.Sizeof(Unicode61Tokenizer{}))) @@ -149348,7 +149407,7 @@ } for i = 0; rc == SQLITE_OK && i < nArg; i = i + 2 { - if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*4)), ts+36974) { + if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*4)), ts+37021) { zCat = *(*uintptr)(unsafe.Pointer(azArg + uintptr(i+1)*4)) } } @@ -149359,18 +149418,18 @@ for i = 0; rc == SQLITE_OK && i < nArg; i = i + 2 { var zArg uintptr = *(*uintptr)(unsafe.Pointer(azArg + uintptr(i+1)*4)) - if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*4)), ts+36985) { + if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*4)), ts+37032) { if int32(*(*uint8)(unsafe.Pointer(zArg))) != '0' && int32(*(*uint8)(unsafe.Pointer(zArg))) != '1' && int32(*(*uint8)(unsafe.Pointer(zArg))) != '2' || *(*uint8)(unsafe.Pointer(zArg + 1)) != 0 { rc = SQLITE_ERROR } else { (*Unicode61Tokenizer)(unsafe.Pointer(p)).FeRemoveDiacritic = int32(*(*uint8)(unsafe.Pointer(zArg))) - '0' } - } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*4)), ts+36943) { + } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*4)), ts+36990) { rc = fts5UnicodeAddExceptions(tls, p, zArg, 1) - } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*4)), ts+36954) { + } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*4)), ts+37001) { rc = fts5UnicodeAddExceptions(tls, p, zArg, 0) - } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*4)), ts+36974) { + } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*4)), ts+37021) { } else { rc = SQLITE_ERROR } @@ -149646,7 +149705,7 @@ var rc int32 = SQLITE_OK var pRet uintptr *(*uintptr)(unsafe.Pointer(bp)) = uintptr(0) - var zBase uintptr = ts + 37003 + var zBase uintptr = ts + 37050 if nArg > 0 { zBase = *(*uintptr)(unsafe.Pointer(azArg)) @@ -149786,7 +149845,7 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*uint8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'a': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37013, aBuf+uintptr(nBuf-2), uint32(2)) { + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37060, aBuf+uintptr(nBuf-2), uint32(2)) { if fts5Porter_MGt1(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 } @@ -149794,11 +149853,11 @@ break case 'c': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37016, aBuf+uintptr(nBuf-4), uint32(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37063, aBuf+uintptr(nBuf-4), uint32(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37021, aBuf+uintptr(nBuf-4), uint32(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37068, aBuf+uintptr(nBuf-4), uint32(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } @@ -149806,7 +149865,7 @@ break case 'e': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37026, 
aBuf+uintptr(nBuf-2), uint32(2)) { + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37073, aBuf+uintptr(nBuf-2), uint32(2)) { if fts5Porter_MGt1(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 } @@ -149814,7 +149873,7 @@ break case 'i': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37029, aBuf+uintptr(nBuf-2), uint32(2)) { + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37076, aBuf+uintptr(nBuf-2), uint32(2)) { if fts5Porter_MGt1(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 } @@ -149822,11 +149881,11 @@ break case 'l': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37032, aBuf+uintptr(nBuf-4), uint32(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37079, aBuf+uintptr(nBuf-4), uint32(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37037, aBuf+uintptr(nBuf-4), uint32(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37084, aBuf+uintptr(nBuf-4), uint32(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } @@ -149834,19 +149893,19 @@ break case 'n': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37042, aBuf+uintptr(nBuf-3), uint32(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37089, aBuf+uintptr(nBuf-3), uint32(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37046, aBuf+uintptr(nBuf-5), uint32(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37093, aBuf+uintptr(nBuf-5), uint32(5)) { if fts5Porter_MGt1(tls, aBuf, nBuf-5) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37052, aBuf+uintptr(nBuf-4), uint32(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37099, aBuf+uintptr(nBuf-4), uint32(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } - } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37057, aBuf+uintptr(nBuf-3), uint32(3)) { + } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37104, aBuf+uintptr(nBuf-3), uint32(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -149854,11 +149913,11 @@ break case 'o': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37061, aBuf+uintptr(nBuf-3), uint32(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37108, aBuf+uintptr(nBuf-3), uint32(3)) { if fts5Porter_MGt1_and_S_or_T(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } - } else if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37065, aBuf+uintptr(nBuf-2), uint32(2)) { + } else if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37112, aBuf+uintptr(nBuf-2), uint32(2)) { if fts5Porter_MGt1(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 } @@ -149866,7 +149925,7 @@ break case 's': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37068, aBuf+uintptr(nBuf-3), uint32(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37115, aBuf+uintptr(nBuf-3), uint32(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -149874,11 +149933,11 @@ break case 't': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37072, aBuf+uintptr(nBuf-3), uint32(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37119, aBuf+uintptr(nBuf-3), uint32(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } - } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37076, aBuf+uintptr(nBuf-3), uint32(3)) { + } else if nBuf > 3 && 0 == 
libc.Xmemcmp(tls, ts+37123, aBuf+uintptr(nBuf-3), uint32(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -149886,7 +149945,7 @@ break case 'u': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37080, aBuf+uintptr(nBuf-3), uint32(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37127, aBuf+uintptr(nBuf-3), uint32(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -149894,7 +149953,7 @@ break case 'v': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37084, aBuf+uintptr(nBuf-3), uint32(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37131, aBuf+uintptr(nBuf-3), uint32(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -149902,7 +149961,7 @@ break case 'z': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37088, aBuf+uintptr(nBuf-3), uint32(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37135, aBuf+uintptr(nBuf-3), uint32(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -149918,24 +149977,24 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*uint8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'a': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37092, aBuf+uintptr(nBuf-2), uint32(2)) { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37072, uint32(3)) + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37139, aBuf+uintptr(nBuf-2), uint32(2)) { + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37119, uint32(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 + 3 ret = 1 } break case 'b': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37095, aBuf+uintptr(nBuf-2), uint32(2)) { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37098, uint32(3)) + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37142, aBuf+uintptr(nBuf-2), uint32(2)) { + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37145, uint32(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 + 3 ret = 1 } break case 'i': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37102, aBuf+uintptr(nBuf-2), uint32(2)) { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37088, uint32(3)) + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37149, aBuf+uintptr(nBuf-2), uint32(2)) { + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37135, uint32(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 + 3 ret = 1 } @@ -149950,44 +150009,44 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*uint8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'a': - if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37105, aBuf+uintptr(nBuf-7), uint32(7)) { + if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37152, aBuf+uintptr(nBuf-7), uint32(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37072, uint32(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37119, uint32(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } - } else if nBuf > 6 && 0 == libc.Xmemcmp(tls, ts+37113, aBuf+uintptr(nBuf-6), uint32(6)) { + } else if nBuf > 6 && 0 == libc.Xmemcmp(tls, ts+37160, aBuf+uintptr(nBuf-6), uint32(6)) { if fts5Porter_MGt0(tls, aBuf, nBuf-6) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-6), ts+37120, uint32(4)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-6), ts+37167, uint32(4)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 6 + 4 } } break case 'c': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37125, aBuf+uintptr(nBuf-4), uint32(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37172, aBuf+uintptr(nBuf-4), uint32(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37021, 
uint32(4)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37068, uint32(4)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 4 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37130, aBuf+uintptr(nBuf-4), uint32(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37177, aBuf+uintptr(nBuf-4), uint32(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37016, uint32(4)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37063, uint32(4)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 4 } } break case 'e': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37135, aBuf+uintptr(nBuf-4), uint32(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37182, aBuf+uintptr(nBuf-4), uint32(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37088, uint32(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37135, uint32(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 3 } } break case 'g': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37140, aBuf+uintptr(nBuf-4), uint32(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37187, aBuf+uintptr(nBuf-4), uint32(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+15480, uint32(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 3 @@ -149996,91 +150055,91 @@ break case 'l': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37145, aBuf+uintptr(nBuf-3), uint32(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37192, aBuf+uintptr(nBuf-3), uint32(3)) { if fts5Porter_MGt0(tls, aBuf, nBuf-3) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37098, uint32(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37145, uint32(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 + 3 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37149, aBuf+uintptr(nBuf-4), uint32(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37196, aBuf+uintptr(nBuf-4), uint32(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37013, uint32(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37060, uint32(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 2 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37154, aBuf+uintptr(nBuf-5), uint32(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37201, aBuf+uintptr(nBuf-5), uint32(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37057, uint32(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37104, uint32(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 3 } - } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37160, aBuf+uintptr(nBuf-3), uint32(3)) { + } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37207, aBuf+uintptr(nBuf-3), uint32(3)) { if fts5Porter_MGt0(tls, aBuf, nBuf-3) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37164, uint32(1)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37211, uint32(1)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 + 1 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37166, aBuf+uintptr(nBuf-5), uint32(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37213, aBuf+uintptr(nBuf-5), uint32(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37080, uint32(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37127, uint32(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 3 } } break case 'o': - if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37172, aBuf+uintptr(nBuf-7), uint32(7)) { + if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37219, aBuf+uintptr(nBuf-7), uint32(7)) { if fts5Porter_MGt0(tls, aBuf, 
nBuf-7) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37088, uint32(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37135, uint32(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37180, aBuf+uintptr(nBuf-5), uint32(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37227, aBuf+uintptr(nBuf-5), uint32(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37072, uint32(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37119, uint32(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 3 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37186, aBuf+uintptr(nBuf-4), uint32(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37233, aBuf+uintptr(nBuf-4), uint32(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37072, uint32(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37119, uint32(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 3 } } break case 's': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37191, aBuf+uintptr(nBuf-5), uint32(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37238, aBuf+uintptr(nBuf-5), uint32(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37013, uint32(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37060, uint32(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } - } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37197, aBuf+uintptr(nBuf-7), uint32(7)) { + } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37244, aBuf+uintptr(nBuf-7), uint32(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37084, uint32(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37131, uint32(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } - } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37205, aBuf+uintptr(nBuf-7), uint32(7)) { + } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37252, aBuf+uintptr(nBuf-7), uint32(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37213, uint32(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37260, uint32(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } - } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37217, aBuf+uintptr(nBuf-7), uint32(7)) { + } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37264, aBuf+uintptr(nBuf-7), uint32(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37080, uint32(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37127, uint32(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } } break case 't': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37225, aBuf+uintptr(nBuf-5), uint32(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37272, aBuf+uintptr(nBuf-5), uint32(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37013, uint32(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37060, uint32(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37231, aBuf+uintptr(nBuf-5), uint32(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37278, aBuf+uintptr(nBuf-5), uint32(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37084, uint32(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37131, uint32(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 3 } - } else if nBuf > 6 && 0 == libc.Xmemcmp(tls, ts+37237, aBuf+uintptr(nBuf-6), uint32(6)) { + } else if nBuf > 6 && 
0 == libc.Xmemcmp(tls, ts+37284, aBuf+uintptr(nBuf-6), uint32(6)) { if fts5Porter_MGt0(tls, aBuf, nBuf-6) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-6), ts+37098, uint32(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-6), ts+37145, uint32(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 6 + 3 } } @@ -150095,16 +150154,16 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*uint8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'a': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37244, aBuf+uintptr(nBuf-4), uint32(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37291, aBuf+uintptr(nBuf-4), uint32(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37029, uint32(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37076, uint32(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 2 } } break case 's': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37249, aBuf+uintptr(nBuf-4), uint32(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37296, aBuf+uintptr(nBuf-4), uint32(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } @@ -150112,21 +150171,21 @@ break case 't': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37254, aBuf+uintptr(nBuf-5), uint32(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37301, aBuf+uintptr(nBuf-5), uint32(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37029, uint32(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37076, uint32(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37260, aBuf+uintptr(nBuf-5), uint32(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37307, aBuf+uintptr(nBuf-5), uint32(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37029, uint32(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37076, uint32(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } } break case 'u': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37213, aBuf+uintptr(nBuf-3), uint32(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37260, aBuf+uintptr(nBuf-3), uint32(3)) { if fts5Porter_MGt0(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -150134,7 +150193,7 @@ break case 'v': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37266, aBuf+uintptr(nBuf-5), uint32(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37313, aBuf+uintptr(nBuf-5), uint32(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 } @@ -150142,9 +150201,9 @@ break case 'z': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37272, aBuf+uintptr(nBuf-5), uint32(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37319, aBuf+uintptr(nBuf-5), uint32(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37013, uint32(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37060, uint32(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } } @@ -150159,12 +150218,12 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*uint8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'e': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37278, aBuf+uintptr(nBuf-3), uint32(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37325, aBuf+uintptr(nBuf-3), uint32(3)) { if fts5Porter_MGt0(tls, aBuf, nBuf-3) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37282, uint32(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37329, uint32(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 + 2 } - } else if nBuf > 2 && 0 == 
libc.Xmemcmp(tls, ts+37285, aBuf+uintptr(nBuf-2), uint32(2)) { + } else if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37332, aBuf+uintptr(nBuf-2), uint32(2)) { if fts5Porter_Vowel(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 ret = 1 @@ -150173,7 +150232,7 @@ break case 'n': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37288, aBuf+uintptr(nBuf-3), uint32(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37335, aBuf+uintptr(nBuf-3), uint32(3)) { if fts5Porter_Vowel(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 ret = 1 @@ -150329,7 +150388,7 @@ (*TrigramTokenizer)(unsafe.Pointer(pNew)).FbFold = 1 for i = 0; rc == SQLITE_OK && i < nArg; i = i + 2 { var zArg uintptr = *(*uintptr)(unsafe.Pointer(azArg + uintptr(i+1)*4)) - if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*4)), ts+37292) { + if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*4)), ts+37339) { if int32(*(*uint8)(unsafe.Pointer(zArg))) != '0' && int32(*(*uint8)(unsafe.Pointer(zArg))) != '1' || *(*uint8)(unsafe.Pointer(zArg + 1)) != 0 { rc = SQLITE_ERROR } else { @@ -150509,22 +150568,22 @@ defer tls.Free(64) *(*[4]BuiltinTokenizer)(unsafe.Pointer(bp)) = [4]BuiltinTokenizer{ - {FzName: ts + 37003, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { + {FzName: ts + 37050, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 }{fts5UnicodeCreate})), FxDelete: *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5UnicodeDelete})), FxTokenize: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, int32, uintptr) int32 }{fts5UnicodeTokenize}))}}, - {FzName: ts + 37307, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { + {FzName: ts + 37354, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 }{fts5AsciiCreate})), FxDelete: *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5AsciiDelete})), FxTokenize: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, int32, uintptr) int32 }{fts5AsciiTokenize}))}}, - {FzName: ts + 37313, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { + {FzName: ts + 37360, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 }{fts5PorterCreate})), FxDelete: *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5PorterDelete})), FxTokenize: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, int32, uintptr) int32 }{fts5PorterTokenize}))}}, - {FzName: ts + 37320, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { + {FzName: ts + 37367, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 }{fts5TriCreate})), FxDelete: *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5TriDelete})), FxTokenize: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, int32, uintptr) int32 @@ -151666,14 +151725,14 @@ var zCopy uintptr = sqlite3Fts5Strndup(tls, bp+8, zType, -1) if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { sqlite3Fts5Dequote(tls, zCopy) - if Xsqlite3_stricmp(tls, zCopy, ts+37328) == 0 { + if Xsqlite3_stricmp(tls, zCopy, ts+37375) == 0 { *(*int32)(unsafe.Pointer(peType)) = FTS5_VOCAB_COL - 
} else if Xsqlite3_stricmp(tls, zCopy, ts+37332) == 0 { + } else if Xsqlite3_stricmp(tls, zCopy, ts+37379) == 0 { *(*int32)(unsafe.Pointer(peType)) = FTS5_VOCAB_ROW - } else if Xsqlite3_stricmp(tls, zCopy, ts+37336) == 0 { + } else if Xsqlite3_stricmp(tls, zCopy, ts+37383) == 0 { *(*int32)(unsafe.Pointer(peType)) = FTS5_VOCAB_INSTANCE } else { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+37345, libc.VaList(bp, zCopy)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+37392, libc.VaList(bp, zCopy)) *(*int32)(unsafe.Pointer(bp + 8)) = SQLITE_ERROR } Xsqlite3_free(tls, zCopy) @@ -151699,19 +151758,19 @@ defer tls.Free(20) *(*[3]uintptr)(unsafe.Pointer(bp + 4)) = [3]uintptr{ - ts + 37379, - ts + 37419, - ts + 37454, + ts + 37426, + ts + 37466, + ts + 37501, } var pRet uintptr = uintptr(0) *(*int32)(unsafe.Pointer(bp + 16)) = SQLITE_OK var bDb int32 - bDb = libc.Bool32(argc == 6 && libc.Xstrlen(tls, *(*uintptr)(unsafe.Pointer(argv + 1*4))) == Size_t(4) && libc.Xmemcmp(tls, ts+23352, *(*uintptr)(unsafe.Pointer(argv + 1*4)), uint32(4)) == 0) + bDb = libc.Bool32(argc == 6 && libc.Xstrlen(tls, *(*uintptr)(unsafe.Pointer(argv + 1*4))) == Size_t(4) && libc.Xmemcmp(tls, ts+23399, *(*uintptr)(unsafe.Pointer(argv + 1*4)), uint32(4)) == 0) if argc != 5 && bDb == 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+37497, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+37544, 0) *(*int32)(unsafe.Pointer(bp + 16)) = SQLITE_ERROR } else { var nByte int32 @@ -151844,11 +151903,11 @@ if (*Fts5VocabTable)(unsafe.Pointer(pTab)).FbBusy != 0 { (*Sqlite3_vtab)(unsafe.Pointer(pVTab)).FzErrMsg = Xsqlite3_mprintf(tls, - ts+37530, libc.VaList(bp, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) + ts+37577, libc.VaList(bp, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) return SQLITE_ERROR } zSql = sqlite3Fts5Mprintf(tls, bp+64, - ts+37561, + ts+37608, libc.VaList(bp+16, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) if zSql != 0 { *(*int32)(unsafe.Pointer(bp + 64)) = Xsqlite3_prepare_v2(tls, (*Fts5VocabTable)(unsafe.Pointer(pTab)).Fdb, zSql, -1, bp+68, uintptr(0)) @@ -151872,7 +151931,7 @@ *(*uintptr)(unsafe.Pointer(bp + 68)) = uintptr(0) if *(*int32)(unsafe.Pointer(bp + 64)) == SQLITE_OK { (*Sqlite3_vtab)(unsafe.Pointer(pVTab)).FzErrMsg = Xsqlite3_mprintf(tls, - ts+37612, libc.VaList(bp+48, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) + ts+37659, libc.VaList(bp+48, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) *(*int32)(unsafe.Pointer(bp + 64)) = SQLITE_ERROR } } else { @@ -152267,7 +152326,7 @@ func sqlite3Fts5VocabInit(tls *libc.TLS, pGlobal uintptr, db uintptr) int32 { var p uintptr = pGlobal - return Xsqlite3_create_module_v2(tls, db, ts+37638, uintptr(unsafe.Pointer(&fts5Vocab)), p, uintptr(0)) + return Xsqlite3_create_module_v2(tls, db, ts+37685, uintptr(unsafe.Pointer(&fts5Vocab)), p, uintptr(0)) } var fts5Vocab = Sqlite3_module{ @@ -152289,7 +152348,7 @@ // ************* End of stmt.c *********************************************** // Return the source-id for this library func Xsqlite3_sourceid(tls *libc.TLS) uintptr { - return ts + 37648 + return ts + 37695 } func init() { 
@@ -153266,5 +153325,5 @@ *(*func(*libc.TLS, uintptr, int32, uintptr) int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&vfs_template)) + 68)) = rbuVfsGetLastError } -var ts1 = "3.41.0\x00ATOMIC_INTRINSICS=1\x00COMPILER=gcc-10.2.1 20210110\x00DEFAULT_AUTOVACUUM\x00DEFAULT_CACHE_SIZE=-2000\x00DEFAULT_FILE_FORMAT=4\x00DEFAULT_JOURNAL_SIZE_LIMIT=-1\x00DEFAULT_MEMSTATUS=0\x00DEFAULT_MMAP_SIZE=0\x00DEFAULT_PAGE_SIZE=4096\x00DEFAULT_PCACHE_INITSZ=20\x00DEFAULT_RECURSIVE_TRIGGERS\x00DEFAULT_SECTOR_SIZE=4096\x00DEFAULT_SYNCHRONOUS=2\x00DEFAULT_WAL_AUTOCHECKPOINT=1000\x00DEFAULT_WAL_SYNCHRONOUS=2\x00DEFAULT_WORKER_THREADS=0\x00ENABLE_COLUMN_METADATA\x00ENABLE_FTS5\x00ENABLE_GEOPOLY\x00ENABLE_MATH_FUNCTIONS\x00ENABLE_MEMORY_MANAGEMENT\x00ENABLE_OFFSET_SQL_FUNC\x00ENABLE_PREUPDATE_HOOK\x00ENABLE_RBU\x00ENABLE_RTREE\x00ENABLE_SESSION\x00ENABLE_SNAPSHOT\x00ENABLE_STAT4\x00ENABLE_UNLOCK_NOTIFY\x00LIKE_DOESNT_MATCH_BLOBS\x00MALLOC_SOFT_LIMIT=1024\x00MAX_ATTACHED=10\x00MAX_COLUMN=2000\x00MAX_COMPOUND_SELECT=500\x00MAX_DEFAULT_PAGE_SIZE=8192\x00MAX_EXPR_DEPTH=1000\x00MAX_FUNCTION_ARG=127\x00MAX_LENGTH=1000000000\x00MAX_LIKE_PATTERN_LENGTH=50000\x00MAX_MMAP_SIZE=0x7fff0000\x00MAX_PAGE_COUNT=1073741823\x00MAX_PAGE_SIZE=65536\x00MAX_SQL_LENGTH=1000000000\x00MAX_TRIGGER_DEPTH=1000\x00MAX_VARIABLE_NUMBER=32766\x00MAX_VDBE_OP=250000000\x00MAX_WORKER_THREADS=8\x00MUTEX_NOOP\x00SOUNDEX\x00SYSTEM_MALLOC\x00TEMP_STORE=1\x00THREADSAFE=1\x00BINARY\x00ANY\x00BLOB\x00INT\x00INTEGER\x00REAL\x00TEXT\x0020b:20e\x0020c:20e\x0020e\x0040f-21a-21d\x00now\x00local time unavailable\x00second\x00minute\x00hour\x00\x00\x00day\x00\x00\x00\x00month\x00\x00year\x00\x00\x00auto\x00julianday\x00localtime\x00unixepoch\x00utc\x00weekday \x00start of \x00month\x00year\x00day\x00%02d\x00%06.3f\x00%03d\x00%.16g\x00%lld\x00%04d\x00date\x00time\x00datetime\x00strftime\x00current_time\x00current_timestamp\x00current_date\x00failed to allocate %u bytes of memory\x00failed memory resize %u to %u bytes\x00out of memory\x000123456789ABCDEF0123456789abcdef\x00-x0\x00X0\x00%\x00NaN\x00Inf\x00\x00NULL\x00(NULL)\x00.\x00(join-%u)\x00(subquery-%u)\x00thstndrd\x00922337203685477580\x00API call with %s database connection 
pointer\x00unopened\x00invalid\x00Savepoint\x00AutoCommit\x00Transaction\x00Checkpoint\x00JournalMode\x00Vacuum\x00VFilter\x00VUpdate\x00Init\x00Goto\x00Gosub\x00InitCoroutine\x00Yield\x00MustBeInt\x00Jump\x00Once\x00If\x00IfNot\x00IsType\x00Not\x00IfNullRow\x00SeekLT\x00SeekLE\x00SeekGE\x00SeekGT\x00IfNotOpen\x00IfNoHope\x00NoConflict\x00NotFound\x00Found\x00SeekRowid\x00NotExists\x00Last\x00IfSmaller\x00SorterSort\x00Sort\x00Rewind\x00SorterNext\x00Prev\x00Next\x00IdxLE\x00IdxGT\x00IdxLT\x00Or\x00And\x00IdxGE\x00RowSetRead\x00RowSetTest\x00Program\x00FkIfZero\x00IsNull\x00NotNull\x00Ne\x00Eq\x00Gt\x00Le\x00Lt\x00Ge\x00ElseEq\x00IfPos\x00IfNotZero\x00DecrJumpZero\x00IncrVacuum\x00VNext\x00Filter\x00PureFunc\x00Function\x00Return\x00EndCoroutine\x00HaltIfNull\x00Halt\x00Integer\x00Int64\x00String\x00BeginSubrtn\x00Null\x00SoftNull\x00Blob\x00Variable\x00Move\x00Copy\x00SCopy\x00IntCopy\x00FkCheck\x00ResultRow\x00CollSeq\x00AddImm\x00RealAffinity\x00Cast\x00Permutation\x00Compare\x00IsTrue\x00ZeroOrNull\x00Offset\x00Column\x00TypeCheck\x00Affinity\x00MakeRecord\x00Count\x00ReadCookie\x00SetCookie\x00ReopenIdx\x00BitAnd\x00BitOr\x00ShiftLeft\x00ShiftRight\x00Add\x00Subtract\x00Multiply\x00Divide\x00Remainder\x00Concat\x00OpenRead\x00OpenWrite\x00BitNot\x00OpenDup\x00OpenAutoindex\x00String8\x00OpenEphemeral\x00SorterOpen\x00SequenceTest\x00OpenPseudo\x00Close\x00ColumnsUsed\x00SeekScan\x00SeekHit\x00Sequence\x00NewRowid\x00Insert\x00RowCell\x00Delete\x00ResetCount\x00SorterCompare\x00SorterData\x00RowData\x00Rowid\x00NullRow\x00SeekEnd\x00IdxInsert\x00SorterInsert\x00IdxDelete\x00DeferredSeek\x00IdxRowid\x00FinishSeek\x00Destroy\x00Clear\x00ResetSorter\x00CreateBtree\x00SqlExec\x00ParseSchema\x00LoadAnalysis\x00DropTable\x00DropIndex\x00Real\x00DropTrigger\x00IntegrityCk\x00RowSetAdd\x00Param\x00FkCounter\x00MemMax\x00OffsetLimit\x00AggInverse\x00AggStep\x00AggStep1\x00AggValue\x00AggFinal\x00Expire\x00CursorLock\x00CursorUnlock\x00TableLock\x00VBegin\x00VCreate\x00VDestroy\x00VOpen\x00VInitIn\x00VColumn\x00VRename\x00Pagecount\x00MaxPgcnt\x00ClrSubtype\x00FilterAdd\x00Trace\x00CursorHint\x00ReleaseReg\x00Noop\x00Explain\x00Abortable\x00open\x00close\x00access\x00getcwd\x00stat\x00fstat\x00ftruncate\x00fcntl\x00read\x00pread\x00pread64\x00write\x00pwrite\x00pwrite64\x00fchmod\x00fallocate\x00unlink\x00openDirectory\x00mkdir\x00rmdir\x00fchown\x00geteuid\x00mmap\x00munmap\x00mremap\x00getpagesize\x00readlink\x00lstat\x00ioctl\x00attempt to open \"%s\" as file descriptor %d\x00/dev/null\x00os_unix.c:%d: (%d) %s(%s) - %s\x00cannot fstat db file %s\x00file unlinked while open: %s\x00multiple links to file: %s\x00file renamed while open: %s\x00%s\x00full_fsync\x00%s-shm\x00readonly_shm\x00psow\x00unix-excl\x00%s.lock\x00/var/tmp\x00/usr/tmp\x00/tmp\x00SQLITE_TMPDIR\x00TMPDIR\x00%s/etilqs_%llx%c\x00modeof\x00fsync\x00/dev/urandom\x00unix\x00unix-none\x00unix-dotfile\x00memdb\x00memdb(%p,%lld)\x00PRAGMA \"%w\".page_count\x00ATTACH x AS %Q\x00recovered %d pages from %s\x00-journal\x00-wal\x00nolock\x00immutable\x00PRAGMA table_list\x00recovered %d frames from WAL file %s\x00cannot limit WAL size: %s\x00SQLite format 3\x00:memory:\x00@ \x00\n\x00invalid page number %d\x002nd reference to page %d\x00Failed to read ptrmap key=%d\x00Bad ptr map entry key=%d expected=(%d,%d) got=(%d,%d)\x00failed to get page %d\x00freelist leaf count too big on page %d\x00%s is %d but should be %d\x00size\x00overflow list length\x00Page %u: \x00unable to get the page. 
error code=%d\x00btreeInitPage() returns error code %d\x00free space corruption\x00On tree page %u cell %d: \x00On page %u at right child: \x00Offset %d out of range %d..%d\x00Extends off end of page\x00Rowid %lld out of order\x00Child page depth differs\x00Multiple uses for byte %u of page %u\x00Fragmentation of %d bytes reported as %d on page %u\x00Main freelist: \x00max rootpage (%d) disagrees with header (%d)\x00incremental_vacuum enabled with a max rootpage of zero\x00Page %d is never used\x00Pointer map page %d is referenced\x00unknown database %s\x00destination database is in use\x00source and destination must be distinct\x00%!.15g\x00-\x00%s%s\x00k(%d\x00B\x00,%s%s%s\x00N.\x00)\x00%.18s-%s\x00%s(%d)\x00%d\x00(blob)\x00vtab:%p\x00%c%u\x00]\x00program\x00?\x008\x0016LE\x0016BE\x00addr\x00opcode\x00p1\x00p2\x00p3\x00p4\x00p5\x00comment\x00id\x00parent\x00notused\x00detail\x00%.4c%s%.16c\x00MJ delete: %s\x00MJ collide: %s\x00-mj%06X9%02X\x00FOREIGN KEY constraint failed\x00a CHECK constraint\x00a generated column\x00an index\x00non-deterministic use of %s() in %s\x00API called with finalized prepared statement\x00API called with NULL prepared statement\x00string or blob too big\x00bind on a busy prepared statement: [%s]\x00-- \x00'%.*q'\x00zeroblob(%d)\x00x'\x00%02x\x00'\x00%s constraint failed\x00%z: %s\x00abort at %d in [%s]: %s\x00cannot store %s value in %s column %s.%s\x00cannot open savepoint - SQL statements in progress\x00no such savepoint: %s\x00cannot release savepoint - SQL statements in progress\x00cannot commit transaction - SQL statements in progress\x00cannot start a transaction within a transaction\x00cannot rollback - no transaction is active\x00cannot commit - no transaction is active\x00database schema has changed\x00index corruption\x00sqlite_master\x00SELECT*FROM\"%w\".%s WHERE %s ORDER BY rowid\x00too many levels of trigger recursion\x00cannot change %s wal mode from within a transaction\x00into\x00out of\x00database table is locked: %s\x00ValueList\x00-- %s\x00statement aborts at %d: [%s] %s\x00NOT NULL\x00UNIQUE\x00CHECK\x00FOREIGN KEY\x00cannot open value of type %s\x00null\x00real\x00integer\x00no such rowid: %lld\x00cannot open virtual table: %s\x00cannot open table without rowid: %s\x00cannot open view: %s\x00no such column: \"%s\"\x00foreign key\x00indexed\x00cannot open %s column for writing\x00sqlite_\x00sqlite_temp_master\x00sqlite_temp_schema\x00sqlite_schema\x00main\x00*\x00new\x00old\x00excluded\x00misuse of aliased aggregate %s\x00misuse of aliased window function %s\x00row value misused\x00double-quoted string literal: \"%w\"\x00coalesce\x00no such column\x00ambiguous column name\x00%s: %s.%s.%s\x00%s: %s.%s\x00%s: %s\x00partial index WHERE clauses\x00index expressions\x00CHECK constraints\x00generated columns\x00%s prohibited in %s\x00true\x00false\x00the \".\" operator\x00second argument to %#T() must be a constant between 0.0 and 1.0\x00not authorized to use function: %#T\x00non-deterministic functions\x00%#T() may not be used as a window function\x00window\x00aggregate\x00misuse of %s function %#T()\x00no such function: %#T\x00wrong number of arguments to function %#T()\x00FILTER may not be used with non-aggregate %#T()\x00subqueries\x00parameters\x00%r %s BY term out of range - should be between 1 and %d\x00too many terms in ORDER BY clause\x00ORDER\x00%r ORDER BY term does not match any column in the result set\x00too many terms in %s BY clause\x00HAVING clause on a non-aggregate query\x00GROUP\x00aggregate functions are not allowed in the 
GROUP BY clause\x00Expression tree is too large (maximum depth %d)\x00IN(...) element has %d term%s - expected %d\x00s\x000\x00too many arguments on function %T\x00unsafe use of %#T()\x00variable number must be between ?1 and ?%d\x00too many SQL variables\x00%d columns assigned %d values\x00too many columns in %s\x00_ROWID_\x00ROWID\x00OID\x00USING ROWID SEARCH ON TABLE %s FOR IN-OPERATOR\x00USING INDEX %s FOR IN-OPERATOR\x00sub-select returns %d columns - expected %d\x00REUSE LIST SUBQUERY %d\x00%sLIST SUBQUERY %d\x00CORRELATED \x00REUSE SUBQUERY %d\x00%sSCALAR SUBQUERY %d\x001\x000x\x00hex literal too big: %s%#T\x00generated column loop on \"%s\"\x00blob\x00text\x00numeric\x00flexnum\x00none\x00misuse of aggregate: %#T()\x00unknown function: %#T()\x00RAISE() may only be used within a trigger-program\x00B\x00C\x00D\x00E\x00F\x00table %s may not be altered\x00SELECT 1 FROM \"%w\".sqlite_master WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%' AND sqlite_rename_test(%Q, sql, type, name, %d, %Q, %d)=NULL \x00SELECT 1 FROM temp.sqlite_master WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%' AND sqlite_rename_test(%Q, sql, type, name, 1, %Q, %d)=NULL \x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_quotefix(%Q, sql)WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%'\x00UPDATE temp.sqlite_master SET sql = sqlite_rename_quotefix('temp', sql)WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%'\x00there is already another table or index with this name: %s\x00table\x00view %s may not be altered\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, %d) WHERE (type!='index' OR tbl_name=%Q COLLATE nocase)AND name NOT LIKE 'sqliteX_%%' ESCAPE 'X'\x00UPDATE %Q.sqlite_master SET tbl_name = %Q, name = CASE WHEN type='table' THEN %Q WHEN name LIKE 'sqliteX_autoindex%%' ESCAPE 'X' AND type='index' THEN 'sqlite_autoindex_' || %Q || substr(name,%d+18) ELSE name END WHERE tbl_name=%Q COLLATE nocase AND (type='table' OR type='index' OR type='trigger');\x00sqlite_sequence\x00UPDATE \"%w\".sqlite_sequence set name = %Q WHERE name = %Q\x00UPDATE sqlite_temp_schema SET sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, 1), tbl_name = CASE WHEN tbl_name=%Q COLLATE nocase AND sqlite_rename_test(%Q, sql, type, name, 1, 'after rename', 0) THEN %Q ELSE tbl_name END WHERE type IN ('view', 'trigger')\x00after rename\x00SELECT raise(ABORT,%Q) FROM \"%w\".\"%w\"\x00Cannot add a PRIMARY KEY column\x00Cannot add a UNIQUE column\x00Cannot add a REFERENCES column with non-NULL default value\x00Cannot add a NOT NULL column with default value NULL\x00Cannot add a column with non-constant default\x00cannot add a STORED column\x00UPDATE \"%w\".sqlite_master SET sql = printf('%%.%ds, ',sql) || %Q || substr(sql,1+length(printf('%%.%ds',sql))) WHERE type = 'table' AND name = %Q\x00SELECT CASE WHEN quick_check GLOB 'CHECK*' THEN raise(ABORT,'CHECK constraint failed') ELSE raise(ABORT,'NOT NULL constraint failed') END FROM pragma_quick_check(%Q,%Q) WHERE quick_check GLOB 'CHECK*' OR quick_check GLOB 'NULL*'\x00virtual tables may not be altered\x00Cannot add a column to a view\x00sqlite_altertab_%s\x00view\x00virtual table\x00cannot %s %s \"%s\"\x00drop column from\x00rename columns of\x00no such column: \"%T\"\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, %d) WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND (type != 'index' OR 
tbl_name = %Q)\x00UPDATE temp.sqlite_master SET sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, 1) WHERE type IN ('trigger', 'view')\x00error in %s %s%s%s: %s\x00 \x00CREATE \x00\"%w\" \x00%Q%s\x00%.*s%s\x00cannot drop %s column: \"%s\"\x00PRIMARY KEY\x00cannot drop column \"%s\": no other columns exist\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_drop_column(%d, sql, %d) WHERE (type=='table' AND tbl_name=%Q COLLATE nocase)\x00after drop column\x00sqlite_rename_column\x00sqlite_rename_table\x00sqlite_rename_test\x00sqlite_drop_column\x00sqlite_rename_quotefix\x00CREATE TABLE %Q.%s(%s)\x00DELETE FROM %Q.%s WHERE %s=%Q\x00DELETE FROM %Q.%s\x00sqlite_stat1\x00tbl,idx,stat\x00sqlite_stat4\x00tbl,idx,neq,nlt,ndlt,sample\x00sqlite_stat3\x00stat_init\x00stat_push\x00%llu\x00 %llu\x00%llu \x00stat_get\x00sqlite\\_%\x00BBB\x00idx\x00tbl\x00unordered*\x00sz=[0-9]*\x00noskipscan*\x00SELECT idx,count(*) FROM %Q.sqlite_stat4 GROUP BY idx\x00SELECT idx,neq,nlt,ndlt,sample FROM %Q.sqlite_stat4\x00SELECT tbl,idx,stat FROM %Q.sqlite_stat1\x00x\x00\x00too many attached databases - max %d\x00database %s is already in use\x00database is already attached\x00attached databases must use the same text encoding as main database\x00unable to open database: %s\x00no such database: %s\x00cannot detach database %s\x00database %s is locked\x00sqlite_detach\x00sqlite_attach\x00%s cannot use variables\x00%s %T cannot reference objects in database %s\x00authorizer malfunction\x00%s.%s\x00%s.%z\x00access to %z is prohibited\x00not authorized\x00pragma_\x00no such view\x00no such table\x00corrupt database\x00unknown database %T\x00object name reserved for internal use: %s\x00temporary table name must be unqualified\x00%s %T already exists\x00there is already an index named %s\x00sqlite_returning\x00cannot use RETURNING in a trigger\x00too many columns on %s\x00always\x00generated\x00duplicate column name: %s\x00default value of column [%s] is not constant\x00cannot use DEFAULT on a generated column\x00generated columns cannot be part of the PRIMARY KEY\x00table \"%s\" has more than one primary key\x00AUTOINCREMENT is only allowed on an INTEGER PRIMARY KEY\x00virtual tables cannot use computed columns\x00virtual\x00stored\x00error in generated column \"%s\"\x00,\x00\n \x00,\n \x00\n)\x00CREATE TABLE \x00 TEXT\x00 NUM\x00 INT\x00 REAL\x00unknown datatype for %s.%s: \"%s\"\x00missing datatype for %s.%s\x00AUTOINCREMENT not allowed on WITHOUT ROWID tables\x00PRIMARY KEY missing on table %s\x00must have at least one non-generated column\x00TABLE\x00VIEW\x00CREATE %s %.*s\x00UPDATE %Q.sqlite_master SET type='%s', name=%Q, tbl_name=%Q, rootpage=#%d, sql=%Q WHERE rowid=#%d\x00CREATE TABLE %Q.sqlite_sequence(name,seq)\x00tbl_name='%q' AND type!='trigger'\x00parameters are not allowed in views\x00view %s is circularly defined\x00corrupt schema\x00UPDATE %Q.sqlite_master SET rootpage=%d WHERE #%d AND rootpage=#%d\x00sqlite_stat%d\x00DELETE FROM %Q.sqlite_sequence WHERE name=%Q\x00DELETE FROM %Q.sqlite_master WHERE tbl_name=%Q and type!='trigger'\x00table %s may not be dropped\x00use DROP TABLE to delete table %s\x00use DROP VIEW to delete view %s\x00foreign key on %s should reference only one column of table %T\x00number of columns in foreign key does not match the number of columns in the referenced table\x00unknown column \"%s\" in foreign key definition\x00unsupported use of NULLS %s\x00FIRST\x00LAST\x00index\x00cannot create a TEMP index on non-TEMP table \"%s\"\x00table %s may not be indexed\x00views may not be 
indexed\x00virtual tables may not be indexed\x00there is already a table named %s\x00index %s already exists\x00sqlite_autoindex_%s_%d\x00expressions prohibited in PRIMARY KEY and UNIQUE constraints\x00conflicting ON CONFLICT clauses specified\x00invalid rootpage\x00CREATE%s INDEX %.*s\x00 UNIQUE\x00INSERT INTO %Q.sqlite_master VALUES('index',%Q,%Q,#%d,%Q);\x00name='%q' AND type='index'\x00no such index: %S\x00index associated with UNIQUE or PRIMARY KEY constraint cannot be dropped\x00DELETE FROM %Q.sqlite_master WHERE name=%Q AND type='index'\x00too many FROM clause terms, max: %d\x00a JOIN clause is required before %s\x00ON\x00USING\x00BEGIN\x00ROLLBACK\x00COMMIT\x00RELEASE\x00unable to open a temporary database file for storing temporary tables\x00index '%q'\x00, \x00%s.rowid\x00unable to identify the object to be reindexed\x00duplicate WITH table name: %s\x00no such collation sequence: %s\x00unsafe use of virtual table \"%s\"\x00table %s may not be modified\x00cannot modify %s because it is a view\x00rows deleted\x00integer overflow\x00%.*f\x00LIKE or GLOB pattern too complex\x00ESCAPE expression must be a single character\x00%!.20e\x00%Q\x00?000\x00MATCH\x00like\x00implies_nonnull_row\x00expr_compare\x00expr_implies_expr\x00affinity\x00soundex\x00load_extension\x00sqlite_compileoption_used\x00sqlite_compileoption_get\x00unlikely\x00likelihood\x00likely\x00sqlite_offset\x00ltrim\x00rtrim\x00trim\x00min\x00max\x00typeof\x00subtype\x00length\x00instr\x00printf\x00format\x00unicode\x00char\x00abs\x00round\x00upper\x00lower\x00hex\x00unhex\x00ifnull\x00random\x00randomblob\x00nullif\x00sqlite_version\x00sqlite_source_id\x00sqlite_log\x00quote\x00last_insert_rowid\x00changes\x00total_changes\x00replace\x00zeroblob\x00substr\x00substring\x00sum\x00total\x00avg\x00count\x00group_concat\x00glob\x00ceil\x00ceiling\x00floor\x00trunc\x00ln\x00log\x00log10\x00log2\x00exp\x00pow\x00power\x00mod\x00acos\x00asin\x00atan\x00atan2\x00cos\x00sin\x00tan\x00cosh\x00sinh\x00tanh\x00acosh\x00asinh\x00atanh\x00sqrt\x00radians\x00degrees\x00pi\x00sign\x00iif\x00foreign key mismatch - \"%w\" referencing \"%w\"\x00cannot INSERT into generated column \"%s\"\x00table %S has no column named %s\x00table %S has %d columns but %d values were supplied\x00%d values for %d columns\x00UPSERT not implemented for virtual table \"%s\"\x00cannot UPSERT a view\x00rows inserted\x00sqlite3_extension_init\x00sqlite3_\x00lib\x00_init\x00no entry point [%s] in shared library [%s]\x00error during initialization: %s\x00unable to open shared library [%.*s]\x00so\x00automatic extension loading failed: 
%s\x00seq\x00from\x00to\x00on_update\x00on_delete\x00match\x00cid\x00name\x00type\x00notnull\x00dflt_value\x00pk\x00hidden\x00schema\x00ncol\x00wr\x00strict\x00seqno\x00desc\x00coll\x00key\x00builtin\x00enc\x00narg\x00flags\x00wdth\x00hght\x00flgs\x00unique\x00origin\x00partial\x00rowid\x00fkid\x00file\x00busy\x00checkpointed\x00database\x00status\x00cache_size\x00timeout\x00analysis_limit\x00application_id\x00auto_vacuum\x00automatic_index\x00busy_timeout\x00cache_spill\x00case_sensitive_like\x00cell_size_check\x00checkpoint_fullfsync\x00collation_list\x00compile_options\x00count_changes\x00data_version\x00database_list\x00default_cache_size\x00defer_foreign_keys\x00empty_result_callbacks\x00encoding\x00foreign_key_check\x00foreign_key_list\x00foreign_keys\x00freelist_count\x00full_column_names\x00fullfsync\x00function_list\x00hard_heap_limit\x00ignore_check_constraints\x00incremental_vacuum\x00index_info\x00index_list\x00index_xinfo\x00integrity_check\x00journal_mode\x00journal_size_limit\x00legacy_alter_table\x00locking_mode\x00max_page_count\x00mmap_size\x00module_list\x00optimize\x00page_count\x00page_size\x00pragma_list\x00query_only\x00quick_check\x00read_uncommitted\x00recursive_triggers\x00reverse_unordered_selects\x00schema_version\x00secure_delete\x00short_column_names\x00shrink_memory\x00soft_heap_limit\x00synchronous\x00table_info\x00table_list\x00table_xinfo\x00temp_store\x00temp_store_directory\x00threads\x00trusted_schema\x00user_version\x00wal_autocheckpoint\x00wal_checkpoint\x00writable_schema\x00onoffalseyestruextrafull\x00exclusive\x00normal\x00full\x00incremental\x00memory\x00temporary storage cannot be changed from within a transaction\x00SET NULL\x00SET DEFAULT\x00CASCADE\x00RESTRICT\x00NO ACTION\x00delete\x00persist\x00off\x00truncate\x00wal\x00w\x00a\x00sissii\x00utf8\x00utf16le\x00utf16be\x00-%T\x00fast\x00not a writable directory\x00Safety level may not be changed inside a transaction\x00reset\x00issisii\x00issisi\x00SELECT*FROM\"%w\"\x00shadow\x00sssiii\x00iisX\x00isiX\x00c\x00u\x00isisi\x00iss\x00is\x00iissssss\x00NONE\x00siX\x00*** in database %s ***\n\x00row not in PRIMARY KEY order for %s\x00NULL value in %s.%s\x00non-%s value in %s.%s\x00NUMERIC value in %s.%s\x00C\x00TEXT value in %s.%s\x00CHECK constraint failed in %s\x00row \x00 missing from index \x00 values differ from index \x00non-unique entry in index \x00wrong # of entries in index \x00ok\x00unsupported encoding: %s\x00restart\x00ANALYZE \"%w\".\"%w\"\x00UTF8\x00UTF-8\x00UTF-16le\x00UTF-16be\x00UTF16le\x00UTF16be\x00UTF-16\x00UTF16\x00CREATE TABLE x\x00%c\"%s\"\x00(\"%s\"\x00,arg HIDDEN\x00,schema HIDDEN\x00PRAGMA \x00%Q.\x00=%Q\x00error in %s %s after %s: %s\x00malformed database schema (%s)\x00%z - %s\x00rename\x00drop column\x00add column\x00orphan index\x00CREATE TABLE x(type text,name text,tbl_name text,rootpage int,sql text)\x00unsupported file format\x00SELECT*FROM\"%w\".%s ORDER BY rowid\x00database schema is locked: %s\x00statement too long\x00unknown join type: %T%s%T%s%T\x00naturaleftouterightfullinnercross\x00a NATURAL join may not have an ON or USING clause\x00cannot join using column %s - column not present in both tables\x00ambiguous reference to %s in USING()\x00UNION ALL\x00INTERSECT\x00EXCEPT\x00UNION\x00USE TEMP B-TREE FOR %s\x00USE TEMP B-TREE FOR %sORDER BY\x00RIGHT PART OF \x00column%d\x00%.*z:%u\x00NUM\x00cannot use window functions in recursive queries\x00recursive aggregate queries not supported\x00SETUP\x00RECURSIVE STEP\x00SCAN %d CONSTANT ROW%s\x00S\x00COMPOUND 
QUERY\x00LEFT-MOST SUBQUERY\x00%s USING TEMP B-TREE\x00all VALUES must have the same number of terms\x00SELECTs to the left and right of %s do not have the same number of result columns\x00MERGE (%s)\x00LEFT\x00RIGHT\x00no such index: %s\x00'%s' is not a function\x00no such index: \"%s\"\x00multiple references to recursive table: %s\x00circular reference: %s\x00table %s has %d values for %d columns\x00multiple recursive references: %s\x00recursive reference in a subquery: %s\x00%!S\x00too many references to \"%s\": max 65535\x00access to view \"%s\" prohibited\x00..%s\x00%s.%s.%s\x00no such table: %s\x00no tables specified\x00too many columns in result set\x00DISTINCT aggregates must have exactly one argument\x00USE TEMP B-TREE FOR %s(DISTINCT)\x00SCAN %s%s%s\x00 USING COVERING INDEX \x00target object/alias may not appear in FROM clause: %s\x00expected %d columns for '%s' but got %d\x00CO-ROUTINE %!S\x00MATERIALIZE %!S\x00DISTINCT\x00GROUP BY\x00sqlite3_get_table() called with two or more incompatible queries\x00temporary trigger may not have qualified name\x00trigger\x00cannot create triggers on virtual tables\x00trigger %T already exists\x00cannot create trigger on system table\x00cannot create %s trigger on view: %S\x00BEFORE\x00AFTER\x00cannot create INSTEAD OF trigger on table: %S\x00trigger \"%s\" may not write to shadow table \"%s\"\x00INSERT INTO %Q.sqlite_master VALUES('trigger',%Q,%Q,0,'CREATE TRIGGER %q')\x00type='trigger' AND name='%q'\x00no such trigger: %S\x00DELETE FROM %Q.sqlite_master WHERE name=%Q AND type='trigger'\x00%s RETURNING is not available on virtual tables\x00DELETE\x00UPDATE\x00RETURNING may not use \"TABLE.*\" wildcards\x00-- TRIGGER %s\x00cannot UPDATE generated column \"%s\"\x00no such column: %s\x00rows updated\x00%r \x00%sON CONFLICT clause does not match any PRIMARY KEY or UNIQUE constraint\x00CRE\x00INS\x00cannot VACUUM from within a transaction\x00cannot VACUUM - SQL statements in progress\x00non-text filename\x00ATTACH %Q AS vacuum_db\x00output file already exists\x00SELECT sql FROM \"%w\".sqlite_schema WHERE type='table'AND name<>'sqlite_sequence' AND coalesce(rootpage,1)>0\x00SELECT sql FROM \"%w\".sqlite_schema WHERE type='index'\x00SELECT'INSERT INTO vacuum_db.'||quote(name)||' SELECT*FROM\"%w\".'||quote(name)FROM vacuum_db.sqlite_schema WHERE type='table'AND coalesce(rootpage,1)>0\x00INSERT INTO vacuum_db.sqlite_schema SELECT*FROM \"%w\".sqlite_schema WHERE type IN('view','trigger') OR(type='table'AND rootpage=0)\x00CREATE VIRTUAL TABLE %T\x00UPDATE %Q.sqlite_master SET type='table', name=%Q, tbl_name=%Q, rootpage=0, sql=%Q WHERE rowid=#%d\x00name=%Q AND sql=%Q\x00vtable constructor called recursively: %s\x00vtable constructor failed: %s\x00vtable constructor did not declare schema: %s\x00no such module: %s\x00\x00 AND \x00(\x00 (\x00%s=?\x00ANY(%s)\x00>\x00<\x00%s %S\x00SEARCH\x00SCAN\x00AUTOMATIC PARTIAL COVERING INDEX\x00AUTOMATIC COVERING INDEX\x00COVERING INDEX %s\x00INDEX %s\x00 USING \x00 USING INTEGER PRIMARY KEY (%s\x00>? 
AND %s\x00%c?)\x00 VIRTUAL TABLE INDEX %d:%s\x00 LEFT-JOIN\x00BLOOM FILTER ON %S (\x00rowid=?\x00MULTI-INDEX OR\x00INDEX %d\x00RIGHT-JOIN %s\x00regexp\x00ON clause references tables to its right\x00NOCASE\x00too many arguments on %s() - max %d\x00automatic index on %s(%s)\x00auto-index\x00%s.xBestIndex malfunction\x00abbreviated query algorithm search\x00no query solution\x00at most %d tables in a join\x00SCAN CONSTANT ROW\x00second argument to nth_value must be a positive integer\x00argument of ntile must be a positive integer\x00row_number\x00dense_rank\x00rank\x00percent_rank\x00cume_dist\x00ntile\x00last_value\x00nth_value\x00first_value\x00lead\x00lag\x00no such window: %s\x00RANGE with offset PRECEDING/FOLLOWING requires one ORDER BY expression\x00FILTER clause may only be used with aggregate window functions\x00misuse of aggregate: %s()\x00unsupported frame specification\x00PARTITION clause\x00ORDER BY clause\x00frame specification\x00cannot override %s of window: %s\x00DISTINCT is not supported for window functions\x00frame starting offset must be a non-negative integer\x00frame ending offset must be a non-negative integer\x00frame starting offset must be a non-negative number\x00frame ending offset must be a non-negative number\x00%s clause should come after %s not before\x00ORDER BY\x00LIMIT\x00too many terms in compound SELECT\x00syntax error after column name \"%.*s\"\x00parser stack overflow\x00unknown table option: %.*s\x00set list\x00near \"%T\": syntax error\x00qualified table names are not allowed on INSERT, UPDATE, and DELETE statements within triggers\x00the INDEXED BY clause is not allowed on UPDATE or DELETE statements within triggers\x00the NOT INDEXED clause is not allowed on UPDATE or DELETE statements within triggers\x00incomplete input\x00unrecognized token: \"%T\"\x00%s in \"%s\"\x00create\x00temp\x00temporary\x00end\x00explain\x00unable to close due to unfinalized statements or unfinished backups\x00unknown error\x00abort due to ROLLBACK\x00another row available\x00no more rows available\x00not an error\x00SQL logic error\x00access permission denied\x00query aborted\x00database is locked\x00database table is locked\x00attempt to write a readonly database\x00interrupted\x00disk I/O error\x00database disk image is malformed\x00unknown operation\x00database or disk is full\x00unable to open database file\x00locking protocol\x00constraint failed\x00datatype mismatch\x00bad parameter or other API misuse\x00authorization denied\x00column index out of range\x00file is not a database\x00notification message\x00warning message\x00unable to delete/modify user-function due to active statements\x00unable to use function %s in the requested context\x00unknown database: %s\x00unable to delete/modify collation sequence due to active statements\x00file:\x00localhost\x00invalid uri authority: %.*s\x00vfs\x00cache\x00mode\x00no such %s mode: %s\x00%s mode not allowed: %s\x00no such vfs: %s\x00shared\x00private\x00ro\x00rw\x00rwc\x00RTRIM\x00\x00\x00\x00%s at line %d of [%.10s]\x00database corruption\x00misuse\x00cannot open file\x00no such table column: %s.%s\x00SQLITE_\x00database is deadlocked\x00array\x00object\x000123456789abcdef\x00JSON cannot hold BLOB values\x00malformed JSON\x00[0]\x00JSON path error near '%q'\x00json_%s() needs an odd number of arguments\x00$[\x00$.\x00json_object() requires an even number of arguments\x00json_object() labels must be TEXT\x00set\x00insert\x00[]\x00{}\x00CREATE TABLE x(key,value,type,atom,id,parent,fullkey,path,json HIDDEN,root 
HIDDEN)\x00.%.*s\x00[%d]\x00$\x00json\x00json_array\x00json_array_length\x00json_extract\x00->\x00->>\x00json_insert\x00json_object\x00json_patch\x00json_quote\x00json_remove\x00json_replace\x00json_set\x00json_type\x00json_valid\x00json_group_array\x00json_group_object\x00json_each\x00json_tree\x00%s_node\x00data\x00DROP TABLE '%q'.'%q_node';DROP TABLE '%q'.'%q_rowid';DROP TABLE '%q'.'%q_parent';\x00RtreeMatchArg\x00SELECT * FROM %Q.%Q\x00UNIQUE constraint failed: %s.%s\x00rtree constraint failed: %s.(%s<=%s)\x00ALTER TABLE %Q.'%q_node' RENAME TO \"%w_node\";ALTER TABLE %Q.'%q_parent' RENAME TO \"%w_parent\";ALTER TABLE %Q.'%q_rowid' RENAME TO \"%w_rowid\";\x00SELECT stat FROM %Q.sqlite_stat1 WHERE tbl = '%q_rowid'\x00node\x00CREATE TABLE \"%w\".\"%w_rowid\"(rowid INTEGER PRIMARY KEY,nodeno\x00,a%d\x00);CREATE TABLE \"%w\".\"%w_node\"(nodeno INTEGER PRIMARY KEY,data);\x00CREATE TABLE \"%w\".\"%w_parent\"(nodeno INTEGER PRIMARY KEY,parentnode);\x00INSERT INTO \"%w\".\"%w_node\"VALUES(1,zeroblob(%d))\x00INSERT INTO\"%w\".\"%w_rowid\"(rowid,nodeno)VALUES(?1,?2)ON CONFLICT(rowid)DO UPDATE SET nodeno=excluded.nodeno\x00SELECT * FROM \"%w\".\"%w_rowid\" WHERE rowid=?1\x00UPDATE \"%w\".\"%w_rowid\"SET \x00a%d=coalesce(?%d,a%d)\x00a%d=?%d\x00 WHERE rowid=?1\x00INSERT OR REPLACE INTO '%q'.'%q_node' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_node' WHERE nodeno = ?1\x00SELECT nodeno FROM '%q'.'%q_rowid' WHERE rowid = ?1\x00INSERT OR REPLACE INTO '%q'.'%q_rowid' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_rowid' WHERE rowid = ?1\x00SELECT parentnode FROM '%q'.'%q_parent' WHERE nodeno = ?1\x00INSERT OR REPLACE INTO '%q'.'%q_parent' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_parent' WHERE nodeno = ?1\x00PRAGMA %Q.page_size\x00SELECT length(data) FROM '%q'.'%q_node' WHERE nodeno = 1\x00undersize RTree blobs in \"%q_node\"\x00Wrong number of columns for an rtree table\x00Too few columns for an rtree table\x00Too many columns for an rtree table\x00Auxiliary rtree columns must be last\x00CREATE TABLE x(%.*s INT\x00,%.*s\x00);\x00,%.*s REAL\x00,%.*s INT\x00{%lld\x00 %g\x00}\x00Invalid argument to rtreedepth()\x00%z%s%z\x00SELECT data FROM %Q.'%q_node' WHERE nodeno=?\x00Node %lld missing from database\x00SELECT parentnode FROM %Q.'%q_parent' WHERE nodeno=?1\x00SELECT nodeno FROM %Q.'%q_rowid' WHERE rowid=?1\x00Mapping (%lld -> %lld) missing from %s table\x00%_rowid\x00%_parent\x00Found (%lld -> %lld) in %s table, expected (%lld -> %lld)\x00Dimension %d of cell %d on node %lld is corrupt\x00Dimension %d of cell %d on node %lld is corrupt relative to parent\x00Node %lld is too small (%d bytes)\x00Rtree depth out of range (%d)\x00Node %lld is too small for cell count of %d (%d bytes)\x00SELECT count(*) FROM %Q.'%q%s'\x00Wrong number of entries in %%%s table - expected %lld, actual %lld\x00SELECT * FROM %Q.'%q_rowid'\x00Schema corrupt or not an rtree\x00_rowid\x00_parent\x00END\x00wrong number of arguments to function rtreecheck()\x00[\x00[%!g,%!g],\x00[%!g,%!g]]\x00\x00CREATE TABLE x(_shape\x00,%s\x00rtree\x00fullscan\x00_shape does not contain a valid polygon\x00geopoly_overlap\x00geopoly_within\x00geopoly\x00geopoly_area\x00geopoly_blob\x00geopoly_json\x00geopoly_svg\x00geopoly_contains_point\x00geopoly_debug\x00geopoly_bbox\x00geopoly_xform\x00geopoly_regular\x00geopoly_ccw\x00geopoly_group_bbox\x00rtreenode\x00rtreedepth\x00rtreecheck\x00rtree_i32\x00corrupt fossil delta\x00DROP TRIGGER IF EXISTS temp.rbu_insert_tr;DROP TRIGGER IF EXISTS temp.rbu_update1_tr;DROP TRIGGER IF EXISTS temp.rbu_update2_tr;DROP TRIGGER IF 
EXISTS temp.rbu_delete_tr;\x00SELECT rbu_target_name(name, type='view') AS target, name FROM sqlite_schema WHERE type IN ('table', 'view') AND target IS NOT NULL %s ORDER BY name\x00AND rootpage!=0 AND rootpage IS NOT NULL\x00SELECT name, rootpage, sql IS NULL OR substr(8, 6)=='UNIQUE' FROM main.sqlite_schema WHERE type='index' AND tbl_name = ?\x00SELECT (sql COLLATE nocase BETWEEN 'CREATE VIRTUAL' AND 'CREATE VIRTUAM'), rootpage FROM sqlite_schema WHERE name=%Q\x00PRAGMA index_list=%Q\x00SELECT rootpage FROM sqlite_schema WHERE name = %Q\x00PRAGMA table_info=%Q\x00PRAGMA main.index_list = %Q\x00PRAGMA main.index_xinfo = %Q\x00SELECT * FROM '%q'\x00rbu_\x00rbu_rowid\x00table %q %s rbu_rowid column\x00may not have\x00requires\x00PRAGMA table_info(%Q)\x00column missing from %q: %s\x00%z%s\"%w\"\x00%z%s%s\"%w\"%s\x00SELECT max(_rowid_) FROM \"%s%w\"\x00 WHERE _rowid_ > %lld \x00 DESC\x00quote(\x00||','||\x00SELECT %s FROM \"%s%w\" ORDER BY %s LIMIT 1\x00 WHERE (%s) > (%s) \x00_rowid_\x00%z%s \"%w\" COLLATE %Q\x00%z%s \"rbu_imp_%d%w\" COLLATE %Q DESC\x00%z%s quote(\"rbu_imp_%d%w\")\x00SELECT %s FROM \"rbu_imp_%w\" ORDER BY %s LIMIT 1\x00%z%s%s\x00(%s) > (%s)\x00%z%s(%.*s) COLLATE %Q\x00%z%s\"%w\" COLLATE %Q\x00%z%s\"rbu_imp_%d%w\"%s\x00%z%s\"rbu_imp_%d%w\" %s COLLATE %Q\x00%z%s\"rbu_imp_%d%w\" IS ?\x00%z%s%s.\"%w\"\x00%z%sNULL\x00%z, %s._rowid_\x00_rowid_ = ?%d\x00%z%sc%d=?%d\x00_rowid_ = (SELECT id FROM rbu_imposter2 WHERE %z)\x00%z%s\"%w\"=?%d\x00invalid rbu_control value\x00%z%s\"%w\"=rbu_delta(\"%w\", ?%d)\x00%z%s\"%w\"=rbu_fossil_delta(\"%w\", ?%d)\x00PRIMARY KEY(\x00%z%s\"%w\"%s\x00%z)\x00SELECT name FROM sqlite_schema WHERE rootpage = ?\x00%z%sc%d %s COLLATE %Q\x00%z%sc%d%s\x00%z, id INTEGER\x00CREATE TABLE rbu_imposter2(%z, PRIMARY KEY(%z)) WITHOUT ROWID\x00PRIMARY KEY \x00%z%s\"%w\" %s %sCOLLATE %Q%s\x00 NOT NULL\x00%z, %z\x00CREATE TABLE \"rbu_imp_%w\"(%z)%s\x00 WITHOUT ROWID\x00INSERT INTO %s.'rbu_tmp_%q'(rbu_control,%s%s) VALUES(%z)\x00SELECT trim(sql) FROM sqlite_schema WHERE type='index' AND name=?\x00 LIMIT -1 OFFSET %d\x00CREATE TABLE \"rbu_imp_%w\"( %s, PRIMARY KEY( %s ) ) WITHOUT ROWID\x00INSERT INTO \"rbu_imp_%w\" VALUES(%s)\x00DELETE FROM \"rbu_imp_%w\" WHERE %s\x00SELECT %s, 0 AS rbu_control FROM '%q' %s %s %s ORDER BY %s%s\x00AND\x00WHERE\x00SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' %s ORDER BY %s%s\x00SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' %s UNION ALL SELECT %s, rbu_control FROM '%q' %s %s typeof(rbu_control)='integer' AND rbu_control!=1 ORDER BY %s%s\x00rbu_imp_\x00INSERT INTO \"%s%w\"(%s%s) VALUES(%s)\x00, _rowid_\x00DELETE FROM \"%s%w\" WHERE %s\x00, rbu_rowid\x00CREATE TABLE IF NOT EXISTS %s.'rbu_tmp_%q' AS SELECT *%s FROM '%q' WHERE 0;\x00, 0 AS rbu_rowid\x00CREATE TEMP TRIGGER rbu_delete_tr BEFORE DELETE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(3, %s);END;CREATE TEMP TRIGGER rbu_update1_tr BEFORE UPDATE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(3, %s);END;CREATE TEMP TRIGGER rbu_update2_tr AFTER UPDATE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(4, %s);END;\x00CREATE TEMP TRIGGER rbu_insert_tr AFTER INSERT ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(0, %s);END;\x00,_rowid_ \x00,rbu_rowid\x00SELECT %s,%s rbu_control%s FROM '%q'%s %s %s %s\x000 AS \x00UPDATE \"%s%w\" SET %s WHERE %s\x00SELECT k, v FROM %s.rbu_state\x00file:///%s-vacuum?modeof=%s\x00ATTACH %Q AS stat\x00CREATE TABLE IF NOT EXISTS %s.rbu_state(k INTEGER PRIMARY KEY, v)\x00cannot vacuum wal mode 
database\x00file:%s-vactmp?rbu_memory=1%s%s\x00&\x00rbu_tmp_insert\x00rbu_fossil_delta\x00rbu_target_name\x00SELECT * FROM sqlite_schema\x00rbu vfs not found\x00PRAGMA main.wal_checkpoint=restart\x00rbu_exclusive_checkpoint\x00%s-oal\x00%s-wal\x00PRAGMA schema_version\x00PRAGMA schema_version = %d\x00INSERT OR REPLACE INTO %s.rbu_state(k, v) VALUES (%d, %d), (%d, %Q), (%d, %Q), (%d, %d), (%d, %d), (%d, %lld), (%d, %lld), (%d, %lld), (%d, %lld), (%d, %Q) \x00PRAGMA main.%s\x00PRAGMA main.%s = %d\x00PRAGMA writable_schema=1\x00SELECT sql FROM sqlite_schema WHERE sql!='' AND rootpage!=0 AND name!='sqlite_sequence' ORDER BY type DESC\x00SELECT * FROM sqlite_schema WHERE rootpage=0 OR rootpage IS NULL\x00INSERT INTO sqlite_schema VALUES(?,?,?,?,?)\x00PRAGMA writable_schema=0\x00DELETE FROM %s.'rbu_tmp_%q'\x00rbu_state mismatch error\x00rbu_vfs_%d\x00SELECT count(*) FROM sqlite_schema WHERE type='index' AND tbl_name = %Q\x00rbu_index_cnt\x00SELECT 1 FROM sqlite_schema WHERE tbl_name = 'rbu_count'\x00SELECT sum(cnt * (1 + rbu_index_cnt(rbu_target_name(tbl))))FROM rbu_count\x00cannot update wal mode database\x00database modified during rbu %s\x00vacuum\x00update\x00BEGIN IMMEDIATE\x00PRAGMA journal_mode=off\x00-vactmp\x00DELETE FROM stat.rbu_state\x00rbu/zipvfs setup error\x00rbu(%s)/%z\x00rbu_memory\x00SELECT 0, 'tbl', '', 0, '', 1 UNION ALL SELECT 1, 'idx', '', 0, '', 2 UNION ALL SELECT 2, 'stat', '', 0, '', 0\x00PRAGMA '%q'.table_info('%q')\x00%z%s\"%w\".\"%w\".\"%w\"=\"%w\".\"%w\".\"%w\"\x00%z%s\"%w\".\"%w\".\"%w\" IS NOT \"%w\".\"%w\".\"%w\"\x00 OR \x00SELECT * FROM \"%w\".\"%w\" WHERE NOT EXISTS ( SELECT 1 FROM \"%w\".\"%w\" WHERE %s)\x00SELECT * FROM \"%w\".\"%w\", \"%w\".\"%w\" WHERE %s AND (%z)\x00table schemas do not match\x00SELECT tbl, ?2, stat FROM %Q.sqlite_stat1 WHERE tbl IS ?1 AND idx IS (CASE WHEN ?2=X'' THEN NULL ELSE ?2 END)\x00SELECT * FROM \x00 WHERE \x00 IS ?\x00SAVEPOINT changeset\x00RELEASE changeset\x00UPDATE main.\x00 SET \x00 = ?\x00idx IS CASE WHEN length(?4)=0 AND typeof(?4)='blob' THEN NULL ELSE ?4 END \x00DELETE FROM main.\x00 AND (?\x00AND \x00INSERT INTO main.\x00) VALUES(?\x00, ?\x00INSERT INTO main.sqlite_stat1 VALUES(?1, CASE WHEN length(?2)=0 AND typeof(?2)='blob' THEN NULL ELSE ?2 END, ?3)\x00DELETE FROM main.sqlite_stat1 WHERE tbl=?1 AND idx IS CASE WHEN length(?2)=0 AND typeof(?2)='blob' THEN NULL ELSE ?2 END AND (?4 OR stat IS ?3)\x00SAVEPOINT replace_op\x00RELEASE replace_op\x00SAVEPOINT changeset_apply\x00PRAGMA defer_foreign_keys = 1\x00sqlite3changeset_apply(): no such table: %s\x00sqlite3changeset_apply(): table %s has %d columns, expected %d or more\x00sqlite3changeset_apply(): primary key mismatch for table %s\x00PRAGMA defer_foreign_keys = 0\x00RELEASE changeset_apply\x00ROLLBACK TO changeset_apply\x00fts5: parser stack overflow\x00fts5: syntax error near \"%.*s\"\x00%z%.*s\x00wrong number of arguments to function highlight()\x00wrong number of arguments to function snippet()\x00snippet\x00highlight\x00bm25\x00prefix\x00malformed prefix=... directive\x00too many prefix indexes (max %d)\x00prefix length out of range (max 999)\x00tokenize\x00multiple tokenize=... directives\x00parse error in tokenize directive\x00content\x00multiple content=... directives\x00%Q.%Q\x00content_rowid\x00multiple content_rowid=... directives\x00columnsize\x00malformed columnsize=... directive\x00columns\x00malformed detail=... 
directive\x00unrecognized option: \"%.*s\"\x00reserved fts5 column name: %s\x00unindexed\x00unrecognized column option: %s\x00T.%Q\x00, T.%Q\x00, T.c%d\x00reserved fts5 table name: %s\x00parse error in \"%s\"\x00docsize\x00%Q.'%q_%s'\x00CREATE TABLE x(\x00%z%s%Q\x00%z, %Q HIDDEN, %s HIDDEN)\x00pgsz\x00hashsize\x00automerge\x00usermerge\x00crisismerge\x00SELECT k, v FROM %Q.'%q_config'\x00version\x00invalid fts5 file format (found %d, expected %d) - run 'rebuild'\x00unterminated string\x00fts5: syntax error near \"%.1s\"\x00OR\x00NOT\x00NEAR\x00expected integer, got \"%.*s\"\x00fts5: column queries are not supported (detail=none)\x00fts5: %s queries are not supported (detail!=full)\x00phrase\x00block\x00REPLACE INTO '%q'.'%q_data'(id, block) VALUES(?,?)\x00DELETE FROM '%q'.'%q_data' WHERE id>=? AND id<=?\x00DELETE FROM '%q'.'%q_idx' WHERE segid=?\x00PRAGMA %Q.data_version\x00SELECT pgno FROM '%q'.'%q_idx' WHERE segid=? AND term<=? ORDER BY term DESC LIMIT 1\x00INSERT INTO '%q'.'%q_idx'(segid,term,pgno) VALUES(?,?,?)\x00%s_data\x00id INTEGER PRIMARY KEY, block BLOB\x00segid, term, pgno, PRIMARY KEY(segid, term)\x00SELECT segid, term, (pgno>>1), (pgno&1) FROM %Q.'%q_idx' WHERE segid=%d ORDER BY 1, 2\x00\x00\x00\x00\x00\x00recursively defined fts5 content table\x00SELECT rowid, rank FROM %Q.%Q ORDER BY %s(\"%w\"%s%s) %s\x00DESC\x00ASC\x00reads\x00unknown special query: %.*s\x00SELECT %s\x00no such function: %s\x00parse error in rank function: %s\x00%s: table does not support scanning\x00delete-all\x00'delete-all' may only be used with a contentless or external content fts5 table\x00rebuild\x00'rebuild' may not be used with a contentless fts5 table\x00merge\x00integrity-check\x00cannot %s contentless fts5 table: %s\x00DELETE from\x00no such cursor: %lld\x00no such tokenizer: %s\x00error in tokenizer constructor\x00fts5_api_ptr\x00fts5: 2023-02-21 18:09:37 05941c2a04037fc3ed2ffae11f5d2260706f89431f463518740f72ada350866d\x00config\x00fts5\x00fts5_source_id\x00SELECT %s FROM %s T WHERE T.%Q >= ? AND T.%Q <= ? ORDER BY T.%Q ASC\x00SELECT %s FROM %s T WHERE T.%Q <= ? AND T.%Q >= ? 
ORDER BY T.%Q DESC\x00SELECT %s FROM %s T WHERE T.%Q=?\x00INSERT INTO %Q.'%q_content' VALUES(%s)\x00REPLACE INTO %Q.'%q_content' VALUES(%s)\x00DELETE FROM %Q.'%q_content' WHERE id=?\x00REPLACE INTO %Q.'%q_docsize' VALUES(?,?)\x00DELETE FROM %Q.'%q_docsize' WHERE id=?\x00SELECT sz FROM %Q.'%q_docsize' WHERE id=?\x00REPLACE INTO %Q.'%q_config' VALUES(?,?)\x00SELECT %s FROM %s AS T\x00DROP TABLE IF EXISTS %Q.'%q_data';DROP TABLE IF EXISTS %Q.'%q_idx';DROP TABLE IF EXISTS %Q.'%q_config';\x00DROP TABLE IF EXISTS %Q.'%q_docsize';\x00DROP TABLE IF EXISTS %Q.'%q_content';\x00ALTER TABLE %Q.'%q_%s' RENAME TO '%q_%s';\x00CREATE TABLE %Q.'%q_%q'(%s)%s\x00fts5: error creating shadow table %q_%s: %s\x00id INTEGER PRIMARY KEY\x00, c%d\x00id INTEGER PRIMARY KEY, sz BLOB\x00k PRIMARY KEY, v\x00DELETE FROM %Q.'%q_data';DELETE FROM %Q.'%q_idx';\x00DELETE FROM %Q.'%q_docsize';\x00SELECT count(*) FROM %Q.'%q_%s'\x00tokenchars\x00separators\x00L* N* Co\x00categories\x00remove_diacritics\x00unicode61\x00al\x00ance\x00ence\x00er\x00ic\x00able\x00ible\x00ant\x00ement\x00ment\x00ent\x00ion\x00ou\x00ism\x00ate\x00iti\x00ous\x00ive\x00ize\x00at\x00bl\x00ble\x00iz\x00ational\x00tional\x00tion\x00enci\x00anci\x00izer\x00logi\x00bli\x00alli\x00entli\x00eli\x00e\x00ousli\x00ization\x00ation\x00ator\x00alism\x00iveness\x00fulness\x00ful\x00ousness\x00aliti\x00iviti\x00biliti\x00ical\x00ness\x00icate\x00iciti\x00ative\x00alize\x00eed\x00ee\x00ed\x00ing\x00case_sensitive\x00ascii\x00porter\x00trigram\x00col\x00row\x00instance\x00fts5vocab: unknown table type: %Q\x00CREATE TABlE vocab(term, col, doc, cnt)\x00CREATE TABlE vocab(term, doc, cnt)\x00CREATE TABlE vocab(term, doc, col, offset)\x00wrong number of vtable arguments\x00recursive definition for %s.%s\x00SELECT t.%Q FROM %Q.%Q AS t WHERE t.%Q MATCH '*id'\x00no such fts5 table: %s.%s\x00fts5vocab\x002023-02-21 18:09:37 05941c2a04037fc3ed2ffae11f5d2260706f89431f463518740f72ada350866d\x00" +var ts1 = "3.41.2\x00ATOMIC_INTRINSICS=1\x00COMPILER=gcc-10.2.1 20210110\x00DEFAULT_AUTOVACUUM\x00DEFAULT_CACHE_SIZE=-2000\x00DEFAULT_FILE_FORMAT=4\x00DEFAULT_JOURNAL_SIZE_LIMIT=-1\x00DEFAULT_MEMSTATUS=0\x00DEFAULT_MMAP_SIZE=0\x00DEFAULT_PAGE_SIZE=4096\x00DEFAULT_PCACHE_INITSZ=20\x00DEFAULT_RECURSIVE_TRIGGERS\x00DEFAULT_SECTOR_SIZE=4096\x00DEFAULT_SYNCHRONOUS=2\x00DEFAULT_WAL_AUTOCHECKPOINT=1000\x00DEFAULT_WAL_SYNCHRONOUS=2\x00DEFAULT_WORKER_THREADS=0\x00ENABLE_COLUMN_METADATA\x00ENABLE_FTS5\x00ENABLE_GEOPOLY\x00ENABLE_MATH_FUNCTIONS\x00ENABLE_MEMORY_MANAGEMENT\x00ENABLE_OFFSET_SQL_FUNC\x00ENABLE_PREUPDATE_HOOK\x00ENABLE_RBU\x00ENABLE_RTREE\x00ENABLE_SESSION\x00ENABLE_SNAPSHOT\x00ENABLE_STAT4\x00ENABLE_UNLOCK_NOTIFY\x00LIKE_DOESNT_MATCH_BLOBS\x00MALLOC_SOFT_LIMIT=1024\x00MAX_ATTACHED=10\x00MAX_COLUMN=2000\x00MAX_COMPOUND_SELECT=500\x00MAX_DEFAULT_PAGE_SIZE=8192\x00MAX_EXPR_DEPTH=1000\x00MAX_FUNCTION_ARG=127\x00MAX_LENGTH=1000000000\x00MAX_LIKE_PATTERN_LENGTH=50000\x00MAX_MMAP_SIZE=0x7fff0000\x00MAX_PAGE_COUNT=1073741823\x00MAX_PAGE_SIZE=65536\x00MAX_SQL_LENGTH=1000000000\x00MAX_TRIGGER_DEPTH=1000\x00MAX_VARIABLE_NUMBER=32766\x00MAX_VDBE_OP=250000000\x00MAX_WORKER_THREADS=8\x00MUTEX_NOOP\x00SOUNDEX\x00SYSTEM_MALLOC\x00TEMP_STORE=1\x00THREADSAFE=1\x00BINARY\x00ANY\x00BLOB\x00INT\x00INTEGER\x00REAL\x00TEXT\x0020b:20e\x0020c:20e\x0020e\x0040f-21a-21d\x00now\x00local time unavailable\x00second\x00minute\x00hour\x00\x00\x00day\x00\x00\x00\x00month\x00\x00year\x00\x00\x00auto\x00julianday\x00localtime\x00unixepoch\x00utc\x00weekday \x00start of 
\x00month\x00year\x00day\x00%02d\x00%06.3f\x00%03d\x00%.16g\x00%lld\x00%04d\x00date\x00time\x00datetime\x00strftime\x00current_time\x00current_timestamp\x00current_date\x00failed to allocate %u bytes of memory\x00failed memory resize %u to %u bytes\x00out of memory\x000123456789ABCDEF0123456789abcdef\x00-x0\x00X0\x00%\x00NaN\x00Inf\x00\x00NULL\x00(NULL)\x00.\x00(join-%u)\x00(subquery-%u)\x00thstndrd\x00922337203685477580\x00API call with %s database connection pointer\x00unopened\x00invalid\x00Savepoint\x00AutoCommit\x00Transaction\x00Checkpoint\x00JournalMode\x00Vacuum\x00VFilter\x00VUpdate\x00Init\x00Goto\x00Gosub\x00InitCoroutine\x00Yield\x00MustBeInt\x00Jump\x00Once\x00If\x00IfNot\x00IsType\x00Not\x00IfNullRow\x00SeekLT\x00SeekLE\x00SeekGE\x00SeekGT\x00IfNotOpen\x00IfNoHope\x00NoConflict\x00NotFound\x00Found\x00SeekRowid\x00NotExists\x00Last\x00IfSmaller\x00SorterSort\x00Sort\x00Rewind\x00SorterNext\x00Prev\x00Next\x00IdxLE\x00IdxGT\x00IdxLT\x00Or\x00And\x00IdxGE\x00RowSetRead\x00RowSetTest\x00Program\x00FkIfZero\x00IsNull\x00NotNull\x00Ne\x00Eq\x00Gt\x00Le\x00Lt\x00Ge\x00ElseEq\x00IfPos\x00IfNotZero\x00DecrJumpZero\x00IncrVacuum\x00VNext\x00Filter\x00PureFunc\x00Function\x00Return\x00EndCoroutine\x00HaltIfNull\x00Halt\x00Integer\x00Int64\x00String\x00BeginSubrtn\x00Null\x00SoftNull\x00Blob\x00Variable\x00Move\x00Copy\x00SCopy\x00IntCopy\x00FkCheck\x00ResultRow\x00CollSeq\x00AddImm\x00RealAffinity\x00Cast\x00Permutation\x00Compare\x00IsTrue\x00ZeroOrNull\x00Offset\x00Column\x00TypeCheck\x00Affinity\x00MakeRecord\x00Count\x00ReadCookie\x00SetCookie\x00ReopenIdx\x00BitAnd\x00BitOr\x00ShiftLeft\x00ShiftRight\x00Add\x00Subtract\x00Multiply\x00Divide\x00Remainder\x00Concat\x00OpenRead\x00OpenWrite\x00BitNot\x00OpenDup\x00OpenAutoindex\x00String8\x00OpenEphemeral\x00SorterOpen\x00SequenceTest\x00OpenPseudo\x00Close\x00ColumnsUsed\x00SeekScan\x00SeekHit\x00Sequence\x00NewRowid\x00Insert\x00RowCell\x00Delete\x00ResetCount\x00SorterCompare\x00SorterData\x00RowData\x00Rowid\x00NullRow\x00SeekEnd\x00IdxInsert\x00SorterInsert\x00IdxDelete\x00DeferredSeek\x00IdxRowid\x00FinishSeek\x00Destroy\x00Clear\x00ResetSorter\x00CreateBtree\x00SqlExec\x00ParseSchema\x00LoadAnalysis\x00DropTable\x00DropIndex\x00Real\x00DropTrigger\x00IntegrityCk\x00RowSetAdd\x00Param\x00FkCounter\x00MemMax\x00OffsetLimit\x00AggInverse\x00AggStep\x00AggStep1\x00AggValue\x00AggFinal\x00Expire\x00CursorLock\x00CursorUnlock\x00TableLock\x00VBegin\x00VCreate\x00VDestroy\x00VOpen\x00VInitIn\x00VColumn\x00VRename\x00Pagecount\x00MaxPgcnt\x00ClrSubtype\x00FilterAdd\x00Trace\x00CursorHint\x00ReleaseReg\x00Noop\x00Explain\x00Abortable\x00open\x00close\x00access\x00getcwd\x00stat\x00fstat\x00ftruncate\x00fcntl\x00read\x00pread\x00pread64\x00write\x00pwrite\x00pwrite64\x00fchmod\x00fallocate\x00unlink\x00openDirectory\x00mkdir\x00rmdir\x00fchown\x00geteuid\x00mmap\x00munmap\x00mremap\x00getpagesize\x00readlink\x00lstat\x00ioctl\x00attempt to open \"%s\" as file descriptor %d\x00/dev/null\x00os_unix.c:%d: (%d) %s(%s) - %s\x00cannot fstat db file %s\x00file unlinked while open: %s\x00multiple links to file: %s\x00file renamed while open: %s\x00%s\x00full_fsync\x00%s-shm\x00readonly_shm\x00psow\x00unix-excl\x00%s.lock\x00/var/tmp\x00/usr/tmp\x00/tmp\x00SQLITE_TMPDIR\x00TMPDIR\x00%s/etilqs_%llx%c\x00modeof\x00fsync\x00/dev/urandom\x00unix\x00unix-none\x00unix-dotfile\x00memdb\x00memdb(%p,%lld)\x00PRAGMA \"%w\".page_count\x00ATTACH x AS %Q\x00recovered %d pages from %s\x00-journal\x00-wal\x00nolock\x00immutable\x00PRAGMA 
table_list\x00recovered %d frames from WAL file %s\x00cannot limit WAL size: %s\x00SQLite format 3\x00:memory:\x00@ \x00\n\x00invalid page number %d\x002nd reference to page %d\x00Failed to read ptrmap key=%d\x00Bad ptr map entry key=%d expected=(%d,%d) got=(%d,%d)\x00failed to get page %d\x00freelist leaf count too big on page %d\x00%s is %d but should be %d\x00size\x00overflow list length\x00Page %u: \x00unable to get the page. error code=%d\x00btreeInitPage() returns error code %d\x00free space corruption\x00On tree page %u cell %d: \x00On page %u at right child: \x00Offset %d out of range %d..%d\x00Extends off end of page\x00Rowid %lld out of order\x00Child page depth differs\x00Multiple uses for byte %u of page %u\x00Fragmentation of %d bytes reported as %d on page %u\x00Main freelist: \x00max rootpage (%d) disagrees with header (%d)\x00incremental_vacuum enabled with a max rootpage of zero\x00Page %d is never used\x00Pointer map page %d is referenced\x00unknown database %s\x00destination database is in use\x00source and destination must be distinct\x00%!.15g\x00-\x00%s%s\x00k(%d\x00B\x00,%s%s%s\x00N.\x00)\x00%.18s-%s\x00%s(%d)\x00%d\x00(blob)\x00vtab:%p\x00%c%u\x00]\x00program\x00?\x008\x0016LE\x0016BE\x00addr\x00opcode\x00p1\x00p2\x00p3\x00p4\x00p5\x00comment\x00id\x00parent\x00notused\x00detail\x00%.4c%s%.16c\x00MJ delete: %s\x00MJ collide: %s\x00-mj%06X9%02X\x00FOREIGN KEY constraint failed\x00a CHECK constraint\x00a generated column\x00an index\x00non-deterministic use of %s() in %s\x00API called with finalized prepared statement\x00API called with NULL prepared statement\x00string or blob too big\x00bind on a busy prepared statement: [%s]\x00-- \x00'%.*q'\x00zeroblob(%d)\x00x'\x00%02x\x00'\x00%s constraint failed\x00%z: %s\x00abort at %d in [%s]: %s\x00cannot store %s value in %s column %s.%s\x00cannot open savepoint - SQL statements in progress\x00no such savepoint: %s\x00cannot release savepoint - SQL statements in progress\x00cannot commit transaction - SQL statements in progress\x00cannot start a transaction within a transaction\x00cannot rollback - no transaction is active\x00cannot commit - no transaction is active\x00database schema has changed\x00index corruption\x00sqlite_master\x00SELECT*FROM\"%w\".%s WHERE %s ORDER BY rowid\x00too many levels of trigger recursion\x00cannot change %s wal mode from within a transaction\x00into\x00out of\x00database table is locked: %s\x00ValueList\x00-- %s\x00statement aborts at %d: [%s] %s\x00NOT NULL\x00UNIQUE\x00CHECK\x00FOREIGN KEY\x00cannot open value of type %s\x00null\x00real\x00integer\x00no such rowid: %lld\x00cannot open virtual table: %s\x00cannot open table without rowid: %s\x00cannot open view: %s\x00no such column: \"%s\"\x00foreign key\x00indexed\x00cannot open %s column for writing\x00sqlite_\x00sqlite_temp_master\x00sqlite_temp_schema\x00sqlite_schema\x00main\x00*\x00new\x00old\x00excluded\x00misuse of aliased aggregate %s\x00misuse of aliased window function %s\x00row value misused\x00double-quoted string literal: \"%w\"\x00coalesce\x00no such column\x00ambiguous column name\x00%s: %s.%s.%s\x00%s: %s.%s\x00%s: %s\x00partial index WHERE clauses\x00index expressions\x00CHECK constraints\x00generated columns\x00%s prohibited in %s\x00the \".\" operator\x00second argument to %#T() must be a constant between 0.0 and 1.0\x00not authorized to use function: %#T\x00non-deterministic functions\x00%#T() may not be used as a window function\x00window\x00aggregate\x00misuse of %s function %#T()\x00no such function: %#T\x00wrong 
number of arguments to function %#T()\x00FILTER may not be used with non-aggregate %#T()\x00subqueries\x00parameters\x00%r %s BY term out of range - should be between 1 and %d\x00too many terms in ORDER BY clause\x00ORDER\x00%r ORDER BY term does not match any column in the result set\x00too many terms in %s BY clause\x00HAVING clause on a non-aggregate query\x00GROUP\x00aggregate functions are not allowed in the GROUP BY clause\x00Expression tree is too large (maximum depth %d)\x00IN(...) element has %d term%s - expected %d\x00s\x000\x00too many arguments on function %T\x00unsafe use of %#T()\x00variable number must be between ?1 and ?%d\x00too many SQL variables\x00%d columns assigned %d values\x00too many columns in %s\x00true\x00false\x00_ROWID_\x00ROWID\x00OID\x00USING ROWID SEARCH ON TABLE %s FOR IN-OPERATOR\x00USING INDEX %s FOR IN-OPERATOR\x00sub-select returns %d columns - expected %d\x00REUSE LIST SUBQUERY %d\x00%sLIST SUBQUERY %d\x00CORRELATED \x00REUSE SUBQUERY %d\x00%sSCALAR SUBQUERY %d\x001\x000x\x00hex literal too big: %s%#T\x00generated column loop on \"%s\"\x00blob\x00text\x00numeric\x00flexnum\x00none\x00misuse of aggregate: %#T()\x00unknown function: %#T()\x00RAISE() may only be used within a trigger-program\x00B\x00C\x00D\x00E\x00F\x00table %s may not be altered\x00SELECT 1 FROM \"%w\".sqlite_master WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%' AND sqlite_rename_test(%Q, sql, type, name, %d, %Q, %d)=NULL \x00SELECT 1 FROM temp.sqlite_master WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%' AND sqlite_rename_test(%Q, sql, type, name, 1, %Q, %d)=NULL \x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_quotefix(%Q, sql)WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%'\x00UPDATE temp.sqlite_master SET sql = sqlite_rename_quotefix('temp', sql)WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%'\x00there is already another table or index with this name: %s\x00table\x00view %s may not be altered\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, %d) WHERE (type!='index' OR tbl_name=%Q COLLATE nocase)AND name NOT LIKE 'sqliteX_%%' ESCAPE 'X'\x00UPDATE %Q.sqlite_master SET tbl_name = %Q, name = CASE WHEN type='table' THEN %Q WHEN name LIKE 'sqliteX_autoindex%%' ESCAPE 'X' AND type='index' THEN 'sqlite_autoindex_' || %Q || substr(name,%d+18) ELSE name END WHERE tbl_name=%Q COLLATE nocase AND (type='table' OR type='index' OR type='trigger');\x00sqlite_sequence\x00UPDATE \"%w\".sqlite_sequence set name = %Q WHERE name = %Q\x00UPDATE sqlite_temp_schema SET sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, 1), tbl_name = CASE WHEN tbl_name=%Q COLLATE nocase AND sqlite_rename_test(%Q, sql, type, name, 1, 'after rename', 0) THEN %Q ELSE tbl_name END WHERE type IN ('view', 'trigger')\x00after rename\x00SELECT raise(ABORT,%Q) FROM \"%w\".\"%w\"\x00Cannot add a PRIMARY KEY column\x00Cannot add a UNIQUE column\x00Cannot add a REFERENCES column with non-NULL default value\x00Cannot add a NOT NULL column with default value NULL\x00Cannot add a column with non-constant default\x00cannot add a STORED column\x00UPDATE \"%w\".sqlite_master SET sql = printf('%%.%ds, ',sql) || %Q || substr(sql,1+length(printf('%%.%ds',sql))) WHERE type = 'table' AND name = %Q\x00SELECT CASE WHEN quick_check GLOB 'CHECK*' THEN raise(ABORT,'CHECK constraint failed') ELSE raise(ABORT,'NOT NULL constraint failed') END FROM pragma_quick_check(%Q,%Q) 
WHERE quick_check GLOB 'CHECK*' OR quick_check GLOB 'NULL*'\x00virtual tables may not be altered\x00Cannot add a column to a view\x00sqlite_altertab_%s\x00view\x00virtual table\x00cannot %s %s \"%s\"\x00drop column from\x00rename columns of\x00no such column: \"%T\"\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, %d) WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND (type != 'index' OR tbl_name = %Q)\x00UPDATE temp.sqlite_master SET sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, 1) WHERE type IN ('trigger', 'view')\x00error in %s %s%s%s: %s\x00 \x00CREATE \x00\"%w\" \x00%Q%s\x00%.*s%s\x00cannot drop %s column: \"%s\"\x00PRIMARY KEY\x00cannot drop column \"%s\": no other columns exist\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_drop_column(%d, sql, %d) WHERE (type=='table' AND tbl_name=%Q COLLATE nocase)\x00after drop column\x00sqlite_rename_column\x00sqlite_rename_table\x00sqlite_rename_test\x00sqlite_drop_column\x00sqlite_rename_quotefix\x00CREATE TABLE %Q.%s(%s)\x00DELETE FROM %Q.%s WHERE %s=%Q\x00DELETE FROM %Q.%s\x00sqlite_stat1\x00tbl,idx,stat\x00sqlite_stat4\x00tbl,idx,neq,nlt,ndlt,sample\x00sqlite_stat3\x00stat_init\x00stat_push\x00%llu\x00 %llu\x00%llu \x00stat_get\x00sqlite\\_%\x00BBB\x00idx\x00tbl\x00unordered*\x00sz=[0-9]*\x00noskipscan*\x00SELECT idx,count(*) FROM %Q.sqlite_stat4 GROUP BY idx\x00SELECT idx,neq,nlt,ndlt,sample FROM %Q.sqlite_stat4\x00SELECT tbl,idx,stat FROM %Q.sqlite_stat1\x00x\x00\x00too many attached databases - max %d\x00database %s is already in use\x00database is already attached\x00attached databases must use the same text encoding as main database\x00unable to open database: %s\x00no such database: %s\x00cannot detach database %s\x00database %s is locked\x00sqlite_detach\x00sqlite_attach\x00%s cannot use variables\x00%s %T cannot reference objects in database %s\x00authorizer malfunction\x00%s.%s\x00%s.%z\x00access to %z is prohibited\x00not authorized\x00pragma_\x00no such view\x00no such table\x00corrupt database\x00unknown database %T\x00object name reserved for internal use: %s\x00temporary table name must be unqualified\x00%s %T already exists\x00there is already an index named %s\x00sqlite_returning\x00cannot use RETURNING in a trigger\x00too many columns on %s\x00always\x00generated\x00duplicate column name: %s\x00default value of column [%s] is not constant\x00cannot use DEFAULT on a generated column\x00generated columns cannot be part of the PRIMARY KEY\x00table \"%s\" has more than one primary key\x00AUTOINCREMENT is only allowed on an INTEGER PRIMARY KEY\x00virtual tables cannot use computed columns\x00virtual\x00stored\x00error in generated column \"%s\"\x00,\x00\n \x00,\n \x00\n)\x00CREATE TABLE \x00 TEXT\x00 NUM\x00 INT\x00 REAL\x00unknown datatype for %s.%s: \"%s\"\x00missing datatype for %s.%s\x00AUTOINCREMENT not allowed on WITHOUT ROWID tables\x00PRIMARY KEY missing on table %s\x00must have at least one non-generated column\x00TABLE\x00VIEW\x00CREATE %s %.*s\x00UPDATE %Q.sqlite_master SET type='%s', name=%Q, tbl_name=%Q, rootpage=#%d, sql=%Q WHERE rowid=#%d\x00CREATE TABLE %Q.sqlite_sequence(name,seq)\x00tbl_name='%q' AND type!='trigger'\x00parameters are not allowed in views\x00view %s is circularly defined\x00corrupt schema\x00UPDATE %Q.sqlite_master SET rootpage=%d WHERE #%d AND rootpage=#%d\x00sqlite_stat%d\x00DELETE FROM %Q.sqlite_sequence WHERE name=%Q\x00DELETE FROM %Q.sqlite_master WHERE tbl_name=%Q and type!='trigger'\x00table %s may not be dropped\x00use 
DROP TABLE to delete table %s\x00use DROP VIEW to delete view %s\x00foreign key on %s should reference only one column of table %T\x00number of columns in foreign key does not match the number of columns in the referenced table\x00unknown column \"%s\" in foreign key definition\x00unsupported use of NULLS %s\x00FIRST\x00LAST\x00index\x00cannot create a TEMP index on non-TEMP table \"%s\"\x00table %s may not be indexed\x00views may not be indexed\x00virtual tables may not be indexed\x00there is already a table named %s\x00index %s already exists\x00sqlite_autoindex_%s_%d\x00expressions prohibited in PRIMARY KEY and UNIQUE constraints\x00conflicting ON CONFLICT clauses specified\x00invalid rootpage\x00CREATE%s INDEX %.*s\x00 UNIQUE\x00INSERT INTO %Q.sqlite_master VALUES('index',%Q,%Q,#%d,%Q);\x00name='%q' AND type='index'\x00no such index: %S\x00index associated with UNIQUE or PRIMARY KEY constraint cannot be dropped\x00DELETE FROM %Q.sqlite_master WHERE name=%Q AND type='index'\x00too many FROM clause terms, max: %d\x00a JOIN clause is required before %s\x00ON\x00USING\x00BEGIN\x00ROLLBACK\x00COMMIT\x00RELEASE\x00unable to open a temporary database file for storing temporary tables\x00index '%q'\x00, \x00%s.rowid\x00unable to identify the object to be reindexed\x00duplicate WITH table name: %s\x00no such collation sequence: %s\x00unsafe use of virtual table \"%s\"\x00table %s may not be modified\x00cannot modify %s because it is a view\x00rows deleted\x00integer overflow\x00%.*f\x00LIKE or GLOB pattern too complex\x00ESCAPE expression must be a single character\x00%!.20e\x00%Q\x00?000\x00MATCH\x00like\x00implies_nonnull_row\x00expr_compare\x00expr_implies_expr\x00affinity\x00soundex\x00load_extension\x00sqlite_compileoption_used\x00sqlite_compileoption_get\x00unlikely\x00likelihood\x00likely\x00sqlite_offset\x00ltrim\x00rtrim\x00trim\x00min\x00max\x00typeof\x00subtype\x00length\x00instr\x00printf\x00format\x00unicode\x00char\x00abs\x00round\x00upper\x00lower\x00hex\x00unhex\x00ifnull\x00random\x00randomblob\x00nullif\x00sqlite_version\x00sqlite_source_id\x00sqlite_log\x00quote\x00last_insert_rowid\x00changes\x00total_changes\x00replace\x00zeroblob\x00substr\x00substring\x00sum\x00total\x00avg\x00count\x00group_concat\x00glob\x00ceil\x00ceiling\x00floor\x00trunc\x00ln\x00log\x00log10\x00log2\x00exp\x00pow\x00power\x00mod\x00acos\x00asin\x00atan\x00atan2\x00cos\x00sin\x00tan\x00cosh\x00sinh\x00tanh\x00acosh\x00asinh\x00atanh\x00sqrt\x00radians\x00degrees\x00pi\x00sign\x00iif\x00foreign key mismatch - \"%w\" referencing \"%w\"\x00cannot INSERT into generated column \"%s\"\x00table %S has no column named %s\x00table %S has %d columns but %d values were supplied\x00%d values for %d columns\x00UPSERT not implemented for virtual table \"%s\"\x00cannot UPSERT a view\x00rows inserted\x00sqlite3_extension_init\x00sqlite3_\x00lib\x00_init\x00no entry point [%s] in shared library [%s]\x00error during initialization: %s\x00unable to open shared library [%.*s]\x00so\x00automatic extension loading failed: 
%s\x00seq\x00from\x00to\x00on_update\x00on_delete\x00match\x00cid\x00name\x00type\x00notnull\x00dflt_value\x00pk\x00hidden\x00schema\x00ncol\x00wr\x00strict\x00seqno\x00desc\x00coll\x00key\x00builtin\x00enc\x00narg\x00flags\x00wdth\x00hght\x00flgs\x00unique\x00origin\x00partial\x00rowid\x00fkid\x00file\x00busy\x00checkpointed\x00database\x00status\x00cache_size\x00timeout\x00analysis_limit\x00application_id\x00auto_vacuum\x00automatic_index\x00busy_timeout\x00cache_spill\x00case_sensitive_like\x00cell_size_check\x00checkpoint_fullfsync\x00collation_list\x00compile_options\x00count_changes\x00data_version\x00database_list\x00default_cache_size\x00defer_foreign_keys\x00empty_result_callbacks\x00encoding\x00foreign_key_check\x00foreign_key_list\x00foreign_keys\x00freelist_count\x00full_column_names\x00fullfsync\x00function_list\x00hard_heap_limit\x00ignore_check_constraints\x00incremental_vacuum\x00index_info\x00index_list\x00index_xinfo\x00integrity_check\x00journal_mode\x00journal_size_limit\x00legacy_alter_table\x00locking_mode\x00max_page_count\x00mmap_size\x00module_list\x00optimize\x00page_count\x00page_size\x00pragma_list\x00query_only\x00quick_check\x00read_uncommitted\x00recursive_triggers\x00reverse_unordered_selects\x00schema_version\x00secure_delete\x00short_column_names\x00shrink_memory\x00soft_heap_limit\x00synchronous\x00table_info\x00table_list\x00table_xinfo\x00temp_store\x00temp_store_directory\x00threads\x00trusted_schema\x00user_version\x00wal_autocheckpoint\x00wal_checkpoint\x00writable_schema\x00onoffalseyestruextrafull\x00exclusive\x00normal\x00full\x00incremental\x00memory\x00temporary storage cannot be changed from within a transaction\x00SET NULL\x00SET DEFAULT\x00CASCADE\x00RESTRICT\x00NO ACTION\x00delete\x00persist\x00off\x00truncate\x00wal\x00w\x00a\x00sissii\x00utf8\x00utf16le\x00utf16be\x00-%T\x00fast\x00not a writable directory\x00Safety level may not be changed inside a transaction\x00reset\x00issisii\x00issisi\x00SELECT*FROM\"%w\"\x00shadow\x00sssiii\x00iisX\x00isiX\x00c\x00u\x00isisi\x00iss\x00is\x00iissssss\x00NONE\x00siX\x00*** in database %s ***\n\x00row not in PRIMARY KEY order for %s\x00NULL value in %s.%s\x00non-%s value in %s.%s\x00NUMERIC value in %s.%s\x00C\x00TEXT value in %s.%s\x00CHECK constraint failed in %s\x00row \x00 missing from index \x00rowid not at end-of-record for row \x00 of index \x00 values differ from index \x00non-unique entry in index \x00wrong # of entries in index \x00ok\x00unsupported encoding: %s\x00restart\x00ANALYZE \"%w\".\"%w\"\x00UTF8\x00UTF-8\x00UTF-16le\x00UTF-16be\x00UTF16le\x00UTF16be\x00UTF-16\x00UTF16\x00CREATE TABLE x\x00%c\"%s\"\x00(\"%s\"\x00,arg HIDDEN\x00,schema HIDDEN\x00PRAGMA \x00%Q.\x00=%Q\x00error in %s %s after %s: %s\x00malformed database schema (%s)\x00%z - %s\x00rename\x00drop column\x00add column\x00orphan index\x00CREATE TABLE x(type text,name text,tbl_name text,rootpage int,sql text)\x00unsupported file format\x00SELECT*FROM\"%w\".%s ORDER BY rowid\x00database schema is locked: %s\x00statement too long\x00unknown join type: %T%s%T%s%T\x00naturaleftouterightfullinnercross\x00a NATURAL join may not have an ON or USING clause\x00cannot join using column %s - column not present in both tables\x00ambiguous reference to %s in USING()\x00UNION ALL\x00INTERSECT\x00EXCEPT\x00UNION\x00USE TEMP B-TREE FOR %s\x00USE TEMP B-TREE FOR %sORDER BY\x00RIGHT PART OF \x00column%d\x00%.*z:%u\x00NUM\x00cannot use window functions in recursive queries\x00recursive aggregate queries not supported\x00SETUP\x00RECURSIVE 
STEP\x00SCAN %d CONSTANT ROW%s\x00S\x00COMPOUND QUERY\x00LEFT-MOST SUBQUERY\x00%s USING TEMP B-TREE\x00all VALUES must have the same number of terms\x00SELECTs to the left and right of %s do not have the same number of result columns\x00MERGE (%s)\x00LEFT\x00RIGHT\x00no such index: %s\x00'%s' is not a function\x00no such index: \"%s\"\x00multiple references to recursive table: %s\x00circular reference: %s\x00table %s has %d values for %d columns\x00multiple recursive references: %s\x00recursive reference in a subquery: %s\x00%!S\x00too many references to \"%s\": max 65535\x00access to view \"%s\" prohibited\x00..%s\x00%s.%s.%s\x00no such table: %s\x00no tables specified\x00too many columns in result set\x00DISTINCT aggregates must have exactly one argument\x00USE TEMP B-TREE FOR %s(DISTINCT)\x00SCAN %s%s%s\x00 USING COVERING INDEX \x00target object/alias may not appear in FROM clause: %s\x00expected %d columns for '%s' but got %d\x00CO-ROUTINE %!S\x00MATERIALIZE %!S\x00DISTINCT\x00GROUP BY\x00sqlite3_get_table() called with two or more incompatible queries\x00temporary trigger may not have qualified name\x00trigger\x00cannot create triggers on virtual tables\x00trigger %T already exists\x00cannot create trigger on system table\x00cannot create %s trigger on view: %S\x00BEFORE\x00AFTER\x00cannot create INSTEAD OF trigger on table: %S\x00trigger \"%s\" may not write to shadow table \"%s\"\x00INSERT INTO %Q.sqlite_master VALUES('trigger',%Q,%Q,0,'CREATE TRIGGER %q')\x00type='trigger' AND name='%q'\x00no such trigger: %S\x00DELETE FROM %Q.sqlite_master WHERE name=%Q AND type='trigger'\x00%s RETURNING is not available on virtual tables\x00DELETE\x00UPDATE\x00RETURNING may not use \"TABLE.*\" wildcards\x00-- TRIGGER %s\x00cannot UPDATE generated column \"%s\"\x00no such column: %s\x00rows updated\x00%r \x00%sON CONFLICT clause does not match any PRIMARY KEY or UNIQUE constraint\x00CRE\x00INS\x00cannot VACUUM from within a transaction\x00cannot VACUUM - SQL statements in progress\x00non-text filename\x00ATTACH %Q AS vacuum_db\x00output file already exists\x00SELECT sql FROM \"%w\".sqlite_schema WHERE type='table'AND name<>'sqlite_sequence' AND coalesce(rootpage,1)>0\x00SELECT sql FROM \"%w\".sqlite_schema WHERE type='index'\x00SELECT'INSERT INTO vacuum_db.'||quote(name)||' SELECT*FROM\"%w\".'||quote(name)FROM vacuum_db.sqlite_schema WHERE type='table'AND coalesce(rootpage,1)>0\x00INSERT INTO vacuum_db.sqlite_schema SELECT*FROM \"%w\".sqlite_schema WHERE type IN('view','trigger') OR(type='table'AND rootpage=0)\x00CREATE VIRTUAL TABLE %T\x00UPDATE %Q.sqlite_master SET type='table', name=%Q, tbl_name=%Q, rootpage=0, sql=%Q WHERE rowid=#%d\x00name=%Q AND sql=%Q\x00vtable constructor called recursively: %s\x00vtable constructor failed: %s\x00vtable constructor did not declare schema: %s\x00no such module: %s\x00\x00 AND \x00(\x00 (\x00%s=?\x00ANY(%s)\x00>\x00<\x00%s %S\x00SEARCH\x00SCAN\x00AUTOMATIC PARTIAL COVERING INDEX\x00AUTOMATIC COVERING INDEX\x00COVERING INDEX %s\x00INDEX %s\x00 USING \x00 USING INTEGER PRIMARY KEY (%s\x00>? 
AND %s\x00%c?)\x00 VIRTUAL TABLE INDEX %d:%s\x00 LEFT-JOIN\x00BLOOM FILTER ON %S (\x00rowid=?\x00MULTI-INDEX OR\x00INDEX %d\x00RIGHT-JOIN %s\x00regexp\x00ON clause references tables to its right\x00NOCASE\x00too many arguments on %s() - max %d\x00automatic index on %s(%s)\x00auto-index\x00%s.xBestIndex malfunction\x00abbreviated query algorithm search\x00no query solution\x00at most %d tables in a join\x00SCAN CONSTANT ROW\x00second argument to nth_value must be a positive integer\x00argument of ntile must be a positive integer\x00row_number\x00dense_rank\x00rank\x00percent_rank\x00cume_dist\x00ntile\x00last_value\x00nth_value\x00first_value\x00lead\x00lag\x00no such window: %s\x00RANGE with offset PRECEDING/FOLLOWING requires one ORDER BY expression\x00FILTER clause may only be used with aggregate window functions\x00misuse of aggregate: %s()\x00unsupported frame specification\x00PARTITION clause\x00ORDER BY clause\x00frame specification\x00cannot override %s of window: %s\x00DISTINCT is not supported for window functions\x00frame starting offset must be a non-negative integer\x00frame ending offset must be a non-negative integer\x00frame starting offset must be a non-negative number\x00frame ending offset must be a non-negative number\x00%s clause should come after %s not before\x00ORDER BY\x00LIMIT\x00too many terms in compound SELECT\x00syntax error after column name \"%.*s\"\x00parser stack overflow\x00unknown table option: %.*s\x00set list\x00near \"%T\": syntax error\x00qualified table names are not allowed on INSERT, UPDATE, and DELETE statements within triggers\x00the INDEXED BY clause is not allowed on UPDATE or DELETE statements within triggers\x00the NOT INDEXED clause is not allowed on UPDATE or DELETE statements within triggers\x00incomplete input\x00unrecognized token: \"%T\"\x00%s in \"%s\"\x00create\x00temp\x00temporary\x00end\x00explain\x00unable to close due to unfinalized statements or unfinished backups\x00unknown error\x00abort due to ROLLBACK\x00another row available\x00no more rows available\x00not an error\x00SQL logic error\x00access permission denied\x00query aborted\x00database is locked\x00database table is locked\x00attempt to write a readonly database\x00interrupted\x00disk I/O error\x00database disk image is malformed\x00unknown operation\x00database or disk is full\x00unable to open database file\x00locking protocol\x00constraint failed\x00datatype mismatch\x00bad parameter or other API misuse\x00authorization denied\x00column index out of range\x00file is not a database\x00notification message\x00warning message\x00unable to delete/modify user-function due to active statements\x00unable to use function %s in the requested context\x00unknown database: %s\x00unable to delete/modify collation sequence due to active statements\x00file:\x00localhost\x00invalid uri authority: %.*s\x00vfs\x00cache\x00mode\x00no such %s mode: %s\x00%s mode not allowed: %s\x00no such vfs: %s\x00shared\x00private\x00ro\x00rw\x00rwc\x00RTRIM\x00\x00\x00\x00%s at line %d of [%.10s]\x00database corruption\x00misuse\x00cannot open file\x00no such table column: %s.%s\x00SQLITE_\x00database is deadlocked\x00array\x00object\x000123456789abcdef\x00JSON cannot hold BLOB values\x00malformed JSON\x00[0]\x00JSON path error near '%q'\x00json_%s() needs an odd number of arguments\x00$[\x00$.\x00json_object() requires an even number of arguments\x00json_object() labels must be TEXT\x00set\x00insert\x00[]\x00{}\x00CREATE TABLE x(key,value,type,atom,id,parent,fullkey,path,json HIDDEN,root 
HIDDEN)\x00.%.*s\x00[%d]\x00$\x00json\x00json_array\x00json_array_length\x00json_extract\x00->\x00->>\x00json_insert\x00json_object\x00json_patch\x00json_quote\x00json_remove\x00json_replace\x00json_set\x00json_type\x00json_valid\x00json_group_array\x00json_group_object\x00json_each\x00json_tree\x00%s_node\x00data\x00DROP TABLE '%q'.'%q_node';DROP TABLE '%q'.'%q_rowid';DROP TABLE '%q'.'%q_parent';\x00RtreeMatchArg\x00SELECT * FROM %Q.%Q\x00UNIQUE constraint failed: %s.%s\x00rtree constraint failed: %s.(%s<=%s)\x00ALTER TABLE %Q.'%q_node' RENAME TO \"%w_node\";ALTER TABLE %Q.'%q_parent' RENAME TO \"%w_parent\";ALTER TABLE %Q.'%q_rowid' RENAME TO \"%w_rowid\";\x00SELECT stat FROM %Q.sqlite_stat1 WHERE tbl = '%q_rowid'\x00node\x00CREATE TABLE \"%w\".\"%w_rowid\"(rowid INTEGER PRIMARY KEY,nodeno\x00,a%d\x00);CREATE TABLE \"%w\".\"%w_node\"(nodeno INTEGER PRIMARY KEY,data);\x00CREATE TABLE \"%w\".\"%w_parent\"(nodeno INTEGER PRIMARY KEY,parentnode);\x00INSERT INTO \"%w\".\"%w_node\"VALUES(1,zeroblob(%d))\x00INSERT INTO\"%w\".\"%w_rowid\"(rowid,nodeno)VALUES(?1,?2)ON CONFLICT(rowid)DO UPDATE SET nodeno=excluded.nodeno\x00SELECT * FROM \"%w\".\"%w_rowid\" WHERE rowid=?1\x00UPDATE \"%w\".\"%w_rowid\"SET \x00a%d=coalesce(?%d,a%d)\x00a%d=?%d\x00 WHERE rowid=?1\x00INSERT OR REPLACE INTO '%q'.'%q_node' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_node' WHERE nodeno = ?1\x00SELECT nodeno FROM '%q'.'%q_rowid' WHERE rowid = ?1\x00INSERT OR REPLACE INTO '%q'.'%q_rowid' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_rowid' WHERE rowid = ?1\x00SELECT parentnode FROM '%q'.'%q_parent' WHERE nodeno = ?1\x00INSERT OR REPLACE INTO '%q'.'%q_parent' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_parent' WHERE nodeno = ?1\x00PRAGMA %Q.page_size\x00SELECT length(data) FROM '%q'.'%q_node' WHERE nodeno = 1\x00undersize RTree blobs in \"%q_node\"\x00Wrong number of columns for an rtree table\x00Too few columns for an rtree table\x00Too many columns for an rtree table\x00Auxiliary rtree columns must be last\x00CREATE TABLE x(%.*s INT\x00,%.*s\x00);\x00,%.*s REAL\x00,%.*s INT\x00{%lld\x00 %g\x00}\x00Invalid argument to rtreedepth()\x00%z%s%z\x00SELECT data FROM %Q.'%q_node' WHERE nodeno=?\x00Node %lld missing from database\x00SELECT parentnode FROM %Q.'%q_parent' WHERE nodeno=?1\x00SELECT nodeno FROM %Q.'%q_rowid' WHERE rowid=?1\x00Mapping (%lld -> %lld) missing from %s table\x00%_rowid\x00%_parent\x00Found (%lld -> %lld) in %s table, expected (%lld -> %lld)\x00Dimension %d of cell %d on node %lld is corrupt\x00Dimension %d of cell %d on node %lld is corrupt relative to parent\x00Node %lld is too small (%d bytes)\x00Rtree depth out of range (%d)\x00Node %lld is too small for cell count of %d (%d bytes)\x00SELECT count(*) FROM %Q.'%q%s'\x00Wrong number of entries in %%%s table - expected %lld, actual %lld\x00SELECT * FROM %Q.'%q_rowid'\x00Schema corrupt or not an rtree\x00_rowid\x00_parent\x00END\x00wrong number of arguments to function rtreecheck()\x00[\x00[%!g,%!g],\x00[%!g,%!g]]\x00\x00CREATE TABLE x(_shape\x00,%s\x00rtree\x00fullscan\x00_shape does not contain a valid polygon\x00geopoly_overlap\x00geopoly_within\x00geopoly\x00geopoly_area\x00geopoly_blob\x00geopoly_json\x00geopoly_svg\x00geopoly_contains_point\x00geopoly_debug\x00geopoly_bbox\x00geopoly_xform\x00geopoly_regular\x00geopoly_ccw\x00geopoly_group_bbox\x00rtreenode\x00rtreedepth\x00rtreecheck\x00rtree_i32\x00corrupt fossil delta\x00DROP TRIGGER IF EXISTS temp.rbu_insert_tr;DROP TRIGGER IF EXISTS temp.rbu_update1_tr;DROP TRIGGER IF EXISTS temp.rbu_update2_tr;DROP TRIGGER IF 
EXISTS temp.rbu_delete_tr;\x00SELECT rbu_target_name(name, type='view') AS target, name FROM sqlite_schema WHERE type IN ('table', 'view') AND target IS NOT NULL %s ORDER BY name\x00AND rootpage!=0 AND rootpage IS NOT NULL\x00SELECT name, rootpage, sql IS NULL OR substr(8, 6)=='UNIQUE' FROM main.sqlite_schema WHERE type='index' AND tbl_name = ?\x00SELECT (sql COLLATE nocase BETWEEN 'CREATE VIRTUAL' AND 'CREATE VIRTUAM'), rootpage FROM sqlite_schema WHERE name=%Q\x00PRAGMA index_list=%Q\x00SELECT rootpage FROM sqlite_schema WHERE name = %Q\x00PRAGMA table_info=%Q\x00PRAGMA main.index_list = %Q\x00PRAGMA main.index_xinfo = %Q\x00SELECT * FROM '%q'\x00rbu_\x00rbu_rowid\x00table %q %s rbu_rowid column\x00may not have\x00requires\x00PRAGMA table_info(%Q)\x00column missing from %q: %s\x00%z%s\"%w\"\x00%z%s%s\"%w\"%s\x00SELECT max(_rowid_) FROM \"%s%w\"\x00 WHERE _rowid_ > %lld \x00 DESC\x00quote(\x00||','||\x00SELECT %s FROM \"%s%w\" ORDER BY %s LIMIT 1\x00 WHERE (%s) > (%s) \x00_rowid_\x00%z%s \"%w\" COLLATE %Q\x00%z%s \"rbu_imp_%d%w\" COLLATE %Q DESC\x00%z%s quote(\"rbu_imp_%d%w\")\x00SELECT %s FROM \"rbu_imp_%w\" ORDER BY %s LIMIT 1\x00%z%s%s\x00(%s) > (%s)\x00%z%s(%.*s) COLLATE %Q\x00%z%s\"%w\" COLLATE %Q\x00%z%s\"rbu_imp_%d%w\"%s\x00%z%s\"rbu_imp_%d%w\" %s COLLATE %Q\x00%z%s\"rbu_imp_%d%w\" IS ?\x00%z%s%s.\"%w\"\x00%z%sNULL\x00%z, %s._rowid_\x00_rowid_ = ?%d\x00%z%sc%d=?%d\x00_rowid_ = (SELECT id FROM rbu_imposter2 WHERE %z)\x00%z%s\"%w\"=?%d\x00invalid rbu_control value\x00%z%s\"%w\"=rbu_delta(\"%w\", ?%d)\x00%z%s\"%w\"=rbu_fossil_delta(\"%w\", ?%d)\x00PRIMARY KEY(\x00%z%s\"%w\"%s\x00%z)\x00SELECT name FROM sqlite_schema WHERE rootpage = ?\x00%z%sc%d %s COLLATE %Q\x00%z%sc%d%s\x00%z, id INTEGER\x00CREATE TABLE rbu_imposter2(%z, PRIMARY KEY(%z)) WITHOUT ROWID\x00PRIMARY KEY \x00%z%s\"%w\" %s %sCOLLATE %Q%s\x00 NOT NULL\x00%z, %z\x00CREATE TABLE \"rbu_imp_%w\"(%z)%s\x00 WITHOUT ROWID\x00INSERT INTO %s.'rbu_tmp_%q'(rbu_control,%s%s) VALUES(%z)\x00SELECT trim(sql) FROM sqlite_schema WHERE type='index' AND name=?\x00 LIMIT -1 OFFSET %d\x00CREATE TABLE \"rbu_imp_%w\"( %s, PRIMARY KEY( %s ) ) WITHOUT ROWID\x00INSERT INTO \"rbu_imp_%w\" VALUES(%s)\x00DELETE FROM \"rbu_imp_%w\" WHERE %s\x00SELECT %s, 0 AS rbu_control FROM '%q' %s %s %s ORDER BY %s%s\x00AND\x00WHERE\x00SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' %s ORDER BY %s%s\x00SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' %s UNION ALL SELECT %s, rbu_control FROM '%q' %s %s typeof(rbu_control)='integer' AND rbu_control!=1 ORDER BY %s%s\x00rbu_imp_\x00INSERT INTO \"%s%w\"(%s%s) VALUES(%s)\x00, _rowid_\x00DELETE FROM \"%s%w\" WHERE %s\x00, rbu_rowid\x00CREATE TABLE IF NOT EXISTS %s.'rbu_tmp_%q' AS SELECT *%s FROM '%q' WHERE 0;\x00, 0 AS rbu_rowid\x00CREATE TEMP TRIGGER rbu_delete_tr BEFORE DELETE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(3, %s);END;CREATE TEMP TRIGGER rbu_update1_tr BEFORE UPDATE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(3, %s);END;CREATE TEMP TRIGGER rbu_update2_tr AFTER UPDATE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(4, %s);END;\x00CREATE TEMP TRIGGER rbu_insert_tr AFTER INSERT ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(0, %s);END;\x00,_rowid_ \x00,rbu_rowid\x00SELECT %s,%s rbu_control%s FROM '%q'%s %s %s %s\x000 AS \x00UPDATE \"%s%w\" SET %s WHERE %s\x00SELECT k, v FROM %s.rbu_state\x00file:///%s-vacuum?modeof=%s\x00ATTACH %Q AS stat\x00CREATE TABLE IF NOT EXISTS %s.rbu_state(k INTEGER PRIMARY KEY, v)\x00cannot vacuum wal mode 
database\x00file:%s-vactmp?rbu_memory=1%s%s\x00&\x00rbu_tmp_insert\x00rbu_fossil_delta\x00rbu_target_name\x00SELECT * FROM sqlite_schema\x00rbu vfs not found\x00PRAGMA main.wal_checkpoint=restart\x00rbu_exclusive_checkpoint\x00%s-oal\x00%s-wal\x00PRAGMA schema_version\x00PRAGMA schema_version = %d\x00INSERT OR REPLACE INTO %s.rbu_state(k, v) VALUES (%d, %d), (%d, %Q), (%d, %Q), (%d, %d), (%d, %d), (%d, %lld), (%d, %lld), (%d, %lld), (%d, %lld), (%d, %Q) \x00PRAGMA main.%s\x00PRAGMA main.%s = %d\x00PRAGMA writable_schema=1\x00SELECT sql FROM sqlite_schema WHERE sql!='' AND rootpage!=0 AND name!='sqlite_sequence' ORDER BY type DESC\x00SELECT * FROM sqlite_schema WHERE rootpage=0 OR rootpage IS NULL\x00INSERT INTO sqlite_schema VALUES(?,?,?,?,?)\x00PRAGMA writable_schema=0\x00DELETE FROM %s.'rbu_tmp_%q'\x00rbu_state mismatch error\x00rbu_vfs_%d\x00SELECT count(*) FROM sqlite_schema WHERE type='index' AND tbl_name = %Q\x00rbu_index_cnt\x00SELECT 1 FROM sqlite_schema WHERE tbl_name = 'rbu_count'\x00SELECT sum(cnt * (1 + rbu_index_cnt(rbu_target_name(tbl))))FROM rbu_count\x00cannot update wal mode database\x00database modified during rbu %s\x00vacuum\x00update\x00BEGIN IMMEDIATE\x00PRAGMA journal_mode=off\x00-vactmp\x00DELETE FROM stat.rbu_state\x00rbu/zipvfs setup error\x00rbu(%s)/%z\x00rbu_memory\x00SELECT 0, 'tbl', '', 0, '', 1 UNION ALL SELECT 1, 'idx', '', 0, '', 2 UNION ALL SELECT 2, 'stat', '', 0, '', 0\x00PRAGMA '%q'.table_info('%q')\x00%z%s\"%w\".\"%w\".\"%w\"=\"%w\".\"%w\".\"%w\"\x00%z%s\"%w\".\"%w\".\"%w\" IS NOT \"%w\".\"%w\".\"%w\"\x00 OR \x00SELECT * FROM \"%w\".\"%w\" WHERE NOT EXISTS ( SELECT 1 FROM \"%w\".\"%w\" WHERE %s)\x00SELECT * FROM \"%w\".\"%w\", \"%w\".\"%w\" WHERE %s AND (%z)\x00table schemas do not match\x00SELECT tbl, ?2, stat FROM %Q.sqlite_stat1 WHERE tbl IS ?1 AND idx IS (CASE WHEN ?2=X'' THEN NULL ELSE ?2 END)\x00SELECT * FROM \x00 WHERE \x00 IS ?\x00SAVEPOINT changeset\x00RELEASE changeset\x00UPDATE main.\x00 SET \x00 = ?\x00idx IS CASE WHEN length(?4)=0 AND typeof(?4)='blob' THEN NULL ELSE ?4 END \x00DELETE FROM main.\x00 AND (?\x00AND \x00INSERT INTO main.\x00) VALUES(?\x00, ?\x00INSERT INTO main.sqlite_stat1 VALUES(?1, CASE WHEN length(?2)=0 AND typeof(?2)='blob' THEN NULL ELSE ?2 END, ?3)\x00DELETE FROM main.sqlite_stat1 WHERE tbl=?1 AND idx IS CASE WHEN length(?2)=0 AND typeof(?2)='blob' THEN NULL ELSE ?2 END AND (?4 OR stat IS ?3)\x00SAVEPOINT replace_op\x00RELEASE replace_op\x00SAVEPOINT changeset_apply\x00PRAGMA defer_foreign_keys = 1\x00sqlite3changeset_apply(): no such table: %s\x00sqlite3changeset_apply(): table %s has %d columns, expected %d or more\x00sqlite3changeset_apply(): primary key mismatch for table %s\x00PRAGMA defer_foreign_keys = 0\x00RELEASE changeset_apply\x00ROLLBACK TO changeset_apply\x00fts5: parser stack overflow\x00fts5: syntax error near \"%.*s\"\x00%z%.*s\x00wrong number of arguments to function highlight()\x00wrong number of arguments to function snippet()\x00snippet\x00highlight\x00bm25\x00prefix\x00malformed prefix=... directive\x00too many prefix indexes (max %d)\x00prefix length out of range (max 999)\x00tokenize\x00multiple tokenize=... directives\x00parse error in tokenize directive\x00content\x00multiple content=... directives\x00%Q.%Q\x00content_rowid\x00multiple content_rowid=... directives\x00columnsize\x00malformed columnsize=... directive\x00columns\x00malformed detail=... 
directive\x00unrecognized option: \"%.*s\"\x00reserved fts5 column name: %s\x00unindexed\x00unrecognized column option: %s\x00T.%Q\x00, T.%Q\x00, T.c%d\x00reserved fts5 table name: %s\x00parse error in \"%s\"\x00docsize\x00%Q.'%q_%s'\x00CREATE TABLE x(\x00%z%s%Q\x00%z, %Q HIDDEN, %s HIDDEN)\x00pgsz\x00hashsize\x00automerge\x00usermerge\x00crisismerge\x00SELECT k, v FROM %Q.'%q_config'\x00version\x00invalid fts5 file format (found %d, expected %d) - run 'rebuild'\x00unterminated string\x00fts5: syntax error near \"%.1s\"\x00OR\x00NOT\x00NEAR\x00expected integer, got \"%.*s\"\x00fts5: column queries are not supported (detail=none)\x00fts5: %s queries are not supported (detail!=full)\x00phrase\x00block\x00REPLACE INTO '%q'.'%q_data'(id, block) VALUES(?,?)\x00DELETE FROM '%q'.'%q_data' WHERE id>=? AND id<=?\x00DELETE FROM '%q'.'%q_idx' WHERE segid=?\x00PRAGMA %Q.data_version\x00SELECT pgno FROM '%q'.'%q_idx' WHERE segid=? AND term<=? ORDER BY term DESC LIMIT 1\x00INSERT INTO '%q'.'%q_idx'(segid,term,pgno) VALUES(?,?,?)\x00%s_data\x00id INTEGER PRIMARY KEY, block BLOB\x00segid, term, pgno, PRIMARY KEY(segid, term)\x00SELECT segid, term, (pgno>>1), (pgno&1) FROM %Q.'%q_idx' WHERE segid=%d ORDER BY 1, 2\x00\x00\x00\x00\x00\x00recursively defined fts5 content table\x00SELECT rowid, rank FROM %Q.%Q ORDER BY %s(\"%w\"%s%s) %s\x00DESC\x00ASC\x00reads\x00unknown special query: %.*s\x00SELECT %s\x00no such function: %s\x00parse error in rank function: %s\x00%s: table does not support scanning\x00delete-all\x00'delete-all' may only be used with a contentless or external content fts5 table\x00rebuild\x00'rebuild' may not be used with a contentless fts5 table\x00merge\x00integrity-check\x00cannot %s contentless fts5 table: %s\x00DELETE from\x00no such cursor: %lld\x00no such tokenizer: %s\x00error in tokenizer constructor\x00fts5_api_ptr\x00fts5: 2023-03-22 11:56:21 0d1fc92f94cb6b76bffe3ec34d69cffde2924203304e8ffc4155597af0c191da\x00config\x00fts5\x00fts5_source_id\x00SELECT %s FROM %s T WHERE T.%Q >= ? AND T.%Q <= ? ORDER BY T.%Q ASC\x00SELECT %s FROM %s T WHERE T.%Q <= ? AND T.%Q >= ? 
ORDER BY T.%Q DESC\x00SELECT %s FROM %s T WHERE T.%Q=?\x00INSERT INTO %Q.'%q_content' VALUES(%s)\x00REPLACE INTO %Q.'%q_content' VALUES(%s)\x00DELETE FROM %Q.'%q_content' WHERE id=?\x00REPLACE INTO %Q.'%q_docsize' VALUES(?,?)\x00DELETE FROM %Q.'%q_docsize' WHERE id=?\x00SELECT sz FROM %Q.'%q_docsize' WHERE id=?\x00REPLACE INTO %Q.'%q_config' VALUES(?,?)\x00SELECT %s FROM %s AS T\x00DROP TABLE IF EXISTS %Q.'%q_data';DROP TABLE IF EXISTS %Q.'%q_idx';DROP TABLE IF EXISTS %Q.'%q_config';\x00DROP TABLE IF EXISTS %Q.'%q_docsize';\x00DROP TABLE IF EXISTS %Q.'%q_content';\x00ALTER TABLE %Q.'%q_%s' RENAME TO '%q_%s';\x00CREATE TABLE %Q.'%q_%q'(%s)%s\x00fts5: error creating shadow table %q_%s: %s\x00id INTEGER PRIMARY KEY\x00, c%d\x00id INTEGER PRIMARY KEY, sz BLOB\x00k PRIMARY KEY, v\x00DELETE FROM %Q.'%q_data';DELETE FROM %Q.'%q_idx';\x00DELETE FROM %Q.'%q_docsize';\x00SELECT count(*) FROM %Q.'%q_%s'\x00tokenchars\x00separators\x00L* N* Co\x00categories\x00remove_diacritics\x00unicode61\x00al\x00ance\x00ence\x00er\x00ic\x00able\x00ible\x00ant\x00ement\x00ment\x00ent\x00ion\x00ou\x00ism\x00ate\x00iti\x00ous\x00ive\x00ize\x00at\x00bl\x00ble\x00iz\x00ational\x00tional\x00tion\x00enci\x00anci\x00izer\x00logi\x00bli\x00alli\x00entli\x00eli\x00e\x00ousli\x00ization\x00ation\x00ator\x00alism\x00iveness\x00fulness\x00ful\x00ousness\x00aliti\x00iviti\x00biliti\x00ical\x00ness\x00icate\x00iciti\x00ative\x00alize\x00eed\x00ee\x00ed\x00ing\x00case_sensitive\x00ascii\x00porter\x00trigram\x00col\x00row\x00instance\x00fts5vocab: unknown table type: %Q\x00CREATE TABlE vocab(term, col, doc, cnt)\x00CREATE TABlE vocab(term, doc, cnt)\x00CREATE TABlE vocab(term, doc, col, offset)\x00wrong number of vtable arguments\x00recursive definition for %s.%s\x00SELECT t.%Q FROM %Q.%Q AS t WHERE t.%Q MATCH '*id'\x00no such fts5 table: %s.%s\x00fts5vocab\x002023-03-22 11:56:21 0d1fc92f94cb6b76bffe3ec34d69cffde2924203304e8ffc4155597af0c191da\x00" var ts = (*reflect.StringHeader)(unsafe.Pointer(&ts1)).Data diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/sqlite/lib/sqlite_linux_arm64.go temporal-1.22.5/src/vendor/modernc.org/sqlite/lib/sqlite_linux_arm64.go --- temporal-1.21.5-1/src/vendor/modernc.org/sqlite/lib/sqlite_linux_arm64.go 2023-09-29 14:03:35.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/sqlite/lib/sqlite_linux_arm64.go 2024-02-23 09:46:16.000000000 +0000 @@ -1,4 +1,4 @@ -// Code generated by 'ccgo -DSQLITE_PRIVATE= -export-defines "" -export-enums "" -export-externs X -export-fields F -export-typedefs "" -ignore-unsupported-alignment -pkgname sqlite3 -volatile=sqlite3_io_error_pending,sqlite3_open_file_count,sqlite3_pager_readdb_count,sqlite3_pager_writedb_count,sqlite3_pager_writej_count,sqlite3_search_count,sqlite3_sort_count,saved_cnt,randomnessPid -o lib/sqlite_linux_arm64.go -trace-translation-units testdata/sqlite-amalgamation-3410000/sqlite3.c -full-path-comments -DNDEBUG -DHAVE_USLEEP -DLONGDOUBLE_TYPE=double -DSQLITE_CORE -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_FTS5 -DSQLITE_ENABLE_GEOPOLY -DSQLITE_ENABLE_MATH_FUNCTIONS -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_OFFSET_SQL_FUNC -DSQLITE_ENABLE_PREUPDATE_HOOK -DSQLITE_ENABLE_RBU -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_SESSION -DSQLITE_ENABLE_SNAPSHOT -DSQLITE_ENABLE_STAT4 -DSQLITE_ENABLE_UNLOCK_NOTIFY -DSQLITE_LIKE_DOESNT_MATCH_BLOBS -DSQLITE_MUTEX_APPDEF=1 -DSQLITE_MUTEX_NOOP -DSQLITE_SOUNDEX -DSQLITE_THREADSAFE=1 -DSQLITE_OS_UNIX=1', DO NOT EDIT. 
+// Code generated by 'ccgo -DSQLITE_PRIVATE= -export-defines "" -export-enums "" -export-externs X -export-fields F -export-typedefs "" -ignore-unsupported-alignment -pkgname sqlite3 -volatile=sqlite3_io_error_pending,sqlite3_open_file_count,sqlite3_pager_readdb_count,sqlite3_pager_writedb_count,sqlite3_pager_writej_count,sqlite3_search_count,sqlite3_sort_count,saved_cnt,randomnessPid -o lib/sqlite_linux_arm64.go -trace-translation-units testdata/sqlite-amalgamation-3410200/sqlite3.c -full-path-comments -DNDEBUG -DHAVE_USLEEP -DLONGDOUBLE_TYPE=double -DSQLITE_CORE -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_FTS5 -DSQLITE_ENABLE_GEOPOLY -DSQLITE_ENABLE_MATH_FUNCTIONS -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_OFFSET_SQL_FUNC -DSQLITE_ENABLE_PREUPDATE_HOOK -DSQLITE_ENABLE_RBU -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_SESSION -DSQLITE_ENABLE_SNAPSHOT -DSQLITE_ENABLE_STAT4 -DSQLITE_ENABLE_UNLOCK_NOTIFY -DSQLITE_LIKE_DOESNT_MATCH_BLOBS -DSQLITE_MUTEX_APPDEF=1 -DSQLITE_MUTEX_NOOP -DSQLITE_SOUNDEX -DSQLITE_THREADSAFE=1 -DSQLITE_OS_UNIX=1', DO NOT EDIT. package sqlite3 @@ -921,11 +921,11 @@ NC_OrderAgg = 0x8000000 NC_PartIdx = 0x000002 NC_SelfRef = 0x00002e + NC_Subquery = 0x000040 NC_UAggInfo = 0x000100 NC_UBaseReg = 0x000400 NC_UEList = 0x000080 NC_UUpsert = 0x000200 - NC_VarSelect = 0x000040 NDEBUG = 1 NN = 1 NOT_WITHIN = 0 @@ -2167,7 +2167,7 @@ SQLITE_SHM_UNLOCK = 1 SQLITE_SORTER_PMASZ = 250 SQLITE_SOUNDEX = 1 - SQLITE_SOURCE_ID = "2023-02-21 18:09:37 05941c2a04037fc3ed2ffae11f5d2260706f89431f463518740f72ada350866d" + SQLITE_SOURCE_ID = "2023-03-22 11:56:21 0d1fc92f94cb6b76bffe3ec34d69cffde2924203304e8ffc4155597af0c191da" SQLITE_SO_ASC = 0 SQLITE_SO_DESC = 1 SQLITE_SO_UNDEFINED = -1 @@ -2275,8 +2275,8 @@ SQLITE_UTF8 = 1 SQLITE_VDBEINT_H = 0 SQLITE_VDBE_H = 0 - SQLITE_VERSION = "3.41.0" - SQLITE_VERSION_NUMBER = 3041000 + SQLITE_VERSION = "3.41.2" + SQLITE_VERSION_NUMBER = 3041002 SQLITE_VTABRISK_High = 2 SQLITE_VTABRISK_Low = 0 SQLITE_VTABRISK_Normal = 1 @@ -6119,7 +6119,8 @@ FiIdxCur int32 FiIdxCol int32 FbMaybeNullRow U8 - F__ccgo_pad1 [3]byte + Faff U8 + F__ccgo_pad1 [2]byte FpIENext uintptr } @@ -6761,17 +6762,18 @@ // Handle type for pages. type PgHdr2 = struct { - FpPage uintptr - FpData uintptr - FpExtra uintptr - FpCache uintptr - FpDirty uintptr - FpPager uintptr - Fpgno Pgno - Fflags U16 - FnRef I16 - FpDirtyNext uintptr - FpDirtyPrev uintptr + FpPage uintptr + FpData uintptr + FpExtra uintptr + FpCache uintptr + FpDirty uintptr + FpPager uintptr + Fpgno Pgno + Fflags U16 + F__ccgo_pad1 [2]byte + FnRef I64 + FpDirtyNext uintptr + FpDirtyPrev uintptr } // Handle type for pages. 
@@ -6992,14 +6994,14 @@ FpDirty uintptr FpDirtyTail uintptr FpSynced uintptr - FnRefSum int32 + FnRefSum I64 FszCache int32 FszSpill int32 FszPage int32 FszExtra int32 FbPurgeable U8 FeCreate U8 - F__ccgo_pad1 [2]byte + F__ccgo_pad1 [6]byte FxStress uintptr FpStress uintptr FpCache uintptr @@ -7806,7 +7808,7 @@ _ = pMutex if op < 0 || op >= int32(uint64(unsafe.Sizeof([10]Sqlite3StatValueType{}))/uint64(unsafe.Sizeof(Sqlite3StatValueType(0)))) { - return Xsqlite3MisuseError(tls, 23229) + return Xsqlite3MisuseError(tls, 23233) } if statMutex[op] != 0 { pMutex = Xsqlite3Pcache1Mutex(tls) @@ -15891,7 +15893,7 @@ for p = (*UnixInodeInfo)(unsafe.Pointer(pInode)).FpUnused; p != 0; p = pNext { pNext = (*UnixUnusedFd)(unsafe.Pointer(p)).FpNext - robust_close(tls, pFile, (*UnixUnusedFd)(unsafe.Pointer(p)).Ffd, 38271) + robust_close(tls, pFile, (*UnixUnusedFd)(unsafe.Pointer(p)).Ffd, 38275) Xsqlite3_free(tls, p) } (*UnixInodeInfo)(unsafe.Pointer(pInode)).FpUnused = uintptr(0) @@ -16368,7 +16370,7 @@ var pFile uintptr = id unixUnmapfile(tls, pFile) if (*UnixFile)(unsafe.Pointer(pFile)).Fh >= 0 { - robust_close(tls, pFile, (*UnixFile)(unsafe.Pointer(pFile)).Fh, 39055) + robust_close(tls, pFile, (*UnixFile)(unsafe.Pointer(pFile)).Fh, 39059) (*UnixFile)(unsafe.Pointer(pFile)).Fh = -1 } @@ -16659,7 +16661,7 @@ if fd >= 0 { return SQLITE_OK } - return unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 40676), ts+3378, bp+8, 40676) + return unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 40680), ts+3378, bp+8, 40680) } func unixSync(tls *libc.TLS, id uintptr, flags int32) int32 { @@ -16676,14 +16678,14 @@ if rc != 0 { storeLastErrno(tls, pFile, *(*int32)(unsafe.Pointer(libc.X__errno_location(tls)))) - return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(4)<<8, ts+3666, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40717) + return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(4)<<8, ts+3666, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40721) } if int32((*UnixFile)(unsafe.Pointer(pFile)).FctrlFlags)&UNIXFILE_DIRSYNC != 0 { rc = (*(*func(*libc.TLS, uintptr, uintptr) int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 17*24 + 8)))(tls, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, bp) if rc == SQLITE_OK { full_fsync(tls, *(*int32)(unsafe.Pointer(bp)), 0, 0) - robust_close(tls, pFile, *(*int32)(unsafe.Pointer(bp)), 40731) + robust_close(tls, pFile, *(*int32)(unsafe.Pointer(bp)), 40735) } else { rc = SQLITE_OK } @@ -16703,7 +16705,7 @@ rc = robust_ftruncate(tls, (*UnixFile)(unsafe.Pointer(pFile)).Fh, nByte) if rc != 0 { storeLastErrno(tls, pFile, *(*int32)(unsafe.Pointer(libc.X__errno_location(tls)))) - return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(6)<<8, ts+3297, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40762) + return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(6)<<8, ts+3297, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40766) } else { if nByte < (*UnixFile)(unsafe.Pointer(pFile)).FmmapSize { (*UnixFile)(unsafe.Pointer(pFile)).FmmapSize = nByte @@ -16771,7 +16773,7 @@ if (*UnixFile)(unsafe.Pointer(pFile)).FszChunk <= 0 { if robust_ftruncate(tls, (*UnixFile)(unsafe.Pointer(pFile)).Fh, nByte) != 0 { storeLastErrno(tls, pFile, *(*int32)(unsafe.Pointer(libc.X__errno_location(tls)))) - return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(6)<<8, ts+3297, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40883) + return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(6)<<8, ts+3297, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40887) } } @@ -16998,7 +17000,7 @@ } Xsqlite3_free(tls, (*UnixShmNode)(unsafe.Pointer(p)).FapRegion) if 
(*UnixShmNode)(unsafe.Pointer(p)).FhShm >= 0 { - robust_close(tls, pFd, (*UnixShmNode)(unsafe.Pointer(p)).FhShm, 41442) + robust_close(tls, pFd, (*UnixShmNode)(unsafe.Pointer(p)).FhShm, 41446) (*UnixShmNode)(unsafe.Pointer(p)).FhShm = -1 } (*UnixInodeInfo)(unsafe.Pointer((*UnixShmNode)(unsafe.Pointer(p)).FpInode)).FpShmNode = uintptr(0) @@ -17026,7 +17028,7 @@ rc = unixShmSystemLock(tls, pDbFd, F_WRLCK, (22+SQLITE_SHM_NLOCK)*4+SQLITE_SHM_NLOCK, 1) if rc == SQLITE_OK && robust_ftruncate(tls, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FhShm, int64(3)) != 0 { - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(18)<<8, ts+3297, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename, 41499) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(18)<<8, ts+3297, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename, 41503) } } } else if int32((*flock)(unsafe.Pointer(bp+8)).Fl_type) == F_WRLCK { @@ -17125,7 +17127,7 @@ if !((*unixShmNode)(unsafe.Pointer(pShmNode)).FhShm < 0) { goto __10 } - rc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 41624), ts+3261, zShm, 41624) + rc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 41628), ts+3261, zShm, 41628) goto shm_open_err __10: ; @@ -17255,7 +17257,7 @@ goto __14 } zFile = (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(19)<<8, ts+3332, zFile, 41768) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(19)<<8, ts+3332, zFile, 41772) goto shmpage_out __14: ; @@ -17301,7 +17303,7 @@ if !(pMem == libc.UintptrFromInt32(-1)) { goto __20 } - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(21)<<8, ts+3419, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename, 41795) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(21)<<8, ts+3419, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename, 41799) goto shmpage_out __20: ; @@ -17532,7 +17534,7 @@ if pNew == libc.UintptrFromInt32(-1) { pNew = uintptr(0) nNew = int64(0) - unixLogErrorAtLine(tls, SQLITE_OK, zErr, (*UnixFile)(unsafe.Pointer(pFd)).FzPath, 42169) + unixLogErrorAtLine(tls, SQLITE_OK, zErr, (*UnixFile)(unsafe.Pointer(pFd)).FzPath, 42173) (*UnixFile)(unsafe.Pointer(pFd)).FmmapSizeMax = int64(0) } @@ -17666,7 +17668,7 @@ unixEnterMutex(tls) rc = findInodeInfo(tls, pNew, pNew+16) if rc != SQLITE_OK { - robust_close(tls, pNew, h, 42672) + robust_close(tls, pNew, h, 42676) h = -1 } unixLeaveMutex(tls) @@ -17687,7 +17689,7 @@ storeLastErrno(tls, pNew, 0) if rc != SQLITE_OK { if h >= 0 { - robust_close(tls, pNew, h, 42757) + robust_close(tls, pNew, h, 42761) } } else { (*Sqlite3_file)(unsafe.Pointer(pId)).FpMethods = pLockingStyle @@ -18003,7 +18005,7 @@ if !(fd < 0) { goto __19 } - rc2 = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43198), ts+3261, zName, 43198) + rc2 = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43202), ts+3261, zName, 43202) if !(rc == SQLITE_OK) { goto __20 } @@ -18094,7 +18096,7 @@ if *(*int32)(unsafe.Pointer(libc.X__errno_location(tls))) == ENOENT { rc = SQLITE_IOERR | int32(23)<<8 } else { - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(10)<<8, ts+3371, zPath, 43337) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(10)<<8, ts+3371, zPath, 43341) } return rc } @@ -18102,9 +18104,9 @@ rc = (*(*func(*libc.TLS, uintptr, uintptr) int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 17*24 + 8)))(tls, zPath, bp) if rc == SQLITE_OK { if full_fsync(tls, *(*int32)(unsafe.Pointer(bp)), 0, 0) != 0 { - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(5)<<8, ts+3788, zPath, 43347) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(5)<<8, 
ts+3788, zPath, 43351) } - robust_close(tls, uintptr(0), *(*int32)(unsafe.Pointer(bp)), 43349) + robust_close(tls, uintptr(0), *(*int32)(unsafe.Pointer(bp)), 43353) } else { rc = SQLITE_OK } @@ -18168,18 +18170,18 @@ zIn = (*DbPath)(unsafe.Pointer(pPath)).FzOut if (*(*func(*libc.TLS, uintptr, uintptr) int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 27*24 + 8)))(tls, zIn, bp) != 0 { if *(*int32)(unsafe.Pointer(libc.X__errno_location(tls))) != ENOENT { - (*DbPath)(unsafe.Pointer(pPath)).Frc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43443), ts+3459, zIn, 43443) + (*DbPath)(unsafe.Pointer(pPath)).Frc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43447), ts+3459, zIn, 43447) } } else if (*stat)(unsafe.Pointer(bp)).Fst_mode&X__mode_t(0170000) == X__mode_t(0120000) { var got Ssize_t if libc.PostIncInt32(&(*DbPath)(unsafe.Pointer(pPath)).FnSymlink, 1) > SQLITE_MAX_SYMLINK { - (*DbPath)(unsafe.Pointer(pPath)).Frc = Xsqlite3CantopenError(tls, 43449) + (*DbPath)(unsafe.Pointer(pPath)).Frc = Xsqlite3CantopenError(tls, 43453) return } got = (*(*func(*libc.TLS, uintptr, uintptr, Size_t) Ssize_t)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls, zIn, bp+128, uint64(unsafe.Sizeof([4098]uint8{}))-uint64(2)) if got <= int64(0) || got >= Ssize_t(unsafe.Sizeof([4098]uint8{}))-int64(2) { - (*DbPath)(unsafe.Pointer(pPath)).Frc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43454), ts+3450, zIn, 43454) + (*DbPath)(unsafe.Pointer(pPath)).Frc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43458), ts+3450, zIn, 43458) return } *(*uint8)(unsafe.Pointer(bp + 128 + uintptr(got))) = uint8(0) @@ -18219,14 +18221,14 @@ (*DbPath)(unsafe.Pointer(bp + 4104)).FzOut = zOut if int32(*(*uint8)(unsafe.Pointer(zPath))) != '/' { if (*(*func(*libc.TLS, uintptr, Size_t) uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 3*24 + 8)))(tls, bp, uint64(unsafe.Sizeof([4098]uint8{}))-uint64(2)) == uintptr(0) { - return unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43512), ts+3279, zPath, 43512) + return unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43516), ts+3279, zPath, 43516) } appendAllPathElements(tls, bp+4104, bp) } appendAllPathElements(tls, bp+4104, zPath) *(*uint8)(unsafe.Pointer(zOut + uintptr((*DbPath)(unsafe.Pointer(bp+4104)).FnUsed))) = uint8(0) if (*DbPath)(unsafe.Pointer(bp+4104)).Frc != 0 || (*DbPath)(unsafe.Pointer(bp+4104)).FnUsed < 2 { - return Xsqlite3CantopenError(tls, 43518) + return Xsqlite3CantopenError(tls, 43522) } if (*DbPath)(unsafe.Pointer(bp+4104)).FnSymlink != 0 { return SQLITE_OK | int32(2)<<8 @@ -18327,7 +18329,7 @@ for __ccgo := true; __ccgo; __ccgo = got < 0 && *(*int32)(unsafe.Pointer(libc.X__errno_location(tls))) == EINTR { got = int32((*(*func(*libc.TLS, int32, uintptr, Size_t) Ssize_t)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 8*24 + 8)))(tls, fd, zBuf, uint64(nBuf))) } - robust_close(tls, uintptr(0), fd, 43619) + robust_close(tls, uintptr(0), fd, 43623) } } @@ -19760,7 +19762,7 @@ libc.Xmemset(tls, pPgHdr+32, 0, uint64(unsafe.Sizeof(PgHdr{}))-uint64(uintptr(0)+32)) (*PgHdr)(unsafe.Pointer(pPgHdr)).FpPage = pPage (*PgHdr)(unsafe.Pointer(pPgHdr)).FpData = (*Sqlite3_pcache_page)(unsafe.Pointer(pPage)).FpBuf - (*PgHdr)(unsafe.Pointer(pPgHdr)).FpExtra = pPgHdr + 1*72 + (*PgHdr)(unsafe.Pointer(pPgHdr)).FpExtra = pPgHdr + 1*80 libc.Xmemset(tls, (*PgHdr)(unsafe.Pointer(pPgHdr)).FpExtra, 0, uint64(8)) (*PgHdr)(unsafe.Pointer(pPgHdr)).FpCache = pCache (*PgHdr)(unsafe.Pointer(pPgHdr)).Fpgno = pgno @@ -19790,7 +19792,7 @@ 
// reference count drops to 0, then it is made eligible for recycling. func Xsqlite3PcacheRelease(tls *libc.TLS, p uintptr) { (*PCache)(unsafe.Pointer((*PgHdr)(unsafe.Pointer(p)).FpCache)).FnRefSum-- - if int32(libc.PreDecInt16(&(*PgHdr)(unsafe.Pointer(p)).FnRef, 1)) == 0 { + if libc.PreDecInt64(&(*PgHdr)(unsafe.Pointer(p)).FnRef, 1) == int64(0) { if int32((*PgHdr)(unsafe.Pointer(p)).Fflags)&PGHDR_CLEAN != 0 { pcacheUnpin(tls, p) } else { @@ -19841,7 +19843,7 @@ *(*U16)(unsafe.Pointer(p + 52)) &= libc.Uint16FromInt32(libc.CplInt32(PGHDR_DIRTY | PGHDR_NEED_SYNC | PGHDR_WRITEABLE)) *(*U16)(unsafe.Pointer(p + 52)) |= U16(PGHDR_CLEAN) - if int32((*PgHdr)(unsafe.Pointer(p)).FnRef) == 0 { + if (*PgHdr)(unsafe.Pointer(p)).FnRef == int64(0) { pcacheUnpin(tls, p) } } @@ -19945,8 +19947,8 @@ } func pcacheMergeDirtyList(tls *libc.TLS, pA uintptr, pB uintptr) uintptr { - bp := tls.Alloc(72) - defer tls.Free(72) + bp := tls.Alloc(80) + defer tls.Free(80) var pTail uintptr pTail = bp @@ -20024,13 +20026,13 @@ // // This is not the total number of pages referenced, but the sum of the // reference count for all pages. -func Xsqlite3PcacheRefCount(tls *libc.TLS, pCache uintptr) int32 { +func Xsqlite3PcacheRefCount(tls *libc.TLS, pCache uintptr) I64 { return (*PCache)(unsafe.Pointer(pCache)).FnRefSum } // Return the number of references to the page supplied as an argument. -func Xsqlite3PcachePageRefcount(tls *libc.TLS, p uintptr) int32 { - return int32((*PgHdr)(unsafe.Pointer(p)).FnRef) +func Xsqlite3PcachePageRefcount(tls *libc.TLS, p uintptr) I64 { + return (*PgHdr)(unsafe.Pointer(p)).FnRef } // Return the total number of pages in the cache. @@ -22324,7 +22326,7 @@ pPg = Xsqlite3PagerLookup(tls, pPager, iPg) if pPg != 0 { - if Xsqlite3PcachePageRefcount(tls, pPg) == 1 { + if Xsqlite3PcachePageRefcount(tls, pPg) == int64(1) { Xsqlite3PcacheDrop(tls, pPg) } else { rc = readDbPage(tls, pPg) @@ -22757,7 +22759,7 @@ var pageSize U32 = *(*U32)(unsafe.Pointer(pPageSize)) if (int32((*Pager)(unsafe.Pointer(pPager)).FmemDb) == 0 || (*Pager)(unsafe.Pointer(pPager)).FdbSize == Pgno(0)) && - Xsqlite3PcacheRefCount(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache) == 0 && + Xsqlite3PcacheRefCount(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache) == int64(0) && pageSize != 0 && pageSize != U32((*Pager)(unsafe.Pointer(pPager)).FpageSize) { var pNew uintptr = uintptr(0) *(*I64)(unsafe.Pointer(bp)) = int64(0) @@ -22909,9 +22911,9 @@ Xsqlite3OsUnfetch(tls, (*Pager)(unsafe.Pointer(pPager)).Ffd, I64(pgno-Pgno(1))*(*Pager)(unsafe.Pointer(pPager)).FpageSize, pData) return SQLITE_NOMEM } - (*PgHdr)(unsafe.Pointer(p)).FpExtra = p + 1*72 + (*PgHdr)(unsafe.Pointer(p)).FpExtra = p + 1*80 (*PgHdr)(unsafe.Pointer(p)).Fflags = U16(PGHDR_MMAP) - (*PgHdr)(unsafe.Pointer(p)).FnRef = int16(1) + (*PgHdr)(unsafe.Pointer(p)).FnRef = int64(1) (*PgHdr)(unsafe.Pointer(p)).FpPager = pPager } @@ -23243,7 +23245,7 @@ for rc == SQLITE_OK && pList != 0 { var pNext uintptr = (*PgHdr)(unsafe.Pointer(pList)).FpDirty - if int32((*PgHdr)(unsafe.Pointer(pList)).FnRef) == 0 { + if (*PgHdr)(unsafe.Pointer(pList)).FnRef == int64(0) { rc = pagerStress(tls, pPager, pList) } pList = pNext @@ -23393,7 +23395,7 @@ goto __12 } - rc = Xsqlite3CantopenError(tls, 60235) + rc = Xsqlite3CantopenError(tls, 60239) __12: ; if !(rc != SQLITE_OK) { @@ -23774,7 +23776,7 @@ if !(rc == SQLITE_OK && *(*int32)(unsafe.Pointer(bp + 8))&SQLITE_OPEN_READONLY != 0) { goto __10 } - rc = Xsqlite3CantopenError(tls, 60754) + rc = Xsqlite3CantopenError(tls, 60758) Xsqlite3OsClose(tls, 
(*Pager)(unsafe.Pointer(pPager)).Fjfd) __10: ; @@ -23880,7 +23882,7 @@ } func pagerUnlockIfUnused(tls *libc.TLS, pPager uintptr) { - if Xsqlite3PcacheRefCount(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache) == 0 { + if Xsqlite3PcacheRefCount(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache) == int64(0) { pagerUnlockAndRollback(tls, pPager) } } @@ -23898,7 +23900,7 @@ if !(pgno == Pgno(0)) { goto __1 } - return Xsqlite3CorruptError(tls, 60967) + return Xsqlite3CorruptError(tls, 60971) __1: ; *(*uintptr)(unsafe.Pointer(bp)) = Xsqlite3PcacheFetch(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache, pgno, 3) @@ -23937,7 +23939,7 @@ if !(pgno == (*Pager)(unsafe.Pointer(pPager)).FlckPgno) { goto __7 } - rc = Xsqlite3CorruptError(tls, 60999) + rc = Xsqlite3CorruptError(tls, 61003) goto pager_acquire_err __7: ; @@ -24014,7 +24016,7 @@ (int32((*Pager)(unsafe.Pointer(pPager)).FeState) == PAGER_READER || flags&PAGER_GET_READONLY != 0)) if pgno <= Pgno(1) && pgno == Pgno(0) { - return Xsqlite3CorruptError(tls, 61078) + return Xsqlite3CorruptError(tls, 61082) } if bMmapOk != 0 && (*Pager)(unsafe.Pointer(pPager)).FpWal != uintptr(0) { @@ -24772,7 +24774,7 @@ // Return the number of references to the specified page. func Xsqlite3PagerPageRefcount(tls *libc.TLS, pPage uintptr) int32 { - return Xsqlite3PcachePageRefcount(tls, pPage) + return int32(Xsqlite3PcachePageRefcount(tls, pPage)) } // Parameter eStat must be one of SQLITE_DBSTATUS_CACHE_HIT, _MISS, _WRITE, @@ -25015,9 +25017,9 @@ pPgOld = Xsqlite3PagerLookup(tls, pPager, pgno) if pPgOld != 0 { - if int32((*PgHdr)(unsafe.Pointer(pPgOld)).FnRef) > 1 { + if (*PgHdr)(unsafe.Pointer(pPgOld)).FnRef > int64(1) { Xsqlite3PagerUnrefNotNull(tls, pPgOld) - return Xsqlite3CorruptError(tls, 62623) + return Xsqlite3CorruptError(tls, 62627) } *(*U16)(unsafe.Pointer(pPg + 52)) |= U16(int32((*PgHdr)(unsafe.Pointer(pPgOld)).Fflags) & PGHDR_NEED_SYNC) if (*Pager)(unsafe.Pointer(pPager)).FtempFile != 0 { @@ -25774,7 +25776,7 @@ nCollide = idx for iKey = walHash(tls, iPage); *(*Ht_slot)(unsafe.Pointer((*WalHashLoc)(unsafe.Pointer(bp)).FaHash + uintptr(iKey)*2)) != 0; iKey = walNextHash(tls, iKey) { if libc.PostDecInt32(&nCollide, 1) == 0 { - return Xsqlite3CorruptError(tls, 64387) + return Xsqlite3CorruptError(tls, 64391) } } *(*U32)(unsafe.Pointer((*WalHashLoc)(unsafe.Pointer(bp)).FaPgno + uintptr(idx-1)*4)) = iPage @@ -25873,7 +25875,7 @@ if !(version != U32(WAL_MAX_VERSION)) { goto __7 } - rc = Xsqlite3CantopenError(tls, 64519) + rc = Xsqlite3CantopenError(tls, 64523) goto finished __7: ; @@ -26459,7 +26461,7 @@ goto __14 } - rc = Xsqlite3CorruptError(tls, 65333) + rc = Xsqlite3CorruptError(tls, 65337) goto __15 __14: Xsqlite3OsFileControlHint(tls, (*Wal)(unsafe.Pointer(pWal)).FpDbFd, SQLITE_FCNTL_SIZE_HINT, bp+16) @@ -26734,7 +26736,7 @@ } if badHdr == 0 && (*Wal)(unsafe.Pointer(pWal)).Fhdr.FiVersion != U32(WALINDEX_MAX_VERSION) { - rc = Xsqlite3CantopenError(tls, 65682) + rc = Xsqlite3CantopenError(tls, 65686) } if (*Wal)(unsafe.Pointer(pWal)).FbShmUnreliable != 0 { if rc != SQLITE_OK { @@ -27207,7 +27209,7 @@ iRead = iFrame } if libc.PostDecInt32(&nCollide, 1) == 0 { - return Xsqlite3CorruptError(tls, 66419) + return Xsqlite3CorruptError(tls, 66423) } iKey = walNextHash(tls, iKey) } @@ -27712,7 +27714,7 @@ if rc == SQLITE_OK { if (*Wal)(unsafe.Pointer(pWal)).Fhdr.FmxFrame != 0 && walPagesize(tls, pWal) != nBuf { - rc = Xsqlite3CorruptError(tls, 67138) + rc = Xsqlite3CorruptError(tls, 67142) } else { rc = walCheckpoint(tls, pWal, db, eMode2, xBusy2, pBusyArg, sync_flags, 
zBuf) } @@ -28370,7 +28372,7 @@ } Xsqlite3VdbeRecordUnpack(tls, pKeyInfo, int32(nKey), pKey, pIdxKey) if int32((*UnpackedRecord)(unsafe.Pointer(pIdxKey)).FnField) == 0 || int32((*UnpackedRecord)(unsafe.Pointer(pIdxKey)).FnField) > int32((*KeyInfo)(unsafe.Pointer(pKeyInfo)).FnAllField) { - rc = Xsqlite3CorruptError(tls, 69249) + rc = Xsqlite3CorruptError(tls, 69253) } else { rc = Xsqlite3BtreeIndexMoveto(tls, pCur, pIdxKey, pRes) } @@ -28507,7 +28509,7 @@ if !(key == Pgno(0)) { goto __2 } - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69430) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69434) return __2: ; @@ -28524,7 +28526,7 @@ goto __4 } - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69443) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69447) goto ptrmap_exit __4: ; @@ -28532,7 +28534,7 @@ if !(offset < 0) { goto __5 } - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69448) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69452) goto ptrmap_exit __5: ; @@ -28575,7 +28577,7 @@ offset = int32(Pgno(5) * (key - Pgno(iPtrmap) - Pgno(1))) if offset < 0 { Xsqlite3PagerUnref(tls, *(*uintptr)(unsafe.Pointer(bp))) - return Xsqlite3CorruptError(tls, 69493) + return Xsqlite3CorruptError(tls, 69497) } *(*U8)(unsafe.Pointer(pEType)) = *(*U8)(unsafe.Pointer(pPtrmap + uintptr(offset))) @@ -28585,7 +28587,7 @@ Xsqlite3PagerUnref(tls, *(*uintptr)(unsafe.Pointer(bp))) if int32(*(*U8)(unsafe.Pointer(pEType))) < 1 || int32(*(*U8)(unsafe.Pointer(pEType))) > 5 { - return Xsqlite3CorruptError(tls, 69501) + return Xsqlite3CorruptError(tls, 69505) } return SQLITE_OK } @@ -28835,7 +28837,7 @@ if U32((*CellInfo)(unsafe.Pointer(bp)).FnLocal) < (*CellInfo)(unsafe.Pointer(bp)).FnPayload { var ovfl Pgno if Uptr((*MemPage)(unsafe.Pointer(pSrc)).FaDataEnd) >= Uptr(pCell) && Uptr((*MemPage)(unsafe.Pointer(pSrc)).FaDataEnd) < Uptr(pCell+uintptr((*CellInfo)(unsafe.Pointer(bp)).FnLocal)) { - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69893) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69897) return } ovfl = Xsqlite3Get4byte(tls, pCell+uintptr(int32((*CellInfo)(unsafe.Pointer(bp)).FnSize)-4)) @@ -28882,7 +28884,7 @@ if !(iFree > usableSize-4) { goto __2 } - return Xsqlite3CorruptError(tls, 69951) + return Xsqlite3CorruptError(tls, 69955) __2: ; if !(iFree != 0) { @@ -28892,7 +28894,7 @@ if !(iFree2 > usableSize-4) { goto __4 } - return Xsqlite3CorruptError(tls, 69954) + return Xsqlite3CorruptError(tls, 69958) __4: ; if !(0 == iFree2 || int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree2)))) == 0 && int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree2+1)))) == 0) { @@ -28905,7 +28907,7 @@ if !(top >= iFree) { goto __6 } - return Xsqlite3CorruptError(tls, 69962) + return Xsqlite3CorruptError(tls, 69966) __6: ; if !(iFree2 != 0) { @@ -28914,14 +28916,14 @@ if !(iFree+sz > iFree2) { goto __9 } - return Xsqlite3CorruptError(tls, 69965) + return Xsqlite3CorruptError(tls, 69969) __9: ; sz2 = int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree2+2))))<<8 | int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree2+2) + 1))) if !(iFree2+sz2 > usableSize) { goto __10 } - return Xsqlite3CorruptError(tls, 69967) + return Xsqlite3CorruptError(tls, 69971) __10: ; libc.Xmemmove(tls, data+uintptr(iFree+sz+sz2), data+uintptr(iFree+sz), uint64(iFree2-(iFree+sz))) @@ -28931,7 +28933,7 @@ if !(iFree+sz > usableSize) { goto __11 } - return Xsqlite3CorruptError(tls, 69971) + return Xsqlite3CorruptError(tls, 69975) __11: ; __8: @@ -28995,7 
+28997,7 @@ if !(pc < iCellStart || pc > iCellLast) { goto __22 } - return Xsqlite3CorruptError(tls, 70004) + return Xsqlite3CorruptError(tls, 70008) __22: ; size = int32((*struct { @@ -29005,7 +29007,7 @@ if !(cbrk < iCellStart || pc+size > usableSize) { goto __23 } - return Xsqlite3CorruptError(tls, 70010) + return Xsqlite3CorruptError(tls, 70014) __23: ; *(*U8)(unsafe.Pointer(pAddr1)) = U8(cbrk >> 8) @@ -29027,7 +29029,7 @@ if !(int32(*(*uint8)(unsafe.Pointer(data + uintptr(hdr+7))))+cbrk-iCellFirst != (*MemPage)(unsafe.Pointer(pPage)).FnFree) { goto __24 } - return Xsqlite3CorruptError(tls, 70024) + return Xsqlite3CorruptError(tls, 70028) __24: ; *(*uint8)(unsafe.Pointer(data + uintptr(hdr+5))) = U8(cbrk >> 8) @@ -29062,7 +29064,7 @@ *(*U8)(unsafe.Pointer(aData + uintptr(hdr+7))) += U8(int32(U8(x))) return aData + uintptr(pc) } else if x+pc > maxPC { - *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70081) + *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70085) return uintptr(0) } else { *(*U8)(unsafe.Pointer(aData + uintptr(pc+2))) = U8(x >> 8) @@ -29075,13 +29077,13 @@ pc = int32(*(*U8)(unsafe.Pointer(pTmp)))<<8 | int32(*(*U8)(unsafe.Pointer(pTmp + 1))) if pc <= iAddr { if pc != 0 { - *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70096) + *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70100) } return uintptr(0) } } if pc > maxPC+nByte-4 { - *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70103) + *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70107) } return uintptr(0) } @@ -29106,7 +29108,7 @@ if top == 0 && (*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize == U32(65536) { top = 65536 } else { - return Xsqlite3CorruptError(tls, 70152) + return Xsqlite3CorruptError(tls, 70156) } } @@ -29117,7 +29119,7 @@ *(*int32)(unsafe.Pointer(pIdx)) = libc.AssignInt32(&g2, int32((int64(pSpace)-int64(data))/1)) if g2 <= gap { - return Xsqlite3CorruptError(tls, 70170) + return Xsqlite3CorruptError(tls, 70174) } else { return SQLITE_OK } @@ -29169,22 +29171,22 @@ if int32(iFreeBlk) == 0 { break } - return Xsqlite3CorruptError(tls, 70249) + return Xsqlite3CorruptError(tls, 70253) } iPtr = iFreeBlk } if U32(iFreeBlk) > (*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize-U32(4) { - return Xsqlite3CorruptError(tls, 70254) + return Xsqlite3CorruptError(tls, 70258) } if iFreeBlk != 0 && iEnd+U32(3) >= U32(iFreeBlk) { nFrag = U8(U32(iFreeBlk) - iEnd) if iEnd > U32(iFreeBlk) { - return Xsqlite3CorruptError(tls, 70266) + return Xsqlite3CorruptError(tls, 70270) } iEnd = U32(int32(iFreeBlk) + (int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(iFreeBlk)+2))))<<8 | int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(iFreeBlk)+2) + 1))))) if iEnd > (*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize { - return Xsqlite3CorruptError(tls, 70269) + return Xsqlite3CorruptError(tls, 70273) } iSize = U16(iEnd - U32(iStart)) iFreeBlk = U16(int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFreeBlk))))<<8 | int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFreeBlk) + 1)))) @@ -29194,7 +29196,7 @@ var iPtrEnd int32 = int32(iPtr) + (int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(iPtr)+2))))<<8 | int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(iPtr)+2) + 1)))) if iPtrEnd+3 >= int32(iStart) { if iPtrEnd > int32(iStart) { - return Xsqlite3CorruptError(tls, 70282) + return Xsqlite3CorruptError(tls, 70286) } nFrag = U8(int32(nFrag) + (int32(iStart) - iPtrEnd)) iSize = 
U16(iEnd - U32(iPtr)) @@ -29202,7 +29204,7 @@ } } if int32(nFrag) > int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+7)))) { - return Xsqlite3CorruptError(tls, 70288) + return Xsqlite3CorruptError(tls, 70292) } *(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+7))) -= uint8(int32(nFrag)) } @@ -29210,10 +29212,10 @@ x = U16(int32(*(*U8)(unsafe.Pointer(pTmp)))<<8 | int32(*(*U8)(unsafe.Pointer(pTmp + 1)))) if int32(iStart) <= int32(x) { if int32(iStart) < int32(x) { - return Xsqlite3CorruptError(tls, 70297) + return Xsqlite3CorruptError(tls, 70301) } if int32(iPtr) != int32(hdr)+1 { - return Xsqlite3CorruptError(tls, 70298) + return Xsqlite3CorruptError(tls, 70302) } *(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+1))) = U8(int32(iFreeBlk) >> 8) *(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+1) + 1)) = U8(iFreeBlk) @@ -29273,7 +29275,7 @@ (*MemPage)(unsafe.Pointer(pPage)).FxParseCell = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr) }{btreeParseCellPtrIndex})) - return Xsqlite3CorruptError(tls, 70357) + return Xsqlite3CorruptError(tls, 70361) } } else { (*MemPage)(unsafe.Pointer(pPage)).FchildPtrSize = U8(4) @@ -29309,7 +29311,7 @@ (*MemPage)(unsafe.Pointer(pPage)).FxParseCell = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr) }{btreeParseCellPtrIndex})) - return Xsqlite3CorruptError(tls, 70381) + return Xsqlite3CorruptError(tls, 70385) } } return SQLITE_OK @@ -29339,11 +29341,11 @@ var next U32 var size U32 if pc < top { - return Xsqlite3CorruptError(tls, 70432) + return Xsqlite3CorruptError(tls, 70436) } for 1 != 0 { if pc > iCellLast { - return Xsqlite3CorruptError(tls, 70437) + return Xsqlite3CorruptError(tls, 70441) } next = U32(int32(*(*U8)(unsafe.Pointer(data + uintptr(pc))))<<8 | int32(*(*U8)(unsafe.Pointer(data + uintptr(pc) + 1)))) size = U32(int32(*(*U8)(unsafe.Pointer(data + uintptr(pc+2))))<<8 | int32(*(*U8)(unsafe.Pointer(data + uintptr(pc+2) + 1)))) @@ -29354,15 +29356,15 @@ pc = int32(next) } if next > U32(0) { - return Xsqlite3CorruptError(tls, 70447) + return Xsqlite3CorruptError(tls, 70451) } if U32(pc)+size > uint32(usableSize) { - return Xsqlite3CorruptError(tls, 70451) + return Xsqlite3CorruptError(tls, 70455) } } if nFree > usableSize || nFree < iCellFirst { - return Xsqlite3CorruptError(tls, 70463) + return Xsqlite3CorruptError(tls, 70467) } (*MemPage)(unsafe.Pointer(pPage)).FnFree = int32(U16(nFree - iCellFirst)) return SQLITE_OK @@ -29390,14 +29392,14 @@ pc = int32(libc.X__builtin_bswap16(tls, *(*U16)(unsafe.Pointer(data + uintptr(cellOffset+i*2))))) if pc < iCellFirst || pc > iCellLast { - return Xsqlite3CorruptError(tls, 70494) + return Xsqlite3CorruptError(tls, 70498) } sz = int32((*struct { f func(*libc.TLS, uintptr, uintptr) U16 })(unsafe.Pointer(&struct{ uintptr }{(*MemPage)(unsafe.Pointer(pPage)).FxCellSize})).f(tls, pPage, data+uintptr(pc))) if pc+sz > usableSize { - return Xsqlite3CorruptError(tls, 70499) + return Xsqlite3CorruptError(tls, 70503) } } return SQLITE_OK @@ -29411,7 +29413,7 @@ data = (*MemPage)(unsafe.Pointer(pPage)).FaData + uintptr((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset) if decodeFlags(tls, pPage, int32(*(*U8)(unsafe.Pointer(data)))) != 0 { - return Xsqlite3CorruptError(tls, 70531) + return Xsqlite3CorruptError(tls, 70535) } (*MemPage)(unsafe.Pointer(pPage)).FmaskPage = U16((*BtShared)(unsafe.Pointer(pBt)).FpageSize - U32(1)) @@ -29423,7 +29425,7 @@ (*MemPage)(unsafe.Pointer(pPage)).FnCell = U16(int32(*(*U8)(unsafe.Pointer(data + 3)))<<8 | 
int32(*(*U8)(unsafe.Pointer(data + 3 + 1)))) if U32((*MemPage)(unsafe.Pointer(pPage)).FnCell) > ((*BtShared)(unsafe.Pointer(pBt)).FpageSize-U32(8))/U32(6) { - return Xsqlite3CorruptError(tls, 70545) + return Xsqlite3CorruptError(tls, 70549) } (*MemPage)(unsafe.Pointer(pPage)).FnFree = -1 @@ -29526,7 +29528,7 @@ if !(pgno > btreePagecount(tls, pBt)) { goto __1 } - rc = Xsqlite3CorruptError(tls, 70700) + rc = Xsqlite3CorruptError(tls, 70704) goto getAndInitPage_error1 __1: ; @@ -29554,7 +29556,7 @@ if !(pCur != 0 && (int32((*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppPage)))).FnCell) < 1 || int32((*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppPage)))).FintKey) != int32((*BtCursor)(unsafe.Pointer(pCur)).FcurIntKey))) { goto __5 } - rc = Xsqlite3CorruptError(tls, 70721) + rc = Xsqlite3CorruptError(tls, 70725) goto getAndInitPage_error2 __5: ; @@ -29593,7 +29595,7 @@ if Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppPage)))).FpDbPage) > 1 { releasePage(tls, *(*uintptr)(unsafe.Pointer(ppPage))) *(*uintptr)(unsafe.Pointer(ppPage)) = uintptr(0) - return Xsqlite3CorruptError(tls, 70787) + return Xsqlite3CorruptError(tls, 70791) } (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppPage)))).FisInit = U8(0) } else { @@ -30476,7 +30478,7 @@ if !(Xsqlite3WritableSchema(tls, (*BtShared)(unsafe.Pointer(pBt)).Fdb) == 0) { goto __18 } - rc = Xsqlite3CorruptError(tls, 71722) + rc = Xsqlite3CorruptError(tls, 71726) goto page1_init_failed goto __19 __18: @@ -30891,7 +30893,7 @@ if int32(eType) == PTRMAP_OVERFLOW2 { if Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer(pPage)).FaData) != iFrom { - return Xsqlite3CorruptError(tls, 72143) + return Xsqlite3CorruptError(tls, 72147) } Xsqlite3Put4byte(tls, (*MemPage)(unsafe.Pointer(pPage)).FaData, iTo) } else { @@ -30917,7 +30919,7 @@ })(unsafe.Pointer(&struct{ uintptr }{(*MemPage)(unsafe.Pointer(pPage)).FxParseCell})).f(tls, pPage, pCell, bp) if U32((*CellInfo)(unsafe.Pointer(bp)).FnLocal) < (*CellInfo)(unsafe.Pointer(bp)).FnPayload { if pCell+uintptr((*CellInfo)(unsafe.Pointer(bp)).FnSize) > (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr((*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize) { - return Xsqlite3CorruptError(tls, 72162) + return Xsqlite3CorruptError(tls, 72166) } if iFrom == Xsqlite3Get4byte(tls, pCell+uintptr((*CellInfo)(unsafe.Pointer(bp)).FnSize)-uintptr(4)) { Xsqlite3Put4byte(tls, pCell+uintptr((*CellInfo)(unsafe.Pointer(bp)).FnSize)-uintptr(4), iTo) @@ -30926,7 +30928,7 @@ } } else { if pCell+uintptr(4) > (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr((*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize) { - return Xsqlite3CorruptError(tls, 72171) + return Xsqlite3CorruptError(tls, 72175) } if Xsqlite3Get4byte(tls, pCell) == iFrom { Xsqlite3Put4byte(tls, pCell, iTo) @@ -30937,7 +30939,7 @@ if i == nCell { if int32(eType) != PTRMAP_BTREE || Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr(int32((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset)+8)) != iFrom { - return Xsqlite3CorruptError(tls, 72183) + return Xsqlite3CorruptError(tls, 72187) } Xsqlite3Put4byte(tls, (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr(int32((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset)+8), iTo) } @@ -30953,7 +30955,7 @@ var pPager uintptr = (*BtShared)(unsafe.Pointer(pBt)).FpPager if iDbPage < Pgno(3) { - return Xsqlite3CorruptError(tls, 72218) + return Xsqlite3CorruptError(tls, 72222) } *(*int32)(unsafe.Pointer(bp)) = 
Xsqlite3PagerMovepage(tls, pPager, (*MemPage)(unsafe.Pointer(pDbPage)).FpDbPage, iFreePage, isCommit) @@ -31014,7 +31016,7 @@ return rc } if int32(*(*U8)(unsafe.Pointer(bp))) == PTRMAP_ROOTPAGE { - return Xsqlite3CorruptError(tls, 72316) + return Xsqlite3CorruptError(tls, 72320) } if int32(*(*U8)(unsafe.Pointer(bp))) == PTRMAP_FREEPAGE { @@ -31049,7 +31051,7 @@ releasePage(tls, *(*uintptr)(unsafe.Pointer(bp + 32))) if *(*Pgno)(unsafe.Pointer(bp + 40)) > dbSize { releasePage(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) - return Xsqlite3CorruptError(tls, 72368) + return Xsqlite3CorruptError(tls, 72372) } } @@ -31109,7 +31111,7 @@ var nFin Pgno = finalDbSize(tls, pBt, nOrig, nFree) if nOrig < nFin || nFree >= nOrig { - rc = Xsqlite3CorruptError(tls, 72436) + rc = Xsqlite3CorruptError(tls, 72440) } else if nFree > Pgno(0) { rc = saveAllCursors(tls, pBt, uint32(0), uintptr(0)) if rc == SQLITE_OK { @@ -31148,7 +31150,7 @@ nOrig = btreePagecount(tls, pBt) if ptrmapPageno(tls, pBt, nOrig) == nOrig || nOrig == U32(Xsqlite3PendingByte)/(*BtShared)(unsafe.Pointer(pBt)).FpageSize+U32(1) { - return Xsqlite3CorruptError(tls, 72487) + return Xsqlite3CorruptError(tls, 72491) } nFree = Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer((*BtShared)(unsafe.Pointer(pBt)).FpPage1)).FaData+36) @@ -31179,7 +31181,7 @@ } nFin = finalDbSize(tls, pBt, nOrig, nVac) if nFin > nOrig { - return Xsqlite3CorruptError(tls, 72514) + return Xsqlite3CorruptError(tls, 72518) } if nFin < nOrig { rc = saveAllCursors(tls, pBt, uint32(0), uintptr(0)) @@ -31520,7 +31522,7 @@ if iTable <= Pgno(1) { if iTable < Pgno(1) { - return Xsqlite3CorruptError(tls, 72978) + return Xsqlite3CorruptError(tls, 72982) } else if btreePagecount(tls, pBt) == Pgno(0) { iTable = Pgno(0) } @@ -31764,14 +31766,14 @@ var pBt uintptr = (*BtCursor)(unsafe.Pointer(pCur)).FpBt if int32((*BtCursor)(unsafe.Pointer(pCur)).Fix) >= int32((*MemPage)(unsafe.Pointer(pPage)).FnCell) { - return Xsqlite3CorruptError(tls, 73385) + return Xsqlite3CorruptError(tls, 73389) } getCellInfo(tls, pCur) aPayload = (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload if Uptr((int64(aPayload)-int64((*MemPage)(unsafe.Pointer(pPage)).FaData))/1) > Uptr((*BtShared)(unsafe.Pointer(pBt)).FusableSize-U32((*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal)) { - return Xsqlite3CorruptError(tls, 73400) + return Xsqlite3CorruptError(tls, 73404) } if offset < U32((*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal) { @@ -31816,7 +31818,7 @@ for *(*Pgno)(unsafe.Pointer(bp)) != 0 { if *(*Pgno)(unsafe.Pointer(bp)) > (*BtShared)(unsafe.Pointer(pBt)).FnPage { - return Xsqlite3CorruptError(tls, 73462) + return Xsqlite3CorruptError(tls, 73466) } *(*Pgno)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pCur)).FaOverflow + uintptr(iIdx)*4)) = *(*Pgno)(unsafe.Pointer(bp)) @@ -31865,7 +31867,7 @@ } if rc == SQLITE_OK && amt > U32(0) { - return Xsqlite3CorruptError(tls, 73547) + return Xsqlite3CorruptError(tls, 73551) } return rc } @@ -31945,7 +31947,7 @@ func moveToChild(tls *libc.TLS, pCur uintptr, newPgno U32) int32 { if int32((*BtCursor)(unsafe.Pointer(pCur)).FiPage) >= BTCURSOR_MAX_DEPTH-1 { - return Xsqlite3CorruptError(tls, 73684) + return Xsqlite3CorruptError(tls, 73688) } (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnSize = U16(0) *(*U8)(unsafe.Pointer(pCur + 1)) &= libc.Uint8FromInt32(libc.CplInt32(BTCF_ValidNKey | BTCF_ValidOvfl)) @@ -32036,7 +32038,7 @@ if !(int32((*MemPage)(unsafe.Pointer(pRoot)).FisInit) == 0 || libc.Bool32((*BtCursor)(unsafe.Pointer(pCur)).FpKeyInfo == uintptr(0)) != 
int32((*MemPage)(unsafe.Pointer(pRoot)).FintKey)) { goto __11 } - return Xsqlite3CorruptError(tls, 73823) + return Xsqlite3CorruptError(tls, 73827) __11: ; skip_init: @@ -32056,7 +32058,7 @@ if !((*MemPage)(unsafe.Pointer(pRoot)).Fpgno != Pgno(1)) { goto __16 } - return Xsqlite3CorruptError(tls, 73835) + return Xsqlite3CorruptError(tls, 73839) __16: ; subpage = Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer(pRoot)).FaData+uintptr(int32((*MemPage)(unsafe.Pointer(pRoot)).FhdrOffset)+8)) @@ -32266,7 +32268,7 @@ if !(pCell >= (*MemPage)(unsafe.Pointer(pPage)).FaDataEnd) { goto __21 } - return Xsqlite3CorruptError(tls, 74077) + return Xsqlite3CorruptError(tls, 74081) __21: ; goto __19 @@ -32470,7 +32472,7 @@ if !!(int32((*MemPage)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pCur)).FpPage)).FisInit) != 0) { goto __4 } - return Xsqlite3CorruptError(tls, 74273) + return Xsqlite3CorruptError(tls, 74277) __4: ; goto bypass_moveto_root @@ -32535,7 +32537,7 @@ if !(nCell < 2 || U32(nCell)/(*BtShared)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pCur)).FpBt)).FusableSize > (*BtShared)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pCur)).FpBt)).FnPage) { goto __17 } - rc = Xsqlite3CorruptError(tls, 74360) + rc = Xsqlite3CorruptError(tls, 74364) goto moveto_index_finish __17: ; @@ -32583,7 +32585,7 @@ if !((*UnpackedRecord)(unsafe.Pointer(pIdxKey)).FerrCode != 0) { goto __24 } - rc = Xsqlite3CorruptError(tls, 74392) + rc = Xsqlite3CorruptError(tls, 74396) __24: ; goto moveto_index_finish @@ -32702,7 +32704,7 @@ pPage = (*BtCursor)(unsafe.Pointer(pCur)).FpPage idx = int32(libc.PreIncUint16(&(*BtCursor)(unsafe.Pointer(pCur)).Fix, 1)) if !(int32((*MemPage)(unsafe.Pointer(pPage)).FisInit) != 0) || Xsqlite3FaultSim(tls, 412) != 0 { - return Xsqlite3CorruptError(tls, 74508) + return Xsqlite3CorruptError(tls, 74512) } if idx >= int32((*MemPage)(unsafe.Pointer(pPage)).FnCell) { @@ -32862,7 +32864,7 @@ if !(n >= mxPage) { goto __1 } - return Xsqlite3CorruptError(tls, 74688) + return Xsqlite3CorruptError(tls, 74692) __1: ; if !(n > U32(0)) { @@ -32927,7 +32929,7 @@ if !(iTrunk > mxPage || libc.PostIncUint32(&nSearch, 1) > n) { goto __16 } - rc = Xsqlite3CorruptError(tls, 74744) + rc = Xsqlite3CorruptError(tls, 74748) goto __17 __16: rc = btreeGetUnusedPage(tls, pBt, iTrunk, bp+8, 0) @@ -32963,7 +32965,7 @@ goto __22 } - rc = Xsqlite3CorruptError(tls, 74773) + rc = Xsqlite3CorruptError(tls, 74777) goto end_allocate_page goto __23 __22: @@ -33007,7 +33009,7 @@ if !(iNewTrunk > mxPage) { goto __32 } - rc = Xsqlite3CorruptError(tls, 74807) + rc = Xsqlite3CorruptError(tls, 74811) goto end_allocate_page __32: ; @@ -33119,7 +33121,7 @@ if !(iPage > mxPage || iPage < Pgno(2)) { goto __51 } - rc = Xsqlite3CorruptError(tls, 74872) + rc = Xsqlite3CorruptError(tls, 74876) goto end_allocate_page __51: ; @@ -33277,7 +33279,7 @@ if !(iPage < Pgno(2) || iPage > (*BtShared)(unsafe.Pointer(pBt)).FnPage) { goto __1 } - return Xsqlite3CorruptError(tls, 74999) + return Xsqlite3CorruptError(tls, 75003) __1: ; if !(pMemPage != 0) { @@ -33334,7 +33336,7 @@ if !(iTrunk > btreePagecount(tls, pBt)) { goto __10 } - *(*int32)(unsafe.Pointer(bp + 8)) = Xsqlite3CorruptError(tls, 75046) + *(*int32)(unsafe.Pointer(bp + 8)) = Xsqlite3CorruptError(tls, 75050) goto freepage_out __10: ; @@ -33350,7 +33352,7 @@ if !(nLeaf > (*BtShared)(unsafe.Pointer(pBt)).FusableSize/U32(4)-U32(2)) { goto __12 } - *(*int32)(unsafe.Pointer(bp + 8)) = Xsqlite3CorruptError(tls, 75057) + *(*int32)(unsafe.Pointer(bp + 8)) = Xsqlite3CorruptError(tls, 75061) goto freepage_out __12: 
; @@ -33424,7 +33426,7 @@ var ovflPageSize U32 if pCell+uintptr((*CellInfo)(unsafe.Pointer(pInfo)).FnSize) > (*MemPage)(unsafe.Pointer(pPage)).FaDataEnd { - return Xsqlite3CorruptError(tls, 75146) + return Xsqlite3CorruptError(tls, 75150) } ovflPgno = Xsqlite3Get4byte(tls, pCell+uintptr((*CellInfo)(unsafe.Pointer(pInfo)).FnSize)-uintptr(4)) pBt = (*MemPage)(unsafe.Pointer(pPage)).FpBt @@ -33436,7 +33438,7 @@ *(*Pgno)(unsafe.Pointer(bp + 8)) = Pgno(0) *(*uintptr)(unsafe.Pointer(bp)) = uintptr(0) if ovflPgno < Pgno(2) || ovflPgno > btreePagecount(tls, pBt) { - return Xsqlite3CorruptError(tls, 75163) + return Xsqlite3CorruptError(tls, 75167) } if nOvfl != 0 { rc = getOverflowPage(tls, pBt, ovflPgno, bp, bp+8) @@ -33447,7 +33449,7 @@ if (*(*uintptr)(unsafe.Pointer(bp)) != 0 || libc.AssignPtrUintptr(bp, btreePageLookup(tls, pBt, ovflPgno)) != uintptr(0)) && Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FpDbPage) != 1 { - rc = Xsqlite3CorruptError(tls, 75183) + rc = Xsqlite3CorruptError(tls, 75187) } else { rc = freePage2(tls, pBt, *(*uintptr)(unsafe.Pointer(bp)), ovflPgno) } @@ -33612,7 +33614,7 @@ hdr = int32((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset) if pc+U32(sz) > (*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize { - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 75436) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 75440) return } rc = freeSpace(tls, pPage, uint16(pc), uint16(sz)) @@ -33891,12 +33893,12 @@ if Uptr(pCell) >= Uptr(aData+uintptr(j)) && Uptr(pCell) < Uptr(pEnd) { if Uptr(pCell+uintptr(sz)) > Uptr(pEnd) { - return Xsqlite3CorruptError(tls, 75737) + return Xsqlite3CorruptError(tls, 75741) } pCell = pTmp + uintptr((int64(pCell)-int64(aData))/1) } else if Uptr(pCell+uintptr(sz)) > Uptr(pSrcEnd) && Uptr(pCell) < Uptr(pSrcEnd) { - return Xsqlite3CorruptError(tls, 75742) + return Xsqlite3CorruptError(tls, 75746) } pData -= uintptr(sz) @@ -33904,7 +33906,7 @@ *(*U8)(unsafe.Pointer(pCellptr + 1)) = U8((int64(pData) - int64(aData)) / 1) pCellptr += uintptr(2) if pData < pCellptr { - return Xsqlite3CorruptError(tls, 75748) + return Xsqlite3CorruptError(tls, 75752) } libc.Xmemmove(tls, pData, pCell, uint64(sz)) @@ -33964,7 +33966,7 @@ if Uptr(*(*uintptr)(unsafe.Pointer((*CellArray)(unsafe.Pointer(pCArray)).FapCell + uintptr(i)*8))+uintptr(sz)) > Uptr(pEnd) && Uptr(*(*uintptr)(unsafe.Pointer((*CellArray)(unsafe.Pointer(pCArray)).FapCell + uintptr(i)*8))) < Uptr(pEnd) { - Xsqlite3CorruptError(tls, 75833) + Xsqlite3CorruptError(tls, 75837) return 1 } libc.Xmemmove(tls, pSlot, *(*uintptr)(unsafe.Pointer((*CellArray)(unsafe.Pointer(pCArray)).FapCell + uintptr(i)*8)), uint64(sz)) @@ -34053,7 +34055,7 @@ if !(nShift > nCell) { goto __2 } - return Xsqlite3CorruptError(tls, 75947) + return Xsqlite3CorruptError(tls, 75951) __2: ; libc.Xmemmove(tls, (*MemPage)(unsafe.Pointer(pPg)).FaCellIdx, (*MemPage)(unsafe.Pointer(pPg)).FaCellIdx+uintptr(nShift*2), uint64(nCell*2)) @@ -34169,7 +34171,7 @@ var pBt uintptr = (*MemPage)(unsafe.Pointer(pPage)).FpBt if int32((*MemPage)(unsafe.Pointer(pPage)).FnCell) == 0 { - return Xsqlite3CorruptError(tls, 76060) + return Xsqlite3CorruptError(tls, 76064) } *(*int32)(unsafe.Pointer(bp + 136)) = allocateBtreePage(tls, pBt, bp, bp+8, uint32(0), uint8(0)) @@ -34489,7 +34491,7 @@ if !(int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pOld)).FaData))) != int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp + 112)))).FaData)))) { goto 
__25 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76481) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76485) goto balance_cleanup __25: ; @@ -34500,7 +34502,7 @@ if !(limit < int32(*(*U16)(unsafe.Pointer(pOld + 28)))) { goto __27 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76505) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76509) goto balance_cleanup __27: ; @@ -34658,7 +34660,7 @@ if !(k > NB+2) { goto __55 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76606) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76610) goto balance_cleanup __55: ; @@ -34732,7 +34734,7 @@ }()) { goto __67 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76639) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76643) goto balance_cleanup __67: ; @@ -34795,7 +34797,7 @@ }()) { goto __75 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76683) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76687) goto balance_cleanup __75: ; @@ -34823,7 +34825,7 @@ *(*int32)(unsafe.Pointer(bp + 172)) == SQLITE_OK) { goto __81 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76716) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76720) __81: ; if !(*(*int32)(unsafe.Pointer(bp + 172)) != 0) { @@ -35084,7 +35086,7 @@ if !(Uptr(pSrcEnd) >= Uptr(pCell1) && Uptr(pSrcEnd) < Uptr(pCell1+uintptr(sz2))) { goto __121 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76916) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76920) goto balance_cleanup __121: ; @@ -35276,7 +35278,7 @@ if pOther != pCur && int32((*BtCursor)(unsafe.Pointer(pOther)).FeState) == CURSOR_VALID && (*BtCursor)(unsafe.Pointer(pOther)).FpPage == (*BtCursor)(unsafe.Pointer(pCur)).FpPage { - return Xsqlite3CorruptError(tls, 77146) + return Xsqlite3CorruptError(tls, 77150) } } return SQLITE_OK @@ -35314,7 +35316,7 @@ break } } else if Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(pPage)).FpDbPage) > 1 { - rc = Xsqlite3CorruptError(tls, 77206) + rc = Xsqlite3CorruptError(tls, 77210) } else { var pParent uintptr = *(*uintptr)(unsafe.Pointer(pCur + 144 + uintptr(iPage-1)*8)) var iIdx int32 = int32(*(*U16)(unsafe.Pointer(pCur + 88 + uintptr(iPage-1)*2))) @@ -35420,7 +35422,7 @@ return rc } if Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FpDbPage) != 1 || (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FisInit != 0 { - rc = Xsqlite3CorruptError(tls, 77370) + rc = Xsqlite3CorruptError(tls, 77374) } else { if U32(iOffset)+ovflPageSize < U32(nTotal) { ovflPgno = Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FaData) @@ -35445,7 +35447,7 @@ if (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload+uintptr((*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal) > (*MemPage)(unsafe.Pointer(pPage)).FaDataEnd || (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload < (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr((*MemPage)(unsafe.Pointer(pPage)).FcellOffset) { - return Xsqlite3CorruptError(tls, 77398) + return Xsqlite3CorruptError(tls, 77402) } if int32((*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal) == nTotal { return btreeOverwriteContent(tls, pPage, (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload, pX, @@ -35515,7 +35517,7 @@ goto __3 } - return Xsqlite3CorruptError(tls, 77479) + return Xsqlite3CorruptError(tls, 77483) __3: ; 
__1: @@ -35628,7 +35630,7 @@ goto __21 } - *(*int32)(unsafe.Pointer(bp + 120)) = Xsqlite3CorruptError(tls, 77602) + *(*int32)(unsafe.Pointer(bp + 120)) = Xsqlite3CorruptError(tls, 77606) goto __22 __21: *(*int32)(unsafe.Pointer(bp + 120)) = btreeComputeFreeSpace(tls, pPage) @@ -35688,6 +35690,7 @@ __25: ; idx = int32((*BtCursor)(unsafe.Pointer(pCur)).Fix) + (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnSize = U16(0) if !(*(*int32)(unsafe.Pointer(bp)) == 0) { goto __31 } @@ -35695,7 +35698,7 @@ if !(idx >= int32((*MemPage)(unsafe.Pointer(pPage)).FnCell)) { goto __33 } - return Xsqlite3CorruptError(tls, 77640) + return Xsqlite3CorruptError(tls, 77645) __33: ; *(*int32)(unsafe.Pointer(bp + 120)) = Xsqlite3PagerWrite(tls, (*MemPage)(unsafe.Pointer(pPage)).FpDbPage) @@ -35733,13 +35736,13 @@ if !(oldCell < (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset)+uintptr(10)) { goto __39 } - return Xsqlite3CorruptError(tls, 77667) + return Xsqlite3CorruptError(tls, 77672) __39: ; if !(oldCell+uintptr(*(*int32)(unsafe.Pointer(bp + 124))) > (*MemPage)(unsafe.Pointer(pPage)).FaDataEnd) { goto __40 } - return Xsqlite3CorruptError(tls, 77670) + return Xsqlite3CorruptError(tls, 77675) __40: ; libc.Xmemcpy(tls, oldCell, newCell, uint64(*(*int32)(unsafe.Pointer(bp + 124)))) @@ -35770,7 +35773,6 @@ ; *(*int32)(unsafe.Pointer(bp + 120)) = insertCell(tls, pPage, idx, newCell, *(*int32)(unsafe.Pointer(bp + 124)), uintptr(0), uint32(0)) - (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnSize = U16(0) if !((*MemPage)(unsafe.Pointer(pPage)).FnOverflow != 0) { goto __44 } @@ -35845,7 +35847,7 @@ nIn = U32((*BtCursor)(unsafe.Pointer(pSrc)).Finfo.FnLocal) aIn = (*BtCursor)(unsafe.Pointer(pSrc)).Finfo.FpPayload if aIn+uintptr(nIn) > (*MemPage)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pSrc)).FpPage)).FaDataEnd { - return Xsqlite3CorruptError(tls, 77773) + return Xsqlite3CorruptError(tls, 77777) } nRem = (*BtCursor)(unsafe.Pointer(pSrc)).Finfo.FnPayload if nIn == nRem && nIn < U32((*MemPage)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pDest)).FpPage)).FmaxLocal) { @@ -35870,7 +35872,7 @@ if nRem > nIn { if aIn+uintptr(nIn)+uintptr(4) > (*MemPage)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pSrc)).FpPage)).FaDataEnd { - return Xsqlite3CorruptError(tls, 77798) + return Xsqlite3CorruptError(tls, 77802) } ovflIn = Xsqlite3Get4byte(tls, (*BtCursor)(unsafe.Pointer(pSrc)).Finfo.FpPayload+uintptr(nIn)) } @@ -35971,7 +35973,7 @@ return *(*int32)(unsafe.Pointer(bp + 24)) } } else { - return Xsqlite3CorruptError(tls, 77894) + return Xsqlite3CorruptError(tls, 77898) } } @@ -35979,11 +35981,11 @@ iCellIdx = int32((*BtCursor)(unsafe.Pointer(pCur)).Fix) pPage = (*BtCursor)(unsafe.Pointer(pCur)).FpPage if int32((*MemPage)(unsafe.Pointer(pPage)).FnCell) <= iCellIdx { - return Xsqlite3CorruptError(tls, 77903) + return Xsqlite3CorruptError(tls, 77907) } pCell = (*MemPage)(unsafe.Pointer(pPage)).FaData + uintptr(int32((*MemPage)(unsafe.Pointer(pPage)).FmaskPage)&int32(libc.X__builtin_bswap16(tls, *(*U16)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FaCellIdx + uintptr(2*iCellIdx)))))) if (*MemPage)(unsafe.Pointer(pPage)).FnFree < 0 && btreeComputeFreeSpace(tls, pPage) != 0 { - return Xsqlite3CorruptError(tls, 77907) + return Xsqlite3CorruptError(tls, 77911) } bPreserve = U8(libc.Bool32(int32(flags)&BTREE_SAVEPOSITION != 0)) @@ -36058,7 +36060,7 @@ } pCell = (*MemPage)(unsafe.Pointer(pLeaf)).FaData + uintptr(int32((*MemPage)(unsafe.Pointer(pLeaf)).FmaskPage)&int32(libc.X__builtin_bswap16(tls, 
*(*U16)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pLeaf)).FaCellIdx + uintptr(2*(int32((*MemPage)(unsafe.Pointer(pLeaf)).FnCell)-1))))))) if pCell < (*MemPage)(unsafe.Pointer(pLeaf)).FaData+4 { - return Xsqlite3CorruptError(tls, 77998) + return Xsqlite3CorruptError(tls, 78002) } nCell = int32((*struct { f func(*libc.TLS, uintptr, uintptr) U16 @@ -36127,7 +36129,7 @@ Xsqlite3BtreeGetMeta(tls, p, BTREE_LARGEST_ROOT_PAGE, bp) if *(*Pgno)(unsafe.Pointer(bp)) > btreePagecount(tls, pBt) { - return Xsqlite3CorruptError(tls, 78114) + return Xsqlite3CorruptError(tls, 78118) } *(*Pgno)(unsafe.Pointer(bp))++ @@ -36156,7 +36158,7 @@ } *(*int32)(unsafe.Pointer(bp + 40)) = ptrmapGet(tls, pBt, *(*Pgno)(unsafe.Pointer(bp)), bp+32, bp+36) if int32(*(*U8)(unsafe.Pointer(bp + 32))) == PTRMAP_ROOTPAGE || int32(*(*U8)(unsafe.Pointer(bp + 32))) == PTRMAP_FREEPAGE { - *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3CorruptError(tls, 78162) + *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3CorruptError(tls, 78166) } if *(*int32)(unsafe.Pointer(bp + 40)) != SQLITE_OK { releasePage(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) @@ -36232,7 +36234,7 @@ if !(pgno > btreePagecount(tls, pBt)) { goto __1 } - return Xsqlite3CorruptError(tls, 78252) + return Xsqlite3CorruptError(tls, 78256) __1: ; *(*int32)(unsafe.Pointer(bp + 32)) = getAndInitPage(tls, pBt, pgno, bp, uintptr(0), 0) @@ -36246,7 +36248,7 @@ Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FpDbPage) != 1+libc.Bool32(pgno == Pgno(1))) { goto __3 } - *(*int32)(unsafe.Pointer(bp + 32)) = Xsqlite3CorruptError(tls, 78259) + *(*int32)(unsafe.Pointer(bp + 32)) = Xsqlite3CorruptError(tls, 78263) goto cleardatabasepage_out __3: ; @@ -36380,7 +36382,7 @@ var pBt uintptr = (*Btree)(unsafe.Pointer(p)).FpBt if iTable > btreePagecount(tls, pBt) { - return Xsqlite3CorruptError(tls, 78363) + return Xsqlite3CorruptError(tls, 78367) } *(*int32)(unsafe.Pointer(bp + 12)) = Xsqlite3BtreeClearTable(tls, p, int32(iTable), uintptr(0)) @@ -38850,7 +38852,7 @@ var rc int32 (*Mem)(unsafe.Pointer(pMem)).Fflags = U16(MEM_Null) if Xsqlite3BtreeMaxRecordSize(tls, pCur) < Sqlite3_int64(offset+amt) { - return Xsqlite3CorruptError(tls, 81630) + return Xsqlite3CorruptError(tls, 81634) } if SQLITE_OK == libc.AssignInt32(&rc, Xsqlite3VdbeMemClearAndResize(tls, pMem, int32(amt+U32(1)))) { rc = Xsqlite3BtreePayload(tls, pCur, offset, amt, (*Mem)(unsafe.Pointer(pMem)).Fz) @@ -39499,7 +39501,7 @@ return Xsqlite3GetVarint32(tls, a, bp) }()) if *(*int32)(unsafe.Pointer(bp)) > nRec || iHdr >= *(*int32)(unsafe.Pointer(bp)) { - return Xsqlite3CorruptError(tls, 82270) + return Xsqlite3CorruptError(tls, 82274) } iField = *(*int32)(unsafe.Pointer(bp)) for i = 0; i <= iCol; i++ { @@ -39514,14 +39516,14 @@ }()) if iHdr > *(*int32)(unsafe.Pointer(bp)) { - return Xsqlite3CorruptError(tls, 82276) + return Xsqlite3CorruptError(tls, 82280) } szField = int32(Xsqlite3VdbeSerialTypeLen(tls, *(*U32)(unsafe.Pointer(bp + 4)))) iField = iField + szField } if iField > nRec { - return Xsqlite3CorruptError(tls, 82282) + return Xsqlite3CorruptError(tls, 82286) } if pMem == uintptr(0) { pMem = libc.AssignPtrUintptr(ppVal, Xsqlite3ValueNew(tls, db)) @@ -41825,7 +41827,7 @@ return rc } if *(*int32)(unsafe.Pointer(bp)) != 0 { - return Xsqlite3CorruptError(tls, 86058) + return Xsqlite3CorruptError(tls, 86062) } (*VdbeCursor)(unsafe.Pointer(p)).FdeferredMoveto = U8(0) (*VdbeCursor)(unsafe.Pointer(p)).FcacheStatus = U32(CACHE_STALE) @@ -42376,7 +42378,7 @@ i = 0 } if d1 > uint32(nKey1) { - 
(*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 86985)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 86989)) return 0 } @@ -42441,7 +42443,7 @@ if d1+U32((*Mem)(unsafe.Pointer(bp+8)).Fn) > uint32(nKey1) || int32((*KeyInfo)(unsafe.Pointer(libc.AssignUintptr(&pKeyInfo, (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FpKeyInfo))).FnAllField) <= i { - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87062)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87066)) return 0 } else if *(*uintptr)(unsafe.Pointer(pKeyInfo + 32 + uintptr(i)*8)) != 0 { (*Mem)(unsafe.Pointer(bp + 8)).Fenc = (*KeyInfo)(unsafe.Pointer(pKeyInfo)).Fenc @@ -42475,7 +42477,7 @@ var nStr int32 = int32((*(*U32)(unsafe.Pointer(bp + 64)) - U32(12)) / U32(2)) if d1+U32(nStr) > uint32(nKey1) { - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87092)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87096)) return 0 } else if int32((*Mem)(unsafe.Pointer(pRhs)).Fflags)&MEM_Zero != 0 { if !(isAllZero(tls, aKey1+uintptr(d1), nStr) != 0) { @@ -42525,7 +42527,7 @@ } idx1 = idx1 + U32(Xsqlite3VarintLen(tls, uint64(*(*U32)(unsafe.Pointer(bp + 64))))) if idx1 >= *(*U32)(unsafe.Pointer(bp + 4)) { - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87136)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87140)) return 0 } } @@ -42671,7 +42673,7 @@ if !(szHdr+nStr > nKey1) { goto __7 } - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87299)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87303)) return 0 __7: ; @@ -42842,7 +42844,7 @@ idx_rowid_corruption: ; Xsqlite3VdbeMemReleaseMalloc(tls, bp) - return Xsqlite3CorruptError(tls, 87457) + return Xsqlite3CorruptError(tls, 87461) } // Compare the key of the index entry that cursor pC is pointing to against @@ -42868,7 +42870,7 @@ if nCellKey <= int64(0) || nCellKey > int64(0x7fffffff) { *(*int32)(unsafe.Pointer(res)) = 0 - return Xsqlite3CorruptError(tls, 87490) + return Xsqlite3CorruptError(tls, 87494) } Xsqlite3VdbeMemInit(tls, bp, db, uint16(0)) rc = Xsqlite3VdbeMemFromBtreeZeroOffset(tls, pCur, U32(nCellKey), bp) @@ -43142,7 +43144,7 @@ var v uintptr = pStmt var db uintptr = (*Vdbe)(unsafe.Pointer(v)).Fdb if vdbeSafety(tls, v) != 0 { - return Xsqlite3MisuseError(tls, 87854) + return Xsqlite3MisuseError(tls, 87858) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) if (*Vdbe)(unsafe.Pointer(v)).FstartTime > int64(0) { @@ -43757,7 +43759,7 @@ var db uintptr if vdbeSafetyNotNull(tls, v) != 0 { - return Xsqlite3MisuseError(tls, 88544) + return Xsqlite3MisuseError(tls, 88548) } db = (*Vdbe)(unsafe.Pointer(v)).Fdb Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) @@ -44277,7 +44279,7 @@ var pVar uintptr if vdbeSafetyNotNull(tls, p) != 0 { - return Xsqlite3MisuseError(tls, 89208) + return Xsqlite3MisuseError(tls, 89212) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer((*Vdbe)(unsafe.Pointer(p)).Fdb)).Fmutex) if int32((*Vdbe)(unsafe.Pointer(p)).FeVdbeState) != VDBE_READY_STATE { @@ -44285,7 +44287,7 @@ Xsqlite3_mutex_leave(tls, (*Sqlite3)(unsafe.Pointer((*Vdbe)(unsafe.Pointer(p)).Fdb)).Fmutex) Xsqlite3_log(tls, SQLITE_MISUSE, ts+5357, libc.VaList(bp, (*Vdbe)(unsafe.Pointer(p)).FzSql)) - return Xsqlite3MisuseError(tls, 89216) 
+ return Xsqlite3MisuseError(tls, 89220) } if i >= uint32((*Vdbe)(unsafe.Pointer(p)).FnVar) { Xsqlite3Error(tls, (*Vdbe)(unsafe.Pointer(p)).Fdb, SQLITE_RANGE) @@ -44690,7 +44692,7 @@ if !(!(p != 0) || (*PreUpdate)(unsafe.Pointer(p)).Fop == SQLITE_INSERT) { goto __1 } - rc = Xsqlite3MisuseError(tls, 89707) + rc = Xsqlite3MisuseError(tls, 89711) goto preupdate_old_out __1: ; @@ -44834,7 +44836,7 @@ if !(!(p != 0) || (*PreUpdate)(unsafe.Pointer(p)).Fop == SQLITE_DELETE) { goto __1 } - rc = Xsqlite3MisuseError(tls, 89809) + rc = Xsqlite3MisuseError(tls, 89813) goto preupdate_new_out __1: ; @@ -45278,10 +45280,6 @@ } else if int32((*Mem)(unsafe.Pointer(p)).Fflags)&MEM_Real != 0 { h = h + U64(Xsqlite3VdbeIntValue(tls, p)) } else if int32((*Mem)(unsafe.Pointer(p)).Fflags)&(MEM_Str|MEM_Blob) != 0 { - h = h + U64((*Mem)(unsafe.Pointer(p)).Fn) - if int32((*Mem)(unsafe.Pointer(p)).Fflags)&MEM_Zero != 0 { - h = h + U64(*(*int32)(unsafe.Pointer(p))) - } } } return h @@ -47929,7 +47927,7 @@ goto __9 goto __425 __424: - rc = Xsqlite3CorruptError(tls, 93317) + rc = Xsqlite3CorruptError(tls, 93320) goto abort_due_to_error __425: ; @@ -49689,7 +49687,7 @@ if !((*Op)(unsafe.Pointer(pOp)).Fp2 == 0) { goto __682 } - rc = Xsqlite3CorruptError(tls, 95560) + rc = Xsqlite3CorruptError(tls, 95563) goto __683 __682: goto jump_to_p2 @@ -50467,7 +50465,7 @@ if !((*Op)(unsafe.Pointer(pOp)).Fp5 != 0 && !(Xsqlite3WritableSchema(tls, db) != 0)) { goto __770 } - rc = Xsqlite3ReportError(tls, SQLITE_CORRUPT|int32(3)<<8, 96635, ts+5866) + rc = Xsqlite3ReportError(tls, SQLITE_CORRUPT|int32(3)<<8, 96638, ts+5866) goto abort_due_to_error __770: ; @@ -50577,7 +50575,7 @@ if !(nCellKey <= int64(0) || nCellKey > int64(0x7fffffff)) { goto __781 } - rc = Xsqlite3CorruptError(tls, 96840) + rc = Xsqlite3CorruptError(tls, 96843) goto abort_due_to_error __781: ; @@ -50771,7 +50769,7 @@ goto __803 } - rc = Xsqlite3CorruptError(tls, 97092) + rc = Xsqlite3CorruptError(tls, 97095) __803: ; Xsqlite3DbFreeNN(tls, db, zSql) @@ -52138,7 +52136,7 @@ if !(rc == SQLITE_IOERR|int32(33)<<8) { goto __957 } - rc = Xsqlite3CorruptError(tls, 99031) + rc = Xsqlite3CorruptError(tls, 99034) __957: ; __956: @@ -52658,7 +52656,7 @@ var db uintptr if p == uintptr(0) { - return Xsqlite3MisuseError(tls, 99516) + return Xsqlite3MisuseError(tls, 99519) } db = (*Incrblob)(unsafe.Pointer(p)).Fdb Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) @@ -52741,7 +52739,7 @@ var db uintptr if p == uintptr(0) { - return Xsqlite3MisuseError(tls, 99616) + return Xsqlite3MisuseError(tls, 99619) } db = (*Incrblob)(unsafe.Pointer(p)).Fdb Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) @@ -56181,14 +56179,10 @@ ; Xsqlite3WalkExpr(tls, pWalker, (*Expr)(unsafe.Pointer(pExpr)).FpLeft) if 0 == Xsqlite3ExprCanBeNull(tls, (*Expr)(unsafe.Pointer(pExpr)).FpLeft) && !(int32((*Parse)(unsafe.Pointer(pParse)).FeParseMode) >= PARSE_MODE_RENAME) { - if int32((*Expr)(unsafe.Pointer(pExpr)).Fop) == TK_NOTNULL { - *(*uintptr)(unsafe.Pointer(pExpr + 8)) = ts + 6764 - *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_IsTrue) - } else { - *(*uintptr)(unsafe.Pointer(pExpr + 8)) = ts + 6769 - *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_IsFalse) - } - (*Expr)(unsafe.Pointer(pExpr)).Fop = U8(TK_TRUEFALSE) + *(*int32)(unsafe.Pointer(pExpr + 8)) = libc.Bool32(int32((*Expr)(unsafe.Pointer(pExpr)).Fop) == TK_NOTNULL) + *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_IntValue) + (*Expr)(unsafe.Pointer(pExpr)).Fop = U8(TK_INTEGER) + i = 0 p = pNC __4: @@ -56232,7 +56226,7 @@ var 
pLeft uintptr = (*Expr)(unsafe.Pointer(pExpr)).FpLeft if (*NameContext)(unsafe.Pointer(pNC)).FncFlags&(NC_IdxExpr|NC_GenCol) != 0 { - notValidImpl(tls, pParse, pNC, ts+6775, uintptr(0), pExpr) + notValidImpl(tls, pParse, pNC, ts+6764, uintptr(0), pExpr) } pRight = (*Expr)(unsafe.Pointer(pExpr)).FpRight @@ -56296,7 +56290,7 @@ (*Expr)(unsafe.Pointer(pExpr)).FiTable = exprProbability(tls, (*ExprList_item)(unsafe.Pointer(pList+8+1*32)).FpExpr) if (*Expr)(unsafe.Pointer(pExpr)).FiTable < 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+6792, libc.VaList(bp, pExpr)) + ts+6781, libc.VaList(bp, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } } else { @@ -56312,7 +56306,7 @@ var auth int32 = Xsqlite3AuthCheck(tls, pParse, SQLITE_FUNCTION, uintptr(0), (*FuncDef)(unsafe.Pointer(pDef)).FzName, uintptr(0)) if auth != SQLITE_OK { if auth == SQLITE_DENY { - Xsqlite3ErrorMsg(tls, pParse, ts+6856, + Xsqlite3ErrorMsg(tls, pParse, ts+6845, libc.VaList(bp+8, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } @@ -56326,7 +56320,7 @@ } if (*FuncDef)(unsafe.Pointer(pDef)).FfuncFlags&U32(SQLITE_FUNC_CONSTANT) == U32(0) { if (*NameContext)(unsafe.Pointer(pNC)).FncFlags&(NC_IdxExpr|NC_PartIdx|NC_GenCol) != 0 { - notValidImpl(tls, pParse, pNC, ts+6892, uintptr(0), pExpr) + notValidImpl(tls, pParse, pNC, ts+6881, uintptr(0), pExpr) } } else { @@ -56349,30 +56343,30 @@ if 0 == libc.Bool32(int32((*Parse)(unsafe.Pointer(pParse)).FeParseMode) >= PARSE_MODE_RENAME) { if pDef != 0 && (*FuncDef)(unsafe.Pointer(pDef)).FxValue == uintptr(0) && pWin != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+6920, libc.VaList(bp+16, pExpr)) + ts+6909, libc.VaList(bp+16, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } else if is_agg != 0 && (*NameContext)(unsafe.Pointer(pNC)).FncFlags&NC_AllowAgg == 0 || is_agg != 0 && (*FuncDef)(unsafe.Pointer(pDef)).FfuncFlags&U32(SQLITE_FUNC_WINDOW) != 0 && !(pWin != 0) || is_agg != 0 && pWin != 0 && (*NameContext)(unsafe.Pointer(pNC)).FncFlags&NC_AllowWin == 0 { var zType uintptr if (*FuncDef)(unsafe.Pointer(pDef)).FfuncFlags&U32(SQLITE_FUNC_WINDOW) != 0 || pWin != 0 { - zType = ts + 6963 + zType = ts + 6952 } else { - zType = ts + 6970 + zType = ts + 6959 } - Xsqlite3ErrorMsg(tls, pParse, ts+6980, libc.VaList(bp+24, zType, pExpr)) + Xsqlite3ErrorMsg(tls, pParse, ts+6969, libc.VaList(bp+24, zType, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ is_agg = 0 } else if no_such_func != 0 && int32((*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).Finit.Fbusy) == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+7008, libc.VaList(bp+40, pExpr)) + Xsqlite3ErrorMsg(tls, pParse, ts+6997, libc.VaList(bp+40, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } else if wrong_num_args != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+7030, + Xsqlite3ErrorMsg(tls, pParse, ts+7019, libc.VaList(bp+48, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } else if is_agg == 0 && (*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_WinFunc) != U32(0) { Xsqlite3ErrorMsg(tls, pParse, - ts+7074, + ts+7063, libc.VaList(bp+56, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } @@ -56444,15 +56438,15 @@ var nRef int32 = (*NameContext)(unsafe.Pointer(pNC)).FnRef if (*NameContext)(unsafe.Pointer(pNC)).FncFlags&NC_SelfRef != 0 { - notValidImpl(tls, pParse, pNC, ts+7122, pExpr, pExpr) + notValidImpl(tls, pParse, pNC, ts+7111, pExpr, pExpr) } else { Xsqlite3WalkSelect(tls, pWalker, *(*uintptr)(unsafe.Pointer(pExpr + 32))) } if nRef != (*NameContext)(unsafe.Pointer(pNC)).FnRef { *(*U32)(unsafe.Pointer(pExpr + 4)) |= 
U32(EP_VarSelect) - *(*int32)(unsafe.Pointer(pNC + 40)) |= NC_VarSelect } + *(*int32)(unsafe.Pointer(pNC + 40)) |= NC_Subquery } break @@ -56460,7 +56454,7 @@ case TK_VARIABLE: { if (*NameContext)(unsafe.Pointer(pNC)).FncFlags&(NC_IsCheck|NC_PartIdx|NC_IdxExpr|NC_GenCol) != 0 { - notValidImpl(tls, pParse, pNC, ts+7133, pExpr, pExpr) + notValidImpl(tls, pParse, pNC, ts+7122, pExpr, pExpr) } break @@ -56591,7 +56585,7 @@ defer tls.Free(24) Xsqlite3ErrorMsg(tls, pParse, - ts+7144, libc.VaList(bp, i, zType, mx)) + ts+7133, libc.VaList(bp, i, zType, mx)) Xsqlite3RecordErrorOffsetOfExpr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pError) } @@ -56611,7 +56605,7 @@ } db = (*Parse)(unsafe.Pointer(pParse)).Fdb if (*ExprList)(unsafe.Pointer(pOrderBy)).FnExpr > *(*int32)(unsafe.Pointer(db + 136 + 2*4)) { - Xsqlite3ErrorMsg(tls, pParse, ts+7200, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+7189, 0) return 1 } for i = 0; i < (*ExprList)(unsafe.Pointer(pOrderBy)).FnExpr; i++ { @@ -56646,7 +56640,7 @@ } if Xsqlite3ExprIsInteger(tls, pE, bp+8) != 0 { if *(*int32)(unsafe.Pointer(bp + 8)) <= 0 || *(*int32)(unsafe.Pointer(bp + 8)) > (*ExprList)(unsafe.Pointer(pEList)).FnExpr { - resolveOutOfRangeError(tls, pParse, ts+7234, i+1, (*ExprList)(unsafe.Pointer(pEList)).FnExpr, pE) + resolveOutOfRangeError(tls, pParse, ts+7223, i+1, (*ExprList)(unsafe.Pointer(pEList)).FnExpr, pE) return 1 } } else { @@ -56703,7 +56697,7 @@ for i = 0; i < (*ExprList)(unsafe.Pointer(pOrderBy)).FnExpr; i++ { if int32(*(*uint16)(unsafe.Pointer(pOrderBy + 8 + uintptr(i)*32 + 16 + 4))&0x4>>2) == 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+7240, libc.VaList(bp, i+1)) + ts+7229, libc.VaList(bp, i+1)) return 1 } } @@ -56731,7 +56725,7 @@ return 0 } if (*ExprList)(unsafe.Pointer(pOrderBy)).FnExpr > *(*int32)(unsafe.Pointer(db + 136 + 2*4)) { - Xsqlite3ErrorMsg(tls, pParse, ts+7301, libc.VaList(bp, zType)) + Xsqlite3ErrorMsg(tls, pParse, ts+7290, libc.VaList(bp, zType)) return 1 } pEList = (*Select)(unsafe.Pointer(pSelect)).FpEList @@ -56945,7 +56939,7 @@ *(*int32)(unsafe.Pointer(bp + 40)) |= NC_UEList if (*Select)(unsafe.Pointer(p)).FpHaving != 0 { if (*Select)(unsafe.Pointer(p)).FselFlags&U32(SF_Aggregate) == U32(0) { - Xsqlite3ErrorMsg(tls, pParse, ts+7332, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+7321, 0) return WRC_Abort } if Xsqlite3ResolveExprNames(tls, bp, (*Select)(unsafe.Pointer(p)).FpHaving) != 0 { @@ -56985,7 +56979,7 @@ if (*Select)(unsafe.Pointer(p)).FpOrderBy != uintptr(0) && isCompound <= nCompound && - resolveOrderGroupBy(tls, bp, p, (*Select)(unsafe.Pointer(p)).FpOrderBy, ts+7234) != 0 { + resolveOrderGroupBy(tls, bp, p, (*Select)(unsafe.Pointer(p)).FpOrderBy, ts+7223) != 0 { return WRC_Abort } if (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { @@ -56996,7 +56990,7 @@ if pGroupBy != 0 { var pItem uintptr - if resolveOrderGroupBy(tls, bp, p, pGroupBy, ts+7371) != 0 || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { + if resolveOrderGroupBy(tls, bp, p, pGroupBy, ts+7360) != 0 || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { return WRC_Abort } i = 0 @@ -57008,7 +57002,7 @@ { if (*Expr)(unsafe.Pointer((*ExprList_item)(unsafe.Pointer(pItem)).FpExpr)).Fflags&U32(EP_Agg) != U32(0) { Xsqlite3ErrorMsg(tls, pParse, - ts+7377, 0) + ts+7366, 0) return WRC_Abort } @@ -57872,7 +57866,7 @@ var mxHeight int32 = *(*int32)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb + 136 + 3*4)) if nHeight > mxHeight { Xsqlite3ErrorMsg(tls, pParse, - ts+7436, libc.VaList(bp, mxHeight)) + ts+7425, libc.VaList(bp, mxHeight)) rc = SQLITE_ERROR } return rc 
@@ -58121,10 +58115,10 @@ nExprElem = 1 } if nExprElem != nElem { - Xsqlite3ErrorMsg(tls, pParse, ts+7484, + Xsqlite3ErrorMsg(tls, pParse, ts+7473, libc.VaList(bp, nExprElem, func() uintptr { if nExprElem > 1 { - return ts + 7528 + return ts + 7517 } return ts + 1554 }(), nElem)) @@ -58165,7 +58159,7 @@ !(int32((*Parse)(unsafe.Pointer(pParse)).FeParseMode) >= PARSE_MODE_RENAME) { Xsqlite3ExprDeferredDelete(tls, pParse, pLeft) Xsqlite3ExprDeferredDelete(tls, pParse, pRight) - return Xsqlite3Expr(tls, db, TK_INTEGER, ts+7530) + return Xsqlite3Expr(tls, db, TK_INTEGER, ts+7519) } else { return Xsqlite3PExpr(tls, pParse, TK_AND, pLeft, pRight) } @@ -58191,7 +58185,7 @@ if pList != 0 && (*ExprList)(unsafe.Pointer(pList)).FnExpr > *(*int32)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb + 136 + 6*4)) && !(int32((*Parse)(unsafe.Pointer(pParse)).Fnested) != 0) { - Xsqlite3ErrorMsg(tls, pParse, ts+7532, libc.VaList(bp, pToken)) + Xsqlite3ErrorMsg(tls, pParse, ts+7521, libc.VaList(bp, pToken)) } *(*uintptr)(unsafe.Pointer(pNew + 32)) = pList *(*U32)(unsafe.Pointer(pNew + 4)) |= U32(EP_HasFunc) @@ -58219,7 +58213,7 @@ if (*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_FromDDL) != U32(0) { if (*FuncDef)(unsafe.Pointer(pDef)).FfuncFlags&U32(SQLITE_FUNC_DIRECT) != U32(0) || (*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).Fflags&uint64(SQLITE_TrustedSchema) == uint64(0) { - Xsqlite3ErrorMsg(tls, pParse, ts+7566, libc.VaList(bp, pExpr)) + Xsqlite3ErrorMsg(tls, pParse, ts+7555, libc.VaList(bp, pExpr)) } } } @@ -58266,7 +58260,7 @@ } if bOk == 0 || *(*I64)(unsafe.Pointer(bp + 8)) < int64(1) || *(*I64)(unsafe.Pointer(bp + 8)) > I64(*(*int32)(unsafe.Pointer(db + 136 + 9*4))) { - Xsqlite3ErrorMsg(tls, pParse, ts+7586, + Xsqlite3ErrorMsg(tls, pParse, ts+7575, libc.VaList(bp, *(*int32)(unsafe.Pointer(db + 136 + 9*4)))) Xsqlite3RecordErrorOffsetOfExpr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pExpr) return @@ -58291,7 +58285,7 @@ } (*Expr)(unsafe.Pointer(pExpr)).FiColumn = x if int32(x) > *(*int32)(unsafe.Pointer(db + 136 + 9*4)) { - Xsqlite3ErrorMsg(tls, pParse, ts+7629, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+7618, 0) Xsqlite3RecordErrorOffsetOfExpr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pExpr) } } @@ -58866,7 +58860,7 @@ if !(int32((*Expr)(unsafe.Pointer(pExpr)).Fop) != TK_SELECT && (*IdList)(unsafe.Pointer(pColumns)).FnId != libc.AssignInt32(&n, Xsqlite3ExprVectorSize(tls, pExpr))) { goto __3 } - Xsqlite3ErrorMsg(tls, pParse, ts+7652, + Xsqlite3ErrorMsg(tls, pParse, ts+7641, libc.VaList(bp, (*IdList)(unsafe.Pointer(pColumns)).FnId, n)) goto vector_append_error __3: @@ -58989,7 +58983,7 @@ var mx int32 = *(*int32)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb + 136 + 2*4)) if pEList != 0 && (*ExprList)(unsafe.Pointer(pEList)).FnExpr > mx { - Xsqlite3ErrorMsg(tls, pParse, ts+7682, libc.VaList(bp, zObject)) + Xsqlite3ErrorMsg(tls, pParse, ts+7671, libc.VaList(bp, zObject)) } } @@ -59045,10 +59039,10 @@ // "false" EP_IsFalse // anything else 0 func Xsqlite3IsTrueOrFalse(tls *libc.TLS, zIn uintptr) U32 { - if Xsqlite3StrICmp(tls, zIn, ts+6764) == 0 { + if Xsqlite3StrICmp(tls, zIn, ts+7694) == 0 { return U32(EP_IsTrue) } - if Xsqlite3StrICmp(tls, zIn, ts+6769) == 0 { + if Xsqlite3StrICmp(tls, zIn, ts+7699) == 0 { return U32(EP_IsFalse) } return U32(0) @@ -60122,7 +60116,7 @@ } if (*Select)(unsafe.Pointer(pSel)).FpLimit != 0 { var db uintptr = (*Parse)(unsafe.Pointer(pParse)).Fdb - pLimit = Xsqlite3Expr(tls, db, TK_INTEGER, ts+7530) + pLimit = Xsqlite3Expr(tls, db, TK_INTEGER, ts+7519) if 
pLimit != 0 { (*Expr)(unsafe.Pointer(pLimit)).FaffExpr = uint8(SQLITE_AFF_NUMERIC) pLimit = Xsqlite3PExpr(tls, pParse, TK_NE, @@ -60560,6 +60554,7 @@ func Xsqlite3ExprCodeGeneratedColumn(tls *libc.TLS, pParse uintptr, pTab uintptr, pCol uintptr, regOut int32) { var iAddr int32 var v uintptr = (*Parse)(unsafe.Pointer(pParse)).FpVdbe + var nErr int32 = (*Parse)(unsafe.Pointer(pParse)).FnErr if (*Parse)(unsafe.Pointer(pParse)).FiSelfTab > 0 { iAddr = Xsqlite3VdbeAddOp3(tls, v, OP_IfNullRow, (*Parse)(unsafe.Pointer(pParse)).FiSelfTab-1, 0, regOut) @@ -60573,6 +60568,9 @@ if iAddr != 0 { Xsqlite3VdbeJumpHere(tls, v, iAddr) } + if (*Parse)(unsafe.Pointer(pParse)).FnErr > nErr { + (*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).FerrByteOffset = -1 + } } // Generate code to extract the value of the iCol-th column of a table. @@ -60791,6 +60789,7 @@ var p uintptr var v uintptr for p = (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr; p != 0; p = (*IndexedExpr)(unsafe.Pointer(p)).FpIENext { + var exprAff U8 var iDataCur int32 = (*IndexedExpr)(unsafe.Pointer(p)).FiDataCur if iDataCur < 0 { continue @@ -60804,6 +60803,14 @@ if Xsqlite3ExprCompare(tls, uintptr(0), pExpr, (*IndexedExpr)(unsafe.Pointer(p)).FpExpr, iDataCur) != 0 { continue } + + exprAff = Xsqlite3ExprAffinity(tls, pExpr) + if int32(exprAff) <= SQLITE_AFF_BLOB && int32((*IndexedExpr)(unsafe.Pointer(p)).Faff) != SQLITE_AFF_BLOB || + int32(exprAff) == SQLITE_AFF_TEXT && int32((*IndexedExpr)(unsafe.Pointer(p)).Faff) != SQLITE_AFF_TEXT || + int32(exprAff) >= SQLITE_AFF_NUMERIC && int32((*IndexedExpr)(unsafe.Pointer(p)).Faff) != SQLITE_AFF_NUMERIC { + continue + } + v = (*Parse)(unsafe.Pointer(pParse)).FpVdbe if (*IndexedExpr)(unsafe.Pointer(p)).FbMaybeNullRow != 0 { @@ -61577,7 +61584,7 @@ if !((*Expr)(unsafe.Pointer(pExpr)).FiTable != n1) { goto __122 } - Xsqlite3ErrorMsg(tls, pParse, ts+7652, + Xsqlite3ErrorMsg(tls, pParse, ts+7641, libc.VaList(bp+24, (*Expr)(unsafe.Pointer(pExpr)).FiTable, n1)) __122: ; @@ -61599,11 +61606,10 @@ return target __50: - if !(!((*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_Collate) != U32(0)) && - (*Expr)(unsafe.Pointer(pExpr)).FpLeft != 0 && - int32((*Expr)(unsafe.Pointer((*Expr)(unsafe.Pointer(pExpr)).FpLeft)).Fop) == TK_FUNCTION) { + if !!((*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_Collate) != U32(0)) { goto __123 } + inReg = Xsqlite3ExprCodeTarget(tls, pParse, (*Expr)(unsafe.Pointer(pExpr)).FpLeft, target) if !(inReg != target) { goto __125 @@ -61674,13 +61680,19 @@ ; __127: ; - addrINR = Xsqlite3VdbeAddOp1(tls, v, OP_IfNullRow, (*Expr)(unsafe.Pointer(pExpr)).FiTable) + addrINR = Xsqlite3VdbeAddOp3(tls, v, OP_IfNullRow, (*Expr)(unsafe.Pointer(pExpr)).FiTable, 0, target) (*Parse)(unsafe.Pointer(pParse)).FokConstFactor = U8(0) inReg = Xsqlite3ExprCodeTarget(tls, pParse, (*Expr)(unsafe.Pointer(pExpr)).FpLeft, target) (*Parse)(unsafe.Pointer(pParse)).FokConstFactor = okConstFactor + if !(inReg != target) { + goto __130 + } + Xsqlite3VdbeAddOp2(tls, v, OP_SCopy, inReg, target) + inReg = target +__130: + ; Xsqlite3VdbeJumpHere(tls, v, addrINR) - Xsqlite3VdbeChangeP3(tls, v, addrINR, inReg) goto __5 __56: @@ -61693,15 +61705,15 @@ nExpr = (*ExprList)(unsafe.Pointer(pEList)).FnExpr endLabel = Xsqlite3VdbeMakeLabel(tls, pParse) if !(libc.AssignUintptr(&pX, (*Expr)(unsafe.Pointer(pExpr)).FpLeft) != uintptr(0)) { - goto __130 + goto __131 } pDel = Xsqlite3ExprDup(tls, db1, pX, 0) if !((*Sqlite3)(unsafe.Pointer(db1)).FmallocFailed != 0) { - goto __131 + goto __132 } Xsqlite3ExprDelete(tls, db1, pDel) goto 
__5 -__131: +__132: ; exprToRegister(tls, pDel, exprCodeVector(tls, pParse, pDel, bp+40)) @@ -61711,22 +61723,22 @@ pTest = bp + 120 *(*int32)(unsafe.Pointer(bp + 40)) = 0 -__130: +__131: ; i1 = 0 -__132: +__133: if !(i1 < nExpr-1) { - goto __134 + goto __135 } if !(pX != 0) { - goto __135 + goto __136 } (*Expr)(unsafe.Pointer(bp + 120)).FpRight = (*ExprList_item)(unsafe.Pointer(aListelem + uintptr(i1)*32)).FpExpr - goto __136 -__135: - pTest = (*ExprList_item)(unsafe.Pointer(aListelem + uintptr(i1)*32)).FpExpr + goto __137 __136: + pTest = (*ExprList_item)(unsafe.Pointer(aListelem + uintptr(i1)*32)).FpExpr +__137: ; nextCase = Xsqlite3VdbeMakeLabel(tls, pParse) @@ -61735,21 +61747,21 @@ Xsqlite3ExprCode(tls, pParse, (*ExprList_item)(unsafe.Pointer(aListelem+uintptr(i1+1)*32)).FpExpr, target) Xsqlite3VdbeGoto(tls, v, endLabel) Xsqlite3VdbeResolveLabel(tls, v, nextCase) - goto __133 -__133: - i1 = i1 + 2 - goto __132 goto __134 __134: + i1 = i1 + 2 + goto __133 + goto __135 +__135: ; if !(nExpr&1 != 0) { - goto __137 + goto __138 } Xsqlite3ExprCode(tls, pParse, (*ExprList_item)(unsafe.Pointer(pEList+8+uintptr(nExpr-1)*32)).FpExpr, target) - goto __138 -__137: - Xsqlite3VdbeAddOp2(tls, v, OP_Null, 0, target) + goto __139 __138: + Xsqlite3VdbeAddOp2(tls, v, OP_Null, 0, target) +__139: ; Xsqlite3ExprDelete(tls, db1, pDel) setDoNotMergeFlagOnCopy(tls, v) @@ -61759,27 +61771,27 @@ __57: ; if !(!(int32((*Parse)(unsafe.Pointer(pParse)).FpTriggerTab) != 0) && !(int32((*Parse)(unsafe.Pointer(pParse)).Fnested) != 0)) { - goto __139 + goto __140 } Xsqlite3ErrorMsg(tls, pParse, ts+8082, 0) return 0 -__139: +__140: ; if !(int32((*Expr)(unsafe.Pointer(pExpr)).FaffExpr) == OE_Abort) { - goto __140 + goto __141 } Xsqlite3MayAbort(tls, pParse) -__140: +__141: ; if !(int32((*Expr)(unsafe.Pointer(pExpr)).FaffExpr) == OE_Ignore) { - goto __141 + goto __142 } Xsqlite3VdbeAddOp4(tls, v, OP_Halt, SQLITE_OK, OE_Ignore, 0, *(*uintptr)(unsafe.Pointer(pExpr + 8)), 0) - goto __142 -__141: + goto __143 +__142: Xsqlite3HaltConstraint(tls, pParse, func() int32 { if (*Parse)(unsafe.Pointer(pParse)).FpTriggerTab != 0 { @@ -61788,7 +61800,7 @@ return SQLITE_ERROR }(), int32((*Expr)(unsafe.Pointer(pExpr)).FaffExpr), *(*uintptr)(unsafe.Pointer(pExpr + 8)), int8(0), uint8(0)) -__142: +__143: ; goto __5 @@ -64456,7 +64468,7 @@ return SQLITE_NOMEM } if Xsqlite3_strnicmp(tls, zSql, ts+10922, 7) != 0 { - return Xsqlite3CorruptError(tls, 113494) + return Xsqlite3CorruptError(tls, 113516) } (*Sqlite3)(unsafe.Pointer(db)).Finit.FiDb = func() uint8 { if bTemp != 0 { @@ -64473,7 +64485,7 @@ } if rc == SQLITE_OK && ((*Parse)(unsafe.Pointer(p)).FpNewTable == uintptr(0) && (*Parse)(unsafe.Pointer(p)).FpNewIndex == uintptr(0) && (*Parse)(unsafe.Pointer(p)).FpNewTrigger == uintptr(0)) { - rc = Xsqlite3CorruptError(tls, 113505) + rc = Xsqlite3CorruptError(tls, 113527) } (*Sqlite3)(unsafe.Pointer(db)).Finit.FiDb = U8(0) @@ -65394,7 +65406,7 @@ goto __2 } - rc = Xsqlite3CorruptError(tls, 114441) + rc = Xsqlite3CorruptError(tls, 114463) goto drop_column_done __2: ; @@ -69758,6 +69770,12 @@ pExpr = Xsqlite3PExpr(tls, pParse, TK_UPLUS, pExpr, uintptr(0)) __11: ; + if !(pExpr != 0 && int32((*Expr)(unsafe.Pointer(pExpr)).Fop) != TK_RAISE) { + goto __12 + } + (*Expr)(unsafe.Pointer(pExpr)).FaffExpr = (*Column)(unsafe.Pointer(pCol)).Faffinity +__12: + ; Xsqlite3ColumnSetExpr(tls, pParse, pTab, pCol, pExpr) pExpr = uintptr(0) goto generated_done @@ -70922,7 +70940,7 @@ if Xsqlite3_strnicmp(tls, (*Table)(unsafe.Pointer(pTab)).FzName+uintptr(7), 
ts+3286, 4) == 0 { return 0 } - if Xsqlite3_strnicmp(tls, (*Table)(unsafe.Pointer(pTab)).FzName+uintptr(7), ts+7133, 10) == 0 { + if Xsqlite3_strnicmp(tls, (*Table)(unsafe.Pointer(pTab)).FzName+uintptr(7), ts+7122, 10) == 0 { return 0 } return 1 @@ -72168,7 +72186,7 @@ goto __101 } Xsqlite3ErrorMsg(tls, pParse, ts+14140, 0) - (*Parse)(unsafe.Pointer(pParse)).Frc = Xsqlite3CorruptError(tls, 121835) + (*Parse)(unsafe.Pointer(pParse)).Frc = Xsqlite3CorruptError(tls, 121859) goto exit_create_index __101: ; @@ -74213,7 +74231,7 @@ goto __16 __15: wcf = U16(WHERE_ONEPASS_DESIRED | WHERE_DUPLICATES_OK) - if !((*NameContext)(unsafe.Pointer(bp+16)).FncFlags&NC_VarSelect != 0) { + if !((*NameContext)(unsafe.Pointer(bp+16)).FncFlags&NC_Subquery != 0) { goto __23 } bComplex = 1 @@ -80681,7 +80699,7 @@ if !!(Xsqlite3SafetyCheckOk(tls, db) != 0) { goto __1 } - return Xsqlite3MisuseError(tls, 131895) + return Xsqlite3MisuseError(tls, 131931) __1: ; if !(zSql == uintptr(0)) { @@ -82080,7 +82098,7 @@ } else if (*FuncDef)(unsafe.Pointer(p)).FxFinalize != uintptr(0) { zType = ts + 17513 } else { - zType = ts + 7528 + zType = ts + 7517 } Xsqlite3VdbeMultiLoad(tls, v, 1, ts+17515, libc.VaList(bp, (*FuncDef)(unsafe.Pointer(p)).FzName, isBuiltin, @@ -82241,6 +82259,7 @@ var zErr2 uintptr var k3 int32 var pCheck uintptr + var jmp7 int32 var jmp6 int32 var iCol1 int32 var uniqOk int32 @@ -83559,7 +83578,7 @@ goto __217 } pMod = (*HashElem)(unsafe.Pointer(j1)).Fdata - Xsqlite3VdbeMultiLoad(tls, v, 1, ts+7528, libc.VaList(bp+264, (*Module)(unsafe.Pointer(pMod)).FzName)) + Xsqlite3VdbeMultiLoad(tls, v, 1, ts+7517, libc.VaList(bp+264, (*Module)(unsafe.Pointer(pMod)).FzName)) goto __216 __216: j1 = (*HashElem)(unsafe.Pointer(j1)).Fnext @@ -83575,7 +83594,7 @@ if !(i6 < int32(uint64(unsafe.Sizeof(aPragmaName))/uint64(unsafe.Sizeof(PragmaName{})))) { goto __220 } - Xsqlite3VdbeMultiLoad(tls, v, 1, ts+7528, libc.VaList(bp+272, aPragmaName[i6].FzName)) + Xsqlite3VdbeMultiLoad(tls, v, 1, ts+7517, libc.VaList(bp+272, aPragmaName[i6].FzName)) goto __219 __219: i6++ @@ -84380,80 +84399,94 @@ jmp4 = integrityCheckResultRow(tls, v) Xsqlite3VdbeJumpHere(tls, v, jmp21) + if !((*Table)(unsafe.Pointer(pTab9)).FtabFlags&U32(TF_WithoutRowid) == U32(0)) { + goto __345 + } + Xsqlite3VdbeAddOp2(tls, v, OP_IdxRowid, *(*int32)(unsafe.Pointer(bp + 616))+j4, 3) + jmp7 = Xsqlite3VdbeAddOp3(tls, v, OP_Eq, 3, 0, r1+int32((*Index)(unsafe.Pointer(pIdx5)).FnColumn)-1) + + Xsqlite3VdbeLoadString(tls, v, 3, + ts+17929) + Xsqlite3VdbeAddOp3(tls, v, OP_Concat, 7, 3, 3) + Xsqlite3VdbeLoadString(tls, v, 4, ts+17965) + Xsqlite3VdbeGoto(tls, v, jmp5-1) + Xsqlite3VdbeJumpHere(tls, v, jmp7) +__345: + ; label6 = 0 kk = 0 -__345: +__346: if !(kk < int32((*Index)(unsafe.Pointer(pIdx5)).FnKeyCol)) { - goto __347 + goto __348 } if !(*(*uintptr)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx5)).FazColl + uintptr(kk)*8)) == uintptr(unsafe.Pointer(&Xsqlite3StrBINARY))) { - goto __348 + goto __349 } - goto __346 -__348: + goto __347 +__349: ; if !(label6 == 0) { - goto __349 + goto __350 } label6 = Xsqlite3VdbeMakeLabel(tls, pParse) -__349: +__350: ; Xsqlite3VdbeAddOp3(tls, v, OP_Column, *(*int32)(unsafe.Pointer(bp + 616))+j4, kk, 3) Xsqlite3VdbeAddOp3(tls, v, OP_Ne, 3, label6, r1+kk) - goto __346 -__346: - kk++ - goto __345 goto __347 __347: + kk++ + goto __346 + goto __348 +__348: ; if !(label6 != 0) { - goto __350 + goto __351 } jmp6 = Xsqlite3VdbeAddOp0(tls, v, OP_Goto) Xsqlite3VdbeResolveLabel(tls, v, label6) Xsqlite3VdbeLoadString(tls, v, 3, ts+17903) 
Xsqlite3VdbeAddOp3(tls, v, OP_Concat, 7, 3, 3) - Xsqlite3VdbeLoadString(tls, v, 4, ts+17929) + Xsqlite3VdbeLoadString(tls, v, 4, ts+17976) Xsqlite3VdbeGoto(tls, v, jmp5-1) Xsqlite3VdbeJumpHere(tls, v, jmp6) -__350: +__351: ; if !(int32((*Index)(unsafe.Pointer(pIdx5)).FonError) != OE_None) { - goto __351 + goto __352 } uniqOk = Xsqlite3VdbeMakeLabel(tls, pParse) kk = 0 -__352: +__353: if !(kk < int32((*Index)(unsafe.Pointer(pIdx5)).FnKeyCol)) { - goto __354 + goto __355 } iCol1 = int32(*(*I16)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx5)).FaiColumn + uintptr(kk)*2))) if !(iCol1 >= 0 && uint32(int32(*(*uint8)(unsafe.Pointer((*Table)(unsafe.Pointer(pTab9)).FaCol + uintptr(iCol1)*24 + 8))&0xf>>0)) != 0) { - goto __355 + goto __356 } - goto __353 -__355: + goto __354 +__356: ; Xsqlite3VdbeAddOp2(tls, v, OP_IsNull, r1+kk, uniqOk) - goto __353 -__353: - kk++ - goto __352 goto __354 __354: + kk++ + goto __353 + goto __355 +__355: ; jmp61 = Xsqlite3VdbeAddOp1(tls, v, OP_Next, *(*int32)(unsafe.Pointer(bp + 616))+j4) Xsqlite3VdbeGoto(tls, v, uniqOk) Xsqlite3VdbeJumpHere(tls, v, jmp61) Xsqlite3VdbeAddOp4Int(tls, v, OP_IdxGT, *(*int32)(unsafe.Pointer(bp + 616))+j4, uniqOk, r1, int32((*Index)(unsafe.Pointer(pIdx5)).FnKeyCol)) - Xsqlite3VdbeLoadString(tls, v, 3, ts+17956) + Xsqlite3VdbeLoadString(tls, v, 3, ts+18003) Xsqlite3VdbeGoto(tls, v, jmp5) Xsqlite3VdbeResolveLabel(tls, v, uniqOk) -__351: +__352: ; Xsqlite3VdbeJumpHere(tls, v, jmp4) Xsqlite3ResolvePartIdxLabel(tls, pParse, *(*int32)(unsafe.Pointer(bp + 632))) @@ -84470,20 +84503,20 @@ Xsqlite3VdbeAddOp2(tls, v, OP_Next, *(*int32)(unsafe.Pointer(bp + 612)), loopTop) Xsqlite3VdbeJumpHere(tls, v, loopTop-1) if !!(isQuick != 0) { - goto __356 + goto __357 } - Xsqlite3VdbeLoadString(tls, v, 2, ts+17983) + Xsqlite3VdbeLoadString(tls, v, 2, ts+18030) j4 = 0 pIdx5 = (*Table)(unsafe.Pointer(pTab9)).FpIndex -__357: +__358: if !(pIdx5 != 0) { - goto __359 + goto __360 } if !(pPk1 == pIdx5) { - goto __360 + goto __361 } - goto __358 -__360: + goto __359 +__361: ; Xsqlite3VdbeAddOp2(tls, v, OP_Count, *(*int32)(unsafe.Pointer(bp + 616))+j4, 3) addr1 = Xsqlite3VdbeAddOp3(tls, v, OP_Eq, 8+j4, 0, 3) @@ -84492,21 +84525,21 @@ Xsqlite3VdbeAddOp3(tls, v, OP_Concat, 4, 2, 3) integrityCheckResultRow(tls, v) Xsqlite3VdbeJumpHere(tls, v, addr1) - goto __358 -__358: - pIdx5 = (*Index)(unsafe.Pointer(pIdx5)).FpNext - j4++ - goto __357 goto __359 __359: + pIdx5 = (*Index)(unsafe.Pointer(pIdx5)).FpNext + j4++ + goto __358 + goto __360 +__360: ; if !(pPk1 != 0) { - goto __361 + goto __362 } Xsqlite3ReleaseTempRange(tls, pParse, r2, int32((*Index)(unsafe.Pointer(pPk1)).FnKeyCol)) -__361: +__362: ; -__356: +__357: ; goto __291 __291: @@ -84524,14 +84557,14 @@ ; aOp2 = Xsqlite3VdbeAddOpList(tls, v, int32(uint64(unsafe.Sizeof(endCode))/uint64(unsafe.Sizeof(VdbeOpList{}))), uintptr(unsafe.Pointer(&endCode)), iLn5) if !(aOp2 != 0) { - goto __362 + goto __363 } (*VdbeOp)(unsafe.Pointer(aOp2)).Fp2 = 1 - *(*int32)(unsafe.Pointer(bp + 608)) (*VdbeOp)(unsafe.Pointer(aOp2 + 2*24)).Fp4type = int8(-1) - *(*uintptr)(unsafe.Pointer(aOp2 + 2*24 + 16)) = ts + 18012 + *(*uintptr)(unsafe.Pointer(aOp2 + 2*24 + 16)) = ts + 18059 (*VdbeOp)(unsafe.Pointer(aOp2 + 5*24)).Fp4type = int8(-1) *(*uintptr)(unsafe.Pointer(aOp2 + 5*24 + 16)) = Xsqlite3ErrStr(tls, SQLITE_CORRUPT) -__362: +__363: ; Xsqlite3VdbeChangeP3(tls, v, 0, Xsqlite3VdbeCurrentAddr(tls, v)-2) @@ -84539,27 +84572,27 @@ __45: if !!(zRight != 0) { - goto __363 + goto __364 } if !(Xsqlite3ReadSchema(tls, pParse) != 0) { - goto __365 + goto 
__366 } goto pragma_out -__365: +__366: ; returnSingleText(tls, v, encnames1[(*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).Fenc].FzName) - goto __364 -__363: + goto __365 +__364: if !((*Sqlite3)(unsafe.Pointer(db)).FmDbFlags&U32(DBFLAG_EncodingFixed) == U32(0)) { - goto __366 + goto __367 } pEnc = uintptr(unsafe.Pointer(&encnames1)) -__367: +__368: if !((*EncName)(unsafe.Pointer(pEnc)).FzName != 0) { - goto __369 + goto __370 } if !(0 == Xsqlite3StrICmp(tls, zRight, (*EncName)(unsafe.Pointer(pEnc)).FzName)) { - goto __370 + goto __371 } if (*EncName)(unsafe.Pointer(pEnc)).Fenc != 0 { enc = (*EncName)(unsafe.Pointer(pEnc)).Fenc @@ -84568,25 +84601,25 @@ } (*Schema)(unsafe.Pointer((*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb)).FpSchema)).Fenc = enc Xsqlite3SetTextEncoding(tls, db, enc) - goto __369 -__370: + goto __370 +__371: ; - goto __368 -__368: - pEnc += 16 - goto __367 goto __369 __369: + pEnc += 16 + goto __368 + goto __370 +__370: ; if !!(int32((*EncName)(unsafe.Pointer(pEnc)).FzName) != 0) { - goto __371 + goto __372 } - Xsqlite3ErrorMsg(tls, pParse, ts+18015, libc.VaList(bp+456, zRight)) -__371: + Xsqlite3ErrorMsg(tls, pParse, ts+18062, libc.VaList(bp+456, zRight)) +__372: ; -__366: +__367: ; -__364: +__365: ; goto __15 @@ -84594,15 +84627,15 @@ iCookie = int32((*PragmaName)(unsafe.Pointer(pPragma)).FiArg) Xsqlite3VdbeUsesBtree(tls, v, iDb) if !(zRight != 0 && int32((*PragmaName)(unsafe.Pointer(pPragma)).FmPragFlg)&PragFlg_ReadOnly == 0) { - goto __372 + goto __373 } aOp3 = Xsqlite3VdbeAddOpList(tls, v, int32(uint64(unsafe.Sizeof(setCookie))/uint64(unsafe.Sizeof(VdbeOpList{}))), uintptr(unsafe.Pointer(&setCookie)), 0) if !(0 != 0) { - goto __374 + goto __375 } goto __15 -__374: +__375: ; (*VdbeOp)(unsafe.Pointer(aOp3)).Fp1 = iDb (*VdbeOp)(unsafe.Pointer(aOp3 + 1*24)).Fp1 = iDb @@ -84610,41 +84643,41 @@ (*VdbeOp)(unsafe.Pointer(aOp3 + 1*24)).Fp3 = Xsqlite3Atoi(tls, zRight) (*VdbeOp)(unsafe.Pointer(aOp3 + 1*24)).Fp5 = U16(1) if !(iCookie == BTREE_SCHEMA_VERSION && (*Sqlite3)(unsafe.Pointer(db)).Fflags&uint64(SQLITE_Defensive) != uint64(0)) { - goto __375 + goto __376 } (*VdbeOp)(unsafe.Pointer(aOp3 + 1*24)).Fopcode = U8(OP_Noop) -__375: +__376: ; - goto __373 -__372: + goto __374 +__373: ; aOp4 = Xsqlite3VdbeAddOpList(tls, v, int32(uint64(unsafe.Sizeof(readCookie))/uint64(unsafe.Sizeof(VdbeOpList{}))), uintptr(unsafe.Pointer(&readCookie)), 0) if !(0 != 0) { - goto __376 + goto __377 } goto __15 -__376: +__377: ; (*VdbeOp)(unsafe.Pointer(aOp4)).Fp1 = iDb (*VdbeOp)(unsafe.Pointer(aOp4 + 1*24)).Fp1 = iDb (*VdbeOp)(unsafe.Pointer(aOp4 + 1*24)).Fp3 = iCookie Xsqlite3VdbeReusable(tls, v) -__373: +__374: ; goto __15 __47: i10 = 0 (*Parse)(unsafe.Pointer(pParse)).FnMem = 1 -__377: +__378: if !(libc.AssignUintptr(&zOpt, Xsqlite3_compileoption_get(tls, libc.PostIncInt32(&i10, 1))) != uintptr(0)) { - goto __378 + goto __379 } Xsqlite3VdbeLoadString(tls, v, 1, zOpt) Xsqlite3VdbeAddOp2(tls, v, OP_ResultRow, 1, 1) - goto __377 -__378: + goto __378 +__379: ; Xsqlite3VdbeReusable(tls, v) @@ -84659,31 +84692,31 @@ }() eMode2 = SQLITE_CHECKPOINT_PASSIVE if !(zRight != 0) { - goto __379 + goto __380 } if !(Xsqlite3StrICmp(tls, zRight, ts+17345) == 0) { - goto __380 + goto __381 } eMode2 = SQLITE_CHECKPOINT_FULL - goto __381 -__380: - if !(Xsqlite3StrICmp(tls, zRight, ts+18040) == 0) { - goto __382 + goto __382 +__381: + if !(Xsqlite3StrICmp(tls, zRight, ts+18087) == 0) { + goto __383 } eMode2 = SQLITE_CHECKPOINT_RESTART - goto __383 -__382: + goto __384 +__383: if 
!(Xsqlite3StrICmp(tls, zRight, ts+17498) == 0) { - goto __384 + goto __385 } eMode2 = SQLITE_CHECKPOINT_TRUNCATE -__384: +__385: ; -__383: +__384: ; -__381: +__382: ; -__379: +__380: ; (*Parse)(unsafe.Pointer(pParse)).FnMem = 3 Xsqlite3VdbeAddOp3(tls, v, OP_Checkpoint, iBt, eMode2, 1) @@ -84693,10 +84726,10 @@ __49: if !(zRight != 0) { - goto __385 + goto __386 } Xsqlite3_wal_autocheckpoint(tls, db, Xsqlite3Atoi(tls, zRight)) -__385: +__386: ; returnSingleInt(tls, v, func() int64 { @@ -84716,19 +84749,19 @@ __51: if !(zRight != 0) { - goto __386 + goto __387 } opMask = U32(Xsqlite3Atoi(tls, zRight)) if !(opMask&U32(0x02) == U32(0)) { - goto __388 + goto __389 } goto __15 -__388: +__389: ; - goto __387 -__386: - opMask = U32(0xfffe) + goto __388 __387: + opMask = U32(0xfffe) +__388: ; iTabCur = libc.PostIncInt32(&(*Parse)(unsafe.Pointer(pParse)).FnTab, 1) iDbLast = func() int32 { @@ -84737,86 +84770,86 @@ } return (*Sqlite3)(unsafe.Pointer(db)).FnDb - 1 }() -__389: +__390: if !(iDb <= iDbLast) { - goto __391 + goto __392 } if !(iDb == 1) { - goto __392 + goto __393 } - goto __390 -__392: + goto __391 +__393: ; Xsqlite3CodeVerifySchema(tls, pParse, iDb) pSchema = (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + uintptr(iDb)*32)).FpSchema k4 = (*Hash)(unsafe.Pointer(pSchema + 8)).Ffirst -__393: +__394: if !(k4 != 0) { - goto __395 + goto __396 } pTab10 = (*HashElem)(unsafe.Pointer(k4)).Fdata if !((*Table)(unsafe.Pointer(pTab10)).FtabFlags&U32(TF_StatsUsed) == U32(0)) { - goto __396 + goto __397 } - goto __394 -__396: + goto __395 +__397: ; szThreshold = LogEst(int32((*Table)(unsafe.Pointer(pTab10)).FnRowLogEst) + 46) pIdx6 = (*Table)(unsafe.Pointer(pTab10)).FpIndex -__397: +__398: if !(pIdx6 != 0) { - goto __399 + goto __400 } if !!(int32(*(*uint16)(unsafe.Pointer(pIdx6 + 100))&0x80>>7) != 0) { - goto __400 + goto __401 } szThreshold = int16(0) - goto __399 -__400: + goto __400 +__401: ; - goto __398 -__398: - pIdx6 = (*Index)(unsafe.Pointer(pIdx6)).FpNext - goto __397 goto __399 __399: + pIdx6 = (*Index)(unsafe.Pointer(pIdx6)).FpNext + goto __398 + goto __400 +__400: ; if !(szThreshold != 0) { - goto __401 + goto __402 } Xsqlite3OpenTable(tls, pParse, iTabCur, iDb, pTab10, OP_OpenRead) Xsqlite3VdbeAddOp3(tls, v, OP_IfSmaller, iTabCur, int32(U32(Xsqlite3VdbeCurrentAddr(tls, v)+2)+opMask&U32(1)), int32(szThreshold)) -__401: +__402: ; - zSubSql = Xsqlite3MPrintf(tls, db, ts+18048, + zSubSql = Xsqlite3MPrintf(tls, db, ts+18095, libc.VaList(bp+464, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName, (*Table)(unsafe.Pointer(pTab10)).FzName)) if !(opMask&U32(0x01) != 0) { - goto __402 + goto __403 } r11 = Xsqlite3GetTempReg(tls, pParse) Xsqlite3VdbeAddOp4(tls, v, OP_String8, 0, r11, 0, zSubSql, -6) Xsqlite3VdbeAddOp2(tls, v, OP_ResultRow, r11, 1) - goto __403 -__402: - Xsqlite3VdbeAddOp4(tls, v, OP_SqlExec, 0, 0, 0, zSubSql, -6) + goto __404 __403: + Xsqlite3VdbeAddOp4(tls, v, OP_SqlExec, 0, 0, 0, zSubSql, -6) +__404: ; - goto __394 -__394: - k4 = (*HashElem)(unsafe.Pointer(k4)).Fnext - goto __393 goto __395 __395: + k4 = (*HashElem)(unsafe.Pointer(k4)).Fnext + goto __394 + goto __396 +__396: ; - goto __390 -__390: - iDb++ - goto __389 goto __391 __391: + iDb++ + goto __390 + goto __392 +__392: ; Xsqlite3VdbeAddOp0(tls, v, OP_Expire) goto __15 @@ -84824,36 +84857,36 @@ __52: ; if !(zRight != 0) { - goto __404 + goto __405 } Xsqlite3_busy_timeout(tls, db, Xsqlite3Atoi(tls, zRight)) -__404: +__405: ; returnSingleInt(tls, v, 
int64((*Sqlite3)(unsafe.Pointer(db)).FbusyTimeout)) goto __15 __53: if !(zRight != 0 && Xsqlite3DecOrHexToI64(tls, zRight, bp+640) == SQLITE_OK) { - goto __405 + goto __406 } Xsqlite3_soft_heap_limit64(tls, *(*Sqlite3_int64)(unsafe.Pointer(bp + 640))) -__405: +__406: ; returnSingleInt(tls, v, Xsqlite3_soft_heap_limit64(tls, int64(-1))) goto __15 __54: if !(zRight != 0 && Xsqlite3DecOrHexToI64(tls, zRight, bp+648) == SQLITE_OK) { - goto __406 + goto __407 } iPrior = Xsqlite3_hard_heap_limit64(tls, int64(-1)) if !(*(*Sqlite3_int64)(unsafe.Pointer(bp + 648)) > int64(0) && (iPrior == int64(0) || iPrior > *(*Sqlite3_int64)(unsafe.Pointer(bp + 648)))) { - goto __407 + goto __408 } Xsqlite3_hard_heap_limit64(tls, *(*Sqlite3_int64)(unsafe.Pointer(bp + 648))) -__407: +__408: ; -__406: +__407: ; returnSingleInt(tls, v, Xsqlite3_hard_heap_limit64(tls, int64(-1))) goto __15 @@ -84862,10 +84895,10 @@ if !(zRight != 0 && Xsqlite3DecOrHexToI64(tls, zRight, bp+656) == SQLITE_OK && *(*Sqlite3_int64)(unsafe.Pointer(bp + 656)) >= int64(0)) { - goto __408 + goto __409 } Xsqlite3_limit(tls, db, SQLITE_LIMIT_WORKER_THREADS, int32(*(*Sqlite3_int64)(unsafe.Pointer(bp + 656))&int64(0x7fffffff))) -__408: +__409: ; returnSingleInt(tls, v, int64(Xsqlite3_limit(tls, db, SQLITE_LIMIT_WORKER_THREADS, -1))) goto __15 @@ -84874,10 +84907,10 @@ if !(zRight != 0 && Xsqlite3DecOrHexToI64(tls, zRight, bp+664) == SQLITE_OK && *(*Sqlite3_int64)(unsafe.Pointer(bp + 664)) >= int64(0)) { - goto __409 + goto __410 } (*Sqlite3)(unsafe.Pointer(db)).FnAnalysisLimit = int32(*(*Sqlite3_int64)(unsafe.Pointer(bp + 664)) & int64(0x7fffffff)) -__409: +__410: ; returnSingleInt(tls, v, int64((*Sqlite3)(unsafe.Pointer(db)).FnAnalysisLimit)) goto __15 @@ -84885,10 +84918,10 @@ __15: ; if !(int32((*PragmaName)(unsafe.Pointer(pPragma)).FmPragFlg)&PragFlg_NoColumns1 != 0 && zRight != 0) { - goto __410 + goto __411 } -__410: +__411: ; pragma_out: Xsqlite3DbFree(tls, db, zLeft) @@ -84940,14 +84973,14 @@ {Fopcode: U8(OP_Goto), Fp2: int8(3)}, } var encnames1 = [9]EncName{ - {FzName: ts + 18066, Fenc: U8(SQLITE_UTF8)}, - {FzName: ts + 18071, Fenc: U8(SQLITE_UTF8)}, - {FzName: ts + 18077, Fenc: U8(SQLITE_UTF16LE)}, - {FzName: ts + 18086, Fenc: U8(SQLITE_UTF16BE)}, - {FzName: ts + 18095, Fenc: U8(SQLITE_UTF16LE)}, - {FzName: ts + 18103, Fenc: U8(SQLITE_UTF16BE)}, - {FzName: ts + 18111}, - {FzName: ts + 18118}, + {FzName: ts + 18113, Fenc: U8(SQLITE_UTF8)}, + {FzName: ts + 18118, Fenc: U8(SQLITE_UTF8)}, + {FzName: ts + 18124, Fenc: U8(SQLITE_UTF16LE)}, + {FzName: ts + 18133, Fenc: U8(SQLITE_UTF16BE)}, + {FzName: ts + 18142, Fenc: U8(SQLITE_UTF16LE)}, + {FzName: ts + 18150, Fenc: U8(SQLITE_UTF16BE)}, + {FzName: ts + 18158}, + {FzName: ts + 18165}, {}, } var setCookie = [2]VdbeOpList{ @@ -84999,7 +85032,7 @@ _ = argc _ = argv Xsqlite3StrAccumInit(tls, bp+32, uintptr(0), bp+64, int32(unsafe.Sizeof([200]uint8{})), 0) - Xsqlite3_str_appendall(tls, bp+32, ts+18124) + Xsqlite3_str_appendall(tls, bp+32, ts+18171) i = 0 j = int32((*PragmaName)(unsafe.Pointer(pPragma)).FiPragCName) __1: @@ -85007,7 +85040,7 @@ goto __3 } { - Xsqlite3_str_appendf(tls, bp+32, ts+18139, libc.VaList(bp, int32(cSep), pragCName[j])) + Xsqlite3_str_appendf(tls, bp+32, ts+18186, libc.VaList(bp, int32(cSep), pragCName[j])) cSep = uint8(',') } @@ -85020,16 +85053,16 @@ __3: ; if i == 0 { - Xsqlite3_str_appendf(tls, bp+32, ts+18146, libc.VaList(bp+16, (*PragmaName)(unsafe.Pointer(pPragma)).FzName)) + Xsqlite3_str_appendf(tls, bp+32, ts+18193, libc.VaList(bp+16, 
(*PragmaName)(unsafe.Pointer(pPragma)).FzName)) i++ } j = 0 if int32((*PragmaName)(unsafe.Pointer(pPragma)).FmPragFlg)&PragFlg_Result1 != 0 { - Xsqlite3_str_appendall(tls, bp+32, ts+18152) + Xsqlite3_str_appendall(tls, bp+32, ts+18199) j++ } if int32((*PragmaName)(unsafe.Pointer(pPragma)).FmPragFlg)&(PragFlg_SchemaOpt|PragFlg_SchemaReq) != 0 { - Xsqlite3_str_appendall(tls, bp+32, ts+18164) + Xsqlite3_str_appendall(tls, bp+32, ts+18211) j++ } Xsqlite3_str_append(tls, bp+32, ts+4957, 1) @@ -85212,13 +85245,13 @@ __3: ; Xsqlite3StrAccumInit(tls, bp+32, uintptr(0), uintptr(0), 0, *(*int32)(unsafe.Pointer((*PragmaVtab)(unsafe.Pointer(pTab)).Fdb + 136 + 1*4))) - Xsqlite3_str_appendall(tls, bp+32, ts+18179) + Xsqlite3_str_appendall(tls, bp+32, ts+18226) if *(*uintptr)(unsafe.Pointer(pCsr + 24 + 1*8)) != 0 { - Xsqlite3_str_appendf(tls, bp+32, ts+18187, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(pCsr + 24 + 1*8)))) + Xsqlite3_str_appendf(tls, bp+32, ts+18234, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(pCsr + 24 + 1*8)))) } Xsqlite3_str_appendall(tls, bp+32, (*PragmaName)(unsafe.Pointer((*PragmaVtab)(unsafe.Pointer(pTab)).FpName)).FzName) if *(*uintptr)(unsafe.Pointer(pCsr + 24)) != 0 { - Xsqlite3_str_appendf(tls, bp+32, ts+18191, libc.VaList(bp+16, *(*uintptr)(unsafe.Pointer(pCsr + 24)))) + Xsqlite3_str_appendf(tls, bp+32, ts+18238, libc.VaList(bp+16, *(*uintptr)(unsafe.Pointer(pCsr + 24)))) } zSql = Xsqlite3StrAccumFinish(tls, bp+32) if zSql == uintptr(0) { @@ -85295,12 +85328,12 @@ } else if *(*uintptr)(unsafe.Pointer((*InitData)(unsafe.Pointer(pData)).FpzErrMsg)) != uintptr(0) { } else if (*InitData)(unsafe.Pointer(pData)).FmInitFlags&U32(INITFLAG_AlterMask) != 0 { *(*uintptr)(unsafe.Pointer((*InitData)(unsafe.Pointer(pData)).FpzErrMsg)) = Xsqlite3MPrintf(tls, db, - ts+18195, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(azObj)), *(*uintptr)(unsafe.Pointer(azObj + 1*8)), + ts+18242, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(azObj)), *(*uintptr)(unsafe.Pointer(azObj + 1*8)), azAlterType[(*InitData)(unsafe.Pointer(pData)).FmInitFlags&U32(INITFLAG_AlterMask)-U32(1)], zExtra)) (*InitData)(unsafe.Pointer(pData)).Frc = SQLITE_ERROR } else if (*Sqlite3)(unsafe.Pointer(db)).Fflags&uint64(SQLITE_WriteSchema) != 0 { - (*InitData)(unsafe.Pointer(pData)).Frc = Xsqlite3CorruptError(tls, 137196) + (*InitData)(unsafe.Pointer(pData)).Frc = Xsqlite3CorruptError(tls, 137249) } else { var z uintptr var zObj uintptr @@ -85309,19 +85342,19 @@ } else { zObj = ts + 5008 } - z = Xsqlite3MPrintf(tls, db, ts+18223, libc.VaList(bp+32, zObj)) + z = Xsqlite3MPrintf(tls, db, ts+18270, libc.VaList(bp+32, zObj)) if zExtra != 0 && *(*uint8)(unsafe.Pointer(zExtra)) != 0 { - z = Xsqlite3MPrintf(tls, db, ts+18254, libc.VaList(bp+40, z, zExtra)) + z = Xsqlite3MPrintf(tls, db, ts+18301, libc.VaList(bp+40, z, zExtra)) } *(*uintptr)(unsafe.Pointer((*InitData)(unsafe.Pointer(pData)).FpzErrMsg)) = z - (*InitData)(unsafe.Pointer(pData)).Frc = Xsqlite3CorruptError(tls, 137203) + (*InitData)(unsafe.Pointer(pData)).Frc = Xsqlite3CorruptError(tls, 137256) } } var azAlterType = [3]uintptr{ - ts + 18262, - ts + 18269, - ts + 18281, + ts + 18309, + ts + 18316, + ts + 18328, } // Check to see if any sibling index (another index on the same table) @@ -85413,7 +85446,7 @@ var pIndex uintptr pIndex = Xsqlite3FindIndex(tls, db, *(*uintptr)(unsafe.Pointer(argv + 1*8)), (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName) if pIndex == uintptr(0) { - corruptSchema(tls, pData, argv, ts+18292) + corruptSchema(tls, 
pData, argv, ts+18339) } else if Xsqlite3GetUInt32(tls, *(*uintptr)(unsafe.Pointer(argv + 3*8)), pIndex+88) == 0 || (*Index)(unsafe.Pointer(pIndex)).Ftnum < Pgno(2) || (*Index)(unsafe.Pointer(pIndex)).Ftnum > (*InitData)(unsafe.Pointer(pData)).FmxPage || @@ -85461,7 +85494,7 @@ }()) *(*uintptr)(unsafe.Pointer(bp + 16 + 2*8)) = *(*uintptr)(unsafe.Pointer(bp + 16 + 1*8)) *(*uintptr)(unsafe.Pointer(bp + 16 + 3*8)) = ts + 7938 - *(*uintptr)(unsafe.Pointer(bp + 16 + 4*8)) = ts + 18305 + *(*uintptr)(unsafe.Pointer(bp + 16 + 4*8)) = ts + 18352 *(*uintptr)(unsafe.Pointer(bp + 16 + 5*8)) = uintptr(0) (*InitData)(unsafe.Pointer(bp + 64)).Fdb = db (*InitData)(unsafe.Pointer(bp + 64)).FiDb = iDb @@ -85590,7 +85623,7 @@ if !(int32((*Schema)(unsafe.Pointer((*Db)(unsafe.Pointer(pDb)).FpSchema)).Ffile_format) > SQLITE_MAX_FILE_FORMAT) { goto __19 } - Xsqlite3SetString(tls, pzErrMsg, db, ts+18377) + Xsqlite3SetString(tls, pzErrMsg, db, ts+18424) rc = SQLITE_ERROR goto initone_error_out __19: @@ -85604,7 +85637,7 @@ (*InitData)(unsafe.Pointer(bp + 64)).FmxPage = Xsqlite3BtreeLastPage(tls, (*Db)(unsafe.Pointer(pDb)).FpBt) zSql = Xsqlite3MPrintf(tls, db, - ts+18401, + ts+18448, libc.VaList(bp, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName, zSchemaTabName)) xAuth = (*Sqlite3)(unsafe.Pointer(db)).FxAuth @@ -85936,7 +85969,7 @@ goto __8 } zDb = (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + uintptr(i)*32)).FzDbSName - Xsqlite3ErrorWithMsg(tls, db, rc, ts+18435, libc.VaList(bp, zDb)) + Xsqlite3ErrorWithMsg(tls, db, rc, ts+18482, libc.VaList(bp, zDb)) goto end_prepare __8: @@ -85966,7 +85999,7 @@ if !(nBytes > mxLen) { goto __12 } - Xsqlite3ErrorWithMsg(tls, db, SQLITE_TOOBIG, ts+18465, 0) + Xsqlite3ErrorWithMsg(tls, db, SQLITE_TOOBIG, ts+18512, 0) rc = Xsqlite3ApiExit(tls, db, SQLITE_TOOBIG) goto end_prepare __12: @@ -86062,7 +86095,7 @@ *(*uintptr)(unsafe.Pointer(ppStmt)) = uintptr(0) if !(Xsqlite3SafetyCheckOk(tls, db) != 0) || zSql == uintptr(0) { - return Xsqlite3MisuseError(tls, 137995) + return Xsqlite3MisuseError(tls, 138048) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) Xsqlite3BtreeEnterAll(tls, db) @@ -86161,7 +86194,7 @@ *(*uintptr)(unsafe.Pointer(ppStmt)) = uintptr(0) if !(Xsqlite3SafetyCheckOk(tls, db) != 0) || zSql == uintptr(0) { - return Xsqlite3MisuseError(tls, 138143) + return Xsqlite3MisuseError(tls, 138196) } if nBytes >= 0 { var sz int32 @@ -86488,13 +86521,13 @@ zSp2++ } Xsqlite3ErrorMsg(tls, pParse, - ts+18484, libc.VaList(bp, pA, zSp1, pB, zSp2, pC)) + ts+18531, libc.VaList(bp, pA, zSp1, pB, zSp2, pC)) jointype = JT_INNER } return jointype } -var zKeyText = *(*[34]uint8)(unsafe.Pointer(ts + 18514)) +var zKeyText = *(*[34]uint8)(unsafe.Pointer(ts + 18561)) var aKeyword = [7]struct { Fi U8 FnChar U8 @@ -86669,7 +86702,7 @@ var pUsing uintptr = uintptr(0) if uint32(int32(*(*uint16)(unsafe.Pointer(pRight + 60 + 4))&0x400>>10)) != 0 || *(*uintptr)(unsafe.Pointer(pRight + 72)) != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+18548, libc.VaList(bp, 0)) + ts+18595, libc.VaList(bp, 0)) return 1 } for j = 0; j < int32((*Table)(unsafe.Pointer(pRightTab)).FnCol); j++ { @@ -86714,7 +86747,7 @@ tableAndColumnIndex(tls, pSrc, 0, i, zName, bp+24, bp+28, int32(*(*uint16)(unsafe.Pointer(pRight + 60 + 4))&0x1000>>12)) == 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+18598, libc.VaList(bp+8, zName)) + ts+18645, libc.VaList(bp+8, zName)) return 1 } pE1 = Xsqlite3CreateColumnExpr(tls, db, pSrc, *(*int32)(unsafe.Pointer(bp + 24)), 
*(*int32)(unsafe.Pointer(bp + 28))) @@ -86725,7 +86758,7 @@ int32(*(*uint16)(unsafe.Pointer(pRight + 60 + 4))&0x1000>>12)) != 0 { if int32(*(*uint16)(unsafe.Pointer(pSrc + 8 + uintptr(*(*int32)(unsafe.Pointer(bp + 24)))*104 + 60 + 4))&0x400>>10) == 0 || Xsqlite3IdListIndex(tls, *(*uintptr)(unsafe.Pointer(pSrc + 8 + uintptr(*(*int32)(unsafe.Pointer(bp + 24)))*104 + 72)), zName) < 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+18662, + Xsqlite3ErrorMsg(tls, pParse, ts+18709, libc.VaList(bp+16, zName)) break } @@ -87353,16 +87386,16 @@ var z uintptr switch id { case TK_ALL: - z = ts + 18699 + z = ts + 18746 break case TK_INTERSECT: - z = ts + 18709 + z = ts + 18756 break case TK_EXCEPT: - z = ts + 18719 + z = ts + 18766 break default: - z = ts + 18726 + z = ts + 18773 break } return z @@ -87372,7 +87405,7 @@ bp := tls.Alloc(8) defer tls.Free(8) - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18732, libc.VaList(bp, zUsage)) + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18779, libc.VaList(bp, zUsage)) } func generateSortTail(tls *libc.TLS, pParse uintptr, p uintptr, pSort uintptr, nColumn int32, pDest uintptr) { @@ -87398,9 +87431,9 @@ var nRefKey int32 = 0 var aOutEx uintptr = (*Select)(unsafe.Pointer(p)).FpEList + 8 - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18755, libc.VaList(bp, func() uintptr { + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18802, libc.VaList(bp, func() uintptr { if (*SortCtx)(unsafe.Pointer(pSort)).FnOBSat > 0 { - return ts + 18786 + return ts + 18833 } return ts + 1554 }())) @@ -87744,7 +87777,7 @@ } else { var z uintptr = (*ExprList_item)(unsafe.Pointer(pEList + 8 + uintptr(i)*32)).FzEName if z == uintptr(0) { - z = Xsqlite3MPrintf(tls, db, ts+18801, libc.VaList(bp+16, i+1)) + z = Xsqlite3MPrintf(tls, db, ts+18848, libc.VaList(bp+16, i+1)) } else { z = Xsqlite3DbStrDup(tls, db, z) } @@ -87844,7 +87877,7 @@ if zName != 0 && !(Xsqlite3IsTrueOrFalse(tls, zName) != 0) { zName = Xsqlite3DbStrDup(tls, db, zName) } else { - zName = Xsqlite3MPrintf(tls, db, ts+18801, libc.VaList(bp, i+1)) + zName = Xsqlite3MPrintf(tls, db, ts+18848, libc.VaList(bp, i+1)) } *(*U32)(unsafe.Pointer(bp + 56)) = U32(0) @@ -87860,7 +87893,7 @@ nName = j } } - zName = Xsqlite3MPrintf(tls, db, ts+18810, libc.VaList(bp+8, nName, zName, libc.PreIncUint32(&*(*U32)(unsafe.Pointer(bp + 56)), 1))) + zName = Xsqlite3MPrintf(tls, db, ts+18857, libc.VaList(bp+8, nName, zName, libc.PreIncUint32(&*(*U32)(unsafe.Pointer(bp + 56)), 1))) Xsqlite3ProgressCheck(tls, pParse) if *(*U32)(unsafe.Pointer(bp + 56)) > U32(3) { Xsqlite3_randomness(tls, int32(unsafe.Sizeof(U32(0))), bp+56) @@ -87943,8 +87976,6 @@ (*Column)(unsafe.Pointer(pCol)).Faffinity = Xsqlite3ExprAffinity(tls, p) if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) <= SQLITE_AFF_NONE { (*Column)(unsafe.Pointer(pCol)).Faffinity = aff - } else if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) >= SQLITE_AFF_NUMERIC && int32((*Expr)(unsafe.Pointer(p)).Fop) == TK_CAST { - (*Column)(unsafe.Pointer(pCol)).Faffinity = uint8(SQLITE_AFF_FLEXNUM) } if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) >= SQLITE_AFF_TEXT && (*Select)(unsafe.Pointer(pSelect)).FpNext != 0 { var m int32 = 0 @@ -87959,12 +87990,15 @@ } else if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) >= SQLITE_AFF_NUMERIC && m&0x02 != 0 { (*Column)(unsafe.Pointer(pCol)).Faffinity = uint8(SQLITE_AFF_BLOB) } + if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) >= SQLITE_AFF_NUMERIC && int32((*Expr)(unsafe.Pointer(p)).Fop) == TK_CAST { + (*Column)(unsafe.Pointer(pCol)).Faffinity = 
uint8(SQLITE_AFF_FLEXNUM) + } } zType = columnTypeImpl(tls, bp, p, uintptr(0), uintptr(0), uintptr(0)) if zType == uintptr(0) || int32((*Column)(unsafe.Pointer(pCol)).Faffinity) != int32(Xsqlite3AffinityType(tls, zType, uintptr(0))) { if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) == SQLITE_AFF_NUMERIC || int32((*Column)(unsafe.Pointer(pCol)).Faffinity) == SQLITE_AFF_FLEXNUM { - zType = ts + 18818 + zType = ts + 18865 } else { zType = uintptr(0) for j = 1; j < SQLITE_N_STDTYPE; j++ { @@ -88180,7 +88214,7 @@ if !((*Select)(unsafe.Pointer(p)).FpWin != 0) { goto __1 } - Xsqlite3ErrorMsg(tls, pParse, ts+18822, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+18869, 0) return __1: ; @@ -88271,7 +88305,7 @@ if !((*Select)(unsafe.Pointer(pFirstRec)).FselFlags&U32(SF_Aggregate) != 0) { goto __15 } - Xsqlite3ErrorMsg(tls, pParse, ts+18871, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+18918, 0) goto end_of_recursive_query __15: ; @@ -88291,7 +88325,7 @@ ; pSetup = (*Select)(unsafe.Pointer(pFirstRec)).FpPrior (*Select)(unsafe.Pointer(pSetup)).FpNext = uintptr(0) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18913, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18960, 0) rc = Xsqlite3Select(tls, pParse, pSetup, bp) (*Select)(unsafe.Pointer(pSetup)).FpNext = p if !(rc != 0) { @@ -88328,7 +88362,7 @@ Xsqlite3VdbeResolveLabel(tls, v, addrCont) (*Select)(unsafe.Pointer(pFirstRec)).FpPrior = uintptr(0) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18919, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18966, 0) Xsqlite3Select(tls, pParse, p, bp) (*Select)(unsafe.Pointer(pFirstRec)).FpPrior = pSetup @@ -88362,11 +88396,11 @@ p = (*Select)(unsafe.Pointer(p)).FpPrior nRow = nRow + bShowAll } - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18934, libc.VaList(bp, nRow, func() uintptr { + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18981, libc.VaList(bp, nRow, func() uintptr { if nRow == 1 { return ts + 1554 } - return ts + 18957 + return ts + 19004 }())) for p != 0 { selectInnerLoop(tls, pParse, p, -1, uintptr(0), uintptr(0), pDest, 1, 1) @@ -88467,8 +88501,8 @@ if !((*Select)(unsafe.Pointer(pPrior)).FpPrior == uintptr(0)) { goto __8 } - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18959, 0) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18974, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19006, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19021, 0) __8: ; switch int32((*Select)(unsafe.Pointer(p)).Fop) { @@ -88515,7 +88549,7 @@ ; __15: ; - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18699, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18746, 0) rc = Xsqlite3Select(tls, pParse, p, bp+16) @@ -88582,7 +88616,7 @@ pLimit = (*Select)(unsafe.Pointer(p)).FpLimit (*Select)(unsafe.Pointer(p)).FpLimit = uintptr(0) (*SelectDest)(unsafe.Pointer(bp + 64)).FeDest = op - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18993, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19040, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) rc = Xsqlite3Select(tls, pParse, p, bp+64) @@ -88644,7 +88678,7 @@ pLimit1 = (*Select)(unsafe.Pointer(p)).FpLimit (*Select)(unsafe.Pointer(p)).FpLimit = uintptr(0) (*SelectDest)(unsafe.Pointer(bp + 104)).FiSDParm = tab2 - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18993, libc.VaList(bp+8, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19040, libc.VaList(bp+8, Xsqlite3SelectOpName(tls, 
int32((*Select)(unsafe.Pointer(p)).Fop)))) rc = Xsqlite3Select(tls, pParse, p, bp+104) @@ -88797,10 +88831,10 @@ defer tls.Free(8) if (*Select)(unsafe.Pointer(p)).FselFlags&U32(SF_Values) != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+19014, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+19061, 0) } else { Xsqlite3ErrorMsg(tls, pParse, - ts+19060, + ts+19107, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) } } @@ -89054,8 +89088,8 @@ (*Select)(unsafe.Pointer(pPrior)).FpNext = uintptr(0) (*Select)(unsafe.Pointer(pPrior)).FpOrderBy = Xsqlite3ExprListDup(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pOrderBy, 0) - Xsqlite3ResolveOrderGroupBy(tls, pParse, p, (*Select)(unsafe.Pointer(p)).FpOrderBy, ts+7234) - Xsqlite3ResolveOrderGroupBy(tls, pParse, pPrior, (*Select)(unsafe.Pointer(pPrior)).FpOrderBy, ts+7234) + Xsqlite3ResolveOrderGroupBy(tls, pParse, p, (*Select)(unsafe.Pointer(p)).FpOrderBy, ts+7223) + Xsqlite3ResolveOrderGroupBy(tls, pParse, pPrior, (*Select)(unsafe.Pointer(pPrior)).FpOrderBy, ts+7223) computeLimitRegisters(tls, pParse, p, labelEnd) if (*Select)(unsafe.Pointer(p)).FiLimit != 0 && op == TK_ALL { @@ -89082,13 +89116,13 @@ Xsqlite3SelectDestInit(tls, bp+8, SRT_Coroutine, regAddrA) Xsqlite3SelectDestInit(tls, bp+48, SRT_Coroutine, regAddrB) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19142, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19189, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) addrSelectA = Xsqlite3VdbeCurrentAddr(tls, v) + 1 addr1 = Xsqlite3VdbeAddOp3(tls, v, OP_InitCoroutine, regAddrA, 0, addrSelectA) (*Select)(unsafe.Pointer(pPrior)).FiLimit = regLimitA - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19153, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19200, 0) Xsqlite3Select(tls, pParse, pPrior, bp+8) Xsqlite3VdbeEndCoroutine(tls, v, regAddrA) Xsqlite3VdbeJumpHere(tls, v, addr1) @@ -89100,7 +89134,7 @@ savedOffset = (*Select)(unsafe.Pointer(p)).FiOffset (*Select)(unsafe.Pointer(p)).FiLimit = regLimitB (*Select)(unsafe.Pointer(p)).FiOffset = 0 - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19158, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19205, 0) Xsqlite3Select(tls, pParse, p, bp+48) (*Select)(unsafe.Pointer(p)).FiLimit = savedLimit (*Select)(unsafe.Pointer(p)).FiOffset = savedOffset @@ -89288,7 +89322,8 @@ Xsqlite3VectorErrorMsg(tls, (*SubstContext)(unsafe.Pointer(pSubst)).FpParse, pCopy) } else { var db uintptr = (*Parse)(unsafe.Pointer((*SubstContext)(unsafe.Pointer(pSubst)).FpParse)).Fdb - if (*SubstContext)(unsafe.Pointer(pSubst)).FisOuterJoin != 0 && int32((*Expr)(unsafe.Pointer(pCopy)).Fop) != TK_COLUMN { + if (*SubstContext)(unsafe.Pointer(pSubst)).FisOuterJoin != 0 && + (int32((*Expr)(unsafe.Pointer(pCopy)).Fop) != TK_COLUMN || (*Expr)(unsafe.Pointer(pCopy)).FiTable != (*SubstContext)(unsafe.Pointer(pSubst)).FiNewTable) { libc.Xmemset(tls, bp, 0, uint64(unsafe.Sizeof(Expr{}))) (*Expr)(unsafe.Pointer(bp)).Fop = U8(TK_IF_NULL_ROW) (*Expr)(unsafe.Pointer(bp)).FpLeft = pCopy @@ -90187,7 +90222,7 @@ for pIdx = (*Table)(unsafe.Pointer(pTab)).FpIndex; pIdx != 0 && Xsqlite3StrICmp(tls, (*Index)(unsafe.Pointer(pIdx)).FzName, zIndexedBy) != 0; pIdx = (*Index)(unsafe.Pointer(pIdx)).FpNext { } if !(pIdx != 0) { - Xsqlite3ErrorMsg(tls, pParse, ts+19164, libc.VaList(bp, zIndexedBy, 0)) + Xsqlite3ErrorMsg(tls, pParse, ts+19211, libc.VaList(bp, zIndexedBy, 0)) (*Parse)(unsafe.Pointer(pParse)).FcheckSchema = 
U8(1) return SQLITE_ERROR } @@ -90270,7 +90305,7 @@ defer tls.Free(8) if uint32(int32(*(*uint16)(unsafe.Pointer(pFrom + 60 + 4))&0x4>>2)) != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+19182, libc.VaList(bp, (*SrcItem)(unsafe.Pointer(pFrom)).FzName)) + Xsqlite3ErrorMsg(tls, pParse, ts+19229, libc.VaList(bp, (*SrcItem)(unsafe.Pointer(pFrom)).FzName)) return 1 } return 0 @@ -90399,7 +90434,7 @@ *(*U32)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pFrom)).FpSelect + 4)) |= U32(SF_CopyCte) if uint32(int32(*(*uint16)(unsafe.Pointer(pFrom + 60 + 4))&0x2>>1)) != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+19205, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(pFrom + 88)))) + Xsqlite3ErrorMsg(tls, pParse, ts+19252, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(pFrom + 88)))) return 2 } libc.SetBitFieldPtr16Uint32(pFrom+60+4, uint32(1), 8, 0x100) @@ -90422,7 +90457,7 @@ libc.SetBitFieldPtr16Uint32(pItem+60+4, uint32(1), 6, 0x40) if (*Select)(unsafe.Pointer(pRecTerm)).FselFlags&U32(SF_Recursive) != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+19225, libc.VaList(bp+16, (*Cte)(unsafe.Pointer(pCte)).FzName)) + ts+19272, libc.VaList(bp+16, (*Cte)(unsafe.Pointer(pCte)).FzName)) return 2 } *(*U32)(unsafe.Pointer(pRecTerm + 4)) |= U32(SF_Recursive) @@ -90438,7 +90473,7 @@ pRecTerm = (*Select)(unsafe.Pointer(pRecTerm)).FpPrior } - (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19268 + (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19315 pSavedWith = (*Parse)(unsafe.Pointer(pParse)).FpWith (*Parse)(unsafe.Pointer(pParse)).FpWith = *(*uintptr)(unsafe.Pointer(bp + 48)) if (*Select)(unsafe.Pointer(pSel)).FselFlags&U32(SF_Recursive) != 0 { @@ -90464,7 +90499,7 @@ pEList = (*Select)(unsafe.Pointer(pLeft)).FpEList if (*Cte)(unsafe.Pointer(pCte)).FpCols != 0 { if pEList != 0 && (*ExprList)(unsafe.Pointer(pEList)).FnExpr != (*ExprList)(unsafe.Pointer((*Cte)(unsafe.Pointer(pCte)).FpCols)).FnExpr { - Xsqlite3ErrorMsg(tls, pParse, ts+19291, + Xsqlite3ErrorMsg(tls, pParse, ts+19338, libc.VaList(bp+24, (*Cte)(unsafe.Pointer(pCte)).FzName, (*ExprList)(unsafe.Pointer(pEList)).FnExpr, (*ExprList)(unsafe.Pointer((*Cte)(unsafe.Pointer(pCte)).FpCols)).FnExpr)) (*Parse)(unsafe.Pointer(pParse)).FpWith = pSavedWith return 2 @@ -90475,9 +90510,9 @@ Xsqlite3ColumnsFromExprList(tls, pParse, pEList, pTab+54, pTab+8) if bMayRecursive != 0 { if (*Select)(unsafe.Pointer(pSel)).FselFlags&U32(SF_Recursive) != 0 { - (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19329 + (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19376 } else { - (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19363 + (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19410 } Xsqlite3WalkSelect(tls, pWalker, pSel) } @@ -90524,7 +90559,7 @@ if (*SrcItem)(unsafe.Pointer(pFrom)).FzAlias != 0 { (*Table)(unsafe.Pointer(pTab)).FzName = Xsqlite3DbStrDup(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, (*SrcItem)(unsafe.Pointer(pFrom)).FzAlias) } else { - (*Table)(unsafe.Pointer(pTab)).FzName = Xsqlite3MPrintf(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, ts+19401, libc.VaList(bp, pFrom)) + (*Table)(unsafe.Pointer(pTab)).FzName = Xsqlite3MPrintf(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, ts+19448, libc.VaList(bp, pFrom)) } for (*Select)(unsafe.Pointer(pSel)).FpPrior != 0 { pSel = (*Select)(unsafe.Pointer(pSel)).FpPrior @@ -90636,7 +90671,7 @@ return WRC_Abort } if (*Table)(unsafe.Pointer(pTab)).FnTabRef >= U32(0xffff) { - Xsqlite3ErrorMsg(tls, pParse, ts+19405, + Xsqlite3ErrorMsg(tls, pParse, ts+19452, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName)) (*SrcItem)(unsafe.Pointer(pFrom)).FpTab = uintptr(0) return WRC_Abort 
@@ -90655,7 +90690,7 @@ if int32((*Table)(unsafe.Pointer(pTab)).FeTabType) == TABTYP_VIEW { if (*Sqlite3)(unsafe.Pointer(db)).Fflags&uint64(SQLITE_EnableView) == uint64(0) && (*Table)(unsafe.Pointer(pTab)).FpSchema != (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+1*32)).FpSchema { - Xsqlite3ErrorMsg(tls, pParse, ts+19444, + Xsqlite3ErrorMsg(tls, pParse, ts+19491, libc.VaList(bp+8, (*Table)(unsafe.Pointer(pTab)).FzName)) } (*SrcItem)(unsafe.Pointer(pFrom)).FpSelect = Xsqlite3SelectDup(tls, db, *(*uintptr)(unsafe.Pointer(pTab + 64)), 0) @@ -90779,7 +90814,7 @@ if pNew != 0 { var pX uintptr = pNew + 8 + uintptr((*ExprList)(unsafe.Pointer(pNew)).FnExpr-1)*32 - (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3MPrintf(tls, db, ts+19475, libc.VaList(bp+24, zUName)) + (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3MPrintf(tls, db, ts+19522, libc.VaList(bp+24, zUName)) libc.SetBitFieldPtr16Uint32(pX+16+4, uint32(ENAME_TAB), 0, 0x3) libc.SetBitFieldPtr16Uint32(pX+16+4, uint32(1), 7, 0x80) } @@ -90844,7 +90879,7 @@ (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3DbStrDup(tls, db, (*ExprList_item)(unsafe.Pointer(pNestedFrom+8+uintptr(j)*32)).FzEName) } else { - (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3MPrintf(tls, db, ts+19480, + (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3MPrintf(tls, db, ts+19527, libc.VaList(bp+32, zSchemaName, zTabName, zName)) } @@ -90875,9 +90910,9 @@ ; if !(tableSeen != 0) { if zTName != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+19489, libc.VaList(bp+72, zTName)) + Xsqlite3ErrorMsg(tls, pParse, ts+19536, libc.VaList(bp+72, zTName)) } else { - Xsqlite3ErrorMsg(tls, pParse, ts+19507, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+19554, 0) } } } @@ -90887,7 +90922,7 @@ } if (*Select)(unsafe.Pointer(p)).FpEList != 0 { if (*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer(p)).FpEList)).FnExpr > *(*int32)(unsafe.Pointer(db + 136 + 2*4)) { - Xsqlite3ErrorMsg(tls, pParse, ts+19527, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+19574, 0) return WRC_Abort } if elistFlags&U32(EP_HasFunc|EP_Subquery) != U32(0) { @@ -91025,7 +91060,7 @@ (*AggInfo)(unsafe.Pointer(pAggInfo)).FnColumn = (*AggInfo)(unsafe.Pointer(pAggInfo)).FnAccumulator if int32((*AggInfo)(unsafe.Pointer(pAggInfo)).FnSortingColumn) > 0 { if (*AggInfo)(unsafe.Pointer(pAggInfo)).FnColumn == 0 { - (*AggInfo)(unsafe.Pointer(pAggInfo)).FnSortingColumn = U16(0) + (*AggInfo)(unsafe.Pointer(pAggInfo)).FnSortingColumn = U16((*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer(pSelect)).FpGroupBy)).FnExpr) } else { (*AggInfo)(unsafe.Pointer(pAggInfo)).FnSortingColumn = U16(int32((*AggInfo_col)(unsafe.Pointer((*AggInfo)(unsafe.Pointer(pAggInfo)).FaCol+uintptr((*AggInfo)(unsafe.Pointer(pAggInfo)).FnColumn-1)*24)).FiSorterColumn) + 1) } @@ -91109,13 +91144,13 @@ if *(*uintptr)(unsafe.Pointer(pE + 32)) == uintptr(0) || (*ExprList)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pE + 32)))).FnExpr != 1 { Xsqlite3ErrorMsg(tls, pParse, - ts+19558, 0) + ts+19605, 0) (*AggInfo_func)(unsafe.Pointer(pFunc)).FiDistinct = -1 } else { var pKeyInfo uintptr = Xsqlite3KeyInfoFromExprList(tls, pParse, *(*uintptr)(unsafe.Pointer(pE + 32)), 0, 0) (*AggInfo_func)(unsafe.Pointer(pFunc)).FiDistAddr = Xsqlite3VdbeAddOp4(tls, v, OP_OpenEphemeral, (*AggInfo_func)(unsafe.Pointer(pFunc)).FiDistinct, 0, 0, pKeyInfo, -8) - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+19609, libc.VaList(bp, (*FuncDef)(unsafe.Pointer((*AggInfo_func)(unsafe.Pointer(pFunc)).FpFunc)).FzName)) + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+19656, 
libc.VaList(bp, (*FuncDef)(unsafe.Pointer((*AggInfo_func)(unsafe.Pointer(pFunc)).FpFunc)).FzName)) } } @@ -91304,11 +91339,11 @@ if int32((*Parse)(unsafe.Pointer(pParse)).Fexplain) == 2 { var bCover int32 = libc.Bool32(pIdx != uintptr(0) && ((*Table)(unsafe.Pointer(pTab)).FtabFlags&U32(TF_WithoutRowid) == U32(0) || !(int32(*(*uint16)(unsafe.Pointer(pIdx + 100))&0x3>>0) == SQLITE_IDXTYPE_PRIMARYKEY))) - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+19642, + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+19689, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName, func() uintptr { if bCover != 0 { - return ts + 19654 + return ts + 19701 } return ts + 1554 }(), @@ -91636,7 +91671,7 @@ goto __7 } Xsqlite3ErrorMsg(tls, pParse, - ts+19677, + ts+19724, libc.VaList(bp, func() uintptr { if (*SrcItem)(unsafe.Pointer(p0)).FzAlias != 0 { return (*SrcItem)(unsafe.Pointer(p0)).FzAlias @@ -91697,7 +91732,7 @@ if !(int32((*Table)(unsafe.Pointer(pTab)).FnCol) != (*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer(pSub)).FpEList)).FnExpr) { goto __15 } - Xsqlite3ErrorMsg(tls, pParse, ts+19731, + Xsqlite3ErrorMsg(tls, pParse, ts+19778, libc.VaList(bp+8, int32((*Table)(unsafe.Pointer(pTab)).FnCol), (*Table)(unsafe.Pointer(pTab)).FzName, (*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer(pSub)).FpEList)).FnExpr)) goto select_end __15: @@ -91839,7 +91874,7 @@ (*SrcItem)(unsafe.Pointer(pItem1)).FaddrFillSub = addrTop Xsqlite3SelectDestInit(tls, bp+96, SRT_Coroutine, (*SrcItem)(unsafe.Pointer(pItem1)).FregReturn) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19771, libc.VaList(bp+32, pItem1)) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19818, libc.VaList(bp+32, pItem1)) Xsqlite3Select(tls, pParse, pSub1, bp+96) (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pItem1)).FpTab)).FnRowLogEst = (*Select)(unsafe.Pointer(pSub1)).FnSelectRow libc.SetBitFieldPtr16Uint32(pItem1+60+4, uint32(1), 5, 0x20) @@ -91898,7 +91933,7 @@ ; Xsqlite3SelectDestInit(tls, bp+96, SRT_EphemTab, (*SrcItem)(unsafe.Pointer(pItem1)).FiCursor) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19786, libc.VaList(bp+40, pItem1)) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19833, libc.VaList(bp+40, pItem1)) Xsqlite3Select(tls, pParse, pSub1, bp+96) (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pItem1)).FpTab)).FnRowLogEst = (*Select)(unsafe.Pointer(pSub1)).FnSelectRow if !(onceAddr != 0) { @@ -92369,9 +92404,9 @@ explainTempTable(tls, pParse, func() uintptr { if (*DistinctCtx)(unsafe.Pointer(bp+136)).FisTnct != 0 && (*Select)(unsafe.Pointer(p)).FselFlags&U32(SF_Distinct) == U32(0) { - return ts + 19802 + return ts + 19849 } - return ts + 19811 + return ts + 19858 }()) groupBySort = 1 @@ -92722,7 +92757,7 @@ if !(int32((*DistinctCtx)(unsafe.Pointer(bp+136)).FeTnctType) == WHERE_DISTINCT_UNORDERED) { goto __146 } - explainTempTable(tls, pParse, ts+19802) + explainTempTable(tls, pParse, ts+19849) __146: ; if !((*SortCtx)(unsafe.Pointer(bp+48)).FpOrderBy != 0) { @@ -92827,7 +92862,7 @@ } Xsqlite3_free(tls, (*TabResult)(unsafe.Pointer(p)).FzErrMsg) (*TabResult)(unsafe.Pointer(p)).FzErrMsg = Xsqlite3_mprintf(tls, - ts+19820, 0) + ts+19867, 0) (*TabResult)(unsafe.Pointer(p)).Frc = SQLITE_ERROR return 1 __11: @@ -93060,7 +93095,7 @@ if !((*Token)(unsafe.Pointer(pName2)).Fn > uint32(0)) { goto __3 } - Xsqlite3ErrorMsg(tls, pParse, ts+19885, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+19932, 0) goto trigger_cleanup __3: ; @@ -93104,7 +93139,7 @@ goto trigger_cleanup __8: ; - Xsqlite3FixInit(tls, bp+40, pParse, iDb, ts+19931, 
*(*uintptr)(unsafe.Pointer(bp + 32))) + Xsqlite3FixInit(tls, bp+40, pParse, iDb, ts+19978, *(*uintptr)(unsafe.Pointer(bp + 32))) if !(Xsqlite3FixSrcList(tls, bp+40, pTableName) != 0) { goto __9 } @@ -93122,7 +93157,7 @@ if !(int32((*Table)(unsafe.Pointer(pTab)).FeTabType) == TABTYP_VTAB) { goto __11 } - Xsqlite3ErrorMsg(tls, pParse, ts+19939, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+19986, 0) goto trigger_orphan_error __11: ; @@ -93134,7 +93169,7 @@ goto trigger_cleanup __12: ; - if !(Xsqlite3CheckObjectName(tls, pParse, zName, ts+19931, (*Table)(unsafe.Pointer(pTab)).FzName) != 0) { + if !(Xsqlite3CheckObjectName(tls, pParse, zName, ts+19978, (*Table)(unsafe.Pointer(pTab)).FzName) != 0) { goto __13 } goto trigger_cleanup @@ -93149,11 +93184,12 @@ if !!(noErr != 0) { goto __16 } - Xsqlite3ErrorMsg(tls, pParse, ts+19980, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(bp + 32)))) + Xsqlite3ErrorMsg(tls, pParse, ts+20027, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(bp + 32)))) goto __17 __16: ; Xsqlite3CodeVerifySchema(tls, pParse, iDb) + __17: ; goto trigger_cleanup @@ -93164,19 +93200,19 @@ if !(Xsqlite3_strnicmp(tls, (*Table)(unsafe.Pointer(pTab)).FzName, ts+6381, 7) == 0) { goto __18 } - Xsqlite3ErrorMsg(tls, pParse, ts+20006, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+20053, 0) goto trigger_cleanup __18: ; if !(int32((*Table)(unsafe.Pointer(pTab)).FeTabType) == TABTYP_VIEW && tr_tm != TK_INSTEAD) { goto __19 } - Xsqlite3ErrorMsg(tls, pParse, ts+20044, + Xsqlite3ErrorMsg(tls, pParse, ts+20091, libc.VaList(bp+8, func() uintptr { if tr_tm == TK_BEFORE { - return ts + 20081 + return ts + 20128 } - return ts + 20088 + return ts + 20135 }(), pTableName+8)) goto trigger_orphan_error __19: @@ -93185,7 +93221,7 @@ goto __20 } Xsqlite3ErrorMsg(tls, pParse, - ts+20094, libc.VaList(bp+24, pTableName+8)) + ts+20141, libc.VaList(bp+24, pTableName+8)) goto trigger_orphan_error __20: ; @@ -93334,7 +93370,7 @@ __3: ; Xsqlite3TokenInit(tls, bp+56, (*Trigger)(unsafe.Pointer(pTrig)).FzName) - Xsqlite3FixInit(tls, bp+72, pParse, iDb, ts+19931, bp+56) + Xsqlite3FixInit(tls, bp+72, pParse, iDb, ts+19978, bp+56) if !(Xsqlite3FixTriggerStep(tls, bp+72, (*Trigger)(unsafe.Pointer(pTrig)).Fstep_list) != 0 || Xsqlite3FixExpr(tls, bp+72, (*Trigger)(unsafe.Pointer(pTrig)).FpWhen) != 0) { goto __4 @@ -93367,7 +93403,7 @@ goto __12 } Xsqlite3ErrorMsg(tls, pParse, - ts+20140, + ts+20187, libc.VaList(bp, (*Trigger)(unsafe.Pointer(pTrig)).FzName, (*TriggerStep)(unsafe.Pointer(pStep)).FzTarget)) goto triggerfinish_cleanup __12: @@ -93392,13 +93428,13 @@ z = Xsqlite3DbStrNDup(tls, db, (*Token)(unsafe.Pointer(pAll)).Fz, uint64((*Token)(unsafe.Pointer(pAll)).Fn)) Xsqlite3NestedParse(tls, pParse, - ts+20188, + ts+20235, libc.VaList(bp+16, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName, zName, (*Trigger)(unsafe.Pointer(pTrig)).Ftable, z)) Xsqlite3DbFree(tls, db, z) Xsqlite3ChangeCookie(tls, pParse, iDb) Xsqlite3VdbeAddParseSchemaOp(tls, v, iDb, - Xsqlite3MPrintf(tls, db, ts+20263, libc.VaList(bp+48, zName)), uint16(0)) + Xsqlite3MPrintf(tls, db, ts+20310, libc.VaList(bp+48, zName)), uint16(0)) __7: ; __6: @@ -93654,7 +93690,7 @@ if !!(noErr != 0) { goto __9 } - Xsqlite3ErrorMsg(tls, pParse, ts+20292, libc.VaList(bp, pName+8)) + Xsqlite3ErrorMsg(tls, pParse, ts+20339, libc.VaList(bp, pName+8)) goto __10 __9: Xsqlite3CodeVerifyNamedSchema(tls, pParse, zDb) @@ -93707,7 +93743,7 @@ if libc.AssignUintptr(&v, Xsqlite3GetVdbe(tls, pParse)) != uintptr(0) { Xsqlite3NestedParse(tls, pParse, - ts+20312, + ts+20359, 
libc.VaList(bp, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName, (*Trigger)(unsafe.Pointer(pTrigger)).FzName)) Xsqlite3ChangeCookie(tls, pParse, iDb) Xsqlite3VdbeAddOp4(tls, v, OP_DropTrigger, iDb, 0, 0, (*Trigger)(unsafe.Pointer(pTrigger)).FzName, 0) @@ -93821,12 +93857,12 @@ goto __15 } Xsqlite3ErrorMsg(tls, pParse, - ts+20374, + ts+20421, libc.VaList(bp, func() uintptr { if op == TK_DELETE { - return ts + 20422 + return ts + 20469 } - return ts + 20429 + return ts + 20476 }())) __15: ; @@ -93940,7 +93976,7 @@ if int32((*Expr)(unsafe.Pointer((*Expr)(unsafe.Pointer(pTerm)).FpRight)).Fop) != TK_ASTERISK { return 0 } - Xsqlite3ErrorMsg(tls, pParse, ts+20436, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+20483, 0) return 1 } @@ -94006,7 +94042,7 @@ } Xsqlite3ExprListDelete(tls, db, (*Select)(unsafe.Pointer(bp)).FpEList) pNew = sqlite3ExpandReturning(tls, pParse, (*Returning)(unsafe.Pointer(pReturning)).FpReturnEL, pTab) - if !(int32((*Sqlite3)(unsafe.Pointer(db)).FmallocFailed) != 0) { + if (*Parse)(unsafe.Pointer(pParse)).FnErr == 0 { libc.Xmemset(tls, bp+240, 0, uint64(unsafe.Sizeof(NameContext{}))) if (*Returning)(unsafe.Pointer(pReturning)).FnRetCol == 0 { (*Returning)(unsafe.Pointer(pReturning)).FnRetCol = (*ExprList)(unsafe.Pointer(pNew)).FnExpr @@ -94170,7 +94206,7 @@ if v != 0 { if (*Trigger)(unsafe.Pointer(pTrigger)).FzName != 0 { Xsqlite3VdbeChangeP4(tls, v, -1, - Xsqlite3MPrintf(tls, db, ts+20478, libc.VaList(bp, (*Trigger)(unsafe.Pointer(pTrigger)).FzName)), -6) + Xsqlite3MPrintf(tls, db, ts+20525, libc.VaList(bp, (*Trigger)(unsafe.Pointer(pTrigger)).FzName)), -6) } if (*Trigger)(unsafe.Pointer(pTrigger)).FpWhen != 0 { @@ -94763,7 +94799,7 @@ } Xsqlite3ErrorMsg(tls, pParse, - ts+20492, + ts+20539, libc.VaList(bp, (*Column)(unsafe.Pointer((*Table)(unsafe.Pointer(pTab)).FaCol+uintptr(j)*24)).FzCnName)) goto update_cleanup __27: @@ -94795,7 +94831,7 @@ iRowidExpr = i goto __30 __29: - Xsqlite3ErrorMsg(tls, pParse, ts+20528, libc.VaList(bp+8, (*ExprList_item)(unsafe.Pointer(pChanges+8+uintptr(i)*32)).FzEName)) + Xsqlite3ErrorMsg(tls, pParse, ts+20575, libc.VaList(bp+8, (*ExprList_item)(unsafe.Pointer(pChanges+8+uintptr(i)*32)).FzEName)) (*Parse)(unsafe.Pointer(pParse)).FcheckSchema = U8(1) goto update_cleanup __30: @@ -95121,7 +95157,12 @@ goto __77 __76: flags = WHERE_ONEPASS_DESIRED - if !(!(int32((*Parse)(unsafe.Pointer(pParse)).Fnested) != 0) && !(pTrigger != 0) && !(hasFK != 0) && !(chngKey != 0) && !(*(*int32)(unsafe.Pointer(bp + 104)) != 0)) { + if !(!(int32((*Parse)(unsafe.Pointer(pParse)).Fnested) != 0) && + !(pTrigger != 0) && + !(hasFK != 0) && + !(chngKey != 0) && + !(*(*int32)(unsafe.Pointer(bp + 104)) != 0) && + (*NameContext)(unsafe.Pointer(bp+40)).FncFlags&NC_Subquery == 0) { goto __78 } flags = flags | WHERE_ONEPASS_MULTIROW @@ -95675,7 +95716,7 @@ if !(regRowCount != 0) { goto __169 } - Xsqlite3CodeChangeCount(tls, v, regRowCount, ts+20547) + Xsqlite3CodeChangeCount(tls, v, regRowCount, ts+20594) __169: ; update_cleanup: @@ -95981,10 +96022,10 @@ if nClause == 0 && (*Upsert)(unsafe.Pointer(pUpsert)).FpNextUpsert == uintptr(0) { *(*uint8)(unsafe.Pointer(bp + 216)) = uint8(0) } else { - Xsqlite3_snprintf(tls, int32(unsafe.Sizeof([16]uint8{})), bp+216, ts+20560, libc.VaList(bp, nClause+1)) + Xsqlite3_snprintf(tls, int32(unsafe.Sizeof([16]uint8{})), bp+216, ts+20607, libc.VaList(bp, nClause+1)) } Xsqlite3ErrorMsg(tls, pParse, - ts+20564, libc.VaList(bp+8, bp+216)) + ts+20611, libc.VaList(bp+8, bp+216)) return SQLITE_ERROR } @@ -96107,7 
+96148,7 @@ var zSubSql uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp)), 0) if zSubSql != 0 && - (libc.Xstrncmp(tls, zSubSql, ts+20637, uint64(3)) == 0 || libc.Xstrncmp(tls, zSubSql, ts+20641, uint64(3)) == 0) { + (libc.Xstrncmp(tls, zSubSql, ts+20684, uint64(3)) == 0 || libc.Xstrncmp(tls, zSubSql, ts+20688, uint64(3)) == 0) { rc = execSql(tls, db, pzErrMsg, zSubSql) if rc != SQLITE_OK { break @@ -96255,14 +96296,14 @@ if !!(int32((*Sqlite3)(unsafe.Pointer(db)).FautoCommit) != 0) { goto __1 } - Xsqlite3SetString(tls, pzErrMsg, db, ts+20645) + Xsqlite3SetString(tls, pzErrMsg, db, ts+20692) return SQLITE_ERROR __1: ; if !((*Sqlite3)(unsafe.Pointer(db)).FnVdbeActive > 1) { goto __2 } - Xsqlite3SetString(tls, pzErrMsg, db, ts+20685) + Xsqlite3SetString(tls, pzErrMsg, db, ts+20732) return SQLITE_ERROR __2: ; @@ -96273,7 +96314,7 @@ if !(Xsqlite3_value_type(tls, pOut) != SQLITE_TEXT) { goto __5 } - Xsqlite3SetString(tls, pzErrMsg, db, ts+20728) + Xsqlite3SetString(tls, pzErrMsg, db, ts+20775) return SQLITE_ERROR __5: ; @@ -96301,7 +96342,7 @@ isMemDb = Xsqlite3PagerIsMemdb(tls, Xsqlite3BtreePager(tls, pMain)) nDb = (*Sqlite3)(unsafe.Pointer(db)).FnDb - rc = execSqlF(tls, db, pzErrMsg, ts+20746, libc.VaList(bp, zOut)) + rc = execSqlF(tls, db, pzErrMsg, ts+20793, libc.VaList(bp, zOut)) (*Sqlite3)(unsafe.Pointer(db)).FopenFlags = saved_openFlags if !(rc != SQLITE_OK) { goto __6 @@ -96321,7 +96362,7 @@ goto __8 } rc = SQLITE_ERROR - Xsqlite3SetString(tls, pzErrMsg, db, ts+20769) + Xsqlite3SetString(tls, pzErrMsg, db, ts+20816) goto end_of_vacuum __8: ; @@ -96381,7 +96422,7 @@ (*Sqlite3)(unsafe.Pointer(db)).Finit.FiDb = U8(nDb) rc = execSqlF(tls, db, pzErrMsg, - ts+20796, + ts+20843, libc.VaList(bp+8, zDbMain)) if !(rc != SQLITE_OK) { goto __13 @@ -96390,7 +96431,7 @@ __13: ; rc = execSqlF(tls, db, pzErrMsg, - ts+20904, + ts+20951, libc.VaList(bp+16, zDbMain)) if !(rc != SQLITE_OK) { goto __14 @@ -96401,7 +96442,7 @@ (*Sqlite3)(unsafe.Pointer(db)).Finit.FiDb = U8(0) rc = execSqlF(tls, db, pzErrMsg, - ts+20958, + ts+21005, libc.VaList(bp+24, zDbMain)) *(*U32)(unsafe.Pointer(db + 44)) &= libc.Uint32FromInt32(libc.CplInt32(DBFLAG_Vacuum)) @@ -96412,7 +96453,7 @@ __15: ; rc = execSqlF(tls, db, pzErrMsg, - ts+21109, + ts+21156, libc.VaList(bp+32, zDbMain)) if !(rc != 0) { goto __16 @@ -96841,11 +96882,11 @@ if pEnd != 0 { (*Parse)(unsafe.Pointer(pParse)).FsNameToken.Fn = uint32(int32((int64((*Token)(unsafe.Pointer(pEnd)).Fz)-int64((*Parse)(unsafe.Pointer(pParse)).FsNameToken.Fz))/1)) + (*Token)(unsafe.Pointer(pEnd)).Fn } - zStmt = Xsqlite3MPrintf(tls, db, ts+21239, libc.VaList(bp, pParse+272)) + zStmt = Xsqlite3MPrintf(tls, db, ts+21286, libc.VaList(bp, pParse+272)) iDb = Xsqlite3SchemaToIndex(tls, db, (*Table)(unsafe.Pointer(pTab)).FpSchema) Xsqlite3NestedParse(tls, pParse, - ts+21263, + ts+21310, libc.VaList(bp+8, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName, (*Table)(unsafe.Pointer(pTab)).FzName, (*Table)(unsafe.Pointer(pTab)).FzName, @@ -96855,7 +96896,7 @@ Xsqlite3ChangeCookie(tls, pParse, iDb) Xsqlite3VdbeAddOp0(tls, v, OP_Expire) - zWhere = Xsqlite3MPrintf(tls, db, ts+21362, libc.VaList(bp+48, (*Table)(unsafe.Pointer(pTab)).FzName, zStmt)) + zWhere = Xsqlite3MPrintf(tls, db, ts+21409, libc.VaList(bp+48, (*Table)(unsafe.Pointer(pTab)).FzName, zStmt)) Xsqlite3VdbeAddParseSchemaOp(tls, v, iDb, zWhere, uint16(0)) Xsqlite3DbFree(tls, db, zStmt) @@ -96916,7 +96957,7 @@ for pCtx = (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx; pCtx != 0; pCtx = 
(*VtabCtx)(unsafe.Pointer(pCtx)).FpPrior { if (*VtabCtx)(unsafe.Pointer(pCtx)).FpTab == pTab { *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, - ts+21381, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName)) + ts+21428, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName)) return SQLITE_LOCKED } } @@ -96944,9 +96985,11 @@ (*VtabCtx)(unsafe.Pointer(bp + 32)).FpPrior = (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx (*VtabCtx)(unsafe.Pointer(bp + 32)).FbDeclared = 0 (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx = bp + 32 + (*Table)(unsafe.Pointer(pTab)).FnTabRef++ rc = (*struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, uintptr, uintptr) int32 })(unsafe.Pointer(&struct{ uintptr }{xConstruct})).f(tls, db, (*Module)(unsafe.Pointer(pMod)).FpAux, nArg, azArg, pVTable+16, bp+64) + Xsqlite3DeleteTable(tls, db, pTab) (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx = (*VtabCtx)(unsafe.Pointer(bp + 32)).FpPrior if rc == SQLITE_NOMEM { Xsqlite3OomFault(tls, db) @@ -96954,7 +96997,7 @@ if SQLITE_OK != rc { if *(*uintptr)(unsafe.Pointer(bp + 64)) == uintptr(0) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+21423, libc.VaList(bp+8, zModuleName)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+21470, libc.VaList(bp+8, zModuleName)) } else { *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+3663, libc.VaList(bp+16, *(*uintptr)(unsafe.Pointer(bp + 64)))) Xsqlite3_free(tls, *(*uintptr)(unsafe.Pointer(bp + 64))) @@ -96966,7 +97009,7 @@ (*Module)(unsafe.Pointer(pMod)).FnRefModule++ (*VTable)(unsafe.Pointer(pVTable)).FnRef = 1 if (*VtabCtx)(unsafe.Pointer(bp+32)).FbDeclared == 0 { - var zFormat uintptr = ts + 21453 + var zFormat uintptr = ts + 21500 *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, zFormat, libc.VaList(bp+24, (*Table)(unsafe.Pointer(pTab)).FzName)) Xsqlite3VtabUnlock(tls, pVTable) rc = SQLITE_ERROR @@ -97040,7 +97083,7 @@ if !(pMod != 0) { var zModule uintptr = *(*uintptr)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pTab + 64 + 8)))) - Xsqlite3ErrorMsg(tls, pParse, ts+21499, libc.VaList(bp, zModule)) + Xsqlite3ErrorMsg(tls, pParse, ts+21546, libc.VaList(bp, zModule)) rc = SQLITE_ERROR } else { *(*uintptr)(unsafe.Pointer(bp + 16)) = uintptr(0) @@ -97098,7 +97141,7 @@ pMod = Xsqlite3HashFind(tls, db+576, zMod) if pMod == uintptr(0) || (*Sqlite3_module)(unsafe.Pointer((*Module)(unsafe.Pointer(pMod)).FpModule)).FxCreate == uintptr(0) || (*Sqlite3_module)(unsafe.Pointer((*Module)(unsafe.Pointer(pMod)).FpModule)).FxDestroy == uintptr(0) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+21499, libc.VaList(bp, zMod)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+21546, libc.VaList(bp, zMod)) rc = SQLITE_ERROR } else { rc = vtabCallConstructor(tls, db, pTab, pMod, (*Sqlite3_module)(unsafe.Pointer((*Module)(unsafe.Pointer(pMod)).FpModule)).FxCreate, pzErr) @@ -97132,7 +97175,7 @@ if !(pCtx != 0) || (*VtabCtx)(unsafe.Pointer(pCtx)).FbDeclared != 0 { Xsqlite3Error(tls, db, SQLITE_MISUSE) Xsqlite3_mutex_leave(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) - return Xsqlite3MisuseError(tls, 151030) + return Xsqlite3MisuseError(tls, 151102) } pTab = (*VtabCtx)(unsafe.Pointer(pCtx)).FpTab @@ -97585,7 +97628,7 @@ Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) p = (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx if !(p != 0) { - rc = Xsqlite3MisuseError(tls, 151521) + rc = Xsqlite3MisuseError(tls, 151593) } else { ap = va switch op { @@ -97612,7 +97655,7 @@ fallthrough default: { - rc = 
Xsqlite3MisuseError(tls, 151539) + rc = Xsqlite3MisuseError(tls, 151611) break } @@ -97843,7 +97886,7 @@ func explainIndexColumnName(tls *libc.TLS, pIdx uintptr, i int32) uintptr { i = int32(*(*I16)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx)).FaiColumn + uintptr(i)*2))) if i == -2 { - return ts + 21518 + return ts + 21565 } if i == -1 { return ts + 16267 @@ -97855,11 +97898,11 @@ var i int32 if bAnd != 0 { - Xsqlite3_str_append(tls, pStr, ts+21525, 5) + Xsqlite3_str_append(tls, pStr, ts+21572, 5) } if nTerm > 1 { - Xsqlite3_str_append(tls, pStr, ts+21531, 1) + Xsqlite3_str_append(tls, pStr, ts+21578, 1) } for i = 0; i < nTerm; i++ { if i != 0 { @@ -97874,7 +97917,7 @@ Xsqlite3_str_append(tls, pStr, zOp, 1) if nTerm > 1 { - Xsqlite3_str_append(tls, pStr, ts+21531, 1) + Xsqlite3_str_append(tls, pStr, ts+21578, 1) } for i = 0; i < nTerm; i++ { if i != 0 { @@ -97900,27 +97943,27 @@ if int32(nEq) == 0 && (*WhereLoop)(unsafe.Pointer(pLoop)).FwsFlags&U32(WHERE_BTM_LIMIT|WHERE_TOP_LIMIT) == U32(0) { return } - Xsqlite3_str_append(tls, pStr, ts+21533, 2) + Xsqlite3_str_append(tls, pStr, ts+21580, 2) for i = 0; i < int32(nEq); i++ { var z uintptr = explainIndexColumnName(tls, pIndex, i) if i != 0 { - Xsqlite3_str_append(tls, pStr, ts+21525, 5) + Xsqlite3_str_append(tls, pStr, ts+21572, 5) } Xsqlite3_str_appendf(tls, pStr, func() uintptr { if i >= int32(nSkip) { - return ts + 21536 + return ts + 21583 } - return ts + 21541 + return ts + 21588 }(), libc.VaList(bp, z)) } j = i if (*WhereLoop)(unsafe.Pointer(pLoop)).FwsFlags&U32(WHERE_BTM_LIMIT) != 0 { - explainAppendTerm(tls, pStr, pIndex, int32(*(*U16)(unsafe.Pointer(pLoop + 24 + 2))), j, i, ts+21549) + explainAppendTerm(tls, pStr, pIndex, int32(*(*U16)(unsafe.Pointer(pLoop + 24 + 2))), j, i, ts+21596) i = 1 } if (*WhereLoop)(unsafe.Pointer(pLoop)).FwsFlags&U32(WHERE_TOP_LIMIT) != 0 { - explainAppendTerm(tls, pStr, pIndex, int32(*(*U16)(unsafe.Pointer(pLoop + 24 + 4))), j, i, ts+21551) + explainAppendTerm(tls, pStr, pIndex, int32(*(*U16)(unsafe.Pointer(pLoop + 24 + 4))), j, i, ts+21598) } Xsqlite3_str_append(tls, pStr, ts+4957, 1) } @@ -97963,11 +98006,11 @@ Xsqlite3StrAccumInit(tls, bp+64, db, bp+96, int32(unsafe.Sizeof([100]uint8{})), SQLITE_MAX_LENGTH) (*StrAccum)(unsafe.Pointer(bp + 64)).FprintfFlags = U8(SQLITE_PRINTF_INTERNAL) - Xsqlite3_str_appendf(tls, bp+64, ts+21553, libc.VaList(bp, func() uintptr { + Xsqlite3_str_appendf(tls, bp+64, ts+21600, libc.VaList(bp, func() uintptr { if isSearch != 0 { - return ts + 21559 + return ts + 21606 } - return ts + 21566 + return ts + 21613 }(), pItem)) if flags&U32(WHERE_IPK|WHERE_VIRTUALTABLE) == U32(0) { var zFmt uintptr = uintptr(0) @@ -97980,40 +98023,40 @@ zFmt = ts + 10976 } } else if flags&U32(WHERE_PARTIALIDX) != 0 { - zFmt = ts + 21571 + zFmt = ts + 21618 } else if flags&U32(WHERE_AUTO_INDEX) != 0 { - zFmt = ts + 21604 + zFmt = ts + 21651 } else if flags&U32(WHERE_IDX_ONLY) != 0 { - zFmt = ts + 21629 + zFmt = ts + 21676 } else { - zFmt = ts + 21647 + zFmt = ts + 21694 } if zFmt != 0 { - Xsqlite3_str_append(tls, bp+64, ts+21656, 7) + Xsqlite3_str_append(tls, bp+64, ts+21703, 7) Xsqlite3_str_appendf(tls, bp+64, zFmt, libc.VaList(bp+16, (*Index)(unsafe.Pointer(pIdx)).FzName)) explainIndexRange(tls, bp+64, pLoop) } } else if flags&U32(WHERE_IPK) != U32(0) && flags&U32(WHERE_CONSTRAINT) != U32(0) { var cRangeOp uint8 var zRowid uintptr = ts + 16267 - Xsqlite3_str_appendf(tls, bp+64, ts+21664, libc.VaList(bp+24, zRowid)) + Xsqlite3_str_appendf(tls, bp+64, ts+21711, libc.VaList(bp+24, zRowid)) if 
flags&U32(WHERE_COLUMN_EQ|WHERE_COLUMN_IN) != 0 { cRangeOp = uint8('=') } else if flags&U32(WHERE_BOTH_LIMIT) == U32(WHERE_BOTH_LIMIT) { - Xsqlite3_str_appendf(tls, bp+64, ts+21695, libc.VaList(bp+32, zRowid)) + Xsqlite3_str_appendf(tls, bp+64, ts+21742, libc.VaList(bp+32, zRowid)) cRangeOp = uint8('<') } else if flags&U32(WHERE_BTM_LIMIT) != 0 { cRangeOp = uint8('>') } else { cRangeOp = uint8('<') } - Xsqlite3_str_appendf(tls, bp+64, ts+21705, libc.VaList(bp+40, int32(cRangeOp))) + Xsqlite3_str_appendf(tls, bp+64, ts+21752, libc.VaList(bp+40, int32(cRangeOp))) } else if flags&U32(WHERE_VIRTUALTABLE) != U32(0) { - Xsqlite3_str_appendf(tls, bp+64, ts+21710, + Xsqlite3_str_appendf(tls, bp+64, ts+21757, libc.VaList(bp+48, *(*int32)(unsafe.Pointer(pLoop + 24)), *(*uintptr)(unsafe.Pointer(pLoop + 24 + 16)))) } if int32((*SrcItem)(unsafe.Pointer(pItem)).Ffg.Fjointype)&JT_LEFT != 0 { - Xsqlite3_str_appendf(tls, bp+64, ts+21737, 0) + Xsqlite3_str_appendf(tls, bp+64, ts+21784, 0) } zMsg = Xsqlite3StrAccumFinish(tls, bp+64) @@ -98045,22 +98088,22 @@ Xsqlite3StrAccumInit(tls, bp+24, db, bp+56, int32(unsafe.Sizeof([100]uint8{})), SQLITE_MAX_LENGTH) (*StrAccum)(unsafe.Pointer(bp + 24)).FprintfFlags = U8(SQLITE_PRINTF_INTERNAL) - Xsqlite3_str_appendf(tls, bp+24, ts+21748, libc.VaList(bp, pItem)) + Xsqlite3_str_appendf(tls, bp+24, ts+21795, libc.VaList(bp, pItem)) pLoop = (*WhereLevel)(unsafe.Pointer(pLevel)).FpWLoop if (*WhereLoop)(unsafe.Pointer(pLoop)).FwsFlags&U32(WHERE_IPK) != 0 { var pTab uintptr = (*SrcItem)(unsafe.Pointer(pItem)).FpTab if int32((*Table)(unsafe.Pointer(pTab)).FiPKey) >= 0 { - Xsqlite3_str_appendf(tls, bp+24, ts+21536, libc.VaList(bp+8, (*Column)(unsafe.Pointer((*Table)(unsafe.Pointer(pTab)).FaCol+uintptr((*Table)(unsafe.Pointer(pTab)).FiPKey)*24)).FzCnName)) + Xsqlite3_str_appendf(tls, bp+24, ts+21583, libc.VaList(bp+8, (*Column)(unsafe.Pointer((*Table)(unsafe.Pointer(pTab)).FaCol+uintptr((*Table)(unsafe.Pointer(pTab)).FiPKey)*24)).FzCnName)) } else { - Xsqlite3_str_appendf(tls, bp+24, ts+21769, 0) + Xsqlite3_str_appendf(tls, bp+24, ts+21816, 0) } } else { for i = int32((*WhereLoop)(unsafe.Pointer(pLoop)).FnSkip); i < int32(*(*U16)(unsafe.Pointer(pLoop + 24))); i++ { var z uintptr = explainIndexColumnName(tls, *(*uintptr)(unsafe.Pointer(pLoop + 24 + 8)), i) if i > int32((*WhereLoop)(unsafe.Pointer(pLoop)).FnSkip) { - Xsqlite3_str_append(tls, bp+24, ts+21525, 5) + Xsqlite3_str_append(tls, bp+24, ts+21572, 5) } - Xsqlite3_str_appendf(tls, bp+24, ts+21536, libc.VaList(bp+16, z)) + Xsqlite3_str_appendf(tls, bp+24, ts+21583, libc.VaList(bp+16, z)) } } Xsqlite3_str_append(tls, bp+24, ts+4957, 1) @@ -99657,7 +99700,7 @@ ; __126: ; - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21777, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21824, 0) ii = 0 __135: if !(ii < (*WhereClause)(unsafe.Pointer(pOrWc)).FnTerm) { @@ -99685,7 +99728,7 @@ pOrExpr = pAndExpr __140: ; - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21792, libc.VaList(bp, ii+1)) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21839, libc.VaList(bp, ii+1)) pSubWInfo = Xsqlite3WhereBegin(tls, pParse, pOrTab, pOrExpr, uintptr(0), uintptr(0), uintptr(0), uint16(WHERE_OR_SUBCLAUSE), iCovCur) @@ -100203,7 +100246,7 @@ var mAll Bitmask = uint64(0) var k int32 - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21801, libc.VaList(bp, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pTabItem)).FpTab)).FzName)) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21848, libc.VaList(bp, 
(*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pTabItem)).FpTab)).FzName)) for k = 0; k < iLevel; k++ { var iIdxCur int32 @@ -100564,7 +100607,7 @@ {FzOp: ts + 16116, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_MATCH)}, {FzOp: ts + 15447, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_GLOB)}, {FzOp: ts + 14967, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_LIKE)}, - {FzOp: ts + 21815, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_REGEXP)}, + {FzOp: ts + 21862, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_REGEXP)}, } func transferJoinMarkings(tls *libc.TLS, pDerived uintptr, pBase uintptr) { @@ -101054,12 +101097,12 @@ extraRight = x - uint64(1) if prereqAll>>1 >= x { - Xsqlite3ErrorMsg(tls, pParse, ts+21822, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+21869, 0) return } } else if prereqAll>>1 >= x { if (*SrcList)(unsafe.Pointer(pSrc)).FnSrc > 0 && int32((*SrcItem)(unsafe.Pointer(pSrc+8)).Ffg.Fjointype)&JT_LTORJ != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+21822, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+21869, 0) return } *(*U32)(unsafe.Pointer(pExpr + 4)) &= libc.Uint32FromInt32(libc.CplInt32(EP_InnerON)) @@ -101138,7 +101181,7 @@ !((*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_OuterON) != U32(0)) && 0 == Xsqlite3ExprCanBeNull(tls, pLeft) { (*Expr)(unsafe.Pointer(pExpr)).Fop = U8(TK_TRUEFALSE) - *(*uintptr)(unsafe.Pointer(pExpr + 8)) = ts + 6769 + *(*uintptr)(unsafe.Pointer(pExpr + 8)) = ts + 7699 *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_IsFalse) (*WhereTerm)(unsafe.Pointer(pTerm)).FprereqAll = uint64(0) (*WhereTerm)(unsafe.Pointer(pTerm)).FeOperator = U16(0) @@ -101232,7 +101275,7 @@ } zCollSeqName = func() uintptr { if *(*int32)(unsafe.Pointer(bp + 20)) != 0 { - return ts + 21863 + return ts + 21910 } return uintptr(unsafe.Pointer(&Xsqlite3StrBINARY)) }() @@ -101608,7 +101651,7 @@ k++ } if k >= int32((*Table)(unsafe.Pointer(pTab)).FnCol) { - Xsqlite3ErrorMsg(tls, pParse, ts+21870, + Xsqlite3ErrorMsg(tls, pParse, ts+21917, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName, j)) return } @@ -101624,7 +101667,7 @@ pRhs = Xsqlite3PExpr(tls, pParse, TK_UPLUS, Xsqlite3ExprDup(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, (*ExprList_item)(unsafe.Pointer(pArgs+8+uintptr(j)*32)).FpExpr, 0), uintptr(0)) pTerm = Xsqlite3PExpr(tls, pParse, TK_EQ, pColRef, pRhs) - if int32((*SrcItem)(unsafe.Pointer(pItem)).Ffg.Fjointype)&(JT_LEFT|JT_LTORJ) != 0 { + if int32((*SrcItem)(unsafe.Pointer(pItem)).Ffg.Fjointype)&(JT_LEFT|JT_LTORJ|JT_RIGHT) != 0 { joinType = U32(EP_OuterON) } else { joinType = U32(EP_InnerON) @@ -102342,7 +102385,7 @@ goto __6 } Xsqlite3_log(tls, SQLITE_WARNING|int32(1)<<8, - ts+21906, libc.VaList(bp, (*Table)(unsafe.Pointer(pTable)).FzName, + ts+21953, libc.VaList(bp, (*Table)(unsafe.Pointer(pTable)).FzName, (*Column)(unsafe.Pointer((*Table)(unsafe.Pointer(pTable)).FaCol+uintptr(iCol)*24)).FzCnName)) sentWarning = U8(1) __6: @@ -102413,7 +102456,7 @@ __14: ; *(*uintptr)(unsafe.Pointer(pLoop + 24 + 8)) = pIdx - (*Index)(unsafe.Pointer(pIdx)).FzName = ts + 21932 + (*Index)(unsafe.Pointer(pIdx)).FzName = ts + 21979 (*Index)(unsafe.Pointer(pIdx)).FpTable = pTable n = 0 idxCols = uint64(0) @@ -102587,6 +102630,10 @@ var v uintptr = (*Parse)(unsafe.Pointer(pParse)).FpVdbe var pLoop uintptr = (*WhereLevel)(unsafe.Pointer(pLevel)).FpWLoop var iCur int32 + var saved_pIdxEpr uintptr + + saved_pIdxEpr = (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr + (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr = uintptr(0) addrOnce = Xsqlite3VdbeAddOp0(tls, v, OP_Once) for __ccgo := true; __ccgo; __ccgo = iLevel < int32((*WhereInfo)(unsafe.Pointer(pWInfo)).FnLevel) { @@ 
-102630,9 +102677,7 @@ var r1 int32 = Xsqlite3GetTempRange(tls, pParse, n) var jj int32 for jj = 0; jj < n; jj++ { - var iCol int32 = int32(*(*I16)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx)).FaiColumn + uintptr(jj)*2))) - - Xsqlite3ExprCodeGetColumnOfTable(tls, v, (*Index)(unsafe.Pointer(pIdx)).FpTable, iCur, iCol, r1+jj) + Xsqlite3ExprCodeLoadIndexColumn(tls, pParse, pIdx, iCur, jj, r1+jj) } Xsqlite3VdbeAddOp4Int(tls, v, OP_FilterAdd, (*WhereLevel)(unsafe.Pointer(pLevel)).FregFilter, 0, r1, n) Xsqlite3ReleaseTempRange(tls, pParse, r1, n) @@ -102666,6 +102711,7 @@ } } Xsqlite3VdbeJumpHere(tls, v, addrOnce) + (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr = saved_pIdxEpr } func allocateIndexInfo(tls *libc.TLS, pWInfo uintptr, pWC uintptr, mUnusable Bitmask, pSrc uintptr, pmNoOmit uintptr) uintptr { @@ -102924,11 +102970,16 @@ _ = pParse + if !((*Table)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx)).FpTable)).FtabFlags&U32(TF_WithoutRowid) == U32(0)) && int32(*(*uint16)(unsafe.Pointer(pIdx + 100))&0x3>>0) == SQLITE_IDXTYPE_PRIMARYKEY { + nField = int32((*Index)(unsafe.Pointer(pIdx)).FnKeyCol) + } else { + nField = int32((*Index)(unsafe.Pointer(pIdx)).FnColumn) + } nField = func() int32 { - if int32((*UnpackedRecord)(unsafe.Pointer(pRec)).FnField) < (*Index)(unsafe.Pointer(pIdx)).FnSample { + if int32((*UnpackedRecord)(unsafe.Pointer(pRec)).FnField) < nField { return int32((*UnpackedRecord)(unsafe.Pointer(pRec)).FnField) } - return (*Index)(unsafe.Pointer(pIdx)).FnSample + return nField }() iCol = 0 iSample = (*Index)(unsafe.Pointer(pIdx)).FnSample * nField @@ -104509,7 +104560,7 @@ j >= (*WhereClause)(unsafe.Pointer(pWC)).FnTerm || *(*uintptr)(unsafe.Pointer((*WhereLoop)(unsafe.Pointer(pNew)).FaLTerm + uintptr(iTerm)*8)) != uintptr(0) || int32((*sqlite3_index_constraint)(unsafe.Pointer(pIdxCons)).Fusable) == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+21943, libc.VaList(bp, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pSrc)).FpTab)).FzName)) + Xsqlite3ErrorMsg(tls, pParse, ts+21990, libc.VaList(bp, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pSrc)).FpTab)).FzName)) return SQLITE_ERROR } @@ -104567,7 +104618,7 @@ (*WhereLoop)(unsafe.Pointer(pNew)).FnLTerm = U16(mxTerm + 1) for i = 0; i <= mxTerm; i++ { if *(*uintptr)(unsafe.Pointer((*WhereLoop)(unsafe.Pointer(pNew)).FaLTerm + uintptr(i)*8)) == uintptr(0) { - Xsqlite3ErrorMsg(tls, pParse, ts+21943, libc.VaList(bp+8, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pSrc)).FpTab)).FzName)) + Xsqlite3ErrorMsg(tls, pParse, ts+21990, libc.VaList(bp+8, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pSrc)).FpTab)).FzName)) return SQLITE_ERROR } @@ -104965,7 +105016,7 @@ mPrior = mPrior | (*WhereLoop)(unsafe.Pointer(pNew)).FmaskSelf if rc != 0 || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { if rc == SQLITE_DONE { - Xsqlite3_log(tls, SQLITE_WARNING, ts+21969, 0) + Xsqlite3_log(tls, SQLITE_WARNING, ts+22016, 0) rc = SQLITE_OK } else { goto __3 @@ -105572,7 +105623,7 @@ } if nFrom == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+22004, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+22051, 0) Xsqlite3DbFreeNN(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pSpace) return SQLITE_ERROR } @@ -105607,6 +105658,10 @@ if int32((*WherePath)(unsafe.Pointer(pFrom)).FisOrdered) == (*ExprList)(unsafe.Pointer((*WhereInfo)(unsafe.Pointer(pWInfo)).FpOrderBy)).FnExpr { (*WhereInfo)(unsafe.Pointer(pWInfo)).FeDistinct = U8(WHERE_DISTINCT_ORDERED) } + if (*Select)(unsafe.Pointer((*WhereInfo)(unsafe.Pointer(pWInfo)).FpSelect)).FpOrderBy != 0 && + 
int32((*WhereInfo)(unsafe.Pointer(pWInfo)).FnOBSat) > (*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer((*WhereInfo)(unsafe.Pointer(pWInfo)).FpSelect)).FpOrderBy)).FnExpr { + (*WhereInfo)(unsafe.Pointer(pWInfo)).FnOBSat = I8((*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer((*WhereInfo)(unsafe.Pointer(pWInfo)).FpSelect)).FpOrderBy)).FnExpr) + } } else { (*WhereInfo)(unsafe.Pointer(pWInfo)).FrevMask = (*WherePath)(unsafe.Pointer(pFrom)).FrevLoop if int32((*WhereInfo)(unsafe.Pointer(pWInfo)).FnOBSat) <= 0 { @@ -105901,6 +105956,9 @@ (*IndexedExpr)(unsafe.Pointer(p)).FiIdxCur = iIdxCur (*IndexedExpr)(unsafe.Pointer(p)).FiIdxCol = i (*IndexedExpr)(unsafe.Pointer(p)).FbMaybeNullRow = U8(bMaybeNullRow) + if Xsqlite3IndexAffinityStr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pIdx) != 0 { + (*IndexedExpr)(unsafe.Pointer(p)).Faff = *(*uint8)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx)).FzColAff + uintptr(i))) + } (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr = p if (*IndexedExpr)(unsafe.Pointer(p)).FpIENext == uintptr(0) { Xsqlite3ParserAddCleanup(tls, pParse, *(*uintptr)(unsafe.Pointer(&struct { @@ -106053,7 +106111,7 @@ if !((*SrcList)(unsafe.Pointer(pTabList)).FnSrc > int32(uint64(unsafe.Sizeof(Bitmask(0)))*uint64(8))) { goto __2 } - Xsqlite3ErrorMsg(tls, pParse, ts+22022, libc.VaList(bp, int32(uint64(unsafe.Sizeof(Bitmask(0)))*uint64(8)))) + Xsqlite3ErrorMsg(tls, pParse, ts+22069, libc.VaList(bp, int32(uint64(unsafe.Sizeof(Bitmask(0)))*uint64(8)))) return uintptr(0) __2: ; @@ -106117,7 +106175,7 @@ (*WhereInfo)(unsafe.Pointer(pWInfo)).FeDistinct = U8(WHERE_DISTINCT_UNIQUE) __7: ; - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+22050, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+22097, 0) goto __5 __4: ii = 0 @@ -106999,7 +107057,7 @@ error_out: Xsqlite3_result_error(tls, - pCtx, ts+22068, -1) + pCtx, ts+22115, -1) } func nth_valueFinalizeFunc(tls *libc.TLS, pCtx uintptr) { @@ -107132,7 +107190,7 @@ (*NtileCtx)(unsafe.Pointer(p)).FnParam = Xsqlite3_value_int64(tls, *(*uintptr)(unsafe.Pointer(apArg))) if (*NtileCtx)(unsafe.Pointer(p)).FnParam <= int64(0) { Xsqlite3_result_error(tls, - pCtx, ts+22124, -1) + pCtx, ts+22171, -1) } } (*NtileCtx)(unsafe.Pointer(p)).FnTotal++ @@ -107222,17 +107280,17 @@ } } -var row_numberName = *(*[11]uint8)(unsafe.Pointer(ts + 22169)) -var dense_rankName = *(*[11]uint8)(unsafe.Pointer(ts + 22180)) -var rankName = *(*[5]uint8)(unsafe.Pointer(ts + 22191)) -var percent_rankName = *(*[13]uint8)(unsafe.Pointer(ts + 22196)) -var cume_distName = *(*[10]uint8)(unsafe.Pointer(ts + 22209)) -var ntileName = *(*[6]uint8)(unsafe.Pointer(ts + 22219)) -var last_valueName = *(*[11]uint8)(unsafe.Pointer(ts + 22225)) -var nth_valueName = *(*[10]uint8)(unsafe.Pointer(ts + 22236)) -var first_valueName = *(*[12]uint8)(unsafe.Pointer(ts + 22246)) -var leadName = *(*[5]uint8)(unsafe.Pointer(ts + 22258)) -var lagName = *(*[4]uint8)(unsafe.Pointer(ts + 22263)) +var row_numberName = *(*[11]uint8)(unsafe.Pointer(ts + 22216)) +var dense_rankName = *(*[11]uint8)(unsafe.Pointer(ts + 22227)) +var rankName = *(*[5]uint8)(unsafe.Pointer(ts + 22238)) +var percent_rankName = *(*[13]uint8)(unsafe.Pointer(ts + 22243)) +var cume_distName = *(*[10]uint8)(unsafe.Pointer(ts + 22256)) +var ntileName = *(*[6]uint8)(unsafe.Pointer(ts + 22266)) +var last_valueName = *(*[11]uint8)(unsafe.Pointer(ts + 22272)) +var nth_valueName = *(*[10]uint8)(unsafe.Pointer(ts + 22283)) +var first_valueName = *(*[12]uint8)(unsafe.Pointer(ts + 22293)) +var leadName = *(*[5]uint8)(unsafe.Pointer(ts + 22305)) +var lagName = 
*(*[4]uint8)(unsafe.Pointer(ts + 22310)) func noopStepFunc(tls *libc.TLS, p uintptr, n int32, a uintptr) { _ = p @@ -107278,7 +107336,7 @@ } } if p == uintptr(0) { - Xsqlite3ErrorMsg(tls, pParse, ts+22267, libc.VaList(bp, zName)) + Xsqlite3ErrorMsg(tls, pParse, ts+22314, libc.VaList(bp, zName)) } return p } @@ -107322,12 +107380,12 @@ ((*Window)(unsafe.Pointer(pWin)).FpStart != 0 || (*Window)(unsafe.Pointer(pWin)).FpEnd != 0) && ((*Window)(unsafe.Pointer(pWin)).FpOrderBy == uintptr(0) || (*ExprList)(unsafe.Pointer((*Window)(unsafe.Pointer(pWin)).FpOrderBy)).FnExpr != 1) { Xsqlite3ErrorMsg(tls, pParse, - ts+22286, 0) + ts+22333, 0) } else if (*FuncDef)(unsafe.Pointer(pFunc)).FfuncFlags&U32(SQLITE_FUNC_WINDOW) != 0 { var db uintptr = (*Parse)(unsafe.Pointer(pParse)).Fdb if (*Window)(unsafe.Pointer(pWin)).FpFilter != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+22357, 0) + ts+22404, 0) } else { *(*[8]WindowUpdate)(unsafe.Pointer(bp)) = [8]WindowUpdate{ {FzFunc: uintptr(unsafe.Pointer(&row_numberName)), FeFrmType: TK_ROWS, FeStart: TK_UNBOUNDED, FeEnd: TK_CURRENT}, @@ -107556,7 +107614,7 @@ if int32((*Expr)(unsafe.Pointer(pExpr)).Fop) == TK_AGG_FUNCTION && (*Expr)(unsafe.Pointer(pExpr)).FpAggInfo == uintptr(0) { Xsqlite3ErrorMsg(tls, (*Walker)(unsafe.Pointer(pWalker)).FpParse, - ts+22420, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(pExpr + 8)))) + ts+22467, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(pExpr + 8)))) } return WRC_Continue } @@ -107672,7 +107730,7 @@ if *(*uintptr)(unsafe.Pointer(bp + 48)) == uintptr(0) { *(*uintptr)(unsafe.Pointer(bp + 48)) = Xsqlite3ExprListAppend(tls, pParse, uintptr(0), - Xsqlite3Expr(tls, db, TK_INTEGER, ts+7530)) + Xsqlite3Expr(tls, db, TK_INTEGER, ts+7519)) } pSub = Xsqlite3SelectNew(tls, @@ -107787,7 +107845,7 @@ eStart == TK_FOLLOWING && (eEnd == TK_PRECEDING || eEnd == TK_CURRENT)) { goto __2 } - Xsqlite3ErrorMsg(tls, pParse, ts+22446, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+22493, 0) goto windowAllocErr __2: ; @@ -107852,15 +107910,15 @@ var zErr uintptr = uintptr(0) if (*Window)(unsafe.Pointer(pWin)).FpPartition != 0 { - zErr = ts + 22478 + zErr = ts + 22525 } else if (*Window)(unsafe.Pointer(pExist)).FpOrderBy != 0 && (*Window)(unsafe.Pointer(pWin)).FpOrderBy != 0 { - zErr = ts + 22495 + zErr = ts + 22542 } else if int32((*Window)(unsafe.Pointer(pExist)).FbImplicitFrame) == 0 { - zErr = ts + 22511 + zErr = ts + 22558 } if zErr != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+22531, libc.VaList(bp, zErr, (*Window)(unsafe.Pointer(pWin)).FzBase)) + ts+22578, libc.VaList(bp, zErr, (*Window)(unsafe.Pointer(pWin)).FzBase)) } else { (*Window)(unsafe.Pointer(pWin)).FpPartition = Xsqlite3ExprListDup(tls, db, (*Window)(unsafe.Pointer(pExist)).FpPartition, 0) if (*Window)(unsafe.Pointer(pExist)).FpOrderBy != 0 { @@ -107881,7 +107939,7 @@ (*Window)(unsafe.Pointer(pWin)).FpOwner = p if (*Expr)(unsafe.Pointer(p)).Fflags&U32(EP_Distinct) != 0 && int32((*Window)(unsafe.Pointer(pWin)).FeFrmType) != TK_FILTER { Xsqlite3ErrorMsg(tls, pParse, - ts+22564, 0) + ts+22611, 0) } } else { Xsqlite3WindowDelete(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pWin) @@ -108037,11 +108095,11 @@ } var azErr = [5]uintptr{ - ts + 22611, - ts + 22664, - ts + 22068, - ts + 22715, - ts + 22767, + ts + 22658, + ts + 22711, + ts + 22115, + ts + 22762, + ts + 22814, } var aOp1 = [5]int32{OP_Ge, OP_Ge, OP_Gt, OP_Ge, OP_Ge} @@ -109436,19 +109494,19 @@ } cnt++ if (*Select)(unsafe.Pointer(pLoop)).FpOrderBy != 0 || (*Select)(unsafe.Pointer(pLoop)).FpLimit != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+22817, + 
Xsqlite3ErrorMsg(tls, pParse, ts+22864, libc.VaList(bp, func() uintptr { if (*Select)(unsafe.Pointer(pLoop)).FpOrderBy != uintptr(0) { - return ts + 22859 + return ts + 22906 } - return ts + 22868 + return ts + 22915 }(), Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(pNext)).Fop)))) break } } if (*Select)(unsafe.Pointer(p)).FselFlags&U32(SF_MultiValue) == U32(0) && libc.AssignInt32(&mxSelect, *(*int32)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb + 136 + 4*4))) > 0 && cnt > mxSelect { - Xsqlite3ErrorMsg(tls, pParse, ts+22874, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+22921, 0) } } @@ -109516,7 +109574,7 @@ var p uintptr = Xsqlite3ExprListAppend(tls, pParse, pPrior, uintptr(0)) if (hasCollate != 0 || sortOrder != -1) && int32((*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).Finit.Fbusy) == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+22908, + Xsqlite3ErrorMsg(tls, pParse, ts+22955, libc.VaList(bp, (*Token)(unsafe.Pointer(pIdToken)).Fn, (*Token)(unsafe.Pointer(pIdToken)).Fz)) } Xsqlite3ExprListSetName(tls, pParse, p, pIdToken, 1) @@ -110613,7 +110671,7 @@ yy_pop_parser_stack(tls, yypParser) } - Xsqlite3ErrorMsg(tls, pParse, ts+22946, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+22993, 0) (*YyParser)(unsafe.Pointer(yypParser)).FpParse = pParse } @@ -111592,7 +111650,7 @@ *(*U32)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-1)*24 + 8)) = U32(TF_WithoutRowid | TF_NoVisibleRowid) } else { *(*U32)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-1)*24 + 8)) = U32(0) - Xsqlite3ErrorMsg(tls, pParse, ts+22968, libc.VaList(bp, (*Token)(unsafe.Pointer(yymsp+8)).Fn, (*Token)(unsafe.Pointer(yymsp+8)).Fz)) + Xsqlite3ErrorMsg(tls, pParse, ts+23015, libc.VaList(bp, (*Token)(unsafe.Pointer(yymsp+8)).Fn, (*Token)(unsafe.Pointer(yymsp+8)).Fz)) } } break @@ -111602,7 +111660,7 @@ *(*U32)(unsafe.Pointer(bp + 40)) = U32(TF_Strict) } else { *(*U32)(unsafe.Pointer(bp + 40)) = U32(0) - Xsqlite3ErrorMsg(tls, pParse, ts+22968, libc.VaList(bp+16, (*Token)(unsafe.Pointer(yymsp+8)).Fn, (*Token)(unsafe.Pointer(yymsp+8)).Fz)) + Xsqlite3ErrorMsg(tls, pParse, ts+23015, libc.VaList(bp+16, (*Token)(unsafe.Pointer(yymsp+8)).Fn, (*Token)(unsafe.Pointer(yymsp+8)).Fz)) } } *(*U32)(unsafe.Pointer(yymsp + 8)) = *(*U32)(unsafe.Pointer(bp + 40)) @@ -112345,7 +112403,7 @@ case uint32(157): { Xsqlite3SrcListIndexedBy(tls, pParse, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-5)*24 + 8)), yymsp+libc.UintptrFromInt32(-4)*24+8) - Xsqlite3ExprListCheckLength(tls, pParse, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-2)*24 + 8)), ts+22995) + Xsqlite3ExprListCheckLength(tls, pParse, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-2)*24 + 8)), ts+23042) if *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-1)*24 + 8)) != 0 { var pFromClause uintptr = *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-1)*24 + 8)) if (*SrcList)(unsafe.Pointer(pFromClause)).FnSrc > 1 { @@ -112509,7 +112567,7 @@ *(*Token)(unsafe.Pointer(bp + 128)) = *(*Token)(unsafe.Pointer(yymsp + 8)) if int32((*Parse)(unsafe.Pointer(pParse)).Fnested) == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+23004, libc.VaList(bp+32, bp+128)) + Xsqlite3ErrorMsg(tls, pParse, ts+23051, libc.VaList(bp+32, bp+128)) *(*uintptr)(unsafe.Pointer(yymsp + 8)) = uintptr(0) } else { *(*uintptr)(unsafe.Pointer(yymsp + 8)) = Xsqlite3PExpr(tls, pParse, TK_REGISTER, uintptr(0), uintptr(0)) @@ -112726,9 +112784,9 @@ Xsqlite3ExprUnmapAndDelete(tls, pParse, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-4)*24 + 8))) 
*(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-4)*24 + 8)) = Xsqlite3Expr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, TK_STRING, func() uintptr { if *(*int32)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-3)*24 + 8)) != 0 { - return ts + 6764 + return ts + 7694 } - return ts + 6769 + return ts + 7699 }()) if *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-4)*24 + 8)) != 0 { Xsqlite3ExprIdToTrueFalse(tls, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-4)*24 + 8))) @@ -113012,19 +113070,19 @@ { *(*Token)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-2)*24 + 8)) = *(*Token)(unsafe.Pointer(yymsp + 8)) Xsqlite3ErrorMsg(tls, pParse, - ts+23028, 0) + ts+23075, 0) } break case uint32(271): { Xsqlite3ErrorMsg(tls, pParse, - ts+23123, 0) + ts+23170, 0) } break case uint32(272): { Xsqlite3ErrorMsg(tls, pParse, - ts+23207, 0) + ts+23254, 0) } break case uint32(273): @@ -113403,9 +113461,9 @@ _ = yymajor if *(*uint8)(unsafe.Pointer((*Token)(unsafe.Pointer(bp + 8)).Fz)) != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+23004, libc.VaList(bp, bp+8)) + Xsqlite3ErrorMsg(tls, pParse, ts+23051, libc.VaList(bp, bp+8)) } else { - Xsqlite3ErrorMsg(tls, pParse, ts+23292, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+23339, 0) } (*YyParser)(unsafe.Pointer(yypParser)).FpParse = pParse @@ -114173,7 +114231,7 @@ } else { (*Token)(unsafe.Pointer(bp + 2464)).Fz = zSql (*Token)(unsafe.Pointer(bp + 2464)).Fn = uint32(n) - Xsqlite3ErrorMsg(tls, pParse, ts+23309, libc.VaList(bp, bp+2464)) + Xsqlite3ErrorMsg(tls, pParse, ts+23356, libc.VaList(bp, bp+2464)) break } } @@ -114196,7 +114254,7 @@ if (*Parse)(unsafe.Pointer(pParse)).FzErrMsg == uintptr(0) { (*Parse)(unsafe.Pointer(pParse)).FzErrMsg = Xsqlite3MPrintf(tls, db, ts+3663, libc.VaList(bp+8, Xsqlite3ErrStr(tls, (*Parse)(unsafe.Pointer(pParse)).Frc))) } - Xsqlite3_log(tls, (*Parse)(unsafe.Pointer(pParse)).Frc, ts+23334, libc.VaList(bp+16, (*Parse)(unsafe.Pointer(pParse)).FzErrMsg, (*Parse)(unsafe.Pointer(pParse)).FzTail)) + Xsqlite3_log(tls, (*Parse)(unsafe.Pointer(pParse)).Frc, ts+23381, libc.VaList(bp+16, (*Parse)(unsafe.Pointer(pParse)).FzErrMsg, (*Parse)(unsafe.Pointer(pParse)).FzTail)) nErr++ } (*Parse)(unsafe.Pointer(pParse)).FzTail = zSql @@ -114369,7 +114427,7 @@ fallthrough case 'C': { - if nId == 6 && Xsqlite3_strnicmp(tls, zSql, ts+23345, 6) == 0 { + if nId == 6 && Xsqlite3_strnicmp(tls, zSql, ts+23392, 6) == 0 { token = U8(TkCREATE) } else { token = U8(TkOTHER) @@ -114382,11 +114440,11 @@ fallthrough case 'T': { - if nId == 7 && Xsqlite3_strnicmp(tls, zSql, ts+19931, 7) == 0 { + if nId == 7 && Xsqlite3_strnicmp(tls, zSql, ts+19978, 7) == 0 { token = U8(TkTRIGGER) - } else if nId == 4 && Xsqlite3_strnicmp(tls, zSql, ts+23352, 4) == 0 { + } else if nId == 4 && Xsqlite3_strnicmp(tls, zSql, ts+23399, 4) == 0 { token = U8(TkTEMP) - } else if nId == 9 && Xsqlite3_strnicmp(tls, zSql, ts+23357, 9) == 0 { + } else if nId == 9 && Xsqlite3_strnicmp(tls, zSql, ts+23404, 9) == 0 { token = U8(TkTEMP) } else { token = U8(TkOTHER) @@ -114399,9 +114457,9 @@ fallthrough case 'E': { - if nId == 3 && Xsqlite3_strnicmp(tls, zSql, ts+23367, 3) == 0 { + if nId == 3 && Xsqlite3_strnicmp(tls, zSql, ts+23414, 3) == 0 { token = U8(TkEND) - } else if nId == 7 && Xsqlite3_strnicmp(tls, zSql, ts+23371, 7) == 0 { + } else if nId == 7 && Xsqlite3_strnicmp(tls, zSql, ts+23418, 7) == 0 { token = U8(TkEXPLAIN) } else { token = U8(TkOTHER) @@ -114635,7 +114693,7 @@ var rc int32 = SQLITE_OK if Xsqlite3Config.FisInit != 0 { - return Xsqlite3MisuseError(tls, 174337) + return 
Xsqlite3MisuseError(tls, 174426) } ap = va @@ -115210,7 +115268,7 @@ return SQLITE_OK } if !(Xsqlite3SafetyCheckSickOrOk(tls, db) != 0) { - return Xsqlite3MisuseError(tls, 175111) + return Xsqlite3MisuseError(tls, 175200) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) if int32((*Sqlite3)(unsafe.Pointer(db)).FmTrace)&SQLITE_TRACE_CLOSE != 0 { @@ -115225,7 +115283,7 @@ if !(forceZombie != 0) && connectionIsBusy(tls, db) != 0 { Xsqlite3ErrorWithMsg(tls, db, SQLITE_BUSY, - ts+23379, 0) + ts+23426, 0) Xsqlite3_mutex_leave(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) return SQLITE_BUSY } @@ -115416,23 +115474,23 @@ // Return a static string that describes the kind of error specified in the // argument. func Xsqlite3ErrStr(tls *libc.TLS, rc int32) uintptr { - var zErr uintptr = ts + 23447 + var zErr uintptr = ts + 23494 switch rc { case SQLITE_ABORT | int32(2)<<8: { - zErr = ts + 23461 + zErr = ts + 23508 break } case SQLITE_ROW: { - zErr = ts + 23483 + zErr = ts + 23530 break } case SQLITE_DONE: { - zErr = ts + 23505 + zErr = ts + 23552 break } @@ -115450,35 +115508,35 @@ } var aMsg = [29]uintptr{ - ts + 23528, - ts + 23541, + ts + 23575, + ts + 23588, uintptr(0), - ts + 23557, - ts + 23582, - ts + 23596, - ts + 23615, + ts + 23604, + ts + 23629, + ts + 23643, + ts + 23662, ts + 1490, - ts + 23640, - ts + 23677, - ts + 23689, - ts + 23704, - ts + 23737, - ts + 23755, - ts + 23780, - ts + 23809, + ts + 23687, + ts + 23724, + ts + 23736, + ts + 23751, + ts + 23784, + ts + 23802, + ts + 23827, + ts + 23856, uintptr(0), ts + 5838, ts + 5334, - ts + 23826, - ts + 23844, - ts + 23862, - uintptr(0), - ts + 23896, + ts + 23873, + ts + 23891, + ts + 23909, uintptr(0), - ts + 23917, ts + 23943, - ts + 23966, - ts + 23987, + uintptr(0), + ts + 23964, + ts + 23990, + ts + 24013, + ts + 24034, } func sqliteDefaultBusyCallback(tls *libc.TLS, ptr uintptr, count int32) int32 { @@ -115599,7 +115657,7 @@ libc.Bool32(xValue == uintptr(0)) != libc.Bool32(xInverse == uintptr(0)) || (nArg < -1 || nArg > SQLITE_MAX_FUNCTION_ARG) || 255 < Xsqlite3Strlen30(tls, zFunctionName) { - return Xsqlite3MisuseError(tls, 175758) + return Xsqlite3MisuseError(tls, 175847) } extraFlags = enc & (SQLITE_DETERMINISTIC | SQLITE_DIRECTONLY | SQLITE_SUBTYPE | SQLITE_INNOCUOUS) @@ -115644,7 +115702,7 @@ if p != 0 && (*FuncDef)(unsafe.Pointer(p)).FfuncFlags&U32(SQLITE_FUNC_ENCMASK) == U32(enc) && int32((*FuncDef)(unsafe.Pointer(p)).FnArg) == nArg { if (*Sqlite3)(unsafe.Pointer(db)).FnVdbeActive != 0 { Xsqlite3ErrorWithMsg(tls, db, SQLITE_BUSY, - ts+24003, 0) + ts+24050, 0) return SQLITE_BUSY } else { @@ -115761,7 +115819,7 @@ _ = NotUsed _ = NotUsed2 zErr = Xsqlite3_mprintf(tls, - ts+24066, libc.VaList(bp, zName)) + ts+24113, libc.VaList(bp, zName)) Xsqlite3_result_error(tls, context, zErr, -1) Xsqlite3_free(tls, zErr) } @@ -115997,7 +116055,7 @@ } if iDb < 0 { rc = SQLITE_ERROR - Xsqlite3ErrorWithMsg(tls, db, SQLITE_ERROR, ts+24117, libc.VaList(bp, zDb)) + Xsqlite3ErrorWithMsg(tls, db, SQLITE_ERROR, ts+24164, libc.VaList(bp, zDb)) } else { (*Sqlite3)(unsafe.Pointer(db)).FbusyHandler.FnBusy = 0 rc = Xsqlite3Checkpoint(tls, db, iDb, eMode, pnLog, pnCkpt) @@ -116090,7 +116148,7 @@ return Xsqlite3ErrStr(tls, SQLITE_NOMEM) } if !(Xsqlite3SafetyCheckSickOrOk(tls, db) != 0) { - return Xsqlite3ErrStr(tls, Xsqlite3MisuseError(tls, 176503)) + return Xsqlite3ErrStr(tls, Xsqlite3MisuseError(tls, 176592)) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) if (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { @@ 
-116160,7 +116218,7 @@ // passed to this function, we assume a malloc() failed during sqlite3_open(). func Xsqlite3_errcode(tls *libc.TLS, db uintptr) int32 { if db != 0 && !(Xsqlite3SafetyCheckSickOrOk(tls, db) != 0) { - return Xsqlite3MisuseError(tls, 176582) + return Xsqlite3MisuseError(tls, 176671) } if !(db != 0) || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { return SQLITE_NOMEM @@ -116170,7 +116228,7 @@ func Xsqlite3_extended_errcode(tls *libc.TLS, db uintptr) int32 { if db != 0 && !(Xsqlite3SafetyCheckSickOrOk(tls, db) != 0) { - return Xsqlite3MisuseError(tls, 176591) + return Xsqlite3MisuseError(tls, 176680) } if !(db != 0) || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { return SQLITE_NOMEM @@ -116202,14 +116260,14 @@ enc2 = SQLITE_UTF16LE } if enc2 < SQLITE_UTF8 || enc2 > SQLITE_UTF16BE { - return Xsqlite3MisuseError(tls, 176639) + return Xsqlite3MisuseError(tls, 176728) } pColl = Xsqlite3FindCollSeq(tls, db, U8(enc2), zName, 0) if pColl != 0 && (*CollSeq)(unsafe.Pointer(pColl)).FxCmp != 0 { if (*Sqlite3)(unsafe.Pointer(db)).FnVdbeActive != 0 { Xsqlite3ErrorWithMsg(tls, db, SQLITE_BUSY, - ts+24138, 0) + ts+24185, 0) return SQLITE_BUSY } Xsqlite3ExpirePreparedStatements(tls, db, 0) @@ -116339,7 +116397,7 @@ if !((flags&uint32(SQLITE_OPEN_URI) != 0 || Xsqlite3Config.FbOpenUri != 0) && - nUri >= 5 && libc.Xmemcmp(tls, zUri, ts+24206, uint64(5)) == 0) { + nUri >= 5 && libc.Xmemcmp(tls, zUri, ts+24253, uint64(5)) == 0) { goto __1 } iOut = 0 @@ -116384,10 +116442,10 @@ goto __8 __9: ; - if !(iIn != 7 && (iIn != 16 || libc.Xmemcmp(tls, ts+24212, zUri+7, uint64(9)) != 0)) { + if !(iIn != 7 && (iIn != 16 || libc.Xmemcmp(tls, ts+24259, zUri+7, uint64(9)) != 0)) { goto __10 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24222, + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24269, libc.VaList(bp, iIn-7, zUri+7)) rc = SQLITE_ERROR goto parse_uri_out @@ -116492,7 +116550,7 @@ zVal = zOpt + uintptr(nOpt+1) nVal = Xsqlite3Strlen30(tls, zVal) - if !(nOpt == 3 && libc.Xmemcmp(tls, ts+24250, zOpt, uint64(3)) == 0) { + if !(nOpt == 3 && libc.Xmemcmp(tls, ts+24297, zOpt, uint64(3)) == 0) { goto __29 } zVfs = zVal @@ -116503,17 +116561,17 @@ mask = 0 limit = 0 - if !(nOpt == 5 && libc.Xmemcmp(tls, ts+24254, zOpt, uint64(5)) == 0) { + if !(nOpt == 5 && libc.Xmemcmp(tls, ts+24301, zOpt, uint64(5)) == 0) { goto __31 } mask = SQLITE_OPEN_SHAREDCACHE | SQLITE_OPEN_PRIVATECACHE aMode = uintptr(unsafe.Pointer(&aCacheMode)) limit = mask - zModeType = ts + 24254 + zModeType = ts + 24301 __31: ; - if !(nOpt == 4 && libc.Xmemcmp(tls, ts+24260, zOpt, uint64(4)) == 0) { + if !(nOpt == 4 && libc.Xmemcmp(tls, ts+24307, zOpt, uint64(4)) == 0) { goto __32 } @@ -116551,7 +116609,7 @@ if !(mode == 0) { goto __38 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24265, libc.VaList(bp+16, zModeType, zVal)) + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24312, libc.VaList(bp+16, zModeType, zVal)) rc = SQLITE_ERROR goto parse_uri_out __38: @@ -116559,7 +116617,7 @@ if !(mode&libc.CplInt32(SQLITE_OPEN_MEMORY) > limit) { goto __39 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24285, + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24332, libc.VaList(bp+32, zModeType, zVal)) rc = SQLITE_PERM goto parse_uri_out @@ -116599,7 +116657,7 @@ if !(*(*uintptr)(unsafe.Pointer(ppVfs)) == uintptr(0)) { goto __42 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24309, 
libc.VaList(bp+48, zVfs)) + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24356, libc.VaList(bp+48, zVfs)) rc = SQLITE_ERROR __42: ; @@ -116623,14 +116681,14 @@ } var aCacheMode = [3]OpenMode{ - {Fz: ts + 24325, Fmode: SQLITE_OPEN_SHAREDCACHE}, - {Fz: ts + 24332, Fmode: SQLITE_OPEN_PRIVATECACHE}, + {Fz: ts + 24372, Fmode: SQLITE_OPEN_SHAREDCACHE}, + {Fz: ts + 24379, Fmode: SQLITE_OPEN_PRIVATECACHE}, {}, } var aOpenMode = [5]OpenMode{ - {Fz: ts + 24340, Fmode: SQLITE_OPEN_READONLY}, - {Fz: ts + 24343, Fmode: SQLITE_OPEN_READWRITE}, - {Fz: ts + 24346, Fmode: SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE}, + {Fz: ts + 24387, Fmode: SQLITE_OPEN_READONLY}, + {Fz: ts + 24390, Fmode: SQLITE_OPEN_READWRITE}, + {Fz: ts + 24393, Fmode: SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE}, {Fz: ts + 17362, Fmode: SQLITE_OPEN_MEMORY}, {}, } @@ -116777,10 +116835,10 @@ createCollation(tls, db, uintptr(unsafe.Pointer(&Xsqlite3StrBINARY)), uint8(SQLITE_UTF16LE), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr, int32, uintptr) int32 }{binCollFunc})), uintptr(0)) - createCollation(tls, db, ts+21863, uint8(SQLITE_UTF8), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + createCollation(tls, db, ts+21910, uint8(SQLITE_UTF8), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr, int32, uintptr) int32 }{nocaseCollatingFunc})), uintptr(0)) - createCollation(tls, db, ts+24350, uint8(SQLITE_UTF8), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + createCollation(tls, db, ts+24397, uint8(SQLITE_UTF8), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr, int32, uintptr) int32 }{rtrimCollFunc})), uintptr(0)) if !((*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0) { @@ -116794,7 +116852,7 @@ if !(int32(1)<<(*(*uint32)(unsafe.Pointer(bp + 8))&uint32(7))&0x46 == 0) { goto __16 } - rc = Xsqlite3MisuseError(tls, 177308) + rc = Xsqlite3MisuseError(tls, 177397) goto __17 __16: rc = Xsqlite3ParseUri(tls, zVfs, zFilename, bp+8, db, bp+16, bp+24) @@ -116847,7 +116905,7 @@ (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb)).FzDbSName = ts + 6441 (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb)).Fsafety_level = U8(SQLITE_DEFAULT_SYNCHRONOUS + 1) - (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + 1*32)).FzDbSName = ts + 23352 + (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + 1*32)).FzDbSName = ts + 23399 (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + 1*32)).Fsafety_level = U8(PAGER_SYNCHRONOUS_OFF) (*Sqlite3)(unsafe.Pointer(db)).FeOpenState = U8(SQLITE_STATE_OPEN) @@ -116952,7 +117010,7 @@ return rc } if zFilename == uintptr(0) { - zFilename = ts + 24356 + zFilename = ts + 24403 } pVal = Xsqlite3ValueNew(tls, uintptr(0)) Xsqlite3ValueSetStr(tls, pVal, -1, zFilename, uint8(SQLITE_UTF16LE), uintptr(0)) @@ -117055,21 +117113,21 @@ bp := tls.Alloc(24) defer tls.Free(24) - Xsqlite3_log(tls, iErr, ts+24359, + Xsqlite3_log(tls, iErr, ts+24406, libc.VaList(bp, zType, lineno, uintptr(20)+Xsqlite3_sourceid(tls))) return iErr } func Xsqlite3CorruptError(tls *libc.TLS, lineno int32) int32 { - return Xsqlite3ReportError(tls, SQLITE_CORRUPT, lineno, ts+24384) + return Xsqlite3ReportError(tls, SQLITE_CORRUPT, lineno, ts+24431) } func Xsqlite3MisuseError(tls *libc.TLS, lineno int32) int32 { - return Xsqlite3ReportError(tls, SQLITE_MISUSE, lineno, ts+24404) + return Xsqlite3ReportError(tls, SQLITE_MISUSE, lineno, ts+24451) } func Xsqlite3CantopenError(tls *libc.TLS, 
lineno int32) int32 { - return Xsqlite3ReportError(tls, SQLITE_CANTOPEN, lineno, ts+24411) + return Xsqlite3ReportError(tls, SQLITE_CANTOPEN, lineno, ts+24458) } // This is a convenience routine that makes sure that all thread-specific @@ -117227,7 +117285,7 @@ goto __20 } Xsqlite3DbFree(tls, db, *(*uintptr)(unsafe.Pointer(bp + 24))) - *(*uintptr)(unsafe.Pointer(bp + 24)) = Xsqlite3MPrintf(tls, db, ts+24428, libc.VaList(bp, zTableName, + *(*uintptr)(unsafe.Pointer(bp + 24)) = Xsqlite3MPrintf(tls, db, ts+24475, libc.VaList(bp, zTableName, zColumnName)) rc = SQLITE_ERROR __20: @@ -117883,7 +117941,7 @@ azCompileOpt = Xsqlite3CompileOptions(tls, bp) - if Xsqlite3_strnicmp(tls, zOptName, ts+24456, 7) == 0 { + if Xsqlite3_strnicmp(tls, zOptName, ts+24503, 7) == 0 { zOptName += uintptr(7) } n = Xsqlite3Strlen30(tls, zOptName) @@ -118001,7 +118059,7 @@ Xsqlite3ErrorWithMsg(tls, db, rc, func() uintptr { if rc != 0 { - return ts + 24464 + return ts + 24511 } return uintptr(0) }(), 0) @@ -118179,7 +118237,7 @@ type JsonParse = JsonParse1 var jsonType = [8]uintptr{ - ts + 6181, ts + 6764, ts + 6769, ts + 6191, ts + 6186, ts + 8005, ts + 24487, ts + 24493, + ts + 6181, ts + 7694, ts + 7699, ts + 6191, ts + 6186, ts + 8005, ts + 24534, ts + 24540, } func jsonZero(tls *libc.TLS, p uintptr) { @@ -118332,7 +118390,7 @@ *(*uint8)(unsafe.Pointer((*JsonString)(unsafe.Pointer(p)).FzBuf + uintptr(libc.PostIncUint64(&(*JsonString)(unsafe.Pointer(p)).FnUsed, 1)))) = uint8('0') *(*uint8)(unsafe.Pointer((*JsonString)(unsafe.Pointer(p)).FzBuf + uintptr(libc.PostIncUint64(&(*JsonString)(unsafe.Pointer(p)).FnUsed, 1)))) = uint8('0') *(*uint8)(unsafe.Pointer((*JsonString)(unsafe.Pointer(p)).FzBuf + uintptr(libc.PostIncUint64(&(*JsonString)(unsafe.Pointer(p)).FnUsed, 1)))) = uint8('0' + int32(c)>>4) - c = *(*uint8)(unsafe.Pointer(ts + 24500 + uintptr(int32(c)&0xf))) + c = *(*uint8)(unsafe.Pointer(ts + 24547 + uintptr(int32(c)&0xf))) __8: ; __6: @@ -118387,7 +118445,7 @@ default: { if int32((*JsonString)(unsafe.Pointer(p)).FbErr) == 0 { - Xsqlite3_result_error(tls, (*JsonString)(unsafe.Pointer(p)).FpCtx, ts+24517, -1) + Xsqlite3_result_error(tls, (*JsonString)(unsafe.Pointer(p)).FpCtx, ts+24564, -1) (*JsonString)(unsafe.Pointer(p)).FbErr = U8(2) jsonReset(tls, p) } @@ -118451,13 +118509,13 @@ } case JSON_TRUE: { - jsonAppendRaw(tls, pOut, ts+6764, uint32(4)) + jsonAppendRaw(tls, pOut, ts+7694, uint32(4)) break } case JSON_FALSE: { - jsonAppendRaw(tls, pOut, ts+6769, uint32(5)) + jsonAppendRaw(tls, pOut, ts+7699, uint32(5)) break } @@ -119007,12 +119065,12 @@ jsonParseAddNode(tls, pParse, uint32(JSON_NULL), uint32(0), uintptr(0)) return int32(i + U32(4)) } else if int32(c) == 't' && - libc.Xstrncmp(tls, z+uintptr(i), ts+6764, uint64(4)) == 0 && + libc.Xstrncmp(tls, z+uintptr(i), ts+7694, uint64(4)) == 0 && !(int32(Xsqlite3CtypeMap[*(*uint8)(unsafe.Pointer(z + uintptr(i+U32(4))))])&0x06 != 0) { jsonParseAddNode(tls, pParse, uint32(JSON_TRUE), uint32(0), uintptr(0)) return int32(i + U32(4)) } else if int32(c) == 'f' && - libc.Xstrncmp(tls, z+uintptr(i), ts+6769, uint64(5)) == 0 && + libc.Xstrncmp(tls, z+uintptr(i), ts+7699, uint64(5)) == 0 && !(int32(Xsqlite3CtypeMap[*(*uint8)(unsafe.Pointer(z + uintptr(i+U32(5))))])&0x06 != 0) { jsonParseAddNode(tls, pParse, uint32(JSON_FALSE), uint32(0), uintptr(0)) return int32(i + U32(5)) @@ -119113,7 +119171,7 @@ if (*JsonParse)(unsafe.Pointer(pParse)).Foom != 0 { Xsqlite3_result_error_nomem(tls, pCtx) } else { - Xsqlite3_result_error(tls, pCtx, ts+24546, -1) + 
Xsqlite3_result_error(tls, pCtx, ts+24593, -1) } } jsonParseReset(tls, pParse) @@ -119419,7 +119477,7 @@ } if int32(*(*uint8)(unsafe.Pointer(zPath))) == '.' { jsonParseAddNode(tls, pParse, uint32(JSON_OBJECT), uint32(0), uintptr(0)) - } else if libc.Xstrncmp(tls, zPath, ts+24561, uint64(3)) == 0 { + } else if libc.Xstrncmp(tls, zPath, ts+24608, uint64(3)) == 0 { jsonParseAddNode(tls, pParse, uint32(JSON_ARRAY), uint32(0), uintptr(0)) } else { return uintptr(0) @@ -119434,7 +119492,7 @@ bp := tls.Alloc(8) defer tls.Free(8) - return Xsqlite3_mprintf(tls, ts+24565, libc.VaList(bp, zErr)) + return Xsqlite3_mprintf(tls, ts+24612, libc.VaList(bp, zErr)) } func jsonLookup(tls *libc.TLS, pParse uintptr, zPath uintptr, pApnd uintptr, pCtx uintptr) uintptr { @@ -119488,7 +119546,7 @@ bp := tls.Alloc(8) defer tls.Free(8) - var zMsg uintptr = Xsqlite3_mprintf(tls, ts+24591, + var zMsg uintptr = Xsqlite3_mprintf(tls, ts+24638, libc.VaList(bp, zFuncName)) Xsqlite3_result_error(tls, pCtx, zMsg, -1) Xsqlite3_free(tls, zMsg) @@ -119593,11 +119651,11 @@ if int32(*(*uint8)(unsafe.Pointer(zPath))) != '$' { jsonInit(tls, bp, ctx) if int32(Xsqlite3CtypeMap[*(*uint8)(unsafe.Pointer(zPath))])&0x04 != 0 { - jsonAppendRaw(tls, bp, ts+24634, uint32(2)) + jsonAppendRaw(tls, bp, ts+24681, uint32(2)) jsonAppendRaw(tls, bp, zPath, uint32(int32(libc.Xstrlen(tls, zPath)))) jsonAppendRaw(tls, bp, ts+4998, uint32(2)) } else { - jsonAppendRaw(tls, bp, ts+24637, uint32(1+libc.Bool32(int32(*(*uint8)(unsafe.Pointer(zPath))) != '['))) + jsonAppendRaw(tls, bp, ts+24684, uint32(1+libc.Bool32(int32(*(*uint8)(unsafe.Pointer(zPath))) != '['))) jsonAppendRaw(tls, bp, zPath, uint32(int32(libc.Xstrlen(tls, zPath)))) jsonAppendChar(tls, bp, uint8(0)) } @@ -119754,14 +119812,14 @@ if argc&1 != 0 { Xsqlite3_result_error(tls, ctx, - ts+24640, -1) + ts+24687, -1) return } jsonInit(tls, bp, ctx) jsonAppendChar(tls, bp, uint8('{')) for i = 0; i < argc; i = i + 2 { if Xsqlite3_value_type(tls, *(*uintptr)(unsafe.Pointer(argv + uintptr(i)*8))) != SQLITE_TEXT { - Xsqlite3_result_error(tls, ctx, ts+24691, -1) + Xsqlite3_result_error(tls, ctx, ts+24738, -1) jsonReset(tls, bp) return } @@ -119931,9 +119989,9 @@ } jsonWrongNumArgs(tls, ctx, func() uintptr { if bIsSet != 0 { - return ts + 24725 + return ts + 24772 } - return ts + 24729 + return ts + 24776 }()) return __2: @@ -120066,7 +120124,7 @@ (*JsonString)(unsafe.Pointer(pStr)).FnUsed-- } } else { - Xsqlite3_result_text(tls, ctx, ts+24736, 2, uintptr(0)) + Xsqlite3_result_text(tls, ctx, ts+24783, 2, uintptr(0)) } Xsqlite3_result_subtype(tls, ctx, uint32(JSON_SUBTYPE)) } @@ -120163,7 +120221,7 @@ (*JsonString)(unsafe.Pointer(pStr)).FnUsed-- } } else { - Xsqlite3_result_text(tls, ctx, ts+24739, 2, uintptr(0)) + Xsqlite3_result_text(tls, ctx, ts+24786, 2, uintptr(0)) } Xsqlite3_result_subtype(tls, ctx, uint32(JSON_SUBTYPE)) } @@ -120207,7 +120265,7 @@ _ = argc _ = pAux rc = Xsqlite3_declare_vtab(tls, db, - ts+24742) + ts+24789) if rc == SQLITE_OK { pNew = libc.AssignPtrUintptr(ppVtab, Xsqlite3_malloc(tls, int32(unsafe.Sizeof(Sqlite3_vtab{})))) if pNew == uintptr(0) { @@ -120338,7 +120396,7 @@ nn = nn - 2 } } - jsonPrintf(tls, nn+2, pStr, ts+24825, libc.VaList(bp, nn, z)) + jsonPrintf(tls, nn+2, pStr, ts+24872, libc.VaList(bp, nn, z)) } func jsonEachComputePath(tls *libc.TLS, p uintptr, pStr uintptr, i U32) { @@ -120357,7 +120415,7 @@ pNode = (*JsonEachCursor)(unsafe.Pointer(p)).FsParse.FaNode + uintptr(i)*16 pUp = (*JsonEachCursor)(unsafe.Pointer(p)).FsParse.FaNode + uintptr(iUp)*16 if 
int32((*JsonNode)(unsafe.Pointer(pUp)).FeType) == JSON_ARRAY { - jsonPrintf(tls, 30, pStr, ts+24831, libc.VaList(bp, *(*U32)(unsafe.Pointer(pUp + 8)))) + jsonPrintf(tls, 30, pStr, ts+24878, libc.VaList(bp, *(*U32)(unsafe.Pointer(pUp + 8)))) } else { if int32((*JsonNode)(unsafe.Pointer(pNode)).FjnFlags)&JNODE_LABEL == 0 { pNode -= 16 @@ -120453,7 +120511,7 @@ jsonAppendChar(tls, bp+8, uint8('$')) } if int32((*JsonEachCursor)(unsafe.Pointer(p)).FeType) == JSON_ARRAY { - jsonPrintf(tls, 30, bp+8, ts+24831, libc.VaList(bp, (*JsonEachCursor)(unsafe.Pointer(p)).FiRowid)) + jsonPrintf(tls, 30, bp+8, ts+24878, libc.VaList(bp, (*JsonEachCursor)(unsafe.Pointer(p)).FiRowid)) } else if int32((*JsonEachCursor)(unsafe.Pointer(p)).FeType) == JSON_OBJECT { jsonAppendObjectPathElement(tls, bp+8, pThis) } @@ -120477,7 +120535,7 @@ { var zRoot uintptr = (*JsonEachCursor)(unsafe.Pointer(p)).FzRoot if zRoot == uintptr(0) { - zRoot = ts + 24836 + zRoot = ts + 24883 } Xsqlite3_result_text(tls, ctx, zRoot, -1, uintptr(0)) break @@ -120599,7 +120657,7 @@ var rc int32 = SQLITE_NOMEM if int32((*JsonEachCursor)(unsafe.Pointer(p)).FsParse.Foom) == 0 { Xsqlite3_free(tls, (*Sqlite3_vtab)(unsafe.Pointer((*Sqlite3_vtab_cursor)(unsafe.Pointer(cur)).FpVtab)).FzErrMsg) - (*Sqlite3_vtab)(unsafe.Pointer((*Sqlite3_vtab_cursor)(unsafe.Pointer(cur)).FpVtab)).FzErrMsg = Xsqlite3_mprintf(tls, ts+24546, 0) + (*Sqlite3_vtab)(unsafe.Pointer((*Sqlite3_vtab_cursor)(unsafe.Pointer(cur)).FpVtab)).FzErrMsg = Xsqlite3_mprintf(tls, ts+24593, 0) if (*Sqlite3_vtab)(unsafe.Pointer((*Sqlite3_vtab_cursor)(unsafe.Pointer(cur)).FpVtab)).FzErrMsg != 0 { rc = SQLITE_ERROR } @@ -120694,25 +120752,25 @@ } var aJsonFunc = [19]FuncDef{ - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24838}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24843}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24854}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24854}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24872}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_JSON)), FxSFunc: 0, FzName: ts + 24885}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_SQL)), FxSFunc: 0, FzName: ts + 24888}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24892}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24904}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24916}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24927}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), 
FxSFunc: 0, FzName: ts + 24938}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24950}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_ISSET)), FxSFunc: 0, FzName: ts + 24963}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24972}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24972}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24982}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_UTF8 | 0*SQLITE_FUNC_NEEDCOLL | SQLITE_SUBTYPE | SQLITE_UTF8 | SQLITE_DETERMINISTIC), FxSFunc: 0, FxFinalize: 0, FxValue: 0, FxInverse: 0, FzName: ts + 24993}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_UTF8 | 0*SQLITE_FUNC_NEEDCOLL | SQLITE_SUBTYPE | SQLITE_UTF8 | SQLITE_DETERMINISTIC), FxSFunc: 0, FxFinalize: 0, FxValue: 0, FxInverse: 0, FzName: ts + 25010}} + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24885}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24890}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24901}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24901}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24919}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_JSON)), FxSFunc: 0, FzName: ts + 24932}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_SQL)), FxSFunc: 0, FzName: ts + 24935}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24939}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24951}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24963}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24974}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24985}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24997}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_ISSET)), FxSFunc: 0, FzName: ts + 25010}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | 
SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25019}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25019}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25029}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_UTF8 | 0*SQLITE_FUNC_NEEDCOLL | SQLITE_SUBTYPE | SQLITE_UTF8 | SQLITE_DETERMINISTIC), FxSFunc: 0, FxFinalize: 0, FxValue: 0, FxInverse: 0, FzName: ts + 25040}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_UTF8 | 0*SQLITE_FUNC_NEEDCOLL | SQLITE_SUBTYPE | SQLITE_UTF8 | SQLITE_DETERMINISTIC), FxSFunc: 0, FxFinalize: 0, FxValue: 0, FxInverse: 0, FzName: ts + 25057}} // Register the JSON table-valued functions func Xsqlite3JsonTableFunctions(tls *libc.TLS, db uintptr) int32 { @@ -120731,8 +120789,8 @@ FzName uintptr FpModule uintptr }{ - {FzName: ts + 25028, FpModule: 0}, - {FzName: ts + 25038, FpModule: 0}, + {FzName: ts + 25075, FpModule: 0}, + {FzName: ts + 25085, FpModule: 0}, } type Rtree1 = struct { @@ -120992,11 +121050,11 @@ } } if (*Rtree)(unsafe.Pointer(pRtree)).FpNodeBlob == uintptr(0) { - var zTab uintptr = Xsqlite3_mprintf(tls, ts+25048, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) + var zTab uintptr = Xsqlite3_mprintf(tls, ts+25095, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) if zTab == uintptr(0) { return SQLITE_NOMEM } - rc = Xsqlite3_blob_open(tls, (*Rtree)(unsafe.Pointer(pRtree)).Fdb, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, zTab, ts+25056, iNode, 0, + rc = Xsqlite3_blob_open(tls, (*Rtree)(unsafe.Pointer(pRtree)).Fdb, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, zTab, ts+25103, iNode, 0, pRtree+112) Xsqlite3_free(tls, zTab) } @@ -121207,7 +121265,7 @@ var pRtree uintptr = pVtab var rc int32 var zCreate uintptr = Xsqlite3_mprintf(tls, - ts+25061, + ts+25108, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) @@ -121905,7 +121963,7 @@ var pSrc uintptr var pInfo uintptr - pSrc = Xsqlite3_value_pointer(tls, pValue, ts+25143) + pSrc = Xsqlite3_value_pointer(tls, pValue, ts+25190) if pSrc == uintptr(0) { return SQLITE_ERROR } @@ -123246,7 +123304,7 @@ var zSql uintptr var rc int32 - zSql = Xsqlite3_mprintf(tls, ts+25157, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) + zSql = Xsqlite3_mprintf(tls, ts+25204, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) if zSql != 0 { rc = Xsqlite3_prepare_v2(tls, (*Rtree)(unsafe.Pointer(pRtree)).Fdb, zSql, -1, bp+56, uintptr(0)) } else { @@ -123258,12 +123316,12 @@ if iCol == 0 { var zCol uintptr = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), 0) (*Rtree)(unsafe.Pointer(pRtree)).Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+25177, libc.VaList(bp+16, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zCol)) + ts+25224, libc.VaList(bp+16, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zCol)) } else { var zCol1 uintptr = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), iCol) var zCol2 uintptr = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), iCol+1) (*Rtree)(unsafe.Pointer(pRtree)).Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+25209, 
libc.VaList(bp+32, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zCol1, zCol2)) + ts+25256, libc.VaList(bp+32, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zCol1, zCol2)) } } @@ -123489,7 +123547,7 @@ var pRtree uintptr = pVtab var rc int32 = SQLITE_NOMEM var zSql uintptr = Xsqlite3_mprintf(tls, - ts+25246, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName)) + ts+25293, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName)) if zSql != 0 { nodeBlobReset(tls, pRtree) rc = Xsqlite3_exec(tls, (*Rtree)(unsafe.Pointer(pRtree)).Fdb, zSql, uintptr(0), uintptr(0), uintptr(0)) @@ -123512,7 +123570,7 @@ bp := tls.Alloc(24) defer tls.Free(24) - var zFmt uintptr = ts + 25391 + var zFmt uintptr = ts + 25438 var zSql uintptr var rc int32 @@ -123560,7 +123618,7 @@ } var azName1 = [3]uintptr{ - ts + 25447, ts + 5060, ts + 16267, + ts + 25494, ts + 5060, ts + 16267, } var rtreeModule = Sqlite3_module{ @@ -123603,19 +123661,19 @@ var p uintptr = Xsqlite3_str_new(tls, db) var ii int32 Xsqlite3_str_appendf(tls, p, - ts+25452, + ts+25499, libc.VaList(bp, zDb, zPrefix)) for ii = 0; ii < int32((*Rtree)(unsafe.Pointer(pRtree)).FnAux); ii++ { - Xsqlite3_str_appendf(tls, p, ts+25514, libc.VaList(bp+16, ii)) + Xsqlite3_str_appendf(tls, p, ts+25561, libc.VaList(bp+16, ii)) } Xsqlite3_str_appendf(tls, p, - ts+25519, + ts+25566, libc.VaList(bp+24, zDb, zPrefix)) Xsqlite3_str_appendf(tls, p, - ts+25583, + ts+25630, libc.VaList(bp+40, zDb, zPrefix)) Xsqlite3_str_appendf(tls, p, - ts+25653, + ts+25700, libc.VaList(bp+56, zDb, zPrefix, (*Rtree)(unsafe.Pointer(pRtree)).FiNodeSize)) zCreate = Xsqlite3_str_finish(tls, p) if !(zCreate != 0) { @@ -123644,7 +123702,7 @@ if i != 3 || int32((*Rtree)(unsafe.Pointer(pRtree)).FnAux) == 0 { zFormat = azSql[i] } else { - zFormat = ts + 25702 + zFormat = ts + 25749 } zSql = Xsqlite3_mprintf(tls, zFormat, libc.VaList(bp+80, zDb, zPrefix)) if zSql != 0 { @@ -123656,7 +123714,7 @@ } if (*Rtree)(unsafe.Pointer(pRtree)).FnAux != 0 { (*Rtree)(unsafe.Pointer(pRtree)).FzReadAuxSql = Xsqlite3_mprintf(tls, - ts+25810, + ts+25857, libc.VaList(bp+96, zDb, zPrefix)) if (*Rtree)(unsafe.Pointer(pRtree)).FzReadAuxSql == uintptr(0) { rc = SQLITE_NOMEM @@ -123664,18 +123722,18 @@ var p uintptr = Xsqlite3_str_new(tls, db) var ii int32 var zSql uintptr - Xsqlite3_str_appendf(tls, p, ts+25855, libc.VaList(bp+112, zDb, zPrefix)) + Xsqlite3_str_appendf(tls, p, ts+25902, libc.VaList(bp+112, zDb, zPrefix)) for ii = 0; ii < int32((*Rtree)(unsafe.Pointer(pRtree)).FnAux); ii++ { if ii != 0 { Xsqlite3_str_append(tls, p, ts+12767, 1) } if ii < int32((*Rtree)(unsafe.Pointer(pRtree)).FnAuxNotNull) { - Xsqlite3_str_appendf(tls, p, ts+25882, libc.VaList(bp+128, ii, ii+2, ii)) + Xsqlite3_str_appendf(tls, p, ts+25929, libc.VaList(bp+128, ii, ii+2, ii)) } else { - Xsqlite3_str_appendf(tls, p, ts+25904, libc.VaList(bp+152, ii, ii+2)) + Xsqlite3_str_appendf(tls, p, ts+25951, libc.VaList(bp+152, ii, ii+2)) } } - Xsqlite3_str_appendf(tls, p, ts+25912, 0) + Xsqlite3_str_appendf(tls, p, ts+25959, 0) zSql = Xsqlite3_str_finish(tls, p) if zSql == uintptr(0) { rc = SQLITE_NOMEM @@ 
-123690,14 +123748,14 @@ } var azSql = [8]uintptr{ - ts + 25928, - ts + 25981, - ts + 26026, - ts + 26078, - ts + 26132, - ts + 26177, - ts + 26235, - ts + 26290, + ts + 25975, + ts + 26028, + ts + 26073, + ts + 26125, + ts + 26179, + ts + 26224, + ts + 26282, + ts + 26337, } func getIntFromStmt(tls *libc.TLS, db uintptr, zSql uintptr, piVal uintptr) int32 { @@ -123726,7 +123784,7 @@ var zSql uintptr if isCreate != 0 { *(*int32)(unsafe.Pointer(bp + 48)) = 0 - zSql = Xsqlite3_mprintf(tls, ts+26337, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb)) + zSql = Xsqlite3_mprintf(tls, ts+26384, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb)) rc = getIntFromStmt(tls, db, zSql, bp+48) if rc == SQLITE_OK { (*Rtree)(unsafe.Pointer(pRtree)).FiNodeSize = *(*int32)(unsafe.Pointer(bp + 48)) - 64 @@ -123738,7 +123796,7 @@ } } else { zSql = Xsqlite3_mprintf(tls, - ts+26357, + ts+26404, libc.VaList(bp+16, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) rc = getIntFromStmt(tls, db, zSql, pRtree+32) if rc != SQLITE_OK { @@ -123746,7 +123804,7 @@ } else if (*Rtree)(unsafe.Pointer(pRtree)).FiNodeSize < 512-64 { rc = SQLITE_CORRUPT | int32(1)<<8 - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+26414, + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+26461, libc.VaList(bp+40, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) } } @@ -123788,10 +123846,10 @@ ii = 4 *(*[5]uintptr)(unsafe.Pointer(bp + 96)) = [5]uintptr{ uintptr(0), - ts + 26449, - ts + 26492, - ts + 26527, - ts + 26563, + ts + 26496, + ts + 26539, + ts + 26574, + ts + 26610, } if !(argc < 6 || argc > RTREE_MAX_AUX_COLUMN+3) { @@ -123822,7 +123880,7 @@ libc.Xmemcpy(tls, (*Rtree)(unsafe.Pointer(pRtree)).FzName, *(*uintptr)(unsafe.Pointer(argv + 2*8)), uint64(nName)) pSql = Xsqlite3_str_new(tls, db) - Xsqlite3_str_appendf(tls, pSql, ts+26600, + Xsqlite3_str_appendf(tls, pSql, ts+26647, libc.VaList(bp+16, rtreeTokenLength(tls, *(*uintptr)(unsafe.Pointer(argv + 3*8))), *(*uintptr)(unsafe.Pointer(argv + 3*8)))) ii = 4 __3: @@ -123834,7 +123892,7 @@ goto __6 } (*Rtree)(unsafe.Pointer(pRtree)).FnAux++ - Xsqlite3_str_appendf(tls, pSql, ts+26624, libc.VaList(bp+32, rtreeTokenLength(tls, zArg+uintptr(1)), zArg+uintptr(1))) + Xsqlite3_str_appendf(tls, pSql, ts+26671, libc.VaList(bp+32, rtreeTokenLength(tls, zArg+uintptr(1)), zArg+uintptr(1))) goto __7 __6: if !(int32((*Rtree)(unsafe.Pointer(pRtree)).FnAux) > 0) { @@ -123857,7 +123915,7 @@ goto __5 __5: ; - Xsqlite3_str_appendf(tls, pSql, ts+26630, 0) + Xsqlite3_str_appendf(tls, pSql, ts+26677, 0) zSql = Xsqlite3_str_finish(tls, pSql) if !!(zSql != 0) { goto __10 @@ -123953,7 +124011,7 @@ return rc } -var azFormat = [2]uintptr{ts + 26633, ts + 26644} +var azFormat = [2]uintptr{ts + 26680, ts + 26691} func rtreenode(tls *libc.TLS, ctx uintptr, nArg int32, apArg uintptr) { bp := tls.Alloc(1072) @@ -123993,11 +124051,11 @@ if ii > 0 { Xsqlite3_str_append(tls, pOut, ts+10920, 1) } - Xsqlite3_str_appendf(tls, pOut, ts+26654, libc.VaList(bp, (*RtreeCell)(unsafe.Pointer(bp+1024)).FiRowid)) + Xsqlite3_str_appendf(tls, pOut, ts+26701, libc.VaList(bp, (*RtreeCell)(unsafe.Pointer(bp+1024)).FiRowid)) for jj = 0; jj < int32((*Rtree)(unsafe.Pointer(bp+56)).FnDim2); jj++ { - Xsqlite3_str_appendf(tls, pOut, ts+26660, libc.VaList(bp+8, float64(*(*RtreeValue)(unsafe.Pointer(bp + 1024 + 8 + uintptr(jj)*4))))) + Xsqlite3_str_appendf(tls, pOut, ts+26707, libc.VaList(bp+8, float64(*(*RtreeValue)(unsafe.Pointer(bp + 1024 + 8 + uintptr(jj)*4))))) } - 
Xsqlite3_str_append(tls, pOut, ts+26664, 1) + Xsqlite3_str_append(tls, pOut, ts+26711, 1) } errCode = Xsqlite3_str_errcode(tls, pOut) Xsqlite3_result_text(tls, ctx, Xsqlite3_str_finish(tls, pOut), -1, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{Xsqlite3_free}))) @@ -124008,7 +124066,7 @@ _ = nArg if Xsqlite3_value_type(tls, *(*uintptr)(unsafe.Pointer(apArg))) != SQLITE_BLOB || Xsqlite3_value_bytes(tls, *(*uintptr)(unsafe.Pointer(apArg))) < 2 { - Xsqlite3_result_error(tls, ctx, ts+26666, -1) + Xsqlite3_result_error(tls, ctx, ts+26713, -1) } else { var zBlob uintptr = Xsqlite3_value_blob(tls, *(*uintptr)(unsafe.Pointer(apArg))) if zBlob != 0 { @@ -124086,7 +124144,7 @@ if z == uintptr(0) { (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc = SQLITE_NOMEM } else { - (*RtreeCheck)(unsafe.Pointer(pCheck)).FzReport = Xsqlite3_mprintf(tls, ts+26699, + (*RtreeCheck)(unsafe.Pointer(pCheck)).FzReport = Xsqlite3_mprintf(tls, ts+26746, libc.VaList(bp, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzReport, func() uintptr { if (*RtreeCheck)(unsafe.Pointer(pCheck)).FzReport != 0 { return ts + 4062 @@ -124110,7 +124168,7 @@ if (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc == SQLITE_OK && (*RtreeCheck)(unsafe.Pointer(pCheck)).FpGetNode == uintptr(0) { (*RtreeCheck)(unsafe.Pointer(pCheck)).FpGetNode = rtreeCheckPrepare(tls, pCheck, - ts+26706, + ts+26753, libc.VaList(bp, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzDb, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzTab)) } @@ -124129,7 +124187,7 @@ } rtreeCheckReset(tls, pCheck, (*RtreeCheck)(unsafe.Pointer(pCheck)).FpGetNode) if (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc == SQLITE_OK && pRet == uintptr(0) { - rtreeCheckAppendMsg(tls, pCheck, ts+26751, libc.VaList(bp+16, iNode)) + rtreeCheckAppendMsg(tls, pCheck, ts+26798, libc.VaList(bp+16, iNode)) } } @@ -124143,8 +124201,8 @@ var rc int32 var pStmt uintptr *(*[2]uintptr)(unsafe.Pointer(bp + 80)) = [2]uintptr{ - ts + 26783, - ts + 26837, + ts + 26830, + ts + 26884, } if *(*uintptr)(unsafe.Pointer(pCheck + 40 + uintptr(bLeaf)*8)) == uintptr(0) { @@ -124159,23 +124217,23 @@ Xsqlite3_bind_int64(tls, pStmt, 1, iKey) rc = Xsqlite3_step(tls, pStmt) if rc == SQLITE_DONE { - rtreeCheckAppendMsg(tls, pCheck, ts+26885, + rtreeCheckAppendMsg(tls, pCheck, ts+26932, libc.VaList(bp+16, iKey, iVal, func() uintptr { if bLeaf != 0 { - return ts + 26930 + return ts + 26977 } - return ts + 26938 + return ts + 26985 }())) } else if rc == SQLITE_ROW { var ii I64 = Xsqlite3_column_int64(tls, pStmt, 0) if ii != iVal { rtreeCheckAppendMsg(tls, pCheck, - ts+26947, + ts+26994, libc.VaList(bp+40, iKey, ii, func() uintptr { if bLeaf != 0 { - return ts + 26930 + return ts + 26977 } - return ts + 26938 + return ts + 26985 }(), iKey, iVal)) } } @@ -124199,7 +124257,7 @@ return libc.Bool32(*(*RtreeValue)(unsafe.Pointer(bp + 48)) > *(*RtreeValue)(unsafe.Pointer(bp + 52))) }() != 0 { rtreeCheckAppendMsg(tls, pCheck, - ts+27005, libc.VaList(bp, i, iCell, iNode)) + ts+27052, libc.VaList(bp, i, iCell, iNode)) } if pParent != 0 { @@ -124219,7 +124277,7 @@ return libc.Bool32(*(*RtreeValue)(unsafe.Pointer(bp + 52)) > *(*RtreeValue)(unsafe.Pointer(bp + 60))) }() != 0 { rtreeCheckAppendMsg(tls, pCheck, - ts+27053, libc.VaList(bp+24, i, iCell, iNode)) + ts+27100, libc.VaList(bp+24, i, iCell, iNode)) } } } @@ -124236,14 +124294,14 @@ if aNode != 0 { if *(*int32)(unsafe.Pointer(bp + 48)) < 4 { rtreeCheckAppendMsg(tls, pCheck, - ts+27120, libc.VaList(bp, iNode, *(*int32)(unsafe.Pointer(bp + 48)))) + ts+27167, libc.VaList(bp, iNode, *(*int32)(unsafe.Pointer(bp + 
48)))) } else { var nCell int32 var i int32 if aParent == uintptr(0) { iDepth = readInt16(tls, aNode) if iDepth > RTREE_MAX_DEPTH { - rtreeCheckAppendMsg(tls, pCheck, ts+27154, libc.VaList(bp+16, iDepth)) + rtreeCheckAppendMsg(tls, pCheck, ts+27201, libc.VaList(bp+16, iDepth)) Xsqlite3_free(tls, aNode) return } @@ -124251,7 +124309,7 @@ nCell = readInt16(tls, aNode+2) if 4+nCell*(8+(*RtreeCheck)(unsafe.Pointer(pCheck)).FnDim*2*4) > *(*int32)(unsafe.Pointer(bp + 48)) { rtreeCheckAppendMsg(tls, pCheck, - ts+27184, + ts+27231, libc.VaList(bp+24, iNode, nCell, *(*int32)(unsafe.Pointer(bp + 48)))) } else { for i = 0; i < nCell; i++ { @@ -124280,14 +124338,14 @@ if (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc == SQLITE_OK { var pCount uintptr - pCount = rtreeCheckPrepare(tls, pCheck, ts+27239, + pCount = rtreeCheckPrepare(tls, pCheck, ts+27286, libc.VaList(bp, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzDb, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzTab, zTbl)) if pCount != 0 { if Xsqlite3_step(tls, pCount) == SQLITE_ROW { var nActual I64 = Xsqlite3_column_int64(tls, pCount, 0) if nActual != nExpect { rtreeCheckAppendMsg(tls, pCheck, - ts+27270, libc.VaList(bp+24, zTbl, nExpect, nActual)) + ts+27317, libc.VaList(bp+24, zTbl, nExpect, nActual)) } } (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc = Xsqlite3_finalize(tls, pCount) @@ -124314,7 +124372,7 @@ } if (*RtreeCheck)(unsafe.Pointer(bp+32)).Frc == SQLITE_OK { - pStmt = rtreeCheckPrepare(tls, bp+32, ts+27337, libc.VaList(bp, zDb, zTab)) + pStmt = rtreeCheckPrepare(tls, bp+32, ts+27384, libc.VaList(bp, zDb, zTab)) if pStmt != 0 { nAux = Xsqlite3_column_count(tls, pStmt) - 2 Xsqlite3_finalize(tls, pStmt) @@ -124323,12 +124381,12 @@ } } - pStmt = rtreeCheckPrepare(tls, bp+32, ts+25157, libc.VaList(bp+16, zDb, zTab)) + pStmt = rtreeCheckPrepare(tls, bp+32, ts+25204, libc.VaList(bp+16, zDb, zTab)) if pStmt != 0 { var rc int32 (*RtreeCheck)(unsafe.Pointer(bp + 32)).FnDim = (Xsqlite3_column_count(tls, pStmt) - 1 - nAux) / 2 if (*RtreeCheck)(unsafe.Pointer(bp+32)).FnDim < 1 { - rtreeCheckAppendMsg(tls, bp+32, ts+27365, 0) + rtreeCheckAppendMsg(tls, bp+32, ts+27412, 0) } else if SQLITE_ROW == Xsqlite3_step(tls, pStmt) { (*RtreeCheck)(unsafe.Pointer(bp + 32)).FbInt = libc.Bool32(Xsqlite3_column_type(tls, pStmt, 1) == SQLITE_INTEGER) } @@ -124342,8 +124400,8 @@ if (*RtreeCheck)(unsafe.Pointer(bp+32)).Frc == SQLITE_OK { rtreeCheckNode(tls, bp+32, 0, uintptr(0), int64(1)) } - rtreeCheckCount(tls, bp+32, ts+27396, int64((*RtreeCheck)(unsafe.Pointer(bp+32)).FnLeaf)) - rtreeCheckCount(tls, bp+32, ts+27403, int64((*RtreeCheck)(unsafe.Pointer(bp+32)).FnNonLeaf)) + rtreeCheckCount(tls, bp+32, ts+27443, int64((*RtreeCheck)(unsafe.Pointer(bp+32)).FnLeaf)) + rtreeCheckCount(tls, bp+32, ts+27450, int64((*RtreeCheck)(unsafe.Pointer(bp+32)).FnNonLeaf)) } Xsqlite3_finalize(tls, (*RtreeCheck)(unsafe.Pointer(bp+32)).FpGetNode) @@ -124351,7 +124409,7 @@ Xsqlite3_finalize(tls, *(*uintptr)(unsafe.Pointer(bp + 32 + 40 + 1*8))) if bEnd != 0 { - var rc int32 = Xsqlite3_exec(tls, db, ts+27411, uintptr(0), uintptr(0), uintptr(0)) + var rc int32 = Xsqlite3_exec(tls, db, ts+27458, uintptr(0), uintptr(0), uintptr(0)) if (*RtreeCheck)(unsafe.Pointer(bp+32)).Frc == SQLITE_OK { (*RtreeCheck)(unsafe.Pointer(bp + 32)).Frc = rc } @@ -124366,7 +124424,7 @@ if nArg != 1 && nArg != 2 { Xsqlite3_result_error(tls, ctx, - ts+27415, -1) + ts+27462, -1) } else { var rc int32 *(*uintptr)(unsafe.Pointer(bp)) = uintptr(0) @@ -124384,7 +124442,7 @@ if *(*uintptr)(unsafe.Pointer(bp)) != 0 { return 
*(*uintptr)(unsafe.Pointer(bp)) } - return ts + 18012 + return ts + 18059 }(), -1, libc.UintptrFromInt32(-1)) } else { Xsqlite3_result_error_code(tls, ctx, rc) @@ -124755,11 +124813,11 @@ var db uintptr = Xsqlite3_context_db_handle(tls, context) var x uintptr = Xsqlite3_str_new(tls, db) var i int32 - Xsqlite3_str_append(tls, x, ts+27466, 1) + Xsqlite3_str_append(tls, x, ts+27513, 1) for i = 0; i < (*GeoPoly)(unsafe.Pointer(p)).FnVertex; i++ { - Xsqlite3_str_appendf(tls, x, ts+27468, libc.VaList(bp, float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2)*4))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2+1)*4))))) + Xsqlite3_str_appendf(tls, x, ts+27515, libc.VaList(bp, float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2)*4))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2+1)*4))))) } - Xsqlite3_str_appendf(tls, x, ts+27479, libc.VaList(bp+16, float64(*(*GeoCoord)(unsafe.Pointer(p + 8))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + 1*4))))) + Xsqlite3_str_appendf(tls, x, ts+27526, libc.VaList(bp+16, float64(*(*GeoCoord)(unsafe.Pointer(p + 8))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + 1*4))))) Xsqlite3_result_text(tls, context, Xsqlite3_str_finish(tls, x), -1, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{Xsqlite3_free}))) Xsqlite3_free(tls, p) } @@ -124779,19 +124837,19 @@ var x uintptr = Xsqlite3_str_new(tls, db) var i int32 var cSep uint8 = uint8('\'') - Xsqlite3_str_appendf(tls, x, ts+27490, 0) + Xsqlite3_str_appendf(tls, x, ts+27537, 0) for i = 0; i < (*GeoPoly)(unsafe.Pointer(p)).FnVertex; i++ { - Xsqlite3_str_appendf(tls, x, ts+27508, libc.VaList(bp, int32(cSep), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2)*4))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2+1)*4))))) + Xsqlite3_str_appendf(tls, x, ts+27555, libc.VaList(bp, int32(cSep), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2)*4))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2+1)*4))))) cSep = uint8(' ') } - Xsqlite3_str_appendf(tls, x, ts+27516, libc.VaList(bp+24, float64(*(*GeoCoord)(unsafe.Pointer(p + 8))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + 1*4))))) + Xsqlite3_str_appendf(tls, x, ts+27563, libc.VaList(bp+24, float64(*(*GeoCoord)(unsafe.Pointer(p + 8))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + 1*4))))) for i = 1; i < argc; i++ { var z uintptr = Xsqlite3_value_text(tls, *(*uintptr)(unsafe.Pointer(argv + uintptr(i)*8))) if z != 0 && *(*uint8)(unsafe.Pointer(z)) != 0 { - Xsqlite3_str_appendf(tls, x, ts+27524, libc.VaList(bp+40, z)) + Xsqlite3_str_appendf(tls, x, ts+27571, libc.VaList(bp+40, z)) } } - Xsqlite3_str_appendf(tls, x, ts+27528, 0) + Xsqlite3_str_appendf(tls, x, ts+27575, 0) Xsqlite3_result_text(tls, context, Xsqlite3_str_finish(tls, x), -1, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{Xsqlite3_free}))) Xsqlite3_free(tls, p) } @@ -125711,7 +125769,7 @@ libc.Xmemcpy(tls, (*Rtree)(unsafe.Pointer(pRtree)).FzName, *(*uintptr)(unsafe.Pointer(argv + 2*8)), uint64(nName)) pSql = Xsqlite3_str_new(tls, db) - Xsqlite3_str_appendf(tls, pSql, ts+27541, 0) + Xsqlite3_str_appendf(tls, pSql, ts+27588, 0) (*Rtree)(unsafe.Pointer(pRtree)).FnAux = U8(1) (*Rtree)(unsafe.Pointer(pRtree)).FnAuxNotNull = U8(1) ii = 3 @@ -125720,7 +125778,7 @@ goto __4 } (*Rtree)(unsafe.Pointer(pRtree)).FnAux++ - Xsqlite3_str_appendf(tls, pSql, ts+27563, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(argv + uintptr(ii)*8)))) + Xsqlite3_str_appendf(tls, pSql, ts+27610, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(argv + uintptr(ii)*8)))) 
goto __3 __3: ii++ @@ -125728,7 +125786,7 @@ goto __4 __4: ; - Xsqlite3_str_appendf(tls, pSql, ts+26630, 0) + Xsqlite3_str_appendf(tls, pSql, ts+26677, 0) zSql = Xsqlite3_str_finish(tls, pSql) if !!(zSql != 0) { goto __5 @@ -125965,7 +126023,7 @@ } if iFuncTerm >= 0 { (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxNum = idxNum - (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxStr = ts + 27567 + (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxStr = ts + 27614 (*sqlite3_index_constraint_usage)(unsafe.Pointer((*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FaConstraintUsage + uintptr(iFuncTerm)*8)).FargvIndex = 1 (*sqlite3_index_constraint_usage)(unsafe.Pointer((*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FaConstraintUsage + uintptr(iFuncTerm)*8)).Fomit = uint8(0) (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FestimatedCost = 300.0 @@ -125973,7 +126031,7 @@ return SQLITE_OK } (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxNum = 4 - (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxStr = ts + 27573 + (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxStr = ts + 27620 (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FestimatedCost = 3000000.0 (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FestimatedRows = int64(100000) return SQLITE_OK @@ -126085,7 +126143,7 @@ if !(*(*int32)(unsafe.Pointer(bp + 48)) == SQLITE_ERROR) { goto __4 } - (*Sqlite3_vtab)(unsafe.Pointer(pVtab)).FzErrMsg = Xsqlite3_mprintf(tls, ts+27582, 0) + (*Sqlite3_vtab)(unsafe.Pointer(pVtab)).FzErrMsg = Xsqlite3_mprintf(tls, ts+27629, 0) __4: ; goto geopoly_update_end @@ -126217,14 +126275,14 @@ func geopolyFindFunction(tls *libc.TLS, pVtab uintptr, nArg int32, zName uintptr, pxFunc uintptr, ppArg uintptr) int32 { _ = pVtab _ = nArg - if Xsqlite3_stricmp(tls, zName, ts+27622) == 0 { + if Xsqlite3_stricmp(tls, zName, ts+27669) == 0 { *(*uintptr)(unsafe.Pointer(pxFunc)) = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{geopolyOverlapFunc})) *(*uintptr)(unsafe.Pointer(ppArg)) = uintptr(0) return SQLITE_INDEX_CONSTRAINT_FUNCTION } - if Xsqlite3_stricmp(tls, zName, ts+27638) == 0 { + if Xsqlite3_stricmp(tls, zName, ts+27685) == 0 { *(*uintptr)(unsafe.Pointer(pxFunc)) = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{geopolyWithinFunc})) @@ -126289,7 +126347,7 @@ uintptr(0), aAgg[i].FxStep, aAgg[i].FxFinal) } if rc == SQLITE_OK { - rc = Xsqlite3_create_module_v2(tls, db, ts+27653, uintptr(unsafe.Pointer(&geopolyModule)), uintptr(0), uintptr(0)) + rc = Xsqlite3_create_module_v2(tls, db, ts+27700, uintptr(unsafe.Pointer(&geopolyModule)), uintptr(0), uintptr(0)) } return rc } @@ -126301,25 +126359,25 @@ F__ccgo_pad1 [6]byte FzName uintptr }{ - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27661}, - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27674}, - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27687}, - {FxFunc: 0, FnArg: int8(-1), FbPure: uint8(1), FzName: ts + 27700}, - {FxFunc: 0, FnArg: int8(2), FbPure: uint8(1), FzName: ts + 27638}, - {FxFunc: 0, FnArg: int8(3), FbPure: uint8(1), FzName: ts + 27712}, - {FxFunc: 0, FnArg: int8(2), FbPure: uint8(1), FzName: ts + 27622}, - {FxFunc: 0, FnArg: int8(1), FzName: ts + 27735}, - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27749}, - {FxFunc: 0, FnArg: int8(7), FbPure: uint8(1), FzName: ts + 27762}, - {FxFunc: 0, FnArg: int8(4), FbPure: uint8(1), FzName: ts + 27776}, - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27792}, + {FxFunc: 
0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27708}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27721}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27734}, + {FxFunc: 0, FnArg: int8(-1), FbPure: uint8(1), FzName: ts + 27747}, + {FxFunc: 0, FnArg: int8(2), FbPure: uint8(1), FzName: ts + 27685}, + {FxFunc: 0, FnArg: int8(3), FbPure: uint8(1), FzName: ts + 27759}, + {FxFunc: 0, FnArg: int8(2), FbPure: uint8(1), FzName: ts + 27669}, + {FxFunc: 0, FnArg: int8(1), FzName: ts + 27782}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27796}, + {FxFunc: 0, FnArg: int8(7), FbPure: uint8(1), FzName: ts + 27809}, + {FxFunc: 0, FnArg: int8(4), FbPure: uint8(1), FzName: ts + 27823}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27839}, } var aAgg = [1]struct { FxStep uintptr FxFinal uintptr FzName uintptr }{ - {FxStep: 0, FxFinal: 0, FzName: ts + 27804}, + {FxStep: 0, FxFinal: 0, FzName: ts + 27851}, } // Register the r-tree module with database handle db. This creates the @@ -126329,26 +126387,26 @@ var utf8 int32 = SQLITE_UTF8 var rc int32 - rc = Xsqlite3_create_function(tls, db, ts+27823, 2, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + rc = Xsqlite3_create_function(tls, db, ts+27870, 2, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rtreenode})), uintptr(0), uintptr(0)) if rc == SQLITE_OK { - rc = Xsqlite3_create_function(tls, db, ts+27833, 1, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + rc = Xsqlite3_create_function(tls, db, ts+27880, 1, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rtreedepth})), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { - rc = Xsqlite3_create_function(tls, db, ts+27844, -1, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + rc = Xsqlite3_create_function(tls, db, ts+27891, -1, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rtreecheck})), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { var c uintptr = uintptr(RTREE_COORD_REAL32) - rc = Xsqlite3_create_module_v2(tls, db, ts+27567, uintptr(unsafe.Pointer(&rtreeModule)), c, uintptr(0)) + rc = Xsqlite3_create_module_v2(tls, db, ts+27614, uintptr(unsafe.Pointer(&rtreeModule)), c, uintptr(0)) } if rc == SQLITE_OK { var c uintptr = uintptr(RTREE_COORD_INT32) - rc = Xsqlite3_create_module_v2(tls, db, ts+27855, uintptr(unsafe.Pointer(&rtreeModule)), c, uintptr(0)) + rc = Xsqlite3_create_module_v2(tls, db, ts+27902, uintptr(unsafe.Pointer(&rtreeModule)), c, uintptr(0)) } if rc == SQLITE_OK { rc = sqlite3_geopoly_init(tls, db) @@ -126402,7 +126460,7 @@ Xsqlite3_result_error_nomem(tls, ctx) rtreeMatchArgFree(tls, pBlob) } else { - Xsqlite3_result_pointer(tls, ctx, pBlob, ts+25143, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{rtreeMatchArgFree}))) + Xsqlite3_result_pointer(tls, ctx, pBlob, ts+25190, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{rtreeMatchArgFree}))) } } } @@ -126729,7 +126787,7 @@ nOut = rbuDeltaOutputSize(tls, aDelta, nDelta) if nOut < 0 { - Xsqlite3_result_error(tls, context, ts+27865, -1) + Xsqlite3_result_error(tls, context, ts+27912, -1) return } @@ -126740,7 +126798,7 @@ nOut2 = rbuDeltaApply(tls, aOrig, nOrig, aDelta, nDelta, aOut) if nOut2 != nOut { Xsqlite3_free(tls, aOut) - Xsqlite3_result_error(tls, context, ts+27865, -1) + Xsqlite3_result_error(tls, context, ts+27912, -1) } else { Xsqlite3_result_blob(tls, context, aOut, 
nOut, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{Xsqlite3_free}))) } @@ -126841,7 +126899,7 @@ rbuObjIterClearStatements(tls, pIter) if (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx == uintptr(0) { rc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+27886, uintptr(0), uintptr(0), p+64) + ts+27933, uintptr(0), uintptr(0), p+64) } if rc == SQLITE_OK { @@ -126905,7 +126963,7 @@ Xsqlite3_result_text(tls, pCtx, zIn, -1, uintptr(0)) } } else { - if libc.Xstrlen(tls, zIn) > uint64(4) && libc.Xmemcmp(tls, ts+25056, zIn, uint64(4)) == 0 { + if libc.Xstrlen(tls, zIn) > uint64(4) && libc.Xmemcmp(tls, ts+25103, zIn, uint64(4)) == 0 { var i int32 for i = 4; int32(*(*uint8)(unsafe.Pointer(zIn + uintptr(i)))) >= '0' && int32(*(*uint8)(unsafe.Pointer(zIn + uintptr(i)))) <= '9'; i++ { } @@ -126926,16 +126984,16 @@ rc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, pIter, p+64, Xsqlite3_mprintf(tls, - ts+28057, libc.VaList(bp, func() uintptr { + ts+28104, libc.VaList(bp, func() uintptr { if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - return ts + 28207 + return ts + 28254 } return ts + 1554 }()))) if rc == SQLITE_OK { rc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+8, p+64, - ts+28248) + ts+28295) } (*RbuObjIter)(unsafe.Pointer(pIter)).FbCleanup = 1 @@ -127051,7 +127109,7 @@ (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+32, p+64, Xsqlite3_mprintf(tls, - ts+28373, libc.VaList(bp, zTab))) + ts+28420, libc.VaList(bp, zTab))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc != SQLITE_OK || Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 32))) != SQLITE_ROW) { goto __1 } @@ -127069,7 +127127,7 @@ *(*int32)(unsafe.Pointer(piTnum)) = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 32)), 1) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+32+1*8, p+64, - Xsqlite3_mprintf(tls, ts+28492, libc.VaList(bp+8, zTab))) + Xsqlite3_mprintf(tls, ts+28539, libc.VaList(bp+8, zTab))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc != 0) { goto __3 } @@ -127087,7 +127145,7 @@ } (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+32+2*8, p+64, Xsqlite3_mprintf(tls, - ts+28513, libc.VaList(bp+16, zIdx))) + ts+28560, libc.VaList(bp+16, zIdx))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK) { goto __7 } @@ -127110,7 +127168,7 @@ __5: ; (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+32+3*8, p+64, - Xsqlite3_mprintf(tls, ts+28564, libc.VaList(bp+24, zTab))) + Xsqlite3_mprintf(tls, ts+28611, libc.VaList(bp+24, zTab))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK) { goto __10 } @@ -127156,7 +127214,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { libc.Xmemcpy(tls, (*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed, (*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk, uint64(unsafe.Sizeof(U8(0)))*uint64((*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol)) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+16, p+64, - Xsqlite3_mprintf(tls, ts+28585, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) + Xsqlite3_mprintf(tls, ts+28632, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) } (*RbuObjIter)(unsafe.Pointer(pIter)).FnIndex = 0 @@ -127171,7 +127229,7 @@ 
libc.Xmemset(tls, (*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed, 0x01, uint64(unsafe.Sizeof(U8(0)))*uint64((*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol)) } (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+24, p+64, - Xsqlite3_mprintf(tls, ts+28613, libc.VaList(bp+8, zIdx))) + Xsqlite3_mprintf(tls, ts+28660, libc.VaList(bp+8, zIdx))) for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) { var iCid int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 24)), 1) if iCid >= 0 { @@ -127211,7 +127269,7 @@ rbuTableType(tls, p, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, pIter+72, bp+56, pIter+108) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NOTABLE { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+19489, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl)) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+19536, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl)) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc != 0 { return (*Sqlite3rbu)(unsafe.Pointer(p)).Frc @@ -127221,18 +127279,18 @@ } (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp+64, p+64, - Xsqlite3_mprintf(tls, ts+28642, libc.VaList(bp+8, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl))) + Xsqlite3_mprintf(tls, ts+28689, libc.VaList(bp+8, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl))) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { nCol = Xsqlite3_column_count(tls, *(*uintptr)(unsafe.Pointer(bp + 64))) rbuAllocateIterArrays(tls, p, pIter, nCol) } for i = 0; (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && i < nCol; i++ { var zName uintptr = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp + 64)), i) - if Xsqlite3_strnicmp(tls, ts+28661, zName, 4) != 0 { + if Xsqlite3_strnicmp(tls, ts+28708, zName, 4) != 0 { var zCopy uintptr = rbuStrndup(tls, zName, p+56) *(*int32)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FaiSrcOrder + uintptr((*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol)*4)) = (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(libc.PostIncInt32(&(*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol, 1))*8)) = zCopy - } else if 0 == Xsqlite3_stricmp(tls, ts+28666, zName) { + } else if 0 == Xsqlite3_stricmp(tls, ts+28713, zName) { bRbuRowid = 1 } } @@ -127244,18 +127302,18 @@ bRbuRowid != libc.Bool32((*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_VTAB || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE) { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, - ts+28676, libc.VaList(bp+16, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, + ts+28723, libc.VaList(bp+16, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, func() uintptr { if bRbuRowid != 0 { - return ts + 28705 + return ts + 28752 } - return ts + 28718 + return ts + 28765 }())) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+64, p+64, - Xsqlite3_mprintf(tls, ts+28727, libc.VaList(bp+32, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) + Xsqlite3_mprintf(tls, ts+28774, libc.VaList(bp+32, 
(*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) } for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 64))) { var zName uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 64)), 1) @@ -127269,7 +127327,7 @@ } if i == (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+28749, + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+28796, libc.VaList(bp+40, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zName)) } else { var iPk int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 64)), 5) @@ -127316,7 +127374,7 @@ var i int32 for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { var z uintptr = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) - zList = rbuMPrintf(tls, p, ts+28776, libc.VaList(bp, zList, zSep, z)) + zList = rbuMPrintf(tls, p, ts+28823, libc.VaList(bp, zList, zSep, z)) zSep = ts + 14614 } return zList @@ -127334,7 +127392,7 @@ for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { if int32(*(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(i)))) == iPk { var zCol uintptr = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) - zRet = rbuMPrintf(tls, p, ts+28785, libc.VaList(bp, zRet, zSep, zPre, zCol, zPost)) + zRet = rbuMPrintf(tls, p, ts+28832, libc.VaList(bp, zRet, zSep, zPre, zCol, zPost)) zSep = zSeparator break } @@ -127356,25 +127414,25 @@ if bRowid != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+72, p+64, Xsqlite3_mprintf(tls, - ts+28798, libc.VaList(bp, zWrite, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) + ts+28845, libc.VaList(bp, zWrite, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 72))) { var iMax Sqlite3_int64 = Xsqlite3_column_int64(tls, *(*uintptr)(unsafe.Pointer(bp + 72)), 0) - zRet = rbuMPrintf(tls, p, ts+28830, libc.VaList(bp+16, iMax)) + zRet = rbuMPrintf(tls, p, ts+28877, libc.VaList(bp+16, iMax)) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 72))) } else { - var zOrder uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+1554, ts+14614, ts+28853) - var zSelect uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+28859, ts+28866, ts+4957) + var zOrder uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+1554, ts+14614, ts+28900) + var zSelect uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+28906, ts+28913, ts+4957) var zList uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+1554, ts+14614, ts+1554) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+72, p+64, Xsqlite3_mprintf(tls, - ts+28874, + ts+28921, libc.VaList(bp+24, zSelect, zWrite, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zOrder))) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 72))) { var zVal uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 72)), 0) - zRet = rbuMPrintf(tls, p, ts+28916, libc.VaList(bp+56, zList, zVal)) + zRet = rbuMPrintf(tls, p, ts+28963, libc.VaList(bp+56, zList, zVal)) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 
72))) } @@ -127416,7 +127474,7 @@ *(*uintptr)(unsafe.Pointer(bp + 176)) = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+176, p+64, - Xsqlite3_mprintf(tls, ts+28613, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx))) + Xsqlite3_mprintf(tls, ts+28660, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx))) __1: if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 176)))) { goto __2 @@ -127451,7 +127509,7 @@ zCol = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) goto __7 __6: - zCol = ts + 28936 + zCol = ts + 28983 __7: ; goto __5 @@ -127459,11 +127517,11 @@ zCol = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(iCid)*8)) __5: ; - zLhs = rbuMPrintf(tls, p, ts+28944, + zLhs = rbuMPrintf(tls, p, ts+28991, libc.VaList(bp+8, zLhs, zSep, zCol, zCollate)) - zOrder = rbuMPrintf(tls, p, ts+28965, + zOrder = rbuMPrintf(tls, p, ts+29012, libc.VaList(bp+40, zOrder, zSep, iCol, zCol, zCollate)) - zSelect = rbuMPrintf(tls, p, ts+29001, + zSelect = rbuMPrintf(tls, p, ts+29048, libc.VaList(bp+80, zSelect, zSep, iCol, zCol)) zSep = ts + 14614 iCol++ @@ -127483,7 +127541,7 @@ *(*uintptr)(unsafe.Pointer(bp + 184)) = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+184, p+64, - Xsqlite3_mprintf(tls, ts+29028, + Xsqlite3_mprintf(tls, ts+29075, libc.VaList(bp+112, zSelect, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zOrder))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 184)))) { goto __13 @@ -127510,7 +127568,7 @@ ; __18: ; - zVector = rbuMPrintf(tls, p, ts+29076, libc.VaList(bp+136, zVector, zSep, zQuoted)) + zVector = rbuMPrintf(tls, p, ts+29123, libc.VaList(bp+136, zVector, zSep, zQuoted)) zSep = ts + 14614 goto __15 __15: @@ -127522,7 +127580,7 @@ if !!(bFailed != 0) { goto __20 } - zRet = rbuMPrintf(tls, p, ts+29083, libc.VaList(bp+160, zLhs, zVector)) + zRet = rbuMPrintf(tls, p, ts+29130, libc.VaList(bp+160, zLhs, zVector)) __20: ; __13: @@ -127555,7 +127613,7 @@ if rc == SQLITE_OK { rc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+200, p+64, - Xsqlite3_mprintf(tls, ts+28613, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx))) + Xsqlite3_mprintf(tls, ts+28660, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx))) } for rc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 200))) { @@ -127567,7 +127625,7 @@ if iCid == -2 { var iSeq int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 200)), 0) - zRet = Xsqlite3_mprintf(tls, ts+29095, libc.VaList(bp+8, zRet, zCom, + zRet = Xsqlite3_mprintf(tls, ts+29142, libc.VaList(bp+8, zRet, zCom, (*RbuSpan)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FaIdxCol+uintptr(iSeq)*16)).FnSpan, (*RbuSpan)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FaIdxCol+uintptr(iSeq)*16)).FzSpan, zCollate)) zType = ts + 1554 } else { @@ -127579,37 +127637,37 @@ zCol = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) } else if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - zCol = ts + 28936 + zCol = ts + 28983 } else { - zCol = ts + 28666 + zCol = ts + 28713 } zType = ts + 1119 } else { zCol = 
*(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(iCid)*8)) zType = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblType + uintptr(iCid)*8)) } - zRet = Xsqlite3_mprintf(tls, ts+29117, libc.VaList(bp+48, zRet, zCom, zCol, zCollate)) + zRet = Xsqlite3_mprintf(tls, ts+29164, libc.VaList(bp+48, zRet, zCom, zCol, zCollate)) } if (*RbuObjIter)(unsafe.Pointer(pIter)).FbUnique == 0 || Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 200)), 5) != 0 { var zOrder uintptr = func() uintptr { if bDesc != 0 { - return ts + 28853 + return ts + 28900 } return ts + 1554 }() - zImpPK = Xsqlite3_mprintf(tls, ts+29137, + zImpPK = Xsqlite3_mprintf(tls, ts+29184, libc.VaList(bp+80, zImpPK, zCom, nBind, zCol, zOrder)) } - zImpCols = Xsqlite3_mprintf(tls, ts+29158, + zImpCols = Xsqlite3_mprintf(tls, ts+29205, libc.VaList(bp+120, zImpCols, zCom, nBind, zCol, zType, zCollate)) zWhere = Xsqlite3_mprintf(tls, - ts+29191, libc.VaList(bp+168, zWhere, zAnd, nBind, zCol)) + ts+29238, libc.VaList(bp+168, zWhere, zAnd, nBind, zCol)) if zRet == uintptr(0) || zImpPK == uintptr(0) || zImpCols == uintptr(0) || zWhere == uintptr(0) { rc = SQLITE_NOMEM } zCom = ts + 14614 - zAnd = ts + 21525 + zAnd = ts + 21572 nBind++ } @@ -127648,9 +127706,9 @@ for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { if *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed + uintptr(i))) != 0 { var zCol uintptr = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) - zList = Xsqlite3_mprintf(tls, ts+29215, libc.VaList(bp, zList, zS, zObj, zCol)) + zList = Xsqlite3_mprintf(tls, ts+29262, libc.VaList(bp, zList, zS, zObj, zCol)) } else { - zList = Xsqlite3_mprintf(tls, ts+29227, libc.VaList(bp+32, zList, zS)) + zList = Xsqlite3_mprintf(tls, ts+29274, libc.VaList(bp+32, zList, zS)) } zS = ts + 14614 if zList == uintptr(0) { @@ -127660,7 +127718,7 @@ } if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { - zList = rbuMPrintf(tls, p, ts+29236, libc.VaList(bp+48, zList, zObj)) + zList = rbuMPrintf(tls, p, ts+29283, libc.VaList(bp+48, zList, zObj)) } } return zList @@ -127672,18 +127730,18 @@ var zList uintptr = uintptr(0) if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_VTAB || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { - zList = rbuMPrintf(tls, p, ts+29251, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol+1)) + zList = rbuMPrintf(tls, p, ts+29298, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol+1)) } else if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL { var zSep uintptr = ts + 1554 var i int32 for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { if *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(i))) != 0 { - zList = rbuMPrintf(tls, p, ts+29265, libc.VaList(bp+8, zList, zSep, i, i+1)) - zSep = ts + 21525 + zList = rbuMPrintf(tls, p, ts+29312, libc.VaList(bp+8, zList, zSep, i, i+1)) + zSep = ts + 21572 } } zList = rbuMPrintf(tls, p, - ts+29277, libc.VaList(bp+40, zList)) + ts+29324, libc.VaList(bp+40, zList)) } else { var zSep uintptr = ts + 1554 @@ -127691,8 +127749,8 @@ for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { if *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(i))) != 0 { var zCol uintptr = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + 
uintptr(i)*8)) - zList = rbuMPrintf(tls, p, ts+29327, libc.VaList(bp+48, zList, zSep, zCol, i+1)) - zSep = ts + 21525 + zList = rbuMPrintf(tls, p, ts+29374, libc.VaList(bp+48, zList, zSep, zCol, i+1)) + zSep = ts + 21572 } } } @@ -127701,7 +127759,7 @@ func rbuBadControlError(tls *libc.TLS, p uintptr) { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+29340, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+29387, 0) } func rbuObjIterGetSetlist(tls *libc.TLS, p uintptr, pIter uintptr, zMask uintptr) uintptr { @@ -127719,15 +127777,15 @@ for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { var c uint8 = *(*uint8)(unsafe.Pointer(zMask + uintptr(*(*int32)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FaiSrcOrder + uintptr(i)*4))))) if int32(c) == 'x' { - zList = rbuMPrintf(tls, p, ts+29327, + zList = rbuMPrintf(tls, p, ts+29374, libc.VaList(bp, zList, zSep, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)), i+1)) zSep = ts + 14614 } else if int32(c) == 'd' { - zList = rbuMPrintf(tls, p, ts+29366, + zList = rbuMPrintf(tls, p, ts+29413, libc.VaList(bp+32, zList, zSep, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)), *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)), i+1)) zSep = ts + 14614 } else if int32(c) == 'f' { - zList = rbuMPrintf(tls, p, ts+29396, + zList = rbuMPrintf(tls, p, ts+29443, libc.VaList(bp+72, zList, zSep, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)), *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)), i+1)) zSep = ts + 14614 } @@ -127764,19 +127822,19 @@ var z uintptr = uintptr(0) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - var zSep uintptr = ts + 29433 + var zSep uintptr = ts + 29480 *(*uintptr)(unsafe.Pointer(bp + 56)) = uintptr(0) *(*uintptr)(unsafe.Pointer(bp + 64)) = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+56, p+64, - Xsqlite3_mprintf(tls, ts+28585, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) + Xsqlite3_mprintf(tls, ts+28632, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 56))) { var zOrig uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), 3) if zOrig != 0 && libc.Xstrcmp(tls, zOrig, ts+16155) == 0 { var zIdx uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), 1) if zIdx != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+64, p+64, - Xsqlite3_mprintf(tls, ts+28613, libc.VaList(bp+8, zIdx))) + Xsqlite3_mprintf(tls, ts+28660, libc.VaList(bp+8, zIdx))) } break } @@ -127788,15 +127846,15 @@ var zCol uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 64)), 2) var zDesc uintptr if Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 64)), 3) != 0 { - zDesc = ts + 28853 + zDesc = ts + 28900 } else { zDesc = ts + 1554 } - z = rbuMPrintf(tls, p, ts+29446, libc.VaList(bp+16, z, zSep, zCol, zDesc)) + z = rbuMPrintf(tls, p, ts+29493, libc.VaList(bp+16, z, zSep, zCol, zDesc)) zSep = ts + 14614 } } - z = rbuMPrintf(tls, p, ts+29457, libc.VaList(bp+48, z)) + z = 
rbuMPrintf(tls, p, ts+29504, libc.VaList(bp+48, z)) rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 64))) } return z @@ -127816,7 +127874,7 @@ var zPk uintptr = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+168, p+64, - ts+29461) + ts+29508) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { Xsqlite3_bind_int(tls, *(*uintptr)(unsafe.Pointer(bp + 168)), 1, tnum) if SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 168))) { @@ -127825,7 +127883,7 @@ } if zIdx != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+176, p+64, - Xsqlite3_mprintf(tls, ts+28613, libc.VaList(bp, zIdx))) + Xsqlite3_mprintf(tls, ts+28660, libc.VaList(bp, zIdx))) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 168))) @@ -127835,23 +127893,23 @@ var iCid int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 176)), 1) var bDesc int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 176)), 3) var zCollate uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 176)), 4) - zCols = rbuMPrintf(tls, p, ts+29511, libc.VaList(bp+8, zCols, zComma, + zCols = rbuMPrintf(tls, p, ts+29558, libc.VaList(bp+8, zCols, zComma, iCid, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblType + uintptr(iCid)*8)), zCollate)) - zPk = rbuMPrintf(tls, p, ts+29533, libc.VaList(bp+48, zPk, zComma, iCid, func() uintptr { + zPk = rbuMPrintf(tls, p, ts+29580, libc.VaList(bp+48, zPk, zComma, iCid, func() uintptr { if bDesc != 0 { - return ts + 28853 + return ts + 28900 } return ts + 1554 }())) zComma = ts + 14614 } } - zCols = rbuMPrintf(tls, p, ts+29543, libc.VaList(bp+80, zCols)) + zCols = rbuMPrintf(tls, p, ts+29590, libc.VaList(bp+80, zCols)) rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 176))) Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+88, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6441, 1, tnum)) rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+29558, + ts+29605, libc.VaList(bp+120, zCols, zPk)) Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+136, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6441, 0, 0)) } @@ -127877,13 +127935,13 @@ (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6441, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zCol, uintptr(0), bp+192, uintptr(0), uintptr(0), uintptr(0)) if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_IPK && *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(iCol))) != 0 { - zPk = ts + 29620 + zPk = ts + 29667 } - zSql = rbuMPrintf(tls, p, ts+29633, + zSql = rbuMPrintf(tls, p, ts+29680, libc.VaList(bp+32, zSql, zComma, zCol, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblType + uintptr(iCol)*8)), zPk, *(*uintptr)(unsafe.Pointer(bp + 192)), func() uintptr { if *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabNotNull + uintptr(iCol))) != 0 { - return ts + 29660 + return ts + 29707 } return ts + 1554 }())) @@ -127893,16 +127951,16 @@ if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_WITHOUT_ROWID { var zPk uintptr = rbuWithoutRowidPK(tls, p, pIter) if zPk != 0 { - zSql = rbuMPrintf(tls, p, ts+29670, libc.VaList(bp+88, zSql, zPk)) + zSql = rbuMPrintf(tls, p, ts+29717, libc.VaList(bp+88, zSql, zPk)) } } Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+104, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6441, 1, tnum)) - 
rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+29677, + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+29724, libc.VaList(bp+136, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zSql, func() uintptr { if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_WITHOUT_ROWID { - return ts + 29709 + return ts + 29756 } return ts + 1554 }())) @@ -127919,7 +127977,7 @@ if zBind != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, pIter+152, p+64, Xsqlite3_mprintf(tls, - ts+29724, + ts+29771, libc.VaList(bp, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zCollist, zRbuRowid, zBind))) } } @@ -127956,7 +128014,7 @@ if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { *(*int32)(unsafe.Pointer(bp + 8)) = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp, p+64, - ts+29781) + ts+29828) } if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { var rc2 int32 @@ -128061,7 +128119,7 @@ var zLimit uintptr = uintptr(0) if nOffset != 0 { - zLimit = Xsqlite3_mprintf(tls, ts+29847, libc.VaList(bp, nOffset)) + zLimit = Xsqlite3_mprintf(tls, ts+29894, libc.VaList(bp, nOffset)) if !(zLimit != 0) { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_NOMEM } @@ -128084,7 +128142,7 @@ Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+8, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6441, 0, 1)) Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+40, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6441, 1, tnum)) rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+29867, + ts+29914, libc.VaList(bp+72, zTbl, *(*uintptr)(unsafe.Pointer(bp + 600)), *(*uintptr)(unsafe.Pointer(bp + 608)))) Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+96, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6441, 0, 0)) @@ -128092,13 +128150,13 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+136, p+64, - Xsqlite3_mprintf(tls, ts+29932, libc.VaList(bp+128, zTbl, zBind))) + Xsqlite3_mprintf(tls, ts+29979, libc.VaList(bp+128, zTbl, zBind))) } if libc.Bool32((*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0)) == 0 && (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+144, p+64, - Xsqlite3_mprintf(tls, ts+29968, libc.VaList(bp+144, zTbl, *(*uintptr)(unsafe.Pointer(bp + 616))))) + Xsqlite3_mprintf(tls, ts+30015, libc.VaList(bp+144, zTbl, *(*uintptr)(unsafe.Pointer(bp + 616))))) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { @@ -128114,7 +128172,7 @@ } zSql = Xsqlite3_mprintf(tls, - ts+30002, + ts+30049, libc.VaList(bp+160, zCollist, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zPart, @@ -128122,9 +128180,9 @@ if zStart != 0 { return func() uintptr { if zPart != 0 { - return ts + 30063 + return ts + 30110 } - return ts + 30067 + return ts + 30114 }() } return ts + 1554 @@ -128133,20 +128191,20 @@ Xsqlite3_free(tls, zStart) } else if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { zSql = Xsqlite3_mprintf(tls, - ts+30073, + ts+30120, libc.VaList(bp+216, zCollist, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zPart, zCollist, zLimit)) } else { zSql = Xsqlite3_mprintf(tls, - ts+30134, + ts+30181, libc.VaList(bp+264, 
zCollist, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zPart, zCollist, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zPart, func() uintptr { if zPart != 0 { - return ts + 30063 + return ts + 30110 } - return ts + 30067 + return ts + 30114 }(), zCollist, zLimit)) } @@ -128183,16 +128241,16 @@ if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_VTAB { return ts + 1554 } - return ts + 30293 + return ts + 30340 }() if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+136, pz, Xsqlite3_mprintf(tls, - ts+30302, + ts+30349, libc.VaList(bp+344, zWrite, zTbl, zCollist, func() uintptr { if bRbuRowid != 0 { - return ts + 30338 + return ts + 30385 } return ts + 1554 }(), zBindings))) @@ -128201,32 +128259,32 @@ if libc.Bool32((*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0)) == 0 && (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+144, pz, Xsqlite3_mprintf(tls, - ts+30348, libc.VaList(bp+384, zWrite, zTbl, zWhere))) + ts+30395, libc.VaList(bp+384, zWrite, zTbl, zWhere))) } if libc.Bool32((*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0)) == 0 && (*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed != 0 { var zRbuRowid uintptr = ts + 1554 if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { - zRbuRowid = ts + 30376 + zRbuRowid = ts + 30423 } rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, - ts+30388, libc.VaList(bp+408, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, func() uintptr { + ts+30435, libc.VaList(bp+408, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, func() uintptr { if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL { - return ts + 30464 + return ts + 30511 } return ts + 1554 }(), (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl)) rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+30481, + ts+30528, libc.VaList(bp+440, zWrite, zTbl, zOldlist, zWrite, zTbl, zOldlist, zWrite, zTbl, zNewlist)) if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+30780, + ts+30827, libc.VaList(bp+512, zWrite, zTbl, zNewlist)) } @@ -128239,9 +128297,9 @@ var zOrder uintptr = uintptr(0) if bRbuRowid != 0 { if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - zRbuRowid = ts + 30879 + zRbuRowid = ts + 30926 } else { - zRbuRowid = ts + 30889 + zRbuRowid = ts + 30936 } } @@ -128254,7 +128312,7 @@ } } if bRbuRowid != 0 { - zOrder = rbuMPrintf(tls, p, ts+28936, 0) + zOrder = rbuMPrintf(tls, p, ts+28983, 0) } else { zOrder = rbuObjIterGetPkList(tls, p, pIter, ts+1554, ts+14614, ts+1554) } @@ -128263,11 +128321,11 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, pIter+128, pz, Xsqlite3_mprintf(tls, - ts+30900, + ts+30947, libc.VaList(bp+536, zCollist, func() uintptr { if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - return ts + 30948 + return ts + 30995 } return ts + 1554 }(), @@ -128280,7 +128338,7 @@ }(), func() uintptr { if zOrder != 0 { - return ts + 22859 + return ts + 22906 } return ts + 1554 }(), zOrder, @@ -128348,9 +128406,9 @@ 
var zPrefix uintptr = ts + 1554 if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType != RBU_PK_VTAB { - zPrefix = ts + 30293 + zPrefix = ts + 30340 } - zUpdate = Xsqlite3_mprintf(tls, ts+30954, + zUpdate = Xsqlite3_mprintf(tls, ts+31001, libc.VaList(bp, zPrefix, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zSet, zWhere)) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pUp+8, p+64, zUpdate) @@ -128409,7 +128467,7 @@ } *(*int32)(unsafe.Pointer(bp + 16)) = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp+8, p+64, - Xsqlite3_mprintf(tls, ts+30984, libc.VaList(bp, p+48))) + Xsqlite3_mprintf(tls, ts+31031, libc.VaList(bp, p+48))) for *(*int32)(unsafe.Pointer(bp + 16)) == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 8))) { switch Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 8)), 0) { case RBU_STATE_STAGE: @@ -128482,18 +128540,18 @@ Xsqlite3_file_control(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+6441, SQLITE_FCNTL_RBUCNT, p) if (*Sqlite3rbu)(unsafe.Pointer(p)).FzState == uintptr(0) { var zFile uintptr = Xsqlite3_db_filename(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+6441) - (*Sqlite3rbu)(unsafe.Pointer(p)).FzState = rbuMPrintf(tls, p, ts+31014, libc.VaList(bp, zFile, zFile)) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzState = rbuMPrintf(tls, p, ts+31061, libc.VaList(bp, zFile, zFile)) } } if (*Sqlite3rbu)(unsafe.Pointer(p)).FzState != 0 { - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+31042, libc.VaList(bp+16, (*Sqlite3rbu)(unsafe.Pointer(p)).FzState)) + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+31089, libc.VaList(bp+16, (*Sqlite3rbu)(unsafe.Pointer(p)).FzState)) libc.Xmemcpy(tls, p+48, ts+3286, uint64(4)) } else { libc.Xmemcpy(tls, p+48, ts+6441, uint64(4)) } - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+31060, libc.VaList(bp+24, p+48)) + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+31107, libc.VaList(bp+24, p+48)) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { var bOpen int32 = 0 @@ -128533,11 +128591,11 @@ return } (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31126, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31173, 0) } else { var zTarget uintptr var zExtra uintptr = uintptr(0) - if libc.Xstrlen(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu) >= uint64(5) && 0 == libc.Xmemcmp(tls, ts+24206, (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu, uint64(5)) { + if libc.Xstrlen(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu) >= uint64(5) && 0 == libc.Xmemcmp(tls, ts+24253, (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu, uint64(5)) { zExtra = (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu + 5 for *(*uint8)(unsafe.Pointer(zExtra)) != 0 { if int32(*(*uint8)(unsafe.Pointer(libc.PostIncUintptr(&zExtra, 1)))) == '?' 
{ @@ -128549,13 +128607,13 @@ } } - zTarget = Xsqlite3_mprintf(tls, ts+31158, + zTarget = Xsqlite3_mprintf(tls, ts+31205, libc.VaList(bp+32, Xsqlite3_db_filename(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+6441), func() uintptr { if zExtra == uintptr(0) { return ts + 1554 } - return ts + 31190 + return ts + 31237 }(), func() uintptr { if zExtra == uintptr(0) { return ts + 1554 @@ -128574,21 +128632,21 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_create_function(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+31192, -1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { + ts+31239, -1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rbuTmpInsertFunc})), uintptr(0), uintptr(0)) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_create_function(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+31207, 2, SQLITE_UTF8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + ts+31254, 2, SQLITE_UTF8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rbuFossilDeltaFunc})), uintptr(0), uintptr(0)) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_create_function(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, - ts+31224, -1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { + ts+31271, -1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rbuTargetNameFunc})), uintptr(0), uintptr(0)) } @@ -128596,7 +128654,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_file_control(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6441, SQLITE_FCNTL_RBU, p) } - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31240, 0) + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31287, 0) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_file_control(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6441, SQLITE_FCNTL_RBU, p) @@ -128604,7 +128662,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_NOTFOUND { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31268, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31315, 0) } } @@ -128633,14 +128691,14 @@ if pState == uintptr(0) { (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage = 0 if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31240, uintptr(0), uintptr(0), uintptr(0)) + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31287, uintptr(0), uintptr(0), uintptr(0)) } } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { var rc2 int32 (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage = RBU_STAGE_CAPTURE - rc2 = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31286, uintptr(0), uintptr(0), uintptr(0)) + rc2 = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31333, uintptr(0), uintptr(0), uintptr(0)) if rc2 != SQLITE_NOTICE { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = rc2 } @@ -128766,7 +128824,7 @@ func rbuExclusiveCheckpoint(tls *libc.TLS, db uintptr) int32 { var zUri uintptr = Xsqlite3_db_filename(tls, db, uintptr(0)) - return 
Xsqlite3_uri_boolean(tls, zUri, ts+31321, 0) + return Xsqlite3_uri_boolean(tls, zUri, ts+31368, 0) } func rbuMoveOalFile(tls *libc.TLS, p uintptr) { @@ -128781,8 +128839,8 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { zMove = Xsqlite3_db_filename(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+6441) } - zOal = Xsqlite3_mprintf(tls, ts+31346, libc.VaList(bp, zMove)) - zWal = Xsqlite3_mprintf(tls, ts+31353, libc.VaList(bp+8, zMove)) + zOal = Xsqlite3_mprintf(tls, ts+31393, libc.VaList(bp, zMove)) + zWal = Xsqlite3_mprintf(tls, ts+31400, libc.VaList(bp+8, zMove)) if zWal == uintptr(0) || zOal == uintptr(0) { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_NOMEM @@ -128899,7 +128957,7 @@ (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx == uintptr(0) && (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_IPK && *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(i))) != 0 && Xsqlite3_column_type(tls, (*RbuObjIter)(unsafe.Pointer(pIter)).FpSelect, i) == SQLITE_NULL { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_MISMATCH - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+23844, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+23891, 0) return } @@ -128992,7 +129050,7 @@ var iCookie int32 = 1000000 (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, dbread, bp+8, p+64, - ts+31360) + ts+31407) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { if SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 8))) { iCookie = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 8)), 0) @@ -129000,7 +129058,7 @@ rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 8))) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31382, libc.VaList(bp, iCookie+1)) + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31429, libc.VaList(bp, iCookie+1)) } } } @@ -129021,7 +129079,7 @@ rc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp+168, p+64, Xsqlite3_mprintf(tls, - ts+31409, + ts+31456, libc.VaList(bp, p+48, RBU_STATE_STAGE, eStage, RBU_STATE_TBL, (*Sqlite3rbu)(unsafe.Pointer(p)).Fobjiter.FzTbl, @@ -129051,9 +129109,9 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { *(*uintptr)(unsafe.Pointer(bp + 24)) = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp+24, p+64, - Xsqlite3_mprintf(tls, ts+31567, libc.VaList(bp, zPragma))) + Xsqlite3_mprintf(tls, ts+31614, libc.VaList(bp, zPragma))) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) { - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31582, + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31629, libc.VaList(bp+8, zPragma, Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 24)), 0))) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 24))) @@ -129067,10 +129125,10 @@ *(*uintptr)(unsafe.Pointer(bp)) = uintptr(0) *(*uintptr)(unsafe.Pointer(bp + 8)) = uintptr(0) - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31602, uintptr(0), uintptr(0), p+64) + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31649, uintptr(0), uintptr(0), p+64) if 
(*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp, p+64, - ts+31627) + ts+31674) } for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp))) == SQLITE_ROW { @@ -129084,12 +129142,12 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp, p+64, - ts+31735) + ts+31782) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+8, p+64, - ts+31800) + ts+31847) } for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp))) == SQLITE_ROW { @@ -129101,7 +129159,7 @@ (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_reset(tls, *(*uintptr)(unsafe.Pointer(bp + 8))) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31844, uintptr(0), uintptr(0), p+64) + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31891, uintptr(0), uintptr(0), p+64) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp))) @@ -129129,7 +129187,7 @@ if (*RbuObjIter)(unsafe.Pointer(pIter)).FbCleanup != 0 { if libc.Bool32((*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0)) == 0 && (*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed != 0 { rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, - ts+31869, libc.VaList(bp, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl)) + ts+31916, libc.VaList(bp, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl)) } } else { rbuObjIterPrepareAll(tls, p, pIter, 0) @@ -129251,7 +129309,7 @@ if rc == SQLITE_OK && !(int32((*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl) != 0) { rc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31897, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31944, 0) } if rc == SQLITE_OK { @@ -129267,7 +129325,7 @@ bp := tls.Alloc(16) defer tls.Free(16) - var zOal uintptr = rbuMPrintf(tls, p, ts+31346, libc.VaList(bp, (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget)) + var zOal uintptr = rbuMPrintf(tls, p, ts+31393, libc.VaList(bp, (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget)) if zOal != 0 { *(*uintptr)(unsafe.Pointer(bp + 8)) = uintptr(0) Xsqlite3_file_control(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6441, SQLITE_FCNTL_VFS_POINTER, bp+8) @@ -129284,7 +129342,7 @@ defer tls.Free(76) Xsqlite3_randomness(tls, int32(unsafe.Sizeof(int32(0))), bp+8) - Xsqlite3_snprintf(tls, int32(unsafe.Sizeof([64]uint8{})), bp+12, ts+31922, libc.VaList(bp, *(*int32)(unsafe.Pointer(bp + 8)))) + Xsqlite3_snprintf(tls, int32(unsafe.Sizeof([64]uint8{})), bp+12, ts+31969, libc.VaList(bp, *(*int32)(unsafe.Pointer(bp + 8)))) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3rbu_create_vfs(tls, bp+12, uintptr(0)) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { var pVfs uintptr = Xsqlite3_vfs_find(tls, bp+12) @@ -129318,7 +129376,7 @@ rc = prepareFreeAndCollectError(tls, db, bp+8, bp+16, Xsqlite3_mprintf(tls, - ts+31933, libc.VaList(bp, Xsqlite3_value_text(tls, *(*uintptr)(unsafe.Pointer(apVal)))))) + ts+31980, libc.VaList(bp, Xsqlite3_value_text(tls, *(*uintptr)(unsafe.Pointer(apVal)))))) if rc != SQLITE_OK { Xsqlite3_result_error(tls, 
pCtx, *(*uintptr)(unsafe.Pointer(bp + 16)), -1) } else { @@ -129348,13 +129406,13 @@ (*Sqlite3rbu)(unsafe.Pointer(p)).FnPhaseOneStep = int64(-1) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_create_function(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, - ts+32005, 1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { + ts+32052, 1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rbuIndexCntFunc})), uintptr(0), uintptr(0)) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp, p+64, - ts+32019) + ts+32066) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { if SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp))) { @@ -129365,7 +129423,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && bExists != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp, p+64, - ts+32076) + ts+32123) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { if SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp))) { (*Sqlite3rbu)(unsafe.Pointer(p)).FnPhaseOneStep = Xsqlite3_column_int64(tls, *(*uintptr)(unsafe.Pointer(bp)), 0) @@ -129439,7 +129497,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && (*Rbu_file)(unsafe.Pointer((*Sqlite3rbu)(unsafe.Pointer(p)).FpTargetFd)).FpWalFd != 0 { if (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage == RBU_STAGE_OAL { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32150, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32197, 0) } else if (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage == RBU_STAGE_MOVE { (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage = RBU_STAGE_CKPT (*Sqlite3rbu)(unsafe.Pointer(p)).FnStep = 0 @@ -129457,12 +129515,12 @@ }() if (*Rbu_file)(unsafe.Pointer(pFd)).FiCookie != (*RbuState)(unsafe.Pointer(pState)).FiCookie { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_BUSY - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32182, + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32229, libc.VaList(bp+8, func() uintptr { if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - return ts + 32214 + return ts + 32261 } - return ts + 32221 + return ts + 32268 }())) } } @@ -129486,14 +129544,14 @@ } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, db, ts+32228, uintptr(0), uintptr(0), p+64) + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, db, ts+32275, uintptr(0), uintptr(0), p+64) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { var frc int32 = Xsqlite3_file_control(tls, db, ts+6441, SQLITE_FCNTL_ZIPVFS, uintptr(0)) if frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, - db, ts+32244, uintptr(0), uintptr(0), p+64) + db, ts+32291, uintptr(0), uintptr(0), p+64) } } @@ -129547,7 +129605,7 @@ } if zState != 0 { var n int32 = int32(libc.Xstrlen(tls, zState)) - if n >= 7 && 0 == libc.Xmemcmp(tls, ts+32268, zState+uintptr(n-7), uint64(7)) { + if n >= 7 && 0 == libc.Xmemcmp(tls, ts+32315, zState+uintptr(n-7), uint64(7)) { return rbuMisuseError(tls) } } @@ -129574,7 +129632,7 @@ var i uint32 var nErrmsg Size_t = libc.Xstrlen(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg) for i = uint32(0); Size_t(i) < nErrmsg-uint64(8); i++ { - if libc.Xmemcmp(tls, 
(*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg+uintptr(i), ts+30293, uint64(8)) == 0 { + if libc.Xmemcmp(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg+uintptr(i), ts+30340, uint64(8)) == 0 { var nDel int32 = 8 for int32(*(*uint8)(unsafe.Pointer((*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg + uintptr(i+uint32(nDel))))) >= '0' && int32(*(*uint8)(unsafe.Pointer((*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg + uintptr(i+uint32(nDel))))) <= '9' { nDel++ @@ -129610,7 +129668,7 @@ rbuObjIterFinalize(tls, p+80) if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) && (*Sqlite3rbu)(unsafe.Pointer(p)).Frc != SQLITE_OK && (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu != 0 { - var rc2 int32 = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+32276, uintptr(0), uintptr(0), uintptr(0)) + var rc2 int32 = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+32323, uintptr(0), uintptr(0), uintptr(0)) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_DONE && rc2 != SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = rc2 } @@ -129729,12 +129787,12 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { zBegin = ts + 14503 } else { - zBegin = ts + 32228 + zBegin = ts + 32275 } rc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, zBegin, uintptr(0), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { - rc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+32228, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+32275, uintptr(0), uintptr(0), uintptr(0)) } } @@ -130080,7 +130138,7 @@ })(unsafe.Pointer(&struct{ uintptr }{xControl})).f(tls, (*Rbu_file)(unsafe.Pointer(p)).FpReal, SQLITE_FCNTL_ZIPVFS, bp+16) if rc == SQLITE_OK { rc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(pRbu)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32303, 0) + (*Sqlite3rbu)(unsafe.Pointer(pRbu)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32350, 0) } else if rc == SQLITE_NOTFOUND { (*Sqlite3rbu)(unsafe.Pointer(pRbu)).FpTargetFd = p (*Rbu_file)(unsafe.Pointer(p)).FpRbu = pRbu @@ -130105,7 +130163,7 @@ if rc == SQLITE_OK && op == SQLITE_FCNTL_VFSNAME { var pRbuVfs uintptr = (*Rbu_file)(unsafe.Pointer(p)).FpRbuVfs var zIn uintptr = *(*uintptr)(unsafe.Pointer(pArg)) - var zOut uintptr = Xsqlite3_mprintf(tls, ts+32326, libc.VaList(bp, (*Rbu_vfs)(unsafe.Pointer(pRbuVfs)).Fbase.FzName, zIn)) + var zOut uintptr = Xsqlite3_mprintf(tls, ts+32373, libc.VaList(bp, (*Rbu_vfs)(unsafe.Pointer(pRbuVfs)).Fbase.FzName, zIn)) *(*uintptr)(unsafe.Pointer(pArg)) = zOut if zOut == uintptr(0) { rc = SQLITE_NOMEM @@ -130265,7 +130323,7 @@ } if oflags&SQLITE_OPEN_MAIN_DB != 0 && - Xsqlite3_uri_boolean(tls, zName, ts+32337, 0) != 0 { + Xsqlite3_uri_boolean(tls, zName, ts+32384, 0) != 0 { oflags = SQLITE_OPEN_TEMP_DB | SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE | SQLITE_OPEN_EXCLUSIVE | SQLITE_OPEN_DELETEONCLOSE zOpen = uintptr(0) } @@ -131094,7 +131152,7 @@ rc = Xsqlite3_table_column_metadata(tls, db, zDb, zThis, uintptr(0), uintptr(0), uintptr(0), uintptr(0), uintptr(0), uintptr(0)) if rc == SQLITE_OK { zPragma = Xsqlite3_mprintf(tls, - ts+32348, 0) + ts+32395, 0) } else if rc == SQLITE_ERROR { zPragma = Xsqlite3_mprintf(tls, ts+1554, 0) } else { @@ -131107,7 +131165,7 @@ return rc } } else { - zPragma = Xsqlite3_mprintf(tls, ts+32469, libc.VaList(bp, zDb, zThis)) + zPragma = Xsqlite3_mprintf(tls, ts+32516, libc.VaList(bp, zDb, zThis)) } if !(zPragma != 0) { *(*uintptr)(unsafe.Pointer(pazCol)) = uintptr(0) @@ -131787,9 +131845,9 @@ for i = 0; i < nCol; i++ { if 
*(*U8)(unsafe.Pointer(abPK + uintptr(i))) != 0 { - zRet = Xsqlite3_mprintf(tls, ts+32498, + zRet = Xsqlite3_mprintf(tls, ts+32545, libc.VaList(bp, zRet, zSep, zDb1, zTab, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*8)), zDb2, zTab, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*8)))) - zSep = ts + 21525 + zSep = ts + 21572 if zRet == uintptr(0) { break } @@ -131812,9 +131870,9 @@ if int32(*(*U8)(unsafe.Pointer(abPK + uintptr(i)))) == 0 { bHave = 1 zRet = Xsqlite3_mprintf(tls, - ts+32532, + ts+32579, libc.VaList(bp, zRet, zSep, zDb1, zTab, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*8)), zDb2, zTab, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*8)))) - zSep = ts + 32573 + zSep = ts + 32620 if zRet == uintptr(0) { break } @@ -131822,7 +131880,7 @@ } if bHave == 0 { - zRet = Xsqlite3_mprintf(tls, ts+7530, 0) + zRet = Xsqlite3_mprintf(tls, ts+7519, 0) } return zRet @@ -131833,7 +131891,7 @@ defer tls.Free(40) var zRet uintptr = Xsqlite3_mprintf(tls, - ts+32578, + ts+32625, libc.VaList(bp, zDb1, zTbl, zDb2, zTbl, zExpr)) return zRet } @@ -131876,7 +131934,7 @@ rc = SQLITE_NOMEM } else { var zStmt uintptr = Xsqlite3_mprintf(tls, - ts+32656, + ts+32703, libc.VaList(bp, (*Sqlite3_session)(unsafe.Pointer(pSession)).FzDb, (*SessionTable)(unsafe.Pointer(pTab)).FzName, zFrom, (*SessionTable)(unsafe.Pointer(pTab)).FzName, zExpr, zExpr2)) if zStmt == uintptr(0) { rc = SQLITE_NOMEM @@ -132003,7 +132061,7 @@ if !(pzErrMsg != 0) { goto __16 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+32709, 0) + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+32756, 0) __16: ; rc = SQLITE_SCHEMA @@ -132479,7 +132537,7 @@ if 0 == Xsqlite3_stricmp(tls, ts+11348, zTab) { zSql = Xsqlite3_mprintf(tls, - ts+32736, libc.VaList(bp, zDb)) + ts+32783, libc.VaList(bp, zDb)) if zSql == uintptr(0) { *(*int32)(unsafe.Pointer(bp + 24)) = SQLITE_NOMEM } @@ -132488,18 +132546,18 @@ var zSep uintptr = ts + 1554 *(*SessionBuffer)(unsafe.Pointer(bp + 8)) = SessionBuffer{} - sessionAppendStr(tls, bp+8, ts+32846, bp+24) + sessionAppendStr(tls, bp+8, ts+32893, bp+24) sessionAppendIdent(tls, bp+8, zDb, bp+24) sessionAppendStr(tls, bp+8, ts+1567, bp+24) sessionAppendIdent(tls, bp+8, zTab, bp+24) - sessionAppendStr(tls, bp+8, ts+32861, bp+24) + sessionAppendStr(tls, bp+8, ts+32908, bp+24) for i = 0; i < nCol; i++ { if *(*U8)(unsafe.Pointer(abPK + uintptr(i))) != 0 { sessionAppendStr(tls, bp+8, zSep, bp+24) sessionAppendIdent(tls, bp+8, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*8)), bp+24) - sessionAppendStr(tls, bp+8, ts+32869, bp+24) + sessionAppendStr(tls, bp+8, ts+32916, bp+24) sessionAppendInteger(tls, bp+8, i+1, bp+24) - zSep = ts + 21525 + zSep = ts + 21572 } } zSql = (*SessionBuffer)(unsafe.Pointer(bp + 8)).FaBuf @@ -132608,7 +132666,7 @@ if (*Sqlite3_session)(unsafe.Pointer(pSession)).Frc != 0 { return (*Sqlite3_session)(unsafe.Pointer(pSession)).Frc } - *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3_exec(tls, (*Sqlite3_session)(unsafe.Pointer(pSession)).Fdb, ts+32875, uintptr(0), uintptr(0), uintptr(0)) + *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3_exec(tls, (*Sqlite3_session)(unsafe.Pointer(pSession)).Fdb, ts+32922, uintptr(0), uintptr(0), uintptr(0)) if *(*int32)(unsafe.Pointer(bp + 40)) != SQLITE_OK { return *(*int32)(unsafe.Pointer(bp + 40)) } @@ -132700,7 +132758,7 @@ } Xsqlite3_free(tls, (*SessionBuffer)(unsafe.Pointer(bp+24)).FaBuf) - Xsqlite3_exec(tls, db, ts+32895, uintptr(0), uintptr(0), uintptr(0)) + Xsqlite3_exec(tls, db, ts+32942, uintptr(0), uintptr(0), uintptr(0)) 
Xsqlite3_mutex_leave(tls, Xsqlite3_db_mutex(tls, db)) return *(*int32)(unsafe.Pointer(bp + 40)) } @@ -132963,7 +133021,7 @@ rc = sessionInputBuffer(tls, pIn, 9) if rc == SQLITE_OK { if (*SessionInput)(unsafe.Pointer(pIn)).FiNext >= (*SessionInput)(unsafe.Pointer(pIn)).FnData { - rc = Xsqlite3CorruptError(tls, 219078) + rc = Xsqlite3CorruptError(tls, 219169) } else { eType = int32(*(*U8)(unsafe.Pointer((*SessionInput)(unsafe.Pointer(pIn)).FaData + uintptr(libc.PostIncInt32(&(*SessionInput)(unsafe.Pointer(pIn)).FiNext, 1))))) @@ -132986,7 +133044,7 @@ rc = sessionInputBuffer(tls, pIn, *(*int32)(unsafe.Pointer(bp))) if rc == SQLITE_OK { if *(*int32)(unsafe.Pointer(bp)) < 0 || *(*int32)(unsafe.Pointer(bp)) > (*SessionInput)(unsafe.Pointer(pIn)).FnData-(*SessionInput)(unsafe.Pointer(pIn)).FiNext { - rc = Xsqlite3CorruptError(tls, 219098) + rc = Xsqlite3CorruptError(tls, 219189) } else { var enc U8 = func() uint8 { if eType == SQLITE_TEXT { @@ -133028,7 +133086,7 @@ nRead = nRead + sessionVarintGet(tls, (*SessionInput)(unsafe.Pointer(pIn)).FaData+uintptr((*SessionInput)(unsafe.Pointer(pIn)).FiNext+nRead), bp) if *(*int32)(unsafe.Pointer(bp)) < 0 || *(*int32)(unsafe.Pointer(bp)) > 65536 { - rc = Xsqlite3CorruptError(tls, 219152) + rc = Xsqlite3CorruptError(tls, 219243) } else { rc = sessionInputBuffer(tls, pIn, nRead+*(*int32)(unsafe.Pointer(bp))+100) nRead = nRead + *(*int32)(unsafe.Pointer(bp)) @@ -133089,7 +133147,7 @@ (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Ftblhdr.FnBuf = 0 sessionBufferGrow(tls, p+72, int64(nByte), bp+4) } else { - *(*int32)(unsafe.Pointer(bp + 4)) = Xsqlite3CorruptError(tls, 219240) + *(*int32)(unsafe.Pointer(bp + 4)) = Xsqlite3CorruptError(tls, 219331) } } @@ -133163,13 +133221,13 @@ } if (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FzTab == uintptr(0) || (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FbPatchset != 0 && (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FbInvert != 0 { - return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219326)) + return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219417)) } (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fop = int32(op) (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FbIndirect = int32(*(*U8)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fin.FaData + uintptr(libc.PostIncInt32(&(*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fin.FiNext, 1))))) if (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fop != SQLITE_UPDATE && (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fop != SQLITE_DELETE && (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fop != SQLITE_INSERT { - return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219332)) + return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219423)) } if paRec != 0 { @@ -133231,7 +133289,7 @@ if *(*U8)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FabPK + uintptr(i))) != 0 { *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FapValue + uintptr(i)*8)) = *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FapValue + uintptr(i+(*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FnCol)*8)) if *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FapValue + uintptr(i)*8)) == uintptr(0) { - return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219376)) + return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219467)) } *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FapValue + uintptr(i+(*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FnCol)*8)) = uintptr(0) } @@ -133604,7 
+133662,7 @@ goto __6 __11: - *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3CorruptError(tls, 219741) + *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3CorruptError(tls, 219832) goto finished_invert __6: ; @@ -133783,34 +133841,34 @@ (*SessionUpdate)(unsafe.Pointer(pUp)).FaMask = pUp + 1*24 libc.Xmemcpy(tls, (*SessionUpdate)(unsafe.Pointer(pUp)).FaMask, (*SessionApplyCtx)(unsafe.Pointer(p)).FaUpdateMask, uint64(nU32)*uint64(unsafe.Sizeof(U32(0)))) - sessionAppendStr(tls, bp, ts+32913, bp+16) + sessionAppendStr(tls, bp, ts+32960, bp+16) sessionAppendIdent(tls, bp, (*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FzTab, bp+16) - sessionAppendStr(tls, bp, ts+32926, bp+16) + sessionAppendStr(tls, bp, ts+32973, bp+16) for ii = 0; ii < (*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FnCol; ii++ { if int32(*(*U8)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FabPK + uintptr(ii)))) == 0 && *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FapValue + uintptr((*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FnCol+ii)*8)) != 0 { sessionAppendStr(tls, bp, zSep, bp+16) sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(ii)*8)), bp+16) - sessionAppendStr(tls, bp, ts+32932, bp+16) + sessionAppendStr(tls, bp, ts+32979, bp+16) sessionAppendInteger(tls, bp, ii*2+1, bp+16) zSep = ts + 14614 } } zSep = ts + 1554 - sessionAppendStr(tls, bp, ts+32861, bp+16) + sessionAppendStr(tls, bp, ts+32908, bp+16) for ii = 0; ii < (*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FnCol; ii++ { if *(*U8)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FabPK + uintptr(ii))) != 0 || bPatchset == 0 && *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FapValue + uintptr(ii)*8)) != 0 { sessionAppendStr(tls, bp, zSep, bp+16) if bStat1 != 0 && ii == 1 { sessionAppendStr(tls, bp, - ts+32937, bp+16) + ts+32984, bp+16) } else { sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(ii)*8)), bp+16) - sessionAppendStr(tls, bp, ts+32869, bp+16) + sessionAppendStr(tls, bp, ts+32916, bp+16) sessionAppendInteger(tls, bp, ii*2+2, bp+16) } - zSep = ts + 21525 + zSep = ts + 21572 } } @@ -133862,34 +133920,34 @@ *(*SessionBuffer)(unsafe.Pointer(bp)) = SessionBuffer{} var nPk int32 = 0 - sessionAppendStr(tls, bp, ts+33012, bp+16) + sessionAppendStr(tls, bp, ts+33059, bp+16) sessionAppendIdent(tls, bp, zTab, bp+16) - sessionAppendStr(tls, bp, ts+32861, bp+16) + sessionAppendStr(tls, bp, ts+32908, bp+16) for i = 0; i < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol; i++ { if *(*U8)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FabPK + uintptr(i))) != 0 { nPk++ sessionAppendStr(tls, bp, zSep, bp+16) sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(i)*8)), bp+16) - sessionAppendStr(tls, bp, ts+32932, bp+16) + sessionAppendStr(tls, bp, ts+32979, bp+16) sessionAppendInteger(tls, bp, i+1, bp+16) - zSep = ts + 21525 + zSep = ts + 21572 } } if nPk < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol { - sessionAppendStr(tls, bp, ts+33030, bp+16) + sessionAppendStr(tls, bp, ts+33077, bp+16) sessionAppendInteger(tls, bp, (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol+1, bp+16) - sessionAppendStr(tls, bp, ts+32573, bp+16) + sessionAppendStr(tls, bp, ts+32620, bp+16) zSep = ts + 1554 for i = 0; i < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol; i++ { if !(int32(*(*U8)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FabPK + 
uintptr(i)))) != 0) { sessionAppendStr(tls, bp, zSep, bp+16) sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(i)*8)), bp+16) - sessionAppendStr(tls, bp, ts+32869, bp+16) + sessionAppendStr(tls, bp, ts+32916, bp+16) sessionAppendInteger(tls, bp, i+1, bp+16) - zSep = ts + 33038 + zSep = ts + 33085 } } sessionAppendStr(tls, bp, ts+4957, bp+16) @@ -133916,9 +133974,9 @@ var i int32 *(*SessionBuffer)(unsafe.Pointer(bp)) = SessionBuffer{} - sessionAppendStr(tls, bp, ts+33043, bp+16) + sessionAppendStr(tls, bp, ts+33090, bp+16) sessionAppendIdent(tls, bp, zTab, bp+16) - sessionAppendStr(tls, bp, ts+21531, bp+16) + sessionAppendStr(tls, bp, ts+21578, bp+16) for i = 0; i < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol; i++ { if i != 0 { sessionAppendStr(tls, bp, ts+14614, bp+16) @@ -133926,9 +133984,9 @@ sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(i)*8)), bp+16) } - sessionAppendStr(tls, bp, ts+33061, bp+16) + sessionAppendStr(tls, bp, ts+33108, bp+16) for i = 1; i < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol; i++ { - sessionAppendStr(tls, bp, ts+33072, bp+16) + sessionAppendStr(tls, bp, ts+33119, bp+16) } sessionAppendStr(tls, bp, ts+4957, bp+16) @@ -133947,11 +134005,11 @@ var rc int32 = sessionSelectRow(tls, db, ts+11348, p) if rc == SQLITE_OK { rc = sessionPrepare(tls, db, p+16, - ts+33076) + ts+33123) } if rc == SQLITE_OK { rc = sessionPrepare(tls, db, p+8, - ts+33189) + ts+33236) } return rc } @@ -133979,7 +134037,7 @@ f func(*libc.TLS, uintptr, int32, uintptr) int32 })(unsafe.Pointer(&struct{ uintptr }{xValue})).f(tls, pIter, i, bp) if *(*uintptr)(unsafe.Pointer(bp)) == uintptr(0) { - rc = Xsqlite3CorruptError(tls, 220219) + rc = Xsqlite3CorruptError(tls, 220310) } else { rc = sessionBindValue(tls, pStmt, i+1, *(*uintptr)(unsafe.Pointer(bp))) } @@ -134232,7 +134290,7 @@ if *(*int32)(unsafe.Pointer(bp + 4)) != 0 { rc = sessionApplyOneOp(tls, pIter, pApply, xConflict, pCtx, uintptr(0), uintptr(0)) } else if *(*int32)(unsafe.Pointer(bp)) != 0 { - rc = Xsqlite3_exec(tls, db, ts+33333, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33380, uintptr(0), uintptr(0), uintptr(0)) if rc == SQLITE_OK { rc = sessionBindRow(tls, pIter, *(*uintptr)(unsafe.Pointer(&struct { @@ -134248,7 +134306,7 @@ rc = sessionApplyOneOp(tls, pIter, pApply, xConflict, pCtx, uintptr(0), uintptr(0)) } if rc == SQLITE_OK { - rc = Xsqlite3_exec(tls, db, ts+33354, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33401, uintptr(0), uintptr(0), uintptr(0)) } } } @@ -134321,10 +134379,10 @@ (*SessionApplyCtx)(unsafe.Pointer(bp + 48)).FbInvertConstraints = libc.BoolInt32(!!(flags&SQLITE_CHANGESETAPPLY_INVERT != 0)) Xsqlite3_mutex_enter(tls, Xsqlite3_db_mutex(tls, db)) if flags&SQLITE_CHANGESETAPPLY_NOSAVEPOINT == 0 { - rc = Xsqlite3_exec(tls, db, ts+33373, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33420, uintptr(0), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { - rc = Xsqlite3_exec(tls, db, ts+33399, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33446, uintptr(0), uintptr(0), uintptr(0)) } for rc == SQLITE_OK && SQLITE_ROW == Xsqlite3changeset_next(tls, pIter) { Xsqlite3changeset_op(tls, pIter, bp+176, bp+184, bp+188, uintptr(0)) @@ -134383,16 +134441,16 @@ if (*SessionApplyCtx)(unsafe.Pointer(bp+48)).FnCol == 0 { schemaMismatch = 1 Xsqlite3_log(tls, SQLITE_SCHEMA, - ts+33429, libc.VaList(bp+8, 
*(*uintptr)(unsafe.Pointer(bp + 200)))) + ts+33476, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(bp + 200)))) } else if (*SessionApplyCtx)(unsafe.Pointer(bp+48)).FnCol < *(*int32)(unsafe.Pointer(bp + 184)) { schemaMismatch = 1 Xsqlite3_log(tls, SQLITE_SCHEMA, - ts+33473, + ts+33520, libc.VaList(bp+16, *(*uintptr)(unsafe.Pointer(bp + 200)), (*SessionApplyCtx)(unsafe.Pointer(bp+48)).FnCol, *(*int32)(unsafe.Pointer(bp + 184)))) } else if *(*int32)(unsafe.Pointer(bp + 184)) < nMinCol || libc.Xmemcmp(tls, (*SessionApplyCtx)(unsafe.Pointer(bp+48)).FabPK, *(*uintptr)(unsafe.Pointer(bp + 192)), uint64(*(*int32)(unsafe.Pointer(bp + 184)))) != 0 { schemaMismatch = 1 Xsqlite3_log(tls, SQLITE_SCHEMA, - ts+33544, libc.VaList(bp+40, *(*uintptr)(unsafe.Pointer(bp + 200)))) + ts+33591, libc.VaList(bp+40, *(*uintptr)(unsafe.Pointer(bp + 200)))) } else { (*SessionApplyCtx)(unsafe.Pointer(bp + 48)).FnCol = *(*int32)(unsafe.Pointer(bp + 184)) if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(bp + 200)), ts+11348) { @@ -134446,14 +134504,14 @@ } } } - Xsqlite3_exec(tls, db, ts+33604, uintptr(0), uintptr(0), uintptr(0)) + Xsqlite3_exec(tls, db, ts+33651, uintptr(0), uintptr(0), uintptr(0)) if flags&SQLITE_CHANGESETAPPLY_NOSAVEPOINT == 0 { if rc == SQLITE_OK { - rc = Xsqlite3_exec(tls, db, ts+33634, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33681, uintptr(0), uintptr(0), uintptr(0)) } else { - Xsqlite3_exec(tls, db, ts+33658, uintptr(0), uintptr(0), uintptr(0)) - Xsqlite3_exec(tls, db, ts+33634, uintptr(0), uintptr(0), uintptr(0)) + Xsqlite3_exec(tls, db, ts+33705, uintptr(0), uintptr(0), uintptr(0)) + Xsqlite3_exec(tls, db, ts+33681, uintptr(0), uintptr(0), uintptr(0)) } } @@ -135701,7 +135759,7 @@ fts5yy_pop_parser_stack(tls, fts5yypParser) } - sqlite3Fts5ParseError(tls, pParse, ts+33686, 0) + sqlite3Fts5ParseError(tls, pParse, ts+33733, 0) (*Fts5yyParser)(unsafe.Pointer(fts5yypParser)).FpParse = pParse @@ -135989,7 +136047,7 @@ _ = fts5yymajor sqlite3Fts5ParseError(tls, - pParse, ts+33714, libc.VaList(bp, fts5yyminor.Fn, fts5yyminor.Fp)) + pParse, ts+33761, libc.VaList(bp, fts5yyminor.Fn, fts5yyminor.Fp)) (*Fts5yyParser)(unsafe.Pointer(fts5yypParser)).FpParse = pParse @@ -136176,7 +136234,7 @@ if n < 0 { n = int32(libc.Xstrlen(tls, z)) } - (*HighlightContext)(unsafe.Pointer(p)).FzOut = Xsqlite3_mprintf(tls, ts+33745, libc.VaList(bp, (*HighlightContext)(unsafe.Pointer(p)).FzOut, n, z)) + (*HighlightContext)(unsafe.Pointer(p)).FzOut = Xsqlite3_mprintf(tls, ts+33792, libc.VaList(bp, (*HighlightContext)(unsafe.Pointer(p)).FzOut, n, z)) if (*HighlightContext)(unsafe.Pointer(p)).FzOut == uintptr(0) { *(*int32)(unsafe.Pointer(pRc)) = SQLITE_NOMEM } @@ -136244,7 +136302,7 @@ var iCol int32 if nVal != 3 { - var zErr uintptr = ts + 33752 + var zErr uintptr = ts + 33799 Xsqlite3_result_error(tls, pCtx, zErr, -1) return } @@ -136426,7 +136484,7 @@ var nCol int32 if nVal != 5 { - var zErr uintptr = ts + 33802 + var zErr uintptr = ts + 33849 Xsqlite3_result_error(tls, pCtx, zErr, -1) return } @@ -136750,13 +136808,13 @@ defer tls.Free(96) *(*[3]Builtin)(unsafe.Pointer(bp)) = [3]Builtin{ - {FzFunc: ts + 33850, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { + {FzFunc: ts + 33897, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr, int32, uintptr) }{fts5SnippetFunction}))}, - {FzFunc: ts + 33858, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { + {FzFunc: ts + 33905, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr, 
int32, uintptr) }{fts5HighlightFunction}))}, - {FzFunc: ts + 33868, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { + {FzFunc: ts + 33915, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr, int32, uintptr) }{fts5Bm25Function}))}, } @@ -137307,7 +137365,7 @@ *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_OK var nCmd int32 = int32(libc.Xstrlen(tls, zCmd)) - if Xsqlite3_strnicmp(tls, ts+33873, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+33920, zCmd, nCmd) == 0 { var nByte int32 = int32(uint64(unsafe.Sizeof(int32(0))) * uint64(FTS5_MAX_PREFIX_INDEXES)) var p uintptr var bFirst int32 = 1 @@ -137334,14 +137392,14 @@ break } if int32(*(*uint8)(unsafe.Pointer(p))) < '0' || int32(*(*uint8)(unsafe.Pointer(p))) > '9' { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+33880, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+33927, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR break } if (*Fts5Config)(unsafe.Pointer(pConfig)).FnPrefix == FTS5_MAX_PREFIX_INDEXES { *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, - ts+33911, libc.VaList(bp, FTS5_MAX_PREFIX_INDEXES)) + ts+33958, libc.VaList(bp, FTS5_MAX_PREFIX_INDEXES)) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR break } @@ -137352,7 +137410,7 @@ } if nPre <= 0 || nPre >= 1000 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+33944, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+33991, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR break } @@ -137365,7 +137423,7 @@ return *(*int32)(unsafe.Pointer(bp + 40)) } - if Xsqlite3_strnicmp(tls, ts+33981, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+34028, zCmd, nCmd) == 0 { var p uintptr = zArg var nArg Sqlite3_int64 = Sqlite3_int64(libc.Xstrlen(tls, zArg) + uint64(1)) var azArg uintptr = sqlite3Fts5MallocZero(tls, bp+40, int64(uint64(unsafe.Sizeof(uintptr(0)))*uint64(nArg))) @@ -137374,7 +137432,7 @@ if azArg != 0 && pSpace != 0 { if (*Fts5Config)(unsafe.Pointer(pConfig)).FpTok != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+33990, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34037, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { for nArg = int64(0); p != 0 && *(*uint8)(unsafe.Pointer(p)) != 0; nArg++ { @@ -137393,7 +137451,7 @@ } } if p == uintptr(0) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34023, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34070, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { *(*int32)(unsafe.Pointer(bp + 40)) = sqlite3Fts5GetTokenizer(tls, pGlobal, @@ -137408,14 +137466,14 @@ return *(*int32)(unsafe.Pointer(bp + 40)) } - if Xsqlite3_strnicmp(tls, ts+34057, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+34104, zCmd, nCmd) == 0 { if (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent != FTS5_CONTENT_NORMAL { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34065, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34112, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { if *(*uint8)(unsafe.Pointer(zArg)) != 0 { (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent = FTS5_CONTENT_EXTERNAL - (*Fts5Config)(unsafe.Pointer(pConfig)).FzContent = sqlite3Fts5Mprintf(tls, bp+40, ts+34097, libc.VaList(bp+8, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, zArg)) + (*Fts5Config)(unsafe.Pointer(pConfig)).FzContent = sqlite3Fts5Mprintf(tls, bp+40, ts+34144, libc.VaList(bp+8, 
(*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, zArg)) } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent = FTS5_CONTENT_NONE } @@ -137423,9 +137481,9 @@ return *(*int32)(unsafe.Pointer(bp + 40)) } - if Xsqlite3_strnicmp(tls, ts+34103, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+34150, zCmd, nCmd) == 0 { if (*Fts5Config)(unsafe.Pointer(pConfig)).FzContentRowid != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34117, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34164, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FzContentRowid = sqlite3Fts5Strndup(tls, bp+40, zArg, -1) @@ -137433,9 +137491,9 @@ return *(*int32)(unsafe.Pointer(bp + 40)) } - if Xsqlite3_strnicmp(tls, ts+34155, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+34202, zCmd, nCmd) == 0 { if int32(*(*uint8)(unsafe.Pointer(zArg))) != '0' && int32(*(*uint8)(unsafe.Pointer(zArg))) != '1' || int32(*(*uint8)(unsafe.Pointer(zArg + 1))) != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34166, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34213, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize = libc.Bool32(int32(*(*uint8)(unsafe.Pointer(zArg))) == '1') @@ -137447,17 +137505,17 @@ *(*[4]Fts5Enum)(unsafe.Pointer(bp + 48)) = [4]Fts5Enum{ {FzName: ts + 8026, FeVal: FTS5_DETAIL_NONE}, {FzName: ts + 17345}, - {FzName: ts + 34201, FeVal: FTS5_DETAIL_COLUMNS}, + {FzName: ts + 34248, FeVal: FTS5_DETAIL_COLUMNS}, {}, } if libc.AssignPtrInt32(bp+40, fts5ConfigSetEnum(tls, bp+48, zArg, pConfig+92)) != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34209, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34256, 0) } return *(*int32)(unsafe.Pointer(bp + 40)) } - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34240, libc.VaList(bp+24, nCmd, zCmd)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34287, libc.VaList(bp+24, nCmd, zCmd)) return SQLITE_ERROR } @@ -137504,15 +137562,15 @@ defer tls.Free(16) var rc int32 = SQLITE_OK - if 0 == Xsqlite3_stricmp(tls, zCol, ts+22191) || + if 0 == Xsqlite3_stricmp(tls, zCol, ts+22238) || 0 == Xsqlite3_stricmp(tls, zCol, ts+16267) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34268, libc.VaList(bp, zCol)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34315, libc.VaList(bp, zCol)) rc = SQLITE_ERROR } else if zArg != 0 { - if 0 == Xsqlite3_stricmp(tls, zArg, ts+34298) { + if 0 == Xsqlite3_stricmp(tls, zArg, ts+34345) { *(*U8)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(p)).FabUnindexed + uintptr((*Fts5Config)(unsafe.Pointer(p)).FnCol))) = U8(1) } else { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34308, libc.VaList(bp+8, zArg)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34355, libc.VaList(bp+8, zArg)) rc = SQLITE_ERROR } } @@ -137529,13 +137587,13 @@ *(*int32)(unsafe.Pointer(bp + 24)) = SQLITE_OK *(*Fts5Buffer)(unsafe.Pointer(bp + 32)) = Fts5Buffer{} - sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+34339, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(p)).FzContentRowid)) + sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+34386, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(p)).FzContentRowid)) if (*Fts5Config)(unsafe.Pointer(p)).FeContent != FTS5_CONTENT_NONE { for i = 0; i < (*Fts5Config)(unsafe.Pointer(p)).FnCol; i++ { if 
(*Fts5Config)(unsafe.Pointer(p)).FeContent == FTS5_CONTENT_EXTERNAL { - sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+34344, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(p)).FazCol + uintptr(i)*8)))) + sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+34391, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(p)).FazCol + uintptr(i)*8)))) } else { - sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+34351, libc.VaList(bp+16, i)) + sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+34398, libc.VaList(bp+16, i)) } } } @@ -137573,8 +137631,8 @@ (*Fts5Config)(unsafe.Pointer(pRet)).FzName = sqlite3Fts5Strndup(tls, bp+40, *(*uintptr)(unsafe.Pointer(azArg + 2*8)), -1) (*Fts5Config)(unsafe.Pointer(pRet)).FbColumnsize = 1 (*Fts5Config)(unsafe.Pointer(pRet)).FeDetail = FTS5_DETAIL_FULL - if *(*int32)(unsafe.Pointer(bp + 40)) == SQLITE_OK && Xsqlite3_stricmp(tls, (*Fts5Config)(unsafe.Pointer(pRet)).FzName, ts+22191) == 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34359, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pRet)).FzName)) + if *(*int32)(unsafe.Pointer(bp + 40)) == SQLITE_OK && Xsqlite3_stricmp(tls, (*Fts5Config)(unsafe.Pointer(pRet)).FzName, ts+22238) == 0 { + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34406, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pRet)).FzName)) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } @@ -137606,7 +137664,7 @@ if *(*int32)(unsafe.Pointer(bp + 40)) == SQLITE_OK { if z == uintptr(0) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34388, libc.VaList(bp+8, zOrig)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34435, libc.VaList(bp+8, zOrig)) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { if bOption != 0 { @@ -137643,14 +137701,14 @@ var zTail uintptr = uintptr(0) if (*Fts5Config)(unsafe.Pointer(pRet)).FeContent == FTS5_CONTENT_NORMAL { - zTail = ts + 34057 + zTail = ts + 34104 } else if (*Fts5Config)(unsafe.Pointer(pRet)).FbColumnsize != 0 { - zTail = ts + 34408 + zTail = ts + 34455 } if zTail != 0 { (*Fts5Config)(unsafe.Pointer(pRet)).FzContent = sqlite3Fts5Mprintf(tls, - bp+40, ts+34416, libc.VaList(bp+16, (*Fts5Config)(unsafe.Pointer(pRet)).FzDb, (*Fts5Config)(unsafe.Pointer(pRet)).FzName, zTail)) + bp+40, ts+34463, libc.VaList(bp+16, (*Fts5Config)(unsafe.Pointer(pRet)).FzDb, (*Fts5Config)(unsafe.Pointer(pRet)).FzName, zTail)) } } @@ -137699,7 +137757,7 @@ *(*int32)(unsafe.Pointer(bp + 48)) = SQLITE_OK var zSql uintptr - zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34427, 0) + zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34474, 0) for i = 0; zSql != 0 && i < (*Fts5Config)(unsafe.Pointer(pConfig)).FnCol; i++ { var zSep uintptr = func() uintptr { if i == 0 { @@ -137707,10 +137765,10 @@ } return ts + 14614 }() - zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34443, libc.VaList(bp, zSql, zSep, *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(pConfig)).FazCol + uintptr(i)*8)))) + zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34490, libc.VaList(bp, zSql, zSep, *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(pConfig)).FazCol + uintptr(i)*8)))) } - zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34450, - libc.VaList(bp+24, zSql, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, ts+22191)) + zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34497, + libc.VaList(bp+24, zSql, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, ts+22238)) if zSql != 0 { *(*int32)(unsafe.Pointer(bp + 48)) = Xsqlite3_declare_vtab(tls, 
(*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, zSql) @@ -137820,7 +137878,7 @@ var rc int32 = SQLITE_OK - if 0 == Xsqlite3_stricmp(tls, zKey, ts+34476) { + if 0 == Xsqlite3_stricmp(tls, zKey, ts+34523) { var pgsz int32 = 0 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { pgsz = Xsqlite3_value_int(tls, pVal) @@ -137830,7 +137888,7 @@ } else { (*Fts5Config)(unsafe.Pointer(pConfig)).Fpgsz = pgsz } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34481) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34528) { var nHashSize int32 = -1 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { nHashSize = Xsqlite3_value_int(tls, pVal) @@ -137840,7 +137898,7 @@ } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FnHashSize = nHashSize } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34490) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34537) { var nAutomerge int32 = -1 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { nAutomerge = Xsqlite3_value_int(tls, pVal) @@ -137853,7 +137911,7 @@ } (*Fts5Config)(unsafe.Pointer(pConfig)).FnAutomerge = nAutomerge } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34500) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34547) { var nUsermerge int32 = -1 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { nUsermerge = Xsqlite3_value_int(tls, pVal) @@ -137863,7 +137921,7 @@ } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FnUsermerge = nUsermerge } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34510) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34557) { var nCrisisMerge int32 = -1 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { nCrisisMerge = Xsqlite3_value_int(tls, pVal) @@ -137879,7 +137937,7 @@ } (*Fts5Config)(unsafe.Pointer(pConfig)).FnCrisisMerge = nCrisisMerge } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+22191) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+22238) { var zIn uintptr = Xsqlite3_value_text(tls, pVal) rc = sqlite3Fts5ConfigParseRank(tls, zIn, bp, bp+8) @@ -137902,7 +137960,7 @@ bp := tls.Alloc(52) defer tls.Free(52) - var zSelect uintptr = ts + 34522 + var zSelect uintptr = ts + 34569 var zSql uintptr *(*uintptr)(unsafe.Pointer(bp + 40)) = uintptr(0) *(*int32)(unsafe.Pointer(bp + 32)) = SQLITE_OK @@ -137924,7 +137982,7 @@ for SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 40))) { var zK uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 40)), 0) var pVal uintptr = Xsqlite3_column_value(tls, *(*uintptr)(unsafe.Pointer(bp + 40)), 1) - if 0 == Xsqlite3_stricmp(tls, zK, ts+34554) { + if 0 == Xsqlite3_stricmp(tls, zK, ts+34601) { iVersion = Xsqlite3_value_int(tls, pVal) } else { *(*int32)(unsafe.Pointer(bp + 48)) = 0 @@ -137938,7 +137996,7 @@ *(*int32)(unsafe.Pointer(bp + 32)) = SQLITE_ERROR if (*Fts5Config)(unsafe.Pointer(pConfig)).FpzErrmsg != 0 { *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(pConfig)).FpzErrmsg)) = Xsqlite3_mprintf(tls, - ts+34562, + ts+34609, libc.VaList(bp+16, iVersion, FTS5_CURRENT_VERSION)) } } @@ -138036,7 +138094,7 @@ } } if int32(*(*uint8)(unsafe.Pointer(z2))) == 0 { - sqlite3Fts5ParseError(tls, pParse, ts+34627, 0) + sqlite3Fts5ParseError(tls, pParse, ts+34674, 0) return FTS5_EOF } } @@ -138049,20 +138107,20 @@ { var z2 uintptr if sqlite3Fts5IsBareword(tls, *(*uint8)(unsafe.Pointer(z))) == 0 { - sqlite3Fts5ParseError(tls, pParse, ts+34647, libc.VaList(bp, z)) + sqlite3Fts5ParseError(tls, pParse, ts+34694, libc.VaList(bp, z)) return FTS5_EOF } tok = FTS5_STRING for z2 = z + 1; sqlite3Fts5IsBareword(tls, 
*(*uint8)(unsafe.Pointer(z2))) != 0; z2++ { } (*Fts5Token)(unsafe.Pointer(pToken)).Fn = int32((int64(z2) - int64(z)) / 1) - if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 2 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+34678, uint64(2)) == 0 { + if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 2 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+34725, uint64(2)) == 0 { tok = FTS5_OR } - if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 3 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+34681, uint64(3)) == 0 { + if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 3 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+34728, uint64(3)) == 0 { tok = FTS5_NOT } - if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 3 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+30063, uint64(3)) == 0 { + if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 3 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+30110, uint64(3)) == 0 { tok = FTS5_AND } break @@ -139840,9 +139898,9 @@ bp := tls.Alloc(16) defer tls.Free(16) - if (*Fts5Token)(unsafe.Pointer(pTok)).Fn != 4 || libc.Xmemcmp(tls, ts+34685, (*Fts5Token)(unsafe.Pointer(pTok)).Fp, uint64(4)) != 0 { + if (*Fts5Token)(unsafe.Pointer(pTok)).Fn != 4 || libc.Xmemcmp(tls, ts+34732, (*Fts5Token)(unsafe.Pointer(pTok)).Fp, uint64(4)) != 0 { sqlite3Fts5ParseError(tls, - pParse, ts+33714, libc.VaList(bp, (*Fts5Token)(unsafe.Pointer(pTok)).Fn, (*Fts5Token)(unsafe.Pointer(pTok)).Fp)) + pParse, ts+33761, libc.VaList(bp, (*Fts5Token)(unsafe.Pointer(pTok)).Fn, (*Fts5Token)(unsafe.Pointer(pTok)).Fp)) } } @@ -139858,7 +139916,7 @@ var c uint8 = *(*uint8)(unsafe.Pointer((*Fts5Token)(unsafe.Pointer(p)).Fp + uintptr(i))) if int32(c) < '0' || int32(c) > '9' { sqlite3Fts5ParseError(tls, - pParse, ts+34690, libc.VaList(bp, (*Fts5Token)(unsafe.Pointer(p)).Fn, (*Fts5Token)(unsafe.Pointer(p)).Fp)) + pParse, ts+34737, libc.VaList(bp, (*Fts5Token)(unsafe.Pointer(p)).Fn, (*Fts5Token)(unsafe.Pointer(p)).Fp)) return } nNear = nNear*10 + (int32(*(*uint8)(unsafe.Pointer((*Fts5Token)(unsafe.Pointer(p)).Fp + uintptr(i)))) - '0') @@ -139945,7 +140003,7 @@ } } if iCol == (*Fts5Config)(unsafe.Pointer(pConfig)).FnCol { - sqlite3Fts5ParseError(tls, pParse, ts+20528, libc.VaList(bp, z)) + sqlite3Fts5ParseError(tls, pParse, ts+20575, libc.VaList(bp, z)) } else { pRet = fts5ParseColset(tls, pParse, pColset, iCol) } @@ -140026,7 +140084,7 @@ *(*uintptr)(unsafe.Pointer(bp)) = pColset if (*Fts5Config)(unsafe.Pointer((*Fts5Parse)(unsafe.Pointer(pParse)).FpConfig)).FeDetail == FTS5_DETAIL_NONE { sqlite3Fts5ParseError(tls, pParse, - ts+34719, 0) + ts+34766, 0) } else { fts5ParseSetColset(tls, pParse, pExpr, pColset, bp) } @@ -140196,12 +140254,12 @@ (*Fts5ExprPhrase)(unsafe.Pointer(pPhrase)).FnTerm > 1 || (*Fts5ExprPhrase)(unsafe.Pointer(pPhrase)).FnTerm > 0 && (*Fts5ExprTerm)(unsafe.Pointer(pPhrase+32)).FbFirst != 0 { sqlite3Fts5ParseError(tls, pParse, - ts+34772, + ts+34819, libc.VaList(bp, func() uintptr { if (*Fts5ExprNearset)(unsafe.Pointer(pNear)).FnPhrase == 1 { - return ts + 34822 + return ts + 34869 } - return ts + 34685 + return ts + 34732 }())) Xsqlite3_free(tls, pRet) pRet = uintptr(0) @@ -141144,7 +141202,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpReader == uintptr(0) && rc == SQLITE_OK { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig rc = Xsqlite3_blob_open(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, - (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl, ts+34829, 
iRowid, 0, p+56) + (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl, ts+34876, iRowid, 0, p+56) } if rc == SQLITE_ERROR { @@ -141223,7 +141281,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpWriter == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig fts5IndexPrepareStmt(tls, p, p+64, Xsqlite3_mprintf(tls, - ts+34835, + ts+34882, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName))) if (*Fts5Index)(unsafe.Pointer(p)).Frc != 0 { return @@ -141248,7 +141306,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpDeleter == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig var zSql uintptr = Xsqlite3_mprintf(tls, - ts+34886, + ts+34933, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) if fts5IndexPrepareStmt(tls, p, p+72, zSql) != 0 { return @@ -141271,7 +141329,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpIdxDeleter == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig fts5IndexPrepareStmt(tls, p, p+88, Xsqlite3_mprintf(tls, - ts+34935, + ts+34982, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName))) } if (*Fts5Index)(unsafe.Pointer(p)).Frc == SQLITE_OK { @@ -141510,7 +141568,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).Frc == SQLITE_OK { if (*Fts5Index)(unsafe.Pointer(p)).FpDataVersion == uintptr(0) { (*Fts5Index)(unsafe.Pointer(p)).Frc = fts5IndexPrepareStmt(tls, p, p+112, - Xsqlite3_mprintf(tls, ts+34975, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer((*Fts5Index)(unsafe.Pointer(p)).FpConfig)).FzDb))) + Xsqlite3_mprintf(tls, ts+35022, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer((*Fts5Index)(unsafe.Pointer(p)).FpConfig)).FzDb))) if (*Fts5Index)(unsafe.Pointer(p)).Frc != 0 { return int64(0) } @@ -142709,7 +142767,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpIdxSelect == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig fts5IndexPrepareStmt(tls, p, p+96, Xsqlite3_mprintf(tls, - ts+34998, + ts+35045, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName))) } return (*Fts5Index)(unsafe.Pointer(p)).FpIdxSelect @@ -144175,7 +144233,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpIdxWriter == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig fts5IndexPrepareStmt(tls, p, p+80, Xsqlite3_mprintf(tls, - ts+35082, + ts+35129, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName))) } @@ -145257,13 +145315,13 @@ if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { (*Fts5Index)(unsafe.Pointer(p)).FpConfig = pConfig (*Fts5Index)(unsafe.Pointer(p)).FnWorkUnit = FTS5_WORK_UNIT - (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl = sqlite3Fts5Mprintf(tls, bp+8, ts+35139, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) + (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl = sqlite3Fts5Mprintf(tls, bp+8, ts+35186, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) if (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl != 0 && bCreate != 0 { *(*int32)(unsafe.Pointer(bp + 8)) = sqlite3Fts5CreateTable(tls, - pConfig, ts+25056, ts+35147, 0, pzErr) + pConfig, ts+25103, ts+35194, 0, pzErr) if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { *(*int32)(unsafe.Pointer(bp + 8)) = sqlite3Fts5CreateTable(tls, pConfig, ts+11488, - ts+35182, + ts+35229, 1, pzErr) } if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { @@ 
-145516,7 +145574,7 @@ sqlite3Fts5Put32(tls, bp, iNew) rc = Xsqlite3_blob_open(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl, - ts+34829, int64(FTS5_STRUCTURE_ROWID), 1, bp+8) + ts+34876, int64(FTS5_STRUCTURE_ROWID), 1, bp+8) if rc == SQLITE_OK { Xsqlite3_blob_write(tls, *(*uintptr)(unsafe.Pointer(bp + 8)), bp, 4, 0) rc = Xsqlite3_blob_close(tls, *(*uintptr)(unsafe.Pointer(bp + 8))) @@ -145630,7 +145688,7 @@ } fts5IndexPrepareStmt(tls, p, bp+24, Xsqlite3_mprintf(tls, - ts+35226, + ts+35273, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, (*Fts5StructureSegment)(unsafe.Pointer(pSeg)).FiSegid))) for (*Fts5Index)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) { @@ -145800,7 +145858,7 @@ } else { (*Fts5Buffer)(unsafe.Pointer(bp + 16)).Fn = 0 fts5SegiterPoslist(tls, p, *(*uintptr)(unsafe.Pointer(bp))+96+uintptr((*Fts5CResult)(unsafe.Pointer((*Fts5Iter)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FaFirst+1*4)).FiFirst)*120, uintptr(0), bp+16) - sqlite3Fts5BufferAppendBlob(tls, p+52, bp+16, uint32(4), ts+35312) + sqlite3Fts5BufferAppendBlob(tls, p+52, bp+16, uint32(4), ts+35359) for 0 == sqlite3Fts5PoslistNext64(tls, (*Fts5Buffer)(unsafe.Pointer(bp+16)).Fp, (*Fts5Buffer)(unsafe.Pointer(bp+16)).Fn, bp+32, bp+40) { var iCol int32 = int32(*(*I64)(unsafe.Pointer(bp + 40)) >> 32) var iTokOff int32 = int32(*(*I64)(unsafe.Pointer(bp + 40)) & int64(0x7FFFFFFF)) @@ -146071,7 +146129,7 @@ if (*Fts5Config)(unsafe.Pointer(pConfig)).FbLock != 0 { (*Fts5Table)(unsafe.Pointer(pTab)).Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+35317, 0) + ts+35364, 0) return SQLITE_ERROR } @@ -146495,7 +146553,7 @@ (*Fts5Sorter)(unsafe.Pointer(pSorter)).FnIdx = nPhrase rc = fts5PrepareStatement(tls, pSorter, pConfig, - ts+35356, + ts+35403, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zRank, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, func() uintptr { if zRankArgs != 0 { @@ -146511,9 +146569,9 @@ }(), func() uintptr { if bDesc != 0 { - return ts + 35411 + return ts + 35458 } - return ts + 35416 + return ts + 35463 }())) (*Fts5Cursor)(unsafe.Pointer(pCsr)).FpSorter = pSorter @@ -146559,12 +146617,12 @@ (*Fts5Cursor)(unsafe.Pointer(pCsr)).FePlan = FTS5_PLAN_SPECIAL - if n == 5 && 0 == Xsqlite3_strnicmp(tls, ts+35420, z, n) { + if n == 5 && 0 == Xsqlite3_strnicmp(tls, ts+35467, z, n) { (*Fts5Cursor)(unsafe.Pointer(pCsr)).FiSpecial = I64(sqlite3Fts5IndexReads(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.FpIndex)) } else if n == 2 && 0 == Xsqlite3_strnicmp(tls, ts+5057, z, n) { (*Fts5Cursor)(unsafe.Pointer(pCsr)).FiSpecial = (*Fts5Cursor)(unsafe.Pointer(pCsr)).FiCsrId } else { - (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, ts+35426, libc.VaList(bp, n, z)) + (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, ts+35473, libc.VaList(bp, n, z)) rc = SQLITE_ERROR } @@ -146595,7 +146653,7 @@ var zRankArgs uintptr = (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRankArgs if zRankArgs != 0 { - var zSql uintptr = sqlite3Fts5Mprintf(tls, bp+16, ts+35454, libc.VaList(bp, zRankArgs)) + var zSql uintptr = sqlite3Fts5Mprintf(tls, bp+16, ts+35501, libc.VaList(bp, zRankArgs)) if zSql != 0 { *(*uintptr)(unsafe.Pointer(bp + 24)) = uintptr(0) *(*int32)(unsafe.Pointer(bp + 16)) = Xsqlite3_prepare_v3(tls, 
(*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, zSql, -1, @@ -146626,7 +146684,7 @@ if *(*int32)(unsafe.Pointer(bp + 16)) == SQLITE_OK { pAux = fts5FindAuxiliary(tls, pTab, zRank) if pAux == uintptr(0) { - (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, ts+35464, libc.VaList(bp+8, zRank)) + (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, ts+35511, libc.VaList(bp+8, zRank)) *(*int32)(unsafe.Pointer(bp + 16)) = SQLITE_ERROR } } @@ -146658,14 +146716,14 @@ *(*int32)(unsafe.Pointer(pCsr + 80)) |= FTS5CSR_FREE_ZRANK } else if rc == SQLITE_ERROR { (*Sqlite3_vtab)(unsafe.Pointer((*Fts5Cursor)(unsafe.Pointer(pCsr)).Fbase.FpVtab)).FzErrMsg = Xsqlite3_mprintf(tls, - ts+35485, libc.VaList(bp, z)) + ts+35532, libc.VaList(bp, z)) } } else { if (*Fts5Config)(unsafe.Pointer(pConfig)).FzRank != 0 { (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRank = (*Fts5Config)(unsafe.Pointer(pConfig)).FzRank (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRankArgs = (*Fts5Config)(unsafe.Pointer(pConfig)).FzRankArgs } else { - (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRank = ts + 33868 + (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRank = ts + 33915 (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRankArgs = uintptr(0) } } @@ -146721,7 +146779,7 @@ goto __1 } (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+35317, 0) + ts+35364, 0) return SQLITE_ERROR __1: ; @@ -146938,7 +146996,7 @@ goto __40 } *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(pConfig)).FpzErrmsg)) = Xsqlite3_mprintf(tls, - ts+35518, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) + ts+35565, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) rc = SQLITE_ERROR goto __41 __40: @@ -147083,28 +147141,28 @@ var rc int32 = SQLITE_OK *(*int32)(unsafe.Pointer(bp)) = 0 - if 0 == Xsqlite3_stricmp(tls, ts+35554, zCmd) { + if 0 == Xsqlite3_stricmp(tls, ts+35601, zCmd) { if (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NORMAL { fts5SetVtabError(tls, pTab, - ts+35565, 0) + ts+35612, 0) rc = SQLITE_ERROR } else { rc = sqlite3Fts5StorageDeleteAll(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage) } - } else if 0 == Xsqlite3_stricmp(tls, ts+35645, zCmd) { + } else if 0 == Xsqlite3_stricmp(tls, ts+35692, zCmd) { if (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NONE { fts5SetVtabError(tls, pTab, - ts+35653, 0) + ts+35700, 0) rc = SQLITE_ERROR } else { rc = sqlite3Fts5StorageRebuild(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage) } } else if 0 == Xsqlite3_stricmp(tls, ts+16934, zCmd) { rc = sqlite3Fts5StorageOptimize(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage) - } else if 0 == Xsqlite3_stricmp(tls, ts+35709, zCmd) { + } else if 0 == Xsqlite3_stricmp(tls, ts+35756, zCmd) { var nMerge int32 = Xsqlite3_value_int(tls, pVal) rc = sqlite3Fts5StorageMerge(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage, nMerge) - } else if 0 == Xsqlite3_stricmp(tls, ts+35715, zCmd) { + } else if 0 == Xsqlite3_stricmp(tls, ts+35762, zCmd) { var iArg int32 = Xsqlite3_value_int(tls, pVal) rc = sqlite3Fts5StorageIntegrity(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage, iArg) } else { @@ -147175,12 +147233,12 @@ if eType0 == SQLITE_INTEGER && fts5IsContentless(tls, pTab) != 0 { (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+35731, + ts+35778, libc.VaList(bp, func() uintptr { if nArg > 1 { - return ts + 20429 + return ts + 20476 } - return ts + 35768 + return ts + 35815 }(), 
(*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) *(*int32)(unsafe.Pointer(bp + 16)) = SQLITE_ERROR } else if nArg == 1 { @@ -147810,7 +147868,7 @@ pCsr = fts5CursorFromCsrid(tls, (*Fts5Auxiliary)(unsafe.Pointer(pAux)).FpGlobal, iCsrId) if pCsr == uintptr(0) || (*Fts5Cursor)(unsafe.Pointer(pCsr)).FePlan == 0 { - var zErr uintptr = Xsqlite3_mprintf(tls, ts+35780, libc.VaList(bp, iCsrId)) + var zErr uintptr = Xsqlite3_mprintf(tls, ts+35827, libc.VaList(bp, iCsrId)) Xsqlite3_result_error(tls, context, zErr, -1) Xsqlite3_free(tls, zErr) } else { @@ -148054,7 +148112,7 @@ }()) if pMod == uintptr(0) { rc = SQLITE_ERROR - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35801, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(azArg)))) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35848, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(azArg)))) } else { rc = (*struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 @@ -148073,7 +148131,7 @@ (*Fts5Config)(unsafe.Pointer(pConfig)).FpTokApi = pMod + 16 if rc != SQLITE_OK { if pzErr != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35823, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35870, 0) } } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FePattern = sqlite3Fts5TokenizerPattern(tls, @@ -148120,7 +148178,7 @@ var ppApi uintptr _ = nArg - ppApi = Xsqlite3_value_pointer(tls, *(*uintptr)(unsafe.Pointer(apArg)), ts+35854) + ppApi = Xsqlite3_value_pointer(tls, *(*uintptr)(unsafe.Pointer(apArg)), ts+35901) if ppApi != 0 { *(*uintptr)(unsafe.Pointer(ppApi)) = pGlobal } @@ -148129,7 +148187,7 @@ func fts5SourceIdFunc(tls *libc.TLS, pCtx uintptr, nArg int32, apUnused uintptr) { _ = nArg _ = apUnused - Xsqlite3_result_text(tls, pCtx, ts+35867, -1, libc.UintptrFromInt32(-1)) + Xsqlite3_result_text(tls, pCtx, ts+35914, -1, libc.UintptrFromInt32(-1)) } func fts5ShadowName(tls *libc.TLS, zName uintptr) int32 { @@ -148143,7 +148201,7 @@ } var azName2 = [5]uintptr{ - ts + 35958, ts + 34057, ts + 25056, ts + 34408, ts + 11488, + ts + 36005, ts + 34104, ts + 25103, ts + 34455, ts + 11488, } func fts5Init(tls *libc.TLS, db uintptr) int32 { @@ -148167,7 +148225,7 @@ (*Fts5Global)(unsafe.Pointer(pGlobal)).Fapi.FxFindTokenizer = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr, uintptr) int32 }{fts5FindTokenizer})) - rc = Xsqlite3_create_module_v2(tls, db, ts+35965, uintptr(unsafe.Pointer(&fts5Mod)), p, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5ModuleDestroy}))) + rc = Xsqlite3_create_module_v2(tls, db, ts+36012, uintptr(unsafe.Pointer(&fts5Mod)), p, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5ModuleDestroy}))) if rc == SQLITE_OK { rc = sqlite3Fts5IndexInit(tls, db) } @@ -148185,13 +148243,13 @@ } if rc == SQLITE_OK { rc = Xsqlite3_create_function(tls, - db, ts+35965, 1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { + db, ts+36012, 1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{fts5Fts5Func})), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { rc = Xsqlite3_create_function(tls, - db, ts+35970, 0, + db, ts+36017, 0, SQLITE_UTF8|SQLITE_DETERMINISTIC|SQLITE_INNOCUOUS, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) @@ -148248,17 +148306,17 @@ if *(*uintptr)(unsafe.Pointer(p + 40 + uintptr(eStmt)*8)) == uintptr(0) { *(*[11]uintptr)(unsafe.Pointer(bp + 128)) = [11]uintptr{ - ts + 35985, - ts + 36053, - ts + 36122, - ts + 36155, - 
ts + 36194, - ts + 36234, - ts + 36273, - ts + 36314, - ts + 36353, - ts + 36395, - ts + 36435, + ts + 36032, + ts + 36100, + ts + 36169, + ts + 36202, + ts + 36241, + ts + 36281, + ts + 36320, + ts + 36361, + ts + 36400, + ts + 36442, + ts + 36482, } var pC uintptr = (*Fts5Storage)(unsafe.Pointer(p)).FpConfig var zSql uintptr = uintptr(0) @@ -148360,18 +148418,18 @@ defer tls.Free(80) var rc int32 = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36458, + ts+36505, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36562, + ts+36609, libc.VaList(bp+48, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) } if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NORMAL { rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36600, + ts+36647, libc.VaList(bp+64, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) } return rc @@ -148383,7 +148441,7 @@ if *(*int32)(unsafe.Pointer(pRc)) == SQLITE_OK { *(*int32)(unsafe.Pointer(pRc)) = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36638, + ts+36685, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zTail, zName, zTail)) } } @@ -148395,14 +148453,14 @@ var pConfig uintptr = (*Fts5Storage)(unsafe.Pointer(pStorage)).FpConfig *(*int32)(unsafe.Pointer(bp)) = sqlite3Fts5StorageSync(tls, pStorage) - fts5StorageRenameOne(tls, pConfig, bp, ts+25056, zName) + fts5StorageRenameOne(tls, pConfig, bp, ts+25103, zName) fts5StorageRenameOne(tls, pConfig, bp, ts+11488, zName) - fts5StorageRenameOne(tls, pConfig, bp, ts+35958, zName) + fts5StorageRenameOne(tls, pConfig, bp, ts+36005, zName) if (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { - fts5StorageRenameOne(tls, pConfig, bp, ts+34408, zName) + fts5StorageRenameOne(tls, pConfig, bp, ts+34455, zName) } if (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NORMAL { - fts5StorageRenameOne(tls, pConfig, bp, ts+34057, zName) + fts5StorageRenameOne(tls, pConfig, bp, ts+34104, zName) } return *(*int32)(unsafe.Pointer(bp)) } @@ -148414,17 +148472,17 @@ var rc int32 *(*uintptr)(unsafe.Pointer(bp + 64)) = uintptr(0) - rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, bp+64, ts+36680, + rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, bp+64, ts+36727, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zPost, zDefn, func() uintptr { if bWithout != 0 { - return ts + 29709 + return ts + 29756 } return ts + 1554 }())) if *(*uintptr)(unsafe.Pointer(bp + 64)) != 0 { *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, - ts+36710, + ts+36757, libc.VaList(bp+40, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zPost, *(*uintptr)(unsafe.Pointer(bp + 64)))) Xsqlite3_free(tls, *(*uintptr)(unsafe.Pointer(bp + 64))) } @@ -148461,27 +148519,27 @@ } else { var i int32 var iOff int32 - Xsqlite3_snprintf(tls, nDefn, zDefn, ts+36754, 0) + 
Xsqlite3_snprintf(tls, nDefn, zDefn, ts+36801, 0) iOff = int32(libc.Xstrlen(tls, zDefn)) for i = 0; i < (*Fts5Config)(unsafe.Pointer(pConfig)).FnCol; i++ { - Xsqlite3_snprintf(tls, nDefn-iOff, zDefn+uintptr(iOff), ts+36777, libc.VaList(bp, i)) + Xsqlite3_snprintf(tls, nDefn-iOff, zDefn+uintptr(iOff), ts+36824, libc.VaList(bp, i)) iOff = iOff + int32(libc.Xstrlen(tls, zDefn+uintptr(iOff))) } - rc = sqlite3Fts5CreateTable(tls, pConfig, ts+34057, zDefn, 0, pzErr) + rc = sqlite3Fts5CreateTable(tls, pConfig, ts+34104, zDefn, 0, pzErr) } Xsqlite3_free(tls, zDefn) } if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { rc = sqlite3Fts5CreateTable(tls, - pConfig, ts+34408, ts+36783, 0, pzErr) + pConfig, ts+34455, ts+36830, 0, pzErr) } if rc == SQLITE_OK { rc = sqlite3Fts5CreateTable(tls, - pConfig, ts+35958, ts+36815, 1, pzErr) + pConfig, ts+36005, ts+36862, 1, pzErr) } if rc == SQLITE_OK { - rc = sqlite3Fts5StorageConfigValue(tls, p, ts+34554, uintptr(0), FTS5_CURRENT_VERSION) + rc = sqlite3Fts5StorageConfigValue(tls, p, ts+34601, uintptr(0), FTS5_CURRENT_VERSION) } } @@ -148687,12 +148745,12 @@ (*Fts5Storage)(unsafe.Pointer(p)).FbTotalsValid = 0 rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36832, + ts+36879, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36882, + ts+36929, libc.VaList(bp+32, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) } @@ -148700,7 +148758,7 @@ rc = sqlite3Fts5IndexReinit(tls, (*Fts5Storage)(unsafe.Pointer(p)).FpIndex) } if rc == SQLITE_OK { - rc = sqlite3Fts5StorageConfigValue(tls, p, ts+34554, uintptr(0), FTS5_CURRENT_VERSION) + rc = sqlite3Fts5StorageConfigValue(tls, p, ts+34601, uintptr(0), FTS5_CURRENT_VERSION) } return rc } @@ -148876,7 +148934,7 @@ var zSql uintptr var rc int32 - zSql = Xsqlite3_mprintf(tls, ts+36911, + zSql = Xsqlite3_mprintf(tls, ts+36958, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zSuffix)) if zSql == uintptr(0) { rc = SQLITE_NOMEM @@ -149058,14 +149116,14 @@ if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NORMAL { *(*I64)(unsafe.Pointer(bp + 48)) = int64(0) - rc = fts5StorageCount(tls, p, ts+34057, bp+48) + rc = fts5StorageCount(tls, p, ts+34104, bp+48) if rc == SQLITE_OK && *(*I64)(unsafe.Pointer(bp + 48)) != (*Fts5Storage)(unsafe.Pointer(p)).FnTotalRow { rc = SQLITE_CORRUPT | int32(1)<<8 } } if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { *(*I64)(unsafe.Pointer(bp + 56)) = int64(0) - rc = fts5StorageCount(tls, p, ts+34408, bp+56) + rc = fts5StorageCount(tls, p, ts+34455, bp+56) if rc == SQLITE_OK && *(*I64)(unsafe.Pointer(bp + 56)) != (*Fts5Storage)(unsafe.Pointer(p)).FnTotalRow { rc = SQLITE_CORRUPT | int32(1)<<8 } @@ -149260,9 +149318,9 @@ libc.Xmemcpy(tls, p, uintptr(unsafe.Pointer(&aAsciiTokenChar)), uint64(unsafe.Sizeof(aAsciiTokenChar))) for i = 0; rc == SQLITE_OK && i < nArg; i = i + 2 { var zArg uintptr = *(*uintptr)(unsafe.Pointer(azArg + uintptr(i+1)*8)) - if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36943) { + if 0 == 
Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36990) { fts5AsciiAddExceptions(tls, p, zArg, 1) - } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36954) { + } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37001) { fts5AsciiAddExceptions(tls, p, zArg, 0) } else { rc = SQLITE_ERROR @@ -149477,7 +149535,7 @@ } else { p = Xsqlite3_malloc(tls, int32(unsafe.Sizeof(Unicode61Tokenizer{}))) if p != 0 { - var zCat uintptr = ts + 36965 + var zCat uintptr = ts + 37012 var i int32 libc.Xmemset(tls, p, 0, uint64(unsafe.Sizeof(Unicode61Tokenizer{}))) @@ -149489,7 +149547,7 @@ } for i = 0; rc == SQLITE_OK && i < nArg; i = i + 2 { - if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36974) { + if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37021) { zCat = *(*uintptr)(unsafe.Pointer(azArg + uintptr(i+1)*8)) } } @@ -149500,18 +149558,18 @@ for i = 0; rc == SQLITE_OK && i < nArg; i = i + 2 { var zArg uintptr = *(*uintptr)(unsafe.Pointer(azArg + uintptr(i+1)*8)) - if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36985) { + if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37032) { if int32(*(*uint8)(unsafe.Pointer(zArg))) != '0' && int32(*(*uint8)(unsafe.Pointer(zArg))) != '1' && int32(*(*uint8)(unsafe.Pointer(zArg))) != '2' || *(*uint8)(unsafe.Pointer(zArg + 1)) != 0 { rc = SQLITE_ERROR } else { (*Unicode61Tokenizer)(unsafe.Pointer(p)).FeRemoveDiacritic = int32(*(*uint8)(unsafe.Pointer(zArg))) - '0' } - } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36943) { + } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36990) { rc = fts5UnicodeAddExceptions(tls, p, zArg, 1) - } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36954) { + } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37001) { rc = fts5UnicodeAddExceptions(tls, p, zArg, 0) - } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36974) { + } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37021) { } else { rc = SQLITE_ERROR } @@ -149787,7 +149845,7 @@ var rc int32 = SQLITE_OK var pRet uintptr *(*uintptr)(unsafe.Pointer(bp)) = uintptr(0) - var zBase uintptr = ts + 37003 + var zBase uintptr = ts + 37050 if nArg > 0 { zBase = *(*uintptr)(unsafe.Pointer(azArg)) @@ -149929,7 +149987,7 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*uint8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'a': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37013, aBuf+uintptr(nBuf-2), uint64(2)) { + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37060, aBuf+uintptr(nBuf-2), uint64(2)) { if fts5Porter_MGt1(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 } @@ -149937,11 +149995,11 @@ break case 'c': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37016, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37063, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37021, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37068, aBuf+uintptr(nBuf-4), uint64(4)) { if 
fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } @@ -149949,7 +150007,7 @@ break case 'e': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37026, aBuf+uintptr(nBuf-2), uint64(2)) { + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37073, aBuf+uintptr(nBuf-2), uint64(2)) { if fts5Porter_MGt1(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 } @@ -149957,7 +150015,7 @@ break case 'i': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37029, aBuf+uintptr(nBuf-2), uint64(2)) { + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37076, aBuf+uintptr(nBuf-2), uint64(2)) { if fts5Porter_MGt1(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 } @@ -149965,11 +150023,11 @@ break case 'l': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37032, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37079, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37037, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37084, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } @@ -149977,19 +150035,19 @@ break case 'n': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37042, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37089, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37046, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37093, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt1(tls, aBuf, nBuf-5) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37052, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37099, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } - } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37057, aBuf+uintptr(nBuf-3), uint64(3)) { + } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37104, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -149997,11 +150055,11 @@ break case 'o': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37061, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37108, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1_and_S_or_T(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } - } else if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37065, aBuf+uintptr(nBuf-2), uint64(2)) { + } else if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37112, aBuf+uintptr(nBuf-2), uint64(2)) { if fts5Porter_MGt1(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 } @@ -150009,7 +150067,7 @@ break case 's': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37068, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37115, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -150017,11 +150075,11 @@ break case 't': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37072, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37119, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 
{ *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } - } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37076, aBuf+uintptr(nBuf-3), uint64(3)) { + } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37123, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -150029,7 +150087,7 @@ break case 'u': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37080, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37127, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -150037,7 +150095,7 @@ break case 'v': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37084, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37131, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -150045,7 +150103,7 @@ break case 'z': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37088, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37135, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -150061,24 +150119,24 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*uint8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'a': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37092, aBuf+uintptr(nBuf-2), uint64(2)) { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37072, uint64(3)) + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37139, aBuf+uintptr(nBuf-2), uint64(2)) { + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37119, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 + 3 ret = 1 } break case 'b': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37095, aBuf+uintptr(nBuf-2), uint64(2)) { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37098, uint64(3)) + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37142, aBuf+uintptr(nBuf-2), uint64(2)) { + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37145, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 + 3 ret = 1 } break case 'i': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37102, aBuf+uintptr(nBuf-2), uint64(2)) { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37088, uint64(3)) + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37149, aBuf+uintptr(nBuf-2), uint64(2)) { + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37135, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 + 3 ret = 1 } @@ -150093,44 +150151,44 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*uint8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'a': - if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37105, aBuf+uintptr(nBuf-7), uint64(7)) { + if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37152, aBuf+uintptr(nBuf-7), uint64(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37072, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37119, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } - } else if nBuf > 6 && 0 == libc.Xmemcmp(tls, ts+37113, aBuf+uintptr(nBuf-6), uint64(6)) { + } else if nBuf > 6 && 0 == libc.Xmemcmp(tls, ts+37160, aBuf+uintptr(nBuf-6), uint64(6)) { if fts5Porter_MGt0(tls, aBuf, nBuf-6) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-6), ts+37120, uint64(4)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-6), ts+37167, uint64(4)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 6 + 4 } } break case 'c': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37125, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == 
libc.Xmemcmp(tls, ts+37172, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37021, uint64(4)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37068, uint64(4)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 4 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37130, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37177, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37016, uint64(4)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37063, uint64(4)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 4 } } break case 'e': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37135, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37182, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37088, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37135, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 3 } } break case 'g': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37140, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37187, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+15480, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 3 @@ -150139,91 +150197,91 @@ break case 'l': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37145, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37192, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt0(tls, aBuf, nBuf-3) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37098, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37145, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 + 3 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37149, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37196, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37013, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37060, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 2 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37154, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37201, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37057, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37104, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 3 } - } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37160, aBuf+uintptr(nBuf-3), uint64(3)) { + } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37207, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt0(tls, aBuf, nBuf-3) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37164, uint64(1)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37211, uint64(1)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 + 1 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37166, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37213, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37080, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37127, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 3 } } break case 'o': - if nBuf > 7 && 0 == libc.Xmemcmp(tls, 
ts+37172, aBuf+uintptr(nBuf-7), uint64(7)) { + if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37219, aBuf+uintptr(nBuf-7), uint64(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37088, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37135, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37180, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37227, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37072, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37119, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 3 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37186, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37233, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37072, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37119, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 3 } } break case 's': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37191, aBuf+uintptr(nBuf-5), uint64(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37238, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37013, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37060, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } - } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37197, aBuf+uintptr(nBuf-7), uint64(7)) { + } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37244, aBuf+uintptr(nBuf-7), uint64(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37084, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37131, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } - } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37205, aBuf+uintptr(nBuf-7), uint64(7)) { + } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37252, aBuf+uintptr(nBuf-7), uint64(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37213, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37260, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } - } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37217, aBuf+uintptr(nBuf-7), uint64(7)) { + } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37264, aBuf+uintptr(nBuf-7), uint64(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37080, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37127, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } } break case 't': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37225, aBuf+uintptr(nBuf-5), uint64(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37272, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37013, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37060, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37231, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37278, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37084, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37131, uint64(3)) 
*(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 3 } - } else if nBuf > 6 && 0 == libc.Xmemcmp(tls, ts+37237, aBuf+uintptr(nBuf-6), uint64(6)) { + } else if nBuf > 6 && 0 == libc.Xmemcmp(tls, ts+37284, aBuf+uintptr(nBuf-6), uint64(6)) { if fts5Porter_MGt0(tls, aBuf, nBuf-6) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-6), ts+37098, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-6), ts+37145, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 6 + 3 } } @@ -150238,16 +150296,16 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*uint8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'a': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37244, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37291, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37029, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37076, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 2 } } break case 's': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37249, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37296, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } @@ -150255,21 +150313,21 @@ break case 't': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37254, aBuf+uintptr(nBuf-5), uint64(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37301, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37029, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37076, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37260, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37307, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37029, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37076, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } } break case 'u': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37213, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37260, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt0(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -150277,7 +150335,7 @@ break case 'v': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37266, aBuf+uintptr(nBuf-5), uint64(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37313, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 } @@ -150285,9 +150343,9 @@ break case 'z': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37272, aBuf+uintptr(nBuf-5), uint64(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37319, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37013, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37060, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } } @@ -150302,12 +150360,12 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*uint8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'e': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37278, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37325, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt0(tls, aBuf, nBuf-3) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), 
ts+37282, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37329, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 + 2 } - } else if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37285, aBuf+uintptr(nBuf-2), uint64(2)) { + } else if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37332, aBuf+uintptr(nBuf-2), uint64(2)) { if fts5Porter_Vowel(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 ret = 1 @@ -150316,7 +150374,7 @@ break case 'n': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37288, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37335, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_Vowel(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 ret = 1 @@ -150472,7 +150530,7 @@ (*TrigramTokenizer)(unsafe.Pointer(pNew)).FbFold = 1 for i = 0; rc == SQLITE_OK && i < nArg; i = i + 2 { var zArg uintptr = *(*uintptr)(unsafe.Pointer(azArg + uintptr(i+1)*8)) - if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37292) { + if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37339) { if int32(*(*uint8)(unsafe.Pointer(zArg))) != '0' && int32(*(*uint8)(unsafe.Pointer(zArg))) != '1' || *(*uint8)(unsafe.Pointer(zArg + 1)) != 0 { rc = SQLITE_ERROR } else { @@ -150652,22 +150710,22 @@ defer tls.Free(128) *(*[4]BuiltinTokenizer)(unsafe.Pointer(bp)) = [4]BuiltinTokenizer{ - {FzName: ts + 37003, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { + {FzName: ts + 37050, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 }{fts5UnicodeCreate})), FxDelete: *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5UnicodeDelete})), FxTokenize: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, int32, uintptr) int32 }{fts5UnicodeTokenize}))}}, - {FzName: ts + 37307, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { + {FzName: ts + 37354, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 }{fts5AsciiCreate})), FxDelete: *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5AsciiDelete})), FxTokenize: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, int32, uintptr) int32 }{fts5AsciiTokenize}))}}, - {FzName: ts + 37313, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { + {FzName: ts + 37360, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 }{fts5PorterCreate})), FxDelete: *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5PorterDelete})), FxTokenize: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, int32, uintptr) int32 }{fts5PorterTokenize}))}}, - {FzName: ts + 37320, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { + {FzName: ts + 37367, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 }{fts5TriCreate})), FxDelete: *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5TriDelete})), FxTokenize: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, int32, uintptr) int32 @@ -151810,14 +151868,14 @@ var zCopy uintptr = sqlite3Fts5Strndup(tls, bp+8, zType, -1) if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { 
sqlite3Fts5Dequote(tls, zCopy) - if Xsqlite3_stricmp(tls, zCopy, ts+37328) == 0 { + if Xsqlite3_stricmp(tls, zCopy, ts+37375) == 0 { *(*int32)(unsafe.Pointer(peType)) = FTS5_VOCAB_COL - } else if Xsqlite3_stricmp(tls, zCopy, ts+37332) == 0 { + } else if Xsqlite3_stricmp(tls, zCopy, ts+37379) == 0 { *(*int32)(unsafe.Pointer(peType)) = FTS5_VOCAB_ROW - } else if Xsqlite3_stricmp(tls, zCopy, ts+37336) == 0 { + } else if Xsqlite3_stricmp(tls, zCopy, ts+37383) == 0 { *(*int32)(unsafe.Pointer(peType)) = FTS5_VOCAB_INSTANCE } else { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+37345, libc.VaList(bp, zCopy)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+37392, libc.VaList(bp, zCopy)) *(*int32)(unsafe.Pointer(bp + 8)) = SQLITE_ERROR } Xsqlite3_free(tls, zCopy) @@ -151843,19 +151901,19 @@ defer tls.Free(36) *(*[3]uintptr)(unsafe.Pointer(bp + 8)) = [3]uintptr{ - ts + 37379, - ts + 37419, - ts + 37454, + ts + 37426, + ts + 37466, + ts + 37501, } var pRet uintptr = uintptr(0) *(*int32)(unsafe.Pointer(bp + 32)) = SQLITE_OK var bDb int32 - bDb = libc.Bool32(argc == 6 && libc.Xstrlen(tls, *(*uintptr)(unsafe.Pointer(argv + 1*8))) == uint64(4) && libc.Xmemcmp(tls, ts+23352, *(*uintptr)(unsafe.Pointer(argv + 1*8)), uint64(4)) == 0) + bDb = libc.Bool32(argc == 6 && libc.Xstrlen(tls, *(*uintptr)(unsafe.Pointer(argv + 1*8))) == uint64(4) && libc.Xmemcmp(tls, ts+23399, *(*uintptr)(unsafe.Pointer(argv + 1*8)), uint64(4)) == 0) if argc != 5 && bDb == 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+37497, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+37544, 0) *(*int32)(unsafe.Pointer(bp + 32)) = SQLITE_ERROR } else { var nByte int32 @@ -151988,11 +152046,11 @@ if (*Fts5VocabTable)(unsafe.Pointer(pTab)).FbBusy != 0 { (*Sqlite3_vtab)(unsafe.Pointer(pVTab)).FzErrMsg = Xsqlite3_mprintf(tls, - ts+37530, libc.VaList(bp, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) + ts+37577, libc.VaList(bp, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) return SQLITE_ERROR } zSql = sqlite3Fts5Mprintf(tls, bp+64, - ts+37561, + ts+37608, libc.VaList(bp+16, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) if zSql != 0 { *(*int32)(unsafe.Pointer(bp + 64)) = Xsqlite3_prepare_v2(tls, (*Fts5VocabTable)(unsafe.Pointer(pTab)).Fdb, zSql, -1, bp+72, uintptr(0)) @@ -152016,7 +152074,7 @@ *(*uintptr)(unsafe.Pointer(bp + 72)) = uintptr(0) if *(*int32)(unsafe.Pointer(bp + 64)) == SQLITE_OK { (*Sqlite3_vtab)(unsafe.Pointer(pVTab)).FzErrMsg = Xsqlite3_mprintf(tls, - ts+37612, libc.VaList(bp+48, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) + ts+37659, libc.VaList(bp+48, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) *(*int32)(unsafe.Pointer(bp + 64)) = SQLITE_ERROR } } else { @@ -152411,7 +152469,7 @@ func sqlite3Fts5VocabInit(tls *libc.TLS, pGlobal uintptr, db uintptr) int32 { var p uintptr = pGlobal - return Xsqlite3_create_module_v2(tls, db, ts+37638, uintptr(unsafe.Pointer(&fts5Vocab)), p, uintptr(0)) + return Xsqlite3_create_module_v2(tls, db, ts+37685, uintptr(unsafe.Pointer(&fts5Vocab)), p, uintptr(0)) } var fts5Vocab = Sqlite3_module{ @@ -152433,7 +152491,7 @@ // ************* End of stmt.c 
*********************************************** // Return the source-id for this library func Xsqlite3_sourceid(tls *libc.TLS) uintptr { - return ts + 37648 + return ts + 37695 } func init() { @@ -153410,5 +153468,5 @@ *(*func(*libc.TLS, uintptr, int32, uintptr) int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&vfs_template)) + 128)) = rbuVfsGetLastError } -var ts1 = "3.41.0\x00ATOMIC_INTRINSICS=1\x00COMPILER=gcc-10.2.1 20210110\x00DEFAULT_AUTOVACUUM\x00DEFAULT_CACHE_SIZE=-2000\x00DEFAULT_FILE_FORMAT=4\x00DEFAULT_JOURNAL_SIZE_LIMIT=-1\x00DEFAULT_MEMSTATUS=0\x00DEFAULT_MMAP_SIZE=0\x00DEFAULT_PAGE_SIZE=4096\x00DEFAULT_PCACHE_INITSZ=20\x00DEFAULT_RECURSIVE_TRIGGERS\x00DEFAULT_SECTOR_SIZE=4096\x00DEFAULT_SYNCHRONOUS=2\x00DEFAULT_WAL_AUTOCHECKPOINT=1000\x00DEFAULT_WAL_SYNCHRONOUS=2\x00DEFAULT_WORKER_THREADS=0\x00ENABLE_COLUMN_METADATA\x00ENABLE_FTS5\x00ENABLE_GEOPOLY\x00ENABLE_MATH_FUNCTIONS\x00ENABLE_MEMORY_MANAGEMENT\x00ENABLE_OFFSET_SQL_FUNC\x00ENABLE_PREUPDATE_HOOK\x00ENABLE_RBU\x00ENABLE_RTREE\x00ENABLE_SESSION\x00ENABLE_SNAPSHOT\x00ENABLE_STAT4\x00ENABLE_UNLOCK_NOTIFY\x00LIKE_DOESNT_MATCH_BLOBS\x00MALLOC_SOFT_LIMIT=1024\x00MAX_ATTACHED=10\x00MAX_COLUMN=2000\x00MAX_COMPOUND_SELECT=500\x00MAX_DEFAULT_PAGE_SIZE=8192\x00MAX_EXPR_DEPTH=1000\x00MAX_FUNCTION_ARG=127\x00MAX_LENGTH=1000000000\x00MAX_LIKE_PATTERN_LENGTH=50000\x00MAX_MMAP_SIZE=0x7fff0000\x00MAX_PAGE_COUNT=1073741823\x00MAX_PAGE_SIZE=65536\x00MAX_SQL_LENGTH=1000000000\x00MAX_TRIGGER_DEPTH=1000\x00MAX_VARIABLE_NUMBER=32766\x00MAX_VDBE_OP=250000000\x00MAX_WORKER_THREADS=8\x00MUTEX_NOOP\x00SOUNDEX\x00SYSTEM_MALLOC\x00TEMP_STORE=1\x00THREADSAFE=1\x00BINARY\x00ANY\x00BLOB\x00INT\x00INTEGER\x00REAL\x00TEXT\x0020b:20e\x0020c:20e\x0020e\x0040f-21a-21d\x00now\x00local time unavailable\x00second\x00minute\x00hour\x00\x00\x00day\x00\x00\x00\x00month\x00\x00year\x00\x00\x00auto\x00julianday\x00localtime\x00unixepoch\x00utc\x00weekday \x00start of \x00month\x00year\x00day\x00%02d\x00%06.3f\x00%03d\x00%.16g\x00%lld\x00%04d\x00date\x00time\x00datetime\x00strftime\x00current_time\x00current_timestamp\x00current_date\x00failed to allocate %u bytes of memory\x00failed memory resize %u to %u bytes\x00out of memory\x000123456789ABCDEF0123456789abcdef\x00-x0\x00X0\x00%\x00NaN\x00Inf\x00\x00NULL\x00(NULL)\x00.\x00(join-%u)\x00(subquery-%u)\x00thstndrd\x00922337203685477580\x00API call with %s database connection 
pointer\x00unopened\x00invalid\x00Savepoint\x00AutoCommit\x00Transaction\x00Checkpoint\x00JournalMode\x00Vacuum\x00VFilter\x00VUpdate\x00Init\x00Goto\x00Gosub\x00InitCoroutine\x00Yield\x00MustBeInt\x00Jump\x00Once\x00If\x00IfNot\x00IsType\x00Not\x00IfNullRow\x00SeekLT\x00SeekLE\x00SeekGE\x00SeekGT\x00IfNotOpen\x00IfNoHope\x00NoConflict\x00NotFound\x00Found\x00SeekRowid\x00NotExists\x00Last\x00IfSmaller\x00SorterSort\x00Sort\x00Rewind\x00SorterNext\x00Prev\x00Next\x00IdxLE\x00IdxGT\x00IdxLT\x00Or\x00And\x00IdxGE\x00RowSetRead\x00RowSetTest\x00Program\x00FkIfZero\x00IsNull\x00NotNull\x00Ne\x00Eq\x00Gt\x00Le\x00Lt\x00Ge\x00ElseEq\x00IfPos\x00IfNotZero\x00DecrJumpZero\x00IncrVacuum\x00VNext\x00Filter\x00PureFunc\x00Function\x00Return\x00EndCoroutine\x00HaltIfNull\x00Halt\x00Integer\x00Int64\x00String\x00BeginSubrtn\x00Null\x00SoftNull\x00Blob\x00Variable\x00Move\x00Copy\x00SCopy\x00IntCopy\x00FkCheck\x00ResultRow\x00CollSeq\x00AddImm\x00RealAffinity\x00Cast\x00Permutation\x00Compare\x00IsTrue\x00ZeroOrNull\x00Offset\x00Column\x00TypeCheck\x00Affinity\x00MakeRecord\x00Count\x00ReadCookie\x00SetCookie\x00ReopenIdx\x00BitAnd\x00BitOr\x00ShiftLeft\x00ShiftRight\x00Add\x00Subtract\x00Multiply\x00Divide\x00Remainder\x00Concat\x00OpenRead\x00OpenWrite\x00BitNot\x00OpenDup\x00OpenAutoindex\x00String8\x00OpenEphemeral\x00SorterOpen\x00SequenceTest\x00OpenPseudo\x00Close\x00ColumnsUsed\x00SeekScan\x00SeekHit\x00Sequence\x00NewRowid\x00Insert\x00RowCell\x00Delete\x00ResetCount\x00SorterCompare\x00SorterData\x00RowData\x00Rowid\x00NullRow\x00SeekEnd\x00IdxInsert\x00SorterInsert\x00IdxDelete\x00DeferredSeek\x00IdxRowid\x00FinishSeek\x00Destroy\x00Clear\x00ResetSorter\x00CreateBtree\x00SqlExec\x00ParseSchema\x00LoadAnalysis\x00DropTable\x00DropIndex\x00Real\x00DropTrigger\x00IntegrityCk\x00RowSetAdd\x00Param\x00FkCounter\x00MemMax\x00OffsetLimit\x00AggInverse\x00AggStep\x00AggStep1\x00AggValue\x00AggFinal\x00Expire\x00CursorLock\x00CursorUnlock\x00TableLock\x00VBegin\x00VCreate\x00VDestroy\x00VOpen\x00VInitIn\x00VColumn\x00VRename\x00Pagecount\x00MaxPgcnt\x00ClrSubtype\x00FilterAdd\x00Trace\x00CursorHint\x00ReleaseReg\x00Noop\x00Explain\x00Abortable\x00open\x00close\x00access\x00getcwd\x00stat\x00fstat\x00ftruncate\x00fcntl\x00read\x00pread\x00pread64\x00write\x00pwrite\x00pwrite64\x00fchmod\x00fallocate\x00unlink\x00openDirectory\x00mkdir\x00rmdir\x00fchown\x00geteuid\x00mmap\x00munmap\x00mremap\x00getpagesize\x00readlink\x00lstat\x00ioctl\x00attempt to open \"%s\" as file descriptor %d\x00/dev/null\x00os_unix.c:%d: (%d) %s(%s) - %s\x00cannot fstat db file %s\x00file unlinked while open: %s\x00multiple links to file: %s\x00file renamed while open: %s\x00%s\x00full_fsync\x00%s-shm\x00readonly_shm\x00psow\x00unix-excl\x00%s.lock\x00/var/tmp\x00/usr/tmp\x00/tmp\x00SQLITE_TMPDIR\x00TMPDIR\x00%s/etilqs_%llx%c\x00modeof\x00fsync\x00/dev/urandom\x00unix\x00unix-none\x00unix-dotfile\x00memdb\x00memdb(%p,%lld)\x00PRAGMA \"%w\".page_count\x00ATTACH x AS %Q\x00recovered %d pages from %s\x00-journal\x00-wal\x00nolock\x00immutable\x00PRAGMA table_list\x00recovered %d frames from WAL file %s\x00cannot limit WAL size: %s\x00SQLite format 3\x00:memory:\x00@ \x00\n\x00invalid page number %d\x002nd reference to page %d\x00Failed to read ptrmap key=%d\x00Bad ptr map entry key=%d expected=(%d,%d) got=(%d,%d)\x00failed to get page %d\x00freelist leaf count too big on page %d\x00%s is %d but should be %d\x00size\x00overflow list length\x00Page %u: \x00unable to get the page. 
error code=%d\x00btreeInitPage() returns error code %d\x00free space corruption\x00On tree page %u cell %d: \x00On page %u at right child: \x00Offset %d out of range %d..%d\x00Extends off end of page\x00Rowid %lld out of order\x00Child page depth differs\x00Multiple uses for byte %u of page %u\x00Fragmentation of %d bytes reported as %d on page %u\x00Main freelist: \x00max rootpage (%d) disagrees with header (%d)\x00incremental_vacuum enabled with a max rootpage of zero\x00Page %d is never used\x00Pointer map page %d is referenced\x00unknown database %s\x00destination database is in use\x00source and destination must be distinct\x00%!.15g\x00-\x00%s%s\x00k(%d\x00B\x00,%s%s%s\x00N.\x00)\x00%.18s-%s\x00%s(%d)\x00%d\x00(blob)\x00vtab:%p\x00%c%u\x00]\x00program\x00?\x008\x0016LE\x0016BE\x00addr\x00opcode\x00p1\x00p2\x00p3\x00p4\x00p5\x00comment\x00id\x00parent\x00notused\x00detail\x00%.4c%s%.16c\x00MJ delete: %s\x00MJ collide: %s\x00-mj%06X9%02X\x00FOREIGN KEY constraint failed\x00a CHECK constraint\x00a generated column\x00an index\x00non-deterministic use of %s() in %s\x00API called with finalized prepared statement\x00API called with NULL prepared statement\x00string or blob too big\x00bind on a busy prepared statement: [%s]\x00-- \x00'%.*q'\x00zeroblob(%d)\x00x'\x00%02x\x00'\x00%s constraint failed\x00%z: %s\x00abort at %d in [%s]: %s\x00cannot store %s value in %s column %s.%s\x00cannot open savepoint - SQL statements in progress\x00no such savepoint: %s\x00cannot release savepoint - SQL statements in progress\x00cannot commit transaction - SQL statements in progress\x00cannot start a transaction within a transaction\x00cannot rollback - no transaction is active\x00cannot commit - no transaction is active\x00database schema has changed\x00index corruption\x00sqlite_master\x00SELECT*FROM\"%w\".%s WHERE %s ORDER BY rowid\x00too many levels of trigger recursion\x00cannot change %s wal mode from within a transaction\x00into\x00out of\x00database table is locked: %s\x00ValueList\x00-- %s\x00statement aborts at %d: [%s] %s\x00NOT NULL\x00UNIQUE\x00CHECK\x00FOREIGN KEY\x00cannot open value of type %s\x00null\x00real\x00integer\x00no such rowid: %lld\x00cannot open virtual table: %s\x00cannot open table without rowid: %s\x00cannot open view: %s\x00no such column: \"%s\"\x00foreign key\x00indexed\x00cannot open %s column for writing\x00sqlite_\x00sqlite_temp_master\x00sqlite_temp_schema\x00sqlite_schema\x00main\x00*\x00new\x00old\x00excluded\x00misuse of aliased aggregate %s\x00misuse of aliased window function %s\x00row value misused\x00double-quoted string literal: \"%w\"\x00coalesce\x00no such column\x00ambiguous column name\x00%s: %s.%s.%s\x00%s: %s.%s\x00%s: %s\x00partial index WHERE clauses\x00index expressions\x00CHECK constraints\x00generated columns\x00%s prohibited in %s\x00true\x00false\x00the \".\" operator\x00second argument to %#T() must be a constant between 0.0 and 1.0\x00not authorized to use function: %#T\x00non-deterministic functions\x00%#T() may not be used as a window function\x00window\x00aggregate\x00misuse of %s function %#T()\x00no such function: %#T\x00wrong number of arguments to function %#T()\x00FILTER may not be used with non-aggregate %#T()\x00subqueries\x00parameters\x00%r %s BY term out of range - should be between 1 and %d\x00too many terms in ORDER BY clause\x00ORDER\x00%r ORDER BY term does not match any column in the result set\x00too many terms in %s BY clause\x00HAVING clause on a non-aggregate query\x00GROUP\x00aggregate functions are not allowed in the 
GROUP BY clause\x00Expression tree is too large (maximum depth %d)\x00IN(...) element has %d term%s - expected %d\x00s\x000\x00too many arguments on function %T\x00unsafe use of %#T()\x00variable number must be between ?1 and ?%d\x00too many SQL variables\x00%d columns assigned %d values\x00too many columns in %s\x00_ROWID_\x00ROWID\x00OID\x00USING ROWID SEARCH ON TABLE %s FOR IN-OPERATOR\x00USING INDEX %s FOR IN-OPERATOR\x00sub-select returns %d columns - expected %d\x00REUSE LIST SUBQUERY %d\x00%sLIST SUBQUERY %d\x00CORRELATED \x00REUSE SUBQUERY %d\x00%sSCALAR SUBQUERY %d\x001\x000x\x00hex literal too big: %s%#T\x00generated column loop on \"%s\"\x00blob\x00text\x00numeric\x00flexnum\x00none\x00misuse of aggregate: %#T()\x00unknown function: %#T()\x00RAISE() may only be used within a trigger-program\x00B\x00C\x00D\x00E\x00F\x00table %s may not be altered\x00SELECT 1 FROM \"%w\".sqlite_master WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%' AND sqlite_rename_test(%Q, sql, type, name, %d, %Q, %d)=NULL \x00SELECT 1 FROM temp.sqlite_master WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%' AND sqlite_rename_test(%Q, sql, type, name, 1, %Q, %d)=NULL \x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_quotefix(%Q, sql)WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%'\x00UPDATE temp.sqlite_master SET sql = sqlite_rename_quotefix('temp', sql)WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%'\x00there is already another table or index with this name: %s\x00table\x00view %s may not be altered\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, %d) WHERE (type!='index' OR tbl_name=%Q COLLATE nocase)AND name NOT LIKE 'sqliteX_%%' ESCAPE 'X'\x00UPDATE %Q.sqlite_master SET tbl_name = %Q, name = CASE WHEN type='table' THEN %Q WHEN name LIKE 'sqliteX_autoindex%%' ESCAPE 'X' AND type='index' THEN 'sqlite_autoindex_' || %Q || substr(name,%d+18) ELSE name END WHERE tbl_name=%Q COLLATE nocase AND (type='table' OR type='index' OR type='trigger');\x00sqlite_sequence\x00UPDATE \"%w\".sqlite_sequence set name = %Q WHERE name = %Q\x00UPDATE sqlite_temp_schema SET sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, 1), tbl_name = CASE WHEN tbl_name=%Q COLLATE nocase AND sqlite_rename_test(%Q, sql, type, name, 1, 'after rename', 0) THEN %Q ELSE tbl_name END WHERE type IN ('view', 'trigger')\x00after rename\x00SELECT raise(ABORT,%Q) FROM \"%w\".\"%w\"\x00Cannot add a PRIMARY KEY column\x00Cannot add a UNIQUE column\x00Cannot add a REFERENCES column with non-NULL default value\x00Cannot add a NOT NULL column with default value NULL\x00Cannot add a column with non-constant default\x00cannot add a STORED column\x00UPDATE \"%w\".sqlite_master SET sql = printf('%%.%ds, ',sql) || %Q || substr(sql,1+length(printf('%%.%ds',sql))) WHERE type = 'table' AND name = %Q\x00SELECT CASE WHEN quick_check GLOB 'CHECK*' THEN raise(ABORT,'CHECK constraint failed') ELSE raise(ABORT,'NOT NULL constraint failed') END FROM pragma_quick_check(%Q,%Q) WHERE quick_check GLOB 'CHECK*' OR quick_check GLOB 'NULL*'\x00virtual tables may not be altered\x00Cannot add a column to a view\x00sqlite_altertab_%s\x00view\x00virtual table\x00cannot %s %s \"%s\"\x00drop column from\x00rename columns of\x00no such column: \"%T\"\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, %d) WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND (type != 'index' OR 
tbl_name = %Q)\x00UPDATE temp.sqlite_master SET sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, 1) WHERE type IN ('trigger', 'view')\x00error in %s %s%s%s: %s\x00 \x00CREATE \x00\"%w\" \x00%Q%s\x00%.*s%s\x00cannot drop %s column: \"%s\"\x00PRIMARY KEY\x00cannot drop column \"%s\": no other columns exist\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_drop_column(%d, sql, %d) WHERE (type=='table' AND tbl_name=%Q COLLATE nocase)\x00after drop column\x00sqlite_rename_column\x00sqlite_rename_table\x00sqlite_rename_test\x00sqlite_drop_column\x00sqlite_rename_quotefix\x00CREATE TABLE %Q.%s(%s)\x00DELETE FROM %Q.%s WHERE %s=%Q\x00DELETE FROM %Q.%s\x00sqlite_stat1\x00tbl,idx,stat\x00sqlite_stat4\x00tbl,idx,neq,nlt,ndlt,sample\x00sqlite_stat3\x00stat_init\x00stat_push\x00%llu\x00 %llu\x00%llu \x00stat_get\x00sqlite\\_%\x00BBB\x00idx\x00tbl\x00unordered*\x00sz=[0-9]*\x00noskipscan*\x00SELECT idx,count(*) FROM %Q.sqlite_stat4 GROUP BY idx\x00SELECT idx,neq,nlt,ndlt,sample FROM %Q.sqlite_stat4\x00SELECT tbl,idx,stat FROM %Q.sqlite_stat1\x00x\x00\x00too many attached databases - max %d\x00database %s is already in use\x00database is already attached\x00attached databases must use the same text encoding as main database\x00unable to open database: %s\x00no such database: %s\x00cannot detach database %s\x00database %s is locked\x00sqlite_detach\x00sqlite_attach\x00%s cannot use variables\x00%s %T cannot reference objects in database %s\x00authorizer malfunction\x00%s.%s\x00%s.%z\x00access to %z is prohibited\x00not authorized\x00pragma_\x00no such view\x00no such table\x00corrupt database\x00unknown database %T\x00object name reserved for internal use: %s\x00temporary table name must be unqualified\x00%s %T already exists\x00there is already an index named %s\x00sqlite_returning\x00cannot use RETURNING in a trigger\x00too many columns on %s\x00always\x00generated\x00duplicate column name: %s\x00default value of column [%s] is not constant\x00cannot use DEFAULT on a generated column\x00generated columns cannot be part of the PRIMARY KEY\x00table \"%s\" has more than one primary key\x00AUTOINCREMENT is only allowed on an INTEGER PRIMARY KEY\x00virtual tables cannot use computed columns\x00virtual\x00stored\x00error in generated column \"%s\"\x00,\x00\n \x00,\n \x00\n)\x00CREATE TABLE \x00 TEXT\x00 NUM\x00 INT\x00 REAL\x00unknown datatype for %s.%s: \"%s\"\x00missing datatype for %s.%s\x00AUTOINCREMENT not allowed on WITHOUT ROWID tables\x00PRIMARY KEY missing on table %s\x00must have at least one non-generated column\x00TABLE\x00VIEW\x00CREATE %s %.*s\x00UPDATE %Q.sqlite_master SET type='%s', name=%Q, tbl_name=%Q, rootpage=#%d, sql=%Q WHERE rowid=#%d\x00CREATE TABLE %Q.sqlite_sequence(name,seq)\x00tbl_name='%q' AND type!='trigger'\x00parameters are not allowed in views\x00view %s is circularly defined\x00corrupt schema\x00UPDATE %Q.sqlite_master SET rootpage=%d WHERE #%d AND rootpage=#%d\x00sqlite_stat%d\x00DELETE FROM %Q.sqlite_sequence WHERE name=%Q\x00DELETE FROM %Q.sqlite_master WHERE tbl_name=%Q and type!='trigger'\x00table %s may not be dropped\x00use DROP TABLE to delete table %s\x00use DROP VIEW to delete view %s\x00foreign key on %s should reference only one column of table %T\x00number of columns in foreign key does not match the number of columns in the referenced table\x00unknown column \"%s\" in foreign key definition\x00unsupported use of NULLS %s\x00FIRST\x00LAST\x00index\x00cannot create a TEMP index on non-TEMP table \"%s\"\x00table %s may not be indexed\x00views may not be 
indexed\x00virtual tables may not be indexed\x00there is already a table named %s\x00index %s already exists\x00sqlite_autoindex_%s_%d\x00expressions prohibited in PRIMARY KEY and UNIQUE constraints\x00conflicting ON CONFLICT clauses specified\x00invalid rootpage\x00CREATE%s INDEX %.*s\x00 UNIQUE\x00INSERT INTO %Q.sqlite_master VALUES('index',%Q,%Q,#%d,%Q);\x00name='%q' AND type='index'\x00no such index: %S\x00index associated with UNIQUE or PRIMARY KEY constraint cannot be dropped\x00DELETE FROM %Q.sqlite_master WHERE name=%Q AND type='index'\x00too many FROM clause terms, max: %d\x00a JOIN clause is required before %s\x00ON\x00USING\x00BEGIN\x00ROLLBACK\x00COMMIT\x00RELEASE\x00unable to open a temporary database file for storing temporary tables\x00index '%q'\x00, \x00%s.rowid\x00unable to identify the object to be reindexed\x00duplicate WITH table name: %s\x00no such collation sequence: %s\x00unsafe use of virtual table \"%s\"\x00table %s may not be modified\x00cannot modify %s because it is a view\x00rows deleted\x00integer overflow\x00%.*f\x00LIKE or GLOB pattern too complex\x00ESCAPE expression must be a single character\x00%!.20e\x00%Q\x00?000\x00MATCH\x00like\x00implies_nonnull_row\x00expr_compare\x00expr_implies_expr\x00affinity\x00soundex\x00load_extension\x00sqlite_compileoption_used\x00sqlite_compileoption_get\x00unlikely\x00likelihood\x00likely\x00sqlite_offset\x00ltrim\x00rtrim\x00trim\x00min\x00max\x00typeof\x00subtype\x00length\x00instr\x00printf\x00format\x00unicode\x00char\x00abs\x00round\x00upper\x00lower\x00hex\x00unhex\x00ifnull\x00random\x00randomblob\x00nullif\x00sqlite_version\x00sqlite_source_id\x00sqlite_log\x00quote\x00last_insert_rowid\x00changes\x00total_changes\x00replace\x00zeroblob\x00substr\x00substring\x00sum\x00total\x00avg\x00count\x00group_concat\x00glob\x00ceil\x00ceiling\x00floor\x00trunc\x00ln\x00log\x00log10\x00log2\x00exp\x00pow\x00power\x00mod\x00acos\x00asin\x00atan\x00atan2\x00cos\x00sin\x00tan\x00cosh\x00sinh\x00tanh\x00acosh\x00asinh\x00atanh\x00sqrt\x00radians\x00degrees\x00pi\x00sign\x00iif\x00foreign key mismatch - \"%w\" referencing \"%w\"\x00cannot INSERT into generated column \"%s\"\x00table %S has no column named %s\x00table %S has %d columns but %d values were supplied\x00%d values for %d columns\x00UPSERT not implemented for virtual table \"%s\"\x00cannot UPSERT a view\x00rows inserted\x00sqlite3_extension_init\x00sqlite3_\x00lib\x00_init\x00no entry point [%s] in shared library [%s]\x00error during initialization: %s\x00unable to open shared library [%.*s]\x00so\x00automatic extension loading failed: 
%s\x00seq\x00from\x00to\x00on_update\x00on_delete\x00match\x00cid\x00name\x00type\x00notnull\x00dflt_value\x00pk\x00hidden\x00schema\x00ncol\x00wr\x00strict\x00seqno\x00desc\x00coll\x00key\x00builtin\x00enc\x00narg\x00flags\x00wdth\x00hght\x00flgs\x00unique\x00origin\x00partial\x00rowid\x00fkid\x00file\x00busy\x00checkpointed\x00database\x00status\x00cache_size\x00timeout\x00analysis_limit\x00application_id\x00auto_vacuum\x00automatic_index\x00busy_timeout\x00cache_spill\x00case_sensitive_like\x00cell_size_check\x00checkpoint_fullfsync\x00collation_list\x00compile_options\x00count_changes\x00data_version\x00database_list\x00default_cache_size\x00defer_foreign_keys\x00empty_result_callbacks\x00encoding\x00foreign_key_check\x00foreign_key_list\x00foreign_keys\x00freelist_count\x00full_column_names\x00fullfsync\x00function_list\x00hard_heap_limit\x00ignore_check_constraints\x00incremental_vacuum\x00index_info\x00index_list\x00index_xinfo\x00integrity_check\x00journal_mode\x00journal_size_limit\x00legacy_alter_table\x00locking_mode\x00max_page_count\x00mmap_size\x00module_list\x00optimize\x00page_count\x00page_size\x00pragma_list\x00query_only\x00quick_check\x00read_uncommitted\x00recursive_triggers\x00reverse_unordered_selects\x00schema_version\x00secure_delete\x00short_column_names\x00shrink_memory\x00soft_heap_limit\x00synchronous\x00table_info\x00table_list\x00table_xinfo\x00temp_store\x00temp_store_directory\x00threads\x00trusted_schema\x00user_version\x00wal_autocheckpoint\x00wal_checkpoint\x00writable_schema\x00onoffalseyestruextrafull\x00exclusive\x00normal\x00full\x00incremental\x00memory\x00temporary storage cannot be changed from within a transaction\x00SET NULL\x00SET DEFAULT\x00CASCADE\x00RESTRICT\x00NO ACTION\x00delete\x00persist\x00off\x00truncate\x00wal\x00w\x00a\x00sissii\x00utf8\x00utf16le\x00utf16be\x00-%T\x00fast\x00not a writable directory\x00Safety level may not be changed inside a transaction\x00reset\x00issisii\x00issisi\x00SELECT*FROM\"%w\"\x00shadow\x00sssiii\x00iisX\x00isiX\x00c\x00u\x00isisi\x00iss\x00is\x00iissssss\x00NONE\x00siX\x00*** in database %s ***\n\x00row not in PRIMARY KEY order for %s\x00NULL value in %s.%s\x00non-%s value in %s.%s\x00NUMERIC value in %s.%s\x00C\x00TEXT value in %s.%s\x00CHECK constraint failed in %s\x00row \x00 missing from index \x00 values differ from index \x00non-unique entry in index \x00wrong # of entries in index \x00ok\x00unsupported encoding: %s\x00restart\x00ANALYZE \"%w\".\"%w\"\x00UTF8\x00UTF-8\x00UTF-16le\x00UTF-16be\x00UTF16le\x00UTF16be\x00UTF-16\x00UTF16\x00CREATE TABLE x\x00%c\"%s\"\x00(\"%s\"\x00,arg HIDDEN\x00,schema HIDDEN\x00PRAGMA \x00%Q.\x00=%Q\x00error in %s %s after %s: %s\x00malformed database schema (%s)\x00%z - %s\x00rename\x00drop column\x00add column\x00orphan index\x00CREATE TABLE x(type text,name text,tbl_name text,rootpage int,sql text)\x00unsupported file format\x00SELECT*FROM\"%w\".%s ORDER BY rowid\x00database schema is locked: %s\x00statement too long\x00unknown join type: %T%s%T%s%T\x00naturaleftouterightfullinnercross\x00a NATURAL join may not have an ON or USING clause\x00cannot join using column %s - column not present in both tables\x00ambiguous reference to %s in USING()\x00UNION ALL\x00INTERSECT\x00EXCEPT\x00UNION\x00USE TEMP B-TREE FOR %s\x00USE TEMP B-TREE FOR %sORDER BY\x00RIGHT PART OF \x00column%d\x00%.*z:%u\x00NUM\x00cannot use window functions in recursive queries\x00recursive aggregate queries not supported\x00SETUP\x00RECURSIVE STEP\x00SCAN %d CONSTANT ROW%s\x00S\x00COMPOUND 
QUERY\x00LEFT-MOST SUBQUERY\x00%s USING TEMP B-TREE\x00all VALUES must have the same number of terms\x00SELECTs to the left and right of %s do not have the same number of result columns\x00MERGE (%s)\x00LEFT\x00RIGHT\x00no such index: %s\x00'%s' is not a function\x00no such index: \"%s\"\x00multiple references to recursive table: %s\x00circular reference: %s\x00table %s has %d values for %d columns\x00multiple recursive references: %s\x00recursive reference in a subquery: %s\x00%!S\x00too many references to \"%s\": max 65535\x00access to view \"%s\" prohibited\x00..%s\x00%s.%s.%s\x00no such table: %s\x00no tables specified\x00too many columns in result set\x00DISTINCT aggregates must have exactly one argument\x00USE TEMP B-TREE FOR %s(DISTINCT)\x00SCAN %s%s%s\x00 USING COVERING INDEX \x00target object/alias may not appear in FROM clause: %s\x00expected %d columns for '%s' but got %d\x00CO-ROUTINE %!S\x00MATERIALIZE %!S\x00DISTINCT\x00GROUP BY\x00sqlite3_get_table() called with two or more incompatible queries\x00temporary trigger may not have qualified name\x00trigger\x00cannot create triggers on virtual tables\x00trigger %T already exists\x00cannot create trigger on system table\x00cannot create %s trigger on view: %S\x00BEFORE\x00AFTER\x00cannot create INSTEAD OF trigger on table: %S\x00trigger \"%s\" may not write to shadow table \"%s\"\x00INSERT INTO %Q.sqlite_master VALUES('trigger',%Q,%Q,0,'CREATE TRIGGER %q')\x00type='trigger' AND name='%q'\x00no such trigger: %S\x00DELETE FROM %Q.sqlite_master WHERE name=%Q AND type='trigger'\x00%s RETURNING is not available on virtual tables\x00DELETE\x00UPDATE\x00RETURNING may not use \"TABLE.*\" wildcards\x00-- TRIGGER %s\x00cannot UPDATE generated column \"%s\"\x00no such column: %s\x00rows updated\x00%r \x00%sON CONFLICT clause does not match any PRIMARY KEY or UNIQUE constraint\x00CRE\x00INS\x00cannot VACUUM from within a transaction\x00cannot VACUUM - SQL statements in progress\x00non-text filename\x00ATTACH %Q AS vacuum_db\x00output file already exists\x00SELECT sql FROM \"%w\".sqlite_schema WHERE type='table'AND name<>'sqlite_sequence' AND coalesce(rootpage,1)>0\x00SELECT sql FROM \"%w\".sqlite_schema WHERE type='index'\x00SELECT'INSERT INTO vacuum_db.'||quote(name)||' SELECT*FROM\"%w\".'||quote(name)FROM vacuum_db.sqlite_schema WHERE type='table'AND coalesce(rootpage,1)>0\x00INSERT INTO vacuum_db.sqlite_schema SELECT*FROM \"%w\".sqlite_schema WHERE type IN('view','trigger') OR(type='table'AND rootpage=0)\x00CREATE VIRTUAL TABLE %T\x00UPDATE %Q.sqlite_master SET type='table', name=%Q, tbl_name=%Q, rootpage=0, sql=%Q WHERE rowid=#%d\x00name=%Q AND sql=%Q\x00vtable constructor called recursively: %s\x00vtable constructor failed: %s\x00vtable constructor did not declare schema: %s\x00no such module: %s\x00\x00 AND \x00(\x00 (\x00%s=?\x00ANY(%s)\x00>\x00<\x00%s %S\x00SEARCH\x00SCAN\x00AUTOMATIC PARTIAL COVERING INDEX\x00AUTOMATIC COVERING INDEX\x00COVERING INDEX %s\x00INDEX %s\x00 USING \x00 USING INTEGER PRIMARY KEY (%s\x00>? 
AND %s\x00%c?)\x00 VIRTUAL TABLE INDEX %d:%s\x00 LEFT-JOIN\x00BLOOM FILTER ON %S (\x00rowid=?\x00MULTI-INDEX OR\x00INDEX %d\x00RIGHT-JOIN %s\x00regexp\x00ON clause references tables to its right\x00NOCASE\x00too many arguments on %s() - max %d\x00automatic index on %s(%s)\x00auto-index\x00%s.xBestIndex malfunction\x00abbreviated query algorithm search\x00no query solution\x00at most %d tables in a join\x00SCAN CONSTANT ROW\x00second argument to nth_value must be a positive integer\x00argument of ntile must be a positive integer\x00row_number\x00dense_rank\x00rank\x00percent_rank\x00cume_dist\x00ntile\x00last_value\x00nth_value\x00first_value\x00lead\x00lag\x00no such window: %s\x00RANGE with offset PRECEDING/FOLLOWING requires one ORDER BY expression\x00FILTER clause may only be used with aggregate window functions\x00misuse of aggregate: %s()\x00unsupported frame specification\x00PARTITION clause\x00ORDER BY clause\x00frame specification\x00cannot override %s of window: %s\x00DISTINCT is not supported for window functions\x00frame starting offset must be a non-negative integer\x00frame ending offset must be a non-negative integer\x00frame starting offset must be a non-negative number\x00frame ending offset must be a non-negative number\x00%s clause should come after %s not before\x00ORDER BY\x00LIMIT\x00too many terms in compound SELECT\x00syntax error after column name \"%.*s\"\x00parser stack overflow\x00unknown table option: %.*s\x00set list\x00near \"%T\": syntax error\x00qualified table names are not allowed on INSERT, UPDATE, and DELETE statements within triggers\x00the INDEXED BY clause is not allowed on UPDATE or DELETE statements within triggers\x00the NOT INDEXED clause is not allowed on UPDATE or DELETE statements within triggers\x00incomplete input\x00unrecognized token: \"%T\"\x00%s in \"%s\"\x00create\x00temp\x00temporary\x00end\x00explain\x00unable to close due to unfinalized statements or unfinished backups\x00unknown error\x00abort due to ROLLBACK\x00another row available\x00no more rows available\x00not an error\x00SQL logic error\x00access permission denied\x00query aborted\x00database is locked\x00database table is locked\x00attempt to write a readonly database\x00interrupted\x00disk I/O error\x00database disk image is malformed\x00unknown operation\x00database or disk is full\x00unable to open database file\x00locking protocol\x00constraint failed\x00datatype mismatch\x00bad parameter or other API misuse\x00authorization denied\x00column index out of range\x00file is not a database\x00notification message\x00warning message\x00unable to delete/modify user-function due to active statements\x00unable to use function %s in the requested context\x00unknown database: %s\x00unable to delete/modify collation sequence due to active statements\x00file:\x00localhost\x00invalid uri authority: %.*s\x00vfs\x00cache\x00mode\x00no such %s mode: %s\x00%s mode not allowed: %s\x00no such vfs: %s\x00shared\x00private\x00ro\x00rw\x00rwc\x00RTRIM\x00\x00\x00\x00%s at line %d of [%.10s]\x00database corruption\x00misuse\x00cannot open file\x00no such table column: %s.%s\x00SQLITE_\x00database is deadlocked\x00array\x00object\x000123456789abcdef\x00JSON cannot hold BLOB values\x00malformed JSON\x00[0]\x00JSON path error near '%q'\x00json_%s() needs an odd number of arguments\x00$[\x00$.\x00json_object() requires an even number of arguments\x00json_object() labels must be TEXT\x00set\x00insert\x00[]\x00{}\x00CREATE TABLE x(key,value,type,atom,id,parent,fullkey,path,json HIDDEN,root 
HIDDEN)\x00.%.*s\x00[%d]\x00$\x00json\x00json_array\x00json_array_length\x00json_extract\x00->\x00->>\x00json_insert\x00json_object\x00json_patch\x00json_quote\x00json_remove\x00json_replace\x00json_set\x00json_type\x00json_valid\x00json_group_array\x00json_group_object\x00json_each\x00json_tree\x00%s_node\x00data\x00DROP TABLE '%q'.'%q_node';DROP TABLE '%q'.'%q_rowid';DROP TABLE '%q'.'%q_parent';\x00RtreeMatchArg\x00SELECT * FROM %Q.%Q\x00UNIQUE constraint failed: %s.%s\x00rtree constraint failed: %s.(%s<=%s)\x00ALTER TABLE %Q.'%q_node' RENAME TO \"%w_node\";ALTER TABLE %Q.'%q_parent' RENAME TO \"%w_parent\";ALTER TABLE %Q.'%q_rowid' RENAME TO \"%w_rowid\";\x00SELECT stat FROM %Q.sqlite_stat1 WHERE tbl = '%q_rowid'\x00node\x00CREATE TABLE \"%w\".\"%w_rowid\"(rowid INTEGER PRIMARY KEY,nodeno\x00,a%d\x00);CREATE TABLE \"%w\".\"%w_node\"(nodeno INTEGER PRIMARY KEY,data);\x00CREATE TABLE \"%w\".\"%w_parent\"(nodeno INTEGER PRIMARY KEY,parentnode);\x00INSERT INTO \"%w\".\"%w_node\"VALUES(1,zeroblob(%d))\x00INSERT INTO\"%w\".\"%w_rowid\"(rowid,nodeno)VALUES(?1,?2)ON CONFLICT(rowid)DO UPDATE SET nodeno=excluded.nodeno\x00SELECT * FROM \"%w\".\"%w_rowid\" WHERE rowid=?1\x00UPDATE \"%w\".\"%w_rowid\"SET \x00a%d=coalesce(?%d,a%d)\x00a%d=?%d\x00 WHERE rowid=?1\x00INSERT OR REPLACE INTO '%q'.'%q_node' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_node' WHERE nodeno = ?1\x00SELECT nodeno FROM '%q'.'%q_rowid' WHERE rowid = ?1\x00INSERT OR REPLACE INTO '%q'.'%q_rowid' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_rowid' WHERE rowid = ?1\x00SELECT parentnode FROM '%q'.'%q_parent' WHERE nodeno = ?1\x00INSERT OR REPLACE INTO '%q'.'%q_parent' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_parent' WHERE nodeno = ?1\x00PRAGMA %Q.page_size\x00SELECT length(data) FROM '%q'.'%q_node' WHERE nodeno = 1\x00undersize RTree blobs in \"%q_node\"\x00Wrong number of columns for an rtree table\x00Too few columns for an rtree table\x00Too many columns for an rtree table\x00Auxiliary rtree columns must be last\x00CREATE TABLE x(%.*s INT\x00,%.*s\x00);\x00,%.*s REAL\x00,%.*s INT\x00{%lld\x00 %g\x00}\x00Invalid argument to rtreedepth()\x00%z%s%z\x00SELECT data FROM %Q.'%q_node' WHERE nodeno=?\x00Node %lld missing from database\x00SELECT parentnode FROM %Q.'%q_parent' WHERE nodeno=?1\x00SELECT nodeno FROM %Q.'%q_rowid' WHERE rowid=?1\x00Mapping (%lld -> %lld) missing from %s table\x00%_rowid\x00%_parent\x00Found (%lld -> %lld) in %s table, expected (%lld -> %lld)\x00Dimension %d of cell %d on node %lld is corrupt\x00Dimension %d of cell %d on node %lld is corrupt relative to parent\x00Node %lld is too small (%d bytes)\x00Rtree depth out of range (%d)\x00Node %lld is too small for cell count of %d (%d bytes)\x00SELECT count(*) FROM %Q.'%q%s'\x00Wrong number of entries in %%%s table - expected %lld, actual %lld\x00SELECT * FROM %Q.'%q_rowid'\x00Schema corrupt or not an rtree\x00_rowid\x00_parent\x00END\x00wrong number of arguments to function rtreecheck()\x00[\x00[%!g,%!g],\x00[%!g,%!g]]\x00\x00CREATE TABLE x(_shape\x00,%s\x00rtree\x00fullscan\x00_shape does not contain a valid polygon\x00geopoly_overlap\x00geopoly_within\x00geopoly\x00geopoly_area\x00geopoly_blob\x00geopoly_json\x00geopoly_svg\x00geopoly_contains_point\x00geopoly_debug\x00geopoly_bbox\x00geopoly_xform\x00geopoly_regular\x00geopoly_ccw\x00geopoly_group_bbox\x00rtreenode\x00rtreedepth\x00rtreecheck\x00rtree_i32\x00corrupt fossil delta\x00DROP TRIGGER IF EXISTS temp.rbu_insert_tr;DROP TRIGGER IF EXISTS temp.rbu_update1_tr;DROP TRIGGER IF EXISTS temp.rbu_update2_tr;DROP TRIGGER IF 
EXISTS temp.rbu_delete_tr;\x00SELECT rbu_target_name(name, type='view') AS target, name FROM sqlite_schema WHERE type IN ('table', 'view') AND target IS NOT NULL %s ORDER BY name\x00AND rootpage!=0 AND rootpage IS NOT NULL\x00SELECT name, rootpage, sql IS NULL OR substr(8, 6)=='UNIQUE' FROM main.sqlite_schema WHERE type='index' AND tbl_name = ?\x00SELECT (sql COLLATE nocase BETWEEN 'CREATE VIRTUAL' AND 'CREATE VIRTUAM'), rootpage FROM sqlite_schema WHERE name=%Q\x00PRAGMA index_list=%Q\x00SELECT rootpage FROM sqlite_schema WHERE name = %Q\x00PRAGMA table_info=%Q\x00PRAGMA main.index_list = %Q\x00PRAGMA main.index_xinfo = %Q\x00SELECT * FROM '%q'\x00rbu_\x00rbu_rowid\x00table %q %s rbu_rowid column\x00may not have\x00requires\x00PRAGMA table_info(%Q)\x00column missing from %q: %s\x00%z%s\"%w\"\x00%z%s%s\"%w\"%s\x00SELECT max(_rowid_) FROM \"%s%w\"\x00 WHERE _rowid_ > %lld \x00 DESC\x00quote(\x00||','||\x00SELECT %s FROM \"%s%w\" ORDER BY %s LIMIT 1\x00 WHERE (%s) > (%s) \x00_rowid_\x00%z%s \"%w\" COLLATE %Q\x00%z%s \"rbu_imp_%d%w\" COLLATE %Q DESC\x00%z%s quote(\"rbu_imp_%d%w\")\x00SELECT %s FROM \"rbu_imp_%w\" ORDER BY %s LIMIT 1\x00%z%s%s\x00(%s) > (%s)\x00%z%s(%.*s) COLLATE %Q\x00%z%s\"%w\" COLLATE %Q\x00%z%s\"rbu_imp_%d%w\"%s\x00%z%s\"rbu_imp_%d%w\" %s COLLATE %Q\x00%z%s\"rbu_imp_%d%w\" IS ?\x00%z%s%s.\"%w\"\x00%z%sNULL\x00%z, %s._rowid_\x00_rowid_ = ?%d\x00%z%sc%d=?%d\x00_rowid_ = (SELECT id FROM rbu_imposter2 WHERE %z)\x00%z%s\"%w\"=?%d\x00invalid rbu_control value\x00%z%s\"%w\"=rbu_delta(\"%w\", ?%d)\x00%z%s\"%w\"=rbu_fossil_delta(\"%w\", ?%d)\x00PRIMARY KEY(\x00%z%s\"%w\"%s\x00%z)\x00SELECT name FROM sqlite_schema WHERE rootpage = ?\x00%z%sc%d %s COLLATE %Q\x00%z%sc%d%s\x00%z, id INTEGER\x00CREATE TABLE rbu_imposter2(%z, PRIMARY KEY(%z)) WITHOUT ROWID\x00PRIMARY KEY \x00%z%s\"%w\" %s %sCOLLATE %Q%s\x00 NOT NULL\x00%z, %z\x00CREATE TABLE \"rbu_imp_%w\"(%z)%s\x00 WITHOUT ROWID\x00INSERT INTO %s.'rbu_tmp_%q'(rbu_control,%s%s) VALUES(%z)\x00SELECT trim(sql) FROM sqlite_schema WHERE type='index' AND name=?\x00 LIMIT -1 OFFSET %d\x00CREATE TABLE \"rbu_imp_%w\"( %s, PRIMARY KEY( %s ) ) WITHOUT ROWID\x00INSERT INTO \"rbu_imp_%w\" VALUES(%s)\x00DELETE FROM \"rbu_imp_%w\" WHERE %s\x00SELECT %s, 0 AS rbu_control FROM '%q' %s %s %s ORDER BY %s%s\x00AND\x00WHERE\x00SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' %s ORDER BY %s%s\x00SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' %s UNION ALL SELECT %s, rbu_control FROM '%q' %s %s typeof(rbu_control)='integer' AND rbu_control!=1 ORDER BY %s%s\x00rbu_imp_\x00INSERT INTO \"%s%w\"(%s%s) VALUES(%s)\x00, _rowid_\x00DELETE FROM \"%s%w\" WHERE %s\x00, rbu_rowid\x00CREATE TABLE IF NOT EXISTS %s.'rbu_tmp_%q' AS SELECT *%s FROM '%q' WHERE 0;\x00, 0 AS rbu_rowid\x00CREATE TEMP TRIGGER rbu_delete_tr BEFORE DELETE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(3, %s);END;CREATE TEMP TRIGGER rbu_update1_tr BEFORE UPDATE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(3, %s);END;CREATE TEMP TRIGGER rbu_update2_tr AFTER UPDATE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(4, %s);END;\x00CREATE TEMP TRIGGER rbu_insert_tr AFTER INSERT ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(0, %s);END;\x00,_rowid_ \x00,rbu_rowid\x00SELECT %s,%s rbu_control%s FROM '%q'%s %s %s %s\x000 AS \x00UPDATE \"%s%w\" SET %s WHERE %s\x00SELECT k, v FROM %s.rbu_state\x00file:///%s-vacuum?modeof=%s\x00ATTACH %Q AS stat\x00CREATE TABLE IF NOT EXISTS %s.rbu_state(k INTEGER PRIMARY KEY, v)\x00cannot vacuum wal mode 
database\x00file:%s-vactmp?rbu_memory=1%s%s\x00&\x00rbu_tmp_insert\x00rbu_fossil_delta\x00rbu_target_name\x00SELECT * FROM sqlite_schema\x00rbu vfs not found\x00PRAGMA main.wal_checkpoint=restart\x00rbu_exclusive_checkpoint\x00%s-oal\x00%s-wal\x00PRAGMA schema_version\x00PRAGMA schema_version = %d\x00INSERT OR REPLACE INTO %s.rbu_state(k, v) VALUES (%d, %d), (%d, %Q), (%d, %Q), (%d, %d), (%d, %d), (%d, %lld), (%d, %lld), (%d, %lld), (%d, %lld), (%d, %Q) \x00PRAGMA main.%s\x00PRAGMA main.%s = %d\x00PRAGMA writable_schema=1\x00SELECT sql FROM sqlite_schema WHERE sql!='' AND rootpage!=0 AND name!='sqlite_sequence' ORDER BY type DESC\x00SELECT * FROM sqlite_schema WHERE rootpage=0 OR rootpage IS NULL\x00INSERT INTO sqlite_schema VALUES(?,?,?,?,?)\x00PRAGMA writable_schema=0\x00DELETE FROM %s.'rbu_tmp_%q'\x00rbu_state mismatch error\x00rbu_vfs_%d\x00SELECT count(*) FROM sqlite_schema WHERE type='index' AND tbl_name = %Q\x00rbu_index_cnt\x00SELECT 1 FROM sqlite_schema WHERE tbl_name = 'rbu_count'\x00SELECT sum(cnt * (1 + rbu_index_cnt(rbu_target_name(tbl))))FROM rbu_count\x00cannot update wal mode database\x00database modified during rbu %s\x00vacuum\x00update\x00BEGIN IMMEDIATE\x00PRAGMA journal_mode=off\x00-vactmp\x00DELETE FROM stat.rbu_state\x00rbu/zipvfs setup error\x00rbu(%s)/%z\x00rbu_memory\x00SELECT 0, 'tbl', '', 0, '', 1 UNION ALL SELECT 1, 'idx', '', 0, '', 2 UNION ALL SELECT 2, 'stat', '', 0, '', 0\x00PRAGMA '%q'.table_info('%q')\x00%z%s\"%w\".\"%w\".\"%w\"=\"%w\".\"%w\".\"%w\"\x00%z%s\"%w\".\"%w\".\"%w\" IS NOT \"%w\".\"%w\".\"%w\"\x00 OR \x00SELECT * FROM \"%w\".\"%w\" WHERE NOT EXISTS ( SELECT 1 FROM \"%w\".\"%w\" WHERE %s)\x00SELECT * FROM \"%w\".\"%w\", \"%w\".\"%w\" WHERE %s AND (%z)\x00table schemas do not match\x00SELECT tbl, ?2, stat FROM %Q.sqlite_stat1 WHERE tbl IS ?1 AND idx IS (CASE WHEN ?2=X'' THEN NULL ELSE ?2 END)\x00SELECT * FROM \x00 WHERE \x00 IS ?\x00SAVEPOINT changeset\x00RELEASE changeset\x00UPDATE main.\x00 SET \x00 = ?\x00idx IS CASE WHEN length(?4)=0 AND typeof(?4)='blob' THEN NULL ELSE ?4 END \x00DELETE FROM main.\x00 AND (?\x00AND \x00INSERT INTO main.\x00) VALUES(?\x00, ?\x00INSERT INTO main.sqlite_stat1 VALUES(?1, CASE WHEN length(?2)=0 AND typeof(?2)='blob' THEN NULL ELSE ?2 END, ?3)\x00DELETE FROM main.sqlite_stat1 WHERE tbl=?1 AND idx IS CASE WHEN length(?2)=0 AND typeof(?2)='blob' THEN NULL ELSE ?2 END AND (?4 OR stat IS ?3)\x00SAVEPOINT replace_op\x00RELEASE replace_op\x00SAVEPOINT changeset_apply\x00PRAGMA defer_foreign_keys = 1\x00sqlite3changeset_apply(): no such table: %s\x00sqlite3changeset_apply(): table %s has %d columns, expected %d or more\x00sqlite3changeset_apply(): primary key mismatch for table %s\x00PRAGMA defer_foreign_keys = 0\x00RELEASE changeset_apply\x00ROLLBACK TO changeset_apply\x00fts5: parser stack overflow\x00fts5: syntax error near \"%.*s\"\x00%z%.*s\x00wrong number of arguments to function highlight()\x00wrong number of arguments to function snippet()\x00snippet\x00highlight\x00bm25\x00prefix\x00malformed prefix=... directive\x00too many prefix indexes (max %d)\x00prefix length out of range (max 999)\x00tokenize\x00multiple tokenize=... directives\x00parse error in tokenize directive\x00content\x00multiple content=... directives\x00%Q.%Q\x00content_rowid\x00multiple content_rowid=... directives\x00columnsize\x00malformed columnsize=... directive\x00columns\x00malformed detail=... 
directive\x00unrecognized option: \"%.*s\"\x00reserved fts5 column name: %s\x00unindexed\x00unrecognized column option: %s\x00T.%Q\x00, T.%Q\x00, T.c%d\x00reserved fts5 table name: %s\x00parse error in \"%s\"\x00docsize\x00%Q.'%q_%s'\x00CREATE TABLE x(\x00%z%s%Q\x00%z, %Q HIDDEN, %s HIDDEN)\x00pgsz\x00hashsize\x00automerge\x00usermerge\x00crisismerge\x00SELECT k, v FROM %Q.'%q_config'\x00version\x00invalid fts5 file format (found %d, expected %d) - run 'rebuild'\x00unterminated string\x00fts5: syntax error near \"%.1s\"\x00OR\x00NOT\x00NEAR\x00expected integer, got \"%.*s\"\x00fts5: column queries are not supported (detail=none)\x00fts5: %s queries are not supported (detail!=full)\x00phrase\x00block\x00REPLACE INTO '%q'.'%q_data'(id, block) VALUES(?,?)\x00DELETE FROM '%q'.'%q_data' WHERE id>=? AND id<=?\x00DELETE FROM '%q'.'%q_idx' WHERE segid=?\x00PRAGMA %Q.data_version\x00SELECT pgno FROM '%q'.'%q_idx' WHERE segid=? AND term<=? ORDER BY term DESC LIMIT 1\x00INSERT INTO '%q'.'%q_idx'(segid,term,pgno) VALUES(?,?,?)\x00%s_data\x00id INTEGER PRIMARY KEY, block BLOB\x00segid, term, pgno, PRIMARY KEY(segid, term)\x00SELECT segid, term, (pgno>>1), (pgno&1) FROM %Q.'%q_idx' WHERE segid=%d ORDER BY 1, 2\x00\x00\x00\x00\x00\x00recursively defined fts5 content table\x00SELECT rowid, rank FROM %Q.%Q ORDER BY %s(\"%w\"%s%s) %s\x00DESC\x00ASC\x00reads\x00unknown special query: %.*s\x00SELECT %s\x00no such function: %s\x00parse error in rank function: %s\x00%s: table does not support scanning\x00delete-all\x00'delete-all' may only be used with a contentless or external content fts5 table\x00rebuild\x00'rebuild' may not be used with a contentless fts5 table\x00merge\x00integrity-check\x00cannot %s contentless fts5 table: %s\x00DELETE from\x00no such cursor: %lld\x00no such tokenizer: %s\x00error in tokenizer constructor\x00fts5_api_ptr\x00fts5: 2023-02-21 18:09:37 05941c2a04037fc3ed2ffae11f5d2260706f89431f463518740f72ada350866d\x00config\x00fts5\x00fts5_source_id\x00SELECT %s FROM %s T WHERE T.%Q >= ? AND T.%Q <= ? ORDER BY T.%Q ASC\x00SELECT %s FROM %s T WHERE T.%Q <= ? AND T.%Q >= ? 
ORDER BY T.%Q DESC\x00SELECT %s FROM %s T WHERE T.%Q=?\x00INSERT INTO %Q.'%q_content' VALUES(%s)\x00REPLACE INTO %Q.'%q_content' VALUES(%s)\x00DELETE FROM %Q.'%q_content' WHERE id=?\x00REPLACE INTO %Q.'%q_docsize' VALUES(?,?)\x00DELETE FROM %Q.'%q_docsize' WHERE id=?\x00SELECT sz FROM %Q.'%q_docsize' WHERE id=?\x00REPLACE INTO %Q.'%q_config' VALUES(?,?)\x00SELECT %s FROM %s AS T\x00DROP TABLE IF EXISTS %Q.'%q_data';DROP TABLE IF EXISTS %Q.'%q_idx';DROP TABLE IF EXISTS %Q.'%q_config';\x00DROP TABLE IF EXISTS %Q.'%q_docsize';\x00DROP TABLE IF EXISTS %Q.'%q_content';\x00ALTER TABLE %Q.'%q_%s' RENAME TO '%q_%s';\x00CREATE TABLE %Q.'%q_%q'(%s)%s\x00fts5: error creating shadow table %q_%s: %s\x00id INTEGER PRIMARY KEY\x00, c%d\x00id INTEGER PRIMARY KEY, sz BLOB\x00k PRIMARY KEY, v\x00DELETE FROM %Q.'%q_data';DELETE FROM %Q.'%q_idx';\x00DELETE FROM %Q.'%q_docsize';\x00SELECT count(*) FROM %Q.'%q_%s'\x00tokenchars\x00separators\x00L* N* Co\x00categories\x00remove_diacritics\x00unicode61\x00al\x00ance\x00ence\x00er\x00ic\x00able\x00ible\x00ant\x00ement\x00ment\x00ent\x00ion\x00ou\x00ism\x00ate\x00iti\x00ous\x00ive\x00ize\x00at\x00bl\x00ble\x00iz\x00ational\x00tional\x00tion\x00enci\x00anci\x00izer\x00logi\x00bli\x00alli\x00entli\x00eli\x00e\x00ousli\x00ization\x00ation\x00ator\x00alism\x00iveness\x00fulness\x00ful\x00ousness\x00aliti\x00iviti\x00biliti\x00ical\x00ness\x00icate\x00iciti\x00ative\x00alize\x00eed\x00ee\x00ed\x00ing\x00case_sensitive\x00ascii\x00porter\x00trigram\x00col\x00row\x00instance\x00fts5vocab: unknown table type: %Q\x00CREATE TABlE vocab(term, col, doc, cnt)\x00CREATE TABlE vocab(term, doc, cnt)\x00CREATE TABlE vocab(term, doc, col, offset)\x00wrong number of vtable arguments\x00recursive definition for %s.%s\x00SELECT t.%Q FROM %Q.%Q AS t WHERE t.%Q MATCH '*id'\x00no such fts5 table: %s.%s\x00fts5vocab\x002023-02-21 18:09:37 05941c2a04037fc3ed2ffae11f5d2260706f89431f463518740f72ada350866d\x00" +var ts1 = "3.41.2\x00ATOMIC_INTRINSICS=1\x00COMPILER=gcc-10.2.1 20210110\x00DEFAULT_AUTOVACUUM\x00DEFAULT_CACHE_SIZE=-2000\x00DEFAULT_FILE_FORMAT=4\x00DEFAULT_JOURNAL_SIZE_LIMIT=-1\x00DEFAULT_MEMSTATUS=0\x00DEFAULT_MMAP_SIZE=0\x00DEFAULT_PAGE_SIZE=4096\x00DEFAULT_PCACHE_INITSZ=20\x00DEFAULT_RECURSIVE_TRIGGERS\x00DEFAULT_SECTOR_SIZE=4096\x00DEFAULT_SYNCHRONOUS=2\x00DEFAULT_WAL_AUTOCHECKPOINT=1000\x00DEFAULT_WAL_SYNCHRONOUS=2\x00DEFAULT_WORKER_THREADS=0\x00ENABLE_COLUMN_METADATA\x00ENABLE_FTS5\x00ENABLE_GEOPOLY\x00ENABLE_MATH_FUNCTIONS\x00ENABLE_MEMORY_MANAGEMENT\x00ENABLE_OFFSET_SQL_FUNC\x00ENABLE_PREUPDATE_HOOK\x00ENABLE_RBU\x00ENABLE_RTREE\x00ENABLE_SESSION\x00ENABLE_SNAPSHOT\x00ENABLE_STAT4\x00ENABLE_UNLOCK_NOTIFY\x00LIKE_DOESNT_MATCH_BLOBS\x00MALLOC_SOFT_LIMIT=1024\x00MAX_ATTACHED=10\x00MAX_COLUMN=2000\x00MAX_COMPOUND_SELECT=500\x00MAX_DEFAULT_PAGE_SIZE=8192\x00MAX_EXPR_DEPTH=1000\x00MAX_FUNCTION_ARG=127\x00MAX_LENGTH=1000000000\x00MAX_LIKE_PATTERN_LENGTH=50000\x00MAX_MMAP_SIZE=0x7fff0000\x00MAX_PAGE_COUNT=1073741823\x00MAX_PAGE_SIZE=65536\x00MAX_SQL_LENGTH=1000000000\x00MAX_TRIGGER_DEPTH=1000\x00MAX_VARIABLE_NUMBER=32766\x00MAX_VDBE_OP=250000000\x00MAX_WORKER_THREADS=8\x00MUTEX_NOOP\x00SOUNDEX\x00SYSTEM_MALLOC\x00TEMP_STORE=1\x00THREADSAFE=1\x00BINARY\x00ANY\x00BLOB\x00INT\x00INTEGER\x00REAL\x00TEXT\x0020b:20e\x0020c:20e\x0020e\x0040f-21a-21d\x00now\x00local time unavailable\x00second\x00minute\x00hour\x00\x00\x00day\x00\x00\x00\x00month\x00\x00year\x00\x00\x00auto\x00julianday\x00localtime\x00unixepoch\x00utc\x00weekday \x00start of 
\x00month\x00year\x00day\x00%02d\x00%06.3f\x00%03d\x00%.16g\x00%lld\x00%04d\x00date\x00time\x00datetime\x00strftime\x00current_time\x00current_timestamp\x00current_date\x00failed to allocate %u bytes of memory\x00failed memory resize %u to %u bytes\x00out of memory\x000123456789ABCDEF0123456789abcdef\x00-x0\x00X0\x00%\x00NaN\x00Inf\x00\x00NULL\x00(NULL)\x00.\x00(join-%u)\x00(subquery-%u)\x00thstndrd\x00922337203685477580\x00API call with %s database connection pointer\x00unopened\x00invalid\x00Savepoint\x00AutoCommit\x00Transaction\x00Checkpoint\x00JournalMode\x00Vacuum\x00VFilter\x00VUpdate\x00Init\x00Goto\x00Gosub\x00InitCoroutine\x00Yield\x00MustBeInt\x00Jump\x00Once\x00If\x00IfNot\x00IsType\x00Not\x00IfNullRow\x00SeekLT\x00SeekLE\x00SeekGE\x00SeekGT\x00IfNotOpen\x00IfNoHope\x00NoConflict\x00NotFound\x00Found\x00SeekRowid\x00NotExists\x00Last\x00IfSmaller\x00SorterSort\x00Sort\x00Rewind\x00SorterNext\x00Prev\x00Next\x00IdxLE\x00IdxGT\x00IdxLT\x00Or\x00And\x00IdxGE\x00RowSetRead\x00RowSetTest\x00Program\x00FkIfZero\x00IsNull\x00NotNull\x00Ne\x00Eq\x00Gt\x00Le\x00Lt\x00Ge\x00ElseEq\x00IfPos\x00IfNotZero\x00DecrJumpZero\x00IncrVacuum\x00VNext\x00Filter\x00PureFunc\x00Function\x00Return\x00EndCoroutine\x00HaltIfNull\x00Halt\x00Integer\x00Int64\x00String\x00BeginSubrtn\x00Null\x00SoftNull\x00Blob\x00Variable\x00Move\x00Copy\x00SCopy\x00IntCopy\x00FkCheck\x00ResultRow\x00CollSeq\x00AddImm\x00RealAffinity\x00Cast\x00Permutation\x00Compare\x00IsTrue\x00ZeroOrNull\x00Offset\x00Column\x00TypeCheck\x00Affinity\x00MakeRecord\x00Count\x00ReadCookie\x00SetCookie\x00ReopenIdx\x00BitAnd\x00BitOr\x00ShiftLeft\x00ShiftRight\x00Add\x00Subtract\x00Multiply\x00Divide\x00Remainder\x00Concat\x00OpenRead\x00OpenWrite\x00BitNot\x00OpenDup\x00OpenAutoindex\x00String8\x00OpenEphemeral\x00SorterOpen\x00SequenceTest\x00OpenPseudo\x00Close\x00ColumnsUsed\x00SeekScan\x00SeekHit\x00Sequence\x00NewRowid\x00Insert\x00RowCell\x00Delete\x00ResetCount\x00SorterCompare\x00SorterData\x00RowData\x00Rowid\x00NullRow\x00SeekEnd\x00IdxInsert\x00SorterInsert\x00IdxDelete\x00DeferredSeek\x00IdxRowid\x00FinishSeek\x00Destroy\x00Clear\x00ResetSorter\x00CreateBtree\x00SqlExec\x00ParseSchema\x00LoadAnalysis\x00DropTable\x00DropIndex\x00Real\x00DropTrigger\x00IntegrityCk\x00RowSetAdd\x00Param\x00FkCounter\x00MemMax\x00OffsetLimit\x00AggInverse\x00AggStep\x00AggStep1\x00AggValue\x00AggFinal\x00Expire\x00CursorLock\x00CursorUnlock\x00TableLock\x00VBegin\x00VCreate\x00VDestroy\x00VOpen\x00VInitIn\x00VColumn\x00VRename\x00Pagecount\x00MaxPgcnt\x00ClrSubtype\x00FilterAdd\x00Trace\x00CursorHint\x00ReleaseReg\x00Noop\x00Explain\x00Abortable\x00open\x00close\x00access\x00getcwd\x00stat\x00fstat\x00ftruncate\x00fcntl\x00read\x00pread\x00pread64\x00write\x00pwrite\x00pwrite64\x00fchmod\x00fallocate\x00unlink\x00openDirectory\x00mkdir\x00rmdir\x00fchown\x00geteuid\x00mmap\x00munmap\x00mremap\x00getpagesize\x00readlink\x00lstat\x00ioctl\x00attempt to open \"%s\" as file descriptor %d\x00/dev/null\x00os_unix.c:%d: (%d) %s(%s) - %s\x00cannot fstat db file %s\x00file unlinked while open: %s\x00multiple links to file: %s\x00file renamed while open: %s\x00%s\x00full_fsync\x00%s-shm\x00readonly_shm\x00psow\x00unix-excl\x00%s.lock\x00/var/tmp\x00/usr/tmp\x00/tmp\x00SQLITE_TMPDIR\x00TMPDIR\x00%s/etilqs_%llx%c\x00modeof\x00fsync\x00/dev/urandom\x00unix\x00unix-none\x00unix-dotfile\x00memdb\x00memdb(%p,%lld)\x00PRAGMA \"%w\".page_count\x00ATTACH x AS %Q\x00recovered %d pages from %s\x00-journal\x00-wal\x00nolock\x00immutable\x00PRAGMA 
table_list\x00recovered %d frames from WAL file %s\x00cannot limit WAL size: %s\x00SQLite format 3\x00:memory:\x00@ \x00\n\x00invalid page number %d\x002nd reference to page %d\x00Failed to read ptrmap key=%d\x00Bad ptr map entry key=%d expected=(%d,%d) got=(%d,%d)\x00failed to get page %d\x00freelist leaf count too big on page %d\x00%s is %d but should be %d\x00size\x00overflow list length\x00Page %u: \x00unable to get the page. error code=%d\x00btreeInitPage() returns error code %d\x00free space corruption\x00On tree page %u cell %d: \x00On page %u at right child: \x00Offset %d out of range %d..%d\x00Extends off end of page\x00Rowid %lld out of order\x00Child page depth differs\x00Multiple uses for byte %u of page %u\x00Fragmentation of %d bytes reported as %d on page %u\x00Main freelist: \x00max rootpage (%d) disagrees with header (%d)\x00incremental_vacuum enabled with a max rootpage of zero\x00Page %d is never used\x00Pointer map page %d is referenced\x00unknown database %s\x00destination database is in use\x00source and destination must be distinct\x00%!.15g\x00-\x00%s%s\x00k(%d\x00B\x00,%s%s%s\x00N.\x00)\x00%.18s-%s\x00%s(%d)\x00%d\x00(blob)\x00vtab:%p\x00%c%u\x00]\x00program\x00?\x008\x0016LE\x0016BE\x00addr\x00opcode\x00p1\x00p2\x00p3\x00p4\x00p5\x00comment\x00id\x00parent\x00notused\x00detail\x00%.4c%s%.16c\x00MJ delete: %s\x00MJ collide: %s\x00-mj%06X9%02X\x00FOREIGN KEY constraint failed\x00a CHECK constraint\x00a generated column\x00an index\x00non-deterministic use of %s() in %s\x00API called with finalized prepared statement\x00API called with NULL prepared statement\x00string or blob too big\x00bind on a busy prepared statement: [%s]\x00-- \x00'%.*q'\x00zeroblob(%d)\x00x'\x00%02x\x00'\x00%s constraint failed\x00%z: %s\x00abort at %d in [%s]: %s\x00cannot store %s value in %s column %s.%s\x00cannot open savepoint - SQL statements in progress\x00no such savepoint: %s\x00cannot release savepoint - SQL statements in progress\x00cannot commit transaction - SQL statements in progress\x00cannot start a transaction within a transaction\x00cannot rollback - no transaction is active\x00cannot commit - no transaction is active\x00database schema has changed\x00index corruption\x00sqlite_master\x00SELECT*FROM\"%w\".%s WHERE %s ORDER BY rowid\x00too many levels of trigger recursion\x00cannot change %s wal mode from within a transaction\x00into\x00out of\x00database table is locked: %s\x00ValueList\x00-- %s\x00statement aborts at %d: [%s] %s\x00NOT NULL\x00UNIQUE\x00CHECK\x00FOREIGN KEY\x00cannot open value of type %s\x00null\x00real\x00integer\x00no such rowid: %lld\x00cannot open virtual table: %s\x00cannot open table without rowid: %s\x00cannot open view: %s\x00no such column: \"%s\"\x00foreign key\x00indexed\x00cannot open %s column for writing\x00sqlite_\x00sqlite_temp_master\x00sqlite_temp_schema\x00sqlite_schema\x00main\x00*\x00new\x00old\x00excluded\x00misuse of aliased aggregate %s\x00misuse of aliased window function %s\x00row value misused\x00double-quoted string literal: \"%w\"\x00coalesce\x00no such column\x00ambiguous column name\x00%s: %s.%s.%s\x00%s: %s.%s\x00%s: %s\x00partial index WHERE clauses\x00index expressions\x00CHECK constraints\x00generated columns\x00%s prohibited in %s\x00the \".\" operator\x00second argument to %#T() must be a constant between 0.0 and 1.0\x00not authorized to use function: %#T\x00non-deterministic functions\x00%#T() may not be used as a window function\x00window\x00aggregate\x00misuse of %s function %#T()\x00no such function: %#T\x00wrong 
number of arguments to function %#T()\x00FILTER may not be used with non-aggregate %#T()\x00subqueries\x00parameters\x00%r %s BY term out of range - should be between 1 and %d\x00too many terms in ORDER BY clause\x00ORDER\x00%r ORDER BY term does not match any column in the result set\x00too many terms in %s BY clause\x00HAVING clause on a non-aggregate query\x00GROUP\x00aggregate functions are not allowed in the GROUP BY clause\x00Expression tree is too large (maximum depth %d)\x00IN(...) element has %d term%s - expected %d\x00s\x000\x00too many arguments on function %T\x00unsafe use of %#T()\x00variable number must be between ?1 and ?%d\x00too many SQL variables\x00%d columns assigned %d values\x00too many columns in %s\x00true\x00false\x00_ROWID_\x00ROWID\x00OID\x00USING ROWID SEARCH ON TABLE %s FOR IN-OPERATOR\x00USING INDEX %s FOR IN-OPERATOR\x00sub-select returns %d columns - expected %d\x00REUSE LIST SUBQUERY %d\x00%sLIST SUBQUERY %d\x00CORRELATED \x00REUSE SUBQUERY %d\x00%sSCALAR SUBQUERY %d\x001\x000x\x00hex literal too big: %s%#T\x00generated column loop on \"%s\"\x00blob\x00text\x00numeric\x00flexnum\x00none\x00misuse of aggregate: %#T()\x00unknown function: %#T()\x00RAISE() may only be used within a trigger-program\x00B\x00C\x00D\x00E\x00F\x00table %s may not be altered\x00SELECT 1 FROM \"%w\".sqlite_master WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%' AND sqlite_rename_test(%Q, sql, type, name, %d, %Q, %d)=NULL \x00SELECT 1 FROM temp.sqlite_master WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%' AND sqlite_rename_test(%Q, sql, type, name, 1, %Q, %d)=NULL \x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_quotefix(%Q, sql)WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%'\x00UPDATE temp.sqlite_master SET sql = sqlite_rename_quotefix('temp', sql)WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%'\x00there is already another table or index with this name: %s\x00table\x00view %s may not be altered\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, %d) WHERE (type!='index' OR tbl_name=%Q COLLATE nocase)AND name NOT LIKE 'sqliteX_%%' ESCAPE 'X'\x00UPDATE %Q.sqlite_master SET tbl_name = %Q, name = CASE WHEN type='table' THEN %Q WHEN name LIKE 'sqliteX_autoindex%%' ESCAPE 'X' AND type='index' THEN 'sqlite_autoindex_' || %Q || substr(name,%d+18) ELSE name END WHERE tbl_name=%Q COLLATE nocase AND (type='table' OR type='index' OR type='trigger');\x00sqlite_sequence\x00UPDATE \"%w\".sqlite_sequence set name = %Q WHERE name = %Q\x00UPDATE sqlite_temp_schema SET sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, 1), tbl_name = CASE WHEN tbl_name=%Q COLLATE nocase AND sqlite_rename_test(%Q, sql, type, name, 1, 'after rename', 0) THEN %Q ELSE tbl_name END WHERE type IN ('view', 'trigger')\x00after rename\x00SELECT raise(ABORT,%Q) FROM \"%w\".\"%w\"\x00Cannot add a PRIMARY KEY column\x00Cannot add a UNIQUE column\x00Cannot add a REFERENCES column with non-NULL default value\x00Cannot add a NOT NULL column with default value NULL\x00Cannot add a column with non-constant default\x00cannot add a STORED column\x00UPDATE \"%w\".sqlite_master SET sql = printf('%%.%ds, ',sql) || %Q || substr(sql,1+length(printf('%%.%ds',sql))) WHERE type = 'table' AND name = %Q\x00SELECT CASE WHEN quick_check GLOB 'CHECK*' THEN raise(ABORT,'CHECK constraint failed') ELSE raise(ABORT,'NOT NULL constraint failed') END FROM pragma_quick_check(%Q,%Q) 
WHERE quick_check GLOB 'CHECK*' OR quick_check GLOB 'NULL*'\x00virtual tables may not be altered\x00Cannot add a column to a view\x00sqlite_altertab_%s\x00view\x00virtual table\x00cannot %s %s \"%s\"\x00drop column from\x00rename columns of\x00no such column: \"%T\"\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, %d) WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND (type != 'index' OR tbl_name = %Q)\x00UPDATE temp.sqlite_master SET sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, 1) WHERE type IN ('trigger', 'view')\x00error in %s %s%s%s: %s\x00 \x00CREATE \x00\"%w\" \x00%Q%s\x00%.*s%s\x00cannot drop %s column: \"%s\"\x00PRIMARY KEY\x00cannot drop column \"%s\": no other columns exist\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_drop_column(%d, sql, %d) WHERE (type=='table' AND tbl_name=%Q COLLATE nocase)\x00after drop column\x00sqlite_rename_column\x00sqlite_rename_table\x00sqlite_rename_test\x00sqlite_drop_column\x00sqlite_rename_quotefix\x00CREATE TABLE %Q.%s(%s)\x00DELETE FROM %Q.%s WHERE %s=%Q\x00DELETE FROM %Q.%s\x00sqlite_stat1\x00tbl,idx,stat\x00sqlite_stat4\x00tbl,idx,neq,nlt,ndlt,sample\x00sqlite_stat3\x00stat_init\x00stat_push\x00%llu\x00 %llu\x00%llu \x00stat_get\x00sqlite\\_%\x00BBB\x00idx\x00tbl\x00unordered*\x00sz=[0-9]*\x00noskipscan*\x00SELECT idx,count(*) FROM %Q.sqlite_stat4 GROUP BY idx\x00SELECT idx,neq,nlt,ndlt,sample FROM %Q.sqlite_stat4\x00SELECT tbl,idx,stat FROM %Q.sqlite_stat1\x00x\x00\x00too many attached databases - max %d\x00database %s is already in use\x00database is already attached\x00attached databases must use the same text encoding as main database\x00unable to open database: %s\x00no such database: %s\x00cannot detach database %s\x00database %s is locked\x00sqlite_detach\x00sqlite_attach\x00%s cannot use variables\x00%s %T cannot reference objects in database %s\x00authorizer malfunction\x00%s.%s\x00%s.%z\x00access to %z is prohibited\x00not authorized\x00pragma_\x00no such view\x00no such table\x00corrupt database\x00unknown database %T\x00object name reserved for internal use: %s\x00temporary table name must be unqualified\x00%s %T already exists\x00there is already an index named %s\x00sqlite_returning\x00cannot use RETURNING in a trigger\x00too many columns on %s\x00always\x00generated\x00duplicate column name: %s\x00default value of column [%s] is not constant\x00cannot use DEFAULT on a generated column\x00generated columns cannot be part of the PRIMARY KEY\x00table \"%s\" has more than one primary key\x00AUTOINCREMENT is only allowed on an INTEGER PRIMARY KEY\x00virtual tables cannot use computed columns\x00virtual\x00stored\x00error in generated column \"%s\"\x00,\x00\n \x00,\n \x00\n)\x00CREATE TABLE \x00 TEXT\x00 NUM\x00 INT\x00 REAL\x00unknown datatype for %s.%s: \"%s\"\x00missing datatype for %s.%s\x00AUTOINCREMENT not allowed on WITHOUT ROWID tables\x00PRIMARY KEY missing on table %s\x00must have at least one non-generated column\x00TABLE\x00VIEW\x00CREATE %s %.*s\x00UPDATE %Q.sqlite_master SET type='%s', name=%Q, tbl_name=%Q, rootpage=#%d, sql=%Q WHERE rowid=#%d\x00CREATE TABLE %Q.sqlite_sequence(name,seq)\x00tbl_name='%q' AND type!='trigger'\x00parameters are not allowed in views\x00view %s is circularly defined\x00corrupt schema\x00UPDATE %Q.sqlite_master SET rootpage=%d WHERE #%d AND rootpage=#%d\x00sqlite_stat%d\x00DELETE FROM %Q.sqlite_sequence WHERE name=%Q\x00DELETE FROM %Q.sqlite_master WHERE tbl_name=%Q and type!='trigger'\x00table %s may not be dropped\x00use 
DROP TABLE to delete table %s\x00use DROP VIEW to delete view %s\x00foreign key on %s should reference only one column of table %T\x00number of columns in foreign key does not match the number of columns in the referenced table\x00unknown column \"%s\" in foreign key definition\x00unsupported use of NULLS %s\x00FIRST\x00LAST\x00index\x00cannot create a TEMP index on non-TEMP table \"%s\"\x00table %s may not be indexed\x00views may not be indexed\x00virtual tables may not be indexed\x00there is already a table named %s\x00index %s already exists\x00sqlite_autoindex_%s_%d\x00expressions prohibited in PRIMARY KEY and UNIQUE constraints\x00conflicting ON CONFLICT clauses specified\x00invalid rootpage\x00CREATE%s INDEX %.*s\x00 UNIQUE\x00INSERT INTO %Q.sqlite_master VALUES('index',%Q,%Q,#%d,%Q);\x00name='%q' AND type='index'\x00no such index: %S\x00index associated with UNIQUE or PRIMARY KEY constraint cannot be dropped\x00DELETE FROM %Q.sqlite_master WHERE name=%Q AND type='index'\x00too many FROM clause terms, max: %d\x00a JOIN clause is required before %s\x00ON\x00USING\x00BEGIN\x00ROLLBACK\x00COMMIT\x00RELEASE\x00unable to open a temporary database file for storing temporary tables\x00index '%q'\x00, \x00%s.rowid\x00unable to identify the object to be reindexed\x00duplicate WITH table name: %s\x00no such collation sequence: %s\x00unsafe use of virtual table \"%s\"\x00table %s may not be modified\x00cannot modify %s because it is a view\x00rows deleted\x00integer overflow\x00%.*f\x00LIKE or GLOB pattern too complex\x00ESCAPE expression must be a single character\x00%!.20e\x00%Q\x00?000\x00MATCH\x00like\x00implies_nonnull_row\x00expr_compare\x00expr_implies_expr\x00affinity\x00soundex\x00load_extension\x00sqlite_compileoption_used\x00sqlite_compileoption_get\x00unlikely\x00likelihood\x00likely\x00sqlite_offset\x00ltrim\x00rtrim\x00trim\x00min\x00max\x00typeof\x00subtype\x00length\x00instr\x00printf\x00format\x00unicode\x00char\x00abs\x00round\x00upper\x00lower\x00hex\x00unhex\x00ifnull\x00random\x00randomblob\x00nullif\x00sqlite_version\x00sqlite_source_id\x00sqlite_log\x00quote\x00last_insert_rowid\x00changes\x00total_changes\x00replace\x00zeroblob\x00substr\x00substring\x00sum\x00total\x00avg\x00count\x00group_concat\x00glob\x00ceil\x00ceiling\x00floor\x00trunc\x00ln\x00log\x00log10\x00log2\x00exp\x00pow\x00power\x00mod\x00acos\x00asin\x00atan\x00atan2\x00cos\x00sin\x00tan\x00cosh\x00sinh\x00tanh\x00acosh\x00asinh\x00atanh\x00sqrt\x00radians\x00degrees\x00pi\x00sign\x00iif\x00foreign key mismatch - \"%w\" referencing \"%w\"\x00cannot INSERT into generated column \"%s\"\x00table %S has no column named %s\x00table %S has %d columns but %d values were supplied\x00%d values for %d columns\x00UPSERT not implemented for virtual table \"%s\"\x00cannot UPSERT a view\x00rows inserted\x00sqlite3_extension_init\x00sqlite3_\x00lib\x00_init\x00no entry point [%s] in shared library [%s]\x00error during initialization: %s\x00unable to open shared library [%.*s]\x00so\x00automatic extension loading failed: 
%s\x00seq\x00from\x00to\x00on_update\x00on_delete\x00match\x00cid\x00name\x00type\x00notnull\x00dflt_value\x00pk\x00hidden\x00schema\x00ncol\x00wr\x00strict\x00seqno\x00desc\x00coll\x00key\x00builtin\x00enc\x00narg\x00flags\x00wdth\x00hght\x00flgs\x00unique\x00origin\x00partial\x00rowid\x00fkid\x00file\x00busy\x00checkpointed\x00database\x00status\x00cache_size\x00timeout\x00analysis_limit\x00application_id\x00auto_vacuum\x00automatic_index\x00busy_timeout\x00cache_spill\x00case_sensitive_like\x00cell_size_check\x00checkpoint_fullfsync\x00collation_list\x00compile_options\x00count_changes\x00data_version\x00database_list\x00default_cache_size\x00defer_foreign_keys\x00empty_result_callbacks\x00encoding\x00foreign_key_check\x00foreign_key_list\x00foreign_keys\x00freelist_count\x00full_column_names\x00fullfsync\x00function_list\x00hard_heap_limit\x00ignore_check_constraints\x00incremental_vacuum\x00index_info\x00index_list\x00index_xinfo\x00integrity_check\x00journal_mode\x00journal_size_limit\x00legacy_alter_table\x00locking_mode\x00max_page_count\x00mmap_size\x00module_list\x00optimize\x00page_count\x00page_size\x00pragma_list\x00query_only\x00quick_check\x00read_uncommitted\x00recursive_triggers\x00reverse_unordered_selects\x00schema_version\x00secure_delete\x00short_column_names\x00shrink_memory\x00soft_heap_limit\x00synchronous\x00table_info\x00table_list\x00table_xinfo\x00temp_store\x00temp_store_directory\x00threads\x00trusted_schema\x00user_version\x00wal_autocheckpoint\x00wal_checkpoint\x00writable_schema\x00onoffalseyestruextrafull\x00exclusive\x00normal\x00full\x00incremental\x00memory\x00temporary storage cannot be changed from within a transaction\x00SET NULL\x00SET DEFAULT\x00CASCADE\x00RESTRICT\x00NO ACTION\x00delete\x00persist\x00off\x00truncate\x00wal\x00w\x00a\x00sissii\x00utf8\x00utf16le\x00utf16be\x00-%T\x00fast\x00not a writable directory\x00Safety level may not be changed inside a transaction\x00reset\x00issisii\x00issisi\x00SELECT*FROM\"%w\"\x00shadow\x00sssiii\x00iisX\x00isiX\x00c\x00u\x00isisi\x00iss\x00is\x00iissssss\x00NONE\x00siX\x00*** in database %s ***\n\x00row not in PRIMARY KEY order for %s\x00NULL value in %s.%s\x00non-%s value in %s.%s\x00NUMERIC value in %s.%s\x00C\x00TEXT value in %s.%s\x00CHECK constraint failed in %s\x00row \x00 missing from index \x00rowid not at end-of-record for row \x00 of index \x00 values differ from index \x00non-unique entry in index \x00wrong # of entries in index \x00ok\x00unsupported encoding: %s\x00restart\x00ANALYZE \"%w\".\"%w\"\x00UTF8\x00UTF-8\x00UTF-16le\x00UTF-16be\x00UTF16le\x00UTF16be\x00UTF-16\x00UTF16\x00CREATE TABLE x\x00%c\"%s\"\x00(\"%s\"\x00,arg HIDDEN\x00,schema HIDDEN\x00PRAGMA \x00%Q.\x00=%Q\x00error in %s %s after %s: %s\x00malformed database schema (%s)\x00%z - %s\x00rename\x00drop column\x00add column\x00orphan index\x00CREATE TABLE x(type text,name text,tbl_name text,rootpage int,sql text)\x00unsupported file format\x00SELECT*FROM\"%w\".%s ORDER BY rowid\x00database schema is locked: %s\x00statement too long\x00unknown join type: %T%s%T%s%T\x00naturaleftouterightfullinnercross\x00a NATURAL join may not have an ON or USING clause\x00cannot join using column %s - column not present in both tables\x00ambiguous reference to %s in USING()\x00UNION ALL\x00INTERSECT\x00EXCEPT\x00UNION\x00USE TEMP B-TREE FOR %s\x00USE TEMP B-TREE FOR %sORDER BY\x00RIGHT PART OF \x00column%d\x00%.*z:%u\x00NUM\x00cannot use window functions in recursive queries\x00recursive aggregate queries not supported\x00SETUP\x00RECURSIVE 
STEP\x00SCAN %d CONSTANT ROW%s\x00S\x00COMPOUND QUERY\x00LEFT-MOST SUBQUERY\x00%s USING TEMP B-TREE\x00all VALUES must have the same number of terms\x00SELECTs to the left and right of %s do not have the same number of result columns\x00MERGE (%s)\x00LEFT\x00RIGHT\x00no such index: %s\x00'%s' is not a function\x00no such index: \"%s\"\x00multiple references to recursive table: %s\x00circular reference: %s\x00table %s has %d values for %d columns\x00multiple recursive references: %s\x00recursive reference in a subquery: %s\x00%!S\x00too many references to \"%s\": max 65535\x00access to view \"%s\" prohibited\x00..%s\x00%s.%s.%s\x00no such table: %s\x00no tables specified\x00too many columns in result set\x00DISTINCT aggregates must have exactly one argument\x00USE TEMP B-TREE FOR %s(DISTINCT)\x00SCAN %s%s%s\x00 USING COVERING INDEX \x00target object/alias may not appear in FROM clause: %s\x00expected %d columns for '%s' but got %d\x00CO-ROUTINE %!S\x00MATERIALIZE %!S\x00DISTINCT\x00GROUP BY\x00sqlite3_get_table() called with two or more incompatible queries\x00temporary trigger may not have qualified name\x00trigger\x00cannot create triggers on virtual tables\x00trigger %T already exists\x00cannot create trigger on system table\x00cannot create %s trigger on view: %S\x00BEFORE\x00AFTER\x00cannot create INSTEAD OF trigger on table: %S\x00trigger \"%s\" may not write to shadow table \"%s\"\x00INSERT INTO %Q.sqlite_master VALUES('trigger',%Q,%Q,0,'CREATE TRIGGER %q')\x00type='trigger' AND name='%q'\x00no such trigger: %S\x00DELETE FROM %Q.sqlite_master WHERE name=%Q AND type='trigger'\x00%s RETURNING is not available on virtual tables\x00DELETE\x00UPDATE\x00RETURNING may not use \"TABLE.*\" wildcards\x00-- TRIGGER %s\x00cannot UPDATE generated column \"%s\"\x00no such column: %s\x00rows updated\x00%r \x00%sON CONFLICT clause does not match any PRIMARY KEY or UNIQUE constraint\x00CRE\x00INS\x00cannot VACUUM from within a transaction\x00cannot VACUUM - SQL statements in progress\x00non-text filename\x00ATTACH %Q AS vacuum_db\x00output file already exists\x00SELECT sql FROM \"%w\".sqlite_schema WHERE type='table'AND name<>'sqlite_sequence' AND coalesce(rootpage,1)>0\x00SELECT sql FROM \"%w\".sqlite_schema WHERE type='index'\x00SELECT'INSERT INTO vacuum_db.'||quote(name)||' SELECT*FROM\"%w\".'||quote(name)FROM vacuum_db.sqlite_schema WHERE type='table'AND coalesce(rootpage,1)>0\x00INSERT INTO vacuum_db.sqlite_schema SELECT*FROM \"%w\".sqlite_schema WHERE type IN('view','trigger') OR(type='table'AND rootpage=0)\x00CREATE VIRTUAL TABLE %T\x00UPDATE %Q.sqlite_master SET type='table', name=%Q, tbl_name=%Q, rootpage=0, sql=%Q WHERE rowid=#%d\x00name=%Q AND sql=%Q\x00vtable constructor called recursively: %s\x00vtable constructor failed: %s\x00vtable constructor did not declare schema: %s\x00no such module: %s\x00\x00 AND \x00(\x00 (\x00%s=?\x00ANY(%s)\x00>\x00<\x00%s %S\x00SEARCH\x00SCAN\x00AUTOMATIC PARTIAL COVERING INDEX\x00AUTOMATIC COVERING INDEX\x00COVERING INDEX %s\x00INDEX %s\x00 USING \x00 USING INTEGER PRIMARY KEY (%s\x00>? 
AND %s\x00%c?)\x00 VIRTUAL TABLE INDEX %d:%s\x00 LEFT-JOIN\x00BLOOM FILTER ON %S (\x00rowid=?\x00MULTI-INDEX OR\x00INDEX %d\x00RIGHT-JOIN %s\x00regexp\x00ON clause references tables to its right\x00NOCASE\x00too many arguments on %s() - max %d\x00automatic index on %s(%s)\x00auto-index\x00%s.xBestIndex malfunction\x00abbreviated query algorithm search\x00no query solution\x00at most %d tables in a join\x00SCAN CONSTANT ROW\x00second argument to nth_value must be a positive integer\x00argument of ntile must be a positive integer\x00row_number\x00dense_rank\x00rank\x00percent_rank\x00cume_dist\x00ntile\x00last_value\x00nth_value\x00first_value\x00lead\x00lag\x00no such window: %s\x00RANGE with offset PRECEDING/FOLLOWING requires one ORDER BY expression\x00FILTER clause may only be used with aggregate window functions\x00misuse of aggregate: %s()\x00unsupported frame specification\x00PARTITION clause\x00ORDER BY clause\x00frame specification\x00cannot override %s of window: %s\x00DISTINCT is not supported for window functions\x00frame starting offset must be a non-negative integer\x00frame ending offset must be a non-negative integer\x00frame starting offset must be a non-negative number\x00frame ending offset must be a non-negative number\x00%s clause should come after %s not before\x00ORDER BY\x00LIMIT\x00too many terms in compound SELECT\x00syntax error after column name \"%.*s\"\x00parser stack overflow\x00unknown table option: %.*s\x00set list\x00near \"%T\": syntax error\x00qualified table names are not allowed on INSERT, UPDATE, and DELETE statements within triggers\x00the INDEXED BY clause is not allowed on UPDATE or DELETE statements within triggers\x00the NOT INDEXED clause is not allowed on UPDATE or DELETE statements within triggers\x00incomplete input\x00unrecognized token: \"%T\"\x00%s in \"%s\"\x00create\x00temp\x00temporary\x00end\x00explain\x00unable to close due to unfinalized statements or unfinished backups\x00unknown error\x00abort due to ROLLBACK\x00another row available\x00no more rows available\x00not an error\x00SQL logic error\x00access permission denied\x00query aborted\x00database is locked\x00database table is locked\x00attempt to write a readonly database\x00interrupted\x00disk I/O error\x00database disk image is malformed\x00unknown operation\x00database or disk is full\x00unable to open database file\x00locking protocol\x00constraint failed\x00datatype mismatch\x00bad parameter or other API misuse\x00authorization denied\x00column index out of range\x00file is not a database\x00notification message\x00warning message\x00unable to delete/modify user-function due to active statements\x00unable to use function %s in the requested context\x00unknown database: %s\x00unable to delete/modify collation sequence due to active statements\x00file:\x00localhost\x00invalid uri authority: %.*s\x00vfs\x00cache\x00mode\x00no such %s mode: %s\x00%s mode not allowed: %s\x00no such vfs: %s\x00shared\x00private\x00ro\x00rw\x00rwc\x00RTRIM\x00\x00\x00\x00%s at line %d of [%.10s]\x00database corruption\x00misuse\x00cannot open file\x00no such table column: %s.%s\x00SQLITE_\x00database is deadlocked\x00array\x00object\x000123456789abcdef\x00JSON cannot hold BLOB values\x00malformed JSON\x00[0]\x00JSON path error near '%q'\x00json_%s() needs an odd number of arguments\x00$[\x00$.\x00json_object() requires an even number of arguments\x00json_object() labels must be TEXT\x00set\x00insert\x00[]\x00{}\x00CREATE TABLE x(key,value,type,atom,id,parent,fullkey,path,json HIDDEN,root 
HIDDEN)\x00.%.*s\x00[%d]\x00$\x00json\x00json_array\x00json_array_length\x00json_extract\x00->\x00->>\x00json_insert\x00json_object\x00json_patch\x00json_quote\x00json_remove\x00json_replace\x00json_set\x00json_type\x00json_valid\x00json_group_array\x00json_group_object\x00json_each\x00json_tree\x00%s_node\x00data\x00DROP TABLE '%q'.'%q_node';DROP TABLE '%q'.'%q_rowid';DROP TABLE '%q'.'%q_parent';\x00RtreeMatchArg\x00SELECT * FROM %Q.%Q\x00UNIQUE constraint failed: %s.%s\x00rtree constraint failed: %s.(%s<=%s)\x00ALTER TABLE %Q.'%q_node' RENAME TO \"%w_node\";ALTER TABLE %Q.'%q_parent' RENAME TO \"%w_parent\";ALTER TABLE %Q.'%q_rowid' RENAME TO \"%w_rowid\";\x00SELECT stat FROM %Q.sqlite_stat1 WHERE tbl = '%q_rowid'\x00node\x00CREATE TABLE \"%w\".\"%w_rowid\"(rowid INTEGER PRIMARY KEY,nodeno\x00,a%d\x00);CREATE TABLE \"%w\".\"%w_node\"(nodeno INTEGER PRIMARY KEY,data);\x00CREATE TABLE \"%w\".\"%w_parent\"(nodeno INTEGER PRIMARY KEY,parentnode);\x00INSERT INTO \"%w\".\"%w_node\"VALUES(1,zeroblob(%d))\x00INSERT INTO\"%w\".\"%w_rowid\"(rowid,nodeno)VALUES(?1,?2)ON CONFLICT(rowid)DO UPDATE SET nodeno=excluded.nodeno\x00SELECT * FROM \"%w\".\"%w_rowid\" WHERE rowid=?1\x00UPDATE \"%w\".\"%w_rowid\"SET \x00a%d=coalesce(?%d,a%d)\x00a%d=?%d\x00 WHERE rowid=?1\x00INSERT OR REPLACE INTO '%q'.'%q_node' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_node' WHERE nodeno = ?1\x00SELECT nodeno FROM '%q'.'%q_rowid' WHERE rowid = ?1\x00INSERT OR REPLACE INTO '%q'.'%q_rowid' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_rowid' WHERE rowid = ?1\x00SELECT parentnode FROM '%q'.'%q_parent' WHERE nodeno = ?1\x00INSERT OR REPLACE INTO '%q'.'%q_parent' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_parent' WHERE nodeno = ?1\x00PRAGMA %Q.page_size\x00SELECT length(data) FROM '%q'.'%q_node' WHERE nodeno = 1\x00undersize RTree blobs in \"%q_node\"\x00Wrong number of columns for an rtree table\x00Too few columns for an rtree table\x00Too many columns for an rtree table\x00Auxiliary rtree columns must be last\x00CREATE TABLE x(%.*s INT\x00,%.*s\x00);\x00,%.*s REAL\x00,%.*s INT\x00{%lld\x00 %g\x00}\x00Invalid argument to rtreedepth()\x00%z%s%z\x00SELECT data FROM %Q.'%q_node' WHERE nodeno=?\x00Node %lld missing from database\x00SELECT parentnode FROM %Q.'%q_parent' WHERE nodeno=?1\x00SELECT nodeno FROM %Q.'%q_rowid' WHERE rowid=?1\x00Mapping (%lld -> %lld) missing from %s table\x00%_rowid\x00%_parent\x00Found (%lld -> %lld) in %s table, expected (%lld -> %lld)\x00Dimension %d of cell %d on node %lld is corrupt\x00Dimension %d of cell %d on node %lld is corrupt relative to parent\x00Node %lld is too small (%d bytes)\x00Rtree depth out of range (%d)\x00Node %lld is too small for cell count of %d (%d bytes)\x00SELECT count(*) FROM %Q.'%q%s'\x00Wrong number of entries in %%%s table - expected %lld, actual %lld\x00SELECT * FROM %Q.'%q_rowid'\x00Schema corrupt or not an rtree\x00_rowid\x00_parent\x00END\x00wrong number of arguments to function rtreecheck()\x00[\x00[%!g,%!g],\x00[%!g,%!g]]\x00\x00CREATE TABLE x(_shape\x00,%s\x00rtree\x00fullscan\x00_shape does not contain a valid polygon\x00geopoly_overlap\x00geopoly_within\x00geopoly\x00geopoly_area\x00geopoly_blob\x00geopoly_json\x00geopoly_svg\x00geopoly_contains_point\x00geopoly_debug\x00geopoly_bbox\x00geopoly_xform\x00geopoly_regular\x00geopoly_ccw\x00geopoly_group_bbox\x00rtreenode\x00rtreedepth\x00rtreecheck\x00rtree_i32\x00corrupt fossil delta\x00DROP TRIGGER IF EXISTS temp.rbu_insert_tr;DROP TRIGGER IF EXISTS temp.rbu_update1_tr;DROP TRIGGER IF EXISTS temp.rbu_update2_tr;DROP TRIGGER IF 
EXISTS temp.rbu_delete_tr;\x00SELECT rbu_target_name(name, type='view') AS target, name FROM sqlite_schema WHERE type IN ('table', 'view') AND target IS NOT NULL %s ORDER BY name\x00AND rootpage!=0 AND rootpage IS NOT NULL\x00SELECT name, rootpage, sql IS NULL OR substr(8, 6)=='UNIQUE' FROM main.sqlite_schema WHERE type='index' AND tbl_name = ?\x00SELECT (sql COLLATE nocase BETWEEN 'CREATE VIRTUAL' AND 'CREATE VIRTUAM'), rootpage FROM sqlite_schema WHERE name=%Q\x00PRAGMA index_list=%Q\x00SELECT rootpage FROM sqlite_schema WHERE name = %Q\x00PRAGMA table_info=%Q\x00PRAGMA main.index_list = %Q\x00PRAGMA main.index_xinfo = %Q\x00SELECT * FROM '%q'\x00rbu_\x00rbu_rowid\x00table %q %s rbu_rowid column\x00may not have\x00requires\x00PRAGMA table_info(%Q)\x00column missing from %q: %s\x00%z%s\"%w\"\x00%z%s%s\"%w\"%s\x00SELECT max(_rowid_) FROM \"%s%w\"\x00 WHERE _rowid_ > %lld \x00 DESC\x00quote(\x00||','||\x00SELECT %s FROM \"%s%w\" ORDER BY %s LIMIT 1\x00 WHERE (%s) > (%s) \x00_rowid_\x00%z%s \"%w\" COLLATE %Q\x00%z%s \"rbu_imp_%d%w\" COLLATE %Q DESC\x00%z%s quote(\"rbu_imp_%d%w\")\x00SELECT %s FROM \"rbu_imp_%w\" ORDER BY %s LIMIT 1\x00%z%s%s\x00(%s) > (%s)\x00%z%s(%.*s) COLLATE %Q\x00%z%s\"%w\" COLLATE %Q\x00%z%s\"rbu_imp_%d%w\"%s\x00%z%s\"rbu_imp_%d%w\" %s COLLATE %Q\x00%z%s\"rbu_imp_%d%w\" IS ?\x00%z%s%s.\"%w\"\x00%z%sNULL\x00%z, %s._rowid_\x00_rowid_ = ?%d\x00%z%sc%d=?%d\x00_rowid_ = (SELECT id FROM rbu_imposter2 WHERE %z)\x00%z%s\"%w\"=?%d\x00invalid rbu_control value\x00%z%s\"%w\"=rbu_delta(\"%w\", ?%d)\x00%z%s\"%w\"=rbu_fossil_delta(\"%w\", ?%d)\x00PRIMARY KEY(\x00%z%s\"%w\"%s\x00%z)\x00SELECT name FROM sqlite_schema WHERE rootpage = ?\x00%z%sc%d %s COLLATE %Q\x00%z%sc%d%s\x00%z, id INTEGER\x00CREATE TABLE rbu_imposter2(%z, PRIMARY KEY(%z)) WITHOUT ROWID\x00PRIMARY KEY \x00%z%s\"%w\" %s %sCOLLATE %Q%s\x00 NOT NULL\x00%z, %z\x00CREATE TABLE \"rbu_imp_%w\"(%z)%s\x00 WITHOUT ROWID\x00INSERT INTO %s.'rbu_tmp_%q'(rbu_control,%s%s) VALUES(%z)\x00SELECT trim(sql) FROM sqlite_schema WHERE type='index' AND name=?\x00 LIMIT -1 OFFSET %d\x00CREATE TABLE \"rbu_imp_%w\"( %s, PRIMARY KEY( %s ) ) WITHOUT ROWID\x00INSERT INTO \"rbu_imp_%w\" VALUES(%s)\x00DELETE FROM \"rbu_imp_%w\" WHERE %s\x00SELECT %s, 0 AS rbu_control FROM '%q' %s %s %s ORDER BY %s%s\x00AND\x00WHERE\x00SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' %s ORDER BY %s%s\x00SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' %s UNION ALL SELECT %s, rbu_control FROM '%q' %s %s typeof(rbu_control)='integer' AND rbu_control!=1 ORDER BY %s%s\x00rbu_imp_\x00INSERT INTO \"%s%w\"(%s%s) VALUES(%s)\x00, _rowid_\x00DELETE FROM \"%s%w\" WHERE %s\x00, rbu_rowid\x00CREATE TABLE IF NOT EXISTS %s.'rbu_tmp_%q' AS SELECT *%s FROM '%q' WHERE 0;\x00, 0 AS rbu_rowid\x00CREATE TEMP TRIGGER rbu_delete_tr BEFORE DELETE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(3, %s);END;CREATE TEMP TRIGGER rbu_update1_tr BEFORE UPDATE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(3, %s);END;CREATE TEMP TRIGGER rbu_update2_tr AFTER UPDATE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(4, %s);END;\x00CREATE TEMP TRIGGER rbu_insert_tr AFTER INSERT ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(0, %s);END;\x00,_rowid_ \x00,rbu_rowid\x00SELECT %s,%s rbu_control%s FROM '%q'%s %s %s %s\x000 AS \x00UPDATE \"%s%w\" SET %s WHERE %s\x00SELECT k, v FROM %s.rbu_state\x00file:///%s-vacuum?modeof=%s\x00ATTACH %Q AS stat\x00CREATE TABLE IF NOT EXISTS %s.rbu_state(k INTEGER PRIMARY KEY, v)\x00cannot vacuum wal mode 
database\x00file:%s-vactmp?rbu_memory=1%s%s\x00&\x00rbu_tmp_insert\x00rbu_fossil_delta\x00rbu_target_name\x00SELECT * FROM sqlite_schema\x00rbu vfs not found\x00PRAGMA main.wal_checkpoint=restart\x00rbu_exclusive_checkpoint\x00%s-oal\x00%s-wal\x00PRAGMA schema_version\x00PRAGMA schema_version = %d\x00INSERT OR REPLACE INTO %s.rbu_state(k, v) VALUES (%d, %d), (%d, %Q), (%d, %Q), (%d, %d), (%d, %d), (%d, %lld), (%d, %lld), (%d, %lld), (%d, %lld), (%d, %Q) \x00PRAGMA main.%s\x00PRAGMA main.%s = %d\x00PRAGMA writable_schema=1\x00SELECT sql FROM sqlite_schema WHERE sql!='' AND rootpage!=0 AND name!='sqlite_sequence' ORDER BY type DESC\x00SELECT * FROM sqlite_schema WHERE rootpage=0 OR rootpage IS NULL\x00INSERT INTO sqlite_schema VALUES(?,?,?,?,?)\x00PRAGMA writable_schema=0\x00DELETE FROM %s.'rbu_tmp_%q'\x00rbu_state mismatch error\x00rbu_vfs_%d\x00SELECT count(*) FROM sqlite_schema WHERE type='index' AND tbl_name = %Q\x00rbu_index_cnt\x00SELECT 1 FROM sqlite_schema WHERE tbl_name = 'rbu_count'\x00SELECT sum(cnt * (1 + rbu_index_cnt(rbu_target_name(tbl))))FROM rbu_count\x00cannot update wal mode database\x00database modified during rbu %s\x00vacuum\x00update\x00BEGIN IMMEDIATE\x00PRAGMA journal_mode=off\x00-vactmp\x00DELETE FROM stat.rbu_state\x00rbu/zipvfs setup error\x00rbu(%s)/%z\x00rbu_memory\x00SELECT 0, 'tbl', '', 0, '', 1 UNION ALL SELECT 1, 'idx', '', 0, '', 2 UNION ALL SELECT 2, 'stat', '', 0, '', 0\x00PRAGMA '%q'.table_info('%q')\x00%z%s\"%w\".\"%w\".\"%w\"=\"%w\".\"%w\".\"%w\"\x00%z%s\"%w\".\"%w\".\"%w\" IS NOT \"%w\".\"%w\".\"%w\"\x00 OR \x00SELECT * FROM \"%w\".\"%w\" WHERE NOT EXISTS ( SELECT 1 FROM \"%w\".\"%w\" WHERE %s)\x00SELECT * FROM \"%w\".\"%w\", \"%w\".\"%w\" WHERE %s AND (%z)\x00table schemas do not match\x00SELECT tbl, ?2, stat FROM %Q.sqlite_stat1 WHERE tbl IS ?1 AND idx IS (CASE WHEN ?2=X'' THEN NULL ELSE ?2 END)\x00SELECT * FROM \x00 WHERE \x00 IS ?\x00SAVEPOINT changeset\x00RELEASE changeset\x00UPDATE main.\x00 SET \x00 = ?\x00idx IS CASE WHEN length(?4)=0 AND typeof(?4)='blob' THEN NULL ELSE ?4 END \x00DELETE FROM main.\x00 AND (?\x00AND \x00INSERT INTO main.\x00) VALUES(?\x00, ?\x00INSERT INTO main.sqlite_stat1 VALUES(?1, CASE WHEN length(?2)=0 AND typeof(?2)='blob' THEN NULL ELSE ?2 END, ?3)\x00DELETE FROM main.sqlite_stat1 WHERE tbl=?1 AND idx IS CASE WHEN length(?2)=0 AND typeof(?2)='blob' THEN NULL ELSE ?2 END AND (?4 OR stat IS ?3)\x00SAVEPOINT replace_op\x00RELEASE replace_op\x00SAVEPOINT changeset_apply\x00PRAGMA defer_foreign_keys = 1\x00sqlite3changeset_apply(): no such table: %s\x00sqlite3changeset_apply(): table %s has %d columns, expected %d or more\x00sqlite3changeset_apply(): primary key mismatch for table %s\x00PRAGMA defer_foreign_keys = 0\x00RELEASE changeset_apply\x00ROLLBACK TO changeset_apply\x00fts5: parser stack overflow\x00fts5: syntax error near \"%.*s\"\x00%z%.*s\x00wrong number of arguments to function highlight()\x00wrong number of arguments to function snippet()\x00snippet\x00highlight\x00bm25\x00prefix\x00malformed prefix=... directive\x00too many prefix indexes (max %d)\x00prefix length out of range (max 999)\x00tokenize\x00multiple tokenize=... directives\x00parse error in tokenize directive\x00content\x00multiple content=... directives\x00%Q.%Q\x00content_rowid\x00multiple content_rowid=... directives\x00columnsize\x00malformed columnsize=... directive\x00columns\x00malformed detail=... 
directive\x00unrecognized option: \"%.*s\"\x00reserved fts5 column name: %s\x00unindexed\x00unrecognized column option: %s\x00T.%Q\x00, T.%Q\x00, T.c%d\x00reserved fts5 table name: %s\x00parse error in \"%s\"\x00docsize\x00%Q.'%q_%s'\x00CREATE TABLE x(\x00%z%s%Q\x00%z, %Q HIDDEN, %s HIDDEN)\x00pgsz\x00hashsize\x00automerge\x00usermerge\x00crisismerge\x00SELECT k, v FROM %Q.'%q_config'\x00version\x00invalid fts5 file format (found %d, expected %d) - run 'rebuild'\x00unterminated string\x00fts5: syntax error near \"%.1s\"\x00OR\x00NOT\x00NEAR\x00expected integer, got \"%.*s\"\x00fts5: column queries are not supported (detail=none)\x00fts5: %s queries are not supported (detail!=full)\x00phrase\x00block\x00REPLACE INTO '%q'.'%q_data'(id, block) VALUES(?,?)\x00DELETE FROM '%q'.'%q_data' WHERE id>=? AND id<=?\x00DELETE FROM '%q'.'%q_idx' WHERE segid=?\x00PRAGMA %Q.data_version\x00SELECT pgno FROM '%q'.'%q_idx' WHERE segid=? AND term<=? ORDER BY term DESC LIMIT 1\x00INSERT INTO '%q'.'%q_idx'(segid,term,pgno) VALUES(?,?,?)\x00%s_data\x00id INTEGER PRIMARY KEY, block BLOB\x00segid, term, pgno, PRIMARY KEY(segid, term)\x00SELECT segid, term, (pgno>>1), (pgno&1) FROM %Q.'%q_idx' WHERE segid=%d ORDER BY 1, 2\x00\x00\x00\x00\x00\x00recursively defined fts5 content table\x00SELECT rowid, rank FROM %Q.%Q ORDER BY %s(\"%w\"%s%s) %s\x00DESC\x00ASC\x00reads\x00unknown special query: %.*s\x00SELECT %s\x00no such function: %s\x00parse error in rank function: %s\x00%s: table does not support scanning\x00delete-all\x00'delete-all' may only be used with a contentless or external content fts5 table\x00rebuild\x00'rebuild' may not be used with a contentless fts5 table\x00merge\x00integrity-check\x00cannot %s contentless fts5 table: %s\x00DELETE from\x00no such cursor: %lld\x00no such tokenizer: %s\x00error in tokenizer constructor\x00fts5_api_ptr\x00fts5: 2023-03-22 11:56:21 0d1fc92f94cb6b76bffe3ec34d69cffde2924203304e8ffc4155597af0c191da\x00config\x00fts5\x00fts5_source_id\x00SELECT %s FROM %s T WHERE T.%Q >= ? AND T.%Q <= ? ORDER BY T.%Q ASC\x00SELECT %s FROM %s T WHERE T.%Q <= ? AND T.%Q >= ? 
ORDER BY T.%Q DESC\x00SELECT %s FROM %s T WHERE T.%Q=?\x00INSERT INTO %Q.'%q_content' VALUES(%s)\x00REPLACE INTO %Q.'%q_content' VALUES(%s)\x00DELETE FROM %Q.'%q_content' WHERE id=?\x00REPLACE INTO %Q.'%q_docsize' VALUES(?,?)\x00DELETE FROM %Q.'%q_docsize' WHERE id=?\x00SELECT sz FROM %Q.'%q_docsize' WHERE id=?\x00REPLACE INTO %Q.'%q_config' VALUES(?,?)\x00SELECT %s FROM %s AS T\x00DROP TABLE IF EXISTS %Q.'%q_data';DROP TABLE IF EXISTS %Q.'%q_idx';DROP TABLE IF EXISTS %Q.'%q_config';\x00DROP TABLE IF EXISTS %Q.'%q_docsize';\x00DROP TABLE IF EXISTS %Q.'%q_content';\x00ALTER TABLE %Q.'%q_%s' RENAME TO '%q_%s';\x00CREATE TABLE %Q.'%q_%q'(%s)%s\x00fts5: error creating shadow table %q_%s: %s\x00id INTEGER PRIMARY KEY\x00, c%d\x00id INTEGER PRIMARY KEY, sz BLOB\x00k PRIMARY KEY, v\x00DELETE FROM %Q.'%q_data';DELETE FROM %Q.'%q_idx';\x00DELETE FROM %Q.'%q_docsize';\x00SELECT count(*) FROM %Q.'%q_%s'\x00tokenchars\x00separators\x00L* N* Co\x00categories\x00remove_diacritics\x00unicode61\x00al\x00ance\x00ence\x00er\x00ic\x00able\x00ible\x00ant\x00ement\x00ment\x00ent\x00ion\x00ou\x00ism\x00ate\x00iti\x00ous\x00ive\x00ize\x00at\x00bl\x00ble\x00iz\x00ational\x00tional\x00tion\x00enci\x00anci\x00izer\x00logi\x00bli\x00alli\x00entli\x00eli\x00e\x00ousli\x00ization\x00ation\x00ator\x00alism\x00iveness\x00fulness\x00ful\x00ousness\x00aliti\x00iviti\x00biliti\x00ical\x00ness\x00icate\x00iciti\x00ative\x00alize\x00eed\x00ee\x00ed\x00ing\x00case_sensitive\x00ascii\x00porter\x00trigram\x00col\x00row\x00instance\x00fts5vocab: unknown table type: %Q\x00CREATE TABlE vocab(term, col, doc, cnt)\x00CREATE TABlE vocab(term, doc, cnt)\x00CREATE TABlE vocab(term, doc, col, offset)\x00wrong number of vtable arguments\x00recursive definition for %s.%s\x00SELECT t.%Q FROM %Q.%Q AS t WHERE t.%Q MATCH '*id'\x00no such fts5 table: %s.%s\x00fts5vocab\x002023-03-22 11:56:21 0d1fc92f94cb6b76bffe3ec34d69cffde2924203304e8ffc4155597af0c191da\x00" var ts = (*reflect.StringHeader)(unsafe.Pointer(&ts1)).Data diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/sqlite/lib/sqlite_linux_ppc64le.go temporal-1.22.5/src/vendor/modernc.org/sqlite/lib/sqlite_linux_ppc64le.go --- temporal-1.21.5-1/src/vendor/modernc.org/sqlite/lib/sqlite_linux_ppc64le.go 2023-09-29 14:03:35.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/sqlite/lib/sqlite_linux_ppc64le.go 2024-02-23 09:46:16.000000000 +0000 @@ -1,4 +1,4 @@ -// Code generated by 'ccgo -DSQLITE_PRIVATE= -export-defines "" -export-enums "" -export-externs X -export-fields F -export-typedefs "" -ignore-unsupported-alignment -pkgname sqlite3 -volatile=sqlite3_io_error_pending,sqlite3_open_file_count,sqlite3_pager_readdb_count,sqlite3_pager_writedb_count,sqlite3_pager_writej_count,sqlite3_search_count,sqlite3_sort_count,saved_cnt,randomnessPid -o lib/sqlite_linux_ppc64le.go -trace-translation-units testdata/sqlite-amalgamation-3410000/sqlite3.c -full-path-comments -DNDEBUG -DHAVE_USLEEP -DLONGDOUBLE_TYPE=double -DSQLITE_CORE -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_FTS5 -DSQLITE_ENABLE_GEOPOLY -DSQLITE_ENABLE_MATH_FUNCTIONS -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_OFFSET_SQL_FUNC -DSQLITE_ENABLE_PREUPDATE_HOOK -DSQLITE_ENABLE_RBU -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_SESSION -DSQLITE_ENABLE_SNAPSHOT -DSQLITE_ENABLE_STAT4 -DSQLITE_ENABLE_UNLOCK_NOTIFY -DSQLITE_LIKE_DOESNT_MATCH_BLOBS -DSQLITE_MUTEX_APPDEF=1 -DSQLITE_MUTEX_NOOP -DSQLITE_SOUNDEX -DSQLITE_THREADSAFE=1 -DSQLITE_OS_UNIX=1', DO NOT EDIT. 
+// Code generated by 'ccgo -DSQLITE_PRIVATE= -export-defines "" -export-enums "" -export-externs X -export-fields F -export-typedefs "" -ignore-unsupported-alignment -pkgname sqlite3 -volatile=sqlite3_io_error_pending,sqlite3_open_file_count,sqlite3_pager_readdb_count,sqlite3_pager_writedb_count,sqlite3_pager_writej_count,sqlite3_search_count,sqlite3_sort_count,saved_cnt,randomnessPid -o lib/sqlite_linux_ppc64le.go -trace-translation-units testdata/sqlite-amalgamation-3410200/sqlite3.c -full-path-comments -DNDEBUG -DHAVE_USLEEP -DLONGDOUBLE_TYPE=double -DSQLITE_CORE -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_FTS5 -DSQLITE_ENABLE_GEOPOLY -DSQLITE_ENABLE_MATH_FUNCTIONS -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_OFFSET_SQL_FUNC -DSQLITE_ENABLE_PREUPDATE_HOOK -DSQLITE_ENABLE_RBU -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_SESSION -DSQLITE_ENABLE_SNAPSHOT -DSQLITE_ENABLE_STAT4 -DSQLITE_ENABLE_UNLOCK_NOTIFY -DSQLITE_LIKE_DOESNT_MATCH_BLOBS -DSQLITE_MUTEX_APPDEF=1 -DSQLITE_MUTEX_NOOP -DSQLITE_SOUNDEX -DSQLITE_THREADSAFE=1 -DSQLITE_OS_UNIX=1', DO NOT EDIT. package sqlite3 @@ -917,11 +917,11 @@ NC_OrderAgg = 0x8000000 NC_PartIdx = 0x000002 NC_SelfRef = 0x00002e + NC_Subquery = 0x000040 NC_UAggInfo = 0x000100 NC_UBaseReg = 0x000400 NC_UEList = 0x000080 NC_UUpsert = 0x000200 - NC_VarSelect = 0x000040 NDEBUG = 1 NN = 1 NOT_WITHIN = 0 @@ -2162,7 +2162,7 @@ SQLITE_SHM_UNLOCK = 1 SQLITE_SORTER_PMASZ = 250 SQLITE_SOUNDEX = 1 - SQLITE_SOURCE_ID = "2023-02-21 18:09:37 05941c2a04037fc3ed2ffae11f5d2260706f89431f463518740f72ada350866d" + SQLITE_SOURCE_ID = "2023-03-22 11:56:21 0d1fc92f94cb6b76bffe3ec34d69cffde2924203304e8ffc4155597af0c191da" SQLITE_SO_ASC = 0 SQLITE_SO_DESC = 1 SQLITE_SO_UNDEFINED = -1 @@ -2269,8 +2269,8 @@ SQLITE_UTF8 = 1 SQLITE_VDBEINT_H = 0 SQLITE_VDBE_H = 0 - SQLITE_VERSION = "3.41.0" - SQLITE_VERSION_NUMBER = 3041000 + SQLITE_VERSION = "3.41.2" + SQLITE_VERSION_NUMBER = 3041002 SQLITE_VTABRISK_High = 2 SQLITE_VTABRISK_Low = 0 SQLITE_VTABRISK_Normal = 1 @@ -6121,7 +6121,8 @@ FiIdxCur int32 FiIdxCol int32 FbMaybeNullRow U8 - F__ccgo_pad1 [3]byte + Faff U8 + F__ccgo_pad1 [2]byte FpIENext uintptr } @@ -6763,17 +6764,18 @@ // Handle type for pages. type PgHdr2 = struct { - FpPage uintptr - FpData uintptr - FpExtra uintptr - FpCache uintptr - FpDirty uintptr - FpPager uintptr - Fpgno Pgno - Fflags U16 - FnRef I16 - FpDirtyNext uintptr - FpDirtyPrev uintptr + FpPage uintptr + FpData uintptr + FpExtra uintptr + FpCache uintptr + FpDirty uintptr + FpPager uintptr + Fpgno Pgno + Fflags U16 + F__ccgo_pad1 [2]byte + FnRef I64 + FpDirtyNext uintptr + FpDirtyPrev uintptr } // Handle type for pages. 
@@ -6994,14 +6996,14 @@ FpDirty uintptr FpDirtyTail uintptr FpSynced uintptr - FnRefSum int32 + FnRefSum I64 FszCache int32 FszSpill int32 FszPage int32 FszExtra int32 FbPurgeable U8 FeCreate U8 - F__ccgo_pad1 [2]byte + F__ccgo_pad1 [6]byte FxStress uintptr FpStress uintptr FpCache uintptr @@ -7808,7 +7810,7 @@ _ = pMutex if op < 0 || op >= int32(uint64(unsafe.Sizeof([10]Sqlite3StatValueType{}))/uint64(unsafe.Sizeof(Sqlite3StatValueType(0)))) { - return Xsqlite3MisuseError(tls, 23229) + return Xsqlite3MisuseError(tls, 23233) } if statMutex[op] != 0 { pMutex = Xsqlite3Pcache1Mutex(tls) @@ -15905,7 +15907,7 @@ for p = (*UnixInodeInfo)(unsafe.Pointer(pInode)).FpUnused; p != 0; p = pNext { pNext = (*UnixUnusedFd)(unsafe.Pointer(p)).FpNext - robust_close(tls, pFile, (*UnixUnusedFd)(unsafe.Pointer(p)).Ffd, 38271) + robust_close(tls, pFile, (*UnixUnusedFd)(unsafe.Pointer(p)).Ffd, 38275) Xsqlite3_free(tls, p) } (*UnixInodeInfo)(unsafe.Pointer(pInode)).FpUnused = uintptr(0) @@ -16382,7 +16384,7 @@ var pFile uintptr = id unixUnmapfile(tls, pFile) if (*UnixFile)(unsafe.Pointer(pFile)).Fh >= 0 { - robust_close(tls, pFile, (*UnixFile)(unsafe.Pointer(pFile)).Fh, 39055) + robust_close(tls, pFile, (*UnixFile)(unsafe.Pointer(pFile)).Fh, 39059) (*UnixFile)(unsafe.Pointer(pFile)).Fh = -1 } @@ -16673,7 +16675,7 @@ if fd >= 0 { return SQLITE_OK } - return unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 40676), ts+3378, bp+8, 40676) + return unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 40680), ts+3378, bp+8, 40680) } func unixSync(tls *libc.TLS, id uintptr, flags int32) int32 { @@ -16690,14 +16692,14 @@ if rc != 0 { storeLastErrno(tls, pFile, *(*int32)(unsafe.Pointer(libc.X__errno_location(tls)))) - return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(4)<<8, ts+3666, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40717) + return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(4)<<8, ts+3666, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40721) } if int32((*UnixFile)(unsafe.Pointer(pFile)).FctrlFlags)&UNIXFILE_DIRSYNC != 0 { rc = (*(*func(*libc.TLS, uintptr, uintptr) int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 17*24 + 8)))(tls, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, bp) if rc == SQLITE_OK { full_fsync(tls, *(*int32)(unsafe.Pointer(bp)), 0, 0) - robust_close(tls, pFile, *(*int32)(unsafe.Pointer(bp)), 40731) + robust_close(tls, pFile, *(*int32)(unsafe.Pointer(bp)), 40735) } else { rc = SQLITE_OK } @@ -16717,7 +16719,7 @@ rc = robust_ftruncate(tls, (*UnixFile)(unsafe.Pointer(pFile)).Fh, nByte) if rc != 0 { storeLastErrno(tls, pFile, *(*int32)(unsafe.Pointer(libc.X__errno_location(tls)))) - return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(6)<<8, ts+3297, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40762) + return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(6)<<8, ts+3297, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40766) } else { if nByte < (*UnixFile)(unsafe.Pointer(pFile)).FmmapSize { (*UnixFile)(unsafe.Pointer(pFile)).FmmapSize = nByte @@ -16785,7 +16787,7 @@ if (*UnixFile)(unsafe.Pointer(pFile)).FszChunk <= 0 { if robust_ftruncate(tls, (*UnixFile)(unsafe.Pointer(pFile)).Fh, nByte) != 0 { storeLastErrno(tls, pFile, *(*int32)(unsafe.Pointer(libc.X__errno_location(tls)))) - return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(6)<<8, ts+3297, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40883) + return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(6)<<8, ts+3297, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40887) } } @@ -17012,7 +17014,7 @@ } Xsqlite3_free(tls, (*UnixShmNode)(unsafe.Pointer(p)).FapRegion) if 
(*UnixShmNode)(unsafe.Pointer(p)).FhShm >= 0 { - robust_close(tls, pFd, (*UnixShmNode)(unsafe.Pointer(p)).FhShm, 41442) + robust_close(tls, pFd, (*UnixShmNode)(unsafe.Pointer(p)).FhShm, 41446) (*UnixShmNode)(unsafe.Pointer(p)).FhShm = -1 } (*UnixInodeInfo)(unsafe.Pointer((*UnixShmNode)(unsafe.Pointer(p)).FpInode)).FpShmNode = uintptr(0) @@ -17040,7 +17042,7 @@ rc = unixShmSystemLock(tls, pDbFd, F_WRLCK, (22+SQLITE_SHM_NLOCK)*4+SQLITE_SHM_NLOCK, 1) if rc == SQLITE_OK && robust_ftruncate(tls, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FhShm, int64(3)) != 0 { - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(18)<<8, ts+3297, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename, 41499) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(18)<<8, ts+3297, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename, 41503) } } } else if int32((*flock)(unsafe.Pointer(bp+8)).Fl_type) == F_WRLCK { @@ -17139,7 +17141,7 @@ if !((*unixShmNode)(unsafe.Pointer(pShmNode)).FhShm < 0) { goto __10 } - rc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 41624), ts+3261, zShm, 41624) + rc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 41628), ts+3261, zShm, 41628) goto shm_open_err __10: ; @@ -17269,7 +17271,7 @@ goto __14 } zFile = (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(19)<<8, ts+3332, zFile, 41768) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(19)<<8, ts+3332, zFile, 41772) goto shmpage_out __14: ; @@ -17315,7 +17317,7 @@ if !(pMem == libc.UintptrFromInt32(-1)) { goto __20 } - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(21)<<8, ts+3419, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename, 41795) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(21)<<8, ts+3419, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename, 41799) goto shmpage_out __20: ; @@ -17546,7 +17548,7 @@ if pNew == libc.UintptrFromInt32(-1) { pNew = uintptr(0) nNew = int64(0) - unixLogErrorAtLine(tls, SQLITE_OK, zErr, (*UnixFile)(unsafe.Pointer(pFd)).FzPath, 42169) + unixLogErrorAtLine(tls, SQLITE_OK, zErr, (*UnixFile)(unsafe.Pointer(pFd)).FzPath, 42173) (*UnixFile)(unsafe.Pointer(pFd)).FmmapSizeMax = int64(0) } @@ -17680,7 +17682,7 @@ unixEnterMutex(tls) rc = findInodeInfo(tls, pNew, pNew+16) if rc != SQLITE_OK { - robust_close(tls, pNew, h, 42672) + robust_close(tls, pNew, h, 42676) h = -1 } unixLeaveMutex(tls) @@ -17701,7 +17703,7 @@ storeLastErrno(tls, pNew, 0) if rc != SQLITE_OK { if h >= 0 { - robust_close(tls, pNew, h, 42757) + robust_close(tls, pNew, h, 42761) } } else { (*Sqlite3_file)(unsafe.Pointer(pId)).FpMethods = pLockingStyle @@ -18017,7 +18019,7 @@ if !(fd < 0) { goto __19 } - rc2 = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43198), ts+3261, zName, 43198) + rc2 = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43202), ts+3261, zName, 43202) if !(rc == SQLITE_OK) { goto __20 } @@ -18108,7 +18110,7 @@ if *(*int32)(unsafe.Pointer(libc.X__errno_location(tls))) == ENOENT { rc = SQLITE_IOERR | int32(23)<<8 } else { - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(10)<<8, ts+3371, zPath, 43337) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(10)<<8, ts+3371, zPath, 43341) } return rc } @@ -18116,9 +18118,9 @@ rc = (*(*func(*libc.TLS, uintptr, uintptr) int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 17*24 + 8)))(tls, zPath, bp) if rc == SQLITE_OK { if full_fsync(tls, *(*int32)(unsafe.Pointer(bp)), 0, 0) != 0 { - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(5)<<8, ts+3788, zPath, 43347) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(5)<<8, 
ts+3788, zPath, 43351) } - robust_close(tls, uintptr(0), *(*int32)(unsafe.Pointer(bp)), 43349) + robust_close(tls, uintptr(0), *(*int32)(unsafe.Pointer(bp)), 43353) } else { rc = SQLITE_OK } @@ -18182,18 +18184,18 @@ zIn = (*DbPath)(unsafe.Pointer(pPath)).FzOut if (*(*func(*libc.TLS, uintptr, uintptr) int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 27*24 + 8)))(tls, zIn, bp) != 0 { if *(*int32)(unsafe.Pointer(libc.X__errno_location(tls))) != ENOENT { - (*DbPath)(unsafe.Pointer(pPath)).Frc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43443), ts+3459, zIn, 43443) + (*DbPath)(unsafe.Pointer(pPath)).Frc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43447), ts+3459, zIn, 43447) } } else if (*stat)(unsafe.Pointer(bp)).Fst_mode&X__mode_t(0170000) == X__mode_t(0120000) { var got Ssize_t if libc.PostIncInt32(&(*DbPath)(unsafe.Pointer(pPath)).FnSymlink, 1) > SQLITE_MAX_SYMLINK { - (*DbPath)(unsafe.Pointer(pPath)).Frc = Xsqlite3CantopenError(tls, 43449) + (*DbPath)(unsafe.Pointer(pPath)).Frc = Xsqlite3CantopenError(tls, 43453) return } got = (*(*func(*libc.TLS, uintptr, uintptr, Size_t) Ssize_t)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls, zIn, bp+144, uint64(unsafe.Sizeof([4098]uint8{}))-uint64(2)) if got <= int64(0) || got >= Ssize_t(unsafe.Sizeof([4098]uint8{}))-int64(2) { - (*DbPath)(unsafe.Pointer(pPath)).Frc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43454), ts+3450, zIn, 43454) + (*DbPath)(unsafe.Pointer(pPath)).Frc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43458), ts+3450, zIn, 43458) return } *(*uint8)(unsafe.Pointer(bp + 144 + uintptr(got))) = uint8(0) @@ -18233,14 +18235,14 @@ (*DbPath)(unsafe.Pointer(bp + 4104)).FzOut = zOut if int32(*(*uint8)(unsafe.Pointer(zPath))) != '/' { if (*(*func(*libc.TLS, uintptr, Size_t) uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 3*24 + 8)))(tls, bp, uint64(unsafe.Sizeof([4098]uint8{}))-uint64(2)) == uintptr(0) { - return unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43512), ts+3279, zPath, 43512) + return unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43516), ts+3279, zPath, 43516) } appendAllPathElements(tls, bp+4104, bp) } appendAllPathElements(tls, bp+4104, zPath) *(*uint8)(unsafe.Pointer(zOut + uintptr((*DbPath)(unsafe.Pointer(bp+4104)).FnUsed))) = uint8(0) if (*DbPath)(unsafe.Pointer(bp+4104)).Frc != 0 || (*DbPath)(unsafe.Pointer(bp+4104)).FnUsed < 2 { - return Xsqlite3CantopenError(tls, 43518) + return Xsqlite3CantopenError(tls, 43522) } if (*DbPath)(unsafe.Pointer(bp+4104)).FnSymlink != 0 { return SQLITE_OK | int32(2)<<8 @@ -18341,7 +18343,7 @@ for __ccgo := true; __ccgo; __ccgo = got < 0 && *(*int32)(unsafe.Pointer(libc.X__errno_location(tls))) == EINTR { got = int32((*(*func(*libc.TLS, int32, uintptr, Size_t) Ssize_t)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 8*24 + 8)))(tls, fd, zBuf, uint64(nBuf))) } - robust_close(tls, uintptr(0), fd, 43619) + robust_close(tls, uintptr(0), fd, 43623) } } @@ -19774,7 +19776,7 @@ libc.Xmemset(tls, pPgHdr+32, 0, uint64(unsafe.Sizeof(PgHdr{}))-uint64(uintptr(0)+32)) (*PgHdr)(unsafe.Pointer(pPgHdr)).FpPage = pPage (*PgHdr)(unsafe.Pointer(pPgHdr)).FpData = (*Sqlite3_pcache_page)(unsafe.Pointer(pPage)).FpBuf - (*PgHdr)(unsafe.Pointer(pPgHdr)).FpExtra = pPgHdr + 1*72 + (*PgHdr)(unsafe.Pointer(pPgHdr)).FpExtra = pPgHdr + 1*80 libc.Xmemset(tls, (*PgHdr)(unsafe.Pointer(pPgHdr)).FpExtra, 0, uint64(8)) (*PgHdr)(unsafe.Pointer(pPgHdr)).FpCache = pCache (*PgHdr)(unsafe.Pointer(pPgHdr)).Fpgno = pgno @@ -19804,7 +19806,7 @@ 
// reference count drops to 0, then it is made eligible for recycling. func Xsqlite3PcacheRelease(tls *libc.TLS, p uintptr) { (*PCache)(unsafe.Pointer((*PgHdr)(unsafe.Pointer(p)).FpCache)).FnRefSum-- - if int32(libc.PreDecInt16(&(*PgHdr)(unsafe.Pointer(p)).FnRef, 1)) == 0 { + if libc.PreDecInt64(&(*PgHdr)(unsafe.Pointer(p)).FnRef, 1) == int64(0) { if int32((*PgHdr)(unsafe.Pointer(p)).Fflags)&PGHDR_CLEAN != 0 { pcacheUnpin(tls, p) } else { @@ -19855,7 +19857,7 @@ *(*U16)(unsafe.Pointer(p + 52)) &= libc.Uint16FromInt32(libc.CplInt32(PGHDR_DIRTY | PGHDR_NEED_SYNC | PGHDR_WRITEABLE)) *(*U16)(unsafe.Pointer(p + 52)) |= U16(PGHDR_CLEAN) - if int32((*PgHdr)(unsafe.Pointer(p)).FnRef) == 0 { + if (*PgHdr)(unsafe.Pointer(p)).FnRef == int64(0) { pcacheUnpin(tls, p) } } @@ -19959,8 +19961,8 @@ } func pcacheMergeDirtyList(tls *libc.TLS, pA uintptr, pB uintptr) uintptr { - bp := tls.Alloc(72) - defer tls.Free(72) + bp := tls.Alloc(80) + defer tls.Free(80) var pTail uintptr pTail = bp @@ -20038,13 +20040,13 @@ // // This is not the total number of pages referenced, but the sum of the // reference count for all pages. -func Xsqlite3PcacheRefCount(tls *libc.TLS, pCache uintptr) int32 { +func Xsqlite3PcacheRefCount(tls *libc.TLS, pCache uintptr) I64 { return (*PCache)(unsafe.Pointer(pCache)).FnRefSum } // Return the number of references to the page supplied as an argument. -func Xsqlite3PcachePageRefcount(tls *libc.TLS, p uintptr) int32 { - return int32((*PgHdr)(unsafe.Pointer(p)).FnRef) +func Xsqlite3PcachePageRefcount(tls *libc.TLS, p uintptr) I64 { + return (*PgHdr)(unsafe.Pointer(p)).FnRef } // Return the total number of pages in the cache. @@ -22338,7 +22340,7 @@ pPg = Xsqlite3PagerLookup(tls, pPager, iPg) if pPg != 0 { - if Xsqlite3PcachePageRefcount(tls, pPg) == 1 { + if Xsqlite3PcachePageRefcount(tls, pPg) == int64(1) { Xsqlite3PcacheDrop(tls, pPg) } else { rc = readDbPage(tls, pPg) @@ -22771,7 +22773,7 @@ var pageSize U32 = *(*U32)(unsafe.Pointer(pPageSize)) if (int32((*Pager)(unsafe.Pointer(pPager)).FmemDb) == 0 || (*Pager)(unsafe.Pointer(pPager)).FdbSize == Pgno(0)) && - Xsqlite3PcacheRefCount(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache) == 0 && + Xsqlite3PcacheRefCount(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache) == int64(0) && pageSize != 0 && pageSize != U32((*Pager)(unsafe.Pointer(pPager)).FpageSize) { var pNew uintptr = uintptr(0) *(*I64)(unsafe.Pointer(bp)) = int64(0) @@ -22923,9 +22925,9 @@ Xsqlite3OsUnfetch(tls, (*Pager)(unsafe.Pointer(pPager)).Ffd, I64(pgno-Pgno(1))*(*Pager)(unsafe.Pointer(pPager)).FpageSize, pData) return SQLITE_NOMEM } - (*PgHdr)(unsafe.Pointer(p)).FpExtra = p + 1*72 + (*PgHdr)(unsafe.Pointer(p)).FpExtra = p + 1*80 (*PgHdr)(unsafe.Pointer(p)).Fflags = U16(PGHDR_MMAP) - (*PgHdr)(unsafe.Pointer(p)).FnRef = int16(1) + (*PgHdr)(unsafe.Pointer(p)).FnRef = int64(1) (*PgHdr)(unsafe.Pointer(p)).FpPager = pPager } @@ -23257,7 +23259,7 @@ for rc == SQLITE_OK && pList != 0 { var pNext uintptr = (*PgHdr)(unsafe.Pointer(pList)).FpDirty - if int32((*PgHdr)(unsafe.Pointer(pList)).FnRef) == 0 { + if (*PgHdr)(unsafe.Pointer(pList)).FnRef == int64(0) { rc = pagerStress(tls, pPager, pList) } pList = pNext @@ -23407,7 +23409,7 @@ goto __12 } - rc = Xsqlite3CantopenError(tls, 60235) + rc = Xsqlite3CantopenError(tls, 60239) __12: ; if !(rc != SQLITE_OK) { @@ -23788,7 +23790,7 @@ if !(rc == SQLITE_OK && *(*int32)(unsafe.Pointer(bp + 8))&SQLITE_OPEN_READONLY != 0) { goto __10 } - rc = Xsqlite3CantopenError(tls, 60754) + rc = Xsqlite3CantopenError(tls, 60758) Xsqlite3OsClose(tls, 
(*Pager)(unsafe.Pointer(pPager)).Fjfd) __10: ; @@ -23894,7 +23896,7 @@ } func pagerUnlockIfUnused(tls *libc.TLS, pPager uintptr) { - if Xsqlite3PcacheRefCount(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache) == 0 { + if Xsqlite3PcacheRefCount(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache) == int64(0) { pagerUnlockAndRollback(tls, pPager) } } @@ -23912,7 +23914,7 @@ if !(pgno == Pgno(0)) { goto __1 } - return Xsqlite3CorruptError(tls, 60967) + return Xsqlite3CorruptError(tls, 60971) __1: ; *(*uintptr)(unsafe.Pointer(bp)) = Xsqlite3PcacheFetch(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache, pgno, 3) @@ -23951,7 +23953,7 @@ if !(pgno == (*Pager)(unsafe.Pointer(pPager)).FlckPgno) { goto __7 } - rc = Xsqlite3CorruptError(tls, 60999) + rc = Xsqlite3CorruptError(tls, 61003) goto pager_acquire_err __7: ; @@ -24028,7 +24030,7 @@ (int32((*Pager)(unsafe.Pointer(pPager)).FeState) == PAGER_READER || flags&PAGER_GET_READONLY != 0)) if pgno <= Pgno(1) && pgno == Pgno(0) { - return Xsqlite3CorruptError(tls, 61078) + return Xsqlite3CorruptError(tls, 61082) } if bMmapOk != 0 && (*Pager)(unsafe.Pointer(pPager)).FpWal != uintptr(0) { @@ -24786,7 +24788,7 @@ // Return the number of references to the specified page. func Xsqlite3PagerPageRefcount(tls *libc.TLS, pPage uintptr) int32 { - return Xsqlite3PcachePageRefcount(tls, pPage) + return int32(Xsqlite3PcachePageRefcount(tls, pPage)) } // Parameter eStat must be one of SQLITE_DBSTATUS_CACHE_HIT, _MISS, _WRITE, @@ -25029,9 +25031,9 @@ pPgOld = Xsqlite3PagerLookup(tls, pPager, pgno) if pPgOld != 0 { - if int32((*PgHdr)(unsafe.Pointer(pPgOld)).FnRef) > 1 { + if (*PgHdr)(unsafe.Pointer(pPgOld)).FnRef > int64(1) { Xsqlite3PagerUnrefNotNull(tls, pPgOld) - return Xsqlite3CorruptError(tls, 62623) + return Xsqlite3CorruptError(tls, 62627) } *(*U16)(unsafe.Pointer(pPg + 52)) |= U16(int32((*PgHdr)(unsafe.Pointer(pPgOld)).Fflags) & PGHDR_NEED_SYNC) if (*Pager)(unsafe.Pointer(pPager)).FtempFile != 0 { @@ -25788,7 +25790,7 @@ nCollide = idx for iKey = walHash(tls, iPage); *(*Ht_slot)(unsafe.Pointer((*WalHashLoc)(unsafe.Pointer(bp)).FaHash + uintptr(iKey)*2)) != 0; iKey = walNextHash(tls, iKey) { if libc.PostDecInt32(&nCollide, 1) == 0 { - return Xsqlite3CorruptError(tls, 64387) + return Xsqlite3CorruptError(tls, 64391) } } *(*U32)(unsafe.Pointer((*WalHashLoc)(unsafe.Pointer(bp)).FaPgno + uintptr(idx-1)*4)) = iPage @@ -25887,7 +25889,7 @@ if !(version != U32(WAL_MAX_VERSION)) { goto __7 } - rc = Xsqlite3CantopenError(tls, 64519) + rc = Xsqlite3CantopenError(tls, 64523) goto finished __7: ; @@ -26473,7 +26475,7 @@ goto __14 } - rc = Xsqlite3CorruptError(tls, 65333) + rc = Xsqlite3CorruptError(tls, 65337) goto __15 __14: Xsqlite3OsFileControlHint(tls, (*Wal)(unsafe.Pointer(pWal)).FpDbFd, SQLITE_FCNTL_SIZE_HINT, bp+16) @@ -26748,7 +26750,7 @@ } if badHdr == 0 && (*Wal)(unsafe.Pointer(pWal)).Fhdr.FiVersion != U32(WALINDEX_MAX_VERSION) { - rc = Xsqlite3CantopenError(tls, 65682) + rc = Xsqlite3CantopenError(tls, 65686) } if (*Wal)(unsafe.Pointer(pWal)).FbShmUnreliable != 0 { if rc != SQLITE_OK { @@ -27221,7 +27223,7 @@ iRead = iFrame } if libc.PostDecInt32(&nCollide, 1) == 0 { - return Xsqlite3CorruptError(tls, 66419) + return Xsqlite3CorruptError(tls, 66423) } iKey = walNextHash(tls, iKey) } @@ -27726,7 +27728,7 @@ if rc == SQLITE_OK { if (*Wal)(unsafe.Pointer(pWal)).Fhdr.FmxFrame != 0 && walPagesize(tls, pWal) != nBuf { - rc = Xsqlite3CorruptError(tls, 67138) + rc = Xsqlite3CorruptError(tls, 67142) } else { rc = walCheckpoint(tls, pWal, db, eMode2, xBusy2, pBusyArg, sync_flags, 
zBuf) } @@ -28384,7 +28386,7 @@ } Xsqlite3VdbeRecordUnpack(tls, pKeyInfo, int32(nKey), pKey, pIdxKey) if int32((*UnpackedRecord)(unsafe.Pointer(pIdxKey)).FnField) == 0 || int32((*UnpackedRecord)(unsafe.Pointer(pIdxKey)).FnField) > int32((*KeyInfo)(unsafe.Pointer(pKeyInfo)).FnAllField) { - rc = Xsqlite3CorruptError(tls, 69249) + rc = Xsqlite3CorruptError(tls, 69253) } else { rc = Xsqlite3BtreeIndexMoveto(tls, pCur, pIdxKey, pRes) } @@ -28521,7 +28523,7 @@ if !(key == Pgno(0)) { goto __2 } - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69430) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69434) return __2: ; @@ -28538,7 +28540,7 @@ goto __4 } - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69443) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69447) goto ptrmap_exit __4: ; @@ -28546,7 +28548,7 @@ if !(offset < 0) { goto __5 } - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69448) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69452) goto ptrmap_exit __5: ; @@ -28589,7 +28591,7 @@ offset = int32(Pgno(5) * (key - Pgno(iPtrmap) - Pgno(1))) if offset < 0 { Xsqlite3PagerUnref(tls, *(*uintptr)(unsafe.Pointer(bp))) - return Xsqlite3CorruptError(tls, 69493) + return Xsqlite3CorruptError(tls, 69497) } *(*U8)(unsafe.Pointer(pEType)) = *(*U8)(unsafe.Pointer(pPtrmap + uintptr(offset))) @@ -28599,7 +28601,7 @@ Xsqlite3PagerUnref(tls, *(*uintptr)(unsafe.Pointer(bp))) if int32(*(*U8)(unsafe.Pointer(pEType))) < 1 || int32(*(*U8)(unsafe.Pointer(pEType))) > 5 { - return Xsqlite3CorruptError(tls, 69501) + return Xsqlite3CorruptError(tls, 69505) } return SQLITE_OK } @@ -28849,7 +28851,7 @@ if U32((*CellInfo)(unsafe.Pointer(bp)).FnLocal) < (*CellInfo)(unsafe.Pointer(bp)).FnPayload { var ovfl Pgno if Uptr((*MemPage)(unsafe.Pointer(pSrc)).FaDataEnd) >= Uptr(pCell) && Uptr((*MemPage)(unsafe.Pointer(pSrc)).FaDataEnd) < Uptr(pCell+uintptr((*CellInfo)(unsafe.Pointer(bp)).FnLocal)) { - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69893) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69897) return } ovfl = Xsqlite3Get4byte(tls, pCell+uintptr(int32((*CellInfo)(unsafe.Pointer(bp)).FnSize)-4)) @@ -28896,7 +28898,7 @@ if !(iFree > usableSize-4) { goto __2 } - return Xsqlite3CorruptError(tls, 69951) + return Xsqlite3CorruptError(tls, 69955) __2: ; if !(iFree != 0) { @@ -28906,7 +28908,7 @@ if !(iFree2 > usableSize-4) { goto __4 } - return Xsqlite3CorruptError(tls, 69954) + return Xsqlite3CorruptError(tls, 69958) __4: ; if !(0 == iFree2 || int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree2)))) == 0 && int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree2+1)))) == 0) { @@ -28919,7 +28921,7 @@ if !(top >= iFree) { goto __6 } - return Xsqlite3CorruptError(tls, 69962) + return Xsqlite3CorruptError(tls, 69966) __6: ; if !(iFree2 != 0) { @@ -28928,14 +28930,14 @@ if !(iFree+sz > iFree2) { goto __9 } - return Xsqlite3CorruptError(tls, 69965) + return Xsqlite3CorruptError(tls, 69969) __9: ; sz2 = int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree2+2))))<<8 | int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree2+2) + 1))) if !(iFree2+sz2 > usableSize) { goto __10 } - return Xsqlite3CorruptError(tls, 69967) + return Xsqlite3CorruptError(tls, 69971) __10: ; libc.Xmemmove(tls, data+uintptr(iFree+sz+sz2), data+uintptr(iFree+sz), uint64(iFree2-(iFree+sz))) @@ -28945,7 +28947,7 @@ if !(iFree+sz > usableSize) { goto __11 } - return Xsqlite3CorruptError(tls, 69971) + return Xsqlite3CorruptError(tls, 69975) __11: ; __8: @@ -29009,7 
+29011,7 @@ if !(pc < iCellStart || pc > iCellLast) { goto __22 } - return Xsqlite3CorruptError(tls, 70004) + return Xsqlite3CorruptError(tls, 70008) __22: ; size = int32((*struct { @@ -29019,7 +29021,7 @@ if !(cbrk < iCellStart || pc+size > usableSize) { goto __23 } - return Xsqlite3CorruptError(tls, 70010) + return Xsqlite3CorruptError(tls, 70014) __23: ; *(*U8)(unsafe.Pointer(pAddr1)) = U8(cbrk >> 8) @@ -29041,7 +29043,7 @@ if !(int32(*(*uint8)(unsafe.Pointer(data + uintptr(hdr+7))))+cbrk-iCellFirst != (*MemPage)(unsafe.Pointer(pPage)).FnFree) { goto __24 } - return Xsqlite3CorruptError(tls, 70024) + return Xsqlite3CorruptError(tls, 70028) __24: ; *(*uint8)(unsafe.Pointer(data + uintptr(hdr+5))) = U8(cbrk >> 8) @@ -29076,7 +29078,7 @@ *(*U8)(unsafe.Pointer(aData + uintptr(hdr+7))) += U8(int32(U8(x))) return aData + uintptr(pc) } else if x+pc > maxPC { - *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70081) + *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70085) return uintptr(0) } else { *(*U8)(unsafe.Pointer(aData + uintptr(pc+2))) = U8(x >> 8) @@ -29089,13 +29091,13 @@ pc = int32(*(*U8)(unsafe.Pointer(pTmp)))<<8 | int32(*(*U8)(unsafe.Pointer(pTmp + 1))) if pc <= iAddr { if pc != 0 { - *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70096) + *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70100) } return uintptr(0) } } if pc > maxPC+nByte-4 { - *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70103) + *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70107) } return uintptr(0) } @@ -29120,7 +29122,7 @@ if top == 0 && (*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize == U32(65536) { top = 65536 } else { - return Xsqlite3CorruptError(tls, 70152) + return Xsqlite3CorruptError(tls, 70156) } } @@ -29131,7 +29133,7 @@ *(*int32)(unsafe.Pointer(pIdx)) = libc.AssignInt32(&g2, int32((int64(pSpace)-int64(data))/1)) if g2 <= gap { - return Xsqlite3CorruptError(tls, 70170) + return Xsqlite3CorruptError(tls, 70174) } else { return SQLITE_OK } @@ -29183,22 +29185,22 @@ if int32(iFreeBlk) == 0 { break } - return Xsqlite3CorruptError(tls, 70249) + return Xsqlite3CorruptError(tls, 70253) } iPtr = iFreeBlk } if U32(iFreeBlk) > (*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize-U32(4) { - return Xsqlite3CorruptError(tls, 70254) + return Xsqlite3CorruptError(tls, 70258) } if iFreeBlk != 0 && iEnd+U32(3) >= U32(iFreeBlk) { nFrag = U8(U32(iFreeBlk) - iEnd) if iEnd > U32(iFreeBlk) { - return Xsqlite3CorruptError(tls, 70266) + return Xsqlite3CorruptError(tls, 70270) } iEnd = U32(int32(iFreeBlk) + (int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(iFreeBlk)+2))))<<8 | int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(iFreeBlk)+2) + 1))))) if iEnd > (*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize { - return Xsqlite3CorruptError(tls, 70269) + return Xsqlite3CorruptError(tls, 70273) } iSize = U16(iEnd - U32(iStart)) iFreeBlk = U16(int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFreeBlk))))<<8 | int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFreeBlk) + 1)))) @@ -29208,7 +29210,7 @@ var iPtrEnd int32 = int32(iPtr) + (int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(iPtr)+2))))<<8 | int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(iPtr)+2) + 1)))) if iPtrEnd+3 >= int32(iStart) { if iPtrEnd > int32(iStart) { - return Xsqlite3CorruptError(tls, 70282) + return Xsqlite3CorruptError(tls, 70286) } nFrag = U8(int32(nFrag) + (int32(iStart) - iPtrEnd)) iSize = 
U16(iEnd - U32(iPtr)) @@ -29216,7 +29218,7 @@ } } if int32(nFrag) > int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+7)))) { - return Xsqlite3CorruptError(tls, 70288) + return Xsqlite3CorruptError(tls, 70292) } *(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+7))) -= uint8(int32(nFrag)) } @@ -29224,10 +29226,10 @@ x = U16(int32(*(*U8)(unsafe.Pointer(pTmp)))<<8 | int32(*(*U8)(unsafe.Pointer(pTmp + 1)))) if int32(iStart) <= int32(x) { if int32(iStart) < int32(x) { - return Xsqlite3CorruptError(tls, 70297) + return Xsqlite3CorruptError(tls, 70301) } if int32(iPtr) != int32(hdr)+1 { - return Xsqlite3CorruptError(tls, 70298) + return Xsqlite3CorruptError(tls, 70302) } *(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+1))) = U8(int32(iFreeBlk) >> 8) *(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+1) + 1)) = U8(iFreeBlk) @@ -29287,7 +29289,7 @@ (*MemPage)(unsafe.Pointer(pPage)).FxParseCell = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr) }{btreeParseCellPtrIndex})) - return Xsqlite3CorruptError(tls, 70357) + return Xsqlite3CorruptError(tls, 70361) } } else { (*MemPage)(unsafe.Pointer(pPage)).FchildPtrSize = U8(4) @@ -29323,7 +29325,7 @@ (*MemPage)(unsafe.Pointer(pPage)).FxParseCell = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr) }{btreeParseCellPtrIndex})) - return Xsqlite3CorruptError(tls, 70381) + return Xsqlite3CorruptError(tls, 70385) } } return SQLITE_OK @@ -29353,11 +29355,11 @@ var next U32 var size U32 if pc < top { - return Xsqlite3CorruptError(tls, 70432) + return Xsqlite3CorruptError(tls, 70436) } for 1 != 0 { if pc > iCellLast { - return Xsqlite3CorruptError(tls, 70437) + return Xsqlite3CorruptError(tls, 70441) } next = U32(int32(*(*U8)(unsafe.Pointer(data + uintptr(pc))))<<8 | int32(*(*U8)(unsafe.Pointer(data + uintptr(pc) + 1)))) size = U32(int32(*(*U8)(unsafe.Pointer(data + uintptr(pc+2))))<<8 | int32(*(*U8)(unsafe.Pointer(data + uintptr(pc+2) + 1)))) @@ -29368,15 +29370,15 @@ pc = int32(next) } if next > U32(0) { - return Xsqlite3CorruptError(tls, 70447) + return Xsqlite3CorruptError(tls, 70451) } if U32(pc)+size > uint32(usableSize) { - return Xsqlite3CorruptError(tls, 70451) + return Xsqlite3CorruptError(tls, 70455) } } if nFree > usableSize || nFree < iCellFirst { - return Xsqlite3CorruptError(tls, 70463) + return Xsqlite3CorruptError(tls, 70467) } (*MemPage)(unsafe.Pointer(pPage)).FnFree = int32(U16(nFree - iCellFirst)) return SQLITE_OK @@ -29404,14 +29406,14 @@ pc = int32(*(*U8)(unsafe.Pointer(data + uintptr(cellOffset+i*2))))<<8 | int32(*(*U8)(unsafe.Pointer(data + uintptr(cellOffset+i*2) + 1))) if pc < iCellFirst || pc > iCellLast { - return Xsqlite3CorruptError(tls, 70494) + return Xsqlite3CorruptError(tls, 70498) } sz = int32((*struct { f func(*libc.TLS, uintptr, uintptr) U16 })(unsafe.Pointer(&struct{ uintptr }{(*MemPage)(unsafe.Pointer(pPage)).FxCellSize})).f(tls, pPage, data+uintptr(pc))) if pc+sz > usableSize { - return Xsqlite3CorruptError(tls, 70499) + return Xsqlite3CorruptError(tls, 70503) } } return SQLITE_OK @@ -29425,7 +29427,7 @@ data = (*MemPage)(unsafe.Pointer(pPage)).FaData + uintptr((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset) if decodeFlags(tls, pPage, int32(*(*U8)(unsafe.Pointer(data)))) != 0 { - return Xsqlite3CorruptError(tls, 70531) + return Xsqlite3CorruptError(tls, 70535) } (*MemPage)(unsafe.Pointer(pPage)).FmaskPage = U16((*BtShared)(unsafe.Pointer(pBt)).FpageSize - U32(1)) @@ -29437,7 +29439,7 @@ (*MemPage)(unsafe.Pointer(pPage)).FnCell = 
U16(int32(*(*U8)(unsafe.Pointer(data + 3)))<<8 | int32(*(*U8)(unsafe.Pointer(data + 3 + 1)))) if U32((*MemPage)(unsafe.Pointer(pPage)).FnCell) > ((*BtShared)(unsafe.Pointer(pBt)).FpageSize-U32(8))/U32(6) { - return Xsqlite3CorruptError(tls, 70545) + return Xsqlite3CorruptError(tls, 70549) } (*MemPage)(unsafe.Pointer(pPage)).FnFree = -1 @@ -29540,7 +29542,7 @@ if !(pgno > btreePagecount(tls, pBt)) { goto __1 } - rc = Xsqlite3CorruptError(tls, 70700) + rc = Xsqlite3CorruptError(tls, 70704) goto getAndInitPage_error1 __1: ; @@ -29568,7 +29570,7 @@ if !(pCur != 0 && (int32((*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppPage)))).FnCell) < 1 || int32((*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppPage)))).FintKey) != int32((*BtCursor)(unsafe.Pointer(pCur)).FcurIntKey))) { goto __5 } - rc = Xsqlite3CorruptError(tls, 70721) + rc = Xsqlite3CorruptError(tls, 70725) goto getAndInitPage_error2 __5: ; @@ -29607,7 +29609,7 @@ if Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppPage)))).FpDbPage) > 1 { releasePage(tls, *(*uintptr)(unsafe.Pointer(ppPage))) *(*uintptr)(unsafe.Pointer(ppPage)) = uintptr(0) - return Xsqlite3CorruptError(tls, 70787) + return Xsqlite3CorruptError(tls, 70791) } (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppPage)))).FisInit = U8(0) } else { @@ -30490,7 +30492,7 @@ if !(Xsqlite3WritableSchema(tls, (*BtShared)(unsafe.Pointer(pBt)).Fdb) == 0) { goto __18 } - rc = Xsqlite3CorruptError(tls, 71722) + rc = Xsqlite3CorruptError(tls, 71726) goto page1_init_failed goto __19 __18: @@ -30905,7 +30907,7 @@ if int32(eType) == PTRMAP_OVERFLOW2 { if Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer(pPage)).FaData) != iFrom { - return Xsqlite3CorruptError(tls, 72143) + return Xsqlite3CorruptError(tls, 72147) } Xsqlite3Put4byte(tls, (*MemPage)(unsafe.Pointer(pPage)).FaData, iTo) } else { @@ -30931,7 +30933,7 @@ })(unsafe.Pointer(&struct{ uintptr }{(*MemPage)(unsafe.Pointer(pPage)).FxParseCell})).f(tls, pPage, pCell, bp) if U32((*CellInfo)(unsafe.Pointer(bp)).FnLocal) < (*CellInfo)(unsafe.Pointer(bp)).FnPayload { if pCell+uintptr((*CellInfo)(unsafe.Pointer(bp)).FnSize) > (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr((*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize) { - return Xsqlite3CorruptError(tls, 72162) + return Xsqlite3CorruptError(tls, 72166) } if iFrom == Xsqlite3Get4byte(tls, pCell+uintptr((*CellInfo)(unsafe.Pointer(bp)).FnSize)-uintptr(4)) { Xsqlite3Put4byte(tls, pCell+uintptr((*CellInfo)(unsafe.Pointer(bp)).FnSize)-uintptr(4), iTo) @@ -30940,7 +30942,7 @@ } } else { if pCell+uintptr(4) > (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr((*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize) { - return Xsqlite3CorruptError(tls, 72171) + return Xsqlite3CorruptError(tls, 72175) } if Xsqlite3Get4byte(tls, pCell) == iFrom { Xsqlite3Put4byte(tls, pCell, iTo) @@ -30951,7 +30953,7 @@ if i == nCell { if int32(eType) != PTRMAP_BTREE || Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr(int32((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset)+8)) != iFrom { - return Xsqlite3CorruptError(tls, 72183) + return Xsqlite3CorruptError(tls, 72187) } Xsqlite3Put4byte(tls, (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr(int32((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset)+8), iTo) } @@ -30967,7 +30969,7 @@ var pPager uintptr = (*BtShared)(unsafe.Pointer(pBt)).FpPager if iDbPage < Pgno(3) { - return Xsqlite3CorruptError(tls, 72218) + return Xsqlite3CorruptError(tls, 72222) } 
*(*int32)(unsafe.Pointer(bp)) = Xsqlite3PagerMovepage(tls, pPager, (*MemPage)(unsafe.Pointer(pDbPage)).FpDbPage, iFreePage, isCommit) @@ -31028,7 +31030,7 @@ return rc } if int32(*(*U8)(unsafe.Pointer(bp))) == PTRMAP_ROOTPAGE { - return Xsqlite3CorruptError(tls, 72316) + return Xsqlite3CorruptError(tls, 72320) } if int32(*(*U8)(unsafe.Pointer(bp))) == PTRMAP_FREEPAGE { @@ -31063,7 +31065,7 @@ releasePage(tls, *(*uintptr)(unsafe.Pointer(bp + 32))) if *(*Pgno)(unsafe.Pointer(bp + 40)) > dbSize { releasePage(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) - return Xsqlite3CorruptError(tls, 72368) + return Xsqlite3CorruptError(tls, 72372) } } @@ -31123,7 +31125,7 @@ var nFin Pgno = finalDbSize(tls, pBt, nOrig, nFree) if nOrig < nFin || nFree >= nOrig { - rc = Xsqlite3CorruptError(tls, 72436) + rc = Xsqlite3CorruptError(tls, 72440) } else if nFree > Pgno(0) { rc = saveAllCursors(tls, pBt, uint32(0), uintptr(0)) if rc == SQLITE_OK { @@ -31162,7 +31164,7 @@ nOrig = btreePagecount(tls, pBt) if ptrmapPageno(tls, pBt, nOrig) == nOrig || nOrig == U32(Xsqlite3PendingByte)/(*BtShared)(unsafe.Pointer(pBt)).FpageSize+U32(1) { - return Xsqlite3CorruptError(tls, 72487) + return Xsqlite3CorruptError(tls, 72491) } nFree = Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer((*BtShared)(unsafe.Pointer(pBt)).FpPage1)).FaData+36) @@ -31193,7 +31195,7 @@ } nFin = finalDbSize(tls, pBt, nOrig, nVac) if nFin > nOrig { - return Xsqlite3CorruptError(tls, 72514) + return Xsqlite3CorruptError(tls, 72518) } if nFin < nOrig { rc = saveAllCursors(tls, pBt, uint32(0), uintptr(0)) @@ -31534,7 +31536,7 @@ if iTable <= Pgno(1) { if iTable < Pgno(1) { - return Xsqlite3CorruptError(tls, 72978) + return Xsqlite3CorruptError(tls, 72982) } else if btreePagecount(tls, pBt) == Pgno(0) { iTable = Pgno(0) } @@ -31778,14 +31780,14 @@ var pBt uintptr = (*BtCursor)(unsafe.Pointer(pCur)).FpBt if int32((*BtCursor)(unsafe.Pointer(pCur)).Fix) >= int32((*MemPage)(unsafe.Pointer(pPage)).FnCell) { - return Xsqlite3CorruptError(tls, 73385) + return Xsqlite3CorruptError(tls, 73389) } getCellInfo(tls, pCur) aPayload = (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload if Uptr((int64(aPayload)-int64((*MemPage)(unsafe.Pointer(pPage)).FaData))/1) > Uptr((*BtShared)(unsafe.Pointer(pBt)).FusableSize-U32((*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal)) { - return Xsqlite3CorruptError(tls, 73400) + return Xsqlite3CorruptError(tls, 73404) } if offset < U32((*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal) { @@ -31830,7 +31832,7 @@ for *(*Pgno)(unsafe.Pointer(bp)) != 0 { if *(*Pgno)(unsafe.Pointer(bp)) > (*BtShared)(unsafe.Pointer(pBt)).FnPage { - return Xsqlite3CorruptError(tls, 73462) + return Xsqlite3CorruptError(tls, 73466) } *(*Pgno)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pCur)).FaOverflow + uintptr(iIdx)*4)) = *(*Pgno)(unsafe.Pointer(bp)) @@ -31879,7 +31881,7 @@ } if rc == SQLITE_OK && amt > U32(0) { - return Xsqlite3CorruptError(tls, 73547) + return Xsqlite3CorruptError(tls, 73551) } return rc } @@ -31959,7 +31961,7 @@ func moveToChild(tls *libc.TLS, pCur uintptr, newPgno U32) int32 { if int32((*BtCursor)(unsafe.Pointer(pCur)).FiPage) >= BTCURSOR_MAX_DEPTH-1 { - return Xsqlite3CorruptError(tls, 73684) + return Xsqlite3CorruptError(tls, 73688) } (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnSize = U16(0) *(*U8)(unsafe.Pointer(pCur + 1)) &= libc.Uint8FromInt32(libc.CplInt32(BTCF_ValidNKey | BTCF_ValidOvfl)) @@ -32050,7 +32052,7 @@ if !(int32((*MemPage)(unsafe.Pointer(pRoot)).FisInit) == 0 || libc.Bool32((*BtCursor)(unsafe.Pointer(pCur)).FpKeyInfo == uintptr(0)) 
!= int32((*MemPage)(unsafe.Pointer(pRoot)).FintKey)) { goto __11 } - return Xsqlite3CorruptError(tls, 73823) + return Xsqlite3CorruptError(tls, 73827) __11: ; skip_init: @@ -32070,7 +32072,7 @@ if !((*MemPage)(unsafe.Pointer(pRoot)).Fpgno != Pgno(1)) { goto __16 } - return Xsqlite3CorruptError(tls, 73835) + return Xsqlite3CorruptError(tls, 73839) __16: ; subpage = Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer(pRoot)).FaData+uintptr(int32((*MemPage)(unsafe.Pointer(pRoot)).FhdrOffset)+8)) @@ -32280,7 +32282,7 @@ if !(pCell >= (*MemPage)(unsafe.Pointer(pPage)).FaDataEnd) { goto __21 } - return Xsqlite3CorruptError(tls, 74077) + return Xsqlite3CorruptError(tls, 74081) __21: ; goto __19 @@ -32484,7 +32486,7 @@ if !!(int32((*MemPage)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pCur)).FpPage)).FisInit) != 0) { goto __4 } - return Xsqlite3CorruptError(tls, 74273) + return Xsqlite3CorruptError(tls, 74277) __4: ; goto bypass_moveto_root @@ -32549,7 +32551,7 @@ if !(nCell < 2 || U32(nCell)/(*BtShared)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pCur)).FpBt)).FusableSize > (*BtShared)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pCur)).FpBt)).FnPage) { goto __17 } - rc = Xsqlite3CorruptError(tls, 74360) + rc = Xsqlite3CorruptError(tls, 74364) goto moveto_index_finish __17: ; @@ -32597,7 +32599,7 @@ if !((*UnpackedRecord)(unsafe.Pointer(pIdxKey)).FerrCode != 0) { goto __24 } - rc = Xsqlite3CorruptError(tls, 74392) + rc = Xsqlite3CorruptError(tls, 74396) __24: ; goto moveto_index_finish @@ -32716,7 +32718,7 @@ pPage = (*BtCursor)(unsafe.Pointer(pCur)).FpPage idx = int32(libc.PreIncUint16(&(*BtCursor)(unsafe.Pointer(pCur)).Fix, 1)) if !(int32((*MemPage)(unsafe.Pointer(pPage)).FisInit) != 0) || Xsqlite3FaultSim(tls, 412) != 0 { - return Xsqlite3CorruptError(tls, 74508) + return Xsqlite3CorruptError(tls, 74512) } if idx >= int32((*MemPage)(unsafe.Pointer(pPage)).FnCell) { @@ -32876,7 +32878,7 @@ if !(n >= mxPage) { goto __1 } - return Xsqlite3CorruptError(tls, 74688) + return Xsqlite3CorruptError(tls, 74692) __1: ; if !(n > U32(0)) { @@ -32941,7 +32943,7 @@ if !(iTrunk > mxPage || libc.PostIncUint32(&nSearch, 1) > n) { goto __16 } - rc = Xsqlite3CorruptError(tls, 74744) + rc = Xsqlite3CorruptError(tls, 74748) goto __17 __16: rc = btreeGetUnusedPage(tls, pBt, iTrunk, bp+8, 0) @@ -32977,7 +32979,7 @@ goto __22 } - rc = Xsqlite3CorruptError(tls, 74773) + rc = Xsqlite3CorruptError(tls, 74777) goto end_allocate_page goto __23 __22: @@ -33021,7 +33023,7 @@ if !(iNewTrunk > mxPage) { goto __32 } - rc = Xsqlite3CorruptError(tls, 74807) + rc = Xsqlite3CorruptError(tls, 74811) goto end_allocate_page __32: ; @@ -33133,7 +33135,7 @@ if !(iPage > mxPage || iPage < Pgno(2)) { goto __51 } - rc = Xsqlite3CorruptError(tls, 74872) + rc = Xsqlite3CorruptError(tls, 74876) goto end_allocate_page __51: ; @@ -33291,7 +33293,7 @@ if !(iPage < Pgno(2) || iPage > (*BtShared)(unsafe.Pointer(pBt)).FnPage) { goto __1 } - return Xsqlite3CorruptError(tls, 74999) + return Xsqlite3CorruptError(tls, 75003) __1: ; if !(pMemPage != 0) { @@ -33348,7 +33350,7 @@ if !(iTrunk > btreePagecount(tls, pBt)) { goto __10 } - *(*int32)(unsafe.Pointer(bp + 8)) = Xsqlite3CorruptError(tls, 75046) + *(*int32)(unsafe.Pointer(bp + 8)) = Xsqlite3CorruptError(tls, 75050) goto freepage_out __10: ; @@ -33364,7 +33366,7 @@ if !(nLeaf > (*BtShared)(unsafe.Pointer(pBt)).FusableSize/U32(4)-U32(2)) { goto __12 } - *(*int32)(unsafe.Pointer(bp + 8)) = Xsqlite3CorruptError(tls, 75057) + *(*int32)(unsafe.Pointer(bp + 8)) = Xsqlite3CorruptError(tls, 75061) goto freepage_out 
__12: ; @@ -33438,7 +33440,7 @@ var ovflPageSize U32 if pCell+uintptr((*CellInfo)(unsafe.Pointer(pInfo)).FnSize) > (*MemPage)(unsafe.Pointer(pPage)).FaDataEnd { - return Xsqlite3CorruptError(tls, 75146) + return Xsqlite3CorruptError(tls, 75150) } ovflPgno = Xsqlite3Get4byte(tls, pCell+uintptr((*CellInfo)(unsafe.Pointer(pInfo)).FnSize)-uintptr(4)) pBt = (*MemPage)(unsafe.Pointer(pPage)).FpBt @@ -33450,7 +33452,7 @@ *(*Pgno)(unsafe.Pointer(bp + 8)) = Pgno(0) *(*uintptr)(unsafe.Pointer(bp)) = uintptr(0) if ovflPgno < Pgno(2) || ovflPgno > btreePagecount(tls, pBt) { - return Xsqlite3CorruptError(tls, 75163) + return Xsqlite3CorruptError(tls, 75167) } if nOvfl != 0 { rc = getOverflowPage(tls, pBt, ovflPgno, bp, bp+8) @@ -33461,7 +33463,7 @@ if (*(*uintptr)(unsafe.Pointer(bp)) != 0 || libc.AssignPtrUintptr(bp, btreePageLookup(tls, pBt, ovflPgno)) != uintptr(0)) && Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FpDbPage) != 1 { - rc = Xsqlite3CorruptError(tls, 75183) + rc = Xsqlite3CorruptError(tls, 75187) } else { rc = freePage2(tls, pBt, *(*uintptr)(unsafe.Pointer(bp)), ovflPgno) } @@ -33626,7 +33628,7 @@ hdr = int32((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset) if pc+U32(sz) > (*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize { - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 75436) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 75440) return } rc = freeSpace(tls, pPage, uint16(pc), uint16(sz)) @@ -33905,12 +33907,12 @@ if Uptr(pCell) >= Uptr(aData+uintptr(j)) && Uptr(pCell) < Uptr(pEnd) { if Uptr(pCell+uintptr(sz)) > Uptr(pEnd) { - return Xsqlite3CorruptError(tls, 75737) + return Xsqlite3CorruptError(tls, 75741) } pCell = pTmp + uintptr((int64(pCell)-int64(aData))/1) } else if Uptr(pCell+uintptr(sz)) > Uptr(pSrcEnd) && Uptr(pCell) < Uptr(pSrcEnd) { - return Xsqlite3CorruptError(tls, 75742) + return Xsqlite3CorruptError(tls, 75746) } pData -= uintptr(sz) @@ -33918,7 +33920,7 @@ *(*U8)(unsafe.Pointer(pCellptr + 1)) = U8((int64(pData) - int64(aData)) / 1) pCellptr += uintptr(2) if pData < pCellptr { - return Xsqlite3CorruptError(tls, 75748) + return Xsqlite3CorruptError(tls, 75752) } libc.Xmemmove(tls, pData, pCell, uint64(sz)) @@ -33978,7 +33980,7 @@ if Uptr(*(*uintptr)(unsafe.Pointer((*CellArray)(unsafe.Pointer(pCArray)).FapCell + uintptr(i)*8))+uintptr(sz)) > Uptr(pEnd) && Uptr(*(*uintptr)(unsafe.Pointer((*CellArray)(unsafe.Pointer(pCArray)).FapCell + uintptr(i)*8))) < Uptr(pEnd) { - Xsqlite3CorruptError(tls, 75833) + Xsqlite3CorruptError(tls, 75837) return 1 } libc.Xmemmove(tls, pSlot, *(*uintptr)(unsafe.Pointer((*CellArray)(unsafe.Pointer(pCArray)).FapCell + uintptr(i)*8)), uint64(sz)) @@ -34067,7 +34069,7 @@ if !(nShift > nCell) { goto __2 } - return Xsqlite3CorruptError(tls, 75947) + return Xsqlite3CorruptError(tls, 75951) __2: ; libc.Xmemmove(tls, (*MemPage)(unsafe.Pointer(pPg)).FaCellIdx, (*MemPage)(unsafe.Pointer(pPg)).FaCellIdx+uintptr(nShift*2), uint64(nCell*2)) @@ -34183,7 +34185,7 @@ var pBt uintptr = (*MemPage)(unsafe.Pointer(pPage)).FpBt if int32((*MemPage)(unsafe.Pointer(pPage)).FnCell) == 0 { - return Xsqlite3CorruptError(tls, 76060) + return Xsqlite3CorruptError(tls, 76064) } *(*int32)(unsafe.Pointer(bp + 136)) = allocateBtreePage(tls, pBt, bp, bp+8, uint32(0), uint8(0)) @@ -34503,7 +34505,7 @@ if !(int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pOld)).FaData))) != int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp + 112)))).FaData)))) { 
goto __25 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76481) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76485) goto balance_cleanup __25: ; @@ -34514,7 +34516,7 @@ if !(limit < int32(*(*U16)(unsafe.Pointer(pOld + 28)))) { goto __27 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76505) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76509) goto balance_cleanup __27: ; @@ -34672,7 +34674,7 @@ if !(k > NB+2) { goto __55 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76606) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76610) goto balance_cleanup __55: ; @@ -34746,7 +34748,7 @@ }()) { goto __67 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76639) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76643) goto balance_cleanup __67: ; @@ -34809,7 +34811,7 @@ }()) { goto __75 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76683) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76687) goto balance_cleanup __75: ; @@ -34837,7 +34839,7 @@ *(*int32)(unsafe.Pointer(bp + 172)) == SQLITE_OK) { goto __81 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76716) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76720) __81: ; if !(*(*int32)(unsafe.Pointer(bp + 172)) != 0) { @@ -35098,7 +35100,7 @@ if !(Uptr(pSrcEnd) >= Uptr(pCell1) && Uptr(pSrcEnd) < Uptr(pCell1+uintptr(sz2))) { goto __121 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76916) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76920) goto balance_cleanup __121: ; @@ -35290,7 +35292,7 @@ if pOther != pCur && int32((*BtCursor)(unsafe.Pointer(pOther)).FeState) == CURSOR_VALID && (*BtCursor)(unsafe.Pointer(pOther)).FpPage == (*BtCursor)(unsafe.Pointer(pCur)).FpPage { - return Xsqlite3CorruptError(tls, 77146) + return Xsqlite3CorruptError(tls, 77150) } } return SQLITE_OK @@ -35328,7 +35330,7 @@ break } } else if Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(pPage)).FpDbPage) > 1 { - rc = Xsqlite3CorruptError(tls, 77206) + rc = Xsqlite3CorruptError(tls, 77210) } else { var pParent uintptr = *(*uintptr)(unsafe.Pointer(pCur + 144 + uintptr(iPage-1)*8)) var iIdx int32 = int32(*(*U16)(unsafe.Pointer(pCur + 88 + uintptr(iPage-1)*2))) @@ -35434,7 +35436,7 @@ return rc } if Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FpDbPage) != 1 || (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FisInit != 0 { - rc = Xsqlite3CorruptError(tls, 77370) + rc = Xsqlite3CorruptError(tls, 77374) } else { if U32(iOffset)+ovflPageSize < U32(nTotal) { ovflPgno = Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FaData) @@ -35459,7 +35461,7 @@ if (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload+uintptr((*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal) > (*MemPage)(unsafe.Pointer(pPage)).FaDataEnd || (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload < (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr((*MemPage)(unsafe.Pointer(pPage)).FcellOffset) { - return Xsqlite3CorruptError(tls, 77398) + return Xsqlite3CorruptError(tls, 77402) } if int32((*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal) == nTotal { return btreeOverwriteContent(tls, pPage, (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload, pX, @@ -35529,7 +35531,7 @@ goto __3 } - return Xsqlite3CorruptError(tls, 77479) + return Xsqlite3CorruptError(tls, 77483) 
__3: ; __1: @@ -35642,7 +35644,7 @@ goto __21 } - *(*int32)(unsafe.Pointer(bp + 120)) = Xsqlite3CorruptError(tls, 77602) + *(*int32)(unsafe.Pointer(bp + 120)) = Xsqlite3CorruptError(tls, 77606) goto __22 __21: *(*int32)(unsafe.Pointer(bp + 120)) = btreeComputeFreeSpace(tls, pPage) @@ -35702,6 +35704,7 @@ __25: ; idx = int32((*BtCursor)(unsafe.Pointer(pCur)).Fix) + (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnSize = U16(0) if !(*(*int32)(unsafe.Pointer(bp)) == 0) { goto __31 } @@ -35709,7 +35712,7 @@ if !(idx >= int32((*MemPage)(unsafe.Pointer(pPage)).FnCell)) { goto __33 } - return Xsqlite3CorruptError(tls, 77640) + return Xsqlite3CorruptError(tls, 77645) __33: ; *(*int32)(unsafe.Pointer(bp + 120)) = Xsqlite3PagerWrite(tls, (*MemPage)(unsafe.Pointer(pPage)).FpDbPage) @@ -35747,13 +35750,13 @@ if !(oldCell < (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset)+uintptr(10)) { goto __39 } - return Xsqlite3CorruptError(tls, 77667) + return Xsqlite3CorruptError(tls, 77672) __39: ; if !(oldCell+uintptr(*(*int32)(unsafe.Pointer(bp + 124))) > (*MemPage)(unsafe.Pointer(pPage)).FaDataEnd) { goto __40 } - return Xsqlite3CorruptError(tls, 77670) + return Xsqlite3CorruptError(tls, 77675) __40: ; libc.Xmemcpy(tls, oldCell, newCell, uint64(*(*int32)(unsafe.Pointer(bp + 124)))) @@ -35784,7 +35787,6 @@ ; *(*int32)(unsafe.Pointer(bp + 120)) = insertCell(tls, pPage, idx, newCell, *(*int32)(unsafe.Pointer(bp + 124)), uintptr(0), uint32(0)) - (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnSize = U16(0) if !((*MemPage)(unsafe.Pointer(pPage)).FnOverflow != 0) { goto __44 } @@ -35859,7 +35861,7 @@ nIn = U32((*BtCursor)(unsafe.Pointer(pSrc)).Finfo.FnLocal) aIn = (*BtCursor)(unsafe.Pointer(pSrc)).Finfo.FpPayload if aIn+uintptr(nIn) > (*MemPage)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pSrc)).FpPage)).FaDataEnd { - return Xsqlite3CorruptError(tls, 77773) + return Xsqlite3CorruptError(tls, 77777) } nRem = (*BtCursor)(unsafe.Pointer(pSrc)).Finfo.FnPayload if nIn == nRem && nIn < U32((*MemPage)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pDest)).FpPage)).FmaxLocal) { @@ -35884,7 +35886,7 @@ if nRem > nIn { if aIn+uintptr(nIn)+uintptr(4) > (*MemPage)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pSrc)).FpPage)).FaDataEnd { - return Xsqlite3CorruptError(tls, 77798) + return Xsqlite3CorruptError(tls, 77802) } ovflIn = Xsqlite3Get4byte(tls, (*BtCursor)(unsafe.Pointer(pSrc)).Finfo.FpPayload+uintptr(nIn)) } @@ -35985,7 +35987,7 @@ return *(*int32)(unsafe.Pointer(bp + 24)) } } else { - return Xsqlite3CorruptError(tls, 77894) + return Xsqlite3CorruptError(tls, 77898) } } @@ -35993,11 +35995,11 @@ iCellIdx = int32((*BtCursor)(unsafe.Pointer(pCur)).Fix) pPage = (*BtCursor)(unsafe.Pointer(pCur)).FpPage if int32((*MemPage)(unsafe.Pointer(pPage)).FnCell) <= iCellIdx { - return Xsqlite3CorruptError(tls, 77903) + return Xsqlite3CorruptError(tls, 77907) } pCell = (*MemPage)(unsafe.Pointer(pPage)).FaData + uintptr(int32((*MemPage)(unsafe.Pointer(pPage)).FmaskPage)&(int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FaCellIdx + uintptr(2*iCellIdx))))<<8|int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FaCellIdx + uintptr(2*iCellIdx) + 1))))) if (*MemPage)(unsafe.Pointer(pPage)).FnFree < 0 && btreeComputeFreeSpace(tls, pPage) != 0 { - return Xsqlite3CorruptError(tls, 77907) + return Xsqlite3CorruptError(tls, 77911) } bPreserve = U8(libc.Bool32(int32(flags)&BTREE_SAVEPOSITION != 0)) @@ -36072,7 +36074,7 @@ } pCell = (*MemPage)(unsafe.Pointer(pLeaf)).FaData + 
uintptr(int32((*MemPage)(unsafe.Pointer(pLeaf)).FmaskPage)&(int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pLeaf)).FaCellIdx + uintptr(2*(int32((*MemPage)(unsafe.Pointer(pLeaf)).FnCell)-1)))))<<8|int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pLeaf)).FaCellIdx + uintptr(2*(int32((*MemPage)(unsafe.Pointer(pLeaf)).FnCell)-1)) + 1))))) if pCell < (*MemPage)(unsafe.Pointer(pLeaf)).FaData+4 { - return Xsqlite3CorruptError(tls, 77998) + return Xsqlite3CorruptError(tls, 78002) } nCell = int32((*struct { f func(*libc.TLS, uintptr, uintptr) U16 @@ -36141,7 +36143,7 @@ Xsqlite3BtreeGetMeta(tls, p, BTREE_LARGEST_ROOT_PAGE, bp) if *(*Pgno)(unsafe.Pointer(bp)) > btreePagecount(tls, pBt) { - return Xsqlite3CorruptError(tls, 78114) + return Xsqlite3CorruptError(tls, 78118) } *(*Pgno)(unsafe.Pointer(bp))++ @@ -36170,7 +36172,7 @@ } *(*int32)(unsafe.Pointer(bp + 40)) = ptrmapGet(tls, pBt, *(*Pgno)(unsafe.Pointer(bp)), bp+32, bp+36) if int32(*(*U8)(unsafe.Pointer(bp + 32))) == PTRMAP_ROOTPAGE || int32(*(*U8)(unsafe.Pointer(bp + 32))) == PTRMAP_FREEPAGE { - *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3CorruptError(tls, 78162) + *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3CorruptError(tls, 78166) } if *(*int32)(unsafe.Pointer(bp + 40)) != SQLITE_OK { releasePage(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) @@ -36246,7 +36248,7 @@ if !(pgno > btreePagecount(tls, pBt)) { goto __1 } - return Xsqlite3CorruptError(tls, 78252) + return Xsqlite3CorruptError(tls, 78256) __1: ; *(*int32)(unsafe.Pointer(bp + 32)) = getAndInitPage(tls, pBt, pgno, bp, uintptr(0), 0) @@ -36260,7 +36262,7 @@ Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FpDbPage) != 1+libc.Bool32(pgno == Pgno(1))) { goto __3 } - *(*int32)(unsafe.Pointer(bp + 32)) = Xsqlite3CorruptError(tls, 78259) + *(*int32)(unsafe.Pointer(bp + 32)) = Xsqlite3CorruptError(tls, 78263) goto cleardatabasepage_out __3: ; @@ -36394,7 +36396,7 @@ var pBt uintptr = (*Btree)(unsafe.Pointer(p)).FpBt if iTable > btreePagecount(tls, pBt) { - return Xsqlite3CorruptError(tls, 78363) + return Xsqlite3CorruptError(tls, 78367) } *(*int32)(unsafe.Pointer(bp + 12)) = Xsqlite3BtreeClearTable(tls, p, int32(iTable), uintptr(0)) @@ -38864,7 +38866,7 @@ var rc int32 (*Mem)(unsafe.Pointer(pMem)).Fflags = U16(MEM_Null) if Xsqlite3BtreeMaxRecordSize(tls, pCur) < Sqlite3_int64(offset+amt) { - return Xsqlite3CorruptError(tls, 81630) + return Xsqlite3CorruptError(tls, 81634) } if SQLITE_OK == libc.AssignInt32(&rc, Xsqlite3VdbeMemClearAndResize(tls, pMem, int32(amt+U32(1)))) { rc = Xsqlite3BtreePayload(tls, pCur, offset, amt, (*Mem)(unsafe.Pointer(pMem)).Fz) @@ -39513,7 +39515,7 @@ return Xsqlite3GetVarint32(tls, a, bp) }()) if *(*int32)(unsafe.Pointer(bp)) > nRec || iHdr >= *(*int32)(unsafe.Pointer(bp)) { - return Xsqlite3CorruptError(tls, 82270) + return Xsqlite3CorruptError(tls, 82274) } iField = *(*int32)(unsafe.Pointer(bp)) for i = 0; i <= iCol; i++ { @@ -39528,14 +39530,14 @@ }()) if iHdr > *(*int32)(unsafe.Pointer(bp)) { - return Xsqlite3CorruptError(tls, 82276) + return Xsqlite3CorruptError(tls, 82280) } szField = int32(Xsqlite3VdbeSerialTypeLen(tls, *(*U32)(unsafe.Pointer(bp + 4)))) iField = iField + szField } if iField > nRec { - return Xsqlite3CorruptError(tls, 82282) + return Xsqlite3CorruptError(tls, 82286) } if pMem == uintptr(0) { pMem = libc.AssignPtrUintptr(ppVal, Xsqlite3ValueNew(tls, db)) @@ -41839,7 +41841,7 @@ return rc } if *(*int32)(unsafe.Pointer(bp)) != 0 { - return Xsqlite3CorruptError(tls, 86058) + return 
Xsqlite3CorruptError(tls, 86062) } (*VdbeCursor)(unsafe.Pointer(p)).FdeferredMoveto = U8(0) (*VdbeCursor)(unsafe.Pointer(p)).FcacheStatus = U32(CACHE_STALE) @@ -42390,7 +42392,7 @@ i = 0 } if d1 > uint32(nKey1) { - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 86985)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 86989)) return 0 } @@ -42455,7 +42457,7 @@ if d1+U32((*Mem)(unsafe.Pointer(bp+8)).Fn) > uint32(nKey1) || int32((*KeyInfo)(unsafe.Pointer(libc.AssignUintptr(&pKeyInfo, (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FpKeyInfo))).FnAllField) <= i { - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87062)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87066)) return 0 } else if *(*uintptr)(unsafe.Pointer(pKeyInfo + 32 + uintptr(i)*8)) != 0 { (*Mem)(unsafe.Pointer(bp + 8)).Fenc = (*KeyInfo)(unsafe.Pointer(pKeyInfo)).Fenc @@ -42489,7 +42491,7 @@ var nStr int32 = int32((*(*U32)(unsafe.Pointer(bp + 64)) - U32(12)) / U32(2)) if d1+U32(nStr) > uint32(nKey1) { - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87092)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87096)) return 0 } else if int32((*Mem)(unsafe.Pointer(pRhs)).Fflags)&MEM_Zero != 0 { if !(isAllZero(tls, aKey1+uintptr(d1), nStr) != 0) { @@ -42539,7 +42541,7 @@ } idx1 = idx1 + U32(Xsqlite3VarintLen(tls, uint64(*(*U32)(unsafe.Pointer(bp + 64))))) if idx1 >= *(*U32)(unsafe.Pointer(bp + 4)) { - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87136)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87140)) return 0 } } @@ -42685,7 +42687,7 @@ if !(szHdr+nStr > nKey1) { goto __7 } - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87299)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87303)) return 0 __7: ; @@ -42856,7 +42858,7 @@ idx_rowid_corruption: ; Xsqlite3VdbeMemReleaseMalloc(tls, bp) - return Xsqlite3CorruptError(tls, 87457) + return Xsqlite3CorruptError(tls, 87461) } // Compare the key of the index entry that cursor pC is pointing to against @@ -42882,7 +42884,7 @@ if nCellKey <= int64(0) || nCellKey > int64(0x7fffffff) { *(*int32)(unsafe.Pointer(res)) = 0 - return Xsqlite3CorruptError(tls, 87490) + return Xsqlite3CorruptError(tls, 87494) } Xsqlite3VdbeMemInit(tls, bp, db, uint16(0)) rc = Xsqlite3VdbeMemFromBtreeZeroOffset(tls, pCur, U32(nCellKey), bp) @@ -43156,7 +43158,7 @@ var v uintptr = pStmt var db uintptr = (*Vdbe)(unsafe.Pointer(v)).Fdb if vdbeSafety(tls, v) != 0 { - return Xsqlite3MisuseError(tls, 87854) + return Xsqlite3MisuseError(tls, 87858) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) if (*Vdbe)(unsafe.Pointer(v)).FstartTime > int64(0) { @@ -43796,7 +43798,7 @@ var db uintptr if vdbeSafetyNotNull(tls, v) != 0 { - return Xsqlite3MisuseError(tls, 88544) + return Xsqlite3MisuseError(tls, 88548) } db = (*Vdbe)(unsafe.Pointer(v)).Fdb Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) @@ -44316,7 +44318,7 @@ var pVar uintptr if vdbeSafetyNotNull(tls, p) != 0 { - return Xsqlite3MisuseError(tls, 89208) + return Xsqlite3MisuseError(tls, 89212) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer((*Vdbe)(unsafe.Pointer(p)).Fdb)).Fmutex) if int32((*Vdbe)(unsafe.Pointer(p)).FeVdbeState) != VDBE_READY_STATE { @@ -44324,7 +44326,7 @@ 
Xsqlite3_mutex_leave(tls, (*Sqlite3)(unsafe.Pointer((*Vdbe)(unsafe.Pointer(p)).Fdb)).Fmutex) Xsqlite3_log(tls, SQLITE_MISUSE, ts+5357, libc.VaList(bp, (*Vdbe)(unsafe.Pointer(p)).FzSql)) - return Xsqlite3MisuseError(tls, 89216) + return Xsqlite3MisuseError(tls, 89220) } if i >= uint32((*Vdbe)(unsafe.Pointer(p)).FnVar) { Xsqlite3Error(tls, (*Vdbe)(unsafe.Pointer(p)).Fdb, SQLITE_RANGE) @@ -44739,7 +44741,7 @@ if !(!(p != 0) || (*PreUpdate)(unsafe.Pointer(p)).Fop == SQLITE_INSERT) { goto __1 } - rc = Xsqlite3MisuseError(tls, 89707) + rc = Xsqlite3MisuseError(tls, 89711) goto preupdate_old_out __1: ; @@ -44883,7 +44885,7 @@ if !(!(p != 0) || (*PreUpdate)(unsafe.Pointer(p)).Fop == SQLITE_DELETE) { goto __1 } - rc = Xsqlite3MisuseError(tls, 89809) + rc = Xsqlite3MisuseError(tls, 89813) goto preupdate_new_out __1: ; @@ -45327,10 +45329,6 @@ } else if int32((*Mem)(unsafe.Pointer(p)).Fflags)&MEM_Real != 0 { h = h + U64(Xsqlite3VdbeIntValue(tls, p)) } else if int32((*Mem)(unsafe.Pointer(p)).Fflags)&(MEM_Str|MEM_Blob) != 0 { - h = h + U64((*Mem)(unsafe.Pointer(p)).Fn) - if int32((*Mem)(unsafe.Pointer(p)).Fflags)&MEM_Zero != 0 { - h = h + U64(*(*int32)(unsafe.Pointer(p))) - } } } return h @@ -47978,7 +47976,7 @@ goto __9 goto __425 __424: - rc = Xsqlite3CorruptError(tls, 93317) + rc = Xsqlite3CorruptError(tls, 93320) goto abort_due_to_error __425: ; @@ -49738,7 +49736,7 @@ if !((*Op)(unsafe.Pointer(pOp)).Fp2 == 0) { goto __682 } - rc = Xsqlite3CorruptError(tls, 95560) + rc = Xsqlite3CorruptError(tls, 95563) goto __683 __682: goto jump_to_p2 @@ -50516,7 +50514,7 @@ if !((*Op)(unsafe.Pointer(pOp)).Fp5 != 0 && !(Xsqlite3WritableSchema(tls, db) != 0)) { goto __770 } - rc = Xsqlite3ReportError(tls, SQLITE_CORRUPT|int32(3)<<8, 96635, ts+5866) + rc = Xsqlite3ReportError(tls, SQLITE_CORRUPT|int32(3)<<8, 96638, ts+5866) goto abort_due_to_error __770: ; @@ -50626,7 +50624,7 @@ if !(nCellKey <= int64(0) || nCellKey > int64(0x7fffffff)) { goto __781 } - rc = Xsqlite3CorruptError(tls, 96840) + rc = Xsqlite3CorruptError(tls, 96843) goto abort_due_to_error __781: ; @@ -50820,7 +50818,7 @@ goto __803 } - rc = Xsqlite3CorruptError(tls, 97092) + rc = Xsqlite3CorruptError(tls, 97095) __803: ; Xsqlite3DbFreeNN(tls, db, zSql) @@ -52187,7 +52185,7 @@ if !(rc == SQLITE_IOERR|int32(33)<<8) { goto __957 } - rc = Xsqlite3CorruptError(tls, 99031) + rc = Xsqlite3CorruptError(tls, 99034) __957: ; __956: @@ -52707,7 +52705,7 @@ var db uintptr if p == uintptr(0) { - return Xsqlite3MisuseError(tls, 99516) + return Xsqlite3MisuseError(tls, 99519) } db = (*Incrblob)(unsafe.Pointer(p)).Fdb Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) @@ -52790,7 +52788,7 @@ var db uintptr if p == uintptr(0) { - return Xsqlite3MisuseError(tls, 99616) + return Xsqlite3MisuseError(tls, 99619) } db = (*Incrblob)(unsafe.Pointer(p)).Fdb Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) @@ -56230,14 +56228,10 @@ ; Xsqlite3WalkExpr(tls, pWalker, (*Expr)(unsafe.Pointer(pExpr)).FpLeft) if 0 == Xsqlite3ExprCanBeNull(tls, (*Expr)(unsafe.Pointer(pExpr)).FpLeft) && !(int32((*Parse)(unsafe.Pointer(pParse)).FeParseMode) >= PARSE_MODE_RENAME) { - if int32((*Expr)(unsafe.Pointer(pExpr)).Fop) == TK_NOTNULL { - *(*uintptr)(unsafe.Pointer(pExpr + 8)) = ts + 6764 - *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_IsTrue) - } else { - *(*uintptr)(unsafe.Pointer(pExpr + 8)) = ts + 6769 - *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_IsFalse) - } - (*Expr)(unsafe.Pointer(pExpr)).Fop = U8(TK_TRUEFALSE) + *(*int32)(unsafe.Pointer(pExpr + 8)) = 
libc.Bool32(int32((*Expr)(unsafe.Pointer(pExpr)).Fop) == TK_NOTNULL) + *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_IntValue) + (*Expr)(unsafe.Pointer(pExpr)).Fop = U8(TK_INTEGER) + i = 0 p = pNC __4: @@ -56281,7 +56275,7 @@ var pLeft uintptr = (*Expr)(unsafe.Pointer(pExpr)).FpLeft if (*NameContext)(unsafe.Pointer(pNC)).FncFlags&(NC_IdxExpr|NC_GenCol) != 0 { - notValidImpl(tls, pParse, pNC, ts+6775, uintptr(0), pExpr) + notValidImpl(tls, pParse, pNC, ts+6764, uintptr(0), pExpr) } pRight = (*Expr)(unsafe.Pointer(pExpr)).FpRight @@ -56345,7 +56339,7 @@ (*Expr)(unsafe.Pointer(pExpr)).FiTable = exprProbability(tls, (*ExprList_item)(unsafe.Pointer(pList+8+1*32)).FpExpr) if (*Expr)(unsafe.Pointer(pExpr)).FiTable < 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+6792, libc.VaList(bp, pExpr)) + ts+6781, libc.VaList(bp, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } } else { @@ -56361,7 +56355,7 @@ var auth int32 = Xsqlite3AuthCheck(tls, pParse, SQLITE_FUNCTION, uintptr(0), (*FuncDef)(unsafe.Pointer(pDef)).FzName, uintptr(0)) if auth != SQLITE_OK { if auth == SQLITE_DENY { - Xsqlite3ErrorMsg(tls, pParse, ts+6856, + Xsqlite3ErrorMsg(tls, pParse, ts+6845, libc.VaList(bp+8, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } @@ -56375,7 +56369,7 @@ } if (*FuncDef)(unsafe.Pointer(pDef)).FfuncFlags&U32(SQLITE_FUNC_CONSTANT) == U32(0) { if (*NameContext)(unsafe.Pointer(pNC)).FncFlags&(NC_IdxExpr|NC_PartIdx|NC_GenCol) != 0 { - notValidImpl(tls, pParse, pNC, ts+6892, uintptr(0), pExpr) + notValidImpl(tls, pParse, pNC, ts+6881, uintptr(0), pExpr) } } else { @@ -56398,30 +56392,30 @@ if 0 == libc.Bool32(int32((*Parse)(unsafe.Pointer(pParse)).FeParseMode) >= PARSE_MODE_RENAME) { if pDef != 0 && (*FuncDef)(unsafe.Pointer(pDef)).FxValue == uintptr(0) && pWin != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+6920, libc.VaList(bp+16, pExpr)) + ts+6909, libc.VaList(bp+16, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } else if is_agg != 0 && (*NameContext)(unsafe.Pointer(pNC)).FncFlags&NC_AllowAgg == 0 || is_agg != 0 && (*FuncDef)(unsafe.Pointer(pDef)).FfuncFlags&U32(SQLITE_FUNC_WINDOW) != 0 && !(pWin != 0) || is_agg != 0 && pWin != 0 && (*NameContext)(unsafe.Pointer(pNC)).FncFlags&NC_AllowWin == 0 { var zType uintptr if (*FuncDef)(unsafe.Pointer(pDef)).FfuncFlags&U32(SQLITE_FUNC_WINDOW) != 0 || pWin != 0 { - zType = ts + 6963 + zType = ts + 6952 } else { - zType = ts + 6970 + zType = ts + 6959 } - Xsqlite3ErrorMsg(tls, pParse, ts+6980, libc.VaList(bp+24, zType, pExpr)) + Xsqlite3ErrorMsg(tls, pParse, ts+6969, libc.VaList(bp+24, zType, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ is_agg = 0 } else if no_such_func != 0 && int32((*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).Finit.Fbusy) == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+7008, libc.VaList(bp+40, pExpr)) + Xsqlite3ErrorMsg(tls, pParse, ts+6997, libc.VaList(bp+40, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } else if wrong_num_args != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+7030, + Xsqlite3ErrorMsg(tls, pParse, ts+7019, libc.VaList(bp+48, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } else if is_agg == 0 && (*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_WinFunc) != U32(0) { Xsqlite3ErrorMsg(tls, pParse, - ts+7074, + ts+7063, libc.VaList(bp+56, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } @@ -56493,15 +56487,15 @@ var nRef int32 = (*NameContext)(unsafe.Pointer(pNC)).FnRef if (*NameContext)(unsafe.Pointer(pNC)).FncFlags&NC_SelfRef != 0 { - notValidImpl(tls, pParse, pNC, ts+7122, pExpr, pExpr) + 
notValidImpl(tls, pParse, pNC, ts+7111, pExpr, pExpr) } else { Xsqlite3WalkSelect(tls, pWalker, *(*uintptr)(unsafe.Pointer(pExpr + 32))) } if nRef != (*NameContext)(unsafe.Pointer(pNC)).FnRef { *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_VarSelect) - *(*int32)(unsafe.Pointer(pNC + 40)) |= NC_VarSelect } + *(*int32)(unsafe.Pointer(pNC + 40)) |= NC_Subquery } break @@ -56509,7 +56503,7 @@ case TK_VARIABLE: { if (*NameContext)(unsafe.Pointer(pNC)).FncFlags&(NC_IsCheck|NC_PartIdx|NC_IdxExpr|NC_GenCol) != 0 { - notValidImpl(tls, pParse, pNC, ts+7133, pExpr, pExpr) + notValidImpl(tls, pParse, pNC, ts+7122, pExpr, pExpr) } break @@ -56640,7 +56634,7 @@ defer tls.Free(24) Xsqlite3ErrorMsg(tls, pParse, - ts+7144, libc.VaList(bp, i, zType, mx)) + ts+7133, libc.VaList(bp, i, zType, mx)) Xsqlite3RecordErrorOffsetOfExpr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pError) } @@ -56660,7 +56654,7 @@ } db = (*Parse)(unsafe.Pointer(pParse)).Fdb if (*ExprList)(unsafe.Pointer(pOrderBy)).FnExpr > *(*int32)(unsafe.Pointer(db + 136 + 2*4)) { - Xsqlite3ErrorMsg(tls, pParse, ts+7200, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+7189, 0) return 1 } for i = 0; i < (*ExprList)(unsafe.Pointer(pOrderBy)).FnExpr; i++ { @@ -56695,7 +56689,7 @@ } if Xsqlite3ExprIsInteger(tls, pE, bp+8) != 0 { if *(*int32)(unsafe.Pointer(bp + 8)) <= 0 || *(*int32)(unsafe.Pointer(bp + 8)) > (*ExprList)(unsafe.Pointer(pEList)).FnExpr { - resolveOutOfRangeError(tls, pParse, ts+7234, i+1, (*ExprList)(unsafe.Pointer(pEList)).FnExpr, pE) + resolveOutOfRangeError(tls, pParse, ts+7223, i+1, (*ExprList)(unsafe.Pointer(pEList)).FnExpr, pE) return 1 } } else { @@ -56752,7 +56746,7 @@ for i = 0; i < (*ExprList)(unsafe.Pointer(pOrderBy)).FnExpr; i++ { if int32(*(*uint16)(unsafe.Pointer(pOrderBy + 8 + uintptr(i)*32 + 16 + 4))&0x4>>2) == 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+7240, libc.VaList(bp, i+1)) + ts+7229, libc.VaList(bp, i+1)) return 1 } } @@ -56780,7 +56774,7 @@ return 0 } if (*ExprList)(unsafe.Pointer(pOrderBy)).FnExpr > *(*int32)(unsafe.Pointer(db + 136 + 2*4)) { - Xsqlite3ErrorMsg(tls, pParse, ts+7301, libc.VaList(bp, zType)) + Xsqlite3ErrorMsg(tls, pParse, ts+7290, libc.VaList(bp, zType)) return 1 } pEList = (*Select)(unsafe.Pointer(pSelect)).FpEList @@ -56994,7 +56988,7 @@ *(*int32)(unsafe.Pointer(bp + 40)) |= NC_UEList if (*Select)(unsafe.Pointer(p)).FpHaving != 0 { if (*Select)(unsafe.Pointer(p)).FselFlags&U32(SF_Aggregate) == U32(0) { - Xsqlite3ErrorMsg(tls, pParse, ts+7332, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+7321, 0) return WRC_Abort } if Xsqlite3ResolveExprNames(tls, bp, (*Select)(unsafe.Pointer(p)).FpHaving) != 0 { @@ -57034,7 +57028,7 @@ if (*Select)(unsafe.Pointer(p)).FpOrderBy != uintptr(0) && isCompound <= nCompound && - resolveOrderGroupBy(tls, bp, p, (*Select)(unsafe.Pointer(p)).FpOrderBy, ts+7234) != 0 { + resolveOrderGroupBy(tls, bp, p, (*Select)(unsafe.Pointer(p)).FpOrderBy, ts+7223) != 0 { return WRC_Abort } if (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { @@ -57045,7 +57039,7 @@ if pGroupBy != 0 { var pItem uintptr - if resolveOrderGroupBy(tls, bp, p, pGroupBy, ts+7371) != 0 || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { + if resolveOrderGroupBy(tls, bp, p, pGroupBy, ts+7360) != 0 || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { return WRC_Abort } i = 0 @@ -57057,7 +57051,7 @@ { if (*Expr)(unsafe.Pointer((*ExprList_item)(unsafe.Pointer(pItem)).FpExpr)).Fflags&U32(EP_Agg) != U32(0) { Xsqlite3ErrorMsg(tls, pParse, - ts+7377, 0) + ts+7366, 0) return WRC_Abort } @@ -57921,7 +57915,7 @@ var mxHeight int32 = 
*(*int32)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb + 136 + 3*4)) if nHeight > mxHeight { Xsqlite3ErrorMsg(tls, pParse, - ts+7436, libc.VaList(bp, mxHeight)) + ts+7425, libc.VaList(bp, mxHeight)) rc = SQLITE_ERROR } return rc @@ -58170,10 +58164,10 @@ nExprElem = 1 } if nExprElem != nElem { - Xsqlite3ErrorMsg(tls, pParse, ts+7484, + Xsqlite3ErrorMsg(tls, pParse, ts+7473, libc.VaList(bp, nExprElem, func() uintptr { if nExprElem > 1 { - return ts + 7528 + return ts + 7517 } return ts + 1554 }(), nElem)) @@ -58214,7 +58208,7 @@ !(int32((*Parse)(unsafe.Pointer(pParse)).FeParseMode) >= PARSE_MODE_RENAME) { Xsqlite3ExprDeferredDelete(tls, pParse, pLeft) Xsqlite3ExprDeferredDelete(tls, pParse, pRight) - return Xsqlite3Expr(tls, db, TK_INTEGER, ts+7530) + return Xsqlite3Expr(tls, db, TK_INTEGER, ts+7519) } else { return Xsqlite3PExpr(tls, pParse, TK_AND, pLeft, pRight) } @@ -58240,7 +58234,7 @@ if pList != 0 && (*ExprList)(unsafe.Pointer(pList)).FnExpr > *(*int32)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb + 136 + 6*4)) && !(int32((*Parse)(unsafe.Pointer(pParse)).Fnested) != 0) { - Xsqlite3ErrorMsg(tls, pParse, ts+7532, libc.VaList(bp, pToken)) + Xsqlite3ErrorMsg(tls, pParse, ts+7521, libc.VaList(bp, pToken)) } *(*uintptr)(unsafe.Pointer(pNew + 32)) = pList *(*U32)(unsafe.Pointer(pNew + 4)) |= U32(EP_HasFunc) @@ -58268,7 +58262,7 @@ if (*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_FromDDL) != U32(0) { if (*FuncDef)(unsafe.Pointer(pDef)).FfuncFlags&U32(SQLITE_FUNC_DIRECT) != U32(0) || (*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).Fflags&uint64(SQLITE_TrustedSchema) == uint64(0) { - Xsqlite3ErrorMsg(tls, pParse, ts+7566, libc.VaList(bp, pExpr)) + Xsqlite3ErrorMsg(tls, pParse, ts+7555, libc.VaList(bp, pExpr)) } } } @@ -58315,7 +58309,7 @@ } if bOk == 0 || *(*I64)(unsafe.Pointer(bp + 8)) < int64(1) || *(*I64)(unsafe.Pointer(bp + 8)) > I64(*(*int32)(unsafe.Pointer(db + 136 + 9*4))) { - Xsqlite3ErrorMsg(tls, pParse, ts+7586, + Xsqlite3ErrorMsg(tls, pParse, ts+7575, libc.VaList(bp, *(*int32)(unsafe.Pointer(db + 136 + 9*4)))) Xsqlite3RecordErrorOffsetOfExpr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pExpr) return @@ -58340,7 +58334,7 @@ } (*Expr)(unsafe.Pointer(pExpr)).FiColumn = x if int32(x) > *(*int32)(unsafe.Pointer(db + 136 + 9*4)) { - Xsqlite3ErrorMsg(tls, pParse, ts+7629, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+7618, 0) Xsqlite3RecordErrorOffsetOfExpr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pExpr) } } @@ -58915,7 +58909,7 @@ if !(int32((*Expr)(unsafe.Pointer(pExpr)).Fop) != TK_SELECT && (*IdList)(unsafe.Pointer(pColumns)).FnId != libc.AssignInt32(&n, Xsqlite3ExprVectorSize(tls, pExpr))) { goto __3 } - Xsqlite3ErrorMsg(tls, pParse, ts+7652, + Xsqlite3ErrorMsg(tls, pParse, ts+7641, libc.VaList(bp, (*IdList)(unsafe.Pointer(pColumns)).FnId, n)) goto vector_append_error __3: @@ -59038,7 +59032,7 @@ var mx int32 = *(*int32)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb + 136 + 2*4)) if pEList != 0 && (*ExprList)(unsafe.Pointer(pEList)).FnExpr > mx { - Xsqlite3ErrorMsg(tls, pParse, ts+7682, libc.VaList(bp, zObject)) + Xsqlite3ErrorMsg(tls, pParse, ts+7671, libc.VaList(bp, zObject)) } } @@ -59094,10 +59088,10 @@ // "false" EP_IsFalse // anything else 0 func Xsqlite3IsTrueOrFalse(tls *libc.TLS, zIn uintptr) U32 { - if Xsqlite3StrICmp(tls, zIn, ts+6764) == 0 { + if Xsqlite3StrICmp(tls, zIn, ts+7694) == 0 { return U32(EP_IsTrue) } - if Xsqlite3StrICmp(tls, zIn, ts+6769) == 0 { + if Xsqlite3StrICmp(tls, zIn, ts+7699) == 0 { return U32(EP_IsFalse) } return U32(0) @@ 
-60171,7 +60165,7 @@ } if (*Select)(unsafe.Pointer(pSel)).FpLimit != 0 { var db uintptr = (*Parse)(unsafe.Pointer(pParse)).Fdb - pLimit = Xsqlite3Expr(tls, db, TK_INTEGER, ts+7530) + pLimit = Xsqlite3Expr(tls, db, TK_INTEGER, ts+7519) if pLimit != 0 { (*Expr)(unsafe.Pointer(pLimit)).FaffExpr = uint8(SQLITE_AFF_NUMERIC) pLimit = Xsqlite3PExpr(tls, pParse, TK_NE, @@ -60609,6 +60603,7 @@ func Xsqlite3ExprCodeGeneratedColumn(tls *libc.TLS, pParse uintptr, pTab uintptr, pCol uintptr, regOut int32) { var iAddr int32 var v uintptr = (*Parse)(unsafe.Pointer(pParse)).FpVdbe + var nErr int32 = (*Parse)(unsafe.Pointer(pParse)).FnErr if (*Parse)(unsafe.Pointer(pParse)).FiSelfTab > 0 { iAddr = Xsqlite3VdbeAddOp3(tls, v, OP_IfNullRow, (*Parse)(unsafe.Pointer(pParse)).FiSelfTab-1, 0, regOut) @@ -60622,6 +60617,9 @@ if iAddr != 0 { Xsqlite3VdbeJumpHere(tls, v, iAddr) } + if (*Parse)(unsafe.Pointer(pParse)).FnErr > nErr { + (*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).FerrByteOffset = -1 + } } // Generate code to extract the value of the iCol-th column of a table. @@ -60840,6 +60838,7 @@ var p uintptr var v uintptr for p = (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr; p != 0; p = (*IndexedExpr)(unsafe.Pointer(p)).FpIENext { + var exprAff U8 var iDataCur int32 = (*IndexedExpr)(unsafe.Pointer(p)).FiDataCur if iDataCur < 0 { continue @@ -60853,6 +60852,14 @@ if Xsqlite3ExprCompare(tls, uintptr(0), pExpr, (*IndexedExpr)(unsafe.Pointer(p)).FpExpr, iDataCur) != 0 { continue } + + exprAff = Xsqlite3ExprAffinity(tls, pExpr) + if int32(exprAff) <= SQLITE_AFF_BLOB && int32((*IndexedExpr)(unsafe.Pointer(p)).Faff) != SQLITE_AFF_BLOB || + int32(exprAff) == SQLITE_AFF_TEXT && int32((*IndexedExpr)(unsafe.Pointer(p)).Faff) != SQLITE_AFF_TEXT || + int32(exprAff) >= SQLITE_AFF_NUMERIC && int32((*IndexedExpr)(unsafe.Pointer(p)).Faff) != SQLITE_AFF_NUMERIC { + continue + } + v = (*Parse)(unsafe.Pointer(pParse)).FpVdbe if (*IndexedExpr)(unsafe.Pointer(p)).FbMaybeNullRow != 0 { @@ -61626,7 +61633,7 @@ if !((*Expr)(unsafe.Pointer(pExpr)).FiTable != n1) { goto __122 } - Xsqlite3ErrorMsg(tls, pParse, ts+7652, + Xsqlite3ErrorMsg(tls, pParse, ts+7641, libc.VaList(bp+24, (*Expr)(unsafe.Pointer(pExpr)).FiTable, n1)) __122: ; @@ -61648,11 +61655,10 @@ return target __50: - if !(!((*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_Collate) != U32(0)) && - (*Expr)(unsafe.Pointer(pExpr)).FpLeft != 0 && - int32((*Expr)(unsafe.Pointer((*Expr)(unsafe.Pointer(pExpr)).FpLeft)).Fop) == TK_FUNCTION) { + if !!((*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_Collate) != U32(0)) { goto __123 } + inReg = Xsqlite3ExprCodeTarget(tls, pParse, (*Expr)(unsafe.Pointer(pExpr)).FpLeft, target) if !(inReg != target) { goto __125 @@ -61723,13 +61729,19 @@ ; __127: ; - addrINR = Xsqlite3VdbeAddOp1(tls, v, OP_IfNullRow, (*Expr)(unsafe.Pointer(pExpr)).FiTable) + addrINR = Xsqlite3VdbeAddOp3(tls, v, OP_IfNullRow, (*Expr)(unsafe.Pointer(pExpr)).FiTable, 0, target) (*Parse)(unsafe.Pointer(pParse)).FokConstFactor = U8(0) inReg = Xsqlite3ExprCodeTarget(tls, pParse, (*Expr)(unsafe.Pointer(pExpr)).FpLeft, target) (*Parse)(unsafe.Pointer(pParse)).FokConstFactor = okConstFactor + if !(inReg != target) { + goto __130 + } + Xsqlite3VdbeAddOp2(tls, v, OP_SCopy, inReg, target) + inReg = target +__130: + ; Xsqlite3VdbeJumpHere(tls, v, addrINR) - Xsqlite3VdbeChangeP3(tls, v, addrINR, inReg) goto __5 __56: @@ -61742,15 +61754,15 @@ nExpr = (*ExprList)(unsafe.Pointer(pEList)).FnExpr endLabel = Xsqlite3VdbeMakeLabel(tls, pParse) if !(libc.AssignUintptr(&pX, 
(*Expr)(unsafe.Pointer(pExpr)).FpLeft) != uintptr(0)) { - goto __130 + goto __131 } pDel = Xsqlite3ExprDup(tls, db1, pX, 0) if !((*Sqlite3)(unsafe.Pointer(db1)).FmallocFailed != 0) { - goto __131 + goto __132 } Xsqlite3ExprDelete(tls, db1, pDel) goto __5 -__131: +__132: ; exprToRegister(tls, pDel, exprCodeVector(tls, pParse, pDel, bp+40)) @@ -61760,22 +61772,22 @@ pTest = bp + 120 *(*int32)(unsafe.Pointer(bp + 40)) = 0 -__130: +__131: ; i1 = 0 -__132: +__133: if !(i1 < nExpr-1) { - goto __134 + goto __135 } if !(pX != 0) { - goto __135 + goto __136 } (*Expr)(unsafe.Pointer(bp + 120)).FpRight = (*ExprList_item)(unsafe.Pointer(aListelem + uintptr(i1)*32)).FpExpr - goto __136 -__135: - pTest = (*ExprList_item)(unsafe.Pointer(aListelem + uintptr(i1)*32)).FpExpr + goto __137 __136: + pTest = (*ExprList_item)(unsafe.Pointer(aListelem + uintptr(i1)*32)).FpExpr +__137: ; nextCase = Xsqlite3VdbeMakeLabel(tls, pParse) @@ -61784,21 +61796,21 @@ Xsqlite3ExprCode(tls, pParse, (*ExprList_item)(unsafe.Pointer(aListelem+uintptr(i1+1)*32)).FpExpr, target) Xsqlite3VdbeGoto(tls, v, endLabel) Xsqlite3VdbeResolveLabel(tls, v, nextCase) - goto __133 -__133: - i1 = i1 + 2 - goto __132 goto __134 __134: + i1 = i1 + 2 + goto __133 + goto __135 +__135: ; if !(nExpr&1 != 0) { - goto __137 + goto __138 } Xsqlite3ExprCode(tls, pParse, (*ExprList_item)(unsafe.Pointer(pEList+8+uintptr(nExpr-1)*32)).FpExpr, target) - goto __138 -__137: - Xsqlite3VdbeAddOp2(tls, v, OP_Null, 0, target) + goto __139 __138: + Xsqlite3VdbeAddOp2(tls, v, OP_Null, 0, target) +__139: ; Xsqlite3ExprDelete(tls, db1, pDel) setDoNotMergeFlagOnCopy(tls, v) @@ -61808,27 +61820,27 @@ __57: ; if !(!(int32((*Parse)(unsafe.Pointer(pParse)).FpTriggerTab) != 0) && !(int32((*Parse)(unsafe.Pointer(pParse)).Fnested) != 0)) { - goto __139 + goto __140 } Xsqlite3ErrorMsg(tls, pParse, ts+8082, 0) return 0 -__139: +__140: ; if !(int32((*Expr)(unsafe.Pointer(pExpr)).FaffExpr) == OE_Abort) { - goto __140 + goto __141 } Xsqlite3MayAbort(tls, pParse) -__140: +__141: ; if !(int32((*Expr)(unsafe.Pointer(pExpr)).FaffExpr) == OE_Ignore) { - goto __141 + goto __142 } Xsqlite3VdbeAddOp4(tls, v, OP_Halt, SQLITE_OK, OE_Ignore, 0, *(*uintptr)(unsafe.Pointer(pExpr + 8)), 0) - goto __142 -__141: + goto __143 +__142: Xsqlite3HaltConstraint(tls, pParse, func() int32 { if (*Parse)(unsafe.Pointer(pParse)).FpTriggerTab != 0 { @@ -61837,7 +61849,7 @@ return SQLITE_ERROR }(), int32((*Expr)(unsafe.Pointer(pExpr)).FaffExpr), *(*uintptr)(unsafe.Pointer(pExpr + 8)), int8(0), uint8(0)) -__142: +__143: ; goto __5 @@ -64505,7 +64517,7 @@ return SQLITE_NOMEM } if Xsqlite3_strnicmp(tls, zSql, ts+10922, 7) != 0 { - return Xsqlite3CorruptError(tls, 113494) + return Xsqlite3CorruptError(tls, 113516) } (*Sqlite3)(unsafe.Pointer(db)).Finit.FiDb = func() uint8 { if bTemp != 0 { @@ -64522,7 +64534,7 @@ } if rc == SQLITE_OK && ((*Parse)(unsafe.Pointer(p)).FpNewTable == uintptr(0) && (*Parse)(unsafe.Pointer(p)).FpNewIndex == uintptr(0) && (*Parse)(unsafe.Pointer(p)).FpNewTrigger == uintptr(0)) { - rc = Xsqlite3CorruptError(tls, 113505) + rc = Xsqlite3CorruptError(tls, 113527) } (*Sqlite3)(unsafe.Pointer(db)).Finit.FiDb = U8(0) @@ -65443,7 +65455,7 @@ goto __2 } - rc = Xsqlite3CorruptError(tls, 114441) + rc = Xsqlite3CorruptError(tls, 114463) goto drop_column_done __2: ; @@ -69807,6 +69819,12 @@ pExpr = Xsqlite3PExpr(tls, pParse, TK_UPLUS, pExpr, uintptr(0)) __11: ; + if !(pExpr != 0 && int32((*Expr)(unsafe.Pointer(pExpr)).Fop) != TK_RAISE) { + goto __12 + } + (*Expr)(unsafe.Pointer(pExpr)).FaffExpr = 
(*Column)(unsafe.Pointer(pCol)).Faffinity +__12: + ; Xsqlite3ColumnSetExpr(tls, pParse, pTab, pCol, pExpr) pExpr = uintptr(0) goto generated_done @@ -70971,7 +70989,7 @@ if Xsqlite3_strnicmp(tls, (*Table)(unsafe.Pointer(pTab)).FzName+uintptr(7), ts+3286, 4) == 0 { return 0 } - if Xsqlite3_strnicmp(tls, (*Table)(unsafe.Pointer(pTab)).FzName+uintptr(7), ts+7133, 10) == 0 { + if Xsqlite3_strnicmp(tls, (*Table)(unsafe.Pointer(pTab)).FzName+uintptr(7), ts+7122, 10) == 0 { return 0 } return 1 @@ -72217,7 +72235,7 @@ goto __101 } Xsqlite3ErrorMsg(tls, pParse, ts+14140, 0) - (*Parse)(unsafe.Pointer(pParse)).Frc = Xsqlite3CorruptError(tls, 121835) + (*Parse)(unsafe.Pointer(pParse)).Frc = Xsqlite3CorruptError(tls, 121859) goto exit_create_index __101: ; @@ -74267,7 +74285,7 @@ goto __16 __15: wcf = U16(WHERE_ONEPASS_DESIRED | WHERE_DUPLICATES_OK) - if !((*NameContext)(unsafe.Pointer(bp+16)).FncFlags&NC_VarSelect != 0) { + if !((*NameContext)(unsafe.Pointer(bp+16)).FncFlags&NC_Subquery != 0) { goto __23 } bComplex = 1 @@ -80735,7 +80753,7 @@ if !!(Xsqlite3SafetyCheckOk(tls, db) != 0) { goto __1 } - return Xsqlite3MisuseError(tls, 131895) + return Xsqlite3MisuseError(tls, 131931) __1: ; if !(zSql == uintptr(0)) { @@ -82134,7 +82152,7 @@ } else if (*FuncDef)(unsafe.Pointer(p)).FxFinalize != uintptr(0) { zType = ts + 17513 } else { - zType = ts + 7528 + zType = ts + 7517 } Xsqlite3VdbeMultiLoad(tls, v, 1, ts+17515, libc.VaList(bp, (*FuncDef)(unsafe.Pointer(p)).FzName, isBuiltin, @@ -82295,6 +82313,7 @@ var zErr2 uintptr var k3 int32 var pCheck uintptr + var jmp7 int32 var jmp6 int32 var iCol1 int32 var uniqOk int32 @@ -83613,7 +83632,7 @@ goto __217 } pMod = (*HashElem)(unsafe.Pointer(j1)).Fdata - Xsqlite3VdbeMultiLoad(tls, v, 1, ts+7528, libc.VaList(bp+264, (*Module)(unsafe.Pointer(pMod)).FzName)) + Xsqlite3VdbeMultiLoad(tls, v, 1, ts+7517, libc.VaList(bp+264, (*Module)(unsafe.Pointer(pMod)).FzName)) goto __216 __216: j1 = (*HashElem)(unsafe.Pointer(j1)).Fnext @@ -83629,7 +83648,7 @@ if !(i6 < int32(uint64(unsafe.Sizeof(aPragmaName))/uint64(unsafe.Sizeof(PragmaName{})))) { goto __220 } - Xsqlite3VdbeMultiLoad(tls, v, 1, ts+7528, libc.VaList(bp+272, aPragmaName[i6].FzName)) + Xsqlite3VdbeMultiLoad(tls, v, 1, ts+7517, libc.VaList(bp+272, aPragmaName[i6].FzName)) goto __219 __219: i6++ @@ -84434,80 +84453,94 @@ jmp4 = integrityCheckResultRow(tls, v) Xsqlite3VdbeJumpHere(tls, v, jmp21) + if !((*Table)(unsafe.Pointer(pTab9)).FtabFlags&U32(TF_WithoutRowid) == U32(0)) { + goto __345 + } + Xsqlite3VdbeAddOp2(tls, v, OP_IdxRowid, *(*int32)(unsafe.Pointer(bp + 616))+j4, 3) + jmp7 = Xsqlite3VdbeAddOp3(tls, v, OP_Eq, 3, 0, r1+int32((*Index)(unsafe.Pointer(pIdx5)).FnColumn)-1) + + Xsqlite3VdbeLoadString(tls, v, 3, + ts+17929) + Xsqlite3VdbeAddOp3(tls, v, OP_Concat, 7, 3, 3) + Xsqlite3VdbeLoadString(tls, v, 4, ts+17965) + Xsqlite3VdbeGoto(tls, v, jmp5-1) + Xsqlite3VdbeJumpHere(tls, v, jmp7) +__345: + ; label6 = 0 kk = 0 -__345: +__346: if !(kk < int32((*Index)(unsafe.Pointer(pIdx5)).FnKeyCol)) { - goto __347 + goto __348 } if !(*(*uintptr)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx5)).FazColl + uintptr(kk)*8)) == uintptr(unsafe.Pointer(&Xsqlite3StrBINARY))) { - goto __348 + goto __349 } - goto __346 -__348: + goto __347 +__349: ; if !(label6 == 0) { - goto __349 + goto __350 } label6 = Xsqlite3VdbeMakeLabel(tls, pParse) -__349: +__350: ; Xsqlite3VdbeAddOp3(tls, v, OP_Column, *(*int32)(unsafe.Pointer(bp + 616))+j4, kk, 3) Xsqlite3VdbeAddOp3(tls, v, OP_Ne, 3, label6, r1+kk) - goto __346 -__346: - kk++ - goto __345 
goto __347 __347: + kk++ + goto __346 + goto __348 +__348: ; if !(label6 != 0) { - goto __350 + goto __351 } jmp6 = Xsqlite3VdbeAddOp0(tls, v, OP_Goto) Xsqlite3VdbeResolveLabel(tls, v, label6) Xsqlite3VdbeLoadString(tls, v, 3, ts+17903) Xsqlite3VdbeAddOp3(tls, v, OP_Concat, 7, 3, 3) - Xsqlite3VdbeLoadString(tls, v, 4, ts+17929) + Xsqlite3VdbeLoadString(tls, v, 4, ts+17976) Xsqlite3VdbeGoto(tls, v, jmp5-1) Xsqlite3VdbeJumpHere(tls, v, jmp6) -__350: +__351: ; if !(int32((*Index)(unsafe.Pointer(pIdx5)).FonError) != OE_None) { - goto __351 + goto __352 } uniqOk = Xsqlite3VdbeMakeLabel(tls, pParse) kk = 0 -__352: +__353: if !(kk < int32((*Index)(unsafe.Pointer(pIdx5)).FnKeyCol)) { - goto __354 + goto __355 } iCol1 = int32(*(*I16)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx5)).FaiColumn + uintptr(kk)*2))) if !(iCol1 >= 0 && uint32(int32(*(*uint8)(unsafe.Pointer((*Table)(unsafe.Pointer(pTab9)).FaCol + uintptr(iCol1)*24 + 8))&0xf>>0)) != 0) { - goto __355 + goto __356 } - goto __353 -__355: + goto __354 +__356: ; Xsqlite3VdbeAddOp2(tls, v, OP_IsNull, r1+kk, uniqOk) - goto __353 -__353: - kk++ - goto __352 goto __354 __354: + kk++ + goto __353 + goto __355 +__355: ; jmp61 = Xsqlite3VdbeAddOp1(tls, v, OP_Next, *(*int32)(unsafe.Pointer(bp + 616))+j4) Xsqlite3VdbeGoto(tls, v, uniqOk) Xsqlite3VdbeJumpHere(tls, v, jmp61) Xsqlite3VdbeAddOp4Int(tls, v, OP_IdxGT, *(*int32)(unsafe.Pointer(bp + 616))+j4, uniqOk, r1, int32((*Index)(unsafe.Pointer(pIdx5)).FnKeyCol)) - Xsqlite3VdbeLoadString(tls, v, 3, ts+17956) + Xsqlite3VdbeLoadString(tls, v, 3, ts+18003) Xsqlite3VdbeGoto(tls, v, jmp5) Xsqlite3VdbeResolveLabel(tls, v, uniqOk) -__351: +__352: ; Xsqlite3VdbeJumpHere(tls, v, jmp4) Xsqlite3ResolvePartIdxLabel(tls, pParse, *(*int32)(unsafe.Pointer(bp + 632))) @@ -84524,20 +84557,20 @@ Xsqlite3VdbeAddOp2(tls, v, OP_Next, *(*int32)(unsafe.Pointer(bp + 612)), loopTop) Xsqlite3VdbeJumpHere(tls, v, loopTop-1) if !!(isQuick != 0) { - goto __356 + goto __357 } - Xsqlite3VdbeLoadString(tls, v, 2, ts+17983) + Xsqlite3VdbeLoadString(tls, v, 2, ts+18030) j4 = 0 pIdx5 = (*Table)(unsafe.Pointer(pTab9)).FpIndex -__357: +__358: if !(pIdx5 != 0) { - goto __359 + goto __360 } if !(pPk1 == pIdx5) { - goto __360 + goto __361 } - goto __358 -__360: + goto __359 +__361: ; Xsqlite3VdbeAddOp2(tls, v, OP_Count, *(*int32)(unsafe.Pointer(bp + 616))+j4, 3) addr1 = Xsqlite3VdbeAddOp3(tls, v, OP_Eq, 8+j4, 0, 3) @@ -84546,21 +84579,21 @@ Xsqlite3VdbeAddOp3(tls, v, OP_Concat, 4, 2, 3) integrityCheckResultRow(tls, v) Xsqlite3VdbeJumpHere(tls, v, addr1) - goto __358 -__358: - pIdx5 = (*Index)(unsafe.Pointer(pIdx5)).FpNext - j4++ - goto __357 goto __359 __359: + pIdx5 = (*Index)(unsafe.Pointer(pIdx5)).FpNext + j4++ + goto __358 + goto __360 +__360: ; if !(pPk1 != 0) { - goto __361 + goto __362 } Xsqlite3ReleaseTempRange(tls, pParse, r2, int32((*Index)(unsafe.Pointer(pPk1)).FnKeyCol)) -__361: +__362: ; -__356: +__357: ; goto __291 __291: @@ -84578,14 +84611,14 @@ ; aOp2 = Xsqlite3VdbeAddOpList(tls, v, int32(uint64(unsafe.Sizeof(endCode))/uint64(unsafe.Sizeof(VdbeOpList{}))), uintptr(unsafe.Pointer(&endCode)), iLn5) if !(aOp2 != 0) { - goto __362 + goto __363 } (*VdbeOp)(unsafe.Pointer(aOp2)).Fp2 = 1 - *(*int32)(unsafe.Pointer(bp + 608)) (*VdbeOp)(unsafe.Pointer(aOp2 + 2*24)).Fp4type = int8(-1) - *(*uintptr)(unsafe.Pointer(aOp2 + 2*24 + 16)) = ts + 18012 + *(*uintptr)(unsafe.Pointer(aOp2 + 2*24 + 16)) = ts + 18059 (*VdbeOp)(unsafe.Pointer(aOp2 + 5*24)).Fp4type = int8(-1) *(*uintptr)(unsafe.Pointer(aOp2 + 5*24 + 16)) = Xsqlite3ErrStr(tls, 
SQLITE_CORRUPT) -__362: +__363: ; Xsqlite3VdbeChangeP3(tls, v, 0, Xsqlite3VdbeCurrentAddr(tls, v)-2) @@ -84593,27 +84626,27 @@ __45: if !!(zRight != 0) { - goto __363 + goto __364 } if !(Xsqlite3ReadSchema(tls, pParse) != 0) { - goto __365 + goto __366 } goto pragma_out -__365: +__366: ; returnSingleText(tls, v, encnames1[(*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).Fenc].FzName) - goto __364 -__363: + goto __365 +__364: if !((*Sqlite3)(unsafe.Pointer(db)).FmDbFlags&U32(DBFLAG_EncodingFixed) == U32(0)) { - goto __366 + goto __367 } pEnc = uintptr(unsafe.Pointer(&encnames1)) -__367: +__368: if !((*EncName)(unsafe.Pointer(pEnc)).FzName != 0) { - goto __369 + goto __370 } if !(0 == Xsqlite3StrICmp(tls, zRight, (*EncName)(unsafe.Pointer(pEnc)).FzName)) { - goto __370 + goto __371 } if (*EncName)(unsafe.Pointer(pEnc)).Fenc != 0 { enc = (*EncName)(unsafe.Pointer(pEnc)).Fenc @@ -84627,25 +84660,25 @@ } (*Schema)(unsafe.Pointer((*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb)).FpSchema)).Fenc = enc Xsqlite3SetTextEncoding(tls, db, enc) - goto __369 -__370: + goto __370 +__371: ; - goto __368 -__368: - pEnc += 16 - goto __367 goto __369 __369: + pEnc += 16 + goto __368 + goto __370 +__370: ; if !!(int32((*EncName)(unsafe.Pointer(pEnc)).FzName) != 0) { - goto __371 + goto __372 } - Xsqlite3ErrorMsg(tls, pParse, ts+18015, libc.VaList(bp+456, zRight)) -__371: + Xsqlite3ErrorMsg(tls, pParse, ts+18062, libc.VaList(bp+456, zRight)) +__372: ; -__366: +__367: ; -__364: +__365: ; goto __15 @@ -84653,15 +84686,15 @@ iCookie = int32((*PragmaName)(unsafe.Pointer(pPragma)).FiArg) Xsqlite3VdbeUsesBtree(tls, v, iDb) if !(zRight != 0 && int32((*PragmaName)(unsafe.Pointer(pPragma)).FmPragFlg)&PragFlg_ReadOnly == 0) { - goto __372 + goto __373 } aOp3 = Xsqlite3VdbeAddOpList(tls, v, int32(uint64(unsafe.Sizeof(setCookie))/uint64(unsafe.Sizeof(VdbeOpList{}))), uintptr(unsafe.Pointer(&setCookie)), 0) if !(0 != 0) { - goto __374 + goto __375 } goto __15 -__374: +__375: ; (*VdbeOp)(unsafe.Pointer(aOp3)).Fp1 = iDb (*VdbeOp)(unsafe.Pointer(aOp3 + 1*24)).Fp1 = iDb @@ -84669,41 +84702,41 @@ (*VdbeOp)(unsafe.Pointer(aOp3 + 1*24)).Fp3 = Xsqlite3Atoi(tls, zRight) (*VdbeOp)(unsafe.Pointer(aOp3 + 1*24)).Fp5 = U16(1) if !(iCookie == BTREE_SCHEMA_VERSION && (*Sqlite3)(unsafe.Pointer(db)).Fflags&uint64(SQLITE_Defensive) != uint64(0)) { - goto __375 + goto __376 } (*VdbeOp)(unsafe.Pointer(aOp3 + 1*24)).Fopcode = U8(OP_Noop) -__375: +__376: ; - goto __373 -__372: + goto __374 +__373: ; aOp4 = Xsqlite3VdbeAddOpList(tls, v, int32(uint64(unsafe.Sizeof(readCookie))/uint64(unsafe.Sizeof(VdbeOpList{}))), uintptr(unsafe.Pointer(&readCookie)), 0) if !(0 != 0) { - goto __376 + goto __377 } goto __15 -__376: +__377: ; (*VdbeOp)(unsafe.Pointer(aOp4)).Fp1 = iDb (*VdbeOp)(unsafe.Pointer(aOp4 + 1*24)).Fp1 = iDb (*VdbeOp)(unsafe.Pointer(aOp4 + 1*24)).Fp3 = iCookie Xsqlite3VdbeReusable(tls, v) -__373: +__374: ; goto __15 __47: i10 = 0 (*Parse)(unsafe.Pointer(pParse)).FnMem = 1 -__377: +__378: if !(libc.AssignUintptr(&zOpt, Xsqlite3_compileoption_get(tls, libc.PostIncInt32(&i10, 1))) != uintptr(0)) { - goto __378 + goto __379 } Xsqlite3VdbeLoadString(tls, v, 1, zOpt) Xsqlite3VdbeAddOp2(tls, v, OP_ResultRow, 1, 1) - goto __377 -__378: + goto __378 +__379: ; Xsqlite3VdbeReusable(tls, v) @@ -84718,31 +84751,31 @@ }() eMode2 = SQLITE_CHECKPOINT_PASSIVE if !(zRight != 0) { - goto __379 + goto __380 } if !(Xsqlite3StrICmp(tls, zRight, ts+17345) == 0) { - goto __380 + goto __381 } eMode2 = SQLITE_CHECKPOINT_FULL - goto __381 
-__380: - if !(Xsqlite3StrICmp(tls, zRight, ts+18040) == 0) { - goto __382 + goto __382 +__381: + if !(Xsqlite3StrICmp(tls, zRight, ts+18087) == 0) { + goto __383 } eMode2 = SQLITE_CHECKPOINT_RESTART - goto __383 -__382: + goto __384 +__383: if !(Xsqlite3StrICmp(tls, zRight, ts+17498) == 0) { - goto __384 + goto __385 } eMode2 = SQLITE_CHECKPOINT_TRUNCATE -__384: +__385: ; -__383: +__384: ; -__381: +__382: ; -__379: +__380: ; (*Parse)(unsafe.Pointer(pParse)).FnMem = 3 Xsqlite3VdbeAddOp3(tls, v, OP_Checkpoint, iBt, eMode2, 1) @@ -84752,10 +84785,10 @@ __49: if !(zRight != 0) { - goto __385 + goto __386 } Xsqlite3_wal_autocheckpoint(tls, db, Xsqlite3Atoi(tls, zRight)) -__385: +__386: ; returnSingleInt(tls, v, func() int64 { @@ -84775,19 +84808,19 @@ __51: if !(zRight != 0) { - goto __386 + goto __387 } opMask = U32(Xsqlite3Atoi(tls, zRight)) if !(opMask&U32(0x02) == U32(0)) { - goto __388 + goto __389 } goto __15 -__388: +__389: ; - goto __387 -__386: - opMask = U32(0xfffe) + goto __388 __387: + opMask = U32(0xfffe) +__388: ; iTabCur = libc.PostIncInt32(&(*Parse)(unsafe.Pointer(pParse)).FnTab, 1) iDbLast = func() int32 { @@ -84796,86 +84829,86 @@ } return (*Sqlite3)(unsafe.Pointer(db)).FnDb - 1 }() -__389: +__390: if !(iDb <= iDbLast) { - goto __391 + goto __392 } if !(iDb == 1) { - goto __392 + goto __393 } - goto __390 -__392: + goto __391 +__393: ; Xsqlite3CodeVerifySchema(tls, pParse, iDb) pSchema = (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + uintptr(iDb)*32)).FpSchema k4 = (*Hash)(unsafe.Pointer(pSchema + 8)).Ffirst -__393: +__394: if !(k4 != 0) { - goto __395 + goto __396 } pTab10 = (*HashElem)(unsafe.Pointer(k4)).Fdata if !((*Table)(unsafe.Pointer(pTab10)).FtabFlags&U32(TF_StatsUsed) == U32(0)) { - goto __396 + goto __397 } - goto __394 -__396: + goto __395 +__397: ; szThreshold = LogEst(int32((*Table)(unsafe.Pointer(pTab10)).FnRowLogEst) + 46) pIdx6 = (*Table)(unsafe.Pointer(pTab10)).FpIndex -__397: +__398: if !(pIdx6 != 0) { - goto __399 + goto __400 } if !!(int32(*(*uint16)(unsafe.Pointer(pIdx6 + 100))&0x80>>7) != 0) { - goto __400 + goto __401 } szThreshold = int16(0) - goto __399 -__400: + goto __400 +__401: ; - goto __398 -__398: - pIdx6 = (*Index)(unsafe.Pointer(pIdx6)).FpNext - goto __397 goto __399 __399: + pIdx6 = (*Index)(unsafe.Pointer(pIdx6)).FpNext + goto __398 + goto __400 +__400: ; if !(szThreshold != 0) { - goto __401 + goto __402 } Xsqlite3OpenTable(tls, pParse, iTabCur, iDb, pTab10, OP_OpenRead) Xsqlite3VdbeAddOp3(tls, v, OP_IfSmaller, iTabCur, int32(U32(Xsqlite3VdbeCurrentAddr(tls, v)+2)+opMask&U32(1)), int32(szThreshold)) -__401: +__402: ; - zSubSql = Xsqlite3MPrintf(tls, db, ts+18048, + zSubSql = Xsqlite3MPrintf(tls, db, ts+18095, libc.VaList(bp+464, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName, (*Table)(unsafe.Pointer(pTab10)).FzName)) if !(opMask&U32(0x01) != 0) { - goto __402 + goto __403 } r11 = Xsqlite3GetTempReg(tls, pParse) Xsqlite3VdbeAddOp4(tls, v, OP_String8, 0, r11, 0, zSubSql, -6) Xsqlite3VdbeAddOp2(tls, v, OP_ResultRow, r11, 1) - goto __403 -__402: - Xsqlite3VdbeAddOp4(tls, v, OP_SqlExec, 0, 0, 0, zSubSql, -6) + goto __404 __403: + Xsqlite3VdbeAddOp4(tls, v, OP_SqlExec, 0, 0, 0, zSubSql, -6) +__404: ; - goto __394 -__394: - k4 = (*HashElem)(unsafe.Pointer(k4)).Fnext - goto __393 goto __395 __395: + k4 = (*HashElem)(unsafe.Pointer(k4)).Fnext + goto __394 + goto __396 +__396: ; - goto __390 -__390: - iDb++ - goto __389 goto __391 __391: + iDb++ + goto __390 + goto __392 +__392: ; 
Xsqlite3VdbeAddOp0(tls, v, OP_Expire) goto __15 @@ -84883,36 +84916,36 @@ __52: ; if !(zRight != 0) { - goto __404 + goto __405 } Xsqlite3_busy_timeout(tls, db, Xsqlite3Atoi(tls, zRight)) -__404: +__405: ; returnSingleInt(tls, v, int64((*Sqlite3)(unsafe.Pointer(db)).FbusyTimeout)) goto __15 __53: if !(zRight != 0 && Xsqlite3DecOrHexToI64(tls, zRight, bp+640) == SQLITE_OK) { - goto __405 + goto __406 } Xsqlite3_soft_heap_limit64(tls, *(*Sqlite3_int64)(unsafe.Pointer(bp + 640))) -__405: +__406: ; returnSingleInt(tls, v, Xsqlite3_soft_heap_limit64(tls, int64(-1))) goto __15 __54: if !(zRight != 0 && Xsqlite3DecOrHexToI64(tls, zRight, bp+648) == SQLITE_OK) { - goto __406 + goto __407 } iPrior = Xsqlite3_hard_heap_limit64(tls, int64(-1)) if !(*(*Sqlite3_int64)(unsafe.Pointer(bp + 648)) > int64(0) && (iPrior == int64(0) || iPrior > *(*Sqlite3_int64)(unsafe.Pointer(bp + 648)))) { - goto __407 + goto __408 } Xsqlite3_hard_heap_limit64(tls, *(*Sqlite3_int64)(unsafe.Pointer(bp + 648))) -__407: +__408: ; -__406: +__407: ; returnSingleInt(tls, v, Xsqlite3_hard_heap_limit64(tls, int64(-1))) goto __15 @@ -84921,10 +84954,10 @@ if !(zRight != 0 && Xsqlite3DecOrHexToI64(tls, zRight, bp+656) == SQLITE_OK && *(*Sqlite3_int64)(unsafe.Pointer(bp + 656)) >= int64(0)) { - goto __408 + goto __409 } Xsqlite3_limit(tls, db, SQLITE_LIMIT_WORKER_THREADS, int32(*(*Sqlite3_int64)(unsafe.Pointer(bp + 656))&int64(0x7fffffff))) -__408: +__409: ; returnSingleInt(tls, v, int64(Xsqlite3_limit(tls, db, SQLITE_LIMIT_WORKER_THREADS, -1))) goto __15 @@ -84933,10 +84966,10 @@ if !(zRight != 0 && Xsqlite3DecOrHexToI64(tls, zRight, bp+664) == SQLITE_OK && *(*Sqlite3_int64)(unsafe.Pointer(bp + 664)) >= int64(0)) { - goto __409 + goto __410 } (*Sqlite3)(unsafe.Pointer(db)).FnAnalysisLimit = int32(*(*Sqlite3_int64)(unsafe.Pointer(bp + 664)) & int64(0x7fffffff)) -__409: +__410: ; returnSingleInt(tls, v, int64((*Sqlite3)(unsafe.Pointer(db)).FnAnalysisLimit)) goto __15 @@ -84944,10 +84977,10 @@ __15: ; if !(int32((*PragmaName)(unsafe.Pointer(pPragma)).FmPragFlg)&PragFlg_NoColumns1 != 0 && zRight != 0) { - goto __410 + goto __411 } -__410: +__411: ; pragma_out: Xsqlite3DbFree(tls, db, zLeft) @@ -84999,14 +85032,14 @@ {Fopcode: U8(OP_Goto), Fp2: int8(3)}, } var encnames1 = [9]EncName{ - {FzName: ts + 18066, Fenc: U8(SQLITE_UTF8)}, - {FzName: ts + 18071, Fenc: U8(SQLITE_UTF8)}, - {FzName: ts + 18077, Fenc: U8(SQLITE_UTF16LE)}, - {FzName: ts + 18086, Fenc: U8(SQLITE_UTF16BE)}, - {FzName: ts + 18095, Fenc: U8(SQLITE_UTF16LE)}, - {FzName: ts + 18103, Fenc: U8(SQLITE_UTF16BE)}, - {FzName: ts + 18111}, - {FzName: ts + 18118}, + {FzName: ts + 18113, Fenc: U8(SQLITE_UTF8)}, + {FzName: ts + 18118, Fenc: U8(SQLITE_UTF8)}, + {FzName: ts + 18124, Fenc: U8(SQLITE_UTF16LE)}, + {FzName: ts + 18133, Fenc: U8(SQLITE_UTF16BE)}, + {FzName: ts + 18142, Fenc: U8(SQLITE_UTF16LE)}, + {FzName: ts + 18150, Fenc: U8(SQLITE_UTF16BE)}, + {FzName: ts + 18158}, + {FzName: ts + 18165}, {}, } var setCookie = [2]VdbeOpList{ @@ -85058,7 +85091,7 @@ _ = argc _ = argv Xsqlite3StrAccumInit(tls, bp+32, uintptr(0), bp+64, int32(unsafe.Sizeof([200]uint8{})), 0) - Xsqlite3_str_appendall(tls, bp+32, ts+18124) + Xsqlite3_str_appendall(tls, bp+32, ts+18171) i = 0 j = int32((*PragmaName)(unsafe.Pointer(pPragma)).FiPragCName) __1: @@ -85066,7 +85099,7 @@ goto __3 } { - Xsqlite3_str_appendf(tls, bp+32, ts+18139, libc.VaList(bp, int32(cSep), pragCName[j])) + Xsqlite3_str_appendf(tls, bp+32, ts+18186, libc.VaList(bp, int32(cSep), pragCName[j])) cSep = uint8(',') } @@ -85079,16 +85112,16 
@@ __3: ; if i == 0 { - Xsqlite3_str_appendf(tls, bp+32, ts+18146, libc.VaList(bp+16, (*PragmaName)(unsafe.Pointer(pPragma)).FzName)) + Xsqlite3_str_appendf(tls, bp+32, ts+18193, libc.VaList(bp+16, (*PragmaName)(unsafe.Pointer(pPragma)).FzName)) i++ } j = 0 if int32((*PragmaName)(unsafe.Pointer(pPragma)).FmPragFlg)&PragFlg_Result1 != 0 { - Xsqlite3_str_appendall(tls, bp+32, ts+18152) + Xsqlite3_str_appendall(tls, bp+32, ts+18199) j++ } if int32((*PragmaName)(unsafe.Pointer(pPragma)).FmPragFlg)&(PragFlg_SchemaOpt|PragFlg_SchemaReq) != 0 { - Xsqlite3_str_appendall(tls, bp+32, ts+18164) + Xsqlite3_str_appendall(tls, bp+32, ts+18211) j++ } Xsqlite3_str_append(tls, bp+32, ts+4957, 1) @@ -85271,13 +85304,13 @@ __3: ; Xsqlite3StrAccumInit(tls, bp+32, uintptr(0), uintptr(0), 0, *(*int32)(unsafe.Pointer((*PragmaVtab)(unsafe.Pointer(pTab)).Fdb + 136 + 1*4))) - Xsqlite3_str_appendall(tls, bp+32, ts+18179) + Xsqlite3_str_appendall(tls, bp+32, ts+18226) if *(*uintptr)(unsafe.Pointer(pCsr + 24 + 1*8)) != 0 { - Xsqlite3_str_appendf(tls, bp+32, ts+18187, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(pCsr + 24 + 1*8)))) + Xsqlite3_str_appendf(tls, bp+32, ts+18234, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(pCsr + 24 + 1*8)))) } Xsqlite3_str_appendall(tls, bp+32, (*PragmaName)(unsafe.Pointer((*PragmaVtab)(unsafe.Pointer(pTab)).FpName)).FzName) if *(*uintptr)(unsafe.Pointer(pCsr + 24)) != 0 { - Xsqlite3_str_appendf(tls, bp+32, ts+18191, libc.VaList(bp+16, *(*uintptr)(unsafe.Pointer(pCsr + 24)))) + Xsqlite3_str_appendf(tls, bp+32, ts+18238, libc.VaList(bp+16, *(*uintptr)(unsafe.Pointer(pCsr + 24)))) } zSql = Xsqlite3StrAccumFinish(tls, bp+32) if zSql == uintptr(0) { @@ -85354,12 +85387,12 @@ } else if *(*uintptr)(unsafe.Pointer((*InitData)(unsafe.Pointer(pData)).FpzErrMsg)) != uintptr(0) { } else if (*InitData)(unsafe.Pointer(pData)).FmInitFlags&U32(INITFLAG_AlterMask) != 0 { *(*uintptr)(unsafe.Pointer((*InitData)(unsafe.Pointer(pData)).FpzErrMsg)) = Xsqlite3MPrintf(tls, db, - ts+18195, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(azObj)), *(*uintptr)(unsafe.Pointer(azObj + 1*8)), + ts+18242, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(azObj)), *(*uintptr)(unsafe.Pointer(azObj + 1*8)), azAlterType[(*InitData)(unsafe.Pointer(pData)).FmInitFlags&U32(INITFLAG_AlterMask)-U32(1)], zExtra)) (*InitData)(unsafe.Pointer(pData)).Frc = SQLITE_ERROR } else if (*Sqlite3)(unsafe.Pointer(db)).Fflags&uint64(SQLITE_WriteSchema) != 0 { - (*InitData)(unsafe.Pointer(pData)).Frc = Xsqlite3CorruptError(tls, 137196) + (*InitData)(unsafe.Pointer(pData)).Frc = Xsqlite3CorruptError(tls, 137249) } else { var z uintptr var zObj uintptr @@ -85368,19 +85401,19 @@ } else { zObj = ts + 5008 } - z = Xsqlite3MPrintf(tls, db, ts+18223, libc.VaList(bp+32, zObj)) + z = Xsqlite3MPrintf(tls, db, ts+18270, libc.VaList(bp+32, zObj)) if zExtra != 0 && *(*uint8)(unsafe.Pointer(zExtra)) != 0 { - z = Xsqlite3MPrintf(tls, db, ts+18254, libc.VaList(bp+40, z, zExtra)) + z = Xsqlite3MPrintf(tls, db, ts+18301, libc.VaList(bp+40, z, zExtra)) } *(*uintptr)(unsafe.Pointer((*InitData)(unsafe.Pointer(pData)).FpzErrMsg)) = z - (*InitData)(unsafe.Pointer(pData)).Frc = Xsqlite3CorruptError(tls, 137203) + (*InitData)(unsafe.Pointer(pData)).Frc = Xsqlite3CorruptError(tls, 137256) } } var azAlterType = [3]uintptr{ - ts + 18262, - ts + 18269, - ts + 18281, + ts + 18309, + ts + 18316, + ts + 18328, } // Check to see if any sibling index (another index on the same table) @@ -85472,7 +85505,7 @@ var pIndex uintptr pIndex = Xsqlite3FindIndex(tls, db, 
*(*uintptr)(unsafe.Pointer(argv + 1*8)), (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName) if pIndex == uintptr(0) { - corruptSchema(tls, pData, argv, ts+18292) + corruptSchema(tls, pData, argv, ts+18339) } else if Xsqlite3GetUInt32(tls, *(*uintptr)(unsafe.Pointer(argv + 3*8)), pIndex+88) == 0 || (*Index)(unsafe.Pointer(pIndex)).Ftnum < Pgno(2) || (*Index)(unsafe.Pointer(pIndex)).Ftnum > (*InitData)(unsafe.Pointer(pData)).FmxPage || @@ -85520,7 +85553,7 @@ }()) *(*uintptr)(unsafe.Pointer(bp + 16 + 2*8)) = *(*uintptr)(unsafe.Pointer(bp + 16 + 1*8)) *(*uintptr)(unsafe.Pointer(bp + 16 + 3*8)) = ts + 7938 - *(*uintptr)(unsafe.Pointer(bp + 16 + 4*8)) = ts + 18305 + *(*uintptr)(unsafe.Pointer(bp + 16 + 4*8)) = ts + 18352 *(*uintptr)(unsafe.Pointer(bp + 16 + 5*8)) = uintptr(0) (*InitData)(unsafe.Pointer(bp + 64)).Fdb = db (*InitData)(unsafe.Pointer(bp + 64)).FiDb = iDb @@ -85649,7 +85682,7 @@ if !(int32((*Schema)(unsafe.Pointer((*Db)(unsafe.Pointer(pDb)).FpSchema)).Ffile_format) > SQLITE_MAX_FILE_FORMAT) { goto __19 } - Xsqlite3SetString(tls, pzErrMsg, db, ts+18377) + Xsqlite3SetString(tls, pzErrMsg, db, ts+18424) rc = SQLITE_ERROR goto initone_error_out __19: @@ -85663,7 +85696,7 @@ (*InitData)(unsafe.Pointer(bp + 64)).FmxPage = Xsqlite3BtreeLastPage(tls, (*Db)(unsafe.Pointer(pDb)).FpBt) zSql = Xsqlite3MPrintf(tls, db, - ts+18401, + ts+18448, libc.VaList(bp, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName, zSchemaTabName)) xAuth = (*Sqlite3)(unsafe.Pointer(db)).FxAuth @@ -85995,7 +86028,7 @@ goto __8 } zDb = (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + uintptr(i)*32)).FzDbSName - Xsqlite3ErrorWithMsg(tls, db, rc, ts+18435, libc.VaList(bp, zDb)) + Xsqlite3ErrorWithMsg(tls, db, rc, ts+18482, libc.VaList(bp, zDb)) goto end_prepare __8: @@ -86025,7 +86058,7 @@ if !(nBytes > mxLen) { goto __12 } - Xsqlite3ErrorWithMsg(tls, db, SQLITE_TOOBIG, ts+18465, 0) + Xsqlite3ErrorWithMsg(tls, db, SQLITE_TOOBIG, ts+18512, 0) rc = Xsqlite3ApiExit(tls, db, SQLITE_TOOBIG) goto end_prepare __12: @@ -86121,7 +86154,7 @@ *(*uintptr)(unsafe.Pointer(ppStmt)) = uintptr(0) if !(Xsqlite3SafetyCheckOk(tls, db) != 0) || zSql == uintptr(0) { - return Xsqlite3MisuseError(tls, 137995) + return Xsqlite3MisuseError(tls, 138048) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) Xsqlite3BtreeEnterAll(tls, db) @@ -86220,7 +86253,7 @@ *(*uintptr)(unsafe.Pointer(ppStmt)) = uintptr(0) if !(Xsqlite3SafetyCheckOk(tls, db) != 0) || zSql == uintptr(0) { - return Xsqlite3MisuseError(tls, 138143) + return Xsqlite3MisuseError(tls, 138196) } if nBytes >= 0 { var sz int32 @@ -86552,13 +86585,13 @@ zSp2++ } Xsqlite3ErrorMsg(tls, pParse, - ts+18484, libc.VaList(bp, pA, zSp1, pB, zSp2, pC)) + ts+18531, libc.VaList(bp, pA, zSp1, pB, zSp2, pC)) jointype = JT_INNER } return jointype } -var zKeyText = *(*[34]uint8)(unsafe.Pointer(ts + 18514)) +var zKeyText = *(*[34]uint8)(unsafe.Pointer(ts + 18561)) var aKeyword = [7]struct { Fi U8 FnChar U8 @@ -86733,7 +86766,7 @@ var pUsing uintptr = uintptr(0) if uint32(int32(*(*uint16)(unsafe.Pointer(pRight + 60 + 4))&0x400>>10)) != 0 || *(*uintptr)(unsafe.Pointer(pRight + 72)) != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+18548, libc.VaList(bp, 0)) + ts+18595, libc.VaList(bp, 0)) return 1 } for j = 0; j < int32((*Table)(unsafe.Pointer(pRightTab)).FnCol); j++ { @@ -86778,7 +86811,7 @@ tableAndColumnIndex(tls, pSrc, 0, i, zName, bp+24, bp+28, int32(*(*uint16)(unsafe.Pointer(pRight + 60 + 4))&0x1000>>12)) == 0 { 
Xsqlite3ErrorMsg(tls, pParse, - ts+18598, libc.VaList(bp+8, zName)) + ts+18645, libc.VaList(bp+8, zName)) return 1 } pE1 = Xsqlite3CreateColumnExpr(tls, db, pSrc, *(*int32)(unsafe.Pointer(bp + 24)), *(*int32)(unsafe.Pointer(bp + 28))) @@ -86789,7 +86822,7 @@ int32(*(*uint16)(unsafe.Pointer(pRight + 60 + 4))&0x1000>>12)) != 0 { if int32(*(*uint16)(unsafe.Pointer(pSrc + 8 + uintptr(*(*int32)(unsafe.Pointer(bp + 24)))*104 + 60 + 4))&0x400>>10) == 0 || Xsqlite3IdListIndex(tls, *(*uintptr)(unsafe.Pointer(pSrc + 8 + uintptr(*(*int32)(unsafe.Pointer(bp + 24)))*104 + 72)), zName) < 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+18662, + Xsqlite3ErrorMsg(tls, pParse, ts+18709, libc.VaList(bp+16, zName)) break } @@ -87417,16 +87450,16 @@ var z uintptr switch id { case TK_ALL: - z = ts + 18699 + z = ts + 18746 break case TK_INTERSECT: - z = ts + 18709 + z = ts + 18756 break case TK_EXCEPT: - z = ts + 18719 + z = ts + 18766 break default: - z = ts + 18726 + z = ts + 18773 break } return z @@ -87436,7 +87469,7 @@ bp := tls.Alloc(8) defer tls.Free(8) - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18732, libc.VaList(bp, zUsage)) + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18779, libc.VaList(bp, zUsage)) } func generateSortTail(tls *libc.TLS, pParse uintptr, p uintptr, pSort uintptr, nColumn int32, pDest uintptr) { @@ -87462,9 +87495,9 @@ var nRefKey int32 = 0 var aOutEx uintptr = (*Select)(unsafe.Pointer(p)).FpEList + 8 - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18755, libc.VaList(bp, func() uintptr { + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18802, libc.VaList(bp, func() uintptr { if (*SortCtx)(unsafe.Pointer(pSort)).FnOBSat > 0 { - return ts + 18786 + return ts + 18833 } return ts + 1554 }())) @@ -87808,7 +87841,7 @@ } else { var z uintptr = (*ExprList_item)(unsafe.Pointer(pEList + 8 + uintptr(i)*32)).FzEName if z == uintptr(0) { - z = Xsqlite3MPrintf(tls, db, ts+18801, libc.VaList(bp+16, i+1)) + z = Xsqlite3MPrintf(tls, db, ts+18848, libc.VaList(bp+16, i+1)) } else { z = Xsqlite3DbStrDup(tls, db, z) } @@ -87908,7 +87941,7 @@ if zName != 0 && !(Xsqlite3IsTrueOrFalse(tls, zName) != 0) { zName = Xsqlite3DbStrDup(tls, db, zName) } else { - zName = Xsqlite3MPrintf(tls, db, ts+18801, libc.VaList(bp, i+1)) + zName = Xsqlite3MPrintf(tls, db, ts+18848, libc.VaList(bp, i+1)) } *(*U32)(unsafe.Pointer(bp + 56)) = U32(0) @@ -87924,7 +87957,7 @@ nName = j } } - zName = Xsqlite3MPrintf(tls, db, ts+18810, libc.VaList(bp+8, nName, zName, libc.PreIncUint32(&*(*U32)(unsafe.Pointer(bp + 56)), 1))) + zName = Xsqlite3MPrintf(tls, db, ts+18857, libc.VaList(bp+8, nName, zName, libc.PreIncUint32(&*(*U32)(unsafe.Pointer(bp + 56)), 1))) Xsqlite3ProgressCheck(tls, pParse) if *(*U32)(unsafe.Pointer(bp + 56)) > U32(3) { Xsqlite3_randomness(tls, int32(unsafe.Sizeof(U32(0))), bp+56) @@ -88007,8 +88040,6 @@ (*Column)(unsafe.Pointer(pCol)).Faffinity = Xsqlite3ExprAffinity(tls, p) if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) <= SQLITE_AFF_NONE { (*Column)(unsafe.Pointer(pCol)).Faffinity = aff - } else if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) >= SQLITE_AFF_NUMERIC && int32((*Expr)(unsafe.Pointer(p)).Fop) == TK_CAST { - (*Column)(unsafe.Pointer(pCol)).Faffinity = uint8(SQLITE_AFF_FLEXNUM) } if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) >= SQLITE_AFF_TEXT && (*Select)(unsafe.Pointer(pSelect)).FpNext != 0 { var m int32 = 0 @@ -88023,12 +88054,15 @@ } else if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) >= SQLITE_AFF_NUMERIC && m&0x02 != 0 { (*Column)(unsafe.Pointer(pCol)).Faffinity = 
uint8(SQLITE_AFF_BLOB) } + if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) >= SQLITE_AFF_NUMERIC && int32((*Expr)(unsafe.Pointer(p)).Fop) == TK_CAST { + (*Column)(unsafe.Pointer(pCol)).Faffinity = uint8(SQLITE_AFF_FLEXNUM) + } } zType = columnTypeImpl(tls, bp, p, uintptr(0), uintptr(0), uintptr(0)) if zType == uintptr(0) || int32((*Column)(unsafe.Pointer(pCol)).Faffinity) != int32(Xsqlite3AffinityType(tls, zType, uintptr(0))) { if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) == SQLITE_AFF_NUMERIC || int32((*Column)(unsafe.Pointer(pCol)).Faffinity) == SQLITE_AFF_FLEXNUM { - zType = ts + 18818 + zType = ts + 18865 } else { zType = uintptr(0) for j = 1; j < SQLITE_N_STDTYPE; j++ { @@ -88244,7 +88278,7 @@ if !((*Select)(unsafe.Pointer(p)).FpWin != 0) { goto __1 } - Xsqlite3ErrorMsg(tls, pParse, ts+18822, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+18869, 0) return __1: ; @@ -88335,7 +88369,7 @@ if !((*Select)(unsafe.Pointer(pFirstRec)).FselFlags&U32(SF_Aggregate) != 0) { goto __15 } - Xsqlite3ErrorMsg(tls, pParse, ts+18871, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+18918, 0) goto end_of_recursive_query __15: ; @@ -88355,7 +88389,7 @@ ; pSetup = (*Select)(unsafe.Pointer(pFirstRec)).FpPrior (*Select)(unsafe.Pointer(pSetup)).FpNext = uintptr(0) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18913, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18960, 0) rc = Xsqlite3Select(tls, pParse, pSetup, bp) (*Select)(unsafe.Pointer(pSetup)).FpNext = p if !(rc != 0) { @@ -88392,7 +88426,7 @@ Xsqlite3VdbeResolveLabel(tls, v, addrCont) (*Select)(unsafe.Pointer(pFirstRec)).FpPrior = uintptr(0) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18919, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18966, 0) Xsqlite3Select(tls, pParse, p, bp) (*Select)(unsafe.Pointer(pFirstRec)).FpPrior = pSetup @@ -88426,11 +88460,11 @@ p = (*Select)(unsafe.Pointer(p)).FpPrior nRow = nRow + bShowAll } - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18934, libc.VaList(bp, nRow, func() uintptr { + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18981, libc.VaList(bp, nRow, func() uintptr { if nRow == 1 { return ts + 1554 } - return ts + 18957 + return ts + 19004 }())) for p != 0 { selectInnerLoop(tls, pParse, p, -1, uintptr(0), uintptr(0), pDest, 1, 1) @@ -88531,8 +88565,8 @@ if !((*Select)(unsafe.Pointer(pPrior)).FpPrior == uintptr(0)) { goto __8 } - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18959, 0) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18974, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19006, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19021, 0) __8: ; switch int32((*Select)(unsafe.Pointer(p)).Fop) { @@ -88579,7 +88613,7 @@ ; __15: ; - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18699, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18746, 0) rc = Xsqlite3Select(tls, pParse, p, bp+16) @@ -88646,7 +88680,7 @@ pLimit = (*Select)(unsafe.Pointer(p)).FpLimit (*Select)(unsafe.Pointer(p)).FpLimit = uintptr(0) (*SelectDest)(unsafe.Pointer(bp + 64)).FeDest = op - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18993, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19040, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) rc = Xsqlite3Select(tls, pParse, p, bp+64) @@ -88708,7 +88742,7 @@ pLimit1 = (*Select)(unsafe.Pointer(p)).FpLimit (*Select)(unsafe.Pointer(p)).FpLimit = uintptr(0) (*SelectDest)(unsafe.Pointer(bp + 104)).FiSDParm = tab2 - Xsqlite3VdbeExplain(tls, pParse, uint8(1), 
ts+18993, libc.VaList(bp+8, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19040, libc.VaList(bp+8, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) rc = Xsqlite3Select(tls, pParse, p, bp+104) @@ -88861,10 +88895,10 @@ defer tls.Free(8) if (*Select)(unsafe.Pointer(p)).FselFlags&U32(SF_Values) != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+19014, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+19061, 0) } else { Xsqlite3ErrorMsg(tls, pParse, - ts+19060, + ts+19107, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) } } @@ -89118,8 +89152,8 @@ (*Select)(unsafe.Pointer(pPrior)).FpNext = uintptr(0) (*Select)(unsafe.Pointer(pPrior)).FpOrderBy = Xsqlite3ExprListDup(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pOrderBy, 0) - Xsqlite3ResolveOrderGroupBy(tls, pParse, p, (*Select)(unsafe.Pointer(p)).FpOrderBy, ts+7234) - Xsqlite3ResolveOrderGroupBy(tls, pParse, pPrior, (*Select)(unsafe.Pointer(pPrior)).FpOrderBy, ts+7234) + Xsqlite3ResolveOrderGroupBy(tls, pParse, p, (*Select)(unsafe.Pointer(p)).FpOrderBy, ts+7223) + Xsqlite3ResolveOrderGroupBy(tls, pParse, pPrior, (*Select)(unsafe.Pointer(pPrior)).FpOrderBy, ts+7223) computeLimitRegisters(tls, pParse, p, labelEnd) if (*Select)(unsafe.Pointer(p)).FiLimit != 0 && op == TK_ALL { @@ -89146,13 +89180,13 @@ Xsqlite3SelectDestInit(tls, bp+8, SRT_Coroutine, regAddrA) Xsqlite3SelectDestInit(tls, bp+48, SRT_Coroutine, regAddrB) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19142, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19189, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) addrSelectA = Xsqlite3VdbeCurrentAddr(tls, v) + 1 addr1 = Xsqlite3VdbeAddOp3(tls, v, OP_InitCoroutine, regAddrA, 0, addrSelectA) (*Select)(unsafe.Pointer(pPrior)).FiLimit = regLimitA - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19153, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19200, 0) Xsqlite3Select(tls, pParse, pPrior, bp+8) Xsqlite3VdbeEndCoroutine(tls, v, regAddrA) Xsqlite3VdbeJumpHere(tls, v, addr1) @@ -89164,7 +89198,7 @@ savedOffset = (*Select)(unsafe.Pointer(p)).FiOffset (*Select)(unsafe.Pointer(p)).FiLimit = regLimitB (*Select)(unsafe.Pointer(p)).FiOffset = 0 - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19158, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19205, 0) Xsqlite3Select(tls, pParse, p, bp+48) (*Select)(unsafe.Pointer(p)).FiLimit = savedLimit (*Select)(unsafe.Pointer(p)).FiOffset = savedOffset @@ -89352,7 +89386,8 @@ Xsqlite3VectorErrorMsg(tls, (*SubstContext)(unsafe.Pointer(pSubst)).FpParse, pCopy) } else { var db uintptr = (*Parse)(unsafe.Pointer((*SubstContext)(unsafe.Pointer(pSubst)).FpParse)).Fdb - if (*SubstContext)(unsafe.Pointer(pSubst)).FisOuterJoin != 0 && int32((*Expr)(unsafe.Pointer(pCopy)).Fop) != TK_COLUMN { + if (*SubstContext)(unsafe.Pointer(pSubst)).FisOuterJoin != 0 && + (int32((*Expr)(unsafe.Pointer(pCopy)).Fop) != TK_COLUMN || (*Expr)(unsafe.Pointer(pCopy)).FiTable != (*SubstContext)(unsafe.Pointer(pSubst)).FiNewTable) { libc.Xmemset(tls, bp, 0, uint64(unsafe.Sizeof(Expr{}))) (*Expr)(unsafe.Pointer(bp)).Fop = U8(TK_IF_NULL_ROW) (*Expr)(unsafe.Pointer(bp)).FpLeft = pCopy @@ -90251,7 +90286,7 @@ for pIdx = (*Table)(unsafe.Pointer(pTab)).FpIndex; pIdx != 0 && Xsqlite3StrICmp(tls, (*Index)(unsafe.Pointer(pIdx)).FzName, zIndexedBy) != 0; pIdx = (*Index)(unsafe.Pointer(pIdx)).FpNext { } if !(pIdx != 0) { 
- Xsqlite3ErrorMsg(tls, pParse, ts+19164, libc.VaList(bp, zIndexedBy, 0)) + Xsqlite3ErrorMsg(tls, pParse, ts+19211, libc.VaList(bp, zIndexedBy, 0)) (*Parse)(unsafe.Pointer(pParse)).FcheckSchema = U8(1) return SQLITE_ERROR } @@ -90334,7 +90369,7 @@ defer tls.Free(8) if uint32(int32(*(*uint16)(unsafe.Pointer(pFrom + 60 + 4))&0x4>>2)) != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+19182, libc.VaList(bp, (*SrcItem)(unsafe.Pointer(pFrom)).FzName)) + Xsqlite3ErrorMsg(tls, pParse, ts+19229, libc.VaList(bp, (*SrcItem)(unsafe.Pointer(pFrom)).FzName)) return 1 } return 0 @@ -90463,7 +90498,7 @@ *(*U32)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pFrom)).FpSelect + 4)) |= U32(SF_CopyCte) if uint32(int32(*(*uint16)(unsafe.Pointer(pFrom + 60 + 4))&0x2>>1)) != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+19205, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(pFrom + 88)))) + Xsqlite3ErrorMsg(tls, pParse, ts+19252, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(pFrom + 88)))) return 2 } libc.SetBitFieldPtr16Uint32(pFrom+60+4, uint32(1), 8, 0x100) @@ -90486,7 +90521,7 @@ libc.SetBitFieldPtr16Uint32(pItem+60+4, uint32(1), 6, 0x40) if (*Select)(unsafe.Pointer(pRecTerm)).FselFlags&U32(SF_Recursive) != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+19225, libc.VaList(bp+16, (*Cte)(unsafe.Pointer(pCte)).FzName)) + ts+19272, libc.VaList(bp+16, (*Cte)(unsafe.Pointer(pCte)).FzName)) return 2 } *(*U32)(unsafe.Pointer(pRecTerm + 4)) |= U32(SF_Recursive) @@ -90502,7 +90537,7 @@ pRecTerm = (*Select)(unsafe.Pointer(pRecTerm)).FpPrior } - (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19268 + (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19315 pSavedWith = (*Parse)(unsafe.Pointer(pParse)).FpWith (*Parse)(unsafe.Pointer(pParse)).FpWith = *(*uintptr)(unsafe.Pointer(bp + 48)) if (*Select)(unsafe.Pointer(pSel)).FselFlags&U32(SF_Recursive) != 0 { @@ -90528,7 +90563,7 @@ pEList = (*Select)(unsafe.Pointer(pLeft)).FpEList if (*Cte)(unsafe.Pointer(pCte)).FpCols != 0 { if pEList != 0 && (*ExprList)(unsafe.Pointer(pEList)).FnExpr != (*ExprList)(unsafe.Pointer((*Cte)(unsafe.Pointer(pCte)).FpCols)).FnExpr { - Xsqlite3ErrorMsg(tls, pParse, ts+19291, + Xsqlite3ErrorMsg(tls, pParse, ts+19338, libc.VaList(bp+24, (*Cte)(unsafe.Pointer(pCte)).FzName, (*ExprList)(unsafe.Pointer(pEList)).FnExpr, (*ExprList)(unsafe.Pointer((*Cte)(unsafe.Pointer(pCte)).FpCols)).FnExpr)) (*Parse)(unsafe.Pointer(pParse)).FpWith = pSavedWith return 2 @@ -90539,9 +90574,9 @@ Xsqlite3ColumnsFromExprList(tls, pParse, pEList, pTab+54, pTab+8) if bMayRecursive != 0 { if (*Select)(unsafe.Pointer(pSel)).FselFlags&U32(SF_Recursive) != 0 { - (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19329 + (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19376 } else { - (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19363 + (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19410 } Xsqlite3WalkSelect(tls, pWalker, pSel) } @@ -90588,7 +90623,7 @@ if (*SrcItem)(unsafe.Pointer(pFrom)).FzAlias != 0 { (*Table)(unsafe.Pointer(pTab)).FzName = Xsqlite3DbStrDup(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, (*SrcItem)(unsafe.Pointer(pFrom)).FzAlias) } else { - (*Table)(unsafe.Pointer(pTab)).FzName = Xsqlite3MPrintf(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, ts+19401, libc.VaList(bp, pFrom)) + (*Table)(unsafe.Pointer(pTab)).FzName = Xsqlite3MPrintf(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, ts+19448, libc.VaList(bp, pFrom)) } for (*Select)(unsafe.Pointer(pSel)).FpPrior != 0 { pSel = (*Select)(unsafe.Pointer(pSel)).FpPrior @@ -90700,7 +90735,7 @@ return WRC_Abort } if (*Table)(unsafe.Pointer(pTab)).FnTabRef >= U32(0xffff) { - 
Xsqlite3ErrorMsg(tls, pParse, ts+19405, + Xsqlite3ErrorMsg(tls, pParse, ts+19452, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName)) (*SrcItem)(unsafe.Pointer(pFrom)).FpTab = uintptr(0) return WRC_Abort @@ -90719,7 +90754,7 @@ if int32((*Table)(unsafe.Pointer(pTab)).FeTabType) == TABTYP_VIEW { if (*Sqlite3)(unsafe.Pointer(db)).Fflags&uint64(SQLITE_EnableView) == uint64(0) && (*Table)(unsafe.Pointer(pTab)).FpSchema != (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+1*32)).FpSchema { - Xsqlite3ErrorMsg(tls, pParse, ts+19444, + Xsqlite3ErrorMsg(tls, pParse, ts+19491, libc.VaList(bp+8, (*Table)(unsafe.Pointer(pTab)).FzName)) } (*SrcItem)(unsafe.Pointer(pFrom)).FpSelect = Xsqlite3SelectDup(tls, db, *(*uintptr)(unsafe.Pointer(pTab + 64)), 0) @@ -90843,7 +90878,7 @@ if pNew != 0 { var pX uintptr = pNew + 8 + uintptr((*ExprList)(unsafe.Pointer(pNew)).FnExpr-1)*32 - (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3MPrintf(tls, db, ts+19475, libc.VaList(bp+24, zUName)) + (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3MPrintf(tls, db, ts+19522, libc.VaList(bp+24, zUName)) libc.SetBitFieldPtr16Uint32(pX+16+4, uint32(ENAME_TAB), 0, 0x3) libc.SetBitFieldPtr16Uint32(pX+16+4, uint32(1), 7, 0x80) } @@ -90908,7 +90943,7 @@ (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3DbStrDup(tls, db, (*ExprList_item)(unsafe.Pointer(pNestedFrom+8+uintptr(j)*32)).FzEName) } else { - (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3MPrintf(tls, db, ts+19480, + (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3MPrintf(tls, db, ts+19527, libc.VaList(bp+32, zSchemaName, zTabName, zName)) } @@ -90939,9 +90974,9 @@ ; if !(tableSeen != 0) { if zTName != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+19489, libc.VaList(bp+72, zTName)) + Xsqlite3ErrorMsg(tls, pParse, ts+19536, libc.VaList(bp+72, zTName)) } else { - Xsqlite3ErrorMsg(tls, pParse, ts+19507, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+19554, 0) } } } @@ -90951,7 +90986,7 @@ } if (*Select)(unsafe.Pointer(p)).FpEList != 0 { if (*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer(p)).FpEList)).FnExpr > *(*int32)(unsafe.Pointer(db + 136 + 2*4)) { - Xsqlite3ErrorMsg(tls, pParse, ts+19527, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+19574, 0) return WRC_Abort } if elistFlags&U32(EP_HasFunc|EP_Subquery) != U32(0) { @@ -91089,7 +91124,7 @@ (*AggInfo)(unsafe.Pointer(pAggInfo)).FnColumn = (*AggInfo)(unsafe.Pointer(pAggInfo)).FnAccumulator if int32((*AggInfo)(unsafe.Pointer(pAggInfo)).FnSortingColumn) > 0 { if (*AggInfo)(unsafe.Pointer(pAggInfo)).FnColumn == 0 { - (*AggInfo)(unsafe.Pointer(pAggInfo)).FnSortingColumn = U16(0) + (*AggInfo)(unsafe.Pointer(pAggInfo)).FnSortingColumn = U16((*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer(pSelect)).FpGroupBy)).FnExpr) } else { (*AggInfo)(unsafe.Pointer(pAggInfo)).FnSortingColumn = U16(int32((*AggInfo_col)(unsafe.Pointer((*AggInfo)(unsafe.Pointer(pAggInfo)).FaCol+uintptr((*AggInfo)(unsafe.Pointer(pAggInfo)).FnColumn-1)*24)).FiSorterColumn) + 1) } @@ -91173,13 +91208,13 @@ if *(*uintptr)(unsafe.Pointer(pE + 32)) == uintptr(0) || (*ExprList)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pE + 32)))).FnExpr != 1 { Xsqlite3ErrorMsg(tls, pParse, - ts+19558, 0) + ts+19605, 0) (*AggInfo_func)(unsafe.Pointer(pFunc)).FiDistinct = -1 } else { var pKeyInfo uintptr = Xsqlite3KeyInfoFromExprList(tls, pParse, *(*uintptr)(unsafe.Pointer(pE + 32)), 0, 0) (*AggInfo_func)(unsafe.Pointer(pFunc)).FiDistAddr = Xsqlite3VdbeAddOp4(tls, v, OP_OpenEphemeral, (*AggInfo_func)(unsafe.Pointer(pFunc)).FiDistinct, 0, 0, pKeyInfo, -8) - 
Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+19609, libc.VaList(bp, (*FuncDef)(unsafe.Pointer((*AggInfo_func)(unsafe.Pointer(pFunc)).FpFunc)).FzName)) + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+19656, libc.VaList(bp, (*FuncDef)(unsafe.Pointer((*AggInfo_func)(unsafe.Pointer(pFunc)).FpFunc)).FzName)) } } @@ -91368,11 +91403,11 @@ if int32((*Parse)(unsafe.Pointer(pParse)).Fexplain) == 2 { var bCover int32 = libc.Bool32(pIdx != uintptr(0) && ((*Table)(unsafe.Pointer(pTab)).FtabFlags&U32(TF_WithoutRowid) == U32(0) || !(int32(*(*uint16)(unsafe.Pointer(pIdx + 100))&0x3>>0) == SQLITE_IDXTYPE_PRIMARYKEY))) - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+19642, + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+19689, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName, func() uintptr { if bCover != 0 { - return ts + 19654 + return ts + 19701 } return ts + 1554 }(), @@ -91700,7 +91735,7 @@ goto __7 } Xsqlite3ErrorMsg(tls, pParse, - ts+19677, + ts+19724, libc.VaList(bp, func() uintptr { if (*SrcItem)(unsafe.Pointer(p0)).FzAlias != 0 { return (*SrcItem)(unsafe.Pointer(p0)).FzAlias @@ -91761,7 +91796,7 @@ if !(int32((*Table)(unsafe.Pointer(pTab)).FnCol) != (*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer(pSub)).FpEList)).FnExpr) { goto __15 } - Xsqlite3ErrorMsg(tls, pParse, ts+19731, + Xsqlite3ErrorMsg(tls, pParse, ts+19778, libc.VaList(bp+8, int32((*Table)(unsafe.Pointer(pTab)).FnCol), (*Table)(unsafe.Pointer(pTab)).FzName, (*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer(pSub)).FpEList)).FnExpr)) goto select_end __15: @@ -91903,7 +91938,7 @@ (*SrcItem)(unsafe.Pointer(pItem1)).FaddrFillSub = addrTop Xsqlite3SelectDestInit(tls, bp+96, SRT_Coroutine, (*SrcItem)(unsafe.Pointer(pItem1)).FregReturn) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19771, libc.VaList(bp+32, pItem1)) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19818, libc.VaList(bp+32, pItem1)) Xsqlite3Select(tls, pParse, pSub1, bp+96) (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pItem1)).FpTab)).FnRowLogEst = (*Select)(unsafe.Pointer(pSub1)).FnSelectRow libc.SetBitFieldPtr16Uint32(pItem1+60+4, uint32(1), 5, 0x20) @@ -91962,7 +91997,7 @@ ; Xsqlite3SelectDestInit(tls, bp+96, SRT_EphemTab, (*SrcItem)(unsafe.Pointer(pItem1)).FiCursor) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19786, libc.VaList(bp+40, pItem1)) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19833, libc.VaList(bp+40, pItem1)) Xsqlite3Select(tls, pParse, pSub1, bp+96) (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pItem1)).FpTab)).FnRowLogEst = (*Select)(unsafe.Pointer(pSub1)).FnSelectRow if !(onceAddr != 0) { @@ -92433,9 +92468,9 @@ explainTempTable(tls, pParse, func() uintptr { if (*DistinctCtx)(unsafe.Pointer(bp+136)).FisTnct != 0 && (*Select)(unsafe.Pointer(p)).FselFlags&U32(SF_Distinct) == U32(0) { - return ts + 19802 + return ts + 19849 } - return ts + 19811 + return ts + 19858 }()) groupBySort = 1 @@ -92786,7 +92821,7 @@ if !(int32((*DistinctCtx)(unsafe.Pointer(bp+136)).FeTnctType) == WHERE_DISTINCT_UNORDERED) { goto __146 } - explainTempTable(tls, pParse, ts+19802) + explainTempTable(tls, pParse, ts+19849) __146: ; if !((*SortCtx)(unsafe.Pointer(bp+48)).FpOrderBy != 0) { @@ -92891,7 +92926,7 @@ } Xsqlite3_free(tls, (*TabResult)(unsafe.Pointer(p)).FzErrMsg) (*TabResult)(unsafe.Pointer(p)).FzErrMsg = Xsqlite3_mprintf(tls, - ts+19820, 0) + ts+19867, 0) (*TabResult)(unsafe.Pointer(p)).Frc = SQLITE_ERROR return 1 __11: @@ -93124,7 +93159,7 @@ if !((*Token)(unsafe.Pointer(pName2)).Fn > uint32(0)) { goto __3 } - Xsqlite3ErrorMsg(tls, pParse, ts+19885, 
0) + Xsqlite3ErrorMsg(tls, pParse, ts+19932, 0) goto trigger_cleanup __3: ; @@ -93168,7 +93203,7 @@ goto trigger_cleanup __8: ; - Xsqlite3FixInit(tls, bp+40, pParse, iDb, ts+19931, *(*uintptr)(unsafe.Pointer(bp + 32))) + Xsqlite3FixInit(tls, bp+40, pParse, iDb, ts+19978, *(*uintptr)(unsafe.Pointer(bp + 32))) if !(Xsqlite3FixSrcList(tls, bp+40, pTableName) != 0) { goto __9 } @@ -93186,7 +93221,7 @@ if !(int32((*Table)(unsafe.Pointer(pTab)).FeTabType) == TABTYP_VTAB) { goto __11 } - Xsqlite3ErrorMsg(tls, pParse, ts+19939, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+19986, 0) goto trigger_orphan_error __11: ; @@ -93198,7 +93233,7 @@ goto trigger_cleanup __12: ; - if !(Xsqlite3CheckObjectName(tls, pParse, zName, ts+19931, (*Table)(unsafe.Pointer(pTab)).FzName) != 0) { + if !(Xsqlite3CheckObjectName(tls, pParse, zName, ts+19978, (*Table)(unsafe.Pointer(pTab)).FzName) != 0) { goto __13 } goto trigger_cleanup @@ -93213,11 +93248,12 @@ if !!(noErr != 0) { goto __16 } - Xsqlite3ErrorMsg(tls, pParse, ts+19980, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(bp + 32)))) + Xsqlite3ErrorMsg(tls, pParse, ts+20027, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(bp + 32)))) goto __17 __16: ; Xsqlite3CodeVerifySchema(tls, pParse, iDb) + __17: ; goto trigger_cleanup @@ -93228,19 +93264,19 @@ if !(Xsqlite3_strnicmp(tls, (*Table)(unsafe.Pointer(pTab)).FzName, ts+6381, 7) == 0) { goto __18 } - Xsqlite3ErrorMsg(tls, pParse, ts+20006, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+20053, 0) goto trigger_cleanup __18: ; if !(int32((*Table)(unsafe.Pointer(pTab)).FeTabType) == TABTYP_VIEW && tr_tm != TK_INSTEAD) { goto __19 } - Xsqlite3ErrorMsg(tls, pParse, ts+20044, + Xsqlite3ErrorMsg(tls, pParse, ts+20091, libc.VaList(bp+8, func() uintptr { if tr_tm == TK_BEFORE { - return ts + 20081 + return ts + 20128 } - return ts + 20088 + return ts + 20135 }(), pTableName+8)) goto trigger_orphan_error __19: @@ -93249,7 +93285,7 @@ goto __20 } Xsqlite3ErrorMsg(tls, pParse, - ts+20094, libc.VaList(bp+24, pTableName+8)) + ts+20141, libc.VaList(bp+24, pTableName+8)) goto trigger_orphan_error __20: ; @@ -93398,7 +93434,7 @@ __3: ; Xsqlite3TokenInit(tls, bp+56, (*Trigger)(unsafe.Pointer(pTrig)).FzName) - Xsqlite3FixInit(tls, bp+72, pParse, iDb, ts+19931, bp+56) + Xsqlite3FixInit(tls, bp+72, pParse, iDb, ts+19978, bp+56) if !(Xsqlite3FixTriggerStep(tls, bp+72, (*Trigger)(unsafe.Pointer(pTrig)).Fstep_list) != 0 || Xsqlite3FixExpr(tls, bp+72, (*Trigger)(unsafe.Pointer(pTrig)).FpWhen) != 0) { goto __4 @@ -93431,7 +93467,7 @@ goto __12 } Xsqlite3ErrorMsg(tls, pParse, - ts+20140, + ts+20187, libc.VaList(bp, (*Trigger)(unsafe.Pointer(pTrig)).FzName, (*TriggerStep)(unsafe.Pointer(pStep)).FzTarget)) goto triggerfinish_cleanup __12: @@ -93456,13 +93492,13 @@ z = Xsqlite3DbStrNDup(tls, db, (*Token)(unsafe.Pointer(pAll)).Fz, uint64((*Token)(unsafe.Pointer(pAll)).Fn)) Xsqlite3NestedParse(tls, pParse, - ts+20188, + ts+20235, libc.VaList(bp+16, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName, zName, (*Trigger)(unsafe.Pointer(pTrig)).Ftable, z)) Xsqlite3DbFree(tls, db, z) Xsqlite3ChangeCookie(tls, pParse, iDb) Xsqlite3VdbeAddParseSchemaOp(tls, v, iDb, - Xsqlite3MPrintf(tls, db, ts+20263, libc.VaList(bp+48, zName)), uint16(0)) + Xsqlite3MPrintf(tls, db, ts+20310, libc.VaList(bp+48, zName)), uint16(0)) __7: ; __6: @@ -93718,7 +93754,7 @@ if !!(noErr != 0) { goto __9 } - Xsqlite3ErrorMsg(tls, pParse, ts+20292, libc.VaList(bp, pName+8)) + Xsqlite3ErrorMsg(tls, pParse, ts+20339, libc.VaList(bp, pName+8)) goto __10 __9: 
Xsqlite3CodeVerifyNamedSchema(tls, pParse, zDb) @@ -93771,7 +93807,7 @@ if libc.AssignUintptr(&v, Xsqlite3GetVdbe(tls, pParse)) != uintptr(0) { Xsqlite3NestedParse(tls, pParse, - ts+20312, + ts+20359, libc.VaList(bp, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName, (*Trigger)(unsafe.Pointer(pTrigger)).FzName)) Xsqlite3ChangeCookie(tls, pParse, iDb) Xsqlite3VdbeAddOp4(tls, v, OP_DropTrigger, iDb, 0, 0, (*Trigger)(unsafe.Pointer(pTrigger)).FzName, 0) @@ -93885,12 +93921,12 @@ goto __15 } Xsqlite3ErrorMsg(tls, pParse, - ts+20374, + ts+20421, libc.VaList(bp, func() uintptr { if op == TK_DELETE { - return ts + 20422 + return ts + 20469 } - return ts + 20429 + return ts + 20476 }())) __15: ; @@ -94004,7 +94040,7 @@ if int32((*Expr)(unsafe.Pointer((*Expr)(unsafe.Pointer(pTerm)).FpRight)).Fop) != TK_ASTERISK { return 0 } - Xsqlite3ErrorMsg(tls, pParse, ts+20436, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+20483, 0) return 1 } @@ -94070,7 +94106,7 @@ } Xsqlite3ExprListDelete(tls, db, (*Select)(unsafe.Pointer(bp)).FpEList) pNew = sqlite3ExpandReturning(tls, pParse, (*Returning)(unsafe.Pointer(pReturning)).FpReturnEL, pTab) - if !(int32((*Sqlite3)(unsafe.Pointer(db)).FmallocFailed) != 0) { + if (*Parse)(unsafe.Pointer(pParse)).FnErr == 0 { libc.Xmemset(tls, bp+240, 0, uint64(unsafe.Sizeof(NameContext{}))) if (*Returning)(unsafe.Pointer(pReturning)).FnRetCol == 0 { (*Returning)(unsafe.Pointer(pReturning)).FnRetCol = (*ExprList)(unsafe.Pointer(pNew)).FnExpr @@ -94234,7 +94270,7 @@ if v != 0 { if (*Trigger)(unsafe.Pointer(pTrigger)).FzName != 0 { Xsqlite3VdbeChangeP4(tls, v, -1, - Xsqlite3MPrintf(tls, db, ts+20478, libc.VaList(bp, (*Trigger)(unsafe.Pointer(pTrigger)).FzName)), -6) + Xsqlite3MPrintf(tls, db, ts+20525, libc.VaList(bp, (*Trigger)(unsafe.Pointer(pTrigger)).FzName)), -6) } if (*Trigger)(unsafe.Pointer(pTrigger)).FpWhen != 0 { @@ -94827,7 +94863,7 @@ } Xsqlite3ErrorMsg(tls, pParse, - ts+20492, + ts+20539, libc.VaList(bp, (*Column)(unsafe.Pointer((*Table)(unsafe.Pointer(pTab)).FaCol+uintptr(j)*24)).FzCnName)) goto update_cleanup __27: @@ -94859,7 +94895,7 @@ iRowidExpr = i goto __30 __29: - Xsqlite3ErrorMsg(tls, pParse, ts+20528, libc.VaList(bp+8, (*ExprList_item)(unsafe.Pointer(pChanges+8+uintptr(i)*32)).FzEName)) + Xsqlite3ErrorMsg(tls, pParse, ts+20575, libc.VaList(bp+8, (*ExprList_item)(unsafe.Pointer(pChanges+8+uintptr(i)*32)).FzEName)) (*Parse)(unsafe.Pointer(pParse)).FcheckSchema = U8(1) goto update_cleanup __30: @@ -95185,7 +95221,12 @@ goto __77 __76: flags = WHERE_ONEPASS_DESIRED - if !(!(int32((*Parse)(unsafe.Pointer(pParse)).Fnested) != 0) && !(pTrigger != 0) && !(hasFK != 0) && !(chngKey != 0) && !(*(*int32)(unsafe.Pointer(bp + 104)) != 0)) { + if !(!(int32((*Parse)(unsafe.Pointer(pParse)).Fnested) != 0) && + !(pTrigger != 0) && + !(hasFK != 0) && + !(chngKey != 0) && + !(*(*int32)(unsafe.Pointer(bp + 104)) != 0) && + (*NameContext)(unsafe.Pointer(bp+40)).FncFlags&NC_Subquery == 0) { goto __78 } flags = flags | WHERE_ONEPASS_MULTIROW @@ -95739,7 +95780,7 @@ if !(regRowCount != 0) { goto __169 } - Xsqlite3CodeChangeCount(tls, v, regRowCount, ts+20547) + Xsqlite3CodeChangeCount(tls, v, regRowCount, ts+20594) __169: ; update_cleanup: @@ -96045,10 +96086,10 @@ if nClause == 0 && (*Upsert)(unsafe.Pointer(pUpsert)).FpNextUpsert == uintptr(0) { *(*uint8)(unsafe.Pointer(bp + 216)) = uint8(0) } else { - Xsqlite3_snprintf(tls, int32(unsafe.Sizeof([16]uint8{})), bp+216, ts+20560, libc.VaList(bp, nClause+1)) + Xsqlite3_snprintf(tls, 
int32(unsafe.Sizeof([16]uint8{})), bp+216, ts+20607, libc.VaList(bp, nClause+1)) } Xsqlite3ErrorMsg(tls, pParse, - ts+20564, libc.VaList(bp+8, bp+216)) + ts+20611, libc.VaList(bp+8, bp+216)) return SQLITE_ERROR } @@ -96171,7 +96212,7 @@ var zSubSql uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp)), 0) if zSubSql != 0 && - (libc.Xstrncmp(tls, zSubSql, ts+20637, uint64(3)) == 0 || libc.Xstrncmp(tls, zSubSql, ts+20641, uint64(3)) == 0) { + (libc.Xstrncmp(tls, zSubSql, ts+20684, uint64(3)) == 0 || libc.Xstrncmp(tls, zSubSql, ts+20688, uint64(3)) == 0) { rc = execSql(tls, db, pzErrMsg, zSubSql) if rc != SQLITE_OK { break @@ -96319,14 +96360,14 @@ if !!(int32((*Sqlite3)(unsafe.Pointer(db)).FautoCommit) != 0) { goto __1 } - Xsqlite3SetString(tls, pzErrMsg, db, ts+20645) + Xsqlite3SetString(tls, pzErrMsg, db, ts+20692) return SQLITE_ERROR __1: ; if !((*Sqlite3)(unsafe.Pointer(db)).FnVdbeActive > 1) { goto __2 } - Xsqlite3SetString(tls, pzErrMsg, db, ts+20685) + Xsqlite3SetString(tls, pzErrMsg, db, ts+20732) return SQLITE_ERROR __2: ; @@ -96337,7 +96378,7 @@ if !(Xsqlite3_value_type(tls, pOut) != SQLITE_TEXT) { goto __5 } - Xsqlite3SetString(tls, pzErrMsg, db, ts+20728) + Xsqlite3SetString(tls, pzErrMsg, db, ts+20775) return SQLITE_ERROR __5: ; @@ -96365,7 +96406,7 @@ isMemDb = Xsqlite3PagerIsMemdb(tls, Xsqlite3BtreePager(tls, pMain)) nDb = (*Sqlite3)(unsafe.Pointer(db)).FnDb - rc = execSqlF(tls, db, pzErrMsg, ts+20746, libc.VaList(bp, zOut)) + rc = execSqlF(tls, db, pzErrMsg, ts+20793, libc.VaList(bp, zOut)) (*Sqlite3)(unsafe.Pointer(db)).FopenFlags = saved_openFlags if !(rc != SQLITE_OK) { goto __6 @@ -96385,7 +96426,7 @@ goto __8 } rc = SQLITE_ERROR - Xsqlite3SetString(tls, pzErrMsg, db, ts+20769) + Xsqlite3SetString(tls, pzErrMsg, db, ts+20816) goto end_of_vacuum __8: ; @@ -96445,7 +96486,7 @@ (*Sqlite3)(unsafe.Pointer(db)).Finit.FiDb = U8(nDb) rc = execSqlF(tls, db, pzErrMsg, - ts+20796, + ts+20843, libc.VaList(bp+8, zDbMain)) if !(rc != SQLITE_OK) { goto __13 @@ -96454,7 +96495,7 @@ __13: ; rc = execSqlF(tls, db, pzErrMsg, - ts+20904, + ts+20951, libc.VaList(bp+16, zDbMain)) if !(rc != SQLITE_OK) { goto __14 @@ -96465,7 +96506,7 @@ (*Sqlite3)(unsafe.Pointer(db)).Finit.FiDb = U8(0) rc = execSqlF(tls, db, pzErrMsg, - ts+20958, + ts+21005, libc.VaList(bp+24, zDbMain)) *(*U32)(unsafe.Pointer(db + 44)) &= libc.Uint32FromInt32(libc.CplInt32(DBFLAG_Vacuum)) @@ -96476,7 +96517,7 @@ __15: ; rc = execSqlF(tls, db, pzErrMsg, - ts+21109, + ts+21156, libc.VaList(bp+32, zDbMain)) if !(rc != 0) { goto __16 @@ -96905,11 +96946,11 @@ if pEnd != 0 { (*Parse)(unsafe.Pointer(pParse)).FsNameToken.Fn = uint32(int32((int64((*Token)(unsafe.Pointer(pEnd)).Fz)-int64((*Parse)(unsafe.Pointer(pParse)).FsNameToken.Fz))/1)) + (*Token)(unsafe.Pointer(pEnd)).Fn } - zStmt = Xsqlite3MPrintf(tls, db, ts+21239, libc.VaList(bp, pParse+272)) + zStmt = Xsqlite3MPrintf(tls, db, ts+21286, libc.VaList(bp, pParse+272)) iDb = Xsqlite3SchemaToIndex(tls, db, (*Table)(unsafe.Pointer(pTab)).FpSchema) Xsqlite3NestedParse(tls, pParse, - ts+21263, + ts+21310, libc.VaList(bp+8, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName, (*Table)(unsafe.Pointer(pTab)).FzName, (*Table)(unsafe.Pointer(pTab)).FzName, @@ -96919,7 +96960,7 @@ Xsqlite3ChangeCookie(tls, pParse, iDb) Xsqlite3VdbeAddOp0(tls, v, OP_Expire) - zWhere = Xsqlite3MPrintf(tls, db, ts+21362, libc.VaList(bp+48, (*Table)(unsafe.Pointer(pTab)).FzName, zStmt)) + zWhere = Xsqlite3MPrintf(tls, db, ts+21409, libc.VaList(bp+48, 
(*Table)(unsafe.Pointer(pTab)).FzName, zStmt)) Xsqlite3VdbeAddParseSchemaOp(tls, v, iDb, zWhere, uint16(0)) Xsqlite3DbFree(tls, db, zStmt) @@ -96980,7 +97021,7 @@ for pCtx = (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx; pCtx != 0; pCtx = (*VtabCtx)(unsafe.Pointer(pCtx)).FpPrior { if (*VtabCtx)(unsafe.Pointer(pCtx)).FpTab == pTab { *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, - ts+21381, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName)) + ts+21428, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName)) return SQLITE_LOCKED } } @@ -97008,9 +97049,11 @@ (*VtabCtx)(unsafe.Pointer(bp + 32)).FpPrior = (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx (*VtabCtx)(unsafe.Pointer(bp + 32)).FbDeclared = 0 (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx = bp + 32 + (*Table)(unsafe.Pointer(pTab)).FnTabRef++ rc = (*struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, uintptr, uintptr) int32 })(unsafe.Pointer(&struct{ uintptr }{xConstruct})).f(tls, db, (*Module)(unsafe.Pointer(pMod)).FpAux, nArg, azArg, pVTable+16, bp+64) + Xsqlite3DeleteTable(tls, db, pTab) (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx = (*VtabCtx)(unsafe.Pointer(bp + 32)).FpPrior if rc == SQLITE_NOMEM { Xsqlite3OomFault(tls, db) @@ -97018,7 +97061,7 @@ if SQLITE_OK != rc { if *(*uintptr)(unsafe.Pointer(bp + 64)) == uintptr(0) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+21423, libc.VaList(bp+8, zModuleName)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+21470, libc.VaList(bp+8, zModuleName)) } else { *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+3663, libc.VaList(bp+16, *(*uintptr)(unsafe.Pointer(bp + 64)))) Xsqlite3_free(tls, *(*uintptr)(unsafe.Pointer(bp + 64))) @@ -97030,7 +97073,7 @@ (*Module)(unsafe.Pointer(pMod)).FnRefModule++ (*VTable)(unsafe.Pointer(pVTable)).FnRef = 1 if (*VtabCtx)(unsafe.Pointer(bp+32)).FbDeclared == 0 { - var zFormat uintptr = ts + 21453 + var zFormat uintptr = ts + 21500 *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, zFormat, libc.VaList(bp+24, (*Table)(unsafe.Pointer(pTab)).FzName)) Xsqlite3VtabUnlock(tls, pVTable) rc = SQLITE_ERROR @@ -97104,7 +97147,7 @@ if !(pMod != 0) { var zModule uintptr = *(*uintptr)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pTab + 64 + 8)))) - Xsqlite3ErrorMsg(tls, pParse, ts+21499, libc.VaList(bp, zModule)) + Xsqlite3ErrorMsg(tls, pParse, ts+21546, libc.VaList(bp, zModule)) rc = SQLITE_ERROR } else { *(*uintptr)(unsafe.Pointer(bp + 16)) = uintptr(0) @@ -97162,7 +97205,7 @@ pMod = Xsqlite3HashFind(tls, db+576, zMod) if pMod == uintptr(0) || (*Sqlite3_module)(unsafe.Pointer((*Module)(unsafe.Pointer(pMod)).FpModule)).FxCreate == uintptr(0) || (*Sqlite3_module)(unsafe.Pointer((*Module)(unsafe.Pointer(pMod)).FpModule)).FxDestroy == uintptr(0) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+21499, libc.VaList(bp, zMod)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+21546, libc.VaList(bp, zMod)) rc = SQLITE_ERROR } else { rc = vtabCallConstructor(tls, db, pTab, pMod, (*Sqlite3_module)(unsafe.Pointer((*Module)(unsafe.Pointer(pMod)).FpModule)).FxCreate, pzErr) @@ -97196,7 +97239,7 @@ if !(pCtx != 0) || (*VtabCtx)(unsafe.Pointer(pCtx)).FbDeclared != 0 { Xsqlite3Error(tls, db, SQLITE_MISUSE) Xsqlite3_mutex_leave(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) - return Xsqlite3MisuseError(tls, 151030) + return Xsqlite3MisuseError(tls, 151102) } pTab = (*VtabCtx)(unsafe.Pointer(pCtx)).FpTab @@ -97649,7 +97692,7 @@ Xsqlite3_mutex_enter(tls, 
(*Sqlite3)(unsafe.Pointer(db)).Fmutex) p = (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx if !(p != 0) { - rc = Xsqlite3MisuseError(tls, 151521) + rc = Xsqlite3MisuseError(tls, 151593) } else { ap = va switch op { @@ -97676,7 +97719,7 @@ fallthrough default: { - rc = Xsqlite3MisuseError(tls, 151539) + rc = Xsqlite3MisuseError(tls, 151611) break } @@ -97907,7 +97950,7 @@ func explainIndexColumnName(tls *libc.TLS, pIdx uintptr, i int32) uintptr { i = int32(*(*I16)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx)).FaiColumn + uintptr(i)*2))) if i == -2 { - return ts + 21518 + return ts + 21565 } if i == -1 { return ts + 16267 @@ -97919,11 +97962,11 @@ var i int32 if bAnd != 0 { - Xsqlite3_str_append(tls, pStr, ts+21525, 5) + Xsqlite3_str_append(tls, pStr, ts+21572, 5) } if nTerm > 1 { - Xsqlite3_str_append(tls, pStr, ts+21531, 1) + Xsqlite3_str_append(tls, pStr, ts+21578, 1) } for i = 0; i < nTerm; i++ { if i != 0 { @@ -97938,7 +97981,7 @@ Xsqlite3_str_append(tls, pStr, zOp, 1) if nTerm > 1 { - Xsqlite3_str_append(tls, pStr, ts+21531, 1) + Xsqlite3_str_append(tls, pStr, ts+21578, 1) } for i = 0; i < nTerm; i++ { if i != 0 { @@ -97964,27 +98007,27 @@ if int32(nEq) == 0 && (*WhereLoop)(unsafe.Pointer(pLoop)).FwsFlags&U32(WHERE_BTM_LIMIT|WHERE_TOP_LIMIT) == U32(0) { return } - Xsqlite3_str_append(tls, pStr, ts+21533, 2) + Xsqlite3_str_append(tls, pStr, ts+21580, 2) for i = 0; i < int32(nEq); i++ { var z uintptr = explainIndexColumnName(tls, pIndex, i) if i != 0 { - Xsqlite3_str_append(tls, pStr, ts+21525, 5) + Xsqlite3_str_append(tls, pStr, ts+21572, 5) } Xsqlite3_str_appendf(tls, pStr, func() uintptr { if i >= int32(nSkip) { - return ts + 21536 + return ts + 21583 } - return ts + 21541 + return ts + 21588 }(), libc.VaList(bp, z)) } j = i if (*WhereLoop)(unsafe.Pointer(pLoop)).FwsFlags&U32(WHERE_BTM_LIMIT) != 0 { - explainAppendTerm(tls, pStr, pIndex, int32(*(*U16)(unsafe.Pointer(pLoop + 24 + 2))), j, i, ts+21549) + explainAppendTerm(tls, pStr, pIndex, int32(*(*U16)(unsafe.Pointer(pLoop + 24 + 2))), j, i, ts+21596) i = 1 } if (*WhereLoop)(unsafe.Pointer(pLoop)).FwsFlags&U32(WHERE_TOP_LIMIT) != 0 { - explainAppendTerm(tls, pStr, pIndex, int32(*(*U16)(unsafe.Pointer(pLoop + 24 + 4))), j, i, ts+21551) + explainAppendTerm(tls, pStr, pIndex, int32(*(*U16)(unsafe.Pointer(pLoop + 24 + 4))), j, i, ts+21598) } Xsqlite3_str_append(tls, pStr, ts+4957, 1) } @@ -98027,11 +98070,11 @@ Xsqlite3StrAccumInit(tls, bp+64, db, bp+96, int32(unsafe.Sizeof([100]uint8{})), SQLITE_MAX_LENGTH) (*StrAccum)(unsafe.Pointer(bp + 64)).FprintfFlags = U8(SQLITE_PRINTF_INTERNAL) - Xsqlite3_str_appendf(tls, bp+64, ts+21553, libc.VaList(bp, func() uintptr { + Xsqlite3_str_appendf(tls, bp+64, ts+21600, libc.VaList(bp, func() uintptr { if isSearch != 0 { - return ts + 21559 + return ts + 21606 } - return ts + 21566 + return ts + 21613 }(), pItem)) if flags&U32(WHERE_IPK|WHERE_VIRTUALTABLE) == U32(0) { var zFmt uintptr = uintptr(0) @@ -98044,40 +98087,40 @@ zFmt = ts + 10976 } } else if flags&U32(WHERE_PARTIALIDX) != 0 { - zFmt = ts + 21571 + zFmt = ts + 21618 } else if flags&U32(WHERE_AUTO_INDEX) != 0 { - zFmt = ts + 21604 + zFmt = ts + 21651 } else if flags&U32(WHERE_IDX_ONLY) != 0 { - zFmt = ts + 21629 + zFmt = ts + 21676 } else { - zFmt = ts + 21647 + zFmt = ts + 21694 } if zFmt != 0 { - Xsqlite3_str_append(tls, bp+64, ts+21656, 7) + Xsqlite3_str_append(tls, bp+64, ts+21703, 7) Xsqlite3_str_appendf(tls, bp+64, zFmt, libc.VaList(bp+16, (*Index)(unsafe.Pointer(pIdx)).FzName)) explainIndexRange(tls, bp+64, pLoop) } } else if 
flags&U32(WHERE_IPK) != U32(0) && flags&U32(WHERE_CONSTRAINT) != U32(0) { var cRangeOp uint8 var zRowid uintptr = ts + 16267 - Xsqlite3_str_appendf(tls, bp+64, ts+21664, libc.VaList(bp+24, zRowid)) + Xsqlite3_str_appendf(tls, bp+64, ts+21711, libc.VaList(bp+24, zRowid)) if flags&U32(WHERE_COLUMN_EQ|WHERE_COLUMN_IN) != 0 { cRangeOp = uint8('=') } else if flags&U32(WHERE_BOTH_LIMIT) == U32(WHERE_BOTH_LIMIT) { - Xsqlite3_str_appendf(tls, bp+64, ts+21695, libc.VaList(bp+32, zRowid)) + Xsqlite3_str_appendf(tls, bp+64, ts+21742, libc.VaList(bp+32, zRowid)) cRangeOp = uint8('<') } else if flags&U32(WHERE_BTM_LIMIT) != 0 { cRangeOp = uint8('>') } else { cRangeOp = uint8('<') } - Xsqlite3_str_appendf(tls, bp+64, ts+21705, libc.VaList(bp+40, int32(cRangeOp))) + Xsqlite3_str_appendf(tls, bp+64, ts+21752, libc.VaList(bp+40, int32(cRangeOp))) } else if flags&U32(WHERE_VIRTUALTABLE) != U32(0) { - Xsqlite3_str_appendf(tls, bp+64, ts+21710, + Xsqlite3_str_appendf(tls, bp+64, ts+21757, libc.VaList(bp+48, *(*int32)(unsafe.Pointer(pLoop + 24)), *(*uintptr)(unsafe.Pointer(pLoop + 24 + 16)))) } if int32((*SrcItem)(unsafe.Pointer(pItem)).Ffg.Fjointype)&JT_LEFT != 0 { - Xsqlite3_str_appendf(tls, bp+64, ts+21737, 0) + Xsqlite3_str_appendf(tls, bp+64, ts+21784, 0) } zMsg = Xsqlite3StrAccumFinish(tls, bp+64) @@ -98109,22 +98152,22 @@ Xsqlite3StrAccumInit(tls, bp+24, db, bp+56, int32(unsafe.Sizeof([100]uint8{})), SQLITE_MAX_LENGTH) (*StrAccum)(unsafe.Pointer(bp + 24)).FprintfFlags = U8(SQLITE_PRINTF_INTERNAL) - Xsqlite3_str_appendf(tls, bp+24, ts+21748, libc.VaList(bp, pItem)) + Xsqlite3_str_appendf(tls, bp+24, ts+21795, libc.VaList(bp, pItem)) pLoop = (*WhereLevel)(unsafe.Pointer(pLevel)).FpWLoop if (*WhereLoop)(unsafe.Pointer(pLoop)).FwsFlags&U32(WHERE_IPK) != 0 { var pTab uintptr = (*SrcItem)(unsafe.Pointer(pItem)).FpTab if int32((*Table)(unsafe.Pointer(pTab)).FiPKey) >= 0 { - Xsqlite3_str_appendf(tls, bp+24, ts+21536, libc.VaList(bp+8, (*Column)(unsafe.Pointer((*Table)(unsafe.Pointer(pTab)).FaCol+uintptr((*Table)(unsafe.Pointer(pTab)).FiPKey)*24)).FzCnName)) + Xsqlite3_str_appendf(tls, bp+24, ts+21583, libc.VaList(bp+8, (*Column)(unsafe.Pointer((*Table)(unsafe.Pointer(pTab)).FaCol+uintptr((*Table)(unsafe.Pointer(pTab)).FiPKey)*24)).FzCnName)) } else { - Xsqlite3_str_appendf(tls, bp+24, ts+21769, 0) + Xsqlite3_str_appendf(tls, bp+24, ts+21816, 0) } } else { for i = int32((*WhereLoop)(unsafe.Pointer(pLoop)).FnSkip); i < int32(*(*U16)(unsafe.Pointer(pLoop + 24))); i++ { var z uintptr = explainIndexColumnName(tls, *(*uintptr)(unsafe.Pointer(pLoop + 24 + 8)), i) if i > int32((*WhereLoop)(unsafe.Pointer(pLoop)).FnSkip) { - Xsqlite3_str_append(tls, bp+24, ts+21525, 5) + Xsqlite3_str_append(tls, bp+24, ts+21572, 5) } - Xsqlite3_str_appendf(tls, bp+24, ts+21536, libc.VaList(bp+16, z)) + Xsqlite3_str_appendf(tls, bp+24, ts+21583, libc.VaList(bp+16, z)) } } Xsqlite3_str_append(tls, bp+24, ts+4957, 1) @@ -99721,7 +99764,7 @@ ; __126: ; - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21777, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21824, 0) ii = 0 __135: if !(ii < (*WhereClause)(unsafe.Pointer(pOrWc)).FnTerm) { @@ -99749,7 +99792,7 @@ pOrExpr = pAndExpr __140: ; - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21792, libc.VaList(bp, ii+1)) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21839, libc.VaList(bp, ii+1)) pSubWInfo = Xsqlite3WhereBegin(tls, pParse, pOrTab, pOrExpr, uintptr(0), uintptr(0), uintptr(0), uint16(WHERE_OR_SUBCLAUSE), iCovCur) @@ -100267,7 +100310,7 @@ var mAll Bitmask = uint64(0) var k int32 - 
Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21801, libc.VaList(bp, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pTabItem)).FpTab)).FzName)) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21848, libc.VaList(bp, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pTabItem)).FpTab)).FzName)) for k = 0; k < iLevel; k++ { var iIdxCur int32 @@ -100628,7 +100671,7 @@ {FzOp: ts + 16116, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_MATCH)}, {FzOp: ts + 15447, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_GLOB)}, {FzOp: ts + 14967, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_LIKE)}, - {FzOp: ts + 21815, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_REGEXP)}, + {FzOp: ts + 21862, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_REGEXP)}, } func transferJoinMarkings(tls *libc.TLS, pDerived uintptr, pBase uintptr) { @@ -101118,12 +101161,12 @@ extraRight = x - uint64(1) if prereqAll>>1 >= x { - Xsqlite3ErrorMsg(tls, pParse, ts+21822, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+21869, 0) return } } else if prereqAll>>1 >= x { if (*SrcList)(unsafe.Pointer(pSrc)).FnSrc > 0 && int32((*SrcItem)(unsafe.Pointer(pSrc+8)).Ffg.Fjointype)&JT_LTORJ != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+21822, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+21869, 0) return } *(*U32)(unsafe.Pointer(pExpr + 4)) &= libc.Uint32FromInt32(libc.CplInt32(EP_InnerON)) @@ -101202,7 +101245,7 @@ !((*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_OuterON) != U32(0)) && 0 == Xsqlite3ExprCanBeNull(tls, pLeft) { (*Expr)(unsafe.Pointer(pExpr)).Fop = U8(TK_TRUEFALSE) - *(*uintptr)(unsafe.Pointer(pExpr + 8)) = ts + 6769 + *(*uintptr)(unsafe.Pointer(pExpr + 8)) = ts + 7699 *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_IsFalse) (*WhereTerm)(unsafe.Pointer(pTerm)).FprereqAll = uint64(0) (*WhereTerm)(unsafe.Pointer(pTerm)).FeOperator = U16(0) @@ -101296,7 +101339,7 @@ } zCollSeqName = func() uintptr { if *(*int32)(unsafe.Pointer(bp + 20)) != 0 { - return ts + 21863 + return ts + 21910 } return uintptr(unsafe.Pointer(&Xsqlite3StrBINARY)) }() @@ -101672,7 +101715,7 @@ k++ } if k >= int32((*Table)(unsafe.Pointer(pTab)).FnCol) { - Xsqlite3ErrorMsg(tls, pParse, ts+21870, + Xsqlite3ErrorMsg(tls, pParse, ts+21917, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName, j)) return } @@ -101688,7 +101731,7 @@ pRhs = Xsqlite3PExpr(tls, pParse, TK_UPLUS, Xsqlite3ExprDup(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, (*ExprList_item)(unsafe.Pointer(pArgs+8+uintptr(j)*32)).FpExpr, 0), uintptr(0)) pTerm = Xsqlite3PExpr(tls, pParse, TK_EQ, pColRef, pRhs) - if int32((*SrcItem)(unsafe.Pointer(pItem)).Ffg.Fjointype)&(JT_LEFT|JT_LTORJ) != 0 { + if int32((*SrcItem)(unsafe.Pointer(pItem)).Ffg.Fjointype)&(JT_LEFT|JT_LTORJ|JT_RIGHT) != 0 { joinType = U32(EP_OuterON) } else { joinType = U32(EP_InnerON) @@ -102406,7 +102449,7 @@ goto __6 } Xsqlite3_log(tls, SQLITE_WARNING|int32(1)<<8, - ts+21906, libc.VaList(bp, (*Table)(unsafe.Pointer(pTable)).FzName, + ts+21953, libc.VaList(bp, (*Table)(unsafe.Pointer(pTable)).FzName, (*Column)(unsafe.Pointer((*Table)(unsafe.Pointer(pTable)).FaCol+uintptr(iCol)*24)).FzCnName)) sentWarning = U8(1) __6: @@ -102477,7 +102520,7 @@ __14: ; *(*uintptr)(unsafe.Pointer(pLoop + 24 + 8)) = pIdx - (*Index)(unsafe.Pointer(pIdx)).FzName = ts + 21932 + (*Index)(unsafe.Pointer(pIdx)).FzName = ts + 21979 (*Index)(unsafe.Pointer(pIdx)).FpTable = pTable n = 0 idxCols = uint64(0) @@ -102651,6 +102694,10 @@ var v uintptr = (*Parse)(unsafe.Pointer(pParse)).FpVdbe var pLoop uintptr = (*WhereLevel)(unsafe.Pointer(pLevel)).FpWLoop var iCur int32 + var saved_pIdxEpr uintptr + + saved_pIdxEpr = 
(*Parse)(unsafe.Pointer(pParse)).FpIdxEpr + (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr = uintptr(0) addrOnce = Xsqlite3VdbeAddOp0(tls, v, OP_Once) for __ccgo := true; __ccgo; __ccgo = iLevel < int32((*WhereInfo)(unsafe.Pointer(pWInfo)).FnLevel) { @@ -102694,9 +102741,7 @@ var r1 int32 = Xsqlite3GetTempRange(tls, pParse, n) var jj int32 for jj = 0; jj < n; jj++ { - var iCol int32 = int32(*(*I16)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx)).FaiColumn + uintptr(jj)*2))) - - Xsqlite3ExprCodeGetColumnOfTable(tls, v, (*Index)(unsafe.Pointer(pIdx)).FpTable, iCur, iCol, r1+jj) + Xsqlite3ExprCodeLoadIndexColumn(tls, pParse, pIdx, iCur, jj, r1+jj) } Xsqlite3VdbeAddOp4Int(tls, v, OP_FilterAdd, (*WhereLevel)(unsafe.Pointer(pLevel)).FregFilter, 0, r1, n) Xsqlite3ReleaseTempRange(tls, pParse, r1, n) @@ -102730,6 +102775,7 @@ } } Xsqlite3VdbeJumpHere(tls, v, addrOnce) + (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr = saved_pIdxEpr } func allocateIndexInfo(tls *libc.TLS, pWInfo uintptr, pWC uintptr, mUnusable Bitmask, pSrc uintptr, pmNoOmit uintptr) uintptr { @@ -102988,11 +103034,16 @@ _ = pParse + if !((*Table)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx)).FpTable)).FtabFlags&U32(TF_WithoutRowid) == U32(0)) && int32(*(*uint16)(unsafe.Pointer(pIdx + 100))&0x3>>0) == SQLITE_IDXTYPE_PRIMARYKEY { + nField = int32((*Index)(unsafe.Pointer(pIdx)).FnKeyCol) + } else { + nField = int32((*Index)(unsafe.Pointer(pIdx)).FnColumn) + } nField = func() int32 { - if int32((*UnpackedRecord)(unsafe.Pointer(pRec)).FnField) < (*Index)(unsafe.Pointer(pIdx)).FnSample { + if int32((*UnpackedRecord)(unsafe.Pointer(pRec)).FnField) < nField { return int32((*UnpackedRecord)(unsafe.Pointer(pRec)).FnField) } - return (*Index)(unsafe.Pointer(pIdx)).FnSample + return nField }() iCol = 0 iSample = (*Index)(unsafe.Pointer(pIdx)).FnSample * nField @@ -104573,7 +104624,7 @@ j >= (*WhereClause)(unsafe.Pointer(pWC)).FnTerm || *(*uintptr)(unsafe.Pointer((*WhereLoop)(unsafe.Pointer(pNew)).FaLTerm + uintptr(iTerm)*8)) != uintptr(0) || int32((*sqlite3_index_constraint)(unsafe.Pointer(pIdxCons)).Fusable) == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+21943, libc.VaList(bp, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pSrc)).FpTab)).FzName)) + Xsqlite3ErrorMsg(tls, pParse, ts+21990, libc.VaList(bp, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pSrc)).FpTab)).FzName)) return SQLITE_ERROR } @@ -104631,7 +104682,7 @@ (*WhereLoop)(unsafe.Pointer(pNew)).FnLTerm = U16(mxTerm + 1) for i = 0; i <= mxTerm; i++ { if *(*uintptr)(unsafe.Pointer((*WhereLoop)(unsafe.Pointer(pNew)).FaLTerm + uintptr(i)*8)) == uintptr(0) { - Xsqlite3ErrorMsg(tls, pParse, ts+21943, libc.VaList(bp+8, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pSrc)).FpTab)).FzName)) + Xsqlite3ErrorMsg(tls, pParse, ts+21990, libc.VaList(bp+8, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pSrc)).FpTab)).FzName)) return SQLITE_ERROR } @@ -105029,7 +105080,7 @@ mPrior = mPrior | (*WhereLoop)(unsafe.Pointer(pNew)).FmaskSelf if rc != 0 || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { if rc == SQLITE_DONE { - Xsqlite3_log(tls, SQLITE_WARNING, ts+21969, 0) + Xsqlite3_log(tls, SQLITE_WARNING, ts+22016, 0) rc = SQLITE_OK } else { goto __3 @@ -105636,7 +105687,7 @@ } if nFrom == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+22004, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+22051, 0) Xsqlite3DbFreeNN(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pSpace) return SQLITE_ERROR } @@ -105671,6 +105722,10 @@ if int32((*WherePath)(unsafe.Pointer(pFrom)).FisOrdered) == 
(*ExprList)(unsafe.Pointer((*WhereInfo)(unsafe.Pointer(pWInfo)).FpOrderBy)).FnExpr { (*WhereInfo)(unsafe.Pointer(pWInfo)).FeDistinct = U8(WHERE_DISTINCT_ORDERED) } + if (*Select)(unsafe.Pointer((*WhereInfo)(unsafe.Pointer(pWInfo)).FpSelect)).FpOrderBy != 0 && + int32((*WhereInfo)(unsafe.Pointer(pWInfo)).FnOBSat) > (*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer((*WhereInfo)(unsafe.Pointer(pWInfo)).FpSelect)).FpOrderBy)).FnExpr { + (*WhereInfo)(unsafe.Pointer(pWInfo)).FnOBSat = I8((*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer((*WhereInfo)(unsafe.Pointer(pWInfo)).FpSelect)).FpOrderBy)).FnExpr) + } } else { (*WhereInfo)(unsafe.Pointer(pWInfo)).FrevMask = (*WherePath)(unsafe.Pointer(pFrom)).FrevLoop if int32((*WhereInfo)(unsafe.Pointer(pWInfo)).FnOBSat) <= 0 { @@ -105965,6 +106020,9 @@ (*IndexedExpr)(unsafe.Pointer(p)).FiIdxCur = iIdxCur (*IndexedExpr)(unsafe.Pointer(p)).FiIdxCol = i (*IndexedExpr)(unsafe.Pointer(p)).FbMaybeNullRow = U8(bMaybeNullRow) + if Xsqlite3IndexAffinityStr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pIdx) != 0 { + (*IndexedExpr)(unsafe.Pointer(p)).Faff = *(*uint8)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx)).FzColAff + uintptr(i))) + } (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr = p if (*IndexedExpr)(unsafe.Pointer(p)).FpIENext == uintptr(0) { Xsqlite3ParserAddCleanup(tls, pParse, *(*uintptr)(unsafe.Pointer(&struct { @@ -106117,7 +106175,7 @@ if !((*SrcList)(unsafe.Pointer(pTabList)).FnSrc > int32(uint64(unsafe.Sizeof(Bitmask(0)))*uint64(8))) { goto __2 } - Xsqlite3ErrorMsg(tls, pParse, ts+22022, libc.VaList(bp, int32(uint64(unsafe.Sizeof(Bitmask(0)))*uint64(8)))) + Xsqlite3ErrorMsg(tls, pParse, ts+22069, libc.VaList(bp, int32(uint64(unsafe.Sizeof(Bitmask(0)))*uint64(8)))) return uintptr(0) __2: ; @@ -106181,7 +106239,7 @@ (*WhereInfo)(unsafe.Pointer(pWInfo)).FeDistinct = U8(WHERE_DISTINCT_UNIQUE) __7: ; - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+22050, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+22097, 0) goto __5 __4: ii = 0 @@ -107063,7 +107121,7 @@ error_out: Xsqlite3_result_error(tls, - pCtx, ts+22068, -1) + pCtx, ts+22115, -1) } func nth_valueFinalizeFunc(tls *libc.TLS, pCtx uintptr) { @@ -107196,7 +107254,7 @@ (*NtileCtx)(unsafe.Pointer(p)).FnParam = Xsqlite3_value_int64(tls, *(*uintptr)(unsafe.Pointer(apArg))) if (*NtileCtx)(unsafe.Pointer(p)).FnParam <= int64(0) { Xsqlite3_result_error(tls, - pCtx, ts+22124, -1) + pCtx, ts+22171, -1) } } (*NtileCtx)(unsafe.Pointer(p)).FnTotal++ @@ -107286,17 +107344,17 @@ } } -var row_numberName = *(*[11]uint8)(unsafe.Pointer(ts + 22169)) -var dense_rankName = *(*[11]uint8)(unsafe.Pointer(ts + 22180)) -var rankName = *(*[5]uint8)(unsafe.Pointer(ts + 22191)) -var percent_rankName = *(*[13]uint8)(unsafe.Pointer(ts + 22196)) -var cume_distName = *(*[10]uint8)(unsafe.Pointer(ts + 22209)) -var ntileName = *(*[6]uint8)(unsafe.Pointer(ts + 22219)) -var last_valueName = *(*[11]uint8)(unsafe.Pointer(ts + 22225)) -var nth_valueName = *(*[10]uint8)(unsafe.Pointer(ts + 22236)) -var first_valueName = *(*[12]uint8)(unsafe.Pointer(ts + 22246)) -var leadName = *(*[5]uint8)(unsafe.Pointer(ts + 22258)) -var lagName = *(*[4]uint8)(unsafe.Pointer(ts + 22263)) +var row_numberName = *(*[11]uint8)(unsafe.Pointer(ts + 22216)) +var dense_rankName = *(*[11]uint8)(unsafe.Pointer(ts + 22227)) +var rankName = *(*[5]uint8)(unsafe.Pointer(ts + 22238)) +var percent_rankName = *(*[13]uint8)(unsafe.Pointer(ts + 22243)) +var cume_distName = *(*[10]uint8)(unsafe.Pointer(ts + 22256)) +var ntileName = *(*[6]uint8)(unsafe.Pointer(ts + 22266)) 
+var last_valueName = *(*[11]uint8)(unsafe.Pointer(ts + 22272)) +var nth_valueName = *(*[10]uint8)(unsafe.Pointer(ts + 22283)) +var first_valueName = *(*[12]uint8)(unsafe.Pointer(ts + 22293)) +var leadName = *(*[5]uint8)(unsafe.Pointer(ts + 22305)) +var lagName = *(*[4]uint8)(unsafe.Pointer(ts + 22310)) func noopStepFunc(tls *libc.TLS, p uintptr, n int32, a uintptr) { _ = p @@ -107342,7 +107400,7 @@ } } if p == uintptr(0) { - Xsqlite3ErrorMsg(tls, pParse, ts+22267, libc.VaList(bp, zName)) + Xsqlite3ErrorMsg(tls, pParse, ts+22314, libc.VaList(bp, zName)) } return p } @@ -107386,12 +107444,12 @@ ((*Window)(unsafe.Pointer(pWin)).FpStart != 0 || (*Window)(unsafe.Pointer(pWin)).FpEnd != 0) && ((*Window)(unsafe.Pointer(pWin)).FpOrderBy == uintptr(0) || (*ExprList)(unsafe.Pointer((*Window)(unsafe.Pointer(pWin)).FpOrderBy)).FnExpr != 1) { Xsqlite3ErrorMsg(tls, pParse, - ts+22286, 0) + ts+22333, 0) } else if (*FuncDef)(unsafe.Pointer(pFunc)).FfuncFlags&U32(SQLITE_FUNC_WINDOW) != 0 { var db uintptr = (*Parse)(unsafe.Pointer(pParse)).Fdb if (*Window)(unsafe.Pointer(pWin)).FpFilter != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+22357, 0) + ts+22404, 0) } else { *(*[8]WindowUpdate)(unsafe.Pointer(bp)) = [8]WindowUpdate{ {FzFunc: uintptr(unsafe.Pointer(&row_numberName)), FeFrmType: TK_ROWS, FeStart: TK_UNBOUNDED, FeEnd: TK_CURRENT}, @@ -107620,7 +107678,7 @@ if int32((*Expr)(unsafe.Pointer(pExpr)).Fop) == TK_AGG_FUNCTION && (*Expr)(unsafe.Pointer(pExpr)).FpAggInfo == uintptr(0) { Xsqlite3ErrorMsg(tls, (*Walker)(unsafe.Pointer(pWalker)).FpParse, - ts+22420, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(pExpr + 8)))) + ts+22467, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(pExpr + 8)))) } return WRC_Continue } @@ -107736,7 +107794,7 @@ if *(*uintptr)(unsafe.Pointer(bp + 48)) == uintptr(0) { *(*uintptr)(unsafe.Pointer(bp + 48)) = Xsqlite3ExprListAppend(tls, pParse, uintptr(0), - Xsqlite3Expr(tls, db, TK_INTEGER, ts+7530)) + Xsqlite3Expr(tls, db, TK_INTEGER, ts+7519)) } pSub = Xsqlite3SelectNew(tls, @@ -107851,7 +107909,7 @@ eStart == TK_FOLLOWING && (eEnd == TK_PRECEDING || eEnd == TK_CURRENT)) { goto __2 } - Xsqlite3ErrorMsg(tls, pParse, ts+22446, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+22493, 0) goto windowAllocErr __2: ; @@ -107916,15 +107974,15 @@ var zErr uintptr = uintptr(0) if (*Window)(unsafe.Pointer(pWin)).FpPartition != 0 { - zErr = ts + 22478 + zErr = ts + 22525 } else if (*Window)(unsafe.Pointer(pExist)).FpOrderBy != 0 && (*Window)(unsafe.Pointer(pWin)).FpOrderBy != 0 { - zErr = ts + 22495 + zErr = ts + 22542 } else if int32((*Window)(unsafe.Pointer(pExist)).FbImplicitFrame) == 0 { - zErr = ts + 22511 + zErr = ts + 22558 } if zErr != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+22531, libc.VaList(bp, zErr, (*Window)(unsafe.Pointer(pWin)).FzBase)) + ts+22578, libc.VaList(bp, zErr, (*Window)(unsafe.Pointer(pWin)).FzBase)) } else { (*Window)(unsafe.Pointer(pWin)).FpPartition = Xsqlite3ExprListDup(tls, db, (*Window)(unsafe.Pointer(pExist)).FpPartition, 0) if (*Window)(unsafe.Pointer(pExist)).FpOrderBy != 0 { @@ -107945,7 +108003,7 @@ (*Window)(unsafe.Pointer(pWin)).FpOwner = p if (*Expr)(unsafe.Pointer(p)).Fflags&U32(EP_Distinct) != 0 && int32((*Window)(unsafe.Pointer(pWin)).FeFrmType) != TK_FILTER { Xsqlite3ErrorMsg(tls, pParse, - ts+22564, 0) + ts+22611, 0) } } else { Xsqlite3WindowDelete(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pWin) @@ -108101,11 +108159,11 @@ } var azErr = [5]uintptr{ - ts + 22611, - ts + 22664, - ts + 22068, - ts + 22715, - ts + 22767, + ts + 22658, + ts + 22711, + ts + 22115, + ts + 22762, 
+ ts + 22814, } var aOp1 = [5]int32{OP_Ge, OP_Ge, OP_Gt, OP_Ge, OP_Ge} @@ -109500,19 +109558,19 @@ } cnt++ if (*Select)(unsafe.Pointer(pLoop)).FpOrderBy != 0 || (*Select)(unsafe.Pointer(pLoop)).FpLimit != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+22817, + Xsqlite3ErrorMsg(tls, pParse, ts+22864, libc.VaList(bp, func() uintptr { if (*Select)(unsafe.Pointer(pLoop)).FpOrderBy != uintptr(0) { - return ts + 22859 + return ts + 22906 } - return ts + 22868 + return ts + 22915 }(), Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(pNext)).Fop)))) break } } if (*Select)(unsafe.Pointer(p)).FselFlags&U32(SF_MultiValue) == U32(0) && libc.AssignInt32(&mxSelect, *(*int32)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb + 136 + 4*4))) > 0 && cnt > mxSelect { - Xsqlite3ErrorMsg(tls, pParse, ts+22874, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+22921, 0) } } @@ -109580,7 +109638,7 @@ var p uintptr = Xsqlite3ExprListAppend(tls, pParse, pPrior, uintptr(0)) if (hasCollate != 0 || sortOrder != -1) && int32((*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).Finit.Fbusy) == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+22908, + Xsqlite3ErrorMsg(tls, pParse, ts+22955, libc.VaList(bp, (*Token)(unsafe.Pointer(pIdToken)).Fn, (*Token)(unsafe.Pointer(pIdToken)).Fz)) } Xsqlite3ExprListSetName(tls, pParse, p, pIdToken, 1) @@ -110677,7 +110735,7 @@ yy_pop_parser_stack(tls, yypParser) } - Xsqlite3ErrorMsg(tls, pParse, ts+22946, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+22993, 0) (*YyParser)(unsafe.Pointer(yypParser)).FpParse = pParse } @@ -111656,7 +111714,7 @@ *(*U32)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-1)*24 + 8)) = U32(TF_WithoutRowid | TF_NoVisibleRowid) } else { *(*U32)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-1)*24 + 8)) = U32(0) - Xsqlite3ErrorMsg(tls, pParse, ts+22968, libc.VaList(bp, (*Token)(unsafe.Pointer(yymsp+8)).Fn, (*Token)(unsafe.Pointer(yymsp+8)).Fz)) + Xsqlite3ErrorMsg(tls, pParse, ts+23015, libc.VaList(bp, (*Token)(unsafe.Pointer(yymsp+8)).Fn, (*Token)(unsafe.Pointer(yymsp+8)).Fz)) } } break @@ -111666,7 +111724,7 @@ *(*U32)(unsafe.Pointer(bp + 40)) = U32(TF_Strict) } else { *(*U32)(unsafe.Pointer(bp + 40)) = U32(0) - Xsqlite3ErrorMsg(tls, pParse, ts+22968, libc.VaList(bp+16, (*Token)(unsafe.Pointer(yymsp+8)).Fn, (*Token)(unsafe.Pointer(yymsp+8)).Fz)) + Xsqlite3ErrorMsg(tls, pParse, ts+23015, libc.VaList(bp+16, (*Token)(unsafe.Pointer(yymsp+8)).Fn, (*Token)(unsafe.Pointer(yymsp+8)).Fz)) } } *(*U32)(unsafe.Pointer(yymsp + 8)) = *(*U32)(unsafe.Pointer(bp + 40)) @@ -112409,7 +112467,7 @@ case uint32(157): { Xsqlite3SrcListIndexedBy(tls, pParse, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-5)*24 + 8)), yymsp+libc.UintptrFromInt32(-4)*24+8) - Xsqlite3ExprListCheckLength(tls, pParse, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-2)*24 + 8)), ts+22995) + Xsqlite3ExprListCheckLength(tls, pParse, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-2)*24 + 8)), ts+23042) if *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-1)*24 + 8)) != 0 { var pFromClause uintptr = *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-1)*24 + 8)) if (*SrcList)(unsafe.Pointer(pFromClause)).FnSrc > 1 { @@ -112573,7 +112631,7 @@ *(*Token)(unsafe.Pointer(bp + 128)) = *(*Token)(unsafe.Pointer(yymsp + 8)) if int32((*Parse)(unsafe.Pointer(pParse)).Fnested) == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+23004, libc.VaList(bp+32, bp+128)) + Xsqlite3ErrorMsg(tls, pParse, ts+23051, libc.VaList(bp+32, bp+128)) *(*uintptr)(unsafe.Pointer(yymsp + 8)) = uintptr(0) } else { 
*(*uintptr)(unsafe.Pointer(yymsp + 8)) = Xsqlite3PExpr(tls, pParse, TK_REGISTER, uintptr(0), uintptr(0)) @@ -112790,9 +112848,9 @@ Xsqlite3ExprUnmapAndDelete(tls, pParse, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-4)*24 + 8))) *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-4)*24 + 8)) = Xsqlite3Expr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, TK_STRING, func() uintptr { if *(*int32)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-3)*24 + 8)) != 0 { - return ts + 6764 + return ts + 7694 } - return ts + 6769 + return ts + 7699 }()) if *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-4)*24 + 8)) != 0 { Xsqlite3ExprIdToTrueFalse(tls, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-4)*24 + 8))) @@ -113076,19 +113134,19 @@ { *(*Token)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-2)*24 + 8)) = *(*Token)(unsafe.Pointer(yymsp + 8)) Xsqlite3ErrorMsg(tls, pParse, - ts+23028, 0) + ts+23075, 0) } break case uint32(271): { Xsqlite3ErrorMsg(tls, pParse, - ts+23123, 0) + ts+23170, 0) } break case uint32(272): { Xsqlite3ErrorMsg(tls, pParse, - ts+23207, 0) + ts+23254, 0) } break case uint32(273): @@ -113467,9 +113525,9 @@ _ = yymajor if *(*uint8)(unsafe.Pointer((*Token)(unsafe.Pointer(bp + 8)).Fz)) != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+23004, libc.VaList(bp, bp+8)) + Xsqlite3ErrorMsg(tls, pParse, ts+23051, libc.VaList(bp, bp+8)) } else { - Xsqlite3ErrorMsg(tls, pParse, ts+23292, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+23339, 0) } (*YyParser)(unsafe.Pointer(yypParser)).FpParse = pParse @@ -114237,7 +114295,7 @@ } else { (*Token)(unsafe.Pointer(bp + 2464)).Fz = zSql (*Token)(unsafe.Pointer(bp + 2464)).Fn = uint32(n) - Xsqlite3ErrorMsg(tls, pParse, ts+23309, libc.VaList(bp, bp+2464)) + Xsqlite3ErrorMsg(tls, pParse, ts+23356, libc.VaList(bp, bp+2464)) break } } @@ -114260,7 +114318,7 @@ if (*Parse)(unsafe.Pointer(pParse)).FzErrMsg == uintptr(0) { (*Parse)(unsafe.Pointer(pParse)).FzErrMsg = Xsqlite3MPrintf(tls, db, ts+3663, libc.VaList(bp+8, Xsqlite3ErrStr(tls, (*Parse)(unsafe.Pointer(pParse)).Frc))) } - Xsqlite3_log(tls, (*Parse)(unsafe.Pointer(pParse)).Frc, ts+23334, libc.VaList(bp+16, (*Parse)(unsafe.Pointer(pParse)).FzErrMsg, (*Parse)(unsafe.Pointer(pParse)).FzTail)) + Xsqlite3_log(tls, (*Parse)(unsafe.Pointer(pParse)).Frc, ts+23381, libc.VaList(bp+16, (*Parse)(unsafe.Pointer(pParse)).FzErrMsg, (*Parse)(unsafe.Pointer(pParse)).FzTail)) nErr++ } (*Parse)(unsafe.Pointer(pParse)).FzTail = zSql @@ -114433,7 +114491,7 @@ fallthrough case 'C': { - if nId == 6 && Xsqlite3_strnicmp(tls, zSql, ts+23345, 6) == 0 { + if nId == 6 && Xsqlite3_strnicmp(tls, zSql, ts+23392, 6) == 0 { token = U8(TkCREATE) } else { token = U8(TkOTHER) @@ -114446,11 +114504,11 @@ fallthrough case 'T': { - if nId == 7 && Xsqlite3_strnicmp(tls, zSql, ts+19931, 7) == 0 { + if nId == 7 && Xsqlite3_strnicmp(tls, zSql, ts+19978, 7) == 0 { token = U8(TkTRIGGER) - } else if nId == 4 && Xsqlite3_strnicmp(tls, zSql, ts+23352, 4) == 0 { + } else if nId == 4 && Xsqlite3_strnicmp(tls, zSql, ts+23399, 4) == 0 { token = U8(TkTEMP) - } else if nId == 9 && Xsqlite3_strnicmp(tls, zSql, ts+23357, 9) == 0 { + } else if nId == 9 && Xsqlite3_strnicmp(tls, zSql, ts+23404, 9) == 0 { token = U8(TkTEMP) } else { token = U8(TkOTHER) @@ -114463,9 +114521,9 @@ fallthrough case 'E': { - if nId == 3 && Xsqlite3_strnicmp(tls, zSql, ts+23367, 3) == 0 { + if nId == 3 && Xsqlite3_strnicmp(tls, zSql, ts+23414, 3) == 0 { token = U8(TkEND) - } else if nId == 7 && Xsqlite3_strnicmp(tls, zSql, ts+23371, 7) == 0 { + } else if nId == 7 && 
Xsqlite3_strnicmp(tls, zSql, ts+23418, 7) == 0 { token = U8(TkEXPLAIN) } else { token = U8(TkOTHER) @@ -114704,7 +114762,7 @@ var rc int32 = SQLITE_OK if Xsqlite3Config.FisInit != 0 { - return Xsqlite3MisuseError(tls, 174337) + return Xsqlite3MisuseError(tls, 174426) } ap = va @@ -115279,7 +115337,7 @@ return SQLITE_OK } if !(Xsqlite3SafetyCheckSickOrOk(tls, db) != 0) { - return Xsqlite3MisuseError(tls, 175111) + return Xsqlite3MisuseError(tls, 175200) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) if int32((*Sqlite3)(unsafe.Pointer(db)).FmTrace)&SQLITE_TRACE_CLOSE != 0 { @@ -115294,7 +115352,7 @@ if !(forceZombie != 0) && connectionIsBusy(tls, db) != 0 { Xsqlite3ErrorWithMsg(tls, db, SQLITE_BUSY, - ts+23379, 0) + ts+23426, 0) Xsqlite3_mutex_leave(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) return SQLITE_BUSY } @@ -115485,23 +115543,23 @@ // Return a static string that describes the kind of error specified in the // argument. func Xsqlite3ErrStr(tls *libc.TLS, rc int32) uintptr { - var zErr uintptr = ts + 23447 + var zErr uintptr = ts + 23494 switch rc { case SQLITE_ABORT | int32(2)<<8: { - zErr = ts + 23461 + zErr = ts + 23508 break } case SQLITE_ROW: { - zErr = ts + 23483 + zErr = ts + 23530 break } case SQLITE_DONE: { - zErr = ts + 23505 + zErr = ts + 23552 break } @@ -115519,35 +115577,35 @@ } var aMsg = [29]uintptr{ - ts + 23528, - ts + 23541, + ts + 23575, + ts + 23588, uintptr(0), - ts + 23557, - ts + 23582, - ts + 23596, - ts + 23615, + ts + 23604, + ts + 23629, + ts + 23643, + ts + 23662, ts + 1490, - ts + 23640, - ts + 23677, - ts + 23689, - ts + 23704, - ts + 23737, - ts + 23755, - ts + 23780, - ts + 23809, + ts + 23687, + ts + 23724, + ts + 23736, + ts + 23751, + ts + 23784, + ts + 23802, + ts + 23827, + ts + 23856, uintptr(0), ts + 5838, ts + 5334, - ts + 23826, - ts + 23844, - ts + 23862, - uintptr(0), - ts + 23896, + ts + 23873, + ts + 23891, + ts + 23909, uintptr(0), - ts + 23917, ts + 23943, - ts + 23966, - ts + 23987, + uintptr(0), + ts + 23964, + ts + 23990, + ts + 24013, + ts + 24034, } func sqliteDefaultBusyCallback(tls *libc.TLS, ptr uintptr, count int32) int32 { @@ -115668,7 +115726,7 @@ libc.Bool32(xValue == uintptr(0)) != libc.Bool32(xInverse == uintptr(0)) || (nArg < -1 || nArg > SQLITE_MAX_FUNCTION_ARG) || 255 < Xsqlite3Strlen30(tls, zFunctionName) { - return Xsqlite3MisuseError(tls, 175758) + return Xsqlite3MisuseError(tls, 175847) } extraFlags = enc & (SQLITE_DETERMINISTIC | SQLITE_DIRECTONLY | SQLITE_SUBTYPE | SQLITE_INNOCUOUS) @@ -115718,7 +115776,7 @@ if p != 0 && (*FuncDef)(unsafe.Pointer(p)).FfuncFlags&U32(SQLITE_FUNC_ENCMASK) == U32(enc) && int32((*FuncDef)(unsafe.Pointer(p)).FnArg) == nArg { if (*Sqlite3)(unsafe.Pointer(db)).FnVdbeActive != 0 { Xsqlite3ErrorWithMsg(tls, db, SQLITE_BUSY, - ts+24003, 0) + ts+24050, 0) return SQLITE_BUSY } else { @@ -115840,7 +115898,7 @@ _ = NotUsed _ = NotUsed2 zErr = Xsqlite3_mprintf(tls, - ts+24066, libc.VaList(bp, zName)) + ts+24113, libc.VaList(bp, zName)) Xsqlite3_result_error(tls, context, zErr, -1) Xsqlite3_free(tls, zErr) } @@ -116076,7 +116134,7 @@ } if iDb < 0 { rc = SQLITE_ERROR - Xsqlite3ErrorWithMsg(tls, db, SQLITE_ERROR, ts+24117, libc.VaList(bp, zDb)) + Xsqlite3ErrorWithMsg(tls, db, SQLITE_ERROR, ts+24164, libc.VaList(bp, zDb)) } else { (*Sqlite3)(unsafe.Pointer(db)).FbusyHandler.FnBusy = 0 rc = Xsqlite3Checkpoint(tls, db, iDb, eMode, pnLog, pnCkpt) @@ -116169,7 +116227,7 @@ return Xsqlite3ErrStr(tls, SQLITE_NOMEM) } if !(Xsqlite3SafetyCheckSickOrOk(tls, db) != 0) { - return 
Xsqlite3ErrStr(tls, Xsqlite3MisuseError(tls, 176503)) + return Xsqlite3ErrStr(tls, Xsqlite3MisuseError(tls, 176592)) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) if (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { @@ -116239,7 +116297,7 @@ // passed to this function, we assume a malloc() failed during sqlite3_open(). func Xsqlite3_errcode(tls *libc.TLS, db uintptr) int32 { if db != 0 && !(Xsqlite3SafetyCheckSickOrOk(tls, db) != 0) { - return Xsqlite3MisuseError(tls, 176582) + return Xsqlite3MisuseError(tls, 176671) } if !(db != 0) || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { return SQLITE_NOMEM @@ -116249,7 +116307,7 @@ func Xsqlite3_extended_errcode(tls *libc.TLS, db uintptr) int32 { if db != 0 && !(Xsqlite3SafetyCheckSickOrOk(tls, db) != 0) { - return Xsqlite3MisuseError(tls, 176591) + return Xsqlite3MisuseError(tls, 176680) } if !(db != 0) || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { return SQLITE_NOMEM @@ -116286,14 +116344,14 @@ }() } if enc2 < SQLITE_UTF8 || enc2 > SQLITE_UTF16BE { - return Xsqlite3MisuseError(tls, 176639) + return Xsqlite3MisuseError(tls, 176728) } pColl = Xsqlite3FindCollSeq(tls, db, U8(enc2), zName, 0) if pColl != 0 && (*CollSeq)(unsafe.Pointer(pColl)).FxCmp != 0 { if (*Sqlite3)(unsafe.Pointer(db)).FnVdbeActive != 0 { Xsqlite3ErrorWithMsg(tls, db, SQLITE_BUSY, - ts+24138, 0) + ts+24185, 0) return SQLITE_BUSY } Xsqlite3ExpirePreparedStatements(tls, db, 0) @@ -116423,7 +116481,7 @@ if !((flags&uint32(SQLITE_OPEN_URI) != 0 || Xsqlite3Config.FbOpenUri != 0) && - nUri >= 5 && libc.Xmemcmp(tls, zUri, ts+24206, uint64(5)) == 0) { + nUri >= 5 && libc.Xmemcmp(tls, zUri, ts+24253, uint64(5)) == 0) { goto __1 } iOut = 0 @@ -116468,10 +116526,10 @@ goto __8 __9: ; - if !(iIn != 7 && (iIn != 16 || libc.Xmemcmp(tls, ts+24212, zUri+7, uint64(9)) != 0)) { + if !(iIn != 7 && (iIn != 16 || libc.Xmemcmp(tls, ts+24259, zUri+7, uint64(9)) != 0)) { goto __10 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24222, + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24269, libc.VaList(bp, iIn-7, zUri+7)) rc = SQLITE_ERROR goto parse_uri_out @@ -116576,7 +116634,7 @@ zVal = zOpt + uintptr(nOpt+1) nVal = Xsqlite3Strlen30(tls, zVal) - if !(nOpt == 3 && libc.Xmemcmp(tls, ts+24250, zOpt, uint64(3)) == 0) { + if !(nOpt == 3 && libc.Xmemcmp(tls, ts+24297, zOpt, uint64(3)) == 0) { goto __29 } zVfs = zVal @@ -116587,17 +116645,17 @@ mask = 0 limit = 0 - if !(nOpt == 5 && libc.Xmemcmp(tls, ts+24254, zOpt, uint64(5)) == 0) { + if !(nOpt == 5 && libc.Xmemcmp(tls, ts+24301, zOpt, uint64(5)) == 0) { goto __31 } mask = SQLITE_OPEN_SHAREDCACHE | SQLITE_OPEN_PRIVATECACHE aMode = uintptr(unsafe.Pointer(&aCacheMode)) limit = mask - zModeType = ts + 24254 + zModeType = ts + 24301 __31: ; - if !(nOpt == 4 && libc.Xmemcmp(tls, ts+24260, zOpt, uint64(4)) == 0) { + if !(nOpt == 4 && libc.Xmemcmp(tls, ts+24307, zOpt, uint64(4)) == 0) { goto __32 } @@ -116635,7 +116693,7 @@ if !(mode == 0) { goto __38 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24265, libc.VaList(bp+16, zModeType, zVal)) + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24312, libc.VaList(bp+16, zModeType, zVal)) rc = SQLITE_ERROR goto parse_uri_out __38: @@ -116643,7 +116701,7 @@ if !(mode&libc.CplInt32(SQLITE_OPEN_MEMORY) > limit) { goto __39 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24285, + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24332, libc.VaList(bp+32, zModeType, 
zVal)) rc = SQLITE_PERM goto parse_uri_out @@ -116683,7 +116741,7 @@ if !(*(*uintptr)(unsafe.Pointer(ppVfs)) == uintptr(0)) { goto __42 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24309, libc.VaList(bp+48, zVfs)) + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24356, libc.VaList(bp+48, zVfs)) rc = SQLITE_ERROR __42: ; @@ -116707,14 +116765,14 @@ } var aCacheMode = [3]OpenMode{ - {Fz: ts + 24325, Fmode: SQLITE_OPEN_SHAREDCACHE}, - {Fz: ts + 24332, Fmode: SQLITE_OPEN_PRIVATECACHE}, + {Fz: ts + 24372, Fmode: SQLITE_OPEN_SHAREDCACHE}, + {Fz: ts + 24379, Fmode: SQLITE_OPEN_PRIVATECACHE}, {}, } var aOpenMode = [5]OpenMode{ - {Fz: ts + 24340, Fmode: SQLITE_OPEN_READONLY}, - {Fz: ts + 24343, Fmode: SQLITE_OPEN_READWRITE}, - {Fz: ts + 24346, Fmode: SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE}, + {Fz: ts + 24387, Fmode: SQLITE_OPEN_READONLY}, + {Fz: ts + 24390, Fmode: SQLITE_OPEN_READWRITE}, + {Fz: ts + 24393, Fmode: SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE}, {Fz: ts + 17362, Fmode: SQLITE_OPEN_MEMORY}, {}, } @@ -116861,10 +116919,10 @@ createCollation(tls, db, uintptr(unsafe.Pointer(&Xsqlite3StrBINARY)), uint8(SQLITE_UTF16LE), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr, int32, uintptr) int32 }{binCollFunc})), uintptr(0)) - createCollation(tls, db, ts+21863, uint8(SQLITE_UTF8), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + createCollation(tls, db, ts+21910, uint8(SQLITE_UTF8), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr, int32, uintptr) int32 }{nocaseCollatingFunc})), uintptr(0)) - createCollation(tls, db, ts+24350, uint8(SQLITE_UTF8), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + createCollation(tls, db, ts+24397, uint8(SQLITE_UTF8), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr, int32, uintptr) int32 }{rtrimCollFunc})), uintptr(0)) if !((*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0) { @@ -116878,7 +116936,7 @@ if !(int32(1)<<(*(*uint32)(unsafe.Pointer(bp + 8))&uint32(7))&0x46 == 0) { goto __16 } - rc = Xsqlite3MisuseError(tls, 177308) + rc = Xsqlite3MisuseError(tls, 177397) goto __17 __16: rc = Xsqlite3ParseUri(tls, zVfs, zFilename, bp+8, db, bp+16, bp+24) @@ -116931,7 +116989,7 @@ (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb)).FzDbSName = ts + 6441 (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb)).Fsafety_level = U8(SQLITE_DEFAULT_SYNCHRONOUS + 1) - (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + 1*32)).FzDbSName = ts + 23352 + (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + 1*32)).FzDbSName = ts + 23399 (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + 1*32)).Fsafety_level = U8(PAGER_SYNCHRONOUS_OFF) (*Sqlite3)(unsafe.Pointer(db)).FeOpenState = U8(SQLITE_STATE_OPEN) @@ -117036,7 +117094,7 @@ return rc } if zFilename == uintptr(0) { - zFilename = ts + 24356 + zFilename = ts + 24403 } pVal = Xsqlite3ValueNew(tls, uintptr(0)) Xsqlite3ValueSetStr(tls, pVal, -1, zFilename, func() uint8 { @@ -117154,21 +117212,21 @@ bp := tls.Alloc(24) defer tls.Free(24) - Xsqlite3_log(tls, iErr, ts+24359, + Xsqlite3_log(tls, iErr, ts+24406, libc.VaList(bp, zType, lineno, uintptr(20)+Xsqlite3_sourceid(tls))) return iErr } func Xsqlite3CorruptError(tls *libc.TLS, lineno int32) int32 { - return Xsqlite3ReportError(tls, SQLITE_CORRUPT, lineno, ts+24384) + return Xsqlite3ReportError(tls, SQLITE_CORRUPT, lineno, ts+24431) } func Xsqlite3MisuseError(tls *libc.TLS, lineno int32) 
int32 { - return Xsqlite3ReportError(tls, SQLITE_MISUSE, lineno, ts+24404) + return Xsqlite3ReportError(tls, SQLITE_MISUSE, lineno, ts+24451) } func Xsqlite3CantopenError(tls *libc.TLS, lineno int32) int32 { - return Xsqlite3ReportError(tls, SQLITE_CANTOPEN, lineno, ts+24411) + return Xsqlite3ReportError(tls, SQLITE_CANTOPEN, lineno, ts+24458) } // This is a convenience routine that makes sure that all thread-specific @@ -117326,7 +117384,7 @@ goto __20 } Xsqlite3DbFree(tls, db, *(*uintptr)(unsafe.Pointer(bp + 24))) - *(*uintptr)(unsafe.Pointer(bp + 24)) = Xsqlite3MPrintf(tls, db, ts+24428, libc.VaList(bp, zTableName, + *(*uintptr)(unsafe.Pointer(bp + 24)) = Xsqlite3MPrintf(tls, db, ts+24475, libc.VaList(bp, zTableName, zColumnName)) rc = SQLITE_ERROR __20: @@ -117982,7 +118040,7 @@ azCompileOpt = Xsqlite3CompileOptions(tls, bp) - if Xsqlite3_strnicmp(tls, zOptName, ts+24456, 7) == 0 { + if Xsqlite3_strnicmp(tls, zOptName, ts+24503, 7) == 0 { zOptName += uintptr(7) } n = Xsqlite3Strlen30(tls, zOptName) @@ -118100,7 +118158,7 @@ Xsqlite3ErrorWithMsg(tls, db, rc, func() uintptr { if rc != 0 { - return ts + 24464 + return ts + 24511 } return uintptr(0) }(), 0) @@ -118278,7 +118336,7 @@ type JsonParse = JsonParse1 var jsonType = [8]uintptr{ - ts + 6181, ts + 6764, ts + 6769, ts + 6191, ts + 6186, ts + 8005, ts + 24487, ts + 24493, + ts + 6181, ts + 7694, ts + 7699, ts + 6191, ts + 6186, ts + 8005, ts + 24534, ts + 24540, } func jsonZero(tls *libc.TLS, p uintptr) { @@ -118431,7 +118489,7 @@ *(*uint8)(unsafe.Pointer((*JsonString)(unsafe.Pointer(p)).FzBuf + uintptr(libc.PostIncUint64(&(*JsonString)(unsafe.Pointer(p)).FnUsed, 1)))) = uint8('0') *(*uint8)(unsafe.Pointer((*JsonString)(unsafe.Pointer(p)).FzBuf + uintptr(libc.PostIncUint64(&(*JsonString)(unsafe.Pointer(p)).FnUsed, 1)))) = uint8('0') *(*uint8)(unsafe.Pointer((*JsonString)(unsafe.Pointer(p)).FzBuf + uintptr(libc.PostIncUint64(&(*JsonString)(unsafe.Pointer(p)).FnUsed, 1)))) = uint8('0' + int32(c)>>4) - c = *(*uint8)(unsafe.Pointer(ts + 24500 + uintptr(int32(c)&0xf))) + c = *(*uint8)(unsafe.Pointer(ts + 24547 + uintptr(int32(c)&0xf))) __8: ; __6: @@ -118486,7 +118544,7 @@ default: { if int32((*JsonString)(unsafe.Pointer(p)).FbErr) == 0 { - Xsqlite3_result_error(tls, (*JsonString)(unsafe.Pointer(p)).FpCtx, ts+24517, -1) + Xsqlite3_result_error(tls, (*JsonString)(unsafe.Pointer(p)).FpCtx, ts+24564, -1) (*JsonString)(unsafe.Pointer(p)).FbErr = U8(2) jsonReset(tls, p) } @@ -118550,13 +118608,13 @@ } case JSON_TRUE: { - jsonAppendRaw(tls, pOut, ts+6764, uint32(4)) + jsonAppendRaw(tls, pOut, ts+7694, uint32(4)) break } case JSON_FALSE: { - jsonAppendRaw(tls, pOut, ts+6769, uint32(5)) + jsonAppendRaw(tls, pOut, ts+7699, uint32(5)) break } @@ -119106,12 +119164,12 @@ jsonParseAddNode(tls, pParse, uint32(JSON_NULL), uint32(0), uintptr(0)) return int32(i + U32(4)) } else if int32(c) == 't' && - libc.Xstrncmp(tls, z+uintptr(i), ts+6764, uint64(4)) == 0 && + libc.Xstrncmp(tls, z+uintptr(i), ts+7694, uint64(4)) == 0 && !(int32(Xsqlite3CtypeMap[*(*uint8)(unsafe.Pointer(z + uintptr(i+U32(4))))])&0x06 != 0) { jsonParseAddNode(tls, pParse, uint32(JSON_TRUE), uint32(0), uintptr(0)) return int32(i + U32(4)) } else if int32(c) == 'f' && - libc.Xstrncmp(tls, z+uintptr(i), ts+6769, uint64(5)) == 0 && + libc.Xstrncmp(tls, z+uintptr(i), ts+7699, uint64(5)) == 0 && !(int32(Xsqlite3CtypeMap[*(*uint8)(unsafe.Pointer(z + uintptr(i+U32(5))))])&0x06 != 0) { jsonParseAddNode(tls, pParse, uint32(JSON_FALSE), uint32(0), uintptr(0)) return int32(i + U32(5)) @@ -119212,7 
+119270,7 @@ if (*JsonParse)(unsafe.Pointer(pParse)).Foom != 0 { Xsqlite3_result_error_nomem(tls, pCtx) } else { - Xsqlite3_result_error(tls, pCtx, ts+24546, -1) + Xsqlite3_result_error(tls, pCtx, ts+24593, -1) } } jsonParseReset(tls, pParse) @@ -119518,7 +119576,7 @@ } if int32(*(*uint8)(unsafe.Pointer(zPath))) == '.' { jsonParseAddNode(tls, pParse, uint32(JSON_OBJECT), uint32(0), uintptr(0)) - } else if libc.Xstrncmp(tls, zPath, ts+24561, uint64(3)) == 0 { + } else if libc.Xstrncmp(tls, zPath, ts+24608, uint64(3)) == 0 { jsonParseAddNode(tls, pParse, uint32(JSON_ARRAY), uint32(0), uintptr(0)) } else { return uintptr(0) @@ -119533,7 +119591,7 @@ bp := tls.Alloc(8) defer tls.Free(8) - return Xsqlite3_mprintf(tls, ts+24565, libc.VaList(bp, zErr)) + return Xsqlite3_mprintf(tls, ts+24612, libc.VaList(bp, zErr)) } func jsonLookup(tls *libc.TLS, pParse uintptr, zPath uintptr, pApnd uintptr, pCtx uintptr) uintptr { @@ -119587,7 +119645,7 @@ bp := tls.Alloc(8) defer tls.Free(8) - var zMsg uintptr = Xsqlite3_mprintf(tls, ts+24591, + var zMsg uintptr = Xsqlite3_mprintf(tls, ts+24638, libc.VaList(bp, zFuncName)) Xsqlite3_result_error(tls, pCtx, zMsg, -1) Xsqlite3_free(tls, zMsg) @@ -119692,11 +119750,11 @@ if int32(*(*uint8)(unsafe.Pointer(zPath))) != '$' { jsonInit(tls, bp, ctx) if int32(Xsqlite3CtypeMap[*(*uint8)(unsafe.Pointer(zPath))])&0x04 != 0 { - jsonAppendRaw(tls, bp, ts+24634, uint32(2)) + jsonAppendRaw(tls, bp, ts+24681, uint32(2)) jsonAppendRaw(tls, bp, zPath, uint32(int32(libc.Xstrlen(tls, zPath)))) jsonAppendRaw(tls, bp, ts+4998, uint32(2)) } else { - jsonAppendRaw(tls, bp, ts+24637, uint32(1+libc.Bool32(int32(*(*uint8)(unsafe.Pointer(zPath))) != '['))) + jsonAppendRaw(tls, bp, ts+24684, uint32(1+libc.Bool32(int32(*(*uint8)(unsafe.Pointer(zPath))) != '['))) jsonAppendRaw(tls, bp, zPath, uint32(int32(libc.Xstrlen(tls, zPath)))) jsonAppendChar(tls, bp, uint8(0)) } @@ -119853,14 +119911,14 @@ if argc&1 != 0 { Xsqlite3_result_error(tls, ctx, - ts+24640, -1) + ts+24687, -1) return } jsonInit(tls, bp, ctx) jsonAppendChar(tls, bp, uint8('{')) for i = 0; i < argc; i = i + 2 { if Xsqlite3_value_type(tls, *(*uintptr)(unsafe.Pointer(argv + uintptr(i)*8))) != SQLITE_TEXT { - Xsqlite3_result_error(tls, ctx, ts+24691, -1) + Xsqlite3_result_error(tls, ctx, ts+24738, -1) jsonReset(tls, bp) return } @@ -120030,9 +120088,9 @@ } jsonWrongNumArgs(tls, ctx, func() uintptr { if bIsSet != 0 { - return ts + 24725 + return ts + 24772 } - return ts + 24729 + return ts + 24776 }()) return __2: @@ -120165,7 +120223,7 @@ (*JsonString)(unsafe.Pointer(pStr)).FnUsed-- } } else { - Xsqlite3_result_text(tls, ctx, ts+24736, 2, uintptr(0)) + Xsqlite3_result_text(tls, ctx, ts+24783, 2, uintptr(0)) } Xsqlite3_result_subtype(tls, ctx, uint32(JSON_SUBTYPE)) } @@ -120262,7 +120320,7 @@ (*JsonString)(unsafe.Pointer(pStr)).FnUsed-- } } else { - Xsqlite3_result_text(tls, ctx, ts+24739, 2, uintptr(0)) + Xsqlite3_result_text(tls, ctx, ts+24786, 2, uintptr(0)) } Xsqlite3_result_subtype(tls, ctx, uint32(JSON_SUBTYPE)) } @@ -120306,7 +120364,7 @@ _ = argc _ = pAux rc = Xsqlite3_declare_vtab(tls, db, - ts+24742) + ts+24789) if rc == SQLITE_OK { pNew = libc.AssignPtrUintptr(ppVtab, Xsqlite3_malloc(tls, int32(unsafe.Sizeof(Sqlite3_vtab{})))) if pNew == uintptr(0) { @@ -120437,7 +120495,7 @@ nn = nn - 2 } } - jsonPrintf(tls, nn+2, pStr, ts+24825, libc.VaList(bp, nn, z)) + jsonPrintf(tls, nn+2, pStr, ts+24872, libc.VaList(bp, nn, z)) } func jsonEachComputePath(tls *libc.TLS, p uintptr, pStr uintptr, i U32) { @@ -120456,7 +120514,7 @@ pNode 
= (*JsonEachCursor)(unsafe.Pointer(p)).FsParse.FaNode + uintptr(i)*16 pUp = (*JsonEachCursor)(unsafe.Pointer(p)).FsParse.FaNode + uintptr(iUp)*16 if int32((*JsonNode)(unsafe.Pointer(pUp)).FeType) == JSON_ARRAY { - jsonPrintf(tls, 30, pStr, ts+24831, libc.VaList(bp, *(*U32)(unsafe.Pointer(pUp + 8)))) + jsonPrintf(tls, 30, pStr, ts+24878, libc.VaList(bp, *(*U32)(unsafe.Pointer(pUp + 8)))) } else { if int32((*JsonNode)(unsafe.Pointer(pNode)).FjnFlags)&JNODE_LABEL == 0 { pNode -= 16 @@ -120552,7 +120610,7 @@ jsonAppendChar(tls, bp+8, uint8('$')) } if int32((*JsonEachCursor)(unsafe.Pointer(p)).FeType) == JSON_ARRAY { - jsonPrintf(tls, 30, bp+8, ts+24831, libc.VaList(bp, (*JsonEachCursor)(unsafe.Pointer(p)).FiRowid)) + jsonPrintf(tls, 30, bp+8, ts+24878, libc.VaList(bp, (*JsonEachCursor)(unsafe.Pointer(p)).FiRowid)) } else if int32((*JsonEachCursor)(unsafe.Pointer(p)).FeType) == JSON_OBJECT { jsonAppendObjectPathElement(tls, bp+8, pThis) } @@ -120576,7 +120634,7 @@ { var zRoot uintptr = (*JsonEachCursor)(unsafe.Pointer(p)).FzRoot if zRoot == uintptr(0) { - zRoot = ts + 24836 + zRoot = ts + 24883 } Xsqlite3_result_text(tls, ctx, zRoot, -1, uintptr(0)) break @@ -120698,7 +120756,7 @@ var rc int32 = SQLITE_NOMEM if int32((*JsonEachCursor)(unsafe.Pointer(p)).FsParse.Foom) == 0 { Xsqlite3_free(tls, (*Sqlite3_vtab)(unsafe.Pointer((*Sqlite3_vtab_cursor)(unsafe.Pointer(cur)).FpVtab)).FzErrMsg) - (*Sqlite3_vtab)(unsafe.Pointer((*Sqlite3_vtab_cursor)(unsafe.Pointer(cur)).FpVtab)).FzErrMsg = Xsqlite3_mprintf(tls, ts+24546, 0) + (*Sqlite3_vtab)(unsafe.Pointer((*Sqlite3_vtab_cursor)(unsafe.Pointer(cur)).FpVtab)).FzErrMsg = Xsqlite3_mprintf(tls, ts+24593, 0) if (*Sqlite3_vtab)(unsafe.Pointer((*Sqlite3_vtab_cursor)(unsafe.Pointer(cur)).FpVtab)).FzErrMsg != 0 { rc = SQLITE_ERROR } @@ -120793,25 +120851,25 @@ } var aJsonFunc = [19]FuncDef{ - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24838}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24843}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24854}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24854}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24872}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_JSON)), FxSFunc: 0, FzName: ts + 24885}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_SQL)), FxSFunc: 0, FzName: ts + 24888}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24892}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24904}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24916}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), 
FxSFunc: 0, FzName: ts + 24927}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24938}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24950}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_ISSET)), FxSFunc: 0, FzName: ts + 24963}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24972}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24972}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24982}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_UTF8 | 0*SQLITE_FUNC_NEEDCOLL | SQLITE_SUBTYPE | SQLITE_UTF8 | SQLITE_DETERMINISTIC), FxSFunc: 0, FxFinalize: 0, FxValue: 0, FxInverse: 0, FzName: ts + 24993}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_UTF8 | 0*SQLITE_FUNC_NEEDCOLL | SQLITE_SUBTYPE | SQLITE_UTF8 | SQLITE_DETERMINISTIC), FxSFunc: 0, FxFinalize: 0, FxValue: 0, FxInverse: 0, FzName: ts + 25010}} + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24885}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24890}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24901}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24901}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24919}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_JSON)), FxSFunc: 0, FzName: ts + 24932}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_SQL)), FxSFunc: 0, FzName: ts + 24935}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24939}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24951}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24963}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24974}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24985}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24997}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | 
SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_ISSET)), FxSFunc: 0, FzName: ts + 25010}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25019}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25019}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25029}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_UTF8 | 0*SQLITE_FUNC_NEEDCOLL | SQLITE_SUBTYPE | SQLITE_UTF8 | SQLITE_DETERMINISTIC), FxSFunc: 0, FxFinalize: 0, FxValue: 0, FxInverse: 0, FzName: ts + 25040}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_UTF8 | 0*SQLITE_FUNC_NEEDCOLL | SQLITE_SUBTYPE | SQLITE_UTF8 | SQLITE_DETERMINISTIC), FxSFunc: 0, FxFinalize: 0, FxValue: 0, FxInverse: 0, FzName: ts + 25057}} // Register the JSON table-valued functions func Xsqlite3JsonTableFunctions(tls *libc.TLS, db uintptr) int32 { @@ -120830,8 +120888,8 @@ FzName uintptr FpModule uintptr }{ - {FzName: ts + 25028, FpModule: 0}, - {FzName: ts + 25038, FpModule: 0}, + {FzName: ts + 25075, FpModule: 0}, + {FzName: ts + 25085, FpModule: 0}, } type Rtree1 = struct { @@ -121091,11 +121149,11 @@ } } if (*Rtree)(unsafe.Pointer(pRtree)).FpNodeBlob == uintptr(0) { - var zTab uintptr = Xsqlite3_mprintf(tls, ts+25048, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) + var zTab uintptr = Xsqlite3_mprintf(tls, ts+25095, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) if zTab == uintptr(0) { return SQLITE_NOMEM } - rc = Xsqlite3_blob_open(tls, (*Rtree)(unsafe.Pointer(pRtree)).Fdb, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, zTab, ts+25056, iNode, 0, + rc = Xsqlite3_blob_open(tls, (*Rtree)(unsafe.Pointer(pRtree)).Fdb, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, zTab, ts+25103, iNode, 0, pRtree+112) Xsqlite3_free(tls, zTab) } @@ -121306,7 +121364,7 @@ var pRtree uintptr = pVtab var rc int32 var zCreate uintptr = Xsqlite3_mprintf(tls, - ts+25061, + ts+25108, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) @@ -122004,7 +122062,7 @@ var pSrc uintptr var pInfo uintptr - pSrc = Xsqlite3_value_pointer(tls, pValue, ts+25143) + pSrc = Xsqlite3_value_pointer(tls, pValue, ts+25190) if pSrc == uintptr(0) { return SQLITE_ERROR } @@ -123345,7 +123403,7 @@ var zSql uintptr var rc int32 - zSql = Xsqlite3_mprintf(tls, ts+25157, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) + zSql = Xsqlite3_mprintf(tls, ts+25204, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) if zSql != 0 { rc = Xsqlite3_prepare_v2(tls, (*Rtree)(unsafe.Pointer(pRtree)).Fdb, zSql, -1, bp+56, uintptr(0)) } else { @@ -123357,12 +123415,12 @@ if iCol == 0 { var zCol uintptr = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), 0) (*Rtree)(unsafe.Pointer(pRtree)).Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+25177, libc.VaList(bp+16, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zCol)) + ts+25224, libc.VaList(bp+16, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zCol)) } else { var zCol1 uintptr = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), iCol) var zCol2 
uintptr = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), iCol+1) (*Rtree)(unsafe.Pointer(pRtree)).Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+25209, libc.VaList(bp+32, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zCol1, zCol2)) + ts+25256, libc.VaList(bp+32, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zCol1, zCol2)) } } @@ -123588,7 +123646,7 @@ var pRtree uintptr = pVtab var rc int32 = SQLITE_NOMEM var zSql uintptr = Xsqlite3_mprintf(tls, - ts+25246, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName)) + ts+25293, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName)) if zSql != 0 { nodeBlobReset(tls, pRtree) rc = Xsqlite3_exec(tls, (*Rtree)(unsafe.Pointer(pRtree)).Fdb, zSql, uintptr(0), uintptr(0), uintptr(0)) @@ -123611,7 +123669,7 @@ bp := tls.Alloc(24) defer tls.Free(24) - var zFmt uintptr = ts + 25391 + var zFmt uintptr = ts + 25438 var zSql uintptr var rc int32 @@ -123659,7 +123717,7 @@ } var azName1 = [3]uintptr{ - ts + 25447, ts + 5060, ts + 16267, + ts + 25494, ts + 5060, ts + 16267, } var rtreeModule = Sqlite3_module{ @@ -123702,19 +123760,19 @@ var p uintptr = Xsqlite3_str_new(tls, db) var ii int32 Xsqlite3_str_appendf(tls, p, - ts+25452, + ts+25499, libc.VaList(bp, zDb, zPrefix)) for ii = 0; ii < int32((*Rtree)(unsafe.Pointer(pRtree)).FnAux); ii++ { - Xsqlite3_str_appendf(tls, p, ts+25514, libc.VaList(bp+16, ii)) + Xsqlite3_str_appendf(tls, p, ts+25561, libc.VaList(bp+16, ii)) } Xsqlite3_str_appendf(tls, p, - ts+25519, + ts+25566, libc.VaList(bp+24, zDb, zPrefix)) Xsqlite3_str_appendf(tls, p, - ts+25583, + ts+25630, libc.VaList(bp+40, zDb, zPrefix)) Xsqlite3_str_appendf(tls, p, - ts+25653, + ts+25700, libc.VaList(bp+56, zDb, zPrefix, (*Rtree)(unsafe.Pointer(pRtree)).FiNodeSize)) zCreate = Xsqlite3_str_finish(tls, p) if !(zCreate != 0) { @@ -123743,7 +123801,7 @@ if i != 3 || int32((*Rtree)(unsafe.Pointer(pRtree)).FnAux) == 0 { zFormat = azSql[i] } else { - zFormat = ts + 25702 + zFormat = ts + 25749 } zSql = Xsqlite3_mprintf(tls, zFormat, libc.VaList(bp+80, zDb, zPrefix)) if zSql != 0 { @@ -123755,7 +123813,7 @@ } if (*Rtree)(unsafe.Pointer(pRtree)).FnAux != 0 { (*Rtree)(unsafe.Pointer(pRtree)).FzReadAuxSql = Xsqlite3_mprintf(tls, - ts+25810, + ts+25857, libc.VaList(bp+96, zDb, zPrefix)) if (*Rtree)(unsafe.Pointer(pRtree)).FzReadAuxSql == uintptr(0) { rc = SQLITE_NOMEM @@ -123763,18 +123821,18 @@ var p uintptr = Xsqlite3_str_new(tls, db) var ii int32 var zSql uintptr - Xsqlite3_str_appendf(tls, p, ts+25855, libc.VaList(bp+112, zDb, zPrefix)) + Xsqlite3_str_appendf(tls, p, ts+25902, libc.VaList(bp+112, zDb, zPrefix)) for ii = 0; ii < int32((*Rtree)(unsafe.Pointer(pRtree)).FnAux); ii++ { if ii != 0 { Xsqlite3_str_append(tls, p, ts+12767, 1) } if ii < int32((*Rtree)(unsafe.Pointer(pRtree)).FnAuxNotNull) { - Xsqlite3_str_appendf(tls, p, ts+25882, libc.VaList(bp+128, ii, ii+2, ii)) + Xsqlite3_str_appendf(tls, p, ts+25929, libc.VaList(bp+128, ii, ii+2, ii)) } else { - Xsqlite3_str_appendf(tls, p, ts+25904, libc.VaList(bp+152, ii, ii+2)) + Xsqlite3_str_appendf(tls, p, ts+25951, libc.VaList(bp+152, ii, ii+2)) } } - 
Xsqlite3_str_appendf(tls, p, ts+25912, 0) + Xsqlite3_str_appendf(tls, p, ts+25959, 0) zSql = Xsqlite3_str_finish(tls, p) if zSql == uintptr(0) { rc = SQLITE_NOMEM @@ -123789,14 +123847,14 @@ } var azSql = [8]uintptr{ - ts + 25928, - ts + 25981, - ts + 26026, - ts + 26078, - ts + 26132, - ts + 26177, - ts + 26235, - ts + 26290, + ts + 25975, + ts + 26028, + ts + 26073, + ts + 26125, + ts + 26179, + ts + 26224, + ts + 26282, + ts + 26337, } func getIntFromStmt(tls *libc.TLS, db uintptr, zSql uintptr, piVal uintptr) int32 { @@ -123825,7 +123883,7 @@ var zSql uintptr if isCreate != 0 { *(*int32)(unsafe.Pointer(bp + 48)) = 0 - zSql = Xsqlite3_mprintf(tls, ts+26337, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb)) + zSql = Xsqlite3_mprintf(tls, ts+26384, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb)) rc = getIntFromStmt(tls, db, zSql, bp+48) if rc == SQLITE_OK { (*Rtree)(unsafe.Pointer(pRtree)).FiNodeSize = *(*int32)(unsafe.Pointer(bp + 48)) - 64 @@ -123837,7 +123895,7 @@ } } else { zSql = Xsqlite3_mprintf(tls, - ts+26357, + ts+26404, libc.VaList(bp+16, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) rc = getIntFromStmt(tls, db, zSql, pRtree+32) if rc != SQLITE_OK { @@ -123845,7 +123903,7 @@ } else if (*Rtree)(unsafe.Pointer(pRtree)).FiNodeSize < 512-64 { rc = SQLITE_CORRUPT | int32(1)<<8 - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+26414, + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+26461, libc.VaList(bp+40, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) } } @@ -123887,10 +123945,10 @@ ii = 4 *(*[5]uintptr)(unsafe.Pointer(bp + 96)) = [5]uintptr{ uintptr(0), - ts + 26449, - ts + 26492, - ts + 26527, - ts + 26563, + ts + 26496, + ts + 26539, + ts + 26574, + ts + 26610, } if !(argc < 6 || argc > RTREE_MAX_AUX_COLUMN+3) { @@ -123921,7 +123979,7 @@ libc.Xmemcpy(tls, (*Rtree)(unsafe.Pointer(pRtree)).FzName, *(*uintptr)(unsafe.Pointer(argv + 2*8)), uint64(nName)) pSql = Xsqlite3_str_new(tls, db) - Xsqlite3_str_appendf(tls, pSql, ts+26600, + Xsqlite3_str_appendf(tls, pSql, ts+26647, libc.VaList(bp+16, rtreeTokenLength(tls, *(*uintptr)(unsafe.Pointer(argv + 3*8))), *(*uintptr)(unsafe.Pointer(argv + 3*8)))) ii = 4 __3: @@ -123933,7 +123991,7 @@ goto __6 } (*Rtree)(unsafe.Pointer(pRtree)).FnAux++ - Xsqlite3_str_appendf(tls, pSql, ts+26624, libc.VaList(bp+32, rtreeTokenLength(tls, zArg+uintptr(1)), zArg+uintptr(1))) + Xsqlite3_str_appendf(tls, pSql, ts+26671, libc.VaList(bp+32, rtreeTokenLength(tls, zArg+uintptr(1)), zArg+uintptr(1))) goto __7 __6: if !(int32((*Rtree)(unsafe.Pointer(pRtree)).FnAux) > 0) { @@ -123956,7 +124014,7 @@ goto __5 __5: ; - Xsqlite3_str_appendf(tls, pSql, ts+26630, 0) + Xsqlite3_str_appendf(tls, pSql, ts+26677, 0) zSql = Xsqlite3_str_finish(tls, pSql) if !!(zSql != 0) { goto __10 @@ -124052,7 +124110,7 @@ return rc } -var azFormat = [2]uintptr{ts + 26633, ts + 26644} +var azFormat = [2]uintptr{ts + 26680, ts + 26691} func rtreenode(tls *libc.TLS, ctx uintptr, nArg int32, apArg uintptr) { bp := tls.Alloc(1072) @@ -124092,11 +124150,11 @@ if ii > 0 { Xsqlite3_str_append(tls, pOut, ts+10920, 1) } - Xsqlite3_str_appendf(tls, pOut, ts+26654, libc.VaList(bp, (*RtreeCell)(unsafe.Pointer(bp+1024)).FiRowid)) + Xsqlite3_str_appendf(tls, pOut, ts+26701, libc.VaList(bp, (*RtreeCell)(unsafe.Pointer(bp+1024)).FiRowid)) for jj = 0; jj < int32((*Rtree)(unsafe.Pointer(bp+56)).FnDim2); jj++ { - Xsqlite3_str_appendf(tls, pOut, ts+26660, libc.VaList(bp+8, float64(*(*RtreeValue)(unsafe.Pointer(bp + 1024 + 8 + 
uintptr(jj)*4))))) + Xsqlite3_str_appendf(tls, pOut, ts+26707, libc.VaList(bp+8, float64(*(*RtreeValue)(unsafe.Pointer(bp + 1024 + 8 + uintptr(jj)*4))))) } - Xsqlite3_str_append(tls, pOut, ts+26664, 1) + Xsqlite3_str_append(tls, pOut, ts+26711, 1) } errCode = Xsqlite3_str_errcode(tls, pOut) Xsqlite3_result_text(tls, ctx, Xsqlite3_str_finish(tls, pOut), -1, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{Xsqlite3_free}))) @@ -124107,7 +124165,7 @@ _ = nArg if Xsqlite3_value_type(tls, *(*uintptr)(unsafe.Pointer(apArg))) != SQLITE_BLOB || Xsqlite3_value_bytes(tls, *(*uintptr)(unsafe.Pointer(apArg))) < 2 { - Xsqlite3_result_error(tls, ctx, ts+26666, -1) + Xsqlite3_result_error(tls, ctx, ts+26713, -1) } else { var zBlob uintptr = Xsqlite3_value_blob(tls, *(*uintptr)(unsafe.Pointer(apArg))) if zBlob != 0 { @@ -124185,7 +124243,7 @@ if z == uintptr(0) { (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc = SQLITE_NOMEM } else { - (*RtreeCheck)(unsafe.Pointer(pCheck)).FzReport = Xsqlite3_mprintf(tls, ts+26699, + (*RtreeCheck)(unsafe.Pointer(pCheck)).FzReport = Xsqlite3_mprintf(tls, ts+26746, libc.VaList(bp, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzReport, func() uintptr { if (*RtreeCheck)(unsafe.Pointer(pCheck)).FzReport != 0 { return ts + 4062 @@ -124209,7 +124267,7 @@ if (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc == SQLITE_OK && (*RtreeCheck)(unsafe.Pointer(pCheck)).FpGetNode == uintptr(0) { (*RtreeCheck)(unsafe.Pointer(pCheck)).FpGetNode = rtreeCheckPrepare(tls, pCheck, - ts+26706, + ts+26753, libc.VaList(bp, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzDb, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzTab)) } @@ -124228,7 +124286,7 @@ } rtreeCheckReset(tls, pCheck, (*RtreeCheck)(unsafe.Pointer(pCheck)).FpGetNode) if (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc == SQLITE_OK && pRet == uintptr(0) { - rtreeCheckAppendMsg(tls, pCheck, ts+26751, libc.VaList(bp+16, iNode)) + rtreeCheckAppendMsg(tls, pCheck, ts+26798, libc.VaList(bp+16, iNode)) } } @@ -124242,8 +124300,8 @@ var rc int32 var pStmt uintptr *(*[2]uintptr)(unsafe.Pointer(bp + 80)) = [2]uintptr{ - ts + 26783, - ts + 26837, + ts + 26830, + ts + 26884, } if *(*uintptr)(unsafe.Pointer(pCheck + 40 + uintptr(bLeaf)*8)) == uintptr(0) { @@ -124258,23 +124316,23 @@ Xsqlite3_bind_int64(tls, pStmt, 1, iKey) rc = Xsqlite3_step(tls, pStmt) if rc == SQLITE_DONE { - rtreeCheckAppendMsg(tls, pCheck, ts+26885, + rtreeCheckAppendMsg(tls, pCheck, ts+26932, libc.VaList(bp+16, iKey, iVal, func() uintptr { if bLeaf != 0 { - return ts + 26930 + return ts + 26977 } - return ts + 26938 + return ts + 26985 }())) } else if rc == SQLITE_ROW { var ii I64 = Xsqlite3_column_int64(tls, pStmt, 0) if ii != iVal { rtreeCheckAppendMsg(tls, pCheck, - ts+26947, + ts+26994, libc.VaList(bp+40, iKey, ii, func() uintptr { if bLeaf != 0 { - return ts + 26930 + return ts + 26977 } - return ts + 26938 + return ts + 26985 }(), iKey, iVal)) } } @@ -124298,7 +124356,7 @@ return libc.Bool32(*(*RtreeValue)(unsafe.Pointer(bp + 48)) > *(*RtreeValue)(unsafe.Pointer(bp + 52))) }() != 0 { rtreeCheckAppendMsg(tls, pCheck, - ts+27005, libc.VaList(bp, i, iCell, iNode)) + ts+27052, libc.VaList(bp, i, iCell, iNode)) } if pParent != 0 { @@ -124318,7 +124376,7 @@ return libc.Bool32(*(*RtreeValue)(unsafe.Pointer(bp + 52)) > *(*RtreeValue)(unsafe.Pointer(bp + 60))) }() != 0 { rtreeCheckAppendMsg(tls, pCheck, - ts+27053, libc.VaList(bp+24, i, iCell, iNode)) + ts+27100, libc.VaList(bp+24, i, iCell, iNode)) } } } @@ -124335,14 +124393,14 @@ if aNode != 0 { if *(*int32)(unsafe.Pointer(bp + 48)) < 4 { 
rtreeCheckAppendMsg(tls, pCheck, - ts+27120, libc.VaList(bp, iNode, *(*int32)(unsafe.Pointer(bp + 48)))) + ts+27167, libc.VaList(bp, iNode, *(*int32)(unsafe.Pointer(bp + 48)))) } else { var nCell int32 var i int32 if aParent == uintptr(0) { iDepth = readInt16(tls, aNode) if iDepth > RTREE_MAX_DEPTH { - rtreeCheckAppendMsg(tls, pCheck, ts+27154, libc.VaList(bp+16, iDepth)) + rtreeCheckAppendMsg(tls, pCheck, ts+27201, libc.VaList(bp+16, iDepth)) Xsqlite3_free(tls, aNode) return } @@ -124350,7 +124408,7 @@ nCell = readInt16(tls, aNode+2) if 4+nCell*(8+(*RtreeCheck)(unsafe.Pointer(pCheck)).FnDim*2*4) > *(*int32)(unsafe.Pointer(bp + 48)) { rtreeCheckAppendMsg(tls, pCheck, - ts+27184, + ts+27231, libc.VaList(bp+24, iNode, nCell, *(*int32)(unsafe.Pointer(bp + 48)))) } else { for i = 0; i < nCell; i++ { @@ -124379,14 +124437,14 @@ if (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc == SQLITE_OK { var pCount uintptr - pCount = rtreeCheckPrepare(tls, pCheck, ts+27239, + pCount = rtreeCheckPrepare(tls, pCheck, ts+27286, libc.VaList(bp, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzDb, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzTab, zTbl)) if pCount != 0 { if Xsqlite3_step(tls, pCount) == SQLITE_ROW { var nActual I64 = Xsqlite3_column_int64(tls, pCount, 0) if nActual != nExpect { rtreeCheckAppendMsg(tls, pCheck, - ts+27270, libc.VaList(bp+24, zTbl, nExpect, nActual)) + ts+27317, libc.VaList(bp+24, zTbl, nExpect, nActual)) } } (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc = Xsqlite3_finalize(tls, pCount) @@ -124413,7 +124471,7 @@ } if (*RtreeCheck)(unsafe.Pointer(bp+32)).Frc == SQLITE_OK { - pStmt = rtreeCheckPrepare(tls, bp+32, ts+27337, libc.VaList(bp, zDb, zTab)) + pStmt = rtreeCheckPrepare(tls, bp+32, ts+27384, libc.VaList(bp, zDb, zTab)) if pStmt != 0 { nAux = Xsqlite3_column_count(tls, pStmt) - 2 Xsqlite3_finalize(tls, pStmt) @@ -124422,12 +124480,12 @@ } } - pStmt = rtreeCheckPrepare(tls, bp+32, ts+25157, libc.VaList(bp+16, zDb, zTab)) + pStmt = rtreeCheckPrepare(tls, bp+32, ts+25204, libc.VaList(bp+16, zDb, zTab)) if pStmt != 0 { var rc int32 (*RtreeCheck)(unsafe.Pointer(bp + 32)).FnDim = (Xsqlite3_column_count(tls, pStmt) - 1 - nAux) / 2 if (*RtreeCheck)(unsafe.Pointer(bp+32)).FnDim < 1 { - rtreeCheckAppendMsg(tls, bp+32, ts+27365, 0) + rtreeCheckAppendMsg(tls, bp+32, ts+27412, 0) } else if SQLITE_ROW == Xsqlite3_step(tls, pStmt) { (*RtreeCheck)(unsafe.Pointer(bp + 32)).FbInt = libc.Bool32(Xsqlite3_column_type(tls, pStmt, 1) == SQLITE_INTEGER) } @@ -124441,8 +124499,8 @@ if (*RtreeCheck)(unsafe.Pointer(bp+32)).Frc == SQLITE_OK { rtreeCheckNode(tls, bp+32, 0, uintptr(0), int64(1)) } - rtreeCheckCount(tls, bp+32, ts+27396, int64((*RtreeCheck)(unsafe.Pointer(bp+32)).FnLeaf)) - rtreeCheckCount(tls, bp+32, ts+27403, int64((*RtreeCheck)(unsafe.Pointer(bp+32)).FnNonLeaf)) + rtreeCheckCount(tls, bp+32, ts+27443, int64((*RtreeCheck)(unsafe.Pointer(bp+32)).FnLeaf)) + rtreeCheckCount(tls, bp+32, ts+27450, int64((*RtreeCheck)(unsafe.Pointer(bp+32)).FnNonLeaf)) } Xsqlite3_finalize(tls, (*RtreeCheck)(unsafe.Pointer(bp+32)).FpGetNode) @@ -124450,7 +124508,7 @@ Xsqlite3_finalize(tls, *(*uintptr)(unsafe.Pointer(bp + 32 + 40 + 1*8))) if bEnd != 0 { - var rc int32 = Xsqlite3_exec(tls, db, ts+27411, uintptr(0), uintptr(0), uintptr(0)) + var rc int32 = Xsqlite3_exec(tls, db, ts+27458, uintptr(0), uintptr(0), uintptr(0)) if (*RtreeCheck)(unsafe.Pointer(bp+32)).Frc == SQLITE_OK { (*RtreeCheck)(unsafe.Pointer(bp + 32)).Frc = rc } @@ -124465,7 +124523,7 @@ if nArg != 1 && nArg != 2 { Xsqlite3_result_error(tls, ctx, - ts+27415, -1) + 
ts+27462, -1) } else { var rc int32 *(*uintptr)(unsafe.Pointer(bp)) = uintptr(0) @@ -124483,7 +124541,7 @@ if *(*uintptr)(unsafe.Pointer(bp)) != 0 { return *(*uintptr)(unsafe.Pointer(bp)) } - return ts + 18012 + return ts + 18059 }(), -1, libc.UintptrFromInt32(-1)) } else { Xsqlite3_result_error_code(tls, ctx, rc) @@ -124854,11 +124912,11 @@ var db uintptr = Xsqlite3_context_db_handle(tls, context) var x uintptr = Xsqlite3_str_new(tls, db) var i int32 - Xsqlite3_str_append(tls, x, ts+27466, 1) + Xsqlite3_str_append(tls, x, ts+27513, 1) for i = 0; i < (*GeoPoly)(unsafe.Pointer(p)).FnVertex; i++ { - Xsqlite3_str_appendf(tls, x, ts+27468, libc.VaList(bp, float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2)*4))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2+1)*4))))) + Xsqlite3_str_appendf(tls, x, ts+27515, libc.VaList(bp, float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2)*4))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2+1)*4))))) } - Xsqlite3_str_appendf(tls, x, ts+27479, libc.VaList(bp+16, float64(*(*GeoCoord)(unsafe.Pointer(p + 8))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + 1*4))))) + Xsqlite3_str_appendf(tls, x, ts+27526, libc.VaList(bp+16, float64(*(*GeoCoord)(unsafe.Pointer(p + 8))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + 1*4))))) Xsqlite3_result_text(tls, context, Xsqlite3_str_finish(tls, x), -1, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{Xsqlite3_free}))) Xsqlite3_free(tls, p) } @@ -124878,19 +124936,19 @@ var x uintptr = Xsqlite3_str_new(tls, db) var i int32 var cSep uint8 = uint8('\'') - Xsqlite3_str_appendf(tls, x, ts+27490, 0) + Xsqlite3_str_appendf(tls, x, ts+27537, 0) for i = 0; i < (*GeoPoly)(unsafe.Pointer(p)).FnVertex; i++ { - Xsqlite3_str_appendf(tls, x, ts+27508, libc.VaList(bp, int32(cSep), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2)*4))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2+1)*4))))) + Xsqlite3_str_appendf(tls, x, ts+27555, libc.VaList(bp, int32(cSep), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2)*4))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2+1)*4))))) cSep = uint8(' ') } - Xsqlite3_str_appendf(tls, x, ts+27516, libc.VaList(bp+24, float64(*(*GeoCoord)(unsafe.Pointer(p + 8))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + 1*4))))) + Xsqlite3_str_appendf(tls, x, ts+27563, libc.VaList(bp+24, float64(*(*GeoCoord)(unsafe.Pointer(p + 8))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + 1*4))))) for i = 1; i < argc; i++ { var z uintptr = Xsqlite3_value_text(tls, *(*uintptr)(unsafe.Pointer(argv + uintptr(i)*8))) if z != 0 && *(*uint8)(unsafe.Pointer(z)) != 0 { - Xsqlite3_str_appendf(tls, x, ts+27524, libc.VaList(bp+40, z)) + Xsqlite3_str_appendf(tls, x, ts+27571, libc.VaList(bp+40, z)) } } - Xsqlite3_str_appendf(tls, x, ts+27528, 0) + Xsqlite3_str_appendf(tls, x, ts+27575, 0) Xsqlite3_result_text(tls, context, Xsqlite3_str_finish(tls, x), -1, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{Xsqlite3_free}))) Xsqlite3_free(tls, p) } @@ -125810,7 +125868,7 @@ libc.Xmemcpy(tls, (*Rtree)(unsafe.Pointer(pRtree)).FzName, *(*uintptr)(unsafe.Pointer(argv + 2*8)), uint64(nName)) pSql = Xsqlite3_str_new(tls, db) - Xsqlite3_str_appendf(tls, pSql, ts+27541, 0) + Xsqlite3_str_appendf(tls, pSql, ts+27588, 0) (*Rtree)(unsafe.Pointer(pRtree)).FnAux = U8(1) (*Rtree)(unsafe.Pointer(pRtree)).FnAuxNotNull = U8(1) ii = 3 @@ -125819,7 +125877,7 @@ goto __4 } (*Rtree)(unsafe.Pointer(pRtree)).FnAux++ - Xsqlite3_str_appendf(tls, pSql, ts+27563, libc.VaList(bp+8, 
*(*uintptr)(unsafe.Pointer(argv + uintptr(ii)*8)))) + Xsqlite3_str_appendf(tls, pSql, ts+27610, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(argv + uintptr(ii)*8)))) goto __3 __3: ii++ @@ -125827,7 +125885,7 @@ goto __4 __4: ; - Xsqlite3_str_appendf(tls, pSql, ts+26630, 0) + Xsqlite3_str_appendf(tls, pSql, ts+26677, 0) zSql = Xsqlite3_str_finish(tls, pSql) if !!(zSql != 0) { goto __5 @@ -126064,7 +126122,7 @@ } if iFuncTerm >= 0 { (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxNum = idxNum - (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxStr = ts + 27567 + (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxStr = ts + 27614 (*sqlite3_index_constraint_usage)(unsafe.Pointer((*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FaConstraintUsage + uintptr(iFuncTerm)*8)).FargvIndex = 1 (*sqlite3_index_constraint_usage)(unsafe.Pointer((*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FaConstraintUsage + uintptr(iFuncTerm)*8)).Fomit = uint8(0) (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FestimatedCost = 300.0 @@ -126072,7 +126130,7 @@ return SQLITE_OK } (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxNum = 4 - (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxStr = ts + 27573 + (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxStr = ts + 27620 (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FestimatedCost = 3000000.0 (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FestimatedRows = int64(100000) return SQLITE_OK @@ -126184,7 +126242,7 @@ if !(*(*int32)(unsafe.Pointer(bp + 48)) == SQLITE_ERROR) { goto __4 } - (*Sqlite3_vtab)(unsafe.Pointer(pVtab)).FzErrMsg = Xsqlite3_mprintf(tls, ts+27582, 0) + (*Sqlite3_vtab)(unsafe.Pointer(pVtab)).FzErrMsg = Xsqlite3_mprintf(tls, ts+27629, 0) __4: ; goto geopoly_update_end @@ -126316,14 +126374,14 @@ func geopolyFindFunction(tls *libc.TLS, pVtab uintptr, nArg int32, zName uintptr, pxFunc uintptr, ppArg uintptr) int32 { _ = pVtab _ = nArg - if Xsqlite3_stricmp(tls, zName, ts+27622) == 0 { + if Xsqlite3_stricmp(tls, zName, ts+27669) == 0 { *(*uintptr)(unsafe.Pointer(pxFunc)) = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{geopolyOverlapFunc})) *(*uintptr)(unsafe.Pointer(ppArg)) = uintptr(0) return SQLITE_INDEX_CONSTRAINT_FUNCTION } - if Xsqlite3_stricmp(tls, zName, ts+27638) == 0 { + if Xsqlite3_stricmp(tls, zName, ts+27685) == 0 { *(*uintptr)(unsafe.Pointer(pxFunc)) = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{geopolyWithinFunc})) @@ -126388,7 +126446,7 @@ uintptr(0), aAgg[i].FxStep, aAgg[i].FxFinal) } if rc == SQLITE_OK { - rc = Xsqlite3_create_module_v2(tls, db, ts+27653, uintptr(unsafe.Pointer(&geopolyModule)), uintptr(0), uintptr(0)) + rc = Xsqlite3_create_module_v2(tls, db, ts+27700, uintptr(unsafe.Pointer(&geopolyModule)), uintptr(0), uintptr(0)) } return rc } @@ -126400,25 +126458,25 @@ F__ccgo_pad1 [6]byte FzName uintptr }{ - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27661}, - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27674}, - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27687}, - {FxFunc: 0, FnArg: int8(-1), FbPure: uint8(1), FzName: ts + 27700}, - {FxFunc: 0, FnArg: int8(2), FbPure: uint8(1), FzName: ts + 27638}, - {FxFunc: 0, FnArg: int8(3), FbPure: uint8(1), FzName: ts + 27712}, - {FxFunc: 0, FnArg: int8(2), FbPure: uint8(1), FzName: ts + 27622}, - {FxFunc: 0, FnArg: int8(1), FzName: ts + 27735}, - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27749}, - {FxFunc: 0, FnArg: int8(7), FbPure: uint8(1), 
FzName: ts + 27762}, - {FxFunc: 0, FnArg: int8(4), FbPure: uint8(1), FzName: ts + 27776}, - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27792}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27708}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27721}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27734}, + {FxFunc: 0, FnArg: int8(-1), FbPure: uint8(1), FzName: ts + 27747}, + {FxFunc: 0, FnArg: int8(2), FbPure: uint8(1), FzName: ts + 27685}, + {FxFunc: 0, FnArg: int8(3), FbPure: uint8(1), FzName: ts + 27759}, + {FxFunc: 0, FnArg: int8(2), FbPure: uint8(1), FzName: ts + 27669}, + {FxFunc: 0, FnArg: int8(1), FzName: ts + 27782}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27796}, + {FxFunc: 0, FnArg: int8(7), FbPure: uint8(1), FzName: ts + 27809}, + {FxFunc: 0, FnArg: int8(4), FbPure: uint8(1), FzName: ts + 27823}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27839}, } var aAgg = [1]struct { FxStep uintptr FxFinal uintptr FzName uintptr }{ - {FxStep: 0, FxFinal: 0, FzName: ts + 27804}, + {FxStep: 0, FxFinal: 0, FzName: ts + 27851}, } // Register the r-tree module with database handle db. This creates the @@ -126428,26 +126486,26 @@ var utf8 int32 = SQLITE_UTF8 var rc int32 - rc = Xsqlite3_create_function(tls, db, ts+27823, 2, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + rc = Xsqlite3_create_function(tls, db, ts+27870, 2, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rtreenode})), uintptr(0), uintptr(0)) if rc == SQLITE_OK { - rc = Xsqlite3_create_function(tls, db, ts+27833, 1, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + rc = Xsqlite3_create_function(tls, db, ts+27880, 1, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rtreedepth})), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { - rc = Xsqlite3_create_function(tls, db, ts+27844, -1, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + rc = Xsqlite3_create_function(tls, db, ts+27891, -1, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rtreecheck})), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { var c uintptr = uintptr(RTREE_COORD_REAL32) - rc = Xsqlite3_create_module_v2(tls, db, ts+27567, uintptr(unsafe.Pointer(&rtreeModule)), c, uintptr(0)) + rc = Xsqlite3_create_module_v2(tls, db, ts+27614, uintptr(unsafe.Pointer(&rtreeModule)), c, uintptr(0)) } if rc == SQLITE_OK { var c uintptr = uintptr(RTREE_COORD_INT32) - rc = Xsqlite3_create_module_v2(tls, db, ts+27855, uintptr(unsafe.Pointer(&rtreeModule)), c, uintptr(0)) + rc = Xsqlite3_create_module_v2(tls, db, ts+27902, uintptr(unsafe.Pointer(&rtreeModule)), c, uintptr(0)) } if rc == SQLITE_OK { rc = sqlite3_geopoly_init(tls, db) @@ -126501,7 +126559,7 @@ Xsqlite3_result_error_nomem(tls, ctx) rtreeMatchArgFree(tls, pBlob) } else { - Xsqlite3_result_pointer(tls, ctx, pBlob, ts+25143, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{rtreeMatchArgFree}))) + Xsqlite3_result_pointer(tls, ctx, pBlob, ts+25190, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{rtreeMatchArgFree}))) } } } @@ -126828,7 +126886,7 @@ nOut = rbuDeltaOutputSize(tls, aDelta, nDelta) if nOut < 0 { - Xsqlite3_result_error(tls, context, ts+27865, -1) + Xsqlite3_result_error(tls, context, ts+27912, -1) return } @@ -126839,7 +126897,7 @@ nOut2 = rbuDeltaApply(tls, aOrig, nOrig, aDelta, nDelta, aOut) if nOut2 != nOut { 
Xsqlite3_free(tls, aOut) - Xsqlite3_result_error(tls, context, ts+27865, -1) + Xsqlite3_result_error(tls, context, ts+27912, -1) } else { Xsqlite3_result_blob(tls, context, aOut, nOut, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{Xsqlite3_free}))) } @@ -126940,7 +126998,7 @@ rbuObjIterClearStatements(tls, pIter) if (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx == uintptr(0) { rc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+27886, uintptr(0), uintptr(0), p+64) + ts+27933, uintptr(0), uintptr(0), p+64) } if rc == SQLITE_OK { @@ -127004,7 +127062,7 @@ Xsqlite3_result_text(tls, pCtx, zIn, -1, uintptr(0)) } } else { - if libc.Xstrlen(tls, zIn) > uint64(4) && libc.Xmemcmp(tls, ts+25056, zIn, uint64(4)) == 0 { + if libc.Xstrlen(tls, zIn) > uint64(4) && libc.Xmemcmp(tls, ts+25103, zIn, uint64(4)) == 0 { var i int32 for i = 4; int32(*(*uint8)(unsafe.Pointer(zIn + uintptr(i)))) >= '0' && int32(*(*uint8)(unsafe.Pointer(zIn + uintptr(i)))) <= '9'; i++ { } @@ -127025,16 +127083,16 @@ rc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, pIter, p+64, Xsqlite3_mprintf(tls, - ts+28057, libc.VaList(bp, func() uintptr { + ts+28104, libc.VaList(bp, func() uintptr { if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - return ts + 28207 + return ts + 28254 } return ts + 1554 }()))) if rc == SQLITE_OK { rc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+8, p+64, - ts+28248) + ts+28295) } (*RbuObjIter)(unsafe.Pointer(pIter)).FbCleanup = 1 @@ -127150,7 +127208,7 @@ (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+32, p+64, Xsqlite3_mprintf(tls, - ts+28373, libc.VaList(bp, zTab))) + ts+28420, libc.VaList(bp, zTab))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc != SQLITE_OK || Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 32))) != SQLITE_ROW) { goto __1 } @@ -127168,7 +127226,7 @@ *(*int32)(unsafe.Pointer(piTnum)) = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 32)), 1) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+32+1*8, p+64, - Xsqlite3_mprintf(tls, ts+28492, libc.VaList(bp+8, zTab))) + Xsqlite3_mprintf(tls, ts+28539, libc.VaList(bp+8, zTab))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc != 0) { goto __3 } @@ -127186,7 +127244,7 @@ } (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+32+2*8, p+64, Xsqlite3_mprintf(tls, - ts+28513, libc.VaList(bp+16, zIdx))) + ts+28560, libc.VaList(bp+16, zIdx))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK) { goto __7 } @@ -127209,7 +127267,7 @@ __5: ; (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+32+3*8, p+64, - Xsqlite3_mprintf(tls, ts+28564, libc.VaList(bp+24, zTab))) + Xsqlite3_mprintf(tls, ts+28611, libc.VaList(bp+24, zTab))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK) { goto __10 } @@ -127255,7 +127313,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { libc.Xmemcpy(tls, (*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed, (*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk, uint64(unsafe.Sizeof(U8(0)))*uint64((*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol)) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+16, p+64, - Xsqlite3_mprintf(tls, ts+28585, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) 
+ Xsqlite3_mprintf(tls, ts+28632, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) } (*RbuObjIter)(unsafe.Pointer(pIter)).FnIndex = 0 @@ -127270,7 +127328,7 @@ libc.Xmemset(tls, (*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed, 0x01, uint64(unsafe.Sizeof(U8(0)))*uint64((*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol)) } (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+24, p+64, - Xsqlite3_mprintf(tls, ts+28613, libc.VaList(bp+8, zIdx))) + Xsqlite3_mprintf(tls, ts+28660, libc.VaList(bp+8, zIdx))) for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) { var iCid int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 24)), 1) if iCid >= 0 { @@ -127310,7 +127368,7 @@ rbuTableType(tls, p, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, pIter+72, bp+56, pIter+108) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NOTABLE { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+19489, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl)) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+19536, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl)) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc != 0 { return (*Sqlite3rbu)(unsafe.Pointer(p)).Frc @@ -127320,18 +127378,18 @@ } (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp+64, p+64, - Xsqlite3_mprintf(tls, ts+28642, libc.VaList(bp+8, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl))) + Xsqlite3_mprintf(tls, ts+28689, libc.VaList(bp+8, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl))) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { nCol = Xsqlite3_column_count(tls, *(*uintptr)(unsafe.Pointer(bp + 64))) rbuAllocateIterArrays(tls, p, pIter, nCol) } for i = 0; (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && i < nCol; i++ { var zName uintptr = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp + 64)), i) - if Xsqlite3_strnicmp(tls, ts+28661, zName, 4) != 0 { + if Xsqlite3_strnicmp(tls, ts+28708, zName, 4) != 0 { var zCopy uintptr = rbuStrndup(tls, zName, p+56) *(*int32)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FaiSrcOrder + uintptr((*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol)*4)) = (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(libc.PostIncInt32(&(*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol, 1))*8)) = zCopy - } else if 0 == Xsqlite3_stricmp(tls, ts+28666, zName) { + } else if 0 == Xsqlite3_stricmp(tls, ts+28713, zName) { bRbuRowid = 1 } } @@ -127343,18 +127401,18 @@ bRbuRowid != libc.Bool32((*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_VTAB || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE) { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, - ts+28676, libc.VaList(bp+16, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, + ts+28723, libc.VaList(bp+16, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, func() uintptr { if bRbuRowid != 0 { - return ts + 28705 + return ts + 28752 } - return ts + 28718 + return ts + 28765 }())) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, 
(*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+64, p+64, - Xsqlite3_mprintf(tls, ts+28727, libc.VaList(bp+32, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) + Xsqlite3_mprintf(tls, ts+28774, libc.VaList(bp+32, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) } for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 64))) { var zName uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 64)), 1) @@ -127368,7 +127426,7 @@ } if i == (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+28749, + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+28796, libc.VaList(bp+40, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zName)) } else { var iPk int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 64)), 5) @@ -127415,7 +127473,7 @@ var i int32 for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { var z uintptr = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) - zList = rbuMPrintf(tls, p, ts+28776, libc.VaList(bp, zList, zSep, z)) + zList = rbuMPrintf(tls, p, ts+28823, libc.VaList(bp, zList, zSep, z)) zSep = ts + 14614 } return zList @@ -127433,7 +127491,7 @@ for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { if int32(*(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(i)))) == iPk { var zCol uintptr = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) - zRet = rbuMPrintf(tls, p, ts+28785, libc.VaList(bp, zRet, zSep, zPre, zCol, zPost)) + zRet = rbuMPrintf(tls, p, ts+28832, libc.VaList(bp, zRet, zSep, zPre, zCol, zPost)) zSep = zSeparator break } @@ -127455,25 +127513,25 @@ if bRowid != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+72, p+64, Xsqlite3_mprintf(tls, - ts+28798, libc.VaList(bp, zWrite, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) + ts+28845, libc.VaList(bp, zWrite, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 72))) { var iMax Sqlite3_int64 = Xsqlite3_column_int64(tls, *(*uintptr)(unsafe.Pointer(bp + 72)), 0) - zRet = rbuMPrintf(tls, p, ts+28830, libc.VaList(bp+16, iMax)) + zRet = rbuMPrintf(tls, p, ts+28877, libc.VaList(bp+16, iMax)) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 72))) } else { - var zOrder uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+1554, ts+14614, ts+28853) - var zSelect uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+28859, ts+28866, ts+4957) + var zOrder uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+1554, ts+14614, ts+28900) + var zSelect uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+28906, ts+28913, ts+4957) var zList uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+1554, ts+14614, ts+1554) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+72, p+64, Xsqlite3_mprintf(tls, - ts+28874, + ts+28921, libc.VaList(bp+24, zSelect, zWrite, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zOrder))) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 72))) { var zVal uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp 
+ 72)), 0) - zRet = rbuMPrintf(tls, p, ts+28916, libc.VaList(bp+56, zList, zVal)) + zRet = rbuMPrintf(tls, p, ts+28963, libc.VaList(bp+56, zList, zVal)) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 72))) } @@ -127515,7 +127573,7 @@ *(*uintptr)(unsafe.Pointer(bp + 176)) = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+176, p+64, - Xsqlite3_mprintf(tls, ts+28613, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx))) + Xsqlite3_mprintf(tls, ts+28660, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx))) __1: if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 176)))) { goto __2 @@ -127550,7 +127608,7 @@ zCol = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) goto __7 __6: - zCol = ts + 28936 + zCol = ts + 28983 __7: ; goto __5 @@ -127558,11 +127616,11 @@ zCol = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(iCid)*8)) __5: ; - zLhs = rbuMPrintf(tls, p, ts+28944, + zLhs = rbuMPrintf(tls, p, ts+28991, libc.VaList(bp+8, zLhs, zSep, zCol, zCollate)) - zOrder = rbuMPrintf(tls, p, ts+28965, + zOrder = rbuMPrintf(tls, p, ts+29012, libc.VaList(bp+40, zOrder, zSep, iCol, zCol, zCollate)) - zSelect = rbuMPrintf(tls, p, ts+29001, + zSelect = rbuMPrintf(tls, p, ts+29048, libc.VaList(bp+80, zSelect, zSep, iCol, zCol)) zSep = ts + 14614 iCol++ @@ -127582,7 +127640,7 @@ *(*uintptr)(unsafe.Pointer(bp + 184)) = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+184, p+64, - Xsqlite3_mprintf(tls, ts+29028, + Xsqlite3_mprintf(tls, ts+29075, libc.VaList(bp+112, zSelect, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zOrder))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 184)))) { goto __13 @@ -127609,7 +127667,7 @@ ; __18: ; - zVector = rbuMPrintf(tls, p, ts+29076, libc.VaList(bp+136, zVector, zSep, zQuoted)) + zVector = rbuMPrintf(tls, p, ts+29123, libc.VaList(bp+136, zVector, zSep, zQuoted)) zSep = ts + 14614 goto __15 __15: @@ -127621,7 +127679,7 @@ if !!(bFailed != 0) { goto __20 } - zRet = rbuMPrintf(tls, p, ts+29083, libc.VaList(bp+160, zLhs, zVector)) + zRet = rbuMPrintf(tls, p, ts+29130, libc.VaList(bp+160, zLhs, zVector)) __20: ; __13: @@ -127654,7 +127712,7 @@ if rc == SQLITE_OK { rc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+200, p+64, - Xsqlite3_mprintf(tls, ts+28613, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx))) + Xsqlite3_mprintf(tls, ts+28660, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx))) } for rc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 200))) { @@ -127666,7 +127724,7 @@ if iCid == -2 { var iSeq int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 200)), 0) - zRet = Xsqlite3_mprintf(tls, ts+29095, libc.VaList(bp+8, zRet, zCom, + zRet = Xsqlite3_mprintf(tls, ts+29142, libc.VaList(bp+8, zRet, zCom, (*RbuSpan)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FaIdxCol+uintptr(iSeq)*16)).FnSpan, (*RbuSpan)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FaIdxCol+uintptr(iSeq)*16)).FzSpan, zCollate)) zType = ts + 1554 } else { @@ -127678,37 +127736,37 @@ zCol = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) } else if 
(*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - zCol = ts + 28936 + zCol = ts + 28983 } else { - zCol = ts + 28666 + zCol = ts + 28713 } zType = ts + 1119 } else { zCol = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(iCid)*8)) zType = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblType + uintptr(iCid)*8)) } - zRet = Xsqlite3_mprintf(tls, ts+29117, libc.VaList(bp+48, zRet, zCom, zCol, zCollate)) + zRet = Xsqlite3_mprintf(tls, ts+29164, libc.VaList(bp+48, zRet, zCom, zCol, zCollate)) } if (*RbuObjIter)(unsafe.Pointer(pIter)).FbUnique == 0 || Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 200)), 5) != 0 { var zOrder uintptr = func() uintptr { if bDesc != 0 { - return ts + 28853 + return ts + 28900 } return ts + 1554 }() - zImpPK = Xsqlite3_mprintf(tls, ts+29137, + zImpPK = Xsqlite3_mprintf(tls, ts+29184, libc.VaList(bp+80, zImpPK, zCom, nBind, zCol, zOrder)) } - zImpCols = Xsqlite3_mprintf(tls, ts+29158, + zImpCols = Xsqlite3_mprintf(tls, ts+29205, libc.VaList(bp+120, zImpCols, zCom, nBind, zCol, zType, zCollate)) zWhere = Xsqlite3_mprintf(tls, - ts+29191, libc.VaList(bp+168, zWhere, zAnd, nBind, zCol)) + ts+29238, libc.VaList(bp+168, zWhere, zAnd, nBind, zCol)) if zRet == uintptr(0) || zImpPK == uintptr(0) || zImpCols == uintptr(0) || zWhere == uintptr(0) { rc = SQLITE_NOMEM } zCom = ts + 14614 - zAnd = ts + 21525 + zAnd = ts + 21572 nBind++ } @@ -127747,9 +127805,9 @@ for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { if *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed + uintptr(i))) != 0 { var zCol uintptr = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) - zList = Xsqlite3_mprintf(tls, ts+29215, libc.VaList(bp, zList, zS, zObj, zCol)) + zList = Xsqlite3_mprintf(tls, ts+29262, libc.VaList(bp, zList, zS, zObj, zCol)) } else { - zList = Xsqlite3_mprintf(tls, ts+29227, libc.VaList(bp+32, zList, zS)) + zList = Xsqlite3_mprintf(tls, ts+29274, libc.VaList(bp+32, zList, zS)) } zS = ts + 14614 if zList == uintptr(0) { @@ -127759,7 +127817,7 @@ } if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { - zList = rbuMPrintf(tls, p, ts+29236, libc.VaList(bp+48, zList, zObj)) + zList = rbuMPrintf(tls, p, ts+29283, libc.VaList(bp+48, zList, zObj)) } } return zList @@ -127771,18 +127829,18 @@ var zList uintptr = uintptr(0) if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_VTAB || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { - zList = rbuMPrintf(tls, p, ts+29251, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol+1)) + zList = rbuMPrintf(tls, p, ts+29298, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol+1)) } else if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL { var zSep uintptr = ts + 1554 var i int32 for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { if *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(i))) != 0 { - zList = rbuMPrintf(tls, p, ts+29265, libc.VaList(bp+8, zList, zSep, i, i+1)) - zSep = ts + 21525 + zList = rbuMPrintf(tls, p, ts+29312, libc.VaList(bp+8, zList, zSep, i, i+1)) + zSep = ts + 21572 } } zList = rbuMPrintf(tls, p, - ts+29277, libc.VaList(bp+40, zList)) + ts+29324, libc.VaList(bp+40, zList)) } else { var zSep uintptr = ts + 1554 @@ -127790,8 +127848,8 @@ for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { if 
*(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(i))) != 0 { var zCol uintptr = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) - zList = rbuMPrintf(tls, p, ts+29327, libc.VaList(bp+48, zList, zSep, zCol, i+1)) - zSep = ts + 21525 + zList = rbuMPrintf(tls, p, ts+29374, libc.VaList(bp+48, zList, zSep, zCol, i+1)) + zSep = ts + 21572 } } } @@ -127800,7 +127858,7 @@ func rbuBadControlError(tls *libc.TLS, p uintptr) { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+29340, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+29387, 0) } func rbuObjIterGetSetlist(tls *libc.TLS, p uintptr, pIter uintptr, zMask uintptr) uintptr { @@ -127818,15 +127876,15 @@ for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { var c uint8 = *(*uint8)(unsafe.Pointer(zMask + uintptr(*(*int32)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FaiSrcOrder + uintptr(i)*4))))) if int32(c) == 'x' { - zList = rbuMPrintf(tls, p, ts+29327, + zList = rbuMPrintf(tls, p, ts+29374, libc.VaList(bp, zList, zSep, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)), i+1)) zSep = ts + 14614 } else if int32(c) == 'd' { - zList = rbuMPrintf(tls, p, ts+29366, + zList = rbuMPrintf(tls, p, ts+29413, libc.VaList(bp+32, zList, zSep, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)), *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)), i+1)) zSep = ts + 14614 } else if int32(c) == 'f' { - zList = rbuMPrintf(tls, p, ts+29396, + zList = rbuMPrintf(tls, p, ts+29443, libc.VaList(bp+72, zList, zSep, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)), *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)), i+1)) zSep = ts + 14614 } @@ -127863,19 +127921,19 @@ var z uintptr = uintptr(0) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - var zSep uintptr = ts + 29433 + var zSep uintptr = ts + 29480 *(*uintptr)(unsafe.Pointer(bp + 56)) = uintptr(0) *(*uintptr)(unsafe.Pointer(bp + 64)) = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+56, p+64, - Xsqlite3_mprintf(tls, ts+28585, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) + Xsqlite3_mprintf(tls, ts+28632, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 56))) { var zOrig uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), 3) if zOrig != 0 && libc.Xstrcmp(tls, zOrig, ts+16155) == 0 { var zIdx uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), 1) if zIdx != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+64, p+64, - Xsqlite3_mprintf(tls, ts+28613, libc.VaList(bp+8, zIdx))) + Xsqlite3_mprintf(tls, ts+28660, libc.VaList(bp+8, zIdx))) } break } @@ -127887,15 +127945,15 @@ var zCol uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 64)), 2) var zDesc uintptr if Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 64)), 3) != 0 { - zDesc = ts + 28853 + zDesc = ts + 28900 } else { zDesc = ts + 1554 } - z = rbuMPrintf(tls, p, ts+29446, libc.VaList(bp+16, z, zSep, zCol, 
zDesc)) + z = rbuMPrintf(tls, p, ts+29493, libc.VaList(bp+16, z, zSep, zCol, zDesc)) zSep = ts + 14614 } } - z = rbuMPrintf(tls, p, ts+29457, libc.VaList(bp+48, z)) + z = rbuMPrintf(tls, p, ts+29504, libc.VaList(bp+48, z)) rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 64))) } return z @@ -127915,7 +127973,7 @@ var zPk uintptr = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+168, p+64, - ts+29461) + ts+29508) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { Xsqlite3_bind_int(tls, *(*uintptr)(unsafe.Pointer(bp + 168)), 1, tnum) if SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 168))) { @@ -127924,7 +127982,7 @@ } if zIdx != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+176, p+64, - Xsqlite3_mprintf(tls, ts+28613, libc.VaList(bp, zIdx))) + Xsqlite3_mprintf(tls, ts+28660, libc.VaList(bp, zIdx))) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 168))) @@ -127934,23 +127992,23 @@ var iCid int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 176)), 1) var bDesc int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 176)), 3) var zCollate uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 176)), 4) - zCols = rbuMPrintf(tls, p, ts+29511, libc.VaList(bp+8, zCols, zComma, + zCols = rbuMPrintf(tls, p, ts+29558, libc.VaList(bp+8, zCols, zComma, iCid, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblType + uintptr(iCid)*8)), zCollate)) - zPk = rbuMPrintf(tls, p, ts+29533, libc.VaList(bp+48, zPk, zComma, iCid, func() uintptr { + zPk = rbuMPrintf(tls, p, ts+29580, libc.VaList(bp+48, zPk, zComma, iCid, func() uintptr { if bDesc != 0 { - return ts + 28853 + return ts + 28900 } return ts + 1554 }())) zComma = ts + 14614 } } - zCols = rbuMPrintf(tls, p, ts+29543, libc.VaList(bp+80, zCols)) + zCols = rbuMPrintf(tls, p, ts+29590, libc.VaList(bp+80, zCols)) rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 176))) Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+88, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6441, 1, tnum)) rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+29558, + ts+29605, libc.VaList(bp+120, zCols, zPk)) Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+136, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6441, 0, 0)) } @@ -127976,13 +128034,13 @@ (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6441, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zCol, uintptr(0), bp+192, uintptr(0), uintptr(0), uintptr(0)) if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_IPK && *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(iCol))) != 0 { - zPk = ts + 29620 + zPk = ts + 29667 } - zSql = rbuMPrintf(tls, p, ts+29633, + zSql = rbuMPrintf(tls, p, ts+29680, libc.VaList(bp+32, zSql, zComma, zCol, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblType + uintptr(iCol)*8)), zPk, *(*uintptr)(unsafe.Pointer(bp + 192)), func() uintptr { if *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabNotNull + uintptr(iCol))) != 0 { - return ts + 29660 + return ts + 29707 } return ts + 1554 }())) @@ -127992,16 +128050,16 @@ if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_WITHOUT_ROWID { var zPk uintptr = rbuWithoutRowidPK(tls, p, pIter) if zPk != 0 { - zSql = rbuMPrintf(tls, p, ts+29670, libc.VaList(bp+88, zSql, zPk)) + zSql = rbuMPrintf(tls, p, ts+29717, 
libc.VaList(bp+88, zSql, zPk)) } } Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+104, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6441, 1, tnum)) - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+29677, + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+29724, libc.VaList(bp+136, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zSql, func() uintptr { if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_WITHOUT_ROWID { - return ts + 29709 + return ts + 29756 } return ts + 1554 }())) @@ -128018,7 +128076,7 @@ if zBind != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, pIter+152, p+64, Xsqlite3_mprintf(tls, - ts+29724, + ts+29771, libc.VaList(bp, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zCollist, zRbuRowid, zBind))) } } @@ -128055,7 +128113,7 @@ if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { *(*int32)(unsafe.Pointer(bp + 8)) = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp, p+64, - ts+29781) + ts+29828) } if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { var rc2 int32 @@ -128160,7 +128218,7 @@ var zLimit uintptr = uintptr(0) if nOffset != 0 { - zLimit = Xsqlite3_mprintf(tls, ts+29847, libc.VaList(bp, nOffset)) + zLimit = Xsqlite3_mprintf(tls, ts+29894, libc.VaList(bp, nOffset)) if !(zLimit != 0) { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_NOMEM } @@ -128183,7 +128241,7 @@ Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+8, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6441, 0, 1)) Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+40, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6441, 1, tnum)) rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+29867, + ts+29914, libc.VaList(bp+72, zTbl, *(*uintptr)(unsafe.Pointer(bp + 600)), *(*uintptr)(unsafe.Pointer(bp + 608)))) Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+96, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6441, 0, 0)) @@ -128191,13 +128249,13 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+136, p+64, - Xsqlite3_mprintf(tls, ts+29932, libc.VaList(bp+128, zTbl, zBind))) + Xsqlite3_mprintf(tls, ts+29979, libc.VaList(bp+128, zTbl, zBind))) } if libc.Bool32((*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0)) == 0 && (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+144, p+64, - Xsqlite3_mprintf(tls, ts+29968, libc.VaList(bp+144, zTbl, *(*uintptr)(unsafe.Pointer(bp + 616))))) + Xsqlite3_mprintf(tls, ts+30015, libc.VaList(bp+144, zTbl, *(*uintptr)(unsafe.Pointer(bp + 616))))) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { @@ -128213,7 +128271,7 @@ } zSql = Xsqlite3_mprintf(tls, - ts+30002, + ts+30049, libc.VaList(bp+160, zCollist, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zPart, @@ -128221,9 +128279,9 @@ if zStart != 0 { return func() uintptr { if zPart != 0 { - return ts + 30063 + return ts + 30110 } - return ts + 30067 + return ts + 30114 }() } return ts + 1554 @@ -128232,20 +128290,20 @@ Xsqlite3_free(tls, zStart) } else if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { zSql = Xsqlite3_mprintf(tls, - ts+30073, + ts+30120, libc.VaList(bp+216, 
zCollist, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zPart, zCollist, zLimit)) } else { zSql = Xsqlite3_mprintf(tls, - ts+30134, + ts+30181, libc.VaList(bp+264, zCollist, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zPart, zCollist, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zPart, func() uintptr { if zPart != 0 { - return ts + 30063 + return ts + 30110 } - return ts + 30067 + return ts + 30114 }(), zCollist, zLimit)) } @@ -128282,16 +128340,16 @@ if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_VTAB { return ts + 1554 } - return ts + 30293 + return ts + 30340 }() if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+136, pz, Xsqlite3_mprintf(tls, - ts+30302, + ts+30349, libc.VaList(bp+344, zWrite, zTbl, zCollist, func() uintptr { if bRbuRowid != 0 { - return ts + 30338 + return ts + 30385 } return ts + 1554 }(), zBindings))) @@ -128300,32 +128358,32 @@ if libc.Bool32((*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0)) == 0 && (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+144, pz, Xsqlite3_mprintf(tls, - ts+30348, libc.VaList(bp+384, zWrite, zTbl, zWhere))) + ts+30395, libc.VaList(bp+384, zWrite, zTbl, zWhere))) } if libc.Bool32((*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0)) == 0 && (*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed != 0 { var zRbuRowid uintptr = ts + 1554 if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { - zRbuRowid = ts + 30376 + zRbuRowid = ts + 30423 } rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, - ts+30388, libc.VaList(bp+408, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, func() uintptr { + ts+30435, libc.VaList(bp+408, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, func() uintptr { if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL { - return ts + 30464 + return ts + 30511 } return ts + 1554 }(), (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl)) rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+30481, + ts+30528, libc.VaList(bp+440, zWrite, zTbl, zOldlist, zWrite, zTbl, zOldlist, zWrite, zTbl, zNewlist)) if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+30780, + ts+30827, libc.VaList(bp+512, zWrite, zTbl, zNewlist)) } @@ -128338,9 +128396,9 @@ var zOrder uintptr = uintptr(0) if bRbuRowid != 0 { if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - zRbuRowid = ts + 30879 + zRbuRowid = ts + 30926 } else { - zRbuRowid = ts + 30889 + zRbuRowid = ts + 30936 } } @@ -128353,7 +128411,7 @@ } } if bRbuRowid != 0 { - zOrder = rbuMPrintf(tls, p, ts+28936, 0) + zOrder = rbuMPrintf(tls, p, ts+28983, 0) } else { zOrder = rbuObjIterGetPkList(tls, p, pIter, ts+1554, ts+14614, ts+1554) } @@ -128362,11 +128420,11 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, pIter+128, pz, Xsqlite3_mprintf(tls, - ts+30900, + ts+30947, libc.VaList(bp+536, zCollist, func() uintptr { if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - return ts + 30948 + return ts + 30995 } return ts + 
1554 }(), @@ -128379,7 +128437,7 @@ }(), func() uintptr { if zOrder != 0 { - return ts + 22859 + return ts + 22906 } return ts + 1554 }(), zOrder, @@ -128447,9 +128505,9 @@ var zPrefix uintptr = ts + 1554 if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType != RBU_PK_VTAB { - zPrefix = ts + 30293 + zPrefix = ts + 30340 } - zUpdate = Xsqlite3_mprintf(tls, ts+30954, + zUpdate = Xsqlite3_mprintf(tls, ts+31001, libc.VaList(bp, zPrefix, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zSet, zWhere)) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pUp+8, p+64, zUpdate) @@ -128508,7 +128566,7 @@ } *(*int32)(unsafe.Pointer(bp + 16)) = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp+8, p+64, - Xsqlite3_mprintf(tls, ts+30984, libc.VaList(bp, p+48))) + Xsqlite3_mprintf(tls, ts+31031, libc.VaList(bp, p+48))) for *(*int32)(unsafe.Pointer(bp + 16)) == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 8))) { switch Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 8)), 0) { case RBU_STATE_STAGE: @@ -128581,18 +128639,18 @@ Xsqlite3_file_control(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+6441, SQLITE_FCNTL_RBUCNT, p) if (*Sqlite3rbu)(unsafe.Pointer(p)).FzState == uintptr(0) { var zFile uintptr = Xsqlite3_db_filename(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+6441) - (*Sqlite3rbu)(unsafe.Pointer(p)).FzState = rbuMPrintf(tls, p, ts+31014, libc.VaList(bp, zFile, zFile)) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzState = rbuMPrintf(tls, p, ts+31061, libc.VaList(bp, zFile, zFile)) } } if (*Sqlite3rbu)(unsafe.Pointer(p)).FzState != 0 { - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+31042, libc.VaList(bp+16, (*Sqlite3rbu)(unsafe.Pointer(p)).FzState)) + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+31089, libc.VaList(bp+16, (*Sqlite3rbu)(unsafe.Pointer(p)).FzState)) libc.Xmemcpy(tls, p+48, ts+3286, uint64(4)) } else { libc.Xmemcpy(tls, p+48, ts+6441, uint64(4)) } - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+31060, libc.VaList(bp+24, p+48)) + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+31107, libc.VaList(bp+24, p+48)) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { var bOpen int32 = 0 @@ -128632,11 +128690,11 @@ return } (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31126, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31173, 0) } else { var zTarget uintptr var zExtra uintptr = uintptr(0) - if libc.Xstrlen(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu) >= uint64(5) && 0 == libc.Xmemcmp(tls, ts+24206, (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu, uint64(5)) { + if libc.Xstrlen(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu) >= uint64(5) && 0 == libc.Xmemcmp(tls, ts+24253, (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu, uint64(5)) { zExtra = (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu + 5 for *(*uint8)(unsafe.Pointer(zExtra)) != 0 { if int32(*(*uint8)(unsafe.Pointer(libc.PostIncUintptr(&zExtra, 1)))) == '?' 
{ @@ -128648,13 +128706,13 @@ } } - zTarget = Xsqlite3_mprintf(tls, ts+31158, + zTarget = Xsqlite3_mprintf(tls, ts+31205, libc.VaList(bp+32, Xsqlite3_db_filename(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+6441), func() uintptr { if zExtra == uintptr(0) { return ts + 1554 } - return ts + 31190 + return ts + 31237 }(), func() uintptr { if zExtra == uintptr(0) { return ts + 1554 @@ -128673,21 +128731,21 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_create_function(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+31192, -1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { + ts+31239, -1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rbuTmpInsertFunc})), uintptr(0), uintptr(0)) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_create_function(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+31207, 2, SQLITE_UTF8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + ts+31254, 2, SQLITE_UTF8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rbuFossilDeltaFunc})), uintptr(0), uintptr(0)) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_create_function(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, - ts+31224, -1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { + ts+31271, -1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rbuTargetNameFunc})), uintptr(0), uintptr(0)) } @@ -128695,7 +128753,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_file_control(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6441, SQLITE_FCNTL_RBU, p) } - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31240, 0) + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31287, 0) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_file_control(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6441, SQLITE_FCNTL_RBU, p) @@ -128703,7 +128761,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_NOTFOUND { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31268, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31315, 0) } } @@ -128732,14 +128790,14 @@ if pState == uintptr(0) { (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage = 0 if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31240, uintptr(0), uintptr(0), uintptr(0)) + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31287, uintptr(0), uintptr(0), uintptr(0)) } } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { var rc2 int32 (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage = RBU_STAGE_CAPTURE - rc2 = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31286, uintptr(0), uintptr(0), uintptr(0)) + rc2 = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31333, uintptr(0), uintptr(0), uintptr(0)) if rc2 != SQLITE_NOTICE { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = rc2 } @@ -128865,7 +128923,7 @@ func rbuExclusiveCheckpoint(tls *libc.TLS, db uintptr) int32 { var zUri uintptr = Xsqlite3_db_filename(tls, db, uintptr(0)) - return 
Xsqlite3_uri_boolean(tls, zUri, ts+31321, 0) + return Xsqlite3_uri_boolean(tls, zUri, ts+31368, 0) } func rbuMoveOalFile(tls *libc.TLS, p uintptr) { @@ -128880,8 +128938,8 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { zMove = Xsqlite3_db_filename(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+6441) } - zOal = Xsqlite3_mprintf(tls, ts+31346, libc.VaList(bp, zMove)) - zWal = Xsqlite3_mprintf(tls, ts+31353, libc.VaList(bp+8, zMove)) + zOal = Xsqlite3_mprintf(tls, ts+31393, libc.VaList(bp, zMove)) + zWal = Xsqlite3_mprintf(tls, ts+31400, libc.VaList(bp+8, zMove)) if zWal == uintptr(0) || zOal == uintptr(0) { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_NOMEM @@ -128998,7 +129056,7 @@ (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx == uintptr(0) && (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_IPK && *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(i))) != 0 && Xsqlite3_column_type(tls, (*RbuObjIter)(unsafe.Pointer(pIter)).FpSelect, i) == SQLITE_NULL { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_MISMATCH - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+23844, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+23891, 0) return } @@ -129091,7 +129149,7 @@ var iCookie int32 = 1000000 (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, dbread, bp+8, p+64, - ts+31360) + ts+31407) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { if SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 8))) { iCookie = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 8)), 0) @@ -129099,7 +129157,7 @@ rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 8))) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31382, libc.VaList(bp, iCookie+1)) + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31429, libc.VaList(bp, iCookie+1)) } } } @@ -129120,7 +129178,7 @@ rc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp+168, p+64, Xsqlite3_mprintf(tls, - ts+31409, + ts+31456, libc.VaList(bp, p+48, RBU_STATE_STAGE, eStage, RBU_STATE_TBL, (*Sqlite3rbu)(unsafe.Pointer(p)).Fobjiter.FzTbl, @@ -129150,9 +129208,9 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { *(*uintptr)(unsafe.Pointer(bp + 24)) = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp+24, p+64, - Xsqlite3_mprintf(tls, ts+31567, libc.VaList(bp, zPragma))) + Xsqlite3_mprintf(tls, ts+31614, libc.VaList(bp, zPragma))) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) { - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31582, + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31629, libc.VaList(bp+8, zPragma, Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 24)), 0))) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 24))) @@ -129166,10 +129224,10 @@ *(*uintptr)(unsafe.Pointer(bp)) = uintptr(0) *(*uintptr)(unsafe.Pointer(bp + 8)) = uintptr(0) - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31602, uintptr(0), uintptr(0), p+64) + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31649, uintptr(0), uintptr(0), p+64) if 
(*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp, p+64, - ts+31627) + ts+31674) } for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp))) == SQLITE_ROW { @@ -129183,12 +129241,12 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp, p+64, - ts+31735) + ts+31782) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+8, p+64, - ts+31800) + ts+31847) } for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp))) == SQLITE_ROW { @@ -129200,7 +129258,7 @@ (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_reset(tls, *(*uintptr)(unsafe.Pointer(bp + 8))) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31844, uintptr(0), uintptr(0), p+64) + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31891, uintptr(0), uintptr(0), p+64) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp))) @@ -129228,7 +129286,7 @@ if (*RbuObjIter)(unsafe.Pointer(pIter)).FbCleanup != 0 { if libc.Bool32((*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0)) == 0 && (*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed != 0 { rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, - ts+31869, libc.VaList(bp, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl)) + ts+31916, libc.VaList(bp, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl)) } } else { rbuObjIterPrepareAll(tls, p, pIter, 0) @@ -129350,7 +129408,7 @@ if rc == SQLITE_OK && !(int32((*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl) != 0) { rc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31897, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31944, 0) } if rc == SQLITE_OK { @@ -129366,7 +129424,7 @@ bp := tls.Alloc(16) defer tls.Free(16) - var zOal uintptr = rbuMPrintf(tls, p, ts+31346, libc.VaList(bp, (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget)) + var zOal uintptr = rbuMPrintf(tls, p, ts+31393, libc.VaList(bp, (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget)) if zOal != 0 { *(*uintptr)(unsafe.Pointer(bp + 8)) = uintptr(0) Xsqlite3_file_control(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6441, SQLITE_FCNTL_VFS_POINTER, bp+8) @@ -129383,7 +129441,7 @@ defer tls.Free(76) Xsqlite3_randomness(tls, int32(unsafe.Sizeof(int32(0))), bp+8) - Xsqlite3_snprintf(tls, int32(unsafe.Sizeof([64]uint8{})), bp+12, ts+31922, libc.VaList(bp, *(*int32)(unsafe.Pointer(bp + 8)))) + Xsqlite3_snprintf(tls, int32(unsafe.Sizeof([64]uint8{})), bp+12, ts+31969, libc.VaList(bp, *(*int32)(unsafe.Pointer(bp + 8)))) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3rbu_create_vfs(tls, bp+12, uintptr(0)) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { var pVfs uintptr = Xsqlite3_vfs_find(tls, bp+12) @@ -129417,7 +129475,7 @@ rc = prepareFreeAndCollectError(tls, db, bp+8, bp+16, Xsqlite3_mprintf(tls, - ts+31933, libc.VaList(bp, Xsqlite3_value_text(tls, *(*uintptr)(unsafe.Pointer(apVal)))))) + ts+31980, libc.VaList(bp, Xsqlite3_value_text(tls, *(*uintptr)(unsafe.Pointer(apVal)))))) if rc != SQLITE_OK { Xsqlite3_result_error(tls, 
pCtx, *(*uintptr)(unsafe.Pointer(bp + 16)), -1) } else { @@ -129447,13 +129505,13 @@ (*Sqlite3rbu)(unsafe.Pointer(p)).FnPhaseOneStep = int64(-1) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_create_function(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, - ts+32005, 1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { + ts+32052, 1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rbuIndexCntFunc})), uintptr(0), uintptr(0)) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp, p+64, - ts+32019) + ts+32066) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { if SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp))) { @@ -129464,7 +129522,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && bExists != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp, p+64, - ts+32076) + ts+32123) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { if SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp))) { (*Sqlite3rbu)(unsafe.Pointer(p)).FnPhaseOneStep = Xsqlite3_column_int64(tls, *(*uintptr)(unsafe.Pointer(bp)), 0) @@ -129538,7 +129596,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && (*Rbu_file)(unsafe.Pointer((*Sqlite3rbu)(unsafe.Pointer(p)).FpTargetFd)).FpWalFd != 0 { if (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage == RBU_STAGE_OAL { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32150, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32197, 0) } else if (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage == RBU_STAGE_MOVE { (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage = RBU_STAGE_CKPT (*Sqlite3rbu)(unsafe.Pointer(p)).FnStep = 0 @@ -129556,12 +129614,12 @@ }() if (*Rbu_file)(unsafe.Pointer(pFd)).FiCookie != (*RbuState)(unsafe.Pointer(pState)).FiCookie { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_BUSY - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32182, + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32229, libc.VaList(bp+8, func() uintptr { if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - return ts + 32214 + return ts + 32261 } - return ts + 32221 + return ts + 32268 }())) } } @@ -129585,14 +129643,14 @@ } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, db, ts+32228, uintptr(0), uintptr(0), p+64) + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, db, ts+32275, uintptr(0), uintptr(0), p+64) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { var frc int32 = Xsqlite3_file_control(tls, db, ts+6441, SQLITE_FCNTL_ZIPVFS, uintptr(0)) if frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, - db, ts+32244, uintptr(0), uintptr(0), p+64) + db, ts+32291, uintptr(0), uintptr(0), p+64) } } @@ -129646,7 +129704,7 @@ } if zState != 0 { var n int32 = int32(libc.Xstrlen(tls, zState)) - if n >= 7 && 0 == libc.Xmemcmp(tls, ts+32268, zState+uintptr(n-7), uint64(7)) { + if n >= 7 && 0 == libc.Xmemcmp(tls, ts+32315, zState+uintptr(n-7), uint64(7)) { return rbuMisuseError(tls) } } @@ -129673,7 +129731,7 @@ var i uint32 var nErrmsg Size_t = libc.Xstrlen(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg) for i = uint32(0); Size_t(i) < nErrmsg-uint64(8); i++ { - if libc.Xmemcmp(tls, 
(*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg+uintptr(i), ts+30293, uint64(8)) == 0 { + if libc.Xmemcmp(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg+uintptr(i), ts+30340, uint64(8)) == 0 { var nDel int32 = 8 for int32(*(*uint8)(unsafe.Pointer((*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg + uintptr(i+uint32(nDel))))) >= '0' && int32(*(*uint8)(unsafe.Pointer((*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg + uintptr(i+uint32(nDel))))) <= '9' { nDel++ @@ -129709,7 +129767,7 @@ rbuObjIterFinalize(tls, p+80) if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) && (*Sqlite3rbu)(unsafe.Pointer(p)).Frc != SQLITE_OK && (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu != 0 { - var rc2 int32 = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+32276, uintptr(0), uintptr(0), uintptr(0)) + var rc2 int32 = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+32323, uintptr(0), uintptr(0), uintptr(0)) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_DONE && rc2 != SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = rc2 } @@ -129828,12 +129886,12 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { zBegin = ts + 14503 } else { - zBegin = ts + 32228 + zBegin = ts + 32275 } rc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, zBegin, uintptr(0), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { - rc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+32228, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+32275, uintptr(0), uintptr(0), uintptr(0)) } } @@ -130179,7 +130237,7 @@ })(unsafe.Pointer(&struct{ uintptr }{xControl})).f(tls, (*Rbu_file)(unsafe.Pointer(p)).FpReal, SQLITE_FCNTL_ZIPVFS, bp+16) if rc == SQLITE_OK { rc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(pRbu)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32303, 0) + (*Sqlite3rbu)(unsafe.Pointer(pRbu)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32350, 0) } else if rc == SQLITE_NOTFOUND { (*Sqlite3rbu)(unsafe.Pointer(pRbu)).FpTargetFd = p (*Rbu_file)(unsafe.Pointer(p)).FpRbu = pRbu @@ -130204,7 +130262,7 @@ if rc == SQLITE_OK && op == SQLITE_FCNTL_VFSNAME { var pRbuVfs uintptr = (*Rbu_file)(unsafe.Pointer(p)).FpRbuVfs var zIn uintptr = *(*uintptr)(unsafe.Pointer(pArg)) - var zOut uintptr = Xsqlite3_mprintf(tls, ts+32326, libc.VaList(bp, (*Rbu_vfs)(unsafe.Pointer(pRbuVfs)).Fbase.FzName, zIn)) + var zOut uintptr = Xsqlite3_mprintf(tls, ts+32373, libc.VaList(bp, (*Rbu_vfs)(unsafe.Pointer(pRbuVfs)).Fbase.FzName, zIn)) *(*uintptr)(unsafe.Pointer(pArg)) = zOut if zOut == uintptr(0) { rc = SQLITE_NOMEM @@ -130364,7 +130422,7 @@ } if oflags&SQLITE_OPEN_MAIN_DB != 0 && - Xsqlite3_uri_boolean(tls, zName, ts+32337, 0) != 0 { + Xsqlite3_uri_boolean(tls, zName, ts+32384, 0) != 0 { oflags = SQLITE_OPEN_TEMP_DB | SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE | SQLITE_OPEN_EXCLUSIVE | SQLITE_OPEN_DELETEONCLOSE zOpen = uintptr(0) } @@ -131193,7 +131251,7 @@ rc = Xsqlite3_table_column_metadata(tls, db, zDb, zThis, uintptr(0), uintptr(0), uintptr(0), uintptr(0), uintptr(0), uintptr(0)) if rc == SQLITE_OK { zPragma = Xsqlite3_mprintf(tls, - ts+32348, 0) + ts+32395, 0) } else if rc == SQLITE_ERROR { zPragma = Xsqlite3_mprintf(tls, ts+1554, 0) } else { @@ -131206,7 +131264,7 @@ return rc } } else { - zPragma = Xsqlite3_mprintf(tls, ts+32469, libc.VaList(bp, zDb, zThis)) + zPragma = Xsqlite3_mprintf(tls, ts+32516, libc.VaList(bp, zDb, zThis)) } if !(zPragma != 0) { *(*uintptr)(unsafe.Pointer(pazCol)) = uintptr(0) @@ -131886,9 +131944,9 @@ for i = 0; i < nCol; i++ { if 
*(*U8)(unsafe.Pointer(abPK + uintptr(i))) != 0 { - zRet = Xsqlite3_mprintf(tls, ts+32498, + zRet = Xsqlite3_mprintf(tls, ts+32545, libc.VaList(bp, zRet, zSep, zDb1, zTab, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*8)), zDb2, zTab, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*8)))) - zSep = ts + 21525 + zSep = ts + 21572 if zRet == uintptr(0) { break } @@ -131911,9 +131969,9 @@ if int32(*(*U8)(unsafe.Pointer(abPK + uintptr(i)))) == 0 { bHave = 1 zRet = Xsqlite3_mprintf(tls, - ts+32532, + ts+32579, libc.VaList(bp, zRet, zSep, zDb1, zTab, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*8)), zDb2, zTab, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*8)))) - zSep = ts + 32573 + zSep = ts + 32620 if zRet == uintptr(0) { break } @@ -131921,7 +131979,7 @@ } if bHave == 0 { - zRet = Xsqlite3_mprintf(tls, ts+7530, 0) + zRet = Xsqlite3_mprintf(tls, ts+7519, 0) } return zRet @@ -131932,7 +131990,7 @@ defer tls.Free(40) var zRet uintptr = Xsqlite3_mprintf(tls, - ts+32578, + ts+32625, libc.VaList(bp, zDb1, zTbl, zDb2, zTbl, zExpr)) return zRet } @@ -131975,7 +132033,7 @@ rc = SQLITE_NOMEM } else { var zStmt uintptr = Xsqlite3_mprintf(tls, - ts+32656, + ts+32703, libc.VaList(bp, (*Sqlite3_session)(unsafe.Pointer(pSession)).FzDb, (*SessionTable)(unsafe.Pointer(pTab)).FzName, zFrom, (*SessionTable)(unsafe.Pointer(pTab)).FzName, zExpr, zExpr2)) if zStmt == uintptr(0) { rc = SQLITE_NOMEM @@ -132102,7 +132160,7 @@ if !(pzErrMsg != 0) { goto __16 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+32709, 0) + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+32756, 0) __16: ; rc = SQLITE_SCHEMA @@ -132578,7 +132636,7 @@ if 0 == Xsqlite3_stricmp(tls, ts+11348, zTab) { zSql = Xsqlite3_mprintf(tls, - ts+32736, libc.VaList(bp, zDb)) + ts+32783, libc.VaList(bp, zDb)) if zSql == uintptr(0) { *(*int32)(unsafe.Pointer(bp + 24)) = SQLITE_NOMEM } @@ -132587,18 +132645,18 @@ var zSep uintptr = ts + 1554 *(*SessionBuffer)(unsafe.Pointer(bp + 8)) = SessionBuffer{} - sessionAppendStr(tls, bp+8, ts+32846, bp+24) + sessionAppendStr(tls, bp+8, ts+32893, bp+24) sessionAppendIdent(tls, bp+8, zDb, bp+24) sessionAppendStr(tls, bp+8, ts+1567, bp+24) sessionAppendIdent(tls, bp+8, zTab, bp+24) - sessionAppendStr(tls, bp+8, ts+32861, bp+24) + sessionAppendStr(tls, bp+8, ts+32908, bp+24) for i = 0; i < nCol; i++ { if *(*U8)(unsafe.Pointer(abPK + uintptr(i))) != 0 { sessionAppendStr(tls, bp+8, zSep, bp+24) sessionAppendIdent(tls, bp+8, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*8)), bp+24) - sessionAppendStr(tls, bp+8, ts+32869, bp+24) + sessionAppendStr(tls, bp+8, ts+32916, bp+24) sessionAppendInteger(tls, bp+8, i+1, bp+24) - zSep = ts + 21525 + zSep = ts + 21572 } } zSql = (*SessionBuffer)(unsafe.Pointer(bp + 8)).FaBuf @@ -132707,7 +132765,7 @@ if (*Sqlite3_session)(unsafe.Pointer(pSession)).Frc != 0 { return (*Sqlite3_session)(unsafe.Pointer(pSession)).Frc } - *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3_exec(tls, (*Sqlite3_session)(unsafe.Pointer(pSession)).Fdb, ts+32875, uintptr(0), uintptr(0), uintptr(0)) + *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3_exec(tls, (*Sqlite3_session)(unsafe.Pointer(pSession)).Fdb, ts+32922, uintptr(0), uintptr(0), uintptr(0)) if *(*int32)(unsafe.Pointer(bp + 40)) != SQLITE_OK { return *(*int32)(unsafe.Pointer(bp + 40)) } @@ -132799,7 +132857,7 @@ } Xsqlite3_free(tls, (*SessionBuffer)(unsafe.Pointer(bp+24)).FaBuf) - Xsqlite3_exec(tls, db, ts+32895, uintptr(0), uintptr(0), uintptr(0)) + Xsqlite3_exec(tls, db, ts+32942, uintptr(0), uintptr(0), uintptr(0)) 
Xsqlite3_mutex_leave(tls, Xsqlite3_db_mutex(tls, db)) return *(*int32)(unsafe.Pointer(bp + 40)) } @@ -133062,7 +133120,7 @@ rc = sessionInputBuffer(tls, pIn, 9) if rc == SQLITE_OK { if (*SessionInput)(unsafe.Pointer(pIn)).FiNext >= (*SessionInput)(unsafe.Pointer(pIn)).FnData { - rc = Xsqlite3CorruptError(tls, 219078) + rc = Xsqlite3CorruptError(tls, 219169) } else { eType = int32(*(*U8)(unsafe.Pointer((*SessionInput)(unsafe.Pointer(pIn)).FaData + uintptr(libc.PostIncInt32(&(*SessionInput)(unsafe.Pointer(pIn)).FiNext, 1))))) @@ -133085,7 +133143,7 @@ rc = sessionInputBuffer(tls, pIn, *(*int32)(unsafe.Pointer(bp))) if rc == SQLITE_OK { if *(*int32)(unsafe.Pointer(bp)) < 0 || *(*int32)(unsafe.Pointer(bp)) > (*SessionInput)(unsafe.Pointer(pIn)).FnData-(*SessionInput)(unsafe.Pointer(pIn)).FiNext { - rc = Xsqlite3CorruptError(tls, 219098) + rc = Xsqlite3CorruptError(tls, 219189) } else { var enc U8 = func() uint8 { if eType == SQLITE_TEXT { @@ -133127,7 +133185,7 @@ nRead = nRead + sessionVarintGet(tls, (*SessionInput)(unsafe.Pointer(pIn)).FaData+uintptr((*SessionInput)(unsafe.Pointer(pIn)).FiNext+nRead), bp) if *(*int32)(unsafe.Pointer(bp)) < 0 || *(*int32)(unsafe.Pointer(bp)) > 65536 { - rc = Xsqlite3CorruptError(tls, 219152) + rc = Xsqlite3CorruptError(tls, 219243) } else { rc = sessionInputBuffer(tls, pIn, nRead+*(*int32)(unsafe.Pointer(bp))+100) nRead = nRead + *(*int32)(unsafe.Pointer(bp)) @@ -133188,7 +133246,7 @@ (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Ftblhdr.FnBuf = 0 sessionBufferGrow(tls, p+72, int64(nByte), bp+4) } else { - *(*int32)(unsafe.Pointer(bp + 4)) = Xsqlite3CorruptError(tls, 219240) + *(*int32)(unsafe.Pointer(bp + 4)) = Xsqlite3CorruptError(tls, 219331) } } @@ -133262,13 +133320,13 @@ } if (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FzTab == uintptr(0) || (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FbPatchset != 0 && (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FbInvert != 0 { - return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219326)) + return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219417)) } (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fop = int32(op) (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FbIndirect = int32(*(*U8)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fin.FaData + uintptr(libc.PostIncInt32(&(*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fin.FiNext, 1))))) if (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fop != SQLITE_UPDATE && (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fop != SQLITE_DELETE && (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fop != SQLITE_INSERT { - return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219332)) + return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219423)) } if paRec != 0 { @@ -133330,7 +133388,7 @@ if *(*U8)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FabPK + uintptr(i))) != 0 { *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FapValue + uintptr(i)*8)) = *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FapValue + uintptr(i+(*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FnCol)*8)) if *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FapValue + uintptr(i)*8)) == uintptr(0) { - return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219376)) + return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219467)) } *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FapValue + uintptr(i+(*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FnCol)*8)) = uintptr(0) } @@ -133703,7 
+133761,7 @@ goto __6 __11: - *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3CorruptError(tls, 219741) + *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3CorruptError(tls, 219832) goto finished_invert __6: ; @@ -133882,34 +133940,34 @@ (*SessionUpdate)(unsafe.Pointer(pUp)).FaMask = pUp + 1*24 libc.Xmemcpy(tls, (*SessionUpdate)(unsafe.Pointer(pUp)).FaMask, (*SessionApplyCtx)(unsafe.Pointer(p)).FaUpdateMask, uint64(nU32)*uint64(unsafe.Sizeof(U32(0)))) - sessionAppendStr(tls, bp, ts+32913, bp+16) + sessionAppendStr(tls, bp, ts+32960, bp+16) sessionAppendIdent(tls, bp, (*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FzTab, bp+16) - sessionAppendStr(tls, bp, ts+32926, bp+16) + sessionAppendStr(tls, bp, ts+32973, bp+16) for ii = 0; ii < (*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FnCol; ii++ { if int32(*(*U8)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FabPK + uintptr(ii)))) == 0 && *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FapValue + uintptr((*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FnCol+ii)*8)) != 0 { sessionAppendStr(tls, bp, zSep, bp+16) sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(ii)*8)), bp+16) - sessionAppendStr(tls, bp, ts+32932, bp+16) + sessionAppendStr(tls, bp, ts+32979, bp+16) sessionAppendInteger(tls, bp, ii*2+1, bp+16) zSep = ts + 14614 } } zSep = ts + 1554 - sessionAppendStr(tls, bp, ts+32861, bp+16) + sessionAppendStr(tls, bp, ts+32908, bp+16) for ii = 0; ii < (*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FnCol; ii++ { if *(*U8)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FabPK + uintptr(ii))) != 0 || bPatchset == 0 && *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FapValue + uintptr(ii)*8)) != 0 { sessionAppendStr(tls, bp, zSep, bp+16) if bStat1 != 0 && ii == 1 { sessionAppendStr(tls, bp, - ts+32937, bp+16) + ts+32984, bp+16) } else { sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(ii)*8)), bp+16) - sessionAppendStr(tls, bp, ts+32869, bp+16) + sessionAppendStr(tls, bp, ts+32916, bp+16) sessionAppendInteger(tls, bp, ii*2+2, bp+16) } - zSep = ts + 21525 + zSep = ts + 21572 } } @@ -133961,34 +134019,34 @@ *(*SessionBuffer)(unsafe.Pointer(bp)) = SessionBuffer{} var nPk int32 = 0 - sessionAppendStr(tls, bp, ts+33012, bp+16) + sessionAppendStr(tls, bp, ts+33059, bp+16) sessionAppendIdent(tls, bp, zTab, bp+16) - sessionAppendStr(tls, bp, ts+32861, bp+16) + sessionAppendStr(tls, bp, ts+32908, bp+16) for i = 0; i < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol; i++ { if *(*U8)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FabPK + uintptr(i))) != 0 { nPk++ sessionAppendStr(tls, bp, zSep, bp+16) sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(i)*8)), bp+16) - sessionAppendStr(tls, bp, ts+32932, bp+16) + sessionAppendStr(tls, bp, ts+32979, bp+16) sessionAppendInteger(tls, bp, i+1, bp+16) - zSep = ts + 21525 + zSep = ts + 21572 } } if nPk < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol { - sessionAppendStr(tls, bp, ts+33030, bp+16) + sessionAppendStr(tls, bp, ts+33077, bp+16) sessionAppendInteger(tls, bp, (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol+1, bp+16) - sessionAppendStr(tls, bp, ts+32573, bp+16) + sessionAppendStr(tls, bp, ts+32620, bp+16) zSep = ts + 1554 for i = 0; i < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol; i++ { if !(int32(*(*U8)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FabPK + 
uintptr(i)))) != 0) { sessionAppendStr(tls, bp, zSep, bp+16) sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(i)*8)), bp+16) - sessionAppendStr(tls, bp, ts+32869, bp+16) + sessionAppendStr(tls, bp, ts+32916, bp+16) sessionAppendInteger(tls, bp, i+1, bp+16) - zSep = ts + 33038 + zSep = ts + 33085 } } sessionAppendStr(tls, bp, ts+4957, bp+16) @@ -134015,9 +134073,9 @@ var i int32 *(*SessionBuffer)(unsafe.Pointer(bp)) = SessionBuffer{} - sessionAppendStr(tls, bp, ts+33043, bp+16) + sessionAppendStr(tls, bp, ts+33090, bp+16) sessionAppendIdent(tls, bp, zTab, bp+16) - sessionAppendStr(tls, bp, ts+21531, bp+16) + sessionAppendStr(tls, bp, ts+21578, bp+16) for i = 0; i < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol; i++ { if i != 0 { sessionAppendStr(tls, bp, ts+14614, bp+16) @@ -134025,9 +134083,9 @@ sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(i)*8)), bp+16) } - sessionAppendStr(tls, bp, ts+33061, bp+16) + sessionAppendStr(tls, bp, ts+33108, bp+16) for i = 1; i < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol; i++ { - sessionAppendStr(tls, bp, ts+33072, bp+16) + sessionAppendStr(tls, bp, ts+33119, bp+16) } sessionAppendStr(tls, bp, ts+4957, bp+16) @@ -134046,11 +134104,11 @@ var rc int32 = sessionSelectRow(tls, db, ts+11348, p) if rc == SQLITE_OK { rc = sessionPrepare(tls, db, p+16, - ts+33076) + ts+33123) } if rc == SQLITE_OK { rc = sessionPrepare(tls, db, p+8, - ts+33189) + ts+33236) } return rc } @@ -134078,7 +134136,7 @@ f func(*libc.TLS, uintptr, int32, uintptr) int32 })(unsafe.Pointer(&struct{ uintptr }{xValue})).f(tls, pIter, i, bp) if *(*uintptr)(unsafe.Pointer(bp)) == uintptr(0) { - rc = Xsqlite3CorruptError(tls, 220219) + rc = Xsqlite3CorruptError(tls, 220310) } else { rc = sessionBindValue(tls, pStmt, i+1, *(*uintptr)(unsafe.Pointer(bp))) } @@ -134331,7 +134389,7 @@ if *(*int32)(unsafe.Pointer(bp + 4)) != 0 { rc = sessionApplyOneOp(tls, pIter, pApply, xConflict, pCtx, uintptr(0), uintptr(0)) } else if *(*int32)(unsafe.Pointer(bp)) != 0 { - rc = Xsqlite3_exec(tls, db, ts+33333, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33380, uintptr(0), uintptr(0), uintptr(0)) if rc == SQLITE_OK { rc = sessionBindRow(tls, pIter, *(*uintptr)(unsafe.Pointer(&struct { @@ -134347,7 +134405,7 @@ rc = sessionApplyOneOp(tls, pIter, pApply, xConflict, pCtx, uintptr(0), uintptr(0)) } if rc == SQLITE_OK { - rc = Xsqlite3_exec(tls, db, ts+33354, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33401, uintptr(0), uintptr(0), uintptr(0)) } } } @@ -134420,10 +134478,10 @@ (*SessionApplyCtx)(unsafe.Pointer(bp + 48)).FbInvertConstraints = libc.BoolInt32(!!(flags&SQLITE_CHANGESETAPPLY_INVERT != 0)) Xsqlite3_mutex_enter(tls, Xsqlite3_db_mutex(tls, db)) if flags&SQLITE_CHANGESETAPPLY_NOSAVEPOINT == 0 { - rc = Xsqlite3_exec(tls, db, ts+33373, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33420, uintptr(0), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { - rc = Xsqlite3_exec(tls, db, ts+33399, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33446, uintptr(0), uintptr(0), uintptr(0)) } for rc == SQLITE_OK && SQLITE_ROW == Xsqlite3changeset_next(tls, pIter) { Xsqlite3changeset_op(tls, pIter, bp+176, bp+184, bp+188, uintptr(0)) @@ -134482,16 +134540,16 @@ if (*SessionApplyCtx)(unsafe.Pointer(bp+48)).FnCol == 0 { schemaMismatch = 1 Xsqlite3_log(tls, SQLITE_SCHEMA, - ts+33429, libc.VaList(bp+8, 
*(*uintptr)(unsafe.Pointer(bp + 200)))) + ts+33476, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(bp + 200)))) } else if (*SessionApplyCtx)(unsafe.Pointer(bp+48)).FnCol < *(*int32)(unsafe.Pointer(bp + 184)) { schemaMismatch = 1 Xsqlite3_log(tls, SQLITE_SCHEMA, - ts+33473, + ts+33520, libc.VaList(bp+16, *(*uintptr)(unsafe.Pointer(bp + 200)), (*SessionApplyCtx)(unsafe.Pointer(bp+48)).FnCol, *(*int32)(unsafe.Pointer(bp + 184)))) } else if *(*int32)(unsafe.Pointer(bp + 184)) < nMinCol || libc.Xmemcmp(tls, (*SessionApplyCtx)(unsafe.Pointer(bp+48)).FabPK, *(*uintptr)(unsafe.Pointer(bp + 192)), uint64(*(*int32)(unsafe.Pointer(bp + 184)))) != 0 { schemaMismatch = 1 Xsqlite3_log(tls, SQLITE_SCHEMA, - ts+33544, libc.VaList(bp+40, *(*uintptr)(unsafe.Pointer(bp + 200)))) + ts+33591, libc.VaList(bp+40, *(*uintptr)(unsafe.Pointer(bp + 200)))) } else { (*SessionApplyCtx)(unsafe.Pointer(bp + 48)).FnCol = *(*int32)(unsafe.Pointer(bp + 184)) if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(bp + 200)), ts+11348) { @@ -134545,14 +134603,14 @@ } } } - Xsqlite3_exec(tls, db, ts+33604, uintptr(0), uintptr(0), uintptr(0)) + Xsqlite3_exec(tls, db, ts+33651, uintptr(0), uintptr(0), uintptr(0)) if flags&SQLITE_CHANGESETAPPLY_NOSAVEPOINT == 0 { if rc == SQLITE_OK { - rc = Xsqlite3_exec(tls, db, ts+33634, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33681, uintptr(0), uintptr(0), uintptr(0)) } else { - Xsqlite3_exec(tls, db, ts+33658, uintptr(0), uintptr(0), uintptr(0)) - Xsqlite3_exec(tls, db, ts+33634, uintptr(0), uintptr(0), uintptr(0)) + Xsqlite3_exec(tls, db, ts+33705, uintptr(0), uintptr(0), uintptr(0)) + Xsqlite3_exec(tls, db, ts+33681, uintptr(0), uintptr(0), uintptr(0)) } } @@ -135800,7 +135858,7 @@ fts5yy_pop_parser_stack(tls, fts5yypParser) } - sqlite3Fts5ParseError(tls, pParse, ts+33686, 0) + sqlite3Fts5ParseError(tls, pParse, ts+33733, 0) (*Fts5yyParser)(unsafe.Pointer(fts5yypParser)).FpParse = pParse @@ -136088,7 +136146,7 @@ _ = fts5yymajor sqlite3Fts5ParseError(tls, - pParse, ts+33714, libc.VaList(bp, fts5yyminor.Fn, fts5yyminor.Fp)) + pParse, ts+33761, libc.VaList(bp, fts5yyminor.Fn, fts5yyminor.Fp)) (*Fts5yyParser)(unsafe.Pointer(fts5yypParser)).FpParse = pParse @@ -136275,7 +136333,7 @@ if n < 0 { n = int32(libc.Xstrlen(tls, z)) } - (*HighlightContext)(unsafe.Pointer(p)).FzOut = Xsqlite3_mprintf(tls, ts+33745, libc.VaList(bp, (*HighlightContext)(unsafe.Pointer(p)).FzOut, n, z)) + (*HighlightContext)(unsafe.Pointer(p)).FzOut = Xsqlite3_mprintf(tls, ts+33792, libc.VaList(bp, (*HighlightContext)(unsafe.Pointer(p)).FzOut, n, z)) if (*HighlightContext)(unsafe.Pointer(p)).FzOut == uintptr(0) { *(*int32)(unsafe.Pointer(pRc)) = SQLITE_NOMEM } @@ -136343,7 +136401,7 @@ var iCol int32 if nVal != 3 { - var zErr uintptr = ts + 33752 + var zErr uintptr = ts + 33799 Xsqlite3_result_error(tls, pCtx, zErr, -1) return } @@ -136525,7 +136583,7 @@ var nCol int32 if nVal != 5 { - var zErr uintptr = ts + 33802 + var zErr uintptr = ts + 33849 Xsqlite3_result_error(tls, pCtx, zErr, -1) return } @@ -136849,13 +136907,13 @@ defer tls.Free(96) *(*[3]Builtin)(unsafe.Pointer(bp)) = [3]Builtin{ - {FzFunc: ts + 33850, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { + {FzFunc: ts + 33897, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr, int32, uintptr) }{fts5SnippetFunction}))}, - {FzFunc: ts + 33858, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { + {FzFunc: ts + 33905, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr, 
int32, uintptr) }{fts5HighlightFunction}))}, - {FzFunc: ts + 33868, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { + {FzFunc: ts + 33915, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr, int32, uintptr) }{fts5Bm25Function}))}, } @@ -137406,7 +137464,7 @@ *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_OK var nCmd int32 = int32(libc.Xstrlen(tls, zCmd)) - if Xsqlite3_strnicmp(tls, ts+33873, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+33920, zCmd, nCmd) == 0 { var nByte int32 = int32(uint64(unsafe.Sizeof(int32(0))) * uint64(FTS5_MAX_PREFIX_INDEXES)) var p uintptr var bFirst int32 = 1 @@ -137433,14 +137491,14 @@ break } if int32(*(*uint8)(unsafe.Pointer(p))) < '0' || int32(*(*uint8)(unsafe.Pointer(p))) > '9' { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+33880, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+33927, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR break } if (*Fts5Config)(unsafe.Pointer(pConfig)).FnPrefix == FTS5_MAX_PREFIX_INDEXES { *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, - ts+33911, libc.VaList(bp, FTS5_MAX_PREFIX_INDEXES)) + ts+33958, libc.VaList(bp, FTS5_MAX_PREFIX_INDEXES)) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR break } @@ -137451,7 +137509,7 @@ } if nPre <= 0 || nPre >= 1000 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+33944, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+33991, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR break } @@ -137464,7 +137522,7 @@ return *(*int32)(unsafe.Pointer(bp + 40)) } - if Xsqlite3_strnicmp(tls, ts+33981, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+34028, zCmd, nCmd) == 0 { var p uintptr = zArg var nArg Sqlite3_int64 = Sqlite3_int64(libc.Xstrlen(tls, zArg) + uint64(1)) var azArg uintptr = sqlite3Fts5MallocZero(tls, bp+40, int64(uint64(unsafe.Sizeof(uintptr(0)))*uint64(nArg))) @@ -137473,7 +137531,7 @@ if azArg != 0 && pSpace != 0 { if (*Fts5Config)(unsafe.Pointer(pConfig)).FpTok != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+33990, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34037, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { for nArg = int64(0); p != 0 && *(*uint8)(unsafe.Pointer(p)) != 0; nArg++ { @@ -137492,7 +137550,7 @@ } } if p == uintptr(0) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34023, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34070, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { *(*int32)(unsafe.Pointer(bp + 40)) = sqlite3Fts5GetTokenizer(tls, pGlobal, @@ -137507,14 +137565,14 @@ return *(*int32)(unsafe.Pointer(bp + 40)) } - if Xsqlite3_strnicmp(tls, ts+34057, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+34104, zCmd, nCmd) == 0 { if (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent != FTS5_CONTENT_NORMAL { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34065, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34112, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { if *(*uint8)(unsafe.Pointer(zArg)) != 0 { (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent = FTS5_CONTENT_EXTERNAL - (*Fts5Config)(unsafe.Pointer(pConfig)).FzContent = sqlite3Fts5Mprintf(tls, bp+40, ts+34097, libc.VaList(bp+8, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, zArg)) + (*Fts5Config)(unsafe.Pointer(pConfig)).FzContent = sqlite3Fts5Mprintf(tls, bp+40, ts+34144, libc.VaList(bp+8, 
(*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, zArg)) } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent = FTS5_CONTENT_NONE } @@ -137522,9 +137580,9 @@ return *(*int32)(unsafe.Pointer(bp + 40)) } - if Xsqlite3_strnicmp(tls, ts+34103, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+34150, zCmd, nCmd) == 0 { if (*Fts5Config)(unsafe.Pointer(pConfig)).FzContentRowid != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34117, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34164, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FzContentRowid = sqlite3Fts5Strndup(tls, bp+40, zArg, -1) @@ -137532,9 +137590,9 @@ return *(*int32)(unsafe.Pointer(bp + 40)) } - if Xsqlite3_strnicmp(tls, ts+34155, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+34202, zCmd, nCmd) == 0 { if int32(*(*uint8)(unsafe.Pointer(zArg))) != '0' && int32(*(*uint8)(unsafe.Pointer(zArg))) != '1' || int32(*(*uint8)(unsafe.Pointer(zArg + 1))) != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34166, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34213, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize = libc.Bool32(int32(*(*uint8)(unsafe.Pointer(zArg))) == '1') @@ -137546,17 +137604,17 @@ *(*[4]Fts5Enum)(unsafe.Pointer(bp + 48)) = [4]Fts5Enum{ {FzName: ts + 8026, FeVal: FTS5_DETAIL_NONE}, {FzName: ts + 17345}, - {FzName: ts + 34201, FeVal: FTS5_DETAIL_COLUMNS}, + {FzName: ts + 34248, FeVal: FTS5_DETAIL_COLUMNS}, {}, } if libc.AssignPtrInt32(bp+40, fts5ConfigSetEnum(tls, bp+48, zArg, pConfig+92)) != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34209, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34256, 0) } return *(*int32)(unsafe.Pointer(bp + 40)) } - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34240, libc.VaList(bp+24, nCmd, zCmd)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34287, libc.VaList(bp+24, nCmd, zCmd)) return SQLITE_ERROR } @@ -137603,15 +137661,15 @@ defer tls.Free(16) var rc int32 = SQLITE_OK - if 0 == Xsqlite3_stricmp(tls, zCol, ts+22191) || + if 0 == Xsqlite3_stricmp(tls, zCol, ts+22238) || 0 == Xsqlite3_stricmp(tls, zCol, ts+16267) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34268, libc.VaList(bp, zCol)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34315, libc.VaList(bp, zCol)) rc = SQLITE_ERROR } else if zArg != 0 { - if 0 == Xsqlite3_stricmp(tls, zArg, ts+34298) { + if 0 == Xsqlite3_stricmp(tls, zArg, ts+34345) { *(*U8)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(p)).FabUnindexed + uintptr((*Fts5Config)(unsafe.Pointer(p)).FnCol))) = U8(1) } else { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34308, libc.VaList(bp+8, zArg)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34355, libc.VaList(bp+8, zArg)) rc = SQLITE_ERROR } } @@ -137628,13 +137686,13 @@ *(*int32)(unsafe.Pointer(bp + 24)) = SQLITE_OK *(*Fts5Buffer)(unsafe.Pointer(bp + 32)) = Fts5Buffer{} - sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+34339, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(p)).FzContentRowid)) + sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+34386, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(p)).FzContentRowid)) if (*Fts5Config)(unsafe.Pointer(p)).FeContent != FTS5_CONTENT_NONE { for i = 0; i < (*Fts5Config)(unsafe.Pointer(p)).FnCol; i++ { if 
(*Fts5Config)(unsafe.Pointer(p)).FeContent == FTS5_CONTENT_EXTERNAL { - sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+34344, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(p)).FazCol + uintptr(i)*8)))) + sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+34391, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(p)).FazCol + uintptr(i)*8)))) } else { - sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+34351, libc.VaList(bp+16, i)) + sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+34398, libc.VaList(bp+16, i)) } } } @@ -137672,8 +137730,8 @@ (*Fts5Config)(unsafe.Pointer(pRet)).FzName = sqlite3Fts5Strndup(tls, bp+40, *(*uintptr)(unsafe.Pointer(azArg + 2*8)), -1) (*Fts5Config)(unsafe.Pointer(pRet)).FbColumnsize = 1 (*Fts5Config)(unsafe.Pointer(pRet)).FeDetail = FTS5_DETAIL_FULL - if *(*int32)(unsafe.Pointer(bp + 40)) == SQLITE_OK && Xsqlite3_stricmp(tls, (*Fts5Config)(unsafe.Pointer(pRet)).FzName, ts+22191) == 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34359, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pRet)).FzName)) + if *(*int32)(unsafe.Pointer(bp + 40)) == SQLITE_OK && Xsqlite3_stricmp(tls, (*Fts5Config)(unsafe.Pointer(pRet)).FzName, ts+22238) == 0 { + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34406, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pRet)).FzName)) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } @@ -137705,7 +137763,7 @@ if *(*int32)(unsafe.Pointer(bp + 40)) == SQLITE_OK { if z == uintptr(0) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34388, libc.VaList(bp+8, zOrig)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34435, libc.VaList(bp+8, zOrig)) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { if bOption != 0 { @@ -137742,14 +137800,14 @@ var zTail uintptr = uintptr(0) if (*Fts5Config)(unsafe.Pointer(pRet)).FeContent == FTS5_CONTENT_NORMAL { - zTail = ts + 34057 + zTail = ts + 34104 } else if (*Fts5Config)(unsafe.Pointer(pRet)).FbColumnsize != 0 { - zTail = ts + 34408 + zTail = ts + 34455 } if zTail != 0 { (*Fts5Config)(unsafe.Pointer(pRet)).FzContent = sqlite3Fts5Mprintf(tls, - bp+40, ts+34416, libc.VaList(bp+16, (*Fts5Config)(unsafe.Pointer(pRet)).FzDb, (*Fts5Config)(unsafe.Pointer(pRet)).FzName, zTail)) + bp+40, ts+34463, libc.VaList(bp+16, (*Fts5Config)(unsafe.Pointer(pRet)).FzDb, (*Fts5Config)(unsafe.Pointer(pRet)).FzName, zTail)) } } @@ -137798,7 +137856,7 @@ *(*int32)(unsafe.Pointer(bp + 48)) = SQLITE_OK var zSql uintptr - zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34427, 0) + zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34474, 0) for i = 0; zSql != 0 && i < (*Fts5Config)(unsafe.Pointer(pConfig)).FnCol; i++ { var zSep uintptr = func() uintptr { if i == 0 { @@ -137806,10 +137864,10 @@ } return ts + 14614 }() - zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34443, libc.VaList(bp, zSql, zSep, *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(pConfig)).FazCol + uintptr(i)*8)))) + zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34490, libc.VaList(bp, zSql, zSep, *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(pConfig)).FazCol + uintptr(i)*8)))) } - zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34450, - libc.VaList(bp+24, zSql, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, ts+22191)) + zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34497, + libc.VaList(bp+24, zSql, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, ts+22238)) if zSql != 0 { *(*int32)(unsafe.Pointer(bp + 48)) = Xsqlite3_declare_vtab(tls, 
(*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, zSql) @@ -137919,7 +137977,7 @@ var rc int32 = SQLITE_OK - if 0 == Xsqlite3_stricmp(tls, zKey, ts+34476) { + if 0 == Xsqlite3_stricmp(tls, zKey, ts+34523) { var pgsz int32 = 0 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { pgsz = Xsqlite3_value_int(tls, pVal) @@ -137929,7 +137987,7 @@ } else { (*Fts5Config)(unsafe.Pointer(pConfig)).Fpgsz = pgsz } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34481) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34528) { var nHashSize int32 = -1 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { nHashSize = Xsqlite3_value_int(tls, pVal) @@ -137939,7 +137997,7 @@ } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FnHashSize = nHashSize } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34490) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34537) { var nAutomerge int32 = -1 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { nAutomerge = Xsqlite3_value_int(tls, pVal) @@ -137952,7 +138010,7 @@ } (*Fts5Config)(unsafe.Pointer(pConfig)).FnAutomerge = nAutomerge } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34500) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34547) { var nUsermerge int32 = -1 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { nUsermerge = Xsqlite3_value_int(tls, pVal) @@ -137962,7 +138020,7 @@ } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FnUsermerge = nUsermerge } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34510) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34557) { var nCrisisMerge int32 = -1 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { nCrisisMerge = Xsqlite3_value_int(tls, pVal) @@ -137978,7 +138036,7 @@ } (*Fts5Config)(unsafe.Pointer(pConfig)).FnCrisisMerge = nCrisisMerge } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+22191) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+22238) { var zIn uintptr = Xsqlite3_value_text(tls, pVal) rc = sqlite3Fts5ConfigParseRank(tls, zIn, bp, bp+8) @@ -138001,7 +138059,7 @@ bp := tls.Alloc(52) defer tls.Free(52) - var zSelect uintptr = ts + 34522 + var zSelect uintptr = ts + 34569 var zSql uintptr *(*uintptr)(unsafe.Pointer(bp + 40)) = uintptr(0) *(*int32)(unsafe.Pointer(bp + 32)) = SQLITE_OK @@ -138023,7 +138081,7 @@ for SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 40))) { var zK uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 40)), 0) var pVal uintptr = Xsqlite3_column_value(tls, *(*uintptr)(unsafe.Pointer(bp + 40)), 1) - if 0 == Xsqlite3_stricmp(tls, zK, ts+34554) { + if 0 == Xsqlite3_stricmp(tls, zK, ts+34601) { iVersion = Xsqlite3_value_int(tls, pVal) } else { *(*int32)(unsafe.Pointer(bp + 48)) = 0 @@ -138037,7 +138095,7 @@ *(*int32)(unsafe.Pointer(bp + 32)) = SQLITE_ERROR if (*Fts5Config)(unsafe.Pointer(pConfig)).FpzErrmsg != 0 { *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(pConfig)).FpzErrmsg)) = Xsqlite3_mprintf(tls, - ts+34562, + ts+34609, libc.VaList(bp+16, iVersion, FTS5_CURRENT_VERSION)) } } @@ -138135,7 +138193,7 @@ } } if int32(*(*uint8)(unsafe.Pointer(z2))) == 0 { - sqlite3Fts5ParseError(tls, pParse, ts+34627, 0) + sqlite3Fts5ParseError(tls, pParse, ts+34674, 0) return FTS5_EOF } } @@ -138148,20 +138206,20 @@ { var z2 uintptr if sqlite3Fts5IsBareword(tls, *(*uint8)(unsafe.Pointer(z))) == 0 { - sqlite3Fts5ParseError(tls, pParse, ts+34647, libc.VaList(bp, z)) + sqlite3Fts5ParseError(tls, pParse, ts+34694, libc.VaList(bp, z)) return FTS5_EOF } tok = FTS5_STRING for z2 = z + 1; sqlite3Fts5IsBareword(tls, 
*(*uint8)(unsafe.Pointer(z2))) != 0; z2++ { } (*Fts5Token)(unsafe.Pointer(pToken)).Fn = int32((int64(z2) - int64(z)) / 1) - if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 2 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+34678, uint64(2)) == 0 { + if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 2 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+34725, uint64(2)) == 0 { tok = FTS5_OR } - if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 3 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+34681, uint64(3)) == 0 { + if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 3 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+34728, uint64(3)) == 0 { tok = FTS5_NOT } - if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 3 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+30063, uint64(3)) == 0 { + if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 3 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+30110, uint64(3)) == 0 { tok = FTS5_AND } break @@ -139939,9 +139997,9 @@ bp := tls.Alloc(16) defer tls.Free(16) - if (*Fts5Token)(unsafe.Pointer(pTok)).Fn != 4 || libc.Xmemcmp(tls, ts+34685, (*Fts5Token)(unsafe.Pointer(pTok)).Fp, uint64(4)) != 0 { + if (*Fts5Token)(unsafe.Pointer(pTok)).Fn != 4 || libc.Xmemcmp(tls, ts+34732, (*Fts5Token)(unsafe.Pointer(pTok)).Fp, uint64(4)) != 0 { sqlite3Fts5ParseError(tls, - pParse, ts+33714, libc.VaList(bp, (*Fts5Token)(unsafe.Pointer(pTok)).Fn, (*Fts5Token)(unsafe.Pointer(pTok)).Fp)) + pParse, ts+33761, libc.VaList(bp, (*Fts5Token)(unsafe.Pointer(pTok)).Fn, (*Fts5Token)(unsafe.Pointer(pTok)).Fp)) } } @@ -139957,7 +140015,7 @@ var c uint8 = *(*uint8)(unsafe.Pointer((*Fts5Token)(unsafe.Pointer(p)).Fp + uintptr(i))) if int32(c) < '0' || int32(c) > '9' { sqlite3Fts5ParseError(tls, - pParse, ts+34690, libc.VaList(bp, (*Fts5Token)(unsafe.Pointer(p)).Fn, (*Fts5Token)(unsafe.Pointer(p)).Fp)) + pParse, ts+34737, libc.VaList(bp, (*Fts5Token)(unsafe.Pointer(p)).Fn, (*Fts5Token)(unsafe.Pointer(p)).Fp)) return } nNear = nNear*10 + (int32(*(*uint8)(unsafe.Pointer((*Fts5Token)(unsafe.Pointer(p)).Fp + uintptr(i)))) - '0') @@ -140044,7 +140102,7 @@ } } if iCol == (*Fts5Config)(unsafe.Pointer(pConfig)).FnCol { - sqlite3Fts5ParseError(tls, pParse, ts+20528, libc.VaList(bp, z)) + sqlite3Fts5ParseError(tls, pParse, ts+20575, libc.VaList(bp, z)) } else { pRet = fts5ParseColset(tls, pParse, pColset, iCol) } @@ -140125,7 +140183,7 @@ *(*uintptr)(unsafe.Pointer(bp)) = pColset if (*Fts5Config)(unsafe.Pointer((*Fts5Parse)(unsafe.Pointer(pParse)).FpConfig)).FeDetail == FTS5_DETAIL_NONE { sqlite3Fts5ParseError(tls, pParse, - ts+34719, 0) + ts+34766, 0) } else { fts5ParseSetColset(tls, pParse, pExpr, pColset, bp) } @@ -140295,12 +140353,12 @@ (*Fts5ExprPhrase)(unsafe.Pointer(pPhrase)).FnTerm > 1 || (*Fts5ExprPhrase)(unsafe.Pointer(pPhrase)).FnTerm > 0 && (*Fts5ExprTerm)(unsafe.Pointer(pPhrase+32)).FbFirst != 0 { sqlite3Fts5ParseError(tls, pParse, - ts+34772, + ts+34819, libc.VaList(bp, func() uintptr { if (*Fts5ExprNearset)(unsafe.Pointer(pNear)).FnPhrase == 1 { - return ts + 34822 + return ts + 34869 } - return ts + 34685 + return ts + 34732 }())) Xsqlite3_free(tls, pRet) pRet = uintptr(0) @@ -141243,7 +141301,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpReader == uintptr(0) && rc == SQLITE_OK { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig rc = Xsqlite3_blob_open(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, - (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl, ts+34829, 
iRowid, 0, p+56) + (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl, ts+34876, iRowid, 0, p+56) } if rc == SQLITE_ERROR { @@ -141322,7 +141380,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpWriter == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig fts5IndexPrepareStmt(tls, p, p+64, Xsqlite3_mprintf(tls, - ts+34835, + ts+34882, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName))) if (*Fts5Index)(unsafe.Pointer(p)).Frc != 0 { return @@ -141347,7 +141405,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpDeleter == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig var zSql uintptr = Xsqlite3_mprintf(tls, - ts+34886, + ts+34933, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) if fts5IndexPrepareStmt(tls, p, p+72, zSql) != 0 { return @@ -141370,7 +141428,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpIdxDeleter == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig fts5IndexPrepareStmt(tls, p, p+88, Xsqlite3_mprintf(tls, - ts+34935, + ts+34982, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName))) } if (*Fts5Index)(unsafe.Pointer(p)).Frc == SQLITE_OK { @@ -141609,7 +141667,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).Frc == SQLITE_OK { if (*Fts5Index)(unsafe.Pointer(p)).FpDataVersion == uintptr(0) { (*Fts5Index)(unsafe.Pointer(p)).Frc = fts5IndexPrepareStmt(tls, p, p+112, - Xsqlite3_mprintf(tls, ts+34975, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer((*Fts5Index)(unsafe.Pointer(p)).FpConfig)).FzDb))) + Xsqlite3_mprintf(tls, ts+35022, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer((*Fts5Index)(unsafe.Pointer(p)).FpConfig)).FzDb))) if (*Fts5Index)(unsafe.Pointer(p)).Frc != 0 { return int64(0) } @@ -142808,7 +142866,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpIdxSelect == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig fts5IndexPrepareStmt(tls, p, p+96, Xsqlite3_mprintf(tls, - ts+34998, + ts+35045, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName))) } return (*Fts5Index)(unsafe.Pointer(p)).FpIdxSelect @@ -144274,7 +144332,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpIdxWriter == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig fts5IndexPrepareStmt(tls, p, p+80, Xsqlite3_mprintf(tls, - ts+35082, + ts+35129, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName))) } @@ -145356,13 +145414,13 @@ if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { (*Fts5Index)(unsafe.Pointer(p)).FpConfig = pConfig (*Fts5Index)(unsafe.Pointer(p)).FnWorkUnit = FTS5_WORK_UNIT - (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl = sqlite3Fts5Mprintf(tls, bp+8, ts+35139, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) + (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl = sqlite3Fts5Mprintf(tls, bp+8, ts+35186, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) if (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl != 0 && bCreate != 0 { *(*int32)(unsafe.Pointer(bp + 8)) = sqlite3Fts5CreateTable(tls, - pConfig, ts+25056, ts+35147, 0, pzErr) + pConfig, ts+25103, ts+35194, 0, pzErr) if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { *(*int32)(unsafe.Pointer(bp + 8)) = sqlite3Fts5CreateTable(tls, pConfig, ts+11488, - ts+35182, + ts+35229, 1, pzErr) } if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { @@ 
-145615,7 +145673,7 @@ sqlite3Fts5Put32(tls, bp, iNew) rc = Xsqlite3_blob_open(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl, - ts+34829, int64(FTS5_STRUCTURE_ROWID), 1, bp+8) + ts+34876, int64(FTS5_STRUCTURE_ROWID), 1, bp+8) if rc == SQLITE_OK { Xsqlite3_blob_write(tls, *(*uintptr)(unsafe.Pointer(bp + 8)), bp, 4, 0) rc = Xsqlite3_blob_close(tls, *(*uintptr)(unsafe.Pointer(bp + 8))) @@ -145729,7 +145787,7 @@ } fts5IndexPrepareStmt(tls, p, bp+24, Xsqlite3_mprintf(tls, - ts+35226, + ts+35273, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, (*Fts5StructureSegment)(unsafe.Pointer(pSeg)).FiSegid))) for (*Fts5Index)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) { @@ -145899,7 +145957,7 @@ } else { (*Fts5Buffer)(unsafe.Pointer(bp + 16)).Fn = 0 fts5SegiterPoslist(tls, p, *(*uintptr)(unsafe.Pointer(bp))+96+uintptr((*Fts5CResult)(unsafe.Pointer((*Fts5Iter)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FaFirst+1*4)).FiFirst)*120, uintptr(0), bp+16) - sqlite3Fts5BufferAppendBlob(tls, p+52, bp+16, uint32(4), ts+35312) + sqlite3Fts5BufferAppendBlob(tls, p+52, bp+16, uint32(4), ts+35359) for 0 == sqlite3Fts5PoslistNext64(tls, (*Fts5Buffer)(unsafe.Pointer(bp+16)).Fp, (*Fts5Buffer)(unsafe.Pointer(bp+16)).Fn, bp+32, bp+40) { var iCol int32 = int32(*(*I64)(unsafe.Pointer(bp + 40)) >> 32) var iTokOff int32 = int32(*(*I64)(unsafe.Pointer(bp + 40)) & int64(0x7FFFFFFF)) @@ -146170,7 +146228,7 @@ if (*Fts5Config)(unsafe.Pointer(pConfig)).FbLock != 0 { (*Fts5Table)(unsafe.Pointer(pTab)).Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+35317, 0) + ts+35364, 0) return SQLITE_ERROR } @@ -146594,7 +146652,7 @@ (*Fts5Sorter)(unsafe.Pointer(pSorter)).FnIdx = nPhrase rc = fts5PrepareStatement(tls, pSorter, pConfig, - ts+35356, + ts+35403, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zRank, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, func() uintptr { if zRankArgs != 0 { @@ -146610,9 +146668,9 @@ }(), func() uintptr { if bDesc != 0 { - return ts + 35411 + return ts + 35458 } - return ts + 35416 + return ts + 35463 }())) (*Fts5Cursor)(unsafe.Pointer(pCsr)).FpSorter = pSorter @@ -146658,12 +146716,12 @@ (*Fts5Cursor)(unsafe.Pointer(pCsr)).FePlan = FTS5_PLAN_SPECIAL - if n == 5 && 0 == Xsqlite3_strnicmp(tls, ts+35420, z, n) { + if n == 5 && 0 == Xsqlite3_strnicmp(tls, ts+35467, z, n) { (*Fts5Cursor)(unsafe.Pointer(pCsr)).FiSpecial = I64(sqlite3Fts5IndexReads(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.FpIndex)) } else if n == 2 && 0 == Xsqlite3_strnicmp(tls, ts+5057, z, n) { (*Fts5Cursor)(unsafe.Pointer(pCsr)).FiSpecial = (*Fts5Cursor)(unsafe.Pointer(pCsr)).FiCsrId } else { - (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, ts+35426, libc.VaList(bp, n, z)) + (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, ts+35473, libc.VaList(bp, n, z)) rc = SQLITE_ERROR } @@ -146694,7 +146752,7 @@ var zRankArgs uintptr = (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRankArgs if zRankArgs != 0 { - var zSql uintptr = sqlite3Fts5Mprintf(tls, bp+16, ts+35454, libc.VaList(bp, zRankArgs)) + var zSql uintptr = sqlite3Fts5Mprintf(tls, bp+16, ts+35501, libc.VaList(bp, zRankArgs)) if zSql != 0 { *(*uintptr)(unsafe.Pointer(bp + 24)) = uintptr(0) *(*int32)(unsafe.Pointer(bp + 16)) = Xsqlite3_prepare_v3(tls, 
(*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, zSql, -1, @@ -146725,7 +146783,7 @@ if *(*int32)(unsafe.Pointer(bp + 16)) == SQLITE_OK { pAux = fts5FindAuxiliary(tls, pTab, zRank) if pAux == uintptr(0) { - (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, ts+35464, libc.VaList(bp+8, zRank)) + (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, ts+35511, libc.VaList(bp+8, zRank)) *(*int32)(unsafe.Pointer(bp + 16)) = SQLITE_ERROR } } @@ -146757,14 +146815,14 @@ *(*int32)(unsafe.Pointer(pCsr + 80)) |= FTS5CSR_FREE_ZRANK } else if rc == SQLITE_ERROR { (*Sqlite3_vtab)(unsafe.Pointer((*Fts5Cursor)(unsafe.Pointer(pCsr)).Fbase.FpVtab)).FzErrMsg = Xsqlite3_mprintf(tls, - ts+35485, libc.VaList(bp, z)) + ts+35532, libc.VaList(bp, z)) } } else { if (*Fts5Config)(unsafe.Pointer(pConfig)).FzRank != 0 { (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRank = (*Fts5Config)(unsafe.Pointer(pConfig)).FzRank (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRankArgs = (*Fts5Config)(unsafe.Pointer(pConfig)).FzRankArgs } else { - (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRank = ts + 33868 + (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRank = ts + 33915 (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRankArgs = uintptr(0) } } @@ -146820,7 +146878,7 @@ goto __1 } (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+35317, 0) + ts+35364, 0) return SQLITE_ERROR __1: ; @@ -147037,7 +147095,7 @@ goto __40 } *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(pConfig)).FpzErrmsg)) = Xsqlite3_mprintf(tls, - ts+35518, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) + ts+35565, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) rc = SQLITE_ERROR goto __41 __40: @@ -147182,28 +147240,28 @@ var rc int32 = SQLITE_OK *(*int32)(unsafe.Pointer(bp)) = 0 - if 0 == Xsqlite3_stricmp(tls, ts+35554, zCmd) { + if 0 == Xsqlite3_stricmp(tls, ts+35601, zCmd) { if (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NORMAL { fts5SetVtabError(tls, pTab, - ts+35565, 0) + ts+35612, 0) rc = SQLITE_ERROR } else { rc = sqlite3Fts5StorageDeleteAll(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage) } - } else if 0 == Xsqlite3_stricmp(tls, ts+35645, zCmd) { + } else if 0 == Xsqlite3_stricmp(tls, ts+35692, zCmd) { if (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NONE { fts5SetVtabError(tls, pTab, - ts+35653, 0) + ts+35700, 0) rc = SQLITE_ERROR } else { rc = sqlite3Fts5StorageRebuild(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage) } } else if 0 == Xsqlite3_stricmp(tls, ts+16934, zCmd) { rc = sqlite3Fts5StorageOptimize(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage) - } else if 0 == Xsqlite3_stricmp(tls, ts+35709, zCmd) { + } else if 0 == Xsqlite3_stricmp(tls, ts+35756, zCmd) { var nMerge int32 = Xsqlite3_value_int(tls, pVal) rc = sqlite3Fts5StorageMerge(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage, nMerge) - } else if 0 == Xsqlite3_stricmp(tls, ts+35715, zCmd) { + } else if 0 == Xsqlite3_stricmp(tls, ts+35762, zCmd) { var iArg int32 = Xsqlite3_value_int(tls, pVal) rc = sqlite3Fts5StorageIntegrity(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage, iArg) } else { @@ -147274,12 +147332,12 @@ if eType0 == SQLITE_INTEGER && fts5IsContentless(tls, pTab) != 0 { (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+35731, + ts+35778, libc.VaList(bp, func() uintptr { if nArg > 1 { - return ts + 20429 + return ts + 20476 } - return ts + 35768 + return ts + 35815 }(), 
(*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) *(*int32)(unsafe.Pointer(bp + 16)) = SQLITE_ERROR } else if nArg == 1 { @@ -147909,7 +147967,7 @@ pCsr = fts5CursorFromCsrid(tls, (*Fts5Auxiliary)(unsafe.Pointer(pAux)).FpGlobal, iCsrId) if pCsr == uintptr(0) || (*Fts5Cursor)(unsafe.Pointer(pCsr)).FePlan == 0 { - var zErr uintptr = Xsqlite3_mprintf(tls, ts+35780, libc.VaList(bp, iCsrId)) + var zErr uintptr = Xsqlite3_mprintf(tls, ts+35827, libc.VaList(bp, iCsrId)) Xsqlite3_result_error(tls, context, zErr, -1) Xsqlite3_free(tls, zErr) } else { @@ -148153,7 +148211,7 @@ }()) if pMod == uintptr(0) { rc = SQLITE_ERROR - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35801, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(azArg)))) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35848, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(azArg)))) } else { rc = (*struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 @@ -148172,7 +148230,7 @@ (*Fts5Config)(unsafe.Pointer(pConfig)).FpTokApi = pMod + 16 if rc != SQLITE_OK { if pzErr != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35823, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35870, 0) } } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FePattern = sqlite3Fts5TokenizerPattern(tls, @@ -148219,7 +148277,7 @@ var ppApi uintptr _ = nArg - ppApi = Xsqlite3_value_pointer(tls, *(*uintptr)(unsafe.Pointer(apArg)), ts+35854) + ppApi = Xsqlite3_value_pointer(tls, *(*uintptr)(unsafe.Pointer(apArg)), ts+35901) if ppApi != 0 { *(*uintptr)(unsafe.Pointer(ppApi)) = pGlobal } @@ -148228,7 +148286,7 @@ func fts5SourceIdFunc(tls *libc.TLS, pCtx uintptr, nArg int32, apUnused uintptr) { _ = nArg _ = apUnused - Xsqlite3_result_text(tls, pCtx, ts+35867, -1, libc.UintptrFromInt32(-1)) + Xsqlite3_result_text(tls, pCtx, ts+35914, -1, libc.UintptrFromInt32(-1)) } func fts5ShadowName(tls *libc.TLS, zName uintptr) int32 { @@ -148242,7 +148300,7 @@ } var azName2 = [5]uintptr{ - ts + 35958, ts + 34057, ts + 25056, ts + 34408, ts + 11488, + ts + 36005, ts + 34104, ts + 25103, ts + 34455, ts + 11488, } func fts5Init(tls *libc.TLS, db uintptr) int32 { @@ -148266,7 +148324,7 @@ (*Fts5Global)(unsafe.Pointer(pGlobal)).Fapi.FxFindTokenizer = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr, uintptr) int32 }{fts5FindTokenizer})) - rc = Xsqlite3_create_module_v2(tls, db, ts+35965, uintptr(unsafe.Pointer(&fts5Mod)), p, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5ModuleDestroy}))) + rc = Xsqlite3_create_module_v2(tls, db, ts+36012, uintptr(unsafe.Pointer(&fts5Mod)), p, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5ModuleDestroy}))) if rc == SQLITE_OK { rc = sqlite3Fts5IndexInit(tls, db) } @@ -148284,13 +148342,13 @@ } if rc == SQLITE_OK { rc = Xsqlite3_create_function(tls, - db, ts+35965, 1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { + db, ts+36012, 1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{fts5Fts5Func})), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { rc = Xsqlite3_create_function(tls, - db, ts+35970, 0, + db, ts+36017, 0, SQLITE_UTF8|SQLITE_DETERMINISTIC|SQLITE_INNOCUOUS, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) @@ -148347,17 +148405,17 @@ if *(*uintptr)(unsafe.Pointer(p + 40 + uintptr(eStmt)*8)) == uintptr(0) { *(*[11]uintptr)(unsafe.Pointer(bp + 128)) = [11]uintptr{ - ts + 35985, - ts + 36053, - ts + 36122, - ts + 36155, - 
ts + 36194, - ts + 36234, - ts + 36273, - ts + 36314, - ts + 36353, - ts + 36395, - ts + 36435, + ts + 36032, + ts + 36100, + ts + 36169, + ts + 36202, + ts + 36241, + ts + 36281, + ts + 36320, + ts + 36361, + ts + 36400, + ts + 36442, + ts + 36482, } var pC uintptr = (*Fts5Storage)(unsafe.Pointer(p)).FpConfig var zSql uintptr = uintptr(0) @@ -148459,18 +148517,18 @@ defer tls.Free(80) var rc int32 = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36458, + ts+36505, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36562, + ts+36609, libc.VaList(bp+48, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) } if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NORMAL { rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36600, + ts+36647, libc.VaList(bp+64, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) } return rc @@ -148482,7 +148540,7 @@ if *(*int32)(unsafe.Pointer(pRc)) == SQLITE_OK { *(*int32)(unsafe.Pointer(pRc)) = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36638, + ts+36685, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zTail, zName, zTail)) } } @@ -148494,14 +148552,14 @@ var pConfig uintptr = (*Fts5Storage)(unsafe.Pointer(pStorage)).FpConfig *(*int32)(unsafe.Pointer(bp)) = sqlite3Fts5StorageSync(tls, pStorage) - fts5StorageRenameOne(tls, pConfig, bp, ts+25056, zName) + fts5StorageRenameOne(tls, pConfig, bp, ts+25103, zName) fts5StorageRenameOne(tls, pConfig, bp, ts+11488, zName) - fts5StorageRenameOne(tls, pConfig, bp, ts+35958, zName) + fts5StorageRenameOne(tls, pConfig, bp, ts+36005, zName) if (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { - fts5StorageRenameOne(tls, pConfig, bp, ts+34408, zName) + fts5StorageRenameOne(tls, pConfig, bp, ts+34455, zName) } if (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NORMAL { - fts5StorageRenameOne(tls, pConfig, bp, ts+34057, zName) + fts5StorageRenameOne(tls, pConfig, bp, ts+34104, zName) } return *(*int32)(unsafe.Pointer(bp)) } @@ -148513,17 +148571,17 @@ var rc int32 *(*uintptr)(unsafe.Pointer(bp + 64)) = uintptr(0) - rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, bp+64, ts+36680, + rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, bp+64, ts+36727, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zPost, zDefn, func() uintptr { if bWithout != 0 { - return ts + 29709 + return ts + 29756 } return ts + 1554 }())) if *(*uintptr)(unsafe.Pointer(bp + 64)) != 0 { *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, - ts+36710, + ts+36757, libc.VaList(bp+40, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zPost, *(*uintptr)(unsafe.Pointer(bp + 64)))) Xsqlite3_free(tls, *(*uintptr)(unsafe.Pointer(bp + 64))) } @@ -148560,27 +148618,27 @@ } else { var i int32 var iOff int32 - Xsqlite3_snprintf(tls, nDefn, zDefn, ts+36754, 0) + 
Xsqlite3_snprintf(tls, nDefn, zDefn, ts+36801, 0) iOff = int32(libc.Xstrlen(tls, zDefn)) for i = 0; i < (*Fts5Config)(unsafe.Pointer(pConfig)).FnCol; i++ { - Xsqlite3_snprintf(tls, nDefn-iOff, zDefn+uintptr(iOff), ts+36777, libc.VaList(bp, i)) + Xsqlite3_snprintf(tls, nDefn-iOff, zDefn+uintptr(iOff), ts+36824, libc.VaList(bp, i)) iOff = iOff + int32(libc.Xstrlen(tls, zDefn+uintptr(iOff))) } - rc = sqlite3Fts5CreateTable(tls, pConfig, ts+34057, zDefn, 0, pzErr) + rc = sqlite3Fts5CreateTable(tls, pConfig, ts+34104, zDefn, 0, pzErr) } Xsqlite3_free(tls, zDefn) } if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { rc = sqlite3Fts5CreateTable(tls, - pConfig, ts+34408, ts+36783, 0, pzErr) + pConfig, ts+34455, ts+36830, 0, pzErr) } if rc == SQLITE_OK { rc = sqlite3Fts5CreateTable(tls, - pConfig, ts+35958, ts+36815, 1, pzErr) + pConfig, ts+36005, ts+36862, 1, pzErr) } if rc == SQLITE_OK { - rc = sqlite3Fts5StorageConfigValue(tls, p, ts+34554, uintptr(0), FTS5_CURRENT_VERSION) + rc = sqlite3Fts5StorageConfigValue(tls, p, ts+34601, uintptr(0), FTS5_CURRENT_VERSION) } } @@ -148786,12 +148844,12 @@ (*Fts5Storage)(unsafe.Pointer(p)).FbTotalsValid = 0 rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36832, + ts+36879, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36882, + ts+36929, libc.VaList(bp+32, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) } @@ -148799,7 +148857,7 @@ rc = sqlite3Fts5IndexReinit(tls, (*Fts5Storage)(unsafe.Pointer(p)).FpIndex) } if rc == SQLITE_OK { - rc = sqlite3Fts5StorageConfigValue(tls, p, ts+34554, uintptr(0), FTS5_CURRENT_VERSION) + rc = sqlite3Fts5StorageConfigValue(tls, p, ts+34601, uintptr(0), FTS5_CURRENT_VERSION) } return rc } @@ -148975,7 +149033,7 @@ var zSql uintptr var rc int32 - zSql = Xsqlite3_mprintf(tls, ts+36911, + zSql = Xsqlite3_mprintf(tls, ts+36958, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zSuffix)) if zSql == uintptr(0) { rc = SQLITE_NOMEM @@ -149157,14 +149215,14 @@ if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NORMAL { *(*I64)(unsafe.Pointer(bp + 48)) = int64(0) - rc = fts5StorageCount(tls, p, ts+34057, bp+48) + rc = fts5StorageCount(tls, p, ts+34104, bp+48) if rc == SQLITE_OK && *(*I64)(unsafe.Pointer(bp + 48)) != (*Fts5Storage)(unsafe.Pointer(p)).FnTotalRow { rc = SQLITE_CORRUPT | int32(1)<<8 } } if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { *(*I64)(unsafe.Pointer(bp + 56)) = int64(0) - rc = fts5StorageCount(tls, p, ts+34408, bp+56) + rc = fts5StorageCount(tls, p, ts+34455, bp+56) if rc == SQLITE_OK && *(*I64)(unsafe.Pointer(bp + 56)) != (*Fts5Storage)(unsafe.Pointer(p)).FnTotalRow { rc = SQLITE_CORRUPT | int32(1)<<8 } @@ -149359,9 +149417,9 @@ libc.Xmemcpy(tls, p, uintptr(unsafe.Pointer(&aAsciiTokenChar)), uint64(unsafe.Sizeof(aAsciiTokenChar))) for i = 0; rc == SQLITE_OK && i < nArg; i = i + 2 { var zArg uintptr = *(*uintptr)(unsafe.Pointer(azArg + uintptr(i+1)*8)) - if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36943) { + if 0 == 
Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36990) { fts5AsciiAddExceptions(tls, p, zArg, 1) - } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36954) { + } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37001) { fts5AsciiAddExceptions(tls, p, zArg, 0) } else { rc = SQLITE_ERROR @@ -149576,7 +149634,7 @@ } else { p = Xsqlite3_malloc(tls, int32(unsafe.Sizeof(Unicode61Tokenizer{}))) if p != 0 { - var zCat uintptr = ts + 36965 + var zCat uintptr = ts + 37012 var i int32 libc.Xmemset(tls, p, 0, uint64(unsafe.Sizeof(Unicode61Tokenizer{}))) @@ -149588,7 +149646,7 @@ } for i = 0; rc == SQLITE_OK && i < nArg; i = i + 2 { - if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36974) { + if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37021) { zCat = *(*uintptr)(unsafe.Pointer(azArg + uintptr(i+1)*8)) } } @@ -149599,18 +149657,18 @@ for i = 0; rc == SQLITE_OK && i < nArg; i = i + 2 { var zArg uintptr = *(*uintptr)(unsafe.Pointer(azArg + uintptr(i+1)*8)) - if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36985) { + if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37032) { if int32(*(*uint8)(unsafe.Pointer(zArg))) != '0' && int32(*(*uint8)(unsafe.Pointer(zArg))) != '1' && int32(*(*uint8)(unsafe.Pointer(zArg))) != '2' || *(*uint8)(unsafe.Pointer(zArg + 1)) != 0 { rc = SQLITE_ERROR } else { (*Unicode61Tokenizer)(unsafe.Pointer(p)).FeRemoveDiacritic = int32(*(*uint8)(unsafe.Pointer(zArg))) - '0' } - } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36943) { + } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36990) { rc = fts5UnicodeAddExceptions(tls, p, zArg, 1) - } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36954) { + } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37001) { rc = fts5UnicodeAddExceptions(tls, p, zArg, 0) - } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36974) { + } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37021) { } else { rc = SQLITE_ERROR } @@ -149886,7 +149944,7 @@ var rc int32 = SQLITE_OK var pRet uintptr *(*uintptr)(unsafe.Pointer(bp)) = uintptr(0) - var zBase uintptr = ts + 37003 + var zBase uintptr = ts + 37050 if nArg > 0 { zBase = *(*uintptr)(unsafe.Pointer(azArg)) @@ -150028,7 +150086,7 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*uint8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'a': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37013, aBuf+uintptr(nBuf-2), uint64(2)) { + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37060, aBuf+uintptr(nBuf-2), uint64(2)) { if fts5Porter_MGt1(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 } @@ -150036,11 +150094,11 @@ break case 'c': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37016, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37063, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37021, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37068, aBuf+uintptr(nBuf-4), uint64(4)) { if 
fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } @@ -150048,7 +150106,7 @@ break case 'e': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37026, aBuf+uintptr(nBuf-2), uint64(2)) { + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37073, aBuf+uintptr(nBuf-2), uint64(2)) { if fts5Porter_MGt1(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 } @@ -150056,7 +150114,7 @@ break case 'i': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37029, aBuf+uintptr(nBuf-2), uint64(2)) { + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37076, aBuf+uintptr(nBuf-2), uint64(2)) { if fts5Porter_MGt1(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 } @@ -150064,11 +150122,11 @@ break case 'l': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37032, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37079, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37037, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37084, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } @@ -150076,19 +150134,19 @@ break case 'n': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37042, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37089, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37046, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37093, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt1(tls, aBuf, nBuf-5) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37052, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37099, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } - } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37057, aBuf+uintptr(nBuf-3), uint64(3)) { + } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37104, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -150096,11 +150154,11 @@ break case 'o': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37061, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37108, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1_and_S_or_T(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } - } else if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37065, aBuf+uintptr(nBuf-2), uint64(2)) { + } else if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37112, aBuf+uintptr(nBuf-2), uint64(2)) { if fts5Porter_MGt1(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 } @@ -150108,7 +150166,7 @@ break case 's': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37068, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37115, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -150116,11 +150174,11 @@ break case 't': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37072, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37119, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 
{ *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } - } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37076, aBuf+uintptr(nBuf-3), uint64(3)) { + } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37123, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -150128,7 +150186,7 @@ break case 'u': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37080, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37127, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -150136,7 +150194,7 @@ break case 'v': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37084, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37131, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -150144,7 +150202,7 @@ break case 'z': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37088, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37135, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -150160,24 +150218,24 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*uint8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'a': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37092, aBuf+uintptr(nBuf-2), uint64(2)) { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37072, uint64(3)) + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37139, aBuf+uintptr(nBuf-2), uint64(2)) { + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37119, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 + 3 ret = 1 } break case 'b': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37095, aBuf+uintptr(nBuf-2), uint64(2)) { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37098, uint64(3)) + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37142, aBuf+uintptr(nBuf-2), uint64(2)) { + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37145, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 + 3 ret = 1 } break case 'i': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37102, aBuf+uintptr(nBuf-2), uint64(2)) { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37088, uint64(3)) + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37149, aBuf+uintptr(nBuf-2), uint64(2)) { + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37135, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 + 3 ret = 1 } @@ -150192,44 +150250,44 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*uint8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'a': - if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37105, aBuf+uintptr(nBuf-7), uint64(7)) { + if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37152, aBuf+uintptr(nBuf-7), uint64(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37072, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37119, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } - } else if nBuf > 6 && 0 == libc.Xmemcmp(tls, ts+37113, aBuf+uintptr(nBuf-6), uint64(6)) { + } else if nBuf > 6 && 0 == libc.Xmemcmp(tls, ts+37160, aBuf+uintptr(nBuf-6), uint64(6)) { if fts5Porter_MGt0(tls, aBuf, nBuf-6) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-6), ts+37120, uint64(4)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-6), ts+37167, uint64(4)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 6 + 4 } } break case 'c': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37125, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == 
libc.Xmemcmp(tls, ts+37172, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37021, uint64(4)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37068, uint64(4)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 4 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37130, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37177, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37016, uint64(4)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37063, uint64(4)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 4 } } break case 'e': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37135, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37182, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37088, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37135, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 3 } } break case 'g': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37140, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37187, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+15480, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 3 @@ -150238,91 +150296,91 @@ break case 'l': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37145, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37192, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt0(tls, aBuf, nBuf-3) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37098, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37145, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 + 3 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37149, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37196, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37013, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37060, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 2 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37154, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37201, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37057, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37104, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 3 } - } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37160, aBuf+uintptr(nBuf-3), uint64(3)) { + } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37207, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt0(tls, aBuf, nBuf-3) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37164, uint64(1)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37211, uint64(1)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 + 1 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37166, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37213, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37080, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37127, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 3 } } break case 'o': - if nBuf > 7 && 0 == libc.Xmemcmp(tls, 
ts+37172, aBuf+uintptr(nBuf-7), uint64(7)) { + if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37219, aBuf+uintptr(nBuf-7), uint64(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37088, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37135, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37180, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37227, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37072, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37119, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 3 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37186, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37233, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37072, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37119, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 3 } } break case 's': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37191, aBuf+uintptr(nBuf-5), uint64(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37238, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37013, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37060, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } - } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37197, aBuf+uintptr(nBuf-7), uint64(7)) { + } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37244, aBuf+uintptr(nBuf-7), uint64(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37084, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37131, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } - } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37205, aBuf+uintptr(nBuf-7), uint64(7)) { + } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37252, aBuf+uintptr(nBuf-7), uint64(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37213, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37260, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } - } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37217, aBuf+uintptr(nBuf-7), uint64(7)) { + } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37264, aBuf+uintptr(nBuf-7), uint64(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37080, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37127, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } } break case 't': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37225, aBuf+uintptr(nBuf-5), uint64(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37272, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37013, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37060, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37231, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37278, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37084, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37131, uint64(3)) 
*(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 3 } - } else if nBuf > 6 && 0 == libc.Xmemcmp(tls, ts+37237, aBuf+uintptr(nBuf-6), uint64(6)) { + } else if nBuf > 6 && 0 == libc.Xmemcmp(tls, ts+37284, aBuf+uintptr(nBuf-6), uint64(6)) { if fts5Porter_MGt0(tls, aBuf, nBuf-6) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-6), ts+37098, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-6), ts+37145, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 6 + 3 } } @@ -150337,16 +150395,16 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*uint8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'a': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37244, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37291, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37029, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37076, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 2 } } break case 's': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37249, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37296, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } @@ -150354,21 +150412,21 @@ break case 't': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37254, aBuf+uintptr(nBuf-5), uint64(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37301, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37029, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37076, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37260, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37307, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37029, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37076, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } } break case 'u': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37213, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37260, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt0(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -150376,7 +150434,7 @@ break case 'v': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37266, aBuf+uintptr(nBuf-5), uint64(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37313, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 } @@ -150384,9 +150442,9 @@ break case 'z': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37272, aBuf+uintptr(nBuf-5), uint64(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37319, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37013, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37060, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } } @@ -150401,12 +150459,12 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*uint8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'e': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37278, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37325, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt0(tls, aBuf, nBuf-3) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), 
ts+37282, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37329, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 + 2 } - } else if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37285, aBuf+uintptr(nBuf-2), uint64(2)) { + } else if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37332, aBuf+uintptr(nBuf-2), uint64(2)) { if fts5Porter_Vowel(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 ret = 1 @@ -150415,7 +150473,7 @@ break case 'n': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37288, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37335, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_Vowel(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 ret = 1 @@ -150571,7 +150629,7 @@ (*TrigramTokenizer)(unsafe.Pointer(pNew)).FbFold = 1 for i = 0; rc == SQLITE_OK && i < nArg; i = i + 2 { var zArg uintptr = *(*uintptr)(unsafe.Pointer(azArg + uintptr(i+1)*8)) - if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37292) { + if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37339) { if int32(*(*uint8)(unsafe.Pointer(zArg))) != '0' && int32(*(*uint8)(unsafe.Pointer(zArg))) != '1' || *(*uint8)(unsafe.Pointer(zArg + 1)) != 0 { rc = SQLITE_ERROR } else { @@ -150751,22 +150809,22 @@ defer tls.Free(128) *(*[4]BuiltinTokenizer)(unsafe.Pointer(bp)) = [4]BuiltinTokenizer{ - {FzName: ts + 37003, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { + {FzName: ts + 37050, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 }{fts5UnicodeCreate})), FxDelete: *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5UnicodeDelete})), FxTokenize: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, int32, uintptr) int32 }{fts5UnicodeTokenize}))}}, - {FzName: ts + 37307, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { + {FzName: ts + 37354, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 }{fts5AsciiCreate})), FxDelete: *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5AsciiDelete})), FxTokenize: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, int32, uintptr) int32 }{fts5AsciiTokenize}))}}, - {FzName: ts + 37313, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { + {FzName: ts + 37360, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 }{fts5PorterCreate})), FxDelete: *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5PorterDelete})), FxTokenize: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, int32, uintptr) int32 }{fts5PorterTokenize}))}}, - {FzName: ts + 37320, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { + {FzName: ts + 37367, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 }{fts5TriCreate})), FxDelete: *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5TriDelete})), FxTokenize: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, int32, uintptr) int32 @@ -151909,14 +151967,14 @@ var zCopy uintptr = sqlite3Fts5Strndup(tls, bp+8, zType, -1) if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { 
sqlite3Fts5Dequote(tls, zCopy) - if Xsqlite3_stricmp(tls, zCopy, ts+37328) == 0 { + if Xsqlite3_stricmp(tls, zCopy, ts+37375) == 0 { *(*int32)(unsafe.Pointer(peType)) = FTS5_VOCAB_COL - } else if Xsqlite3_stricmp(tls, zCopy, ts+37332) == 0 { + } else if Xsqlite3_stricmp(tls, zCopy, ts+37379) == 0 { *(*int32)(unsafe.Pointer(peType)) = FTS5_VOCAB_ROW - } else if Xsqlite3_stricmp(tls, zCopy, ts+37336) == 0 { + } else if Xsqlite3_stricmp(tls, zCopy, ts+37383) == 0 { *(*int32)(unsafe.Pointer(peType)) = FTS5_VOCAB_INSTANCE } else { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+37345, libc.VaList(bp, zCopy)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+37392, libc.VaList(bp, zCopy)) *(*int32)(unsafe.Pointer(bp + 8)) = SQLITE_ERROR } Xsqlite3_free(tls, zCopy) @@ -151942,19 +152000,19 @@ defer tls.Free(36) *(*[3]uintptr)(unsafe.Pointer(bp + 8)) = [3]uintptr{ - ts + 37379, - ts + 37419, - ts + 37454, + ts + 37426, + ts + 37466, + ts + 37501, } var pRet uintptr = uintptr(0) *(*int32)(unsafe.Pointer(bp + 32)) = SQLITE_OK var bDb int32 - bDb = libc.Bool32(argc == 6 && libc.Xstrlen(tls, *(*uintptr)(unsafe.Pointer(argv + 1*8))) == uint64(4) && libc.Xmemcmp(tls, ts+23352, *(*uintptr)(unsafe.Pointer(argv + 1*8)), uint64(4)) == 0) + bDb = libc.Bool32(argc == 6 && libc.Xstrlen(tls, *(*uintptr)(unsafe.Pointer(argv + 1*8))) == uint64(4) && libc.Xmemcmp(tls, ts+23399, *(*uintptr)(unsafe.Pointer(argv + 1*8)), uint64(4)) == 0) if argc != 5 && bDb == 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+37497, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+37544, 0) *(*int32)(unsafe.Pointer(bp + 32)) = SQLITE_ERROR } else { var nByte int32 @@ -152087,11 +152145,11 @@ if (*Fts5VocabTable)(unsafe.Pointer(pTab)).FbBusy != 0 { (*Sqlite3_vtab)(unsafe.Pointer(pVTab)).FzErrMsg = Xsqlite3_mprintf(tls, - ts+37530, libc.VaList(bp, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) + ts+37577, libc.VaList(bp, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) return SQLITE_ERROR } zSql = sqlite3Fts5Mprintf(tls, bp+64, - ts+37561, + ts+37608, libc.VaList(bp+16, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) if zSql != 0 { *(*int32)(unsafe.Pointer(bp + 64)) = Xsqlite3_prepare_v2(tls, (*Fts5VocabTable)(unsafe.Pointer(pTab)).Fdb, zSql, -1, bp+72, uintptr(0)) @@ -152115,7 +152173,7 @@ *(*uintptr)(unsafe.Pointer(bp + 72)) = uintptr(0) if *(*int32)(unsafe.Pointer(bp + 64)) == SQLITE_OK { (*Sqlite3_vtab)(unsafe.Pointer(pVTab)).FzErrMsg = Xsqlite3_mprintf(tls, - ts+37612, libc.VaList(bp+48, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) + ts+37659, libc.VaList(bp+48, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) *(*int32)(unsafe.Pointer(bp + 64)) = SQLITE_ERROR } } else { @@ -152510,7 +152568,7 @@ func sqlite3Fts5VocabInit(tls *libc.TLS, pGlobal uintptr, db uintptr) int32 { var p uintptr = pGlobal - return Xsqlite3_create_module_v2(tls, db, ts+37638, uintptr(unsafe.Pointer(&fts5Vocab)), p, uintptr(0)) + return Xsqlite3_create_module_v2(tls, db, ts+37685, uintptr(unsafe.Pointer(&fts5Vocab)), p, uintptr(0)) } var fts5Vocab = Sqlite3_module{ @@ -152532,7 +152590,7 @@ // ************* End of stmt.c 
*********************************************** // Return the source-id for this library func Xsqlite3_sourceid(tls *libc.TLS) uintptr { - return ts + 37648 + return ts + 37695 } func init() { @@ -153509,5 +153567,5 @@ *(*func(*libc.TLS, uintptr, int32, uintptr) int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&vfs_template)) + 128)) = rbuVfsGetLastError } -var ts1 = "3.41.0\x00ATOMIC_INTRINSICS=1\x00COMPILER=gcc-10.2.1 20210110\x00DEFAULT_AUTOVACUUM\x00DEFAULT_CACHE_SIZE=-2000\x00DEFAULT_FILE_FORMAT=4\x00DEFAULT_JOURNAL_SIZE_LIMIT=-1\x00DEFAULT_MEMSTATUS=0\x00DEFAULT_MMAP_SIZE=0\x00DEFAULT_PAGE_SIZE=4096\x00DEFAULT_PCACHE_INITSZ=20\x00DEFAULT_RECURSIVE_TRIGGERS\x00DEFAULT_SECTOR_SIZE=4096\x00DEFAULT_SYNCHRONOUS=2\x00DEFAULT_WAL_AUTOCHECKPOINT=1000\x00DEFAULT_WAL_SYNCHRONOUS=2\x00DEFAULT_WORKER_THREADS=0\x00ENABLE_COLUMN_METADATA\x00ENABLE_FTS5\x00ENABLE_GEOPOLY\x00ENABLE_MATH_FUNCTIONS\x00ENABLE_MEMORY_MANAGEMENT\x00ENABLE_OFFSET_SQL_FUNC\x00ENABLE_PREUPDATE_HOOK\x00ENABLE_RBU\x00ENABLE_RTREE\x00ENABLE_SESSION\x00ENABLE_SNAPSHOT\x00ENABLE_STAT4\x00ENABLE_UNLOCK_NOTIFY\x00LIKE_DOESNT_MATCH_BLOBS\x00MALLOC_SOFT_LIMIT=1024\x00MAX_ATTACHED=10\x00MAX_COLUMN=2000\x00MAX_COMPOUND_SELECT=500\x00MAX_DEFAULT_PAGE_SIZE=8192\x00MAX_EXPR_DEPTH=1000\x00MAX_FUNCTION_ARG=127\x00MAX_LENGTH=1000000000\x00MAX_LIKE_PATTERN_LENGTH=50000\x00MAX_MMAP_SIZE=0x7fff0000\x00MAX_PAGE_COUNT=1073741823\x00MAX_PAGE_SIZE=65536\x00MAX_SQL_LENGTH=1000000000\x00MAX_TRIGGER_DEPTH=1000\x00MAX_VARIABLE_NUMBER=32766\x00MAX_VDBE_OP=250000000\x00MAX_WORKER_THREADS=8\x00MUTEX_NOOP\x00SOUNDEX\x00SYSTEM_MALLOC\x00TEMP_STORE=1\x00THREADSAFE=1\x00BINARY\x00ANY\x00BLOB\x00INT\x00INTEGER\x00REAL\x00TEXT\x0020b:20e\x0020c:20e\x0020e\x0040f-21a-21d\x00now\x00local time unavailable\x00second\x00minute\x00hour\x00\x00\x00day\x00\x00\x00\x00month\x00\x00year\x00\x00\x00auto\x00julianday\x00localtime\x00unixepoch\x00utc\x00weekday \x00start of \x00month\x00year\x00day\x00%02d\x00%06.3f\x00%03d\x00%.16g\x00%lld\x00%04d\x00date\x00time\x00datetime\x00strftime\x00current_time\x00current_timestamp\x00current_date\x00failed to allocate %u bytes of memory\x00failed memory resize %u to %u bytes\x00out of memory\x000123456789ABCDEF0123456789abcdef\x00-x0\x00X0\x00%\x00NaN\x00Inf\x00\x00NULL\x00(NULL)\x00.\x00(join-%u)\x00(subquery-%u)\x00thstndrd\x00922337203685477580\x00API call with %s database connection 
pointer\x00unopened\x00invalid\x00Savepoint\x00AutoCommit\x00Transaction\x00Checkpoint\x00JournalMode\x00Vacuum\x00VFilter\x00VUpdate\x00Init\x00Goto\x00Gosub\x00InitCoroutine\x00Yield\x00MustBeInt\x00Jump\x00Once\x00If\x00IfNot\x00IsType\x00Not\x00IfNullRow\x00SeekLT\x00SeekLE\x00SeekGE\x00SeekGT\x00IfNotOpen\x00IfNoHope\x00NoConflict\x00NotFound\x00Found\x00SeekRowid\x00NotExists\x00Last\x00IfSmaller\x00SorterSort\x00Sort\x00Rewind\x00SorterNext\x00Prev\x00Next\x00IdxLE\x00IdxGT\x00IdxLT\x00Or\x00And\x00IdxGE\x00RowSetRead\x00RowSetTest\x00Program\x00FkIfZero\x00IsNull\x00NotNull\x00Ne\x00Eq\x00Gt\x00Le\x00Lt\x00Ge\x00ElseEq\x00IfPos\x00IfNotZero\x00DecrJumpZero\x00IncrVacuum\x00VNext\x00Filter\x00PureFunc\x00Function\x00Return\x00EndCoroutine\x00HaltIfNull\x00Halt\x00Integer\x00Int64\x00String\x00BeginSubrtn\x00Null\x00SoftNull\x00Blob\x00Variable\x00Move\x00Copy\x00SCopy\x00IntCopy\x00FkCheck\x00ResultRow\x00CollSeq\x00AddImm\x00RealAffinity\x00Cast\x00Permutation\x00Compare\x00IsTrue\x00ZeroOrNull\x00Offset\x00Column\x00TypeCheck\x00Affinity\x00MakeRecord\x00Count\x00ReadCookie\x00SetCookie\x00ReopenIdx\x00BitAnd\x00BitOr\x00ShiftLeft\x00ShiftRight\x00Add\x00Subtract\x00Multiply\x00Divide\x00Remainder\x00Concat\x00OpenRead\x00OpenWrite\x00BitNot\x00OpenDup\x00OpenAutoindex\x00String8\x00OpenEphemeral\x00SorterOpen\x00SequenceTest\x00OpenPseudo\x00Close\x00ColumnsUsed\x00SeekScan\x00SeekHit\x00Sequence\x00NewRowid\x00Insert\x00RowCell\x00Delete\x00ResetCount\x00SorterCompare\x00SorterData\x00RowData\x00Rowid\x00NullRow\x00SeekEnd\x00IdxInsert\x00SorterInsert\x00IdxDelete\x00DeferredSeek\x00IdxRowid\x00FinishSeek\x00Destroy\x00Clear\x00ResetSorter\x00CreateBtree\x00SqlExec\x00ParseSchema\x00LoadAnalysis\x00DropTable\x00DropIndex\x00Real\x00DropTrigger\x00IntegrityCk\x00RowSetAdd\x00Param\x00FkCounter\x00MemMax\x00OffsetLimit\x00AggInverse\x00AggStep\x00AggStep1\x00AggValue\x00AggFinal\x00Expire\x00CursorLock\x00CursorUnlock\x00TableLock\x00VBegin\x00VCreate\x00VDestroy\x00VOpen\x00VInitIn\x00VColumn\x00VRename\x00Pagecount\x00MaxPgcnt\x00ClrSubtype\x00FilterAdd\x00Trace\x00CursorHint\x00ReleaseReg\x00Noop\x00Explain\x00Abortable\x00open\x00close\x00access\x00getcwd\x00stat\x00fstat\x00ftruncate\x00fcntl\x00read\x00pread\x00pread64\x00write\x00pwrite\x00pwrite64\x00fchmod\x00fallocate\x00unlink\x00openDirectory\x00mkdir\x00rmdir\x00fchown\x00geteuid\x00mmap\x00munmap\x00mremap\x00getpagesize\x00readlink\x00lstat\x00ioctl\x00attempt to open \"%s\" as file descriptor %d\x00/dev/null\x00os_unix.c:%d: (%d) %s(%s) - %s\x00cannot fstat db file %s\x00file unlinked while open: %s\x00multiple links to file: %s\x00file renamed while open: %s\x00%s\x00full_fsync\x00%s-shm\x00readonly_shm\x00psow\x00unix-excl\x00%s.lock\x00/var/tmp\x00/usr/tmp\x00/tmp\x00SQLITE_TMPDIR\x00TMPDIR\x00%s/etilqs_%llx%c\x00modeof\x00fsync\x00/dev/urandom\x00unix\x00unix-none\x00unix-dotfile\x00memdb\x00memdb(%p,%lld)\x00PRAGMA \"%w\".page_count\x00ATTACH x AS %Q\x00recovered %d pages from %s\x00-journal\x00-wal\x00nolock\x00immutable\x00PRAGMA table_list\x00recovered %d frames from WAL file %s\x00cannot limit WAL size: %s\x00SQLite format 3\x00:memory:\x00@ \x00\n\x00invalid page number %d\x002nd reference to page %d\x00Failed to read ptrmap key=%d\x00Bad ptr map entry key=%d expected=(%d,%d) got=(%d,%d)\x00failed to get page %d\x00freelist leaf count too big on page %d\x00%s is %d but should be %d\x00size\x00overflow list length\x00Page %u: \x00unable to get the page. 
error code=%d\x00btreeInitPage() returns error code %d\x00free space corruption\x00On tree page %u cell %d: \x00On page %u at right child: \x00Offset %d out of range %d..%d\x00Extends off end of page\x00Rowid %lld out of order\x00Child page depth differs\x00Multiple uses for byte %u of page %u\x00Fragmentation of %d bytes reported as %d on page %u\x00Main freelist: \x00max rootpage (%d) disagrees with header (%d)\x00incremental_vacuum enabled with a max rootpage of zero\x00Page %d is never used\x00Pointer map page %d is referenced\x00unknown database %s\x00destination database is in use\x00source and destination must be distinct\x00%!.15g\x00-\x00%s%s\x00k(%d\x00B\x00,%s%s%s\x00N.\x00)\x00%.18s-%s\x00%s(%d)\x00%d\x00(blob)\x00vtab:%p\x00%c%u\x00]\x00program\x00?\x008\x0016LE\x0016BE\x00addr\x00opcode\x00p1\x00p2\x00p3\x00p4\x00p5\x00comment\x00id\x00parent\x00notused\x00detail\x00%.4c%s%.16c\x00MJ delete: %s\x00MJ collide: %s\x00-mj%06X9%02X\x00FOREIGN KEY constraint failed\x00a CHECK constraint\x00a generated column\x00an index\x00non-deterministic use of %s() in %s\x00API called with finalized prepared statement\x00API called with NULL prepared statement\x00string or blob too big\x00bind on a busy prepared statement: [%s]\x00-- \x00'%.*q'\x00zeroblob(%d)\x00x'\x00%02x\x00'\x00%s constraint failed\x00%z: %s\x00abort at %d in [%s]: %s\x00cannot store %s value in %s column %s.%s\x00cannot open savepoint - SQL statements in progress\x00no such savepoint: %s\x00cannot release savepoint - SQL statements in progress\x00cannot commit transaction - SQL statements in progress\x00cannot start a transaction within a transaction\x00cannot rollback - no transaction is active\x00cannot commit - no transaction is active\x00database schema has changed\x00index corruption\x00sqlite_master\x00SELECT*FROM\"%w\".%s WHERE %s ORDER BY rowid\x00too many levels of trigger recursion\x00cannot change %s wal mode from within a transaction\x00into\x00out of\x00database table is locked: %s\x00ValueList\x00-- %s\x00statement aborts at %d: [%s] %s\x00NOT NULL\x00UNIQUE\x00CHECK\x00FOREIGN KEY\x00cannot open value of type %s\x00null\x00real\x00integer\x00no such rowid: %lld\x00cannot open virtual table: %s\x00cannot open table without rowid: %s\x00cannot open view: %s\x00no such column: \"%s\"\x00foreign key\x00indexed\x00cannot open %s column for writing\x00sqlite_\x00sqlite_temp_master\x00sqlite_temp_schema\x00sqlite_schema\x00main\x00*\x00new\x00old\x00excluded\x00misuse of aliased aggregate %s\x00misuse of aliased window function %s\x00row value misused\x00double-quoted string literal: \"%w\"\x00coalesce\x00no such column\x00ambiguous column name\x00%s: %s.%s.%s\x00%s: %s.%s\x00%s: %s\x00partial index WHERE clauses\x00index expressions\x00CHECK constraints\x00generated columns\x00%s prohibited in %s\x00true\x00false\x00the \".\" operator\x00second argument to %#T() must be a constant between 0.0 and 1.0\x00not authorized to use function: %#T\x00non-deterministic functions\x00%#T() may not be used as a window function\x00window\x00aggregate\x00misuse of %s function %#T()\x00no such function: %#T\x00wrong number of arguments to function %#T()\x00FILTER may not be used with non-aggregate %#T()\x00subqueries\x00parameters\x00%r %s BY term out of range - should be between 1 and %d\x00too many terms in ORDER BY clause\x00ORDER\x00%r ORDER BY term does not match any column in the result set\x00too many terms in %s BY clause\x00HAVING clause on a non-aggregate query\x00GROUP\x00aggregate functions are not allowed in the 
GROUP BY clause\x00Expression tree is too large (maximum depth %d)\x00IN(...) element has %d term%s - expected %d\x00s\x000\x00too many arguments on function %T\x00unsafe use of %#T()\x00variable number must be between ?1 and ?%d\x00too many SQL variables\x00%d columns assigned %d values\x00too many columns in %s\x00_ROWID_\x00ROWID\x00OID\x00USING ROWID SEARCH ON TABLE %s FOR IN-OPERATOR\x00USING INDEX %s FOR IN-OPERATOR\x00sub-select returns %d columns - expected %d\x00REUSE LIST SUBQUERY %d\x00%sLIST SUBQUERY %d\x00CORRELATED \x00REUSE SUBQUERY %d\x00%sSCALAR SUBQUERY %d\x001\x000x\x00hex literal too big: %s%#T\x00generated column loop on \"%s\"\x00blob\x00text\x00numeric\x00flexnum\x00none\x00misuse of aggregate: %#T()\x00unknown function: %#T()\x00RAISE() may only be used within a trigger-program\x00B\x00C\x00D\x00E\x00F\x00table %s may not be altered\x00SELECT 1 FROM \"%w\".sqlite_master WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%' AND sqlite_rename_test(%Q, sql, type, name, %d, %Q, %d)=NULL \x00SELECT 1 FROM temp.sqlite_master WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%' AND sqlite_rename_test(%Q, sql, type, name, 1, %Q, %d)=NULL \x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_quotefix(%Q, sql)WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%'\x00UPDATE temp.sqlite_master SET sql = sqlite_rename_quotefix('temp', sql)WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%'\x00there is already another table or index with this name: %s\x00table\x00view %s may not be altered\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, %d) WHERE (type!='index' OR tbl_name=%Q COLLATE nocase)AND name NOT LIKE 'sqliteX_%%' ESCAPE 'X'\x00UPDATE %Q.sqlite_master SET tbl_name = %Q, name = CASE WHEN type='table' THEN %Q WHEN name LIKE 'sqliteX_autoindex%%' ESCAPE 'X' AND type='index' THEN 'sqlite_autoindex_' || %Q || substr(name,%d+18) ELSE name END WHERE tbl_name=%Q COLLATE nocase AND (type='table' OR type='index' OR type='trigger');\x00sqlite_sequence\x00UPDATE \"%w\".sqlite_sequence set name = %Q WHERE name = %Q\x00UPDATE sqlite_temp_schema SET sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, 1), tbl_name = CASE WHEN tbl_name=%Q COLLATE nocase AND sqlite_rename_test(%Q, sql, type, name, 1, 'after rename', 0) THEN %Q ELSE tbl_name END WHERE type IN ('view', 'trigger')\x00after rename\x00SELECT raise(ABORT,%Q) FROM \"%w\".\"%w\"\x00Cannot add a PRIMARY KEY column\x00Cannot add a UNIQUE column\x00Cannot add a REFERENCES column with non-NULL default value\x00Cannot add a NOT NULL column with default value NULL\x00Cannot add a column with non-constant default\x00cannot add a STORED column\x00UPDATE \"%w\".sqlite_master SET sql = printf('%%.%ds, ',sql) || %Q || substr(sql,1+length(printf('%%.%ds',sql))) WHERE type = 'table' AND name = %Q\x00SELECT CASE WHEN quick_check GLOB 'CHECK*' THEN raise(ABORT,'CHECK constraint failed') ELSE raise(ABORT,'NOT NULL constraint failed') END FROM pragma_quick_check(%Q,%Q) WHERE quick_check GLOB 'CHECK*' OR quick_check GLOB 'NULL*'\x00virtual tables may not be altered\x00Cannot add a column to a view\x00sqlite_altertab_%s\x00view\x00virtual table\x00cannot %s %s \"%s\"\x00drop column from\x00rename columns of\x00no such column: \"%T\"\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, %d) WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND (type != 'index' OR 
tbl_name = %Q)\x00UPDATE temp.sqlite_master SET sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, 1) WHERE type IN ('trigger', 'view')\x00error in %s %s%s%s: %s\x00 \x00CREATE \x00\"%w\" \x00%Q%s\x00%.*s%s\x00cannot drop %s column: \"%s\"\x00PRIMARY KEY\x00cannot drop column \"%s\": no other columns exist\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_drop_column(%d, sql, %d) WHERE (type=='table' AND tbl_name=%Q COLLATE nocase)\x00after drop column\x00sqlite_rename_column\x00sqlite_rename_table\x00sqlite_rename_test\x00sqlite_drop_column\x00sqlite_rename_quotefix\x00CREATE TABLE %Q.%s(%s)\x00DELETE FROM %Q.%s WHERE %s=%Q\x00DELETE FROM %Q.%s\x00sqlite_stat1\x00tbl,idx,stat\x00sqlite_stat4\x00tbl,idx,neq,nlt,ndlt,sample\x00sqlite_stat3\x00stat_init\x00stat_push\x00%llu\x00 %llu\x00%llu \x00stat_get\x00sqlite\\_%\x00BBB\x00idx\x00tbl\x00unordered*\x00sz=[0-9]*\x00noskipscan*\x00SELECT idx,count(*) FROM %Q.sqlite_stat4 GROUP BY idx\x00SELECT idx,neq,nlt,ndlt,sample FROM %Q.sqlite_stat4\x00SELECT tbl,idx,stat FROM %Q.sqlite_stat1\x00x\x00\x00too many attached databases - max %d\x00database %s is already in use\x00database is already attached\x00attached databases must use the same text encoding as main database\x00unable to open database: %s\x00no such database: %s\x00cannot detach database %s\x00database %s is locked\x00sqlite_detach\x00sqlite_attach\x00%s cannot use variables\x00%s %T cannot reference objects in database %s\x00authorizer malfunction\x00%s.%s\x00%s.%z\x00access to %z is prohibited\x00not authorized\x00pragma_\x00no such view\x00no such table\x00corrupt database\x00unknown database %T\x00object name reserved for internal use: %s\x00temporary table name must be unqualified\x00%s %T already exists\x00there is already an index named %s\x00sqlite_returning\x00cannot use RETURNING in a trigger\x00too many columns on %s\x00always\x00generated\x00duplicate column name: %s\x00default value of column [%s] is not constant\x00cannot use DEFAULT on a generated column\x00generated columns cannot be part of the PRIMARY KEY\x00table \"%s\" has more than one primary key\x00AUTOINCREMENT is only allowed on an INTEGER PRIMARY KEY\x00virtual tables cannot use computed columns\x00virtual\x00stored\x00error in generated column \"%s\"\x00,\x00\n \x00,\n \x00\n)\x00CREATE TABLE \x00 TEXT\x00 NUM\x00 INT\x00 REAL\x00unknown datatype for %s.%s: \"%s\"\x00missing datatype for %s.%s\x00AUTOINCREMENT not allowed on WITHOUT ROWID tables\x00PRIMARY KEY missing on table %s\x00must have at least one non-generated column\x00TABLE\x00VIEW\x00CREATE %s %.*s\x00UPDATE %Q.sqlite_master SET type='%s', name=%Q, tbl_name=%Q, rootpage=#%d, sql=%Q WHERE rowid=#%d\x00CREATE TABLE %Q.sqlite_sequence(name,seq)\x00tbl_name='%q' AND type!='trigger'\x00parameters are not allowed in views\x00view %s is circularly defined\x00corrupt schema\x00UPDATE %Q.sqlite_master SET rootpage=%d WHERE #%d AND rootpage=#%d\x00sqlite_stat%d\x00DELETE FROM %Q.sqlite_sequence WHERE name=%Q\x00DELETE FROM %Q.sqlite_master WHERE tbl_name=%Q and type!='trigger'\x00table %s may not be dropped\x00use DROP TABLE to delete table %s\x00use DROP VIEW to delete view %s\x00foreign key on %s should reference only one column of table %T\x00number of columns in foreign key does not match the number of columns in the referenced table\x00unknown column \"%s\" in foreign key definition\x00unsupported use of NULLS %s\x00FIRST\x00LAST\x00index\x00cannot create a TEMP index on non-TEMP table \"%s\"\x00table %s may not be indexed\x00views may not be 
indexed\x00virtual tables may not be indexed\x00there is already a table named %s\x00index %s already exists\x00sqlite_autoindex_%s_%d\x00expressions prohibited in PRIMARY KEY and UNIQUE constraints\x00conflicting ON CONFLICT clauses specified\x00invalid rootpage\x00CREATE%s INDEX %.*s\x00 UNIQUE\x00INSERT INTO %Q.sqlite_master VALUES('index',%Q,%Q,#%d,%Q);\x00name='%q' AND type='index'\x00no such index: %S\x00index associated with UNIQUE or PRIMARY KEY constraint cannot be dropped\x00DELETE FROM %Q.sqlite_master WHERE name=%Q AND type='index'\x00too many FROM clause terms, max: %d\x00a JOIN clause is required before %s\x00ON\x00USING\x00BEGIN\x00ROLLBACK\x00COMMIT\x00RELEASE\x00unable to open a temporary database file for storing temporary tables\x00index '%q'\x00, \x00%s.rowid\x00unable to identify the object to be reindexed\x00duplicate WITH table name: %s\x00no such collation sequence: %s\x00unsafe use of virtual table \"%s\"\x00table %s may not be modified\x00cannot modify %s because it is a view\x00rows deleted\x00integer overflow\x00%.*f\x00LIKE or GLOB pattern too complex\x00ESCAPE expression must be a single character\x00%!.20e\x00%Q\x00?000\x00MATCH\x00like\x00implies_nonnull_row\x00expr_compare\x00expr_implies_expr\x00affinity\x00soundex\x00load_extension\x00sqlite_compileoption_used\x00sqlite_compileoption_get\x00unlikely\x00likelihood\x00likely\x00sqlite_offset\x00ltrim\x00rtrim\x00trim\x00min\x00max\x00typeof\x00subtype\x00length\x00instr\x00printf\x00format\x00unicode\x00char\x00abs\x00round\x00upper\x00lower\x00hex\x00unhex\x00ifnull\x00random\x00randomblob\x00nullif\x00sqlite_version\x00sqlite_source_id\x00sqlite_log\x00quote\x00last_insert_rowid\x00changes\x00total_changes\x00replace\x00zeroblob\x00substr\x00substring\x00sum\x00total\x00avg\x00count\x00group_concat\x00glob\x00ceil\x00ceiling\x00floor\x00trunc\x00ln\x00log\x00log10\x00log2\x00exp\x00pow\x00power\x00mod\x00acos\x00asin\x00atan\x00atan2\x00cos\x00sin\x00tan\x00cosh\x00sinh\x00tanh\x00acosh\x00asinh\x00atanh\x00sqrt\x00radians\x00degrees\x00pi\x00sign\x00iif\x00foreign key mismatch - \"%w\" referencing \"%w\"\x00cannot INSERT into generated column \"%s\"\x00table %S has no column named %s\x00table %S has %d columns but %d values were supplied\x00%d values for %d columns\x00UPSERT not implemented for virtual table \"%s\"\x00cannot UPSERT a view\x00rows inserted\x00sqlite3_extension_init\x00sqlite3_\x00lib\x00_init\x00no entry point [%s] in shared library [%s]\x00error during initialization: %s\x00unable to open shared library [%.*s]\x00so\x00automatic extension loading failed: 
%s\x00seq\x00from\x00to\x00on_update\x00on_delete\x00match\x00cid\x00name\x00type\x00notnull\x00dflt_value\x00pk\x00hidden\x00schema\x00ncol\x00wr\x00strict\x00seqno\x00desc\x00coll\x00key\x00builtin\x00enc\x00narg\x00flags\x00wdth\x00hght\x00flgs\x00unique\x00origin\x00partial\x00rowid\x00fkid\x00file\x00busy\x00checkpointed\x00database\x00status\x00cache_size\x00timeout\x00analysis_limit\x00application_id\x00auto_vacuum\x00automatic_index\x00busy_timeout\x00cache_spill\x00case_sensitive_like\x00cell_size_check\x00checkpoint_fullfsync\x00collation_list\x00compile_options\x00count_changes\x00data_version\x00database_list\x00default_cache_size\x00defer_foreign_keys\x00empty_result_callbacks\x00encoding\x00foreign_key_check\x00foreign_key_list\x00foreign_keys\x00freelist_count\x00full_column_names\x00fullfsync\x00function_list\x00hard_heap_limit\x00ignore_check_constraints\x00incremental_vacuum\x00index_info\x00index_list\x00index_xinfo\x00integrity_check\x00journal_mode\x00journal_size_limit\x00legacy_alter_table\x00locking_mode\x00max_page_count\x00mmap_size\x00module_list\x00optimize\x00page_count\x00page_size\x00pragma_list\x00query_only\x00quick_check\x00read_uncommitted\x00recursive_triggers\x00reverse_unordered_selects\x00schema_version\x00secure_delete\x00short_column_names\x00shrink_memory\x00soft_heap_limit\x00synchronous\x00table_info\x00table_list\x00table_xinfo\x00temp_store\x00temp_store_directory\x00threads\x00trusted_schema\x00user_version\x00wal_autocheckpoint\x00wal_checkpoint\x00writable_schema\x00onoffalseyestruextrafull\x00exclusive\x00normal\x00full\x00incremental\x00memory\x00temporary storage cannot be changed from within a transaction\x00SET NULL\x00SET DEFAULT\x00CASCADE\x00RESTRICT\x00NO ACTION\x00delete\x00persist\x00off\x00truncate\x00wal\x00w\x00a\x00sissii\x00utf8\x00utf16le\x00utf16be\x00-%T\x00fast\x00not a writable directory\x00Safety level may not be changed inside a transaction\x00reset\x00issisii\x00issisi\x00SELECT*FROM\"%w\"\x00shadow\x00sssiii\x00iisX\x00isiX\x00c\x00u\x00isisi\x00iss\x00is\x00iissssss\x00NONE\x00siX\x00*** in database %s ***\n\x00row not in PRIMARY KEY order for %s\x00NULL value in %s.%s\x00non-%s value in %s.%s\x00NUMERIC value in %s.%s\x00C\x00TEXT value in %s.%s\x00CHECK constraint failed in %s\x00row \x00 missing from index \x00 values differ from index \x00non-unique entry in index \x00wrong # of entries in index \x00ok\x00unsupported encoding: %s\x00restart\x00ANALYZE \"%w\".\"%w\"\x00UTF8\x00UTF-8\x00UTF-16le\x00UTF-16be\x00UTF16le\x00UTF16be\x00UTF-16\x00UTF16\x00CREATE TABLE x\x00%c\"%s\"\x00(\"%s\"\x00,arg HIDDEN\x00,schema HIDDEN\x00PRAGMA \x00%Q.\x00=%Q\x00error in %s %s after %s: %s\x00malformed database schema (%s)\x00%z - %s\x00rename\x00drop column\x00add column\x00orphan index\x00CREATE TABLE x(type text,name text,tbl_name text,rootpage int,sql text)\x00unsupported file format\x00SELECT*FROM\"%w\".%s ORDER BY rowid\x00database schema is locked: %s\x00statement too long\x00unknown join type: %T%s%T%s%T\x00naturaleftouterightfullinnercross\x00a NATURAL join may not have an ON or USING clause\x00cannot join using column %s - column not present in both tables\x00ambiguous reference to %s in USING()\x00UNION ALL\x00INTERSECT\x00EXCEPT\x00UNION\x00USE TEMP B-TREE FOR %s\x00USE TEMP B-TREE FOR %sORDER BY\x00RIGHT PART OF \x00column%d\x00%.*z:%u\x00NUM\x00cannot use window functions in recursive queries\x00recursive aggregate queries not supported\x00SETUP\x00RECURSIVE STEP\x00SCAN %d CONSTANT ROW%s\x00S\x00COMPOUND 
QUERY\x00LEFT-MOST SUBQUERY\x00%s USING TEMP B-TREE\x00all VALUES must have the same number of terms\x00SELECTs to the left and right of %s do not have the same number of result columns\x00MERGE (%s)\x00LEFT\x00RIGHT\x00no such index: %s\x00'%s' is not a function\x00no such index: \"%s\"\x00multiple references to recursive table: %s\x00circular reference: %s\x00table %s has %d values for %d columns\x00multiple recursive references: %s\x00recursive reference in a subquery: %s\x00%!S\x00too many references to \"%s\": max 65535\x00access to view \"%s\" prohibited\x00..%s\x00%s.%s.%s\x00no such table: %s\x00no tables specified\x00too many columns in result set\x00DISTINCT aggregates must have exactly one argument\x00USE TEMP B-TREE FOR %s(DISTINCT)\x00SCAN %s%s%s\x00 USING COVERING INDEX \x00target object/alias may not appear in FROM clause: %s\x00expected %d columns for '%s' but got %d\x00CO-ROUTINE %!S\x00MATERIALIZE %!S\x00DISTINCT\x00GROUP BY\x00sqlite3_get_table() called with two or more incompatible queries\x00temporary trigger may not have qualified name\x00trigger\x00cannot create triggers on virtual tables\x00trigger %T already exists\x00cannot create trigger on system table\x00cannot create %s trigger on view: %S\x00BEFORE\x00AFTER\x00cannot create INSTEAD OF trigger on table: %S\x00trigger \"%s\" may not write to shadow table \"%s\"\x00INSERT INTO %Q.sqlite_master VALUES('trigger',%Q,%Q,0,'CREATE TRIGGER %q')\x00type='trigger' AND name='%q'\x00no such trigger: %S\x00DELETE FROM %Q.sqlite_master WHERE name=%Q AND type='trigger'\x00%s RETURNING is not available on virtual tables\x00DELETE\x00UPDATE\x00RETURNING may not use \"TABLE.*\" wildcards\x00-- TRIGGER %s\x00cannot UPDATE generated column \"%s\"\x00no such column: %s\x00rows updated\x00%r \x00%sON CONFLICT clause does not match any PRIMARY KEY or UNIQUE constraint\x00CRE\x00INS\x00cannot VACUUM from within a transaction\x00cannot VACUUM - SQL statements in progress\x00non-text filename\x00ATTACH %Q AS vacuum_db\x00output file already exists\x00SELECT sql FROM \"%w\".sqlite_schema WHERE type='table'AND name<>'sqlite_sequence' AND coalesce(rootpage,1)>0\x00SELECT sql FROM \"%w\".sqlite_schema WHERE type='index'\x00SELECT'INSERT INTO vacuum_db.'||quote(name)||' SELECT*FROM\"%w\".'||quote(name)FROM vacuum_db.sqlite_schema WHERE type='table'AND coalesce(rootpage,1)>0\x00INSERT INTO vacuum_db.sqlite_schema SELECT*FROM \"%w\".sqlite_schema WHERE type IN('view','trigger') OR(type='table'AND rootpage=0)\x00CREATE VIRTUAL TABLE %T\x00UPDATE %Q.sqlite_master SET type='table', name=%Q, tbl_name=%Q, rootpage=0, sql=%Q WHERE rowid=#%d\x00name=%Q AND sql=%Q\x00vtable constructor called recursively: %s\x00vtable constructor failed: %s\x00vtable constructor did not declare schema: %s\x00no such module: %s\x00\x00 AND \x00(\x00 (\x00%s=?\x00ANY(%s)\x00>\x00<\x00%s %S\x00SEARCH\x00SCAN\x00AUTOMATIC PARTIAL COVERING INDEX\x00AUTOMATIC COVERING INDEX\x00COVERING INDEX %s\x00INDEX %s\x00 USING \x00 USING INTEGER PRIMARY KEY (%s\x00>? 
AND %s\x00%c?)\x00 VIRTUAL TABLE INDEX %d:%s\x00 LEFT-JOIN\x00BLOOM FILTER ON %S (\x00rowid=?\x00MULTI-INDEX OR\x00INDEX %d\x00RIGHT-JOIN %s\x00regexp\x00ON clause references tables to its right\x00NOCASE\x00too many arguments on %s() - max %d\x00automatic index on %s(%s)\x00auto-index\x00%s.xBestIndex malfunction\x00abbreviated query algorithm search\x00no query solution\x00at most %d tables in a join\x00SCAN CONSTANT ROW\x00second argument to nth_value must be a positive integer\x00argument of ntile must be a positive integer\x00row_number\x00dense_rank\x00rank\x00percent_rank\x00cume_dist\x00ntile\x00last_value\x00nth_value\x00first_value\x00lead\x00lag\x00no such window: %s\x00RANGE with offset PRECEDING/FOLLOWING requires one ORDER BY expression\x00FILTER clause may only be used with aggregate window functions\x00misuse of aggregate: %s()\x00unsupported frame specification\x00PARTITION clause\x00ORDER BY clause\x00frame specification\x00cannot override %s of window: %s\x00DISTINCT is not supported for window functions\x00frame starting offset must be a non-negative integer\x00frame ending offset must be a non-negative integer\x00frame starting offset must be a non-negative number\x00frame ending offset must be a non-negative number\x00%s clause should come after %s not before\x00ORDER BY\x00LIMIT\x00too many terms in compound SELECT\x00syntax error after column name \"%.*s\"\x00parser stack overflow\x00unknown table option: %.*s\x00set list\x00near \"%T\": syntax error\x00qualified table names are not allowed on INSERT, UPDATE, and DELETE statements within triggers\x00the INDEXED BY clause is not allowed on UPDATE or DELETE statements within triggers\x00the NOT INDEXED clause is not allowed on UPDATE or DELETE statements within triggers\x00incomplete input\x00unrecognized token: \"%T\"\x00%s in \"%s\"\x00create\x00temp\x00temporary\x00end\x00explain\x00unable to close due to unfinalized statements or unfinished backups\x00unknown error\x00abort due to ROLLBACK\x00another row available\x00no more rows available\x00not an error\x00SQL logic error\x00access permission denied\x00query aborted\x00database is locked\x00database table is locked\x00attempt to write a readonly database\x00interrupted\x00disk I/O error\x00database disk image is malformed\x00unknown operation\x00database or disk is full\x00unable to open database file\x00locking protocol\x00constraint failed\x00datatype mismatch\x00bad parameter or other API misuse\x00authorization denied\x00column index out of range\x00file is not a database\x00notification message\x00warning message\x00unable to delete/modify user-function due to active statements\x00unable to use function %s in the requested context\x00unknown database: %s\x00unable to delete/modify collation sequence due to active statements\x00file:\x00localhost\x00invalid uri authority: %.*s\x00vfs\x00cache\x00mode\x00no such %s mode: %s\x00%s mode not allowed: %s\x00no such vfs: %s\x00shared\x00private\x00ro\x00rw\x00rwc\x00RTRIM\x00\x00\x00\x00%s at line %d of [%.10s]\x00database corruption\x00misuse\x00cannot open file\x00no such table column: %s.%s\x00SQLITE_\x00database is deadlocked\x00array\x00object\x000123456789abcdef\x00JSON cannot hold BLOB values\x00malformed JSON\x00[0]\x00JSON path error near '%q'\x00json_%s() needs an odd number of arguments\x00$[\x00$.\x00json_object() requires an even number of arguments\x00json_object() labels must be TEXT\x00set\x00insert\x00[]\x00{}\x00CREATE TABLE x(key,value,type,atom,id,parent,fullkey,path,json HIDDEN,root 
HIDDEN)\x00.%.*s\x00[%d]\x00$\x00json\x00json_array\x00json_array_length\x00json_extract\x00->\x00->>\x00json_insert\x00json_object\x00json_patch\x00json_quote\x00json_remove\x00json_replace\x00json_set\x00json_type\x00json_valid\x00json_group_array\x00json_group_object\x00json_each\x00json_tree\x00%s_node\x00data\x00DROP TABLE '%q'.'%q_node';DROP TABLE '%q'.'%q_rowid';DROP TABLE '%q'.'%q_parent';\x00RtreeMatchArg\x00SELECT * FROM %Q.%Q\x00UNIQUE constraint failed: %s.%s\x00rtree constraint failed: %s.(%s<=%s)\x00ALTER TABLE %Q.'%q_node' RENAME TO \"%w_node\";ALTER TABLE %Q.'%q_parent' RENAME TO \"%w_parent\";ALTER TABLE %Q.'%q_rowid' RENAME TO \"%w_rowid\";\x00SELECT stat FROM %Q.sqlite_stat1 WHERE tbl = '%q_rowid'\x00node\x00CREATE TABLE \"%w\".\"%w_rowid\"(rowid INTEGER PRIMARY KEY,nodeno\x00,a%d\x00);CREATE TABLE \"%w\".\"%w_node\"(nodeno INTEGER PRIMARY KEY,data);\x00CREATE TABLE \"%w\".\"%w_parent\"(nodeno INTEGER PRIMARY KEY,parentnode);\x00INSERT INTO \"%w\".\"%w_node\"VALUES(1,zeroblob(%d))\x00INSERT INTO\"%w\".\"%w_rowid\"(rowid,nodeno)VALUES(?1,?2)ON CONFLICT(rowid)DO UPDATE SET nodeno=excluded.nodeno\x00SELECT * FROM \"%w\".\"%w_rowid\" WHERE rowid=?1\x00UPDATE \"%w\".\"%w_rowid\"SET \x00a%d=coalesce(?%d,a%d)\x00a%d=?%d\x00 WHERE rowid=?1\x00INSERT OR REPLACE INTO '%q'.'%q_node' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_node' WHERE nodeno = ?1\x00SELECT nodeno FROM '%q'.'%q_rowid' WHERE rowid = ?1\x00INSERT OR REPLACE INTO '%q'.'%q_rowid' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_rowid' WHERE rowid = ?1\x00SELECT parentnode FROM '%q'.'%q_parent' WHERE nodeno = ?1\x00INSERT OR REPLACE INTO '%q'.'%q_parent' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_parent' WHERE nodeno = ?1\x00PRAGMA %Q.page_size\x00SELECT length(data) FROM '%q'.'%q_node' WHERE nodeno = 1\x00undersize RTree blobs in \"%q_node\"\x00Wrong number of columns for an rtree table\x00Too few columns for an rtree table\x00Too many columns for an rtree table\x00Auxiliary rtree columns must be last\x00CREATE TABLE x(%.*s INT\x00,%.*s\x00);\x00,%.*s REAL\x00,%.*s INT\x00{%lld\x00 %g\x00}\x00Invalid argument to rtreedepth()\x00%z%s%z\x00SELECT data FROM %Q.'%q_node' WHERE nodeno=?\x00Node %lld missing from database\x00SELECT parentnode FROM %Q.'%q_parent' WHERE nodeno=?1\x00SELECT nodeno FROM %Q.'%q_rowid' WHERE rowid=?1\x00Mapping (%lld -> %lld) missing from %s table\x00%_rowid\x00%_parent\x00Found (%lld -> %lld) in %s table, expected (%lld -> %lld)\x00Dimension %d of cell %d on node %lld is corrupt\x00Dimension %d of cell %d on node %lld is corrupt relative to parent\x00Node %lld is too small (%d bytes)\x00Rtree depth out of range (%d)\x00Node %lld is too small for cell count of %d (%d bytes)\x00SELECT count(*) FROM %Q.'%q%s'\x00Wrong number of entries in %%%s table - expected %lld, actual %lld\x00SELECT * FROM %Q.'%q_rowid'\x00Schema corrupt or not an rtree\x00_rowid\x00_parent\x00END\x00wrong number of arguments to function rtreecheck()\x00[\x00[%!g,%!g],\x00[%!g,%!g]]\x00\x00CREATE TABLE x(_shape\x00,%s\x00rtree\x00fullscan\x00_shape does not contain a valid polygon\x00geopoly_overlap\x00geopoly_within\x00geopoly\x00geopoly_area\x00geopoly_blob\x00geopoly_json\x00geopoly_svg\x00geopoly_contains_point\x00geopoly_debug\x00geopoly_bbox\x00geopoly_xform\x00geopoly_regular\x00geopoly_ccw\x00geopoly_group_bbox\x00rtreenode\x00rtreedepth\x00rtreecheck\x00rtree_i32\x00corrupt fossil delta\x00DROP TRIGGER IF EXISTS temp.rbu_insert_tr;DROP TRIGGER IF EXISTS temp.rbu_update1_tr;DROP TRIGGER IF EXISTS temp.rbu_update2_tr;DROP TRIGGER IF 
EXISTS temp.rbu_delete_tr;\x00SELECT rbu_target_name(name, type='view') AS target, name FROM sqlite_schema WHERE type IN ('table', 'view') AND target IS NOT NULL %s ORDER BY name\x00AND rootpage!=0 AND rootpage IS NOT NULL\x00SELECT name, rootpage, sql IS NULL OR substr(8, 6)=='UNIQUE' FROM main.sqlite_schema WHERE type='index' AND tbl_name = ?\x00SELECT (sql COLLATE nocase BETWEEN 'CREATE VIRTUAL' AND 'CREATE VIRTUAM'), rootpage FROM sqlite_schema WHERE name=%Q\x00PRAGMA index_list=%Q\x00SELECT rootpage FROM sqlite_schema WHERE name = %Q\x00PRAGMA table_info=%Q\x00PRAGMA main.index_list = %Q\x00PRAGMA main.index_xinfo = %Q\x00SELECT * FROM '%q'\x00rbu_\x00rbu_rowid\x00table %q %s rbu_rowid column\x00may not have\x00requires\x00PRAGMA table_info(%Q)\x00column missing from %q: %s\x00%z%s\"%w\"\x00%z%s%s\"%w\"%s\x00SELECT max(_rowid_) FROM \"%s%w\"\x00 WHERE _rowid_ > %lld \x00 DESC\x00quote(\x00||','||\x00SELECT %s FROM \"%s%w\" ORDER BY %s LIMIT 1\x00 WHERE (%s) > (%s) \x00_rowid_\x00%z%s \"%w\" COLLATE %Q\x00%z%s \"rbu_imp_%d%w\" COLLATE %Q DESC\x00%z%s quote(\"rbu_imp_%d%w\")\x00SELECT %s FROM \"rbu_imp_%w\" ORDER BY %s LIMIT 1\x00%z%s%s\x00(%s) > (%s)\x00%z%s(%.*s) COLLATE %Q\x00%z%s\"%w\" COLLATE %Q\x00%z%s\"rbu_imp_%d%w\"%s\x00%z%s\"rbu_imp_%d%w\" %s COLLATE %Q\x00%z%s\"rbu_imp_%d%w\" IS ?\x00%z%s%s.\"%w\"\x00%z%sNULL\x00%z, %s._rowid_\x00_rowid_ = ?%d\x00%z%sc%d=?%d\x00_rowid_ = (SELECT id FROM rbu_imposter2 WHERE %z)\x00%z%s\"%w\"=?%d\x00invalid rbu_control value\x00%z%s\"%w\"=rbu_delta(\"%w\", ?%d)\x00%z%s\"%w\"=rbu_fossil_delta(\"%w\", ?%d)\x00PRIMARY KEY(\x00%z%s\"%w\"%s\x00%z)\x00SELECT name FROM sqlite_schema WHERE rootpage = ?\x00%z%sc%d %s COLLATE %Q\x00%z%sc%d%s\x00%z, id INTEGER\x00CREATE TABLE rbu_imposter2(%z, PRIMARY KEY(%z)) WITHOUT ROWID\x00PRIMARY KEY \x00%z%s\"%w\" %s %sCOLLATE %Q%s\x00 NOT NULL\x00%z, %z\x00CREATE TABLE \"rbu_imp_%w\"(%z)%s\x00 WITHOUT ROWID\x00INSERT INTO %s.'rbu_tmp_%q'(rbu_control,%s%s) VALUES(%z)\x00SELECT trim(sql) FROM sqlite_schema WHERE type='index' AND name=?\x00 LIMIT -1 OFFSET %d\x00CREATE TABLE \"rbu_imp_%w\"( %s, PRIMARY KEY( %s ) ) WITHOUT ROWID\x00INSERT INTO \"rbu_imp_%w\" VALUES(%s)\x00DELETE FROM \"rbu_imp_%w\" WHERE %s\x00SELECT %s, 0 AS rbu_control FROM '%q' %s %s %s ORDER BY %s%s\x00AND\x00WHERE\x00SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' %s ORDER BY %s%s\x00SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' %s UNION ALL SELECT %s, rbu_control FROM '%q' %s %s typeof(rbu_control)='integer' AND rbu_control!=1 ORDER BY %s%s\x00rbu_imp_\x00INSERT INTO \"%s%w\"(%s%s) VALUES(%s)\x00, _rowid_\x00DELETE FROM \"%s%w\" WHERE %s\x00, rbu_rowid\x00CREATE TABLE IF NOT EXISTS %s.'rbu_tmp_%q' AS SELECT *%s FROM '%q' WHERE 0;\x00, 0 AS rbu_rowid\x00CREATE TEMP TRIGGER rbu_delete_tr BEFORE DELETE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(3, %s);END;CREATE TEMP TRIGGER rbu_update1_tr BEFORE UPDATE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(3, %s);END;CREATE TEMP TRIGGER rbu_update2_tr AFTER UPDATE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(4, %s);END;\x00CREATE TEMP TRIGGER rbu_insert_tr AFTER INSERT ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(0, %s);END;\x00,_rowid_ \x00,rbu_rowid\x00SELECT %s,%s rbu_control%s FROM '%q'%s %s %s %s\x000 AS \x00UPDATE \"%s%w\" SET %s WHERE %s\x00SELECT k, v FROM %s.rbu_state\x00file:///%s-vacuum?modeof=%s\x00ATTACH %Q AS stat\x00CREATE TABLE IF NOT EXISTS %s.rbu_state(k INTEGER PRIMARY KEY, v)\x00cannot vacuum wal mode 
database\x00file:%s-vactmp?rbu_memory=1%s%s\x00&\x00rbu_tmp_insert\x00rbu_fossil_delta\x00rbu_target_name\x00SELECT * FROM sqlite_schema\x00rbu vfs not found\x00PRAGMA main.wal_checkpoint=restart\x00rbu_exclusive_checkpoint\x00%s-oal\x00%s-wal\x00PRAGMA schema_version\x00PRAGMA schema_version = %d\x00INSERT OR REPLACE INTO %s.rbu_state(k, v) VALUES (%d, %d), (%d, %Q), (%d, %Q), (%d, %d), (%d, %d), (%d, %lld), (%d, %lld), (%d, %lld), (%d, %lld), (%d, %Q) \x00PRAGMA main.%s\x00PRAGMA main.%s = %d\x00PRAGMA writable_schema=1\x00SELECT sql FROM sqlite_schema WHERE sql!='' AND rootpage!=0 AND name!='sqlite_sequence' ORDER BY type DESC\x00SELECT * FROM sqlite_schema WHERE rootpage=0 OR rootpage IS NULL\x00INSERT INTO sqlite_schema VALUES(?,?,?,?,?)\x00PRAGMA writable_schema=0\x00DELETE FROM %s.'rbu_tmp_%q'\x00rbu_state mismatch error\x00rbu_vfs_%d\x00SELECT count(*) FROM sqlite_schema WHERE type='index' AND tbl_name = %Q\x00rbu_index_cnt\x00SELECT 1 FROM sqlite_schema WHERE tbl_name = 'rbu_count'\x00SELECT sum(cnt * (1 + rbu_index_cnt(rbu_target_name(tbl))))FROM rbu_count\x00cannot update wal mode database\x00database modified during rbu %s\x00vacuum\x00update\x00BEGIN IMMEDIATE\x00PRAGMA journal_mode=off\x00-vactmp\x00DELETE FROM stat.rbu_state\x00rbu/zipvfs setup error\x00rbu(%s)/%z\x00rbu_memory\x00SELECT 0, 'tbl', '', 0, '', 1 UNION ALL SELECT 1, 'idx', '', 0, '', 2 UNION ALL SELECT 2, 'stat', '', 0, '', 0\x00PRAGMA '%q'.table_info('%q')\x00%z%s\"%w\".\"%w\".\"%w\"=\"%w\".\"%w\".\"%w\"\x00%z%s\"%w\".\"%w\".\"%w\" IS NOT \"%w\".\"%w\".\"%w\"\x00 OR \x00SELECT * FROM \"%w\".\"%w\" WHERE NOT EXISTS ( SELECT 1 FROM \"%w\".\"%w\" WHERE %s)\x00SELECT * FROM \"%w\".\"%w\", \"%w\".\"%w\" WHERE %s AND (%z)\x00table schemas do not match\x00SELECT tbl, ?2, stat FROM %Q.sqlite_stat1 WHERE tbl IS ?1 AND idx IS (CASE WHEN ?2=X'' THEN NULL ELSE ?2 END)\x00SELECT * FROM \x00 WHERE \x00 IS ?\x00SAVEPOINT changeset\x00RELEASE changeset\x00UPDATE main.\x00 SET \x00 = ?\x00idx IS CASE WHEN length(?4)=0 AND typeof(?4)='blob' THEN NULL ELSE ?4 END \x00DELETE FROM main.\x00 AND (?\x00AND \x00INSERT INTO main.\x00) VALUES(?\x00, ?\x00INSERT INTO main.sqlite_stat1 VALUES(?1, CASE WHEN length(?2)=0 AND typeof(?2)='blob' THEN NULL ELSE ?2 END, ?3)\x00DELETE FROM main.sqlite_stat1 WHERE tbl=?1 AND idx IS CASE WHEN length(?2)=0 AND typeof(?2)='blob' THEN NULL ELSE ?2 END AND (?4 OR stat IS ?3)\x00SAVEPOINT replace_op\x00RELEASE replace_op\x00SAVEPOINT changeset_apply\x00PRAGMA defer_foreign_keys = 1\x00sqlite3changeset_apply(): no such table: %s\x00sqlite3changeset_apply(): table %s has %d columns, expected %d or more\x00sqlite3changeset_apply(): primary key mismatch for table %s\x00PRAGMA defer_foreign_keys = 0\x00RELEASE changeset_apply\x00ROLLBACK TO changeset_apply\x00fts5: parser stack overflow\x00fts5: syntax error near \"%.*s\"\x00%z%.*s\x00wrong number of arguments to function highlight()\x00wrong number of arguments to function snippet()\x00snippet\x00highlight\x00bm25\x00prefix\x00malformed prefix=... directive\x00too many prefix indexes (max %d)\x00prefix length out of range (max 999)\x00tokenize\x00multiple tokenize=... directives\x00parse error in tokenize directive\x00content\x00multiple content=... directives\x00%Q.%Q\x00content_rowid\x00multiple content_rowid=... directives\x00columnsize\x00malformed columnsize=... directive\x00columns\x00malformed detail=... 
directive\x00unrecognized option: \"%.*s\"\x00reserved fts5 column name: %s\x00unindexed\x00unrecognized column option: %s\x00T.%Q\x00, T.%Q\x00, T.c%d\x00reserved fts5 table name: %s\x00parse error in \"%s\"\x00docsize\x00%Q.'%q_%s'\x00CREATE TABLE x(\x00%z%s%Q\x00%z, %Q HIDDEN, %s HIDDEN)\x00pgsz\x00hashsize\x00automerge\x00usermerge\x00crisismerge\x00SELECT k, v FROM %Q.'%q_config'\x00version\x00invalid fts5 file format (found %d, expected %d) - run 'rebuild'\x00unterminated string\x00fts5: syntax error near \"%.1s\"\x00OR\x00NOT\x00NEAR\x00expected integer, got \"%.*s\"\x00fts5: column queries are not supported (detail=none)\x00fts5: %s queries are not supported (detail!=full)\x00phrase\x00block\x00REPLACE INTO '%q'.'%q_data'(id, block) VALUES(?,?)\x00DELETE FROM '%q'.'%q_data' WHERE id>=? AND id<=?\x00DELETE FROM '%q'.'%q_idx' WHERE segid=?\x00PRAGMA %Q.data_version\x00SELECT pgno FROM '%q'.'%q_idx' WHERE segid=? AND term<=? ORDER BY term DESC LIMIT 1\x00INSERT INTO '%q'.'%q_idx'(segid,term,pgno) VALUES(?,?,?)\x00%s_data\x00id INTEGER PRIMARY KEY, block BLOB\x00segid, term, pgno, PRIMARY KEY(segid, term)\x00SELECT segid, term, (pgno>>1), (pgno&1) FROM %Q.'%q_idx' WHERE segid=%d ORDER BY 1, 2\x00\x00\x00\x00\x00\x00recursively defined fts5 content table\x00SELECT rowid, rank FROM %Q.%Q ORDER BY %s(\"%w\"%s%s) %s\x00DESC\x00ASC\x00reads\x00unknown special query: %.*s\x00SELECT %s\x00no such function: %s\x00parse error in rank function: %s\x00%s: table does not support scanning\x00delete-all\x00'delete-all' may only be used with a contentless or external content fts5 table\x00rebuild\x00'rebuild' may not be used with a contentless fts5 table\x00merge\x00integrity-check\x00cannot %s contentless fts5 table: %s\x00DELETE from\x00no such cursor: %lld\x00no such tokenizer: %s\x00error in tokenizer constructor\x00fts5_api_ptr\x00fts5: 2023-02-21 18:09:37 05941c2a04037fc3ed2ffae11f5d2260706f89431f463518740f72ada350866d\x00config\x00fts5\x00fts5_source_id\x00SELECT %s FROM %s T WHERE T.%Q >= ? AND T.%Q <= ? ORDER BY T.%Q ASC\x00SELECT %s FROM %s T WHERE T.%Q <= ? AND T.%Q >= ? 
ORDER BY T.%Q DESC\x00SELECT %s FROM %s T WHERE T.%Q=?\x00INSERT INTO %Q.'%q_content' VALUES(%s)\x00REPLACE INTO %Q.'%q_content' VALUES(%s)\x00DELETE FROM %Q.'%q_content' WHERE id=?\x00REPLACE INTO %Q.'%q_docsize' VALUES(?,?)\x00DELETE FROM %Q.'%q_docsize' WHERE id=?\x00SELECT sz FROM %Q.'%q_docsize' WHERE id=?\x00REPLACE INTO %Q.'%q_config' VALUES(?,?)\x00SELECT %s FROM %s AS T\x00DROP TABLE IF EXISTS %Q.'%q_data';DROP TABLE IF EXISTS %Q.'%q_idx';DROP TABLE IF EXISTS %Q.'%q_config';\x00DROP TABLE IF EXISTS %Q.'%q_docsize';\x00DROP TABLE IF EXISTS %Q.'%q_content';\x00ALTER TABLE %Q.'%q_%s' RENAME TO '%q_%s';\x00CREATE TABLE %Q.'%q_%q'(%s)%s\x00fts5: error creating shadow table %q_%s: %s\x00id INTEGER PRIMARY KEY\x00, c%d\x00id INTEGER PRIMARY KEY, sz BLOB\x00k PRIMARY KEY, v\x00DELETE FROM %Q.'%q_data';DELETE FROM %Q.'%q_idx';\x00DELETE FROM %Q.'%q_docsize';\x00SELECT count(*) FROM %Q.'%q_%s'\x00tokenchars\x00separators\x00L* N* Co\x00categories\x00remove_diacritics\x00unicode61\x00al\x00ance\x00ence\x00er\x00ic\x00able\x00ible\x00ant\x00ement\x00ment\x00ent\x00ion\x00ou\x00ism\x00ate\x00iti\x00ous\x00ive\x00ize\x00at\x00bl\x00ble\x00iz\x00ational\x00tional\x00tion\x00enci\x00anci\x00izer\x00logi\x00bli\x00alli\x00entli\x00eli\x00e\x00ousli\x00ization\x00ation\x00ator\x00alism\x00iveness\x00fulness\x00ful\x00ousness\x00aliti\x00iviti\x00biliti\x00ical\x00ness\x00icate\x00iciti\x00ative\x00alize\x00eed\x00ee\x00ed\x00ing\x00case_sensitive\x00ascii\x00porter\x00trigram\x00col\x00row\x00instance\x00fts5vocab: unknown table type: %Q\x00CREATE TABlE vocab(term, col, doc, cnt)\x00CREATE TABlE vocab(term, doc, cnt)\x00CREATE TABlE vocab(term, doc, col, offset)\x00wrong number of vtable arguments\x00recursive definition for %s.%s\x00SELECT t.%Q FROM %Q.%Q AS t WHERE t.%Q MATCH '*id'\x00no such fts5 table: %s.%s\x00fts5vocab\x002023-02-21 18:09:37 05941c2a04037fc3ed2ffae11f5d2260706f89431f463518740f72ada350866d\x00" +var ts1 = "3.41.2\x00ATOMIC_INTRINSICS=1\x00COMPILER=gcc-10.2.1 20210110\x00DEFAULT_AUTOVACUUM\x00DEFAULT_CACHE_SIZE=-2000\x00DEFAULT_FILE_FORMAT=4\x00DEFAULT_JOURNAL_SIZE_LIMIT=-1\x00DEFAULT_MEMSTATUS=0\x00DEFAULT_MMAP_SIZE=0\x00DEFAULT_PAGE_SIZE=4096\x00DEFAULT_PCACHE_INITSZ=20\x00DEFAULT_RECURSIVE_TRIGGERS\x00DEFAULT_SECTOR_SIZE=4096\x00DEFAULT_SYNCHRONOUS=2\x00DEFAULT_WAL_AUTOCHECKPOINT=1000\x00DEFAULT_WAL_SYNCHRONOUS=2\x00DEFAULT_WORKER_THREADS=0\x00ENABLE_COLUMN_METADATA\x00ENABLE_FTS5\x00ENABLE_GEOPOLY\x00ENABLE_MATH_FUNCTIONS\x00ENABLE_MEMORY_MANAGEMENT\x00ENABLE_OFFSET_SQL_FUNC\x00ENABLE_PREUPDATE_HOOK\x00ENABLE_RBU\x00ENABLE_RTREE\x00ENABLE_SESSION\x00ENABLE_SNAPSHOT\x00ENABLE_STAT4\x00ENABLE_UNLOCK_NOTIFY\x00LIKE_DOESNT_MATCH_BLOBS\x00MALLOC_SOFT_LIMIT=1024\x00MAX_ATTACHED=10\x00MAX_COLUMN=2000\x00MAX_COMPOUND_SELECT=500\x00MAX_DEFAULT_PAGE_SIZE=8192\x00MAX_EXPR_DEPTH=1000\x00MAX_FUNCTION_ARG=127\x00MAX_LENGTH=1000000000\x00MAX_LIKE_PATTERN_LENGTH=50000\x00MAX_MMAP_SIZE=0x7fff0000\x00MAX_PAGE_COUNT=1073741823\x00MAX_PAGE_SIZE=65536\x00MAX_SQL_LENGTH=1000000000\x00MAX_TRIGGER_DEPTH=1000\x00MAX_VARIABLE_NUMBER=32766\x00MAX_VDBE_OP=250000000\x00MAX_WORKER_THREADS=8\x00MUTEX_NOOP\x00SOUNDEX\x00SYSTEM_MALLOC\x00TEMP_STORE=1\x00THREADSAFE=1\x00BINARY\x00ANY\x00BLOB\x00INT\x00INTEGER\x00REAL\x00TEXT\x0020b:20e\x0020c:20e\x0020e\x0040f-21a-21d\x00now\x00local time unavailable\x00second\x00minute\x00hour\x00\x00\x00day\x00\x00\x00\x00month\x00\x00year\x00\x00\x00auto\x00julianday\x00localtime\x00unixepoch\x00utc\x00weekday \x00start of 
\x00month\x00year\x00day\x00%02d\x00%06.3f\x00%03d\x00%.16g\x00%lld\x00%04d\x00date\x00time\x00datetime\x00strftime\x00current_time\x00current_timestamp\x00current_date\x00failed to allocate %u bytes of memory\x00failed memory resize %u to %u bytes\x00out of memory\x000123456789ABCDEF0123456789abcdef\x00-x0\x00X0\x00%\x00NaN\x00Inf\x00\x00NULL\x00(NULL)\x00.\x00(join-%u)\x00(subquery-%u)\x00thstndrd\x00922337203685477580\x00API call with %s database connection pointer\x00unopened\x00invalid\x00Savepoint\x00AutoCommit\x00Transaction\x00Checkpoint\x00JournalMode\x00Vacuum\x00VFilter\x00VUpdate\x00Init\x00Goto\x00Gosub\x00InitCoroutine\x00Yield\x00MustBeInt\x00Jump\x00Once\x00If\x00IfNot\x00IsType\x00Not\x00IfNullRow\x00SeekLT\x00SeekLE\x00SeekGE\x00SeekGT\x00IfNotOpen\x00IfNoHope\x00NoConflict\x00NotFound\x00Found\x00SeekRowid\x00NotExists\x00Last\x00IfSmaller\x00SorterSort\x00Sort\x00Rewind\x00SorterNext\x00Prev\x00Next\x00IdxLE\x00IdxGT\x00IdxLT\x00Or\x00And\x00IdxGE\x00RowSetRead\x00RowSetTest\x00Program\x00FkIfZero\x00IsNull\x00NotNull\x00Ne\x00Eq\x00Gt\x00Le\x00Lt\x00Ge\x00ElseEq\x00IfPos\x00IfNotZero\x00DecrJumpZero\x00IncrVacuum\x00VNext\x00Filter\x00PureFunc\x00Function\x00Return\x00EndCoroutine\x00HaltIfNull\x00Halt\x00Integer\x00Int64\x00String\x00BeginSubrtn\x00Null\x00SoftNull\x00Blob\x00Variable\x00Move\x00Copy\x00SCopy\x00IntCopy\x00FkCheck\x00ResultRow\x00CollSeq\x00AddImm\x00RealAffinity\x00Cast\x00Permutation\x00Compare\x00IsTrue\x00ZeroOrNull\x00Offset\x00Column\x00TypeCheck\x00Affinity\x00MakeRecord\x00Count\x00ReadCookie\x00SetCookie\x00ReopenIdx\x00BitAnd\x00BitOr\x00ShiftLeft\x00ShiftRight\x00Add\x00Subtract\x00Multiply\x00Divide\x00Remainder\x00Concat\x00OpenRead\x00OpenWrite\x00BitNot\x00OpenDup\x00OpenAutoindex\x00String8\x00OpenEphemeral\x00SorterOpen\x00SequenceTest\x00OpenPseudo\x00Close\x00ColumnsUsed\x00SeekScan\x00SeekHit\x00Sequence\x00NewRowid\x00Insert\x00RowCell\x00Delete\x00ResetCount\x00SorterCompare\x00SorterData\x00RowData\x00Rowid\x00NullRow\x00SeekEnd\x00IdxInsert\x00SorterInsert\x00IdxDelete\x00DeferredSeek\x00IdxRowid\x00FinishSeek\x00Destroy\x00Clear\x00ResetSorter\x00CreateBtree\x00SqlExec\x00ParseSchema\x00LoadAnalysis\x00DropTable\x00DropIndex\x00Real\x00DropTrigger\x00IntegrityCk\x00RowSetAdd\x00Param\x00FkCounter\x00MemMax\x00OffsetLimit\x00AggInverse\x00AggStep\x00AggStep1\x00AggValue\x00AggFinal\x00Expire\x00CursorLock\x00CursorUnlock\x00TableLock\x00VBegin\x00VCreate\x00VDestroy\x00VOpen\x00VInitIn\x00VColumn\x00VRename\x00Pagecount\x00MaxPgcnt\x00ClrSubtype\x00FilterAdd\x00Trace\x00CursorHint\x00ReleaseReg\x00Noop\x00Explain\x00Abortable\x00open\x00close\x00access\x00getcwd\x00stat\x00fstat\x00ftruncate\x00fcntl\x00read\x00pread\x00pread64\x00write\x00pwrite\x00pwrite64\x00fchmod\x00fallocate\x00unlink\x00openDirectory\x00mkdir\x00rmdir\x00fchown\x00geteuid\x00mmap\x00munmap\x00mremap\x00getpagesize\x00readlink\x00lstat\x00ioctl\x00attempt to open \"%s\" as file descriptor %d\x00/dev/null\x00os_unix.c:%d: (%d) %s(%s) - %s\x00cannot fstat db file %s\x00file unlinked while open: %s\x00multiple links to file: %s\x00file renamed while open: %s\x00%s\x00full_fsync\x00%s-shm\x00readonly_shm\x00psow\x00unix-excl\x00%s.lock\x00/var/tmp\x00/usr/tmp\x00/tmp\x00SQLITE_TMPDIR\x00TMPDIR\x00%s/etilqs_%llx%c\x00modeof\x00fsync\x00/dev/urandom\x00unix\x00unix-none\x00unix-dotfile\x00memdb\x00memdb(%p,%lld)\x00PRAGMA \"%w\".page_count\x00ATTACH x AS %Q\x00recovered %d pages from %s\x00-journal\x00-wal\x00nolock\x00immutable\x00PRAGMA 
table_list\x00recovered %d frames from WAL file %s\x00cannot limit WAL size: %s\x00SQLite format 3\x00:memory:\x00@ \x00\n\x00invalid page number %d\x002nd reference to page %d\x00Failed to read ptrmap key=%d\x00Bad ptr map entry key=%d expected=(%d,%d) got=(%d,%d)\x00failed to get page %d\x00freelist leaf count too big on page %d\x00%s is %d but should be %d\x00size\x00overflow list length\x00Page %u: \x00unable to get the page. error code=%d\x00btreeInitPage() returns error code %d\x00free space corruption\x00On tree page %u cell %d: \x00On page %u at right child: \x00Offset %d out of range %d..%d\x00Extends off end of page\x00Rowid %lld out of order\x00Child page depth differs\x00Multiple uses for byte %u of page %u\x00Fragmentation of %d bytes reported as %d on page %u\x00Main freelist: \x00max rootpage (%d) disagrees with header (%d)\x00incremental_vacuum enabled with a max rootpage of zero\x00Page %d is never used\x00Pointer map page %d is referenced\x00unknown database %s\x00destination database is in use\x00source and destination must be distinct\x00%!.15g\x00-\x00%s%s\x00k(%d\x00B\x00,%s%s%s\x00N.\x00)\x00%.18s-%s\x00%s(%d)\x00%d\x00(blob)\x00vtab:%p\x00%c%u\x00]\x00program\x00?\x008\x0016LE\x0016BE\x00addr\x00opcode\x00p1\x00p2\x00p3\x00p4\x00p5\x00comment\x00id\x00parent\x00notused\x00detail\x00%.4c%s%.16c\x00MJ delete: %s\x00MJ collide: %s\x00-mj%06X9%02X\x00FOREIGN KEY constraint failed\x00a CHECK constraint\x00a generated column\x00an index\x00non-deterministic use of %s() in %s\x00API called with finalized prepared statement\x00API called with NULL prepared statement\x00string or blob too big\x00bind on a busy prepared statement: [%s]\x00-- \x00'%.*q'\x00zeroblob(%d)\x00x'\x00%02x\x00'\x00%s constraint failed\x00%z: %s\x00abort at %d in [%s]: %s\x00cannot store %s value in %s column %s.%s\x00cannot open savepoint - SQL statements in progress\x00no such savepoint: %s\x00cannot release savepoint - SQL statements in progress\x00cannot commit transaction - SQL statements in progress\x00cannot start a transaction within a transaction\x00cannot rollback - no transaction is active\x00cannot commit - no transaction is active\x00database schema has changed\x00index corruption\x00sqlite_master\x00SELECT*FROM\"%w\".%s WHERE %s ORDER BY rowid\x00too many levels of trigger recursion\x00cannot change %s wal mode from within a transaction\x00into\x00out of\x00database table is locked: %s\x00ValueList\x00-- %s\x00statement aborts at %d: [%s] %s\x00NOT NULL\x00UNIQUE\x00CHECK\x00FOREIGN KEY\x00cannot open value of type %s\x00null\x00real\x00integer\x00no such rowid: %lld\x00cannot open virtual table: %s\x00cannot open table without rowid: %s\x00cannot open view: %s\x00no such column: \"%s\"\x00foreign key\x00indexed\x00cannot open %s column for writing\x00sqlite_\x00sqlite_temp_master\x00sqlite_temp_schema\x00sqlite_schema\x00main\x00*\x00new\x00old\x00excluded\x00misuse of aliased aggregate %s\x00misuse of aliased window function %s\x00row value misused\x00double-quoted string literal: \"%w\"\x00coalesce\x00no such column\x00ambiguous column name\x00%s: %s.%s.%s\x00%s: %s.%s\x00%s: %s\x00partial index WHERE clauses\x00index expressions\x00CHECK constraints\x00generated columns\x00%s prohibited in %s\x00the \".\" operator\x00second argument to %#T() must be a constant between 0.0 and 1.0\x00not authorized to use function: %#T\x00non-deterministic functions\x00%#T() may not be used as a window function\x00window\x00aggregate\x00misuse of %s function %#T()\x00no such function: %#T\x00wrong 
number of arguments to function %#T()\x00FILTER may not be used with non-aggregate %#T()\x00subqueries\x00parameters\x00%r %s BY term out of range - should be between 1 and %d\x00too many terms in ORDER BY clause\x00ORDER\x00%r ORDER BY term does not match any column in the result set\x00too many terms in %s BY clause\x00HAVING clause on a non-aggregate query\x00GROUP\x00aggregate functions are not allowed in the GROUP BY clause\x00Expression tree is too large (maximum depth %d)\x00IN(...) element has %d term%s - expected %d\x00s\x000\x00too many arguments on function %T\x00unsafe use of %#T()\x00variable number must be between ?1 and ?%d\x00too many SQL variables\x00%d columns assigned %d values\x00too many columns in %s\x00true\x00false\x00_ROWID_\x00ROWID\x00OID\x00USING ROWID SEARCH ON TABLE %s FOR IN-OPERATOR\x00USING INDEX %s FOR IN-OPERATOR\x00sub-select returns %d columns - expected %d\x00REUSE LIST SUBQUERY %d\x00%sLIST SUBQUERY %d\x00CORRELATED \x00REUSE SUBQUERY %d\x00%sSCALAR SUBQUERY %d\x001\x000x\x00hex literal too big: %s%#T\x00generated column loop on \"%s\"\x00blob\x00text\x00numeric\x00flexnum\x00none\x00misuse of aggregate: %#T()\x00unknown function: %#T()\x00RAISE() may only be used within a trigger-program\x00B\x00C\x00D\x00E\x00F\x00table %s may not be altered\x00SELECT 1 FROM \"%w\".sqlite_master WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%' AND sqlite_rename_test(%Q, sql, type, name, %d, %Q, %d)=NULL \x00SELECT 1 FROM temp.sqlite_master WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%' AND sqlite_rename_test(%Q, sql, type, name, 1, %Q, %d)=NULL \x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_quotefix(%Q, sql)WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%'\x00UPDATE temp.sqlite_master SET sql = sqlite_rename_quotefix('temp', sql)WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%'\x00there is already another table or index with this name: %s\x00table\x00view %s may not be altered\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, %d) WHERE (type!='index' OR tbl_name=%Q COLLATE nocase)AND name NOT LIKE 'sqliteX_%%' ESCAPE 'X'\x00UPDATE %Q.sqlite_master SET tbl_name = %Q, name = CASE WHEN type='table' THEN %Q WHEN name LIKE 'sqliteX_autoindex%%' ESCAPE 'X' AND type='index' THEN 'sqlite_autoindex_' || %Q || substr(name,%d+18) ELSE name END WHERE tbl_name=%Q COLLATE nocase AND (type='table' OR type='index' OR type='trigger');\x00sqlite_sequence\x00UPDATE \"%w\".sqlite_sequence set name = %Q WHERE name = %Q\x00UPDATE sqlite_temp_schema SET sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, 1), tbl_name = CASE WHEN tbl_name=%Q COLLATE nocase AND sqlite_rename_test(%Q, sql, type, name, 1, 'after rename', 0) THEN %Q ELSE tbl_name END WHERE type IN ('view', 'trigger')\x00after rename\x00SELECT raise(ABORT,%Q) FROM \"%w\".\"%w\"\x00Cannot add a PRIMARY KEY column\x00Cannot add a UNIQUE column\x00Cannot add a REFERENCES column with non-NULL default value\x00Cannot add a NOT NULL column with default value NULL\x00Cannot add a column with non-constant default\x00cannot add a STORED column\x00UPDATE \"%w\".sqlite_master SET sql = printf('%%.%ds, ',sql) || %Q || substr(sql,1+length(printf('%%.%ds',sql))) WHERE type = 'table' AND name = %Q\x00SELECT CASE WHEN quick_check GLOB 'CHECK*' THEN raise(ABORT,'CHECK constraint failed') ELSE raise(ABORT,'NOT NULL constraint failed') END FROM pragma_quick_check(%Q,%Q) 
WHERE quick_check GLOB 'CHECK*' OR quick_check GLOB 'NULL*'\x00virtual tables may not be altered\x00Cannot add a column to a view\x00sqlite_altertab_%s\x00view\x00virtual table\x00cannot %s %s \"%s\"\x00drop column from\x00rename columns of\x00no such column: \"%T\"\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, %d) WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND (type != 'index' OR tbl_name = %Q)\x00UPDATE temp.sqlite_master SET sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, 1) WHERE type IN ('trigger', 'view')\x00error in %s %s%s%s: %s\x00 \x00CREATE \x00\"%w\" \x00%Q%s\x00%.*s%s\x00cannot drop %s column: \"%s\"\x00PRIMARY KEY\x00cannot drop column \"%s\": no other columns exist\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_drop_column(%d, sql, %d) WHERE (type=='table' AND tbl_name=%Q COLLATE nocase)\x00after drop column\x00sqlite_rename_column\x00sqlite_rename_table\x00sqlite_rename_test\x00sqlite_drop_column\x00sqlite_rename_quotefix\x00CREATE TABLE %Q.%s(%s)\x00DELETE FROM %Q.%s WHERE %s=%Q\x00DELETE FROM %Q.%s\x00sqlite_stat1\x00tbl,idx,stat\x00sqlite_stat4\x00tbl,idx,neq,nlt,ndlt,sample\x00sqlite_stat3\x00stat_init\x00stat_push\x00%llu\x00 %llu\x00%llu \x00stat_get\x00sqlite\\_%\x00BBB\x00idx\x00tbl\x00unordered*\x00sz=[0-9]*\x00noskipscan*\x00SELECT idx,count(*) FROM %Q.sqlite_stat4 GROUP BY idx\x00SELECT idx,neq,nlt,ndlt,sample FROM %Q.sqlite_stat4\x00SELECT tbl,idx,stat FROM %Q.sqlite_stat1\x00x\x00\x00too many attached databases - max %d\x00database %s is already in use\x00database is already attached\x00attached databases must use the same text encoding as main database\x00unable to open database: %s\x00no such database: %s\x00cannot detach database %s\x00database %s is locked\x00sqlite_detach\x00sqlite_attach\x00%s cannot use variables\x00%s %T cannot reference objects in database %s\x00authorizer malfunction\x00%s.%s\x00%s.%z\x00access to %z is prohibited\x00not authorized\x00pragma_\x00no such view\x00no such table\x00corrupt database\x00unknown database %T\x00object name reserved for internal use: %s\x00temporary table name must be unqualified\x00%s %T already exists\x00there is already an index named %s\x00sqlite_returning\x00cannot use RETURNING in a trigger\x00too many columns on %s\x00always\x00generated\x00duplicate column name: %s\x00default value of column [%s] is not constant\x00cannot use DEFAULT on a generated column\x00generated columns cannot be part of the PRIMARY KEY\x00table \"%s\" has more than one primary key\x00AUTOINCREMENT is only allowed on an INTEGER PRIMARY KEY\x00virtual tables cannot use computed columns\x00virtual\x00stored\x00error in generated column \"%s\"\x00,\x00\n \x00,\n \x00\n)\x00CREATE TABLE \x00 TEXT\x00 NUM\x00 INT\x00 REAL\x00unknown datatype for %s.%s: \"%s\"\x00missing datatype for %s.%s\x00AUTOINCREMENT not allowed on WITHOUT ROWID tables\x00PRIMARY KEY missing on table %s\x00must have at least one non-generated column\x00TABLE\x00VIEW\x00CREATE %s %.*s\x00UPDATE %Q.sqlite_master SET type='%s', name=%Q, tbl_name=%Q, rootpage=#%d, sql=%Q WHERE rowid=#%d\x00CREATE TABLE %Q.sqlite_sequence(name,seq)\x00tbl_name='%q' AND type!='trigger'\x00parameters are not allowed in views\x00view %s is circularly defined\x00corrupt schema\x00UPDATE %Q.sqlite_master SET rootpage=%d WHERE #%d AND rootpage=#%d\x00sqlite_stat%d\x00DELETE FROM %Q.sqlite_sequence WHERE name=%Q\x00DELETE FROM %Q.sqlite_master WHERE tbl_name=%Q and type!='trigger'\x00table %s may not be dropped\x00use 
DROP TABLE to delete table %s\x00use DROP VIEW to delete view %s\x00foreign key on %s should reference only one column of table %T\x00number of columns in foreign key does not match the number of columns in the referenced table\x00unknown column \"%s\" in foreign key definition\x00unsupported use of NULLS %s\x00FIRST\x00LAST\x00index\x00cannot create a TEMP index on non-TEMP table \"%s\"\x00table %s may not be indexed\x00views may not be indexed\x00virtual tables may not be indexed\x00there is already a table named %s\x00index %s already exists\x00sqlite_autoindex_%s_%d\x00expressions prohibited in PRIMARY KEY and UNIQUE constraints\x00conflicting ON CONFLICT clauses specified\x00invalid rootpage\x00CREATE%s INDEX %.*s\x00 UNIQUE\x00INSERT INTO %Q.sqlite_master VALUES('index',%Q,%Q,#%d,%Q);\x00name='%q' AND type='index'\x00no such index: %S\x00index associated with UNIQUE or PRIMARY KEY constraint cannot be dropped\x00DELETE FROM %Q.sqlite_master WHERE name=%Q AND type='index'\x00too many FROM clause terms, max: %d\x00a JOIN clause is required before %s\x00ON\x00USING\x00BEGIN\x00ROLLBACK\x00COMMIT\x00RELEASE\x00unable to open a temporary database file for storing temporary tables\x00index '%q'\x00, \x00%s.rowid\x00unable to identify the object to be reindexed\x00duplicate WITH table name: %s\x00no such collation sequence: %s\x00unsafe use of virtual table \"%s\"\x00table %s may not be modified\x00cannot modify %s because it is a view\x00rows deleted\x00integer overflow\x00%.*f\x00LIKE or GLOB pattern too complex\x00ESCAPE expression must be a single character\x00%!.20e\x00%Q\x00?000\x00MATCH\x00like\x00implies_nonnull_row\x00expr_compare\x00expr_implies_expr\x00affinity\x00soundex\x00load_extension\x00sqlite_compileoption_used\x00sqlite_compileoption_get\x00unlikely\x00likelihood\x00likely\x00sqlite_offset\x00ltrim\x00rtrim\x00trim\x00min\x00max\x00typeof\x00subtype\x00length\x00instr\x00printf\x00format\x00unicode\x00char\x00abs\x00round\x00upper\x00lower\x00hex\x00unhex\x00ifnull\x00random\x00randomblob\x00nullif\x00sqlite_version\x00sqlite_source_id\x00sqlite_log\x00quote\x00last_insert_rowid\x00changes\x00total_changes\x00replace\x00zeroblob\x00substr\x00substring\x00sum\x00total\x00avg\x00count\x00group_concat\x00glob\x00ceil\x00ceiling\x00floor\x00trunc\x00ln\x00log\x00log10\x00log2\x00exp\x00pow\x00power\x00mod\x00acos\x00asin\x00atan\x00atan2\x00cos\x00sin\x00tan\x00cosh\x00sinh\x00tanh\x00acosh\x00asinh\x00atanh\x00sqrt\x00radians\x00degrees\x00pi\x00sign\x00iif\x00foreign key mismatch - \"%w\" referencing \"%w\"\x00cannot INSERT into generated column \"%s\"\x00table %S has no column named %s\x00table %S has %d columns but %d values were supplied\x00%d values for %d columns\x00UPSERT not implemented for virtual table \"%s\"\x00cannot UPSERT a view\x00rows inserted\x00sqlite3_extension_init\x00sqlite3_\x00lib\x00_init\x00no entry point [%s] in shared library [%s]\x00error during initialization: %s\x00unable to open shared library [%.*s]\x00so\x00automatic extension loading failed: 
%s\x00seq\x00from\x00to\x00on_update\x00on_delete\x00match\x00cid\x00name\x00type\x00notnull\x00dflt_value\x00pk\x00hidden\x00schema\x00ncol\x00wr\x00strict\x00seqno\x00desc\x00coll\x00key\x00builtin\x00enc\x00narg\x00flags\x00wdth\x00hght\x00flgs\x00unique\x00origin\x00partial\x00rowid\x00fkid\x00file\x00busy\x00checkpointed\x00database\x00status\x00cache_size\x00timeout\x00analysis_limit\x00application_id\x00auto_vacuum\x00automatic_index\x00busy_timeout\x00cache_spill\x00case_sensitive_like\x00cell_size_check\x00checkpoint_fullfsync\x00collation_list\x00compile_options\x00count_changes\x00data_version\x00database_list\x00default_cache_size\x00defer_foreign_keys\x00empty_result_callbacks\x00encoding\x00foreign_key_check\x00foreign_key_list\x00foreign_keys\x00freelist_count\x00full_column_names\x00fullfsync\x00function_list\x00hard_heap_limit\x00ignore_check_constraints\x00incremental_vacuum\x00index_info\x00index_list\x00index_xinfo\x00integrity_check\x00journal_mode\x00journal_size_limit\x00legacy_alter_table\x00locking_mode\x00max_page_count\x00mmap_size\x00module_list\x00optimize\x00page_count\x00page_size\x00pragma_list\x00query_only\x00quick_check\x00read_uncommitted\x00recursive_triggers\x00reverse_unordered_selects\x00schema_version\x00secure_delete\x00short_column_names\x00shrink_memory\x00soft_heap_limit\x00synchronous\x00table_info\x00table_list\x00table_xinfo\x00temp_store\x00temp_store_directory\x00threads\x00trusted_schema\x00user_version\x00wal_autocheckpoint\x00wal_checkpoint\x00writable_schema\x00onoffalseyestruextrafull\x00exclusive\x00normal\x00full\x00incremental\x00memory\x00temporary storage cannot be changed from within a transaction\x00SET NULL\x00SET DEFAULT\x00CASCADE\x00RESTRICT\x00NO ACTION\x00delete\x00persist\x00off\x00truncate\x00wal\x00w\x00a\x00sissii\x00utf8\x00utf16le\x00utf16be\x00-%T\x00fast\x00not a writable directory\x00Safety level may not be changed inside a transaction\x00reset\x00issisii\x00issisi\x00SELECT*FROM\"%w\"\x00shadow\x00sssiii\x00iisX\x00isiX\x00c\x00u\x00isisi\x00iss\x00is\x00iissssss\x00NONE\x00siX\x00*** in database %s ***\n\x00row not in PRIMARY KEY order for %s\x00NULL value in %s.%s\x00non-%s value in %s.%s\x00NUMERIC value in %s.%s\x00C\x00TEXT value in %s.%s\x00CHECK constraint failed in %s\x00row \x00 missing from index \x00rowid not at end-of-record for row \x00 of index \x00 values differ from index \x00non-unique entry in index \x00wrong # of entries in index \x00ok\x00unsupported encoding: %s\x00restart\x00ANALYZE \"%w\".\"%w\"\x00UTF8\x00UTF-8\x00UTF-16le\x00UTF-16be\x00UTF16le\x00UTF16be\x00UTF-16\x00UTF16\x00CREATE TABLE x\x00%c\"%s\"\x00(\"%s\"\x00,arg HIDDEN\x00,schema HIDDEN\x00PRAGMA \x00%Q.\x00=%Q\x00error in %s %s after %s: %s\x00malformed database schema (%s)\x00%z - %s\x00rename\x00drop column\x00add column\x00orphan index\x00CREATE TABLE x(type text,name text,tbl_name text,rootpage int,sql text)\x00unsupported file format\x00SELECT*FROM\"%w\".%s ORDER BY rowid\x00database schema is locked: %s\x00statement too long\x00unknown join type: %T%s%T%s%T\x00naturaleftouterightfullinnercross\x00a NATURAL join may not have an ON or USING clause\x00cannot join using column %s - column not present in both tables\x00ambiguous reference to %s in USING()\x00UNION ALL\x00INTERSECT\x00EXCEPT\x00UNION\x00USE TEMP B-TREE FOR %s\x00USE TEMP B-TREE FOR %sORDER BY\x00RIGHT PART OF \x00column%d\x00%.*z:%u\x00NUM\x00cannot use window functions in recursive queries\x00recursive aggregate queries not supported\x00SETUP\x00RECURSIVE 
STEP\x00SCAN %d CONSTANT ROW%s\x00S\x00COMPOUND QUERY\x00LEFT-MOST SUBQUERY\x00%s USING TEMP B-TREE\x00all VALUES must have the same number of terms\x00SELECTs to the left and right of %s do not have the same number of result columns\x00MERGE (%s)\x00LEFT\x00RIGHT\x00no such index: %s\x00'%s' is not a function\x00no such index: \"%s\"\x00multiple references to recursive table: %s\x00circular reference: %s\x00table %s has %d values for %d columns\x00multiple recursive references: %s\x00recursive reference in a subquery: %s\x00%!S\x00too many references to \"%s\": max 65535\x00access to view \"%s\" prohibited\x00..%s\x00%s.%s.%s\x00no such table: %s\x00no tables specified\x00too many columns in result set\x00DISTINCT aggregates must have exactly one argument\x00USE TEMP B-TREE FOR %s(DISTINCT)\x00SCAN %s%s%s\x00 USING COVERING INDEX \x00target object/alias may not appear in FROM clause: %s\x00expected %d columns for '%s' but got %d\x00CO-ROUTINE %!S\x00MATERIALIZE %!S\x00DISTINCT\x00GROUP BY\x00sqlite3_get_table() called with two or more incompatible queries\x00temporary trigger may not have qualified name\x00trigger\x00cannot create triggers on virtual tables\x00trigger %T already exists\x00cannot create trigger on system table\x00cannot create %s trigger on view: %S\x00BEFORE\x00AFTER\x00cannot create INSTEAD OF trigger on table: %S\x00trigger \"%s\" may not write to shadow table \"%s\"\x00INSERT INTO %Q.sqlite_master VALUES('trigger',%Q,%Q,0,'CREATE TRIGGER %q')\x00type='trigger' AND name='%q'\x00no such trigger: %S\x00DELETE FROM %Q.sqlite_master WHERE name=%Q AND type='trigger'\x00%s RETURNING is not available on virtual tables\x00DELETE\x00UPDATE\x00RETURNING may not use \"TABLE.*\" wildcards\x00-- TRIGGER %s\x00cannot UPDATE generated column \"%s\"\x00no such column: %s\x00rows updated\x00%r \x00%sON CONFLICT clause does not match any PRIMARY KEY or UNIQUE constraint\x00CRE\x00INS\x00cannot VACUUM from within a transaction\x00cannot VACUUM - SQL statements in progress\x00non-text filename\x00ATTACH %Q AS vacuum_db\x00output file already exists\x00SELECT sql FROM \"%w\".sqlite_schema WHERE type='table'AND name<>'sqlite_sequence' AND coalesce(rootpage,1)>0\x00SELECT sql FROM \"%w\".sqlite_schema WHERE type='index'\x00SELECT'INSERT INTO vacuum_db.'||quote(name)||' SELECT*FROM\"%w\".'||quote(name)FROM vacuum_db.sqlite_schema WHERE type='table'AND coalesce(rootpage,1)>0\x00INSERT INTO vacuum_db.sqlite_schema SELECT*FROM \"%w\".sqlite_schema WHERE type IN('view','trigger') OR(type='table'AND rootpage=0)\x00CREATE VIRTUAL TABLE %T\x00UPDATE %Q.sqlite_master SET type='table', name=%Q, tbl_name=%Q, rootpage=0, sql=%Q WHERE rowid=#%d\x00name=%Q AND sql=%Q\x00vtable constructor called recursively: %s\x00vtable constructor failed: %s\x00vtable constructor did not declare schema: %s\x00no such module: %s\x00\x00 AND \x00(\x00 (\x00%s=?\x00ANY(%s)\x00>\x00<\x00%s %S\x00SEARCH\x00SCAN\x00AUTOMATIC PARTIAL COVERING INDEX\x00AUTOMATIC COVERING INDEX\x00COVERING INDEX %s\x00INDEX %s\x00 USING \x00 USING INTEGER PRIMARY KEY (%s\x00>? 
AND %s\x00%c?)\x00 VIRTUAL TABLE INDEX %d:%s\x00 LEFT-JOIN\x00BLOOM FILTER ON %S (\x00rowid=?\x00MULTI-INDEX OR\x00INDEX %d\x00RIGHT-JOIN %s\x00regexp\x00ON clause references tables to its right\x00NOCASE\x00too many arguments on %s() - max %d\x00automatic index on %s(%s)\x00auto-index\x00%s.xBestIndex malfunction\x00abbreviated query algorithm search\x00no query solution\x00at most %d tables in a join\x00SCAN CONSTANT ROW\x00second argument to nth_value must be a positive integer\x00argument of ntile must be a positive integer\x00row_number\x00dense_rank\x00rank\x00percent_rank\x00cume_dist\x00ntile\x00last_value\x00nth_value\x00first_value\x00lead\x00lag\x00no such window: %s\x00RANGE with offset PRECEDING/FOLLOWING requires one ORDER BY expression\x00FILTER clause may only be used with aggregate window functions\x00misuse of aggregate: %s()\x00unsupported frame specification\x00PARTITION clause\x00ORDER BY clause\x00frame specification\x00cannot override %s of window: %s\x00DISTINCT is not supported for window functions\x00frame starting offset must be a non-negative integer\x00frame ending offset must be a non-negative integer\x00frame starting offset must be a non-negative number\x00frame ending offset must be a non-negative number\x00%s clause should come after %s not before\x00ORDER BY\x00LIMIT\x00too many terms in compound SELECT\x00syntax error after column name \"%.*s\"\x00parser stack overflow\x00unknown table option: %.*s\x00set list\x00near \"%T\": syntax error\x00qualified table names are not allowed on INSERT, UPDATE, and DELETE statements within triggers\x00the INDEXED BY clause is not allowed on UPDATE or DELETE statements within triggers\x00the NOT INDEXED clause is not allowed on UPDATE or DELETE statements within triggers\x00incomplete input\x00unrecognized token: \"%T\"\x00%s in \"%s\"\x00create\x00temp\x00temporary\x00end\x00explain\x00unable to close due to unfinalized statements or unfinished backups\x00unknown error\x00abort due to ROLLBACK\x00another row available\x00no more rows available\x00not an error\x00SQL logic error\x00access permission denied\x00query aborted\x00database is locked\x00database table is locked\x00attempt to write a readonly database\x00interrupted\x00disk I/O error\x00database disk image is malformed\x00unknown operation\x00database or disk is full\x00unable to open database file\x00locking protocol\x00constraint failed\x00datatype mismatch\x00bad parameter or other API misuse\x00authorization denied\x00column index out of range\x00file is not a database\x00notification message\x00warning message\x00unable to delete/modify user-function due to active statements\x00unable to use function %s in the requested context\x00unknown database: %s\x00unable to delete/modify collation sequence due to active statements\x00file:\x00localhost\x00invalid uri authority: %.*s\x00vfs\x00cache\x00mode\x00no such %s mode: %s\x00%s mode not allowed: %s\x00no such vfs: %s\x00shared\x00private\x00ro\x00rw\x00rwc\x00RTRIM\x00\x00\x00\x00%s at line %d of [%.10s]\x00database corruption\x00misuse\x00cannot open file\x00no such table column: %s.%s\x00SQLITE_\x00database is deadlocked\x00array\x00object\x000123456789abcdef\x00JSON cannot hold BLOB values\x00malformed JSON\x00[0]\x00JSON path error near '%q'\x00json_%s() needs an odd number of arguments\x00$[\x00$.\x00json_object() requires an even number of arguments\x00json_object() labels must be TEXT\x00set\x00insert\x00[]\x00{}\x00CREATE TABLE x(key,value,type,atom,id,parent,fullkey,path,json HIDDEN,root 
HIDDEN)\x00.%.*s\x00[%d]\x00$\x00json\x00json_array\x00json_array_length\x00json_extract\x00->\x00->>\x00json_insert\x00json_object\x00json_patch\x00json_quote\x00json_remove\x00json_replace\x00json_set\x00json_type\x00json_valid\x00json_group_array\x00json_group_object\x00json_each\x00json_tree\x00%s_node\x00data\x00DROP TABLE '%q'.'%q_node';DROP TABLE '%q'.'%q_rowid';DROP TABLE '%q'.'%q_parent';\x00RtreeMatchArg\x00SELECT * FROM %Q.%Q\x00UNIQUE constraint failed: %s.%s\x00rtree constraint failed: %s.(%s<=%s)\x00ALTER TABLE %Q.'%q_node' RENAME TO \"%w_node\";ALTER TABLE %Q.'%q_parent' RENAME TO \"%w_parent\";ALTER TABLE %Q.'%q_rowid' RENAME TO \"%w_rowid\";\x00SELECT stat FROM %Q.sqlite_stat1 WHERE tbl = '%q_rowid'\x00node\x00CREATE TABLE \"%w\".\"%w_rowid\"(rowid INTEGER PRIMARY KEY,nodeno\x00,a%d\x00);CREATE TABLE \"%w\".\"%w_node\"(nodeno INTEGER PRIMARY KEY,data);\x00CREATE TABLE \"%w\".\"%w_parent\"(nodeno INTEGER PRIMARY KEY,parentnode);\x00INSERT INTO \"%w\".\"%w_node\"VALUES(1,zeroblob(%d))\x00INSERT INTO\"%w\".\"%w_rowid\"(rowid,nodeno)VALUES(?1,?2)ON CONFLICT(rowid)DO UPDATE SET nodeno=excluded.nodeno\x00SELECT * FROM \"%w\".\"%w_rowid\" WHERE rowid=?1\x00UPDATE \"%w\".\"%w_rowid\"SET \x00a%d=coalesce(?%d,a%d)\x00a%d=?%d\x00 WHERE rowid=?1\x00INSERT OR REPLACE INTO '%q'.'%q_node' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_node' WHERE nodeno = ?1\x00SELECT nodeno FROM '%q'.'%q_rowid' WHERE rowid = ?1\x00INSERT OR REPLACE INTO '%q'.'%q_rowid' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_rowid' WHERE rowid = ?1\x00SELECT parentnode FROM '%q'.'%q_parent' WHERE nodeno = ?1\x00INSERT OR REPLACE INTO '%q'.'%q_parent' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_parent' WHERE nodeno = ?1\x00PRAGMA %Q.page_size\x00SELECT length(data) FROM '%q'.'%q_node' WHERE nodeno = 1\x00undersize RTree blobs in \"%q_node\"\x00Wrong number of columns for an rtree table\x00Too few columns for an rtree table\x00Too many columns for an rtree table\x00Auxiliary rtree columns must be last\x00CREATE TABLE x(%.*s INT\x00,%.*s\x00);\x00,%.*s REAL\x00,%.*s INT\x00{%lld\x00 %g\x00}\x00Invalid argument to rtreedepth()\x00%z%s%z\x00SELECT data FROM %Q.'%q_node' WHERE nodeno=?\x00Node %lld missing from database\x00SELECT parentnode FROM %Q.'%q_parent' WHERE nodeno=?1\x00SELECT nodeno FROM %Q.'%q_rowid' WHERE rowid=?1\x00Mapping (%lld -> %lld) missing from %s table\x00%_rowid\x00%_parent\x00Found (%lld -> %lld) in %s table, expected (%lld -> %lld)\x00Dimension %d of cell %d on node %lld is corrupt\x00Dimension %d of cell %d on node %lld is corrupt relative to parent\x00Node %lld is too small (%d bytes)\x00Rtree depth out of range (%d)\x00Node %lld is too small for cell count of %d (%d bytes)\x00SELECT count(*) FROM %Q.'%q%s'\x00Wrong number of entries in %%%s table - expected %lld, actual %lld\x00SELECT * FROM %Q.'%q_rowid'\x00Schema corrupt or not an rtree\x00_rowid\x00_parent\x00END\x00wrong number of arguments to function rtreecheck()\x00[\x00[%!g,%!g],\x00[%!g,%!g]]\x00\x00CREATE TABLE x(_shape\x00,%s\x00rtree\x00fullscan\x00_shape does not contain a valid polygon\x00geopoly_overlap\x00geopoly_within\x00geopoly\x00geopoly_area\x00geopoly_blob\x00geopoly_json\x00geopoly_svg\x00geopoly_contains_point\x00geopoly_debug\x00geopoly_bbox\x00geopoly_xform\x00geopoly_regular\x00geopoly_ccw\x00geopoly_group_bbox\x00rtreenode\x00rtreedepth\x00rtreecheck\x00rtree_i32\x00corrupt fossil delta\x00DROP TRIGGER IF EXISTS temp.rbu_insert_tr;DROP TRIGGER IF EXISTS temp.rbu_update1_tr;DROP TRIGGER IF EXISTS temp.rbu_update2_tr;DROP TRIGGER IF 
EXISTS temp.rbu_delete_tr;\x00SELECT rbu_target_name(name, type='view') AS target, name FROM sqlite_schema WHERE type IN ('table', 'view') AND target IS NOT NULL %s ORDER BY name\x00AND rootpage!=0 AND rootpage IS NOT NULL\x00SELECT name, rootpage, sql IS NULL OR substr(8, 6)=='UNIQUE' FROM main.sqlite_schema WHERE type='index' AND tbl_name = ?\x00SELECT (sql COLLATE nocase BETWEEN 'CREATE VIRTUAL' AND 'CREATE VIRTUAM'), rootpage FROM sqlite_schema WHERE name=%Q\x00PRAGMA index_list=%Q\x00SELECT rootpage FROM sqlite_schema WHERE name = %Q\x00PRAGMA table_info=%Q\x00PRAGMA main.index_list = %Q\x00PRAGMA main.index_xinfo = %Q\x00SELECT * FROM '%q'\x00rbu_\x00rbu_rowid\x00table %q %s rbu_rowid column\x00may not have\x00requires\x00PRAGMA table_info(%Q)\x00column missing from %q: %s\x00%z%s\"%w\"\x00%z%s%s\"%w\"%s\x00SELECT max(_rowid_) FROM \"%s%w\"\x00 WHERE _rowid_ > %lld \x00 DESC\x00quote(\x00||','||\x00SELECT %s FROM \"%s%w\" ORDER BY %s LIMIT 1\x00 WHERE (%s) > (%s) \x00_rowid_\x00%z%s \"%w\" COLLATE %Q\x00%z%s \"rbu_imp_%d%w\" COLLATE %Q DESC\x00%z%s quote(\"rbu_imp_%d%w\")\x00SELECT %s FROM \"rbu_imp_%w\" ORDER BY %s LIMIT 1\x00%z%s%s\x00(%s) > (%s)\x00%z%s(%.*s) COLLATE %Q\x00%z%s\"%w\" COLLATE %Q\x00%z%s\"rbu_imp_%d%w\"%s\x00%z%s\"rbu_imp_%d%w\" %s COLLATE %Q\x00%z%s\"rbu_imp_%d%w\" IS ?\x00%z%s%s.\"%w\"\x00%z%sNULL\x00%z, %s._rowid_\x00_rowid_ = ?%d\x00%z%sc%d=?%d\x00_rowid_ = (SELECT id FROM rbu_imposter2 WHERE %z)\x00%z%s\"%w\"=?%d\x00invalid rbu_control value\x00%z%s\"%w\"=rbu_delta(\"%w\", ?%d)\x00%z%s\"%w\"=rbu_fossil_delta(\"%w\", ?%d)\x00PRIMARY KEY(\x00%z%s\"%w\"%s\x00%z)\x00SELECT name FROM sqlite_schema WHERE rootpage = ?\x00%z%sc%d %s COLLATE %Q\x00%z%sc%d%s\x00%z, id INTEGER\x00CREATE TABLE rbu_imposter2(%z, PRIMARY KEY(%z)) WITHOUT ROWID\x00PRIMARY KEY \x00%z%s\"%w\" %s %sCOLLATE %Q%s\x00 NOT NULL\x00%z, %z\x00CREATE TABLE \"rbu_imp_%w\"(%z)%s\x00 WITHOUT ROWID\x00INSERT INTO %s.'rbu_tmp_%q'(rbu_control,%s%s) VALUES(%z)\x00SELECT trim(sql) FROM sqlite_schema WHERE type='index' AND name=?\x00 LIMIT -1 OFFSET %d\x00CREATE TABLE \"rbu_imp_%w\"( %s, PRIMARY KEY( %s ) ) WITHOUT ROWID\x00INSERT INTO \"rbu_imp_%w\" VALUES(%s)\x00DELETE FROM \"rbu_imp_%w\" WHERE %s\x00SELECT %s, 0 AS rbu_control FROM '%q' %s %s %s ORDER BY %s%s\x00AND\x00WHERE\x00SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' %s ORDER BY %s%s\x00SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' %s UNION ALL SELECT %s, rbu_control FROM '%q' %s %s typeof(rbu_control)='integer' AND rbu_control!=1 ORDER BY %s%s\x00rbu_imp_\x00INSERT INTO \"%s%w\"(%s%s) VALUES(%s)\x00, _rowid_\x00DELETE FROM \"%s%w\" WHERE %s\x00, rbu_rowid\x00CREATE TABLE IF NOT EXISTS %s.'rbu_tmp_%q' AS SELECT *%s FROM '%q' WHERE 0;\x00, 0 AS rbu_rowid\x00CREATE TEMP TRIGGER rbu_delete_tr BEFORE DELETE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(3, %s);END;CREATE TEMP TRIGGER rbu_update1_tr BEFORE UPDATE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(3, %s);END;CREATE TEMP TRIGGER rbu_update2_tr AFTER UPDATE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(4, %s);END;\x00CREATE TEMP TRIGGER rbu_insert_tr AFTER INSERT ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(0, %s);END;\x00,_rowid_ \x00,rbu_rowid\x00SELECT %s,%s rbu_control%s FROM '%q'%s %s %s %s\x000 AS \x00UPDATE \"%s%w\" SET %s WHERE %s\x00SELECT k, v FROM %s.rbu_state\x00file:///%s-vacuum?modeof=%s\x00ATTACH %Q AS stat\x00CREATE TABLE IF NOT EXISTS %s.rbu_state(k INTEGER PRIMARY KEY, v)\x00cannot vacuum wal mode 
database\x00file:%s-vactmp?rbu_memory=1%s%s\x00&\x00rbu_tmp_insert\x00rbu_fossil_delta\x00rbu_target_name\x00SELECT * FROM sqlite_schema\x00rbu vfs not found\x00PRAGMA main.wal_checkpoint=restart\x00rbu_exclusive_checkpoint\x00%s-oal\x00%s-wal\x00PRAGMA schema_version\x00PRAGMA schema_version = %d\x00INSERT OR REPLACE INTO %s.rbu_state(k, v) VALUES (%d, %d), (%d, %Q), (%d, %Q), (%d, %d), (%d, %d), (%d, %lld), (%d, %lld), (%d, %lld), (%d, %lld), (%d, %Q) \x00PRAGMA main.%s\x00PRAGMA main.%s = %d\x00PRAGMA writable_schema=1\x00SELECT sql FROM sqlite_schema WHERE sql!='' AND rootpage!=0 AND name!='sqlite_sequence' ORDER BY type DESC\x00SELECT * FROM sqlite_schema WHERE rootpage=0 OR rootpage IS NULL\x00INSERT INTO sqlite_schema VALUES(?,?,?,?,?)\x00PRAGMA writable_schema=0\x00DELETE FROM %s.'rbu_tmp_%q'\x00rbu_state mismatch error\x00rbu_vfs_%d\x00SELECT count(*) FROM sqlite_schema WHERE type='index' AND tbl_name = %Q\x00rbu_index_cnt\x00SELECT 1 FROM sqlite_schema WHERE tbl_name = 'rbu_count'\x00SELECT sum(cnt * (1 + rbu_index_cnt(rbu_target_name(tbl))))FROM rbu_count\x00cannot update wal mode database\x00database modified during rbu %s\x00vacuum\x00update\x00BEGIN IMMEDIATE\x00PRAGMA journal_mode=off\x00-vactmp\x00DELETE FROM stat.rbu_state\x00rbu/zipvfs setup error\x00rbu(%s)/%z\x00rbu_memory\x00SELECT 0, 'tbl', '', 0, '', 1 UNION ALL SELECT 1, 'idx', '', 0, '', 2 UNION ALL SELECT 2, 'stat', '', 0, '', 0\x00PRAGMA '%q'.table_info('%q')\x00%z%s\"%w\".\"%w\".\"%w\"=\"%w\".\"%w\".\"%w\"\x00%z%s\"%w\".\"%w\".\"%w\" IS NOT \"%w\".\"%w\".\"%w\"\x00 OR \x00SELECT * FROM \"%w\".\"%w\" WHERE NOT EXISTS ( SELECT 1 FROM \"%w\".\"%w\" WHERE %s)\x00SELECT * FROM \"%w\".\"%w\", \"%w\".\"%w\" WHERE %s AND (%z)\x00table schemas do not match\x00SELECT tbl, ?2, stat FROM %Q.sqlite_stat1 WHERE tbl IS ?1 AND idx IS (CASE WHEN ?2=X'' THEN NULL ELSE ?2 END)\x00SELECT * FROM \x00 WHERE \x00 IS ?\x00SAVEPOINT changeset\x00RELEASE changeset\x00UPDATE main.\x00 SET \x00 = ?\x00idx IS CASE WHEN length(?4)=0 AND typeof(?4)='blob' THEN NULL ELSE ?4 END \x00DELETE FROM main.\x00 AND (?\x00AND \x00INSERT INTO main.\x00) VALUES(?\x00, ?\x00INSERT INTO main.sqlite_stat1 VALUES(?1, CASE WHEN length(?2)=0 AND typeof(?2)='blob' THEN NULL ELSE ?2 END, ?3)\x00DELETE FROM main.sqlite_stat1 WHERE tbl=?1 AND idx IS CASE WHEN length(?2)=0 AND typeof(?2)='blob' THEN NULL ELSE ?2 END AND (?4 OR stat IS ?3)\x00SAVEPOINT replace_op\x00RELEASE replace_op\x00SAVEPOINT changeset_apply\x00PRAGMA defer_foreign_keys = 1\x00sqlite3changeset_apply(): no such table: %s\x00sqlite3changeset_apply(): table %s has %d columns, expected %d or more\x00sqlite3changeset_apply(): primary key mismatch for table %s\x00PRAGMA defer_foreign_keys = 0\x00RELEASE changeset_apply\x00ROLLBACK TO changeset_apply\x00fts5: parser stack overflow\x00fts5: syntax error near \"%.*s\"\x00%z%.*s\x00wrong number of arguments to function highlight()\x00wrong number of arguments to function snippet()\x00snippet\x00highlight\x00bm25\x00prefix\x00malformed prefix=... directive\x00too many prefix indexes (max %d)\x00prefix length out of range (max 999)\x00tokenize\x00multiple tokenize=... directives\x00parse error in tokenize directive\x00content\x00multiple content=... directives\x00%Q.%Q\x00content_rowid\x00multiple content_rowid=... directives\x00columnsize\x00malformed columnsize=... directive\x00columns\x00malformed detail=... 
directive\x00unrecognized option: \"%.*s\"\x00reserved fts5 column name: %s\x00unindexed\x00unrecognized column option: %s\x00T.%Q\x00, T.%Q\x00, T.c%d\x00reserved fts5 table name: %s\x00parse error in \"%s\"\x00docsize\x00%Q.'%q_%s'\x00CREATE TABLE x(\x00%z%s%Q\x00%z, %Q HIDDEN, %s HIDDEN)\x00pgsz\x00hashsize\x00automerge\x00usermerge\x00crisismerge\x00SELECT k, v FROM %Q.'%q_config'\x00version\x00invalid fts5 file format (found %d, expected %d) - run 'rebuild'\x00unterminated string\x00fts5: syntax error near \"%.1s\"\x00OR\x00NOT\x00NEAR\x00expected integer, got \"%.*s\"\x00fts5: column queries are not supported (detail=none)\x00fts5: %s queries are not supported (detail!=full)\x00phrase\x00block\x00REPLACE INTO '%q'.'%q_data'(id, block) VALUES(?,?)\x00DELETE FROM '%q'.'%q_data' WHERE id>=? AND id<=?\x00DELETE FROM '%q'.'%q_idx' WHERE segid=?\x00PRAGMA %Q.data_version\x00SELECT pgno FROM '%q'.'%q_idx' WHERE segid=? AND term<=? ORDER BY term DESC LIMIT 1\x00INSERT INTO '%q'.'%q_idx'(segid,term,pgno) VALUES(?,?,?)\x00%s_data\x00id INTEGER PRIMARY KEY, block BLOB\x00segid, term, pgno, PRIMARY KEY(segid, term)\x00SELECT segid, term, (pgno>>1), (pgno&1) FROM %Q.'%q_idx' WHERE segid=%d ORDER BY 1, 2\x00\x00\x00\x00\x00\x00recursively defined fts5 content table\x00SELECT rowid, rank FROM %Q.%Q ORDER BY %s(\"%w\"%s%s) %s\x00DESC\x00ASC\x00reads\x00unknown special query: %.*s\x00SELECT %s\x00no such function: %s\x00parse error in rank function: %s\x00%s: table does not support scanning\x00delete-all\x00'delete-all' may only be used with a contentless or external content fts5 table\x00rebuild\x00'rebuild' may not be used with a contentless fts5 table\x00merge\x00integrity-check\x00cannot %s contentless fts5 table: %s\x00DELETE from\x00no such cursor: %lld\x00no such tokenizer: %s\x00error in tokenizer constructor\x00fts5_api_ptr\x00fts5: 2023-03-22 11:56:21 0d1fc92f94cb6b76bffe3ec34d69cffde2924203304e8ffc4155597af0c191da\x00config\x00fts5\x00fts5_source_id\x00SELECT %s FROM %s T WHERE T.%Q >= ? AND T.%Q <= ? ORDER BY T.%Q ASC\x00SELECT %s FROM %s T WHERE T.%Q <= ? AND T.%Q >= ? 
ORDER BY T.%Q DESC\x00SELECT %s FROM %s T WHERE T.%Q=?\x00INSERT INTO %Q.'%q_content' VALUES(%s)\x00REPLACE INTO %Q.'%q_content' VALUES(%s)\x00DELETE FROM %Q.'%q_content' WHERE id=?\x00REPLACE INTO %Q.'%q_docsize' VALUES(?,?)\x00DELETE FROM %Q.'%q_docsize' WHERE id=?\x00SELECT sz FROM %Q.'%q_docsize' WHERE id=?\x00REPLACE INTO %Q.'%q_config' VALUES(?,?)\x00SELECT %s FROM %s AS T\x00DROP TABLE IF EXISTS %Q.'%q_data';DROP TABLE IF EXISTS %Q.'%q_idx';DROP TABLE IF EXISTS %Q.'%q_config';\x00DROP TABLE IF EXISTS %Q.'%q_docsize';\x00DROP TABLE IF EXISTS %Q.'%q_content';\x00ALTER TABLE %Q.'%q_%s' RENAME TO '%q_%s';\x00CREATE TABLE %Q.'%q_%q'(%s)%s\x00fts5: error creating shadow table %q_%s: %s\x00id INTEGER PRIMARY KEY\x00, c%d\x00id INTEGER PRIMARY KEY, sz BLOB\x00k PRIMARY KEY, v\x00DELETE FROM %Q.'%q_data';DELETE FROM %Q.'%q_idx';\x00DELETE FROM %Q.'%q_docsize';\x00SELECT count(*) FROM %Q.'%q_%s'\x00tokenchars\x00separators\x00L* N* Co\x00categories\x00remove_diacritics\x00unicode61\x00al\x00ance\x00ence\x00er\x00ic\x00able\x00ible\x00ant\x00ement\x00ment\x00ent\x00ion\x00ou\x00ism\x00ate\x00iti\x00ous\x00ive\x00ize\x00at\x00bl\x00ble\x00iz\x00ational\x00tional\x00tion\x00enci\x00anci\x00izer\x00logi\x00bli\x00alli\x00entli\x00eli\x00e\x00ousli\x00ization\x00ation\x00ator\x00alism\x00iveness\x00fulness\x00ful\x00ousness\x00aliti\x00iviti\x00biliti\x00ical\x00ness\x00icate\x00iciti\x00ative\x00alize\x00eed\x00ee\x00ed\x00ing\x00case_sensitive\x00ascii\x00porter\x00trigram\x00col\x00row\x00instance\x00fts5vocab: unknown table type: %Q\x00CREATE TABlE vocab(term, col, doc, cnt)\x00CREATE TABlE vocab(term, doc, cnt)\x00CREATE TABlE vocab(term, doc, col, offset)\x00wrong number of vtable arguments\x00recursive definition for %s.%s\x00SELECT t.%Q FROM %Q.%Q AS t WHERE t.%Q MATCH '*id'\x00no such fts5 table: %s.%s\x00fts5vocab\x002023-03-22 11:56:21 0d1fc92f94cb6b76bffe3ec34d69cffde2924203304e8ffc4155597af0c191da\x00" var ts = (*reflect.StringHeader)(unsafe.Pointer(&ts1)).Data diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/sqlite/lib/sqlite_linux_riscv64.go temporal-1.22.5/src/vendor/modernc.org/sqlite/lib/sqlite_linux_riscv64.go --- temporal-1.21.5-1/src/vendor/modernc.org/sqlite/lib/sqlite_linux_riscv64.go 2023-09-29 14:03:35.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/sqlite/lib/sqlite_linux_riscv64.go 2024-02-23 09:46:16.000000000 +0000 @@ -1,4 +1,4 @@ -// Code generated by 'ccgo -DSQLITE_PRIVATE= -export-defines "" -export-enums "" -export-externs X -export-fields F -export-typedefs "" -ignore-unsupported-alignment -pkgname sqlite3 -volatile=sqlite3_io_error_pending,sqlite3_open_file_count,sqlite3_pager_readdb_count,sqlite3_pager_writedb_count,sqlite3_pager_writej_count,sqlite3_search_count,sqlite3_sort_count,saved_cnt,randomnessPid -o lib/sqlite_linux_riscv64.go -trace-translation-units testdata/sqlite-amalgamation-3410000/sqlite3.c -full-path-comments -DNDEBUG -DHAVE_USLEEP -DLONGDOUBLE_TYPE=double -DSQLITE_CORE -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_FTS5 -DSQLITE_ENABLE_GEOPOLY -DSQLITE_ENABLE_MATH_FUNCTIONS -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_OFFSET_SQL_FUNC -DSQLITE_ENABLE_PREUPDATE_HOOK -DSQLITE_ENABLE_RBU -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_SESSION -DSQLITE_ENABLE_SNAPSHOT -DSQLITE_ENABLE_STAT4 -DSQLITE_ENABLE_UNLOCK_NOTIFY -DSQLITE_LIKE_DOESNT_MATCH_BLOBS -DSQLITE_MUTEX_APPDEF=1 -DSQLITE_MUTEX_NOOP -DSQLITE_SOUNDEX -DSQLITE_THREADSAFE=1 -DSQLITE_OS_UNIX=1', DO NOT EDIT. 
+// Code generated by 'ccgo -DSQLITE_PRIVATE= -export-defines "" -export-enums "" -export-externs X -export-fields F -export-typedefs "" -ignore-unsupported-alignment -pkgname sqlite3 -volatile=sqlite3_io_error_pending,sqlite3_open_file_count,sqlite3_pager_readdb_count,sqlite3_pager_writedb_count,sqlite3_pager_writej_count,sqlite3_search_count,sqlite3_sort_count,saved_cnt,randomnessPid -o lib/sqlite_linux_riscv64.go -trace-translation-units testdata/sqlite-amalgamation-3410200/sqlite3.c -full-path-comments -DNDEBUG -DHAVE_USLEEP -DLONGDOUBLE_TYPE=double -DSQLITE_CORE -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_FTS5 -DSQLITE_ENABLE_GEOPOLY -DSQLITE_ENABLE_MATH_FUNCTIONS -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_OFFSET_SQL_FUNC -DSQLITE_ENABLE_PREUPDATE_HOOK -DSQLITE_ENABLE_RBU -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_SESSION -DSQLITE_ENABLE_SNAPSHOT -DSQLITE_ENABLE_STAT4 -DSQLITE_ENABLE_UNLOCK_NOTIFY -DSQLITE_LIKE_DOESNT_MATCH_BLOBS -DSQLITE_MUTEX_APPDEF=1 -DSQLITE_MUTEX_NOOP -DSQLITE_SOUNDEX -DSQLITE_THREADSAFE=1 -DSQLITE_OS_UNIX=1', DO NOT EDIT. package sqlite3 @@ -921,11 +921,11 @@ NC_OrderAgg = 0x8000000 NC_PartIdx = 0x000002 NC_SelfRef = 0x00002e + NC_Subquery = 0x000040 NC_UAggInfo = 0x000100 NC_UBaseReg = 0x000400 NC_UEList = 0x000080 NC_UUpsert = 0x000200 - NC_VarSelect = 0x000040 NDEBUG = 1 NN = 1 NOT_WITHIN = 0 @@ -2165,7 +2165,7 @@ SQLITE_SHM_UNLOCK = 1 SQLITE_SORTER_PMASZ = 250 SQLITE_SOUNDEX = 1 - SQLITE_SOURCE_ID = "2023-02-21 18:09:37 05941c2a04037fc3ed2ffae11f5d2260706f89431f463518740f72ada350866d" + SQLITE_SOURCE_ID = "2023-03-22 11:56:21 0d1fc92f94cb6b76bffe3ec34d69cffde2924203304e8ffc4155597af0c191da" SQLITE_SO_ASC = 0 SQLITE_SO_DESC = 1 SQLITE_SO_UNDEFINED = -1 @@ -2272,8 +2272,8 @@ SQLITE_UTF8 = 1 SQLITE_VDBEINT_H = 0 SQLITE_VDBE_H = 0 - SQLITE_VERSION = "3.41.0" - SQLITE_VERSION_NUMBER = 3041000 + SQLITE_VERSION = "3.41.2" + SQLITE_VERSION_NUMBER = 3041002 SQLITE_VTABRISK_High = 2 SQLITE_VTABRISK_Low = 0 SQLITE_VTABRISK_Normal = 1 @@ -6123,7 +6123,8 @@ FiIdxCur int32 FiIdxCol int32 FbMaybeNullRow U8 - F__ccgo_pad1 [3]byte + Faff U8 + F__ccgo_pad1 [2]byte FpIENext uintptr } @@ -6765,17 +6766,18 @@ // Handle type for pages. type PgHdr2 = struct { - FpPage uintptr - FpData uintptr - FpExtra uintptr - FpCache uintptr - FpDirty uintptr - FpPager uintptr - Fpgno Pgno - Fflags U16 - FnRef I16 - FpDirtyNext uintptr - FpDirtyPrev uintptr + FpPage uintptr + FpData uintptr + FpExtra uintptr + FpCache uintptr + FpDirty uintptr + FpPager uintptr + Fpgno Pgno + Fflags U16 + F__ccgo_pad1 [2]byte + FnRef I64 + FpDirtyNext uintptr + FpDirtyPrev uintptr } // Handle type for pages. 
@@ -6996,14 +6998,14 @@ FpDirty uintptr FpDirtyTail uintptr FpSynced uintptr - FnRefSum int32 + FnRefSum I64 FszCache int32 FszSpill int32 FszPage int32 FszExtra int32 FbPurgeable U8 FeCreate U8 - F__ccgo_pad1 [2]byte + F__ccgo_pad1 [6]byte FxStress uintptr FpStress uintptr FpCache uintptr @@ -7810,7 +7812,7 @@ _ = pMutex if op < 0 || op >= int32(uint64(unsafe.Sizeof([10]Sqlite3StatValueType{}))/uint64(unsafe.Sizeof(Sqlite3StatValueType(0)))) { - return Xsqlite3MisuseError(tls, 23229) + return Xsqlite3MisuseError(tls, 23233) } if statMutex[op] != 0 { pMutex = Xsqlite3Pcache1Mutex(tls) @@ -15900,7 +15902,7 @@ for p = (*UnixInodeInfo)(unsafe.Pointer(pInode)).FpUnused; p != 0; p = pNext { pNext = (*UnixUnusedFd)(unsafe.Pointer(p)).FpNext - robust_close(tls, pFile, (*UnixUnusedFd)(unsafe.Pointer(p)).Ffd, 38271) + robust_close(tls, pFile, (*UnixUnusedFd)(unsafe.Pointer(p)).Ffd, 38275) Xsqlite3_free(tls, p) } (*UnixInodeInfo)(unsafe.Pointer(pInode)).FpUnused = uintptr(0) @@ -16377,7 +16379,7 @@ var pFile uintptr = id unixUnmapfile(tls, pFile) if (*UnixFile)(unsafe.Pointer(pFile)).Fh >= 0 { - robust_close(tls, pFile, (*UnixFile)(unsafe.Pointer(pFile)).Fh, 39055) + robust_close(tls, pFile, (*UnixFile)(unsafe.Pointer(pFile)).Fh, 39059) (*UnixFile)(unsafe.Pointer(pFile)).Fh = -1 } @@ -16668,7 +16670,7 @@ if fd >= 0 { return SQLITE_OK } - return unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 40676), ts+3378, bp+8, 40676) + return unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 40680), ts+3378, bp+8, 40680) } func unixSync(tls *libc.TLS, id uintptr, flags int32) int32 { @@ -16685,14 +16687,14 @@ if rc != 0 { storeLastErrno(tls, pFile, *(*int32)(unsafe.Pointer(libc.X__errno_location(tls)))) - return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(4)<<8, ts+3666, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40717) + return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(4)<<8, ts+3666, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40721) } if int32((*UnixFile)(unsafe.Pointer(pFile)).FctrlFlags)&UNIXFILE_DIRSYNC != 0 { rc = (*(*func(*libc.TLS, uintptr, uintptr) int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 17*24 + 8)))(tls, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, bp) if rc == SQLITE_OK { full_fsync(tls, *(*int32)(unsafe.Pointer(bp)), 0, 0) - robust_close(tls, pFile, *(*int32)(unsafe.Pointer(bp)), 40731) + robust_close(tls, pFile, *(*int32)(unsafe.Pointer(bp)), 40735) } else { rc = SQLITE_OK } @@ -16712,7 +16714,7 @@ rc = robust_ftruncate(tls, (*UnixFile)(unsafe.Pointer(pFile)).Fh, nByte) if rc != 0 { storeLastErrno(tls, pFile, *(*int32)(unsafe.Pointer(libc.X__errno_location(tls)))) - return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(6)<<8, ts+3297, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40762) + return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(6)<<8, ts+3297, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40766) } else { if nByte < (*UnixFile)(unsafe.Pointer(pFile)).FmmapSize { (*UnixFile)(unsafe.Pointer(pFile)).FmmapSize = nByte @@ -16780,7 +16782,7 @@ if (*UnixFile)(unsafe.Pointer(pFile)).FszChunk <= 0 { if robust_ftruncate(tls, (*UnixFile)(unsafe.Pointer(pFile)).Fh, nByte) != 0 { storeLastErrno(tls, pFile, *(*int32)(unsafe.Pointer(libc.X__errno_location(tls)))) - return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(6)<<8, ts+3297, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40883) + return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(6)<<8, ts+3297, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40887) } } @@ -17007,7 +17009,7 @@ } Xsqlite3_free(tls, (*UnixShmNode)(unsafe.Pointer(p)).FapRegion) if 
(*UnixShmNode)(unsafe.Pointer(p)).FhShm >= 0 { - robust_close(tls, pFd, (*UnixShmNode)(unsafe.Pointer(p)).FhShm, 41442) + robust_close(tls, pFd, (*UnixShmNode)(unsafe.Pointer(p)).FhShm, 41446) (*UnixShmNode)(unsafe.Pointer(p)).FhShm = -1 } (*UnixInodeInfo)(unsafe.Pointer((*UnixShmNode)(unsafe.Pointer(p)).FpInode)).FpShmNode = uintptr(0) @@ -17035,7 +17037,7 @@ rc = unixShmSystemLock(tls, pDbFd, F_WRLCK, (22+SQLITE_SHM_NLOCK)*4+SQLITE_SHM_NLOCK, 1) if rc == SQLITE_OK && robust_ftruncate(tls, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FhShm, int64(3)) != 0 { - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(18)<<8, ts+3297, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename, 41499) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(18)<<8, ts+3297, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename, 41503) } } } else if int32((*flock)(unsafe.Pointer(bp+8)).Fl_type) == F_WRLCK { @@ -17134,7 +17136,7 @@ if !((*unixShmNode)(unsafe.Pointer(pShmNode)).FhShm < 0) { goto __10 } - rc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 41624), ts+3261, zShm, 41624) + rc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 41628), ts+3261, zShm, 41628) goto shm_open_err __10: ; @@ -17264,7 +17266,7 @@ goto __14 } zFile = (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(19)<<8, ts+3332, zFile, 41768) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(19)<<8, ts+3332, zFile, 41772) goto shmpage_out __14: ; @@ -17310,7 +17312,7 @@ if !(pMem == libc.UintptrFromInt32(-1)) { goto __20 } - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(21)<<8, ts+3419, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename, 41795) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(21)<<8, ts+3419, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename, 41799) goto shmpage_out __20: ; @@ -17541,7 +17543,7 @@ if pNew == libc.UintptrFromInt32(-1) { pNew = uintptr(0) nNew = int64(0) - unixLogErrorAtLine(tls, SQLITE_OK, zErr, (*UnixFile)(unsafe.Pointer(pFd)).FzPath, 42169) + unixLogErrorAtLine(tls, SQLITE_OK, zErr, (*UnixFile)(unsafe.Pointer(pFd)).FzPath, 42173) (*UnixFile)(unsafe.Pointer(pFd)).FmmapSizeMax = int64(0) } @@ -17675,7 +17677,7 @@ unixEnterMutex(tls) rc = findInodeInfo(tls, pNew, pNew+16) if rc != SQLITE_OK { - robust_close(tls, pNew, h, 42672) + robust_close(tls, pNew, h, 42676) h = -1 } unixLeaveMutex(tls) @@ -17696,7 +17698,7 @@ storeLastErrno(tls, pNew, 0) if rc != SQLITE_OK { if h >= 0 { - robust_close(tls, pNew, h, 42757) + robust_close(tls, pNew, h, 42761) } } else { (*Sqlite3_file)(unsafe.Pointer(pId)).FpMethods = pLockingStyle @@ -18012,7 +18014,7 @@ if !(fd < 0) { goto __19 } - rc2 = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43198), ts+3261, zName, 43198) + rc2 = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43202), ts+3261, zName, 43202) if !(rc == SQLITE_OK) { goto __20 } @@ -18103,7 +18105,7 @@ if *(*int32)(unsafe.Pointer(libc.X__errno_location(tls))) == ENOENT { rc = SQLITE_IOERR | int32(23)<<8 } else { - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(10)<<8, ts+3371, zPath, 43337) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(10)<<8, ts+3371, zPath, 43341) } return rc } @@ -18111,9 +18113,9 @@ rc = (*(*func(*libc.TLS, uintptr, uintptr) int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 17*24 + 8)))(tls, zPath, bp) if rc == SQLITE_OK { if full_fsync(tls, *(*int32)(unsafe.Pointer(bp)), 0, 0) != 0 { - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(5)<<8, ts+3788, zPath, 43347) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(5)<<8, 
ts+3788, zPath, 43351) } - robust_close(tls, uintptr(0), *(*int32)(unsafe.Pointer(bp)), 43349) + robust_close(tls, uintptr(0), *(*int32)(unsafe.Pointer(bp)), 43353) } else { rc = SQLITE_OK } @@ -18177,18 +18179,18 @@ zIn = (*DbPath)(unsafe.Pointer(pPath)).FzOut if (*(*func(*libc.TLS, uintptr, uintptr) int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 27*24 + 8)))(tls, zIn, bp) != 0 { if *(*int32)(unsafe.Pointer(libc.X__errno_location(tls))) != ENOENT { - (*DbPath)(unsafe.Pointer(pPath)).Frc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43443), ts+3459, zIn, 43443) + (*DbPath)(unsafe.Pointer(pPath)).Frc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43447), ts+3459, zIn, 43447) } } else if (*stat)(unsafe.Pointer(bp)).Fst_mode&X__mode_t(0170000) == X__mode_t(0120000) { var got Ssize_t if libc.PostIncInt32(&(*DbPath)(unsafe.Pointer(pPath)).FnSymlink, 1) > SQLITE_MAX_SYMLINK { - (*DbPath)(unsafe.Pointer(pPath)).Frc = Xsqlite3CantopenError(tls, 43449) + (*DbPath)(unsafe.Pointer(pPath)).Frc = Xsqlite3CantopenError(tls, 43453) return } got = (*(*func(*libc.TLS, uintptr, uintptr, Size_t) Ssize_t)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls, zIn, bp+128, uint64(unsafe.Sizeof([4098]uint8{}))-uint64(2)) if got <= int64(0) || got >= Ssize_t(unsafe.Sizeof([4098]uint8{}))-int64(2) { - (*DbPath)(unsafe.Pointer(pPath)).Frc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43454), ts+3450, zIn, 43454) + (*DbPath)(unsafe.Pointer(pPath)).Frc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43458), ts+3450, zIn, 43458) return } *(*uint8)(unsafe.Pointer(bp + 128 + uintptr(got))) = uint8(0) @@ -18228,14 +18230,14 @@ (*DbPath)(unsafe.Pointer(bp + 4104)).FzOut = zOut if int32(*(*uint8)(unsafe.Pointer(zPath))) != '/' { if (*(*func(*libc.TLS, uintptr, Size_t) uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 3*24 + 8)))(tls, bp, uint64(unsafe.Sizeof([4098]uint8{}))-uint64(2)) == uintptr(0) { - return unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43512), ts+3279, zPath, 43512) + return unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43516), ts+3279, zPath, 43516) } appendAllPathElements(tls, bp+4104, bp) } appendAllPathElements(tls, bp+4104, zPath) *(*uint8)(unsafe.Pointer(zOut + uintptr((*DbPath)(unsafe.Pointer(bp+4104)).FnUsed))) = uint8(0) if (*DbPath)(unsafe.Pointer(bp+4104)).Frc != 0 || (*DbPath)(unsafe.Pointer(bp+4104)).FnUsed < 2 { - return Xsqlite3CantopenError(tls, 43518) + return Xsqlite3CantopenError(tls, 43522) } if (*DbPath)(unsafe.Pointer(bp+4104)).FnSymlink != 0 { return SQLITE_OK | int32(2)<<8 @@ -18336,7 +18338,7 @@ for __ccgo := true; __ccgo; __ccgo = got < 0 && *(*int32)(unsafe.Pointer(libc.X__errno_location(tls))) == EINTR { got = int32((*(*func(*libc.TLS, int32, uintptr, Size_t) Ssize_t)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 8*24 + 8)))(tls, fd, zBuf, uint64(nBuf))) } - robust_close(tls, uintptr(0), fd, 43619) + robust_close(tls, uintptr(0), fd, 43623) } } @@ -19769,7 +19771,7 @@ libc.Xmemset(tls, pPgHdr+32, 0, uint64(unsafe.Sizeof(PgHdr{}))-uint64(uintptr(0)+32)) (*PgHdr)(unsafe.Pointer(pPgHdr)).FpPage = pPage (*PgHdr)(unsafe.Pointer(pPgHdr)).FpData = (*Sqlite3_pcache_page)(unsafe.Pointer(pPage)).FpBuf - (*PgHdr)(unsafe.Pointer(pPgHdr)).FpExtra = pPgHdr + 1*72 + (*PgHdr)(unsafe.Pointer(pPgHdr)).FpExtra = pPgHdr + 1*80 libc.Xmemset(tls, (*PgHdr)(unsafe.Pointer(pPgHdr)).FpExtra, 0, uint64(8)) (*PgHdr)(unsafe.Pointer(pPgHdr)).FpCache = pCache (*PgHdr)(unsafe.Pointer(pPgHdr)).Fpgno = pgno @@ -19799,7 +19801,7 @@ 
// reference count drops to 0, then it is made eligible for recycling. func Xsqlite3PcacheRelease(tls *libc.TLS, p uintptr) { (*PCache)(unsafe.Pointer((*PgHdr)(unsafe.Pointer(p)).FpCache)).FnRefSum-- - if int32(libc.PreDecInt16(&(*PgHdr)(unsafe.Pointer(p)).FnRef, 1)) == 0 { + if libc.PreDecInt64(&(*PgHdr)(unsafe.Pointer(p)).FnRef, 1) == int64(0) { if int32((*PgHdr)(unsafe.Pointer(p)).Fflags)&PGHDR_CLEAN != 0 { pcacheUnpin(tls, p) } else { @@ -19850,7 +19852,7 @@ *(*U16)(unsafe.Pointer(p + 52)) &= libc.Uint16FromInt32(libc.CplInt32(PGHDR_DIRTY | PGHDR_NEED_SYNC | PGHDR_WRITEABLE)) *(*U16)(unsafe.Pointer(p + 52)) |= U16(PGHDR_CLEAN) - if int32((*PgHdr)(unsafe.Pointer(p)).FnRef) == 0 { + if (*PgHdr)(unsafe.Pointer(p)).FnRef == int64(0) { pcacheUnpin(tls, p) } } @@ -19954,8 +19956,8 @@ } func pcacheMergeDirtyList(tls *libc.TLS, pA uintptr, pB uintptr) uintptr { - bp := tls.Alloc(72) - defer tls.Free(72) + bp := tls.Alloc(80) + defer tls.Free(80) var pTail uintptr pTail = bp @@ -20033,13 +20035,13 @@ // // This is not the total number of pages referenced, but the sum of the // reference count for all pages. -func Xsqlite3PcacheRefCount(tls *libc.TLS, pCache uintptr) int32 { +func Xsqlite3PcacheRefCount(tls *libc.TLS, pCache uintptr) I64 { return (*PCache)(unsafe.Pointer(pCache)).FnRefSum } // Return the number of references to the page supplied as an argument. -func Xsqlite3PcachePageRefcount(tls *libc.TLS, p uintptr) int32 { - return int32((*PgHdr)(unsafe.Pointer(p)).FnRef) +func Xsqlite3PcachePageRefcount(tls *libc.TLS, p uintptr) I64 { + return (*PgHdr)(unsafe.Pointer(p)).FnRef } // Return the total number of pages in the cache. @@ -22333,7 +22335,7 @@ pPg = Xsqlite3PagerLookup(tls, pPager, iPg) if pPg != 0 { - if Xsqlite3PcachePageRefcount(tls, pPg) == 1 { + if Xsqlite3PcachePageRefcount(tls, pPg) == int64(1) { Xsqlite3PcacheDrop(tls, pPg) } else { rc = readDbPage(tls, pPg) @@ -22766,7 +22768,7 @@ var pageSize U32 = *(*U32)(unsafe.Pointer(pPageSize)) if (int32((*Pager)(unsafe.Pointer(pPager)).FmemDb) == 0 || (*Pager)(unsafe.Pointer(pPager)).FdbSize == Pgno(0)) && - Xsqlite3PcacheRefCount(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache) == 0 && + Xsqlite3PcacheRefCount(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache) == int64(0) && pageSize != 0 && pageSize != U32((*Pager)(unsafe.Pointer(pPager)).FpageSize) { var pNew uintptr = uintptr(0) *(*I64)(unsafe.Pointer(bp)) = int64(0) @@ -22918,9 +22920,9 @@ Xsqlite3OsUnfetch(tls, (*Pager)(unsafe.Pointer(pPager)).Ffd, I64(pgno-Pgno(1))*(*Pager)(unsafe.Pointer(pPager)).FpageSize, pData) return SQLITE_NOMEM } - (*PgHdr)(unsafe.Pointer(p)).FpExtra = p + 1*72 + (*PgHdr)(unsafe.Pointer(p)).FpExtra = p + 1*80 (*PgHdr)(unsafe.Pointer(p)).Fflags = U16(PGHDR_MMAP) - (*PgHdr)(unsafe.Pointer(p)).FnRef = int16(1) + (*PgHdr)(unsafe.Pointer(p)).FnRef = int64(1) (*PgHdr)(unsafe.Pointer(p)).FpPager = pPager } @@ -23252,7 +23254,7 @@ for rc == SQLITE_OK && pList != 0 { var pNext uintptr = (*PgHdr)(unsafe.Pointer(pList)).FpDirty - if int32((*PgHdr)(unsafe.Pointer(pList)).FnRef) == 0 { + if (*PgHdr)(unsafe.Pointer(pList)).FnRef == int64(0) { rc = pagerStress(tls, pPager, pList) } pList = pNext @@ -23402,7 +23404,7 @@ goto __12 } - rc = Xsqlite3CantopenError(tls, 60235) + rc = Xsqlite3CantopenError(tls, 60239) __12: ; if !(rc != SQLITE_OK) { @@ -23783,7 +23785,7 @@ if !(rc == SQLITE_OK && *(*int32)(unsafe.Pointer(bp + 8))&SQLITE_OPEN_READONLY != 0) { goto __10 } - rc = Xsqlite3CantopenError(tls, 60754) + rc = Xsqlite3CantopenError(tls, 60758) Xsqlite3OsClose(tls, 
(*Pager)(unsafe.Pointer(pPager)).Fjfd) __10: ; @@ -23889,7 +23891,7 @@ } func pagerUnlockIfUnused(tls *libc.TLS, pPager uintptr) { - if Xsqlite3PcacheRefCount(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache) == 0 { + if Xsqlite3PcacheRefCount(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache) == int64(0) { pagerUnlockAndRollback(tls, pPager) } } @@ -23907,7 +23909,7 @@ if !(pgno == Pgno(0)) { goto __1 } - return Xsqlite3CorruptError(tls, 60967) + return Xsqlite3CorruptError(tls, 60971) __1: ; *(*uintptr)(unsafe.Pointer(bp)) = Xsqlite3PcacheFetch(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache, pgno, 3) @@ -23946,7 +23948,7 @@ if !(pgno == (*Pager)(unsafe.Pointer(pPager)).FlckPgno) { goto __7 } - rc = Xsqlite3CorruptError(tls, 60999) + rc = Xsqlite3CorruptError(tls, 61003) goto pager_acquire_err __7: ; @@ -24023,7 +24025,7 @@ (int32((*Pager)(unsafe.Pointer(pPager)).FeState) == PAGER_READER || flags&PAGER_GET_READONLY != 0)) if pgno <= Pgno(1) && pgno == Pgno(0) { - return Xsqlite3CorruptError(tls, 61078) + return Xsqlite3CorruptError(tls, 61082) } if bMmapOk != 0 && (*Pager)(unsafe.Pointer(pPager)).FpWal != uintptr(0) { @@ -24781,7 +24783,7 @@ // Return the number of references to the specified page. func Xsqlite3PagerPageRefcount(tls *libc.TLS, pPage uintptr) int32 { - return Xsqlite3PcachePageRefcount(tls, pPage) + return int32(Xsqlite3PcachePageRefcount(tls, pPage)) } // Parameter eStat must be one of SQLITE_DBSTATUS_CACHE_HIT, _MISS, _WRITE, @@ -25024,9 +25026,9 @@ pPgOld = Xsqlite3PagerLookup(tls, pPager, pgno) if pPgOld != 0 { - if int32((*PgHdr)(unsafe.Pointer(pPgOld)).FnRef) > 1 { + if (*PgHdr)(unsafe.Pointer(pPgOld)).FnRef > int64(1) { Xsqlite3PagerUnrefNotNull(tls, pPgOld) - return Xsqlite3CorruptError(tls, 62623) + return Xsqlite3CorruptError(tls, 62627) } *(*U16)(unsafe.Pointer(pPg + 52)) |= U16(int32((*PgHdr)(unsafe.Pointer(pPgOld)).Fflags) & PGHDR_NEED_SYNC) if (*Pager)(unsafe.Pointer(pPager)).FtempFile != 0 { @@ -25783,7 +25785,7 @@ nCollide = idx for iKey = walHash(tls, iPage); *(*Ht_slot)(unsafe.Pointer((*WalHashLoc)(unsafe.Pointer(bp)).FaHash + uintptr(iKey)*2)) != 0; iKey = walNextHash(tls, iKey) { if libc.PostDecInt32(&nCollide, 1) == 0 { - return Xsqlite3CorruptError(tls, 64387) + return Xsqlite3CorruptError(tls, 64391) } } *(*U32)(unsafe.Pointer((*WalHashLoc)(unsafe.Pointer(bp)).FaPgno + uintptr(idx-1)*4)) = iPage @@ -25882,7 +25884,7 @@ if !(version != U32(WAL_MAX_VERSION)) { goto __7 } - rc = Xsqlite3CantopenError(tls, 64519) + rc = Xsqlite3CantopenError(tls, 64523) goto finished __7: ; @@ -26468,7 +26470,7 @@ goto __14 } - rc = Xsqlite3CorruptError(tls, 65333) + rc = Xsqlite3CorruptError(tls, 65337) goto __15 __14: Xsqlite3OsFileControlHint(tls, (*Wal)(unsafe.Pointer(pWal)).FpDbFd, SQLITE_FCNTL_SIZE_HINT, bp+16) @@ -26743,7 +26745,7 @@ } if badHdr == 0 && (*Wal)(unsafe.Pointer(pWal)).Fhdr.FiVersion != U32(WALINDEX_MAX_VERSION) { - rc = Xsqlite3CantopenError(tls, 65682) + rc = Xsqlite3CantopenError(tls, 65686) } if (*Wal)(unsafe.Pointer(pWal)).FbShmUnreliable != 0 { if rc != SQLITE_OK { @@ -27216,7 +27218,7 @@ iRead = iFrame } if libc.PostDecInt32(&nCollide, 1) == 0 { - return Xsqlite3CorruptError(tls, 66419) + return Xsqlite3CorruptError(tls, 66423) } iKey = walNextHash(tls, iKey) } @@ -27721,7 +27723,7 @@ if rc == SQLITE_OK { if (*Wal)(unsafe.Pointer(pWal)).Fhdr.FmxFrame != 0 && walPagesize(tls, pWal) != nBuf { - rc = Xsqlite3CorruptError(tls, 67138) + rc = Xsqlite3CorruptError(tls, 67142) } else { rc = walCheckpoint(tls, pWal, db, eMode2, xBusy2, pBusyArg, sync_flags, 
zBuf) } @@ -28379,7 +28381,7 @@ } Xsqlite3VdbeRecordUnpack(tls, pKeyInfo, int32(nKey), pKey, pIdxKey) if int32((*UnpackedRecord)(unsafe.Pointer(pIdxKey)).FnField) == 0 || int32((*UnpackedRecord)(unsafe.Pointer(pIdxKey)).FnField) > int32((*KeyInfo)(unsafe.Pointer(pKeyInfo)).FnAllField) { - rc = Xsqlite3CorruptError(tls, 69249) + rc = Xsqlite3CorruptError(tls, 69253) } else { rc = Xsqlite3BtreeIndexMoveto(tls, pCur, pIdxKey, pRes) } @@ -28516,7 +28518,7 @@ if !(key == Pgno(0)) { goto __2 } - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69430) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69434) return __2: ; @@ -28533,7 +28535,7 @@ goto __4 } - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69443) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69447) goto ptrmap_exit __4: ; @@ -28541,7 +28543,7 @@ if !(offset < 0) { goto __5 } - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69448) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69452) goto ptrmap_exit __5: ; @@ -28584,7 +28586,7 @@ offset = int32(Pgno(5) * (key - Pgno(iPtrmap) - Pgno(1))) if offset < 0 { Xsqlite3PagerUnref(tls, *(*uintptr)(unsafe.Pointer(bp))) - return Xsqlite3CorruptError(tls, 69493) + return Xsqlite3CorruptError(tls, 69497) } *(*U8)(unsafe.Pointer(pEType)) = *(*U8)(unsafe.Pointer(pPtrmap + uintptr(offset))) @@ -28594,7 +28596,7 @@ Xsqlite3PagerUnref(tls, *(*uintptr)(unsafe.Pointer(bp))) if int32(*(*U8)(unsafe.Pointer(pEType))) < 1 || int32(*(*U8)(unsafe.Pointer(pEType))) > 5 { - return Xsqlite3CorruptError(tls, 69501) + return Xsqlite3CorruptError(tls, 69505) } return SQLITE_OK } @@ -28844,7 +28846,7 @@ if U32((*CellInfo)(unsafe.Pointer(bp)).FnLocal) < (*CellInfo)(unsafe.Pointer(bp)).FnPayload { var ovfl Pgno if Uptr((*MemPage)(unsafe.Pointer(pSrc)).FaDataEnd) >= Uptr(pCell) && Uptr((*MemPage)(unsafe.Pointer(pSrc)).FaDataEnd) < Uptr(pCell+uintptr((*CellInfo)(unsafe.Pointer(bp)).FnLocal)) { - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69893) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69897) return } ovfl = Xsqlite3Get4byte(tls, pCell+uintptr(int32((*CellInfo)(unsafe.Pointer(bp)).FnSize)-4)) @@ -28891,7 +28893,7 @@ if !(iFree > usableSize-4) { goto __2 } - return Xsqlite3CorruptError(tls, 69951) + return Xsqlite3CorruptError(tls, 69955) __2: ; if !(iFree != 0) { @@ -28901,7 +28903,7 @@ if !(iFree2 > usableSize-4) { goto __4 } - return Xsqlite3CorruptError(tls, 69954) + return Xsqlite3CorruptError(tls, 69958) __4: ; if !(0 == iFree2 || int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree2)))) == 0 && int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree2+1)))) == 0) { @@ -28914,7 +28916,7 @@ if !(top >= iFree) { goto __6 } - return Xsqlite3CorruptError(tls, 69962) + return Xsqlite3CorruptError(tls, 69966) __6: ; if !(iFree2 != 0) { @@ -28923,14 +28925,14 @@ if !(iFree+sz > iFree2) { goto __9 } - return Xsqlite3CorruptError(tls, 69965) + return Xsqlite3CorruptError(tls, 69969) __9: ; sz2 = int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree2+2))))<<8 | int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree2+2) + 1))) if !(iFree2+sz2 > usableSize) { goto __10 } - return Xsqlite3CorruptError(tls, 69967) + return Xsqlite3CorruptError(tls, 69971) __10: ; libc.Xmemmove(tls, data+uintptr(iFree+sz+sz2), data+uintptr(iFree+sz), uint64(iFree2-(iFree+sz))) @@ -28940,7 +28942,7 @@ if !(iFree+sz > usableSize) { goto __11 } - return Xsqlite3CorruptError(tls, 69971) + return Xsqlite3CorruptError(tls, 69975) __11: ; __8: @@ -29004,7 
+29006,7 @@ if !(pc < iCellStart || pc > iCellLast) { goto __22 } - return Xsqlite3CorruptError(tls, 70004) + return Xsqlite3CorruptError(tls, 70008) __22: ; size = int32((*struct { @@ -29014,7 +29016,7 @@ if !(cbrk < iCellStart || pc+size > usableSize) { goto __23 } - return Xsqlite3CorruptError(tls, 70010) + return Xsqlite3CorruptError(tls, 70014) __23: ; *(*U8)(unsafe.Pointer(pAddr1)) = U8(cbrk >> 8) @@ -29036,7 +29038,7 @@ if !(int32(*(*uint8)(unsafe.Pointer(data + uintptr(hdr+7))))+cbrk-iCellFirst != (*MemPage)(unsafe.Pointer(pPage)).FnFree) { goto __24 } - return Xsqlite3CorruptError(tls, 70024) + return Xsqlite3CorruptError(tls, 70028) __24: ; *(*uint8)(unsafe.Pointer(data + uintptr(hdr+5))) = U8(cbrk >> 8) @@ -29071,7 +29073,7 @@ *(*U8)(unsafe.Pointer(aData + uintptr(hdr+7))) += U8(int32(U8(x))) return aData + uintptr(pc) } else if x+pc > maxPC { - *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70081) + *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70085) return uintptr(0) } else { *(*U8)(unsafe.Pointer(aData + uintptr(pc+2))) = U8(x >> 8) @@ -29084,13 +29086,13 @@ pc = int32(*(*U8)(unsafe.Pointer(pTmp)))<<8 | int32(*(*U8)(unsafe.Pointer(pTmp + 1))) if pc <= iAddr { if pc != 0 { - *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70096) + *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70100) } return uintptr(0) } } if pc > maxPC+nByte-4 { - *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70103) + *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70107) } return uintptr(0) } @@ -29115,7 +29117,7 @@ if top == 0 && (*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize == U32(65536) { top = 65536 } else { - return Xsqlite3CorruptError(tls, 70152) + return Xsqlite3CorruptError(tls, 70156) } } @@ -29126,7 +29128,7 @@ *(*int32)(unsafe.Pointer(pIdx)) = libc.AssignInt32(&g2, int32((int64(pSpace)-int64(data))/1)) if g2 <= gap { - return Xsqlite3CorruptError(tls, 70170) + return Xsqlite3CorruptError(tls, 70174) } else { return SQLITE_OK } @@ -29178,22 +29180,22 @@ if int32(iFreeBlk) == 0 { break } - return Xsqlite3CorruptError(tls, 70249) + return Xsqlite3CorruptError(tls, 70253) } iPtr = iFreeBlk } if U32(iFreeBlk) > (*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize-U32(4) { - return Xsqlite3CorruptError(tls, 70254) + return Xsqlite3CorruptError(tls, 70258) } if iFreeBlk != 0 && iEnd+U32(3) >= U32(iFreeBlk) { nFrag = U8(U32(iFreeBlk) - iEnd) if iEnd > U32(iFreeBlk) { - return Xsqlite3CorruptError(tls, 70266) + return Xsqlite3CorruptError(tls, 70270) } iEnd = U32(int32(iFreeBlk) + (int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(iFreeBlk)+2))))<<8 | int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(iFreeBlk)+2) + 1))))) if iEnd > (*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize { - return Xsqlite3CorruptError(tls, 70269) + return Xsqlite3CorruptError(tls, 70273) } iSize = U16(iEnd - U32(iStart)) iFreeBlk = U16(int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFreeBlk))))<<8 | int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFreeBlk) + 1)))) @@ -29203,7 +29205,7 @@ var iPtrEnd int32 = int32(iPtr) + (int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(iPtr)+2))))<<8 | int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(iPtr)+2) + 1)))) if iPtrEnd+3 >= int32(iStart) { if iPtrEnd > int32(iStart) { - return Xsqlite3CorruptError(tls, 70282) + return Xsqlite3CorruptError(tls, 70286) } nFrag = U8(int32(nFrag) + (int32(iStart) - iPtrEnd)) iSize = 
U16(iEnd - U32(iPtr)) @@ -29211,7 +29213,7 @@ } } if int32(nFrag) > int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+7)))) { - return Xsqlite3CorruptError(tls, 70288) + return Xsqlite3CorruptError(tls, 70292) } *(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+7))) -= uint8(int32(nFrag)) } @@ -29219,10 +29221,10 @@ x = U16(int32(*(*U8)(unsafe.Pointer(pTmp)))<<8 | int32(*(*U8)(unsafe.Pointer(pTmp + 1)))) if int32(iStart) <= int32(x) { if int32(iStart) < int32(x) { - return Xsqlite3CorruptError(tls, 70297) + return Xsqlite3CorruptError(tls, 70301) } if int32(iPtr) != int32(hdr)+1 { - return Xsqlite3CorruptError(tls, 70298) + return Xsqlite3CorruptError(tls, 70302) } *(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+1))) = U8(int32(iFreeBlk) >> 8) *(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+1) + 1)) = U8(iFreeBlk) @@ -29282,7 +29284,7 @@ (*MemPage)(unsafe.Pointer(pPage)).FxParseCell = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr) }{btreeParseCellPtrIndex})) - return Xsqlite3CorruptError(tls, 70357) + return Xsqlite3CorruptError(tls, 70361) } } else { (*MemPage)(unsafe.Pointer(pPage)).FchildPtrSize = U8(4) @@ -29318,7 +29320,7 @@ (*MemPage)(unsafe.Pointer(pPage)).FxParseCell = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr) }{btreeParseCellPtrIndex})) - return Xsqlite3CorruptError(tls, 70381) + return Xsqlite3CorruptError(tls, 70385) } } return SQLITE_OK @@ -29348,11 +29350,11 @@ var next U32 var size U32 if pc < top { - return Xsqlite3CorruptError(tls, 70432) + return Xsqlite3CorruptError(tls, 70436) } for 1 != 0 { if pc > iCellLast { - return Xsqlite3CorruptError(tls, 70437) + return Xsqlite3CorruptError(tls, 70441) } next = U32(int32(*(*U8)(unsafe.Pointer(data + uintptr(pc))))<<8 | int32(*(*U8)(unsafe.Pointer(data + uintptr(pc) + 1)))) size = U32(int32(*(*U8)(unsafe.Pointer(data + uintptr(pc+2))))<<8 | int32(*(*U8)(unsafe.Pointer(data + uintptr(pc+2) + 1)))) @@ -29363,15 +29365,15 @@ pc = int32(next) } if next > U32(0) { - return Xsqlite3CorruptError(tls, 70447) + return Xsqlite3CorruptError(tls, 70451) } if U32(pc)+size > uint32(usableSize) { - return Xsqlite3CorruptError(tls, 70451) + return Xsqlite3CorruptError(tls, 70455) } } if nFree > usableSize || nFree < iCellFirst { - return Xsqlite3CorruptError(tls, 70463) + return Xsqlite3CorruptError(tls, 70467) } (*MemPage)(unsafe.Pointer(pPage)).FnFree = int32(U16(nFree - iCellFirst)) return SQLITE_OK @@ -29399,14 +29401,14 @@ pc = int32(*(*U8)(unsafe.Pointer(data + uintptr(cellOffset+i*2))))<<8 | int32(*(*U8)(unsafe.Pointer(data + uintptr(cellOffset+i*2) + 1))) if pc < iCellFirst || pc > iCellLast { - return Xsqlite3CorruptError(tls, 70494) + return Xsqlite3CorruptError(tls, 70498) } sz = int32((*struct { f func(*libc.TLS, uintptr, uintptr) U16 })(unsafe.Pointer(&struct{ uintptr }{(*MemPage)(unsafe.Pointer(pPage)).FxCellSize})).f(tls, pPage, data+uintptr(pc))) if pc+sz > usableSize { - return Xsqlite3CorruptError(tls, 70499) + return Xsqlite3CorruptError(tls, 70503) } } return SQLITE_OK @@ -29420,7 +29422,7 @@ data = (*MemPage)(unsafe.Pointer(pPage)).FaData + uintptr((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset) if decodeFlags(tls, pPage, int32(*(*U8)(unsafe.Pointer(data)))) != 0 { - return Xsqlite3CorruptError(tls, 70531) + return Xsqlite3CorruptError(tls, 70535) } (*MemPage)(unsafe.Pointer(pPage)).FmaskPage = U16((*BtShared)(unsafe.Pointer(pBt)).FpageSize - U32(1)) @@ -29432,7 +29434,7 @@ (*MemPage)(unsafe.Pointer(pPage)).FnCell = 
U16(int32(*(*U8)(unsafe.Pointer(data + 3)))<<8 | int32(*(*U8)(unsafe.Pointer(data + 3 + 1)))) if U32((*MemPage)(unsafe.Pointer(pPage)).FnCell) > ((*BtShared)(unsafe.Pointer(pBt)).FpageSize-U32(8))/U32(6) { - return Xsqlite3CorruptError(tls, 70545) + return Xsqlite3CorruptError(tls, 70549) } (*MemPage)(unsafe.Pointer(pPage)).FnFree = -1 @@ -29535,7 +29537,7 @@ if !(pgno > btreePagecount(tls, pBt)) { goto __1 } - rc = Xsqlite3CorruptError(tls, 70700) + rc = Xsqlite3CorruptError(tls, 70704) goto getAndInitPage_error1 __1: ; @@ -29563,7 +29565,7 @@ if !(pCur != 0 && (int32((*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppPage)))).FnCell) < 1 || int32((*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppPage)))).FintKey) != int32((*BtCursor)(unsafe.Pointer(pCur)).FcurIntKey))) { goto __5 } - rc = Xsqlite3CorruptError(tls, 70721) + rc = Xsqlite3CorruptError(tls, 70725) goto getAndInitPage_error2 __5: ; @@ -29602,7 +29604,7 @@ if Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppPage)))).FpDbPage) > 1 { releasePage(tls, *(*uintptr)(unsafe.Pointer(ppPage))) *(*uintptr)(unsafe.Pointer(ppPage)) = uintptr(0) - return Xsqlite3CorruptError(tls, 70787) + return Xsqlite3CorruptError(tls, 70791) } (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppPage)))).FisInit = U8(0) } else { @@ -30485,7 +30487,7 @@ if !(Xsqlite3WritableSchema(tls, (*BtShared)(unsafe.Pointer(pBt)).Fdb) == 0) { goto __18 } - rc = Xsqlite3CorruptError(tls, 71722) + rc = Xsqlite3CorruptError(tls, 71726) goto page1_init_failed goto __19 __18: @@ -30900,7 +30902,7 @@ if int32(eType) == PTRMAP_OVERFLOW2 { if Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer(pPage)).FaData) != iFrom { - return Xsqlite3CorruptError(tls, 72143) + return Xsqlite3CorruptError(tls, 72147) } Xsqlite3Put4byte(tls, (*MemPage)(unsafe.Pointer(pPage)).FaData, iTo) } else { @@ -30926,7 +30928,7 @@ })(unsafe.Pointer(&struct{ uintptr }{(*MemPage)(unsafe.Pointer(pPage)).FxParseCell})).f(tls, pPage, pCell, bp) if U32((*CellInfo)(unsafe.Pointer(bp)).FnLocal) < (*CellInfo)(unsafe.Pointer(bp)).FnPayload { if pCell+uintptr((*CellInfo)(unsafe.Pointer(bp)).FnSize) > (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr((*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize) { - return Xsqlite3CorruptError(tls, 72162) + return Xsqlite3CorruptError(tls, 72166) } if iFrom == Xsqlite3Get4byte(tls, pCell+uintptr((*CellInfo)(unsafe.Pointer(bp)).FnSize)-uintptr(4)) { Xsqlite3Put4byte(tls, pCell+uintptr((*CellInfo)(unsafe.Pointer(bp)).FnSize)-uintptr(4), iTo) @@ -30935,7 +30937,7 @@ } } else { if pCell+uintptr(4) > (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr((*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize) { - return Xsqlite3CorruptError(tls, 72171) + return Xsqlite3CorruptError(tls, 72175) } if Xsqlite3Get4byte(tls, pCell) == iFrom { Xsqlite3Put4byte(tls, pCell, iTo) @@ -30946,7 +30948,7 @@ if i == nCell { if int32(eType) != PTRMAP_BTREE || Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr(int32((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset)+8)) != iFrom { - return Xsqlite3CorruptError(tls, 72183) + return Xsqlite3CorruptError(tls, 72187) } Xsqlite3Put4byte(tls, (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr(int32((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset)+8), iTo) } @@ -30962,7 +30964,7 @@ var pPager uintptr = (*BtShared)(unsafe.Pointer(pBt)).FpPager if iDbPage < Pgno(3) { - return Xsqlite3CorruptError(tls, 72218) + return Xsqlite3CorruptError(tls, 72222) } 
*(*int32)(unsafe.Pointer(bp)) = Xsqlite3PagerMovepage(tls, pPager, (*MemPage)(unsafe.Pointer(pDbPage)).FpDbPage, iFreePage, isCommit) @@ -31023,7 +31025,7 @@ return rc } if int32(*(*U8)(unsafe.Pointer(bp))) == PTRMAP_ROOTPAGE { - return Xsqlite3CorruptError(tls, 72316) + return Xsqlite3CorruptError(tls, 72320) } if int32(*(*U8)(unsafe.Pointer(bp))) == PTRMAP_FREEPAGE { @@ -31058,7 +31060,7 @@ releasePage(tls, *(*uintptr)(unsafe.Pointer(bp + 32))) if *(*Pgno)(unsafe.Pointer(bp + 40)) > dbSize { releasePage(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) - return Xsqlite3CorruptError(tls, 72368) + return Xsqlite3CorruptError(tls, 72372) } } @@ -31118,7 +31120,7 @@ var nFin Pgno = finalDbSize(tls, pBt, nOrig, nFree) if nOrig < nFin || nFree >= nOrig { - rc = Xsqlite3CorruptError(tls, 72436) + rc = Xsqlite3CorruptError(tls, 72440) } else if nFree > Pgno(0) { rc = saveAllCursors(tls, pBt, uint32(0), uintptr(0)) if rc == SQLITE_OK { @@ -31157,7 +31159,7 @@ nOrig = btreePagecount(tls, pBt) if ptrmapPageno(tls, pBt, nOrig) == nOrig || nOrig == U32(Xsqlite3PendingByte)/(*BtShared)(unsafe.Pointer(pBt)).FpageSize+U32(1) { - return Xsqlite3CorruptError(tls, 72487) + return Xsqlite3CorruptError(tls, 72491) } nFree = Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer((*BtShared)(unsafe.Pointer(pBt)).FpPage1)).FaData+36) @@ -31188,7 +31190,7 @@ } nFin = finalDbSize(tls, pBt, nOrig, nVac) if nFin > nOrig { - return Xsqlite3CorruptError(tls, 72514) + return Xsqlite3CorruptError(tls, 72518) } if nFin < nOrig { rc = saveAllCursors(tls, pBt, uint32(0), uintptr(0)) @@ -31529,7 +31531,7 @@ if iTable <= Pgno(1) { if iTable < Pgno(1) { - return Xsqlite3CorruptError(tls, 72978) + return Xsqlite3CorruptError(tls, 72982) } else if btreePagecount(tls, pBt) == Pgno(0) { iTable = Pgno(0) } @@ -31773,14 +31775,14 @@ var pBt uintptr = (*BtCursor)(unsafe.Pointer(pCur)).FpBt if int32((*BtCursor)(unsafe.Pointer(pCur)).Fix) >= int32((*MemPage)(unsafe.Pointer(pPage)).FnCell) { - return Xsqlite3CorruptError(tls, 73385) + return Xsqlite3CorruptError(tls, 73389) } getCellInfo(tls, pCur) aPayload = (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload if Uptr((int64(aPayload)-int64((*MemPage)(unsafe.Pointer(pPage)).FaData))/1) > Uptr((*BtShared)(unsafe.Pointer(pBt)).FusableSize-U32((*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal)) { - return Xsqlite3CorruptError(tls, 73400) + return Xsqlite3CorruptError(tls, 73404) } if offset < U32((*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal) { @@ -31825,7 +31827,7 @@ for *(*Pgno)(unsafe.Pointer(bp)) != 0 { if *(*Pgno)(unsafe.Pointer(bp)) > (*BtShared)(unsafe.Pointer(pBt)).FnPage { - return Xsqlite3CorruptError(tls, 73462) + return Xsqlite3CorruptError(tls, 73466) } *(*Pgno)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pCur)).FaOverflow + uintptr(iIdx)*4)) = *(*Pgno)(unsafe.Pointer(bp)) @@ -31874,7 +31876,7 @@ } if rc == SQLITE_OK && amt > U32(0) { - return Xsqlite3CorruptError(tls, 73547) + return Xsqlite3CorruptError(tls, 73551) } return rc } @@ -31954,7 +31956,7 @@ func moveToChild(tls *libc.TLS, pCur uintptr, newPgno U32) int32 { if int32((*BtCursor)(unsafe.Pointer(pCur)).FiPage) >= BTCURSOR_MAX_DEPTH-1 { - return Xsqlite3CorruptError(tls, 73684) + return Xsqlite3CorruptError(tls, 73688) } (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnSize = U16(0) *(*U8)(unsafe.Pointer(pCur + 1)) &= libc.Uint8FromInt32(libc.CplInt32(BTCF_ValidNKey | BTCF_ValidOvfl)) @@ -32045,7 +32047,7 @@ if !(int32((*MemPage)(unsafe.Pointer(pRoot)).FisInit) == 0 || libc.Bool32((*BtCursor)(unsafe.Pointer(pCur)).FpKeyInfo == uintptr(0)) 
!= int32((*MemPage)(unsafe.Pointer(pRoot)).FintKey)) { goto __11 } - return Xsqlite3CorruptError(tls, 73823) + return Xsqlite3CorruptError(tls, 73827) __11: ; skip_init: @@ -32065,7 +32067,7 @@ if !((*MemPage)(unsafe.Pointer(pRoot)).Fpgno != Pgno(1)) { goto __16 } - return Xsqlite3CorruptError(tls, 73835) + return Xsqlite3CorruptError(tls, 73839) __16: ; subpage = Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer(pRoot)).FaData+uintptr(int32((*MemPage)(unsafe.Pointer(pRoot)).FhdrOffset)+8)) @@ -32275,7 +32277,7 @@ if !(pCell >= (*MemPage)(unsafe.Pointer(pPage)).FaDataEnd) { goto __21 } - return Xsqlite3CorruptError(tls, 74077) + return Xsqlite3CorruptError(tls, 74081) __21: ; goto __19 @@ -32479,7 +32481,7 @@ if !!(int32((*MemPage)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pCur)).FpPage)).FisInit) != 0) { goto __4 } - return Xsqlite3CorruptError(tls, 74273) + return Xsqlite3CorruptError(tls, 74277) __4: ; goto bypass_moveto_root @@ -32544,7 +32546,7 @@ if !(nCell < 2 || U32(nCell)/(*BtShared)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pCur)).FpBt)).FusableSize > (*BtShared)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pCur)).FpBt)).FnPage) { goto __17 } - rc = Xsqlite3CorruptError(tls, 74360) + rc = Xsqlite3CorruptError(tls, 74364) goto moveto_index_finish __17: ; @@ -32592,7 +32594,7 @@ if !((*UnpackedRecord)(unsafe.Pointer(pIdxKey)).FerrCode != 0) { goto __24 } - rc = Xsqlite3CorruptError(tls, 74392) + rc = Xsqlite3CorruptError(tls, 74396) __24: ; goto moveto_index_finish @@ -32711,7 +32713,7 @@ pPage = (*BtCursor)(unsafe.Pointer(pCur)).FpPage idx = int32(libc.PreIncUint16(&(*BtCursor)(unsafe.Pointer(pCur)).Fix, 1)) if !(int32((*MemPage)(unsafe.Pointer(pPage)).FisInit) != 0) || Xsqlite3FaultSim(tls, 412) != 0 { - return Xsqlite3CorruptError(tls, 74508) + return Xsqlite3CorruptError(tls, 74512) } if idx >= int32((*MemPage)(unsafe.Pointer(pPage)).FnCell) { @@ -32871,7 +32873,7 @@ if !(n >= mxPage) { goto __1 } - return Xsqlite3CorruptError(tls, 74688) + return Xsqlite3CorruptError(tls, 74692) __1: ; if !(n > U32(0)) { @@ -32936,7 +32938,7 @@ if !(iTrunk > mxPage || libc.PostIncUint32(&nSearch, 1) > n) { goto __16 } - rc = Xsqlite3CorruptError(tls, 74744) + rc = Xsqlite3CorruptError(tls, 74748) goto __17 __16: rc = btreeGetUnusedPage(tls, pBt, iTrunk, bp+8, 0) @@ -32972,7 +32974,7 @@ goto __22 } - rc = Xsqlite3CorruptError(tls, 74773) + rc = Xsqlite3CorruptError(tls, 74777) goto end_allocate_page goto __23 __22: @@ -33016,7 +33018,7 @@ if !(iNewTrunk > mxPage) { goto __32 } - rc = Xsqlite3CorruptError(tls, 74807) + rc = Xsqlite3CorruptError(tls, 74811) goto end_allocate_page __32: ; @@ -33128,7 +33130,7 @@ if !(iPage > mxPage || iPage < Pgno(2)) { goto __51 } - rc = Xsqlite3CorruptError(tls, 74872) + rc = Xsqlite3CorruptError(tls, 74876) goto end_allocate_page __51: ; @@ -33286,7 +33288,7 @@ if !(iPage < Pgno(2) || iPage > (*BtShared)(unsafe.Pointer(pBt)).FnPage) { goto __1 } - return Xsqlite3CorruptError(tls, 74999) + return Xsqlite3CorruptError(tls, 75003) __1: ; if !(pMemPage != 0) { @@ -33343,7 +33345,7 @@ if !(iTrunk > btreePagecount(tls, pBt)) { goto __10 } - *(*int32)(unsafe.Pointer(bp + 8)) = Xsqlite3CorruptError(tls, 75046) + *(*int32)(unsafe.Pointer(bp + 8)) = Xsqlite3CorruptError(tls, 75050) goto freepage_out __10: ; @@ -33359,7 +33361,7 @@ if !(nLeaf > (*BtShared)(unsafe.Pointer(pBt)).FusableSize/U32(4)-U32(2)) { goto __12 } - *(*int32)(unsafe.Pointer(bp + 8)) = Xsqlite3CorruptError(tls, 75057) + *(*int32)(unsafe.Pointer(bp + 8)) = Xsqlite3CorruptError(tls, 75061) goto freepage_out 
__12: ; @@ -33433,7 +33435,7 @@ var ovflPageSize U32 if pCell+uintptr((*CellInfo)(unsafe.Pointer(pInfo)).FnSize) > (*MemPage)(unsafe.Pointer(pPage)).FaDataEnd { - return Xsqlite3CorruptError(tls, 75146) + return Xsqlite3CorruptError(tls, 75150) } ovflPgno = Xsqlite3Get4byte(tls, pCell+uintptr((*CellInfo)(unsafe.Pointer(pInfo)).FnSize)-uintptr(4)) pBt = (*MemPage)(unsafe.Pointer(pPage)).FpBt @@ -33445,7 +33447,7 @@ *(*Pgno)(unsafe.Pointer(bp + 8)) = Pgno(0) *(*uintptr)(unsafe.Pointer(bp)) = uintptr(0) if ovflPgno < Pgno(2) || ovflPgno > btreePagecount(tls, pBt) { - return Xsqlite3CorruptError(tls, 75163) + return Xsqlite3CorruptError(tls, 75167) } if nOvfl != 0 { rc = getOverflowPage(tls, pBt, ovflPgno, bp, bp+8) @@ -33456,7 +33458,7 @@ if (*(*uintptr)(unsafe.Pointer(bp)) != 0 || libc.AssignPtrUintptr(bp, btreePageLookup(tls, pBt, ovflPgno)) != uintptr(0)) && Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FpDbPage) != 1 { - rc = Xsqlite3CorruptError(tls, 75183) + rc = Xsqlite3CorruptError(tls, 75187) } else { rc = freePage2(tls, pBt, *(*uintptr)(unsafe.Pointer(bp)), ovflPgno) } @@ -33621,7 +33623,7 @@ hdr = int32((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset) if pc+U32(sz) > (*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize { - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 75436) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 75440) return } rc = freeSpace(tls, pPage, uint16(pc), uint16(sz)) @@ -33900,12 +33902,12 @@ if Uptr(pCell) >= Uptr(aData+uintptr(j)) && Uptr(pCell) < Uptr(pEnd) { if Uptr(pCell+uintptr(sz)) > Uptr(pEnd) { - return Xsqlite3CorruptError(tls, 75737) + return Xsqlite3CorruptError(tls, 75741) } pCell = pTmp + uintptr((int64(pCell)-int64(aData))/1) } else if Uptr(pCell+uintptr(sz)) > Uptr(pSrcEnd) && Uptr(pCell) < Uptr(pSrcEnd) { - return Xsqlite3CorruptError(tls, 75742) + return Xsqlite3CorruptError(tls, 75746) } pData -= uintptr(sz) @@ -33913,7 +33915,7 @@ *(*U8)(unsafe.Pointer(pCellptr + 1)) = U8((int64(pData) - int64(aData)) / 1) pCellptr += uintptr(2) if pData < pCellptr { - return Xsqlite3CorruptError(tls, 75748) + return Xsqlite3CorruptError(tls, 75752) } libc.Xmemmove(tls, pData, pCell, uint64(sz)) @@ -33973,7 +33975,7 @@ if Uptr(*(*uintptr)(unsafe.Pointer((*CellArray)(unsafe.Pointer(pCArray)).FapCell + uintptr(i)*8))+uintptr(sz)) > Uptr(pEnd) && Uptr(*(*uintptr)(unsafe.Pointer((*CellArray)(unsafe.Pointer(pCArray)).FapCell + uintptr(i)*8))) < Uptr(pEnd) { - Xsqlite3CorruptError(tls, 75833) + Xsqlite3CorruptError(tls, 75837) return 1 } libc.Xmemmove(tls, pSlot, *(*uintptr)(unsafe.Pointer((*CellArray)(unsafe.Pointer(pCArray)).FapCell + uintptr(i)*8)), uint64(sz)) @@ -34062,7 +34064,7 @@ if !(nShift > nCell) { goto __2 } - return Xsqlite3CorruptError(tls, 75947) + return Xsqlite3CorruptError(tls, 75951) __2: ; libc.Xmemmove(tls, (*MemPage)(unsafe.Pointer(pPg)).FaCellIdx, (*MemPage)(unsafe.Pointer(pPg)).FaCellIdx+uintptr(nShift*2), uint64(nCell*2)) @@ -34178,7 +34180,7 @@ var pBt uintptr = (*MemPage)(unsafe.Pointer(pPage)).FpBt if int32((*MemPage)(unsafe.Pointer(pPage)).FnCell) == 0 { - return Xsqlite3CorruptError(tls, 76060) + return Xsqlite3CorruptError(tls, 76064) } *(*int32)(unsafe.Pointer(bp + 136)) = allocateBtreePage(tls, pBt, bp, bp+8, uint32(0), uint8(0)) @@ -34498,7 +34500,7 @@ if !(int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pOld)).FaData))) != int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp + 112)))).FaData)))) { 
goto __25 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76481) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76485) goto balance_cleanup __25: ; @@ -34509,7 +34511,7 @@ if !(limit < int32(*(*U16)(unsafe.Pointer(pOld + 28)))) { goto __27 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76505) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76509) goto balance_cleanup __27: ; @@ -34667,7 +34669,7 @@ if !(k > NB+2) { goto __55 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76606) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76610) goto balance_cleanup __55: ; @@ -34741,7 +34743,7 @@ }()) { goto __67 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76639) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76643) goto balance_cleanup __67: ; @@ -34804,7 +34806,7 @@ }()) { goto __75 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76683) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76687) goto balance_cleanup __75: ; @@ -34832,7 +34834,7 @@ *(*int32)(unsafe.Pointer(bp + 172)) == SQLITE_OK) { goto __81 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76716) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76720) __81: ; if !(*(*int32)(unsafe.Pointer(bp + 172)) != 0) { @@ -35093,7 +35095,7 @@ if !(Uptr(pSrcEnd) >= Uptr(pCell1) && Uptr(pSrcEnd) < Uptr(pCell1+uintptr(sz2))) { goto __121 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76916) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76920) goto balance_cleanup __121: ; @@ -35285,7 +35287,7 @@ if pOther != pCur && int32((*BtCursor)(unsafe.Pointer(pOther)).FeState) == CURSOR_VALID && (*BtCursor)(unsafe.Pointer(pOther)).FpPage == (*BtCursor)(unsafe.Pointer(pCur)).FpPage { - return Xsqlite3CorruptError(tls, 77146) + return Xsqlite3CorruptError(tls, 77150) } } return SQLITE_OK @@ -35323,7 +35325,7 @@ break } } else if Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(pPage)).FpDbPage) > 1 { - rc = Xsqlite3CorruptError(tls, 77206) + rc = Xsqlite3CorruptError(tls, 77210) } else { var pParent uintptr = *(*uintptr)(unsafe.Pointer(pCur + 144 + uintptr(iPage-1)*8)) var iIdx int32 = int32(*(*U16)(unsafe.Pointer(pCur + 88 + uintptr(iPage-1)*2))) @@ -35429,7 +35431,7 @@ return rc } if Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FpDbPage) != 1 || (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FisInit != 0 { - rc = Xsqlite3CorruptError(tls, 77370) + rc = Xsqlite3CorruptError(tls, 77374) } else { if U32(iOffset)+ovflPageSize < U32(nTotal) { ovflPgno = Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FaData) @@ -35454,7 +35456,7 @@ if (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload+uintptr((*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal) > (*MemPage)(unsafe.Pointer(pPage)).FaDataEnd || (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload < (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr((*MemPage)(unsafe.Pointer(pPage)).FcellOffset) { - return Xsqlite3CorruptError(tls, 77398) + return Xsqlite3CorruptError(tls, 77402) } if int32((*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal) == nTotal { return btreeOverwriteContent(tls, pPage, (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload, pX, @@ -35524,7 +35526,7 @@ goto __3 } - return Xsqlite3CorruptError(tls, 77479) + return Xsqlite3CorruptError(tls, 77483) 
__3: ; __1: @@ -35637,7 +35639,7 @@ goto __21 } - *(*int32)(unsafe.Pointer(bp + 120)) = Xsqlite3CorruptError(tls, 77602) + *(*int32)(unsafe.Pointer(bp + 120)) = Xsqlite3CorruptError(tls, 77606) goto __22 __21: *(*int32)(unsafe.Pointer(bp + 120)) = btreeComputeFreeSpace(tls, pPage) @@ -35697,6 +35699,7 @@ __25: ; idx = int32((*BtCursor)(unsafe.Pointer(pCur)).Fix) + (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnSize = U16(0) if !(*(*int32)(unsafe.Pointer(bp)) == 0) { goto __31 } @@ -35704,7 +35707,7 @@ if !(idx >= int32((*MemPage)(unsafe.Pointer(pPage)).FnCell)) { goto __33 } - return Xsqlite3CorruptError(tls, 77640) + return Xsqlite3CorruptError(tls, 77645) __33: ; *(*int32)(unsafe.Pointer(bp + 120)) = Xsqlite3PagerWrite(tls, (*MemPage)(unsafe.Pointer(pPage)).FpDbPage) @@ -35742,13 +35745,13 @@ if !(oldCell < (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset)+uintptr(10)) { goto __39 } - return Xsqlite3CorruptError(tls, 77667) + return Xsqlite3CorruptError(tls, 77672) __39: ; if !(oldCell+uintptr(*(*int32)(unsafe.Pointer(bp + 124))) > (*MemPage)(unsafe.Pointer(pPage)).FaDataEnd) { goto __40 } - return Xsqlite3CorruptError(tls, 77670) + return Xsqlite3CorruptError(tls, 77675) __40: ; libc.Xmemcpy(tls, oldCell, newCell, uint64(*(*int32)(unsafe.Pointer(bp + 124)))) @@ -35779,7 +35782,6 @@ ; *(*int32)(unsafe.Pointer(bp + 120)) = insertCell(tls, pPage, idx, newCell, *(*int32)(unsafe.Pointer(bp + 124)), uintptr(0), uint32(0)) - (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnSize = U16(0) if !((*MemPage)(unsafe.Pointer(pPage)).FnOverflow != 0) { goto __44 } @@ -35854,7 +35856,7 @@ nIn = U32((*BtCursor)(unsafe.Pointer(pSrc)).Finfo.FnLocal) aIn = (*BtCursor)(unsafe.Pointer(pSrc)).Finfo.FpPayload if aIn+uintptr(nIn) > (*MemPage)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pSrc)).FpPage)).FaDataEnd { - return Xsqlite3CorruptError(tls, 77773) + return Xsqlite3CorruptError(tls, 77777) } nRem = (*BtCursor)(unsafe.Pointer(pSrc)).Finfo.FnPayload if nIn == nRem && nIn < U32((*MemPage)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pDest)).FpPage)).FmaxLocal) { @@ -35879,7 +35881,7 @@ if nRem > nIn { if aIn+uintptr(nIn)+uintptr(4) > (*MemPage)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pSrc)).FpPage)).FaDataEnd { - return Xsqlite3CorruptError(tls, 77798) + return Xsqlite3CorruptError(tls, 77802) } ovflIn = Xsqlite3Get4byte(tls, (*BtCursor)(unsafe.Pointer(pSrc)).Finfo.FpPayload+uintptr(nIn)) } @@ -35980,7 +35982,7 @@ return *(*int32)(unsafe.Pointer(bp + 24)) } } else { - return Xsqlite3CorruptError(tls, 77894) + return Xsqlite3CorruptError(tls, 77898) } } @@ -35988,11 +35990,11 @@ iCellIdx = int32((*BtCursor)(unsafe.Pointer(pCur)).Fix) pPage = (*BtCursor)(unsafe.Pointer(pCur)).FpPage if int32((*MemPage)(unsafe.Pointer(pPage)).FnCell) <= iCellIdx { - return Xsqlite3CorruptError(tls, 77903) + return Xsqlite3CorruptError(tls, 77907) } pCell = (*MemPage)(unsafe.Pointer(pPage)).FaData + uintptr(int32((*MemPage)(unsafe.Pointer(pPage)).FmaskPage)&(int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FaCellIdx + uintptr(2*iCellIdx))))<<8|int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FaCellIdx + uintptr(2*iCellIdx) + 1))))) if (*MemPage)(unsafe.Pointer(pPage)).FnFree < 0 && btreeComputeFreeSpace(tls, pPage) != 0 { - return Xsqlite3CorruptError(tls, 77907) + return Xsqlite3CorruptError(tls, 77911) } bPreserve = U8(libc.Bool32(int32(flags)&BTREE_SAVEPOSITION != 0)) @@ -36067,7 +36069,7 @@ } pCell = (*MemPage)(unsafe.Pointer(pLeaf)).FaData + 
uintptr(int32((*MemPage)(unsafe.Pointer(pLeaf)).FmaskPage)&(int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pLeaf)).FaCellIdx + uintptr(2*(int32((*MemPage)(unsafe.Pointer(pLeaf)).FnCell)-1)))))<<8|int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pLeaf)).FaCellIdx + uintptr(2*(int32((*MemPage)(unsafe.Pointer(pLeaf)).FnCell)-1)) + 1))))) if pCell < (*MemPage)(unsafe.Pointer(pLeaf)).FaData+4 { - return Xsqlite3CorruptError(tls, 77998) + return Xsqlite3CorruptError(tls, 78002) } nCell = int32((*struct { f func(*libc.TLS, uintptr, uintptr) U16 @@ -36136,7 +36138,7 @@ Xsqlite3BtreeGetMeta(tls, p, BTREE_LARGEST_ROOT_PAGE, bp) if *(*Pgno)(unsafe.Pointer(bp)) > btreePagecount(tls, pBt) { - return Xsqlite3CorruptError(tls, 78114) + return Xsqlite3CorruptError(tls, 78118) } *(*Pgno)(unsafe.Pointer(bp))++ @@ -36165,7 +36167,7 @@ } *(*int32)(unsafe.Pointer(bp + 40)) = ptrmapGet(tls, pBt, *(*Pgno)(unsafe.Pointer(bp)), bp+32, bp+36) if int32(*(*U8)(unsafe.Pointer(bp + 32))) == PTRMAP_ROOTPAGE || int32(*(*U8)(unsafe.Pointer(bp + 32))) == PTRMAP_FREEPAGE { - *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3CorruptError(tls, 78162) + *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3CorruptError(tls, 78166) } if *(*int32)(unsafe.Pointer(bp + 40)) != SQLITE_OK { releasePage(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) @@ -36241,7 +36243,7 @@ if !(pgno > btreePagecount(tls, pBt)) { goto __1 } - return Xsqlite3CorruptError(tls, 78252) + return Xsqlite3CorruptError(tls, 78256) __1: ; *(*int32)(unsafe.Pointer(bp + 32)) = getAndInitPage(tls, pBt, pgno, bp, uintptr(0), 0) @@ -36255,7 +36257,7 @@ Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FpDbPage) != 1+libc.Bool32(pgno == Pgno(1))) { goto __3 } - *(*int32)(unsafe.Pointer(bp + 32)) = Xsqlite3CorruptError(tls, 78259) + *(*int32)(unsafe.Pointer(bp + 32)) = Xsqlite3CorruptError(tls, 78263) goto cleardatabasepage_out __3: ; @@ -36389,7 +36391,7 @@ var pBt uintptr = (*Btree)(unsafe.Pointer(p)).FpBt if iTable > btreePagecount(tls, pBt) { - return Xsqlite3CorruptError(tls, 78363) + return Xsqlite3CorruptError(tls, 78367) } *(*int32)(unsafe.Pointer(bp + 12)) = Xsqlite3BtreeClearTable(tls, p, int32(iTable), uintptr(0)) @@ -38859,7 +38861,7 @@ var rc int32 (*Mem)(unsafe.Pointer(pMem)).Fflags = U16(MEM_Null) if Xsqlite3BtreeMaxRecordSize(tls, pCur) < Sqlite3_int64(offset+amt) { - return Xsqlite3CorruptError(tls, 81630) + return Xsqlite3CorruptError(tls, 81634) } if SQLITE_OK == libc.AssignInt32(&rc, Xsqlite3VdbeMemClearAndResize(tls, pMem, int32(amt+U32(1)))) { rc = Xsqlite3BtreePayload(tls, pCur, offset, amt, (*Mem)(unsafe.Pointer(pMem)).Fz) @@ -39508,7 +39510,7 @@ return Xsqlite3GetVarint32(tls, a, bp) }()) if *(*int32)(unsafe.Pointer(bp)) > nRec || iHdr >= *(*int32)(unsafe.Pointer(bp)) { - return Xsqlite3CorruptError(tls, 82270) + return Xsqlite3CorruptError(tls, 82274) } iField = *(*int32)(unsafe.Pointer(bp)) for i = 0; i <= iCol; i++ { @@ -39523,14 +39525,14 @@ }()) if iHdr > *(*int32)(unsafe.Pointer(bp)) { - return Xsqlite3CorruptError(tls, 82276) + return Xsqlite3CorruptError(tls, 82280) } szField = int32(Xsqlite3VdbeSerialTypeLen(tls, *(*U32)(unsafe.Pointer(bp + 4)))) iField = iField + szField } if iField > nRec { - return Xsqlite3CorruptError(tls, 82282) + return Xsqlite3CorruptError(tls, 82286) } if pMem == uintptr(0) { pMem = libc.AssignPtrUintptr(ppVal, Xsqlite3ValueNew(tls, db)) @@ -41834,7 +41836,7 @@ return rc } if *(*int32)(unsafe.Pointer(bp)) != 0 { - return Xsqlite3CorruptError(tls, 86058) + return 
Xsqlite3CorruptError(tls, 86062) } (*VdbeCursor)(unsafe.Pointer(p)).FdeferredMoveto = U8(0) (*VdbeCursor)(unsafe.Pointer(p)).FcacheStatus = U32(CACHE_STALE) @@ -42385,7 +42387,7 @@ i = 0 } if d1 > uint32(nKey1) { - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 86985)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 86989)) return 0 } @@ -42450,7 +42452,7 @@ if d1+U32((*Mem)(unsafe.Pointer(bp+8)).Fn) > uint32(nKey1) || int32((*KeyInfo)(unsafe.Pointer(libc.AssignUintptr(&pKeyInfo, (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FpKeyInfo))).FnAllField) <= i { - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87062)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87066)) return 0 } else if *(*uintptr)(unsafe.Pointer(pKeyInfo + 32 + uintptr(i)*8)) != 0 { (*Mem)(unsafe.Pointer(bp + 8)).Fenc = (*KeyInfo)(unsafe.Pointer(pKeyInfo)).Fenc @@ -42484,7 +42486,7 @@ var nStr int32 = int32((*(*U32)(unsafe.Pointer(bp + 64)) - U32(12)) / U32(2)) if d1+U32(nStr) > uint32(nKey1) { - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87092)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87096)) return 0 } else if int32((*Mem)(unsafe.Pointer(pRhs)).Fflags)&MEM_Zero != 0 { if !(isAllZero(tls, aKey1+uintptr(d1), nStr) != 0) { @@ -42534,7 +42536,7 @@ } idx1 = idx1 + U32(Xsqlite3VarintLen(tls, uint64(*(*U32)(unsafe.Pointer(bp + 64))))) if idx1 >= *(*U32)(unsafe.Pointer(bp + 4)) { - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87136)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87140)) return 0 } } @@ -42680,7 +42682,7 @@ if !(szHdr+nStr > nKey1) { goto __7 } - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87299)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87303)) return 0 __7: ; @@ -42851,7 +42853,7 @@ idx_rowid_corruption: ; Xsqlite3VdbeMemReleaseMalloc(tls, bp) - return Xsqlite3CorruptError(tls, 87457) + return Xsqlite3CorruptError(tls, 87461) } // Compare the key of the index entry that cursor pC is pointing to against @@ -42877,7 +42879,7 @@ if nCellKey <= int64(0) || nCellKey > int64(0x7fffffff) { *(*int32)(unsafe.Pointer(res)) = 0 - return Xsqlite3CorruptError(tls, 87490) + return Xsqlite3CorruptError(tls, 87494) } Xsqlite3VdbeMemInit(tls, bp, db, uint16(0)) rc = Xsqlite3VdbeMemFromBtreeZeroOffset(tls, pCur, U32(nCellKey), bp) @@ -43151,7 +43153,7 @@ var v uintptr = pStmt var db uintptr = (*Vdbe)(unsafe.Pointer(v)).Fdb if vdbeSafety(tls, v) != 0 { - return Xsqlite3MisuseError(tls, 87854) + return Xsqlite3MisuseError(tls, 87858) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) if (*Vdbe)(unsafe.Pointer(v)).FstartTime > int64(0) { @@ -43791,7 +43793,7 @@ var db uintptr if vdbeSafetyNotNull(tls, v) != 0 { - return Xsqlite3MisuseError(tls, 88544) + return Xsqlite3MisuseError(tls, 88548) } db = (*Vdbe)(unsafe.Pointer(v)).Fdb Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) @@ -44311,7 +44313,7 @@ var pVar uintptr if vdbeSafetyNotNull(tls, p) != 0 { - return Xsqlite3MisuseError(tls, 89208) + return Xsqlite3MisuseError(tls, 89212) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer((*Vdbe)(unsafe.Pointer(p)).Fdb)).Fmutex) if int32((*Vdbe)(unsafe.Pointer(p)).FeVdbeState) != VDBE_READY_STATE { @@ -44319,7 +44321,7 @@ 
Xsqlite3_mutex_leave(tls, (*Sqlite3)(unsafe.Pointer((*Vdbe)(unsafe.Pointer(p)).Fdb)).Fmutex) Xsqlite3_log(tls, SQLITE_MISUSE, ts+5357, libc.VaList(bp, (*Vdbe)(unsafe.Pointer(p)).FzSql)) - return Xsqlite3MisuseError(tls, 89216) + return Xsqlite3MisuseError(tls, 89220) } if i >= uint32((*Vdbe)(unsafe.Pointer(p)).FnVar) { Xsqlite3Error(tls, (*Vdbe)(unsafe.Pointer(p)).Fdb, SQLITE_RANGE) @@ -44734,7 +44736,7 @@ if !(!(p != 0) || (*PreUpdate)(unsafe.Pointer(p)).Fop == SQLITE_INSERT) { goto __1 } - rc = Xsqlite3MisuseError(tls, 89707) + rc = Xsqlite3MisuseError(tls, 89711) goto preupdate_old_out __1: ; @@ -44878,7 +44880,7 @@ if !(!(p != 0) || (*PreUpdate)(unsafe.Pointer(p)).Fop == SQLITE_DELETE) { goto __1 } - rc = Xsqlite3MisuseError(tls, 89809) + rc = Xsqlite3MisuseError(tls, 89813) goto preupdate_new_out __1: ; @@ -45322,10 +45324,6 @@ } else if int32((*Mem)(unsafe.Pointer(p)).Fflags)&MEM_Real != 0 { h = h + U64(Xsqlite3VdbeIntValue(tls, p)) } else if int32((*Mem)(unsafe.Pointer(p)).Fflags)&(MEM_Str|MEM_Blob) != 0 { - h = h + U64((*Mem)(unsafe.Pointer(p)).Fn) - if int32((*Mem)(unsafe.Pointer(p)).Fflags)&MEM_Zero != 0 { - h = h + U64(*(*int32)(unsafe.Pointer(p))) - } } } return h @@ -47973,7 +47971,7 @@ goto __9 goto __425 __424: - rc = Xsqlite3CorruptError(tls, 93317) + rc = Xsqlite3CorruptError(tls, 93320) goto abort_due_to_error __425: ; @@ -49733,7 +49731,7 @@ if !((*Op)(unsafe.Pointer(pOp)).Fp2 == 0) { goto __682 } - rc = Xsqlite3CorruptError(tls, 95560) + rc = Xsqlite3CorruptError(tls, 95563) goto __683 __682: goto jump_to_p2 @@ -50511,7 +50509,7 @@ if !((*Op)(unsafe.Pointer(pOp)).Fp5 != 0 && !(Xsqlite3WritableSchema(tls, db) != 0)) { goto __770 } - rc = Xsqlite3ReportError(tls, SQLITE_CORRUPT|int32(3)<<8, 96635, ts+5866) + rc = Xsqlite3ReportError(tls, SQLITE_CORRUPT|int32(3)<<8, 96638, ts+5866) goto abort_due_to_error __770: ; @@ -50621,7 +50619,7 @@ if !(nCellKey <= int64(0) || nCellKey > int64(0x7fffffff)) { goto __781 } - rc = Xsqlite3CorruptError(tls, 96840) + rc = Xsqlite3CorruptError(tls, 96843) goto abort_due_to_error __781: ; @@ -50815,7 +50813,7 @@ goto __803 } - rc = Xsqlite3CorruptError(tls, 97092) + rc = Xsqlite3CorruptError(tls, 97095) __803: ; Xsqlite3DbFreeNN(tls, db, zSql) @@ -52182,7 +52180,7 @@ if !(rc == SQLITE_IOERR|int32(33)<<8) { goto __957 } - rc = Xsqlite3CorruptError(tls, 99031) + rc = Xsqlite3CorruptError(tls, 99034) __957: ; __956: @@ -52702,7 +52700,7 @@ var db uintptr if p == uintptr(0) { - return Xsqlite3MisuseError(tls, 99516) + return Xsqlite3MisuseError(tls, 99519) } db = (*Incrblob)(unsafe.Pointer(p)).Fdb Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) @@ -52785,7 +52783,7 @@ var db uintptr if p == uintptr(0) { - return Xsqlite3MisuseError(tls, 99616) + return Xsqlite3MisuseError(tls, 99619) } db = (*Incrblob)(unsafe.Pointer(p)).Fdb Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) @@ -56225,14 +56223,10 @@ ; Xsqlite3WalkExpr(tls, pWalker, (*Expr)(unsafe.Pointer(pExpr)).FpLeft) if 0 == Xsqlite3ExprCanBeNull(tls, (*Expr)(unsafe.Pointer(pExpr)).FpLeft) && !(int32((*Parse)(unsafe.Pointer(pParse)).FeParseMode) >= PARSE_MODE_RENAME) { - if int32((*Expr)(unsafe.Pointer(pExpr)).Fop) == TK_NOTNULL { - *(*uintptr)(unsafe.Pointer(pExpr + 8)) = ts + 6764 - *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_IsTrue) - } else { - *(*uintptr)(unsafe.Pointer(pExpr + 8)) = ts + 6769 - *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_IsFalse) - } - (*Expr)(unsafe.Pointer(pExpr)).Fop = U8(TK_TRUEFALSE) + *(*int32)(unsafe.Pointer(pExpr + 8)) = 
libc.Bool32(int32((*Expr)(unsafe.Pointer(pExpr)).Fop) == TK_NOTNULL) + *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_IntValue) + (*Expr)(unsafe.Pointer(pExpr)).Fop = U8(TK_INTEGER) + i = 0 p = pNC __4: @@ -56276,7 +56270,7 @@ var pLeft uintptr = (*Expr)(unsafe.Pointer(pExpr)).FpLeft if (*NameContext)(unsafe.Pointer(pNC)).FncFlags&(NC_IdxExpr|NC_GenCol) != 0 { - notValidImpl(tls, pParse, pNC, ts+6775, uintptr(0), pExpr) + notValidImpl(tls, pParse, pNC, ts+6764, uintptr(0), pExpr) } pRight = (*Expr)(unsafe.Pointer(pExpr)).FpRight @@ -56340,7 +56334,7 @@ (*Expr)(unsafe.Pointer(pExpr)).FiTable = exprProbability(tls, (*ExprList_item)(unsafe.Pointer(pList+8+1*32)).FpExpr) if (*Expr)(unsafe.Pointer(pExpr)).FiTable < 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+6792, libc.VaList(bp, pExpr)) + ts+6781, libc.VaList(bp, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } } else { @@ -56356,7 +56350,7 @@ var auth int32 = Xsqlite3AuthCheck(tls, pParse, SQLITE_FUNCTION, uintptr(0), (*FuncDef)(unsafe.Pointer(pDef)).FzName, uintptr(0)) if auth != SQLITE_OK { if auth == SQLITE_DENY { - Xsqlite3ErrorMsg(tls, pParse, ts+6856, + Xsqlite3ErrorMsg(tls, pParse, ts+6845, libc.VaList(bp+8, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } @@ -56370,7 +56364,7 @@ } if (*FuncDef)(unsafe.Pointer(pDef)).FfuncFlags&U32(SQLITE_FUNC_CONSTANT) == U32(0) { if (*NameContext)(unsafe.Pointer(pNC)).FncFlags&(NC_IdxExpr|NC_PartIdx|NC_GenCol) != 0 { - notValidImpl(tls, pParse, pNC, ts+6892, uintptr(0), pExpr) + notValidImpl(tls, pParse, pNC, ts+6881, uintptr(0), pExpr) } } else { @@ -56393,30 +56387,30 @@ if 0 == libc.Bool32(int32((*Parse)(unsafe.Pointer(pParse)).FeParseMode) >= PARSE_MODE_RENAME) { if pDef != 0 && (*FuncDef)(unsafe.Pointer(pDef)).FxValue == uintptr(0) && pWin != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+6920, libc.VaList(bp+16, pExpr)) + ts+6909, libc.VaList(bp+16, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } else if is_agg != 0 && (*NameContext)(unsafe.Pointer(pNC)).FncFlags&NC_AllowAgg == 0 || is_agg != 0 && (*FuncDef)(unsafe.Pointer(pDef)).FfuncFlags&U32(SQLITE_FUNC_WINDOW) != 0 && !(pWin != 0) || is_agg != 0 && pWin != 0 && (*NameContext)(unsafe.Pointer(pNC)).FncFlags&NC_AllowWin == 0 { var zType uintptr if (*FuncDef)(unsafe.Pointer(pDef)).FfuncFlags&U32(SQLITE_FUNC_WINDOW) != 0 || pWin != 0 { - zType = ts + 6963 + zType = ts + 6952 } else { - zType = ts + 6970 + zType = ts + 6959 } - Xsqlite3ErrorMsg(tls, pParse, ts+6980, libc.VaList(bp+24, zType, pExpr)) + Xsqlite3ErrorMsg(tls, pParse, ts+6969, libc.VaList(bp+24, zType, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ is_agg = 0 } else if no_such_func != 0 && int32((*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).Finit.Fbusy) == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+7008, libc.VaList(bp+40, pExpr)) + Xsqlite3ErrorMsg(tls, pParse, ts+6997, libc.VaList(bp+40, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } else if wrong_num_args != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+7030, + Xsqlite3ErrorMsg(tls, pParse, ts+7019, libc.VaList(bp+48, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } else if is_agg == 0 && (*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_WinFunc) != U32(0) { Xsqlite3ErrorMsg(tls, pParse, - ts+7074, + ts+7063, libc.VaList(bp+56, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } @@ -56488,15 +56482,15 @@ var nRef int32 = (*NameContext)(unsafe.Pointer(pNC)).FnRef if (*NameContext)(unsafe.Pointer(pNC)).FncFlags&NC_SelfRef != 0 { - notValidImpl(tls, pParse, pNC, ts+7122, pExpr, pExpr) + 
notValidImpl(tls, pParse, pNC, ts+7111, pExpr, pExpr) } else { Xsqlite3WalkSelect(tls, pWalker, *(*uintptr)(unsafe.Pointer(pExpr + 32))) } if nRef != (*NameContext)(unsafe.Pointer(pNC)).FnRef { *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_VarSelect) - *(*int32)(unsafe.Pointer(pNC + 40)) |= NC_VarSelect } + *(*int32)(unsafe.Pointer(pNC + 40)) |= NC_Subquery } break @@ -56504,7 +56498,7 @@ case TK_VARIABLE: { if (*NameContext)(unsafe.Pointer(pNC)).FncFlags&(NC_IsCheck|NC_PartIdx|NC_IdxExpr|NC_GenCol) != 0 { - notValidImpl(tls, pParse, pNC, ts+7133, pExpr, pExpr) + notValidImpl(tls, pParse, pNC, ts+7122, pExpr, pExpr) } break @@ -56635,7 +56629,7 @@ defer tls.Free(24) Xsqlite3ErrorMsg(tls, pParse, - ts+7144, libc.VaList(bp, i, zType, mx)) + ts+7133, libc.VaList(bp, i, zType, mx)) Xsqlite3RecordErrorOffsetOfExpr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pError) } @@ -56655,7 +56649,7 @@ } db = (*Parse)(unsafe.Pointer(pParse)).Fdb if (*ExprList)(unsafe.Pointer(pOrderBy)).FnExpr > *(*int32)(unsafe.Pointer(db + 136 + 2*4)) { - Xsqlite3ErrorMsg(tls, pParse, ts+7200, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+7189, 0) return 1 } for i = 0; i < (*ExprList)(unsafe.Pointer(pOrderBy)).FnExpr; i++ { @@ -56690,7 +56684,7 @@ } if Xsqlite3ExprIsInteger(tls, pE, bp+8) != 0 { if *(*int32)(unsafe.Pointer(bp + 8)) <= 0 || *(*int32)(unsafe.Pointer(bp + 8)) > (*ExprList)(unsafe.Pointer(pEList)).FnExpr { - resolveOutOfRangeError(tls, pParse, ts+7234, i+1, (*ExprList)(unsafe.Pointer(pEList)).FnExpr, pE) + resolveOutOfRangeError(tls, pParse, ts+7223, i+1, (*ExprList)(unsafe.Pointer(pEList)).FnExpr, pE) return 1 } } else { @@ -56747,7 +56741,7 @@ for i = 0; i < (*ExprList)(unsafe.Pointer(pOrderBy)).FnExpr; i++ { if int32(*(*uint16)(unsafe.Pointer(pOrderBy + 8 + uintptr(i)*32 + 16 + 4))&0x4>>2) == 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+7240, libc.VaList(bp, i+1)) + ts+7229, libc.VaList(bp, i+1)) return 1 } } @@ -56775,7 +56769,7 @@ return 0 } if (*ExprList)(unsafe.Pointer(pOrderBy)).FnExpr > *(*int32)(unsafe.Pointer(db + 136 + 2*4)) { - Xsqlite3ErrorMsg(tls, pParse, ts+7301, libc.VaList(bp, zType)) + Xsqlite3ErrorMsg(tls, pParse, ts+7290, libc.VaList(bp, zType)) return 1 } pEList = (*Select)(unsafe.Pointer(pSelect)).FpEList @@ -56989,7 +56983,7 @@ *(*int32)(unsafe.Pointer(bp + 40)) |= NC_UEList if (*Select)(unsafe.Pointer(p)).FpHaving != 0 { if (*Select)(unsafe.Pointer(p)).FselFlags&U32(SF_Aggregate) == U32(0) { - Xsqlite3ErrorMsg(tls, pParse, ts+7332, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+7321, 0) return WRC_Abort } if Xsqlite3ResolveExprNames(tls, bp, (*Select)(unsafe.Pointer(p)).FpHaving) != 0 { @@ -57029,7 +57023,7 @@ if (*Select)(unsafe.Pointer(p)).FpOrderBy != uintptr(0) && isCompound <= nCompound && - resolveOrderGroupBy(tls, bp, p, (*Select)(unsafe.Pointer(p)).FpOrderBy, ts+7234) != 0 { + resolveOrderGroupBy(tls, bp, p, (*Select)(unsafe.Pointer(p)).FpOrderBy, ts+7223) != 0 { return WRC_Abort } if (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { @@ -57040,7 +57034,7 @@ if pGroupBy != 0 { var pItem uintptr - if resolveOrderGroupBy(tls, bp, p, pGroupBy, ts+7371) != 0 || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { + if resolveOrderGroupBy(tls, bp, p, pGroupBy, ts+7360) != 0 || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { return WRC_Abort } i = 0 @@ -57052,7 +57046,7 @@ { if (*Expr)(unsafe.Pointer((*ExprList_item)(unsafe.Pointer(pItem)).FpExpr)).Fflags&U32(EP_Agg) != U32(0) { Xsqlite3ErrorMsg(tls, pParse, - ts+7377, 0) + ts+7366, 0) return WRC_Abort } @@ -57916,7 +57910,7 @@ var mxHeight int32 = 
*(*int32)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb + 136 + 3*4)) if nHeight > mxHeight { Xsqlite3ErrorMsg(tls, pParse, - ts+7436, libc.VaList(bp, mxHeight)) + ts+7425, libc.VaList(bp, mxHeight)) rc = SQLITE_ERROR } return rc @@ -58165,10 +58159,10 @@ nExprElem = 1 } if nExprElem != nElem { - Xsqlite3ErrorMsg(tls, pParse, ts+7484, + Xsqlite3ErrorMsg(tls, pParse, ts+7473, libc.VaList(bp, nExprElem, func() uintptr { if nExprElem > 1 { - return ts + 7528 + return ts + 7517 } return ts + 1554 }(), nElem)) @@ -58209,7 +58203,7 @@ !(int32((*Parse)(unsafe.Pointer(pParse)).FeParseMode) >= PARSE_MODE_RENAME) { Xsqlite3ExprDeferredDelete(tls, pParse, pLeft) Xsqlite3ExprDeferredDelete(tls, pParse, pRight) - return Xsqlite3Expr(tls, db, TK_INTEGER, ts+7530) + return Xsqlite3Expr(tls, db, TK_INTEGER, ts+7519) } else { return Xsqlite3PExpr(tls, pParse, TK_AND, pLeft, pRight) } @@ -58235,7 +58229,7 @@ if pList != 0 && (*ExprList)(unsafe.Pointer(pList)).FnExpr > *(*int32)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb + 136 + 6*4)) && !(int32((*Parse)(unsafe.Pointer(pParse)).Fnested) != 0) { - Xsqlite3ErrorMsg(tls, pParse, ts+7532, libc.VaList(bp, pToken)) + Xsqlite3ErrorMsg(tls, pParse, ts+7521, libc.VaList(bp, pToken)) } *(*uintptr)(unsafe.Pointer(pNew + 32)) = pList *(*U32)(unsafe.Pointer(pNew + 4)) |= U32(EP_HasFunc) @@ -58263,7 +58257,7 @@ if (*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_FromDDL) != U32(0) { if (*FuncDef)(unsafe.Pointer(pDef)).FfuncFlags&U32(SQLITE_FUNC_DIRECT) != U32(0) || (*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).Fflags&uint64(SQLITE_TrustedSchema) == uint64(0) { - Xsqlite3ErrorMsg(tls, pParse, ts+7566, libc.VaList(bp, pExpr)) + Xsqlite3ErrorMsg(tls, pParse, ts+7555, libc.VaList(bp, pExpr)) } } } @@ -58310,7 +58304,7 @@ } if bOk == 0 || *(*I64)(unsafe.Pointer(bp + 8)) < int64(1) || *(*I64)(unsafe.Pointer(bp + 8)) > I64(*(*int32)(unsafe.Pointer(db + 136 + 9*4))) { - Xsqlite3ErrorMsg(tls, pParse, ts+7586, + Xsqlite3ErrorMsg(tls, pParse, ts+7575, libc.VaList(bp, *(*int32)(unsafe.Pointer(db + 136 + 9*4)))) Xsqlite3RecordErrorOffsetOfExpr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pExpr) return @@ -58335,7 +58329,7 @@ } (*Expr)(unsafe.Pointer(pExpr)).FiColumn = x if int32(x) > *(*int32)(unsafe.Pointer(db + 136 + 9*4)) { - Xsqlite3ErrorMsg(tls, pParse, ts+7629, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+7618, 0) Xsqlite3RecordErrorOffsetOfExpr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pExpr) } } @@ -58910,7 +58904,7 @@ if !(int32((*Expr)(unsafe.Pointer(pExpr)).Fop) != TK_SELECT && (*IdList)(unsafe.Pointer(pColumns)).FnId != libc.AssignInt32(&n, Xsqlite3ExprVectorSize(tls, pExpr))) { goto __3 } - Xsqlite3ErrorMsg(tls, pParse, ts+7652, + Xsqlite3ErrorMsg(tls, pParse, ts+7641, libc.VaList(bp, (*IdList)(unsafe.Pointer(pColumns)).FnId, n)) goto vector_append_error __3: @@ -59033,7 +59027,7 @@ var mx int32 = *(*int32)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb + 136 + 2*4)) if pEList != 0 && (*ExprList)(unsafe.Pointer(pEList)).FnExpr > mx { - Xsqlite3ErrorMsg(tls, pParse, ts+7682, libc.VaList(bp, zObject)) + Xsqlite3ErrorMsg(tls, pParse, ts+7671, libc.VaList(bp, zObject)) } } @@ -59089,10 +59083,10 @@ // "false" EP_IsFalse // anything else 0 func Xsqlite3IsTrueOrFalse(tls *libc.TLS, zIn uintptr) U32 { - if Xsqlite3StrICmp(tls, zIn, ts+6764) == 0 { + if Xsqlite3StrICmp(tls, zIn, ts+7694) == 0 { return U32(EP_IsTrue) } - if Xsqlite3StrICmp(tls, zIn, ts+6769) == 0 { + if Xsqlite3StrICmp(tls, zIn, ts+7699) == 0 { return U32(EP_IsFalse) } return U32(0) @@ 
-60166,7 +60160,7 @@ } if (*Select)(unsafe.Pointer(pSel)).FpLimit != 0 { var db uintptr = (*Parse)(unsafe.Pointer(pParse)).Fdb - pLimit = Xsqlite3Expr(tls, db, TK_INTEGER, ts+7530) + pLimit = Xsqlite3Expr(tls, db, TK_INTEGER, ts+7519) if pLimit != 0 { (*Expr)(unsafe.Pointer(pLimit)).FaffExpr = uint8(SQLITE_AFF_NUMERIC) pLimit = Xsqlite3PExpr(tls, pParse, TK_NE, @@ -60604,6 +60598,7 @@ func Xsqlite3ExprCodeGeneratedColumn(tls *libc.TLS, pParse uintptr, pTab uintptr, pCol uintptr, regOut int32) { var iAddr int32 var v uintptr = (*Parse)(unsafe.Pointer(pParse)).FpVdbe + var nErr int32 = (*Parse)(unsafe.Pointer(pParse)).FnErr if (*Parse)(unsafe.Pointer(pParse)).FiSelfTab > 0 { iAddr = Xsqlite3VdbeAddOp3(tls, v, OP_IfNullRow, (*Parse)(unsafe.Pointer(pParse)).FiSelfTab-1, 0, regOut) @@ -60617,6 +60612,9 @@ if iAddr != 0 { Xsqlite3VdbeJumpHere(tls, v, iAddr) } + if (*Parse)(unsafe.Pointer(pParse)).FnErr > nErr { + (*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).FerrByteOffset = -1 + } } // Generate code to extract the value of the iCol-th column of a table. @@ -60835,6 +60833,7 @@ var p uintptr var v uintptr for p = (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr; p != 0; p = (*IndexedExpr)(unsafe.Pointer(p)).FpIENext { + var exprAff U8 var iDataCur int32 = (*IndexedExpr)(unsafe.Pointer(p)).FiDataCur if iDataCur < 0 { continue @@ -60848,6 +60847,14 @@ if Xsqlite3ExprCompare(tls, uintptr(0), pExpr, (*IndexedExpr)(unsafe.Pointer(p)).FpExpr, iDataCur) != 0 { continue } + + exprAff = Xsqlite3ExprAffinity(tls, pExpr) + if int32(exprAff) <= SQLITE_AFF_BLOB && int32((*IndexedExpr)(unsafe.Pointer(p)).Faff) != SQLITE_AFF_BLOB || + int32(exprAff) == SQLITE_AFF_TEXT && int32((*IndexedExpr)(unsafe.Pointer(p)).Faff) != SQLITE_AFF_TEXT || + int32(exprAff) >= SQLITE_AFF_NUMERIC && int32((*IndexedExpr)(unsafe.Pointer(p)).Faff) != SQLITE_AFF_NUMERIC { + continue + } + v = (*Parse)(unsafe.Pointer(pParse)).FpVdbe if (*IndexedExpr)(unsafe.Pointer(p)).FbMaybeNullRow != 0 { @@ -61621,7 +61628,7 @@ if !((*Expr)(unsafe.Pointer(pExpr)).FiTable != n1) { goto __122 } - Xsqlite3ErrorMsg(tls, pParse, ts+7652, + Xsqlite3ErrorMsg(tls, pParse, ts+7641, libc.VaList(bp+24, (*Expr)(unsafe.Pointer(pExpr)).FiTable, n1)) __122: ; @@ -61643,11 +61650,10 @@ return target __50: - if !(!((*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_Collate) != U32(0)) && - (*Expr)(unsafe.Pointer(pExpr)).FpLeft != 0 && - int32((*Expr)(unsafe.Pointer((*Expr)(unsafe.Pointer(pExpr)).FpLeft)).Fop) == TK_FUNCTION) { + if !!((*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_Collate) != U32(0)) { goto __123 } + inReg = Xsqlite3ExprCodeTarget(tls, pParse, (*Expr)(unsafe.Pointer(pExpr)).FpLeft, target) if !(inReg != target) { goto __125 @@ -61718,13 +61724,19 @@ ; __127: ; - addrINR = Xsqlite3VdbeAddOp1(tls, v, OP_IfNullRow, (*Expr)(unsafe.Pointer(pExpr)).FiTable) + addrINR = Xsqlite3VdbeAddOp3(tls, v, OP_IfNullRow, (*Expr)(unsafe.Pointer(pExpr)).FiTable, 0, target) (*Parse)(unsafe.Pointer(pParse)).FokConstFactor = U8(0) inReg = Xsqlite3ExprCodeTarget(tls, pParse, (*Expr)(unsafe.Pointer(pExpr)).FpLeft, target) (*Parse)(unsafe.Pointer(pParse)).FokConstFactor = okConstFactor + if !(inReg != target) { + goto __130 + } + Xsqlite3VdbeAddOp2(tls, v, OP_SCopy, inReg, target) + inReg = target +__130: + ; Xsqlite3VdbeJumpHere(tls, v, addrINR) - Xsqlite3VdbeChangeP3(tls, v, addrINR, inReg) goto __5 __56: @@ -61737,15 +61749,15 @@ nExpr = (*ExprList)(unsafe.Pointer(pEList)).FnExpr endLabel = Xsqlite3VdbeMakeLabel(tls, pParse) if !(libc.AssignUintptr(&pX, 
(*Expr)(unsafe.Pointer(pExpr)).FpLeft) != uintptr(0)) { - goto __130 + goto __131 } pDel = Xsqlite3ExprDup(tls, db1, pX, 0) if !((*Sqlite3)(unsafe.Pointer(db1)).FmallocFailed != 0) { - goto __131 + goto __132 } Xsqlite3ExprDelete(tls, db1, pDel) goto __5 -__131: +__132: ; exprToRegister(tls, pDel, exprCodeVector(tls, pParse, pDel, bp+40)) @@ -61755,22 +61767,22 @@ pTest = bp + 120 *(*int32)(unsafe.Pointer(bp + 40)) = 0 -__130: +__131: ; i1 = 0 -__132: +__133: if !(i1 < nExpr-1) { - goto __134 + goto __135 } if !(pX != 0) { - goto __135 + goto __136 } (*Expr)(unsafe.Pointer(bp + 120)).FpRight = (*ExprList_item)(unsafe.Pointer(aListelem + uintptr(i1)*32)).FpExpr - goto __136 -__135: - pTest = (*ExprList_item)(unsafe.Pointer(aListelem + uintptr(i1)*32)).FpExpr + goto __137 __136: + pTest = (*ExprList_item)(unsafe.Pointer(aListelem + uintptr(i1)*32)).FpExpr +__137: ; nextCase = Xsqlite3VdbeMakeLabel(tls, pParse) @@ -61779,21 +61791,21 @@ Xsqlite3ExprCode(tls, pParse, (*ExprList_item)(unsafe.Pointer(aListelem+uintptr(i1+1)*32)).FpExpr, target) Xsqlite3VdbeGoto(tls, v, endLabel) Xsqlite3VdbeResolveLabel(tls, v, nextCase) - goto __133 -__133: - i1 = i1 + 2 - goto __132 goto __134 __134: + i1 = i1 + 2 + goto __133 + goto __135 +__135: ; if !(nExpr&1 != 0) { - goto __137 + goto __138 } Xsqlite3ExprCode(tls, pParse, (*ExprList_item)(unsafe.Pointer(pEList+8+uintptr(nExpr-1)*32)).FpExpr, target) - goto __138 -__137: - Xsqlite3VdbeAddOp2(tls, v, OP_Null, 0, target) + goto __139 __138: + Xsqlite3VdbeAddOp2(tls, v, OP_Null, 0, target) +__139: ; Xsqlite3ExprDelete(tls, db1, pDel) setDoNotMergeFlagOnCopy(tls, v) @@ -61803,27 +61815,27 @@ __57: ; if !(!(int32((*Parse)(unsafe.Pointer(pParse)).FpTriggerTab) != 0) && !(int32((*Parse)(unsafe.Pointer(pParse)).Fnested) != 0)) { - goto __139 + goto __140 } Xsqlite3ErrorMsg(tls, pParse, ts+8082, 0) return 0 -__139: +__140: ; if !(int32((*Expr)(unsafe.Pointer(pExpr)).FaffExpr) == OE_Abort) { - goto __140 + goto __141 } Xsqlite3MayAbort(tls, pParse) -__140: +__141: ; if !(int32((*Expr)(unsafe.Pointer(pExpr)).FaffExpr) == OE_Ignore) { - goto __141 + goto __142 } Xsqlite3VdbeAddOp4(tls, v, OP_Halt, SQLITE_OK, OE_Ignore, 0, *(*uintptr)(unsafe.Pointer(pExpr + 8)), 0) - goto __142 -__141: + goto __143 +__142: Xsqlite3HaltConstraint(tls, pParse, func() int32 { if (*Parse)(unsafe.Pointer(pParse)).FpTriggerTab != 0 { @@ -61832,7 +61844,7 @@ return SQLITE_ERROR }(), int32((*Expr)(unsafe.Pointer(pExpr)).FaffExpr), *(*uintptr)(unsafe.Pointer(pExpr + 8)), int8(0), uint8(0)) -__142: +__143: ; goto __5 @@ -64500,7 +64512,7 @@ return SQLITE_NOMEM } if Xsqlite3_strnicmp(tls, zSql, ts+10922, 7) != 0 { - return Xsqlite3CorruptError(tls, 113494) + return Xsqlite3CorruptError(tls, 113516) } (*Sqlite3)(unsafe.Pointer(db)).Finit.FiDb = func() uint8 { if bTemp != 0 { @@ -64517,7 +64529,7 @@ } if rc == SQLITE_OK && ((*Parse)(unsafe.Pointer(p)).FpNewTable == uintptr(0) && (*Parse)(unsafe.Pointer(p)).FpNewIndex == uintptr(0) && (*Parse)(unsafe.Pointer(p)).FpNewTrigger == uintptr(0)) { - rc = Xsqlite3CorruptError(tls, 113505) + rc = Xsqlite3CorruptError(tls, 113527) } (*Sqlite3)(unsafe.Pointer(db)).Finit.FiDb = U8(0) @@ -65438,7 +65450,7 @@ goto __2 } - rc = Xsqlite3CorruptError(tls, 114441) + rc = Xsqlite3CorruptError(tls, 114463) goto drop_column_done __2: ; @@ -69802,6 +69814,12 @@ pExpr = Xsqlite3PExpr(tls, pParse, TK_UPLUS, pExpr, uintptr(0)) __11: ; + if !(pExpr != 0 && int32((*Expr)(unsafe.Pointer(pExpr)).Fop) != TK_RAISE) { + goto __12 + } + (*Expr)(unsafe.Pointer(pExpr)).FaffExpr = 
(*Column)(unsafe.Pointer(pCol)).Faffinity +__12: + ; Xsqlite3ColumnSetExpr(tls, pParse, pTab, pCol, pExpr) pExpr = uintptr(0) goto generated_done @@ -70966,7 +70984,7 @@ if Xsqlite3_strnicmp(tls, (*Table)(unsafe.Pointer(pTab)).FzName+uintptr(7), ts+3286, 4) == 0 { return 0 } - if Xsqlite3_strnicmp(tls, (*Table)(unsafe.Pointer(pTab)).FzName+uintptr(7), ts+7133, 10) == 0 { + if Xsqlite3_strnicmp(tls, (*Table)(unsafe.Pointer(pTab)).FzName+uintptr(7), ts+7122, 10) == 0 { return 0 } return 1 @@ -72212,7 +72230,7 @@ goto __101 } Xsqlite3ErrorMsg(tls, pParse, ts+14140, 0) - (*Parse)(unsafe.Pointer(pParse)).Frc = Xsqlite3CorruptError(tls, 121835) + (*Parse)(unsafe.Pointer(pParse)).Frc = Xsqlite3CorruptError(tls, 121859) goto exit_create_index __101: ; @@ -74262,7 +74280,7 @@ goto __16 __15: wcf = U16(WHERE_ONEPASS_DESIRED | WHERE_DUPLICATES_OK) - if !((*NameContext)(unsafe.Pointer(bp+16)).FncFlags&NC_VarSelect != 0) { + if !((*NameContext)(unsafe.Pointer(bp+16)).FncFlags&NC_Subquery != 0) { goto __23 } bComplex = 1 @@ -80730,7 +80748,7 @@ if !!(Xsqlite3SafetyCheckOk(tls, db) != 0) { goto __1 } - return Xsqlite3MisuseError(tls, 131895) + return Xsqlite3MisuseError(tls, 131931) __1: ; if !(zSql == uintptr(0)) { @@ -82129,7 +82147,7 @@ } else if (*FuncDef)(unsafe.Pointer(p)).FxFinalize != uintptr(0) { zType = ts + 17513 } else { - zType = ts + 7528 + zType = ts + 7517 } Xsqlite3VdbeMultiLoad(tls, v, 1, ts+17515, libc.VaList(bp, (*FuncDef)(unsafe.Pointer(p)).FzName, isBuiltin, @@ -82290,6 +82308,7 @@ var zErr2 uintptr var k3 int32 var pCheck uintptr + var jmp7 int32 var jmp6 int32 var iCol1 int32 var uniqOk int32 @@ -83608,7 +83627,7 @@ goto __217 } pMod = (*HashElem)(unsafe.Pointer(j1)).Fdata - Xsqlite3VdbeMultiLoad(tls, v, 1, ts+7528, libc.VaList(bp+264, (*Module)(unsafe.Pointer(pMod)).FzName)) + Xsqlite3VdbeMultiLoad(tls, v, 1, ts+7517, libc.VaList(bp+264, (*Module)(unsafe.Pointer(pMod)).FzName)) goto __216 __216: j1 = (*HashElem)(unsafe.Pointer(j1)).Fnext @@ -83624,7 +83643,7 @@ if !(i6 < int32(uint64(unsafe.Sizeof(aPragmaName))/uint64(unsafe.Sizeof(PragmaName{})))) { goto __220 } - Xsqlite3VdbeMultiLoad(tls, v, 1, ts+7528, libc.VaList(bp+272, aPragmaName[i6].FzName)) + Xsqlite3VdbeMultiLoad(tls, v, 1, ts+7517, libc.VaList(bp+272, aPragmaName[i6].FzName)) goto __219 __219: i6++ @@ -84429,80 +84448,94 @@ jmp4 = integrityCheckResultRow(tls, v) Xsqlite3VdbeJumpHere(tls, v, jmp21) + if !((*Table)(unsafe.Pointer(pTab9)).FtabFlags&U32(TF_WithoutRowid) == U32(0)) { + goto __345 + } + Xsqlite3VdbeAddOp2(tls, v, OP_IdxRowid, *(*int32)(unsafe.Pointer(bp + 616))+j4, 3) + jmp7 = Xsqlite3VdbeAddOp3(tls, v, OP_Eq, 3, 0, r1+int32((*Index)(unsafe.Pointer(pIdx5)).FnColumn)-1) + + Xsqlite3VdbeLoadString(tls, v, 3, + ts+17929) + Xsqlite3VdbeAddOp3(tls, v, OP_Concat, 7, 3, 3) + Xsqlite3VdbeLoadString(tls, v, 4, ts+17965) + Xsqlite3VdbeGoto(tls, v, jmp5-1) + Xsqlite3VdbeJumpHere(tls, v, jmp7) +__345: + ; label6 = 0 kk = 0 -__345: +__346: if !(kk < int32((*Index)(unsafe.Pointer(pIdx5)).FnKeyCol)) { - goto __347 + goto __348 } if !(*(*uintptr)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx5)).FazColl + uintptr(kk)*8)) == uintptr(unsafe.Pointer(&Xsqlite3StrBINARY))) { - goto __348 + goto __349 } - goto __346 -__348: + goto __347 +__349: ; if !(label6 == 0) { - goto __349 + goto __350 } label6 = Xsqlite3VdbeMakeLabel(tls, pParse) -__349: +__350: ; Xsqlite3VdbeAddOp3(tls, v, OP_Column, *(*int32)(unsafe.Pointer(bp + 616))+j4, kk, 3) Xsqlite3VdbeAddOp3(tls, v, OP_Ne, 3, label6, r1+kk) - goto __346 -__346: - kk++ - goto __345 
goto __347 __347: + kk++ + goto __346 + goto __348 +__348: ; if !(label6 != 0) { - goto __350 + goto __351 } jmp6 = Xsqlite3VdbeAddOp0(tls, v, OP_Goto) Xsqlite3VdbeResolveLabel(tls, v, label6) Xsqlite3VdbeLoadString(tls, v, 3, ts+17903) Xsqlite3VdbeAddOp3(tls, v, OP_Concat, 7, 3, 3) - Xsqlite3VdbeLoadString(tls, v, 4, ts+17929) + Xsqlite3VdbeLoadString(tls, v, 4, ts+17976) Xsqlite3VdbeGoto(tls, v, jmp5-1) Xsqlite3VdbeJumpHere(tls, v, jmp6) -__350: +__351: ; if !(int32((*Index)(unsafe.Pointer(pIdx5)).FonError) != OE_None) { - goto __351 + goto __352 } uniqOk = Xsqlite3VdbeMakeLabel(tls, pParse) kk = 0 -__352: +__353: if !(kk < int32((*Index)(unsafe.Pointer(pIdx5)).FnKeyCol)) { - goto __354 + goto __355 } iCol1 = int32(*(*I16)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx5)).FaiColumn + uintptr(kk)*2))) if !(iCol1 >= 0 && uint32(int32(*(*uint8)(unsafe.Pointer((*Table)(unsafe.Pointer(pTab9)).FaCol + uintptr(iCol1)*24 + 8))&0xf>>0)) != 0) { - goto __355 + goto __356 } - goto __353 -__355: + goto __354 +__356: ; Xsqlite3VdbeAddOp2(tls, v, OP_IsNull, r1+kk, uniqOk) - goto __353 -__353: - kk++ - goto __352 goto __354 __354: + kk++ + goto __353 + goto __355 +__355: ; jmp61 = Xsqlite3VdbeAddOp1(tls, v, OP_Next, *(*int32)(unsafe.Pointer(bp + 616))+j4) Xsqlite3VdbeGoto(tls, v, uniqOk) Xsqlite3VdbeJumpHere(tls, v, jmp61) Xsqlite3VdbeAddOp4Int(tls, v, OP_IdxGT, *(*int32)(unsafe.Pointer(bp + 616))+j4, uniqOk, r1, int32((*Index)(unsafe.Pointer(pIdx5)).FnKeyCol)) - Xsqlite3VdbeLoadString(tls, v, 3, ts+17956) + Xsqlite3VdbeLoadString(tls, v, 3, ts+18003) Xsqlite3VdbeGoto(tls, v, jmp5) Xsqlite3VdbeResolveLabel(tls, v, uniqOk) -__351: +__352: ; Xsqlite3VdbeJumpHere(tls, v, jmp4) Xsqlite3ResolvePartIdxLabel(tls, pParse, *(*int32)(unsafe.Pointer(bp + 632))) @@ -84519,20 +84552,20 @@ Xsqlite3VdbeAddOp2(tls, v, OP_Next, *(*int32)(unsafe.Pointer(bp + 612)), loopTop) Xsqlite3VdbeJumpHere(tls, v, loopTop-1) if !!(isQuick != 0) { - goto __356 + goto __357 } - Xsqlite3VdbeLoadString(tls, v, 2, ts+17983) + Xsqlite3VdbeLoadString(tls, v, 2, ts+18030) j4 = 0 pIdx5 = (*Table)(unsafe.Pointer(pTab9)).FpIndex -__357: +__358: if !(pIdx5 != 0) { - goto __359 + goto __360 } if !(pPk1 == pIdx5) { - goto __360 + goto __361 } - goto __358 -__360: + goto __359 +__361: ; Xsqlite3VdbeAddOp2(tls, v, OP_Count, *(*int32)(unsafe.Pointer(bp + 616))+j4, 3) addr1 = Xsqlite3VdbeAddOp3(tls, v, OP_Eq, 8+j4, 0, 3) @@ -84541,21 +84574,21 @@ Xsqlite3VdbeAddOp3(tls, v, OP_Concat, 4, 2, 3) integrityCheckResultRow(tls, v) Xsqlite3VdbeJumpHere(tls, v, addr1) - goto __358 -__358: - pIdx5 = (*Index)(unsafe.Pointer(pIdx5)).FpNext - j4++ - goto __357 goto __359 __359: + pIdx5 = (*Index)(unsafe.Pointer(pIdx5)).FpNext + j4++ + goto __358 + goto __360 +__360: ; if !(pPk1 != 0) { - goto __361 + goto __362 } Xsqlite3ReleaseTempRange(tls, pParse, r2, int32((*Index)(unsafe.Pointer(pPk1)).FnKeyCol)) -__361: +__362: ; -__356: +__357: ; goto __291 __291: @@ -84573,14 +84606,14 @@ ; aOp2 = Xsqlite3VdbeAddOpList(tls, v, int32(uint64(unsafe.Sizeof(endCode))/uint64(unsafe.Sizeof(VdbeOpList{}))), uintptr(unsafe.Pointer(&endCode)), iLn5) if !(aOp2 != 0) { - goto __362 + goto __363 } (*VdbeOp)(unsafe.Pointer(aOp2)).Fp2 = 1 - *(*int32)(unsafe.Pointer(bp + 608)) (*VdbeOp)(unsafe.Pointer(aOp2 + 2*24)).Fp4type = int8(-1) - *(*uintptr)(unsafe.Pointer(aOp2 + 2*24 + 16)) = ts + 18012 + *(*uintptr)(unsafe.Pointer(aOp2 + 2*24 + 16)) = ts + 18059 (*VdbeOp)(unsafe.Pointer(aOp2 + 5*24)).Fp4type = int8(-1) *(*uintptr)(unsafe.Pointer(aOp2 + 5*24 + 16)) = Xsqlite3ErrStr(tls, 
SQLITE_CORRUPT) -__362: +__363: ; Xsqlite3VdbeChangeP3(tls, v, 0, Xsqlite3VdbeCurrentAddr(tls, v)-2) @@ -84588,27 +84621,27 @@ __45: if !!(zRight != 0) { - goto __363 + goto __364 } if !(Xsqlite3ReadSchema(tls, pParse) != 0) { - goto __365 + goto __366 } goto pragma_out -__365: +__366: ; returnSingleText(tls, v, encnames1[(*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).Fenc].FzName) - goto __364 -__363: + goto __365 +__364: if !((*Sqlite3)(unsafe.Pointer(db)).FmDbFlags&U32(DBFLAG_EncodingFixed) == U32(0)) { - goto __366 + goto __367 } pEnc = uintptr(unsafe.Pointer(&encnames1)) -__367: +__368: if !((*EncName)(unsafe.Pointer(pEnc)).FzName != 0) { - goto __369 + goto __370 } if !(0 == Xsqlite3StrICmp(tls, zRight, (*EncName)(unsafe.Pointer(pEnc)).FzName)) { - goto __370 + goto __371 } if (*EncName)(unsafe.Pointer(pEnc)).Fenc != 0 { enc = (*EncName)(unsafe.Pointer(pEnc)).Fenc @@ -84622,25 +84655,25 @@ } (*Schema)(unsafe.Pointer((*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb)).FpSchema)).Fenc = enc Xsqlite3SetTextEncoding(tls, db, enc) - goto __369 -__370: + goto __370 +__371: ; - goto __368 -__368: - pEnc += 16 - goto __367 goto __369 __369: + pEnc += 16 + goto __368 + goto __370 +__370: ; if !!(int32((*EncName)(unsafe.Pointer(pEnc)).FzName) != 0) { - goto __371 + goto __372 } - Xsqlite3ErrorMsg(tls, pParse, ts+18015, libc.VaList(bp+456, zRight)) -__371: + Xsqlite3ErrorMsg(tls, pParse, ts+18062, libc.VaList(bp+456, zRight)) +__372: ; -__366: +__367: ; -__364: +__365: ; goto __15 @@ -84648,15 +84681,15 @@ iCookie = int32((*PragmaName)(unsafe.Pointer(pPragma)).FiArg) Xsqlite3VdbeUsesBtree(tls, v, iDb) if !(zRight != 0 && int32((*PragmaName)(unsafe.Pointer(pPragma)).FmPragFlg)&PragFlg_ReadOnly == 0) { - goto __372 + goto __373 } aOp3 = Xsqlite3VdbeAddOpList(tls, v, int32(uint64(unsafe.Sizeof(setCookie))/uint64(unsafe.Sizeof(VdbeOpList{}))), uintptr(unsafe.Pointer(&setCookie)), 0) if !(0 != 0) { - goto __374 + goto __375 } goto __15 -__374: +__375: ; (*VdbeOp)(unsafe.Pointer(aOp3)).Fp1 = iDb (*VdbeOp)(unsafe.Pointer(aOp3 + 1*24)).Fp1 = iDb @@ -84664,41 +84697,41 @@ (*VdbeOp)(unsafe.Pointer(aOp3 + 1*24)).Fp3 = Xsqlite3Atoi(tls, zRight) (*VdbeOp)(unsafe.Pointer(aOp3 + 1*24)).Fp5 = U16(1) if !(iCookie == BTREE_SCHEMA_VERSION && (*Sqlite3)(unsafe.Pointer(db)).Fflags&uint64(SQLITE_Defensive) != uint64(0)) { - goto __375 + goto __376 } (*VdbeOp)(unsafe.Pointer(aOp3 + 1*24)).Fopcode = U8(OP_Noop) -__375: +__376: ; - goto __373 -__372: + goto __374 +__373: ; aOp4 = Xsqlite3VdbeAddOpList(tls, v, int32(uint64(unsafe.Sizeof(readCookie))/uint64(unsafe.Sizeof(VdbeOpList{}))), uintptr(unsafe.Pointer(&readCookie)), 0) if !(0 != 0) { - goto __376 + goto __377 } goto __15 -__376: +__377: ; (*VdbeOp)(unsafe.Pointer(aOp4)).Fp1 = iDb (*VdbeOp)(unsafe.Pointer(aOp4 + 1*24)).Fp1 = iDb (*VdbeOp)(unsafe.Pointer(aOp4 + 1*24)).Fp3 = iCookie Xsqlite3VdbeReusable(tls, v) -__373: +__374: ; goto __15 __47: i10 = 0 (*Parse)(unsafe.Pointer(pParse)).FnMem = 1 -__377: +__378: if !(libc.AssignUintptr(&zOpt, Xsqlite3_compileoption_get(tls, libc.PostIncInt32(&i10, 1))) != uintptr(0)) { - goto __378 + goto __379 } Xsqlite3VdbeLoadString(tls, v, 1, zOpt) Xsqlite3VdbeAddOp2(tls, v, OP_ResultRow, 1, 1) - goto __377 -__378: + goto __378 +__379: ; Xsqlite3VdbeReusable(tls, v) @@ -84713,31 +84746,31 @@ }() eMode2 = SQLITE_CHECKPOINT_PASSIVE if !(zRight != 0) { - goto __379 + goto __380 } if !(Xsqlite3StrICmp(tls, zRight, ts+17345) == 0) { - goto __380 + goto __381 } eMode2 = SQLITE_CHECKPOINT_FULL - goto __381 
-__380: - if !(Xsqlite3StrICmp(tls, zRight, ts+18040) == 0) { - goto __382 + goto __382 +__381: + if !(Xsqlite3StrICmp(tls, zRight, ts+18087) == 0) { + goto __383 } eMode2 = SQLITE_CHECKPOINT_RESTART - goto __383 -__382: + goto __384 +__383: if !(Xsqlite3StrICmp(tls, zRight, ts+17498) == 0) { - goto __384 + goto __385 } eMode2 = SQLITE_CHECKPOINT_TRUNCATE -__384: +__385: ; -__383: +__384: ; -__381: +__382: ; -__379: +__380: ; (*Parse)(unsafe.Pointer(pParse)).FnMem = 3 Xsqlite3VdbeAddOp3(tls, v, OP_Checkpoint, iBt, eMode2, 1) @@ -84747,10 +84780,10 @@ __49: if !(zRight != 0) { - goto __385 + goto __386 } Xsqlite3_wal_autocheckpoint(tls, db, Xsqlite3Atoi(tls, zRight)) -__385: +__386: ; returnSingleInt(tls, v, func() int64 { @@ -84770,19 +84803,19 @@ __51: if !(zRight != 0) { - goto __386 + goto __387 } opMask = U32(Xsqlite3Atoi(tls, zRight)) if !(opMask&U32(0x02) == U32(0)) { - goto __388 + goto __389 } goto __15 -__388: +__389: ; - goto __387 -__386: - opMask = U32(0xfffe) + goto __388 __387: + opMask = U32(0xfffe) +__388: ; iTabCur = libc.PostIncInt32(&(*Parse)(unsafe.Pointer(pParse)).FnTab, 1) iDbLast = func() int32 { @@ -84791,86 +84824,86 @@ } return (*Sqlite3)(unsafe.Pointer(db)).FnDb - 1 }() -__389: +__390: if !(iDb <= iDbLast) { - goto __391 + goto __392 } if !(iDb == 1) { - goto __392 + goto __393 } - goto __390 -__392: + goto __391 +__393: ; Xsqlite3CodeVerifySchema(tls, pParse, iDb) pSchema = (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + uintptr(iDb)*32)).FpSchema k4 = (*Hash)(unsafe.Pointer(pSchema + 8)).Ffirst -__393: +__394: if !(k4 != 0) { - goto __395 + goto __396 } pTab10 = (*HashElem)(unsafe.Pointer(k4)).Fdata if !((*Table)(unsafe.Pointer(pTab10)).FtabFlags&U32(TF_StatsUsed) == U32(0)) { - goto __396 + goto __397 } - goto __394 -__396: + goto __395 +__397: ; szThreshold = LogEst(int32((*Table)(unsafe.Pointer(pTab10)).FnRowLogEst) + 46) pIdx6 = (*Table)(unsafe.Pointer(pTab10)).FpIndex -__397: +__398: if !(pIdx6 != 0) { - goto __399 + goto __400 } if !!(int32(*(*uint16)(unsafe.Pointer(pIdx6 + 100))&0x80>>7) != 0) { - goto __400 + goto __401 } szThreshold = int16(0) - goto __399 -__400: + goto __400 +__401: ; - goto __398 -__398: - pIdx6 = (*Index)(unsafe.Pointer(pIdx6)).FpNext - goto __397 goto __399 __399: + pIdx6 = (*Index)(unsafe.Pointer(pIdx6)).FpNext + goto __398 + goto __400 +__400: ; if !(szThreshold != 0) { - goto __401 + goto __402 } Xsqlite3OpenTable(tls, pParse, iTabCur, iDb, pTab10, OP_OpenRead) Xsqlite3VdbeAddOp3(tls, v, OP_IfSmaller, iTabCur, int32(U32(Xsqlite3VdbeCurrentAddr(tls, v)+2)+opMask&U32(1)), int32(szThreshold)) -__401: +__402: ; - zSubSql = Xsqlite3MPrintf(tls, db, ts+18048, + zSubSql = Xsqlite3MPrintf(tls, db, ts+18095, libc.VaList(bp+464, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName, (*Table)(unsafe.Pointer(pTab10)).FzName)) if !(opMask&U32(0x01) != 0) { - goto __402 + goto __403 } r11 = Xsqlite3GetTempReg(tls, pParse) Xsqlite3VdbeAddOp4(tls, v, OP_String8, 0, r11, 0, zSubSql, -6) Xsqlite3VdbeAddOp2(tls, v, OP_ResultRow, r11, 1) - goto __403 -__402: - Xsqlite3VdbeAddOp4(tls, v, OP_SqlExec, 0, 0, 0, zSubSql, -6) + goto __404 __403: + Xsqlite3VdbeAddOp4(tls, v, OP_SqlExec, 0, 0, 0, zSubSql, -6) +__404: ; - goto __394 -__394: - k4 = (*HashElem)(unsafe.Pointer(k4)).Fnext - goto __393 goto __395 __395: + k4 = (*HashElem)(unsafe.Pointer(k4)).Fnext + goto __394 + goto __396 +__396: ; - goto __390 -__390: - iDb++ - goto __389 goto __391 __391: + iDb++ + goto __390 + goto __392 +__392: ; 
Xsqlite3VdbeAddOp0(tls, v, OP_Expire) goto __15 @@ -84878,36 +84911,36 @@ __52: ; if !(zRight != 0) { - goto __404 + goto __405 } Xsqlite3_busy_timeout(tls, db, Xsqlite3Atoi(tls, zRight)) -__404: +__405: ; returnSingleInt(tls, v, int64((*Sqlite3)(unsafe.Pointer(db)).FbusyTimeout)) goto __15 __53: if !(zRight != 0 && Xsqlite3DecOrHexToI64(tls, zRight, bp+640) == SQLITE_OK) { - goto __405 + goto __406 } Xsqlite3_soft_heap_limit64(tls, *(*Sqlite3_int64)(unsafe.Pointer(bp + 640))) -__405: +__406: ; returnSingleInt(tls, v, Xsqlite3_soft_heap_limit64(tls, int64(-1))) goto __15 __54: if !(zRight != 0 && Xsqlite3DecOrHexToI64(tls, zRight, bp+648) == SQLITE_OK) { - goto __406 + goto __407 } iPrior = Xsqlite3_hard_heap_limit64(tls, int64(-1)) if !(*(*Sqlite3_int64)(unsafe.Pointer(bp + 648)) > int64(0) && (iPrior == int64(0) || iPrior > *(*Sqlite3_int64)(unsafe.Pointer(bp + 648)))) { - goto __407 + goto __408 } Xsqlite3_hard_heap_limit64(tls, *(*Sqlite3_int64)(unsafe.Pointer(bp + 648))) -__407: +__408: ; -__406: +__407: ; returnSingleInt(tls, v, Xsqlite3_hard_heap_limit64(tls, int64(-1))) goto __15 @@ -84916,10 +84949,10 @@ if !(zRight != 0 && Xsqlite3DecOrHexToI64(tls, zRight, bp+656) == SQLITE_OK && *(*Sqlite3_int64)(unsafe.Pointer(bp + 656)) >= int64(0)) { - goto __408 + goto __409 } Xsqlite3_limit(tls, db, SQLITE_LIMIT_WORKER_THREADS, int32(*(*Sqlite3_int64)(unsafe.Pointer(bp + 656))&int64(0x7fffffff))) -__408: +__409: ; returnSingleInt(tls, v, int64(Xsqlite3_limit(tls, db, SQLITE_LIMIT_WORKER_THREADS, -1))) goto __15 @@ -84928,10 +84961,10 @@ if !(zRight != 0 && Xsqlite3DecOrHexToI64(tls, zRight, bp+664) == SQLITE_OK && *(*Sqlite3_int64)(unsafe.Pointer(bp + 664)) >= int64(0)) { - goto __409 + goto __410 } (*Sqlite3)(unsafe.Pointer(db)).FnAnalysisLimit = int32(*(*Sqlite3_int64)(unsafe.Pointer(bp + 664)) & int64(0x7fffffff)) -__409: +__410: ; returnSingleInt(tls, v, int64((*Sqlite3)(unsafe.Pointer(db)).FnAnalysisLimit)) goto __15 @@ -84939,10 +84972,10 @@ __15: ; if !(int32((*PragmaName)(unsafe.Pointer(pPragma)).FmPragFlg)&PragFlg_NoColumns1 != 0 && zRight != 0) { - goto __410 + goto __411 } -__410: +__411: ; pragma_out: Xsqlite3DbFree(tls, db, zLeft) @@ -84994,14 +85027,14 @@ {Fopcode: U8(OP_Goto), Fp2: int8(3)}, } var encnames1 = [9]EncName{ - {FzName: ts + 18066, Fenc: U8(SQLITE_UTF8)}, - {FzName: ts + 18071, Fenc: U8(SQLITE_UTF8)}, - {FzName: ts + 18077, Fenc: U8(SQLITE_UTF16LE)}, - {FzName: ts + 18086, Fenc: U8(SQLITE_UTF16BE)}, - {FzName: ts + 18095, Fenc: U8(SQLITE_UTF16LE)}, - {FzName: ts + 18103, Fenc: U8(SQLITE_UTF16BE)}, - {FzName: ts + 18111}, - {FzName: ts + 18118}, + {FzName: ts + 18113, Fenc: U8(SQLITE_UTF8)}, + {FzName: ts + 18118, Fenc: U8(SQLITE_UTF8)}, + {FzName: ts + 18124, Fenc: U8(SQLITE_UTF16LE)}, + {FzName: ts + 18133, Fenc: U8(SQLITE_UTF16BE)}, + {FzName: ts + 18142, Fenc: U8(SQLITE_UTF16LE)}, + {FzName: ts + 18150, Fenc: U8(SQLITE_UTF16BE)}, + {FzName: ts + 18158}, + {FzName: ts + 18165}, {}, } var setCookie = [2]VdbeOpList{ @@ -85053,7 +85086,7 @@ _ = argc _ = argv Xsqlite3StrAccumInit(tls, bp+32, uintptr(0), bp+64, int32(unsafe.Sizeof([200]uint8{})), 0) - Xsqlite3_str_appendall(tls, bp+32, ts+18124) + Xsqlite3_str_appendall(tls, bp+32, ts+18171) i = 0 j = int32((*PragmaName)(unsafe.Pointer(pPragma)).FiPragCName) __1: @@ -85061,7 +85094,7 @@ goto __3 } { - Xsqlite3_str_appendf(tls, bp+32, ts+18139, libc.VaList(bp, int32(cSep), pragCName[j])) + Xsqlite3_str_appendf(tls, bp+32, ts+18186, libc.VaList(bp, int32(cSep), pragCName[j])) cSep = uint8(',') } @@ -85074,16 +85107,16 
@@ __3: ; if i == 0 { - Xsqlite3_str_appendf(tls, bp+32, ts+18146, libc.VaList(bp+16, (*PragmaName)(unsafe.Pointer(pPragma)).FzName)) + Xsqlite3_str_appendf(tls, bp+32, ts+18193, libc.VaList(bp+16, (*PragmaName)(unsafe.Pointer(pPragma)).FzName)) i++ } j = 0 if int32((*PragmaName)(unsafe.Pointer(pPragma)).FmPragFlg)&PragFlg_Result1 != 0 { - Xsqlite3_str_appendall(tls, bp+32, ts+18152) + Xsqlite3_str_appendall(tls, bp+32, ts+18199) j++ } if int32((*PragmaName)(unsafe.Pointer(pPragma)).FmPragFlg)&(PragFlg_SchemaOpt|PragFlg_SchemaReq) != 0 { - Xsqlite3_str_appendall(tls, bp+32, ts+18164) + Xsqlite3_str_appendall(tls, bp+32, ts+18211) j++ } Xsqlite3_str_append(tls, bp+32, ts+4957, 1) @@ -85266,13 +85299,13 @@ __3: ; Xsqlite3StrAccumInit(tls, bp+32, uintptr(0), uintptr(0), 0, *(*int32)(unsafe.Pointer((*PragmaVtab)(unsafe.Pointer(pTab)).Fdb + 136 + 1*4))) - Xsqlite3_str_appendall(tls, bp+32, ts+18179) + Xsqlite3_str_appendall(tls, bp+32, ts+18226) if *(*uintptr)(unsafe.Pointer(pCsr + 24 + 1*8)) != 0 { - Xsqlite3_str_appendf(tls, bp+32, ts+18187, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(pCsr + 24 + 1*8)))) + Xsqlite3_str_appendf(tls, bp+32, ts+18234, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(pCsr + 24 + 1*8)))) } Xsqlite3_str_appendall(tls, bp+32, (*PragmaName)(unsafe.Pointer((*PragmaVtab)(unsafe.Pointer(pTab)).FpName)).FzName) if *(*uintptr)(unsafe.Pointer(pCsr + 24)) != 0 { - Xsqlite3_str_appendf(tls, bp+32, ts+18191, libc.VaList(bp+16, *(*uintptr)(unsafe.Pointer(pCsr + 24)))) + Xsqlite3_str_appendf(tls, bp+32, ts+18238, libc.VaList(bp+16, *(*uintptr)(unsafe.Pointer(pCsr + 24)))) } zSql = Xsqlite3StrAccumFinish(tls, bp+32) if zSql == uintptr(0) { @@ -85349,12 +85382,12 @@ } else if *(*uintptr)(unsafe.Pointer((*InitData)(unsafe.Pointer(pData)).FpzErrMsg)) != uintptr(0) { } else if (*InitData)(unsafe.Pointer(pData)).FmInitFlags&U32(INITFLAG_AlterMask) != 0 { *(*uintptr)(unsafe.Pointer((*InitData)(unsafe.Pointer(pData)).FpzErrMsg)) = Xsqlite3MPrintf(tls, db, - ts+18195, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(azObj)), *(*uintptr)(unsafe.Pointer(azObj + 1*8)), + ts+18242, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(azObj)), *(*uintptr)(unsafe.Pointer(azObj + 1*8)), azAlterType[(*InitData)(unsafe.Pointer(pData)).FmInitFlags&U32(INITFLAG_AlterMask)-U32(1)], zExtra)) (*InitData)(unsafe.Pointer(pData)).Frc = SQLITE_ERROR } else if (*Sqlite3)(unsafe.Pointer(db)).Fflags&uint64(SQLITE_WriteSchema) != 0 { - (*InitData)(unsafe.Pointer(pData)).Frc = Xsqlite3CorruptError(tls, 137196) + (*InitData)(unsafe.Pointer(pData)).Frc = Xsqlite3CorruptError(tls, 137249) } else { var z uintptr var zObj uintptr @@ -85363,19 +85396,19 @@ } else { zObj = ts + 5008 } - z = Xsqlite3MPrintf(tls, db, ts+18223, libc.VaList(bp+32, zObj)) + z = Xsqlite3MPrintf(tls, db, ts+18270, libc.VaList(bp+32, zObj)) if zExtra != 0 && *(*uint8)(unsafe.Pointer(zExtra)) != 0 { - z = Xsqlite3MPrintf(tls, db, ts+18254, libc.VaList(bp+40, z, zExtra)) + z = Xsqlite3MPrintf(tls, db, ts+18301, libc.VaList(bp+40, z, zExtra)) } *(*uintptr)(unsafe.Pointer((*InitData)(unsafe.Pointer(pData)).FpzErrMsg)) = z - (*InitData)(unsafe.Pointer(pData)).Frc = Xsqlite3CorruptError(tls, 137203) + (*InitData)(unsafe.Pointer(pData)).Frc = Xsqlite3CorruptError(tls, 137256) } } var azAlterType = [3]uintptr{ - ts + 18262, - ts + 18269, - ts + 18281, + ts + 18309, + ts + 18316, + ts + 18328, } // Check to see if any sibling index (another index on the same table) @@ -85467,7 +85500,7 @@ var pIndex uintptr pIndex = Xsqlite3FindIndex(tls, db, 
*(*uintptr)(unsafe.Pointer(argv + 1*8)), (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName) if pIndex == uintptr(0) { - corruptSchema(tls, pData, argv, ts+18292) + corruptSchema(tls, pData, argv, ts+18339) } else if Xsqlite3GetUInt32(tls, *(*uintptr)(unsafe.Pointer(argv + 3*8)), pIndex+88) == 0 || (*Index)(unsafe.Pointer(pIndex)).Ftnum < Pgno(2) || (*Index)(unsafe.Pointer(pIndex)).Ftnum > (*InitData)(unsafe.Pointer(pData)).FmxPage || @@ -85515,7 +85548,7 @@ }()) *(*uintptr)(unsafe.Pointer(bp + 16 + 2*8)) = *(*uintptr)(unsafe.Pointer(bp + 16 + 1*8)) *(*uintptr)(unsafe.Pointer(bp + 16 + 3*8)) = ts + 7938 - *(*uintptr)(unsafe.Pointer(bp + 16 + 4*8)) = ts + 18305 + *(*uintptr)(unsafe.Pointer(bp + 16 + 4*8)) = ts + 18352 *(*uintptr)(unsafe.Pointer(bp + 16 + 5*8)) = uintptr(0) (*InitData)(unsafe.Pointer(bp + 64)).Fdb = db (*InitData)(unsafe.Pointer(bp + 64)).FiDb = iDb @@ -85644,7 +85677,7 @@ if !(int32((*Schema)(unsafe.Pointer((*Db)(unsafe.Pointer(pDb)).FpSchema)).Ffile_format) > SQLITE_MAX_FILE_FORMAT) { goto __19 } - Xsqlite3SetString(tls, pzErrMsg, db, ts+18377) + Xsqlite3SetString(tls, pzErrMsg, db, ts+18424) rc = SQLITE_ERROR goto initone_error_out __19: @@ -85658,7 +85691,7 @@ (*InitData)(unsafe.Pointer(bp + 64)).FmxPage = Xsqlite3BtreeLastPage(tls, (*Db)(unsafe.Pointer(pDb)).FpBt) zSql = Xsqlite3MPrintf(tls, db, - ts+18401, + ts+18448, libc.VaList(bp, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName, zSchemaTabName)) xAuth = (*Sqlite3)(unsafe.Pointer(db)).FxAuth @@ -85990,7 +86023,7 @@ goto __8 } zDb = (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + uintptr(i)*32)).FzDbSName - Xsqlite3ErrorWithMsg(tls, db, rc, ts+18435, libc.VaList(bp, zDb)) + Xsqlite3ErrorWithMsg(tls, db, rc, ts+18482, libc.VaList(bp, zDb)) goto end_prepare __8: @@ -86020,7 +86053,7 @@ if !(nBytes > mxLen) { goto __12 } - Xsqlite3ErrorWithMsg(tls, db, SQLITE_TOOBIG, ts+18465, 0) + Xsqlite3ErrorWithMsg(tls, db, SQLITE_TOOBIG, ts+18512, 0) rc = Xsqlite3ApiExit(tls, db, SQLITE_TOOBIG) goto end_prepare __12: @@ -86116,7 +86149,7 @@ *(*uintptr)(unsafe.Pointer(ppStmt)) = uintptr(0) if !(Xsqlite3SafetyCheckOk(tls, db) != 0) || zSql == uintptr(0) { - return Xsqlite3MisuseError(tls, 137995) + return Xsqlite3MisuseError(tls, 138048) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) Xsqlite3BtreeEnterAll(tls, db) @@ -86215,7 +86248,7 @@ *(*uintptr)(unsafe.Pointer(ppStmt)) = uintptr(0) if !(Xsqlite3SafetyCheckOk(tls, db) != 0) || zSql == uintptr(0) { - return Xsqlite3MisuseError(tls, 138143) + return Xsqlite3MisuseError(tls, 138196) } if nBytes >= 0 { var sz int32 @@ -86547,13 +86580,13 @@ zSp2++ } Xsqlite3ErrorMsg(tls, pParse, - ts+18484, libc.VaList(bp, pA, zSp1, pB, zSp2, pC)) + ts+18531, libc.VaList(bp, pA, zSp1, pB, zSp2, pC)) jointype = JT_INNER } return jointype } -var zKeyText = *(*[34]uint8)(unsafe.Pointer(ts + 18514)) +var zKeyText = *(*[34]uint8)(unsafe.Pointer(ts + 18561)) var aKeyword = [7]struct { Fi U8 FnChar U8 @@ -86728,7 +86761,7 @@ var pUsing uintptr = uintptr(0) if uint32(int32(*(*uint16)(unsafe.Pointer(pRight + 60 + 4))&0x400>>10)) != 0 || *(*uintptr)(unsafe.Pointer(pRight + 72)) != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+18548, libc.VaList(bp, 0)) + ts+18595, libc.VaList(bp, 0)) return 1 } for j = 0; j < int32((*Table)(unsafe.Pointer(pRightTab)).FnCol); j++ { @@ -86773,7 +86806,7 @@ tableAndColumnIndex(tls, pSrc, 0, i, zName, bp+24, bp+28, int32(*(*uint16)(unsafe.Pointer(pRight + 60 + 4))&0x1000>>12)) == 0 { 
Xsqlite3ErrorMsg(tls, pParse, - ts+18598, libc.VaList(bp+8, zName)) + ts+18645, libc.VaList(bp+8, zName)) return 1 } pE1 = Xsqlite3CreateColumnExpr(tls, db, pSrc, *(*int32)(unsafe.Pointer(bp + 24)), *(*int32)(unsafe.Pointer(bp + 28))) @@ -86784,7 +86817,7 @@ int32(*(*uint16)(unsafe.Pointer(pRight + 60 + 4))&0x1000>>12)) != 0 { if int32(*(*uint16)(unsafe.Pointer(pSrc + 8 + uintptr(*(*int32)(unsafe.Pointer(bp + 24)))*104 + 60 + 4))&0x400>>10) == 0 || Xsqlite3IdListIndex(tls, *(*uintptr)(unsafe.Pointer(pSrc + 8 + uintptr(*(*int32)(unsafe.Pointer(bp + 24)))*104 + 72)), zName) < 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+18662, + Xsqlite3ErrorMsg(tls, pParse, ts+18709, libc.VaList(bp+16, zName)) break } @@ -87412,16 +87445,16 @@ var z uintptr switch id { case TK_ALL: - z = ts + 18699 + z = ts + 18746 break case TK_INTERSECT: - z = ts + 18709 + z = ts + 18756 break case TK_EXCEPT: - z = ts + 18719 + z = ts + 18766 break default: - z = ts + 18726 + z = ts + 18773 break } return z @@ -87431,7 +87464,7 @@ bp := tls.Alloc(8) defer tls.Free(8) - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18732, libc.VaList(bp, zUsage)) + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18779, libc.VaList(bp, zUsage)) } func generateSortTail(tls *libc.TLS, pParse uintptr, p uintptr, pSort uintptr, nColumn int32, pDest uintptr) { @@ -87457,9 +87490,9 @@ var nRefKey int32 = 0 var aOutEx uintptr = (*Select)(unsafe.Pointer(p)).FpEList + 8 - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18755, libc.VaList(bp, func() uintptr { + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18802, libc.VaList(bp, func() uintptr { if (*SortCtx)(unsafe.Pointer(pSort)).FnOBSat > 0 { - return ts + 18786 + return ts + 18833 } return ts + 1554 }())) @@ -87803,7 +87836,7 @@ } else { var z uintptr = (*ExprList_item)(unsafe.Pointer(pEList + 8 + uintptr(i)*32)).FzEName if z == uintptr(0) { - z = Xsqlite3MPrintf(tls, db, ts+18801, libc.VaList(bp+16, i+1)) + z = Xsqlite3MPrintf(tls, db, ts+18848, libc.VaList(bp+16, i+1)) } else { z = Xsqlite3DbStrDup(tls, db, z) } @@ -87903,7 +87936,7 @@ if zName != 0 && !(Xsqlite3IsTrueOrFalse(tls, zName) != 0) { zName = Xsqlite3DbStrDup(tls, db, zName) } else { - zName = Xsqlite3MPrintf(tls, db, ts+18801, libc.VaList(bp, i+1)) + zName = Xsqlite3MPrintf(tls, db, ts+18848, libc.VaList(bp, i+1)) } *(*U32)(unsafe.Pointer(bp + 56)) = U32(0) @@ -87919,7 +87952,7 @@ nName = j } } - zName = Xsqlite3MPrintf(tls, db, ts+18810, libc.VaList(bp+8, nName, zName, libc.PreIncUint32(&*(*U32)(unsafe.Pointer(bp + 56)), 1))) + zName = Xsqlite3MPrintf(tls, db, ts+18857, libc.VaList(bp+8, nName, zName, libc.PreIncUint32(&*(*U32)(unsafe.Pointer(bp + 56)), 1))) Xsqlite3ProgressCheck(tls, pParse) if *(*U32)(unsafe.Pointer(bp + 56)) > U32(3) { Xsqlite3_randomness(tls, int32(unsafe.Sizeof(U32(0))), bp+56) @@ -88002,8 +88035,6 @@ (*Column)(unsafe.Pointer(pCol)).Faffinity = Xsqlite3ExprAffinity(tls, p) if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) <= SQLITE_AFF_NONE { (*Column)(unsafe.Pointer(pCol)).Faffinity = aff - } else if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) >= SQLITE_AFF_NUMERIC && int32((*Expr)(unsafe.Pointer(p)).Fop) == TK_CAST { - (*Column)(unsafe.Pointer(pCol)).Faffinity = uint8(SQLITE_AFF_FLEXNUM) } if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) >= SQLITE_AFF_TEXT && (*Select)(unsafe.Pointer(pSelect)).FpNext != 0 { var m int32 = 0 @@ -88018,12 +88049,15 @@ } else if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) >= SQLITE_AFF_NUMERIC && m&0x02 != 0 { (*Column)(unsafe.Pointer(pCol)).Faffinity = 
uint8(SQLITE_AFF_BLOB) } + if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) >= SQLITE_AFF_NUMERIC && int32((*Expr)(unsafe.Pointer(p)).Fop) == TK_CAST { + (*Column)(unsafe.Pointer(pCol)).Faffinity = uint8(SQLITE_AFF_FLEXNUM) + } } zType = columnTypeImpl(tls, bp, p, uintptr(0), uintptr(0), uintptr(0)) if zType == uintptr(0) || int32((*Column)(unsafe.Pointer(pCol)).Faffinity) != int32(Xsqlite3AffinityType(tls, zType, uintptr(0))) { if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) == SQLITE_AFF_NUMERIC || int32((*Column)(unsafe.Pointer(pCol)).Faffinity) == SQLITE_AFF_FLEXNUM { - zType = ts + 18818 + zType = ts + 18865 } else { zType = uintptr(0) for j = 1; j < SQLITE_N_STDTYPE; j++ { @@ -88239,7 +88273,7 @@ if !((*Select)(unsafe.Pointer(p)).FpWin != 0) { goto __1 } - Xsqlite3ErrorMsg(tls, pParse, ts+18822, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+18869, 0) return __1: ; @@ -88330,7 +88364,7 @@ if !((*Select)(unsafe.Pointer(pFirstRec)).FselFlags&U32(SF_Aggregate) != 0) { goto __15 } - Xsqlite3ErrorMsg(tls, pParse, ts+18871, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+18918, 0) goto end_of_recursive_query __15: ; @@ -88350,7 +88384,7 @@ ; pSetup = (*Select)(unsafe.Pointer(pFirstRec)).FpPrior (*Select)(unsafe.Pointer(pSetup)).FpNext = uintptr(0) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18913, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18960, 0) rc = Xsqlite3Select(tls, pParse, pSetup, bp) (*Select)(unsafe.Pointer(pSetup)).FpNext = p if !(rc != 0) { @@ -88387,7 +88421,7 @@ Xsqlite3VdbeResolveLabel(tls, v, addrCont) (*Select)(unsafe.Pointer(pFirstRec)).FpPrior = uintptr(0) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18919, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18966, 0) Xsqlite3Select(tls, pParse, p, bp) (*Select)(unsafe.Pointer(pFirstRec)).FpPrior = pSetup @@ -88421,11 +88455,11 @@ p = (*Select)(unsafe.Pointer(p)).FpPrior nRow = nRow + bShowAll } - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18934, libc.VaList(bp, nRow, func() uintptr { + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18981, libc.VaList(bp, nRow, func() uintptr { if nRow == 1 { return ts + 1554 } - return ts + 18957 + return ts + 19004 }())) for p != 0 { selectInnerLoop(tls, pParse, p, -1, uintptr(0), uintptr(0), pDest, 1, 1) @@ -88526,8 +88560,8 @@ if !((*Select)(unsafe.Pointer(pPrior)).FpPrior == uintptr(0)) { goto __8 } - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18959, 0) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18974, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19006, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19021, 0) __8: ; switch int32((*Select)(unsafe.Pointer(p)).Fop) { @@ -88574,7 +88608,7 @@ ; __15: ; - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18699, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18746, 0) rc = Xsqlite3Select(tls, pParse, p, bp+16) @@ -88641,7 +88675,7 @@ pLimit = (*Select)(unsafe.Pointer(p)).FpLimit (*Select)(unsafe.Pointer(p)).FpLimit = uintptr(0) (*SelectDest)(unsafe.Pointer(bp + 64)).FeDest = op - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18993, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19040, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) rc = Xsqlite3Select(tls, pParse, p, bp+64) @@ -88703,7 +88737,7 @@ pLimit1 = (*Select)(unsafe.Pointer(p)).FpLimit (*Select)(unsafe.Pointer(p)).FpLimit = uintptr(0) (*SelectDest)(unsafe.Pointer(bp + 104)).FiSDParm = tab2 - Xsqlite3VdbeExplain(tls, pParse, uint8(1), 
ts+18993, libc.VaList(bp+8, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19040, libc.VaList(bp+8, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) rc = Xsqlite3Select(tls, pParse, p, bp+104) @@ -88856,10 +88890,10 @@ defer tls.Free(8) if (*Select)(unsafe.Pointer(p)).FselFlags&U32(SF_Values) != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+19014, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+19061, 0) } else { Xsqlite3ErrorMsg(tls, pParse, - ts+19060, + ts+19107, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) } } @@ -89113,8 +89147,8 @@ (*Select)(unsafe.Pointer(pPrior)).FpNext = uintptr(0) (*Select)(unsafe.Pointer(pPrior)).FpOrderBy = Xsqlite3ExprListDup(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pOrderBy, 0) - Xsqlite3ResolveOrderGroupBy(tls, pParse, p, (*Select)(unsafe.Pointer(p)).FpOrderBy, ts+7234) - Xsqlite3ResolveOrderGroupBy(tls, pParse, pPrior, (*Select)(unsafe.Pointer(pPrior)).FpOrderBy, ts+7234) + Xsqlite3ResolveOrderGroupBy(tls, pParse, p, (*Select)(unsafe.Pointer(p)).FpOrderBy, ts+7223) + Xsqlite3ResolveOrderGroupBy(tls, pParse, pPrior, (*Select)(unsafe.Pointer(pPrior)).FpOrderBy, ts+7223) computeLimitRegisters(tls, pParse, p, labelEnd) if (*Select)(unsafe.Pointer(p)).FiLimit != 0 && op == TK_ALL { @@ -89141,13 +89175,13 @@ Xsqlite3SelectDestInit(tls, bp+8, SRT_Coroutine, regAddrA) Xsqlite3SelectDestInit(tls, bp+48, SRT_Coroutine, regAddrB) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19142, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19189, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) addrSelectA = Xsqlite3VdbeCurrentAddr(tls, v) + 1 addr1 = Xsqlite3VdbeAddOp3(tls, v, OP_InitCoroutine, regAddrA, 0, addrSelectA) (*Select)(unsafe.Pointer(pPrior)).FiLimit = regLimitA - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19153, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19200, 0) Xsqlite3Select(tls, pParse, pPrior, bp+8) Xsqlite3VdbeEndCoroutine(tls, v, regAddrA) Xsqlite3VdbeJumpHere(tls, v, addr1) @@ -89159,7 +89193,7 @@ savedOffset = (*Select)(unsafe.Pointer(p)).FiOffset (*Select)(unsafe.Pointer(p)).FiLimit = regLimitB (*Select)(unsafe.Pointer(p)).FiOffset = 0 - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19158, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19205, 0) Xsqlite3Select(tls, pParse, p, bp+48) (*Select)(unsafe.Pointer(p)).FiLimit = savedLimit (*Select)(unsafe.Pointer(p)).FiOffset = savedOffset @@ -89347,7 +89381,8 @@ Xsqlite3VectorErrorMsg(tls, (*SubstContext)(unsafe.Pointer(pSubst)).FpParse, pCopy) } else { var db uintptr = (*Parse)(unsafe.Pointer((*SubstContext)(unsafe.Pointer(pSubst)).FpParse)).Fdb - if (*SubstContext)(unsafe.Pointer(pSubst)).FisOuterJoin != 0 && int32((*Expr)(unsafe.Pointer(pCopy)).Fop) != TK_COLUMN { + if (*SubstContext)(unsafe.Pointer(pSubst)).FisOuterJoin != 0 && + (int32((*Expr)(unsafe.Pointer(pCopy)).Fop) != TK_COLUMN || (*Expr)(unsafe.Pointer(pCopy)).FiTable != (*SubstContext)(unsafe.Pointer(pSubst)).FiNewTable) { libc.Xmemset(tls, bp, 0, uint64(unsafe.Sizeof(Expr{}))) (*Expr)(unsafe.Pointer(bp)).Fop = U8(TK_IF_NULL_ROW) (*Expr)(unsafe.Pointer(bp)).FpLeft = pCopy @@ -90246,7 +90281,7 @@ for pIdx = (*Table)(unsafe.Pointer(pTab)).FpIndex; pIdx != 0 && Xsqlite3StrICmp(tls, (*Index)(unsafe.Pointer(pIdx)).FzName, zIndexedBy) != 0; pIdx = (*Index)(unsafe.Pointer(pIdx)).FpNext { } if !(pIdx != 0) { 
- Xsqlite3ErrorMsg(tls, pParse, ts+19164, libc.VaList(bp, zIndexedBy, 0)) + Xsqlite3ErrorMsg(tls, pParse, ts+19211, libc.VaList(bp, zIndexedBy, 0)) (*Parse)(unsafe.Pointer(pParse)).FcheckSchema = U8(1) return SQLITE_ERROR } @@ -90329,7 +90364,7 @@ defer tls.Free(8) if uint32(int32(*(*uint16)(unsafe.Pointer(pFrom + 60 + 4))&0x4>>2)) != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+19182, libc.VaList(bp, (*SrcItem)(unsafe.Pointer(pFrom)).FzName)) + Xsqlite3ErrorMsg(tls, pParse, ts+19229, libc.VaList(bp, (*SrcItem)(unsafe.Pointer(pFrom)).FzName)) return 1 } return 0 @@ -90458,7 +90493,7 @@ *(*U32)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pFrom)).FpSelect + 4)) |= U32(SF_CopyCte) if uint32(int32(*(*uint16)(unsafe.Pointer(pFrom + 60 + 4))&0x2>>1)) != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+19205, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(pFrom + 88)))) + Xsqlite3ErrorMsg(tls, pParse, ts+19252, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(pFrom + 88)))) return 2 } libc.SetBitFieldPtr16Uint32(pFrom+60+4, uint32(1), 8, 0x100) @@ -90481,7 +90516,7 @@ libc.SetBitFieldPtr16Uint32(pItem+60+4, uint32(1), 6, 0x40) if (*Select)(unsafe.Pointer(pRecTerm)).FselFlags&U32(SF_Recursive) != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+19225, libc.VaList(bp+16, (*Cte)(unsafe.Pointer(pCte)).FzName)) + ts+19272, libc.VaList(bp+16, (*Cte)(unsafe.Pointer(pCte)).FzName)) return 2 } *(*U32)(unsafe.Pointer(pRecTerm + 4)) |= U32(SF_Recursive) @@ -90497,7 +90532,7 @@ pRecTerm = (*Select)(unsafe.Pointer(pRecTerm)).FpPrior } - (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19268 + (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19315 pSavedWith = (*Parse)(unsafe.Pointer(pParse)).FpWith (*Parse)(unsafe.Pointer(pParse)).FpWith = *(*uintptr)(unsafe.Pointer(bp + 48)) if (*Select)(unsafe.Pointer(pSel)).FselFlags&U32(SF_Recursive) != 0 { @@ -90523,7 +90558,7 @@ pEList = (*Select)(unsafe.Pointer(pLeft)).FpEList if (*Cte)(unsafe.Pointer(pCte)).FpCols != 0 { if pEList != 0 && (*ExprList)(unsafe.Pointer(pEList)).FnExpr != (*ExprList)(unsafe.Pointer((*Cte)(unsafe.Pointer(pCte)).FpCols)).FnExpr { - Xsqlite3ErrorMsg(tls, pParse, ts+19291, + Xsqlite3ErrorMsg(tls, pParse, ts+19338, libc.VaList(bp+24, (*Cte)(unsafe.Pointer(pCte)).FzName, (*ExprList)(unsafe.Pointer(pEList)).FnExpr, (*ExprList)(unsafe.Pointer((*Cte)(unsafe.Pointer(pCte)).FpCols)).FnExpr)) (*Parse)(unsafe.Pointer(pParse)).FpWith = pSavedWith return 2 @@ -90534,9 +90569,9 @@ Xsqlite3ColumnsFromExprList(tls, pParse, pEList, pTab+54, pTab+8) if bMayRecursive != 0 { if (*Select)(unsafe.Pointer(pSel)).FselFlags&U32(SF_Recursive) != 0 { - (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19329 + (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19376 } else { - (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19363 + (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19410 } Xsqlite3WalkSelect(tls, pWalker, pSel) } @@ -90583,7 +90618,7 @@ if (*SrcItem)(unsafe.Pointer(pFrom)).FzAlias != 0 { (*Table)(unsafe.Pointer(pTab)).FzName = Xsqlite3DbStrDup(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, (*SrcItem)(unsafe.Pointer(pFrom)).FzAlias) } else { - (*Table)(unsafe.Pointer(pTab)).FzName = Xsqlite3MPrintf(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, ts+19401, libc.VaList(bp, pFrom)) + (*Table)(unsafe.Pointer(pTab)).FzName = Xsqlite3MPrintf(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, ts+19448, libc.VaList(bp, pFrom)) } for (*Select)(unsafe.Pointer(pSel)).FpPrior != 0 { pSel = (*Select)(unsafe.Pointer(pSel)).FpPrior @@ -90695,7 +90730,7 @@ return WRC_Abort } if (*Table)(unsafe.Pointer(pTab)).FnTabRef >= U32(0xffff) { - 
Xsqlite3ErrorMsg(tls, pParse, ts+19405, + Xsqlite3ErrorMsg(tls, pParse, ts+19452, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName)) (*SrcItem)(unsafe.Pointer(pFrom)).FpTab = uintptr(0) return WRC_Abort @@ -90714,7 +90749,7 @@ if int32((*Table)(unsafe.Pointer(pTab)).FeTabType) == TABTYP_VIEW { if (*Sqlite3)(unsafe.Pointer(db)).Fflags&uint64(SQLITE_EnableView) == uint64(0) && (*Table)(unsafe.Pointer(pTab)).FpSchema != (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+1*32)).FpSchema { - Xsqlite3ErrorMsg(tls, pParse, ts+19444, + Xsqlite3ErrorMsg(tls, pParse, ts+19491, libc.VaList(bp+8, (*Table)(unsafe.Pointer(pTab)).FzName)) } (*SrcItem)(unsafe.Pointer(pFrom)).FpSelect = Xsqlite3SelectDup(tls, db, *(*uintptr)(unsafe.Pointer(pTab + 64)), 0) @@ -90838,7 +90873,7 @@ if pNew != 0 { var pX uintptr = pNew + 8 + uintptr((*ExprList)(unsafe.Pointer(pNew)).FnExpr-1)*32 - (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3MPrintf(tls, db, ts+19475, libc.VaList(bp+24, zUName)) + (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3MPrintf(tls, db, ts+19522, libc.VaList(bp+24, zUName)) libc.SetBitFieldPtr16Uint32(pX+16+4, uint32(ENAME_TAB), 0, 0x3) libc.SetBitFieldPtr16Uint32(pX+16+4, uint32(1), 7, 0x80) } @@ -90903,7 +90938,7 @@ (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3DbStrDup(tls, db, (*ExprList_item)(unsafe.Pointer(pNestedFrom+8+uintptr(j)*32)).FzEName) } else { - (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3MPrintf(tls, db, ts+19480, + (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3MPrintf(tls, db, ts+19527, libc.VaList(bp+32, zSchemaName, zTabName, zName)) } @@ -90934,9 +90969,9 @@ ; if !(tableSeen != 0) { if zTName != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+19489, libc.VaList(bp+72, zTName)) + Xsqlite3ErrorMsg(tls, pParse, ts+19536, libc.VaList(bp+72, zTName)) } else { - Xsqlite3ErrorMsg(tls, pParse, ts+19507, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+19554, 0) } } } @@ -90946,7 +90981,7 @@ } if (*Select)(unsafe.Pointer(p)).FpEList != 0 { if (*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer(p)).FpEList)).FnExpr > *(*int32)(unsafe.Pointer(db + 136 + 2*4)) { - Xsqlite3ErrorMsg(tls, pParse, ts+19527, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+19574, 0) return WRC_Abort } if elistFlags&U32(EP_HasFunc|EP_Subquery) != U32(0) { @@ -91084,7 +91119,7 @@ (*AggInfo)(unsafe.Pointer(pAggInfo)).FnColumn = (*AggInfo)(unsafe.Pointer(pAggInfo)).FnAccumulator if int32((*AggInfo)(unsafe.Pointer(pAggInfo)).FnSortingColumn) > 0 { if (*AggInfo)(unsafe.Pointer(pAggInfo)).FnColumn == 0 { - (*AggInfo)(unsafe.Pointer(pAggInfo)).FnSortingColumn = U16(0) + (*AggInfo)(unsafe.Pointer(pAggInfo)).FnSortingColumn = U16((*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer(pSelect)).FpGroupBy)).FnExpr) } else { (*AggInfo)(unsafe.Pointer(pAggInfo)).FnSortingColumn = U16(int32((*AggInfo_col)(unsafe.Pointer((*AggInfo)(unsafe.Pointer(pAggInfo)).FaCol+uintptr((*AggInfo)(unsafe.Pointer(pAggInfo)).FnColumn-1)*24)).FiSorterColumn) + 1) } @@ -91168,13 +91203,13 @@ if *(*uintptr)(unsafe.Pointer(pE + 32)) == uintptr(0) || (*ExprList)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pE + 32)))).FnExpr != 1 { Xsqlite3ErrorMsg(tls, pParse, - ts+19558, 0) + ts+19605, 0) (*AggInfo_func)(unsafe.Pointer(pFunc)).FiDistinct = -1 } else { var pKeyInfo uintptr = Xsqlite3KeyInfoFromExprList(tls, pParse, *(*uintptr)(unsafe.Pointer(pE + 32)), 0, 0) (*AggInfo_func)(unsafe.Pointer(pFunc)).FiDistAddr = Xsqlite3VdbeAddOp4(tls, v, OP_OpenEphemeral, (*AggInfo_func)(unsafe.Pointer(pFunc)).FiDistinct, 0, 0, pKeyInfo, -8) - 
Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+19609, libc.VaList(bp, (*FuncDef)(unsafe.Pointer((*AggInfo_func)(unsafe.Pointer(pFunc)).FpFunc)).FzName)) + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+19656, libc.VaList(bp, (*FuncDef)(unsafe.Pointer((*AggInfo_func)(unsafe.Pointer(pFunc)).FpFunc)).FzName)) } } @@ -91363,11 +91398,11 @@ if int32((*Parse)(unsafe.Pointer(pParse)).Fexplain) == 2 { var bCover int32 = libc.Bool32(pIdx != uintptr(0) && ((*Table)(unsafe.Pointer(pTab)).FtabFlags&U32(TF_WithoutRowid) == U32(0) || !(int32(*(*uint16)(unsafe.Pointer(pIdx + 100))&0x3>>0) == SQLITE_IDXTYPE_PRIMARYKEY))) - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+19642, + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+19689, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName, func() uintptr { if bCover != 0 { - return ts + 19654 + return ts + 19701 } return ts + 1554 }(), @@ -91695,7 +91730,7 @@ goto __7 } Xsqlite3ErrorMsg(tls, pParse, - ts+19677, + ts+19724, libc.VaList(bp, func() uintptr { if (*SrcItem)(unsafe.Pointer(p0)).FzAlias != 0 { return (*SrcItem)(unsafe.Pointer(p0)).FzAlias @@ -91756,7 +91791,7 @@ if !(int32((*Table)(unsafe.Pointer(pTab)).FnCol) != (*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer(pSub)).FpEList)).FnExpr) { goto __15 } - Xsqlite3ErrorMsg(tls, pParse, ts+19731, + Xsqlite3ErrorMsg(tls, pParse, ts+19778, libc.VaList(bp+8, int32((*Table)(unsafe.Pointer(pTab)).FnCol), (*Table)(unsafe.Pointer(pTab)).FzName, (*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer(pSub)).FpEList)).FnExpr)) goto select_end __15: @@ -91898,7 +91933,7 @@ (*SrcItem)(unsafe.Pointer(pItem1)).FaddrFillSub = addrTop Xsqlite3SelectDestInit(tls, bp+96, SRT_Coroutine, (*SrcItem)(unsafe.Pointer(pItem1)).FregReturn) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19771, libc.VaList(bp+32, pItem1)) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19818, libc.VaList(bp+32, pItem1)) Xsqlite3Select(tls, pParse, pSub1, bp+96) (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pItem1)).FpTab)).FnRowLogEst = (*Select)(unsafe.Pointer(pSub1)).FnSelectRow libc.SetBitFieldPtr16Uint32(pItem1+60+4, uint32(1), 5, 0x20) @@ -91957,7 +91992,7 @@ ; Xsqlite3SelectDestInit(tls, bp+96, SRT_EphemTab, (*SrcItem)(unsafe.Pointer(pItem1)).FiCursor) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19786, libc.VaList(bp+40, pItem1)) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19833, libc.VaList(bp+40, pItem1)) Xsqlite3Select(tls, pParse, pSub1, bp+96) (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pItem1)).FpTab)).FnRowLogEst = (*Select)(unsafe.Pointer(pSub1)).FnSelectRow if !(onceAddr != 0) { @@ -92428,9 +92463,9 @@ explainTempTable(tls, pParse, func() uintptr { if (*DistinctCtx)(unsafe.Pointer(bp+136)).FisTnct != 0 && (*Select)(unsafe.Pointer(p)).FselFlags&U32(SF_Distinct) == U32(0) { - return ts + 19802 + return ts + 19849 } - return ts + 19811 + return ts + 19858 }()) groupBySort = 1 @@ -92781,7 +92816,7 @@ if !(int32((*DistinctCtx)(unsafe.Pointer(bp+136)).FeTnctType) == WHERE_DISTINCT_UNORDERED) { goto __146 } - explainTempTable(tls, pParse, ts+19802) + explainTempTable(tls, pParse, ts+19849) __146: ; if !((*SortCtx)(unsafe.Pointer(bp+48)).FpOrderBy != 0) { @@ -92886,7 +92921,7 @@ } Xsqlite3_free(tls, (*TabResult)(unsafe.Pointer(p)).FzErrMsg) (*TabResult)(unsafe.Pointer(p)).FzErrMsg = Xsqlite3_mprintf(tls, - ts+19820, 0) + ts+19867, 0) (*TabResult)(unsafe.Pointer(p)).Frc = SQLITE_ERROR return 1 __11: @@ -93119,7 +93154,7 @@ if !((*Token)(unsafe.Pointer(pName2)).Fn > uint32(0)) { goto __3 } - Xsqlite3ErrorMsg(tls, pParse, ts+19885, 
0) + Xsqlite3ErrorMsg(tls, pParse, ts+19932, 0) goto trigger_cleanup __3: ; @@ -93163,7 +93198,7 @@ goto trigger_cleanup __8: ; - Xsqlite3FixInit(tls, bp+40, pParse, iDb, ts+19931, *(*uintptr)(unsafe.Pointer(bp + 32))) + Xsqlite3FixInit(tls, bp+40, pParse, iDb, ts+19978, *(*uintptr)(unsafe.Pointer(bp + 32))) if !(Xsqlite3FixSrcList(tls, bp+40, pTableName) != 0) { goto __9 } @@ -93181,7 +93216,7 @@ if !(int32((*Table)(unsafe.Pointer(pTab)).FeTabType) == TABTYP_VTAB) { goto __11 } - Xsqlite3ErrorMsg(tls, pParse, ts+19939, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+19986, 0) goto trigger_orphan_error __11: ; @@ -93193,7 +93228,7 @@ goto trigger_cleanup __12: ; - if !(Xsqlite3CheckObjectName(tls, pParse, zName, ts+19931, (*Table)(unsafe.Pointer(pTab)).FzName) != 0) { + if !(Xsqlite3CheckObjectName(tls, pParse, zName, ts+19978, (*Table)(unsafe.Pointer(pTab)).FzName) != 0) { goto __13 } goto trigger_cleanup @@ -93208,11 +93243,12 @@ if !!(noErr != 0) { goto __16 } - Xsqlite3ErrorMsg(tls, pParse, ts+19980, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(bp + 32)))) + Xsqlite3ErrorMsg(tls, pParse, ts+20027, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(bp + 32)))) goto __17 __16: ; Xsqlite3CodeVerifySchema(tls, pParse, iDb) + __17: ; goto trigger_cleanup @@ -93223,19 +93259,19 @@ if !(Xsqlite3_strnicmp(tls, (*Table)(unsafe.Pointer(pTab)).FzName, ts+6381, 7) == 0) { goto __18 } - Xsqlite3ErrorMsg(tls, pParse, ts+20006, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+20053, 0) goto trigger_cleanup __18: ; if !(int32((*Table)(unsafe.Pointer(pTab)).FeTabType) == TABTYP_VIEW && tr_tm != TK_INSTEAD) { goto __19 } - Xsqlite3ErrorMsg(tls, pParse, ts+20044, + Xsqlite3ErrorMsg(tls, pParse, ts+20091, libc.VaList(bp+8, func() uintptr { if tr_tm == TK_BEFORE { - return ts + 20081 + return ts + 20128 } - return ts + 20088 + return ts + 20135 }(), pTableName+8)) goto trigger_orphan_error __19: @@ -93244,7 +93280,7 @@ goto __20 } Xsqlite3ErrorMsg(tls, pParse, - ts+20094, libc.VaList(bp+24, pTableName+8)) + ts+20141, libc.VaList(bp+24, pTableName+8)) goto trigger_orphan_error __20: ; @@ -93393,7 +93429,7 @@ __3: ; Xsqlite3TokenInit(tls, bp+56, (*Trigger)(unsafe.Pointer(pTrig)).FzName) - Xsqlite3FixInit(tls, bp+72, pParse, iDb, ts+19931, bp+56) + Xsqlite3FixInit(tls, bp+72, pParse, iDb, ts+19978, bp+56) if !(Xsqlite3FixTriggerStep(tls, bp+72, (*Trigger)(unsafe.Pointer(pTrig)).Fstep_list) != 0 || Xsqlite3FixExpr(tls, bp+72, (*Trigger)(unsafe.Pointer(pTrig)).FpWhen) != 0) { goto __4 @@ -93426,7 +93462,7 @@ goto __12 } Xsqlite3ErrorMsg(tls, pParse, - ts+20140, + ts+20187, libc.VaList(bp, (*Trigger)(unsafe.Pointer(pTrig)).FzName, (*TriggerStep)(unsafe.Pointer(pStep)).FzTarget)) goto triggerfinish_cleanup __12: @@ -93451,13 +93487,13 @@ z = Xsqlite3DbStrNDup(tls, db, (*Token)(unsafe.Pointer(pAll)).Fz, uint64((*Token)(unsafe.Pointer(pAll)).Fn)) Xsqlite3NestedParse(tls, pParse, - ts+20188, + ts+20235, libc.VaList(bp+16, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName, zName, (*Trigger)(unsafe.Pointer(pTrig)).Ftable, z)) Xsqlite3DbFree(tls, db, z) Xsqlite3ChangeCookie(tls, pParse, iDb) Xsqlite3VdbeAddParseSchemaOp(tls, v, iDb, - Xsqlite3MPrintf(tls, db, ts+20263, libc.VaList(bp+48, zName)), uint16(0)) + Xsqlite3MPrintf(tls, db, ts+20310, libc.VaList(bp+48, zName)), uint16(0)) __7: ; __6: @@ -93713,7 +93749,7 @@ if !!(noErr != 0) { goto __9 } - Xsqlite3ErrorMsg(tls, pParse, ts+20292, libc.VaList(bp, pName+8)) + Xsqlite3ErrorMsg(tls, pParse, ts+20339, libc.VaList(bp, pName+8)) goto __10 __9: 
Xsqlite3CodeVerifyNamedSchema(tls, pParse, zDb) @@ -93766,7 +93802,7 @@ if libc.AssignUintptr(&v, Xsqlite3GetVdbe(tls, pParse)) != uintptr(0) { Xsqlite3NestedParse(tls, pParse, - ts+20312, + ts+20359, libc.VaList(bp, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName, (*Trigger)(unsafe.Pointer(pTrigger)).FzName)) Xsqlite3ChangeCookie(tls, pParse, iDb) Xsqlite3VdbeAddOp4(tls, v, OP_DropTrigger, iDb, 0, 0, (*Trigger)(unsafe.Pointer(pTrigger)).FzName, 0) @@ -93880,12 +93916,12 @@ goto __15 } Xsqlite3ErrorMsg(tls, pParse, - ts+20374, + ts+20421, libc.VaList(bp, func() uintptr { if op == TK_DELETE { - return ts + 20422 + return ts + 20469 } - return ts + 20429 + return ts + 20476 }())) __15: ; @@ -93999,7 +94035,7 @@ if int32((*Expr)(unsafe.Pointer((*Expr)(unsafe.Pointer(pTerm)).FpRight)).Fop) != TK_ASTERISK { return 0 } - Xsqlite3ErrorMsg(tls, pParse, ts+20436, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+20483, 0) return 1 } @@ -94065,7 +94101,7 @@ } Xsqlite3ExprListDelete(tls, db, (*Select)(unsafe.Pointer(bp)).FpEList) pNew = sqlite3ExpandReturning(tls, pParse, (*Returning)(unsafe.Pointer(pReturning)).FpReturnEL, pTab) - if !(int32((*Sqlite3)(unsafe.Pointer(db)).FmallocFailed) != 0) { + if (*Parse)(unsafe.Pointer(pParse)).FnErr == 0 { libc.Xmemset(tls, bp+240, 0, uint64(unsafe.Sizeof(NameContext{}))) if (*Returning)(unsafe.Pointer(pReturning)).FnRetCol == 0 { (*Returning)(unsafe.Pointer(pReturning)).FnRetCol = (*ExprList)(unsafe.Pointer(pNew)).FnExpr @@ -94229,7 +94265,7 @@ if v != 0 { if (*Trigger)(unsafe.Pointer(pTrigger)).FzName != 0 { Xsqlite3VdbeChangeP4(tls, v, -1, - Xsqlite3MPrintf(tls, db, ts+20478, libc.VaList(bp, (*Trigger)(unsafe.Pointer(pTrigger)).FzName)), -6) + Xsqlite3MPrintf(tls, db, ts+20525, libc.VaList(bp, (*Trigger)(unsafe.Pointer(pTrigger)).FzName)), -6) } if (*Trigger)(unsafe.Pointer(pTrigger)).FpWhen != 0 { @@ -94822,7 +94858,7 @@ } Xsqlite3ErrorMsg(tls, pParse, - ts+20492, + ts+20539, libc.VaList(bp, (*Column)(unsafe.Pointer((*Table)(unsafe.Pointer(pTab)).FaCol+uintptr(j)*24)).FzCnName)) goto update_cleanup __27: @@ -94854,7 +94890,7 @@ iRowidExpr = i goto __30 __29: - Xsqlite3ErrorMsg(tls, pParse, ts+20528, libc.VaList(bp+8, (*ExprList_item)(unsafe.Pointer(pChanges+8+uintptr(i)*32)).FzEName)) + Xsqlite3ErrorMsg(tls, pParse, ts+20575, libc.VaList(bp+8, (*ExprList_item)(unsafe.Pointer(pChanges+8+uintptr(i)*32)).FzEName)) (*Parse)(unsafe.Pointer(pParse)).FcheckSchema = U8(1) goto update_cleanup __30: @@ -95180,7 +95216,12 @@ goto __77 __76: flags = WHERE_ONEPASS_DESIRED - if !(!(int32((*Parse)(unsafe.Pointer(pParse)).Fnested) != 0) && !(pTrigger != 0) && !(hasFK != 0) && !(chngKey != 0) && !(*(*int32)(unsafe.Pointer(bp + 104)) != 0)) { + if !(!(int32((*Parse)(unsafe.Pointer(pParse)).Fnested) != 0) && + !(pTrigger != 0) && + !(hasFK != 0) && + !(chngKey != 0) && + !(*(*int32)(unsafe.Pointer(bp + 104)) != 0) && + (*NameContext)(unsafe.Pointer(bp+40)).FncFlags&NC_Subquery == 0) { goto __78 } flags = flags | WHERE_ONEPASS_MULTIROW @@ -95734,7 +95775,7 @@ if !(regRowCount != 0) { goto __169 } - Xsqlite3CodeChangeCount(tls, v, regRowCount, ts+20547) + Xsqlite3CodeChangeCount(tls, v, regRowCount, ts+20594) __169: ; update_cleanup: @@ -96040,10 +96081,10 @@ if nClause == 0 && (*Upsert)(unsafe.Pointer(pUpsert)).FpNextUpsert == uintptr(0) { *(*uint8)(unsafe.Pointer(bp + 216)) = uint8(0) } else { - Xsqlite3_snprintf(tls, int32(unsafe.Sizeof([16]uint8{})), bp+216, ts+20560, libc.VaList(bp, nClause+1)) + Xsqlite3_snprintf(tls, 
int32(unsafe.Sizeof([16]uint8{})), bp+216, ts+20607, libc.VaList(bp, nClause+1)) } Xsqlite3ErrorMsg(tls, pParse, - ts+20564, libc.VaList(bp+8, bp+216)) + ts+20611, libc.VaList(bp+8, bp+216)) return SQLITE_ERROR } @@ -96166,7 +96207,7 @@ var zSubSql uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp)), 0) if zSubSql != 0 && - (libc.Xstrncmp(tls, zSubSql, ts+20637, uint64(3)) == 0 || libc.Xstrncmp(tls, zSubSql, ts+20641, uint64(3)) == 0) { + (libc.Xstrncmp(tls, zSubSql, ts+20684, uint64(3)) == 0 || libc.Xstrncmp(tls, zSubSql, ts+20688, uint64(3)) == 0) { rc = execSql(tls, db, pzErrMsg, zSubSql) if rc != SQLITE_OK { break @@ -96314,14 +96355,14 @@ if !!(int32((*Sqlite3)(unsafe.Pointer(db)).FautoCommit) != 0) { goto __1 } - Xsqlite3SetString(tls, pzErrMsg, db, ts+20645) + Xsqlite3SetString(tls, pzErrMsg, db, ts+20692) return SQLITE_ERROR __1: ; if !((*Sqlite3)(unsafe.Pointer(db)).FnVdbeActive > 1) { goto __2 } - Xsqlite3SetString(tls, pzErrMsg, db, ts+20685) + Xsqlite3SetString(tls, pzErrMsg, db, ts+20732) return SQLITE_ERROR __2: ; @@ -96332,7 +96373,7 @@ if !(Xsqlite3_value_type(tls, pOut) != SQLITE_TEXT) { goto __5 } - Xsqlite3SetString(tls, pzErrMsg, db, ts+20728) + Xsqlite3SetString(tls, pzErrMsg, db, ts+20775) return SQLITE_ERROR __5: ; @@ -96360,7 +96401,7 @@ isMemDb = Xsqlite3PagerIsMemdb(tls, Xsqlite3BtreePager(tls, pMain)) nDb = (*Sqlite3)(unsafe.Pointer(db)).FnDb - rc = execSqlF(tls, db, pzErrMsg, ts+20746, libc.VaList(bp, zOut)) + rc = execSqlF(tls, db, pzErrMsg, ts+20793, libc.VaList(bp, zOut)) (*Sqlite3)(unsafe.Pointer(db)).FopenFlags = saved_openFlags if !(rc != SQLITE_OK) { goto __6 @@ -96380,7 +96421,7 @@ goto __8 } rc = SQLITE_ERROR - Xsqlite3SetString(tls, pzErrMsg, db, ts+20769) + Xsqlite3SetString(tls, pzErrMsg, db, ts+20816) goto end_of_vacuum __8: ; @@ -96440,7 +96481,7 @@ (*Sqlite3)(unsafe.Pointer(db)).Finit.FiDb = U8(nDb) rc = execSqlF(tls, db, pzErrMsg, - ts+20796, + ts+20843, libc.VaList(bp+8, zDbMain)) if !(rc != SQLITE_OK) { goto __13 @@ -96449,7 +96490,7 @@ __13: ; rc = execSqlF(tls, db, pzErrMsg, - ts+20904, + ts+20951, libc.VaList(bp+16, zDbMain)) if !(rc != SQLITE_OK) { goto __14 @@ -96460,7 +96501,7 @@ (*Sqlite3)(unsafe.Pointer(db)).Finit.FiDb = U8(0) rc = execSqlF(tls, db, pzErrMsg, - ts+20958, + ts+21005, libc.VaList(bp+24, zDbMain)) *(*U32)(unsafe.Pointer(db + 44)) &= libc.Uint32FromInt32(libc.CplInt32(DBFLAG_Vacuum)) @@ -96471,7 +96512,7 @@ __15: ; rc = execSqlF(tls, db, pzErrMsg, - ts+21109, + ts+21156, libc.VaList(bp+32, zDbMain)) if !(rc != 0) { goto __16 @@ -96900,11 +96941,11 @@ if pEnd != 0 { (*Parse)(unsafe.Pointer(pParse)).FsNameToken.Fn = uint32(int32((int64((*Token)(unsafe.Pointer(pEnd)).Fz)-int64((*Parse)(unsafe.Pointer(pParse)).FsNameToken.Fz))/1)) + (*Token)(unsafe.Pointer(pEnd)).Fn } - zStmt = Xsqlite3MPrintf(tls, db, ts+21239, libc.VaList(bp, pParse+272)) + zStmt = Xsqlite3MPrintf(tls, db, ts+21286, libc.VaList(bp, pParse+272)) iDb = Xsqlite3SchemaToIndex(tls, db, (*Table)(unsafe.Pointer(pTab)).FpSchema) Xsqlite3NestedParse(tls, pParse, - ts+21263, + ts+21310, libc.VaList(bp+8, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName, (*Table)(unsafe.Pointer(pTab)).FzName, (*Table)(unsafe.Pointer(pTab)).FzName, @@ -96914,7 +96955,7 @@ Xsqlite3ChangeCookie(tls, pParse, iDb) Xsqlite3VdbeAddOp0(tls, v, OP_Expire) - zWhere = Xsqlite3MPrintf(tls, db, ts+21362, libc.VaList(bp+48, (*Table)(unsafe.Pointer(pTab)).FzName, zStmt)) + zWhere = Xsqlite3MPrintf(tls, db, ts+21409, libc.VaList(bp+48, 
(*Table)(unsafe.Pointer(pTab)).FzName, zStmt)) Xsqlite3VdbeAddParseSchemaOp(tls, v, iDb, zWhere, uint16(0)) Xsqlite3DbFree(tls, db, zStmt) @@ -96975,7 +97016,7 @@ for pCtx = (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx; pCtx != 0; pCtx = (*VtabCtx)(unsafe.Pointer(pCtx)).FpPrior { if (*VtabCtx)(unsafe.Pointer(pCtx)).FpTab == pTab { *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, - ts+21381, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName)) + ts+21428, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName)) return SQLITE_LOCKED } } @@ -97003,9 +97044,11 @@ (*VtabCtx)(unsafe.Pointer(bp + 32)).FpPrior = (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx (*VtabCtx)(unsafe.Pointer(bp + 32)).FbDeclared = 0 (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx = bp + 32 + (*Table)(unsafe.Pointer(pTab)).FnTabRef++ rc = (*struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, uintptr, uintptr) int32 })(unsafe.Pointer(&struct{ uintptr }{xConstruct})).f(tls, db, (*Module)(unsafe.Pointer(pMod)).FpAux, nArg, azArg, pVTable+16, bp+64) + Xsqlite3DeleteTable(tls, db, pTab) (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx = (*VtabCtx)(unsafe.Pointer(bp + 32)).FpPrior if rc == SQLITE_NOMEM { Xsqlite3OomFault(tls, db) @@ -97013,7 +97056,7 @@ if SQLITE_OK != rc { if *(*uintptr)(unsafe.Pointer(bp + 64)) == uintptr(0) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+21423, libc.VaList(bp+8, zModuleName)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+21470, libc.VaList(bp+8, zModuleName)) } else { *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+3663, libc.VaList(bp+16, *(*uintptr)(unsafe.Pointer(bp + 64)))) Xsqlite3_free(tls, *(*uintptr)(unsafe.Pointer(bp + 64))) @@ -97025,7 +97068,7 @@ (*Module)(unsafe.Pointer(pMod)).FnRefModule++ (*VTable)(unsafe.Pointer(pVTable)).FnRef = 1 if (*VtabCtx)(unsafe.Pointer(bp+32)).FbDeclared == 0 { - var zFormat uintptr = ts + 21453 + var zFormat uintptr = ts + 21500 *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, zFormat, libc.VaList(bp+24, (*Table)(unsafe.Pointer(pTab)).FzName)) Xsqlite3VtabUnlock(tls, pVTable) rc = SQLITE_ERROR @@ -97099,7 +97142,7 @@ if !(pMod != 0) { var zModule uintptr = *(*uintptr)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pTab + 64 + 8)))) - Xsqlite3ErrorMsg(tls, pParse, ts+21499, libc.VaList(bp, zModule)) + Xsqlite3ErrorMsg(tls, pParse, ts+21546, libc.VaList(bp, zModule)) rc = SQLITE_ERROR } else { *(*uintptr)(unsafe.Pointer(bp + 16)) = uintptr(0) @@ -97157,7 +97200,7 @@ pMod = Xsqlite3HashFind(tls, db+576, zMod) if pMod == uintptr(0) || (*Sqlite3_module)(unsafe.Pointer((*Module)(unsafe.Pointer(pMod)).FpModule)).FxCreate == uintptr(0) || (*Sqlite3_module)(unsafe.Pointer((*Module)(unsafe.Pointer(pMod)).FpModule)).FxDestroy == uintptr(0) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+21499, libc.VaList(bp, zMod)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+21546, libc.VaList(bp, zMod)) rc = SQLITE_ERROR } else { rc = vtabCallConstructor(tls, db, pTab, pMod, (*Sqlite3_module)(unsafe.Pointer((*Module)(unsafe.Pointer(pMod)).FpModule)).FxCreate, pzErr) @@ -97191,7 +97234,7 @@ if !(pCtx != 0) || (*VtabCtx)(unsafe.Pointer(pCtx)).FbDeclared != 0 { Xsqlite3Error(tls, db, SQLITE_MISUSE) Xsqlite3_mutex_leave(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) - return Xsqlite3MisuseError(tls, 151030) + return Xsqlite3MisuseError(tls, 151102) } pTab = (*VtabCtx)(unsafe.Pointer(pCtx)).FpTab @@ -97644,7 +97687,7 @@ Xsqlite3_mutex_enter(tls, 
(*Sqlite3)(unsafe.Pointer(db)).Fmutex) p = (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx if !(p != 0) { - rc = Xsqlite3MisuseError(tls, 151521) + rc = Xsqlite3MisuseError(tls, 151593) } else { ap = va switch op { @@ -97671,7 +97714,7 @@ fallthrough default: { - rc = Xsqlite3MisuseError(tls, 151539) + rc = Xsqlite3MisuseError(tls, 151611) break } @@ -97902,7 +97945,7 @@ func explainIndexColumnName(tls *libc.TLS, pIdx uintptr, i int32) uintptr { i = int32(*(*I16)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx)).FaiColumn + uintptr(i)*2))) if i == -2 { - return ts + 21518 + return ts + 21565 } if i == -1 { return ts + 16267 @@ -97914,11 +97957,11 @@ var i int32 if bAnd != 0 { - Xsqlite3_str_append(tls, pStr, ts+21525, 5) + Xsqlite3_str_append(tls, pStr, ts+21572, 5) } if nTerm > 1 { - Xsqlite3_str_append(tls, pStr, ts+21531, 1) + Xsqlite3_str_append(tls, pStr, ts+21578, 1) } for i = 0; i < nTerm; i++ { if i != 0 { @@ -97933,7 +97976,7 @@ Xsqlite3_str_append(tls, pStr, zOp, 1) if nTerm > 1 { - Xsqlite3_str_append(tls, pStr, ts+21531, 1) + Xsqlite3_str_append(tls, pStr, ts+21578, 1) } for i = 0; i < nTerm; i++ { if i != 0 { @@ -97959,27 +98002,27 @@ if int32(nEq) == 0 && (*WhereLoop)(unsafe.Pointer(pLoop)).FwsFlags&U32(WHERE_BTM_LIMIT|WHERE_TOP_LIMIT) == U32(0) { return } - Xsqlite3_str_append(tls, pStr, ts+21533, 2) + Xsqlite3_str_append(tls, pStr, ts+21580, 2) for i = 0; i < int32(nEq); i++ { var z uintptr = explainIndexColumnName(tls, pIndex, i) if i != 0 { - Xsqlite3_str_append(tls, pStr, ts+21525, 5) + Xsqlite3_str_append(tls, pStr, ts+21572, 5) } Xsqlite3_str_appendf(tls, pStr, func() uintptr { if i >= int32(nSkip) { - return ts + 21536 + return ts + 21583 } - return ts + 21541 + return ts + 21588 }(), libc.VaList(bp, z)) } j = i if (*WhereLoop)(unsafe.Pointer(pLoop)).FwsFlags&U32(WHERE_BTM_LIMIT) != 0 { - explainAppendTerm(tls, pStr, pIndex, int32(*(*U16)(unsafe.Pointer(pLoop + 24 + 2))), j, i, ts+21549) + explainAppendTerm(tls, pStr, pIndex, int32(*(*U16)(unsafe.Pointer(pLoop + 24 + 2))), j, i, ts+21596) i = 1 } if (*WhereLoop)(unsafe.Pointer(pLoop)).FwsFlags&U32(WHERE_TOP_LIMIT) != 0 { - explainAppendTerm(tls, pStr, pIndex, int32(*(*U16)(unsafe.Pointer(pLoop + 24 + 4))), j, i, ts+21551) + explainAppendTerm(tls, pStr, pIndex, int32(*(*U16)(unsafe.Pointer(pLoop + 24 + 4))), j, i, ts+21598) } Xsqlite3_str_append(tls, pStr, ts+4957, 1) } @@ -98022,11 +98065,11 @@ Xsqlite3StrAccumInit(tls, bp+64, db, bp+96, int32(unsafe.Sizeof([100]uint8{})), SQLITE_MAX_LENGTH) (*StrAccum)(unsafe.Pointer(bp + 64)).FprintfFlags = U8(SQLITE_PRINTF_INTERNAL) - Xsqlite3_str_appendf(tls, bp+64, ts+21553, libc.VaList(bp, func() uintptr { + Xsqlite3_str_appendf(tls, bp+64, ts+21600, libc.VaList(bp, func() uintptr { if isSearch != 0 { - return ts + 21559 + return ts + 21606 } - return ts + 21566 + return ts + 21613 }(), pItem)) if flags&U32(WHERE_IPK|WHERE_VIRTUALTABLE) == U32(0) { var zFmt uintptr = uintptr(0) @@ -98039,40 +98082,40 @@ zFmt = ts + 10976 } } else if flags&U32(WHERE_PARTIALIDX) != 0 { - zFmt = ts + 21571 + zFmt = ts + 21618 } else if flags&U32(WHERE_AUTO_INDEX) != 0 { - zFmt = ts + 21604 + zFmt = ts + 21651 } else if flags&U32(WHERE_IDX_ONLY) != 0 { - zFmt = ts + 21629 + zFmt = ts + 21676 } else { - zFmt = ts + 21647 + zFmt = ts + 21694 } if zFmt != 0 { - Xsqlite3_str_append(tls, bp+64, ts+21656, 7) + Xsqlite3_str_append(tls, bp+64, ts+21703, 7) Xsqlite3_str_appendf(tls, bp+64, zFmt, libc.VaList(bp+16, (*Index)(unsafe.Pointer(pIdx)).FzName)) explainIndexRange(tls, bp+64, pLoop) } } else if 
flags&U32(WHERE_IPK) != U32(0) && flags&U32(WHERE_CONSTRAINT) != U32(0) { var cRangeOp uint8 var zRowid uintptr = ts + 16267 - Xsqlite3_str_appendf(tls, bp+64, ts+21664, libc.VaList(bp+24, zRowid)) + Xsqlite3_str_appendf(tls, bp+64, ts+21711, libc.VaList(bp+24, zRowid)) if flags&U32(WHERE_COLUMN_EQ|WHERE_COLUMN_IN) != 0 { cRangeOp = uint8('=') } else if flags&U32(WHERE_BOTH_LIMIT) == U32(WHERE_BOTH_LIMIT) { - Xsqlite3_str_appendf(tls, bp+64, ts+21695, libc.VaList(bp+32, zRowid)) + Xsqlite3_str_appendf(tls, bp+64, ts+21742, libc.VaList(bp+32, zRowid)) cRangeOp = uint8('<') } else if flags&U32(WHERE_BTM_LIMIT) != 0 { cRangeOp = uint8('>') } else { cRangeOp = uint8('<') } - Xsqlite3_str_appendf(tls, bp+64, ts+21705, libc.VaList(bp+40, int32(cRangeOp))) + Xsqlite3_str_appendf(tls, bp+64, ts+21752, libc.VaList(bp+40, int32(cRangeOp))) } else if flags&U32(WHERE_VIRTUALTABLE) != U32(0) { - Xsqlite3_str_appendf(tls, bp+64, ts+21710, + Xsqlite3_str_appendf(tls, bp+64, ts+21757, libc.VaList(bp+48, *(*int32)(unsafe.Pointer(pLoop + 24)), *(*uintptr)(unsafe.Pointer(pLoop + 24 + 16)))) } if int32((*SrcItem)(unsafe.Pointer(pItem)).Ffg.Fjointype)&JT_LEFT != 0 { - Xsqlite3_str_appendf(tls, bp+64, ts+21737, 0) + Xsqlite3_str_appendf(tls, bp+64, ts+21784, 0) } zMsg = Xsqlite3StrAccumFinish(tls, bp+64) @@ -98104,22 +98147,22 @@ Xsqlite3StrAccumInit(tls, bp+24, db, bp+56, int32(unsafe.Sizeof([100]uint8{})), SQLITE_MAX_LENGTH) (*StrAccum)(unsafe.Pointer(bp + 24)).FprintfFlags = U8(SQLITE_PRINTF_INTERNAL) - Xsqlite3_str_appendf(tls, bp+24, ts+21748, libc.VaList(bp, pItem)) + Xsqlite3_str_appendf(tls, bp+24, ts+21795, libc.VaList(bp, pItem)) pLoop = (*WhereLevel)(unsafe.Pointer(pLevel)).FpWLoop if (*WhereLoop)(unsafe.Pointer(pLoop)).FwsFlags&U32(WHERE_IPK) != 0 { var pTab uintptr = (*SrcItem)(unsafe.Pointer(pItem)).FpTab if int32((*Table)(unsafe.Pointer(pTab)).FiPKey) >= 0 { - Xsqlite3_str_appendf(tls, bp+24, ts+21536, libc.VaList(bp+8, (*Column)(unsafe.Pointer((*Table)(unsafe.Pointer(pTab)).FaCol+uintptr((*Table)(unsafe.Pointer(pTab)).FiPKey)*24)).FzCnName)) + Xsqlite3_str_appendf(tls, bp+24, ts+21583, libc.VaList(bp+8, (*Column)(unsafe.Pointer((*Table)(unsafe.Pointer(pTab)).FaCol+uintptr((*Table)(unsafe.Pointer(pTab)).FiPKey)*24)).FzCnName)) } else { - Xsqlite3_str_appendf(tls, bp+24, ts+21769, 0) + Xsqlite3_str_appendf(tls, bp+24, ts+21816, 0) } } else { for i = int32((*WhereLoop)(unsafe.Pointer(pLoop)).FnSkip); i < int32(*(*U16)(unsafe.Pointer(pLoop + 24))); i++ { var z uintptr = explainIndexColumnName(tls, *(*uintptr)(unsafe.Pointer(pLoop + 24 + 8)), i) if i > int32((*WhereLoop)(unsafe.Pointer(pLoop)).FnSkip) { - Xsqlite3_str_append(tls, bp+24, ts+21525, 5) + Xsqlite3_str_append(tls, bp+24, ts+21572, 5) } - Xsqlite3_str_appendf(tls, bp+24, ts+21536, libc.VaList(bp+16, z)) + Xsqlite3_str_appendf(tls, bp+24, ts+21583, libc.VaList(bp+16, z)) } } Xsqlite3_str_append(tls, bp+24, ts+4957, 1) @@ -99716,7 +99759,7 @@ ; __126: ; - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21777, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21824, 0) ii = 0 __135: if !(ii < (*WhereClause)(unsafe.Pointer(pOrWc)).FnTerm) { @@ -99744,7 +99787,7 @@ pOrExpr = pAndExpr __140: ; - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21792, libc.VaList(bp, ii+1)) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21839, libc.VaList(bp, ii+1)) pSubWInfo = Xsqlite3WhereBegin(tls, pParse, pOrTab, pOrExpr, uintptr(0), uintptr(0), uintptr(0), uint16(WHERE_OR_SUBCLAUSE), iCovCur) @@ -100262,7 +100305,7 @@ var mAll Bitmask = uint64(0) var k int32 - 
Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21801, libc.VaList(bp, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pTabItem)).FpTab)).FzName)) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21848, libc.VaList(bp, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pTabItem)).FpTab)).FzName)) for k = 0; k < iLevel; k++ { var iIdxCur int32 @@ -100623,7 +100666,7 @@ {FzOp: ts + 16116, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_MATCH)}, {FzOp: ts + 15447, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_GLOB)}, {FzOp: ts + 14967, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_LIKE)}, - {FzOp: ts + 21815, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_REGEXP)}, + {FzOp: ts + 21862, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_REGEXP)}, } func transferJoinMarkings(tls *libc.TLS, pDerived uintptr, pBase uintptr) { @@ -101113,12 +101156,12 @@ extraRight = x - uint64(1) if prereqAll>>1 >= x { - Xsqlite3ErrorMsg(tls, pParse, ts+21822, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+21869, 0) return } } else if prereqAll>>1 >= x { if (*SrcList)(unsafe.Pointer(pSrc)).FnSrc > 0 && int32((*SrcItem)(unsafe.Pointer(pSrc+8)).Ffg.Fjointype)&JT_LTORJ != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+21822, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+21869, 0) return } *(*U32)(unsafe.Pointer(pExpr + 4)) &= libc.Uint32FromInt32(libc.CplInt32(EP_InnerON)) @@ -101197,7 +101240,7 @@ !((*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_OuterON) != U32(0)) && 0 == Xsqlite3ExprCanBeNull(tls, pLeft) { (*Expr)(unsafe.Pointer(pExpr)).Fop = U8(TK_TRUEFALSE) - *(*uintptr)(unsafe.Pointer(pExpr + 8)) = ts + 6769 + *(*uintptr)(unsafe.Pointer(pExpr + 8)) = ts + 7699 *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_IsFalse) (*WhereTerm)(unsafe.Pointer(pTerm)).FprereqAll = uint64(0) (*WhereTerm)(unsafe.Pointer(pTerm)).FeOperator = U16(0) @@ -101291,7 +101334,7 @@ } zCollSeqName = func() uintptr { if *(*int32)(unsafe.Pointer(bp + 20)) != 0 { - return ts + 21863 + return ts + 21910 } return uintptr(unsafe.Pointer(&Xsqlite3StrBINARY)) }() @@ -101667,7 +101710,7 @@ k++ } if k >= int32((*Table)(unsafe.Pointer(pTab)).FnCol) { - Xsqlite3ErrorMsg(tls, pParse, ts+21870, + Xsqlite3ErrorMsg(tls, pParse, ts+21917, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName, j)) return } @@ -101683,7 +101726,7 @@ pRhs = Xsqlite3PExpr(tls, pParse, TK_UPLUS, Xsqlite3ExprDup(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, (*ExprList_item)(unsafe.Pointer(pArgs+8+uintptr(j)*32)).FpExpr, 0), uintptr(0)) pTerm = Xsqlite3PExpr(tls, pParse, TK_EQ, pColRef, pRhs) - if int32((*SrcItem)(unsafe.Pointer(pItem)).Ffg.Fjointype)&(JT_LEFT|JT_LTORJ) != 0 { + if int32((*SrcItem)(unsafe.Pointer(pItem)).Ffg.Fjointype)&(JT_LEFT|JT_LTORJ|JT_RIGHT) != 0 { joinType = U32(EP_OuterON) } else { joinType = U32(EP_InnerON) @@ -102401,7 +102444,7 @@ goto __6 } Xsqlite3_log(tls, SQLITE_WARNING|int32(1)<<8, - ts+21906, libc.VaList(bp, (*Table)(unsafe.Pointer(pTable)).FzName, + ts+21953, libc.VaList(bp, (*Table)(unsafe.Pointer(pTable)).FzName, (*Column)(unsafe.Pointer((*Table)(unsafe.Pointer(pTable)).FaCol+uintptr(iCol)*24)).FzCnName)) sentWarning = U8(1) __6: @@ -102472,7 +102515,7 @@ __14: ; *(*uintptr)(unsafe.Pointer(pLoop + 24 + 8)) = pIdx - (*Index)(unsafe.Pointer(pIdx)).FzName = ts + 21932 + (*Index)(unsafe.Pointer(pIdx)).FzName = ts + 21979 (*Index)(unsafe.Pointer(pIdx)).FpTable = pTable n = 0 idxCols = uint64(0) @@ -102646,6 +102689,10 @@ var v uintptr = (*Parse)(unsafe.Pointer(pParse)).FpVdbe var pLoop uintptr = (*WhereLevel)(unsafe.Pointer(pLevel)).FpWLoop var iCur int32 + var saved_pIdxEpr uintptr + + saved_pIdxEpr = 
(*Parse)(unsafe.Pointer(pParse)).FpIdxEpr + (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr = uintptr(0) addrOnce = Xsqlite3VdbeAddOp0(tls, v, OP_Once) for __ccgo := true; __ccgo; __ccgo = iLevel < int32((*WhereInfo)(unsafe.Pointer(pWInfo)).FnLevel) { @@ -102689,9 +102736,7 @@ var r1 int32 = Xsqlite3GetTempRange(tls, pParse, n) var jj int32 for jj = 0; jj < n; jj++ { - var iCol int32 = int32(*(*I16)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx)).FaiColumn + uintptr(jj)*2))) - - Xsqlite3ExprCodeGetColumnOfTable(tls, v, (*Index)(unsafe.Pointer(pIdx)).FpTable, iCur, iCol, r1+jj) + Xsqlite3ExprCodeLoadIndexColumn(tls, pParse, pIdx, iCur, jj, r1+jj) } Xsqlite3VdbeAddOp4Int(tls, v, OP_FilterAdd, (*WhereLevel)(unsafe.Pointer(pLevel)).FregFilter, 0, r1, n) Xsqlite3ReleaseTempRange(tls, pParse, r1, n) @@ -102725,6 +102770,7 @@ } } Xsqlite3VdbeJumpHere(tls, v, addrOnce) + (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr = saved_pIdxEpr } func allocateIndexInfo(tls *libc.TLS, pWInfo uintptr, pWC uintptr, mUnusable Bitmask, pSrc uintptr, pmNoOmit uintptr) uintptr { @@ -102983,11 +103029,16 @@ _ = pParse + if !((*Table)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx)).FpTable)).FtabFlags&U32(TF_WithoutRowid) == U32(0)) && int32(*(*uint16)(unsafe.Pointer(pIdx + 100))&0x3>>0) == SQLITE_IDXTYPE_PRIMARYKEY { + nField = int32((*Index)(unsafe.Pointer(pIdx)).FnKeyCol) + } else { + nField = int32((*Index)(unsafe.Pointer(pIdx)).FnColumn) + } nField = func() int32 { - if int32((*UnpackedRecord)(unsafe.Pointer(pRec)).FnField) < (*Index)(unsafe.Pointer(pIdx)).FnSample { + if int32((*UnpackedRecord)(unsafe.Pointer(pRec)).FnField) < nField { return int32((*UnpackedRecord)(unsafe.Pointer(pRec)).FnField) } - return (*Index)(unsafe.Pointer(pIdx)).FnSample + return nField }() iCol = 0 iSample = (*Index)(unsafe.Pointer(pIdx)).FnSample * nField @@ -104568,7 +104619,7 @@ j >= (*WhereClause)(unsafe.Pointer(pWC)).FnTerm || *(*uintptr)(unsafe.Pointer((*WhereLoop)(unsafe.Pointer(pNew)).FaLTerm + uintptr(iTerm)*8)) != uintptr(0) || int32((*sqlite3_index_constraint)(unsafe.Pointer(pIdxCons)).Fusable) == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+21943, libc.VaList(bp, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pSrc)).FpTab)).FzName)) + Xsqlite3ErrorMsg(tls, pParse, ts+21990, libc.VaList(bp, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pSrc)).FpTab)).FzName)) return SQLITE_ERROR } @@ -104626,7 +104677,7 @@ (*WhereLoop)(unsafe.Pointer(pNew)).FnLTerm = U16(mxTerm + 1) for i = 0; i <= mxTerm; i++ { if *(*uintptr)(unsafe.Pointer((*WhereLoop)(unsafe.Pointer(pNew)).FaLTerm + uintptr(i)*8)) == uintptr(0) { - Xsqlite3ErrorMsg(tls, pParse, ts+21943, libc.VaList(bp+8, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pSrc)).FpTab)).FzName)) + Xsqlite3ErrorMsg(tls, pParse, ts+21990, libc.VaList(bp+8, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pSrc)).FpTab)).FzName)) return SQLITE_ERROR } @@ -105024,7 +105075,7 @@ mPrior = mPrior | (*WhereLoop)(unsafe.Pointer(pNew)).FmaskSelf if rc != 0 || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { if rc == SQLITE_DONE { - Xsqlite3_log(tls, SQLITE_WARNING, ts+21969, 0) + Xsqlite3_log(tls, SQLITE_WARNING, ts+22016, 0) rc = SQLITE_OK } else { goto __3 @@ -105631,7 +105682,7 @@ } if nFrom == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+22004, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+22051, 0) Xsqlite3DbFreeNN(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pSpace) return SQLITE_ERROR } @@ -105666,6 +105717,10 @@ if int32((*WherePath)(unsafe.Pointer(pFrom)).FisOrdered) == 
(*ExprList)(unsafe.Pointer((*WhereInfo)(unsafe.Pointer(pWInfo)).FpOrderBy)).FnExpr { (*WhereInfo)(unsafe.Pointer(pWInfo)).FeDistinct = U8(WHERE_DISTINCT_ORDERED) } + if (*Select)(unsafe.Pointer((*WhereInfo)(unsafe.Pointer(pWInfo)).FpSelect)).FpOrderBy != 0 && + int32((*WhereInfo)(unsafe.Pointer(pWInfo)).FnOBSat) > (*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer((*WhereInfo)(unsafe.Pointer(pWInfo)).FpSelect)).FpOrderBy)).FnExpr { + (*WhereInfo)(unsafe.Pointer(pWInfo)).FnOBSat = I8((*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer((*WhereInfo)(unsafe.Pointer(pWInfo)).FpSelect)).FpOrderBy)).FnExpr) + } } else { (*WhereInfo)(unsafe.Pointer(pWInfo)).FrevMask = (*WherePath)(unsafe.Pointer(pFrom)).FrevLoop if int32((*WhereInfo)(unsafe.Pointer(pWInfo)).FnOBSat) <= 0 { @@ -105960,6 +106015,9 @@ (*IndexedExpr)(unsafe.Pointer(p)).FiIdxCur = iIdxCur (*IndexedExpr)(unsafe.Pointer(p)).FiIdxCol = i (*IndexedExpr)(unsafe.Pointer(p)).FbMaybeNullRow = U8(bMaybeNullRow) + if Xsqlite3IndexAffinityStr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pIdx) != 0 { + (*IndexedExpr)(unsafe.Pointer(p)).Faff = *(*uint8)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx)).FzColAff + uintptr(i))) + } (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr = p if (*IndexedExpr)(unsafe.Pointer(p)).FpIENext == uintptr(0) { Xsqlite3ParserAddCleanup(tls, pParse, *(*uintptr)(unsafe.Pointer(&struct { @@ -106112,7 +106170,7 @@ if !((*SrcList)(unsafe.Pointer(pTabList)).FnSrc > int32(uint64(unsafe.Sizeof(Bitmask(0)))*uint64(8))) { goto __2 } - Xsqlite3ErrorMsg(tls, pParse, ts+22022, libc.VaList(bp, int32(uint64(unsafe.Sizeof(Bitmask(0)))*uint64(8)))) + Xsqlite3ErrorMsg(tls, pParse, ts+22069, libc.VaList(bp, int32(uint64(unsafe.Sizeof(Bitmask(0)))*uint64(8)))) return uintptr(0) __2: ; @@ -106176,7 +106234,7 @@ (*WhereInfo)(unsafe.Pointer(pWInfo)).FeDistinct = U8(WHERE_DISTINCT_UNIQUE) __7: ; - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+22050, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+22097, 0) goto __5 __4: ii = 0 @@ -107058,7 +107116,7 @@ error_out: Xsqlite3_result_error(tls, - pCtx, ts+22068, -1) + pCtx, ts+22115, -1) } func nth_valueFinalizeFunc(tls *libc.TLS, pCtx uintptr) { @@ -107191,7 +107249,7 @@ (*NtileCtx)(unsafe.Pointer(p)).FnParam = Xsqlite3_value_int64(tls, *(*uintptr)(unsafe.Pointer(apArg))) if (*NtileCtx)(unsafe.Pointer(p)).FnParam <= int64(0) { Xsqlite3_result_error(tls, - pCtx, ts+22124, -1) + pCtx, ts+22171, -1) } } (*NtileCtx)(unsafe.Pointer(p)).FnTotal++ @@ -107281,17 +107339,17 @@ } } -var row_numberName = *(*[11]uint8)(unsafe.Pointer(ts + 22169)) -var dense_rankName = *(*[11]uint8)(unsafe.Pointer(ts + 22180)) -var rankName = *(*[5]uint8)(unsafe.Pointer(ts + 22191)) -var percent_rankName = *(*[13]uint8)(unsafe.Pointer(ts + 22196)) -var cume_distName = *(*[10]uint8)(unsafe.Pointer(ts + 22209)) -var ntileName = *(*[6]uint8)(unsafe.Pointer(ts + 22219)) -var last_valueName = *(*[11]uint8)(unsafe.Pointer(ts + 22225)) -var nth_valueName = *(*[10]uint8)(unsafe.Pointer(ts + 22236)) -var first_valueName = *(*[12]uint8)(unsafe.Pointer(ts + 22246)) -var leadName = *(*[5]uint8)(unsafe.Pointer(ts + 22258)) -var lagName = *(*[4]uint8)(unsafe.Pointer(ts + 22263)) +var row_numberName = *(*[11]uint8)(unsafe.Pointer(ts + 22216)) +var dense_rankName = *(*[11]uint8)(unsafe.Pointer(ts + 22227)) +var rankName = *(*[5]uint8)(unsafe.Pointer(ts + 22238)) +var percent_rankName = *(*[13]uint8)(unsafe.Pointer(ts + 22243)) +var cume_distName = *(*[10]uint8)(unsafe.Pointer(ts + 22256)) +var ntileName = *(*[6]uint8)(unsafe.Pointer(ts + 22266)) 
+var last_valueName = *(*[11]uint8)(unsafe.Pointer(ts + 22272)) +var nth_valueName = *(*[10]uint8)(unsafe.Pointer(ts + 22283)) +var first_valueName = *(*[12]uint8)(unsafe.Pointer(ts + 22293)) +var leadName = *(*[5]uint8)(unsafe.Pointer(ts + 22305)) +var lagName = *(*[4]uint8)(unsafe.Pointer(ts + 22310)) func noopStepFunc(tls *libc.TLS, p uintptr, n int32, a uintptr) { _ = p @@ -107337,7 +107395,7 @@ } } if p == uintptr(0) { - Xsqlite3ErrorMsg(tls, pParse, ts+22267, libc.VaList(bp, zName)) + Xsqlite3ErrorMsg(tls, pParse, ts+22314, libc.VaList(bp, zName)) } return p } @@ -107381,12 +107439,12 @@ ((*Window)(unsafe.Pointer(pWin)).FpStart != 0 || (*Window)(unsafe.Pointer(pWin)).FpEnd != 0) && ((*Window)(unsafe.Pointer(pWin)).FpOrderBy == uintptr(0) || (*ExprList)(unsafe.Pointer((*Window)(unsafe.Pointer(pWin)).FpOrderBy)).FnExpr != 1) { Xsqlite3ErrorMsg(tls, pParse, - ts+22286, 0) + ts+22333, 0) } else if (*FuncDef)(unsafe.Pointer(pFunc)).FfuncFlags&U32(SQLITE_FUNC_WINDOW) != 0 { var db uintptr = (*Parse)(unsafe.Pointer(pParse)).Fdb if (*Window)(unsafe.Pointer(pWin)).FpFilter != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+22357, 0) + ts+22404, 0) } else { *(*[8]WindowUpdate)(unsafe.Pointer(bp)) = [8]WindowUpdate{ {FzFunc: uintptr(unsafe.Pointer(&row_numberName)), FeFrmType: TK_ROWS, FeStart: TK_UNBOUNDED, FeEnd: TK_CURRENT}, @@ -107615,7 +107673,7 @@ if int32((*Expr)(unsafe.Pointer(pExpr)).Fop) == TK_AGG_FUNCTION && (*Expr)(unsafe.Pointer(pExpr)).FpAggInfo == uintptr(0) { Xsqlite3ErrorMsg(tls, (*Walker)(unsafe.Pointer(pWalker)).FpParse, - ts+22420, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(pExpr + 8)))) + ts+22467, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(pExpr + 8)))) } return WRC_Continue } @@ -107731,7 +107789,7 @@ if *(*uintptr)(unsafe.Pointer(bp + 48)) == uintptr(0) { *(*uintptr)(unsafe.Pointer(bp + 48)) = Xsqlite3ExprListAppend(tls, pParse, uintptr(0), - Xsqlite3Expr(tls, db, TK_INTEGER, ts+7530)) + Xsqlite3Expr(tls, db, TK_INTEGER, ts+7519)) } pSub = Xsqlite3SelectNew(tls, @@ -107846,7 +107904,7 @@ eStart == TK_FOLLOWING && (eEnd == TK_PRECEDING || eEnd == TK_CURRENT)) { goto __2 } - Xsqlite3ErrorMsg(tls, pParse, ts+22446, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+22493, 0) goto windowAllocErr __2: ; @@ -107911,15 +107969,15 @@ var zErr uintptr = uintptr(0) if (*Window)(unsafe.Pointer(pWin)).FpPartition != 0 { - zErr = ts + 22478 + zErr = ts + 22525 } else if (*Window)(unsafe.Pointer(pExist)).FpOrderBy != 0 && (*Window)(unsafe.Pointer(pWin)).FpOrderBy != 0 { - zErr = ts + 22495 + zErr = ts + 22542 } else if int32((*Window)(unsafe.Pointer(pExist)).FbImplicitFrame) == 0 { - zErr = ts + 22511 + zErr = ts + 22558 } if zErr != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+22531, libc.VaList(bp, zErr, (*Window)(unsafe.Pointer(pWin)).FzBase)) + ts+22578, libc.VaList(bp, zErr, (*Window)(unsafe.Pointer(pWin)).FzBase)) } else { (*Window)(unsafe.Pointer(pWin)).FpPartition = Xsqlite3ExprListDup(tls, db, (*Window)(unsafe.Pointer(pExist)).FpPartition, 0) if (*Window)(unsafe.Pointer(pExist)).FpOrderBy != 0 { @@ -107940,7 +107998,7 @@ (*Window)(unsafe.Pointer(pWin)).FpOwner = p if (*Expr)(unsafe.Pointer(p)).Fflags&U32(EP_Distinct) != 0 && int32((*Window)(unsafe.Pointer(pWin)).FeFrmType) != TK_FILTER { Xsqlite3ErrorMsg(tls, pParse, - ts+22564, 0) + ts+22611, 0) } } else { Xsqlite3WindowDelete(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pWin) @@ -108096,11 +108154,11 @@ } var azErr = [5]uintptr{ - ts + 22611, - ts + 22664, - ts + 22068, - ts + 22715, - ts + 22767, + ts + 22658, + ts + 22711, + ts + 22115, + ts + 22762, 
+ ts + 22814, } var aOp1 = [5]int32{OP_Ge, OP_Ge, OP_Gt, OP_Ge, OP_Ge} @@ -109495,19 +109553,19 @@ } cnt++ if (*Select)(unsafe.Pointer(pLoop)).FpOrderBy != 0 || (*Select)(unsafe.Pointer(pLoop)).FpLimit != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+22817, + Xsqlite3ErrorMsg(tls, pParse, ts+22864, libc.VaList(bp, func() uintptr { if (*Select)(unsafe.Pointer(pLoop)).FpOrderBy != uintptr(0) { - return ts + 22859 + return ts + 22906 } - return ts + 22868 + return ts + 22915 }(), Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(pNext)).Fop)))) break } } if (*Select)(unsafe.Pointer(p)).FselFlags&U32(SF_MultiValue) == U32(0) && libc.AssignInt32(&mxSelect, *(*int32)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb + 136 + 4*4))) > 0 && cnt > mxSelect { - Xsqlite3ErrorMsg(tls, pParse, ts+22874, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+22921, 0) } } @@ -109575,7 +109633,7 @@ var p uintptr = Xsqlite3ExprListAppend(tls, pParse, pPrior, uintptr(0)) if (hasCollate != 0 || sortOrder != -1) && int32((*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).Finit.Fbusy) == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+22908, + Xsqlite3ErrorMsg(tls, pParse, ts+22955, libc.VaList(bp, (*Token)(unsafe.Pointer(pIdToken)).Fn, (*Token)(unsafe.Pointer(pIdToken)).Fz)) } Xsqlite3ExprListSetName(tls, pParse, p, pIdToken, 1) @@ -110672,7 +110730,7 @@ yy_pop_parser_stack(tls, yypParser) } - Xsqlite3ErrorMsg(tls, pParse, ts+22946, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+22993, 0) (*YyParser)(unsafe.Pointer(yypParser)).FpParse = pParse } @@ -111651,7 +111709,7 @@ *(*U32)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-1)*24 + 8)) = U32(TF_WithoutRowid | TF_NoVisibleRowid) } else { *(*U32)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-1)*24 + 8)) = U32(0) - Xsqlite3ErrorMsg(tls, pParse, ts+22968, libc.VaList(bp, (*Token)(unsafe.Pointer(yymsp+8)).Fn, (*Token)(unsafe.Pointer(yymsp+8)).Fz)) + Xsqlite3ErrorMsg(tls, pParse, ts+23015, libc.VaList(bp, (*Token)(unsafe.Pointer(yymsp+8)).Fn, (*Token)(unsafe.Pointer(yymsp+8)).Fz)) } } break @@ -111661,7 +111719,7 @@ *(*U32)(unsafe.Pointer(bp + 40)) = U32(TF_Strict) } else { *(*U32)(unsafe.Pointer(bp + 40)) = U32(0) - Xsqlite3ErrorMsg(tls, pParse, ts+22968, libc.VaList(bp+16, (*Token)(unsafe.Pointer(yymsp+8)).Fn, (*Token)(unsafe.Pointer(yymsp+8)).Fz)) + Xsqlite3ErrorMsg(tls, pParse, ts+23015, libc.VaList(bp+16, (*Token)(unsafe.Pointer(yymsp+8)).Fn, (*Token)(unsafe.Pointer(yymsp+8)).Fz)) } } *(*U32)(unsafe.Pointer(yymsp + 8)) = *(*U32)(unsafe.Pointer(bp + 40)) @@ -112404,7 +112462,7 @@ case uint32(157): { Xsqlite3SrcListIndexedBy(tls, pParse, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-5)*24 + 8)), yymsp+libc.UintptrFromInt32(-4)*24+8) - Xsqlite3ExprListCheckLength(tls, pParse, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-2)*24 + 8)), ts+22995) + Xsqlite3ExprListCheckLength(tls, pParse, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-2)*24 + 8)), ts+23042) if *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-1)*24 + 8)) != 0 { var pFromClause uintptr = *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-1)*24 + 8)) if (*SrcList)(unsafe.Pointer(pFromClause)).FnSrc > 1 { @@ -112568,7 +112626,7 @@ *(*Token)(unsafe.Pointer(bp + 128)) = *(*Token)(unsafe.Pointer(yymsp + 8)) if int32((*Parse)(unsafe.Pointer(pParse)).Fnested) == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+23004, libc.VaList(bp+32, bp+128)) + Xsqlite3ErrorMsg(tls, pParse, ts+23051, libc.VaList(bp+32, bp+128)) *(*uintptr)(unsafe.Pointer(yymsp + 8)) = uintptr(0) } else { 
*(*uintptr)(unsafe.Pointer(yymsp + 8)) = Xsqlite3PExpr(tls, pParse, TK_REGISTER, uintptr(0), uintptr(0)) @@ -112785,9 +112843,9 @@ Xsqlite3ExprUnmapAndDelete(tls, pParse, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-4)*24 + 8))) *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-4)*24 + 8)) = Xsqlite3Expr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, TK_STRING, func() uintptr { if *(*int32)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-3)*24 + 8)) != 0 { - return ts + 6764 + return ts + 7694 } - return ts + 6769 + return ts + 7699 }()) if *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-4)*24 + 8)) != 0 { Xsqlite3ExprIdToTrueFalse(tls, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-4)*24 + 8))) @@ -113071,19 +113129,19 @@ { *(*Token)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-2)*24 + 8)) = *(*Token)(unsafe.Pointer(yymsp + 8)) Xsqlite3ErrorMsg(tls, pParse, - ts+23028, 0) + ts+23075, 0) } break case uint32(271): { Xsqlite3ErrorMsg(tls, pParse, - ts+23123, 0) + ts+23170, 0) } break case uint32(272): { Xsqlite3ErrorMsg(tls, pParse, - ts+23207, 0) + ts+23254, 0) } break case uint32(273): @@ -113462,9 +113520,9 @@ _ = yymajor if *(*uint8)(unsafe.Pointer((*Token)(unsafe.Pointer(bp + 8)).Fz)) != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+23004, libc.VaList(bp, bp+8)) + Xsqlite3ErrorMsg(tls, pParse, ts+23051, libc.VaList(bp, bp+8)) } else { - Xsqlite3ErrorMsg(tls, pParse, ts+23292, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+23339, 0) } (*YyParser)(unsafe.Pointer(yypParser)).FpParse = pParse @@ -114232,7 +114290,7 @@ } else { (*Token)(unsafe.Pointer(bp + 2464)).Fz = zSql (*Token)(unsafe.Pointer(bp + 2464)).Fn = uint32(n) - Xsqlite3ErrorMsg(tls, pParse, ts+23309, libc.VaList(bp, bp+2464)) + Xsqlite3ErrorMsg(tls, pParse, ts+23356, libc.VaList(bp, bp+2464)) break } } @@ -114255,7 +114313,7 @@ if (*Parse)(unsafe.Pointer(pParse)).FzErrMsg == uintptr(0) { (*Parse)(unsafe.Pointer(pParse)).FzErrMsg = Xsqlite3MPrintf(tls, db, ts+3663, libc.VaList(bp+8, Xsqlite3ErrStr(tls, (*Parse)(unsafe.Pointer(pParse)).Frc))) } - Xsqlite3_log(tls, (*Parse)(unsafe.Pointer(pParse)).Frc, ts+23334, libc.VaList(bp+16, (*Parse)(unsafe.Pointer(pParse)).FzErrMsg, (*Parse)(unsafe.Pointer(pParse)).FzTail)) + Xsqlite3_log(tls, (*Parse)(unsafe.Pointer(pParse)).Frc, ts+23381, libc.VaList(bp+16, (*Parse)(unsafe.Pointer(pParse)).FzErrMsg, (*Parse)(unsafe.Pointer(pParse)).FzTail)) nErr++ } (*Parse)(unsafe.Pointer(pParse)).FzTail = zSql @@ -114428,7 +114486,7 @@ fallthrough case 'C': { - if nId == 6 && Xsqlite3_strnicmp(tls, zSql, ts+23345, 6) == 0 { + if nId == 6 && Xsqlite3_strnicmp(tls, zSql, ts+23392, 6) == 0 { token = U8(TkCREATE) } else { token = U8(TkOTHER) @@ -114441,11 +114499,11 @@ fallthrough case 'T': { - if nId == 7 && Xsqlite3_strnicmp(tls, zSql, ts+19931, 7) == 0 { + if nId == 7 && Xsqlite3_strnicmp(tls, zSql, ts+19978, 7) == 0 { token = U8(TkTRIGGER) - } else if nId == 4 && Xsqlite3_strnicmp(tls, zSql, ts+23352, 4) == 0 { + } else if nId == 4 && Xsqlite3_strnicmp(tls, zSql, ts+23399, 4) == 0 { token = U8(TkTEMP) - } else if nId == 9 && Xsqlite3_strnicmp(tls, zSql, ts+23357, 9) == 0 { + } else if nId == 9 && Xsqlite3_strnicmp(tls, zSql, ts+23404, 9) == 0 { token = U8(TkTEMP) } else { token = U8(TkOTHER) @@ -114458,9 +114516,9 @@ fallthrough case 'E': { - if nId == 3 && Xsqlite3_strnicmp(tls, zSql, ts+23367, 3) == 0 { + if nId == 3 && Xsqlite3_strnicmp(tls, zSql, ts+23414, 3) == 0 { token = U8(TkEND) - } else if nId == 7 && Xsqlite3_strnicmp(tls, zSql, ts+23371, 7) == 0 { + } else if nId == 7 && 
Xsqlite3_strnicmp(tls, zSql, ts+23418, 7) == 0 { token = U8(TkEXPLAIN) } else { token = U8(TkOTHER) @@ -114699,7 +114757,7 @@ var rc int32 = SQLITE_OK if Xsqlite3Config.FisInit != 0 { - return Xsqlite3MisuseError(tls, 174337) + return Xsqlite3MisuseError(tls, 174426) } ap = va @@ -115274,7 +115332,7 @@ return SQLITE_OK } if !(Xsqlite3SafetyCheckSickOrOk(tls, db) != 0) { - return Xsqlite3MisuseError(tls, 175111) + return Xsqlite3MisuseError(tls, 175200) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) if int32((*Sqlite3)(unsafe.Pointer(db)).FmTrace)&SQLITE_TRACE_CLOSE != 0 { @@ -115289,7 +115347,7 @@ if !(forceZombie != 0) && connectionIsBusy(tls, db) != 0 { Xsqlite3ErrorWithMsg(tls, db, SQLITE_BUSY, - ts+23379, 0) + ts+23426, 0) Xsqlite3_mutex_leave(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) return SQLITE_BUSY } @@ -115480,23 +115538,23 @@ // Return a static string that describes the kind of error specified in the // argument. func Xsqlite3ErrStr(tls *libc.TLS, rc int32) uintptr { - var zErr uintptr = ts + 23447 + var zErr uintptr = ts + 23494 switch rc { case SQLITE_ABORT | int32(2)<<8: { - zErr = ts + 23461 + zErr = ts + 23508 break } case SQLITE_ROW: { - zErr = ts + 23483 + zErr = ts + 23530 break } case SQLITE_DONE: { - zErr = ts + 23505 + zErr = ts + 23552 break } @@ -115514,35 +115572,35 @@ } var aMsg = [29]uintptr{ - ts + 23528, - ts + 23541, + ts + 23575, + ts + 23588, uintptr(0), - ts + 23557, - ts + 23582, - ts + 23596, - ts + 23615, + ts + 23604, + ts + 23629, + ts + 23643, + ts + 23662, ts + 1490, - ts + 23640, - ts + 23677, - ts + 23689, - ts + 23704, - ts + 23737, - ts + 23755, - ts + 23780, - ts + 23809, + ts + 23687, + ts + 23724, + ts + 23736, + ts + 23751, + ts + 23784, + ts + 23802, + ts + 23827, + ts + 23856, uintptr(0), ts + 5838, ts + 5334, - ts + 23826, - ts + 23844, - ts + 23862, - uintptr(0), - ts + 23896, + ts + 23873, + ts + 23891, + ts + 23909, uintptr(0), - ts + 23917, ts + 23943, - ts + 23966, - ts + 23987, + uintptr(0), + ts + 23964, + ts + 23990, + ts + 24013, + ts + 24034, } func sqliteDefaultBusyCallback(tls *libc.TLS, ptr uintptr, count int32) int32 { @@ -115663,7 +115721,7 @@ libc.Bool32(xValue == uintptr(0)) != libc.Bool32(xInverse == uintptr(0)) || (nArg < -1 || nArg > SQLITE_MAX_FUNCTION_ARG) || 255 < Xsqlite3Strlen30(tls, zFunctionName) { - return Xsqlite3MisuseError(tls, 175758) + return Xsqlite3MisuseError(tls, 175847) } extraFlags = enc & (SQLITE_DETERMINISTIC | SQLITE_DIRECTONLY | SQLITE_SUBTYPE | SQLITE_INNOCUOUS) @@ -115713,7 +115771,7 @@ if p != 0 && (*FuncDef)(unsafe.Pointer(p)).FfuncFlags&U32(SQLITE_FUNC_ENCMASK) == U32(enc) && int32((*FuncDef)(unsafe.Pointer(p)).FnArg) == nArg { if (*Sqlite3)(unsafe.Pointer(db)).FnVdbeActive != 0 { Xsqlite3ErrorWithMsg(tls, db, SQLITE_BUSY, - ts+24003, 0) + ts+24050, 0) return SQLITE_BUSY } else { @@ -115835,7 +115893,7 @@ _ = NotUsed _ = NotUsed2 zErr = Xsqlite3_mprintf(tls, - ts+24066, libc.VaList(bp, zName)) + ts+24113, libc.VaList(bp, zName)) Xsqlite3_result_error(tls, context, zErr, -1) Xsqlite3_free(tls, zErr) } @@ -116071,7 +116129,7 @@ } if iDb < 0 { rc = SQLITE_ERROR - Xsqlite3ErrorWithMsg(tls, db, SQLITE_ERROR, ts+24117, libc.VaList(bp, zDb)) + Xsqlite3ErrorWithMsg(tls, db, SQLITE_ERROR, ts+24164, libc.VaList(bp, zDb)) } else { (*Sqlite3)(unsafe.Pointer(db)).FbusyHandler.FnBusy = 0 rc = Xsqlite3Checkpoint(tls, db, iDb, eMode, pnLog, pnCkpt) @@ -116164,7 +116222,7 @@ return Xsqlite3ErrStr(tls, SQLITE_NOMEM) } if !(Xsqlite3SafetyCheckSickOrOk(tls, db) != 0) { - return 
Xsqlite3ErrStr(tls, Xsqlite3MisuseError(tls, 176503)) + return Xsqlite3ErrStr(tls, Xsqlite3MisuseError(tls, 176592)) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) if (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { @@ -116234,7 +116292,7 @@ // passed to this function, we assume a malloc() failed during sqlite3_open(). func Xsqlite3_errcode(tls *libc.TLS, db uintptr) int32 { if db != 0 && !(Xsqlite3SafetyCheckSickOrOk(tls, db) != 0) { - return Xsqlite3MisuseError(tls, 176582) + return Xsqlite3MisuseError(tls, 176671) } if !(db != 0) || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { return SQLITE_NOMEM @@ -116244,7 +116302,7 @@ func Xsqlite3_extended_errcode(tls *libc.TLS, db uintptr) int32 { if db != 0 && !(Xsqlite3SafetyCheckSickOrOk(tls, db) != 0) { - return Xsqlite3MisuseError(tls, 176591) + return Xsqlite3MisuseError(tls, 176680) } if !(db != 0) || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { return SQLITE_NOMEM @@ -116281,14 +116339,14 @@ }() } if enc2 < SQLITE_UTF8 || enc2 > SQLITE_UTF16BE { - return Xsqlite3MisuseError(tls, 176639) + return Xsqlite3MisuseError(tls, 176728) } pColl = Xsqlite3FindCollSeq(tls, db, U8(enc2), zName, 0) if pColl != 0 && (*CollSeq)(unsafe.Pointer(pColl)).FxCmp != 0 { if (*Sqlite3)(unsafe.Pointer(db)).FnVdbeActive != 0 { Xsqlite3ErrorWithMsg(tls, db, SQLITE_BUSY, - ts+24138, 0) + ts+24185, 0) return SQLITE_BUSY } Xsqlite3ExpirePreparedStatements(tls, db, 0) @@ -116418,7 +116476,7 @@ if !((flags&uint32(SQLITE_OPEN_URI) != 0 || Xsqlite3Config.FbOpenUri != 0) && - nUri >= 5 && libc.Xmemcmp(tls, zUri, ts+24206, uint64(5)) == 0) { + nUri >= 5 && libc.Xmemcmp(tls, zUri, ts+24253, uint64(5)) == 0) { goto __1 } iOut = 0 @@ -116463,10 +116521,10 @@ goto __8 __9: ; - if !(iIn != 7 && (iIn != 16 || libc.Xmemcmp(tls, ts+24212, zUri+7, uint64(9)) != 0)) { + if !(iIn != 7 && (iIn != 16 || libc.Xmemcmp(tls, ts+24259, zUri+7, uint64(9)) != 0)) { goto __10 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24222, + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24269, libc.VaList(bp, iIn-7, zUri+7)) rc = SQLITE_ERROR goto parse_uri_out @@ -116571,7 +116629,7 @@ zVal = zOpt + uintptr(nOpt+1) nVal = Xsqlite3Strlen30(tls, zVal) - if !(nOpt == 3 && libc.Xmemcmp(tls, ts+24250, zOpt, uint64(3)) == 0) { + if !(nOpt == 3 && libc.Xmemcmp(tls, ts+24297, zOpt, uint64(3)) == 0) { goto __29 } zVfs = zVal @@ -116582,17 +116640,17 @@ mask = 0 limit = 0 - if !(nOpt == 5 && libc.Xmemcmp(tls, ts+24254, zOpt, uint64(5)) == 0) { + if !(nOpt == 5 && libc.Xmemcmp(tls, ts+24301, zOpt, uint64(5)) == 0) { goto __31 } mask = SQLITE_OPEN_SHAREDCACHE | SQLITE_OPEN_PRIVATECACHE aMode = uintptr(unsafe.Pointer(&aCacheMode)) limit = mask - zModeType = ts + 24254 + zModeType = ts + 24301 __31: ; - if !(nOpt == 4 && libc.Xmemcmp(tls, ts+24260, zOpt, uint64(4)) == 0) { + if !(nOpt == 4 && libc.Xmemcmp(tls, ts+24307, zOpt, uint64(4)) == 0) { goto __32 } @@ -116630,7 +116688,7 @@ if !(mode == 0) { goto __38 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24265, libc.VaList(bp+16, zModeType, zVal)) + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24312, libc.VaList(bp+16, zModeType, zVal)) rc = SQLITE_ERROR goto parse_uri_out __38: @@ -116638,7 +116696,7 @@ if !(mode&libc.CplInt32(SQLITE_OPEN_MEMORY) > limit) { goto __39 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24285, + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24332, libc.VaList(bp+32, zModeType, 
zVal)) rc = SQLITE_PERM goto parse_uri_out @@ -116678,7 +116736,7 @@ if !(*(*uintptr)(unsafe.Pointer(ppVfs)) == uintptr(0)) { goto __42 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24309, libc.VaList(bp+48, zVfs)) + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24356, libc.VaList(bp+48, zVfs)) rc = SQLITE_ERROR __42: ; @@ -116702,14 +116760,14 @@ } var aCacheMode = [3]OpenMode{ - {Fz: ts + 24325, Fmode: SQLITE_OPEN_SHAREDCACHE}, - {Fz: ts + 24332, Fmode: SQLITE_OPEN_PRIVATECACHE}, + {Fz: ts + 24372, Fmode: SQLITE_OPEN_SHAREDCACHE}, + {Fz: ts + 24379, Fmode: SQLITE_OPEN_PRIVATECACHE}, {}, } var aOpenMode = [5]OpenMode{ - {Fz: ts + 24340, Fmode: SQLITE_OPEN_READONLY}, - {Fz: ts + 24343, Fmode: SQLITE_OPEN_READWRITE}, - {Fz: ts + 24346, Fmode: SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE}, + {Fz: ts + 24387, Fmode: SQLITE_OPEN_READONLY}, + {Fz: ts + 24390, Fmode: SQLITE_OPEN_READWRITE}, + {Fz: ts + 24393, Fmode: SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE}, {Fz: ts + 17362, Fmode: SQLITE_OPEN_MEMORY}, {}, } @@ -116856,10 +116914,10 @@ createCollation(tls, db, uintptr(unsafe.Pointer(&Xsqlite3StrBINARY)), uint8(SQLITE_UTF16LE), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr, int32, uintptr) int32 }{binCollFunc})), uintptr(0)) - createCollation(tls, db, ts+21863, uint8(SQLITE_UTF8), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + createCollation(tls, db, ts+21910, uint8(SQLITE_UTF8), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr, int32, uintptr) int32 }{nocaseCollatingFunc})), uintptr(0)) - createCollation(tls, db, ts+24350, uint8(SQLITE_UTF8), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + createCollation(tls, db, ts+24397, uint8(SQLITE_UTF8), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr, int32, uintptr) int32 }{rtrimCollFunc})), uintptr(0)) if !((*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0) { @@ -116873,7 +116931,7 @@ if !(int32(1)<<(*(*uint32)(unsafe.Pointer(bp + 8))&uint32(7))&0x46 == 0) { goto __16 } - rc = Xsqlite3MisuseError(tls, 177308) + rc = Xsqlite3MisuseError(tls, 177397) goto __17 __16: rc = Xsqlite3ParseUri(tls, zVfs, zFilename, bp+8, db, bp+16, bp+24) @@ -116926,7 +116984,7 @@ (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb)).FzDbSName = ts + 6441 (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb)).Fsafety_level = U8(SQLITE_DEFAULT_SYNCHRONOUS + 1) - (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + 1*32)).FzDbSName = ts + 23352 + (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + 1*32)).FzDbSName = ts + 23399 (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + 1*32)).Fsafety_level = U8(PAGER_SYNCHRONOUS_OFF) (*Sqlite3)(unsafe.Pointer(db)).FeOpenState = U8(SQLITE_STATE_OPEN) @@ -117031,7 +117089,7 @@ return rc } if zFilename == uintptr(0) { - zFilename = ts + 24356 + zFilename = ts + 24403 } pVal = Xsqlite3ValueNew(tls, uintptr(0)) Xsqlite3ValueSetStr(tls, pVal, -1, zFilename, func() uint8 { @@ -117149,21 +117207,21 @@ bp := tls.Alloc(24) defer tls.Free(24) - Xsqlite3_log(tls, iErr, ts+24359, + Xsqlite3_log(tls, iErr, ts+24406, libc.VaList(bp, zType, lineno, uintptr(20)+Xsqlite3_sourceid(tls))) return iErr } func Xsqlite3CorruptError(tls *libc.TLS, lineno int32) int32 { - return Xsqlite3ReportError(tls, SQLITE_CORRUPT, lineno, ts+24384) + return Xsqlite3ReportError(tls, SQLITE_CORRUPT, lineno, ts+24431) } func Xsqlite3MisuseError(tls *libc.TLS, lineno int32) 
int32 { - return Xsqlite3ReportError(tls, SQLITE_MISUSE, lineno, ts+24404) + return Xsqlite3ReportError(tls, SQLITE_MISUSE, lineno, ts+24451) } func Xsqlite3CantopenError(tls *libc.TLS, lineno int32) int32 { - return Xsqlite3ReportError(tls, SQLITE_CANTOPEN, lineno, ts+24411) + return Xsqlite3ReportError(tls, SQLITE_CANTOPEN, lineno, ts+24458) } // This is a convenience routine that makes sure that all thread-specific @@ -117321,7 +117379,7 @@ goto __20 } Xsqlite3DbFree(tls, db, *(*uintptr)(unsafe.Pointer(bp + 24))) - *(*uintptr)(unsafe.Pointer(bp + 24)) = Xsqlite3MPrintf(tls, db, ts+24428, libc.VaList(bp, zTableName, + *(*uintptr)(unsafe.Pointer(bp + 24)) = Xsqlite3MPrintf(tls, db, ts+24475, libc.VaList(bp, zTableName, zColumnName)) rc = SQLITE_ERROR __20: @@ -117977,7 +118035,7 @@ azCompileOpt = Xsqlite3CompileOptions(tls, bp) - if Xsqlite3_strnicmp(tls, zOptName, ts+24456, 7) == 0 { + if Xsqlite3_strnicmp(tls, zOptName, ts+24503, 7) == 0 { zOptName += uintptr(7) } n = Xsqlite3Strlen30(tls, zOptName) @@ -118095,7 +118153,7 @@ Xsqlite3ErrorWithMsg(tls, db, rc, func() uintptr { if rc != 0 { - return ts + 24464 + return ts + 24511 } return uintptr(0) }(), 0) @@ -118273,7 +118331,7 @@ type JsonParse = JsonParse1 var jsonType = [8]uintptr{ - ts + 6181, ts + 6764, ts + 6769, ts + 6191, ts + 6186, ts + 8005, ts + 24487, ts + 24493, + ts + 6181, ts + 7694, ts + 7699, ts + 6191, ts + 6186, ts + 8005, ts + 24534, ts + 24540, } func jsonZero(tls *libc.TLS, p uintptr) { @@ -118426,7 +118484,7 @@ *(*uint8)(unsafe.Pointer((*JsonString)(unsafe.Pointer(p)).FzBuf + uintptr(libc.PostIncUint64(&(*JsonString)(unsafe.Pointer(p)).FnUsed, 1)))) = uint8('0') *(*uint8)(unsafe.Pointer((*JsonString)(unsafe.Pointer(p)).FzBuf + uintptr(libc.PostIncUint64(&(*JsonString)(unsafe.Pointer(p)).FnUsed, 1)))) = uint8('0') *(*uint8)(unsafe.Pointer((*JsonString)(unsafe.Pointer(p)).FzBuf + uintptr(libc.PostIncUint64(&(*JsonString)(unsafe.Pointer(p)).FnUsed, 1)))) = uint8('0' + int32(c)>>4) - c = *(*uint8)(unsafe.Pointer(ts + 24500 + uintptr(int32(c)&0xf))) + c = *(*uint8)(unsafe.Pointer(ts + 24547 + uintptr(int32(c)&0xf))) __8: ; __6: @@ -118481,7 +118539,7 @@ default: { if int32((*JsonString)(unsafe.Pointer(p)).FbErr) == 0 { - Xsqlite3_result_error(tls, (*JsonString)(unsafe.Pointer(p)).FpCtx, ts+24517, -1) + Xsqlite3_result_error(tls, (*JsonString)(unsafe.Pointer(p)).FpCtx, ts+24564, -1) (*JsonString)(unsafe.Pointer(p)).FbErr = U8(2) jsonReset(tls, p) } @@ -118545,13 +118603,13 @@ } case JSON_TRUE: { - jsonAppendRaw(tls, pOut, ts+6764, uint32(4)) + jsonAppendRaw(tls, pOut, ts+7694, uint32(4)) break } case JSON_FALSE: { - jsonAppendRaw(tls, pOut, ts+6769, uint32(5)) + jsonAppendRaw(tls, pOut, ts+7699, uint32(5)) break } @@ -119101,12 +119159,12 @@ jsonParseAddNode(tls, pParse, uint32(JSON_NULL), uint32(0), uintptr(0)) return int32(i + U32(4)) } else if int32(c) == 't' && - libc.Xstrncmp(tls, z+uintptr(i), ts+6764, uint64(4)) == 0 && + libc.Xstrncmp(tls, z+uintptr(i), ts+7694, uint64(4)) == 0 && !(int32(Xsqlite3CtypeMap[*(*uint8)(unsafe.Pointer(z + uintptr(i+U32(4))))])&0x06 != 0) { jsonParseAddNode(tls, pParse, uint32(JSON_TRUE), uint32(0), uintptr(0)) return int32(i + U32(4)) } else if int32(c) == 'f' && - libc.Xstrncmp(tls, z+uintptr(i), ts+6769, uint64(5)) == 0 && + libc.Xstrncmp(tls, z+uintptr(i), ts+7699, uint64(5)) == 0 && !(int32(Xsqlite3CtypeMap[*(*uint8)(unsafe.Pointer(z + uintptr(i+U32(5))))])&0x06 != 0) { jsonParseAddNode(tls, pParse, uint32(JSON_FALSE), uint32(0), uintptr(0)) return int32(i + U32(5)) @@ -119207,7 
+119265,7 @@ if (*JsonParse)(unsafe.Pointer(pParse)).Foom != 0 { Xsqlite3_result_error_nomem(tls, pCtx) } else { - Xsqlite3_result_error(tls, pCtx, ts+24546, -1) + Xsqlite3_result_error(tls, pCtx, ts+24593, -1) } } jsonParseReset(tls, pParse) @@ -119513,7 +119571,7 @@ } if int32(*(*uint8)(unsafe.Pointer(zPath))) == '.' { jsonParseAddNode(tls, pParse, uint32(JSON_OBJECT), uint32(0), uintptr(0)) - } else if libc.Xstrncmp(tls, zPath, ts+24561, uint64(3)) == 0 { + } else if libc.Xstrncmp(tls, zPath, ts+24608, uint64(3)) == 0 { jsonParseAddNode(tls, pParse, uint32(JSON_ARRAY), uint32(0), uintptr(0)) } else { return uintptr(0) @@ -119528,7 +119586,7 @@ bp := tls.Alloc(8) defer tls.Free(8) - return Xsqlite3_mprintf(tls, ts+24565, libc.VaList(bp, zErr)) + return Xsqlite3_mprintf(tls, ts+24612, libc.VaList(bp, zErr)) } func jsonLookup(tls *libc.TLS, pParse uintptr, zPath uintptr, pApnd uintptr, pCtx uintptr) uintptr { @@ -119582,7 +119640,7 @@ bp := tls.Alloc(8) defer tls.Free(8) - var zMsg uintptr = Xsqlite3_mprintf(tls, ts+24591, + var zMsg uintptr = Xsqlite3_mprintf(tls, ts+24638, libc.VaList(bp, zFuncName)) Xsqlite3_result_error(tls, pCtx, zMsg, -1) Xsqlite3_free(tls, zMsg) @@ -119687,11 +119745,11 @@ if int32(*(*uint8)(unsafe.Pointer(zPath))) != '$' { jsonInit(tls, bp, ctx) if int32(Xsqlite3CtypeMap[*(*uint8)(unsafe.Pointer(zPath))])&0x04 != 0 { - jsonAppendRaw(tls, bp, ts+24634, uint32(2)) + jsonAppendRaw(tls, bp, ts+24681, uint32(2)) jsonAppendRaw(tls, bp, zPath, uint32(int32(libc.Xstrlen(tls, zPath)))) jsonAppendRaw(tls, bp, ts+4998, uint32(2)) } else { - jsonAppendRaw(tls, bp, ts+24637, uint32(1+libc.Bool32(int32(*(*uint8)(unsafe.Pointer(zPath))) != '['))) + jsonAppendRaw(tls, bp, ts+24684, uint32(1+libc.Bool32(int32(*(*uint8)(unsafe.Pointer(zPath))) != '['))) jsonAppendRaw(tls, bp, zPath, uint32(int32(libc.Xstrlen(tls, zPath)))) jsonAppendChar(tls, bp, uint8(0)) } @@ -119848,14 +119906,14 @@ if argc&1 != 0 { Xsqlite3_result_error(tls, ctx, - ts+24640, -1) + ts+24687, -1) return } jsonInit(tls, bp, ctx) jsonAppendChar(tls, bp, uint8('{')) for i = 0; i < argc; i = i + 2 { if Xsqlite3_value_type(tls, *(*uintptr)(unsafe.Pointer(argv + uintptr(i)*8))) != SQLITE_TEXT { - Xsqlite3_result_error(tls, ctx, ts+24691, -1) + Xsqlite3_result_error(tls, ctx, ts+24738, -1) jsonReset(tls, bp) return } @@ -120025,9 +120083,9 @@ } jsonWrongNumArgs(tls, ctx, func() uintptr { if bIsSet != 0 { - return ts + 24725 + return ts + 24772 } - return ts + 24729 + return ts + 24776 }()) return __2: @@ -120160,7 +120218,7 @@ (*JsonString)(unsafe.Pointer(pStr)).FnUsed-- } } else { - Xsqlite3_result_text(tls, ctx, ts+24736, 2, uintptr(0)) + Xsqlite3_result_text(tls, ctx, ts+24783, 2, uintptr(0)) } Xsqlite3_result_subtype(tls, ctx, uint32(JSON_SUBTYPE)) } @@ -120257,7 +120315,7 @@ (*JsonString)(unsafe.Pointer(pStr)).FnUsed-- } } else { - Xsqlite3_result_text(tls, ctx, ts+24739, 2, uintptr(0)) + Xsqlite3_result_text(tls, ctx, ts+24786, 2, uintptr(0)) } Xsqlite3_result_subtype(tls, ctx, uint32(JSON_SUBTYPE)) } @@ -120301,7 +120359,7 @@ _ = argc _ = pAux rc = Xsqlite3_declare_vtab(tls, db, - ts+24742) + ts+24789) if rc == SQLITE_OK { pNew = libc.AssignPtrUintptr(ppVtab, Xsqlite3_malloc(tls, int32(unsafe.Sizeof(Sqlite3_vtab{})))) if pNew == uintptr(0) { @@ -120432,7 +120490,7 @@ nn = nn - 2 } } - jsonPrintf(tls, nn+2, pStr, ts+24825, libc.VaList(bp, nn, z)) + jsonPrintf(tls, nn+2, pStr, ts+24872, libc.VaList(bp, nn, z)) } func jsonEachComputePath(tls *libc.TLS, p uintptr, pStr uintptr, i U32) { @@ -120451,7 +120509,7 @@ pNode 
= (*JsonEachCursor)(unsafe.Pointer(p)).FsParse.FaNode + uintptr(i)*16 pUp = (*JsonEachCursor)(unsafe.Pointer(p)).FsParse.FaNode + uintptr(iUp)*16 if int32((*JsonNode)(unsafe.Pointer(pUp)).FeType) == JSON_ARRAY { - jsonPrintf(tls, 30, pStr, ts+24831, libc.VaList(bp, *(*U32)(unsafe.Pointer(pUp + 8)))) + jsonPrintf(tls, 30, pStr, ts+24878, libc.VaList(bp, *(*U32)(unsafe.Pointer(pUp + 8)))) } else { if int32((*JsonNode)(unsafe.Pointer(pNode)).FjnFlags)&JNODE_LABEL == 0 { pNode -= 16 @@ -120547,7 +120605,7 @@ jsonAppendChar(tls, bp+8, uint8('$')) } if int32((*JsonEachCursor)(unsafe.Pointer(p)).FeType) == JSON_ARRAY { - jsonPrintf(tls, 30, bp+8, ts+24831, libc.VaList(bp, (*JsonEachCursor)(unsafe.Pointer(p)).FiRowid)) + jsonPrintf(tls, 30, bp+8, ts+24878, libc.VaList(bp, (*JsonEachCursor)(unsafe.Pointer(p)).FiRowid)) } else if int32((*JsonEachCursor)(unsafe.Pointer(p)).FeType) == JSON_OBJECT { jsonAppendObjectPathElement(tls, bp+8, pThis) } @@ -120571,7 +120629,7 @@ { var zRoot uintptr = (*JsonEachCursor)(unsafe.Pointer(p)).FzRoot if zRoot == uintptr(0) { - zRoot = ts + 24836 + zRoot = ts + 24883 } Xsqlite3_result_text(tls, ctx, zRoot, -1, uintptr(0)) break @@ -120693,7 +120751,7 @@ var rc int32 = SQLITE_NOMEM if int32((*JsonEachCursor)(unsafe.Pointer(p)).FsParse.Foom) == 0 { Xsqlite3_free(tls, (*Sqlite3_vtab)(unsafe.Pointer((*Sqlite3_vtab_cursor)(unsafe.Pointer(cur)).FpVtab)).FzErrMsg) - (*Sqlite3_vtab)(unsafe.Pointer((*Sqlite3_vtab_cursor)(unsafe.Pointer(cur)).FpVtab)).FzErrMsg = Xsqlite3_mprintf(tls, ts+24546, 0) + (*Sqlite3_vtab)(unsafe.Pointer((*Sqlite3_vtab_cursor)(unsafe.Pointer(cur)).FpVtab)).FzErrMsg = Xsqlite3_mprintf(tls, ts+24593, 0) if (*Sqlite3_vtab)(unsafe.Pointer((*Sqlite3_vtab_cursor)(unsafe.Pointer(cur)).FpVtab)).FzErrMsg != 0 { rc = SQLITE_ERROR } @@ -120788,25 +120846,25 @@ } var aJsonFunc = [19]FuncDef{ - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24838}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24843}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24854}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24854}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24872}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_JSON)), FxSFunc: 0, FzName: ts + 24885}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_SQL)), FxSFunc: 0, FzName: ts + 24888}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24892}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24904}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24916}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), 
FxSFunc: 0, FzName: ts + 24927}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24938}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24950}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_ISSET)), FxSFunc: 0, FzName: ts + 24963}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24972}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24972}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24982}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_UTF8 | 0*SQLITE_FUNC_NEEDCOLL | SQLITE_SUBTYPE | SQLITE_UTF8 | SQLITE_DETERMINISTIC), FxSFunc: 0, FxFinalize: 0, FxValue: 0, FxInverse: 0, FzName: ts + 24993}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_UTF8 | 0*SQLITE_FUNC_NEEDCOLL | SQLITE_SUBTYPE | SQLITE_UTF8 | SQLITE_DETERMINISTIC), FxSFunc: 0, FxFinalize: 0, FxValue: 0, FxInverse: 0, FzName: ts + 25010}} + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24885}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24890}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24901}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24901}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24919}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_JSON)), FxSFunc: 0, FzName: ts + 24932}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_SQL)), FxSFunc: 0, FzName: ts + 24935}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24939}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24951}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24963}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24974}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24985}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24997}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | 
SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_ISSET)), FxSFunc: 0, FzName: ts + 25010}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25019}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25019}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25029}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_UTF8 | 0*SQLITE_FUNC_NEEDCOLL | SQLITE_SUBTYPE | SQLITE_UTF8 | SQLITE_DETERMINISTIC), FxSFunc: 0, FxFinalize: 0, FxValue: 0, FxInverse: 0, FzName: ts + 25040}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_UTF8 | 0*SQLITE_FUNC_NEEDCOLL | SQLITE_SUBTYPE | SQLITE_UTF8 | SQLITE_DETERMINISTIC), FxSFunc: 0, FxFinalize: 0, FxValue: 0, FxInverse: 0, FzName: ts + 25057}} // Register the JSON table-valued functions func Xsqlite3JsonTableFunctions(tls *libc.TLS, db uintptr) int32 { @@ -120825,8 +120883,8 @@ FzName uintptr FpModule uintptr }{ - {FzName: ts + 25028, FpModule: 0}, - {FzName: ts + 25038, FpModule: 0}, + {FzName: ts + 25075, FpModule: 0}, + {FzName: ts + 25085, FpModule: 0}, } type Rtree1 = struct { @@ -121086,11 +121144,11 @@ } } if (*Rtree)(unsafe.Pointer(pRtree)).FpNodeBlob == uintptr(0) { - var zTab uintptr = Xsqlite3_mprintf(tls, ts+25048, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) + var zTab uintptr = Xsqlite3_mprintf(tls, ts+25095, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) if zTab == uintptr(0) { return SQLITE_NOMEM } - rc = Xsqlite3_blob_open(tls, (*Rtree)(unsafe.Pointer(pRtree)).Fdb, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, zTab, ts+25056, iNode, 0, + rc = Xsqlite3_blob_open(tls, (*Rtree)(unsafe.Pointer(pRtree)).Fdb, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, zTab, ts+25103, iNode, 0, pRtree+112) Xsqlite3_free(tls, zTab) } @@ -121301,7 +121359,7 @@ var pRtree uintptr = pVtab var rc int32 var zCreate uintptr = Xsqlite3_mprintf(tls, - ts+25061, + ts+25108, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) @@ -121999,7 +122057,7 @@ var pSrc uintptr var pInfo uintptr - pSrc = Xsqlite3_value_pointer(tls, pValue, ts+25143) + pSrc = Xsqlite3_value_pointer(tls, pValue, ts+25190) if pSrc == uintptr(0) { return SQLITE_ERROR } @@ -123340,7 +123398,7 @@ var zSql uintptr var rc int32 - zSql = Xsqlite3_mprintf(tls, ts+25157, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) + zSql = Xsqlite3_mprintf(tls, ts+25204, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) if zSql != 0 { rc = Xsqlite3_prepare_v2(tls, (*Rtree)(unsafe.Pointer(pRtree)).Fdb, zSql, -1, bp+56, uintptr(0)) } else { @@ -123352,12 +123410,12 @@ if iCol == 0 { var zCol uintptr = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), 0) (*Rtree)(unsafe.Pointer(pRtree)).Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+25177, libc.VaList(bp+16, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zCol)) + ts+25224, libc.VaList(bp+16, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zCol)) } else { var zCol1 uintptr = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), iCol) var zCol2 
uintptr = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), iCol+1) (*Rtree)(unsafe.Pointer(pRtree)).Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+25209, libc.VaList(bp+32, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zCol1, zCol2)) + ts+25256, libc.VaList(bp+32, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zCol1, zCol2)) } } @@ -123583,7 +123641,7 @@ var pRtree uintptr = pVtab var rc int32 = SQLITE_NOMEM var zSql uintptr = Xsqlite3_mprintf(tls, - ts+25246, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName)) + ts+25293, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName)) if zSql != 0 { nodeBlobReset(tls, pRtree) rc = Xsqlite3_exec(tls, (*Rtree)(unsafe.Pointer(pRtree)).Fdb, zSql, uintptr(0), uintptr(0), uintptr(0)) @@ -123606,7 +123664,7 @@ bp := tls.Alloc(24) defer tls.Free(24) - var zFmt uintptr = ts + 25391 + var zFmt uintptr = ts + 25438 var zSql uintptr var rc int32 @@ -123654,7 +123712,7 @@ } var azName1 = [3]uintptr{ - ts + 25447, ts + 5060, ts + 16267, + ts + 25494, ts + 5060, ts + 16267, } var rtreeModule = Sqlite3_module{ @@ -123697,19 +123755,19 @@ var p uintptr = Xsqlite3_str_new(tls, db) var ii int32 Xsqlite3_str_appendf(tls, p, - ts+25452, + ts+25499, libc.VaList(bp, zDb, zPrefix)) for ii = 0; ii < int32((*Rtree)(unsafe.Pointer(pRtree)).FnAux); ii++ { - Xsqlite3_str_appendf(tls, p, ts+25514, libc.VaList(bp+16, ii)) + Xsqlite3_str_appendf(tls, p, ts+25561, libc.VaList(bp+16, ii)) } Xsqlite3_str_appendf(tls, p, - ts+25519, + ts+25566, libc.VaList(bp+24, zDb, zPrefix)) Xsqlite3_str_appendf(tls, p, - ts+25583, + ts+25630, libc.VaList(bp+40, zDb, zPrefix)) Xsqlite3_str_appendf(tls, p, - ts+25653, + ts+25700, libc.VaList(bp+56, zDb, zPrefix, (*Rtree)(unsafe.Pointer(pRtree)).FiNodeSize)) zCreate = Xsqlite3_str_finish(tls, p) if !(zCreate != 0) { @@ -123738,7 +123796,7 @@ if i != 3 || int32((*Rtree)(unsafe.Pointer(pRtree)).FnAux) == 0 { zFormat = azSql[i] } else { - zFormat = ts + 25702 + zFormat = ts + 25749 } zSql = Xsqlite3_mprintf(tls, zFormat, libc.VaList(bp+80, zDb, zPrefix)) if zSql != 0 { @@ -123750,7 +123808,7 @@ } if (*Rtree)(unsafe.Pointer(pRtree)).FnAux != 0 { (*Rtree)(unsafe.Pointer(pRtree)).FzReadAuxSql = Xsqlite3_mprintf(tls, - ts+25810, + ts+25857, libc.VaList(bp+96, zDb, zPrefix)) if (*Rtree)(unsafe.Pointer(pRtree)).FzReadAuxSql == uintptr(0) { rc = SQLITE_NOMEM @@ -123758,18 +123816,18 @@ var p uintptr = Xsqlite3_str_new(tls, db) var ii int32 var zSql uintptr - Xsqlite3_str_appendf(tls, p, ts+25855, libc.VaList(bp+112, zDb, zPrefix)) + Xsqlite3_str_appendf(tls, p, ts+25902, libc.VaList(bp+112, zDb, zPrefix)) for ii = 0; ii < int32((*Rtree)(unsafe.Pointer(pRtree)).FnAux); ii++ { if ii != 0 { Xsqlite3_str_append(tls, p, ts+12767, 1) } if ii < int32((*Rtree)(unsafe.Pointer(pRtree)).FnAuxNotNull) { - Xsqlite3_str_appendf(tls, p, ts+25882, libc.VaList(bp+128, ii, ii+2, ii)) + Xsqlite3_str_appendf(tls, p, ts+25929, libc.VaList(bp+128, ii, ii+2, ii)) } else { - Xsqlite3_str_appendf(tls, p, ts+25904, libc.VaList(bp+152, ii, ii+2)) + Xsqlite3_str_appendf(tls, p, ts+25951, libc.VaList(bp+152, ii, ii+2)) } } - 
Xsqlite3_str_appendf(tls, p, ts+25912, 0) + Xsqlite3_str_appendf(tls, p, ts+25959, 0) zSql = Xsqlite3_str_finish(tls, p) if zSql == uintptr(0) { rc = SQLITE_NOMEM @@ -123784,14 +123842,14 @@ } var azSql = [8]uintptr{ - ts + 25928, - ts + 25981, - ts + 26026, - ts + 26078, - ts + 26132, - ts + 26177, - ts + 26235, - ts + 26290, + ts + 25975, + ts + 26028, + ts + 26073, + ts + 26125, + ts + 26179, + ts + 26224, + ts + 26282, + ts + 26337, } func getIntFromStmt(tls *libc.TLS, db uintptr, zSql uintptr, piVal uintptr) int32 { @@ -123820,7 +123878,7 @@ var zSql uintptr if isCreate != 0 { *(*int32)(unsafe.Pointer(bp + 48)) = 0 - zSql = Xsqlite3_mprintf(tls, ts+26337, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb)) + zSql = Xsqlite3_mprintf(tls, ts+26384, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb)) rc = getIntFromStmt(tls, db, zSql, bp+48) if rc == SQLITE_OK { (*Rtree)(unsafe.Pointer(pRtree)).FiNodeSize = *(*int32)(unsafe.Pointer(bp + 48)) - 64 @@ -123832,7 +123890,7 @@ } } else { zSql = Xsqlite3_mprintf(tls, - ts+26357, + ts+26404, libc.VaList(bp+16, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) rc = getIntFromStmt(tls, db, zSql, pRtree+32) if rc != SQLITE_OK { @@ -123840,7 +123898,7 @@ } else if (*Rtree)(unsafe.Pointer(pRtree)).FiNodeSize < 512-64 { rc = SQLITE_CORRUPT | int32(1)<<8 - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+26414, + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+26461, libc.VaList(bp+40, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) } } @@ -123882,10 +123940,10 @@ ii = 4 *(*[5]uintptr)(unsafe.Pointer(bp + 96)) = [5]uintptr{ uintptr(0), - ts + 26449, - ts + 26492, - ts + 26527, - ts + 26563, + ts + 26496, + ts + 26539, + ts + 26574, + ts + 26610, } if !(argc < 6 || argc > RTREE_MAX_AUX_COLUMN+3) { @@ -123916,7 +123974,7 @@ libc.Xmemcpy(tls, (*Rtree)(unsafe.Pointer(pRtree)).FzName, *(*uintptr)(unsafe.Pointer(argv + 2*8)), uint64(nName)) pSql = Xsqlite3_str_new(tls, db) - Xsqlite3_str_appendf(tls, pSql, ts+26600, + Xsqlite3_str_appendf(tls, pSql, ts+26647, libc.VaList(bp+16, rtreeTokenLength(tls, *(*uintptr)(unsafe.Pointer(argv + 3*8))), *(*uintptr)(unsafe.Pointer(argv + 3*8)))) ii = 4 __3: @@ -123928,7 +123986,7 @@ goto __6 } (*Rtree)(unsafe.Pointer(pRtree)).FnAux++ - Xsqlite3_str_appendf(tls, pSql, ts+26624, libc.VaList(bp+32, rtreeTokenLength(tls, zArg+uintptr(1)), zArg+uintptr(1))) + Xsqlite3_str_appendf(tls, pSql, ts+26671, libc.VaList(bp+32, rtreeTokenLength(tls, zArg+uintptr(1)), zArg+uintptr(1))) goto __7 __6: if !(int32((*Rtree)(unsafe.Pointer(pRtree)).FnAux) > 0) { @@ -123951,7 +124009,7 @@ goto __5 __5: ; - Xsqlite3_str_appendf(tls, pSql, ts+26630, 0) + Xsqlite3_str_appendf(tls, pSql, ts+26677, 0) zSql = Xsqlite3_str_finish(tls, pSql) if !!(zSql != 0) { goto __10 @@ -124047,7 +124105,7 @@ return rc } -var azFormat = [2]uintptr{ts + 26633, ts + 26644} +var azFormat = [2]uintptr{ts + 26680, ts + 26691} func rtreenode(tls *libc.TLS, ctx uintptr, nArg int32, apArg uintptr) { bp := tls.Alloc(1072) @@ -124087,11 +124145,11 @@ if ii > 0 { Xsqlite3_str_append(tls, pOut, ts+10920, 1) } - Xsqlite3_str_appendf(tls, pOut, ts+26654, libc.VaList(bp, (*RtreeCell)(unsafe.Pointer(bp+1024)).FiRowid)) + Xsqlite3_str_appendf(tls, pOut, ts+26701, libc.VaList(bp, (*RtreeCell)(unsafe.Pointer(bp+1024)).FiRowid)) for jj = 0; jj < int32((*Rtree)(unsafe.Pointer(bp+56)).FnDim2); jj++ { - Xsqlite3_str_appendf(tls, pOut, ts+26660, libc.VaList(bp+8, float64(*(*RtreeValue)(unsafe.Pointer(bp + 1024 + 8 + 
uintptr(jj)*4))))) + Xsqlite3_str_appendf(tls, pOut, ts+26707, libc.VaList(bp+8, float64(*(*RtreeValue)(unsafe.Pointer(bp + 1024 + 8 + uintptr(jj)*4))))) } - Xsqlite3_str_append(tls, pOut, ts+26664, 1) + Xsqlite3_str_append(tls, pOut, ts+26711, 1) } errCode = Xsqlite3_str_errcode(tls, pOut) Xsqlite3_result_text(tls, ctx, Xsqlite3_str_finish(tls, pOut), -1, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{Xsqlite3_free}))) @@ -124102,7 +124160,7 @@ _ = nArg if Xsqlite3_value_type(tls, *(*uintptr)(unsafe.Pointer(apArg))) != SQLITE_BLOB || Xsqlite3_value_bytes(tls, *(*uintptr)(unsafe.Pointer(apArg))) < 2 { - Xsqlite3_result_error(tls, ctx, ts+26666, -1) + Xsqlite3_result_error(tls, ctx, ts+26713, -1) } else { var zBlob uintptr = Xsqlite3_value_blob(tls, *(*uintptr)(unsafe.Pointer(apArg))) if zBlob != 0 { @@ -124180,7 +124238,7 @@ if z == uintptr(0) { (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc = SQLITE_NOMEM } else { - (*RtreeCheck)(unsafe.Pointer(pCheck)).FzReport = Xsqlite3_mprintf(tls, ts+26699, + (*RtreeCheck)(unsafe.Pointer(pCheck)).FzReport = Xsqlite3_mprintf(tls, ts+26746, libc.VaList(bp, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzReport, func() uintptr { if (*RtreeCheck)(unsafe.Pointer(pCheck)).FzReport != 0 { return ts + 4062 @@ -124204,7 +124262,7 @@ if (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc == SQLITE_OK && (*RtreeCheck)(unsafe.Pointer(pCheck)).FpGetNode == uintptr(0) { (*RtreeCheck)(unsafe.Pointer(pCheck)).FpGetNode = rtreeCheckPrepare(tls, pCheck, - ts+26706, + ts+26753, libc.VaList(bp, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzDb, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzTab)) } @@ -124223,7 +124281,7 @@ } rtreeCheckReset(tls, pCheck, (*RtreeCheck)(unsafe.Pointer(pCheck)).FpGetNode) if (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc == SQLITE_OK && pRet == uintptr(0) { - rtreeCheckAppendMsg(tls, pCheck, ts+26751, libc.VaList(bp+16, iNode)) + rtreeCheckAppendMsg(tls, pCheck, ts+26798, libc.VaList(bp+16, iNode)) } } @@ -124237,8 +124295,8 @@ var rc int32 var pStmt uintptr *(*[2]uintptr)(unsafe.Pointer(bp + 80)) = [2]uintptr{ - ts + 26783, - ts + 26837, + ts + 26830, + ts + 26884, } if *(*uintptr)(unsafe.Pointer(pCheck + 40 + uintptr(bLeaf)*8)) == uintptr(0) { @@ -124253,23 +124311,23 @@ Xsqlite3_bind_int64(tls, pStmt, 1, iKey) rc = Xsqlite3_step(tls, pStmt) if rc == SQLITE_DONE { - rtreeCheckAppendMsg(tls, pCheck, ts+26885, + rtreeCheckAppendMsg(tls, pCheck, ts+26932, libc.VaList(bp+16, iKey, iVal, func() uintptr { if bLeaf != 0 { - return ts + 26930 + return ts + 26977 } - return ts + 26938 + return ts + 26985 }())) } else if rc == SQLITE_ROW { var ii I64 = Xsqlite3_column_int64(tls, pStmt, 0) if ii != iVal { rtreeCheckAppendMsg(tls, pCheck, - ts+26947, + ts+26994, libc.VaList(bp+40, iKey, ii, func() uintptr { if bLeaf != 0 { - return ts + 26930 + return ts + 26977 } - return ts + 26938 + return ts + 26985 }(), iKey, iVal)) } } @@ -124293,7 +124351,7 @@ return libc.Bool32(*(*RtreeValue)(unsafe.Pointer(bp + 48)) > *(*RtreeValue)(unsafe.Pointer(bp + 52))) }() != 0 { rtreeCheckAppendMsg(tls, pCheck, - ts+27005, libc.VaList(bp, i, iCell, iNode)) + ts+27052, libc.VaList(bp, i, iCell, iNode)) } if pParent != 0 { @@ -124313,7 +124371,7 @@ return libc.Bool32(*(*RtreeValue)(unsafe.Pointer(bp + 52)) > *(*RtreeValue)(unsafe.Pointer(bp + 60))) }() != 0 { rtreeCheckAppendMsg(tls, pCheck, - ts+27053, libc.VaList(bp+24, i, iCell, iNode)) + ts+27100, libc.VaList(bp+24, i, iCell, iNode)) } } } @@ -124330,14 +124388,14 @@ if aNode != 0 { if *(*int32)(unsafe.Pointer(bp + 48)) < 4 { 
rtreeCheckAppendMsg(tls, pCheck, - ts+27120, libc.VaList(bp, iNode, *(*int32)(unsafe.Pointer(bp + 48)))) + ts+27167, libc.VaList(bp, iNode, *(*int32)(unsafe.Pointer(bp + 48)))) } else { var nCell int32 var i int32 if aParent == uintptr(0) { iDepth = readInt16(tls, aNode) if iDepth > RTREE_MAX_DEPTH { - rtreeCheckAppendMsg(tls, pCheck, ts+27154, libc.VaList(bp+16, iDepth)) + rtreeCheckAppendMsg(tls, pCheck, ts+27201, libc.VaList(bp+16, iDepth)) Xsqlite3_free(tls, aNode) return } @@ -124345,7 +124403,7 @@ nCell = readInt16(tls, aNode+2) if 4+nCell*(8+(*RtreeCheck)(unsafe.Pointer(pCheck)).FnDim*2*4) > *(*int32)(unsafe.Pointer(bp + 48)) { rtreeCheckAppendMsg(tls, pCheck, - ts+27184, + ts+27231, libc.VaList(bp+24, iNode, nCell, *(*int32)(unsafe.Pointer(bp + 48)))) } else { for i = 0; i < nCell; i++ { @@ -124374,14 +124432,14 @@ if (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc == SQLITE_OK { var pCount uintptr - pCount = rtreeCheckPrepare(tls, pCheck, ts+27239, + pCount = rtreeCheckPrepare(tls, pCheck, ts+27286, libc.VaList(bp, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzDb, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzTab, zTbl)) if pCount != 0 { if Xsqlite3_step(tls, pCount) == SQLITE_ROW { var nActual I64 = Xsqlite3_column_int64(tls, pCount, 0) if nActual != nExpect { rtreeCheckAppendMsg(tls, pCheck, - ts+27270, libc.VaList(bp+24, zTbl, nExpect, nActual)) + ts+27317, libc.VaList(bp+24, zTbl, nExpect, nActual)) } } (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc = Xsqlite3_finalize(tls, pCount) @@ -124408,7 +124466,7 @@ } if (*RtreeCheck)(unsafe.Pointer(bp+32)).Frc == SQLITE_OK { - pStmt = rtreeCheckPrepare(tls, bp+32, ts+27337, libc.VaList(bp, zDb, zTab)) + pStmt = rtreeCheckPrepare(tls, bp+32, ts+27384, libc.VaList(bp, zDb, zTab)) if pStmt != 0 { nAux = Xsqlite3_column_count(tls, pStmt) - 2 Xsqlite3_finalize(tls, pStmt) @@ -124417,12 +124475,12 @@ } } - pStmt = rtreeCheckPrepare(tls, bp+32, ts+25157, libc.VaList(bp+16, zDb, zTab)) + pStmt = rtreeCheckPrepare(tls, bp+32, ts+25204, libc.VaList(bp+16, zDb, zTab)) if pStmt != 0 { var rc int32 (*RtreeCheck)(unsafe.Pointer(bp + 32)).FnDim = (Xsqlite3_column_count(tls, pStmt) - 1 - nAux) / 2 if (*RtreeCheck)(unsafe.Pointer(bp+32)).FnDim < 1 { - rtreeCheckAppendMsg(tls, bp+32, ts+27365, 0) + rtreeCheckAppendMsg(tls, bp+32, ts+27412, 0) } else if SQLITE_ROW == Xsqlite3_step(tls, pStmt) { (*RtreeCheck)(unsafe.Pointer(bp + 32)).FbInt = libc.Bool32(Xsqlite3_column_type(tls, pStmt, 1) == SQLITE_INTEGER) } @@ -124436,8 +124494,8 @@ if (*RtreeCheck)(unsafe.Pointer(bp+32)).Frc == SQLITE_OK { rtreeCheckNode(tls, bp+32, 0, uintptr(0), int64(1)) } - rtreeCheckCount(tls, bp+32, ts+27396, int64((*RtreeCheck)(unsafe.Pointer(bp+32)).FnLeaf)) - rtreeCheckCount(tls, bp+32, ts+27403, int64((*RtreeCheck)(unsafe.Pointer(bp+32)).FnNonLeaf)) + rtreeCheckCount(tls, bp+32, ts+27443, int64((*RtreeCheck)(unsafe.Pointer(bp+32)).FnLeaf)) + rtreeCheckCount(tls, bp+32, ts+27450, int64((*RtreeCheck)(unsafe.Pointer(bp+32)).FnNonLeaf)) } Xsqlite3_finalize(tls, (*RtreeCheck)(unsafe.Pointer(bp+32)).FpGetNode) @@ -124445,7 +124503,7 @@ Xsqlite3_finalize(tls, *(*uintptr)(unsafe.Pointer(bp + 32 + 40 + 1*8))) if bEnd != 0 { - var rc int32 = Xsqlite3_exec(tls, db, ts+27411, uintptr(0), uintptr(0), uintptr(0)) + var rc int32 = Xsqlite3_exec(tls, db, ts+27458, uintptr(0), uintptr(0), uintptr(0)) if (*RtreeCheck)(unsafe.Pointer(bp+32)).Frc == SQLITE_OK { (*RtreeCheck)(unsafe.Pointer(bp + 32)).Frc = rc } @@ -124460,7 +124518,7 @@ if nArg != 1 && nArg != 2 { Xsqlite3_result_error(tls, ctx, - ts+27415, -1) + 
ts+27462, -1) } else { var rc int32 *(*uintptr)(unsafe.Pointer(bp)) = uintptr(0) @@ -124478,7 +124536,7 @@ if *(*uintptr)(unsafe.Pointer(bp)) != 0 { return *(*uintptr)(unsafe.Pointer(bp)) } - return ts + 18012 + return ts + 18059 }(), -1, libc.UintptrFromInt32(-1)) } else { Xsqlite3_result_error_code(tls, ctx, rc) @@ -124849,11 +124907,11 @@ var db uintptr = Xsqlite3_context_db_handle(tls, context) var x uintptr = Xsqlite3_str_new(tls, db) var i int32 - Xsqlite3_str_append(tls, x, ts+27466, 1) + Xsqlite3_str_append(tls, x, ts+27513, 1) for i = 0; i < (*GeoPoly)(unsafe.Pointer(p)).FnVertex; i++ { - Xsqlite3_str_appendf(tls, x, ts+27468, libc.VaList(bp, float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2)*4))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2+1)*4))))) + Xsqlite3_str_appendf(tls, x, ts+27515, libc.VaList(bp, float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2)*4))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2+1)*4))))) } - Xsqlite3_str_appendf(tls, x, ts+27479, libc.VaList(bp+16, float64(*(*GeoCoord)(unsafe.Pointer(p + 8))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + 1*4))))) + Xsqlite3_str_appendf(tls, x, ts+27526, libc.VaList(bp+16, float64(*(*GeoCoord)(unsafe.Pointer(p + 8))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + 1*4))))) Xsqlite3_result_text(tls, context, Xsqlite3_str_finish(tls, x), -1, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{Xsqlite3_free}))) Xsqlite3_free(tls, p) } @@ -124873,19 +124931,19 @@ var x uintptr = Xsqlite3_str_new(tls, db) var i int32 var cSep uint8 = uint8('\'') - Xsqlite3_str_appendf(tls, x, ts+27490, 0) + Xsqlite3_str_appendf(tls, x, ts+27537, 0) for i = 0; i < (*GeoPoly)(unsafe.Pointer(p)).FnVertex; i++ { - Xsqlite3_str_appendf(tls, x, ts+27508, libc.VaList(bp, int32(cSep), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2)*4))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2+1)*4))))) + Xsqlite3_str_appendf(tls, x, ts+27555, libc.VaList(bp, int32(cSep), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2)*4))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2+1)*4))))) cSep = uint8(' ') } - Xsqlite3_str_appendf(tls, x, ts+27516, libc.VaList(bp+24, float64(*(*GeoCoord)(unsafe.Pointer(p + 8))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + 1*4))))) + Xsqlite3_str_appendf(tls, x, ts+27563, libc.VaList(bp+24, float64(*(*GeoCoord)(unsafe.Pointer(p + 8))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + 1*4))))) for i = 1; i < argc; i++ { var z uintptr = Xsqlite3_value_text(tls, *(*uintptr)(unsafe.Pointer(argv + uintptr(i)*8))) if z != 0 && *(*uint8)(unsafe.Pointer(z)) != 0 { - Xsqlite3_str_appendf(tls, x, ts+27524, libc.VaList(bp+40, z)) + Xsqlite3_str_appendf(tls, x, ts+27571, libc.VaList(bp+40, z)) } } - Xsqlite3_str_appendf(tls, x, ts+27528, 0) + Xsqlite3_str_appendf(tls, x, ts+27575, 0) Xsqlite3_result_text(tls, context, Xsqlite3_str_finish(tls, x), -1, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{Xsqlite3_free}))) Xsqlite3_free(tls, p) } @@ -125805,7 +125863,7 @@ libc.Xmemcpy(tls, (*Rtree)(unsafe.Pointer(pRtree)).FzName, *(*uintptr)(unsafe.Pointer(argv + 2*8)), uint64(nName)) pSql = Xsqlite3_str_new(tls, db) - Xsqlite3_str_appendf(tls, pSql, ts+27541, 0) + Xsqlite3_str_appendf(tls, pSql, ts+27588, 0) (*Rtree)(unsafe.Pointer(pRtree)).FnAux = U8(1) (*Rtree)(unsafe.Pointer(pRtree)).FnAuxNotNull = U8(1) ii = 3 @@ -125814,7 +125872,7 @@ goto __4 } (*Rtree)(unsafe.Pointer(pRtree)).FnAux++ - Xsqlite3_str_appendf(tls, pSql, ts+27563, libc.VaList(bp+8, 
*(*uintptr)(unsafe.Pointer(argv + uintptr(ii)*8)))) + Xsqlite3_str_appendf(tls, pSql, ts+27610, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(argv + uintptr(ii)*8)))) goto __3 __3: ii++ @@ -125822,7 +125880,7 @@ goto __4 __4: ; - Xsqlite3_str_appendf(tls, pSql, ts+26630, 0) + Xsqlite3_str_appendf(tls, pSql, ts+26677, 0) zSql = Xsqlite3_str_finish(tls, pSql) if !!(zSql != 0) { goto __5 @@ -126059,7 +126117,7 @@ } if iFuncTerm >= 0 { (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxNum = idxNum - (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxStr = ts + 27567 + (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxStr = ts + 27614 (*sqlite3_index_constraint_usage)(unsafe.Pointer((*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FaConstraintUsage + uintptr(iFuncTerm)*8)).FargvIndex = 1 (*sqlite3_index_constraint_usage)(unsafe.Pointer((*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FaConstraintUsage + uintptr(iFuncTerm)*8)).Fomit = uint8(0) (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FestimatedCost = 300.0 @@ -126067,7 +126125,7 @@ return SQLITE_OK } (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxNum = 4 - (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxStr = ts + 27573 + (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxStr = ts + 27620 (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FestimatedCost = 3000000.0 (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FestimatedRows = int64(100000) return SQLITE_OK @@ -126179,7 +126237,7 @@ if !(*(*int32)(unsafe.Pointer(bp + 48)) == SQLITE_ERROR) { goto __4 } - (*Sqlite3_vtab)(unsafe.Pointer(pVtab)).FzErrMsg = Xsqlite3_mprintf(tls, ts+27582, 0) + (*Sqlite3_vtab)(unsafe.Pointer(pVtab)).FzErrMsg = Xsqlite3_mprintf(tls, ts+27629, 0) __4: ; goto geopoly_update_end @@ -126311,14 +126369,14 @@ func geopolyFindFunction(tls *libc.TLS, pVtab uintptr, nArg int32, zName uintptr, pxFunc uintptr, ppArg uintptr) int32 { _ = pVtab _ = nArg - if Xsqlite3_stricmp(tls, zName, ts+27622) == 0 { + if Xsqlite3_stricmp(tls, zName, ts+27669) == 0 { *(*uintptr)(unsafe.Pointer(pxFunc)) = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{geopolyOverlapFunc})) *(*uintptr)(unsafe.Pointer(ppArg)) = uintptr(0) return SQLITE_INDEX_CONSTRAINT_FUNCTION } - if Xsqlite3_stricmp(tls, zName, ts+27638) == 0 { + if Xsqlite3_stricmp(tls, zName, ts+27685) == 0 { *(*uintptr)(unsafe.Pointer(pxFunc)) = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{geopolyWithinFunc})) @@ -126383,7 +126441,7 @@ uintptr(0), aAgg[i].FxStep, aAgg[i].FxFinal) } if rc == SQLITE_OK { - rc = Xsqlite3_create_module_v2(tls, db, ts+27653, uintptr(unsafe.Pointer(&geopolyModule)), uintptr(0), uintptr(0)) + rc = Xsqlite3_create_module_v2(tls, db, ts+27700, uintptr(unsafe.Pointer(&geopolyModule)), uintptr(0), uintptr(0)) } return rc } @@ -126395,25 +126453,25 @@ F__ccgo_pad1 [6]byte FzName uintptr }{ - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27661}, - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27674}, - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27687}, - {FxFunc: 0, FnArg: int8(-1), FbPure: uint8(1), FzName: ts + 27700}, - {FxFunc: 0, FnArg: int8(2), FbPure: uint8(1), FzName: ts + 27638}, - {FxFunc: 0, FnArg: int8(3), FbPure: uint8(1), FzName: ts + 27712}, - {FxFunc: 0, FnArg: int8(2), FbPure: uint8(1), FzName: ts + 27622}, - {FxFunc: 0, FnArg: int8(1), FzName: ts + 27735}, - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27749}, - {FxFunc: 0, FnArg: int8(7), FbPure: uint8(1), 
FzName: ts + 27762}, - {FxFunc: 0, FnArg: int8(4), FbPure: uint8(1), FzName: ts + 27776}, - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27792}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27708}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27721}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27734}, + {FxFunc: 0, FnArg: int8(-1), FbPure: uint8(1), FzName: ts + 27747}, + {FxFunc: 0, FnArg: int8(2), FbPure: uint8(1), FzName: ts + 27685}, + {FxFunc: 0, FnArg: int8(3), FbPure: uint8(1), FzName: ts + 27759}, + {FxFunc: 0, FnArg: int8(2), FbPure: uint8(1), FzName: ts + 27669}, + {FxFunc: 0, FnArg: int8(1), FzName: ts + 27782}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27796}, + {FxFunc: 0, FnArg: int8(7), FbPure: uint8(1), FzName: ts + 27809}, + {FxFunc: 0, FnArg: int8(4), FbPure: uint8(1), FzName: ts + 27823}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27839}, } var aAgg = [1]struct { FxStep uintptr FxFinal uintptr FzName uintptr }{ - {FxStep: 0, FxFinal: 0, FzName: ts + 27804}, + {FxStep: 0, FxFinal: 0, FzName: ts + 27851}, } // Register the r-tree module with database handle db. This creates the @@ -126423,26 +126481,26 @@ var utf8 int32 = SQLITE_UTF8 var rc int32 - rc = Xsqlite3_create_function(tls, db, ts+27823, 2, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + rc = Xsqlite3_create_function(tls, db, ts+27870, 2, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rtreenode})), uintptr(0), uintptr(0)) if rc == SQLITE_OK { - rc = Xsqlite3_create_function(tls, db, ts+27833, 1, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + rc = Xsqlite3_create_function(tls, db, ts+27880, 1, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rtreedepth})), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { - rc = Xsqlite3_create_function(tls, db, ts+27844, -1, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + rc = Xsqlite3_create_function(tls, db, ts+27891, -1, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rtreecheck})), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { var c uintptr = uintptr(RTREE_COORD_REAL32) - rc = Xsqlite3_create_module_v2(tls, db, ts+27567, uintptr(unsafe.Pointer(&rtreeModule)), c, uintptr(0)) + rc = Xsqlite3_create_module_v2(tls, db, ts+27614, uintptr(unsafe.Pointer(&rtreeModule)), c, uintptr(0)) } if rc == SQLITE_OK { var c uintptr = uintptr(RTREE_COORD_INT32) - rc = Xsqlite3_create_module_v2(tls, db, ts+27855, uintptr(unsafe.Pointer(&rtreeModule)), c, uintptr(0)) + rc = Xsqlite3_create_module_v2(tls, db, ts+27902, uintptr(unsafe.Pointer(&rtreeModule)), c, uintptr(0)) } if rc == SQLITE_OK { rc = sqlite3_geopoly_init(tls, db) @@ -126496,7 +126554,7 @@ Xsqlite3_result_error_nomem(tls, ctx) rtreeMatchArgFree(tls, pBlob) } else { - Xsqlite3_result_pointer(tls, ctx, pBlob, ts+25143, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{rtreeMatchArgFree}))) + Xsqlite3_result_pointer(tls, ctx, pBlob, ts+25190, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{rtreeMatchArgFree}))) } } } @@ -126823,7 +126881,7 @@ nOut = rbuDeltaOutputSize(tls, aDelta, nDelta) if nOut < 0 { - Xsqlite3_result_error(tls, context, ts+27865, -1) + Xsqlite3_result_error(tls, context, ts+27912, -1) return } @@ -126834,7 +126892,7 @@ nOut2 = rbuDeltaApply(tls, aOrig, nOrig, aDelta, nDelta, aOut) if nOut2 != nOut { 
Xsqlite3_free(tls, aOut) - Xsqlite3_result_error(tls, context, ts+27865, -1) + Xsqlite3_result_error(tls, context, ts+27912, -1) } else { Xsqlite3_result_blob(tls, context, aOut, nOut, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{Xsqlite3_free}))) } @@ -126935,7 +126993,7 @@ rbuObjIterClearStatements(tls, pIter) if (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx == uintptr(0) { rc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+27886, uintptr(0), uintptr(0), p+64) + ts+27933, uintptr(0), uintptr(0), p+64) } if rc == SQLITE_OK { @@ -126999,7 +127057,7 @@ Xsqlite3_result_text(tls, pCtx, zIn, -1, uintptr(0)) } } else { - if libc.Xstrlen(tls, zIn) > uint64(4) && libc.Xmemcmp(tls, ts+25056, zIn, uint64(4)) == 0 { + if libc.Xstrlen(tls, zIn) > uint64(4) && libc.Xmemcmp(tls, ts+25103, zIn, uint64(4)) == 0 { var i int32 for i = 4; int32(*(*uint8)(unsafe.Pointer(zIn + uintptr(i)))) >= '0' && int32(*(*uint8)(unsafe.Pointer(zIn + uintptr(i)))) <= '9'; i++ { } @@ -127020,16 +127078,16 @@ rc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, pIter, p+64, Xsqlite3_mprintf(tls, - ts+28057, libc.VaList(bp, func() uintptr { + ts+28104, libc.VaList(bp, func() uintptr { if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - return ts + 28207 + return ts + 28254 } return ts + 1554 }()))) if rc == SQLITE_OK { rc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+8, p+64, - ts+28248) + ts+28295) } (*RbuObjIter)(unsafe.Pointer(pIter)).FbCleanup = 1 @@ -127145,7 +127203,7 @@ (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+32, p+64, Xsqlite3_mprintf(tls, - ts+28373, libc.VaList(bp, zTab))) + ts+28420, libc.VaList(bp, zTab))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc != SQLITE_OK || Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 32))) != SQLITE_ROW) { goto __1 } @@ -127163,7 +127221,7 @@ *(*int32)(unsafe.Pointer(piTnum)) = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 32)), 1) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+32+1*8, p+64, - Xsqlite3_mprintf(tls, ts+28492, libc.VaList(bp+8, zTab))) + Xsqlite3_mprintf(tls, ts+28539, libc.VaList(bp+8, zTab))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc != 0) { goto __3 } @@ -127181,7 +127239,7 @@ } (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+32+2*8, p+64, Xsqlite3_mprintf(tls, - ts+28513, libc.VaList(bp+16, zIdx))) + ts+28560, libc.VaList(bp+16, zIdx))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK) { goto __7 } @@ -127204,7 +127262,7 @@ __5: ; (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+32+3*8, p+64, - Xsqlite3_mprintf(tls, ts+28564, libc.VaList(bp+24, zTab))) + Xsqlite3_mprintf(tls, ts+28611, libc.VaList(bp+24, zTab))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK) { goto __10 } @@ -127250,7 +127308,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { libc.Xmemcpy(tls, (*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed, (*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk, uint64(unsafe.Sizeof(U8(0)))*uint64((*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol)) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+16, p+64, - Xsqlite3_mprintf(tls, ts+28585, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) 
+ Xsqlite3_mprintf(tls, ts+28632, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) } (*RbuObjIter)(unsafe.Pointer(pIter)).FnIndex = 0 @@ -127265,7 +127323,7 @@ libc.Xmemset(tls, (*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed, 0x01, uint64(unsafe.Sizeof(U8(0)))*uint64((*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol)) } (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+24, p+64, - Xsqlite3_mprintf(tls, ts+28613, libc.VaList(bp+8, zIdx))) + Xsqlite3_mprintf(tls, ts+28660, libc.VaList(bp+8, zIdx))) for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) { var iCid int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 24)), 1) if iCid >= 0 { @@ -127305,7 +127363,7 @@ rbuTableType(tls, p, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, pIter+72, bp+56, pIter+108) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NOTABLE { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+19489, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl)) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+19536, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl)) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc != 0 { return (*Sqlite3rbu)(unsafe.Pointer(p)).Frc @@ -127315,18 +127373,18 @@ } (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp+64, p+64, - Xsqlite3_mprintf(tls, ts+28642, libc.VaList(bp+8, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl))) + Xsqlite3_mprintf(tls, ts+28689, libc.VaList(bp+8, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl))) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { nCol = Xsqlite3_column_count(tls, *(*uintptr)(unsafe.Pointer(bp + 64))) rbuAllocateIterArrays(tls, p, pIter, nCol) } for i = 0; (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && i < nCol; i++ { var zName uintptr = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp + 64)), i) - if Xsqlite3_strnicmp(tls, ts+28661, zName, 4) != 0 { + if Xsqlite3_strnicmp(tls, ts+28708, zName, 4) != 0 { var zCopy uintptr = rbuStrndup(tls, zName, p+56) *(*int32)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FaiSrcOrder + uintptr((*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol)*4)) = (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(libc.PostIncInt32(&(*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol, 1))*8)) = zCopy - } else if 0 == Xsqlite3_stricmp(tls, ts+28666, zName) { + } else if 0 == Xsqlite3_stricmp(tls, ts+28713, zName) { bRbuRowid = 1 } } @@ -127338,18 +127396,18 @@ bRbuRowid != libc.Bool32((*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_VTAB || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE) { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, - ts+28676, libc.VaList(bp+16, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, + ts+28723, libc.VaList(bp+16, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, func() uintptr { if bRbuRowid != 0 { - return ts + 28705 + return ts + 28752 } - return ts + 28718 + return ts + 28765 }())) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, 
(*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+64, p+64, - Xsqlite3_mprintf(tls, ts+28727, libc.VaList(bp+32, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) + Xsqlite3_mprintf(tls, ts+28774, libc.VaList(bp+32, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) } for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 64))) { var zName uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 64)), 1) @@ -127363,7 +127421,7 @@ } if i == (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+28749, + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+28796, libc.VaList(bp+40, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zName)) } else { var iPk int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 64)), 5) @@ -127410,7 +127468,7 @@ var i int32 for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { var z uintptr = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) - zList = rbuMPrintf(tls, p, ts+28776, libc.VaList(bp, zList, zSep, z)) + zList = rbuMPrintf(tls, p, ts+28823, libc.VaList(bp, zList, zSep, z)) zSep = ts + 14614 } return zList @@ -127428,7 +127486,7 @@ for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { if int32(*(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(i)))) == iPk { var zCol uintptr = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) - zRet = rbuMPrintf(tls, p, ts+28785, libc.VaList(bp, zRet, zSep, zPre, zCol, zPost)) + zRet = rbuMPrintf(tls, p, ts+28832, libc.VaList(bp, zRet, zSep, zPre, zCol, zPost)) zSep = zSeparator break } @@ -127450,25 +127508,25 @@ if bRowid != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+72, p+64, Xsqlite3_mprintf(tls, - ts+28798, libc.VaList(bp, zWrite, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) + ts+28845, libc.VaList(bp, zWrite, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 72))) { var iMax Sqlite3_int64 = Xsqlite3_column_int64(tls, *(*uintptr)(unsafe.Pointer(bp + 72)), 0) - zRet = rbuMPrintf(tls, p, ts+28830, libc.VaList(bp+16, iMax)) + zRet = rbuMPrintf(tls, p, ts+28877, libc.VaList(bp+16, iMax)) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 72))) } else { - var zOrder uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+1554, ts+14614, ts+28853) - var zSelect uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+28859, ts+28866, ts+4957) + var zOrder uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+1554, ts+14614, ts+28900) + var zSelect uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+28906, ts+28913, ts+4957) var zList uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+1554, ts+14614, ts+1554) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+72, p+64, Xsqlite3_mprintf(tls, - ts+28874, + ts+28921, libc.VaList(bp+24, zSelect, zWrite, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zOrder))) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 72))) { var zVal uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp 
+ 72)), 0) - zRet = rbuMPrintf(tls, p, ts+28916, libc.VaList(bp+56, zList, zVal)) + zRet = rbuMPrintf(tls, p, ts+28963, libc.VaList(bp+56, zList, zVal)) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 72))) } @@ -127510,7 +127568,7 @@ *(*uintptr)(unsafe.Pointer(bp + 176)) = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+176, p+64, - Xsqlite3_mprintf(tls, ts+28613, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx))) + Xsqlite3_mprintf(tls, ts+28660, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx))) __1: if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 176)))) { goto __2 @@ -127545,7 +127603,7 @@ zCol = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) goto __7 __6: - zCol = ts + 28936 + zCol = ts + 28983 __7: ; goto __5 @@ -127553,11 +127611,11 @@ zCol = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(iCid)*8)) __5: ; - zLhs = rbuMPrintf(tls, p, ts+28944, + zLhs = rbuMPrintf(tls, p, ts+28991, libc.VaList(bp+8, zLhs, zSep, zCol, zCollate)) - zOrder = rbuMPrintf(tls, p, ts+28965, + zOrder = rbuMPrintf(tls, p, ts+29012, libc.VaList(bp+40, zOrder, zSep, iCol, zCol, zCollate)) - zSelect = rbuMPrintf(tls, p, ts+29001, + zSelect = rbuMPrintf(tls, p, ts+29048, libc.VaList(bp+80, zSelect, zSep, iCol, zCol)) zSep = ts + 14614 iCol++ @@ -127577,7 +127635,7 @@ *(*uintptr)(unsafe.Pointer(bp + 184)) = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+184, p+64, - Xsqlite3_mprintf(tls, ts+29028, + Xsqlite3_mprintf(tls, ts+29075, libc.VaList(bp+112, zSelect, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zOrder))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 184)))) { goto __13 @@ -127604,7 +127662,7 @@ ; __18: ; - zVector = rbuMPrintf(tls, p, ts+29076, libc.VaList(bp+136, zVector, zSep, zQuoted)) + zVector = rbuMPrintf(tls, p, ts+29123, libc.VaList(bp+136, zVector, zSep, zQuoted)) zSep = ts + 14614 goto __15 __15: @@ -127616,7 +127674,7 @@ if !!(bFailed != 0) { goto __20 } - zRet = rbuMPrintf(tls, p, ts+29083, libc.VaList(bp+160, zLhs, zVector)) + zRet = rbuMPrintf(tls, p, ts+29130, libc.VaList(bp+160, zLhs, zVector)) __20: ; __13: @@ -127649,7 +127707,7 @@ if rc == SQLITE_OK { rc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+200, p+64, - Xsqlite3_mprintf(tls, ts+28613, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx))) + Xsqlite3_mprintf(tls, ts+28660, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx))) } for rc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 200))) { @@ -127661,7 +127719,7 @@ if iCid == -2 { var iSeq int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 200)), 0) - zRet = Xsqlite3_mprintf(tls, ts+29095, libc.VaList(bp+8, zRet, zCom, + zRet = Xsqlite3_mprintf(tls, ts+29142, libc.VaList(bp+8, zRet, zCom, (*RbuSpan)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FaIdxCol+uintptr(iSeq)*16)).FnSpan, (*RbuSpan)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FaIdxCol+uintptr(iSeq)*16)).FzSpan, zCollate)) zType = ts + 1554 } else { @@ -127673,37 +127731,37 @@ zCol = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) } else if 
(*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - zCol = ts + 28936 + zCol = ts + 28983 } else { - zCol = ts + 28666 + zCol = ts + 28713 } zType = ts + 1119 } else { zCol = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(iCid)*8)) zType = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblType + uintptr(iCid)*8)) } - zRet = Xsqlite3_mprintf(tls, ts+29117, libc.VaList(bp+48, zRet, zCom, zCol, zCollate)) + zRet = Xsqlite3_mprintf(tls, ts+29164, libc.VaList(bp+48, zRet, zCom, zCol, zCollate)) } if (*RbuObjIter)(unsafe.Pointer(pIter)).FbUnique == 0 || Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 200)), 5) != 0 { var zOrder uintptr = func() uintptr { if bDesc != 0 { - return ts + 28853 + return ts + 28900 } return ts + 1554 }() - zImpPK = Xsqlite3_mprintf(tls, ts+29137, + zImpPK = Xsqlite3_mprintf(tls, ts+29184, libc.VaList(bp+80, zImpPK, zCom, nBind, zCol, zOrder)) } - zImpCols = Xsqlite3_mprintf(tls, ts+29158, + zImpCols = Xsqlite3_mprintf(tls, ts+29205, libc.VaList(bp+120, zImpCols, zCom, nBind, zCol, zType, zCollate)) zWhere = Xsqlite3_mprintf(tls, - ts+29191, libc.VaList(bp+168, zWhere, zAnd, nBind, zCol)) + ts+29238, libc.VaList(bp+168, zWhere, zAnd, nBind, zCol)) if zRet == uintptr(0) || zImpPK == uintptr(0) || zImpCols == uintptr(0) || zWhere == uintptr(0) { rc = SQLITE_NOMEM } zCom = ts + 14614 - zAnd = ts + 21525 + zAnd = ts + 21572 nBind++ } @@ -127742,9 +127800,9 @@ for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { if *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed + uintptr(i))) != 0 { var zCol uintptr = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) - zList = Xsqlite3_mprintf(tls, ts+29215, libc.VaList(bp, zList, zS, zObj, zCol)) + zList = Xsqlite3_mprintf(tls, ts+29262, libc.VaList(bp, zList, zS, zObj, zCol)) } else { - zList = Xsqlite3_mprintf(tls, ts+29227, libc.VaList(bp+32, zList, zS)) + zList = Xsqlite3_mprintf(tls, ts+29274, libc.VaList(bp+32, zList, zS)) } zS = ts + 14614 if zList == uintptr(0) { @@ -127754,7 +127812,7 @@ } if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { - zList = rbuMPrintf(tls, p, ts+29236, libc.VaList(bp+48, zList, zObj)) + zList = rbuMPrintf(tls, p, ts+29283, libc.VaList(bp+48, zList, zObj)) } } return zList @@ -127766,18 +127824,18 @@ var zList uintptr = uintptr(0) if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_VTAB || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { - zList = rbuMPrintf(tls, p, ts+29251, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol+1)) + zList = rbuMPrintf(tls, p, ts+29298, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol+1)) } else if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL { var zSep uintptr = ts + 1554 var i int32 for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { if *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(i))) != 0 { - zList = rbuMPrintf(tls, p, ts+29265, libc.VaList(bp+8, zList, zSep, i, i+1)) - zSep = ts + 21525 + zList = rbuMPrintf(tls, p, ts+29312, libc.VaList(bp+8, zList, zSep, i, i+1)) + zSep = ts + 21572 } } zList = rbuMPrintf(tls, p, - ts+29277, libc.VaList(bp+40, zList)) + ts+29324, libc.VaList(bp+40, zList)) } else { var zSep uintptr = ts + 1554 @@ -127785,8 +127843,8 @@ for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { if 
*(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(i))) != 0 { var zCol uintptr = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) - zList = rbuMPrintf(tls, p, ts+29327, libc.VaList(bp+48, zList, zSep, zCol, i+1)) - zSep = ts + 21525 + zList = rbuMPrintf(tls, p, ts+29374, libc.VaList(bp+48, zList, zSep, zCol, i+1)) + zSep = ts + 21572 } } } @@ -127795,7 +127853,7 @@ func rbuBadControlError(tls *libc.TLS, p uintptr) { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+29340, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+29387, 0) } func rbuObjIterGetSetlist(tls *libc.TLS, p uintptr, pIter uintptr, zMask uintptr) uintptr { @@ -127813,15 +127871,15 @@ for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { var c uint8 = *(*uint8)(unsafe.Pointer(zMask + uintptr(*(*int32)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FaiSrcOrder + uintptr(i)*4))))) if int32(c) == 'x' { - zList = rbuMPrintf(tls, p, ts+29327, + zList = rbuMPrintf(tls, p, ts+29374, libc.VaList(bp, zList, zSep, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)), i+1)) zSep = ts + 14614 } else if int32(c) == 'd' { - zList = rbuMPrintf(tls, p, ts+29366, + zList = rbuMPrintf(tls, p, ts+29413, libc.VaList(bp+32, zList, zSep, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)), *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)), i+1)) zSep = ts + 14614 } else if int32(c) == 'f' { - zList = rbuMPrintf(tls, p, ts+29396, + zList = rbuMPrintf(tls, p, ts+29443, libc.VaList(bp+72, zList, zSep, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)), *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)), i+1)) zSep = ts + 14614 } @@ -127858,19 +127916,19 @@ var z uintptr = uintptr(0) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - var zSep uintptr = ts + 29433 + var zSep uintptr = ts + 29480 *(*uintptr)(unsafe.Pointer(bp + 56)) = uintptr(0) *(*uintptr)(unsafe.Pointer(bp + 64)) = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+56, p+64, - Xsqlite3_mprintf(tls, ts+28585, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) + Xsqlite3_mprintf(tls, ts+28632, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 56))) { var zOrig uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), 3) if zOrig != 0 && libc.Xstrcmp(tls, zOrig, ts+16155) == 0 { var zIdx uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), 1) if zIdx != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+64, p+64, - Xsqlite3_mprintf(tls, ts+28613, libc.VaList(bp+8, zIdx))) + Xsqlite3_mprintf(tls, ts+28660, libc.VaList(bp+8, zIdx))) } break } @@ -127882,15 +127940,15 @@ var zCol uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 64)), 2) var zDesc uintptr if Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 64)), 3) != 0 { - zDesc = ts + 28853 + zDesc = ts + 28900 } else { zDesc = ts + 1554 } - z = rbuMPrintf(tls, p, ts+29446, libc.VaList(bp+16, z, zSep, zCol, 
zDesc)) + z = rbuMPrintf(tls, p, ts+29493, libc.VaList(bp+16, z, zSep, zCol, zDesc)) zSep = ts + 14614 } } - z = rbuMPrintf(tls, p, ts+29457, libc.VaList(bp+48, z)) + z = rbuMPrintf(tls, p, ts+29504, libc.VaList(bp+48, z)) rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 64))) } return z @@ -127910,7 +127968,7 @@ var zPk uintptr = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+168, p+64, - ts+29461) + ts+29508) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { Xsqlite3_bind_int(tls, *(*uintptr)(unsafe.Pointer(bp + 168)), 1, tnum) if SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 168))) { @@ -127919,7 +127977,7 @@ } if zIdx != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+176, p+64, - Xsqlite3_mprintf(tls, ts+28613, libc.VaList(bp, zIdx))) + Xsqlite3_mprintf(tls, ts+28660, libc.VaList(bp, zIdx))) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 168))) @@ -127929,23 +127987,23 @@ var iCid int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 176)), 1) var bDesc int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 176)), 3) var zCollate uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 176)), 4) - zCols = rbuMPrintf(tls, p, ts+29511, libc.VaList(bp+8, zCols, zComma, + zCols = rbuMPrintf(tls, p, ts+29558, libc.VaList(bp+8, zCols, zComma, iCid, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblType + uintptr(iCid)*8)), zCollate)) - zPk = rbuMPrintf(tls, p, ts+29533, libc.VaList(bp+48, zPk, zComma, iCid, func() uintptr { + zPk = rbuMPrintf(tls, p, ts+29580, libc.VaList(bp+48, zPk, zComma, iCid, func() uintptr { if bDesc != 0 { - return ts + 28853 + return ts + 28900 } return ts + 1554 }())) zComma = ts + 14614 } } - zCols = rbuMPrintf(tls, p, ts+29543, libc.VaList(bp+80, zCols)) + zCols = rbuMPrintf(tls, p, ts+29590, libc.VaList(bp+80, zCols)) rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 176))) Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+88, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6441, 1, tnum)) rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+29558, + ts+29605, libc.VaList(bp+120, zCols, zPk)) Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+136, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6441, 0, 0)) } @@ -127971,13 +128029,13 @@ (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6441, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zCol, uintptr(0), bp+192, uintptr(0), uintptr(0), uintptr(0)) if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_IPK && *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(iCol))) != 0 { - zPk = ts + 29620 + zPk = ts + 29667 } - zSql = rbuMPrintf(tls, p, ts+29633, + zSql = rbuMPrintf(tls, p, ts+29680, libc.VaList(bp+32, zSql, zComma, zCol, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblType + uintptr(iCol)*8)), zPk, *(*uintptr)(unsafe.Pointer(bp + 192)), func() uintptr { if *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabNotNull + uintptr(iCol))) != 0 { - return ts + 29660 + return ts + 29707 } return ts + 1554 }())) @@ -127987,16 +128045,16 @@ if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_WITHOUT_ROWID { var zPk uintptr = rbuWithoutRowidPK(tls, p, pIter) if zPk != 0 { - zSql = rbuMPrintf(tls, p, ts+29670, libc.VaList(bp+88, zSql, zPk)) + zSql = rbuMPrintf(tls, p, ts+29717, 
libc.VaList(bp+88, zSql, zPk)) } } Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+104, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6441, 1, tnum)) - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+29677, + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+29724, libc.VaList(bp+136, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zSql, func() uintptr { if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_WITHOUT_ROWID { - return ts + 29709 + return ts + 29756 } return ts + 1554 }())) @@ -128013,7 +128071,7 @@ if zBind != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, pIter+152, p+64, Xsqlite3_mprintf(tls, - ts+29724, + ts+29771, libc.VaList(bp, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zCollist, zRbuRowid, zBind))) } } @@ -128050,7 +128108,7 @@ if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { *(*int32)(unsafe.Pointer(bp + 8)) = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp, p+64, - ts+29781) + ts+29828) } if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { var rc2 int32 @@ -128155,7 +128213,7 @@ var zLimit uintptr = uintptr(0) if nOffset != 0 { - zLimit = Xsqlite3_mprintf(tls, ts+29847, libc.VaList(bp, nOffset)) + zLimit = Xsqlite3_mprintf(tls, ts+29894, libc.VaList(bp, nOffset)) if !(zLimit != 0) { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_NOMEM } @@ -128178,7 +128236,7 @@ Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+8, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6441, 0, 1)) Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+40, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6441, 1, tnum)) rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+29867, + ts+29914, libc.VaList(bp+72, zTbl, *(*uintptr)(unsafe.Pointer(bp + 600)), *(*uintptr)(unsafe.Pointer(bp + 608)))) Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+96, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6441, 0, 0)) @@ -128186,13 +128244,13 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+136, p+64, - Xsqlite3_mprintf(tls, ts+29932, libc.VaList(bp+128, zTbl, zBind))) + Xsqlite3_mprintf(tls, ts+29979, libc.VaList(bp+128, zTbl, zBind))) } if libc.Bool32((*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0)) == 0 && (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+144, p+64, - Xsqlite3_mprintf(tls, ts+29968, libc.VaList(bp+144, zTbl, *(*uintptr)(unsafe.Pointer(bp + 616))))) + Xsqlite3_mprintf(tls, ts+30015, libc.VaList(bp+144, zTbl, *(*uintptr)(unsafe.Pointer(bp + 616))))) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { @@ -128208,7 +128266,7 @@ } zSql = Xsqlite3_mprintf(tls, - ts+30002, + ts+30049, libc.VaList(bp+160, zCollist, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zPart, @@ -128216,9 +128274,9 @@ if zStart != 0 { return func() uintptr { if zPart != 0 { - return ts + 30063 + return ts + 30110 } - return ts + 30067 + return ts + 30114 }() } return ts + 1554 @@ -128227,20 +128285,20 @@ Xsqlite3_free(tls, zStart) } else if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { zSql = Xsqlite3_mprintf(tls, - ts+30073, + ts+30120, libc.VaList(bp+216, 
zCollist, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zPart, zCollist, zLimit)) } else { zSql = Xsqlite3_mprintf(tls, - ts+30134, + ts+30181, libc.VaList(bp+264, zCollist, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zPart, zCollist, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zPart, func() uintptr { if zPart != 0 { - return ts + 30063 + return ts + 30110 } - return ts + 30067 + return ts + 30114 }(), zCollist, zLimit)) } @@ -128277,16 +128335,16 @@ if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_VTAB { return ts + 1554 } - return ts + 30293 + return ts + 30340 }() if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+136, pz, Xsqlite3_mprintf(tls, - ts+30302, + ts+30349, libc.VaList(bp+344, zWrite, zTbl, zCollist, func() uintptr { if bRbuRowid != 0 { - return ts + 30338 + return ts + 30385 } return ts + 1554 }(), zBindings))) @@ -128295,32 +128353,32 @@ if libc.Bool32((*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0)) == 0 && (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+144, pz, Xsqlite3_mprintf(tls, - ts+30348, libc.VaList(bp+384, zWrite, zTbl, zWhere))) + ts+30395, libc.VaList(bp+384, zWrite, zTbl, zWhere))) } if libc.Bool32((*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0)) == 0 && (*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed != 0 { var zRbuRowid uintptr = ts + 1554 if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { - zRbuRowid = ts + 30376 + zRbuRowid = ts + 30423 } rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, - ts+30388, libc.VaList(bp+408, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, func() uintptr { + ts+30435, libc.VaList(bp+408, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, func() uintptr { if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL { - return ts + 30464 + return ts + 30511 } return ts + 1554 }(), (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl)) rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+30481, + ts+30528, libc.VaList(bp+440, zWrite, zTbl, zOldlist, zWrite, zTbl, zOldlist, zWrite, zTbl, zNewlist)) if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+30780, + ts+30827, libc.VaList(bp+512, zWrite, zTbl, zNewlist)) } @@ -128333,9 +128391,9 @@ var zOrder uintptr = uintptr(0) if bRbuRowid != 0 { if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - zRbuRowid = ts + 30879 + zRbuRowid = ts + 30926 } else { - zRbuRowid = ts + 30889 + zRbuRowid = ts + 30936 } } @@ -128348,7 +128406,7 @@ } } if bRbuRowid != 0 { - zOrder = rbuMPrintf(tls, p, ts+28936, 0) + zOrder = rbuMPrintf(tls, p, ts+28983, 0) } else { zOrder = rbuObjIterGetPkList(tls, p, pIter, ts+1554, ts+14614, ts+1554) } @@ -128357,11 +128415,11 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, pIter+128, pz, Xsqlite3_mprintf(tls, - ts+30900, + ts+30947, libc.VaList(bp+536, zCollist, func() uintptr { if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - return ts + 30948 + return ts + 30995 } return ts + 
1554 }(), @@ -128374,7 +128432,7 @@ }(), func() uintptr { if zOrder != 0 { - return ts + 22859 + return ts + 22906 } return ts + 1554 }(), zOrder, @@ -128442,9 +128500,9 @@ var zPrefix uintptr = ts + 1554 if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType != RBU_PK_VTAB { - zPrefix = ts + 30293 + zPrefix = ts + 30340 } - zUpdate = Xsqlite3_mprintf(tls, ts+30954, + zUpdate = Xsqlite3_mprintf(tls, ts+31001, libc.VaList(bp, zPrefix, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zSet, zWhere)) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pUp+8, p+64, zUpdate) @@ -128503,7 +128561,7 @@ } *(*int32)(unsafe.Pointer(bp + 16)) = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp+8, p+64, - Xsqlite3_mprintf(tls, ts+30984, libc.VaList(bp, p+48))) + Xsqlite3_mprintf(tls, ts+31031, libc.VaList(bp, p+48))) for *(*int32)(unsafe.Pointer(bp + 16)) == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 8))) { switch Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 8)), 0) { case RBU_STATE_STAGE: @@ -128576,18 +128634,18 @@ Xsqlite3_file_control(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+6441, SQLITE_FCNTL_RBUCNT, p) if (*Sqlite3rbu)(unsafe.Pointer(p)).FzState == uintptr(0) { var zFile uintptr = Xsqlite3_db_filename(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+6441) - (*Sqlite3rbu)(unsafe.Pointer(p)).FzState = rbuMPrintf(tls, p, ts+31014, libc.VaList(bp, zFile, zFile)) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzState = rbuMPrintf(tls, p, ts+31061, libc.VaList(bp, zFile, zFile)) } } if (*Sqlite3rbu)(unsafe.Pointer(p)).FzState != 0 { - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+31042, libc.VaList(bp+16, (*Sqlite3rbu)(unsafe.Pointer(p)).FzState)) + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+31089, libc.VaList(bp+16, (*Sqlite3rbu)(unsafe.Pointer(p)).FzState)) libc.Xmemcpy(tls, p+48, ts+3286, uint64(4)) } else { libc.Xmemcpy(tls, p+48, ts+6441, uint64(4)) } - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+31060, libc.VaList(bp+24, p+48)) + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+31107, libc.VaList(bp+24, p+48)) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { var bOpen int32 = 0 @@ -128627,11 +128685,11 @@ return } (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31126, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31173, 0) } else { var zTarget uintptr var zExtra uintptr = uintptr(0) - if libc.Xstrlen(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu) >= uint64(5) && 0 == libc.Xmemcmp(tls, ts+24206, (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu, uint64(5)) { + if libc.Xstrlen(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu) >= uint64(5) && 0 == libc.Xmemcmp(tls, ts+24253, (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu, uint64(5)) { zExtra = (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu + 5 for *(*uint8)(unsafe.Pointer(zExtra)) != 0 { if int32(*(*uint8)(unsafe.Pointer(libc.PostIncUintptr(&zExtra, 1)))) == '?' 
{ @@ -128643,13 +128701,13 @@ } } - zTarget = Xsqlite3_mprintf(tls, ts+31158, + zTarget = Xsqlite3_mprintf(tls, ts+31205, libc.VaList(bp+32, Xsqlite3_db_filename(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+6441), func() uintptr { if zExtra == uintptr(0) { return ts + 1554 } - return ts + 31190 + return ts + 31237 }(), func() uintptr { if zExtra == uintptr(0) { return ts + 1554 @@ -128668,21 +128726,21 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_create_function(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+31192, -1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { + ts+31239, -1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rbuTmpInsertFunc})), uintptr(0), uintptr(0)) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_create_function(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+31207, 2, SQLITE_UTF8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + ts+31254, 2, SQLITE_UTF8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rbuFossilDeltaFunc})), uintptr(0), uintptr(0)) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_create_function(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, - ts+31224, -1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { + ts+31271, -1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rbuTargetNameFunc})), uintptr(0), uintptr(0)) } @@ -128690,7 +128748,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_file_control(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6441, SQLITE_FCNTL_RBU, p) } - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31240, 0) + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31287, 0) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_file_control(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6441, SQLITE_FCNTL_RBU, p) @@ -128698,7 +128756,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_NOTFOUND { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31268, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31315, 0) } } @@ -128727,14 +128785,14 @@ if pState == uintptr(0) { (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage = 0 if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31240, uintptr(0), uintptr(0), uintptr(0)) + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31287, uintptr(0), uintptr(0), uintptr(0)) } } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { var rc2 int32 (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage = RBU_STAGE_CAPTURE - rc2 = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31286, uintptr(0), uintptr(0), uintptr(0)) + rc2 = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31333, uintptr(0), uintptr(0), uintptr(0)) if rc2 != SQLITE_NOTICE { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = rc2 } @@ -128860,7 +128918,7 @@ func rbuExclusiveCheckpoint(tls *libc.TLS, db uintptr) int32 { var zUri uintptr = Xsqlite3_db_filename(tls, db, uintptr(0)) - return 
Xsqlite3_uri_boolean(tls, zUri, ts+31321, 0) + return Xsqlite3_uri_boolean(tls, zUri, ts+31368, 0) } func rbuMoveOalFile(tls *libc.TLS, p uintptr) { @@ -128875,8 +128933,8 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { zMove = Xsqlite3_db_filename(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+6441) } - zOal = Xsqlite3_mprintf(tls, ts+31346, libc.VaList(bp, zMove)) - zWal = Xsqlite3_mprintf(tls, ts+31353, libc.VaList(bp+8, zMove)) + zOal = Xsqlite3_mprintf(tls, ts+31393, libc.VaList(bp, zMove)) + zWal = Xsqlite3_mprintf(tls, ts+31400, libc.VaList(bp+8, zMove)) if zWal == uintptr(0) || zOal == uintptr(0) { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_NOMEM @@ -128993,7 +129051,7 @@ (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx == uintptr(0) && (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_IPK && *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(i))) != 0 && Xsqlite3_column_type(tls, (*RbuObjIter)(unsafe.Pointer(pIter)).FpSelect, i) == SQLITE_NULL { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_MISMATCH - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+23844, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+23891, 0) return } @@ -129086,7 +129144,7 @@ var iCookie int32 = 1000000 (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, dbread, bp+8, p+64, - ts+31360) + ts+31407) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { if SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 8))) { iCookie = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 8)), 0) @@ -129094,7 +129152,7 @@ rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 8))) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31382, libc.VaList(bp, iCookie+1)) + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31429, libc.VaList(bp, iCookie+1)) } } } @@ -129115,7 +129173,7 @@ rc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp+168, p+64, Xsqlite3_mprintf(tls, - ts+31409, + ts+31456, libc.VaList(bp, p+48, RBU_STATE_STAGE, eStage, RBU_STATE_TBL, (*Sqlite3rbu)(unsafe.Pointer(p)).Fobjiter.FzTbl, @@ -129145,9 +129203,9 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { *(*uintptr)(unsafe.Pointer(bp + 24)) = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp+24, p+64, - Xsqlite3_mprintf(tls, ts+31567, libc.VaList(bp, zPragma))) + Xsqlite3_mprintf(tls, ts+31614, libc.VaList(bp, zPragma))) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) { - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31582, + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31629, libc.VaList(bp+8, zPragma, Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 24)), 0))) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 24))) @@ -129161,10 +129219,10 @@ *(*uintptr)(unsafe.Pointer(bp)) = uintptr(0) *(*uintptr)(unsafe.Pointer(bp + 8)) = uintptr(0) - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31602, uintptr(0), uintptr(0), p+64) + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31649, uintptr(0), uintptr(0), p+64) if 
(*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp, p+64, - ts+31627) + ts+31674) } for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp))) == SQLITE_ROW { @@ -129178,12 +129236,12 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp, p+64, - ts+31735) + ts+31782) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+8, p+64, - ts+31800) + ts+31847) } for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp))) == SQLITE_ROW { @@ -129195,7 +129253,7 @@ (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_reset(tls, *(*uintptr)(unsafe.Pointer(bp + 8))) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31844, uintptr(0), uintptr(0), p+64) + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31891, uintptr(0), uintptr(0), p+64) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp))) @@ -129223,7 +129281,7 @@ if (*RbuObjIter)(unsafe.Pointer(pIter)).FbCleanup != 0 { if libc.Bool32((*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0)) == 0 && (*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed != 0 { rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, - ts+31869, libc.VaList(bp, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl)) + ts+31916, libc.VaList(bp, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl)) } } else { rbuObjIterPrepareAll(tls, p, pIter, 0) @@ -129345,7 +129403,7 @@ if rc == SQLITE_OK && !(int32((*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl) != 0) { rc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31897, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31944, 0) } if rc == SQLITE_OK { @@ -129361,7 +129419,7 @@ bp := tls.Alloc(16) defer tls.Free(16) - var zOal uintptr = rbuMPrintf(tls, p, ts+31346, libc.VaList(bp, (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget)) + var zOal uintptr = rbuMPrintf(tls, p, ts+31393, libc.VaList(bp, (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget)) if zOal != 0 { *(*uintptr)(unsafe.Pointer(bp + 8)) = uintptr(0) Xsqlite3_file_control(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6441, SQLITE_FCNTL_VFS_POINTER, bp+8) @@ -129378,7 +129436,7 @@ defer tls.Free(76) Xsqlite3_randomness(tls, int32(unsafe.Sizeof(int32(0))), bp+8) - Xsqlite3_snprintf(tls, int32(unsafe.Sizeof([64]uint8{})), bp+12, ts+31922, libc.VaList(bp, *(*int32)(unsafe.Pointer(bp + 8)))) + Xsqlite3_snprintf(tls, int32(unsafe.Sizeof([64]uint8{})), bp+12, ts+31969, libc.VaList(bp, *(*int32)(unsafe.Pointer(bp + 8)))) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3rbu_create_vfs(tls, bp+12, uintptr(0)) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { var pVfs uintptr = Xsqlite3_vfs_find(tls, bp+12) @@ -129412,7 +129470,7 @@ rc = prepareFreeAndCollectError(tls, db, bp+8, bp+16, Xsqlite3_mprintf(tls, - ts+31933, libc.VaList(bp, Xsqlite3_value_text(tls, *(*uintptr)(unsafe.Pointer(apVal)))))) + ts+31980, libc.VaList(bp, Xsqlite3_value_text(tls, *(*uintptr)(unsafe.Pointer(apVal)))))) if rc != SQLITE_OK { Xsqlite3_result_error(tls, 
pCtx, *(*uintptr)(unsafe.Pointer(bp + 16)), -1) } else { @@ -129442,13 +129500,13 @@ (*Sqlite3rbu)(unsafe.Pointer(p)).FnPhaseOneStep = int64(-1) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_create_function(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, - ts+32005, 1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { + ts+32052, 1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rbuIndexCntFunc})), uintptr(0), uintptr(0)) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp, p+64, - ts+32019) + ts+32066) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { if SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp))) { @@ -129459,7 +129517,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && bExists != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp, p+64, - ts+32076) + ts+32123) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { if SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp))) { (*Sqlite3rbu)(unsafe.Pointer(p)).FnPhaseOneStep = Xsqlite3_column_int64(tls, *(*uintptr)(unsafe.Pointer(bp)), 0) @@ -129533,7 +129591,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && (*Rbu_file)(unsafe.Pointer((*Sqlite3rbu)(unsafe.Pointer(p)).FpTargetFd)).FpWalFd != 0 { if (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage == RBU_STAGE_OAL { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32150, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32197, 0) } else if (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage == RBU_STAGE_MOVE { (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage = RBU_STAGE_CKPT (*Sqlite3rbu)(unsafe.Pointer(p)).FnStep = 0 @@ -129551,12 +129609,12 @@ }() if (*Rbu_file)(unsafe.Pointer(pFd)).FiCookie != (*RbuState)(unsafe.Pointer(pState)).FiCookie { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_BUSY - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32182, + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32229, libc.VaList(bp+8, func() uintptr { if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - return ts + 32214 + return ts + 32261 } - return ts + 32221 + return ts + 32268 }())) } } @@ -129580,14 +129638,14 @@ } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, db, ts+32228, uintptr(0), uintptr(0), p+64) + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, db, ts+32275, uintptr(0), uintptr(0), p+64) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { var frc int32 = Xsqlite3_file_control(tls, db, ts+6441, SQLITE_FCNTL_ZIPVFS, uintptr(0)) if frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, - db, ts+32244, uintptr(0), uintptr(0), p+64) + db, ts+32291, uintptr(0), uintptr(0), p+64) } } @@ -129641,7 +129699,7 @@ } if zState != 0 { var n int32 = int32(libc.Xstrlen(tls, zState)) - if n >= 7 && 0 == libc.Xmemcmp(tls, ts+32268, zState+uintptr(n-7), uint64(7)) { + if n >= 7 && 0 == libc.Xmemcmp(tls, ts+32315, zState+uintptr(n-7), uint64(7)) { return rbuMisuseError(tls) } } @@ -129668,7 +129726,7 @@ var i uint32 var nErrmsg Size_t = libc.Xstrlen(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg) for i = uint32(0); Size_t(i) < nErrmsg-uint64(8); i++ { - if libc.Xmemcmp(tls, 
(*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg+uintptr(i), ts+30293, uint64(8)) == 0 { + if libc.Xmemcmp(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg+uintptr(i), ts+30340, uint64(8)) == 0 { var nDel int32 = 8 for int32(*(*uint8)(unsafe.Pointer((*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg + uintptr(i+uint32(nDel))))) >= '0' && int32(*(*uint8)(unsafe.Pointer((*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg + uintptr(i+uint32(nDel))))) <= '9' { nDel++ @@ -129704,7 +129762,7 @@ rbuObjIterFinalize(tls, p+80) if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) && (*Sqlite3rbu)(unsafe.Pointer(p)).Frc != SQLITE_OK && (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu != 0 { - var rc2 int32 = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+32276, uintptr(0), uintptr(0), uintptr(0)) + var rc2 int32 = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+32323, uintptr(0), uintptr(0), uintptr(0)) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_DONE && rc2 != SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = rc2 } @@ -129823,12 +129881,12 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { zBegin = ts + 14503 } else { - zBegin = ts + 32228 + zBegin = ts + 32275 } rc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, zBegin, uintptr(0), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { - rc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+32228, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+32275, uintptr(0), uintptr(0), uintptr(0)) } } @@ -130174,7 +130232,7 @@ })(unsafe.Pointer(&struct{ uintptr }{xControl})).f(tls, (*Rbu_file)(unsafe.Pointer(p)).FpReal, SQLITE_FCNTL_ZIPVFS, bp+16) if rc == SQLITE_OK { rc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(pRbu)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32303, 0) + (*Sqlite3rbu)(unsafe.Pointer(pRbu)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32350, 0) } else if rc == SQLITE_NOTFOUND { (*Sqlite3rbu)(unsafe.Pointer(pRbu)).FpTargetFd = p (*Rbu_file)(unsafe.Pointer(p)).FpRbu = pRbu @@ -130199,7 +130257,7 @@ if rc == SQLITE_OK && op == SQLITE_FCNTL_VFSNAME { var pRbuVfs uintptr = (*Rbu_file)(unsafe.Pointer(p)).FpRbuVfs var zIn uintptr = *(*uintptr)(unsafe.Pointer(pArg)) - var zOut uintptr = Xsqlite3_mprintf(tls, ts+32326, libc.VaList(bp, (*Rbu_vfs)(unsafe.Pointer(pRbuVfs)).Fbase.FzName, zIn)) + var zOut uintptr = Xsqlite3_mprintf(tls, ts+32373, libc.VaList(bp, (*Rbu_vfs)(unsafe.Pointer(pRbuVfs)).Fbase.FzName, zIn)) *(*uintptr)(unsafe.Pointer(pArg)) = zOut if zOut == uintptr(0) { rc = SQLITE_NOMEM @@ -130359,7 +130417,7 @@ } if oflags&SQLITE_OPEN_MAIN_DB != 0 && - Xsqlite3_uri_boolean(tls, zName, ts+32337, 0) != 0 { + Xsqlite3_uri_boolean(tls, zName, ts+32384, 0) != 0 { oflags = SQLITE_OPEN_TEMP_DB | SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE | SQLITE_OPEN_EXCLUSIVE | SQLITE_OPEN_DELETEONCLOSE zOpen = uintptr(0) } @@ -131188,7 +131246,7 @@ rc = Xsqlite3_table_column_metadata(tls, db, zDb, zThis, uintptr(0), uintptr(0), uintptr(0), uintptr(0), uintptr(0), uintptr(0)) if rc == SQLITE_OK { zPragma = Xsqlite3_mprintf(tls, - ts+32348, 0) + ts+32395, 0) } else if rc == SQLITE_ERROR { zPragma = Xsqlite3_mprintf(tls, ts+1554, 0) } else { @@ -131201,7 +131259,7 @@ return rc } } else { - zPragma = Xsqlite3_mprintf(tls, ts+32469, libc.VaList(bp, zDb, zThis)) + zPragma = Xsqlite3_mprintf(tls, ts+32516, libc.VaList(bp, zDb, zThis)) } if !(zPragma != 0) { *(*uintptr)(unsafe.Pointer(pazCol)) = uintptr(0) @@ -131881,9 +131939,9 @@ for i = 0; i < nCol; i++ { if 
*(*U8)(unsafe.Pointer(abPK + uintptr(i))) != 0 { - zRet = Xsqlite3_mprintf(tls, ts+32498, + zRet = Xsqlite3_mprintf(tls, ts+32545, libc.VaList(bp, zRet, zSep, zDb1, zTab, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*8)), zDb2, zTab, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*8)))) - zSep = ts + 21525 + zSep = ts + 21572 if zRet == uintptr(0) { break } @@ -131906,9 +131964,9 @@ if int32(*(*U8)(unsafe.Pointer(abPK + uintptr(i)))) == 0 { bHave = 1 zRet = Xsqlite3_mprintf(tls, - ts+32532, + ts+32579, libc.VaList(bp, zRet, zSep, zDb1, zTab, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*8)), zDb2, zTab, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*8)))) - zSep = ts + 32573 + zSep = ts + 32620 if zRet == uintptr(0) { break } @@ -131916,7 +131974,7 @@ } if bHave == 0 { - zRet = Xsqlite3_mprintf(tls, ts+7530, 0) + zRet = Xsqlite3_mprintf(tls, ts+7519, 0) } return zRet @@ -131927,7 +131985,7 @@ defer tls.Free(40) var zRet uintptr = Xsqlite3_mprintf(tls, - ts+32578, + ts+32625, libc.VaList(bp, zDb1, zTbl, zDb2, zTbl, zExpr)) return zRet } @@ -131970,7 +132028,7 @@ rc = SQLITE_NOMEM } else { var zStmt uintptr = Xsqlite3_mprintf(tls, - ts+32656, + ts+32703, libc.VaList(bp, (*Sqlite3_session)(unsafe.Pointer(pSession)).FzDb, (*SessionTable)(unsafe.Pointer(pTab)).FzName, zFrom, (*SessionTable)(unsafe.Pointer(pTab)).FzName, zExpr, zExpr2)) if zStmt == uintptr(0) { rc = SQLITE_NOMEM @@ -132097,7 +132155,7 @@ if !(pzErrMsg != 0) { goto __16 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+32709, 0) + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+32756, 0) __16: ; rc = SQLITE_SCHEMA @@ -132573,7 +132631,7 @@ if 0 == Xsqlite3_stricmp(tls, ts+11348, zTab) { zSql = Xsqlite3_mprintf(tls, - ts+32736, libc.VaList(bp, zDb)) + ts+32783, libc.VaList(bp, zDb)) if zSql == uintptr(0) { *(*int32)(unsafe.Pointer(bp + 24)) = SQLITE_NOMEM } @@ -132582,18 +132640,18 @@ var zSep uintptr = ts + 1554 *(*SessionBuffer)(unsafe.Pointer(bp + 8)) = SessionBuffer{} - sessionAppendStr(tls, bp+8, ts+32846, bp+24) + sessionAppendStr(tls, bp+8, ts+32893, bp+24) sessionAppendIdent(tls, bp+8, zDb, bp+24) sessionAppendStr(tls, bp+8, ts+1567, bp+24) sessionAppendIdent(tls, bp+8, zTab, bp+24) - sessionAppendStr(tls, bp+8, ts+32861, bp+24) + sessionAppendStr(tls, bp+8, ts+32908, bp+24) for i = 0; i < nCol; i++ { if *(*U8)(unsafe.Pointer(abPK + uintptr(i))) != 0 { sessionAppendStr(tls, bp+8, zSep, bp+24) sessionAppendIdent(tls, bp+8, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*8)), bp+24) - sessionAppendStr(tls, bp+8, ts+32869, bp+24) + sessionAppendStr(tls, bp+8, ts+32916, bp+24) sessionAppendInteger(tls, bp+8, i+1, bp+24) - zSep = ts + 21525 + zSep = ts + 21572 } } zSql = (*SessionBuffer)(unsafe.Pointer(bp + 8)).FaBuf @@ -132702,7 +132760,7 @@ if (*Sqlite3_session)(unsafe.Pointer(pSession)).Frc != 0 { return (*Sqlite3_session)(unsafe.Pointer(pSession)).Frc } - *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3_exec(tls, (*Sqlite3_session)(unsafe.Pointer(pSession)).Fdb, ts+32875, uintptr(0), uintptr(0), uintptr(0)) + *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3_exec(tls, (*Sqlite3_session)(unsafe.Pointer(pSession)).Fdb, ts+32922, uintptr(0), uintptr(0), uintptr(0)) if *(*int32)(unsafe.Pointer(bp + 40)) != SQLITE_OK { return *(*int32)(unsafe.Pointer(bp + 40)) } @@ -132794,7 +132852,7 @@ } Xsqlite3_free(tls, (*SessionBuffer)(unsafe.Pointer(bp+24)).FaBuf) - Xsqlite3_exec(tls, db, ts+32895, uintptr(0), uintptr(0), uintptr(0)) + Xsqlite3_exec(tls, db, ts+32942, uintptr(0), uintptr(0), uintptr(0)) 
Xsqlite3_mutex_leave(tls, Xsqlite3_db_mutex(tls, db)) return *(*int32)(unsafe.Pointer(bp + 40)) } @@ -133057,7 +133115,7 @@ rc = sessionInputBuffer(tls, pIn, 9) if rc == SQLITE_OK { if (*SessionInput)(unsafe.Pointer(pIn)).FiNext >= (*SessionInput)(unsafe.Pointer(pIn)).FnData { - rc = Xsqlite3CorruptError(tls, 219078) + rc = Xsqlite3CorruptError(tls, 219169) } else { eType = int32(*(*U8)(unsafe.Pointer((*SessionInput)(unsafe.Pointer(pIn)).FaData + uintptr(libc.PostIncInt32(&(*SessionInput)(unsafe.Pointer(pIn)).FiNext, 1))))) @@ -133080,7 +133138,7 @@ rc = sessionInputBuffer(tls, pIn, *(*int32)(unsafe.Pointer(bp))) if rc == SQLITE_OK { if *(*int32)(unsafe.Pointer(bp)) < 0 || *(*int32)(unsafe.Pointer(bp)) > (*SessionInput)(unsafe.Pointer(pIn)).FnData-(*SessionInput)(unsafe.Pointer(pIn)).FiNext { - rc = Xsqlite3CorruptError(tls, 219098) + rc = Xsqlite3CorruptError(tls, 219189) } else { var enc U8 = func() uint8 { if eType == SQLITE_TEXT { @@ -133122,7 +133180,7 @@ nRead = nRead + sessionVarintGet(tls, (*SessionInput)(unsafe.Pointer(pIn)).FaData+uintptr((*SessionInput)(unsafe.Pointer(pIn)).FiNext+nRead), bp) if *(*int32)(unsafe.Pointer(bp)) < 0 || *(*int32)(unsafe.Pointer(bp)) > 65536 { - rc = Xsqlite3CorruptError(tls, 219152) + rc = Xsqlite3CorruptError(tls, 219243) } else { rc = sessionInputBuffer(tls, pIn, nRead+*(*int32)(unsafe.Pointer(bp))+100) nRead = nRead + *(*int32)(unsafe.Pointer(bp)) @@ -133183,7 +133241,7 @@ (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Ftblhdr.FnBuf = 0 sessionBufferGrow(tls, p+72, int64(nByte), bp+4) } else { - *(*int32)(unsafe.Pointer(bp + 4)) = Xsqlite3CorruptError(tls, 219240) + *(*int32)(unsafe.Pointer(bp + 4)) = Xsqlite3CorruptError(tls, 219331) } } @@ -133257,13 +133315,13 @@ } if (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FzTab == uintptr(0) || (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FbPatchset != 0 && (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FbInvert != 0 { - return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219326)) + return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219417)) } (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fop = int32(op) (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FbIndirect = int32(*(*U8)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fin.FaData + uintptr(libc.PostIncInt32(&(*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fin.FiNext, 1))))) if (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fop != SQLITE_UPDATE && (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fop != SQLITE_DELETE && (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fop != SQLITE_INSERT { - return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219332)) + return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219423)) } if paRec != 0 { @@ -133325,7 +133383,7 @@ if *(*U8)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FabPK + uintptr(i))) != 0 { *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FapValue + uintptr(i)*8)) = *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FapValue + uintptr(i+(*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FnCol)*8)) if *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FapValue + uintptr(i)*8)) == uintptr(0) { - return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219376)) + return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219467)) } *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FapValue + uintptr(i+(*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FnCol)*8)) = uintptr(0) } @@ -133698,7 
+133756,7 @@ goto __6 __11: - *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3CorruptError(tls, 219741) + *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3CorruptError(tls, 219832) goto finished_invert __6: ; @@ -133877,34 +133935,34 @@ (*SessionUpdate)(unsafe.Pointer(pUp)).FaMask = pUp + 1*24 libc.Xmemcpy(tls, (*SessionUpdate)(unsafe.Pointer(pUp)).FaMask, (*SessionApplyCtx)(unsafe.Pointer(p)).FaUpdateMask, uint64(nU32)*uint64(unsafe.Sizeof(U32(0)))) - sessionAppendStr(tls, bp, ts+32913, bp+16) + sessionAppendStr(tls, bp, ts+32960, bp+16) sessionAppendIdent(tls, bp, (*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FzTab, bp+16) - sessionAppendStr(tls, bp, ts+32926, bp+16) + sessionAppendStr(tls, bp, ts+32973, bp+16) for ii = 0; ii < (*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FnCol; ii++ { if int32(*(*U8)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FabPK + uintptr(ii)))) == 0 && *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FapValue + uintptr((*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FnCol+ii)*8)) != 0 { sessionAppendStr(tls, bp, zSep, bp+16) sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(ii)*8)), bp+16) - sessionAppendStr(tls, bp, ts+32932, bp+16) + sessionAppendStr(tls, bp, ts+32979, bp+16) sessionAppendInteger(tls, bp, ii*2+1, bp+16) zSep = ts + 14614 } } zSep = ts + 1554 - sessionAppendStr(tls, bp, ts+32861, bp+16) + sessionAppendStr(tls, bp, ts+32908, bp+16) for ii = 0; ii < (*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FnCol; ii++ { if *(*U8)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FabPK + uintptr(ii))) != 0 || bPatchset == 0 && *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FapValue + uintptr(ii)*8)) != 0 { sessionAppendStr(tls, bp, zSep, bp+16) if bStat1 != 0 && ii == 1 { sessionAppendStr(tls, bp, - ts+32937, bp+16) + ts+32984, bp+16) } else { sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(ii)*8)), bp+16) - sessionAppendStr(tls, bp, ts+32869, bp+16) + sessionAppendStr(tls, bp, ts+32916, bp+16) sessionAppendInteger(tls, bp, ii*2+2, bp+16) } - zSep = ts + 21525 + zSep = ts + 21572 } } @@ -133956,34 +134014,34 @@ *(*SessionBuffer)(unsafe.Pointer(bp)) = SessionBuffer{} var nPk int32 = 0 - sessionAppendStr(tls, bp, ts+33012, bp+16) + sessionAppendStr(tls, bp, ts+33059, bp+16) sessionAppendIdent(tls, bp, zTab, bp+16) - sessionAppendStr(tls, bp, ts+32861, bp+16) + sessionAppendStr(tls, bp, ts+32908, bp+16) for i = 0; i < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol; i++ { if *(*U8)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FabPK + uintptr(i))) != 0 { nPk++ sessionAppendStr(tls, bp, zSep, bp+16) sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(i)*8)), bp+16) - sessionAppendStr(tls, bp, ts+32932, bp+16) + sessionAppendStr(tls, bp, ts+32979, bp+16) sessionAppendInteger(tls, bp, i+1, bp+16) - zSep = ts + 21525 + zSep = ts + 21572 } } if nPk < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol { - sessionAppendStr(tls, bp, ts+33030, bp+16) + sessionAppendStr(tls, bp, ts+33077, bp+16) sessionAppendInteger(tls, bp, (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol+1, bp+16) - sessionAppendStr(tls, bp, ts+32573, bp+16) + sessionAppendStr(tls, bp, ts+32620, bp+16) zSep = ts + 1554 for i = 0; i < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol; i++ { if !(int32(*(*U8)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FabPK + 
uintptr(i)))) != 0) { sessionAppendStr(tls, bp, zSep, bp+16) sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(i)*8)), bp+16) - sessionAppendStr(tls, bp, ts+32869, bp+16) + sessionAppendStr(tls, bp, ts+32916, bp+16) sessionAppendInteger(tls, bp, i+1, bp+16) - zSep = ts + 33038 + zSep = ts + 33085 } } sessionAppendStr(tls, bp, ts+4957, bp+16) @@ -134010,9 +134068,9 @@ var i int32 *(*SessionBuffer)(unsafe.Pointer(bp)) = SessionBuffer{} - sessionAppendStr(tls, bp, ts+33043, bp+16) + sessionAppendStr(tls, bp, ts+33090, bp+16) sessionAppendIdent(tls, bp, zTab, bp+16) - sessionAppendStr(tls, bp, ts+21531, bp+16) + sessionAppendStr(tls, bp, ts+21578, bp+16) for i = 0; i < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol; i++ { if i != 0 { sessionAppendStr(tls, bp, ts+14614, bp+16) @@ -134020,9 +134078,9 @@ sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(i)*8)), bp+16) } - sessionAppendStr(tls, bp, ts+33061, bp+16) + sessionAppendStr(tls, bp, ts+33108, bp+16) for i = 1; i < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol; i++ { - sessionAppendStr(tls, bp, ts+33072, bp+16) + sessionAppendStr(tls, bp, ts+33119, bp+16) } sessionAppendStr(tls, bp, ts+4957, bp+16) @@ -134041,11 +134099,11 @@ var rc int32 = sessionSelectRow(tls, db, ts+11348, p) if rc == SQLITE_OK { rc = sessionPrepare(tls, db, p+16, - ts+33076) + ts+33123) } if rc == SQLITE_OK { rc = sessionPrepare(tls, db, p+8, - ts+33189) + ts+33236) } return rc } @@ -134073,7 +134131,7 @@ f func(*libc.TLS, uintptr, int32, uintptr) int32 })(unsafe.Pointer(&struct{ uintptr }{xValue})).f(tls, pIter, i, bp) if *(*uintptr)(unsafe.Pointer(bp)) == uintptr(0) { - rc = Xsqlite3CorruptError(tls, 220219) + rc = Xsqlite3CorruptError(tls, 220310) } else { rc = sessionBindValue(tls, pStmt, i+1, *(*uintptr)(unsafe.Pointer(bp))) } @@ -134326,7 +134384,7 @@ if *(*int32)(unsafe.Pointer(bp + 4)) != 0 { rc = sessionApplyOneOp(tls, pIter, pApply, xConflict, pCtx, uintptr(0), uintptr(0)) } else if *(*int32)(unsafe.Pointer(bp)) != 0 { - rc = Xsqlite3_exec(tls, db, ts+33333, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33380, uintptr(0), uintptr(0), uintptr(0)) if rc == SQLITE_OK { rc = sessionBindRow(tls, pIter, *(*uintptr)(unsafe.Pointer(&struct { @@ -134342,7 +134400,7 @@ rc = sessionApplyOneOp(tls, pIter, pApply, xConflict, pCtx, uintptr(0), uintptr(0)) } if rc == SQLITE_OK { - rc = Xsqlite3_exec(tls, db, ts+33354, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33401, uintptr(0), uintptr(0), uintptr(0)) } } } @@ -134415,10 +134473,10 @@ (*SessionApplyCtx)(unsafe.Pointer(bp + 48)).FbInvertConstraints = libc.BoolInt32(!!(flags&SQLITE_CHANGESETAPPLY_INVERT != 0)) Xsqlite3_mutex_enter(tls, Xsqlite3_db_mutex(tls, db)) if flags&SQLITE_CHANGESETAPPLY_NOSAVEPOINT == 0 { - rc = Xsqlite3_exec(tls, db, ts+33373, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33420, uintptr(0), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { - rc = Xsqlite3_exec(tls, db, ts+33399, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33446, uintptr(0), uintptr(0), uintptr(0)) } for rc == SQLITE_OK && SQLITE_ROW == Xsqlite3changeset_next(tls, pIter) { Xsqlite3changeset_op(tls, pIter, bp+176, bp+184, bp+188, uintptr(0)) @@ -134477,16 +134535,16 @@ if (*SessionApplyCtx)(unsafe.Pointer(bp+48)).FnCol == 0 { schemaMismatch = 1 Xsqlite3_log(tls, SQLITE_SCHEMA, - ts+33429, libc.VaList(bp+8, 
*(*uintptr)(unsafe.Pointer(bp + 200)))) + ts+33476, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(bp + 200)))) } else if (*SessionApplyCtx)(unsafe.Pointer(bp+48)).FnCol < *(*int32)(unsafe.Pointer(bp + 184)) { schemaMismatch = 1 Xsqlite3_log(tls, SQLITE_SCHEMA, - ts+33473, + ts+33520, libc.VaList(bp+16, *(*uintptr)(unsafe.Pointer(bp + 200)), (*SessionApplyCtx)(unsafe.Pointer(bp+48)).FnCol, *(*int32)(unsafe.Pointer(bp + 184)))) } else if *(*int32)(unsafe.Pointer(bp + 184)) < nMinCol || libc.Xmemcmp(tls, (*SessionApplyCtx)(unsafe.Pointer(bp+48)).FabPK, *(*uintptr)(unsafe.Pointer(bp + 192)), uint64(*(*int32)(unsafe.Pointer(bp + 184)))) != 0 { schemaMismatch = 1 Xsqlite3_log(tls, SQLITE_SCHEMA, - ts+33544, libc.VaList(bp+40, *(*uintptr)(unsafe.Pointer(bp + 200)))) + ts+33591, libc.VaList(bp+40, *(*uintptr)(unsafe.Pointer(bp + 200)))) } else { (*SessionApplyCtx)(unsafe.Pointer(bp + 48)).FnCol = *(*int32)(unsafe.Pointer(bp + 184)) if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(bp + 200)), ts+11348) { @@ -134540,14 +134598,14 @@ } } } - Xsqlite3_exec(tls, db, ts+33604, uintptr(0), uintptr(0), uintptr(0)) + Xsqlite3_exec(tls, db, ts+33651, uintptr(0), uintptr(0), uintptr(0)) if flags&SQLITE_CHANGESETAPPLY_NOSAVEPOINT == 0 { if rc == SQLITE_OK { - rc = Xsqlite3_exec(tls, db, ts+33634, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33681, uintptr(0), uintptr(0), uintptr(0)) } else { - Xsqlite3_exec(tls, db, ts+33658, uintptr(0), uintptr(0), uintptr(0)) - Xsqlite3_exec(tls, db, ts+33634, uintptr(0), uintptr(0), uintptr(0)) + Xsqlite3_exec(tls, db, ts+33705, uintptr(0), uintptr(0), uintptr(0)) + Xsqlite3_exec(tls, db, ts+33681, uintptr(0), uintptr(0), uintptr(0)) } } @@ -135795,7 +135853,7 @@ fts5yy_pop_parser_stack(tls, fts5yypParser) } - sqlite3Fts5ParseError(tls, pParse, ts+33686, 0) + sqlite3Fts5ParseError(tls, pParse, ts+33733, 0) (*Fts5yyParser)(unsafe.Pointer(fts5yypParser)).FpParse = pParse @@ -136083,7 +136141,7 @@ _ = fts5yymajor sqlite3Fts5ParseError(tls, - pParse, ts+33714, libc.VaList(bp, fts5yyminor.Fn, fts5yyminor.Fp)) + pParse, ts+33761, libc.VaList(bp, fts5yyminor.Fn, fts5yyminor.Fp)) (*Fts5yyParser)(unsafe.Pointer(fts5yypParser)).FpParse = pParse @@ -136270,7 +136328,7 @@ if n < 0 { n = int32(libc.Xstrlen(tls, z)) } - (*HighlightContext)(unsafe.Pointer(p)).FzOut = Xsqlite3_mprintf(tls, ts+33745, libc.VaList(bp, (*HighlightContext)(unsafe.Pointer(p)).FzOut, n, z)) + (*HighlightContext)(unsafe.Pointer(p)).FzOut = Xsqlite3_mprintf(tls, ts+33792, libc.VaList(bp, (*HighlightContext)(unsafe.Pointer(p)).FzOut, n, z)) if (*HighlightContext)(unsafe.Pointer(p)).FzOut == uintptr(0) { *(*int32)(unsafe.Pointer(pRc)) = SQLITE_NOMEM } @@ -136338,7 +136396,7 @@ var iCol int32 if nVal != 3 { - var zErr uintptr = ts + 33752 + var zErr uintptr = ts + 33799 Xsqlite3_result_error(tls, pCtx, zErr, -1) return } @@ -136520,7 +136578,7 @@ var nCol int32 if nVal != 5 { - var zErr uintptr = ts + 33802 + var zErr uintptr = ts + 33849 Xsqlite3_result_error(tls, pCtx, zErr, -1) return } @@ -136844,13 +136902,13 @@ defer tls.Free(96) *(*[3]Builtin)(unsafe.Pointer(bp)) = [3]Builtin{ - {FzFunc: ts + 33850, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { + {FzFunc: ts + 33897, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr, int32, uintptr) }{fts5SnippetFunction}))}, - {FzFunc: ts + 33858, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { + {FzFunc: ts + 33905, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr, 
int32, uintptr) }{fts5HighlightFunction}))}, - {FzFunc: ts + 33868, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { + {FzFunc: ts + 33915, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr, int32, uintptr) }{fts5Bm25Function}))}, } @@ -137401,7 +137459,7 @@ *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_OK var nCmd int32 = int32(libc.Xstrlen(tls, zCmd)) - if Xsqlite3_strnicmp(tls, ts+33873, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+33920, zCmd, nCmd) == 0 { var nByte int32 = int32(uint64(unsafe.Sizeof(int32(0))) * uint64(FTS5_MAX_PREFIX_INDEXES)) var p uintptr var bFirst int32 = 1 @@ -137428,14 +137486,14 @@ break } if int32(*(*uint8)(unsafe.Pointer(p))) < '0' || int32(*(*uint8)(unsafe.Pointer(p))) > '9' { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+33880, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+33927, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR break } if (*Fts5Config)(unsafe.Pointer(pConfig)).FnPrefix == FTS5_MAX_PREFIX_INDEXES { *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, - ts+33911, libc.VaList(bp, FTS5_MAX_PREFIX_INDEXES)) + ts+33958, libc.VaList(bp, FTS5_MAX_PREFIX_INDEXES)) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR break } @@ -137446,7 +137504,7 @@ } if nPre <= 0 || nPre >= 1000 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+33944, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+33991, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR break } @@ -137459,7 +137517,7 @@ return *(*int32)(unsafe.Pointer(bp + 40)) } - if Xsqlite3_strnicmp(tls, ts+33981, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+34028, zCmd, nCmd) == 0 { var p uintptr = zArg var nArg Sqlite3_int64 = Sqlite3_int64(libc.Xstrlen(tls, zArg) + uint64(1)) var azArg uintptr = sqlite3Fts5MallocZero(tls, bp+40, int64(uint64(unsafe.Sizeof(uintptr(0)))*uint64(nArg))) @@ -137468,7 +137526,7 @@ if azArg != 0 && pSpace != 0 { if (*Fts5Config)(unsafe.Pointer(pConfig)).FpTok != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+33990, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34037, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { for nArg = int64(0); p != 0 && *(*uint8)(unsafe.Pointer(p)) != 0; nArg++ { @@ -137487,7 +137545,7 @@ } } if p == uintptr(0) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34023, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34070, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { *(*int32)(unsafe.Pointer(bp + 40)) = sqlite3Fts5GetTokenizer(tls, pGlobal, @@ -137502,14 +137560,14 @@ return *(*int32)(unsafe.Pointer(bp + 40)) } - if Xsqlite3_strnicmp(tls, ts+34057, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+34104, zCmd, nCmd) == 0 { if (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent != FTS5_CONTENT_NORMAL { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34065, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34112, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { if *(*uint8)(unsafe.Pointer(zArg)) != 0 { (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent = FTS5_CONTENT_EXTERNAL - (*Fts5Config)(unsafe.Pointer(pConfig)).FzContent = sqlite3Fts5Mprintf(tls, bp+40, ts+34097, libc.VaList(bp+8, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, zArg)) + (*Fts5Config)(unsafe.Pointer(pConfig)).FzContent = sqlite3Fts5Mprintf(tls, bp+40, ts+34144, libc.VaList(bp+8, 
(*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, zArg)) } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent = FTS5_CONTENT_NONE } @@ -137517,9 +137575,9 @@ return *(*int32)(unsafe.Pointer(bp + 40)) } - if Xsqlite3_strnicmp(tls, ts+34103, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+34150, zCmd, nCmd) == 0 { if (*Fts5Config)(unsafe.Pointer(pConfig)).FzContentRowid != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34117, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34164, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FzContentRowid = sqlite3Fts5Strndup(tls, bp+40, zArg, -1) @@ -137527,9 +137585,9 @@ return *(*int32)(unsafe.Pointer(bp + 40)) } - if Xsqlite3_strnicmp(tls, ts+34155, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+34202, zCmd, nCmd) == 0 { if int32(*(*uint8)(unsafe.Pointer(zArg))) != '0' && int32(*(*uint8)(unsafe.Pointer(zArg))) != '1' || int32(*(*uint8)(unsafe.Pointer(zArg + 1))) != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34166, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34213, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize = libc.Bool32(int32(*(*uint8)(unsafe.Pointer(zArg))) == '1') @@ -137541,17 +137599,17 @@ *(*[4]Fts5Enum)(unsafe.Pointer(bp + 48)) = [4]Fts5Enum{ {FzName: ts + 8026, FeVal: FTS5_DETAIL_NONE}, {FzName: ts + 17345}, - {FzName: ts + 34201, FeVal: FTS5_DETAIL_COLUMNS}, + {FzName: ts + 34248, FeVal: FTS5_DETAIL_COLUMNS}, {}, } if libc.AssignPtrInt32(bp+40, fts5ConfigSetEnum(tls, bp+48, zArg, pConfig+92)) != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34209, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34256, 0) } return *(*int32)(unsafe.Pointer(bp + 40)) } - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34240, libc.VaList(bp+24, nCmd, zCmd)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34287, libc.VaList(bp+24, nCmd, zCmd)) return SQLITE_ERROR } @@ -137598,15 +137656,15 @@ defer tls.Free(16) var rc int32 = SQLITE_OK - if 0 == Xsqlite3_stricmp(tls, zCol, ts+22191) || + if 0 == Xsqlite3_stricmp(tls, zCol, ts+22238) || 0 == Xsqlite3_stricmp(tls, zCol, ts+16267) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34268, libc.VaList(bp, zCol)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34315, libc.VaList(bp, zCol)) rc = SQLITE_ERROR } else if zArg != 0 { - if 0 == Xsqlite3_stricmp(tls, zArg, ts+34298) { + if 0 == Xsqlite3_stricmp(tls, zArg, ts+34345) { *(*U8)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(p)).FabUnindexed + uintptr((*Fts5Config)(unsafe.Pointer(p)).FnCol))) = U8(1) } else { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34308, libc.VaList(bp+8, zArg)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34355, libc.VaList(bp+8, zArg)) rc = SQLITE_ERROR } } @@ -137623,13 +137681,13 @@ *(*int32)(unsafe.Pointer(bp + 24)) = SQLITE_OK *(*Fts5Buffer)(unsafe.Pointer(bp + 32)) = Fts5Buffer{} - sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+34339, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(p)).FzContentRowid)) + sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+34386, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(p)).FzContentRowid)) if (*Fts5Config)(unsafe.Pointer(p)).FeContent != FTS5_CONTENT_NONE { for i = 0; i < (*Fts5Config)(unsafe.Pointer(p)).FnCol; i++ { if 
(*Fts5Config)(unsafe.Pointer(p)).FeContent == FTS5_CONTENT_EXTERNAL { - sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+34344, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(p)).FazCol + uintptr(i)*8)))) + sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+34391, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(p)).FazCol + uintptr(i)*8)))) } else { - sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+34351, libc.VaList(bp+16, i)) + sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+34398, libc.VaList(bp+16, i)) } } } @@ -137667,8 +137725,8 @@ (*Fts5Config)(unsafe.Pointer(pRet)).FzName = sqlite3Fts5Strndup(tls, bp+40, *(*uintptr)(unsafe.Pointer(azArg + 2*8)), -1) (*Fts5Config)(unsafe.Pointer(pRet)).FbColumnsize = 1 (*Fts5Config)(unsafe.Pointer(pRet)).FeDetail = FTS5_DETAIL_FULL - if *(*int32)(unsafe.Pointer(bp + 40)) == SQLITE_OK && Xsqlite3_stricmp(tls, (*Fts5Config)(unsafe.Pointer(pRet)).FzName, ts+22191) == 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34359, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pRet)).FzName)) + if *(*int32)(unsafe.Pointer(bp + 40)) == SQLITE_OK && Xsqlite3_stricmp(tls, (*Fts5Config)(unsafe.Pointer(pRet)).FzName, ts+22238) == 0 { + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34406, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pRet)).FzName)) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } @@ -137700,7 +137758,7 @@ if *(*int32)(unsafe.Pointer(bp + 40)) == SQLITE_OK { if z == uintptr(0) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34388, libc.VaList(bp+8, zOrig)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34435, libc.VaList(bp+8, zOrig)) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { if bOption != 0 { @@ -137737,14 +137795,14 @@ var zTail uintptr = uintptr(0) if (*Fts5Config)(unsafe.Pointer(pRet)).FeContent == FTS5_CONTENT_NORMAL { - zTail = ts + 34057 + zTail = ts + 34104 } else if (*Fts5Config)(unsafe.Pointer(pRet)).FbColumnsize != 0 { - zTail = ts + 34408 + zTail = ts + 34455 } if zTail != 0 { (*Fts5Config)(unsafe.Pointer(pRet)).FzContent = sqlite3Fts5Mprintf(tls, - bp+40, ts+34416, libc.VaList(bp+16, (*Fts5Config)(unsafe.Pointer(pRet)).FzDb, (*Fts5Config)(unsafe.Pointer(pRet)).FzName, zTail)) + bp+40, ts+34463, libc.VaList(bp+16, (*Fts5Config)(unsafe.Pointer(pRet)).FzDb, (*Fts5Config)(unsafe.Pointer(pRet)).FzName, zTail)) } } @@ -137793,7 +137851,7 @@ *(*int32)(unsafe.Pointer(bp + 48)) = SQLITE_OK var zSql uintptr - zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34427, 0) + zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34474, 0) for i = 0; zSql != 0 && i < (*Fts5Config)(unsafe.Pointer(pConfig)).FnCol; i++ { var zSep uintptr = func() uintptr { if i == 0 { @@ -137801,10 +137859,10 @@ } return ts + 14614 }() - zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34443, libc.VaList(bp, zSql, zSep, *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(pConfig)).FazCol + uintptr(i)*8)))) + zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34490, libc.VaList(bp, zSql, zSep, *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(pConfig)).FazCol + uintptr(i)*8)))) } - zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34450, - libc.VaList(bp+24, zSql, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, ts+22191)) + zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34497, + libc.VaList(bp+24, zSql, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, ts+22238)) if zSql != 0 { *(*int32)(unsafe.Pointer(bp + 48)) = Xsqlite3_declare_vtab(tls, 
(*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, zSql) @@ -137914,7 +137972,7 @@ var rc int32 = SQLITE_OK - if 0 == Xsqlite3_stricmp(tls, zKey, ts+34476) { + if 0 == Xsqlite3_stricmp(tls, zKey, ts+34523) { var pgsz int32 = 0 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { pgsz = Xsqlite3_value_int(tls, pVal) @@ -137924,7 +137982,7 @@ } else { (*Fts5Config)(unsafe.Pointer(pConfig)).Fpgsz = pgsz } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34481) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34528) { var nHashSize int32 = -1 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { nHashSize = Xsqlite3_value_int(tls, pVal) @@ -137934,7 +137992,7 @@ } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FnHashSize = nHashSize } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34490) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34537) { var nAutomerge int32 = -1 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { nAutomerge = Xsqlite3_value_int(tls, pVal) @@ -137947,7 +138005,7 @@ } (*Fts5Config)(unsafe.Pointer(pConfig)).FnAutomerge = nAutomerge } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34500) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34547) { var nUsermerge int32 = -1 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { nUsermerge = Xsqlite3_value_int(tls, pVal) @@ -137957,7 +138015,7 @@ } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FnUsermerge = nUsermerge } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34510) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34557) { var nCrisisMerge int32 = -1 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { nCrisisMerge = Xsqlite3_value_int(tls, pVal) @@ -137973,7 +138031,7 @@ } (*Fts5Config)(unsafe.Pointer(pConfig)).FnCrisisMerge = nCrisisMerge } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+22191) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+22238) { var zIn uintptr = Xsqlite3_value_text(tls, pVal) rc = sqlite3Fts5ConfigParseRank(tls, zIn, bp, bp+8) @@ -137996,7 +138054,7 @@ bp := tls.Alloc(52) defer tls.Free(52) - var zSelect uintptr = ts + 34522 + var zSelect uintptr = ts + 34569 var zSql uintptr *(*uintptr)(unsafe.Pointer(bp + 40)) = uintptr(0) *(*int32)(unsafe.Pointer(bp + 32)) = SQLITE_OK @@ -138018,7 +138076,7 @@ for SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 40))) { var zK uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 40)), 0) var pVal uintptr = Xsqlite3_column_value(tls, *(*uintptr)(unsafe.Pointer(bp + 40)), 1) - if 0 == Xsqlite3_stricmp(tls, zK, ts+34554) { + if 0 == Xsqlite3_stricmp(tls, zK, ts+34601) { iVersion = Xsqlite3_value_int(tls, pVal) } else { *(*int32)(unsafe.Pointer(bp + 48)) = 0 @@ -138032,7 +138090,7 @@ *(*int32)(unsafe.Pointer(bp + 32)) = SQLITE_ERROR if (*Fts5Config)(unsafe.Pointer(pConfig)).FpzErrmsg != 0 { *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(pConfig)).FpzErrmsg)) = Xsqlite3_mprintf(tls, - ts+34562, + ts+34609, libc.VaList(bp+16, iVersion, FTS5_CURRENT_VERSION)) } } @@ -138130,7 +138188,7 @@ } } if int32(*(*uint8)(unsafe.Pointer(z2))) == 0 { - sqlite3Fts5ParseError(tls, pParse, ts+34627, 0) + sqlite3Fts5ParseError(tls, pParse, ts+34674, 0) return FTS5_EOF } } @@ -138143,20 +138201,20 @@ { var z2 uintptr if sqlite3Fts5IsBareword(tls, *(*uint8)(unsafe.Pointer(z))) == 0 { - sqlite3Fts5ParseError(tls, pParse, ts+34647, libc.VaList(bp, z)) + sqlite3Fts5ParseError(tls, pParse, ts+34694, libc.VaList(bp, z)) return FTS5_EOF } tok = FTS5_STRING for z2 = z + 1; sqlite3Fts5IsBareword(tls, 
*(*uint8)(unsafe.Pointer(z2))) != 0; z2++ { } (*Fts5Token)(unsafe.Pointer(pToken)).Fn = int32((int64(z2) - int64(z)) / 1) - if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 2 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+34678, uint64(2)) == 0 { + if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 2 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+34725, uint64(2)) == 0 { tok = FTS5_OR } - if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 3 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+34681, uint64(3)) == 0 { + if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 3 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+34728, uint64(3)) == 0 { tok = FTS5_NOT } - if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 3 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+30063, uint64(3)) == 0 { + if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 3 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+30110, uint64(3)) == 0 { tok = FTS5_AND } break @@ -139934,9 +139992,9 @@ bp := tls.Alloc(16) defer tls.Free(16) - if (*Fts5Token)(unsafe.Pointer(pTok)).Fn != 4 || libc.Xmemcmp(tls, ts+34685, (*Fts5Token)(unsafe.Pointer(pTok)).Fp, uint64(4)) != 0 { + if (*Fts5Token)(unsafe.Pointer(pTok)).Fn != 4 || libc.Xmemcmp(tls, ts+34732, (*Fts5Token)(unsafe.Pointer(pTok)).Fp, uint64(4)) != 0 { sqlite3Fts5ParseError(tls, - pParse, ts+33714, libc.VaList(bp, (*Fts5Token)(unsafe.Pointer(pTok)).Fn, (*Fts5Token)(unsafe.Pointer(pTok)).Fp)) + pParse, ts+33761, libc.VaList(bp, (*Fts5Token)(unsafe.Pointer(pTok)).Fn, (*Fts5Token)(unsafe.Pointer(pTok)).Fp)) } } @@ -139952,7 +140010,7 @@ var c uint8 = *(*uint8)(unsafe.Pointer((*Fts5Token)(unsafe.Pointer(p)).Fp + uintptr(i))) if int32(c) < '0' || int32(c) > '9' { sqlite3Fts5ParseError(tls, - pParse, ts+34690, libc.VaList(bp, (*Fts5Token)(unsafe.Pointer(p)).Fn, (*Fts5Token)(unsafe.Pointer(p)).Fp)) + pParse, ts+34737, libc.VaList(bp, (*Fts5Token)(unsafe.Pointer(p)).Fn, (*Fts5Token)(unsafe.Pointer(p)).Fp)) return } nNear = nNear*10 + (int32(*(*uint8)(unsafe.Pointer((*Fts5Token)(unsafe.Pointer(p)).Fp + uintptr(i)))) - '0') @@ -140039,7 +140097,7 @@ } } if iCol == (*Fts5Config)(unsafe.Pointer(pConfig)).FnCol { - sqlite3Fts5ParseError(tls, pParse, ts+20528, libc.VaList(bp, z)) + sqlite3Fts5ParseError(tls, pParse, ts+20575, libc.VaList(bp, z)) } else { pRet = fts5ParseColset(tls, pParse, pColset, iCol) } @@ -140120,7 +140178,7 @@ *(*uintptr)(unsafe.Pointer(bp)) = pColset if (*Fts5Config)(unsafe.Pointer((*Fts5Parse)(unsafe.Pointer(pParse)).FpConfig)).FeDetail == FTS5_DETAIL_NONE { sqlite3Fts5ParseError(tls, pParse, - ts+34719, 0) + ts+34766, 0) } else { fts5ParseSetColset(tls, pParse, pExpr, pColset, bp) } @@ -140290,12 +140348,12 @@ (*Fts5ExprPhrase)(unsafe.Pointer(pPhrase)).FnTerm > 1 || (*Fts5ExprPhrase)(unsafe.Pointer(pPhrase)).FnTerm > 0 && (*Fts5ExprTerm)(unsafe.Pointer(pPhrase+32)).FbFirst != 0 { sqlite3Fts5ParseError(tls, pParse, - ts+34772, + ts+34819, libc.VaList(bp, func() uintptr { if (*Fts5ExprNearset)(unsafe.Pointer(pNear)).FnPhrase == 1 { - return ts + 34822 + return ts + 34869 } - return ts + 34685 + return ts + 34732 }())) Xsqlite3_free(tls, pRet) pRet = uintptr(0) @@ -141238,7 +141296,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpReader == uintptr(0) && rc == SQLITE_OK { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig rc = Xsqlite3_blob_open(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, - (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl, ts+34829, 
iRowid, 0, p+56) + (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl, ts+34876, iRowid, 0, p+56) } if rc == SQLITE_ERROR { @@ -141317,7 +141375,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpWriter == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig fts5IndexPrepareStmt(tls, p, p+64, Xsqlite3_mprintf(tls, - ts+34835, + ts+34882, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName))) if (*Fts5Index)(unsafe.Pointer(p)).Frc != 0 { return @@ -141342,7 +141400,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpDeleter == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig var zSql uintptr = Xsqlite3_mprintf(tls, - ts+34886, + ts+34933, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) if fts5IndexPrepareStmt(tls, p, p+72, zSql) != 0 { return @@ -141365,7 +141423,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpIdxDeleter == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig fts5IndexPrepareStmt(tls, p, p+88, Xsqlite3_mprintf(tls, - ts+34935, + ts+34982, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName))) } if (*Fts5Index)(unsafe.Pointer(p)).Frc == SQLITE_OK { @@ -141604,7 +141662,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).Frc == SQLITE_OK { if (*Fts5Index)(unsafe.Pointer(p)).FpDataVersion == uintptr(0) { (*Fts5Index)(unsafe.Pointer(p)).Frc = fts5IndexPrepareStmt(tls, p, p+112, - Xsqlite3_mprintf(tls, ts+34975, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer((*Fts5Index)(unsafe.Pointer(p)).FpConfig)).FzDb))) + Xsqlite3_mprintf(tls, ts+35022, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer((*Fts5Index)(unsafe.Pointer(p)).FpConfig)).FzDb))) if (*Fts5Index)(unsafe.Pointer(p)).Frc != 0 { return int64(0) } @@ -142803,7 +142861,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpIdxSelect == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig fts5IndexPrepareStmt(tls, p, p+96, Xsqlite3_mprintf(tls, - ts+34998, + ts+35045, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName))) } return (*Fts5Index)(unsafe.Pointer(p)).FpIdxSelect @@ -144269,7 +144327,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpIdxWriter == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig fts5IndexPrepareStmt(tls, p, p+80, Xsqlite3_mprintf(tls, - ts+35082, + ts+35129, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName))) } @@ -145351,13 +145409,13 @@ if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { (*Fts5Index)(unsafe.Pointer(p)).FpConfig = pConfig (*Fts5Index)(unsafe.Pointer(p)).FnWorkUnit = FTS5_WORK_UNIT - (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl = sqlite3Fts5Mprintf(tls, bp+8, ts+35139, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) + (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl = sqlite3Fts5Mprintf(tls, bp+8, ts+35186, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) if (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl != 0 && bCreate != 0 { *(*int32)(unsafe.Pointer(bp + 8)) = sqlite3Fts5CreateTable(tls, - pConfig, ts+25056, ts+35147, 0, pzErr) + pConfig, ts+25103, ts+35194, 0, pzErr) if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { *(*int32)(unsafe.Pointer(bp + 8)) = sqlite3Fts5CreateTable(tls, pConfig, ts+11488, - ts+35182, + ts+35229, 1, pzErr) } if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { @@ 
-145610,7 +145668,7 @@ sqlite3Fts5Put32(tls, bp, iNew) rc = Xsqlite3_blob_open(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl, - ts+34829, int64(FTS5_STRUCTURE_ROWID), 1, bp+8) + ts+34876, int64(FTS5_STRUCTURE_ROWID), 1, bp+8) if rc == SQLITE_OK { Xsqlite3_blob_write(tls, *(*uintptr)(unsafe.Pointer(bp + 8)), bp, 4, 0) rc = Xsqlite3_blob_close(tls, *(*uintptr)(unsafe.Pointer(bp + 8))) @@ -145724,7 +145782,7 @@ } fts5IndexPrepareStmt(tls, p, bp+24, Xsqlite3_mprintf(tls, - ts+35226, + ts+35273, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, (*Fts5StructureSegment)(unsafe.Pointer(pSeg)).FiSegid))) for (*Fts5Index)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) { @@ -145894,7 +145952,7 @@ } else { (*Fts5Buffer)(unsafe.Pointer(bp + 16)).Fn = 0 fts5SegiterPoslist(tls, p, *(*uintptr)(unsafe.Pointer(bp))+96+uintptr((*Fts5CResult)(unsafe.Pointer((*Fts5Iter)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FaFirst+1*4)).FiFirst)*120, uintptr(0), bp+16) - sqlite3Fts5BufferAppendBlob(tls, p+52, bp+16, uint32(4), ts+35312) + sqlite3Fts5BufferAppendBlob(tls, p+52, bp+16, uint32(4), ts+35359) for 0 == sqlite3Fts5PoslistNext64(tls, (*Fts5Buffer)(unsafe.Pointer(bp+16)).Fp, (*Fts5Buffer)(unsafe.Pointer(bp+16)).Fn, bp+32, bp+40) { var iCol int32 = int32(*(*I64)(unsafe.Pointer(bp + 40)) >> 32) var iTokOff int32 = int32(*(*I64)(unsafe.Pointer(bp + 40)) & int64(0x7FFFFFFF)) @@ -146165,7 +146223,7 @@ if (*Fts5Config)(unsafe.Pointer(pConfig)).FbLock != 0 { (*Fts5Table)(unsafe.Pointer(pTab)).Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+35317, 0) + ts+35364, 0) return SQLITE_ERROR } @@ -146589,7 +146647,7 @@ (*Fts5Sorter)(unsafe.Pointer(pSorter)).FnIdx = nPhrase rc = fts5PrepareStatement(tls, pSorter, pConfig, - ts+35356, + ts+35403, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zRank, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, func() uintptr { if zRankArgs != 0 { @@ -146605,9 +146663,9 @@ }(), func() uintptr { if bDesc != 0 { - return ts + 35411 + return ts + 35458 } - return ts + 35416 + return ts + 35463 }())) (*Fts5Cursor)(unsafe.Pointer(pCsr)).FpSorter = pSorter @@ -146653,12 +146711,12 @@ (*Fts5Cursor)(unsafe.Pointer(pCsr)).FePlan = FTS5_PLAN_SPECIAL - if n == 5 && 0 == Xsqlite3_strnicmp(tls, ts+35420, z, n) { + if n == 5 && 0 == Xsqlite3_strnicmp(tls, ts+35467, z, n) { (*Fts5Cursor)(unsafe.Pointer(pCsr)).FiSpecial = I64(sqlite3Fts5IndexReads(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.FpIndex)) } else if n == 2 && 0 == Xsqlite3_strnicmp(tls, ts+5057, z, n) { (*Fts5Cursor)(unsafe.Pointer(pCsr)).FiSpecial = (*Fts5Cursor)(unsafe.Pointer(pCsr)).FiCsrId } else { - (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, ts+35426, libc.VaList(bp, n, z)) + (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, ts+35473, libc.VaList(bp, n, z)) rc = SQLITE_ERROR } @@ -146689,7 +146747,7 @@ var zRankArgs uintptr = (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRankArgs if zRankArgs != 0 { - var zSql uintptr = sqlite3Fts5Mprintf(tls, bp+16, ts+35454, libc.VaList(bp, zRankArgs)) + var zSql uintptr = sqlite3Fts5Mprintf(tls, bp+16, ts+35501, libc.VaList(bp, zRankArgs)) if zSql != 0 { *(*uintptr)(unsafe.Pointer(bp + 24)) = uintptr(0) *(*int32)(unsafe.Pointer(bp + 16)) = Xsqlite3_prepare_v3(tls, 
(*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, zSql, -1, @@ -146720,7 +146778,7 @@ if *(*int32)(unsafe.Pointer(bp + 16)) == SQLITE_OK { pAux = fts5FindAuxiliary(tls, pTab, zRank) if pAux == uintptr(0) { - (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, ts+35464, libc.VaList(bp+8, zRank)) + (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, ts+35511, libc.VaList(bp+8, zRank)) *(*int32)(unsafe.Pointer(bp + 16)) = SQLITE_ERROR } } @@ -146752,14 +146810,14 @@ *(*int32)(unsafe.Pointer(pCsr + 80)) |= FTS5CSR_FREE_ZRANK } else if rc == SQLITE_ERROR { (*Sqlite3_vtab)(unsafe.Pointer((*Fts5Cursor)(unsafe.Pointer(pCsr)).Fbase.FpVtab)).FzErrMsg = Xsqlite3_mprintf(tls, - ts+35485, libc.VaList(bp, z)) + ts+35532, libc.VaList(bp, z)) } } else { if (*Fts5Config)(unsafe.Pointer(pConfig)).FzRank != 0 { (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRank = (*Fts5Config)(unsafe.Pointer(pConfig)).FzRank (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRankArgs = (*Fts5Config)(unsafe.Pointer(pConfig)).FzRankArgs } else { - (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRank = ts + 33868 + (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRank = ts + 33915 (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRankArgs = uintptr(0) } } @@ -146815,7 +146873,7 @@ goto __1 } (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+35317, 0) + ts+35364, 0) return SQLITE_ERROR __1: ; @@ -147032,7 +147090,7 @@ goto __40 } *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(pConfig)).FpzErrmsg)) = Xsqlite3_mprintf(tls, - ts+35518, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) + ts+35565, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) rc = SQLITE_ERROR goto __41 __40: @@ -147177,28 +147235,28 @@ var rc int32 = SQLITE_OK *(*int32)(unsafe.Pointer(bp)) = 0 - if 0 == Xsqlite3_stricmp(tls, ts+35554, zCmd) { + if 0 == Xsqlite3_stricmp(tls, ts+35601, zCmd) { if (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NORMAL { fts5SetVtabError(tls, pTab, - ts+35565, 0) + ts+35612, 0) rc = SQLITE_ERROR } else { rc = sqlite3Fts5StorageDeleteAll(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage) } - } else if 0 == Xsqlite3_stricmp(tls, ts+35645, zCmd) { + } else if 0 == Xsqlite3_stricmp(tls, ts+35692, zCmd) { if (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NONE { fts5SetVtabError(tls, pTab, - ts+35653, 0) + ts+35700, 0) rc = SQLITE_ERROR } else { rc = sqlite3Fts5StorageRebuild(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage) } } else if 0 == Xsqlite3_stricmp(tls, ts+16934, zCmd) { rc = sqlite3Fts5StorageOptimize(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage) - } else if 0 == Xsqlite3_stricmp(tls, ts+35709, zCmd) { + } else if 0 == Xsqlite3_stricmp(tls, ts+35756, zCmd) { var nMerge int32 = Xsqlite3_value_int(tls, pVal) rc = sqlite3Fts5StorageMerge(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage, nMerge) - } else if 0 == Xsqlite3_stricmp(tls, ts+35715, zCmd) { + } else if 0 == Xsqlite3_stricmp(tls, ts+35762, zCmd) { var iArg int32 = Xsqlite3_value_int(tls, pVal) rc = sqlite3Fts5StorageIntegrity(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage, iArg) } else { @@ -147269,12 +147327,12 @@ if eType0 == SQLITE_INTEGER && fts5IsContentless(tls, pTab) != 0 { (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+35731, + ts+35778, libc.VaList(bp, func() uintptr { if nArg > 1 { - return ts + 20429 + return ts + 20476 } - return ts + 35768 + return ts + 35815 }(), 
(*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) *(*int32)(unsafe.Pointer(bp + 16)) = SQLITE_ERROR } else if nArg == 1 { @@ -147904,7 +147962,7 @@ pCsr = fts5CursorFromCsrid(tls, (*Fts5Auxiliary)(unsafe.Pointer(pAux)).FpGlobal, iCsrId) if pCsr == uintptr(0) || (*Fts5Cursor)(unsafe.Pointer(pCsr)).FePlan == 0 { - var zErr uintptr = Xsqlite3_mprintf(tls, ts+35780, libc.VaList(bp, iCsrId)) + var zErr uintptr = Xsqlite3_mprintf(tls, ts+35827, libc.VaList(bp, iCsrId)) Xsqlite3_result_error(tls, context, zErr, -1) Xsqlite3_free(tls, zErr) } else { @@ -148148,7 +148206,7 @@ }()) if pMod == uintptr(0) { rc = SQLITE_ERROR - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35801, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(azArg)))) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35848, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(azArg)))) } else { rc = (*struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 @@ -148167,7 +148225,7 @@ (*Fts5Config)(unsafe.Pointer(pConfig)).FpTokApi = pMod + 16 if rc != SQLITE_OK { if pzErr != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35823, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35870, 0) } } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FePattern = sqlite3Fts5TokenizerPattern(tls, @@ -148214,7 +148272,7 @@ var ppApi uintptr _ = nArg - ppApi = Xsqlite3_value_pointer(tls, *(*uintptr)(unsafe.Pointer(apArg)), ts+35854) + ppApi = Xsqlite3_value_pointer(tls, *(*uintptr)(unsafe.Pointer(apArg)), ts+35901) if ppApi != 0 { *(*uintptr)(unsafe.Pointer(ppApi)) = pGlobal } @@ -148223,7 +148281,7 @@ func fts5SourceIdFunc(tls *libc.TLS, pCtx uintptr, nArg int32, apUnused uintptr) { _ = nArg _ = apUnused - Xsqlite3_result_text(tls, pCtx, ts+35867, -1, libc.UintptrFromInt32(-1)) + Xsqlite3_result_text(tls, pCtx, ts+35914, -1, libc.UintptrFromInt32(-1)) } func fts5ShadowName(tls *libc.TLS, zName uintptr) int32 { @@ -148237,7 +148295,7 @@ } var azName2 = [5]uintptr{ - ts + 35958, ts + 34057, ts + 25056, ts + 34408, ts + 11488, + ts + 36005, ts + 34104, ts + 25103, ts + 34455, ts + 11488, } func fts5Init(tls *libc.TLS, db uintptr) int32 { @@ -148261,7 +148319,7 @@ (*Fts5Global)(unsafe.Pointer(pGlobal)).Fapi.FxFindTokenizer = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr, uintptr) int32 }{fts5FindTokenizer})) - rc = Xsqlite3_create_module_v2(tls, db, ts+35965, uintptr(unsafe.Pointer(&fts5Mod)), p, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5ModuleDestroy}))) + rc = Xsqlite3_create_module_v2(tls, db, ts+36012, uintptr(unsafe.Pointer(&fts5Mod)), p, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5ModuleDestroy}))) if rc == SQLITE_OK { rc = sqlite3Fts5IndexInit(tls, db) } @@ -148279,13 +148337,13 @@ } if rc == SQLITE_OK { rc = Xsqlite3_create_function(tls, - db, ts+35965, 1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { + db, ts+36012, 1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{fts5Fts5Func})), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { rc = Xsqlite3_create_function(tls, - db, ts+35970, 0, + db, ts+36017, 0, SQLITE_UTF8|SQLITE_DETERMINISTIC|SQLITE_INNOCUOUS, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) @@ -148342,17 +148400,17 @@ if *(*uintptr)(unsafe.Pointer(p + 40 + uintptr(eStmt)*8)) == uintptr(0) { *(*[11]uintptr)(unsafe.Pointer(bp + 128)) = [11]uintptr{ - ts + 35985, - ts + 36053, - ts + 36122, - ts + 36155, - 
ts + 36194, - ts + 36234, - ts + 36273, - ts + 36314, - ts + 36353, - ts + 36395, - ts + 36435, + ts + 36032, + ts + 36100, + ts + 36169, + ts + 36202, + ts + 36241, + ts + 36281, + ts + 36320, + ts + 36361, + ts + 36400, + ts + 36442, + ts + 36482, } var pC uintptr = (*Fts5Storage)(unsafe.Pointer(p)).FpConfig var zSql uintptr = uintptr(0) @@ -148454,18 +148512,18 @@ defer tls.Free(80) var rc int32 = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36458, + ts+36505, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36562, + ts+36609, libc.VaList(bp+48, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) } if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NORMAL { rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36600, + ts+36647, libc.VaList(bp+64, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) } return rc @@ -148477,7 +148535,7 @@ if *(*int32)(unsafe.Pointer(pRc)) == SQLITE_OK { *(*int32)(unsafe.Pointer(pRc)) = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36638, + ts+36685, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zTail, zName, zTail)) } } @@ -148489,14 +148547,14 @@ var pConfig uintptr = (*Fts5Storage)(unsafe.Pointer(pStorage)).FpConfig *(*int32)(unsafe.Pointer(bp)) = sqlite3Fts5StorageSync(tls, pStorage) - fts5StorageRenameOne(tls, pConfig, bp, ts+25056, zName) + fts5StorageRenameOne(tls, pConfig, bp, ts+25103, zName) fts5StorageRenameOne(tls, pConfig, bp, ts+11488, zName) - fts5StorageRenameOne(tls, pConfig, bp, ts+35958, zName) + fts5StorageRenameOne(tls, pConfig, bp, ts+36005, zName) if (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { - fts5StorageRenameOne(tls, pConfig, bp, ts+34408, zName) + fts5StorageRenameOne(tls, pConfig, bp, ts+34455, zName) } if (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NORMAL { - fts5StorageRenameOne(tls, pConfig, bp, ts+34057, zName) + fts5StorageRenameOne(tls, pConfig, bp, ts+34104, zName) } return *(*int32)(unsafe.Pointer(bp)) } @@ -148508,17 +148566,17 @@ var rc int32 *(*uintptr)(unsafe.Pointer(bp + 64)) = uintptr(0) - rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, bp+64, ts+36680, + rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, bp+64, ts+36727, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zPost, zDefn, func() uintptr { if bWithout != 0 { - return ts + 29709 + return ts + 29756 } return ts + 1554 }())) if *(*uintptr)(unsafe.Pointer(bp + 64)) != 0 { *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, - ts+36710, + ts+36757, libc.VaList(bp+40, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zPost, *(*uintptr)(unsafe.Pointer(bp + 64)))) Xsqlite3_free(tls, *(*uintptr)(unsafe.Pointer(bp + 64))) } @@ -148555,27 +148613,27 @@ } else { var i int32 var iOff int32 - Xsqlite3_snprintf(tls, nDefn, zDefn, ts+36754, 0) + 
Xsqlite3_snprintf(tls, nDefn, zDefn, ts+36801, 0) iOff = int32(libc.Xstrlen(tls, zDefn)) for i = 0; i < (*Fts5Config)(unsafe.Pointer(pConfig)).FnCol; i++ { - Xsqlite3_snprintf(tls, nDefn-iOff, zDefn+uintptr(iOff), ts+36777, libc.VaList(bp, i)) + Xsqlite3_snprintf(tls, nDefn-iOff, zDefn+uintptr(iOff), ts+36824, libc.VaList(bp, i)) iOff = iOff + int32(libc.Xstrlen(tls, zDefn+uintptr(iOff))) } - rc = sqlite3Fts5CreateTable(tls, pConfig, ts+34057, zDefn, 0, pzErr) + rc = sqlite3Fts5CreateTable(tls, pConfig, ts+34104, zDefn, 0, pzErr) } Xsqlite3_free(tls, zDefn) } if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { rc = sqlite3Fts5CreateTable(tls, - pConfig, ts+34408, ts+36783, 0, pzErr) + pConfig, ts+34455, ts+36830, 0, pzErr) } if rc == SQLITE_OK { rc = sqlite3Fts5CreateTable(tls, - pConfig, ts+35958, ts+36815, 1, pzErr) + pConfig, ts+36005, ts+36862, 1, pzErr) } if rc == SQLITE_OK { - rc = sqlite3Fts5StorageConfigValue(tls, p, ts+34554, uintptr(0), FTS5_CURRENT_VERSION) + rc = sqlite3Fts5StorageConfigValue(tls, p, ts+34601, uintptr(0), FTS5_CURRENT_VERSION) } } @@ -148781,12 +148839,12 @@ (*Fts5Storage)(unsafe.Pointer(p)).FbTotalsValid = 0 rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36832, + ts+36879, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36882, + ts+36929, libc.VaList(bp+32, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) } @@ -148794,7 +148852,7 @@ rc = sqlite3Fts5IndexReinit(tls, (*Fts5Storage)(unsafe.Pointer(p)).FpIndex) } if rc == SQLITE_OK { - rc = sqlite3Fts5StorageConfigValue(tls, p, ts+34554, uintptr(0), FTS5_CURRENT_VERSION) + rc = sqlite3Fts5StorageConfigValue(tls, p, ts+34601, uintptr(0), FTS5_CURRENT_VERSION) } return rc } @@ -148970,7 +149028,7 @@ var zSql uintptr var rc int32 - zSql = Xsqlite3_mprintf(tls, ts+36911, + zSql = Xsqlite3_mprintf(tls, ts+36958, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zSuffix)) if zSql == uintptr(0) { rc = SQLITE_NOMEM @@ -149152,14 +149210,14 @@ if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NORMAL { *(*I64)(unsafe.Pointer(bp + 48)) = int64(0) - rc = fts5StorageCount(tls, p, ts+34057, bp+48) + rc = fts5StorageCount(tls, p, ts+34104, bp+48) if rc == SQLITE_OK && *(*I64)(unsafe.Pointer(bp + 48)) != (*Fts5Storage)(unsafe.Pointer(p)).FnTotalRow { rc = SQLITE_CORRUPT | int32(1)<<8 } } if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { *(*I64)(unsafe.Pointer(bp + 56)) = int64(0) - rc = fts5StorageCount(tls, p, ts+34408, bp+56) + rc = fts5StorageCount(tls, p, ts+34455, bp+56) if rc == SQLITE_OK && *(*I64)(unsafe.Pointer(bp + 56)) != (*Fts5Storage)(unsafe.Pointer(p)).FnTotalRow { rc = SQLITE_CORRUPT | int32(1)<<8 } @@ -149354,9 +149412,9 @@ libc.Xmemcpy(tls, p, uintptr(unsafe.Pointer(&aAsciiTokenChar)), uint64(unsafe.Sizeof(aAsciiTokenChar))) for i = 0; rc == SQLITE_OK && i < nArg; i = i + 2 { var zArg uintptr = *(*uintptr)(unsafe.Pointer(azArg + uintptr(i+1)*8)) - if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36943) { + if 0 == 
Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36990) { fts5AsciiAddExceptions(tls, p, zArg, 1) - } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36954) { + } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37001) { fts5AsciiAddExceptions(tls, p, zArg, 0) } else { rc = SQLITE_ERROR @@ -149571,7 +149629,7 @@ } else { p = Xsqlite3_malloc(tls, int32(unsafe.Sizeof(Unicode61Tokenizer{}))) if p != 0 { - var zCat uintptr = ts + 36965 + var zCat uintptr = ts + 37012 var i int32 libc.Xmemset(tls, p, 0, uint64(unsafe.Sizeof(Unicode61Tokenizer{}))) @@ -149583,7 +149641,7 @@ } for i = 0; rc == SQLITE_OK && i < nArg; i = i + 2 { - if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36974) { + if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37021) { zCat = *(*uintptr)(unsafe.Pointer(azArg + uintptr(i+1)*8)) } } @@ -149594,18 +149652,18 @@ for i = 0; rc == SQLITE_OK && i < nArg; i = i + 2 { var zArg uintptr = *(*uintptr)(unsafe.Pointer(azArg + uintptr(i+1)*8)) - if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36985) { + if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37032) { if int32(*(*uint8)(unsafe.Pointer(zArg))) != '0' && int32(*(*uint8)(unsafe.Pointer(zArg))) != '1' && int32(*(*uint8)(unsafe.Pointer(zArg))) != '2' || *(*uint8)(unsafe.Pointer(zArg + 1)) != 0 { rc = SQLITE_ERROR } else { (*Unicode61Tokenizer)(unsafe.Pointer(p)).FeRemoveDiacritic = int32(*(*uint8)(unsafe.Pointer(zArg))) - '0' } - } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36943) { + } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36990) { rc = fts5UnicodeAddExceptions(tls, p, zArg, 1) - } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36954) { + } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37001) { rc = fts5UnicodeAddExceptions(tls, p, zArg, 0) - } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36974) { + } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37021) { } else { rc = SQLITE_ERROR } @@ -149881,7 +149939,7 @@ var rc int32 = SQLITE_OK var pRet uintptr *(*uintptr)(unsafe.Pointer(bp)) = uintptr(0) - var zBase uintptr = ts + 37003 + var zBase uintptr = ts + 37050 if nArg > 0 { zBase = *(*uintptr)(unsafe.Pointer(azArg)) @@ -150023,7 +150081,7 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*uint8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'a': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37013, aBuf+uintptr(nBuf-2), uint64(2)) { + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37060, aBuf+uintptr(nBuf-2), uint64(2)) { if fts5Porter_MGt1(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 } @@ -150031,11 +150089,11 @@ break case 'c': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37016, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37063, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37021, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37068, aBuf+uintptr(nBuf-4), uint64(4)) { if 
fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } @@ -150043,7 +150101,7 @@ break case 'e': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37026, aBuf+uintptr(nBuf-2), uint64(2)) { + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37073, aBuf+uintptr(nBuf-2), uint64(2)) { if fts5Porter_MGt1(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 } @@ -150051,7 +150109,7 @@ break case 'i': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37029, aBuf+uintptr(nBuf-2), uint64(2)) { + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37076, aBuf+uintptr(nBuf-2), uint64(2)) { if fts5Porter_MGt1(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 } @@ -150059,11 +150117,11 @@ break case 'l': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37032, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37079, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37037, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37084, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } @@ -150071,19 +150129,19 @@ break case 'n': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37042, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37089, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37046, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37093, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt1(tls, aBuf, nBuf-5) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37052, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37099, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } - } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37057, aBuf+uintptr(nBuf-3), uint64(3)) { + } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37104, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -150091,11 +150149,11 @@ break case 'o': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37061, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37108, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1_and_S_or_T(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } - } else if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37065, aBuf+uintptr(nBuf-2), uint64(2)) { + } else if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37112, aBuf+uintptr(nBuf-2), uint64(2)) { if fts5Porter_MGt1(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 } @@ -150103,7 +150161,7 @@ break case 's': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37068, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37115, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -150111,11 +150169,11 @@ break case 't': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37072, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37119, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 
{ *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } - } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37076, aBuf+uintptr(nBuf-3), uint64(3)) { + } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37123, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -150123,7 +150181,7 @@ break case 'u': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37080, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37127, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -150131,7 +150189,7 @@ break case 'v': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37084, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37131, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -150139,7 +150197,7 @@ break case 'z': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37088, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37135, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -150155,24 +150213,24 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*uint8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'a': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37092, aBuf+uintptr(nBuf-2), uint64(2)) { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37072, uint64(3)) + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37139, aBuf+uintptr(nBuf-2), uint64(2)) { + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37119, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 + 3 ret = 1 } break case 'b': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37095, aBuf+uintptr(nBuf-2), uint64(2)) { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37098, uint64(3)) + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37142, aBuf+uintptr(nBuf-2), uint64(2)) { + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37145, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 + 3 ret = 1 } break case 'i': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37102, aBuf+uintptr(nBuf-2), uint64(2)) { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37088, uint64(3)) + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37149, aBuf+uintptr(nBuf-2), uint64(2)) { + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37135, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 + 3 ret = 1 } @@ -150187,44 +150245,44 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*uint8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'a': - if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37105, aBuf+uintptr(nBuf-7), uint64(7)) { + if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37152, aBuf+uintptr(nBuf-7), uint64(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37072, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37119, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } - } else if nBuf > 6 && 0 == libc.Xmemcmp(tls, ts+37113, aBuf+uintptr(nBuf-6), uint64(6)) { + } else if nBuf > 6 && 0 == libc.Xmemcmp(tls, ts+37160, aBuf+uintptr(nBuf-6), uint64(6)) { if fts5Porter_MGt0(tls, aBuf, nBuf-6) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-6), ts+37120, uint64(4)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-6), ts+37167, uint64(4)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 6 + 4 } } break case 'c': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37125, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == 
libc.Xmemcmp(tls, ts+37172, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37021, uint64(4)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37068, uint64(4)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 4 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37130, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37177, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37016, uint64(4)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37063, uint64(4)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 4 } } break case 'e': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37135, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37182, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37088, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37135, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 3 } } break case 'g': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37140, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37187, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+15480, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 3 @@ -150233,91 +150291,91 @@ break case 'l': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37145, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37192, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt0(tls, aBuf, nBuf-3) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37098, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37145, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 + 3 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37149, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37196, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37013, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37060, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 2 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37154, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37201, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37057, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37104, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 3 } - } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37160, aBuf+uintptr(nBuf-3), uint64(3)) { + } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37207, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt0(tls, aBuf, nBuf-3) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37164, uint64(1)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37211, uint64(1)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 + 1 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37166, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37213, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37080, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37127, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 3 } } break case 'o': - if nBuf > 7 && 0 == libc.Xmemcmp(tls, 
ts+37172, aBuf+uintptr(nBuf-7), uint64(7)) { + if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37219, aBuf+uintptr(nBuf-7), uint64(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37088, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37135, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37180, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37227, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37072, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37119, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 3 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37186, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37233, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37072, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37119, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 3 } } break case 's': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37191, aBuf+uintptr(nBuf-5), uint64(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37238, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37013, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37060, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } - } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37197, aBuf+uintptr(nBuf-7), uint64(7)) { + } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37244, aBuf+uintptr(nBuf-7), uint64(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37084, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37131, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } - } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37205, aBuf+uintptr(nBuf-7), uint64(7)) { + } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37252, aBuf+uintptr(nBuf-7), uint64(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37213, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37260, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } - } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37217, aBuf+uintptr(nBuf-7), uint64(7)) { + } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37264, aBuf+uintptr(nBuf-7), uint64(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37080, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37127, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } } break case 't': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37225, aBuf+uintptr(nBuf-5), uint64(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37272, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37013, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37060, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37231, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37278, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37084, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37131, uint64(3)) 
*(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 3 } - } else if nBuf > 6 && 0 == libc.Xmemcmp(tls, ts+37237, aBuf+uintptr(nBuf-6), uint64(6)) { + } else if nBuf > 6 && 0 == libc.Xmemcmp(tls, ts+37284, aBuf+uintptr(nBuf-6), uint64(6)) { if fts5Porter_MGt0(tls, aBuf, nBuf-6) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-6), ts+37098, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-6), ts+37145, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 6 + 3 } } @@ -150332,16 +150390,16 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*uint8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'a': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37244, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37291, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37029, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37076, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 2 } } break case 's': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37249, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37296, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } @@ -150349,21 +150407,21 @@ break case 't': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37254, aBuf+uintptr(nBuf-5), uint64(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37301, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37029, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37076, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37260, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37307, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37029, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37076, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } } break case 'u': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37213, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37260, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt0(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -150371,7 +150429,7 @@ break case 'v': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37266, aBuf+uintptr(nBuf-5), uint64(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37313, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 } @@ -150379,9 +150437,9 @@ break case 'z': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37272, aBuf+uintptr(nBuf-5), uint64(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37319, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37013, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37060, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } } @@ -150396,12 +150454,12 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*uint8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'e': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37278, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37325, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt0(tls, aBuf, nBuf-3) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), 
ts+37282, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37329, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 + 2 } - } else if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37285, aBuf+uintptr(nBuf-2), uint64(2)) { + } else if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37332, aBuf+uintptr(nBuf-2), uint64(2)) { if fts5Porter_Vowel(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 ret = 1 @@ -150410,7 +150468,7 @@ break case 'n': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37288, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37335, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_Vowel(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 ret = 1 @@ -150566,7 +150624,7 @@ (*TrigramTokenizer)(unsafe.Pointer(pNew)).FbFold = 1 for i = 0; rc == SQLITE_OK && i < nArg; i = i + 2 { var zArg uintptr = *(*uintptr)(unsafe.Pointer(azArg + uintptr(i+1)*8)) - if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37292) { + if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37339) { if int32(*(*uint8)(unsafe.Pointer(zArg))) != '0' && int32(*(*uint8)(unsafe.Pointer(zArg))) != '1' || *(*uint8)(unsafe.Pointer(zArg + 1)) != 0 { rc = SQLITE_ERROR } else { @@ -150746,22 +150804,22 @@ defer tls.Free(128) *(*[4]BuiltinTokenizer)(unsafe.Pointer(bp)) = [4]BuiltinTokenizer{ - {FzName: ts + 37003, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { + {FzName: ts + 37050, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 }{fts5UnicodeCreate})), FxDelete: *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5UnicodeDelete})), FxTokenize: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, int32, uintptr) int32 }{fts5UnicodeTokenize}))}}, - {FzName: ts + 37307, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { + {FzName: ts + 37354, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 }{fts5AsciiCreate})), FxDelete: *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5AsciiDelete})), FxTokenize: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, int32, uintptr) int32 }{fts5AsciiTokenize}))}}, - {FzName: ts + 37313, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { + {FzName: ts + 37360, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 }{fts5PorterCreate})), FxDelete: *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5PorterDelete})), FxTokenize: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, int32, uintptr) int32 }{fts5PorterTokenize}))}}, - {FzName: ts + 37320, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { + {FzName: ts + 37367, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 }{fts5TriCreate})), FxDelete: *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5TriDelete})), FxTokenize: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, int32, uintptr) int32 @@ -151904,14 +151962,14 @@ var zCopy uintptr = sqlite3Fts5Strndup(tls, bp+8, zType, -1) if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { 
sqlite3Fts5Dequote(tls, zCopy) - if Xsqlite3_stricmp(tls, zCopy, ts+37328) == 0 { + if Xsqlite3_stricmp(tls, zCopy, ts+37375) == 0 { *(*int32)(unsafe.Pointer(peType)) = FTS5_VOCAB_COL - } else if Xsqlite3_stricmp(tls, zCopy, ts+37332) == 0 { + } else if Xsqlite3_stricmp(tls, zCopy, ts+37379) == 0 { *(*int32)(unsafe.Pointer(peType)) = FTS5_VOCAB_ROW - } else if Xsqlite3_stricmp(tls, zCopy, ts+37336) == 0 { + } else if Xsqlite3_stricmp(tls, zCopy, ts+37383) == 0 { *(*int32)(unsafe.Pointer(peType)) = FTS5_VOCAB_INSTANCE } else { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+37345, libc.VaList(bp, zCopy)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+37392, libc.VaList(bp, zCopy)) *(*int32)(unsafe.Pointer(bp + 8)) = SQLITE_ERROR } Xsqlite3_free(tls, zCopy) @@ -151937,19 +151995,19 @@ defer tls.Free(36) *(*[3]uintptr)(unsafe.Pointer(bp + 8)) = [3]uintptr{ - ts + 37379, - ts + 37419, - ts + 37454, + ts + 37426, + ts + 37466, + ts + 37501, } var pRet uintptr = uintptr(0) *(*int32)(unsafe.Pointer(bp + 32)) = SQLITE_OK var bDb int32 - bDb = libc.Bool32(argc == 6 && libc.Xstrlen(tls, *(*uintptr)(unsafe.Pointer(argv + 1*8))) == uint64(4) && libc.Xmemcmp(tls, ts+23352, *(*uintptr)(unsafe.Pointer(argv + 1*8)), uint64(4)) == 0) + bDb = libc.Bool32(argc == 6 && libc.Xstrlen(tls, *(*uintptr)(unsafe.Pointer(argv + 1*8))) == uint64(4) && libc.Xmemcmp(tls, ts+23399, *(*uintptr)(unsafe.Pointer(argv + 1*8)), uint64(4)) == 0) if argc != 5 && bDb == 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+37497, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+37544, 0) *(*int32)(unsafe.Pointer(bp + 32)) = SQLITE_ERROR } else { var nByte int32 @@ -152082,11 +152140,11 @@ if (*Fts5VocabTable)(unsafe.Pointer(pTab)).FbBusy != 0 { (*Sqlite3_vtab)(unsafe.Pointer(pVTab)).FzErrMsg = Xsqlite3_mprintf(tls, - ts+37530, libc.VaList(bp, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) + ts+37577, libc.VaList(bp, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) return SQLITE_ERROR } zSql = sqlite3Fts5Mprintf(tls, bp+64, - ts+37561, + ts+37608, libc.VaList(bp+16, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) if zSql != 0 { *(*int32)(unsafe.Pointer(bp + 64)) = Xsqlite3_prepare_v2(tls, (*Fts5VocabTable)(unsafe.Pointer(pTab)).Fdb, zSql, -1, bp+72, uintptr(0)) @@ -152110,7 +152168,7 @@ *(*uintptr)(unsafe.Pointer(bp + 72)) = uintptr(0) if *(*int32)(unsafe.Pointer(bp + 64)) == SQLITE_OK { (*Sqlite3_vtab)(unsafe.Pointer(pVTab)).FzErrMsg = Xsqlite3_mprintf(tls, - ts+37612, libc.VaList(bp+48, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) + ts+37659, libc.VaList(bp+48, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) *(*int32)(unsafe.Pointer(bp + 64)) = SQLITE_ERROR } } else { @@ -152505,7 +152563,7 @@ func sqlite3Fts5VocabInit(tls *libc.TLS, pGlobal uintptr, db uintptr) int32 { var p uintptr = pGlobal - return Xsqlite3_create_module_v2(tls, db, ts+37638, uintptr(unsafe.Pointer(&fts5Vocab)), p, uintptr(0)) + return Xsqlite3_create_module_v2(tls, db, ts+37685, uintptr(unsafe.Pointer(&fts5Vocab)), p, uintptr(0)) } var fts5Vocab = Sqlite3_module{ @@ -152527,7 +152585,7 @@ // ************* End of stmt.c 
*********************************************** // Return the source-id for this library func Xsqlite3_sourceid(tls *libc.TLS) uintptr { - return ts + 37648 + return ts + 37695 } func init() { @@ -153504,5 +153562,5 @@ *(*func(*libc.TLS, uintptr, int32, uintptr) int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&vfs_template)) + 128)) = rbuVfsGetLastError } -var ts1 = "3.41.0\x00ATOMIC_INTRINSICS=1\x00COMPILER=gcc-10.2.1 20210110\x00DEFAULT_AUTOVACUUM\x00DEFAULT_CACHE_SIZE=-2000\x00DEFAULT_FILE_FORMAT=4\x00DEFAULT_JOURNAL_SIZE_LIMIT=-1\x00DEFAULT_MEMSTATUS=0\x00DEFAULT_MMAP_SIZE=0\x00DEFAULT_PAGE_SIZE=4096\x00DEFAULT_PCACHE_INITSZ=20\x00DEFAULT_RECURSIVE_TRIGGERS\x00DEFAULT_SECTOR_SIZE=4096\x00DEFAULT_SYNCHRONOUS=2\x00DEFAULT_WAL_AUTOCHECKPOINT=1000\x00DEFAULT_WAL_SYNCHRONOUS=2\x00DEFAULT_WORKER_THREADS=0\x00ENABLE_COLUMN_METADATA\x00ENABLE_FTS5\x00ENABLE_GEOPOLY\x00ENABLE_MATH_FUNCTIONS\x00ENABLE_MEMORY_MANAGEMENT\x00ENABLE_OFFSET_SQL_FUNC\x00ENABLE_PREUPDATE_HOOK\x00ENABLE_RBU\x00ENABLE_RTREE\x00ENABLE_SESSION\x00ENABLE_SNAPSHOT\x00ENABLE_STAT4\x00ENABLE_UNLOCK_NOTIFY\x00LIKE_DOESNT_MATCH_BLOBS\x00MALLOC_SOFT_LIMIT=1024\x00MAX_ATTACHED=10\x00MAX_COLUMN=2000\x00MAX_COMPOUND_SELECT=500\x00MAX_DEFAULT_PAGE_SIZE=8192\x00MAX_EXPR_DEPTH=1000\x00MAX_FUNCTION_ARG=127\x00MAX_LENGTH=1000000000\x00MAX_LIKE_PATTERN_LENGTH=50000\x00MAX_MMAP_SIZE=0x7fff0000\x00MAX_PAGE_COUNT=1073741823\x00MAX_PAGE_SIZE=65536\x00MAX_SQL_LENGTH=1000000000\x00MAX_TRIGGER_DEPTH=1000\x00MAX_VARIABLE_NUMBER=32766\x00MAX_VDBE_OP=250000000\x00MAX_WORKER_THREADS=8\x00MUTEX_NOOP\x00SOUNDEX\x00SYSTEM_MALLOC\x00TEMP_STORE=1\x00THREADSAFE=1\x00BINARY\x00ANY\x00BLOB\x00INT\x00INTEGER\x00REAL\x00TEXT\x0020b:20e\x0020c:20e\x0020e\x0040f-21a-21d\x00now\x00local time unavailable\x00second\x00minute\x00hour\x00\x00\x00day\x00\x00\x00\x00month\x00\x00year\x00\x00\x00auto\x00julianday\x00localtime\x00unixepoch\x00utc\x00weekday \x00start of \x00month\x00year\x00day\x00%02d\x00%06.3f\x00%03d\x00%.16g\x00%lld\x00%04d\x00date\x00time\x00datetime\x00strftime\x00current_time\x00current_timestamp\x00current_date\x00failed to allocate %u bytes of memory\x00failed memory resize %u to %u bytes\x00out of memory\x000123456789ABCDEF0123456789abcdef\x00-x0\x00X0\x00%\x00NaN\x00Inf\x00\x00NULL\x00(NULL)\x00.\x00(join-%u)\x00(subquery-%u)\x00thstndrd\x00922337203685477580\x00API call with %s database connection 
pointer\x00unopened\x00invalid\x00Savepoint\x00AutoCommit\x00Transaction\x00Checkpoint\x00JournalMode\x00Vacuum\x00VFilter\x00VUpdate\x00Init\x00Goto\x00Gosub\x00InitCoroutine\x00Yield\x00MustBeInt\x00Jump\x00Once\x00If\x00IfNot\x00IsType\x00Not\x00IfNullRow\x00SeekLT\x00SeekLE\x00SeekGE\x00SeekGT\x00IfNotOpen\x00IfNoHope\x00NoConflict\x00NotFound\x00Found\x00SeekRowid\x00NotExists\x00Last\x00IfSmaller\x00SorterSort\x00Sort\x00Rewind\x00SorterNext\x00Prev\x00Next\x00IdxLE\x00IdxGT\x00IdxLT\x00Or\x00And\x00IdxGE\x00RowSetRead\x00RowSetTest\x00Program\x00FkIfZero\x00IsNull\x00NotNull\x00Ne\x00Eq\x00Gt\x00Le\x00Lt\x00Ge\x00ElseEq\x00IfPos\x00IfNotZero\x00DecrJumpZero\x00IncrVacuum\x00VNext\x00Filter\x00PureFunc\x00Function\x00Return\x00EndCoroutine\x00HaltIfNull\x00Halt\x00Integer\x00Int64\x00String\x00BeginSubrtn\x00Null\x00SoftNull\x00Blob\x00Variable\x00Move\x00Copy\x00SCopy\x00IntCopy\x00FkCheck\x00ResultRow\x00CollSeq\x00AddImm\x00RealAffinity\x00Cast\x00Permutation\x00Compare\x00IsTrue\x00ZeroOrNull\x00Offset\x00Column\x00TypeCheck\x00Affinity\x00MakeRecord\x00Count\x00ReadCookie\x00SetCookie\x00ReopenIdx\x00BitAnd\x00BitOr\x00ShiftLeft\x00ShiftRight\x00Add\x00Subtract\x00Multiply\x00Divide\x00Remainder\x00Concat\x00OpenRead\x00OpenWrite\x00BitNot\x00OpenDup\x00OpenAutoindex\x00String8\x00OpenEphemeral\x00SorterOpen\x00SequenceTest\x00OpenPseudo\x00Close\x00ColumnsUsed\x00SeekScan\x00SeekHit\x00Sequence\x00NewRowid\x00Insert\x00RowCell\x00Delete\x00ResetCount\x00SorterCompare\x00SorterData\x00RowData\x00Rowid\x00NullRow\x00SeekEnd\x00IdxInsert\x00SorterInsert\x00IdxDelete\x00DeferredSeek\x00IdxRowid\x00FinishSeek\x00Destroy\x00Clear\x00ResetSorter\x00CreateBtree\x00SqlExec\x00ParseSchema\x00LoadAnalysis\x00DropTable\x00DropIndex\x00Real\x00DropTrigger\x00IntegrityCk\x00RowSetAdd\x00Param\x00FkCounter\x00MemMax\x00OffsetLimit\x00AggInverse\x00AggStep\x00AggStep1\x00AggValue\x00AggFinal\x00Expire\x00CursorLock\x00CursorUnlock\x00TableLock\x00VBegin\x00VCreate\x00VDestroy\x00VOpen\x00VInitIn\x00VColumn\x00VRename\x00Pagecount\x00MaxPgcnt\x00ClrSubtype\x00FilterAdd\x00Trace\x00CursorHint\x00ReleaseReg\x00Noop\x00Explain\x00Abortable\x00open\x00close\x00access\x00getcwd\x00stat\x00fstat\x00ftruncate\x00fcntl\x00read\x00pread\x00pread64\x00write\x00pwrite\x00pwrite64\x00fchmod\x00fallocate\x00unlink\x00openDirectory\x00mkdir\x00rmdir\x00fchown\x00geteuid\x00mmap\x00munmap\x00mremap\x00getpagesize\x00readlink\x00lstat\x00ioctl\x00attempt to open \"%s\" as file descriptor %d\x00/dev/null\x00os_unix.c:%d: (%d) %s(%s) - %s\x00cannot fstat db file %s\x00file unlinked while open: %s\x00multiple links to file: %s\x00file renamed while open: %s\x00%s\x00full_fsync\x00%s-shm\x00readonly_shm\x00psow\x00unix-excl\x00%s.lock\x00/var/tmp\x00/usr/tmp\x00/tmp\x00SQLITE_TMPDIR\x00TMPDIR\x00%s/etilqs_%llx%c\x00modeof\x00fsync\x00/dev/urandom\x00unix\x00unix-none\x00unix-dotfile\x00memdb\x00memdb(%p,%lld)\x00PRAGMA \"%w\".page_count\x00ATTACH x AS %Q\x00recovered %d pages from %s\x00-journal\x00-wal\x00nolock\x00immutable\x00PRAGMA table_list\x00recovered %d frames from WAL file %s\x00cannot limit WAL size: %s\x00SQLite format 3\x00:memory:\x00@ \x00\n\x00invalid page number %d\x002nd reference to page %d\x00Failed to read ptrmap key=%d\x00Bad ptr map entry key=%d expected=(%d,%d) got=(%d,%d)\x00failed to get page %d\x00freelist leaf count too big on page %d\x00%s is %d but should be %d\x00size\x00overflow list length\x00Page %u: \x00unable to get the page. 
error code=%d\x00btreeInitPage() returns error code %d\x00free space corruption\x00On tree page %u cell %d: \x00On page %u at right child: \x00Offset %d out of range %d..%d\x00Extends off end of page\x00Rowid %lld out of order\x00Child page depth differs\x00Multiple uses for byte %u of page %u\x00Fragmentation of %d bytes reported as %d on page %u\x00Main freelist: \x00max rootpage (%d) disagrees with header (%d)\x00incremental_vacuum enabled with a max rootpage of zero\x00Page %d is never used\x00Pointer map page %d is referenced\x00unknown database %s\x00destination database is in use\x00source and destination must be distinct\x00%!.15g\x00-\x00%s%s\x00k(%d\x00B\x00,%s%s%s\x00N.\x00)\x00%.18s-%s\x00%s(%d)\x00%d\x00(blob)\x00vtab:%p\x00%c%u\x00]\x00program\x00?\x008\x0016LE\x0016BE\x00addr\x00opcode\x00p1\x00p2\x00p3\x00p4\x00p5\x00comment\x00id\x00parent\x00notused\x00detail\x00%.4c%s%.16c\x00MJ delete: %s\x00MJ collide: %s\x00-mj%06X9%02X\x00FOREIGN KEY constraint failed\x00a CHECK constraint\x00a generated column\x00an index\x00non-deterministic use of %s() in %s\x00API called with finalized prepared statement\x00API called with NULL prepared statement\x00string or blob too big\x00bind on a busy prepared statement: [%s]\x00-- \x00'%.*q'\x00zeroblob(%d)\x00x'\x00%02x\x00'\x00%s constraint failed\x00%z: %s\x00abort at %d in [%s]: %s\x00cannot store %s value in %s column %s.%s\x00cannot open savepoint - SQL statements in progress\x00no such savepoint: %s\x00cannot release savepoint - SQL statements in progress\x00cannot commit transaction - SQL statements in progress\x00cannot start a transaction within a transaction\x00cannot rollback - no transaction is active\x00cannot commit - no transaction is active\x00database schema has changed\x00index corruption\x00sqlite_master\x00SELECT*FROM\"%w\".%s WHERE %s ORDER BY rowid\x00too many levels of trigger recursion\x00cannot change %s wal mode from within a transaction\x00into\x00out of\x00database table is locked: %s\x00ValueList\x00-- %s\x00statement aborts at %d: [%s] %s\x00NOT NULL\x00UNIQUE\x00CHECK\x00FOREIGN KEY\x00cannot open value of type %s\x00null\x00real\x00integer\x00no such rowid: %lld\x00cannot open virtual table: %s\x00cannot open table without rowid: %s\x00cannot open view: %s\x00no such column: \"%s\"\x00foreign key\x00indexed\x00cannot open %s column for writing\x00sqlite_\x00sqlite_temp_master\x00sqlite_temp_schema\x00sqlite_schema\x00main\x00*\x00new\x00old\x00excluded\x00misuse of aliased aggregate %s\x00misuse of aliased window function %s\x00row value misused\x00double-quoted string literal: \"%w\"\x00coalesce\x00no such column\x00ambiguous column name\x00%s: %s.%s.%s\x00%s: %s.%s\x00%s: %s\x00partial index WHERE clauses\x00index expressions\x00CHECK constraints\x00generated columns\x00%s prohibited in %s\x00true\x00false\x00the \".\" operator\x00second argument to %#T() must be a constant between 0.0 and 1.0\x00not authorized to use function: %#T\x00non-deterministic functions\x00%#T() may not be used as a window function\x00window\x00aggregate\x00misuse of %s function %#T()\x00no such function: %#T\x00wrong number of arguments to function %#T()\x00FILTER may not be used with non-aggregate %#T()\x00subqueries\x00parameters\x00%r %s BY term out of range - should be between 1 and %d\x00too many terms in ORDER BY clause\x00ORDER\x00%r ORDER BY term does not match any column in the result set\x00too many terms in %s BY clause\x00HAVING clause on a non-aggregate query\x00GROUP\x00aggregate functions are not allowed in the 
GROUP BY clause\x00Expression tree is too large (maximum depth %d)\x00IN(...) element has %d term%s - expected %d\x00s\x000\x00too many arguments on function %T\x00unsafe use of %#T()\x00variable number must be between ?1 and ?%d\x00too many SQL variables\x00%d columns assigned %d values\x00too many columns in %s\x00_ROWID_\x00ROWID\x00OID\x00USING ROWID SEARCH ON TABLE %s FOR IN-OPERATOR\x00USING INDEX %s FOR IN-OPERATOR\x00sub-select returns %d columns - expected %d\x00REUSE LIST SUBQUERY %d\x00%sLIST SUBQUERY %d\x00CORRELATED \x00REUSE SUBQUERY %d\x00%sSCALAR SUBQUERY %d\x001\x000x\x00hex literal too big: %s%#T\x00generated column loop on \"%s\"\x00blob\x00text\x00numeric\x00flexnum\x00none\x00misuse of aggregate: %#T()\x00unknown function: %#T()\x00RAISE() may only be used within a trigger-program\x00B\x00C\x00D\x00E\x00F\x00table %s may not be altered\x00SELECT 1 FROM \"%w\".sqlite_master WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%' AND sqlite_rename_test(%Q, sql, type, name, %d, %Q, %d)=NULL \x00SELECT 1 FROM temp.sqlite_master WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%' AND sqlite_rename_test(%Q, sql, type, name, 1, %Q, %d)=NULL \x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_quotefix(%Q, sql)WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%'\x00UPDATE temp.sqlite_master SET sql = sqlite_rename_quotefix('temp', sql)WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%'\x00there is already another table or index with this name: %s\x00table\x00view %s may not be altered\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, %d) WHERE (type!='index' OR tbl_name=%Q COLLATE nocase)AND name NOT LIKE 'sqliteX_%%' ESCAPE 'X'\x00UPDATE %Q.sqlite_master SET tbl_name = %Q, name = CASE WHEN type='table' THEN %Q WHEN name LIKE 'sqliteX_autoindex%%' ESCAPE 'X' AND type='index' THEN 'sqlite_autoindex_' || %Q || substr(name,%d+18) ELSE name END WHERE tbl_name=%Q COLLATE nocase AND (type='table' OR type='index' OR type='trigger');\x00sqlite_sequence\x00UPDATE \"%w\".sqlite_sequence set name = %Q WHERE name = %Q\x00UPDATE sqlite_temp_schema SET sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, 1), tbl_name = CASE WHEN tbl_name=%Q COLLATE nocase AND sqlite_rename_test(%Q, sql, type, name, 1, 'after rename', 0) THEN %Q ELSE tbl_name END WHERE type IN ('view', 'trigger')\x00after rename\x00SELECT raise(ABORT,%Q) FROM \"%w\".\"%w\"\x00Cannot add a PRIMARY KEY column\x00Cannot add a UNIQUE column\x00Cannot add a REFERENCES column with non-NULL default value\x00Cannot add a NOT NULL column with default value NULL\x00Cannot add a column with non-constant default\x00cannot add a STORED column\x00UPDATE \"%w\".sqlite_master SET sql = printf('%%.%ds, ',sql) || %Q || substr(sql,1+length(printf('%%.%ds',sql))) WHERE type = 'table' AND name = %Q\x00SELECT CASE WHEN quick_check GLOB 'CHECK*' THEN raise(ABORT,'CHECK constraint failed') ELSE raise(ABORT,'NOT NULL constraint failed') END FROM pragma_quick_check(%Q,%Q) WHERE quick_check GLOB 'CHECK*' OR quick_check GLOB 'NULL*'\x00virtual tables may not be altered\x00Cannot add a column to a view\x00sqlite_altertab_%s\x00view\x00virtual table\x00cannot %s %s \"%s\"\x00drop column from\x00rename columns of\x00no such column: \"%T\"\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, %d) WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND (type != 'index' OR 
tbl_name = %Q)\x00UPDATE temp.sqlite_master SET sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, 1) WHERE type IN ('trigger', 'view')\x00error in %s %s%s%s: %s\x00 \x00CREATE \x00\"%w\" \x00%Q%s\x00%.*s%s\x00cannot drop %s column: \"%s\"\x00PRIMARY KEY\x00cannot drop column \"%s\": no other columns exist\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_drop_column(%d, sql, %d) WHERE (type=='table' AND tbl_name=%Q COLLATE nocase)\x00after drop column\x00sqlite_rename_column\x00sqlite_rename_table\x00sqlite_rename_test\x00sqlite_drop_column\x00sqlite_rename_quotefix\x00CREATE TABLE %Q.%s(%s)\x00DELETE FROM %Q.%s WHERE %s=%Q\x00DELETE FROM %Q.%s\x00sqlite_stat1\x00tbl,idx,stat\x00sqlite_stat4\x00tbl,idx,neq,nlt,ndlt,sample\x00sqlite_stat3\x00stat_init\x00stat_push\x00%llu\x00 %llu\x00%llu \x00stat_get\x00sqlite\\_%\x00BBB\x00idx\x00tbl\x00unordered*\x00sz=[0-9]*\x00noskipscan*\x00SELECT idx,count(*) FROM %Q.sqlite_stat4 GROUP BY idx\x00SELECT idx,neq,nlt,ndlt,sample FROM %Q.sqlite_stat4\x00SELECT tbl,idx,stat FROM %Q.sqlite_stat1\x00x\x00\x00too many attached databases - max %d\x00database %s is already in use\x00database is already attached\x00attached databases must use the same text encoding as main database\x00unable to open database: %s\x00no such database: %s\x00cannot detach database %s\x00database %s is locked\x00sqlite_detach\x00sqlite_attach\x00%s cannot use variables\x00%s %T cannot reference objects in database %s\x00authorizer malfunction\x00%s.%s\x00%s.%z\x00access to %z is prohibited\x00not authorized\x00pragma_\x00no such view\x00no such table\x00corrupt database\x00unknown database %T\x00object name reserved for internal use: %s\x00temporary table name must be unqualified\x00%s %T already exists\x00there is already an index named %s\x00sqlite_returning\x00cannot use RETURNING in a trigger\x00too many columns on %s\x00always\x00generated\x00duplicate column name: %s\x00default value of column [%s] is not constant\x00cannot use DEFAULT on a generated column\x00generated columns cannot be part of the PRIMARY KEY\x00table \"%s\" has more than one primary key\x00AUTOINCREMENT is only allowed on an INTEGER PRIMARY KEY\x00virtual tables cannot use computed columns\x00virtual\x00stored\x00error in generated column \"%s\"\x00,\x00\n \x00,\n \x00\n)\x00CREATE TABLE \x00 TEXT\x00 NUM\x00 INT\x00 REAL\x00unknown datatype for %s.%s: \"%s\"\x00missing datatype for %s.%s\x00AUTOINCREMENT not allowed on WITHOUT ROWID tables\x00PRIMARY KEY missing on table %s\x00must have at least one non-generated column\x00TABLE\x00VIEW\x00CREATE %s %.*s\x00UPDATE %Q.sqlite_master SET type='%s', name=%Q, tbl_name=%Q, rootpage=#%d, sql=%Q WHERE rowid=#%d\x00CREATE TABLE %Q.sqlite_sequence(name,seq)\x00tbl_name='%q' AND type!='trigger'\x00parameters are not allowed in views\x00view %s is circularly defined\x00corrupt schema\x00UPDATE %Q.sqlite_master SET rootpage=%d WHERE #%d AND rootpage=#%d\x00sqlite_stat%d\x00DELETE FROM %Q.sqlite_sequence WHERE name=%Q\x00DELETE FROM %Q.sqlite_master WHERE tbl_name=%Q and type!='trigger'\x00table %s may not be dropped\x00use DROP TABLE to delete table %s\x00use DROP VIEW to delete view %s\x00foreign key on %s should reference only one column of table %T\x00number of columns in foreign key does not match the number of columns in the referenced table\x00unknown column \"%s\" in foreign key definition\x00unsupported use of NULLS %s\x00FIRST\x00LAST\x00index\x00cannot create a TEMP index on non-TEMP table \"%s\"\x00table %s may not be indexed\x00views may not be 
indexed\x00virtual tables may not be indexed\x00there is already a table named %s\x00index %s already exists\x00sqlite_autoindex_%s_%d\x00expressions prohibited in PRIMARY KEY and UNIQUE constraints\x00conflicting ON CONFLICT clauses specified\x00invalid rootpage\x00CREATE%s INDEX %.*s\x00 UNIQUE\x00INSERT INTO %Q.sqlite_master VALUES('index',%Q,%Q,#%d,%Q);\x00name='%q' AND type='index'\x00no such index: %S\x00index associated with UNIQUE or PRIMARY KEY constraint cannot be dropped\x00DELETE FROM %Q.sqlite_master WHERE name=%Q AND type='index'\x00too many FROM clause terms, max: %d\x00a JOIN clause is required before %s\x00ON\x00USING\x00BEGIN\x00ROLLBACK\x00COMMIT\x00RELEASE\x00unable to open a temporary database file for storing temporary tables\x00index '%q'\x00, \x00%s.rowid\x00unable to identify the object to be reindexed\x00duplicate WITH table name: %s\x00no such collation sequence: %s\x00unsafe use of virtual table \"%s\"\x00table %s may not be modified\x00cannot modify %s because it is a view\x00rows deleted\x00integer overflow\x00%.*f\x00LIKE or GLOB pattern too complex\x00ESCAPE expression must be a single character\x00%!.20e\x00%Q\x00?000\x00MATCH\x00like\x00implies_nonnull_row\x00expr_compare\x00expr_implies_expr\x00affinity\x00soundex\x00load_extension\x00sqlite_compileoption_used\x00sqlite_compileoption_get\x00unlikely\x00likelihood\x00likely\x00sqlite_offset\x00ltrim\x00rtrim\x00trim\x00min\x00max\x00typeof\x00subtype\x00length\x00instr\x00printf\x00format\x00unicode\x00char\x00abs\x00round\x00upper\x00lower\x00hex\x00unhex\x00ifnull\x00random\x00randomblob\x00nullif\x00sqlite_version\x00sqlite_source_id\x00sqlite_log\x00quote\x00last_insert_rowid\x00changes\x00total_changes\x00replace\x00zeroblob\x00substr\x00substring\x00sum\x00total\x00avg\x00count\x00group_concat\x00glob\x00ceil\x00ceiling\x00floor\x00trunc\x00ln\x00log\x00log10\x00log2\x00exp\x00pow\x00power\x00mod\x00acos\x00asin\x00atan\x00atan2\x00cos\x00sin\x00tan\x00cosh\x00sinh\x00tanh\x00acosh\x00asinh\x00atanh\x00sqrt\x00radians\x00degrees\x00pi\x00sign\x00iif\x00foreign key mismatch - \"%w\" referencing \"%w\"\x00cannot INSERT into generated column \"%s\"\x00table %S has no column named %s\x00table %S has %d columns but %d values were supplied\x00%d values for %d columns\x00UPSERT not implemented for virtual table \"%s\"\x00cannot UPSERT a view\x00rows inserted\x00sqlite3_extension_init\x00sqlite3_\x00lib\x00_init\x00no entry point [%s] in shared library [%s]\x00error during initialization: %s\x00unable to open shared library [%.*s]\x00so\x00automatic extension loading failed: 
%s\x00seq\x00from\x00to\x00on_update\x00on_delete\x00match\x00cid\x00name\x00type\x00notnull\x00dflt_value\x00pk\x00hidden\x00schema\x00ncol\x00wr\x00strict\x00seqno\x00desc\x00coll\x00key\x00builtin\x00enc\x00narg\x00flags\x00wdth\x00hght\x00flgs\x00unique\x00origin\x00partial\x00rowid\x00fkid\x00file\x00busy\x00checkpointed\x00database\x00status\x00cache_size\x00timeout\x00analysis_limit\x00application_id\x00auto_vacuum\x00automatic_index\x00busy_timeout\x00cache_spill\x00case_sensitive_like\x00cell_size_check\x00checkpoint_fullfsync\x00collation_list\x00compile_options\x00count_changes\x00data_version\x00database_list\x00default_cache_size\x00defer_foreign_keys\x00empty_result_callbacks\x00encoding\x00foreign_key_check\x00foreign_key_list\x00foreign_keys\x00freelist_count\x00full_column_names\x00fullfsync\x00function_list\x00hard_heap_limit\x00ignore_check_constraints\x00incremental_vacuum\x00index_info\x00index_list\x00index_xinfo\x00integrity_check\x00journal_mode\x00journal_size_limit\x00legacy_alter_table\x00locking_mode\x00max_page_count\x00mmap_size\x00module_list\x00optimize\x00page_count\x00page_size\x00pragma_list\x00query_only\x00quick_check\x00read_uncommitted\x00recursive_triggers\x00reverse_unordered_selects\x00schema_version\x00secure_delete\x00short_column_names\x00shrink_memory\x00soft_heap_limit\x00synchronous\x00table_info\x00table_list\x00table_xinfo\x00temp_store\x00temp_store_directory\x00threads\x00trusted_schema\x00user_version\x00wal_autocheckpoint\x00wal_checkpoint\x00writable_schema\x00onoffalseyestruextrafull\x00exclusive\x00normal\x00full\x00incremental\x00memory\x00temporary storage cannot be changed from within a transaction\x00SET NULL\x00SET DEFAULT\x00CASCADE\x00RESTRICT\x00NO ACTION\x00delete\x00persist\x00off\x00truncate\x00wal\x00w\x00a\x00sissii\x00utf8\x00utf16le\x00utf16be\x00-%T\x00fast\x00not a writable directory\x00Safety level may not be changed inside a transaction\x00reset\x00issisii\x00issisi\x00SELECT*FROM\"%w\"\x00shadow\x00sssiii\x00iisX\x00isiX\x00c\x00u\x00isisi\x00iss\x00is\x00iissssss\x00NONE\x00siX\x00*** in database %s ***\n\x00row not in PRIMARY KEY order for %s\x00NULL value in %s.%s\x00non-%s value in %s.%s\x00NUMERIC value in %s.%s\x00C\x00TEXT value in %s.%s\x00CHECK constraint failed in %s\x00row \x00 missing from index \x00 values differ from index \x00non-unique entry in index \x00wrong # of entries in index \x00ok\x00unsupported encoding: %s\x00restart\x00ANALYZE \"%w\".\"%w\"\x00UTF8\x00UTF-8\x00UTF-16le\x00UTF-16be\x00UTF16le\x00UTF16be\x00UTF-16\x00UTF16\x00CREATE TABLE x\x00%c\"%s\"\x00(\"%s\"\x00,arg HIDDEN\x00,schema HIDDEN\x00PRAGMA \x00%Q.\x00=%Q\x00error in %s %s after %s: %s\x00malformed database schema (%s)\x00%z - %s\x00rename\x00drop column\x00add column\x00orphan index\x00CREATE TABLE x(type text,name text,tbl_name text,rootpage int,sql text)\x00unsupported file format\x00SELECT*FROM\"%w\".%s ORDER BY rowid\x00database schema is locked: %s\x00statement too long\x00unknown join type: %T%s%T%s%T\x00naturaleftouterightfullinnercross\x00a NATURAL join may not have an ON or USING clause\x00cannot join using column %s - column not present in both tables\x00ambiguous reference to %s in USING()\x00UNION ALL\x00INTERSECT\x00EXCEPT\x00UNION\x00USE TEMP B-TREE FOR %s\x00USE TEMP B-TREE FOR %sORDER BY\x00RIGHT PART OF \x00column%d\x00%.*z:%u\x00NUM\x00cannot use window functions in recursive queries\x00recursive aggregate queries not supported\x00SETUP\x00RECURSIVE STEP\x00SCAN %d CONSTANT ROW%s\x00S\x00COMPOUND 
QUERY\x00LEFT-MOST SUBQUERY\x00%s USING TEMP B-TREE\x00all VALUES must have the same number of terms\x00SELECTs to the left and right of %s do not have the same number of result columns\x00MERGE (%s)\x00LEFT\x00RIGHT\x00no such index: %s\x00'%s' is not a function\x00no such index: \"%s\"\x00multiple references to recursive table: %s\x00circular reference: %s\x00table %s has %d values for %d columns\x00multiple recursive references: %s\x00recursive reference in a subquery: %s\x00%!S\x00too many references to \"%s\": max 65535\x00access to view \"%s\" prohibited\x00..%s\x00%s.%s.%s\x00no such table: %s\x00no tables specified\x00too many columns in result set\x00DISTINCT aggregates must have exactly one argument\x00USE TEMP B-TREE FOR %s(DISTINCT)\x00SCAN %s%s%s\x00 USING COVERING INDEX \x00target object/alias may not appear in FROM clause: %s\x00expected %d columns for '%s' but got %d\x00CO-ROUTINE %!S\x00MATERIALIZE %!S\x00DISTINCT\x00GROUP BY\x00sqlite3_get_table() called with two or more incompatible queries\x00temporary trigger may not have qualified name\x00trigger\x00cannot create triggers on virtual tables\x00trigger %T already exists\x00cannot create trigger on system table\x00cannot create %s trigger on view: %S\x00BEFORE\x00AFTER\x00cannot create INSTEAD OF trigger on table: %S\x00trigger \"%s\" may not write to shadow table \"%s\"\x00INSERT INTO %Q.sqlite_master VALUES('trigger',%Q,%Q,0,'CREATE TRIGGER %q')\x00type='trigger' AND name='%q'\x00no such trigger: %S\x00DELETE FROM %Q.sqlite_master WHERE name=%Q AND type='trigger'\x00%s RETURNING is not available on virtual tables\x00DELETE\x00UPDATE\x00RETURNING may not use \"TABLE.*\" wildcards\x00-- TRIGGER %s\x00cannot UPDATE generated column \"%s\"\x00no such column: %s\x00rows updated\x00%r \x00%sON CONFLICT clause does not match any PRIMARY KEY or UNIQUE constraint\x00CRE\x00INS\x00cannot VACUUM from within a transaction\x00cannot VACUUM - SQL statements in progress\x00non-text filename\x00ATTACH %Q AS vacuum_db\x00output file already exists\x00SELECT sql FROM \"%w\".sqlite_schema WHERE type='table'AND name<>'sqlite_sequence' AND coalesce(rootpage,1)>0\x00SELECT sql FROM \"%w\".sqlite_schema WHERE type='index'\x00SELECT'INSERT INTO vacuum_db.'||quote(name)||' SELECT*FROM\"%w\".'||quote(name)FROM vacuum_db.sqlite_schema WHERE type='table'AND coalesce(rootpage,1)>0\x00INSERT INTO vacuum_db.sqlite_schema SELECT*FROM \"%w\".sqlite_schema WHERE type IN('view','trigger') OR(type='table'AND rootpage=0)\x00CREATE VIRTUAL TABLE %T\x00UPDATE %Q.sqlite_master SET type='table', name=%Q, tbl_name=%Q, rootpage=0, sql=%Q WHERE rowid=#%d\x00name=%Q AND sql=%Q\x00vtable constructor called recursively: %s\x00vtable constructor failed: %s\x00vtable constructor did not declare schema: %s\x00no such module: %s\x00\x00 AND \x00(\x00 (\x00%s=?\x00ANY(%s)\x00>\x00<\x00%s %S\x00SEARCH\x00SCAN\x00AUTOMATIC PARTIAL COVERING INDEX\x00AUTOMATIC COVERING INDEX\x00COVERING INDEX %s\x00INDEX %s\x00 USING \x00 USING INTEGER PRIMARY KEY (%s\x00>? 
AND %s\x00%c?)\x00 VIRTUAL TABLE INDEX %d:%s\x00 LEFT-JOIN\x00BLOOM FILTER ON %S (\x00rowid=?\x00MULTI-INDEX OR\x00INDEX %d\x00RIGHT-JOIN %s\x00regexp\x00ON clause references tables to its right\x00NOCASE\x00too many arguments on %s() - max %d\x00automatic index on %s(%s)\x00auto-index\x00%s.xBestIndex malfunction\x00abbreviated query algorithm search\x00no query solution\x00at most %d tables in a join\x00SCAN CONSTANT ROW\x00second argument to nth_value must be a positive integer\x00argument of ntile must be a positive integer\x00row_number\x00dense_rank\x00rank\x00percent_rank\x00cume_dist\x00ntile\x00last_value\x00nth_value\x00first_value\x00lead\x00lag\x00no such window: %s\x00RANGE with offset PRECEDING/FOLLOWING requires one ORDER BY expression\x00FILTER clause may only be used with aggregate window functions\x00misuse of aggregate: %s()\x00unsupported frame specification\x00PARTITION clause\x00ORDER BY clause\x00frame specification\x00cannot override %s of window: %s\x00DISTINCT is not supported for window functions\x00frame starting offset must be a non-negative integer\x00frame ending offset must be a non-negative integer\x00frame starting offset must be a non-negative number\x00frame ending offset must be a non-negative number\x00%s clause should come after %s not before\x00ORDER BY\x00LIMIT\x00too many terms in compound SELECT\x00syntax error after column name \"%.*s\"\x00parser stack overflow\x00unknown table option: %.*s\x00set list\x00near \"%T\": syntax error\x00qualified table names are not allowed on INSERT, UPDATE, and DELETE statements within triggers\x00the INDEXED BY clause is not allowed on UPDATE or DELETE statements within triggers\x00the NOT INDEXED clause is not allowed on UPDATE or DELETE statements within triggers\x00incomplete input\x00unrecognized token: \"%T\"\x00%s in \"%s\"\x00create\x00temp\x00temporary\x00end\x00explain\x00unable to close due to unfinalized statements or unfinished backups\x00unknown error\x00abort due to ROLLBACK\x00another row available\x00no more rows available\x00not an error\x00SQL logic error\x00access permission denied\x00query aborted\x00database is locked\x00database table is locked\x00attempt to write a readonly database\x00interrupted\x00disk I/O error\x00database disk image is malformed\x00unknown operation\x00database or disk is full\x00unable to open database file\x00locking protocol\x00constraint failed\x00datatype mismatch\x00bad parameter or other API misuse\x00authorization denied\x00column index out of range\x00file is not a database\x00notification message\x00warning message\x00unable to delete/modify user-function due to active statements\x00unable to use function %s in the requested context\x00unknown database: %s\x00unable to delete/modify collation sequence due to active statements\x00file:\x00localhost\x00invalid uri authority: %.*s\x00vfs\x00cache\x00mode\x00no such %s mode: %s\x00%s mode not allowed: %s\x00no such vfs: %s\x00shared\x00private\x00ro\x00rw\x00rwc\x00RTRIM\x00\x00\x00\x00%s at line %d of [%.10s]\x00database corruption\x00misuse\x00cannot open file\x00no such table column: %s.%s\x00SQLITE_\x00database is deadlocked\x00array\x00object\x000123456789abcdef\x00JSON cannot hold BLOB values\x00malformed JSON\x00[0]\x00JSON path error near '%q'\x00json_%s() needs an odd number of arguments\x00$[\x00$.\x00json_object() requires an even number of arguments\x00json_object() labels must be TEXT\x00set\x00insert\x00[]\x00{}\x00CREATE TABLE x(key,value,type,atom,id,parent,fullkey,path,json HIDDEN,root 
HIDDEN)\x00.%.*s\x00[%d]\x00$\x00json\x00json_array\x00json_array_length\x00json_extract\x00->\x00->>\x00json_insert\x00json_object\x00json_patch\x00json_quote\x00json_remove\x00json_replace\x00json_set\x00json_type\x00json_valid\x00json_group_array\x00json_group_object\x00json_each\x00json_tree\x00%s_node\x00data\x00DROP TABLE '%q'.'%q_node';DROP TABLE '%q'.'%q_rowid';DROP TABLE '%q'.'%q_parent';\x00RtreeMatchArg\x00SELECT * FROM %Q.%Q\x00UNIQUE constraint failed: %s.%s\x00rtree constraint failed: %s.(%s<=%s)\x00ALTER TABLE %Q.'%q_node' RENAME TO \"%w_node\";ALTER TABLE %Q.'%q_parent' RENAME TO \"%w_parent\";ALTER TABLE %Q.'%q_rowid' RENAME TO \"%w_rowid\";\x00SELECT stat FROM %Q.sqlite_stat1 WHERE tbl = '%q_rowid'\x00node\x00CREATE TABLE \"%w\".\"%w_rowid\"(rowid INTEGER PRIMARY KEY,nodeno\x00,a%d\x00);CREATE TABLE \"%w\".\"%w_node\"(nodeno INTEGER PRIMARY KEY,data);\x00CREATE TABLE \"%w\".\"%w_parent\"(nodeno INTEGER PRIMARY KEY,parentnode);\x00INSERT INTO \"%w\".\"%w_node\"VALUES(1,zeroblob(%d))\x00INSERT INTO\"%w\".\"%w_rowid\"(rowid,nodeno)VALUES(?1,?2)ON CONFLICT(rowid)DO UPDATE SET nodeno=excluded.nodeno\x00SELECT * FROM \"%w\".\"%w_rowid\" WHERE rowid=?1\x00UPDATE \"%w\".\"%w_rowid\"SET \x00a%d=coalesce(?%d,a%d)\x00a%d=?%d\x00 WHERE rowid=?1\x00INSERT OR REPLACE INTO '%q'.'%q_node' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_node' WHERE nodeno = ?1\x00SELECT nodeno FROM '%q'.'%q_rowid' WHERE rowid = ?1\x00INSERT OR REPLACE INTO '%q'.'%q_rowid' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_rowid' WHERE rowid = ?1\x00SELECT parentnode FROM '%q'.'%q_parent' WHERE nodeno = ?1\x00INSERT OR REPLACE INTO '%q'.'%q_parent' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_parent' WHERE nodeno = ?1\x00PRAGMA %Q.page_size\x00SELECT length(data) FROM '%q'.'%q_node' WHERE nodeno = 1\x00undersize RTree blobs in \"%q_node\"\x00Wrong number of columns for an rtree table\x00Too few columns for an rtree table\x00Too many columns for an rtree table\x00Auxiliary rtree columns must be last\x00CREATE TABLE x(%.*s INT\x00,%.*s\x00);\x00,%.*s REAL\x00,%.*s INT\x00{%lld\x00 %g\x00}\x00Invalid argument to rtreedepth()\x00%z%s%z\x00SELECT data FROM %Q.'%q_node' WHERE nodeno=?\x00Node %lld missing from database\x00SELECT parentnode FROM %Q.'%q_parent' WHERE nodeno=?1\x00SELECT nodeno FROM %Q.'%q_rowid' WHERE rowid=?1\x00Mapping (%lld -> %lld) missing from %s table\x00%_rowid\x00%_parent\x00Found (%lld -> %lld) in %s table, expected (%lld -> %lld)\x00Dimension %d of cell %d on node %lld is corrupt\x00Dimension %d of cell %d on node %lld is corrupt relative to parent\x00Node %lld is too small (%d bytes)\x00Rtree depth out of range (%d)\x00Node %lld is too small for cell count of %d (%d bytes)\x00SELECT count(*) FROM %Q.'%q%s'\x00Wrong number of entries in %%%s table - expected %lld, actual %lld\x00SELECT * FROM %Q.'%q_rowid'\x00Schema corrupt or not an rtree\x00_rowid\x00_parent\x00END\x00wrong number of arguments to function rtreecheck()\x00[\x00[%!g,%!g],\x00[%!g,%!g]]\x00\x00CREATE TABLE x(_shape\x00,%s\x00rtree\x00fullscan\x00_shape does not contain a valid polygon\x00geopoly_overlap\x00geopoly_within\x00geopoly\x00geopoly_area\x00geopoly_blob\x00geopoly_json\x00geopoly_svg\x00geopoly_contains_point\x00geopoly_debug\x00geopoly_bbox\x00geopoly_xform\x00geopoly_regular\x00geopoly_ccw\x00geopoly_group_bbox\x00rtreenode\x00rtreedepth\x00rtreecheck\x00rtree_i32\x00corrupt fossil delta\x00DROP TRIGGER IF EXISTS temp.rbu_insert_tr;DROP TRIGGER IF EXISTS temp.rbu_update1_tr;DROP TRIGGER IF EXISTS temp.rbu_update2_tr;DROP TRIGGER IF 
EXISTS temp.rbu_delete_tr;\x00SELECT rbu_target_name(name, type='view') AS target, name FROM sqlite_schema WHERE type IN ('table', 'view') AND target IS NOT NULL %s ORDER BY name\x00AND rootpage!=0 AND rootpage IS NOT NULL\x00SELECT name, rootpage, sql IS NULL OR substr(8, 6)=='UNIQUE' FROM main.sqlite_schema WHERE type='index' AND tbl_name = ?\x00SELECT (sql COLLATE nocase BETWEEN 'CREATE VIRTUAL' AND 'CREATE VIRTUAM'), rootpage FROM sqlite_schema WHERE name=%Q\x00PRAGMA index_list=%Q\x00SELECT rootpage FROM sqlite_schema WHERE name = %Q\x00PRAGMA table_info=%Q\x00PRAGMA main.index_list = %Q\x00PRAGMA main.index_xinfo = %Q\x00SELECT * FROM '%q'\x00rbu_\x00rbu_rowid\x00table %q %s rbu_rowid column\x00may not have\x00requires\x00PRAGMA table_info(%Q)\x00column missing from %q: %s\x00%z%s\"%w\"\x00%z%s%s\"%w\"%s\x00SELECT max(_rowid_) FROM \"%s%w\"\x00 WHERE _rowid_ > %lld \x00 DESC\x00quote(\x00||','||\x00SELECT %s FROM \"%s%w\" ORDER BY %s LIMIT 1\x00 WHERE (%s) > (%s) \x00_rowid_\x00%z%s \"%w\" COLLATE %Q\x00%z%s \"rbu_imp_%d%w\" COLLATE %Q DESC\x00%z%s quote(\"rbu_imp_%d%w\")\x00SELECT %s FROM \"rbu_imp_%w\" ORDER BY %s LIMIT 1\x00%z%s%s\x00(%s) > (%s)\x00%z%s(%.*s) COLLATE %Q\x00%z%s\"%w\" COLLATE %Q\x00%z%s\"rbu_imp_%d%w\"%s\x00%z%s\"rbu_imp_%d%w\" %s COLLATE %Q\x00%z%s\"rbu_imp_%d%w\" IS ?\x00%z%s%s.\"%w\"\x00%z%sNULL\x00%z, %s._rowid_\x00_rowid_ = ?%d\x00%z%sc%d=?%d\x00_rowid_ = (SELECT id FROM rbu_imposter2 WHERE %z)\x00%z%s\"%w\"=?%d\x00invalid rbu_control value\x00%z%s\"%w\"=rbu_delta(\"%w\", ?%d)\x00%z%s\"%w\"=rbu_fossil_delta(\"%w\", ?%d)\x00PRIMARY KEY(\x00%z%s\"%w\"%s\x00%z)\x00SELECT name FROM sqlite_schema WHERE rootpage = ?\x00%z%sc%d %s COLLATE %Q\x00%z%sc%d%s\x00%z, id INTEGER\x00CREATE TABLE rbu_imposter2(%z, PRIMARY KEY(%z)) WITHOUT ROWID\x00PRIMARY KEY \x00%z%s\"%w\" %s %sCOLLATE %Q%s\x00 NOT NULL\x00%z, %z\x00CREATE TABLE \"rbu_imp_%w\"(%z)%s\x00 WITHOUT ROWID\x00INSERT INTO %s.'rbu_tmp_%q'(rbu_control,%s%s) VALUES(%z)\x00SELECT trim(sql) FROM sqlite_schema WHERE type='index' AND name=?\x00 LIMIT -1 OFFSET %d\x00CREATE TABLE \"rbu_imp_%w\"( %s, PRIMARY KEY( %s ) ) WITHOUT ROWID\x00INSERT INTO \"rbu_imp_%w\" VALUES(%s)\x00DELETE FROM \"rbu_imp_%w\" WHERE %s\x00SELECT %s, 0 AS rbu_control FROM '%q' %s %s %s ORDER BY %s%s\x00AND\x00WHERE\x00SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' %s ORDER BY %s%s\x00SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' %s UNION ALL SELECT %s, rbu_control FROM '%q' %s %s typeof(rbu_control)='integer' AND rbu_control!=1 ORDER BY %s%s\x00rbu_imp_\x00INSERT INTO \"%s%w\"(%s%s) VALUES(%s)\x00, _rowid_\x00DELETE FROM \"%s%w\" WHERE %s\x00, rbu_rowid\x00CREATE TABLE IF NOT EXISTS %s.'rbu_tmp_%q' AS SELECT *%s FROM '%q' WHERE 0;\x00, 0 AS rbu_rowid\x00CREATE TEMP TRIGGER rbu_delete_tr BEFORE DELETE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(3, %s);END;CREATE TEMP TRIGGER rbu_update1_tr BEFORE UPDATE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(3, %s);END;CREATE TEMP TRIGGER rbu_update2_tr AFTER UPDATE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(4, %s);END;\x00CREATE TEMP TRIGGER rbu_insert_tr AFTER INSERT ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(0, %s);END;\x00,_rowid_ \x00,rbu_rowid\x00SELECT %s,%s rbu_control%s FROM '%q'%s %s %s %s\x000 AS \x00UPDATE \"%s%w\" SET %s WHERE %s\x00SELECT k, v FROM %s.rbu_state\x00file:///%s-vacuum?modeof=%s\x00ATTACH %Q AS stat\x00CREATE TABLE IF NOT EXISTS %s.rbu_state(k INTEGER PRIMARY KEY, v)\x00cannot vacuum wal mode 
database\x00file:%s-vactmp?rbu_memory=1%s%s\x00&\x00rbu_tmp_insert\x00rbu_fossil_delta\x00rbu_target_name\x00SELECT * FROM sqlite_schema\x00rbu vfs not found\x00PRAGMA main.wal_checkpoint=restart\x00rbu_exclusive_checkpoint\x00%s-oal\x00%s-wal\x00PRAGMA schema_version\x00PRAGMA schema_version = %d\x00INSERT OR REPLACE INTO %s.rbu_state(k, v) VALUES (%d, %d), (%d, %Q), (%d, %Q), (%d, %d), (%d, %d), (%d, %lld), (%d, %lld), (%d, %lld), (%d, %lld), (%d, %Q) \x00PRAGMA main.%s\x00PRAGMA main.%s = %d\x00PRAGMA writable_schema=1\x00SELECT sql FROM sqlite_schema WHERE sql!='' AND rootpage!=0 AND name!='sqlite_sequence' ORDER BY type DESC\x00SELECT * FROM sqlite_schema WHERE rootpage=0 OR rootpage IS NULL\x00INSERT INTO sqlite_schema VALUES(?,?,?,?,?)\x00PRAGMA writable_schema=0\x00DELETE FROM %s.'rbu_tmp_%q'\x00rbu_state mismatch error\x00rbu_vfs_%d\x00SELECT count(*) FROM sqlite_schema WHERE type='index' AND tbl_name = %Q\x00rbu_index_cnt\x00SELECT 1 FROM sqlite_schema WHERE tbl_name = 'rbu_count'\x00SELECT sum(cnt * (1 + rbu_index_cnt(rbu_target_name(tbl))))FROM rbu_count\x00cannot update wal mode database\x00database modified during rbu %s\x00vacuum\x00update\x00BEGIN IMMEDIATE\x00PRAGMA journal_mode=off\x00-vactmp\x00DELETE FROM stat.rbu_state\x00rbu/zipvfs setup error\x00rbu(%s)/%z\x00rbu_memory\x00SELECT 0, 'tbl', '', 0, '', 1 UNION ALL SELECT 1, 'idx', '', 0, '', 2 UNION ALL SELECT 2, 'stat', '', 0, '', 0\x00PRAGMA '%q'.table_info('%q')\x00%z%s\"%w\".\"%w\".\"%w\"=\"%w\".\"%w\".\"%w\"\x00%z%s\"%w\".\"%w\".\"%w\" IS NOT \"%w\".\"%w\".\"%w\"\x00 OR \x00SELECT * FROM \"%w\".\"%w\" WHERE NOT EXISTS ( SELECT 1 FROM \"%w\".\"%w\" WHERE %s)\x00SELECT * FROM \"%w\".\"%w\", \"%w\".\"%w\" WHERE %s AND (%z)\x00table schemas do not match\x00SELECT tbl, ?2, stat FROM %Q.sqlite_stat1 WHERE tbl IS ?1 AND idx IS (CASE WHEN ?2=X'' THEN NULL ELSE ?2 END)\x00SELECT * FROM \x00 WHERE \x00 IS ?\x00SAVEPOINT changeset\x00RELEASE changeset\x00UPDATE main.\x00 SET \x00 = ?\x00idx IS CASE WHEN length(?4)=0 AND typeof(?4)='blob' THEN NULL ELSE ?4 END \x00DELETE FROM main.\x00 AND (?\x00AND \x00INSERT INTO main.\x00) VALUES(?\x00, ?\x00INSERT INTO main.sqlite_stat1 VALUES(?1, CASE WHEN length(?2)=0 AND typeof(?2)='blob' THEN NULL ELSE ?2 END, ?3)\x00DELETE FROM main.sqlite_stat1 WHERE tbl=?1 AND idx IS CASE WHEN length(?2)=0 AND typeof(?2)='blob' THEN NULL ELSE ?2 END AND (?4 OR stat IS ?3)\x00SAVEPOINT replace_op\x00RELEASE replace_op\x00SAVEPOINT changeset_apply\x00PRAGMA defer_foreign_keys = 1\x00sqlite3changeset_apply(): no such table: %s\x00sqlite3changeset_apply(): table %s has %d columns, expected %d or more\x00sqlite3changeset_apply(): primary key mismatch for table %s\x00PRAGMA defer_foreign_keys = 0\x00RELEASE changeset_apply\x00ROLLBACK TO changeset_apply\x00fts5: parser stack overflow\x00fts5: syntax error near \"%.*s\"\x00%z%.*s\x00wrong number of arguments to function highlight()\x00wrong number of arguments to function snippet()\x00snippet\x00highlight\x00bm25\x00prefix\x00malformed prefix=... directive\x00too many prefix indexes (max %d)\x00prefix length out of range (max 999)\x00tokenize\x00multiple tokenize=... directives\x00parse error in tokenize directive\x00content\x00multiple content=... directives\x00%Q.%Q\x00content_rowid\x00multiple content_rowid=... directives\x00columnsize\x00malformed columnsize=... directive\x00columns\x00malformed detail=... 
directive\x00unrecognized option: \"%.*s\"\x00reserved fts5 column name: %s\x00unindexed\x00unrecognized column option: %s\x00T.%Q\x00, T.%Q\x00, T.c%d\x00reserved fts5 table name: %s\x00parse error in \"%s\"\x00docsize\x00%Q.'%q_%s'\x00CREATE TABLE x(\x00%z%s%Q\x00%z, %Q HIDDEN, %s HIDDEN)\x00pgsz\x00hashsize\x00automerge\x00usermerge\x00crisismerge\x00SELECT k, v FROM %Q.'%q_config'\x00version\x00invalid fts5 file format (found %d, expected %d) - run 'rebuild'\x00unterminated string\x00fts5: syntax error near \"%.1s\"\x00OR\x00NOT\x00NEAR\x00expected integer, got \"%.*s\"\x00fts5: column queries are not supported (detail=none)\x00fts5: %s queries are not supported (detail!=full)\x00phrase\x00block\x00REPLACE INTO '%q'.'%q_data'(id, block) VALUES(?,?)\x00DELETE FROM '%q'.'%q_data' WHERE id>=? AND id<=?\x00DELETE FROM '%q'.'%q_idx' WHERE segid=?\x00PRAGMA %Q.data_version\x00SELECT pgno FROM '%q'.'%q_idx' WHERE segid=? AND term<=? ORDER BY term DESC LIMIT 1\x00INSERT INTO '%q'.'%q_idx'(segid,term,pgno) VALUES(?,?,?)\x00%s_data\x00id INTEGER PRIMARY KEY, block BLOB\x00segid, term, pgno, PRIMARY KEY(segid, term)\x00SELECT segid, term, (pgno>>1), (pgno&1) FROM %Q.'%q_idx' WHERE segid=%d ORDER BY 1, 2\x00\x00\x00\x00\x00\x00recursively defined fts5 content table\x00SELECT rowid, rank FROM %Q.%Q ORDER BY %s(\"%w\"%s%s) %s\x00DESC\x00ASC\x00reads\x00unknown special query: %.*s\x00SELECT %s\x00no such function: %s\x00parse error in rank function: %s\x00%s: table does not support scanning\x00delete-all\x00'delete-all' may only be used with a contentless or external content fts5 table\x00rebuild\x00'rebuild' may not be used with a contentless fts5 table\x00merge\x00integrity-check\x00cannot %s contentless fts5 table: %s\x00DELETE from\x00no such cursor: %lld\x00no such tokenizer: %s\x00error in tokenizer constructor\x00fts5_api_ptr\x00fts5: 2023-02-21 18:09:37 05941c2a04037fc3ed2ffae11f5d2260706f89431f463518740f72ada350866d\x00config\x00fts5\x00fts5_source_id\x00SELECT %s FROM %s T WHERE T.%Q >= ? AND T.%Q <= ? ORDER BY T.%Q ASC\x00SELECT %s FROM %s T WHERE T.%Q <= ? AND T.%Q >= ? 
ORDER BY T.%Q DESC\x00SELECT %s FROM %s T WHERE T.%Q=?\x00INSERT INTO %Q.'%q_content' VALUES(%s)\x00REPLACE INTO %Q.'%q_content' VALUES(%s)\x00DELETE FROM %Q.'%q_content' WHERE id=?\x00REPLACE INTO %Q.'%q_docsize' VALUES(?,?)\x00DELETE FROM %Q.'%q_docsize' WHERE id=?\x00SELECT sz FROM %Q.'%q_docsize' WHERE id=?\x00REPLACE INTO %Q.'%q_config' VALUES(?,?)\x00SELECT %s FROM %s AS T\x00DROP TABLE IF EXISTS %Q.'%q_data';DROP TABLE IF EXISTS %Q.'%q_idx';DROP TABLE IF EXISTS %Q.'%q_config';\x00DROP TABLE IF EXISTS %Q.'%q_docsize';\x00DROP TABLE IF EXISTS %Q.'%q_content';\x00ALTER TABLE %Q.'%q_%s' RENAME TO '%q_%s';\x00CREATE TABLE %Q.'%q_%q'(%s)%s\x00fts5: error creating shadow table %q_%s: %s\x00id INTEGER PRIMARY KEY\x00, c%d\x00id INTEGER PRIMARY KEY, sz BLOB\x00k PRIMARY KEY, v\x00DELETE FROM %Q.'%q_data';DELETE FROM %Q.'%q_idx';\x00DELETE FROM %Q.'%q_docsize';\x00SELECT count(*) FROM %Q.'%q_%s'\x00tokenchars\x00separators\x00L* N* Co\x00categories\x00remove_diacritics\x00unicode61\x00al\x00ance\x00ence\x00er\x00ic\x00able\x00ible\x00ant\x00ement\x00ment\x00ent\x00ion\x00ou\x00ism\x00ate\x00iti\x00ous\x00ive\x00ize\x00at\x00bl\x00ble\x00iz\x00ational\x00tional\x00tion\x00enci\x00anci\x00izer\x00logi\x00bli\x00alli\x00entli\x00eli\x00e\x00ousli\x00ization\x00ation\x00ator\x00alism\x00iveness\x00fulness\x00ful\x00ousness\x00aliti\x00iviti\x00biliti\x00ical\x00ness\x00icate\x00iciti\x00ative\x00alize\x00eed\x00ee\x00ed\x00ing\x00case_sensitive\x00ascii\x00porter\x00trigram\x00col\x00row\x00instance\x00fts5vocab: unknown table type: %Q\x00CREATE TABlE vocab(term, col, doc, cnt)\x00CREATE TABlE vocab(term, doc, cnt)\x00CREATE TABlE vocab(term, doc, col, offset)\x00wrong number of vtable arguments\x00recursive definition for %s.%s\x00SELECT t.%Q FROM %Q.%Q AS t WHERE t.%Q MATCH '*id'\x00no such fts5 table: %s.%s\x00fts5vocab\x002023-02-21 18:09:37 05941c2a04037fc3ed2ffae11f5d2260706f89431f463518740f72ada350866d\x00" +var ts1 = "3.41.2\x00ATOMIC_INTRINSICS=1\x00COMPILER=gcc-10.2.1 20210110\x00DEFAULT_AUTOVACUUM\x00DEFAULT_CACHE_SIZE=-2000\x00DEFAULT_FILE_FORMAT=4\x00DEFAULT_JOURNAL_SIZE_LIMIT=-1\x00DEFAULT_MEMSTATUS=0\x00DEFAULT_MMAP_SIZE=0\x00DEFAULT_PAGE_SIZE=4096\x00DEFAULT_PCACHE_INITSZ=20\x00DEFAULT_RECURSIVE_TRIGGERS\x00DEFAULT_SECTOR_SIZE=4096\x00DEFAULT_SYNCHRONOUS=2\x00DEFAULT_WAL_AUTOCHECKPOINT=1000\x00DEFAULT_WAL_SYNCHRONOUS=2\x00DEFAULT_WORKER_THREADS=0\x00ENABLE_COLUMN_METADATA\x00ENABLE_FTS5\x00ENABLE_GEOPOLY\x00ENABLE_MATH_FUNCTIONS\x00ENABLE_MEMORY_MANAGEMENT\x00ENABLE_OFFSET_SQL_FUNC\x00ENABLE_PREUPDATE_HOOK\x00ENABLE_RBU\x00ENABLE_RTREE\x00ENABLE_SESSION\x00ENABLE_SNAPSHOT\x00ENABLE_STAT4\x00ENABLE_UNLOCK_NOTIFY\x00LIKE_DOESNT_MATCH_BLOBS\x00MALLOC_SOFT_LIMIT=1024\x00MAX_ATTACHED=10\x00MAX_COLUMN=2000\x00MAX_COMPOUND_SELECT=500\x00MAX_DEFAULT_PAGE_SIZE=8192\x00MAX_EXPR_DEPTH=1000\x00MAX_FUNCTION_ARG=127\x00MAX_LENGTH=1000000000\x00MAX_LIKE_PATTERN_LENGTH=50000\x00MAX_MMAP_SIZE=0x7fff0000\x00MAX_PAGE_COUNT=1073741823\x00MAX_PAGE_SIZE=65536\x00MAX_SQL_LENGTH=1000000000\x00MAX_TRIGGER_DEPTH=1000\x00MAX_VARIABLE_NUMBER=32766\x00MAX_VDBE_OP=250000000\x00MAX_WORKER_THREADS=8\x00MUTEX_NOOP\x00SOUNDEX\x00SYSTEM_MALLOC\x00TEMP_STORE=1\x00THREADSAFE=1\x00BINARY\x00ANY\x00BLOB\x00INT\x00INTEGER\x00REAL\x00TEXT\x0020b:20e\x0020c:20e\x0020e\x0040f-21a-21d\x00now\x00local time unavailable\x00second\x00minute\x00hour\x00\x00\x00day\x00\x00\x00\x00month\x00\x00year\x00\x00\x00auto\x00julianday\x00localtime\x00unixepoch\x00utc\x00weekday \x00start of 
\x00month\x00year\x00day\x00%02d\x00%06.3f\x00%03d\x00%.16g\x00%lld\x00%04d\x00date\x00time\x00datetime\x00strftime\x00current_time\x00current_timestamp\x00current_date\x00failed to allocate %u bytes of memory\x00failed memory resize %u to %u bytes\x00out of memory\x000123456789ABCDEF0123456789abcdef\x00-x0\x00X0\x00%\x00NaN\x00Inf\x00\x00NULL\x00(NULL)\x00.\x00(join-%u)\x00(subquery-%u)\x00thstndrd\x00922337203685477580\x00API call with %s database connection pointer\x00unopened\x00invalid\x00Savepoint\x00AutoCommit\x00Transaction\x00Checkpoint\x00JournalMode\x00Vacuum\x00VFilter\x00VUpdate\x00Init\x00Goto\x00Gosub\x00InitCoroutine\x00Yield\x00MustBeInt\x00Jump\x00Once\x00If\x00IfNot\x00IsType\x00Not\x00IfNullRow\x00SeekLT\x00SeekLE\x00SeekGE\x00SeekGT\x00IfNotOpen\x00IfNoHope\x00NoConflict\x00NotFound\x00Found\x00SeekRowid\x00NotExists\x00Last\x00IfSmaller\x00SorterSort\x00Sort\x00Rewind\x00SorterNext\x00Prev\x00Next\x00IdxLE\x00IdxGT\x00IdxLT\x00Or\x00And\x00IdxGE\x00RowSetRead\x00RowSetTest\x00Program\x00FkIfZero\x00IsNull\x00NotNull\x00Ne\x00Eq\x00Gt\x00Le\x00Lt\x00Ge\x00ElseEq\x00IfPos\x00IfNotZero\x00DecrJumpZero\x00IncrVacuum\x00VNext\x00Filter\x00PureFunc\x00Function\x00Return\x00EndCoroutine\x00HaltIfNull\x00Halt\x00Integer\x00Int64\x00String\x00BeginSubrtn\x00Null\x00SoftNull\x00Blob\x00Variable\x00Move\x00Copy\x00SCopy\x00IntCopy\x00FkCheck\x00ResultRow\x00CollSeq\x00AddImm\x00RealAffinity\x00Cast\x00Permutation\x00Compare\x00IsTrue\x00ZeroOrNull\x00Offset\x00Column\x00TypeCheck\x00Affinity\x00MakeRecord\x00Count\x00ReadCookie\x00SetCookie\x00ReopenIdx\x00BitAnd\x00BitOr\x00ShiftLeft\x00ShiftRight\x00Add\x00Subtract\x00Multiply\x00Divide\x00Remainder\x00Concat\x00OpenRead\x00OpenWrite\x00BitNot\x00OpenDup\x00OpenAutoindex\x00String8\x00OpenEphemeral\x00SorterOpen\x00SequenceTest\x00OpenPseudo\x00Close\x00ColumnsUsed\x00SeekScan\x00SeekHit\x00Sequence\x00NewRowid\x00Insert\x00RowCell\x00Delete\x00ResetCount\x00SorterCompare\x00SorterData\x00RowData\x00Rowid\x00NullRow\x00SeekEnd\x00IdxInsert\x00SorterInsert\x00IdxDelete\x00DeferredSeek\x00IdxRowid\x00FinishSeek\x00Destroy\x00Clear\x00ResetSorter\x00CreateBtree\x00SqlExec\x00ParseSchema\x00LoadAnalysis\x00DropTable\x00DropIndex\x00Real\x00DropTrigger\x00IntegrityCk\x00RowSetAdd\x00Param\x00FkCounter\x00MemMax\x00OffsetLimit\x00AggInverse\x00AggStep\x00AggStep1\x00AggValue\x00AggFinal\x00Expire\x00CursorLock\x00CursorUnlock\x00TableLock\x00VBegin\x00VCreate\x00VDestroy\x00VOpen\x00VInitIn\x00VColumn\x00VRename\x00Pagecount\x00MaxPgcnt\x00ClrSubtype\x00FilterAdd\x00Trace\x00CursorHint\x00ReleaseReg\x00Noop\x00Explain\x00Abortable\x00open\x00close\x00access\x00getcwd\x00stat\x00fstat\x00ftruncate\x00fcntl\x00read\x00pread\x00pread64\x00write\x00pwrite\x00pwrite64\x00fchmod\x00fallocate\x00unlink\x00openDirectory\x00mkdir\x00rmdir\x00fchown\x00geteuid\x00mmap\x00munmap\x00mremap\x00getpagesize\x00readlink\x00lstat\x00ioctl\x00attempt to open \"%s\" as file descriptor %d\x00/dev/null\x00os_unix.c:%d: (%d) %s(%s) - %s\x00cannot fstat db file %s\x00file unlinked while open: %s\x00multiple links to file: %s\x00file renamed while open: %s\x00%s\x00full_fsync\x00%s-shm\x00readonly_shm\x00psow\x00unix-excl\x00%s.lock\x00/var/tmp\x00/usr/tmp\x00/tmp\x00SQLITE_TMPDIR\x00TMPDIR\x00%s/etilqs_%llx%c\x00modeof\x00fsync\x00/dev/urandom\x00unix\x00unix-none\x00unix-dotfile\x00memdb\x00memdb(%p,%lld)\x00PRAGMA \"%w\".page_count\x00ATTACH x AS %Q\x00recovered %d pages from %s\x00-journal\x00-wal\x00nolock\x00immutable\x00PRAGMA 
table_list\x00recovered %d frames from WAL file %s\x00cannot limit WAL size: %s\x00SQLite format 3\x00:memory:\x00@ \x00\n\x00invalid page number %d\x002nd reference to page %d\x00Failed to read ptrmap key=%d\x00Bad ptr map entry key=%d expected=(%d,%d) got=(%d,%d)\x00failed to get page %d\x00freelist leaf count too big on page %d\x00%s is %d but should be %d\x00size\x00overflow list length\x00Page %u: \x00unable to get the page. error code=%d\x00btreeInitPage() returns error code %d\x00free space corruption\x00On tree page %u cell %d: \x00On page %u at right child: \x00Offset %d out of range %d..%d\x00Extends off end of page\x00Rowid %lld out of order\x00Child page depth differs\x00Multiple uses for byte %u of page %u\x00Fragmentation of %d bytes reported as %d on page %u\x00Main freelist: \x00max rootpage (%d) disagrees with header (%d)\x00incremental_vacuum enabled with a max rootpage of zero\x00Page %d is never used\x00Pointer map page %d is referenced\x00unknown database %s\x00destination database is in use\x00source and destination must be distinct\x00%!.15g\x00-\x00%s%s\x00k(%d\x00B\x00,%s%s%s\x00N.\x00)\x00%.18s-%s\x00%s(%d)\x00%d\x00(blob)\x00vtab:%p\x00%c%u\x00]\x00program\x00?\x008\x0016LE\x0016BE\x00addr\x00opcode\x00p1\x00p2\x00p3\x00p4\x00p5\x00comment\x00id\x00parent\x00notused\x00detail\x00%.4c%s%.16c\x00MJ delete: %s\x00MJ collide: %s\x00-mj%06X9%02X\x00FOREIGN KEY constraint failed\x00a CHECK constraint\x00a generated column\x00an index\x00non-deterministic use of %s() in %s\x00API called with finalized prepared statement\x00API called with NULL prepared statement\x00string or blob too big\x00bind on a busy prepared statement: [%s]\x00-- \x00'%.*q'\x00zeroblob(%d)\x00x'\x00%02x\x00'\x00%s constraint failed\x00%z: %s\x00abort at %d in [%s]: %s\x00cannot store %s value in %s column %s.%s\x00cannot open savepoint - SQL statements in progress\x00no such savepoint: %s\x00cannot release savepoint - SQL statements in progress\x00cannot commit transaction - SQL statements in progress\x00cannot start a transaction within a transaction\x00cannot rollback - no transaction is active\x00cannot commit - no transaction is active\x00database schema has changed\x00index corruption\x00sqlite_master\x00SELECT*FROM\"%w\".%s WHERE %s ORDER BY rowid\x00too many levels of trigger recursion\x00cannot change %s wal mode from within a transaction\x00into\x00out of\x00database table is locked: %s\x00ValueList\x00-- %s\x00statement aborts at %d: [%s] %s\x00NOT NULL\x00UNIQUE\x00CHECK\x00FOREIGN KEY\x00cannot open value of type %s\x00null\x00real\x00integer\x00no such rowid: %lld\x00cannot open virtual table: %s\x00cannot open table without rowid: %s\x00cannot open view: %s\x00no such column: \"%s\"\x00foreign key\x00indexed\x00cannot open %s column for writing\x00sqlite_\x00sqlite_temp_master\x00sqlite_temp_schema\x00sqlite_schema\x00main\x00*\x00new\x00old\x00excluded\x00misuse of aliased aggregate %s\x00misuse of aliased window function %s\x00row value misused\x00double-quoted string literal: \"%w\"\x00coalesce\x00no such column\x00ambiguous column name\x00%s: %s.%s.%s\x00%s: %s.%s\x00%s: %s\x00partial index WHERE clauses\x00index expressions\x00CHECK constraints\x00generated columns\x00%s prohibited in %s\x00the \".\" operator\x00second argument to %#T() must be a constant between 0.0 and 1.0\x00not authorized to use function: %#T\x00non-deterministic functions\x00%#T() may not be used as a window function\x00window\x00aggregate\x00misuse of %s function %#T()\x00no such function: %#T\x00wrong 
number of arguments to function %#T()\x00FILTER may not be used with non-aggregate %#T()\x00subqueries\x00parameters\x00%r %s BY term out of range - should be between 1 and %d\x00too many terms in ORDER BY clause\x00ORDER\x00%r ORDER BY term does not match any column in the result set\x00too many terms in %s BY clause\x00HAVING clause on a non-aggregate query\x00GROUP\x00aggregate functions are not allowed in the GROUP BY clause\x00Expression tree is too large (maximum depth %d)\x00IN(...) element has %d term%s - expected %d\x00s\x000\x00too many arguments on function %T\x00unsafe use of %#T()\x00variable number must be between ?1 and ?%d\x00too many SQL variables\x00%d columns assigned %d values\x00too many columns in %s\x00true\x00false\x00_ROWID_\x00ROWID\x00OID\x00USING ROWID SEARCH ON TABLE %s FOR IN-OPERATOR\x00USING INDEX %s FOR IN-OPERATOR\x00sub-select returns %d columns - expected %d\x00REUSE LIST SUBQUERY %d\x00%sLIST SUBQUERY %d\x00CORRELATED \x00REUSE SUBQUERY %d\x00%sSCALAR SUBQUERY %d\x001\x000x\x00hex literal too big: %s%#T\x00generated column loop on \"%s\"\x00blob\x00text\x00numeric\x00flexnum\x00none\x00misuse of aggregate: %#T()\x00unknown function: %#T()\x00RAISE() may only be used within a trigger-program\x00B\x00C\x00D\x00E\x00F\x00table %s may not be altered\x00SELECT 1 FROM \"%w\".sqlite_master WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%' AND sqlite_rename_test(%Q, sql, type, name, %d, %Q, %d)=NULL \x00SELECT 1 FROM temp.sqlite_master WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%' AND sqlite_rename_test(%Q, sql, type, name, 1, %Q, %d)=NULL \x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_quotefix(%Q, sql)WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%'\x00UPDATE temp.sqlite_master SET sql = sqlite_rename_quotefix('temp', sql)WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%'\x00there is already another table or index with this name: %s\x00table\x00view %s may not be altered\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, %d) WHERE (type!='index' OR tbl_name=%Q COLLATE nocase)AND name NOT LIKE 'sqliteX_%%' ESCAPE 'X'\x00UPDATE %Q.sqlite_master SET tbl_name = %Q, name = CASE WHEN type='table' THEN %Q WHEN name LIKE 'sqliteX_autoindex%%' ESCAPE 'X' AND type='index' THEN 'sqlite_autoindex_' || %Q || substr(name,%d+18) ELSE name END WHERE tbl_name=%Q COLLATE nocase AND (type='table' OR type='index' OR type='trigger');\x00sqlite_sequence\x00UPDATE \"%w\".sqlite_sequence set name = %Q WHERE name = %Q\x00UPDATE sqlite_temp_schema SET sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, 1), tbl_name = CASE WHEN tbl_name=%Q COLLATE nocase AND sqlite_rename_test(%Q, sql, type, name, 1, 'after rename', 0) THEN %Q ELSE tbl_name END WHERE type IN ('view', 'trigger')\x00after rename\x00SELECT raise(ABORT,%Q) FROM \"%w\".\"%w\"\x00Cannot add a PRIMARY KEY column\x00Cannot add a UNIQUE column\x00Cannot add a REFERENCES column with non-NULL default value\x00Cannot add a NOT NULL column with default value NULL\x00Cannot add a column with non-constant default\x00cannot add a STORED column\x00UPDATE \"%w\".sqlite_master SET sql = printf('%%.%ds, ',sql) || %Q || substr(sql,1+length(printf('%%.%ds',sql))) WHERE type = 'table' AND name = %Q\x00SELECT CASE WHEN quick_check GLOB 'CHECK*' THEN raise(ABORT,'CHECK constraint failed') ELSE raise(ABORT,'NOT NULL constraint failed') END FROM pragma_quick_check(%Q,%Q) 
WHERE quick_check GLOB 'CHECK*' OR quick_check GLOB 'NULL*'\x00virtual tables may not be altered\x00Cannot add a column to a view\x00sqlite_altertab_%s\x00view\x00virtual table\x00cannot %s %s \"%s\"\x00drop column from\x00rename columns of\x00no such column: \"%T\"\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, %d) WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND (type != 'index' OR tbl_name = %Q)\x00UPDATE temp.sqlite_master SET sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, 1) WHERE type IN ('trigger', 'view')\x00error in %s %s%s%s: %s\x00 \x00CREATE \x00\"%w\" \x00%Q%s\x00%.*s%s\x00cannot drop %s column: \"%s\"\x00PRIMARY KEY\x00cannot drop column \"%s\": no other columns exist\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_drop_column(%d, sql, %d) WHERE (type=='table' AND tbl_name=%Q COLLATE nocase)\x00after drop column\x00sqlite_rename_column\x00sqlite_rename_table\x00sqlite_rename_test\x00sqlite_drop_column\x00sqlite_rename_quotefix\x00CREATE TABLE %Q.%s(%s)\x00DELETE FROM %Q.%s WHERE %s=%Q\x00DELETE FROM %Q.%s\x00sqlite_stat1\x00tbl,idx,stat\x00sqlite_stat4\x00tbl,idx,neq,nlt,ndlt,sample\x00sqlite_stat3\x00stat_init\x00stat_push\x00%llu\x00 %llu\x00%llu \x00stat_get\x00sqlite\\_%\x00BBB\x00idx\x00tbl\x00unordered*\x00sz=[0-9]*\x00noskipscan*\x00SELECT idx,count(*) FROM %Q.sqlite_stat4 GROUP BY idx\x00SELECT idx,neq,nlt,ndlt,sample FROM %Q.sqlite_stat4\x00SELECT tbl,idx,stat FROM %Q.sqlite_stat1\x00x\x00\x00too many attached databases - max %d\x00database %s is already in use\x00database is already attached\x00attached databases must use the same text encoding as main database\x00unable to open database: %s\x00no such database: %s\x00cannot detach database %s\x00database %s is locked\x00sqlite_detach\x00sqlite_attach\x00%s cannot use variables\x00%s %T cannot reference objects in database %s\x00authorizer malfunction\x00%s.%s\x00%s.%z\x00access to %z is prohibited\x00not authorized\x00pragma_\x00no such view\x00no such table\x00corrupt database\x00unknown database %T\x00object name reserved for internal use: %s\x00temporary table name must be unqualified\x00%s %T already exists\x00there is already an index named %s\x00sqlite_returning\x00cannot use RETURNING in a trigger\x00too many columns on %s\x00always\x00generated\x00duplicate column name: %s\x00default value of column [%s] is not constant\x00cannot use DEFAULT on a generated column\x00generated columns cannot be part of the PRIMARY KEY\x00table \"%s\" has more than one primary key\x00AUTOINCREMENT is only allowed on an INTEGER PRIMARY KEY\x00virtual tables cannot use computed columns\x00virtual\x00stored\x00error in generated column \"%s\"\x00,\x00\n \x00,\n \x00\n)\x00CREATE TABLE \x00 TEXT\x00 NUM\x00 INT\x00 REAL\x00unknown datatype for %s.%s: \"%s\"\x00missing datatype for %s.%s\x00AUTOINCREMENT not allowed on WITHOUT ROWID tables\x00PRIMARY KEY missing on table %s\x00must have at least one non-generated column\x00TABLE\x00VIEW\x00CREATE %s %.*s\x00UPDATE %Q.sqlite_master SET type='%s', name=%Q, tbl_name=%Q, rootpage=#%d, sql=%Q WHERE rowid=#%d\x00CREATE TABLE %Q.sqlite_sequence(name,seq)\x00tbl_name='%q' AND type!='trigger'\x00parameters are not allowed in views\x00view %s is circularly defined\x00corrupt schema\x00UPDATE %Q.sqlite_master SET rootpage=%d WHERE #%d AND rootpage=#%d\x00sqlite_stat%d\x00DELETE FROM %Q.sqlite_sequence WHERE name=%Q\x00DELETE FROM %Q.sqlite_master WHERE tbl_name=%Q and type!='trigger'\x00table %s may not be dropped\x00use 
DROP TABLE to delete table %s\x00use DROP VIEW to delete view %s\x00foreign key on %s should reference only one column of table %T\x00number of columns in foreign key does not match the number of columns in the referenced table\x00unknown column \"%s\" in foreign key definition\x00unsupported use of NULLS %s\x00FIRST\x00LAST\x00index\x00cannot create a TEMP index on non-TEMP table \"%s\"\x00table %s may not be indexed\x00views may not be indexed\x00virtual tables may not be indexed\x00there is already a table named %s\x00index %s already exists\x00sqlite_autoindex_%s_%d\x00expressions prohibited in PRIMARY KEY and UNIQUE constraints\x00conflicting ON CONFLICT clauses specified\x00invalid rootpage\x00CREATE%s INDEX %.*s\x00 UNIQUE\x00INSERT INTO %Q.sqlite_master VALUES('index',%Q,%Q,#%d,%Q);\x00name='%q' AND type='index'\x00no such index: %S\x00index associated with UNIQUE or PRIMARY KEY constraint cannot be dropped\x00DELETE FROM %Q.sqlite_master WHERE name=%Q AND type='index'\x00too many FROM clause terms, max: %d\x00a JOIN clause is required before %s\x00ON\x00USING\x00BEGIN\x00ROLLBACK\x00COMMIT\x00RELEASE\x00unable to open a temporary database file for storing temporary tables\x00index '%q'\x00, \x00%s.rowid\x00unable to identify the object to be reindexed\x00duplicate WITH table name: %s\x00no such collation sequence: %s\x00unsafe use of virtual table \"%s\"\x00table %s may not be modified\x00cannot modify %s because it is a view\x00rows deleted\x00integer overflow\x00%.*f\x00LIKE or GLOB pattern too complex\x00ESCAPE expression must be a single character\x00%!.20e\x00%Q\x00?000\x00MATCH\x00like\x00implies_nonnull_row\x00expr_compare\x00expr_implies_expr\x00affinity\x00soundex\x00load_extension\x00sqlite_compileoption_used\x00sqlite_compileoption_get\x00unlikely\x00likelihood\x00likely\x00sqlite_offset\x00ltrim\x00rtrim\x00trim\x00min\x00max\x00typeof\x00subtype\x00length\x00instr\x00printf\x00format\x00unicode\x00char\x00abs\x00round\x00upper\x00lower\x00hex\x00unhex\x00ifnull\x00random\x00randomblob\x00nullif\x00sqlite_version\x00sqlite_source_id\x00sqlite_log\x00quote\x00last_insert_rowid\x00changes\x00total_changes\x00replace\x00zeroblob\x00substr\x00substring\x00sum\x00total\x00avg\x00count\x00group_concat\x00glob\x00ceil\x00ceiling\x00floor\x00trunc\x00ln\x00log\x00log10\x00log2\x00exp\x00pow\x00power\x00mod\x00acos\x00asin\x00atan\x00atan2\x00cos\x00sin\x00tan\x00cosh\x00sinh\x00tanh\x00acosh\x00asinh\x00atanh\x00sqrt\x00radians\x00degrees\x00pi\x00sign\x00iif\x00foreign key mismatch - \"%w\" referencing \"%w\"\x00cannot INSERT into generated column \"%s\"\x00table %S has no column named %s\x00table %S has %d columns but %d values were supplied\x00%d values for %d columns\x00UPSERT not implemented for virtual table \"%s\"\x00cannot UPSERT a view\x00rows inserted\x00sqlite3_extension_init\x00sqlite3_\x00lib\x00_init\x00no entry point [%s] in shared library [%s]\x00error during initialization: %s\x00unable to open shared library [%.*s]\x00so\x00automatic extension loading failed: 
%s\x00seq\x00from\x00to\x00on_update\x00on_delete\x00match\x00cid\x00name\x00type\x00notnull\x00dflt_value\x00pk\x00hidden\x00schema\x00ncol\x00wr\x00strict\x00seqno\x00desc\x00coll\x00key\x00builtin\x00enc\x00narg\x00flags\x00wdth\x00hght\x00flgs\x00unique\x00origin\x00partial\x00rowid\x00fkid\x00file\x00busy\x00checkpointed\x00database\x00status\x00cache_size\x00timeout\x00analysis_limit\x00application_id\x00auto_vacuum\x00automatic_index\x00busy_timeout\x00cache_spill\x00case_sensitive_like\x00cell_size_check\x00checkpoint_fullfsync\x00collation_list\x00compile_options\x00count_changes\x00data_version\x00database_list\x00default_cache_size\x00defer_foreign_keys\x00empty_result_callbacks\x00encoding\x00foreign_key_check\x00foreign_key_list\x00foreign_keys\x00freelist_count\x00full_column_names\x00fullfsync\x00function_list\x00hard_heap_limit\x00ignore_check_constraints\x00incremental_vacuum\x00index_info\x00index_list\x00index_xinfo\x00integrity_check\x00journal_mode\x00journal_size_limit\x00legacy_alter_table\x00locking_mode\x00max_page_count\x00mmap_size\x00module_list\x00optimize\x00page_count\x00page_size\x00pragma_list\x00query_only\x00quick_check\x00read_uncommitted\x00recursive_triggers\x00reverse_unordered_selects\x00schema_version\x00secure_delete\x00short_column_names\x00shrink_memory\x00soft_heap_limit\x00synchronous\x00table_info\x00table_list\x00table_xinfo\x00temp_store\x00temp_store_directory\x00threads\x00trusted_schema\x00user_version\x00wal_autocheckpoint\x00wal_checkpoint\x00writable_schema\x00onoffalseyestruextrafull\x00exclusive\x00normal\x00full\x00incremental\x00memory\x00temporary storage cannot be changed from within a transaction\x00SET NULL\x00SET DEFAULT\x00CASCADE\x00RESTRICT\x00NO ACTION\x00delete\x00persist\x00off\x00truncate\x00wal\x00w\x00a\x00sissii\x00utf8\x00utf16le\x00utf16be\x00-%T\x00fast\x00not a writable directory\x00Safety level may not be changed inside a transaction\x00reset\x00issisii\x00issisi\x00SELECT*FROM\"%w\"\x00shadow\x00sssiii\x00iisX\x00isiX\x00c\x00u\x00isisi\x00iss\x00is\x00iissssss\x00NONE\x00siX\x00*** in database %s ***\n\x00row not in PRIMARY KEY order for %s\x00NULL value in %s.%s\x00non-%s value in %s.%s\x00NUMERIC value in %s.%s\x00C\x00TEXT value in %s.%s\x00CHECK constraint failed in %s\x00row \x00 missing from index \x00rowid not at end-of-record for row \x00 of index \x00 values differ from index \x00non-unique entry in index \x00wrong # of entries in index \x00ok\x00unsupported encoding: %s\x00restart\x00ANALYZE \"%w\".\"%w\"\x00UTF8\x00UTF-8\x00UTF-16le\x00UTF-16be\x00UTF16le\x00UTF16be\x00UTF-16\x00UTF16\x00CREATE TABLE x\x00%c\"%s\"\x00(\"%s\"\x00,arg HIDDEN\x00,schema HIDDEN\x00PRAGMA \x00%Q.\x00=%Q\x00error in %s %s after %s: %s\x00malformed database schema (%s)\x00%z - %s\x00rename\x00drop column\x00add column\x00orphan index\x00CREATE TABLE x(type text,name text,tbl_name text,rootpage int,sql text)\x00unsupported file format\x00SELECT*FROM\"%w\".%s ORDER BY rowid\x00database schema is locked: %s\x00statement too long\x00unknown join type: %T%s%T%s%T\x00naturaleftouterightfullinnercross\x00a NATURAL join may not have an ON or USING clause\x00cannot join using column %s - column not present in both tables\x00ambiguous reference to %s in USING()\x00UNION ALL\x00INTERSECT\x00EXCEPT\x00UNION\x00USE TEMP B-TREE FOR %s\x00USE TEMP B-TREE FOR %sORDER BY\x00RIGHT PART OF \x00column%d\x00%.*z:%u\x00NUM\x00cannot use window functions in recursive queries\x00recursive aggregate queries not supported\x00SETUP\x00RECURSIVE 
STEP\x00SCAN %d CONSTANT ROW%s\x00S\x00COMPOUND QUERY\x00LEFT-MOST SUBQUERY\x00%s USING TEMP B-TREE\x00all VALUES must have the same number of terms\x00SELECTs to the left and right of %s do not have the same number of result columns\x00MERGE (%s)\x00LEFT\x00RIGHT\x00no such index: %s\x00'%s' is not a function\x00no such index: \"%s\"\x00multiple references to recursive table: %s\x00circular reference: %s\x00table %s has %d values for %d columns\x00multiple recursive references: %s\x00recursive reference in a subquery: %s\x00%!S\x00too many references to \"%s\": max 65535\x00access to view \"%s\" prohibited\x00..%s\x00%s.%s.%s\x00no such table: %s\x00no tables specified\x00too many columns in result set\x00DISTINCT aggregates must have exactly one argument\x00USE TEMP B-TREE FOR %s(DISTINCT)\x00SCAN %s%s%s\x00 USING COVERING INDEX \x00target object/alias may not appear in FROM clause: %s\x00expected %d columns for '%s' but got %d\x00CO-ROUTINE %!S\x00MATERIALIZE %!S\x00DISTINCT\x00GROUP BY\x00sqlite3_get_table() called with two or more incompatible queries\x00temporary trigger may not have qualified name\x00trigger\x00cannot create triggers on virtual tables\x00trigger %T already exists\x00cannot create trigger on system table\x00cannot create %s trigger on view: %S\x00BEFORE\x00AFTER\x00cannot create INSTEAD OF trigger on table: %S\x00trigger \"%s\" may not write to shadow table \"%s\"\x00INSERT INTO %Q.sqlite_master VALUES('trigger',%Q,%Q,0,'CREATE TRIGGER %q')\x00type='trigger' AND name='%q'\x00no such trigger: %S\x00DELETE FROM %Q.sqlite_master WHERE name=%Q AND type='trigger'\x00%s RETURNING is not available on virtual tables\x00DELETE\x00UPDATE\x00RETURNING may not use \"TABLE.*\" wildcards\x00-- TRIGGER %s\x00cannot UPDATE generated column \"%s\"\x00no such column: %s\x00rows updated\x00%r \x00%sON CONFLICT clause does not match any PRIMARY KEY or UNIQUE constraint\x00CRE\x00INS\x00cannot VACUUM from within a transaction\x00cannot VACUUM - SQL statements in progress\x00non-text filename\x00ATTACH %Q AS vacuum_db\x00output file already exists\x00SELECT sql FROM \"%w\".sqlite_schema WHERE type='table'AND name<>'sqlite_sequence' AND coalesce(rootpage,1)>0\x00SELECT sql FROM \"%w\".sqlite_schema WHERE type='index'\x00SELECT'INSERT INTO vacuum_db.'||quote(name)||' SELECT*FROM\"%w\".'||quote(name)FROM vacuum_db.sqlite_schema WHERE type='table'AND coalesce(rootpage,1)>0\x00INSERT INTO vacuum_db.sqlite_schema SELECT*FROM \"%w\".sqlite_schema WHERE type IN('view','trigger') OR(type='table'AND rootpage=0)\x00CREATE VIRTUAL TABLE %T\x00UPDATE %Q.sqlite_master SET type='table', name=%Q, tbl_name=%Q, rootpage=0, sql=%Q WHERE rowid=#%d\x00name=%Q AND sql=%Q\x00vtable constructor called recursively: %s\x00vtable constructor failed: %s\x00vtable constructor did not declare schema: %s\x00no such module: %s\x00\x00 AND \x00(\x00 (\x00%s=?\x00ANY(%s)\x00>\x00<\x00%s %S\x00SEARCH\x00SCAN\x00AUTOMATIC PARTIAL COVERING INDEX\x00AUTOMATIC COVERING INDEX\x00COVERING INDEX %s\x00INDEX %s\x00 USING \x00 USING INTEGER PRIMARY KEY (%s\x00>? 
AND %s\x00%c?)\x00 VIRTUAL TABLE INDEX %d:%s\x00 LEFT-JOIN\x00BLOOM FILTER ON %S (\x00rowid=?\x00MULTI-INDEX OR\x00INDEX %d\x00RIGHT-JOIN %s\x00regexp\x00ON clause references tables to its right\x00NOCASE\x00too many arguments on %s() - max %d\x00automatic index on %s(%s)\x00auto-index\x00%s.xBestIndex malfunction\x00abbreviated query algorithm search\x00no query solution\x00at most %d tables in a join\x00SCAN CONSTANT ROW\x00second argument to nth_value must be a positive integer\x00argument of ntile must be a positive integer\x00row_number\x00dense_rank\x00rank\x00percent_rank\x00cume_dist\x00ntile\x00last_value\x00nth_value\x00first_value\x00lead\x00lag\x00no such window: %s\x00RANGE with offset PRECEDING/FOLLOWING requires one ORDER BY expression\x00FILTER clause may only be used with aggregate window functions\x00misuse of aggregate: %s()\x00unsupported frame specification\x00PARTITION clause\x00ORDER BY clause\x00frame specification\x00cannot override %s of window: %s\x00DISTINCT is not supported for window functions\x00frame starting offset must be a non-negative integer\x00frame ending offset must be a non-negative integer\x00frame starting offset must be a non-negative number\x00frame ending offset must be a non-negative number\x00%s clause should come after %s not before\x00ORDER BY\x00LIMIT\x00too many terms in compound SELECT\x00syntax error after column name \"%.*s\"\x00parser stack overflow\x00unknown table option: %.*s\x00set list\x00near \"%T\": syntax error\x00qualified table names are not allowed on INSERT, UPDATE, and DELETE statements within triggers\x00the INDEXED BY clause is not allowed on UPDATE or DELETE statements within triggers\x00the NOT INDEXED clause is not allowed on UPDATE or DELETE statements within triggers\x00incomplete input\x00unrecognized token: \"%T\"\x00%s in \"%s\"\x00create\x00temp\x00temporary\x00end\x00explain\x00unable to close due to unfinalized statements or unfinished backups\x00unknown error\x00abort due to ROLLBACK\x00another row available\x00no more rows available\x00not an error\x00SQL logic error\x00access permission denied\x00query aborted\x00database is locked\x00database table is locked\x00attempt to write a readonly database\x00interrupted\x00disk I/O error\x00database disk image is malformed\x00unknown operation\x00database or disk is full\x00unable to open database file\x00locking protocol\x00constraint failed\x00datatype mismatch\x00bad parameter or other API misuse\x00authorization denied\x00column index out of range\x00file is not a database\x00notification message\x00warning message\x00unable to delete/modify user-function due to active statements\x00unable to use function %s in the requested context\x00unknown database: %s\x00unable to delete/modify collation sequence due to active statements\x00file:\x00localhost\x00invalid uri authority: %.*s\x00vfs\x00cache\x00mode\x00no such %s mode: %s\x00%s mode not allowed: %s\x00no such vfs: %s\x00shared\x00private\x00ro\x00rw\x00rwc\x00RTRIM\x00\x00\x00\x00%s at line %d of [%.10s]\x00database corruption\x00misuse\x00cannot open file\x00no such table column: %s.%s\x00SQLITE_\x00database is deadlocked\x00array\x00object\x000123456789abcdef\x00JSON cannot hold BLOB values\x00malformed JSON\x00[0]\x00JSON path error near '%q'\x00json_%s() needs an odd number of arguments\x00$[\x00$.\x00json_object() requires an even number of arguments\x00json_object() labels must be TEXT\x00set\x00insert\x00[]\x00{}\x00CREATE TABLE x(key,value,type,atom,id,parent,fullkey,path,json HIDDEN,root 
HIDDEN)\x00.%.*s\x00[%d]\x00$\x00json\x00json_array\x00json_array_length\x00json_extract\x00->\x00->>\x00json_insert\x00json_object\x00json_patch\x00json_quote\x00json_remove\x00json_replace\x00json_set\x00json_type\x00json_valid\x00json_group_array\x00json_group_object\x00json_each\x00json_tree\x00%s_node\x00data\x00DROP TABLE '%q'.'%q_node';DROP TABLE '%q'.'%q_rowid';DROP TABLE '%q'.'%q_parent';\x00RtreeMatchArg\x00SELECT * FROM %Q.%Q\x00UNIQUE constraint failed: %s.%s\x00rtree constraint failed: %s.(%s<=%s)\x00ALTER TABLE %Q.'%q_node' RENAME TO \"%w_node\";ALTER TABLE %Q.'%q_parent' RENAME TO \"%w_parent\";ALTER TABLE %Q.'%q_rowid' RENAME TO \"%w_rowid\";\x00SELECT stat FROM %Q.sqlite_stat1 WHERE tbl = '%q_rowid'\x00node\x00CREATE TABLE \"%w\".\"%w_rowid\"(rowid INTEGER PRIMARY KEY,nodeno\x00,a%d\x00);CREATE TABLE \"%w\".\"%w_node\"(nodeno INTEGER PRIMARY KEY,data);\x00CREATE TABLE \"%w\".\"%w_parent\"(nodeno INTEGER PRIMARY KEY,parentnode);\x00INSERT INTO \"%w\".\"%w_node\"VALUES(1,zeroblob(%d))\x00INSERT INTO\"%w\".\"%w_rowid\"(rowid,nodeno)VALUES(?1,?2)ON CONFLICT(rowid)DO UPDATE SET nodeno=excluded.nodeno\x00SELECT * FROM \"%w\".\"%w_rowid\" WHERE rowid=?1\x00UPDATE \"%w\".\"%w_rowid\"SET \x00a%d=coalesce(?%d,a%d)\x00a%d=?%d\x00 WHERE rowid=?1\x00INSERT OR REPLACE INTO '%q'.'%q_node' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_node' WHERE nodeno = ?1\x00SELECT nodeno FROM '%q'.'%q_rowid' WHERE rowid = ?1\x00INSERT OR REPLACE INTO '%q'.'%q_rowid' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_rowid' WHERE rowid = ?1\x00SELECT parentnode FROM '%q'.'%q_parent' WHERE nodeno = ?1\x00INSERT OR REPLACE INTO '%q'.'%q_parent' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_parent' WHERE nodeno = ?1\x00PRAGMA %Q.page_size\x00SELECT length(data) FROM '%q'.'%q_node' WHERE nodeno = 1\x00undersize RTree blobs in \"%q_node\"\x00Wrong number of columns for an rtree table\x00Too few columns for an rtree table\x00Too many columns for an rtree table\x00Auxiliary rtree columns must be last\x00CREATE TABLE x(%.*s INT\x00,%.*s\x00);\x00,%.*s REAL\x00,%.*s INT\x00{%lld\x00 %g\x00}\x00Invalid argument to rtreedepth()\x00%z%s%z\x00SELECT data FROM %Q.'%q_node' WHERE nodeno=?\x00Node %lld missing from database\x00SELECT parentnode FROM %Q.'%q_parent' WHERE nodeno=?1\x00SELECT nodeno FROM %Q.'%q_rowid' WHERE rowid=?1\x00Mapping (%lld -> %lld) missing from %s table\x00%_rowid\x00%_parent\x00Found (%lld -> %lld) in %s table, expected (%lld -> %lld)\x00Dimension %d of cell %d on node %lld is corrupt\x00Dimension %d of cell %d on node %lld is corrupt relative to parent\x00Node %lld is too small (%d bytes)\x00Rtree depth out of range (%d)\x00Node %lld is too small for cell count of %d (%d bytes)\x00SELECT count(*) FROM %Q.'%q%s'\x00Wrong number of entries in %%%s table - expected %lld, actual %lld\x00SELECT * FROM %Q.'%q_rowid'\x00Schema corrupt or not an rtree\x00_rowid\x00_parent\x00END\x00wrong number of arguments to function rtreecheck()\x00[\x00[%!g,%!g],\x00[%!g,%!g]]\x00\x00CREATE TABLE x(_shape\x00,%s\x00rtree\x00fullscan\x00_shape does not contain a valid polygon\x00geopoly_overlap\x00geopoly_within\x00geopoly\x00geopoly_area\x00geopoly_blob\x00geopoly_json\x00geopoly_svg\x00geopoly_contains_point\x00geopoly_debug\x00geopoly_bbox\x00geopoly_xform\x00geopoly_regular\x00geopoly_ccw\x00geopoly_group_bbox\x00rtreenode\x00rtreedepth\x00rtreecheck\x00rtree_i32\x00corrupt fossil delta\x00DROP TRIGGER IF EXISTS temp.rbu_insert_tr;DROP TRIGGER IF EXISTS temp.rbu_update1_tr;DROP TRIGGER IF EXISTS temp.rbu_update2_tr;DROP TRIGGER IF 
EXISTS temp.rbu_delete_tr;\x00SELECT rbu_target_name(name, type='view') AS target, name FROM sqlite_schema WHERE type IN ('table', 'view') AND target IS NOT NULL %s ORDER BY name\x00AND rootpage!=0 AND rootpage IS NOT NULL\x00SELECT name, rootpage, sql IS NULL OR substr(8, 6)=='UNIQUE' FROM main.sqlite_schema WHERE type='index' AND tbl_name = ?\x00SELECT (sql COLLATE nocase BETWEEN 'CREATE VIRTUAL' AND 'CREATE VIRTUAM'), rootpage FROM sqlite_schema WHERE name=%Q\x00PRAGMA index_list=%Q\x00SELECT rootpage FROM sqlite_schema WHERE name = %Q\x00PRAGMA table_info=%Q\x00PRAGMA main.index_list = %Q\x00PRAGMA main.index_xinfo = %Q\x00SELECT * FROM '%q'\x00rbu_\x00rbu_rowid\x00table %q %s rbu_rowid column\x00may not have\x00requires\x00PRAGMA table_info(%Q)\x00column missing from %q: %s\x00%z%s\"%w\"\x00%z%s%s\"%w\"%s\x00SELECT max(_rowid_) FROM \"%s%w\"\x00 WHERE _rowid_ > %lld \x00 DESC\x00quote(\x00||','||\x00SELECT %s FROM \"%s%w\" ORDER BY %s LIMIT 1\x00 WHERE (%s) > (%s) \x00_rowid_\x00%z%s \"%w\" COLLATE %Q\x00%z%s \"rbu_imp_%d%w\" COLLATE %Q DESC\x00%z%s quote(\"rbu_imp_%d%w\")\x00SELECT %s FROM \"rbu_imp_%w\" ORDER BY %s LIMIT 1\x00%z%s%s\x00(%s) > (%s)\x00%z%s(%.*s) COLLATE %Q\x00%z%s\"%w\" COLLATE %Q\x00%z%s\"rbu_imp_%d%w\"%s\x00%z%s\"rbu_imp_%d%w\" %s COLLATE %Q\x00%z%s\"rbu_imp_%d%w\" IS ?\x00%z%s%s.\"%w\"\x00%z%sNULL\x00%z, %s._rowid_\x00_rowid_ = ?%d\x00%z%sc%d=?%d\x00_rowid_ = (SELECT id FROM rbu_imposter2 WHERE %z)\x00%z%s\"%w\"=?%d\x00invalid rbu_control value\x00%z%s\"%w\"=rbu_delta(\"%w\", ?%d)\x00%z%s\"%w\"=rbu_fossil_delta(\"%w\", ?%d)\x00PRIMARY KEY(\x00%z%s\"%w\"%s\x00%z)\x00SELECT name FROM sqlite_schema WHERE rootpage = ?\x00%z%sc%d %s COLLATE %Q\x00%z%sc%d%s\x00%z, id INTEGER\x00CREATE TABLE rbu_imposter2(%z, PRIMARY KEY(%z)) WITHOUT ROWID\x00PRIMARY KEY \x00%z%s\"%w\" %s %sCOLLATE %Q%s\x00 NOT NULL\x00%z, %z\x00CREATE TABLE \"rbu_imp_%w\"(%z)%s\x00 WITHOUT ROWID\x00INSERT INTO %s.'rbu_tmp_%q'(rbu_control,%s%s) VALUES(%z)\x00SELECT trim(sql) FROM sqlite_schema WHERE type='index' AND name=?\x00 LIMIT -1 OFFSET %d\x00CREATE TABLE \"rbu_imp_%w\"( %s, PRIMARY KEY( %s ) ) WITHOUT ROWID\x00INSERT INTO \"rbu_imp_%w\" VALUES(%s)\x00DELETE FROM \"rbu_imp_%w\" WHERE %s\x00SELECT %s, 0 AS rbu_control FROM '%q' %s %s %s ORDER BY %s%s\x00AND\x00WHERE\x00SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' %s ORDER BY %s%s\x00SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' %s UNION ALL SELECT %s, rbu_control FROM '%q' %s %s typeof(rbu_control)='integer' AND rbu_control!=1 ORDER BY %s%s\x00rbu_imp_\x00INSERT INTO \"%s%w\"(%s%s) VALUES(%s)\x00, _rowid_\x00DELETE FROM \"%s%w\" WHERE %s\x00, rbu_rowid\x00CREATE TABLE IF NOT EXISTS %s.'rbu_tmp_%q' AS SELECT *%s FROM '%q' WHERE 0;\x00, 0 AS rbu_rowid\x00CREATE TEMP TRIGGER rbu_delete_tr BEFORE DELETE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(3, %s);END;CREATE TEMP TRIGGER rbu_update1_tr BEFORE UPDATE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(3, %s);END;CREATE TEMP TRIGGER rbu_update2_tr AFTER UPDATE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(4, %s);END;\x00CREATE TEMP TRIGGER rbu_insert_tr AFTER INSERT ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(0, %s);END;\x00,_rowid_ \x00,rbu_rowid\x00SELECT %s,%s rbu_control%s FROM '%q'%s %s %s %s\x000 AS \x00UPDATE \"%s%w\" SET %s WHERE %s\x00SELECT k, v FROM %s.rbu_state\x00file:///%s-vacuum?modeof=%s\x00ATTACH %Q AS stat\x00CREATE TABLE IF NOT EXISTS %s.rbu_state(k INTEGER PRIMARY KEY, v)\x00cannot vacuum wal mode 
database\x00file:%s-vactmp?rbu_memory=1%s%s\x00&\x00rbu_tmp_insert\x00rbu_fossil_delta\x00rbu_target_name\x00SELECT * FROM sqlite_schema\x00rbu vfs not found\x00PRAGMA main.wal_checkpoint=restart\x00rbu_exclusive_checkpoint\x00%s-oal\x00%s-wal\x00PRAGMA schema_version\x00PRAGMA schema_version = %d\x00INSERT OR REPLACE INTO %s.rbu_state(k, v) VALUES (%d, %d), (%d, %Q), (%d, %Q), (%d, %d), (%d, %d), (%d, %lld), (%d, %lld), (%d, %lld), (%d, %lld), (%d, %Q) \x00PRAGMA main.%s\x00PRAGMA main.%s = %d\x00PRAGMA writable_schema=1\x00SELECT sql FROM sqlite_schema WHERE sql!='' AND rootpage!=0 AND name!='sqlite_sequence' ORDER BY type DESC\x00SELECT * FROM sqlite_schema WHERE rootpage=0 OR rootpage IS NULL\x00INSERT INTO sqlite_schema VALUES(?,?,?,?,?)\x00PRAGMA writable_schema=0\x00DELETE FROM %s.'rbu_tmp_%q'\x00rbu_state mismatch error\x00rbu_vfs_%d\x00SELECT count(*) FROM sqlite_schema WHERE type='index' AND tbl_name = %Q\x00rbu_index_cnt\x00SELECT 1 FROM sqlite_schema WHERE tbl_name = 'rbu_count'\x00SELECT sum(cnt * (1 + rbu_index_cnt(rbu_target_name(tbl))))FROM rbu_count\x00cannot update wal mode database\x00database modified during rbu %s\x00vacuum\x00update\x00BEGIN IMMEDIATE\x00PRAGMA journal_mode=off\x00-vactmp\x00DELETE FROM stat.rbu_state\x00rbu/zipvfs setup error\x00rbu(%s)/%z\x00rbu_memory\x00SELECT 0, 'tbl', '', 0, '', 1 UNION ALL SELECT 1, 'idx', '', 0, '', 2 UNION ALL SELECT 2, 'stat', '', 0, '', 0\x00PRAGMA '%q'.table_info('%q')\x00%z%s\"%w\".\"%w\".\"%w\"=\"%w\".\"%w\".\"%w\"\x00%z%s\"%w\".\"%w\".\"%w\" IS NOT \"%w\".\"%w\".\"%w\"\x00 OR \x00SELECT * FROM \"%w\".\"%w\" WHERE NOT EXISTS ( SELECT 1 FROM \"%w\".\"%w\" WHERE %s)\x00SELECT * FROM \"%w\".\"%w\", \"%w\".\"%w\" WHERE %s AND (%z)\x00table schemas do not match\x00SELECT tbl, ?2, stat FROM %Q.sqlite_stat1 WHERE tbl IS ?1 AND idx IS (CASE WHEN ?2=X'' THEN NULL ELSE ?2 END)\x00SELECT * FROM \x00 WHERE \x00 IS ?\x00SAVEPOINT changeset\x00RELEASE changeset\x00UPDATE main.\x00 SET \x00 = ?\x00idx IS CASE WHEN length(?4)=0 AND typeof(?4)='blob' THEN NULL ELSE ?4 END \x00DELETE FROM main.\x00 AND (?\x00AND \x00INSERT INTO main.\x00) VALUES(?\x00, ?\x00INSERT INTO main.sqlite_stat1 VALUES(?1, CASE WHEN length(?2)=0 AND typeof(?2)='blob' THEN NULL ELSE ?2 END, ?3)\x00DELETE FROM main.sqlite_stat1 WHERE tbl=?1 AND idx IS CASE WHEN length(?2)=0 AND typeof(?2)='blob' THEN NULL ELSE ?2 END AND (?4 OR stat IS ?3)\x00SAVEPOINT replace_op\x00RELEASE replace_op\x00SAVEPOINT changeset_apply\x00PRAGMA defer_foreign_keys = 1\x00sqlite3changeset_apply(): no such table: %s\x00sqlite3changeset_apply(): table %s has %d columns, expected %d or more\x00sqlite3changeset_apply(): primary key mismatch for table %s\x00PRAGMA defer_foreign_keys = 0\x00RELEASE changeset_apply\x00ROLLBACK TO changeset_apply\x00fts5: parser stack overflow\x00fts5: syntax error near \"%.*s\"\x00%z%.*s\x00wrong number of arguments to function highlight()\x00wrong number of arguments to function snippet()\x00snippet\x00highlight\x00bm25\x00prefix\x00malformed prefix=... directive\x00too many prefix indexes (max %d)\x00prefix length out of range (max 999)\x00tokenize\x00multiple tokenize=... directives\x00parse error in tokenize directive\x00content\x00multiple content=... directives\x00%Q.%Q\x00content_rowid\x00multiple content_rowid=... directives\x00columnsize\x00malformed columnsize=... directive\x00columns\x00malformed detail=... 
directive\x00unrecognized option: \"%.*s\"\x00reserved fts5 column name: %s\x00unindexed\x00unrecognized column option: %s\x00T.%Q\x00, T.%Q\x00, T.c%d\x00reserved fts5 table name: %s\x00parse error in \"%s\"\x00docsize\x00%Q.'%q_%s'\x00CREATE TABLE x(\x00%z%s%Q\x00%z, %Q HIDDEN, %s HIDDEN)\x00pgsz\x00hashsize\x00automerge\x00usermerge\x00crisismerge\x00SELECT k, v FROM %Q.'%q_config'\x00version\x00invalid fts5 file format (found %d, expected %d) - run 'rebuild'\x00unterminated string\x00fts5: syntax error near \"%.1s\"\x00OR\x00NOT\x00NEAR\x00expected integer, got \"%.*s\"\x00fts5: column queries are not supported (detail=none)\x00fts5: %s queries are not supported (detail!=full)\x00phrase\x00block\x00REPLACE INTO '%q'.'%q_data'(id, block) VALUES(?,?)\x00DELETE FROM '%q'.'%q_data' WHERE id>=? AND id<=?\x00DELETE FROM '%q'.'%q_idx' WHERE segid=?\x00PRAGMA %Q.data_version\x00SELECT pgno FROM '%q'.'%q_idx' WHERE segid=? AND term<=? ORDER BY term DESC LIMIT 1\x00INSERT INTO '%q'.'%q_idx'(segid,term,pgno) VALUES(?,?,?)\x00%s_data\x00id INTEGER PRIMARY KEY, block BLOB\x00segid, term, pgno, PRIMARY KEY(segid, term)\x00SELECT segid, term, (pgno>>1), (pgno&1) FROM %Q.'%q_idx' WHERE segid=%d ORDER BY 1, 2\x00\x00\x00\x00\x00\x00recursively defined fts5 content table\x00SELECT rowid, rank FROM %Q.%Q ORDER BY %s(\"%w\"%s%s) %s\x00DESC\x00ASC\x00reads\x00unknown special query: %.*s\x00SELECT %s\x00no such function: %s\x00parse error in rank function: %s\x00%s: table does not support scanning\x00delete-all\x00'delete-all' may only be used with a contentless or external content fts5 table\x00rebuild\x00'rebuild' may not be used with a contentless fts5 table\x00merge\x00integrity-check\x00cannot %s contentless fts5 table: %s\x00DELETE from\x00no such cursor: %lld\x00no such tokenizer: %s\x00error in tokenizer constructor\x00fts5_api_ptr\x00fts5: 2023-03-22 11:56:21 0d1fc92f94cb6b76bffe3ec34d69cffde2924203304e8ffc4155597af0c191da\x00config\x00fts5\x00fts5_source_id\x00SELECT %s FROM %s T WHERE T.%Q >= ? AND T.%Q <= ? ORDER BY T.%Q ASC\x00SELECT %s FROM %s T WHERE T.%Q <= ? AND T.%Q >= ? 
ORDER BY T.%Q DESC\x00SELECT %s FROM %s T WHERE T.%Q=?\x00INSERT INTO %Q.'%q_content' VALUES(%s)\x00REPLACE INTO %Q.'%q_content' VALUES(%s)\x00DELETE FROM %Q.'%q_content' WHERE id=?\x00REPLACE INTO %Q.'%q_docsize' VALUES(?,?)\x00DELETE FROM %Q.'%q_docsize' WHERE id=?\x00SELECT sz FROM %Q.'%q_docsize' WHERE id=?\x00REPLACE INTO %Q.'%q_config' VALUES(?,?)\x00SELECT %s FROM %s AS T\x00DROP TABLE IF EXISTS %Q.'%q_data';DROP TABLE IF EXISTS %Q.'%q_idx';DROP TABLE IF EXISTS %Q.'%q_config';\x00DROP TABLE IF EXISTS %Q.'%q_docsize';\x00DROP TABLE IF EXISTS %Q.'%q_content';\x00ALTER TABLE %Q.'%q_%s' RENAME TO '%q_%s';\x00CREATE TABLE %Q.'%q_%q'(%s)%s\x00fts5: error creating shadow table %q_%s: %s\x00id INTEGER PRIMARY KEY\x00, c%d\x00id INTEGER PRIMARY KEY, sz BLOB\x00k PRIMARY KEY, v\x00DELETE FROM %Q.'%q_data';DELETE FROM %Q.'%q_idx';\x00DELETE FROM %Q.'%q_docsize';\x00SELECT count(*) FROM %Q.'%q_%s'\x00tokenchars\x00separators\x00L* N* Co\x00categories\x00remove_diacritics\x00unicode61\x00al\x00ance\x00ence\x00er\x00ic\x00able\x00ible\x00ant\x00ement\x00ment\x00ent\x00ion\x00ou\x00ism\x00ate\x00iti\x00ous\x00ive\x00ize\x00at\x00bl\x00ble\x00iz\x00ational\x00tional\x00tion\x00enci\x00anci\x00izer\x00logi\x00bli\x00alli\x00entli\x00eli\x00e\x00ousli\x00ization\x00ation\x00ator\x00alism\x00iveness\x00fulness\x00ful\x00ousness\x00aliti\x00iviti\x00biliti\x00ical\x00ness\x00icate\x00iciti\x00ative\x00alize\x00eed\x00ee\x00ed\x00ing\x00case_sensitive\x00ascii\x00porter\x00trigram\x00col\x00row\x00instance\x00fts5vocab: unknown table type: %Q\x00CREATE TABlE vocab(term, col, doc, cnt)\x00CREATE TABlE vocab(term, doc, cnt)\x00CREATE TABlE vocab(term, doc, col, offset)\x00wrong number of vtable arguments\x00recursive definition for %s.%s\x00SELECT t.%Q FROM %Q.%Q AS t WHERE t.%Q MATCH '*id'\x00no such fts5 table: %s.%s\x00fts5vocab\x002023-03-22 11:56:21 0d1fc92f94cb6b76bffe3ec34d69cffde2924203304e8ffc4155597af0c191da\x00" var ts = (*reflect.StringHeader)(unsafe.Pointer(&ts1)).Data diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/sqlite/lib/sqlite_linux_s390x.go temporal-1.22.5/src/vendor/modernc.org/sqlite/lib/sqlite_linux_s390x.go --- temporal-1.21.5-1/src/vendor/modernc.org/sqlite/lib/sqlite_linux_s390x.go 2023-09-29 14:03:35.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/sqlite/lib/sqlite_linux_s390x.go 2024-02-23 09:46:16.000000000 +0000 @@ -1,4 +1,4 @@ -// Code generated by 'ccgo -DSQLITE_PRIVATE= -export-defines "" -export-enums "" -export-externs X -export-fields F -export-typedefs "" -ignore-unsupported-alignment -pkgname sqlite3 -volatile=sqlite3_io_error_pending,sqlite3_open_file_count,sqlite3_pager_readdb_count,sqlite3_pager_writedb_count,sqlite3_pager_writej_count,sqlite3_search_count,sqlite3_sort_count,saved_cnt,randomnessPid -o lib/sqlite_linux_s390x.go -trace-translation-units testdata/sqlite-amalgamation-3410000/sqlite3.c -full-path-comments -DNDEBUG -DHAVE_USLEEP -DLONGDOUBLE_TYPE=double -DSQLITE_CORE -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_FTS5 -DSQLITE_ENABLE_GEOPOLY -DSQLITE_ENABLE_MATH_FUNCTIONS -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_OFFSET_SQL_FUNC -DSQLITE_ENABLE_PREUPDATE_HOOK -DSQLITE_ENABLE_RBU -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_SESSION -DSQLITE_ENABLE_SNAPSHOT -DSQLITE_ENABLE_STAT4 -DSQLITE_ENABLE_UNLOCK_NOTIFY -DSQLITE_LIKE_DOESNT_MATCH_BLOBS -DSQLITE_MUTEX_APPDEF=1 -DSQLITE_MUTEX_NOOP -DSQLITE_SOUNDEX -DSQLITE_THREADSAFE=1 -DSQLITE_OS_UNIX=1', DO NOT EDIT. 
+// Code generated by 'ccgo -DSQLITE_PRIVATE= -export-defines "" -export-enums "" -export-externs X -export-fields F -export-typedefs "" -ignore-unsupported-alignment -pkgname sqlite3 -volatile=sqlite3_io_error_pending,sqlite3_open_file_count,sqlite3_pager_readdb_count,sqlite3_pager_writedb_count,sqlite3_pager_writej_count,sqlite3_search_count,sqlite3_sort_count,saved_cnt,randomnessPid -o lib/sqlite_linux_s390x.go -trace-translation-units testdata/sqlite-amalgamation-3410200/sqlite3.c -full-path-comments -DNDEBUG -DHAVE_USLEEP -DLONGDOUBLE_TYPE=double -DSQLITE_CORE -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_FTS5 -DSQLITE_ENABLE_GEOPOLY -DSQLITE_ENABLE_MATH_FUNCTIONS -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_OFFSET_SQL_FUNC -DSQLITE_ENABLE_PREUPDATE_HOOK -DSQLITE_ENABLE_RBU -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_SESSION -DSQLITE_ENABLE_SNAPSHOT -DSQLITE_ENABLE_STAT4 -DSQLITE_ENABLE_UNLOCK_NOTIFY -DSQLITE_LIKE_DOESNT_MATCH_BLOBS -DSQLITE_MUTEX_APPDEF=1 -DSQLITE_MUTEX_NOOP -DSQLITE_SOUNDEX -DSQLITE_THREADSAFE=1 -DSQLITE_OS_UNIX=1', DO NOT EDIT. package sqlite3 @@ -921,11 +921,11 @@ NC_OrderAgg = 0x8000000 NC_PartIdx = 0x000002 NC_SelfRef = 0x00002e + NC_Subquery = 0x000040 NC_UAggInfo = 0x000100 NC_UBaseReg = 0x000400 NC_UEList = 0x000080 NC_UUpsert = 0x000200 - NC_VarSelect = 0x000040 NDEBUG = 1 NN = 1 NOT_WITHIN = 0 @@ -2165,7 +2165,7 @@ SQLITE_SHM_UNLOCK = 1 SQLITE_SORTER_PMASZ = 250 SQLITE_SOUNDEX = 1 - SQLITE_SOURCE_ID = "2023-02-21 18:09:37 05941c2a04037fc3ed2ffae11f5d2260706f89431f463518740f72ada350866d" + SQLITE_SOURCE_ID = "2023-03-22 11:56:21 0d1fc92f94cb6b76bffe3ec34d69cffde2924203304e8ffc4155597af0c191da" SQLITE_SO_ASC = 0 SQLITE_SO_DESC = 1 SQLITE_SO_UNDEFINED = -1 @@ -2272,8 +2272,8 @@ SQLITE_UTF8 = 1 SQLITE_VDBEINT_H = 0 SQLITE_VDBE_H = 0 - SQLITE_VERSION = "3.41.0" - SQLITE_VERSION_NUMBER = 3041000 + SQLITE_VERSION = "3.41.2" + SQLITE_VERSION_NUMBER = 3041002 SQLITE_VTABRISK_High = 2 SQLITE_VTABRISK_Low = 0 SQLITE_VTABRISK_Normal = 1 @@ -6117,7 +6117,8 @@ FiIdxCur int32 FiIdxCol int32 FbMaybeNullRow U8 - F__ccgo_pad1 [3]byte + Faff U8 + F__ccgo_pad1 [2]byte FpIENext uintptr } @@ -6759,17 +6760,18 @@ // Handle type for pages. type PgHdr2 = struct { - FpPage uintptr - FpData uintptr - FpExtra uintptr - FpCache uintptr - FpDirty uintptr - FpPager uintptr - Fpgno Pgno - Fflags U16 - FnRef I16 - FpDirtyNext uintptr - FpDirtyPrev uintptr + FpPage uintptr + FpData uintptr + FpExtra uintptr + FpCache uintptr + FpDirty uintptr + FpPager uintptr + Fpgno Pgno + Fflags U16 + F__ccgo_pad1 [2]byte + FnRef I64 + FpDirtyNext uintptr + FpDirtyPrev uintptr } // Handle type for pages. 
@@ -6990,14 +6992,14 @@ FpDirty uintptr FpDirtyTail uintptr FpSynced uintptr - FnRefSum int32 + FnRefSum I64 FszCache int32 FszSpill int32 FszPage int32 FszExtra int32 FbPurgeable U8 FeCreate U8 - F__ccgo_pad1 [2]byte + F__ccgo_pad1 [6]byte FxStress uintptr FpStress uintptr FpCache uintptr @@ -7804,7 +7806,7 @@ _ = pMutex if op < 0 || op >= int32(uint64(unsafe.Sizeof([10]Sqlite3StatValueType{}))/uint64(unsafe.Sizeof(Sqlite3StatValueType(0)))) { - return Xsqlite3MisuseError(tls, 23229) + return Xsqlite3MisuseError(tls, 23233) } if statMutex[op] != 0 { pMutex = Xsqlite3Pcache1Mutex(tls) @@ -15896,7 +15898,7 @@ for p = (*UnixInodeInfo)(unsafe.Pointer(pInode)).FpUnused; p != 0; p = pNext { pNext = (*UnixUnusedFd)(unsafe.Pointer(p)).FpNext - robust_close(tls, pFile, (*UnixUnusedFd)(unsafe.Pointer(p)).Ffd, 38271) + robust_close(tls, pFile, (*UnixUnusedFd)(unsafe.Pointer(p)).Ffd, 38275) Xsqlite3_free(tls, p) } (*UnixInodeInfo)(unsafe.Pointer(pInode)).FpUnused = uintptr(0) @@ -16373,7 +16375,7 @@ var pFile uintptr = id unixUnmapfile(tls, pFile) if (*UnixFile)(unsafe.Pointer(pFile)).Fh >= 0 { - robust_close(tls, pFile, (*UnixFile)(unsafe.Pointer(pFile)).Fh, 39055) + robust_close(tls, pFile, (*UnixFile)(unsafe.Pointer(pFile)).Fh, 39059) (*UnixFile)(unsafe.Pointer(pFile)).Fh = -1 } @@ -16664,7 +16666,7 @@ if fd >= 0 { return SQLITE_OK } - return unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 40676), ts+3378, bp+8, 40676) + return unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 40680), ts+3378, bp+8, 40680) } func unixSync(tls *libc.TLS, id uintptr, flags int32) int32 { @@ -16681,14 +16683,14 @@ if rc != 0 { storeLastErrno(tls, pFile, *(*int32)(unsafe.Pointer(libc.X__errno_location(tls)))) - return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(4)<<8, ts+3666, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40717) + return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(4)<<8, ts+3666, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40721) } if int32((*UnixFile)(unsafe.Pointer(pFile)).FctrlFlags)&UNIXFILE_DIRSYNC != 0 { rc = (*(*func(*libc.TLS, uintptr, uintptr) int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 17*24 + 8)))(tls, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, bp) if rc == SQLITE_OK { full_fsync(tls, *(*int32)(unsafe.Pointer(bp)), 0, 0) - robust_close(tls, pFile, *(*int32)(unsafe.Pointer(bp)), 40731) + robust_close(tls, pFile, *(*int32)(unsafe.Pointer(bp)), 40735) } else { rc = SQLITE_OK } @@ -16708,7 +16710,7 @@ rc = robust_ftruncate(tls, (*UnixFile)(unsafe.Pointer(pFile)).Fh, nByte) if rc != 0 { storeLastErrno(tls, pFile, *(*int32)(unsafe.Pointer(libc.X__errno_location(tls)))) - return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(6)<<8, ts+3297, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40762) + return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(6)<<8, ts+3297, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40766) } else { if nByte < (*UnixFile)(unsafe.Pointer(pFile)).FmmapSize { (*UnixFile)(unsafe.Pointer(pFile)).FmmapSize = nByte @@ -16776,7 +16778,7 @@ if (*UnixFile)(unsafe.Pointer(pFile)).FszChunk <= 0 { if robust_ftruncate(tls, (*UnixFile)(unsafe.Pointer(pFile)).Fh, nByte) != 0 { storeLastErrno(tls, pFile, *(*int32)(unsafe.Pointer(libc.X__errno_location(tls)))) - return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(6)<<8, ts+3297, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40883) + return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(6)<<8, ts+3297, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40887) } } @@ -17003,7 +17005,7 @@ } Xsqlite3_free(tls, (*UnixShmNode)(unsafe.Pointer(p)).FapRegion) if 
(*UnixShmNode)(unsafe.Pointer(p)).FhShm >= 0 { - robust_close(tls, pFd, (*UnixShmNode)(unsafe.Pointer(p)).FhShm, 41442) + robust_close(tls, pFd, (*UnixShmNode)(unsafe.Pointer(p)).FhShm, 41446) (*UnixShmNode)(unsafe.Pointer(p)).FhShm = -1 } (*UnixInodeInfo)(unsafe.Pointer((*UnixShmNode)(unsafe.Pointer(p)).FpInode)).FpShmNode = uintptr(0) @@ -17031,7 +17033,7 @@ rc = unixShmSystemLock(tls, pDbFd, F_WRLCK, (22+SQLITE_SHM_NLOCK)*4+SQLITE_SHM_NLOCK, 1) if rc == SQLITE_OK && robust_ftruncate(tls, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FhShm, int64(3)) != 0 { - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(18)<<8, ts+3297, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename, 41499) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(18)<<8, ts+3297, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename, 41503) } } } else if int32((*flock)(unsafe.Pointer(bp+8)).Fl_type) == F_WRLCK { @@ -17130,7 +17132,7 @@ if !((*unixShmNode)(unsafe.Pointer(pShmNode)).FhShm < 0) { goto __10 } - rc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 41624), ts+3261, zShm, 41624) + rc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 41628), ts+3261, zShm, 41628) goto shm_open_err __10: ; @@ -17260,7 +17262,7 @@ goto __14 } zFile = (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(19)<<8, ts+3332, zFile, 41768) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(19)<<8, ts+3332, zFile, 41772) goto shmpage_out __14: ; @@ -17306,7 +17308,7 @@ if !(pMem == libc.UintptrFromInt32(-1)) { goto __20 } - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(21)<<8, ts+3419, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename, 41795) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(21)<<8, ts+3419, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename, 41799) goto shmpage_out __20: ; @@ -17537,7 +17539,7 @@ if pNew == libc.UintptrFromInt32(-1) { pNew = uintptr(0) nNew = int64(0) - unixLogErrorAtLine(tls, SQLITE_OK, zErr, (*UnixFile)(unsafe.Pointer(pFd)).FzPath, 42169) + unixLogErrorAtLine(tls, SQLITE_OK, zErr, (*UnixFile)(unsafe.Pointer(pFd)).FzPath, 42173) (*UnixFile)(unsafe.Pointer(pFd)).FmmapSizeMax = int64(0) } @@ -17671,7 +17673,7 @@ unixEnterMutex(tls) rc = findInodeInfo(tls, pNew, pNew+16) if rc != SQLITE_OK { - robust_close(tls, pNew, h, 42672) + robust_close(tls, pNew, h, 42676) h = -1 } unixLeaveMutex(tls) @@ -17692,7 +17694,7 @@ storeLastErrno(tls, pNew, 0) if rc != SQLITE_OK { if h >= 0 { - robust_close(tls, pNew, h, 42757) + robust_close(tls, pNew, h, 42761) } } else { (*Sqlite3_file)(unsafe.Pointer(pId)).FpMethods = pLockingStyle @@ -18008,7 +18010,7 @@ if !(fd < 0) { goto __19 } - rc2 = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43198), ts+3261, zName, 43198) + rc2 = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43202), ts+3261, zName, 43202) if !(rc == SQLITE_OK) { goto __20 } @@ -18099,7 +18101,7 @@ if *(*int32)(unsafe.Pointer(libc.X__errno_location(tls))) == ENOENT { rc = SQLITE_IOERR | int32(23)<<8 } else { - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(10)<<8, ts+3371, zPath, 43337) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(10)<<8, ts+3371, zPath, 43341) } return rc } @@ -18107,9 +18109,9 @@ rc = (*(*func(*libc.TLS, uintptr, uintptr) int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 17*24 + 8)))(tls, zPath, bp) if rc == SQLITE_OK { if full_fsync(tls, *(*int32)(unsafe.Pointer(bp)), 0, 0) != 0 { - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(5)<<8, ts+3788, zPath, 43347) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(5)<<8, 
ts+3788, zPath, 43351) } - robust_close(tls, uintptr(0), *(*int32)(unsafe.Pointer(bp)), 43349) + robust_close(tls, uintptr(0), *(*int32)(unsafe.Pointer(bp)), 43353) } else { rc = SQLITE_OK } @@ -18173,18 +18175,18 @@ zIn = (*DbPath)(unsafe.Pointer(pPath)).FzOut if (*(*func(*libc.TLS, uintptr, uintptr) int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 27*24 + 8)))(tls, zIn, bp) != 0 { if *(*int32)(unsafe.Pointer(libc.X__errno_location(tls))) != ENOENT { - (*DbPath)(unsafe.Pointer(pPath)).Frc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43443), ts+3459, zIn, 43443) + (*DbPath)(unsafe.Pointer(pPath)).Frc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43447), ts+3459, zIn, 43447) } } else if (*stat)(unsafe.Pointer(bp)).Fst_mode&X__mode_t(0170000) == X__mode_t(0120000) { var got Ssize_t if libc.PostIncInt32(&(*DbPath)(unsafe.Pointer(pPath)).FnSymlink, 1) > SQLITE_MAX_SYMLINK { - (*DbPath)(unsafe.Pointer(pPath)).Frc = Xsqlite3CantopenError(tls, 43449) + (*DbPath)(unsafe.Pointer(pPath)).Frc = Xsqlite3CantopenError(tls, 43453) return } got = (*(*func(*libc.TLS, uintptr, uintptr, Size_t) Ssize_t)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls, zIn, bp+144, uint64(unsafe.Sizeof([4098]uint8{}))-uint64(2)) if got <= int64(0) || got >= Ssize_t(unsafe.Sizeof([4098]uint8{}))-int64(2) { - (*DbPath)(unsafe.Pointer(pPath)).Frc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43454), ts+3450, zIn, 43454) + (*DbPath)(unsafe.Pointer(pPath)).Frc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43458), ts+3450, zIn, 43458) return } *(*uint8)(unsafe.Pointer(bp + 144 + uintptr(got))) = uint8(0) @@ -18224,14 +18226,14 @@ (*DbPath)(unsafe.Pointer(bp + 4104)).FzOut = zOut if int32(*(*uint8)(unsafe.Pointer(zPath))) != '/' { if (*(*func(*libc.TLS, uintptr, Size_t) uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 3*24 + 8)))(tls, bp, uint64(unsafe.Sizeof([4098]uint8{}))-uint64(2)) == uintptr(0) { - return unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43512), ts+3279, zPath, 43512) + return unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43516), ts+3279, zPath, 43516) } appendAllPathElements(tls, bp+4104, bp) } appendAllPathElements(tls, bp+4104, zPath) *(*uint8)(unsafe.Pointer(zOut + uintptr((*DbPath)(unsafe.Pointer(bp+4104)).FnUsed))) = uint8(0) if (*DbPath)(unsafe.Pointer(bp+4104)).Frc != 0 || (*DbPath)(unsafe.Pointer(bp+4104)).FnUsed < 2 { - return Xsqlite3CantopenError(tls, 43518) + return Xsqlite3CantopenError(tls, 43522) } if (*DbPath)(unsafe.Pointer(bp+4104)).FnSymlink != 0 { return SQLITE_OK | int32(2)<<8 @@ -18332,7 +18334,7 @@ for __ccgo := true; __ccgo; __ccgo = got < 0 && *(*int32)(unsafe.Pointer(libc.X__errno_location(tls))) == EINTR { got = int32((*(*func(*libc.TLS, int32, uintptr, Size_t) Ssize_t)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 8*24 + 8)))(tls, fd, zBuf, uint64(nBuf))) } - robust_close(tls, uintptr(0), fd, 43619) + robust_close(tls, uintptr(0), fd, 43623) } } @@ -19765,7 +19767,7 @@ libc.Xmemset(tls, pPgHdr+32, 0, uint64(unsafe.Sizeof(PgHdr{}))-uint64(uintptr(0)+32)) (*PgHdr)(unsafe.Pointer(pPgHdr)).FpPage = pPage (*PgHdr)(unsafe.Pointer(pPgHdr)).FpData = (*Sqlite3_pcache_page)(unsafe.Pointer(pPage)).FpBuf - (*PgHdr)(unsafe.Pointer(pPgHdr)).FpExtra = pPgHdr + 1*72 + (*PgHdr)(unsafe.Pointer(pPgHdr)).FpExtra = pPgHdr + 1*80 libc.Xmemset(tls, (*PgHdr)(unsafe.Pointer(pPgHdr)).FpExtra, 0, uint64(8)) (*PgHdr)(unsafe.Pointer(pPgHdr)).FpCache = pCache (*PgHdr)(unsafe.Pointer(pPgHdr)).Fpgno = pgno @@ -19795,7 +19797,7 @@ 
// reference count drops to 0, then it is made eligible for recycling. func Xsqlite3PcacheRelease(tls *libc.TLS, p uintptr) { (*PCache)(unsafe.Pointer((*PgHdr)(unsafe.Pointer(p)).FpCache)).FnRefSum-- - if int32(libc.PreDecInt16(&(*PgHdr)(unsafe.Pointer(p)).FnRef, 1)) == 0 { + if libc.PreDecInt64(&(*PgHdr)(unsafe.Pointer(p)).FnRef, 1) == int64(0) { if int32((*PgHdr)(unsafe.Pointer(p)).Fflags)&PGHDR_CLEAN != 0 { pcacheUnpin(tls, p) } else { @@ -19846,7 +19848,7 @@ *(*U16)(unsafe.Pointer(p + 52)) &= libc.Uint16FromInt32(libc.CplInt32(PGHDR_DIRTY | PGHDR_NEED_SYNC | PGHDR_WRITEABLE)) *(*U16)(unsafe.Pointer(p + 52)) |= U16(PGHDR_CLEAN) - if int32((*PgHdr)(unsafe.Pointer(p)).FnRef) == 0 { + if (*PgHdr)(unsafe.Pointer(p)).FnRef == int64(0) { pcacheUnpin(tls, p) } } @@ -19950,8 +19952,8 @@ } func pcacheMergeDirtyList(tls *libc.TLS, pA uintptr, pB uintptr) uintptr { - bp := tls.Alloc(72) - defer tls.Free(72) + bp := tls.Alloc(80) + defer tls.Free(80) var pTail uintptr pTail = bp @@ -20029,13 +20031,13 @@ // // This is not the total number of pages referenced, but the sum of the // reference count for all pages. -func Xsqlite3PcacheRefCount(tls *libc.TLS, pCache uintptr) int32 { +func Xsqlite3PcacheRefCount(tls *libc.TLS, pCache uintptr) I64 { return (*PCache)(unsafe.Pointer(pCache)).FnRefSum } // Return the number of references to the page supplied as an argument. -func Xsqlite3PcachePageRefcount(tls *libc.TLS, p uintptr) int32 { - return int32((*PgHdr)(unsafe.Pointer(p)).FnRef) +func Xsqlite3PcachePageRefcount(tls *libc.TLS, p uintptr) I64 { + return (*PgHdr)(unsafe.Pointer(p)).FnRef } // Return the total number of pages in the cache. @@ -22329,7 +22331,7 @@ pPg = Xsqlite3PagerLookup(tls, pPager, iPg) if pPg != 0 { - if Xsqlite3PcachePageRefcount(tls, pPg) == 1 { + if Xsqlite3PcachePageRefcount(tls, pPg) == int64(1) { Xsqlite3PcacheDrop(tls, pPg) } else { rc = readDbPage(tls, pPg) @@ -22762,7 +22764,7 @@ var pageSize U32 = *(*U32)(unsafe.Pointer(pPageSize)) if (int32((*Pager)(unsafe.Pointer(pPager)).FmemDb) == 0 || (*Pager)(unsafe.Pointer(pPager)).FdbSize == Pgno(0)) && - Xsqlite3PcacheRefCount(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache) == 0 && + Xsqlite3PcacheRefCount(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache) == int64(0) && pageSize != 0 && pageSize != U32((*Pager)(unsafe.Pointer(pPager)).FpageSize) { var pNew uintptr = uintptr(0) *(*I64)(unsafe.Pointer(bp)) = int64(0) @@ -22914,9 +22916,9 @@ Xsqlite3OsUnfetch(tls, (*Pager)(unsafe.Pointer(pPager)).Ffd, I64(pgno-Pgno(1))*(*Pager)(unsafe.Pointer(pPager)).FpageSize, pData) return SQLITE_NOMEM } - (*PgHdr)(unsafe.Pointer(p)).FpExtra = p + 1*72 + (*PgHdr)(unsafe.Pointer(p)).FpExtra = p + 1*80 (*PgHdr)(unsafe.Pointer(p)).Fflags = U16(PGHDR_MMAP) - (*PgHdr)(unsafe.Pointer(p)).FnRef = int16(1) + (*PgHdr)(unsafe.Pointer(p)).FnRef = int64(1) (*PgHdr)(unsafe.Pointer(p)).FpPager = pPager } @@ -23248,7 +23250,7 @@ for rc == SQLITE_OK && pList != 0 { var pNext uintptr = (*PgHdr)(unsafe.Pointer(pList)).FpDirty - if int32((*PgHdr)(unsafe.Pointer(pList)).FnRef) == 0 { + if (*PgHdr)(unsafe.Pointer(pList)).FnRef == int64(0) { rc = pagerStress(tls, pPager, pList) } pList = pNext @@ -23398,7 +23400,7 @@ goto __12 } - rc = Xsqlite3CantopenError(tls, 60235) + rc = Xsqlite3CantopenError(tls, 60239) __12: ; if !(rc != SQLITE_OK) { @@ -23779,7 +23781,7 @@ if !(rc == SQLITE_OK && *(*int32)(unsafe.Pointer(bp + 8))&SQLITE_OPEN_READONLY != 0) { goto __10 } - rc = Xsqlite3CantopenError(tls, 60754) + rc = Xsqlite3CantopenError(tls, 60758) Xsqlite3OsClose(tls, 
(*Pager)(unsafe.Pointer(pPager)).Fjfd) __10: ; @@ -23885,7 +23887,7 @@ } func pagerUnlockIfUnused(tls *libc.TLS, pPager uintptr) { - if Xsqlite3PcacheRefCount(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache) == 0 { + if Xsqlite3PcacheRefCount(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache) == int64(0) { pagerUnlockAndRollback(tls, pPager) } } @@ -23903,7 +23905,7 @@ if !(pgno == Pgno(0)) { goto __1 } - return Xsqlite3CorruptError(tls, 60967) + return Xsqlite3CorruptError(tls, 60971) __1: ; *(*uintptr)(unsafe.Pointer(bp)) = Xsqlite3PcacheFetch(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache, pgno, 3) @@ -23942,7 +23944,7 @@ if !(pgno == (*Pager)(unsafe.Pointer(pPager)).FlckPgno) { goto __7 } - rc = Xsqlite3CorruptError(tls, 60999) + rc = Xsqlite3CorruptError(tls, 61003) goto pager_acquire_err __7: ; @@ -24019,7 +24021,7 @@ (int32((*Pager)(unsafe.Pointer(pPager)).FeState) == PAGER_READER || flags&PAGER_GET_READONLY != 0)) if pgno <= Pgno(1) && pgno == Pgno(0) { - return Xsqlite3CorruptError(tls, 61078) + return Xsqlite3CorruptError(tls, 61082) } if bMmapOk != 0 && (*Pager)(unsafe.Pointer(pPager)).FpWal != uintptr(0) { @@ -24777,7 +24779,7 @@ // Return the number of references to the specified page. func Xsqlite3PagerPageRefcount(tls *libc.TLS, pPage uintptr) int32 { - return Xsqlite3PcachePageRefcount(tls, pPage) + return int32(Xsqlite3PcachePageRefcount(tls, pPage)) } // Parameter eStat must be one of SQLITE_DBSTATUS_CACHE_HIT, _MISS, _WRITE, @@ -25020,9 +25022,9 @@ pPgOld = Xsqlite3PagerLookup(tls, pPager, pgno) if pPgOld != 0 { - if int32((*PgHdr)(unsafe.Pointer(pPgOld)).FnRef) > 1 { + if (*PgHdr)(unsafe.Pointer(pPgOld)).FnRef > int64(1) { Xsqlite3PagerUnrefNotNull(tls, pPgOld) - return Xsqlite3CorruptError(tls, 62623) + return Xsqlite3CorruptError(tls, 62627) } *(*U16)(unsafe.Pointer(pPg + 52)) |= U16(int32((*PgHdr)(unsafe.Pointer(pPgOld)).Fflags) & PGHDR_NEED_SYNC) if (*Pager)(unsafe.Pointer(pPager)).FtempFile != 0 { @@ -25779,7 +25781,7 @@ nCollide = idx for iKey = walHash(tls, iPage); *(*Ht_slot)(unsafe.Pointer((*WalHashLoc)(unsafe.Pointer(bp)).FaHash + uintptr(iKey)*2)) != 0; iKey = walNextHash(tls, iKey) { if libc.PostDecInt32(&nCollide, 1) == 0 { - return Xsqlite3CorruptError(tls, 64387) + return Xsqlite3CorruptError(tls, 64391) } } *(*U32)(unsafe.Pointer((*WalHashLoc)(unsafe.Pointer(bp)).FaPgno + uintptr(idx-1)*4)) = iPage @@ -25878,7 +25880,7 @@ if !(version != U32(WAL_MAX_VERSION)) { goto __7 } - rc = Xsqlite3CantopenError(tls, 64519) + rc = Xsqlite3CantopenError(tls, 64523) goto finished __7: ; @@ -26464,7 +26466,7 @@ goto __14 } - rc = Xsqlite3CorruptError(tls, 65333) + rc = Xsqlite3CorruptError(tls, 65337) goto __15 __14: Xsqlite3OsFileControlHint(tls, (*Wal)(unsafe.Pointer(pWal)).FpDbFd, SQLITE_FCNTL_SIZE_HINT, bp+16) @@ -26739,7 +26741,7 @@ } if badHdr == 0 && (*Wal)(unsafe.Pointer(pWal)).Fhdr.FiVersion != U32(WALINDEX_MAX_VERSION) { - rc = Xsqlite3CantopenError(tls, 65682) + rc = Xsqlite3CantopenError(tls, 65686) } if (*Wal)(unsafe.Pointer(pWal)).FbShmUnreliable != 0 { if rc != SQLITE_OK { @@ -27212,7 +27214,7 @@ iRead = iFrame } if libc.PostDecInt32(&nCollide, 1) == 0 { - return Xsqlite3CorruptError(tls, 66419) + return Xsqlite3CorruptError(tls, 66423) } iKey = walNextHash(tls, iKey) } @@ -27717,7 +27719,7 @@ if rc == SQLITE_OK { if (*Wal)(unsafe.Pointer(pWal)).Fhdr.FmxFrame != 0 && walPagesize(tls, pWal) != nBuf { - rc = Xsqlite3CorruptError(tls, 67138) + rc = Xsqlite3CorruptError(tls, 67142) } else { rc = walCheckpoint(tls, pWal, db, eMode2, xBusy2, pBusyArg, sync_flags, 
zBuf) } @@ -28375,7 +28377,7 @@ } Xsqlite3VdbeRecordUnpack(tls, pKeyInfo, int32(nKey), pKey, pIdxKey) if int32((*UnpackedRecord)(unsafe.Pointer(pIdxKey)).FnField) == 0 || int32((*UnpackedRecord)(unsafe.Pointer(pIdxKey)).FnField) > int32((*KeyInfo)(unsafe.Pointer(pKeyInfo)).FnAllField) { - rc = Xsqlite3CorruptError(tls, 69249) + rc = Xsqlite3CorruptError(tls, 69253) } else { rc = Xsqlite3BtreeIndexMoveto(tls, pCur, pIdxKey, pRes) } @@ -28512,7 +28514,7 @@ if !(key == Pgno(0)) { goto __2 } - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69430) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69434) return __2: ; @@ -28529,7 +28531,7 @@ goto __4 } - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69443) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69447) goto ptrmap_exit __4: ; @@ -28537,7 +28539,7 @@ if !(offset < 0) { goto __5 } - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69448) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69452) goto ptrmap_exit __5: ; @@ -28580,7 +28582,7 @@ offset = int32(Pgno(5) * (key - Pgno(iPtrmap) - Pgno(1))) if offset < 0 { Xsqlite3PagerUnref(tls, *(*uintptr)(unsafe.Pointer(bp))) - return Xsqlite3CorruptError(tls, 69493) + return Xsqlite3CorruptError(tls, 69497) } *(*U8)(unsafe.Pointer(pEType)) = *(*U8)(unsafe.Pointer(pPtrmap + uintptr(offset))) @@ -28590,7 +28592,7 @@ Xsqlite3PagerUnref(tls, *(*uintptr)(unsafe.Pointer(bp))) if int32(*(*U8)(unsafe.Pointer(pEType))) < 1 || int32(*(*U8)(unsafe.Pointer(pEType))) > 5 { - return Xsqlite3CorruptError(tls, 69501) + return Xsqlite3CorruptError(tls, 69505) } return SQLITE_OK } @@ -28840,7 +28842,7 @@ if U32((*CellInfo)(unsafe.Pointer(bp)).FnLocal) < (*CellInfo)(unsafe.Pointer(bp)).FnPayload { var ovfl Pgno if Uptr((*MemPage)(unsafe.Pointer(pSrc)).FaDataEnd) >= Uptr(pCell) && Uptr((*MemPage)(unsafe.Pointer(pSrc)).FaDataEnd) < Uptr(pCell+uintptr((*CellInfo)(unsafe.Pointer(bp)).FnLocal)) { - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69893) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69897) return } ovfl = Xsqlite3Get4byte(tls, pCell+uintptr(int32((*CellInfo)(unsafe.Pointer(bp)).FnSize)-4)) @@ -28887,7 +28889,7 @@ if !(iFree > usableSize-4) { goto __2 } - return Xsqlite3CorruptError(tls, 69951) + return Xsqlite3CorruptError(tls, 69955) __2: ; if !(iFree != 0) { @@ -28897,7 +28899,7 @@ if !(iFree2 > usableSize-4) { goto __4 } - return Xsqlite3CorruptError(tls, 69954) + return Xsqlite3CorruptError(tls, 69958) __4: ; if !(0 == iFree2 || int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree2)))) == 0 && int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree2+1)))) == 0) { @@ -28910,7 +28912,7 @@ if !(top >= iFree) { goto __6 } - return Xsqlite3CorruptError(tls, 69962) + return Xsqlite3CorruptError(tls, 69966) __6: ; if !(iFree2 != 0) { @@ -28919,14 +28921,14 @@ if !(iFree+sz > iFree2) { goto __9 } - return Xsqlite3CorruptError(tls, 69965) + return Xsqlite3CorruptError(tls, 69969) __9: ; sz2 = int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree2+2))))<<8 | int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree2+2) + 1))) if !(iFree2+sz2 > usableSize) { goto __10 } - return Xsqlite3CorruptError(tls, 69967) + return Xsqlite3CorruptError(tls, 69971) __10: ; libc.Xmemmove(tls, data+uintptr(iFree+sz+sz2), data+uintptr(iFree+sz), uint64(iFree2-(iFree+sz))) @@ -28936,7 +28938,7 @@ if !(iFree+sz > usableSize) { goto __11 } - return Xsqlite3CorruptError(tls, 69971) + return Xsqlite3CorruptError(tls, 69975) __11: ; __8: @@ -29000,7 
+29002,7 @@ if !(pc < iCellStart || pc > iCellLast) { goto __22 } - return Xsqlite3CorruptError(tls, 70004) + return Xsqlite3CorruptError(tls, 70008) __22: ; size = int32((*struct { @@ -29010,7 +29012,7 @@ if !(cbrk < iCellStart || pc+size > usableSize) { goto __23 } - return Xsqlite3CorruptError(tls, 70010) + return Xsqlite3CorruptError(tls, 70014) __23: ; *(*U8)(unsafe.Pointer(pAddr1)) = U8(cbrk >> 8) @@ -29032,7 +29034,7 @@ if !(int32(*(*uint8)(unsafe.Pointer(data + uintptr(hdr+7))))+cbrk-iCellFirst != (*MemPage)(unsafe.Pointer(pPage)).FnFree) { goto __24 } - return Xsqlite3CorruptError(tls, 70024) + return Xsqlite3CorruptError(tls, 70028) __24: ; *(*uint8)(unsafe.Pointer(data + uintptr(hdr+5))) = U8(cbrk >> 8) @@ -29067,7 +29069,7 @@ *(*U8)(unsafe.Pointer(aData + uintptr(hdr+7))) += U8(int32(U8(x))) return aData + uintptr(pc) } else if x+pc > maxPC { - *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70081) + *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70085) return uintptr(0) } else { *(*U8)(unsafe.Pointer(aData + uintptr(pc+2))) = U8(x >> 8) @@ -29080,13 +29082,13 @@ pc = int32(*(*U8)(unsafe.Pointer(pTmp)))<<8 | int32(*(*U8)(unsafe.Pointer(pTmp + 1))) if pc <= iAddr { if pc != 0 { - *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70096) + *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70100) } return uintptr(0) } } if pc > maxPC+nByte-4 { - *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70103) + *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70107) } return uintptr(0) } @@ -29111,7 +29113,7 @@ if top == 0 && (*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize == U32(65536) { top = 65536 } else { - return Xsqlite3CorruptError(tls, 70152) + return Xsqlite3CorruptError(tls, 70156) } } @@ -29122,7 +29124,7 @@ *(*int32)(unsafe.Pointer(pIdx)) = libc.AssignInt32(&g2, int32((int64(pSpace)-int64(data))/1)) if g2 <= gap { - return Xsqlite3CorruptError(tls, 70170) + return Xsqlite3CorruptError(tls, 70174) } else { return SQLITE_OK } @@ -29174,22 +29176,22 @@ if int32(iFreeBlk) == 0 { break } - return Xsqlite3CorruptError(tls, 70249) + return Xsqlite3CorruptError(tls, 70253) } iPtr = iFreeBlk } if U32(iFreeBlk) > (*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize-U32(4) { - return Xsqlite3CorruptError(tls, 70254) + return Xsqlite3CorruptError(tls, 70258) } if iFreeBlk != 0 && iEnd+U32(3) >= U32(iFreeBlk) { nFrag = U8(U32(iFreeBlk) - iEnd) if iEnd > U32(iFreeBlk) { - return Xsqlite3CorruptError(tls, 70266) + return Xsqlite3CorruptError(tls, 70270) } iEnd = U32(int32(iFreeBlk) + (int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(iFreeBlk)+2))))<<8 | int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(iFreeBlk)+2) + 1))))) if iEnd > (*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize { - return Xsqlite3CorruptError(tls, 70269) + return Xsqlite3CorruptError(tls, 70273) } iSize = U16(iEnd - U32(iStart)) iFreeBlk = U16(int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFreeBlk))))<<8 | int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFreeBlk) + 1)))) @@ -29199,7 +29201,7 @@ var iPtrEnd int32 = int32(iPtr) + (int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(iPtr)+2))))<<8 | int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(iPtr)+2) + 1)))) if iPtrEnd+3 >= int32(iStart) { if iPtrEnd > int32(iStart) { - return Xsqlite3CorruptError(tls, 70282) + return Xsqlite3CorruptError(tls, 70286) } nFrag = U8(int32(nFrag) + (int32(iStart) - iPtrEnd)) iSize = 
U16(iEnd - U32(iPtr)) @@ -29207,7 +29209,7 @@ } } if int32(nFrag) > int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+7)))) { - return Xsqlite3CorruptError(tls, 70288) + return Xsqlite3CorruptError(tls, 70292) } *(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+7))) -= uint8(int32(nFrag)) } @@ -29215,10 +29217,10 @@ x = U16(int32(*(*U8)(unsafe.Pointer(pTmp)))<<8 | int32(*(*U8)(unsafe.Pointer(pTmp + 1)))) if int32(iStart) <= int32(x) { if int32(iStart) < int32(x) { - return Xsqlite3CorruptError(tls, 70297) + return Xsqlite3CorruptError(tls, 70301) } if int32(iPtr) != int32(hdr)+1 { - return Xsqlite3CorruptError(tls, 70298) + return Xsqlite3CorruptError(tls, 70302) } *(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+1))) = U8(int32(iFreeBlk) >> 8) *(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+1) + 1)) = U8(iFreeBlk) @@ -29278,7 +29280,7 @@ (*MemPage)(unsafe.Pointer(pPage)).FxParseCell = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr) }{btreeParseCellPtrIndex})) - return Xsqlite3CorruptError(tls, 70357) + return Xsqlite3CorruptError(tls, 70361) } } else { (*MemPage)(unsafe.Pointer(pPage)).FchildPtrSize = U8(4) @@ -29314,7 +29316,7 @@ (*MemPage)(unsafe.Pointer(pPage)).FxParseCell = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr) }{btreeParseCellPtrIndex})) - return Xsqlite3CorruptError(tls, 70381) + return Xsqlite3CorruptError(tls, 70385) } } return SQLITE_OK @@ -29344,11 +29346,11 @@ var next U32 var size U32 if pc < top { - return Xsqlite3CorruptError(tls, 70432) + return Xsqlite3CorruptError(tls, 70436) } for 1 != 0 { if pc > iCellLast { - return Xsqlite3CorruptError(tls, 70437) + return Xsqlite3CorruptError(tls, 70441) } next = U32(int32(*(*U8)(unsafe.Pointer(data + uintptr(pc))))<<8 | int32(*(*U8)(unsafe.Pointer(data + uintptr(pc) + 1)))) size = U32(int32(*(*U8)(unsafe.Pointer(data + uintptr(pc+2))))<<8 | int32(*(*U8)(unsafe.Pointer(data + uintptr(pc+2) + 1)))) @@ -29359,15 +29361,15 @@ pc = int32(next) } if next > U32(0) { - return Xsqlite3CorruptError(tls, 70447) + return Xsqlite3CorruptError(tls, 70451) } if U32(pc)+size > uint32(usableSize) { - return Xsqlite3CorruptError(tls, 70451) + return Xsqlite3CorruptError(tls, 70455) } } if nFree > usableSize || nFree < iCellFirst { - return Xsqlite3CorruptError(tls, 70463) + return Xsqlite3CorruptError(tls, 70467) } (*MemPage)(unsafe.Pointer(pPage)).FnFree = int32(U16(nFree - iCellFirst)) return SQLITE_OK @@ -29395,14 +29397,14 @@ pc = int32(*(*U8)(unsafe.Pointer(data + uintptr(cellOffset+i*2))))<<8 | int32(*(*U8)(unsafe.Pointer(data + uintptr(cellOffset+i*2) + 1))) if pc < iCellFirst || pc > iCellLast { - return Xsqlite3CorruptError(tls, 70494) + return Xsqlite3CorruptError(tls, 70498) } sz = int32((*struct { f func(*libc.TLS, uintptr, uintptr) U16 })(unsafe.Pointer(&struct{ uintptr }{(*MemPage)(unsafe.Pointer(pPage)).FxCellSize})).f(tls, pPage, data+uintptr(pc))) if pc+sz > usableSize { - return Xsqlite3CorruptError(tls, 70499) + return Xsqlite3CorruptError(tls, 70503) } } return SQLITE_OK @@ -29416,7 +29418,7 @@ data = (*MemPage)(unsafe.Pointer(pPage)).FaData + uintptr((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset) if decodeFlags(tls, pPage, int32(*(*U8)(unsafe.Pointer(data)))) != 0 { - return Xsqlite3CorruptError(tls, 70531) + return Xsqlite3CorruptError(tls, 70535) } (*MemPage)(unsafe.Pointer(pPage)).FmaskPage = U16((*BtShared)(unsafe.Pointer(pBt)).FpageSize - U32(1)) @@ -29428,7 +29430,7 @@ (*MemPage)(unsafe.Pointer(pPage)).FnCell = 
U16(int32(*(*U8)(unsafe.Pointer(data + 3)))<<8 | int32(*(*U8)(unsafe.Pointer(data + 3 + 1)))) if U32((*MemPage)(unsafe.Pointer(pPage)).FnCell) > ((*BtShared)(unsafe.Pointer(pBt)).FpageSize-U32(8))/U32(6) { - return Xsqlite3CorruptError(tls, 70545) + return Xsqlite3CorruptError(tls, 70549) } (*MemPage)(unsafe.Pointer(pPage)).FnFree = -1 @@ -29531,7 +29533,7 @@ if !(pgno > btreePagecount(tls, pBt)) { goto __1 } - rc = Xsqlite3CorruptError(tls, 70700) + rc = Xsqlite3CorruptError(tls, 70704) goto getAndInitPage_error1 __1: ; @@ -29559,7 +29561,7 @@ if !(pCur != 0 && (int32((*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppPage)))).FnCell) < 1 || int32((*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppPage)))).FintKey) != int32((*BtCursor)(unsafe.Pointer(pCur)).FcurIntKey))) { goto __5 } - rc = Xsqlite3CorruptError(tls, 70721) + rc = Xsqlite3CorruptError(tls, 70725) goto getAndInitPage_error2 __5: ; @@ -29598,7 +29600,7 @@ if Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppPage)))).FpDbPage) > 1 { releasePage(tls, *(*uintptr)(unsafe.Pointer(ppPage))) *(*uintptr)(unsafe.Pointer(ppPage)) = uintptr(0) - return Xsqlite3CorruptError(tls, 70787) + return Xsqlite3CorruptError(tls, 70791) } (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppPage)))).FisInit = U8(0) } else { @@ -30481,7 +30483,7 @@ if !(Xsqlite3WritableSchema(tls, (*BtShared)(unsafe.Pointer(pBt)).Fdb) == 0) { goto __18 } - rc = Xsqlite3CorruptError(tls, 71722) + rc = Xsqlite3CorruptError(tls, 71726) goto page1_init_failed goto __19 __18: @@ -30896,7 +30898,7 @@ if int32(eType) == PTRMAP_OVERFLOW2 { if Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer(pPage)).FaData) != iFrom { - return Xsqlite3CorruptError(tls, 72143) + return Xsqlite3CorruptError(tls, 72147) } Xsqlite3Put4byte(tls, (*MemPage)(unsafe.Pointer(pPage)).FaData, iTo) } else { @@ -30922,7 +30924,7 @@ })(unsafe.Pointer(&struct{ uintptr }{(*MemPage)(unsafe.Pointer(pPage)).FxParseCell})).f(tls, pPage, pCell, bp) if U32((*CellInfo)(unsafe.Pointer(bp)).FnLocal) < (*CellInfo)(unsafe.Pointer(bp)).FnPayload { if pCell+uintptr((*CellInfo)(unsafe.Pointer(bp)).FnSize) > (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr((*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize) { - return Xsqlite3CorruptError(tls, 72162) + return Xsqlite3CorruptError(tls, 72166) } if iFrom == Xsqlite3Get4byte(tls, pCell+uintptr((*CellInfo)(unsafe.Pointer(bp)).FnSize)-uintptr(4)) { Xsqlite3Put4byte(tls, pCell+uintptr((*CellInfo)(unsafe.Pointer(bp)).FnSize)-uintptr(4), iTo) @@ -30931,7 +30933,7 @@ } } else { if pCell+uintptr(4) > (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr((*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize) { - return Xsqlite3CorruptError(tls, 72171) + return Xsqlite3CorruptError(tls, 72175) } if Xsqlite3Get4byte(tls, pCell) == iFrom { Xsqlite3Put4byte(tls, pCell, iTo) @@ -30942,7 +30944,7 @@ if i == nCell { if int32(eType) != PTRMAP_BTREE || Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr(int32((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset)+8)) != iFrom { - return Xsqlite3CorruptError(tls, 72183) + return Xsqlite3CorruptError(tls, 72187) } Xsqlite3Put4byte(tls, (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr(int32((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset)+8), iTo) } @@ -30958,7 +30960,7 @@ var pPager uintptr = (*BtShared)(unsafe.Pointer(pBt)).FpPager if iDbPage < Pgno(3) { - return Xsqlite3CorruptError(tls, 72218) + return Xsqlite3CorruptError(tls, 72222) } 
*(*int32)(unsafe.Pointer(bp)) = Xsqlite3PagerMovepage(tls, pPager, (*MemPage)(unsafe.Pointer(pDbPage)).FpDbPage, iFreePage, isCommit) @@ -31019,7 +31021,7 @@ return rc } if int32(*(*U8)(unsafe.Pointer(bp))) == PTRMAP_ROOTPAGE { - return Xsqlite3CorruptError(tls, 72316) + return Xsqlite3CorruptError(tls, 72320) } if int32(*(*U8)(unsafe.Pointer(bp))) == PTRMAP_FREEPAGE { @@ -31054,7 +31056,7 @@ releasePage(tls, *(*uintptr)(unsafe.Pointer(bp + 32))) if *(*Pgno)(unsafe.Pointer(bp + 40)) > dbSize { releasePage(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) - return Xsqlite3CorruptError(tls, 72368) + return Xsqlite3CorruptError(tls, 72372) } } @@ -31114,7 +31116,7 @@ var nFin Pgno = finalDbSize(tls, pBt, nOrig, nFree) if nOrig < nFin || nFree >= nOrig { - rc = Xsqlite3CorruptError(tls, 72436) + rc = Xsqlite3CorruptError(tls, 72440) } else if nFree > Pgno(0) { rc = saveAllCursors(tls, pBt, uint32(0), uintptr(0)) if rc == SQLITE_OK { @@ -31153,7 +31155,7 @@ nOrig = btreePagecount(tls, pBt) if ptrmapPageno(tls, pBt, nOrig) == nOrig || nOrig == U32(Xsqlite3PendingByte)/(*BtShared)(unsafe.Pointer(pBt)).FpageSize+U32(1) { - return Xsqlite3CorruptError(tls, 72487) + return Xsqlite3CorruptError(tls, 72491) } nFree = Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer((*BtShared)(unsafe.Pointer(pBt)).FpPage1)).FaData+36) @@ -31184,7 +31186,7 @@ } nFin = finalDbSize(tls, pBt, nOrig, nVac) if nFin > nOrig { - return Xsqlite3CorruptError(tls, 72514) + return Xsqlite3CorruptError(tls, 72518) } if nFin < nOrig { rc = saveAllCursors(tls, pBt, uint32(0), uintptr(0)) @@ -31525,7 +31527,7 @@ if iTable <= Pgno(1) { if iTable < Pgno(1) { - return Xsqlite3CorruptError(tls, 72978) + return Xsqlite3CorruptError(tls, 72982) } else if btreePagecount(tls, pBt) == Pgno(0) { iTable = Pgno(0) } @@ -31769,14 +31771,14 @@ var pBt uintptr = (*BtCursor)(unsafe.Pointer(pCur)).FpBt if int32((*BtCursor)(unsafe.Pointer(pCur)).Fix) >= int32((*MemPage)(unsafe.Pointer(pPage)).FnCell) { - return Xsqlite3CorruptError(tls, 73385) + return Xsqlite3CorruptError(tls, 73389) } getCellInfo(tls, pCur) aPayload = (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload if Uptr((int64(aPayload)-int64((*MemPage)(unsafe.Pointer(pPage)).FaData))/1) > Uptr((*BtShared)(unsafe.Pointer(pBt)).FusableSize-U32((*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal)) { - return Xsqlite3CorruptError(tls, 73400) + return Xsqlite3CorruptError(tls, 73404) } if offset < U32((*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal) { @@ -31821,7 +31823,7 @@ for *(*Pgno)(unsafe.Pointer(bp)) != 0 { if *(*Pgno)(unsafe.Pointer(bp)) > (*BtShared)(unsafe.Pointer(pBt)).FnPage { - return Xsqlite3CorruptError(tls, 73462) + return Xsqlite3CorruptError(tls, 73466) } *(*Pgno)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pCur)).FaOverflow + uintptr(iIdx)*4)) = *(*Pgno)(unsafe.Pointer(bp)) @@ -31870,7 +31872,7 @@ } if rc == SQLITE_OK && amt > U32(0) { - return Xsqlite3CorruptError(tls, 73547) + return Xsqlite3CorruptError(tls, 73551) } return rc } @@ -31950,7 +31952,7 @@ func moveToChild(tls *libc.TLS, pCur uintptr, newPgno U32) int32 { if int32((*BtCursor)(unsafe.Pointer(pCur)).FiPage) >= BTCURSOR_MAX_DEPTH-1 { - return Xsqlite3CorruptError(tls, 73684) + return Xsqlite3CorruptError(tls, 73688) } (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnSize = U16(0) *(*U8)(unsafe.Pointer(pCur + 1)) &= libc.Uint8FromInt32(libc.CplInt32(BTCF_ValidNKey | BTCF_ValidOvfl)) @@ -32041,7 +32043,7 @@ if !(int32((*MemPage)(unsafe.Pointer(pRoot)).FisInit) == 0 || libc.Bool32((*BtCursor)(unsafe.Pointer(pCur)).FpKeyInfo == uintptr(0)) 
!= int32((*MemPage)(unsafe.Pointer(pRoot)).FintKey)) { goto __11 } - return Xsqlite3CorruptError(tls, 73823) + return Xsqlite3CorruptError(tls, 73827) __11: ; skip_init: @@ -32061,7 +32063,7 @@ if !((*MemPage)(unsafe.Pointer(pRoot)).Fpgno != Pgno(1)) { goto __16 } - return Xsqlite3CorruptError(tls, 73835) + return Xsqlite3CorruptError(tls, 73839) __16: ; subpage = Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer(pRoot)).FaData+uintptr(int32((*MemPage)(unsafe.Pointer(pRoot)).FhdrOffset)+8)) @@ -32271,7 +32273,7 @@ if !(pCell >= (*MemPage)(unsafe.Pointer(pPage)).FaDataEnd) { goto __21 } - return Xsqlite3CorruptError(tls, 74077) + return Xsqlite3CorruptError(tls, 74081) __21: ; goto __19 @@ -32475,7 +32477,7 @@ if !!(int32((*MemPage)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pCur)).FpPage)).FisInit) != 0) { goto __4 } - return Xsqlite3CorruptError(tls, 74273) + return Xsqlite3CorruptError(tls, 74277) __4: ; goto bypass_moveto_root @@ -32540,7 +32542,7 @@ if !(nCell < 2 || U32(nCell)/(*BtShared)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pCur)).FpBt)).FusableSize > (*BtShared)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pCur)).FpBt)).FnPage) { goto __17 } - rc = Xsqlite3CorruptError(tls, 74360) + rc = Xsqlite3CorruptError(tls, 74364) goto moveto_index_finish __17: ; @@ -32588,7 +32590,7 @@ if !((*UnpackedRecord)(unsafe.Pointer(pIdxKey)).FerrCode != 0) { goto __24 } - rc = Xsqlite3CorruptError(tls, 74392) + rc = Xsqlite3CorruptError(tls, 74396) __24: ; goto moveto_index_finish @@ -32707,7 +32709,7 @@ pPage = (*BtCursor)(unsafe.Pointer(pCur)).FpPage idx = int32(libc.PreIncUint16(&(*BtCursor)(unsafe.Pointer(pCur)).Fix, 1)) if !(int32((*MemPage)(unsafe.Pointer(pPage)).FisInit) != 0) || Xsqlite3FaultSim(tls, 412) != 0 { - return Xsqlite3CorruptError(tls, 74508) + return Xsqlite3CorruptError(tls, 74512) } if idx >= int32((*MemPage)(unsafe.Pointer(pPage)).FnCell) { @@ -32867,7 +32869,7 @@ if !(n >= mxPage) { goto __1 } - return Xsqlite3CorruptError(tls, 74688) + return Xsqlite3CorruptError(tls, 74692) __1: ; if !(n > U32(0)) { @@ -32932,7 +32934,7 @@ if !(iTrunk > mxPage || libc.PostIncUint32(&nSearch, 1) > n) { goto __16 } - rc = Xsqlite3CorruptError(tls, 74744) + rc = Xsqlite3CorruptError(tls, 74748) goto __17 __16: rc = btreeGetUnusedPage(tls, pBt, iTrunk, bp+8, 0) @@ -32968,7 +32970,7 @@ goto __22 } - rc = Xsqlite3CorruptError(tls, 74773) + rc = Xsqlite3CorruptError(tls, 74777) goto end_allocate_page goto __23 __22: @@ -33012,7 +33014,7 @@ if !(iNewTrunk > mxPage) { goto __32 } - rc = Xsqlite3CorruptError(tls, 74807) + rc = Xsqlite3CorruptError(tls, 74811) goto end_allocate_page __32: ; @@ -33124,7 +33126,7 @@ if !(iPage > mxPage || iPage < Pgno(2)) { goto __51 } - rc = Xsqlite3CorruptError(tls, 74872) + rc = Xsqlite3CorruptError(tls, 74876) goto end_allocate_page __51: ; @@ -33282,7 +33284,7 @@ if !(iPage < Pgno(2) || iPage > (*BtShared)(unsafe.Pointer(pBt)).FnPage) { goto __1 } - return Xsqlite3CorruptError(tls, 74999) + return Xsqlite3CorruptError(tls, 75003) __1: ; if !(pMemPage != 0) { @@ -33339,7 +33341,7 @@ if !(iTrunk > btreePagecount(tls, pBt)) { goto __10 } - *(*int32)(unsafe.Pointer(bp + 8)) = Xsqlite3CorruptError(tls, 75046) + *(*int32)(unsafe.Pointer(bp + 8)) = Xsqlite3CorruptError(tls, 75050) goto freepage_out __10: ; @@ -33355,7 +33357,7 @@ if !(nLeaf > (*BtShared)(unsafe.Pointer(pBt)).FusableSize/U32(4)-U32(2)) { goto __12 } - *(*int32)(unsafe.Pointer(bp + 8)) = Xsqlite3CorruptError(tls, 75057) + *(*int32)(unsafe.Pointer(bp + 8)) = Xsqlite3CorruptError(tls, 75061) goto freepage_out 
__12: ; @@ -33429,7 +33431,7 @@ var ovflPageSize U32 if pCell+uintptr((*CellInfo)(unsafe.Pointer(pInfo)).FnSize) > (*MemPage)(unsafe.Pointer(pPage)).FaDataEnd { - return Xsqlite3CorruptError(tls, 75146) + return Xsqlite3CorruptError(tls, 75150) } ovflPgno = Xsqlite3Get4byte(tls, pCell+uintptr((*CellInfo)(unsafe.Pointer(pInfo)).FnSize)-uintptr(4)) pBt = (*MemPage)(unsafe.Pointer(pPage)).FpBt @@ -33441,7 +33443,7 @@ *(*Pgno)(unsafe.Pointer(bp + 8)) = Pgno(0) *(*uintptr)(unsafe.Pointer(bp)) = uintptr(0) if ovflPgno < Pgno(2) || ovflPgno > btreePagecount(tls, pBt) { - return Xsqlite3CorruptError(tls, 75163) + return Xsqlite3CorruptError(tls, 75167) } if nOvfl != 0 { rc = getOverflowPage(tls, pBt, ovflPgno, bp, bp+8) @@ -33452,7 +33454,7 @@ if (*(*uintptr)(unsafe.Pointer(bp)) != 0 || libc.AssignPtrUintptr(bp, btreePageLookup(tls, pBt, ovflPgno)) != uintptr(0)) && Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FpDbPage) != 1 { - rc = Xsqlite3CorruptError(tls, 75183) + rc = Xsqlite3CorruptError(tls, 75187) } else { rc = freePage2(tls, pBt, *(*uintptr)(unsafe.Pointer(bp)), ovflPgno) } @@ -33617,7 +33619,7 @@ hdr = int32((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset) if pc+U32(sz) > (*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize { - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 75436) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 75440) return } rc = freeSpace(tls, pPage, uint16(pc), uint16(sz)) @@ -33896,12 +33898,12 @@ if Uptr(pCell) >= Uptr(aData+uintptr(j)) && Uptr(pCell) < Uptr(pEnd) { if Uptr(pCell+uintptr(sz)) > Uptr(pEnd) { - return Xsqlite3CorruptError(tls, 75737) + return Xsqlite3CorruptError(tls, 75741) } pCell = pTmp + uintptr((int64(pCell)-int64(aData))/1) } else if Uptr(pCell+uintptr(sz)) > Uptr(pSrcEnd) && Uptr(pCell) < Uptr(pSrcEnd) { - return Xsqlite3CorruptError(tls, 75742) + return Xsqlite3CorruptError(tls, 75746) } pData -= uintptr(sz) @@ -33909,7 +33911,7 @@ *(*U8)(unsafe.Pointer(pCellptr + 1)) = U8((int64(pData) - int64(aData)) / 1) pCellptr += uintptr(2) if pData < pCellptr { - return Xsqlite3CorruptError(tls, 75748) + return Xsqlite3CorruptError(tls, 75752) } libc.Xmemmove(tls, pData, pCell, uint64(sz)) @@ -33969,7 +33971,7 @@ if Uptr(*(*uintptr)(unsafe.Pointer((*CellArray)(unsafe.Pointer(pCArray)).FapCell + uintptr(i)*8))+uintptr(sz)) > Uptr(pEnd) && Uptr(*(*uintptr)(unsafe.Pointer((*CellArray)(unsafe.Pointer(pCArray)).FapCell + uintptr(i)*8))) < Uptr(pEnd) { - Xsqlite3CorruptError(tls, 75833) + Xsqlite3CorruptError(tls, 75837) return 1 } libc.Xmemmove(tls, pSlot, *(*uintptr)(unsafe.Pointer((*CellArray)(unsafe.Pointer(pCArray)).FapCell + uintptr(i)*8)), uint64(sz)) @@ -34058,7 +34060,7 @@ if !(nShift > nCell) { goto __2 } - return Xsqlite3CorruptError(tls, 75947) + return Xsqlite3CorruptError(tls, 75951) __2: ; libc.Xmemmove(tls, (*MemPage)(unsafe.Pointer(pPg)).FaCellIdx, (*MemPage)(unsafe.Pointer(pPg)).FaCellIdx+uintptr(nShift*2), uint64(nCell*2)) @@ -34174,7 +34176,7 @@ var pBt uintptr = (*MemPage)(unsafe.Pointer(pPage)).FpBt if int32((*MemPage)(unsafe.Pointer(pPage)).FnCell) == 0 { - return Xsqlite3CorruptError(tls, 76060) + return Xsqlite3CorruptError(tls, 76064) } *(*int32)(unsafe.Pointer(bp + 136)) = allocateBtreePage(tls, pBt, bp, bp+8, uint32(0), uint8(0)) @@ -34494,7 +34496,7 @@ if !(int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pOld)).FaData))) != int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp + 112)))).FaData)))) { 
goto __25 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76481) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76485) goto balance_cleanup __25: ; @@ -34505,7 +34507,7 @@ if !(limit < int32(*(*U16)(unsafe.Pointer(pOld + 28)))) { goto __27 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76505) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76509) goto balance_cleanup __27: ; @@ -34663,7 +34665,7 @@ if !(k > NB+2) { goto __55 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76606) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76610) goto balance_cleanup __55: ; @@ -34737,7 +34739,7 @@ }()) { goto __67 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76639) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76643) goto balance_cleanup __67: ; @@ -34800,7 +34802,7 @@ }()) { goto __75 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76683) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76687) goto balance_cleanup __75: ; @@ -34828,7 +34830,7 @@ *(*int32)(unsafe.Pointer(bp + 172)) == SQLITE_OK) { goto __81 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76716) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76720) __81: ; if !(*(*int32)(unsafe.Pointer(bp + 172)) != 0) { @@ -35089,7 +35091,7 @@ if !(Uptr(pSrcEnd) >= Uptr(pCell1) && Uptr(pSrcEnd) < Uptr(pCell1+uintptr(sz2))) { goto __121 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76916) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76920) goto balance_cleanup __121: ; @@ -35281,7 +35283,7 @@ if pOther != pCur && int32((*BtCursor)(unsafe.Pointer(pOther)).FeState) == CURSOR_VALID && (*BtCursor)(unsafe.Pointer(pOther)).FpPage == (*BtCursor)(unsafe.Pointer(pCur)).FpPage { - return Xsqlite3CorruptError(tls, 77146) + return Xsqlite3CorruptError(tls, 77150) } } return SQLITE_OK @@ -35319,7 +35321,7 @@ break } } else if Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(pPage)).FpDbPage) > 1 { - rc = Xsqlite3CorruptError(tls, 77206) + rc = Xsqlite3CorruptError(tls, 77210) } else { var pParent uintptr = *(*uintptr)(unsafe.Pointer(pCur + 144 + uintptr(iPage-1)*8)) var iIdx int32 = int32(*(*U16)(unsafe.Pointer(pCur + 88 + uintptr(iPage-1)*2))) @@ -35425,7 +35427,7 @@ return rc } if Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FpDbPage) != 1 || (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FisInit != 0 { - rc = Xsqlite3CorruptError(tls, 77370) + rc = Xsqlite3CorruptError(tls, 77374) } else { if U32(iOffset)+ovflPageSize < U32(nTotal) { ovflPgno = Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FaData) @@ -35450,7 +35452,7 @@ if (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload+uintptr((*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal) > (*MemPage)(unsafe.Pointer(pPage)).FaDataEnd || (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload < (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr((*MemPage)(unsafe.Pointer(pPage)).FcellOffset) { - return Xsqlite3CorruptError(tls, 77398) + return Xsqlite3CorruptError(tls, 77402) } if int32((*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal) == nTotal { return btreeOverwriteContent(tls, pPage, (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload, pX, @@ -35520,7 +35522,7 @@ goto __3 } - return Xsqlite3CorruptError(tls, 77479) + return Xsqlite3CorruptError(tls, 77483) 
__3: ; __1: @@ -35633,7 +35635,7 @@ goto __21 } - *(*int32)(unsafe.Pointer(bp + 120)) = Xsqlite3CorruptError(tls, 77602) + *(*int32)(unsafe.Pointer(bp + 120)) = Xsqlite3CorruptError(tls, 77606) goto __22 __21: *(*int32)(unsafe.Pointer(bp + 120)) = btreeComputeFreeSpace(tls, pPage) @@ -35693,6 +35695,7 @@ __25: ; idx = int32((*BtCursor)(unsafe.Pointer(pCur)).Fix) + (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnSize = U16(0) if !(*(*int32)(unsafe.Pointer(bp)) == 0) { goto __31 } @@ -35700,7 +35703,7 @@ if !(idx >= int32((*MemPage)(unsafe.Pointer(pPage)).FnCell)) { goto __33 } - return Xsqlite3CorruptError(tls, 77640) + return Xsqlite3CorruptError(tls, 77645) __33: ; *(*int32)(unsafe.Pointer(bp + 120)) = Xsqlite3PagerWrite(tls, (*MemPage)(unsafe.Pointer(pPage)).FpDbPage) @@ -35738,13 +35741,13 @@ if !(oldCell < (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset)+uintptr(10)) { goto __39 } - return Xsqlite3CorruptError(tls, 77667) + return Xsqlite3CorruptError(tls, 77672) __39: ; if !(oldCell+uintptr(*(*int32)(unsafe.Pointer(bp + 124))) > (*MemPage)(unsafe.Pointer(pPage)).FaDataEnd) { goto __40 } - return Xsqlite3CorruptError(tls, 77670) + return Xsqlite3CorruptError(tls, 77675) __40: ; libc.Xmemcpy(tls, oldCell, newCell, uint64(*(*int32)(unsafe.Pointer(bp + 124)))) @@ -35775,7 +35778,6 @@ ; *(*int32)(unsafe.Pointer(bp + 120)) = insertCell(tls, pPage, idx, newCell, *(*int32)(unsafe.Pointer(bp + 124)), uintptr(0), uint32(0)) - (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnSize = U16(0) if !((*MemPage)(unsafe.Pointer(pPage)).FnOverflow != 0) { goto __44 } @@ -35850,7 +35852,7 @@ nIn = U32((*BtCursor)(unsafe.Pointer(pSrc)).Finfo.FnLocal) aIn = (*BtCursor)(unsafe.Pointer(pSrc)).Finfo.FpPayload if aIn+uintptr(nIn) > (*MemPage)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pSrc)).FpPage)).FaDataEnd { - return Xsqlite3CorruptError(tls, 77773) + return Xsqlite3CorruptError(tls, 77777) } nRem = (*BtCursor)(unsafe.Pointer(pSrc)).Finfo.FnPayload if nIn == nRem && nIn < U32((*MemPage)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pDest)).FpPage)).FmaxLocal) { @@ -35875,7 +35877,7 @@ if nRem > nIn { if aIn+uintptr(nIn)+uintptr(4) > (*MemPage)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pSrc)).FpPage)).FaDataEnd { - return Xsqlite3CorruptError(tls, 77798) + return Xsqlite3CorruptError(tls, 77802) } ovflIn = Xsqlite3Get4byte(tls, (*BtCursor)(unsafe.Pointer(pSrc)).Finfo.FpPayload+uintptr(nIn)) } @@ -35976,7 +35978,7 @@ return *(*int32)(unsafe.Pointer(bp + 24)) } } else { - return Xsqlite3CorruptError(tls, 77894) + return Xsqlite3CorruptError(tls, 77898) } } @@ -35984,11 +35986,11 @@ iCellIdx = int32((*BtCursor)(unsafe.Pointer(pCur)).Fix) pPage = (*BtCursor)(unsafe.Pointer(pCur)).FpPage if int32((*MemPage)(unsafe.Pointer(pPage)).FnCell) <= iCellIdx { - return Xsqlite3CorruptError(tls, 77903) + return Xsqlite3CorruptError(tls, 77907) } pCell = (*MemPage)(unsafe.Pointer(pPage)).FaData + uintptr(int32((*MemPage)(unsafe.Pointer(pPage)).FmaskPage)&(int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FaCellIdx + uintptr(2*iCellIdx))))<<8|int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FaCellIdx + uintptr(2*iCellIdx) + 1))))) if (*MemPage)(unsafe.Pointer(pPage)).FnFree < 0 && btreeComputeFreeSpace(tls, pPage) != 0 { - return Xsqlite3CorruptError(tls, 77907) + return Xsqlite3CorruptError(tls, 77911) } bPreserve = U8(libc.Bool32(int32(flags)&BTREE_SAVEPOSITION != 0)) @@ -36063,7 +36065,7 @@ } pCell = (*MemPage)(unsafe.Pointer(pLeaf)).FaData + 
uintptr(int32((*MemPage)(unsafe.Pointer(pLeaf)).FmaskPage)&(int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pLeaf)).FaCellIdx + uintptr(2*(int32((*MemPage)(unsafe.Pointer(pLeaf)).FnCell)-1)))))<<8|int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pLeaf)).FaCellIdx + uintptr(2*(int32((*MemPage)(unsafe.Pointer(pLeaf)).FnCell)-1)) + 1))))) if pCell < (*MemPage)(unsafe.Pointer(pLeaf)).FaData+4 { - return Xsqlite3CorruptError(tls, 77998) + return Xsqlite3CorruptError(tls, 78002) } nCell = int32((*struct { f func(*libc.TLS, uintptr, uintptr) U16 @@ -36132,7 +36134,7 @@ Xsqlite3BtreeGetMeta(tls, p, BTREE_LARGEST_ROOT_PAGE, bp) if *(*Pgno)(unsafe.Pointer(bp)) > btreePagecount(tls, pBt) { - return Xsqlite3CorruptError(tls, 78114) + return Xsqlite3CorruptError(tls, 78118) } *(*Pgno)(unsafe.Pointer(bp))++ @@ -36161,7 +36163,7 @@ } *(*int32)(unsafe.Pointer(bp + 40)) = ptrmapGet(tls, pBt, *(*Pgno)(unsafe.Pointer(bp)), bp+32, bp+36) if int32(*(*U8)(unsafe.Pointer(bp + 32))) == PTRMAP_ROOTPAGE || int32(*(*U8)(unsafe.Pointer(bp + 32))) == PTRMAP_FREEPAGE { - *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3CorruptError(tls, 78162) + *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3CorruptError(tls, 78166) } if *(*int32)(unsafe.Pointer(bp + 40)) != SQLITE_OK { releasePage(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) @@ -36237,7 +36239,7 @@ if !(pgno > btreePagecount(tls, pBt)) { goto __1 } - return Xsqlite3CorruptError(tls, 78252) + return Xsqlite3CorruptError(tls, 78256) __1: ; *(*int32)(unsafe.Pointer(bp + 32)) = getAndInitPage(tls, pBt, pgno, bp, uintptr(0), 0) @@ -36251,7 +36253,7 @@ Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FpDbPage) != 1+libc.Bool32(pgno == Pgno(1))) { goto __3 } - *(*int32)(unsafe.Pointer(bp + 32)) = Xsqlite3CorruptError(tls, 78259) + *(*int32)(unsafe.Pointer(bp + 32)) = Xsqlite3CorruptError(tls, 78263) goto cleardatabasepage_out __3: ; @@ -36385,7 +36387,7 @@ var pBt uintptr = (*Btree)(unsafe.Pointer(p)).FpBt if iTable > btreePagecount(tls, pBt) { - return Xsqlite3CorruptError(tls, 78363) + return Xsqlite3CorruptError(tls, 78367) } *(*int32)(unsafe.Pointer(bp + 12)) = Xsqlite3BtreeClearTable(tls, p, int32(iTable), uintptr(0)) @@ -38855,7 +38857,7 @@ var rc int32 (*Mem)(unsafe.Pointer(pMem)).Fflags = U16(MEM_Null) if Xsqlite3BtreeMaxRecordSize(tls, pCur) < Sqlite3_int64(offset+amt) { - return Xsqlite3CorruptError(tls, 81630) + return Xsqlite3CorruptError(tls, 81634) } if SQLITE_OK == libc.AssignInt32(&rc, Xsqlite3VdbeMemClearAndResize(tls, pMem, int32(amt+U32(1)))) { rc = Xsqlite3BtreePayload(tls, pCur, offset, amt, (*Mem)(unsafe.Pointer(pMem)).Fz) @@ -39504,7 +39506,7 @@ return Xsqlite3GetVarint32(tls, a, bp) }()) if *(*int32)(unsafe.Pointer(bp)) > nRec || iHdr >= *(*int32)(unsafe.Pointer(bp)) { - return Xsqlite3CorruptError(tls, 82270) + return Xsqlite3CorruptError(tls, 82274) } iField = *(*int32)(unsafe.Pointer(bp)) for i = 0; i <= iCol; i++ { @@ -39519,14 +39521,14 @@ }()) if iHdr > *(*int32)(unsafe.Pointer(bp)) { - return Xsqlite3CorruptError(tls, 82276) + return Xsqlite3CorruptError(tls, 82280) } szField = int32(Xsqlite3VdbeSerialTypeLen(tls, *(*U32)(unsafe.Pointer(bp + 4)))) iField = iField + szField } if iField > nRec { - return Xsqlite3CorruptError(tls, 82282) + return Xsqlite3CorruptError(tls, 82286) } if pMem == uintptr(0) { pMem = libc.AssignPtrUintptr(ppVal, Xsqlite3ValueNew(tls, db)) @@ -41830,7 +41832,7 @@ return rc } if *(*int32)(unsafe.Pointer(bp)) != 0 { - return Xsqlite3CorruptError(tls, 86058) + return 
Xsqlite3CorruptError(tls, 86062) } (*VdbeCursor)(unsafe.Pointer(p)).FdeferredMoveto = U8(0) (*VdbeCursor)(unsafe.Pointer(p)).FcacheStatus = U32(CACHE_STALE) @@ -42381,7 +42383,7 @@ i = 0 } if d1 > uint32(nKey1) { - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 86985)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 86989)) return 0 } @@ -42446,7 +42448,7 @@ if d1+U32((*Mem)(unsafe.Pointer(bp+8)).Fn) > uint32(nKey1) || int32((*KeyInfo)(unsafe.Pointer(libc.AssignUintptr(&pKeyInfo, (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FpKeyInfo))).FnAllField) <= i { - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87062)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87066)) return 0 } else if *(*uintptr)(unsafe.Pointer(pKeyInfo + 32 + uintptr(i)*8)) != 0 { (*Mem)(unsafe.Pointer(bp + 8)).Fenc = (*KeyInfo)(unsafe.Pointer(pKeyInfo)).Fenc @@ -42480,7 +42482,7 @@ var nStr int32 = int32((*(*U32)(unsafe.Pointer(bp + 64)) - U32(12)) / U32(2)) if d1+U32(nStr) > uint32(nKey1) { - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87092)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87096)) return 0 } else if int32((*Mem)(unsafe.Pointer(pRhs)).Fflags)&MEM_Zero != 0 { if !(isAllZero(tls, aKey1+uintptr(d1), nStr) != 0) { @@ -42530,7 +42532,7 @@ } idx1 = idx1 + U32(Xsqlite3VarintLen(tls, uint64(*(*U32)(unsafe.Pointer(bp + 64))))) if idx1 >= *(*U32)(unsafe.Pointer(bp + 4)) { - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87136)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87140)) return 0 } } @@ -42676,7 +42678,7 @@ if !(szHdr+nStr > nKey1) { goto __7 } - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87299)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87303)) return 0 __7: ; @@ -42847,7 +42849,7 @@ idx_rowid_corruption: ; Xsqlite3VdbeMemReleaseMalloc(tls, bp) - return Xsqlite3CorruptError(tls, 87457) + return Xsqlite3CorruptError(tls, 87461) } // Compare the key of the index entry that cursor pC is pointing to against @@ -42873,7 +42875,7 @@ if nCellKey <= int64(0) || nCellKey > int64(0x7fffffff) { *(*int32)(unsafe.Pointer(res)) = 0 - return Xsqlite3CorruptError(tls, 87490) + return Xsqlite3CorruptError(tls, 87494) } Xsqlite3VdbeMemInit(tls, bp, db, uint16(0)) rc = Xsqlite3VdbeMemFromBtreeZeroOffset(tls, pCur, U32(nCellKey), bp) @@ -43147,7 +43149,7 @@ var v uintptr = pStmt var db uintptr = (*Vdbe)(unsafe.Pointer(v)).Fdb if vdbeSafety(tls, v) != 0 { - return Xsqlite3MisuseError(tls, 87854) + return Xsqlite3MisuseError(tls, 87858) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) if (*Vdbe)(unsafe.Pointer(v)).FstartTime > int64(0) { @@ -43787,7 +43789,7 @@ var db uintptr if vdbeSafetyNotNull(tls, v) != 0 { - return Xsqlite3MisuseError(tls, 88544) + return Xsqlite3MisuseError(tls, 88548) } db = (*Vdbe)(unsafe.Pointer(v)).Fdb Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) @@ -44307,7 +44309,7 @@ var pVar uintptr if vdbeSafetyNotNull(tls, p) != 0 { - return Xsqlite3MisuseError(tls, 89208) + return Xsqlite3MisuseError(tls, 89212) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer((*Vdbe)(unsafe.Pointer(p)).Fdb)).Fmutex) if int32((*Vdbe)(unsafe.Pointer(p)).FeVdbeState) != VDBE_READY_STATE { @@ -44315,7 +44317,7 @@ 
Xsqlite3_mutex_leave(tls, (*Sqlite3)(unsafe.Pointer((*Vdbe)(unsafe.Pointer(p)).Fdb)).Fmutex) Xsqlite3_log(tls, SQLITE_MISUSE, ts+5357, libc.VaList(bp, (*Vdbe)(unsafe.Pointer(p)).FzSql)) - return Xsqlite3MisuseError(tls, 89216) + return Xsqlite3MisuseError(tls, 89220) } if i >= uint32((*Vdbe)(unsafe.Pointer(p)).FnVar) { Xsqlite3Error(tls, (*Vdbe)(unsafe.Pointer(p)).Fdb, SQLITE_RANGE) @@ -44730,7 +44732,7 @@ if !(!(p != 0) || (*PreUpdate)(unsafe.Pointer(p)).Fop == SQLITE_INSERT) { goto __1 } - rc = Xsqlite3MisuseError(tls, 89707) + rc = Xsqlite3MisuseError(tls, 89711) goto preupdate_old_out __1: ; @@ -44874,7 +44876,7 @@ if !(!(p != 0) || (*PreUpdate)(unsafe.Pointer(p)).Fop == SQLITE_DELETE) { goto __1 } - rc = Xsqlite3MisuseError(tls, 89809) + rc = Xsqlite3MisuseError(tls, 89813) goto preupdate_new_out __1: ; @@ -45318,10 +45320,6 @@ } else if int32((*Mem)(unsafe.Pointer(p)).Fflags)&MEM_Real != 0 { h = h + U64(Xsqlite3VdbeIntValue(tls, p)) } else if int32((*Mem)(unsafe.Pointer(p)).Fflags)&(MEM_Str|MEM_Blob) != 0 { - h = h + U64((*Mem)(unsafe.Pointer(p)).Fn) - if int32((*Mem)(unsafe.Pointer(p)).Fflags)&MEM_Zero != 0 { - h = h + U64(*(*int32)(unsafe.Pointer(p))) - } } } return h @@ -47969,7 +47967,7 @@ goto __9 goto __425 __424: - rc = Xsqlite3CorruptError(tls, 93317) + rc = Xsqlite3CorruptError(tls, 93320) goto abort_due_to_error __425: ; @@ -49729,7 +49727,7 @@ if !((*Op)(unsafe.Pointer(pOp)).Fp2 == 0) { goto __682 } - rc = Xsqlite3CorruptError(tls, 95560) + rc = Xsqlite3CorruptError(tls, 95563) goto __683 __682: goto jump_to_p2 @@ -50507,7 +50505,7 @@ if !((*Op)(unsafe.Pointer(pOp)).Fp5 != 0 && !(Xsqlite3WritableSchema(tls, db) != 0)) { goto __770 } - rc = Xsqlite3ReportError(tls, SQLITE_CORRUPT|int32(3)<<8, 96635, ts+5866) + rc = Xsqlite3ReportError(tls, SQLITE_CORRUPT|int32(3)<<8, 96638, ts+5866) goto abort_due_to_error __770: ; @@ -50617,7 +50615,7 @@ if !(nCellKey <= int64(0) || nCellKey > int64(0x7fffffff)) { goto __781 } - rc = Xsqlite3CorruptError(tls, 96840) + rc = Xsqlite3CorruptError(tls, 96843) goto abort_due_to_error __781: ; @@ -50811,7 +50809,7 @@ goto __803 } - rc = Xsqlite3CorruptError(tls, 97092) + rc = Xsqlite3CorruptError(tls, 97095) __803: ; Xsqlite3DbFreeNN(tls, db, zSql) @@ -52178,7 +52176,7 @@ if !(rc == SQLITE_IOERR|int32(33)<<8) { goto __957 } - rc = Xsqlite3CorruptError(tls, 99031) + rc = Xsqlite3CorruptError(tls, 99034) __957: ; __956: @@ -52698,7 +52696,7 @@ var db uintptr if p == uintptr(0) { - return Xsqlite3MisuseError(tls, 99516) + return Xsqlite3MisuseError(tls, 99519) } db = (*Incrblob)(unsafe.Pointer(p)).Fdb Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) @@ -52781,7 +52779,7 @@ var db uintptr if p == uintptr(0) { - return Xsqlite3MisuseError(tls, 99616) + return Xsqlite3MisuseError(tls, 99619) } db = (*Incrblob)(unsafe.Pointer(p)).Fdb Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) @@ -56221,14 +56219,10 @@ ; Xsqlite3WalkExpr(tls, pWalker, (*Expr)(unsafe.Pointer(pExpr)).FpLeft) if 0 == Xsqlite3ExprCanBeNull(tls, (*Expr)(unsafe.Pointer(pExpr)).FpLeft) && !(int32((*Parse)(unsafe.Pointer(pParse)).FeParseMode) >= PARSE_MODE_RENAME) { - if int32((*Expr)(unsafe.Pointer(pExpr)).Fop) == TK_NOTNULL { - *(*uintptr)(unsafe.Pointer(pExpr + 8)) = ts + 6764 - *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_IsTrue) - } else { - *(*uintptr)(unsafe.Pointer(pExpr + 8)) = ts + 6769 - *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_IsFalse) - } - (*Expr)(unsafe.Pointer(pExpr)).Fop = U8(TK_TRUEFALSE) + *(*int32)(unsafe.Pointer(pExpr + 8)) = 
libc.Bool32(int32((*Expr)(unsafe.Pointer(pExpr)).Fop) == TK_NOTNULL) + *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_IntValue) + (*Expr)(unsafe.Pointer(pExpr)).Fop = U8(TK_INTEGER) + i = 0 p = pNC __4: @@ -56272,7 +56266,7 @@ var pLeft uintptr = (*Expr)(unsafe.Pointer(pExpr)).FpLeft if (*NameContext)(unsafe.Pointer(pNC)).FncFlags&(NC_IdxExpr|NC_GenCol) != 0 { - notValidImpl(tls, pParse, pNC, ts+6775, uintptr(0), pExpr) + notValidImpl(tls, pParse, pNC, ts+6764, uintptr(0), pExpr) } pRight = (*Expr)(unsafe.Pointer(pExpr)).FpRight @@ -56336,7 +56330,7 @@ (*Expr)(unsafe.Pointer(pExpr)).FiTable = exprProbability(tls, (*ExprList_item)(unsafe.Pointer(pList+8+1*32)).FpExpr) if (*Expr)(unsafe.Pointer(pExpr)).FiTable < 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+6792, libc.VaList(bp, pExpr)) + ts+6781, libc.VaList(bp, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } } else { @@ -56352,7 +56346,7 @@ var auth int32 = Xsqlite3AuthCheck(tls, pParse, SQLITE_FUNCTION, uintptr(0), (*FuncDef)(unsafe.Pointer(pDef)).FzName, uintptr(0)) if auth != SQLITE_OK { if auth == SQLITE_DENY { - Xsqlite3ErrorMsg(tls, pParse, ts+6856, + Xsqlite3ErrorMsg(tls, pParse, ts+6845, libc.VaList(bp+8, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } @@ -56366,7 +56360,7 @@ } if (*FuncDef)(unsafe.Pointer(pDef)).FfuncFlags&U32(SQLITE_FUNC_CONSTANT) == U32(0) { if (*NameContext)(unsafe.Pointer(pNC)).FncFlags&(NC_IdxExpr|NC_PartIdx|NC_GenCol) != 0 { - notValidImpl(tls, pParse, pNC, ts+6892, uintptr(0), pExpr) + notValidImpl(tls, pParse, pNC, ts+6881, uintptr(0), pExpr) } } else { @@ -56389,30 +56383,30 @@ if 0 == libc.Bool32(int32((*Parse)(unsafe.Pointer(pParse)).FeParseMode) >= PARSE_MODE_RENAME) { if pDef != 0 && (*FuncDef)(unsafe.Pointer(pDef)).FxValue == uintptr(0) && pWin != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+6920, libc.VaList(bp+16, pExpr)) + ts+6909, libc.VaList(bp+16, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } else if is_agg != 0 && (*NameContext)(unsafe.Pointer(pNC)).FncFlags&NC_AllowAgg == 0 || is_agg != 0 && (*FuncDef)(unsafe.Pointer(pDef)).FfuncFlags&U32(SQLITE_FUNC_WINDOW) != 0 && !(pWin != 0) || is_agg != 0 && pWin != 0 && (*NameContext)(unsafe.Pointer(pNC)).FncFlags&NC_AllowWin == 0 { var zType uintptr if (*FuncDef)(unsafe.Pointer(pDef)).FfuncFlags&U32(SQLITE_FUNC_WINDOW) != 0 || pWin != 0 { - zType = ts + 6963 + zType = ts + 6952 } else { - zType = ts + 6970 + zType = ts + 6959 } - Xsqlite3ErrorMsg(tls, pParse, ts+6980, libc.VaList(bp+24, zType, pExpr)) + Xsqlite3ErrorMsg(tls, pParse, ts+6969, libc.VaList(bp+24, zType, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ is_agg = 0 } else if no_such_func != 0 && int32((*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).Finit.Fbusy) == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+7008, libc.VaList(bp+40, pExpr)) + Xsqlite3ErrorMsg(tls, pParse, ts+6997, libc.VaList(bp+40, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } else if wrong_num_args != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+7030, + Xsqlite3ErrorMsg(tls, pParse, ts+7019, libc.VaList(bp+48, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } else if is_agg == 0 && (*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_WinFunc) != U32(0) { Xsqlite3ErrorMsg(tls, pParse, - ts+7074, + ts+7063, libc.VaList(bp+56, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } @@ -56484,15 +56478,15 @@ var nRef int32 = (*NameContext)(unsafe.Pointer(pNC)).FnRef if (*NameContext)(unsafe.Pointer(pNC)).FncFlags&NC_SelfRef != 0 { - notValidImpl(tls, pParse, pNC, ts+7122, pExpr, pExpr) + 
notValidImpl(tls, pParse, pNC, ts+7111, pExpr, pExpr) } else { Xsqlite3WalkSelect(tls, pWalker, *(*uintptr)(unsafe.Pointer(pExpr + 32))) } if nRef != (*NameContext)(unsafe.Pointer(pNC)).FnRef { *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_VarSelect) - *(*int32)(unsafe.Pointer(pNC + 40)) |= NC_VarSelect } + *(*int32)(unsafe.Pointer(pNC + 40)) |= NC_Subquery } break @@ -56500,7 +56494,7 @@ case TK_VARIABLE: { if (*NameContext)(unsafe.Pointer(pNC)).FncFlags&(NC_IsCheck|NC_PartIdx|NC_IdxExpr|NC_GenCol) != 0 { - notValidImpl(tls, pParse, pNC, ts+7133, pExpr, pExpr) + notValidImpl(tls, pParse, pNC, ts+7122, pExpr, pExpr) } break @@ -56631,7 +56625,7 @@ defer tls.Free(24) Xsqlite3ErrorMsg(tls, pParse, - ts+7144, libc.VaList(bp, i, zType, mx)) + ts+7133, libc.VaList(bp, i, zType, mx)) Xsqlite3RecordErrorOffsetOfExpr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pError) } @@ -56651,7 +56645,7 @@ } db = (*Parse)(unsafe.Pointer(pParse)).Fdb if (*ExprList)(unsafe.Pointer(pOrderBy)).FnExpr > *(*int32)(unsafe.Pointer(db + 136 + 2*4)) { - Xsqlite3ErrorMsg(tls, pParse, ts+7200, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+7189, 0) return 1 } for i = 0; i < (*ExprList)(unsafe.Pointer(pOrderBy)).FnExpr; i++ { @@ -56686,7 +56680,7 @@ } if Xsqlite3ExprIsInteger(tls, pE, bp+8) != 0 { if *(*int32)(unsafe.Pointer(bp + 8)) <= 0 || *(*int32)(unsafe.Pointer(bp + 8)) > (*ExprList)(unsafe.Pointer(pEList)).FnExpr { - resolveOutOfRangeError(tls, pParse, ts+7234, i+1, (*ExprList)(unsafe.Pointer(pEList)).FnExpr, pE) + resolveOutOfRangeError(tls, pParse, ts+7223, i+1, (*ExprList)(unsafe.Pointer(pEList)).FnExpr, pE) return 1 } } else { @@ -56743,7 +56737,7 @@ for i = 0; i < (*ExprList)(unsafe.Pointer(pOrderBy)).FnExpr; i++ { if int32(*(*uint16)(unsafe.Pointer(pOrderBy + 8 + uintptr(i)*32 + 16 + 4))&0x2000>>13) == 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+7240, libc.VaList(bp, i+1)) + ts+7229, libc.VaList(bp, i+1)) return 1 } } @@ -56771,7 +56765,7 @@ return 0 } if (*ExprList)(unsafe.Pointer(pOrderBy)).FnExpr > *(*int32)(unsafe.Pointer(db + 136 + 2*4)) { - Xsqlite3ErrorMsg(tls, pParse, ts+7301, libc.VaList(bp, zType)) + Xsqlite3ErrorMsg(tls, pParse, ts+7290, libc.VaList(bp, zType)) return 1 } pEList = (*Select)(unsafe.Pointer(pSelect)).FpEList @@ -56985,7 +56979,7 @@ *(*int32)(unsafe.Pointer(bp + 40)) |= NC_UEList if (*Select)(unsafe.Pointer(p)).FpHaving != 0 { if (*Select)(unsafe.Pointer(p)).FselFlags&U32(SF_Aggregate) == U32(0) { - Xsqlite3ErrorMsg(tls, pParse, ts+7332, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+7321, 0) return WRC_Abort } if Xsqlite3ResolveExprNames(tls, bp, (*Select)(unsafe.Pointer(p)).FpHaving) != 0 { @@ -57025,7 +57019,7 @@ if (*Select)(unsafe.Pointer(p)).FpOrderBy != uintptr(0) && isCompound <= nCompound && - resolveOrderGroupBy(tls, bp, p, (*Select)(unsafe.Pointer(p)).FpOrderBy, ts+7234) != 0 { + resolveOrderGroupBy(tls, bp, p, (*Select)(unsafe.Pointer(p)).FpOrderBy, ts+7223) != 0 { return WRC_Abort } if (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { @@ -57036,7 +57030,7 @@ if pGroupBy != 0 { var pItem uintptr - if resolveOrderGroupBy(tls, bp, p, pGroupBy, ts+7371) != 0 || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { + if resolveOrderGroupBy(tls, bp, p, pGroupBy, ts+7360) != 0 || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { return WRC_Abort } i = 0 @@ -57048,7 +57042,7 @@ { if (*Expr)(unsafe.Pointer((*ExprList_item)(unsafe.Pointer(pItem)).FpExpr)).Fflags&U32(EP_Agg) != U32(0) { Xsqlite3ErrorMsg(tls, pParse, - ts+7377, 0) + ts+7366, 0) return WRC_Abort } @@ -57912,7 +57906,7 @@ var mxHeight int32 = 
*(*int32)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb + 136 + 3*4)) if nHeight > mxHeight { Xsqlite3ErrorMsg(tls, pParse, - ts+7436, libc.VaList(bp, mxHeight)) + ts+7425, libc.VaList(bp, mxHeight)) rc = SQLITE_ERROR } return rc @@ -58161,10 +58155,10 @@ nExprElem = 1 } if nExprElem != nElem { - Xsqlite3ErrorMsg(tls, pParse, ts+7484, + Xsqlite3ErrorMsg(tls, pParse, ts+7473, libc.VaList(bp, nExprElem, func() uintptr { if nExprElem > 1 { - return ts + 7528 + return ts + 7517 } return ts + 1554 }(), nElem)) @@ -58205,7 +58199,7 @@ !(int32((*Parse)(unsafe.Pointer(pParse)).FeParseMode) >= PARSE_MODE_RENAME) { Xsqlite3ExprDeferredDelete(tls, pParse, pLeft) Xsqlite3ExprDeferredDelete(tls, pParse, pRight) - return Xsqlite3Expr(tls, db, TK_INTEGER, ts+7530) + return Xsqlite3Expr(tls, db, TK_INTEGER, ts+7519) } else { return Xsqlite3PExpr(tls, pParse, TK_AND, pLeft, pRight) } @@ -58231,7 +58225,7 @@ if pList != 0 && (*ExprList)(unsafe.Pointer(pList)).FnExpr > *(*int32)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb + 136 + 6*4)) && !(int32((*Parse)(unsafe.Pointer(pParse)).Fnested) != 0) { - Xsqlite3ErrorMsg(tls, pParse, ts+7532, libc.VaList(bp, pToken)) + Xsqlite3ErrorMsg(tls, pParse, ts+7521, libc.VaList(bp, pToken)) } *(*uintptr)(unsafe.Pointer(pNew + 32)) = pList *(*U32)(unsafe.Pointer(pNew + 4)) |= U32(EP_HasFunc) @@ -58259,7 +58253,7 @@ if (*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_FromDDL) != U32(0) { if (*FuncDef)(unsafe.Pointer(pDef)).FfuncFlags&U32(SQLITE_FUNC_DIRECT) != U32(0) || (*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).Fflags&uint64(SQLITE_TrustedSchema) == uint64(0) { - Xsqlite3ErrorMsg(tls, pParse, ts+7566, libc.VaList(bp, pExpr)) + Xsqlite3ErrorMsg(tls, pParse, ts+7555, libc.VaList(bp, pExpr)) } } } @@ -58306,7 +58300,7 @@ } if bOk == 0 || *(*I64)(unsafe.Pointer(bp + 8)) < int64(1) || *(*I64)(unsafe.Pointer(bp + 8)) > I64(*(*int32)(unsafe.Pointer(db + 136 + 9*4))) { - Xsqlite3ErrorMsg(tls, pParse, ts+7586, + Xsqlite3ErrorMsg(tls, pParse, ts+7575, libc.VaList(bp, *(*int32)(unsafe.Pointer(db + 136 + 9*4)))) Xsqlite3RecordErrorOffsetOfExpr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pExpr) return @@ -58331,7 +58325,7 @@ } (*Expr)(unsafe.Pointer(pExpr)).FiColumn = x if int32(x) > *(*int32)(unsafe.Pointer(db + 136 + 9*4)) { - Xsqlite3ErrorMsg(tls, pParse, ts+7629, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+7618, 0) Xsqlite3RecordErrorOffsetOfExpr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pExpr) } } @@ -58906,7 +58900,7 @@ if !(int32((*Expr)(unsafe.Pointer(pExpr)).Fop) != TK_SELECT && (*IdList)(unsafe.Pointer(pColumns)).FnId != libc.AssignInt32(&n, Xsqlite3ExprVectorSize(tls, pExpr))) { goto __3 } - Xsqlite3ErrorMsg(tls, pParse, ts+7652, + Xsqlite3ErrorMsg(tls, pParse, ts+7641, libc.VaList(bp, (*IdList)(unsafe.Pointer(pColumns)).FnId, n)) goto vector_append_error __3: @@ -59029,7 +59023,7 @@ var mx int32 = *(*int32)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb + 136 + 2*4)) if pEList != 0 && (*ExprList)(unsafe.Pointer(pEList)).FnExpr > mx { - Xsqlite3ErrorMsg(tls, pParse, ts+7682, libc.VaList(bp, zObject)) + Xsqlite3ErrorMsg(tls, pParse, ts+7671, libc.VaList(bp, zObject)) } } @@ -59085,10 +59079,10 @@ // "false" EP_IsFalse // anything else 0 func Xsqlite3IsTrueOrFalse(tls *libc.TLS, zIn uintptr) U32 { - if Xsqlite3StrICmp(tls, zIn, ts+6764) == 0 { + if Xsqlite3StrICmp(tls, zIn, ts+7694) == 0 { return U32(EP_IsTrue) } - if Xsqlite3StrICmp(tls, zIn, ts+6769) == 0 { + if Xsqlite3StrICmp(tls, zIn, ts+7699) == 0 { return U32(EP_IsFalse) } return U32(0) @@ 
-60162,7 +60156,7 @@ } if (*Select)(unsafe.Pointer(pSel)).FpLimit != 0 { var db uintptr = (*Parse)(unsafe.Pointer(pParse)).Fdb - pLimit = Xsqlite3Expr(tls, db, TK_INTEGER, ts+7530) + pLimit = Xsqlite3Expr(tls, db, TK_INTEGER, ts+7519) if pLimit != 0 { (*Expr)(unsafe.Pointer(pLimit)).FaffExpr = uint8(SQLITE_AFF_NUMERIC) pLimit = Xsqlite3PExpr(tls, pParse, TK_NE, @@ -60600,6 +60594,7 @@ func Xsqlite3ExprCodeGeneratedColumn(tls *libc.TLS, pParse uintptr, pTab uintptr, pCol uintptr, regOut int32) { var iAddr int32 var v uintptr = (*Parse)(unsafe.Pointer(pParse)).FpVdbe + var nErr int32 = (*Parse)(unsafe.Pointer(pParse)).FnErr if (*Parse)(unsafe.Pointer(pParse)).FiSelfTab > 0 { iAddr = Xsqlite3VdbeAddOp3(tls, v, OP_IfNullRow, (*Parse)(unsafe.Pointer(pParse)).FiSelfTab-1, 0, regOut) @@ -60613,6 +60608,9 @@ if iAddr != 0 { Xsqlite3VdbeJumpHere(tls, v, iAddr) } + if (*Parse)(unsafe.Pointer(pParse)).FnErr > nErr { + (*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).FerrByteOffset = -1 + } } // Generate code to extract the value of the iCol-th column of a table. @@ -60831,6 +60829,7 @@ var p uintptr var v uintptr for p = (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr; p != 0; p = (*IndexedExpr)(unsafe.Pointer(p)).FpIENext { + var exprAff U8 var iDataCur int32 = (*IndexedExpr)(unsafe.Pointer(p)).FiDataCur if iDataCur < 0 { continue @@ -60844,6 +60843,14 @@ if Xsqlite3ExprCompare(tls, uintptr(0), pExpr, (*IndexedExpr)(unsafe.Pointer(p)).FpExpr, iDataCur) != 0 { continue } + + exprAff = Xsqlite3ExprAffinity(tls, pExpr) + if int32(exprAff) <= SQLITE_AFF_BLOB && int32((*IndexedExpr)(unsafe.Pointer(p)).Faff) != SQLITE_AFF_BLOB || + int32(exprAff) == SQLITE_AFF_TEXT && int32((*IndexedExpr)(unsafe.Pointer(p)).Faff) != SQLITE_AFF_TEXT || + int32(exprAff) >= SQLITE_AFF_NUMERIC && int32((*IndexedExpr)(unsafe.Pointer(p)).Faff) != SQLITE_AFF_NUMERIC { + continue + } + v = (*Parse)(unsafe.Pointer(pParse)).FpVdbe if (*IndexedExpr)(unsafe.Pointer(p)).FbMaybeNullRow != 0 { @@ -61617,7 +61624,7 @@ if !((*Expr)(unsafe.Pointer(pExpr)).FiTable != n1) { goto __122 } - Xsqlite3ErrorMsg(tls, pParse, ts+7652, + Xsqlite3ErrorMsg(tls, pParse, ts+7641, libc.VaList(bp+24, (*Expr)(unsafe.Pointer(pExpr)).FiTable, n1)) __122: ; @@ -61639,11 +61646,10 @@ return target __50: - if !(!((*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_Collate) != U32(0)) && - (*Expr)(unsafe.Pointer(pExpr)).FpLeft != 0 && - int32((*Expr)(unsafe.Pointer((*Expr)(unsafe.Pointer(pExpr)).FpLeft)).Fop) == TK_FUNCTION) { + if !!((*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_Collate) != U32(0)) { goto __123 } + inReg = Xsqlite3ExprCodeTarget(tls, pParse, (*Expr)(unsafe.Pointer(pExpr)).FpLeft, target) if !(inReg != target) { goto __125 @@ -61714,13 +61720,19 @@ ; __127: ; - addrINR = Xsqlite3VdbeAddOp1(tls, v, OP_IfNullRow, (*Expr)(unsafe.Pointer(pExpr)).FiTable) + addrINR = Xsqlite3VdbeAddOp3(tls, v, OP_IfNullRow, (*Expr)(unsafe.Pointer(pExpr)).FiTable, 0, target) (*Parse)(unsafe.Pointer(pParse)).FokConstFactor = U8(0) inReg = Xsqlite3ExprCodeTarget(tls, pParse, (*Expr)(unsafe.Pointer(pExpr)).FpLeft, target) (*Parse)(unsafe.Pointer(pParse)).FokConstFactor = okConstFactor + if !(inReg != target) { + goto __130 + } + Xsqlite3VdbeAddOp2(tls, v, OP_SCopy, inReg, target) + inReg = target +__130: + ; Xsqlite3VdbeJumpHere(tls, v, addrINR) - Xsqlite3VdbeChangeP3(tls, v, addrINR, inReg) goto __5 __56: @@ -61733,15 +61745,15 @@ nExpr = (*ExprList)(unsafe.Pointer(pEList)).FnExpr endLabel = Xsqlite3VdbeMakeLabel(tls, pParse) if !(libc.AssignUintptr(&pX, 
(*Expr)(unsafe.Pointer(pExpr)).FpLeft) != uintptr(0)) { - goto __130 + goto __131 } pDel = Xsqlite3ExprDup(tls, db1, pX, 0) if !((*Sqlite3)(unsafe.Pointer(db1)).FmallocFailed != 0) { - goto __131 + goto __132 } Xsqlite3ExprDelete(tls, db1, pDel) goto __5 -__131: +__132: ; exprToRegister(tls, pDel, exprCodeVector(tls, pParse, pDel, bp+40)) @@ -61751,22 +61763,22 @@ pTest = bp + 120 *(*int32)(unsafe.Pointer(bp + 40)) = 0 -__130: +__131: ; i1 = 0 -__132: +__133: if !(i1 < nExpr-1) { - goto __134 + goto __135 } if !(pX != 0) { - goto __135 + goto __136 } (*Expr)(unsafe.Pointer(bp + 120)).FpRight = (*ExprList_item)(unsafe.Pointer(aListelem + uintptr(i1)*32)).FpExpr - goto __136 -__135: - pTest = (*ExprList_item)(unsafe.Pointer(aListelem + uintptr(i1)*32)).FpExpr + goto __137 __136: + pTest = (*ExprList_item)(unsafe.Pointer(aListelem + uintptr(i1)*32)).FpExpr +__137: ; nextCase = Xsqlite3VdbeMakeLabel(tls, pParse) @@ -61775,21 +61787,21 @@ Xsqlite3ExprCode(tls, pParse, (*ExprList_item)(unsafe.Pointer(aListelem+uintptr(i1+1)*32)).FpExpr, target) Xsqlite3VdbeGoto(tls, v, endLabel) Xsqlite3VdbeResolveLabel(tls, v, nextCase) - goto __133 -__133: - i1 = i1 + 2 - goto __132 goto __134 __134: + i1 = i1 + 2 + goto __133 + goto __135 +__135: ; if !(nExpr&1 != 0) { - goto __137 + goto __138 } Xsqlite3ExprCode(tls, pParse, (*ExprList_item)(unsafe.Pointer(pEList+8+uintptr(nExpr-1)*32)).FpExpr, target) - goto __138 -__137: - Xsqlite3VdbeAddOp2(tls, v, OP_Null, 0, target) + goto __139 __138: + Xsqlite3VdbeAddOp2(tls, v, OP_Null, 0, target) +__139: ; Xsqlite3ExprDelete(tls, db1, pDel) setDoNotMergeFlagOnCopy(tls, v) @@ -61799,27 +61811,27 @@ __57: ; if !(!(int32((*Parse)(unsafe.Pointer(pParse)).FpTriggerTab) != 0) && !(int32((*Parse)(unsafe.Pointer(pParse)).Fnested) != 0)) { - goto __139 + goto __140 } Xsqlite3ErrorMsg(tls, pParse, ts+8082, 0) return 0 -__139: +__140: ; if !(int32((*Expr)(unsafe.Pointer(pExpr)).FaffExpr) == OE_Abort) { - goto __140 + goto __141 } Xsqlite3MayAbort(tls, pParse) -__140: +__141: ; if !(int32((*Expr)(unsafe.Pointer(pExpr)).FaffExpr) == OE_Ignore) { - goto __141 + goto __142 } Xsqlite3VdbeAddOp4(tls, v, OP_Halt, SQLITE_OK, OE_Ignore, 0, *(*uintptr)(unsafe.Pointer(pExpr + 8)), 0) - goto __142 -__141: + goto __143 +__142: Xsqlite3HaltConstraint(tls, pParse, func() int32 { if (*Parse)(unsafe.Pointer(pParse)).FpTriggerTab != 0 { @@ -61828,7 +61840,7 @@ return SQLITE_ERROR }(), int32((*Expr)(unsafe.Pointer(pExpr)).FaffExpr), *(*uintptr)(unsafe.Pointer(pExpr + 8)), int8(0), uint8(0)) -__142: +__143: ; goto __5 @@ -64496,7 +64508,7 @@ return SQLITE_NOMEM } if Xsqlite3_strnicmp(tls, zSql, ts+10922, 7) != 0 { - return Xsqlite3CorruptError(tls, 113494) + return Xsqlite3CorruptError(tls, 113516) } (*Sqlite3)(unsafe.Pointer(db)).Finit.FiDb = func() uint8 { if bTemp != 0 { @@ -64513,7 +64525,7 @@ } if rc == SQLITE_OK && ((*Parse)(unsafe.Pointer(p)).FpNewTable == uintptr(0) && (*Parse)(unsafe.Pointer(p)).FpNewIndex == uintptr(0) && (*Parse)(unsafe.Pointer(p)).FpNewTrigger == uintptr(0)) { - rc = Xsqlite3CorruptError(tls, 113505) + rc = Xsqlite3CorruptError(tls, 113527) } (*Sqlite3)(unsafe.Pointer(db)).Finit.FiDb = U8(0) @@ -65434,7 +65446,7 @@ goto __2 } - rc = Xsqlite3CorruptError(tls, 114441) + rc = Xsqlite3CorruptError(tls, 114463) goto drop_column_done __2: ; @@ -69798,6 +69810,12 @@ pExpr = Xsqlite3PExpr(tls, pParse, TK_UPLUS, pExpr, uintptr(0)) __11: ; + if !(pExpr != 0 && int32((*Expr)(unsafe.Pointer(pExpr)).Fop) != TK_RAISE) { + goto __12 + } + (*Expr)(unsafe.Pointer(pExpr)).FaffExpr = 
(*Column)(unsafe.Pointer(pCol)).Faffinity +__12: + ; Xsqlite3ColumnSetExpr(tls, pParse, pTab, pCol, pExpr) pExpr = uintptr(0) goto generated_done @@ -70962,7 +70980,7 @@ if Xsqlite3_strnicmp(tls, (*Table)(unsafe.Pointer(pTab)).FzName+uintptr(7), ts+3286, 4) == 0 { return 0 } - if Xsqlite3_strnicmp(tls, (*Table)(unsafe.Pointer(pTab)).FzName+uintptr(7), ts+7133, 10) == 0 { + if Xsqlite3_strnicmp(tls, (*Table)(unsafe.Pointer(pTab)).FzName+uintptr(7), ts+7122, 10) == 0 { return 0 } return 1 @@ -72208,7 +72226,7 @@ goto __101 } Xsqlite3ErrorMsg(tls, pParse, ts+14140, 0) - (*Parse)(unsafe.Pointer(pParse)).Frc = Xsqlite3CorruptError(tls, 121835) + (*Parse)(unsafe.Pointer(pParse)).Frc = Xsqlite3CorruptError(tls, 121859) goto exit_create_index __101: ; @@ -74258,7 +74276,7 @@ goto __16 __15: wcf = U16(WHERE_ONEPASS_DESIRED | WHERE_DUPLICATES_OK) - if !((*NameContext)(unsafe.Pointer(bp+16)).FncFlags&NC_VarSelect != 0) { + if !((*NameContext)(unsafe.Pointer(bp+16)).FncFlags&NC_Subquery != 0) { goto __23 } bComplex = 1 @@ -80726,7 +80744,7 @@ if !!(Xsqlite3SafetyCheckOk(tls, db) != 0) { goto __1 } - return Xsqlite3MisuseError(tls, 131895) + return Xsqlite3MisuseError(tls, 131931) __1: ; if !(zSql == uintptr(0)) { @@ -82125,7 +82143,7 @@ } else if (*FuncDef)(unsafe.Pointer(p)).FxFinalize != uintptr(0) { zType = ts + 17513 } else { - zType = ts + 7528 + zType = ts + 7517 } Xsqlite3VdbeMultiLoad(tls, v, 1, ts+17515, libc.VaList(bp, (*FuncDef)(unsafe.Pointer(p)).FzName, isBuiltin, @@ -82286,6 +82304,7 @@ var zErr2 uintptr var k3 int32 var pCheck uintptr + var jmp7 int32 var jmp6 int32 var iCol1 int32 var uniqOk int32 @@ -83604,7 +83623,7 @@ goto __217 } pMod = (*HashElem)(unsafe.Pointer(j1)).Fdata - Xsqlite3VdbeMultiLoad(tls, v, 1, ts+7528, libc.VaList(bp+264, (*Module)(unsafe.Pointer(pMod)).FzName)) + Xsqlite3VdbeMultiLoad(tls, v, 1, ts+7517, libc.VaList(bp+264, (*Module)(unsafe.Pointer(pMod)).FzName)) goto __216 __216: j1 = (*HashElem)(unsafe.Pointer(j1)).Fnext @@ -83620,7 +83639,7 @@ if !(i6 < int32(uint64(unsafe.Sizeof(aPragmaName))/uint64(unsafe.Sizeof(PragmaName{})))) { goto __220 } - Xsqlite3VdbeMultiLoad(tls, v, 1, ts+7528, libc.VaList(bp+272, aPragmaName[i6].FzName)) + Xsqlite3VdbeMultiLoad(tls, v, 1, ts+7517, libc.VaList(bp+272, aPragmaName[i6].FzName)) goto __219 __219: i6++ @@ -84425,80 +84444,94 @@ jmp4 = integrityCheckResultRow(tls, v) Xsqlite3VdbeJumpHere(tls, v, jmp21) + if !((*Table)(unsafe.Pointer(pTab9)).FtabFlags&U32(TF_WithoutRowid) == U32(0)) { + goto __345 + } + Xsqlite3VdbeAddOp2(tls, v, OP_IdxRowid, *(*int32)(unsafe.Pointer(bp + 616))+j4, 3) + jmp7 = Xsqlite3VdbeAddOp3(tls, v, OP_Eq, 3, 0, r1+int32((*Index)(unsafe.Pointer(pIdx5)).FnColumn)-1) + + Xsqlite3VdbeLoadString(tls, v, 3, + ts+17929) + Xsqlite3VdbeAddOp3(tls, v, OP_Concat, 7, 3, 3) + Xsqlite3VdbeLoadString(tls, v, 4, ts+17965) + Xsqlite3VdbeGoto(tls, v, jmp5-1) + Xsqlite3VdbeJumpHere(tls, v, jmp7) +__345: + ; label6 = 0 kk = 0 -__345: +__346: if !(kk < int32((*Index)(unsafe.Pointer(pIdx5)).FnKeyCol)) { - goto __347 + goto __348 } if !(*(*uintptr)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx5)).FazColl + uintptr(kk)*8)) == uintptr(unsafe.Pointer(&Xsqlite3StrBINARY))) { - goto __348 + goto __349 } - goto __346 -__348: + goto __347 +__349: ; if !(label6 == 0) { - goto __349 + goto __350 } label6 = Xsqlite3VdbeMakeLabel(tls, pParse) -__349: +__350: ; Xsqlite3VdbeAddOp3(tls, v, OP_Column, *(*int32)(unsafe.Pointer(bp + 616))+j4, kk, 3) Xsqlite3VdbeAddOp3(tls, v, OP_Ne, 3, label6, r1+kk) - goto __346 -__346: - kk++ - goto __345 
goto __347 __347: + kk++ + goto __346 + goto __348 +__348: ; if !(label6 != 0) { - goto __350 + goto __351 } jmp6 = Xsqlite3VdbeAddOp0(tls, v, OP_Goto) Xsqlite3VdbeResolveLabel(tls, v, label6) Xsqlite3VdbeLoadString(tls, v, 3, ts+17903) Xsqlite3VdbeAddOp3(tls, v, OP_Concat, 7, 3, 3) - Xsqlite3VdbeLoadString(tls, v, 4, ts+17929) + Xsqlite3VdbeLoadString(tls, v, 4, ts+17976) Xsqlite3VdbeGoto(tls, v, jmp5-1) Xsqlite3VdbeJumpHere(tls, v, jmp6) -__350: +__351: ; if !(int32((*Index)(unsafe.Pointer(pIdx5)).FonError) != OE_None) { - goto __351 + goto __352 } uniqOk = Xsqlite3VdbeMakeLabel(tls, pParse) kk = 0 -__352: +__353: if !(kk < int32((*Index)(unsafe.Pointer(pIdx5)).FnKeyCol)) { - goto __354 + goto __355 } iCol1 = int32(*(*I16)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx5)).FaiColumn + uintptr(kk)*2))) if !(iCol1 >= 0 && uint32(int32(*(*uint8)(unsafe.Pointer((*Table)(unsafe.Pointer(pTab9)).FaCol + uintptr(iCol1)*24 + 8))&0xf0>>4)) != 0) { - goto __355 + goto __356 } - goto __353 -__355: + goto __354 +__356: ; Xsqlite3VdbeAddOp2(tls, v, OP_IsNull, r1+kk, uniqOk) - goto __353 -__353: - kk++ - goto __352 goto __354 __354: + kk++ + goto __353 + goto __355 +__355: ; jmp61 = Xsqlite3VdbeAddOp1(tls, v, OP_Next, *(*int32)(unsafe.Pointer(bp + 616))+j4) Xsqlite3VdbeGoto(tls, v, uniqOk) Xsqlite3VdbeJumpHere(tls, v, jmp61) Xsqlite3VdbeAddOp4Int(tls, v, OP_IdxGT, *(*int32)(unsafe.Pointer(bp + 616))+j4, uniqOk, r1, int32((*Index)(unsafe.Pointer(pIdx5)).FnKeyCol)) - Xsqlite3VdbeLoadString(tls, v, 3, ts+17956) + Xsqlite3VdbeLoadString(tls, v, 3, ts+18003) Xsqlite3VdbeGoto(tls, v, jmp5) Xsqlite3VdbeResolveLabel(tls, v, uniqOk) -__351: +__352: ; Xsqlite3VdbeJumpHere(tls, v, jmp4) Xsqlite3ResolvePartIdxLabel(tls, pParse, *(*int32)(unsafe.Pointer(bp + 632))) @@ -84515,20 +84548,20 @@ Xsqlite3VdbeAddOp2(tls, v, OP_Next, *(*int32)(unsafe.Pointer(bp + 612)), loopTop) Xsqlite3VdbeJumpHere(tls, v, loopTop-1) if !!(isQuick != 0) { - goto __356 + goto __357 } - Xsqlite3VdbeLoadString(tls, v, 2, ts+17983) + Xsqlite3VdbeLoadString(tls, v, 2, ts+18030) j4 = 0 pIdx5 = (*Table)(unsafe.Pointer(pTab9)).FpIndex -__357: +__358: if !(pIdx5 != 0) { - goto __359 + goto __360 } if !(pPk1 == pIdx5) { - goto __360 + goto __361 } - goto __358 -__360: + goto __359 +__361: ; Xsqlite3VdbeAddOp2(tls, v, OP_Count, *(*int32)(unsafe.Pointer(bp + 616))+j4, 3) addr1 = Xsqlite3VdbeAddOp3(tls, v, OP_Eq, 8+j4, 0, 3) @@ -84537,21 +84570,21 @@ Xsqlite3VdbeAddOp3(tls, v, OP_Concat, 4, 2, 3) integrityCheckResultRow(tls, v) Xsqlite3VdbeJumpHere(tls, v, addr1) - goto __358 -__358: - pIdx5 = (*Index)(unsafe.Pointer(pIdx5)).FpNext - j4++ - goto __357 goto __359 __359: + pIdx5 = (*Index)(unsafe.Pointer(pIdx5)).FpNext + j4++ + goto __358 + goto __360 +__360: ; if !(pPk1 != 0) { - goto __361 + goto __362 } Xsqlite3ReleaseTempRange(tls, pParse, r2, int32((*Index)(unsafe.Pointer(pPk1)).FnKeyCol)) -__361: +__362: ; -__356: +__357: ; goto __291 __291: @@ -84569,14 +84602,14 @@ ; aOp2 = Xsqlite3VdbeAddOpList(tls, v, int32(uint64(unsafe.Sizeof(endCode))/uint64(unsafe.Sizeof(VdbeOpList{}))), uintptr(unsafe.Pointer(&endCode)), iLn5) if !(aOp2 != 0) { - goto __362 + goto __363 } (*VdbeOp)(unsafe.Pointer(aOp2)).Fp2 = 1 - *(*int32)(unsafe.Pointer(bp + 608)) (*VdbeOp)(unsafe.Pointer(aOp2 + 2*24)).Fp4type = int8(-1) - *(*uintptr)(unsafe.Pointer(aOp2 + 2*24 + 16)) = ts + 18012 + *(*uintptr)(unsafe.Pointer(aOp2 + 2*24 + 16)) = ts + 18059 (*VdbeOp)(unsafe.Pointer(aOp2 + 5*24)).Fp4type = int8(-1) *(*uintptr)(unsafe.Pointer(aOp2 + 5*24 + 16)) = Xsqlite3ErrStr(tls, 
SQLITE_CORRUPT) -__362: +__363: ; Xsqlite3VdbeChangeP3(tls, v, 0, Xsqlite3VdbeCurrentAddr(tls, v)-2) @@ -84584,27 +84617,27 @@ __45: if !!(zRight != 0) { - goto __363 + goto __364 } if !(Xsqlite3ReadSchema(tls, pParse) != 0) { - goto __365 + goto __366 } goto pragma_out -__365: +__366: ; returnSingleText(tls, v, encnames1[(*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).Fenc].FzName) - goto __364 -__363: + goto __365 +__364: if !((*Sqlite3)(unsafe.Pointer(db)).FmDbFlags&U32(DBFLAG_EncodingFixed) == U32(0)) { - goto __366 + goto __367 } pEnc = uintptr(unsafe.Pointer(&encnames1)) -__367: +__368: if !((*EncName)(unsafe.Pointer(pEnc)).FzName != 0) { - goto __369 + goto __370 } if !(0 == Xsqlite3StrICmp(tls, zRight, (*EncName)(unsafe.Pointer(pEnc)).FzName)) { - goto __370 + goto __371 } if (*EncName)(unsafe.Pointer(pEnc)).Fenc != 0 { enc = (*EncName)(unsafe.Pointer(pEnc)).Fenc @@ -84618,25 +84651,25 @@ } (*Schema)(unsafe.Pointer((*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb)).FpSchema)).Fenc = enc Xsqlite3SetTextEncoding(tls, db, enc) - goto __369 -__370: + goto __370 +__371: ; - goto __368 -__368: - pEnc += 16 - goto __367 goto __369 __369: + pEnc += 16 + goto __368 + goto __370 +__370: ; if !!(int32((*EncName)(unsafe.Pointer(pEnc)).FzName) != 0) { - goto __371 + goto __372 } - Xsqlite3ErrorMsg(tls, pParse, ts+18015, libc.VaList(bp+456, zRight)) -__371: + Xsqlite3ErrorMsg(tls, pParse, ts+18062, libc.VaList(bp+456, zRight)) +__372: ; -__366: +__367: ; -__364: +__365: ; goto __15 @@ -84644,15 +84677,15 @@ iCookie = int32((*PragmaName)(unsafe.Pointer(pPragma)).FiArg) Xsqlite3VdbeUsesBtree(tls, v, iDb) if !(zRight != 0 && int32((*PragmaName)(unsafe.Pointer(pPragma)).FmPragFlg)&PragFlg_ReadOnly == 0) { - goto __372 + goto __373 } aOp3 = Xsqlite3VdbeAddOpList(tls, v, int32(uint64(unsafe.Sizeof(setCookie))/uint64(unsafe.Sizeof(VdbeOpList{}))), uintptr(unsafe.Pointer(&setCookie)), 0) if !(0 != 0) { - goto __374 + goto __375 } goto __15 -__374: +__375: ; (*VdbeOp)(unsafe.Pointer(aOp3)).Fp1 = iDb (*VdbeOp)(unsafe.Pointer(aOp3 + 1*24)).Fp1 = iDb @@ -84660,41 +84693,41 @@ (*VdbeOp)(unsafe.Pointer(aOp3 + 1*24)).Fp3 = Xsqlite3Atoi(tls, zRight) (*VdbeOp)(unsafe.Pointer(aOp3 + 1*24)).Fp5 = U16(1) if !(iCookie == BTREE_SCHEMA_VERSION && (*Sqlite3)(unsafe.Pointer(db)).Fflags&uint64(SQLITE_Defensive) != uint64(0)) { - goto __375 + goto __376 } (*VdbeOp)(unsafe.Pointer(aOp3 + 1*24)).Fopcode = U8(OP_Noop) -__375: +__376: ; - goto __373 -__372: + goto __374 +__373: ; aOp4 = Xsqlite3VdbeAddOpList(tls, v, int32(uint64(unsafe.Sizeof(readCookie))/uint64(unsafe.Sizeof(VdbeOpList{}))), uintptr(unsafe.Pointer(&readCookie)), 0) if !(0 != 0) { - goto __376 + goto __377 } goto __15 -__376: +__377: ; (*VdbeOp)(unsafe.Pointer(aOp4)).Fp1 = iDb (*VdbeOp)(unsafe.Pointer(aOp4 + 1*24)).Fp1 = iDb (*VdbeOp)(unsafe.Pointer(aOp4 + 1*24)).Fp3 = iCookie Xsqlite3VdbeReusable(tls, v) -__373: +__374: ; goto __15 __47: i10 = 0 (*Parse)(unsafe.Pointer(pParse)).FnMem = 1 -__377: +__378: if !(libc.AssignUintptr(&zOpt, Xsqlite3_compileoption_get(tls, libc.PostIncInt32(&i10, 1))) != uintptr(0)) { - goto __378 + goto __379 } Xsqlite3VdbeLoadString(tls, v, 1, zOpt) Xsqlite3VdbeAddOp2(tls, v, OP_ResultRow, 1, 1) - goto __377 -__378: + goto __378 +__379: ; Xsqlite3VdbeReusable(tls, v) @@ -84709,31 +84742,31 @@ }() eMode2 = SQLITE_CHECKPOINT_PASSIVE if !(zRight != 0) { - goto __379 + goto __380 } if !(Xsqlite3StrICmp(tls, zRight, ts+17345) == 0) { - goto __380 + goto __381 } eMode2 = SQLITE_CHECKPOINT_FULL - goto __381 
-__380: - if !(Xsqlite3StrICmp(tls, zRight, ts+18040) == 0) { - goto __382 + goto __382 +__381: + if !(Xsqlite3StrICmp(tls, zRight, ts+18087) == 0) { + goto __383 } eMode2 = SQLITE_CHECKPOINT_RESTART - goto __383 -__382: + goto __384 +__383: if !(Xsqlite3StrICmp(tls, zRight, ts+17498) == 0) { - goto __384 + goto __385 } eMode2 = SQLITE_CHECKPOINT_TRUNCATE -__384: +__385: ; -__383: +__384: ; -__381: +__382: ; -__379: +__380: ; (*Parse)(unsafe.Pointer(pParse)).FnMem = 3 Xsqlite3VdbeAddOp3(tls, v, OP_Checkpoint, iBt, eMode2, 1) @@ -84743,10 +84776,10 @@ __49: if !(zRight != 0) { - goto __385 + goto __386 } Xsqlite3_wal_autocheckpoint(tls, db, Xsqlite3Atoi(tls, zRight)) -__385: +__386: ; returnSingleInt(tls, v, func() int64 { @@ -84766,19 +84799,19 @@ __51: if !(zRight != 0) { - goto __386 + goto __387 } opMask = U32(Xsqlite3Atoi(tls, zRight)) if !(opMask&U32(0x02) == U32(0)) { - goto __388 + goto __389 } goto __15 -__388: +__389: ; - goto __387 -__386: - opMask = U32(0xfffe) + goto __388 __387: + opMask = U32(0xfffe) +__388: ; iTabCur = libc.PostIncInt32(&(*Parse)(unsafe.Pointer(pParse)).FnTab, 1) iDbLast = func() int32 { @@ -84787,86 +84820,86 @@ } return (*Sqlite3)(unsafe.Pointer(db)).FnDb - 1 }() -__389: +__390: if !(iDb <= iDbLast) { - goto __391 + goto __392 } if !(iDb == 1) { - goto __392 + goto __393 } - goto __390 -__392: + goto __391 +__393: ; Xsqlite3CodeVerifySchema(tls, pParse, iDb) pSchema = (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + uintptr(iDb)*32)).FpSchema k4 = (*Hash)(unsafe.Pointer(pSchema + 8)).Ffirst -__393: +__394: if !(k4 != 0) { - goto __395 + goto __396 } pTab10 = (*HashElem)(unsafe.Pointer(k4)).Fdata if !((*Table)(unsafe.Pointer(pTab10)).FtabFlags&U32(TF_StatsUsed) == U32(0)) { - goto __396 + goto __397 } - goto __394 -__396: + goto __395 +__397: ; szThreshold = LogEst(int32((*Table)(unsafe.Pointer(pTab10)).FnRowLogEst) + 46) pIdx6 = (*Table)(unsafe.Pointer(pTab10)).FpIndex -__397: +__398: if !(pIdx6 != 0) { - goto __399 + goto __400 } if !!(int32(*(*uint16)(unsafe.Pointer(pIdx6 + 100))&0x100>>8) != 0) { - goto __400 + goto __401 } szThreshold = int16(0) - goto __399 -__400: + goto __400 +__401: ; - goto __398 -__398: - pIdx6 = (*Index)(unsafe.Pointer(pIdx6)).FpNext - goto __397 goto __399 __399: + pIdx6 = (*Index)(unsafe.Pointer(pIdx6)).FpNext + goto __398 + goto __400 +__400: ; if !(szThreshold != 0) { - goto __401 + goto __402 } Xsqlite3OpenTable(tls, pParse, iTabCur, iDb, pTab10, OP_OpenRead) Xsqlite3VdbeAddOp3(tls, v, OP_IfSmaller, iTabCur, int32(U32(Xsqlite3VdbeCurrentAddr(tls, v)+2)+opMask&U32(1)), int32(szThreshold)) -__401: +__402: ; - zSubSql = Xsqlite3MPrintf(tls, db, ts+18048, + zSubSql = Xsqlite3MPrintf(tls, db, ts+18095, libc.VaList(bp+464, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName, (*Table)(unsafe.Pointer(pTab10)).FzName)) if !(opMask&U32(0x01) != 0) { - goto __402 + goto __403 } r11 = Xsqlite3GetTempReg(tls, pParse) Xsqlite3VdbeAddOp4(tls, v, OP_String8, 0, r11, 0, zSubSql, -6) Xsqlite3VdbeAddOp2(tls, v, OP_ResultRow, r11, 1) - goto __403 -__402: - Xsqlite3VdbeAddOp4(tls, v, OP_SqlExec, 0, 0, 0, zSubSql, -6) + goto __404 __403: + Xsqlite3VdbeAddOp4(tls, v, OP_SqlExec, 0, 0, 0, zSubSql, -6) +__404: ; - goto __394 -__394: - k4 = (*HashElem)(unsafe.Pointer(k4)).Fnext - goto __393 goto __395 __395: + k4 = (*HashElem)(unsafe.Pointer(k4)).Fnext + goto __394 + goto __396 +__396: ; - goto __390 -__390: - iDb++ - goto __389 goto __391 __391: + iDb++ + goto __390 + goto __392 +__392: ; 
Xsqlite3VdbeAddOp0(tls, v, OP_Expire) goto __15 @@ -84874,36 +84907,36 @@ __52: ; if !(zRight != 0) { - goto __404 + goto __405 } Xsqlite3_busy_timeout(tls, db, Xsqlite3Atoi(tls, zRight)) -__404: +__405: ; returnSingleInt(tls, v, int64((*Sqlite3)(unsafe.Pointer(db)).FbusyTimeout)) goto __15 __53: if !(zRight != 0 && Xsqlite3DecOrHexToI64(tls, zRight, bp+640) == SQLITE_OK) { - goto __405 + goto __406 } Xsqlite3_soft_heap_limit64(tls, *(*Sqlite3_int64)(unsafe.Pointer(bp + 640))) -__405: +__406: ; returnSingleInt(tls, v, Xsqlite3_soft_heap_limit64(tls, int64(-1))) goto __15 __54: if !(zRight != 0 && Xsqlite3DecOrHexToI64(tls, zRight, bp+648) == SQLITE_OK) { - goto __406 + goto __407 } iPrior = Xsqlite3_hard_heap_limit64(tls, int64(-1)) if !(*(*Sqlite3_int64)(unsafe.Pointer(bp + 648)) > int64(0) && (iPrior == int64(0) || iPrior > *(*Sqlite3_int64)(unsafe.Pointer(bp + 648)))) { - goto __407 + goto __408 } Xsqlite3_hard_heap_limit64(tls, *(*Sqlite3_int64)(unsafe.Pointer(bp + 648))) -__407: +__408: ; -__406: +__407: ; returnSingleInt(tls, v, Xsqlite3_hard_heap_limit64(tls, int64(-1))) goto __15 @@ -84912,10 +84945,10 @@ if !(zRight != 0 && Xsqlite3DecOrHexToI64(tls, zRight, bp+656) == SQLITE_OK && *(*Sqlite3_int64)(unsafe.Pointer(bp + 656)) >= int64(0)) { - goto __408 + goto __409 } Xsqlite3_limit(tls, db, SQLITE_LIMIT_WORKER_THREADS, int32(*(*Sqlite3_int64)(unsafe.Pointer(bp + 656))&int64(0x7fffffff))) -__408: +__409: ; returnSingleInt(tls, v, int64(Xsqlite3_limit(tls, db, SQLITE_LIMIT_WORKER_THREADS, -1))) goto __15 @@ -84924,10 +84957,10 @@ if !(zRight != 0 && Xsqlite3DecOrHexToI64(tls, zRight, bp+664) == SQLITE_OK && *(*Sqlite3_int64)(unsafe.Pointer(bp + 664)) >= int64(0)) { - goto __409 + goto __410 } (*Sqlite3)(unsafe.Pointer(db)).FnAnalysisLimit = int32(*(*Sqlite3_int64)(unsafe.Pointer(bp + 664)) & int64(0x7fffffff)) -__409: +__410: ; returnSingleInt(tls, v, int64((*Sqlite3)(unsafe.Pointer(db)).FnAnalysisLimit)) goto __15 @@ -84935,10 +84968,10 @@ __15: ; if !(int32((*PragmaName)(unsafe.Pointer(pPragma)).FmPragFlg)&PragFlg_NoColumns1 != 0 && zRight != 0) { - goto __410 + goto __411 } -__410: +__411: ; pragma_out: Xsqlite3DbFree(tls, db, zLeft) @@ -84990,14 +85023,14 @@ {Fopcode: U8(OP_Goto), Fp2: int8(3)}, } var encnames1 = [9]EncName{ - {FzName: ts + 18066, Fenc: U8(SQLITE_UTF8)}, - {FzName: ts + 18071, Fenc: U8(SQLITE_UTF8)}, - {FzName: ts + 18077, Fenc: U8(SQLITE_UTF16LE)}, - {FzName: ts + 18086, Fenc: U8(SQLITE_UTF16BE)}, - {FzName: ts + 18095, Fenc: U8(SQLITE_UTF16LE)}, - {FzName: ts + 18103, Fenc: U8(SQLITE_UTF16BE)}, - {FzName: ts + 18111}, - {FzName: ts + 18118}, + {FzName: ts + 18113, Fenc: U8(SQLITE_UTF8)}, + {FzName: ts + 18118, Fenc: U8(SQLITE_UTF8)}, + {FzName: ts + 18124, Fenc: U8(SQLITE_UTF16LE)}, + {FzName: ts + 18133, Fenc: U8(SQLITE_UTF16BE)}, + {FzName: ts + 18142, Fenc: U8(SQLITE_UTF16LE)}, + {FzName: ts + 18150, Fenc: U8(SQLITE_UTF16BE)}, + {FzName: ts + 18158}, + {FzName: ts + 18165}, {}, } var setCookie = [2]VdbeOpList{ @@ -85049,7 +85082,7 @@ _ = argc _ = argv Xsqlite3StrAccumInit(tls, bp+32, uintptr(0), bp+64, int32(unsafe.Sizeof([200]uint8{})), 0) - Xsqlite3_str_appendall(tls, bp+32, ts+18124) + Xsqlite3_str_appendall(tls, bp+32, ts+18171) i = 0 j = int32((*PragmaName)(unsafe.Pointer(pPragma)).FiPragCName) __1: @@ -85057,7 +85090,7 @@ goto __3 } { - Xsqlite3_str_appendf(tls, bp+32, ts+18139, libc.VaList(bp, int32(cSep), pragCName[j])) + Xsqlite3_str_appendf(tls, bp+32, ts+18186, libc.VaList(bp, int32(cSep), pragCName[j])) cSep = uint8(',') } @@ -85070,16 +85103,16 
@@ __3: ; if i == 0 { - Xsqlite3_str_appendf(tls, bp+32, ts+18146, libc.VaList(bp+16, (*PragmaName)(unsafe.Pointer(pPragma)).FzName)) + Xsqlite3_str_appendf(tls, bp+32, ts+18193, libc.VaList(bp+16, (*PragmaName)(unsafe.Pointer(pPragma)).FzName)) i++ } j = 0 if int32((*PragmaName)(unsafe.Pointer(pPragma)).FmPragFlg)&PragFlg_Result1 != 0 { - Xsqlite3_str_appendall(tls, bp+32, ts+18152) + Xsqlite3_str_appendall(tls, bp+32, ts+18199) j++ } if int32((*PragmaName)(unsafe.Pointer(pPragma)).FmPragFlg)&(PragFlg_SchemaOpt|PragFlg_SchemaReq) != 0 { - Xsqlite3_str_appendall(tls, bp+32, ts+18164) + Xsqlite3_str_appendall(tls, bp+32, ts+18211) j++ } Xsqlite3_str_append(tls, bp+32, ts+4957, 1) @@ -85262,13 +85295,13 @@ __3: ; Xsqlite3StrAccumInit(tls, bp+32, uintptr(0), uintptr(0), 0, *(*int32)(unsafe.Pointer((*PragmaVtab)(unsafe.Pointer(pTab)).Fdb + 136 + 1*4))) - Xsqlite3_str_appendall(tls, bp+32, ts+18179) + Xsqlite3_str_appendall(tls, bp+32, ts+18226) if *(*uintptr)(unsafe.Pointer(pCsr + 24 + 1*8)) != 0 { - Xsqlite3_str_appendf(tls, bp+32, ts+18187, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(pCsr + 24 + 1*8)))) + Xsqlite3_str_appendf(tls, bp+32, ts+18234, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(pCsr + 24 + 1*8)))) } Xsqlite3_str_appendall(tls, bp+32, (*PragmaName)(unsafe.Pointer((*PragmaVtab)(unsafe.Pointer(pTab)).FpName)).FzName) if *(*uintptr)(unsafe.Pointer(pCsr + 24)) != 0 { - Xsqlite3_str_appendf(tls, bp+32, ts+18191, libc.VaList(bp+16, *(*uintptr)(unsafe.Pointer(pCsr + 24)))) + Xsqlite3_str_appendf(tls, bp+32, ts+18238, libc.VaList(bp+16, *(*uintptr)(unsafe.Pointer(pCsr + 24)))) } zSql = Xsqlite3StrAccumFinish(tls, bp+32) if zSql == uintptr(0) { @@ -85345,12 +85378,12 @@ } else if *(*uintptr)(unsafe.Pointer((*InitData)(unsafe.Pointer(pData)).FpzErrMsg)) != uintptr(0) { } else if (*InitData)(unsafe.Pointer(pData)).FmInitFlags&U32(INITFLAG_AlterMask) != 0 { *(*uintptr)(unsafe.Pointer((*InitData)(unsafe.Pointer(pData)).FpzErrMsg)) = Xsqlite3MPrintf(tls, db, - ts+18195, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(azObj)), *(*uintptr)(unsafe.Pointer(azObj + 1*8)), + ts+18242, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(azObj)), *(*uintptr)(unsafe.Pointer(azObj + 1*8)), azAlterType[(*InitData)(unsafe.Pointer(pData)).FmInitFlags&U32(INITFLAG_AlterMask)-U32(1)], zExtra)) (*InitData)(unsafe.Pointer(pData)).Frc = SQLITE_ERROR } else if (*Sqlite3)(unsafe.Pointer(db)).Fflags&uint64(SQLITE_WriteSchema) != 0 { - (*InitData)(unsafe.Pointer(pData)).Frc = Xsqlite3CorruptError(tls, 137196) + (*InitData)(unsafe.Pointer(pData)).Frc = Xsqlite3CorruptError(tls, 137249) } else { var z uintptr var zObj uintptr @@ -85359,19 +85392,19 @@ } else { zObj = ts + 5008 } - z = Xsqlite3MPrintf(tls, db, ts+18223, libc.VaList(bp+32, zObj)) + z = Xsqlite3MPrintf(tls, db, ts+18270, libc.VaList(bp+32, zObj)) if zExtra != 0 && *(*uint8)(unsafe.Pointer(zExtra)) != 0 { - z = Xsqlite3MPrintf(tls, db, ts+18254, libc.VaList(bp+40, z, zExtra)) + z = Xsqlite3MPrintf(tls, db, ts+18301, libc.VaList(bp+40, z, zExtra)) } *(*uintptr)(unsafe.Pointer((*InitData)(unsafe.Pointer(pData)).FpzErrMsg)) = z - (*InitData)(unsafe.Pointer(pData)).Frc = Xsqlite3CorruptError(tls, 137203) + (*InitData)(unsafe.Pointer(pData)).Frc = Xsqlite3CorruptError(tls, 137256) } } var azAlterType = [3]uintptr{ - ts + 18262, - ts + 18269, - ts + 18281, + ts + 18309, + ts + 18316, + ts + 18328, } // Check to see if any sibling index (another index on the same table) @@ -85463,7 +85496,7 @@ var pIndex uintptr pIndex = Xsqlite3FindIndex(tls, db, 
*(*uintptr)(unsafe.Pointer(argv + 1*8)), (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName) if pIndex == uintptr(0) { - corruptSchema(tls, pData, argv, ts+18292) + corruptSchema(tls, pData, argv, ts+18339) } else if Xsqlite3GetUInt32(tls, *(*uintptr)(unsafe.Pointer(argv + 3*8)), pIndex+88) == 0 || (*Index)(unsafe.Pointer(pIndex)).Ftnum < Pgno(2) || (*Index)(unsafe.Pointer(pIndex)).Ftnum > (*InitData)(unsafe.Pointer(pData)).FmxPage || @@ -85511,7 +85544,7 @@ }()) *(*uintptr)(unsafe.Pointer(bp + 16 + 2*8)) = *(*uintptr)(unsafe.Pointer(bp + 16 + 1*8)) *(*uintptr)(unsafe.Pointer(bp + 16 + 3*8)) = ts + 7938 - *(*uintptr)(unsafe.Pointer(bp + 16 + 4*8)) = ts + 18305 + *(*uintptr)(unsafe.Pointer(bp + 16 + 4*8)) = ts + 18352 *(*uintptr)(unsafe.Pointer(bp + 16 + 5*8)) = uintptr(0) (*InitData)(unsafe.Pointer(bp + 64)).Fdb = db (*InitData)(unsafe.Pointer(bp + 64)).FiDb = iDb @@ -85640,7 +85673,7 @@ if !(int32((*Schema)(unsafe.Pointer((*Db)(unsafe.Pointer(pDb)).FpSchema)).Ffile_format) > SQLITE_MAX_FILE_FORMAT) { goto __19 } - Xsqlite3SetString(tls, pzErrMsg, db, ts+18377) + Xsqlite3SetString(tls, pzErrMsg, db, ts+18424) rc = SQLITE_ERROR goto initone_error_out __19: @@ -85654,7 +85687,7 @@ (*InitData)(unsafe.Pointer(bp + 64)).FmxPage = Xsqlite3BtreeLastPage(tls, (*Db)(unsafe.Pointer(pDb)).FpBt) zSql = Xsqlite3MPrintf(tls, db, - ts+18401, + ts+18448, libc.VaList(bp, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName, zSchemaTabName)) xAuth = (*Sqlite3)(unsafe.Pointer(db)).FxAuth @@ -85986,7 +86019,7 @@ goto __8 } zDb = (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + uintptr(i)*32)).FzDbSName - Xsqlite3ErrorWithMsg(tls, db, rc, ts+18435, libc.VaList(bp, zDb)) + Xsqlite3ErrorWithMsg(tls, db, rc, ts+18482, libc.VaList(bp, zDb)) goto end_prepare __8: @@ -86016,7 +86049,7 @@ if !(nBytes > mxLen) { goto __12 } - Xsqlite3ErrorWithMsg(tls, db, SQLITE_TOOBIG, ts+18465, 0) + Xsqlite3ErrorWithMsg(tls, db, SQLITE_TOOBIG, ts+18512, 0) rc = Xsqlite3ApiExit(tls, db, SQLITE_TOOBIG) goto end_prepare __12: @@ -86112,7 +86145,7 @@ *(*uintptr)(unsafe.Pointer(ppStmt)) = uintptr(0) if !(Xsqlite3SafetyCheckOk(tls, db) != 0) || zSql == uintptr(0) { - return Xsqlite3MisuseError(tls, 137995) + return Xsqlite3MisuseError(tls, 138048) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) Xsqlite3BtreeEnterAll(tls, db) @@ -86211,7 +86244,7 @@ *(*uintptr)(unsafe.Pointer(ppStmt)) = uintptr(0) if !(Xsqlite3SafetyCheckOk(tls, db) != 0) || zSql == uintptr(0) { - return Xsqlite3MisuseError(tls, 138143) + return Xsqlite3MisuseError(tls, 138196) } if nBytes >= 0 { var sz int32 @@ -86543,13 +86576,13 @@ zSp2++ } Xsqlite3ErrorMsg(tls, pParse, - ts+18484, libc.VaList(bp, pA, zSp1, pB, zSp2, pC)) + ts+18531, libc.VaList(bp, pA, zSp1, pB, zSp2, pC)) jointype = JT_INNER } return jointype } -var zKeyText = *(*[34]uint8)(unsafe.Pointer(ts + 18514)) +var zKeyText = *(*[34]uint8)(unsafe.Pointer(ts + 18561)) var aKeyword = [7]struct { Fi U8 FnChar U8 @@ -86724,7 +86757,7 @@ var pUsing uintptr = uintptr(0) if uint32(int32(*(*uint16)(unsafe.Pointer(pRight + 60 + 4))&0x20>>5)) != 0 || *(*uintptr)(unsafe.Pointer(pRight + 72)) != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+18548, libc.VaList(bp, 0)) + ts+18595, libc.VaList(bp, 0)) return 1 } for j = 0; j < int32((*Table)(unsafe.Pointer(pRightTab)).FnCol); j++ { @@ -86769,7 +86802,7 @@ tableAndColumnIndex(tls, pSrc, 0, i, zName, bp+24, bp+28, int32(*(*uint16)(unsafe.Pointer(pRight + 60 + 4))&0x8>>3)) == 0 { 
Xsqlite3ErrorMsg(tls, pParse, - ts+18598, libc.VaList(bp+8, zName)) + ts+18645, libc.VaList(bp+8, zName)) return 1 } pE1 = Xsqlite3CreateColumnExpr(tls, db, pSrc, *(*int32)(unsafe.Pointer(bp + 24)), *(*int32)(unsafe.Pointer(bp + 28))) @@ -86780,7 +86813,7 @@ int32(*(*uint16)(unsafe.Pointer(pRight + 60 + 4))&0x8>>3)) != 0 { if int32(*(*uint16)(unsafe.Pointer(pSrc + 8 + uintptr(*(*int32)(unsafe.Pointer(bp + 24)))*104 + 60 + 4))&0x20>>5) == 0 || Xsqlite3IdListIndex(tls, *(*uintptr)(unsafe.Pointer(pSrc + 8 + uintptr(*(*int32)(unsafe.Pointer(bp + 24)))*104 + 72)), zName) < 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+18662, + Xsqlite3ErrorMsg(tls, pParse, ts+18709, libc.VaList(bp+16, zName)) break } @@ -87408,16 +87441,16 @@ var z uintptr switch id { case TK_ALL: - z = ts + 18699 + z = ts + 18746 break case TK_INTERSECT: - z = ts + 18709 + z = ts + 18756 break case TK_EXCEPT: - z = ts + 18719 + z = ts + 18766 break default: - z = ts + 18726 + z = ts + 18773 break } return z @@ -87427,7 +87460,7 @@ bp := tls.Alloc(8) defer tls.Free(8) - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18732, libc.VaList(bp, zUsage)) + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18779, libc.VaList(bp, zUsage)) } func generateSortTail(tls *libc.TLS, pParse uintptr, p uintptr, pSort uintptr, nColumn int32, pDest uintptr) { @@ -87453,9 +87486,9 @@ var nRefKey int32 = 0 var aOutEx uintptr = (*Select)(unsafe.Pointer(p)).FpEList + 8 - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18755, libc.VaList(bp, func() uintptr { + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18802, libc.VaList(bp, func() uintptr { if (*SortCtx)(unsafe.Pointer(pSort)).FnOBSat > 0 { - return ts + 18786 + return ts + 18833 } return ts + 1554 }())) @@ -87799,7 +87832,7 @@ } else { var z uintptr = (*ExprList_item)(unsafe.Pointer(pEList + 8 + uintptr(i)*32)).FzEName if z == uintptr(0) { - z = Xsqlite3MPrintf(tls, db, ts+18801, libc.VaList(bp+16, i+1)) + z = Xsqlite3MPrintf(tls, db, ts+18848, libc.VaList(bp+16, i+1)) } else { z = Xsqlite3DbStrDup(tls, db, z) } @@ -87899,7 +87932,7 @@ if zName != 0 && !(Xsqlite3IsTrueOrFalse(tls, zName) != 0) { zName = Xsqlite3DbStrDup(tls, db, zName) } else { - zName = Xsqlite3MPrintf(tls, db, ts+18801, libc.VaList(bp, i+1)) + zName = Xsqlite3MPrintf(tls, db, ts+18848, libc.VaList(bp, i+1)) } *(*U32)(unsafe.Pointer(bp + 56)) = U32(0) @@ -87915,7 +87948,7 @@ nName = j } } - zName = Xsqlite3MPrintf(tls, db, ts+18810, libc.VaList(bp+8, nName, zName, libc.PreIncUint32(&*(*U32)(unsafe.Pointer(bp + 56)), 1))) + zName = Xsqlite3MPrintf(tls, db, ts+18857, libc.VaList(bp+8, nName, zName, libc.PreIncUint32(&*(*U32)(unsafe.Pointer(bp + 56)), 1))) Xsqlite3ProgressCheck(tls, pParse) if *(*U32)(unsafe.Pointer(bp + 56)) > U32(3) { Xsqlite3_randomness(tls, int32(unsafe.Sizeof(U32(0))), bp+56) @@ -87998,8 +88031,6 @@ (*Column)(unsafe.Pointer(pCol)).Faffinity = Xsqlite3ExprAffinity(tls, p) if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) <= SQLITE_AFF_NONE { (*Column)(unsafe.Pointer(pCol)).Faffinity = aff - } else if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) >= SQLITE_AFF_NUMERIC && int32((*Expr)(unsafe.Pointer(p)).Fop) == TK_CAST { - (*Column)(unsafe.Pointer(pCol)).Faffinity = uint8(SQLITE_AFF_FLEXNUM) } if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) >= SQLITE_AFF_TEXT && (*Select)(unsafe.Pointer(pSelect)).FpNext != 0 { var m int32 = 0 @@ -88014,12 +88045,15 @@ } else if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) >= SQLITE_AFF_NUMERIC && m&0x02 != 0 { (*Column)(unsafe.Pointer(pCol)).Faffinity = uint8(SQLITE_AFF_BLOB) } + 
if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) >= SQLITE_AFF_NUMERIC && int32((*Expr)(unsafe.Pointer(p)).Fop) == TK_CAST { + (*Column)(unsafe.Pointer(pCol)).Faffinity = uint8(SQLITE_AFF_FLEXNUM) + } } zType = columnTypeImpl(tls, bp, p, uintptr(0), uintptr(0), uintptr(0)) if zType == uintptr(0) || int32((*Column)(unsafe.Pointer(pCol)).Faffinity) != int32(Xsqlite3AffinityType(tls, zType, uintptr(0))) { if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) == SQLITE_AFF_NUMERIC || int32((*Column)(unsafe.Pointer(pCol)).Faffinity) == SQLITE_AFF_FLEXNUM { - zType = ts + 18818 + zType = ts + 18865 } else { zType = uintptr(0) for j = 1; j < SQLITE_N_STDTYPE; j++ { @@ -88235,7 +88269,7 @@ if !((*Select)(unsafe.Pointer(p)).FpWin != 0) { goto __1 } - Xsqlite3ErrorMsg(tls, pParse, ts+18822, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+18869, 0) return __1: ; @@ -88326,7 +88360,7 @@ if !((*Select)(unsafe.Pointer(pFirstRec)).FselFlags&U32(SF_Aggregate) != 0) { goto __15 } - Xsqlite3ErrorMsg(tls, pParse, ts+18871, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+18918, 0) goto end_of_recursive_query __15: ; @@ -88346,7 +88380,7 @@ ; pSetup = (*Select)(unsafe.Pointer(pFirstRec)).FpPrior (*Select)(unsafe.Pointer(pSetup)).FpNext = uintptr(0) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18913, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18960, 0) rc = Xsqlite3Select(tls, pParse, pSetup, bp) (*Select)(unsafe.Pointer(pSetup)).FpNext = p if !(rc != 0) { @@ -88383,7 +88417,7 @@ Xsqlite3VdbeResolveLabel(tls, v, addrCont) (*Select)(unsafe.Pointer(pFirstRec)).FpPrior = uintptr(0) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18919, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18966, 0) Xsqlite3Select(tls, pParse, p, bp) (*Select)(unsafe.Pointer(pFirstRec)).FpPrior = pSetup @@ -88417,11 +88451,11 @@ p = (*Select)(unsafe.Pointer(p)).FpPrior nRow = nRow + bShowAll } - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18934, libc.VaList(bp, nRow, func() uintptr { + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18981, libc.VaList(bp, nRow, func() uintptr { if nRow == 1 { return ts + 1554 } - return ts + 18957 + return ts + 19004 }())) for p != 0 { selectInnerLoop(tls, pParse, p, -1, uintptr(0), uintptr(0), pDest, 1, 1) @@ -88522,8 +88556,8 @@ if !((*Select)(unsafe.Pointer(pPrior)).FpPrior == uintptr(0)) { goto __8 } - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18959, 0) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18974, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19006, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19021, 0) __8: ; switch int32((*Select)(unsafe.Pointer(p)).Fop) { @@ -88570,7 +88604,7 @@ ; __15: ; - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18699, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18746, 0) rc = Xsqlite3Select(tls, pParse, p, bp+16) @@ -88637,7 +88671,7 @@ pLimit = (*Select)(unsafe.Pointer(p)).FpLimit (*Select)(unsafe.Pointer(p)).FpLimit = uintptr(0) (*SelectDest)(unsafe.Pointer(bp + 64)).FeDest = op - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18993, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19040, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) rc = Xsqlite3Select(tls, pParse, p, bp+64) @@ -88699,7 +88733,7 @@ pLimit1 = (*Select)(unsafe.Pointer(p)).FpLimit (*Select)(unsafe.Pointer(p)).FpLimit = uintptr(0) (*SelectDest)(unsafe.Pointer(bp + 104)).FiSDParm = tab2 - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18993, libc.VaList(bp+8, 
Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19040, libc.VaList(bp+8, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) rc = Xsqlite3Select(tls, pParse, p, bp+104) @@ -88852,10 +88886,10 @@ defer tls.Free(8) if (*Select)(unsafe.Pointer(p)).FselFlags&U32(SF_Values) != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+19014, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+19061, 0) } else { Xsqlite3ErrorMsg(tls, pParse, - ts+19060, + ts+19107, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) } } @@ -89109,8 +89143,8 @@ (*Select)(unsafe.Pointer(pPrior)).FpNext = uintptr(0) (*Select)(unsafe.Pointer(pPrior)).FpOrderBy = Xsqlite3ExprListDup(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pOrderBy, 0) - Xsqlite3ResolveOrderGroupBy(tls, pParse, p, (*Select)(unsafe.Pointer(p)).FpOrderBy, ts+7234) - Xsqlite3ResolveOrderGroupBy(tls, pParse, pPrior, (*Select)(unsafe.Pointer(pPrior)).FpOrderBy, ts+7234) + Xsqlite3ResolveOrderGroupBy(tls, pParse, p, (*Select)(unsafe.Pointer(p)).FpOrderBy, ts+7223) + Xsqlite3ResolveOrderGroupBy(tls, pParse, pPrior, (*Select)(unsafe.Pointer(pPrior)).FpOrderBy, ts+7223) computeLimitRegisters(tls, pParse, p, labelEnd) if (*Select)(unsafe.Pointer(p)).FiLimit != 0 && op == TK_ALL { @@ -89137,13 +89171,13 @@ Xsqlite3SelectDestInit(tls, bp+8, SRT_Coroutine, regAddrA) Xsqlite3SelectDestInit(tls, bp+48, SRT_Coroutine, regAddrB) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19142, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19189, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) addrSelectA = Xsqlite3VdbeCurrentAddr(tls, v) + 1 addr1 = Xsqlite3VdbeAddOp3(tls, v, OP_InitCoroutine, regAddrA, 0, addrSelectA) (*Select)(unsafe.Pointer(pPrior)).FiLimit = regLimitA - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19153, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19200, 0) Xsqlite3Select(tls, pParse, pPrior, bp+8) Xsqlite3VdbeEndCoroutine(tls, v, regAddrA) Xsqlite3VdbeJumpHere(tls, v, addr1) @@ -89155,7 +89189,7 @@ savedOffset = (*Select)(unsafe.Pointer(p)).FiOffset (*Select)(unsafe.Pointer(p)).FiLimit = regLimitB (*Select)(unsafe.Pointer(p)).FiOffset = 0 - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19158, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19205, 0) Xsqlite3Select(tls, pParse, p, bp+48) (*Select)(unsafe.Pointer(p)).FiLimit = savedLimit (*Select)(unsafe.Pointer(p)).FiOffset = savedOffset @@ -89343,7 +89377,8 @@ Xsqlite3VectorErrorMsg(tls, (*SubstContext)(unsafe.Pointer(pSubst)).FpParse, pCopy) } else { var db uintptr = (*Parse)(unsafe.Pointer((*SubstContext)(unsafe.Pointer(pSubst)).FpParse)).Fdb - if (*SubstContext)(unsafe.Pointer(pSubst)).FisOuterJoin != 0 && int32((*Expr)(unsafe.Pointer(pCopy)).Fop) != TK_COLUMN { + if (*SubstContext)(unsafe.Pointer(pSubst)).FisOuterJoin != 0 && + (int32((*Expr)(unsafe.Pointer(pCopy)).Fop) != TK_COLUMN || (*Expr)(unsafe.Pointer(pCopy)).FiTable != (*SubstContext)(unsafe.Pointer(pSubst)).FiNewTable) { libc.Xmemset(tls, bp, 0, uint64(unsafe.Sizeof(Expr{}))) (*Expr)(unsafe.Pointer(bp)).Fop = U8(TK_IF_NULL_ROW) (*Expr)(unsafe.Pointer(bp)).FpLeft = pCopy @@ -90242,7 +90277,7 @@ for pIdx = (*Table)(unsafe.Pointer(pTab)).FpIndex; pIdx != 0 && Xsqlite3StrICmp(tls, (*Index)(unsafe.Pointer(pIdx)).FzName, zIndexedBy) != 0; pIdx = (*Index)(unsafe.Pointer(pIdx)).FpNext { } if !(pIdx != 0) { - Xsqlite3ErrorMsg(tls, 
pParse, ts+19164, libc.VaList(bp, zIndexedBy, 0)) + Xsqlite3ErrorMsg(tls, pParse, ts+19211, libc.VaList(bp, zIndexedBy, 0)) (*Parse)(unsafe.Pointer(pParse)).FcheckSchema = U8(1) return SQLITE_ERROR } @@ -90325,7 +90360,7 @@ defer tls.Free(8) if uint32(int32(*(*uint16)(unsafe.Pointer(pFrom + 60 + 4))&0x2000>>13)) != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+19182, libc.VaList(bp, (*SrcItem)(unsafe.Pointer(pFrom)).FzName)) + Xsqlite3ErrorMsg(tls, pParse, ts+19229, libc.VaList(bp, (*SrcItem)(unsafe.Pointer(pFrom)).FzName)) return 1 } return 0 @@ -90454,7 +90489,7 @@ *(*U32)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pFrom)).FpSelect + 4)) |= U32(SF_CopyCte) if uint32(int32(*(*uint16)(unsafe.Pointer(pFrom + 60 + 4))&0x4000>>14)) != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+19205, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(pFrom + 88)))) + Xsqlite3ErrorMsg(tls, pParse, ts+19252, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(pFrom + 88)))) return 2 } libc.SetBitFieldPtr16Uint32(pFrom+60+4, uint32(1), 7, 0x80) @@ -90477,7 +90512,7 @@ libc.SetBitFieldPtr16Uint32(pItem+60+4, uint32(1), 9, 0x200) if (*Select)(unsafe.Pointer(pRecTerm)).FselFlags&U32(SF_Recursive) != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+19225, libc.VaList(bp+16, (*Cte)(unsafe.Pointer(pCte)).FzName)) + ts+19272, libc.VaList(bp+16, (*Cte)(unsafe.Pointer(pCte)).FzName)) return 2 } *(*U32)(unsafe.Pointer(pRecTerm + 4)) |= U32(SF_Recursive) @@ -90493,7 +90528,7 @@ pRecTerm = (*Select)(unsafe.Pointer(pRecTerm)).FpPrior } - (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19268 + (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19315 pSavedWith = (*Parse)(unsafe.Pointer(pParse)).FpWith (*Parse)(unsafe.Pointer(pParse)).FpWith = *(*uintptr)(unsafe.Pointer(bp + 48)) if (*Select)(unsafe.Pointer(pSel)).FselFlags&U32(SF_Recursive) != 0 { @@ -90519,7 +90554,7 @@ pEList = (*Select)(unsafe.Pointer(pLeft)).FpEList if (*Cte)(unsafe.Pointer(pCte)).FpCols != 0 { if pEList != 0 && (*ExprList)(unsafe.Pointer(pEList)).FnExpr != (*ExprList)(unsafe.Pointer((*Cte)(unsafe.Pointer(pCte)).FpCols)).FnExpr { - Xsqlite3ErrorMsg(tls, pParse, ts+19291, + Xsqlite3ErrorMsg(tls, pParse, ts+19338, libc.VaList(bp+24, (*Cte)(unsafe.Pointer(pCte)).FzName, (*ExprList)(unsafe.Pointer(pEList)).FnExpr, (*ExprList)(unsafe.Pointer((*Cte)(unsafe.Pointer(pCte)).FpCols)).FnExpr)) (*Parse)(unsafe.Pointer(pParse)).FpWith = pSavedWith return 2 @@ -90530,9 +90565,9 @@ Xsqlite3ColumnsFromExprList(tls, pParse, pEList, pTab+54, pTab+8) if bMayRecursive != 0 { if (*Select)(unsafe.Pointer(pSel)).FselFlags&U32(SF_Recursive) != 0 { - (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19329 + (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19376 } else { - (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19363 + (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19410 } Xsqlite3WalkSelect(tls, pWalker, pSel) } @@ -90579,7 +90614,7 @@ if (*SrcItem)(unsafe.Pointer(pFrom)).FzAlias != 0 { (*Table)(unsafe.Pointer(pTab)).FzName = Xsqlite3DbStrDup(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, (*SrcItem)(unsafe.Pointer(pFrom)).FzAlias) } else { - (*Table)(unsafe.Pointer(pTab)).FzName = Xsqlite3MPrintf(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, ts+19401, libc.VaList(bp, pFrom)) + (*Table)(unsafe.Pointer(pTab)).FzName = Xsqlite3MPrintf(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, ts+19448, libc.VaList(bp, pFrom)) } for (*Select)(unsafe.Pointer(pSel)).FpPrior != 0 { pSel = (*Select)(unsafe.Pointer(pSel)).FpPrior @@ -90691,7 +90726,7 @@ return WRC_Abort } if (*Table)(unsafe.Pointer(pTab)).FnTabRef >= U32(0xffff) { - Xsqlite3ErrorMsg(tls, 
pParse, ts+19405, + Xsqlite3ErrorMsg(tls, pParse, ts+19452, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName)) (*SrcItem)(unsafe.Pointer(pFrom)).FpTab = uintptr(0) return WRC_Abort @@ -90710,7 +90745,7 @@ if int32((*Table)(unsafe.Pointer(pTab)).FeTabType) == TABTYP_VIEW { if (*Sqlite3)(unsafe.Pointer(db)).Fflags&uint64(SQLITE_EnableView) == uint64(0) && (*Table)(unsafe.Pointer(pTab)).FpSchema != (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+1*32)).FpSchema { - Xsqlite3ErrorMsg(tls, pParse, ts+19444, + Xsqlite3ErrorMsg(tls, pParse, ts+19491, libc.VaList(bp+8, (*Table)(unsafe.Pointer(pTab)).FzName)) } (*SrcItem)(unsafe.Pointer(pFrom)).FpSelect = Xsqlite3SelectDup(tls, db, *(*uintptr)(unsafe.Pointer(pTab + 64)), 0) @@ -90834,7 +90869,7 @@ if pNew != 0 { var pX uintptr = pNew + 8 + uintptr((*ExprList)(unsafe.Pointer(pNew)).FnExpr-1)*32 - (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3MPrintf(tls, db, ts+19475, libc.VaList(bp+24, zUName)) + (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3MPrintf(tls, db, ts+19522, libc.VaList(bp+24, zUName)) libc.SetBitFieldPtr16Uint32(pX+16+4, uint32(ENAME_TAB), 14, 0xc000) libc.SetBitFieldPtr16Uint32(pX+16+4, uint32(1), 8, 0x100) } @@ -90899,7 +90934,7 @@ (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3DbStrDup(tls, db, (*ExprList_item)(unsafe.Pointer(pNestedFrom+8+uintptr(j)*32)).FzEName) } else { - (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3MPrintf(tls, db, ts+19480, + (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3MPrintf(tls, db, ts+19527, libc.VaList(bp+32, zSchemaName, zTabName, zName)) } @@ -90930,9 +90965,9 @@ ; if !(tableSeen != 0) { if zTName != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+19489, libc.VaList(bp+72, zTName)) + Xsqlite3ErrorMsg(tls, pParse, ts+19536, libc.VaList(bp+72, zTName)) } else { - Xsqlite3ErrorMsg(tls, pParse, ts+19507, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+19554, 0) } } } @@ -90942,7 +90977,7 @@ } if (*Select)(unsafe.Pointer(p)).FpEList != 0 { if (*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer(p)).FpEList)).FnExpr > *(*int32)(unsafe.Pointer(db + 136 + 2*4)) { - Xsqlite3ErrorMsg(tls, pParse, ts+19527, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+19574, 0) return WRC_Abort } if elistFlags&U32(EP_HasFunc|EP_Subquery) != U32(0) { @@ -91080,7 +91115,7 @@ (*AggInfo)(unsafe.Pointer(pAggInfo)).FnColumn = (*AggInfo)(unsafe.Pointer(pAggInfo)).FnAccumulator if int32((*AggInfo)(unsafe.Pointer(pAggInfo)).FnSortingColumn) > 0 { if (*AggInfo)(unsafe.Pointer(pAggInfo)).FnColumn == 0 { - (*AggInfo)(unsafe.Pointer(pAggInfo)).FnSortingColumn = U16(0) + (*AggInfo)(unsafe.Pointer(pAggInfo)).FnSortingColumn = U16((*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer(pSelect)).FpGroupBy)).FnExpr) } else { (*AggInfo)(unsafe.Pointer(pAggInfo)).FnSortingColumn = U16(int32((*AggInfo_col)(unsafe.Pointer((*AggInfo)(unsafe.Pointer(pAggInfo)).FaCol+uintptr((*AggInfo)(unsafe.Pointer(pAggInfo)).FnColumn-1)*24)).FiSorterColumn) + 1) } @@ -91164,13 +91199,13 @@ if *(*uintptr)(unsafe.Pointer(pE + 32)) == uintptr(0) || (*ExprList)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pE + 32)))).FnExpr != 1 { Xsqlite3ErrorMsg(tls, pParse, - ts+19558, 0) + ts+19605, 0) (*AggInfo_func)(unsafe.Pointer(pFunc)).FiDistinct = -1 } else { var pKeyInfo uintptr = Xsqlite3KeyInfoFromExprList(tls, pParse, *(*uintptr)(unsafe.Pointer(pE + 32)), 0, 0) (*AggInfo_func)(unsafe.Pointer(pFunc)).FiDistAddr = Xsqlite3VdbeAddOp4(tls, v, OP_OpenEphemeral, (*AggInfo_func)(unsafe.Pointer(pFunc)).FiDistinct, 0, 0, pKeyInfo, -8) - Xsqlite3VdbeExplain(tls, 
pParse, uint8(0), ts+19609, libc.VaList(bp, (*FuncDef)(unsafe.Pointer((*AggInfo_func)(unsafe.Pointer(pFunc)).FpFunc)).FzName)) + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+19656, libc.VaList(bp, (*FuncDef)(unsafe.Pointer((*AggInfo_func)(unsafe.Pointer(pFunc)).FpFunc)).FzName)) } } @@ -91359,11 +91394,11 @@ if int32((*Parse)(unsafe.Pointer(pParse)).Fexplain) == 2 { var bCover int32 = libc.Bool32(pIdx != uintptr(0) && ((*Table)(unsafe.Pointer(pTab)).FtabFlags&U32(TF_WithoutRowid) == U32(0) || !(int32(*(*uint16)(unsafe.Pointer(pIdx + 100))&0xc000>>14) == SQLITE_IDXTYPE_PRIMARYKEY))) - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+19642, + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+19689, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName, func() uintptr { if bCover != 0 { - return ts + 19654 + return ts + 19701 } return ts + 1554 }(), @@ -91691,7 +91726,7 @@ goto __7 } Xsqlite3ErrorMsg(tls, pParse, - ts+19677, + ts+19724, libc.VaList(bp, func() uintptr { if (*SrcItem)(unsafe.Pointer(p0)).FzAlias != 0 { return (*SrcItem)(unsafe.Pointer(p0)).FzAlias @@ -91752,7 +91787,7 @@ if !(int32((*Table)(unsafe.Pointer(pTab)).FnCol) != (*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer(pSub)).FpEList)).FnExpr) { goto __15 } - Xsqlite3ErrorMsg(tls, pParse, ts+19731, + Xsqlite3ErrorMsg(tls, pParse, ts+19778, libc.VaList(bp+8, int32((*Table)(unsafe.Pointer(pTab)).FnCol), (*Table)(unsafe.Pointer(pTab)).FzName, (*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer(pSub)).FpEList)).FnExpr)) goto select_end __15: @@ -91894,7 +91929,7 @@ (*SrcItem)(unsafe.Pointer(pItem1)).FaddrFillSub = addrTop Xsqlite3SelectDestInit(tls, bp+96, SRT_Coroutine, (*SrcItem)(unsafe.Pointer(pItem1)).FregReturn) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19771, libc.VaList(bp+32, pItem1)) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19818, libc.VaList(bp+32, pItem1)) Xsqlite3Select(tls, pParse, pSub1, bp+96) (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pItem1)).FpTab)).FnRowLogEst = (*Select)(unsafe.Pointer(pSub1)).FnSelectRow libc.SetBitFieldPtr16Uint32(pItem1+60+4, uint32(1), 10, 0x400) @@ -91953,7 +91988,7 @@ ; Xsqlite3SelectDestInit(tls, bp+96, SRT_EphemTab, (*SrcItem)(unsafe.Pointer(pItem1)).FiCursor) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19786, libc.VaList(bp+40, pItem1)) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19833, libc.VaList(bp+40, pItem1)) Xsqlite3Select(tls, pParse, pSub1, bp+96) (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pItem1)).FpTab)).FnRowLogEst = (*Select)(unsafe.Pointer(pSub1)).FnSelectRow if !(onceAddr != 0) { @@ -92424,9 +92459,9 @@ explainTempTable(tls, pParse, func() uintptr { if (*DistinctCtx)(unsafe.Pointer(bp+136)).FisTnct != 0 && (*Select)(unsafe.Pointer(p)).FselFlags&U32(SF_Distinct) == U32(0) { - return ts + 19802 + return ts + 19849 } - return ts + 19811 + return ts + 19858 }()) groupBySort = 1 @@ -92777,7 +92812,7 @@ if !(int32((*DistinctCtx)(unsafe.Pointer(bp+136)).FeTnctType) == WHERE_DISTINCT_UNORDERED) { goto __146 } - explainTempTable(tls, pParse, ts+19802) + explainTempTable(tls, pParse, ts+19849) __146: ; if !((*SortCtx)(unsafe.Pointer(bp+48)).FpOrderBy != 0) { @@ -92882,7 +92917,7 @@ } Xsqlite3_free(tls, (*TabResult)(unsafe.Pointer(p)).FzErrMsg) (*TabResult)(unsafe.Pointer(p)).FzErrMsg = Xsqlite3_mprintf(tls, - ts+19820, 0) + ts+19867, 0) (*TabResult)(unsafe.Pointer(p)).Frc = SQLITE_ERROR return 1 __11: @@ -93115,7 +93150,7 @@ if !((*Token)(unsafe.Pointer(pName2)).Fn > uint32(0)) { goto __3 } - Xsqlite3ErrorMsg(tls, pParse, ts+19885, 0) + 
Xsqlite3ErrorMsg(tls, pParse, ts+19932, 0) goto trigger_cleanup __3: ; @@ -93159,7 +93194,7 @@ goto trigger_cleanup __8: ; - Xsqlite3FixInit(tls, bp+40, pParse, iDb, ts+19931, *(*uintptr)(unsafe.Pointer(bp + 32))) + Xsqlite3FixInit(tls, bp+40, pParse, iDb, ts+19978, *(*uintptr)(unsafe.Pointer(bp + 32))) if !(Xsqlite3FixSrcList(tls, bp+40, pTableName) != 0) { goto __9 } @@ -93177,7 +93212,7 @@ if !(int32((*Table)(unsafe.Pointer(pTab)).FeTabType) == TABTYP_VTAB) { goto __11 } - Xsqlite3ErrorMsg(tls, pParse, ts+19939, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+19986, 0) goto trigger_orphan_error __11: ; @@ -93189,7 +93224,7 @@ goto trigger_cleanup __12: ; - if !(Xsqlite3CheckObjectName(tls, pParse, zName, ts+19931, (*Table)(unsafe.Pointer(pTab)).FzName) != 0) { + if !(Xsqlite3CheckObjectName(tls, pParse, zName, ts+19978, (*Table)(unsafe.Pointer(pTab)).FzName) != 0) { goto __13 } goto trigger_cleanup @@ -93204,11 +93239,12 @@ if !!(noErr != 0) { goto __16 } - Xsqlite3ErrorMsg(tls, pParse, ts+19980, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(bp + 32)))) + Xsqlite3ErrorMsg(tls, pParse, ts+20027, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(bp + 32)))) goto __17 __16: ; Xsqlite3CodeVerifySchema(tls, pParse, iDb) + __17: ; goto trigger_cleanup @@ -93219,19 +93255,19 @@ if !(Xsqlite3_strnicmp(tls, (*Table)(unsafe.Pointer(pTab)).FzName, ts+6381, 7) == 0) { goto __18 } - Xsqlite3ErrorMsg(tls, pParse, ts+20006, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+20053, 0) goto trigger_cleanup __18: ; if !(int32((*Table)(unsafe.Pointer(pTab)).FeTabType) == TABTYP_VIEW && tr_tm != TK_INSTEAD) { goto __19 } - Xsqlite3ErrorMsg(tls, pParse, ts+20044, + Xsqlite3ErrorMsg(tls, pParse, ts+20091, libc.VaList(bp+8, func() uintptr { if tr_tm == TK_BEFORE { - return ts + 20081 + return ts + 20128 } - return ts + 20088 + return ts + 20135 }(), pTableName+8)) goto trigger_orphan_error __19: @@ -93240,7 +93276,7 @@ goto __20 } Xsqlite3ErrorMsg(tls, pParse, - ts+20094, libc.VaList(bp+24, pTableName+8)) + ts+20141, libc.VaList(bp+24, pTableName+8)) goto trigger_orphan_error __20: ; @@ -93389,7 +93425,7 @@ __3: ; Xsqlite3TokenInit(tls, bp+56, (*Trigger)(unsafe.Pointer(pTrig)).FzName) - Xsqlite3FixInit(tls, bp+72, pParse, iDb, ts+19931, bp+56) + Xsqlite3FixInit(tls, bp+72, pParse, iDb, ts+19978, bp+56) if !(Xsqlite3FixTriggerStep(tls, bp+72, (*Trigger)(unsafe.Pointer(pTrig)).Fstep_list) != 0 || Xsqlite3FixExpr(tls, bp+72, (*Trigger)(unsafe.Pointer(pTrig)).FpWhen) != 0) { goto __4 @@ -93422,7 +93458,7 @@ goto __12 } Xsqlite3ErrorMsg(tls, pParse, - ts+20140, + ts+20187, libc.VaList(bp, (*Trigger)(unsafe.Pointer(pTrig)).FzName, (*TriggerStep)(unsafe.Pointer(pStep)).FzTarget)) goto triggerfinish_cleanup __12: @@ -93447,13 +93483,13 @@ z = Xsqlite3DbStrNDup(tls, db, (*Token)(unsafe.Pointer(pAll)).Fz, uint64((*Token)(unsafe.Pointer(pAll)).Fn)) Xsqlite3NestedParse(tls, pParse, - ts+20188, + ts+20235, libc.VaList(bp+16, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName, zName, (*Trigger)(unsafe.Pointer(pTrig)).Ftable, z)) Xsqlite3DbFree(tls, db, z) Xsqlite3ChangeCookie(tls, pParse, iDb) Xsqlite3VdbeAddParseSchemaOp(tls, v, iDb, - Xsqlite3MPrintf(tls, db, ts+20263, libc.VaList(bp+48, zName)), uint16(0)) + Xsqlite3MPrintf(tls, db, ts+20310, libc.VaList(bp+48, zName)), uint16(0)) __7: ; __6: @@ -93709,7 +93745,7 @@ if !!(noErr != 0) { goto __9 } - Xsqlite3ErrorMsg(tls, pParse, ts+20292, libc.VaList(bp, pName+8)) + Xsqlite3ErrorMsg(tls, pParse, ts+20339, libc.VaList(bp, pName+8)) goto __10 __9: 
Xsqlite3CodeVerifyNamedSchema(tls, pParse, zDb) @@ -93762,7 +93798,7 @@ if libc.AssignUintptr(&v, Xsqlite3GetVdbe(tls, pParse)) != uintptr(0) { Xsqlite3NestedParse(tls, pParse, - ts+20312, + ts+20359, libc.VaList(bp, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName, (*Trigger)(unsafe.Pointer(pTrigger)).FzName)) Xsqlite3ChangeCookie(tls, pParse, iDb) Xsqlite3VdbeAddOp4(tls, v, OP_DropTrigger, iDb, 0, 0, (*Trigger)(unsafe.Pointer(pTrigger)).FzName, 0) @@ -93876,12 +93912,12 @@ goto __15 } Xsqlite3ErrorMsg(tls, pParse, - ts+20374, + ts+20421, libc.VaList(bp, func() uintptr { if op == TK_DELETE { - return ts + 20422 + return ts + 20469 } - return ts + 20429 + return ts + 20476 }())) __15: ; @@ -93995,7 +94031,7 @@ if int32((*Expr)(unsafe.Pointer((*Expr)(unsafe.Pointer(pTerm)).FpRight)).Fop) != TK_ASTERISK { return 0 } - Xsqlite3ErrorMsg(tls, pParse, ts+20436, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+20483, 0) return 1 } @@ -94061,7 +94097,7 @@ } Xsqlite3ExprListDelete(tls, db, (*Select)(unsafe.Pointer(bp)).FpEList) pNew = sqlite3ExpandReturning(tls, pParse, (*Returning)(unsafe.Pointer(pReturning)).FpReturnEL, pTab) - if !(int32((*Sqlite3)(unsafe.Pointer(db)).FmallocFailed) != 0) { + if (*Parse)(unsafe.Pointer(pParse)).FnErr == 0 { libc.Xmemset(tls, bp+240, 0, uint64(unsafe.Sizeof(NameContext{}))) if (*Returning)(unsafe.Pointer(pReturning)).FnRetCol == 0 { (*Returning)(unsafe.Pointer(pReturning)).FnRetCol = (*ExprList)(unsafe.Pointer(pNew)).FnExpr @@ -94225,7 +94261,7 @@ if v != 0 { if (*Trigger)(unsafe.Pointer(pTrigger)).FzName != 0 { Xsqlite3VdbeChangeP4(tls, v, -1, - Xsqlite3MPrintf(tls, db, ts+20478, libc.VaList(bp, (*Trigger)(unsafe.Pointer(pTrigger)).FzName)), -6) + Xsqlite3MPrintf(tls, db, ts+20525, libc.VaList(bp, (*Trigger)(unsafe.Pointer(pTrigger)).FzName)), -6) } if (*Trigger)(unsafe.Pointer(pTrigger)).FpWhen != 0 { @@ -94818,7 +94854,7 @@ } Xsqlite3ErrorMsg(tls, pParse, - ts+20492, + ts+20539, libc.VaList(bp, (*Column)(unsafe.Pointer((*Table)(unsafe.Pointer(pTab)).FaCol+uintptr(j)*24)).FzCnName)) goto update_cleanup __27: @@ -94850,7 +94886,7 @@ iRowidExpr = i goto __30 __29: - Xsqlite3ErrorMsg(tls, pParse, ts+20528, libc.VaList(bp+8, (*ExprList_item)(unsafe.Pointer(pChanges+8+uintptr(i)*32)).FzEName)) + Xsqlite3ErrorMsg(tls, pParse, ts+20575, libc.VaList(bp+8, (*ExprList_item)(unsafe.Pointer(pChanges+8+uintptr(i)*32)).FzEName)) (*Parse)(unsafe.Pointer(pParse)).FcheckSchema = U8(1) goto update_cleanup __30: @@ -95176,7 +95212,12 @@ goto __77 __76: flags = WHERE_ONEPASS_DESIRED - if !(!(int32((*Parse)(unsafe.Pointer(pParse)).Fnested) != 0) && !(pTrigger != 0) && !(hasFK != 0) && !(chngKey != 0) && !(*(*int32)(unsafe.Pointer(bp + 104)) != 0)) { + if !(!(int32((*Parse)(unsafe.Pointer(pParse)).Fnested) != 0) && + !(pTrigger != 0) && + !(hasFK != 0) && + !(chngKey != 0) && + !(*(*int32)(unsafe.Pointer(bp + 104)) != 0) && + (*NameContext)(unsafe.Pointer(bp+40)).FncFlags&NC_Subquery == 0) { goto __78 } flags = flags | WHERE_ONEPASS_MULTIROW @@ -95730,7 +95771,7 @@ if !(regRowCount != 0) { goto __169 } - Xsqlite3CodeChangeCount(tls, v, regRowCount, ts+20547) + Xsqlite3CodeChangeCount(tls, v, regRowCount, ts+20594) __169: ; update_cleanup: @@ -96036,10 +96077,10 @@ if nClause == 0 && (*Upsert)(unsafe.Pointer(pUpsert)).FpNextUpsert == uintptr(0) { *(*uint8)(unsafe.Pointer(bp + 216)) = uint8(0) } else { - Xsqlite3_snprintf(tls, int32(unsafe.Sizeof([16]uint8{})), bp+216, ts+20560, libc.VaList(bp, nClause+1)) + Xsqlite3_snprintf(tls, 
int32(unsafe.Sizeof([16]uint8{})), bp+216, ts+20607, libc.VaList(bp, nClause+1)) } Xsqlite3ErrorMsg(tls, pParse, - ts+20564, libc.VaList(bp+8, bp+216)) + ts+20611, libc.VaList(bp+8, bp+216)) return SQLITE_ERROR } @@ -96162,7 +96203,7 @@ var zSubSql uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp)), 0) if zSubSql != 0 && - (libc.Xstrncmp(tls, zSubSql, ts+20637, uint64(3)) == 0 || libc.Xstrncmp(tls, zSubSql, ts+20641, uint64(3)) == 0) { + (libc.Xstrncmp(tls, zSubSql, ts+20684, uint64(3)) == 0 || libc.Xstrncmp(tls, zSubSql, ts+20688, uint64(3)) == 0) { rc = execSql(tls, db, pzErrMsg, zSubSql) if rc != SQLITE_OK { break @@ -96310,14 +96351,14 @@ if !!(int32((*Sqlite3)(unsafe.Pointer(db)).FautoCommit) != 0) { goto __1 } - Xsqlite3SetString(tls, pzErrMsg, db, ts+20645) + Xsqlite3SetString(tls, pzErrMsg, db, ts+20692) return SQLITE_ERROR __1: ; if !((*Sqlite3)(unsafe.Pointer(db)).FnVdbeActive > 1) { goto __2 } - Xsqlite3SetString(tls, pzErrMsg, db, ts+20685) + Xsqlite3SetString(tls, pzErrMsg, db, ts+20732) return SQLITE_ERROR __2: ; @@ -96328,7 +96369,7 @@ if !(Xsqlite3_value_type(tls, pOut) != SQLITE_TEXT) { goto __5 } - Xsqlite3SetString(tls, pzErrMsg, db, ts+20728) + Xsqlite3SetString(tls, pzErrMsg, db, ts+20775) return SQLITE_ERROR __5: ; @@ -96356,7 +96397,7 @@ isMemDb = Xsqlite3PagerIsMemdb(tls, Xsqlite3BtreePager(tls, pMain)) nDb = (*Sqlite3)(unsafe.Pointer(db)).FnDb - rc = execSqlF(tls, db, pzErrMsg, ts+20746, libc.VaList(bp, zOut)) + rc = execSqlF(tls, db, pzErrMsg, ts+20793, libc.VaList(bp, zOut)) (*Sqlite3)(unsafe.Pointer(db)).FopenFlags = saved_openFlags if !(rc != SQLITE_OK) { goto __6 @@ -96376,7 +96417,7 @@ goto __8 } rc = SQLITE_ERROR - Xsqlite3SetString(tls, pzErrMsg, db, ts+20769) + Xsqlite3SetString(tls, pzErrMsg, db, ts+20816) goto end_of_vacuum __8: ; @@ -96436,7 +96477,7 @@ (*Sqlite3)(unsafe.Pointer(db)).Finit.FiDb = U8(nDb) rc = execSqlF(tls, db, pzErrMsg, - ts+20796, + ts+20843, libc.VaList(bp+8, zDbMain)) if !(rc != SQLITE_OK) { goto __13 @@ -96445,7 +96486,7 @@ __13: ; rc = execSqlF(tls, db, pzErrMsg, - ts+20904, + ts+20951, libc.VaList(bp+16, zDbMain)) if !(rc != SQLITE_OK) { goto __14 @@ -96456,7 +96497,7 @@ (*Sqlite3)(unsafe.Pointer(db)).Finit.FiDb = U8(0) rc = execSqlF(tls, db, pzErrMsg, - ts+20958, + ts+21005, libc.VaList(bp+24, zDbMain)) *(*U32)(unsafe.Pointer(db + 44)) &= libc.Uint32FromInt32(libc.CplInt32(DBFLAG_Vacuum)) @@ -96467,7 +96508,7 @@ __15: ; rc = execSqlF(tls, db, pzErrMsg, - ts+21109, + ts+21156, libc.VaList(bp+32, zDbMain)) if !(rc != 0) { goto __16 @@ -96896,11 +96937,11 @@ if pEnd != 0 { (*Parse)(unsafe.Pointer(pParse)).FsNameToken.Fn = uint32(int32((int64((*Token)(unsafe.Pointer(pEnd)).Fz)-int64((*Parse)(unsafe.Pointer(pParse)).FsNameToken.Fz))/1)) + (*Token)(unsafe.Pointer(pEnd)).Fn } - zStmt = Xsqlite3MPrintf(tls, db, ts+21239, libc.VaList(bp, pParse+272)) + zStmt = Xsqlite3MPrintf(tls, db, ts+21286, libc.VaList(bp, pParse+272)) iDb = Xsqlite3SchemaToIndex(tls, db, (*Table)(unsafe.Pointer(pTab)).FpSchema) Xsqlite3NestedParse(tls, pParse, - ts+21263, + ts+21310, libc.VaList(bp+8, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName, (*Table)(unsafe.Pointer(pTab)).FzName, (*Table)(unsafe.Pointer(pTab)).FzName, @@ -96910,7 +96951,7 @@ Xsqlite3ChangeCookie(tls, pParse, iDb) Xsqlite3VdbeAddOp0(tls, v, OP_Expire) - zWhere = Xsqlite3MPrintf(tls, db, ts+21362, libc.VaList(bp+48, (*Table)(unsafe.Pointer(pTab)).FzName, zStmt)) + zWhere = Xsqlite3MPrintf(tls, db, ts+21409, libc.VaList(bp+48, 
(*Table)(unsafe.Pointer(pTab)).FzName, zStmt)) Xsqlite3VdbeAddParseSchemaOp(tls, v, iDb, zWhere, uint16(0)) Xsqlite3DbFree(tls, db, zStmt) @@ -96971,7 +97012,7 @@ for pCtx = (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx; pCtx != 0; pCtx = (*VtabCtx)(unsafe.Pointer(pCtx)).FpPrior { if (*VtabCtx)(unsafe.Pointer(pCtx)).FpTab == pTab { *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, - ts+21381, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName)) + ts+21428, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName)) return SQLITE_LOCKED } } @@ -96999,9 +97040,11 @@ (*VtabCtx)(unsafe.Pointer(bp + 32)).FpPrior = (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx (*VtabCtx)(unsafe.Pointer(bp + 32)).FbDeclared = 0 (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx = bp + 32 + (*Table)(unsafe.Pointer(pTab)).FnTabRef++ rc = (*struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, uintptr, uintptr) int32 })(unsafe.Pointer(&struct{ uintptr }{xConstruct})).f(tls, db, (*Module)(unsafe.Pointer(pMod)).FpAux, nArg, azArg, pVTable+16, bp+64) + Xsqlite3DeleteTable(tls, db, pTab) (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx = (*VtabCtx)(unsafe.Pointer(bp + 32)).FpPrior if rc == SQLITE_NOMEM { Xsqlite3OomFault(tls, db) @@ -97009,7 +97052,7 @@ if SQLITE_OK != rc { if *(*uintptr)(unsafe.Pointer(bp + 64)) == uintptr(0) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+21423, libc.VaList(bp+8, zModuleName)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+21470, libc.VaList(bp+8, zModuleName)) } else { *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+3663, libc.VaList(bp+16, *(*uintptr)(unsafe.Pointer(bp + 64)))) Xsqlite3_free(tls, *(*uintptr)(unsafe.Pointer(bp + 64))) @@ -97021,7 +97064,7 @@ (*Module)(unsafe.Pointer(pMod)).FnRefModule++ (*VTable)(unsafe.Pointer(pVTable)).FnRef = 1 if (*VtabCtx)(unsafe.Pointer(bp+32)).FbDeclared == 0 { - var zFormat uintptr = ts + 21453 + var zFormat uintptr = ts + 21500 *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, zFormat, libc.VaList(bp+24, (*Table)(unsafe.Pointer(pTab)).FzName)) Xsqlite3VtabUnlock(tls, pVTable) rc = SQLITE_ERROR @@ -97095,7 +97138,7 @@ if !(pMod != 0) { var zModule uintptr = *(*uintptr)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pTab + 64 + 8)))) - Xsqlite3ErrorMsg(tls, pParse, ts+21499, libc.VaList(bp, zModule)) + Xsqlite3ErrorMsg(tls, pParse, ts+21546, libc.VaList(bp, zModule)) rc = SQLITE_ERROR } else { *(*uintptr)(unsafe.Pointer(bp + 16)) = uintptr(0) @@ -97153,7 +97196,7 @@ pMod = Xsqlite3HashFind(tls, db+576, zMod) if pMod == uintptr(0) || (*Sqlite3_module)(unsafe.Pointer((*Module)(unsafe.Pointer(pMod)).FpModule)).FxCreate == uintptr(0) || (*Sqlite3_module)(unsafe.Pointer((*Module)(unsafe.Pointer(pMod)).FpModule)).FxDestroy == uintptr(0) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+21499, libc.VaList(bp, zMod)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+21546, libc.VaList(bp, zMod)) rc = SQLITE_ERROR } else { rc = vtabCallConstructor(tls, db, pTab, pMod, (*Sqlite3_module)(unsafe.Pointer((*Module)(unsafe.Pointer(pMod)).FpModule)).FxCreate, pzErr) @@ -97187,7 +97230,7 @@ if !(pCtx != 0) || (*VtabCtx)(unsafe.Pointer(pCtx)).FbDeclared != 0 { Xsqlite3Error(tls, db, SQLITE_MISUSE) Xsqlite3_mutex_leave(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) - return Xsqlite3MisuseError(tls, 151030) + return Xsqlite3MisuseError(tls, 151102) } pTab = (*VtabCtx)(unsafe.Pointer(pCtx)).FpTab @@ -97640,7 +97683,7 @@ Xsqlite3_mutex_enter(tls, 
(*Sqlite3)(unsafe.Pointer(db)).Fmutex) p = (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx if !(p != 0) { - rc = Xsqlite3MisuseError(tls, 151521) + rc = Xsqlite3MisuseError(tls, 151593) } else { ap = va switch op { @@ -97667,7 +97710,7 @@ fallthrough default: { - rc = Xsqlite3MisuseError(tls, 151539) + rc = Xsqlite3MisuseError(tls, 151611) break } @@ -97898,7 +97941,7 @@ func explainIndexColumnName(tls *libc.TLS, pIdx uintptr, i int32) uintptr { i = int32(*(*I16)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx)).FaiColumn + uintptr(i)*2))) if i == -2 { - return ts + 21518 + return ts + 21565 } if i == -1 { return ts + 16267 @@ -97910,11 +97953,11 @@ var i int32 if bAnd != 0 { - Xsqlite3_str_append(tls, pStr, ts+21525, 5) + Xsqlite3_str_append(tls, pStr, ts+21572, 5) } if nTerm > 1 { - Xsqlite3_str_append(tls, pStr, ts+21531, 1) + Xsqlite3_str_append(tls, pStr, ts+21578, 1) } for i = 0; i < nTerm; i++ { if i != 0 { @@ -97929,7 +97972,7 @@ Xsqlite3_str_append(tls, pStr, zOp, 1) if nTerm > 1 { - Xsqlite3_str_append(tls, pStr, ts+21531, 1) + Xsqlite3_str_append(tls, pStr, ts+21578, 1) } for i = 0; i < nTerm; i++ { if i != 0 { @@ -97955,27 +97998,27 @@ if int32(nEq) == 0 && (*WhereLoop)(unsafe.Pointer(pLoop)).FwsFlags&U32(WHERE_BTM_LIMIT|WHERE_TOP_LIMIT) == U32(0) { return } - Xsqlite3_str_append(tls, pStr, ts+21533, 2) + Xsqlite3_str_append(tls, pStr, ts+21580, 2) for i = 0; i < int32(nEq); i++ { var z uintptr = explainIndexColumnName(tls, pIndex, i) if i != 0 { - Xsqlite3_str_append(tls, pStr, ts+21525, 5) + Xsqlite3_str_append(tls, pStr, ts+21572, 5) } Xsqlite3_str_appendf(tls, pStr, func() uintptr { if i >= int32(nSkip) { - return ts + 21536 + return ts + 21583 } - return ts + 21541 + return ts + 21588 }(), libc.VaList(bp, z)) } j = i if (*WhereLoop)(unsafe.Pointer(pLoop)).FwsFlags&U32(WHERE_BTM_LIMIT) != 0 { - explainAppendTerm(tls, pStr, pIndex, int32(*(*U16)(unsafe.Pointer(pLoop + 24 + 2))), j, i, ts+21549) + explainAppendTerm(tls, pStr, pIndex, int32(*(*U16)(unsafe.Pointer(pLoop + 24 + 2))), j, i, ts+21596) i = 1 } if (*WhereLoop)(unsafe.Pointer(pLoop)).FwsFlags&U32(WHERE_TOP_LIMIT) != 0 { - explainAppendTerm(tls, pStr, pIndex, int32(*(*U16)(unsafe.Pointer(pLoop + 24 + 4))), j, i, ts+21551) + explainAppendTerm(tls, pStr, pIndex, int32(*(*U16)(unsafe.Pointer(pLoop + 24 + 4))), j, i, ts+21598) } Xsqlite3_str_append(tls, pStr, ts+4957, 1) } @@ -98018,11 +98061,11 @@ Xsqlite3StrAccumInit(tls, bp+64, db, bp+96, int32(unsafe.Sizeof([100]uint8{})), SQLITE_MAX_LENGTH) (*StrAccum)(unsafe.Pointer(bp + 64)).FprintfFlags = U8(SQLITE_PRINTF_INTERNAL) - Xsqlite3_str_appendf(tls, bp+64, ts+21553, libc.VaList(bp, func() uintptr { + Xsqlite3_str_appendf(tls, bp+64, ts+21600, libc.VaList(bp, func() uintptr { if isSearch != 0 { - return ts + 21559 + return ts + 21606 } - return ts + 21566 + return ts + 21613 }(), pItem)) if flags&U32(WHERE_IPK|WHERE_VIRTUALTABLE) == U32(0) { var zFmt uintptr = uintptr(0) @@ -98035,40 +98078,40 @@ zFmt = ts + 10976 } } else if flags&U32(WHERE_PARTIALIDX) != 0 { - zFmt = ts + 21571 + zFmt = ts + 21618 } else if flags&U32(WHERE_AUTO_INDEX) != 0 { - zFmt = ts + 21604 + zFmt = ts + 21651 } else if flags&U32(WHERE_IDX_ONLY) != 0 { - zFmt = ts + 21629 + zFmt = ts + 21676 } else { - zFmt = ts + 21647 + zFmt = ts + 21694 } if zFmt != 0 { - Xsqlite3_str_append(tls, bp+64, ts+21656, 7) + Xsqlite3_str_append(tls, bp+64, ts+21703, 7) Xsqlite3_str_appendf(tls, bp+64, zFmt, libc.VaList(bp+16, (*Index)(unsafe.Pointer(pIdx)).FzName)) explainIndexRange(tls, bp+64, pLoop) } } else if 
flags&U32(WHERE_IPK) != U32(0) && flags&U32(WHERE_CONSTRAINT) != U32(0) { var cRangeOp uint8 var zRowid uintptr = ts + 16267 - Xsqlite3_str_appendf(tls, bp+64, ts+21664, libc.VaList(bp+24, zRowid)) + Xsqlite3_str_appendf(tls, bp+64, ts+21711, libc.VaList(bp+24, zRowid)) if flags&U32(WHERE_COLUMN_EQ|WHERE_COLUMN_IN) != 0 { cRangeOp = uint8('=') } else if flags&U32(WHERE_BOTH_LIMIT) == U32(WHERE_BOTH_LIMIT) { - Xsqlite3_str_appendf(tls, bp+64, ts+21695, libc.VaList(bp+32, zRowid)) + Xsqlite3_str_appendf(tls, bp+64, ts+21742, libc.VaList(bp+32, zRowid)) cRangeOp = uint8('<') } else if flags&U32(WHERE_BTM_LIMIT) != 0 { cRangeOp = uint8('>') } else { cRangeOp = uint8('<') } - Xsqlite3_str_appendf(tls, bp+64, ts+21705, libc.VaList(bp+40, int32(cRangeOp))) + Xsqlite3_str_appendf(tls, bp+64, ts+21752, libc.VaList(bp+40, int32(cRangeOp))) } else if flags&U32(WHERE_VIRTUALTABLE) != U32(0) { - Xsqlite3_str_appendf(tls, bp+64, ts+21710, + Xsqlite3_str_appendf(tls, bp+64, ts+21757, libc.VaList(bp+48, *(*int32)(unsafe.Pointer(pLoop + 24)), *(*uintptr)(unsafe.Pointer(pLoop + 24 + 16)))) } if int32((*SrcItem)(unsafe.Pointer(pItem)).Ffg.Fjointype)&JT_LEFT != 0 { - Xsqlite3_str_appendf(tls, bp+64, ts+21737, 0) + Xsqlite3_str_appendf(tls, bp+64, ts+21784, 0) } zMsg = Xsqlite3StrAccumFinish(tls, bp+64) @@ -98100,22 +98143,22 @@ Xsqlite3StrAccumInit(tls, bp+24, db, bp+56, int32(unsafe.Sizeof([100]uint8{})), SQLITE_MAX_LENGTH) (*StrAccum)(unsafe.Pointer(bp + 24)).FprintfFlags = U8(SQLITE_PRINTF_INTERNAL) - Xsqlite3_str_appendf(tls, bp+24, ts+21748, libc.VaList(bp, pItem)) + Xsqlite3_str_appendf(tls, bp+24, ts+21795, libc.VaList(bp, pItem)) pLoop = (*WhereLevel)(unsafe.Pointer(pLevel)).FpWLoop if (*WhereLoop)(unsafe.Pointer(pLoop)).FwsFlags&U32(WHERE_IPK) != 0 { var pTab uintptr = (*SrcItem)(unsafe.Pointer(pItem)).FpTab if int32((*Table)(unsafe.Pointer(pTab)).FiPKey) >= 0 { - Xsqlite3_str_appendf(tls, bp+24, ts+21536, libc.VaList(bp+8, (*Column)(unsafe.Pointer((*Table)(unsafe.Pointer(pTab)).FaCol+uintptr((*Table)(unsafe.Pointer(pTab)).FiPKey)*24)).FzCnName)) + Xsqlite3_str_appendf(tls, bp+24, ts+21583, libc.VaList(bp+8, (*Column)(unsafe.Pointer((*Table)(unsafe.Pointer(pTab)).FaCol+uintptr((*Table)(unsafe.Pointer(pTab)).FiPKey)*24)).FzCnName)) } else { - Xsqlite3_str_appendf(tls, bp+24, ts+21769, 0) + Xsqlite3_str_appendf(tls, bp+24, ts+21816, 0) } } else { for i = int32((*WhereLoop)(unsafe.Pointer(pLoop)).FnSkip); i < int32(*(*U16)(unsafe.Pointer(pLoop + 24))); i++ { var z uintptr = explainIndexColumnName(tls, *(*uintptr)(unsafe.Pointer(pLoop + 24 + 8)), i) if i > int32((*WhereLoop)(unsafe.Pointer(pLoop)).FnSkip) { - Xsqlite3_str_append(tls, bp+24, ts+21525, 5) + Xsqlite3_str_append(tls, bp+24, ts+21572, 5) } - Xsqlite3_str_appendf(tls, bp+24, ts+21536, libc.VaList(bp+16, z)) + Xsqlite3_str_appendf(tls, bp+24, ts+21583, libc.VaList(bp+16, z)) } } Xsqlite3_str_append(tls, bp+24, ts+4957, 1) @@ -99712,7 +99755,7 @@ ; __126: ; - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21777, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21824, 0) ii = 0 __135: if !(ii < (*WhereClause)(unsafe.Pointer(pOrWc)).FnTerm) { @@ -99740,7 +99783,7 @@ pOrExpr = pAndExpr __140: ; - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21792, libc.VaList(bp, ii+1)) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21839, libc.VaList(bp, ii+1)) pSubWInfo = Xsqlite3WhereBegin(tls, pParse, pOrTab, pOrExpr, uintptr(0), uintptr(0), uintptr(0), uint16(WHERE_OR_SUBCLAUSE), iCovCur) @@ -100258,7 +100301,7 @@ var mAll Bitmask = uint64(0) var k int32 - 
Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21801, libc.VaList(bp, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pTabItem)).FpTab)).FzName)) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21848, libc.VaList(bp, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pTabItem)).FpTab)).FzName)) for k = 0; k < iLevel; k++ { var iIdxCur int32 @@ -100619,7 +100662,7 @@ {FzOp: ts + 16116, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_MATCH)}, {FzOp: ts + 15447, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_GLOB)}, {FzOp: ts + 14967, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_LIKE)}, - {FzOp: ts + 21815, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_REGEXP)}, + {FzOp: ts + 21862, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_REGEXP)}, } func transferJoinMarkings(tls *libc.TLS, pDerived uintptr, pBase uintptr) { @@ -101109,12 +101152,12 @@ extraRight = x - uint64(1) if prereqAll>>1 >= x { - Xsqlite3ErrorMsg(tls, pParse, ts+21822, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+21869, 0) return } } else if prereqAll>>1 >= x { if (*SrcList)(unsafe.Pointer(pSrc)).FnSrc > 0 && int32((*SrcItem)(unsafe.Pointer(pSrc+8)).Ffg.Fjointype)&JT_LTORJ != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+21822, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+21869, 0) return } *(*U32)(unsafe.Pointer(pExpr + 4)) &= libc.Uint32FromInt32(libc.CplInt32(EP_InnerON)) @@ -101193,7 +101236,7 @@ !((*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_OuterON) != U32(0)) && 0 == Xsqlite3ExprCanBeNull(tls, pLeft) { (*Expr)(unsafe.Pointer(pExpr)).Fop = U8(TK_TRUEFALSE) - *(*uintptr)(unsafe.Pointer(pExpr + 8)) = ts + 6769 + *(*uintptr)(unsafe.Pointer(pExpr + 8)) = ts + 7699 *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_IsFalse) (*WhereTerm)(unsafe.Pointer(pTerm)).FprereqAll = uint64(0) (*WhereTerm)(unsafe.Pointer(pTerm)).FeOperator = U16(0) @@ -101287,7 +101330,7 @@ } zCollSeqName = func() uintptr { if *(*int32)(unsafe.Pointer(bp + 20)) != 0 { - return ts + 21863 + return ts + 21910 } return uintptr(unsafe.Pointer(&Xsqlite3StrBINARY)) }() @@ -101663,7 +101706,7 @@ k++ } if k >= int32((*Table)(unsafe.Pointer(pTab)).FnCol) { - Xsqlite3ErrorMsg(tls, pParse, ts+21870, + Xsqlite3ErrorMsg(tls, pParse, ts+21917, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName, j)) return } @@ -101679,7 +101722,7 @@ pRhs = Xsqlite3PExpr(tls, pParse, TK_UPLUS, Xsqlite3ExprDup(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, (*ExprList_item)(unsafe.Pointer(pArgs+8+uintptr(j)*32)).FpExpr, 0), uintptr(0)) pTerm = Xsqlite3PExpr(tls, pParse, TK_EQ, pColRef, pRhs) - if int32((*SrcItem)(unsafe.Pointer(pItem)).Ffg.Fjointype)&(JT_LEFT|JT_LTORJ) != 0 { + if int32((*SrcItem)(unsafe.Pointer(pItem)).Ffg.Fjointype)&(JT_LEFT|JT_LTORJ|JT_RIGHT) != 0 { joinType = U32(EP_OuterON) } else { joinType = U32(EP_InnerON) @@ -102397,7 +102440,7 @@ goto __6 } Xsqlite3_log(tls, SQLITE_WARNING|int32(1)<<8, - ts+21906, libc.VaList(bp, (*Table)(unsafe.Pointer(pTable)).FzName, + ts+21953, libc.VaList(bp, (*Table)(unsafe.Pointer(pTable)).FzName, (*Column)(unsafe.Pointer((*Table)(unsafe.Pointer(pTable)).FaCol+uintptr(iCol)*24)).FzCnName)) sentWarning = U8(1) __6: @@ -102468,7 +102511,7 @@ __14: ; *(*uintptr)(unsafe.Pointer(pLoop + 24 + 8)) = pIdx - (*Index)(unsafe.Pointer(pIdx)).FzName = ts + 21932 + (*Index)(unsafe.Pointer(pIdx)).FzName = ts + 21979 (*Index)(unsafe.Pointer(pIdx)).FpTable = pTable n = 0 idxCols = uint64(0) @@ -102642,6 +102685,10 @@ var v uintptr = (*Parse)(unsafe.Pointer(pParse)).FpVdbe var pLoop uintptr = (*WhereLevel)(unsafe.Pointer(pLevel)).FpWLoop var iCur int32 + var saved_pIdxEpr uintptr + + saved_pIdxEpr = 
(*Parse)(unsafe.Pointer(pParse)).FpIdxEpr + (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr = uintptr(0) addrOnce = Xsqlite3VdbeAddOp0(tls, v, OP_Once) for __ccgo := true; __ccgo; __ccgo = iLevel < int32((*WhereInfo)(unsafe.Pointer(pWInfo)).FnLevel) { @@ -102685,9 +102732,7 @@ var r1 int32 = Xsqlite3GetTempRange(tls, pParse, n) var jj int32 for jj = 0; jj < n; jj++ { - var iCol int32 = int32(*(*I16)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx)).FaiColumn + uintptr(jj)*2))) - - Xsqlite3ExprCodeGetColumnOfTable(tls, v, (*Index)(unsafe.Pointer(pIdx)).FpTable, iCur, iCol, r1+jj) + Xsqlite3ExprCodeLoadIndexColumn(tls, pParse, pIdx, iCur, jj, r1+jj) } Xsqlite3VdbeAddOp4Int(tls, v, OP_FilterAdd, (*WhereLevel)(unsafe.Pointer(pLevel)).FregFilter, 0, r1, n) Xsqlite3ReleaseTempRange(tls, pParse, r1, n) @@ -102721,6 +102766,7 @@ } } Xsqlite3VdbeJumpHere(tls, v, addrOnce) + (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr = saved_pIdxEpr } func allocateIndexInfo(tls *libc.TLS, pWInfo uintptr, pWC uintptr, mUnusable Bitmask, pSrc uintptr, pmNoOmit uintptr) uintptr { @@ -102979,11 +103025,16 @@ _ = pParse + if !((*Table)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx)).FpTable)).FtabFlags&U32(TF_WithoutRowid) == U32(0)) && int32(*(*uint16)(unsafe.Pointer(pIdx + 100))&0xc000>>14) == SQLITE_IDXTYPE_PRIMARYKEY { + nField = int32((*Index)(unsafe.Pointer(pIdx)).FnKeyCol) + } else { + nField = int32((*Index)(unsafe.Pointer(pIdx)).FnColumn) + } nField = func() int32 { - if int32((*UnpackedRecord)(unsafe.Pointer(pRec)).FnField) < (*Index)(unsafe.Pointer(pIdx)).FnSample { + if int32((*UnpackedRecord)(unsafe.Pointer(pRec)).FnField) < nField { return int32((*UnpackedRecord)(unsafe.Pointer(pRec)).FnField) } - return (*Index)(unsafe.Pointer(pIdx)).FnSample + return nField }() iCol = 0 iSample = (*Index)(unsafe.Pointer(pIdx)).FnSample * nField @@ -104564,7 +104615,7 @@ j >= (*WhereClause)(unsafe.Pointer(pWC)).FnTerm || *(*uintptr)(unsafe.Pointer((*WhereLoop)(unsafe.Pointer(pNew)).FaLTerm + uintptr(iTerm)*8)) != uintptr(0) || int32((*sqlite3_index_constraint)(unsafe.Pointer(pIdxCons)).Fusable) == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+21943, libc.VaList(bp, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pSrc)).FpTab)).FzName)) + Xsqlite3ErrorMsg(tls, pParse, ts+21990, libc.VaList(bp, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pSrc)).FpTab)).FzName)) return SQLITE_ERROR } @@ -104622,7 +104673,7 @@ (*WhereLoop)(unsafe.Pointer(pNew)).FnLTerm = U16(mxTerm + 1) for i = 0; i <= mxTerm; i++ { if *(*uintptr)(unsafe.Pointer((*WhereLoop)(unsafe.Pointer(pNew)).FaLTerm + uintptr(i)*8)) == uintptr(0) { - Xsqlite3ErrorMsg(tls, pParse, ts+21943, libc.VaList(bp+8, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pSrc)).FpTab)).FzName)) + Xsqlite3ErrorMsg(tls, pParse, ts+21990, libc.VaList(bp+8, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pSrc)).FpTab)).FzName)) return SQLITE_ERROR } @@ -105020,7 +105071,7 @@ mPrior = mPrior | (*WhereLoop)(unsafe.Pointer(pNew)).FmaskSelf if rc != 0 || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { if rc == SQLITE_DONE { - Xsqlite3_log(tls, SQLITE_WARNING, ts+21969, 0) + Xsqlite3_log(tls, SQLITE_WARNING, ts+22016, 0) rc = SQLITE_OK } else { goto __3 @@ -105627,7 +105678,7 @@ } if nFrom == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+22004, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+22051, 0) Xsqlite3DbFreeNN(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pSpace) return SQLITE_ERROR } @@ -105662,6 +105713,10 @@ if int32((*WherePath)(unsafe.Pointer(pFrom)).FisOrdered) == 
(*ExprList)(unsafe.Pointer((*WhereInfo)(unsafe.Pointer(pWInfo)).FpOrderBy)).FnExpr { (*WhereInfo)(unsafe.Pointer(pWInfo)).FeDistinct = U8(WHERE_DISTINCT_ORDERED) } + if (*Select)(unsafe.Pointer((*WhereInfo)(unsafe.Pointer(pWInfo)).FpSelect)).FpOrderBy != 0 && + int32((*WhereInfo)(unsafe.Pointer(pWInfo)).FnOBSat) > (*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer((*WhereInfo)(unsafe.Pointer(pWInfo)).FpSelect)).FpOrderBy)).FnExpr { + (*WhereInfo)(unsafe.Pointer(pWInfo)).FnOBSat = I8((*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer((*WhereInfo)(unsafe.Pointer(pWInfo)).FpSelect)).FpOrderBy)).FnExpr) + } } else { (*WhereInfo)(unsafe.Pointer(pWInfo)).FrevMask = (*WherePath)(unsafe.Pointer(pFrom)).FrevLoop if int32((*WhereInfo)(unsafe.Pointer(pWInfo)).FnOBSat) <= 0 { @@ -105956,6 +106011,9 @@ (*IndexedExpr)(unsafe.Pointer(p)).FiIdxCur = iIdxCur (*IndexedExpr)(unsafe.Pointer(p)).FiIdxCol = i (*IndexedExpr)(unsafe.Pointer(p)).FbMaybeNullRow = U8(bMaybeNullRow) + if Xsqlite3IndexAffinityStr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pIdx) != 0 { + (*IndexedExpr)(unsafe.Pointer(p)).Faff = *(*uint8)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx)).FzColAff + uintptr(i))) + } (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr = p if (*IndexedExpr)(unsafe.Pointer(p)).FpIENext == uintptr(0) { Xsqlite3ParserAddCleanup(tls, pParse, *(*uintptr)(unsafe.Pointer(&struct { @@ -106108,7 +106166,7 @@ if !((*SrcList)(unsafe.Pointer(pTabList)).FnSrc > int32(uint64(unsafe.Sizeof(Bitmask(0)))*uint64(8))) { goto __2 } - Xsqlite3ErrorMsg(tls, pParse, ts+22022, libc.VaList(bp, int32(uint64(unsafe.Sizeof(Bitmask(0)))*uint64(8)))) + Xsqlite3ErrorMsg(tls, pParse, ts+22069, libc.VaList(bp, int32(uint64(unsafe.Sizeof(Bitmask(0)))*uint64(8)))) return uintptr(0) __2: ; @@ -106172,7 +106230,7 @@ (*WhereInfo)(unsafe.Pointer(pWInfo)).FeDistinct = U8(WHERE_DISTINCT_UNIQUE) __7: ; - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+22050, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+22097, 0) goto __5 __4: ii = 0 @@ -107054,7 +107112,7 @@ error_out: Xsqlite3_result_error(tls, - pCtx, ts+22068, -1) + pCtx, ts+22115, -1) } func nth_valueFinalizeFunc(tls *libc.TLS, pCtx uintptr) { @@ -107187,7 +107245,7 @@ (*NtileCtx)(unsafe.Pointer(p)).FnParam = Xsqlite3_value_int64(tls, *(*uintptr)(unsafe.Pointer(apArg))) if (*NtileCtx)(unsafe.Pointer(p)).FnParam <= int64(0) { Xsqlite3_result_error(tls, - pCtx, ts+22124, -1) + pCtx, ts+22171, -1) } } (*NtileCtx)(unsafe.Pointer(p)).FnTotal++ @@ -107277,17 +107335,17 @@ } } -var row_numberName = *(*[11]uint8)(unsafe.Pointer(ts + 22169)) -var dense_rankName = *(*[11]uint8)(unsafe.Pointer(ts + 22180)) -var rankName = *(*[5]uint8)(unsafe.Pointer(ts + 22191)) -var percent_rankName = *(*[13]uint8)(unsafe.Pointer(ts + 22196)) -var cume_distName = *(*[10]uint8)(unsafe.Pointer(ts + 22209)) -var ntileName = *(*[6]uint8)(unsafe.Pointer(ts + 22219)) -var last_valueName = *(*[11]uint8)(unsafe.Pointer(ts + 22225)) -var nth_valueName = *(*[10]uint8)(unsafe.Pointer(ts + 22236)) -var first_valueName = *(*[12]uint8)(unsafe.Pointer(ts + 22246)) -var leadName = *(*[5]uint8)(unsafe.Pointer(ts + 22258)) -var lagName = *(*[4]uint8)(unsafe.Pointer(ts + 22263)) +var row_numberName = *(*[11]uint8)(unsafe.Pointer(ts + 22216)) +var dense_rankName = *(*[11]uint8)(unsafe.Pointer(ts + 22227)) +var rankName = *(*[5]uint8)(unsafe.Pointer(ts + 22238)) +var percent_rankName = *(*[13]uint8)(unsafe.Pointer(ts + 22243)) +var cume_distName = *(*[10]uint8)(unsafe.Pointer(ts + 22256)) +var ntileName = *(*[6]uint8)(unsafe.Pointer(ts + 22266)) 
+var last_valueName = *(*[11]uint8)(unsafe.Pointer(ts + 22272)) +var nth_valueName = *(*[10]uint8)(unsafe.Pointer(ts + 22283)) +var first_valueName = *(*[12]uint8)(unsafe.Pointer(ts + 22293)) +var leadName = *(*[5]uint8)(unsafe.Pointer(ts + 22305)) +var lagName = *(*[4]uint8)(unsafe.Pointer(ts + 22310)) func noopStepFunc(tls *libc.TLS, p uintptr, n int32, a uintptr) { _ = p @@ -107333,7 +107391,7 @@ } } if p == uintptr(0) { - Xsqlite3ErrorMsg(tls, pParse, ts+22267, libc.VaList(bp, zName)) + Xsqlite3ErrorMsg(tls, pParse, ts+22314, libc.VaList(bp, zName)) } return p } @@ -107377,12 +107435,12 @@ ((*Window)(unsafe.Pointer(pWin)).FpStart != 0 || (*Window)(unsafe.Pointer(pWin)).FpEnd != 0) && ((*Window)(unsafe.Pointer(pWin)).FpOrderBy == uintptr(0) || (*ExprList)(unsafe.Pointer((*Window)(unsafe.Pointer(pWin)).FpOrderBy)).FnExpr != 1) { Xsqlite3ErrorMsg(tls, pParse, - ts+22286, 0) + ts+22333, 0) } else if (*FuncDef)(unsafe.Pointer(pFunc)).FfuncFlags&U32(SQLITE_FUNC_WINDOW) != 0 { var db uintptr = (*Parse)(unsafe.Pointer(pParse)).Fdb if (*Window)(unsafe.Pointer(pWin)).FpFilter != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+22357, 0) + ts+22404, 0) } else { *(*[8]WindowUpdate)(unsafe.Pointer(bp)) = [8]WindowUpdate{ {FzFunc: uintptr(unsafe.Pointer(&row_numberName)), FeFrmType: TK_ROWS, FeStart: TK_UNBOUNDED, FeEnd: TK_CURRENT}, @@ -107611,7 +107669,7 @@ if int32((*Expr)(unsafe.Pointer(pExpr)).Fop) == TK_AGG_FUNCTION && (*Expr)(unsafe.Pointer(pExpr)).FpAggInfo == uintptr(0) { Xsqlite3ErrorMsg(tls, (*Walker)(unsafe.Pointer(pWalker)).FpParse, - ts+22420, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(pExpr + 8)))) + ts+22467, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(pExpr + 8)))) } return WRC_Continue } @@ -107727,7 +107785,7 @@ if *(*uintptr)(unsafe.Pointer(bp + 48)) == uintptr(0) { *(*uintptr)(unsafe.Pointer(bp + 48)) = Xsqlite3ExprListAppend(tls, pParse, uintptr(0), - Xsqlite3Expr(tls, db, TK_INTEGER, ts+7530)) + Xsqlite3Expr(tls, db, TK_INTEGER, ts+7519)) } pSub = Xsqlite3SelectNew(tls, @@ -107842,7 +107900,7 @@ eStart == TK_FOLLOWING && (eEnd == TK_PRECEDING || eEnd == TK_CURRENT)) { goto __2 } - Xsqlite3ErrorMsg(tls, pParse, ts+22446, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+22493, 0) goto windowAllocErr __2: ; @@ -107907,15 +107965,15 @@ var zErr uintptr = uintptr(0) if (*Window)(unsafe.Pointer(pWin)).FpPartition != 0 { - zErr = ts + 22478 + zErr = ts + 22525 } else if (*Window)(unsafe.Pointer(pExist)).FpOrderBy != 0 && (*Window)(unsafe.Pointer(pWin)).FpOrderBy != 0 { - zErr = ts + 22495 + zErr = ts + 22542 } else if int32((*Window)(unsafe.Pointer(pExist)).FbImplicitFrame) == 0 { - zErr = ts + 22511 + zErr = ts + 22558 } if zErr != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+22531, libc.VaList(bp, zErr, (*Window)(unsafe.Pointer(pWin)).FzBase)) + ts+22578, libc.VaList(bp, zErr, (*Window)(unsafe.Pointer(pWin)).FzBase)) } else { (*Window)(unsafe.Pointer(pWin)).FpPartition = Xsqlite3ExprListDup(tls, db, (*Window)(unsafe.Pointer(pExist)).FpPartition, 0) if (*Window)(unsafe.Pointer(pExist)).FpOrderBy != 0 { @@ -107936,7 +107994,7 @@ (*Window)(unsafe.Pointer(pWin)).FpOwner = p if (*Expr)(unsafe.Pointer(p)).Fflags&U32(EP_Distinct) != 0 && int32((*Window)(unsafe.Pointer(pWin)).FeFrmType) != TK_FILTER { Xsqlite3ErrorMsg(tls, pParse, - ts+22564, 0) + ts+22611, 0) } } else { Xsqlite3WindowDelete(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pWin) @@ -108092,11 +108150,11 @@ } var azErr = [5]uintptr{ - ts + 22611, - ts + 22664, - ts + 22068, - ts + 22715, - ts + 22767, + ts + 22658, + ts + 22711, + ts + 22115, + ts + 22762, 
+ ts + 22814, } var aOp1 = [5]int32{OP_Ge, OP_Ge, OP_Gt, OP_Ge, OP_Ge} @@ -109491,19 +109549,19 @@ } cnt++ if (*Select)(unsafe.Pointer(pLoop)).FpOrderBy != 0 || (*Select)(unsafe.Pointer(pLoop)).FpLimit != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+22817, + Xsqlite3ErrorMsg(tls, pParse, ts+22864, libc.VaList(bp, func() uintptr { if (*Select)(unsafe.Pointer(pLoop)).FpOrderBy != uintptr(0) { - return ts + 22859 + return ts + 22906 } - return ts + 22868 + return ts + 22915 }(), Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(pNext)).Fop)))) break } } if (*Select)(unsafe.Pointer(p)).FselFlags&U32(SF_MultiValue) == U32(0) && libc.AssignInt32(&mxSelect, *(*int32)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb + 136 + 4*4))) > 0 && cnt > mxSelect { - Xsqlite3ErrorMsg(tls, pParse, ts+22874, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+22921, 0) } } @@ -109571,7 +109629,7 @@ var p uintptr = Xsqlite3ExprListAppend(tls, pParse, pPrior, uintptr(0)) if (hasCollate != 0 || sortOrder != -1) && int32((*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).Finit.Fbusy) == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+22908, + Xsqlite3ErrorMsg(tls, pParse, ts+22955, libc.VaList(bp, (*Token)(unsafe.Pointer(pIdToken)).Fn, (*Token)(unsafe.Pointer(pIdToken)).Fz)) } Xsqlite3ExprListSetName(tls, pParse, p, pIdToken, 1) @@ -110668,7 +110726,7 @@ yy_pop_parser_stack(tls, yypParser) } - Xsqlite3ErrorMsg(tls, pParse, ts+22946, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+22993, 0) (*YyParser)(unsafe.Pointer(yypParser)).FpParse = pParse } @@ -111647,7 +111705,7 @@ *(*U32)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-1)*24 + 8)) = U32(TF_WithoutRowid | TF_NoVisibleRowid) } else { *(*U32)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-1)*24 + 8)) = U32(0) - Xsqlite3ErrorMsg(tls, pParse, ts+22968, libc.VaList(bp, (*Token)(unsafe.Pointer(yymsp+8)).Fn, (*Token)(unsafe.Pointer(yymsp+8)).Fz)) + Xsqlite3ErrorMsg(tls, pParse, ts+23015, libc.VaList(bp, (*Token)(unsafe.Pointer(yymsp+8)).Fn, (*Token)(unsafe.Pointer(yymsp+8)).Fz)) } } break @@ -111657,7 +111715,7 @@ *(*U32)(unsafe.Pointer(bp + 40)) = U32(TF_Strict) } else { *(*U32)(unsafe.Pointer(bp + 40)) = U32(0) - Xsqlite3ErrorMsg(tls, pParse, ts+22968, libc.VaList(bp+16, (*Token)(unsafe.Pointer(yymsp+8)).Fn, (*Token)(unsafe.Pointer(yymsp+8)).Fz)) + Xsqlite3ErrorMsg(tls, pParse, ts+23015, libc.VaList(bp+16, (*Token)(unsafe.Pointer(yymsp+8)).Fn, (*Token)(unsafe.Pointer(yymsp+8)).Fz)) } } *(*U32)(unsafe.Pointer(yymsp + 8)) = *(*U32)(unsafe.Pointer(bp + 40)) @@ -112400,7 +112458,7 @@ case uint32(157): { Xsqlite3SrcListIndexedBy(tls, pParse, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-5)*24 + 8)), yymsp+libc.UintptrFromInt32(-4)*24+8) - Xsqlite3ExprListCheckLength(tls, pParse, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-2)*24 + 8)), ts+22995) + Xsqlite3ExprListCheckLength(tls, pParse, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-2)*24 + 8)), ts+23042) if *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-1)*24 + 8)) != 0 { var pFromClause uintptr = *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-1)*24 + 8)) if (*SrcList)(unsafe.Pointer(pFromClause)).FnSrc > 1 { @@ -112564,7 +112622,7 @@ *(*Token)(unsafe.Pointer(bp + 128)) = *(*Token)(unsafe.Pointer(yymsp + 8)) if int32((*Parse)(unsafe.Pointer(pParse)).Fnested) == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+23004, libc.VaList(bp+32, bp+128)) + Xsqlite3ErrorMsg(tls, pParse, ts+23051, libc.VaList(bp+32, bp+128)) *(*uintptr)(unsafe.Pointer(yymsp + 8)) = uintptr(0) } else { 
*(*uintptr)(unsafe.Pointer(yymsp + 8)) = Xsqlite3PExpr(tls, pParse, TK_REGISTER, uintptr(0), uintptr(0)) @@ -112781,9 +112839,9 @@ Xsqlite3ExprUnmapAndDelete(tls, pParse, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-4)*24 + 8))) *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-4)*24 + 8)) = Xsqlite3Expr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, TK_STRING, func() uintptr { if *(*int32)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-3)*24 + 8)) != 0 { - return ts + 6764 + return ts + 7694 } - return ts + 6769 + return ts + 7699 }()) if *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-4)*24 + 8)) != 0 { Xsqlite3ExprIdToTrueFalse(tls, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-4)*24 + 8))) @@ -113067,19 +113125,19 @@ { *(*Token)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-2)*24 + 8)) = *(*Token)(unsafe.Pointer(yymsp + 8)) Xsqlite3ErrorMsg(tls, pParse, - ts+23028, 0) + ts+23075, 0) } break case uint32(271): { Xsqlite3ErrorMsg(tls, pParse, - ts+23123, 0) + ts+23170, 0) } break case uint32(272): { Xsqlite3ErrorMsg(tls, pParse, - ts+23207, 0) + ts+23254, 0) } break case uint32(273): @@ -113458,9 +113516,9 @@ _ = yymajor if *(*uint8)(unsafe.Pointer((*Token)(unsafe.Pointer(bp + 8)).Fz)) != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+23004, libc.VaList(bp, bp+8)) + Xsqlite3ErrorMsg(tls, pParse, ts+23051, libc.VaList(bp, bp+8)) } else { - Xsqlite3ErrorMsg(tls, pParse, ts+23292, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+23339, 0) } (*YyParser)(unsafe.Pointer(yypParser)).FpParse = pParse @@ -114228,7 +114286,7 @@ } else { (*Token)(unsafe.Pointer(bp + 2464)).Fz = zSql (*Token)(unsafe.Pointer(bp + 2464)).Fn = uint32(n) - Xsqlite3ErrorMsg(tls, pParse, ts+23309, libc.VaList(bp, bp+2464)) + Xsqlite3ErrorMsg(tls, pParse, ts+23356, libc.VaList(bp, bp+2464)) break } } @@ -114251,7 +114309,7 @@ if (*Parse)(unsafe.Pointer(pParse)).FzErrMsg == uintptr(0) { (*Parse)(unsafe.Pointer(pParse)).FzErrMsg = Xsqlite3MPrintf(tls, db, ts+3663, libc.VaList(bp+8, Xsqlite3ErrStr(tls, (*Parse)(unsafe.Pointer(pParse)).Frc))) } - Xsqlite3_log(tls, (*Parse)(unsafe.Pointer(pParse)).Frc, ts+23334, libc.VaList(bp+16, (*Parse)(unsafe.Pointer(pParse)).FzErrMsg, (*Parse)(unsafe.Pointer(pParse)).FzTail)) + Xsqlite3_log(tls, (*Parse)(unsafe.Pointer(pParse)).Frc, ts+23381, libc.VaList(bp+16, (*Parse)(unsafe.Pointer(pParse)).FzErrMsg, (*Parse)(unsafe.Pointer(pParse)).FzTail)) nErr++ } (*Parse)(unsafe.Pointer(pParse)).FzTail = zSql @@ -114424,7 +114482,7 @@ fallthrough case 'C': { - if nId == 6 && Xsqlite3_strnicmp(tls, zSql, ts+23345, 6) == 0 { + if nId == 6 && Xsqlite3_strnicmp(tls, zSql, ts+23392, 6) == 0 { token = U8(TkCREATE) } else { token = U8(TkOTHER) @@ -114437,11 +114495,11 @@ fallthrough case 'T': { - if nId == 7 && Xsqlite3_strnicmp(tls, zSql, ts+19931, 7) == 0 { + if nId == 7 && Xsqlite3_strnicmp(tls, zSql, ts+19978, 7) == 0 { token = U8(TkTRIGGER) - } else if nId == 4 && Xsqlite3_strnicmp(tls, zSql, ts+23352, 4) == 0 { + } else if nId == 4 && Xsqlite3_strnicmp(tls, zSql, ts+23399, 4) == 0 { token = U8(TkTEMP) - } else if nId == 9 && Xsqlite3_strnicmp(tls, zSql, ts+23357, 9) == 0 { + } else if nId == 9 && Xsqlite3_strnicmp(tls, zSql, ts+23404, 9) == 0 { token = U8(TkTEMP) } else { token = U8(TkOTHER) @@ -114454,9 +114512,9 @@ fallthrough case 'E': { - if nId == 3 && Xsqlite3_strnicmp(tls, zSql, ts+23367, 3) == 0 { + if nId == 3 && Xsqlite3_strnicmp(tls, zSql, ts+23414, 3) == 0 { token = U8(TkEND) - } else if nId == 7 && Xsqlite3_strnicmp(tls, zSql, ts+23371, 7) == 0 { + } else if nId == 7 && 
Xsqlite3_strnicmp(tls, zSql, ts+23418, 7) == 0 { token = U8(TkEXPLAIN) } else { token = U8(TkOTHER) @@ -114695,7 +114753,7 @@ var rc int32 = SQLITE_OK if Xsqlite3Config.FisInit != 0 { - return Xsqlite3MisuseError(tls, 174337) + return Xsqlite3MisuseError(tls, 174426) } ap = va @@ -115270,7 +115328,7 @@ return SQLITE_OK } if !(Xsqlite3SafetyCheckSickOrOk(tls, db) != 0) { - return Xsqlite3MisuseError(tls, 175111) + return Xsqlite3MisuseError(tls, 175200) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) if int32((*Sqlite3)(unsafe.Pointer(db)).FmTrace)&SQLITE_TRACE_CLOSE != 0 { @@ -115285,7 +115343,7 @@ if !(forceZombie != 0) && connectionIsBusy(tls, db) != 0 { Xsqlite3ErrorWithMsg(tls, db, SQLITE_BUSY, - ts+23379, 0) + ts+23426, 0) Xsqlite3_mutex_leave(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) return SQLITE_BUSY } @@ -115476,23 +115534,23 @@ // Return a static string that describes the kind of error specified in the // argument. func Xsqlite3ErrStr(tls *libc.TLS, rc int32) uintptr { - var zErr uintptr = ts + 23447 + var zErr uintptr = ts + 23494 switch rc { case SQLITE_ABORT | int32(2)<<8: { - zErr = ts + 23461 + zErr = ts + 23508 break } case SQLITE_ROW: { - zErr = ts + 23483 + zErr = ts + 23530 break } case SQLITE_DONE: { - zErr = ts + 23505 + zErr = ts + 23552 break } @@ -115510,35 +115568,35 @@ } var aMsg = [29]uintptr{ - ts + 23528, - ts + 23541, + ts + 23575, + ts + 23588, uintptr(0), - ts + 23557, - ts + 23582, - ts + 23596, - ts + 23615, + ts + 23604, + ts + 23629, + ts + 23643, + ts + 23662, ts + 1490, - ts + 23640, - ts + 23677, - ts + 23689, - ts + 23704, - ts + 23737, - ts + 23755, - ts + 23780, - ts + 23809, + ts + 23687, + ts + 23724, + ts + 23736, + ts + 23751, + ts + 23784, + ts + 23802, + ts + 23827, + ts + 23856, uintptr(0), ts + 5838, ts + 5334, - ts + 23826, - ts + 23844, - ts + 23862, - uintptr(0), - ts + 23896, + ts + 23873, + ts + 23891, + ts + 23909, uintptr(0), - ts + 23917, ts + 23943, - ts + 23966, - ts + 23987, + uintptr(0), + ts + 23964, + ts + 23990, + ts + 24013, + ts + 24034, } func sqliteDefaultBusyCallback(tls *libc.TLS, ptr uintptr, count int32) int32 { @@ -115659,7 +115717,7 @@ libc.Bool32(xValue == uintptr(0)) != libc.Bool32(xInverse == uintptr(0)) || (nArg < -1 || nArg > SQLITE_MAX_FUNCTION_ARG) || 255 < Xsqlite3Strlen30(tls, zFunctionName) { - return Xsqlite3MisuseError(tls, 175758) + return Xsqlite3MisuseError(tls, 175847) } extraFlags = enc & (SQLITE_DETERMINISTIC | SQLITE_DIRECTONLY | SQLITE_SUBTYPE | SQLITE_INNOCUOUS) @@ -115709,7 +115767,7 @@ if p != 0 && (*FuncDef)(unsafe.Pointer(p)).FfuncFlags&U32(SQLITE_FUNC_ENCMASK) == U32(enc) && int32((*FuncDef)(unsafe.Pointer(p)).FnArg) == nArg { if (*Sqlite3)(unsafe.Pointer(db)).FnVdbeActive != 0 { Xsqlite3ErrorWithMsg(tls, db, SQLITE_BUSY, - ts+24003, 0) + ts+24050, 0) return SQLITE_BUSY } else { @@ -115831,7 +115889,7 @@ _ = NotUsed _ = NotUsed2 zErr = Xsqlite3_mprintf(tls, - ts+24066, libc.VaList(bp, zName)) + ts+24113, libc.VaList(bp, zName)) Xsqlite3_result_error(tls, context, zErr, -1) Xsqlite3_free(tls, zErr) } @@ -116067,7 +116125,7 @@ } if iDb < 0 { rc = SQLITE_ERROR - Xsqlite3ErrorWithMsg(tls, db, SQLITE_ERROR, ts+24117, libc.VaList(bp, zDb)) + Xsqlite3ErrorWithMsg(tls, db, SQLITE_ERROR, ts+24164, libc.VaList(bp, zDb)) } else { (*Sqlite3)(unsafe.Pointer(db)).FbusyHandler.FnBusy = 0 rc = Xsqlite3Checkpoint(tls, db, iDb, eMode, pnLog, pnCkpt) @@ -116160,7 +116218,7 @@ return Xsqlite3ErrStr(tls, SQLITE_NOMEM) } if !(Xsqlite3SafetyCheckSickOrOk(tls, db) != 0) { - return 
Xsqlite3ErrStr(tls, Xsqlite3MisuseError(tls, 176503)) + return Xsqlite3ErrStr(tls, Xsqlite3MisuseError(tls, 176592)) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) if (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { @@ -116230,7 +116288,7 @@ // passed to this function, we assume a malloc() failed during sqlite3_open(). func Xsqlite3_errcode(tls *libc.TLS, db uintptr) int32 { if db != 0 && !(Xsqlite3SafetyCheckSickOrOk(tls, db) != 0) { - return Xsqlite3MisuseError(tls, 176582) + return Xsqlite3MisuseError(tls, 176671) } if !(db != 0) || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { return SQLITE_NOMEM @@ -116240,7 +116298,7 @@ func Xsqlite3_extended_errcode(tls *libc.TLS, db uintptr) int32 { if db != 0 && !(Xsqlite3SafetyCheckSickOrOk(tls, db) != 0) { - return Xsqlite3MisuseError(tls, 176591) + return Xsqlite3MisuseError(tls, 176680) } if !(db != 0) || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { return SQLITE_NOMEM @@ -116277,14 +116335,14 @@ }() } if enc2 < SQLITE_UTF8 || enc2 > SQLITE_UTF16BE { - return Xsqlite3MisuseError(tls, 176639) + return Xsqlite3MisuseError(tls, 176728) } pColl = Xsqlite3FindCollSeq(tls, db, U8(enc2), zName, 0) if pColl != 0 && (*CollSeq)(unsafe.Pointer(pColl)).FxCmp != 0 { if (*Sqlite3)(unsafe.Pointer(db)).FnVdbeActive != 0 { Xsqlite3ErrorWithMsg(tls, db, SQLITE_BUSY, - ts+24138, 0) + ts+24185, 0) return SQLITE_BUSY } Xsqlite3ExpirePreparedStatements(tls, db, 0) @@ -116414,7 +116472,7 @@ if !((flags&uint32(SQLITE_OPEN_URI) != 0 || Xsqlite3Config.FbOpenUri != 0) && - nUri >= 5 && libc.Xmemcmp(tls, zUri, ts+24206, uint64(5)) == 0) { + nUri >= 5 && libc.Xmemcmp(tls, zUri, ts+24253, uint64(5)) == 0) { goto __1 } iOut = 0 @@ -116459,10 +116517,10 @@ goto __8 __9: ; - if !(iIn != 7 && (iIn != 16 || libc.Xmemcmp(tls, ts+24212, zUri+7, uint64(9)) != 0)) { + if !(iIn != 7 && (iIn != 16 || libc.Xmemcmp(tls, ts+24259, zUri+7, uint64(9)) != 0)) { goto __10 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24222, + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24269, libc.VaList(bp, iIn-7, zUri+7)) rc = SQLITE_ERROR goto parse_uri_out @@ -116567,7 +116625,7 @@ zVal = zOpt + uintptr(nOpt+1) nVal = Xsqlite3Strlen30(tls, zVal) - if !(nOpt == 3 && libc.Xmemcmp(tls, ts+24250, zOpt, uint64(3)) == 0) { + if !(nOpt == 3 && libc.Xmemcmp(tls, ts+24297, zOpt, uint64(3)) == 0) { goto __29 } zVfs = zVal @@ -116578,17 +116636,17 @@ mask = 0 limit = 0 - if !(nOpt == 5 && libc.Xmemcmp(tls, ts+24254, zOpt, uint64(5)) == 0) { + if !(nOpt == 5 && libc.Xmemcmp(tls, ts+24301, zOpt, uint64(5)) == 0) { goto __31 } mask = SQLITE_OPEN_SHAREDCACHE | SQLITE_OPEN_PRIVATECACHE aMode = uintptr(unsafe.Pointer(&aCacheMode)) limit = mask - zModeType = ts + 24254 + zModeType = ts + 24301 __31: ; - if !(nOpt == 4 && libc.Xmemcmp(tls, ts+24260, zOpt, uint64(4)) == 0) { + if !(nOpt == 4 && libc.Xmemcmp(tls, ts+24307, zOpt, uint64(4)) == 0) { goto __32 } @@ -116626,7 +116684,7 @@ if !(mode == 0) { goto __38 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24265, libc.VaList(bp+16, zModeType, zVal)) + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24312, libc.VaList(bp+16, zModeType, zVal)) rc = SQLITE_ERROR goto parse_uri_out __38: @@ -116634,7 +116692,7 @@ if !(mode&libc.CplInt32(SQLITE_OPEN_MEMORY) > limit) { goto __39 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24285, + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24332, libc.VaList(bp+32, zModeType, 
zVal)) rc = SQLITE_PERM goto parse_uri_out @@ -116674,7 +116732,7 @@ if !(*(*uintptr)(unsafe.Pointer(ppVfs)) == uintptr(0)) { goto __42 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24309, libc.VaList(bp+48, zVfs)) + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24356, libc.VaList(bp+48, zVfs)) rc = SQLITE_ERROR __42: ; @@ -116698,14 +116756,14 @@ } var aCacheMode = [3]OpenMode{ - {Fz: ts + 24325, Fmode: SQLITE_OPEN_SHAREDCACHE}, - {Fz: ts + 24332, Fmode: SQLITE_OPEN_PRIVATECACHE}, + {Fz: ts + 24372, Fmode: SQLITE_OPEN_SHAREDCACHE}, + {Fz: ts + 24379, Fmode: SQLITE_OPEN_PRIVATECACHE}, {}, } var aOpenMode = [5]OpenMode{ - {Fz: ts + 24340, Fmode: SQLITE_OPEN_READONLY}, - {Fz: ts + 24343, Fmode: SQLITE_OPEN_READWRITE}, - {Fz: ts + 24346, Fmode: SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE}, + {Fz: ts + 24387, Fmode: SQLITE_OPEN_READONLY}, + {Fz: ts + 24390, Fmode: SQLITE_OPEN_READWRITE}, + {Fz: ts + 24393, Fmode: SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE}, {Fz: ts + 17362, Fmode: SQLITE_OPEN_MEMORY}, {}, } @@ -116852,10 +116910,10 @@ createCollation(tls, db, uintptr(unsafe.Pointer(&Xsqlite3StrBINARY)), uint8(SQLITE_UTF16LE), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr, int32, uintptr) int32 }{binCollFunc})), uintptr(0)) - createCollation(tls, db, ts+21863, uint8(SQLITE_UTF8), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + createCollation(tls, db, ts+21910, uint8(SQLITE_UTF8), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr, int32, uintptr) int32 }{nocaseCollatingFunc})), uintptr(0)) - createCollation(tls, db, ts+24350, uint8(SQLITE_UTF8), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + createCollation(tls, db, ts+24397, uint8(SQLITE_UTF8), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr, int32, uintptr) int32 }{rtrimCollFunc})), uintptr(0)) if !((*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0) { @@ -116869,7 +116927,7 @@ if !(int32(1)<<(*(*uint32)(unsafe.Pointer(bp + 8))&uint32(7))&0x46 == 0) { goto __16 } - rc = Xsqlite3MisuseError(tls, 177308) + rc = Xsqlite3MisuseError(tls, 177397) goto __17 __16: rc = Xsqlite3ParseUri(tls, zVfs, zFilename, bp+8, db, bp+16, bp+24) @@ -116922,7 +116980,7 @@ (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb)).FzDbSName = ts + 6441 (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb)).Fsafety_level = U8(SQLITE_DEFAULT_SYNCHRONOUS + 1) - (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + 1*32)).FzDbSName = ts + 23352 + (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + 1*32)).FzDbSName = ts + 23399 (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + 1*32)).Fsafety_level = U8(PAGER_SYNCHRONOUS_OFF) (*Sqlite3)(unsafe.Pointer(db)).FeOpenState = U8(SQLITE_STATE_OPEN) @@ -117027,7 +117085,7 @@ return rc } if zFilename == uintptr(0) { - zFilename = ts + 24356 + zFilename = ts + 24403 } pVal = Xsqlite3ValueNew(tls, uintptr(0)) Xsqlite3ValueSetStr(tls, pVal, -1, zFilename, func() uint8 { @@ -117145,21 +117203,21 @@ bp := tls.Alloc(24) defer tls.Free(24) - Xsqlite3_log(tls, iErr, ts+24359, + Xsqlite3_log(tls, iErr, ts+24406, libc.VaList(bp, zType, lineno, uintptr(20)+Xsqlite3_sourceid(tls))) return iErr } func Xsqlite3CorruptError(tls *libc.TLS, lineno int32) int32 { - return Xsqlite3ReportError(tls, SQLITE_CORRUPT, lineno, ts+24384) + return Xsqlite3ReportError(tls, SQLITE_CORRUPT, lineno, ts+24431) } func Xsqlite3MisuseError(tls *libc.TLS, lineno int32) 
int32 { - return Xsqlite3ReportError(tls, SQLITE_MISUSE, lineno, ts+24404) + return Xsqlite3ReportError(tls, SQLITE_MISUSE, lineno, ts+24451) } func Xsqlite3CantopenError(tls *libc.TLS, lineno int32) int32 { - return Xsqlite3ReportError(tls, SQLITE_CANTOPEN, lineno, ts+24411) + return Xsqlite3ReportError(tls, SQLITE_CANTOPEN, lineno, ts+24458) } // This is a convenience routine that makes sure that all thread-specific @@ -117317,7 +117375,7 @@ goto __20 } Xsqlite3DbFree(tls, db, *(*uintptr)(unsafe.Pointer(bp + 24))) - *(*uintptr)(unsafe.Pointer(bp + 24)) = Xsqlite3MPrintf(tls, db, ts+24428, libc.VaList(bp, zTableName, + *(*uintptr)(unsafe.Pointer(bp + 24)) = Xsqlite3MPrintf(tls, db, ts+24475, libc.VaList(bp, zTableName, zColumnName)) rc = SQLITE_ERROR __20: @@ -117973,7 +118031,7 @@ azCompileOpt = Xsqlite3CompileOptions(tls, bp) - if Xsqlite3_strnicmp(tls, zOptName, ts+24456, 7) == 0 { + if Xsqlite3_strnicmp(tls, zOptName, ts+24503, 7) == 0 { zOptName += uintptr(7) } n = Xsqlite3Strlen30(tls, zOptName) @@ -118091,7 +118149,7 @@ Xsqlite3ErrorWithMsg(tls, db, rc, func() uintptr { if rc != 0 { - return ts + 24464 + return ts + 24511 } return uintptr(0) }(), 0) @@ -118269,7 +118327,7 @@ type JsonParse = JsonParse1 var jsonType = [8]uintptr{ - ts + 6181, ts + 6764, ts + 6769, ts + 6191, ts + 6186, ts + 8005, ts + 24487, ts + 24493, + ts + 6181, ts + 7694, ts + 7699, ts + 6191, ts + 6186, ts + 8005, ts + 24534, ts + 24540, } func jsonZero(tls *libc.TLS, p uintptr) { @@ -118422,7 +118480,7 @@ *(*uint8)(unsafe.Pointer((*JsonString)(unsafe.Pointer(p)).FzBuf + uintptr(libc.PostIncUint64(&(*JsonString)(unsafe.Pointer(p)).FnUsed, 1)))) = uint8('0') *(*uint8)(unsafe.Pointer((*JsonString)(unsafe.Pointer(p)).FzBuf + uintptr(libc.PostIncUint64(&(*JsonString)(unsafe.Pointer(p)).FnUsed, 1)))) = uint8('0') *(*uint8)(unsafe.Pointer((*JsonString)(unsafe.Pointer(p)).FzBuf + uintptr(libc.PostIncUint64(&(*JsonString)(unsafe.Pointer(p)).FnUsed, 1)))) = uint8('0' + int32(c)>>4) - c = *(*uint8)(unsafe.Pointer(ts + 24500 + uintptr(int32(c)&0xf))) + c = *(*uint8)(unsafe.Pointer(ts + 24547 + uintptr(int32(c)&0xf))) __8: ; __6: @@ -118477,7 +118535,7 @@ default: { if int32((*JsonString)(unsafe.Pointer(p)).FbErr) == 0 { - Xsqlite3_result_error(tls, (*JsonString)(unsafe.Pointer(p)).FpCtx, ts+24517, -1) + Xsqlite3_result_error(tls, (*JsonString)(unsafe.Pointer(p)).FpCtx, ts+24564, -1) (*JsonString)(unsafe.Pointer(p)).FbErr = U8(2) jsonReset(tls, p) } @@ -118541,13 +118599,13 @@ } case JSON_TRUE: { - jsonAppendRaw(tls, pOut, ts+6764, uint32(4)) + jsonAppendRaw(tls, pOut, ts+7694, uint32(4)) break } case JSON_FALSE: { - jsonAppendRaw(tls, pOut, ts+6769, uint32(5)) + jsonAppendRaw(tls, pOut, ts+7699, uint32(5)) break } @@ -119097,12 +119155,12 @@ jsonParseAddNode(tls, pParse, uint32(JSON_NULL), uint32(0), uintptr(0)) return int32(i + U32(4)) } else if int32(c) == 't' && - libc.Xstrncmp(tls, z+uintptr(i), ts+6764, uint64(4)) == 0 && + libc.Xstrncmp(tls, z+uintptr(i), ts+7694, uint64(4)) == 0 && !(int32(Xsqlite3CtypeMap[*(*uint8)(unsafe.Pointer(z + uintptr(i+U32(4))))])&0x06 != 0) { jsonParseAddNode(tls, pParse, uint32(JSON_TRUE), uint32(0), uintptr(0)) return int32(i + U32(4)) } else if int32(c) == 'f' && - libc.Xstrncmp(tls, z+uintptr(i), ts+6769, uint64(5)) == 0 && + libc.Xstrncmp(tls, z+uintptr(i), ts+7699, uint64(5)) == 0 && !(int32(Xsqlite3CtypeMap[*(*uint8)(unsafe.Pointer(z + uintptr(i+U32(5))))])&0x06 != 0) { jsonParseAddNode(tls, pParse, uint32(JSON_FALSE), uint32(0), uintptr(0)) return int32(i + U32(5)) @@ -119203,7 
+119261,7 @@ if (*JsonParse)(unsafe.Pointer(pParse)).Foom != 0 { Xsqlite3_result_error_nomem(tls, pCtx) } else { - Xsqlite3_result_error(tls, pCtx, ts+24546, -1) + Xsqlite3_result_error(tls, pCtx, ts+24593, -1) } } jsonParseReset(tls, pParse) @@ -119509,7 +119567,7 @@ } if int32(*(*uint8)(unsafe.Pointer(zPath))) == '.' { jsonParseAddNode(tls, pParse, uint32(JSON_OBJECT), uint32(0), uintptr(0)) - } else if libc.Xstrncmp(tls, zPath, ts+24561, uint64(3)) == 0 { + } else if libc.Xstrncmp(tls, zPath, ts+24608, uint64(3)) == 0 { jsonParseAddNode(tls, pParse, uint32(JSON_ARRAY), uint32(0), uintptr(0)) } else { return uintptr(0) @@ -119524,7 +119582,7 @@ bp := tls.Alloc(8) defer tls.Free(8) - return Xsqlite3_mprintf(tls, ts+24565, libc.VaList(bp, zErr)) + return Xsqlite3_mprintf(tls, ts+24612, libc.VaList(bp, zErr)) } func jsonLookup(tls *libc.TLS, pParse uintptr, zPath uintptr, pApnd uintptr, pCtx uintptr) uintptr { @@ -119578,7 +119636,7 @@ bp := tls.Alloc(8) defer tls.Free(8) - var zMsg uintptr = Xsqlite3_mprintf(tls, ts+24591, + var zMsg uintptr = Xsqlite3_mprintf(tls, ts+24638, libc.VaList(bp, zFuncName)) Xsqlite3_result_error(tls, pCtx, zMsg, -1) Xsqlite3_free(tls, zMsg) @@ -119683,11 +119741,11 @@ if int32(*(*uint8)(unsafe.Pointer(zPath))) != '$' { jsonInit(tls, bp, ctx) if int32(Xsqlite3CtypeMap[*(*uint8)(unsafe.Pointer(zPath))])&0x04 != 0 { - jsonAppendRaw(tls, bp, ts+24634, uint32(2)) + jsonAppendRaw(tls, bp, ts+24681, uint32(2)) jsonAppendRaw(tls, bp, zPath, uint32(int32(libc.Xstrlen(tls, zPath)))) jsonAppendRaw(tls, bp, ts+4998, uint32(2)) } else { - jsonAppendRaw(tls, bp, ts+24637, uint32(1+libc.Bool32(int32(*(*uint8)(unsafe.Pointer(zPath))) != '['))) + jsonAppendRaw(tls, bp, ts+24684, uint32(1+libc.Bool32(int32(*(*uint8)(unsafe.Pointer(zPath))) != '['))) jsonAppendRaw(tls, bp, zPath, uint32(int32(libc.Xstrlen(tls, zPath)))) jsonAppendChar(tls, bp, uint8(0)) } @@ -119844,14 +119902,14 @@ if argc&1 != 0 { Xsqlite3_result_error(tls, ctx, - ts+24640, -1) + ts+24687, -1) return } jsonInit(tls, bp, ctx) jsonAppendChar(tls, bp, uint8('{')) for i = 0; i < argc; i = i + 2 { if Xsqlite3_value_type(tls, *(*uintptr)(unsafe.Pointer(argv + uintptr(i)*8))) != SQLITE_TEXT { - Xsqlite3_result_error(tls, ctx, ts+24691, -1) + Xsqlite3_result_error(tls, ctx, ts+24738, -1) jsonReset(tls, bp) return } @@ -120021,9 +120079,9 @@ } jsonWrongNumArgs(tls, ctx, func() uintptr { if bIsSet != 0 { - return ts + 24725 + return ts + 24772 } - return ts + 24729 + return ts + 24776 }()) return __2: @@ -120156,7 +120214,7 @@ (*JsonString)(unsafe.Pointer(pStr)).FnUsed-- } } else { - Xsqlite3_result_text(tls, ctx, ts+24736, 2, uintptr(0)) + Xsqlite3_result_text(tls, ctx, ts+24783, 2, uintptr(0)) } Xsqlite3_result_subtype(tls, ctx, uint32(JSON_SUBTYPE)) } @@ -120253,7 +120311,7 @@ (*JsonString)(unsafe.Pointer(pStr)).FnUsed-- } } else { - Xsqlite3_result_text(tls, ctx, ts+24739, 2, uintptr(0)) + Xsqlite3_result_text(tls, ctx, ts+24786, 2, uintptr(0)) } Xsqlite3_result_subtype(tls, ctx, uint32(JSON_SUBTYPE)) } @@ -120297,7 +120355,7 @@ _ = argc _ = pAux rc = Xsqlite3_declare_vtab(tls, db, - ts+24742) + ts+24789) if rc == SQLITE_OK { pNew = libc.AssignPtrUintptr(ppVtab, Xsqlite3_malloc(tls, int32(unsafe.Sizeof(Sqlite3_vtab{})))) if pNew == uintptr(0) { @@ -120428,7 +120486,7 @@ nn = nn - 2 } } - jsonPrintf(tls, nn+2, pStr, ts+24825, libc.VaList(bp, nn, z)) + jsonPrintf(tls, nn+2, pStr, ts+24872, libc.VaList(bp, nn, z)) } func jsonEachComputePath(tls *libc.TLS, p uintptr, pStr uintptr, i U32) { @@ -120447,7 +120505,7 @@ pNode 
= (*JsonEachCursor)(unsafe.Pointer(p)).FsParse.FaNode + uintptr(i)*16 pUp = (*JsonEachCursor)(unsafe.Pointer(p)).FsParse.FaNode + uintptr(iUp)*16 if int32((*JsonNode)(unsafe.Pointer(pUp)).FeType) == JSON_ARRAY { - jsonPrintf(tls, 30, pStr, ts+24831, libc.VaList(bp, *(*U32)(unsafe.Pointer(pUp + 8)))) + jsonPrintf(tls, 30, pStr, ts+24878, libc.VaList(bp, *(*U32)(unsafe.Pointer(pUp + 8)))) } else { if int32((*JsonNode)(unsafe.Pointer(pNode)).FjnFlags)&JNODE_LABEL == 0 { pNode -= 16 @@ -120543,7 +120601,7 @@ jsonAppendChar(tls, bp+8, uint8('$')) } if int32((*JsonEachCursor)(unsafe.Pointer(p)).FeType) == JSON_ARRAY { - jsonPrintf(tls, 30, bp+8, ts+24831, libc.VaList(bp, (*JsonEachCursor)(unsafe.Pointer(p)).FiRowid)) + jsonPrintf(tls, 30, bp+8, ts+24878, libc.VaList(bp, (*JsonEachCursor)(unsafe.Pointer(p)).FiRowid)) } else if int32((*JsonEachCursor)(unsafe.Pointer(p)).FeType) == JSON_OBJECT { jsonAppendObjectPathElement(tls, bp+8, pThis) } @@ -120567,7 +120625,7 @@ { var zRoot uintptr = (*JsonEachCursor)(unsafe.Pointer(p)).FzRoot if zRoot == uintptr(0) { - zRoot = ts + 24836 + zRoot = ts + 24883 } Xsqlite3_result_text(tls, ctx, zRoot, -1, uintptr(0)) break @@ -120689,7 +120747,7 @@ var rc int32 = SQLITE_NOMEM if int32((*JsonEachCursor)(unsafe.Pointer(p)).FsParse.Foom) == 0 { Xsqlite3_free(tls, (*Sqlite3_vtab)(unsafe.Pointer((*Sqlite3_vtab_cursor)(unsafe.Pointer(cur)).FpVtab)).FzErrMsg) - (*Sqlite3_vtab)(unsafe.Pointer((*Sqlite3_vtab_cursor)(unsafe.Pointer(cur)).FpVtab)).FzErrMsg = Xsqlite3_mprintf(tls, ts+24546, 0) + (*Sqlite3_vtab)(unsafe.Pointer((*Sqlite3_vtab_cursor)(unsafe.Pointer(cur)).FpVtab)).FzErrMsg = Xsqlite3_mprintf(tls, ts+24593, 0) if (*Sqlite3_vtab)(unsafe.Pointer((*Sqlite3_vtab_cursor)(unsafe.Pointer(cur)).FpVtab)).FzErrMsg != 0 { rc = SQLITE_ERROR } @@ -120784,25 +120842,25 @@ } var aJsonFunc = [19]FuncDef{ - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24838}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24843}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24854}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24854}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24872}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_JSON)), FxSFunc: 0, FzName: ts + 24885}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_SQL)), FxSFunc: 0, FzName: ts + 24888}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24892}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24904}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24916}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), 
FxSFunc: 0, FzName: ts + 24927}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24938}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24950}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_ISSET)), FxSFunc: 0, FzName: ts + 24963}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24972}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24972}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24982}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_UTF8 | 0*SQLITE_FUNC_NEEDCOLL | SQLITE_SUBTYPE | SQLITE_UTF8 | SQLITE_DETERMINISTIC), FxSFunc: 0, FxFinalize: 0, FxValue: 0, FxInverse: 0, FzName: ts + 24993}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_UTF8 | 0*SQLITE_FUNC_NEEDCOLL | SQLITE_SUBTYPE | SQLITE_UTF8 | SQLITE_DETERMINISTIC), FxSFunc: 0, FxFinalize: 0, FxValue: 0, FxInverse: 0, FzName: ts + 25010}} + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24885}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24890}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24901}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24901}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24919}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_JSON)), FxSFunc: 0, FzName: ts + 24932}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_SQL)), FxSFunc: 0, FzName: ts + 24935}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24939}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24951}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24963}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24974}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24985}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24997}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | 
SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_ISSET)), FxSFunc: 0, FzName: ts + 25010}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25019}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25019}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25029}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_UTF8 | 0*SQLITE_FUNC_NEEDCOLL | SQLITE_SUBTYPE | SQLITE_UTF8 | SQLITE_DETERMINISTIC), FxSFunc: 0, FxFinalize: 0, FxValue: 0, FxInverse: 0, FzName: ts + 25040}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_UTF8 | 0*SQLITE_FUNC_NEEDCOLL | SQLITE_SUBTYPE | SQLITE_UTF8 | SQLITE_DETERMINISTIC), FxSFunc: 0, FxFinalize: 0, FxValue: 0, FxInverse: 0, FzName: ts + 25057}} // Register the JSON table-valued functions func Xsqlite3JsonTableFunctions(tls *libc.TLS, db uintptr) int32 { @@ -120821,8 +120879,8 @@ FzName uintptr FpModule uintptr }{ - {FzName: ts + 25028, FpModule: 0}, - {FzName: ts + 25038, FpModule: 0}, + {FzName: ts + 25075, FpModule: 0}, + {FzName: ts + 25085, FpModule: 0}, } type Rtree1 = struct { @@ -121082,11 +121140,11 @@ } } if (*Rtree)(unsafe.Pointer(pRtree)).FpNodeBlob == uintptr(0) { - var zTab uintptr = Xsqlite3_mprintf(tls, ts+25048, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) + var zTab uintptr = Xsqlite3_mprintf(tls, ts+25095, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) if zTab == uintptr(0) { return SQLITE_NOMEM } - rc = Xsqlite3_blob_open(tls, (*Rtree)(unsafe.Pointer(pRtree)).Fdb, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, zTab, ts+25056, iNode, 0, + rc = Xsqlite3_blob_open(tls, (*Rtree)(unsafe.Pointer(pRtree)).Fdb, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, zTab, ts+25103, iNode, 0, pRtree+112) Xsqlite3_free(tls, zTab) } @@ -121297,7 +121355,7 @@ var pRtree uintptr = pVtab var rc int32 var zCreate uintptr = Xsqlite3_mprintf(tls, - ts+25061, + ts+25108, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) @@ -121995,7 +122053,7 @@ var pSrc uintptr var pInfo uintptr - pSrc = Xsqlite3_value_pointer(tls, pValue, ts+25143) + pSrc = Xsqlite3_value_pointer(tls, pValue, ts+25190) if pSrc == uintptr(0) { return SQLITE_ERROR } @@ -123336,7 +123394,7 @@ var zSql uintptr var rc int32 - zSql = Xsqlite3_mprintf(tls, ts+25157, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) + zSql = Xsqlite3_mprintf(tls, ts+25204, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) if zSql != 0 { rc = Xsqlite3_prepare_v2(tls, (*Rtree)(unsafe.Pointer(pRtree)).Fdb, zSql, -1, bp+56, uintptr(0)) } else { @@ -123348,12 +123406,12 @@ if iCol == 0 { var zCol uintptr = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), 0) (*Rtree)(unsafe.Pointer(pRtree)).Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+25177, libc.VaList(bp+16, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zCol)) + ts+25224, libc.VaList(bp+16, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zCol)) } else { var zCol1 uintptr = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), iCol) var zCol2 
uintptr = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), iCol+1) (*Rtree)(unsafe.Pointer(pRtree)).Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+25209, libc.VaList(bp+32, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zCol1, zCol2)) + ts+25256, libc.VaList(bp+32, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zCol1, zCol2)) } } @@ -123579,7 +123637,7 @@ var pRtree uintptr = pVtab var rc int32 = SQLITE_NOMEM var zSql uintptr = Xsqlite3_mprintf(tls, - ts+25246, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName)) + ts+25293, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName)) if zSql != 0 { nodeBlobReset(tls, pRtree) rc = Xsqlite3_exec(tls, (*Rtree)(unsafe.Pointer(pRtree)).Fdb, zSql, uintptr(0), uintptr(0), uintptr(0)) @@ -123602,7 +123660,7 @@ bp := tls.Alloc(24) defer tls.Free(24) - var zFmt uintptr = ts + 25391 + var zFmt uintptr = ts + 25438 var zSql uintptr var rc int32 @@ -123650,7 +123708,7 @@ } var azName1 = [3]uintptr{ - ts + 25447, ts + 5060, ts + 16267, + ts + 25494, ts + 5060, ts + 16267, } var rtreeModule = Sqlite3_module{ @@ -123693,19 +123751,19 @@ var p uintptr = Xsqlite3_str_new(tls, db) var ii int32 Xsqlite3_str_appendf(tls, p, - ts+25452, + ts+25499, libc.VaList(bp, zDb, zPrefix)) for ii = 0; ii < int32((*Rtree)(unsafe.Pointer(pRtree)).FnAux); ii++ { - Xsqlite3_str_appendf(tls, p, ts+25514, libc.VaList(bp+16, ii)) + Xsqlite3_str_appendf(tls, p, ts+25561, libc.VaList(bp+16, ii)) } Xsqlite3_str_appendf(tls, p, - ts+25519, + ts+25566, libc.VaList(bp+24, zDb, zPrefix)) Xsqlite3_str_appendf(tls, p, - ts+25583, + ts+25630, libc.VaList(bp+40, zDb, zPrefix)) Xsqlite3_str_appendf(tls, p, - ts+25653, + ts+25700, libc.VaList(bp+56, zDb, zPrefix, (*Rtree)(unsafe.Pointer(pRtree)).FiNodeSize)) zCreate = Xsqlite3_str_finish(tls, p) if !(zCreate != 0) { @@ -123734,7 +123792,7 @@ if i != 3 || int32((*Rtree)(unsafe.Pointer(pRtree)).FnAux) == 0 { zFormat = azSql[i] } else { - zFormat = ts + 25702 + zFormat = ts + 25749 } zSql = Xsqlite3_mprintf(tls, zFormat, libc.VaList(bp+80, zDb, zPrefix)) if zSql != 0 { @@ -123746,7 +123804,7 @@ } if (*Rtree)(unsafe.Pointer(pRtree)).FnAux != 0 { (*Rtree)(unsafe.Pointer(pRtree)).FzReadAuxSql = Xsqlite3_mprintf(tls, - ts+25810, + ts+25857, libc.VaList(bp+96, zDb, zPrefix)) if (*Rtree)(unsafe.Pointer(pRtree)).FzReadAuxSql == uintptr(0) { rc = SQLITE_NOMEM @@ -123754,18 +123812,18 @@ var p uintptr = Xsqlite3_str_new(tls, db) var ii int32 var zSql uintptr - Xsqlite3_str_appendf(tls, p, ts+25855, libc.VaList(bp+112, zDb, zPrefix)) + Xsqlite3_str_appendf(tls, p, ts+25902, libc.VaList(bp+112, zDb, zPrefix)) for ii = 0; ii < int32((*Rtree)(unsafe.Pointer(pRtree)).FnAux); ii++ { if ii != 0 { Xsqlite3_str_append(tls, p, ts+12767, 1) } if ii < int32((*Rtree)(unsafe.Pointer(pRtree)).FnAuxNotNull) { - Xsqlite3_str_appendf(tls, p, ts+25882, libc.VaList(bp+128, ii, ii+2, ii)) + Xsqlite3_str_appendf(tls, p, ts+25929, libc.VaList(bp+128, ii, ii+2, ii)) } else { - Xsqlite3_str_appendf(tls, p, ts+25904, libc.VaList(bp+152, ii, ii+2)) + Xsqlite3_str_appendf(tls, p, ts+25951, libc.VaList(bp+152, ii, ii+2)) } } - 
Xsqlite3_str_appendf(tls, p, ts+25912, 0) + Xsqlite3_str_appendf(tls, p, ts+25959, 0) zSql = Xsqlite3_str_finish(tls, p) if zSql == uintptr(0) { rc = SQLITE_NOMEM @@ -123780,14 +123838,14 @@ } var azSql = [8]uintptr{ - ts + 25928, - ts + 25981, - ts + 26026, - ts + 26078, - ts + 26132, - ts + 26177, - ts + 26235, - ts + 26290, + ts + 25975, + ts + 26028, + ts + 26073, + ts + 26125, + ts + 26179, + ts + 26224, + ts + 26282, + ts + 26337, } func getIntFromStmt(tls *libc.TLS, db uintptr, zSql uintptr, piVal uintptr) int32 { @@ -123816,7 +123874,7 @@ var zSql uintptr if isCreate != 0 { *(*int32)(unsafe.Pointer(bp + 48)) = 0 - zSql = Xsqlite3_mprintf(tls, ts+26337, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb)) + zSql = Xsqlite3_mprintf(tls, ts+26384, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb)) rc = getIntFromStmt(tls, db, zSql, bp+48) if rc == SQLITE_OK { (*Rtree)(unsafe.Pointer(pRtree)).FiNodeSize = *(*int32)(unsafe.Pointer(bp + 48)) - 64 @@ -123828,7 +123886,7 @@ } } else { zSql = Xsqlite3_mprintf(tls, - ts+26357, + ts+26404, libc.VaList(bp+16, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) rc = getIntFromStmt(tls, db, zSql, pRtree+32) if rc != SQLITE_OK { @@ -123836,7 +123894,7 @@ } else if (*Rtree)(unsafe.Pointer(pRtree)).FiNodeSize < 512-64 { rc = SQLITE_CORRUPT | int32(1)<<8 - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+26414, + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+26461, libc.VaList(bp+40, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) } } @@ -123878,10 +123936,10 @@ ii = 4 *(*[5]uintptr)(unsafe.Pointer(bp + 96)) = [5]uintptr{ uintptr(0), - ts + 26449, - ts + 26492, - ts + 26527, - ts + 26563, + ts + 26496, + ts + 26539, + ts + 26574, + ts + 26610, } if !(argc < 6 || argc > RTREE_MAX_AUX_COLUMN+3) { @@ -123912,7 +123970,7 @@ libc.Xmemcpy(tls, (*Rtree)(unsafe.Pointer(pRtree)).FzName, *(*uintptr)(unsafe.Pointer(argv + 2*8)), uint64(nName)) pSql = Xsqlite3_str_new(tls, db) - Xsqlite3_str_appendf(tls, pSql, ts+26600, + Xsqlite3_str_appendf(tls, pSql, ts+26647, libc.VaList(bp+16, rtreeTokenLength(tls, *(*uintptr)(unsafe.Pointer(argv + 3*8))), *(*uintptr)(unsafe.Pointer(argv + 3*8)))) ii = 4 __3: @@ -123924,7 +123982,7 @@ goto __6 } (*Rtree)(unsafe.Pointer(pRtree)).FnAux++ - Xsqlite3_str_appendf(tls, pSql, ts+26624, libc.VaList(bp+32, rtreeTokenLength(tls, zArg+uintptr(1)), zArg+uintptr(1))) + Xsqlite3_str_appendf(tls, pSql, ts+26671, libc.VaList(bp+32, rtreeTokenLength(tls, zArg+uintptr(1)), zArg+uintptr(1))) goto __7 __6: if !(int32((*Rtree)(unsafe.Pointer(pRtree)).FnAux) > 0) { @@ -123947,7 +124005,7 @@ goto __5 __5: ; - Xsqlite3_str_appendf(tls, pSql, ts+26630, 0) + Xsqlite3_str_appendf(tls, pSql, ts+26677, 0) zSql = Xsqlite3_str_finish(tls, pSql) if !!(zSql != 0) { goto __10 @@ -124043,7 +124101,7 @@ return rc } -var azFormat = [2]uintptr{ts + 26633, ts + 26644} +var azFormat = [2]uintptr{ts + 26680, ts + 26691} func rtreenode(tls *libc.TLS, ctx uintptr, nArg int32, apArg uintptr) { bp := tls.Alloc(1072) @@ -124083,11 +124141,11 @@ if ii > 0 { Xsqlite3_str_append(tls, pOut, ts+10920, 1) } - Xsqlite3_str_appendf(tls, pOut, ts+26654, libc.VaList(bp, (*RtreeCell)(unsafe.Pointer(bp+1024)).FiRowid)) + Xsqlite3_str_appendf(tls, pOut, ts+26701, libc.VaList(bp, (*RtreeCell)(unsafe.Pointer(bp+1024)).FiRowid)) for jj = 0; jj < int32((*Rtree)(unsafe.Pointer(bp+56)).FnDim2); jj++ { - Xsqlite3_str_appendf(tls, pOut, ts+26660, libc.VaList(bp+8, float64(*(*RtreeValue)(unsafe.Pointer(bp + 1024 + 8 + 
uintptr(jj)*4))))) + Xsqlite3_str_appendf(tls, pOut, ts+26707, libc.VaList(bp+8, float64(*(*RtreeValue)(unsafe.Pointer(bp + 1024 + 8 + uintptr(jj)*4))))) } - Xsqlite3_str_append(tls, pOut, ts+26664, 1) + Xsqlite3_str_append(tls, pOut, ts+26711, 1) } errCode = Xsqlite3_str_errcode(tls, pOut) Xsqlite3_result_text(tls, ctx, Xsqlite3_str_finish(tls, pOut), -1, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{Xsqlite3_free}))) @@ -124098,7 +124156,7 @@ _ = nArg if Xsqlite3_value_type(tls, *(*uintptr)(unsafe.Pointer(apArg))) != SQLITE_BLOB || Xsqlite3_value_bytes(tls, *(*uintptr)(unsafe.Pointer(apArg))) < 2 { - Xsqlite3_result_error(tls, ctx, ts+26666, -1) + Xsqlite3_result_error(tls, ctx, ts+26713, -1) } else { var zBlob uintptr = Xsqlite3_value_blob(tls, *(*uintptr)(unsafe.Pointer(apArg))) if zBlob != 0 { @@ -124176,7 +124234,7 @@ if z == uintptr(0) { (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc = SQLITE_NOMEM } else { - (*RtreeCheck)(unsafe.Pointer(pCheck)).FzReport = Xsqlite3_mprintf(tls, ts+26699, + (*RtreeCheck)(unsafe.Pointer(pCheck)).FzReport = Xsqlite3_mprintf(tls, ts+26746, libc.VaList(bp, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzReport, func() uintptr { if (*RtreeCheck)(unsafe.Pointer(pCheck)).FzReport != 0 { return ts + 4062 @@ -124200,7 +124258,7 @@ if (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc == SQLITE_OK && (*RtreeCheck)(unsafe.Pointer(pCheck)).FpGetNode == uintptr(0) { (*RtreeCheck)(unsafe.Pointer(pCheck)).FpGetNode = rtreeCheckPrepare(tls, pCheck, - ts+26706, + ts+26753, libc.VaList(bp, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzDb, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzTab)) } @@ -124219,7 +124277,7 @@ } rtreeCheckReset(tls, pCheck, (*RtreeCheck)(unsafe.Pointer(pCheck)).FpGetNode) if (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc == SQLITE_OK && pRet == uintptr(0) { - rtreeCheckAppendMsg(tls, pCheck, ts+26751, libc.VaList(bp+16, iNode)) + rtreeCheckAppendMsg(tls, pCheck, ts+26798, libc.VaList(bp+16, iNode)) } } @@ -124233,8 +124291,8 @@ var rc int32 var pStmt uintptr *(*[2]uintptr)(unsafe.Pointer(bp + 80)) = [2]uintptr{ - ts + 26783, - ts + 26837, + ts + 26830, + ts + 26884, } if *(*uintptr)(unsafe.Pointer(pCheck + 40 + uintptr(bLeaf)*8)) == uintptr(0) { @@ -124249,23 +124307,23 @@ Xsqlite3_bind_int64(tls, pStmt, 1, iKey) rc = Xsqlite3_step(tls, pStmt) if rc == SQLITE_DONE { - rtreeCheckAppendMsg(tls, pCheck, ts+26885, + rtreeCheckAppendMsg(tls, pCheck, ts+26932, libc.VaList(bp+16, iKey, iVal, func() uintptr { if bLeaf != 0 { - return ts + 26930 + return ts + 26977 } - return ts + 26938 + return ts + 26985 }())) } else if rc == SQLITE_ROW { var ii I64 = Xsqlite3_column_int64(tls, pStmt, 0) if ii != iVal { rtreeCheckAppendMsg(tls, pCheck, - ts+26947, + ts+26994, libc.VaList(bp+40, iKey, ii, func() uintptr { if bLeaf != 0 { - return ts + 26930 + return ts + 26977 } - return ts + 26938 + return ts + 26985 }(), iKey, iVal)) } } @@ -124289,7 +124347,7 @@ return libc.Bool32(*(*RtreeValue)(unsafe.Pointer(bp + 48)) > *(*RtreeValue)(unsafe.Pointer(bp + 52))) }() != 0 { rtreeCheckAppendMsg(tls, pCheck, - ts+27005, libc.VaList(bp, i, iCell, iNode)) + ts+27052, libc.VaList(bp, i, iCell, iNode)) } if pParent != 0 { @@ -124309,7 +124367,7 @@ return libc.Bool32(*(*RtreeValue)(unsafe.Pointer(bp + 52)) > *(*RtreeValue)(unsafe.Pointer(bp + 60))) }() != 0 { rtreeCheckAppendMsg(tls, pCheck, - ts+27053, libc.VaList(bp+24, i, iCell, iNode)) + ts+27100, libc.VaList(bp+24, i, iCell, iNode)) } } } @@ -124326,14 +124384,14 @@ if aNode != 0 { if *(*int32)(unsafe.Pointer(bp + 48)) < 4 { 
rtreeCheckAppendMsg(tls, pCheck, - ts+27120, libc.VaList(bp, iNode, *(*int32)(unsafe.Pointer(bp + 48)))) + ts+27167, libc.VaList(bp, iNode, *(*int32)(unsafe.Pointer(bp + 48)))) } else { var nCell int32 var i int32 if aParent == uintptr(0) { iDepth = readInt16(tls, aNode) if iDepth > RTREE_MAX_DEPTH { - rtreeCheckAppendMsg(tls, pCheck, ts+27154, libc.VaList(bp+16, iDepth)) + rtreeCheckAppendMsg(tls, pCheck, ts+27201, libc.VaList(bp+16, iDepth)) Xsqlite3_free(tls, aNode) return } @@ -124341,7 +124399,7 @@ nCell = readInt16(tls, aNode+2) if 4+nCell*(8+(*RtreeCheck)(unsafe.Pointer(pCheck)).FnDim*2*4) > *(*int32)(unsafe.Pointer(bp + 48)) { rtreeCheckAppendMsg(tls, pCheck, - ts+27184, + ts+27231, libc.VaList(bp+24, iNode, nCell, *(*int32)(unsafe.Pointer(bp + 48)))) } else { for i = 0; i < nCell; i++ { @@ -124370,14 +124428,14 @@ if (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc == SQLITE_OK { var pCount uintptr - pCount = rtreeCheckPrepare(tls, pCheck, ts+27239, + pCount = rtreeCheckPrepare(tls, pCheck, ts+27286, libc.VaList(bp, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzDb, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzTab, zTbl)) if pCount != 0 { if Xsqlite3_step(tls, pCount) == SQLITE_ROW { var nActual I64 = Xsqlite3_column_int64(tls, pCount, 0) if nActual != nExpect { rtreeCheckAppendMsg(tls, pCheck, - ts+27270, libc.VaList(bp+24, zTbl, nExpect, nActual)) + ts+27317, libc.VaList(bp+24, zTbl, nExpect, nActual)) } } (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc = Xsqlite3_finalize(tls, pCount) @@ -124404,7 +124462,7 @@ } if (*RtreeCheck)(unsafe.Pointer(bp+32)).Frc == SQLITE_OK { - pStmt = rtreeCheckPrepare(tls, bp+32, ts+27337, libc.VaList(bp, zDb, zTab)) + pStmt = rtreeCheckPrepare(tls, bp+32, ts+27384, libc.VaList(bp, zDb, zTab)) if pStmt != 0 { nAux = Xsqlite3_column_count(tls, pStmt) - 2 Xsqlite3_finalize(tls, pStmt) @@ -124413,12 +124471,12 @@ } } - pStmt = rtreeCheckPrepare(tls, bp+32, ts+25157, libc.VaList(bp+16, zDb, zTab)) + pStmt = rtreeCheckPrepare(tls, bp+32, ts+25204, libc.VaList(bp+16, zDb, zTab)) if pStmt != 0 { var rc int32 (*RtreeCheck)(unsafe.Pointer(bp + 32)).FnDim = (Xsqlite3_column_count(tls, pStmt) - 1 - nAux) / 2 if (*RtreeCheck)(unsafe.Pointer(bp+32)).FnDim < 1 { - rtreeCheckAppendMsg(tls, bp+32, ts+27365, 0) + rtreeCheckAppendMsg(tls, bp+32, ts+27412, 0) } else if SQLITE_ROW == Xsqlite3_step(tls, pStmt) { (*RtreeCheck)(unsafe.Pointer(bp + 32)).FbInt = libc.Bool32(Xsqlite3_column_type(tls, pStmt, 1) == SQLITE_INTEGER) } @@ -124432,8 +124490,8 @@ if (*RtreeCheck)(unsafe.Pointer(bp+32)).Frc == SQLITE_OK { rtreeCheckNode(tls, bp+32, 0, uintptr(0), int64(1)) } - rtreeCheckCount(tls, bp+32, ts+27396, int64((*RtreeCheck)(unsafe.Pointer(bp+32)).FnLeaf)) - rtreeCheckCount(tls, bp+32, ts+27403, int64((*RtreeCheck)(unsafe.Pointer(bp+32)).FnNonLeaf)) + rtreeCheckCount(tls, bp+32, ts+27443, int64((*RtreeCheck)(unsafe.Pointer(bp+32)).FnLeaf)) + rtreeCheckCount(tls, bp+32, ts+27450, int64((*RtreeCheck)(unsafe.Pointer(bp+32)).FnNonLeaf)) } Xsqlite3_finalize(tls, (*RtreeCheck)(unsafe.Pointer(bp+32)).FpGetNode) @@ -124441,7 +124499,7 @@ Xsqlite3_finalize(tls, *(*uintptr)(unsafe.Pointer(bp + 32 + 40 + 1*8))) if bEnd != 0 { - var rc int32 = Xsqlite3_exec(tls, db, ts+27411, uintptr(0), uintptr(0), uintptr(0)) + var rc int32 = Xsqlite3_exec(tls, db, ts+27458, uintptr(0), uintptr(0), uintptr(0)) if (*RtreeCheck)(unsafe.Pointer(bp+32)).Frc == SQLITE_OK { (*RtreeCheck)(unsafe.Pointer(bp + 32)).Frc = rc } @@ -124456,7 +124514,7 @@ if nArg != 1 && nArg != 2 { Xsqlite3_result_error(tls, ctx, - ts+27415, -1) + 
ts+27462, -1) } else { var rc int32 *(*uintptr)(unsafe.Pointer(bp)) = uintptr(0) @@ -124474,7 +124532,7 @@ if *(*uintptr)(unsafe.Pointer(bp)) != 0 { return *(*uintptr)(unsafe.Pointer(bp)) } - return ts + 18012 + return ts + 18059 }(), -1, libc.UintptrFromInt32(-1)) } else { Xsqlite3_result_error_code(tls, ctx, rc) @@ -124845,11 +124903,11 @@ var db uintptr = Xsqlite3_context_db_handle(tls, context) var x uintptr = Xsqlite3_str_new(tls, db) var i int32 - Xsqlite3_str_append(tls, x, ts+27466, 1) + Xsqlite3_str_append(tls, x, ts+27513, 1) for i = 0; i < (*GeoPoly)(unsafe.Pointer(p)).FnVertex; i++ { - Xsqlite3_str_appendf(tls, x, ts+27468, libc.VaList(bp, float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2)*4))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2+1)*4))))) + Xsqlite3_str_appendf(tls, x, ts+27515, libc.VaList(bp, float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2)*4))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2+1)*4))))) } - Xsqlite3_str_appendf(tls, x, ts+27479, libc.VaList(bp+16, float64(*(*GeoCoord)(unsafe.Pointer(p + 8))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + 1*4))))) + Xsqlite3_str_appendf(tls, x, ts+27526, libc.VaList(bp+16, float64(*(*GeoCoord)(unsafe.Pointer(p + 8))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + 1*4))))) Xsqlite3_result_text(tls, context, Xsqlite3_str_finish(tls, x), -1, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{Xsqlite3_free}))) Xsqlite3_free(tls, p) } @@ -124869,19 +124927,19 @@ var x uintptr = Xsqlite3_str_new(tls, db) var i int32 var cSep uint8 = uint8('\'') - Xsqlite3_str_appendf(tls, x, ts+27490, 0) + Xsqlite3_str_appendf(tls, x, ts+27537, 0) for i = 0; i < (*GeoPoly)(unsafe.Pointer(p)).FnVertex; i++ { - Xsqlite3_str_appendf(tls, x, ts+27508, libc.VaList(bp, int32(cSep), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2)*4))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2+1)*4))))) + Xsqlite3_str_appendf(tls, x, ts+27555, libc.VaList(bp, int32(cSep), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2)*4))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2+1)*4))))) cSep = uint8(' ') } - Xsqlite3_str_appendf(tls, x, ts+27516, libc.VaList(bp+24, float64(*(*GeoCoord)(unsafe.Pointer(p + 8))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + 1*4))))) + Xsqlite3_str_appendf(tls, x, ts+27563, libc.VaList(bp+24, float64(*(*GeoCoord)(unsafe.Pointer(p + 8))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + 1*4))))) for i = 1; i < argc; i++ { var z uintptr = Xsqlite3_value_text(tls, *(*uintptr)(unsafe.Pointer(argv + uintptr(i)*8))) if z != 0 && *(*uint8)(unsafe.Pointer(z)) != 0 { - Xsqlite3_str_appendf(tls, x, ts+27524, libc.VaList(bp+40, z)) + Xsqlite3_str_appendf(tls, x, ts+27571, libc.VaList(bp+40, z)) } } - Xsqlite3_str_appendf(tls, x, ts+27528, 0) + Xsqlite3_str_appendf(tls, x, ts+27575, 0) Xsqlite3_result_text(tls, context, Xsqlite3_str_finish(tls, x), -1, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{Xsqlite3_free}))) Xsqlite3_free(tls, p) } @@ -125801,7 +125859,7 @@ libc.Xmemcpy(tls, (*Rtree)(unsafe.Pointer(pRtree)).FzName, *(*uintptr)(unsafe.Pointer(argv + 2*8)), uint64(nName)) pSql = Xsqlite3_str_new(tls, db) - Xsqlite3_str_appendf(tls, pSql, ts+27541, 0) + Xsqlite3_str_appendf(tls, pSql, ts+27588, 0) (*Rtree)(unsafe.Pointer(pRtree)).FnAux = U8(1) (*Rtree)(unsafe.Pointer(pRtree)).FnAuxNotNull = U8(1) ii = 3 @@ -125810,7 +125868,7 @@ goto __4 } (*Rtree)(unsafe.Pointer(pRtree)).FnAux++ - Xsqlite3_str_appendf(tls, pSql, ts+27563, libc.VaList(bp+8, 
*(*uintptr)(unsafe.Pointer(argv + uintptr(ii)*8)))) + Xsqlite3_str_appendf(tls, pSql, ts+27610, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(argv + uintptr(ii)*8)))) goto __3 __3: ii++ @@ -125818,7 +125876,7 @@ goto __4 __4: ; - Xsqlite3_str_appendf(tls, pSql, ts+26630, 0) + Xsqlite3_str_appendf(tls, pSql, ts+26677, 0) zSql = Xsqlite3_str_finish(tls, pSql) if !!(zSql != 0) { goto __5 @@ -126055,7 +126113,7 @@ } if iFuncTerm >= 0 { (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxNum = idxNum - (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxStr = ts + 27567 + (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxStr = ts + 27614 (*sqlite3_index_constraint_usage)(unsafe.Pointer((*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FaConstraintUsage + uintptr(iFuncTerm)*8)).FargvIndex = 1 (*sqlite3_index_constraint_usage)(unsafe.Pointer((*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FaConstraintUsage + uintptr(iFuncTerm)*8)).Fomit = uint8(0) (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FestimatedCost = 300.0 @@ -126063,7 +126121,7 @@ return SQLITE_OK } (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxNum = 4 - (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxStr = ts + 27573 + (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxStr = ts + 27620 (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FestimatedCost = 3000000.0 (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FestimatedRows = int64(100000) return SQLITE_OK @@ -126175,7 +126233,7 @@ if !(*(*int32)(unsafe.Pointer(bp + 48)) == SQLITE_ERROR) { goto __4 } - (*Sqlite3_vtab)(unsafe.Pointer(pVtab)).FzErrMsg = Xsqlite3_mprintf(tls, ts+27582, 0) + (*Sqlite3_vtab)(unsafe.Pointer(pVtab)).FzErrMsg = Xsqlite3_mprintf(tls, ts+27629, 0) __4: ; goto geopoly_update_end @@ -126307,14 +126365,14 @@ func geopolyFindFunction(tls *libc.TLS, pVtab uintptr, nArg int32, zName uintptr, pxFunc uintptr, ppArg uintptr) int32 { _ = pVtab _ = nArg - if Xsqlite3_stricmp(tls, zName, ts+27622) == 0 { + if Xsqlite3_stricmp(tls, zName, ts+27669) == 0 { *(*uintptr)(unsafe.Pointer(pxFunc)) = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{geopolyOverlapFunc})) *(*uintptr)(unsafe.Pointer(ppArg)) = uintptr(0) return SQLITE_INDEX_CONSTRAINT_FUNCTION } - if Xsqlite3_stricmp(tls, zName, ts+27638) == 0 { + if Xsqlite3_stricmp(tls, zName, ts+27685) == 0 { *(*uintptr)(unsafe.Pointer(pxFunc)) = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{geopolyWithinFunc})) @@ -126379,7 +126437,7 @@ uintptr(0), aAgg[i].FxStep, aAgg[i].FxFinal) } if rc == SQLITE_OK { - rc = Xsqlite3_create_module_v2(tls, db, ts+27653, uintptr(unsafe.Pointer(&geopolyModule)), uintptr(0), uintptr(0)) + rc = Xsqlite3_create_module_v2(tls, db, ts+27700, uintptr(unsafe.Pointer(&geopolyModule)), uintptr(0), uintptr(0)) } return rc } @@ -126391,25 +126449,25 @@ F__ccgo_pad1 [6]byte FzName uintptr }{ - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27661}, - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27674}, - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27687}, - {FxFunc: 0, FnArg: int8(-1), FbPure: uint8(1), FzName: ts + 27700}, - {FxFunc: 0, FnArg: int8(2), FbPure: uint8(1), FzName: ts + 27638}, - {FxFunc: 0, FnArg: int8(3), FbPure: uint8(1), FzName: ts + 27712}, - {FxFunc: 0, FnArg: int8(2), FbPure: uint8(1), FzName: ts + 27622}, - {FxFunc: 0, FnArg: int8(1), FzName: ts + 27735}, - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27749}, - {FxFunc: 0, FnArg: int8(7), FbPure: uint8(1), 
FzName: ts + 27762}, - {FxFunc: 0, FnArg: int8(4), FbPure: uint8(1), FzName: ts + 27776}, - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27792}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27708}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27721}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27734}, + {FxFunc: 0, FnArg: int8(-1), FbPure: uint8(1), FzName: ts + 27747}, + {FxFunc: 0, FnArg: int8(2), FbPure: uint8(1), FzName: ts + 27685}, + {FxFunc: 0, FnArg: int8(3), FbPure: uint8(1), FzName: ts + 27759}, + {FxFunc: 0, FnArg: int8(2), FbPure: uint8(1), FzName: ts + 27669}, + {FxFunc: 0, FnArg: int8(1), FzName: ts + 27782}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27796}, + {FxFunc: 0, FnArg: int8(7), FbPure: uint8(1), FzName: ts + 27809}, + {FxFunc: 0, FnArg: int8(4), FbPure: uint8(1), FzName: ts + 27823}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27839}, } var aAgg = [1]struct { FxStep uintptr FxFinal uintptr FzName uintptr }{ - {FxStep: 0, FxFinal: 0, FzName: ts + 27804}, + {FxStep: 0, FxFinal: 0, FzName: ts + 27851}, } // Register the r-tree module with database handle db. This creates the @@ -126419,26 +126477,26 @@ var utf8 int32 = SQLITE_UTF8 var rc int32 - rc = Xsqlite3_create_function(tls, db, ts+27823, 2, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + rc = Xsqlite3_create_function(tls, db, ts+27870, 2, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rtreenode})), uintptr(0), uintptr(0)) if rc == SQLITE_OK { - rc = Xsqlite3_create_function(tls, db, ts+27833, 1, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + rc = Xsqlite3_create_function(tls, db, ts+27880, 1, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rtreedepth})), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { - rc = Xsqlite3_create_function(tls, db, ts+27844, -1, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + rc = Xsqlite3_create_function(tls, db, ts+27891, -1, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rtreecheck})), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { var c uintptr = uintptr(RTREE_COORD_REAL32) - rc = Xsqlite3_create_module_v2(tls, db, ts+27567, uintptr(unsafe.Pointer(&rtreeModule)), c, uintptr(0)) + rc = Xsqlite3_create_module_v2(tls, db, ts+27614, uintptr(unsafe.Pointer(&rtreeModule)), c, uintptr(0)) } if rc == SQLITE_OK { var c uintptr = uintptr(RTREE_COORD_INT32) - rc = Xsqlite3_create_module_v2(tls, db, ts+27855, uintptr(unsafe.Pointer(&rtreeModule)), c, uintptr(0)) + rc = Xsqlite3_create_module_v2(tls, db, ts+27902, uintptr(unsafe.Pointer(&rtreeModule)), c, uintptr(0)) } if rc == SQLITE_OK { rc = sqlite3_geopoly_init(tls, db) @@ -126492,7 +126550,7 @@ Xsqlite3_result_error_nomem(tls, ctx) rtreeMatchArgFree(tls, pBlob) } else { - Xsqlite3_result_pointer(tls, ctx, pBlob, ts+25143, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{rtreeMatchArgFree}))) + Xsqlite3_result_pointer(tls, ctx, pBlob, ts+25190, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{rtreeMatchArgFree}))) } } } @@ -126819,7 +126877,7 @@ nOut = rbuDeltaOutputSize(tls, aDelta, nDelta) if nOut < 0 { - Xsqlite3_result_error(tls, context, ts+27865, -1) + Xsqlite3_result_error(tls, context, ts+27912, -1) return } @@ -126830,7 +126888,7 @@ nOut2 = rbuDeltaApply(tls, aOrig, nOrig, aDelta, nDelta, aOut) if nOut2 != nOut { 
Xsqlite3_free(tls, aOut) - Xsqlite3_result_error(tls, context, ts+27865, -1) + Xsqlite3_result_error(tls, context, ts+27912, -1) } else { Xsqlite3_result_blob(tls, context, aOut, nOut, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{Xsqlite3_free}))) } @@ -126931,7 +126989,7 @@ rbuObjIterClearStatements(tls, pIter) if (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx == uintptr(0) { rc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+27886, uintptr(0), uintptr(0), p+64) + ts+27933, uintptr(0), uintptr(0), p+64) } if rc == SQLITE_OK { @@ -126995,7 +127053,7 @@ Xsqlite3_result_text(tls, pCtx, zIn, -1, uintptr(0)) } } else { - if libc.Xstrlen(tls, zIn) > uint64(4) && libc.Xmemcmp(tls, ts+25056, zIn, uint64(4)) == 0 { + if libc.Xstrlen(tls, zIn) > uint64(4) && libc.Xmemcmp(tls, ts+25103, zIn, uint64(4)) == 0 { var i int32 for i = 4; int32(*(*uint8)(unsafe.Pointer(zIn + uintptr(i)))) >= '0' && int32(*(*uint8)(unsafe.Pointer(zIn + uintptr(i)))) <= '9'; i++ { } @@ -127016,16 +127074,16 @@ rc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, pIter, p+64, Xsqlite3_mprintf(tls, - ts+28057, libc.VaList(bp, func() uintptr { + ts+28104, libc.VaList(bp, func() uintptr { if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - return ts + 28207 + return ts + 28254 } return ts + 1554 }()))) if rc == SQLITE_OK { rc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+8, p+64, - ts+28248) + ts+28295) } (*RbuObjIter)(unsafe.Pointer(pIter)).FbCleanup = 1 @@ -127141,7 +127199,7 @@ (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+32, p+64, Xsqlite3_mprintf(tls, - ts+28373, libc.VaList(bp, zTab))) + ts+28420, libc.VaList(bp, zTab))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc != SQLITE_OK || Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 32))) != SQLITE_ROW) { goto __1 } @@ -127159,7 +127217,7 @@ *(*int32)(unsafe.Pointer(piTnum)) = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 32)), 1) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+32+1*8, p+64, - Xsqlite3_mprintf(tls, ts+28492, libc.VaList(bp+8, zTab))) + Xsqlite3_mprintf(tls, ts+28539, libc.VaList(bp+8, zTab))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc != 0) { goto __3 } @@ -127177,7 +127235,7 @@ } (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+32+2*8, p+64, Xsqlite3_mprintf(tls, - ts+28513, libc.VaList(bp+16, zIdx))) + ts+28560, libc.VaList(bp+16, zIdx))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK) { goto __7 } @@ -127200,7 +127258,7 @@ __5: ; (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+32+3*8, p+64, - Xsqlite3_mprintf(tls, ts+28564, libc.VaList(bp+24, zTab))) + Xsqlite3_mprintf(tls, ts+28611, libc.VaList(bp+24, zTab))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK) { goto __10 } @@ -127246,7 +127304,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { libc.Xmemcpy(tls, (*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed, (*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk, uint64(unsafe.Sizeof(U8(0)))*uint64((*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol)) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+16, p+64, - Xsqlite3_mprintf(tls, ts+28585, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) 
+ Xsqlite3_mprintf(tls, ts+28632, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) } (*RbuObjIter)(unsafe.Pointer(pIter)).FnIndex = 0 @@ -127261,7 +127319,7 @@ libc.Xmemset(tls, (*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed, 0x01, uint64(unsafe.Sizeof(U8(0)))*uint64((*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol)) } (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+24, p+64, - Xsqlite3_mprintf(tls, ts+28613, libc.VaList(bp+8, zIdx))) + Xsqlite3_mprintf(tls, ts+28660, libc.VaList(bp+8, zIdx))) for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) { var iCid int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 24)), 1) if iCid >= 0 { @@ -127301,7 +127359,7 @@ rbuTableType(tls, p, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, pIter+72, bp+56, pIter+108) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NOTABLE { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+19489, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl)) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+19536, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl)) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc != 0 { return (*Sqlite3rbu)(unsafe.Pointer(p)).Frc @@ -127311,18 +127369,18 @@ } (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp+64, p+64, - Xsqlite3_mprintf(tls, ts+28642, libc.VaList(bp+8, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl))) + Xsqlite3_mprintf(tls, ts+28689, libc.VaList(bp+8, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl))) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { nCol = Xsqlite3_column_count(tls, *(*uintptr)(unsafe.Pointer(bp + 64))) rbuAllocateIterArrays(tls, p, pIter, nCol) } for i = 0; (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && i < nCol; i++ { var zName uintptr = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp + 64)), i) - if Xsqlite3_strnicmp(tls, ts+28661, zName, 4) != 0 { + if Xsqlite3_strnicmp(tls, ts+28708, zName, 4) != 0 { var zCopy uintptr = rbuStrndup(tls, zName, p+56) *(*int32)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FaiSrcOrder + uintptr((*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol)*4)) = (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(libc.PostIncInt32(&(*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol, 1))*8)) = zCopy - } else if 0 == Xsqlite3_stricmp(tls, ts+28666, zName) { + } else if 0 == Xsqlite3_stricmp(tls, ts+28713, zName) { bRbuRowid = 1 } } @@ -127334,18 +127392,18 @@ bRbuRowid != libc.Bool32((*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_VTAB || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE) { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, - ts+28676, libc.VaList(bp+16, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, + ts+28723, libc.VaList(bp+16, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, func() uintptr { if bRbuRowid != 0 { - return ts + 28705 + return ts + 28752 } - return ts + 28718 + return ts + 28765 }())) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, 
(*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+64, p+64, - Xsqlite3_mprintf(tls, ts+28727, libc.VaList(bp+32, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) + Xsqlite3_mprintf(tls, ts+28774, libc.VaList(bp+32, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) } for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 64))) { var zName uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 64)), 1) @@ -127359,7 +127417,7 @@ } if i == (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+28749, + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+28796, libc.VaList(bp+40, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zName)) } else { var iPk int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 64)), 5) @@ -127406,7 +127464,7 @@ var i int32 for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { var z uintptr = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) - zList = rbuMPrintf(tls, p, ts+28776, libc.VaList(bp, zList, zSep, z)) + zList = rbuMPrintf(tls, p, ts+28823, libc.VaList(bp, zList, zSep, z)) zSep = ts + 14614 } return zList @@ -127424,7 +127482,7 @@ for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { if int32(*(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(i)))) == iPk { var zCol uintptr = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) - zRet = rbuMPrintf(tls, p, ts+28785, libc.VaList(bp, zRet, zSep, zPre, zCol, zPost)) + zRet = rbuMPrintf(tls, p, ts+28832, libc.VaList(bp, zRet, zSep, zPre, zCol, zPost)) zSep = zSeparator break } @@ -127446,25 +127504,25 @@ if bRowid != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+72, p+64, Xsqlite3_mprintf(tls, - ts+28798, libc.VaList(bp, zWrite, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) + ts+28845, libc.VaList(bp, zWrite, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 72))) { var iMax Sqlite3_int64 = Xsqlite3_column_int64(tls, *(*uintptr)(unsafe.Pointer(bp + 72)), 0) - zRet = rbuMPrintf(tls, p, ts+28830, libc.VaList(bp+16, iMax)) + zRet = rbuMPrintf(tls, p, ts+28877, libc.VaList(bp+16, iMax)) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 72))) } else { - var zOrder uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+1554, ts+14614, ts+28853) - var zSelect uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+28859, ts+28866, ts+4957) + var zOrder uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+1554, ts+14614, ts+28900) + var zSelect uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+28906, ts+28913, ts+4957) var zList uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+1554, ts+14614, ts+1554) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+72, p+64, Xsqlite3_mprintf(tls, - ts+28874, + ts+28921, libc.VaList(bp+24, zSelect, zWrite, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zOrder))) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 72))) { var zVal uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp 
+ 72)), 0) - zRet = rbuMPrintf(tls, p, ts+28916, libc.VaList(bp+56, zList, zVal)) + zRet = rbuMPrintf(tls, p, ts+28963, libc.VaList(bp+56, zList, zVal)) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 72))) } @@ -127506,7 +127564,7 @@ *(*uintptr)(unsafe.Pointer(bp + 176)) = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+176, p+64, - Xsqlite3_mprintf(tls, ts+28613, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx))) + Xsqlite3_mprintf(tls, ts+28660, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx))) __1: if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 176)))) { goto __2 @@ -127541,7 +127599,7 @@ zCol = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) goto __7 __6: - zCol = ts + 28936 + zCol = ts + 28983 __7: ; goto __5 @@ -127549,11 +127607,11 @@ zCol = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(iCid)*8)) __5: ; - zLhs = rbuMPrintf(tls, p, ts+28944, + zLhs = rbuMPrintf(tls, p, ts+28991, libc.VaList(bp+8, zLhs, zSep, zCol, zCollate)) - zOrder = rbuMPrintf(tls, p, ts+28965, + zOrder = rbuMPrintf(tls, p, ts+29012, libc.VaList(bp+40, zOrder, zSep, iCol, zCol, zCollate)) - zSelect = rbuMPrintf(tls, p, ts+29001, + zSelect = rbuMPrintf(tls, p, ts+29048, libc.VaList(bp+80, zSelect, zSep, iCol, zCol)) zSep = ts + 14614 iCol++ @@ -127573,7 +127631,7 @@ *(*uintptr)(unsafe.Pointer(bp + 184)) = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+184, p+64, - Xsqlite3_mprintf(tls, ts+29028, + Xsqlite3_mprintf(tls, ts+29075, libc.VaList(bp+112, zSelect, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zOrder))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 184)))) { goto __13 @@ -127600,7 +127658,7 @@ ; __18: ; - zVector = rbuMPrintf(tls, p, ts+29076, libc.VaList(bp+136, zVector, zSep, zQuoted)) + zVector = rbuMPrintf(tls, p, ts+29123, libc.VaList(bp+136, zVector, zSep, zQuoted)) zSep = ts + 14614 goto __15 __15: @@ -127612,7 +127670,7 @@ if !!(bFailed != 0) { goto __20 } - zRet = rbuMPrintf(tls, p, ts+29083, libc.VaList(bp+160, zLhs, zVector)) + zRet = rbuMPrintf(tls, p, ts+29130, libc.VaList(bp+160, zLhs, zVector)) __20: ; __13: @@ -127645,7 +127703,7 @@ if rc == SQLITE_OK { rc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+200, p+64, - Xsqlite3_mprintf(tls, ts+28613, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx))) + Xsqlite3_mprintf(tls, ts+28660, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx))) } for rc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 200))) { @@ -127657,7 +127715,7 @@ if iCid == -2 { var iSeq int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 200)), 0) - zRet = Xsqlite3_mprintf(tls, ts+29095, libc.VaList(bp+8, zRet, zCom, + zRet = Xsqlite3_mprintf(tls, ts+29142, libc.VaList(bp+8, zRet, zCom, (*RbuSpan)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FaIdxCol+uintptr(iSeq)*16)).FnSpan, (*RbuSpan)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FaIdxCol+uintptr(iSeq)*16)).FzSpan, zCollate)) zType = ts + 1554 } else { @@ -127669,37 +127727,37 @@ zCol = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) } else if 
(*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - zCol = ts + 28936 + zCol = ts + 28983 } else { - zCol = ts + 28666 + zCol = ts + 28713 } zType = ts + 1119 } else { zCol = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(iCid)*8)) zType = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblType + uintptr(iCid)*8)) } - zRet = Xsqlite3_mprintf(tls, ts+29117, libc.VaList(bp+48, zRet, zCom, zCol, zCollate)) + zRet = Xsqlite3_mprintf(tls, ts+29164, libc.VaList(bp+48, zRet, zCom, zCol, zCollate)) } if (*RbuObjIter)(unsafe.Pointer(pIter)).FbUnique == 0 || Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 200)), 5) != 0 { var zOrder uintptr = func() uintptr { if bDesc != 0 { - return ts + 28853 + return ts + 28900 } return ts + 1554 }() - zImpPK = Xsqlite3_mprintf(tls, ts+29137, + zImpPK = Xsqlite3_mprintf(tls, ts+29184, libc.VaList(bp+80, zImpPK, zCom, nBind, zCol, zOrder)) } - zImpCols = Xsqlite3_mprintf(tls, ts+29158, + zImpCols = Xsqlite3_mprintf(tls, ts+29205, libc.VaList(bp+120, zImpCols, zCom, nBind, zCol, zType, zCollate)) zWhere = Xsqlite3_mprintf(tls, - ts+29191, libc.VaList(bp+168, zWhere, zAnd, nBind, zCol)) + ts+29238, libc.VaList(bp+168, zWhere, zAnd, nBind, zCol)) if zRet == uintptr(0) || zImpPK == uintptr(0) || zImpCols == uintptr(0) || zWhere == uintptr(0) { rc = SQLITE_NOMEM } zCom = ts + 14614 - zAnd = ts + 21525 + zAnd = ts + 21572 nBind++ } @@ -127738,9 +127796,9 @@ for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { if *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed + uintptr(i))) != 0 { var zCol uintptr = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) - zList = Xsqlite3_mprintf(tls, ts+29215, libc.VaList(bp, zList, zS, zObj, zCol)) + zList = Xsqlite3_mprintf(tls, ts+29262, libc.VaList(bp, zList, zS, zObj, zCol)) } else { - zList = Xsqlite3_mprintf(tls, ts+29227, libc.VaList(bp+32, zList, zS)) + zList = Xsqlite3_mprintf(tls, ts+29274, libc.VaList(bp+32, zList, zS)) } zS = ts + 14614 if zList == uintptr(0) { @@ -127750,7 +127808,7 @@ } if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { - zList = rbuMPrintf(tls, p, ts+29236, libc.VaList(bp+48, zList, zObj)) + zList = rbuMPrintf(tls, p, ts+29283, libc.VaList(bp+48, zList, zObj)) } } return zList @@ -127762,18 +127820,18 @@ var zList uintptr = uintptr(0) if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_VTAB || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { - zList = rbuMPrintf(tls, p, ts+29251, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol+1)) + zList = rbuMPrintf(tls, p, ts+29298, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol+1)) } else if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL { var zSep uintptr = ts + 1554 var i int32 for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { if *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(i))) != 0 { - zList = rbuMPrintf(tls, p, ts+29265, libc.VaList(bp+8, zList, zSep, i, i+1)) - zSep = ts + 21525 + zList = rbuMPrintf(tls, p, ts+29312, libc.VaList(bp+8, zList, zSep, i, i+1)) + zSep = ts + 21572 } } zList = rbuMPrintf(tls, p, - ts+29277, libc.VaList(bp+40, zList)) + ts+29324, libc.VaList(bp+40, zList)) } else { var zSep uintptr = ts + 1554 @@ -127781,8 +127839,8 @@ for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { if 
*(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(i))) != 0 { var zCol uintptr = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) - zList = rbuMPrintf(tls, p, ts+29327, libc.VaList(bp+48, zList, zSep, zCol, i+1)) - zSep = ts + 21525 + zList = rbuMPrintf(tls, p, ts+29374, libc.VaList(bp+48, zList, zSep, zCol, i+1)) + zSep = ts + 21572 } } } @@ -127791,7 +127849,7 @@ func rbuBadControlError(tls *libc.TLS, p uintptr) { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+29340, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+29387, 0) } func rbuObjIterGetSetlist(tls *libc.TLS, p uintptr, pIter uintptr, zMask uintptr) uintptr { @@ -127809,15 +127867,15 @@ for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { var c uint8 = *(*uint8)(unsafe.Pointer(zMask + uintptr(*(*int32)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FaiSrcOrder + uintptr(i)*4))))) if int32(c) == 'x' { - zList = rbuMPrintf(tls, p, ts+29327, + zList = rbuMPrintf(tls, p, ts+29374, libc.VaList(bp, zList, zSep, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)), i+1)) zSep = ts + 14614 } else if int32(c) == 'd' { - zList = rbuMPrintf(tls, p, ts+29366, + zList = rbuMPrintf(tls, p, ts+29413, libc.VaList(bp+32, zList, zSep, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)), *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)), i+1)) zSep = ts + 14614 } else if int32(c) == 'f' { - zList = rbuMPrintf(tls, p, ts+29396, + zList = rbuMPrintf(tls, p, ts+29443, libc.VaList(bp+72, zList, zSep, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)), *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)), i+1)) zSep = ts + 14614 } @@ -127854,19 +127912,19 @@ var z uintptr = uintptr(0) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - var zSep uintptr = ts + 29433 + var zSep uintptr = ts + 29480 *(*uintptr)(unsafe.Pointer(bp + 56)) = uintptr(0) *(*uintptr)(unsafe.Pointer(bp + 64)) = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+56, p+64, - Xsqlite3_mprintf(tls, ts+28585, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) + Xsqlite3_mprintf(tls, ts+28632, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 56))) { var zOrig uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), 3) if zOrig != 0 && libc.Xstrcmp(tls, zOrig, ts+16155) == 0 { var zIdx uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), 1) if zIdx != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+64, p+64, - Xsqlite3_mprintf(tls, ts+28613, libc.VaList(bp+8, zIdx))) + Xsqlite3_mprintf(tls, ts+28660, libc.VaList(bp+8, zIdx))) } break } @@ -127878,15 +127936,15 @@ var zCol uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 64)), 2) var zDesc uintptr if Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 64)), 3) != 0 { - zDesc = ts + 28853 + zDesc = ts + 28900 } else { zDesc = ts + 1554 } - z = rbuMPrintf(tls, p, ts+29446, libc.VaList(bp+16, z, zSep, zCol, 
zDesc)) + z = rbuMPrintf(tls, p, ts+29493, libc.VaList(bp+16, z, zSep, zCol, zDesc)) zSep = ts + 14614 } } - z = rbuMPrintf(tls, p, ts+29457, libc.VaList(bp+48, z)) + z = rbuMPrintf(tls, p, ts+29504, libc.VaList(bp+48, z)) rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 64))) } return z @@ -127906,7 +127964,7 @@ var zPk uintptr = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+168, p+64, - ts+29461) + ts+29508) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { Xsqlite3_bind_int(tls, *(*uintptr)(unsafe.Pointer(bp + 168)), 1, tnum) if SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 168))) { @@ -127915,7 +127973,7 @@ } if zIdx != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+176, p+64, - Xsqlite3_mprintf(tls, ts+28613, libc.VaList(bp, zIdx))) + Xsqlite3_mprintf(tls, ts+28660, libc.VaList(bp, zIdx))) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 168))) @@ -127925,23 +127983,23 @@ var iCid int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 176)), 1) var bDesc int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 176)), 3) var zCollate uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 176)), 4) - zCols = rbuMPrintf(tls, p, ts+29511, libc.VaList(bp+8, zCols, zComma, + zCols = rbuMPrintf(tls, p, ts+29558, libc.VaList(bp+8, zCols, zComma, iCid, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblType + uintptr(iCid)*8)), zCollate)) - zPk = rbuMPrintf(tls, p, ts+29533, libc.VaList(bp+48, zPk, zComma, iCid, func() uintptr { + zPk = rbuMPrintf(tls, p, ts+29580, libc.VaList(bp+48, zPk, zComma, iCid, func() uintptr { if bDesc != 0 { - return ts + 28853 + return ts + 28900 } return ts + 1554 }())) zComma = ts + 14614 } } - zCols = rbuMPrintf(tls, p, ts+29543, libc.VaList(bp+80, zCols)) + zCols = rbuMPrintf(tls, p, ts+29590, libc.VaList(bp+80, zCols)) rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 176))) Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+88, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6441, 1, tnum)) rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+29558, + ts+29605, libc.VaList(bp+120, zCols, zPk)) Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+136, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6441, 0, 0)) } @@ -127967,13 +128025,13 @@ (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6441, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zCol, uintptr(0), bp+192, uintptr(0), uintptr(0), uintptr(0)) if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_IPK && *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(iCol))) != 0 { - zPk = ts + 29620 + zPk = ts + 29667 } - zSql = rbuMPrintf(tls, p, ts+29633, + zSql = rbuMPrintf(tls, p, ts+29680, libc.VaList(bp+32, zSql, zComma, zCol, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblType + uintptr(iCol)*8)), zPk, *(*uintptr)(unsafe.Pointer(bp + 192)), func() uintptr { if *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabNotNull + uintptr(iCol))) != 0 { - return ts + 29660 + return ts + 29707 } return ts + 1554 }())) @@ -127983,16 +128041,16 @@ if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_WITHOUT_ROWID { var zPk uintptr = rbuWithoutRowidPK(tls, p, pIter) if zPk != 0 { - zSql = rbuMPrintf(tls, p, ts+29670, libc.VaList(bp+88, zSql, zPk)) + zSql = rbuMPrintf(tls, p, ts+29717, 
libc.VaList(bp+88, zSql, zPk)) } } Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+104, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6441, 1, tnum)) - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+29677, + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+29724, libc.VaList(bp+136, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zSql, func() uintptr { if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_WITHOUT_ROWID { - return ts + 29709 + return ts + 29756 } return ts + 1554 }())) @@ -128009,7 +128067,7 @@ if zBind != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, pIter+152, p+64, Xsqlite3_mprintf(tls, - ts+29724, + ts+29771, libc.VaList(bp, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zCollist, zRbuRowid, zBind))) } } @@ -128046,7 +128104,7 @@ if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { *(*int32)(unsafe.Pointer(bp + 8)) = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp, p+64, - ts+29781) + ts+29828) } if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { var rc2 int32 @@ -128151,7 +128209,7 @@ var zLimit uintptr = uintptr(0) if nOffset != 0 { - zLimit = Xsqlite3_mprintf(tls, ts+29847, libc.VaList(bp, nOffset)) + zLimit = Xsqlite3_mprintf(tls, ts+29894, libc.VaList(bp, nOffset)) if !(zLimit != 0) { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_NOMEM } @@ -128174,7 +128232,7 @@ Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+8, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6441, 0, 1)) Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+40, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6441, 1, tnum)) rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+29867, + ts+29914, libc.VaList(bp+72, zTbl, *(*uintptr)(unsafe.Pointer(bp + 600)), *(*uintptr)(unsafe.Pointer(bp + 608)))) Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+96, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6441, 0, 0)) @@ -128182,13 +128240,13 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+136, p+64, - Xsqlite3_mprintf(tls, ts+29932, libc.VaList(bp+128, zTbl, zBind))) + Xsqlite3_mprintf(tls, ts+29979, libc.VaList(bp+128, zTbl, zBind))) } if libc.Bool32((*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0)) == 0 && (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+144, p+64, - Xsqlite3_mprintf(tls, ts+29968, libc.VaList(bp+144, zTbl, *(*uintptr)(unsafe.Pointer(bp + 616))))) + Xsqlite3_mprintf(tls, ts+30015, libc.VaList(bp+144, zTbl, *(*uintptr)(unsafe.Pointer(bp + 616))))) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { @@ -128204,7 +128262,7 @@ } zSql = Xsqlite3_mprintf(tls, - ts+30002, + ts+30049, libc.VaList(bp+160, zCollist, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zPart, @@ -128212,9 +128270,9 @@ if zStart != 0 { return func() uintptr { if zPart != 0 { - return ts + 30063 + return ts + 30110 } - return ts + 30067 + return ts + 30114 }() } return ts + 1554 @@ -128223,20 +128281,20 @@ Xsqlite3_free(tls, zStart) } else if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { zSql = Xsqlite3_mprintf(tls, - ts+30073, + ts+30120, libc.VaList(bp+216, 
zCollist, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zPart, zCollist, zLimit)) } else { zSql = Xsqlite3_mprintf(tls, - ts+30134, + ts+30181, libc.VaList(bp+264, zCollist, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zPart, zCollist, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zPart, func() uintptr { if zPart != 0 { - return ts + 30063 + return ts + 30110 } - return ts + 30067 + return ts + 30114 }(), zCollist, zLimit)) } @@ -128273,16 +128331,16 @@ if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_VTAB { return ts + 1554 } - return ts + 30293 + return ts + 30340 }() if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+136, pz, Xsqlite3_mprintf(tls, - ts+30302, + ts+30349, libc.VaList(bp+344, zWrite, zTbl, zCollist, func() uintptr { if bRbuRowid != 0 { - return ts + 30338 + return ts + 30385 } return ts + 1554 }(), zBindings))) @@ -128291,32 +128349,32 @@ if libc.Bool32((*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0)) == 0 && (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+144, pz, Xsqlite3_mprintf(tls, - ts+30348, libc.VaList(bp+384, zWrite, zTbl, zWhere))) + ts+30395, libc.VaList(bp+384, zWrite, zTbl, zWhere))) } if libc.Bool32((*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0)) == 0 && (*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed != 0 { var zRbuRowid uintptr = ts + 1554 if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { - zRbuRowid = ts + 30376 + zRbuRowid = ts + 30423 } rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, - ts+30388, libc.VaList(bp+408, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, func() uintptr { + ts+30435, libc.VaList(bp+408, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, func() uintptr { if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL { - return ts + 30464 + return ts + 30511 } return ts + 1554 }(), (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl)) rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+30481, + ts+30528, libc.VaList(bp+440, zWrite, zTbl, zOldlist, zWrite, zTbl, zOldlist, zWrite, zTbl, zNewlist)) if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+30780, + ts+30827, libc.VaList(bp+512, zWrite, zTbl, zNewlist)) } @@ -128329,9 +128387,9 @@ var zOrder uintptr = uintptr(0) if bRbuRowid != 0 { if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - zRbuRowid = ts + 30879 + zRbuRowid = ts + 30926 } else { - zRbuRowid = ts + 30889 + zRbuRowid = ts + 30936 } } @@ -128344,7 +128402,7 @@ } } if bRbuRowid != 0 { - zOrder = rbuMPrintf(tls, p, ts+28936, 0) + zOrder = rbuMPrintf(tls, p, ts+28983, 0) } else { zOrder = rbuObjIterGetPkList(tls, p, pIter, ts+1554, ts+14614, ts+1554) } @@ -128353,11 +128411,11 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, pIter+128, pz, Xsqlite3_mprintf(tls, - ts+30900, + ts+30947, libc.VaList(bp+536, zCollist, func() uintptr { if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - return ts + 30948 + return ts + 30995 } return ts + 
1554 }(), @@ -128370,7 +128428,7 @@ }(), func() uintptr { if zOrder != 0 { - return ts + 22859 + return ts + 22906 } return ts + 1554 }(), zOrder, @@ -128438,9 +128496,9 @@ var zPrefix uintptr = ts + 1554 if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType != RBU_PK_VTAB { - zPrefix = ts + 30293 + zPrefix = ts + 30340 } - zUpdate = Xsqlite3_mprintf(tls, ts+30954, + zUpdate = Xsqlite3_mprintf(tls, ts+31001, libc.VaList(bp, zPrefix, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zSet, zWhere)) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pUp+8, p+64, zUpdate) @@ -128499,7 +128557,7 @@ } *(*int32)(unsafe.Pointer(bp + 16)) = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp+8, p+64, - Xsqlite3_mprintf(tls, ts+30984, libc.VaList(bp, p+48))) + Xsqlite3_mprintf(tls, ts+31031, libc.VaList(bp, p+48))) for *(*int32)(unsafe.Pointer(bp + 16)) == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 8))) { switch Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 8)), 0) { case RBU_STATE_STAGE: @@ -128572,18 +128630,18 @@ Xsqlite3_file_control(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+6441, SQLITE_FCNTL_RBUCNT, p) if (*Sqlite3rbu)(unsafe.Pointer(p)).FzState == uintptr(0) { var zFile uintptr = Xsqlite3_db_filename(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+6441) - (*Sqlite3rbu)(unsafe.Pointer(p)).FzState = rbuMPrintf(tls, p, ts+31014, libc.VaList(bp, zFile, zFile)) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzState = rbuMPrintf(tls, p, ts+31061, libc.VaList(bp, zFile, zFile)) } } if (*Sqlite3rbu)(unsafe.Pointer(p)).FzState != 0 { - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+31042, libc.VaList(bp+16, (*Sqlite3rbu)(unsafe.Pointer(p)).FzState)) + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+31089, libc.VaList(bp+16, (*Sqlite3rbu)(unsafe.Pointer(p)).FzState)) libc.Xmemcpy(tls, p+48, ts+3286, uint64(4)) } else { libc.Xmemcpy(tls, p+48, ts+6441, uint64(4)) } - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+31060, libc.VaList(bp+24, p+48)) + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+31107, libc.VaList(bp+24, p+48)) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { var bOpen int32 = 0 @@ -128623,11 +128681,11 @@ return } (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31126, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31173, 0) } else { var zTarget uintptr var zExtra uintptr = uintptr(0) - if libc.Xstrlen(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu) >= uint64(5) && 0 == libc.Xmemcmp(tls, ts+24206, (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu, uint64(5)) { + if libc.Xstrlen(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu) >= uint64(5) && 0 == libc.Xmemcmp(tls, ts+24253, (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu, uint64(5)) { zExtra = (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu + 5 for *(*uint8)(unsafe.Pointer(zExtra)) != 0 { if int32(*(*uint8)(unsafe.Pointer(libc.PostIncUintptr(&zExtra, 1)))) == '?' 
{ @@ -128639,13 +128697,13 @@ } } - zTarget = Xsqlite3_mprintf(tls, ts+31158, + zTarget = Xsqlite3_mprintf(tls, ts+31205, libc.VaList(bp+32, Xsqlite3_db_filename(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+6441), func() uintptr { if zExtra == uintptr(0) { return ts + 1554 } - return ts + 31190 + return ts + 31237 }(), func() uintptr { if zExtra == uintptr(0) { return ts + 1554 @@ -128664,21 +128722,21 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_create_function(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+31192, -1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { + ts+31239, -1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rbuTmpInsertFunc})), uintptr(0), uintptr(0)) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_create_function(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+31207, 2, SQLITE_UTF8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + ts+31254, 2, SQLITE_UTF8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rbuFossilDeltaFunc})), uintptr(0), uintptr(0)) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_create_function(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, - ts+31224, -1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { + ts+31271, -1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rbuTargetNameFunc})), uintptr(0), uintptr(0)) } @@ -128686,7 +128744,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_file_control(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6441, SQLITE_FCNTL_RBU, p) } - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31240, 0) + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31287, 0) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_file_control(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6441, SQLITE_FCNTL_RBU, p) @@ -128694,7 +128752,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_NOTFOUND { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31268, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31315, 0) } } @@ -128723,14 +128781,14 @@ if pState == uintptr(0) { (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage = 0 if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31240, uintptr(0), uintptr(0), uintptr(0)) + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31287, uintptr(0), uintptr(0), uintptr(0)) } } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { var rc2 int32 (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage = RBU_STAGE_CAPTURE - rc2 = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31286, uintptr(0), uintptr(0), uintptr(0)) + rc2 = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31333, uintptr(0), uintptr(0), uintptr(0)) if rc2 != SQLITE_NOTICE { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = rc2 } @@ -128856,7 +128914,7 @@ func rbuExclusiveCheckpoint(tls *libc.TLS, db uintptr) int32 { var zUri uintptr = Xsqlite3_db_filename(tls, db, uintptr(0)) - return 
Xsqlite3_uri_boolean(tls, zUri, ts+31321, 0) + return Xsqlite3_uri_boolean(tls, zUri, ts+31368, 0) } func rbuMoveOalFile(tls *libc.TLS, p uintptr) { @@ -128871,8 +128929,8 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { zMove = Xsqlite3_db_filename(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+6441) } - zOal = Xsqlite3_mprintf(tls, ts+31346, libc.VaList(bp, zMove)) - zWal = Xsqlite3_mprintf(tls, ts+31353, libc.VaList(bp+8, zMove)) + zOal = Xsqlite3_mprintf(tls, ts+31393, libc.VaList(bp, zMove)) + zWal = Xsqlite3_mprintf(tls, ts+31400, libc.VaList(bp+8, zMove)) if zWal == uintptr(0) || zOal == uintptr(0) { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_NOMEM @@ -128989,7 +129047,7 @@ (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx == uintptr(0) && (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_IPK && *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(i))) != 0 && Xsqlite3_column_type(tls, (*RbuObjIter)(unsafe.Pointer(pIter)).FpSelect, i) == SQLITE_NULL { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_MISMATCH - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+23844, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+23891, 0) return } @@ -129082,7 +129140,7 @@ var iCookie int32 = 1000000 (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, dbread, bp+8, p+64, - ts+31360) + ts+31407) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { if SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 8))) { iCookie = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 8)), 0) @@ -129090,7 +129148,7 @@ rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 8))) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31382, libc.VaList(bp, iCookie+1)) + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31429, libc.VaList(bp, iCookie+1)) } } } @@ -129111,7 +129169,7 @@ rc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp+168, p+64, Xsqlite3_mprintf(tls, - ts+31409, + ts+31456, libc.VaList(bp, p+48, RBU_STATE_STAGE, eStage, RBU_STATE_TBL, (*Sqlite3rbu)(unsafe.Pointer(p)).Fobjiter.FzTbl, @@ -129141,9 +129199,9 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { *(*uintptr)(unsafe.Pointer(bp + 24)) = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp+24, p+64, - Xsqlite3_mprintf(tls, ts+31567, libc.VaList(bp, zPragma))) + Xsqlite3_mprintf(tls, ts+31614, libc.VaList(bp, zPragma))) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) { - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31582, + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31629, libc.VaList(bp+8, zPragma, Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 24)), 0))) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 24))) @@ -129157,10 +129215,10 @@ *(*uintptr)(unsafe.Pointer(bp)) = uintptr(0) *(*uintptr)(unsafe.Pointer(bp + 8)) = uintptr(0) - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31602, uintptr(0), uintptr(0), p+64) + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31649, uintptr(0), uintptr(0), p+64) if 
(*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp, p+64, - ts+31627) + ts+31674) } for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp))) == SQLITE_ROW { @@ -129174,12 +129232,12 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp, p+64, - ts+31735) + ts+31782) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+8, p+64, - ts+31800) + ts+31847) } for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp))) == SQLITE_ROW { @@ -129191,7 +129249,7 @@ (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_reset(tls, *(*uintptr)(unsafe.Pointer(bp + 8))) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31844, uintptr(0), uintptr(0), p+64) + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31891, uintptr(0), uintptr(0), p+64) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp))) @@ -129219,7 +129277,7 @@ if (*RbuObjIter)(unsafe.Pointer(pIter)).FbCleanup != 0 { if libc.Bool32((*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0)) == 0 && (*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed != 0 { rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, - ts+31869, libc.VaList(bp, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl)) + ts+31916, libc.VaList(bp, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl)) } } else { rbuObjIterPrepareAll(tls, p, pIter, 0) @@ -129341,7 +129399,7 @@ if rc == SQLITE_OK && !(int32((*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl) != 0) { rc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31897, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31944, 0) } if rc == SQLITE_OK { @@ -129357,7 +129415,7 @@ bp := tls.Alloc(16) defer tls.Free(16) - var zOal uintptr = rbuMPrintf(tls, p, ts+31346, libc.VaList(bp, (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget)) + var zOal uintptr = rbuMPrintf(tls, p, ts+31393, libc.VaList(bp, (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget)) if zOal != 0 { *(*uintptr)(unsafe.Pointer(bp + 8)) = uintptr(0) Xsqlite3_file_control(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6441, SQLITE_FCNTL_VFS_POINTER, bp+8) @@ -129374,7 +129432,7 @@ defer tls.Free(76) Xsqlite3_randomness(tls, int32(unsafe.Sizeof(int32(0))), bp+8) - Xsqlite3_snprintf(tls, int32(unsafe.Sizeof([64]uint8{})), bp+12, ts+31922, libc.VaList(bp, *(*int32)(unsafe.Pointer(bp + 8)))) + Xsqlite3_snprintf(tls, int32(unsafe.Sizeof([64]uint8{})), bp+12, ts+31969, libc.VaList(bp, *(*int32)(unsafe.Pointer(bp + 8)))) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3rbu_create_vfs(tls, bp+12, uintptr(0)) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { var pVfs uintptr = Xsqlite3_vfs_find(tls, bp+12) @@ -129408,7 +129466,7 @@ rc = prepareFreeAndCollectError(tls, db, bp+8, bp+16, Xsqlite3_mprintf(tls, - ts+31933, libc.VaList(bp, Xsqlite3_value_text(tls, *(*uintptr)(unsafe.Pointer(apVal)))))) + ts+31980, libc.VaList(bp, Xsqlite3_value_text(tls, *(*uintptr)(unsafe.Pointer(apVal)))))) if rc != SQLITE_OK { Xsqlite3_result_error(tls, 
pCtx, *(*uintptr)(unsafe.Pointer(bp + 16)), -1) } else { @@ -129438,13 +129496,13 @@ (*Sqlite3rbu)(unsafe.Pointer(p)).FnPhaseOneStep = int64(-1) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_create_function(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, - ts+32005, 1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { + ts+32052, 1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rbuIndexCntFunc})), uintptr(0), uintptr(0)) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp, p+64, - ts+32019) + ts+32066) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { if SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp))) { @@ -129455,7 +129513,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && bExists != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp, p+64, - ts+32076) + ts+32123) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { if SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp))) { (*Sqlite3rbu)(unsafe.Pointer(p)).FnPhaseOneStep = Xsqlite3_column_int64(tls, *(*uintptr)(unsafe.Pointer(bp)), 0) @@ -129529,7 +129587,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && (*Rbu_file)(unsafe.Pointer((*Sqlite3rbu)(unsafe.Pointer(p)).FpTargetFd)).FpWalFd != 0 { if (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage == RBU_STAGE_OAL { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32150, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32197, 0) } else if (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage == RBU_STAGE_MOVE { (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage = RBU_STAGE_CKPT (*Sqlite3rbu)(unsafe.Pointer(p)).FnStep = 0 @@ -129547,12 +129605,12 @@ }() if (*Rbu_file)(unsafe.Pointer(pFd)).FiCookie != (*RbuState)(unsafe.Pointer(pState)).FiCookie { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_BUSY - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32182, + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32229, libc.VaList(bp+8, func() uintptr { if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - return ts + 32214 + return ts + 32261 } - return ts + 32221 + return ts + 32268 }())) } } @@ -129576,14 +129634,14 @@ } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, db, ts+32228, uintptr(0), uintptr(0), p+64) + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, db, ts+32275, uintptr(0), uintptr(0), p+64) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { var frc int32 = Xsqlite3_file_control(tls, db, ts+6441, SQLITE_FCNTL_ZIPVFS, uintptr(0)) if frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, - db, ts+32244, uintptr(0), uintptr(0), p+64) + db, ts+32291, uintptr(0), uintptr(0), p+64) } } @@ -129637,7 +129695,7 @@ } if zState != 0 { var n int32 = int32(libc.Xstrlen(tls, zState)) - if n >= 7 && 0 == libc.Xmemcmp(tls, ts+32268, zState+uintptr(n-7), uint64(7)) { + if n >= 7 && 0 == libc.Xmemcmp(tls, ts+32315, zState+uintptr(n-7), uint64(7)) { return rbuMisuseError(tls) } } @@ -129664,7 +129722,7 @@ var i uint32 var nErrmsg Size_t = libc.Xstrlen(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg) for i = uint32(0); Size_t(i) < nErrmsg-uint64(8); i++ { - if libc.Xmemcmp(tls, 
(*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg+uintptr(i), ts+30293, uint64(8)) == 0 { + if libc.Xmemcmp(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg+uintptr(i), ts+30340, uint64(8)) == 0 { var nDel int32 = 8 for int32(*(*uint8)(unsafe.Pointer((*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg + uintptr(i+uint32(nDel))))) >= '0' && int32(*(*uint8)(unsafe.Pointer((*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg + uintptr(i+uint32(nDel))))) <= '9' { nDel++ @@ -129700,7 +129758,7 @@ rbuObjIterFinalize(tls, p+80) if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) && (*Sqlite3rbu)(unsafe.Pointer(p)).Frc != SQLITE_OK && (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu != 0 { - var rc2 int32 = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+32276, uintptr(0), uintptr(0), uintptr(0)) + var rc2 int32 = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+32323, uintptr(0), uintptr(0), uintptr(0)) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_DONE && rc2 != SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = rc2 } @@ -129819,12 +129877,12 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { zBegin = ts + 14503 } else { - zBegin = ts + 32228 + zBegin = ts + 32275 } rc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, zBegin, uintptr(0), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { - rc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+32228, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+32275, uintptr(0), uintptr(0), uintptr(0)) } } @@ -130170,7 +130228,7 @@ })(unsafe.Pointer(&struct{ uintptr }{xControl})).f(tls, (*Rbu_file)(unsafe.Pointer(p)).FpReal, SQLITE_FCNTL_ZIPVFS, bp+16) if rc == SQLITE_OK { rc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(pRbu)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32303, 0) + (*Sqlite3rbu)(unsafe.Pointer(pRbu)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32350, 0) } else if rc == SQLITE_NOTFOUND { (*Sqlite3rbu)(unsafe.Pointer(pRbu)).FpTargetFd = p (*Rbu_file)(unsafe.Pointer(p)).FpRbu = pRbu @@ -130195,7 +130253,7 @@ if rc == SQLITE_OK && op == SQLITE_FCNTL_VFSNAME { var pRbuVfs uintptr = (*Rbu_file)(unsafe.Pointer(p)).FpRbuVfs var zIn uintptr = *(*uintptr)(unsafe.Pointer(pArg)) - var zOut uintptr = Xsqlite3_mprintf(tls, ts+32326, libc.VaList(bp, (*Rbu_vfs)(unsafe.Pointer(pRbuVfs)).Fbase.FzName, zIn)) + var zOut uintptr = Xsqlite3_mprintf(tls, ts+32373, libc.VaList(bp, (*Rbu_vfs)(unsafe.Pointer(pRbuVfs)).Fbase.FzName, zIn)) *(*uintptr)(unsafe.Pointer(pArg)) = zOut if zOut == uintptr(0) { rc = SQLITE_NOMEM @@ -130355,7 +130413,7 @@ } if oflags&SQLITE_OPEN_MAIN_DB != 0 && - Xsqlite3_uri_boolean(tls, zName, ts+32337, 0) != 0 { + Xsqlite3_uri_boolean(tls, zName, ts+32384, 0) != 0 { oflags = SQLITE_OPEN_TEMP_DB | SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE | SQLITE_OPEN_EXCLUSIVE | SQLITE_OPEN_DELETEONCLOSE zOpen = uintptr(0) } @@ -131184,7 +131242,7 @@ rc = Xsqlite3_table_column_metadata(tls, db, zDb, zThis, uintptr(0), uintptr(0), uintptr(0), uintptr(0), uintptr(0), uintptr(0)) if rc == SQLITE_OK { zPragma = Xsqlite3_mprintf(tls, - ts+32348, 0) + ts+32395, 0) } else if rc == SQLITE_ERROR { zPragma = Xsqlite3_mprintf(tls, ts+1554, 0) } else { @@ -131197,7 +131255,7 @@ return rc } } else { - zPragma = Xsqlite3_mprintf(tls, ts+32469, libc.VaList(bp, zDb, zThis)) + zPragma = Xsqlite3_mprintf(tls, ts+32516, libc.VaList(bp, zDb, zThis)) } if !(zPragma != 0) { *(*uintptr)(unsafe.Pointer(pazCol)) = uintptr(0) @@ -131877,9 +131935,9 @@ for i = 0; i < nCol; i++ { if 
*(*U8)(unsafe.Pointer(abPK + uintptr(i))) != 0 { - zRet = Xsqlite3_mprintf(tls, ts+32498, + zRet = Xsqlite3_mprintf(tls, ts+32545, libc.VaList(bp, zRet, zSep, zDb1, zTab, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*8)), zDb2, zTab, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*8)))) - zSep = ts + 21525 + zSep = ts + 21572 if zRet == uintptr(0) { break } @@ -131902,9 +131960,9 @@ if int32(*(*U8)(unsafe.Pointer(abPK + uintptr(i)))) == 0 { bHave = 1 zRet = Xsqlite3_mprintf(tls, - ts+32532, + ts+32579, libc.VaList(bp, zRet, zSep, zDb1, zTab, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*8)), zDb2, zTab, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*8)))) - zSep = ts + 32573 + zSep = ts + 32620 if zRet == uintptr(0) { break } @@ -131912,7 +131970,7 @@ } if bHave == 0 { - zRet = Xsqlite3_mprintf(tls, ts+7530, 0) + zRet = Xsqlite3_mprintf(tls, ts+7519, 0) } return zRet @@ -131923,7 +131981,7 @@ defer tls.Free(40) var zRet uintptr = Xsqlite3_mprintf(tls, - ts+32578, + ts+32625, libc.VaList(bp, zDb1, zTbl, zDb2, zTbl, zExpr)) return zRet } @@ -131966,7 +132024,7 @@ rc = SQLITE_NOMEM } else { var zStmt uintptr = Xsqlite3_mprintf(tls, - ts+32656, + ts+32703, libc.VaList(bp, (*Sqlite3_session)(unsafe.Pointer(pSession)).FzDb, (*SessionTable)(unsafe.Pointer(pTab)).FzName, zFrom, (*SessionTable)(unsafe.Pointer(pTab)).FzName, zExpr, zExpr2)) if zStmt == uintptr(0) { rc = SQLITE_NOMEM @@ -132093,7 +132151,7 @@ if !(pzErrMsg != 0) { goto __16 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+32709, 0) + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+32756, 0) __16: ; rc = SQLITE_SCHEMA @@ -132569,7 +132627,7 @@ if 0 == Xsqlite3_stricmp(tls, ts+11348, zTab) { zSql = Xsqlite3_mprintf(tls, - ts+32736, libc.VaList(bp, zDb)) + ts+32783, libc.VaList(bp, zDb)) if zSql == uintptr(0) { *(*int32)(unsafe.Pointer(bp + 24)) = SQLITE_NOMEM } @@ -132578,18 +132636,18 @@ var zSep uintptr = ts + 1554 *(*SessionBuffer)(unsafe.Pointer(bp + 8)) = SessionBuffer{} - sessionAppendStr(tls, bp+8, ts+32846, bp+24) + sessionAppendStr(tls, bp+8, ts+32893, bp+24) sessionAppendIdent(tls, bp+8, zDb, bp+24) sessionAppendStr(tls, bp+8, ts+1567, bp+24) sessionAppendIdent(tls, bp+8, zTab, bp+24) - sessionAppendStr(tls, bp+8, ts+32861, bp+24) + sessionAppendStr(tls, bp+8, ts+32908, bp+24) for i = 0; i < nCol; i++ { if *(*U8)(unsafe.Pointer(abPK + uintptr(i))) != 0 { sessionAppendStr(tls, bp+8, zSep, bp+24) sessionAppendIdent(tls, bp+8, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*8)), bp+24) - sessionAppendStr(tls, bp+8, ts+32869, bp+24) + sessionAppendStr(tls, bp+8, ts+32916, bp+24) sessionAppendInteger(tls, bp+8, i+1, bp+24) - zSep = ts + 21525 + zSep = ts + 21572 } } zSql = (*SessionBuffer)(unsafe.Pointer(bp + 8)).FaBuf @@ -132698,7 +132756,7 @@ if (*Sqlite3_session)(unsafe.Pointer(pSession)).Frc != 0 { return (*Sqlite3_session)(unsafe.Pointer(pSession)).Frc } - *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3_exec(tls, (*Sqlite3_session)(unsafe.Pointer(pSession)).Fdb, ts+32875, uintptr(0), uintptr(0), uintptr(0)) + *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3_exec(tls, (*Sqlite3_session)(unsafe.Pointer(pSession)).Fdb, ts+32922, uintptr(0), uintptr(0), uintptr(0)) if *(*int32)(unsafe.Pointer(bp + 40)) != SQLITE_OK { return *(*int32)(unsafe.Pointer(bp + 40)) } @@ -132790,7 +132848,7 @@ } Xsqlite3_free(tls, (*SessionBuffer)(unsafe.Pointer(bp+24)).FaBuf) - Xsqlite3_exec(tls, db, ts+32895, uintptr(0), uintptr(0), uintptr(0)) + Xsqlite3_exec(tls, db, ts+32942, uintptr(0), uintptr(0), uintptr(0)) 
Xsqlite3_mutex_leave(tls, Xsqlite3_db_mutex(tls, db)) return *(*int32)(unsafe.Pointer(bp + 40)) } @@ -133053,7 +133111,7 @@ rc = sessionInputBuffer(tls, pIn, 9) if rc == SQLITE_OK { if (*SessionInput)(unsafe.Pointer(pIn)).FiNext >= (*SessionInput)(unsafe.Pointer(pIn)).FnData { - rc = Xsqlite3CorruptError(tls, 219078) + rc = Xsqlite3CorruptError(tls, 219169) } else { eType = int32(*(*U8)(unsafe.Pointer((*SessionInput)(unsafe.Pointer(pIn)).FaData + uintptr(libc.PostIncInt32(&(*SessionInput)(unsafe.Pointer(pIn)).FiNext, 1))))) @@ -133076,7 +133134,7 @@ rc = sessionInputBuffer(tls, pIn, *(*int32)(unsafe.Pointer(bp))) if rc == SQLITE_OK { if *(*int32)(unsafe.Pointer(bp)) < 0 || *(*int32)(unsafe.Pointer(bp)) > (*SessionInput)(unsafe.Pointer(pIn)).FnData-(*SessionInput)(unsafe.Pointer(pIn)).FiNext { - rc = Xsqlite3CorruptError(tls, 219098) + rc = Xsqlite3CorruptError(tls, 219189) } else { var enc U8 = func() uint8 { if eType == SQLITE_TEXT { @@ -133118,7 +133176,7 @@ nRead = nRead + sessionVarintGet(tls, (*SessionInput)(unsafe.Pointer(pIn)).FaData+uintptr((*SessionInput)(unsafe.Pointer(pIn)).FiNext+nRead), bp) if *(*int32)(unsafe.Pointer(bp)) < 0 || *(*int32)(unsafe.Pointer(bp)) > 65536 { - rc = Xsqlite3CorruptError(tls, 219152) + rc = Xsqlite3CorruptError(tls, 219243) } else { rc = sessionInputBuffer(tls, pIn, nRead+*(*int32)(unsafe.Pointer(bp))+100) nRead = nRead + *(*int32)(unsafe.Pointer(bp)) @@ -133179,7 +133237,7 @@ (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Ftblhdr.FnBuf = 0 sessionBufferGrow(tls, p+72, int64(nByte), bp+4) } else { - *(*int32)(unsafe.Pointer(bp + 4)) = Xsqlite3CorruptError(tls, 219240) + *(*int32)(unsafe.Pointer(bp + 4)) = Xsqlite3CorruptError(tls, 219331) } } @@ -133253,13 +133311,13 @@ } if (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FzTab == uintptr(0) || (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FbPatchset != 0 && (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FbInvert != 0 { - return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219326)) + return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219417)) } (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fop = int32(op) (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FbIndirect = int32(*(*U8)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fin.FaData + uintptr(libc.PostIncInt32(&(*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fin.FiNext, 1))))) if (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fop != SQLITE_UPDATE && (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fop != SQLITE_DELETE && (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fop != SQLITE_INSERT { - return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219332)) + return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219423)) } if paRec != 0 { @@ -133321,7 +133379,7 @@ if *(*U8)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FabPK + uintptr(i))) != 0 { *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FapValue + uintptr(i)*8)) = *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FapValue + uintptr(i+(*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FnCol)*8)) if *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FapValue + uintptr(i)*8)) == uintptr(0) { - return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219376)) + return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219467)) } *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FapValue + uintptr(i+(*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FnCol)*8)) = uintptr(0) } @@ -133694,7 
+133752,7 @@ goto __6 __11: - *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3CorruptError(tls, 219741) + *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3CorruptError(tls, 219832) goto finished_invert __6: ; @@ -133873,34 +133931,34 @@ (*SessionUpdate)(unsafe.Pointer(pUp)).FaMask = pUp + 1*24 libc.Xmemcpy(tls, (*SessionUpdate)(unsafe.Pointer(pUp)).FaMask, (*SessionApplyCtx)(unsafe.Pointer(p)).FaUpdateMask, uint64(nU32)*uint64(unsafe.Sizeof(U32(0)))) - sessionAppendStr(tls, bp, ts+32913, bp+16) + sessionAppendStr(tls, bp, ts+32960, bp+16) sessionAppendIdent(tls, bp, (*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FzTab, bp+16) - sessionAppendStr(tls, bp, ts+32926, bp+16) + sessionAppendStr(tls, bp, ts+32973, bp+16) for ii = 0; ii < (*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FnCol; ii++ { if int32(*(*U8)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FabPK + uintptr(ii)))) == 0 && *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FapValue + uintptr((*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FnCol+ii)*8)) != 0 { sessionAppendStr(tls, bp, zSep, bp+16) sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(ii)*8)), bp+16) - sessionAppendStr(tls, bp, ts+32932, bp+16) + sessionAppendStr(tls, bp, ts+32979, bp+16) sessionAppendInteger(tls, bp, ii*2+1, bp+16) zSep = ts + 14614 } } zSep = ts + 1554 - sessionAppendStr(tls, bp, ts+32861, bp+16) + sessionAppendStr(tls, bp, ts+32908, bp+16) for ii = 0; ii < (*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FnCol; ii++ { if *(*U8)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FabPK + uintptr(ii))) != 0 || bPatchset == 0 && *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FapValue + uintptr(ii)*8)) != 0 { sessionAppendStr(tls, bp, zSep, bp+16) if bStat1 != 0 && ii == 1 { sessionAppendStr(tls, bp, - ts+32937, bp+16) + ts+32984, bp+16) } else { sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(ii)*8)), bp+16) - sessionAppendStr(tls, bp, ts+32869, bp+16) + sessionAppendStr(tls, bp, ts+32916, bp+16) sessionAppendInteger(tls, bp, ii*2+2, bp+16) } - zSep = ts + 21525 + zSep = ts + 21572 } } @@ -133952,34 +134010,34 @@ *(*SessionBuffer)(unsafe.Pointer(bp)) = SessionBuffer{} var nPk int32 = 0 - sessionAppendStr(tls, bp, ts+33012, bp+16) + sessionAppendStr(tls, bp, ts+33059, bp+16) sessionAppendIdent(tls, bp, zTab, bp+16) - sessionAppendStr(tls, bp, ts+32861, bp+16) + sessionAppendStr(tls, bp, ts+32908, bp+16) for i = 0; i < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol; i++ { if *(*U8)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FabPK + uintptr(i))) != 0 { nPk++ sessionAppendStr(tls, bp, zSep, bp+16) sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(i)*8)), bp+16) - sessionAppendStr(tls, bp, ts+32932, bp+16) + sessionAppendStr(tls, bp, ts+32979, bp+16) sessionAppendInteger(tls, bp, i+1, bp+16) - zSep = ts + 21525 + zSep = ts + 21572 } } if nPk < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol { - sessionAppendStr(tls, bp, ts+33030, bp+16) + sessionAppendStr(tls, bp, ts+33077, bp+16) sessionAppendInteger(tls, bp, (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol+1, bp+16) - sessionAppendStr(tls, bp, ts+32573, bp+16) + sessionAppendStr(tls, bp, ts+32620, bp+16) zSep = ts + 1554 for i = 0; i < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol; i++ { if !(int32(*(*U8)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FabPK + 
uintptr(i)))) != 0) { sessionAppendStr(tls, bp, zSep, bp+16) sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(i)*8)), bp+16) - sessionAppendStr(tls, bp, ts+32869, bp+16) + sessionAppendStr(tls, bp, ts+32916, bp+16) sessionAppendInteger(tls, bp, i+1, bp+16) - zSep = ts + 33038 + zSep = ts + 33085 } } sessionAppendStr(tls, bp, ts+4957, bp+16) @@ -134006,9 +134064,9 @@ var i int32 *(*SessionBuffer)(unsafe.Pointer(bp)) = SessionBuffer{} - sessionAppendStr(tls, bp, ts+33043, bp+16) + sessionAppendStr(tls, bp, ts+33090, bp+16) sessionAppendIdent(tls, bp, zTab, bp+16) - sessionAppendStr(tls, bp, ts+21531, bp+16) + sessionAppendStr(tls, bp, ts+21578, bp+16) for i = 0; i < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol; i++ { if i != 0 { sessionAppendStr(tls, bp, ts+14614, bp+16) @@ -134016,9 +134074,9 @@ sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(i)*8)), bp+16) } - sessionAppendStr(tls, bp, ts+33061, bp+16) + sessionAppendStr(tls, bp, ts+33108, bp+16) for i = 1; i < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol; i++ { - sessionAppendStr(tls, bp, ts+33072, bp+16) + sessionAppendStr(tls, bp, ts+33119, bp+16) } sessionAppendStr(tls, bp, ts+4957, bp+16) @@ -134037,11 +134095,11 @@ var rc int32 = sessionSelectRow(tls, db, ts+11348, p) if rc == SQLITE_OK { rc = sessionPrepare(tls, db, p+16, - ts+33076) + ts+33123) } if rc == SQLITE_OK { rc = sessionPrepare(tls, db, p+8, - ts+33189) + ts+33236) } return rc } @@ -134069,7 +134127,7 @@ f func(*libc.TLS, uintptr, int32, uintptr) int32 })(unsafe.Pointer(&struct{ uintptr }{xValue})).f(tls, pIter, i, bp) if *(*uintptr)(unsafe.Pointer(bp)) == uintptr(0) { - rc = Xsqlite3CorruptError(tls, 220219) + rc = Xsqlite3CorruptError(tls, 220310) } else { rc = sessionBindValue(tls, pStmt, i+1, *(*uintptr)(unsafe.Pointer(bp))) } @@ -134322,7 +134380,7 @@ if *(*int32)(unsafe.Pointer(bp + 4)) != 0 { rc = sessionApplyOneOp(tls, pIter, pApply, xConflict, pCtx, uintptr(0), uintptr(0)) } else if *(*int32)(unsafe.Pointer(bp)) != 0 { - rc = Xsqlite3_exec(tls, db, ts+33333, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33380, uintptr(0), uintptr(0), uintptr(0)) if rc == SQLITE_OK { rc = sessionBindRow(tls, pIter, *(*uintptr)(unsafe.Pointer(&struct { @@ -134338,7 +134396,7 @@ rc = sessionApplyOneOp(tls, pIter, pApply, xConflict, pCtx, uintptr(0), uintptr(0)) } if rc == SQLITE_OK { - rc = Xsqlite3_exec(tls, db, ts+33354, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33401, uintptr(0), uintptr(0), uintptr(0)) } } } @@ -134411,10 +134469,10 @@ (*SessionApplyCtx)(unsafe.Pointer(bp + 48)).FbInvertConstraints = libc.BoolInt32(!!(flags&SQLITE_CHANGESETAPPLY_INVERT != 0)) Xsqlite3_mutex_enter(tls, Xsqlite3_db_mutex(tls, db)) if flags&SQLITE_CHANGESETAPPLY_NOSAVEPOINT == 0 { - rc = Xsqlite3_exec(tls, db, ts+33373, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33420, uintptr(0), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { - rc = Xsqlite3_exec(tls, db, ts+33399, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33446, uintptr(0), uintptr(0), uintptr(0)) } for rc == SQLITE_OK && SQLITE_ROW == Xsqlite3changeset_next(tls, pIter) { Xsqlite3changeset_op(tls, pIter, bp+176, bp+184, bp+188, uintptr(0)) @@ -134473,16 +134531,16 @@ if (*SessionApplyCtx)(unsafe.Pointer(bp+48)).FnCol == 0 { schemaMismatch = 1 Xsqlite3_log(tls, SQLITE_SCHEMA, - ts+33429, libc.VaList(bp+8, 
*(*uintptr)(unsafe.Pointer(bp + 200)))) + ts+33476, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(bp + 200)))) } else if (*SessionApplyCtx)(unsafe.Pointer(bp+48)).FnCol < *(*int32)(unsafe.Pointer(bp + 184)) { schemaMismatch = 1 Xsqlite3_log(tls, SQLITE_SCHEMA, - ts+33473, + ts+33520, libc.VaList(bp+16, *(*uintptr)(unsafe.Pointer(bp + 200)), (*SessionApplyCtx)(unsafe.Pointer(bp+48)).FnCol, *(*int32)(unsafe.Pointer(bp + 184)))) } else if *(*int32)(unsafe.Pointer(bp + 184)) < nMinCol || libc.Xmemcmp(tls, (*SessionApplyCtx)(unsafe.Pointer(bp+48)).FabPK, *(*uintptr)(unsafe.Pointer(bp + 192)), uint64(*(*int32)(unsafe.Pointer(bp + 184)))) != 0 { schemaMismatch = 1 Xsqlite3_log(tls, SQLITE_SCHEMA, - ts+33544, libc.VaList(bp+40, *(*uintptr)(unsafe.Pointer(bp + 200)))) + ts+33591, libc.VaList(bp+40, *(*uintptr)(unsafe.Pointer(bp + 200)))) } else { (*SessionApplyCtx)(unsafe.Pointer(bp + 48)).FnCol = *(*int32)(unsafe.Pointer(bp + 184)) if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(bp + 200)), ts+11348) { @@ -134536,14 +134594,14 @@ } } } - Xsqlite3_exec(tls, db, ts+33604, uintptr(0), uintptr(0), uintptr(0)) + Xsqlite3_exec(tls, db, ts+33651, uintptr(0), uintptr(0), uintptr(0)) if flags&SQLITE_CHANGESETAPPLY_NOSAVEPOINT == 0 { if rc == SQLITE_OK { - rc = Xsqlite3_exec(tls, db, ts+33634, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33681, uintptr(0), uintptr(0), uintptr(0)) } else { - Xsqlite3_exec(tls, db, ts+33658, uintptr(0), uintptr(0), uintptr(0)) - Xsqlite3_exec(tls, db, ts+33634, uintptr(0), uintptr(0), uintptr(0)) + Xsqlite3_exec(tls, db, ts+33705, uintptr(0), uintptr(0), uintptr(0)) + Xsqlite3_exec(tls, db, ts+33681, uintptr(0), uintptr(0), uintptr(0)) } } @@ -135791,7 +135849,7 @@ fts5yy_pop_parser_stack(tls, fts5yypParser) } - sqlite3Fts5ParseError(tls, pParse, ts+33686, 0) + sqlite3Fts5ParseError(tls, pParse, ts+33733, 0) (*Fts5yyParser)(unsafe.Pointer(fts5yypParser)).FpParse = pParse @@ -136079,7 +136137,7 @@ _ = fts5yymajor sqlite3Fts5ParseError(tls, - pParse, ts+33714, libc.VaList(bp, fts5yyminor.Fn, fts5yyminor.Fp)) + pParse, ts+33761, libc.VaList(bp, fts5yyminor.Fn, fts5yyminor.Fp)) (*Fts5yyParser)(unsafe.Pointer(fts5yypParser)).FpParse = pParse @@ -136266,7 +136324,7 @@ if n < 0 { n = int32(libc.Xstrlen(tls, z)) } - (*HighlightContext)(unsafe.Pointer(p)).FzOut = Xsqlite3_mprintf(tls, ts+33745, libc.VaList(bp, (*HighlightContext)(unsafe.Pointer(p)).FzOut, n, z)) + (*HighlightContext)(unsafe.Pointer(p)).FzOut = Xsqlite3_mprintf(tls, ts+33792, libc.VaList(bp, (*HighlightContext)(unsafe.Pointer(p)).FzOut, n, z)) if (*HighlightContext)(unsafe.Pointer(p)).FzOut == uintptr(0) { *(*int32)(unsafe.Pointer(pRc)) = SQLITE_NOMEM } @@ -136334,7 +136392,7 @@ var iCol int32 if nVal != 3 { - var zErr uintptr = ts + 33752 + var zErr uintptr = ts + 33799 Xsqlite3_result_error(tls, pCtx, zErr, -1) return } @@ -136516,7 +136574,7 @@ var nCol int32 if nVal != 5 { - var zErr uintptr = ts + 33802 + var zErr uintptr = ts + 33849 Xsqlite3_result_error(tls, pCtx, zErr, -1) return } @@ -136840,13 +136898,13 @@ defer tls.Free(96) *(*[3]Builtin)(unsafe.Pointer(bp)) = [3]Builtin{ - {FzFunc: ts + 33850, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { + {FzFunc: ts + 33897, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr, int32, uintptr) }{fts5SnippetFunction}))}, - {FzFunc: ts + 33858, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { + {FzFunc: ts + 33905, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr, 
int32, uintptr) }{fts5HighlightFunction}))}, - {FzFunc: ts + 33868, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { + {FzFunc: ts + 33915, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr, int32, uintptr) }{fts5Bm25Function}))}, } @@ -137397,7 +137455,7 @@ *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_OK var nCmd int32 = int32(libc.Xstrlen(tls, zCmd)) - if Xsqlite3_strnicmp(tls, ts+33873, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+33920, zCmd, nCmd) == 0 { var nByte int32 = int32(uint64(unsafe.Sizeof(int32(0))) * uint64(FTS5_MAX_PREFIX_INDEXES)) var p uintptr var bFirst int32 = 1 @@ -137424,14 +137482,14 @@ break } if int32(*(*uint8)(unsafe.Pointer(p))) < '0' || int32(*(*uint8)(unsafe.Pointer(p))) > '9' { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+33880, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+33927, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR break } if (*Fts5Config)(unsafe.Pointer(pConfig)).FnPrefix == FTS5_MAX_PREFIX_INDEXES { *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, - ts+33911, libc.VaList(bp, FTS5_MAX_PREFIX_INDEXES)) + ts+33958, libc.VaList(bp, FTS5_MAX_PREFIX_INDEXES)) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR break } @@ -137442,7 +137500,7 @@ } if nPre <= 0 || nPre >= 1000 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+33944, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+33991, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR break } @@ -137455,7 +137513,7 @@ return *(*int32)(unsafe.Pointer(bp + 40)) } - if Xsqlite3_strnicmp(tls, ts+33981, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+34028, zCmd, nCmd) == 0 { var p uintptr = zArg var nArg Sqlite3_int64 = Sqlite3_int64(libc.Xstrlen(tls, zArg) + uint64(1)) var azArg uintptr = sqlite3Fts5MallocZero(tls, bp+40, int64(uint64(unsafe.Sizeof(uintptr(0)))*uint64(nArg))) @@ -137464,7 +137522,7 @@ if azArg != 0 && pSpace != 0 { if (*Fts5Config)(unsafe.Pointer(pConfig)).FpTok != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+33990, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34037, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { for nArg = int64(0); p != 0 && *(*uint8)(unsafe.Pointer(p)) != 0; nArg++ { @@ -137483,7 +137541,7 @@ } } if p == uintptr(0) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34023, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34070, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { *(*int32)(unsafe.Pointer(bp + 40)) = sqlite3Fts5GetTokenizer(tls, pGlobal, @@ -137498,14 +137556,14 @@ return *(*int32)(unsafe.Pointer(bp + 40)) } - if Xsqlite3_strnicmp(tls, ts+34057, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+34104, zCmd, nCmd) == 0 { if (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent != FTS5_CONTENT_NORMAL { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34065, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34112, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { if *(*uint8)(unsafe.Pointer(zArg)) != 0 { (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent = FTS5_CONTENT_EXTERNAL - (*Fts5Config)(unsafe.Pointer(pConfig)).FzContent = sqlite3Fts5Mprintf(tls, bp+40, ts+34097, libc.VaList(bp+8, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, zArg)) + (*Fts5Config)(unsafe.Pointer(pConfig)).FzContent = sqlite3Fts5Mprintf(tls, bp+40, ts+34144, libc.VaList(bp+8, 
(*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, zArg)) } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent = FTS5_CONTENT_NONE } @@ -137513,9 +137571,9 @@ return *(*int32)(unsafe.Pointer(bp + 40)) } - if Xsqlite3_strnicmp(tls, ts+34103, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+34150, zCmd, nCmd) == 0 { if (*Fts5Config)(unsafe.Pointer(pConfig)).FzContentRowid != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34117, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34164, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FzContentRowid = sqlite3Fts5Strndup(tls, bp+40, zArg, -1) @@ -137523,9 +137581,9 @@ return *(*int32)(unsafe.Pointer(bp + 40)) } - if Xsqlite3_strnicmp(tls, ts+34155, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+34202, zCmd, nCmd) == 0 { if int32(*(*uint8)(unsafe.Pointer(zArg))) != '0' && int32(*(*uint8)(unsafe.Pointer(zArg))) != '1' || int32(*(*uint8)(unsafe.Pointer(zArg + 1))) != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34166, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34213, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize = libc.Bool32(int32(*(*uint8)(unsafe.Pointer(zArg))) == '1') @@ -137537,17 +137595,17 @@ *(*[4]Fts5Enum)(unsafe.Pointer(bp + 48)) = [4]Fts5Enum{ {FzName: ts + 8026, FeVal: FTS5_DETAIL_NONE}, {FzName: ts + 17345}, - {FzName: ts + 34201, FeVal: FTS5_DETAIL_COLUMNS}, + {FzName: ts + 34248, FeVal: FTS5_DETAIL_COLUMNS}, {}, } if libc.AssignPtrInt32(bp+40, fts5ConfigSetEnum(tls, bp+48, zArg, pConfig+92)) != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34209, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34256, 0) } return *(*int32)(unsafe.Pointer(bp + 40)) } - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34240, libc.VaList(bp+24, nCmd, zCmd)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34287, libc.VaList(bp+24, nCmd, zCmd)) return SQLITE_ERROR } @@ -137594,15 +137652,15 @@ defer tls.Free(16) var rc int32 = SQLITE_OK - if 0 == Xsqlite3_stricmp(tls, zCol, ts+22191) || + if 0 == Xsqlite3_stricmp(tls, zCol, ts+22238) || 0 == Xsqlite3_stricmp(tls, zCol, ts+16267) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34268, libc.VaList(bp, zCol)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34315, libc.VaList(bp, zCol)) rc = SQLITE_ERROR } else if zArg != 0 { - if 0 == Xsqlite3_stricmp(tls, zArg, ts+34298) { + if 0 == Xsqlite3_stricmp(tls, zArg, ts+34345) { *(*U8)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(p)).FabUnindexed + uintptr((*Fts5Config)(unsafe.Pointer(p)).FnCol))) = U8(1) } else { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34308, libc.VaList(bp+8, zArg)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34355, libc.VaList(bp+8, zArg)) rc = SQLITE_ERROR } } @@ -137619,13 +137677,13 @@ *(*int32)(unsafe.Pointer(bp + 24)) = SQLITE_OK *(*Fts5Buffer)(unsafe.Pointer(bp + 32)) = Fts5Buffer{} - sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+34339, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(p)).FzContentRowid)) + sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+34386, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(p)).FzContentRowid)) if (*Fts5Config)(unsafe.Pointer(p)).FeContent != FTS5_CONTENT_NONE { for i = 0; i < (*Fts5Config)(unsafe.Pointer(p)).FnCol; i++ { if 
(*Fts5Config)(unsafe.Pointer(p)).FeContent == FTS5_CONTENT_EXTERNAL { - sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+34344, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(p)).FazCol + uintptr(i)*8)))) + sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+34391, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(p)).FazCol + uintptr(i)*8)))) } else { - sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+34351, libc.VaList(bp+16, i)) + sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+34398, libc.VaList(bp+16, i)) } } } @@ -137663,8 +137721,8 @@ (*Fts5Config)(unsafe.Pointer(pRet)).FzName = sqlite3Fts5Strndup(tls, bp+40, *(*uintptr)(unsafe.Pointer(azArg + 2*8)), -1) (*Fts5Config)(unsafe.Pointer(pRet)).FbColumnsize = 1 (*Fts5Config)(unsafe.Pointer(pRet)).FeDetail = FTS5_DETAIL_FULL - if *(*int32)(unsafe.Pointer(bp + 40)) == SQLITE_OK && Xsqlite3_stricmp(tls, (*Fts5Config)(unsafe.Pointer(pRet)).FzName, ts+22191) == 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34359, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pRet)).FzName)) + if *(*int32)(unsafe.Pointer(bp + 40)) == SQLITE_OK && Xsqlite3_stricmp(tls, (*Fts5Config)(unsafe.Pointer(pRet)).FzName, ts+22238) == 0 { + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34406, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pRet)).FzName)) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } @@ -137696,7 +137754,7 @@ if *(*int32)(unsafe.Pointer(bp + 40)) == SQLITE_OK { if z == uintptr(0) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34388, libc.VaList(bp+8, zOrig)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34435, libc.VaList(bp+8, zOrig)) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { if bOption != 0 { @@ -137733,14 +137791,14 @@ var zTail uintptr = uintptr(0) if (*Fts5Config)(unsafe.Pointer(pRet)).FeContent == FTS5_CONTENT_NORMAL { - zTail = ts + 34057 + zTail = ts + 34104 } else if (*Fts5Config)(unsafe.Pointer(pRet)).FbColumnsize != 0 { - zTail = ts + 34408 + zTail = ts + 34455 } if zTail != 0 { (*Fts5Config)(unsafe.Pointer(pRet)).FzContent = sqlite3Fts5Mprintf(tls, - bp+40, ts+34416, libc.VaList(bp+16, (*Fts5Config)(unsafe.Pointer(pRet)).FzDb, (*Fts5Config)(unsafe.Pointer(pRet)).FzName, zTail)) + bp+40, ts+34463, libc.VaList(bp+16, (*Fts5Config)(unsafe.Pointer(pRet)).FzDb, (*Fts5Config)(unsafe.Pointer(pRet)).FzName, zTail)) } } @@ -137789,7 +137847,7 @@ *(*int32)(unsafe.Pointer(bp + 48)) = SQLITE_OK var zSql uintptr - zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34427, 0) + zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34474, 0) for i = 0; zSql != 0 && i < (*Fts5Config)(unsafe.Pointer(pConfig)).FnCol; i++ { var zSep uintptr = func() uintptr { if i == 0 { @@ -137797,10 +137855,10 @@ } return ts + 14614 }() - zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34443, libc.VaList(bp, zSql, zSep, *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(pConfig)).FazCol + uintptr(i)*8)))) + zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34490, libc.VaList(bp, zSql, zSep, *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(pConfig)).FazCol + uintptr(i)*8)))) } - zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34450, - libc.VaList(bp+24, zSql, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, ts+22191)) + zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34497, + libc.VaList(bp+24, zSql, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, ts+22238)) if zSql != 0 { *(*int32)(unsafe.Pointer(bp + 48)) = Xsqlite3_declare_vtab(tls, 
(*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, zSql) @@ -137910,7 +137968,7 @@ var rc int32 = SQLITE_OK - if 0 == Xsqlite3_stricmp(tls, zKey, ts+34476) { + if 0 == Xsqlite3_stricmp(tls, zKey, ts+34523) { var pgsz int32 = 0 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { pgsz = Xsqlite3_value_int(tls, pVal) @@ -137920,7 +137978,7 @@ } else { (*Fts5Config)(unsafe.Pointer(pConfig)).Fpgsz = pgsz } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34481) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34528) { var nHashSize int32 = -1 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { nHashSize = Xsqlite3_value_int(tls, pVal) @@ -137930,7 +137988,7 @@ } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FnHashSize = nHashSize } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34490) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34537) { var nAutomerge int32 = -1 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { nAutomerge = Xsqlite3_value_int(tls, pVal) @@ -137943,7 +138001,7 @@ } (*Fts5Config)(unsafe.Pointer(pConfig)).FnAutomerge = nAutomerge } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34500) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34547) { var nUsermerge int32 = -1 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { nUsermerge = Xsqlite3_value_int(tls, pVal) @@ -137953,7 +138011,7 @@ } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FnUsermerge = nUsermerge } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34510) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34557) { var nCrisisMerge int32 = -1 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { nCrisisMerge = Xsqlite3_value_int(tls, pVal) @@ -137969,7 +138027,7 @@ } (*Fts5Config)(unsafe.Pointer(pConfig)).FnCrisisMerge = nCrisisMerge } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+22191) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+22238) { var zIn uintptr = Xsqlite3_value_text(tls, pVal) rc = sqlite3Fts5ConfigParseRank(tls, zIn, bp, bp+8) @@ -137992,7 +138050,7 @@ bp := tls.Alloc(52) defer tls.Free(52) - var zSelect uintptr = ts + 34522 + var zSelect uintptr = ts + 34569 var zSql uintptr *(*uintptr)(unsafe.Pointer(bp + 40)) = uintptr(0) *(*int32)(unsafe.Pointer(bp + 32)) = SQLITE_OK @@ -138014,7 +138072,7 @@ for SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 40))) { var zK uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 40)), 0) var pVal uintptr = Xsqlite3_column_value(tls, *(*uintptr)(unsafe.Pointer(bp + 40)), 1) - if 0 == Xsqlite3_stricmp(tls, zK, ts+34554) { + if 0 == Xsqlite3_stricmp(tls, zK, ts+34601) { iVersion = Xsqlite3_value_int(tls, pVal) } else { *(*int32)(unsafe.Pointer(bp + 48)) = 0 @@ -138028,7 +138086,7 @@ *(*int32)(unsafe.Pointer(bp + 32)) = SQLITE_ERROR if (*Fts5Config)(unsafe.Pointer(pConfig)).FpzErrmsg != 0 { *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(pConfig)).FpzErrmsg)) = Xsqlite3_mprintf(tls, - ts+34562, + ts+34609, libc.VaList(bp+16, iVersion, FTS5_CURRENT_VERSION)) } } @@ -138126,7 +138184,7 @@ } } if int32(*(*uint8)(unsafe.Pointer(z2))) == 0 { - sqlite3Fts5ParseError(tls, pParse, ts+34627, 0) + sqlite3Fts5ParseError(tls, pParse, ts+34674, 0) return FTS5_EOF } } @@ -138139,20 +138197,20 @@ { var z2 uintptr if sqlite3Fts5IsBareword(tls, *(*uint8)(unsafe.Pointer(z))) == 0 { - sqlite3Fts5ParseError(tls, pParse, ts+34647, libc.VaList(bp, z)) + sqlite3Fts5ParseError(tls, pParse, ts+34694, libc.VaList(bp, z)) return FTS5_EOF } tok = FTS5_STRING for z2 = z + 1; sqlite3Fts5IsBareword(tls, 
*(*uint8)(unsafe.Pointer(z2))) != 0; z2++ { } (*Fts5Token)(unsafe.Pointer(pToken)).Fn = int32((int64(z2) - int64(z)) / 1) - if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 2 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+34678, uint64(2)) == 0 { + if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 2 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+34725, uint64(2)) == 0 { tok = FTS5_OR } - if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 3 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+34681, uint64(3)) == 0 { + if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 3 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+34728, uint64(3)) == 0 { tok = FTS5_NOT } - if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 3 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+30063, uint64(3)) == 0 { + if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 3 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+30110, uint64(3)) == 0 { tok = FTS5_AND } break @@ -139930,9 +139988,9 @@ bp := tls.Alloc(16) defer tls.Free(16) - if (*Fts5Token)(unsafe.Pointer(pTok)).Fn != 4 || libc.Xmemcmp(tls, ts+34685, (*Fts5Token)(unsafe.Pointer(pTok)).Fp, uint64(4)) != 0 { + if (*Fts5Token)(unsafe.Pointer(pTok)).Fn != 4 || libc.Xmemcmp(tls, ts+34732, (*Fts5Token)(unsafe.Pointer(pTok)).Fp, uint64(4)) != 0 { sqlite3Fts5ParseError(tls, - pParse, ts+33714, libc.VaList(bp, (*Fts5Token)(unsafe.Pointer(pTok)).Fn, (*Fts5Token)(unsafe.Pointer(pTok)).Fp)) + pParse, ts+33761, libc.VaList(bp, (*Fts5Token)(unsafe.Pointer(pTok)).Fn, (*Fts5Token)(unsafe.Pointer(pTok)).Fp)) } } @@ -139948,7 +140006,7 @@ var c uint8 = *(*uint8)(unsafe.Pointer((*Fts5Token)(unsafe.Pointer(p)).Fp + uintptr(i))) if int32(c) < '0' || int32(c) > '9' { sqlite3Fts5ParseError(tls, - pParse, ts+34690, libc.VaList(bp, (*Fts5Token)(unsafe.Pointer(p)).Fn, (*Fts5Token)(unsafe.Pointer(p)).Fp)) + pParse, ts+34737, libc.VaList(bp, (*Fts5Token)(unsafe.Pointer(p)).Fn, (*Fts5Token)(unsafe.Pointer(p)).Fp)) return } nNear = nNear*10 + (int32(*(*uint8)(unsafe.Pointer((*Fts5Token)(unsafe.Pointer(p)).Fp + uintptr(i)))) - '0') @@ -140035,7 +140093,7 @@ } } if iCol == (*Fts5Config)(unsafe.Pointer(pConfig)).FnCol { - sqlite3Fts5ParseError(tls, pParse, ts+20528, libc.VaList(bp, z)) + sqlite3Fts5ParseError(tls, pParse, ts+20575, libc.VaList(bp, z)) } else { pRet = fts5ParseColset(tls, pParse, pColset, iCol) } @@ -140116,7 +140174,7 @@ *(*uintptr)(unsafe.Pointer(bp)) = pColset if (*Fts5Config)(unsafe.Pointer((*Fts5Parse)(unsafe.Pointer(pParse)).FpConfig)).FeDetail == FTS5_DETAIL_NONE { sqlite3Fts5ParseError(tls, pParse, - ts+34719, 0) + ts+34766, 0) } else { fts5ParseSetColset(tls, pParse, pExpr, pColset, bp) } @@ -140286,12 +140344,12 @@ (*Fts5ExprPhrase)(unsafe.Pointer(pPhrase)).FnTerm > 1 || (*Fts5ExprPhrase)(unsafe.Pointer(pPhrase)).FnTerm > 0 && (*Fts5ExprTerm)(unsafe.Pointer(pPhrase+32)).FbFirst != 0 { sqlite3Fts5ParseError(tls, pParse, - ts+34772, + ts+34819, libc.VaList(bp, func() uintptr { if (*Fts5ExprNearset)(unsafe.Pointer(pNear)).FnPhrase == 1 { - return ts + 34822 + return ts + 34869 } - return ts + 34685 + return ts + 34732 }())) Xsqlite3_free(tls, pRet) pRet = uintptr(0) @@ -141234,7 +141292,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpReader == uintptr(0) && rc == SQLITE_OK { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig rc = Xsqlite3_blob_open(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, - (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl, ts+34829, 
iRowid, 0, p+56) + (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl, ts+34876, iRowid, 0, p+56) } if rc == SQLITE_ERROR { @@ -141313,7 +141371,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpWriter == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig fts5IndexPrepareStmt(tls, p, p+64, Xsqlite3_mprintf(tls, - ts+34835, + ts+34882, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName))) if (*Fts5Index)(unsafe.Pointer(p)).Frc != 0 { return @@ -141338,7 +141396,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpDeleter == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig var zSql uintptr = Xsqlite3_mprintf(tls, - ts+34886, + ts+34933, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) if fts5IndexPrepareStmt(tls, p, p+72, zSql) != 0 { return @@ -141361,7 +141419,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpIdxDeleter == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig fts5IndexPrepareStmt(tls, p, p+88, Xsqlite3_mprintf(tls, - ts+34935, + ts+34982, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName))) } if (*Fts5Index)(unsafe.Pointer(p)).Frc == SQLITE_OK { @@ -141600,7 +141658,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).Frc == SQLITE_OK { if (*Fts5Index)(unsafe.Pointer(p)).FpDataVersion == uintptr(0) { (*Fts5Index)(unsafe.Pointer(p)).Frc = fts5IndexPrepareStmt(tls, p, p+112, - Xsqlite3_mprintf(tls, ts+34975, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer((*Fts5Index)(unsafe.Pointer(p)).FpConfig)).FzDb))) + Xsqlite3_mprintf(tls, ts+35022, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer((*Fts5Index)(unsafe.Pointer(p)).FpConfig)).FzDb))) if (*Fts5Index)(unsafe.Pointer(p)).Frc != 0 { return int64(0) } @@ -142799,7 +142857,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpIdxSelect == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig fts5IndexPrepareStmt(tls, p, p+96, Xsqlite3_mprintf(tls, - ts+34998, + ts+35045, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName))) } return (*Fts5Index)(unsafe.Pointer(p)).FpIdxSelect @@ -144265,7 +144323,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpIdxWriter == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig fts5IndexPrepareStmt(tls, p, p+80, Xsqlite3_mprintf(tls, - ts+35082, + ts+35129, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName))) } @@ -145347,13 +145405,13 @@ if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { (*Fts5Index)(unsafe.Pointer(p)).FpConfig = pConfig (*Fts5Index)(unsafe.Pointer(p)).FnWorkUnit = FTS5_WORK_UNIT - (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl = sqlite3Fts5Mprintf(tls, bp+8, ts+35139, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) + (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl = sqlite3Fts5Mprintf(tls, bp+8, ts+35186, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) if (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl != 0 && bCreate != 0 { *(*int32)(unsafe.Pointer(bp + 8)) = sqlite3Fts5CreateTable(tls, - pConfig, ts+25056, ts+35147, 0, pzErr) + pConfig, ts+25103, ts+35194, 0, pzErr) if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { *(*int32)(unsafe.Pointer(bp + 8)) = sqlite3Fts5CreateTable(tls, pConfig, ts+11488, - ts+35182, + ts+35229, 1, pzErr) } if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { @@ 
-145606,7 +145664,7 @@ sqlite3Fts5Put32(tls, bp, iNew) rc = Xsqlite3_blob_open(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl, - ts+34829, int64(FTS5_STRUCTURE_ROWID), 1, bp+8) + ts+34876, int64(FTS5_STRUCTURE_ROWID), 1, bp+8) if rc == SQLITE_OK { Xsqlite3_blob_write(tls, *(*uintptr)(unsafe.Pointer(bp + 8)), bp, 4, 0) rc = Xsqlite3_blob_close(tls, *(*uintptr)(unsafe.Pointer(bp + 8))) @@ -145720,7 +145778,7 @@ } fts5IndexPrepareStmt(tls, p, bp+24, Xsqlite3_mprintf(tls, - ts+35226, + ts+35273, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, (*Fts5StructureSegment)(unsafe.Pointer(pSeg)).FiSegid))) for (*Fts5Index)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) { @@ -145890,7 +145948,7 @@ } else { (*Fts5Buffer)(unsafe.Pointer(bp + 16)).Fn = 0 fts5SegiterPoslist(tls, p, *(*uintptr)(unsafe.Pointer(bp))+96+uintptr((*Fts5CResult)(unsafe.Pointer((*Fts5Iter)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FaFirst+1*4)).FiFirst)*120, uintptr(0), bp+16) - sqlite3Fts5BufferAppendBlob(tls, p+52, bp+16, uint32(4), ts+35312) + sqlite3Fts5BufferAppendBlob(tls, p+52, bp+16, uint32(4), ts+35359) for 0 == sqlite3Fts5PoslistNext64(tls, (*Fts5Buffer)(unsafe.Pointer(bp+16)).Fp, (*Fts5Buffer)(unsafe.Pointer(bp+16)).Fn, bp+32, bp+40) { var iCol int32 = int32(*(*I64)(unsafe.Pointer(bp + 40)) >> 32) var iTokOff int32 = int32(*(*I64)(unsafe.Pointer(bp + 40)) & int64(0x7FFFFFFF)) @@ -146161,7 +146219,7 @@ if (*Fts5Config)(unsafe.Pointer(pConfig)).FbLock != 0 { (*Fts5Table)(unsafe.Pointer(pTab)).Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+35317, 0) + ts+35364, 0) return SQLITE_ERROR } @@ -146585,7 +146643,7 @@ (*Fts5Sorter)(unsafe.Pointer(pSorter)).FnIdx = nPhrase rc = fts5PrepareStatement(tls, pSorter, pConfig, - ts+35356, + ts+35403, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zRank, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, func() uintptr { if zRankArgs != 0 { @@ -146601,9 +146659,9 @@ }(), func() uintptr { if bDesc != 0 { - return ts + 35411 + return ts + 35458 } - return ts + 35416 + return ts + 35463 }())) (*Fts5Cursor)(unsafe.Pointer(pCsr)).FpSorter = pSorter @@ -146649,12 +146707,12 @@ (*Fts5Cursor)(unsafe.Pointer(pCsr)).FePlan = FTS5_PLAN_SPECIAL - if n == 5 && 0 == Xsqlite3_strnicmp(tls, ts+35420, z, n) { + if n == 5 && 0 == Xsqlite3_strnicmp(tls, ts+35467, z, n) { (*Fts5Cursor)(unsafe.Pointer(pCsr)).FiSpecial = I64(sqlite3Fts5IndexReads(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.FpIndex)) } else if n == 2 && 0 == Xsqlite3_strnicmp(tls, ts+5057, z, n) { (*Fts5Cursor)(unsafe.Pointer(pCsr)).FiSpecial = (*Fts5Cursor)(unsafe.Pointer(pCsr)).FiCsrId } else { - (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, ts+35426, libc.VaList(bp, n, z)) + (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, ts+35473, libc.VaList(bp, n, z)) rc = SQLITE_ERROR } @@ -146685,7 +146743,7 @@ var zRankArgs uintptr = (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRankArgs if zRankArgs != 0 { - var zSql uintptr = sqlite3Fts5Mprintf(tls, bp+16, ts+35454, libc.VaList(bp, zRankArgs)) + var zSql uintptr = sqlite3Fts5Mprintf(tls, bp+16, ts+35501, libc.VaList(bp, zRankArgs)) if zSql != 0 { *(*uintptr)(unsafe.Pointer(bp + 24)) = uintptr(0) *(*int32)(unsafe.Pointer(bp + 16)) = Xsqlite3_prepare_v3(tls, 
(*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, zSql, -1, @@ -146716,7 +146774,7 @@ if *(*int32)(unsafe.Pointer(bp + 16)) == SQLITE_OK { pAux = fts5FindAuxiliary(tls, pTab, zRank) if pAux == uintptr(0) { - (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, ts+35464, libc.VaList(bp+8, zRank)) + (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, ts+35511, libc.VaList(bp+8, zRank)) *(*int32)(unsafe.Pointer(bp + 16)) = SQLITE_ERROR } } @@ -146748,14 +146806,14 @@ *(*int32)(unsafe.Pointer(pCsr + 80)) |= FTS5CSR_FREE_ZRANK } else if rc == SQLITE_ERROR { (*Sqlite3_vtab)(unsafe.Pointer((*Fts5Cursor)(unsafe.Pointer(pCsr)).Fbase.FpVtab)).FzErrMsg = Xsqlite3_mprintf(tls, - ts+35485, libc.VaList(bp, z)) + ts+35532, libc.VaList(bp, z)) } } else { if (*Fts5Config)(unsafe.Pointer(pConfig)).FzRank != 0 { (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRank = (*Fts5Config)(unsafe.Pointer(pConfig)).FzRank (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRankArgs = (*Fts5Config)(unsafe.Pointer(pConfig)).FzRankArgs } else { - (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRank = ts + 33868 + (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRank = ts + 33915 (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRankArgs = uintptr(0) } } @@ -146811,7 +146869,7 @@ goto __1 } (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+35317, 0) + ts+35364, 0) return SQLITE_ERROR __1: ; @@ -147028,7 +147086,7 @@ goto __40 } *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(pConfig)).FpzErrmsg)) = Xsqlite3_mprintf(tls, - ts+35518, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) + ts+35565, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) rc = SQLITE_ERROR goto __41 __40: @@ -147173,28 +147231,28 @@ var rc int32 = SQLITE_OK *(*int32)(unsafe.Pointer(bp)) = 0 - if 0 == Xsqlite3_stricmp(tls, ts+35554, zCmd) { + if 0 == Xsqlite3_stricmp(tls, ts+35601, zCmd) { if (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NORMAL { fts5SetVtabError(tls, pTab, - ts+35565, 0) + ts+35612, 0) rc = SQLITE_ERROR } else { rc = sqlite3Fts5StorageDeleteAll(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage) } - } else if 0 == Xsqlite3_stricmp(tls, ts+35645, zCmd) { + } else if 0 == Xsqlite3_stricmp(tls, ts+35692, zCmd) { if (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NONE { fts5SetVtabError(tls, pTab, - ts+35653, 0) + ts+35700, 0) rc = SQLITE_ERROR } else { rc = sqlite3Fts5StorageRebuild(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage) } } else if 0 == Xsqlite3_stricmp(tls, ts+16934, zCmd) { rc = sqlite3Fts5StorageOptimize(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage) - } else if 0 == Xsqlite3_stricmp(tls, ts+35709, zCmd) { + } else if 0 == Xsqlite3_stricmp(tls, ts+35756, zCmd) { var nMerge int32 = Xsqlite3_value_int(tls, pVal) rc = sqlite3Fts5StorageMerge(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage, nMerge) - } else if 0 == Xsqlite3_stricmp(tls, ts+35715, zCmd) { + } else if 0 == Xsqlite3_stricmp(tls, ts+35762, zCmd) { var iArg int32 = Xsqlite3_value_int(tls, pVal) rc = sqlite3Fts5StorageIntegrity(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage, iArg) } else { @@ -147265,12 +147323,12 @@ if eType0 == SQLITE_INTEGER && fts5IsContentless(tls, pTab) != 0 { (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+35731, + ts+35778, libc.VaList(bp, func() uintptr { if nArg > 1 { - return ts + 20429 + return ts + 20476 } - return ts + 35768 + return ts + 35815 }(), 
(*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) *(*int32)(unsafe.Pointer(bp + 16)) = SQLITE_ERROR } else if nArg == 1 { @@ -147900,7 +147958,7 @@ pCsr = fts5CursorFromCsrid(tls, (*Fts5Auxiliary)(unsafe.Pointer(pAux)).FpGlobal, iCsrId) if pCsr == uintptr(0) || (*Fts5Cursor)(unsafe.Pointer(pCsr)).FePlan == 0 { - var zErr uintptr = Xsqlite3_mprintf(tls, ts+35780, libc.VaList(bp, iCsrId)) + var zErr uintptr = Xsqlite3_mprintf(tls, ts+35827, libc.VaList(bp, iCsrId)) Xsqlite3_result_error(tls, context, zErr, -1) Xsqlite3_free(tls, zErr) } else { @@ -148144,7 +148202,7 @@ }()) if pMod == uintptr(0) { rc = SQLITE_ERROR - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35801, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(azArg)))) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35848, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(azArg)))) } else { rc = (*struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 @@ -148163,7 +148221,7 @@ (*Fts5Config)(unsafe.Pointer(pConfig)).FpTokApi = pMod + 16 if rc != SQLITE_OK { if pzErr != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35823, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35870, 0) } } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FePattern = sqlite3Fts5TokenizerPattern(tls, @@ -148210,7 +148268,7 @@ var ppApi uintptr _ = nArg - ppApi = Xsqlite3_value_pointer(tls, *(*uintptr)(unsafe.Pointer(apArg)), ts+35854) + ppApi = Xsqlite3_value_pointer(tls, *(*uintptr)(unsafe.Pointer(apArg)), ts+35901) if ppApi != 0 { *(*uintptr)(unsafe.Pointer(ppApi)) = pGlobal } @@ -148219,7 +148277,7 @@ func fts5SourceIdFunc(tls *libc.TLS, pCtx uintptr, nArg int32, apUnused uintptr) { _ = nArg _ = apUnused - Xsqlite3_result_text(tls, pCtx, ts+35867, -1, libc.UintptrFromInt32(-1)) + Xsqlite3_result_text(tls, pCtx, ts+35914, -1, libc.UintptrFromInt32(-1)) } func fts5ShadowName(tls *libc.TLS, zName uintptr) int32 { @@ -148233,7 +148291,7 @@ } var azName2 = [5]uintptr{ - ts + 35958, ts + 34057, ts + 25056, ts + 34408, ts + 11488, + ts + 36005, ts + 34104, ts + 25103, ts + 34455, ts + 11488, } func fts5Init(tls *libc.TLS, db uintptr) int32 { @@ -148257,7 +148315,7 @@ (*Fts5Global)(unsafe.Pointer(pGlobal)).Fapi.FxFindTokenizer = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr, uintptr) int32 }{fts5FindTokenizer})) - rc = Xsqlite3_create_module_v2(tls, db, ts+35965, uintptr(unsafe.Pointer(&fts5Mod)), p, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5ModuleDestroy}))) + rc = Xsqlite3_create_module_v2(tls, db, ts+36012, uintptr(unsafe.Pointer(&fts5Mod)), p, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5ModuleDestroy}))) if rc == SQLITE_OK { rc = sqlite3Fts5IndexInit(tls, db) } @@ -148275,13 +148333,13 @@ } if rc == SQLITE_OK { rc = Xsqlite3_create_function(tls, - db, ts+35965, 1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { + db, ts+36012, 1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{fts5Fts5Func})), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { rc = Xsqlite3_create_function(tls, - db, ts+35970, 0, + db, ts+36017, 0, SQLITE_UTF8|SQLITE_DETERMINISTIC|SQLITE_INNOCUOUS, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) @@ -148338,17 +148396,17 @@ if *(*uintptr)(unsafe.Pointer(p + 40 + uintptr(eStmt)*8)) == uintptr(0) { *(*[11]uintptr)(unsafe.Pointer(bp + 128)) = [11]uintptr{ - ts + 35985, - ts + 36053, - ts + 36122, - ts + 36155, - 
ts + 36194, - ts + 36234, - ts + 36273, - ts + 36314, - ts + 36353, - ts + 36395, - ts + 36435, + ts + 36032, + ts + 36100, + ts + 36169, + ts + 36202, + ts + 36241, + ts + 36281, + ts + 36320, + ts + 36361, + ts + 36400, + ts + 36442, + ts + 36482, } var pC uintptr = (*Fts5Storage)(unsafe.Pointer(p)).FpConfig var zSql uintptr = uintptr(0) @@ -148450,18 +148508,18 @@ defer tls.Free(80) var rc int32 = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36458, + ts+36505, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36562, + ts+36609, libc.VaList(bp+48, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) } if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NORMAL { rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36600, + ts+36647, libc.VaList(bp+64, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) } return rc @@ -148473,7 +148531,7 @@ if *(*int32)(unsafe.Pointer(pRc)) == SQLITE_OK { *(*int32)(unsafe.Pointer(pRc)) = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36638, + ts+36685, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zTail, zName, zTail)) } } @@ -148485,14 +148543,14 @@ var pConfig uintptr = (*Fts5Storage)(unsafe.Pointer(pStorage)).FpConfig *(*int32)(unsafe.Pointer(bp)) = sqlite3Fts5StorageSync(tls, pStorage) - fts5StorageRenameOne(tls, pConfig, bp, ts+25056, zName) + fts5StorageRenameOne(tls, pConfig, bp, ts+25103, zName) fts5StorageRenameOne(tls, pConfig, bp, ts+11488, zName) - fts5StorageRenameOne(tls, pConfig, bp, ts+35958, zName) + fts5StorageRenameOne(tls, pConfig, bp, ts+36005, zName) if (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { - fts5StorageRenameOne(tls, pConfig, bp, ts+34408, zName) + fts5StorageRenameOne(tls, pConfig, bp, ts+34455, zName) } if (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NORMAL { - fts5StorageRenameOne(tls, pConfig, bp, ts+34057, zName) + fts5StorageRenameOne(tls, pConfig, bp, ts+34104, zName) } return *(*int32)(unsafe.Pointer(bp)) } @@ -148504,17 +148562,17 @@ var rc int32 *(*uintptr)(unsafe.Pointer(bp + 64)) = uintptr(0) - rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, bp+64, ts+36680, + rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, bp+64, ts+36727, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zPost, zDefn, func() uintptr { if bWithout != 0 { - return ts + 29709 + return ts + 29756 } return ts + 1554 }())) if *(*uintptr)(unsafe.Pointer(bp + 64)) != 0 { *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, - ts+36710, + ts+36757, libc.VaList(bp+40, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zPost, *(*uintptr)(unsafe.Pointer(bp + 64)))) Xsqlite3_free(tls, *(*uintptr)(unsafe.Pointer(bp + 64))) } @@ -148551,27 +148609,27 @@ } else { var i int32 var iOff int32 - Xsqlite3_snprintf(tls, nDefn, zDefn, ts+36754, 0) + 
Xsqlite3_snprintf(tls, nDefn, zDefn, ts+36801, 0) iOff = int32(libc.Xstrlen(tls, zDefn)) for i = 0; i < (*Fts5Config)(unsafe.Pointer(pConfig)).FnCol; i++ { - Xsqlite3_snprintf(tls, nDefn-iOff, zDefn+uintptr(iOff), ts+36777, libc.VaList(bp, i)) + Xsqlite3_snprintf(tls, nDefn-iOff, zDefn+uintptr(iOff), ts+36824, libc.VaList(bp, i)) iOff = iOff + int32(libc.Xstrlen(tls, zDefn+uintptr(iOff))) } - rc = sqlite3Fts5CreateTable(tls, pConfig, ts+34057, zDefn, 0, pzErr) + rc = sqlite3Fts5CreateTable(tls, pConfig, ts+34104, zDefn, 0, pzErr) } Xsqlite3_free(tls, zDefn) } if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { rc = sqlite3Fts5CreateTable(tls, - pConfig, ts+34408, ts+36783, 0, pzErr) + pConfig, ts+34455, ts+36830, 0, pzErr) } if rc == SQLITE_OK { rc = sqlite3Fts5CreateTable(tls, - pConfig, ts+35958, ts+36815, 1, pzErr) + pConfig, ts+36005, ts+36862, 1, pzErr) } if rc == SQLITE_OK { - rc = sqlite3Fts5StorageConfigValue(tls, p, ts+34554, uintptr(0), FTS5_CURRENT_VERSION) + rc = sqlite3Fts5StorageConfigValue(tls, p, ts+34601, uintptr(0), FTS5_CURRENT_VERSION) } } @@ -148777,12 +148835,12 @@ (*Fts5Storage)(unsafe.Pointer(p)).FbTotalsValid = 0 rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36832, + ts+36879, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36882, + ts+36929, libc.VaList(bp+32, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) } @@ -148790,7 +148848,7 @@ rc = sqlite3Fts5IndexReinit(tls, (*Fts5Storage)(unsafe.Pointer(p)).FpIndex) } if rc == SQLITE_OK { - rc = sqlite3Fts5StorageConfigValue(tls, p, ts+34554, uintptr(0), FTS5_CURRENT_VERSION) + rc = sqlite3Fts5StorageConfigValue(tls, p, ts+34601, uintptr(0), FTS5_CURRENT_VERSION) } return rc } @@ -148966,7 +149024,7 @@ var zSql uintptr var rc int32 - zSql = Xsqlite3_mprintf(tls, ts+36911, + zSql = Xsqlite3_mprintf(tls, ts+36958, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zSuffix)) if zSql == uintptr(0) { rc = SQLITE_NOMEM @@ -149148,14 +149206,14 @@ if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NORMAL { *(*I64)(unsafe.Pointer(bp + 48)) = int64(0) - rc = fts5StorageCount(tls, p, ts+34057, bp+48) + rc = fts5StorageCount(tls, p, ts+34104, bp+48) if rc == SQLITE_OK && *(*I64)(unsafe.Pointer(bp + 48)) != (*Fts5Storage)(unsafe.Pointer(p)).FnTotalRow { rc = SQLITE_CORRUPT | int32(1)<<8 } } if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { *(*I64)(unsafe.Pointer(bp + 56)) = int64(0) - rc = fts5StorageCount(tls, p, ts+34408, bp+56) + rc = fts5StorageCount(tls, p, ts+34455, bp+56) if rc == SQLITE_OK && *(*I64)(unsafe.Pointer(bp + 56)) != (*Fts5Storage)(unsafe.Pointer(p)).FnTotalRow { rc = SQLITE_CORRUPT | int32(1)<<8 } @@ -149350,9 +149408,9 @@ libc.Xmemcpy(tls, p, uintptr(unsafe.Pointer(&aAsciiTokenChar)), uint64(unsafe.Sizeof(aAsciiTokenChar))) for i = 0; rc == SQLITE_OK && i < nArg; i = i + 2 { var zArg uintptr = *(*uintptr)(unsafe.Pointer(azArg + uintptr(i+1)*8)) - if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36943) { + if 0 == 
Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36990) { fts5AsciiAddExceptions(tls, p, zArg, 1) - } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36954) { + } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37001) { fts5AsciiAddExceptions(tls, p, zArg, 0) } else { rc = SQLITE_ERROR @@ -149567,7 +149625,7 @@ } else { p = Xsqlite3_malloc(tls, int32(unsafe.Sizeof(Unicode61Tokenizer{}))) if p != 0 { - var zCat uintptr = ts + 36965 + var zCat uintptr = ts + 37012 var i int32 libc.Xmemset(tls, p, 0, uint64(unsafe.Sizeof(Unicode61Tokenizer{}))) @@ -149579,7 +149637,7 @@ } for i = 0; rc == SQLITE_OK && i < nArg; i = i + 2 { - if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36974) { + if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37021) { zCat = *(*uintptr)(unsafe.Pointer(azArg + uintptr(i+1)*8)) } } @@ -149590,18 +149648,18 @@ for i = 0; rc == SQLITE_OK && i < nArg; i = i + 2 { var zArg uintptr = *(*uintptr)(unsafe.Pointer(azArg + uintptr(i+1)*8)) - if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36985) { + if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37032) { if int32(*(*uint8)(unsafe.Pointer(zArg))) != '0' && int32(*(*uint8)(unsafe.Pointer(zArg))) != '1' && int32(*(*uint8)(unsafe.Pointer(zArg))) != '2' || *(*uint8)(unsafe.Pointer(zArg + 1)) != 0 { rc = SQLITE_ERROR } else { (*Unicode61Tokenizer)(unsafe.Pointer(p)).FeRemoveDiacritic = int32(*(*uint8)(unsafe.Pointer(zArg))) - '0' } - } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36943) { + } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36990) { rc = fts5UnicodeAddExceptions(tls, p, zArg, 1) - } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36954) { + } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37001) { rc = fts5UnicodeAddExceptions(tls, p, zArg, 0) - } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36974) { + } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37021) { } else { rc = SQLITE_ERROR } @@ -149877,7 +149935,7 @@ var rc int32 = SQLITE_OK var pRet uintptr *(*uintptr)(unsafe.Pointer(bp)) = uintptr(0) - var zBase uintptr = ts + 37003 + var zBase uintptr = ts + 37050 if nArg > 0 { zBase = *(*uintptr)(unsafe.Pointer(azArg)) @@ -150019,7 +150077,7 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*uint8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'a': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37013, aBuf+uintptr(nBuf-2), uint64(2)) { + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37060, aBuf+uintptr(nBuf-2), uint64(2)) { if fts5Porter_MGt1(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 } @@ -150027,11 +150085,11 @@ break case 'c': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37016, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37063, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37021, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37068, aBuf+uintptr(nBuf-4), uint64(4)) { if 
fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } @@ -150039,7 +150097,7 @@ break case 'e': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37026, aBuf+uintptr(nBuf-2), uint64(2)) { + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37073, aBuf+uintptr(nBuf-2), uint64(2)) { if fts5Porter_MGt1(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 } @@ -150047,7 +150105,7 @@ break case 'i': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37029, aBuf+uintptr(nBuf-2), uint64(2)) { + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37076, aBuf+uintptr(nBuf-2), uint64(2)) { if fts5Porter_MGt1(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 } @@ -150055,11 +150113,11 @@ break case 'l': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37032, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37079, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37037, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37084, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } @@ -150067,19 +150125,19 @@ break case 'n': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37042, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37089, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37046, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37093, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt1(tls, aBuf, nBuf-5) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37052, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37099, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } - } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37057, aBuf+uintptr(nBuf-3), uint64(3)) { + } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37104, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -150087,11 +150145,11 @@ break case 'o': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37061, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37108, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1_and_S_or_T(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } - } else if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37065, aBuf+uintptr(nBuf-2), uint64(2)) { + } else if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37112, aBuf+uintptr(nBuf-2), uint64(2)) { if fts5Porter_MGt1(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 } @@ -150099,7 +150157,7 @@ break case 's': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37068, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37115, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -150107,11 +150165,11 @@ break case 't': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37072, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37119, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 
{ *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } - } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37076, aBuf+uintptr(nBuf-3), uint64(3)) { + } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37123, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -150119,7 +150177,7 @@ break case 'u': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37080, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37127, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -150127,7 +150185,7 @@ break case 'v': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37084, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37131, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -150135,7 +150193,7 @@ break case 'z': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37088, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37135, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -150151,24 +150209,24 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*uint8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'a': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37092, aBuf+uintptr(nBuf-2), uint64(2)) { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37072, uint64(3)) + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37139, aBuf+uintptr(nBuf-2), uint64(2)) { + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37119, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 + 3 ret = 1 } break case 'b': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37095, aBuf+uintptr(nBuf-2), uint64(2)) { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37098, uint64(3)) + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37142, aBuf+uintptr(nBuf-2), uint64(2)) { + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37145, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 + 3 ret = 1 } break case 'i': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37102, aBuf+uintptr(nBuf-2), uint64(2)) { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37088, uint64(3)) + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37149, aBuf+uintptr(nBuf-2), uint64(2)) { + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37135, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 + 3 ret = 1 } @@ -150183,44 +150241,44 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*uint8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'a': - if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37105, aBuf+uintptr(nBuf-7), uint64(7)) { + if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37152, aBuf+uintptr(nBuf-7), uint64(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37072, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37119, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } - } else if nBuf > 6 && 0 == libc.Xmemcmp(tls, ts+37113, aBuf+uintptr(nBuf-6), uint64(6)) { + } else if nBuf > 6 && 0 == libc.Xmemcmp(tls, ts+37160, aBuf+uintptr(nBuf-6), uint64(6)) { if fts5Porter_MGt0(tls, aBuf, nBuf-6) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-6), ts+37120, uint64(4)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-6), ts+37167, uint64(4)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 6 + 4 } } break case 'c': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37125, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == 
libc.Xmemcmp(tls, ts+37172, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37021, uint64(4)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37068, uint64(4)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 4 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37130, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37177, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37016, uint64(4)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37063, uint64(4)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 4 } } break case 'e': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37135, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37182, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37088, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37135, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 3 } } break case 'g': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37140, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37187, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+15480, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 3 @@ -150229,91 +150287,91 @@ break case 'l': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37145, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37192, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt0(tls, aBuf, nBuf-3) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37098, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37145, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 + 3 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37149, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37196, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37013, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37060, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 2 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37154, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37201, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37057, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37104, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 3 } - } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37160, aBuf+uintptr(nBuf-3), uint64(3)) { + } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37207, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt0(tls, aBuf, nBuf-3) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37164, uint64(1)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37211, uint64(1)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 + 1 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37166, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37213, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37080, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37127, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 3 } } break case 'o': - if nBuf > 7 && 0 == libc.Xmemcmp(tls, 
ts+37172, aBuf+uintptr(nBuf-7), uint64(7)) { + if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37219, aBuf+uintptr(nBuf-7), uint64(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37088, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37135, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37180, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37227, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37072, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37119, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 3 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37186, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37233, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37072, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37119, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 3 } } break case 's': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37191, aBuf+uintptr(nBuf-5), uint64(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37238, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37013, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37060, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } - } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37197, aBuf+uintptr(nBuf-7), uint64(7)) { + } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37244, aBuf+uintptr(nBuf-7), uint64(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37084, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37131, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } - } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37205, aBuf+uintptr(nBuf-7), uint64(7)) { + } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37252, aBuf+uintptr(nBuf-7), uint64(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37213, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37260, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } - } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37217, aBuf+uintptr(nBuf-7), uint64(7)) { + } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37264, aBuf+uintptr(nBuf-7), uint64(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37080, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37127, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } } break case 't': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37225, aBuf+uintptr(nBuf-5), uint64(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37272, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37013, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37060, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37231, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37278, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37084, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37131, uint64(3)) 
*(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 3 } - } else if nBuf > 6 && 0 == libc.Xmemcmp(tls, ts+37237, aBuf+uintptr(nBuf-6), uint64(6)) { + } else if nBuf > 6 && 0 == libc.Xmemcmp(tls, ts+37284, aBuf+uintptr(nBuf-6), uint64(6)) { if fts5Porter_MGt0(tls, aBuf, nBuf-6) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-6), ts+37098, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-6), ts+37145, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 6 + 3 } } @@ -150328,16 +150386,16 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*uint8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'a': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37244, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37291, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37029, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37076, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 2 } } break case 's': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37249, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37296, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } @@ -150345,21 +150403,21 @@ break case 't': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37254, aBuf+uintptr(nBuf-5), uint64(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37301, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37029, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37076, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37260, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37307, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37029, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37076, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } } break case 'u': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37213, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37260, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt0(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -150367,7 +150425,7 @@ break case 'v': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37266, aBuf+uintptr(nBuf-5), uint64(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37313, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 } @@ -150375,9 +150433,9 @@ break case 'z': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37272, aBuf+uintptr(nBuf-5), uint64(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37319, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37013, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37060, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } } @@ -150392,12 +150450,12 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*uint8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'e': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37278, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37325, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt0(tls, aBuf, nBuf-3) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), 
ts+37282, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37329, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 + 2 } - } else if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37285, aBuf+uintptr(nBuf-2), uint64(2)) { + } else if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37332, aBuf+uintptr(nBuf-2), uint64(2)) { if fts5Porter_Vowel(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 ret = 1 @@ -150406,7 +150464,7 @@ break case 'n': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37288, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37335, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_Vowel(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 ret = 1 @@ -150562,7 +150620,7 @@ (*TrigramTokenizer)(unsafe.Pointer(pNew)).FbFold = 1 for i = 0; rc == SQLITE_OK && i < nArg; i = i + 2 { var zArg uintptr = *(*uintptr)(unsafe.Pointer(azArg + uintptr(i+1)*8)) - if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37292) { + if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37339) { if int32(*(*uint8)(unsafe.Pointer(zArg))) != '0' && int32(*(*uint8)(unsafe.Pointer(zArg))) != '1' || *(*uint8)(unsafe.Pointer(zArg + 1)) != 0 { rc = SQLITE_ERROR } else { @@ -150742,22 +150800,22 @@ defer tls.Free(128) *(*[4]BuiltinTokenizer)(unsafe.Pointer(bp)) = [4]BuiltinTokenizer{ - {FzName: ts + 37003, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { + {FzName: ts + 37050, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 }{fts5UnicodeCreate})), FxDelete: *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5UnicodeDelete})), FxTokenize: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, int32, uintptr) int32 }{fts5UnicodeTokenize}))}}, - {FzName: ts + 37307, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { + {FzName: ts + 37354, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 }{fts5AsciiCreate})), FxDelete: *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5AsciiDelete})), FxTokenize: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, int32, uintptr) int32 }{fts5AsciiTokenize}))}}, - {FzName: ts + 37313, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { + {FzName: ts + 37360, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 }{fts5PorterCreate})), FxDelete: *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5PorterDelete})), FxTokenize: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, int32, uintptr) int32 }{fts5PorterTokenize}))}}, - {FzName: ts + 37320, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { + {FzName: ts + 37367, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 }{fts5TriCreate})), FxDelete: *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5TriDelete})), FxTokenize: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, int32, uintptr) int32 @@ -151900,14 +151958,14 @@ var zCopy uintptr = sqlite3Fts5Strndup(tls, bp+8, zType, -1) if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { 
sqlite3Fts5Dequote(tls, zCopy) - if Xsqlite3_stricmp(tls, zCopy, ts+37328) == 0 { + if Xsqlite3_stricmp(tls, zCopy, ts+37375) == 0 { *(*int32)(unsafe.Pointer(peType)) = FTS5_VOCAB_COL - } else if Xsqlite3_stricmp(tls, zCopy, ts+37332) == 0 { + } else if Xsqlite3_stricmp(tls, zCopy, ts+37379) == 0 { *(*int32)(unsafe.Pointer(peType)) = FTS5_VOCAB_ROW - } else if Xsqlite3_stricmp(tls, zCopy, ts+37336) == 0 { + } else if Xsqlite3_stricmp(tls, zCopy, ts+37383) == 0 { *(*int32)(unsafe.Pointer(peType)) = FTS5_VOCAB_INSTANCE } else { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+37345, libc.VaList(bp, zCopy)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+37392, libc.VaList(bp, zCopy)) *(*int32)(unsafe.Pointer(bp + 8)) = SQLITE_ERROR } Xsqlite3_free(tls, zCopy) @@ -151933,19 +151991,19 @@ defer tls.Free(36) *(*[3]uintptr)(unsafe.Pointer(bp + 8)) = [3]uintptr{ - ts + 37379, - ts + 37419, - ts + 37454, + ts + 37426, + ts + 37466, + ts + 37501, } var pRet uintptr = uintptr(0) *(*int32)(unsafe.Pointer(bp + 32)) = SQLITE_OK var bDb int32 - bDb = libc.Bool32(argc == 6 && libc.Xstrlen(tls, *(*uintptr)(unsafe.Pointer(argv + 1*8))) == uint64(4) && libc.Xmemcmp(tls, ts+23352, *(*uintptr)(unsafe.Pointer(argv + 1*8)), uint64(4)) == 0) + bDb = libc.Bool32(argc == 6 && libc.Xstrlen(tls, *(*uintptr)(unsafe.Pointer(argv + 1*8))) == uint64(4) && libc.Xmemcmp(tls, ts+23399, *(*uintptr)(unsafe.Pointer(argv + 1*8)), uint64(4)) == 0) if argc != 5 && bDb == 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+37497, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+37544, 0) *(*int32)(unsafe.Pointer(bp + 32)) = SQLITE_ERROR } else { var nByte int32 @@ -152078,11 +152136,11 @@ if (*Fts5VocabTable)(unsafe.Pointer(pTab)).FbBusy != 0 { (*Sqlite3_vtab)(unsafe.Pointer(pVTab)).FzErrMsg = Xsqlite3_mprintf(tls, - ts+37530, libc.VaList(bp, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) + ts+37577, libc.VaList(bp, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) return SQLITE_ERROR } zSql = sqlite3Fts5Mprintf(tls, bp+64, - ts+37561, + ts+37608, libc.VaList(bp+16, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) if zSql != 0 { *(*int32)(unsafe.Pointer(bp + 64)) = Xsqlite3_prepare_v2(tls, (*Fts5VocabTable)(unsafe.Pointer(pTab)).Fdb, zSql, -1, bp+72, uintptr(0)) @@ -152106,7 +152164,7 @@ *(*uintptr)(unsafe.Pointer(bp + 72)) = uintptr(0) if *(*int32)(unsafe.Pointer(bp + 64)) == SQLITE_OK { (*Sqlite3_vtab)(unsafe.Pointer(pVTab)).FzErrMsg = Xsqlite3_mprintf(tls, - ts+37612, libc.VaList(bp+48, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) + ts+37659, libc.VaList(bp+48, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) *(*int32)(unsafe.Pointer(bp + 64)) = SQLITE_ERROR } } else { @@ -152501,7 +152559,7 @@ func sqlite3Fts5VocabInit(tls *libc.TLS, pGlobal uintptr, db uintptr) int32 { var p uintptr = pGlobal - return Xsqlite3_create_module_v2(tls, db, ts+37638, uintptr(unsafe.Pointer(&fts5Vocab)), p, uintptr(0)) + return Xsqlite3_create_module_v2(tls, db, ts+37685, uintptr(unsafe.Pointer(&fts5Vocab)), p, uintptr(0)) } var fts5Vocab = Sqlite3_module{ @@ -152523,7 +152581,7 @@ // ************* End of stmt.c 
*********************************************** // Return the source-id for this library func Xsqlite3_sourceid(tls *libc.TLS) uintptr { - return ts + 37648 + return ts + 37695 } func init() { @@ -153500,5 +153558,5 @@ *(*func(*libc.TLS, uintptr, int32, uintptr) int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&vfs_template)) + 128)) = rbuVfsGetLastError } -var ts1 = "3.41.0\x00ATOMIC_INTRINSICS=1\x00COMPILER=gcc-10.2.1 20210110\x00DEFAULT_AUTOVACUUM\x00DEFAULT_CACHE_SIZE=-2000\x00DEFAULT_FILE_FORMAT=4\x00DEFAULT_JOURNAL_SIZE_LIMIT=-1\x00DEFAULT_MEMSTATUS=0\x00DEFAULT_MMAP_SIZE=0\x00DEFAULT_PAGE_SIZE=4096\x00DEFAULT_PCACHE_INITSZ=20\x00DEFAULT_RECURSIVE_TRIGGERS\x00DEFAULT_SECTOR_SIZE=4096\x00DEFAULT_SYNCHRONOUS=2\x00DEFAULT_WAL_AUTOCHECKPOINT=1000\x00DEFAULT_WAL_SYNCHRONOUS=2\x00DEFAULT_WORKER_THREADS=0\x00ENABLE_COLUMN_METADATA\x00ENABLE_FTS5\x00ENABLE_GEOPOLY\x00ENABLE_MATH_FUNCTIONS\x00ENABLE_MEMORY_MANAGEMENT\x00ENABLE_OFFSET_SQL_FUNC\x00ENABLE_PREUPDATE_HOOK\x00ENABLE_RBU\x00ENABLE_RTREE\x00ENABLE_SESSION\x00ENABLE_SNAPSHOT\x00ENABLE_STAT4\x00ENABLE_UNLOCK_NOTIFY\x00LIKE_DOESNT_MATCH_BLOBS\x00MALLOC_SOFT_LIMIT=1024\x00MAX_ATTACHED=10\x00MAX_COLUMN=2000\x00MAX_COMPOUND_SELECT=500\x00MAX_DEFAULT_PAGE_SIZE=8192\x00MAX_EXPR_DEPTH=1000\x00MAX_FUNCTION_ARG=127\x00MAX_LENGTH=1000000000\x00MAX_LIKE_PATTERN_LENGTH=50000\x00MAX_MMAP_SIZE=0x7fff0000\x00MAX_PAGE_COUNT=1073741823\x00MAX_PAGE_SIZE=65536\x00MAX_SQL_LENGTH=1000000000\x00MAX_TRIGGER_DEPTH=1000\x00MAX_VARIABLE_NUMBER=32766\x00MAX_VDBE_OP=250000000\x00MAX_WORKER_THREADS=8\x00MUTEX_NOOP\x00SOUNDEX\x00SYSTEM_MALLOC\x00TEMP_STORE=1\x00THREADSAFE=1\x00BINARY\x00ANY\x00BLOB\x00INT\x00INTEGER\x00REAL\x00TEXT\x0020b:20e\x0020c:20e\x0020e\x0040f-21a-21d\x00now\x00local time unavailable\x00second\x00minute\x00hour\x00\x00\x00day\x00\x00\x00\x00month\x00\x00year\x00\x00\x00auto\x00julianday\x00localtime\x00unixepoch\x00utc\x00weekday \x00start of \x00month\x00year\x00day\x00%02d\x00%06.3f\x00%03d\x00%.16g\x00%lld\x00%04d\x00date\x00time\x00datetime\x00strftime\x00current_time\x00current_timestamp\x00current_date\x00failed to allocate %u bytes of memory\x00failed memory resize %u to %u bytes\x00out of memory\x000123456789ABCDEF0123456789abcdef\x00-x0\x00X0\x00%\x00NaN\x00Inf\x00\x00NULL\x00(NULL)\x00.\x00(join-%u)\x00(subquery-%u)\x00thstndrd\x00922337203685477580\x00API call with %s database connection 
pointer\x00unopened\x00invalid\x00Savepoint\x00AutoCommit\x00Transaction\x00Checkpoint\x00JournalMode\x00Vacuum\x00VFilter\x00VUpdate\x00Init\x00Goto\x00Gosub\x00InitCoroutine\x00Yield\x00MustBeInt\x00Jump\x00Once\x00If\x00IfNot\x00IsType\x00Not\x00IfNullRow\x00SeekLT\x00SeekLE\x00SeekGE\x00SeekGT\x00IfNotOpen\x00IfNoHope\x00NoConflict\x00NotFound\x00Found\x00SeekRowid\x00NotExists\x00Last\x00IfSmaller\x00SorterSort\x00Sort\x00Rewind\x00SorterNext\x00Prev\x00Next\x00IdxLE\x00IdxGT\x00IdxLT\x00Or\x00And\x00IdxGE\x00RowSetRead\x00RowSetTest\x00Program\x00FkIfZero\x00IsNull\x00NotNull\x00Ne\x00Eq\x00Gt\x00Le\x00Lt\x00Ge\x00ElseEq\x00IfPos\x00IfNotZero\x00DecrJumpZero\x00IncrVacuum\x00VNext\x00Filter\x00PureFunc\x00Function\x00Return\x00EndCoroutine\x00HaltIfNull\x00Halt\x00Integer\x00Int64\x00String\x00BeginSubrtn\x00Null\x00SoftNull\x00Blob\x00Variable\x00Move\x00Copy\x00SCopy\x00IntCopy\x00FkCheck\x00ResultRow\x00CollSeq\x00AddImm\x00RealAffinity\x00Cast\x00Permutation\x00Compare\x00IsTrue\x00ZeroOrNull\x00Offset\x00Column\x00TypeCheck\x00Affinity\x00MakeRecord\x00Count\x00ReadCookie\x00SetCookie\x00ReopenIdx\x00BitAnd\x00BitOr\x00ShiftLeft\x00ShiftRight\x00Add\x00Subtract\x00Multiply\x00Divide\x00Remainder\x00Concat\x00OpenRead\x00OpenWrite\x00BitNot\x00OpenDup\x00OpenAutoindex\x00String8\x00OpenEphemeral\x00SorterOpen\x00SequenceTest\x00OpenPseudo\x00Close\x00ColumnsUsed\x00SeekScan\x00SeekHit\x00Sequence\x00NewRowid\x00Insert\x00RowCell\x00Delete\x00ResetCount\x00SorterCompare\x00SorterData\x00RowData\x00Rowid\x00NullRow\x00SeekEnd\x00IdxInsert\x00SorterInsert\x00IdxDelete\x00DeferredSeek\x00IdxRowid\x00FinishSeek\x00Destroy\x00Clear\x00ResetSorter\x00CreateBtree\x00SqlExec\x00ParseSchema\x00LoadAnalysis\x00DropTable\x00DropIndex\x00Real\x00DropTrigger\x00IntegrityCk\x00RowSetAdd\x00Param\x00FkCounter\x00MemMax\x00OffsetLimit\x00AggInverse\x00AggStep\x00AggStep1\x00AggValue\x00AggFinal\x00Expire\x00CursorLock\x00CursorUnlock\x00TableLock\x00VBegin\x00VCreate\x00VDestroy\x00VOpen\x00VInitIn\x00VColumn\x00VRename\x00Pagecount\x00MaxPgcnt\x00ClrSubtype\x00FilterAdd\x00Trace\x00CursorHint\x00ReleaseReg\x00Noop\x00Explain\x00Abortable\x00open\x00close\x00access\x00getcwd\x00stat\x00fstat\x00ftruncate\x00fcntl\x00read\x00pread\x00pread64\x00write\x00pwrite\x00pwrite64\x00fchmod\x00fallocate\x00unlink\x00openDirectory\x00mkdir\x00rmdir\x00fchown\x00geteuid\x00mmap\x00munmap\x00mremap\x00getpagesize\x00readlink\x00lstat\x00ioctl\x00attempt to open \"%s\" as file descriptor %d\x00/dev/null\x00os_unix.c:%d: (%d) %s(%s) - %s\x00cannot fstat db file %s\x00file unlinked while open: %s\x00multiple links to file: %s\x00file renamed while open: %s\x00%s\x00full_fsync\x00%s-shm\x00readonly_shm\x00psow\x00unix-excl\x00%s.lock\x00/var/tmp\x00/usr/tmp\x00/tmp\x00SQLITE_TMPDIR\x00TMPDIR\x00%s/etilqs_%llx%c\x00modeof\x00fsync\x00/dev/urandom\x00unix\x00unix-none\x00unix-dotfile\x00memdb\x00memdb(%p,%lld)\x00PRAGMA \"%w\".page_count\x00ATTACH x AS %Q\x00recovered %d pages from %s\x00-journal\x00-wal\x00nolock\x00immutable\x00PRAGMA table_list\x00recovered %d frames from WAL file %s\x00cannot limit WAL size: %s\x00SQLite format 3\x00:memory:\x00@ \x00\n\x00invalid page number %d\x002nd reference to page %d\x00Failed to read ptrmap key=%d\x00Bad ptr map entry key=%d expected=(%d,%d) got=(%d,%d)\x00failed to get page %d\x00freelist leaf count too big on page %d\x00%s is %d but should be %d\x00size\x00overflow list length\x00Page %u: \x00unable to get the page. 
error code=%d\x00btreeInitPage() returns error code %d\x00free space corruption\x00On tree page %u cell %d: \x00On page %u at right child: \x00Offset %d out of range %d..%d\x00Extends off end of page\x00Rowid %lld out of order\x00Child page depth differs\x00Multiple uses for byte %u of page %u\x00Fragmentation of %d bytes reported as %d on page %u\x00Main freelist: \x00max rootpage (%d) disagrees with header (%d)\x00incremental_vacuum enabled with a max rootpage of zero\x00Page %d is never used\x00Pointer map page %d is referenced\x00unknown database %s\x00destination database is in use\x00source and destination must be distinct\x00%!.15g\x00-\x00%s%s\x00k(%d\x00B\x00,%s%s%s\x00N.\x00)\x00%.18s-%s\x00%s(%d)\x00%d\x00(blob)\x00vtab:%p\x00%c%u\x00]\x00program\x00?\x008\x0016LE\x0016BE\x00addr\x00opcode\x00p1\x00p2\x00p3\x00p4\x00p5\x00comment\x00id\x00parent\x00notused\x00detail\x00%.4c%s%.16c\x00MJ delete: %s\x00MJ collide: %s\x00-mj%06X9%02X\x00FOREIGN KEY constraint failed\x00a CHECK constraint\x00a generated column\x00an index\x00non-deterministic use of %s() in %s\x00API called with finalized prepared statement\x00API called with NULL prepared statement\x00string or blob too big\x00bind on a busy prepared statement: [%s]\x00-- \x00'%.*q'\x00zeroblob(%d)\x00x'\x00%02x\x00'\x00%s constraint failed\x00%z: %s\x00abort at %d in [%s]: %s\x00cannot store %s value in %s column %s.%s\x00cannot open savepoint - SQL statements in progress\x00no such savepoint: %s\x00cannot release savepoint - SQL statements in progress\x00cannot commit transaction - SQL statements in progress\x00cannot start a transaction within a transaction\x00cannot rollback - no transaction is active\x00cannot commit - no transaction is active\x00database schema has changed\x00index corruption\x00sqlite_master\x00SELECT*FROM\"%w\".%s WHERE %s ORDER BY rowid\x00too many levels of trigger recursion\x00cannot change %s wal mode from within a transaction\x00into\x00out of\x00database table is locked: %s\x00ValueList\x00-- %s\x00statement aborts at %d: [%s] %s\x00NOT NULL\x00UNIQUE\x00CHECK\x00FOREIGN KEY\x00cannot open value of type %s\x00null\x00real\x00integer\x00no such rowid: %lld\x00cannot open virtual table: %s\x00cannot open table without rowid: %s\x00cannot open view: %s\x00no such column: \"%s\"\x00foreign key\x00indexed\x00cannot open %s column for writing\x00sqlite_\x00sqlite_temp_master\x00sqlite_temp_schema\x00sqlite_schema\x00main\x00*\x00new\x00old\x00excluded\x00misuse of aliased aggregate %s\x00misuse of aliased window function %s\x00row value misused\x00double-quoted string literal: \"%w\"\x00coalesce\x00no such column\x00ambiguous column name\x00%s: %s.%s.%s\x00%s: %s.%s\x00%s: %s\x00partial index WHERE clauses\x00index expressions\x00CHECK constraints\x00generated columns\x00%s prohibited in %s\x00true\x00false\x00the \".\" operator\x00second argument to %#T() must be a constant between 0.0 and 1.0\x00not authorized to use function: %#T\x00non-deterministic functions\x00%#T() may not be used as a window function\x00window\x00aggregate\x00misuse of %s function %#T()\x00no such function: %#T\x00wrong number of arguments to function %#T()\x00FILTER may not be used with non-aggregate %#T()\x00subqueries\x00parameters\x00%r %s BY term out of range - should be between 1 and %d\x00too many terms in ORDER BY clause\x00ORDER\x00%r ORDER BY term does not match any column in the result set\x00too many terms in %s BY clause\x00HAVING clause on a non-aggregate query\x00GROUP\x00aggregate functions are not allowed in the 
GROUP BY clause\x00Expression tree is too large (maximum depth %d)\x00IN(...) element has %d term%s - expected %d\x00s\x000\x00too many arguments on function %T\x00unsafe use of %#T()\x00variable number must be between ?1 and ?%d\x00too many SQL variables\x00%d columns assigned %d values\x00too many columns in %s\x00_ROWID_\x00ROWID\x00OID\x00USING ROWID SEARCH ON TABLE %s FOR IN-OPERATOR\x00USING INDEX %s FOR IN-OPERATOR\x00sub-select returns %d columns - expected %d\x00REUSE LIST SUBQUERY %d\x00%sLIST SUBQUERY %d\x00CORRELATED \x00REUSE SUBQUERY %d\x00%sSCALAR SUBQUERY %d\x001\x000x\x00hex literal too big: %s%#T\x00generated column loop on \"%s\"\x00blob\x00text\x00numeric\x00flexnum\x00none\x00misuse of aggregate: %#T()\x00unknown function: %#T()\x00RAISE() may only be used within a trigger-program\x00B\x00C\x00D\x00E\x00F\x00table %s may not be altered\x00SELECT 1 FROM \"%w\".sqlite_master WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%' AND sqlite_rename_test(%Q, sql, type, name, %d, %Q, %d)=NULL \x00SELECT 1 FROM temp.sqlite_master WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%' AND sqlite_rename_test(%Q, sql, type, name, 1, %Q, %d)=NULL \x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_quotefix(%Q, sql)WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%'\x00UPDATE temp.sqlite_master SET sql = sqlite_rename_quotefix('temp', sql)WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%'\x00there is already another table or index with this name: %s\x00table\x00view %s may not be altered\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, %d) WHERE (type!='index' OR tbl_name=%Q COLLATE nocase)AND name NOT LIKE 'sqliteX_%%' ESCAPE 'X'\x00UPDATE %Q.sqlite_master SET tbl_name = %Q, name = CASE WHEN type='table' THEN %Q WHEN name LIKE 'sqliteX_autoindex%%' ESCAPE 'X' AND type='index' THEN 'sqlite_autoindex_' || %Q || substr(name,%d+18) ELSE name END WHERE tbl_name=%Q COLLATE nocase AND (type='table' OR type='index' OR type='trigger');\x00sqlite_sequence\x00UPDATE \"%w\".sqlite_sequence set name = %Q WHERE name = %Q\x00UPDATE sqlite_temp_schema SET sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, 1), tbl_name = CASE WHEN tbl_name=%Q COLLATE nocase AND sqlite_rename_test(%Q, sql, type, name, 1, 'after rename', 0) THEN %Q ELSE tbl_name END WHERE type IN ('view', 'trigger')\x00after rename\x00SELECT raise(ABORT,%Q) FROM \"%w\".\"%w\"\x00Cannot add a PRIMARY KEY column\x00Cannot add a UNIQUE column\x00Cannot add a REFERENCES column with non-NULL default value\x00Cannot add a NOT NULL column with default value NULL\x00Cannot add a column with non-constant default\x00cannot add a STORED column\x00UPDATE \"%w\".sqlite_master SET sql = printf('%%.%ds, ',sql) || %Q || substr(sql,1+length(printf('%%.%ds',sql))) WHERE type = 'table' AND name = %Q\x00SELECT CASE WHEN quick_check GLOB 'CHECK*' THEN raise(ABORT,'CHECK constraint failed') ELSE raise(ABORT,'NOT NULL constraint failed') END FROM pragma_quick_check(%Q,%Q) WHERE quick_check GLOB 'CHECK*' OR quick_check GLOB 'NULL*'\x00virtual tables may not be altered\x00Cannot add a column to a view\x00sqlite_altertab_%s\x00view\x00virtual table\x00cannot %s %s \"%s\"\x00drop column from\x00rename columns of\x00no such column: \"%T\"\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, %d) WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND (type != 'index' OR 
tbl_name = %Q)\x00UPDATE temp.sqlite_master SET sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, 1) WHERE type IN ('trigger', 'view')\x00error in %s %s%s%s: %s\x00 \x00CREATE \x00\"%w\" \x00%Q%s\x00%.*s%s\x00cannot drop %s column: \"%s\"\x00PRIMARY KEY\x00cannot drop column \"%s\": no other columns exist\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_drop_column(%d, sql, %d) WHERE (type=='table' AND tbl_name=%Q COLLATE nocase)\x00after drop column\x00sqlite_rename_column\x00sqlite_rename_table\x00sqlite_rename_test\x00sqlite_drop_column\x00sqlite_rename_quotefix\x00CREATE TABLE %Q.%s(%s)\x00DELETE FROM %Q.%s WHERE %s=%Q\x00DELETE FROM %Q.%s\x00sqlite_stat1\x00tbl,idx,stat\x00sqlite_stat4\x00tbl,idx,neq,nlt,ndlt,sample\x00sqlite_stat3\x00stat_init\x00stat_push\x00%llu\x00 %llu\x00%llu \x00stat_get\x00sqlite\\_%\x00BBB\x00idx\x00tbl\x00unordered*\x00sz=[0-9]*\x00noskipscan*\x00SELECT idx,count(*) FROM %Q.sqlite_stat4 GROUP BY idx\x00SELECT idx,neq,nlt,ndlt,sample FROM %Q.sqlite_stat4\x00SELECT tbl,idx,stat FROM %Q.sqlite_stat1\x00x\x00\x00too many attached databases - max %d\x00database %s is already in use\x00database is already attached\x00attached databases must use the same text encoding as main database\x00unable to open database: %s\x00no such database: %s\x00cannot detach database %s\x00database %s is locked\x00sqlite_detach\x00sqlite_attach\x00%s cannot use variables\x00%s %T cannot reference objects in database %s\x00authorizer malfunction\x00%s.%s\x00%s.%z\x00access to %z is prohibited\x00not authorized\x00pragma_\x00no such view\x00no such table\x00corrupt database\x00unknown database %T\x00object name reserved for internal use: %s\x00temporary table name must be unqualified\x00%s %T already exists\x00there is already an index named %s\x00sqlite_returning\x00cannot use RETURNING in a trigger\x00too many columns on %s\x00always\x00generated\x00duplicate column name: %s\x00default value of column [%s] is not constant\x00cannot use DEFAULT on a generated column\x00generated columns cannot be part of the PRIMARY KEY\x00table \"%s\" has more than one primary key\x00AUTOINCREMENT is only allowed on an INTEGER PRIMARY KEY\x00virtual tables cannot use computed columns\x00virtual\x00stored\x00error in generated column \"%s\"\x00,\x00\n \x00,\n \x00\n)\x00CREATE TABLE \x00 TEXT\x00 NUM\x00 INT\x00 REAL\x00unknown datatype for %s.%s: \"%s\"\x00missing datatype for %s.%s\x00AUTOINCREMENT not allowed on WITHOUT ROWID tables\x00PRIMARY KEY missing on table %s\x00must have at least one non-generated column\x00TABLE\x00VIEW\x00CREATE %s %.*s\x00UPDATE %Q.sqlite_master SET type='%s', name=%Q, tbl_name=%Q, rootpage=#%d, sql=%Q WHERE rowid=#%d\x00CREATE TABLE %Q.sqlite_sequence(name,seq)\x00tbl_name='%q' AND type!='trigger'\x00parameters are not allowed in views\x00view %s is circularly defined\x00corrupt schema\x00UPDATE %Q.sqlite_master SET rootpage=%d WHERE #%d AND rootpage=#%d\x00sqlite_stat%d\x00DELETE FROM %Q.sqlite_sequence WHERE name=%Q\x00DELETE FROM %Q.sqlite_master WHERE tbl_name=%Q and type!='trigger'\x00table %s may not be dropped\x00use DROP TABLE to delete table %s\x00use DROP VIEW to delete view %s\x00foreign key on %s should reference only one column of table %T\x00number of columns in foreign key does not match the number of columns in the referenced table\x00unknown column \"%s\" in foreign key definition\x00unsupported use of NULLS %s\x00FIRST\x00LAST\x00index\x00cannot create a TEMP index on non-TEMP table \"%s\"\x00table %s may not be indexed\x00views may not be 
indexed\x00virtual tables may not be indexed\x00there is already a table named %s\x00index %s already exists\x00sqlite_autoindex_%s_%d\x00expressions prohibited in PRIMARY KEY and UNIQUE constraints\x00conflicting ON CONFLICT clauses specified\x00invalid rootpage\x00CREATE%s INDEX %.*s\x00 UNIQUE\x00INSERT INTO %Q.sqlite_master VALUES('index',%Q,%Q,#%d,%Q);\x00name='%q' AND type='index'\x00no such index: %S\x00index associated with UNIQUE or PRIMARY KEY constraint cannot be dropped\x00DELETE FROM %Q.sqlite_master WHERE name=%Q AND type='index'\x00too many FROM clause terms, max: %d\x00a JOIN clause is required before %s\x00ON\x00USING\x00BEGIN\x00ROLLBACK\x00COMMIT\x00RELEASE\x00unable to open a temporary database file for storing temporary tables\x00index '%q'\x00, \x00%s.rowid\x00unable to identify the object to be reindexed\x00duplicate WITH table name: %s\x00no such collation sequence: %s\x00unsafe use of virtual table \"%s\"\x00table %s may not be modified\x00cannot modify %s because it is a view\x00rows deleted\x00integer overflow\x00%.*f\x00LIKE or GLOB pattern too complex\x00ESCAPE expression must be a single character\x00%!.20e\x00%Q\x00?000\x00MATCH\x00like\x00implies_nonnull_row\x00expr_compare\x00expr_implies_expr\x00affinity\x00soundex\x00load_extension\x00sqlite_compileoption_used\x00sqlite_compileoption_get\x00unlikely\x00likelihood\x00likely\x00sqlite_offset\x00ltrim\x00rtrim\x00trim\x00min\x00max\x00typeof\x00subtype\x00length\x00instr\x00printf\x00format\x00unicode\x00char\x00abs\x00round\x00upper\x00lower\x00hex\x00unhex\x00ifnull\x00random\x00randomblob\x00nullif\x00sqlite_version\x00sqlite_source_id\x00sqlite_log\x00quote\x00last_insert_rowid\x00changes\x00total_changes\x00replace\x00zeroblob\x00substr\x00substring\x00sum\x00total\x00avg\x00count\x00group_concat\x00glob\x00ceil\x00ceiling\x00floor\x00trunc\x00ln\x00log\x00log10\x00log2\x00exp\x00pow\x00power\x00mod\x00acos\x00asin\x00atan\x00atan2\x00cos\x00sin\x00tan\x00cosh\x00sinh\x00tanh\x00acosh\x00asinh\x00atanh\x00sqrt\x00radians\x00degrees\x00pi\x00sign\x00iif\x00foreign key mismatch - \"%w\" referencing \"%w\"\x00cannot INSERT into generated column \"%s\"\x00table %S has no column named %s\x00table %S has %d columns but %d values were supplied\x00%d values for %d columns\x00UPSERT not implemented for virtual table \"%s\"\x00cannot UPSERT a view\x00rows inserted\x00sqlite3_extension_init\x00sqlite3_\x00lib\x00_init\x00no entry point [%s] in shared library [%s]\x00error during initialization: %s\x00unable to open shared library [%.*s]\x00so\x00automatic extension loading failed: 
%s\x00seq\x00from\x00to\x00on_update\x00on_delete\x00match\x00cid\x00name\x00type\x00notnull\x00dflt_value\x00pk\x00hidden\x00schema\x00ncol\x00wr\x00strict\x00seqno\x00desc\x00coll\x00key\x00builtin\x00enc\x00narg\x00flags\x00wdth\x00hght\x00flgs\x00unique\x00origin\x00partial\x00rowid\x00fkid\x00file\x00busy\x00checkpointed\x00database\x00status\x00cache_size\x00timeout\x00analysis_limit\x00application_id\x00auto_vacuum\x00automatic_index\x00busy_timeout\x00cache_spill\x00case_sensitive_like\x00cell_size_check\x00checkpoint_fullfsync\x00collation_list\x00compile_options\x00count_changes\x00data_version\x00database_list\x00default_cache_size\x00defer_foreign_keys\x00empty_result_callbacks\x00encoding\x00foreign_key_check\x00foreign_key_list\x00foreign_keys\x00freelist_count\x00full_column_names\x00fullfsync\x00function_list\x00hard_heap_limit\x00ignore_check_constraints\x00incremental_vacuum\x00index_info\x00index_list\x00index_xinfo\x00integrity_check\x00journal_mode\x00journal_size_limit\x00legacy_alter_table\x00locking_mode\x00max_page_count\x00mmap_size\x00module_list\x00optimize\x00page_count\x00page_size\x00pragma_list\x00query_only\x00quick_check\x00read_uncommitted\x00recursive_triggers\x00reverse_unordered_selects\x00schema_version\x00secure_delete\x00short_column_names\x00shrink_memory\x00soft_heap_limit\x00synchronous\x00table_info\x00table_list\x00table_xinfo\x00temp_store\x00temp_store_directory\x00threads\x00trusted_schema\x00user_version\x00wal_autocheckpoint\x00wal_checkpoint\x00writable_schema\x00onoffalseyestruextrafull\x00exclusive\x00normal\x00full\x00incremental\x00memory\x00temporary storage cannot be changed from within a transaction\x00SET NULL\x00SET DEFAULT\x00CASCADE\x00RESTRICT\x00NO ACTION\x00delete\x00persist\x00off\x00truncate\x00wal\x00w\x00a\x00sissii\x00utf8\x00utf16le\x00utf16be\x00-%T\x00fast\x00not a writable directory\x00Safety level may not be changed inside a transaction\x00reset\x00issisii\x00issisi\x00SELECT*FROM\"%w\"\x00shadow\x00sssiii\x00iisX\x00isiX\x00c\x00u\x00isisi\x00iss\x00is\x00iissssss\x00NONE\x00siX\x00*** in database %s ***\n\x00row not in PRIMARY KEY order for %s\x00NULL value in %s.%s\x00non-%s value in %s.%s\x00NUMERIC value in %s.%s\x00C\x00TEXT value in %s.%s\x00CHECK constraint failed in %s\x00row \x00 missing from index \x00 values differ from index \x00non-unique entry in index \x00wrong # of entries in index \x00ok\x00unsupported encoding: %s\x00restart\x00ANALYZE \"%w\".\"%w\"\x00UTF8\x00UTF-8\x00UTF-16le\x00UTF-16be\x00UTF16le\x00UTF16be\x00UTF-16\x00UTF16\x00CREATE TABLE x\x00%c\"%s\"\x00(\"%s\"\x00,arg HIDDEN\x00,schema HIDDEN\x00PRAGMA \x00%Q.\x00=%Q\x00error in %s %s after %s: %s\x00malformed database schema (%s)\x00%z - %s\x00rename\x00drop column\x00add column\x00orphan index\x00CREATE TABLE x(type text,name text,tbl_name text,rootpage int,sql text)\x00unsupported file format\x00SELECT*FROM\"%w\".%s ORDER BY rowid\x00database schema is locked: %s\x00statement too long\x00unknown join type: %T%s%T%s%T\x00naturaleftouterightfullinnercross\x00a NATURAL join may not have an ON or USING clause\x00cannot join using column %s - column not present in both tables\x00ambiguous reference to %s in USING()\x00UNION ALL\x00INTERSECT\x00EXCEPT\x00UNION\x00USE TEMP B-TREE FOR %s\x00USE TEMP B-TREE FOR %sORDER BY\x00RIGHT PART OF \x00column%d\x00%.*z:%u\x00NUM\x00cannot use window functions in recursive queries\x00recursive aggregate queries not supported\x00SETUP\x00RECURSIVE STEP\x00SCAN %d CONSTANT ROW%s\x00S\x00COMPOUND 
QUERY\x00LEFT-MOST SUBQUERY\x00%s USING TEMP B-TREE\x00all VALUES must have the same number of terms\x00SELECTs to the left and right of %s do not have the same number of result columns\x00MERGE (%s)\x00LEFT\x00RIGHT\x00no such index: %s\x00'%s' is not a function\x00no such index: \"%s\"\x00multiple references to recursive table: %s\x00circular reference: %s\x00table %s has %d values for %d columns\x00multiple recursive references: %s\x00recursive reference in a subquery: %s\x00%!S\x00too many references to \"%s\": max 65535\x00access to view \"%s\" prohibited\x00..%s\x00%s.%s.%s\x00no such table: %s\x00no tables specified\x00too many columns in result set\x00DISTINCT aggregates must have exactly one argument\x00USE TEMP B-TREE FOR %s(DISTINCT)\x00SCAN %s%s%s\x00 USING COVERING INDEX \x00target object/alias may not appear in FROM clause: %s\x00expected %d columns for '%s' but got %d\x00CO-ROUTINE %!S\x00MATERIALIZE %!S\x00DISTINCT\x00GROUP BY\x00sqlite3_get_table() called with two or more incompatible queries\x00temporary trigger may not have qualified name\x00trigger\x00cannot create triggers on virtual tables\x00trigger %T already exists\x00cannot create trigger on system table\x00cannot create %s trigger on view: %S\x00BEFORE\x00AFTER\x00cannot create INSTEAD OF trigger on table: %S\x00trigger \"%s\" may not write to shadow table \"%s\"\x00INSERT INTO %Q.sqlite_master VALUES('trigger',%Q,%Q,0,'CREATE TRIGGER %q')\x00type='trigger' AND name='%q'\x00no such trigger: %S\x00DELETE FROM %Q.sqlite_master WHERE name=%Q AND type='trigger'\x00%s RETURNING is not available on virtual tables\x00DELETE\x00UPDATE\x00RETURNING may not use \"TABLE.*\" wildcards\x00-- TRIGGER %s\x00cannot UPDATE generated column \"%s\"\x00no such column: %s\x00rows updated\x00%r \x00%sON CONFLICT clause does not match any PRIMARY KEY or UNIQUE constraint\x00CRE\x00INS\x00cannot VACUUM from within a transaction\x00cannot VACUUM - SQL statements in progress\x00non-text filename\x00ATTACH %Q AS vacuum_db\x00output file already exists\x00SELECT sql FROM \"%w\".sqlite_schema WHERE type='table'AND name<>'sqlite_sequence' AND coalesce(rootpage,1)>0\x00SELECT sql FROM \"%w\".sqlite_schema WHERE type='index'\x00SELECT'INSERT INTO vacuum_db.'||quote(name)||' SELECT*FROM\"%w\".'||quote(name)FROM vacuum_db.sqlite_schema WHERE type='table'AND coalesce(rootpage,1)>0\x00INSERT INTO vacuum_db.sqlite_schema SELECT*FROM \"%w\".sqlite_schema WHERE type IN('view','trigger') OR(type='table'AND rootpage=0)\x00CREATE VIRTUAL TABLE %T\x00UPDATE %Q.sqlite_master SET type='table', name=%Q, tbl_name=%Q, rootpage=0, sql=%Q WHERE rowid=#%d\x00name=%Q AND sql=%Q\x00vtable constructor called recursively: %s\x00vtable constructor failed: %s\x00vtable constructor did not declare schema: %s\x00no such module: %s\x00\x00 AND \x00(\x00 (\x00%s=?\x00ANY(%s)\x00>\x00<\x00%s %S\x00SEARCH\x00SCAN\x00AUTOMATIC PARTIAL COVERING INDEX\x00AUTOMATIC COVERING INDEX\x00COVERING INDEX %s\x00INDEX %s\x00 USING \x00 USING INTEGER PRIMARY KEY (%s\x00>? 
AND %s\x00%c?)\x00 VIRTUAL TABLE INDEX %d:%s\x00 LEFT-JOIN\x00BLOOM FILTER ON %S (\x00rowid=?\x00MULTI-INDEX OR\x00INDEX %d\x00RIGHT-JOIN %s\x00regexp\x00ON clause references tables to its right\x00NOCASE\x00too many arguments on %s() - max %d\x00automatic index on %s(%s)\x00auto-index\x00%s.xBestIndex malfunction\x00abbreviated query algorithm search\x00no query solution\x00at most %d tables in a join\x00SCAN CONSTANT ROW\x00second argument to nth_value must be a positive integer\x00argument of ntile must be a positive integer\x00row_number\x00dense_rank\x00rank\x00percent_rank\x00cume_dist\x00ntile\x00last_value\x00nth_value\x00first_value\x00lead\x00lag\x00no such window: %s\x00RANGE with offset PRECEDING/FOLLOWING requires one ORDER BY expression\x00FILTER clause may only be used with aggregate window functions\x00misuse of aggregate: %s()\x00unsupported frame specification\x00PARTITION clause\x00ORDER BY clause\x00frame specification\x00cannot override %s of window: %s\x00DISTINCT is not supported for window functions\x00frame starting offset must be a non-negative integer\x00frame ending offset must be a non-negative integer\x00frame starting offset must be a non-negative number\x00frame ending offset must be a non-negative number\x00%s clause should come after %s not before\x00ORDER BY\x00LIMIT\x00too many terms in compound SELECT\x00syntax error after column name \"%.*s\"\x00parser stack overflow\x00unknown table option: %.*s\x00set list\x00near \"%T\": syntax error\x00qualified table names are not allowed on INSERT, UPDATE, and DELETE statements within triggers\x00the INDEXED BY clause is not allowed on UPDATE or DELETE statements within triggers\x00the NOT INDEXED clause is not allowed on UPDATE or DELETE statements within triggers\x00incomplete input\x00unrecognized token: \"%T\"\x00%s in \"%s\"\x00create\x00temp\x00temporary\x00end\x00explain\x00unable to close due to unfinalized statements or unfinished backups\x00unknown error\x00abort due to ROLLBACK\x00another row available\x00no more rows available\x00not an error\x00SQL logic error\x00access permission denied\x00query aborted\x00database is locked\x00database table is locked\x00attempt to write a readonly database\x00interrupted\x00disk I/O error\x00database disk image is malformed\x00unknown operation\x00database or disk is full\x00unable to open database file\x00locking protocol\x00constraint failed\x00datatype mismatch\x00bad parameter or other API misuse\x00authorization denied\x00column index out of range\x00file is not a database\x00notification message\x00warning message\x00unable to delete/modify user-function due to active statements\x00unable to use function %s in the requested context\x00unknown database: %s\x00unable to delete/modify collation sequence due to active statements\x00file:\x00localhost\x00invalid uri authority: %.*s\x00vfs\x00cache\x00mode\x00no such %s mode: %s\x00%s mode not allowed: %s\x00no such vfs: %s\x00shared\x00private\x00ro\x00rw\x00rwc\x00RTRIM\x00\x00\x00\x00%s at line %d of [%.10s]\x00database corruption\x00misuse\x00cannot open file\x00no such table column: %s.%s\x00SQLITE_\x00database is deadlocked\x00array\x00object\x000123456789abcdef\x00JSON cannot hold BLOB values\x00malformed JSON\x00[0]\x00JSON path error near '%q'\x00json_%s() needs an odd number of arguments\x00$[\x00$.\x00json_object() requires an even number of arguments\x00json_object() labels must be TEXT\x00set\x00insert\x00[]\x00{}\x00CREATE TABLE x(key,value,type,atom,id,parent,fullkey,path,json HIDDEN,root 
HIDDEN)\x00.%.*s\x00[%d]\x00$\x00json\x00json_array\x00json_array_length\x00json_extract\x00->\x00->>\x00json_insert\x00json_object\x00json_patch\x00json_quote\x00json_remove\x00json_replace\x00json_set\x00json_type\x00json_valid\x00json_group_array\x00json_group_object\x00json_each\x00json_tree\x00%s_node\x00data\x00DROP TABLE '%q'.'%q_node';DROP TABLE '%q'.'%q_rowid';DROP TABLE '%q'.'%q_parent';\x00RtreeMatchArg\x00SELECT * FROM %Q.%Q\x00UNIQUE constraint failed: %s.%s\x00rtree constraint failed: %s.(%s<=%s)\x00ALTER TABLE %Q.'%q_node' RENAME TO \"%w_node\";ALTER TABLE %Q.'%q_parent' RENAME TO \"%w_parent\";ALTER TABLE %Q.'%q_rowid' RENAME TO \"%w_rowid\";\x00SELECT stat FROM %Q.sqlite_stat1 WHERE tbl = '%q_rowid'\x00node\x00CREATE TABLE \"%w\".\"%w_rowid\"(rowid INTEGER PRIMARY KEY,nodeno\x00,a%d\x00);CREATE TABLE \"%w\".\"%w_node\"(nodeno INTEGER PRIMARY KEY,data);\x00CREATE TABLE \"%w\".\"%w_parent\"(nodeno INTEGER PRIMARY KEY,parentnode);\x00INSERT INTO \"%w\".\"%w_node\"VALUES(1,zeroblob(%d))\x00INSERT INTO\"%w\".\"%w_rowid\"(rowid,nodeno)VALUES(?1,?2)ON CONFLICT(rowid)DO UPDATE SET nodeno=excluded.nodeno\x00SELECT * FROM \"%w\".\"%w_rowid\" WHERE rowid=?1\x00UPDATE \"%w\".\"%w_rowid\"SET \x00a%d=coalesce(?%d,a%d)\x00a%d=?%d\x00 WHERE rowid=?1\x00INSERT OR REPLACE INTO '%q'.'%q_node' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_node' WHERE nodeno = ?1\x00SELECT nodeno FROM '%q'.'%q_rowid' WHERE rowid = ?1\x00INSERT OR REPLACE INTO '%q'.'%q_rowid' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_rowid' WHERE rowid = ?1\x00SELECT parentnode FROM '%q'.'%q_parent' WHERE nodeno = ?1\x00INSERT OR REPLACE INTO '%q'.'%q_parent' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_parent' WHERE nodeno = ?1\x00PRAGMA %Q.page_size\x00SELECT length(data) FROM '%q'.'%q_node' WHERE nodeno = 1\x00undersize RTree blobs in \"%q_node\"\x00Wrong number of columns for an rtree table\x00Too few columns for an rtree table\x00Too many columns for an rtree table\x00Auxiliary rtree columns must be last\x00CREATE TABLE x(%.*s INT\x00,%.*s\x00);\x00,%.*s REAL\x00,%.*s INT\x00{%lld\x00 %g\x00}\x00Invalid argument to rtreedepth()\x00%z%s%z\x00SELECT data FROM %Q.'%q_node' WHERE nodeno=?\x00Node %lld missing from database\x00SELECT parentnode FROM %Q.'%q_parent' WHERE nodeno=?1\x00SELECT nodeno FROM %Q.'%q_rowid' WHERE rowid=?1\x00Mapping (%lld -> %lld) missing from %s table\x00%_rowid\x00%_parent\x00Found (%lld -> %lld) in %s table, expected (%lld -> %lld)\x00Dimension %d of cell %d on node %lld is corrupt\x00Dimension %d of cell %d on node %lld is corrupt relative to parent\x00Node %lld is too small (%d bytes)\x00Rtree depth out of range (%d)\x00Node %lld is too small for cell count of %d (%d bytes)\x00SELECT count(*) FROM %Q.'%q%s'\x00Wrong number of entries in %%%s table - expected %lld, actual %lld\x00SELECT * FROM %Q.'%q_rowid'\x00Schema corrupt or not an rtree\x00_rowid\x00_parent\x00END\x00wrong number of arguments to function rtreecheck()\x00[\x00[%!g,%!g],\x00[%!g,%!g]]\x00\x00CREATE TABLE x(_shape\x00,%s\x00rtree\x00fullscan\x00_shape does not contain a valid polygon\x00geopoly_overlap\x00geopoly_within\x00geopoly\x00geopoly_area\x00geopoly_blob\x00geopoly_json\x00geopoly_svg\x00geopoly_contains_point\x00geopoly_debug\x00geopoly_bbox\x00geopoly_xform\x00geopoly_regular\x00geopoly_ccw\x00geopoly_group_bbox\x00rtreenode\x00rtreedepth\x00rtreecheck\x00rtree_i32\x00corrupt fossil delta\x00DROP TRIGGER IF EXISTS temp.rbu_insert_tr;DROP TRIGGER IF EXISTS temp.rbu_update1_tr;DROP TRIGGER IF EXISTS temp.rbu_update2_tr;DROP TRIGGER IF 
EXISTS temp.rbu_delete_tr;\x00SELECT rbu_target_name(name, type='view') AS target, name FROM sqlite_schema WHERE type IN ('table', 'view') AND target IS NOT NULL %s ORDER BY name\x00AND rootpage!=0 AND rootpage IS NOT NULL\x00SELECT name, rootpage, sql IS NULL OR substr(8, 6)=='UNIQUE' FROM main.sqlite_schema WHERE type='index' AND tbl_name = ?\x00SELECT (sql COLLATE nocase BETWEEN 'CREATE VIRTUAL' AND 'CREATE VIRTUAM'), rootpage FROM sqlite_schema WHERE name=%Q\x00PRAGMA index_list=%Q\x00SELECT rootpage FROM sqlite_schema WHERE name = %Q\x00PRAGMA table_info=%Q\x00PRAGMA main.index_list = %Q\x00PRAGMA main.index_xinfo = %Q\x00SELECT * FROM '%q'\x00rbu_\x00rbu_rowid\x00table %q %s rbu_rowid column\x00may not have\x00requires\x00PRAGMA table_info(%Q)\x00column missing from %q: %s\x00%z%s\"%w\"\x00%z%s%s\"%w\"%s\x00SELECT max(_rowid_) FROM \"%s%w\"\x00 WHERE _rowid_ > %lld \x00 DESC\x00quote(\x00||','||\x00SELECT %s FROM \"%s%w\" ORDER BY %s LIMIT 1\x00 WHERE (%s) > (%s) \x00_rowid_\x00%z%s \"%w\" COLLATE %Q\x00%z%s \"rbu_imp_%d%w\" COLLATE %Q DESC\x00%z%s quote(\"rbu_imp_%d%w\")\x00SELECT %s FROM \"rbu_imp_%w\" ORDER BY %s LIMIT 1\x00%z%s%s\x00(%s) > (%s)\x00%z%s(%.*s) COLLATE %Q\x00%z%s\"%w\" COLLATE %Q\x00%z%s\"rbu_imp_%d%w\"%s\x00%z%s\"rbu_imp_%d%w\" %s COLLATE %Q\x00%z%s\"rbu_imp_%d%w\" IS ?\x00%z%s%s.\"%w\"\x00%z%sNULL\x00%z, %s._rowid_\x00_rowid_ = ?%d\x00%z%sc%d=?%d\x00_rowid_ = (SELECT id FROM rbu_imposter2 WHERE %z)\x00%z%s\"%w\"=?%d\x00invalid rbu_control value\x00%z%s\"%w\"=rbu_delta(\"%w\", ?%d)\x00%z%s\"%w\"=rbu_fossil_delta(\"%w\", ?%d)\x00PRIMARY KEY(\x00%z%s\"%w\"%s\x00%z)\x00SELECT name FROM sqlite_schema WHERE rootpage = ?\x00%z%sc%d %s COLLATE %Q\x00%z%sc%d%s\x00%z, id INTEGER\x00CREATE TABLE rbu_imposter2(%z, PRIMARY KEY(%z)) WITHOUT ROWID\x00PRIMARY KEY \x00%z%s\"%w\" %s %sCOLLATE %Q%s\x00 NOT NULL\x00%z, %z\x00CREATE TABLE \"rbu_imp_%w\"(%z)%s\x00 WITHOUT ROWID\x00INSERT INTO %s.'rbu_tmp_%q'(rbu_control,%s%s) VALUES(%z)\x00SELECT trim(sql) FROM sqlite_schema WHERE type='index' AND name=?\x00 LIMIT -1 OFFSET %d\x00CREATE TABLE \"rbu_imp_%w\"( %s, PRIMARY KEY( %s ) ) WITHOUT ROWID\x00INSERT INTO \"rbu_imp_%w\" VALUES(%s)\x00DELETE FROM \"rbu_imp_%w\" WHERE %s\x00SELECT %s, 0 AS rbu_control FROM '%q' %s %s %s ORDER BY %s%s\x00AND\x00WHERE\x00SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' %s ORDER BY %s%s\x00SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' %s UNION ALL SELECT %s, rbu_control FROM '%q' %s %s typeof(rbu_control)='integer' AND rbu_control!=1 ORDER BY %s%s\x00rbu_imp_\x00INSERT INTO \"%s%w\"(%s%s) VALUES(%s)\x00, _rowid_\x00DELETE FROM \"%s%w\" WHERE %s\x00, rbu_rowid\x00CREATE TABLE IF NOT EXISTS %s.'rbu_tmp_%q' AS SELECT *%s FROM '%q' WHERE 0;\x00, 0 AS rbu_rowid\x00CREATE TEMP TRIGGER rbu_delete_tr BEFORE DELETE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(3, %s);END;CREATE TEMP TRIGGER rbu_update1_tr BEFORE UPDATE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(3, %s);END;CREATE TEMP TRIGGER rbu_update2_tr AFTER UPDATE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(4, %s);END;\x00CREATE TEMP TRIGGER rbu_insert_tr AFTER INSERT ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(0, %s);END;\x00,_rowid_ \x00,rbu_rowid\x00SELECT %s,%s rbu_control%s FROM '%q'%s %s %s %s\x000 AS \x00UPDATE \"%s%w\" SET %s WHERE %s\x00SELECT k, v FROM %s.rbu_state\x00file:///%s-vacuum?modeof=%s\x00ATTACH %Q AS stat\x00CREATE TABLE IF NOT EXISTS %s.rbu_state(k INTEGER PRIMARY KEY, v)\x00cannot vacuum wal mode 
database\x00file:%s-vactmp?rbu_memory=1%s%s\x00&\x00rbu_tmp_insert\x00rbu_fossil_delta\x00rbu_target_name\x00SELECT * FROM sqlite_schema\x00rbu vfs not found\x00PRAGMA main.wal_checkpoint=restart\x00rbu_exclusive_checkpoint\x00%s-oal\x00%s-wal\x00PRAGMA schema_version\x00PRAGMA schema_version = %d\x00INSERT OR REPLACE INTO %s.rbu_state(k, v) VALUES (%d, %d), (%d, %Q), (%d, %Q), (%d, %d), (%d, %d), (%d, %lld), (%d, %lld), (%d, %lld), (%d, %lld), (%d, %Q) \x00PRAGMA main.%s\x00PRAGMA main.%s = %d\x00PRAGMA writable_schema=1\x00SELECT sql FROM sqlite_schema WHERE sql!='' AND rootpage!=0 AND name!='sqlite_sequence' ORDER BY type DESC\x00SELECT * FROM sqlite_schema WHERE rootpage=0 OR rootpage IS NULL\x00INSERT INTO sqlite_schema VALUES(?,?,?,?,?)\x00PRAGMA writable_schema=0\x00DELETE FROM %s.'rbu_tmp_%q'\x00rbu_state mismatch error\x00rbu_vfs_%d\x00SELECT count(*) FROM sqlite_schema WHERE type='index' AND tbl_name = %Q\x00rbu_index_cnt\x00SELECT 1 FROM sqlite_schema WHERE tbl_name = 'rbu_count'\x00SELECT sum(cnt * (1 + rbu_index_cnt(rbu_target_name(tbl))))FROM rbu_count\x00cannot update wal mode database\x00database modified during rbu %s\x00vacuum\x00update\x00BEGIN IMMEDIATE\x00PRAGMA journal_mode=off\x00-vactmp\x00DELETE FROM stat.rbu_state\x00rbu/zipvfs setup error\x00rbu(%s)/%z\x00rbu_memory\x00SELECT 0, 'tbl', '', 0, '', 1 UNION ALL SELECT 1, 'idx', '', 0, '', 2 UNION ALL SELECT 2, 'stat', '', 0, '', 0\x00PRAGMA '%q'.table_info('%q')\x00%z%s\"%w\".\"%w\".\"%w\"=\"%w\".\"%w\".\"%w\"\x00%z%s\"%w\".\"%w\".\"%w\" IS NOT \"%w\".\"%w\".\"%w\"\x00 OR \x00SELECT * FROM \"%w\".\"%w\" WHERE NOT EXISTS ( SELECT 1 FROM \"%w\".\"%w\" WHERE %s)\x00SELECT * FROM \"%w\".\"%w\", \"%w\".\"%w\" WHERE %s AND (%z)\x00table schemas do not match\x00SELECT tbl, ?2, stat FROM %Q.sqlite_stat1 WHERE tbl IS ?1 AND idx IS (CASE WHEN ?2=X'' THEN NULL ELSE ?2 END)\x00SELECT * FROM \x00 WHERE \x00 IS ?\x00SAVEPOINT changeset\x00RELEASE changeset\x00UPDATE main.\x00 SET \x00 = ?\x00idx IS CASE WHEN length(?4)=0 AND typeof(?4)='blob' THEN NULL ELSE ?4 END \x00DELETE FROM main.\x00 AND (?\x00AND \x00INSERT INTO main.\x00) VALUES(?\x00, ?\x00INSERT INTO main.sqlite_stat1 VALUES(?1, CASE WHEN length(?2)=0 AND typeof(?2)='blob' THEN NULL ELSE ?2 END, ?3)\x00DELETE FROM main.sqlite_stat1 WHERE tbl=?1 AND idx IS CASE WHEN length(?2)=0 AND typeof(?2)='blob' THEN NULL ELSE ?2 END AND (?4 OR stat IS ?3)\x00SAVEPOINT replace_op\x00RELEASE replace_op\x00SAVEPOINT changeset_apply\x00PRAGMA defer_foreign_keys = 1\x00sqlite3changeset_apply(): no such table: %s\x00sqlite3changeset_apply(): table %s has %d columns, expected %d or more\x00sqlite3changeset_apply(): primary key mismatch for table %s\x00PRAGMA defer_foreign_keys = 0\x00RELEASE changeset_apply\x00ROLLBACK TO changeset_apply\x00fts5: parser stack overflow\x00fts5: syntax error near \"%.*s\"\x00%z%.*s\x00wrong number of arguments to function highlight()\x00wrong number of arguments to function snippet()\x00snippet\x00highlight\x00bm25\x00prefix\x00malformed prefix=... directive\x00too many prefix indexes (max %d)\x00prefix length out of range (max 999)\x00tokenize\x00multiple tokenize=... directives\x00parse error in tokenize directive\x00content\x00multiple content=... directives\x00%Q.%Q\x00content_rowid\x00multiple content_rowid=... directives\x00columnsize\x00malformed columnsize=... directive\x00columns\x00malformed detail=... 
directive\x00unrecognized option: \"%.*s\"\x00reserved fts5 column name: %s\x00unindexed\x00unrecognized column option: %s\x00T.%Q\x00, T.%Q\x00, T.c%d\x00reserved fts5 table name: %s\x00parse error in \"%s\"\x00docsize\x00%Q.'%q_%s'\x00CREATE TABLE x(\x00%z%s%Q\x00%z, %Q HIDDEN, %s HIDDEN)\x00pgsz\x00hashsize\x00automerge\x00usermerge\x00crisismerge\x00SELECT k, v FROM %Q.'%q_config'\x00version\x00invalid fts5 file format (found %d, expected %d) - run 'rebuild'\x00unterminated string\x00fts5: syntax error near \"%.1s\"\x00OR\x00NOT\x00NEAR\x00expected integer, got \"%.*s\"\x00fts5: column queries are not supported (detail=none)\x00fts5: %s queries are not supported (detail!=full)\x00phrase\x00block\x00REPLACE INTO '%q'.'%q_data'(id, block) VALUES(?,?)\x00DELETE FROM '%q'.'%q_data' WHERE id>=? AND id<=?\x00DELETE FROM '%q'.'%q_idx' WHERE segid=?\x00PRAGMA %Q.data_version\x00SELECT pgno FROM '%q'.'%q_idx' WHERE segid=? AND term<=? ORDER BY term DESC LIMIT 1\x00INSERT INTO '%q'.'%q_idx'(segid,term,pgno) VALUES(?,?,?)\x00%s_data\x00id INTEGER PRIMARY KEY, block BLOB\x00segid, term, pgno, PRIMARY KEY(segid, term)\x00SELECT segid, term, (pgno>>1), (pgno&1) FROM %Q.'%q_idx' WHERE segid=%d ORDER BY 1, 2\x00\x00\x00\x00\x00\x00recursively defined fts5 content table\x00SELECT rowid, rank FROM %Q.%Q ORDER BY %s(\"%w\"%s%s) %s\x00DESC\x00ASC\x00reads\x00unknown special query: %.*s\x00SELECT %s\x00no such function: %s\x00parse error in rank function: %s\x00%s: table does not support scanning\x00delete-all\x00'delete-all' may only be used with a contentless or external content fts5 table\x00rebuild\x00'rebuild' may not be used with a contentless fts5 table\x00merge\x00integrity-check\x00cannot %s contentless fts5 table: %s\x00DELETE from\x00no such cursor: %lld\x00no such tokenizer: %s\x00error in tokenizer constructor\x00fts5_api_ptr\x00fts5: 2023-02-21 18:09:37 05941c2a04037fc3ed2ffae11f5d2260706f89431f463518740f72ada350866d\x00config\x00fts5\x00fts5_source_id\x00SELECT %s FROM %s T WHERE T.%Q >= ? AND T.%Q <= ? ORDER BY T.%Q ASC\x00SELECT %s FROM %s T WHERE T.%Q <= ? AND T.%Q >= ? 
ORDER BY T.%Q DESC\x00SELECT %s FROM %s T WHERE T.%Q=?\x00INSERT INTO %Q.'%q_content' VALUES(%s)\x00REPLACE INTO %Q.'%q_content' VALUES(%s)\x00DELETE FROM %Q.'%q_content' WHERE id=?\x00REPLACE INTO %Q.'%q_docsize' VALUES(?,?)\x00DELETE FROM %Q.'%q_docsize' WHERE id=?\x00SELECT sz FROM %Q.'%q_docsize' WHERE id=?\x00REPLACE INTO %Q.'%q_config' VALUES(?,?)\x00SELECT %s FROM %s AS T\x00DROP TABLE IF EXISTS %Q.'%q_data';DROP TABLE IF EXISTS %Q.'%q_idx';DROP TABLE IF EXISTS %Q.'%q_config';\x00DROP TABLE IF EXISTS %Q.'%q_docsize';\x00DROP TABLE IF EXISTS %Q.'%q_content';\x00ALTER TABLE %Q.'%q_%s' RENAME TO '%q_%s';\x00CREATE TABLE %Q.'%q_%q'(%s)%s\x00fts5: error creating shadow table %q_%s: %s\x00id INTEGER PRIMARY KEY\x00, c%d\x00id INTEGER PRIMARY KEY, sz BLOB\x00k PRIMARY KEY, v\x00DELETE FROM %Q.'%q_data';DELETE FROM %Q.'%q_idx';\x00DELETE FROM %Q.'%q_docsize';\x00SELECT count(*) FROM %Q.'%q_%s'\x00tokenchars\x00separators\x00L* N* Co\x00categories\x00remove_diacritics\x00unicode61\x00al\x00ance\x00ence\x00er\x00ic\x00able\x00ible\x00ant\x00ement\x00ment\x00ent\x00ion\x00ou\x00ism\x00ate\x00iti\x00ous\x00ive\x00ize\x00at\x00bl\x00ble\x00iz\x00ational\x00tional\x00tion\x00enci\x00anci\x00izer\x00logi\x00bli\x00alli\x00entli\x00eli\x00e\x00ousli\x00ization\x00ation\x00ator\x00alism\x00iveness\x00fulness\x00ful\x00ousness\x00aliti\x00iviti\x00biliti\x00ical\x00ness\x00icate\x00iciti\x00ative\x00alize\x00eed\x00ee\x00ed\x00ing\x00case_sensitive\x00ascii\x00porter\x00trigram\x00col\x00row\x00instance\x00fts5vocab: unknown table type: %Q\x00CREATE TABlE vocab(term, col, doc, cnt)\x00CREATE TABlE vocab(term, doc, cnt)\x00CREATE TABlE vocab(term, doc, col, offset)\x00wrong number of vtable arguments\x00recursive definition for %s.%s\x00SELECT t.%Q FROM %Q.%Q AS t WHERE t.%Q MATCH '*id'\x00no such fts5 table: %s.%s\x00fts5vocab\x002023-02-21 18:09:37 05941c2a04037fc3ed2ffae11f5d2260706f89431f463518740f72ada350866d\x00" +var ts1 = "3.41.2\x00ATOMIC_INTRINSICS=1\x00COMPILER=gcc-10.2.1 20210110\x00DEFAULT_AUTOVACUUM\x00DEFAULT_CACHE_SIZE=-2000\x00DEFAULT_FILE_FORMAT=4\x00DEFAULT_JOURNAL_SIZE_LIMIT=-1\x00DEFAULT_MEMSTATUS=0\x00DEFAULT_MMAP_SIZE=0\x00DEFAULT_PAGE_SIZE=4096\x00DEFAULT_PCACHE_INITSZ=20\x00DEFAULT_RECURSIVE_TRIGGERS\x00DEFAULT_SECTOR_SIZE=4096\x00DEFAULT_SYNCHRONOUS=2\x00DEFAULT_WAL_AUTOCHECKPOINT=1000\x00DEFAULT_WAL_SYNCHRONOUS=2\x00DEFAULT_WORKER_THREADS=0\x00ENABLE_COLUMN_METADATA\x00ENABLE_FTS5\x00ENABLE_GEOPOLY\x00ENABLE_MATH_FUNCTIONS\x00ENABLE_MEMORY_MANAGEMENT\x00ENABLE_OFFSET_SQL_FUNC\x00ENABLE_PREUPDATE_HOOK\x00ENABLE_RBU\x00ENABLE_RTREE\x00ENABLE_SESSION\x00ENABLE_SNAPSHOT\x00ENABLE_STAT4\x00ENABLE_UNLOCK_NOTIFY\x00LIKE_DOESNT_MATCH_BLOBS\x00MALLOC_SOFT_LIMIT=1024\x00MAX_ATTACHED=10\x00MAX_COLUMN=2000\x00MAX_COMPOUND_SELECT=500\x00MAX_DEFAULT_PAGE_SIZE=8192\x00MAX_EXPR_DEPTH=1000\x00MAX_FUNCTION_ARG=127\x00MAX_LENGTH=1000000000\x00MAX_LIKE_PATTERN_LENGTH=50000\x00MAX_MMAP_SIZE=0x7fff0000\x00MAX_PAGE_COUNT=1073741823\x00MAX_PAGE_SIZE=65536\x00MAX_SQL_LENGTH=1000000000\x00MAX_TRIGGER_DEPTH=1000\x00MAX_VARIABLE_NUMBER=32766\x00MAX_VDBE_OP=250000000\x00MAX_WORKER_THREADS=8\x00MUTEX_NOOP\x00SOUNDEX\x00SYSTEM_MALLOC\x00TEMP_STORE=1\x00THREADSAFE=1\x00BINARY\x00ANY\x00BLOB\x00INT\x00INTEGER\x00REAL\x00TEXT\x0020b:20e\x0020c:20e\x0020e\x0040f-21a-21d\x00now\x00local time unavailable\x00second\x00minute\x00hour\x00\x00\x00day\x00\x00\x00\x00month\x00\x00year\x00\x00\x00auto\x00julianday\x00localtime\x00unixepoch\x00utc\x00weekday \x00start of 
\x00month\x00year\x00day\x00%02d\x00%06.3f\x00%03d\x00%.16g\x00%lld\x00%04d\x00date\x00time\x00datetime\x00strftime\x00current_time\x00current_timestamp\x00current_date\x00failed to allocate %u bytes of memory\x00failed memory resize %u to %u bytes\x00out of memory\x000123456789ABCDEF0123456789abcdef\x00-x0\x00X0\x00%\x00NaN\x00Inf\x00\x00NULL\x00(NULL)\x00.\x00(join-%u)\x00(subquery-%u)\x00thstndrd\x00922337203685477580\x00API call with %s database connection pointer\x00unopened\x00invalid\x00Savepoint\x00AutoCommit\x00Transaction\x00Checkpoint\x00JournalMode\x00Vacuum\x00VFilter\x00VUpdate\x00Init\x00Goto\x00Gosub\x00InitCoroutine\x00Yield\x00MustBeInt\x00Jump\x00Once\x00If\x00IfNot\x00IsType\x00Not\x00IfNullRow\x00SeekLT\x00SeekLE\x00SeekGE\x00SeekGT\x00IfNotOpen\x00IfNoHope\x00NoConflict\x00NotFound\x00Found\x00SeekRowid\x00NotExists\x00Last\x00IfSmaller\x00SorterSort\x00Sort\x00Rewind\x00SorterNext\x00Prev\x00Next\x00IdxLE\x00IdxGT\x00IdxLT\x00Or\x00And\x00IdxGE\x00RowSetRead\x00RowSetTest\x00Program\x00FkIfZero\x00IsNull\x00NotNull\x00Ne\x00Eq\x00Gt\x00Le\x00Lt\x00Ge\x00ElseEq\x00IfPos\x00IfNotZero\x00DecrJumpZero\x00IncrVacuum\x00VNext\x00Filter\x00PureFunc\x00Function\x00Return\x00EndCoroutine\x00HaltIfNull\x00Halt\x00Integer\x00Int64\x00String\x00BeginSubrtn\x00Null\x00SoftNull\x00Blob\x00Variable\x00Move\x00Copy\x00SCopy\x00IntCopy\x00FkCheck\x00ResultRow\x00CollSeq\x00AddImm\x00RealAffinity\x00Cast\x00Permutation\x00Compare\x00IsTrue\x00ZeroOrNull\x00Offset\x00Column\x00TypeCheck\x00Affinity\x00MakeRecord\x00Count\x00ReadCookie\x00SetCookie\x00ReopenIdx\x00BitAnd\x00BitOr\x00ShiftLeft\x00ShiftRight\x00Add\x00Subtract\x00Multiply\x00Divide\x00Remainder\x00Concat\x00OpenRead\x00OpenWrite\x00BitNot\x00OpenDup\x00OpenAutoindex\x00String8\x00OpenEphemeral\x00SorterOpen\x00SequenceTest\x00OpenPseudo\x00Close\x00ColumnsUsed\x00SeekScan\x00SeekHit\x00Sequence\x00NewRowid\x00Insert\x00RowCell\x00Delete\x00ResetCount\x00SorterCompare\x00SorterData\x00RowData\x00Rowid\x00NullRow\x00SeekEnd\x00IdxInsert\x00SorterInsert\x00IdxDelete\x00DeferredSeek\x00IdxRowid\x00FinishSeek\x00Destroy\x00Clear\x00ResetSorter\x00CreateBtree\x00SqlExec\x00ParseSchema\x00LoadAnalysis\x00DropTable\x00DropIndex\x00Real\x00DropTrigger\x00IntegrityCk\x00RowSetAdd\x00Param\x00FkCounter\x00MemMax\x00OffsetLimit\x00AggInverse\x00AggStep\x00AggStep1\x00AggValue\x00AggFinal\x00Expire\x00CursorLock\x00CursorUnlock\x00TableLock\x00VBegin\x00VCreate\x00VDestroy\x00VOpen\x00VInitIn\x00VColumn\x00VRename\x00Pagecount\x00MaxPgcnt\x00ClrSubtype\x00FilterAdd\x00Trace\x00CursorHint\x00ReleaseReg\x00Noop\x00Explain\x00Abortable\x00open\x00close\x00access\x00getcwd\x00stat\x00fstat\x00ftruncate\x00fcntl\x00read\x00pread\x00pread64\x00write\x00pwrite\x00pwrite64\x00fchmod\x00fallocate\x00unlink\x00openDirectory\x00mkdir\x00rmdir\x00fchown\x00geteuid\x00mmap\x00munmap\x00mremap\x00getpagesize\x00readlink\x00lstat\x00ioctl\x00attempt to open \"%s\" as file descriptor %d\x00/dev/null\x00os_unix.c:%d: (%d) %s(%s) - %s\x00cannot fstat db file %s\x00file unlinked while open: %s\x00multiple links to file: %s\x00file renamed while open: %s\x00%s\x00full_fsync\x00%s-shm\x00readonly_shm\x00psow\x00unix-excl\x00%s.lock\x00/var/tmp\x00/usr/tmp\x00/tmp\x00SQLITE_TMPDIR\x00TMPDIR\x00%s/etilqs_%llx%c\x00modeof\x00fsync\x00/dev/urandom\x00unix\x00unix-none\x00unix-dotfile\x00memdb\x00memdb(%p,%lld)\x00PRAGMA \"%w\".page_count\x00ATTACH x AS %Q\x00recovered %d pages from %s\x00-journal\x00-wal\x00nolock\x00immutable\x00PRAGMA 
table_list\x00recovered %d frames from WAL file %s\x00cannot limit WAL size: %s\x00SQLite format 3\x00:memory:\x00@ \x00\n\x00invalid page number %d\x002nd reference to page %d\x00Failed to read ptrmap key=%d\x00Bad ptr map entry key=%d expected=(%d,%d) got=(%d,%d)\x00failed to get page %d\x00freelist leaf count too big on page %d\x00%s is %d but should be %d\x00size\x00overflow list length\x00Page %u: \x00unable to get the page. error code=%d\x00btreeInitPage() returns error code %d\x00free space corruption\x00On tree page %u cell %d: \x00On page %u at right child: \x00Offset %d out of range %d..%d\x00Extends off end of page\x00Rowid %lld out of order\x00Child page depth differs\x00Multiple uses for byte %u of page %u\x00Fragmentation of %d bytes reported as %d on page %u\x00Main freelist: \x00max rootpage (%d) disagrees with header (%d)\x00incremental_vacuum enabled with a max rootpage of zero\x00Page %d is never used\x00Pointer map page %d is referenced\x00unknown database %s\x00destination database is in use\x00source and destination must be distinct\x00%!.15g\x00-\x00%s%s\x00k(%d\x00B\x00,%s%s%s\x00N.\x00)\x00%.18s-%s\x00%s(%d)\x00%d\x00(blob)\x00vtab:%p\x00%c%u\x00]\x00program\x00?\x008\x0016LE\x0016BE\x00addr\x00opcode\x00p1\x00p2\x00p3\x00p4\x00p5\x00comment\x00id\x00parent\x00notused\x00detail\x00%.4c%s%.16c\x00MJ delete: %s\x00MJ collide: %s\x00-mj%06X9%02X\x00FOREIGN KEY constraint failed\x00a CHECK constraint\x00a generated column\x00an index\x00non-deterministic use of %s() in %s\x00API called with finalized prepared statement\x00API called with NULL prepared statement\x00string or blob too big\x00bind on a busy prepared statement: [%s]\x00-- \x00'%.*q'\x00zeroblob(%d)\x00x'\x00%02x\x00'\x00%s constraint failed\x00%z: %s\x00abort at %d in [%s]: %s\x00cannot store %s value in %s column %s.%s\x00cannot open savepoint - SQL statements in progress\x00no such savepoint: %s\x00cannot release savepoint - SQL statements in progress\x00cannot commit transaction - SQL statements in progress\x00cannot start a transaction within a transaction\x00cannot rollback - no transaction is active\x00cannot commit - no transaction is active\x00database schema has changed\x00index corruption\x00sqlite_master\x00SELECT*FROM\"%w\".%s WHERE %s ORDER BY rowid\x00too many levels of trigger recursion\x00cannot change %s wal mode from within a transaction\x00into\x00out of\x00database table is locked: %s\x00ValueList\x00-- %s\x00statement aborts at %d: [%s] %s\x00NOT NULL\x00UNIQUE\x00CHECK\x00FOREIGN KEY\x00cannot open value of type %s\x00null\x00real\x00integer\x00no such rowid: %lld\x00cannot open virtual table: %s\x00cannot open table without rowid: %s\x00cannot open view: %s\x00no such column: \"%s\"\x00foreign key\x00indexed\x00cannot open %s column for writing\x00sqlite_\x00sqlite_temp_master\x00sqlite_temp_schema\x00sqlite_schema\x00main\x00*\x00new\x00old\x00excluded\x00misuse of aliased aggregate %s\x00misuse of aliased window function %s\x00row value misused\x00double-quoted string literal: \"%w\"\x00coalesce\x00no such column\x00ambiguous column name\x00%s: %s.%s.%s\x00%s: %s.%s\x00%s: %s\x00partial index WHERE clauses\x00index expressions\x00CHECK constraints\x00generated columns\x00%s prohibited in %s\x00the \".\" operator\x00second argument to %#T() must be a constant between 0.0 and 1.0\x00not authorized to use function: %#T\x00non-deterministic functions\x00%#T() may not be used as a window function\x00window\x00aggregate\x00misuse of %s function %#T()\x00no such function: %#T\x00wrong 
number of arguments to function %#T()\x00FILTER may not be used with non-aggregate %#T()\x00subqueries\x00parameters\x00%r %s BY term out of range - should be between 1 and %d\x00too many terms in ORDER BY clause\x00ORDER\x00%r ORDER BY term does not match any column in the result set\x00too many terms in %s BY clause\x00HAVING clause on a non-aggregate query\x00GROUP\x00aggregate functions are not allowed in the GROUP BY clause\x00Expression tree is too large (maximum depth %d)\x00IN(...) element has %d term%s - expected %d\x00s\x000\x00too many arguments on function %T\x00unsafe use of %#T()\x00variable number must be between ?1 and ?%d\x00too many SQL variables\x00%d columns assigned %d values\x00too many columns in %s\x00true\x00false\x00_ROWID_\x00ROWID\x00OID\x00USING ROWID SEARCH ON TABLE %s FOR IN-OPERATOR\x00USING INDEX %s FOR IN-OPERATOR\x00sub-select returns %d columns - expected %d\x00REUSE LIST SUBQUERY %d\x00%sLIST SUBQUERY %d\x00CORRELATED \x00REUSE SUBQUERY %d\x00%sSCALAR SUBQUERY %d\x001\x000x\x00hex literal too big: %s%#T\x00generated column loop on \"%s\"\x00blob\x00text\x00numeric\x00flexnum\x00none\x00misuse of aggregate: %#T()\x00unknown function: %#T()\x00RAISE() may only be used within a trigger-program\x00B\x00C\x00D\x00E\x00F\x00table %s may not be altered\x00SELECT 1 FROM \"%w\".sqlite_master WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%' AND sqlite_rename_test(%Q, sql, type, name, %d, %Q, %d)=NULL \x00SELECT 1 FROM temp.sqlite_master WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%' AND sqlite_rename_test(%Q, sql, type, name, 1, %Q, %d)=NULL \x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_quotefix(%Q, sql)WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%'\x00UPDATE temp.sqlite_master SET sql = sqlite_rename_quotefix('temp', sql)WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%'\x00there is already another table or index with this name: %s\x00table\x00view %s may not be altered\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, %d) WHERE (type!='index' OR tbl_name=%Q COLLATE nocase)AND name NOT LIKE 'sqliteX_%%' ESCAPE 'X'\x00UPDATE %Q.sqlite_master SET tbl_name = %Q, name = CASE WHEN type='table' THEN %Q WHEN name LIKE 'sqliteX_autoindex%%' ESCAPE 'X' AND type='index' THEN 'sqlite_autoindex_' || %Q || substr(name,%d+18) ELSE name END WHERE tbl_name=%Q COLLATE nocase AND (type='table' OR type='index' OR type='trigger');\x00sqlite_sequence\x00UPDATE \"%w\".sqlite_sequence set name = %Q WHERE name = %Q\x00UPDATE sqlite_temp_schema SET sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, 1), tbl_name = CASE WHEN tbl_name=%Q COLLATE nocase AND sqlite_rename_test(%Q, sql, type, name, 1, 'after rename', 0) THEN %Q ELSE tbl_name END WHERE type IN ('view', 'trigger')\x00after rename\x00SELECT raise(ABORT,%Q) FROM \"%w\".\"%w\"\x00Cannot add a PRIMARY KEY column\x00Cannot add a UNIQUE column\x00Cannot add a REFERENCES column with non-NULL default value\x00Cannot add a NOT NULL column with default value NULL\x00Cannot add a column with non-constant default\x00cannot add a STORED column\x00UPDATE \"%w\".sqlite_master SET sql = printf('%%.%ds, ',sql) || %Q || substr(sql,1+length(printf('%%.%ds',sql))) WHERE type = 'table' AND name = %Q\x00SELECT CASE WHEN quick_check GLOB 'CHECK*' THEN raise(ABORT,'CHECK constraint failed') ELSE raise(ABORT,'NOT NULL constraint failed') END FROM pragma_quick_check(%Q,%Q) 
WHERE quick_check GLOB 'CHECK*' OR quick_check GLOB 'NULL*'\x00virtual tables may not be altered\x00Cannot add a column to a view\x00sqlite_altertab_%s\x00view\x00virtual table\x00cannot %s %s \"%s\"\x00drop column from\x00rename columns of\x00no such column: \"%T\"\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, %d) WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND (type != 'index' OR tbl_name = %Q)\x00UPDATE temp.sqlite_master SET sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, 1) WHERE type IN ('trigger', 'view')\x00error in %s %s%s%s: %s\x00 \x00CREATE \x00\"%w\" \x00%Q%s\x00%.*s%s\x00cannot drop %s column: \"%s\"\x00PRIMARY KEY\x00cannot drop column \"%s\": no other columns exist\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_drop_column(%d, sql, %d) WHERE (type=='table' AND tbl_name=%Q COLLATE nocase)\x00after drop column\x00sqlite_rename_column\x00sqlite_rename_table\x00sqlite_rename_test\x00sqlite_drop_column\x00sqlite_rename_quotefix\x00CREATE TABLE %Q.%s(%s)\x00DELETE FROM %Q.%s WHERE %s=%Q\x00DELETE FROM %Q.%s\x00sqlite_stat1\x00tbl,idx,stat\x00sqlite_stat4\x00tbl,idx,neq,nlt,ndlt,sample\x00sqlite_stat3\x00stat_init\x00stat_push\x00%llu\x00 %llu\x00%llu \x00stat_get\x00sqlite\\_%\x00BBB\x00idx\x00tbl\x00unordered*\x00sz=[0-9]*\x00noskipscan*\x00SELECT idx,count(*) FROM %Q.sqlite_stat4 GROUP BY idx\x00SELECT idx,neq,nlt,ndlt,sample FROM %Q.sqlite_stat4\x00SELECT tbl,idx,stat FROM %Q.sqlite_stat1\x00x\x00\x00too many attached databases - max %d\x00database %s is already in use\x00database is already attached\x00attached databases must use the same text encoding as main database\x00unable to open database: %s\x00no such database: %s\x00cannot detach database %s\x00database %s is locked\x00sqlite_detach\x00sqlite_attach\x00%s cannot use variables\x00%s %T cannot reference objects in database %s\x00authorizer malfunction\x00%s.%s\x00%s.%z\x00access to %z is prohibited\x00not authorized\x00pragma_\x00no such view\x00no such table\x00corrupt database\x00unknown database %T\x00object name reserved for internal use: %s\x00temporary table name must be unqualified\x00%s %T already exists\x00there is already an index named %s\x00sqlite_returning\x00cannot use RETURNING in a trigger\x00too many columns on %s\x00always\x00generated\x00duplicate column name: %s\x00default value of column [%s] is not constant\x00cannot use DEFAULT on a generated column\x00generated columns cannot be part of the PRIMARY KEY\x00table \"%s\" has more than one primary key\x00AUTOINCREMENT is only allowed on an INTEGER PRIMARY KEY\x00virtual tables cannot use computed columns\x00virtual\x00stored\x00error in generated column \"%s\"\x00,\x00\n \x00,\n \x00\n)\x00CREATE TABLE \x00 TEXT\x00 NUM\x00 INT\x00 REAL\x00unknown datatype for %s.%s: \"%s\"\x00missing datatype for %s.%s\x00AUTOINCREMENT not allowed on WITHOUT ROWID tables\x00PRIMARY KEY missing on table %s\x00must have at least one non-generated column\x00TABLE\x00VIEW\x00CREATE %s %.*s\x00UPDATE %Q.sqlite_master SET type='%s', name=%Q, tbl_name=%Q, rootpage=#%d, sql=%Q WHERE rowid=#%d\x00CREATE TABLE %Q.sqlite_sequence(name,seq)\x00tbl_name='%q' AND type!='trigger'\x00parameters are not allowed in views\x00view %s is circularly defined\x00corrupt schema\x00UPDATE %Q.sqlite_master SET rootpage=%d WHERE #%d AND rootpage=#%d\x00sqlite_stat%d\x00DELETE FROM %Q.sqlite_sequence WHERE name=%Q\x00DELETE FROM %Q.sqlite_master WHERE tbl_name=%Q and type!='trigger'\x00table %s may not be dropped\x00use 
DROP TABLE to delete table %s\x00use DROP VIEW to delete view %s\x00foreign key on %s should reference only one column of table %T\x00number of columns in foreign key does not match the number of columns in the referenced table\x00unknown column \"%s\" in foreign key definition\x00unsupported use of NULLS %s\x00FIRST\x00LAST\x00index\x00cannot create a TEMP index on non-TEMP table \"%s\"\x00table %s may not be indexed\x00views may not be indexed\x00virtual tables may not be indexed\x00there is already a table named %s\x00index %s already exists\x00sqlite_autoindex_%s_%d\x00expressions prohibited in PRIMARY KEY and UNIQUE constraints\x00conflicting ON CONFLICT clauses specified\x00invalid rootpage\x00CREATE%s INDEX %.*s\x00 UNIQUE\x00INSERT INTO %Q.sqlite_master VALUES('index',%Q,%Q,#%d,%Q);\x00name='%q' AND type='index'\x00no such index: %S\x00index associated with UNIQUE or PRIMARY KEY constraint cannot be dropped\x00DELETE FROM %Q.sqlite_master WHERE name=%Q AND type='index'\x00too many FROM clause terms, max: %d\x00a JOIN clause is required before %s\x00ON\x00USING\x00BEGIN\x00ROLLBACK\x00COMMIT\x00RELEASE\x00unable to open a temporary database file for storing temporary tables\x00index '%q'\x00, \x00%s.rowid\x00unable to identify the object to be reindexed\x00duplicate WITH table name: %s\x00no such collation sequence: %s\x00unsafe use of virtual table \"%s\"\x00table %s may not be modified\x00cannot modify %s because it is a view\x00rows deleted\x00integer overflow\x00%.*f\x00LIKE or GLOB pattern too complex\x00ESCAPE expression must be a single character\x00%!.20e\x00%Q\x00?000\x00MATCH\x00like\x00implies_nonnull_row\x00expr_compare\x00expr_implies_expr\x00affinity\x00soundex\x00load_extension\x00sqlite_compileoption_used\x00sqlite_compileoption_get\x00unlikely\x00likelihood\x00likely\x00sqlite_offset\x00ltrim\x00rtrim\x00trim\x00min\x00max\x00typeof\x00subtype\x00length\x00instr\x00printf\x00format\x00unicode\x00char\x00abs\x00round\x00upper\x00lower\x00hex\x00unhex\x00ifnull\x00random\x00randomblob\x00nullif\x00sqlite_version\x00sqlite_source_id\x00sqlite_log\x00quote\x00last_insert_rowid\x00changes\x00total_changes\x00replace\x00zeroblob\x00substr\x00substring\x00sum\x00total\x00avg\x00count\x00group_concat\x00glob\x00ceil\x00ceiling\x00floor\x00trunc\x00ln\x00log\x00log10\x00log2\x00exp\x00pow\x00power\x00mod\x00acos\x00asin\x00atan\x00atan2\x00cos\x00sin\x00tan\x00cosh\x00sinh\x00tanh\x00acosh\x00asinh\x00atanh\x00sqrt\x00radians\x00degrees\x00pi\x00sign\x00iif\x00foreign key mismatch - \"%w\" referencing \"%w\"\x00cannot INSERT into generated column \"%s\"\x00table %S has no column named %s\x00table %S has %d columns but %d values were supplied\x00%d values for %d columns\x00UPSERT not implemented for virtual table \"%s\"\x00cannot UPSERT a view\x00rows inserted\x00sqlite3_extension_init\x00sqlite3_\x00lib\x00_init\x00no entry point [%s] in shared library [%s]\x00error during initialization: %s\x00unable to open shared library [%.*s]\x00so\x00automatic extension loading failed: 
%s\x00seq\x00from\x00to\x00on_update\x00on_delete\x00match\x00cid\x00name\x00type\x00notnull\x00dflt_value\x00pk\x00hidden\x00schema\x00ncol\x00wr\x00strict\x00seqno\x00desc\x00coll\x00key\x00builtin\x00enc\x00narg\x00flags\x00wdth\x00hght\x00flgs\x00unique\x00origin\x00partial\x00rowid\x00fkid\x00file\x00busy\x00checkpointed\x00database\x00status\x00cache_size\x00timeout\x00analysis_limit\x00application_id\x00auto_vacuum\x00automatic_index\x00busy_timeout\x00cache_spill\x00case_sensitive_like\x00cell_size_check\x00checkpoint_fullfsync\x00collation_list\x00compile_options\x00count_changes\x00data_version\x00database_list\x00default_cache_size\x00defer_foreign_keys\x00empty_result_callbacks\x00encoding\x00foreign_key_check\x00foreign_key_list\x00foreign_keys\x00freelist_count\x00full_column_names\x00fullfsync\x00function_list\x00hard_heap_limit\x00ignore_check_constraints\x00incremental_vacuum\x00index_info\x00index_list\x00index_xinfo\x00integrity_check\x00journal_mode\x00journal_size_limit\x00legacy_alter_table\x00locking_mode\x00max_page_count\x00mmap_size\x00module_list\x00optimize\x00page_count\x00page_size\x00pragma_list\x00query_only\x00quick_check\x00read_uncommitted\x00recursive_triggers\x00reverse_unordered_selects\x00schema_version\x00secure_delete\x00short_column_names\x00shrink_memory\x00soft_heap_limit\x00synchronous\x00table_info\x00table_list\x00table_xinfo\x00temp_store\x00temp_store_directory\x00threads\x00trusted_schema\x00user_version\x00wal_autocheckpoint\x00wal_checkpoint\x00writable_schema\x00onoffalseyestruextrafull\x00exclusive\x00normal\x00full\x00incremental\x00memory\x00temporary storage cannot be changed from within a transaction\x00SET NULL\x00SET DEFAULT\x00CASCADE\x00RESTRICT\x00NO ACTION\x00delete\x00persist\x00off\x00truncate\x00wal\x00w\x00a\x00sissii\x00utf8\x00utf16le\x00utf16be\x00-%T\x00fast\x00not a writable directory\x00Safety level may not be changed inside a transaction\x00reset\x00issisii\x00issisi\x00SELECT*FROM\"%w\"\x00shadow\x00sssiii\x00iisX\x00isiX\x00c\x00u\x00isisi\x00iss\x00is\x00iissssss\x00NONE\x00siX\x00*** in database %s ***\n\x00row not in PRIMARY KEY order for %s\x00NULL value in %s.%s\x00non-%s value in %s.%s\x00NUMERIC value in %s.%s\x00C\x00TEXT value in %s.%s\x00CHECK constraint failed in %s\x00row \x00 missing from index \x00rowid not at end-of-record for row \x00 of index \x00 values differ from index \x00non-unique entry in index \x00wrong # of entries in index \x00ok\x00unsupported encoding: %s\x00restart\x00ANALYZE \"%w\".\"%w\"\x00UTF8\x00UTF-8\x00UTF-16le\x00UTF-16be\x00UTF16le\x00UTF16be\x00UTF-16\x00UTF16\x00CREATE TABLE x\x00%c\"%s\"\x00(\"%s\"\x00,arg HIDDEN\x00,schema HIDDEN\x00PRAGMA \x00%Q.\x00=%Q\x00error in %s %s after %s: %s\x00malformed database schema (%s)\x00%z - %s\x00rename\x00drop column\x00add column\x00orphan index\x00CREATE TABLE x(type text,name text,tbl_name text,rootpage int,sql text)\x00unsupported file format\x00SELECT*FROM\"%w\".%s ORDER BY rowid\x00database schema is locked: %s\x00statement too long\x00unknown join type: %T%s%T%s%T\x00naturaleftouterightfullinnercross\x00a NATURAL join may not have an ON or USING clause\x00cannot join using column %s - column not present in both tables\x00ambiguous reference to %s in USING()\x00UNION ALL\x00INTERSECT\x00EXCEPT\x00UNION\x00USE TEMP B-TREE FOR %s\x00USE TEMP B-TREE FOR %sORDER BY\x00RIGHT PART OF \x00column%d\x00%.*z:%u\x00NUM\x00cannot use window functions in recursive queries\x00recursive aggregate queries not supported\x00SETUP\x00RECURSIVE 
STEP\x00SCAN %d CONSTANT ROW%s\x00S\x00COMPOUND QUERY\x00LEFT-MOST SUBQUERY\x00%s USING TEMP B-TREE\x00all VALUES must have the same number of terms\x00SELECTs to the left and right of %s do not have the same number of result columns\x00MERGE (%s)\x00LEFT\x00RIGHT\x00no such index: %s\x00'%s' is not a function\x00no such index: \"%s\"\x00multiple references to recursive table: %s\x00circular reference: %s\x00table %s has %d values for %d columns\x00multiple recursive references: %s\x00recursive reference in a subquery: %s\x00%!S\x00too many references to \"%s\": max 65535\x00access to view \"%s\" prohibited\x00..%s\x00%s.%s.%s\x00no such table: %s\x00no tables specified\x00too many columns in result set\x00DISTINCT aggregates must have exactly one argument\x00USE TEMP B-TREE FOR %s(DISTINCT)\x00SCAN %s%s%s\x00 USING COVERING INDEX \x00target object/alias may not appear in FROM clause: %s\x00expected %d columns for '%s' but got %d\x00CO-ROUTINE %!S\x00MATERIALIZE %!S\x00DISTINCT\x00GROUP BY\x00sqlite3_get_table() called with two or more incompatible queries\x00temporary trigger may not have qualified name\x00trigger\x00cannot create triggers on virtual tables\x00trigger %T already exists\x00cannot create trigger on system table\x00cannot create %s trigger on view: %S\x00BEFORE\x00AFTER\x00cannot create INSTEAD OF trigger on table: %S\x00trigger \"%s\" may not write to shadow table \"%s\"\x00INSERT INTO %Q.sqlite_master VALUES('trigger',%Q,%Q,0,'CREATE TRIGGER %q')\x00type='trigger' AND name='%q'\x00no such trigger: %S\x00DELETE FROM %Q.sqlite_master WHERE name=%Q AND type='trigger'\x00%s RETURNING is not available on virtual tables\x00DELETE\x00UPDATE\x00RETURNING may not use \"TABLE.*\" wildcards\x00-- TRIGGER %s\x00cannot UPDATE generated column \"%s\"\x00no such column: %s\x00rows updated\x00%r \x00%sON CONFLICT clause does not match any PRIMARY KEY or UNIQUE constraint\x00CRE\x00INS\x00cannot VACUUM from within a transaction\x00cannot VACUUM - SQL statements in progress\x00non-text filename\x00ATTACH %Q AS vacuum_db\x00output file already exists\x00SELECT sql FROM \"%w\".sqlite_schema WHERE type='table'AND name<>'sqlite_sequence' AND coalesce(rootpage,1)>0\x00SELECT sql FROM \"%w\".sqlite_schema WHERE type='index'\x00SELECT'INSERT INTO vacuum_db.'||quote(name)||' SELECT*FROM\"%w\".'||quote(name)FROM vacuum_db.sqlite_schema WHERE type='table'AND coalesce(rootpage,1)>0\x00INSERT INTO vacuum_db.sqlite_schema SELECT*FROM \"%w\".sqlite_schema WHERE type IN('view','trigger') OR(type='table'AND rootpage=0)\x00CREATE VIRTUAL TABLE %T\x00UPDATE %Q.sqlite_master SET type='table', name=%Q, tbl_name=%Q, rootpage=0, sql=%Q WHERE rowid=#%d\x00name=%Q AND sql=%Q\x00vtable constructor called recursively: %s\x00vtable constructor failed: %s\x00vtable constructor did not declare schema: %s\x00no such module: %s\x00\x00 AND \x00(\x00 (\x00%s=?\x00ANY(%s)\x00>\x00<\x00%s %S\x00SEARCH\x00SCAN\x00AUTOMATIC PARTIAL COVERING INDEX\x00AUTOMATIC COVERING INDEX\x00COVERING INDEX %s\x00INDEX %s\x00 USING \x00 USING INTEGER PRIMARY KEY (%s\x00>? 
AND %s\x00%c?)\x00 VIRTUAL TABLE INDEX %d:%s\x00 LEFT-JOIN\x00BLOOM FILTER ON %S (\x00rowid=?\x00MULTI-INDEX OR\x00INDEX %d\x00RIGHT-JOIN %s\x00regexp\x00ON clause references tables to its right\x00NOCASE\x00too many arguments on %s() - max %d\x00automatic index on %s(%s)\x00auto-index\x00%s.xBestIndex malfunction\x00abbreviated query algorithm search\x00no query solution\x00at most %d tables in a join\x00SCAN CONSTANT ROW\x00second argument to nth_value must be a positive integer\x00argument of ntile must be a positive integer\x00row_number\x00dense_rank\x00rank\x00percent_rank\x00cume_dist\x00ntile\x00last_value\x00nth_value\x00first_value\x00lead\x00lag\x00no such window: %s\x00RANGE with offset PRECEDING/FOLLOWING requires one ORDER BY expression\x00FILTER clause may only be used with aggregate window functions\x00misuse of aggregate: %s()\x00unsupported frame specification\x00PARTITION clause\x00ORDER BY clause\x00frame specification\x00cannot override %s of window: %s\x00DISTINCT is not supported for window functions\x00frame starting offset must be a non-negative integer\x00frame ending offset must be a non-negative integer\x00frame starting offset must be a non-negative number\x00frame ending offset must be a non-negative number\x00%s clause should come after %s not before\x00ORDER BY\x00LIMIT\x00too many terms in compound SELECT\x00syntax error after column name \"%.*s\"\x00parser stack overflow\x00unknown table option: %.*s\x00set list\x00near \"%T\": syntax error\x00qualified table names are not allowed on INSERT, UPDATE, and DELETE statements within triggers\x00the INDEXED BY clause is not allowed on UPDATE or DELETE statements within triggers\x00the NOT INDEXED clause is not allowed on UPDATE or DELETE statements within triggers\x00incomplete input\x00unrecognized token: \"%T\"\x00%s in \"%s\"\x00create\x00temp\x00temporary\x00end\x00explain\x00unable to close due to unfinalized statements or unfinished backups\x00unknown error\x00abort due to ROLLBACK\x00another row available\x00no more rows available\x00not an error\x00SQL logic error\x00access permission denied\x00query aborted\x00database is locked\x00database table is locked\x00attempt to write a readonly database\x00interrupted\x00disk I/O error\x00database disk image is malformed\x00unknown operation\x00database or disk is full\x00unable to open database file\x00locking protocol\x00constraint failed\x00datatype mismatch\x00bad parameter or other API misuse\x00authorization denied\x00column index out of range\x00file is not a database\x00notification message\x00warning message\x00unable to delete/modify user-function due to active statements\x00unable to use function %s in the requested context\x00unknown database: %s\x00unable to delete/modify collation sequence due to active statements\x00file:\x00localhost\x00invalid uri authority: %.*s\x00vfs\x00cache\x00mode\x00no such %s mode: %s\x00%s mode not allowed: %s\x00no such vfs: %s\x00shared\x00private\x00ro\x00rw\x00rwc\x00RTRIM\x00\x00\x00\x00%s at line %d of [%.10s]\x00database corruption\x00misuse\x00cannot open file\x00no such table column: %s.%s\x00SQLITE_\x00database is deadlocked\x00array\x00object\x000123456789abcdef\x00JSON cannot hold BLOB values\x00malformed JSON\x00[0]\x00JSON path error near '%q'\x00json_%s() needs an odd number of arguments\x00$[\x00$.\x00json_object() requires an even number of arguments\x00json_object() labels must be TEXT\x00set\x00insert\x00[]\x00{}\x00CREATE TABLE x(key,value,type,atom,id,parent,fullkey,path,json HIDDEN,root 
HIDDEN)\x00.%.*s\x00[%d]\x00$\x00json\x00json_array\x00json_array_length\x00json_extract\x00->\x00->>\x00json_insert\x00json_object\x00json_patch\x00json_quote\x00json_remove\x00json_replace\x00json_set\x00json_type\x00json_valid\x00json_group_array\x00json_group_object\x00json_each\x00json_tree\x00%s_node\x00data\x00DROP TABLE '%q'.'%q_node';DROP TABLE '%q'.'%q_rowid';DROP TABLE '%q'.'%q_parent';\x00RtreeMatchArg\x00SELECT * FROM %Q.%Q\x00UNIQUE constraint failed: %s.%s\x00rtree constraint failed: %s.(%s<=%s)\x00ALTER TABLE %Q.'%q_node' RENAME TO \"%w_node\";ALTER TABLE %Q.'%q_parent' RENAME TO \"%w_parent\";ALTER TABLE %Q.'%q_rowid' RENAME TO \"%w_rowid\";\x00SELECT stat FROM %Q.sqlite_stat1 WHERE tbl = '%q_rowid'\x00node\x00CREATE TABLE \"%w\".\"%w_rowid\"(rowid INTEGER PRIMARY KEY,nodeno\x00,a%d\x00);CREATE TABLE \"%w\".\"%w_node\"(nodeno INTEGER PRIMARY KEY,data);\x00CREATE TABLE \"%w\".\"%w_parent\"(nodeno INTEGER PRIMARY KEY,parentnode);\x00INSERT INTO \"%w\".\"%w_node\"VALUES(1,zeroblob(%d))\x00INSERT INTO\"%w\".\"%w_rowid\"(rowid,nodeno)VALUES(?1,?2)ON CONFLICT(rowid)DO UPDATE SET nodeno=excluded.nodeno\x00SELECT * FROM \"%w\".\"%w_rowid\" WHERE rowid=?1\x00UPDATE \"%w\".\"%w_rowid\"SET \x00a%d=coalesce(?%d,a%d)\x00a%d=?%d\x00 WHERE rowid=?1\x00INSERT OR REPLACE INTO '%q'.'%q_node' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_node' WHERE nodeno = ?1\x00SELECT nodeno FROM '%q'.'%q_rowid' WHERE rowid = ?1\x00INSERT OR REPLACE INTO '%q'.'%q_rowid' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_rowid' WHERE rowid = ?1\x00SELECT parentnode FROM '%q'.'%q_parent' WHERE nodeno = ?1\x00INSERT OR REPLACE INTO '%q'.'%q_parent' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_parent' WHERE nodeno = ?1\x00PRAGMA %Q.page_size\x00SELECT length(data) FROM '%q'.'%q_node' WHERE nodeno = 1\x00undersize RTree blobs in \"%q_node\"\x00Wrong number of columns for an rtree table\x00Too few columns for an rtree table\x00Too many columns for an rtree table\x00Auxiliary rtree columns must be last\x00CREATE TABLE x(%.*s INT\x00,%.*s\x00);\x00,%.*s REAL\x00,%.*s INT\x00{%lld\x00 %g\x00}\x00Invalid argument to rtreedepth()\x00%z%s%z\x00SELECT data FROM %Q.'%q_node' WHERE nodeno=?\x00Node %lld missing from database\x00SELECT parentnode FROM %Q.'%q_parent' WHERE nodeno=?1\x00SELECT nodeno FROM %Q.'%q_rowid' WHERE rowid=?1\x00Mapping (%lld -> %lld) missing from %s table\x00%_rowid\x00%_parent\x00Found (%lld -> %lld) in %s table, expected (%lld -> %lld)\x00Dimension %d of cell %d on node %lld is corrupt\x00Dimension %d of cell %d on node %lld is corrupt relative to parent\x00Node %lld is too small (%d bytes)\x00Rtree depth out of range (%d)\x00Node %lld is too small for cell count of %d (%d bytes)\x00SELECT count(*) FROM %Q.'%q%s'\x00Wrong number of entries in %%%s table - expected %lld, actual %lld\x00SELECT * FROM %Q.'%q_rowid'\x00Schema corrupt or not an rtree\x00_rowid\x00_parent\x00END\x00wrong number of arguments to function rtreecheck()\x00[\x00[%!g,%!g],\x00[%!g,%!g]]\x00\x00CREATE TABLE x(_shape\x00,%s\x00rtree\x00fullscan\x00_shape does not contain a valid polygon\x00geopoly_overlap\x00geopoly_within\x00geopoly\x00geopoly_area\x00geopoly_blob\x00geopoly_json\x00geopoly_svg\x00geopoly_contains_point\x00geopoly_debug\x00geopoly_bbox\x00geopoly_xform\x00geopoly_regular\x00geopoly_ccw\x00geopoly_group_bbox\x00rtreenode\x00rtreedepth\x00rtreecheck\x00rtree_i32\x00corrupt fossil delta\x00DROP TRIGGER IF EXISTS temp.rbu_insert_tr;DROP TRIGGER IF EXISTS temp.rbu_update1_tr;DROP TRIGGER IF EXISTS temp.rbu_update2_tr;DROP TRIGGER IF 
EXISTS temp.rbu_delete_tr;\x00SELECT rbu_target_name(name, type='view') AS target, name FROM sqlite_schema WHERE type IN ('table', 'view') AND target IS NOT NULL %s ORDER BY name\x00AND rootpage!=0 AND rootpage IS NOT NULL\x00SELECT name, rootpage, sql IS NULL OR substr(8, 6)=='UNIQUE' FROM main.sqlite_schema WHERE type='index' AND tbl_name = ?\x00SELECT (sql COLLATE nocase BETWEEN 'CREATE VIRTUAL' AND 'CREATE VIRTUAM'), rootpage FROM sqlite_schema WHERE name=%Q\x00PRAGMA index_list=%Q\x00SELECT rootpage FROM sqlite_schema WHERE name = %Q\x00PRAGMA table_info=%Q\x00PRAGMA main.index_list = %Q\x00PRAGMA main.index_xinfo = %Q\x00SELECT * FROM '%q'\x00rbu_\x00rbu_rowid\x00table %q %s rbu_rowid column\x00may not have\x00requires\x00PRAGMA table_info(%Q)\x00column missing from %q: %s\x00%z%s\"%w\"\x00%z%s%s\"%w\"%s\x00SELECT max(_rowid_) FROM \"%s%w\"\x00 WHERE _rowid_ > %lld \x00 DESC\x00quote(\x00||','||\x00SELECT %s FROM \"%s%w\" ORDER BY %s LIMIT 1\x00 WHERE (%s) > (%s) \x00_rowid_\x00%z%s \"%w\" COLLATE %Q\x00%z%s \"rbu_imp_%d%w\" COLLATE %Q DESC\x00%z%s quote(\"rbu_imp_%d%w\")\x00SELECT %s FROM \"rbu_imp_%w\" ORDER BY %s LIMIT 1\x00%z%s%s\x00(%s) > (%s)\x00%z%s(%.*s) COLLATE %Q\x00%z%s\"%w\" COLLATE %Q\x00%z%s\"rbu_imp_%d%w\"%s\x00%z%s\"rbu_imp_%d%w\" %s COLLATE %Q\x00%z%s\"rbu_imp_%d%w\" IS ?\x00%z%s%s.\"%w\"\x00%z%sNULL\x00%z, %s._rowid_\x00_rowid_ = ?%d\x00%z%sc%d=?%d\x00_rowid_ = (SELECT id FROM rbu_imposter2 WHERE %z)\x00%z%s\"%w\"=?%d\x00invalid rbu_control value\x00%z%s\"%w\"=rbu_delta(\"%w\", ?%d)\x00%z%s\"%w\"=rbu_fossil_delta(\"%w\", ?%d)\x00PRIMARY KEY(\x00%z%s\"%w\"%s\x00%z)\x00SELECT name FROM sqlite_schema WHERE rootpage = ?\x00%z%sc%d %s COLLATE %Q\x00%z%sc%d%s\x00%z, id INTEGER\x00CREATE TABLE rbu_imposter2(%z, PRIMARY KEY(%z)) WITHOUT ROWID\x00PRIMARY KEY \x00%z%s\"%w\" %s %sCOLLATE %Q%s\x00 NOT NULL\x00%z, %z\x00CREATE TABLE \"rbu_imp_%w\"(%z)%s\x00 WITHOUT ROWID\x00INSERT INTO %s.'rbu_tmp_%q'(rbu_control,%s%s) VALUES(%z)\x00SELECT trim(sql) FROM sqlite_schema WHERE type='index' AND name=?\x00 LIMIT -1 OFFSET %d\x00CREATE TABLE \"rbu_imp_%w\"( %s, PRIMARY KEY( %s ) ) WITHOUT ROWID\x00INSERT INTO \"rbu_imp_%w\" VALUES(%s)\x00DELETE FROM \"rbu_imp_%w\" WHERE %s\x00SELECT %s, 0 AS rbu_control FROM '%q' %s %s %s ORDER BY %s%s\x00AND\x00WHERE\x00SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' %s ORDER BY %s%s\x00SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' %s UNION ALL SELECT %s, rbu_control FROM '%q' %s %s typeof(rbu_control)='integer' AND rbu_control!=1 ORDER BY %s%s\x00rbu_imp_\x00INSERT INTO \"%s%w\"(%s%s) VALUES(%s)\x00, _rowid_\x00DELETE FROM \"%s%w\" WHERE %s\x00, rbu_rowid\x00CREATE TABLE IF NOT EXISTS %s.'rbu_tmp_%q' AS SELECT *%s FROM '%q' WHERE 0;\x00, 0 AS rbu_rowid\x00CREATE TEMP TRIGGER rbu_delete_tr BEFORE DELETE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(3, %s);END;CREATE TEMP TRIGGER rbu_update1_tr BEFORE UPDATE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(3, %s);END;CREATE TEMP TRIGGER rbu_update2_tr AFTER UPDATE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(4, %s);END;\x00CREATE TEMP TRIGGER rbu_insert_tr AFTER INSERT ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(0, %s);END;\x00,_rowid_ \x00,rbu_rowid\x00SELECT %s,%s rbu_control%s FROM '%q'%s %s %s %s\x000 AS \x00UPDATE \"%s%w\" SET %s WHERE %s\x00SELECT k, v FROM %s.rbu_state\x00file:///%s-vacuum?modeof=%s\x00ATTACH %Q AS stat\x00CREATE TABLE IF NOT EXISTS %s.rbu_state(k INTEGER PRIMARY KEY, v)\x00cannot vacuum wal mode 
database\x00file:%s-vactmp?rbu_memory=1%s%s\x00&\x00rbu_tmp_insert\x00rbu_fossil_delta\x00rbu_target_name\x00SELECT * FROM sqlite_schema\x00rbu vfs not found\x00PRAGMA main.wal_checkpoint=restart\x00rbu_exclusive_checkpoint\x00%s-oal\x00%s-wal\x00PRAGMA schema_version\x00PRAGMA schema_version = %d\x00INSERT OR REPLACE INTO %s.rbu_state(k, v) VALUES (%d, %d), (%d, %Q), (%d, %Q), (%d, %d), (%d, %d), (%d, %lld), (%d, %lld), (%d, %lld), (%d, %lld), (%d, %Q) \x00PRAGMA main.%s\x00PRAGMA main.%s = %d\x00PRAGMA writable_schema=1\x00SELECT sql FROM sqlite_schema WHERE sql!='' AND rootpage!=0 AND name!='sqlite_sequence' ORDER BY type DESC\x00SELECT * FROM sqlite_schema WHERE rootpage=0 OR rootpage IS NULL\x00INSERT INTO sqlite_schema VALUES(?,?,?,?,?)\x00PRAGMA writable_schema=0\x00DELETE FROM %s.'rbu_tmp_%q'\x00rbu_state mismatch error\x00rbu_vfs_%d\x00SELECT count(*) FROM sqlite_schema WHERE type='index' AND tbl_name = %Q\x00rbu_index_cnt\x00SELECT 1 FROM sqlite_schema WHERE tbl_name = 'rbu_count'\x00SELECT sum(cnt * (1 + rbu_index_cnt(rbu_target_name(tbl))))FROM rbu_count\x00cannot update wal mode database\x00database modified during rbu %s\x00vacuum\x00update\x00BEGIN IMMEDIATE\x00PRAGMA journal_mode=off\x00-vactmp\x00DELETE FROM stat.rbu_state\x00rbu/zipvfs setup error\x00rbu(%s)/%z\x00rbu_memory\x00SELECT 0, 'tbl', '', 0, '', 1 UNION ALL SELECT 1, 'idx', '', 0, '', 2 UNION ALL SELECT 2, 'stat', '', 0, '', 0\x00PRAGMA '%q'.table_info('%q')\x00%z%s\"%w\".\"%w\".\"%w\"=\"%w\".\"%w\".\"%w\"\x00%z%s\"%w\".\"%w\".\"%w\" IS NOT \"%w\".\"%w\".\"%w\"\x00 OR \x00SELECT * FROM \"%w\".\"%w\" WHERE NOT EXISTS ( SELECT 1 FROM \"%w\".\"%w\" WHERE %s)\x00SELECT * FROM \"%w\".\"%w\", \"%w\".\"%w\" WHERE %s AND (%z)\x00table schemas do not match\x00SELECT tbl, ?2, stat FROM %Q.sqlite_stat1 WHERE tbl IS ?1 AND idx IS (CASE WHEN ?2=X'' THEN NULL ELSE ?2 END)\x00SELECT * FROM \x00 WHERE \x00 IS ?\x00SAVEPOINT changeset\x00RELEASE changeset\x00UPDATE main.\x00 SET \x00 = ?\x00idx IS CASE WHEN length(?4)=0 AND typeof(?4)='blob' THEN NULL ELSE ?4 END \x00DELETE FROM main.\x00 AND (?\x00AND \x00INSERT INTO main.\x00) VALUES(?\x00, ?\x00INSERT INTO main.sqlite_stat1 VALUES(?1, CASE WHEN length(?2)=0 AND typeof(?2)='blob' THEN NULL ELSE ?2 END, ?3)\x00DELETE FROM main.sqlite_stat1 WHERE tbl=?1 AND idx IS CASE WHEN length(?2)=0 AND typeof(?2)='blob' THEN NULL ELSE ?2 END AND (?4 OR stat IS ?3)\x00SAVEPOINT replace_op\x00RELEASE replace_op\x00SAVEPOINT changeset_apply\x00PRAGMA defer_foreign_keys = 1\x00sqlite3changeset_apply(): no such table: %s\x00sqlite3changeset_apply(): table %s has %d columns, expected %d or more\x00sqlite3changeset_apply(): primary key mismatch for table %s\x00PRAGMA defer_foreign_keys = 0\x00RELEASE changeset_apply\x00ROLLBACK TO changeset_apply\x00fts5: parser stack overflow\x00fts5: syntax error near \"%.*s\"\x00%z%.*s\x00wrong number of arguments to function highlight()\x00wrong number of arguments to function snippet()\x00snippet\x00highlight\x00bm25\x00prefix\x00malformed prefix=... directive\x00too many prefix indexes (max %d)\x00prefix length out of range (max 999)\x00tokenize\x00multiple tokenize=... directives\x00parse error in tokenize directive\x00content\x00multiple content=... directives\x00%Q.%Q\x00content_rowid\x00multiple content_rowid=... directives\x00columnsize\x00malformed columnsize=... directive\x00columns\x00malformed detail=... 
directive\x00unrecognized option: \"%.*s\"\x00reserved fts5 column name: %s\x00unindexed\x00unrecognized column option: %s\x00T.%Q\x00, T.%Q\x00, T.c%d\x00reserved fts5 table name: %s\x00parse error in \"%s\"\x00docsize\x00%Q.'%q_%s'\x00CREATE TABLE x(\x00%z%s%Q\x00%z, %Q HIDDEN, %s HIDDEN)\x00pgsz\x00hashsize\x00automerge\x00usermerge\x00crisismerge\x00SELECT k, v FROM %Q.'%q_config'\x00version\x00invalid fts5 file format (found %d, expected %d) - run 'rebuild'\x00unterminated string\x00fts5: syntax error near \"%.1s\"\x00OR\x00NOT\x00NEAR\x00expected integer, got \"%.*s\"\x00fts5: column queries are not supported (detail=none)\x00fts5: %s queries are not supported (detail!=full)\x00phrase\x00block\x00REPLACE INTO '%q'.'%q_data'(id, block) VALUES(?,?)\x00DELETE FROM '%q'.'%q_data' WHERE id>=? AND id<=?\x00DELETE FROM '%q'.'%q_idx' WHERE segid=?\x00PRAGMA %Q.data_version\x00SELECT pgno FROM '%q'.'%q_idx' WHERE segid=? AND term<=? ORDER BY term DESC LIMIT 1\x00INSERT INTO '%q'.'%q_idx'(segid,term,pgno) VALUES(?,?,?)\x00%s_data\x00id INTEGER PRIMARY KEY, block BLOB\x00segid, term, pgno, PRIMARY KEY(segid, term)\x00SELECT segid, term, (pgno>>1), (pgno&1) FROM %Q.'%q_idx' WHERE segid=%d ORDER BY 1, 2\x00\x00\x00\x00\x00\x00recursively defined fts5 content table\x00SELECT rowid, rank FROM %Q.%Q ORDER BY %s(\"%w\"%s%s) %s\x00DESC\x00ASC\x00reads\x00unknown special query: %.*s\x00SELECT %s\x00no such function: %s\x00parse error in rank function: %s\x00%s: table does not support scanning\x00delete-all\x00'delete-all' may only be used with a contentless or external content fts5 table\x00rebuild\x00'rebuild' may not be used with a contentless fts5 table\x00merge\x00integrity-check\x00cannot %s contentless fts5 table: %s\x00DELETE from\x00no such cursor: %lld\x00no such tokenizer: %s\x00error in tokenizer constructor\x00fts5_api_ptr\x00fts5: 2023-03-22 11:56:21 0d1fc92f94cb6b76bffe3ec34d69cffde2924203304e8ffc4155597af0c191da\x00config\x00fts5\x00fts5_source_id\x00SELECT %s FROM %s T WHERE T.%Q >= ? AND T.%Q <= ? ORDER BY T.%Q ASC\x00SELECT %s FROM %s T WHERE T.%Q <= ? AND T.%Q >= ? 
ORDER BY T.%Q DESC\x00SELECT %s FROM %s T WHERE T.%Q=?\x00INSERT INTO %Q.'%q_content' VALUES(%s)\x00REPLACE INTO %Q.'%q_content' VALUES(%s)\x00DELETE FROM %Q.'%q_content' WHERE id=?\x00REPLACE INTO %Q.'%q_docsize' VALUES(?,?)\x00DELETE FROM %Q.'%q_docsize' WHERE id=?\x00SELECT sz FROM %Q.'%q_docsize' WHERE id=?\x00REPLACE INTO %Q.'%q_config' VALUES(?,?)\x00SELECT %s FROM %s AS T\x00DROP TABLE IF EXISTS %Q.'%q_data';DROP TABLE IF EXISTS %Q.'%q_idx';DROP TABLE IF EXISTS %Q.'%q_config';\x00DROP TABLE IF EXISTS %Q.'%q_docsize';\x00DROP TABLE IF EXISTS %Q.'%q_content';\x00ALTER TABLE %Q.'%q_%s' RENAME TO '%q_%s';\x00CREATE TABLE %Q.'%q_%q'(%s)%s\x00fts5: error creating shadow table %q_%s: %s\x00id INTEGER PRIMARY KEY\x00, c%d\x00id INTEGER PRIMARY KEY, sz BLOB\x00k PRIMARY KEY, v\x00DELETE FROM %Q.'%q_data';DELETE FROM %Q.'%q_idx';\x00DELETE FROM %Q.'%q_docsize';\x00SELECT count(*) FROM %Q.'%q_%s'\x00tokenchars\x00separators\x00L* N* Co\x00categories\x00remove_diacritics\x00unicode61\x00al\x00ance\x00ence\x00er\x00ic\x00able\x00ible\x00ant\x00ement\x00ment\x00ent\x00ion\x00ou\x00ism\x00ate\x00iti\x00ous\x00ive\x00ize\x00at\x00bl\x00ble\x00iz\x00ational\x00tional\x00tion\x00enci\x00anci\x00izer\x00logi\x00bli\x00alli\x00entli\x00eli\x00e\x00ousli\x00ization\x00ation\x00ator\x00alism\x00iveness\x00fulness\x00ful\x00ousness\x00aliti\x00iviti\x00biliti\x00ical\x00ness\x00icate\x00iciti\x00ative\x00alize\x00eed\x00ee\x00ed\x00ing\x00case_sensitive\x00ascii\x00porter\x00trigram\x00col\x00row\x00instance\x00fts5vocab: unknown table type: %Q\x00CREATE TABlE vocab(term, col, doc, cnt)\x00CREATE TABlE vocab(term, doc, cnt)\x00CREATE TABlE vocab(term, doc, col, offset)\x00wrong number of vtable arguments\x00recursive definition for %s.%s\x00SELECT t.%Q FROM %Q.%Q AS t WHERE t.%Q MATCH '*id'\x00no such fts5 table: %s.%s\x00fts5vocab\x002023-03-22 11:56:21 0d1fc92f94cb6b76bffe3ec34d69cffde2924203304e8ffc4155597af0c191da\x00" var ts = (*reflect.StringHeader)(unsafe.Pointer(&ts1)).Data diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/sqlite/lib/sqlite_openbsd_amd64.go temporal-1.22.5/src/vendor/modernc.org/sqlite/lib/sqlite_openbsd_amd64.go --- temporal-1.21.5-1/src/vendor/modernc.org/sqlite/lib/sqlite_openbsd_amd64.go 2023-09-29 14:03:35.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/sqlite/lib/sqlite_openbsd_amd64.go 2024-02-23 09:46:16.000000000 +0000 @@ -1,4 +1,4 @@ -// Code generated by 'ccgo -DSQLITE_PRIVATE= -export-defines "" -export-enums "" -export-externs X -export-fields F -export-typedefs "" -ignore-unsupported-alignment -pkgname sqlite3 -volatile=sqlite3_io_error_pending,sqlite3_open_file_count,sqlite3_pager_readdb_count,sqlite3_pager_writedb_count,sqlite3_pager_writej_count,sqlite3_search_count,sqlite3_sort_count,saved_cnt,randomnessPid -o lib/sqlite_openbsd_amd64.go -trace-translation-units testdata/sqlite-amalgamation-3410000/sqlite3.c -full-path-comments -DNDEBUG -DHAVE_USLEEP -DLONGDOUBLE_TYPE=double -DSQLITE_CORE -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_FTS5 -DSQLITE_ENABLE_GEOPOLY -DSQLITE_ENABLE_MATH_FUNCTIONS -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_OFFSET_SQL_FUNC -DSQLITE_ENABLE_PREUPDATE_HOOK -DSQLITE_ENABLE_RBU -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_SESSION -DSQLITE_ENABLE_SNAPSHOT -DSQLITE_ENABLE_STAT4 -DSQLITE_ENABLE_UNLOCK_NOTIFY -DSQLITE_LIKE_DOESNT_MATCH_BLOBS -DSQLITE_MUTEX_APPDEF=1 -DSQLITE_MUTEX_NOOP -DSQLITE_SOUNDEX -DSQLITE_THREADSAFE=1 -DSQLITE_OS_UNIX=1', DO NOT EDIT. 
+// Code generated by 'ccgo -DSQLITE_PRIVATE= -export-defines "" -export-enums "" -export-externs X -export-fields F -export-typedefs "" -ignore-unsupported-alignment -pkgname sqlite3 -volatile=sqlite3_io_error_pending,sqlite3_open_file_count,sqlite3_pager_readdb_count,sqlite3_pager_writedb_count,sqlite3_pager_writej_count,sqlite3_search_count,sqlite3_sort_count,saved_cnt,randomnessPid -o lib/sqlite_openbsd_amd64.go -trace-translation-units testdata/sqlite-amalgamation-3410200/sqlite3.c -full-path-comments -DNDEBUG -DHAVE_USLEEP -DLONGDOUBLE_TYPE=double -DSQLITE_CORE -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_FTS5 -DSQLITE_ENABLE_GEOPOLY -DSQLITE_ENABLE_MATH_FUNCTIONS -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_OFFSET_SQL_FUNC -DSQLITE_ENABLE_PREUPDATE_HOOK -DSQLITE_ENABLE_RBU -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_SESSION -DSQLITE_ENABLE_SNAPSHOT -DSQLITE_ENABLE_STAT4 -DSQLITE_ENABLE_UNLOCK_NOTIFY -DSQLITE_LIKE_DOESNT_MATCH_BLOBS -DSQLITE_MUTEX_APPDEF=1 -DSQLITE_MUTEX_NOOP -DSQLITE_SOUNDEX -DSQLITE_THREADSAFE=1 -DSQLITE_OS_UNIX=1', DO NOT EDIT. package sqlite3 @@ -675,11 +675,11 @@ NC_OrderAgg = 0x8000000 NC_PartIdx = 0x000002 NC_SelfRef = 0x00002e + NC_Subquery = 0x000040 NC_UAggInfo = 0x000100 NC_UBaseReg = 0x000400 NC_UEList = 0x000080 NC_UUpsert = 0x000200 - NC_VarSelect = 0x000040 NDEBUG = 1 NMEADISC = 7 NN = 1 @@ -1836,7 +1836,7 @@ SQLITE_SHM_UNLOCK = 1 SQLITE_SORTER_PMASZ = 250 SQLITE_SOUNDEX = 1 - SQLITE_SOURCE_ID = "2023-02-21 18:09:37 05941c2a04037fc3ed2ffae11f5d2260706f89431f463518740f72ada350866d" + SQLITE_SOURCE_ID = "2023-03-22 11:56:21 0d1fc92f94cb6b76bffe3ec34d69cffde2924203304e8ffc4155597af0c191da" SQLITE_SO_ASC = 0 SQLITE_SO_DESC = 1 SQLITE_SO_UNDEFINED = -1 @@ -1944,8 +1944,8 @@ SQLITE_UTF8 = 1 SQLITE_VDBEINT_H = 0 SQLITE_VDBE_H = 0 - SQLITE_VERSION = "3.41.0" - SQLITE_VERSION_NUMBER = 3041000 + SQLITE_VERSION = "3.41.2" + SQLITE_VERSION_NUMBER = 3041002 SQLITE_VTABRISK_High = 2 SQLITE_VTABRISK_Low = 0 SQLITE_VTABRISK_Normal = 1 @@ -5209,7 +5209,8 @@ FiIdxCur int32 FiIdxCol int32 FbMaybeNullRow U8 - F__ccgo_pad1 [3]byte + Faff U8 + F__ccgo_pad1 [2]byte FpIENext uintptr } @@ -5851,17 +5852,18 @@ // Handle type for pages. type PgHdr2 = struct { - FpPage uintptr - FpData uintptr - FpExtra uintptr - FpCache uintptr - FpDirty uintptr - FpPager uintptr - Fpgno Pgno - Fflags U16 - FnRef I16 - FpDirtyNext uintptr - FpDirtyPrev uintptr + FpPage uintptr + FpData uintptr + FpExtra uintptr + FpCache uintptr + FpDirty uintptr + FpPager uintptr + Fpgno Pgno + Fflags U16 + F__ccgo_pad1 [2]byte + FnRef I64 + FpDirtyNext uintptr + FpDirtyPrev uintptr } // Handle type for pages. 
@@ -6082,14 +6084,14 @@ FpDirty uintptr FpDirtyTail uintptr FpSynced uintptr - FnRefSum int32 + FnRefSum I64 FszCache int32 FszSpill int32 FszPage int32 FszExtra int32 FbPurgeable U8 FeCreate U8 - F__ccgo_pad1 [2]byte + F__ccgo_pad1 [6]byte FxStress uintptr FpStress uintptr FpCache uintptr @@ -6895,7 +6897,7 @@ _ = pMutex if op < 0 || op >= int32(uint64(unsafe.Sizeof([10]Sqlite3StatValueType{}))/uint64(unsafe.Sizeof(Sqlite3StatValueType(0)))) { - return Xsqlite3MisuseError(tls, 23229) + return Xsqlite3MisuseError(tls, 23233) } if statMutex[op] != 0 { pMutex = Xsqlite3Pcache1Mutex(tls) @@ -14824,7 +14826,7 @@ for p = (*UnixInodeInfo)(unsafe.Pointer(pInode)).FpUnused; p != 0; p = pNext { pNext = (*UnixUnusedFd)(unsafe.Pointer(p)).FpNext - robust_close(tls, pFile, (*UnixUnusedFd)(unsafe.Pointer(p)).Ffd, 38271) + robust_close(tls, pFile, (*UnixUnusedFd)(unsafe.Pointer(p)).Ffd, 38275) Xsqlite3_free(tls, p) } (*UnixInodeInfo)(unsafe.Pointer(pInode)).FpUnused = uintptr(0) @@ -15300,7 +15302,7 @@ func closeUnixFile(tls *libc.TLS, id uintptr) int32 { var pFile uintptr = id if (*UnixFile)(unsafe.Pointer(pFile)).Fh >= 0 { - robust_close(tls, pFile, (*UnixFile)(unsafe.Pointer(pFile)).Fh, 39055) + robust_close(tls, pFile, (*UnixFile)(unsafe.Pointer(pFile)).Fh, 39059) (*UnixFile)(unsafe.Pointer(pFile)).Fh = -1 } @@ -15578,7 +15580,7 @@ if fd >= 0 { return SQLITE_OK } - return unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 40676), ts+3362, bp+8, 40676) + return unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 40680), ts+3362, bp+8, 40680) } func unixSync(tls *libc.TLS, id uintptr, flags int32) int32 { @@ -15595,14 +15597,14 @@ if rc != 0 { storeLastErrno(tls, pFile, *(*int32)(unsafe.Pointer(libc.X__errno(tls)))) - return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(4)<<8, ts+3650, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40717) + return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(4)<<8, ts+3650, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40721) } if int32((*UnixFile)(unsafe.Pointer(pFile)).FctrlFlags)&UNIXFILE_DIRSYNC != 0 { rc = (*(*func(*libc.TLS, uintptr, uintptr) int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 17*24 + 8)))(tls, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, bp) if rc == SQLITE_OK { full_fsync(tls, *(*int32)(unsafe.Pointer(bp)), 0, 0) - robust_close(tls, pFile, *(*int32)(unsafe.Pointer(bp)), 40731) + robust_close(tls, pFile, *(*int32)(unsafe.Pointer(bp)), 40735) } else { rc = SQLITE_OK } @@ -15622,7 +15624,7 @@ rc = robust_ftruncate(tls, (*UnixFile)(unsafe.Pointer(pFile)).Fh, nByte) if rc != 0 { storeLastErrno(tls, pFile, *(*int32)(unsafe.Pointer(libc.X__errno(tls)))) - return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(6)<<8, ts+3281, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40762) + return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(6)<<8, ts+3281, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40766) } else { return SQLITE_OK } @@ -15877,7 +15879,7 @@ } Xsqlite3_free(tls, (*UnixShmNode)(unsafe.Pointer(p)).FapRegion) if (*UnixShmNode)(unsafe.Pointer(p)).FhShm >= 0 { - robust_close(tls, pFd, (*UnixShmNode)(unsafe.Pointer(p)).FhShm, 41442) + robust_close(tls, pFd, (*UnixShmNode)(unsafe.Pointer(p)).FhShm, 41446) (*UnixShmNode)(unsafe.Pointer(p)).FhShm = -1 } (*UnixInodeInfo)(unsafe.Pointer((*UnixShmNode)(unsafe.Pointer(p)).FpInode)).FpShmNode = uintptr(0) @@ -15905,7 +15907,7 @@ rc = unixShmSystemLock(tls, pDbFd, F_WRLCK, (22+SQLITE_SHM_NLOCK)*4+SQLITE_SHM_NLOCK, 1) if rc == SQLITE_OK && robust_ftruncate(tls, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FhShm, int64(3)) != 0 { - rc 
= unixLogErrorAtLine(tls, SQLITE_IOERR|int32(18)<<8, ts+3281, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename, 41499) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(18)<<8, ts+3281, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename, 41503) } } } else if int32((*flock)(unsafe.Pointer(bp+8)).Fl_type) == F_WRLCK { @@ -16004,7 +16006,7 @@ if !((*unixShmNode)(unsafe.Pointer(pShmNode)).FhShm < 0) { goto __10 } - rc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 41624), ts+3245, zShm, 41624) + rc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 41628), ts+3245, zShm, 41628) goto shm_open_err __10: ; @@ -16134,7 +16136,7 @@ goto __14 } zFile = (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(19)<<8, ts+3316, zFile, 41768) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(19)<<8, ts+3316, zFile, 41772) goto shmpage_out __14: ; @@ -16180,7 +16182,7 @@ if !(pMem == libc.UintptrFromInt32(-1)) { goto __20 } - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(21)<<8, ts+3403, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename, 41795) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(21)<<8, ts+3403, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename, 41799) goto shmpage_out __20: ; @@ -16452,7 +16454,7 @@ unixEnterMutex(tls) rc = findInodeInfo(tls, pNew, pNew+16) if rc != SQLITE_OK { - robust_close(tls, pNew, h, 42672) + robust_close(tls, pNew, h, 42676) h = -1 } unixLeaveMutex(tls) @@ -16473,7 +16475,7 @@ storeLastErrno(tls, pNew, 0) if rc != SQLITE_OK { if h >= 0 { - robust_close(tls, pNew, h, 42757) + robust_close(tls, pNew, h, 42761) } } else { (*Sqlite3_file)(unsafe.Pointer(pId)).FpMethods = pLockingStyle @@ -16789,7 +16791,7 @@ if !(fd < 0) { goto __19 } - rc2 = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43198), ts+3245, zName, 43198) + rc2 = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43202), ts+3245, zName, 43202) if !(rc == SQLITE_OK) { goto __20 } @@ -16880,7 +16882,7 @@ if *(*int32)(unsafe.Pointer(libc.X__errno(tls))) == ENOENT { rc = SQLITE_IOERR | int32(23)<<8 } else { - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(10)<<8, ts+3355, zPath, 43337) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(10)<<8, ts+3355, zPath, 43341) } return rc } @@ -16888,9 +16890,9 @@ rc = (*(*func(*libc.TLS, uintptr, uintptr) int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 17*24 + 8)))(tls, zPath, bp) if rc == SQLITE_OK { if full_fsync(tls, *(*int32)(unsafe.Pointer(bp)), 0, 0) != 0 { - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(5)<<8, ts+3772, zPath, 43347) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(5)<<8, ts+3772, zPath, 43351) } - robust_close(tls, uintptr(0), *(*int32)(unsafe.Pointer(bp)), 43349) + robust_close(tls, uintptr(0), *(*int32)(unsafe.Pointer(bp)), 43353) } else { rc = SQLITE_OK } @@ -16954,18 +16956,18 @@ zIn = (*DbPath)(unsafe.Pointer(pPath)).FzOut if (*(*func(*libc.TLS, uintptr, uintptr) int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 27*24 + 8)))(tls, zIn, bp) != 0 { if *(*int32)(unsafe.Pointer(libc.X__errno(tls))) != ENOENT { - (*DbPath)(unsafe.Pointer(pPath)).Frc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43443), ts+3443, zIn, 43443) + (*DbPath)(unsafe.Pointer(pPath)).Frc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43447), ts+3443, zIn, 43447) } } else if (*stat)(unsafe.Pointer(bp)).Fst_mode&Mode_t(0170000) == Mode_t(0120000) { var got Ssize_t if libc.PostIncInt32(&(*DbPath)(unsafe.Pointer(pPath)).FnSymlink, 1) > SQLITE_MAX_SYMLINK { - 
(*DbPath)(unsafe.Pointer(pPath)).Frc = Xsqlite3CantopenError(tls, 43449) + (*DbPath)(unsafe.Pointer(pPath)).Frc = Xsqlite3CantopenError(tls, 43453) return } got = (*(*func(*libc.TLS, uintptr, uintptr, Size_t) Ssize_t)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls, zIn, bp+128, uint64(unsafe.Sizeof([1026]int8{}))-uint64(2)) if got <= int64(0) || got >= Ssize_t(unsafe.Sizeof([1026]int8{}))-int64(2) { - (*DbPath)(unsafe.Pointer(pPath)).Frc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43454), ts+3434, zIn, 43454) + (*DbPath)(unsafe.Pointer(pPath)).Frc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43458), ts+3434, zIn, 43458) return } *(*int8)(unsafe.Pointer(bp + 128 + uintptr(got))) = int8(0) @@ -17005,14 +17007,14 @@ (*DbPath)(unsafe.Pointer(bp + 1032)).FzOut = zOut if int32(*(*int8)(unsafe.Pointer(zPath))) != '/' { if (*(*func(*libc.TLS, uintptr, Size_t) uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 3*24 + 8)))(tls, bp, uint64(unsafe.Sizeof([1026]int8{}))-uint64(2)) == uintptr(0) { - return unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43512), ts+3263, zPath, 43512) + return unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43516), ts+3263, zPath, 43516) } appendAllPathElements(tls, bp+1032, bp) } appendAllPathElements(tls, bp+1032, zPath) *(*int8)(unsafe.Pointer(zOut + uintptr((*DbPath)(unsafe.Pointer(bp+1032)).FnUsed))) = int8(0) if (*DbPath)(unsafe.Pointer(bp+1032)).Frc != 0 || (*DbPath)(unsafe.Pointer(bp+1032)).FnUsed < 2 { - return Xsqlite3CantopenError(tls, 43518) + return Xsqlite3CantopenError(tls, 43522) } if (*DbPath)(unsafe.Pointer(bp+1032)).FnSymlink != 0 { return SQLITE_OK | int32(2)<<8 @@ -17087,7 +17089,7 @@ for __ccgo := true; __ccgo; __ccgo = got < 0 && *(*int32)(unsafe.Pointer(libc.X__errno(tls))) == EINTR { got = int32((*(*func(*libc.TLS, int32, uintptr, Size_t) Ssize_t)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 8*24 + 8)))(tls, fd, zBuf, uint64(nBuf))) } - robust_close(tls, uintptr(0), fd, 43619) + robust_close(tls, uintptr(0), fd, 43623) } } @@ -18520,7 +18522,7 @@ libc.Xmemset(tls, pPgHdr+32, 0, uint64(unsafe.Sizeof(PgHdr{}))-uint64(uintptr(0)+32)) (*PgHdr)(unsafe.Pointer(pPgHdr)).FpPage = pPage (*PgHdr)(unsafe.Pointer(pPgHdr)).FpData = (*Sqlite3_pcache_page)(unsafe.Pointer(pPage)).FpBuf - (*PgHdr)(unsafe.Pointer(pPgHdr)).FpExtra = pPgHdr + 1*72 + (*PgHdr)(unsafe.Pointer(pPgHdr)).FpExtra = pPgHdr + 1*80 libc.Xmemset(tls, (*PgHdr)(unsafe.Pointer(pPgHdr)).FpExtra, 0, uint64(8)) (*PgHdr)(unsafe.Pointer(pPgHdr)).FpCache = pCache (*PgHdr)(unsafe.Pointer(pPgHdr)).Fpgno = pgno @@ -18550,7 +18552,7 @@ // reference count drops to 0, then it is made eligible for recycling. 
func Xsqlite3PcacheRelease(tls *libc.TLS, p uintptr) { (*PCache)(unsafe.Pointer((*PgHdr)(unsafe.Pointer(p)).FpCache)).FnRefSum-- - if int32(libc.PreDecInt16(&(*PgHdr)(unsafe.Pointer(p)).FnRef, 1)) == 0 { + if libc.PreDecInt64(&(*PgHdr)(unsafe.Pointer(p)).FnRef, 1) == int64(0) { if int32((*PgHdr)(unsafe.Pointer(p)).Fflags)&PGHDR_CLEAN != 0 { pcacheUnpin(tls, p) } else { @@ -18601,7 +18603,7 @@ *(*U16)(unsafe.Pointer(p + 52)) &= libc.Uint16FromInt32(libc.CplInt32(PGHDR_DIRTY | PGHDR_NEED_SYNC | PGHDR_WRITEABLE)) *(*U16)(unsafe.Pointer(p + 52)) |= U16(PGHDR_CLEAN) - if int32((*PgHdr)(unsafe.Pointer(p)).FnRef) == 0 { + if (*PgHdr)(unsafe.Pointer(p)).FnRef == int64(0) { pcacheUnpin(tls, p) } } @@ -18705,8 +18707,8 @@ } func pcacheMergeDirtyList(tls *libc.TLS, pA uintptr, pB uintptr) uintptr { - bp := tls.Alloc(72) - defer tls.Free(72) + bp := tls.Alloc(80) + defer tls.Free(80) var pTail uintptr pTail = bp @@ -18784,13 +18786,13 @@ // // This is not the total number of pages referenced, but the sum of the // reference count for all pages. -func Xsqlite3PcacheRefCount(tls *libc.TLS, pCache uintptr) int32 { +func Xsqlite3PcacheRefCount(tls *libc.TLS, pCache uintptr) I64 { return (*PCache)(unsafe.Pointer(pCache)).FnRefSum } // Return the number of references to the page supplied as an argument. -func Xsqlite3PcachePageRefcount(tls *libc.TLS, p uintptr) int32 { - return int32((*PgHdr)(unsafe.Pointer(p)).FnRef) +func Xsqlite3PcachePageRefcount(tls *libc.TLS, p uintptr) I64 { + return (*PgHdr)(unsafe.Pointer(p)).FnRef } // Return the total number of pages in the cache. @@ -21080,7 +21082,7 @@ pPg = Xsqlite3PagerLookup(tls, pPager, iPg) if pPg != 0 { - if Xsqlite3PcachePageRefcount(tls, pPg) == 1 { + if Xsqlite3PcachePageRefcount(tls, pPg) == int64(1) { Xsqlite3PcacheDrop(tls, pPg) } else { rc = readDbPage(tls, pPg) @@ -21503,7 +21505,7 @@ var pageSize U32 = *(*U32)(unsafe.Pointer(pPageSize)) if (int32((*Pager)(unsafe.Pointer(pPager)).FmemDb) == 0 || (*Pager)(unsafe.Pointer(pPager)).FdbSize == Pgno(0)) && - Xsqlite3PcacheRefCount(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache) == 0 && + Xsqlite3PcacheRefCount(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache) == int64(0) && pageSize != 0 && pageSize != U32((*Pager)(unsafe.Pointer(pPager)).FpageSize) { var pNew uintptr = uintptr(0) *(*I64)(unsafe.Pointer(bp)) = int64(0) @@ -21961,7 +21963,7 @@ for rc == SQLITE_OK && pList != 0 { var pNext uintptr = (*PgHdr)(unsafe.Pointer(pList)).FpDirty - if int32((*PgHdr)(unsafe.Pointer(pList)).FnRef) == 0 { + if (*PgHdr)(unsafe.Pointer(pList)).FnRef == int64(0) { rc = pagerStress(tls, pPager, pList) } pList = pNext @@ -22111,7 +22113,7 @@ goto __12 } - rc = Xsqlite3CantopenError(tls, 60235) + rc = Xsqlite3CantopenError(tls, 60239) __12: ; if !(rc != SQLITE_OK) { @@ -22492,7 +22494,7 @@ if !(rc == SQLITE_OK && *(*int32)(unsafe.Pointer(bp + 8))&SQLITE_OPEN_READONLY != 0) { goto __10 } - rc = Xsqlite3CantopenError(tls, 60754) + rc = Xsqlite3CantopenError(tls, 60758) Xsqlite3OsClose(tls, (*Pager)(unsafe.Pointer(pPager)).Fjfd) __10: ; @@ -22598,7 +22600,7 @@ } func pagerUnlockIfUnused(tls *libc.TLS, pPager uintptr) { - if Xsqlite3PcacheRefCount(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache) == 0 { + if Xsqlite3PcacheRefCount(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache) == int64(0) { pagerUnlockAndRollback(tls, pPager) } } @@ -22616,7 +22618,7 @@ if !(pgno == Pgno(0)) { goto __1 } - return Xsqlite3CorruptError(tls, 60967) + return Xsqlite3CorruptError(tls, 60971) __1: ; *(*uintptr)(unsafe.Pointer(bp)) = 
Xsqlite3PcacheFetch(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache, pgno, 3) @@ -22655,7 +22657,7 @@ if !(pgno == (*Pager)(unsafe.Pointer(pPager)).FlckPgno) { goto __7 } - rc = Xsqlite3CorruptError(tls, 60999) + rc = Xsqlite3CorruptError(tls, 61003) goto pager_acquire_err __7: ; @@ -23442,7 +23444,7 @@ // Return the number of references to the specified page. func Xsqlite3PagerPageRefcount(tls *libc.TLS, pPage uintptr) int32 { - return Xsqlite3PcachePageRefcount(tls, pPage) + return int32(Xsqlite3PcachePageRefcount(tls, pPage)) } // Parameter eStat must be one of SQLITE_DBSTATUS_CACHE_HIT, _MISS, _WRITE, @@ -23685,9 +23687,9 @@ pPgOld = Xsqlite3PagerLookup(tls, pPager, pgno) if pPgOld != 0 { - if int32((*PgHdr)(unsafe.Pointer(pPgOld)).FnRef) > 1 { + if (*PgHdr)(unsafe.Pointer(pPgOld)).FnRef > int64(1) { Xsqlite3PagerUnrefNotNull(tls, pPgOld) - return Xsqlite3CorruptError(tls, 62623) + return Xsqlite3CorruptError(tls, 62627) } *(*U16)(unsafe.Pointer(pPg + 52)) |= U16(int32((*PgHdr)(unsafe.Pointer(pPgOld)).Fflags) & PGHDR_NEED_SYNC) if (*Pager)(unsafe.Pointer(pPager)).FtempFile != 0 { @@ -24444,7 +24446,7 @@ nCollide = idx for iKey = walHash(tls, iPage); *(*Ht_slot)(unsafe.Pointer((*WalHashLoc)(unsafe.Pointer(bp)).FaHash + uintptr(iKey)*2)) != 0; iKey = walNextHash(tls, iKey) { if libc.PostDecInt32(&nCollide, 1) == 0 { - return Xsqlite3CorruptError(tls, 64387) + return Xsqlite3CorruptError(tls, 64391) } } *(*U32)(unsafe.Pointer((*WalHashLoc)(unsafe.Pointer(bp)).FaPgno + uintptr(idx-1)*4)) = iPage @@ -24543,7 +24545,7 @@ if !(version != U32(WAL_MAX_VERSION)) { goto __7 } - rc = Xsqlite3CantopenError(tls, 64519) + rc = Xsqlite3CantopenError(tls, 64523) goto finished __7: ; @@ -25129,7 +25131,7 @@ goto __14 } - rc = Xsqlite3CorruptError(tls, 65333) + rc = Xsqlite3CorruptError(tls, 65337) goto __15 __14: Xsqlite3OsFileControlHint(tls, (*Wal)(unsafe.Pointer(pWal)).FpDbFd, SQLITE_FCNTL_SIZE_HINT, bp+16) @@ -25404,7 +25406,7 @@ } if badHdr == 0 && (*Wal)(unsafe.Pointer(pWal)).Fhdr.FiVersion != U32(WALINDEX_MAX_VERSION) { - rc = Xsqlite3CantopenError(tls, 65682) + rc = Xsqlite3CantopenError(tls, 65686) } if (*Wal)(unsafe.Pointer(pWal)).FbShmUnreliable != 0 { if rc != SQLITE_OK { @@ -25877,7 +25879,7 @@ iRead = iFrame } if libc.PostDecInt32(&nCollide, 1) == 0 { - return Xsqlite3CorruptError(tls, 66419) + return Xsqlite3CorruptError(tls, 66423) } iKey = walNextHash(tls, iKey) } @@ -26382,7 +26384,7 @@ if rc == SQLITE_OK { if (*Wal)(unsafe.Pointer(pWal)).Fhdr.FmxFrame != 0 && walPagesize(tls, pWal) != nBuf { - rc = Xsqlite3CorruptError(tls, 67138) + rc = Xsqlite3CorruptError(tls, 67142) } else { rc = walCheckpoint(tls, pWal, db, eMode2, xBusy2, pBusyArg, sync_flags, zBuf) } @@ -27040,7 +27042,7 @@ } Xsqlite3VdbeRecordUnpack(tls, pKeyInfo, int32(nKey), pKey, pIdxKey) if int32((*UnpackedRecord)(unsafe.Pointer(pIdxKey)).FnField) == 0 || int32((*UnpackedRecord)(unsafe.Pointer(pIdxKey)).FnField) > int32((*KeyInfo)(unsafe.Pointer(pKeyInfo)).FnAllField) { - rc = Xsqlite3CorruptError(tls, 69249) + rc = Xsqlite3CorruptError(tls, 69253) } else { rc = Xsqlite3BtreeIndexMoveto(tls, pCur, pIdxKey, pRes) } @@ -27177,7 +27179,7 @@ if !(key == Pgno(0)) { goto __2 } - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69430) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69434) return __2: ; @@ -27194,7 +27196,7 @@ goto __4 } - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69443) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69447) goto ptrmap_exit __4: ; @@ -27202,7 
+27204,7 @@ if !(offset < 0) { goto __5 } - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69448) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69452) goto ptrmap_exit __5: ; @@ -27245,7 +27247,7 @@ offset = int32(Pgno(5) * (key - Pgno(iPtrmap) - Pgno(1))) if offset < 0 { Xsqlite3PagerUnref(tls, *(*uintptr)(unsafe.Pointer(bp))) - return Xsqlite3CorruptError(tls, 69493) + return Xsqlite3CorruptError(tls, 69497) } *(*U8)(unsafe.Pointer(pEType)) = *(*U8)(unsafe.Pointer(pPtrmap + uintptr(offset))) @@ -27255,7 +27257,7 @@ Xsqlite3PagerUnref(tls, *(*uintptr)(unsafe.Pointer(bp))) if int32(*(*U8)(unsafe.Pointer(pEType))) < 1 || int32(*(*U8)(unsafe.Pointer(pEType))) > 5 { - return Xsqlite3CorruptError(tls, 69501) + return Xsqlite3CorruptError(tls, 69505) } return SQLITE_OK } @@ -27505,7 +27507,7 @@ if U32((*CellInfo)(unsafe.Pointer(bp)).FnLocal) < (*CellInfo)(unsafe.Pointer(bp)).FnPayload { var ovfl Pgno if Uptr((*MemPage)(unsafe.Pointer(pSrc)).FaDataEnd) >= Uptr(pCell) && Uptr((*MemPage)(unsafe.Pointer(pSrc)).FaDataEnd) < Uptr(pCell+uintptr((*CellInfo)(unsafe.Pointer(bp)).FnLocal)) { - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69893) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69897) return } ovfl = Xsqlite3Get4byte(tls, pCell+uintptr(int32((*CellInfo)(unsafe.Pointer(bp)).FnSize)-4)) @@ -27552,7 +27554,7 @@ if !(iFree > usableSize-4) { goto __2 } - return Xsqlite3CorruptError(tls, 69951) + return Xsqlite3CorruptError(tls, 69955) __2: ; if !(iFree != 0) { @@ -27562,7 +27564,7 @@ if !(iFree2 > usableSize-4) { goto __4 } - return Xsqlite3CorruptError(tls, 69954) + return Xsqlite3CorruptError(tls, 69958) __4: ; if !(0 == iFree2 || int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree2)))) == 0 && int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree2+1)))) == 0) { @@ -27575,7 +27577,7 @@ if !(top >= iFree) { goto __6 } - return Xsqlite3CorruptError(tls, 69962) + return Xsqlite3CorruptError(tls, 69966) __6: ; if !(iFree2 != 0) { @@ -27584,14 +27586,14 @@ if !(iFree+sz > iFree2) { goto __9 } - return Xsqlite3CorruptError(tls, 69965) + return Xsqlite3CorruptError(tls, 69969) __9: ; sz2 = int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree2+2))))<<8 | int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree2+2) + 1))) if !(iFree2+sz2 > usableSize) { goto __10 } - return Xsqlite3CorruptError(tls, 69967) + return Xsqlite3CorruptError(tls, 69971) __10: ; libc.Xmemmove(tls, data+uintptr(iFree+sz+sz2), data+uintptr(iFree+sz), uint64(iFree2-(iFree+sz))) @@ -27601,7 +27603,7 @@ if !(iFree+sz > usableSize) { goto __11 } - return Xsqlite3CorruptError(tls, 69971) + return Xsqlite3CorruptError(tls, 69975) __11: ; __8: @@ -27665,7 +27667,7 @@ if !(pc < iCellStart || pc > iCellLast) { goto __22 } - return Xsqlite3CorruptError(tls, 70004) + return Xsqlite3CorruptError(tls, 70008) __22: ; size = int32((*struct { @@ -27675,7 +27677,7 @@ if !(cbrk < iCellStart || pc+size > usableSize) { goto __23 } - return Xsqlite3CorruptError(tls, 70010) + return Xsqlite3CorruptError(tls, 70014) __23: ; *(*U8)(unsafe.Pointer(pAddr1)) = U8(cbrk >> 8) @@ -27697,7 +27699,7 @@ if !(int32(*(*uint8)(unsafe.Pointer(data + uintptr(hdr+7))))+cbrk-iCellFirst != (*MemPage)(unsafe.Pointer(pPage)).FnFree) { goto __24 } - return Xsqlite3CorruptError(tls, 70024) + return Xsqlite3CorruptError(tls, 70028) __24: ; *(*uint8)(unsafe.Pointer(data + uintptr(hdr+5))) = U8(cbrk >> 8) @@ -27732,7 +27734,7 @@ *(*U8)(unsafe.Pointer(aData + uintptr(hdr+7))) += U8(int32(U8(x))) return aData + uintptr(pc) } else if 
x+pc > maxPC { - *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70081) + *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70085) return uintptr(0) } else { *(*U8)(unsafe.Pointer(aData + uintptr(pc+2))) = U8(x >> 8) @@ -27745,13 +27747,13 @@ pc = int32(*(*U8)(unsafe.Pointer(pTmp)))<<8 | int32(*(*U8)(unsafe.Pointer(pTmp + 1))) if pc <= iAddr { if pc != 0 { - *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70096) + *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70100) } return uintptr(0) } } if pc > maxPC+nByte-4 { - *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70103) + *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70107) } return uintptr(0) } @@ -27776,7 +27778,7 @@ if top == 0 && (*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize == U32(65536) { top = 65536 } else { - return Xsqlite3CorruptError(tls, 70152) + return Xsqlite3CorruptError(tls, 70156) } } @@ -27787,7 +27789,7 @@ *(*int32)(unsafe.Pointer(pIdx)) = libc.AssignInt32(&g2, int32((int64(pSpace)-int64(data))/1)) if g2 <= gap { - return Xsqlite3CorruptError(tls, 70170) + return Xsqlite3CorruptError(tls, 70174) } else { return SQLITE_OK } @@ -27839,22 +27841,22 @@ if int32(iFreeBlk) == 0 { break } - return Xsqlite3CorruptError(tls, 70249) + return Xsqlite3CorruptError(tls, 70253) } iPtr = iFreeBlk } if U32(iFreeBlk) > (*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize-U32(4) { - return Xsqlite3CorruptError(tls, 70254) + return Xsqlite3CorruptError(tls, 70258) } if iFreeBlk != 0 && iEnd+U32(3) >= U32(iFreeBlk) { nFrag = U8(U32(iFreeBlk) - iEnd) if iEnd > U32(iFreeBlk) { - return Xsqlite3CorruptError(tls, 70266) + return Xsqlite3CorruptError(tls, 70270) } iEnd = U32(int32(iFreeBlk) + (int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(iFreeBlk)+2))))<<8 | int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(iFreeBlk)+2) + 1))))) if iEnd > (*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize { - return Xsqlite3CorruptError(tls, 70269) + return Xsqlite3CorruptError(tls, 70273) } iSize = U16(iEnd - U32(iStart)) iFreeBlk = U16(int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFreeBlk))))<<8 | int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFreeBlk) + 1)))) @@ -27864,7 +27866,7 @@ var iPtrEnd int32 = int32(iPtr) + (int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(iPtr)+2))))<<8 | int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(iPtr)+2) + 1)))) if iPtrEnd+3 >= int32(iStart) { if iPtrEnd > int32(iStart) { - return Xsqlite3CorruptError(tls, 70282) + return Xsqlite3CorruptError(tls, 70286) } nFrag = U8(int32(nFrag) + (int32(iStart) - iPtrEnd)) iSize = U16(iEnd - U32(iPtr)) @@ -27872,7 +27874,7 @@ } } if int32(nFrag) > int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+7)))) { - return Xsqlite3CorruptError(tls, 70288) + return Xsqlite3CorruptError(tls, 70292) } *(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+7))) -= uint8(int32(nFrag)) } @@ -27880,10 +27882,10 @@ x = U16(int32(*(*U8)(unsafe.Pointer(pTmp)))<<8 | int32(*(*U8)(unsafe.Pointer(pTmp + 1)))) if int32(iStart) <= int32(x) { if int32(iStart) < int32(x) { - return Xsqlite3CorruptError(tls, 70297) + return Xsqlite3CorruptError(tls, 70301) } if int32(iPtr) != int32(hdr)+1 { - return Xsqlite3CorruptError(tls, 70298) + return Xsqlite3CorruptError(tls, 70302) } *(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+1))) = U8(int32(iFreeBlk) >> 8) *(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+1) + 1)) = U8(iFreeBlk) @@ -27943,7 
+27945,7 @@ (*MemPage)(unsafe.Pointer(pPage)).FxParseCell = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr) }{btreeParseCellPtrIndex})) - return Xsqlite3CorruptError(tls, 70357) + return Xsqlite3CorruptError(tls, 70361) } } else { (*MemPage)(unsafe.Pointer(pPage)).FchildPtrSize = U8(4) @@ -27979,7 +27981,7 @@ (*MemPage)(unsafe.Pointer(pPage)).FxParseCell = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr) }{btreeParseCellPtrIndex})) - return Xsqlite3CorruptError(tls, 70381) + return Xsqlite3CorruptError(tls, 70385) } } return SQLITE_OK @@ -28009,11 +28011,11 @@ var next U32 var size U32 if pc < top { - return Xsqlite3CorruptError(tls, 70432) + return Xsqlite3CorruptError(tls, 70436) } for 1 != 0 { if pc > iCellLast { - return Xsqlite3CorruptError(tls, 70437) + return Xsqlite3CorruptError(tls, 70441) } next = U32(int32(*(*U8)(unsafe.Pointer(data + uintptr(pc))))<<8 | int32(*(*U8)(unsafe.Pointer(data + uintptr(pc) + 1)))) size = U32(int32(*(*U8)(unsafe.Pointer(data + uintptr(pc+2))))<<8 | int32(*(*U8)(unsafe.Pointer(data + uintptr(pc+2) + 1)))) @@ -28024,15 +28026,15 @@ pc = int32(next) } if next > U32(0) { - return Xsqlite3CorruptError(tls, 70447) + return Xsqlite3CorruptError(tls, 70451) } if U32(pc)+size > uint32(usableSize) { - return Xsqlite3CorruptError(tls, 70451) + return Xsqlite3CorruptError(tls, 70455) } } if nFree > usableSize || nFree < iCellFirst { - return Xsqlite3CorruptError(tls, 70463) + return Xsqlite3CorruptError(tls, 70467) } (*MemPage)(unsafe.Pointer(pPage)).FnFree = int32(U16(nFree - iCellFirst)) return SQLITE_OK @@ -28060,14 +28062,14 @@ pc = int32(*(*U8)(unsafe.Pointer(data + uintptr(cellOffset+i*2))))<<8 | int32(*(*U8)(unsafe.Pointer(data + uintptr(cellOffset+i*2) + 1))) if pc < iCellFirst || pc > iCellLast { - return Xsqlite3CorruptError(tls, 70494) + return Xsqlite3CorruptError(tls, 70498) } sz = int32((*struct { f func(*libc.TLS, uintptr, uintptr) U16 })(unsafe.Pointer(&struct{ uintptr }{(*MemPage)(unsafe.Pointer(pPage)).FxCellSize})).f(tls, pPage, data+uintptr(pc))) if pc+sz > usableSize { - return Xsqlite3CorruptError(tls, 70499) + return Xsqlite3CorruptError(tls, 70503) } } return SQLITE_OK @@ -28081,7 +28083,7 @@ data = (*MemPage)(unsafe.Pointer(pPage)).FaData + uintptr((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset) if decodeFlags(tls, pPage, int32(*(*U8)(unsafe.Pointer(data)))) != 0 { - return Xsqlite3CorruptError(tls, 70531) + return Xsqlite3CorruptError(tls, 70535) } (*MemPage)(unsafe.Pointer(pPage)).FmaskPage = U16((*BtShared)(unsafe.Pointer(pBt)).FpageSize - U32(1)) @@ -28093,7 +28095,7 @@ (*MemPage)(unsafe.Pointer(pPage)).FnCell = U16(int32(*(*U8)(unsafe.Pointer(data + 3)))<<8 | int32(*(*U8)(unsafe.Pointer(data + 3 + 1)))) if U32((*MemPage)(unsafe.Pointer(pPage)).FnCell) > ((*BtShared)(unsafe.Pointer(pBt)).FpageSize-U32(8))/U32(6) { - return Xsqlite3CorruptError(tls, 70545) + return Xsqlite3CorruptError(tls, 70549) } (*MemPage)(unsafe.Pointer(pPage)).FnFree = -1 @@ -28196,7 +28198,7 @@ if !(pgno > btreePagecount(tls, pBt)) { goto __1 } - rc = Xsqlite3CorruptError(tls, 70700) + rc = Xsqlite3CorruptError(tls, 70704) goto getAndInitPage_error1 __1: ; @@ -28224,7 +28226,7 @@ if !(pCur != 0 && (int32((*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppPage)))).FnCell) < 1 || int32((*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppPage)))).FintKey) != int32((*BtCursor)(unsafe.Pointer(pCur)).FcurIntKey))) { goto __5 } - rc = Xsqlite3CorruptError(tls, 70721) + rc = Xsqlite3CorruptError(tls, 
70725) goto getAndInitPage_error2 __5: ; @@ -28263,7 +28265,7 @@ if Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppPage)))).FpDbPage) > 1 { releasePage(tls, *(*uintptr)(unsafe.Pointer(ppPage))) *(*uintptr)(unsafe.Pointer(ppPage)) = uintptr(0) - return Xsqlite3CorruptError(tls, 70787) + return Xsqlite3CorruptError(tls, 70791) } (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppPage)))).FisInit = U8(0) } else { @@ -29135,7 +29137,7 @@ if !(Xsqlite3WritableSchema(tls, (*BtShared)(unsafe.Pointer(pBt)).Fdb) == 0) { goto __18 } - rc = Xsqlite3CorruptError(tls, 71722) + rc = Xsqlite3CorruptError(tls, 71726) goto page1_init_failed goto __19 __18: @@ -29550,7 +29552,7 @@ if int32(eType) == PTRMAP_OVERFLOW2 { if Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer(pPage)).FaData) != iFrom { - return Xsqlite3CorruptError(tls, 72143) + return Xsqlite3CorruptError(tls, 72147) } Xsqlite3Put4byte(tls, (*MemPage)(unsafe.Pointer(pPage)).FaData, iTo) } else { @@ -29576,7 +29578,7 @@ })(unsafe.Pointer(&struct{ uintptr }{(*MemPage)(unsafe.Pointer(pPage)).FxParseCell})).f(tls, pPage, pCell, bp) if U32((*CellInfo)(unsafe.Pointer(bp)).FnLocal) < (*CellInfo)(unsafe.Pointer(bp)).FnPayload { if pCell+uintptr((*CellInfo)(unsafe.Pointer(bp)).FnSize) > (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr((*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize) { - return Xsqlite3CorruptError(tls, 72162) + return Xsqlite3CorruptError(tls, 72166) } if iFrom == Xsqlite3Get4byte(tls, pCell+uintptr((*CellInfo)(unsafe.Pointer(bp)).FnSize)-uintptr(4)) { Xsqlite3Put4byte(tls, pCell+uintptr((*CellInfo)(unsafe.Pointer(bp)).FnSize)-uintptr(4), iTo) @@ -29585,7 +29587,7 @@ } } else { if pCell+uintptr(4) > (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr((*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize) { - return Xsqlite3CorruptError(tls, 72171) + return Xsqlite3CorruptError(tls, 72175) } if Xsqlite3Get4byte(tls, pCell) == iFrom { Xsqlite3Put4byte(tls, pCell, iTo) @@ -29596,7 +29598,7 @@ if i == nCell { if int32(eType) != PTRMAP_BTREE || Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr(int32((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset)+8)) != iFrom { - return Xsqlite3CorruptError(tls, 72183) + return Xsqlite3CorruptError(tls, 72187) } Xsqlite3Put4byte(tls, (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr(int32((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset)+8), iTo) } @@ -29612,7 +29614,7 @@ var pPager uintptr = (*BtShared)(unsafe.Pointer(pBt)).FpPager if iDbPage < Pgno(3) { - return Xsqlite3CorruptError(tls, 72218) + return Xsqlite3CorruptError(tls, 72222) } *(*int32)(unsafe.Pointer(bp)) = Xsqlite3PagerMovepage(tls, pPager, (*MemPage)(unsafe.Pointer(pDbPage)).FpDbPage, iFreePage, isCommit) @@ -29673,7 +29675,7 @@ return rc } if int32(*(*U8)(unsafe.Pointer(bp))) == PTRMAP_ROOTPAGE { - return Xsqlite3CorruptError(tls, 72316) + return Xsqlite3CorruptError(tls, 72320) } if int32(*(*U8)(unsafe.Pointer(bp))) == PTRMAP_FREEPAGE { @@ -29708,7 +29710,7 @@ releasePage(tls, *(*uintptr)(unsafe.Pointer(bp + 32))) if *(*Pgno)(unsafe.Pointer(bp + 40)) > dbSize { releasePage(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) - return Xsqlite3CorruptError(tls, 72368) + return Xsqlite3CorruptError(tls, 72372) } } @@ -29768,7 +29770,7 @@ var nFin Pgno = finalDbSize(tls, pBt, nOrig, nFree) if nOrig < nFin || nFree >= nOrig { - rc = Xsqlite3CorruptError(tls, 72436) + rc = Xsqlite3CorruptError(tls, 72440) } else if nFree > Pgno(0) { rc = 
saveAllCursors(tls, pBt, uint32(0), uintptr(0)) if rc == SQLITE_OK { @@ -29807,7 +29809,7 @@ nOrig = btreePagecount(tls, pBt) if ptrmapPageno(tls, pBt, nOrig) == nOrig || nOrig == U32(Xsqlite3PendingByte)/(*BtShared)(unsafe.Pointer(pBt)).FpageSize+U32(1) { - return Xsqlite3CorruptError(tls, 72487) + return Xsqlite3CorruptError(tls, 72491) } nFree = Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer((*BtShared)(unsafe.Pointer(pBt)).FpPage1)).FaData+36) @@ -29838,7 +29840,7 @@ } nFin = finalDbSize(tls, pBt, nOrig, nVac) if nFin > nOrig { - return Xsqlite3CorruptError(tls, 72514) + return Xsqlite3CorruptError(tls, 72518) } if nFin < nOrig { rc = saveAllCursors(tls, pBt, uint32(0), uintptr(0)) @@ -30179,7 +30181,7 @@ if iTable <= Pgno(1) { if iTable < Pgno(1) { - return Xsqlite3CorruptError(tls, 72978) + return Xsqlite3CorruptError(tls, 72982) } else if btreePagecount(tls, pBt) == Pgno(0) { iTable = Pgno(0) } @@ -30423,14 +30425,14 @@ var pBt uintptr = (*BtCursor)(unsafe.Pointer(pCur)).FpBt if int32((*BtCursor)(unsafe.Pointer(pCur)).Fix) >= int32((*MemPage)(unsafe.Pointer(pPage)).FnCell) { - return Xsqlite3CorruptError(tls, 73385) + return Xsqlite3CorruptError(tls, 73389) } getCellInfo(tls, pCur) aPayload = (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload if Uptr((int64(aPayload)-int64((*MemPage)(unsafe.Pointer(pPage)).FaData))/1) > Uptr((*BtShared)(unsafe.Pointer(pBt)).FusableSize-U32((*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal)) { - return Xsqlite3CorruptError(tls, 73400) + return Xsqlite3CorruptError(tls, 73404) } if offset < U32((*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal) { @@ -30475,7 +30477,7 @@ for *(*Pgno)(unsafe.Pointer(bp)) != 0 { if *(*Pgno)(unsafe.Pointer(bp)) > (*BtShared)(unsafe.Pointer(pBt)).FnPage { - return Xsqlite3CorruptError(tls, 73462) + return Xsqlite3CorruptError(tls, 73466) } *(*Pgno)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pCur)).FaOverflow + uintptr(iIdx)*4)) = *(*Pgno)(unsafe.Pointer(bp)) @@ -30524,7 +30526,7 @@ } if rc == SQLITE_OK && amt > U32(0) { - return Xsqlite3CorruptError(tls, 73547) + return Xsqlite3CorruptError(tls, 73551) } return rc } @@ -30604,7 +30606,7 @@ func moveToChild(tls *libc.TLS, pCur uintptr, newPgno U32) int32 { if int32((*BtCursor)(unsafe.Pointer(pCur)).FiPage) >= BTCURSOR_MAX_DEPTH-1 { - return Xsqlite3CorruptError(tls, 73684) + return Xsqlite3CorruptError(tls, 73688) } (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnSize = U16(0) *(*U8)(unsafe.Pointer(pCur + 1)) &= libc.Uint8FromInt32(libc.CplInt32(BTCF_ValidNKey | BTCF_ValidOvfl)) @@ -30695,7 +30697,7 @@ if !(int32((*MemPage)(unsafe.Pointer(pRoot)).FisInit) == 0 || libc.Bool32((*BtCursor)(unsafe.Pointer(pCur)).FpKeyInfo == uintptr(0)) != int32((*MemPage)(unsafe.Pointer(pRoot)).FintKey)) { goto __11 } - return Xsqlite3CorruptError(tls, 73823) + return Xsqlite3CorruptError(tls, 73827) __11: ; skip_init: @@ -30715,7 +30717,7 @@ if !((*MemPage)(unsafe.Pointer(pRoot)).Fpgno != Pgno(1)) { goto __16 } - return Xsqlite3CorruptError(tls, 73835) + return Xsqlite3CorruptError(tls, 73839) __16: ; subpage = Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer(pRoot)).FaData+uintptr(int32((*MemPage)(unsafe.Pointer(pRoot)).FhdrOffset)+8)) @@ -30925,7 +30927,7 @@ if !(pCell >= (*MemPage)(unsafe.Pointer(pPage)).FaDataEnd) { goto __21 } - return Xsqlite3CorruptError(tls, 74077) + return Xsqlite3CorruptError(tls, 74081) __21: ; goto __19 @@ -31129,7 +31131,7 @@ if !!(int32((*MemPage)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pCur)).FpPage)).FisInit) != 0) { goto __4 } - return Xsqlite3CorruptError(tls, 74273) 
+ return Xsqlite3CorruptError(tls, 74277) __4: ; goto bypass_moveto_root @@ -31194,7 +31196,7 @@ if !(nCell < 2 || U32(nCell)/(*BtShared)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pCur)).FpBt)).FusableSize > (*BtShared)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pCur)).FpBt)).FnPage) { goto __17 } - rc = Xsqlite3CorruptError(tls, 74360) + rc = Xsqlite3CorruptError(tls, 74364) goto moveto_index_finish __17: ; @@ -31242,7 +31244,7 @@ if !((*UnpackedRecord)(unsafe.Pointer(pIdxKey)).FerrCode != 0) { goto __24 } - rc = Xsqlite3CorruptError(tls, 74392) + rc = Xsqlite3CorruptError(tls, 74396) __24: ; goto moveto_index_finish @@ -31361,7 +31363,7 @@ pPage = (*BtCursor)(unsafe.Pointer(pCur)).FpPage idx = int32(libc.PreIncUint16(&(*BtCursor)(unsafe.Pointer(pCur)).Fix, 1)) if !(int32((*MemPage)(unsafe.Pointer(pPage)).FisInit) != 0) || Xsqlite3FaultSim(tls, 412) != 0 { - return Xsqlite3CorruptError(tls, 74508) + return Xsqlite3CorruptError(tls, 74512) } if idx >= int32((*MemPage)(unsafe.Pointer(pPage)).FnCell) { @@ -31521,7 +31523,7 @@ if !(n >= mxPage) { goto __1 } - return Xsqlite3CorruptError(tls, 74688) + return Xsqlite3CorruptError(tls, 74692) __1: ; if !(n > U32(0)) { @@ -31586,7 +31588,7 @@ if !(iTrunk > mxPage || libc.PostIncUint32(&nSearch, 1) > n) { goto __16 } - rc = Xsqlite3CorruptError(tls, 74744) + rc = Xsqlite3CorruptError(tls, 74748) goto __17 __16: rc = btreeGetUnusedPage(tls, pBt, iTrunk, bp+8, 0) @@ -31622,7 +31624,7 @@ goto __22 } - rc = Xsqlite3CorruptError(tls, 74773) + rc = Xsqlite3CorruptError(tls, 74777) goto end_allocate_page goto __23 __22: @@ -31666,7 +31668,7 @@ if !(iNewTrunk > mxPage) { goto __32 } - rc = Xsqlite3CorruptError(tls, 74807) + rc = Xsqlite3CorruptError(tls, 74811) goto end_allocate_page __32: ; @@ -31778,7 +31780,7 @@ if !(iPage > mxPage || iPage < Pgno(2)) { goto __51 } - rc = Xsqlite3CorruptError(tls, 74872) + rc = Xsqlite3CorruptError(tls, 74876) goto end_allocate_page __51: ; @@ -31936,7 +31938,7 @@ if !(iPage < Pgno(2) || iPage > (*BtShared)(unsafe.Pointer(pBt)).FnPage) { goto __1 } - return Xsqlite3CorruptError(tls, 74999) + return Xsqlite3CorruptError(tls, 75003) __1: ; if !(pMemPage != 0) { @@ -31993,7 +31995,7 @@ if !(iTrunk > btreePagecount(tls, pBt)) { goto __10 } - *(*int32)(unsafe.Pointer(bp + 8)) = Xsqlite3CorruptError(tls, 75046) + *(*int32)(unsafe.Pointer(bp + 8)) = Xsqlite3CorruptError(tls, 75050) goto freepage_out __10: ; @@ -32009,7 +32011,7 @@ if !(nLeaf > (*BtShared)(unsafe.Pointer(pBt)).FusableSize/U32(4)-U32(2)) { goto __12 } - *(*int32)(unsafe.Pointer(bp + 8)) = Xsqlite3CorruptError(tls, 75057) + *(*int32)(unsafe.Pointer(bp + 8)) = Xsqlite3CorruptError(tls, 75061) goto freepage_out __12: ; @@ -32083,7 +32085,7 @@ var ovflPageSize U32 if pCell+uintptr((*CellInfo)(unsafe.Pointer(pInfo)).FnSize) > (*MemPage)(unsafe.Pointer(pPage)).FaDataEnd { - return Xsqlite3CorruptError(tls, 75146) + return Xsqlite3CorruptError(tls, 75150) } ovflPgno = Xsqlite3Get4byte(tls, pCell+uintptr((*CellInfo)(unsafe.Pointer(pInfo)).FnSize)-uintptr(4)) pBt = (*MemPage)(unsafe.Pointer(pPage)).FpBt @@ -32095,7 +32097,7 @@ *(*Pgno)(unsafe.Pointer(bp + 8)) = Pgno(0) *(*uintptr)(unsafe.Pointer(bp)) = uintptr(0) if ovflPgno < Pgno(2) || ovflPgno > btreePagecount(tls, pBt) { - return Xsqlite3CorruptError(tls, 75163) + return Xsqlite3CorruptError(tls, 75167) } if nOvfl != 0 { rc = getOverflowPage(tls, pBt, ovflPgno, bp, bp+8) @@ -32106,7 +32108,7 @@ if (*(*uintptr)(unsafe.Pointer(bp)) != 0 || libc.AssignPtrUintptr(bp, btreePageLookup(tls, pBt, ovflPgno)) != uintptr(0)) && 
Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FpDbPage) != 1 { - rc = Xsqlite3CorruptError(tls, 75183) + rc = Xsqlite3CorruptError(tls, 75187) } else { rc = freePage2(tls, pBt, *(*uintptr)(unsafe.Pointer(bp)), ovflPgno) } @@ -32271,7 +32273,7 @@ hdr = int32((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset) if pc+U32(sz) > (*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize { - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 75436) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 75440) return } rc = freeSpace(tls, pPage, uint16(pc), uint16(sz)) @@ -32550,12 +32552,12 @@ if Uptr(pCell) >= Uptr(aData+uintptr(j)) && Uptr(pCell) < Uptr(pEnd) { if Uptr(pCell+uintptr(sz)) > Uptr(pEnd) { - return Xsqlite3CorruptError(tls, 75737) + return Xsqlite3CorruptError(tls, 75741) } pCell = pTmp + uintptr((int64(pCell)-int64(aData))/1) } else if Uptr(pCell+uintptr(sz)) > Uptr(pSrcEnd) && Uptr(pCell) < Uptr(pSrcEnd) { - return Xsqlite3CorruptError(tls, 75742) + return Xsqlite3CorruptError(tls, 75746) } pData -= uintptr(sz) @@ -32563,7 +32565,7 @@ *(*U8)(unsafe.Pointer(pCellptr + 1)) = U8((int64(pData) - int64(aData)) / 1) pCellptr += uintptr(2) if pData < pCellptr { - return Xsqlite3CorruptError(tls, 75748) + return Xsqlite3CorruptError(tls, 75752) } libc.Xmemmove(tls, pData, pCell, uint64(sz)) @@ -32623,7 +32625,7 @@ if Uptr(*(*uintptr)(unsafe.Pointer((*CellArray)(unsafe.Pointer(pCArray)).FapCell + uintptr(i)*8))+uintptr(sz)) > Uptr(pEnd) && Uptr(*(*uintptr)(unsafe.Pointer((*CellArray)(unsafe.Pointer(pCArray)).FapCell + uintptr(i)*8))) < Uptr(pEnd) { - Xsqlite3CorruptError(tls, 75833) + Xsqlite3CorruptError(tls, 75837) return 1 } libc.Xmemmove(tls, pSlot, *(*uintptr)(unsafe.Pointer((*CellArray)(unsafe.Pointer(pCArray)).FapCell + uintptr(i)*8)), uint64(sz)) @@ -32712,7 +32714,7 @@ if !(nShift > nCell) { goto __2 } - return Xsqlite3CorruptError(tls, 75947) + return Xsqlite3CorruptError(tls, 75951) __2: ; libc.Xmemmove(tls, (*MemPage)(unsafe.Pointer(pPg)).FaCellIdx, (*MemPage)(unsafe.Pointer(pPg)).FaCellIdx+uintptr(nShift*2), uint64(nCell*2)) @@ -32828,7 +32830,7 @@ var pBt uintptr = (*MemPage)(unsafe.Pointer(pPage)).FpBt if int32((*MemPage)(unsafe.Pointer(pPage)).FnCell) == 0 { - return Xsqlite3CorruptError(tls, 76060) + return Xsqlite3CorruptError(tls, 76064) } *(*int32)(unsafe.Pointer(bp + 136)) = allocateBtreePage(tls, pBt, bp, bp+8, uint32(0), uint8(0)) @@ -33148,7 +33150,7 @@ if !(int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pOld)).FaData))) != int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp + 112)))).FaData)))) { goto __25 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76481) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76485) goto balance_cleanup __25: ; @@ -33159,7 +33161,7 @@ if !(limit < int32(*(*U16)(unsafe.Pointer(pOld + 28)))) { goto __27 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76505) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76509) goto balance_cleanup __27: ; @@ -33317,7 +33319,7 @@ if !(k > NB+2) { goto __55 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76606) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76610) goto balance_cleanup __55: ; @@ -33391,7 +33393,7 @@ }()) { goto __67 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76639) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76643) 
goto balance_cleanup __67: ; @@ -33454,7 +33456,7 @@ }()) { goto __75 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76683) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76687) goto balance_cleanup __75: ; @@ -33482,7 +33484,7 @@ *(*int32)(unsafe.Pointer(bp + 172)) == SQLITE_OK) { goto __81 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76716) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76720) __81: ; if !(*(*int32)(unsafe.Pointer(bp + 172)) != 0) { @@ -33743,7 +33745,7 @@ if !(Uptr(pSrcEnd) >= Uptr(pCell1) && Uptr(pSrcEnd) < Uptr(pCell1+uintptr(sz2))) { goto __121 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76916) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76920) goto balance_cleanup __121: ; @@ -33935,7 +33937,7 @@ if pOther != pCur && int32((*BtCursor)(unsafe.Pointer(pOther)).FeState) == CURSOR_VALID && (*BtCursor)(unsafe.Pointer(pOther)).FpPage == (*BtCursor)(unsafe.Pointer(pCur)).FpPage { - return Xsqlite3CorruptError(tls, 77146) + return Xsqlite3CorruptError(tls, 77150) } } return SQLITE_OK @@ -33973,7 +33975,7 @@ break } } else if Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(pPage)).FpDbPage) > 1 { - rc = Xsqlite3CorruptError(tls, 77206) + rc = Xsqlite3CorruptError(tls, 77210) } else { var pParent uintptr = *(*uintptr)(unsafe.Pointer(pCur + 144 + uintptr(iPage-1)*8)) var iIdx int32 = int32(*(*U16)(unsafe.Pointer(pCur + 88 + uintptr(iPage-1)*2))) @@ -34079,7 +34081,7 @@ return rc } if Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FpDbPage) != 1 || (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FisInit != 0 { - rc = Xsqlite3CorruptError(tls, 77370) + rc = Xsqlite3CorruptError(tls, 77374) } else { if U32(iOffset)+ovflPageSize < U32(nTotal) { ovflPgno = Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FaData) @@ -34104,7 +34106,7 @@ if (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload+uintptr((*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal) > (*MemPage)(unsafe.Pointer(pPage)).FaDataEnd || (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload < (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr((*MemPage)(unsafe.Pointer(pPage)).FcellOffset) { - return Xsqlite3CorruptError(tls, 77398) + return Xsqlite3CorruptError(tls, 77402) } if int32((*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal) == nTotal { return btreeOverwriteContent(tls, pPage, (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload, pX, @@ -34174,7 +34176,7 @@ goto __3 } - return Xsqlite3CorruptError(tls, 77479) + return Xsqlite3CorruptError(tls, 77483) __3: ; __1: @@ -34287,7 +34289,7 @@ goto __21 } - *(*int32)(unsafe.Pointer(bp + 120)) = Xsqlite3CorruptError(tls, 77602) + *(*int32)(unsafe.Pointer(bp + 120)) = Xsqlite3CorruptError(tls, 77606) goto __22 __21: *(*int32)(unsafe.Pointer(bp + 120)) = btreeComputeFreeSpace(tls, pPage) @@ -34347,6 +34349,7 @@ __25: ; idx = int32((*BtCursor)(unsafe.Pointer(pCur)).Fix) + (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnSize = U16(0) if !(*(*int32)(unsafe.Pointer(bp)) == 0) { goto __31 } @@ -34354,7 +34357,7 @@ if !(idx >= int32((*MemPage)(unsafe.Pointer(pPage)).FnCell)) { goto __33 } - return Xsqlite3CorruptError(tls, 77640) + return Xsqlite3CorruptError(tls, 77645) __33: ; *(*int32)(unsafe.Pointer(bp + 120)) = Xsqlite3PagerWrite(tls, (*MemPage)(unsafe.Pointer(pPage)).FpDbPage) @@ -34392,13 +34395,13 @@ if !(oldCell < 
(*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset)+uintptr(10)) { goto __39 } - return Xsqlite3CorruptError(tls, 77667) + return Xsqlite3CorruptError(tls, 77672) __39: ; if !(oldCell+uintptr(*(*int32)(unsafe.Pointer(bp + 124))) > (*MemPage)(unsafe.Pointer(pPage)).FaDataEnd) { goto __40 } - return Xsqlite3CorruptError(tls, 77670) + return Xsqlite3CorruptError(tls, 77675) __40: ; libc.Xmemcpy(tls, oldCell, newCell, uint64(*(*int32)(unsafe.Pointer(bp + 124)))) @@ -34429,7 +34432,6 @@ ; *(*int32)(unsafe.Pointer(bp + 120)) = insertCell(tls, pPage, idx, newCell, *(*int32)(unsafe.Pointer(bp + 124)), uintptr(0), uint32(0)) - (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnSize = U16(0) if !((*MemPage)(unsafe.Pointer(pPage)).FnOverflow != 0) { goto __44 } @@ -34504,7 +34506,7 @@ nIn = U32((*BtCursor)(unsafe.Pointer(pSrc)).Finfo.FnLocal) aIn = (*BtCursor)(unsafe.Pointer(pSrc)).Finfo.FpPayload if aIn+uintptr(nIn) > (*MemPage)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pSrc)).FpPage)).FaDataEnd { - return Xsqlite3CorruptError(tls, 77773) + return Xsqlite3CorruptError(tls, 77777) } nRem = (*BtCursor)(unsafe.Pointer(pSrc)).Finfo.FnPayload if nIn == nRem && nIn < U32((*MemPage)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pDest)).FpPage)).FmaxLocal) { @@ -34529,7 +34531,7 @@ if nRem > nIn { if aIn+uintptr(nIn)+uintptr(4) > (*MemPage)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pSrc)).FpPage)).FaDataEnd { - return Xsqlite3CorruptError(tls, 77798) + return Xsqlite3CorruptError(tls, 77802) } ovflIn = Xsqlite3Get4byte(tls, (*BtCursor)(unsafe.Pointer(pSrc)).Finfo.FpPayload+uintptr(nIn)) } @@ -34630,7 +34632,7 @@ return *(*int32)(unsafe.Pointer(bp + 24)) } } else { - return Xsqlite3CorruptError(tls, 77894) + return Xsqlite3CorruptError(tls, 77898) } } @@ -34638,11 +34640,11 @@ iCellIdx = int32((*BtCursor)(unsafe.Pointer(pCur)).Fix) pPage = (*BtCursor)(unsafe.Pointer(pCur)).FpPage if int32((*MemPage)(unsafe.Pointer(pPage)).FnCell) <= iCellIdx { - return Xsqlite3CorruptError(tls, 77903) + return Xsqlite3CorruptError(tls, 77907) } pCell = (*MemPage)(unsafe.Pointer(pPage)).FaData + uintptr(int32((*MemPage)(unsafe.Pointer(pPage)).FmaskPage)&(int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FaCellIdx + uintptr(2*iCellIdx))))<<8|int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FaCellIdx + uintptr(2*iCellIdx) + 1))))) if (*MemPage)(unsafe.Pointer(pPage)).FnFree < 0 && btreeComputeFreeSpace(tls, pPage) != 0 { - return Xsqlite3CorruptError(tls, 77907) + return Xsqlite3CorruptError(tls, 77911) } bPreserve = U8(libc.Bool32(int32(flags)&BTREE_SAVEPOSITION != 0)) @@ -34717,7 +34719,7 @@ } pCell = (*MemPage)(unsafe.Pointer(pLeaf)).FaData + uintptr(int32((*MemPage)(unsafe.Pointer(pLeaf)).FmaskPage)&(int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pLeaf)).FaCellIdx + uintptr(2*(int32((*MemPage)(unsafe.Pointer(pLeaf)).FnCell)-1)))))<<8|int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pLeaf)).FaCellIdx + uintptr(2*(int32((*MemPage)(unsafe.Pointer(pLeaf)).FnCell)-1)) + 1))))) if pCell < (*MemPage)(unsafe.Pointer(pLeaf)).FaData+4 { - return Xsqlite3CorruptError(tls, 77998) + return Xsqlite3CorruptError(tls, 78002) } nCell = int32((*struct { f func(*libc.TLS, uintptr, uintptr) U16 @@ -34786,7 +34788,7 @@ Xsqlite3BtreeGetMeta(tls, p, BTREE_LARGEST_ROOT_PAGE, bp) if *(*Pgno)(unsafe.Pointer(bp)) > btreePagecount(tls, pBt) { - return Xsqlite3CorruptError(tls, 78114) + return Xsqlite3CorruptError(tls, 78118) } *(*Pgno)(unsafe.Pointer(bp))++ @@ -34815,7 +34817,7 @@ } 
*(*int32)(unsafe.Pointer(bp + 40)) = ptrmapGet(tls, pBt, *(*Pgno)(unsafe.Pointer(bp)), bp+32, bp+36) if int32(*(*U8)(unsafe.Pointer(bp + 32))) == PTRMAP_ROOTPAGE || int32(*(*U8)(unsafe.Pointer(bp + 32))) == PTRMAP_FREEPAGE { - *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3CorruptError(tls, 78162) + *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3CorruptError(tls, 78166) } if *(*int32)(unsafe.Pointer(bp + 40)) != SQLITE_OK { releasePage(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) @@ -34891,7 +34893,7 @@ if !(pgno > btreePagecount(tls, pBt)) { goto __1 } - return Xsqlite3CorruptError(tls, 78252) + return Xsqlite3CorruptError(tls, 78256) __1: ; *(*int32)(unsafe.Pointer(bp + 32)) = getAndInitPage(tls, pBt, pgno, bp, uintptr(0), 0) @@ -34905,7 +34907,7 @@ Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FpDbPage) != 1+libc.Bool32(pgno == Pgno(1))) { goto __3 } - *(*int32)(unsafe.Pointer(bp + 32)) = Xsqlite3CorruptError(tls, 78259) + *(*int32)(unsafe.Pointer(bp + 32)) = Xsqlite3CorruptError(tls, 78263) goto cleardatabasepage_out __3: ; @@ -35039,7 +35041,7 @@ var pBt uintptr = (*Btree)(unsafe.Pointer(p)).FpBt if iTable > btreePagecount(tls, pBt) { - return Xsqlite3CorruptError(tls, 78363) + return Xsqlite3CorruptError(tls, 78367) } *(*int32)(unsafe.Pointer(bp + 12)) = Xsqlite3BtreeClearTable(tls, p, int32(iTable), uintptr(0)) @@ -37508,7 +37510,7 @@ var rc int32 (*Mem)(unsafe.Pointer(pMem)).Fflags = U16(MEM_Null) if Xsqlite3BtreeMaxRecordSize(tls, pCur) < Sqlite3_int64(offset+amt) { - return Xsqlite3CorruptError(tls, 81630) + return Xsqlite3CorruptError(tls, 81634) } if SQLITE_OK == libc.AssignInt32(&rc, Xsqlite3VdbeMemClearAndResize(tls, pMem, int32(amt+U32(1)))) { rc = Xsqlite3BtreePayload(tls, pCur, offset, amt, (*Mem)(unsafe.Pointer(pMem)).Fz) @@ -38157,7 +38159,7 @@ return Xsqlite3GetVarint32(tls, a, bp) }()) if *(*int32)(unsafe.Pointer(bp)) > nRec || iHdr >= *(*int32)(unsafe.Pointer(bp)) { - return Xsqlite3CorruptError(tls, 82270) + return Xsqlite3CorruptError(tls, 82274) } iField = *(*int32)(unsafe.Pointer(bp)) for i = 0; i <= iCol; i++ { @@ -38172,14 +38174,14 @@ }()) if iHdr > *(*int32)(unsafe.Pointer(bp)) { - return Xsqlite3CorruptError(tls, 82276) + return Xsqlite3CorruptError(tls, 82280) } szField = int32(Xsqlite3VdbeSerialTypeLen(tls, *(*U32)(unsafe.Pointer(bp + 4)))) iField = iField + szField } if iField > nRec { - return Xsqlite3CorruptError(tls, 82282) + return Xsqlite3CorruptError(tls, 82286) } if pMem == uintptr(0) { pMem = libc.AssignPtrUintptr(ppVal, Xsqlite3ValueNew(tls, db)) @@ -40483,7 +40485,7 @@ return rc } if *(*int32)(unsafe.Pointer(bp)) != 0 { - return Xsqlite3CorruptError(tls, 86058) + return Xsqlite3CorruptError(tls, 86062) } (*VdbeCursor)(unsafe.Pointer(p)).FdeferredMoveto = U8(0) (*VdbeCursor)(unsafe.Pointer(p)).FcacheStatus = U32(CACHE_STALE) @@ -41034,7 +41036,7 @@ i = 0 } if d1 > uint32(nKey1) { - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 86985)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 86989)) return 0 } @@ -41099,7 +41101,7 @@ if d1+U32((*Mem)(unsafe.Pointer(bp+8)).Fn) > uint32(nKey1) || int32((*KeyInfo)(unsafe.Pointer(libc.AssignUintptr(&pKeyInfo, (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FpKeyInfo))).FnAllField) <= i { - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87062)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87066)) return 0 } else if 
*(*uintptr)(unsafe.Pointer(pKeyInfo + 32 + uintptr(i)*8)) != 0 { (*Mem)(unsafe.Pointer(bp + 8)).Fenc = (*KeyInfo)(unsafe.Pointer(pKeyInfo)).Fenc @@ -41133,7 +41135,7 @@ var nStr int32 = int32((*(*U32)(unsafe.Pointer(bp + 64)) - U32(12)) / U32(2)) if d1+U32(nStr) > uint32(nKey1) { - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87092)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87096)) return 0 } else if int32((*Mem)(unsafe.Pointer(pRhs)).Fflags)&MEM_Zero != 0 { if !(isAllZero(tls, aKey1+uintptr(d1), nStr) != 0) { @@ -41183,7 +41185,7 @@ } idx1 = idx1 + U32(Xsqlite3VarintLen(tls, uint64(*(*U32)(unsafe.Pointer(bp + 64))))) if idx1 >= *(*U32)(unsafe.Pointer(bp + 4)) { - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87136)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87140)) return 0 } } @@ -41329,7 +41331,7 @@ if !(szHdr+nStr > nKey1) { goto __7 } - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87299)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87303)) return 0 __7: ; @@ -41500,7 +41502,7 @@ idx_rowid_corruption: ; Xsqlite3VdbeMemReleaseMalloc(tls, bp) - return Xsqlite3CorruptError(tls, 87457) + return Xsqlite3CorruptError(tls, 87461) } // Compare the key of the index entry that cursor pC is pointing to against @@ -41526,7 +41528,7 @@ if nCellKey <= int64(0) || nCellKey > int64(0x7fffffff) { *(*int32)(unsafe.Pointer(res)) = 0 - return Xsqlite3CorruptError(tls, 87490) + return Xsqlite3CorruptError(tls, 87494) } Xsqlite3VdbeMemInit(tls, bp, db, uint16(0)) rc = Xsqlite3VdbeMemFromBtreeZeroOffset(tls, pCur, U32(nCellKey), bp) @@ -41800,7 +41802,7 @@ var v uintptr = pStmt var db uintptr = (*Vdbe)(unsafe.Pointer(v)).Fdb if vdbeSafety(tls, v) != 0 { - return Xsqlite3MisuseError(tls, 87854) + return Xsqlite3MisuseError(tls, 87858) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) if (*Vdbe)(unsafe.Pointer(v)).FstartTime > int64(0) { @@ -42415,7 +42417,7 @@ var db uintptr if vdbeSafetyNotNull(tls, v) != 0 { - return Xsqlite3MisuseError(tls, 88544) + return Xsqlite3MisuseError(tls, 88548) } db = (*Vdbe)(unsafe.Pointer(v)).Fdb Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) @@ -42935,7 +42937,7 @@ var pVar uintptr if vdbeSafetyNotNull(tls, p) != 0 { - return Xsqlite3MisuseError(tls, 89208) + return Xsqlite3MisuseError(tls, 89212) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer((*Vdbe)(unsafe.Pointer(p)).Fdb)).Fmutex) if int32((*Vdbe)(unsafe.Pointer(p)).FeVdbeState) != VDBE_READY_STATE { @@ -42943,7 +42945,7 @@ Xsqlite3_mutex_leave(tls, (*Sqlite3)(unsafe.Pointer((*Vdbe)(unsafe.Pointer(p)).Fdb)).Fmutex) Xsqlite3_log(tls, SQLITE_MISUSE, ts+5341, libc.VaList(bp, (*Vdbe)(unsafe.Pointer(p)).FzSql)) - return Xsqlite3MisuseError(tls, 89216) + return Xsqlite3MisuseError(tls, 89220) } if i >= uint32((*Vdbe)(unsafe.Pointer(p)).FnVar) { Xsqlite3Error(tls, (*Vdbe)(unsafe.Pointer(p)).Fdb, SQLITE_RANGE) @@ -43348,7 +43350,7 @@ if !(!(p != 0) || (*PreUpdate)(unsafe.Pointer(p)).Fop == SQLITE_INSERT) { goto __1 } - rc = Xsqlite3MisuseError(tls, 89707) + rc = Xsqlite3MisuseError(tls, 89711) goto preupdate_old_out __1: ; @@ -43492,7 +43494,7 @@ if !(!(p != 0) || (*PreUpdate)(unsafe.Pointer(p)).Fop == SQLITE_DELETE) { goto __1 } - rc = Xsqlite3MisuseError(tls, 89809) + rc = Xsqlite3MisuseError(tls, 89813) goto preupdate_new_out __1: ; @@ -43936,10 +43938,6 @@ 
} else if int32((*Mem)(unsafe.Pointer(p)).Fflags)&MEM_Real != 0 { h = h + U64(Xsqlite3VdbeIntValue(tls, p)) } else if int32((*Mem)(unsafe.Pointer(p)).Fflags)&(MEM_Str|MEM_Blob) != 0 { - h = h + U64((*Mem)(unsafe.Pointer(p)).Fn) - if int32((*Mem)(unsafe.Pointer(p)).Fflags)&MEM_Zero != 0 { - h = h + U64(*(*int32)(unsafe.Pointer(p))) - } } } return h @@ -46588,7 +46586,7 @@ goto __9 goto __425 __424: - rc = Xsqlite3CorruptError(tls, 93317) + rc = Xsqlite3CorruptError(tls, 93320) goto abort_due_to_error __425: ; @@ -48348,7 +48346,7 @@ if !((*Op)(unsafe.Pointer(pOp)).Fp2 == 0) { goto __682 } - rc = Xsqlite3CorruptError(tls, 95560) + rc = Xsqlite3CorruptError(tls, 95563) goto __683 __682: goto jump_to_p2 @@ -49126,7 +49124,7 @@ if !((*Op)(unsafe.Pointer(pOp)).Fp5 != 0 && !(Xsqlite3WritableSchema(tls, db) != 0)) { goto __770 } - rc = Xsqlite3ReportError(tls, SQLITE_CORRUPT|int32(3)<<8, 96635, ts+5850) + rc = Xsqlite3ReportError(tls, SQLITE_CORRUPT|int32(3)<<8, 96638, ts+5850) goto abort_due_to_error __770: ; @@ -49236,7 +49234,7 @@ if !(nCellKey <= int64(0) || nCellKey > int64(0x7fffffff)) { goto __781 } - rc = Xsqlite3CorruptError(tls, 96840) + rc = Xsqlite3CorruptError(tls, 96843) goto abort_due_to_error __781: ; @@ -49430,7 +49428,7 @@ goto __803 } - rc = Xsqlite3CorruptError(tls, 97092) + rc = Xsqlite3CorruptError(tls, 97095) __803: ; Xsqlite3DbFreeNN(tls, db, zSql) @@ -50797,7 +50795,7 @@ if !(rc == SQLITE_IOERR|int32(33)<<8) { goto __957 } - rc = Xsqlite3CorruptError(tls, 99031) + rc = Xsqlite3CorruptError(tls, 99034) __957: ; __956: @@ -51317,7 +51315,7 @@ var db uintptr if p == uintptr(0) { - return Xsqlite3MisuseError(tls, 99516) + return Xsqlite3MisuseError(tls, 99519) } db = (*Incrblob)(unsafe.Pointer(p)).Fdb Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) @@ -51400,7 +51398,7 @@ var db uintptr if p == uintptr(0) { - return Xsqlite3MisuseError(tls, 99616) + return Xsqlite3MisuseError(tls, 99619) } db = (*Incrblob)(unsafe.Pointer(p)).Fdb Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) @@ -54821,14 +54819,10 @@ ; Xsqlite3WalkExpr(tls, pWalker, (*Expr)(unsafe.Pointer(pExpr)).FpLeft) if 0 == Xsqlite3ExprCanBeNull(tls, (*Expr)(unsafe.Pointer(pExpr)).FpLeft) && !(int32((*Parse)(unsafe.Pointer(pParse)).FeParseMode) >= PARSE_MODE_RENAME) { - if int32((*Expr)(unsafe.Pointer(pExpr)).Fop) == TK_NOTNULL { - *(*uintptr)(unsafe.Pointer(pExpr + 8)) = ts + 6748 - *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_IsTrue) - } else { - *(*uintptr)(unsafe.Pointer(pExpr + 8)) = ts + 6753 - *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_IsFalse) - } - (*Expr)(unsafe.Pointer(pExpr)).Fop = U8(TK_TRUEFALSE) + *(*int32)(unsafe.Pointer(pExpr + 8)) = libc.Bool32(int32((*Expr)(unsafe.Pointer(pExpr)).Fop) == TK_NOTNULL) + *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_IntValue) + (*Expr)(unsafe.Pointer(pExpr)).Fop = U8(TK_INTEGER) + i = 0 p = pNC __4: @@ -54872,7 +54866,7 @@ var pLeft uintptr = (*Expr)(unsafe.Pointer(pExpr)).FpLeft if (*NameContext)(unsafe.Pointer(pNC)).FncFlags&(NC_IdxExpr|NC_GenCol) != 0 { - notValidImpl(tls, pParse, pNC, ts+6759, uintptr(0), pExpr) + notValidImpl(tls, pParse, pNC, ts+6748, uintptr(0), pExpr) } pRight = (*Expr)(unsafe.Pointer(pExpr)).FpRight @@ -54936,7 +54930,7 @@ (*Expr)(unsafe.Pointer(pExpr)).FiTable = exprProbability(tls, (*ExprList_item)(unsafe.Pointer(pList+8+1*32)).FpExpr) if (*Expr)(unsafe.Pointer(pExpr)).FiTable < 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+6776, libc.VaList(bp, pExpr)) + ts+6765, libc.VaList(bp, pExpr)) 
(*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } } else { @@ -54952,7 +54946,7 @@ var auth int32 = Xsqlite3AuthCheck(tls, pParse, SQLITE_FUNCTION, uintptr(0), (*FuncDef)(unsafe.Pointer(pDef)).FzName, uintptr(0)) if auth != SQLITE_OK { if auth == SQLITE_DENY { - Xsqlite3ErrorMsg(tls, pParse, ts+6840, + Xsqlite3ErrorMsg(tls, pParse, ts+6829, libc.VaList(bp+8, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } @@ -54966,7 +54960,7 @@ } if (*FuncDef)(unsafe.Pointer(pDef)).FfuncFlags&U32(SQLITE_FUNC_CONSTANT) == U32(0) { if (*NameContext)(unsafe.Pointer(pNC)).FncFlags&(NC_IdxExpr|NC_PartIdx|NC_GenCol) != 0 { - notValidImpl(tls, pParse, pNC, ts+6876, uintptr(0), pExpr) + notValidImpl(tls, pParse, pNC, ts+6865, uintptr(0), pExpr) } } else { @@ -54989,30 +54983,30 @@ if 0 == libc.Bool32(int32((*Parse)(unsafe.Pointer(pParse)).FeParseMode) >= PARSE_MODE_RENAME) { if pDef != 0 && (*FuncDef)(unsafe.Pointer(pDef)).FxValue == uintptr(0) && pWin != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+6904, libc.VaList(bp+16, pExpr)) + ts+6893, libc.VaList(bp+16, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } else if is_agg != 0 && (*NameContext)(unsafe.Pointer(pNC)).FncFlags&NC_AllowAgg == 0 || is_agg != 0 && (*FuncDef)(unsafe.Pointer(pDef)).FfuncFlags&U32(SQLITE_FUNC_WINDOW) != 0 && !(pWin != 0) || is_agg != 0 && pWin != 0 && (*NameContext)(unsafe.Pointer(pNC)).FncFlags&NC_AllowWin == 0 { var zType uintptr if (*FuncDef)(unsafe.Pointer(pDef)).FfuncFlags&U32(SQLITE_FUNC_WINDOW) != 0 || pWin != 0 { - zType = ts + 6947 + zType = ts + 6936 } else { - zType = ts + 6954 + zType = ts + 6943 } - Xsqlite3ErrorMsg(tls, pParse, ts+6964, libc.VaList(bp+24, zType, pExpr)) + Xsqlite3ErrorMsg(tls, pParse, ts+6953, libc.VaList(bp+24, zType, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ is_agg = 0 } else if no_such_func != 0 && int32((*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).Finit.Fbusy) == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+6992, libc.VaList(bp+40, pExpr)) + Xsqlite3ErrorMsg(tls, pParse, ts+6981, libc.VaList(bp+40, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } else if wrong_num_args != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+7014, + Xsqlite3ErrorMsg(tls, pParse, ts+7003, libc.VaList(bp+48, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } else if is_agg == 0 && (*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_WinFunc) != U32(0) { Xsqlite3ErrorMsg(tls, pParse, - ts+7058, + ts+7047, libc.VaList(bp+56, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } @@ -55084,15 +55078,15 @@ var nRef int32 = (*NameContext)(unsafe.Pointer(pNC)).FnRef if (*NameContext)(unsafe.Pointer(pNC)).FncFlags&NC_SelfRef != 0 { - notValidImpl(tls, pParse, pNC, ts+7106, pExpr, pExpr) + notValidImpl(tls, pParse, pNC, ts+7095, pExpr, pExpr) } else { Xsqlite3WalkSelect(tls, pWalker, *(*uintptr)(unsafe.Pointer(pExpr + 32))) } if nRef != (*NameContext)(unsafe.Pointer(pNC)).FnRef { *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_VarSelect) - *(*int32)(unsafe.Pointer(pNC + 40)) |= NC_VarSelect } + *(*int32)(unsafe.Pointer(pNC + 40)) |= NC_Subquery } break @@ -55100,7 +55094,7 @@ case TK_VARIABLE: { if (*NameContext)(unsafe.Pointer(pNC)).FncFlags&(NC_IsCheck|NC_PartIdx|NC_IdxExpr|NC_GenCol) != 0 { - notValidImpl(tls, pParse, pNC, ts+7117, pExpr, pExpr) + notValidImpl(tls, pParse, pNC, ts+7106, pExpr, pExpr) } break @@ -55231,7 +55225,7 @@ defer tls.Free(24) Xsqlite3ErrorMsg(tls, pParse, - ts+7128, libc.VaList(bp, i, zType, mx)) + ts+7117, libc.VaList(bp, i, zType, mx)) Xsqlite3RecordErrorOffsetOfExpr(tls, 
(*Parse)(unsafe.Pointer(pParse)).Fdb, pError) } @@ -55251,7 +55245,7 @@ } db = (*Parse)(unsafe.Pointer(pParse)).Fdb if (*ExprList)(unsafe.Pointer(pOrderBy)).FnExpr > *(*int32)(unsafe.Pointer(db + 136 + 2*4)) { - Xsqlite3ErrorMsg(tls, pParse, ts+7184, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+7173, 0) return 1 } for i = 0; i < (*ExprList)(unsafe.Pointer(pOrderBy)).FnExpr; i++ { @@ -55286,7 +55280,7 @@ } if Xsqlite3ExprIsInteger(tls, pE, bp+8) != 0 { if *(*int32)(unsafe.Pointer(bp + 8)) <= 0 || *(*int32)(unsafe.Pointer(bp + 8)) > (*ExprList)(unsafe.Pointer(pEList)).FnExpr { - resolveOutOfRangeError(tls, pParse, ts+7218, i+1, (*ExprList)(unsafe.Pointer(pEList)).FnExpr, pE) + resolveOutOfRangeError(tls, pParse, ts+7207, i+1, (*ExprList)(unsafe.Pointer(pEList)).FnExpr, pE) return 1 } } else { @@ -55343,7 +55337,7 @@ for i = 0; i < (*ExprList)(unsafe.Pointer(pOrderBy)).FnExpr; i++ { if int32(*(*uint16)(unsafe.Pointer(pOrderBy + 8 + uintptr(i)*32 + 16 + 4))&0x4>>2) == 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+7224, libc.VaList(bp, i+1)) + ts+7213, libc.VaList(bp, i+1)) return 1 } } @@ -55371,7 +55365,7 @@ return 0 } if (*ExprList)(unsafe.Pointer(pOrderBy)).FnExpr > *(*int32)(unsafe.Pointer(db + 136 + 2*4)) { - Xsqlite3ErrorMsg(tls, pParse, ts+7285, libc.VaList(bp, zType)) + Xsqlite3ErrorMsg(tls, pParse, ts+7274, libc.VaList(bp, zType)) return 1 } pEList = (*Select)(unsafe.Pointer(pSelect)).FpEList @@ -55585,7 +55579,7 @@ *(*int32)(unsafe.Pointer(bp + 40)) |= NC_UEList if (*Select)(unsafe.Pointer(p)).FpHaving != 0 { if (*Select)(unsafe.Pointer(p)).FselFlags&U32(SF_Aggregate) == U32(0) { - Xsqlite3ErrorMsg(tls, pParse, ts+7316, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+7305, 0) return WRC_Abort } if Xsqlite3ResolveExprNames(tls, bp, (*Select)(unsafe.Pointer(p)).FpHaving) != 0 { @@ -55625,7 +55619,7 @@ if (*Select)(unsafe.Pointer(p)).FpOrderBy != uintptr(0) && isCompound <= nCompound && - resolveOrderGroupBy(tls, bp, p, (*Select)(unsafe.Pointer(p)).FpOrderBy, ts+7218) != 0 { + resolveOrderGroupBy(tls, bp, p, (*Select)(unsafe.Pointer(p)).FpOrderBy, ts+7207) != 0 { return WRC_Abort } if (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { @@ -55636,7 +55630,7 @@ if pGroupBy != 0 { var pItem uintptr - if resolveOrderGroupBy(tls, bp, p, pGroupBy, ts+7355) != 0 || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { + if resolveOrderGroupBy(tls, bp, p, pGroupBy, ts+7344) != 0 || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { return WRC_Abort } i = 0 @@ -55648,7 +55642,7 @@ { if (*Expr)(unsafe.Pointer((*ExprList_item)(unsafe.Pointer(pItem)).FpExpr)).Fflags&U32(EP_Agg) != U32(0) { Xsqlite3ErrorMsg(tls, pParse, - ts+7361, 0) + ts+7350, 0) return WRC_Abort } @@ -56512,7 +56506,7 @@ var mxHeight int32 = *(*int32)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb + 136 + 3*4)) if nHeight > mxHeight { Xsqlite3ErrorMsg(tls, pParse, - ts+7420, libc.VaList(bp, mxHeight)) + ts+7409, libc.VaList(bp, mxHeight)) rc = SQLITE_ERROR } return rc @@ -56761,10 +56755,10 @@ nExprElem = 1 } if nExprElem != nElem { - Xsqlite3ErrorMsg(tls, pParse, ts+7468, + Xsqlite3ErrorMsg(tls, pParse, ts+7457, libc.VaList(bp, nExprElem, func() uintptr { if nExprElem > 1 { - return ts + 7512 + return ts + 7501 } return ts + 1538 }(), nElem)) @@ -56805,7 +56799,7 @@ !(int32((*Parse)(unsafe.Pointer(pParse)).FeParseMode) >= PARSE_MODE_RENAME) { Xsqlite3ExprDeferredDelete(tls, pParse, pLeft) Xsqlite3ExprDeferredDelete(tls, pParse, pRight) - return Xsqlite3Expr(tls, db, TK_INTEGER, ts+7514) + return Xsqlite3Expr(tls, db, TK_INTEGER, ts+7503) } else { 
return Xsqlite3PExpr(tls, pParse, TK_AND, pLeft, pRight) } @@ -56831,7 +56825,7 @@ if pList != 0 && (*ExprList)(unsafe.Pointer(pList)).FnExpr > *(*int32)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb + 136 + 6*4)) && !(int32((*Parse)(unsafe.Pointer(pParse)).Fnested) != 0) { - Xsqlite3ErrorMsg(tls, pParse, ts+7516, libc.VaList(bp, pToken)) + Xsqlite3ErrorMsg(tls, pParse, ts+7505, libc.VaList(bp, pToken)) } *(*uintptr)(unsafe.Pointer(pNew + 32)) = pList *(*U32)(unsafe.Pointer(pNew + 4)) |= U32(EP_HasFunc) @@ -56859,7 +56853,7 @@ if (*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_FromDDL) != U32(0) { if (*FuncDef)(unsafe.Pointer(pDef)).FfuncFlags&U32(SQLITE_FUNC_DIRECT) != U32(0) || (*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).Fflags&uint64(SQLITE_TrustedSchema) == uint64(0) { - Xsqlite3ErrorMsg(tls, pParse, ts+7550, libc.VaList(bp, pExpr)) + Xsqlite3ErrorMsg(tls, pParse, ts+7539, libc.VaList(bp, pExpr)) } } } @@ -56906,7 +56900,7 @@ } if bOk == 0 || *(*I64)(unsafe.Pointer(bp + 8)) < int64(1) || *(*I64)(unsafe.Pointer(bp + 8)) > I64(*(*int32)(unsafe.Pointer(db + 136 + 9*4))) { - Xsqlite3ErrorMsg(tls, pParse, ts+7570, + Xsqlite3ErrorMsg(tls, pParse, ts+7559, libc.VaList(bp, *(*int32)(unsafe.Pointer(db + 136 + 9*4)))) Xsqlite3RecordErrorOffsetOfExpr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pExpr) return @@ -56931,7 +56925,7 @@ } (*Expr)(unsafe.Pointer(pExpr)).FiColumn = x if int32(x) > *(*int32)(unsafe.Pointer(db + 136 + 9*4)) { - Xsqlite3ErrorMsg(tls, pParse, ts+7613, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+7602, 0) Xsqlite3RecordErrorOffsetOfExpr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pExpr) } } @@ -57506,7 +57500,7 @@ if !(int32((*Expr)(unsafe.Pointer(pExpr)).Fop) != TK_SELECT && (*IdList)(unsafe.Pointer(pColumns)).FnId != libc.AssignInt32(&n, Xsqlite3ExprVectorSize(tls, pExpr))) { goto __3 } - Xsqlite3ErrorMsg(tls, pParse, ts+7636, + Xsqlite3ErrorMsg(tls, pParse, ts+7625, libc.VaList(bp, (*IdList)(unsafe.Pointer(pColumns)).FnId, n)) goto vector_append_error __3: @@ -57629,7 +57623,7 @@ var mx int32 = *(*int32)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb + 136 + 2*4)) if pEList != 0 && (*ExprList)(unsafe.Pointer(pEList)).FnExpr > mx { - Xsqlite3ErrorMsg(tls, pParse, ts+7666, libc.VaList(bp, zObject)) + Xsqlite3ErrorMsg(tls, pParse, ts+7655, libc.VaList(bp, zObject)) } } @@ -57685,10 +57679,10 @@ // "false" EP_IsFalse // anything else 0 func Xsqlite3IsTrueOrFalse(tls *libc.TLS, zIn uintptr) U32 { - if Xsqlite3StrICmp(tls, zIn, ts+6748) == 0 { + if Xsqlite3StrICmp(tls, zIn, ts+7678) == 0 { return U32(EP_IsTrue) } - if Xsqlite3StrICmp(tls, zIn, ts+6753) == 0 { + if Xsqlite3StrICmp(tls, zIn, ts+7683) == 0 { return U32(EP_IsFalse) } return U32(0) @@ -58763,7 +58757,7 @@ } if (*Select)(unsafe.Pointer(pSel)).FpLimit != 0 { var db uintptr = (*Parse)(unsafe.Pointer(pParse)).Fdb - pLimit = Xsqlite3Expr(tls, db, TK_INTEGER, ts+7514) + pLimit = Xsqlite3Expr(tls, db, TK_INTEGER, ts+7503) if pLimit != 0 { (*Expr)(unsafe.Pointer(pLimit)).FaffExpr = int8(SQLITE_AFF_NUMERIC) pLimit = Xsqlite3PExpr(tls, pParse, TK_NE, @@ -59201,6 +59195,7 @@ func Xsqlite3ExprCodeGeneratedColumn(tls *libc.TLS, pParse uintptr, pTab uintptr, pCol uintptr, regOut int32) { var iAddr int32 var v uintptr = (*Parse)(unsafe.Pointer(pParse)).FpVdbe + var nErr int32 = (*Parse)(unsafe.Pointer(pParse)).FnErr if (*Parse)(unsafe.Pointer(pParse)).FiSelfTab > 0 { iAddr = Xsqlite3VdbeAddOp3(tls, v, OP_IfNullRow, (*Parse)(unsafe.Pointer(pParse)).FiSelfTab-1, 0, regOut) @@ -59214,6 +59209,9 @@ if iAddr != 0 { 
Xsqlite3VdbeJumpHere(tls, v, iAddr) } + if (*Parse)(unsafe.Pointer(pParse)).FnErr > nErr { + (*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).FerrByteOffset = -1 + } } // Generate code to extract the value of the iCol-th column of a table. @@ -59432,6 +59430,7 @@ var p uintptr var v uintptr for p = (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr; p != 0; p = (*IndexedExpr)(unsafe.Pointer(p)).FpIENext { + var exprAff U8 var iDataCur int32 = (*IndexedExpr)(unsafe.Pointer(p)).FiDataCur if iDataCur < 0 { continue @@ -59445,6 +59444,14 @@ if Xsqlite3ExprCompare(tls, uintptr(0), pExpr, (*IndexedExpr)(unsafe.Pointer(p)).FpExpr, iDataCur) != 0 { continue } + + exprAff = U8(Xsqlite3ExprAffinity(tls, pExpr)) + if int32(exprAff) <= SQLITE_AFF_BLOB && int32((*IndexedExpr)(unsafe.Pointer(p)).Faff) != SQLITE_AFF_BLOB || + int32(exprAff) == SQLITE_AFF_TEXT && int32((*IndexedExpr)(unsafe.Pointer(p)).Faff) != SQLITE_AFF_TEXT || + int32(exprAff) >= SQLITE_AFF_NUMERIC && int32((*IndexedExpr)(unsafe.Pointer(p)).Faff) != SQLITE_AFF_NUMERIC { + continue + } + v = (*Parse)(unsafe.Pointer(pParse)).FpVdbe if (*IndexedExpr)(unsafe.Pointer(p)).FbMaybeNullRow != 0 { @@ -60218,7 +60225,7 @@ if !((*Expr)(unsafe.Pointer(pExpr)).FiTable != n1) { goto __122 } - Xsqlite3ErrorMsg(tls, pParse, ts+7636, + Xsqlite3ErrorMsg(tls, pParse, ts+7625, libc.VaList(bp+24, (*Expr)(unsafe.Pointer(pExpr)).FiTable, n1)) __122: ; @@ -60240,11 +60247,10 @@ return target __50: - if !(!((*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_Collate) != U32(0)) && - (*Expr)(unsafe.Pointer(pExpr)).FpLeft != 0 && - int32((*Expr)(unsafe.Pointer((*Expr)(unsafe.Pointer(pExpr)).FpLeft)).Fop) == TK_FUNCTION) { + if !!((*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_Collate) != U32(0)) { goto __123 } + inReg = Xsqlite3ExprCodeTarget(tls, pParse, (*Expr)(unsafe.Pointer(pExpr)).FpLeft, target) if !(inReg != target) { goto __125 @@ -60315,13 +60321,19 @@ ; __127: ; - addrINR = Xsqlite3VdbeAddOp1(tls, v, OP_IfNullRow, (*Expr)(unsafe.Pointer(pExpr)).FiTable) + addrINR = Xsqlite3VdbeAddOp3(tls, v, OP_IfNullRow, (*Expr)(unsafe.Pointer(pExpr)).FiTable, 0, target) (*Parse)(unsafe.Pointer(pParse)).FokConstFactor = U8(0) inReg = Xsqlite3ExprCodeTarget(tls, pParse, (*Expr)(unsafe.Pointer(pExpr)).FpLeft, target) (*Parse)(unsafe.Pointer(pParse)).FokConstFactor = okConstFactor + if !(inReg != target) { + goto __130 + } + Xsqlite3VdbeAddOp2(tls, v, OP_SCopy, inReg, target) + inReg = target +__130: + ; Xsqlite3VdbeJumpHere(tls, v, addrINR) - Xsqlite3VdbeChangeP3(tls, v, addrINR, inReg) goto __5 __56: @@ -60334,15 +60346,15 @@ nExpr = (*ExprList)(unsafe.Pointer(pEList)).FnExpr endLabel = Xsqlite3VdbeMakeLabel(tls, pParse) if !(libc.AssignUintptr(&pX, (*Expr)(unsafe.Pointer(pExpr)).FpLeft) != uintptr(0)) { - goto __130 + goto __131 } pDel = Xsqlite3ExprDup(tls, db1, pX, 0) if !((*Sqlite3)(unsafe.Pointer(db1)).FmallocFailed != 0) { - goto __131 + goto __132 } Xsqlite3ExprDelete(tls, db1, pDel) goto __5 -__131: +__132: ; exprToRegister(tls, pDel, exprCodeVector(tls, pParse, pDel, bp+40)) @@ -60352,22 +60364,22 @@ pTest = bp + 120 *(*int32)(unsafe.Pointer(bp + 40)) = 0 -__130: +__131: ; i1 = 0 -__132: +__133: if !(i1 < nExpr-1) { - goto __134 + goto __135 } if !(pX != 0) { - goto __135 + goto __136 } (*Expr)(unsafe.Pointer(bp + 120)).FpRight = (*ExprList_item)(unsafe.Pointer(aListelem + uintptr(i1)*32)).FpExpr - goto __136 -__135: - pTest = (*ExprList_item)(unsafe.Pointer(aListelem + uintptr(i1)*32)).FpExpr + goto __137 __136: + pTest = 
(*ExprList_item)(unsafe.Pointer(aListelem + uintptr(i1)*32)).FpExpr +__137: ; nextCase = Xsqlite3VdbeMakeLabel(tls, pParse) @@ -60376,21 +60388,21 @@ Xsqlite3ExprCode(tls, pParse, (*ExprList_item)(unsafe.Pointer(aListelem+uintptr(i1+1)*32)).FpExpr, target) Xsqlite3VdbeGoto(tls, v, endLabel) Xsqlite3VdbeResolveLabel(tls, v, nextCase) - goto __133 -__133: - i1 = i1 + 2 - goto __132 goto __134 __134: + i1 = i1 + 2 + goto __133 + goto __135 +__135: ; if !(nExpr&1 != 0) { - goto __137 + goto __138 } Xsqlite3ExprCode(tls, pParse, (*ExprList_item)(unsafe.Pointer(pEList+8+uintptr(nExpr-1)*32)).FpExpr, target) - goto __138 -__137: - Xsqlite3VdbeAddOp2(tls, v, OP_Null, 0, target) + goto __139 __138: + Xsqlite3VdbeAddOp2(tls, v, OP_Null, 0, target) +__139: ; Xsqlite3ExprDelete(tls, db1, pDel) setDoNotMergeFlagOnCopy(tls, v) @@ -60400,27 +60412,27 @@ __57: ; if !(!(int32((*Parse)(unsafe.Pointer(pParse)).FpTriggerTab) != 0) && !(int32((*Parse)(unsafe.Pointer(pParse)).Fnested) != 0)) { - goto __139 + goto __140 } Xsqlite3ErrorMsg(tls, pParse, ts+8066, 0) return 0 -__139: +__140: ; if !(int32((*Expr)(unsafe.Pointer(pExpr)).FaffExpr) == OE_Abort) { - goto __140 + goto __141 } Xsqlite3MayAbort(tls, pParse) -__140: +__141: ; if !(int32((*Expr)(unsafe.Pointer(pExpr)).FaffExpr) == OE_Ignore) { - goto __141 + goto __142 } Xsqlite3VdbeAddOp4(tls, v, OP_Halt, SQLITE_OK, OE_Ignore, 0, *(*uintptr)(unsafe.Pointer(pExpr + 8)), 0) - goto __142 -__141: + goto __143 +__142: Xsqlite3HaltConstraint(tls, pParse, func() int32 { if (*Parse)(unsafe.Pointer(pParse)).FpTriggerTab != 0 { @@ -60429,7 +60441,7 @@ return SQLITE_ERROR }(), int32((*Expr)(unsafe.Pointer(pExpr)).FaffExpr), *(*uintptr)(unsafe.Pointer(pExpr + 8)), int8(0), uint8(0)) -__142: +__143: ; goto __5 @@ -63100,7 +63112,7 @@ return SQLITE_NOMEM } if Xsqlite3_strnicmp(tls, zSql, ts+10906, 7) != 0 { - return Xsqlite3CorruptError(tls, 113494) + return Xsqlite3CorruptError(tls, 113516) } (*Sqlite3)(unsafe.Pointer(db)).Finit.FiDb = func() uint8 { if bTemp != 0 { @@ -63117,7 +63129,7 @@ } if rc == SQLITE_OK && ((*Parse)(unsafe.Pointer(p)).FpNewTable == uintptr(0) && (*Parse)(unsafe.Pointer(p)).FpNewIndex == uintptr(0) && (*Parse)(unsafe.Pointer(p)).FpNewTrigger == uintptr(0)) { - rc = Xsqlite3CorruptError(tls, 113505) + rc = Xsqlite3CorruptError(tls, 113527) } (*Sqlite3)(unsafe.Pointer(db)).Finit.FiDb = U8(0) @@ -64038,7 +64050,7 @@ goto __2 } - rc = Xsqlite3CorruptError(tls, 114441) + rc = Xsqlite3CorruptError(tls, 114463) goto drop_column_done __2: ; @@ -68402,6 +68414,12 @@ pExpr = Xsqlite3PExpr(tls, pParse, TK_UPLUS, pExpr, uintptr(0)) __11: ; + if !(pExpr != 0 && int32((*Expr)(unsafe.Pointer(pExpr)).Fop) != TK_RAISE) { + goto __12 + } + (*Expr)(unsafe.Pointer(pExpr)).FaffExpr = (*Column)(unsafe.Pointer(pCol)).Faffinity +__12: + ; Xsqlite3ColumnSetExpr(tls, pParse, pTab, pCol, pExpr) pExpr = uintptr(0) goto generated_done @@ -69566,7 +69584,7 @@ if Xsqlite3_strnicmp(tls, (*Table)(unsafe.Pointer(pTab)).FzName+uintptr(7), ts+3270, 4) == 0 { return 0 } - if Xsqlite3_strnicmp(tls, (*Table)(unsafe.Pointer(pTab)).FzName+uintptr(7), ts+7117, 10) == 0 { + if Xsqlite3_strnicmp(tls, (*Table)(unsafe.Pointer(pTab)).FzName+uintptr(7), ts+7106, 10) == 0 { return 0 } return 1 @@ -70812,7 +70830,7 @@ goto __101 } Xsqlite3ErrorMsg(tls, pParse, ts+14124, 0) - (*Parse)(unsafe.Pointer(pParse)).Frc = Xsqlite3CorruptError(tls, 121835) + (*Parse)(unsafe.Pointer(pParse)).Frc = Xsqlite3CorruptError(tls, 121859) goto exit_create_index __101: ; @@ -72857,7 +72875,7 @@ goto __16 __15: wcf = 
U16(WHERE_ONEPASS_DESIRED | WHERE_DUPLICATES_OK) - if !((*NameContext)(unsafe.Pointer(bp+16)).FncFlags&NC_VarSelect != 0) { + if !((*NameContext)(unsafe.Pointer(bp+16)).FncFlags&NC_Subquery != 0) { goto __23 } bComplex = 1 @@ -79323,7 +79341,7 @@ if !!(Xsqlite3SafetyCheckOk(tls, db) != 0) { goto __1 } - return Xsqlite3MisuseError(tls, 131895) + return Xsqlite3MisuseError(tls, 131931) __1: ; if !(zSql == uintptr(0)) { @@ -80722,7 +80740,7 @@ } else if (*FuncDef)(unsafe.Pointer(p)).FxFinalize != uintptr(0) { zType = ts + 17497 } else { - zType = ts + 7512 + zType = ts + 7501 } Xsqlite3VdbeMultiLoad(tls, v, 1, ts+17499, libc.VaList(bp, (*FuncDef)(unsafe.Pointer(p)).FzName, isBuiltin, @@ -80883,6 +80901,7 @@ var zErr2 uintptr var k3 int32 var pCheck uintptr + var jmp7 int32 var jmp6 int32 var iCol1 int32 var uniqOk int32 @@ -82164,7 +82183,7 @@ goto __210 } pMod = (*HashElem)(unsafe.Pointer(j1)).Fdata - Xsqlite3VdbeMultiLoad(tls, v, 1, ts+7512, libc.VaList(bp+264, (*Module)(unsafe.Pointer(pMod)).FzName)) + Xsqlite3VdbeMultiLoad(tls, v, 1, ts+7501, libc.VaList(bp+264, (*Module)(unsafe.Pointer(pMod)).FzName)) goto __209 __209: j1 = (*HashElem)(unsafe.Pointer(j1)).Fnext @@ -82180,7 +82199,7 @@ if !(i6 < int32(uint64(unsafe.Sizeof(aPragmaName))/uint64(unsafe.Sizeof(PragmaName{})))) { goto __213 } - Xsqlite3VdbeMultiLoad(tls, v, 1, ts+7512, libc.VaList(bp+272, aPragmaName[i6].FzName)) + Xsqlite3VdbeMultiLoad(tls, v, 1, ts+7501, libc.VaList(bp+272, aPragmaName[i6].FzName)) goto __212 __212: i6++ @@ -82985,80 +83004,94 @@ jmp4 = integrityCheckResultRow(tls, v) Xsqlite3VdbeJumpHere(tls, v, jmp21) + if !((*Table)(unsafe.Pointer(pTab9)).FtabFlags&U32(TF_WithoutRowid) == U32(0)) { + goto __338 + } + Xsqlite3VdbeAddOp2(tls, v, OP_IdxRowid, *(*int32)(unsafe.Pointer(bp + 608))+j4, 3) + jmp7 = Xsqlite3VdbeAddOp3(tls, v, OP_Eq, 3, 0, r1+int32((*Index)(unsafe.Pointer(pIdx5)).FnColumn)-1) + + Xsqlite3VdbeLoadString(tls, v, 3, + ts+17913) + Xsqlite3VdbeAddOp3(tls, v, OP_Concat, 7, 3, 3) + Xsqlite3VdbeLoadString(tls, v, 4, ts+17949) + Xsqlite3VdbeGoto(tls, v, jmp5-1) + Xsqlite3VdbeJumpHere(tls, v, jmp7) +__338: + ; label6 = 0 kk = 0 -__338: +__339: if !(kk < int32((*Index)(unsafe.Pointer(pIdx5)).FnKeyCol)) { - goto __340 + goto __341 } if !(*(*uintptr)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx5)).FazColl + uintptr(kk)*8)) == uintptr(unsafe.Pointer(&Xsqlite3StrBINARY))) { - goto __341 + goto __342 } - goto __339 -__341: + goto __340 +__342: ; if !(label6 == 0) { - goto __342 + goto __343 } label6 = Xsqlite3VdbeMakeLabel(tls, pParse) -__342: +__343: ; Xsqlite3VdbeAddOp3(tls, v, OP_Column, *(*int32)(unsafe.Pointer(bp + 608))+j4, kk, 3) Xsqlite3VdbeAddOp3(tls, v, OP_Ne, 3, label6, r1+kk) - goto __339 -__339: - kk++ - goto __338 goto __340 __340: + kk++ + goto __339 + goto __341 +__341: ; if !(label6 != 0) { - goto __343 + goto __344 } jmp6 = Xsqlite3VdbeAddOp0(tls, v, OP_Goto) Xsqlite3VdbeResolveLabel(tls, v, label6) Xsqlite3VdbeLoadString(tls, v, 3, ts+17887) Xsqlite3VdbeAddOp3(tls, v, OP_Concat, 7, 3, 3) - Xsqlite3VdbeLoadString(tls, v, 4, ts+17913) + Xsqlite3VdbeLoadString(tls, v, 4, ts+17960) Xsqlite3VdbeGoto(tls, v, jmp5-1) Xsqlite3VdbeJumpHere(tls, v, jmp6) -__343: +__344: ; if !(int32((*Index)(unsafe.Pointer(pIdx5)).FonError) != OE_None) { - goto __344 + goto __345 } uniqOk = Xsqlite3VdbeMakeLabel(tls, pParse) kk = 0 -__345: +__346: if !(kk < int32((*Index)(unsafe.Pointer(pIdx5)).FnKeyCol)) { - goto __347 + goto __348 } iCol1 = int32(*(*I16)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx5)).FaiColumn + 
uintptr(kk)*2))) if !(iCol1 >= 0 && uint32(int32(*(*uint8)(unsafe.Pointer((*Table)(unsafe.Pointer(pTab9)).FaCol + uintptr(iCol1)*24 + 8))&0xf>>0)) != 0) { - goto __348 + goto __349 } - goto __346 -__348: + goto __347 +__349: ; Xsqlite3VdbeAddOp2(tls, v, OP_IsNull, r1+kk, uniqOk) - goto __346 -__346: - kk++ - goto __345 goto __347 __347: + kk++ + goto __346 + goto __348 +__348: ; jmp61 = Xsqlite3VdbeAddOp1(tls, v, OP_Next, *(*int32)(unsafe.Pointer(bp + 608))+j4) Xsqlite3VdbeGoto(tls, v, uniqOk) Xsqlite3VdbeJumpHere(tls, v, jmp61) Xsqlite3VdbeAddOp4Int(tls, v, OP_IdxGT, *(*int32)(unsafe.Pointer(bp + 608))+j4, uniqOk, r1, int32((*Index)(unsafe.Pointer(pIdx5)).FnKeyCol)) - Xsqlite3VdbeLoadString(tls, v, 3, ts+17940) + Xsqlite3VdbeLoadString(tls, v, 3, ts+17987) Xsqlite3VdbeGoto(tls, v, jmp5) Xsqlite3VdbeResolveLabel(tls, v, uniqOk) -__344: +__345: ; Xsqlite3VdbeJumpHere(tls, v, jmp4) Xsqlite3ResolvePartIdxLabel(tls, pParse, *(*int32)(unsafe.Pointer(bp + 624))) @@ -83075,20 +83108,20 @@ Xsqlite3VdbeAddOp2(tls, v, OP_Next, *(*int32)(unsafe.Pointer(bp + 604)), loopTop) Xsqlite3VdbeJumpHere(tls, v, loopTop-1) if !!(isQuick != 0) { - goto __349 + goto __350 } - Xsqlite3VdbeLoadString(tls, v, 2, ts+17967) + Xsqlite3VdbeLoadString(tls, v, 2, ts+18014) j4 = 0 pIdx5 = (*Table)(unsafe.Pointer(pTab9)).FpIndex -__350: +__351: if !(pIdx5 != 0) { - goto __352 + goto __353 } if !(pPk1 == pIdx5) { - goto __353 + goto __354 } - goto __351 -__353: + goto __352 +__354: ; Xsqlite3VdbeAddOp2(tls, v, OP_Count, *(*int32)(unsafe.Pointer(bp + 608))+j4, 3) addr1 = Xsqlite3VdbeAddOp3(tls, v, OP_Eq, 8+j4, 0, 3) @@ -83097,21 +83130,21 @@ Xsqlite3VdbeAddOp3(tls, v, OP_Concat, 4, 2, 3) integrityCheckResultRow(tls, v) Xsqlite3VdbeJumpHere(tls, v, addr1) - goto __351 -__351: - pIdx5 = (*Index)(unsafe.Pointer(pIdx5)).FpNext - j4++ - goto __350 goto __352 __352: + pIdx5 = (*Index)(unsafe.Pointer(pIdx5)).FpNext + j4++ + goto __351 + goto __353 +__353: ; if !(pPk1 != 0) { - goto __354 + goto __355 } Xsqlite3ReleaseTempRange(tls, pParse, r2, int32((*Index)(unsafe.Pointer(pPk1)).FnKeyCol)) -__354: +__355: ; -__349: +__350: ; goto __284 __284: @@ -83129,14 +83162,14 @@ ; aOp2 = Xsqlite3VdbeAddOpList(tls, v, int32(uint64(unsafe.Sizeof(endCode))/uint64(unsafe.Sizeof(VdbeOpList{}))), uintptr(unsafe.Pointer(&endCode)), iLn5) if !(aOp2 != 0) { - goto __355 + goto __356 } (*VdbeOp)(unsafe.Pointer(aOp2)).Fp2 = 1 - *(*int32)(unsafe.Pointer(bp + 600)) (*VdbeOp)(unsafe.Pointer(aOp2 + 2*24)).Fp4type = int8(-1) - *(*uintptr)(unsafe.Pointer(aOp2 + 2*24 + 16)) = ts + 17996 + *(*uintptr)(unsafe.Pointer(aOp2 + 2*24 + 16)) = ts + 18043 (*VdbeOp)(unsafe.Pointer(aOp2 + 5*24)).Fp4type = int8(-1) *(*uintptr)(unsafe.Pointer(aOp2 + 5*24 + 16)) = Xsqlite3ErrStr(tls, SQLITE_CORRUPT) -__355: +__356: ; Xsqlite3VdbeChangeP3(tls, v, 0, Xsqlite3VdbeCurrentAddr(tls, v)-2) @@ -83144,27 +83177,27 @@ __45: if !!(zRight != 0) { - goto __356 + goto __357 } if !(Xsqlite3ReadSchema(tls, pParse) != 0) { - goto __358 + goto __359 } goto pragma_out -__358: +__359: ; returnSingleText(tls, v, encnames1[(*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).Fenc].FzName) - goto __357 -__356: + goto __358 +__357: if !((*Sqlite3)(unsafe.Pointer(db)).FmDbFlags&U32(DBFLAG_EncodingFixed) == U32(0)) { - goto __359 + goto __360 } pEnc = uintptr(unsafe.Pointer(&encnames1)) -__360: +__361: if !((*EncName)(unsafe.Pointer(pEnc)).FzName != 0) { - goto __362 + goto __363 } if !(0 == Xsqlite3StrICmp(tls, zRight, (*EncName)(unsafe.Pointer(pEnc)).FzName)) { - goto __363 + goto 
__364 } if (*EncName)(unsafe.Pointer(pEnc)).Fenc != 0 { enc = (*EncName)(unsafe.Pointer(pEnc)).Fenc @@ -83173,25 +83206,25 @@ } (*Schema)(unsafe.Pointer((*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb)).FpSchema)).Fenc = enc Xsqlite3SetTextEncoding(tls, db, enc) - goto __362 -__363: + goto __363 +__364: ; - goto __361 -__361: - pEnc += 16 - goto __360 goto __362 __362: + pEnc += 16 + goto __361 + goto __363 +__363: ; if !!(int32((*EncName)(unsafe.Pointer(pEnc)).FzName) != 0) { - goto __364 + goto __365 } - Xsqlite3ErrorMsg(tls, pParse, ts+17999, libc.VaList(bp+456, zRight)) -__364: + Xsqlite3ErrorMsg(tls, pParse, ts+18046, libc.VaList(bp+456, zRight)) +__365: ; -__359: +__360: ; -__357: +__358: ; goto __15 @@ -83199,15 +83232,15 @@ iCookie = int32((*PragmaName)(unsafe.Pointer(pPragma)).FiArg) Xsqlite3VdbeUsesBtree(tls, v, iDb) if !(zRight != 0 && int32((*PragmaName)(unsafe.Pointer(pPragma)).FmPragFlg)&PragFlg_ReadOnly == 0) { - goto __365 + goto __366 } aOp3 = Xsqlite3VdbeAddOpList(tls, v, int32(uint64(unsafe.Sizeof(setCookie))/uint64(unsafe.Sizeof(VdbeOpList{}))), uintptr(unsafe.Pointer(&setCookie)), 0) if !(0 != 0) { - goto __367 + goto __368 } goto __15 -__367: +__368: ; (*VdbeOp)(unsafe.Pointer(aOp3)).Fp1 = iDb (*VdbeOp)(unsafe.Pointer(aOp3 + 1*24)).Fp1 = iDb @@ -83215,41 +83248,41 @@ (*VdbeOp)(unsafe.Pointer(aOp3 + 1*24)).Fp3 = Xsqlite3Atoi(tls, zRight) (*VdbeOp)(unsafe.Pointer(aOp3 + 1*24)).Fp5 = U16(1) if !(iCookie == BTREE_SCHEMA_VERSION && (*Sqlite3)(unsafe.Pointer(db)).Fflags&uint64(SQLITE_Defensive) != uint64(0)) { - goto __368 + goto __369 } (*VdbeOp)(unsafe.Pointer(aOp3 + 1*24)).Fopcode = U8(OP_Noop) -__368: +__369: ; - goto __366 -__365: + goto __367 +__366: ; aOp4 = Xsqlite3VdbeAddOpList(tls, v, int32(uint64(unsafe.Sizeof(readCookie))/uint64(unsafe.Sizeof(VdbeOpList{}))), uintptr(unsafe.Pointer(&readCookie)), 0) if !(0 != 0) { - goto __369 + goto __370 } goto __15 -__369: +__370: ; (*VdbeOp)(unsafe.Pointer(aOp4)).Fp1 = iDb (*VdbeOp)(unsafe.Pointer(aOp4 + 1*24)).Fp1 = iDb (*VdbeOp)(unsafe.Pointer(aOp4 + 1*24)).Fp3 = iCookie Xsqlite3VdbeReusable(tls, v) -__366: +__367: ; goto __15 __47: i10 = 0 (*Parse)(unsafe.Pointer(pParse)).FnMem = 1 -__370: +__371: if !(libc.AssignUintptr(&zOpt, Xsqlite3_compileoption_get(tls, libc.PostIncInt32(&i10, 1))) != uintptr(0)) { - goto __371 + goto __372 } Xsqlite3VdbeLoadString(tls, v, 1, zOpt) Xsqlite3VdbeAddOp2(tls, v, OP_ResultRow, 1, 1) - goto __370 -__371: + goto __371 +__372: ; Xsqlite3VdbeReusable(tls, v) @@ -83264,31 +83297,31 @@ }() eMode2 = SQLITE_CHECKPOINT_PASSIVE if !(zRight != 0) { - goto __372 + goto __373 } if !(Xsqlite3StrICmp(tls, zRight, ts+17329) == 0) { - goto __373 + goto __374 } eMode2 = SQLITE_CHECKPOINT_FULL - goto __374 -__373: - if !(Xsqlite3StrICmp(tls, zRight, ts+18024) == 0) { - goto __375 + goto __375 +__374: + if !(Xsqlite3StrICmp(tls, zRight, ts+18071) == 0) { + goto __376 } eMode2 = SQLITE_CHECKPOINT_RESTART - goto __376 -__375: + goto __377 +__376: if !(Xsqlite3StrICmp(tls, zRight, ts+17482) == 0) { - goto __377 + goto __378 } eMode2 = SQLITE_CHECKPOINT_TRUNCATE -__377: +__378: ; -__376: +__377: ; -__374: +__375: ; -__372: +__373: ; (*Parse)(unsafe.Pointer(pParse)).FnMem = 3 Xsqlite3VdbeAddOp3(tls, v, OP_Checkpoint, iBt, eMode2, 1) @@ -83298,10 +83331,10 @@ __49: if !(zRight != 0) { - goto __378 + goto __379 } Xsqlite3_wal_autocheckpoint(tls, db, Xsqlite3Atoi(tls, zRight)) -__378: +__379: ; returnSingleInt(tls, v, func() int64 { @@ -83321,19 +83354,19 @@ __51: if !(zRight != 0) { - goto __379 + goto 
__380 } opMask = U32(Xsqlite3Atoi(tls, zRight)) if !(opMask&U32(0x02) == U32(0)) { - goto __381 + goto __382 } goto __15 -__381: +__382: ; - goto __380 -__379: - opMask = U32(0xfffe) + goto __381 __380: + opMask = U32(0xfffe) +__381: ; iTabCur = libc.PostIncInt32(&(*Parse)(unsafe.Pointer(pParse)).FnTab, 1) iDbLast = func() int32 { @@ -83342,86 +83375,86 @@ } return (*Sqlite3)(unsafe.Pointer(db)).FnDb - 1 }() -__382: +__383: if !(iDb <= iDbLast) { - goto __384 + goto __385 } if !(iDb == 1) { - goto __385 + goto __386 } - goto __383 -__385: + goto __384 +__386: ; Xsqlite3CodeVerifySchema(tls, pParse, iDb) pSchema = (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + uintptr(iDb)*32)).FpSchema k4 = (*Hash)(unsafe.Pointer(pSchema + 8)).Ffirst -__386: +__387: if !(k4 != 0) { - goto __388 + goto __389 } pTab10 = (*HashElem)(unsafe.Pointer(k4)).Fdata if !((*Table)(unsafe.Pointer(pTab10)).FtabFlags&U32(TF_StatsUsed) == U32(0)) { - goto __389 + goto __390 } - goto __387 -__389: + goto __388 +__390: ; szThreshold = LogEst(int32((*Table)(unsafe.Pointer(pTab10)).FnRowLogEst) + 46) pIdx6 = (*Table)(unsafe.Pointer(pTab10)).FpIndex -__390: +__391: if !(pIdx6 != 0) { - goto __392 + goto __393 } if !!(int32(*(*uint16)(unsafe.Pointer(pIdx6 + 100))&0x80>>7) != 0) { - goto __393 + goto __394 } szThreshold = int16(0) - goto __392 -__393: + goto __393 +__394: ; - goto __391 -__391: - pIdx6 = (*Index)(unsafe.Pointer(pIdx6)).FpNext - goto __390 goto __392 __392: + pIdx6 = (*Index)(unsafe.Pointer(pIdx6)).FpNext + goto __391 + goto __393 +__393: ; if !(szThreshold != 0) { - goto __394 + goto __395 } Xsqlite3OpenTable(tls, pParse, iTabCur, iDb, pTab10, OP_OpenRead) Xsqlite3VdbeAddOp3(tls, v, OP_IfSmaller, iTabCur, int32(U32(Xsqlite3VdbeCurrentAddr(tls, v)+2)+opMask&U32(1)), int32(szThreshold)) -__394: +__395: ; - zSubSql = Xsqlite3MPrintf(tls, db, ts+18032, + zSubSql = Xsqlite3MPrintf(tls, db, ts+18079, libc.VaList(bp+464, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName, (*Table)(unsafe.Pointer(pTab10)).FzName)) if !(opMask&U32(0x01) != 0) { - goto __395 + goto __396 } r11 = Xsqlite3GetTempReg(tls, pParse) Xsqlite3VdbeAddOp4(tls, v, OP_String8, 0, r11, 0, zSubSql, -6) Xsqlite3VdbeAddOp2(tls, v, OP_ResultRow, r11, 1) - goto __396 -__395: - Xsqlite3VdbeAddOp4(tls, v, OP_SqlExec, 0, 0, 0, zSubSql, -6) + goto __397 __396: + Xsqlite3VdbeAddOp4(tls, v, OP_SqlExec, 0, 0, 0, zSubSql, -6) +__397: ; - goto __387 -__387: - k4 = (*HashElem)(unsafe.Pointer(k4)).Fnext - goto __386 goto __388 __388: + k4 = (*HashElem)(unsafe.Pointer(k4)).Fnext + goto __387 + goto __389 +__389: ; - goto __383 -__383: - iDb++ - goto __382 goto __384 __384: + iDb++ + goto __383 + goto __385 +__385: ; Xsqlite3VdbeAddOp0(tls, v, OP_Expire) goto __15 @@ -83429,36 +83462,36 @@ __52: ; if !(zRight != 0) { - goto __397 + goto __398 } Xsqlite3_busy_timeout(tls, db, Xsqlite3Atoi(tls, zRight)) -__397: +__398: ; returnSingleInt(tls, v, int64((*Sqlite3)(unsafe.Pointer(db)).FbusyTimeout)) goto __15 __53: if !(zRight != 0 && Xsqlite3DecOrHexToI64(tls, zRight, bp+632) == SQLITE_OK) { - goto __398 + goto __399 } Xsqlite3_soft_heap_limit64(tls, *(*Sqlite3_int64)(unsafe.Pointer(bp + 632))) -__398: +__399: ; returnSingleInt(tls, v, Xsqlite3_soft_heap_limit64(tls, int64(-1))) goto __15 __54: if !(zRight != 0 && Xsqlite3DecOrHexToI64(tls, zRight, bp+640) == SQLITE_OK) { - goto __399 + goto __400 } iPrior = Xsqlite3_hard_heap_limit64(tls, int64(-1)) if !(*(*Sqlite3_int64)(unsafe.Pointer(bp + 640)) > int64(0) && (iPrior == 
int64(0) || iPrior > *(*Sqlite3_int64)(unsafe.Pointer(bp + 640)))) { - goto __400 + goto __401 } Xsqlite3_hard_heap_limit64(tls, *(*Sqlite3_int64)(unsafe.Pointer(bp + 640))) -__400: +__401: ; -__399: +__400: ; returnSingleInt(tls, v, Xsqlite3_hard_heap_limit64(tls, int64(-1))) goto __15 @@ -83467,10 +83500,10 @@ if !(zRight != 0 && Xsqlite3DecOrHexToI64(tls, zRight, bp+648) == SQLITE_OK && *(*Sqlite3_int64)(unsafe.Pointer(bp + 648)) >= int64(0)) { - goto __401 + goto __402 } Xsqlite3_limit(tls, db, SQLITE_LIMIT_WORKER_THREADS, int32(*(*Sqlite3_int64)(unsafe.Pointer(bp + 648))&int64(0x7fffffff))) -__401: +__402: ; returnSingleInt(tls, v, int64(Xsqlite3_limit(tls, db, SQLITE_LIMIT_WORKER_THREADS, -1))) goto __15 @@ -83479,10 +83512,10 @@ if !(zRight != 0 && Xsqlite3DecOrHexToI64(tls, zRight, bp+656) == SQLITE_OK && *(*Sqlite3_int64)(unsafe.Pointer(bp + 656)) >= int64(0)) { - goto __402 + goto __403 } (*Sqlite3)(unsafe.Pointer(db)).FnAnalysisLimit = int32(*(*Sqlite3_int64)(unsafe.Pointer(bp + 656)) & int64(0x7fffffff)) -__402: +__403: ; returnSingleInt(tls, v, int64((*Sqlite3)(unsafe.Pointer(db)).FnAnalysisLimit)) goto __15 @@ -83490,10 +83523,10 @@ __15: ; if !(int32((*PragmaName)(unsafe.Pointer(pPragma)).FmPragFlg)&PragFlg_NoColumns1 != 0 && zRight != 0) { - goto __403 + goto __404 } -__403: +__404: ; pragma_out: Xsqlite3DbFree(tls, db, zLeft) @@ -83545,14 +83578,14 @@ {Fopcode: U8(OP_Goto), Fp2: int8(3)}, } var encnames1 = [9]EncName{ - {FzName: ts + 18050, Fenc: U8(SQLITE_UTF8)}, - {FzName: ts + 18055, Fenc: U8(SQLITE_UTF8)}, - {FzName: ts + 18061, Fenc: U8(SQLITE_UTF16LE)}, - {FzName: ts + 18070, Fenc: U8(SQLITE_UTF16BE)}, - {FzName: ts + 18079, Fenc: U8(SQLITE_UTF16LE)}, - {FzName: ts + 18087, Fenc: U8(SQLITE_UTF16BE)}, - {FzName: ts + 18095}, - {FzName: ts + 18102}, + {FzName: ts + 18097, Fenc: U8(SQLITE_UTF8)}, + {FzName: ts + 18102, Fenc: U8(SQLITE_UTF8)}, + {FzName: ts + 18108, Fenc: U8(SQLITE_UTF16LE)}, + {FzName: ts + 18117, Fenc: U8(SQLITE_UTF16BE)}, + {FzName: ts + 18126, Fenc: U8(SQLITE_UTF16LE)}, + {FzName: ts + 18134, Fenc: U8(SQLITE_UTF16BE)}, + {FzName: ts + 18142}, + {FzName: ts + 18149}, {}, } var setCookie = [2]VdbeOpList{ @@ -83604,7 +83637,7 @@ _ = argc _ = argv Xsqlite3StrAccumInit(tls, bp+32, uintptr(0), bp+64, int32(unsafe.Sizeof([200]int8{})), 0) - Xsqlite3_str_appendall(tls, bp+32, ts+18108) + Xsqlite3_str_appendall(tls, bp+32, ts+18155) i = 0 j = int32((*PragmaName)(unsafe.Pointer(pPragma)).FiPragCName) __1: @@ -83612,7 +83645,7 @@ goto __3 } { - Xsqlite3_str_appendf(tls, bp+32, ts+18123, libc.VaList(bp, int32(cSep), pragCName[j])) + Xsqlite3_str_appendf(tls, bp+32, ts+18170, libc.VaList(bp, int32(cSep), pragCName[j])) cSep = int8(',') } @@ -83625,16 +83658,16 @@ __3: ; if i == 0 { - Xsqlite3_str_appendf(tls, bp+32, ts+18130, libc.VaList(bp+16, (*PragmaName)(unsafe.Pointer(pPragma)).FzName)) + Xsqlite3_str_appendf(tls, bp+32, ts+18177, libc.VaList(bp+16, (*PragmaName)(unsafe.Pointer(pPragma)).FzName)) i++ } j = 0 if int32((*PragmaName)(unsafe.Pointer(pPragma)).FmPragFlg)&PragFlg_Result1 != 0 { - Xsqlite3_str_appendall(tls, bp+32, ts+18136) + Xsqlite3_str_appendall(tls, bp+32, ts+18183) j++ } if int32((*PragmaName)(unsafe.Pointer(pPragma)).FmPragFlg)&(PragFlg_SchemaOpt|PragFlg_SchemaReq) != 0 { - Xsqlite3_str_appendall(tls, bp+32, ts+18148) + Xsqlite3_str_appendall(tls, bp+32, ts+18195) j++ } Xsqlite3_str_append(tls, bp+32, ts+4941, 1) @@ -83817,13 +83850,13 @@ __3: ; Xsqlite3StrAccumInit(tls, bp+32, uintptr(0), uintptr(0), 0, 
*(*int32)(unsafe.Pointer((*PragmaVtab)(unsafe.Pointer(pTab)).Fdb + 136 + 1*4))) - Xsqlite3_str_appendall(tls, bp+32, ts+18163) + Xsqlite3_str_appendall(tls, bp+32, ts+18210) if *(*uintptr)(unsafe.Pointer(pCsr + 24 + 1*8)) != 0 { - Xsqlite3_str_appendf(tls, bp+32, ts+18171, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(pCsr + 24 + 1*8)))) + Xsqlite3_str_appendf(tls, bp+32, ts+18218, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(pCsr + 24 + 1*8)))) } Xsqlite3_str_appendall(tls, bp+32, (*PragmaName)(unsafe.Pointer((*PragmaVtab)(unsafe.Pointer(pTab)).FpName)).FzName) if *(*uintptr)(unsafe.Pointer(pCsr + 24)) != 0 { - Xsqlite3_str_appendf(tls, bp+32, ts+18175, libc.VaList(bp+16, *(*uintptr)(unsafe.Pointer(pCsr + 24)))) + Xsqlite3_str_appendf(tls, bp+32, ts+18222, libc.VaList(bp+16, *(*uintptr)(unsafe.Pointer(pCsr + 24)))) } zSql = Xsqlite3StrAccumFinish(tls, bp+32) if zSql == uintptr(0) { @@ -83900,12 +83933,12 @@ } else if *(*uintptr)(unsafe.Pointer((*InitData)(unsafe.Pointer(pData)).FpzErrMsg)) != uintptr(0) { } else if (*InitData)(unsafe.Pointer(pData)).FmInitFlags&U32(INITFLAG_AlterMask) != 0 { *(*uintptr)(unsafe.Pointer((*InitData)(unsafe.Pointer(pData)).FpzErrMsg)) = Xsqlite3MPrintf(tls, db, - ts+18179, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(azObj)), *(*uintptr)(unsafe.Pointer(azObj + 1*8)), + ts+18226, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(azObj)), *(*uintptr)(unsafe.Pointer(azObj + 1*8)), azAlterType[(*InitData)(unsafe.Pointer(pData)).FmInitFlags&U32(INITFLAG_AlterMask)-U32(1)], zExtra)) (*InitData)(unsafe.Pointer(pData)).Frc = SQLITE_ERROR } else if (*Sqlite3)(unsafe.Pointer(db)).Fflags&uint64(SQLITE_WriteSchema) != 0 { - (*InitData)(unsafe.Pointer(pData)).Frc = Xsqlite3CorruptError(tls, 137196) + (*InitData)(unsafe.Pointer(pData)).Frc = Xsqlite3CorruptError(tls, 137249) } else { var z uintptr var zObj uintptr @@ -83914,19 +83947,19 @@ } else { zObj = ts + 4992 } - z = Xsqlite3MPrintf(tls, db, ts+18207, libc.VaList(bp+32, zObj)) + z = Xsqlite3MPrintf(tls, db, ts+18254, libc.VaList(bp+32, zObj)) if zExtra != 0 && *(*int8)(unsafe.Pointer(zExtra)) != 0 { - z = Xsqlite3MPrintf(tls, db, ts+18238, libc.VaList(bp+40, z, zExtra)) + z = Xsqlite3MPrintf(tls, db, ts+18285, libc.VaList(bp+40, z, zExtra)) } *(*uintptr)(unsafe.Pointer((*InitData)(unsafe.Pointer(pData)).FpzErrMsg)) = z - (*InitData)(unsafe.Pointer(pData)).Frc = Xsqlite3CorruptError(tls, 137203) + (*InitData)(unsafe.Pointer(pData)).Frc = Xsqlite3CorruptError(tls, 137256) } } var azAlterType = [3]uintptr{ - ts + 18246, - ts + 18253, - ts + 18265, + ts + 18293, + ts + 18300, + ts + 18312, } // Check to see if any sibling index (another index on the same table) @@ -84018,7 +84051,7 @@ var pIndex uintptr pIndex = Xsqlite3FindIndex(tls, db, *(*uintptr)(unsafe.Pointer(argv + 1*8)), (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName) if pIndex == uintptr(0) { - corruptSchema(tls, pData, argv, ts+18276) + corruptSchema(tls, pData, argv, ts+18323) } else if Xsqlite3GetUInt32(tls, *(*uintptr)(unsafe.Pointer(argv + 3*8)), pIndex+88) == 0 || (*Index)(unsafe.Pointer(pIndex)).Ftnum < Pgno(2) || (*Index)(unsafe.Pointer(pIndex)).Ftnum > (*InitData)(unsafe.Pointer(pData)).FmxPage || @@ -84066,7 +84099,7 @@ }()) *(*uintptr)(unsafe.Pointer(bp + 16 + 2*8)) = *(*uintptr)(unsafe.Pointer(bp + 16 + 1*8)) *(*uintptr)(unsafe.Pointer(bp + 16 + 3*8)) = ts + 7922 - *(*uintptr)(unsafe.Pointer(bp + 16 + 4*8)) = ts + 18289 + *(*uintptr)(unsafe.Pointer(bp + 16 + 4*8)) = ts + 18336 *(*uintptr)(unsafe.Pointer(bp + 16 + 5*8)) = 
uintptr(0) (*InitData)(unsafe.Pointer(bp + 64)).Fdb = db (*InitData)(unsafe.Pointer(bp + 64)).FiDb = iDb @@ -84195,7 +84228,7 @@ if !(int32((*Schema)(unsafe.Pointer((*Db)(unsafe.Pointer(pDb)).FpSchema)).Ffile_format) > SQLITE_MAX_FILE_FORMAT) { goto __19 } - Xsqlite3SetString(tls, pzErrMsg, db, ts+18361) + Xsqlite3SetString(tls, pzErrMsg, db, ts+18408) rc = SQLITE_ERROR goto initone_error_out __19: @@ -84209,7 +84242,7 @@ (*InitData)(unsafe.Pointer(bp + 64)).FmxPage = Xsqlite3BtreeLastPage(tls, (*Db)(unsafe.Pointer(pDb)).FpBt) zSql = Xsqlite3MPrintf(tls, db, - ts+18385, + ts+18432, libc.VaList(bp, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName, zSchemaTabName)) xAuth = (*Sqlite3)(unsafe.Pointer(db)).FxAuth @@ -84541,7 +84574,7 @@ goto __8 } zDb = (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + uintptr(i)*32)).FzDbSName - Xsqlite3ErrorWithMsg(tls, db, rc, ts+18419, libc.VaList(bp, zDb)) + Xsqlite3ErrorWithMsg(tls, db, rc, ts+18466, libc.VaList(bp, zDb)) goto end_prepare __8: @@ -84571,7 +84604,7 @@ if !(nBytes > mxLen) { goto __12 } - Xsqlite3ErrorWithMsg(tls, db, SQLITE_TOOBIG, ts+18449, 0) + Xsqlite3ErrorWithMsg(tls, db, SQLITE_TOOBIG, ts+18496, 0) rc = Xsqlite3ApiExit(tls, db, SQLITE_TOOBIG) goto end_prepare __12: @@ -84667,7 +84700,7 @@ *(*uintptr)(unsafe.Pointer(ppStmt)) = uintptr(0) if !(Xsqlite3SafetyCheckOk(tls, db) != 0) || zSql == uintptr(0) { - return Xsqlite3MisuseError(tls, 137995) + return Xsqlite3MisuseError(tls, 138048) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) Xsqlite3BtreeEnterAll(tls, db) @@ -84766,7 +84799,7 @@ *(*uintptr)(unsafe.Pointer(ppStmt)) = uintptr(0) if !(Xsqlite3SafetyCheckOk(tls, db) != 0) || zSql == uintptr(0) { - return Xsqlite3MisuseError(tls, 138143) + return Xsqlite3MisuseError(tls, 138196) } if nBytes >= 0 { var sz int32 @@ -85093,13 +85126,13 @@ zSp2++ } Xsqlite3ErrorMsg(tls, pParse, - ts+18468, libc.VaList(bp, pA, zSp1, pB, zSp2, pC)) + ts+18515, libc.VaList(bp, pA, zSp1, pB, zSp2, pC)) jointype = JT_INNER } return jointype } -var zKeyText = *(*[34]int8)(unsafe.Pointer(ts + 18498)) +var zKeyText = *(*[34]int8)(unsafe.Pointer(ts + 18545)) var aKeyword = [7]struct { Fi U8 FnChar U8 @@ -85274,7 +85307,7 @@ var pUsing uintptr = uintptr(0) if uint32(int32(*(*uint16)(unsafe.Pointer(pRight + 60 + 4))&0x400>>10)) != 0 || *(*uintptr)(unsafe.Pointer(pRight + 72)) != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+18532, libc.VaList(bp, 0)) + ts+18579, libc.VaList(bp, 0)) return 1 } for j = 0; j < int32((*Table)(unsafe.Pointer(pRightTab)).FnCol); j++ { @@ -85319,7 +85352,7 @@ tableAndColumnIndex(tls, pSrc, 0, i, zName, bp+24, bp+28, int32(*(*uint16)(unsafe.Pointer(pRight + 60 + 4))&0x1000>>12)) == 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+18582, libc.VaList(bp+8, zName)) + ts+18629, libc.VaList(bp+8, zName)) return 1 } pE1 = Xsqlite3CreateColumnExpr(tls, db, pSrc, *(*int32)(unsafe.Pointer(bp + 24)), *(*int32)(unsafe.Pointer(bp + 28))) @@ -85330,7 +85363,7 @@ int32(*(*uint16)(unsafe.Pointer(pRight + 60 + 4))&0x1000>>12)) != 0 { if int32(*(*uint16)(unsafe.Pointer(pSrc + 8 + uintptr(*(*int32)(unsafe.Pointer(bp + 24)))*104 + 60 + 4))&0x400>>10) == 0 || Xsqlite3IdListIndex(tls, *(*uintptr)(unsafe.Pointer(pSrc + 8 + uintptr(*(*int32)(unsafe.Pointer(bp + 24)))*104 + 72)), zName) < 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+18646, + Xsqlite3ErrorMsg(tls, pParse, ts+18693, libc.VaList(bp+16, zName)) break } @@ -85958,16 +85991,16 @@ var z uintptr switch id { case TK_ALL: - z = ts + 18683 + z = ts + 18730 break case 
TK_INTERSECT: - z = ts + 18693 + z = ts + 18740 break case TK_EXCEPT: - z = ts + 18703 + z = ts + 18750 break default: - z = ts + 18710 + z = ts + 18757 break } return z @@ -85977,7 +86010,7 @@ bp := tls.Alloc(8) defer tls.Free(8) - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18716, libc.VaList(bp, zUsage)) + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18763, libc.VaList(bp, zUsage)) } func generateSortTail(tls *libc.TLS, pParse uintptr, p uintptr, pSort uintptr, nColumn int32, pDest uintptr) { @@ -86003,9 +86036,9 @@ var nRefKey int32 = 0 var aOutEx uintptr = (*Select)(unsafe.Pointer(p)).FpEList + 8 - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18739, libc.VaList(bp, func() uintptr { + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18786, libc.VaList(bp, func() uintptr { if (*SortCtx)(unsafe.Pointer(pSort)).FnOBSat > 0 { - return ts + 18770 + return ts + 18817 } return ts + 1538 }())) @@ -86349,7 +86382,7 @@ } else { var z uintptr = (*ExprList_item)(unsafe.Pointer(pEList + 8 + uintptr(i)*32)).FzEName if z == uintptr(0) { - z = Xsqlite3MPrintf(tls, db, ts+18785, libc.VaList(bp+16, i+1)) + z = Xsqlite3MPrintf(tls, db, ts+18832, libc.VaList(bp+16, i+1)) } else { z = Xsqlite3DbStrDup(tls, db, z) } @@ -86449,7 +86482,7 @@ if zName != 0 && !(Xsqlite3IsTrueOrFalse(tls, zName) != 0) { zName = Xsqlite3DbStrDup(tls, db, zName) } else { - zName = Xsqlite3MPrintf(tls, db, ts+18785, libc.VaList(bp, i+1)) + zName = Xsqlite3MPrintf(tls, db, ts+18832, libc.VaList(bp, i+1)) } *(*U32)(unsafe.Pointer(bp + 56)) = U32(0) @@ -86465,7 +86498,7 @@ nName = j } } - zName = Xsqlite3MPrintf(tls, db, ts+18794, libc.VaList(bp+8, nName, zName, libc.PreIncUint32(&*(*U32)(unsafe.Pointer(bp + 56)), 1))) + zName = Xsqlite3MPrintf(tls, db, ts+18841, libc.VaList(bp+8, nName, zName, libc.PreIncUint32(&*(*U32)(unsafe.Pointer(bp + 56)), 1))) Xsqlite3ProgressCheck(tls, pParse) if *(*U32)(unsafe.Pointer(bp + 56)) > U32(3) { Xsqlite3_randomness(tls, int32(unsafe.Sizeof(U32(0))), bp+56) @@ -86548,8 +86581,6 @@ (*Column)(unsafe.Pointer(pCol)).Faffinity = Xsqlite3ExprAffinity(tls, p) if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) <= SQLITE_AFF_NONE { (*Column)(unsafe.Pointer(pCol)).Faffinity = aff - } else if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) >= SQLITE_AFF_NUMERIC && int32((*Expr)(unsafe.Pointer(p)).Fop) == TK_CAST { - (*Column)(unsafe.Pointer(pCol)).Faffinity = int8(SQLITE_AFF_FLEXNUM) } if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) >= SQLITE_AFF_TEXT && (*Select)(unsafe.Pointer(pSelect)).FpNext != 0 { var m int32 = 0 @@ -86564,12 +86595,15 @@ } else if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) >= SQLITE_AFF_NUMERIC && m&0x02 != 0 { (*Column)(unsafe.Pointer(pCol)).Faffinity = int8(SQLITE_AFF_BLOB) } + if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) >= SQLITE_AFF_NUMERIC && int32((*Expr)(unsafe.Pointer(p)).Fop) == TK_CAST { + (*Column)(unsafe.Pointer(pCol)).Faffinity = int8(SQLITE_AFF_FLEXNUM) + } } zType = columnTypeImpl(tls, bp, p, uintptr(0), uintptr(0), uintptr(0)) if zType == uintptr(0) || int32((*Column)(unsafe.Pointer(pCol)).Faffinity) != int32(Xsqlite3AffinityType(tls, zType, uintptr(0))) { if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) == SQLITE_AFF_NUMERIC || int32((*Column)(unsafe.Pointer(pCol)).Faffinity) == SQLITE_AFF_FLEXNUM { - zType = ts + 18802 + zType = ts + 18849 } else { zType = uintptr(0) for j = 1; j < SQLITE_N_STDTYPE; j++ { @@ -86785,7 +86819,7 @@ if !((*Select)(unsafe.Pointer(p)).FpWin != 0) { goto __1 } - Xsqlite3ErrorMsg(tls, pParse, ts+18806, 0) + 
Xsqlite3ErrorMsg(tls, pParse, ts+18853, 0) return __1: ; @@ -86876,7 +86910,7 @@ if !((*Select)(unsafe.Pointer(pFirstRec)).FselFlags&U32(SF_Aggregate) != 0) { goto __15 } - Xsqlite3ErrorMsg(tls, pParse, ts+18855, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+18902, 0) goto end_of_recursive_query __15: ; @@ -86896,7 +86930,7 @@ ; pSetup = (*Select)(unsafe.Pointer(pFirstRec)).FpPrior (*Select)(unsafe.Pointer(pSetup)).FpNext = uintptr(0) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18897, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18944, 0) rc = Xsqlite3Select(tls, pParse, pSetup, bp) (*Select)(unsafe.Pointer(pSetup)).FpNext = p if !(rc != 0) { @@ -86933,7 +86967,7 @@ Xsqlite3VdbeResolveLabel(tls, v, addrCont) (*Select)(unsafe.Pointer(pFirstRec)).FpPrior = uintptr(0) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18903, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18950, 0) Xsqlite3Select(tls, pParse, p, bp) (*Select)(unsafe.Pointer(pFirstRec)).FpPrior = pSetup @@ -86967,11 +87001,11 @@ p = (*Select)(unsafe.Pointer(p)).FpPrior nRow = nRow + bShowAll } - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18918, libc.VaList(bp, nRow, func() uintptr { + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18965, libc.VaList(bp, nRow, func() uintptr { if nRow == 1 { return ts + 1538 } - return ts + 18941 + return ts + 18988 }())) for p != 0 { selectInnerLoop(tls, pParse, p, -1, uintptr(0), uintptr(0), pDest, 1, 1) @@ -87072,8 +87106,8 @@ if !((*Select)(unsafe.Pointer(pPrior)).FpPrior == uintptr(0)) { goto __8 } - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18943, 0) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18958, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18990, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19005, 0) __8: ; switch int32((*Select)(unsafe.Pointer(p)).Fop) { @@ -87120,7 +87154,7 @@ ; __15: ; - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18683, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18730, 0) rc = Xsqlite3Select(tls, pParse, p, bp+16) @@ -87187,7 +87221,7 @@ pLimit = (*Select)(unsafe.Pointer(p)).FpLimit (*Select)(unsafe.Pointer(p)).FpLimit = uintptr(0) (*SelectDest)(unsafe.Pointer(bp + 64)).FeDest = op - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18977, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19024, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) rc = Xsqlite3Select(tls, pParse, p, bp+64) @@ -87249,7 +87283,7 @@ pLimit1 = (*Select)(unsafe.Pointer(p)).FpLimit (*Select)(unsafe.Pointer(p)).FpLimit = uintptr(0) (*SelectDest)(unsafe.Pointer(bp + 104)).FiSDParm = tab2 - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18977, libc.VaList(bp+8, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19024, libc.VaList(bp+8, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) rc = Xsqlite3Select(tls, pParse, p, bp+104) @@ -87402,10 +87436,10 @@ defer tls.Free(8) if (*Select)(unsafe.Pointer(p)).FselFlags&U32(SF_Values) != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+18998, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+19045, 0) } else { Xsqlite3ErrorMsg(tls, pParse, - ts+19044, + ts+19091, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) } } @@ -87659,8 +87693,8 @@ (*Select)(unsafe.Pointer(pPrior)).FpNext = uintptr(0) (*Select)(unsafe.Pointer(pPrior)).FpOrderBy = Xsqlite3ExprListDup(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pOrderBy, 
0) - Xsqlite3ResolveOrderGroupBy(tls, pParse, p, (*Select)(unsafe.Pointer(p)).FpOrderBy, ts+7218) - Xsqlite3ResolveOrderGroupBy(tls, pParse, pPrior, (*Select)(unsafe.Pointer(pPrior)).FpOrderBy, ts+7218) + Xsqlite3ResolveOrderGroupBy(tls, pParse, p, (*Select)(unsafe.Pointer(p)).FpOrderBy, ts+7207) + Xsqlite3ResolveOrderGroupBy(tls, pParse, pPrior, (*Select)(unsafe.Pointer(pPrior)).FpOrderBy, ts+7207) computeLimitRegisters(tls, pParse, p, labelEnd) if (*Select)(unsafe.Pointer(p)).FiLimit != 0 && op == TK_ALL { @@ -87687,13 +87721,13 @@ Xsqlite3SelectDestInit(tls, bp+8, SRT_Coroutine, regAddrA) Xsqlite3SelectDestInit(tls, bp+48, SRT_Coroutine, regAddrB) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19126, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19173, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) addrSelectA = Xsqlite3VdbeCurrentAddr(tls, v) + 1 addr1 = Xsqlite3VdbeAddOp3(tls, v, OP_InitCoroutine, regAddrA, 0, addrSelectA) (*Select)(unsafe.Pointer(pPrior)).FiLimit = regLimitA - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19137, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19184, 0) Xsqlite3Select(tls, pParse, pPrior, bp+8) Xsqlite3VdbeEndCoroutine(tls, v, regAddrA) Xsqlite3VdbeJumpHere(tls, v, addr1) @@ -87705,7 +87739,7 @@ savedOffset = (*Select)(unsafe.Pointer(p)).FiOffset (*Select)(unsafe.Pointer(p)).FiLimit = regLimitB (*Select)(unsafe.Pointer(p)).FiOffset = 0 - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19142, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19189, 0) Xsqlite3Select(tls, pParse, p, bp+48) (*Select)(unsafe.Pointer(p)).FiLimit = savedLimit (*Select)(unsafe.Pointer(p)).FiOffset = savedOffset @@ -87893,7 +87927,8 @@ Xsqlite3VectorErrorMsg(tls, (*SubstContext)(unsafe.Pointer(pSubst)).FpParse, pCopy) } else { var db uintptr = (*Parse)(unsafe.Pointer((*SubstContext)(unsafe.Pointer(pSubst)).FpParse)).Fdb - if (*SubstContext)(unsafe.Pointer(pSubst)).FisOuterJoin != 0 && int32((*Expr)(unsafe.Pointer(pCopy)).Fop) != TK_COLUMN { + if (*SubstContext)(unsafe.Pointer(pSubst)).FisOuterJoin != 0 && + (int32((*Expr)(unsafe.Pointer(pCopy)).Fop) != TK_COLUMN || (*Expr)(unsafe.Pointer(pCopy)).FiTable != (*SubstContext)(unsafe.Pointer(pSubst)).FiNewTable) { libc.Xmemset(tls, bp, 0, uint64(unsafe.Sizeof(Expr{}))) (*Expr)(unsafe.Pointer(bp)).Fop = U8(TK_IF_NULL_ROW) (*Expr)(unsafe.Pointer(bp)).FpLeft = pCopy @@ -88792,7 +88827,7 @@ for pIdx = (*Table)(unsafe.Pointer(pTab)).FpIndex; pIdx != 0 && Xsqlite3StrICmp(tls, (*Index)(unsafe.Pointer(pIdx)).FzName, zIndexedBy) != 0; pIdx = (*Index)(unsafe.Pointer(pIdx)).FpNext { } if !(pIdx != 0) { - Xsqlite3ErrorMsg(tls, pParse, ts+19148, libc.VaList(bp, zIndexedBy, 0)) + Xsqlite3ErrorMsg(tls, pParse, ts+19195, libc.VaList(bp, zIndexedBy, 0)) (*Parse)(unsafe.Pointer(pParse)).FcheckSchema = U8(1) return SQLITE_ERROR } @@ -88875,7 +88910,7 @@ defer tls.Free(8) if uint32(int32(*(*uint16)(unsafe.Pointer(pFrom + 60 + 4))&0x4>>2)) != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+19166, libc.VaList(bp, (*SrcItem)(unsafe.Pointer(pFrom)).FzName)) + Xsqlite3ErrorMsg(tls, pParse, ts+19213, libc.VaList(bp, (*SrcItem)(unsafe.Pointer(pFrom)).FzName)) return 1 } return 0 @@ -89004,7 +89039,7 @@ *(*U32)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pFrom)).FpSelect + 4)) |= U32(SF_CopyCte) if uint32(int32(*(*uint16)(unsafe.Pointer(pFrom + 60 + 4))&0x2>>1)) != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+19189, libc.VaList(bp+8, 
*(*uintptr)(unsafe.Pointer(pFrom + 88)))) + Xsqlite3ErrorMsg(tls, pParse, ts+19236, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(pFrom + 88)))) return 2 } libc.SetBitFieldPtr16Uint32(pFrom+60+4, uint32(1), 8, 0x100) @@ -89027,7 +89062,7 @@ libc.SetBitFieldPtr16Uint32(pItem+60+4, uint32(1), 6, 0x40) if (*Select)(unsafe.Pointer(pRecTerm)).FselFlags&U32(SF_Recursive) != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+19209, libc.VaList(bp+16, (*Cte)(unsafe.Pointer(pCte)).FzName)) + ts+19256, libc.VaList(bp+16, (*Cte)(unsafe.Pointer(pCte)).FzName)) return 2 } *(*U32)(unsafe.Pointer(pRecTerm + 4)) |= U32(SF_Recursive) @@ -89043,7 +89078,7 @@ pRecTerm = (*Select)(unsafe.Pointer(pRecTerm)).FpPrior } - (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19252 + (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19299 pSavedWith = (*Parse)(unsafe.Pointer(pParse)).FpWith (*Parse)(unsafe.Pointer(pParse)).FpWith = *(*uintptr)(unsafe.Pointer(bp + 48)) if (*Select)(unsafe.Pointer(pSel)).FselFlags&U32(SF_Recursive) != 0 { @@ -89069,7 +89104,7 @@ pEList = (*Select)(unsafe.Pointer(pLeft)).FpEList if (*Cte)(unsafe.Pointer(pCte)).FpCols != 0 { if pEList != 0 && (*ExprList)(unsafe.Pointer(pEList)).FnExpr != (*ExprList)(unsafe.Pointer((*Cte)(unsafe.Pointer(pCte)).FpCols)).FnExpr { - Xsqlite3ErrorMsg(tls, pParse, ts+19275, + Xsqlite3ErrorMsg(tls, pParse, ts+19322, libc.VaList(bp+24, (*Cte)(unsafe.Pointer(pCte)).FzName, (*ExprList)(unsafe.Pointer(pEList)).FnExpr, (*ExprList)(unsafe.Pointer((*Cte)(unsafe.Pointer(pCte)).FpCols)).FnExpr)) (*Parse)(unsafe.Pointer(pParse)).FpWith = pSavedWith return 2 @@ -89080,9 +89115,9 @@ Xsqlite3ColumnsFromExprList(tls, pParse, pEList, pTab+54, pTab+8) if bMayRecursive != 0 { if (*Select)(unsafe.Pointer(pSel)).FselFlags&U32(SF_Recursive) != 0 { - (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19313 + (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19360 } else { - (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19347 + (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19394 } Xsqlite3WalkSelect(tls, pWalker, pSel) } @@ -89129,7 +89164,7 @@ if (*SrcItem)(unsafe.Pointer(pFrom)).FzAlias != 0 { (*Table)(unsafe.Pointer(pTab)).FzName = Xsqlite3DbStrDup(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, (*SrcItem)(unsafe.Pointer(pFrom)).FzAlias) } else { - (*Table)(unsafe.Pointer(pTab)).FzName = Xsqlite3MPrintf(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, ts+19385, libc.VaList(bp, pFrom)) + (*Table)(unsafe.Pointer(pTab)).FzName = Xsqlite3MPrintf(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, ts+19432, libc.VaList(bp, pFrom)) } for (*Select)(unsafe.Pointer(pSel)).FpPrior != 0 { pSel = (*Select)(unsafe.Pointer(pSel)).FpPrior @@ -89241,7 +89276,7 @@ return WRC_Abort } if (*Table)(unsafe.Pointer(pTab)).FnTabRef >= U32(0xffff) { - Xsqlite3ErrorMsg(tls, pParse, ts+19389, + Xsqlite3ErrorMsg(tls, pParse, ts+19436, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName)) (*SrcItem)(unsafe.Pointer(pFrom)).FpTab = uintptr(0) return WRC_Abort @@ -89260,7 +89295,7 @@ if int32((*Table)(unsafe.Pointer(pTab)).FeTabType) == TABTYP_VIEW { if (*Sqlite3)(unsafe.Pointer(db)).Fflags&uint64(SQLITE_EnableView) == uint64(0) && (*Table)(unsafe.Pointer(pTab)).FpSchema != (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+1*32)).FpSchema { - Xsqlite3ErrorMsg(tls, pParse, ts+19428, + Xsqlite3ErrorMsg(tls, pParse, ts+19475, libc.VaList(bp+8, (*Table)(unsafe.Pointer(pTab)).FzName)) } (*SrcItem)(unsafe.Pointer(pFrom)).FpSelect = Xsqlite3SelectDup(tls, db, *(*uintptr)(unsafe.Pointer(pTab + 64)), 0) @@ -89384,7 +89419,7 @@ if pNew != 0 { var pX uintptr = pNew + 8 
+ uintptr((*ExprList)(unsafe.Pointer(pNew)).FnExpr-1)*32 - (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3MPrintf(tls, db, ts+19459, libc.VaList(bp+24, zUName)) + (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3MPrintf(tls, db, ts+19506, libc.VaList(bp+24, zUName)) libc.SetBitFieldPtr16Uint32(pX+16+4, uint32(ENAME_TAB), 0, 0x3) libc.SetBitFieldPtr16Uint32(pX+16+4, uint32(1), 7, 0x80) } @@ -89449,7 +89484,7 @@ (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3DbStrDup(tls, db, (*ExprList_item)(unsafe.Pointer(pNestedFrom+8+uintptr(j)*32)).FzEName) } else { - (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3MPrintf(tls, db, ts+19464, + (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3MPrintf(tls, db, ts+19511, libc.VaList(bp+32, zSchemaName, zTabName, zName)) } @@ -89480,9 +89515,9 @@ ; if !(tableSeen != 0) { if zTName != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+19473, libc.VaList(bp+72, zTName)) + Xsqlite3ErrorMsg(tls, pParse, ts+19520, libc.VaList(bp+72, zTName)) } else { - Xsqlite3ErrorMsg(tls, pParse, ts+19491, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+19538, 0) } } } @@ -89492,7 +89527,7 @@ } if (*Select)(unsafe.Pointer(p)).FpEList != 0 { if (*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer(p)).FpEList)).FnExpr > *(*int32)(unsafe.Pointer(db + 136 + 2*4)) { - Xsqlite3ErrorMsg(tls, pParse, ts+19511, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+19558, 0) return WRC_Abort } if elistFlags&U32(EP_HasFunc|EP_Subquery) != U32(0) { @@ -89630,7 +89665,7 @@ (*AggInfo)(unsafe.Pointer(pAggInfo)).FnColumn = (*AggInfo)(unsafe.Pointer(pAggInfo)).FnAccumulator if int32((*AggInfo)(unsafe.Pointer(pAggInfo)).FnSortingColumn) > 0 { if (*AggInfo)(unsafe.Pointer(pAggInfo)).FnColumn == 0 { - (*AggInfo)(unsafe.Pointer(pAggInfo)).FnSortingColumn = U16(0) + (*AggInfo)(unsafe.Pointer(pAggInfo)).FnSortingColumn = U16((*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer(pSelect)).FpGroupBy)).FnExpr) } else { (*AggInfo)(unsafe.Pointer(pAggInfo)).FnSortingColumn = U16(int32((*AggInfo_col)(unsafe.Pointer((*AggInfo)(unsafe.Pointer(pAggInfo)).FaCol+uintptr((*AggInfo)(unsafe.Pointer(pAggInfo)).FnColumn-1)*24)).FiSorterColumn) + 1) } @@ -89714,13 +89749,13 @@ if *(*uintptr)(unsafe.Pointer(pE + 32)) == uintptr(0) || (*ExprList)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pE + 32)))).FnExpr != 1 { Xsqlite3ErrorMsg(tls, pParse, - ts+19542, 0) + ts+19589, 0) (*AggInfo_func)(unsafe.Pointer(pFunc)).FiDistinct = -1 } else { var pKeyInfo uintptr = Xsqlite3KeyInfoFromExprList(tls, pParse, *(*uintptr)(unsafe.Pointer(pE + 32)), 0, 0) (*AggInfo_func)(unsafe.Pointer(pFunc)).FiDistAddr = Xsqlite3VdbeAddOp4(tls, v, OP_OpenEphemeral, (*AggInfo_func)(unsafe.Pointer(pFunc)).FiDistinct, 0, 0, pKeyInfo, -8) - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+19593, libc.VaList(bp, (*FuncDef)(unsafe.Pointer((*AggInfo_func)(unsafe.Pointer(pFunc)).FpFunc)).FzName)) + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+19640, libc.VaList(bp, (*FuncDef)(unsafe.Pointer((*AggInfo_func)(unsafe.Pointer(pFunc)).FpFunc)).FzName)) } } @@ -89909,11 +89944,11 @@ if int32((*Parse)(unsafe.Pointer(pParse)).Fexplain) == 2 { var bCover int32 = libc.Bool32(pIdx != uintptr(0) && ((*Table)(unsafe.Pointer(pTab)).FtabFlags&U32(TF_WithoutRowid) == U32(0) || !(int32(*(*uint16)(unsafe.Pointer(pIdx + 100))&0x3>>0) == SQLITE_IDXTYPE_PRIMARYKEY))) - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+19626, + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+19673, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName, func() uintptr { if bCover != 0 { - return ts + 19638 + 
return ts + 19685 } return ts + 1538 }(), @@ -90241,7 +90276,7 @@ goto __7 } Xsqlite3ErrorMsg(tls, pParse, - ts+19661, + ts+19708, libc.VaList(bp, func() uintptr { if (*SrcItem)(unsafe.Pointer(p0)).FzAlias != 0 { return (*SrcItem)(unsafe.Pointer(p0)).FzAlias @@ -90302,7 +90337,7 @@ if !(int32((*Table)(unsafe.Pointer(pTab)).FnCol) != (*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer(pSub)).FpEList)).FnExpr) { goto __15 } - Xsqlite3ErrorMsg(tls, pParse, ts+19715, + Xsqlite3ErrorMsg(tls, pParse, ts+19762, libc.VaList(bp+8, int32((*Table)(unsafe.Pointer(pTab)).FnCol), (*Table)(unsafe.Pointer(pTab)).FzName, (*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer(pSub)).FpEList)).FnExpr)) goto select_end __15: @@ -90444,7 +90479,7 @@ (*SrcItem)(unsafe.Pointer(pItem1)).FaddrFillSub = addrTop Xsqlite3SelectDestInit(tls, bp+96, SRT_Coroutine, (*SrcItem)(unsafe.Pointer(pItem1)).FregReturn) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19755, libc.VaList(bp+32, pItem1)) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19802, libc.VaList(bp+32, pItem1)) Xsqlite3Select(tls, pParse, pSub1, bp+96) (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pItem1)).FpTab)).FnRowLogEst = (*Select)(unsafe.Pointer(pSub1)).FnSelectRow libc.SetBitFieldPtr16Uint32(pItem1+60+4, uint32(1), 5, 0x20) @@ -90503,7 +90538,7 @@ ; Xsqlite3SelectDestInit(tls, bp+96, SRT_EphemTab, (*SrcItem)(unsafe.Pointer(pItem1)).FiCursor) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19770, libc.VaList(bp+40, pItem1)) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19817, libc.VaList(bp+40, pItem1)) Xsqlite3Select(tls, pParse, pSub1, bp+96) (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pItem1)).FpTab)).FnRowLogEst = (*Select)(unsafe.Pointer(pSub1)).FnSelectRow if !(onceAddr != 0) { @@ -90974,9 +91009,9 @@ explainTempTable(tls, pParse, func() uintptr { if (*DistinctCtx)(unsafe.Pointer(bp+136)).FisTnct != 0 && (*Select)(unsafe.Pointer(p)).FselFlags&U32(SF_Distinct) == U32(0) { - return ts + 19786 + return ts + 19833 } - return ts + 19795 + return ts + 19842 }()) groupBySort = 1 @@ -91327,7 +91362,7 @@ if !(int32((*DistinctCtx)(unsafe.Pointer(bp+136)).FeTnctType) == WHERE_DISTINCT_UNORDERED) { goto __146 } - explainTempTable(tls, pParse, ts+19786) + explainTempTable(tls, pParse, ts+19833) __146: ; if !((*SortCtx)(unsafe.Pointer(bp+48)).FpOrderBy != 0) { @@ -91432,7 +91467,7 @@ } Xsqlite3_free(tls, (*TabResult)(unsafe.Pointer(p)).FzErrMsg) (*TabResult)(unsafe.Pointer(p)).FzErrMsg = Xsqlite3_mprintf(tls, - ts+19804, 0) + ts+19851, 0) (*TabResult)(unsafe.Pointer(p)).Frc = SQLITE_ERROR return 1 __11: @@ -91665,7 +91700,7 @@ if !((*Token)(unsafe.Pointer(pName2)).Fn > uint32(0)) { goto __3 } - Xsqlite3ErrorMsg(tls, pParse, ts+19869, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+19916, 0) goto trigger_cleanup __3: ; @@ -91709,7 +91744,7 @@ goto trigger_cleanup __8: ; - Xsqlite3FixInit(tls, bp+40, pParse, iDb, ts+19915, *(*uintptr)(unsafe.Pointer(bp + 32))) + Xsqlite3FixInit(tls, bp+40, pParse, iDb, ts+19962, *(*uintptr)(unsafe.Pointer(bp + 32))) if !(Xsqlite3FixSrcList(tls, bp+40, pTableName) != 0) { goto __9 } @@ -91727,7 +91762,7 @@ if !(int32((*Table)(unsafe.Pointer(pTab)).FeTabType) == TABTYP_VTAB) { goto __11 } - Xsqlite3ErrorMsg(tls, pParse, ts+19923, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+19970, 0) goto trigger_orphan_error __11: ; @@ -91739,7 +91774,7 @@ goto trigger_cleanup __12: ; - if !(Xsqlite3CheckObjectName(tls, pParse, zName, ts+19915, (*Table)(unsafe.Pointer(pTab)).FzName) != 0) { + if !(Xsqlite3CheckObjectName(tls, pParse, zName, ts+19962, 
(*Table)(unsafe.Pointer(pTab)).FzName) != 0) { goto __13 } goto trigger_cleanup @@ -91754,11 +91789,12 @@ if !!(noErr != 0) { goto __16 } - Xsqlite3ErrorMsg(tls, pParse, ts+19964, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(bp + 32)))) + Xsqlite3ErrorMsg(tls, pParse, ts+20011, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(bp + 32)))) goto __17 __16: ; Xsqlite3CodeVerifySchema(tls, pParse, iDb) + __17: ; goto trigger_cleanup @@ -91769,19 +91805,19 @@ if !(Xsqlite3_strnicmp(tls, (*Table)(unsafe.Pointer(pTab)).FzName, ts+6365, 7) == 0) { goto __18 } - Xsqlite3ErrorMsg(tls, pParse, ts+19990, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+20037, 0) goto trigger_cleanup __18: ; if !(int32((*Table)(unsafe.Pointer(pTab)).FeTabType) == TABTYP_VIEW && tr_tm != TK_INSTEAD) { goto __19 } - Xsqlite3ErrorMsg(tls, pParse, ts+20028, + Xsqlite3ErrorMsg(tls, pParse, ts+20075, libc.VaList(bp+8, func() uintptr { if tr_tm == TK_BEFORE { - return ts + 20065 + return ts + 20112 } - return ts + 20072 + return ts + 20119 }(), pTableName+8)) goto trigger_orphan_error __19: @@ -91790,7 +91826,7 @@ goto __20 } Xsqlite3ErrorMsg(tls, pParse, - ts+20078, libc.VaList(bp+24, pTableName+8)) + ts+20125, libc.VaList(bp+24, pTableName+8)) goto trigger_orphan_error __20: ; @@ -91939,7 +91975,7 @@ __3: ; Xsqlite3TokenInit(tls, bp+56, (*Trigger)(unsafe.Pointer(pTrig)).FzName) - Xsqlite3FixInit(tls, bp+72, pParse, iDb, ts+19915, bp+56) + Xsqlite3FixInit(tls, bp+72, pParse, iDb, ts+19962, bp+56) if !(Xsqlite3FixTriggerStep(tls, bp+72, (*Trigger)(unsafe.Pointer(pTrig)).Fstep_list) != 0 || Xsqlite3FixExpr(tls, bp+72, (*Trigger)(unsafe.Pointer(pTrig)).FpWhen) != 0) { goto __4 @@ -91972,7 +92008,7 @@ goto __12 } Xsqlite3ErrorMsg(tls, pParse, - ts+20124, + ts+20171, libc.VaList(bp, (*Trigger)(unsafe.Pointer(pTrig)).FzName, (*TriggerStep)(unsafe.Pointer(pStep)).FzTarget)) goto triggerfinish_cleanup __12: @@ -91997,13 +92033,13 @@ z = Xsqlite3DbStrNDup(tls, db, (*Token)(unsafe.Pointer(pAll)).Fz, uint64((*Token)(unsafe.Pointer(pAll)).Fn)) Xsqlite3NestedParse(tls, pParse, - ts+20172, + ts+20219, libc.VaList(bp+16, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName, zName, (*Trigger)(unsafe.Pointer(pTrig)).Ftable, z)) Xsqlite3DbFree(tls, db, z) Xsqlite3ChangeCookie(tls, pParse, iDb) Xsqlite3VdbeAddParseSchemaOp(tls, v, iDb, - Xsqlite3MPrintf(tls, db, ts+20247, libc.VaList(bp+48, zName)), uint16(0)) + Xsqlite3MPrintf(tls, db, ts+20294, libc.VaList(bp+48, zName)), uint16(0)) __7: ; __6: @@ -92259,7 +92295,7 @@ if !!(noErr != 0) { goto __9 } - Xsqlite3ErrorMsg(tls, pParse, ts+20276, libc.VaList(bp, pName+8)) + Xsqlite3ErrorMsg(tls, pParse, ts+20323, libc.VaList(bp, pName+8)) goto __10 __9: Xsqlite3CodeVerifyNamedSchema(tls, pParse, zDb) @@ -92312,7 +92348,7 @@ if libc.AssignUintptr(&v, Xsqlite3GetVdbe(tls, pParse)) != uintptr(0) { Xsqlite3NestedParse(tls, pParse, - ts+20296, + ts+20343, libc.VaList(bp, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName, (*Trigger)(unsafe.Pointer(pTrigger)).FzName)) Xsqlite3ChangeCookie(tls, pParse, iDb) Xsqlite3VdbeAddOp4(tls, v, OP_DropTrigger, iDb, 0, 0, (*Trigger)(unsafe.Pointer(pTrigger)).FzName, 0) @@ -92426,12 +92462,12 @@ goto __15 } Xsqlite3ErrorMsg(tls, pParse, - ts+20358, + ts+20405, libc.VaList(bp, func() uintptr { if op == TK_DELETE { - return ts + 20406 + return ts + 20453 } - return ts + 20413 + return ts + 20460 }())) __15: ; @@ -92545,7 +92581,7 @@ if int32((*Expr)(unsafe.Pointer((*Expr)(unsafe.Pointer(pTerm)).FpRight)).Fop) != TK_ASTERISK 
{ return 0 } - Xsqlite3ErrorMsg(tls, pParse, ts+20420, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+20467, 0) return 1 } @@ -92611,7 +92647,7 @@ } Xsqlite3ExprListDelete(tls, db, (*Select)(unsafe.Pointer(bp)).FpEList) pNew = sqlite3ExpandReturning(tls, pParse, (*Returning)(unsafe.Pointer(pReturning)).FpReturnEL, pTab) - if !(int32((*Sqlite3)(unsafe.Pointer(db)).FmallocFailed) != 0) { + if (*Parse)(unsafe.Pointer(pParse)).FnErr == 0 { libc.Xmemset(tls, bp+240, 0, uint64(unsafe.Sizeof(NameContext{}))) if (*Returning)(unsafe.Pointer(pReturning)).FnRetCol == 0 { (*Returning)(unsafe.Pointer(pReturning)).FnRetCol = (*ExprList)(unsafe.Pointer(pNew)).FnExpr @@ -92775,7 +92811,7 @@ if v != 0 { if (*Trigger)(unsafe.Pointer(pTrigger)).FzName != 0 { Xsqlite3VdbeChangeP4(tls, v, -1, - Xsqlite3MPrintf(tls, db, ts+20462, libc.VaList(bp, (*Trigger)(unsafe.Pointer(pTrigger)).FzName)), -6) + Xsqlite3MPrintf(tls, db, ts+20509, libc.VaList(bp, (*Trigger)(unsafe.Pointer(pTrigger)).FzName)), -6) } if (*Trigger)(unsafe.Pointer(pTrigger)).FpWhen != 0 { @@ -93368,7 +93404,7 @@ } Xsqlite3ErrorMsg(tls, pParse, - ts+20476, + ts+20523, libc.VaList(bp, (*Column)(unsafe.Pointer((*Table)(unsafe.Pointer(pTab)).FaCol+uintptr(j)*24)).FzCnName)) goto update_cleanup __27: @@ -93400,7 +93436,7 @@ iRowidExpr = i goto __30 __29: - Xsqlite3ErrorMsg(tls, pParse, ts+20512, libc.VaList(bp+8, (*ExprList_item)(unsafe.Pointer(pChanges+8+uintptr(i)*32)).FzEName)) + Xsqlite3ErrorMsg(tls, pParse, ts+20559, libc.VaList(bp+8, (*ExprList_item)(unsafe.Pointer(pChanges+8+uintptr(i)*32)).FzEName)) (*Parse)(unsafe.Pointer(pParse)).FcheckSchema = U8(1) goto update_cleanup __30: @@ -93726,7 +93762,12 @@ goto __77 __76: flags = WHERE_ONEPASS_DESIRED - if !(!(int32((*Parse)(unsafe.Pointer(pParse)).Fnested) != 0) && !(pTrigger != 0) && !(hasFK != 0) && !(chngKey != 0) && !(*(*int32)(unsafe.Pointer(bp + 104)) != 0)) { + if !(!(int32((*Parse)(unsafe.Pointer(pParse)).Fnested) != 0) && + !(pTrigger != 0) && + !(hasFK != 0) && + !(chngKey != 0) && + !(*(*int32)(unsafe.Pointer(bp + 104)) != 0) && + (*NameContext)(unsafe.Pointer(bp+40)).FncFlags&NC_Subquery == 0) { goto __78 } flags = flags | WHERE_ONEPASS_MULTIROW @@ -94280,7 +94321,7 @@ if !(regRowCount != 0) { goto __169 } - Xsqlite3CodeChangeCount(tls, v, regRowCount, ts+20531) + Xsqlite3CodeChangeCount(tls, v, regRowCount, ts+20578) __169: ; update_cleanup: @@ -94586,10 +94627,10 @@ if nClause == 0 && (*Upsert)(unsafe.Pointer(pUpsert)).FpNextUpsert == uintptr(0) { *(*int8)(unsafe.Pointer(bp + 216)) = int8(0) } else { - Xsqlite3_snprintf(tls, int32(unsafe.Sizeof([16]int8{})), bp+216, ts+20544, libc.VaList(bp, nClause+1)) + Xsqlite3_snprintf(tls, int32(unsafe.Sizeof([16]int8{})), bp+216, ts+20591, libc.VaList(bp, nClause+1)) } Xsqlite3ErrorMsg(tls, pParse, - ts+20548, libc.VaList(bp+8, bp+216)) + ts+20595, libc.VaList(bp+8, bp+216)) return SQLITE_ERROR } @@ -94712,7 +94753,7 @@ var zSubSql uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp)), 0) if zSubSql != 0 && - (libc.Xstrncmp(tls, zSubSql, ts+20621, uint64(3)) == 0 || libc.Xstrncmp(tls, zSubSql, ts+20625, uint64(3)) == 0) { + (libc.Xstrncmp(tls, zSubSql, ts+20668, uint64(3)) == 0 || libc.Xstrncmp(tls, zSubSql, ts+20672, uint64(3)) == 0) { rc = execSql(tls, db, pzErrMsg, zSubSql) if rc != SQLITE_OK { break @@ -94860,14 +94901,14 @@ if !!(int32((*Sqlite3)(unsafe.Pointer(db)).FautoCommit) != 0) { goto __1 } - Xsqlite3SetString(tls, pzErrMsg, db, ts+20629) + Xsqlite3SetString(tls, pzErrMsg, db, ts+20676) return SQLITE_ERROR __1: ; if 
!((*Sqlite3)(unsafe.Pointer(db)).FnVdbeActive > 1) { goto __2 } - Xsqlite3SetString(tls, pzErrMsg, db, ts+20669) + Xsqlite3SetString(tls, pzErrMsg, db, ts+20716) return SQLITE_ERROR __2: ; @@ -94878,7 +94919,7 @@ if !(Xsqlite3_value_type(tls, pOut) != SQLITE_TEXT) { goto __5 } - Xsqlite3SetString(tls, pzErrMsg, db, ts+20712) + Xsqlite3SetString(tls, pzErrMsg, db, ts+20759) return SQLITE_ERROR __5: ; @@ -94906,7 +94947,7 @@ isMemDb = Xsqlite3PagerIsMemdb(tls, Xsqlite3BtreePager(tls, pMain)) nDb = (*Sqlite3)(unsafe.Pointer(db)).FnDb - rc = execSqlF(tls, db, pzErrMsg, ts+20730, libc.VaList(bp, zOut)) + rc = execSqlF(tls, db, pzErrMsg, ts+20777, libc.VaList(bp, zOut)) (*Sqlite3)(unsafe.Pointer(db)).FopenFlags = saved_openFlags if !(rc != SQLITE_OK) { goto __6 @@ -94926,7 +94967,7 @@ goto __8 } rc = SQLITE_ERROR - Xsqlite3SetString(tls, pzErrMsg, db, ts+20753) + Xsqlite3SetString(tls, pzErrMsg, db, ts+20800) goto end_of_vacuum __8: ; @@ -94986,7 +95027,7 @@ (*Sqlite3)(unsafe.Pointer(db)).Finit.FiDb = U8(nDb) rc = execSqlF(tls, db, pzErrMsg, - ts+20780, + ts+20827, libc.VaList(bp+8, zDbMain)) if !(rc != SQLITE_OK) { goto __13 @@ -94995,7 +95036,7 @@ __13: ; rc = execSqlF(tls, db, pzErrMsg, - ts+20888, + ts+20935, libc.VaList(bp+16, zDbMain)) if !(rc != SQLITE_OK) { goto __14 @@ -95006,7 +95047,7 @@ (*Sqlite3)(unsafe.Pointer(db)).Finit.FiDb = U8(0) rc = execSqlF(tls, db, pzErrMsg, - ts+20942, + ts+20989, libc.VaList(bp+24, zDbMain)) *(*U32)(unsafe.Pointer(db + 44)) &= libc.Uint32FromInt32(libc.CplInt32(DBFLAG_Vacuum)) @@ -95017,7 +95058,7 @@ __15: ; rc = execSqlF(tls, db, pzErrMsg, - ts+21093, + ts+21140, libc.VaList(bp+32, zDbMain)) if !(rc != 0) { goto __16 @@ -95446,11 +95487,11 @@ if pEnd != 0 { (*Parse)(unsafe.Pointer(pParse)).FsNameToken.Fn = uint32(int32((int64((*Token)(unsafe.Pointer(pEnd)).Fz)-int64((*Parse)(unsafe.Pointer(pParse)).FsNameToken.Fz))/1)) + (*Token)(unsafe.Pointer(pEnd)).Fn } - zStmt = Xsqlite3MPrintf(tls, db, ts+21223, libc.VaList(bp, pParse+272)) + zStmt = Xsqlite3MPrintf(tls, db, ts+21270, libc.VaList(bp, pParse+272)) iDb = Xsqlite3SchemaToIndex(tls, db, (*Table)(unsafe.Pointer(pTab)).FpSchema) Xsqlite3NestedParse(tls, pParse, - ts+21247, + ts+21294, libc.VaList(bp+8, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName, (*Table)(unsafe.Pointer(pTab)).FzName, (*Table)(unsafe.Pointer(pTab)).FzName, @@ -95460,7 +95501,7 @@ Xsqlite3ChangeCookie(tls, pParse, iDb) Xsqlite3VdbeAddOp0(tls, v, OP_Expire) - zWhere = Xsqlite3MPrintf(tls, db, ts+21346, libc.VaList(bp+48, (*Table)(unsafe.Pointer(pTab)).FzName, zStmt)) + zWhere = Xsqlite3MPrintf(tls, db, ts+21393, libc.VaList(bp+48, (*Table)(unsafe.Pointer(pTab)).FzName, zStmt)) Xsqlite3VdbeAddParseSchemaOp(tls, v, iDb, zWhere, uint16(0)) Xsqlite3DbFree(tls, db, zStmt) @@ -95521,7 +95562,7 @@ for pCtx = (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx; pCtx != 0; pCtx = (*VtabCtx)(unsafe.Pointer(pCtx)).FpPrior { if (*VtabCtx)(unsafe.Pointer(pCtx)).FpTab == pTab { *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, - ts+21365, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName)) + ts+21412, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName)) return SQLITE_LOCKED } } @@ -95549,9 +95590,11 @@ (*VtabCtx)(unsafe.Pointer(bp + 32)).FpPrior = (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx (*VtabCtx)(unsafe.Pointer(bp + 32)).FbDeclared = 0 (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx = bp + 32 + (*Table)(unsafe.Pointer(pTab)).FnTabRef++ rc = (*struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, uintptr, 
uintptr) int32 })(unsafe.Pointer(&struct{ uintptr }{xConstruct})).f(tls, db, (*Module)(unsafe.Pointer(pMod)).FpAux, nArg, azArg, pVTable+16, bp+64) + Xsqlite3DeleteTable(tls, db, pTab) (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx = (*VtabCtx)(unsafe.Pointer(bp + 32)).FpPrior if rc == SQLITE_NOMEM { Xsqlite3OomFault(tls, db) @@ -95559,7 +95602,7 @@ if SQLITE_OK != rc { if *(*uintptr)(unsafe.Pointer(bp + 64)) == uintptr(0) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+21407, libc.VaList(bp+8, zModuleName)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+21454, libc.VaList(bp+8, zModuleName)) } else { *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+3647, libc.VaList(bp+16, *(*uintptr)(unsafe.Pointer(bp + 64)))) Xsqlite3_free(tls, *(*uintptr)(unsafe.Pointer(bp + 64))) @@ -95571,7 +95614,7 @@ (*Module)(unsafe.Pointer(pMod)).FnRefModule++ (*VTable)(unsafe.Pointer(pVTable)).FnRef = 1 if (*VtabCtx)(unsafe.Pointer(bp+32)).FbDeclared == 0 { - var zFormat uintptr = ts + 21437 + var zFormat uintptr = ts + 21484 *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, zFormat, libc.VaList(bp+24, (*Table)(unsafe.Pointer(pTab)).FzName)) Xsqlite3VtabUnlock(tls, pVTable) rc = SQLITE_ERROR @@ -95645,7 +95688,7 @@ if !(pMod != 0) { var zModule uintptr = *(*uintptr)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pTab + 64 + 8)))) - Xsqlite3ErrorMsg(tls, pParse, ts+21483, libc.VaList(bp, zModule)) + Xsqlite3ErrorMsg(tls, pParse, ts+21530, libc.VaList(bp, zModule)) rc = SQLITE_ERROR } else { *(*uintptr)(unsafe.Pointer(bp + 16)) = uintptr(0) @@ -95703,7 +95746,7 @@ pMod = Xsqlite3HashFind(tls, db+576, zMod) if pMod == uintptr(0) || (*Sqlite3_module)(unsafe.Pointer((*Module)(unsafe.Pointer(pMod)).FpModule)).FxCreate == uintptr(0) || (*Sqlite3_module)(unsafe.Pointer((*Module)(unsafe.Pointer(pMod)).FpModule)).FxDestroy == uintptr(0) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+21483, libc.VaList(bp, zMod)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+21530, libc.VaList(bp, zMod)) rc = SQLITE_ERROR } else { rc = vtabCallConstructor(tls, db, pTab, pMod, (*Sqlite3_module)(unsafe.Pointer((*Module)(unsafe.Pointer(pMod)).FpModule)).FxCreate, pzErr) @@ -95737,7 +95780,7 @@ if !(pCtx != 0) || (*VtabCtx)(unsafe.Pointer(pCtx)).FbDeclared != 0 { Xsqlite3Error(tls, db, SQLITE_MISUSE) Xsqlite3_mutex_leave(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) - return Xsqlite3MisuseError(tls, 151030) + return Xsqlite3MisuseError(tls, 151102) } pTab = (*VtabCtx)(unsafe.Pointer(pCtx)).FpTab @@ -96190,7 +96233,7 @@ Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) p = (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx if !(p != 0) { - rc = Xsqlite3MisuseError(tls, 151521) + rc = Xsqlite3MisuseError(tls, 151593) } else { ap = va switch op { @@ -96217,7 +96260,7 @@ fallthrough default: { - rc = Xsqlite3MisuseError(tls, 151539) + rc = Xsqlite3MisuseError(tls, 151611) break } @@ -96448,7 +96491,7 @@ func explainIndexColumnName(tls *libc.TLS, pIdx uintptr, i int32) uintptr { i = int32(*(*I16)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx)).FaiColumn + uintptr(i)*2))) if i == -2 { - return ts + 21502 + return ts + 21549 } if i == -1 { return ts + 16251 @@ -96460,11 +96503,11 @@ var i int32 if bAnd != 0 { - Xsqlite3_str_append(tls, pStr, ts+21509, 5) + Xsqlite3_str_append(tls, pStr, ts+21556, 5) } if nTerm > 1 { - Xsqlite3_str_append(tls, pStr, ts+21515, 1) + Xsqlite3_str_append(tls, pStr, ts+21562, 1) } for i = 0; i < nTerm; i++ { if i != 0 { @@ 
-96479,7 +96522,7 @@ Xsqlite3_str_append(tls, pStr, zOp, 1) if nTerm > 1 { - Xsqlite3_str_append(tls, pStr, ts+21515, 1) + Xsqlite3_str_append(tls, pStr, ts+21562, 1) } for i = 0; i < nTerm; i++ { if i != 0 { @@ -96505,27 +96548,27 @@ if int32(nEq) == 0 && (*WhereLoop)(unsafe.Pointer(pLoop)).FwsFlags&U32(WHERE_BTM_LIMIT|WHERE_TOP_LIMIT) == U32(0) { return } - Xsqlite3_str_append(tls, pStr, ts+21517, 2) + Xsqlite3_str_append(tls, pStr, ts+21564, 2) for i = 0; i < int32(nEq); i++ { var z uintptr = explainIndexColumnName(tls, pIndex, i) if i != 0 { - Xsqlite3_str_append(tls, pStr, ts+21509, 5) + Xsqlite3_str_append(tls, pStr, ts+21556, 5) } Xsqlite3_str_appendf(tls, pStr, func() uintptr { if i >= int32(nSkip) { - return ts + 21520 + return ts + 21567 } - return ts + 21525 + return ts + 21572 }(), libc.VaList(bp, z)) } j = i if (*WhereLoop)(unsafe.Pointer(pLoop)).FwsFlags&U32(WHERE_BTM_LIMIT) != 0 { - explainAppendTerm(tls, pStr, pIndex, int32(*(*U16)(unsafe.Pointer(pLoop + 24 + 2))), j, i, ts+21533) + explainAppendTerm(tls, pStr, pIndex, int32(*(*U16)(unsafe.Pointer(pLoop + 24 + 2))), j, i, ts+21580) i = 1 } if (*WhereLoop)(unsafe.Pointer(pLoop)).FwsFlags&U32(WHERE_TOP_LIMIT) != 0 { - explainAppendTerm(tls, pStr, pIndex, int32(*(*U16)(unsafe.Pointer(pLoop + 24 + 4))), j, i, ts+21535) + explainAppendTerm(tls, pStr, pIndex, int32(*(*U16)(unsafe.Pointer(pLoop + 24 + 4))), j, i, ts+21582) } Xsqlite3_str_append(tls, pStr, ts+4941, 1) } @@ -96568,11 +96611,11 @@ Xsqlite3StrAccumInit(tls, bp+64, db, bp+96, int32(unsafe.Sizeof([100]int8{})), SQLITE_MAX_LENGTH) (*StrAccum)(unsafe.Pointer(bp + 64)).FprintfFlags = U8(SQLITE_PRINTF_INTERNAL) - Xsqlite3_str_appendf(tls, bp+64, ts+21537, libc.VaList(bp, func() uintptr { + Xsqlite3_str_appendf(tls, bp+64, ts+21584, libc.VaList(bp, func() uintptr { if isSearch != 0 { - return ts + 21543 + return ts + 21590 } - return ts + 21550 + return ts + 21597 }(), pItem)) if flags&U32(WHERE_IPK|WHERE_VIRTUALTABLE) == U32(0) { var zFmt uintptr = uintptr(0) @@ -96585,40 +96628,40 @@ zFmt = ts + 10960 } } else if flags&U32(WHERE_PARTIALIDX) != 0 { - zFmt = ts + 21555 + zFmt = ts + 21602 } else if flags&U32(WHERE_AUTO_INDEX) != 0 { - zFmt = ts + 21588 + zFmt = ts + 21635 } else if flags&U32(WHERE_IDX_ONLY) != 0 { - zFmt = ts + 21613 + zFmt = ts + 21660 } else { - zFmt = ts + 21631 + zFmt = ts + 21678 } if zFmt != 0 { - Xsqlite3_str_append(tls, bp+64, ts+21640, 7) + Xsqlite3_str_append(tls, bp+64, ts+21687, 7) Xsqlite3_str_appendf(tls, bp+64, zFmt, libc.VaList(bp+16, (*Index)(unsafe.Pointer(pIdx)).FzName)) explainIndexRange(tls, bp+64, pLoop) } } else if flags&U32(WHERE_IPK) != U32(0) && flags&U32(WHERE_CONSTRAINT) != U32(0) { var cRangeOp int8 var zRowid uintptr = ts + 16251 - Xsqlite3_str_appendf(tls, bp+64, ts+21648, libc.VaList(bp+24, zRowid)) + Xsqlite3_str_appendf(tls, bp+64, ts+21695, libc.VaList(bp+24, zRowid)) if flags&U32(WHERE_COLUMN_EQ|WHERE_COLUMN_IN) != 0 { cRangeOp = int8('=') } else if flags&U32(WHERE_BOTH_LIMIT) == U32(WHERE_BOTH_LIMIT) { - Xsqlite3_str_appendf(tls, bp+64, ts+21679, libc.VaList(bp+32, zRowid)) + Xsqlite3_str_appendf(tls, bp+64, ts+21726, libc.VaList(bp+32, zRowid)) cRangeOp = int8('<') } else if flags&U32(WHERE_BTM_LIMIT) != 0 { cRangeOp = int8('>') } else { cRangeOp = int8('<') } - Xsqlite3_str_appendf(tls, bp+64, ts+21689, libc.VaList(bp+40, int32(cRangeOp))) + Xsqlite3_str_appendf(tls, bp+64, ts+21736, libc.VaList(bp+40, int32(cRangeOp))) } else if flags&U32(WHERE_VIRTUALTABLE) != U32(0) { - Xsqlite3_str_appendf(tls, bp+64, ts+21694, + 
Xsqlite3_str_appendf(tls, bp+64, ts+21741, libc.VaList(bp+48, *(*int32)(unsafe.Pointer(pLoop + 24)), *(*uintptr)(unsafe.Pointer(pLoop + 24 + 16)))) } if int32((*SrcItem)(unsafe.Pointer(pItem)).Ffg.Fjointype)&JT_LEFT != 0 { - Xsqlite3_str_appendf(tls, bp+64, ts+21721, 0) + Xsqlite3_str_appendf(tls, bp+64, ts+21768, 0) } zMsg = Xsqlite3StrAccumFinish(tls, bp+64) @@ -96650,22 +96693,22 @@ Xsqlite3StrAccumInit(tls, bp+24, db, bp+56, int32(unsafe.Sizeof([100]int8{})), SQLITE_MAX_LENGTH) (*StrAccum)(unsafe.Pointer(bp + 24)).FprintfFlags = U8(SQLITE_PRINTF_INTERNAL) - Xsqlite3_str_appendf(tls, bp+24, ts+21732, libc.VaList(bp, pItem)) + Xsqlite3_str_appendf(tls, bp+24, ts+21779, libc.VaList(bp, pItem)) pLoop = (*WhereLevel)(unsafe.Pointer(pLevel)).FpWLoop if (*WhereLoop)(unsafe.Pointer(pLoop)).FwsFlags&U32(WHERE_IPK) != 0 { var pTab uintptr = (*SrcItem)(unsafe.Pointer(pItem)).FpTab if int32((*Table)(unsafe.Pointer(pTab)).FiPKey) >= 0 { - Xsqlite3_str_appendf(tls, bp+24, ts+21520, libc.VaList(bp+8, (*Column)(unsafe.Pointer((*Table)(unsafe.Pointer(pTab)).FaCol+uintptr((*Table)(unsafe.Pointer(pTab)).FiPKey)*24)).FzCnName)) + Xsqlite3_str_appendf(tls, bp+24, ts+21567, libc.VaList(bp+8, (*Column)(unsafe.Pointer((*Table)(unsafe.Pointer(pTab)).FaCol+uintptr((*Table)(unsafe.Pointer(pTab)).FiPKey)*24)).FzCnName)) } else { - Xsqlite3_str_appendf(tls, bp+24, ts+21753, 0) + Xsqlite3_str_appendf(tls, bp+24, ts+21800, 0) } } else { for i = int32((*WhereLoop)(unsafe.Pointer(pLoop)).FnSkip); i < int32(*(*U16)(unsafe.Pointer(pLoop + 24))); i++ { var z uintptr = explainIndexColumnName(tls, *(*uintptr)(unsafe.Pointer(pLoop + 24 + 8)), i) if i > int32((*WhereLoop)(unsafe.Pointer(pLoop)).FnSkip) { - Xsqlite3_str_append(tls, bp+24, ts+21509, 5) + Xsqlite3_str_append(tls, bp+24, ts+21556, 5) } - Xsqlite3_str_appendf(tls, bp+24, ts+21520, libc.VaList(bp+16, z)) + Xsqlite3_str_appendf(tls, bp+24, ts+21567, libc.VaList(bp+16, z)) } } Xsqlite3_str_append(tls, bp+24, ts+4941, 1) @@ -98262,7 +98305,7 @@ ; __126: ; - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21761, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21808, 0) ii = 0 __135: if !(ii < (*WhereClause)(unsafe.Pointer(pOrWc)).FnTerm) { @@ -98290,7 +98333,7 @@ pOrExpr = pAndExpr __140: ; - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21776, libc.VaList(bp, ii+1)) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21823, libc.VaList(bp, ii+1)) pSubWInfo = Xsqlite3WhereBegin(tls, pParse, pOrTab, pOrExpr, uintptr(0), uintptr(0), uintptr(0), uint16(WHERE_OR_SUBCLAUSE), iCovCur) @@ -98808,7 +98851,7 @@ var mAll Bitmask = uint64(0) var k int32 - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21785, libc.VaList(bp, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pTabItem)).FpTab)).FzName)) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21832, libc.VaList(bp, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pTabItem)).FpTab)).FzName)) for k = 0; k < iLevel; k++ { var iIdxCur int32 @@ -99169,7 +99212,7 @@ {FzOp: ts + 16100, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_MATCH)}, {FzOp: ts + 15431, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_GLOB)}, {FzOp: ts + 14951, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_LIKE)}, - {FzOp: ts + 21799, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_REGEXP)}, + {FzOp: ts + 21846, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_REGEXP)}, } func transferJoinMarkings(tls *libc.TLS, pDerived uintptr, pBase uintptr) { @@ -99659,12 +99702,12 @@ extraRight = x - uint64(1) if prereqAll>>1 >= x { - Xsqlite3ErrorMsg(tls, pParse, ts+21806, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+21853, 0) 
return } } else if prereqAll>>1 >= x { if (*SrcList)(unsafe.Pointer(pSrc)).FnSrc > 0 && int32((*SrcItem)(unsafe.Pointer(pSrc+8)).Ffg.Fjointype)&JT_LTORJ != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+21806, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+21853, 0) return } *(*U32)(unsafe.Pointer(pExpr + 4)) &= libc.Uint32FromInt32(libc.CplInt32(EP_InnerON)) @@ -99743,7 +99786,7 @@ !((*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_OuterON) != U32(0)) && 0 == Xsqlite3ExprCanBeNull(tls, pLeft) { (*Expr)(unsafe.Pointer(pExpr)).Fop = U8(TK_TRUEFALSE) - *(*uintptr)(unsafe.Pointer(pExpr + 8)) = ts + 6753 + *(*uintptr)(unsafe.Pointer(pExpr + 8)) = ts + 7683 *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_IsFalse) (*WhereTerm)(unsafe.Pointer(pTerm)).FprereqAll = uint64(0) (*WhereTerm)(unsafe.Pointer(pTerm)).FeOperator = U16(0) @@ -99837,7 +99880,7 @@ } zCollSeqName = func() uintptr { if *(*int32)(unsafe.Pointer(bp + 20)) != 0 { - return ts + 21847 + return ts + 21894 } return uintptr(unsafe.Pointer(&Xsqlite3StrBINARY)) }() @@ -100213,7 +100256,7 @@ k++ } if k >= int32((*Table)(unsafe.Pointer(pTab)).FnCol) { - Xsqlite3ErrorMsg(tls, pParse, ts+21854, + Xsqlite3ErrorMsg(tls, pParse, ts+21901, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName, j)) return } @@ -100229,7 +100272,7 @@ pRhs = Xsqlite3PExpr(tls, pParse, TK_UPLUS, Xsqlite3ExprDup(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, (*ExprList_item)(unsafe.Pointer(pArgs+8+uintptr(j)*32)).FpExpr, 0), uintptr(0)) pTerm = Xsqlite3PExpr(tls, pParse, TK_EQ, pColRef, pRhs) - if int32((*SrcItem)(unsafe.Pointer(pItem)).Ffg.Fjointype)&(JT_LEFT|JT_LTORJ) != 0 { + if int32((*SrcItem)(unsafe.Pointer(pItem)).Ffg.Fjointype)&(JT_LEFT|JT_LTORJ|JT_RIGHT) != 0 { joinType = U32(EP_OuterON) } else { joinType = U32(EP_InnerON) @@ -100947,7 +100990,7 @@ goto __6 } Xsqlite3_log(tls, SQLITE_WARNING|int32(1)<<8, - ts+21890, libc.VaList(bp, (*Table)(unsafe.Pointer(pTable)).FzName, + ts+21937, libc.VaList(bp, (*Table)(unsafe.Pointer(pTable)).FzName, (*Column)(unsafe.Pointer((*Table)(unsafe.Pointer(pTable)).FaCol+uintptr(iCol)*24)).FzCnName)) sentWarning = U8(1) __6: @@ -101018,7 +101061,7 @@ __14: ; *(*uintptr)(unsafe.Pointer(pLoop + 24 + 8)) = pIdx - (*Index)(unsafe.Pointer(pIdx)).FzName = ts + 21916 + (*Index)(unsafe.Pointer(pIdx)).FzName = ts + 21963 (*Index)(unsafe.Pointer(pIdx)).FpTable = pTable n = 0 idxCols = uint64(0) @@ -101192,6 +101235,10 @@ var v uintptr = (*Parse)(unsafe.Pointer(pParse)).FpVdbe var pLoop uintptr = (*WhereLevel)(unsafe.Pointer(pLevel)).FpWLoop var iCur int32 + var saved_pIdxEpr uintptr + + saved_pIdxEpr = (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr + (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr = uintptr(0) addrOnce = Xsqlite3VdbeAddOp0(tls, v, OP_Once) for __ccgo := true; __ccgo; __ccgo = iLevel < int32((*WhereInfo)(unsafe.Pointer(pWInfo)).FnLevel) { @@ -101235,9 +101282,7 @@ var r1 int32 = Xsqlite3GetTempRange(tls, pParse, n) var jj int32 for jj = 0; jj < n; jj++ { - var iCol int32 = int32(*(*I16)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx)).FaiColumn + uintptr(jj)*2))) - - Xsqlite3ExprCodeGetColumnOfTable(tls, v, (*Index)(unsafe.Pointer(pIdx)).FpTable, iCur, iCol, r1+jj) + Xsqlite3ExprCodeLoadIndexColumn(tls, pParse, pIdx, iCur, jj, r1+jj) } Xsqlite3VdbeAddOp4Int(tls, v, OP_FilterAdd, (*WhereLevel)(unsafe.Pointer(pLevel)).FregFilter, 0, r1, n) Xsqlite3ReleaseTempRange(tls, pParse, r1, n) @@ -101271,6 +101316,7 @@ } } Xsqlite3VdbeJumpHere(tls, v, addrOnce) + (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr = saved_pIdxEpr } func allocateIndexInfo(tls *libc.TLS, pWInfo 
uintptr, pWC uintptr, mUnusable Bitmask, pSrc uintptr, pmNoOmit uintptr) uintptr { @@ -101529,11 +101575,16 @@ _ = pParse + if !((*Table)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx)).FpTable)).FtabFlags&U32(TF_WithoutRowid) == U32(0)) && int32(*(*uint16)(unsafe.Pointer(pIdx + 100))&0x3>>0) == SQLITE_IDXTYPE_PRIMARYKEY { + nField = int32((*Index)(unsafe.Pointer(pIdx)).FnKeyCol) + } else { + nField = int32((*Index)(unsafe.Pointer(pIdx)).FnColumn) + } nField = func() int32 { - if int32((*UnpackedRecord)(unsafe.Pointer(pRec)).FnField) < (*Index)(unsafe.Pointer(pIdx)).FnSample { + if int32((*UnpackedRecord)(unsafe.Pointer(pRec)).FnField) < nField { return int32((*UnpackedRecord)(unsafe.Pointer(pRec)).FnField) } - return (*Index)(unsafe.Pointer(pIdx)).FnSample + return nField }() iCol = 0 iSample = (*Index)(unsafe.Pointer(pIdx)).FnSample * nField @@ -103114,7 +103165,7 @@ j >= (*WhereClause)(unsafe.Pointer(pWC)).FnTerm || *(*uintptr)(unsafe.Pointer((*WhereLoop)(unsafe.Pointer(pNew)).FaLTerm + uintptr(iTerm)*8)) != uintptr(0) || int32((*sqlite3_index_constraint)(unsafe.Pointer(pIdxCons)).Fusable) == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+21927, libc.VaList(bp, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pSrc)).FpTab)).FzName)) + Xsqlite3ErrorMsg(tls, pParse, ts+21974, libc.VaList(bp, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pSrc)).FpTab)).FzName)) return SQLITE_ERROR } @@ -103172,7 +103223,7 @@ (*WhereLoop)(unsafe.Pointer(pNew)).FnLTerm = U16(mxTerm + 1) for i = 0; i <= mxTerm; i++ { if *(*uintptr)(unsafe.Pointer((*WhereLoop)(unsafe.Pointer(pNew)).FaLTerm + uintptr(i)*8)) == uintptr(0) { - Xsqlite3ErrorMsg(tls, pParse, ts+21927, libc.VaList(bp+8, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pSrc)).FpTab)).FzName)) + Xsqlite3ErrorMsg(tls, pParse, ts+21974, libc.VaList(bp+8, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pSrc)).FpTab)).FzName)) return SQLITE_ERROR } @@ -103570,7 +103621,7 @@ mPrior = mPrior | (*WhereLoop)(unsafe.Pointer(pNew)).FmaskSelf if rc != 0 || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { if rc == SQLITE_DONE { - Xsqlite3_log(tls, SQLITE_WARNING, ts+21953, 0) + Xsqlite3_log(tls, SQLITE_WARNING, ts+22000, 0) rc = SQLITE_OK } else { goto __3 @@ -104177,7 +104228,7 @@ } if nFrom == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+21988, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+22035, 0) Xsqlite3DbFreeNN(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pSpace) return SQLITE_ERROR } @@ -104212,6 +104263,10 @@ if int32((*WherePath)(unsafe.Pointer(pFrom)).FisOrdered) == (*ExprList)(unsafe.Pointer((*WhereInfo)(unsafe.Pointer(pWInfo)).FpOrderBy)).FnExpr { (*WhereInfo)(unsafe.Pointer(pWInfo)).FeDistinct = U8(WHERE_DISTINCT_ORDERED) } + if (*Select)(unsafe.Pointer((*WhereInfo)(unsafe.Pointer(pWInfo)).FpSelect)).FpOrderBy != 0 && + int32((*WhereInfo)(unsafe.Pointer(pWInfo)).FnOBSat) > (*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer((*WhereInfo)(unsafe.Pointer(pWInfo)).FpSelect)).FpOrderBy)).FnExpr { + (*WhereInfo)(unsafe.Pointer(pWInfo)).FnOBSat = I8((*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer((*WhereInfo)(unsafe.Pointer(pWInfo)).FpSelect)).FpOrderBy)).FnExpr) + } } else { (*WhereInfo)(unsafe.Pointer(pWInfo)).FrevMask = (*WherePath)(unsafe.Pointer(pFrom)).FrevLoop if int32((*WhereInfo)(unsafe.Pointer(pWInfo)).FnOBSat) <= 0 { @@ -104506,6 +104561,9 @@ (*IndexedExpr)(unsafe.Pointer(p)).FiIdxCur = iIdxCur (*IndexedExpr)(unsafe.Pointer(p)).FiIdxCol = i (*IndexedExpr)(unsafe.Pointer(p)).FbMaybeNullRow = U8(bMaybeNullRow) + if Xsqlite3IndexAffinityStr(tls, 
(*Parse)(unsafe.Pointer(pParse)).Fdb, pIdx) != 0 { + (*IndexedExpr)(unsafe.Pointer(p)).Faff = U8(*(*int8)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx)).FzColAff + uintptr(i)))) + } (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr = p if (*IndexedExpr)(unsafe.Pointer(p)).FpIENext == uintptr(0) { Xsqlite3ParserAddCleanup(tls, pParse, *(*uintptr)(unsafe.Pointer(&struct { @@ -104658,7 +104716,7 @@ if !((*SrcList)(unsafe.Pointer(pTabList)).FnSrc > int32(uint64(unsafe.Sizeof(Bitmask(0)))*uint64(8))) { goto __2 } - Xsqlite3ErrorMsg(tls, pParse, ts+22006, libc.VaList(bp, int32(uint64(unsafe.Sizeof(Bitmask(0)))*uint64(8)))) + Xsqlite3ErrorMsg(tls, pParse, ts+22053, libc.VaList(bp, int32(uint64(unsafe.Sizeof(Bitmask(0)))*uint64(8)))) return uintptr(0) __2: ; @@ -104722,7 +104780,7 @@ (*WhereInfo)(unsafe.Pointer(pWInfo)).FeDistinct = U8(WHERE_DISTINCT_UNIQUE) __7: ; - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+22034, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+22081, 0) goto __5 __4: ii = 0 @@ -105604,7 +105662,7 @@ error_out: Xsqlite3_result_error(tls, - pCtx, ts+22052, -1) + pCtx, ts+22099, -1) } func nth_valueFinalizeFunc(tls *libc.TLS, pCtx uintptr) { @@ -105737,7 +105795,7 @@ (*NtileCtx)(unsafe.Pointer(p)).FnParam = Xsqlite3_value_int64(tls, *(*uintptr)(unsafe.Pointer(apArg))) if (*NtileCtx)(unsafe.Pointer(p)).FnParam <= int64(0) { Xsqlite3_result_error(tls, - pCtx, ts+22108, -1) + pCtx, ts+22155, -1) } } (*NtileCtx)(unsafe.Pointer(p)).FnTotal++ @@ -105827,17 +105885,17 @@ } } -var row_numberName = *(*[11]int8)(unsafe.Pointer(ts + 22153)) -var dense_rankName = *(*[11]int8)(unsafe.Pointer(ts + 22164)) -var rankName = *(*[5]int8)(unsafe.Pointer(ts + 22175)) -var percent_rankName = *(*[13]int8)(unsafe.Pointer(ts + 22180)) -var cume_distName = *(*[10]int8)(unsafe.Pointer(ts + 22193)) -var ntileName = *(*[6]int8)(unsafe.Pointer(ts + 22203)) -var last_valueName = *(*[11]int8)(unsafe.Pointer(ts + 22209)) -var nth_valueName = *(*[10]int8)(unsafe.Pointer(ts + 22220)) -var first_valueName = *(*[12]int8)(unsafe.Pointer(ts + 22230)) -var leadName = *(*[5]int8)(unsafe.Pointer(ts + 22242)) -var lagName = *(*[4]int8)(unsafe.Pointer(ts + 22247)) +var row_numberName = *(*[11]int8)(unsafe.Pointer(ts + 22200)) +var dense_rankName = *(*[11]int8)(unsafe.Pointer(ts + 22211)) +var rankName = *(*[5]int8)(unsafe.Pointer(ts + 22222)) +var percent_rankName = *(*[13]int8)(unsafe.Pointer(ts + 22227)) +var cume_distName = *(*[10]int8)(unsafe.Pointer(ts + 22240)) +var ntileName = *(*[6]int8)(unsafe.Pointer(ts + 22250)) +var last_valueName = *(*[11]int8)(unsafe.Pointer(ts + 22256)) +var nth_valueName = *(*[10]int8)(unsafe.Pointer(ts + 22267)) +var first_valueName = *(*[12]int8)(unsafe.Pointer(ts + 22277)) +var leadName = *(*[5]int8)(unsafe.Pointer(ts + 22289)) +var lagName = *(*[4]int8)(unsafe.Pointer(ts + 22294)) func noopStepFunc(tls *libc.TLS, p uintptr, n int32, a uintptr) { _ = p @@ -105883,7 +105941,7 @@ } } if p == uintptr(0) { - Xsqlite3ErrorMsg(tls, pParse, ts+22251, libc.VaList(bp, zName)) + Xsqlite3ErrorMsg(tls, pParse, ts+22298, libc.VaList(bp, zName)) } return p } @@ -105927,12 +105985,12 @@ ((*Window)(unsafe.Pointer(pWin)).FpStart != 0 || (*Window)(unsafe.Pointer(pWin)).FpEnd != 0) && ((*Window)(unsafe.Pointer(pWin)).FpOrderBy == uintptr(0) || (*ExprList)(unsafe.Pointer((*Window)(unsafe.Pointer(pWin)).FpOrderBy)).FnExpr != 1) { Xsqlite3ErrorMsg(tls, pParse, - ts+22270, 0) + ts+22317, 0) } else if (*FuncDef)(unsafe.Pointer(pFunc)).FfuncFlags&U32(SQLITE_FUNC_WINDOW) != 0 { var db uintptr = 
(*Parse)(unsafe.Pointer(pParse)).Fdb if (*Window)(unsafe.Pointer(pWin)).FpFilter != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+22341, 0) + ts+22388, 0) } else { *(*[8]WindowUpdate)(unsafe.Pointer(bp)) = [8]WindowUpdate{ {FzFunc: uintptr(unsafe.Pointer(&row_numberName)), FeFrmType: TK_ROWS, FeStart: TK_UNBOUNDED, FeEnd: TK_CURRENT}, @@ -106159,7 +106217,7 @@ if int32((*Expr)(unsafe.Pointer(pExpr)).Fop) == TK_AGG_FUNCTION && (*Expr)(unsafe.Pointer(pExpr)).FpAggInfo == uintptr(0) { Xsqlite3ErrorMsg(tls, (*Walker)(unsafe.Pointer(pWalker)).FpParse, - ts+22404, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(pExpr + 8)))) + ts+22451, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(pExpr + 8)))) } return WRC_Continue } @@ -106275,7 +106333,7 @@ if *(*uintptr)(unsafe.Pointer(bp + 48)) == uintptr(0) { *(*uintptr)(unsafe.Pointer(bp + 48)) = Xsqlite3ExprListAppend(tls, pParse, uintptr(0), - Xsqlite3Expr(tls, db, TK_INTEGER, ts+7514)) + Xsqlite3Expr(tls, db, TK_INTEGER, ts+7503)) } pSub = Xsqlite3SelectNew(tls, @@ -106390,7 +106448,7 @@ eStart == TK_FOLLOWING && (eEnd == TK_PRECEDING || eEnd == TK_CURRENT)) { goto __2 } - Xsqlite3ErrorMsg(tls, pParse, ts+22430, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+22477, 0) goto windowAllocErr __2: ; @@ -106455,15 +106513,15 @@ var zErr uintptr = uintptr(0) if (*Window)(unsafe.Pointer(pWin)).FpPartition != 0 { - zErr = ts + 22462 + zErr = ts + 22509 } else if (*Window)(unsafe.Pointer(pExist)).FpOrderBy != 0 && (*Window)(unsafe.Pointer(pWin)).FpOrderBy != 0 { - zErr = ts + 22479 + zErr = ts + 22526 } else if int32((*Window)(unsafe.Pointer(pExist)).FbImplicitFrame) == 0 { - zErr = ts + 22495 + zErr = ts + 22542 } if zErr != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+22515, libc.VaList(bp, zErr, (*Window)(unsafe.Pointer(pWin)).FzBase)) + ts+22562, libc.VaList(bp, zErr, (*Window)(unsafe.Pointer(pWin)).FzBase)) } else { (*Window)(unsafe.Pointer(pWin)).FpPartition = Xsqlite3ExprListDup(tls, db, (*Window)(unsafe.Pointer(pExist)).FpPartition, 0) if (*Window)(unsafe.Pointer(pExist)).FpOrderBy != 0 { @@ -106484,7 +106542,7 @@ (*Window)(unsafe.Pointer(pWin)).FpOwner = p if (*Expr)(unsafe.Pointer(p)).Fflags&U32(EP_Distinct) != 0 && int32((*Window)(unsafe.Pointer(pWin)).FeFrmType) != TK_FILTER { Xsqlite3ErrorMsg(tls, pParse, - ts+22548, 0) + ts+22595, 0) } } else { Xsqlite3WindowDelete(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pWin) @@ -106640,11 +106698,11 @@ } var azErr = [5]uintptr{ - ts + 22595, - ts + 22648, - ts + 22052, - ts + 22699, - ts + 22751, + ts + 22642, + ts + 22695, + ts + 22099, + ts + 22746, + ts + 22798, } var aOp1 = [5]int32{OP_Ge, OP_Ge, OP_Gt, OP_Ge, OP_Ge} @@ -108039,19 +108097,19 @@ } cnt++ if (*Select)(unsafe.Pointer(pLoop)).FpOrderBy != 0 || (*Select)(unsafe.Pointer(pLoop)).FpLimit != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+22801, + Xsqlite3ErrorMsg(tls, pParse, ts+22848, libc.VaList(bp, func() uintptr { if (*Select)(unsafe.Pointer(pLoop)).FpOrderBy != uintptr(0) { - return ts + 22843 + return ts + 22890 } - return ts + 22852 + return ts + 22899 }(), Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(pNext)).Fop)))) break } } if (*Select)(unsafe.Pointer(p)).FselFlags&U32(SF_MultiValue) == U32(0) && libc.AssignInt32(&mxSelect, *(*int32)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb + 136 + 4*4))) > 0 && cnt > mxSelect { - Xsqlite3ErrorMsg(tls, pParse, ts+22858, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+22905, 0) } } @@ -108119,7 +108177,7 @@ var p uintptr = Xsqlite3ExprListAppend(tls, pParse, pPrior, uintptr(0)) if (hasCollate != 0 || sortOrder != -1) && 
int32((*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).Finit.Fbusy) == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+22892, + Xsqlite3ErrorMsg(tls, pParse, ts+22939, libc.VaList(bp, (*Token)(unsafe.Pointer(pIdToken)).Fn, (*Token)(unsafe.Pointer(pIdToken)).Fz)) } Xsqlite3ExprListSetName(tls, pParse, p, pIdToken, 1) @@ -109216,7 +109274,7 @@ yy_pop_parser_stack(tls, yypParser) } - Xsqlite3ErrorMsg(tls, pParse, ts+22930, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+22977, 0) (*YyParser)(unsafe.Pointer(yypParser)).FpParse = pParse } @@ -110195,7 +110253,7 @@ *(*U32)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-1)*24 + 8)) = U32(TF_WithoutRowid | TF_NoVisibleRowid) } else { *(*U32)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-1)*24 + 8)) = U32(0) - Xsqlite3ErrorMsg(tls, pParse, ts+22952, libc.VaList(bp, (*Token)(unsafe.Pointer(yymsp+8)).Fn, (*Token)(unsafe.Pointer(yymsp+8)).Fz)) + Xsqlite3ErrorMsg(tls, pParse, ts+22999, libc.VaList(bp, (*Token)(unsafe.Pointer(yymsp+8)).Fn, (*Token)(unsafe.Pointer(yymsp+8)).Fz)) } } break @@ -110205,7 +110263,7 @@ *(*U32)(unsafe.Pointer(bp + 40)) = U32(TF_Strict) } else { *(*U32)(unsafe.Pointer(bp + 40)) = U32(0) - Xsqlite3ErrorMsg(tls, pParse, ts+22952, libc.VaList(bp+16, (*Token)(unsafe.Pointer(yymsp+8)).Fn, (*Token)(unsafe.Pointer(yymsp+8)).Fz)) + Xsqlite3ErrorMsg(tls, pParse, ts+22999, libc.VaList(bp+16, (*Token)(unsafe.Pointer(yymsp+8)).Fn, (*Token)(unsafe.Pointer(yymsp+8)).Fz)) } } *(*U32)(unsafe.Pointer(yymsp + 8)) = *(*U32)(unsafe.Pointer(bp + 40)) @@ -110948,7 +111006,7 @@ case uint32(157): { Xsqlite3SrcListIndexedBy(tls, pParse, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-5)*24 + 8)), yymsp+libc.UintptrFromInt32(-4)*24+8) - Xsqlite3ExprListCheckLength(tls, pParse, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-2)*24 + 8)), ts+22979) + Xsqlite3ExprListCheckLength(tls, pParse, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-2)*24 + 8)), ts+23026) if *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-1)*24 + 8)) != 0 { var pFromClause uintptr = *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-1)*24 + 8)) if (*SrcList)(unsafe.Pointer(pFromClause)).FnSrc > 1 { @@ -111112,7 +111170,7 @@ *(*Token)(unsafe.Pointer(bp + 128)) = *(*Token)(unsafe.Pointer(yymsp + 8)) if int32((*Parse)(unsafe.Pointer(pParse)).Fnested) == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+22988, libc.VaList(bp+32, bp+128)) + Xsqlite3ErrorMsg(tls, pParse, ts+23035, libc.VaList(bp+32, bp+128)) *(*uintptr)(unsafe.Pointer(yymsp + 8)) = uintptr(0) } else { *(*uintptr)(unsafe.Pointer(yymsp + 8)) = Xsqlite3PExpr(tls, pParse, TK_REGISTER, uintptr(0), uintptr(0)) @@ -111329,9 +111387,9 @@ Xsqlite3ExprUnmapAndDelete(tls, pParse, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-4)*24 + 8))) *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-4)*24 + 8)) = Xsqlite3Expr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, TK_STRING, func() uintptr { if *(*int32)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-3)*24 + 8)) != 0 { - return ts + 6748 + return ts + 7678 } - return ts + 6753 + return ts + 7683 }()) if *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-4)*24 + 8)) != 0 { Xsqlite3ExprIdToTrueFalse(tls, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-4)*24 + 8))) @@ -111615,19 +111673,19 @@ { *(*Token)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-2)*24 + 8)) = *(*Token)(unsafe.Pointer(yymsp + 8)) Xsqlite3ErrorMsg(tls, pParse, - ts+23012, 0) + ts+23059, 0) } break case uint32(271): { Xsqlite3ErrorMsg(tls, pParse, - ts+23107, 
0) + ts+23154, 0) } break case uint32(272): { Xsqlite3ErrorMsg(tls, pParse, - ts+23191, 0) + ts+23238, 0) } break case uint32(273): @@ -112006,9 +112064,9 @@ _ = yymajor if *(*int8)(unsafe.Pointer((*Token)(unsafe.Pointer(bp + 8)).Fz)) != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+22988, libc.VaList(bp, bp+8)) + Xsqlite3ErrorMsg(tls, pParse, ts+23035, libc.VaList(bp, bp+8)) } else { - Xsqlite3ErrorMsg(tls, pParse, ts+23276, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+23323, 0) } (*YyParser)(unsafe.Pointer(yypParser)).FpParse = pParse @@ -112776,7 +112834,7 @@ } else { (*Token)(unsafe.Pointer(bp + 2464)).Fz = zSql (*Token)(unsafe.Pointer(bp + 2464)).Fn = uint32(n) - Xsqlite3ErrorMsg(tls, pParse, ts+23293, libc.VaList(bp, bp+2464)) + Xsqlite3ErrorMsg(tls, pParse, ts+23340, libc.VaList(bp, bp+2464)) break } } @@ -112799,7 +112857,7 @@ if (*Parse)(unsafe.Pointer(pParse)).FzErrMsg == uintptr(0) { (*Parse)(unsafe.Pointer(pParse)).FzErrMsg = Xsqlite3MPrintf(tls, db, ts+3647, libc.VaList(bp+8, Xsqlite3ErrStr(tls, (*Parse)(unsafe.Pointer(pParse)).Frc))) } - Xsqlite3_log(tls, (*Parse)(unsafe.Pointer(pParse)).Frc, ts+23318, libc.VaList(bp+16, (*Parse)(unsafe.Pointer(pParse)).FzErrMsg, (*Parse)(unsafe.Pointer(pParse)).FzTail)) + Xsqlite3_log(tls, (*Parse)(unsafe.Pointer(pParse)).Frc, ts+23365, libc.VaList(bp+16, (*Parse)(unsafe.Pointer(pParse)).FzErrMsg, (*Parse)(unsafe.Pointer(pParse)).FzTail)) nErr++ } (*Parse)(unsafe.Pointer(pParse)).FzTail = zSql @@ -112972,7 +113030,7 @@ fallthrough case 'C': { - if nId == 6 && Xsqlite3_strnicmp(tls, zSql, ts+23329, 6) == 0 { + if nId == 6 && Xsqlite3_strnicmp(tls, zSql, ts+23376, 6) == 0 { token = U8(TkCREATE) } else { token = U8(TkOTHER) @@ -112985,11 +113043,11 @@ fallthrough case 'T': { - if nId == 7 && Xsqlite3_strnicmp(tls, zSql, ts+19915, 7) == 0 { + if nId == 7 && Xsqlite3_strnicmp(tls, zSql, ts+19962, 7) == 0 { token = U8(TkTRIGGER) - } else if nId == 4 && Xsqlite3_strnicmp(tls, zSql, ts+23336, 4) == 0 { + } else if nId == 4 && Xsqlite3_strnicmp(tls, zSql, ts+23383, 4) == 0 { token = U8(TkTEMP) - } else if nId == 9 && Xsqlite3_strnicmp(tls, zSql, ts+23341, 9) == 0 { + } else if nId == 9 && Xsqlite3_strnicmp(tls, zSql, ts+23388, 9) == 0 { token = U8(TkTEMP) } else { token = U8(TkOTHER) @@ -113002,9 +113060,9 @@ fallthrough case 'E': { - if nId == 3 && Xsqlite3_strnicmp(tls, zSql, ts+23351, 3) == 0 { + if nId == 3 && Xsqlite3_strnicmp(tls, zSql, ts+23398, 3) == 0 { token = U8(TkEND) - } else if nId == 7 && Xsqlite3_strnicmp(tls, zSql, ts+23355, 7) == 0 { + } else if nId == 7 && Xsqlite3_strnicmp(tls, zSql, ts+23402, 7) == 0 { token = U8(TkEXPLAIN) } else { token = U8(TkOTHER) @@ -113238,7 +113296,7 @@ var rc int32 = SQLITE_OK if Xsqlite3Config.FisInit != 0 { - return Xsqlite3MisuseError(tls, 174337) + return Xsqlite3MisuseError(tls, 174426) } ap = va @@ -113813,7 +113871,7 @@ return SQLITE_OK } if !(Xsqlite3SafetyCheckSickOrOk(tls, db) != 0) { - return Xsqlite3MisuseError(tls, 175111) + return Xsqlite3MisuseError(tls, 175200) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) if int32((*Sqlite3)(unsafe.Pointer(db)).FmTrace)&SQLITE_TRACE_CLOSE != 0 { @@ -113828,7 +113886,7 @@ if !(forceZombie != 0) && connectionIsBusy(tls, db) != 0 { Xsqlite3ErrorWithMsg(tls, db, SQLITE_BUSY, - ts+23363, 0) + ts+23410, 0) Xsqlite3_mutex_leave(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) return SQLITE_BUSY } @@ -114019,23 +114077,23 @@ // Return a static string that describes the kind of error specified in the // argument. 
func Xsqlite3ErrStr(tls *libc.TLS, rc int32) uintptr { - var zErr uintptr = ts + 23431 + var zErr uintptr = ts + 23478 switch rc { case SQLITE_ABORT | int32(2)<<8: { - zErr = ts + 23445 + zErr = ts + 23492 break } case SQLITE_ROW: { - zErr = ts + 23467 + zErr = ts + 23514 break } case SQLITE_DONE: { - zErr = ts + 23489 + zErr = ts + 23536 break } @@ -114053,35 +114111,35 @@ } var aMsg = [29]uintptr{ - ts + 23512, - ts + 23525, + ts + 23559, + ts + 23572, uintptr(0), - ts + 23541, - ts + 23566, - ts + 23580, - ts + 23599, + ts + 23588, + ts + 23613, + ts + 23627, + ts + 23646, ts + 1474, - ts + 23624, - ts + 23661, - ts + 23673, - ts + 23688, - ts + 23721, - ts + 23739, - ts + 23764, - ts + 23793, + ts + 23671, + ts + 23708, + ts + 23720, + ts + 23735, + ts + 23768, + ts + 23786, + ts + 23811, + ts + 23840, uintptr(0), ts + 5822, ts + 5318, - ts + 23810, - ts + 23828, - ts + 23846, - uintptr(0), - ts + 23880, + ts + 23857, + ts + 23875, + ts + 23893, uintptr(0), - ts + 23901, ts + 23927, - ts + 23950, - ts + 23971, + uintptr(0), + ts + 23948, + ts + 23974, + ts + 23997, + ts + 24018, } func sqliteDefaultBusyCallback(tls *libc.TLS, ptr uintptr, count int32) int32 { @@ -114202,7 +114260,7 @@ libc.Bool32(xValue == uintptr(0)) != libc.Bool32(xInverse == uintptr(0)) || (nArg < -1 || nArg > SQLITE_MAX_FUNCTION_ARG) || 255 < Xsqlite3Strlen30(tls, zFunctionName) { - return Xsqlite3MisuseError(tls, 175758) + return Xsqlite3MisuseError(tls, 175847) } extraFlags = enc & (SQLITE_DETERMINISTIC | SQLITE_DIRECTONLY | SQLITE_SUBTYPE | SQLITE_INNOCUOUS) @@ -114247,7 +114305,7 @@ if p != 0 && (*FuncDef)(unsafe.Pointer(p)).FfuncFlags&U32(SQLITE_FUNC_ENCMASK) == U32(enc) && int32((*FuncDef)(unsafe.Pointer(p)).FnArg) == nArg { if (*Sqlite3)(unsafe.Pointer(db)).FnVdbeActive != 0 { Xsqlite3ErrorWithMsg(tls, db, SQLITE_BUSY, - ts+23987, 0) + ts+24034, 0) return SQLITE_BUSY } else { @@ -114364,7 +114422,7 @@ _ = NotUsed _ = NotUsed2 zErr = Xsqlite3_mprintf(tls, - ts+24050, libc.VaList(bp, zName)) + ts+24097, libc.VaList(bp, zName)) Xsqlite3_result_error(tls, context, zErr, -1) Xsqlite3_free(tls, zErr) } @@ -114600,7 +114658,7 @@ } if iDb < 0 { rc = SQLITE_ERROR - Xsqlite3ErrorWithMsg(tls, db, SQLITE_ERROR, ts+24101, libc.VaList(bp, zDb)) + Xsqlite3ErrorWithMsg(tls, db, SQLITE_ERROR, ts+24148, libc.VaList(bp, zDb)) } else { (*Sqlite3)(unsafe.Pointer(db)).FbusyHandler.FnBusy = 0 rc = Xsqlite3Checkpoint(tls, db, iDb, eMode, pnLog, pnCkpt) @@ -114693,7 +114751,7 @@ return Xsqlite3ErrStr(tls, SQLITE_NOMEM) } if !(Xsqlite3SafetyCheckSickOrOk(tls, db) != 0) { - return Xsqlite3ErrStr(tls, Xsqlite3MisuseError(tls, 176503)) + return Xsqlite3ErrStr(tls, Xsqlite3MisuseError(tls, 176592)) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) if (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { @@ -114763,7 +114821,7 @@ // passed to this function, we assume a malloc() failed during sqlite3_open(). 
func Xsqlite3_errcode(tls *libc.TLS, db uintptr) int32 { if db != 0 && !(Xsqlite3SafetyCheckSickOrOk(tls, db) != 0) { - return Xsqlite3MisuseError(tls, 176582) + return Xsqlite3MisuseError(tls, 176671) } if !(db != 0) || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { return SQLITE_NOMEM @@ -114773,7 +114831,7 @@ func Xsqlite3_extended_errcode(tls *libc.TLS, db uintptr) int32 { if db != 0 && !(Xsqlite3SafetyCheckSickOrOk(tls, db) != 0) { - return Xsqlite3MisuseError(tls, 176591) + return Xsqlite3MisuseError(tls, 176680) } if !(db != 0) || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { return SQLITE_NOMEM @@ -114805,14 +114863,14 @@ enc2 = SQLITE_UTF16LE } if enc2 < SQLITE_UTF8 || enc2 > SQLITE_UTF16BE { - return Xsqlite3MisuseError(tls, 176639) + return Xsqlite3MisuseError(tls, 176728) } pColl = Xsqlite3FindCollSeq(tls, db, U8(enc2), zName, 0) if pColl != 0 && (*CollSeq)(unsafe.Pointer(pColl)).FxCmp != 0 { if (*Sqlite3)(unsafe.Pointer(db)).FnVdbeActive != 0 { Xsqlite3ErrorWithMsg(tls, db, SQLITE_BUSY, - ts+24122, 0) + ts+24169, 0) return SQLITE_BUSY } Xsqlite3ExpirePreparedStatements(tls, db, 0) @@ -114942,7 +115000,7 @@ if !((flags&uint32(SQLITE_OPEN_URI) != 0 || Xsqlite3Config.FbOpenUri != 0) && - nUri >= 5 && libc.Xmemcmp(tls, zUri, ts+24190, uint64(5)) == 0) { + nUri >= 5 && libc.Xmemcmp(tls, zUri, ts+24237, uint64(5)) == 0) { goto __1 } iOut = 0 @@ -114987,10 +115045,10 @@ goto __8 __9: ; - if !(iIn != 7 && (iIn != 16 || libc.Xmemcmp(tls, ts+24196, zUri+7, uint64(9)) != 0)) { + if !(iIn != 7 && (iIn != 16 || libc.Xmemcmp(tls, ts+24243, zUri+7, uint64(9)) != 0)) { goto __10 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24206, + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24253, libc.VaList(bp, iIn-7, zUri+7)) rc = SQLITE_ERROR goto parse_uri_out @@ -115095,7 +115153,7 @@ zVal = zOpt + uintptr(nOpt+1) nVal = Xsqlite3Strlen30(tls, zVal) - if !(nOpt == 3 && libc.Xmemcmp(tls, ts+24234, zOpt, uint64(3)) == 0) { + if !(nOpt == 3 && libc.Xmemcmp(tls, ts+24281, zOpt, uint64(3)) == 0) { goto __29 } zVfs = zVal @@ -115106,17 +115164,17 @@ mask = 0 limit = 0 - if !(nOpt == 5 && libc.Xmemcmp(tls, ts+24238, zOpt, uint64(5)) == 0) { + if !(nOpt == 5 && libc.Xmemcmp(tls, ts+24285, zOpt, uint64(5)) == 0) { goto __31 } mask = SQLITE_OPEN_SHAREDCACHE | SQLITE_OPEN_PRIVATECACHE aMode = uintptr(unsafe.Pointer(&aCacheMode)) limit = mask - zModeType = ts + 24238 + zModeType = ts + 24285 __31: ; - if !(nOpt == 4 && libc.Xmemcmp(tls, ts+24244, zOpt, uint64(4)) == 0) { + if !(nOpt == 4 && libc.Xmemcmp(tls, ts+24291, zOpt, uint64(4)) == 0) { goto __32 } @@ -115154,7 +115212,7 @@ if !(mode == 0) { goto __38 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24249, libc.VaList(bp+16, zModeType, zVal)) + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24296, libc.VaList(bp+16, zModeType, zVal)) rc = SQLITE_ERROR goto parse_uri_out __38: @@ -115162,7 +115220,7 @@ if !(mode&libc.CplInt32(SQLITE_OPEN_MEMORY) > limit) { goto __39 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24269, + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24316, libc.VaList(bp+32, zModeType, zVal)) rc = SQLITE_PERM goto parse_uri_out @@ -115202,7 +115260,7 @@ if !(*(*uintptr)(unsafe.Pointer(ppVfs)) == uintptr(0)) { goto __42 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24293, libc.VaList(bp+48, zVfs)) + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24340, libc.VaList(bp+48, 
zVfs)) rc = SQLITE_ERROR __42: ; @@ -115226,14 +115284,14 @@ } var aCacheMode = [3]OpenMode{ - {Fz: ts + 24309, Fmode: SQLITE_OPEN_SHAREDCACHE}, - {Fz: ts + 24316, Fmode: SQLITE_OPEN_PRIVATECACHE}, + {Fz: ts + 24356, Fmode: SQLITE_OPEN_SHAREDCACHE}, + {Fz: ts + 24363, Fmode: SQLITE_OPEN_PRIVATECACHE}, {}, } var aOpenMode = [5]OpenMode{ - {Fz: ts + 24324, Fmode: SQLITE_OPEN_READONLY}, - {Fz: ts + 24327, Fmode: SQLITE_OPEN_READWRITE}, - {Fz: ts + 24330, Fmode: SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE}, + {Fz: ts + 24371, Fmode: SQLITE_OPEN_READONLY}, + {Fz: ts + 24374, Fmode: SQLITE_OPEN_READWRITE}, + {Fz: ts + 24377, Fmode: SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE}, {Fz: ts + 17346, Fmode: SQLITE_OPEN_MEMORY}, {}, } @@ -115380,10 +115438,10 @@ createCollation(tls, db, uintptr(unsafe.Pointer(&Xsqlite3StrBINARY)), uint8(SQLITE_UTF16LE), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr, int32, uintptr) int32 }{binCollFunc})), uintptr(0)) - createCollation(tls, db, ts+21847, uint8(SQLITE_UTF8), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + createCollation(tls, db, ts+21894, uint8(SQLITE_UTF8), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr, int32, uintptr) int32 }{nocaseCollatingFunc})), uintptr(0)) - createCollation(tls, db, ts+24334, uint8(SQLITE_UTF8), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + createCollation(tls, db, ts+24381, uint8(SQLITE_UTF8), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr, int32, uintptr) int32 }{rtrimCollFunc})), uintptr(0)) if !((*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0) { @@ -115397,7 +115455,7 @@ if !(int32(1)<<(*(*uint32)(unsafe.Pointer(bp + 8))&uint32(7))&0x46 == 0) { goto __16 } - rc = Xsqlite3MisuseError(tls, 177308) + rc = Xsqlite3MisuseError(tls, 177397) goto __17 __16: rc = Xsqlite3ParseUri(tls, zVfs, zFilename, bp+8, db, bp+16, bp+24) @@ -115450,7 +115508,7 @@ (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb)).FzDbSName = ts + 6425 (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb)).Fsafety_level = U8(SQLITE_DEFAULT_SYNCHRONOUS + 1) - (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + 1*32)).FzDbSName = ts + 23336 + (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + 1*32)).FzDbSName = ts + 23383 (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + 1*32)).Fsafety_level = U8(PAGER_SYNCHRONOUS_OFF) (*Sqlite3)(unsafe.Pointer(db)).FeOpenState = U8(SQLITE_STATE_OPEN) @@ -115555,7 +115613,7 @@ return rc } if zFilename == uintptr(0) { - zFilename = ts + 24340 + zFilename = ts + 24387 } pVal = Xsqlite3ValueNew(tls, uintptr(0)) Xsqlite3ValueSetStr(tls, pVal, -1, zFilename, uint8(SQLITE_UTF16LE), uintptr(0)) @@ -115658,21 +115716,21 @@ bp := tls.Alloc(24) defer tls.Free(24) - Xsqlite3_log(tls, iErr, ts+24343, + Xsqlite3_log(tls, iErr, ts+24390, libc.VaList(bp, zType, lineno, uintptr(20)+Xsqlite3_sourceid(tls))) return iErr } func Xsqlite3CorruptError(tls *libc.TLS, lineno int32) int32 { - return Xsqlite3ReportError(tls, SQLITE_CORRUPT, lineno, ts+24368) + return Xsqlite3ReportError(tls, SQLITE_CORRUPT, lineno, ts+24415) } func Xsqlite3MisuseError(tls *libc.TLS, lineno int32) int32 { - return Xsqlite3ReportError(tls, SQLITE_MISUSE, lineno, ts+24388) + return Xsqlite3ReportError(tls, SQLITE_MISUSE, lineno, ts+24435) } func Xsqlite3CantopenError(tls *libc.TLS, lineno int32) int32 { - return Xsqlite3ReportError(tls, SQLITE_CANTOPEN, lineno, ts+24395) + return Xsqlite3ReportError(tls, 
SQLITE_CANTOPEN, lineno, ts+24442) } // This is a convenience routine that makes sure that all thread-specific @@ -115830,7 +115888,7 @@ goto __20 } Xsqlite3DbFree(tls, db, *(*uintptr)(unsafe.Pointer(bp + 24))) - *(*uintptr)(unsafe.Pointer(bp + 24)) = Xsqlite3MPrintf(tls, db, ts+24412, libc.VaList(bp, zTableName, + *(*uintptr)(unsafe.Pointer(bp + 24)) = Xsqlite3MPrintf(tls, db, ts+24459, libc.VaList(bp, zTableName, zColumnName)) rc = SQLITE_ERROR __20: @@ -116486,7 +116544,7 @@ azCompileOpt = Xsqlite3CompileOptions(tls, bp) - if Xsqlite3_strnicmp(tls, zOptName, ts+24440, 7) == 0 { + if Xsqlite3_strnicmp(tls, zOptName, ts+24487, 7) == 0 { zOptName += uintptr(7) } n = Xsqlite3Strlen30(tls, zOptName) @@ -116604,7 +116662,7 @@ Xsqlite3ErrorWithMsg(tls, db, rc, func() uintptr { if rc != 0 { - return ts + 24448 + return ts + 24495 } return uintptr(0) }(), 0) @@ -116782,7 +116840,7 @@ type JsonParse = JsonParse1 var jsonType = [8]uintptr{ - ts + 6165, ts + 6748, ts + 6753, ts + 6175, ts + 6170, ts + 7989, ts + 24471, ts + 24477, + ts + 6165, ts + 7678, ts + 7683, ts + 6175, ts + 6170, ts + 7989, ts + 24518, ts + 24524, } func jsonZero(tls *libc.TLS, p uintptr) { @@ -116935,7 +116993,7 @@ *(*int8)(unsafe.Pointer((*JsonString)(unsafe.Pointer(p)).FzBuf + uintptr(libc.PostIncUint64(&(*JsonString)(unsafe.Pointer(p)).FnUsed, 1)))) = int8('0') *(*int8)(unsafe.Pointer((*JsonString)(unsafe.Pointer(p)).FzBuf + uintptr(libc.PostIncUint64(&(*JsonString)(unsafe.Pointer(p)).FnUsed, 1)))) = int8('0') *(*int8)(unsafe.Pointer((*JsonString)(unsafe.Pointer(p)).FzBuf + uintptr(libc.PostIncUint64(&(*JsonString)(unsafe.Pointer(p)).FnUsed, 1)))) = int8('0' + int32(c)>>4) - c = uint8(*(*int8)(unsafe.Pointer(ts + 24484 + uintptr(int32(c)&0xf)))) + c = uint8(*(*int8)(unsafe.Pointer(ts + 24531 + uintptr(int32(c)&0xf)))) __8: ; __6: @@ -116990,7 +117048,7 @@ default: { if int32((*JsonString)(unsafe.Pointer(p)).FbErr) == 0 { - Xsqlite3_result_error(tls, (*JsonString)(unsafe.Pointer(p)).FpCtx, ts+24501, -1) + Xsqlite3_result_error(tls, (*JsonString)(unsafe.Pointer(p)).FpCtx, ts+24548, -1) (*JsonString)(unsafe.Pointer(p)).FbErr = U8(2) jsonReset(tls, p) } @@ -117054,13 +117112,13 @@ } case JSON_TRUE: { - jsonAppendRaw(tls, pOut, ts+6748, uint32(4)) + jsonAppendRaw(tls, pOut, ts+7678, uint32(4)) break } case JSON_FALSE: { - jsonAppendRaw(tls, pOut, ts+6753, uint32(5)) + jsonAppendRaw(tls, pOut, ts+7683, uint32(5)) break } @@ -117610,12 +117668,12 @@ jsonParseAddNode(tls, pParse, uint32(JSON_NULL), uint32(0), uintptr(0)) return int32(i + U32(4)) } else if int32(c) == 't' && - libc.Xstrncmp(tls, z+uintptr(i), ts+6748, uint64(4)) == 0 && + libc.Xstrncmp(tls, z+uintptr(i), ts+7678, uint64(4)) == 0 && !(int32(Xsqlite3CtypeMap[uint8(*(*int8)(unsafe.Pointer(z + uintptr(i+U32(4)))))])&0x06 != 0) { jsonParseAddNode(tls, pParse, uint32(JSON_TRUE), uint32(0), uintptr(0)) return int32(i + U32(4)) } else if int32(c) == 'f' && - libc.Xstrncmp(tls, z+uintptr(i), ts+6753, uint64(5)) == 0 && + libc.Xstrncmp(tls, z+uintptr(i), ts+7683, uint64(5)) == 0 && !(int32(Xsqlite3CtypeMap[uint8(*(*int8)(unsafe.Pointer(z + uintptr(i+U32(5)))))])&0x06 != 0) { jsonParseAddNode(tls, pParse, uint32(JSON_FALSE), uint32(0), uintptr(0)) return int32(i + U32(5)) @@ -117716,7 +117774,7 @@ if (*JsonParse)(unsafe.Pointer(pParse)).Foom != 0 { Xsqlite3_result_error_nomem(tls, pCtx) } else { - Xsqlite3_result_error(tls, pCtx, ts+24530, -1) + Xsqlite3_result_error(tls, pCtx, ts+24577, -1) } } jsonParseReset(tls, pParse) @@ -118022,7 +118080,7 @@ } if 
int32(*(*int8)(unsafe.Pointer(zPath))) == '.' { jsonParseAddNode(tls, pParse, uint32(JSON_OBJECT), uint32(0), uintptr(0)) - } else if libc.Xstrncmp(tls, zPath, ts+24545, uint64(3)) == 0 { + } else if libc.Xstrncmp(tls, zPath, ts+24592, uint64(3)) == 0 { jsonParseAddNode(tls, pParse, uint32(JSON_ARRAY), uint32(0), uintptr(0)) } else { return uintptr(0) @@ -118037,7 +118095,7 @@ bp := tls.Alloc(8) defer tls.Free(8) - return Xsqlite3_mprintf(tls, ts+24549, libc.VaList(bp, zErr)) + return Xsqlite3_mprintf(tls, ts+24596, libc.VaList(bp, zErr)) } func jsonLookup(tls *libc.TLS, pParse uintptr, zPath uintptr, pApnd uintptr, pCtx uintptr) uintptr { @@ -118091,7 +118149,7 @@ bp := tls.Alloc(8) defer tls.Free(8) - var zMsg uintptr = Xsqlite3_mprintf(tls, ts+24575, + var zMsg uintptr = Xsqlite3_mprintf(tls, ts+24622, libc.VaList(bp, zFuncName)) Xsqlite3_result_error(tls, pCtx, zMsg, -1) Xsqlite3_free(tls, zMsg) @@ -118196,11 +118254,11 @@ if int32(*(*int8)(unsafe.Pointer(zPath))) != '$' { jsonInit(tls, bp, ctx) if int32(Xsqlite3CtypeMap[uint8(*(*int8)(unsafe.Pointer(zPath)))])&0x04 != 0 { - jsonAppendRaw(tls, bp, ts+24618, uint32(2)) + jsonAppendRaw(tls, bp, ts+24665, uint32(2)) jsonAppendRaw(tls, bp, zPath, uint32(int32(libc.Xstrlen(tls, zPath)))) jsonAppendRaw(tls, bp, ts+4982, uint32(2)) } else { - jsonAppendRaw(tls, bp, ts+24621, uint32(1+libc.Bool32(int32(*(*int8)(unsafe.Pointer(zPath))) != '['))) + jsonAppendRaw(tls, bp, ts+24668, uint32(1+libc.Bool32(int32(*(*int8)(unsafe.Pointer(zPath))) != '['))) jsonAppendRaw(tls, bp, zPath, uint32(int32(libc.Xstrlen(tls, zPath)))) jsonAppendChar(tls, bp, int8(0)) } @@ -118357,14 +118415,14 @@ if argc&1 != 0 { Xsqlite3_result_error(tls, ctx, - ts+24624, -1) + ts+24671, -1) return } jsonInit(tls, bp, ctx) jsonAppendChar(tls, bp, int8('{')) for i = 0; i < argc; i = i + 2 { if Xsqlite3_value_type(tls, *(*uintptr)(unsafe.Pointer(argv + uintptr(i)*8))) != SQLITE_TEXT { - Xsqlite3_result_error(tls, ctx, ts+24675, -1) + Xsqlite3_result_error(tls, ctx, ts+24722, -1) jsonReset(tls, bp) return } @@ -118534,9 +118592,9 @@ } jsonWrongNumArgs(tls, ctx, func() uintptr { if bIsSet != 0 { - return ts + 24709 + return ts + 24756 } - return ts + 24713 + return ts + 24760 }()) return __2: @@ -118669,7 +118727,7 @@ (*JsonString)(unsafe.Pointer(pStr)).FnUsed-- } } else { - Xsqlite3_result_text(tls, ctx, ts+24720, 2, uintptr(0)) + Xsqlite3_result_text(tls, ctx, ts+24767, 2, uintptr(0)) } Xsqlite3_result_subtype(tls, ctx, uint32(JSON_SUBTYPE)) } @@ -118766,7 +118824,7 @@ (*JsonString)(unsafe.Pointer(pStr)).FnUsed-- } } else { - Xsqlite3_result_text(tls, ctx, ts+24723, 2, uintptr(0)) + Xsqlite3_result_text(tls, ctx, ts+24770, 2, uintptr(0)) } Xsqlite3_result_subtype(tls, ctx, uint32(JSON_SUBTYPE)) } @@ -118810,7 +118868,7 @@ _ = argc _ = pAux rc = Xsqlite3_declare_vtab(tls, db, - ts+24726) + ts+24773) if rc == SQLITE_OK { pNew = libc.AssignPtrUintptr(ppVtab, Xsqlite3_malloc(tls, int32(unsafe.Sizeof(Sqlite3_vtab{})))) if pNew == uintptr(0) { @@ -118941,7 +118999,7 @@ nn = nn - 2 } } - jsonPrintf(tls, nn+2, pStr, ts+24809, libc.VaList(bp, nn, z)) + jsonPrintf(tls, nn+2, pStr, ts+24856, libc.VaList(bp, nn, z)) } func jsonEachComputePath(tls *libc.TLS, p uintptr, pStr uintptr, i U32) { @@ -118960,7 +119018,7 @@ pNode = (*JsonEachCursor)(unsafe.Pointer(p)).FsParse.FaNode + uintptr(i)*16 pUp = (*JsonEachCursor)(unsafe.Pointer(p)).FsParse.FaNode + uintptr(iUp)*16 if int32((*JsonNode)(unsafe.Pointer(pUp)).FeType) == JSON_ARRAY { - jsonPrintf(tls, 30, pStr, ts+24815, libc.VaList(bp, 
*(*U32)(unsafe.Pointer(pUp + 8)))) + jsonPrintf(tls, 30, pStr, ts+24862, libc.VaList(bp, *(*U32)(unsafe.Pointer(pUp + 8)))) } else { if int32((*JsonNode)(unsafe.Pointer(pNode)).FjnFlags)&JNODE_LABEL == 0 { pNode -= 16 @@ -119056,7 +119114,7 @@ jsonAppendChar(tls, bp+8, int8('$')) } if int32((*JsonEachCursor)(unsafe.Pointer(p)).FeType) == JSON_ARRAY { - jsonPrintf(tls, 30, bp+8, ts+24815, libc.VaList(bp, (*JsonEachCursor)(unsafe.Pointer(p)).FiRowid)) + jsonPrintf(tls, 30, bp+8, ts+24862, libc.VaList(bp, (*JsonEachCursor)(unsafe.Pointer(p)).FiRowid)) } else if int32((*JsonEachCursor)(unsafe.Pointer(p)).FeType) == JSON_OBJECT { jsonAppendObjectPathElement(tls, bp+8, pThis) } @@ -119080,7 +119138,7 @@ { var zRoot uintptr = (*JsonEachCursor)(unsafe.Pointer(p)).FzRoot if zRoot == uintptr(0) { - zRoot = ts + 24820 + zRoot = ts + 24867 } Xsqlite3_result_text(tls, ctx, zRoot, -1, uintptr(0)) break @@ -119202,7 +119260,7 @@ var rc int32 = SQLITE_NOMEM if int32((*JsonEachCursor)(unsafe.Pointer(p)).FsParse.Foom) == 0 { Xsqlite3_free(tls, (*Sqlite3_vtab)(unsafe.Pointer((*Sqlite3_vtab_cursor)(unsafe.Pointer(cur)).FpVtab)).FzErrMsg) - (*Sqlite3_vtab)(unsafe.Pointer((*Sqlite3_vtab_cursor)(unsafe.Pointer(cur)).FpVtab)).FzErrMsg = Xsqlite3_mprintf(tls, ts+24530, 0) + (*Sqlite3_vtab)(unsafe.Pointer((*Sqlite3_vtab_cursor)(unsafe.Pointer(cur)).FpVtab)).FzErrMsg = Xsqlite3_mprintf(tls, ts+24577, 0) if (*Sqlite3_vtab)(unsafe.Pointer((*Sqlite3_vtab_cursor)(unsafe.Pointer(cur)).FpVtab)).FzErrMsg != 0 { rc = SQLITE_ERROR } @@ -119297,25 +119355,25 @@ } var aJsonFunc = [19]FuncDef{ - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24822}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24827}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24838}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24838}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24856}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_JSON)), FxSFunc: 0, FzName: ts + 24869}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_SQL)), FxSFunc: 0, FzName: ts + 24872}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24876}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24888}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24900}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24911}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24922}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | 
SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24934}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_ISSET)), FxSFunc: 0, FzName: ts + 24947}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24956}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24956}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24966}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_UTF8 | 0*SQLITE_FUNC_NEEDCOLL | SQLITE_SUBTYPE | SQLITE_UTF8 | SQLITE_DETERMINISTIC), FxSFunc: 0, FxFinalize: 0, FxValue: 0, FxInverse: 0, FzName: ts + 24977}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_UTF8 | 0*SQLITE_FUNC_NEEDCOLL | SQLITE_SUBTYPE | SQLITE_UTF8 | SQLITE_DETERMINISTIC), FxSFunc: 0, FxFinalize: 0, FxValue: 0, FxInverse: 0, FzName: ts + 24994}} + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24869}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24874}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24885}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24885}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24903}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_JSON)), FxSFunc: 0, FzName: ts + 24916}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_SQL)), FxSFunc: 0, FzName: ts + 24919}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24923}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24935}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24947}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24958}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24969}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24981}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_ISSET)), FxSFunc: 0, FzName: ts + 24994}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25003}, + {FnArg: int8(2), 
FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25003}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25013}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_UTF8 | 0*SQLITE_FUNC_NEEDCOLL | SQLITE_SUBTYPE | SQLITE_UTF8 | SQLITE_DETERMINISTIC), FxSFunc: 0, FxFinalize: 0, FxValue: 0, FxInverse: 0, FzName: ts + 25024}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_UTF8 | 0*SQLITE_FUNC_NEEDCOLL | SQLITE_SUBTYPE | SQLITE_UTF8 | SQLITE_DETERMINISTIC), FxSFunc: 0, FxFinalize: 0, FxValue: 0, FxInverse: 0, FzName: ts + 25041}} // Register the JSON table-valued functions func Xsqlite3JsonTableFunctions(tls *libc.TLS, db uintptr) int32 { @@ -119334,8 +119392,8 @@ FzName uintptr FpModule uintptr }{ - {FzName: ts + 25012, FpModule: 0}, - {FzName: ts + 25022, FpModule: 0}, + {FzName: ts + 25059, FpModule: 0}, + {FzName: ts + 25069, FpModule: 0}, } type Rtree1 = struct { @@ -119595,11 +119653,11 @@ } } if (*Rtree)(unsafe.Pointer(pRtree)).FpNodeBlob == uintptr(0) { - var zTab uintptr = Xsqlite3_mprintf(tls, ts+25032, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) + var zTab uintptr = Xsqlite3_mprintf(tls, ts+25079, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) if zTab == uintptr(0) { return SQLITE_NOMEM } - rc = Xsqlite3_blob_open(tls, (*Rtree)(unsafe.Pointer(pRtree)).Fdb, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, zTab, ts+25040, iNode, 0, + rc = Xsqlite3_blob_open(tls, (*Rtree)(unsafe.Pointer(pRtree)).Fdb, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, zTab, ts+25087, iNode, 0, pRtree+112) Xsqlite3_free(tls, zTab) } @@ -119810,7 +119868,7 @@ var pRtree uintptr = pVtab var rc int32 var zCreate uintptr = Xsqlite3_mprintf(tls, - ts+25045, + ts+25092, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) @@ -120513,7 +120571,7 @@ var pSrc uintptr var pInfo uintptr - pSrc = Xsqlite3_value_pointer(tls, pValue, ts+25127) + pSrc = Xsqlite3_value_pointer(tls, pValue, ts+25174) if pSrc == uintptr(0) { return SQLITE_ERROR } @@ -121854,7 +121912,7 @@ var zSql uintptr var rc int32 - zSql = Xsqlite3_mprintf(tls, ts+25141, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) + zSql = Xsqlite3_mprintf(tls, ts+25188, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) if zSql != 0 { rc = Xsqlite3_prepare_v2(tls, (*Rtree)(unsafe.Pointer(pRtree)).Fdb, zSql, -1, bp+56, uintptr(0)) } else { @@ -121866,12 +121924,12 @@ if iCol == 0 { var zCol uintptr = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), 0) (*Rtree)(unsafe.Pointer(pRtree)).Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+25161, libc.VaList(bp+16, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zCol)) + ts+25208, libc.VaList(bp+16, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zCol)) } else { var zCol1 uintptr = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), iCol) var zCol2 uintptr = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), iCol+1) (*Rtree)(unsafe.Pointer(pRtree)).Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+25193, libc.VaList(bp+32, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zCol1, zCol2)) + ts+25240, libc.VaList(bp+32, 
(*Rtree)(unsafe.Pointer(pRtree)).FzName, zCol1, zCol2)) } } @@ -122097,7 +122155,7 @@ var pRtree uintptr = pVtab var rc int32 = SQLITE_NOMEM var zSql uintptr = Xsqlite3_mprintf(tls, - ts+25230, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName)) + ts+25277, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName)) if zSql != 0 { nodeBlobReset(tls, pRtree) rc = Xsqlite3_exec(tls, (*Rtree)(unsafe.Pointer(pRtree)).Fdb, zSql, uintptr(0), uintptr(0), uintptr(0)) @@ -122120,7 +122178,7 @@ bp := tls.Alloc(24) defer tls.Free(24) - var zFmt uintptr = ts + 25375 + var zFmt uintptr = ts + 25422 var zSql uintptr var rc int32 @@ -122168,7 +122226,7 @@ } var azName1 = [3]uintptr{ - ts + 25431, ts + 5044, ts + 16251, + ts + 25478, ts + 5044, ts + 16251, } var rtreeModule = Sqlite3_module{ @@ -122211,19 +122269,19 @@ var p uintptr = Xsqlite3_str_new(tls, db) var ii int32 Xsqlite3_str_appendf(tls, p, - ts+25436, + ts+25483, libc.VaList(bp, zDb, zPrefix)) for ii = 0; ii < int32((*Rtree)(unsafe.Pointer(pRtree)).FnAux); ii++ { - Xsqlite3_str_appendf(tls, p, ts+25498, libc.VaList(bp+16, ii)) + Xsqlite3_str_appendf(tls, p, ts+25545, libc.VaList(bp+16, ii)) } Xsqlite3_str_appendf(tls, p, - ts+25503, + ts+25550, libc.VaList(bp+24, zDb, zPrefix)) Xsqlite3_str_appendf(tls, p, - ts+25567, + ts+25614, libc.VaList(bp+40, zDb, zPrefix)) Xsqlite3_str_appendf(tls, p, - ts+25637, + ts+25684, libc.VaList(bp+56, zDb, zPrefix, (*Rtree)(unsafe.Pointer(pRtree)).FiNodeSize)) zCreate = Xsqlite3_str_finish(tls, p) if !(zCreate != 0) { @@ -122252,7 +122310,7 @@ if i != 3 || int32((*Rtree)(unsafe.Pointer(pRtree)).FnAux) == 0 { zFormat = azSql[i] } else { - zFormat = ts + 25686 + zFormat = ts + 25733 } zSql = Xsqlite3_mprintf(tls, zFormat, libc.VaList(bp+80, zDb, zPrefix)) if zSql != 0 { @@ -122264,7 +122322,7 @@ } if (*Rtree)(unsafe.Pointer(pRtree)).FnAux != 0 { (*Rtree)(unsafe.Pointer(pRtree)).FzReadAuxSql = Xsqlite3_mprintf(tls, - ts+25794, + ts+25841, libc.VaList(bp+96, zDb, zPrefix)) if (*Rtree)(unsafe.Pointer(pRtree)).FzReadAuxSql == uintptr(0) { rc = SQLITE_NOMEM @@ -122272,18 +122330,18 @@ var p uintptr = Xsqlite3_str_new(tls, db) var ii int32 var zSql uintptr - Xsqlite3_str_appendf(tls, p, ts+25839, libc.VaList(bp+112, zDb, zPrefix)) + Xsqlite3_str_appendf(tls, p, ts+25886, libc.VaList(bp+112, zDb, zPrefix)) for ii = 0; ii < int32((*Rtree)(unsafe.Pointer(pRtree)).FnAux); ii++ { if ii != 0 { Xsqlite3_str_append(tls, p, ts+12751, 1) } if ii < int32((*Rtree)(unsafe.Pointer(pRtree)).FnAuxNotNull) { - Xsqlite3_str_appendf(tls, p, ts+25866, libc.VaList(bp+128, ii, ii+2, ii)) + Xsqlite3_str_appendf(tls, p, ts+25913, libc.VaList(bp+128, ii, ii+2, ii)) } else { - Xsqlite3_str_appendf(tls, p, ts+25888, libc.VaList(bp+152, ii, ii+2)) + Xsqlite3_str_appendf(tls, p, ts+25935, libc.VaList(bp+152, ii, ii+2)) } } - Xsqlite3_str_appendf(tls, p, ts+25896, 0) + Xsqlite3_str_appendf(tls, p, ts+25943, 0) zSql = Xsqlite3_str_finish(tls, p) if zSql == uintptr(0) { rc = SQLITE_NOMEM @@ -122298,14 +122356,14 @@ } var azSql = [8]uintptr{ - ts + 25912, - ts + 25965, - ts + 26010, - ts + 26062, - 
ts + 26116, - ts + 26161, - ts + 26219, - ts + 26274, + ts + 25959, + ts + 26012, + ts + 26057, + ts + 26109, + ts + 26163, + ts + 26208, + ts + 26266, + ts + 26321, } func getIntFromStmt(tls *libc.TLS, db uintptr, zSql uintptr, piVal uintptr) int32 { @@ -122334,7 +122392,7 @@ var zSql uintptr if isCreate != 0 { *(*int32)(unsafe.Pointer(bp + 48)) = 0 - zSql = Xsqlite3_mprintf(tls, ts+26321, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb)) + zSql = Xsqlite3_mprintf(tls, ts+26368, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb)) rc = getIntFromStmt(tls, db, zSql, bp+48) if rc == SQLITE_OK { (*Rtree)(unsafe.Pointer(pRtree)).FiNodeSize = *(*int32)(unsafe.Pointer(bp + 48)) - 64 @@ -122346,7 +122404,7 @@ } } else { zSql = Xsqlite3_mprintf(tls, - ts+26341, + ts+26388, libc.VaList(bp+16, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) rc = getIntFromStmt(tls, db, zSql, pRtree+32) if rc != SQLITE_OK { @@ -122354,7 +122412,7 @@ } else if (*Rtree)(unsafe.Pointer(pRtree)).FiNodeSize < 512-64 { rc = SQLITE_CORRUPT | int32(1)<<8 - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+26398, + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+26445, libc.VaList(bp+40, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) } } @@ -122396,10 +122454,10 @@ ii = 4 *(*[5]uintptr)(unsafe.Pointer(bp + 96)) = [5]uintptr{ uintptr(0), - ts + 26433, - ts + 26476, - ts + 26511, - ts + 26547, + ts + 26480, + ts + 26523, + ts + 26558, + ts + 26594, } if !(argc < 6 || argc > RTREE_MAX_AUX_COLUMN+3) { @@ -122430,7 +122488,7 @@ libc.Xmemcpy(tls, (*Rtree)(unsafe.Pointer(pRtree)).FzName, *(*uintptr)(unsafe.Pointer(argv + 2*8)), uint64(nName)) pSql = Xsqlite3_str_new(tls, db) - Xsqlite3_str_appendf(tls, pSql, ts+26584, + Xsqlite3_str_appendf(tls, pSql, ts+26631, libc.VaList(bp+16, rtreeTokenLength(tls, *(*uintptr)(unsafe.Pointer(argv + 3*8))), *(*uintptr)(unsafe.Pointer(argv + 3*8)))) ii = 4 __3: @@ -122442,7 +122500,7 @@ goto __6 } (*Rtree)(unsafe.Pointer(pRtree)).FnAux++ - Xsqlite3_str_appendf(tls, pSql, ts+26608, libc.VaList(bp+32, rtreeTokenLength(tls, zArg+uintptr(1)), zArg+uintptr(1))) + Xsqlite3_str_appendf(tls, pSql, ts+26655, libc.VaList(bp+32, rtreeTokenLength(tls, zArg+uintptr(1)), zArg+uintptr(1))) goto __7 __6: if !(int32((*Rtree)(unsafe.Pointer(pRtree)).FnAux) > 0) { @@ -122465,7 +122523,7 @@ goto __5 __5: ; - Xsqlite3_str_appendf(tls, pSql, ts+26614, 0) + Xsqlite3_str_appendf(tls, pSql, ts+26661, 0) zSql = Xsqlite3_str_finish(tls, pSql) if !!(zSql != 0) { goto __10 @@ -122561,7 +122619,7 @@ return rc } -var azFormat = [2]uintptr{ts + 26617, ts + 26628} +var azFormat = [2]uintptr{ts + 26664, ts + 26675} func rtreenode(tls *libc.TLS, ctx uintptr, nArg int32, apArg uintptr) { bp := tls.Alloc(1072) @@ -122601,11 +122659,11 @@ if ii > 0 { Xsqlite3_str_append(tls, pOut, ts+10904, 1) } - Xsqlite3_str_appendf(tls, pOut, ts+26638, libc.VaList(bp, (*RtreeCell)(unsafe.Pointer(bp+1024)).FiRowid)) + Xsqlite3_str_appendf(tls, pOut, ts+26685, libc.VaList(bp, (*RtreeCell)(unsafe.Pointer(bp+1024)).FiRowid)) for jj = 0; jj < int32((*Rtree)(unsafe.Pointer(bp+56)).FnDim2); jj++ { - Xsqlite3_str_appendf(tls, pOut, ts+26644, libc.VaList(bp+8, float64(*(*RtreeValue)(unsafe.Pointer(bp + 1024 + 8 + uintptr(jj)*4))))) + Xsqlite3_str_appendf(tls, pOut, ts+26691, libc.VaList(bp+8, float64(*(*RtreeValue)(unsafe.Pointer(bp + 1024 + 8 + uintptr(jj)*4))))) } - Xsqlite3_str_append(tls, pOut, ts+26648, 1) + Xsqlite3_str_append(tls, pOut, ts+26695, 1) } errCode = Xsqlite3_str_errcode(tls, 
pOut) Xsqlite3_result_text(tls, ctx, Xsqlite3_str_finish(tls, pOut), -1, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{Xsqlite3_free}))) @@ -122616,7 +122674,7 @@ _ = nArg if Xsqlite3_value_type(tls, *(*uintptr)(unsafe.Pointer(apArg))) != SQLITE_BLOB || Xsqlite3_value_bytes(tls, *(*uintptr)(unsafe.Pointer(apArg))) < 2 { - Xsqlite3_result_error(tls, ctx, ts+26650, -1) + Xsqlite3_result_error(tls, ctx, ts+26697, -1) } else { var zBlob uintptr = Xsqlite3_value_blob(tls, *(*uintptr)(unsafe.Pointer(apArg))) if zBlob != 0 { @@ -122694,7 +122752,7 @@ if z == uintptr(0) { (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc = SQLITE_NOMEM } else { - (*RtreeCheck)(unsafe.Pointer(pCheck)).FzReport = Xsqlite3_mprintf(tls, ts+26683, + (*RtreeCheck)(unsafe.Pointer(pCheck)).FzReport = Xsqlite3_mprintf(tls, ts+26730, libc.VaList(bp, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzReport, func() uintptr { if (*RtreeCheck)(unsafe.Pointer(pCheck)).FzReport != 0 { return ts + 4046 @@ -122718,7 +122776,7 @@ if (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc == SQLITE_OK && (*RtreeCheck)(unsafe.Pointer(pCheck)).FpGetNode == uintptr(0) { (*RtreeCheck)(unsafe.Pointer(pCheck)).FpGetNode = rtreeCheckPrepare(tls, pCheck, - ts+26690, + ts+26737, libc.VaList(bp, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzDb, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzTab)) } @@ -122737,7 +122795,7 @@ } rtreeCheckReset(tls, pCheck, (*RtreeCheck)(unsafe.Pointer(pCheck)).FpGetNode) if (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc == SQLITE_OK && pRet == uintptr(0) { - rtreeCheckAppendMsg(tls, pCheck, ts+26735, libc.VaList(bp+16, iNode)) + rtreeCheckAppendMsg(tls, pCheck, ts+26782, libc.VaList(bp+16, iNode)) } } @@ -122751,8 +122809,8 @@ var rc int32 var pStmt uintptr *(*[2]uintptr)(unsafe.Pointer(bp + 80)) = [2]uintptr{ - ts + 26767, - ts + 26821, + ts + 26814, + ts + 26868, } if *(*uintptr)(unsafe.Pointer(pCheck + 40 + uintptr(bLeaf)*8)) == uintptr(0) { @@ -122767,23 +122825,23 @@ Xsqlite3_bind_int64(tls, pStmt, 1, iKey) rc = Xsqlite3_step(tls, pStmt) if rc == SQLITE_DONE { - rtreeCheckAppendMsg(tls, pCheck, ts+26869, + rtreeCheckAppendMsg(tls, pCheck, ts+26916, libc.VaList(bp+16, iKey, iVal, func() uintptr { if bLeaf != 0 { - return ts + 26914 + return ts + 26961 } - return ts + 26922 + return ts + 26969 }())) } else if rc == SQLITE_ROW { var ii I64 = Xsqlite3_column_int64(tls, pStmt, 0) if ii != iVal { rtreeCheckAppendMsg(tls, pCheck, - ts+26931, + ts+26978, libc.VaList(bp+40, iKey, ii, func() uintptr { if bLeaf != 0 { - return ts + 26914 + return ts + 26961 } - return ts + 26922 + return ts + 26969 }(), iKey, iVal)) } } @@ -122807,7 +122865,7 @@ return libc.Bool32(*(*RtreeValue)(unsafe.Pointer(bp + 48)) > *(*RtreeValue)(unsafe.Pointer(bp + 52))) }() != 0 { rtreeCheckAppendMsg(tls, pCheck, - ts+26989, libc.VaList(bp, i, iCell, iNode)) + ts+27036, libc.VaList(bp, i, iCell, iNode)) } if pParent != 0 { @@ -122827,7 +122885,7 @@ return libc.Bool32(*(*RtreeValue)(unsafe.Pointer(bp + 52)) > *(*RtreeValue)(unsafe.Pointer(bp + 60))) }() != 0 { rtreeCheckAppendMsg(tls, pCheck, - ts+27037, libc.VaList(bp+24, i, iCell, iNode)) + ts+27084, libc.VaList(bp+24, i, iCell, iNode)) } } } @@ -122844,14 +122902,14 @@ if aNode != 0 { if *(*int32)(unsafe.Pointer(bp + 48)) < 4 { rtreeCheckAppendMsg(tls, pCheck, - ts+27104, libc.VaList(bp, iNode, *(*int32)(unsafe.Pointer(bp + 48)))) + ts+27151, libc.VaList(bp, iNode, *(*int32)(unsafe.Pointer(bp + 48)))) } else { var nCell int32 var i int32 if aParent == uintptr(0) { iDepth = readInt16(tls, aNode) if iDepth > 
RTREE_MAX_DEPTH { - rtreeCheckAppendMsg(tls, pCheck, ts+27138, libc.VaList(bp+16, iDepth)) + rtreeCheckAppendMsg(tls, pCheck, ts+27185, libc.VaList(bp+16, iDepth)) Xsqlite3_free(tls, aNode) return } @@ -122859,7 +122917,7 @@ nCell = readInt16(tls, aNode+2) if 4+nCell*(8+(*RtreeCheck)(unsafe.Pointer(pCheck)).FnDim*2*4) > *(*int32)(unsafe.Pointer(bp + 48)) { rtreeCheckAppendMsg(tls, pCheck, - ts+27168, + ts+27215, libc.VaList(bp+24, iNode, nCell, *(*int32)(unsafe.Pointer(bp + 48)))) } else { for i = 0; i < nCell; i++ { @@ -122888,14 +122946,14 @@ if (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc == SQLITE_OK { var pCount uintptr - pCount = rtreeCheckPrepare(tls, pCheck, ts+27223, + pCount = rtreeCheckPrepare(tls, pCheck, ts+27270, libc.VaList(bp, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzDb, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzTab, zTbl)) if pCount != 0 { if Xsqlite3_step(tls, pCount) == SQLITE_ROW { var nActual I64 = Xsqlite3_column_int64(tls, pCount, 0) if nActual != nExpect { rtreeCheckAppendMsg(tls, pCheck, - ts+27254, libc.VaList(bp+24, zTbl, nExpect, nActual)) + ts+27301, libc.VaList(bp+24, zTbl, nExpect, nActual)) } } (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc = Xsqlite3_finalize(tls, pCount) @@ -122922,7 +122980,7 @@ } if (*RtreeCheck)(unsafe.Pointer(bp+32)).Frc == SQLITE_OK { - pStmt = rtreeCheckPrepare(tls, bp+32, ts+27321, libc.VaList(bp, zDb, zTab)) + pStmt = rtreeCheckPrepare(tls, bp+32, ts+27368, libc.VaList(bp, zDb, zTab)) if pStmt != 0 { nAux = Xsqlite3_column_count(tls, pStmt) - 2 Xsqlite3_finalize(tls, pStmt) @@ -122931,12 +122989,12 @@ } } - pStmt = rtreeCheckPrepare(tls, bp+32, ts+25141, libc.VaList(bp+16, zDb, zTab)) + pStmt = rtreeCheckPrepare(tls, bp+32, ts+25188, libc.VaList(bp+16, zDb, zTab)) if pStmt != 0 { var rc int32 (*RtreeCheck)(unsafe.Pointer(bp + 32)).FnDim = (Xsqlite3_column_count(tls, pStmt) - 1 - nAux) / 2 if (*RtreeCheck)(unsafe.Pointer(bp+32)).FnDim < 1 { - rtreeCheckAppendMsg(tls, bp+32, ts+27349, 0) + rtreeCheckAppendMsg(tls, bp+32, ts+27396, 0) } else if SQLITE_ROW == Xsqlite3_step(tls, pStmt) { (*RtreeCheck)(unsafe.Pointer(bp + 32)).FbInt = libc.Bool32(Xsqlite3_column_type(tls, pStmt, 1) == SQLITE_INTEGER) } @@ -122950,8 +123008,8 @@ if (*RtreeCheck)(unsafe.Pointer(bp+32)).Frc == SQLITE_OK { rtreeCheckNode(tls, bp+32, 0, uintptr(0), int64(1)) } - rtreeCheckCount(tls, bp+32, ts+27380, int64((*RtreeCheck)(unsafe.Pointer(bp+32)).FnLeaf)) - rtreeCheckCount(tls, bp+32, ts+27387, int64((*RtreeCheck)(unsafe.Pointer(bp+32)).FnNonLeaf)) + rtreeCheckCount(tls, bp+32, ts+27427, int64((*RtreeCheck)(unsafe.Pointer(bp+32)).FnLeaf)) + rtreeCheckCount(tls, bp+32, ts+27434, int64((*RtreeCheck)(unsafe.Pointer(bp+32)).FnNonLeaf)) } Xsqlite3_finalize(tls, (*RtreeCheck)(unsafe.Pointer(bp+32)).FpGetNode) @@ -122959,7 +123017,7 @@ Xsqlite3_finalize(tls, *(*uintptr)(unsafe.Pointer(bp + 32 + 40 + 1*8))) if bEnd != 0 { - var rc int32 = Xsqlite3_exec(tls, db, ts+27395, uintptr(0), uintptr(0), uintptr(0)) + var rc int32 = Xsqlite3_exec(tls, db, ts+27442, uintptr(0), uintptr(0), uintptr(0)) if (*RtreeCheck)(unsafe.Pointer(bp+32)).Frc == SQLITE_OK { (*RtreeCheck)(unsafe.Pointer(bp + 32)).Frc = rc } @@ -122974,7 +123032,7 @@ if nArg != 1 && nArg != 2 { Xsqlite3_result_error(tls, ctx, - ts+27399, -1) + ts+27446, -1) } else { var rc int32 *(*uintptr)(unsafe.Pointer(bp)) = uintptr(0) @@ -122992,7 +123050,7 @@ if *(*uintptr)(unsafe.Pointer(bp)) != 0 { return *(*uintptr)(unsafe.Pointer(bp)) } - return ts + 17996 + return ts + 18043 }(), -1, libc.UintptrFromInt32(-1)) } else { 
Xsqlite3_result_error_code(tls, ctx, rc) @@ -123363,11 +123421,11 @@ var db uintptr = Xsqlite3_context_db_handle(tls, context) var x uintptr = Xsqlite3_str_new(tls, db) var i int32 - Xsqlite3_str_append(tls, x, ts+27450, 1) + Xsqlite3_str_append(tls, x, ts+27497, 1) for i = 0; i < (*GeoPoly)(unsafe.Pointer(p)).FnVertex; i++ { - Xsqlite3_str_appendf(tls, x, ts+27452, libc.VaList(bp, float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2)*4))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2+1)*4))))) + Xsqlite3_str_appendf(tls, x, ts+27499, libc.VaList(bp, float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2)*4))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2+1)*4))))) } - Xsqlite3_str_appendf(tls, x, ts+27463, libc.VaList(bp+16, float64(*(*GeoCoord)(unsafe.Pointer(p + 8))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + 1*4))))) + Xsqlite3_str_appendf(tls, x, ts+27510, libc.VaList(bp+16, float64(*(*GeoCoord)(unsafe.Pointer(p + 8))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + 1*4))))) Xsqlite3_result_text(tls, context, Xsqlite3_str_finish(tls, x), -1, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{Xsqlite3_free}))) Xsqlite3_free(tls, p) } @@ -123387,19 +123445,19 @@ var x uintptr = Xsqlite3_str_new(tls, db) var i int32 var cSep int8 = int8('\'') - Xsqlite3_str_appendf(tls, x, ts+27474, 0) + Xsqlite3_str_appendf(tls, x, ts+27521, 0) for i = 0; i < (*GeoPoly)(unsafe.Pointer(p)).FnVertex; i++ { - Xsqlite3_str_appendf(tls, x, ts+27492, libc.VaList(bp, int32(cSep), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2)*4))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2+1)*4))))) + Xsqlite3_str_appendf(tls, x, ts+27539, libc.VaList(bp, int32(cSep), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2)*4))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2+1)*4))))) cSep = int8(' ') } - Xsqlite3_str_appendf(tls, x, ts+27500, libc.VaList(bp+24, float64(*(*GeoCoord)(unsafe.Pointer(p + 8))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + 1*4))))) + Xsqlite3_str_appendf(tls, x, ts+27547, libc.VaList(bp+24, float64(*(*GeoCoord)(unsafe.Pointer(p + 8))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + 1*4))))) for i = 1; i < argc; i++ { var z uintptr = Xsqlite3_value_text(tls, *(*uintptr)(unsafe.Pointer(argv + uintptr(i)*8))) if z != 0 && *(*int8)(unsafe.Pointer(z)) != 0 { - Xsqlite3_str_appendf(tls, x, ts+27508, libc.VaList(bp+40, z)) + Xsqlite3_str_appendf(tls, x, ts+27555, libc.VaList(bp+40, z)) } } - Xsqlite3_str_appendf(tls, x, ts+27512, 0) + Xsqlite3_str_appendf(tls, x, ts+27559, 0) Xsqlite3_result_text(tls, context, Xsqlite3_str_finish(tls, x), -1, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{Xsqlite3_free}))) Xsqlite3_free(tls, p) } @@ -124319,7 +124377,7 @@ libc.Xmemcpy(tls, (*Rtree)(unsafe.Pointer(pRtree)).FzName, *(*uintptr)(unsafe.Pointer(argv + 2*8)), uint64(nName)) pSql = Xsqlite3_str_new(tls, db) - Xsqlite3_str_appendf(tls, pSql, ts+27525, 0) + Xsqlite3_str_appendf(tls, pSql, ts+27572, 0) (*Rtree)(unsafe.Pointer(pRtree)).FnAux = U8(1) (*Rtree)(unsafe.Pointer(pRtree)).FnAuxNotNull = U8(1) ii = 3 @@ -124328,7 +124386,7 @@ goto __4 } (*Rtree)(unsafe.Pointer(pRtree)).FnAux++ - Xsqlite3_str_appendf(tls, pSql, ts+27547, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(argv + uintptr(ii)*8)))) + Xsqlite3_str_appendf(tls, pSql, ts+27594, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(argv + uintptr(ii)*8)))) goto __3 __3: ii++ @@ -124336,7 +124394,7 @@ goto __4 __4: ; - Xsqlite3_str_appendf(tls, pSql, ts+26614, 0) + 
Xsqlite3_str_appendf(tls, pSql, ts+26661, 0) zSql = Xsqlite3_str_finish(tls, pSql) if !!(zSql != 0) { goto __5 @@ -124573,7 +124631,7 @@ } if iFuncTerm >= 0 { (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxNum = idxNum - (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxStr = ts + 27551 + (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxStr = ts + 27598 (*sqlite3_index_constraint_usage)(unsafe.Pointer((*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FaConstraintUsage + uintptr(iFuncTerm)*8)).FargvIndex = 1 (*sqlite3_index_constraint_usage)(unsafe.Pointer((*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FaConstraintUsage + uintptr(iFuncTerm)*8)).Fomit = uint8(0) (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FestimatedCost = 300.0 @@ -124581,7 +124639,7 @@ return SQLITE_OK } (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxNum = 4 - (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxStr = ts + 27557 + (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxStr = ts + 27604 (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FestimatedCost = 3000000.0 (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FestimatedRows = int64(100000) return SQLITE_OK @@ -124693,7 +124751,7 @@ if !(*(*int32)(unsafe.Pointer(bp + 48)) == SQLITE_ERROR) { goto __4 } - (*Sqlite3_vtab)(unsafe.Pointer(pVtab)).FzErrMsg = Xsqlite3_mprintf(tls, ts+27566, 0) + (*Sqlite3_vtab)(unsafe.Pointer(pVtab)).FzErrMsg = Xsqlite3_mprintf(tls, ts+27613, 0) __4: ; goto geopoly_update_end @@ -124825,14 +124883,14 @@ func geopolyFindFunction(tls *libc.TLS, pVtab uintptr, nArg int32, zName uintptr, pxFunc uintptr, ppArg uintptr) int32 { _ = pVtab _ = nArg - if Xsqlite3_stricmp(tls, zName, ts+27606) == 0 { + if Xsqlite3_stricmp(tls, zName, ts+27653) == 0 { *(*uintptr)(unsafe.Pointer(pxFunc)) = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{geopolyOverlapFunc})) *(*uintptr)(unsafe.Pointer(ppArg)) = uintptr(0) return SQLITE_INDEX_CONSTRAINT_FUNCTION } - if Xsqlite3_stricmp(tls, zName, ts+27622) == 0 { + if Xsqlite3_stricmp(tls, zName, ts+27669) == 0 { *(*uintptr)(unsafe.Pointer(pxFunc)) = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{geopolyWithinFunc})) @@ -124897,7 +124955,7 @@ uintptr(0), aAgg[i].FxStep, aAgg[i].FxFinal) } if rc == SQLITE_OK { - rc = Xsqlite3_create_module_v2(tls, db, ts+27637, uintptr(unsafe.Pointer(&geopolyModule)), uintptr(0), uintptr(0)) + rc = Xsqlite3_create_module_v2(tls, db, ts+27684, uintptr(unsafe.Pointer(&geopolyModule)), uintptr(0), uintptr(0)) } return rc } @@ -124909,25 +124967,25 @@ F__ccgo_pad1 [6]byte FzName uintptr }{ - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27645}, - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27658}, - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27671}, - {FxFunc: 0, FnArg: int8(-1), FbPure: uint8(1), FzName: ts + 27684}, - {FxFunc: 0, FnArg: int8(2), FbPure: uint8(1), FzName: ts + 27622}, - {FxFunc: 0, FnArg: int8(3), FbPure: uint8(1), FzName: ts + 27696}, - {FxFunc: 0, FnArg: int8(2), FbPure: uint8(1), FzName: ts + 27606}, - {FxFunc: 0, FnArg: int8(1), FzName: ts + 27719}, - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27733}, - {FxFunc: 0, FnArg: int8(7), FbPure: uint8(1), FzName: ts + 27746}, - {FxFunc: 0, FnArg: int8(4), FbPure: uint8(1), FzName: ts + 27760}, - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27776}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27692}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), 
FzName: ts + 27705}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27718}, + {FxFunc: 0, FnArg: int8(-1), FbPure: uint8(1), FzName: ts + 27731}, + {FxFunc: 0, FnArg: int8(2), FbPure: uint8(1), FzName: ts + 27669}, + {FxFunc: 0, FnArg: int8(3), FbPure: uint8(1), FzName: ts + 27743}, + {FxFunc: 0, FnArg: int8(2), FbPure: uint8(1), FzName: ts + 27653}, + {FxFunc: 0, FnArg: int8(1), FzName: ts + 27766}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27780}, + {FxFunc: 0, FnArg: int8(7), FbPure: uint8(1), FzName: ts + 27793}, + {FxFunc: 0, FnArg: int8(4), FbPure: uint8(1), FzName: ts + 27807}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27823}, } var aAgg = [1]struct { FxStep uintptr FxFinal uintptr FzName uintptr }{ - {FxStep: 0, FxFinal: 0, FzName: ts + 27788}, + {FxStep: 0, FxFinal: 0, FzName: ts + 27835}, } // Register the r-tree module with database handle db. This creates the @@ -124937,26 +124995,26 @@ var utf8 int32 = SQLITE_UTF8 var rc int32 - rc = Xsqlite3_create_function(tls, db, ts+27807, 2, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + rc = Xsqlite3_create_function(tls, db, ts+27854, 2, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rtreenode})), uintptr(0), uintptr(0)) if rc == SQLITE_OK { - rc = Xsqlite3_create_function(tls, db, ts+27817, 1, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + rc = Xsqlite3_create_function(tls, db, ts+27864, 1, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rtreedepth})), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { - rc = Xsqlite3_create_function(tls, db, ts+27828, -1, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + rc = Xsqlite3_create_function(tls, db, ts+27875, -1, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rtreecheck})), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { var c uintptr = uintptr(RTREE_COORD_REAL32) - rc = Xsqlite3_create_module_v2(tls, db, ts+27551, uintptr(unsafe.Pointer(&rtreeModule)), c, uintptr(0)) + rc = Xsqlite3_create_module_v2(tls, db, ts+27598, uintptr(unsafe.Pointer(&rtreeModule)), c, uintptr(0)) } if rc == SQLITE_OK { var c uintptr = uintptr(RTREE_COORD_INT32) - rc = Xsqlite3_create_module_v2(tls, db, ts+27839, uintptr(unsafe.Pointer(&rtreeModule)), c, uintptr(0)) + rc = Xsqlite3_create_module_v2(tls, db, ts+27886, uintptr(unsafe.Pointer(&rtreeModule)), c, uintptr(0)) } if rc == SQLITE_OK { rc = sqlite3_geopoly_init(tls, db) @@ -125010,7 +125068,7 @@ Xsqlite3_result_error_nomem(tls, ctx) rtreeMatchArgFree(tls, pBlob) } else { - Xsqlite3_result_pointer(tls, ctx, pBlob, ts+25127, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{rtreeMatchArgFree}))) + Xsqlite3_result_pointer(tls, ctx, pBlob, ts+25174, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{rtreeMatchArgFree}))) } } } @@ -125337,7 +125395,7 @@ nOut = rbuDeltaOutputSize(tls, aDelta, nDelta) if nOut < 0 { - Xsqlite3_result_error(tls, context, ts+27849, -1) + Xsqlite3_result_error(tls, context, ts+27896, -1) return } @@ -125348,7 +125406,7 @@ nOut2 = rbuDeltaApply(tls, aOrig, nOrig, aDelta, nDelta, aOut) if nOut2 != nOut { Xsqlite3_free(tls, aOut) - Xsqlite3_result_error(tls, context, ts+27849, -1) + Xsqlite3_result_error(tls, context, ts+27896, -1) } else { Xsqlite3_result_blob(tls, context, aOut, nOut, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{Xsqlite3_free}))) } @@ -125449,7 
+125507,7 @@ rbuObjIterClearStatements(tls, pIter) if (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx == uintptr(0) { rc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+27870, uintptr(0), uintptr(0), p+64) + ts+27917, uintptr(0), uintptr(0), p+64) } if rc == SQLITE_OK { @@ -125513,7 +125571,7 @@ Xsqlite3_result_text(tls, pCtx, zIn, -1, uintptr(0)) } } else { - if libc.Xstrlen(tls, zIn) > uint64(4) && libc.Xmemcmp(tls, ts+25040, zIn, uint64(4)) == 0 { + if libc.Xstrlen(tls, zIn) > uint64(4) && libc.Xmemcmp(tls, ts+25087, zIn, uint64(4)) == 0 { var i int32 for i = 4; int32(*(*int8)(unsafe.Pointer(zIn + uintptr(i)))) >= '0' && int32(*(*int8)(unsafe.Pointer(zIn + uintptr(i)))) <= '9'; i++ { } @@ -125534,16 +125592,16 @@ rc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, pIter, p+64, Xsqlite3_mprintf(tls, - ts+28041, libc.VaList(bp, func() uintptr { + ts+28088, libc.VaList(bp, func() uintptr { if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - return ts + 28191 + return ts + 28238 } return ts + 1538 }()))) if rc == SQLITE_OK { rc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+8, p+64, - ts+28232) + ts+28279) } (*RbuObjIter)(unsafe.Pointer(pIter)).FbCleanup = 1 @@ -125659,7 +125717,7 @@ (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+32, p+64, Xsqlite3_mprintf(tls, - ts+28357, libc.VaList(bp, zTab))) + ts+28404, libc.VaList(bp, zTab))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc != SQLITE_OK || Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 32))) != SQLITE_ROW) { goto __1 } @@ -125677,7 +125735,7 @@ *(*int32)(unsafe.Pointer(piTnum)) = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 32)), 1) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+32+1*8, p+64, - Xsqlite3_mprintf(tls, ts+28476, libc.VaList(bp+8, zTab))) + Xsqlite3_mprintf(tls, ts+28523, libc.VaList(bp+8, zTab))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc != 0) { goto __3 } @@ -125695,7 +125753,7 @@ } (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+32+2*8, p+64, Xsqlite3_mprintf(tls, - ts+28497, libc.VaList(bp+16, zIdx))) + ts+28544, libc.VaList(bp+16, zIdx))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK) { goto __7 } @@ -125718,7 +125776,7 @@ __5: ; (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+32+3*8, p+64, - Xsqlite3_mprintf(tls, ts+28548, libc.VaList(bp+24, zTab))) + Xsqlite3_mprintf(tls, ts+28595, libc.VaList(bp+24, zTab))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK) { goto __10 } @@ -125764,7 +125822,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { libc.Xmemcpy(tls, (*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed, (*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk, uint64(unsafe.Sizeof(U8(0)))*uint64((*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol)) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+16, p+64, - Xsqlite3_mprintf(tls, ts+28569, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) + Xsqlite3_mprintf(tls, ts+28616, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) } (*RbuObjIter)(unsafe.Pointer(pIter)).FnIndex = 0 @@ -125779,7 +125837,7 @@ libc.Xmemset(tls, (*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed, 0x01, 
uint64(unsafe.Sizeof(U8(0)))*uint64((*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol)) } (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+24, p+64, - Xsqlite3_mprintf(tls, ts+28597, libc.VaList(bp+8, zIdx))) + Xsqlite3_mprintf(tls, ts+28644, libc.VaList(bp+8, zIdx))) for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) { var iCid int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 24)), 1) if iCid >= 0 { @@ -125819,7 +125877,7 @@ rbuTableType(tls, p, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, pIter+72, bp+56, pIter+108) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NOTABLE { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+19473, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl)) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+19520, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl)) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc != 0 { return (*Sqlite3rbu)(unsafe.Pointer(p)).Frc @@ -125829,18 +125887,18 @@ } (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp+64, p+64, - Xsqlite3_mprintf(tls, ts+28626, libc.VaList(bp+8, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl))) + Xsqlite3_mprintf(tls, ts+28673, libc.VaList(bp+8, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl))) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { nCol = Xsqlite3_column_count(tls, *(*uintptr)(unsafe.Pointer(bp + 64))) rbuAllocateIterArrays(tls, p, pIter, nCol) } for i = 0; (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && i < nCol; i++ { var zName uintptr = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp + 64)), i) - if Xsqlite3_strnicmp(tls, ts+28645, zName, 4) != 0 { + if Xsqlite3_strnicmp(tls, ts+28692, zName, 4) != 0 { var zCopy uintptr = rbuStrndup(tls, zName, p+56) *(*int32)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FaiSrcOrder + uintptr((*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol)*4)) = (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(libc.PostIncInt32(&(*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol, 1))*8)) = zCopy - } else if 0 == Xsqlite3_stricmp(tls, ts+28650, zName) { + } else if 0 == Xsqlite3_stricmp(tls, ts+28697, zName) { bRbuRowid = 1 } } @@ -125852,18 +125910,18 @@ bRbuRowid != libc.Bool32((*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_VTAB || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE) { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, - ts+28660, libc.VaList(bp+16, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, + ts+28707, libc.VaList(bp+16, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, func() uintptr { if bRbuRowid != 0 { - return ts + 28689 + return ts + 28736 } - return ts + 28702 + return ts + 28749 }())) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+64, p+64, - Xsqlite3_mprintf(tls, ts+28711, libc.VaList(bp+32, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) + Xsqlite3_mprintf(tls, ts+28758, libc.VaList(bp+32, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) } for 
(*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 64))) { var zName uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 64)), 1) @@ -125877,7 +125935,7 @@ } if i == (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+28733, + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+28780, libc.VaList(bp+40, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zName)) } else { var iPk int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 64)), 5) @@ -125924,7 +125982,7 @@ var i int32 for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { var z uintptr = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) - zList = rbuMPrintf(tls, p, ts+28760, libc.VaList(bp, zList, zSep, z)) + zList = rbuMPrintf(tls, p, ts+28807, libc.VaList(bp, zList, zSep, z)) zSep = ts + 14598 } return zList @@ -125942,7 +126000,7 @@ for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { if int32(*(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(i)))) == iPk { var zCol uintptr = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) - zRet = rbuMPrintf(tls, p, ts+28769, libc.VaList(bp, zRet, zSep, zPre, zCol, zPost)) + zRet = rbuMPrintf(tls, p, ts+28816, libc.VaList(bp, zRet, zSep, zPre, zCol, zPost)) zSep = zSeparator break } @@ -125964,25 +126022,25 @@ if bRowid != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+72, p+64, Xsqlite3_mprintf(tls, - ts+28782, libc.VaList(bp, zWrite, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) + ts+28829, libc.VaList(bp, zWrite, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 72))) { var iMax Sqlite3_int64 = Xsqlite3_column_int64(tls, *(*uintptr)(unsafe.Pointer(bp + 72)), 0) - zRet = rbuMPrintf(tls, p, ts+28814, libc.VaList(bp+16, iMax)) + zRet = rbuMPrintf(tls, p, ts+28861, libc.VaList(bp+16, iMax)) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 72))) } else { - var zOrder uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+1538, ts+14598, ts+28837) - var zSelect uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+28843, ts+28850, ts+4941) + var zOrder uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+1538, ts+14598, ts+28884) + var zSelect uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+28890, ts+28897, ts+4941) var zList uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+1538, ts+14598, ts+1538) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+72, p+64, Xsqlite3_mprintf(tls, - ts+28858, + ts+28905, libc.VaList(bp+24, zSelect, zWrite, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zOrder))) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 72))) { var zVal uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 72)), 0) - zRet = rbuMPrintf(tls, p, ts+28900, libc.VaList(bp+56, zList, zVal)) + zRet = rbuMPrintf(tls, p, ts+28947, libc.VaList(bp+56, zList, zVal)) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 72))) } @@ -126024,7 +126082,7 @@ 
*(*uintptr)(unsafe.Pointer(bp + 176)) = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+176, p+64, - Xsqlite3_mprintf(tls, ts+28597, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx))) + Xsqlite3_mprintf(tls, ts+28644, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx))) __1: if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 176)))) { goto __2 @@ -126059,7 +126117,7 @@ zCol = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) goto __7 __6: - zCol = ts + 28920 + zCol = ts + 28967 __7: ; goto __5 @@ -126067,11 +126125,11 @@ zCol = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(iCid)*8)) __5: ; - zLhs = rbuMPrintf(tls, p, ts+28928, + zLhs = rbuMPrintf(tls, p, ts+28975, libc.VaList(bp+8, zLhs, zSep, zCol, zCollate)) - zOrder = rbuMPrintf(tls, p, ts+28949, + zOrder = rbuMPrintf(tls, p, ts+28996, libc.VaList(bp+40, zOrder, zSep, iCol, zCol, zCollate)) - zSelect = rbuMPrintf(tls, p, ts+28985, + zSelect = rbuMPrintf(tls, p, ts+29032, libc.VaList(bp+80, zSelect, zSep, iCol, zCol)) zSep = ts + 14598 iCol++ @@ -126091,7 +126149,7 @@ *(*uintptr)(unsafe.Pointer(bp + 184)) = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+184, p+64, - Xsqlite3_mprintf(tls, ts+29012, + Xsqlite3_mprintf(tls, ts+29059, libc.VaList(bp+112, zSelect, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zOrder))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 184)))) { goto __13 @@ -126118,7 +126176,7 @@ ; __18: ; - zVector = rbuMPrintf(tls, p, ts+29060, libc.VaList(bp+136, zVector, zSep, zQuoted)) + zVector = rbuMPrintf(tls, p, ts+29107, libc.VaList(bp+136, zVector, zSep, zQuoted)) zSep = ts + 14598 goto __15 __15: @@ -126130,7 +126188,7 @@ if !!(bFailed != 0) { goto __20 } - zRet = rbuMPrintf(tls, p, ts+29067, libc.VaList(bp+160, zLhs, zVector)) + zRet = rbuMPrintf(tls, p, ts+29114, libc.VaList(bp+160, zLhs, zVector)) __20: ; __13: @@ -126163,7 +126221,7 @@ if rc == SQLITE_OK { rc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+200, p+64, - Xsqlite3_mprintf(tls, ts+28597, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx))) + Xsqlite3_mprintf(tls, ts+28644, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx))) } for rc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 200))) { @@ -126175,7 +126233,7 @@ if iCid == -2 { var iSeq int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 200)), 0) - zRet = Xsqlite3_mprintf(tls, ts+29079, libc.VaList(bp+8, zRet, zCom, + zRet = Xsqlite3_mprintf(tls, ts+29126, libc.VaList(bp+8, zRet, zCom, (*RbuSpan)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FaIdxCol+uintptr(iSeq)*16)).FnSpan, (*RbuSpan)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FaIdxCol+uintptr(iSeq)*16)).FzSpan, zCollate)) zType = ts + 1538 } else { @@ -126187,37 +126245,37 @@ zCol = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) } else if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - zCol = ts + 28920 + zCol = ts + 28967 } else { - zCol = ts + 28650 + zCol = ts + 28697 } zType = ts + 1103 } else { zCol = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + 
uintptr(iCid)*8)) zType = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblType + uintptr(iCid)*8)) } - zRet = Xsqlite3_mprintf(tls, ts+29101, libc.VaList(bp+48, zRet, zCom, zCol, zCollate)) + zRet = Xsqlite3_mprintf(tls, ts+29148, libc.VaList(bp+48, zRet, zCom, zCol, zCollate)) } if (*RbuObjIter)(unsafe.Pointer(pIter)).FbUnique == 0 || Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 200)), 5) != 0 { var zOrder uintptr = func() uintptr { if bDesc != 0 { - return ts + 28837 + return ts + 28884 } return ts + 1538 }() - zImpPK = Xsqlite3_mprintf(tls, ts+29121, + zImpPK = Xsqlite3_mprintf(tls, ts+29168, libc.VaList(bp+80, zImpPK, zCom, nBind, zCol, zOrder)) } - zImpCols = Xsqlite3_mprintf(tls, ts+29142, + zImpCols = Xsqlite3_mprintf(tls, ts+29189, libc.VaList(bp+120, zImpCols, zCom, nBind, zCol, zType, zCollate)) zWhere = Xsqlite3_mprintf(tls, - ts+29175, libc.VaList(bp+168, zWhere, zAnd, nBind, zCol)) + ts+29222, libc.VaList(bp+168, zWhere, zAnd, nBind, zCol)) if zRet == uintptr(0) || zImpPK == uintptr(0) || zImpCols == uintptr(0) || zWhere == uintptr(0) { rc = SQLITE_NOMEM } zCom = ts + 14598 - zAnd = ts + 21509 + zAnd = ts + 21556 nBind++ } @@ -126256,9 +126314,9 @@ for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { if *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed + uintptr(i))) != 0 { var zCol uintptr = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) - zList = Xsqlite3_mprintf(tls, ts+29199, libc.VaList(bp, zList, zS, zObj, zCol)) + zList = Xsqlite3_mprintf(tls, ts+29246, libc.VaList(bp, zList, zS, zObj, zCol)) } else { - zList = Xsqlite3_mprintf(tls, ts+29211, libc.VaList(bp+32, zList, zS)) + zList = Xsqlite3_mprintf(tls, ts+29258, libc.VaList(bp+32, zList, zS)) } zS = ts + 14598 if zList == uintptr(0) { @@ -126268,7 +126326,7 @@ } if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { - zList = rbuMPrintf(tls, p, ts+29220, libc.VaList(bp+48, zList, zObj)) + zList = rbuMPrintf(tls, p, ts+29267, libc.VaList(bp+48, zList, zObj)) } } return zList @@ -126280,18 +126338,18 @@ var zList uintptr = uintptr(0) if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_VTAB || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { - zList = rbuMPrintf(tls, p, ts+29235, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol+1)) + zList = rbuMPrintf(tls, p, ts+29282, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol+1)) } else if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL { var zSep uintptr = ts + 1538 var i int32 for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { if *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(i))) != 0 { - zList = rbuMPrintf(tls, p, ts+29249, libc.VaList(bp+8, zList, zSep, i, i+1)) - zSep = ts + 21509 + zList = rbuMPrintf(tls, p, ts+29296, libc.VaList(bp+8, zList, zSep, i, i+1)) + zSep = ts + 21556 } } zList = rbuMPrintf(tls, p, - ts+29261, libc.VaList(bp+40, zList)) + ts+29308, libc.VaList(bp+40, zList)) } else { var zSep uintptr = ts + 1538 @@ -126299,8 +126357,8 @@ for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { if *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(i))) != 0 { var zCol uintptr = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) - zList = rbuMPrintf(tls, p, ts+29311, libc.VaList(bp+48, zList, zSep, 
zCol, i+1)) - zSep = ts + 21509 + zList = rbuMPrintf(tls, p, ts+29358, libc.VaList(bp+48, zList, zSep, zCol, i+1)) + zSep = ts + 21556 } } } @@ -126309,7 +126367,7 @@ func rbuBadControlError(tls *libc.TLS, p uintptr) { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+29324, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+29371, 0) } func rbuObjIterGetSetlist(tls *libc.TLS, p uintptr, pIter uintptr, zMask uintptr) uintptr { @@ -126327,15 +126385,15 @@ for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { var c int8 = *(*int8)(unsafe.Pointer(zMask + uintptr(*(*int32)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FaiSrcOrder + uintptr(i)*4))))) if int32(c) == 'x' { - zList = rbuMPrintf(tls, p, ts+29311, + zList = rbuMPrintf(tls, p, ts+29358, libc.VaList(bp, zList, zSep, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)), i+1)) zSep = ts + 14598 } else if int32(c) == 'd' { - zList = rbuMPrintf(tls, p, ts+29350, + zList = rbuMPrintf(tls, p, ts+29397, libc.VaList(bp+32, zList, zSep, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)), *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)), i+1)) zSep = ts + 14598 } else if int32(c) == 'f' { - zList = rbuMPrintf(tls, p, ts+29380, + zList = rbuMPrintf(tls, p, ts+29427, libc.VaList(bp+72, zList, zSep, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)), *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)), i+1)) zSep = ts + 14598 } @@ -126372,19 +126430,19 @@ var z uintptr = uintptr(0) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - var zSep uintptr = ts + 29417 + var zSep uintptr = ts + 29464 *(*uintptr)(unsafe.Pointer(bp + 56)) = uintptr(0) *(*uintptr)(unsafe.Pointer(bp + 64)) = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+56, p+64, - Xsqlite3_mprintf(tls, ts+28569, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) + Xsqlite3_mprintf(tls, ts+28616, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 56))) { var zOrig uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), 3) if zOrig != 0 && libc.Xstrcmp(tls, zOrig, ts+16139) == 0 { var zIdx uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), 1) if zIdx != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+64, p+64, - Xsqlite3_mprintf(tls, ts+28597, libc.VaList(bp+8, zIdx))) + Xsqlite3_mprintf(tls, ts+28644, libc.VaList(bp+8, zIdx))) } break } @@ -126396,15 +126454,15 @@ var zCol uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 64)), 2) var zDesc uintptr if Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 64)), 3) != 0 { - zDesc = ts + 28837 + zDesc = ts + 28884 } else { zDesc = ts + 1538 } - z = rbuMPrintf(tls, p, ts+29430, libc.VaList(bp+16, z, zSep, zCol, zDesc)) + z = rbuMPrintf(tls, p, ts+29477, libc.VaList(bp+16, z, zSep, zCol, zDesc)) zSep = ts + 14598 } } - z = rbuMPrintf(tls, p, ts+29441, libc.VaList(bp+48, z)) + z = rbuMPrintf(tls, p, ts+29488, libc.VaList(bp+48, z)) rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp 
+ 64))) } return z @@ -126424,7 +126482,7 @@ var zPk uintptr = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+168, p+64, - ts+29445) + ts+29492) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { Xsqlite3_bind_int(tls, *(*uintptr)(unsafe.Pointer(bp + 168)), 1, tnum) if SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 168))) { @@ -126433,7 +126491,7 @@ } if zIdx != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+176, p+64, - Xsqlite3_mprintf(tls, ts+28597, libc.VaList(bp, zIdx))) + Xsqlite3_mprintf(tls, ts+28644, libc.VaList(bp, zIdx))) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 168))) @@ -126443,23 +126501,23 @@ var iCid int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 176)), 1) var bDesc int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 176)), 3) var zCollate uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 176)), 4) - zCols = rbuMPrintf(tls, p, ts+29495, libc.VaList(bp+8, zCols, zComma, + zCols = rbuMPrintf(tls, p, ts+29542, libc.VaList(bp+8, zCols, zComma, iCid, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblType + uintptr(iCid)*8)), zCollate)) - zPk = rbuMPrintf(tls, p, ts+29517, libc.VaList(bp+48, zPk, zComma, iCid, func() uintptr { + zPk = rbuMPrintf(tls, p, ts+29564, libc.VaList(bp+48, zPk, zComma, iCid, func() uintptr { if bDesc != 0 { - return ts + 28837 + return ts + 28884 } return ts + 1538 }())) zComma = ts + 14598 } } - zCols = rbuMPrintf(tls, p, ts+29527, libc.VaList(bp+80, zCols)) + zCols = rbuMPrintf(tls, p, ts+29574, libc.VaList(bp+80, zCols)) rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 176))) Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+88, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6425, 1, tnum)) rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+29542, + ts+29589, libc.VaList(bp+120, zCols, zPk)) Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+136, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6425, 0, 0)) } @@ -126485,13 +126543,13 @@ (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6425, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zCol, uintptr(0), bp+192, uintptr(0), uintptr(0), uintptr(0)) if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_IPK && *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(iCol))) != 0 { - zPk = ts + 29604 + zPk = ts + 29651 } - zSql = rbuMPrintf(tls, p, ts+29617, + zSql = rbuMPrintf(tls, p, ts+29664, libc.VaList(bp+32, zSql, zComma, zCol, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblType + uintptr(iCol)*8)), zPk, *(*uintptr)(unsafe.Pointer(bp + 192)), func() uintptr { if *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabNotNull + uintptr(iCol))) != 0 { - return ts + 29644 + return ts + 29691 } return ts + 1538 }())) @@ -126501,16 +126559,16 @@ if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_WITHOUT_ROWID { var zPk uintptr = rbuWithoutRowidPK(tls, p, pIter) if zPk != 0 { - zSql = rbuMPrintf(tls, p, ts+29654, libc.VaList(bp+88, zSql, zPk)) + zSql = rbuMPrintf(tls, p, ts+29701, libc.VaList(bp+88, zSql, zPk)) } } Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+104, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6425, 1, tnum)) - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+29661, + rbuMPrintfExec(tls, p, 
(*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+29708, libc.VaList(bp+136, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zSql, func() uintptr { if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_WITHOUT_ROWID { - return ts + 29693 + return ts + 29740 } return ts + 1538 }())) @@ -126527,7 +126585,7 @@ if zBind != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, pIter+152, p+64, Xsqlite3_mprintf(tls, - ts+29708, + ts+29755, libc.VaList(bp, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zCollist, zRbuRowid, zBind))) } } @@ -126564,7 +126622,7 @@ if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { *(*int32)(unsafe.Pointer(bp + 8)) = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp, p+64, - ts+29765) + ts+29812) } if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { var rc2 int32 @@ -126669,7 +126727,7 @@ var zLimit uintptr = uintptr(0) if nOffset != 0 { - zLimit = Xsqlite3_mprintf(tls, ts+29831, libc.VaList(bp, nOffset)) + zLimit = Xsqlite3_mprintf(tls, ts+29878, libc.VaList(bp, nOffset)) if !(zLimit != 0) { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_NOMEM } @@ -126692,7 +126750,7 @@ Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+8, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6425, 0, 1)) Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+40, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6425, 1, tnum)) rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+29851, + ts+29898, libc.VaList(bp+72, zTbl, *(*uintptr)(unsafe.Pointer(bp + 600)), *(*uintptr)(unsafe.Pointer(bp + 608)))) Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+96, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6425, 0, 0)) @@ -126700,13 +126758,13 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+136, p+64, - Xsqlite3_mprintf(tls, ts+29916, libc.VaList(bp+128, zTbl, zBind))) + Xsqlite3_mprintf(tls, ts+29963, libc.VaList(bp+128, zTbl, zBind))) } if libc.Bool32((*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0)) == 0 && (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+144, p+64, - Xsqlite3_mprintf(tls, ts+29952, libc.VaList(bp+144, zTbl, *(*uintptr)(unsafe.Pointer(bp + 616))))) + Xsqlite3_mprintf(tls, ts+29999, libc.VaList(bp+144, zTbl, *(*uintptr)(unsafe.Pointer(bp + 616))))) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { @@ -126722,7 +126780,7 @@ } zSql = Xsqlite3_mprintf(tls, - ts+29986, + ts+30033, libc.VaList(bp+160, zCollist, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zPart, @@ -126730,9 +126788,9 @@ if zStart != 0 { return func() uintptr { if zPart != 0 { - return ts + 30047 + return ts + 30094 } - return ts + 30051 + return ts + 30098 }() } return ts + 1538 @@ -126741,20 +126799,20 @@ Xsqlite3_free(tls, zStart) } else if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { zSql = Xsqlite3_mprintf(tls, - ts+30057, + ts+30104, libc.VaList(bp+216, zCollist, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zPart, zCollist, zLimit)) } else { zSql = Xsqlite3_mprintf(tls, - ts+30118, + ts+30165, libc.VaList(bp+264, zCollist, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zPart, zCollist, 
(*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zPart, func() uintptr { if zPart != 0 { - return ts + 30047 + return ts + 30094 } - return ts + 30051 + return ts + 30098 }(), zCollist, zLimit)) } @@ -126791,16 +126849,16 @@ if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_VTAB { return ts + 1538 } - return ts + 30277 + return ts + 30324 }() if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+136, pz, Xsqlite3_mprintf(tls, - ts+30286, + ts+30333, libc.VaList(bp+344, zWrite, zTbl, zCollist, func() uintptr { if bRbuRowid != 0 { - return ts + 30322 + return ts + 30369 } return ts + 1538 }(), zBindings))) @@ -126809,32 +126867,32 @@ if libc.Bool32((*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0)) == 0 && (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+144, pz, Xsqlite3_mprintf(tls, - ts+30332, libc.VaList(bp+384, zWrite, zTbl, zWhere))) + ts+30379, libc.VaList(bp+384, zWrite, zTbl, zWhere))) } if libc.Bool32((*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0)) == 0 && (*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed != 0 { var zRbuRowid uintptr = ts + 1538 if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { - zRbuRowid = ts + 30360 + zRbuRowid = ts + 30407 } rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, - ts+30372, libc.VaList(bp+408, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, func() uintptr { + ts+30419, libc.VaList(bp+408, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, func() uintptr { if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL { - return ts + 30448 + return ts + 30495 } return ts + 1538 }(), (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl)) rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+30465, + ts+30512, libc.VaList(bp+440, zWrite, zTbl, zOldlist, zWrite, zTbl, zOldlist, zWrite, zTbl, zNewlist)) if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+30764, + ts+30811, libc.VaList(bp+512, zWrite, zTbl, zNewlist)) } @@ -126847,9 +126905,9 @@ var zOrder uintptr = uintptr(0) if bRbuRowid != 0 { if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - zRbuRowid = ts + 30863 + zRbuRowid = ts + 30910 } else { - zRbuRowid = ts + 30873 + zRbuRowid = ts + 30920 } } @@ -126862,7 +126920,7 @@ } } if bRbuRowid != 0 { - zOrder = rbuMPrintf(tls, p, ts+28920, 0) + zOrder = rbuMPrintf(tls, p, ts+28967, 0) } else { zOrder = rbuObjIterGetPkList(tls, p, pIter, ts+1538, ts+14598, ts+1538) } @@ -126871,11 +126929,11 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, pIter+128, pz, Xsqlite3_mprintf(tls, - ts+30884, + ts+30931, libc.VaList(bp+536, zCollist, func() uintptr { if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - return ts + 30932 + return ts + 30979 } return ts + 1538 }(), @@ -126888,7 +126946,7 @@ }(), func() uintptr { if zOrder != 0 { - return ts + 22843 + return ts + 22890 } return ts + 1538 }(), zOrder, @@ -126956,9 +127014,9 @@ var zPrefix uintptr = ts + 1538 if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType 
!= RBU_PK_VTAB { - zPrefix = ts + 30277 + zPrefix = ts + 30324 } - zUpdate = Xsqlite3_mprintf(tls, ts+30938, + zUpdate = Xsqlite3_mprintf(tls, ts+30985, libc.VaList(bp, zPrefix, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zSet, zWhere)) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pUp+8, p+64, zUpdate) @@ -127017,7 +127075,7 @@ } *(*int32)(unsafe.Pointer(bp + 16)) = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp+8, p+64, - Xsqlite3_mprintf(tls, ts+30968, libc.VaList(bp, p+48))) + Xsqlite3_mprintf(tls, ts+31015, libc.VaList(bp, p+48))) for *(*int32)(unsafe.Pointer(bp + 16)) == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 8))) { switch Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 8)), 0) { case RBU_STATE_STAGE: @@ -127090,18 +127148,18 @@ Xsqlite3_file_control(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+6425, SQLITE_FCNTL_RBUCNT, p) if (*Sqlite3rbu)(unsafe.Pointer(p)).FzState == uintptr(0) { var zFile uintptr = Xsqlite3_db_filename(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+6425) - (*Sqlite3rbu)(unsafe.Pointer(p)).FzState = rbuMPrintf(tls, p, ts+30998, libc.VaList(bp, zFile, zFile)) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzState = rbuMPrintf(tls, p, ts+31045, libc.VaList(bp, zFile, zFile)) } } if (*Sqlite3rbu)(unsafe.Pointer(p)).FzState != 0 { - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+31026, libc.VaList(bp+16, (*Sqlite3rbu)(unsafe.Pointer(p)).FzState)) + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+31073, libc.VaList(bp+16, (*Sqlite3rbu)(unsafe.Pointer(p)).FzState)) libc.Xmemcpy(tls, p+48, ts+3270, uint64(4)) } else { libc.Xmemcpy(tls, p+48, ts+6425, uint64(4)) } - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+31044, libc.VaList(bp+24, p+48)) + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+31091, libc.VaList(bp+24, p+48)) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { var bOpen int32 = 0 @@ -127141,11 +127199,11 @@ return } (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31110, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31157, 0) } else { var zTarget uintptr var zExtra uintptr = uintptr(0) - if libc.Xstrlen(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu) >= uint64(5) && 0 == libc.Xmemcmp(tls, ts+24190, (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu, uint64(5)) { + if libc.Xstrlen(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu) >= uint64(5) && 0 == libc.Xmemcmp(tls, ts+24237, (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu, uint64(5)) { zExtra = (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu + 5 for *(*int8)(unsafe.Pointer(zExtra)) != 0 { if int32(*(*int8)(unsafe.Pointer(libc.PostIncUintptr(&zExtra, 1)))) == '?' 
{ @@ -127157,13 +127215,13 @@ } } - zTarget = Xsqlite3_mprintf(tls, ts+31142, + zTarget = Xsqlite3_mprintf(tls, ts+31189, libc.VaList(bp+32, Xsqlite3_db_filename(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+6425), func() uintptr { if zExtra == uintptr(0) { return ts + 1538 } - return ts + 31174 + return ts + 31221 }(), func() uintptr { if zExtra == uintptr(0) { return ts + 1538 @@ -127182,21 +127240,21 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_create_function(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+31176, -1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { + ts+31223, -1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rbuTmpInsertFunc})), uintptr(0), uintptr(0)) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_create_function(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+31191, 2, SQLITE_UTF8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + ts+31238, 2, SQLITE_UTF8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rbuFossilDeltaFunc})), uintptr(0), uintptr(0)) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_create_function(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, - ts+31208, -1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { + ts+31255, -1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rbuTargetNameFunc})), uintptr(0), uintptr(0)) } @@ -127204,7 +127262,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_file_control(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6425, SQLITE_FCNTL_RBU, p) } - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31224, 0) + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31271, 0) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_file_control(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6425, SQLITE_FCNTL_RBU, p) @@ -127212,7 +127270,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_NOTFOUND { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31252, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31299, 0) } } @@ -127241,14 +127299,14 @@ if pState == uintptr(0) { (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage = 0 if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31224, uintptr(0), uintptr(0), uintptr(0)) + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31271, uintptr(0), uintptr(0), uintptr(0)) } } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { var rc2 int32 (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage = RBU_STAGE_CAPTURE - rc2 = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31270, uintptr(0), uintptr(0), uintptr(0)) + rc2 = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31317, uintptr(0), uintptr(0), uintptr(0)) if rc2 != SQLITE_NOTICE { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = rc2 } @@ -127374,7 +127432,7 @@ func rbuExclusiveCheckpoint(tls *libc.TLS, db uintptr) int32 { var zUri uintptr = Xsqlite3_db_filename(tls, db, uintptr(0)) - return 
Xsqlite3_uri_boolean(tls, zUri, ts+31305, 0) + return Xsqlite3_uri_boolean(tls, zUri, ts+31352, 0) } func rbuMoveOalFile(tls *libc.TLS, p uintptr) { @@ -127389,8 +127447,8 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { zMove = Xsqlite3_db_filename(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+6425) } - zOal = Xsqlite3_mprintf(tls, ts+31330, libc.VaList(bp, zMove)) - zWal = Xsqlite3_mprintf(tls, ts+31337, libc.VaList(bp+8, zMove)) + zOal = Xsqlite3_mprintf(tls, ts+31377, libc.VaList(bp, zMove)) + zWal = Xsqlite3_mprintf(tls, ts+31384, libc.VaList(bp+8, zMove)) if zWal == uintptr(0) || zOal == uintptr(0) { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_NOMEM @@ -127507,7 +127565,7 @@ (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx == uintptr(0) && (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_IPK && *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(i))) != 0 && Xsqlite3_column_type(tls, (*RbuObjIter)(unsafe.Pointer(pIter)).FpSelect, i) == SQLITE_NULL { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_MISMATCH - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+23828, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+23875, 0) return } @@ -127600,7 +127658,7 @@ var iCookie int32 = 1000000 (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, dbread, bp+8, p+64, - ts+31344) + ts+31391) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { if SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 8))) { iCookie = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 8)), 0) @@ -127608,7 +127666,7 @@ rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 8))) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31366, libc.VaList(bp, iCookie+1)) + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31413, libc.VaList(bp, iCookie+1)) } } } @@ -127629,7 +127687,7 @@ rc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp+168, p+64, Xsqlite3_mprintf(tls, - ts+31393, + ts+31440, libc.VaList(bp, p+48, RBU_STATE_STAGE, eStage, RBU_STATE_TBL, (*Sqlite3rbu)(unsafe.Pointer(p)).Fobjiter.FzTbl, @@ -127659,9 +127717,9 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { *(*uintptr)(unsafe.Pointer(bp + 24)) = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp+24, p+64, - Xsqlite3_mprintf(tls, ts+31551, libc.VaList(bp, zPragma))) + Xsqlite3_mprintf(tls, ts+31598, libc.VaList(bp, zPragma))) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) { - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31566, + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31613, libc.VaList(bp+8, zPragma, Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 24)), 0))) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 24))) @@ -127675,10 +127733,10 @@ *(*uintptr)(unsafe.Pointer(bp)) = uintptr(0) *(*uintptr)(unsafe.Pointer(bp + 8)) = uintptr(0) - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31586, uintptr(0), uintptr(0), p+64) + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31633, uintptr(0), uintptr(0), p+64) if 
(*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp, p+64, - ts+31611) + ts+31658) } for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp))) == SQLITE_ROW { @@ -127692,12 +127750,12 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp, p+64, - ts+31719) + ts+31766) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+8, p+64, - ts+31784) + ts+31831) } for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp))) == SQLITE_ROW { @@ -127709,7 +127767,7 @@ (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_reset(tls, *(*uintptr)(unsafe.Pointer(bp + 8))) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31828, uintptr(0), uintptr(0), p+64) + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31875, uintptr(0), uintptr(0), p+64) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp))) @@ -127737,7 +127795,7 @@ if (*RbuObjIter)(unsafe.Pointer(pIter)).FbCleanup != 0 { if libc.Bool32((*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0)) == 0 && (*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed != 0 { rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, - ts+31853, libc.VaList(bp, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl)) + ts+31900, libc.VaList(bp, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl)) } } else { rbuObjIterPrepareAll(tls, p, pIter, 0) @@ -127859,7 +127917,7 @@ if rc == SQLITE_OK && !(int32((*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl) != 0) { rc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31881, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31928, 0) } if rc == SQLITE_OK { @@ -127875,7 +127933,7 @@ bp := tls.Alloc(16) defer tls.Free(16) - var zOal uintptr = rbuMPrintf(tls, p, ts+31330, libc.VaList(bp, (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget)) + var zOal uintptr = rbuMPrintf(tls, p, ts+31377, libc.VaList(bp, (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget)) if zOal != 0 { *(*uintptr)(unsafe.Pointer(bp + 8)) = uintptr(0) Xsqlite3_file_control(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6425, SQLITE_FCNTL_VFS_POINTER, bp+8) @@ -127892,7 +127950,7 @@ defer tls.Free(76) Xsqlite3_randomness(tls, int32(unsafe.Sizeof(int32(0))), bp+8) - Xsqlite3_snprintf(tls, int32(unsafe.Sizeof([64]int8{})), bp+12, ts+31906, libc.VaList(bp, *(*int32)(unsafe.Pointer(bp + 8)))) + Xsqlite3_snprintf(tls, int32(unsafe.Sizeof([64]int8{})), bp+12, ts+31953, libc.VaList(bp, *(*int32)(unsafe.Pointer(bp + 8)))) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3rbu_create_vfs(tls, bp+12, uintptr(0)) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { var pVfs uintptr = Xsqlite3_vfs_find(tls, bp+12) @@ -127926,7 +127984,7 @@ rc = prepareFreeAndCollectError(tls, db, bp+8, bp+16, Xsqlite3_mprintf(tls, - ts+31917, libc.VaList(bp, Xsqlite3_value_text(tls, *(*uintptr)(unsafe.Pointer(apVal)))))) + ts+31964, libc.VaList(bp, Xsqlite3_value_text(tls, *(*uintptr)(unsafe.Pointer(apVal)))))) if rc != SQLITE_OK { Xsqlite3_result_error(tls, pCtx, 
*(*uintptr)(unsafe.Pointer(bp + 16)), -1) } else { @@ -127956,13 +128014,13 @@ (*Sqlite3rbu)(unsafe.Pointer(p)).FnPhaseOneStep = int64(-1) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_create_function(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, - ts+31989, 1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { + ts+32036, 1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rbuIndexCntFunc})), uintptr(0), uintptr(0)) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp, p+64, - ts+32003) + ts+32050) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { if SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp))) { @@ -127973,7 +128031,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && bExists != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp, p+64, - ts+32060) + ts+32107) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { if SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp))) { (*Sqlite3rbu)(unsafe.Pointer(p)).FnPhaseOneStep = Xsqlite3_column_int64(tls, *(*uintptr)(unsafe.Pointer(bp)), 0) @@ -128047,7 +128105,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && (*Rbu_file)(unsafe.Pointer((*Sqlite3rbu)(unsafe.Pointer(p)).FpTargetFd)).FpWalFd != 0 { if (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage == RBU_STAGE_OAL { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32134, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32181, 0) } else if (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage == RBU_STAGE_MOVE { (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage = RBU_STAGE_CKPT (*Sqlite3rbu)(unsafe.Pointer(p)).FnStep = 0 @@ -128065,12 +128123,12 @@ }() if (*Rbu_file)(unsafe.Pointer(pFd)).FiCookie != (*RbuState)(unsafe.Pointer(pState)).FiCookie { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_BUSY - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32166, + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32213, libc.VaList(bp+8, func() uintptr { if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - return ts + 32198 + return ts + 32245 } - return ts + 32205 + return ts + 32252 }())) } } @@ -128094,14 +128152,14 @@ } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, db, ts+32212, uintptr(0), uintptr(0), p+64) + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, db, ts+32259, uintptr(0), uintptr(0), p+64) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { var frc int32 = Xsqlite3_file_control(tls, db, ts+6425, SQLITE_FCNTL_ZIPVFS, uintptr(0)) if frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, - db, ts+32228, uintptr(0), uintptr(0), p+64) + db, ts+32275, uintptr(0), uintptr(0), p+64) } } @@ -128155,7 +128213,7 @@ } if zState != 0 { var n int32 = int32(libc.Xstrlen(tls, zState)) - if n >= 7 && 0 == libc.Xmemcmp(tls, ts+32252, zState+uintptr(n-7), uint64(7)) { + if n >= 7 && 0 == libc.Xmemcmp(tls, ts+32299, zState+uintptr(n-7), uint64(7)) { return rbuMisuseError(tls) } } @@ -128182,7 +128240,7 @@ var i uint32 var nErrmsg Size_t = libc.Xstrlen(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg) for i = uint32(0); Size_t(i) < nErrmsg-uint64(8); i++ { - if libc.Xmemcmp(tls, 
(*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg+uintptr(i), ts+30277, uint64(8)) == 0 { + if libc.Xmemcmp(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg+uintptr(i), ts+30324, uint64(8)) == 0 { var nDel int32 = 8 for int32(*(*int8)(unsafe.Pointer((*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg + uintptr(i+uint32(nDel))))) >= '0' && int32(*(*int8)(unsafe.Pointer((*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg + uintptr(i+uint32(nDel))))) <= '9' { nDel++ @@ -128218,7 +128276,7 @@ rbuObjIterFinalize(tls, p+80) if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) && (*Sqlite3rbu)(unsafe.Pointer(p)).Frc != SQLITE_OK && (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu != 0 { - var rc2 int32 = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+32260, uintptr(0), uintptr(0), uintptr(0)) + var rc2 int32 = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+32307, uintptr(0), uintptr(0), uintptr(0)) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_DONE && rc2 != SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = rc2 } @@ -128337,12 +128395,12 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { zBegin = ts + 14487 } else { - zBegin = ts + 32212 + zBegin = ts + 32259 } rc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, zBegin, uintptr(0), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { - rc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+32212, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+32259, uintptr(0), uintptr(0), uintptr(0)) } } @@ -128688,7 +128746,7 @@ })(unsafe.Pointer(&struct{ uintptr }{xControl})).f(tls, (*Rbu_file)(unsafe.Pointer(p)).FpReal, SQLITE_FCNTL_ZIPVFS, bp+16) if rc == SQLITE_OK { rc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(pRbu)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32287, 0) + (*Sqlite3rbu)(unsafe.Pointer(pRbu)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32334, 0) } else if rc == SQLITE_NOTFOUND { (*Sqlite3rbu)(unsafe.Pointer(pRbu)).FpTargetFd = p (*Rbu_file)(unsafe.Pointer(p)).FpRbu = pRbu @@ -128713,7 +128771,7 @@ if rc == SQLITE_OK && op == SQLITE_FCNTL_VFSNAME { var pRbuVfs uintptr = (*Rbu_file)(unsafe.Pointer(p)).FpRbuVfs var zIn uintptr = *(*uintptr)(unsafe.Pointer(pArg)) - var zOut uintptr = Xsqlite3_mprintf(tls, ts+32310, libc.VaList(bp, (*Rbu_vfs)(unsafe.Pointer(pRbuVfs)).Fbase.FzName, zIn)) + var zOut uintptr = Xsqlite3_mprintf(tls, ts+32357, libc.VaList(bp, (*Rbu_vfs)(unsafe.Pointer(pRbuVfs)).Fbase.FzName, zIn)) *(*uintptr)(unsafe.Pointer(pArg)) = zOut if zOut == uintptr(0) { rc = SQLITE_NOMEM @@ -128873,7 +128931,7 @@ } if oflags&SQLITE_OPEN_MAIN_DB != 0 && - Xsqlite3_uri_boolean(tls, zName, ts+32321, 0) != 0 { + Xsqlite3_uri_boolean(tls, zName, ts+32368, 0) != 0 { oflags = SQLITE_OPEN_TEMP_DB | SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE | SQLITE_OPEN_EXCLUSIVE | SQLITE_OPEN_DELETEONCLOSE zOpen = uintptr(0) } @@ -129702,7 +129760,7 @@ rc = Xsqlite3_table_column_metadata(tls, db, zDb, zThis, uintptr(0), uintptr(0), uintptr(0), uintptr(0), uintptr(0), uintptr(0)) if rc == SQLITE_OK { zPragma = Xsqlite3_mprintf(tls, - ts+32332, 0) + ts+32379, 0) } else if rc == SQLITE_ERROR { zPragma = Xsqlite3_mprintf(tls, ts+1538, 0) } else { @@ -129715,7 +129773,7 @@ return rc } } else { - zPragma = Xsqlite3_mprintf(tls, ts+32453, libc.VaList(bp, zDb, zThis)) + zPragma = Xsqlite3_mprintf(tls, ts+32500, libc.VaList(bp, zDb, zThis)) } if !(zPragma != 0) { *(*uintptr)(unsafe.Pointer(pazCol)) = uintptr(0) @@ -130395,9 +130453,9 @@ for i = 0; i < nCol; i++ { if 
*(*U8)(unsafe.Pointer(abPK + uintptr(i))) != 0 { - zRet = Xsqlite3_mprintf(tls, ts+32482, + zRet = Xsqlite3_mprintf(tls, ts+32529, libc.VaList(bp, zRet, zSep, zDb1, zTab, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*8)), zDb2, zTab, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*8)))) - zSep = ts + 21509 + zSep = ts + 21556 if zRet == uintptr(0) { break } @@ -130420,9 +130478,9 @@ if int32(*(*U8)(unsafe.Pointer(abPK + uintptr(i)))) == 0 { bHave = 1 zRet = Xsqlite3_mprintf(tls, - ts+32516, + ts+32563, libc.VaList(bp, zRet, zSep, zDb1, zTab, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*8)), zDb2, zTab, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*8)))) - zSep = ts + 32557 + zSep = ts + 32604 if zRet == uintptr(0) { break } @@ -130430,7 +130488,7 @@ } if bHave == 0 { - zRet = Xsqlite3_mprintf(tls, ts+7514, 0) + zRet = Xsqlite3_mprintf(tls, ts+7503, 0) } return zRet @@ -130441,7 +130499,7 @@ defer tls.Free(40) var zRet uintptr = Xsqlite3_mprintf(tls, - ts+32562, + ts+32609, libc.VaList(bp, zDb1, zTbl, zDb2, zTbl, zExpr)) return zRet } @@ -130484,7 +130542,7 @@ rc = SQLITE_NOMEM } else { var zStmt uintptr = Xsqlite3_mprintf(tls, - ts+32640, + ts+32687, libc.VaList(bp, (*Sqlite3_session)(unsafe.Pointer(pSession)).FzDb, (*SessionTable)(unsafe.Pointer(pTab)).FzName, zFrom, (*SessionTable)(unsafe.Pointer(pTab)).FzName, zExpr, zExpr2)) if zStmt == uintptr(0) { rc = SQLITE_NOMEM @@ -130611,7 +130669,7 @@ if !(pzErrMsg != 0) { goto __16 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+32693, 0) + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+32740, 0) __16: ; rc = SQLITE_SCHEMA @@ -131087,7 +131145,7 @@ if 0 == Xsqlite3_stricmp(tls, ts+11332, zTab) { zSql = Xsqlite3_mprintf(tls, - ts+32720, libc.VaList(bp, zDb)) + ts+32767, libc.VaList(bp, zDb)) if zSql == uintptr(0) { *(*int32)(unsafe.Pointer(bp + 24)) = SQLITE_NOMEM } @@ -131096,18 +131154,18 @@ var zSep uintptr = ts + 1538 *(*SessionBuffer)(unsafe.Pointer(bp + 8)) = SessionBuffer{} - sessionAppendStr(tls, bp+8, ts+32830, bp+24) + sessionAppendStr(tls, bp+8, ts+32877, bp+24) sessionAppendIdent(tls, bp+8, zDb, bp+24) sessionAppendStr(tls, bp+8, ts+1551, bp+24) sessionAppendIdent(tls, bp+8, zTab, bp+24) - sessionAppendStr(tls, bp+8, ts+32845, bp+24) + sessionAppendStr(tls, bp+8, ts+32892, bp+24) for i = 0; i < nCol; i++ { if *(*U8)(unsafe.Pointer(abPK + uintptr(i))) != 0 { sessionAppendStr(tls, bp+8, zSep, bp+24) sessionAppendIdent(tls, bp+8, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*8)), bp+24) - sessionAppendStr(tls, bp+8, ts+32853, bp+24) + sessionAppendStr(tls, bp+8, ts+32900, bp+24) sessionAppendInteger(tls, bp+8, i+1, bp+24) - zSep = ts + 21509 + zSep = ts + 21556 } } zSql = (*SessionBuffer)(unsafe.Pointer(bp + 8)).FaBuf @@ -131216,7 +131274,7 @@ if (*Sqlite3_session)(unsafe.Pointer(pSession)).Frc != 0 { return (*Sqlite3_session)(unsafe.Pointer(pSession)).Frc } - *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3_exec(tls, (*Sqlite3_session)(unsafe.Pointer(pSession)).Fdb, ts+32859, uintptr(0), uintptr(0), uintptr(0)) + *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3_exec(tls, (*Sqlite3_session)(unsafe.Pointer(pSession)).Fdb, ts+32906, uintptr(0), uintptr(0), uintptr(0)) if *(*int32)(unsafe.Pointer(bp + 40)) != SQLITE_OK { return *(*int32)(unsafe.Pointer(bp + 40)) } @@ -131308,7 +131366,7 @@ } Xsqlite3_free(tls, (*SessionBuffer)(unsafe.Pointer(bp+24)).FaBuf) - Xsqlite3_exec(tls, db, ts+32879, uintptr(0), uintptr(0), uintptr(0)) + Xsqlite3_exec(tls, db, ts+32926, uintptr(0), uintptr(0), uintptr(0)) 
Xsqlite3_mutex_leave(tls, Xsqlite3_db_mutex(tls, db)) return *(*int32)(unsafe.Pointer(bp + 40)) } @@ -131571,7 +131629,7 @@ rc = sessionInputBuffer(tls, pIn, 9) if rc == SQLITE_OK { if (*SessionInput)(unsafe.Pointer(pIn)).FiNext >= (*SessionInput)(unsafe.Pointer(pIn)).FnData { - rc = Xsqlite3CorruptError(tls, 219078) + rc = Xsqlite3CorruptError(tls, 219169) } else { eType = int32(*(*U8)(unsafe.Pointer((*SessionInput)(unsafe.Pointer(pIn)).FaData + uintptr(libc.PostIncInt32(&(*SessionInput)(unsafe.Pointer(pIn)).FiNext, 1))))) @@ -131594,7 +131652,7 @@ rc = sessionInputBuffer(tls, pIn, *(*int32)(unsafe.Pointer(bp))) if rc == SQLITE_OK { if *(*int32)(unsafe.Pointer(bp)) < 0 || *(*int32)(unsafe.Pointer(bp)) > (*SessionInput)(unsafe.Pointer(pIn)).FnData-(*SessionInput)(unsafe.Pointer(pIn)).FiNext { - rc = Xsqlite3CorruptError(tls, 219098) + rc = Xsqlite3CorruptError(tls, 219189) } else { var enc U8 = func() uint8 { if eType == SQLITE_TEXT { @@ -131636,7 +131694,7 @@ nRead = nRead + sessionVarintGet(tls, (*SessionInput)(unsafe.Pointer(pIn)).FaData+uintptr((*SessionInput)(unsafe.Pointer(pIn)).FiNext+nRead), bp) if *(*int32)(unsafe.Pointer(bp)) < 0 || *(*int32)(unsafe.Pointer(bp)) > 65536 { - rc = Xsqlite3CorruptError(tls, 219152) + rc = Xsqlite3CorruptError(tls, 219243) } else { rc = sessionInputBuffer(tls, pIn, nRead+*(*int32)(unsafe.Pointer(bp))+100) nRead = nRead + *(*int32)(unsafe.Pointer(bp)) @@ -131697,7 +131755,7 @@ (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Ftblhdr.FnBuf = 0 sessionBufferGrow(tls, p+72, int64(nByte), bp+4) } else { - *(*int32)(unsafe.Pointer(bp + 4)) = Xsqlite3CorruptError(tls, 219240) + *(*int32)(unsafe.Pointer(bp + 4)) = Xsqlite3CorruptError(tls, 219331) } } @@ -131771,13 +131829,13 @@ } if (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FzTab == uintptr(0) || (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FbPatchset != 0 && (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FbInvert != 0 { - return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219326)) + return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219417)) } (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fop = int32(op) (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FbIndirect = int32(*(*U8)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fin.FaData + uintptr(libc.PostIncInt32(&(*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fin.FiNext, 1))))) if (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fop != SQLITE_UPDATE && (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fop != SQLITE_DELETE && (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fop != SQLITE_INSERT { - return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219332)) + return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219423)) } if paRec != 0 { @@ -131839,7 +131897,7 @@ if *(*U8)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FabPK + uintptr(i))) != 0 { *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FapValue + uintptr(i)*8)) = *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FapValue + uintptr(i+(*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FnCol)*8)) if *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FapValue + uintptr(i)*8)) == uintptr(0) { - return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219376)) + return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219467)) } *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FapValue + uintptr(i+(*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FnCol)*8)) = uintptr(0) } @@ -132212,7 
+132270,7 @@ goto __6 __11: - *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3CorruptError(tls, 219741) + *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3CorruptError(tls, 219832) goto finished_invert __6: ; @@ -132391,34 +132449,34 @@ (*SessionUpdate)(unsafe.Pointer(pUp)).FaMask = pUp + 1*24 libc.Xmemcpy(tls, (*SessionUpdate)(unsafe.Pointer(pUp)).FaMask, (*SessionApplyCtx)(unsafe.Pointer(p)).FaUpdateMask, uint64(nU32)*uint64(unsafe.Sizeof(U32(0)))) - sessionAppendStr(tls, bp, ts+32897, bp+16) + sessionAppendStr(tls, bp, ts+32944, bp+16) sessionAppendIdent(tls, bp, (*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FzTab, bp+16) - sessionAppendStr(tls, bp, ts+32910, bp+16) + sessionAppendStr(tls, bp, ts+32957, bp+16) for ii = 0; ii < (*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FnCol; ii++ { if int32(*(*U8)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FabPK + uintptr(ii)))) == 0 && *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FapValue + uintptr((*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FnCol+ii)*8)) != 0 { sessionAppendStr(tls, bp, zSep, bp+16) sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(ii)*8)), bp+16) - sessionAppendStr(tls, bp, ts+32916, bp+16) + sessionAppendStr(tls, bp, ts+32963, bp+16) sessionAppendInteger(tls, bp, ii*2+1, bp+16) zSep = ts + 14598 } } zSep = ts + 1538 - sessionAppendStr(tls, bp, ts+32845, bp+16) + sessionAppendStr(tls, bp, ts+32892, bp+16) for ii = 0; ii < (*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FnCol; ii++ { if *(*U8)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FabPK + uintptr(ii))) != 0 || bPatchset == 0 && *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FapValue + uintptr(ii)*8)) != 0 { sessionAppendStr(tls, bp, zSep, bp+16) if bStat1 != 0 && ii == 1 { sessionAppendStr(tls, bp, - ts+32921, bp+16) + ts+32968, bp+16) } else { sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(ii)*8)), bp+16) - sessionAppendStr(tls, bp, ts+32853, bp+16) + sessionAppendStr(tls, bp, ts+32900, bp+16) sessionAppendInteger(tls, bp, ii*2+2, bp+16) } - zSep = ts + 21509 + zSep = ts + 21556 } } @@ -132470,34 +132528,34 @@ *(*SessionBuffer)(unsafe.Pointer(bp)) = SessionBuffer{} var nPk int32 = 0 - sessionAppendStr(tls, bp, ts+32996, bp+16) + sessionAppendStr(tls, bp, ts+33043, bp+16) sessionAppendIdent(tls, bp, zTab, bp+16) - sessionAppendStr(tls, bp, ts+32845, bp+16) + sessionAppendStr(tls, bp, ts+32892, bp+16) for i = 0; i < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol; i++ { if *(*U8)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FabPK + uintptr(i))) != 0 { nPk++ sessionAppendStr(tls, bp, zSep, bp+16) sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(i)*8)), bp+16) - sessionAppendStr(tls, bp, ts+32916, bp+16) + sessionAppendStr(tls, bp, ts+32963, bp+16) sessionAppendInteger(tls, bp, i+1, bp+16) - zSep = ts + 21509 + zSep = ts + 21556 } } if nPk < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol { - sessionAppendStr(tls, bp, ts+33014, bp+16) + sessionAppendStr(tls, bp, ts+33061, bp+16) sessionAppendInteger(tls, bp, (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol+1, bp+16) - sessionAppendStr(tls, bp, ts+32557, bp+16) + sessionAppendStr(tls, bp, ts+32604, bp+16) zSep = ts + 1538 for i = 0; i < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol; i++ { if !(int32(*(*U8)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FabPK + 
uintptr(i)))) != 0) { sessionAppendStr(tls, bp, zSep, bp+16) sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(i)*8)), bp+16) - sessionAppendStr(tls, bp, ts+32853, bp+16) + sessionAppendStr(tls, bp, ts+32900, bp+16) sessionAppendInteger(tls, bp, i+1, bp+16) - zSep = ts + 33022 + zSep = ts + 33069 } } sessionAppendStr(tls, bp, ts+4941, bp+16) @@ -132524,9 +132582,9 @@ var i int32 *(*SessionBuffer)(unsafe.Pointer(bp)) = SessionBuffer{} - sessionAppendStr(tls, bp, ts+33027, bp+16) + sessionAppendStr(tls, bp, ts+33074, bp+16) sessionAppendIdent(tls, bp, zTab, bp+16) - sessionAppendStr(tls, bp, ts+21515, bp+16) + sessionAppendStr(tls, bp, ts+21562, bp+16) for i = 0; i < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol; i++ { if i != 0 { sessionAppendStr(tls, bp, ts+14598, bp+16) @@ -132534,9 +132592,9 @@ sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(i)*8)), bp+16) } - sessionAppendStr(tls, bp, ts+33045, bp+16) + sessionAppendStr(tls, bp, ts+33092, bp+16) for i = 1; i < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol; i++ { - sessionAppendStr(tls, bp, ts+33056, bp+16) + sessionAppendStr(tls, bp, ts+33103, bp+16) } sessionAppendStr(tls, bp, ts+4941, bp+16) @@ -132555,11 +132613,11 @@ var rc int32 = sessionSelectRow(tls, db, ts+11332, p) if rc == SQLITE_OK { rc = sessionPrepare(tls, db, p+16, - ts+33060) + ts+33107) } if rc == SQLITE_OK { rc = sessionPrepare(tls, db, p+8, - ts+33173) + ts+33220) } return rc } @@ -132587,7 +132645,7 @@ f func(*libc.TLS, uintptr, int32, uintptr) int32 })(unsafe.Pointer(&struct{ uintptr }{xValue})).f(tls, pIter, i, bp) if *(*uintptr)(unsafe.Pointer(bp)) == uintptr(0) { - rc = Xsqlite3CorruptError(tls, 220219) + rc = Xsqlite3CorruptError(tls, 220310) } else { rc = sessionBindValue(tls, pStmt, i+1, *(*uintptr)(unsafe.Pointer(bp))) } @@ -132840,7 +132898,7 @@ if *(*int32)(unsafe.Pointer(bp + 4)) != 0 { rc = sessionApplyOneOp(tls, pIter, pApply, xConflict, pCtx, uintptr(0), uintptr(0)) } else if *(*int32)(unsafe.Pointer(bp)) != 0 { - rc = Xsqlite3_exec(tls, db, ts+33317, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33364, uintptr(0), uintptr(0), uintptr(0)) if rc == SQLITE_OK { rc = sessionBindRow(tls, pIter, *(*uintptr)(unsafe.Pointer(&struct { @@ -132856,7 +132914,7 @@ rc = sessionApplyOneOp(tls, pIter, pApply, xConflict, pCtx, uintptr(0), uintptr(0)) } if rc == SQLITE_OK { - rc = Xsqlite3_exec(tls, db, ts+33338, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33385, uintptr(0), uintptr(0), uintptr(0)) } } } @@ -132929,10 +132987,10 @@ (*SessionApplyCtx)(unsafe.Pointer(bp + 48)).FbInvertConstraints = libc.BoolInt32(!!(flags&SQLITE_CHANGESETAPPLY_INVERT != 0)) Xsqlite3_mutex_enter(tls, Xsqlite3_db_mutex(tls, db)) if flags&SQLITE_CHANGESETAPPLY_NOSAVEPOINT == 0 { - rc = Xsqlite3_exec(tls, db, ts+33357, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33404, uintptr(0), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { - rc = Xsqlite3_exec(tls, db, ts+33383, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33430, uintptr(0), uintptr(0), uintptr(0)) } for rc == SQLITE_OK && SQLITE_ROW == Xsqlite3changeset_next(tls, pIter) { Xsqlite3changeset_op(tls, pIter, bp+176, bp+184, bp+188, uintptr(0)) @@ -132991,16 +133049,16 @@ if (*SessionApplyCtx)(unsafe.Pointer(bp+48)).FnCol == 0 { schemaMismatch = 1 Xsqlite3_log(tls, SQLITE_SCHEMA, - ts+33413, libc.VaList(bp+8, 
*(*uintptr)(unsafe.Pointer(bp + 200)))) + ts+33460, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(bp + 200)))) } else if (*SessionApplyCtx)(unsafe.Pointer(bp+48)).FnCol < *(*int32)(unsafe.Pointer(bp + 184)) { schemaMismatch = 1 Xsqlite3_log(tls, SQLITE_SCHEMA, - ts+33457, + ts+33504, libc.VaList(bp+16, *(*uintptr)(unsafe.Pointer(bp + 200)), (*SessionApplyCtx)(unsafe.Pointer(bp+48)).FnCol, *(*int32)(unsafe.Pointer(bp + 184)))) } else if *(*int32)(unsafe.Pointer(bp + 184)) < nMinCol || libc.Xmemcmp(tls, (*SessionApplyCtx)(unsafe.Pointer(bp+48)).FabPK, *(*uintptr)(unsafe.Pointer(bp + 192)), uint64(*(*int32)(unsafe.Pointer(bp + 184)))) != 0 { schemaMismatch = 1 Xsqlite3_log(tls, SQLITE_SCHEMA, - ts+33528, libc.VaList(bp+40, *(*uintptr)(unsafe.Pointer(bp + 200)))) + ts+33575, libc.VaList(bp+40, *(*uintptr)(unsafe.Pointer(bp + 200)))) } else { (*SessionApplyCtx)(unsafe.Pointer(bp + 48)).FnCol = *(*int32)(unsafe.Pointer(bp + 184)) if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(bp + 200)), ts+11332) { @@ -133054,14 +133112,14 @@ } } } - Xsqlite3_exec(tls, db, ts+33588, uintptr(0), uintptr(0), uintptr(0)) + Xsqlite3_exec(tls, db, ts+33635, uintptr(0), uintptr(0), uintptr(0)) if flags&SQLITE_CHANGESETAPPLY_NOSAVEPOINT == 0 { if rc == SQLITE_OK { - rc = Xsqlite3_exec(tls, db, ts+33618, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33665, uintptr(0), uintptr(0), uintptr(0)) } else { - Xsqlite3_exec(tls, db, ts+33642, uintptr(0), uintptr(0), uintptr(0)) - Xsqlite3_exec(tls, db, ts+33618, uintptr(0), uintptr(0), uintptr(0)) + Xsqlite3_exec(tls, db, ts+33689, uintptr(0), uintptr(0), uintptr(0)) + Xsqlite3_exec(tls, db, ts+33665, uintptr(0), uintptr(0), uintptr(0)) } } @@ -134309,7 +134367,7 @@ fts5yy_pop_parser_stack(tls, fts5yypParser) } - sqlite3Fts5ParseError(tls, pParse, ts+33670, 0) + sqlite3Fts5ParseError(tls, pParse, ts+33717, 0) (*Fts5yyParser)(unsafe.Pointer(fts5yypParser)).FpParse = pParse @@ -134597,7 +134655,7 @@ _ = fts5yymajor sqlite3Fts5ParseError(tls, - pParse, ts+33698, libc.VaList(bp, fts5yyminor.Fn, fts5yyminor.Fp)) + pParse, ts+33745, libc.VaList(bp, fts5yyminor.Fn, fts5yyminor.Fp)) (*Fts5yyParser)(unsafe.Pointer(fts5yypParser)).FpParse = pParse @@ -134784,7 +134842,7 @@ if n < 0 { n = int32(libc.Xstrlen(tls, z)) } - (*HighlightContext)(unsafe.Pointer(p)).FzOut = Xsqlite3_mprintf(tls, ts+33729, libc.VaList(bp, (*HighlightContext)(unsafe.Pointer(p)).FzOut, n, z)) + (*HighlightContext)(unsafe.Pointer(p)).FzOut = Xsqlite3_mprintf(tls, ts+33776, libc.VaList(bp, (*HighlightContext)(unsafe.Pointer(p)).FzOut, n, z)) if (*HighlightContext)(unsafe.Pointer(p)).FzOut == uintptr(0) { *(*int32)(unsafe.Pointer(pRc)) = SQLITE_NOMEM } @@ -134852,7 +134910,7 @@ var iCol int32 if nVal != 3 { - var zErr uintptr = ts + 33736 + var zErr uintptr = ts + 33783 Xsqlite3_result_error(tls, pCtx, zErr, -1) return } @@ -135034,7 +135092,7 @@ var nCol int32 if nVal != 5 { - var zErr uintptr = ts + 33786 + var zErr uintptr = ts + 33833 Xsqlite3_result_error(tls, pCtx, zErr, -1) return } @@ -135358,13 +135416,13 @@ defer tls.Free(96) *(*[3]Builtin)(unsafe.Pointer(bp)) = [3]Builtin{ - {FzFunc: ts + 33834, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { + {FzFunc: ts + 33881, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr, int32, uintptr) }{fts5SnippetFunction}))}, - {FzFunc: ts + 33842, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { + {FzFunc: ts + 33889, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr, 
int32, uintptr) }{fts5HighlightFunction}))}, - {FzFunc: ts + 33852, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { + {FzFunc: ts + 33899, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr, int32, uintptr) }{fts5Bm25Function}))}, } @@ -135915,7 +135973,7 @@ *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_OK var nCmd int32 = int32(libc.Xstrlen(tls, zCmd)) - if Xsqlite3_strnicmp(tls, ts+33857, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+33904, zCmd, nCmd) == 0 { var nByte int32 = int32(uint64(unsafe.Sizeof(int32(0))) * uint64(FTS5_MAX_PREFIX_INDEXES)) var p uintptr var bFirst int32 = 1 @@ -135942,14 +136000,14 @@ break } if int32(*(*int8)(unsafe.Pointer(p))) < '0' || int32(*(*int8)(unsafe.Pointer(p))) > '9' { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+33864, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+33911, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR break } if (*Fts5Config)(unsafe.Pointer(pConfig)).FnPrefix == FTS5_MAX_PREFIX_INDEXES { *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, - ts+33895, libc.VaList(bp, FTS5_MAX_PREFIX_INDEXES)) + ts+33942, libc.VaList(bp, FTS5_MAX_PREFIX_INDEXES)) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR break } @@ -135960,7 +136018,7 @@ } if nPre <= 0 || nPre >= 1000 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+33928, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+33975, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR break } @@ -135973,7 +136031,7 @@ return *(*int32)(unsafe.Pointer(bp + 40)) } - if Xsqlite3_strnicmp(tls, ts+33965, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+34012, zCmd, nCmd) == 0 { var p uintptr = zArg var nArg Sqlite3_int64 = Sqlite3_int64(libc.Xstrlen(tls, zArg) + uint64(1)) var azArg uintptr = sqlite3Fts5MallocZero(tls, bp+40, int64(uint64(unsafe.Sizeof(uintptr(0)))*uint64(nArg))) @@ -135982,7 +136040,7 @@ if azArg != 0 && pSpace != 0 { if (*Fts5Config)(unsafe.Pointer(pConfig)).FpTok != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+33974, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34021, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { for nArg = int64(0); p != 0 && *(*int8)(unsafe.Pointer(p)) != 0; nArg++ { @@ -136001,7 +136059,7 @@ } } if p == uintptr(0) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34007, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34054, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { *(*int32)(unsafe.Pointer(bp + 40)) = sqlite3Fts5GetTokenizer(tls, pGlobal, @@ -136016,14 +136074,14 @@ return *(*int32)(unsafe.Pointer(bp + 40)) } - if Xsqlite3_strnicmp(tls, ts+34041, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+34088, zCmd, nCmd) == 0 { if (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent != FTS5_CONTENT_NORMAL { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34049, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34096, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { if *(*int8)(unsafe.Pointer(zArg)) != 0 { (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent = FTS5_CONTENT_EXTERNAL - (*Fts5Config)(unsafe.Pointer(pConfig)).FzContent = sqlite3Fts5Mprintf(tls, bp+40, ts+34081, libc.VaList(bp+8, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, zArg)) + (*Fts5Config)(unsafe.Pointer(pConfig)).FzContent = sqlite3Fts5Mprintf(tls, bp+40, ts+34128, libc.VaList(bp+8, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, 
zArg)) } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent = FTS5_CONTENT_NONE } @@ -136031,9 +136089,9 @@ return *(*int32)(unsafe.Pointer(bp + 40)) } - if Xsqlite3_strnicmp(tls, ts+34087, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+34134, zCmd, nCmd) == 0 { if (*Fts5Config)(unsafe.Pointer(pConfig)).FzContentRowid != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34101, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34148, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FzContentRowid = sqlite3Fts5Strndup(tls, bp+40, zArg, -1) @@ -136041,9 +136099,9 @@ return *(*int32)(unsafe.Pointer(bp + 40)) } - if Xsqlite3_strnicmp(tls, ts+34139, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+34186, zCmd, nCmd) == 0 { if int32(*(*int8)(unsafe.Pointer(zArg))) != '0' && int32(*(*int8)(unsafe.Pointer(zArg))) != '1' || int32(*(*int8)(unsafe.Pointer(zArg + 1))) != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34150, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34197, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize = libc.Bool32(int32(*(*int8)(unsafe.Pointer(zArg))) == '1') @@ -136055,17 +136113,17 @@ *(*[4]Fts5Enum)(unsafe.Pointer(bp + 48)) = [4]Fts5Enum{ {FzName: ts + 8010, FeVal: FTS5_DETAIL_NONE}, {FzName: ts + 17329}, - {FzName: ts + 34185, FeVal: FTS5_DETAIL_COLUMNS}, + {FzName: ts + 34232, FeVal: FTS5_DETAIL_COLUMNS}, {}, } if libc.AssignPtrInt32(bp+40, fts5ConfigSetEnum(tls, bp+48, zArg, pConfig+92)) != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34193, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34240, 0) } return *(*int32)(unsafe.Pointer(bp + 40)) } - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34224, libc.VaList(bp+24, nCmd, zCmd)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34271, libc.VaList(bp+24, nCmd, zCmd)) return SQLITE_ERROR } @@ -136112,15 +136170,15 @@ defer tls.Free(16) var rc int32 = SQLITE_OK - if 0 == Xsqlite3_stricmp(tls, zCol, ts+22175) || + if 0 == Xsqlite3_stricmp(tls, zCol, ts+22222) || 0 == Xsqlite3_stricmp(tls, zCol, ts+16251) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34252, libc.VaList(bp, zCol)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34299, libc.VaList(bp, zCol)) rc = SQLITE_ERROR } else if zArg != 0 { - if 0 == Xsqlite3_stricmp(tls, zArg, ts+34282) { + if 0 == Xsqlite3_stricmp(tls, zArg, ts+34329) { *(*U8)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(p)).FabUnindexed + uintptr((*Fts5Config)(unsafe.Pointer(p)).FnCol))) = U8(1) } else { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34292, libc.VaList(bp+8, zArg)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34339, libc.VaList(bp+8, zArg)) rc = SQLITE_ERROR } } @@ -136137,13 +136195,13 @@ *(*int32)(unsafe.Pointer(bp + 24)) = SQLITE_OK *(*Fts5Buffer)(unsafe.Pointer(bp + 32)) = Fts5Buffer{} - sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+34323, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(p)).FzContentRowid)) + sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+34370, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(p)).FzContentRowid)) if (*Fts5Config)(unsafe.Pointer(p)).FeContent != FTS5_CONTENT_NONE { for i = 0; i < (*Fts5Config)(unsafe.Pointer(p)).FnCol; i++ { if (*Fts5Config)(unsafe.Pointer(p)).FeContent == FTS5_CONTENT_EXTERNAL { - 
sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+34328, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(p)).FazCol + uintptr(i)*8)))) + sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+34375, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(p)).FazCol + uintptr(i)*8)))) } else { - sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+34335, libc.VaList(bp+16, i)) + sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+34382, libc.VaList(bp+16, i)) } } } @@ -136181,8 +136239,8 @@ (*Fts5Config)(unsafe.Pointer(pRet)).FzName = sqlite3Fts5Strndup(tls, bp+40, *(*uintptr)(unsafe.Pointer(azArg + 2*8)), -1) (*Fts5Config)(unsafe.Pointer(pRet)).FbColumnsize = 1 (*Fts5Config)(unsafe.Pointer(pRet)).FeDetail = FTS5_DETAIL_FULL - if *(*int32)(unsafe.Pointer(bp + 40)) == SQLITE_OK && Xsqlite3_stricmp(tls, (*Fts5Config)(unsafe.Pointer(pRet)).FzName, ts+22175) == 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34343, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pRet)).FzName)) + if *(*int32)(unsafe.Pointer(bp + 40)) == SQLITE_OK && Xsqlite3_stricmp(tls, (*Fts5Config)(unsafe.Pointer(pRet)).FzName, ts+22222) == 0 { + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34390, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pRet)).FzName)) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } @@ -136214,7 +136272,7 @@ if *(*int32)(unsafe.Pointer(bp + 40)) == SQLITE_OK { if z == uintptr(0) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34372, libc.VaList(bp+8, zOrig)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34419, libc.VaList(bp+8, zOrig)) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { if bOption != 0 { @@ -136251,14 +136309,14 @@ var zTail uintptr = uintptr(0) if (*Fts5Config)(unsafe.Pointer(pRet)).FeContent == FTS5_CONTENT_NORMAL { - zTail = ts + 34041 + zTail = ts + 34088 } else if (*Fts5Config)(unsafe.Pointer(pRet)).FbColumnsize != 0 { - zTail = ts + 34392 + zTail = ts + 34439 } if zTail != 0 { (*Fts5Config)(unsafe.Pointer(pRet)).FzContent = sqlite3Fts5Mprintf(tls, - bp+40, ts+34400, libc.VaList(bp+16, (*Fts5Config)(unsafe.Pointer(pRet)).FzDb, (*Fts5Config)(unsafe.Pointer(pRet)).FzName, zTail)) + bp+40, ts+34447, libc.VaList(bp+16, (*Fts5Config)(unsafe.Pointer(pRet)).FzDb, (*Fts5Config)(unsafe.Pointer(pRet)).FzName, zTail)) } } @@ -136307,7 +136365,7 @@ *(*int32)(unsafe.Pointer(bp + 48)) = SQLITE_OK var zSql uintptr - zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34411, 0) + zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34458, 0) for i = 0; zSql != 0 && i < (*Fts5Config)(unsafe.Pointer(pConfig)).FnCol; i++ { var zSep uintptr = func() uintptr { if i == 0 { @@ -136315,10 +136373,10 @@ } return ts + 14598 }() - zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34427, libc.VaList(bp, zSql, zSep, *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(pConfig)).FazCol + uintptr(i)*8)))) + zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34474, libc.VaList(bp, zSql, zSep, *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(pConfig)).FazCol + uintptr(i)*8)))) } - zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34434, - libc.VaList(bp+24, zSql, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, ts+22175)) + zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34481, + libc.VaList(bp+24, zSql, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, ts+22222)) if zSql != 0 { *(*int32)(unsafe.Pointer(bp + 48)) = Xsqlite3_declare_vtab(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, zSql) @@ -136428,7 +136486,7 @@ var rc int32 = SQLITE_OK - if 
0 == Xsqlite3_stricmp(tls, zKey, ts+34460) { + if 0 == Xsqlite3_stricmp(tls, zKey, ts+34507) { var pgsz int32 = 0 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { pgsz = Xsqlite3_value_int(tls, pVal) @@ -136438,7 +136496,7 @@ } else { (*Fts5Config)(unsafe.Pointer(pConfig)).Fpgsz = pgsz } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34465) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34512) { var nHashSize int32 = -1 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { nHashSize = Xsqlite3_value_int(tls, pVal) @@ -136448,7 +136506,7 @@ } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FnHashSize = nHashSize } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34474) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34521) { var nAutomerge int32 = -1 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { nAutomerge = Xsqlite3_value_int(tls, pVal) @@ -136461,7 +136519,7 @@ } (*Fts5Config)(unsafe.Pointer(pConfig)).FnAutomerge = nAutomerge } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34484) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34531) { var nUsermerge int32 = -1 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { nUsermerge = Xsqlite3_value_int(tls, pVal) @@ -136471,7 +136529,7 @@ } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FnUsermerge = nUsermerge } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34494) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34541) { var nCrisisMerge int32 = -1 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { nCrisisMerge = Xsqlite3_value_int(tls, pVal) @@ -136487,7 +136545,7 @@ } (*Fts5Config)(unsafe.Pointer(pConfig)).FnCrisisMerge = nCrisisMerge } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+22175) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+22222) { var zIn uintptr = Xsqlite3_value_text(tls, pVal) rc = sqlite3Fts5ConfigParseRank(tls, zIn, bp, bp+8) @@ -136510,7 +136568,7 @@ bp := tls.Alloc(52) defer tls.Free(52) - var zSelect uintptr = ts + 34506 + var zSelect uintptr = ts + 34553 var zSql uintptr *(*uintptr)(unsafe.Pointer(bp + 40)) = uintptr(0) *(*int32)(unsafe.Pointer(bp + 32)) = SQLITE_OK @@ -136532,7 +136590,7 @@ for SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 40))) { var zK uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 40)), 0) var pVal uintptr = Xsqlite3_column_value(tls, *(*uintptr)(unsafe.Pointer(bp + 40)), 1) - if 0 == Xsqlite3_stricmp(tls, zK, ts+34538) { + if 0 == Xsqlite3_stricmp(tls, zK, ts+34585) { iVersion = Xsqlite3_value_int(tls, pVal) } else { *(*int32)(unsafe.Pointer(bp + 48)) = 0 @@ -136546,7 +136604,7 @@ *(*int32)(unsafe.Pointer(bp + 32)) = SQLITE_ERROR if (*Fts5Config)(unsafe.Pointer(pConfig)).FpzErrmsg != 0 { *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(pConfig)).FpzErrmsg)) = Xsqlite3_mprintf(tls, - ts+34546, + ts+34593, libc.VaList(bp+16, iVersion, FTS5_CURRENT_VERSION)) } } @@ -136644,7 +136702,7 @@ } } if int32(*(*int8)(unsafe.Pointer(z2))) == 0 { - sqlite3Fts5ParseError(tls, pParse, ts+34611, 0) + sqlite3Fts5ParseError(tls, pParse, ts+34658, 0) return FTS5_EOF } } @@ -136657,20 +136715,20 @@ { var z2 uintptr if sqlite3Fts5IsBareword(tls, *(*int8)(unsafe.Pointer(z))) == 0 { - sqlite3Fts5ParseError(tls, pParse, ts+34631, libc.VaList(bp, z)) + sqlite3Fts5ParseError(tls, pParse, ts+34678, libc.VaList(bp, z)) return FTS5_EOF } tok = FTS5_STRING for z2 = z + 1; sqlite3Fts5IsBareword(tls, *(*int8)(unsafe.Pointer(z2))) != 0; z2++ { } (*Fts5Token)(unsafe.Pointer(pToken)).Fn = int32((int64(z2) - int64(z)) / 
1) - if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 2 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+34662, uint64(2)) == 0 { + if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 2 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+34709, uint64(2)) == 0 { tok = FTS5_OR } - if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 3 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+34665, uint64(3)) == 0 { + if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 3 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+34712, uint64(3)) == 0 { tok = FTS5_NOT } - if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 3 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+30047, uint64(3)) == 0 { + if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 3 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+30094, uint64(3)) == 0 { tok = FTS5_AND } break @@ -138448,9 +138506,9 @@ bp := tls.Alloc(16) defer tls.Free(16) - if (*Fts5Token)(unsafe.Pointer(pTok)).Fn != 4 || libc.Xmemcmp(tls, ts+34669, (*Fts5Token)(unsafe.Pointer(pTok)).Fp, uint64(4)) != 0 { + if (*Fts5Token)(unsafe.Pointer(pTok)).Fn != 4 || libc.Xmemcmp(tls, ts+34716, (*Fts5Token)(unsafe.Pointer(pTok)).Fp, uint64(4)) != 0 { sqlite3Fts5ParseError(tls, - pParse, ts+33698, libc.VaList(bp, (*Fts5Token)(unsafe.Pointer(pTok)).Fn, (*Fts5Token)(unsafe.Pointer(pTok)).Fp)) + pParse, ts+33745, libc.VaList(bp, (*Fts5Token)(unsafe.Pointer(pTok)).Fn, (*Fts5Token)(unsafe.Pointer(pTok)).Fp)) } } @@ -138466,7 +138524,7 @@ var c int8 = *(*int8)(unsafe.Pointer((*Fts5Token)(unsafe.Pointer(p)).Fp + uintptr(i))) if int32(c) < '0' || int32(c) > '9' { sqlite3Fts5ParseError(tls, - pParse, ts+34674, libc.VaList(bp, (*Fts5Token)(unsafe.Pointer(p)).Fn, (*Fts5Token)(unsafe.Pointer(p)).Fp)) + pParse, ts+34721, libc.VaList(bp, (*Fts5Token)(unsafe.Pointer(p)).Fn, (*Fts5Token)(unsafe.Pointer(p)).Fp)) return } nNear = nNear*10 + (int32(*(*int8)(unsafe.Pointer((*Fts5Token)(unsafe.Pointer(p)).Fp + uintptr(i)))) - '0') @@ -138553,7 +138611,7 @@ } } if iCol == (*Fts5Config)(unsafe.Pointer(pConfig)).FnCol { - sqlite3Fts5ParseError(tls, pParse, ts+20512, libc.VaList(bp, z)) + sqlite3Fts5ParseError(tls, pParse, ts+20559, libc.VaList(bp, z)) } else { pRet = fts5ParseColset(tls, pParse, pColset, iCol) } @@ -138634,7 +138692,7 @@ *(*uintptr)(unsafe.Pointer(bp)) = pColset if (*Fts5Config)(unsafe.Pointer((*Fts5Parse)(unsafe.Pointer(pParse)).FpConfig)).FeDetail == FTS5_DETAIL_NONE { sqlite3Fts5ParseError(tls, pParse, - ts+34703, 0) + ts+34750, 0) } else { fts5ParseSetColset(tls, pParse, pExpr, pColset, bp) } @@ -138804,12 +138862,12 @@ (*Fts5ExprPhrase)(unsafe.Pointer(pPhrase)).FnTerm > 1 || (*Fts5ExprPhrase)(unsafe.Pointer(pPhrase)).FnTerm > 0 && (*Fts5ExprTerm)(unsafe.Pointer(pPhrase+32)).FbFirst != 0 { sqlite3Fts5ParseError(tls, pParse, - ts+34756, + ts+34803, libc.VaList(bp, func() uintptr { if (*Fts5ExprNearset)(unsafe.Pointer(pNear)).FnPhrase == 1 { - return ts + 34806 + return ts + 34853 } - return ts + 34669 + return ts + 34716 }())) Xsqlite3_free(tls, pRet) pRet = uintptr(0) @@ -139752,7 +139810,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpReader == uintptr(0) && rc == SQLITE_OK { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig rc = Xsqlite3_blob_open(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, - (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl, ts+34813, iRowid, 0, p+56) + (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl, ts+34860, iRowid, 0, 
p+56) } if rc == SQLITE_ERROR { @@ -139831,7 +139889,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpWriter == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig fts5IndexPrepareStmt(tls, p, p+64, Xsqlite3_mprintf(tls, - ts+34819, + ts+34866, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName))) if (*Fts5Index)(unsafe.Pointer(p)).Frc != 0 { return @@ -139856,7 +139914,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpDeleter == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig var zSql uintptr = Xsqlite3_mprintf(tls, - ts+34870, + ts+34917, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) if fts5IndexPrepareStmt(tls, p, p+72, zSql) != 0 { return @@ -139879,7 +139937,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpIdxDeleter == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig fts5IndexPrepareStmt(tls, p, p+88, Xsqlite3_mprintf(tls, - ts+34919, + ts+34966, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName))) } if (*Fts5Index)(unsafe.Pointer(p)).Frc == SQLITE_OK { @@ -140118,7 +140176,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).Frc == SQLITE_OK { if (*Fts5Index)(unsafe.Pointer(p)).FpDataVersion == uintptr(0) { (*Fts5Index)(unsafe.Pointer(p)).Frc = fts5IndexPrepareStmt(tls, p, p+112, - Xsqlite3_mprintf(tls, ts+34959, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer((*Fts5Index)(unsafe.Pointer(p)).FpConfig)).FzDb))) + Xsqlite3_mprintf(tls, ts+35006, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer((*Fts5Index)(unsafe.Pointer(p)).FpConfig)).FzDb))) if (*Fts5Index)(unsafe.Pointer(p)).Frc != 0 { return int64(0) } @@ -141317,7 +141375,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpIdxSelect == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig fts5IndexPrepareStmt(tls, p, p+96, Xsqlite3_mprintf(tls, - ts+34982, + ts+35029, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName))) } return (*Fts5Index)(unsafe.Pointer(p)).FpIdxSelect @@ -142783,7 +142841,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpIdxWriter == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig fts5IndexPrepareStmt(tls, p, p+80, Xsqlite3_mprintf(tls, - ts+35066, + ts+35113, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName))) } @@ -143865,13 +143923,13 @@ if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { (*Fts5Index)(unsafe.Pointer(p)).FpConfig = pConfig (*Fts5Index)(unsafe.Pointer(p)).FnWorkUnit = FTS5_WORK_UNIT - (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl = sqlite3Fts5Mprintf(tls, bp+8, ts+35123, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) + (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl = sqlite3Fts5Mprintf(tls, bp+8, ts+35170, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) if (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl != 0 && bCreate != 0 { *(*int32)(unsafe.Pointer(bp + 8)) = sqlite3Fts5CreateTable(tls, - pConfig, ts+25040, ts+35131, 0, pzErr) + pConfig, ts+25087, ts+35178, 0, pzErr) if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { *(*int32)(unsafe.Pointer(bp + 8)) = sqlite3Fts5CreateTable(tls, pConfig, ts+11472, - ts+35166, + ts+35213, 1, pzErr) } if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { @@ -144124,7 +144182,7 @@ sqlite3Fts5Put32(tls, bp, iNew) rc = Xsqlite3_blob_open(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, 
(*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl, - ts+34813, int64(FTS5_STRUCTURE_ROWID), 1, bp+8) + ts+34860, int64(FTS5_STRUCTURE_ROWID), 1, bp+8) if rc == SQLITE_OK { Xsqlite3_blob_write(tls, *(*uintptr)(unsafe.Pointer(bp + 8)), bp, 4, 0) rc = Xsqlite3_blob_close(tls, *(*uintptr)(unsafe.Pointer(bp + 8))) @@ -144238,7 +144296,7 @@ } fts5IndexPrepareStmt(tls, p, bp+24, Xsqlite3_mprintf(tls, - ts+35210, + ts+35257, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, (*Fts5StructureSegment)(unsafe.Pointer(pSeg)).FiSegid))) for (*Fts5Index)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) { @@ -144408,7 +144466,7 @@ } else { (*Fts5Buffer)(unsafe.Pointer(bp + 16)).Fn = 0 fts5SegiterPoslist(tls, p, *(*uintptr)(unsafe.Pointer(bp))+96+uintptr((*Fts5CResult)(unsafe.Pointer((*Fts5Iter)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FaFirst+1*4)).FiFirst)*120, uintptr(0), bp+16) - sqlite3Fts5BufferAppendBlob(tls, p+52, bp+16, uint32(4), ts+35296) + sqlite3Fts5BufferAppendBlob(tls, p+52, bp+16, uint32(4), ts+35343) for 0 == sqlite3Fts5PoslistNext64(tls, (*Fts5Buffer)(unsafe.Pointer(bp+16)).Fp, (*Fts5Buffer)(unsafe.Pointer(bp+16)).Fn, bp+32, bp+40) { var iCol int32 = int32(*(*I64)(unsafe.Pointer(bp + 40)) >> 32) var iTokOff int32 = int32(*(*I64)(unsafe.Pointer(bp + 40)) & int64(0x7FFFFFFF)) @@ -144679,7 +144737,7 @@ if (*Fts5Config)(unsafe.Pointer(pConfig)).FbLock != 0 { (*Fts5Table)(unsafe.Pointer(pTab)).Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+35301, 0) + ts+35348, 0) return SQLITE_ERROR } @@ -145103,7 +145161,7 @@ (*Fts5Sorter)(unsafe.Pointer(pSorter)).FnIdx = nPhrase rc = fts5PrepareStatement(tls, pSorter, pConfig, - ts+35340, + ts+35387, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zRank, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, func() uintptr { if zRankArgs != 0 { @@ -145119,9 +145177,9 @@ }(), func() uintptr { if bDesc != 0 { - return ts + 35395 + return ts + 35442 } - return ts + 35400 + return ts + 35447 }())) (*Fts5Cursor)(unsafe.Pointer(pCsr)).FpSorter = pSorter @@ -145167,12 +145225,12 @@ (*Fts5Cursor)(unsafe.Pointer(pCsr)).FePlan = FTS5_PLAN_SPECIAL - if n == 5 && 0 == Xsqlite3_strnicmp(tls, ts+35404, z, n) { + if n == 5 && 0 == Xsqlite3_strnicmp(tls, ts+35451, z, n) { (*Fts5Cursor)(unsafe.Pointer(pCsr)).FiSpecial = I64(sqlite3Fts5IndexReads(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.FpIndex)) } else if n == 2 && 0 == Xsqlite3_strnicmp(tls, ts+5041, z, n) { (*Fts5Cursor)(unsafe.Pointer(pCsr)).FiSpecial = (*Fts5Cursor)(unsafe.Pointer(pCsr)).FiCsrId } else { - (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, ts+35410, libc.VaList(bp, n, z)) + (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, ts+35457, libc.VaList(bp, n, z)) rc = SQLITE_ERROR } @@ -145203,7 +145261,7 @@ var zRankArgs uintptr = (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRankArgs if zRankArgs != 0 { - var zSql uintptr = sqlite3Fts5Mprintf(tls, bp+16, ts+35438, libc.VaList(bp, zRankArgs)) + var zSql uintptr = sqlite3Fts5Mprintf(tls, bp+16, ts+35485, libc.VaList(bp, zRankArgs)) if zSql != 0 { *(*uintptr)(unsafe.Pointer(bp + 24)) = uintptr(0) *(*int32)(unsafe.Pointer(bp + 16)) = Xsqlite3_prepare_v3(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, zSql, -1, @@ -145234,7 +145292,7 @@ if *(*int32)(unsafe.Pointer(bp + 16)) == SQLITE_OK { pAux = 
fts5FindAuxiliary(tls, pTab, zRank) if pAux == uintptr(0) { - (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, ts+35448, libc.VaList(bp+8, zRank)) + (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, ts+35495, libc.VaList(bp+8, zRank)) *(*int32)(unsafe.Pointer(bp + 16)) = SQLITE_ERROR } } @@ -145266,14 +145324,14 @@ *(*int32)(unsafe.Pointer(pCsr + 80)) |= FTS5CSR_FREE_ZRANK } else if rc == SQLITE_ERROR { (*Sqlite3_vtab)(unsafe.Pointer((*Fts5Cursor)(unsafe.Pointer(pCsr)).Fbase.FpVtab)).FzErrMsg = Xsqlite3_mprintf(tls, - ts+35469, libc.VaList(bp, z)) + ts+35516, libc.VaList(bp, z)) } } else { if (*Fts5Config)(unsafe.Pointer(pConfig)).FzRank != 0 { (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRank = (*Fts5Config)(unsafe.Pointer(pConfig)).FzRank (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRankArgs = (*Fts5Config)(unsafe.Pointer(pConfig)).FzRankArgs } else { - (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRank = ts + 33852 + (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRank = ts + 33899 (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRankArgs = uintptr(0) } } @@ -145329,7 +145387,7 @@ goto __1 } (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+35301, 0) + ts+35348, 0) return SQLITE_ERROR __1: ; @@ -145546,7 +145604,7 @@ goto __40 } *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(pConfig)).FpzErrmsg)) = Xsqlite3_mprintf(tls, - ts+35502, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) + ts+35549, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) rc = SQLITE_ERROR goto __41 __40: @@ -145691,28 +145749,28 @@ var rc int32 = SQLITE_OK *(*int32)(unsafe.Pointer(bp)) = 0 - if 0 == Xsqlite3_stricmp(tls, ts+35538, zCmd) { + if 0 == Xsqlite3_stricmp(tls, ts+35585, zCmd) { if (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NORMAL { fts5SetVtabError(tls, pTab, - ts+35549, 0) + ts+35596, 0) rc = SQLITE_ERROR } else { rc = sqlite3Fts5StorageDeleteAll(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage) } - } else if 0 == Xsqlite3_stricmp(tls, ts+35629, zCmd) { + } else if 0 == Xsqlite3_stricmp(tls, ts+35676, zCmd) { if (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NONE { fts5SetVtabError(tls, pTab, - ts+35637, 0) + ts+35684, 0) rc = SQLITE_ERROR } else { rc = sqlite3Fts5StorageRebuild(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage) } } else if 0 == Xsqlite3_stricmp(tls, ts+16918, zCmd) { rc = sqlite3Fts5StorageOptimize(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage) - } else if 0 == Xsqlite3_stricmp(tls, ts+35693, zCmd) { + } else if 0 == Xsqlite3_stricmp(tls, ts+35740, zCmd) { var nMerge int32 = Xsqlite3_value_int(tls, pVal) rc = sqlite3Fts5StorageMerge(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage, nMerge) - } else if 0 == Xsqlite3_stricmp(tls, ts+35699, zCmd) { + } else if 0 == Xsqlite3_stricmp(tls, ts+35746, zCmd) { var iArg int32 = Xsqlite3_value_int(tls, pVal) rc = sqlite3Fts5StorageIntegrity(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage, iArg) } else { @@ -145783,12 +145841,12 @@ if eType0 == SQLITE_INTEGER && fts5IsContentless(tls, pTab) != 0 { (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+35715, + ts+35762, libc.VaList(bp, func() uintptr { if nArg > 1 { - return ts + 20413 + return ts + 20460 } - return ts + 35752 + return ts + 35799 }(), (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) *(*int32)(unsafe.Pointer(bp + 16)) = SQLITE_ERROR } else if nArg == 1 { @@ -146418,7 +146476,7 @@ pCsr = 
fts5CursorFromCsrid(tls, (*Fts5Auxiliary)(unsafe.Pointer(pAux)).FpGlobal, iCsrId) if pCsr == uintptr(0) || (*Fts5Cursor)(unsafe.Pointer(pCsr)).FePlan == 0 { - var zErr uintptr = Xsqlite3_mprintf(tls, ts+35764, libc.VaList(bp, iCsrId)) + var zErr uintptr = Xsqlite3_mprintf(tls, ts+35811, libc.VaList(bp, iCsrId)) Xsqlite3_result_error(tls, context, zErr, -1) Xsqlite3_free(tls, zErr) } else { @@ -146662,7 +146720,7 @@ }()) if pMod == uintptr(0) { rc = SQLITE_ERROR - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35785, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(azArg)))) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35832, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(azArg)))) } else { rc = (*struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 @@ -146681,7 +146739,7 @@ (*Fts5Config)(unsafe.Pointer(pConfig)).FpTokApi = pMod + 16 if rc != SQLITE_OK { if pzErr != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35807, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35854, 0) } } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FePattern = sqlite3Fts5TokenizerPattern(tls, @@ -146728,7 +146786,7 @@ var ppApi uintptr _ = nArg - ppApi = Xsqlite3_value_pointer(tls, *(*uintptr)(unsafe.Pointer(apArg)), ts+35838) + ppApi = Xsqlite3_value_pointer(tls, *(*uintptr)(unsafe.Pointer(apArg)), ts+35885) if ppApi != 0 { *(*uintptr)(unsafe.Pointer(ppApi)) = pGlobal } @@ -146737,7 +146795,7 @@ func fts5SourceIdFunc(tls *libc.TLS, pCtx uintptr, nArg int32, apUnused uintptr) { _ = nArg _ = apUnused - Xsqlite3_result_text(tls, pCtx, ts+35851, -1, libc.UintptrFromInt32(-1)) + Xsqlite3_result_text(tls, pCtx, ts+35898, -1, libc.UintptrFromInt32(-1)) } func fts5ShadowName(tls *libc.TLS, zName uintptr) int32 { @@ -146751,7 +146809,7 @@ } var azName2 = [5]uintptr{ - ts + 35942, ts + 34041, ts + 25040, ts + 34392, ts + 11472, + ts + 35989, ts + 34088, ts + 25087, ts + 34439, ts + 11472, } func fts5Init(tls *libc.TLS, db uintptr) int32 { @@ -146775,7 +146833,7 @@ (*Fts5Global)(unsafe.Pointer(pGlobal)).Fapi.FxFindTokenizer = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr, uintptr) int32 }{fts5FindTokenizer})) - rc = Xsqlite3_create_module_v2(tls, db, ts+35949, uintptr(unsafe.Pointer(&fts5Mod)), p, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5ModuleDestroy}))) + rc = Xsqlite3_create_module_v2(tls, db, ts+35996, uintptr(unsafe.Pointer(&fts5Mod)), p, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5ModuleDestroy}))) if rc == SQLITE_OK { rc = sqlite3Fts5IndexInit(tls, db) } @@ -146793,13 +146851,13 @@ } if rc == SQLITE_OK { rc = Xsqlite3_create_function(tls, - db, ts+35949, 1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { + db, ts+35996, 1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{fts5Fts5Func})), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { rc = Xsqlite3_create_function(tls, - db, ts+35954, 0, + db, ts+36001, 0, SQLITE_UTF8|SQLITE_DETERMINISTIC|SQLITE_INNOCUOUS, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) @@ -146856,17 +146914,17 @@ if *(*uintptr)(unsafe.Pointer(p + 40 + uintptr(eStmt)*8)) == uintptr(0) { *(*[11]uintptr)(unsafe.Pointer(bp + 128)) = [11]uintptr{ - ts + 35969, - ts + 36037, - ts + 36106, - ts + 36139, - ts + 36178, - ts + 36218, - ts + 36257, - ts + 36298, - ts + 36337, - ts + 36379, - ts + 36419, + ts + 36016, + ts + 36084, + ts + 36153, + ts + 36186, + 
ts + 36225, + ts + 36265, + ts + 36304, + ts + 36345, + ts + 36384, + ts + 36426, + ts + 36466, } var pC uintptr = (*Fts5Storage)(unsafe.Pointer(p)).FpConfig var zSql uintptr = uintptr(0) @@ -146968,18 +147026,18 @@ defer tls.Free(80) var rc int32 = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36442, + ts+36489, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36546, + ts+36593, libc.VaList(bp+48, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) } if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NORMAL { rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36584, + ts+36631, libc.VaList(bp+64, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) } return rc @@ -146991,7 +147049,7 @@ if *(*int32)(unsafe.Pointer(pRc)) == SQLITE_OK { *(*int32)(unsafe.Pointer(pRc)) = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36622, + ts+36669, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zTail, zName, zTail)) } } @@ -147003,14 +147061,14 @@ var pConfig uintptr = (*Fts5Storage)(unsafe.Pointer(pStorage)).FpConfig *(*int32)(unsafe.Pointer(bp)) = sqlite3Fts5StorageSync(tls, pStorage) - fts5StorageRenameOne(tls, pConfig, bp, ts+25040, zName) + fts5StorageRenameOne(tls, pConfig, bp, ts+25087, zName) fts5StorageRenameOne(tls, pConfig, bp, ts+11472, zName) - fts5StorageRenameOne(tls, pConfig, bp, ts+35942, zName) + fts5StorageRenameOne(tls, pConfig, bp, ts+35989, zName) if (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { - fts5StorageRenameOne(tls, pConfig, bp, ts+34392, zName) + fts5StorageRenameOne(tls, pConfig, bp, ts+34439, zName) } if (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NORMAL { - fts5StorageRenameOne(tls, pConfig, bp, ts+34041, zName) + fts5StorageRenameOne(tls, pConfig, bp, ts+34088, zName) } return *(*int32)(unsafe.Pointer(bp)) } @@ -147022,17 +147080,17 @@ var rc int32 *(*uintptr)(unsafe.Pointer(bp + 64)) = uintptr(0) - rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, bp+64, ts+36664, + rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, bp+64, ts+36711, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zPost, zDefn, func() uintptr { if bWithout != 0 { - return ts + 29693 + return ts + 29740 } return ts + 1538 }())) if *(*uintptr)(unsafe.Pointer(bp + 64)) != 0 { *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, - ts+36694, + ts+36741, libc.VaList(bp+40, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zPost, *(*uintptr)(unsafe.Pointer(bp + 64)))) Xsqlite3_free(tls, *(*uintptr)(unsafe.Pointer(bp + 64))) } @@ -147069,27 +147127,27 @@ } else { var i int32 var iOff int32 - Xsqlite3_snprintf(tls, nDefn, zDefn, ts+36738, 0) + Xsqlite3_snprintf(tls, nDefn, zDefn, ts+36785, 0) iOff = int32(libc.Xstrlen(tls, zDefn)) for i = 0; i < (*Fts5Config)(unsafe.Pointer(pConfig)).FnCol; i++ { - 
Xsqlite3_snprintf(tls, nDefn-iOff, zDefn+uintptr(iOff), ts+36761, libc.VaList(bp, i)) + Xsqlite3_snprintf(tls, nDefn-iOff, zDefn+uintptr(iOff), ts+36808, libc.VaList(bp, i)) iOff = iOff + int32(libc.Xstrlen(tls, zDefn+uintptr(iOff))) } - rc = sqlite3Fts5CreateTable(tls, pConfig, ts+34041, zDefn, 0, pzErr) + rc = sqlite3Fts5CreateTable(tls, pConfig, ts+34088, zDefn, 0, pzErr) } Xsqlite3_free(tls, zDefn) } if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { rc = sqlite3Fts5CreateTable(tls, - pConfig, ts+34392, ts+36767, 0, pzErr) + pConfig, ts+34439, ts+36814, 0, pzErr) } if rc == SQLITE_OK { rc = sqlite3Fts5CreateTable(tls, - pConfig, ts+35942, ts+36799, 1, pzErr) + pConfig, ts+35989, ts+36846, 1, pzErr) } if rc == SQLITE_OK { - rc = sqlite3Fts5StorageConfigValue(tls, p, ts+34538, uintptr(0), FTS5_CURRENT_VERSION) + rc = sqlite3Fts5StorageConfigValue(tls, p, ts+34585, uintptr(0), FTS5_CURRENT_VERSION) } } @@ -147295,12 +147353,12 @@ (*Fts5Storage)(unsafe.Pointer(p)).FbTotalsValid = 0 rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36816, + ts+36863, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36866, + ts+36913, libc.VaList(bp+32, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) } @@ -147308,7 +147366,7 @@ rc = sqlite3Fts5IndexReinit(tls, (*Fts5Storage)(unsafe.Pointer(p)).FpIndex) } if rc == SQLITE_OK { - rc = sqlite3Fts5StorageConfigValue(tls, p, ts+34538, uintptr(0), FTS5_CURRENT_VERSION) + rc = sqlite3Fts5StorageConfigValue(tls, p, ts+34585, uintptr(0), FTS5_CURRENT_VERSION) } return rc } @@ -147484,7 +147542,7 @@ var zSql uintptr var rc int32 - zSql = Xsqlite3_mprintf(tls, ts+36895, + zSql = Xsqlite3_mprintf(tls, ts+36942, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zSuffix)) if zSql == uintptr(0) { rc = SQLITE_NOMEM @@ -147666,14 +147724,14 @@ if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NORMAL { *(*I64)(unsafe.Pointer(bp + 48)) = int64(0) - rc = fts5StorageCount(tls, p, ts+34041, bp+48) + rc = fts5StorageCount(tls, p, ts+34088, bp+48) if rc == SQLITE_OK && *(*I64)(unsafe.Pointer(bp + 48)) != (*Fts5Storage)(unsafe.Pointer(p)).FnTotalRow { rc = SQLITE_CORRUPT | int32(1)<<8 } } if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { *(*I64)(unsafe.Pointer(bp + 56)) = int64(0) - rc = fts5StorageCount(tls, p, ts+34392, bp+56) + rc = fts5StorageCount(tls, p, ts+34439, bp+56) if rc == SQLITE_OK && *(*I64)(unsafe.Pointer(bp + 56)) != (*Fts5Storage)(unsafe.Pointer(p)).FnTotalRow { rc = SQLITE_CORRUPT | int32(1)<<8 } @@ -147868,9 +147926,9 @@ libc.Xmemcpy(tls, p, uintptr(unsafe.Pointer(&aAsciiTokenChar)), uint64(unsafe.Sizeof(aAsciiTokenChar))) for i = 0; rc == SQLITE_OK && i < nArg; i = i + 2 { var zArg uintptr = *(*uintptr)(unsafe.Pointer(azArg + uintptr(i+1)*8)) - if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36927) { + if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36974) { fts5AsciiAddExceptions(tls, p, zArg, 1) - } else if 0 == Xsqlite3_stricmp(tls, 
*(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36938) { + } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36985) { fts5AsciiAddExceptions(tls, p, zArg, 0) } else { rc = SQLITE_ERROR @@ -148085,7 +148143,7 @@ } else { p = Xsqlite3_malloc(tls, int32(unsafe.Sizeof(Unicode61Tokenizer{}))) if p != 0 { - var zCat uintptr = ts + 36949 + var zCat uintptr = ts + 36996 var i int32 libc.Xmemset(tls, p, 0, uint64(unsafe.Sizeof(Unicode61Tokenizer{}))) @@ -148097,7 +148155,7 @@ } for i = 0; rc == SQLITE_OK && i < nArg; i = i + 2 { - if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36958) { + if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37005) { zCat = *(*uintptr)(unsafe.Pointer(azArg + uintptr(i+1)*8)) } } @@ -148108,18 +148166,18 @@ for i = 0; rc == SQLITE_OK && i < nArg; i = i + 2 { var zArg uintptr = *(*uintptr)(unsafe.Pointer(azArg + uintptr(i+1)*8)) - if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36969) { + if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37016) { if int32(*(*int8)(unsafe.Pointer(zArg))) != '0' && int32(*(*int8)(unsafe.Pointer(zArg))) != '1' && int32(*(*int8)(unsafe.Pointer(zArg))) != '2' || *(*int8)(unsafe.Pointer(zArg + 1)) != 0 { rc = SQLITE_ERROR } else { (*Unicode61Tokenizer)(unsafe.Pointer(p)).FeRemoveDiacritic = int32(*(*int8)(unsafe.Pointer(zArg))) - '0' } - } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36927) { + } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36974) { rc = fts5UnicodeAddExceptions(tls, p, zArg, 1) - } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36938) { + } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36985) { rc = fts5UnicodeAddExceptions(tls, p, zArg, 0) - } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36958) { + } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37005) { } else { rc = SQLITE_ERROR } @@ -148395,7 +148453,7 @@ var rc int32 = SQLITE_OK var pRet uintptr *(*uintptr)(unsafe.Pointer(bp)) = uintptr(0) - var zBase uintptr = ts + 36987 + var zBase uintptr = ts + 37034 if nArg > 0 { zBase = *(*uintptr)(unsafe.Pointer(azArg)) @@ -148537,7 +148595,7 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*int8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'a': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+36997, aBuf+uintptr(nBuf-2), uint64(2)) { + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37044, aBuf+uintptr(nBuf-2), uint64(2)) { if fts5Porter_MGt1(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 } @@ -148545,11 +148603,11 @@ break case 'c': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37000, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37047, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37005, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37052, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } @@ -148557,7 +148615,7 @@ break case 'e': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37010, 
aBuf+uintptr(nBuf-2), uint64(2)) { + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37057, aBuf+uintptr(nBuf-2), uint64(2)) { if fts5Porter_MGt1(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 } @@ -148565,7 +148623,7 @@ break case 'i': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37013, aBuf+uintptr(nBuf-2), uint64(2)) { + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37060, aBuf+uintptr(nBuf-2), uint64(2)) { if fts5Porter_MGt1(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 } @@ -148573,11 +148631,11 @@ break case 'l': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37016, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37063, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37021, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37068, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } @@ -148585,19 +148643,19 @@ break case 'n': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37026, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37073, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37030, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37077, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt1(tls, aBuf, nBuf-5) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37036, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37083, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } - } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37041, aBuf+uintptr(nBuf-3), uint64(3)) { + } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37088, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -148605,11 +148663,11 @@ break case 'o': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37045, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37092, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1_and_S_or_T(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } - } else if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37049, aBuf+uintptr(nBuf-2), uint64(2)) { + } else if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37096, aBuf+uintptr(nBuf-2), uint64(2)) { if fts5Porter_MGt1(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 } @@ -148617,7 +148675,7 @@ break case 's': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37052, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37099, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -148625,11 +148683,11 @@ break case 't': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37056, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37103, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } - } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37060, aBuf+uintptr(nBuf-3), uint64(3)) { + } else if nBuf > 3 && 0 == 
libc.Xmemcmp(tls, ts+37107, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -148637,7 +148695,7 @@ break case 'u': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37064, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37111, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -148645,7 +148703,7 @@ break case 'v': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37068, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37115, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -148653,7 +148711,7 @@ break case 'z': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37072, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37119, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -148669,24 +148727,24 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*int8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'a': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37076, aBuf+uintptr(nBuf-2), uint64(2)) { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37056, uint64(3)) + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37123, aBuf+uintptr(nBuf-2), uint64(2)) { + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37103, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 + 3 ret = 1 } break case 'b': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37079, aBuf+uintptr(nBuf-2), uint64(2)) { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37082, uint64(3)) + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37126, aBuf+uintptr(nBuf-2), uint64(2)) { + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37129, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 + 3 ret = 1 } break case 'i': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37086, aBuf+uintptr(nBuf-2), uint64(2)) { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37072, uint64(3)) + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37133, aBuf+uintptr(nBuf-2), uint64(2)) { + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37119, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 + 3 ret = 1 } @@ -148701,44 +148759,44 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*int8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'a': - if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37089, aBuf+uintptr(nBuf-7), uint64(7)) { + if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37136, aBuf+uintptr(nBuf-7), uint64(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37056, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37103, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } - } else if nBuf > 6 && 0 == libc.Xmemcmp(tls, ts+37097, aBuf+uintptr(nBuf-6), uint64(6)) { + } else if nBuf > 6 && 0 == libc.Xmemcmp(tls, ts+37144, aBuf+uintptr(nBuf-6), uint64(6)) { if fts5Porter_MGt0(tls, aBuf, nBuf-6) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-6), ts+37104, uint64(4)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-6), ts+37151, uint64(4)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 6 + 4 } } break case 'c': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37109, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37156, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37005, uint64(4)) 
+ libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37052, uint64(4)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 4 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37114, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37161, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37000, uint64(4)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37047, uint64(4)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 4 } } break case 'e': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37119, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37166, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37072, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37119, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 3 } } break case 'g': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37124, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37171, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+15464, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 3 @@ -148747,91 +148805,91 @@ break case 'l': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37129, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37176, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt0(tls, aBuf, nBuf-3) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37082, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37129, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 + 3 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37133, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37180, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+36997, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37044, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 2 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37138, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37185, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37041, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37088, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 3 } - } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37144, aBuf+uintptr(nBuf-3), uint64(3)) { + } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37191, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt0(tls, aBuf, nBuf-3) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37148, uint64(1)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37195, uint64(1)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 + 1 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37150, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37197, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37064, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37111, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 3 } } break case 'o': - if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37156, aBuf+uintptr(nBuf-7), uint64(7)) { + if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37203, aBuf+uintptr(nBuf-7), uint64(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 
{ - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37072, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37119, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37164, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37211, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37056, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37103, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 3 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37170, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37217, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37056, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37103, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 3 } } break case 's': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37175, aBuf+uintptr(nBuf-5), uint64(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37222, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+36997, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37044, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } - } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37181, aBuf+uintptr(nBuf-7), uint64(7)) { + } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37228, aBuf+uintptr(nBuf-7), uint64(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37068, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37115, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } - } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37189, aBuf+uintptr(nBuf-7), uint64(7)) { + } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37236, aBuf+uintptr(nBuf-7), uint64(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37197, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37244, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } - } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37201, aBuf+uintptr(nBuf-7), uint64(7)) { + } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37248, aBuf+uintptr(nBuf-7), uint64(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37064, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37111, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } } break case 't': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37209, aBuf+uintptr(nBuf-5), uint64(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37256, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+36997, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37044, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37215, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37262, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37068, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37115, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 3 } - } else if nBuf > 6 && 0 == libc.Xmemcmp(tls, ts+37221, aBuf+uintptr(nBuf-6), uint64(6)) { + } else if nBuf > 6 && 0 == 
libc.Xmemcmp(tls, ts+37268, aBuf+uintptr(nBuf-6), uint64(6)) { if fts5Porter_MGt0(tls, aBuf, nBuf-6) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-6), ts+37082, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-6), ts+37129, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 6 + 3 } } @@ -148846,16 +148904,16 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*int8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'a': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37228, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37275, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37013, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37060, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 2 } } break case 's': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37233, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37280, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } @@ -148863,21 +148921,21 @@ break case 't': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37238, aBuf+uintptr(nBuf-5), uint64(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37285, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37013, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37060, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37244, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37291, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37013, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37060, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } } break case 'u': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37197, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37244, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt0(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -148885,7 +148943,7 @@ break case 'v': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37250, aBuf+uintptr(nBuf-5), uint64(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37297, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 } @@ -148893,9 +148951,9 @@ break case 'z': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37256, aBuf+uintptr(nBuf-5), uint64(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37303, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+36997, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37044, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } } @@ -148910,12 +148968,12 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*int8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'e': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37262, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37309, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt0(tls, aBuf, nBuf-3) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37266, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37313, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 + 2 } - } else if nBuf > 2 && 0 == 
libc.Xmemcmp(tls, ts+37269, aBuf+uintptr(nBuf-2), uint64(2)) { + } else if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37316, aBuf+uintptr(nBuf-2), uint64(2)) { if fts5Porter_Vowel(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 ret = 1 @@ -148924,7 +148982,7 @@ break case 'n': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37272, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37319, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_Vowel(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 ret = 1 @@ -149080,7 +149138,7 @@ (*TrigramTokenizer)(unsafe.Pointer(pNew)).FbFold = 1 for i = 0; rc == SQLITE_OK && i < nArg; i = i + 2 { var zArg uintptr = *(*uintptr)(unsafe.Pointer(azArg + uintptr(i+1)*8)) - if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37276) { + if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37323) { if int32(*(*int8)(unsafe.Pointer(zArg))) != '0' && int32(*(*int8)(unsafe.Pointer(zArg))) != '1' || *(*int8)(unsafe.Pointer(zArg + 1)) != 0 { rc = SQLITE_ERROR } else { @@ -149260,22 +149318,22 @@ defer tls.Free(128) *(*[4]BuiltinTokenizer)(unsafe.Pointer(bp)) = [4]BuiltinTokenizer{ - {FzName: ts + 36987, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { + {FzName: ts + 37034, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 }{fts5UnicodeCreate})), FxDelete: *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5UnicodeDelete})), FxTokenize: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, int32, uintptr) int32 }{fts5UnicodeTokenize}))}}, - {FzName: ts + 37291, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { + {FzName: ts + 37338, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 }{fts5AsciiCreate})), FxDelete: *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5AsciiDelete})), FxTokenize: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, int32, uintptr) int32 }{fts5AsciiTokenize}))}}, - {FzName: ts + 37297, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { + {FzName: ts + 37344, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 }{fts5PorterCreate})), FxDelete: *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5PorterDelete})), FxTokenize: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, int32, uintptr) int32 }{fts5PorterTokenize}))}}, - {FzName: ts + 37304, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { + {FzName: ts + 37351, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 }{fts5TriCreate})), FxDelete: *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5TriDelete})), FxTokenize: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, int32, uintptr) int32 @@ -150418,14 +150476,14 @@ var zCopy uintptr = sqlite3Fts5Strndup(tls, bp+8, zType, -1) if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { sqlite3Fts5Dequote(tls, zCopy) - if Xsqlite3_stricmp(tls, zCopy, ts+37312) == 0 { + if Xsqlite3_stricmp(tls, zCopy, ts+37359) == 0 { *(*int32)(unsafe.Pointer(peType)) = FTS5_VOCAB_COL - } 
else if Xsqlite3_stricmp(tls, zCopy, ts+37316) == 0 { + } else if Xsqlite3_stricmp(tls, zCopy, ts+37363) == 0 { *(*int32)(unsafe.Pointer(peType)) = FTS5_VOCAB_ROW - } else if Xsqlite3_stricmp(tls, zCopy, ts+37320) == 0 { + } else if Xsqlite3_stricmp(tls, zCopy, ts+37367) == 0 { *(*int32)(unsafe.Pointer(peType)) = FTS5_VOCAB_INSTANCE } else { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+37329, libc.VaList(bp, zCopy)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+37376, libc.VaList(bp, zCopy)) *(*int32)(unsafe.Pointer(bp + 8)) = SQLITE_ERROR } Xsqlite3_free(tls, zCopy) @@ -150451,19 +150509,19 @@ defer tls.Free(36) *(*[3]uintptr)(unsafe.Pointer(bp + 8)) = [3]uintptr{ - ts + 37363, - ts + 37403, - ts + 37438, + ts + 37410, + ts + 37450, + ts + 37485, } var pRet uintptr = uintptr(0) *(*int32)(unsafe.Pointer(bp + 32)) = SQLITE_OK var bDb int32 - bDb = libc.Bool32(argc == 6 && libc.Xstrlen(tls, *(*uintptr)(unsafe.Pointer(argv + 1*8))) == uint64(4) && libc.Xmemcmp(tls, ts+23336, *(*uintptr)(unsafe.Pointer(argv + 1*8)), uint64(4)) == 0) + bDb = libc.Bool32(argc == 6 && libc.Xstrlen(tls, *(*uintptr)(unsafe.Pointer(argv + 1*8))) == uint64(4) && libc.Xmemcmp(tls, ts+23383, *(*uintptr)(unsafe.Pointer(argv + 1*8)), uint64(4)) == 0) if argc != 5 && bDb == 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+37481, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+37528, 0) *(*int32)(unsafe.Pointer(bp + 32)) = SQLITE_ERROR } else { var nByte int32 @@ -150596,11 +150654,11 @@ if (*Fts5VocabTable)(unsafe.Pointer(pTab)).FbBusy != 0 { (*Sqlite3_vtab)(unsafe.Pointer(pVTab)).FzErrMsg = Xsqlite3_mprintf(tls, - ts+37514, libc.VaList(bp, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) + ts+37561, libc.VaList(bp, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) return SQLITE_ERROR } zSql = sqlite3Fts5Mprintf(tls, bp+64, - ts+37545, + ts+37592, libc.VaList(bp+16, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) if zSql != 0 { *(*int32)(unsafe.Pointer(bp + 64)) = Xsqlite3_prepare_v2(tls, (*Fts5VocabTable)(unsafe.Pointer(pTab)).Fdb, zSql, -1, bp+72, uintptr(0)) @@ -150624,7 +150682,7 @@ *(*uintptr)(unsafe.Pointer(bp + 72)) = uintptr(0) if *(*int32)(unsafe.Pointer(bp + 64)) == SQLITE_OK { (*Sqlite3_vtab)(unsafe.Pointer(pVTab)).FzErrMsg = Xsqlite3_mprintf(tls, - ts+37596, libc.VaList(bp+48, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) + ts+37643, libc.VaList(bp+48, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) *(*int32)(unsafe.Pointer(bp + 64)) = SQLITE_ERROR } } else { @@ -151019,7 +151077,7 @@ func sqlite3Fts5VocabInit(tls *libc.TLS, pGlobal uintptr, db uintptr) int32 { var p uintptr = pGlobal - return Xsqlite3_create_module_v2(tls, db, ts+37622, uintptr(unsafe.Pointer(&fts5Vocab)), p, uintptr(0)) + return Xsqlite3_create_module_v2(tls, db, ts+37669, uintptr(unsafe.Pointer(&fts5Vocab)), p, uintptr(0)) } var fts5Vocab = Sqlite3_module{ @@ -151041,7 +151099,7 @@ // ************* End of stmt.c *********************************************** // Return the source-id for this library func Xsqlite3_sourceid(tls *libc.TLS) uintptr { - return ts + 37632 + return ts + 37679 } func init() { 
@@ -152015,5 +152073,5 @@ *(*func(*libc.TLS, uintptr, int32, uintptr) int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&vfs_template)) + 128)) = rbuVfsGetLastError } -var ts1 = "3.41.0\x00ATOMIC_INTRINSICS=0\x00COMPILER=clang-13.0.0\x00DEFAULT_AUTOVACUUM\x00DEFAULT_CACHE_SIZE=-2000\x00DEFAULT_FILE_FORMAT=4\x00DEFAULT_JOURNAL_SIZE_LIMIT=-1\x00DEFAULT_MEMSTATUS=0\x00DEFAULT_MMAP_SIZE=0\x00DEFAULT_PAGE_SIZE=4096\x00DEFAULT_PCACHE_INITSZ=20\x00DEFAULT_RECURSIVE_TRIGGERS\x00DEFAULT_SECTOR_SIZE=4096\x00DEFAULT_SYNCHRONOUS=2\x00DEFAULT_WAL_AUTOCHECKPOINT=1000\x00DEFAULT_WAL_SYNCHRONOUS=2\x00DEFAULT_WORKER_THREADS=0\x00ENABLE_COLUMN_METADATA\x00ENABLE_FTS5\x00ENABLE_GEOPOLY\x00ENABLE_MATH_FUNCTIONS\x00ENABLE_MEMORY_MANAGEMENT\x00ENABLE_OFFSET_SQL_FUNC\x00ENABLE_PREUPDATE_HOOK\x00ENABLE_RBU\x00ENABLE_RTREE\x00ENABLE_SESSION\x00ENABLE_SNAPSHOT\x00ENABLE_STAT4\x00ENABLE_UNLOCK_NOTIFY\x00LIKE_DOESNT_MATCH_BLOBS\x00MALLOC_SOFT_LIMIT=1024\x00MAX_ATTACHED=10\x00MAX_COLUMN=2000\x00MAX_COMPOUND_SELECT=500\x00MAX_DEFAULT_PAGE_SIZE=8192\x00MAX_EXPR_DEPTH=1000\x00MAX_FUNCTION_ARG=127\x00MAX_LENGTH=1000000000\x00MAX_LIKE_PATTERN_LENGTH=50000\x00MAX_MMAP_SIZE=0\x00MAX_PAGE_COUNT=1073741823\x00MAX_PAGE_SIZE=65536\x00MAX_SQL_LENGTH=1000000000\x00MAX_TRIGGER_DEPTH=1000\x00MAX_VARIABLE_NUMBER=32766\x00MAX_VDBE_OP=250000000\x00MAX_WORKER_THREADS=8\x00MUTEX_NOOP\x00SOUNDEX\x00SYSTEM_MALLOC\x00TEMP_STORE=1\x00THREADSAFE=1\x00BINARY\x00ANY\x00BLOB\x00INT\x00INTEGER\x00REAL\x00TEXT\x0020b:20e\x0020c:20e\x0020e\x0040f-21a-21d\x00now\x00local time unavailable\x00second\x00minute\x00hour\x00\x00\x00day\x00\x00\x00\x00month\x00\x00year\x00\x00\x00auto\x00julianday\x00localtime\x00unixepoch\x00utc\x00weekday \x00start of \x00month\x00year\x00day\x00%02d\x00%06.3f\x00%03d\x00%.16g\x00%lld\x00%04d\x00date\x00time\x00datetime\x00strftime\x00current_time\x00current_timestamp\x00current_date\x00failed to allocate %u bytes of memory\x00failed memory resize %u to %u bytes\x00out of memory\x000123456789ABCDEF0123456789abcdef\x00-x0\x00X0\x00%\x00NaN\x00Inf\x00\x00NULL\x00(NULL)\x00.\x00(join-%u)\x00(subquery-%u)\x00thstndrd\x00922337203685477580\x00API call with %s database connection 
pointer\x00unopened\x00invalid\x00Savepoint\x00AutoCommit\x00Transaction\x00Checkpoint\x00JournalMode\x00Vacuum\x00VFilter\x00VUpdate\x00Init\x00Goto\x00Gosub\x00InitCoroutine\x00Yield\x00MustBeInt\x00Jump\x00Once\x00If\x00IfNot\x00IsType\x00Not\x00IfNullRow\x00SeekLT\x00SeekLE\x00SeekGE\x00SeekGT\x00IfNotOpen\x00IfNoHope\x00NoConflict\x00NotFound\x00Found\x00SeekRowid\x00NotExists\x00Last\x00IfSmaller\x00SorterSort\x00Sort\x00Rewind\x00SorterNext\x00Prev\x00Next\x00IdxLE\x00IdxGT\x00IdxLT\x00Or\x00And\x00IdxGE\x00RowSetRead\x00RowSetTest\x00Program\x00FkIfZero\x00IsNull\x00NotNull\x00Ne\x00Eq\x00Gt\x00Le\x00Lt\x00Ge\x00ElseEq\x00IfPos\x00IfNotZero\x00DecrJumpZero\x00IncrVacuum\x00VNext\x00Filter\x00PureFunc\x00Function\x00Return\x00EndCoroutine\x00HaltIfNull\x00Halt\x00Integer\x00Int64\x00String\x00BeginSubrtn\x00Null\x00SoftNull\x00Blob\x00Variable\x00Move\x00Copy\x00SCopy\x00IntCopy\x00FkCheck\x00ResultRow\x00CollSeq\x00AddImm\x00RealAffinity\x00Cast\x00Permutation\x00Compare\x00IsTrue\x00ZeroOrNull\x00Offset\x00Column\x00TypeCheck\x00Affinity\x00MakeRecord\x00Count\x00ReadCookie\x00SetCookie\x00ReopenIdx\x00BitAnd\x00BitOr\x00ShiftLeft\x00ShiftRight\x00Add\x00Subtract\x00Multiply\x00Divide\x00Remainder\x00Concat\x00OpenRead\x00OpenWrite\x00BitNot\x00OpenDup\x00OpenAutoindex\x00String8\x00OpenEphemeral\x00SorterOpen\x00SequenceTest\x00OpenPseudo\x00Close\x00ColumnsUsed\x00SeekScan\x00SeekHit\x00Sequence\x00NewRowid\x00Insert\x00RowCell\x00Delete\x00ResetCount\x00SorterCompare\x00SorterData\x00RowData\x00Rowid\x00NullRow\x00SeekEnd\x00IdxInsert\x00SorterInsert\x00IdxDelete\x00DeferredSeek\x00IdxRowid\x00FinishSeek\x00Destroy\x00Clear\x00ResetSorter\x00CreateBtree\x00SqlExec\x00ParseSchema\x00LoadAnalysis\x00DropTable\x00DropIndex\x00Real\x00DropTrigger\x00IntegrityCk\x00RowSetAdd\x00Param\x00FkCounter\x00MemMax\x00OffsetLimit\x00AggInverse\x00AggStep\x00AggStep1\x00AggValue\x00AggFinal\x00Expire\x00CursorLock\x00CursorUnlock\x00TableLock\x00VBegin\x00VCreate\x00VDestroy\x00VOpen\x00VInitIn\x00VColumn\x00VRename\x00Pagecount\x00MaxPgcnt\x00ClrSubtype\x00FilterAdd\x00Trace\x00CursorHint\x00ReleaseReg\x00Noop\x00Explain\x00Abortable\x00open\x00close\x00access\x00getcwd\x00stat\x00fstat\x00ftruncate\x00fcntl\x00read\x00pread\x00pread64\x00write\x00pwrite\x00pwrite64\x00fchmod\x00fallocate\x00unlink\x00openDirectory\x00mkdir\x00rmdir\x00fchown\x00geteuid\x00mmap\x00munmap\x00mremap\x00getpagesize\x00readlink\x00lstat\x00ioctl\x00attempt to open \"%s\" as file descriptor %d\x00/dev/null\x00os_unix.c:%d: (%d) %s(%s) - %s\x00cannot fstat db file %s\x00file unlinked while open: %s\x00multiple links to file: %s\x00file renamed while open: %s\x00%s\x00full_fsync\x00%s-shm\x00readonly_shm\x00psow\x00unix-excl\x00%s.lock\x00/var/tmp\x00/usr/tmp\x00/tmp\x00SQLITE_TMPDIR\x00TMPDIR\x00%s/etilqs_%llx%c\x00modeof\x00fsync\x00/dev/urandom\x00unix\x00unix-none\x00unix-dotfile\x00memdb\x00memdb(%p,%lld)\x00PRAGMA \"%w\".page_count\x00ATTACH x AS %Q\x00recovered %d pages from %s\x00-journal\x00-wal\x00nolock\x00immutable\x00PRAGMA table_list\x00recovered %d frames from WAL file %s\x00cannot limit WAL size: %s\x00SQLite format 3\x00:memory:\x00@ \x00\n\x00invalid page number %d\x002nd reference to page %d\x00Failed to read ptrmap key=%d\x00Bad ptr map entry key=%d expected=(%d,%d) got=(%d,%d)\x00failed to get page %d\x00freelist leaf count too big on page %d\x00%s is %d but should be %d\x00size\x00overflow list length\x00Page %u: \x00unable to get the page. 
error code=%d\x00btreeInitPage() returns error code %d\x00free space corruption\x00On tree page %u cell %d: \x00On page %u at right child: \x00Offset %d out of range %d..%d\x00Extends off end of page\x00Rowid %lld out of order\x00Child page depth differs\x00Multiple uses for byte %u of page %u\x00Fragmentation of %d bytes reported as %d on page %u\x00Main freelist: \x00max rootpage (%d) disagrees with header (%d)\x00incremental_vacuum enabled with a max rootpage of zero\x00Page %d is never used\x00Pointer map page %d is referenced\x00unknown database %s\x00destination database is in use\x00source and destination must be distinct\x00%!.15g\x00-\x00%s%s\x00k(%d\x00B\x00,%s%s%s\x00N.\x00)\x00%.18s-%s\x00%s(%d)\x00%d\x00(blob)\x00vtab:%p\x00%c%u\x00]\x00program\x00?\x008\x0016LE\x0016BE\x00addr\x00opcode\x00p1\x00p2\x00p3\x00p4\x00p5\x00comment\x00id\x00parent\x00notused\x00detail\x00%.4c%s%.16c\x00MJ delete: %s\x00MJ collide: %s\x00-mj%06X9%02X\x00FOREIGN KEY constraint failed\x00a CHECK constraint\x00a generated column\x00an index\x00non-deterministic use of %s() in %s\x00API called with finalized prepared statement\x00API called with NULL prepared statement\x00string or blob too big\x00bind on a busy prepared statement: [%s]\x00-- \x00'%.*q'\x00zeroblob(%d)\x00x'\x00%02x\x00'\x00%s constraint failed\x00%z: %s\x00abort at %d in [%s]: %s\x00cannot store %s value in %s column %s.%s\x00cannot open savepoint - SQL statements in progress\x00no such savepoint: %s\x00cannot release savepoint - SQL statements in progress\x00cannot commit transaction - SQL statements in progress\x00cannot start a transaction within a transaction\x00cannot rollback - no transaction is active\x00cannot commit - no transaction is active\x00database schema has changed\x00index corruption\x00sqlite_master\x00SELECT*FROM\"%w\".%s WHERE %s ORDER BY rowid\x00too many levels of trigger recursion\x00cannot change %s wal mode from within a transaction\x00into\x00out of\x00database table is locked: %s\x00ValueList\x00-- %s\x00statement aborts at %d: [%s] %s\x00NOT NULL\x00UNIQUE\x00CHECK\x00FOREIGN KEY\x00cannot open value of type %s\x00null\x00real\x00integer\x00no such rowid: %lld\x00cannot open virtual table: %s\x00cannot open table without rowid: %s\x00cannot open view: %s\x00no such column: \"%s\"\x00foreign key\x00indexed\x00cannot open %s column for writing\x00sqlite_\x00sqlite_temp_master\x00sqlite_temp_schema\x00sqlite_schema\x00main\x00*\x00new\x00old\x00excluded\x00misuse of aliased aggregate %s\x00misuse of aliased window function %s\x00row value misused\x00double-quoted string literal: \"%w\"\x00coalesce\x00no such column\x00ambiguous column name\x00%s: %s.%s.%s\x00%s: %s.%s\x00%s: %s\x00partial index WHERE clauses\x00index expressions\x00CHECK constraints\x00generated columns\x00%s prohibited in %s\x00true\x00false\x00the \".\" operator\x00second argument to %#T() must be a constant between 0.0 and 1.0\x00not authorized to use function: %#T\x00non-deterministic functions\x00%#T() may not be used as a window function\x00window\x00aggregate\x00misuse of %s function %#T()\x00no such function: %#T\x00wrong number of arguments to function %#T()\x00FILTER may not be used with non-aggregate %#T()\x00subqueries\x00parameters\x00%r %s BY term out of range - should be between 1 and %d\x00too many terms in ORDER BY clause\x00ORDER\x00%r ORDER BY term does not match any column in the result set\x00too many terms in %s BY clause\x00HAVING clause on a non-aggregate query\x00GROUP\x00aggregate functions are not allowed in the 
GROUP BY clause\x00Expression tree is too large (maximum depth %d)\x00IN(...) element has %d term%s - expected %d\x00s\x000\x00too many arguments on function %T\x00unsafe use of %#T()\x00variable number must be between ?1 and ?%d\x00too many SQL variables\x00%d columns assigned %d values\x00too many columns in %s\x00_ROWID_\x00ROWID\x00OID\x00USING ROWID SEARCH ON TABLE %s FOR IN-OPERATOR\x00USING INDEX %s FOR IN-OPERATOR\x00sub-select returns %d columns - expected %d\x00REUSE LIST SUBQUERY %d\x00%sLIST SUBQUERY %d\x00CORRELATED \x00REUSE SUBQUERY %d\x00%sSCALAR SUBQUERY %d\x001\x000x\x00hex literal too big: %s%#T\x00generated column loop on \"%s\"\x00blob\x00text\x00numeric\x00flexnum\x00none\x00misuse of aggregate: %#T()\x00unknown function: %#T()\x00RAISE() may only be used within a trigger-program\x00B\x00C\x00D\x00E\x00F\x00table %s may not be altered\x00SELECT 1 FROM \"%w\".sqlite_master WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%' AND sqlite_rename_test(%Q, sql, type, name, %d, %Q, %d)=NULL \x00SELECT 1 FROM temp.sqlite_master WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%' AND sqlite_rename_test(%Q, sql, type, name, 1, %Q, %d)=NULL \x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_quotefix(%Q, sql)WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%'\x00UPDATE temp.sqlite_master SET sql = sqlite_rename_quotefix('temp', sql)WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%'\x00there is already another table or index with this name: %s\x00table\x00view %s may not be altered\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, %d) WHERE (type!='index' OR tbl_name=%Q COLLATE nocase)AND name NOT LIKE 'sqliteX_%%' ESCAPE 'X'\x00UPDATE %Q.sqlite_master SET tbl_name = %Q, name = CASE WHEN type='table' THEN %Q WHEN name LIKE 'sqliteX_autoindex%%' ESCAPE 'X' AND type='index' THEN 'sqlite_autoindex_' || %Q || substr(name,%d+18) ELSE name END WHERE tbl_name=%Q COLLATE nocase AND (type='table' OR type='index' OR type='trigger');\x00sqlite_sequence\x00UPDATE \"%w\".sqlite_sequence set name = %Q WHERE name = %Q\x00UPDATE sqlite_temp_schema SET sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, 1), tbl_name = CASE WHEN tbl_name=%Q COLLATE nocase AND sqlite_rename_test(%Q, sql, type, name, 1, 'after rename', 0) THEN %Q ELSE tbl_name END WHERE type IN ('view', 'trigger')\x00after rename\x00SELECT raise(ABORT,%Q) FROM \"%w\".\"%w\"\x00Cannot add a PRIMARY KEY column\x00Cannot add a UNIQUE column\x00Cannot add a REFERENCES column with non-NULL default value\x00Cannot add a NOT NULL column with default value NULL\x00Cannot add a column with non-constant default\x00cannot add a STORED column\x00UPDATE \"%w\".sqlite_master SET sql = printf('%%.%ds, ',sql) || %Q || substr(sql,1+length(printf('%%.%ds',sql))) WHERE type = 'table' AND name = %Q\x00SELECT CASE WHEN quick_check GLOB 'CHECK*' THEN raise(ABORT,'CHECK constraint failed') ELSE raise(ABORT,'NOT NULL constraint failed') END FROM pragma_quick_check(%Q,%Q) WHERE quick_check GLOB 'CHECK*' OR quick_check GLOB 'NULL*'\x00virtual tables may not be altered\x00Cannot add a column to a view\x00sqlite_altertab_%s\x00view\x00virtual table\x00cannot %s %s \"%s\"\x00drop column from\x00rename columns of\x00no such column: \"%T\"\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, %d) WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND (type != 'index' OR 
tbl_name = %Q)\x00UPDATE temp.sqlite_master SET sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, 1) WHERE type IN ('trigger', 'view')\x00error in %s %s%s%s: %s\x00 \x00CREATE \x00\"%w\" \x00%Q%s\x00%.*s%s\x00cannot drop %s column: \"%s\"\x00PRIMARY KEY\x00cannot drop column \"%s\": no other columns exist\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_drop_column(%d, sql, %d) WHERE (type=='table' AND tbl_name=%Q COLLATE nocase)\x00after drop column\x00sqlite_rename_column\x00sqlite_rename_table\x00sqlite_rename_test\x00sqlite_drop_column\x00sqlite_rename_quotefix\x00CREATE TABLE %Q.%s(%s)\x00DELETE FROM %Q.%s WHERE %s=%Q\x00DELETE FROM %Q.%s\x00sqlite_stat1\x00tbl,idx,stat\x00sqlite_stat4\x00tbl,idx,neq,nlt,ndlt,sample\x00sqlite_stat3\x00stat_init\x00stat_push\x00%llu\x00 %llu\x00%llu \x00stat_get\x00sqlite\\_%\x00BBB\x00idx\x00tbl\x00unordered*\x00sz=[0-9]*\x00noskipscan*\x00SELECT idx,count(*) FROM %Q.sqlite_stat4 GROUP BY idx\x00SELECT idx,neq,nlt,ndlt,sample FROM %Q.sqlite_stat4\x00SELECT tbl,idx,stat FROM %Q.sqlite_stat1\x00x\x00\x00too many attached databases - max %d\x00database %s is already in use\x00database is already attached\x00attached databases must use the same text encoding as main database\x00unable to open database: %s\x00no such database: %s\x00cannot detach database %s\x00database %s is locked\x00sqlite_detach\x00sqlite_attach\x00%s cannot use variables\x00%s %T cannot reference objects in database %s\x00authorizer malfunction\x00%s.%s\x00%s.%z\x00access to %z is prohibited\x00not authorized\x00pragma_\x00no such view\x00no such table\x00corrupt database\x00unknown database %T\x00object name reserved for internal use: %s\x00temporary table name must be unqualified\x00%s %T already exists\x00there is already an index named %s\x00sqlite_returning\x00cannot use RETURNING in a trigger\x00too many columns on %s\x00always\x00generated\x00duplicate column name: %s\x00default value of column [%s] is not constant\x00cannot use DEFAULT on a generated column\x00generated columns cannot be part of the PRIMARY KEY\x00table \"%s\" has more than one primary key\x00AUTOINCREMENT is only allowed on an INTEGER PRIMARY KEY\x00virtual tables cannot use computed columns\x00virtual\x00stored\x00error in generated column \"%s\"\x00,\x00\n \x00,\n \x00\n)\x00CREATE TABLE \x00 TEXT\x00 NUM\x00 INT\x00 REAL\x00unknown datatype for %s.%s: \"%s\"\x00missing datatype for %s.%s\x00AUTOINCREMENT not allowed on WITHOUT ROWID tables\x00PRIMARY KEY missing on table %s\x00must have at least one non-generated column\x00TABLE\x00VIEW\x00CREATE %s %.*s\x00UPDATE %Q.sqlite_master SET type='%s', name=%Q, tbl_name=%Q, rootpage=#%d, sql=%Q WHERE rowid=#%d\x00CREATE TABLE %Q.sqlite_sequence(name,seq)\x00tbl_name='%q' AND type!='trigger'\x00parameters are not allowed in views\x00view %s is circularly defined\x00corrupt schema\x00UPDATE %Q.sqlite_master SET rootpage=%d WHERE #%d AND rootpage=#%d\x00sqlite_stat%d\x00DELETE FROM %Q.sqlite_sequence WHERE name=%Q\x00DELETE FROM %Q.sqlite_master WHERE tbl_name=%Q and type!='trigger'\x00table %s may not be dropped\x00use DROP TABLE to delete table %s\x00use DROP VIEW to delete view %s\x00foreign key on %s should reference only one column of table %T\x00number of columns in foreign key does not match the number of columns in the referenced table\x00unknown column \"%s\" in foreign key definition\x00unsupported use of NULLS %s\x00FIRST\x00LAST\x00index\x00cannot create a TEMP index on non-TEMP table \"%s\"\x00table %s may not be indexed\x00views may not be 
indexed\x00virtual tables may not be indexed\x00there is already a table named %s\x00index %s already exists\x00sqlite_autoindex_%s_%d\x00expressions prohibited in PRIMARY KEY and UNIQUE constraints\x00conflicting ON CONFLICT clauses specified\x00invalid rootpage\x00CREATE%s INDEX %.*s\x00 UNIQUE\x00INSERT INTO %Q.sqlite_master VALUES('index',%Q,%Q,#%d,%Q);\x00name='%q' AND type='index'\x00no such index: %S\x00index associated with UNIQUE or PRIMARY KEY constraint cannot be dropped\x00DELETE FROM %Q.sqlite_master WHERE name=%Q AND type='index'\x00too many FROM clause terms, max: %d\x00a JOIN clause is required before %s\x00ON\x00USING\x00BEGIN\x00ROLLBACK\x00COMMIT\x00RELEASE\x00unable to open a temporary database file for storing temporary tables\x00index '%q'\x00, \x00%s.rowid\x00unable to identify the object to be reindexed\x00duplicate WITH table name: %s\x00no such collation sequence: %s\x00unsafe use of virtual table \"%s\"\x00table %s may not be modified\x00cannot modify %s because it is a view\x00rows deleted\x00integer overflow\x00%.*f\x00LIKE or GLOB pattern too complex\x00ESCAPE expression must be a single character\x00%!.20e\x00%Q\x00?000\x00MATCH\x00like\x00implies_nonnull_row\x00expr_compare\x00expr_implies_expr\x00affinity\x00soundex\x00load_extension\x00sqlite_compileoption_used\x00sqlite_compileoption_get\x00unlikely\x00likelihood\x00likely\x00sqlite_offset\x00ltrim\x00rtrim\x00trim\x00min\x00max\x00typeof\x00subtype\x00length\x00instr\x00printf\x00format\x00unicode\x00char\x00abs\x00round\x00upper\x00lower\x00hex\x00unhex\x00ifnull\x00random\x00randomblob\x00nullif\x00sqlite_version\x00sqlite_source_id\x00sqlite_log\x00quote\x00last_insert_rowid\x00changes\x00total_changes\x00replace\x00zeroblob\x00substr\x00substring\x00sum\x00total\x00avg\x00count\x00group_concat\x00glob\x00ceil\x00ceiling\x00floor\x00trunc\x00ln\x00log\x00log10\x00log2\x00exp\x00pow\x00power\x00mod\x00acos\x00asin\x00atan\x00atan2\x00cos\x00sin\x00tan\x00cosh\x00sinh\x00tanh\x00acosh\x00asinh\x00atanh\x00sqrt\x00radians\x00degrees\x00pi\x00sign\x00iif\x00foreign key mismatch - \"%w\" referencing \"%w\"\x00cannot INSERT into generated column \"%s\"\x00table %S has no column named %s\x00table %S has %d columns but %d values were supplied\x00%d values for %d columns\x00UPSERT not implemented for virtual table \"%s\"\x00cannot UPSERT a view\x00rows inserted\x00sqlite3_extension_init\x00sqlite3_\x00lib\x00_init\x00no entry point [%s] in shared library [%s]\x00error during initialization: %s\x00unable to open shared library [%.*s]\x00so\x00automatic extension loading failed: 
%s\x00seq\x00from\x00to\x00on_update\x00on_delete\x00match\x00cid\x00name\x00type\x00notnull\x00dflt_value\x00pk\x00hidden\x00schema\x00ncol\x00wr\x00strict\x00seqno\x00desc\x00coll\x00key\x00builtin\x00enc\x00narg\x00flags\x00wdth\x00hght\x00flgs\x00unique\x00origin\x00partial\x00rowid\x00fkid\x00file\x00busy\x00checkpointed\x00database\x00status\x00cache_size\x00timeout\x00analysis_limit\x00application_id\x00auto_vacuum\x00automatic_index\x00busy_timeout\x00cache_spill\x00case_sensitive_like\x00cell_size_check\x00checkpoint_fullfsync\x00collation_list\x00compile_options\x00count_changes\x00data_version\x00database_list\x00default_cache_size\x00defer_foreign_keys\x00empty_result_callbacks\x00encoding\x00foreign_key_check\x00foreign_key_list\x00foreign_keys\x00freelist_count\x00full_column_names\x00fullfsync\x00function_list\x00hard_heap_limit\x00ignore_check_constraints\x00incremental_vacuum\x00index_info\x00index_list\x00index_xinfo\x00integrity_check\x00journal_mode\x00journal_size_limit\x00legacy_alter_table\x00locking_mode\x00max_page_count\x00mmap_size\x00module_list\x00optimize\x00page_count\x00page_size\x00pragma_list\x00query_only\x00quick_check\x00read_uncommitted\x00recursive_triggers\x00reverse_unordered_selects\x00schema_version\x00secure_delete\x00short_column_names\x00shrink_memory\x00soft_heap_limit\x00synchronous\x00table_info\x00table_list\x00table_xinfo\x00temp_store\x00temp_store_directory\x00threads\x00trusted_schema\x00user_version\x00wal_autocheckpoint\x00wal_checkpoint\x00writable_schema\x00onoffalseyestruextrafull\x00exclusive\x00normal\x00full\x00incremental\x00memory\x00temporary storage cannot be changed from within a transaction\x00SET NULL\x00SET DEFAULT\x00CASCADE\x00RESTRICT\x00NO ACTION\x00delete\x00persist\x00off\x00truncate\x00wal\x00w\x00a\x00sissii\x00utf8\x00utf16le\x00utf16be\x00-%T\x00fast\x00not a writable directory\x00Safety level may not be changed inside a transaction\x00reset\x00issisii\x00issisi\x00SELECT*FROM\"%w\"\x00shadow\x00sssiii\x00iisX\x00isiX\x00c\x00u\x00isisi\x00iss\x00is\x00iissssss\x00NONE\x00siX\x00*** in database %s ***\n\x00row not in PRIMARY KEY order for %s\x00NULL value in %s.%s\x00non-%s value in %s.%s\x00NUMERIC value in %s.%s\x00C\x00TEXT value in %s.%s\x00CHECK constraint failed in %s\x00row \x00 missing from index \x00 values differ from index \x00non-unique entry in index \x00wrong # of entries in index \x00ok\x00unsupported encoding: %s\x00restart\x00ANALYZE \"%w\".\"%w\"\x00UTF8\x00UTF-8\x00UTF-16le\x00UTF-16be\x00UTF16le\x00UTF16be\x00UTF-16\x00UTF16\x00CREATE TABLE x\x00%c\"%s\"\x00(\"%s\"\x00,arg HIDDEN\x00,schema HIDDEN\x00PRAGMA \x00%Q.\x00=%Q\x00error in %s %s after %s: %s\x00malformed database schema (%s)\x00%z - %s\x00rename\x00drop column\x00add column\x00orphan index\x00CREATE TABLE x(type text,name text,tbl_name text,rootpage int,sql text)\x00unsupported file format\x00SELECT*FROM\"%w\".%s ORDER BY rowid\x00database schema is locked: %s\x00statement too long\x00unknown join type: %T%s%T%s%T\x00naturaleftouterightfullinnercross\x00a NATURAL join may not have an ON or USING clause\x00cannot join using column %s - column not present in both tables\x00ambiguous reference to %s in USING()\x00UNION ALL\x00INTERSECT\x00EXCEPT\x00UNION\x00USE TEMP B-TREE FOR %s\x00USE TEMP B-TREE FOR %sORDER BY\x00RIGHT PART OF \x00column%d\x00%.*z:%u\x00NUM\x00cannot use window functions in recursive queries\x00recursive aggregate queries not supported\x00SETUP\x00RECURSIVE STEP\x00SCAN %d CONSTANT ROW%s\x00S\x00COMPOUND 
QUERY\x00LEFT-MOST SUBQUERY\x00%s USING TEMP B-TREE\x00all VALUES must have the same number of terms\x00SELECTs to the left and right of %s do not have the same number of result columns\x00MERGE (%s)\x00LEFT\x00RIGHT\x00no such index: %s\x00'%s' is not a function\x00no such index: \"%s\"\x00multiple references to recursive table: %s\x00circular reference: %s\x00table %s has %d values for %d columns\x00multiple recursive references: %s\x00recursive reference in a subquery: %s\x00%!S\x00too many references to \"%s\": max 65535\x00access to view \"%s\" prohibited\x00..%s\x00%s.%s.%s\x00no such table: %s\x00no tables specified\x00too many columns in result set\x00DISTINCT aggregates must have exactly one argument\x00USE TEMP B-TREE FOR %s(DISTINCT)\x00SCAN %s%s%s\x00 USING COVERING INDEX \x00target object/alias may not appear in FROM clause: %s\x00expected %d columns for '%s' but got %d\x00CO-ROUTINE %!S\x00MATERIALIZE %!S\x00DISTINCT\x00GROUP BY\x00sqlite3_get_table() called with two or more incompatible queries\x00temporary trigger may not have qualified name\x00trigger\x00cannot create triggers on virtual tables\x00trigger %T already exists\x00cannot create trigger on system table\x00cannot create %s trigger on view: %S\x00BEFORE\x00AFTER\x00cannot create INSTEAD OF trigger on table: %S\x00trigger \"%s\" may not write to shadow table \"%s\"\x00INSERT INTO %Q.sqlite_master VALUES('trigger',%Q,%Q,0,'CREATE TRIGGER %q')\x00type='trigger' AND name='%q'\x00no such trigger: %S\x00DELETE FROM %Q.sqlite_master WHERE name=%Q AND type='trigger'\x00%s RETURNING is not available on virtual tables\x00DELETE\x00UPDATE\x00RETURNING may not use \"TABLE.*\" wildcards\x00-- TRIGGER %s\x00cannot UPDATE generated column \"%s\"\x00no such column: %s\x00rows updated\x00%r \x00%sON CONFLICT clause does not match any PRIMARY KEY or UNIQUE constraint\x00CRE\x00INS\x00cannot VACUUM from within a transaction\x00cannot VACUUM - SQL statements in progress\x00non-text filename\x00ATTACH %Q AS vacuum_db\x00output file already exists\x00SELECT sql FROM \"%w\".sqlite_schema WHERE type='table'AND name<>'sqlite_sequence' AND coalesce(rootpage,1)>0\x00SELECT sql FROM \"%w\".sqlite_schema WHERE type='index'\x00SELECT'INSERT INTO vacuum_db.'||quote(name)||' SELECT*FROM\"%w\".'||quote(name)FROM vacuum_db.sqlite_schema WHERE type='table'AND coalesce(rootpage,1)>0\x00INSERT INTO vacuum_db.sqlite_schema SELECT*FROM \"%w\".sqlite_schema WHERE type IN('view','trigger') OR(type='table'AND rootpage=0)\x00CREATE VIRTUAL TABLE %T\x00UPDATE %Q.sqlite_master SET type='table', name=%Q, tbl_name=%Q, rootpage=0, sql=%Q WHERE rowid=#%d\x00name=%Q AND sql=%Q\x00vtable constructor called recursively: %s\x00vtable constructor failed: %s\x00vtable constructor did not declare schema: %s\x00no such module: %s\x00\x00 AND \x00(\x00 (\x00%s=?\x00ANY(%s)\x00>\x00<\x00%s %S\x00SEARCH\x00SCAN\x00AUTOMATIC PARTIAL COVERING INDEX\x00AUTOMATIC COVERING INDEX\x00COVERING INDEX %s\x00INDEX %s\x00 USING \x00 USING INTEGER PRIMARY KEY (%s\x00>? 
AND %s\x00%c?)\x00 VIRTUAL TABLE INDEX %d:%s\x00 LEFT-JOIN\x00BLOOM FILTER ON %S (\x00rowid=?\x00MULTI-INDEX OR\x00INDEX %d\x00RIGHT-JOIN %s\x00regexp\x00ON clause references tables to its right\x00NOCASE\x00too many arguments on %s() - max %d\x00automatic index on %s(%s)\x00auto-index\x00%s.xBestIndex malfunction\x00abbreviated query algorithm search\x00no query solution\x00at most %d tables in a join\x00SCAN CONSTANT ROW\x00second argument to nth_value must be a positive integer\x00argument of ntile must be a positive integer\x00row_number\x00dense_rank\x00rank\x00percent_rank\x00cume_dist\x00ntile\x00last_value\x00nth_value\x00first_value\x00lead\x00lag\x00no such window: %s\x00RANGE with offset PRECEDING/FOLLOWING requires one ORDER BY expression\x00FILTER clause may only be used with aggregate window functions\x00misuse of aggregate: %s()\x00unsupported frame specification\x00PARTITION clause\x00ORDER BY clause\x00frame specification\x00cannot override %s of window: %s\x00DISTINCT is not supported for window functions\x00frame starting offset must be a non-negative integer\x00frame ending offset must be a non-negative integer\x00frame starting offset must be a non-negative number\x00frame ending offset must be a non-negative number\x00%s clause should come after %s not before\x00ORDER BY\x00LIMIT\x00too many terms in compound SELECT\x00syntax error after column name \"%.*s\"\x00parser stack overflow\x00unknown table option: %.*s\x00set list\x00near \"%T\": syntax error\x00qualified table names are not allowed on INSERT, UPDATE, and DELETE statements within triggers\x00the INDEXED BY clause is not allowed on UPDATE or DELETE statements within triggers\x00the NOT INDEXED clause is not allowed on UPDATE or DELETE statements within triggers\x00incomplete input\x00unrecognized token: \"%T\"\x00%s in \"%s\"\x00create\x00temp\x00temporary\x00end\x00explain\x00unable to close due to unfinalized statements or unfinished backups\x00unknown error\x00abort due to ROLLBACK\x00another row available\x00no more rows available\x00not an error\x00SQL logic error\x00access permission denied\x00query aborted\x00database is locked\x00database table is locked\x00attempt to write a readonly database\x00interrupted\x00disk I/O error\x00database disk image is malformed\x00unknown operation\x00database or disk is full\x00unable to open database file\x00locking protocol\x00constraint failed\x00datatype mismatch\x00bad parameter or other API misuse\x00authorization denied\x00column index out of range\x00file is not a database\x00notification message\x00warning message\x00unable to delete/modify user-function due to active statements\x00unable to use function %s in the requested context\x00unknown database: %s\x00unable to delete/modify collation sequence due to active statements\x00file:\x00localhost\x00invalid uri authority: %.*s\x00vfs\x00cache\x00mode\x00no such %s mode: %s\x00%s mode not allowed: %s\x00no such vfs: %s\x00shared\x00private\x00ro\x00rw\x00rwc\x00RTRIM\x00\x00\x00\x00%s at line %d of [%.10s]\x00database corruption\x00misuse\x00cannot open file\x00no such table column: %s.%s\x00SQLITE_\x00database is deadlocked\x00array\x00object\x000123456789abcdef\x00JSON cannot hold BLOB values\x00malformed JSON\x00[0]\x00JSON path error near '%q'\x00json_%s() needs an odd number of arguments\x00$[\x00$.\x00json_object() requires an even number of arguments\x00json_object() labels must be TEXT\x00set\x00insert\x00[]\x00{}\x00CREATE TABLE x(key,value,type,atom,id,parent,fullkey,path,json HIDDEN,root 
HIDDEN)\x00.%.*s\x00[%d]\x00$\x00json\x00json_array\x00json_array_length\x00json_extract\x00->\x00->>\x00json_insert\x00json_object\x00json_patch\x00json_quote\x00json_remove\x00json_replace\x00json_set\x00json_type\x00json_valid\x00json_group_array\x00json_group_object\x00json_each\x00json_tree\x00%s_node\x00data\x00DROP TABLE '%q'.'%q_node';DROP TABLE '%q'.'%q_rowid';DROP TABLE '%q'.'%q_parent';\x00RtreeMatchArg\x00SELECT * FROM %Q.%Q\x00UNIQUE constraint failed: %s.%s\x00rtree constraint failed: %s.(%s<=%s)\x00ALTER TABLE %Q.'%q_node' RENAME TO \"%w_node\";ALTER TABLE %Q.'%q_parent' RENAME TO \"%w_parent\";ALTER TABLE %Q.'%q_rowid' RENAME TO \"%w_rowid\";\x00SELECT stat FROM %Q.sqlite_stat1 WHERE tbl = '%q_rowid'\x00node\x00CREATE TABLE \"%w\".\"%w_rowid\"(rowid INTEGER PRIMARY KEY,nodeno\x00,a%d\x00);CREATE TABLE \"%w\".\"%w_node\"(nodeno INTEGER PRIMARY KEY,data);\x00CREATE TABLE \"%w\".\"%w_parent\"(nodeno INTEGER PRIMARY KEY,parentnode);\x00INSERT INTO \"%w\".\"%w_node\"VALUES(1,zeroblob(%d))\x00INSERT INTO\"%w\".\"%w_rowid\"(rowid,nodeno)VALUES(?1,?2)ON CONFLICT(rowid)DO UPDATE SET nodeno=excluded.nodeno\x00SELECT * FROM \"%w\".\"%w_rowid\" WHERE rowid=?1\x00UPDATE \"%w\".\"%w_rowid\"SET \x00a%d=coalesce(?%d,a%d)\x00a%d=?%d\x00 WHERE rowid=?1\x00INSERT OR REPLACE INTO '%q'.'%q_node' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_node' WHERE nodeno = ?1\x00SELECT nodeno FROM '%q'.'%q_rowid' WHERE rowid = ?1\x00INSERT OR REPLACE INTO '%q'.'%q_rowid' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_rowid' WHERE rowid = ?1\x00SELECT parentnode FROM '%q'.'%q_parent' WHERE nodeno = ?1\x00INSERT OR REPLACE INTO '%q'.'%q_parent' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_parent' WHERE nodeno = ?1\x00PRAGMA %Q.page_size\x00SELECT length(data) FROM '%q'.'%q_node' WHERE nodeno = 1\x00undersize RTree blobs in \"%q_node\"\x00Wrong number of columns for an rtree table\x00Too few columns for an rtree table\x00Too many columns for an rtree table\x00Auxiliary rtree columns must be last\x00CREATE TABLE x(%.*s INT\x00,%.*s\x00);\x00,%.*s REAL\x00,%.*s INT\x00{%lld\x00 %g\x00}\x00Invalid argument to rtreedepth()\x00%z%s%z\x00SELECT data FROM %Q.'%q_node' WHERE nodeno=?\x00Node %lld missing from database\x00SELECT parentnode FROM %Q.'%q_parent' WHERE nodeno=?1\x00SELECT nodeno FROM %Q.'%q_rowid' WHERE rowid=?1\x00Mapping (%lld -> %lld) missing from %s table\x00%_rowid\x00%_parent\x00Found (%lld -> %lld) in %s table, expected (%lld -> %lld)\x00Dimension %d of cell %d on node %lld is corrupt\x00Dimension %d of cell %d on node %lld is corrupt relative to parent\x00Node %lld is too small (%d bytes)\x00Rtree depth out of range (%d)\x00Node %lld is too small for cell count of %d (%d bytes)\x00SELECT count(*) FROM %Q.'%q%s'\x00Wrong number of entries in %%%s table - expected %lld, actual %lld\x00SELECT * FROM %Q.'%q_rowid'\x00Schema corrupt or not an rtree\x00_rowid\x00_parent\x00END\x00wrong number of arguments to function rtreecheck()\x00[\x00[%!g,%!g],\x00[%!g,%!g]]\x00\x00CREATE TABLE x(_shape\x00,%s\x00rtree\x00fullscan\x00_shape does not contain a valid polygon\x00geopoly_overlap\x00geopoly_within\x00geopoly\x00geopoly_area\x00geopoly_blob\x00geopoly_json\x00geopoly_svg\x00geopoly_contains_point\x00geopoly_debug\x00geopoly_bbox\x00geopoly_xform\x00geopoly_regular\x00geopoly_ccw\x00geopoly_group_bbox\x00rtreenode\x00rtreedepth\x00rtreecheck\x00rtree_i32\x00corrupt fossil delta\x00DROP TRIGGER IF EXISTS temp.rbu_insert_tr;DROP TRIGGER IF EXISTS temp.rbu_update1_tr;DROP TRIGGER IF EXISTS temp.rbu_update2_tr;DROP TRIGGER IF 
EXISTS temp.rbu_delete_tr;\x00SELECT rbu_target_name(name, type='view') AS target, name FROM sqlite_schema WHERE type IN ('table', 'view') AND target IS NOT NULL %s ORDER BY name\x00AND rootpage!=0 AND rootpage IS NOT NULL\x00SELECT name, rootpage, sql IS NULL OR substr(8, 6)=='UNIQUE' FROM main.sqlite_schema WHERE type='index' AND tbl_name = ?\x00SELECT (sql COLLATE nocase BETWEEN 'CREATE VIRTUAL' AND 'CREATE VIRTUAM'), rootpage FROM sqlite_schema WHERE name=%Q\x00PRAGMA index_list=%Q\x00SELECT rootpage FROM sqlite_schema WHERE name = %Q\x00PRAGMA table_info=%Q\x00PRAGMA main.index_list = %Q\x00PRAGMA main.index_xinfo = %Q\x00SELECT * FROM '%q'\x00rbu_\x00rbu_rowid\x00table %q %s rbu_rowid column\x00may not have\x00requires\x00PRAGMA table_info(%Q)\x00column missing from %q: %s\x00%z%s\"%w\"\x00%z%s%s\"%w\"%s\x00SELECT max(_rowid_) FROM \"%s%w\"\x00 WHERE _rowid_ > %lld \x00 DESC\x00quote(\x00||','||\x00SELECT %s FROM \"%s%w\" ORDER BY %s LIMIT 1\x00 WHERE (%s) > (%s) \x00_rowid_\x00%z%s \"%w\" COLLATE %Q\x00%z%s \"rbu_imp_%d%w\" COLLATE %Q DESC\x00%z%s quote(\"rbu_imp_%d%w\")\x00SELECT %s FROM \"rbu_imp_%w\" ORDER BY %s LIMIT 1\x00%z%s%s\x00(%s) > (%s)\x00%z%s(%.*s) COLLATE %Q\x00%z%s\"%w\" COLLATE %Q\x00%z%s\"rbu_imp_%d%w\"%s\x00%z%s\"rbu_imp_%d%w\" %s COLLATE %Q\x00%z%s\"rbu_imp_%d%w\" IS ?\x00%z%s%s.\"%w\"\x00%z%sNULL\x00%z, %s._rowid_\x00_rowid_ = ?%d\x00%z%sc%d=?%d\x00_rowid_ = (SELECT id FROM rbu_imposter2 WHERE %z)\x00%z%s\"%w\"=?%d\x00invalid rbu_control value\x00%z%s\"%w\"=rbu_delta(\"%w\", ?%d)\x00%z%s\"%w\"=rbu_fossil_delta(\"%w\", ?%d)\x00PRIMARY KEY(\x00%z%s\"%w\"%s\x00%z)\x00SELECT name FROM sqlite_schema WHERE rootpage = ?\x00%z%sc%d %s COLLATE %Q\x00%z%sc%d%s\x00%z, id INTEGER\x00CREATE TABLE rbu_imposter2(%z, PRIMARY KEY(%z)) WITHOUT ROWID\x00PRIMARY KEY \x00%z%s\"%w\" %s %sCOLLATE %Q%s\x00 NOT NULL\x00%z, %z\x00CREATE TABLE \"rbu_imp_%w\"(%z)%s\x00 WITHOUT ROWID\x00INSERT INTO %s.'rbu_tmp_%q'(rbu_control,%s%s) VALUES(%z)\x00SELECT trim(sql) FROM sqlite_schema WHERE type='index' AND name=?\x00 LIMIT -1 OFFSET %d\x00CREATE TABLE \"rbu_imp_%w\"( %s, PRIMARY KEY( %s ) ) WITHOUT ROWID\x00INSERT INTO \"rbu_imp_%w\" VALUES(%s)\x00DELETE FROM \"rbu_imp_%w\" WHERE %s\x00SELECT %s, 0 AS rbu_control FROM '%q' %s %s %s ORDER BY %s%s\x00AND\x00WHERE\x00SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' %s ORDER BY %s%s\x00SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' %s UNION ALL SELECT %s, rbu_control FROM '%q' %s %s typeof(rbu_control)='integer' AND rbu_control!=1 ORDER BY %s%s\x00rbu_imp_\x00INSERT INTO \"%s%w\"(%s%s) VALUES(%s)\x00, _rowid_\x00DELETE FROM \"%s%w\" WHERE %s\x00, rbu_rowid\x00CREATE TABLE IF NOT EXISTS %s.'rbu_tmp_%q' AS SELECT *%s FROM '%q' WHERE 0;\x00, 0 AS rbu_rowid\x00CREATE TEMP TRIGGER rbu_delete_tr BEFORE DELETE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(3, %s);END;CREATE TEMP TRIGGER rbu_update1_tr BEFORE UPDATE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(3, %s);END;CREATE TEMP TRIGGER rbu_update2_tr AFTER UPDATE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(4, %s);END;\x00CREATE TEMP TRIGGER rbu_insert_tr AFTER INSERT ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(0, %s);END;\x00,_rowid_ \x00,rbu_rowid\x00SELECT %s,%s rbu_control%s FROM '%q'%s %s %s %s\x000 AS \x00UPDATE \"%s%w\" SET %s WHERE %s\x00SELECT k, v FROM %s.rbu_state\x00file:///%s-vacuum?modeof=%s\x00ATTACH %Q AS stat\x00CREATE TABLE IF NOT EXISTS %s.rbu_state(k INTEGER PRIMARY KEY, v)\x00cannot vacuum wal mode 
database\x00file:%s-vactmp?rbu_memory=1%s%s\x00&\x00rbu_tmp_insert\x00rbu_fossil_delta\x00rbu_target_name\x00SELECT * FROM sqlite_schema\x00rbu vfs not found\x00PRAGMA main.wal_checkpoint=restart\x00rbu_exclusive_checkpoint\x00%s-oal\x00%s-wal\x00PRAGMA schema_version\x00PRAGMA schema_version = %d\x00INSERT OR REPLACE INTO %s.rbu_state(k, v) VALUES (%d, %d), (%d, %Q), (%d, %Q), (%d, %d), (%d, %d), (%d, %lld), (%d, %lld), (%d, %lld), (%d, %lld), (%d, %Q) \x00PRAGMA main.%s\x00PRAGMA main.%s = %d\x00PRAGMA writable_schema=1\x00SELECT sql FROM sqlite_schema WHERE sql!='' AND rootpage!=0 AND name!='sqlite_sequence' ORDER BY type DESC\x00SELECT * FROM sqlite_schema WHERE rootpage=0 OR rootpage IS NULL\x00INSERT INTO sqlite_schema VALUES(?,?,?,?,?)\x00PRAGMA writable_schema=0\x00DELETE FROM %s.'rbu_tmp_%q'\x00rbu_state mismatch error\x00rbu_vfs_%d\x00SELECT count(*) FROM sqlite_schema WHERE type='index' AND tbl_name = %Q\x00rbu_index_cnt\x00SELECT 1 FROM sqlite_schema WHERE tbl_name = 'rbu_count'\x00SELECT sum(cnt * (1 + rbu_index_cnt(rbu_target_name(tbl))))FROM rbu_count\x00cannot update wal mode database\x00database modified during rbu %s\x00vacuum\x00update\x00BEGIN IMMEDIATE\x00PRAGMA journal_mode=off\x00-vactmp\x00DELETE FROM stat.rbu_state\x00rbu/zipvfs setup error\x00rbu(%s)/%z\x00rbu_memory\x00SELECT 0, 'tbl', '', 0, '', 1 UNION ALL SELECT 1, 'idx', '', 0, '', 2 UNION ALL SELECT 2, 'stat', '', 0, '', 0\x00PRAGMA '%q'.table_info('%q')\x00%z%s\"%w\".\"%w\".\"%w\"=\"%w\".\"%w\".\"%w\"\x00%z%s\"%w\".\"%w\".\"%w\" IS NOT \"%w\".\"%w\".\"%w\"\x00 OR \x00SELECT * FROM \"%w\".\"%w\" WHERE NOT EXISTS ( SELECT 1 FROM \"%w\".\"%w\" WHERE %s)\x00SELECT * FROM \"%w\".\"%w\", \"%w\".\"%w\" WHERE %s AND (%z)\x00table schemas do not match\x00SELECT tbl, ?2, stat FROM %Q.sqlite_stat1 WHERE tbl IS ?1 AND idx IS (CASE WHEN ?2=X'' THEN NULL ELSE ?2 END)\x00SELECT * FROM \x00 WHERE \x00 IS ?\x00SAVEPOINT changeset\x00RELEASE changeset\x00UPDATE main.\x00 SET \x00 = ?\x00idx IS CASE WHEN length(?4)=0 AND typeof(?4)='blob' THEN NULL ELSE ?4 END \x00DELETE FROM main.\x00 AND (?\x00AND \x00INSERT INTO main.\x00) VALUES(?\x00, ?\x00INSERT INTO main.sqlite_stat1 VALUES(?1, CASE WHEN length(?2)=0 AND typeof(?2)='blob' THEN NULL ELSE ?2 END, ?3)\x00DELETE FROM main.sqlite_stat1 WHERE tbl=?1 AND idx IS CASE WHEN length(?2)=0 AND typeof(?2)='blob' THEN NULL ELSE ?2 END AND (?4 OR stat IS ?3)\x00SAVEPOINT replace_op\x00RELEASE replace_op\x00SAVEPOINT changeset_apply\x00PRAGMA defer_foreign_keys = 1\x00sqlite3changeset_apply(): no such table: %s\x00sqlite3changeset_apply(): table %s has %d columns, expected %d or more\x00sqlite3changeset_apply(): primary key mismatch for table %s\x00PRAGMA defer_foreign_keys = 0\x00RELEASE changeset_apply\x00ROLLBACK TO changeset_apply\x00fts5: parser stack overflow\x00fts5: syntax error near \"%.*s\"\x00%z%.*s\x00wrong number of arguments to function highlight()\x00wrong number of arguments to function snippet()\x00snippet\x00highlight\x00bm25\x00prefix\x00malformed prefix=... directive\x00too many prefix indexes (max %d)\x00prefix length out of range (max 999)\x00tokenize\x00multiple tokenize=... directives\x00parse error in tokenize directive\x00content\x00multiple content=... directives\x00%Q.%Q\x00content_rowid\x00multiple content_rowid=... directives\x00columnsize\x00malformed columnsize=... directive\x00columns\x00malformed detail=... 
directive\x00unrecognized option: \"%.*s\"\x00reserved fts5 column name: %s\x00unindexed\x00unrecognized column option: %s\x00T.%Q\x00, T.%Q\x00, T.c%d\x00reserved fts5 table name: %s\x00parse error in \"%s\"\x00docsize\x00%Q.'%q_%s'\x00CREATE TABLE x(\x00%z%s%Q\x00%z, %Q HIDDEN, %s HIDDEN)\x00pgsz\x00hashsize\x00automerge\x00usermerge\x00crisismerge\x00SELECT k, v FROM %Q.'%q_config'\x00version\x00invalid fts5 file format (found %d, expected %d) - run 'rebuild'\x00unterminated string\x00fts5: syntax error near \"%.1s\"\x00OR\x00NOT\x00NEAR\x00expected integer, got \"%.*s\"\x00fts5: column queries are not supported (detail=none)\x00fts5: %s queries are not supported (detail!=full)\x00phrase\x00block\x00REPLACE INTO '%q'.'%q_data'(id, block) VALUES(?,?)\x00DELETE FROM '%q'.'%q_data' WHERE id>=? AND id<=?\x00DELETE FROM '%q'.'%q_idx' WHERE segid=?\x00PRAGMA %Q.data_version\x00SELECT pgno FROM '%q'.'%q_idx' WHERE segid=? AND term<=? ORDER BY term DESC LIMIT 1\x00INSERT INTO '%q'.'%q_idx'(segid,term,pgno) VALUES(?,?,?)\x00%s_data\x00id INTEGER PRIMARY KEY, block BLOB\x00segid, term, pgno, PRIMARY KEY(segid, term)\x00SELECT segid, term, (pgno>>1), (pgno&1) FROM %Q.'%q_idx' WHERE segid=%d ORDER BY 1, 2\x00\x00\x00\x00\x00\x00recursively defined fts5 content table\x00SELECT rowid, rank FROM %Q.%Q ORDER BY %s(\"%w\"%s%s) %s\x00DESC\x00ASC\x00reads\x00unknown special query: %.*s\x00SELECT %s\x00no such function: %s\x00parse error in rank function: %s\x00%s: table does not support scanning\x00delete-all\x00'delete-all' may only be used with a contentless or external content fts5 table\x00rebuild\x00'rebuild' may not be used with a contentless fts5 table\x00merge\x00integrity-check\x00cannot %s contentless fts5 table: %s\x00DELETE from\x00no such cursor: %lld\x00no such tokenizer: %s\x00error in tokenizer constructor\x00fts5_api_ptr\x00fts5: 2023-02-21 18:09:37 05941c2a04037fc3ed2ffae11f5d2260706f89431f463518740f72ada350866d\x00config\x00fts5\x00fts5_source_id\x00SELECT %s FROM %s T WHERE T.%Q >= ? AND T.%Q <= ? ORDER BY T.%Q ASC\x00SELECT %s FROM %s T WHERE T.%Q <= ? AND T.%Q >= ? 
ORDER BY T.%Q DESC\x00SELECT %s FROM %s T WHERE T.%Q=?\x00INSERT INTO %Q.'%q_content' VALUES(%s)\x00REPLACE INTO %Q.'%q_content' VALUES(%s)\x00DELETE FROM %Q.'%q_content' WHERE id=?\x00REPLACE INTO %Q.'%q_docsize' VALUES(?,?)\x00DELETE FROM %Q.'%q_docsize' WHERE id=?\x00SELECT sz FROM %Q.'%q_docsize' WHERE id=?\x00REPLACE INTO %Q.'%q_config' VALUES(?,?)\x00SELECT %s FROM %s AS T\x00DROP TABLE IF EXISTS %Q.'%q_data';DROP TABLE IF EXISTS %Q.'%q_idx';DROP TABLE IF EXISTS %Q.'%q_config';\x00DROP TABLE IF EXISTS %Q.'%q_docsize';\x00DROP TABLE IF EXISTS %Q.'%q_content';\x00ALTER TABLE %Q.'%q_%s' RENAME TO '%q_%s';\x00CREATE TABLE %Q.'%q_%q'(%s)%s\x00fts5: error creating shadow table %q_%s: %s\x00id INTEGER PRIMARY KEY\x00, c%d\x00id INTEGER PRIMARY KEY, sz BLOB\x00k PRIMARY KEY, v\x00DELETE FROM %Q.'%q_data';DELETE FROM %Q.'%q_idx';\x00DELETE FROM %Q.'%q_docsize';\x00SELECT count(*) FROM %Q.'%q_%s'\x00tokenchars\x00separators\x00L* N* Co\x00categories\x00remove_diacritics\x00unicode61\x00al\x00ance\x00ence\x00er\x00ic\x00able\x00ible\x00ant\x00ement\x00ment\x00ent\x00ion\x00ou\x00ism\x00ate\x00iti\x00ous\x00ive\x00ize\x00at\x00bl\x00ble\x00iz\x00ational\x00tional\x00tion\x00enci\x00anci\x00izer\x00logi\x00bli\x00alli\x00entli\x00eli\x00e\x00ousli\x00ization\x00ation\x00ator\x00alism\x00iveness\x00fulness\x00ful\x00ousness\x00aliti\x00iviti\x00biliti\x00ical\x00ness\x00icate\x00iciti\x00ative\x00alize\x00eed\x00ee\x00ed\x00ing\x00case_sensitive\x00ascii\x00porter\x00trigram\x00col\x00row\x00instance\x00fts5vocab: unknown table type: %Q\x00CREATE TABlE vocab(term, col, doc, cnt)\x00CREATE TABlE vocab(term, doc, cnt)\x00CREATE TABlE vocab(term, doc, col, offset)\x00wrong number of vtable arguments\x00recursive definition for %s.%s\x00SELECT t.%Q FROM %Q.%Q AS t WHERE t.%Q MATCH '*id'\x00no such fts5 table: %s.%s\x00fts5vocab\x002023-02-21 18:09:37 05941c2a04037fc3ed2ffae11f5d2260706f89431f463518740f72ada350866d\x00" +var ts1 = "3.41.2\x00ATOMIC_INTRINSICS=0\x00COMPILER=clang-13.0.0\x00DEFAULT_AUTOVACUUM\x00DEFAULT_CACHE_SIZE=-2000\x00DEFAULT_FILE_FORMAT=4\x00DEFAULT_JOURNAL_SIZE_LIMIT=-1\x00DEFAULT_MEMSTATUS=0\x00DEFAULT_MMAP_SIZE=0\x00DEFAULT_PAGE_SIZE=4096\x00DEFAULT_PCACHE_INITSZ=20\x00DEFAULT_RECURSIVE_TRIGGERS\x00DEFAULT_SECTOR_SIZE=4096\x00DEFAULT_SYNCHRONOUS=2\x00DEFAULT_WAL_AUTOCHECKPOINT=1000\x00DEFAULT_WAL_SYNCHRONOUS=2\x00DEFAULT_WORKER_THREADS=0\x00ENABLE_COLUMN_METADATA\x00ENABLE_FTS5\x00ENABLE_GEOPOLY\x00ENABLE_MATH_FUNCTIONS\x00ENABLE_MEMORY_MANAGEMENT\x00ENABLE_OFFSET_SQL_FUNC\x00ENABLE_PREUPDATE_HOOK\x00ENABLE_RBU\x00ENABLE_RTREE\x00ENABLE_SESSION\x00ENABLE_SNAPSHOT\x00ENABLE_STAT4\x00ENABLE_UNLOCK_NOTIFY\x00LIKE_DOESNT_MATCH_BLOBS\x00MALLOC_SOFT_LIMIT=1024\x00MAX_ATTACHED=10\x00MAX_COLUMN=2000\x00MAX_COMPOUND_SELECT=500\x00MAX_DEFAULT_PAGE_SIZE=8192\x00MAX_EXPR_DEPTH=1000\x00MAX_FUNCTION_ARG=127\x00MAX_LENGTH=1000000000\x00MAX_LIKE_PATTERN_LENGTH=50000\x00MAX_MMAP_SIZE=0\x00MAX_PAGE_COUNT=1073741823\x00MAX_PAGE_SIZE=65536\x00MAX_SQL_LENGTH=1000000000\x00MAX_TRIGGER_DEPTH=1000\x00MAX_VARIABLE_NUMBER=32766\x00MAX_VDBE_OP=250000000\x00MAX_WORKER_THREADS=8\x00MUTEX_NOOP\x00SOUNDEX\x00SYSTEM_MALLOC\x00TEMP_STORE=1\x00THREADSAFE=1\x00BINARY\x00ANY\x00BLOB\x00INT\x00INTEGER\x00REAL\x00TEXT\x0020b:20e\x0020c:20e\x0020e\x0040f-21a-21d\x00now\x00local time unavailable\x00second\x00minute\x00hour\x00\x00\x00day\x00\x00\x00\x00month\x00\x00year\x00\x00\x00auto\x00julianday\x00localtime\x00unixepoch\x00utc\x00weekday \x00start of 
\x00month\x00year\x00day\x00%02d\x00%06.3f\x00%03d\x00%.16g\x00%lld\x00%04d\x00date\x00time\x00datetime\x00strftime\x00current_time\x00current_timestamp\x00current_date\x00failed to allocate %u bytes of memory\x00failed memory resize %u to %u bytes\x00out of memory\x000123456789ABCDEF0123456789abcdef\x00-x0\x00X0\x00%\x00NaN\x00Inf\x00\x00NULL\x00(NULL)\x00.\x00(join-%u)\x00(subquery-%u)\x00thstndrd\x00922337203685477580\x00API call with %s database connection pointer\x00unopened\x00invalid\x00Savepoint\x00AutoCommit\x00Transaction\x00Checkpoint\x00JournalMode\x00Vacuum\x00VFilter\x00VUpdate\x00Init\x00Goto\x00Gosub\x00InitCoroutine\x00Yield\x00MustBeInt\x00Jump\x00Once\x00If\x00IfNot\x00IsType\x00Not\x00IfNullRow\x00SeekLT\x00SeekLE\x00SeekGE\x00SeekGT\x00IfNotOpen\x00IfNoHope\x00NoConflict\x00NotFound\x00Found\x00SeekRowid\x00NotExists\x00Last\x00IfSmaller\x00SorterSort\x00Sort\x00Rewind\x00SorterNext\x00Prev\x00Next\x00IdxLE\x00IdxGT\x00IdxLT\x00Or\x00And\x00IdxGE\x00RowSetRead\x00RowSetTest\x00Program\x00FkIfZero\x00IsNull\x00NotNull\x00Ne\x00Eq\x00Gt\x00Le\x00Lt\x00Ge\x00ElseEq\x00IfPos\x00IfNotZero\x00DecrJumpZero\x00IncrVacuum\x00VNext\x00Filter\x00PureFunc\x00Function\x00Return\x00EndCoroutine\x00HaltIfNull\x00Halt\x00Integer\x00Int64\x00String\x00BeginSubrtn\x00Null\x00SoftNull\x00Blob\x00Variable\x00Move\x00Copy\x00SCopy\x00IntCopy\x00FkCheck\x00ResultRow\x00CollSeq\x00AddImm\x00RealAffinity\x00Cast\x00Permutation\x00Compare\x00IsTrue\x00ZeroOrNull\x00Offset\x00Column\x00TypeCheck\x00Affinity\x00MakeRecord\x00Count\x00ReadCookie\x00SetCookie\x00ReopenIdx\x00BitAnd\x00BitOr\x00ShiftLeft\x00ShiftRight\x00Add\x00Subtract\x00Multiply\x00Divide\x00Remainder\x00Concat\x00OpenRead\x00OpenWrite\x00BitNot\x00OpenDup\x00OpenAutoindex\x00String8\x00OpenEphemeral\x00SorterOpen\x00SequenceTest\x00OpenPseudo\x00Close\x00ColumnsUsed\x00SeekScan\x00SeekHit\x00Sequence\x00NewRowid\x00Insert\x00RowCell\x00Delete\x00ResetCount\x00SorterCompare\x00SorterData\x00RowData\x00Rowid\x00NullRow\x00SeekEnd\x00IdxInsert\x00SorterInsert\x00IdxDelete\x00DeferredSeek\x00IdxRowid\x00FinishSeek\x00Destroy\x00Clear\x00ResetSorter\x00CreateBtree\x00SqlExec\x00ParseSchema\x00LoadAnalysis\x00DropTable\x00DropIndex\x00Real\x00DropTrigger\x00IntegrityCk\x00RowSetAdd\x00Param\x00FkCounter\x00MemMax\x00OffsetLimit\x00AggInverse\x00AggStep\x00AggStep1\x00AggValue\x00AggFinal\x00Expire\x00CursorLock\x00CursorUnlock\x00TableLock\x00VBegin\x00VCreate\x00VDestroy\x00VOpen\x00VInitIn\x00VColumn\x00VRename\x00Pagecount\x00MaxPgcnt\x00ClrSubtype\x00FilterAdd\x00Trace\x00CursorHint\x00ReleaseReg\x00Noop\x00Explain\x00Abortable\x00open\x00close\x00access\x00getcwd\x00stat\x00fstat\x00ftruncate\x00fcntl\x00read\x00pread\x00pread64\x00write\x00pwrite\x00pwrite64\x00fchmod\x00fallocate\x00unlink\x00openDirectory\x00mkdir\x00rmdir\x00fchown\x00geteuid\x00mmap\x00munmap\x00mremap\x00getpagesize\x00readlink\x00lstat\x00ioctl\x00attempt to open \"%s\" as file descriptor %d\x00/dev/null\x00os_unix.c:%d: (%d) %s(%s) - %s\x00cannot fstat db file %s\x00file unlinked while open: %s\x00multiple links to file: %s\x00file renamed while open: %s\x00%s\x00full_fsync\x00%s-shm\x00readonly_shm\x00psow\x00unix-excl\x00%s.lock\x00/var/tmp\x00/usr/tmp\x00/tmp\x00SQLITE_TMPDIR\x00TMPDIR\x00%s/etilqs_%llx%c\x00modeof\x00fsync\x00/dev/urandom\x00unix\x00unix-none\x00unix-dotfile\x00memdb\x00memdb(%p,%lld)\x00PRAGMA \"%w\".page_count\x00ATTACH x AS %Q\x00recovered %d pages from %s\x00-journal\x00-wal\x00nolock\x00immutable\x00PRAGMA 
table_list\x00recovered %d frames from WAL file %s\x00cannot limit WAL size: %s\x00SQLite format 3\x00:memory:\x00@ \x00\n\x00invalid page number %d\x002nd reference to page %d\x00Failed to read ptrmap key=%d\x00Bad ptr map entry key=%d expected=(%d,%d) got=(%d,%d)\x00failed to get page %d\x00freelist leaf count too big on page %d\x00%s is %d but should be %d\x00size\x00overflow list length\x00Page %u: \x00unable to get the page. error code=%d\x00btreeInitPage() returns error code %d\x00free space corruption\x00On tree page %u cell %d: \x00On page %u at right child: \x00Offset %d out of range %d..%d\x00Extends off end of page\x00Rowid %lld out of order\x00Child page depth differs\x00Multiple uses for byte %u of page %u\x00Fragmentation of %d bytes reported as %d on page %u\x00Main freelist: \x00max rootpage (%d) disagrees with header (%d)\x00incremental_vacuum enabled with a max rootpage of zero\x00Page %d is never used\x00Pointer map page %d is referenced\x00unknown database %s\x00destination database is in use\x00source and destination must be distinct\x00%!.15g\x00-\x00%s%s\x00k(%d\x00B\x00,%s%s%s\x00N.\x00)\x00%.18s-%s\x00%s(%d)\x00%d\x00(blob)\x00vtab:%p\x00%c%u\x00]\x00program\x00?\x008\x0016LE\x0016BE\x00addr\x00opcode\x00p1\x00p2\x00p3\x00p4\x00p5\x00comment\x00id\x00parent\x00notused\x00detail\x00%.4c%s%.16c\x00MJ delete: %s\x00MJ collide: %s\x00-mj%06X9%02X\x00FOREIGN KEY constraint failed\x00a CHECK constraint\x00a generated column\x00an index\x00non-deterministic use of %s() in %s\x00API called with finalized prepared statement\x00API called with NULL prepared statement\x00string or blob too big\x00bind on a busy prepared statement: [%s]\x00-- \x00'%.*q'\x00zeroblob(%d)\x00x'\x00%02x\x00'\x00%s constraint failed\x00%z: %s\x00abort at %d in [%s]: %s\x00cannot store %s value in %s column %s.%s\x00cannot open savepoint - SQL statements in progress\x00no such savepoint: %s\x00cannot release savepoint - SQL statements in progress\x00cannot commit transaction - SQL statements in progress\x00cannot start a transaction within a transaction\x00cannot rollback - no transaction is active\x00cannot commit - no transaction is active\x00database schema has changed\x00index corruption\x00sqlite_master\x00SELECT*FROM\"%w\".%s WHERE %s ORDER BY rowid\x00too many levels of trigger recursion\x00cannot change %s wal mode from within a transaction\x00into\x00out of\x00database table is locked: %s\x00ValueList\x00-- %s\x00statement aborts at %d: [%s] %s\x00NOT NULL\x00UNIQUE\x00CHECK\x00FOREIGN KEY\x00cannot open value of type %s\x00null\x00real\x00integer\x00no such rowid: %lld\x00cannot open virtual table: %s\x00cannot open table without rowid: %s\x00cannot open view: %s\x00no such column: \"%s\"\x00foreign key\x00indexed\x00cannot open %s column for writing\x00sqlite_\x00sqlite_temp_master\x00sqlite_temp_schema\x00sqlite_schema\x00main\x00*\x00new\x00old\x00excluded\x00misuse of aliased aggregate %s\x00misuse of aliased window function %s\x00row value misused\x00double-quoted string literal: \"%w\"\x00coalesce\x00no such column\x00ambiguous column name\x00%s: %s.%s.%s\x00%s: %s.%s\x00%s: %s\x00partial index WHERE clauses\x00index expressions\x00CHECK constraints\x00generated columns\x00%s prohibited in %s\x00the \".\" operator\x00second argument to %#T() must be a constant between 0.0 and 1.0\x00not authorized to use function: %#T\x00non-deterministic functions\x00%#T() may not be used as a window function\x00window\x00aggregate\x00misuse of %s function %#T()\x00no such function: %#T\x00wrong 
number of arguments to function %#T()\x00FILTER may not be used with non-aggregate %#T()\x00subqueries\x00parameters\x00%r %s BY term out of range - should be between 1 and %d\x00too many terms in ORDER BY clause\x00ORDER\x00%r ORDER BY term does not match any column in the result set\x00too many terms in %s BY clause\x00HAVING clause on a non-aggregate query\x00GROUP\x00aggregate functions are not allowed in the GROUP BY clause\x00Expression tree is too large (maximum depth %d)\x00IN(...) element has %d term%s - expected %d\x00s\x000\x00too many arguments on function %T\x00unsafe use of %#T()\x00variable number must be between ?1 and ?%d\x00too many SQL variables\x00%d columns assigned %d values\x00too many columns in %s\x00true\x00false\x00_ROWID_\x00ROWID\x00OID\x00USING ROWID SEARCH ON TABLE %s FOR IN-OPERATOR\x00USING INDEX %s FOR IN-OPERATOR\x00sub-select returns %d columns - expected %d\x00REUSE LIST SUBQUERY %d\x00%sLIST SUBQUERY %d\x00CORRELATED \x00REUSE SUBQUERY %d\x00%sSCALAR SUBQUERY %d\x001\x000x\x00hex literal too big: %s%#T\x00generated column loop on \"%s\"\x00blob\x00text\x00numeric\x00flexnum\x00none\x00misuse of aggregate: %#T()\x00unknown function: %#T()\x00RAISE() may only be used within a trigger-program\x00B\x00C\x00D\x00E\x00F\x00table %s may not be altered\x00SELECT 1 FROM \"%w\".sqlite_master WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%' AND sqlite_rename_test(%Q, sql, type, name, %d, %Q, %d)=NULL \x00SELECT 1 FROM temp.sqlite_master WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%' AND sqlite_rename_test(%Q, sql, type, name, 1, %Q, %d)=NULL \x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_quotefix(%Q, sql)WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%'\x00UPDATE temp.sqlite_master SET sql = sqlite_rename_quotefix('temp', sql)WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%'\x00there is already another table or index with this name: %s\x00table\x00view %s may not be altered\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, %d) WHERE (type!='index' OR tbl_name=%Q COLLATE nocase)AND name NOT LIKE 'sqliteX_%%' ESCAPE 'X'\x00UPDATE %Q.sqlite_master SET tbl_name = %Q, name = CASE WHEN type='table' THEN %Q WHEN name LIKE 'sqliteX_autoindex%%' ESCAPE 'X' AND type='index' THEN 'sqlite_autoindex_' || %Q || substr(name,%d+18) ELSE name END WHERE tbl_name=%Q COLLATE nocase AND (type='table' OR type='index' OR type='trigger');\x00sqlite_sequence\x00UPDATE \"%w\".sqlite_sequence set name = %Q WHERE name = %Q\x00UPDATE sqlite_temp_schema SET sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, 1), tbl_name = CASE WHEN tbl_name=%Q COLLATE nocase AND sqlite_rename_test(%Q, sql, type, name, 1, 'after rename', 0) THEN %Q ELSE tbl_name END WHERE type IN ('view', 'trigger')\x00after rename\x00SELECT raise(ABORT,%Q) FROM \"%w\".\"%w\"\x00Cannot add a PRIMARY KEY column\x00Cannot add a UNIQUE column\x00Cannot add a REFERENCES column with non-NULL default value\x00Cannot add a NOT NULL column with default value NULL\x00Cannot add a column with non-constant default\x00cannot add a STORED column\x00UPDATE \"%w\".sqlite_master SET sql = printf('%%.%ds, ',sql) || %Q || substr(sql,1+length(printf('%%.%ds',sql))) WHERE type = 'table' AND name = %Q\x00SELECT CASE WHEN quick_check GLOB 'CHECK*' THEN raise(ABORT,'CHECK constraint failed') ELSE raise(ABORT,'NOT NULL constraint failed') END FROM pragma_quick_check(%Q,%Q) 
WHERE quick_check GLOB 'CHECK*' OR quick_check GLOB 'NULL*'\x00virtual tables may not be altered\x00Cannot add a column to a view\x00sqlite_altertab_%s\x00view\x00virtual table\x00cannot %s %s \"%s\"\x00drop column from\x00rename columns of\x00no such column: \"%T\"\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, %d) WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND (type != 'index' OR tbl_name = %Q)\x00UPDATE temp.sqlite_master SET sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, 1) WHERE type IN ('trigger', 'view')\x00error in %s %s%s%s: %s\x00 \x00CREATE \x00\"%w\" \x00%Q%s\x00%.*s%s\x00cannot drop %s column: \"%s\"\x00PRIMARY KEY\x00cannot drop column \"%s\": no other columns exist\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_drop_column(%d, sql, %d) WHERE (type=='table' AND tbl_name=%Q COLLATE nocase)\x00after drop column\x00sqlite_rename_column\x00sqlite_rename_table\x00sqlite_rename_test\x00sqlite_drop_column\x00sqlite_rename_quotefix\x00CREATE TABLE %Q.%s(%s)\x00DELETE FROM %Q.%s WHERE %s=%Q\x00DELETE FROM %Q.%s\x00sqlite_stat1\x00tbl,idx,stat\x00sqlite_stat4\x00tbl,idx,neq,nlt,ndlt,sample\x00sqlite_stat3\x00stat_init\x00stat_push\x00%llu\x00 %llu\x00%llu \x00stat_get\x00sqlite\\_%\x00BBB\x00idx\x00tbl\x00unordered*\x00sz=[0-9]*\x00noskipscan*\x00SELECT idx,count(*) FROM %Q.sqlite_stat4 GROUP BY idx\x00SELECT idx,neq,nlt,ndlt,sample FROM %Q.sqlite_stat4\x00SELECT tbl,idx,stat FROM %Q.sqlite_stat1\x00x\x00\x00too many attached databases - max %d\x00database %s is already in use\x00database is already attached\x00attached databases must use the same text encoding as main database\x00unable to open database: %s\x00no such database: %s\x00cannot detach database %s\x00database %s is locked\x00sqlite_detach\x00sqlite_attach\x00%s cannot use variables\x00%s %T cannot reference objects in database %s\x00authorizer malfunction\x00%s.%s\x00%s.%z\x00access to %z is prohibited\x00not authorized\x00pragma_\x00no such view\x00no such table\x00corrupt database\x00unknown database %T\x00object name reserved for internal use: %s\x00temporary table name must be unqualified\x00%s %T already exists\x00there is already an index named %s\x00sqlite_returning\x00cannot use RETURNING in a trigger\x00too many columns on %s\x00always\x00generated\x00duplicate column name: %s\x00default value of column [%s] is not constant\x00cannot use DEFAULT on a generated column\x00generated columns cannot be part of the PRIMARY KEY\x00table \"%s\" has more than one primary key\x00AUTOINCREMENT is only allowed on an INTEGER PRIMARY KEY\x00virtual tables cannot use computed columns\x00virtual\x00stored\x00error in generated column \"%s\"\x00,\x00\n \x00,\n \x00\n)\x00CREATE TABLE \x00 TEXT\x00 NUM\x00 INT\x00 REAL\x00unknown datatype for %s.%s: \"%s\"\x00missing datatype for %s.%s\x00AUTOINCREMENT not allowed on WITHOUT ROWID tables\x00PRIMARY KEY missing on table %s\x00must have at least one non-generated column\x00TABLE\x00VIEW\x00CREATE %s %.*s\x00UPDATE %Q.sqlite_master SET type='%s', name=%Q, tbl_name=%Q, rootpage=#%d, sql=%Q WHERE rowid=#%d\x00CREATE TABLE %Q.sqlite_sequence(name,seq)\x00tbl_name='%q' AND type!='trigger'\x00parameters are not allowed in views\x00view %s is circularly defined\x00corrupt schema\x00UPDATE %Q.sqlite_master SET rootpage=%d WHERE #%d AND rootpage=#%d\x00sqlite_stat%d\x00DELETE FROM %Q.sqlite_sequence WHERE name=%Q\x00DELETE FROM %Q.sqlite_master WHERE tbl_name=%Q and type!='trigger'\x00table %s may not be dropped\x00use 
DROP TABLE to delete table %s\x00use DROP VIEW to delete view %s\x00foreign key on %s should reference only one column of table %T\x00number of columns in foreign key does not match the number of columns in the referenced table\x00unknown column \"%s\" in foreign key definition\x00unsupported use of NULLS %s\x00FIRST\x00LAST\x00index\x00cannot create a TEMP index on non-TEMP table \"%s\"\x00table %s may not be indexed\x00views may not be indexed\x00virtual tables may not be indexed\x00there is already a table named %s\x00index %s already exists\x00sqlite_autoindex_%s_%d\x00expressions prohibited in PRIMARY KEY and UNIQUE constraints\x00conflicting ON CONFLICT clauses specified\x00invalid rootpage\x00CREATE%s INDEX %.*s\x00 UNIQUE\x00INSERT INTO %Q.sqlite_master VALUES('index',%Q,%Q,#%d,%Q);\x00name='%q' AND type='index'\x00no such index: %S\x00index associated with UNIQUE or PRIMARY KEY constraint cannot be dropped\x00DELETE FROM %Q.sqlite_master WHERE name=%Q AND type='index'\x00too many FROM clause terms, max: %d\x00a JOIN clause is required before %s\x00ON\x00USING\x00BEGIN\x00ROLLBACK\x00COMMIT\x00RELEASE\x00unable to open a temporary database file for storing temporary tables\x00index '%q'\x00, \x00%s.rowid\x00unable to identify the object to be reindexed\x00duplicate WITH table name: %s\x00no such collation sequence: %s\x00unsafe use of virtual table \"%s\"\x00table %s may not be modified\x00cannot modify %s because it is a view\x00rows deleted\x00integer overflow\x00%.*f\x00LIKE or GLOB pattern too complex\x00ESCAPE expression must be a single character\x00%!.20e\x00%Q\x00?000\x00MATCH\x00like\x00implies_nonnull_row\x00expr_compare\x00expr_implies_expr\x00affinity\x00soundex\x00load_extension\x00sqlite_compileoption_used\x00sqlite_compileoption_get\x00unlikely\x00likelihood\x00likely\x00sqlite_offset\x00ltrim\x00rtrim\x00trim\x00min\x00max\x00typeof\x00subtype\x00length\x00instr\x00printf\x00format\x00unicode\x00char\x00abs\x00round\x00upper\x00lower\x00hex\x00unhex\x00ifnull\x00random\x00randomblob\x00nullif\x00sqlite_version\x00sqlite_source_id\x00sqlite_log\x00quote\x00last_insert_rowid\x00changes\x00total_changes\x00replace\x00zeroblob\x00substr\x00substring\x00sum\x00total\x00avg\x00count\x00group_concat\x00glob\x00ceil\x00ceiling\x00floor\x00trunc\x00ln\x00log\x00log10\x00log2\x00exp\x00pow\x00power\x00mod\x00acos\x00asin\x00atan\x00atan2\x00cos\x00sin\x00tan\x00cosh\x00sinh\x00tanh\x00acosh\x00asinh\x00atanh\x00sqrt\x00radians\x00degrees\x00pi\x00sign\x00iif\x00foreign key mismatch - \"%w\" referencing \"%w\"\x00cannot INSERT into generated column \"%s\"\x00table %S has no column named %s\x00table %S has %d columns but %d values were supplied\x00%d values for %d columns\x00UPSERT not implemented for virtual table \"%s\"\x00cannot UPSERT a view\x00rows inserted\x00sqlite3_extension_init\x00sqlite3_\x00lib\x00_init\x00no entry point [%s] in shared library [%s]\x00error during initialization: %s\x00unable to open shared library [%.*s]\x00so\x00automatic extension loading failed: 
%s\x00seq\x00from\x00to\x00on_update\x00on_delete\x00match\x00cid\x00name\x00type\x00notnull\x00dflt_value\x00pk\x00hidden\x00schema\x00ncol\x00wr\x00strict\x00seqno\x00desc\x00coll\x00key\x00builtin\x00enc\x00narg\x00flags\x00wdth\x00hght\x00flgs\x00unique\x00origin\x00partial\x00rowid\x00fkid\x00file\x00busy\x00checkpointed\x00database\x00status\x00cache_size\x00timeout\x00analysis_limit\x00application_id\x00auto_vacuum\x00automatic_index\x00busy_timeout\x00cache_spill\x00case_sensitive_like\x00cell_size_check\x00checkpoint_fullfsync\x00collation_list\x00compile_options\x00count_changes\x00data_version\x00database_list\x00default_cache_size\x00defer_foreign_keys\x00empty_result_callbacks\x00encoding\x00foreign_key_check\x00foreign_key_list\x00foreign_keys\x00freelist_count\x00full_column_names\x00fullfsync\x00function_list\x00hard_heap_limit\x00ignore_check_constraints\x00incremental_vacuum\x00index_info\x00index_list\x00index_xinfo\x00integrity_check\x00journal_mode\x00journal_size_limit\x00legacy_alter_table\x00locking_mode\x00max_page_count\x00mmap_size\x00module_list\x00optimize\x00page_count\x00page_size\x00pragma_list\x00query_only\x00quick_check\x00read_uncommitted\x00recursive_triggers\x00reverse_unordered_selects\x00schema_version\x00secure_delete\x00short_column_names\x00shrink_memory\x00soft_heap_limit\x00synchronous\x00table_info\x00table_list\x00table_xinfo\x00temp_store\x00temp_store_directory\x00threads\x00trusted_schema\x00user_version\x00wal_autocheckpoint\x00wal_checkpoint\x00writable_schema\x00onoffalseyestruextrafull\x00exclusive\x00normal\x00full\x00incremental\x00memory\x00temporary storage cannot be changed from within a transaction\x00SET NULL\x00SET DEFAULT\x00CASCADE\x00RESTRICT\x00NO ACTION\x00delete\x00persist\x00off\x00truncate\x00wal\x00w\x00a\x00sissii\x00utf8\x00utf16le\x00utf16be\x00-%T\x00fast\x00not a writable directory\x00Safety level may not be changed inside a transaction\x00reset\x00issisii\x00issisi\x00SELECT*FROM\"%w\"\x00shadow\x00sssiii\x00iisX\x00isiX\x00c\x00u\x00isisi\x00iss\x00is\x00iissssss\x00NONE\x00siX\x00*** in database %s ***\n\x00row not in PRIMARY KEY order for %s\x00NULL value in %s.%s\x00non-%s value in %s.%s\x00NUMERIC value in %s.%s\x00C\x00TEXT value in %s.%s\x00CHECK constraint failed in %s\x00row \x00 missing from index \x00rowid not at end-of-record for row \x00 of index \x00 values differ from index \x00non-unique entry in index \x00wrong # of entries in index \x00ok\x00unsupported encoding: %s\x00restart\x00ANALYZE \"%w\".\"%w\"\x00UTF8\x00UTF-8\x00UTF-16le\x00UTF-16be\x00UTF16le\x00UTF16be\x00UTF-16\x00UTF16\x00CREATE TABLE x\x00%c\"%s\"\x00(\"%s\"\x00,arg HIDDEN\x00,schema HIDDEN\x00PRAGMA \x00%Q.\x00=%Q\x00error in %s %s after %s: %s\x00malformed database schema (%s)\x00%z - %s\x00rename\x00drop column\x00add column\x00orphan index\x00CREATE TABLE x(type text,name text,tbl_name text,rootpage int,sql text)\x00unsupported file format\x00SELECT*FROM\"%w\".%s ORDER BY rowid\x00database schema is locked: %s\x00statement too long\x00unknown join type: %T%s%T%s%T\x00naturaleftouterightfullinnercross\x00a NATURAL join may not have an ON or USING clause\x00cannot join using column %s - column not present in both tables\x00ambiguous reference to %s in USING()\x00UNION ALL\x00INTERSECT\x00EXCEPT\x00UNION\x00USE TEMP B-TREE FOR %s\x00USE TEMP B-TREE FOR %sORDER BY\x00RIGHT PART OF \x00column%d\x00%.*z:%u\x00NUM\x00cannot use window functions in recursive queries\x00recursive aggregate queries not supported\x00SETUP\x00RECURSIVE 
STEP\x00SCAN %d CONSTANT ROW%s\x00S\x00COMPOUND QUERY\x00LEFT-MOST SUBQUERY\x00%s USING TEMP B-TREE\x00all VALUES must have the same number of terms\x00SELECTs to the left and right of %s do not have the same number of result columns\x00MERGE (%s)\x00LEFT\x00RIGHT\x00no such index: %s\x00'%s' is not a function\x00no such index: \"%s\"\x00multiple references to recursive table: %s\x00circular reference: %s\x00table %s has %d values for %d columns\x00multiple recursive references: %s\x00recursive reference in a subquery: %s\x00%!S\x00too many references to \"%s\": max 65535\x00access to view \"%s\" prohibited\x00..%s\x00%s.%s.%s\x00no such table: %s\x00no tables specified\x00too many columns in result set\x00DISTINCT aggregates must have exactly one argument\x00USE TEMP B-TREE FOR %s(DISTINCT)\x00SCAN %s%s%s\x00 USING COVERING INDEX \x00target object/alias may not appear in FROM clause: %s\x00expected %d columns for '%s' but got %d\x00CO-ROUTINE %!S\x00MATERIALIZE %!S\x00DISTINCT\x00GROUP BY\x00sqlite3_get_table() called with two or more incompatible queries\x00temporary trigger may not have qualified name\x00trigger\x00cannot create triggers on virtual tables\x00trigger %T already exists\x00cannot create trigger on system table\x00cannot create %s trigger on view: %S\x00BEFORE\x00AFTER\x00cannot create INSTEAD OF trigger on table: %S\x00trigger \"%s\" may not write to shadow table \"%s\"\x00INSERT INTO %Q.sqlite_master VALUES('trigger',%Q,%Q,0,'CREATE TRIGGER %q')\x00type='trigger' AND name='%q'\x00no such trigger: %S\x00DELETE FROM %Q.sqlite_master WHERE name=%Q AND type='trigger'\x00%s RETURNING is not available on virtual tables\x00DELETE\x00UPDATE\x00RETURNING may not use \"TABLE.*\" wildcards\x00-- TRIGGER %s\x00cannot UPDATE generated column \"%s\"\x00no such column: %s\x00rows updated\x00%r \x00%sON CONFLICT clause does not match any PRIMARY KEY or UNIQUE constraint\x00CRE\x00INS\x00cannot VACUUM from within a transaction\x00cannot VACUUM - SQL statements in progress\x00non-text filename\x00ATTACH %Q AS vacuum_db\x00output file already exists\x00SELECT sql FROM \"%w\".sqlite_schema WHERE type='table'AND name<>'sqlite_sequence' AND coalesce(rootpage,1)>0\x00SELECT sql FROM \"%w\".sqlite_schema WHERE type='index'\x00SELECT'INSERT INTO vacuum_db.'||quote(name)||' SELECT*FROM\"%w\".'||quote(name)FROM vacuum_db.sqlite_schema WHERE type='table'AND coalesce(rootpage,1)>0\x00INSERT INTO vacuum_db.sqlite_schema SELECT*FROM \"%w\".sqlite_schema WHERE type IN('view','trigger') OR(type='table'AND rootpage=0)\x00CREATE VIRTUAL TABLE %T\x00UPDATE %Q.sqlite_master SET type='table', name=%Q, tbl_name=%Q, rootpage=0, sql=%Q WHERE rowid=#%d\x00name=%Q AND sql=%Q\x00vtable constructor called recursively: %s\x00vtable constructor failed: %s\x00vtable constructor did not declare schema: %s\x00no such module: %s\x00\x00 AND \x00(\x00 (\x00%s=?\x00ANY(%s)\x00>\x00<\x00%s %S\x00SEARCH\x00SCAN\x00AUTOMATIC PARTIAL COVERING INDEX\x00AUTOMATIC COVERING INDEX\x00COVERING INDEX %s\x00INDEX %s\x00 USING \x00 USING INTEGER PRIMARY KEY (%s\x00>? 
AND %s\x00%c?)\x00 VIRTUAL TABLE INDEX %d:%s\x00 LEFT-JOIN\x00BLOOM FILTER ON %S (\x00rowid=?\x00MULTI-INDEX OR\x00INDEX %d\x00RIGHT-JOIN %s\x00regexp\x00ON clause references tables to its right\x00NOCASE\x00too many arguments on %s() - max %d\x00automatic index on %s(%s)\x00auto-index\x00%s.xBestIndex malfunction\x00abbreviated query algorithm search\x00no query solution\x00at most %d tables in a join\x00SCAN CONSTANT ROW\x00second argument to nth_value must be a positive integer\x00argument of ntile must be a positive integer\x00row_number\x00dense_rank\x00rank\x00percent_rank\x00cume_dist\x00ntile\x00last_value\x00nth_value\x00first_value\x00lead\x00lag\x00no such window: %s\x00RANGE with offset PRECEDING/FOLLOWING requires one ORDER BY expression\x00FILTER clause may only be used with aggregate window functions\x00misuse of aggregate: %s()\x00unsupported frame specification\x00PARTITION clause\x00ORDER BY clause\x00frame specification\x00cannot override %s of window: %s\x00DISTINCT is not supported for window functions\x00frame starting offset must be a non-negative integer\x00frame ending offset must be a non-negative integer\x00frame starting offset must be a non-negative number\x00frame ending offset must be a non-negative number\x00%s clause should come after %s not before\x00ORDER BY\x00LIMIT\x00too many terms in compound SELECT\x00syntax error after column name \"%.*s\"\x00parser stack overflow\x00unknown table option: %.*s\x00set list\x00near \"%T\": syntax error\x00qualified table names are not allowed on INSERT, UPDATE, and DELETE statements within triggers\x00the INDEXED BY clause is not allowed on UPDATE or DELETE statements within triggers\x00the NOT INDEXED clause is not allowed on UPDATE or DELETE statements within triggers\x00incomplete input\x00unrecognized token: \"%T\"\x00%s in \"%s\"\x00create\x00temp\x00temporary\x00end\x00explain\x00unable to close due to unfinalized statements or unfinished backups\x00unknown error\x00abort due to ROLLBACK\x00another row available\x00no more rows available\x00not an error\x00SQL logic error\x00access permission denied\x00query aborted\x00database is locked\x00database table is locked\x00attempt to write a readonly database\x00interrupted\x00disk I/O error\x00database disk image is malformed\x00unknown operation\x00database or disk is full\x00unable to open database file\x00locking protocol\x00constraint failed\x00datatype mismatch\x00bad parameter or other API misuse\x00authorization denied\x00column index out of range\x00file is not a database\x00notification message\x00warning message\x00unable to delete/modify user-function due to active statements\x00unable to use function %s in the requested context\x00unknown database: %s\x00unable to delete/modify collation sequence due to active statements\x00file:\x00localhost\x00invalid uri authority: %.*s\x00vfs\x00cache\x00mode\x00no such %s mode: %s\x00%s mode not allowed: %s\x00no such vfs: %s\x00shared\x00private\x00ro\x00rw\x00rwc\x00RTRIM\x00\x00\x00\x00%s at line %d of [%.10s]\x00database corruption\x00misuse\x00cannot open file\x00no such table column: %s.%s\x00SQLITE_\x00database is deadlocked\x00array\x00object\x000123456789abcdef\x00JSON cannot hold BLOB values\x00malformed JSON\x00[0]\x00JSON path error near '%q'\x00json_%s() needs an odd number of arguments\x00$[\x00$.\x00json_object() requires an even number of arguments\x00json_object() labels must be TEXT\x00set\x00insert\x00[]\x00{}\x00CREATE TABLE x(key,value,type,atom,id,parent,fullkey,path,json HIDDEN,root 
HIDDEN)\x00.%.*s\x00[%d]\x00$\x00json\x00json_array\x00json_array_length\x00json_extract\x00->\x00->>\x00json_insert\x00json_object\x00json_patch\x00json_quote\x00json_remove\x00json_replace\x00json_set\x00json_type\x00json_valid\x00json_group_array\x00json_group_object\x00json_each\x00json_tree\x00%s_node\x00data\x00DROP TABLE '%q'.'%q_node';DROP TABLE '%q'.'%q_rowid';DROP TABLE '%q'.'%q_parent';\x00RtreeMatchArg\x00SELECT * FROM %Q.%Q\x00UNIQUE constraint failed: %s.%s\x00rtree constraint failed: %s.(%s<=%s)\x00ALTER TABLE %Q.'%q_node' RENAME TO \"%w_node\";ALTER TABLE %Q.'%q_parent' RENAME TO \"%w_parent\";ALTER TABLE %Q.'%q_rowid' RENAME TO \"%w_rowid\";\x00SELECT stat FROM %Q.sqlite_stat1 WHERE tbl = '%q_rowid'\x00node\x00CREATE TABLE \"%w\".\"%w_rowid\"(rowid INTEGER PRIMARY KEY,nodeno\x00,a%d\x00);CREATE TABLE \"%w\".\"%w_node\"(nodeno INTEGER PRIMARY KEY,data);\x00CREATE TABLE \"%w\".\"%w_parent\"(nodeno INTEGER PRIMARY KEY,parentnode);\x00INSERT INTO \"%w\".\"%w_node\"VALUES(1,zeroblob(%d))\x00INSERT INTO\"%w\".\"%w_rowid\"(rowid,nodeno)VALUES(?1,?2)ON CONFLICT(rowid)DO UPDATE SET nodeno=excluded.nodeno\x00SELECT * FROM \"%w\".\"%w_rowid\" WHERE rowid=?1\x00UPDATE \"%w\".\"%w_rowid\"SET \x00a%d=coalesce(?%d,a%d)\x00a%d=?%d\x00 WHERE rowid=?1\x00INSERT OR REPLACE INTO '%q'.'%q_node' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_node' WHERE nodeno = ?1\x00SELECT nodeno FROM '%q'.'%q_rowid' WHERE rowid = ?1\x00INSERT OR REPLACE INTO '%q'.'%q_rowid' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_rowid' WHERE rowid = ?1\x00SELECT parentnode FROM '%q'.'%q_parent' WHERE nodeno = ?1\x00INSERT OR REPLACE INTO '%q'.'%q_parent' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_parent' WHERE nodeno = ?1\x00PRAGMA %Q.page_size\x00SELECT length(data) FROM '%q'.'%q_node' WHERE nodeno = 1\x00undersize RTree blobs in \"%q_node\"\x00Wrong number of columns for an rtree table\x00Too few columns for an rtree table\x00Too many columns for an rtree table\x00Auxiliary rtree columns must be last\x00CREATE TABLE x(%.*s INT\x00,%.*s\x00);\x00,%.*s REAL\x00,%.*s INT\x00{%lld\x00 %g\x00}\x00Invalid argument to rtreedepth()\x00%z%s%z\x00SELECT data FROM %Q.'%q_node' WHERE nodeno=?\x00Node %lld missing from database\x00SELECT parentnode FROM %Q.'%q_parent' WHERE nodeno=?1\x00SELECT nodeno FROM %Q.'%q_rowid' WHERE rowid=?1\x00Mapping (%lld -> %lld) missing from %s table\x00%_rowid\x00%_parent\x00Found (%lld -> %lld) in %s table, expected (%lld -> %lld)\x00Dimension %d of cell %d on node %lld is corrupt\x00Dimension %d of cell %d on node %lld is corrupt relative to parent\x00Node %lld is too small (%d bytes)\x00Rtree depth out of range (%d)\x00Node %lld is too small for cell count of %d (%d bytes)\x00SELECT count(*) FROM %Q.'%q%s'\x00Wrong number of entries in %%%s table - expected %lld, actual %lld\x00SELECT * FROM %Q.'%q_rowid'\x00Schema corrupt or not an rtree\x00_rowid\x00_parent\x00END\x00wrong number of arguments to function rtreecheck()\x00[\x00[%!g,%!g],\x00[%!g,%!g]]\x00\x00CREATE TABLE x(_shape\x00,%s\x00rtree\x00fullscan\x00_shape does not contain a valid polygon\x00geopoly_overlap\x00geopoly_within\x00geopoly\x00geopoly_area\x00geopoly_blob\x00geopoly_json\x00geopoly_svg\x00geopoly_contains_point\x00geopoly_debug\x00geopoly_bbox\x00geopoly_xform\x00geopoly_regular\x00geopoly_ccw\x00geopoly_group_bbox\x00rtreenode\x00rtreedepth\x00rtreecheck\x00rtree_i32\x00corrupt fossil delta\x00DROP TRIGGER IF EXISTS temp.rbu_insert_tr;DROP TRIGGER IF EXISTS temp.rbu_update1_tr;DROP TRIGGER IF EXISTS temp.rbu_update2_tr;DROP TRIGGER IF 
EXISTS temp.rbu_delete_tr;\x00SELECT rbu_target_name(name, type='view') AS target, name FROM sqlite_schema WHERE type IN ('table', 'view') AND target IS NOT NULL %s ORDER BY name\x00AND rootpage!=0 AND rootpage IS NOT NULL\x00SELECT name, rootpage, sql IS NULL OR substr(8, 6)=='UNIQUE' FROM main.sqlite_schema WHERE type='index' AND tbl_name = ?\x00SELECT (sql COLLATE nocase BETWEEN 'CREATE VIRTUAL' AND 'CREATE VIRTUAM'), rootpage FROM sqlite_schema WHERE name=%Q\x00PRAGMA index_list=%Q\x00SELECT rootpage FROM sqlite_schema WHERE name = %Q\x00PRAGMA table_info=%Q\x00PRAGMA main.index_list = %Q\x00PRAGMA main.index_xinfo = %Q\x00SELECT * FROM '%q'\x00rbu_\x00rbu_rowid\x00table %q %s rbu_rowid column\x00may not have\x00requires\x00PRAGMA table_info(%Q)\x00column missing from %q: %s\x00%z%s\"%w\"\x00%z%s%s\"%w\"%s\x00SELECT max(_rowid_) FROM \"%s%w\"\x00 WHERE _rowid_ > %lld \x00 DESC\x00quote(\x00||','||\x00SELECT %s FROM \"%s%w\" ORDER BY %s LIMIT 1\x00 WHERE (%s) > (%s) \x00_rowid_\x00%z%s \"%w\" COLLATE %Q\x00%z%s \"rbu_imp_%d%w\" COLLATE %Q DESC\x00%z%s quote(\"rbu_imp_%d%w\")\x00SELECT %s FROM \"rbu_imp_%w\" ORDER BY %s LIMIT 1\x00%z%s%s\x00(%s) > (%s)\x00%z%s(%.*s) COLLATE %Q\x00%z%s\"%w\" COLLATE %Q\x00%z%s\"rbu_imp_%d%w\"%s\x00%z%s\"rbu_imp_%d%w\" %s COLLATE %Q\x00%z%s\"rbu_imp_%d%w\" IS ?\x00%z%s%s.\"%w\"\x00%z%sNULL\x00%z, %s._rowid_\x00_rowid_ = ?%d\x00%z%sc%d=?%d\x00_rowid_ = (SELECT id FROM rbu_imposter2 WHERE %z)\x00%z%s\"%w\"=?%d\x00invalid rbu_control value\x00%z%s\"%w\"=rbu_delta(\"%w\", ?%d)\x00%z%s\"%w\"=rbu_fossil_delta(\"%w\", ?%d)\x00PRIMARY KEY(\x00%z%s\"%w\"%s\x00%z)\x00SELECT name FROM sqlite_schema WHERE rootpage = ?\x00%z%sc%d %s COLLATE %Q\x00%z%sc%d%s\x00%z, id INTEGER\x00CREATE TABLE rbu_imposter2(%z, PRIMARY KEY(%z)) WITHOUT ROWID\x00PRIMARY KEY \x00%z%s\"%w\" %s %sCOLLATE %Q%s\x00 NOT NULL\x00%z, %z\x00CREATE TABLE \"rbu_imp_%w\"(%z)%s\x00 WITHOUT ROWID\x00INSERT INTO %s.'rbu_tmp_%q'(rbu_control,%s%s) VALUES(%z)\x00SELECT trim(sql) FROM sqlite_schema WHERE type='index' AND name=?\x00 LIMIT -1 OFFSET %d\x00CREATE TABLE \"rbu_imp_%w\"( %s, PRIMARY KEY( %s ) ) WITHOUT ROWID\x00INSERT INTO \"rbu_imp_%w\" VALUES(%s)\x00DELETE FROM \"rbu_imp_%w\" WHERE %s\x00SELECT %s, 0 AS rbu_control FROM '%q' %s %s %s ORDER BY %s%s\x00AND\x00WHERE\x00SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' %s ORDER BY %s%s\x00SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' %s UNION ALL SELECT %s, rbu_control FROM '%q' %s %s typeof(rbu_control)='integer' AND rbu_control!=1 ORDER BY %s%s\x00rbu_imp_\x00INSERT INTO \"%s%w\"(%s%s) VALUES(%s)\x00, _rowid_\x00DELETE FROM \"%s%w\" WHERE %s\x00, rbu_rowid\x00CREATE TABLE IF NOT EXISTS %s.'rbu_tmp_%q' AS SELECT *%s FROM '%q' WHERE 0;\x00, 0 AS rbu_rowid\x00CREATE TEMP TRIGGER rbu_delete_tr BEFORE DELETE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(3, %s);END;CREATE TEMP TRIGGER rbu_update1_tr BEFORE UPDATE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(3, %s);END;CREATE TEMP TRIGGER rbu_update2_tr AFTER UPDATE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(4, %s);END;\x00CREATE TEMP TRIGGER rbu_insert_tr AFTER INSERT ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(0, %s);END;\x00,_rowid_ \x00,rbu_rowid\x00SELECT %s,%s rbu_control%s FROM '%q'%s %s %s %s\x000 AS \x00UPDATE \"%s%w\" SET %s WHERE %s\x00SELECT k, v FROM %s.rbu_state\x00file:///%s-vacuum?modeof=%s\x00ATTACH %Q AS stat\x00CREATE TABLE IF NOT EXISTS %s.rbu_state(k INTEGER PRIMARY KEY, v)\x00cannot vacuum wal mode 
database\x00file:%s-vactmp?rbu_memory=1%s%s\x00&\x00rbu_tmp_insert\x00rbu_fossil_delta\x00rbu_target_name\x00SELECT * FROM sqlite_schema\x00rbu vfs not found\x00PRAGMA main.wal_checkpoint=restart\x00rbu_exclusive_checkpoint\x00%s-oal\x00%s-wal\x00PRAGMA schema_version\x00PRAGMA schema_version = %d\x00INSERT OR REPLACE INTO %s.rbu_state(k, v) VALUES (%d, %d), (%d, %Q), (%d, %Q), (%d, %d), (%d, %d), (%d, %lld), (%d, %lld), (%d, %lld), (%d, %lld), (%d, %Q) \x00PRAGMA main.%s\x00PRAGMA main.%s = %d\x00PRAGMA writable_schema=1\x00SELECT sql FROM sqlite_schema WHERE sql!='' AND rootpage!=0 AND name!='sqlite_sequence' ORDER BY type DESC\x00SELECT * FROM sqlite_schema WHERE rootpage=0 OR rootpage IS NULL\x00INSERT INTO sqlite_schema VALUES(?,?,?,?,?)\x00PRAGMA writable_schema=0\x00DELETE FROM %s.'rbu_tmp_%q'\x00rbu_state mismatch error\x00rbu_vfs_%d\x00SELECT count(*) FROM sqlite_schema WHERE type='index' AND tbl_name = %Q\x00rbu_index_cnt\x00SELECT 1 FROM sqlite_schema WHERE tbl_name = 'rbu_count'\x00SELECT sum(cnt * (1 + rbu_index_cnt(rbu_target_name(tbl))))FROM rbu_count\x00cannot update wal mode database\x00database modified during rbu %s\x00vacuum\x00update\x00BEGIN IMMEDIATE\x00PRAGMA journal_mode=off\x00-vactmp\x00DELETE FROM stat.rbu_state\x00rbu/zipvfs setup error\x00rbu(%s)/%z\x00rbu_memory\x00SELECT 0, 'tbl', '', 0, '', 1 UNION ALL SELECT 1, 'idx', '', 0, '', 2 UNION ALL SELECT 2, 'stat', '', 0, '', 0\x00PRAGMA '%q'.table_info('%q')\x00%z%s\"%w\".\"%w\".\"%w\"=\"%w\".\"%w\".\"%w\"\x00%z%s\"%w\".\"%w\".\"%w\" IS NOT \"%w\".\"%w\".\"%w\"\x00 OR \x00SELECT * FROM \"%w\".\"%w\" WHERE NOT EXISTS ( SELECT 1 FROM \"%w\".\"%w\" WHERE %s)\x00SELECT * FROM \"%w\".\"%w\", \"%w\".\"%w\" WHERE %s AND (%z)\x00table schemas do not match\x00SELECT tbl, ?2, stat FROM %Q.sqlite_stat1 WHERE tbl IS ?1 AND idx IS (CASE WHEN ?2=X'' THEN NULL ELSE ?2 END)\x00SELECT * FROM \x00 WHERE \x00 IS ?\x00SAVEPOINT changeset\x00RELEASE changeset\x00UPDATE main.\x00 SET \x00 = ?\x00idx IS CASE WHEN length(?4)=0 AND typeof(?4)='blob' THEN NULL ELSE ?4 END \x00DELETE FROM main.\x00 AND (?\x00AND \x00INSERT INTO main.\x00) VALUES(?\x00, ?\x00INSERT INTO main.sqlite_stat1 VALUES(?1, CASE WHEN length(?2)=0 AND typeof(?2)='blob' THEN NULL ELSE ?2 END, ?3)\x00DELETE FROM main.sqlite_stat1 WHERE tbl=?1 AND idx IS CASE WHEN length(?2)=0 AND typeof(?2)='blob' THEN NULL ELSE ?2 END AND (?4 OR stat IS ?3)\x00SAVEPOINT replace_op\x00RELEASE replace_op\x00SAVEPOINT changeset_apply\x00PRAGMA defer_foreign_keys = 1\x00sqlite3changeset_apply(): no such table: %s\x00sqlite3changeset_apply(): table %s has %d columns, expected %d or more\x00sqlite3changeset_apply(): primary key mismatch for table %s\x00PRAGMA defer_foreign_keys = 0\x00RELEASE changeset_apply\x00ROLLBACK TO changeset_apply\x00fts5: parser stack overflow\x00fts5: syntax error near \"%.*s\"\x00%z%.*s\x00wrong number of arguments to function highlight()\x00wrong number of arguments to function snippet()\x00snippet\x00highlight\x00bm25\x00prefix\x00malformed prefix=... directive\x00too many prefix indexes (max %d)\x00prefix length out of range (max 999)\x00tokenize\x00multiple tokenize=... directives\x00parse error in tokenize directive\x00content\x00multiple content=... directives\x00%Q.%Q\x00content_rowid\x00multiple content_rowid=... directives\x00columnsize\x00malformed columnsize=... directive\x00columns\x00malformed detail=... 
directive\x00unrecognized option: \"%.*s\"\x00reserved fts5 column name: %s\x00unindexed\x00unrecognized column option: %s\x00T.%Q\x00, T.%Q\x00, T.c%d\x00reserved fts5 table name: %s\x00parse error in \"%s\"\x00docsize\x00%Q.'%q_%s'\x00CREATE TABLE x(\x00%z%s%Q\x00%z, %Q HIDDEN, %s HIDDEN)\x00pgsz\x00hashsize\x00automerge\x00usermerge\x00crisismerge\x00SELECT k, v FROM %Q.'%q_config'\x00version\x00invalid fts5 file format (found %d, expected %d) - run 'rebuild'\x00unterminated string\x00fts5: syntax error near \"%.1s\"\x00OR\x00NOT\x00NEAR\x00expected integer, got \"%.*s\"\x00fts5: column queries are not supported (detail=none)\x00fts5: %s queries are not supported (detail!=full)\x00phrase\x00block\x00REPLACE INTO '%q'.'%q_data'(id, block) VALUES(?,?)\x00DELETE FROM '%q'.'%q_data' WHERE id>=? AND id<=?\x00DELETE FROM '%q'.'%q_idx' WHERE segid=?\x00PRAGMA %Q.data_version\x00SELECT pgno FROM '%q'.'%q_idx' WHERE segid=? AND term<=? ORDER BY term DESC LIMIT 1\x00INSERT INTO '%q'.'%q_idx'(segid,term,pgno) VALUES(?,?,?)\x00%s_data\x00id INTEGER PRIMARY KEY, block BLOB\x00segid, term, pgno, PRIMARY KEY(segid, term)\x00SELECT segid, term, (pgno>>1), (pgno&1) FROM %Q.'%q_idx' WHERE segid=%d ORDER BY 1, 2\x00\x00\x00\x00\x00\x00recursively defined fts5 content table\x00SELECT rowid, rank FROM %Q.%Q ORDER BY %s(\"%w\"%s%s) %s\x00DESC\x00ASC\x00reads\x00unknown special query: %.*s\x00SELECT %s\x00no such function: %s\x00parse error in rank function: %s\x00%s: table does not support scanning\x00delete-all\x00'delete-all' may only be used with a contentless or external content fts5 table\x00rebuild\x00'rebuild' may not be used with a contentless fts5 table\x00merge\x00integrity-check\x00cannot %s contentless fts5 table: %s\x00DELETE from\x00no such cursor: %lld\x00no such tokenizer: %s\x00error in tokenizer constructor\x00fts5_api_ptr\x00fts5: 2023-03-22 11:56:21 0d1fc92f94cb6b76bffe3ec34d69cffde2924203304e8ffc4155597af0c191da\x00config\x00fts5\x00fts5_source_id\x00SELECT %s FROM %s T WHERE T.%Q >= ? AND T.%Q <= ? ORDER BY T.%Q ASC\x00SELECT %s FROM %s T WHERE T.%Q <= ? AND T.%Q >= ? 
ORDER BY T.%Q DESC\x00SELECT %s FROM %s T WHERE T.%Q=?\x00INSERT INTO %Q.'%q_content' VALUES(%s)\x00REPLACE INTO %Q.'%q_content' VALUES(%s)\x00DELETE FROM %Q.'%q_content' WHERE id=?\x00REPLACE INTO %Q.'%q_docsize' VALUES(?,?)\x00DELETE FROM %Q.'%q_docsize' WHERE id=?\x00SELECT sz FROM %Q.'%q_docsize' WHERE id=?\x00REPLACE INTO %Q.'%q_config' VALUES(?,?)\x00SELECT %s FROM %s AS T\x00DROP TABLE IF EXISTS %Q.'%q_data';DROP TABLE IF EXISTS %Q.'%q_idx';DROP TABLE IF EXISTS %Q.'%q_config';\x00DROP TABLE IF EXISTS %Q.'%q_docsize';\x00DROP TABLE IF EXISTS %Q.'%q_content';\x00ALTER TABLE %Q.'%q_%s' RENAME TO '%q_%s';\x00CREATE TABLE %Q.'%q_%q'(%s)%s\x00fts5: error creating shadow table %q_%s: %s\x00id INTEGER PRIMARY KEY\x00, c%d\x00id INTEGER PRIMARY KEY, sz BLOB\x00k PRIMARY KEY, v\x00DELETE FROM %Q.'%q_data';DELETE FROM %Q.'%q_idx';\x00DELETE FROM %Q.'%q_docsize';\x00SELECT count(*) FROM %Q.'%q_%s'\x00tokenchars\x00separators\x00L* N* Co\x00categories\x00remove_diacritics\x00unicode61\x00al\x00ance\x00ence\x00er\x00ic\x00able\x00ible\x00ant\x00ement\x00ment\x00ent\x00ion\x00ou\x00ism\x00ate\x00iti\x00ous\x00ive\x00ize\x00at\x00bl\x00ble\x00iz\x00ational\x00tional\x00tion\x00enci\x00anci\x00izer\x00logi\x00bli\x00alli\x00entli\x00eli\x00e\x00ousli\x00ization\x00ation\x00ator\x00alism\x00iveness\x00fulness\x00ful\x00ousness\x00aliti\x00iviti\x00biliti\x00ical\x00ness\x00icate\x00iciti\x00ative\x00alize\x00eed\x00ee\x00ed\x00ing\x00case_sensitive\x00ascii\x00porter\x00trigram\x00col\x00row\x00instance\x00fts5vocab: unknown table type: %Q\x00CREATE TABlE vocab(term, col, doc, cnt)\x00CREATE TABlE vocab(term, doc, cnt)\x00CREATE TABlE vocab(term, doc, col, offset)\x00wrong number of vtable arguments\x00recursive definition for %s.%s\x00SELECT t.%Q FROM %Q.%Q AS t WHERE t.%Q MATCH '*id'\x00no such fts5 table: %s.%s\x00fts5vocab\x002023-03-22 11:56:21 0d1fc92f94cb6b76bffe3ec34d69cffde2924203304e8ffc4155597af0c191da\x00" var ts = (*reflect.StringHeader)(unsafe.Pointer(&ts1)).Data diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/sqlite/lib/sqlite_openbsd_arm64.go temporal-1.22.5/src/vendor/modernc.org/sqlite/lib/sqlite_openbsd_arm64.go --- temporal-1.21.5-1/src/vendor/modernc.org/sqlite/lib/sqlite_openbsd_arm64.go 2023-09-29 14:03:35.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/sqlite/lib/sqlite_openbsd_arm64.go 2024-02-23 09:46:16.000000000 +0000 @@ -1,4 +1,4 @@ -// Code generated by 'ccgo -DSQLITE_PRIVATE= -export-defines "" -export-enums "" -export-externs X -export-fields F -export-typedefs "" -ignore-unsupported-alignment -pkgname sqlite3 -volatile=sqlite3_io_error_pending,sqlite3_open_file_count,sqlite3_pager_readdb_count,sqlite3_pager_writedb_count,sqlite3_pager_writej_count,sqlite3_search_count,sqlite3_sort_count,saved_cnt,randomnessPid -o lib/sqlite_openbsd_arm64.go -trace-translation-units testdata/sqlite-amalgamation-3410000/sqlite3.c -full-path-comments -DNDEBUG -DHAVE_USLEEP -DLONGDOUBLE_TYPE=double -DSQLITE_CORE -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_FTS5 -DSQLITE_ENABLE_GEOPOLY -DSQLITE_ENABLE_MATH_FUNCTIONS -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_OFFSET_SQL_FUNC -DSQLITE_ENABLE_PREUPDATE_HOOK -DSQLITE_ENABLE_RBU -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_SESSION -DSQLITE_ENABLE_SNAPSHOT -DSQLITE_ENABLE_STAT4 -DSQLITE_ENABLE_UNLOCK_NOTIFY -DSQLITE_LIKE_DOESNT_MATCH_BLOBS -DSQLITE_MUTEX_APPDEF=1 -DSQLITE_MUTEX_NOOP -DSQLITE_SOUNDEX -DSQLITE_THREADSAFE=1 -DSQLITE_OS_UNIX=1', DO NOT EDIT. 
+// Code generated by 'ccgo -DSQLITE_PRIVATE= -export-defines "" -export-enums "" -export-externs X -export-fields F -export-typedefs "" -ignore-unsupported-alignment -pkgname sqlite3 -volatile=sqlite3_io_error_pending,sqlite3_open_file_count,sqlite3_pager_readdb_count,sqlite3_pager_writedb_count,sqlite3_pager_writej_count,sqlite3_search_count,sqlite3_sort_count,saved_cnt,randomnessPid -o lib/sqlite_openbsd_arm64.go -trace-translation-units testdata/sqlite-amalgamation-3410200/sqlite3.c -full-path-comments -DNDEBUG -DHAVE_USLEEP -DLONGDOUBLE_TYPE=double -DSQLITE_CORE -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_FTS5 -DSQLITE_ENABLE_GEOPOLY -DSQLITE_ENABLE_MATH_FUNCTIONS -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_OFFSET_SQL_FUNC -DSQLITE_ENABLE_PREUPDATE_HOOK -DSQLITE_ENABLE_RBU -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_SESSION -DSQLITE_ENABLE_SNAPSHOT -DSQLITE_ENABLE_STAT4 -DSQLITE_ENABLE_UNLOCK_NOTIFY -DSQLITE_LIKE_DOESNT_MATCH_BLOBS -DSQLITE_MUTEX_APPDEF=1 -DSQLITE_MUTEX_NOOP -DSQLITE_SOUNDEX -DSQLITE_THREADSAFE=1 -DSQLITE_OS_UNIX=1', DO NOT EDIT. package sqlite3 @@ -675,11 +675,11 @@ NC_OrderAgg = 0x8000000 NC_PartIdx = 0x000002 NC_SelfRef = 0x00002e + NC_Subquery = 0x000040 NC_UAggInfo = 0x000100 NC_UBaseReg = 0x000400 NC_UEList = 0x000080 NC_UUpsert = 0x000200 - NC_VarSelect = 0x000040 NDEBUG = 1 NMEADISC = 7 NN = 1 @@ -1837,7 +1837,7 @@ SQLITE_SHM_UNLOCK = 1 SQLITE_SORTER_PMASZ = 250 SQLITE_SOUNDEX = 1 - SQLITE_SOURCE_ID = "2023-02-21 18:09:37 05941c2a04037fc3ed2ffae11f5d2260706f89431f463518740f72ada350866d" + SQLITE_SOURCE_ID = "2023-03-22 11:56:21 0d1fc92f94cb6b76bffe3ec34d69cffde2924203304e8ffc4155597af0c191da" SQLITE_SO_ASC = 0 SQLITE_SO_DESC = 1 SQLITE_SO_UNDEFINED = -1 @@ -1945,8 +1945,8 @@ SQLITE_UTF8 = 1 SQLITE_VDBEINT_H = 0 SQLITE_VDBE_H = 0 - SQLITE_VERSION = "3.41.0" - SQLITE_VERSION_NUMBER = 3041000 + SQLITE_VERSION = "3.41.2" + SQLITE_VERSION_NUMBER = 3041002 SQLITE_VTABRISK_High = 2 SQLITE_VTABRISK_Low = 0 SQLITE_VTABRISK_Normal = 1 @@ -5214,7 +5214,8 @@ FiIdxCur int32 FiIdxCol int32 FbMaybeNullRow U8 - F__ccgo_pad1 [3]byte + Faff U8 + F__ccgo_pad1 [2]byte FpIENext uintptr } @@ -5856,17 +5857,18 @@ // Handle type for pages. type PgHdr2 = struct { - FpPage uintptr - FpData uintptr - FpExtra uintptr - FpCache uintptr - FpDirty uintptr - FpPager uintptr - Fpgno Pgno - Fflags U16 - FnRef I16 - FpDirtyNext uintptr - FpDirtyPrev uintptr + FpPage uintptr + FpData uintptr + FpExtra uintptr + FpCache uintptr + FpDirty uintptr + FpPager uintptr + Fpgno Pgno + Fflags U16 + F__ccgo_pad1 [2]byte + FnRef I64 + FpDirtyNext uintptr + FpDirtyPrev uintptr } // Handle type for pages. 
@@ -6087,14 +6089,14 @@ FpDirty uintptr FpDirtyTail uintptr FpSynced uintptr - FnRefSum int32 + FnRefSum I64 FszCache int32 FszSpill int32 FszPage int32 FszExtra int32 FbPurgeable U8 FeCreate U8 - F__ccgo_pad1 [2]byte + F__ccgo_pad1 [6]byte FxStress uintptr FpStress uintptr FpCache uintptr @@ -6900,7 +6902,7 @@ _ = pMutex if op < 0 || op >= int32(uint64(unsafe.Sizeof([10]Sqlite3StatValueType{}))/uint64(unsafe.Sizeof(Sqlite3StatValueType(0)))) { - return Xsqlite3MisuseError(tls, 23229) + return Xsqlite3MisuseError(tls, 23233) } if statMutex[op] != 0 { pMutex = Xsqlite3Pcache1Mutex(tls) @@ -14829,7 +14831,7 @@ for p = (*UnixInodeInfo)(unsafe.Pointer(pInode)).FpUnused; p != 0; p = pNext { pNext = (*UnixUnusedFd)(unsafe.Pointer(p)).FpNext - robust_close(tls, pFile, (*UnixUnusedFd)(unsafe.Pointer(p)).Ffd, 38271) + robust_close(tls, pFile, (*UnixUnusedFd)(unsafe.Pointer(p)).Ffd, 38275) Xsqlite3_free(tls, p) } (*UnixInodeInfo)(unsafe.Pointer(pInode)).FpUnused = uintptr(0) @@ -15305,7 +15307,7 @@ func closeUnixFile(tls *libc.TLS, id uintptr) int32 { var pFile uintptr = id if (*UnixFile)(unsafe.Pointer(pFile)).Fh >= 0 { - robust_close(tls, pFile, (*UnixFile)(unsafe.Pointer(pFile)).Fh, 39055) + robust_close(tls, pFile, (*UnixFile)(unsafe.Pointer(pFile)).Fh, 39059) (*UnixFile)(unsafe.Pointer(pFile)).Fh = -1 } @@ -15583,7 +15585,7 @@ if fd >= 0 { return SQLITE_OK } - return unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 40676), ts+3362, bp+8, 40676) + return unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 40680), ts+3362, bp+8, 40680) } func unixSync(tls *libc.TLS, id uintptr, flags int32) int32 { @@ -15600,14 +15602,14 @@ if rc != 0 { storeLastErrno(tls, pFile, *(*int32)(unsafe.Pointer(libc.X__errno(tls)))) - return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(4)<<8, ts+3650, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40717) + return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(4)<<8, ts+3650, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40721) } if int32((*UnixFile)(unsafe.Pointer(pFile)).FctrlFlags)&UNIXFILE_DIRSYNC != 0 { rc = (*(*func(*libc.TLS, uintptr, uintptr) int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 17*24 + 8)))(tls, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, bp) if rc == SQLITE_OK { full_fsync(tls, *(*int32)(unsafe.Pointer(bp)), 0, 0) - robust_close(tls, pFile, *(*int32)(unsafe.Pointer(bp)), 40731) + robust_close(tls, pFile, *(*int32)(unsafe.Pointer(bp)), 40735) } else { rc = SQLITE_OK } @@ -15627,7 +15629,7 @@ rc = robust_ftruncate(tls, (*UnixFile)(unsafe.Pointer(pFile)).Fh, nByte) if rc != 0 { storeLastErrno(tls, pFile, *(*int32)(unsafe.Pointer(libc.X__errno(tls)))) - return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(6)<<8, ts+3281, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40762) + return unixLogErrorAtLine(tls, SQLITE_IOERR|int32(6)<<8, ts+3281, (*UnixFile)(unsafe.Pointer(pFile)).FzPath, 40766) } else { return SQLITE_OK } @@ -15882,7 +15884,7 @@ } Xsqlite3_free(tls, (*UnixShmNode)(unsafe.Pointer(p)).FapRegion) if (*UnixShmNode)(unsafe.Pointer(p)).FhShm >= 0 { - robust_close(tls, pFd, (*UnixShmNode)(unsafe.Pointer(p)).FhShm, 41442) + robust_close(tls, pFd, (*UnixShmNode)(unsafe.Pointer(p)).FhShm, 41446) (*UnixShmNode)(unsafe.Pointer(p)).FhShm = -1 } (*UnixInodeInfo)(unsafe.Pointer((*UnixShmNode)(unsafe.Pointer(p)).FpInode)).FpShmNode = uintptr(0) @@ -15910,7 +15912,7 @@ rc = unixShmSystemLock(tls, pDbFd, F_WRLCK, (22+SQLITE_SHM_NLOCK)*4+SQLITE_SHM_NLOCK, 1) if rc == SQLITE_OK && robust_ftruncate(tls, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FhShm, int64(3)) != 0 { - rc 
= unixLogErrorAtLine(tls, SQLITE_IOERR|int32(18)<<8, ts+3281, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename, 41499) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(18)<<8, ts+3281, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename, 41503) } } } else if int32((*flock)(unsafe.Pointer(bp+8)).Fl_type) == F_WRLCK { @@ -16009,7 +16011,7 @@ if !((*unixShmNode)(unsafe.Pointer(pShmNode)).FhShm < 0) { goto __10 } - rc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 41624), ts+3245, zShm, 41624) + rc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 41628), ts+3245, zShm, 41628) goto shm_open_err __10: ; @@ -16139,7 +16141,7 @@ goto __14 } zFile = (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(19)<<8, ts+3316, zFile, 41768) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(19)<<8, ts+3316, zFile, 41772) goto shmpage_out __14: ; @@ -16185,7 +16187,7 @@ if !(pMem == libc.UintptrFromInt32(-1)) { goto __20 } - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(21)<<8, ts+3403, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename, 41795) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(21)<<8, ts+3403, (*UnixShmNode)(unsafe.Pointer(pShmNode)).FzFilename, 41799) goto shmpage_out __20: ; @@ -16457,7 +16459,7 @@ unixEnterMutex(tls) rc = findInodeInfo(tls, pNew, pNew+16) if rc != SQLITE_OK { - robust_close(tls, pNew, h, 42672) + robust_close(tls, pNew, h, 42676) h = -1 } unixLeaveMutex(tls) @@ -16478,7 +16480,7 @@ storeLastErrno(tls, pNew, 0) if rc != SQLITE_OK { if h >= 0 { - robust_close(tls, pNew, h, 42757) + robust_close(tls, pNew, h, 42761) } } else { (*Sqlite3_file)(unsafe.Pointer(pId)).FpMethods = pLockingStyle @@ -16794,7 +16796,7 @@ if !(fd < 0) { goto __19 } - rc2 = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43198), ts+3245, zName, 43198) + rc2 = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43202), ts+3245, zName, 43202) if !(rc == SQLITE_OK) { goto __20 } @@ -16885,7 +16887,7 @@ if *(*int32)(unsafe.Pointer(libc.X__errno(tls))) == ENOENT { rc = SQLITE_IOERR | int32(23)<<8 } else { - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(10)<<8, ts+3355, zPath, 43337) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(10)<<8, ts+3355, zPath, 43341) } return rc } @@ -16893,9 +16895,9 @@ rc = (*(*func(*libc.TLS, uintptr, uintptr) int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 17*24 + 8)))(tls, zPath, bp) if rc == SQLITE_OK { if full_fsync(tls, *(*int32)(unsafe.Pointer(bp)), 0, 0) != 0 { - rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(5)<<8, ts+3772, zPath, 43347) + rc = unixLogErrorAtLine(tls, SQLITE_IOERR|int32(5)<<8, ts+3772, zPath, 43351) } - robust_close(tls, uintptr(0), *(*int32)(unsafe.Pointer(bp)), 43349) + robust_close(tls, uintptr(0), *(*int32)(unsafe.Pointer(bp)), 43353) } else { rc = SQLITE_OK } @@ -16959,18 +16961,18 @@ zIn = (*DbPath)(unsafe.Pointer(pPath)).FzOut if (*(*func(*libc.TLS, uintptr, uintptr) int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 27*24 + 8)))(tls, zIn, bp) != 0 { if *(*int32)(unsafe.Pointer(libc.X__errno(tls))) != ENOENT { - (*DbPath)(unsafe.Pointer(pPath)).Frc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43443), ts+3443, zIn, 43443) + (*DbPath)(unsafe.Pointer(pPath)).Frc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43447), ts+3443, zIn, 43447) } } else if (*stat)(unsafe.Pointer(bp)).Fst_mode&Mode_t(0170000) == Mode_t(0120000) { var got Ssize_t if libc.PostIncInt32(&(*DbPath)(unsafe.Pointer(pPath)).FnSymlink, 1) > SQLITE_MAX_SYMLINK { - 
(*DbPath)(unsafe.Pointer(pPath)).Frc = Xsqlite3CantopenError(tls, 43449) + (*DbPath)(unsafe.Pointer(pPath)).Frc = Xsqlite3CantopenError(tls, 43453) return } got = (*(*func(*libc.TLS, uintptr, uintptr, Size_t) Ssize_t)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls, zIn, bp+128, uint64(unsafe.Sizeof([1026]int8{}))-uint64(2)) if got <= int64(0) || got >= Ssize_t(unsafe.Sizeof([1026]int8{}))-int64(2) { - (*DbPath)(unsafe.Pointer(pPath)).Frc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43454), ts+3434, zIn, 43454) + (*DbPath)(unsafe.Pointer(pPath)).Frc = unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43458), ts+3434, zIn, 43458) return } *(*int8)(unsafe.Pointer(bp + 128 + uintptr(got))) = int8(0) @@ -17010,14 +17012,14 @@ (*DbPath)(unsafe.Pointer(bp + 1032)).FzOut = zOut if int32(*(*int8)(unsafe.Pointer(zPath))) != '/' { if (*(*func(*libc.TLS, uintptr, Size_t) uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 3*24 + 8)))(tls, bp, uint64(unsafe.Sizeof([1026]int8{}))-uint64(2)) == uintptr(0) { - return unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43512), ts+3263, zPath, 43512) + return unixLogErrorAtLine(tls, Xsqlite3CantopenError(tls, 43516), ts+3263, zPath, 43516) } appendAllPathElements(tls, bp+1032, bp) } appendAllPathElements(tls, bp+1032, zPath) *(*int8)(unsafe.Pointer(zOut + uintptr((*DbPath)(unsafe.Pointer(bp+1032)).FnUsed))) = int8(0) if (*DbPath)(unsafe.Pointer(bp+1032)).Frc != 0 || (*DbPath)(unsafe.Pointer(bp+1032)).FnUsed < 2 { - return Xsqlite3CantopenError(tls, 43518) + return Xsqlite3CantopenError(tls, 43522) } if (*DbPath)(unsafe.Pointer(bp+1032)).FnSymlink != 0 { return SQLITE_OK | int32(2)<<8 @@ -17092,7 +17094,7 @@ for __ccgo := true; __ccgo; __ccgo = got < 0 && *(*int32)(unsafe.Pointer(libc.X__errno(tls))) == EINTR { got = int32((*(*func(*libc.TLS, int32, uintptr, Size_t) Ssize_t)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 8*24 + 8)))(tls, fd, zBuf, uint64(nBuf))) } - robust_close(tls, uintptr(0), fd, 43619) + robust_close(tls, uintptr(0), fd, 43623) } } @@ -18525,7 +18527,7 @@ libc.Xmemset(tls, pPgHdr+32, 0, uint64(unsafe.Sizeof(PgHdr{}))-uint64(uintptr(0)+32)) (*PgHdr)(unsafe.Pointer(pPgHdr)).FpPage = pPage (*PgHdr)(unsafe.Pointer(pPgHdr)).FpData = (*Sqlite3_pcache_page)(unsafe.Pointer(pPage)).FpBuf - (*PgHdr)(unsafe.Pointer(pPgHdr)).FpExtra = pPgHdr + 1*72 + (*PgHdr)(unsafe.Pointer(pPgHdr)).FpExtra = pPgHdr + 1*80 libc.Xmemset(tls, (*PgHdr)(unsafe.Pointer(pPgHdr)).FpExtra, 0, uint64(8)) (*PgHdr)(unsafe.Pointer(pPgHdr)).FpCache = pCache (*PgHdr)(unsafe.Pointer(pPgHdr)).Fpgno = pgno @@ -18555,7 +18557,7 @@ // reference count drops to 0, then it is made eligible for recycling. 
func Xsqlite3PcacheRelease(tls *libc.TLS, p uintptr) { (*PCache)(unsafe.Pointer((*PgHdr)(unsafe.Pointer(p)).FpCache)).FnRefSum-- - if int32(libc.PreDecInt16(&(*PgHdr)(unsafe.Pointer(p)).FnRef, 1)) == 0 { + if libc.PreDecInt64(&(*PgHdr)(unsafe.Pointer(p)).FnRef, 1) == int64(0) { if int32((*PgHdr)(unsafe.Pointer(p)).Fflags)&PGHDR_CLEAN != 0 { pcacheUnpin(tls, p) } else { @@ -18606,7 +18608,7 @@ *(*U16)(unsafe.Pointer(p + 52)) &= libc.Uint16FromInt32(libc.CplInt32(PGHDR_DIRTY | PGHDR_NEED_SYNC | PGHDR_WRITEABLE)) *(*U16)(unsafe.Pointer(p + 52)) |= U16(PGHDR_CLEAN) - if int32((*PgHdr)(unsafe.Pointer(p)).FnRef) == 0 { + if (*PgHdr)(unsafe.Pointer(p)).FnRef == int64(0) { pcacheUnpin(tls, p) } } @@ -18710,8 +18712,8 @@ } func pcacheMergeDirtyList(tls *libc.TLS, pA uintptr, pB uintptr) uintptr { - bp := tls.Alloc(72) - defer tls.Free(72) + bp := tls.Alloc(80) + defer tls.Free(80) var pTail uintptr pTail = bp @@ -18789,13 +18791,13 @@ // // This is not the total number of pages referenced, but the sum of the // reference count for all pages. -func Xsqlite3PcacheRefCount(tls *libc.TLS, pCache uintptr) int32 { +func Xsqlite3PcacheRefCount(tls *libc.TLS, pCache uintptr) I64 { return (*PCache)(unsafe.Pointer(pCache)).FnRefSum } // Return the number of references to the page supplied as an argument. -func Xsqlite3PcachePageRefcount(tls *libc.TLS, p uintptr) int32 { - return int32((*PgHdr)(unsafe.Pointer(p)).FnRef) +func Xsqlite3PcachePageRefcount(tls *libc.TLS, p uintptr) I64 { + return (*PgHdr)(unsafe.Pointer(p)).FnRef } // Return the total number of pages in the cache. @@ -21085,7 +21087,7 @@ pPg = Xsqlite3PagerLookup(tls, pPager, iPg) if pPg != 0 { - if Xsqlite3PcachePageRefcount(tls, pPg) == 1 { + if Xsqlite3PcachePageRefcount(tls, pPg) == int64(1) { Xsqlite3PcacheDrop(tls, pPg) } else { rc = readDbPage(tls, pPg) @@ -21508,7 +21510,7 @@ var pageSize U32 = *(*U32)(unsafe.Pointer(pPageSize)) if (int32((*Pager)(unsafe.Pointer(pPager)).FmemDb) == 0 || (*Pager)(unsafe.Pointer(pPager)).FdbSize == Pgno(0)) && - Xsqlite3PcacheRefCount(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache) == 0 && + Xsqlite3PcacheRefCount(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache) == int64(0) && pageSize != 0 && pageSize != U32((*Pager)(unsafe.Pointer(pPager)).FpageSize) { var pNew uintptr = uintptr(0) *(*I64)(unsafe.Pointer(bp)) = int64(0) @@ -21966,7 +21968,7 @@ for rc == SQLITE_OK && pList != 0 { var pNext uintptr = (*PgHdr)(unsafe.Pointer(pList)).FpDirty - if int32((*PgHdr)(unsafe.Pointer(pList)).FnRef) == 0 { + if (*PgHdr)(unsafe.Pointer(pList)).FnRef == int64(0) { rc = pagerStress(tls, pPager, pList) } pList = pNext @@ -22116,7 +22118,7 @@ goto __12 } - rc = Xsqlite3CantopenError(tls, 60235) + rc = Xsqlite3CantopenError(tls, 60239) __12: ; if !(rc != SQLITE_OK) { @@ -22497,7 +22499,7 @@ if !(rc == SQLITE_OK && *(*int32)(unsafe.Pointer(bp + 8))&SQLITE_OPEN_READONLY != 0) { goto __10 } - rc = Xsqlite3CantopenError(tls, 60754) + rc = Xsqlite3CantopenError(tls, 60758) Xsqlite3OsClose(tls, (*Pager)(unsafe.Pointer(pPager)).Fjfd) __10: ; @@ -22603,7 +22605,7 @@ } func pagerUnlockIfUnused(tls *libc.TLS, pPager uintptr) { - if Xsqlite3PcacheRefCount(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache) == 0 { + if Xsqlite3PcacheRefCount(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache) == int64(0) { pagerUnlockAndRollback(tls, pPager) } } @@ -22621,7 +22623,7 @@ if !(pgno == Pgno(0)) { goto __1 } - return Xsqlite3CorruptError(tls, 60967) + return Xsqlite3CorruptError(tls, 60971) __1: ; *(*uintptr)(unsafe.Pointer(bp)) = 
Xsqlite3PcacheFetch(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache, pgno, 3) @@ -22660,7 +22662,7 @@ if !(pgno == (*Pager)(unsafe.Pointer(pPager)).FlckPgno) { goto __7 } - rc = Xsqlite3CorruptError(tls, 60999) + rc = Xsqlite3CorruptError(tls, 61003) goto pager_acquire_err __7: ; @@ -23447,7 +23449,7 @@ // Return the number of references to the specified page. func Xsqlite3PagerPageRefcount(tls *libc.TLS, pPage uintptr) int32 { - return Xsqlite3PcachePageRefcount(tls, pPage) + return int32(Xsqlite3PcachePageRefcount(tls, pPage)) } // Parameter eStat must be one of SQLITE_DBSTATUS_CACHE_HIT, _MISS, _WRITE, @@ -23690,9 +23692,9 @@ pPgOld = Xsqlite3PagerLookup(tls, pPager, pgno) if pPgOld != 0 { - if int32((*PgHdr)(unsafe.Pointer(pPgOld)).FnRef) > 1 { + if (*PgHdr)(unsafe.Pointer(pPgOld)).FnRef > int64(1) { Xsqlite3PagerUnrefNotNull(tls, pPgOld) - return Xsqlite3CorruptError(tls, 62623) + return Xsqlite3CorruptError(tls, 62627) } *(*U16)(unsafe.Pointer(pPg + 52)) |= U16(int32((*PgHdr)(unsafe.Pointer(pPgOld)).Fflags) & PGHDR_NEED_SYNC) if (*Pager)(unsafe.Pointer(pPager)).FtempFile != 0 { @@ -24449,7 +24451,7 @@ nCollide = idx for iKey = walHash(tls, iPage); *(*Ht_slot)(unsafe.Pointer((*WalHashLoc)(unsafe.Pointer(bp)).FaHash + uintptr(iKey)*2)) != 0; iKey = walNextHash(tls, iKey) { if libc.PostDecInt32(&nCollide, 1) == 0 { - return Xsqlite3CorruptError(tls, 64387) + return Xsqlite3CorruptError(tls, 64391) } } *(*U32)(unsafe.Pointer((*WalHashLoc)(unsafe.Pointer(bp)).FaPgno + uintptr(idx-1)*4)) = iPage @@ -24548,7 +24550,7 @@ if !(version != U32(WAL_MAX_VERSION)) { goto __7 } - rc = Xsqlite3CantopenError(tls, 64519) + rc = Xsqlite3CantopenError(tls, 64523) goto finished __7: ; @@ -25134,7 +25136,7 @@ goto __14 } - rc = Xsqlite3CorruptError(tls, 65333) + rc = Xsqlite3CorruptError(tls, 65337) goto __15 __14: Xsqlite3OsFileControlHint(tls, (*Wal)(unsafe.Pointer(pWal)).FpDbFd, SQLITE_FCNTL_SIZE_HINT, bp+16) @@ -25409,7 +25411,7 @@ } if badHdr == 0 && (*Wal)(unsafe.Pointer(pWal)).Fhdr.FiVersion != U32(WALINDEX_MAX_VERSION) { - rc = Xsqlite3CantopenError(tls, 65682) + rc = Xsqlite3CantopenError(tls, 65686) } if (*Wal)(unsafe.Pointer(pWal)).FbShmUnreliable != 0 { if rc != SQLITE_OK { @@ -25882,7 +25884,7 @@ iRead = iFrame } if libc.PostDecInt32(&nCollide, 1) == 0 { - return Xsqlite3CorruptError(tls, 66419) + return Xsqlite3CorruptError(tls, 66423) } iKey = walNextHash(tls, iKey) } @@ -26387,7 +26389,7 @@ if rc == SQLITE_OK { if (*Wal)(unsafe.Pointer(pWal)).Fhdr.FmxFrame != 0 && walPagesize(tls, pWal) != nBuf { - rc = Xsqlite3CorruptError(tls, 67138) + rc = Xsqlite3CorruptError(tls, 67142) } else { rc = walCheckpoint(tls, pWal, db, eMode2, xBusy2, pBusyArg, sync_flags, zBuf) } @@ -27045,7 +27047,7 @@ } Xsqlite3VdbeRecordUnpack(tls, pKeyInfo, int32(nKey), pKey, pIdxKey) if int32((*UnpackedRecord)(unsafe.Pointer(pIdxKey)).FnField) == 0 || int32((*UnpackedRecord)(unsafe.Pointer(pIdxKey)).FnField) > int32((*KeyInfo)(unsafe.Pointer(pKeyInfo)).FnAllField) { - rc = Xsqlite3CorruptError(tls, 69249) + rc = Xsqlite3CorruptError(tls, 69253) } else { rc = Xsqlite3BtreeIndexMoveto(tls, pCur, pIdxKey, pRes) } @@ -27182,7 +27184,7 @@ if !(key == Pgno(0)) { goto __2 } - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69430) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69434) return __2: ; @@ -27199,7 +27201,7 @@ goto __4 } - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69443) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69447) goto ptrmap_exit __4: ; @@ -27207,7 
+27209,7 @@ if !(offset < 0) { goto __5 } - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69448) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69452) goto ptrmap_exit __5: ; @@ -27250,7 +27252,7 @@ offset = int32(Pgno(5) * (key - Pgno(iPtrmap) - Pgno(1))) if offset < 0 { Xsqlite3PagerUnref(tls, *(*uintptr)(unsafe.Pointer(bp))) - return Xsqlite3CorruptError(tls, 69493) + return Xsqlite3CorruptError(tls, 69497) } *(*U8)(unsafe.Pointer(pEType)) = *(*U8)(unsafe.Pointer(pPtrmap + uintptr(offset))) @@ -27260,7 +27262,7 @@ Xsqlite3PagerUnref(tls, *(*uintptr)(unsafe.Pointer(bp))) if int32(*(*U8)(unsafe.Pointer(pEType))) < 1 || int32(*(*U8)(unsafe.Pointer(pEType))) > 5 { - return Xsqlite3CorruptError(tls, 69501) + return Xsqlite3CorruptError(tls, 69505) } return SQLITE_OK } @@ -27510,7 +27512,7 @@ if U32((*CellInfo)(unsafe.Pointer(bp)).FnLocal) < (*CellInfo)(unsafe.Pointer(bp)).FnPayload { var ovfl Pgno if Uptr((*MemPage)(unsafe.Pointer(pSrc)).FaDataEnd) >= Uptr(pCell) && Uptr((*MemPage)(unsafe.Pointer(pSrc)).FaDataEnd) < Uptr(pCell+uintptr((*CellInfo)(unsafe.Pointer(bp)).FnLocal)) { - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69893) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69897) return } ovfl = Xsqlite3Get4byte(tls, pCell+uintptr(int32((*CellInfo)(unsafe.Pointer(bp)).FnSize)-4)) @@ -27557,7 +27559,7 @@ if !(iFree > usableSize-4) { goto __2 } - return Xsqlite3CorruptError(tls, 69951) + return Xsqlite3CorruptError(tls, 69955) __2: ; if !(iFree != 0) { @@ -27567,7 +27569,7 @@ if !(iFree2 > usableSize-4) { goto __4 } - return Xsqlite3CorruptError(tls, 69954) + return Xsqlite3CorruptError(tls, 69958) __4: ; if !(0 == iFree2 || int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree2)))) == 0 && int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree2+1)))) == 0) { @@ -27580,7 +27582,7 @@ if !(top >= iFree) { goto __6 } - return Xsqlite3CorruptError(tls, 69962) + return Xsqlite3CorruptError(tls, 69966) __6: ; if !(iFree2 != 0) { @@ -27589,14 +27591,14 @@ if !(iFree+sz > iFree2) { goto __9 } - return Xsqlite3CorruptError(tls, 69965) + return Xsqlite3CorruptError(tls, 69969) __9: ; sz2 = int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree2+2))))<<8 | int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree2+2) + 1))) if !(iFree2+sz2 > usableSize) { goto __10 } - return Xsqlite3CorruptError(tls, 69967) + return Xsqlite3CorruptError(tls, 69971) __10: ; libc.Xmemmove(tls, data+uintptr(iFree+sz+sz2), data+uintptr(iFree+sz), uint64(iFree2-(iFree+sz))) @@ -27606,7 +27608,7 @@ if !(iFree+sz > usableSize) { goto __11 } - return Xsqlite3CorruptError(tls, 69971) + return Xsqlite3CorruptError(tls, 69975) __11: ; __8: @@ -27670,7 +27672,7 @@ if !(pc < iCellStart || pc > iCellLast) { goto __22 } - return Xsqlite3CorruptError(tls, 70004) + return Xsqlite3CorruptError(tls, 70008) __22: ; size = int32((*struct { @@ -27680,7 +27682,7 @@ if !(cbrk < iCellStart || pc+size > usableSize) { goto __23 } - return Xsqlite3CorruptError(tls, 70010) + return Xsqlite3CorruptError(tls, 70014) __23: ; *(*U8)(unsafe.Pointer(pAddr1)) = U8(cbrk >> 8) @@ -27702,7 +27704,7 @@ if !(int32(*(*uint8)(unsafe.Pointer(data + uintptr(hdr+7))))+cbrk-iCellFirst != (*MemPage)(unsafe.Pointer(pPage)).FnFree) { goto __24 } - return Xsqlite3CorruptError(tls, 70024) + return Xsqlite3CorruptError(tls, 70028) __24: ; *(*uint8)(unsafe.Pointer(data + uintptr(hdr+5))) = U8(cbrk >> 8) @@ -27737,7 +27739,7 @@ *(*U8)(unsafe.Pointer(aData + uintptr(hdr+7))) += U8(int32(U8(x))) return aData + uintptr(pc) } else if 
x+pc > maxPC { - *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70081) + *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70085) return uintptr(0) } else { *(*U8)(unsafe.Pointer(aData + uintptr(pc+2))) = U8(x >> 8) @@ -27750,13 +27752,13 @@ pc = int32(*(*U8)(unsafe.Pointer(pTmp)))<<8 | int32(*(*U8)(unsafe.Pointer(pTmp + 1))) if pc <= iAddr { if pc != 0 { - *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70096) + *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70100) } return uintptr(0) } } if pc > maxPC+nByte-4 { - *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70103) + *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70107) } return uintptr(0) } @@ -27781,7 +27783,7 @@ if top == 0 && (*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize == U32(65536) { top = 65536 } else { - return Xsqlite3CorruptError(tls, 70152) + return Xsqlite3CorruptError(tls, 70156) } } @@ -27792,7 +27794,7 @@ *(*int32)(unsafe.Pointer(pIdx)) = libc.AssignInt32(&g2, int32((int64(pSpace)-int64(data))/1)) if g2 <= gap { - return Xsqlite3CorruptError(tls, 70170) + return Xsqlite3CorruptError(tls, 70174) } else { return SQLITE_OK } @@ -27844,22 +27846,22 @@ if int32(iFreeBlk) == 0 { break } - return Xsqlite3CorruptError(tls, 70249) + return Xsqlite3CorruptError(tls, 70253) } iPtr = iFreeBlk } if U32(iFreeBlk) > (*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize-U32(4) { - return Xsqlite3CorruptError(tls, 70254) + return Xsqlite3CorruptError(tls, 70258) } if iFreeBlk != 0 && iEnd+U32(3) >= U32(iFreeBlk) { nFrag = U8(U32(iFreeBlk) - iEnd) if iEnd > U32(iFreeBlk) { - return Xsqlite3CorruptError(tls, 70266) + return Xsqlite3CorruptError(tls, 70270) } iEnd = U32(int32(iFreeBlk) + (int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(iFreeBlk)+2))))<<8 | int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(iFreeBlk)+2) + 1))))) if iEnd > (*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize { - return Xsqlite3CorruptError(tls, 70269) + return Xsqlite3CorruptError(tls, 70273) } iSize = U16(iEnd - U32(iStart)) iFreeBlk = U16(int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFreeBlk))))<<8 | int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFreeBlk) + 1)))) @@ -27869,7 +27871,7 @@ var iPtrEnd int32 = int32(iPtr) + (int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(iPtr)+2))))<<8 | int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(iPtr)+2) + 1)))) if iPtrEnd+3 >= int32(iStart) { if iPtrEnd > int32(iStart) { - return Xsqlite3CorruptError(tls, 70282) + return Xsqlite3CorruptError(tls, 70286) } nFrag = U8(int32(nFrag) + (int32(iStart) - iPtrEnd)) iSize = U16(iEnd - U32(iPtr)) @@ -27877,7 +27879,7 @@ } } if int32(nFrag) > int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+7)))) { - return Xsqlite3CorruptError(tls, 70288) + return Xsqlite3CorruptError(tls, 70292) } *(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+7))) -= uint8(int32(nFrag)) } @@ -27885,10 +27887,10 @@ x = U16(int32(*(*U8)(unsafe.Pointer(pTmp)))<<8 | int32(*(*U8)(unsafe.Pointer(pTmp + 1)))) if int32(iStart) <= int32(x) { if int32(iStart) < int32(x) { - return Xsqlite3CorruptError(tls, 70297) + return Xsqlite3CorruptError(tls, 70301) } if int32(iPtr) != int32(hdr)+1 { - return Xsqlite3CorruptError(tls, 70298) + return Xsqlite3CorruptError(tls, 70302) } *(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+1))) = U8(int32(iFreeBlk) >> 8) *(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+1) + 1)) = U8(iFreeBlk) @@ -27948,7 
+27950,7 @@ (*MemPage)(unsafe.Pointer(pPage)).FxParseCell = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr) }{btreeParseCellPtrIndex})) - return Xsqlite3CorruptError(tls, 70357) + return Xsqlite3CorruptError(tls, 70361) } } else { (*MemPage)(unsafe.Pointer(pPage)).FchildPtrSize = U8(4) @@ -27984,7 +27986,7 @@ (*MemPage)(unsafe.Pointer(pPage)).FxParseCell = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr) }{btreeParseCellPtrIndex})) - return Xsqlite3CorruptError(tls, 70381) + return Xsqlite3CorruptError(tls, 70385) } } return SQLITE_OK @@ -28014,11 +28016,11 @@ var next U32 var size U32 if pc < top { - return Xsqlite3CorruptError(tls, 70432) + return Xsqlite3CorruptError(tls, 70436) } for 1 != 0 { if pc > iCellLast { - return Xsqlite3CorruptError(tls, 70437) + return Xsqlite3CorruptError(tls, 70441) } next = U32(int32(*(*U8)(unsafe.Pointer(data + uintptr(pc))))<<8 | int32(*(*U8)(unsafe.Pointer(data + uintptr(pc) + 1)))) size = U32(int32(*(*U8)(unsafe.Pointer(data + uintptr(pc+2))))<<8 | int32(*(*U8)(unsafe.Pointer(data + uintptr(pc+2) + 1)))) @@ -28029,15 +28031,15 @@ pc = int32(next) } if next > U32(0) { - return Xsqlite3CorruptError(tls, 70447) + return Xsqlite3CorruptError(tls, 70451) } if U32(pc)+size > uint32(usableSize) { - return Xsqlite3CorruptError(tls, 70451) + return Xsqlite3CorruptError(tls, 70455) } } if nFree > usableSize || nFree < iCellFirst { - return Xsqlite3CorruptError(tls, 70463) + return Xsqlite3CorruptError(tls, 70467) } (*MemPage)(unsafe.Pointer(pPage)).FnFree = int32(U16(nFree - iCellFirst)) return SQLITE_OK @@ -28065,14 +28067,14 @@ pc = int32(*(*U8)(unsafe.Pointer(data + uintptr(cellOffset+i*2))))<<8 | int32(*(*U8)(unsafe.Pointer(data + uintptr(cellOffset+i*2) + 1))) if pc < iCellFirst || pc > iCellLast { - return Xsqlite3CorruptError(tls, 70494) + return Xsqlite3CorruptError(tls, 70498) } sz = int32((*struct { f func(*libc.TLS, uintptr, uintptr) U16 })(unsafe.Pointer(&struct{ uintptr }{(*MemPage)(unsafe.Pointer(pPage)).FxCellSize})).f(tls, pPage, data+uintptr(pc))) if pc+sz > usableSize { - return Xsqlite3CorruptError(tls, 70499) + return Xsqlite3CorruptError(tls, 70503) } } return SQLITE_OK @@ -28086,7 +28088,7 @@ data = (*MemPage)(unsafe.Pointer(pPage)).FaData + uintptr((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset) if decodeFlags(tls, pPage, int32(*(*U8)(unsafe.Pointer(data)))) != 0 { - return Xsqlite3CorruptError(tls, 70531) + return Xsqlite3CorruptError(tls, 70535) } (*MemPage)(unsafe.Pointer(pPage)).FmaskPage = U16((*BtShared)(unsafe.Pointer(pBt)).FpageSize - U32(1)) @@ -28098,7 +28100,7 @@ (*MemPage)(unsafe.Pointer(pPage)).FnCell = U16(int32(*(*U8)(unsafe.Pointer(data + 3)))<<8 | int32(*(*U8)(unsafe.Pointer(data + 3 + 1)))) if U32((*MemPage)(unsafe.Pointer(pPage)).FnCell) > ((*BtShared)(unsafe.Pointer(pBt)).FpageSize-U32(8))/U32(6) { - return Xsqlite3CorruptError(tls, 70545) + return Xsqlite3CorruptError(tls, 70549) } (*MemPage)(unsafe.Pointer(pPage)).FnFree = -1 @@ -28201,7 +28203,7 @@ if !(pgno > btreePagecount(tls, pBt)) { goto __1 } - rc = Xsqlite3CorruptError(tls, 70700) + rc = Xsqlite3CorruptError(tls, 70704) goto getAndInitPage_error1 __1: ; @@ -28229,7 +28231,7 @@ if !(pCur != 0 && (int32((*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppPage)))).FnCell) < 1 || int32((*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppPage)))).FintKey) != int32((*BtCursor)(unsafe.Pointer(pCur)).FcurIntKey))) { goto __5 } - rc = Xsqlite3CorruptError(tls, 70721) + rc = Xsqlite3CorruptError(tls, 
70725) goto getAndInitPage_error2 __5: ; @@ -28268,7 +28270,7 @@ if Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppPage)))).FpDbPage) > 1 { releasePage(tls, *(*uintptr)(unsafe.Pointer(ppPage))) *(*uintptr)(unsafe.Pointer(ppPage)) = uintptr(0) - return Xsqlite3CorruptError(tls, 70787) + return Xsqlite3CorruptError(tls, 70791) } (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppPage)))).FisInit = U8(0) } else { @@ -29140,7 +29142,7 @@ if !(Xsqlite3WritableSchema(tls, (*BtShared)(unsafe.Pointer(pBt)).Fdb) == 0) { goto __18 } - rc = Xsqlite3CorruptError(tls, 71722) + rc = Xsqlite3CorruptError(tls, 71726) goto page1_init_failed goto __19 __18: @@ -29555,7 +29557,7 @@ if int32(eType) == PTRMAP_OVERFLOW2 { if Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer(pPage)).FaData) != iFrom { - return Xsqlite3CorruptError(tls, 72143) + return Xsqlite3CorruptError(tls, 72147) } Xsqlite3Put4byte(tls, (*MemPage)(unsafe.Pointer(pPage)).FaData, iTo) } else { @@ -29581,7 +29583,7 @@ })(unsafe.Pointer(&struct{ uintptr }{(*MemPage)(unsafe.Pointer(pPage)).FxParseCell})).f(tls, pPage, pCell, bp) if U32((*CellInfo)(unsafe.Pointer(bp)).FnLocal) < (*CellInfo)(unsafe.Pointer(bp)).FnPayload { if pCell+uintptr((*CellInfo)(unsafe.Pointer(bp)).FnSize) > (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr((*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize) { - return Xsqlite3CorruptError(tls, 72162) + return Xsqlite3CorruptError(tls, 72166) } if iFrom == Xsqlite3Get4byte(tls, pCell+uintptr((*CellInfo)(unsafe.Pointer(bp)).FnSize)-uintptr(4)) { Xsqlite3Put4byte(tls, pCell+uintptr((*CellInfo)(unsafe.Pointer(bp)).FnSize)-uintptr(4), iTo) @@ -29590,7 +29592,7 @@ } } else { if pCell+uintptr(4) > (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr((*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize) { - return Xsqlite3CorruptError(tls, 72171) + return Xsqlite3CorruptError(tls, 72175) } if Xsqlite3Get4byte(tls, pCell) == iFrom { Xsqlite3Put4byte(tls, pCell, iTo) @@ -29601,7 +29603,7 @@ if i == nCell { if int32(eType) != PTRMAP_BTREE || Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr(int32((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset)+8)) != iFrom { - return Xsqlite3CorruptError(tls, 72183) + return Xsqlite3CorruptError(tls, 72187) } Xsqlite3Put4byte(tls, (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr(int32((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset)+8), iTo) } @@ -29617,7 +29619,7 @@ var pPager uintptr = (*BtShared)(unsafe.Pointer(pBt)).FpPager if iDbPage < Pgno(3) { - return Xsqlite3CorruptError(tls, 72218) + return Xsqlite3CorruptError(tls, 72222) } *(*int32)(unsafe.Pointer(bp)) = Xsqlite3PagerMovepage(tls, pPager, (*MemPage)(unsafe.Pointer(pDbPage)).FpDbPage, iFreePage, isCommit) @@ -29678,7 +29680,7 @@ return rc } if int32(*(*U8)(unsafe.Pointer(bp))) == PTRMAP_ROOTPAGE { - return Xsqlite3CorruptError(tls, 72316) + return Xsqlite3CorruptError(tls, 72320) } if int32(*(*U8)(unsafe.Pointer(bp))) == PTRMAP_FREEPAGE { @@ -29713,7 +29715,7 @@ releasePage(tls, *(*uintptr)(unsafe.Pointer(bp + 32))) if *(*Pgno)(unsafe.Pointer(bp + 40)) > dbSize { releasePage(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) - return Xsqlite3CorruptError(tls, 72368) + return Xsqlite3CorruptError(tls, 72372) } } @@ -29773,7 +29775,7 @@ var nFin Pgno = finalDbSize(tls, pBt, nOrig, nFree) if nOrig < nFin || nFree >= nOrig { - rc = Xsqlite3CorruptError(tls, 72436) + rc = Xsqlite3CorruptError(tls, 72440) } else if nFree > Pgno(0) { rc = 
saveAllCursors(tls, pBt, uint32(0), uintptr(0)) if rc == SQLITE_OK { @@ -29812,7 +29814,7 @@ nOrig = btreePagecount(tls, pBt) if ptrmapPageno(tls, pBt, nOrig) == nOrig || nOrig == U32(Xsqlite3PendingByte)/(*BtShared)(unsafe.Pointer(pBt)).FpageSize+U32(1) { - return Xsqlite3CorruptError(tls, 72487) + return Xsqlite3CorruptError(tls, 72491) } nFree = Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer((*BtShared)(unsafe.Pointer(pBt)).FpPage1)).FaData+36) @@ -29843,7 +29845,7 @@ } nFin = finalDbSize(tls, pBt, nOrig, nVac) if nFin > nOrig { - return Xsqlite3CorruptError(tls, 72514) + return Xsqlite3CorruptError(tls, 72518) } if nFin < nOrig { rc = saveAllCursors(tls, pBt, uint32(0), uintptr(0)) @@ -30184,7 +30186,7 @@ if iTable <= Pgno(1) { if iTable < Pgno(1) { - return Xsqlite3CorruptError(tls, 72978) + return Xsqlite3CorruptError(tls, 72982) } else if btreePagecount(tls, pBt) == Pgno(0) { iTable = Pgno(0) } @@ -30428,14 +30430,14 @@ var pBt uintptr = (*BtCursor)(unsafe.Pointer(pCur)).FpBt if int32((*BtCursor)(unsafe.Pointer(pCur)).Fix) >= int32((*MemPage)(unsafe.Pointer(pPage)).FnCell) { - return Xsqlite3CorruptError(tls, 73385) + return Xsqlite3CorruptError(tls, 73389) } getCellInfo(tls, pCur) aPayload = (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload if Uptr((int64(aPayload)-int64((*MemPage)(unsafe.Pointer(pPage)).FaData))/1) > Uptr((*BtShared)(unsafe.Pointer(pBt)).FusableSize-U32((*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal)) { - return Xsqlite3CorruptError(tls, 73400) + return Xsqlite3CorruptError(tls, 73404) } if offset < U32((*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal) { @@ -30480,7 +30482,7 @@ for *(*Pgno)(unsafe.Pointer(bp)) != 0 { if *(*Pgno)(unsafe.Pointer(bp)) > (*BtShared)(unsafe.Pointer(pBt)).FnPage { - return Xsqlite3CorruptError(tls, 73462) + return Xsqlite3CorruptError(tls, 73466) } *(*Pgno)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pCur)).FaOverflow + uintptr(iIdx)*4)) = *(*Pgno)(unsafe.Pointer(bp)) @@ -30529,7 +30531,7 @@ } if rc == SQLITE_OK && amt > U32(0) { - return Xsqlite3CorruptError(tls, 73547) + return Xsqlite3CorruptError(tls, 73551) } return rc } @@ -30609,7 +30611,7 @@ func moveToChild(tls *libc.TLS, pCur uintptr, newPgno U32) int32 { if int32((*BtCursor)(unsafe.Pointer(pCur)).FiPage) >= BTCURSOR_MAX_DEPTH-1 { - return Xsqlite3CorruptError(tls, 73684) + return Xsqlite3CorruptError(tls, 73688) } (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnSize = U16(0) *(*U8)(unsafe.Pointer(pCur + 1)) &= libc.Uint8FromInt32(libc.CplInt32(BTCF_ValidNKey | BTCF_ValidOvfl)) @@ -30700,7 +30702,7 @@ if !(int32((*MemPage)(unsafe.Pointer(pRoot)).FisInit) == 0 || libc.Bool32((*BtCursor)(unsafe.Pointer(pCur)).FpKeyInfo == uintptr(0)) != int32((*MemPage)(unsafe.Pointer(pRoot)).FintKey)) { goto __11 } - return Xsqlite3CorruptError(tls, 73823) + return Xsqlite3CorruptError(tls, 73827) __11: ; skip_init: @@ -30720,7 +30722,7 @@ if !((*MemPage)(unsafe.Pointer(pRoot)).Fpgno != Pgno(1)) { goto __16 } - return Xsqlite3CorruptError(tls, 73835) + return Xsqlite3CorruptError(tls, 73839) __16: ; subpage = Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer(pRoot)).FaData+uintptr(int32((*MemPage)(unsafe.Pointer(pRoot)).FhdrOffset)+8)) @@ -30930,7 +30932,7 @@ if !(pCell >= (*MemPage)(unsafe.Pointer(pPage)).FaDataEnd) { goto __21 } - return Xsqlite3CorruptError(tls, 74077) + return Xsqlite3CorruptError(tls, 74081) __21: ; goto __19 @@ -31134,7 +31136,7 @@ if !!(int32((*MemPage)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pCur)).FpPage)).FisInit) != 0) { goto __4 } - return Xsqlite3CorruptError(tls, 74273) 
+ return Xsqlite3CorruptError(tls, 74277) __4: ; goto bypass_moveto_root @@ -31199,7 +31201,7 @@ if !(nCell < 2 || U32(nCell)/(*BtShared)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pCur)).FpBt)).FusableSize > (*BtShared)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pCur)).FpBt)).FnPage) { goto __17 } - rc = Xsqlite3CorruptError(tls, 74360) + rc = Xsqlite3CorruptError(tls, 74364) goto moveto_index_finish __17: ; @@ -31247,7 +31249,7 @@ if !((*UnpackedRecord)(unsafe.Pointer(pIdxKey)).FerrCode != 0) { goto __24 } - rc = Xsqlite3CorruptError(tls, 74392) + rc = Xsqlite3CorruptError(tls, 74396) __24: ; goto moveto_index_finish @@ -31366,7 +31368,7 @@ pPage = (*BtCursor)(unsafe.Pointer(pCur)).FpPage idx = int32(libc.PreIncUint16(&(*BtCursor)(unsafe.Pointer(pCur)).Fix, 1)) if !(int32((*MemPage)(unsafe.Pointer(pPage)).FisInit) != 0) || Xsqlite3FaultSim(tls, 412) != 0 { - return Xsqlite3CorruptError(tls, 74508) + return Xsqlite3CorruptError(tls, 74512) } if idx >= int32((*MemPage)(unsafe.Pointer(pPage)).FnCell) { @@ -31526,7 +31528,7 @@ if !(n >= mxPage) { goto __1 } - return Xsqlite3CorruptError(tls, 74688) + return Xsqlite3CorruptError(tls, 74692) __1: ; if !(n > U32(0)) { @@ -31591,7 +31593,7 @@ if !(iTrunk > mxPage || libc.PostIncUint32(&nSearch, 1) > n) { goto __16 } - rc = Xsqlite3CorruptError(tls, 74744) + rc = Xsqlite3CorruptError(tls, 74748) goto __17 __16: rc = btreeGetUnusedPage(tls, pBt, iTrunk, bp+8, 0) @@ -31627,7 +31629,7 @@ goto __22 } - rc = Xsqlite3CorruptError(tls, 74773) + rc = Xsqlite3CorruptError(tls, 74777) goto end_allocate_page goto __23 __22: @@ -31671,7 +31673,7 @@ if !(iNewTrunk > mxPage) { goto __32 } - rc = Xsqlite3CorruptError(tls, 74807) + rc = Xsqlite3CorruptError(tls, 74811) goto end_allocate_page __32: ; @@ -31783,7 +31785,7 @@ if !(iPage > mxPage || iPage < Pgno(2)) { goto __51 } - rc = Xsqlite3CorruptError(tls, 74872) + rc = Xsqlite3CorruptError(tls, 74876) goto end_allocate_page __51: ; @@ -31941,7 +31943,7 @@ if !(iPage < Pgno(2) || iPage > (*BtShared)(unsafe.Pointer(pBt)).FnPage) { goto __1 } - return Xsqlite3CorruptError(tls, 74999) + return Xsqlite3CorruptError(tls, 75003) __1: ; if !(pMemPage != 0) { @@ -31998,7 +32000,7 @@ if !(iTrunk > btreePagecount(tls, pBt)) { goto __10 } - *(*int32)(unsafe.Pointer(bp + 8)) = Xsqlite3CorruptError(tls, 75046) + *(*int32)(unsafe.Pointer(bp + 8)) = Xsqlite3CorruptError(tls, 75050) goto freepage_out __10: ; @@ -32014,7 +32016,7 @@ if !(nLeaf > (*BtShared)(unsafe.Pointer(pBt)).FusableSize/U32(4)-U32(2)) { goto __12 } - *(*int32)(unsafe.Pointer(bp + 8)) = Xsqlite3CorruptError(tls, 75057) + *(*int32)(unsafe.Pointer(bp + 8)) = Xsqlite3CorruptError(tls, 75061) goto freepage_out __12: ; @@ -32088,7 +32090,7 @@ var ovflPageSize U32 if pCell+uintptr((*CellInfo)(unsafe.Pointer(pInfo)).FnSize) > (*MemPage)(unsafe.Pointer(pPage)).FaDataEnd { - return Xsqlite3CorruptError(tls, 75146) + return Xsqlite3CorruptError(tls, 75150) } ovflPgno = Xsqlite3Get4byte(tls, pCell+uintptr((*CellInfo)(unsafe.Pointer(pInfo)).FnSize)-uintptr(4)) pBt = (*MemPage)(unsafe.Pointer(pPage)).FpBt @@ -32100,7 +32102,7 @@ *(*Pgno)(unsafe.Pointer(bp + 8)) = Pgno(0) *(*uintptr)(unsafe.Pointer(bp)) = uintptr(0) if ovflPgno < Pgno(2) || ovflPgno > btreePagecount(tls, pBt) { - return Xsqlite3CorruptError(tls, 75163) + return Xsqlite3CorruptError(tls, 75167) } if nOvfl != 0 { rc = getOverflowPage(tls, pBt, ovflPgno, bp, bp+8) @@ -32111,7 +32113,7 @@ if (*(*uintptr)(unsafe.Pointer(bp)) != 0 || libc.AssignPtrUintptr(bp, btreePageLookup(tls, pBt, ovflPgno)) != uintptr(0)) && 
Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FpDbPage) != 1 { - rc = Xsqlite3CorruptError(tls, 75183) + rc = Xsqlite3CorruptError(tls, 75187) } else { rc = freePage2(tls, pBt, *(*uintptr)(unsafe.Pointer(bp)), ovflPgno) } @@ -32276,7 +32278,7 @@ hdr = int32((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset) if pc+U32(sz) > (*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize { - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 75436) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 75440) return } rc = freeSpace(tls, pPage, uint16(pc), uint16(sz)) @@ -32555,12 +32557,12 @@ if Uptr(pCell) >= Uptr(aData+uintptr(j)) && Uptr(pCell) < Uptr(pEnd) { if Uptr(pCell+uintptr(sz)) > Uptr(pEnd) { - return Xsqlite3CorruptError(tls, 75737) + return Xsqlite3CorruptError(tls, 75741) } pCell = pTmp + uintptr((int64(pCell)-int64(aData))/1) } else if Uptr(pCell+uintptr(sz)) > Uptr(pSrcEnd) && Uptr(pCell) < Uptr(pSrcEnd) { - return Xsqlite3CorruptError(tls, 75742) + return Xsqlite3CorruptError(tls, 75746) } pData -= uintptr(sz) @@ -32568,7 +32570,7 @@ *(*U8)(unsafe.Pointer(pCellptr + 1)) = U8((int64(pData) - int64(aData)) / 1) pCellptr += uintptr(2) if pData < pCellptr { - return Xsqlite3CorruptError(tls, 75748) + return Xsqlite3CorruptError(tls, 75752) } libc.Xmemmove(tls, pData, pCell, uint64(sz)) @@ -32628,7 +32630,7 @@ if Uptr(*(*uintptr)(unsafe.Pointer((*CellArray)(unsafe.Pointer(pCArray)).FapCell + uintptr(i)*8))+uintptr(sz)) > Uptr(pEnd) && Uptr(*(*uintptr)(unsafe.Pointer((*CellArray)(unsafe.Pointer(pCArray)).FapCell + uintptr(i)*8))) < Uptr(pEnd) { - Xsqlite3CorruptError(tls, 75833) + Xsqlite3CorruptError(tls, 75837) return 1 } libc.Xmemmove(tls, pSlot, *(*uintptr)(unsafe.Pointer((*CellArray)(unsafe.Pointer(pCArray)).FapCell + uintptr(i)*8)), uint64(sz)) @@ -32717,7 +32719,7 @@ if !(nShift > nCell) { goto __2 } - return Xsqlite3CorruptError(tls, 75947) + return Xsqlite3CorruptError(tls, 75951) __2: ; libc.Xmemmove(tls, (*MemPage)(unsafe.Pointer(pPg)).FaCellIdx, (*MemPage)(unsafe.Pointer(pPg)).FaCellIdx+uintptr(nShift*2), uint64(nCell*2)) @@ -32833,7 +32835,7 @@ var pBt uintptr = (*MemPage)(unsafe.Pointer(pPage)).FpBt if int32((*MemPage)(unsafe.Pointer(pPage)).FnCell) == 0 { - return Xsqlite3CorruptError(tls, 76060) + return Xsqlite3CorruptError(tls, 76064) } *(*int32)(unsafe.Pointer(bp + 136)) = allocateBtreePage(tls, pBt, bp, bp+8, uint32(0), uint8(0)) @@ -33153,7 +33155,7 @@ if !(int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pOld)).FaData))) != int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp + 112)))).FaData)))) { goto __25 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76481) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76485) goto balance_cleanup __25: ; @@ -33164,7 +33166,7 @@ if !(limit < int32(*(*U16)(unsafe.Pointer(pOld + 28)))) { goto __27 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76505) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76509) goto balance_cleanup __27: ; @@ -33322,7 +33324,7 @@ if !(k > NB+2) { goto __55 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76606) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76610) goto balance_cleanup __55: ; @@ -33396,7 +33398,7 @@ }()) { goto __67 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76639) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76643) 
goto balance_cleanup __67: ; @@ -33459,7 +33461,7 @@ }()) { goto __75 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76683) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76687) goto balance_cleanup __75: ; @@ -33487,7 +33489,7 @@ *(*int32)(unsafe.Pointer(bp + 172)) == SQLITE_OK) { goto __81 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76716) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76720) __81: ; if !(*(*int32)(unsafe.Pointer(bp + 172)) != 0) { @@ -33748,7 +33750,7 @@ if !(Uptr(pSrcEnd) >= Uptr(pCell1) && Uptr(pSrcEnd) < Uptr(pCell1+uintptr(sz2))) { goto __121 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76916) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76920) goto balance_cleanup __121: ; @@ -33940,7 +33942,7 @@ if pOther != pCur && int32((*BtCursor)(unsafe.Pointer(pOther)).FeState) == CURSOR_VALID && (*BtCursor)(unsafe.Pointer(pOther)).FpPage == (*BtCursor)(unsafe.Pointer(pCur)).FpPage { - return Xsqlite3CorruptError(tls, 77146) + return Xsqlite3CorruptError(tls, 77150) } } return SQLITE_OK @@ -33978,7 +33980,7 @@ break } } else if Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(pPage)).FpDbPage) > 1 { - rc = Xsqlite3CorruptError(tls, 77206) + rc = Xsqlite3CorruptError(tls, 77210) } else { var pParent uintptr = *(*uintptr)(unsafe.Pointer(pCur + 144 + uintptr(iPage-1)*8)) var iIdx int32 = int32(*(*U16)(unsafe.Pointer(pCur + 88 + uintptr(iPage-1)*2))) @@ -34084,7 +34086,7 @@ return rc } if Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FpDbPage) != 1 || (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FisInit != 0 { - rc = Xsqlite3CorruptError(tls, 77370) + rc = Xsqlite3CorruptError(tls, 77374) } else { if U32(iOffset)+ovflPageSize < U32(nTotal) { ovflPgno = Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FaData) @@ -34109,7 +34111,7 @@ if (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload+uintptr((*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal) > (*MemPage)(unsafe.Pointer(pPage)).FaDataEnd || (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload < (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr((*MemPage)(unsafe.Pointer(pPage)).FcellOffset) { - return Xsqlite3CorruptError(tls, 77398) + return Xsqlite3CorruptError(tls, 77402) } if int32((*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal) == nTotal { return btreeOverwriteContent(tls, pPage, (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload, pX, @@ -34179,7 +34181,7 @@ goto __3 } - return Xsqlite3CorruptError(tls, 77479) + return Xsqlite3CorruptError(tls, 77483) __3: ; __1: @@ -34292,7 +34294,7 @@ goto __21 } - *(*int32)(unsafe.Pointer(bp + 120)) = Xsqlite3CorruptError(tls, 77602) + *(*int32)(unsafe.Pointer(bp + 120)) = Xsqlite3CorruptError(tls, 77606) goto __22 __21: *(*int32)(unsafe.Pointer(bp + 120)) = btreeComputeFreeSpace(tls, pPage) @@ -34352,6 +34354,7 @@ __25: ; idx = int32((*BtCursor)(unsafe.Pointer(pCur)).Fix) + (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnSize = U16(0) if !(*(*int32)(unsafe.Pointer(bp)) == 0) { goto __31 } @@ -34359,7 +34362,7 @@ if !(idx >= int32((*MemPage)(unsafe.Pointer(pPage)).FnCell)) { goto __33 } - return Xsqlite3CorruptError(tls, 77640) + return Xsqlite3CorruptError(tls, 77645) __33: ; *(*int32)(unsafe.Pointer(bp + 120)) = Xsqlite3PagerWrite(tls, (*MemPage)(unsafe.Pointer(pPage)).FpDbPage) @@ -34397,13 +34400,13 @@ if !(oldCell < 
(*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset)+uintptr(10)) { goto __39 } - return Xsqlite3CorruptError(tls, 77667) + return Xsqlite3CorruptError(tls, 77672) __39: ; if !(oldCell+uintptr(*(*int32)(unsafe.Pointer(bp + 124))) > (*MemPage)(unsafe.Pointer(pPage)).FaDataEnd) { goto __40 } - return Xsqlite3CorruptError(tls, 77670) + return Xsqlite3CorruptError(tls, 77675) __40: ; libc.Xmemcpy(tls, oldCell, newCell, uint64(*(*int32)(unsafe.Pointer(bp + 124)))) @@ -34434,7 +34437,6 @@ ; *(*int32)(unsafe.Pointer(bp + 120)) = insertCell(tls, pPage, idx, newCell, *(*int32)(unsafe.Pointer(bp + 124)), uintptr(0), uint32(0)) - (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnSize = U16(0) if !((*MemPage)(unsafe.Pointer(pPage)).FnOverflow != 0) { goto __44 } @@ -34509,7 +34511,7 @@ nIn = U32((*BtCursor)(unsafe.Pointer(pSrc)).Finfo.FnLocal) aIn = (*BtCursor)(unsafe.Pointer(pSrc)).Finfo.FpPayload if aIn+uintptr(nIn) > (*MemPage)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pSrc)).FpPage)).FaDataEnd { - return Xsqlite3CorruptError(tls, 77773) + return Xsqlite3CorruptError(tls, 77777) } nRem = (*BtCursor)(unsafe.Pointer(pSrc)).Finfo.FnPayload if nIn == nRem && nIn < U32((*MemPage)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pDest)).FpPage)).FmaxLocal) { @@ -34534,7 +34536,7 @@ if nRem > nIn { if aIn+uintptr(nIn)+uintptr(4) > (*MemPage)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pSrc)).FpPage)).FaDataEnd { - return Xsqlite3CorruptError(tls, 77798) + return Xsqlite3CorruptError(tls, 77802) } ovflIn = Xsqlite3Get4byte(tls, (*BtCursor)(unsafe.Pointer(pSrc)).Finfo.FpPayload+uintptr(nIn)) } @@ -34635,7 +34637,7 @@ return *(*int32)(unsafe.Pointer(bp + 24)) } } else { - return Xsqlite3CorruptError(tls, 77894) + return Xsqlite3CorruptError(tls, 77898) } } @@ -34643,11 +34645,11 @@ iCellIdx = int32((*BtCursor)(unsafe.Pointer(pCur)).Fix) pPage = (*BtCursor)(unsafe.Pointer(pCur)).FpPage if int32((*MemPage)(unsafe.Pointer(pPage)).FnCell) <= iCellIdx { - return Xsqlite3CorruptError(tls, 77903) + return Xsqlite3CorruptError(tls, 77907) } pCell = (*MemPage)(unsafe.Pointer(pPage)).FaData + uintptr(int32((*MemPage)(unsafe.Pointer(pPage)).FmaskPage)&(int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FaCellIdx + uintptr(2*iCellIdx))))<<8|int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FaCellIdx + uintptr(2*iCellIdx) + 1))))) if (*MemPage)(unsafe.Pointer(pPage)).FnFree < 0 && btreeComputeFreeSpace(tls, pPage) != 0 { - return Xsqlite3CorruptError(tls, 77907) + return Xsqlite3CorruptError(tls, 77911) } bPreserve = U8(libc.Bool32(int32(flags)&BTREE_SAVEPOSITION != 0)) @@ -34722,7 +34724,7 @@ } pCell = (*MemPage)(unsafe.Pointer(pLeaf)).FaData + uintptr(int32((*MemPage)(unsafe.Pointer(pLeaf)).FmaskPage)&(int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pLeaf)).FaCellIdx + uintptr(2*(int32((*MemPage)(unsafe.Pointer(pLeaf)).FnCell)-1)))))<<8|int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pLeaf)).FaCellIdx + uintptr(2*(int32((*MemPage)(unsafe.Pointer(pLeaf)).FnCell)-1)) + 1))))) if pCell < (*MemPage)(unsafe.Pointer(pLeaf)).FaData+4 { - return Xsqlite3CorruptError(tls, 77998) + return Xsqlite3CorruptError(tls, 78002) } nCell = int32((*struct { f func(*libc.TLS, uintptr, uintptr) U16 @@ -34791,7 +34793,7 @@ Xsqlite3BtreeGetMeta(tls, p, BTREE_LARGEST_ROOT_PAGE, bp) if *(*Pgno)(unsafe.Pointer(bp)) > btreePagecount(tls, pBt) { - return Xsqlite3CorruptError(tls, 78114) + return Xsqlite3CorruptError(tls, 78118) } *(*Pgno)(unsafe.Pointer(bp))++ @@ -34820,7 +34822,7 @@ } 
*(*int32)(unsafe.Pointer(bp + 40)) = ptrmapGet(tls, pBt, *(*Pgno)(unsafe.Pointer(bp)), bp+32, bp+36) if int32(*(*U8)(unsafe.Pointer(bp + 32))) == PTRMAP_ROOTPAGE || int32(*(*U8)(unsafe.Pointer(bp + 32))) == PTRMAP_FREEPAGE { - *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3CorruptError(tls, 78162) + *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3CorruptError(tls, 78166) } if *(*int32)(unsafe.Pointer(bp + 40)) != SQLITE_OK { releasePage(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) @@ -34896,7 +34898,7 @@ if !(pgno > btreePagecount(tls, pBt)) { goto __1 } - return Xsqlite3CorruptError(tls, 78252) + return Xsqlite3CorruptError(tls, 78256) __1: ; *(*int32)(unsafe.Pointer(bp + 32)) = getAndInitPage(tls, pBt, pgno, bp, uintptr(0), 0) @@ -34910,7 +34912,7 @@ Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FpDbPage) != 1+libc.Bool32(pgno == Pgno(1))) { goto __3 } - *(*int32)(unsafe.Pointer(bp + 32)) = Xsqlite3CorruptError(tls, 78259) + *(*int32)(unsafe.Pointer(bp + 32)) = Xsqlite3CorruptError(tls, 78263) goto cleardatabasepage_out __3: ; @@ -35044,7 +35046,7 @@ var pBt uintptr = (*Btree)(unsafe.Pointer(p)).FpBt if iTable > btreePagecount(tls, pBt) { - return Xsqlite3CorruptError(tls, 78363) + return Xsqlite3CorruptError(tls, 78367) } *(*int32)(unsafe.Pointer(bp + 12)) = Xsqlite3BtreeClearTable(tls, p, int32(iTable), uintptr(0)) @@ -37513,7 +37515,7 @@ var rc int32 (*Mem)(unsafe.Pointer(pMem)).Fflags = U16(MEM_Null) if Xsqlite3BtreeMaxRecordSize(tls, pCur) < Sqlite3_int64(offset+amt) { - return Xsqlite3CorruptError(tls, 81630) + return Xsqlite3CorruptError(tls, 81634) } if SQLITE_OK == libc.AssignInt32(&rc, Xsqlite3VdbeMemClearAndResize(tls, pMem, int32(amt+U32(1)))) { rc = Xsqlite3BtreePayload(tls, pCur, offset, amt, (*Mem)(unsafe.Pointer(pMem)).Fz) @@ -38162,7 +38164,7 @@ return Xsqlite3GetVarint32(tls, a, bp) }()) if *(*int32)(unsafe.Pointer(bp)) > nRec || iHdr >= *(*int32)(unsafe.Pointer(bp)) { - return Xsqlite3CorruptError(tls, 82270) + return Xsqlite3CorruptError(tls, 82274) } iField = *(*int32)(unsafe.Pointer(bp)) for i = 0; i <= iCol; i++ { @@ -38177,14 +38179,14 @@ }()) if iHdr > *(*int32)(unsafe.Pointer(bp)) { - return Xsqlite3CorruptError(tls, 82276) + return Xsqlite3CorruptError(tls, 82280) } szField = int32(Xsqlite3VdbeSerialTypeLen(tls, *(*U32)(unsafe.Pointer(bp + 4)))) iField = iField + szField } if iField > nRec { - return Xsqlite3CorruptError(tls, 82282) + return Xsqlite3CorruptError(tls, 82286) } if pMem == uintptr(0) { pMem = libc.AssignPtrUintptr(ppVal, Xsqlite3ValueNew(tls, db)) @@ -40488,7 +40490,7 @@ return rc } if *(*int32)(unsafe.Pointer(bp)) != 0 { - return Xsqlite3CorruptError(tls, 86058) + return Xsqlite3CorruptError(tls, 86062) } (*VdbeCursor)(unsafe.Pointer(p)).FdeferredMoveto = U8(0) (*VdbeCursor)(unsafe.Pointer(p)).FcacheStatus = U32(CACHE_STALE) @@ -41039,7 +41041,7 @@ i = 0 } if d1 > uint32(nKey1) { - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 86985)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 86989)) return 0 } @@ -41104,7 +41106,7 @@ if d1+U32((*Mem)(unsafe.Pointer(bp+8)).Fn) > uint32(nKey1) || int32((*KeyInfo)(unsafe.Pointer(libc.AssignUintptr(&pKeyInfo, (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FpKeyInfo))).FnAllField) <= i { - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87062)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87066)) return 0 } else if 
*(*uintptr)(unsafe.Pointer(pKeyInfo + 32 + uintptr(i)*8)) != 0 { (*Mem)(unsafe.Pointer(bp + 8)).Fenc = (*KeyInfo)(unsafe.Pointer(pKeyInfo)).Fenc @@ -41138,7 +41140,7 @@ var nStr int32 = int32((*(*U32)(unsafe.Pointer(bp + 64)) - U32(12)) / U32(2)) if d1+U32(nStr) > uint32(nKey1) { - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87092)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87096)) return 0 } else if int32((*Mem)(unsafe.Pointer(pRhs)).Fflags)&MEM_Zero != 0 { if !(isAllZero(tls, aKey1+uintptr(d1), nStr) != 0) { @@ -41188,7 +41190,7 @@ } idx1 = idx1 + U32(Xsqlite3VarintLen(tls, uint64(*(*U32)(unsafe.Pointer(bp + 64))))) if idx1 >= *(*U32)(unsafe.Pointer(bp + 4)) { - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87136)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87140)) return 0 } } @@ -41334,7 +41336,7 @@ if !(szHdr+nStr > nKey1) { goto __7 } - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87299)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87303)) return 0 __7: ; @@ -41505,7 +41507,7 @@ idx_rowid_corruption: ; Xsqlite3VdbeMemReleaseMalloc(tls, bp) - return Xsqlite3CorruptError(tls, 87457) + return Xsqlite3CorruptError(tls, 87461) } // Compare the key of the index entry that cursor pC is pointing to against @@ -41531,7 +41533,7 @@ if nCellKey <= int64(0) || nCellKey > int64(0x7fffffff) { *(*int32)(unsafe.Pointer(res)) = 0 - return Xsqlite3CorruptError(tls, 87490) + return Xsqlite3CorruptError(tls, 87494) } Xsqlite3VdbeMemInit(tls, bp, db, uint16(0)) rc = Xsqlite3VdbeMemFromBtreeZeroOffset(tls, pCur, U32(nCellKey), bp) @@ -41805,7 +41807,7 @@ var v uintptr = pStmt var db uintptr = (*Vdbe)(unsafe.Pointer(v)).Fdb if vdbeSafety(tls, v) != 0 { - return Xsqlite3MisuseError(tls, 87854) + return Xsqlite3MisuseError(tls, 87858) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) if (*Vdbe)(unsafe.Pointer(v)).FstartTime > int64(0) { @@ -42420,7 +42422,7 @@ var db uintptr if vdbeSafetyNotNull(tls, v) != 0 { - return Xsqlite3MisuseError(tls, 88544) + return Xsqlite3MisuseError(tls, 88548) } db = (*Vdbe)(unsafe.Pointer(v)).Fdb Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) @@ -42940,7 +42942,7 @@ var pVar uintptr if vdbeSafetyNotNull(tls, p) != 0 { - return Xsqlite3MisuseError(tls, 89208) + return Xsqlite3MisuseError(tls, 89212) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer((*Vdbe)(unsafe.Pointer(p)).Fdb)).Fmutex) if int32((*Vdbe)(unsafe.Pointer(p)).FeVdbeState) != VDBE_READY_STATE { @@ -42948,7 +42950,7 @@ Xsqlite3_mutex_leave(tls, (*Sqlite3)(unsafe.Pointer((*Vdbe)(unsafe.Pointer(p)).Fdb)).Fmutex) Xsqlite3_log(tls, SQLITE_MISUSE, ts+5341, libc.VaList(bp, (*Vdbe)(unsafe.Pointer(p)).FzSql)) - return Xsqlite3MisuseError(tls, 89216) + return Xsqlite3MisuseError(tls, 89220) } if i >= uint32((*Vdbe)(unsafe.Pointer(p)).FnVar) { Xsqlite3Error(tls, (*Vdbe)(unsafe.Pointer(p)).Fdb, SQLITE_RANGE) @@ -43353,7 +43355,7 @@ if !(!(p != 0) || (*PreUpdate)(unsafe.Pointer(p)).Fop == SQLITE_INSERT) { goto __1 } - rc = Xsqlite3MisuseError(tls, 89707) + rc = Xsqlite3MisuseError(tls, 89711) goto preupdate_old_out __1: ; @@ -43497,7 +43499,7 @@ if !(!(p != 0) || (*PreUpdate)(unsafe.Pointer(p)).Fop == SQLITE_DELETE) { goto __1 } - rc = Xsqlite3MisuseError(tls, 89809) + rc = Xsqlite3MisuseError(tls, 89813) goto preupdate_new_out __1: ; @@ -43941,10 +43943,6 @@ 
} else if int32((*Mem)(unsafe.Pointer(p)).Fflags)&MEM_Real != 0 { h = h + U64(Xsqlite3VdbeIntValue(tls, p)) } else if int32((*Mem)(unsafe.Pointer(p)).Fflags)&(MEM_Str|MEM_Blob) != 0 { - h = h + U64((*Mem)(unsafe.Pointer(p)).Fn) - if int32((*Mem)(unsafe.Pointer(p)).Fflags)&MEM_Zero != 0 { - h = h + U64(*(*int32)(unsafe.Pointer(p))) - } } } return h @@ -46593,7 +46591,7 @@ goto __9 goto __425 __424: - rc = Xsqlite3CorruptError(tls, 93317) + rc = Xsqlite3CorruptError(tls, 93320) goto abort_due_to_error __425: ; @@ -48353,7 +48351,7 @@ if !((*Op)(unsafe.Pointer(pOp)).Fp2 == 0) { goto __682 } - rc = Xsqlite3CorruptError(tls, 95560) + rc = Xsqlite3CorruptError(tls, 95563) goto __683 __682: goto jump_to_p2 @@ -49131,7 +49129,7 @@ if !((*Op)(unsafe.Pointer(pOp)).Fp5 != 0 && !(Xsqlite3WritableSchema(tls, db) != 0)) { goto __770 } - rc = Xsqlite3ReportError(tls, SQLITE_CORRUPT|int32(3)<<8, 96635, ts+5850) + rc = Xsqlite3ReportError(tls, SQLITE_CORRUPT|int32(3)<<8, 96638, ts+5850) goto abort_due_to_error __770: ; @@ -49241,7 +49239,7 @@ if !(nCellKey <= int64(0) || nCellKey > int64(0x7fffffff)) { goto __781 } - rc = Xsqlite3CorruptError(tls, 96840) + rc = Xsqlite3CorruptError(tls, 96843) goto abort_due_to_error __781: ; @@ -49435,7 +49433,7 @@ goto __803 } - rc = Xsqlite3CorruptError(tls, 97092) + rc = Xsqlite3CorruptError(tls, 97095) __803: ; Xsqlite3DbFreeNN(tls, db, zSql) @@ -50802,7 +50800,7 @@ if !(rc == SQLITE_IOERR|int32(33)<<8) { goto __957 } - rc = Xsqlite3CorruptError(tls, 99031) + rc = Xsqlite3CorruptError(tls, 99034) __957: ; __956: @@ -51322,7 +51320,7 @@ var db uintptr if p == uintptr(0) { - return Xsqlite3MisuseError(tls, 99516) + return Xsqlite3MisuseError(tls, 99519) } db = (*Incrblob)(unsafe.Pointer(p)).Fdb Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) @@ -51405,7 +51403,7 @@ var db uintptr if p == uintptr(0) { - return Xsqlite3MisuseError(tls, 99616) + return Xsqlite3MisuseError(tls, 99619) } db = (*Incrblob)(unsafe.Pointer(p)).Fdb Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) @@ -54826,14 +54824,10 @@ ; Xsqlite3WalkExpr(tls, pWalker, (*Expr)(unsafe.Pointer(pExpr)).FpLeft) if 0 == Xsqlite3ExprCanBeNull(tls, (*Expr)(unsafe.Pointer(pExpr)).FpLeft) && !(int32((*Parse)(unsafe.Pointer(pParse)).FeParseMode) >= PARSE_MODE_RENAME) { - if int32((*Expr)(unsafe.Pointer(pExpr)).Fop) == TK_NOTNULL { - *(*uintptr)(unsafe.Pointer(pExpr + 8)) = ts + 6748 - *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_IsTrue) - } else { - *(*uintptr)(unsafe.Pointer(pExpr + 8)) = ts + 6753 - *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_IsFalse) - } - (*Expr)(unsafe.Pointer(pExpr)).Fop = U8(TK_TRUEFALSE) + *(*int32)(unsafe.Pointer(pExpr + 8)) = libc.Bool32(int32((*Expr)(unsafe.Pointer(pExpr)).Fop) == TK_NOTNULL) + *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_IntValue) + (*Expr)(unsafe.Pointer(pExpr)).Fop = U8(TK_INTEGER) + i = 0 p = pNC __4: @@ -54877,7 +54871,7 @@ var pLeft uintptr = (*Expr)(unsafe.Pointer(pExpr)).FpLeft if (*NameContext)(unsafe.Pointer(pNC)).FncFlags&(NC_IdxExpr|NC_GenCol) != 0 { - notValidImpl(tls, pParse, pNC, ts+6759, uintptr(0), pExpr) + notValidImpl(tls, pParse, pNC, ts+6748, uintptr(0), pExpr) } pRight = (*Expr)(unsafe.Pointer(pExpr)).FpRight @@ -54941,7 +54935,7 @@ (*Expr)(unsafe.Pointer(pExpr)).FiTable = exprProbability(tls, (*ExprList_item)(unsafe.Pointer(pList+8+1*32)).FpExpr) if (*Expr)(unsafe.Pointer(pExpr)).FiTable < 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+6776, libc.VaList(bp, pExpr)) + ts+6765, libc.VaList(bp, pExpr)) 
(*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } } else { @@ -54957,7 +54951,7 @@ var auth int32 = Xsqlite3AuthCheck(tls, pParse, SQLITE_FUNCTION, uintptr(0), (*FuncDef)(unsafe.Pointer(pDef)).FzName, uintptr(0)) if auth != SQLITE_OK { if auth == SQLITE_DENY { - Xsqlite3ErrorMsg(tls, pParse, ts+6840, + Xsqlite3ErrorMsg(tls, pParse, ts+6829, libc.VaList(bp+8, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } @@ -54971,7 +54965,7 @@ } if (*FuncDef)(unsafe.Pointer(pDef)).FfuncFlags&U32(SQLITE_FUNC_CONSTANT) == U32(0) { if (*NameContext)(unsafe.Pointer(pNC)).FncFlags&(NC_IdxExpr|NC_PartIdx|NC_GenCol) != 0 { - notValidImpl(tls, pParse, pNC, ts+6876, uintptr(0), pExpr) + notValidImpl(tls, pParse, pNC, ts+6865, uintptr(0), pExpr) } } else { @@ -54994,30 +54988,30 @@ if 0 == libc.Bool32(int32((*Parse)(unsafe.Pointer(pParse)).FeParseMode) >= PARSE_MODE_RENAME) { if pDef != 0 && (*FuncDef)(unsafe.Pointer(pDef)).FxValue == uintptr(0) && pWin != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+6904, libc.VaList(bp+16, pExpr)) + ts+6893, libc.VaList(bp+16, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } else if is_agg != 0 && (*NameContext)(unsafe.Pointer(pNC)).FncFlags&NC_AllowAgg == 0 || is_agg != 0 && (*FuncDef)(unsafe.Pointer(pDef)).FfuncFlags&U32(SQLITE_FUNC_WINDOW) != 0 && !(pWin != 0) || is_agg != 0 && pWin != 0 && (*NameContext)(unsafe.Pointer(pNC)).FncFlags&NC_AllowWin == 0 { var zType uintptr if (*FuncDef)(unsafe.Pointer(pDef)).FfuncFlags&U32(SQLITE_FUNC_WINDOW) != 0 || pWin != 0 { - zType = ts + 6947 + zType = ts + 6936 } else { - zType = ts + 6954 + zType = ts + 6943 } - Xsqlite3ErrorMsg(tls, pParse, ts+6964, libc.VaList(bp+24, zType, pExpr)) + Xsqlite3ErrorMsg(tls, pParse, ts+6953, libc.VaList(bp+24, zType, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ is_agg = 0 } else if no_such_func != 0 && int32((*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).Finit.Fbusy) == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+6992, libc.VaList(bp+40, pExpr)) + Xsqlite3ErrorMsg(tls, pParse, ts+6981, libc.VaList(bp+40, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } else if wrong_num_args != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+7014, + Xsqlite3ErrorMsg(tls, pParse, ts+7003, libc.VaList(bp+48, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } else if is_agg == 0 && (*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_WinFunc) != U32(0) { Xsqlite3ErrorMsg(tls, pParse, - ts+7058, + ts+7047, libc.VaList(bp+56, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } @@ -55089,15 +55083,15 @@ var nRef int32 = (*NameContext)(unsafe.Pointer(pNC)).FnRef if (*NameContext)(unsafe.Pointer(pNC)).FncFlags&NC_SelfRef != 0 { - notValidImpl(tls, pParse, pNC, ts+7106, pExpr, pExpr) + notValidImpl(tls, pParse, pNC, ts+7095, pExpr, pExpr) } else { Xsqlite3WalkSelect(tls, pWalker, *(*uintptr)(unsafe.Pointer(pExpr + 32))) } if nRef != (*NameContext)(unsafe.Pointer(pNC)).FnRef { *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_VarSelect) - *(*int32)(unsafe.Pointer(pNC + 40)) |= NC_VarSelect } + *(*int32)(unsafe.Pointer(pNC + 40)) |= NC_Subquery } break @@ -55105,7 +55099,7 @@ case TK_VARIABLE: { if (*NameContext)(unsafe.Pointer(pNC)).FncFlags&(NC_IsCheck|NC_PartIdx|NC_IdxExpr|NC_GenCol) != 0 { - notValidImpl(tls, pParse, pNC, ts+7117, pExpr, pExpr) + notValidImpl(tls, pParse, pNC, ts+7106, pExpr, pExpr) } break @@ -55236,7 +55230,7 @@ defer tls.Free(24) Xsqlite3ErrorMsg(tls, pParse, - ts+7128, libc.VaList(bp, i, zType, mx)) + ts+7117, libc.VaList(bp, i, zType, mx)) Xsqlite3RecordErrorOffsetOfExpr(tls, 
(*Parse)(unsafe.Pointer(pParse)).Fdb, pError) } @@ -55256,7 +55250,7 @@ } db = (*Parse)(unsafe.Pointer(pParse)).Fdb if (*ExprList)(unsafe.Pointer(pOrderBy)).FnExpr > *(*int32)(unsafe.Pointer(db + 136 + 2*4)) { - Xsqlite3ErrorMsg(tls, pParse, ts+7184, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+7173, 0) return 1 } for i = 0; i < (*ExprList)(unsafe.Pointer(pOrderBy)).FnExpr; i++ { @@ -55291,7 +55285,7 @@ } if Xsqlite3ExprIsInteger(tls, pE, bp+8) != 0 { if *(*int32)(unsafe.Pointer(bp + 8)) <= 0 || *(*int32)(unsafe.Pointer(bp + 8)) > (*ExprList)(unsafe.Pointer(pEList)).FnExpr { - resolveOutOfRangeError(tls, pParse, ts+7218, i+1, (*ExprList)(unsafe.Pointer(pEList)).FnExpr, pE) + resolveOutOfRangeError(tls, pParse, ts+7207, i+1, (*ExprList)(unsafe.Pointer(pEList)).FnExpr, pE) return 1 } } else { @@ -55348,7 +55342,7 @@ for i = 0; i < (*ExprList)(unsafe.Pointer(pOrderBy)).FnExpr; i++ { if int32(*(*uint16)(unsafe.Pointer(pOrderBy + 8 + uintptr(i)*32 + 16 + 4))&0x4>>2) == 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+7224, libc.VaList(bp, i+1)) + ts+7213, libc.VaList(bp, i+1)) return 1 } } @@ -55376,7 +55370,7 @@ return 0 } if (*ExprList)(unsafe.Pointer(pOrderBy)).FnExpr > *(*int32)(unsafe.Pointer(db + 136 + 2*4)) { - Xsqlite3ErrorMsg(tls, pParse, ts+7285, libc.VaList(bp, zType)) + Xsqlite3ErrorMsg(tls, pParse, ts+7274, libc.VaList(bp, zType)) return 1 } pEList = (*Select)(unsafe.Pointer(pSelect)).FpEList @@ -55590,7 +55584,7 @@ *(*int32)(unsafe.Pointer(bp + 40)) |= NC_UEList if (*Select)(unsafe.Pointer(p)).FpHaving != 0 { if (*Select)(unsafe.Pointer(p)).FselFlags&U32(SF_Aggregate) == U32(0) { - Xsqlite3ErrorMsg(tls, pParse, ts+7316, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+7305, 0) return WRC_Abort } if Xsqlite3ResolveExprNames(tls, bp, (*Select)(unsafe.Pointer(p)).FpHaving) != 0 { @@ -55630,7 +55624,7 @@ if (*Select)(unsafe.Pointer(p)).FpOrderBy != uintptr(0) && isCompound <= nCompound && - resolveOrderGroupBy(tls, bp, p, (*Select)(unsafe.Pointer(p)).FpOrderBy, ts+7218) != 0 { + resolveOrderGroupBy(tls, bp, p, (*Select)(unsafe.Pointer(p)).FpOrderBy, ts+7207) != 0 { return WRC_Abort } if (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { @@ -55641,7 +55635,7 @@ if pGroupBy != 0 { var pItem uintptr - if resolveOrderGroupBy(tls, bp, p, pGroupBy, ts+7355) != 0 || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { + if resolveOrderGroupBy(tls, bp, p, pGroupBy, ts+7344) != 0 || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { return WRC_Abort } i = 0 @@ -55653,7 +55647,7 @@ { if (*Expr)(unsafe.Pointer((*ExprList_item)(unsafe.Pointer(pItem)).FpExpr)).Fflags&U32(EP_Agg) != U32(0) { Xsqlite3ErrorMsg(tls, pParse, - ts+7361, 0) + ts+7350, 0) return WRC_Abort } @@ -56517,7 +56511,7 @@ var mxHeight int32 = *(*int32)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb + 136 + 3*4)) if nHeight > mxHeight { Xsqlite3ErrorMsg(tls, pParse, - ts+7420, libc.VaList(bp, mxHeight)) + ts+7409, libc.VaList(bp, mxHeight)) rc = SQLITE_ERROR } return rc @@ -56766,10 +56760,10 @@ nExprElem = 1 } if nExprElem != nElem { - Xsqlite3ErrorMsg(tls, pParse, ts+7468, + Xsqlite3ErrorMsg(tls, pParse, ts+7457, libc.VaList(bp, nExprElem, func() uintptr { if nExprElem > 1 { - return ts + 7512 + return ts + 7501 } return ts + 1538 }(), nElem)) @@ -56810,7 +56804,7 @@ !(int32((*Parse)(unsafe.Pointer(pParse)).FeParseMode) >= PARSE_MODE_RENAME) { Xsqlite3ExprDeferredDelete(tls, pParse, pLeft) Xsqlite3ExprDeferredDelete(tls, pParse, pRight) - return Xsqlite3Expr(tls, db, TK_INTEGER, ts+7514) + return Xsqlite3Expr(tls, db, TK_INTEGER, ts+7503) } else { 
return Xsqlite3PExpr(tls, pParse, TK_AND, pLeft, pRight) } @@ -56836,7 +56830,7 @@ if pList != 0 && (*ExprList)(unsafe.Pointer(pList)).FnExpr > *(*int32)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb + 136 + 6*4)) && !(int32((*Parse)(unsafe.Pointer(pParse)).Fnested) != 0) { - Xsqlite3ErrorMsg(tls, pParse, ts+7516, libc.VaList(bp, pToken)) + Xsqlite3ErrorMsg(tls, pParse, ts+7505, libc.VaList(bp, pToken)) } *(*uintptr)(unsafe.Pointer(pNew + 32)) = pList *(*U32)(unsafe.Pointer(pNew + 4)) |= U32(EP_HasFunc) @@ -56864,7 +56858,7 @@ if (*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_FromDDL) != U32(0) { if (*FuncDef)(unsafe.Pointer(pDef)).FfuncFlags&U32(SQLITE_FUNC_DIRECT) != U32(0) || (*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).Fflags&uint64(SQLITE_TrustedSchema) == uint64(0) { - Xsqlite3ErrorMsg(tls, pParse, ts+7550, libc.VaList(bp, pExpr)) + Xsqlite3ErrorMsg(tls, pParse, ts+7539, libc.VaList(bp, pExpr)) } } } @@ -56911,7 +56905,7 @@ } if bOk == 0 || *(*I64)(unsafe.Pointer(bp + 8)) < int64(1) || *(*I64)(unsafe.Pointer(bp + 8)) > I64(*(*int32)(unsafe.Pointer(db + 136 + 9*4))) { - Xsqlite3ErrorMsg(tls, pParse, ts+7570, + Xsqlite3ErrorMsg(tls, pParse, ts+7559, libc.VaList(bp, *(*int32)(unsafe.Pointer(db + 136 + 9*4)))) Xsqlite3RecordErrorOffsetOfExpr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pExpr) return @@ -56936,7 +56930,7 @@ } (*Expr)(unsafe.Pointer(pExpr)).FiColumn = x if int32(x) > *(*int32)(unsafe.Pointer(db + 136 + 9*4)) { - Xsqlite3ErrorMsg(tls, pParse, ts+7613, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+7602, 0) Xsqlite3RecordErrorOffsetOfExpr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pExpr) } } @@ -57511,7 +57505,7 @@ if !(int32((*Expr)(unsafe.Pointer(pExpr)).Fop) != TK_SELECT && (*IdList)(unsafe.Pointer(pColumns)).FnId != libc.AssignInt32(&n, Xsqlite3ExprVectorSize(tls, pExpr))) { goto __3 } - Xsqlite3ErrorMsg(tls, pParse, ts+7636, + Xsqlite3ErrorMsg(tls, pParse, ts+7625, libc.VaList(bp, (*IdList)(unsafe.Pointer(pColumns)).FnId, n)) goto vector_append_error __3: @@ -57634,7 +57628,7 @@ var mx int32 = *(*int32)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb + 136 + 2*4)) if pEList != 0 && (*ExprList)(unsafe.Pointer(pEList)).FnExpr > mx { - Xsqlite3ErrorMsg(tls, pParse, ts+7666, libc.VaList(bp, zObject)) + Xsqlite3ErrorMsg(tls, pParse, ts+7655, libc.VaList(bp, zObject)) } } @@ -57690,10 +57684,10 @@ // "false" EP_IsFalse // anything else 0 func Xsqlite3IsTrueOrFalse(tls *libc.TLS, zIn uintptr) U32 { - if Xsqlite3StrICmp(tls, zIn, ts+6748) == 0 { + if Xsqlite3StrICmp(tls, zIn, ts+7678) == 0 { return U32(EP_IsTrue) } - if Xsqlite3StrICmp(tls, zIn, ts+6753) == 0 { + if Xsqlite3StrICmp(tls, zIn, ts+7683) == 0 { return U32(EP_IsFalse) } return U32(0) @@ -58768,7 +58762,7 @@ } if (*Select)(unsafe.Pointer(pSel)).FpLimit != 0 { var db uintptr = (*Parse)(unsafe.Pointer(pParse)).Fdb - pLimit = Xsqlite3Expr(tls, db, TK_INTEGER, ts+7514) + pLimit = Xsqlite3Expr(tls, db, TK_INTEGER, ts+7503) if pLimit != 0 { (*Expr)(unsafe.Pointer(pLimit)).FaffExpr = int8(SQLITE_AFF_NUMERIC) pLimit = Xsqlite3PExpr(tls, pParse, TK_NE, @@ -59206,6 +59200,7 @@ func Xsqlite3ExprCodeGeneratedColumn(tls *libc.TLS, pParse uintptr, pTab uintptr, pCol uintptr, regOut int32) { var iAddr int32 var v uintptr = (*Parse)(unsafe.Pointer(pParse)).FpVdbe + var nErr int32 = (*Parse)(unsafe.Pointer(pParse)).FnErr if (*Parse)(unsafe.Pointer(pParse)).FiSelfTab > 0 { iAddr = Xsqlite3VdbeAddOp3(tls, v, OP_IfNullRow, (*Parse)(unsafe.Pointer(pParse)).FiSelfTab-1, 0, regOut) @@ -59219,6 +59214,9 @@ if iAddr != 0 { 
Xsqlite3VdbeJumpHere(tls, v, iAddr) } + if (*Parse)(unsafe.Pointer(pParse)).FnErr > nErr { + (*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).FerrByteOffset = -1 + } } // Generate code to extract the value of the iCol-th column of a table. @@ -59437,6 +59435,7 @@ var p uintptr var v uintptr for p = (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr; p != 0; p = (*IndexedExpr)(unsafe.Pointer(p)).FpIENext { + var exprAff U8 var iDataCur int32 = (*IndexedExpr)(unsafe.Pointer(p)).FiDataCur if iDataCur < 0 { continue @@ -59450,6 +59449,14 @@ if Xsqlite3ExprCompare(tls, uintptr(0), pExpr, (*IndexedExpr)(unsafe.Pointer(p)).FpExpr, iDataCur) != 0 { continue } + + exprAff = U8(Xsqlite3ExprAffinity(tls, pExpr)) + if int32(exprAff) <= SQLITE_AFF_BLOB && int32((*IndexedExpr)(unsafe.Pointer(p)).Faff) != SQLITE_AFF_BLOB || + int32(exprAff) == SQLITE_AFF_TEXT && int32((*IndexedExpr)(unsafe.Pointer(p)).Faff) != SQLITE_AFF_TEXT || + int32(exprAff) >= SQLITE_AFF_NUMERIC && int32((*IndexedExpr)(unsafe.Pointer(p)).Faff) != SQLITE_AFF_NUMERIC { + continue + } + v = (*Parse)(unsafe.Pointer(pParse)).FpVdbe if (*IndexedExpr)(unsafe.Pointer(p)).FbMaybeNullRow != 0 { @@ -60223,7 +60230,7 @@ if !((*Expr)(unsafe.Pointer(pExpr)).FiTable != n1) { goto __122 } - Xsqlite3ErrorMsg(tls, pParse, ts+7636, + Xsqlite3ErrorMsg(tls, pParse, ts+7625, libc.VaList(bp+24, (*Expr)(unsafe.Pointer(pExpr)).FiTable, n1)) __122: ; @@ -60245,11 +60252,10 @@ return target __50: - if !(!((*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_Collate) != U32(0)) && - (*Expr)(unsafe.Pointer(pExpr)).FpLeft != 0 && - int32((*Expr)(unsafe.Pointer((*Expr)(unsafe.Pointer(pExpr)).FpLeft)).Fop) == TK_FUNCTION) { + if !!((*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_Collate) != U32(0)) { goto __123 } + inReg = Xsqlite3ExprCodeTarget(tls, pParse, (*Expr)(unsafe.Pointer(pExpr)).FpLeft, target) if !(inReg != target) { goto __125 @@ -60320,13 +60326,19 @@ ; __127: ; - addrINR = Xsqlite3VdbeAddOp1(tls, v, OP_IfNullRow, (*Expr)(unsafe.Pointer(pExpr)).FiTable) + addrINR = Xsqlite3VdbeAddOp3(tls, v, OP_IfNullRow, (*Expr)(unsafe.Pointer(pExpr)).FiTable, 0, target) (*Parse)(unsafe.Pointer(pParse)).FokConstFactor = U8(0) inReg = Xsqlite3ExprCodeTarget(tls, pParse, (*Expr)(unsafe.Pointer(pExpr)).FpLeft, target) (*Parse)(unsafe.Pointer(pParse)).FokConstFactor = okConstFactor + if !(inReg != target) { + goto __130 + } + Xsqlite3VdbeAddOp2(tls, v, OP_SCopy, inReg, target) + inReg = target +__130: + ; Xsqlite3VdbeJumpHere(tls, v, addrINR) - Xsqlite3VdbeChangeP3(tls, v, addrINR, inReg) goto __5 __56: @@ -60339,15 +60351,15 @@ nExpr = (*ExprList)(unsafe.Pointer(pEList)).FnExpr endLabel = Xsqlite3VdbeMakeLabel(tls, pParse) if !(libc.AssignUintptr(&pX, (*Expr)(unsafe.Pointer(pExpr)).FpLeft) != uintptr(0)) { - goto __130 + goto __131 } pDel = Xsqlite3ExprDup(tls, db1, pX, 0) if !((*Sqlite3)(unsafe.Pointer(db1)).FmallocFailed != 0) { - goto __131 + goto __132 } Xsqlite3ExprDelete(tls, db1, pDel) goto __5 -__131: +__132: ; exprToRegister(tls, pDel, exprCodeVector(tls, pParse, pDel, bp+40)) @@ -60357,22 +60369,22 @@ pTest = bp + 120 *(*int32)(unsafe.Pointer(bp + 40)) = 0 -__130: +__131: ; i1 = 0 -__132: +__133: if !(i1 < nExpr-1) { - goto __134 + goto __135 } if !(pX != 0) { - goto __135 + goto __136 } (*Expr)(unsafe.Pointer(bp + 120)).FpRight = (*ExprList_item)(unsafe.Pointer(aListelem + uintptr(i1)*32)).FpExpr - goto __136 -__135: - pTest = (*ExprList_item)(unsafe.Pointer(aListelem + uintptr(i1)*32)).FpExpr + goto __137 __136: + pTest = 
(*ExprList_item)(unsafe.Pointer(aListelem + uintptr(i1)*32)).FpExpr +__137: ; nextCase = Xsqlite3VdbeMakeLabel(tls, pParse) @@ -60381,21 +60393,21 @@ Xsqlite3ExprCode(tls, pParse, (*ExprList_item)(unsafe.Pointer(aListelem+uintptr(i1+1)*32)).FpExpr, target) Xsqlite3VdbeGoto(tls, v, endLabel) Xsqlite3VdbeResolveLabel(tls, v, nextCase) - goto __133 -__133: - i1 = i1 + 2 - goto __132 goto __134 __134: + i1 = i1 + 2 + goto __133 + goto __135 +__135: ; if !(nExpr&1 != 0) { - goto __137 + goto __138 } Xsqlite3ExprCode(tls, pParse, (*ExprList_item)(unsafe.Pointer(pEList+8+uintptr(nExpr-1)*32)).FpExpr, target) - goto __138 -__137: - Xsqlite3VdbeAddOp2(tls, v, OP_Null, 0, target) + goto __139 __138: + Xsqlite3VdbeAddOp2(tls, v, OP_Null, 0, target) +__139: ; Xsqlite3ExprDelete(tls, db1, pDel) setDoNotMergeFlagOnCopy(tls, v) @@ -60405,27 +60417,27 @@ __57: ; if !(!(int32((*Parse)(unsafe.Pointer(pParse)).FpTriggerTab) != 0) && !(int32((*Parse)(unsafe.Pointer(pParse)).Fnested) != 0)) { - goto __139 + goto __140 } Xsqlite3ErrorMsg(tls, pParse, ts+8066, 0) return 0 -__139: +__140: ; if !(int32((*Expr)(unsafe.Pointer(pExpr)).FaffExpr) == OE_Abort) { - goto __140 + goto __141 } Xsqlite3MayAbort(tls, pParse) -__140: +__141: ; if !(int32((*Expr)(unsafe.Pointer(pExpr)).FaffExpr) == OE_Ignore) { - goto __141 + goto __142 } Xsqlite3VdbeAddOp4(tls, v, OP_Halt, SQLITE_OK, OE_Ignore, 0, *(*uintptr)(unsafe.Pointer(pExpr + 8)), 0) - goto __142 -__141: + goto __143 +__142: Xsqlite3HaltConstraint(tls, pParse, func() int32 { if (*Parse)(unsafe.Pointer(pParse)).FpTriggerTab != 0 { @@ -60434,7 +60446,7 @@ return SQLITE_ERROR }(), int32((*Expr)(unsafe.Pointer(pExpr)).FaffExpr), *(*uintptr)(unsafe.Pointer(pExpr + 8)), int8(0), uint8(0)) -__142: +__143: ; goto __5 @@ -63105,7 +63117,7 @@ return SQLITE_NOMEM } if Xsqlite3_strnicmp(tls, zSql, ts+10906, 7) != 0 { - return Xsqlite3CorruptError(tls, 113494) + return Xsqlite3CorruptError(tls, 113516) } (*Sqlite3)(unsafe.Pointer(db)).Finit.FiDb = func() uint8 { if bTemp != 0 { @@ -63122,7 +63134,7 @@ } if rc == SQLITE_OK && ((*Parse)(unsafe.Pointer(p)).FpNewTable == uintptr(0) && (*Parse)(unsafe.Pointer(p)).FpNewIndex == uintptr(0) && (*Parse)(unsafe.Pointer(p)).FpNewTrigger == uintptr(0)) { - rc = Xsqlite3CorruptError(tls, 113505) + rc = Xsqlite3CorruptError(tls, 113527) } (*Sqlite3)(unsafe.Pointer(db)).Finit.FiDb = U8(0) @@ -64043,7 +64055,7 @@ goto __2 } - rc = Xsqlite3CorruptError(tls, 114441) + rc = Xsqlite3CorruptError(tls, 114463) goto drop_column_done __2: ; @@ -68407,6 +68419,12 @@ pExpr = Xsqlite3PExpr(tls, pParse, TK_UPLUS, pExpr, uintptr(0)) __11: ; + if !(pExpr != 0 && int32((*Expr)(unsafe.Pointer(pExpr)).Fop) != TK_RAISE) { + goto __12 + } + (*Expr)(unsafe.Pointer(pExpr)).FaffExpr = (*Column)(unsafe.Pointer(pCol)).Faffinity +__12: + ; Xsqlite3ColumnSetExpr(tls, pParse, pTab, pCol, pExpr) pExpr = uintptr(0) goto generated_done @@ -69571,7 +69589,7 @@ if Xsqlite3_strnicmp(tls, (*Table)(unsafe.Pointer(pTab)).FzName+uintptr(7), ts+3270, 4) == 0 { return 0 } - if Xsqlite3_strnicmp(tls, (*Table)(unsafe.Pointer(pTab)).FzName+uintptr(7), ts+7117, 10) == 0 { + if Xsqlite3_strnicmp(tls, (*Table)(unsafe.Pointer(pTab)).FzName+uintptr(7), ts+7106, 10) == 0 { return 0 } return 1 @@ -70817,7 +70835,7 @@ goto __101 } Xsqlite3ErrorMsg(tls, pParse, ts+14124, 0) - (*Parse)(unsafe.Pointer(pParse)).Frc = Xsqlite3CorruptError(tls, 121835) + (*Parse)(unsafe.Pointer(pParse)).Frc = Xsqlite3CorruptError(tls, 121859) goto exit_create_index __101: ; @@ -72862,7 +72880,7 @@ goto __16 __15: wcf = 
U16(WHERE_ONEPASS_DESIRED | WHERE_DUPLICATES_OK) - if !((*NameContext)(unsafe.Pointer(bp+16)).FncFlags&NC_VarSelect != 0) { + if !((*NameContext)(unsafe.Pointer(bp+16)).FncFlags&NC_Subquery != 0) { goto __23 } bComplex = 1 @@ -79328,7 +79346,7 @@ if !!(Xsqlite3SafetyCheckOk(tls, db) != 0) { goto __1 } - return Xsqlite3MisuseError(tls, 131895) + return Xsqlite3MisuseError(tls, 131931) __1: ; if !(zSql == uintptr(0)) { @@ -80727,7 +80745,7 @@ } else if (*FuncDef)(unsafe.Pointer(p)).FxFinalize != uintptr(0) { zType = ts + 17497 } else { - zType = ts + 7512 + zType = ts + 7501 } Xsqlite3VdbeMultiLoad(tls, v, 1, ts+17499, libc.VaList(bp, (*FuncDef)(unsafe.Pointer(p)).FzName, isBuiltin, @@ -80888,6 +80906,7 @@ var zErr2 uintptr var k3 int32 var pCheck uintptr + var jmp7 int32 var jmp6 int32 var iCol1 int32 var uniqOk int32 @@ -82169,7 +82188,7 @@ goto __210 } pMod = (*HashElem)(unsafe.Pointer(j1)).Fdata - Xsqlite3VdbeMultiLoad(tls, v, 1, ts+7512, libc.VaList(bp+264, (*Module)(unsafe.Pointer(pMod)).FzName)) + Xsqlite3VdbeMultiLoad(tls, v, 1, ts+7501, libc.VaList(bp+264, (*Module)(unsafe.Pointer(pMod)).FzName)) goto __209 __209: j1 = (*HashElem)(unsafe.Pointer(j1)).Fnext @@ -82185,7 +82204,7 @@ if !(i6 < int32(uint64(unsafe.Sizeof(aPragmaName))/uint64(unsafe.Sizeof(PragmaName{})))) { goto __213 } - Xsqlite3VdbeMultiLoad(tls, v, 1, ts+7512, libc.VaList(bp+272, aPragmaName[i6].FzName)) + Xsqlite3VdbeMultiLoad(tls, v, 1, ts+7501, libc.VaList(bp+272, aPragmaName[i6].FzName)) goto __212 __212: i6++ @@ -82990,80 +83009,94 @@ jmp4 = integrityCheckResultRow(tls, v) Xsqlite3VdbeJumpHere(tls, v, jmp21) + if !((*Table)(unsafe.Pointer(pTab9)).FtabFlags&U32(TF_WithoutRowid) == U32(0)) { + goto __338 + } + Xsqlite3VdbeAddOp2(tls, v, OP_IdxRowid, *(*int32)(unsafe.Pointer(bp + 608))+j4, 3) + jmp7 = Xsqlite3VdbeAddOp3(tls, v, OP_Eq, 3, 0, r1+int32((*Index)(unsafe.Pointer(pIdx5)).FnColumn)-1) + + Xsqlite3VdbeLoadString(tls, v, 3, + ts+17913) + Xsqlite3VdbeAddOp3(tls, v, OP_Concat, 7, 3, 3) + Xsqlite3VdbeLoadString(tls, v, 4, ts+17949) + Xsqlite3VdbeGoto(tls, v, jmp5-1) + Xsqlite3VdbeJumpHere(tls, v, jmp7) +__338: + ; label6 = 0 kk = 0 -__338: +__339: if !(kk < int32((*Index)(unsafe.Pointer(pIdx5)).FnKeyCol)) { - goto __340 + goto __341 } if !(*(*uintptr)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx5)).FazColl + uintptr(kk)*8)) == uintptr(unsafe.Pointer(&Xsqlite3StrBINARY))) { - goto __341 + goto __342 } - goto __339 -__341: + goto __340 +__342: ; if !(label6 == 0) { - goto __342 + goto __343 } label6 = Xsqlite3VdbeMakeLabel(tls, pParse) -__342: +__343: ; Xsqlite3VdbeAddOp3(tls, v, OP_Column, *(*int32)(unsafe.Pointer(bp + 608))+j4, kk, 3) Xsqlite3VdbeAddOp3(tls, v, OP_Ne, 3, label6, r1+kk) - goto __339 -__339: - kk++ - goto __338 goto __340 __340: + kk++ + goto __339 + goto __341 +__341: ; if !(label6 != 0) { - goto __343 + goto __344 } jmp6 = Xsqlite3VdbeAddOp0(tls, v, OP_Goto) Xsqlite3VdbeResolveLabel(tls, v, label6) Xsqlite3VdbeLoadString(tls, v, 3, ts+17887) Xsqlite3VdbeAddOp3(tls, v, OP_Concat, 7, 3, 3) - Xsqlite3VdbeLoadString(tls, v, 4, ts+17913) + Xsqlite3VdbeLoadString(tls, v, 4, ts+17960) Xsqlite3VdbeGoto(tls, v, jmp5-1) Xsqlite3VdbeJumpHere(tls, v, jmp6) -__343: +__344: ; if !(int32((*Index)(unsafe.Pointer(pIdx5)).FonError) != OE_None) { - goto __344 + goto __345 } uniqOk = Xsqlite3VdbeMakeLabel(tls, pParse) kk = 0 -__345: +__346: if !(kk < int32((*Index)(unsafe.Pointer(pIdx5)).FnKeyCol)) { - goto __347 + goto __348 } iCol1 = int32(*(*I16)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx5)).FaiColumn + 
uintptr(kk)*2))) if !(iCol1 >= 0 && uint32(int32(*(*uint8)(unsafe.Pointer((*Table)(unsafe.Pointer(pTab9)).FaCol + uintptr(iCol1)*24 + 8))&0xf>>0)) != 0) { - goto __348 + goto __349 } - goto __346 -__348: + goto __347 +__349: ; Xsqlite3VdbeAddOp2(tls, v, OP_IsNull, r1+kk, uniqOk) - goto __346 -__346: - kk++ - goto __345 goto __347 __347: + kk++ + goto __346 + goto __348 +__348: ; jmp61 = Xsqlite3VdbeAddOp1(tls, v, OP_Next, *(*int32)(unsafe.Pointer(bp + 608))+j4) Xsqlite3VdbeGoto(tls, v, uniqOk) Xsqlite3VdbeJumpHere(tls, v, jmp61) Xsqlite3VdbeAddOp4Int(tls, v, OP_IdxGT, *(*int32)(unsafe.Pointer(bp + 608))+j4, uniqOk, r1, int32((*Index)(unsafe.Pointer(pIdx5)).FnKeyCol)) - Xsqlite3VdbeLoadString(tls, v, 3, ts+17940) + Xsqlite3VdbeLoadString(tls, v, 3, ts+17987) Xsqlite3VdbeGoto(tls, v, jmp5) Xsqlite3VdbeResolveLabel(tls, v, uniqOk) -__344: +__345: ; Xsqlite3VdbeJumpHere(tls, v, jmp4) Xsqlite3ResolvePartIdxLabel(tls, pParse, *(*int32)(unsafe.Pointer(bp + 624))) @@ -83080,20 +83113,20 @@ Xsqlite3VdbeAddOp2(tls, v, OP_Next, *(*int32)(unsafe.Pointer(bp + 604)), loopTop) Xsqlite3VdbeJumpHere(tls, v, loopTop-1) if !!(isQuick != 0) { - goto __349 + goto __350 } - Xsqlite3VdbeLoadString(tls, v, 2, ts+17967) + Xsqlite3VdbeLoadString(tls, v, 2, ts+18014) j4 = 0 pIdx5 = (*Table)(unsafe.Pointer(pTab9)).FpIndex -__350: +__351: if !(pIdx5 != 0) { - goto __352 + goto __353 } if !(pPk1 == pIdx5) { - goto __353 + goto __354 } - goto __351 -__353: + goto __352 +__354: ; Xsqlite3VdbeAddOp2(tls, v, OP_Count, *(*int32)(unsafe.Pointer(bp + 608))+j4, 3) addr1 = Xsqlite3VdbeAddOp3(tls, v, OP_Eq, 8+j4, 0, 3) @@ -83102,21 +83135,21 @@ Xsqlite3VdbeAddOp3(tls, v, OP_Concat, 4, 2, 3) integrityCheckResultRow(tls, v) Xsqlite3VdbeJumpHere(tls, v, addr1) - goto __351 -__351: - pIdx5 = (*Index)(unsafe.Pointer(pIdx5)).FpNext - j4++ - goto __350 goto __352 __352: + pIdx5 = (*Index)(unsafe.Pointer(pIdx5)).FpNext + j4++ + goto __351 + goto __353 +__353: ; if !(pPk1 != 0) { - goto __354 + goto __355 } Xsqlite3ReleaseTempRange(tls, pParse, r2, int32((*Index)(unsafe.Pointer(pPk1)).FnKeyCol)) -__354: +__355: ; -__349: +__350: ; goto __284 __284: @@ -83134,14 +83167,14 @@ ; aOp2 = Xsqlite3VdbeAddOpList(tls, v, int32(uint64(unsafe.Sizeof(endCode))/uint64(unsafe.Sizeof(VdbeOpList{}))), uintptr(unsafe.Pointer(&endCode)), iLn5) if !(aOp2 != 0) { - goto __355 + goto __356 } (*VdbeOp)(unsafe.Pointer(aOp2)).Fp2 = 1 - *(*int32)(unsafe.Pointer(bp + 600)) (*VdbeOp)(unsafe.Pointer(aOp2 + 2*24)).Fp4type = int8(-1) - *(*uintptr)(unsafe.Pointer(aOp2 + 2*24 + 16)) = ts + 17996 + *(*uintptr)(unsafe.Pointer(aOp2 + 2*24 + 16)) = ts + 18043 (*VdbeOp)(unsafe.Pointer(aOp2 + 5*24)).Fp4type = int8(-1) *(*uintptr)(unsafe.Pointer(aOp2 + 5*24 + 16)) = Xsqlite3ErrStr(tls, SQLITE_CORRUPT) -__355: +__356: ; Xsqlite3VdbeChangeP3(tls, v, 0, Xsqlite3VdbeCurrentAddr(tls, v)-2) @@ -83149,27 +83182,27 @@ __45: if !!(zRight != 0) { - goto __356 + goto __357 } if !(Xsqlite3ReadSchema(tls, pParse) != 0) { - goto __358 + goto __359 } goto pragma_out -__358: +__359: ; returnSingleText(tls, v, encnames1[(*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).Fenc].FzName) - goto __357 -__356: + goto __358 +__357: if !((*Sqlite3)(unsafe.Pointer(db)).FmDbFlags&U32(DBFLAG_EncodingFixed) == U32(0)) { - goto __359 + goto __360 } pEnc = uintptr(unsafe.Pointer(&encnames1)) -__360: +__361: if !((*EncName)(unsafe.Pointer(pEnc)).FzName != 0) { - goto __362 + goto __363 } if !(0 == Xsqlite3StrICmp(tls, zRight, (*EncName)(unsafe.Pointer(pEnc)).FzName)) { - goto __363 + goto 
__364 } if (*EncName)(unsafe.Pointer(pEnc)).Fenc != 0 { enc = (*EncName)(unsafe.Pointer(pEnc)).Fenc @@ -83178,25 +83211,25 @@ } (*Schema)(unsafe.Pointer((*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb)).FpSchema)).Fenc = enc Xsqlite3SetTextEncoding(tls, db, enc) - goto __362 -__363: + goto __363 +__364: ; - goto __361 -__361: - pEnc += 16 - goto __360 goto __362 __362: + pEnc += 16 + goto __361 + goto __363 +__363: ; if !!(int32((*EncName)(unsafe.Pointer(pEnc)).FzName) != 0) { - goto __364 + goto __365 } - Xsqlite3ErrorMsg(tls, pParse, ts+17999, libc.VaList(bp+456, zRight)) -__364: + Xsqlite3ErrorMsg(tls, pParse, ts+18046, libc.VaList(bp+456, zRight)) +__365: ; -__359: +__360: ; -__357: +__358: ; goto __15 @@ -83204,15 +83237,15 @@ iCookie = int32((*PragmaName)(unsafe.Pointer(pPragma)).FiArg) Xsqlite3VdbeUsesBtree(tls, v, iDb) if !(zRight != 0 && int32((*PragmaName)(unsafe.Pointer(pPragma)).FmPragFlg)&PragFlg_ReadOnly == 0) { - goto __365 + goto __366 } aOp3 = Xsqlite3VdbeAddOpList(tls, v, int32(uint64(unsafe.Sizeof(setCookie))/uint64(unsafe.Sizeof(VdbeOpList{}))), uintptr(unsafe.Pointer(&setCookie)), 0) if !(0 != 0) { - goto __367 + goto __368 } goto __15 -__367: +__368: ; (*VdbeOp)(unsafe.Pointer(aOp3)).Fp1 = iDb (*VdbeOp)(unsafe.Pointer(aOp3 + 1*24)).Fp1 = iDb @@ -83220,41 +83253,41 @@ (*VdbeOp)(unsafe.Pointer(aOp3 + 1*24)).Fp3 = Xsqlite3Atoi(tls, zRight) (*VdbeOp)(unsafe.Pointer(aOp3 + 1*24)).Fp5 = U16(1) if !(iCookie == BTREE_SCHEMA_VERSION && (*Sqlite3)(unsafe.Pointer(db)).Fflags&uint64(SQLITE_Defensive) != uint64(0)) { - goto __368 + goto __369 } (*VdbeOp)(unsafe.Pointer(aOp3 + 1*24)).Fopcode = U8(OP_Noop) -__368: +__369: ; - goto __366 -__365: + goto __367 +__366: ; aOp4 = Xsqlite3VdbeAddOpList(tls, v, int32(uint64(unsafe.Sizeof(readCookie))/uint64(unsafe.Sizeof(VdbeOpList{}))), uintptr(unsafe.Pointer(&readCookie)), 0) if !(0 != 0) { - goto __369 + goto __370 } goto __15 -__369: +__370: ; (*VdbeOp)(unsafe.Pointer(aOp4)).Fp1 = iDb (*VdbeOp)(unsafe.Pointer(aOp4 + 1*24)).Fp1 = iDb (*VdbeOp)(unsafe.Pointer(aOp4 + 1*24)).Fp3 = iCookie Xsqlite3VdbeReusable(tls, v) -__366: +__367: ; goto __15 __47: i10 = 0 (*Parse)(unsafe.Pointer(pParse)).FnMem = 1 -__370: +__371: if !(libc.AssignUintptr(&zOpt, Xsqlite3_compileoption_get(tls, libc.PostIncInt32(&i10, 1))) != uintptr(0)) { - goto __371 + goto __372 } Xsqlite3VdbeLoadString(tls, v, 1, zOpt) Xsqlite3VdbeAddOp2(tls, v, OP_ResultRow, 1, 1) - goto __370 -__371: + goto __371 +__372: ; Xsqlite3VdbeReusable(tls, v) @@ -83269,31 +83302,31 @@ }() eMode2 = SQLITE_CHECKPOINT_PASSIVE if !(zRight != 0) { - goto __372 + goto __373 } if !(Xsqlite3StrICmp(tls, zRight, ts+17329) == 0) { - goto __373 + goto __374 } eMode2 = SQLITE_CHECKPOINT_FULL - goto __374 -__373: - if !(Xsqlite3StrICmp(tls, zRight, ts+18024) == 0) { - goto __375 + goto __375 +__374: + if !(Xsqlite3StrICmp(tls, zRight, ts+18071) == 0) { + goto __376 } eMode2 = SQLITE_CHECKPOINT_RESTART - goto __376 -__375: + goto __377 +__376: if !(Xsqlite3StrICmp(tls, zRight, ts+17482) == 0) { - goto __377 + goto __378 } eMode2 = SQLITE_CHECKPOINT_TRUNCATE -__377: +__378: ; -__376: +__377: ; -__374: +__375: ; -__372: +__373: ; (*Parse)(unsafe.Pointer(pParse)).FnMem = 3 Xsqlite3VdbeAddOp3(tls, v, OP_Checkpoint, iBt, eMode2, 1) @@ -83303,10 +83336,10 @@ __49: if !(zRight != 0) { - goto __378 + goto __379 } Xsqlite3_wal_autocheckpoint(tls, db, Xsqlite3Atoi(tls, zRight)) -__378: +__379: ; returnSingleInt(tls, v, func() int64 { @@ -83326,19 +83359,19 @@ __51: if !(zRight != 0) { - goto __379 + goto 
__380 } opMask = U32(Xsqlite3Atoi(tls, zRight)) if !(opMask&U32(0x02) == U32(0)) { - goto __381 + goto __382 } goto __15 -__381: +__382: ; - goto __380 -__379: - opMask = U32(0xfffe) + goto __381 __380: + opMask = U32(0xfffe) +__381: ; iTabCur = libc.PostIncInt32(&(*Parse)(unsafe.Pointer(pParse)).FnTab, 1) iDbLast = func() int32 { @@ -83347,86 +83380,86 @@ } return (*Sqlite3)(unsafe.Pointer(db)).FnDb - 1 }() -__382: +__383: if !(iDb <= iDbLast) { - goto __384 + goto __385 } if !(iDb == 1) { - goto __385 + goto __386 } - goto __383 -__385: + goto __384 +__386: ; Xsqlite3CodeVerifySchema(tls, pParse, iDb) pSchema = (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + uintptr(iDb)*32)).FpSchema k4 = (*Hash)(unsafe.Pointer(pSchema + 8)).Ffirst -__386: +__387: if !(k4 != 0) { - goto __388 + goto __389 } pTab10 = (*HashElem)(unsafe.Pointer(k4)).Fdata if !((*Table)(unsafe.Pointer(pTab10)).FtabFlags&U32(TF_StatsUsed) == U32(0)) { - goto __389 + goto __390 } - goto __387 -__389: + goto __388 +__390: ; szThreshold = LogEst(int32((*Table)(unsafe.Pointer(pTab10)).FnRowLogEst) + 46) pIdx6 = (*Table)(unsafe.Pointer(pTab10)).FpIndex -__390: +__391: if !(pIdx6 != 0) { - goto __392 + goto __393 } if !!(int32(*(*uint16)(unsafe.Pointer(pIdx6 + 100))&0x80>>7) != 0) { - goto __393 + goto __394 } szThreshold = int16(0) - goto __392 -__393: + goto __393 +__394: ; - goto __391 -__391: - pIdx6 = (*Index)(unsafe.Pointer(pIdx6)).FpNext - goto __390 goto __392 __392: + pIdx6 = (*Index)(unsafe.Pointer(pIdx6)).FpNext + goto __391 + goto __393 +__393: ; if !(szThreshold != 0) { - goto __394 + goto __395 } Xsqlite3OpenTable(tls, pParse, iTabCur, iDb, pTab10, OP_OpenRead) Xsqlite3VdbeAddOp3(tls, v, OP_IfSmaller, iTabCur, int32(U32(Xsqlite3VdbeCurrentAddr(tls, v)+2)+opMask&U32(1)), int32(szThreshold)) -__394: +__395: ; - zSubSql = Xsqlite3MPrintf(tls, db, ts+18032, + zSubSql = Xsqlite3MPrintf(tls, db, ts+18079, libc.VaList(bp+464, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName, (*Table)(unsafe.Pointer(pTab10)).FzName)) if !(opMask&U32(0x01) != 0) { - goto __395 + goto __396 } r11 = Xsqlite3GetTempReg(tls, pParse) Xsqlite3VdbeAddOp4(tls, v, OP_String8, 0, r11, 0, zSubSql, -6) Xsqlite3VdbeAddOp2(tls, v, OP_ResultRow, r11, 1) - goto __396 -__395: - Xsqlite3VdbeAddOp4(tls, v, OP_SqlExec, 0, 0, 0, zSubSql, -6) + goto __397 __396: + Xsqlite3VdbeAddOp4(tls, v, OP_SqlExec, 0, 0, 0, zSubSql, -6) +__397: ; - goto __387 -__387: - k4 = (*HashElem)(unsafe.Pointer(k4)).Fnext - goto __386 goto __388 __388: + k4 = (*HashElem)(unsafe.Pointer(k4)).Fnext + goto __387 + goto __389 +__389: ; - goto __383 -__383: - iDb++ - goto __382 goto __384 __384: + iDb++ + goto __383 + goto __385 +__385: ; Xsqlite3VdbeAddOp0(tls, v, OP_Expire) goto __15 @@ -83434,36 +83467,36 @@ __52: ; if !(zRight != 0) { - goto __397 + goto __398 } Xsqlite3_busy_timeout(tls, db, Xsqlite3Atoi(tls, zRight)) -__397: +__398: ; returnSingleInt(tls, v, int64((*Sqlite3)(unsafe.Pointer(db)).FbusyTimeout)) goto __15 __53: if !(zRight != 0 && Xsqlite3DecOrHexToI64(tls, zRight, bp+632) == SQLITE_OK) { - goto __398 + goto __399 } Xsqlite3_soft_heap_limit64(tls, *(*Sqlite3_int64)(unsafe.Pointer(bp + 632))) -__398: +__399: ; returnSingleInt(tls, v, Xsqlite3_soft_heap_limit64(tls, int64(-1))) goto __15 __54: if !(zRight != 0 && Xsqlite3DecOrHexToI64(tls, zRight, bp+640) == SQLITE_OK) { - goto __399 + goto __400 } iPrior = Xsqlite3_hard_heap_limit64(tls, int64(-1)) if !(*(*Sqlite3_int64)(unsafe.Pointer(bp + 640)) > int64(0) && (iPrior == 
int64(0) || iPrior > *(*Sqlite3_int64)(unsafe.Pointer(bp + 640)))) { - goto __400 + goto __401 } Xsqlite3_hard_heap_limit64(tls, *(*Sqlite3_int64)(unsafe.Pointer(bp + 640))) -__400: +__401: ; -__399: +__400: ; returnSingleInt(tls, v, Xsqlite3_hard_heap_limit64(tls, int64(-1))) goto __15 @@ -83472,10 +83505,10 @@ if !(zRight != 0 && Xsqlite3DecOrHexToI64(tls, zRight, bp+648) == SQLITE_OK && *(*Sqlite3_int64)(unsafe.Pointer(bp + 648)) >= int64(0)) { - goto __401 + goto __402 } Xsqlite3_limit(tls, db, SQLITE_LIMIT_WORKER_THREADS, int32(*(*Sqlite3_int64)(unsafe.Pointer(bp + 648))&int64(0x7fffffff))) -__401: +__402: ; returnSingleInt(tls, v, int64(Xsqlite3_limit(tls, db, SQLITE_LIMIT_WORKER_THREADS, -1))) goto __15 @@ -83484,10 +83517,10 @@ if !(zRight != 0 && Xsqlite3DecOrHexToI64(tls, zRight, bp+656) == SQLITE_OK && *(*Sqlite3_int64)(unsafe.Pointer(bp + 656)) >= int64(0)) { - goto __402 + goto __403 } (*Sqlite3)(unsafe.Pointer(db)).FnAnalysisLimit = int32(*(*Sqlite3_int64)(unsafe.Pointer(bp + 656)) & int64(0x7fffffff)) -__402: +__403: ; returnSingleInt(tls, v, int64((*Sqlite3)(unsafe.Pointer(db)).FnAnalysisLimit)) goto __15 @@ -83495,10 +83528,10 @@ __15: ; if !(int32((*PragmaName)(unsafe.Pointer(pPragma)).FmPragFlg)&PragFlg_NoColumns1 != 0 && zRight != 0) { - goto __403 + goto __404 } -__403: +__404: ; pragma_out: Xsqlite3DbFree(tls, db, zLeft) @@ -83550,14 +83583,14 @@ {Fopcode: U8(OP_Goto), Fp2: int8(3)}, } var encnames1 = [9]EncName{ - {FzName: ts + 18050, Fenc: U8(SQLITE_UTF8)}, - {FzName: ts + 18055, Fenc: U8(SQLITE_UTF8)}, - {FzName: ts + 18061, Fenc: U8(SQLITE_UTF16LE)}, - {FzName: ts + 18070, Fenc: U8(SQLITE_UTF16BE)}, - {FzName: ts + 18079, Fenc: U8(SQLITE_UTF16LE)}, - {FzName: ts + 18087, Fenc: U8(SQLITE_UTF16BE)}, - {FzName: ts + 18095}, - {FzName: ts + 18102}, + {FzName: ts + 18097, Fenc: U8(SQLITE_UTF8)}, + {FzName: ts + 18102, Fenc: U8(SQLITE_UTF8)}, + {FzName: ts + 18108, Fenc: U8(SQLITE_UTF16LE)}, + {FzName: ts + 18117, Fenc: U8(SQLITE_UTF16BE)}, + {FzName: ts + 18126, Fenc: U8(SQLITE_UTF16LE)}, + {FzName: ts + 18134, Fenc: U8(SQLITE_UTF16BE)}, + {FzName: ts + 18142}, + {FzName: ts + 18149}, {}, } var setCookie = [2]VdbeOpList{ @@ -83609,7 +83642,7 @@ _ = argc _ = argv Xsqlite3StrAccumInit(tls, bp+32, uintptr(0), bp+64, int32(unsafe.Sizeof([200]int8{})), 0) - Xsqlite3_str_appendall(tls, bp+32, ts+18108) + Xsqlite3_str_appendall(tls, bp+32, ts+18155) i = 0 j = int32((*PragmaName)(unsafe.Pointer(pPragma)).FiPragCName) __1: @@ -83617,7 +83650,7 @@ goto __3 } { - Xsqlite3_str_appendf(tls, bp+32, ts+18123, libc.VaList(bp, int32(cSep), pragCName[j])) + Xsqlite3_str_appendf(tls, bp+32, ts+18170, libc.VaList(bp, int32(cSep), pragCName[j])) cSep = int8(',') } @@ -83630,16 +83663,16 @@ __3: ; if i == 0 { - Xsqlite3_str_appendf(tls, bp+32, ts+18130, libc.VaList(bp+16, (*PragmaName)(unsafe.Pointer(pPragma)).FzName)) + Xsqlite3_str_appendf(tls, bp+32, ts+18177, libc.VaList(bp+16, (*PragmaName)(unsafe.Pointer(pPragma)).FzName)) i++ } j = 0 if int32((*PragmaName)(unsafe.Pointer(pPragma)).FmPragFlg)&PragFlg_Result1 != 0 { - Xsqlite3_str_appendall(tls, bp+32, ts+18136) + Xsqlite3_str_appendall(tls, bp+32, ts+18183) j++ } if int32((*PragmaName)(unsafe.Pointer(pPragma)).FmPragFlg)&(PragFlg_SchemaOpt|PragFlg_SchemaReq) != 0 { - Xsqlite3_str_appendall(tls, bp+32, ts+18148) + Xsqlite3_str_appendall(tls, bp+32, ts+18195) j++ } Xsqlite3_str_append(tls, bp+32, ts+4941, 1) @@ -83822,13 +83855,13 @@ __3: ; Xsqlite3StrAccumInit(tls, bp+32, uintptr(0), uintptr(0), 0, 
*(*int32)(unsafe.Pointer((*PragmaVtab)(unsafe.Pointer(pTab)).Fdb + 136 + 1*4))) - Xsqlite3_str_appendall(tls, bp+32, ts+18163) + Xsqlite3_str_appendall(tls, bp+32, ts+18210) if *(*uintptr)(unsafe.Pointer(pCsr + 24 + 1*8)) != 0 { - Xsqlite3_str_appendf(tls, bp+32, ts+18171, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(pCsr + 24 + 1*8)))) + Xsqlite3_str_appendf(tls, bp+32, ts+18218, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(pCsr + 24 + 1*8)))) } Xsqlite3_str_appendall(tls, bp+32, (*PragmaName)(unsafe.Pointer((*PragmaVtab)(unsafe.Pointer(pTab)).FpName)).FzName) if *(*uintptr)(unsafe.Pointer(pCsr + 24)) != 0 { - Xsqlite3_str_appendf(tls, bp+32, ts+18175, libc.VaList(bp+16, *(*uintptr)(unsafe.Pointer(pCsr + 24)))) + Xsqlite3_str_appendf(tls, bp+32, ts+18222, libc.VaList(bp+16, *(*uintptr)(unsafe.Pointer(pCsr + 24)))) } zSql = Xsqlite3StrAccumFinish(tls, bp+32) if zSql == uintptr(0) { @@ -83905,12 +83938,12 @@ } else if *(*uintptr)(unsafe.Pointer((*InitData)(unsafe.Pointer(pData)).FpzErrMsg)) != uintptr(0) { } else if (*InitData)(unsafe.Pointer(pData)).FmInitFlags&U32(INITFLAG_AlterMask) != 0 { *(*uintptr)(unsafe.Pointer((*InitData)(unsafe.Pointer(pData)).FpzErrMsg)) = Xsqlite3MPrintf(tls, db, - ts+18179, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(azObj)), *(*uintptr)(unsafe.Pointer(azObj + 1*8)), + ts+18226, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(azObj)), *(*uintptr)(unsafe.Pointer(azObj + 1*8)), azAlterType[(*InitData)(unsafe.Pointer(pData)).FmInitFlags&U32(INITFLAG_AlterMask)-U32(1)], zExtra)) (*InitData)(unsafe.Pointer(pData)).Frc = SQLITE_ERROR } else if (*Sqlite3)(unsafe.Pointer(db)).Fflags&uint64(SQLITE_WriteSchema) != 0 { - (*InitData)(unsafe.Pointer(pData)).Frc = Xsqlite3CorruptError(tls, 137196) + (*InitData)(unsafe.Pointer(pData)).Frc = Xsqlite3CorruptError(tls, 137249) } else { var z uintptr var zObj uintptr @@ -83919,19 +83952,19 @@ } else { zObj = ts + 4992 } - z = Xsqlite3MPrintf(tls, db, ts+18207, libc.VaList(bp+32, zObj)) + z = Xsqlite3MPrintf(tls, db, ts+18254, libc.VaList(bp+32, zObj)) if zExtra != 0 && *(*int8)(unsafe.Pointer(zExtra)) != 0 { - z = Xsqlite3MPrintf(tls, db, ts+18238, libc.VaList(bp+40, z, zExtra)) + z = Xsqlite3MPrintf(tls, db, ts+18285, libc.VaList(bp+40, z, zExtra)) } *(*uintptr)(unsafe.Pointer((*InitData)(unsafe.Pointer(pData)).FpzErrMsg)) = z - (*InitData)(unsafe.Pointer(pData)).Frc = Xsqlite3CorruptError(tls, 137203) + (*InitData)(unsafe.Pointer(pData)).Frc = Xsqlite3CorruptError(tls, 137256) } } var azAlterType = [3]uintptr{ - ts + 18246, - ts + 18253, - ts + 18265, + ts + 18293, + ts + 18300, + ts + 18312, } // Check to see if any sibling index (another index on the same table) @@ -84023,7 +84056,7 @@ var pIndex uintptr pIndex = Xsqlite3FindIndex(tls, db, *(*uintptr)(unsafe.Pointer(argv + 1*8)), (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName) if pIndex == uintptr(0) { - corruptSchema(tls, pData, argv, ts+18276) + corruptSchema(tls, pData, argv, ts+18323) } else if Xsqlite3GetUInt32(tls, *(*uintptr)(unsafe.Pointer(argv + 3*8)), pIndex+88) == 0 || (*Index)(unsafe.Pointer(pIndex)).Ftnum < Pgno(2) || (*Index)(unsafe.Pointer(pIndex)).Ftnum > (*InitData)(unsafe.Pointer(pData)).FmxPage || @@ -84071,7 +84104,7 @@ }()) *(*uintptr)(unsafe.Pointer(bp + 16 + 2*8)) = *(*uintptr)(unsafe.Pointer(bp + 16 + 1*8)) *(*uintptr)(unsafe.Pointer(bp + 16 + 3*8)) = ts + 7922 - *(*uintptr)(unsafe.Pointer(bp + 16 + 4*8)) = ts + 18289 + *(*uintptr)(unsafe.Pointer(bp + 16 + 4*8)) = ts + 18336 *(*uintptr)(unsafe.Pointer(bp + 16 + 5*8)) = 
uintptr(0) (*InitData)(unsafe.Pointer(bp + 64)).Fdb = db (*InitData)(unsafe.Pointer(bp + 64)).FiDb = iDb @@ -84200,7 +84233,7 @@ if !(int32((*Schema)(unsafe.Pointer((*Db)(unsafe.Pointer(pDb)).FpSchema)).Ffile_format) > SQLITE_MAX_FILE_FORMAT) { goto __19 } - Xsqlite3SetString(tls, pzErrMsg, db, ts+18361) + Xsqlite3SetString(tls, pzErrMsg, db, ts+18408) rc = SQLITE_ERROR goto initone_error_out __19: @@ -84214,7 +84247,7 @@ (*InitData)(unsafe.Pointer(bp + 64)).FmxPage = Xsqlite3BtreeLastPage(tls, (*Db)(unsafe.Pointer(pDb)).FpBt) zSql = Xsqlite3MPrintf(tls, db, - ts+18385, + ts+18432, libc.VaList(bp, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName, zSchemaTabName)) xAuth = (*Sqlite3)(unsafe.Pointer(db)).FxAuth @@ -84546,7 +84579,7 @@ goto __8 } zDb = (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + uintptr(i)*32)).FzDbSName - Xsqlite3ErrorWithMsg(tls, db, rc, ts+18419, libc.VaList(bp, zDb)) + Xsqlite3ErrorWithMsg(tls, db, rc, ts+18466, libc.VaList(bp, zDb)) goto end_prepare __8: @@ -84576,7 +84609,7 @@ if !(nBytes > mxLen) { goto __12 } - Xsqlite3ErrorWithMsg(tls, db, SQLITE_TOOBIG, ts+18449, 0) + Xsqlite3ErrorWithMsg(tls, db, SQLITE_TOOBIG, ts+18496, 0) rc = Xsqlite3ApiExit(tls, db, SQLITE_TOOBIG) goto end_prepare __12: @@ -84672,7 +84705,7 @@ *(*uintptr)(unsafe.Pointer(ppStmt)) = uintptr(0) if !(Xsqlite3SafetyCheckOk(tls, db) != 0) || zSql == uintptr(0) { - return Xsqlite3MisuseError(tls, 137995) + return Xsqlite3MisuseError(tls, 138048) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) Xsqlite3BtreeEnterAll(tls, db) @@ -84771,7 +84804,7 @@ *(*uintptr)(unsafe.Pointer(ppStmt)) = uintptr(0) if !(Xsqlite3SafetyCheckOk(tls, db) != 0) || zSql == uintptr(0) { - return Xsqlite3MisuseError(tls, 138143) + return Xsqlite3MisuseError(tls, 138196) } if nBytes >= 0 { var sz int32 @@ -85098,13 +85131,13 @@ zSp2++ } Xsqlite3ErrorMsg(tls, pParse, - ts+18468, libc.VaList(bp, pA, zSp1, pB, zSp2, pC)) + ts+18515, libc.VaList(bp, pA, zSp1, pB, zSp2, pC)) jointype = JT_INNER } return jointype } -var zKeyText = *(*[34]int8)(unsafe.Pointer(ts + 18498)) +var zKeyText = *(*[34]int8)(unsafe.Pointer(ts + 18545)) var aKeyword = [7]struct { Fi U8 FnChar U8 @@ -85279,7 +85312,7 @@ var pUsing uintptr = uintptr(0) if uint32(int32(*(*uint16)(unsafe.Pointer(pRight + 60 + 4))&0x400>>10)) != 0 || *(*uintptr)(unsafe.Pointer(pRight + 72)) != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+18532, libc.VaList(bp, 0)) + ts+18579, libc.VaList(bp, 0)) return 1 } for j = 0; j < int32((*Table)(unsafe.Pointer(pRightTab)).FnCol); j++ { @@ -85324,7 +85357,7 @@ tableAndColumnIndex(tls, pSrc, 0, i, zName, bp+24, bp+28, int32(*(*uint16)(unsafe.Pointer(pRight + 60 + 4))&0x1000>>12)) == 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+18582, libc.VaList(bp+8, zName)) + ts+18629, libc.VaList(bp+8, zName)) return 1 } pE1 = Xsqlite3CreateColumnExpr(tls, db, pSrc, *(*int32)(unsafe.Pointer(bp + 24)), *(*int32)(unsafe.Pointer(bp + 28))) @@ -85335,7 +85368,7 @@ int32(*(*uint16)(unsafe.Pointer(pRight + 60 + 4))&0x1000>>12)) != 0 { if int32(*(*uint16)(unsafe.Pointer(pSrc + 8 + uintptr(*(*int32)(unsafe.Pointer(bp + 24)))*104 + 60 + 4))&0x400>>10) == 0 || Xsqlite3IdListIndex(tls, *(*uintptr)(unsafe.Pointer(pSrc + 8 + uintptr(*(*int32)(unsafe.Pointer(bp + 24)))*104 + 72)), zName) < 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+18646, + Xsqlite3ErrorMsg(tls, pParse, ts+18693, libc.VaList(bp+16, zName)) break } @@ -85963,16 +85996,16 @@ var z uintptr switch id { case TK_ALL: - z = ts + 18683 + z = ts + 18730 break case 
TK_INTERSECT: - z = ts + 18693 + z = ts + 18740 break case TK_EXCEPT: - z = ts + 18703 + z = ts + 18750 break default: - z = ts + 18710 + z = ts + 18757 break } return z @@ -85982,7 +86015,7 @@ bp := tls.Alloc(8) defer tls.Free(8) - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18716, libc.VaList(bp, zUsage)) + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18763, libc.VaList(bp, zUsage)) } func generateSortTail(tls *libc.TLS, pParse uintptr, p uintptr, pSort uintptr, nColumn int32, pDest uintptr) { @@ -86008,9 +86041,9 @@ var nRefKey int32 = 0 var aOutEx uintptr = (*Select)(unsafe.Pointer(p)).FpEList + 8 - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18739, libc.VaList(bp, func() uintptr { + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18786, libc.VaList(bp, func() uintptr { if (*SortCtx)(unsafe.Pointer(pSort)).FnOBSat > 0 { - return ts + 18770 + return ts + 18817 } return ts + 1538 }())) @@ -86354,7 +86387,7 @@ } else { var z uintptr = (*ExprList_item)(unsafe.Pointer(pEList + 8 + uintptr(i)*32)).FzEName if z == uintptr(0) { - z = Xsqlite3MPrintf(tls, db, ts+18785, libc.VaList(bp+16, i+1)) + z = Xsqlite3MPrintf(tls, db, ts+18832, libc.VaList(bp+16, i+1)) } else { z = Xsqlite3DbStrDup(tls, db, z) } @@ -86454,7 +86487,7 @@ if zName != 0 && !(Xsqlite3IsTrueOrFalse(tls, zName) != 0) { zName = Xsqlite3DbStrDup(tls, db, zName) } else { - zName = Xsqlite3MPrintf(tls, db, ts+18785, libc.VaList(bp, i+1)) + zName = Xsqlite3MPrintf(tls, db, ts+18832, libc.VaList(bp, i+1)) } *(*U32)(unsafe.Pointer(bp + 56)) = U32(0) @@ -86470,7 +86503,7 @@ nName = j } } - zName = Xsqlite3MPrintf(tls, db, ts+18794, libc.VaList(bp+8, nName, zName, libc.PreIncUint32(&*(*U32)(unsafe.Pointer(bp + 56)), 1))) + zName = Xsqlite3MPrintf(tls, db, ts+18841, libc.VaList(bp+8, nName, zName, libc.PreIncUint32(&*(*U32)(unsafe.Pointer(bp + 56)), 1))) Xsqlite3ProgressCheck(tls, pParse) if *(*U32)(unsafe.Pointer(bp + 56)) > U32(3) { Xsqlite3_randomness(tls, int32(unsafe.Sizeof(U32(0))), bp+56) @@ -86553,8 +86586,6 @@ (*Column)(unsafe.Pointer(pCol)).Faffinity = Xsqlite3ExprAffinity(tls, p) if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) <= SQLITE_AFF_NONE { (*Column)(unsafe.Pointer(pCol)).Faffinity = aff - } else if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) >= SQLITE_AFF_NUMERIC && int32((*Expr)(unsafe.Pointer(p)).Fop) == TK_CAST { - (*Column)(unsafe.Pointer(pCol)).Faffinity = int8(SQLITE_AFF_FLEXNUM) } if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) >= SQLITE_AFF_TEXT && (*Select)(unsafe.Pointer(pSelect)).FpNext != 0 { var m int32 = 0 @@ -86569,12 +86600,15 @@ } else if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) >= SQLITE_AFF_NUMERIC && m&0x02 != 0 { (*Column)(unsafe.Pointer(pCol)).Faffinity = int8(SQLITE_AFF_BLOB) } + if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) >= SQLITE_AFF_NUMERIC && int32((*Expr)(unsafe.Pointer(p)).Fop) == TK_CAST { + (*Column)(unsafe.Pointer(pCol)).Faffinity = int8(SQLITE_AFF_FLEXNUM) + } } zType = columnTypeImpl(tls, bp, p, uintptr(0), uintptr(0), uintptr(0)) if zType == uintptr(0) || int32((*Column)(unsafe.Pointer(pCol)).Faffinity) != int32(Xsqlite3AffinityType(tls, zType, uintptr(0))) { if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) == SQLITE_AFF_NUMERIC || int32((*Column)(unsafe.Pointer(pCol)).Faffinity) == SQLITE_AFF_FLEXNUM { - zType = ts + 18802 + zType = ts + 18849 } else { zType = uintptr(0) for j = 1; j < SQLITE_N_STDTYPE; j++ { @@ -86790,7 +86824,7 @@ if !((*Select)(unsafe.Pointer(p)).FpWin != 0) { goto __1 } - Xsqlite3ErrorMsg(tls, pParse, ts+18806, 0) + 
Xsqlite3ErrorMsg(tls, pParse, ts+18853, 0) return __1: ; @@ -86881,7 +86915,7 @@ if !((*Select)(unsafe.Pointer(pFirstRec)).FselFlags&U32(SF_Aggregate) != 0) { goto __15 } - Xsqlite3ErrorMsg(tls, pParse, ts+18855, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+18902, 0) goto end_of_recursive_query __15: ; @@ -86901,7 +86935,7 @@ ; pSetup = (*Select)(unsafe.Pointer(pFirstRec)).FpPrior (*Select)(unsafe.Pointer(pSetup)).FpNext = uintptr(0) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18897, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18944, 0) rc = Xsqlite3Select(tls, pParse, pSetup, bp) (*Select)(unsafe.Pointer(pSetup)).FpNext = p if !(rc != 0) { @@ -86938,7 +86972,7 @@ Xsqlite3VdbeResolveLabel(tls, v, addrCont) (*Select)(unsafe.Pointer(pFirstRec)).FpPrior = uintptr(0) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18903, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18950, 0) Xsqlite3Select(tls, pParse, p, bp) (*Select)(unsafe.Pointer(pFirstRec)).FpPrior = pSetup @@ -86972,11 +87006,11 @@ p = (*Select)(unsafe.Pointer(p)).FpPrior nRow = nRow + bShowAll } - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18918, libc.VaList(bp, nRow, func() uintptr { + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+18965, libc.VaList(bp, nRow, func() uintptr { if nRow == 1 { return ts + 1538 } - return ts + 18941 + return ts + 18988 }())) for p != 0 { selectInnerLoop(tls, pParse, p, -1, uintptr(0), uintptr(0), pDest, 1, 1) @@ -87077,8 +87111,8 @@ if !((*Select)(unsafe.Pointer(pPrior)).FpPrior == uintptr(0)) { goto __8 } - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18943, 0) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18958, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18990, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19005, 0) __8: ; switch int32((*Select)(unsafe.Pointer(p)).Fop) { @@ -87125,7 +87159,7 @@ ; __15: ; - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18683, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18730, 0) rc = Xsqlite3Select(tls, pParse, p, bp+16) @@ -87192,7 +87226,7 @@ pLimit = (*Select)(unsafe.Pointer(p)).FpLimit (*Select)(unsafe.Pointer(p)).FpLimit = uintptr(0) (*SelectDest)(unsafe.Pointer(bp + 64)).FeDest = op - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18977, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19024, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) rc = Xsqlite3Select(tls, pParse, p, bp+64) @@ -87254,7 +87288,7 @@ pLimit1 = (*Select)(unsafe.Pointer(p)).FpLimit (*Select)(unsafe.Pointer(p)).FpLimit = uintptr(0) (*SelectDest)(unsafe.Pointer(bp + 104)).FiSDParm = tab2 - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+18977, libc.VaList(bp+8, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19024, libc.VaList(bp+8, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) rc = Xsqlite3Select(tls, pParse, p, bp+104) @@ -87407,10 +87441,10 @@ defer tls.Free(8) if (*Select)(unsafe.Pointer(p)).FselFlags&U32(SF_Values) != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+18998, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+19045, 0) } else { Xsqlite3ErrorMsg(tls, pParse, - ts+19044, + ts+19091, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) } } @@ -87664,8 +87698,8 @@ (*Select)(unsafe.Pointer(pPrior)).FpNext = uintptr(0) (*Select)(unsafe.Pointer(pPrior)).FpOrderBy = Xsqlite3ExprListDup(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pOrderBy, 
0) - Xsqlite3ResolveOrderGroupBy(tls, pParse, p, (*Select)(unsafe.Pointer(p)).FpOrderBy, ts+7218) - Xsqlite3ResolveOrderGroupBy(tls, pParse, pPrior, (*Select)(unsafe.Pointer(pPrior)).FpOrderBy, ts+7218) + Xsqlite3ResolveOrderGroupBy(tls, pParse, p, (*Select)(unsafe.Pointer(p)).FpOrderBy, ts+7207) + Xsqlite3ResolveOrderGroupBy(tls, pParse, pPrior, (*Select)(unsafe.Pointer(pPrior)).FpOrderBy, ts+7207) computeLimitRegisters(tls, pParse, p, labelEnd) if (*Select)(unsafe.Pointer(p)).FiLimit != 0 && op == TK_ALL { @@ -87692,13 +87726,13 @@ Xsqlite3SelectDestInit(tls, bp+8, SRT_Coroutine, regAddrA) Xsqlite3SelectDestInit(tls, bp+48, SRT_Coroutine, regAddrB) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19126, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19173, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) addrSelectA = Xsqlite3VdbeCurrentAddr(tls, v) + 1 addr1 = Xsqlite3VdbeAddOp3(tls, v, OP_InitCoroutine, regAddrA, 0, addrSelectA) (*Select)(unsafe.Pointer(pPrior)).FiLimit = regLimitA - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19137, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19184, 0) Xsqlite3Select(tls, pParse, pPrior, bp+8) Xsqlite3VdbeEndCoroutine(tls, v, regAddrA) Xsqlite3VdbeJumpHere(tls, v, addr1) @@ -87710,7 +87744,7 @@ savedOffset = (*Select)(unsafe.Pointer(p)).FiOffset (*Select)(unsafe.Pointer(p)).FiLimit = regLimitB (*Select)(unsafe.Pointer(p)).FiOffset = 0 - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19142, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19189, 0) Xsqlite3Select(tls, pParse, p, bp+48) (*Select)(unsafe.Pointer(p)).FiLimit = savedLimit (*Select)(unsafe.Pointer(p)).FiOffset = savedOffset @@ -87898,7 +87932,8 @@ Xsqlite3VectorErrorMsg(tls, (*SubstContext)(unsafe.Pointer(pSubst)).FpParse, pCopy) } else { var db uintptr = (*Parse)(unsafe.Pointer((*SubstContext)(unsafe.Pointer(pSubst)).FpParse)).Fdb - if (*SubstContext)(unsafe.Pointer(pSubst)).FisOuterJoin != 0 && int32((*Expr)(unsafe.Pointer(pCopy)).Fop) != TK_COLUMN { + if (*SubstContext)(unsafe.Pointer(pSubst)).FisOuterJoin != 0 && + (int32((*Expr)(unsafe.Pointer(pCopy)).Fop) != TK_COLUMN || (*Expr)(unsafe.Pointer(pCopy)).FiTable != (*SubstContext)(unsafe.Pointer(pSubst)).FiNewTable) { libc.Xmemset(tls, bp, 0, uint64(unsafe.Sizeof(Expr{}))) (*Expr)(unsafe.Pointer(bp)).Fop = U8(TK_IF_NULL_ROW) (*Expr)(unsafe.Pointer(bp)).FpLeft = pCopy @@ -88797,7 +88832,7 @@ for pIdx = (*Table)(unsafe.Pointer(pTab)).FpIndex; pIdx != 0 && Xsqlite3StrICmp(tls, (*Index)(unsafe.Pointer(pIdx)).FzName, zIndexedBy) != 0; pIdx = (*Index)(unsafe.Pointer(pIdx)).FpNext { } if !(pIdx != 0) { - Xsqlite3ErrorMsg(tls, pParse, ts+19148, libc.VaList(bp, zIndexedBy, 0)) + Xsqlite3ErrorMsg(tls, pParse, ts+19195, libc.VaList(bp, zIndexedBy, 0)) (*Parse)(unsafe.Pointer(pParse)).FcheckSchema = U8(1) return SQLITE_ERROR } @@ -88880,7 +88915,7 @@ defer tls.Free(8) if uint32(int32(*(*uint16)(unsafe.Pointer(pFrom + 60 + 4))&0x4>>2)) != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+19166, libc.VaList(bp, (*SrcItem)(unsafe.Pointer(pFrom)).FzName)) + Xsqlite3ErrorMsg(tls, pParse, ts+19213, libc.VaList(bp, (*SrcItem)(unsafe.Pointer(pFrom)).FzName)) return 1 } return 0 @@ -89009,7 +89044,7 @@ *(*U32)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pFrom)).FpSelect + 4)) |= U32(SF_CopyCte) if uint32(int32(*(*uint16)(unsafe.Pointer(pFrom + 60 + 4))&0x2>>1)) != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+19189, libc.VaList(bp+8, 
*(*uintptr)(unsafe.Pointer(pFrom + 88)))) + Xsqlite3ErrorMsg(tls, pParse, ts+19236, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(pFrom + 88)))) return 2 } libc.SetBitFieldPtr16Uint32(pFrom+60+4, uint32(1), 8, 0x100) @@ -89032,7 +89067,7 @@ libc.SetBitFieldPtr16Uint32(pItem+60+4, uint32(1), 6, 0x40) if (*Select)(unsafe.Pointer(pRecTerm)).FselFlags&U32(SF_Recursive) != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+19209, libc.VaList(bp+16, (*Cte)(unsafe.Pointer(pCte)).FzName)) + ts+19256, libc.VaList(bp+16, (*Cte)(unsafe.Pointer(pCte)).FzName)) return 2 } *(*U32)(unsafe.Pointer(pRecTerm + 4)) |= U32(SF_Recursive) @@ -89048,7 +89083,7 @@ pRecTerm = (*Select)(unsafe.Pointer(pRecTerm)).FpPrior } - (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19252 + (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19299 pSavedWith = (*Parse)(unsafe.Pointer(pParse)).FpWith (*Parse)(unsafe.Pointer(pParse)).FpWith = *(*uintptr)(unsafe.Pointer(bp + 48)) if (*Select)(unsafe.Pointer(pSel)).FselFlags&U32(SF_Recursive) != 0 { @@ -89074,7 +89109,7 @@ pEList = (*Select)(unsafe.Pointer(pLeft)).FpEList if (*Cte)(unsafe.Pointer(pCte)).FpCols != 0 { if pEList != 0 && (*ExprList)(unsafe.Pointer(pEList)).FnExpr != (*ExprList)(unsafe.Pointer((*Cte)(unsafe.Pointer(pCte)).FpCols)).FnExpr { - Xsqlite3ErrorMsg(tls, pParse, ts+19275, + Xsqlite3ErrorMsg(tls, pParse, ts+19322, libc.VaList(bp+24, (*Cte)(unsafe.Pointer(pCte)).FzName, (*ExprList)(unsafe.Pointer(pEList)).FnExpr, (*ExprList)(unsafe.Pointer((*Cte)(unsafe.Pointer(pCte)).FpCols)).FnExpr)) (*Parse)(unsafe.Pointer(pParse)).FpWith = pSavedWith return 2 @@ -89085,9 +89120,9 @@ Xsqlite3ColumnsFromExprList(tls, pParse, pEList, pTab+54, pTab+8) if bMayRecursive != 0 { if (*Select)(unsafe.Pointer(pSel)).FselFlags&U32(SF_Recursive) != 0 { - (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19313 + (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19360 } else { - (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19347 + (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 19394 } Xsqlite3WalkSelect(tls, pWalker, pSel) } @@ -89134,7 +89169,7 @@ if (*SrcItem)(unsafe.Pointer(pFrom)).FzAlias != 0 { (*Table)(unsafe.Pointer(pTab)).FzName = Xsqlite3DbStrDup(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, (*SrcItem)(unsafe.Pointer(pFrom)).FzAlias) } else { - (*Table)(unsafe.Pointer(pTab)).FzName = Xsqlite3MPrintf(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, ts+19385, libc.VaList(bp, pFrom)) + (*Table)(unsafe.Pointer(pTab)).FzName = Xsqlite3MPrintf(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, ts+19432, libc.VaList(bp, pFrom)) } for (*Select)(unsafe.Pointer(pSel)).FpPrior != 0 { pSel = (*Select)(unsafe.Pointer(pSel)).FpPrior @@ -89246,7 +89281,7 @@ return WRC_Abort } if (*Table)(unsafe.Pointer(pTab)).FnTabRef >= U32(0xffff) { - Xsqlite3ErrorMsg(tls, pParse, ts+19389, + Xsqlite3ErrorMsg(tls, pParse, ts+19436, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName)) (*SrcItem)(unsafe.Pointer(pFrom)).FpTab = uintptr(0) return WRC_Abort @@ -89265,7 +89300,7 @@ if int32((*Table)(unsafe.Pointer(pTab)).FeTabType) == TABTYP_VIEW { if (*Sqlite3)(unsafe.Pointer(db)).Fflags&uint64(SQLITE_EnableView) == uint64(0) && (*Table)(unsafe.Pointer(pTab)).FpSchema != (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+1*32)).FpSchema { - Xsqlite3ErrorMsg(tls, pParse, ts+19428, + Xsqlite3ErrorMsg(tls, pParse, ts+19475, libc.VaList(bp+8, (*Table)(unsafe.Pointer(pTab)).FzName)) } (*SrcItem)(unsafe.Pointer(pFrom)).FpSelect = Xsqlite3SelectDup(tls, db, *(*uintptr)(unsafe.Pointer(pTab + 64)), 0) @@ -89389,7 +89424,7 @@ if pNew != 0 { var pX uintptr = pNew + 8 
+ uintptr((*ExprList)(unsafe.Pointer(pNew)).FnExpr-1)*32 - (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3MPrintf(tls, db, ts+19459, libc.VaList(bp+24, zUName)) + (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3MPrintf(tls, db, ts+19506, libc.VaList(bp+24, zUName)) libc.SetBitFieldPtr16Uint32(pX+16+4, uint32(ENAME_TAB), 0, 0x3) libc.SetBitFieldPtr16Uint32(pX+16+4, uint32(1), 7, 0x80) } @@ -89454,7 +89489,7 @@ (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3DbStrDup(tls, db, (*ExprList_item)(unsafe.Pointer(pNestedFrom+8+uintptr(j)*32)).FzEName) } else { - (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3MPrintf(tls, db, ts+19464, + (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3MPrintf(tls, db, ts+19511, libc.VaList(bp+32, zSchemaName, zTabName, zName)) } @@ -89485,9 +89520,9 @@ ; if !(tableSeen != 0) { if zTName != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+19473, libc.VaList(bp+72, zTName)) + Xsqlite3ErrorMsg(tls, pParse, ts+19520, libc.VaList(bp+72, zTName)) } else { - Xsqlite3ErrorMsg(tls, pParse, ts+19491, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+19538, 0) } } } @@ -89497,7 +89532,7 @@ } if (*Select)(unsafe.Pointer(p)).FpEList != 0 { if (*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer(p)).FpEList)).FnExpr > *(*int32)(unsafe.Pointer(db + 136 + 2*4)) { - Xsqlite3ErrorMsg(tls, pParse, ts+19511, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+19558, 0) return WRC_Abort } if elistFlags&U32(EP_HasFunc|EP_Subquery) != U32(0) { @@ -89635,7 +89670,7 @@ (*AggInfo)(unsafe.Pointer(pAggInfo)).FnColumn = (*AggInfo)(unsafe.Pointer(pAggInfo)).FnAccumulator if int32((*AggInfo)(unsafe.Pointer(pAggInfo)).FnSortingColumn) > 0 { if (*AggInfo)(unsafe.Pointer(pAggInfo)).FnColumn == 0 { - (*AggInfo)(unsafe.Pointer(pAggInfo)).FnSortingColumn = U16(0) + (*AggInfo)(unsafe.Pointer(pAggInfo)).FnSortingColumn = U16((*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer(pSelect)).FpGroupBy)).FnExpr) } else { (*AggInfo)(unsafe.Pointer(pAggInfo)).FnSortingColumn = U16(int32((*AggInfo_col)(unsafe.Pointer((*AggInfo)(unsafe.Pointer(pAggInfo)).FaCol+uintptr((*AggInfo)(unsafe.Pointer(pAggInfo)).FnColumn-1)*24)).FiSorterColumn) + 1) } @@ -89719,13 +89754,13 @@ if *(*uintptr)(unsafe.Pointer(pE + 32)) == uintptr(0) || (*ExprList)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pE + 32)))).FnExpr != 1 { Xsqlite3ErrorMsg(tls, pParse, - ts+19542, 0) + ts+19589, 0) (*AggInfo_func)(unsafe.Pointer(pFunc)).FiDistinct = -1 } else { var pKeyInfo uintptr = Xsqlite3KeyInfoFromExprList(tls, pParse, *(*uintptr)(unsafe.Pointer(pE + 32)), 0, 0) (*AggInfo_func)(unsafe.Pointer(pFunc)).FiDistAddr = Xsqlite3VdbeAddOp4(tls, v, OP_OpenEphemeral, (*AggInfo_func)(unsafe.Pointer(pFunc)).FiDistinct, 0, 0, pKeyInfo, -8) - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+19593, libc.VaList(bp, (*FuncDef)(unsafe.Pointer((*AggInfo_func)(unsafe.Pointer(pFunc)).FpFunc)).FzName)) + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+19640, libc.VaList(bp, (*FuncDef)(unsafe.Pointer((*AggInfo_func)(unsafe.Pointer(pFunc)).FpFunc)).FzName)) } } @@ -89914,11 +89949,11 @@ if int32((*Parse)(unsafe.Pointer(pParse)).Fexplain) == 2 { var bCover int32 = libc.Bool32(pIdx != uintptr(0) && ((*Table)(unsafe.Pointer(pTab)).FtabFlags&U32(TF_WithoutRowid) == U32(0) || !(int32(*(*uint16)(unsafe.Pointer(pIdx + 100))&0x3>>0) == SQLITE_IDXTYPE_PRIMARYKEY))) - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+19626, + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+19673, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName, func() uintptr { if bCover != 0 { - return ts + 19638 + 
return ts + 19685 } return ts + 1538 }(), @@ -90246,7 +90281,7 @@ goto __7 } Xsqlite3ErrorMsg(tls, pParse, - ts+19661, + ts+19708, libc.VaList(bp, func() uintptr { if (*SrcItem)(unsafe.Pointer(p0)).FzAlias != 0 { return (*SrcItem)(unsafe.Pointer(p0)).FzAlias @@ -90307,7 +90342,7 @@ if !(int32((*Table)(unsafe.Pointer(pTab)).FnCol) != (*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer(pSub)).FpEList)).FnExpr) { goto __15 } - Xsqlite3ErrorMsg(tls, pParse, ts+19715, + Xsqlite3ErrorMsg(tls, pParse, ts+19762, libc.VaList(bp+8, int32((*Table)(unsafe.Pointer(pTab)).FnCol), (*Table)(unsafe.Pointer(pTab)).FzName, (*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer(pSub)).FpEList)).FnExpr)) goto select_end __15: @@ -90449,7 +90484,7 @@ (*SrcItem)(unsafe.Pointer(pItem1)).FaddrFillSub = addrTop Xsqlite3SelectDestInit(tls, bp+96, SRT_Coroutine, (*SrcItem)(unsafe.Pointer(pItem1)).FregReturn) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19755, libc.VaList(bp+32, pItem1)) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19802, libc.VaList(bp+32, pItem1)) Xsqlite3Select(tls, pParse, pSub1, bp+96) (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pItem1)).FpTab)).FnRowLogEst = (*Select)(unsafe.Pointer(pSub1)).FnSelectRow libc.SetBitFieldPtr16Uint32(pItem1+60+4, uint32(1), 5, 0x20) @@ -90508,7 +90543,7 @@ ; Xsqlite3SelectDestInit(tls, bp+96, SRT_EphemTab, (*SrcItem)(unsafe.Pointer(pItem1)).FiCursor) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19770, libc.VaList(bp+40, pItem1)) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+19817, libc.VaList(bp+40, pItem1)) Xsqlite3Select(tls, pParse, pSub1, bp+96) (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pItem1)).FpTab)).FnRowLogEst = (*Select)(unsafe.Pointer(pSub1)).FnSelectRow if !(onceAddr != 0) { @@ -90979,9 +91014,9 @@ explainTempTable(tls, pParse, func() uintptr { if (*DistinctCtx)(unsafe.Pointer(bp+136)).FisTnct != 0 && (*Select)(unsafe.Pointer(p)).FselFlags&U32(SF_Distinct) == U32(0) { - return ts + 19786 + return ts + 19833 } - return ts + 19795 + return ts + 19842 }()) groupBySort = 1 @@ -91332,7 +91367,7 @@ if !(int32((*DistinctCtx)(unsafe.Pointer(bp+136)).FeTnctType) == WHERE_DISTINCT_UNORDERED) { goto __146 } - explainTempTable(tls, pParse, ts+19786) + explainTempTable(tls, pParse, ts+19833) __146: ; if !((*SortCtx)(unsafe.Pointer(bp+48)).FpOrderBy != 0) { @@ -91437,7 +91472,7 @@ } Xsqlite3_free(tls, (*TabResult)(unsafe.Pointer(p)).FzErrMsg) (*TabResult)(unsafe.Pointer(p)).FzErrMsg = Xsqlite3_mprintf(tls, - ts+19804, 0) + ts+19851, 0) (*TabResult)(unsafe.Pointer(p)).Frc = SQLITE_ERROR return 1 __11: @@ -91670,7 +91705,7 @@ if !((*Token)(unsafe.Pointer(pName2)).Fn > uint32(0)) { goto __3 } - Xsqlite3ErrorMsg(tls, pParse, ts+19869, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+19916, 0) goto trigger_cleanup __3: ; @@ -91714,7 +91749,7 @@ goto trigger_cleanup __8: ; - Xsqlite3FixInit(tls, bp+40, pParse, iDb, ts+19915, *(*uintptr)(unsafe.Pointer(bp + 32))) + Xsqlite3FixInit(tls, bp+40, pParse, iDb, ts+19962, *(*uintptr)(unsafe.Pointer(bp + 32))) if !(Xsqlite3FixSrcList(tls, bp+40, pTableName) != 0) { goto __9 } @@ -91732,7 +91767,7 @@ if !(int32((*Table)(unsafe.Pointer(pTab)).FeTabType) == TABTYP_VTAB) { goto __11 } - Xsqlite3ErrorMsg(tls, pParse, ts+19923, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+19970, 0) goto trigger_orphan_error __11: ; @@ -91744,7 +91779,7 @@ goto trigger_cleanup __12: ; - if !(Xsqlite3CheckObjectName(tls, pParse, zName, ts+19915, (*Table)(unsafe.Pointer(pTab)).FzName) != 0) { + if !(Xsqlite3CheckObjectName(tls, pParse, zName, ts+19962, 
(*Table)(unsafe.Pointer(pTab)).FzName) != 0) { goto __13 } goto trigger_cleanup @@ -91759,11 +91794,12 @@ if !!(noErr != 0) { goto __16 } - Xsqlite3ErrorMsg(tls, pParse, ts+19964, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(bp + 32)))) + Xsqlite3ErrorMsg(tls, pParse, ts+20011, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(bp + 32)))) goto __17 __16: ; Xsqlite3CodeVerifySchema(tls, pParse, iDb) + __17: ; goto trigger_cleanup @@ -91774,19 +91810,19 @@ if !(Xsqlite3_strnicmp(tls, (*Table)(unsafe.Pointer(pTab)).FzName, ts+6365, 7) == 0) { goto __18 } - Xsqlite3ErrorMsg(tls, pParse, ts+19990, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+20037, 0) goto trigger_cleanup __18: ; if !(int32((*Table)(unsafe.Pointer(pTab)).FeTabType) == TABTYP_VIEW && tr_tm != TK_INSTEAD) { goto __19 } - Xsqlite3ErrorMsg(tls, pParse, ts+20028, + Xsqlite3ErrorMsg(tls, pParse, ts+20075, libc.VaList(bp+8, func() uintptr { if tr_tm == TK_BEFORE { - return ts + 20065 + return ts + 20112 } - return ts + 20072 + return ts + 20119 }(), pTableName+8)) goto trigger_orphan_error __19: @@ -91795,7 +91831,7 @@ goto __20 } Xsqlite3ErrorMsg(tls, pParse, - ts+20078, libc.VaList(bp+24, pTableName+8)) + ts+20125, libc.VaList(bp+24, pTableName+8)) goto trigger_orphan_error __20: ; @@ -91944,7 +91980,7 @@ __3: ; Xsqlite3TokenInit(tls, bp+56, (*Trigger)(unsafe.Pointer(pTrig)).FzName) - Xsqlite3FixInit(tls, bp+72, pParse, iDb, ts+19915, bp+56) + Xsqlite3FixInit(tls, bp+72, pParse, iDb, ts+19962, bp+56) if !(Xsqlite3FixTriggerStep(tls, bp+72, (*Trigger)(unsafe.Pointer(pTrig)).Fstep_list) != 0 || Xsqlite3FixExpr(tls, bp+72, (*Trigger)(unsafe.Pointer(pTrig)).FpWhen) != 0) { goto __4 @@ -91977,7 +92013,7 @@ goto __12 } Xsqlite3ErrorMsg(tls, pParse, - ts+20124, + ts+20171, libc.VaList(bp, (*Trigger)(unsafe.Pointer(pTrig)).FzName, (*TriggerStep)(unsafe.Pointer(pStep)).FzTarget)) goto triggerfinish_cleanup __12: @@ -92002,13 +92038,13 @@ z = Xsqlite3DbStrNDup(tls, db, (*Token)(unsafe.Pointer(pAll)).Fz, uint64((*Token)(unsafe.Pointer(pAll)).Fn)) Xsqlite3NestedParse(tls, pParse, - ts+20172, + ts+20219, libc.VaList(bp+16, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName, zName, (*Trigger)(unsafe.Pointer(pTrig)).Ftable, z)) Xsqlite3DbFree(tls, db, z) Xsqlite3ChangeCookie(tls, pParse, iDb) Xsqlite3VdbeAddParseSchemaOp(tls, v, iDb, - Xsqlite3MPrintf(tls, db, ts+20247, libc.VaList(bp+48, zName)), uint16(0)) + Xsqlite3MPrintf(tls, db, ts+20294, libc.VaList(bp+48, zName)), uint16(0)) __7: ; __6: @@ -92264,7 +92300,7 @@ if !!(noErr != 0) { goto __9 } - Xsqlite3ErrorMsg(tls, pParse, ts+20276, libc.VaList(bp, pName+8)) + Xsqlite3ErrorMsg(tls, pParse, ts+20323, libc.VaList(bp, pName+8)) goto __10 __9: Xsqlite3CodeVerifyNamedSchema(tls, pParse, zDb) @@ -92317,7 +92353,7 @@ if libc.AssignUintptr(&v, Xsqlite3GetVdbe(tls, pParse)) != uintptr(0) { Xsqlite3NestedParse(tls, pParse, - ts+20296, + ts+20343, libc.VaList(bp, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName, (*Trigger)(unsafe.Pointer(pTrigger)).FzName)) Xsqlite3ChangeCookie(tls, pParse, iDb) Xsqlite3VdbeAddOp4(tls, v, OP_DropTrigger, iDb, 0, 0, (*Trigger)(unsafe.Pointer(pTrigger)).FzName, 0) @@ -92431,12 +92467,12 @@ goto __15 } Xsqlite3ErrorMsg(tls, pParse, - ts+20358, + ts+20405, libc.VaList(bp, func() uintptr { if op == TK_DELETE { - return ts + 20406 + return ts + 20453 } - return ts + 20413 + return ts + 20460 }())) __15: ; @@ -92550,7 +92586,7 @@ if int32((*Expr)(unsafe.Pointer((*Expr)(unsafe.Pointer(pTerm)).FpRight)).Fop) != TK_ASTERISK 
{ return 0 } - Xsqlite3ErrorMsg(tls, pParse, ts+20420, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+20467, 0) return 1 } @@ -92616,7 +92652,7 @@ } Xsqlite3ExprListDelete(tls, db, (*Select)(unsafe.Pointer(bp)).FpEList) pNew = sqlite3ExpandReturning(tls, pParse, (*Returning)(unsafe.Pointer(pReturning)).FpReturnEL, pTab) - if !(int32((*Sqlite3)(unsafe.Pointer(db)).FmallocFailed) != 0) { + if (*Parse)(unsafe.Pointer(pParse)).FnErr == 0 { libc.Xmemset(tls, bp+240, 0, uint64(unsafe.Sizeof(NameContext{}))) if (*Returning)(unsafe.Pointer(pReturning)).FnRetCol == 0 { (*Returning)(unsafe.Pointer(pReturning)).FnRetCol = (*ExprList)(unsafe.Pointer(pNew)).FnExpr @@ -92780,7 +92816,7 @@ if v != 0 { if (*Trigger)(unsafe.Pointer(pTrigger)).FzName != 0 { Xsqlite3VdbeChangeP4(tls, v, -1, - Xsqlite3MPrintf(tls, db, ts+20462, libc.VaList(bp, (*Trigger)(unsafe.Pointer(pTrigger)).FzName)), -6) + Xsqlite3MPrintf(tls, db, ts+20509, libc.VaList(bp, (*Trigger)(unsafe.Pointer(pTrigger)).FzName)), -6) } if (*Trigger)(unsafe.Pointer(pTrigger)).FpWhen != 0 { @@ -93373,7 +93409,7 @@ } Xsqlite3ErrorMsg(tls, pParse, - ts+20476, + ts+20523, libc.VaList(bp, (*Column)(unsafe.Pointer((*Table)(unsafe.Pointer(pTab)).FaCol+uintptr(j)*24)).FzCnName)) goto update_cleanup __27: @@ -93405,7 +93441,7 @@ iRowidExpr = i goto __30 __29: - Xsqlite3ErrorMsg(tls, pParse, ts+20512, libc.VaList(bp+8, (*ExprList_item)(unsafe.Pointer(pChanges+8+uintptr(i)*32)).FzEName)) + Xsqlite3ErrorMsg(tls, pParse, ts+20559, libc.VaList(bp+8, (*ExprList_item)(unsafe.Pointer(pChanges+8+uintptr(i)*32)).FzEName)) (*Parse)(unsafe.Pointer(pParse)).FcheckSchema = U8(1) goto update_cleanup __30: @@ -93731,7 +93767,12 @@ goto __77 __76: flags = WHERE_ONEPASS_DESIRED - if !(!(int32((*Parse)(unsafe.Pointer(pParse)).Fnested) != 0) && !(pTrigger != 0) && !(hasFK != 0) && !(chngKey != 0) && !(*(*int32)(unsafe.Pointer(bp + 104)) != 0)) { + if !(!(int32((*Parse)(unsafe.Pointer(pParse)).Fnested) != 0) && + !(pTrigger != 0) && + !(hasFK != 0) && + !(chngKey != 0) && + !(*(*int32)(unsafe.Pointer(bp + 104)) != 0) && + (*NameContext)(unsafe.Pointer(bp+40)).FncFlags&NC_Subquery == 0) { goto __78 } flags = flags | WHERE_ONEPASS_MULTIROW @@ -94285,7 +94326,7 @@ if !(regRowCount != 0) { goto __169 } - Xsqlite3CodeChangeCount(tls, v, regRowCount, ts+20531) + Xsqlite3CodeChangeCount(tls, v, regRowCount, ts+20578) __169: ; update_cleanup: @@ -94591,10 +94632,10 @@ if nClause == 0 && (*Upsert)(unsafe.Pointer(pUpsert)).FpNextUpsert == uintptr(0) { *(*int8)(unsafe.Pointer(bp + 216)) = int8(0) } else { - Xsqlite3_snprintf(tls, int32(unsafe.Sizeof([16]int8{})), bp+216, ts+20544, libc.VaList(bp, nClause+1)) + Xsqlite3_snprintf(tls, int32(unsafe.Sizeof([16]int8{})), bp+216, ts+20591, libc.VaList(bp, nClause+1)) } Xsqlite3ErrorMsg(tls, pParse, - ts+20548, libc.VaList(bp+8, bp+216)) + ts+20595, libc.VaList(bp+8, bp+216)) return SQLITE_ERROR } @@ -94717,7 +94758,7 @@ var zSubSql uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp)), 0) if zSubSql != 0 && - (libc.Xstrncmp(tls, zSubSql, ts+20621, uint64(3)) == 0 || libc.Xstrncmp(tls, zSubSql, ts+20625, uint64(3)) == 0) { + (libc.Xstrncmp(tls, zSubSql, ts+20668, uint64(3)) == 0 || libc.Xstrncmp(tls, zSubSql, ts+20672, uint64(3)) == 0) { rc = execSql(tls, db, pzErrMsg, zSubSql) if rc != SQLITE_OK { break @@ -94865,14 +94906,14 @@ if !!(int32((*Sqlite3)(unsafe.Pointer(db)).FautoCommit) != 0) { goto __1 } - Xsqlite3SetString(tls, pzErrMsg, db, ts+20629) + Xsqlite3SetString(tls, pzErrMsg, db, ts+20676) return SQLITE_ERROR __1: ; if 
!((*Sqlite3)(unsafe.Pointer(db)).FnVdbeActive > 1) { goto __2 } - Xsqlite3SetString(tls, pzErrMsg, db, ts+20669) + Xsqlite3SetString(tls, pzErrMsg, db, ts+20716) return SQLITE_ERROR __2: ; @@ -94883,7 +94924,7 @@ if !(Xsqlite3_value_type(tls, pOut) != SQLITE_TEXT) { goto __5 } - Xsqlite3SetString(tls, pzErrMsg, db, ts+20712) + Xsqlite3SetString(tls, pzErrMsg, db, ts+20759) return SQLITE_ERROR __5: ; @@ -94911,7 +94952,7 @@ isMemDb = Xsqlite3PagerIsMemdb(tls, Xsqlite3BtreePager(tls, pMain)) nDb = (*Sqlite3)(unsafe.Pointer(db)).FnDb - rc = execSqlF(tls, db, pzErrMsg, ts+20730, libc.VaList(bp, zOut)) + rc = execSqlF(tls, db, pzErrMsg, ts+20777, libc.VaList(bp, zOut)) (*Sqlite3)(unsafe.Pointer(db)).FopenFlags = saved_openFlags if !(rc != SQLITE_OK) { goto __6 @@ -94931,7 +94972,7 @@ goto __8 } rc = SQLITE_ERROR - Xsqlite3SetString(tls, pzErrMsg, db, ts+20753) + Xsqlite3SetString(tls, pzErrMsg, db, ts+20800) goto end_of_vacuum __8: ; @@ -94991,7 +95032,7 @@ (*Sqlite3)(unsafe.Pointer(db)).Finit.FiDb = U8(nDb) rc = execSqlF(tls, db, pzErrMsg, - ts+20780, + ts+20827, libc.VaList(bp+8, zDbMain)) if !(rc != SQLITE_OK) { goto __13 @@ -95000,7 +95041,7 @@ __13: ; rc = execSqlF(tls, db, pzErrMsg, - ts+20888, + ts+20935, libc.VaList(bp+16, zDbMain)) if !(rc != SQLITE_OK) { goto __14 @@ -95011,7 +95052,7 @@ (*Sqlite3)(unsafe.Pointer(db)).Finit.FiDb = U8(0) rc = execSqlF(tls, db, pzErrMsg, - ts+20942, + ts+20989, libc.VaList(bp+24, zDbMain)) *(*U32)(unsafe.Pointer(db + 44)) &= libc.Uint32FromInt32(libc.CplInt32(DBFLAG_Vacuum)) @@ -95022,7 +95063,7 @@ __15: ; rc = execSqlF(tls, db, pzErrMsg, - ts+21093, + ts+21140, libc.VaList(bp+32, zDbMain)) if !(rc != 0) { goto __16 @@ -95451,11 +95492,11 @@ if pEnd != 0 { (*Parse)(unsafe.Pointer(pParse)).FsNameToken.Fn = uint32(int32((int64((*Token)(unsafe.Pointer(pEnd)).Fz)-int64((*Parse)(unsafe.Pointer(pParse)).FsNameToken.Fz))/1)) + (*Token)(unsafe.Pointer(pEnd)).Fn } - zStmt = Xsqlite3MPrintf(tls, db, ts+21223, libc.VaList(bp, pParse+272)) + zStmt = Xsqlite3MPrintf(tls, db, ts+21270, libc.VaList(bp, pParse+272)) iDb = Xsqlite3SchemaToIndex(tls, db, (*Table)(unsafe.Pointer(pTab)).FpSchema) Xsqlite3NestedParse(tls, pParse, - ts+21247, + ts+21294, libc.VaList(bp+8, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName, (*Table)(unsafe.Pointer(pTab)).FzName, (*Table)(unsafe.Pointer(pTab)).FzName, @@ -95465,7 +95506,7 @@ Xsqlite3ChangeCookie(tls, pParse, iDb) Xsqlite3VdbeAddOp0(tls, v, OP_Expire) - zWhere = Xsqlite3MPrintf(tls, db, ts+21346, libc.VaList(bp+48, (*Table)(unsafe.Pointer(pTab)).FzName, zStmt)) + zWhere = Xsqlite3MPrintf(tls, db, ts+21393, libc.VaList(bp+48, (*Table)(unsafe.Pointer(pTab)).FzName, zStmt)) Xsqlite3VdbeAddParseSchemaOp(tls, v, iDb, zWhere, uint16(0)) Xsqlite3DbFree(tls, db, zStmt) @@ -95526,7 +95567,7 @@ for pCtx = (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx; pCtx != 0; pCtx = (*VtabCtx)(unsafe.Pointer(pCtx)).FpPrior { if (*VtabCtx)(unsafe.Pointer(pCtx)).FpTab == pTab { *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, - ts+21365, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName)) + ts+21412, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName)) return SQLITE_LOCKED } } @@ -95554,9 +95595,11 @@ (*VtabCtx)(unsafe.Pointer(bp + 32)).FpPrior = (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx (*VtabCtx)(unsafe.Pointer(bp + 32)).FbDeclared = 0 (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx = bp + 32 + (*Table)(unsafe.Pointer(pTab)).FnTabRef++ rc = (*struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, uintptr, 
uintptr) int32 })(unsafe.Pointer(&struct{ uintptr }{xConstruct})).f(tls, db, (*Module)(unsafe.Pointer(pMod)).FpAux, nArg, azArg, pVTable+16, bp+64) + Xsqlite3DeleteTable(tls, db, pTab) (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx = (*VtabCtx)(unsafe.Pointer(bp + 32)).FpPrior if rc == SQLITE_NOMEM { Xsqlite3OomFault(tls, db) @@ -95564,7 +95607,7 @@ if SQLITE_OK != rc { if *(*uintptr)(unsafe.Pointer(bp + 64)) == uintptr(0) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+21407, libc.VaList(bp+8, zModuleName)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+21454, libc.VaList(bp+8, zModuleName)) } else { *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+3647, libc.VaList(bp+16, *(*uintptr)(unsafe.Pointer(bp + 64)))) Xsqlite3_free(tls, *(*uintptr)(unsafe.Pointer(bp + 64))) @@ -95576,7 +95619,7 @@ (*Module)(unsafe.Pointer(pMod)).FnRefModule++ (*VTable)(unsafe.Pointer(pVTable)).FnRef = 1 if (*VtabCtx)(unsafe.Pointer(bp+32)).FbDeclared == 0 { - var zFormat uintptr = ts + 21437 + var zFormat uintptr = ts + 21484 *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, zFormat, libc.VaList(bp+24, (*Table)(unsafe.Pointer(pTab)).FzName)) Xsqlite3VtabUnlock(tls, pVTable) rc = SQLITE_ERROR @@ -95650,7 +95693,7 @@ if !(pMod != 0) { var zModule uintptr = *(*uintptr)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pTab + 64 + 8)))) - Xsqlite3ErrorMsg(tls, pParse, ts+21483, libc.VaList(bp, zModule)) + Xsqlite3ErrorMsg(tls, pParse, ts+21530, libc.VaList(bp, zModule)) rc = SQLITE_ERROR } else { *(*uintptr)(unsafe.Pointer(bp + 16)) = uintptr(0) @@ -95708,7 +95751,7 @@ pMod = Xsqlite3HashFind(tls, db+576, zMod) if pMod == uintptr(0) || (*Sqlite3_module)(unsafe.Pointer((*Module)(unsafe.Pointer(pMod)).FpModule)).FxCreate == uintptr(0) || (*Sqlite3_module)(unsafe.Pointer((*Module)(unsafe.Pointer(pMod)).FpModule)).FxDestroy == uintptr(0) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+21483, libc.VaList(bp, zMod)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+21530, libc.VaList(bp, zMod)) rc = SQLITE_ERROR } else { rc = vtabCallConstructor(tls, db, pTab, pMod, (*Sqlite3_module)(unsafe.Pointer((*Module)(unsafe.Pointer(pMod)).FpModule)).FxCreate, pzErr) @@ -95742,7 +95785,7 @@ if !(pCtx != 0) || (*VtabCtx)(unsafe.Pointer(pCtx)).FbDeclared != 0 { Xsqlite3Error(tls, db, SQLITE_MISUSE) Xsqlite3_mutex_leave(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) - return Xsqlite3MisuseError(tls, 151030) + return Xsqlite3MisuseError(tls, 151102) } pTab = (*VtabCtx)(unsafe.Pointer(pCtx)).FpTab @@ -96195,7 +96238,7 @@ Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) p = (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx if !(p != 0) { - rc = Xsqlite3MisuseError(tls, 151521) + rc = Xsqlite3MisuseError(tls, 151593) } else { ap = va switch op { @@ -96222,7 +96265,7 @@ fallthrough default: { - rc = Xsqlite3MisuseError(tls, 151539) + rc = Xsqlite3MisuseError(tls, 151611) break } @@ -96453,7 +96496,7 @@ func explainIndexColumnName(tls *libc.TLS, pIdx uintptr, i int32) uintptr { i = int32(*(*I16)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx)).FaiColumn + uintptr(i)*2))) if i == -2 { - return ts + 21502 + return ts + 21549 } if i == -1 { return ts + 16251 @@ -96465,11 +96508,11 @@ var i int32 if bAnd != 0 { - Xsqlite3_str_append(tls, pStr, ts+21509, 5) + Xsqlite3_str_append(tls, pStr, ts+21556, 5) } if nTerm > 1 { - Xsqlite3_str_append(tls, pStr, ts+21515, 1) + Xsqlite3_str_append(tls, pStr, ts+21562, 1) } for i = 0; i < nTerm; i++ { if i != 0 { @@ 
-96484,7 +96527,7 @@ Xsqlite3_str_append(tls, pStr, zOp, 1) if nTerm > 1 { - Xsqlite3_str_append(tls, pStr, ts+21515, 1) + Xsqlite3_str_append(tls, pStr, ts+21562, 1) } for i = 0; i < nTerm; i++ { if i != 0 { @@ -96510,27 +96553,27 @@ if int32(nEq) == 0 && (*WhereLoop)(unsafe.Pointer(pLoop)).FwsFlags&U32(WHERE_BTM_LIMIT|WHERE_TOP_LIMIT) == U32(0) { return } - Xsqlite3_str_append(tls, pStr, ts+21517, 2) + Xsqlite3_str_append(tls, pStr, ts+21564, 2) for i = 0; i < int32(nEq); i++ { var z uintptr = explainIndexColumnName(tls, pIndex, i) if i != 0 { - Xsqlite3_str_append(tls, pStr, ts+21509, 5) + Xsqlite3_str_append(tls, pStr, ts+21556, 5) } Xsqlite3_str_appendf(tls, pStr, func() uintptr { if i >= int32(nSkip) { - return ts + 21520 + return ts + 21567 } - return ts + 21525 + return ts + 21572 }(), libc.VaList(bp, z)) } j = i if (*WhereLoop)(unsafe.Pointer(pLoop)).FwsFlags&U32(WHERE_BTM_LIMIT) != 0 { - explainAppendTerm(tls, pStr, pIndex, int32(*(*U16)(unsafe.Pointer(pLoop + 24 + 2))), j, i, ts+21533) + explainAppendTerm(tls, pStr, pIndex, int32(*(*U16)(unsafe.Pointer(pLoop + 24 + 2))), j, i, ts+21580) i = 1 } if (*WhereLoop)(unsafe.Pointer(pLoop)).FwsFlags&U32(WHERE_TOP_LIMIT) != 0 { - explainAppendTerm(tls, pStr, pIndex, int32(*(*U16)(unsafe.Pointer(pLoop + 24 + 4))), j, i, ts+21535) + explainAppendTerm(tls, pStr, pIndex, int32(*(*U16)(unsafe.Pointer(pLoop + 24 + 4))), j, i, ts+21582) } Xsqlite3_str_append(tls, pStr, ts+4941, 1) } @@ -96573,11 +96616,11 @@ Xsqlite3StrAccumInit(tls, bp+64, db, bp+96, int32(unsafe.Sizeof([100]int8{})), SQLITE_MAX_LENGTH) (*StrAccum)(unsafe.Pointer(bp + 64)).FprintfFlags = U8(SQLITE_PRINTF_INTERNAL) - Xsqlite3_str_appendf(tls, bp+64, ts+21537, libc.VaList(bp, func() uintptr { + Xsqlite3_str_appendf(tls, bp+64, ts+21584, libc.VaList(bp, func() uintptr { if isSearch != 0 { - return ts + 21543 + return ts + 21590 } - return ts + 21550 + return ts + 21597 }(), pItem)) if flags&U32(WHERE_IPK|WHERE_VIRTUALTABLE) == U32(0) { var zFmt uintptr = uintptr(0) @@ -96590,40 +96633,40 @@ zFmt = ts + 10960 } } else if flags&U32(WHERE_PARTIALIDX) != 0 { - zFmt = ts + 21555 + zFmt = ts + 21602 } else if flags&U32(WHERE_AUTO_INDEX) != 0 { - zFmt = ts + 21588 + zFmt = ts + 21635 } else if flags&U32(WHERE_IDX_ONLY) != 0 { - zFmt = ts + 21613 + zFmt = ts + 21660 } else { - zFmt = ts + 21631 + zFmt = ts + 21678 } if zFmt != 0 { - Xsqlite3_str_append(tls, bp+64, ts+21640, 7) + Xsqlite3_str_append(tls, bp+64, ts+21687, 7) Xsqlite3_str_appendf(tls, bp+64, zFmt, libc.VaList(bp+16, (*Index)(unsafe.Pointer(pIdx)).FzName)) explainIndexRange(tls, bp+64, pLoop) } } else if flags&U32(WHERE_IPK) != U32(0) && flags&U32(WHERE_CONSTRAINT) != U32(0) { var cRangeOp int8 var zRowid uintptr = ts + 16251 - Xsqlite3_str_appendf(tls, bp+64, ts+21648, libc.VaList(bp+24, zRowid)) + Xsqlite3_str_appendf(tls, bp+64, ts+21695, libc.VaList(bp+24, zRowid)) if flags&U32(WHERE_COLUMN_EQ|WHERE_COLUMN_IN) != 0 { cRangeOp = int8('=') } else if flags&U32(WHERE_BOTH_LIMIT) == U32(WHERE_BOTH_LIMIT) { - Xsqlite3_str_appendf(tls, bp+64, ts+21679, libc.VaList(bp+32, zRowid)) + Xsqlite3_str_appendf(tls, bp+64, ts+21726, libc.VaList(bp+32, zRowid)) cRangeOp = int8('<') } else if flags&U32(WHERE_BTM_LIMIT) != 0 { cRangeOp = int8('>') } else { cRangeOp = int8('<') } - Xsqlite3_str_appendf(tls, bp+64, ts+21689, libc.VaList(bp+40, int32(cRangeOp))) + Xsqlite3_str_appendf(tls, bp+64, ts+21736, libc.VaList(bp+40, int32(cRangeOp))) } else if flags&U32(WHERE_VIRTUALTABLE) != U32(0) { - Xsqlite3_str_appendf(tls, bp+64, ts+21694, + 
Xsqlite3_str_appendf(tls, bp+64, ts+21741, libc.VaList(bp+48, *(*int32)(unsafe.Pointer(pLoop + 24)), *(*uintptr)(unsafe.Pointer(pLoop + 24 + 16)))) } if int32((*SrcItem)(unsafe.Pointer(pItem)).Ffg.Fjointype)&JT_LEFT != 0 { - Xsqlite3_str_appendf(tls, bp+64, ts+21721, 0) + Xsqlite3_str_appendf(tls, bp+64, ts+21768, 0) } zMsg = Xsqlite3StrAccumFinish(tls, bp+64) @@ -96655,22 +96698,22 @@ Xsqlite3StrAccumInit(tls, bp+24, db, bp+56, int32(unsafe.Sizeof([100]int8{})), SQLITE_MAX_LENGTH) (*StrAccum)(unsafe.Pointer(bp + 24)).FprintfFlags = U8(SQLITE_PRINTF_INTERNAL) - Xsqlite3_str_appendf(tls, bp+24, ts+21732, libc.VaList(bp, pItem)) + Xsqlite3_str_appendf(tls, bp+24, ts+21779, libc.VaList(bp, pItem)) pLoop = (*WhereLevel)(unsafe.Pointer(pLevel)).FpWLoop if (*WhereLoop)(unsafe.Pointer(pLoop)).FwsFlags&U32(WHERE_IPK) != 0 { var pTab uintptr = (*SrcItem)(unsafe.Pointer(pItem)).FpTab if int32((*Table)(unsafe.Pointer(pTab)).FiPKey) >= 0 { - Xsqlite3_str_appendf(tls, bp+24, ts+21520, libc.VaList(bp+8, (*Column)(unsafe.Pointer((*Table)(unsafe.Pointer(pTab)).FaCol+uintptr((*Table)(unsafe.Pointer(pTab)).FiPKey)*24)).FzCnName)) + Xsqlite3_str_appendf(tls, bp+24, ts+21567, libc.VaList(bp+8, (*Column)(unsafe.Pointer((*Table)(unsafe.Pointer(pTab)).FaCol+uintptr((*Table)(unsafe.Pointer(pTab)).FiPKey)*24)).FzCnName)) } else { - Xsqlite3_str_appendf(tls, bp+24, ts+21753, 0) + Xsqlite3_str_appendf(tls, bp+24, ts+21800, 0) } } else { for i = int32((*WhereLoop)(unsafe.Pointer(pLoop)).FnSkip); i < int32(*(*U16)(unsafe.Pointer(pLoop + 24))); i++ { var z uintptr = explainIndexColumnName(tls, *(*uintptr)(unsafe.Pointer(pLoop + 24 + 8)), i) if i > int32((*WhereLoop)(unsafe.Pointer(pLoop)).FnSkip) { - Xsqlite3_str_append(tls, bp+24, ts+21509, 5) + Xsqlite3_str_append(tls, bp+24, ts+21556, 5) } - Xsqlite3_str_appendf(tls, bp+24, ts+21520, libc.VaList(bp+16, z)) + Xsqlite3_str_appendf(tls, bp+24, ts+21567, libc.VaList(bp+16, z)) } } Xsqlite3_str_append(tls, bp+24, ts+4941, 1) @@ -98267,7 +98310,7 @@ ; __126: ; - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21761, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21808, 0) ii = 0 __135: if !(ii < (*WhereClause)(unsafe.Pointer(pOrWc)).FnTerm) { @@ -98295,7 +98338,7 @@ pOrExpr = pAndExpr __140: ; - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21776, libc.VaList(bp, ii+1)) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21823, libc.VaList(bp, ii+1)) pSubWInfo = Xsqlite3WhereBegin(tls, pParse, pOrTab, pOrExpr, uintptr(0), uintptr(0), uintptr(0), uint16(WHERE_OR_SUBCLAUSE), iCovCur) @@ -98813,7 +98856,7 @@ var mAll Bitmask = uint64(0) var k int32 - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21785, libc.VaList(bp, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pTabItem)).FpTab)).FzName)) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21832, libc.VaList(bp, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pTabItem)).FpTab)).FzName)) for k = 0; k < iLevel; k++ { var iIdxCur int32 @@ -99174,7 +99217,7 @@ {FzOp: ts + 16100, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_MATCH)}, {FzOp: ts + 15431, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_GLOB)}, {FzOp: ts + 14951, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_LIKE)}, - {FzOp: ts + 21799, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_REGEXP)}, + {FzOp: ts + 21846, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_REGEXP)}, } func transferJoinMarkings(tls *libc.TLS, pDerived uintptr, pBase uintptr) { @@ -99664,12 +99707,12 @@ extraRight = x - uint64(1) if prereqAll>>1 >= x { - Xsqlite3ErrorMsg(tls, pParse, ts+21806, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+21853, 0) 
return } } else if prereqAll>>1 >= x { if (*SrcList)(unsafe.Pointer(pSrc)).FnSrc > 0 && int32((*SrcItem)(unsafe.Pointer(pSrc+8)).Ffg.Fjointype)&JT_LTORJ != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+21806, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+21853, 0) return } *(*U32)(unsafe.Pointer(pExpr + 4)) &= libc.Uint32FromInt32(libc.CplInt32(EP_InnerON)) @@ -99748,7 +99791,7 @@ !((*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_OuterON) != U32(0)) && 0 == Xsqlite3ExprCanBeNull(tls, pLeft) { (*Expr)(unsafe.Pointer(pExpr)).Fop = U8(TK_TRUEFALSE) - *(*uintptr)(unsafe.Pointer(pExpr + 8)) = ts + 6753 + *(*uintptr)(unsafe.Pointer(pExpr + 8)) = ts + 7683 *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_IsFalse) (*WhereTerm)(unsafe.Pointer(pTerm)).FprereqAll = uint64(0) (*WhereTerm)(unsafe.Pointer(pTerm)).FeOperator = U16(0) @@ -99842,7 +99885,7 @@ } zCollSeqName = func() uintptr { if *(*int32)(unsafe.Pointer(bp + 20)) != 0 { - return ts + 21847 + return ts + 21894 } return uintptr(unsafe.Pointer(&Xsqlite3StrBINARY)) }() @@ -100218,7 +100261,7 @@ k++ } if k >= int32((*Table)(unsafe.Pointer(pTab)).FnCol) { - Xsqlite3ErrorMsg(tls, pParse, ts+21854, + Xsqlite3ErrorMsg(tls, pParse, ts+21901, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName, j)) return } @@ -100234,7 +100277,7 @@ pRhs = Xsqlite3PExpr(tls, pParse, TK_UPLUS, Xsqlite3ExprDup(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, (*ExprList_item)(unsafe.Pointer(pArgs+8+uintptr(j)*32)).FpExpr, 0), uintptr(0)) pTerm = Xsqlite3PExpr(tls, pParse, TK_EQ, pColRef, pRhs) - if int32((*SrcItem)(unsafe.Pointer(pItem)).Ffg.Fjointype)&(JT_LEFT|JT_LTORJ) != 0 { + if int32((*SrcItem)(unsafe.Pointer(pItem)).Ffg.Fjointype)&(JT_LEFT|JT_LTORJ|JT_RIGHT) != 0 { joinType = U32(EP_OuterON) } else { joinType = U32(EP_InnerON) @@ -100952,7 +100995,7 @@ goto __6 } Xsqlite3_log(tls, SQLITE_WARNING|int32(1)<<8, - ts+21890, libc.VaList(bp, (*Table)(unsafe.Pointer(pTable)).FzName, + ts+21937, libc.VaList(bp, (*Table)(unsafe.Pointer(pTable)).FzName, (*Column)(unsafe.Pointer((*Table)(unsafe.Pointer(pTable)).FaCol+uintptr(iCol)*24)).FzCnName)) sentWarning = U8(1) __6: @@ -101023,7 +101066,7 @@ __14: ; *(*uintptr)(unsafe.Pointer(pLoop + 24 + 8)) = pIdx - (*Index)(unsafe.Pointer(pIdx)).FzName = ts + 21916 + (*Index)(unsafe.Pointer(pIdx)).FzName = ts + 21963 (*Index)(unsafe.Pointer(pIdx)).FpTable = pTable n = 0 idxCols = uint64(0) @@ -101197,6 +101240,10 @@ var v uintptr = (*Parse)(unsafe.Pointer(pParse)).FpVdbe var pLoop uintptr = (*WhereLevel)(unsafe.Pointer(pLevel)).FpWLoop var iCur int32 + var saved_pIdxEpr uintptr + + saved_pIdxEpr = (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr + (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr = uintptr(0) addrOnce = Xsqlite3VdbeAddOp0(tls, v, OP_Once) for __ccgo := true; __ccgo; __ccgo = iLevel < int32((*WhereInfo)(unsafe.Pointer(pWInfo)).FnLevel) { @@ -101240,9 +101287,7 @@ var r1 int32 = Xsqlite3GetTempRange(tls, pParse, n) var jj int32 for jj = 0; jj < n; jj++ { - var iCol int32 = int32(*(*I16)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx)).FaiColumn + uintptr(jj)*2))) - - Xsqlite3ExprCodeGetColumnOfTable(tls, v, (*Index)(unsafe.Pointer(pIdx)).FpTable, iCur, iCol, r1+jj) + Xsqlite3ExprCodeLoadIndexColumn(tls, pParse, pIdx, iCur, jj, r1+jj) } Xsqlite3VdbeAddOp4Int(tls, v, OP_FilterAdd, (*WhereLevel)(unsafe.Pointer(pLevel)).FregFilter, 0, r1, n) Xsqlite3ReleaseTempRange(tls, pParse, r1, n) @@ -101276,6 +101321,7 @@ } } Xsqlite3VdbeJumpHere(tls, v, addrOnce) + (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr = saved_pIdxEpr } func allocateIndexInfo(tls *libc.TLS, pWInfo 
uintptr, pWC uintptr, mUnusable Bitmask, pSrc uintptr, pmNoOmit uintptr) uintptr { @@ -101534,11 +101580,16 @@ _ = pParse + if !((*Table)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx)).FpTable)).FtabFlags&U32(TF_WithoutRowid) == U32(0)) && int32(*(*uint16)(unsafe.Pointer(pIdx + 100))&0x3>>0) == SQLITE_IDXTYPE_PRIMARYKEY { + nField = int32((*Index)(unsafe.Pointer(pIdx)).FnKeyCol) + } else { + nField = int32((*Index)(unsafe.Pointer(pIdx)).FnColumn) + } nField = func() int32 { - if int32((*UnpackedRecord)(unsafe.Pointer(pRec)).FnField) < (*Index)(unsafe.Pointer(pIdx)).FnSample { + if int32((*UnpackedRecord)(unsafe.Pointer(pRec)).FnField) < nField { return int32((*UnpackedRecord)(unsafe.Pointer(pRec)).FnField) } - return (*Index)(unsafe.Pointer(pIdx)).FnSample + return nField }() iCol = 0 iSample = (*Index)(unsafe.Pointer(pIdx)).FnSample * nField @@ -103119,7 +103170,7 @@ j >= (*WhereClause)(unsafe.Pointer(pWC)).FnTerm || *(*uintptr)(unsafe.Pointer((*WhereLoop)(unsafe.Pointer(pNew)).FaLTerm + uintptr(iTerm)*8)) != uintptr(0) || int32((*sqlite3_index_constraint)(unsafe.Pointer(pIdxCons)).Fusable) == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+21927, libc.VaList(bp, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pSrc)).FpTab)).FzName)) + Xsqlite3ErrorMsg(tls, pParse, ts+21974, libc.VaList(bp, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pSrc)).FpTab)).FzName)) return SQLITE_ERROR } @@ -103177,7 +103228,7 @@ (*WhereLoop)(unsafe.Pointer(pNew)).FnLTerm = U16(mxTerm + 1) for i = 0; i <= mxTerm; i++ { if *(*uintptr)(unsafe.Pointer((*WhereLoop)(unsafe.Pointer(pNew)).FaLTerm + uintptr(i)*8)) == uintptr(0) { - Xsqlite3ErrorMsg(tls, pParse, ts+21927, libc.VaList(bp+8, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pSrc)).FpTab)).FzName)) + Xsqlite3ErrorMsg(tls, pParse, ts+21974, libc.VaList(bp+8, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pSrc)).FpTab)).FzName)) return SQLITE_ERROR } @@ -103575,7 +103626,7 @@ mPrior = mPrior | (*WhereLoop)(unsafe.Pointer(pNew)).FmaskSelf if rc != 0 || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { if rc == SQLITE_DONE { - Xsqlite3_log(tls, SQLITE_WARNING, ts+21953, 0) + Xsqlite3_log(tls, SQLITE_WARNING, ts+22000, 0) rc = SQLITE_OK } else { goto __3 @@ -104182,7 +104233,7 @@ } if nFrom == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+21988, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+22035, 0) Xsqlite3DbFreeNN(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pSpace) return SQLITE_ERROR } @@ -104217,6 +104268,10 @@ if int32((*WherePath)(unsafe.Pointer(pFrom)).FisOrdered) == (*ExprList)(unsafe.Pointer((*WhereInfo)(unsafe.Pointer(pWInfo)).FpOrderBy)).FnExpr { (*WhereInfo)(unsafe.Pointer(pWInfo)).FeDistinct = U8(WHERE_DISTINCT_ORDERED) } + if (*Select)(unsafe.Pointer((*WhereInfo)(unsafe.Pointer(pWInfo)).FpSelect)).FpOrderBy != 0 && + int32((*WhereInfo)(unsafe.Pointer(pWInfo)).FnOBSat) > (*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer((*WhereInfo)(unsafe.Pointer(pWInfo)).FpSelect)).FpOrderBy)).FnExpr { + (*WhereInfo)(unsafe.Pointer(pWInfo)).FnOBSat = I8((*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer((*WhereInfo)(unsafe.Pointer(pWInfo)).FpSelect)).FpOrderBy)).FnExpr) + } } else { (*WhereInfo)(unsafe.Pointer(pWInfo)).FrevMask = (*WherePath)(unsafe.Pointer(pFrom)).FrevLoop if int32((*WhereInfo)(unsafe.Pointer(pWInfo)).FnOBSat) <= 0 { @@ -104511,6 +104566,9 @@ (*IndexedExpr)(unsafe.Pointer(p)).FiIdxCur = iIdxCur (*IndexedExpr)(unsafe.Pointer(p)).FiIdxCol = i (*IndexedExpr)(unsafe.Pointer(p)).FbMaybeNullRow = U8(bMaybeNullRow) + if Xsqlite3IndexAffinityStr(tls, 
(*Parse)(unsafe.Pointer(pParse)).Fdb, pIdx) != 0 { + (*IndexedExpr)(unsafe.Pointer(p)).Faff = U8(*(*int8)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx)).FzColAff + uintptr(i)))) + } (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr = p if (*IndexedExpr)(unsafe.Pointer(p)).FpIENext == uintptr(0) { Xsqlite3ParserAddCleanup(tls, pParse, *(*uintptr)(unsafe.Pointer(&struct { @@ -104663,7 +104721,7 @@ if !((*SrcList)(unsafe.Pointer(pTabList)).FnSrc > int32(uint64(unsafe.Sizeof(Bitmask(0)))*uint64(8))) { goto __2 } - Xsqlite3ErrorMsg(tls, pParse, ts+22006, libc.VaList(bp, int32(uint64(unsafe.Sizeof(Bitmask(0)))*uint64(8)))) + Xsqlite3ErrorMsg(tls, pParse, ts+22053, libc.VaList(bp, int32(uint64(unsafe.Sizeof(Bitmask(0)))*uint64(8)))) return uintptr(0) __2: ; @@ -104727,7 +104785,7 @@ (*WhereInfo)(unsafe.Pointer(pWInfo)).FeDistinct = U8(WHERE_DISTINCT_UNIQUE) __7: ; - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+22034, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+22081, 0) goto __5 __4: ii = 0 @@ -105609,7 +105667,7 @@ error_out: Xsqlite3_result_error(tls, - pCtx, ts+22052, -1) + pCtx, ts+22099, -1) } func nth_valueFinalizeFunc(tls *libc.TLS, pCtx uintptr) { @@ -105742,7 +105800,7 @@ (*NtileCtx)(unsafe.Pointer(p)).FnParam = Xsqlite3_value_int64(tls, *(*uintptr)(unsafe.Pointer(apArg))) if (*NtileCtx)(unsafe.Pointer(p)).FnParam <= int64(0) { Xsqlite3_result_error(tls, - pCtx, ts+22108, -1) + pCtx, ts+22155, -1) } } (*NtileCtx)(unsafe.Pointer(p)).FnTotal++ @@ -105832,17 +105890,17 @@ } } -var row_numberName = *(*[11]int8)(unsafe.Pointer(ts + 22153)) -var dense_rankName = *(*[11]int8)(unsafe.Pointer(ts + 22164)) -var rankName = *(*[5]int8)(unsafe.Pointer(ts + 22175)) -var percent_rankName = *(*[13]int8)(unsafe.Pointer(ts + 22180)) -var cume_distName = *(*[10]int8)(unsafe.Pointer(ts + 22193)) -var ntileName = *(*[6]int8)(unsafe.Pointer(ts + 22203)) -var last_valueName = *(*[11]int8)(unsafe.Pointer(ts + 22209)) -var nth_valueName = *(*[10]int8)(unsafe.Pointer(ts + 22220)) -var first_valueName = *(*[12]int8)(unsafe.Pointer(ts + 22230)) -var leadName = *(*[5]int8)(unsafe.Pointer(ts + 22242)) -var lagName = *(*[4]int8)(unsafe.Pointer(ts + 22247)) +var row_numberName = *(*[11]int8)(unsafe.Pointer(ts + 22200)) +var dense_rankName = *(*[11]int8)(unsafe.Pointer(ts + 22211)) +var rankName = *(*[5]int8)(unsafe.Pointer(ts + 22222)) +var percent_rankName = *(*[13]int8)(unsafe.Pointer(ts + 22227)) +var cume_distName = *(*[10]int8)(unsafe.Pointer(ts + 22240)) +var ntileName = *(*[6]int8)(unsafe.Pointer(ts + 22250)) +var last_valueName = *(*[11]int8)(unsafe.Pointer(ts + 22256)) +var nth_valueName = *(*[10]int8)(unsafe.Pointer(ts + 22267)) +var first_valueName = *(*[12]int8)(unsafe.Pointer(ts + 22277)) +var leadName = *(*[5]int8)(unsafe.Pointer(ts + 22289)) +var lagName = *(*[4]int8)(unsafe.Pointer(ts + 22294)) func noopStepFunc(tls *libc.TLS, p uintptr, n int32, a uintptr) { _ = p @@ -105888,7 +105946,7 @@ } } if p == uintptr(0) { - Xsqlite3ErrorMsg(tls, pParse, ts+22251, libc.VaList(bp, zName)) + Xsqlite3ErrorMsg(tls, pParse, ts+22298, libc.VaList(bp, zName)) } return p } @@ -105932,12 +105990,12 @@ ((*Window)(unsafe.Pointer(pWin)).FpStart != 0 || (*Window)(unsafe.Pointer(pWin)).FpEnd != 0) && ((*Window)(unsafe.Pointer(pWin)).FpOrderBy == uintptr(0) || (*ExprList)(unsafe.Pointer((*Window)(unsafe.Pointer(pWin)).FpOrderBy)).FnExpr != 1) { Xsqlite3ErrorMsg(tls, pParse, - ts+22270, 0) + ts+22317, 0) } else if (*FuncDef)(unsafe.Pointer(pFunc)).FfuncFlags&U32(SQLITE_FUNC_WINDOW) != 0 { var db uintptr = 
(*Parse)(unsafe.Pointer(pParse)).Fdb if (*Window)(unsafe.Pointer(pWin)).FpFilter != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+22341, 0) + ts+22388, 0) } else { *(*[8]WindowUpdate)(unsafe.Pointer(bp)) = [8]WindowUpdate{ {FzFunc: uintptr(unsafe.Pointer(&row_numberName)), FeFrmType: TK_ROWS, FeStart: TK_UNBOUNDED, FeEnd: TK_CURRENT}, @@ -106164,7 +106222,7 @@ if int32((*Expr)(unsafe.Pointer(pExpr)).Fop) == TK_AGG_FUNCTION && (*Expr)(unsafe.Pointer(pExpr)).FpAggInfo == uintptr(0) { Xsqlite3ErrorMsg(tls, (*Walker)(unsafe.Pointer(pWalker)).FpParse, - ts+22404, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(pExpr + 8)))) + ts+22451, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(pExpr + 8)))) } return WRC_Continue } @@ -106280,7 +106338,7 @@ if *(*uintptr)(unsafe.Pointer(bp + 48)) == uintptr(0) { *(*uintptr)(unsafe.Pointer(bp + 48)) = Xsqlite3ExprListAppend(tls, pParse, uintptr(0), - Xsqlite3Expr(tls, db, TK_INTEGER, ts+7514)) + Xsqlite3Expr(tls, db, TK_INTEGER, ts+7503)) } pSub = Xsqlite3SelectNew(tls, @@ -106395,7 +106453,7 @@ eStart == TK_FOLLOWING && (eEnd == TK_PRECEDING || eEnd == TK_CURRENT)) { goto __2 } - Xsqlite3ErrorMsg(tls, pParse, ts+22430, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+22477, 0) goto windowAllocErr __2: ; @@ -106460,15 +106518,15 @@ var zErr uintptr = uintptr(0) if (*Window)(unsafe.Pointer(pWin)).FpPartition != 0 { - zErr = ts + 22462 + zErr = ts + 22509 } else if (*Window)(unsafe.Pointer(pExist)).FpOrderBy != 0 && (*Window)(unsafe.Pointer(pWin)).FpOrderBy != 0 { - zErr = ts + 22479 + zErr = ts + 22526 } else if int32((*Window)(unsafe.Pointer(pExist)).FbImplicitFrame) == 0 { - zErr = ts + 22495 + zErr = ts + 22542 } if zErr != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+22515, libc.VaList(bp, zErr, (*Window)(unsafe.Pointer(pWin)).FzBase)) + ts+22562, libc.VaList(bp, zErr, (*Window)(unsafe.Pointer(pWin)).FzBase)) } else { (*Window)(unsafe.Pointer(pWin)).FpPartition = Xsqlite3ExprListDup(tls, db, (*Window)(unsafe.Pointer(pExist)).FpPartition, 0) if (*Window)(unsafe.Pointer(pExist)).FpOrderBy != 0 { @@ -106489,7 +106547,7 @@ (*Window)(unsafe.Pointer(pWin)).FpOwner = p if (*Expr)(unsafe.Pointer(p)).Fflags&U32(EP_Distinct) != 0 && int32((*Window)(unsafe.Pointer(pWin)).FeFrmType) != TK_FILTER { Xsqlite3ErrorMsg(tls, pParse, - ts+22548, 0) + ts+22595, 0) } } else { Xsqlite3WindowDelete(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pWin) @@ -106645,11 +106703,11 @@ } var azErr = [5]uintptr{ - ts + 22595, - ts + 22648, - ts + 22052, - ts + 22699, - ts + 22751, + ts + 22642, + ts + 22695, + ts + 22099, + ts + 22746, + ts + 22798, } var aOp1 = [5]int32{OP_Ge, OP_Ge, OP_Gt, OP_Ge, OP_Ge} @@ -108044,19 +108102,19 @@ } cnt++ if (*Select)(unsafe.Pointer(pLoop)).FpOrderBy != 0 || (*Select)(unsafe.Pointer(pLoop)).FpLimit != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+22801, + Xsqlite3ErrorMsg(tls, pParse, ts+22848, libc.VaList(bp, func() uintptr { if (*Select)(unsafe.Pointer(pLoop)).FpOrderBy != uintptr(0) { - return ts + 22843 + return ts + 22890 } - return ts + 22852 + return ts + 22899 }(), Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(pNext)).Fop)))) break } } if (*Select)(unsafe.Pointer(p)).FselFlags&U32(SF_MultiValue) == U32(0) && libc.AssignInt32(&mxSelect, *(*int32)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb + 136 + 4*4))) > 0 && cnt > mxSelect { - Xsqlite3ErrorMsg(tls, pParse, ts+22858, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+22905, 0) } } @@ -108124,7 +108182,7 @@ var p uintptr = Xsqlite3ExprListAppend(tls, pParse, pPrior, uintptr(0)) if (hasCollate != 0 || sortOrder != -1) && 
int32((*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).Finit.Fbusy) == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+22892, + Xsqlite3ErrorMsg(tls, pParse, ts+22939, libc.VaList(bp, (*Token)(unsafe.Pointer(pIdToken)).Fn, (*Token)(unsafe.Pointer(pIdToken)).Fz)) } Xsqlite3ExprListSetName(tls, pParse, p, pIdToken, 1) @@ -109221,7 +109279,7 @@ yy_pop_parser_stack(tls, yypParser) } - Xsqlite3ErrorMsg(tls, pParse, ts+22930, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+22977, 0) (*YyParser)(unsafe.Pointer(yypParser)).FpParse = pParse } @@ -110200,7 +110258,7 @@ *(*U32)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-1)*24 + 8)) = U32(TF_WithoutRowid | TF_NoVisibleRowid) } else { *(*U32)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-1)*24 + 8)) = U32(0) - Xsqlite3ErrorMsg(tls, pParse, ts+22952, libc.VaList(bp, (*Token)(unsafe.Pointer(yymsp+8)).Fn, (*Token)(unsafe.Pointer(yymsp+8)).Fz)) + Xsqlite3ErrorMsg(tls, pParse, ts+22999, libc.VaList(bp, (*Token)(unsafe.Pointer(yymsp+8)).Fn, (*Token)(unsafe.Pointer(yymsp+8)).Fz)) } } break @@ -110210,7 +110268,7 @@ *(*U32)(unsafe.Pointer(bp + 40)) = U32(TF_Strict) } else { *(*U32)(unsafe.Pointer(bp + 40)) = U32(0) - Xsqlite3ErrorMsg(tls, pParse, ts+22952, libc.VaList(bp+16, (*Token)(unsafe.Pointer(yymsp+8)).Fn, (*Token)(unsafe.Pointer(yymsp+8)).Fz)) + Xsqlite3ErrorMsg(tls, pParse, ts+22999, libc.VaList(bp+16, (*Token)(unsafe.Pointer(yymsp+8)).Fn, (*Token)(unsafe.Pointer(yymsp+8)).Fz)) } } *(*U32)(unsafe.Pointer(yymsp + 8)) = *(*U32)(unsafe.Pointer(bp + 40)) @@ -110953,7 +111011,7 @@ case uint32(157): { Xsqlite3SrcListIndexedBy(tls, pParse, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-5)*24 + 8)), yymsp+libc.UintptrFromInt32(-4)*24+8) - Xsqlite3ExprListCheckLength(tls, pParse, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-2)*24 + 8)), ts+22979) + Xsqlite3ExprListCheckLength(tls, pParse, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-2)*24 + 8)), ts+23026) if *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-1)*24 + 8)) != 0 { var pFromClause uintptr = *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-1)*24 + 8)) if (*SrcList)(unsafe.Pointer(pFromClause)).FnSrc > 1 { @@ -111117,7 +111175,7 @@ *(*Token)(unsafe.Pointer(bp + 128)) = *(*Token)(unsafe.Pointer(yymsp + 8)) if int32((*Parse)(unsafe.Pointer(pParse)).Fnested) == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+22988, libc.VaList(bp+32, bp+128)) + Xsqlite3ErrorMsg(tls, pParse, ts+23035, libc.VaList(bp+32, bp+128)) *(*uintptr)(unsafe.Pointer(yymsp + 8)) = uintptr(0) } else { *(*uintptr)(unsafe.Pointer(yymsp + 8)) = Xsqlite3PExpr(tls, pParse, TK_REGISTER, uintptr(0), uintptr(0)) @@ -111334,9 +111392,9 @@ Xsqlite3ExprUnmapAndDelete(tls, pParse, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-4)*24 + 8))) *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-4)*24 + 8)) = Xsqlite3Expr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, TK_STRING, func() uintptr { if *(*int32)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-3)*24 + 8)) != 0 { - return ts + 6748 + return ts + 7678 } - return ts + 6753 + return ts + 7683 }()) if *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-4)*24 + 8)) != 0 { Xsqlite3ExprIdToTrueFalse(tls, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-4)*24 + 8))) @@ -111620,19 +111678,19 @@ { *(*Token)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-2)*24 + 8)) = *(*Token)(unsafe.Pointer(yymsp + 8)) Xsqlite3ErrorMsg(tls, pParse, - ts+23012, 0) + ts+23059, 0) } break case uint32(271): { Xsqlite3ErrorMsg(tls, pParse, - ts+23107, 
0) + ts+23154, 0) } break case uint32(272): { Xsqlite3ErrorMsg(tls, pParse, - ts+23191, 0) + ts+23238, 0) } break case uint32(273): @@ -112011,9 +112069,9 @@ _ = yymajor if *(*int8)(unsafe.Pointer((*Token)(unsafe.Pointer(bp + 8)).Fz)) != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+22988, libc.VaList(bp, bp+8)) + Xsqlite3ErrorMsg(tls, pParse, ts+23035, libc.VaList(bp, bp+8)) } else { - Xsqlite3ErrorMsg(tls, pParse, ts+23276, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+23323, 0) } (*YyParser)(unsafe.Pointer(yypParser)).FpParse = pParse @@ -112781,7 +112839,7 @@ } else { (*Token)(unsafe.Pointer(bp + 2464)).Fz = zSql (*Token)(unsafe.Pointer(bp + 2464)).Fn = uint32(n) - Xsqlite3ErrorMsg(tls, pParse, ts+23293, libc.VaList(bp, bp+2464)) + Xsqlite3ErrorMsg(tls, pParse, ts+23340, libc.VaList(bp, bp+2464)) break } } @@ -112804,7 +112862,7 @@ if (*Parse)(unsafe.Pointer(pParse)).FzErrMsg == uintptr(0) { (*Parse)(unsafe.Pointer(pParse)).FzErrMsg = Xsqlite3MPrintf(tls, db, ts+3647, libc.VaList(bp+8, Xsqlite3ErrStr(tls, (*Parse)(unsafe.Pointer(pParse)).Frc))) } - Xsqlite3_log(tls, (*Parse)(unsafe.Pointer(pParse)).Frc, ts+23318, libc.VaList(bp+16, (*Parse)(unsafe.Pointer(pParse)).FzErrMsg, (*Parse)(unsafe.Pointer(pParse)).FzTail)) + Xsqlite3_log(tls, (*Parse)(unsafe.Pointer(pParse)).Frc, ts+23365, libc.VaList(bp+16, (*Parse)(unsafe.Pointer(pParse)).FzErrMsg, (*Parse)(unsafe.Pointer(pParse)).FzTail)) nErr++ } (*Parse)(unsafe.Pointer(pParse)).FzTail = zSql @@ -112977,7 +113035,7 @@ fallthrough case 'C': { - if nId == 6 && Xsqlite3_strnicmp(tls, zSql, ts+23329, 6) == 0 { + if nId == 6 && Xsqlite3_strnicmp(tls, zSql, ts+23376, 6) == 0 { token = U8(TkCREATE) } else { token = U8(TkOTHER) @@ -112990,11 +113048,11 @@ fallthrough case 'T': { - if nId == 7 && Xsqlite3_strnicmp(tls, zSql, ts+19915, 7) == 0 { + if nId == 7 && Xsqlite3_strnicmp(tls, zSql, ts+19962, 7) == 0 { token = U8(TkTRIGGER) - } else if nId == 4 && Xsqlite3_strnicmp(tls, zSql, ts+23336, 4) == 0 { + } else if nId == 4 && Xsqlite3_strnicmp(tls, zSql, ts+23383, 4) == 0 { token = U8(TkTEMP) - } else if nId == 9 && Xsqlite3_strnicmp(tls, zSql, ts+23341, 9) == 0 { + } else if nId == 9 && Xsqlite3_strnicmp(tls, zSql, ts+23388, 9) == 0 { token = U8(TkTEMP) } else { token = U8(TkOTHER) @@ -113007,9 +113065,9 @@ fallthrough case 'E': { - if nId == 3 && Xsqlite3_strnicmp(tls, zSql, ts+23351, 3) == 0 { + if nId == 3 && Xsqlite3_strnicmp(tls, zSql, ts+23398, 3) == 0 { token = U8(TkEND) - } else if nId == 7 && Xsqlite3_strnicmp(tls, zSql, ts+23355, 7) == 0 { + } else if nId == 7 && Xsqlite3_strnicmp(tls, zSql, ts+23402, 7) == 0 { token = U8(TkEXPLAIN) } else { token = U8(TkOTHER) @@ -113243,7 +113301,7 @@ var rc int32 = SQLITE_OK if Xsqlite3Config.FisInit != 0 { - return Xsqlite3MisuseError(tls, 174337) + return Xsqlite3MisuseError(tls, 174426) } ap = va @@ -113818,7 +113876,7 @@ return SQLITE_OK } if !(Xsqlite3SafetyCheckSickOrOk(tls, db) != 0) { - return Xsqlite3MisuseError(tls, 175111) + return Xsqlite3MisuseError(tls, 175200) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) if int32((*Sqlite3)(unsafe.Pointer(db)).FmTrace)&SQLITE_TRACE_CLOSE != 0 { @@ -113833,7 +113891,7 @@ if !(forceZombie != 0) && connectionIsBusy(tls, db) != 0 { Xsqlite3ErrorWithMsg(tls, db, SQLITE_BUSY, - ts+23363, 0) + ts+23410, 0) Xsqlite3_mutex_leave(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) return SQLITE_BUSY } @@ -114024,23 +114082,23 @@ // Return a static string that describes the kind of error specified in the // argument. 
func Xsqlite3ErrStr(tls *libc.TLS, rc int32) uintptr { - var zErr uintptr = ts + 23431 + var zErr uintptr = ts + 23478 switch rc { case SQLITE_ABORT | int32(2)<<8: { - zErr = ts + 23445 + zErr = ts + 23492 break } case SQLITE_ROW: { - zErr = ts + 23467 + zErr = ts + 23514 break } case SQLITE_DONE: { - zErr = ts + 23489 + zErr = ts + 23536 break } @@ -114058,35 +114116,35 @@ } var aMsg = [29]uintptr{ - ts + 23512, - ts + 23525, + ts + 23559, + ts + 23572, uintptr(0), - ts + 23541, - ts + 23566, - ts + 23580, - ts + 23599, + ts + 23588, + ts + 23613, + ts + 23627, + ts + 23646, ts + 1474, - ts + 23624, - ts + 23661, - ts + 23673, - ts + 23688, - ts + 23721, - ts + 23739, - ts + 23764, - ts + 23793, + ts + 23671, + ts + 23708, + ts + 23720, + ts + 23735, + ts + 23768, + ts + 23786, + ts + 23811, + ts + 23840, uintptr(0), ts + 5822, ts + 5318, - ts + 23810, - ts + 23828, - ts + 23846, - uintptr(0), - ts + 23880, + ts + 23857, + ts + 23875, + ts + 23893, uintptr(0), - ts + 23901, ts + 23927, - ts + 23950, - ts + 23971, + uintptr(0), + ts + 23948, + ts + 23974, + ts + 23997, + ts + 24018, } func sqliteDefaultBusyCallback(tls *libc.TLS, ptr uintptr, count int32) int32 { @@ -114207,7 +114265,7 @@ libc.Bool32(xValue == uintptr(0)) != libc.Bool32(xInverse == uintptr(0)) || (nArg < -1 || nArg > SQLITE_MAX_FUNCTION_ARG) || 255 < Xsqlite3Strlen30(tls, zFunctionName) { - return Xsqlite3MisuseError(tls, 175758) + return Xsqlite3MisuseError(tls, 175847) } extraFlags = enc & (SQLITE_DETERMINISTIC | SQLITE_DIRECTONLY | SQLITE_SUBTYPE | SQLITE_INNOCUOUS) @@ -114252,7 +114310,7 @@ if p != 0 && (*FuncDef)(unsafe.Pointer(p)).FfuncFlags&U32(SQLITE_FUNC_ENCMASK) == U32(enc) && int32((*FuncDef)(unsafe.Pointer(p)).FnArg) == nArg { if (*Sqlite3)(unsafe.Pointer(db)).FnVdbeActive != 0 { Xsqlite3ErrorWithMsg(tls, db, SQLITE_BUSY, - ts+23987, 0) + ts+24034, 0) return SQLITE_BUSY } else { @@ -114369,7 +114427,7 @@ _ = NotUsed _ = NotUsed2 zErr = Xsqlite3_mprintf(tls, - ts+24050, libc.VaList(bp, zName)) + ts+24097, libc.VaList(bp, zName)) Xsqlite3_result_error(tls, context, zErr, -1) Xsqlite3_free(tls, zErr) } @@ -114605,7 +114663,7 @@ } if iDb < 0 { rc = SQLITE_ERROR - Xsqlite3ErrorWithMsg(tls, db, SQLITE_ERROR, ts+24101, libc.VaList(bp, zDb)) + Xsqlite3ErrorWithMsg(tls, db, SQLITE_ERROR, ts+24148, libc.VaList(bp, zDb)) } else { (*Sqlite3)(unsafe.Pointer(db)).FbusyHandler.FnBusy = 0 rc = Xsqlite3Checkpoint(tls, db, iDb, eMode, pnLog, pnCkpt) @@ -114698,7 +114756,7 @@ return Xsqlite3ErrStr(tls, SQLITE_NOMEM) } if !(Xsqlite3SafetyCheckSickOrOk(tls, db) != 0) { - return Xsqlite3ErrStr(tls, Xsqlite3MisuseError(tls, 176503)) + return Xsqlite3ErrStr(tls, Xsqlite3MisuseError(tls, 176592)) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) if (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { @@ -114768,7 +114826,7 @@ // passed to this function, we assume a malloc() failed during sqlite3_open(). 
func Xsqlite3_errcode(tls *libc.TLS, db uintptr) int32 { if db != 0 && !(Xsqlite3SafetyCheckSickOrOk(tls, db) != 0) { - return Xsqlite3MisuseError(tls, 176582) + return Xsqlite3MisuseError(tls, 176671) } if !(db != 0) || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { return SQLITE_NOMEM @@ -114778,7 +114836,7 @@ func Xsqlite3_extended_errcode(tls *libc.TLS, db uintptr) int32 { if db != 0 && !(Xsqlite3SafetyCheckSickOrOk(tls, db) != 0) { - return Xsqlite3MisuseError(tls, 176591) + return Xsqlite3MisuseError(tls, 176680) } if !(db != 0) || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { return SQLITE_NOMEM @@ -114810,14 +114868,14 @@ enc2 = SQLITE_UTF16LE } if enc2 < SQLITE_UTF8 || enc2 > SQLITE_UTF16BE { - return Xsqlite3MisuseError(tls, 176639) + return Xsqlite3MisuseError(tls, 176728) } pColl = Xsqlite3FindCollSeq(tls, db, U8(enc2), zName, 0) if pColl != 0 && (*CollSeq)(unsafe.Pointer(pColl)).FxCmp != 0 { if (*Sqlite3)(unsafe.Pointer(db)).FnVdbeActive != 0 { Xsqlite3ErrorWithMsg(tls, db, SQLITE_BUSY, - ts+24122, 0) + ts+24169, 0) return SQLITE_BUSY } Xsqlite3ExpirePreparedStatements(tls, db, 0) @@ -114947,7 +115005,7 @@ if !((flags&uint32(SQLITE_OPEN_URI) != 0 || Xsqlite3Config.FbOpenUri != 0) && - nUri >= 5 && libc.Xmemcmp(tls, zUri, ts+24190, uint64(5)) == 0) { + nUri >= 5 && libc.Xmemcmp(tls, zUri, ts+24237, uint64(5)) == 0) { goto __1 } iOut = 0 @@ -114992,10 +115050,10 @@ goto __8 __9: ; - if !(iIn != 7 && (iIn != 16 || libc.Xmemcmp(tls, ts+24196, zUri+7, uint64(9)) != 0)) { + if !(iIn != 7 && (iIn != 16 || libc.Xmemcmp(tls, ts+24243, zUri+7, uint64(9)) != 0)) { goto __10 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24206, + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24253, libc.VaList(bp, iIn-7, zUri+7)) rc = SQLITE_ERROR goto parse_uri_out @@ -115100,7 +115158,7 @@ zVal = zOpt + uintptr(nOpt+1) nVal = Xsqlite3Strlen30(tls, zVal) - if !(nOpt == 3 && libc.Xmemcmp(tls, ts+24234, zOpt, uint64(3)) == 0) { + if !(nOpt == 3 && libc.Xmemcmp(tls, ts+24281, zOpt, uint64(3)) == 0) { goto __29 } zVfs = zVal @@ -115111,17 +115169,17 @@ mask = 0 limit = 0 - if !(nOpt == 5 && libc.Xmemcmp(tls, ts+24238, zOpt, uint64(5)) == 0) { + if !(nOpt == 5 && libc.Xmemcmp(tls, ts+24285, zOpt, uint64(5)) == 0) { goto __31 } mask = SQLITE_OPEN_SHAREDCACHE | SQLITE_OPEN_PRIVATECACHE aMode = uintptr(unsafe.Pointer(&aCacheMode)) limit = mask - zModeType = ts + 24238 + zModeType = ts + 24285 __31: ; - if !(nOpt == 4 && libc.Xmemcmp(tls, ts+24244, zOpt, uint64(4)) == 0) { + if !(nOpt == 4 && libc.Xmemcmp(tls, ts+24291, zOpt, uint64(4)) == 0) { goto __32 } @@ -115159,7 +115217,7 @@ if !(mode == 0) { goto __38 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24249, libc.VaList(bp+16, zModeType, zVal)) + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24296, libc.VaList(bp+16, zModeType, zVal)) rc = SQLITE_ERROR goto parse_uri_out __38: @@ -115167,7 +115225,7 @@ if !(mode&libc.CplInt32(SQLITE_OPEN_MEMORY) > limit) { goto __39 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24269, + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24316, libc.VaList(bp+32, zModeType, zVal)) rc = SQLITE_PERM goto parse_uri_out @@ -115207,7 +115265,7 @@ if !(*(*uintptr)(unsafe.Pointer(ppVfs)) == uintptr(0)) { goto __42 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24293, libc.VaList(bp+48, zVfs)) + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+24340, libc.VaList(bp+48, 
zVfs)) rc = SQLITE_ERROR __42: ; @@ -115231,14 +115289,14 @@ } var aCacheMode = [3]OpenMode{ - {Fz: ts + 24309, Fmode: SQLITE_OPEN_SHAREDCACHE}, - {Fz: ts + 24316, Fmode: SQLITE_OPEN_PRIVATECACHE}, + {Fz: ts + 24356, Fmode: SQLITE_OPEN_SHAREDCACHE}, + {Fz: ts + 24363, Fmode: SQLITE_OPEN_PRIVATECACHE}, {}, } var aOpenMode = [5]OpenMode{ - {Fz: ts + 24324, Fmode: SQLITE_OPEN_READONLY}, - {Fz: ts + 24327, Fmode: SQLITE_OPEN_READWRITE}, - {Fz: ts + 24330, Fmode: SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE}, + {Fz: ts + 24371, Fmode: SQLITE_OPEN_READONLY}, + {Fz: ts + 24374, Fmode: SQLITE_OPEN_READWRITE}, + {Fz: ts + 24377, Fmode: SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE}, {Fz: ts + 17346, Fmode: SQLITE_OPEN_MEMORY}, {}, } @@ -115385,10 +115443,10 @@ createCollation(tls, db, uintptr(unsafe.Pointer(&Xsqlite3StrBINARY)), uint8(SQLITE_UTF16LE), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr, int32, uintptr) int32 }{binCollFunc})), uintptr(0)) - createCollation(tls, db, ts+21847, uint8(SQLITE_UTF8), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + createCollation(tls, db, ts+21894, uint8(SQLITE_UTF8), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr, int32, uintptr) int32 }{nocaseCollatingFunc})), uintptr(0)) - createCollation(tls, db, ts+24334, uint8(SQLITE_UTF8), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + createCollation(tls, db, ts+24381, uint8(SQLITE_UTF8), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr, int32, uintptr) int32 }{rtrimCollFunc})), uintptr(0)) if !((*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0) { @@ -115402,7 +115460,7 @@ if !(int32(1)<<(*(*uint32)(unsafe.Pointer(bp + 8))&uint32(7))&0x46 == 0) { goto __16 } - rc = Xsqlite3MisuseError(tls, 177308) + rc = Xsqlite3MisuseError(tls, 177397) goto __17 __16: rc = Xsqlite3ParseUri(tls, zVfs, zFilename, bp+8, db, bp+16, bp+24) @@ -115455,7 +115513,7 @@ (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb)).FzDbSName = ts + 6425 (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb)).Fsafety_level = U8(SQLITE_DEFAULT_SYNCHRONOUS + 1) - (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + 1*32)).FzDbSName = ts + 23336 + (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + 1*32)).FzDbSName = ts + 23383 (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + 1*32)).Fsafety_level = U8(PAGER_SYNCHRONOUS_OFF) (*Sqlite3)(unsafe.Pointer(db)).FeOpenState = U8(SQLITE_STATE_OPEN) @@ -115560,7 +115618,7 @@ return rc } if zFilename == uintptr(0) { - zFilename = ts + 24340 + zFilename = ts + 24387 } pVal = Xsqlite3ValueNew(tls, uintptr(0)) Xsqlite3ValueSetStr(tls, pVal, -1, zFilename, uint8(SQLITE_UTF16LE), uintptr(0)) @@ -115663,21 +115721,21 @@ bp := tls.Alloc(24) defer tls.Free(24) - Xsqlite3_log(tls, iErr, ts+24343, + Xsqlite3_log(tls, iErr, ts+24390, libc.VaList(bp, zType, lineno, uintptr(20)+Xsqlite3_sourceid(tls))) return iErr } func Xsqlite3CorruptError(tls *libc.TLS, lineno int32) int32 { - return Xsqlite3ReportError(tls, SQLITE_CORRUPT, lineno, ts+24368) + return Xsqlite3ReportError(tls, SQLITE_CORRUPT, lineno, ts+24415) } func Xsqlite3MisuseError(tls *libc.TLS, lineno int32) int32 { - return Xsqlite3ReportError(tls, SQLITE_MISUSE, lineno, ts+24388) + return Xsqlite3ReportError(tls, SQLITE_MISUSE, lineno, ts+24435) } func Xsqlite3CantopenError(tls *libc.TLS, lineno int32) int32 { - return Xsqlite3ReportError(tls, SQLITE_CANTOPEN, lineno, ts+24395) + return Xsqlite3ReportError(tls, 
SQLITE_CANTOPEN, lineno, ts+24442) } // This is a convenience routine that makes sure that all thread-specific @@ -115835,7 +115893,7 @@ goto __20 } Xsqlite3DbFree(tls, db, *(*uintptr)(unsafe.Pointer(bp + 24))) - *(*uintptr)(unsafe.Pointer(bp + 24)) = Xsqlite3MPrintf(tls, db, ts+24412, libc.VaList(bp, zTableName, + *(*uintptr)(unsafe.Pointer(bp + 24)) = Xsqlite3MPrintf(tls, db, ts+24459, libc.VaList(bp, zTableName, zColumnName)) rc = SQLITE_ERROR __20: @@ -116491,7 +116549,7 @@ azCompileOpt = Xsqlite3CompileOptions(tls, bp) - if Xsqlite3_strnicmp(tls, zOptName, ts+24440, 7) == 0 { + if Xsqlite3_strnicmp(tls, zOptName, ts+24487, 7) == 0 { zOptName += uintptr(7) } n = Xsqlite3Strlen30(tls, zOptName) @@ -116609,7 +116667,7 @@ Xsqlite3ErrorWithMsg(tls, db, rc, func() uintptr { if rc != 0 { - return ts + 24448 + return ts + 24495 } return uintptr(0) }(), 0) @@ -116787,7 +116845,7 @@ type JsonParse = JsonParse1 var jsonType = [8]uintptr{ - ts + 6165, ts + 6748, ts + 6753, ts + 6175, ts + 6170, ts + 7989, ts + 24471, ts + 24477, + ts + 6165, ts + 7678, ts + 7683, ts + 6175, ts + 6170, ts + 7989, ts + 24518, ts + 24524, } func jsonZero(tls *libc.TLS, p uintptr) { @@ -116940,7 +116998,7 @@ *(*int8)(unsafe.Pointer((*JsonString)(unsafe.Pointer(p)).FzBuf + uintptr(libc.PostIncUint64(&(*JsonString)(unsafe.Pointer(p)).FnUsed, 1)))) = int8('0') *(*int8)(unsafe.Pointer((*JsonString)(unsafe.Pointer(p)).FzBuf + uintptr(libc.PostIncUint64(&(*JsonString)(unsafe.Pointer(p)).FnUsed, 1)))) = int8('0') *(*int8)(unsafe.Pointer((*JsonString)(unsafe.Pointer(p)).FzBuf + uintptr(libc.PostIncUint64(&(*JsonString)(unsafe.Pointer(p)).FnUsed, 1)))) = int8('0' + int32(c)>>4) - c = uint8(*(*int8)(unsafe.Pointer(ts + 24484 + uintptr(int32(c)&0xf)))) + c = uint8(*(*int8)(unsafe.Pointer(ts + 24531 + uintptr(int32(c)&0xf)))) __8: ; __6: @@ -116995,7 +117053,7 @@ default: { if int32((*JsonString)(unsafe.Pointer(p)).FbErr) == 0 { - Xsqlite3_result_error(tls, (*JsonString)(unsafe.Pointer(p)).FpCtx, ts+24501, -1) + Xsqlite3_result_error(tls, (*JsonString)(unsafe.Pointer(p)).FpCtx, ts+24548, -1) (*JsonString)(unsafe.Pointer(p)).FbErr = U8(2) jsonReset(tls, p) } @@ -117059,13 +117117,13 @@ } case JSON_TRUE: { - jsonAppendRaw(tls, pOut, ts+6748, uint32(4)) + jsonAppendRaw(tls, pOut, ts+7678, uint32(4)) break } case JSON_FALSE: { - jsonAppendRaw(tls, pOut, ts+6753, uint32(5)) + jsonAppendRaw(tls, pOut, ts+7683, uint32(5)) break } @@ -117615,12 +117673,12 @@ jsonParseAddNode(tls, pParse, uint32(JSON_NULL), uint32(0), uintptr(0)) return int32(i + U32(4)) } else if int32(c) == 't' && - libc.Xstrncmp(tls, z+uintptr(i), ts+6748, uint64(4)) == 0 && + libc.Xstrncmp(tls, z+uintptr(i), ts+7678, uint64(4)) == 0 && !(int32(Xsqlite3CtypeMap[uint8(*(*int8)(unsafe.Pointer(z + uintptr(i+U32(4)))))])&0x06 != 0) { jsonParseAddNode(tls, pParse, uint32(JSON_TRUE), uint32(0), uintptr(0)) return int32(i + U32(4)) } else if int32(c) == 'f' && - libc.Xstrncmp(tls, z+uintptr(i), ts+6753, uint64(5)) == 0 && + libc.Xstrncmp(tls, z+uintptr(i), ts+7683, uint64(5)) == 0 && !(int32(Xsqlite3CtypeMap[uint8(*(*int8)(unsafe.Pointer(z + uintptr(i+U32(5)))))])&0x06 != 0) { jsonParseAddNode(tls, pParse, uint32(JSON_FALSE), uint32(0), uintptr(0)) return int32(i + U32(5)) @@ -117721,7 +117779,7 @@ if (*JsonParse)(unsafe.Pointer(pParse)).Foom != 0 { Xsqlite3_result_error_nomem(tls, pCtx) } else { - Xsqlite3_result_error(tls, pCtx, ts+24530, -1) + Xsqlite3_result_error(tls, pCtx, ts+24577, -1) } } jsonParseReset(tls, pParse) @@ -118027,7 +118085,7 @@ } if 
int32(*(*int8)(unsafe.Pointer(zPath))) == '.' { jsonParseAddNode(tls, pParse, uint32(JSON_OBJECT), uint32(0), uintptr(0)) - } else if libc.Xstrncmp(tls, zPath, ts+24545, uint64(3)) == 0 { + } else if libc.Xstrncmp(tls, zPath, ts+24592, uint64(3)) == 0 { jsonParseAddNode(tls, pParse, uint32(JSON_ARRAY), uint32(0), uintptr(0)) } else { return uintptr(0) @@ -118042,7 +118100,7 @@ bp := tls.Alloc(8) defer tls.Free(8) - return Xsqlite3_mprintf(tls, ts+24549, libc.VaList(bp, zErr)) + return Xsqlite3_mprintf(tls, ts+24596, libc.VaList(bp, zErr)) } func jsonLookup(tls *libc.TLS, pParse uintptr, zPath uintptr, pApnd uintptr, pCtx uintptr) uintptr { @@ -118096,7 +118154,7 @@ bp := tls.Alloc(8) defer tls.Free(8) - var zMsg uintptr = Xsqlite3_mprintf(tls, ts+24575, + var zMsg uintptr = Xsqlite3_mprintf(tls, ts+24622, libc.VaList(bp, zFuncName)) Xsqlite3_result_error(tls, pCtx, zMsg, -1) Xsqlite3_free(tls, zMsg) @@ -118201,11 +118259,11 @@ if int32(*(*int8)(unsafe.Pointer(zPath))) != '$' { jsonInit(tls, bp, ctx) if int32(Xsqlite3CtypeMap[uint8(*(*int8)(unsafe.Pointer(zPath)))])&0x04 != 0 { - jsonAppendRaw(tls, bp, ts+24618, uint32(2)) + jsonAppendRaw(tls, bp, ts+24665, uint32(2)) jsonAppendRaw(tls, bp, zPath, uint32(int32(libc.Xstrlen(tls, zPath)))) jsonAppendRaw(tls, bp, ts+4982, uint32(2)) } else { - jsonAppendRaw(tls, bp, ts+24621, uint32(1+libc.Bool32(int32(*(*int8)(unsafe.Pointer(zPath))) != '['))) + jsonAppendRaw(tls, bp, ts+24668, uint32(1+libc.Bool32(int32(*(*int8)(unsafe.Pointer(zPath))) != '['))) jsonAppendRaw(tls, bp, zPath, uint32(int32(libc.Xstrlen(tls, zPath)))) jsonAppendChar(tls, bp, int8(0)) } @@ -118362,14 +118420,14 @@ if argc&1 != 0 { Xsqlite3_result_error(tls, ctx, - ts+24624, -1) + ts+24671, -1) return } jsonInit(tls, bp, ctx) jsonAppendChar(tls, bp, int8('{')) for i = 0; i < argc; i = i + 2 { if Xsqlite3_value_type(tls, *(*uintptr)(unsafe.Pointer(argv + uintptr(i)*8))) != SQLITE_TEXT { - Xsqlite3_result_error(tls, ctx, ts+24675, -1) + Xsqlite3_result_error(tls, ctx, ts+24722, -1) jsonReset(tls, bp) return } @@ -118539,9 +118597,9 @@ } jsonWrongNumArgs(tls, ctx, func() uintptr { if bIsSet != 0 { - return ts + 24709 + return ts + 24756 } - return ts + 24713 + return ts + 24760 }()) return __2: @@ -118674,7 +118732,7 @@ (*JsonString)(unsafe.Pointer(pStr)).FnUsed-- } } else { - Xsqlite3_result_text(tls, ctx, ts+24720, 2, uintptr(0)) + Xsqlite3_result_text(tls, ctx, ts+24767, 2, uintptr(0)) } Xsqlite3_result_subtype(tls, ctx, uint32(JSON_SUBTYPE)) } @@ -118771,7 +118829,7 @@ (*JsonString)(unsafe.Pointer(pStr)).FnUsed-- } } else { - Xsqlite3_result_text(tls, ctx, ts+24723, 2, uintptr(0)) + Xsqlite3_result_text(tls, ctx, ts+24770, 2, uintptr(0)) } Xsqlite3_result_subtype(tls, ctx, uint32(JSON_SUBTYPE)) } @@ -118815,7 +118873,7 @@ _ = argc _ = pAux rc = Xsqlite3_declare_vtab(tls, db, - ts+24726) + ts+24773) if rc == SQLITE_OK { pNew = libc.AssignPtrUintptr(ppVtab, Xsqlite3_malloc(tls, int32(unsafe.Sizeof(Sqlite3_vtab{})))) if pNew == uintptr(0) { @@ -118946,7 +119004,7 @@ nn = nn - 2 } } - jsonPrintf(tls, nn+2, pStr, ts+24809, libc.VaList(bp, nn, z)) + jsonPrintf(tls, nn+2, pStr, ts+24856, libc.VaList(bp, nn, z)) } func jsonEachComputePath(tls *libc.TLS, p uintptr, pStr uintptr, i U32) { @@ -118965,7 +119023,7 @@ pNode = (*JsonEachCursor)(unsafe.Pointer(p)).FsParse.FaNode + uintptr(i)*16 pUp = (*JsonEachCursor)(unsafe.Pointer(p)).FsParse.FaNode + uintptr(iUp)*16 if int32((*JsonNode)(unsafe.Pointer(pUp)).FeType) == JSON_ARRAY { - jsonPrintf(tls, 30, pStr, ts+24815, libc.VaList(bp, 
*(*U32)(unsafe.Pointer(pUp + 8)))) + jsonPrintf(tls, 30, pStr, ts+24862, libc.VaList(bp, *(*U32)(unsafe.Pointer(pUp + 8)))) } else { if int32((*JsonNode)(unsafe.Pointer(pNode)).FjnFlags)&JNODE_LABEL == 0 { pNode -= 16 @@ -119061,7 +119119,7 @@ jsonAppendChar(tls, bp+8, int8('$')) } if int32((*JsonEachCursor)(unsafe.Pointer(p)).FeType) == JSON_ARRAY { - jsonPrintf(tls, 30, bp+8, ts+24815, libc.VaList(bp, (*JsonEachCursor)(unsafe.Pointer(p)).FiRowid)) + jsonPrintf(tls, 30, bp+8, ts+24862, libc.VaList(bp, (*JsonEachCursor)(unsafe.Pointer(p)).FiRowid)) } else if int32((*JsonEachCursor)(unsafe.Pointer(p)).FeType) == JSON_OBJECT { jsonAppendObjectPathElement(tls, bp+8, pThis) } @@ -119085,7 +119143,7 @@ { var zRoot uintptr = (*JsonEachCursor)(unsafe.Pointer(p)).FzRoot if zRoot == uintptr(0) { - zRoot = ts + 24820 + zRoot = ts + 24867 } Xsqlite3_result_text(tls, ctx, zRoot, -1, uintptr(0)) break @@ -119207,7 +119265,7 @@ var rc int32 = SQLITE_NOMEM if int32((*JsonEachCursor)(unsafe.Pointer(p)).FsParse.Foom) == 0 { Xsqlite3_free(tls, (*Sqlite3_vtab)(unsafe.Pointer((*Sqlite3_vtab_cursor)(unsafe.Pointer(cur)).FpVtab)).FzErrMsg) - (*Sqlite3_vtab)(unsafe.Pointer((*Sqlite3_vtab_cursor)(unsafe.Pointer(cur)).FpVtab)).FzErrMsg = Xsqlite3_mprintf(tls, ts+24530, 0) + (*Sqlite3_vtab)(unsafe.Pointer((*Sqlite3_vtab_cursor)(unsafe.Pointer(cur)).FpVtab)).FzErrMsg = Xsqlite3_mprintf(tls, ts+24577, 0) if (*Sqlite3_vtab)(unsafe.Pointer((*Sqlite3_vtab_cursor)(unsafe.Pointer(cur)).FpVtab)).FzErrMsg != 0 { rc = SQLITE_ERROR } @@ -119302,25 +119360,25 @@ } var aJsonFunc = [19]FuncDef{ - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24822}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24827}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24838}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24838}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24856}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_JSON)), FxSFunc: 0, FzName: ts + 24869}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_SQL)), FxSFunc: 0, FzName: ts + 24872}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24876}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24888}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24900}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24911}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24922}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | 
SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24934}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_ISSET)), FxSFunc: 0, FzName: ts + 24947}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24956}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24956}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24966}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_UTF8 | 0*SQLITE_FUNC_NEEDCOLL | SQLITE_SUBTYPE | SQLITE_UTF8 | SQLITE_DETERMINISTIC), FxSFunc: 0, FxFinalize: 0, FxValue: 0, FxInverse: 0, FzName: ts + 24977}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_UTF8 | 0*SQLITE_FUNC_NEEDCOLL | SQLITE_SUBTYPE | SQLITE_UTF8 | SQLITE_DETERMINISTIC), FxSFunc: 0, FxFinalize: 0, FxValue: 0, FxInverse: 0, FzName: ts + 24994}} + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24869}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24874}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24885}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24885}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24903}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_JSON)), FxSFunc: 0, FzName: ts + 24916}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_SQL)), FxSFunc: 0, FzName: ts + 24919}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24923}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24935}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24947}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24958}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24969}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 24981}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_ISSET)), FxSFunc: 0, FzName: ts + 24994}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25003}, + {FnArg: int8(2), 
FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25003}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 25013}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_UTF8 | 0*SQLITE_FUNC_NEEDCOLL | SQLITE_SUBTYPE | SQLITE_UTF8 | SQLITE_DETERMINISTIC), FxSFunc: 0, FxFinalize: 0, FxValue: 0, FxInverse: 0, FzName: ts + 25024}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_UTF8 | 0*SQLITE_FUNC_NEEDCOLL | SQLITE_SUBTYPE | SQLITE_UTF8 | SQLITE_DETERMINISTIC), FxSFunc: 0, FxFinalize: 0, FxValue: 0, FxInverse: 0, FzName: ts + 25041}} // Register the JSON table-valued functions func Xsqlite3JsonTableFunctions(tls *libc.TLS, db uintptr) int32 { @@ -119339,8 +119397,8 @@ FzName uintptr FpModule uintptr }{ - {FzName: ts + 25012, FpModule: 0}, - {FzName: ts + 25022, FpModule: 0}, + {FzName: ts + 25059, FpModule: 0}, + {FzName: ts + 25069, FpModule: 0}, } type Rtree1 = struct { @@ -119600,11 +119658,11 @@ } } if (*Rtree)(unsafe.Pointer(pRtree)).FpNodeBlob == uintptr(0) { - var zTab uintptr = Xsqlite3_mprintf(tls, ts+25032, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) + var zTab uintptr = Xsqlite3_mprintf(tls, ts+25079, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) if zTab == uintptr(0) { return SQLITE_NOMEM } - rc = Xsqlite3_blob_open(tls, (*Rtree)(unsafe.Pointer(pRtree)).Fdb, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, zTab, ts+25040, iNode, 0, + rc = Xsqlite3_blob_open(tls, (*Rtree)(unsafe.Pointer(pRtree)).Fdb, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, zTab, ts+25087, iNode, 0, pRtree+112) Xsqlite3_free(tls, zTab) } @@ -119815,7 +119873,7 @@ var pRtree uintptr = pVtab var rc int32 var zCreate uintptr = Xsqlite3_mprintf(tls, - ts+25045, + ts+25092, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) @@ -120518,7 +120576,7 @@ var pSrc uintptr var pInfo uintptr - pSrc = Xsqlite3_value_pointer(tls, pValue, ts+25127) + pSrc = Xsqlite3_value_pointer(tls, pValue, ts+25174) if pSrc == uintptr(0) { return SQLITE_ERROR } @@ -121859,7 +121917,7 @@ var zSql uintptr var rc int32 - zSql = Xsqlite3_mprintf(tls, ts+25141, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) + zSql = Xsqlite3_mprintf(tls, ts+25188, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) if zSql != 0 { rc = Xsqlite3_prepare_v2(tls, (*Rtree)(unsafe.Pointer(pRtree)).Fdb, zSql, -1, bp+56, uintptr(0)) } else { @@ -121871,12 +121929,12 @@ if iCol == 0 { var zCol uintptr = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), 0) (*Rtree)(unsafe.Pointer(pRtree)).Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+25161, libc.VaList(bp+16, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zCol)) + ts+25208, libc.VaList(bp+16, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zCol)) } else { var zCol1 uintptr = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), iCol) var zCol2 uintptr = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), iCol+1) (*Rtree)(unsafe.Pointer(pRtree)).Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+25193, libc.VaList(bp+32, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zCol1, zCol2)) + ts+25240, libc.VaList(bp+32, 
(*Rtree)(unsafe.Pointer(pRtree)).FzName, zCol1, zCol2)) } } @@ -122102,7 +122160,7 @@ var pRtree uintptr = pVtab var rc int32 = SQLITE_NOMEM var zSql uintptr = Xsqlite3_mprintf(tls, - ts+25230, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName)) + ts+25277, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName)) if zSql != 0 { nodeBlobReset(tls, pRtree) rc = Xsqlite3_exec(tls, (*Rtree)(unsafe.Pointer(pRtree)).Fdb, zSql, uintptr(0), uintptr(0), uintptr(0)) @@ -122125,7 +122183,7 @@ bp := tls.Alloc(24) defer tls.Free(24) - var zFmt uintptr = ts + 25375 + var zFmt uintptr = ts + 25422 var zSql uintptr var rc int32 @@ -122173,7 +122231,7 @@ } var azName1 = [3]uintptr{ - ts + 25431, ts + 5044, ts + 16251, + ts + 25478, ts + 5044, ts + 16251, } var rtreeModule = Sqlite3_module{ @@ -122216,19 +122274,19 @@ var p uintptr = Xsqlite3_str_new(tls, db) var ii int32 Xsqlite3_str_appendf(tls, p, - ts+25436, + ts+25483, libc.VaList(bp, zDb, zPrefix)) for ii = 0; ii < int32((*Rtree)(unsafe.Pointer(pRtree)).FnAux); ii++ { - Xsqlite3_str_appendf(tls, p, ts+25498, libc.VaList(bp+16, ii)) + Xsqlite3_str_appendf(tls, p, ts+25545, libc.VaList(bp+16, ii)) } Xsqlite3_str_appendf(tls, p, - ts+25503, + ts+25550, libc.VaList(bp+24, zDb, zPrefix)) Xsqlite3_str_appendf(tls, p, - ts+25567, + ts+25614, libc.VaList(bp+40, zDb, zPrefix)) Xsqlite3_str_appendf(tls, p, - ts+25637, + ts+25684, libc.VaList(bp+56, zDb, zPrefix, (*Rtree)(unsafe.Pointer(pRtree)).FiNodeSize)) zCreate = Xsqlite3_str_finish(tls, p) if !(zCreate != 0) { @@ -122257,7 +122315,7 @@ if i != 3 || int32((*Rtree)(unsafe.Pointer(pRtree)).FnAux) == 0 { zFormat = azSql[i] } else { - zFormat = ts + 25686 + zFormat = ts + 25733 } zSql = Xsqlite3_mprintf(tls, zFormat, libc.VaList(bp+80, zDb, zPrefix)) if zSql != 0 { @@ -122269,7 +122327,7 @@ } if (*Rtree)(unsafe.Pointer(pRtree)).FnAux != 0 { (*Rtree)(unsafe.Pointer(pRtree)).FzReadAuxSql = Xsqlite3_mprintf(tls, - ts+25794, + ts+25841, libc.VaList(bp+96, zDb, zPrefix)) if (*Rtree)(unsafe.Pointer(pRtree)).FzReadAuxSql == uintptr(0) { rc = SQLITE_NOMEM @@ -122277,18 +122335,18 @@ var p uintptr = Xsqlite3_str_new(tls, db) var ii int32 var zSql uintptr - Xsqlite3_str_appendf(tls, p, ts+25839, libc.VaList(bp+112, zDb, zPrefix)) + Xsqlite3_str_appendf(tls, p, ts+25886, libc.VaList(bp+112, zDb, zPrefix)) for ii = 0; ii < int32((*Rtree)(unsafe.Pointer(pRtree)).FnAux); ii++ { if ii != 0 { Xsqlite3_str_append(tls, p, ts+12751, 1) } if ii < int32((*Rtree)(unsafe.Pointer(pRtree)).FnAuxNotNull) { - Xsqlite3_str_appendf(tls, p, ts+25866, libc.VaList(bp+128, ii, ii+2, ii)) + Xsqlite3_str_appendf(tls, p, ts+25913, libc.VaList(bp+128, ii, ii+2, ii)) } else { - Xsqlite3_str_appendf(tls, p, ts+25888, libc.VaList(bp+152, ii, ii+2)) + Xsqlite3_str_appendf(tls, p, ts+25935, libc.VaList(bp+152, ii, ii+2)) } } - Xsqlite3_str_appendf(tls, p, ts+25896, 0) + Xsqlite3_str_appendf(tls, p, ts+25943, 0) zSql = Xsqlite3_str_finish(tls, p) if zSql == uintptr(0) { rc = SQLITE_NOMEM @@ -122303,14 +122361,14 @@ } var azSql = [8]uintptr{ - ts + 25912, - ts + 25965, - ts + 26010, - ts + 26062, - 
ts + 26116, - ts + 26161, - ts + 26219, - ts + 26274, + ts + 25959, + ts + 26012, + ts + 26057, + ts + 26109, + ts + 26163, + ts + 26208, + ts + 26266, + ts + 26321, } func getIntFromStmt(tls *libc.TLS, db uintptr, zSql uintptr, piVal uintptr) int32 { @@ -122339,7 +122397,7 @@ var zSql uintptr if isCreate != 0 { *(*int32)(unsafe.Pointer(bp + 48)) = 0 - zSql = Xsqlite3_mprintf(tls, ts+26321, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb)) + zSql = Xsqlite3_mprintf(tls, ts+26368, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb)) rc = getIntFromStmt(tls, db, zSql, bp+48) if rc == SQLITE_OK { (*Rtree)(unsafe.Pointer(pRtree)).FiNodeSize = *(*int32)(unsafe.Pointer(bp + 48)) - 64 @@ -122351,7 +122409,7 @@ } } else { zSql = Xsqlite3_mprintf(tls, - ts+26341, + ts+26388, libc.VaList(bp+16, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) rc = getIntFromStmt(tls, db, zSql, pRtree+32) if rc != SQLITE_OK { @@ -122359,7 +122417,7 @@ } else if (*Rtree)(unsafe.Pointer(pRtree)).FiNodeSize < 512-64 { rc = SQLITE_CORRUPT | int32(1)<<8 - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+26398, + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+26445, libc.VaList(bp+40, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) } } @@ -122401,10 +122459,10 @@ ii = 4 *(*[5]uintptr)(unsafe.Pointer(bp + 96)) = [5]uintptr{ uintptr(0), - ts + 26433, - ts + 26476, - ts + 26511, - ts + 26547, + ts + 26480, + ts + 26523, + ts + 26558, + ts + 26594, } if !(argc < 6 || argc > RTREE_MAX_AUX_COLUMN+3) { @@ -122435,7 +122493,7 @@ libc.Xmemcpy(tls, (*Rtree)(unsafe.Pointer(pRtree)).FzName, *(*uintptr)(unsafe.Pointer(argv + 2*8)), uint64(nName)) pSql = Xsqlite3_str_new(tls, db) - Xsqlite3_str_appendf(tls, pSql, ts+26584, + Xsqlite3_str_appendf(tls, pSql, ts+26631, libc.VaList(bp+16, rtreeTokenLength(tls, *(*uintptr)(unsafe.Pointer(argv + 3*8))), *(*uintptr)(unsafe.Pointer(argv + 3*8)))) ii = 4 __3: @@ -122447,7 +122505,7 @@ goto __6 } (*Rtree)(unsafe.Pointer(pRtree)).FnAux++ - Xsqlite3_str_appendf(tls, pSql, ts+26608, libc.VaList(bp+32, rtreeTokenLength(tls, zArg+uintptr(1)), zArg+uintptr(1))) + Xsqlite3_str_appendf(tls, pSql, ts+26655, libc.VaList(bp+32, rtreeTokenLength(tls, zArg+uintptr(1)), zArg+uintptr(1))) goto __7 __6: if !(int32((*Rtree)(unsafe.Pointer(pRtree)).FnAux) > 0) { @@ -122470,7 +122528,7 @@ goto __5 __5: ; - Xsqlite3_str_appendf(tls, pSql, ts+26614, 0) + Xsqlite3_str_appendf(tls, pSql, ts+26661, 0) zSql = Xsqlite3_str_finish(tls, pSql) if !!(zSql != 0) { goto __10 @@ -122566,7 +122624,7 @@ return rc } -var azFormat = [2]uintptr{ts + 26617, ts + 26628} +var azFormat = [2]uintptr{ts + 26664, ts + 26675} func rtreenode(tls *libc.TLS, ctx uintptr, nArg int32, apArg uintptr) { bp := tls.Alloc(1072) @@ -122606,11 +122664,11 @@ if ii > 0 { Xsqlite3_str_append(tls, pOut, ts+10904, 1) } - Xsqlite3_str_appendf(tls, pOut, ts+26638, libc.VaList(bp, (*RtreeCell)(unsafe.Pointer(bp+1024)).FiRowid)) + Xsqlite3_str_appendf(tls, pOut, ts+26685, libc.VaList(bp, (*RtreeCell)(unsafe.Pointer(bp+1024)).FiRowid)) for jj = 0; jj < int32((*Rtree)(unsafe.Pointer(bp+56)).FnDim2); jj++ { - Xsqlite3_str_appendf(tls, pOut, ts+26644, libc.VaList(bp+8, float64(*(*RtreeValue)(unsafe.Pointer(bp + 1024 + 8 + uintptr(jj)*4))))) + Xsqlite3_str_appendf(tls, pOut, ts+26691, libc.VaList(bp+8, float64(*(*RtreeValue)(unsafe.Pointer(bp + 1024 + 8 + uintptr(jj)*4))))) } - Xsqlite3_str_append(tls, pOut, ts+26648, 1) + Xsqlite3_str_append(tls, pOut, ts+26695, 1) } errCode = Xsqlite3_str_errcode(tls, 
pOut) Xsqlite3_result_text(tls, ctx, Xsqlite3_str_finish(tls, pOut), -1, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{Xsqlite3_free}))) @@ -122621,7 +122679,7 @@ _ = nArg if Xsqlite3_value_type(tls, *(*uintptr)(unsafe.Pointer(apArg))) != SQLITE_BLOB || Xsqlite3_value_bytes(tls, *(*uintptr)(unsafe.Pointer(apArg))) < 2 { - Xsqlite3_result_error(tls, ctx, ts+26650, -1) + Xsqlite3_result_error(tls, ctx, ts+26697, -1) } else { var zBlob uintptr = Xsqlite3_value_blob(tls, *(*uintptr)(unsafe.Pointer(apArg))) if zBlob != 0 { @@ -122699,7 +122757,7 @@ if z == uintptr(0) { (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc = SQLITE_NOMEM } else { - (*RtreeCheck)(unsafe.Pointer(pCheck)).FzReport = Xsqlite3_mprintf(tls, ts+26683, + (*RtreeCheck)(unsafe.Pointer(pCheck)).FzReport = Xsqlite3_mprintf(tls, ts+26730, libc.VaList(bp, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzReport, func() uintptr { if (*RtreeCheck)(unsafe.Pointer(pCheck)).FzReport != 0 { return ts + 4046 @@ -122723,7 +122781,7 @@ if (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc == SQLITE_OK && (*RtreeCheck)(unsafe.Pointer(pCheck)).FpGetNode == uintptr(0) { (*RtreeCheck)(unsafe.Pointer(pCheck)).FpGetNode = rtreeCheckPrepare(tls, pCheck, - ts+26690, + ts+26737, libc.VaList(bp, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzDb, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzTab)) } @@ -122742,7 +122800,7 @@ } rtreeCheckReset(tls, pCheck, (*RtreeCheck)(unsafe.Pointer(pCheck)).FpGetNode) if (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc == SQLITE_OK && pRet == uintptr(0) { - rtreeCheckAppendMsg(tls, pCheck, ts+26735, libc.VaList(bp+16, iNode)) + rtreeCheckAppendMsg(tls, pCheck, ts+26782, libc.VaList(bp+16, iNode)) } } @@ -122756,8 +122814,8 @@ var rc int32 var pStmt uintptr *(*[2]uintptr)(unsafe.Pointer(bp + 80)) = [2]uintptr{ - ts + 26767, - ts + 26821, + ts + 26814, + ts + 26868, } if *(*uintptr)(unsafe.Pointer(pCheck + 40 + uintptr(bLeaf)*8)) == uintptr(0) { @@ -122772,23 +122830,23 @@ Xsqlite3_bind_int64(tls, pStmt, 1, iKey) rc = Xsqlite3_step(tls, pStmt) if rc == SQLITE_DONE { - rtreeCheckAppendMsg(tls, pCheck, ts+26869, + rtreeCheckAppendMsg(tls, pCheck, ts+26916, libc.VaList(bp+16, iKey, iVal, func() uintptr { if bLeaf != 0 { - return ts + 26914 + return ts + 26961 } - return ts + 26922 + return ts + 26969 }())) } else if rc == SQLITE_ROW { var ii I64 = Xsqlite3_column_int64(tls, pStmt, 0) if ii != iVal { rtreeCheckAppendMsg(tls, pCheck, - ts+26931, + ts+26978, libc.VaList(bp+40, iKey, ii, func() uintptr { if bLeaf != 0 { - return ts + 26914 + return ts + 26961 } - return ts + 26922 + return ts + 26969 }(), iKey, iVal)) } } @@ -122812,7 +122870,7 @@ return libc.Bool32(*(*RtreeValue)(unsafe.Pointer(bp + 48)) > *(*RtreeValue)(unsafe.Pointer(bp + 52))) }() != 0 { rtreeCheckAppendMsg(tls, pCheck, - ts+26989, libc.VaList(bp, i, iCell, iNode)) + ts+27036, libc.VaList(bp, i, iCell, iNode)) } if pParent != 0 { @@ -122832,7 +122890,7 @@ return libc.Bool32(*(*RtreeValue)(unsafe.Pointer(bp + 52)) > *(*RtreeValue)(unsafe.Pointer(bp + 60))) }() != 0 { rtreeCheckAppendMsg(tls, pCheck, - ts+27037, libc.VaList(bp+24, i, iCell, iNode)) + ts+27084, libc.VaList(bp+24, i, iCell, iNode)) } } } @@ -122849,14 +122907,14 @@ if aNode != 0 { if *(*int32)(unsafe.Pointer(bp + 48)) < 4 { rtreeCheckAppendMsg(tls, pCheck, - ts+27104, libc.VaList(bp, iNode, *(*int32)(unsafe.Pointer(bp + 48)))) + ts+27151, libc.VaList(bp, iNode, *(*int32)(unsafe.Pointer(bp + 48)))) } else { var nCell int32 var i int32 if aParent == uintptr(0) { iDepth = readInt16(tls, aNode) if iDepth > 
RTREE_MAX_DEPTH { - rtreeCheckAppendMsg(tls, pCheck, ts+27138, libc.VaList(bp+16, iDepth)) + rtreeCheckAppendMsg(tls, pCheck, ts+27185, libc.VaList(bp+16, iDepth)) Xsqlite3_free(tls, aNode) return } @@ -122864,7 +122922,7 @@ nCell = readInt16(tls, aNode+2) if 4+nCell*(8+(*RtreeCheck)(unsafe.Pointer(pCheck)).FnDim*2*4) > *(*int32)(unsafe.Pointer(bp + 48)) { rtreeCheckAppendMsg(tls, pCheck, - ts+27168, + ts+27215, libc.VaList(bp+24, iNode, nCell, *(*int32)(unsafe.Pointer(bp + 48)))) } else { for i = 0; i < nCell; i++ { @@ -122893,14 +122951,14 @@ if (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc == SQLITE_OK { var pCount uintptr - pCount = rtreeCheckPrepare(tls, pCheck, ts+27223, + pCount = rtreeCheckPrepare(tls, pCheck, ts+27270, libc.VaList(bp, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzDb, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzTab, zTbl)) if pCount != 0 { if Xsqlite3_step(tls, pCount) == SQLITE_ROW { var nActual I64 = Xsqlite3_column_int64(tls, pCount, 0) if nActual != nExpect { rtreeCheckAppendMsg(tls, pCheck, - ts+27254, libc.VaList(bp+24, zTbl, nExpect, nActual)) + ts+27301, libc.VaList(bp+24, zTbl, nExpect, nActual)) } } (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc = Xsqlite3_finalize(tls, pCount) @@ -122927,7 +122985,7 @@ } if (*RtreeCheck)(unsafe.Pointer(bp+32)).Frc == SQLITE_OK { - pStmt = rtreeCheckPrepare(tls, bp+32, ts+27321, libc.VaList(bp, zDb, zTab)) + pStmt = rtreeCheckPrepare(tls, bp+32, ts+27368, libc.VaList(bp, zDb, zTab)) if pStmt != 0 { nAux = Xsqlite3_column_count(tls, pStmt) - 2 Xsqlite3_finalize(tls, pStmt) @@ -122936,12 +122994,12 @@ } } - pStmt = rtreeCheckPrepare(tls, bp+32, ts+25141, libc.VaList(bp+16, zDb, zTab)) + pStmt = rtreeCheckPrepare(tls, bp+32, ts+25188, libc.VaList(bp+16, zDb, zTab)) if pStmt != 0 { var rc int32 (*RtreeCheck)(unsafe.Pointer(bp + 32)).FnDim = (Xsqlite3_column_count(tls, pStmt) - 1 - nAux) / 2 if (*RtreeCheck)(unsafe.Pointer(bp+32)).FnDim < 1 { - rtreeCheckAppendMsg(tls, bp+32, ts+27349, 0) + rtreeCheckAppendMsg(tls, bp+32, ts+27396, 0) } else if SQLITE_ROW == Xsqlite3_step(tls, pStmt) { (*RtreeCheck)(unsafe.Pointer(bp + 32)).FbInt = libc.Bool32(Xsqlite3_column_type(tls, pStmt, 1) == SQLITE_INTEGER) } @@ -122955,8 +123013,8 @@ if (*RtreeCheck)(unsafe.Pointer(bp+32)).Frc == SQLITE_OK { rtreeCheckNode(tls, bp+32, 0, uintptr(0), int64(1)) } - rtreeCheckCount(tls, bp+32, ts+27380, int64((*RtreeCheck)(unsafe.Pointer(bp+32)).FnLeaf)) - rtreeCheckCount(tls, bp+32, ts+27387, int64((*RtreeCheck)(unsafe.Pointer(bp+32)).FnNonLeaf)) + rtreeCheckCount(tls, bp+32, ts+27427, int64((*RtreeCheck)(unsafe.Pointer(bp+32)).FnLeaf)) + rtreeCheckCount(tls, bp+32, ts+27434, int64((*RtreeCheck)(unsafe.Pointer(bp+32)).FnNonLeaf)) } Xsqlite3_finalize(tls, (*RtreeCheck)(unsafe.Pointer(bp+32)).FpGetNode) @@ -122964,7 +123022,7 @@ Xsqlite3_finalize(tls, *(*uintptr)(unsafe.Pointer(bp + 32 + 40 + 1*8))) if bEnd != 0 { - var rc int32 = Xsqlite3_exec(tls, db, ts+27395, uintptr(0), uintptr(0), uintptr(0)) + var rc int32 = Xsqlite3_exec(tls, db, ts+27442, uintptr(0), uintptr(0), uintptr(0)) if (*RtreeCheck)(unsafe.Pointer(bp+32)).Frc == SQLITE_OK { (*RtreeCheck)(unsafe.Pointer(bp + 32)).Frc = rc } @@ -122979,7 +123037,7 @@ if nArg != 1 && nArg != 2 { Xsqlite3_result_error(tls, ctx, - ts+27399, -1) + ts+27446, -1) } else { var rc int32 *(*uintptr)(unsafe.Pointer(bp)) = uintptr(0) @@ -122997,7 +123055,7 @@ if *(*uintptr)(unsafe.Pointer(bp)) != 0 { return *(*uintptr)(unsafe.Pointer(bp)) } - return ts + 17996 + return ts + 18043 }(), -1, libc.UintptrFromInt32(-1)) } else { 
Xsqlite3_result_error_code(tls, ctx, rc) @@ -123368,11 +123426,11 @@ var db uintptr = Xsqlite3_context_db_handle(tls, context) var x uintptr = Xsqlite3_str_new(tls, db) var i int32 - Xsqlite3_str_append(tls, x, ts+27450, 1) + Xsqlite3_str_append(tls, x, ts+27497, 1) for i = 0; i < (*GeoPoly)(unsafe.Pointer(p)).FnVertex; i++ { - Xsqlite3_str_appendf(tls, x, ts+27452, libc.VaList(bp, float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2)*4))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2+1)*4))))) + Xsqlite3_str_appendf(tls, x, ts+27499, libc.VaList(bp, float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2)*4))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2+1)*4))))) } - Xsqlite3_str_appendf(tls, x, ts+27463, libc.VaList(bp+16, float64(*(*GeoCoord)(unsafe.Pointer(p + 8))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + 1*4))))) + Xsqlite3_str_appendf(tls, x, ts+27510, libc.VaList(bp+16, float64(*(*GeoCoord)(unsafe.Pointer(p + 8))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + 1*4))))) Xsqlite3_result_text(tls, context, Xsqlite3_str_finish(tls, x), -1, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{Xsqlite3_free}))) Xsqlite3_free(tls, p) } @@ -123392,19 +123450,19 @@ var x uintptr = Xsqlite3_str_new(tls, db) var i int32 var cSep int8 = int8('\'') - Xsqlite3_str_appendf(tls, x, ts+27474, 0) + Xsqlite3_str_appendf(tls, x, ts+27521, 0) for i = 0; i < (*GeoPoly)(unsafe.Pointer(p)).FnVertex; i++ { - Xsqlite3_str_appendf(tls, x, ts+27492, libc.VaList(bp, int32(cSep), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2)*4))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2+1)*4))))) + Xsqlite3_str_appendf(tls, x, ts+27539, libc.VaList(bp, int32(cSep), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2)*4))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2+1)*4))))) cSep = int8(' ') } - Xsqlite3_str_appendf(tls, x, ts+27500, libc.VaList(bp+24, float64(*(*GeoCoord)(unsafe.Pointer(p + 8))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + 1*4))))) + Xsqlite3_str_appendf(tls, x, ts+27547, libc.VaList(bp+24, float64(*(*GeoCoord)(unsafe.Pointer(p + 8))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + 1*4))))) for i = 1; i < argc; i++ { var z uintptr = Xsqlite3_value_text(tls, *(*uintptr)(unsafe.Pointer(argv + uintptr(i)*8))) if z != 0 && *(*int8)(unsafe.Pointer(z)) != 0 { - Xsqlite3_str_appendf(tls, x, ts+27508, libc.VaList(bp+40, z)) + Xsqlite3_str_appendf(tls, x, ts+27555, libc.VaList(bp+40, z)) } } - Xsqlite3_str_appendf(tls, x, ts+27512, 0) + Xsqlite3_str_appendf(tls, x, ts+27559, 0) Xsqlite3_result_text(tls, context, Xsqlite3_str_finish(tls, x), -1, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{Xsqlite3_free}))) Xsqlite3_free(tls, p) } @@ -124324,7 +124382,7 @@ libc.Xmemcpy(tls, (*Rtree)(unsafe.Pointer(pRtree)).FzName, *(*uintptr)(unsafe.Pointer(argv + 2*8)), uint64(nName)) pSql = Xsqlite3_str_new(tls, db) - Xsqlite3_str_appendf(tls, pSql, ts+27525, 0) + Xsqlite3_str_appendf(tls, pSql, ts+27572, 0) (*Rtree)(unsafe.Pointer(pRtree)).FnAux = U8(1) (*Rtree)(unsafe.Pointer(pRtree)).FnAuxNotNull = U8(1) ii = 3 @@ -124333,7 +124391,7 @@ goto __4 } (*Rtree)(unsafe.Pointer(pRtree)).FnAux++ - Xsqlite3_str_appendf(tls, pSql, ts+27547, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(argv + uintptr(ii)*8)))) + Xsqlite3_str_appendf(tls, pSql, ts+27594, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(argv + uintptr(ii)*8)))) goto __3 __3: ii++ @@ -124341,7 +124399,7 @@ goto __4 __4: ; - Xsqlite3_str_appendf(tls, pSql, ts+26614, 0) + 
Xsqlite3_str_appendf(tls, pSql, ts+26661, 0) zSql = Xsqlite3_str_finish(tls, pSql) if !!(zSql != 0) { goto __5 @@ -124578,7 +124636,7 @@ } if iFuncTerm >= 0 { (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxNum = idxNum - (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxStr = ts + 27551 + (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxStr = ts + 27598 (*sqlite3_index_constraint_usage)(unsafe.Pointer((*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FaConstraintUsage + uintptr(iFuncTerm)*8)).FargvIndex = 1 (*sqlite3_index_constraint_usage)(unsafe.Pointer((*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FaConstraintUsage + uintptr(iFuncTerm)*8)).Fomit = uint8(0) (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FestimatedCost = 300.0 @@ -124586,7 +124644,7 @@ return SQLITE_OK } (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxNum = 4 - (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxStr = ts + 27557 + (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxStr = ts + 27604 (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FestimatedCost = 3000000.0 (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FestimatedRows = int64(100000) return SQLITE_OK @@ -124698,7 +124756,7 @@ if !(*(*int32)(unsafe.Pointer(bp + 48)) == SQLITE_ERROR) { goto __4 } - (*Sqlite3_vtab)(unsafe.Pointer(pVtab)).FzErrMsg = Xsqlite3_mprintf(tls, ts+27566, 0) + (*Sqlite3_vtab)(unsafe.Pointer(pVtab)).FzErrMsg = Xsqlite3_mprintf(tls, ts+27613, 0) __4: ; goto geopoly_update_end @@ -124830,14 +124888,14 @@ func geopolyFindFunction(tls *libc.TLS, pVtab uintptr, nArg int32, zName uintptr, pxFunc uintptr, ppArg uintptr) int32 { _ = pVtab _ = nArg - if Xsqlite3_stricmp(tls, zName, ts+27606) == 0 { + if Xsqlite3_stricmp(tls, zName, ts+27653) == 0 { *(*uintptr)(unsafe.Pointer(pxFunc)) = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{geopolyOverlapFunc})) *(*uintptr)(unsafe.Pointer(ppArg)) = uintptr(0) return SQLITE_INDEX_CONSTRAINT_FUNCTION } - if Xsqlite3_stricmp(tls, zName, ts+27622) == 0 { + if Xsqlite3_stricmp(tls, zName, ts+27669) == 0 { *(*uintptr)(unsafe.Pointer(pxFunc)) = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{geopolyWithinFunc})) @@ -124902,7 +124960,7 @@ uintptr(0), aAgg[i].FxStep, aAgg[i].FxFinal) } if rc == SQLITE_OK { - rc = Xsqlite3_create_module_v2(tls, db, ts+27637, uintptr(unsafe.Pointer(&geopolyModule)), uintptr(0), uintptr(0)) + rc = Xsqlite3_create_module_v2(tls, db, ts+27684, uintptr(unsafe.Pointer(&geopolyModule)), uintptr(0), uintptr(0)) } return rc } @@ -124914,25 +124972,25 @@ F__ccgo_pad1 [6]byte FzName uintptr }{ - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27645}, - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27658}, - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27671}, - {FxFunc: 0, FnArg: int8(-1), FbPure: uint8(1), FzName: ts + 27684}, - {FxFunc: 0, FnArg: int8(2), FbPure: uint8(1), FzName: ts + 27622}, - {FxFunc: 0, FnArg: int8(3), FbPure: uint8(1), FzName: ts + 27696}, - {FxFunc: 0, FnArg: int8(2), FbPure: uint8(1), FzName: ts + 27606}, - {FxFunc: 0, FnArg: int8(1), FzName: ts + 27719}, - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27733}, - {FxFunc: 0, FnArg: int8(7), FbPure: uint8(1), FzName: ts + 27746}, - {FxFunc: 0, FnArg: int8(4), FbPure: uint8(1), FzName: ts + 27760}, - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27776}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27692}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), 
FzName: ts + 27705}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27718}, + {FxFunc: 0, FnArg: int8(-1), FbPure: uint8(1), FzName: ts + 27731}, + {FxFunc: 0, FnArg: int8(2), FbPure: uint8(1), FzName: ts + 27669}, + {FxFunc: 0, FnArg: int8(3), FbPure: uint8(1), FzName: ts + 27743}, + {FxFunc: 0, FnArg: int8(2), FbPure: uint8(1), FzName: ts + 27653}, + {FxFunc: 0, FnArg: int8(1), FzName: ts + 27766}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27780}, + {FxFunc: 0, FnArg: int8(7), FbPure: uint8(1), FzName: ts + 27793}, + {FxFunc: 0, FnArg: int8(4), FbPure: uint8(1), FzName: ts + 27807}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 27823}, } var aAgg = [1]struct { FxStep uintptr FxFinal uintptr FzName uintptr }{ - {FxStep: 0, FxFinal: 0, FzName: ts + 27788}, + {FxStep: 0, FxFinal: 0, FzName: ts + 27835}, } // Register the r-tree module with database handle db. This creates the @@ -124942,26 +125000,26 @@ var utf8 int32 = SQLITE_UTF8 var rc int32 - rc = Xsqlite3_create_function(tls, db, ts+27807, 2, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + rc = Xsqlite3_create_function(tls, db, ts+27854, 2, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rtreenode})), uintptr(0), uintptr(0)) if rc == SQLITE_OK { - rc = Xsqlite3_create_function(tls, db, ts+27817, 1, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + rc = Xsqlite3_create_function(tls, db, ts+27864, 1, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rtreedepth})), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { - rc = Xsqlite3_create_function(tls, db, ts+27828, -1, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + rc = Xsqlite3_create_function(tls, db, ts+27875, -1, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rtreecheck})), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { var c uintptr = uintptr(RTREE_COORD_REAL32) - rc = Xsqlite3_create_module_v2(tls, db, ts+27551, uintptr(unsafe.Pointer(&rtreeModule)), c, uintptr(0)) + rc = Xsqlite3_create_module_v2(tls, db, ts+27598, uintptr(unsafe.Pointer(&rtreeModule)), c, uintptr(0)) } if rc == SQLITE_OK { var c uintptr = uintptr(RTREE_COORD_INT32) - rc = Xsqlite3_create_module_v2(tls, db, ts+27839, uintptr(unsafe.Pointer(&rtreeModule)), c, uintptr(0)) + rc = Xsqlite3_create_module_v2(tls, db, ts+27886, uintptr(unsafe.Pointer(&rtreeModule)), c, uintptr(0)) } if rc == SQLITE_OK { rc = sqlite3_geopoly_init(tls, db) @@ -125015,7 +125073,7 @@ Xsqlite3_result_error_nomem(tls, ctx) rtreeMatchArgFree(tls, pBlob) } else { - Xsqlite3_result_pointer(tls, ctx, pBlob, ts+25127, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{rtreeMatchArgFree}))) + Xsqlite3_result_pointer(tls, ctx, pBlob, ts+25174, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{rtreeMatchArgFree}))) } } } @@ -125342,7 +125400,7 @@ nOut = rbuDeltaOutputSize(tls, aDelta, nDelta) if nOut < 0 { - Xsqlite3_result_error(tls, context, ts+27849, -1) + Xsqlite3_result_error(tls, context, ts+27896, -1) return } @@ -125353,7 +125411,7 @@ nOut2 = rbuDeltaApply(tls, aOrig, nOrig, aDelta, nDelta, aOut) if nOut2 != nOut { Xsqlite3_free(tls, aOut) - Xsqlite3_result_error(tls, context, ts+27849, -1) + Xsqlite3_result_error(tls, context, ts+27896, -1) } else { Xsqlite3_result_blob(tls, context, aOut, nOut, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{Xsqlite3_free}))) } @@ -125454,7 
+125512,7 @@ rbuObjIterClearStatements(tls, pIter) if (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx == uintptr(0) { rc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+27870, uintptr(0), uintptr(0), p+64) + ts+27917, uintptr(0), uintptr(0), p+64) } if rc == SQLITE_OK { @@ -125518,7 +125576,7 @@ Xsqlite3_result_text(tls, pCtx, zIn, -1, uintptr(0)) } } else { - if libc.Xstrlen(tls, zIn) > uint64(4) && libc.Xmemcmp(tls, ts+25040, zIn, uint64(4)) == 0 { + if libc.Xstrlen(tls, zIn) > uint64(4) && libc.Xmemcmp(tls, ts+25087, zIn, uint64(4)) == 0 { var i int32 for i = 4; int32(*(*int8)(unsafe.Pointer(zIn + uintptr(i)))) >= '0' && int32(*(*int8)(unsafe.Pointer(zIn + uintptr(i)))) <= '9'; i++ { } @@ -125539,16 +125597,16 @@ rc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, pIter, p+64, Xsqlite3_mprintf(tls, - ts+28041, libc.VaList(bp, func() uintptr { + ts+28088, libc.VaList(bp, func() uintptr { if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - return ts + 28191 + return ts + 28238 } return ts + 1538 }()))) if rc == SQLITE_OK { rc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+8, p+64, - ts+28232) + ts+28279) } (*RbuObjIter)(unsafe.Pointer(pIter)).FbCleanup = 1 @@ -125664,7 +125722,7 @@ (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+32, p+64, Xsqlite3_mprintf(tls, - ts+28357, libc.VaList(bp, zTab))) + ts+28404, libc.VaList(bp, zTab))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc != SQLITE_OK || Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 32))) != SQLITE_ROW) { goto __1 } @@ -125682,7 +125740,7 @@ *(*int32)(unsafe.Pointer(piTnum)) = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 32)), 1) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+32+1*8, p+64, - Xsqlite3_mprintf(tls, ts+28476, libc.VaList(bp+8, zTab))) + Xsqlite3_mprintf(tls, ts+28523, libc.VaList(bp+8, zTab))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc != 0) { goto __3 } @@ -125700,7 +125758,7 @@ } (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+32+2*8, p+64, Xsqlite3_mprintf(tls, - ts+28497, libc.VaList(bp+16, zIdx))) + ts+28544, libc.VaList(bp+16, zIdx))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK) { goto __7 } @@ -125723,7 +125781,7 @@ __5: ; (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+32+3*8, p+64, - Xsqlite3_mprintf(tls, ts+28548, libc.VaList(bp+24, zTab))) + Xsqlite3_mprintf(tls, ts+28595, libc.VaList(bp+24, zTab))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK) { goto __10 } @@ -125769,7 +125827,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { libc.Xmemcpy(tls, (*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed, (*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk, uint64(unsafe.Sizeof(U8(0)))*uint64((*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol)) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+16, p+64, - Xsqlite3_mprintf(tls, ts+28569, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) + Xsqlite3_mprintf(tls, ts+28616, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) } (*RbuObjIter)(unsafe.Pointer(pIter)).FnIndex = 0 @@ -125784,7 +125842,7 @@ libc.Xmemset(tls, (*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed, 0x01, 
uint64(unsafe.Sizeof(U8(0)))*uint64((*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol)) } (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+24, p+64, - Xsqlite3_mprintf(tls, ts+28597, libc.VaList(bp+8, zIdx))) + Xsqlite3_mprintf(tls, ts+28644, libc.VaList(bp+8, zIdx))) for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) { var iCid int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 24)), 1) if iCid >= 0 { @@ -125824,7 +125882,7 @@ rbuTableType(tls, p, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, pIter+72, bp+56, pIter+108) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NOTABLE { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+19473, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl)) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+19520, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl)) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc != 0 { return (*Sqlite3rbu)(unsafe.Pointer(p)).Frc @@ -125834,18 +125892,18 @@ } (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp+64, p+64, - Xsqlite3_mprintf(tls, ts+28626, libc.VaList(bp+8, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl))) + Xsqlite3_mprintf(tls, ts+28673, libc.VaList(bp+8, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl))) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { nCol = Xsqlite3_column_count(tls, *(*uintptr)(unsafe.Pointer(bp + 64))) rbuAllocateIterArrays(tls, p, pIter, nCol) } for i = 0; (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && i < nCol; i++ { var zName uintptr = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp + 64)), i) - if Xsqlite3_strnicmp(tls, ts+28645, zName, 4) != 0 { + if Xsqlite3_strnicmp(tls, ts+28692, zName, 4) != 0 { var zCopy uintptr = rbuStrndup(tls, zName, p+56) *(*int32)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FaiSrcOrder + uintptr((*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol)*4)) = (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(libc.PostIncInt32(&(*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol, 1))*8)) = zCopy - } else if 0 == Xsqlite3_stricmp(tls, ts+28650, zName) { + } else if 0 == Xsqlite3_stricmp(tls, ts+28697, zName) { bRbuRowid = 1 } } @@ -125857,18 +125915,18 @@ bRbuRowid != libc.Bool32((*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_VTAB || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE) { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, - ts+28660, libc.VaList(bp+16, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, + ts+28707, libc.VaList(bp+16, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, func() uintptr { if bRbuRowid != 0 { - return ts + 28689 + return ts + 28736 } - return ts + 28702 + return ts + 28749 }())) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+64, p+64, - Xsqlite3_mprintf(tls, ts+28711, libc.VaList(bp+32, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) + Xsqlite3_mprintf(tls, ts+28758, libc.VaList(bp+32, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) } for 
(*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 64))) { var zName uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 64)), 1) @@ -125882,7 +125940,7 @@ } if i == (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+28733, + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+28780, libc.VaList(bp+40, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zName)) } else { var iPk int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 64)), 5) @@ -125929,7 +125987,7 @@ var i int32 for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { var z uintptr = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) - zList = rbuMPrintf(tls, p, ts+28760, libc.VaList(bp, zList, zSep, z)) + zList = rbuMPrintf(tls, p, ts+28807, libc.VaList(bp, zList, zSep, z)) zSep = ts + 14598 } return zList @@ -125947,7 +126005,7 @@ for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { if int32(*(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(i)))) == iPk { var zCol uintptr = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) - zRet = rbuMPrintf(tls, p, ts+28769, libc.VaList(bp, zRet, zSep, zPre, zCol, zPost)) + zRet = rbuMPrintf(tls, p, ts+28816, libc.VaList(bp, zRet, zSep, zPre, zCol, zPost)) zSep = zSeparator break } @@ -125969,25 +126027,25 @@ if bRowid != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+72, p+64, Xsqlite3_mprintf(tls, - ts+28782, libc.VaList(bp, zWrite, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) + ts+28829, libc.VaList(bp, zWrite, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 72))) { var iMax Sqlite3_int64 = Xsqlite3_column_int64(tls, *(*uintptr)(unsafe.Pointer(bp + 72)), 0) - zRet = rbuMPrintf(tls, p, ts+28814, libc.VaList(bp+16, iMax)) + zRet = rbuMPrintf(tls, p, ts+28861, libc.VaList(bp+16, iMax)) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 72))) } else { - var zOrder uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+1538, ts+14598, ts+28837) - var zSelect uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+28843, ts+28850, ts+4941) + var zOrder uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+1538, ts+14598, ts+28884) + var zSelect uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+28890, ts+28897, ts+4941) var zList uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+1538, ts+14598, ts+1538) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+72, p+64, Xsqlite3_mprintf(tls, - ts+28858, + ts+28905, libc.VaList(bp+24, zSelect, zWrite, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zOrder))) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 72))) { var zVal uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 72)), 0) - zRet = rbuMPrintf(tls, p, ts+28900, libc.VaList(bp+56, zList, zVal)) + zRet = rbuMPrintf(tls, p, ts+28947, libc.VaList(bp+56, zList, zVal)) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 72))) } @@ -126029,7 +126087,7 @@ 
*(*uintptr)(unsafe.Pointer(bp + 176)) = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+176, p+64, - Xsqlite3_mprintf(tls, ts+28597, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx))) + Xsqlite3_mprintf(tls, ts+28644, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx))) __1: if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 176)))) { goto __2 @@ -126064,7 +126122,7 @@ zCol = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) goto __7 __6: - zCol = ts + 28920 + zCol = ts + 28967 __7: ; goto __5 @@ -126072,11 +126130,11 @@ zCol = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(iCid)*8)) __5: ; - zLhs = rbuMPrintf(tls, p, ts+28928, + zLhs = rbuMPrintf(tls, p, ts+28975, libc.VaList(bp+8, zLhs, zSep, zCol, zCollate)) - zOrder = rbuMPrintf(tls, p, ts+28949, + zOrder = rbuMPrintf(tls, p, ts+28996, libc.VaList(bp+40, zOrder, zSep, iCol, zCol, zCollate)) - zSelect = rbuMPrintf(tls, p, ts+28985, + zSelect = rbuMPrintf(tls, p, ts+29032, libc.VaList(bp+80, zSelect, zSep, iCol, zCol)) zSep = ts + 14598 iCol++ @@ -126096,7 +126154,7 @@ *(*uintptr)(unsafe.Pointer(bp + 184)) = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+184, p+64, - Xsqlite3_mprintf(tls, ts+29012, + Xsqlite3_mprintf(tls, ts+29059, libc.VaList(bp+112, zSelect, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zOrder))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 184)))) { goto __13 @@ -126123,7 +126181,7 @@ ; __18: ; - zVector = rbuMPrintf(tls, p, ts+29060, libc.VaList(bp+136, zVector, zSep, zQuoted)) + zVector = rbuMPrintf(tls, p, ts+29107, libc.VaList(bp+136, zVector, zSep, zQuoted)) zSep = ts + 14598 goto __15 __15: @@ -126135,7 +126193,7 @@ if !!(bFailed != 0) { goto __20 } - zRet = rbuMPrintf(tls, p, ts+29067, libc.VaList(bp+160, zLhs, zVector)) + zRet = rbuMPrintf(tls, p, ts+29114, libc.VaList(bp+160, zLhs, zVector)) __20: ; __13: @@ -126168,7 +126226,7 @@ if rc == SQLITE_OK { rc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+200, p+64, - Xsqlite3_mprintf(tls, ts+28597, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx))) + Xsqlite3_mprintf(tls, ts+28644, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx))) } for rc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 200))) { @@ -126180,7 +126238,7 @@ if iCid == -2 { var iSeq int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 200)), 0) - zRet = Xsqlite3_mprintf(tls, ts+29079, libc.VaList(bp+8, zRet, zCom, + zRet = Xsqlite3_mprintf(tls, ts+29126, libc.VaList(bp+8, zRet, zCom, (*RbuSpan)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FaIdxCol+uintptr(iSeq)*16)).FnSpan, (*RbuSpan)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FaIdxCol+uintptr(iSeq)*16)).FzSpan, zCollate)) zType = ts + 1538 } else { @@ -126192,37 +126250,37 @@ zCol = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) } else if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - zCol = ts + 28920 + zCol = ts + 28967 } else { - zCol = ts + 28650 + zCol = ts + 28697 } zType = ts + 1103 } else { zCol = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + 
uintptr(iCid)*8)) zType = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblType + uintptr(iCid)*8)) } - zRet = Xsqlite3_mprintf(tls, ts+29101, libc.VaList(bp+48, zRet, zCom, zCol, zCollate)) + zRet = Xsqlite3_mprintf(tls, ts+29148, libc.VaList(bp+48, zRet, zCom, zCol, zCollate)) } if (*RbuObjIter)(unsafe.Pointer(pIter)).FbUnique == 0 || Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 200)), 5) != 0 { var zOrder uintptr = func() uintptr { if bDesc != 0 { - return ts + 28837 + return ts + 28884 } return ts + 1538 }() - zImpPK = Xsqlite3_mprintf(tls, ts+29121, + zImpPK = Xsqlite3_mprintf(tls, ts+29168, libc.VaList(bp+80, zImpPK, zCom, nBind, zCol, zOrder)) } - zImpCols = Xsqlite3_mprintf(tls, ts+29142, + zImpCols = Xsqlite3_mprintf(tls, ts+29189, libc.VaList(bp+120, zImpCols, zCom, nBind, zCol, zType, zCollate)) zWhere = Xsqlite3_mprintf(tls, - ts+29175, libc.VaList(bp+168, zWhere, zAnd, nBind, zCol)) + ts+29222, libc.VaList(bp+168, zWhere, zAnd, nBind, zCol)) if zRet == uintptr(0) || zImpPK == uintptr(0) || zImpCols == uintptr(0) || zWhere == uintptr(0) { rc = SQLITE_NOMEM } zCom = ts + 14598 - zAnd = ts + 21509 + zAnd = ts + 21556 nBind++ } @@ -126261,9 +126319,9 @@ for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { if *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed + uintptr(i))) != 0 { var zCol uintptr = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) - zList = Xsqlite3_mprintf(tls, ts+29199, libc.VaList(bp, zList, zS, zObj, zCol)) + zList = Xsqlite3_mprintf(tls, ts+29246, libc.VaList(bp, zList, zS, zObj, zCol)) } else { - zList = Xsqlite3_mprintf(tls, ts+29211, libc.VaList(bp+32, zList, zS)) + zList = Xsqlite3_mprintf(tls, ts+29258, libc.VaList(bp+32, zList, zS)) } zS = ts + 14598 if zList == uintptr(0) { @@ -126273,7 +126331,7 @@ } if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { - zList = rbuMPrintf(tls, p, ts+29220, libc.VaList(bp+48, zList, zObj)) + zList = rbuMPrintf(tls, p, ts+29267, libc.VaList(bp+48, zList, zObj)) } } return zList @@ -126285,18 +126343,18 @@ var zList uintptr = uintptr(0) if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_VTAB || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { - zList = rbuMPrintf(tls, p, ts+29235, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol+1)) + zList = rbuMPrintf(tls, p, ts+29282, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol+1)) } else if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL { var zSep uintptr = ts + 1538 var i int32 for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { if *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(i))) != 0 { - zList = rbuMPrintf(tls, p, ts+29249, libc.VaList(bp+8, zList, zSep, i, i+1)) - zSep = ts + 21509 + zList = rbuMPrintf(tls, p, ts+29296, libc.VaList(bp+8, zList, zSep, i, i+1)) + zSep = ts + 21556 } } zList = rbuMPrintf(tls, p, - ts+29261, libc.VaList(bp+40, zList)) + ts+29308, libc.VaList(bp+40, zList)) } else { var zSep uintptr = ts + 1538 @@ -126304,8 +126362,8 @@ for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { if *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(i))) != 0 { var zCol uintptr = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) - zList = rbuMPrintf(tls, p, ts+29311, libc.VaList(bp+48, zList, zSep, 
zCol, i+1)) - zSep = ts + 21509 + zList = rbuMPrintf(tls, p, ts+29358, libc.VaList(bp+48, zList, zSep, zCol, i+1)) + zSep = ts + 21556 } } } @@ -126314,7 +126372,7 @@ func rbuBadControlError(tls *libc.TLS, p uintptr) { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+29324, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+29371, 0) } func rbuObjIterGetSetlist(tls *libc.TLS, p uintptr, pIter uintptr, zMask uintptr) uintptr { @@ -126332,15 +126390,15 @@ for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { var c int8 = *(*int8)(unsafe.Pointer(zMask + uintptr(*(*int32)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FaiSrcOrder + uintptr(i)*4))))) if int32(c) == 'x' { - zList = rbuMPrintf(tls, p, ts+29311, + zList = rbuMPrintf(tls, p, ts+29358, libc.VaList(bp, zList, zSep, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)), i+1)) zSep = ts + 14598 } else if int32(c) == 'd' { - zList = rbuMPrintf(tls, p, ts+29350, + zList = rbuMPrintf(tls, p, ts+29397, libc.VaList(bp+32, zList, zSep, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)), *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)), i+1)) zSep = ts + 14598 } else if int32(c) == 'f' { - zList = rbuMPrintf(tls, p, ts+29380, + zList = rbuMPrintf(tls, p, ts+29427, libc.VaList(bp+72, zList, zSep, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)), *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)), i+1)) zSep = ts + 14598 } @@ -126377,19 +126435,19 @@ var z uintptr = uintptr(0) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - var zSep uintptr = ts + 29417 + var zSep uintptr = ts + 29464 *(*uintptr)(unsafe.Pointer(bp + 56)) = uintptr(0) *(*uintptr)(unsafe.Pointer(bp + 64)) = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+56, p+64, - Xsqlite3_mprintf(tls, ts+28569, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) + Xsqlite3_mprintf(tls, ts+28616, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 56))) { var zOrig uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), 3) if zOrig != 0 && libc.Xstrcmp(tls, zOrig, ts+16139) == 0 { var zIdx uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), 1) if zIdx != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+64, p+64, - Xsqlite3_mprintf(tls, ts+28597, libc.VaList(bp+8, zIdx))) + Xsqlite3_mprintf(tls, ts+28644, libc.VaList(bp+8, zIdx))) } break } @@ -126401,15 +126459,15 @@ var zCol uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 64)), 2) var zDesc uintptr if Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 64)), 3) != 0 { - zDesc = ts + 28837 + zDesc = ts + 28884 } else { zDesc = ts + 1538 } - z = rbuMPrintf(tls, p, ts+29430, libc.VaList(bp+16, z, zSep, zCol, zDesc)) + z = rbuMPrintf(tls, p, ts+29477, libc.VaList(bp+16, z, zSep, zCol, zDesc)) zSep = ts + 14598 } } - z = rbuMPrintf(tls, p, ts+29441, libc.VaList(bp+48, z)) + z = rbuMPrintf(tls, p, ts+29488, libc.VaList(bp+48, z)) rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp 
+ 64))) } return z @@ -126429,7 +126487,7 @@ var zPk uintptr = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+168, p+64, - ts+29445) + ts+29492) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { Xsqlite3_bind_int(tls, *(*uintptr)(unsafe.Pointer(bp + 168)), 1, tnum) if SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 168))) { @@ -126438,7 +126496,7 @@ } if zIdx != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+176, p+64, - Xsqlite3_mprintf(tls, ts+28597, libc.VaList(bp, zIdx))) + Xsqlite3_mprintf(tls, ts+28644, libc.VaList(bp, zIdx))) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 168))) @@ -126448,23 +126506,23 @@ var iCid int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 176)), 1) var bDesc int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 176)), 3) var zCollate uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 176)), 4) - zCols = rbuMPrintf(tls, p, ts+29495, libc.VaList(bp+8, zCols, zComma, + zCols = rbuMPrintf(tls, p, ts+29542, libc.VaList(bp+8, zCols, zComma, iCid, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblType + uintptr(iCid)*8)), zCollate)) - zPk = rbuMPrintf(tls, p, ts+29517, libc.VaList(bp+48, zPk, zComma, iCid, func() uintptr { + zPk = rbuMPrintf(tls, p, ts+29564, libc.VaList(bp+48, zPk, zComma, iCid, func() uintptr { if bDesc != 0 { - return ts + 28837 + return ts + 28884 } return ts + 1538 }())) zComma = ts + 14598 } } - zCols = rbuMPrintf(tls, p, ts+29527, libc.VaList(bp+80, zCols)) + zCols = rbuMPrintf(tls, p, ts+29574, libc.VaList(bp+80, zCols)) rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 176))) Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+88, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6425, 1, tnum)) rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+29542, + ts+29589, libc.VaList(bp+120, zCols, zPk)) Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+136, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6425, 0, 0)) } @@ -126490,13 +126548,13 @@ (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6425, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zCol, uintptr(0), bp+192, uintptr(0), uintptr(0), uintptr(0)) if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_IPK && *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(iCol))) != 0 { - zPk = ts + 29604 + zPk = ts + 29651 } - zSql = rbuMPrintf(tls, p, ts+29617, + zSql = rbuMPrintf(tls, p, ts+29664, libc.VaList(bp+32, zSql, zComma, zCol, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblType + uintptr(iCol)*8)), zPk, *(*uintptr)(unsafe.Pointer(bp + 192)), func() uintptr { if *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabNotNull + uintptr(iCol))) != 0 { - return ts + 29644 + return ts + 29691 } return ts + 1538 }())) @@ -126506,16 +126564,16 @@ if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_WITHOUT_ROWID { var zPk uintptr = rbuWithoutRowidPK(tls, p, pIter) if zPk != 0 { - zSql = rbuMPrintf(tls, p, ts+29654, libc.VaList(bp+88, zSql, zPk)) + zSql = rbuMPrintf(tls, p, ts+29701, libc.VaList(bp+88, zSql, zPk)) } } Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+104, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6425, 1, tnum)) - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+29661, + rbuMPrintfExec(tls, p, 
(*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+29708, libc.VaList(bp+136, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zSql, func() uintptr { if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_WITHOUT_ROWID { - return ts + 29693 + return ts + 29740 } return ts + 1538 }())) @@ -126532,7 +126590,7 @@ if zBind != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, pIter+152, p+64, Xsqlite3_mprintf(tls, - ts+29708, + ts+29755, libc.VaList(bp, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zCollist, zRbuRowid, zBind))) } } @@ -126569,7 +126627,7 @@ if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { *(*int32)(unsafe.Pointer(bp + 8)) = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp, p+64, - ts+29765) + ts+29812) } if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { var rc2 int32 @@ -126674,7 +126732,7 @@ var zLimit uintptr = uintptr(0) if nOffset != 0 { - zLimit = Xsqlite3_mprintf(tls, ts+29831, libc.VaList(bp, nOffset)) + zLimit = Xsqlite3_mprintf(tls, ts+29878, libc.VaList(bp, nOffset)) if !(zLimit != 0) { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_NOMEM } @@ -126697,7 +126755,7 @@ Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+8, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6425, 0, 1)) Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+40, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6425, 1, tnum)) rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+29851, + ts+29898, libc.VaList(bp+72, zTbl, *(*uintptr)(unsafe.Pointer(bp + 600)), *(*uintptr)(unsafe.Pointer(bp + 608)))) Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+96, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6425, 0, 0)) @@ -126705,13 +126763,13 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+136, p+64, - Xsqlite3_mprintf(tls, ts+29916, libc.VaList(bp+128, zTbl, zBind))) + Xsqlite3_mprintf(tls, ts+29963, libc.VaList(bp+128, zTbl, zBind))) } if libc.Bool32((*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0)) == 0 && (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+144, p+64, - Xsqlite3_mprintf(tls, ts+29952, libc.VaList(bp+144, zTbl, *(*uintptr)(unsafe.Pointer(bp + 616))))) + Xsqlite3_mprintf(tls, ts+29999, libc.VaList(bp+144, zTbl, *(*uintptr)(unsafe.Pointer(bp + 616))))) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { @@ -126727,7 +126785,7 @@ } zSql = Xsqlite3_mprintf(tls, - ts+29986, + ts+30033, libc.VaList(bp+160, zCollist, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zPart, @@ -126735,9 +126793,9 @@ if zStart != 0 { return func() uintptr { if zPart != 0 { - return ts + 30047 + return ts + 30094 } - return ts + 30051 + return ts + 30098 }() } return ts + 1538 @@ -126746,20 +126804,20 @@ Xsqlite3_free(tls, zStart) } else if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { zSql = Xsqlite3_mprintf(tls, - ts+30057, + ts+30104, libc.VaList(bp+216, zCollist, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zPart, zCollist, zLimit)) } else { zSql = Xsqlite3_mprintf(tls, - ts+30118, + ts+30165, libc.VaList(bp+264, zCollist, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zPart, zCollist, 
(*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zPart, func() uintptr { if zPart != 0 { - return ts + 30047 + return ts + 30094 } - return ts + 30051 + return ts + 30098 }(), zCollist, zLimit)) } @@ -126796,16 +126854,16 @@ if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_VTAB { return ts + 1538 } - return ts + 30277 + return ts + 30324 }() if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+136, pz, Xsqlite3_mprintf(tls, - ts+30286, + ts+30333, libc.VaList(bp+344, zWrite, zTbl, zCollist, func() uintptr { if bRbuRowid != 0 { - return ts + 30322 + return ts + 30369 } return ts + 1538 }(), zBindings))) @@ -126814,32 +126872,32 @@ if libc.Bool32((*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0)) == 0 && (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+144, pz, Xsqlite3_mprintf(tls, - ts+30332, libc.VaList(bp+384, zWrite, zTbl, zWhere))) + ts+30379, libc.VaList(bp+384, zWrite, zTbl, zWhere))) } if libc.Bool32((*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0)) == 0 && (*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed != 0 { var zRbuRowid uintptr = ts + 1538 if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { - zRbuRowid = ts + 30360 + zRbuRowid = ts + 30407 } rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, - ts+30372, libc.VaList(bp+408, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, func() uintptr { + ts+30419, libc.VaList(bp+408, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, func() uintptr { if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL { - return ts + 30448 + return ts + 30495 } return ts + 1538 }(), (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl)) rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+30465, + ts+30512, libc.VaList(bp+440, zWrite, zTbl, zOldlist, zWrite, zTbl, zOldlist, zWrite, zTbl, zNewlist)) if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+30764, + ts+30811, libc.VaList(bp+512, zWrite, zTbl, zNewlist)) } @@ -126852,9 +126910,9 @@ var zOrder uintptr = uintptr(0) if bRbuRowid != 0 { if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - zRbuRowid = ts + 30863 + zRbuRowid = ts + 30910 } else { - zRbuRowid = ts + 30873 + zRbuRowid = ts + 30920 } } @@ -126867,7 +126925,7 @@ } } if bRbuRowid != 0 { - zOrder = rbuMPrintf(tls, p, ts+28920, 0) + zOrder = rbuMPrintf(tls, p, ts+28967, 0) } else { zOrder = rbuObjIterGetPkList(tls, p, pIter, ts+1538, ts+14598, ts+1538) } @@ -126876,11 +126934,11 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, pIter+128, pz, Xsqlite3_mprintf(tls, - ts+30884, + ts+30931, libc.VaList(bp+536, zCollist, func() uintptr { if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - return ts + 30932 + return ts + 30979 } return ts + 1538 }(), @@ -126893,7 +126951,7 @@ }(), func() uintptr { if zOrder != 0 { - return ts + 22843 + return ts + 22890 } return ts + 1538 }(), zOrder, @@ -126961,9 +127019,9 @@ var zPrefix uintptr = ts + 1538 if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType 
!= RBU_PK_VTAB { - zPrefix = ts + 30277 + zPrefix = ts + 30324 } - zUpdate = Xsqlite3_mprintf(tls, ts+30938, + zUpdate = Xsqlite3_mprintf(tls, ts+30985, libc.VaList(bp, zPrefix, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zSet, zWhere)) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pUp+8, p+64, zUpdate) @@ -127022,7 +127080,7 @@ } *(*int32)(unsafe.Pointer(bp + 16)) = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp+8, p+64, - Xsqlite3_mprintf(tls, ts+30968, libc.VaList(bp, p+48))) + Xsqlite3_mprintf(tls, ts+31015, libc.VaList(bp, p+48))) for *(*int32)(unsafe.Pointer(bp + 16)) == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 8))) { switch Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 8)), 0) { case RBU_STATE_STAGE: @@ -127095,18 +127153,18 @@ Xsqlite3_file_control(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+6425, SQLITE_FCNTL_RBUCNT, p) if (*Sqlite3rbu)(unsafe.Pointer(p)).FzState == uintptr(0) { var zFile uintptr = Xsqlite3_db_filename(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+6425) - (*Sqlite3rbu)(unsafe.Pointer(p)).FzState = rbuMPrintf(tls, p, ts+30998, libc.VaList(bp, zFile, zFile)) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzState = rbuMPrintf(tls, p, ts+31045, libc.VaList(bp, zFile, zFile)) } } if (*Sqlite3rbu)(unsafe.Pointer(p)).FzState != 0 { - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+31026, libc.VaList(bp+16, (*Sqlite3rbu)(unsafe.Pointer(p)).FzState)) + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+31073, libc.VaList(bp+16, (*Sqlite3rbu)(unsafe.Pointer(p)).FzState)) libc.Xmemcpy(tls, p+48, ts+3270, uint64(4)) } else { libc.Xmemcpy(tls, p+48, ts+6425, uint64(4)) } - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+31044, libc.VaList(bp+24, p+48)) + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+31091, libc.VaList(bp+24, p+48)) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { var bOpen int32 = 0 @@ -127146,11 +127204,11 @@ return } (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31110, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31157, 0) } else { var zTarget uintptr var zExtra uintptr = uintptr(0) - if libc.Xstrlen(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu) >= uint64(5) && 0 == libc.Xmemcmp(tls, ts+24190, (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu, uint64(5)) { + if libc.Xstrlen(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu) >= uint64(5) && 0 == libc.Xmemcmp(tls, ts+24237, (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu, uint64(5)) { zExtra = (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu + 5 for *(*int8)(unsafe.Pointer(zExtra)) != 0 { if int32(*(*int8)(unsafe.Pointer(libc.PostIncUintptr(&zExtra, 1)))) == '?' 
{ @@ -127162,13 +127220,13 @@ } } - zTarget = Xsqlite3_mprintf(tls, ts+31142, + zTarget = Xsqlite3_mprintf(tls, ts+31189, libc.VaList(bp+32, Xsqlite3_db_filename(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+6425), func() uintptr { if zExtra == uintptr(0) { return ts + 1538 } - return ts + 31174 + return ts + 31221 }(), func() uintptr { if zExtra == uintptr(0) { return ts + 1538 @@ -127187,21 +127245,21 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_create_function(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+31176, -1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { + ts+31223, -1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rbuTmpInsertFunc})), uintptr(0), uintptr(0)) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_create_function(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+31191, 2, SQLITE_UTF8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + ts+31238, 2, SQLITE_UTF8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rbuFossilDeltaFunc})), uintptr(0), uintptr(0)) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_create_function(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, - ts+31208, -1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { + ts+31255, -1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rbuTargetNameFunc})), uintptr(0), uintptr(0)) } @@ -127209,7 +127267,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_file_control(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6425, SQLITE_FCNTL_RBU, p) } - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31224, 0) + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31271, 0) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_file_control(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6425, SQLITE_FCNTL_RBU, p) @@ -127217,7 +127275,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_NOTFOUND { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31252, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31299, 0) } } @@ -127246,14 +127304,14 @@ if pState == uintptr(0) { (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage = 0 if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31224, uintptr(0), uintptr(0), uintptr(0)) + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31271, uintptr(0), uintptr(0), uintptr(0)) } } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { var rc2 int32 (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage = RBU_STAGE_CAPTURE - rc2 = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31270, uintptr(0), uintptr(0), uintptr(0)) + rc2 = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31317, uintptr(0), uintptr(0), uintptr(0)) if rc2 != SQLITE_NOTICE { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = rc2 } @@ -127379,7 +127437,7 @@ func rbuExclusiveCheckpoint(tls *libc.TLS, db uintptr) int32 { var zUri uintptr = Xsqlite3_db_filename(tls, db, uintptr(0)) - return 
Xsqlite3_uri_boolean(tls, zUri, ts+31305, 0) + return Xsqlite3_uri_boolean(tls, zUri, ts+31352, 0) } func rbuMoveOalFile(tls *libc.TLS, p uintptr) { @@ -127394,8 +127452,8 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { zMove = Xsqlite3_db_filename(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+6425) } - zOal = Xsqlite3_mprintf(tls, ts+31330, libc.VaList(bp, zMove)) - zWal = Xsqlite3_mprintf(tls, ts+31337, libc.VaList(bp+8, zMove)) + zOal = Xsqlite3_mprintf(tls, ts+31377, libc.VaList(bp, zMove)) + zWal = Xsqlite3_mprintf(tls, ts+31384, libc.VaList(bp+8, zMove)) if zWal == uintptr(0) || zOal == uintptr(0) { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_NOMEM @@ -127512,7 +127570,7 @@ (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx == uintptr(0) && (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_IPK && *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(i))) != 0 && Xsqlite3_column_type(tls, (*RbuObjIter)(unsafe.Pointer(pIter)).FpSelect, i) == SQLITE_NULL { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_MISMATCH - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+23828, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+23875, 0) return } @@ -127605,7 +127663,7 @@ var iCookie int32 = 1000000 (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, dbread, bp+8, p+64, - ts+31344) + ts+31391) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { if SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 8))) { iCookie = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 8)), 0) @@ -127613,7 +127671,7 @@ rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 8))) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31366, libc.VaList(bp, iCookie+1)) + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31413, libc.VaList(bp, iCookie+1)) } } } @@ -127634,7 +127692,7 @@ rc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp+168, p+64, Xsqlite3_mprintf(tls, - ts+31393, + ts+31440, libc.VaList(bp, p+48, RBU_STATE_STAGE, eStage, RBU_STATE_TBL, (*Sqlite3rbu)(unsafe.Pointer(p)).Fobjiter.FzTbl, @@ -127664,9 +127722,9 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { *(*uintptr)(unsafe.Pointer(bp + 24)) = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp+24, p+64, - Xsqlite3_mprintf(tls, ts+31551, libc.VaList(bp, zPragma))) + Xsqlite3_mprintf(tls, ts+31598, libc.VaList(bp, zPragma))) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) { - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31566, + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31613, libc.VaList(bp+8, zPragma, Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 24)), 0))) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 24))) @@ -127680,10 +127738,10 @@ *(*uintptr)(unsafe.Pointer(bp)) = uintptr(0) *(*uintptr)(unsafe.Pointer(bp + 8)) = uintptr(0) - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31586, uintptr(0), uintptr(0), p+64) + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31633, uintptr(0), uintptr(0), p+64) if 
(*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp, p+64, - ts+31611) + ts+31658) } for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp))) == SQLITE_ROW { @@ -127697,12 +127755,12 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp, p+64, - ts+31719) + ts+31766) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+8, p+64, - ts+31784) + ts+31831) } for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp))) == SQLITE_ROW { @@ -127714,7 +127772,7 @@ (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_reset(tls, *(*uintptr)(unsafe.Pointer(bp + 8))) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31828, uintptr(0), uintptr(0), p+64) + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31875, uintptr(0), uintptr(0), p+64) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp))) @@ -127742,7 +127800,7 @@ if (*RbuObjIter)(unsafe.Pointer(pIter)).FbCleanup != 0 { if libc.Bool32((*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0)) == 0 && (*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed != 0 { rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, - ts+31853, libc.VaList(bp, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl)) + ts+31900, libc.VaList(bp, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl)) } } else { rbuObjIterPrepareAll(tls, p, pIter, 0) @@ -127864,7 +127922,7 @@ if rc == SQLITE_OK && !(int32((*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl) != 0) { rc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31881, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+31928, 0) } if rc == SQLITE_OK { @@ -127880,7 +127938,7 @@ bp := tls.Alloc(16) defer tls.Free(16) - var zOal uintptr = rbuMPrintf(tls, p, ts+31330, libc.VaList(bp, (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget)) + var zOal uintptr = rbuMPrintf(tls, p, ts+31377, libc.VaList(bp, (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget)) if zOal != 0 { *(*uintptr)(unsafe.Pointer(bp + 8)) = uintptr(0) Xsqlite3_file_control(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+6425, SQLITE_FCNTL_VFS_POINTER, bp+8) @@ -127897,7 +127955,7 @@ defer tls.Free(76) Xsqlite3_randomness(tls, int32(unsafe.Sizeof(int32(0))), bp+8) - Xsqlite3_snprintf(tls, int32(unsafe.Sizeof([64]int8{})), bp+12, ts+31906, libc.VaList(bp, *(*int32)(unsafe.Pointer(bp + 8)))) + Xsqlite3_snprintf(tls, int32(unsafe.Sizeof([64]int8{})), bp+12, ts+31953, libc.VaList(bp, *(*int32)(unsafe.Pointer(bp + 8)))) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3rbu_create_vfs(tls, bp+12, uintptr(0)) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { var pVfs uintptr = Xsqlite3_vfs_find(tls, bp+12) @@ -127931,7 +127989,7 @@ rc = prepareFreeAndCollectError(tls, db, bp+8, bp+16, Xsqlite3_mprintf(tls, - ts+31917, libc.VaList(bp, Xsqlite3_value_text(tls, *(*uintptr)(unsafe.Pointer(apVal)))))) + ts+31964, libc.VaList(bp, Xsqlite3_value_text(tls, *(*uintptr)(unsafe.Pointer(apVal)))))) if rc != SQLITE_OK { Xsqlite3_result_error(tls, pCtx, 
*(*uintptr)(unsafe.Pointer(bp + 16)), -1) } else { @@ -127961,13 +128019,13 @@ (*Sqlite3rbu)(unsafe.Pointer(p)).FnPhaseOneStep = int64(-1) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_create_function(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, - ts+31989, 1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { + ts+32036, 1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rbuIndexCntFunc})), uintptr(0), uintptr(0)) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp, p+64, - ts+32003) + ts+32050) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { if SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp))) { @@ -127978,7 +128036,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && bExists != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp, p+64, - ts+32060) + ts+32107) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { if SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp))) { (*Sqlite3rbu)(unsafe.Pointer(p)).FnPhaseOneStep = Xsqlite3_column_int64(tls, *(*uintptr)(unsafe.Pointer(bp)), 0) @@ -128052,7 +128110,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && (*Rbu_file)(unsafe.Pointer((*Sqlite3rbu)(unsafe.Pointer(p)).FpTargetFd)).FpWalFd != 0 { if (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage == RBU_STAGE_OAL { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32134, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32181, 0) } else if (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage == RBU_STAGE_MOVE { (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage = RBU_STAGE_CKPT (*Sqlite3rbu)(unsafe.Pointer(p)).FnStep = 0 @@ -128070,12 +128128,12 @@ }() if (*Rbu_file)(unsafe.Pointer(pFd)).FiCookie != (*RbuState)(unsafe.Pointer(pState)).FiCookie { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_BUSY - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32166, + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32213, libc.VaList(bp+8, func() uintptr { if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - return ts + 32198 + return ts + 32245 } - return ts + 32205 + return ts + 32252 }())) } } @@ -128099,14 +128157,14 @@ } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, db, ts+32212, uintptr(0), uintptr(0), p+64) + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, db, ts+32259, uintptr(0), uintptr(0), p+64) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { var frc int32 = Xsqlite3_file_control(tls, db, ts+6425, SQLITE_FCNTL_ZIPVFS, uintptr(0)) if frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, - db, ts+32228, uintptr(0), uintptr(0), p+64) + db, ts+32275, uintptr(0), uintptr(0), p+64) } } @@ -128160,7 +128218,7 @@ } if zState != 0 { var n int32 = int32(libc.Xstrlen(tls, zState)) - if n >= 7 && 0 == libc.Xmemcmp(tls, ts+32252, zState+uintptr(n-7), uint64(7)) { + if n >= 7 && 0 == libc.Xmemcmp(tls, ts+32299, zState+uintptr(n-7), uint64(7)) { return rbuMisuseError(tls) } } @@ -128187,7 +128245,7 @@ var i uint32 var nErrmsg Size_t = libc.Xstrlen(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg) for i = uint32(0); Size_t(i) < nErrmsg-uint64(8); i++ { - if libc.Xmemcmp(tls, 
(*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg+uintptr(i), ts+30277, uint64(8)) == 0 { + if libc.Xmemcmp(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg+uintptr(i), ts+30324, uint64(8)) == 0 { var nDel int32 = 8 for int32(*(*int8)(unsafe.Pointer((*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg + uintptr(i+uint32(nDel))))) >= '0' && int32(*(*int8)(unsafe.Pointer((*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg + uintptr(i+uint32(nDel))))) <= '9' { nDel++ @@ -128223,7 +128281,7 @@ rbuObjIterFinalize(tls, p+80) if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) && (*Sqlite3rbu)(unsafe.Pointer(p)).Frc != SQLITE_OK && (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu != 0 { - var rc2 int32 = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+32260, uintptr(0), uintptr(0), uintptr(0)) + var rc2 int32 = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+32307, uintptr(0), uintptr(0), uintptr(0)) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_DONE && rc2 != SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = rc2 } @@ -128342,12 +128400,12 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { zBegin = ts + 14487 } else { - zBegin = ts + 32212 + zBegin = ts + 32259 } rc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, zBegin, uintptr(0), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { - rc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+32212, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+32259, uintptr(0), uintptr(0), uintptr(0)) } } @@ -128693,7 +128751,7 @@ })(unsafe.Pointer(&struct{ uintptr }{xControl})).f(tls, (*Rbu_file)(unsafe.Pointer(p)).FpReal, SQLITE_FCNTL_ZIPVFS, bp+16) if rc == SQLITE_OK { rc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(pRbu)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32287, 0) + (*Sqlite3rbu)(unsafe.Pointer(pRbu)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32334, 0) } else if rc == SQLITE_NOTFOUND { (*Sqlite3rbu)(unsafe.Pointer(pRbu)).FpTargetFd = p (*Rbu_file)(unsafe.Pointer(p)).FpRbu = pRbu @@ -128718,7 +128776,7 @@ if rc == SQLITE_OK && op == SQLITE_FCNTL_VFSNAME { var pRbuVfs uintptr = (*Rbu_file)(unsafe.Pointer(p)).FpRbuVfs var zIn uintptr = *(*uintptr)(unsafe.Pointer(pArg)) - var zOut uintptr = Xsqlite3_mprintf(tls, ts+32310, libc.VaList(bp, (*Rbu_vfs)(unsafe.Pointer(pRbuVfs)).Fbase.FzName, zIn)) + var zOut uintptr = Xsqlite3_mprintf(tls, ts+32357, libc.VaList(bp, (*Rbu_vfs)(unsafe.Pointer(pRbuVfs)).Fbase.FzName, zIn)) *(*uintptr)(unsafe.Pointer(pArg)) = zOut if zOut == uintptr(0) { rc = SQLITE_NOMEM @@ -128878,7 +128936,7 @@ } if oflags&SQLITE_OPEN_MAIN_DB != 0 && - Xsqlite3_uri_boolean(tls, zName, ts+32321, 0) != 0 { + Xsqlite3_uri_boolean(tls, zName, ts+32368, 0) != 0 { oflags = SQLITE_OPEN_TEMP_DB | SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE | SQLITE_OPEN_EXCLUSIVE | SQLITE_OPEN_DELETEONCLOSE zOpen = uintptr(0) } @@ -129707,7 +129765,7 @@ rc = Xsqlite3_table_column_metadata(tls, db, zDb, zThis, uintptr(0), uintptr(0), uintptr(0), uintptr(0), uintptr(0), uintptr(0)) if rc == SQLITE_OK { zPragma = Xsqlite3_mprintf(tls, - ts+32332, 0) + ts+32379, 0) } else if rc == SQLITE_ERROR { zPragma = Xsqlite3_mprintf(tls, ts+1538, 0) } else { @@ -129720,7 +129778,7 @@ return rc } } else { - zPragma = Xsqlite3_mprintf(tls, ts+32453, libc.VaList(bp, zDb, zThis)) + zPragma = Xsqlite3_mprintf(tls, ts+32500, libc.VaList(bp, zDb, zThis)) } if !(zPragma != 0) { *(*uintptr)(unsafe.Pointer(pazCol)) = uintptr(0) @@ -130400,9 +130458,9 @@ for i = 0; i < nCol; i++ { if 
*(*U8)(unsafe.Pointer(abPK + uintptr(i))) != 0 { - zRet = Xsqlite3_mprintf(tls, ts+32482, + zRet = Xsqlite3_mprintf(tls, ts+32529, libc.VaList(bp, zRet, zSep, zDb1, zTab, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*8)), zDb2, zTab, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*8)))) - zSep = ts + 21509 + zSep = ts + 21556 if zRet == uintptr(0) { break } @@ -130425,9 +130483,9 @@ if int32(*(*U8)(unsafe.Pointer(abPK + uintptr(i)))) == 0 { bHave = 1 zRet = Xsqlite3_mprintf(tls, - ts+32516, + ts+32563, libc.VaList(bp, zRet, zSep, zDb1, zTab, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*8)), zDb2, zTab, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*8)))) - zSep = ts + 32557 + zSep = ts + 32604 if zRet == uintptr(0) { break } @@ -130435,7 +130493,7 @@ } if bHave == 0 { - zRet = Xsqlite3_mprintf(tls, ts+7514, 0) + zRet = Xsqlite3_mprintf(tls, ts+7503, 0) } return zRet @@ -130446,7 +130504,7 @@ defer tls.Free(40) var zRet uintptr = Xsqlite3_mprintf(tls, - ts+32562, + ts+32609, libc.VaList(bp, zDb1, zTbl, zDb2, zTbl, zExpr)) return zRet } @@ -130489,7 +130547,7 @@ rc = SQLITE_NOMEM } else { var zStmt uintptr = Xsqlite3_mprintf(tls, - ts+32640, + ts+32687, libc.VaList(bp, (*Sqlite3_session)(unsafe.Pointer(pSession)).FzDb, (*SessionTable)(unsafe.Pointer(pTab)).FzName, zFrom, (*SessionTable)(unsafe.Pointer(pTab)).FzName, zExpr, zExpr2)) if zStmt == uintptr(0) { rc = SQLITE_NOMEM @@ -130616,7 +130674,7 @@ if !(pzErrMsg != 0) { goto __16 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+32693, 0) + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+32740, 0) __16: ; rc = SQLITE_SCHEMA @@ -131092,7 +131150,7 @@ if 0 == Xsqlite3_stricmp(tls, ts+11332, zTab) { zSql = Xsqlite3_mprintf(tls, - ts+32720, libc.VaList(bp, zDb)) + ts+32767, libc.VaList(bp, zDb)) if zSql == uintptr(0) { *(*int32)(unsafe.Pointer(bp + 24)) = SQLITE_NOMEM } @@ -131101,18 +131159,18 @@ var zSep uintptr = ts + 1538 *(*SessionBuffer)(unsafe.Pointer(bp + 8)) = SessionBuffer{} - sessionAppendStr(tls, bp+8, ts+32830, bp+24) + sessionAppendStr(tls, bp+8, ts+32877, bp+24) sessionAppendIdent(tls, bp+8, zDb, bp+24) sessionAppendStr(tls, bp+8, ts+1551, bp+24) sessionAppendIdent(tls, bp+8, zTab, bp+24) - sessionAppendStr(tls, bp+8, ts+32845, bp+24) + sessionAppendStr(tls, bp+8, ts+32892, bp+24) for i = 0; i < nCol; i++ { if *(*U8)(unsafe.Pointer(abPK + uintptr(i))) != 0 { sessionAppendStr(tls, bp+8, zSep, bp+24) sessionAppendIdent(tls, bp+8, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*8)), bp+24) - sessionAppendStr(tls, bp+8, ts+32853, bp+24) + sessionAppendStr(tls, bp+8, ts+32900, bp+24) sessionAppendInteger(tls, bp+8, i+1, bp+24) - zSep = ts + 21509 + zSep = ts + 21556 } } zSql = (*SessionBuffer)(unsafe.Pointer(bp + 8)).FaBuf @@ -131221,7 +131279,7 @@ if (*Sqlite3_session)(unsafe.Pointer(pSession)).Frc != 0 { return (*Sqlite3_session)(unsafe.Pointer(pSession)).Frc } - *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3_exec(tls, (*Sqlite3_session)(unsafe.Pointer(pSession)).Fdb, ts+32859, uintptr(0), uintptr(0), uintptr(0)) + *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3_exec(tls, (*Sqlite3_session)(unsafe.Pointer(pSession)).Fdb, ts+32906, uintptr(0), uintptr(0), uintptr(0)) if *(*int32)(unsafe.Pointer(bp + 40)) != SQLITE_OK { return *(*int32)(unsafe.Pointer(bp + 40)) } @@ -131313,7 +131371,7 @@ } Xsqlite3_free(tls, (*SessionBuffer)(unsafe.Pointer(bp+24)).FaBuf) - Xsqlite3_exec(tls, db, ts+32879, uintptr(0), uintptr(0), uintptr(0)) + Xsqlite3_exec(tls, db, ts+32926, uintptr(0), uintptr(0), uintptr(0)) 
Xsqlite3_mutex_leave(tls, Xsqlite3_db_mutex(tls, db)) return *(*int32)(unsafe.Pointer(bp + 40)) } @@ -131576,7 +131634,7 @@ rc = sessionInputBuffer(tls, pIn, 9) if rc == SQLITE_OK { if (*SessionInput)(unsafe.Pointer(pIn)).FiNext >= (*SessionInput)(unsafe.Pointer(pIn)).FnData { - rc = Xsqlite3CorruptError(tls, 219078) + rc = Xsqlite3CorruptError(tls, 219169) } else { eType = int32(*(*U8)(unsafe.Pointer((*SessionInput)(unsafe.Pointer(pIn)).FaData + uintptr(libc.PostIncInt32(&(*SessionInput)(unsafe.Pointer(pIn)).FiNext, 1))))) @@ -131599,7 +131657,7 @@ rc = sessionInputBuffer(tls, pIn, *(*int32)(unsafe.Pointer(bp))) if rc == SQLITE_OK { if *(*int32)(unsafe.Pointer(bp)) < 0 || *(*int32)(unsafe.Pointer(bp)) > (*SessionInput)(unsafe.Pointer(pIn)).FnData-(*SessionInput)(unsafe.Pointer(pIn)).FiNext { - rc = Xsqlite3CorruptError(tls, 219098) + rc = Xsqlite3CorruptError(tls, 219189) } else { var enc U8 = func() uint8 { if eType == SQLITE_TEXT { @@ -131641,7 +131699,7 @@ nRead = nRead + sessionVarintGet(tls, (*SessionInput)(unsafe.Pointer(pIn)).FaData+uintptr((*SessionInput)(unsafe.Pointer(pIn)).FiNext+nRead), bp) if *(*int32)(unsafe.Pointer(bp)) < 0 || *(*int32)(unsafe.Pointer(bp)) > 65536 { - rc = Xsqlite3CorruptError(tls, 219152) + rc = Xsqlite3CorruptError(tls, 219243) } else { rc = sessionInputBuffer(tls, pIn, nRead+*(*int32)(unsafe.Pointer(bp))+100) nRead = nRead + *(*int32)(unsafe.Pointer(bp)) @@ -131702,7 +131760,7 @@ (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Ftblhdr.FnBuf = 0 sessionBufferGrow(tls, p+72, int64(nByte), bp+4) } else { - *(*int32)(unsafe.Pointer(bp + 4)) = Xsqlite3CorruptError(tls, 219240) + *(*int32)(unsafe.Pointer(bp + 4)) = Xsqlite3CorruptError(tls, 219331) } } @@ -131776,13 +131834,13 @@ } if (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FzTab == uintptr(0) || (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FbPatchset != 0 && (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FbInvert != 0 { - return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219326)) + return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219417)) } (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fop = int32(op) (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FbIndirect = int32(*(*U8)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fin.FaData + uintptr(libc.PostIncInt32(&(*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fin.FiNext, 1))))) if (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fop != SQLITE_UPDATE && (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fop != SQLITE_DELETE && (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fop != SQLITE_INSERT { - return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219332)) + return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219423)) } if paRec != 0 { @@ -131844,7 +131902,7 @@ if *(*U8)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FabPK + uintptr(i))) != 0 { *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FapValue + uintptr(i)*8)) = *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FapValue + uintptr(i+(*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FnCol)*8)) if *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FapValue + uintptr(i)*8)) == uintptr(0) { - return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219376)) + return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219467)) } *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FapValue + uintptr(i+(*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FnCol)*8)) = uintptr(0) } @@ -132217,7 
+132275,7 @@ goto __6 __11: - *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3CorruptError(tls, 219741) + *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3CorruptError(tls, 219832) goto finished_invert __6: ; @@ -132396,34 +132454,34 @@ (*SessionUpdate)(unsafe.Pointer(pUp)).FaMask = pUp + 1*24 libc.Xmemcpy(tls, (*SessionUpdate)(unsafe.Pointer(pUp)).FaMask, (*SessionApplyCtx)(unsafe.Pointer(p)).FaUpdateMask, uint64(nU32)*uint64(unsafe.Sizeof(U32(0)))) - sessionAppendStr(tls, bp, ts+32897, bp+16) + sessionAppendStr(tls, bp, ts+32944, bp+16) sessionAppendIdent(tls, bp, (*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FzTab, bp+16) - sessionAppendStr(tls, bp, ts+32910, bp+16) + sessionAppendStr(tls, bp, ts+32957, bp+16) for ii = 0; ii < (*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FnCol; ii++ { if int32(*(*U8)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FabPK + uintptr(ii)))) == 0 && *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FapValue + uintptr((*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FnCol+ii)*8)) != 0 { sessionAppendStr(tls, bp, zSep, bp+16) sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(ii)*8)), bp+16) - sessionAppendStr(tls, bp, ts+32916, bp+16) + sessionAppendStr(tls, bp, ts+32963, bp+16) sessionAppendInteger(tls, bp, ii*2+1, bp+16) zSep = ts + 14598 } } zSep = ts + 1538 - sessionAppendStr(tls, bp, ts+32845, bp+16) + sessionAppendStr(tls, bp, ts+32892, bp+16) for ii = 0; ii < (*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FnCol; ii++ { if *(*U8)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FabPK + uintptr(ii))) != 0 || bPatchset == 0 && *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FapValue + uintptr(ii)*8)) != 0 { sessionAppendStr(tls, bp, zSep, bp+16) if bStat1 != 0 && ii == 1 { sessionAppendStr(tls, bp, - ts+32921, bp+16) + ts+32968, bp+16) } else { sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(ii)*8)), bp+16) - sessionAppendStr(tls, bp, ts+32853, bp+16) + sessionAppendStr(tls, bp, ts+32900, bp+16) sessionAppendInteger(tls, bp, ii*2+2, bp+16) } - zSep = ts + 21509 + zSep = ts + 21556 } } @@ -132475,34 +132533,34 @@ *(*SessionBuffer)(unsafe.Pointer(bp)) = SessionBuffer{} var nPk int32 = 0 - sessionAppendStr(tls, bp, ts+32996, bp+16) + sessionAppendStr(tls, bp, ts+33043, bp+16) sessionAppendIdent(tls, bp, zTab, bp+16) - sessionAppendStr(tls, bp, ts+32845, bp+16) + sessionAppendStr(tls, bp, ts+32892, bp+16) for i = 0; i < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol; i++ { if *(*U8)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FabPK + uintptr(i))) != 0 { nPk++ sessionAppendStr(tls, bp, zSep, bp+16) sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(i)*8)), bp+16) - sessionAppendStr(tls, bp, ts+32916, bp+16) + sessionAppendStr(tls, bp, ts+32963, bp+16) sessionAppendInteger(tls, bp, i+1, bp+16) - zSep = ts + 21509 + zSep = ts + 21556 } } if nPk < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol { - sessionAppendStr(tls, bp, ts+33014, bp+16) + sessionAppendStr(tls, bp, ts+33061, bp+16) sessionAppendInteger(tls, bp, (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol+1, bp+16) - sessionAppendStr(tls, bp, ts+32557, bp+16) + sessionAppendStr(tls, bp, ts+32604, bp+16) zSep = ts + 1538 for i = 0; i < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol; i++ { if !(int32(*(*U8)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FabPK + 
uintptr(i)))) != 0) { sessionAppendStr(tls, bp, zSep, bp+16) sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(i)*8)), bp+16) - sessionAppendStr(tls, bp, ts+32853, bp+16) + sessionAppendStr(tls, bp, ts+32900, bp+16) sessionAppendInteger(tls, bp, i+1, bp+16) - zSep = ts + 33022 + zSep = ts + 33069 } } sessionAppendStr(tls, bp, ts+4941, bp+16) @@ -132529,9 +132587,9 @@ var i int32 *(*SessionBuffer)(unsafe.Pointer(bp)) = SessionBuffer{} - sessionAppendStr(tls, bp, ts+33027, bp+16) + sessionAppendStr(tls, bp, ts+33074, bp+16) sessionAppendIdent(tls, bp, zTab, bp+16) - sessionAppendStr(tls, bp, ts+21515, bp+16) + sessionAppendStr(tls, bp, ts+21562, bp+16) for i = 0; i < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol; i++ { if i != 0 { sessionAppendStr(tls, bp, ts+14598, bp+16) @@ -132539,9 +132597,9 @@ sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(i)*8)), bp+16) } - sessionAppendStr(tls, bp, ts+33045, bp+16) + sessionAppendStr(tls, bp, ts+33092, bp+16) for i = 1; i < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol; i++ { - sessionAppendStr(tls, bp, ts+33056, bp+16) + sessionAppendStr(tls, bp, ts+33103, bp+16) } sessionAppendStr(tls, bp, ts+4941, bp+16) @@ -132560,11 +132618,11 @@ var rc int32 = sessionSelectRow(tls, db, ts+11332, p) if rc == SQLITE_OK { rc = sessionPrepare(tls, db, p+16, - ts+33060) + ts+33107) } if rc == SQLITE_OK { rc = sessionPrepare(tls, db, p+8, - ts+33173) + ts+33220) } return rc } @@ -132592,7 +132650,7 @@ f func(*libc.TLS, uintptr, int32, uintptr) int32 })(unsafe.Pointer(&struct{ uintptr }{xValue})).f(tls, pIter, i, bp) if *(*uintptr)(unsafe.Pointer(bp)) == uintptr(0) { - rc = Xsqlite3CorruptError(tls, 220219) + rc = Xsqlite3CorruptError(tls, 220310) } else { rc = sessionBindValue(tls, pStmt, i+1, *(*uintptr)(unsafe.Pointer(bp))) } @@ -132845,7 +132903,7 @@ if *(*int32)(unsafe.Pointer(bp + 4)) != 0 { rc = sessionApplyOneOp(tls, pIter, pApply, xConflict, pCtx, uintptr(0), uintptr(0)) } else if *(*int32)(unsafe.Pointer(bp)) != 0 { - rc = Xsqlite3_exec(tls, db, ts+33317, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33364, uintptr(0), uintptr(0), uintptr(0)) if rc == SQLITE_OK { rc = sessionBindRow(tls, pIter, *(*uintptr)(unsafe.Pointer(&struct { @@ -132861,7 +132919,7 @@ rc = sessionApplyOneOp(tls, pIter, pApply, xConflict, pCtx, uintptr(0), uintptr(0)) } if rc == SQLITE_OK { - rc = Xsqlite3_exec(tls, db, ts+33338, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33385, uintptr(0), uintptr(0), uintptr(0)) } } } @@ -132934,10 +132992,10 @@ (*SessionApplyCtx)(unsafe.Pointer(bp + 48)).FbInvertConstraints = libc.BoolInt32(!!(flags&SQLITE_CHANGESETAPPLY_INVERT != 0)) Xsqlite3_mutex_enter(tls, Xsqlite3_db_mutex(tls, db)) if flags&SQLITE_CHANGESETAPPLY_NOSAVEPOINT == 0 { - rc = Xsqlite3_exec(tls, db, ts+33357, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33404, uintptr(0), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { - rc = Xsqlite3_exec(tls, db, ts+33383, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33430, uintptr(0), uintptr(0), uintptr(0)) } for rc == SQLITE_OK && SQLITE_ROW == Xsqlite3changeset_next(tls, pIter) { Xsqlite3changeset_op(tls, pIter, bp+176, bp+184, bp+188, uintptr(0)) @@ -132996,16 +133054,16 @@ if (*SessionApplyCtx)(unsafe.Pointer(bp+48)).FnCol == 0 { schemaMismatch = 1 Xsqlite3_log(tls, SQLITE_SCHEMA, - ts+33413, libc.VaList(bp+8, 
*(*uintptr)(unsafe.Pointer(bp + 200)))) + ts+33460, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(bp + 200)))) } else if (*SessionApplyCtx)(unsafe.Pointer(bp+48)).FnCol < *(*int32)(unsafe.Pointer(bp + 184)) { schemaMismatch = 1 Xsqlite3_log(tls, SQLITE_SCHEMA, - ts+33457, + ts+33504, libc.VaList(bp+16, *(*uintptr)(unsafe.Pointer(bp + 200)), (*SessionApplyCtx)(unsafe.Pointer(bp+48)).FnCol, *(*int32)(unsafe.Pointer(bp + 184)))) } else if *(*int32)(unsafe.Pointer(bp + 184)) < nMinCol || libc.Xmemcmp(tls, (*SessionApplyCtx)(unsafe.Pointer(bp+48)).FabPK, *(*uintptr)(unsafe.Pointer(bp + 192)), uint64(*(*int32)(unsafe.Pointer(bp + 184)))) != 0 { schemaMismatch = 1 Xsqlite3_log(tls, SQLITE_SCHEMA, - ts+33528, libc.VaList(bp+40, *(*uintptr)(unsafe.Pointer(bp + 200)))) + ts+33575, libc.VaList(bp+40, *(*uintptr)(unsafe.Pointer(bp + 200)))) } else { (*SessionApplyCtx)(unsafe.Pointer(bp + 48)).FnCol = *(*int32)(unsafe.Pointer(bp + 184)) if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(bp + 200)), ts+11332) { @@ -133059,14 +133117,14 @@ } } } - Xsqlite3_exec(tls, db, ts+33588, uintptr(0), uintptr(0), uintptr(0)) + Xsqlite3_exec(tls, db, ts+33635, uintptr(0), uintptr(0), uintptr(0)) if flags&SQLITE_CHANGESETAPPLY_NOSAVEPOINT == 0 { if rc == SQLITE_OK { - rc = Xsqlite3_exec(tls, db, ts+33618, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+33665, uintptr(0), uintptr(0), uintptr(0)) } else { - Xsqlite3_exec(tls, db, ts+33642, uintptr(0), uintptr(0), uintptr(0)) - Xsqlite3_exec(tls, db, ts+33618, uintptr(0), uintptr(0), uintptr(0)) + Xsqlite3_exec(tls, db, ts+33689, uintptr(0), uintptr(0), uintptr(0)) + Xsqlite3_exec(tls, db, ts+33665, uintptr(0), uintptr(0), uintptr(0)) } } @@ -134314,7 +134372,7 @@ fts5yy_pop_parser_stack(tls, fts5yypParser) } - sqlite3Fts5ParseError(tls, pParse, ts+33670, 0) + sqlite3Fts5ParseError(tls, pParse, ts+33717, 0) (*Fts5yyParser)(unsafe.Pointer(fts5yypParser)).FpParse = pParse @@ -134602,7 +134660,7 @@ _ = fts5yymajor sqlite3Fts5ParseError(tls, - pParse, ts+33698, libc.VaList(bp, fts5yyminor.Fn, fts5yyminor.Fp)) + pParse, ts+33745, libc.VaList(bp, fts5yyminor.Fn, fts5yyminor.Fp)) (*Fts5yyParser)(unsafe.Pointer(fts5yypParser)).FpParse = pParse @@ -134789,7 +134847,7 @@ if n < 0 { n = int32(libc.Xstrlen(tls, z)) } - (*HighlightContext)(unsafe.Pointer(p)).FzOut = Xsqlite3_mprintf(tls, ts+33729, libc.VaList(bp, (*HighlightContext)(unsafe.Pointer(p)).FzOut, n, z)) + (*HighlightContext)(unsafe.Pointer(p)).FzOut = Xsqlite3_mprintf(tls, ts+33776, libc.VaList(bp, (*HighlightContext)(unsafe.Pointer(p)).FzOut, n, z)) if (*HighlightContext)(unsafe.Pointer(p)).FzOut == uintptr(0) { *(*int32)(unsafe.Pointer(pRc)) = SQLITE_NOMEM } @@ -134857,7 +134915,7 @@ var iCol int32 if nVal != 3 { - var zErr uintptr = ts + 33736 + var zErr uintptr = ts + 33783 Xsqlite3_result_error(tls, pCtx, zErr, -1) return } @@ -135039,7 +135097,7 @@ var nCol int32 if nVal != 5 { - var zErr uintptr = ts + 33786 + var zErr uintptr = ts + 33833 Xsqlite3_result_error(tls, pCtx, zErr, -1) return } @@ -135363,13 +135421,13 @@ defer tls.Free(96) *(*[3]Builtin)(unsafe.Pointer(bp)) = [3]Builtin{ - {FzFunc: ts + 33834, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { + {FzFunc: ts + 33881, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr, int32, uintptr) }{fts5SnippetFunction}))}, - {FzFunc: ts + 33842, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { + {FzFunc: ts + 33889, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr, 
int32, uintptr) }{fts5HighlightFunction}))}, - {FzFunc: ts + 33852, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { + {FzFunc: ts + 33899, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr, int32, uintptr) }{fts5Bm25Function}))}, } @@ -135920,7 +135978,7 @@ *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_OK var nCmd int32 = int32(libc.Xstrlen(tls, zCmd)) - if Xsqlite3_strnicmp(tls, ts+33857, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+33904, zCmd, nCmd) == 0 { var nByte int32 = int32(uint64(unsafe.Sizeof(int32(0))) * uint64(FTS5_MAX_PREFIX_INDEXES)) var p uintptr var bFirst int32 = 1 @@ -135947,14 +136005,14 @@ break } if int32(*(*int8)(unsafe.Pointer(p))) < '0' || int32(*(*int8)(unsafe.Pointer(p))) > '9' { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+33864, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+33911, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR break } if (*Fts5Config)(unsafe.Pointer(pConfig)).FnPrefix == FTS5_MAX_PREFIX_INDEXES { *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, - ts+33895, libc.VaList(bp, FTS5_MAX_PREFIX_INDEXES)) + ts+33942, libc.VaList(bp, FTS5_MAX_PREFIX_INDEXES)) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR break } @@ -135965,7 +136023,7 @@ } if nPre <= 0 || nPre >= 1000 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+33928, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+33975, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR break } @@ -135978,7 +136036,7 @@ return *(*int32)(unsafe.Pointer(bp + 40)) } - if Xsqlite3_strnicmp(tls, ts+33965, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+34012, zCmd, nCmd) == 0 { var p uintptr = zArg var nArg Sqlite3_int64 = Sqlite3_int64(libc.Xstrlen(tls, zArg) + uint64(1)) var azArg uintptr = sqlite3Fts5MallocZero(tls, bp+40, int64(uint64(unsafe.Sizeof(uintptr(0)))*uint64(nArg))) @@ -135987,7 +136045,7 @@ if azArg != 0 && pSpace != 0 { if (*Fts5Config)(unsafe.Pointer(pConfig)).FpTok != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+33974, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34021, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { for nArg = int64(0); p != 0 && *(*int8)(unsafe.Pointer(p)) != 0; nArg++ { @@ -136006,7 +136064,7 @@ } } if p == uintptr(0) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34007, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34054, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { *(*int32)(unsafe.Pointer(bp + 40)) = sqlite3Fts5GetTokenizer(tls, pGlobal, @@ -136021,14 +136079,14 @@ return *(*int32)(unsafe.Pointer(bp + 40)) } - if Xsqlite3_strnicmp(tls, ts+34041, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+34088, zCmd, nCmd) == 0 { if (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent != FTS5_CONTENT_NORMAL { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34049, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34096, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { if *(*int8)(unsafe.Pointer(zArg)) != 0 { (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent = FTS5_CONTENT_EXTERNAL - (*Fts5Config)(unsafe.Pointer(pConfig)).FzContent = sqlite3Fts5Mprintf(tls, bp+40, ts+34081, libc.VaList(bp+8, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, zArg)) + (*Fts5Config)(unsafe.Pointer(pConfig)).FzContent = sqlite3Fts5Mprintf(tls, bp+40, ts+34128, libc.VaList(bp+8, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, 
zArg)) } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent = FTS5_CONTENT_NONE } @@ -136036,9 +136094,9 @@ return *(*int32)(unsafe.Pointer(bp + 40)) } - if Xsqlite3_strnicmp(tls, ts+34087, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+34134, zCmd, nCmd) == 0 { if (*Fts5Config)(unsafe.Pointer(pConfig)).FzContentRowid != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34101, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34148, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FzContentRowid = sqlite3Fts5Strndup(tls, bp+40, zArg, -1) @@ -136046,9 +136104,9 @@ return *(*int32)(unsafe.Pointer(bp + 40)) } - if Xsqlite3_strnicmp(tls, ts+34139, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+34186, zCmd, nCmd) == 0 { if int32(*(*int8)(unsafe.Pointer(zArg))) != '0' && int32(*(*int8)(unsafe.Pointer(zArg))) != '1' || int32(*(*int8)(unsafe.Pointer(zArg + 1))) != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34150, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34197, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize = libc.Bool32(int32(*(*int8)(unsafe.Pointer(zArg))) == '1') @@ -136060,17 +136118,17 @@ *(*[4]Fts5Enum)(unsafe.Pointer(bp + 48)) = [4]Fts5Enum{ {FzName: ts + 8010, FeVal: FTS5_DETAIL_NONE}, {FzName: ts + 17329}, - {FzName: ts + 34185, FeVal: FTS5_DETAIL_COLUMNS}, + {FzName: ts + 34232, FeVal: FTS5_DETAIL_COLUMNS}, {}, } if libc.AssignPtrInt32(bp+40, fts5ConfigSetEnum(tls, bp+48, zArg, pConfig+92)) != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34193, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34240, 0) } return *(*int32)(unsafe.Pointer(bp + 40)) } - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34224, libc.VaList(bp+24, nCmd, zCmd)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34271, libc.VaList(bp+24, nCmd, zCmd)) return SQLITE_ERROR } @@ -136117,15 +136175,15 @@ defer tls.Free(16) var rc int32 = SQLITE_OK - if 0 == Xsqlite3_stricmp(tls, zCol, ts+22175) || + if 0 == Xsqlite3_stricmp(tls, zCol, ts+22222) || 0 == Xsqlite3_stricmp(tls, zCol, ts+16251) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34252, libc.VaList(bp, zCol)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34299, libc.VaList(bp, zCol)) rc = SQLITE_ERROR } else if zArg != 0 { - if 0 == Xsqlite3_stricmp(tls, zArg, ts+34282) { + if 0 == Xsqlite3_stricmp(tls, zArg, ts+34329) { *(*U8)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(p)).FabUnindexed + uintptr((*Fts5Config)(unsafe.Pointer(p)).FnCol))) = U8(1) } else { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34292, libc.VaList(bp+8, zArg)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34339, libc.VaList(bp+8, zArg)) rc = SQLITE_ERROR } } @@ -136142,13 +136200,13 @@ *(*int32)(unsafe.Pointer(bp + 24)) = SQLITE_OK *(*Fts5Buffer)(unsafe.Pointer(bp + 32)) = Fts5Buffer{} - sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+34323, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(p)).FzContentRowid)) + sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+34370, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(p)).FzContentRowid)) if (*Fts5Config)(unsafe.Pointer(p)).FeContent != FTS5_CONTENT_NONE { for i = 0; i < (*Fts5Config)(unsafe.Pointer(p)).FnCol; i++ { if (*Fts5Config)(unsafe.Pointer(p)).FeContent == FTS5_CONTENT_EXTERNAL { - 
sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+34328, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(p)).FazCol + uintptr(i)*8)))) + sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+34375, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(p)).FazCol + uintptr(i)*8)))) } else { - sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+34335, libc.VaList(bp+16, i)) + sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+34382, libc.VaList(bp+16, i)) } } } @@ -136186,8 +136244,8 @@ (*Fts5Config)(unsafe.Pointer(pRet)).FzName = sqlite3Fts5Strndup(tls, bp+40, *(*uintptr)(unsafe.Pointer(azArg + 2*8)), -1) (*Fts5Config)(unsafe.Pointer(pRet)).FbColumnsize = 1 (*Fts5Config)(unsafe.Pointer(pRet)).FeDetail = FTS5_DETAIL_FULL - if *(*int32)(unsafe.Pointer(bp + 40)) == SQLITE_OK && Xsqlite3_stricmp(tls, (*Fts5Config)(unsafe.Pointer(pRet)).FzName, ts+22175) == 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34343, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pRet)).FzName)) + if *(*int32)(unsafe.Pointer(bp + 40)) == SQLITE_OK && Xsqlite3_stricmp(tls, (*Fts5Config)(unsafe.Pointer(pRet)).FzName, ts+22222) == 0 { + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34390, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pRet)).FzName)) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } @@ -136219,7 +136277,7 @@ if *(*int32)(unsafe.Pointer(bp + 40)) == SQLITE_OK { if z == uintptr(0) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34372, libc.VaList(bp+8, zOrig)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+34419, libc.VaList(bp+8, zOrig)) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { if bOption != 0 { @@ -136256,14 +136314,14 @@ var zTail uintptr = uintptr(0) if (*Fts5Config)(unsafe.Pointer(pRet)).FeContent == FTS5_CONTENT_NORMAL { - zTail = ts + 34041 + zTail = ts + 34088 } else if (*Fts5Config)(unsafe.Pointer(pRet)).FbColumnsize != 0 { - zTail = ts + 34392 + zTail = ts + 34439 } if zTail != 0 { (*Fts5Config)(unsafe.Pointer(pRet)).FzContent = sqlite3Fts5Mprintf(tls, - bp+40, ts+34400, libc.VaList(bp+16, (*Fts5Config)(unsafe.Pointer(pRet)).FzDb, (*Fts5Config)(unsafe.Pointer(pRet)).FzName, zTail)) + bp+40, ts+34447, libc.VaList(bp+16, (*Fts5Config)(unsafe.Pointer(pRet)).FzDb, (*Fts5Config)(unsafe.Pointer(pRet)).FzName, zTail)) } } @@ -136312,7 +136370,7 @@ *(*int32)(unsafe.Pointer(bp + 48)) = SQLITE_OK var zSql uintptr - zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34411, 0) + zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34458, 0) for i = 0; zSql != 0 && i < (*Fts5Config)(unsafe.Pointer(pConfig)).FnCol; i++ { var zSep uintptr = func() uintptr { if i == 0 { @@ -136320,10 +136378,10 @@ } return ts + 14598 }() - zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34427, libc.VaList(bp, zSql, zSep, *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(pConfig)).FazCol + uintptr(i)*8)))) + zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34474, libc.VaList(bp, zSql, zSep, *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(pConfig)).FazCol + uintptr(i)*8)))) } - zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34434, - libc.VaList(bp+24, zSql, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, ts+22175)) + zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+34481, + libc.VaList(bp+24, zSql, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, ts+22222)) if zSql != 0 { *(*int32)(unsafe.Pointer(bp + 48)) = Xsqlite3_declare_vtab(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, zSql) @@ -136433,7 +136491,7 @@ var rc int32 = SQLITE_OK - if 
0 == Xsqlite3_stricmp(tls, zKey, ts+34460) { + if 0 == Xsqlite3_stricmp(tls, zKey, ts+34507) { var pgsz int32 = 0 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { pgsz = Xsqlite3_value_int(tls, pVal) @@ -136443,7 +136501,7 @@ } else { (*Fts5Config)(unsafe.Pointer(pConfig)).Fpgsz = pgsz } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34465) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34512) { var nHashSize int32 = -1 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { nHashSize = Xsqlite3_value_int(tls, pVal) @@ -136453,7 +136511,7 @@ } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FnHashSize = nHashSize } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34474) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34521) { var nAutomerge int32 = -1 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { nAutomerge = Xsqlite3_value_int(tls, pVal) @@ -136466,7 +136524,7 @@ } (*Fts5Config)(unsafe.Pointer(pConfig)).FnAutomerge = nAutomerge } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34484) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34531) { var nUsermerge int32 = -1 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { nUsermerge = Xsqlite3_value_int(tls, pVal) @@ -136476,7 +136534,7 @@ } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FnUsermerge = nUsermerge } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34494) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+34541) { var nCrisisMerge int32 = -1 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { nCrisisMerge = Xsqlite3_value_int(tls, pVal) @@ -136492,7 +136550,7 @@ } (*Fts5Config)(unsafe.Pointer(pConfig)).FnCrisisMerge = nCrisisMerge } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+22175) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+22222) { var zIn uintptr = Xsqlite3_value_text(tls, pVal) rc = sqlite3Fts5ConfigParseRank(tls, zIn, bp, bp+8) @@ -136515,7 +136573,7 @@ bp := tls.Alloc(52) defer tls.Free(52) - var zSelect uintptr = ts + 34506 + var zSelect uintptr = ts + 34553 var zSql uintptr *(*uintptr)(unsafe.Pointer(bp + 40)) = uintptr(0) *(*int32)(unsafe.Pointer(bp + 32)) = SQLITE_OK @@ -136537,7 +136595,7 @@ for SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 40))) { var zK uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 40)), 0) var pVal uintptr = Xsqlite3_column_value(tls, *(*uintptr)(unsafe.Pointer(bp + 40)), 1) - if 0 == Xsqlite3_stricmp(tls, zK, ts+34538) { + if 0 == Xsqlite3_stricmp(tls, zK, ts+34585) { iVersion = Xsqlite3_value_int(tls, pVal) } else { *(*int32)(unsafe.Pointer(bp + 48)) = 0 @@ -136551,7 +136609,7 @@ *(*int32)(unsafe.Pointer(bp + 32)) = SQLITE_ERROR if (*Fts5Config)(unsafe.Pointer(pConfig)).FpzErrmsg != 0 { *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(pConfig)).FpzErrmsg)) = Xsqlite3_mprintf(tls, - ts+34546, + ts+34593, libc.VaList(bp+16, iVersion, FTS5_CURRENT_VERSION)) } } @@ -136649,7 +136707,7 @@ } } if int32(*(*int8)(unsafe.Pointer(z2))) == 0 { - sqlite3Fts5ParseError(tls, pParse, ts+34611, 0) + sqlite3Fts5ParseError(tls, pParse, ts+34658, 0) return FTS5_EOF } } @@ -136662,20 +136720,20 @@ { var z2 uintptr if sqlite3Fts5IsBareword(tls, *(*int8)(unsafe.Pointer(z))) == 0 { - sqlite3Fts5ParseError(tls, pParse, ts+34631, libc.VaList(bp, z)) + sqlite3Fts5ParseError(tls, pParse, ts+34678, libc.VaList(bp, z)) return FTS5_EOF } tok = FTS5_STRING for z2 = z + 1; sqlite3Fts5IsBareword(tls, *(*int8)(unsafe.Pointer(z2))) != 0; z2++ { } (*Fts5Token)(unsafe.Pointer(pToken)).Fn = int32((int64(z2) - int64(z)) / 
1) - if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 2 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+34662, uint64(2)) == 0 { + if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 2 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+34709, uint64(2)) == 0 { tok = FTS5_OR } - if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 3 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+34665, uint64(3)) == 0 { + if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 3 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+34712, uint64(3)) == 0 { tok = FTS5_NOT } - if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 3 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+30047, uint64(3)) == 0 { + if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 3 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+30094, uint64(3)) == 0 { tok = FTS5_AND } break @@ -138453,9 +138511,9 @@ bp := tls.Alloc(16) defer tls.Free(16) - if (*Fts5Token)(unsafe.Pointer(pTok)).Fn != 4 || libc.Xmemcmp(tls, ts+34669, (*Fts5Token)(unsafe.Pointer(pTok)).Fp, uint64(4)) != 0 { + if (*Fts5Token)(unsafe.Pointer(pTok)).Fn != 4 || libc.Xmemcmp(tls, ts+34716, (*Fts5Token)(unsafe.Pointer(pTok)).Fp, uint64(4)) != 0 { sqlite3Fts5ParseError(tls, - pParse, ts+33698, libc.VaList(bp, (*Fts5Token)(unsafe.Pointer(pTok)).Fn, (*Fts5Token)(unsafe.Pointer(pTok)).Fp)) + pParse, ts+33745, libc.VaList(bp, (*Fts5Token)(unsafe.Pointer(pTok)).Fn, (*Fts5Token)(unsafe.Pointer(pTok)).Fp)) } } @@ -138471,7 +138529,7 @@ var c int8 = *(*int8)(unsafe.Pointer((*Fts5Token)(unsafe.Pointer(p)).Fp + uintptr(i))) if int32(c) < '0' || int32(c) > '9' { sqlite3Fts5ParseError(tls, - pParse, ts+34674, libc.VaList(bp, (*Fts5Token)(unsafe.Pointer(p)).Fn, (*Fts5Token)(unsafe.Pointer(p)).Fp)) + pParse, ts+34721, libc.VaList(bp, (*Fts5Token)(unsafe.Pointer(p)).Fn, (*Fts5Token)(unsafe.Pointer(p)).Fp)) return } nNear = nNear*10 + (int32(*(*int8)(unsafe.Pointer((*Fts5Token)(unsafe.Pointer(p)).Fp + uintptr(i)))) - '0') @@ -138558,7 +138616,7 @@ } } if iCol == (*Fts5Config)(unsafe.Pointer(pConfig)).FnCol { - sqlite3Fts5ParseError(tls, pParse, ts+20512, libc.VaList(bp, z)) + sqlite3Fts5ParseError(tls, pParse, ts+20559, libc.VaList(bp, z)) } else { pRet = fts5ParseColset(tls, pParse, pColset, iCol) } @@ -138639,7 +138697,7 @@ *(*uintptr)(unsafe.Pointer(bp)) = pColset if (*Fts5Config)(unsafe.Pointer((*Fts5Parse)(unsafe.Pointer(pParse)).FpConfig)).FeDetail == FTS5_DETAIL_NONE { sqlite3Fts5ParseError(tls, pParse, - ts+34703, 0) + ts+34750, 0) } else { fts5ParseSetColset(tls, pParse, pExpr, pColset, bp) } @@ -138809,12 +138867,12 @@ (*Fts5ExprPhrase)(unsafe.Pointer(pPhrase)).FnTerm > 1 || (*Fts5ExprPhrase)(unsafe.Pointer(pPhrase)).FnTerm > 0 && (*Fts5ExprTerm)(unsafe.Pointer(pPhrase+32)).FbFirst != 0 { sqlite3Fts5ParseError(tls, pParse, - ts+34756, + ts+34803, libc.VaList(bp, func() uintptr { if (*Fts5ExprNearset)(unsafe.Pointer(pNear)).FnPhrase == 1 { - return ts + 34806 + return ts + 34853 } - return ts + 34669 + return ts + 34716 }())) Xsqlite3_free(tls, pRet) pRet = uintptr(0) @@ -139757,7 +139815,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpReader == uintptr(0) && rc == SQLITE_OK { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig rc = Xsqlite3_blob_open(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, - (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl, ts+34813, iRowid, 0, p+56) + (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl, ts+34860, iRowid, 0, 
p+56) } if rc == SQLITE_ERROR { @@ -139836,7 +139894,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpWriter == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig fts5IndexPrepareStmt(tls, p, p+64, Xsqlite3_mprintf(tls, - ts+34819, + ts+34866, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName))) if (*Fts5Index)(unsafe.Pointer(p)).Frc != 0 { return @@ -139861,7 +139919,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpDeleter == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig var zSql uintptr = Xsqlite3_mprintf(tls, - ts+34870, + ts+34917, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) if fts5IndexPrepareStmt(tls, p, p+72, zSql) != 0 { return @@ -139884,7 +139942,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpIdxDeleter == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig fts5IndexPrepareStmt(tls, p, p+88, Xsqlite3_mprintf(tls, - ts+34919, + ts+34966, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName))) } if (*Fts5Index)(unsafe.Pointer(p)).Frc == SQLITE_OK { @@ -140123,7 +140181,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).Frc == SQLITE_OK { if (*Fts5Index)(unsafe.Pointer(p)).FpDataVersion == uintptr(0) { (*Fts5Index)(unsafe.Pointer(p)).Frc = fts5IndexPrepareStmt(tls, p, p+112, - Xsqlite3_mprintf(tls, ts+34959, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer((*Fts5Index)(unsafe.Pointer(p)).FpConfig)).FzDb))) + Xsqlite3_mprintf(tls, ts+35006, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer((*Fts5Index)(unsafe.Pointer(p)).FpConfig)).FzDb))) if (*Fts5Index)(unsafe.Pointer(p)).Frc != 0 { return int64(0) } @@ -141322,7 +141380,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpIdxSelect == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig fts5IndexPrepareStmt(tls, p, p+96, Xsqlite3_mprintf(tls, - ts+34982, + ts+35029, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName))) } return (*Fts5Index)(unsafe.Pointer(p)).FpIdxSelect @@ -142788,7 +142846,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpIdxWriter == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig fts5IndexPrepareStmt(tls, p, p+80, Xsqlite3_mprintf(tls, - ts+35066, + ts+35113, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName))) } @@ -143870,13 +143928,13 @@ if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { (*Fts5Index)(unsafe.Pointer(p)).FpConfig = pConfig (*Fts5Index)(unsafe.Pointer(p)).FnWorkUnit = FTS5_WORK_UNIT - (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl = sqlite3Fts5Mprintf(tls, bp+8, ts+35123, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) + (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl = sqlite3Fts5Mprintf(tls, bp+8, ts+35170, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) if (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl != 0 && bCreate != 0 { *(*int32)(unsafe.Pointer(bp + 8)) = sqlite3Fts5CreateTable(tls, - pConfig, ts+25040, ts+35131, 0, pzErr) + pConfig, ts+25087, ts+35178, 0, pzErr) if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { *(*int32)(unsafe.Pointer(bp + 8)) = sqlite3Fts5CreateTable(tls, pConfig, ts+11472, - ts+35166, + ts+35213, 1, pzErr) } if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { @@ -144129,7 +144187,7 @@ sqlite3Fts5Put32(tls, bp, iNew) rc = Xsqlite3_blob_open(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, 
(*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl, - ts+34813, int64(FTS5_STRUCTURE_ROWID), 1, bp+8) + ts+34860, int64(FTS5_STRUCTURE_ROWID), 1, bp+8) if rc == SQLITE_OK { Xsqlite3_blob_write(tls, *(*uintptr)(unsafe.Pointer(bp + 8)), bp, 4, 0) rc = Xsqlite3_blob_close(tls, *(*uintptr)(unsafe.Pointer(bp + 8))) @@ -144243,7 +144301,7 @@ } fts5IndexPrepareStmt(tls, p, bp+24, Xsqlite3_mprintf(tls, - ts+35210, + ts+35257, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, (*Fts5StructureSegment)(unsafe.Pointer(pSeg)).FiSegid))) for (*Fts5Index)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) { @@ -144413,7 +144471,7 @@ } else { (*Fts5Buffer)(unsafe.Pointer(bp + 16)).Fn = 0 fts5SegiterPoslist(tls, p, *(*uintptr)(unsafe.Pointer(bp))+96+uintptr((*Fts5CResult)(unsafe.Pointer((*Fts5Iter)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FaFirst+1*4)).FiFirst)*120, uintptr(0), bp+16) - sqlite3Fts5BufferAppendBlob(tls, p+52, bp+16, uint32(4), ts+35296) + sqlite3Fts5BufferAppendBlob(tls, p+52, bp+16, uint32(4), ts+35343) for 0 == sqlite3Fts5PoslistNext64(tls, (*Fts5Buffer)(unsafe.Pointer(bp+16)).Fp, (*Fts5Buffer)(unsafe.Pointer(bp+16)).Fn, bp+32, bp+40) { var iCol int32 = int32(*(*I64)(unsafe.Pointer(bp + 40)) >> 32) var iTokOff int32 = int32(*(*I64)(unsafe.Pointer(bp + 40)) & int64(0x7FFFFFFF)) @@ -144684,7 +144742,7 @@ if (*Fts5Config)(unsafe.Pointer(pConfig)).FbLock != 0 { (*Fts5Table)(unsafe.Pointer(pTab)).Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+35301, 0) + ts+35348, 0) return SQLITE_ERROR } @@ -145108,7 +145166,7 @@ (*Fts5Sorter)(unsafe.Pointer(pSorter)).FnIdx = nPhrase rc = fts5PrepareStatement(tls, pSorter, pConfig, - ts+35340, + ts+35387, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zRank, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, func() uintptr { if zRankArgs != 0 { @@ -145124,9 +145182,9 @@ }(), func() uintptr { if bDesc != 0 { - return ts + 35395 + return ts + 35442 } - return ts + 35400 + return ts + 35447 }())) (*Fts5Cursor)(unsafe.Pointer(pCsr)).FpSorter = pSorter @@ -145172,12 +145230,12 @@ (*Fts5Cursor)(unsafe.Pointer(pCsr)).FePlan = FTS5_PLAN_SPECIAL - if n == 5 && 0 == Xsqlite3_strnicmp(tls, ts+35404, z, n) { + if n == 5 && 0 == Xsqlite3_strnicmp(tls, ts+35451, z, n) { (*Fts5Cursor)(unsafe.Pointer(pCsr)).FiSpecial = I64(sqlite3Fts5IndexReads(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.FpIndex)) } else if n == 2 && 0 == Xsqlite3_strnicmp(tls, ts+5041, z, n) { (*Fts5Cursor)(unsafe.Pointer(pCsr)).FiSpecial = (*Fts5Cursor)(unsafe.Pointer(pCsr)).FiCsrId } else { - (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, ts+35410, libc.VaList(bp, n, z)) + (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, ts+35457, libc.VaList(bp, n, z)) rc = SQLITE_ERROR } @@ -145208,7 +145266,7 @@ var zRankArgs uintptr = (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRankArgs if zRankArgs != 0 { - var zSql uintptr = sqlite3Fts5Mprintf(tls, bp+16, ts+35438, libc.VaList(bp, zRankArgs)) + var zSql uintptr = sqlite3Fts5Mprintf(tls, bp+16, ts+35485, libc.VaList(bp, zRankArgs)) if zSql != 0 { *(*uintptr)(unsafe.Pointer(bp + 24)) = uintptr(0) *(*int32)(unsafe.Pointer(bp + 16)) = Xsqlite3_prepare_v3(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, zSql, -1, @@ -145239,7 +145297,7 @@ if *(*int32)(unsafe.Pointer(bp + 16)) == SQLITE_OK { pAux = 
fts5FindAuxiliary(tls, pTab, zRank) if pAux == uintptr(0) { - (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, ts+35448, libc.VaList(bp+8, zRank)) + (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, ts+35495, libc.VaList(bp+8, zRank)) *(*int32)(unsafe.Pointer(bp + 16)) = SQLITE_ERROR } } @@ -145271,14 +145329,14 @@ *(*int32)(unsafe.Pointer(pCsr + 80)) |= FTS5CSR_FREE_ZRANK } else if rc == SQLITE_ERROR { (*Sqlite3_vtab)(unsafe.Pointer((*Fts5Cursor)(unsafe.Pointer(pCsr)).Fbase.FpVtab)).FzErrMsg = Xsqlite3_mprintf(tls, - ts+35469, libc.VaList(bp, z)) + ts+35516, libc.VaList(bp, z)) } } else { if (*Fts5Config)(unsafe.Pointer(pConfig)).FzRank != 0 { (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRank = (*Fts5Config)(unsafe.Pointer(pConfig)).FzRank (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRankArgs = (*Fts5Config)(unsafe.Pointer(pConfig)).FzRankArgs } else { - (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRank = ts + 33852 + (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRank = ts + 33899 (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRankArgs = uintptr(0) } } @@ -145334,7 +145392,7 @@ goto __1 } (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+35301, 0) + ts+35348, 0) return SQLITE_ERROR __1: ; @@ -145551,7 +145609,7 @@ goto __40 } *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(pConfig)).FpzErrmsg)) = Xsqlite3_mprintf(tls, - ts+35502, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) + ts+35549, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) rc = SQLITE_ERROR goto __41 __40: @@ -145696,28 +145754,28 @@ var rc int32 = SQLITE_OK *(*int32)(unsafe.Pointer(bp)) = 0 - if 0 == Xsqlite3_stricmp(tls, ts+35538, zCmd) { + if 0 == Xsqlite3_stricmp(tls, ts+35585, zCmd) { if (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NORMAL { fts5SetVtabError(tls, pTab, - ts+35549, 0) + ts+35596, 0) rc = SQLITE_ERROR } else { rc = sqlite3Fts5StorageDeleteAll(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage) } - } else if 0 == Xsqlite3_stricmp(tls, ts+35629, zCmd) { + } else if 0 == Xsqlite3_stricmp(tls, ts+35676, zCmd) { if (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NONE { fts5SetVtabError(tls, pTab, - ts+35637, 0) + ts+35684, 0) rc = SQLITE_ERROR } else { rc = sqlite3Fts5StorageRebuild(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage) } } else if 0 == Xsqlite3_stricmp(tls, ts+16918, zCmd) { rc = sqlite3Fts5StorageOptimize(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage) - } else if 0 == Xsqlite3_stricmp(tls, ts+35693, zCmd) { + } else if 0 == Xsqlite3_stricmp(tls, ts+35740, zCmd) { var nMerge int32 = Xsqlite3_value_int(tls, pVal) rc = sqlite3Fts5StorageMerge(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage, nMerge) - } else if 0 == Xsqlite3_stricmp(tls, ts+35699, zCmd) { + } else if 0 == Xsqlite3_stricmp(tls, ts+35746, zCmd) { var iArg int32 = Xsqlite3_value_int(tls, pVal) rc = sqlite3Fts5StorageIntegrity(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage, iArg) } else { @@ -145788,12 +145846,12 @@ if eType0 == SQLITE_INTEGER && fts5IsContentless(tls, pTab) != 0 { (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+35715, + ts+35762, libc.VaList(bp, func() uintptr { if nArg > 1 { - return ts + 20413 + return ts + 20460 } - return ts + 35752 + return ts + 35799 }(), (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) *(*int32)(unsafe.Pointer(bp + 16)) = SQLITE_ERROR } else if nArg == 1 { @@ -146423,7 +146481,7 @@ pCsr = 
fts5CursorFromCsrid(tls, (*Fts5Auxiliary)(unsafe.Pointer(pAux)).FpGlobal, iCsrId) if pCsr == uintptr(0) || (*Fts5Cursor)(unsafe.Pointer(pCsr)).FePlan == 0 { - var zErr uintptr = Xsqlite3_mprintf(tls, ts+35764, libc.VaList(bp, iCsrId)) + var zErr uintptr = Xsqlite3_mprintf(tls, ts+35811, libc.VaList(bp, iCsrId)) Xsqlite3_result_error(tls, context, zErr, -1) Xsqlite3_free(tls, zErr) } else { @@ -146667,7 +146725,7 @@ }()) if pMod == uintptr(0) { rc = SQLITE_ERROR - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35785, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(azArg)))) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35832, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(azArg)))) } else { rc = (*struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 @@ -146686,7 +146744,7 @@ (*Fts5Config)(unsafe.Pointer(pConfig)).FpTokApi = pMod + 16 if rc != SQLITE_OK { if pzErr != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35807, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35854, 0) } } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FePattern = sqlite3Fts5TokenizerPattern(tls, @@ -146733,7 +146791,7 @@ var ppApi uintptr _ = nArg - ppApi = Xsqlite3_value_pointer(tls, *(*uintptr)(unsafe.Pointer(apArg)), ts+35838) + ppApi = Xsqlite3_value_pointer(tls, *(*uintptr)(unsafe.Pointer(apArg)), ts+35885) if ppApi != 0 { *(*uintptr)(unsafe.Pointer(ppApi)) = pGlobal } @@ -146742,7 +146800,7 @@ func fts5SourceIdFunc(tls *libc.TLS, pCtx uintptr, nArg int32, apUnused uintptr) { _ = nArg _ = apUnused - Xsqlite3_result_text(tls, pCtx, ts+35851, -1, libc.UintptrFromInt32(-1)) + Xsqlite3_result_text(tls, pCtx, ts+35898, -1, libc.UintptrFromInt32(-1)) } func fts5ShadowName(tls *libc.TLS, zName uintptr) int32 { @@ -146756,7 +146814,7 @@ } var azName2 = [5]uintptr{ - ts + 35942, ts + 34041, ts + 25040, ts + 34392, ts + 11472, + ts + 35989, ts + 34088, ts + 25087, ts + 34439, ts + 11472, } func fts5Init(tls *libc.TLS, db uintptr) int32 { @@ -146780,7 +146838,7 @@ (*Fts5Global)(unsafe.Pointer(pGlobal)).Fapi.FxFindTokenizer = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr, uintptr) int32 }{fts5FindTokenizer})) - rc = Xsqlite3_create_module_v2(tls, db, ts+35949, uintptr(unsafe.Pointer(&fts5Mod)), p, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5ModuleDestroy}))) + rc = Xsqlite3_create_module_v2(tls, db, ts+35996, uintptr(unsafe.Pointer(&fts5Mod)), p, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5ModuleDestroy}))) if rc == SQLITE_OK { rc = sqlite3Fts5IndexInit(tls, db) } @@ -146798,13 +146856,13 @@ } if rc == SQLITE_OK { rc = Xsqlite3_create_function(tls, - db, ts+35949, 1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { + db, ts+35996, 1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{fts5Fts5Func})), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { rc = Xsqlite3_create_function(tls, - db, ts+35954, 0, + db, ts+36001, 0, SQLITE_UTF8|SQLITE_DETERMINISTIC|SQLITE_INNOCUOUS, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) @@ -146861,17 +146919,17 @@ if *(*uintptr)(unsafe.Pointer(p + 40 + uintptr(eStmt)*8)) == uintptr(0) { *(*[11]uintptr)(unsafe.Pointer(bp + 128)) = [11]uintptr{ - ts + 35969, - ts + 36037, - ts + 36106, - ts + 36139, - ts + 36178, - ts + 36218, - ts + 36257, - ts + 36298, - ts + 36337, - ts + 36379, - ts + 36419, + ts + 36016, + ts + 36084, + ts + 36153, + ts + 36186, + 
ts + 36225, + ts + 36265, + ts + 36304, + ts + 36345, + ts + 36384, + ts + 36426, + ts + 36466, } var pC uintptr = (*Fts5Storage)(unsafe.Pointer(p)).FpConfig var zSql uintptr = uintptr(0) @@ -146973,18 +147031,18 @@ defer tls.Free(80) var rc int32 = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36442, + ts+36489, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36546, + ts+36593, libc.VaList(bp+48, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) } if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NORMAL { rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36584, + ts+36631, libc.VaList(bp+64, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) } return rc @@ -146996,7 +147054,7 @@ if *(*int32)(unsafe.Pointer(pRc)) == SQLITE_OK { *(*int32)(unsafe.Pointer(pRc)) = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36622, + ts+36669, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zTail, zName, zTail)) } } @@ -147008,14 +147066,14 @@ var pConfig uintptr = (*Fts5Storage)(unsafe.Pointer(pStorage)).FpConfig *(*int32)(unsafe.Pointer(bp)) = sqlite3Fts5StorageSync(tls, pStorage) - fts5StorageRenameOne(tls, pConfig, bp, ts+25040, zName) + fts5StorageRenameOne(tls, pConfig, bp, ts+25087, zName) fts5StorageRenameOne(tls, pConfig, bp, ts+11472, zName) - fts5StorageRenameOne(tls, pConfig, bp, ts+35942, zName) + fts5StorageRenameOne(tls, pConfig, bp, ts+35989, zName) if (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { - fts5StorageRenameOne(tls, pConfig, bp, ts+34392, zName) + fts5StorageRenameOne(tls, pConfig, bp, ts+34439, zName) } if (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NORMAL { - fts5StorageRenameOne(tls, pConfig, bp, ts+34041, zName) + fts5StorageRenameOne(tls, pConfig, bp, ts+34088, zName) } return *(*int32)(unsafe.Pointer(bp)) } @@ -147027,17 +147085,17 @@ var rc int32 *(*uintptr)(unsafe.Pointer(bp + 64)) = uintptr(0) - rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, bp+64, ts+36664, + rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, bp+64, ts+36711, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zPost, zDefn, func() uintptr { if bWithout != 0 { - return ts + 29693 + return ts + 29740 } return ts + 1538 }())) if *(*uintptr)(unsafe.Pointer(bp + 64)) != 0 { *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, - ts+36694, + ts+36741, libc.VaList(bp+40, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zPost, *(*uintptr)(unsafe.Pointer(bp + 64)))) Xsqlite3_free(tls, *(*uintptr)(unsafe.Pointer(bp + 64))) } @@ -147074,27 +147132,27 @@ } else { var i int32 var iOff int32 - Xsqlite3_snprintf(tls, nDefn, zDefn, ts+36738, 0) + Xsqlite3_snprintf(tls, nDefn, zDefn, ts+36785, 0) iOff = int32(libc.Xstrlen(tls, zDefn)) for i = 0; i < (*Fts5Config)(unsafe.Pointer(pConfig)).FnCol; i++ { - 
Xsqlite3_snprintf(tls, nDefn-iOff, zDefn+uintptr(iOff), ts+36761, libc.VaList(bp, i)) + Xsqlite3_snprintf(tls, nDefn-iOff, zDefn+uintptr(iOff), ts+36808, libc.VaList(bp, i)) iOff = iOff + int32(libc.Xstrlen(tls, zDefn+uintptr(iOff))) } - rc = sqlite3Fts5CreateTable(tls, pConfig, ts+34041, zDefn, 0, pzErr) + rc = sqlite3Fts5CreateTable(tls, pConfig, ts+34088, zDefn, 0, pzErr) } Xsqlite3_free(tls, zDefn) } if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { rc = sqlite3Fts5CreateTable(tls, - pConfig, ts+34392, ts+36767, 0, pzErr) + pConfig, ts+34439, ts+36814, 0, pzErr) } if rc == SQLITE_OK { rc = sqlite3Fts5CreateTable(tls, - pConfig, ts+35942, ts+36799, 1, pzErr) + pConfig, ts+35989, ts+36846, 1, pzErr) } if rc == SQLITE_OK { - rc = sqlite3Fts5StorageConfigValue(tls, p, ts+34538, uintptr(0), FTS5_CURRENT_VERSION) + rc = sqlite3Fts5StorageConfigValue(tls, p, ts+34585, uintptr(0), FTS5_CURRENT_VERSION) } } @@ -147300,12 +147358,12 @@ (*Fts5Storage)(unsafe.Pointer(p)).FbTotalsValid = 0 rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36816, + ts+36863, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+36866, + ts+36913, libc.VaList(bp+32, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) } @@ -147313,7 +147371,7 @@ rc = sqlite3Fts5IndexReinit(tls, (*Fts5Storage)(unsafe.Pointer(p)).FpIndex) } if rc == SQLITE_OK { - rc = sqlite3Fts5StorageConfigValue(tls, p, ts+34538, uintptr(0), FTS5_CURRENT_VERSION) + rc = sqlite3Fts5StorageConfigValue(tls, p, ts+34585, uintptr(0), FTS5_CURRENT_VERSION) } return rc } @@ -147489,7 +147547,7 @@ var zSql uintptr var rc int32 - zSql = Xsqlite3_mprintf(tls, ts+36895, + zSql = Xsqlite3_mprintf(tls, ts+36942, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zSuffix)) if zSql == uintptr(0) { rc = SQLITE_NOMEM @@ -147671,14 +147729,14 @@ if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NORMAL { *(*I64)(unsafe.Pointer(bp + 48)) = int64(0) - rc = fts5StorageCount(tls, p, ts+34041, bp+48) + rc = fts5StorageCount(tls, p, ts+34088, bp+48) if rc == SQLITE_OK && *(*I64)(unsafe.Pointer(bp + 48)) != (*Fts5Storage)(unsafe.Pointer(p)).FnTotalRow { rc = SQLITE_CORRUPT | int32(1)<<8 } } if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { *(*I64)(unsafe.Pointer(bp + 56)) = int64(0) - rc = fts5StorageCount(tls, p, ts+34392, bp+56) + rc = fts5StorageCount(tls, p, ts+34439, bp+56) if rc == SQLITE_OK && *(*I64)(unsafe.Pointer(bp + 56)) != (*Fts5Storage)(unsafe.Pointer(p)).FnTotalRow { rc = SQLITE_CORRUPT | int32(1)<<8 } @@ -147873,9 +147931,9 @@ libc.Xmemcpy(tls, p, uintptr(unsafe.Pointer(&aAsciiTokenChar)), uint64(unsafe.Sizeof(aAsciiTokenChar))) for i = 0; rc == SQLITE_OK && i < nArg; i = i + 2 { var zArg uintptr = *(*uintptr)(unsafe.Pointer(azArg + uintptr(i+1)*8)) - if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36927) { + if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36974) { fts5AsciiAddExceptions(tls, p, zArg, 1) - } else if 0 == Xsqlite3_stricmp(tls, 
*(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36938) { + } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36985) { fts5AsciiAddExceptions(tls, p, zArg, 0) } else { rc = SQLITE_ERROR @@ -148090,7 +148148,7 @@ } else { p = Xsqlite3_malloc(tls, int32(unsafe.Sizeof(Unicode61Tokenizer{}))) if p != 0 { - var zCat uintptr = ts + 36949 + var zCat uintptr = ts + 36996 var i int32 libc.Xmemset(tls, p, 0, uint64(unsafe.Sizeof(Unicode61Tokenizer{}))) @@ -148102,7 +148160,7 @@ } for i = 0; rc == SQLITE_OK && i < nArg; i = i + 2 { - if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36958) { + if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37005) { zCat = *(*uintptr)(unsafe.Pointer(azArg + uintptr(i+1)*8)) } } @@ -148113,18 +148171,18 @@ for i = 0; rc == SQLITE_OK && i < nArg; i = i + 2 { var zArg uintptr = *(*uintptr)(unsafe.Pointer(azArg + uintptr(i+1)*8)) - if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36969) { + if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37016) { if int32(*(*int8)(unsafe.Pointer(zArg))) != '0' && int32(*(*int8)(unsafe.Pointer(zArg))) != '1' && int32(*(*int8)(unsafe.Pointer(zArg))) != '2' || *(*int8)(unsafe.Pointer(zArg + 1)) != 0 { rc = SQLITE_ERROR } else { (*Unicode61Tokenizer)(unsafe.Pointer(p)).FeRemoveDiacritic = int32(*(*int8)(unsafe.Pointer(zArg))) - '0' } - } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36927) { + } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36974) { rc = fts5UnicodeAddExceptions(tls, p, zArg, 1) - } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36938) { + } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36985) { rc = fts5UnicodeAddExceptions(tls, p, zArg, 0) - } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+36958) { + } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37005) { } else { rc = SQLITE_ERROR } @@ -148400,7 +148458,7 @@ var rc int32 = SQLITE_OK var pRet uintptr *(*uintptr)(unsafe.Pointer(bp)) = uintptr(0) - var zBase uintptr = ts + 36987 + var zBase uintptr = ts + 37034 if nArg > 0 { zBase = *(*uintptr)(unsafe.Pointer(azArg)) @@ -148542,7 +148600,7 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*int8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'a': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+36997, aBuf+uintptr(nBuf-2), uint64(2)) { + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37044, aBuf+uintptr(nBuf-2), uint64(2)) { if fts5Porter_MGt1(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 } @@ -148550,11 +148608,11 @@ break case 'c': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37000, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37047, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37005, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37052, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } @@ -148562,7 +148620,7 @@ break case 'e': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37010, 
aBuf+uintptr(nBuf-2), uint64(2)) { + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37057, aBuf+uintptr(nBuf-2), uint64(2)) { if fts5Porter_MGt1(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 } @@ -148570,7 +148628,7 @@ break case 'i': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37013, aBuf+uintptr(nBuf-2), uint64(2)) { + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37060, aBuf+uintptr(nBuf-2), uint64(2)) { if fts5Porter_MGt1(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 } @@ -148578,11 +148636,11 @@ break case 'l': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37016, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37063, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37021, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37068, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } @@ -148590,19 +148648,19 @@ break case 'n': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37026, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37073, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37030, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37077, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt1(tls, aBuf, nBuf-5) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37036, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37083, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } - } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37041, aBuf+uintptr(nBuf-3), uint64(3)) { + } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37088, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -148610,11 +148668,11 @@ break case 'o': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37045, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37092, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1_and_S_or_T(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } - } else if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37049, aBuf+uintptr(nBuf-2), uint64(2)) { + } else if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37096, aBuf+uintptr(nBuf-2), uint64(2)) { if fts5Porter_MGt1(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 } @@ -148622,7 +148680,7 @@ break case 's': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37052, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37099, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -148630,11 +148688,11 @@ break case 't': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37056, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37103, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } - } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37060, aBuf+uintptr(nBuf-3), uint64(3)) { + } else if nBuf > 3 && 0 == 
libc.Xmemcmp(tls, ts+37107, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -148642,7 +148700,7 @@ break case 'u': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37064, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37111, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -148650,7 +148708,7 @@ break case 'v': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37068, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37115, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -148658,7 +148716,7 @@ break case 'z': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37072, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37119, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -148674,24 +148732,24 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*int8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'a': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37076, aBuf+uintptr(nBuf-2), uint64(2)) { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37056, uint64(3)) + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37123, aBuf+uintptr(nBuf-2), uint64(2)) { + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37103, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 + 3 ret = 1 } break case 'b': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37079, aBuf+uintptr(nBuf-2), uint64(2)) { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37082, uint64(3)) + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37126, aBuf+uintptr(nBuf-2), uint64(2)) { + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37129, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 + 3 ret = 1 } break case 'i': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37086, aBuf+uintptr(nBuf-2), uint64(2)) { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37072, uint64(3)) + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37133, aBuf+uintptr(nBuf-2), uint64(2)) { + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+37119, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 + 3 ret = 1 } @@ -148706,44 +148764,44 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*int8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'a': - if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37089, aBuf+uintptr(nBuf-7), uint64(7)) { + if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37136, aBuf+uintptr(nBuf-7), uint64(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37056, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37103, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } - } else if nBuf > 6 && 0 == libc.Xmemcmp(tls, ts+37097, aBuf+uintptr(nBuf-6), uint64(6)) { + } else if nBuf > 6 && 0 == libc.Xmemcmp(tls, ts+37144, aBuf+uintptr(nBuf-6), uint64(6)) { if fts5Porter_MGt0(tls, aBuf, nBuf-6) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-6), ts+37104, uint64(4)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-6), ts+37151, uint64(4)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 6 + 4 } } break case 'c': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37109, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37156, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37005, uint64(4)) 
+ libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37052, uint64(4)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 4 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37114, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37161, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37000, uint64(4)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37047, uint64(4)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 4 } } break case 'e': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37119, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37166, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37072, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37119, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 3 } } break case 'g': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37124, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37171, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+15464, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 3 @@ -148752,91 +148810,91 @@ break case 'l': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37129, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37176, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt0(tls, aBuf, nBuf-3) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37082, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37129, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 + 3 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37133, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37180, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+36997, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37044, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 2 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37138, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37185, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37041, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37088, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 3 } - } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37144, aBuf+uintptr(nBuf-3), uint64(3)) { + } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37191, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt0(tls, aBuf, nBuf-3) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37148, uint64(1)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37195, uint64(1)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 + 1 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37150, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37197, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37064, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37111, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 3 } } break case 'o': - if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37156, aBuf+uintptr(nBuf-7), uint64(7)) { + if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37203, aBuf+uintptr(nBuf-7), uint64(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 
{ - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37072, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37119, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37164, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37211, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37056, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37103, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 3 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37170, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37217, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37056, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37103, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 3 } } break case 's': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37175, aBuf+uintptr(nBuf-5), uint64(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37222, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+36997, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37044, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } - } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37181, aBuf+uintptr(nBuf-7), uint64(7)) { + } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37228, aBuf+uintptr(nBuf-7), uint64(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37068, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37115, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } - } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37189, aBuf+uintptr(nBuf-7), uint64(7)) { + } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37236, aBuf+uintptr(nBuf-7), uint64(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37197, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37244, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } - } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37201, aBuf+uintptr(nBuf-7), uint64(7)) { + } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+37248, aBuf+uintptr(nBuf-7), uint64(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37064, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+37111, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } } break case 't': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37209, aBuf+uintptr(nBuf-5), uint64(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37256, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+36997, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37044, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37215, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37262, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37068, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37115, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 3 } - } else if nBuf > 6 && 0 == libc.Xmemcmp(tls, ts+37221, aBuf+uintptr(nBuf-6), uint64(6)) { + } else if nBuf > 6 && 0 == 
libc.Xmemcmp(tls, ts+37268, aBuf+uintptr(nBuf-6), uint64(6)) { if fts5Porter_MGt0(tls, aBuf, nBuf-6) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-6), ts+37082, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-6), ts+37129, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 6 + 3 } } @@ -148851,16 +148909,16 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*int8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'a': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37228, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37275, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37013, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+37060, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 2 } } break case 's': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37233, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+37280, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } @@ -148868,21 +148926,21 @@ break case 't': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37238, aBuf+uintptr(nBuf-5), uint64(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37285, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37013, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37060, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37244, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37291, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37013, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37060, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } } break case 'u': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37197, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37244, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt0(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -148890,7 +148948,7 @@ break case 'v': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37250, aBuf+uintptr(nBuf-5), uint64(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37297, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 } @@ -148898,9 +148956,9 @@ break case 'z': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37256, aBuf+uintptr(nBuf-5), uint64(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+37303, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+36997, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+37044, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } } @@ -148915,12 +148973,12 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*int8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'e': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37262, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37309, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt0(tls, aBuf, nBuf-3) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37266, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+37313, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 + 2 } - } else if nBuf > 2 && 0 == 
libc.Xmemcmp(tls, ts+37269, aBuf+uintptr(nBuf-2), uint64(2)) { + } else if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+37316, aBuf+uintptr(nBuf-2), uint64(2)) { if fts5Porter_Vowel(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 ret = 1 @@ -148929,7 +148987,7 @@ break case 'n': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37272, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+37319, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_Vowel(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 ret = 1 @@ -149085,7 +149143,7 @@ (*TrigramTokenizer)(unsafe.Pointer(pNew)).FbFold = 1 for i = 0; rc == SQLITE_OK && i < nArg; i = i + 2 { var zArg uintptr = *(*uintptr)(unsafe.Pointer(azArg + uintptr(i+1)*8)) - if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37276) { + if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+37323) { if int32(*(*int8)(unsafe.Pointer(zArg))) != '0' && int32(*(*int8)(unsafe.Pointer(zArg))) != '1' || *(*int8)(unsafe.Pointer(zArg + 1)) != 0 { rc = SQLITE_ERROR } else { @@ -149265,22 +149323,22 @@ defer tls.Free(128) *(*[4]BuiltinTokenizer)(unsafe.Pointer(bp)) = [4]BuiltinTokenizer{ - {FzName: ts + 36987, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { + {FzName: ts + 37034, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 }{fts5UnicodeCreate})), FxDelete: *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5UnicodeDelete})), FxTokenize: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, int32, uintptr) int32 }{fts5UnicodeTokenize}))}}, - {FzName: ts + 37291, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { + {FzName: ts + 37338, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 }{fts5AsciiCreate})), FxDelete: *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5AsciiDelete})), FxTokenize: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, int32, uintptr) int32 }{fts5AsciiTokenize}))}}, - {FzName: ts + 37297, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { + {FzName: ts + 37344, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 }{fts5PorterCreate})), FxDelete: *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5PorterDelete})), FxTokenize: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, int32, uintptr) int32 }{fts5PorterTokenize}))}}, - {FzName: ts + 37304, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { + {FzName: ts + 37351, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 }{fts5TriCreate})), FxDelete: *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5TriDelete})), FxTokenize: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, int32, uintptr) int32 @@ -150423,14 +150481,14 @@ var zCopy uintptr = sqlite3Fts5Strndup(tls, bp+8, zType, -1) if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { sqlite3Fts5Dequote(tls, zCopy) - if Xsqlite3_stricmp(tls, zCopy, ts+37312) == 0 { + if Xsqlite3_stricmp(tls, zCopy, ts+37359) == 0 { *(*int32)(unsafe.Pointer(peType)) = FTS5_VOCAB_COL - } 
else if Xsqlite3_stricmp(tls, zCopy, ts+37316) == 0 { + } else if Xsqlite3_stricmp(tls, zCopy, ts+37363) == 0 { *(*int32)(unsafe.Pointer(peType)) = FTS5_VOCAB_ROW - } else if Xsqlite3_stricmp(tls, zCopy, ts+37320) == 0 { + } else if Xsqlite3_stricmp(tls, zCopy, ts+37367) == 0 { *(*int32)(unsafe.Pointer(peType)) = FTS5_VOCAB_INSTANCE } else { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+37329, libc.VaList(bp, zCopy)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+37376, libc.VaList(bp, zCopy)) *(*int32)(unsafe.Pointer(bp + 8)) = SQLITE_ERROR } Xsqlite3_free(tls, zCopy) @@ -150456,19 +150514,19 @@ defer tls.Free(36) *(*[3]uintptr)(unsafe.Pointer(bp + 8)) = [3]uintptr{ - ts + 37363, - ts + 37403, - ts + 37438, + ts + 37410, + ts + 37450, + ts + 37485, } var pRet uintptr = uintptr(0) *(*int32)(unsafe.Pointer(bp + 32)) = SQLITE_OK var bDb int32 - bDb = libc.Bool32(argc == 6 && libc.Xstrlen(tls, *(*uintptr)(unsafe.Pointer(argv + 1*8))) == uint64(4) && libc.Xmemcmp(tls, ts+23336, *(*uintptr)(unsafe.Pointer(argv + 1*8)), uint64(4)) == 0) + bDb = libc.Bool32(argc == 6 && libc.Xstrlen(tls, *(*uintptr)(unsafe.Pointer(argv + 1*8))) == uint64(4) && libc.Xmemcmp(tls, ts+23383, *(*uintptr)(unsafe.Pointer(argv + 1*8)), uint64(4)) == 0) if argc != 5 && bDb == 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+37481, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+37528, 0) *(*int32)(unsafe.Pointer(bp + 32)) = SQLITE_ERROR } else { var nByte int32 @@ -150601,11 +150659,11 @@ if (*Fts5VocabTable)(unsafe.Pointer(pTab)).FbBusy != 0 { (*Sqlite3_vtab)(unsafe.Pointer(pVTab)).FzErrMsg = Xsqlite3_mprintf(tls, - ts+37514, libc.VaList(bp, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) + ts+37561, libc.VaList(bp, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) return SQLITE_ERROR } zSql = sqlite3Fts5Mprintf(tls, bp+64, - ts+37545, + ts+37592, libc.VaList(bp+16, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) if zSql != 0 { *(*int32)(unsafe.Pointer(bp + 64)) = Xsqlite3_prepare_v2(tls, (*Fts5VocabTable)(unsafe.Pointer(pTab)).Fdb, zSql, -1, bp+72, uintptr(0)) @@ -150629,7 +150687,7 @@ *(*uintptr)(unsafe.Pointer(bp + 72)) = uintptr(0) if *(*int32)(unsafe.Pointer(bp + 64)) == SQLITE_OK { (*Sqlite3_vtab)(unsafe.Pointer(pVTab)).FzErrMsg = Xsqlite3_mprintf(tls, - ts+37596, libc.VaList(bp+48, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) + ts+37643, libc.VaList(bp+48, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) *(*int32)(unsafe.Pointer(bp + 64)) = SQLITE_ERROR } } else { @@ -151024,7 +151082,7 @@ func sqlite3Fts5VocabInit(tls *libc.TLS, pGlobal uintptr, db uintptr) int32 { var p uintptr = pGlobal - return Xsqlite3_create_module_v2(tls, db, ts+37622, uintptr(unsafe.Pointer(&fts5Vocab)), p, uintptr(0)) + return Xsqlite3_create_module_v2(tls, db, ts+37669, uintptr(unsafe.Pointer(&fts5Vocab)), p, uintptr(0)) } var fts5Vocab = Sqlite3_module{ @@ -151046,7 +151104,7 @@ // ************* End of stmt.c *********************************************** // Return the source-id for this library func Xsqlite3_sourceid(tls *libc.TLS) uintptr { - return ts + 37632 + return ts + 37679 } func init() { 
@@ -152020,5 +152078,5 @@ *(*func(*libc.TLS, uintptr, int32, uintptr) int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&vfs_template)) + 128)) = rbuVfsGetLastError } -var ts1 = "3.41.0\x00ATOMIC_INTRINSICS=0\x00COMPILER=clang-13.0.0\x00DEFAULT_AUTOVACUUM\x00DEFAULT_CACHE_SIZE=-2000\x00DEFAULT_FILE_FORMAT=4\x00DEFAULT_JOURNAL_SIZE_LIMIT=-1\x00DEFAULT_MEMSTATUS=0\x00DEFAULT_MMAP_SIZE=0\x00DEFAULT_PAGE_SIZE=4096\x00DEFAULT_PCACHE_INITSZ=20\x00DEFAULT_RECURSIVE_TRIGGERS\x00DEFAULT_SECTOR_SIZE=4096\x00DEFAULT_SYNCHRONOUS=2\x00DEFAULT_WAL_AUTOCHECKPOINT=1000\x00DEFAULT_WAL_SYNCHRONOUS=2\x00DEFAULT_WORKER_THREADS=0\x00ENABLE_COLUMN_METADATA\x00ENABLE_FTS5\x00ENABLE_GEOPOLY\x00ENABLE_MATH_FUNCTIONS\x00ENABLE_MEMORY_MANAGEMENT\x00ENABLE_OFFSET_SQL_FUNC\x00ENABLE_PREUPDATE_HOOK\x00ENABLE_RBU\x00ENABLE_RTREE\x00ENABLE_SESSION\x00ENABLE_SNAPSHOT\x00ENABLE_STAT4\x00ENABLE_UNLOCK_NOTIFY\x00LIKE_DOESNT_MATCH_BLOBS\x00MALLOC_SOFT_LIMIT=1024\x00MAX_ATTACHED=10\x00MAX_COLUMN=2000\x00MAX_COMPOUND_SELECT=500\x00MAX_DEFAULT_PAGE_SIZE=8192\x00MAX_EXPR_DEPTH=1000\x00MAX_FUNCTION_ARG=127\x00MAX_LENGTH=1000000000\x00MAX_LIKE_PATTERN_LENGTH=50000\x00MAX_MMAP_SIZE=0\x00MAX_PAGE_COUNT=1073741823\x00MAX_PAGE_SIZE=65536\x00MAX_SQL_LENGTH=1000000000\x00MAX_TRIGGER_DEPTH=1000\x00MAX_VARIABLE_NUMBER=32766\x00MAX_VDBE_OP=250000000\x00MAX_WORKER_THREADS=8\x00MUTEX_NOOP\x00SOUNDEX\x00SYSTEM_MALLOC\x00TEMP_STORE=1\x00THREADSAFE=1\x00BINARY\x00ANY\x00BLOB\x00INT\x00INTEGER\x00REAL\x00TEXT\x0020b:20e\x0020c:20e\x0020e\x0040f-21a-21d\x00now\x00local time unavailable\x00second\x00minute\x00hour\x00\x00\x00day\x00\x00\x00\x00month\x00\x00year\x00\x00\x00auto\x00julianday\x00localtime\x00unixepoch\x00utc\x00weekday \x00start of \x00month\x00year\x00day\x00%02d\x00%06.3f\x00%03d\x00%.16g\x00%lld\x00%04d\x00date\x00time\x00datetime\x00strftime\x00current_time\x00current_timestamp\x00current_date\x00failed to allocate %u bytes of memory\x00failed memory resize %u to %u bytes\x00out of memory\x000123456789ABCDEF0123456789abcdef\x00-x0\x00X0\x00%\x00NaN\x00Inf\x00\x00NULL\x00(NULL)\x00.\x00(join-%u)\x00(subquery-%u)\x00thstndrd\x00922337203685477580\x00API call with %s database connection 
pointer\x00unopened\x00invalid\x00Savepoint\x00AutoCommit\x00Transaction\x00Checkpoint\x00JournalMode\x00Vacuum\x00VFilter\x00VUpdate\x00Init\x00Goto\x00Gosub\x00InitCoroutine\x00Yield\x00MustBeInt\x00Jump\x00Once\x00If\x00IfNot\x00IsType\x00Not\x00IfNullRow\x00SeekLT\x00SeekLE\x00SeekGE\x00SeekGT\x00IfNotOpen\x00IfNoHope\x00NoConflict\x00NotFound\x00Found\x00SeekRowid\x00NotExists\x00Last\x00IfSmaller\x00SorterSort\x00Sort\x00Rewind\x00SorterNext\x00Prev\x00Next\x00IdxLE\x00IdxGT\x00IdxLT\x00Or\x00And\x00IdxGE\x00RowSetRead\x00RowSetTest\x00Program\x00FkIfZero\x00IsNull\x00NotNull\x00Ne\x00Eq\x00Gt\x00Le\x00Lt\x00Ge\x00ElseEq\x00IfPos\x00IfNotZero\x00DecrJumpZero\x00IncrVacuum\x00VNext\x00Filter\x00PureFunc\x00Function\x00Return\x00EndCoroutine\x00HaltIfNull\x00Halt\x00Integer\x00Int64\x00String\x00BeginSubrtn\x00Null\x00SoftNull\x00Blob\x00Variable\x00Move\x00Copy\x00SCopy\x00IntCopy\x00FkCheck\x00ResultRow\x00CollSeq\x00AddImm\x00RealAffinity\x00Cast\x00Permutation\x00Compare\x00IsTrue\x00ZeroOrNull\x00Offset\x00Column\x00TypeCheck\x00Affinity\x00MakeRecord\x00Count\x00ReadCookie\x00SetCookie\x00ReopenIdx\x00BitAnd\x00BitOr\x00ShiftLeft\x00ShiftRight\x00Add\x00Subtract\x00Multiply\x00Divide\x00Remainder\x00Concat\x00OpenRead\x00OpenWrite\x00BitNot\x00OpenDup\x00OpenAutoindex\x00String8\x00OpenEphemeral\x00SorterOpen\x00SequenceTest\x00OpenPseudo\x00Close\x00ColumnsUsed\x00SeekScan\x00SeekHit\x00Sequence\x00NewRowid\x00Insert\x00RowCell\x00Delete\x00ResetCount\x00SorterCompare\x00SorterData\x00RowData\x00Rowid\x00NullRow\x00SeekEnd\x00IdxInsert\x00SorterInsert\x00IdxDelete\x00DeferredSeek\x00IdxRowid\x00FinishSeek\x00Destroy\x00Clear\x00ResetSorter\x00CreateBtree\x00SqlExec\x00ParseSchema\x00LoadAnalysis\x00DropTable\x00DropIndex\x00Real\x00DropTrigger\x00IntegrityCk\x00RowSetAdd\x00Param\x00FkCounter\x00MemMax\x00OffsetLimit\x00AggInverse\x00AggStep\x00AggStep1\x00AggValue\x00AggFinal\x00Expire\x00CursorLock\x00CursorUnlock\x00TableLock\x00VBegin\x00VCreate\x00VDestroy\x00VOpen\x00VInitIn\x00VColumn\x00VRename\x00Pagecount\x00MaxPgcnt\x00ClrSubtype\x00FilterAdd\x00Trace\x00CursorHint\x00ReleaseReg\x00Noop\x00Explain\x00Abortable\x00open\x00close\x00access\x00getcwd\x00stat\x00fstat\x00ftruncate\x00fcntl\x00read\x00pread\x00pread64\x00write\x00pwrite\x00pwrite64\x00fchmod\x00fallocate\x00unlink\x00openDirectory\x00mkdir\x00rmdir\x00fchown\x00geteuid\x00mmap\x00munmap\x00mremap\x00getpagesize\x00readlink\x00lstat\x00ioctl\x00attempt to open \"%s\" as file descriptor %d\x00/dev/null\x00os_unix.c:%d: (%d) %s(%s) - %s\x00cannot fstat db file %s\x00file unlinked while open: %s\x00multiple links to file: %s\x00file renamed while open: %s\x00%s\x00full_fsync\x00%s-shm\x00readonly_shm\x00psow\x00unix-excl\x00%s.lock\x00/var/tmp\x00/usr/tmp\x00/tmp\x00SQLITE_TMPDIR\x00TMPDIR\x00%s/etilqs_%llx%c\x00modeof\x00fsync\x00/dev/urandom\x00unix\x00unix-none\x00unix-dotfile\x00memdb\x00memdb(%p,%lld)\x00PRAGMA \"%w\".page_count\x00ATTACH x AS %Q\x00recovered %d pages from %s\x00-journal\x00-wal\x00nolock\x00immutable\x00PRAGMA table_list\x00recovered %d frames from WAL file %s\x00cannot limit WAL size: %s\x00SQLite format 3\x00:memory:\x00@ \x00\n\x00invalid page number %d\x002nd reference to page %d\x00Failed to read ptrmap key=%d\x00Bad ptr map entry key=%d expected=(%d,%d) got=(%d,%d)\x00failed to get page %d\x00freelist leaf count too big on page %d\x00%s is %d but should be %d\x00size\x00overflow list length\x00Page %u: \x00unable to get the page. 
error code=%d\x00btreeInitPage() returns error code %d\x00free space corruption\x00On tree page %u cell %d: \x00On page %u at right child: \x00Offset %d out of range %d..%d\x00Extends off end of page\x00Rowid %lld out of order\x00Child page depth differs\x00Multiple uses for byte %u of page %u\x00Fragmentation of %d bytes reported as %d on page %u\x00Main freelist: \x00max rootpage (%d) disagrees with header (%d)\x00incremental_vacuum enabled with a max rootpage of zero\x00Page %d is never used\x00Pointer map page %d is referenced\x00unknown database %s\x00destination database is in use\x00source and destination must be distinct\x00%!.15g\x00-\x00%s%s\x00k(%d\x00B\x00,%s%s%s\x00N.\x00)\x00%.18s-%s\x00%s(%d)\x00%d\x00(blob)\x00vtab:%p\x00%c%u\x00]\x00program\x00?\x008\x0016LE\x0016BE\x00addr\x00opcode\x00p1\x00p2\x00p3\x00p4\x00p5\x00comment\x00id\x00parent\x00notused\x00detail\x00%.4c%s%.16c\x00MJ delete: %s\x00MJ collide: %s\x00-mj%06X9%02X\x00FOREIGN KEY constraint failed\x00a CHECK constraint\x00a generated column\x00an index\x00non-deterministic use of %s() in %s\x00API called with finalized prepared statement\x00API called with NULL prepared statement\x00string or blob too big\x00bind on a busy prepared statement: [%s]\x00-- \x00'%.*q'\x00zeroblob(%d)\x00x'\x00%02x\x00'\x00%s constraint failed\x00%z: %s\x00abort at %d in [%s]: %s\x00cannot store %s value in %s column %s.%s\x00cannot open savepoint - SQL statements in progress\x00no such savepoint: %s\x00cannot release savepoint - SQL statements in progress\x00cannot commit transaction - SQL statements in progress\x00cannot start a transaction within a transaction\x00cannot rollback - no transaction is active\x00cannot commit - no transaction is active\x00database schema has changed\x00index corruption\x00sqlite_master\x00SELECT*FROM\"%w\".%s WHERE %s ORDER BY rowid\x00too many levels of trigger recursion\x00cannot change %s wal mode from within a transaction\x00into\x00out of\x00database table is locked: %s\x00ValueList\x00-- %s\x00statement aborts at %d: [%s] %s\x00NOT NULL\x00UNIQUE\x00CHECK\x00FOREIGN KEY\x00cannot open value of type %s\x00null\x00real\x00integer\x00no such rowid: %lld\x00cannot open virtual table: %s\x00cannot open table without rowid: %s\x00cannot open view: %s\x00no such column: \"%s\"\x00foreign key\x00indexed\x00cannot open %s column for writing\x00sqlite_\x00sqlite_temp_master\x00sqlite_temp_schema\x00sqlite_schema\x00main\x00*\x00new\x00old\x00excluded\x00misuse of aliased aggregate %s\x00misuse of aliased window function %s\x00row value misused\x00double-quoted string literal: \"%w\"\x00coalesce\x00no such column\x00ambiguous column name\x00%s: %s.%s.%s\x00%s: %s.%s\x00%s: %s\x00partial index WHERE clauses\x00index expressions\x00CHECK constraints\x00generated columns\x00%s prohibited in %s\x00true\x00false\x00the \".\" operator\x00second argument to %#T() must be a constant between 0.0 and 1.0\x00not authorized to use function: %#T\x00non-deterministic functions\x00%#T() may not be used as a window function\x00window\x00aggregate\x00misuse of %s function %#T()\x00no such function: %#T\x00wrong number of arguments to function %#T()\x00FILTER may not be used with non-aggregate %#T()\x00subqueries\x00parameters\x00%r %s BY term out of range - should be between 1 and %d\x00too many terms in ORDER BY clause\x00ORDER\x00%r ORDER BY term does not match any column in the result set\x00too many terms in %s BY clause\x00HAVING clause on a non-aggregate query\x00GROUP\x00aggregate functions are not allowed in the 
GROUP BY clause\x00Expression tree is too large (maximum depth %d)\x00IN(...) element has %d term%s - expected %d\x00s\x000\x00too many arguments on function %T\x00unsafe use of %#T()\x00variable number must be between ?1 and ?%d\x00too many SQL variables\x00%d columns assigned %d values\x00too many columns in %s\x00_ROWID_\x00ROWID\x00OID\x00USING ROWID SEARCH ON TABLE %s FOR IN-OPERATOR\x00USING INDEX %s FOR IN-OPERATOR\x00sub-select returns %d columns - expected %d\x00REUSE LIST SUBQUERY %d\x00%sLIST SUBQUERY %d\x00CORRELATED \x00REUSE SUBQUERY %d\x00%sSCALAR SUBQUERY %d\x001\x000x\x00hex literal too big: %s%#T\x00generated column loop on \"%s\"\x00blob\x00text\x00numeric\x00flexnum\x00none\x00misuse of aggregate: %#T()\x00unknown function: %#T()\x00RAISE() may only be used within a trigger-program\x00B\x00C\x00D\x00E\x00F\x00table %s may not be altered\x00SELECT 1 FROM \"%w\".sqlite_master WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%' AND sqlite_rename_test(%Q, sql, type, name, %d, %Q, %d)=NULL \x00SELECT 1 FROM temp.sqlite_master WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%' AND sqlite_rename_test(%Q, sql, type, name, 1, %Q, %d)=NULL \x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_quotefix(%Q, sql)WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%'\x00UPDATE temp.sqlite_master SET sql = sqlite_rename_quotefix('temp', sql)WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%'\x00there is already another table or index with this name: %s\x00table\x00view %s may not be altered\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, %d) WHERE (type!='index' OR tbl_name=%Q COLLATE nocase)AND name NOT LIKE 'sqliteX_%%' ESCAPE 'X'\x00UPDATE %Q.sqlite_master SET tbl_name = %Q, name = CASE WHEN type='table' THEN %Q WHEN name LIKE 'sqliteX_autoindex%%' ESCAPE 'X' AND type='index' THEN 'sqlite_autoindex_' || %Q || substr(name,%d+18) ELSE name END WHERE tbl_name=%Q COLLATE nocase AND (type='table' OR type='index' OR type='trigger');\x00sqlite_sequence\x00UPDATE \"%w\".sqlite_sequence set name = %Q WHERE name = %Q\x00UPDATE sqlite_temp_schema SET sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, 1), tbl_name = CASE WHEN tbl_name=%Q COLLATE nocase AND sqlite_rename_test(%Q, sql, type, name, 1, 'after rename', 0) THEN %Q ELSE tbl_name END WHERE type IN ('view', 'trigger')\x00after rename\x00SELECT raise(ABORT,%Q) FROM \"%w\".\"%w\"\x00Cannot add a PRIMARY KEY column\x00Cannot add a UNIQUE column\x00Cannot add a REFERENCES column with non-NULL default value\x00Cannot add a NOT NULL column with default value NULL\x00Cannot add a column with non-constant default\x00cannot add a STORED column\x00UPDATE \"%w\".sqlite_master SET sql = printf('%%.%ds, ',sql) || %Q || substr(sql,1+length(printf('%%.%ds',sql))) WHERE type = 'table' AND name = %Q\x00SELECT CASE WHEN quick_check GLOB 'CHECK*' THEN raise(ABORT,'CHECK constraint failed') ELSE raise(ABORT,'NOT NULL constraint failed') END FROM pragma_quick_check(%Q,%Q) WHERE quick_check GLOB 'CHECK*' OR quick_check GLOB 'NULL*'\x00virtual tables may not be altered\x00Cannot add a column to a view\x00sqlite_altertab_%s\x00view\x00virtual table\x00cannot %s %s \"%s\"\x00drop column from\x00rename columns of\x00no such column: \"%T\"\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, %d) WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND (type != 'index' OR 
tbl_name = %Q)\x00UPDATE temp.sqlite_master SET sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, 1) WHERE type IN ('trigger', 'view')\x00error in %s %s%s%s: %s\x00 \x00CREATE \x00\"%w\" \x00%Q%s\x00%.*s%s\x00cannot drop %s column: \"%s\"\x00PRIMARY KEY\x00cannot drop column \"%s\": no other columns exist\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_drop_column(%d, sql, %d) WHERE (type=='table' AND tbl_name=%Q COLLATE nocase)\x00after drop column\x00sqlite_rename_column\x00sqlite_rename_table\x00sqlite_rename_test\x00sqlite_drop_column\x00sqlite_rename_quotefix\x00CREATE TABLE %Q.%s(%s)\x00DELETE FROM %Q.%s WHERE %s=%Q\x00DELETE FROM %Q.%s\x00sqlite_stat1\x00tbl,idx,stat\x00sqlite_stat4\x00tbl,idx,neq,nlt,ndlt,sample\x00sqlite_stat3\x00stat_init\x00stat_push\x00%llu\x00 %llu\x00%llu \x00stat_get\x00sqlite\\_%\x00BBB\x00idx\x00tbl\x00unordered*\x00sz=[0-9]*\x00noskipscan*\x00SELECT idx,count(*) FROM %Q.sqlite_stat4 GROUP BY idx\x00SELECT idx,neq,nlt,ndlt,sample FROM %Q.sqlite_stat4\x00SELECT tbl,idx,stat FROM %Q.sqlite_stat1\x00x\x00\x00too many attached databases - max %d\x00database %s is already in use\x00database is already attached\x00attached databases must use the same text encoding as main database\x00unable to open database: %s\x00no such database: %s\x00cannot detach database %s\x00database %s is locked\x00sqlite_detach\x00sqlite_attach\x00%s cannot use variables\x00%s %T cannot reference objects in database %s\x00authorizer malfunction\x00%s.%s\x00%s.%z\x00access to %z is prohibited\x00not authorized\x00pragma_\x00no such view\x00no such table\x00corrupt database\x00unknown database %T\x00object name reserved for internal use: %s\x00temporary table name must be unqualified\x00%s %T already exists\x00there is already an index named %s\x00sqlite_returning\x00cannot use RETURNING in a trigger\x00too many columns on %s\x00always\x00generated\x00duplicate column name: %s\x00default value of column [%s] is not constant\x00cannot use DEFAULT on a generated column\x00generated columns cannot be part of the PRIMARY KEY\x00table \"%s\" has more than one primary key\x00AUTOINCREMENT is only allowed on an INTEGER PRIMARY KEY\x00virtual tables cannot use computed columns\x00virtual\x00stored\x00error in generated column \"%s\"\x00,\x00\n \x00,\n \x00\n)\x00CREATE TABLE \x00 TEXT\x00 NUM\x00 INT\x00 REAL\x00unknown datatype for %s.%s: \"%s\"\x00missing datatype for %s.%s\x00AUTOINCREMENT not allowed on WITHOUT ROWID tables\x00PRIMARY KEY missing on table %s\x00must have at least one non-generated column\x00TABLE\x00VIEW\x00CREATE %s %.*s\x00UPDATE %Q.sqlite_master SET type='%s', name=%Q, tbl_name=%Q, rootpage=#%d, sql=%Q WHERE rowid=#%d\x00CREATE TABLE %Q.sqlite_sequence(name,seq)\x00tbl_name='%q' AND type!='trigger'\x00parameters are not allowed in views\x00view %s is circularly defined\x00corrupt schema\x00UPDATE %Q.sqlite_master SET rootpage=%d WHERE #%d AND rootpage=#%d\x00sqlite_stat%d\x00DELETE FROM %Q.sqlite_sequence WHERE name=%Q\x00DELETE FROM %Q.sqlite_master WHERE tbl_name=%Q and type!='trigger'\x00table %s may not be dropped\x00use DROP TABLE to delete table %s\x00use DROP VIEW to delete view %s\x00foreign key on %s should reference only one column of table %T\x00number of columns in foreign key does not match the number of columns in the referenced table\x00unknown column \"%s\" in foreign key definition\x00unsupported use of NULLS %s\x00FIRST\x00LAST\x00index\x00cannot create a TEMP index on non-TEMP table \"%s\"\x00table %s may not be indexed\x00views may not be 
indexed\x00virtual tables may not be indexed\x00there is already a table named %s\x00index %s already exists\x00sqlite_autoindex_%s_%d\x00expressions prohibited in PRIMARY KEY and UNIQUE constraints\x00conflicting ON CONFLICT clauses specified\x00invalid rootpage\x00CREATE%s INDEX %.*s\x00 UNIQUE\x00INSERT INTO %Q.sqlite_master VALUES('index',%Q,%Q,#%d,%Q);\x00name='%q' AND type='index'\x00no such index: %S\x00index associated with UNIQUE or PRIMARY KEY constraint cannot be dropped\x00DELETE FROM %Q.sqlite_master WHERE name=%Q AND type='index'\x00too many FROM clause terms, max: %d\x00a JOIN clause is required before %s\x00ON\x00USING\x00BEGIN\x00ROLLBACK\x00COMMIT\x00RELEASE\x00unable to open a temporary database file for storing temporary tables\x00index '%q'\x00, \x00%s.rowid\x00unable to identify the object to be reindexed\x00duplicate WITH table name: %s\x00no such collation sequence: %s\x00unsafe use of virtual table \"%s\"\x00table %s may not be modified\x00cannot modify %s because it is a view\x00rows deleted\x00integer overflow\x00%.*f\x00LIKE or GLOB pattern too complex\x00ESCAPE expression must be a single character\x00%!.20e\x00%Q\x00?000\x00MATCH\x00like\x00implies_nonnull_row\x00expr_compare\x00expr_implies_expr\x00affinity\x00soundex\x00load_extension\x00sqlite_compileoption_used\x00sqlite_compileoption_get\x00unlikely\x00likelihood\x00likely\x00sqlite_offset\x00ltrim\x00rtrim\x00trim\x00min\x00max\x00typeof\x00subtype\x00length\x00instr\x00printf\x00format\x00unicode\x00char\x00abs\x00round\x00upper\x00lower\x00hex\x00unhex\x00ifnull\x00random\x00randomblob\x00nullif\x00sqlite_version\x00sqlite_source_id\x00sqlite_log\x00quote\x00last_insert_rowid\x00changes\x00total_changes\x00replace\x00zeroblob\x00substr\x00substring\x00sum\x00total\x00avg\x00count\x00group_concat\x00glob\x00ceil\x00ceiling\x00floor\x00trunc\x00ln\x00log\x00log10\x00log2\x00exp\x00pow\x00power\x00mod\x00acos\x00asin\x00atan\x00atan2\x00cos\x00sin\x00tan\x00cosh\x00sinh\x00tanh\x00acosh\x00asinh\x00atanh\x00sqrt\x00radians\x00degrees\x00pi\x00sign\x00iif\x00foreign key mismatch - \"%w\" referencing \"%w\"\x00cannot INSERT into generated column \"%s\"\x00table %S has no column named %s\x00table %S has %d columns but %d values were supplied\x00%d values for %d columns\x00UPSERT not implemented for virtual table \"%s\"\x00cannot UPSERT a view\x00rows inserted\x00sqlite3_extension_init\x00sqlite3_\x00lib\x00_init\x00no entry point [%s] in shared library [%s]\x00error during initialization: %s\x00unable to open shared library [%.*s]\x00so\x00automatic extension loading failed: 
%s\x00seq\x00from\x00to\x00on_update\x00on_delete\x00match\x00cid\x00name\x00type\x00notnull\x00dflt_value\x00pk\x00hidden\x00schema\x00ncol\x00wr\x00strict\x00seqno\x00desc\x00coll\x00key\x00builtin\x00enc\x00narg\x00flags\x00wdth\x00hght\x00flgs\x00unique\x00origin\x00partial\x00rowid\x00fkid\x00file\x00busy\x00checkpointed\x00database\x00status\x00cache_size\x00timeout\x00analysis_limit\x00application_id\x00auto_vacuum\x00automatic_index\x00busy_timeout\x00cache_spill\x00case_sensitive_like\x00cell_size_check\x00checkpoint_fullfsync\x00collation_list\x00compile_options\x00count_changes\x00data_version\x00database_list\x00default_cache_size\x00defer_foreign_keys\x00empty_result_callbacks\x00encoding\x00foreign_key_check\x00foreign_key_list\x00foreign_keys\x00freelist_count\x00full_column_names\x00fullfsync\x00function_list\x00hard_heap_limit\x00ignore_check_constraints\x00incremental_vacuum\x00index_info\x00index_list\x00index_xinfo\x00integrity_check\x00journal_mode\x00journal_size_limit\x00legacy_alter_table\x00locking_mode\x00max_page_count\x00mmap_size\x00module_list\x00optimize\x00page_count\x00page_size\x00pragma_list\x00query_only\x00quick_check\x00read_uncommitted\x00recursive_triggers\x00reverse_unordered_selects\x00schema_version\x00secure_delete\x00short_column_names\x00shrink_memory\x00soft_heap_limit\x00synchronous\x00table_info\x00table_list\x00table_xinfo\x00temp_store\x00temp_store_directory\x00threads\x00trusted_schema\x00user_version\x00wal_autocheckpoint\x00wal_checkpoint\x00writable_schema\x00onoffalseyestruextrafull\x00exclusive\x00normal\x00full\x00incremental\x00memory\x00temporary storage cannot be changed from within a transaction\x00SET NULL\x00SET DEFAULT\x00CASCADE\x00RESTRICT\x00NO ACTION\x00delete\x00persist\x00off\x00truncate\x00wal\x00w\x00a\x00sissii\x00utf8\x00utf16le\x00utf16be\x00-%T\x00fast\x00not a writable directory\x00Safety level may not be changed inside a transaction\x00reset\x00issisii\x00issisi\x00SELECT*FROM\"%w\"\x00shadow\x00sssiii\x00iisX\x00isiX\x00c\x00u\x00isisi\x00iss\x00is\x00iissssss\x00NONE\x00siX\x00*** in database %s ***\n\x00row not in PRIMARY KEY order for %s\x00NULL value in %s.%s\x00non-%s value in %s.%s\x00NUMERIC value in %s.%s\x00C\x00TEXT value in %s.%s\x00CHECK constraint failed in %s\x00row \x00 missing from index \x00 values differ from index \x00non-unique entry in index \x00wrong # of entries in index \x00ok\x00unsupported encoding: %s\x00restart\x00ANALYZE \"%w\".\"%w\"\x00UTF8\x00UTF-8\x00UTF-16le\x00UTF-16be\x00UTF16le\x00UTF16be\x00UTF-16\x00UTF16\x00CREATE TABLE x\x00%c\"%s\"\x00(\"%s\"\x00,arg HIDDEN\x00,schema HIDDEN\x00PRAGMA \x00%Q.\x00=%Q\x00error in %s %s after %s: %s\x00malformed database schema (%s)\x00%z - %s\x00rename\x00drop column\x00add column\x00orphan index\x00CREATE TABLE x(type text,name text,tbl_name text,rootpage int,sql text)\x00unsupported file format\x00SELECT*FROM\"%w\".%s ORDER BY rowid\x00database schema is locked: %s\x00statement too long\x00unknown join type: %T%s%T%s%T\x00naturaleftouterightfullinnercross\x00a NATURAL join may not have an ON or USING clause\x00cannot join using column %s - column not present in both tables\x00ambiguous reference to %s in USING()\x00UNION ALL\x00INTERSECT\x00EXCEPT\x00UNION\x00USE TEMP B-TREE FOR %s\x00USE TEMP B-TREE FOR %sORDER BY\x00RIGHT PART OF \x00column%d\x00%.*z:%u\x00NUM\x00cannot use window functions in recursive queries\x00recursive aggregate queries not supported\x00SETUP\x00RECURSIVE STEP\x00SCAN %d CONSTANT ROW%s\x00S\x00COMPOUND 
QUERY\x00LEFT-MOST SUBQUERY\x00%s USING TEMP B-TREE\x00all VALUES must have the same number of terms\x00SELECTs to the left and right of %s do not have the same number of result columns\x00MERGE (%s)\x00LEFT\x00RIGHT\x00no such index: %s\x00'%s' is not a function\x00no such index: \"%s\"\x00multiple references to recursive table: %s\x00circular reference: %s\x00table %s has %d values for %d columns\x00multiple recursive references: %s\x00recursive reference in a subquery: %s\x00%!S\x00too many references to \"%s\": max 65535\x00access to view \"%s\" prohibited\x00..%s\x00%s.%s.%s\x00no such table: %s\x00no tables specified\x00too many columns in result set\x00DISTINCT aggregates must have exactly one argument\x00USE TEMP B-TREE FOR %s(DISTINCT)\x00SCAN %s%s%s\x00 USING COVERING INDEX \x00target object/alias may not appear in FROM clause: %s\x00expected %d columns for '%s' but got %d\x00CO-ROUTINE %!S\x00MATERIALIZE %!S\x00DISTINCT\x00GROUP BY\x00sqlite3_get_table() called with two or more incompatible queries\x00temporary trigger may not have qualified name\x00trigger\x00cannot create triggers on virtual tables\x00trigger %T already exists\x00cannot create trigger on system table\x00cannot create %s trigger on view: %S\x00BEFORE\x00AFTER\x00cannot create INSTEAD OF trigger on table: %S\x00trigger \"%s\" may not write to shadow table \"%s\"\x00INSERT INTO %Q.sqlite_master VALUES('trigger',%Q,%Q,0,'CREATE TRIGGER %q')\x00type='trigger' AND name='%q'\x00no such trigger: %S\x00DELETE FROM %Q.sqlite_master WHERE name=%Q AND type='trigger'\x00%s RETURNING is not available on virtual tables\x00DELETE\x00UPDATE\x00RETURNING may not use \"TABLE.*\" wildcards\x00-- TRIGGER %s\x00cannot UPDATE generated column \"%s\"\x00no such column: %s\x00rows updated\x00%r \x00%sON CONFLICT clause does not match any PRIMARY KEY or UNIQUE constraint\x00CRE\x00INS\x00cannot VACUUM from within a transaction\x00cannot VACUUM - SQL statements in progress\x00non-text filename\x00ATTACH %Q AS vacuum_db\x00output file already exists\x00SELECT sql FROM \"%w\".sqlite_schema WHERE type='table'AND name<>'sqlite_sequence' AND coalesce(rootpage,1)>0\x00SELECT sql FROM \"%w\".sqlite_schema WHERE type='index'\x00SELECT'INSERT INTO vacuum_db.'||quote(name)||' SELECT*FROM\"%w\".'||quote(name)FROM vacuum_db.sqlite_schema WHERE type='table'AND coalesce(rootpage,1)>0\x00INSERT INTO vacuum_db.sqlite_schema SELECT*FROM \"%w\".sqlite_schema WHERE type IN('view','trigger') OR(type='table'AND rootpage=0)\x00CREATE VIRTUAL TABLE %T\x00UPDATE %Q.sqlite_master SET type='table', name=%Q, tbl_name=%Q, rootpage=0, sql=%Q WHERE rowid=#%d\x00name=%Q AND sql=%Q\x00vtable constructor called recursively: %s\x00vtable constructor failed: %s\x00vtable constructor did not declare schema: %s\x00no such module: %s\x00\x00 AND \x00(\x00 (\x00%s=?\x00ANY(%s)\x00>\x00<\x00%s %S\x00SEARCH\x00SCAN\x00AUTOMATIC PARTIAL COVERING INDEX\x00AUTOMATIC COVERING INDEX\x00COVERING INDEX %s\x00INDEX %s\x00 USING \x00 USING INTEGER PRIMARY KEY (%s\x00>? 
AND %s\x00%c?)\x00 VIRTUAL TABLE INDEX %d:%s\x00 LEFT-JOIN\x00BLOOM FILTER ON %S (\x00rowid=?\x00MULTI-INDEX OR\x00INDEX %d\x00RIGHT-JOIN %s\x00regexp\x00ON clause references tables to its right\x00NOCASE\x00too many arguments on %s() - max %d\x00automatic index on %s(%s)\x00auto-index\x00%s.xBestIndex malfunction\x00abbreviated query algorithm search\x00no query solution\x00at most %d tables in a join\x00SCAN CONSTANT ROW\x00second argument to nth_value must be a positive integer\x00argument of ntile must be a positive integer\x00row_number\x00dense_rank\x00rank\x00percent_rank\x00cume_dist\x00ntile\x00last_value\x00nth_value\x00first_value\x00lead\x00lag\x00no such window: %s\x00RANGE with offset PRECEDING/FOLLOWING requires one ORDER BY expression\x00FILTER clause may only be used with aggregate window functions\x00misuse of aggregate: %s()\x00unsupported frame specification\x00PARTITION clause\x00ORDER BY clause\x00frame specification\x00cannot override %s of window: %s\x00DISTINCT is not supported for window functions\x00frame starting offset must be a non-negative integer\x00frame ending offset must be a non-negative integer\x00frame starting offset must be a non-negative number\x00frame ending offset must be a non-negative number\x00%s clause should come after %s not before\x00ORDER BY\x00LIMIT\x00too many terms in compound SELECT\x00syntax error after column name \"%.*s\"\x00parser stack overflow\x00unknown table option: %.*s\x00set list\x00near \"%T\": syntax error\x00qualified table names are not allowed on INSERT, UPDATE, and DELETE statements within triggers\x00the INDEXED BY clause is not allowed on UPDATE or DELETE statements within triggers\x00the NOT INDEXED clause is not allowed on UPDATE or DELETE statements within triggers\x00incomplete input\x00unrecognized token: \"%T\"\x00%s in \"%s\"\x00create\x00temp\x00temporary\x00end\x00explain\x00unable to close due to unfinalized statements or unfinished backups\x00unknown error\x00abort due to ROLLBACK\x00another row available\x00no more rows available\x00not an error\x00SQL logic error\x00access permission denied\x00query aborted\x00database is locked\x00database table is locked\x00attempt to write a readonly database\x00interrupted\x00disk I/O error\x00database disk image is malformed\x00unknown operation\x00database or disk is full\x00unable to open database file\x00locking protocol\x00constraint failed\x00datatype mismatch\x00bad parameter or other API misuse\x00authorization denied\x00column index out of range\x00file is not a database\x00notification message\x00warning message\x00unable to delete/modify user-function due to active statements\x00unable to use function %s in the requested context\x00unknown database: %s\x00unable to delete/modify collation sequence due to active statements\x00file:\x00localhost\x00invalid uri authority: %.*s\x00vfs\x00cache\x00mode\x00no such %s mode: %s\x00%s mode not allowed: %s\x00no such vfs: %s\x00shared\x00private\x00ro\x00rw\x00rwc\x00RTRIM\x00\x00\x00\x00%s at line %d of [%.10s]\x00database corruption\x00misuse\x00cannot open file\x00no such table column: %s.%s\x00SQLITE_\x00database is deadlocked\x00array\x00object\x000123456789abcdef\x00JSON cannot hold BLOB values\x00malformed JSON\x00[0]\x00JSON path error near '%q'\x00json_%s() needs an odd number of arguments\x00$[\x00$.\x00json_object() requires an even number of arguments\x00json_object() labels must be TEXT\x00set\x00insert\x00[]\x00{}\x00CREATE TABLE x(key,value,type,atom,id,parent,fullkey,path,json HIDDEN,root 
HIDDEN)\x00.%.*s\x00[%d]\x00$\x00json\x00json_array\x00json_array_length\x00json_extract\x00->\x00->>\x00json_insert\x00json_object\x00json_patch\x00json_quote\x00json_remove\x00json_replace\x00json_set\x00json_type\x00json_valid\x00json_group_array\x00json_group_object\x00json_each\x00json_tree\x00%s_node\x00data\x00DROP TABLE '%q'.'%q_node';DROP TABLE '%q'.'%q_rowid';DROP TABLE '%q'.'%q_parent';\x00RtreeMatchArg\x00SELECT * FROM %Q.%Q\x00UNIQUE constraint failed: %s.%s\x00rtree constraint failed: %s.(%s<=%s)\x00ALTER TABLE %Q.'%q_node' RENAME TO \"%w_node\";ALTER TABLE %Q.'%q_parent' RENAME TO \"%w_parent\";ALTER TABLE %Q.'%q_rowid' RENAME TO \"%w_rowid\";\x00SELECT stat FROM %Q.sqlite_stat1 WHERE tbl = '%q_rowid'\x00node\x00CREATE TABLE \"%w\".\"%w_rowid\"(rowid INTEGER PRIMARY KEY,nodeno\x00,a%d\x00);CREATE TABLE \"%w\".\"%w_node\"(nodeno INTEGER PRIMARY KEY,data);\x00CREATE TABLE \"%w\".\"%w_parent\"(nodeno INTEGER PRIMARY KEY,parentnode);\x00INSERT INTO \"%w\".\"%w_node\"VALUES(1,zeroblob(%d))\x00INSERT INTO\"%w\".\"%w_rowid\"(rowid,nodeno)VALUES(?1,?2)ON CONFLICT(rowid)DO UPDATE SET nodeno=excluded.nodeno\x00SELECT * FROM \"%w\".\"%w_rowid\" WHERE rowid=?1\x00UPDATE \"%w\".\"%w_rowid\"SET \x00a%d=coalesce(?%d,a%d)\x00a%d=?%d\x00 WHERE rowid=?1\x00INSERT OR REPLACE INTO '%q'.'%q_node' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_node' WHERE nodeno = ?1\x00SELECT nodeno FROM '%q'.'%q_rowid' WHERE rowid = ?1\x00INSERT OR REPLACE INTO '%q'.'%q_rowid' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_rowid' WHERE rowid = ?1\x00SELECT parentnode FROM '%q'.'%q_parent' WHERE nodeno = ?1\x00INSERT OR REPLACE INTO '%q'.'%q_parent' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_parent' WHERE nodeno = ?1\x00PRAGMA %Q.page_size\x00SELECT length(data) FROM '%q'.'%q_node' WHERE nodeno = 1\x00undersize RTree blobs in \"%q_node\"\x00Wrong number of columns for an rtree table\x00Too few columns for an rtree table\x00Too many columns for an rtree table\x00Auxiliary rtree columns must be last\x00CREATE TABLE x(%.*s INT\x00,%.*s\x00);\x00,%.*s REAL\x00,%.*s INT\x00{%lld\x00 %g\x00}\x00Invalid argument to rtreedepth()\x00%z%s%z\x00SELECT data FROM %Q.'%q_node' WHERE nodeno=?\x00Node %lld missing from database\x00SELECT parentnode FROM %Q.'%q_parent' WHERE nodeno=?1\x00SELECT nodeno FROM %Q.'%q_rowid' WHERE rowid=?1\x00Mapping (%lld -> %lld) missing from %s table\x00%_rowid\x00%_parent\x00Found (%lld -> %lld) in %s table, expected (%lld -> %lld)\x00Dimension %d of cell %d on node %lld is corrupt\x00Dimension %d of cell %d on node %lld is corrupt relative to parent\x00Node %lld is too small (%d bytes)\x00Rtree depth out of range (%d)\x00Node %lld is too small for cell count of %d (%d bytes)\x00SELECT count(*) FROM %Q.'%q%s'\x00Wrong number of entries in %%%s table - expected %lld, actual %lld\x00SELECT * FROM %Q.'%q_rowid'\x00Schema corrupt or not an rtree\x00_rowid\x00_parent\x00END\x00wrong number of arguments to function rtreecheck()\x00[\x00[%!g,%!g],\x00[%!g,%!g]]\x00\x00CREATE TABLE x(_shape\x00,%s\x00rtree\x00fullscan\x00_shape does not contain a valid polygon\x00geopoly_overlap\x00geopoly_within\x00geopoly\x00geopoly_area\x00geopoly_blob\x00geopoly_json\x00geopoly_svg\x00geopoly_contains_point\x00geopoly_debug\x00geopoly_bbox\x00geopoly_xform\x00geopoly_regular\x00geopoly_ccw\x00geopoly_group_bbox\x00rtreenode\x00rtreedepth\x00rtreecheck\x00rtree_i32\x00corrupt fossil delta\x00DROP TRIGGER IF EXISTS temp.rbu_insert_tr;DROP TRIGGER IF EXISTS temp.rbu_update1_tr;DROP TRIGGER IF EXISTS temp.rbu_update2_tr;DROP TRIGGER IF 
EXISTS temp.rbu_delete_tr;\x00SELECT rbu_target_name(name, type='view') AS target, name FROM sqlite_schema WHERE type IN ('table', 'view') AND target IS NOT NULL %s ORDER BY name\x00AND rootpage!=0 AND rootpage IS NOT NULL\x00SELECT name, rootpage, sql IS NULL OR substr(8, 6)=='UNIQUE' FROM main.sqlite_schema WHERE type='index' AND tbl_name = ?\x00SELECT (sql COLLATE nocase BETWEEN 'CREATE VIRTUAL' AND 'CREATE VIRTUAM'), rootpage FROM sqlite_schema WHERE name=%Q\x00PRAGMA index_list=%Q\x00SELECT rootpage FROM sqlite_schema WHERE name = %Q\x00PRAGMA table_info=%Q\x00PRAGMA main.index_list = %Q\x00PRAGMA main.index_xinfo = %Q\x00SELECT * FROM '%q'\x00rbu_\x00rbu_rowid\x00table %q %s rbu_rowid column\x00may not have\x00requires\x00PRAGMA table_info(%Q)\x00column missing from %q: %s\x00%z%s\"%w\"\x00%z%s%s\"%w\"%s\x00SELECT max(_rowid_) FROM \"%s%w\"\x00 WHERE _rowid_ > %lld \x00 DESC\x00quote(\x00||','||\x00SELECT %s FROM \"%s%w\" ORDER BY %s LIMIT 1\x00 WHERE (%s) > (%s) \x00_rowid_\x00%z%s \"%w\" COLLATE %Q\x00%z%s \"rbu_imp_%d%w\" COLLATE %Q DESC\x00%z%s quote(\"rbu_imp_%d%w\")\x00SELECT %s FROM \"rbu_imp_%w\" ORDER BY %s LIMIT 1\x00%z%s%s\x00(%s) > (%s)\x00%z%s(%.*s) COLLATE %Q\x00%z%s\"%w\" COLLATE %Q\x00%z%s\"rbu_imp_%d%w\"%s\x00%z%s\"rbu_imp_%d%w\" %s COLLATE %Q\x00%z%s\"rbu_imp_%d%w\" IS ?\x00%z%s%s.\"%w\"\x00%z%sNULL\x00%z, %s._rowid_\x00_rowid_ = ?%d\x00%z%sc%d=?%d\x00_rowid_ = (SELECT id FROM rbu_imposter2 WHERE %z)\x00%z%s\"%w\"=?%d\x00invalid rbu_control value\x00%z%s\"%w\"=rbu_delta(\"%w\", ?%d)\x00%z%s\"%w\"=rbu_fossil_delta(\"%w\", ?%d)\x00PRIMARY KEY(\x00%z%s\"%w\"%s\x00%z)\x00SELECT name FROM sqlite_schema WHERE rootpage = ?\x00%z%sc%d %s COLLATE %Q\x00%z%sc%d%s\x00%z, id INTEGER\x00CREATE TABLE rbu_imposter2(%z, PRIMARY KEY(%z)) WITHOUT ROWID\x00PRIMARY KEY \x00%z%s\"%w\" %s %sCOLLATE %Q%s\x00 NOT NULL\x00%z, %z\x00CREATE TABLE \"rbu_imp_%w\"(%z)%s\x00 WITHOUT ROWID\x00INSERT INTO %s.'rbu_tmp_%q'(rbu_control,%s%s) VALUES(%z)\x00SELECT trim(sql) FROM sqlite_schema WHERE type='index' AND name=?\x00 LIMIT -1 OFFSET %d\x00CREATE TABLE \"rbu_imp_%w\"( %s, PRIMARY KEY( %s ) ) WITHOUT ROWID\x00INSERT INTO \"rbu_imp_%w\" VALUES(%s)\x00DELETE FROM \"rbu_imp_%w\" WHERE %s\x00SELECT %s, 0 AS rbu_control FROM '%q' %s %s %s ORDER BY %s%s\x00AND\x00WHERE\x00SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' %s ORDER BY %s%s\x00SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' %s UNION ALL SELECT %s, rbu_control FROM '%q' %s %s typeof(rbu_control)='integer' AND rbu_control!=1 ORDER BY %s%s\x00rbu_imp_\x00INSERT INTO \"%s%w\"(%s%s) VALUES(%s)\x00, _rowid_\x00DELETE FROM \"%s%w\" WHERE %s\x00, rbu_rowid\x00CREATE TABLE IF NOT EXISTS %s.'rbu_tmp_%q' AS SELECT *%s FROM '%q' WHERE 0;\x00, 0 AS rbu_rowid\x00CREATE TEMP TRIGGER rbu_delete_tr BEFORE DELETE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(3, %s);END;CREATE TEMP TRIGGER rbu_update1_tr BEFORE UPDATE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(3, %s);END;CREATE TEMP TRIGGER rbu_update2_tr AFTER UPDATE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(4, %s);END;\x00CREATE TEMP TRIGGER rbu_insert_tr AFTER INSERT ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(0, %s);END;\x00,_rowid_ \x00,rbu_rowid\x00SELECT %s,%s rbu_control%s FROM '%q'%s %s %s %s\x000 AS \x00UPDATE \"%s%w\" SET %s WHERE %s\x00SELECT k, v FROM %s.rbu_state\x00file:///%s-vacuum?modeof=%s\x00ATTACH %Q AS stat\x00CREATE TABLE IF NOT EXISTS %s.rbu_state(k INTEGER PRIMARY KEY, v)\x00cannot vacuum wal mode 
database\x00file:%s-vactmp?rbu_memory=1%s%s\x00&\x00rbu_tmp_insert\x00rbu_fossil_delta\x00rbu_target_name\x00SELECT * FROM sqlite_schema\x00rbu vfs not found\x00PRAGMA main.wal_checkpoint=restart\x00rbu_exclusive_checkpoint\x00%s-oal\x00%s-wal\x00PRAGMA schema_version\x00PRAGMA schema_version = %d\x00INSERT OR REPLACE INTO %s.rbu_state(k, v) VALUES (%d, %d), (%d, %Q), (%d, %Q), (%d, %d), (%d, %d), (%d, %lld), (%d, %lld), (%d, %lld), (%d, %lld), (%d, %Q) \x00PRAGMA main.%s\x00PRAGMA main.%s = %d\x00PRAGMA writable_schema=1\x00SELECT sql FROM sqlite_schema WHERE sql!='' AND rootpage!=0 AND name!='sqlite_sequence' ORDER BY type DESC\x00SELECT * FROM sqlite_schema WHERE rootpage=0 OR rootpage IS NULL\x00INSERT INTO sqlite_schema VALUES(?,?,?,?,?)\x00PRAGMA writable_schema=0\x00DELETE FROM %s.'rbu_tmp_%q'\x00rbu_state mismatch error\x00rbu_vfs_%d\x00SELECT count(*) FROM sqlite_schema WHERE type='index' AND tbl_name = %Q\x00rbu_index_cnt\x00SELECT 1 FROM sqlite_schema WHERE tbl_name = 'rbu_count'\x00SELECT sum(cnt * (1 + rbu_index_cnt(rbu_target_name(tbl))))FROM rbu_count\x00cannot update wal mode database\x00database modified during rbu %s\x00vacuum\x00update\x00BEGIN IMMEDIATE\x00PRAGMA journal_mode=off\x00-vactmp\x00DELETE FROM stat.rbu_state\x00rbu/zipvfs setup error\x00rbu(%s)/%z\x00rbu_memory\x00SELECT 0, 'tbl', '', 0, '', 1 UNION ALL SELECT 1, 'idx', '', 0, '', 2 UNION ALL SELECT 2, 'stat', '', 0, '', 0\x00PRAGMA '%q'.table_info('%q')\x00%z%s\"%w\".\"%w\".\"%w\"=\"%w\".\"%w\".\"%w\"\x00%z%s\"%w\".\"%w\".\"%w\" IS NOT \"%w\".\"%w\".\"%w\"\x00 OR \x00SELECT * FROM \"%w\".\"%w\" WHERE NOT EXISTS ( SELECT 1 FROM \"%w\".\"%w\" WHERE %s)\x00SELECT * FROM \"%w\".\"%w\", \"%w\".\"%w\" WHERE %s AND (%z)\x00table schemas do not match\x00SELECT tbl, ?2, stat FROM %Q.sqlite_stat1 WHERE tbl IS ?1 AND idx IS (CASE WHEN ?2=X'' THEN NULL ELSE ?2 END)\x00SELECT * FROM \x00 WHERE \x00 IS ?\x00SAVEPOINT changeset\x00RELEASE changeset\x00UPDATE main.\x00 SET \x00 = ?\x00idx IS CASE WHEN length(?4)=0 AND typeof(?4)='blob' THEN NULL ELSE ?4 END \x00DELETE FROM main.\x00 AND (?\x00AND \x00INSERT INTO main.\x00) VALUES(?\x00, ?\x00INSERT INTO main.sqlite_stat1 VALUES(?1, CASE WHEN length(?2)=0 AND typeof(?2)='blob' THEN NULL ELSE ?2 END, ?3)\x00DELETE FROM main.sqlite_stat1 WHERE tbl=?1 AND idx IS CASE WHEN length(?2)=0 AND typeof(?2)='blob' THEN NULL ELSE ?2 END AND (?4 OR stat IS ?3)\x00SAVEPOINT replace_op\x00RELEASE replace_op\x00SAVEPOINT changeset_apply\x00PRAGMA defer_foreign_keys = 1\x00sqlite3changeset_apply(): no such table: %s\x00sqlite3changeset_apply(): table %s has %d columns, expected %d or more\x00sqlite3changeset_apply(): primary key mismatch for table %s\x00PRAGMA defer_foreign_keys = 0\x00RELEASE changeset_apply\x00ROLLBACK TO changeset_apply\x00fts5: parser stack overflow\x00fts5: syntax error near \"%.*s\"\x00%z%.*s\x00wrong number of arguments to function highlight()\x00wrong number of arguments to function snippet()\x00snippet\x00highlight\x00bm25\x00prefix\x00malformed prefix=... directive\x00too many prefix indexes (max %d)\x00prefix length out of range (max 999)\x00tokenize\x00multiple tokenize=... directives\x00parse error in tokenize directive\x00content\x00multiple content=... directives\x00%Q.%Q\x00content_rowid\x00multiple content_rowid=... directives\x00columnsize\x00malformed columnsize=... directive\x00columns\x00malformed detail=... 
directive\x00unrecognized option: \"%.*s\"\x00reserved fts5 column name: %s\x00unindexed\x00unrecognized column option: %s\x00T.%Q\x00, T.%Q\x00, T.c%d\x00reserved fts5 table name: %s\x00parse error in \"%s\"\x00docsize\x00%Q.'%q_%s'\x00CREATE TABLE x(\x00%z%s%Q\x00%z, %Q HIDDEN, %s HIDDEN)\x00pgsz\x00hashsize\x00automerge\x00usermerge\x00crisismerge\x00SELECT k, v FROM %Q.'%q_config'\x00version\x00invalid fts5 file format (found %d, expected %d) - run 'rebuild'\x00unterminated string\x00fts5: syntax error near \"%.1s\"\x00OR\x00NOT\x00NEAR\x00expected integer, got \"%.*s\"\x00fts5: column queries are not supported (detail=none)\x00fts5: %s queries are not supported (detail!=full)\x00phrase\x00block\x00REPLACE INTO '%q'.'%q_data'(id, block) VALUES(?,?)\x00DELETE FROM '%q'.'%q_data' WHERE id>=? AND id<=?\x00DELETE FROM '%q'.'%q_idx' WHERE segid=?\x00PRAGMA %Q.data_version\x00SELECT pgno FROM '%q'.'%q_idx' WHERE segid=? AND term<=? ORDER BY term DESC LIMIT 1\x00INSERT INTO '%q'.'%q_idx'(segid,term,pgno) VALUES(?,?,?)\x00%s_data\x00id INTEGER PRIMARY KEY, block BLOB\x00segid, term, pgno, PRIMARY KEY(segid, term)\x00SELECT segid, term, (pgno>>1), (pgno&1) FROM %Q.'%q_idx' WHERE segid=%d ORDER BY 1, 2\x00\x00\x00\x00\x00\x00recursively defined fts5 content table\x00SELECT rowid, rank FROM %Q.%Q ORDER BY %s(\"%w\"%s%s) %s\x00DESC\x00ASC\x00reads\x00unknown special query: %.*s\x00SELECT %s\x00no such function: %s\x00parse error in rank function: %s\x00%s: table does not support scanning\x00delete-all\x00'delete-all' may only be used with a contentless or external content fts5 table\x00rebuild\x00'rebuild' may not be used with a contentless fts5 table\x00merge\x00integrity-check\x00cannot %s contentless fts5 table: %s\x00DELETE from\x00no such cursor: %lld\x00no such tokenizer: %s\x00error in tokenizer constructor\x00fts5_api_ptr\x00fts5: 2023-02-21 18:09:37 05941c2a04037fc3ed2ffae11f5d2260706f89431f463518740f72ada350866d\x00config\x00fts5\x00fts5_source_id\x00SELECT %s FROM %s T WHERE T.%Q >= ? AND T.%Q <= ? ORDER BY T.%Q ASC\x00SELECT %s FROM %s T WHERE T.%Q <= ? AND T.%Q >= ? 
ORDER BY T.%Q DESC\x00SELECT %s FROM %s T WHERE T.%Q=?\x00INSERT INTO %Q.'%q_content' VALUES(%s)\x00REPLACE INTO %Q.'%q_content' VALUES(%s)\x00DELETE FROM %Q.'%q_content' WHERE id=?\x00REPLACE INTO %Q.'%q_docsize' VALUES(?,?)\x00DELETE FROM %Q.'%q_docsize' WHERE id=?\x00SELECT sz FROM %Q.'%q_docsize' WHERE id=?\x00REPLACE INTO %Q.'%q_config' VALUES(?,?)\x00SELECT %s FROM %s AS T\x00DROP TABLE IF EXISTS %Q.'%q_data';DROP TABLE IF EXISTS %Q.'%q_idx';DROP TABLE IF EXISTS %Q.'%q_config';\x00DROP TABLE IF EXISTS %Q.'%q_docsize';\x00DROP TABLE IF EXISTS %Q.'%q_content';\x00ALTER TABLE %Q.'%q_%s' RENAME TO '%q_%s';\x00CREATE TABLE %Q.'%q_%q'(%s)%s\x00fts5: error creating shadow table %q_%s: %s\x00id INTEGER PRIMARY KEY\x00, c%d\x00id INTEGER PRIMARY KEY, sz BLOB\x00k PRIMARY KEY, v\x00DELETE FROM %Q.'%q_data';DELETE FROM %Q.'%q_idx';\x00DELETE FROM %Q.'%q_docsize';\x00SELECT count(*) FROM %Q.'%q_%s'\x00tokenchars\x00separators\x00L* N* Co\x00categories\x00remove_diacritics\x00unicode61\x00al\x00ance\x00ence\x00er\x00ic\x00able\x00ible\x00ant\x00ement\x00ment\x00ent\x00ion\x00ou\x00ism\x00ate\x00iti\x00ous\x00ive\x00ize\x00at\x00bl\x00ble\x00iz\x00ational\x00tional\x00tion\x00enci\x00anci\x00izer\x00logi\x00bli\x00alli\x00entli\x00eli\x00e\x00ousli\x00ization\x00ation\x00ator\x00alism\x00iveness\x00fulness\x00ful\x00ousness\x00aliti\x00iviti\x00biliti\x00ical\x00ness\x00icate\x00iciti\x00ative\x00alize\x00eed\x00ee\x00ed\x00ing\x00case_sensitive\x00ascii\x00porter\x00trigram\x00col\x00row\x00instance\x00fts5vocab: unknown table type: %Q\x00CREATE TABlE vocab(term, col, doc, cnt)\x00CREATE TABlE vocab(term, doc, cnt)\x00CREATE TABlE vocab(term, doc, col, offset)\x00wrong number of vtable arguments\x00recursive definition for %s.%s\x00SELECT t.%Q FROM %Q.%Q AS t WHERE t.%Q MATCH '*id'\x00no such fts5 table: %s.%s\x00fts5vocab\x002023-02-21 18:09:37 05941c2a04037fc3ed2ffae11f5d2260706f89431f463518740f72ada350866d\x00" +var ts1 = "3.41.2\x00ATOMIC_INTRINSICS=0\x00COMPILER=clang-13.0.0\x00DEFAULT_AUTOVACUUM\x00DEFAULT_CACHE_SIZE=-2000\x00DEFAULT_FILE_FORMAT=4\x00DEFAULT_JOURNAL_SIZE_LIMIT=-1\x00DEFAULT_MEMSTATUS=0\x00DEFAULT_MMAP_SIZE=0\x00DEFAULT_PAGE_SIZE=4096\x00DEFAULT_PCACHE_INITSZ=20\x00DEFAULT_RECURSIVE_TRIGGERS\x00DEFAULT_SECTOR_SIZE=4096\x00DEFAULT_SYNCHRONOUS=2\x00DEFAULT_WAL_AUTOCHECKPOINT=1000\x00DEFAULT_WAL_SYNCHRONOUS=2\x00DEFAULT_WORKER_THREADS=0\x00ENABLE_COLUMN_METADATA\x00ENABLE_FTS5\x00ENABLE_GEOPOLY\x00ENABLE_MATH_FUNCTIONS\x00ENABLE_MEMORY_MANAGEMENT\x00ENABLE_OFFSET_SQL_FUNC\x00ENABLE_PREUPDATE_HOOK\x00ENABLE_RBU\x00ENABLE_RTREE\x00ENABLE_SESSION\x00ENABLE_SNAPSHOT\x00ENABLE_STAT4\x00ENABLE_UNLOCK_NOTIFY\x00LIKE_DOESNT_MATCH_BLOBS\x00MALLOC_SOFT_LIMIT=1024\x00MAX_ATTACHED=10\x00MAX_COLUMN=2000\x00MAX_COMPOUND_SELECT=500\x00MAX_DEFAULT_PAGE_SIZE=8192\x00MAX_EXPR_DEPTH=1000\x00MAX_FUNCTION_ARG=127\x00MAX_LENGTH=1000000000\x00MAX_LIKE_PATTERN_LENGTH=50000\x00MAX_MMAP_SIZE=0\x00MAX_PAGE_COUNT=1073741823\x00MAX_PAGE_SIZE=65536\x00MAX_SQL_LENGTH=1000000000\x00MAX_TRIGGER_DEPTH=1000\x00MAX_VARIABLE_NUMBER=32766\x00MAX_VDBE_OP=250000000\x00MAX_WORKER_THREADS=8\x00MUTEX_NOOP\x00SOUNDEX\x00SYSTEM_MALLOC\x00TEMP_STORE=1\x00THREADSAFE=1\x00BINARY\x00ANY\x00BLOB\x00INT\x00INTEGER\x00REAL\x00TEXT\x0020b:20e\x0020c:20e\x0020e\x0040f-21a-21d\x00now\x00local time unavailable\x00second\x00minute\x00hour\x00\x00\x00day\x00\x00\x00\x00month\x00\x00year\x00\x00\x00auto\x00julianday\x00localtime\x00unixepoch\x00utc\x00weekday \x00start of 
\x00month\x00year\x00day\x00%02d\x00%06.3f\x00%03d\x00%.16g\x00%lld\x00%04d\x00date\x00time\x00datetime\x00strftime\x00current_time\x00current_timestamp\x00current_date\x00failed to allocate %u bytes of memory\x00failed memory resize %u to %u bytes\x00out of memory\x000123456789ABCDEF0123456789abcdef\x00-x0\x00X0\x00%\x00NaN\x00Inf\x00\x00NULL\x00(NULL)\x00.\x00(join-%u)\x00(subquery-%u)\x00thstndrd\x00922337203685477580\x00API call with %s database connection pointer\x00unopened\x00invalid\x00Savepoint\x00AutoCommit\x00Transaction\x00Checkpoint\x00JournalMode\x00Vacuum\x00VFilter\x00VUpdate\x00Init\x00Goto\x00Gosub\x00InitCoroutine\x00Yield\x00MustBeInt\x00Jump\x00Once\x00If\x00IfNot\x00IsType\x00Not\x00IfNullRow\x00SeekLT\x00SeekLE\x00SeekGE\x00SeekGT\x00IfNotOpen\x00IfNoHope\x00NoConflict\x00NotFound\x00Found\x00SeekRowid\x00NotExists\x00Last\x00IfSmaller\x00SorterSort\x00Sort\x00Rewind\x00SorterNext\x00Prev\x00Next\x00IdxLE\x00IdxGT\x00IdxLT\x00Or\x00And\x00IdxGE\x00RowSetRead\x00RowSetTest\x00Program\x00FkIfZero\x00IsNull\x00NotNull\x00Ne\x00Eq\x00Gt\x00Le\x00Lt\x00Ge\x00ElseEq\x00IfPos\x00IfNotZero\x00DecrJumpZero\x00IncrVacuum\x00VNext\x00Filter\x00PureFunc\x00Function\x00Return\x00EndCoroutine\x00HaltIfNull\x00Halt\x00Integer\x00Int64\x00String\x00BeginSubrtn\x00Null\x00SoftNull\x00Blob\x00Variable\x00Move\x00Copy\x00SCopy\x00IntCopy\x00FkCheck\x00ResultRow\x00CollSeq\x00AddImm\x00RealAffinity\x00Cast\x00Permutation\x00Compare\x00IsTrue\x00ZeroOrNull\x00Offset\x00Column\x00TypeCheck\x00Affinity\x00MakeRecord\x00Count\x00ReadCookie\x00SetCookie\x00ReopenIdx\x00BitAnd\x00BitOr\x00ShiftLeft\x00ShiftRight\x00Add\x00Subtract\x00Multiply\x00Divide\x00Remainder\x00Concat\x00OpenRead\x00OpenWrite\x00BitNot\x00OpenDup\x00OpenAutoindex\x00String8\x00OpenEphemeral\x00SorterOpen\x00SequenceTest\x00OpenPseudo\x00Close\x00ColumnsUsed\x00SeekScan\x00SeekHit\x00Sequence\x00NewRowid\x00Insert\x00RowCell\x00Delete\x00ResetCount\x00SorterCompare\x00SorterData\x00RowData\x00Rowid\x00NullRow\x00SeekEnd\x00IdxInsert\x00SorterInsert\x00IdxDelete\x00DeferredSeek\x00IdxRowid\x00FinishSeek\x00Destroy\x00Clear\x00ResetSorter\x00CreateBtree\x00SqlExec\x00ParseSchema\x00LoadAnalysis\x00DropTable\x00DropIndex\x00Real\x00DropTrigger\x00IntegrityCk\x00RowSetAdd\x00Param\x00FkCounter\x00MemMax\x00OffsetLimit\x00AggInverse\x00AggStep\x00AggStep1\x00AggValue\x00AggFinal\x00Expire\x00CursorLock\x00CursorUnlock\x00TableLock\x00VBegin\x00VCreate\x00VDestroy\x00VOpen\x00VInitIn\x00VColumn\x00VRename\x00Pagecount\x00MaxPgcnt\x00ClrSubtype\x00FilterAdd\x00Trace\x00CursorHint\x00ReleaseReg\x00Noop\x00Explain\x00Abortable\x00open\x00close\x00access\x00getcwd\x00stat\x00fstat\x00ftruncate\x00fcntl\x00read\x00pread\x00pread64\x00write\x00pwrite\x00pwrite64\x00fchmod\x00fallocate\x00unlink\x00openDirectory\x00mkdir\x00rmdir\x00fchown\x00geteuid\x00mmap\x00munmap\x00mremap\x00getpagesize\x00readlink\x00lstat\x00ioctl\x00attempt to open \"%s\" as file descriptor %d\x00/dev/null\x00os_unix.c:%d: (%d) %s(%s) - %s\x00cannot fstat db file %s\x00file unlinked while open: %s\x00multiple links to file: %s\x00file renamed while open: %s\x00%s\x00full_fsync\x00%s-shm\x00readonly_shm\x00psow\x00unix-excl\x00%s.lock\x00/var/tmp\x00/usr/tmp\x00/tmp\x00SQLITE_TMPDIR\x00TMPDIR\x00%s/etilqs_%llx%c\x00modeof\x00fsync\x00/dev/urandom\x00unix\x00unix-none\x00unix-dotfile\x00memdb\x00memdb(%p,%lld)\x00PRAGMA \"%w\".page_count\x00ATTACH x AS %Q\x00recovered %d pages from %s\x00-journal\x00-wal\x00nolock\x00immutable\x00PRAGMA 
table_list\x00recovered %d frames from WAL file %s\x00cannot limit WAL size: %s\x00SQLite format 3\x00:memory:\x00@ \x00\n\x00invalid page number %d\x002nd reference to page %d\x00Failed to read ptrmap key=%d\x00Bad ptr map entry key=%d expected=(%d,%d) got=(%d,%d)\x00failed to get page %d\x00freelist leaf count too big on page %d\x00%s is %d but should be %d\x00size\x00overflow list length\x00Page %u: \x00unable to get the page. error code=%d\x00btreeInitPage() returns error code %d\x00free space corruption\x00On tree page %u cell %d: \x00On page %u at right child: \x00Offset %d out of range %d..%d\x00Extends off end of page\x00Rowid %lld out of order\x00Child page depth differs\x00Multiple uses for byte %u of page %u\x00Fragmentation of %d bytes reported as %d on page %u\x00Main freelist: \x00max rootpage (%d) disagrees with header (%d)\x00incremental_vacuum enabled with a max rootpage of zero\x00Page %d is never used\x00Pointer map page %d is referenced\x00unknown database %s\x00destination database is in use\x00source and destination must be distinct\x00%!.15g\x00-\x00%s%s\x00k(%d\x00B\x00,%s%s%s\x00N.\x00)\x00%.18s-%s\x00%s(%d)\x00%d\x00(blob)\x00vtab:%p\x00%c%u\x00]\x00program\x00?\x008\x0016LE\x0016BE\x00addr\x00opcode\x00p1\x00p2\x00p3\x00p4\x00p5\x00comment\x00id\x00parent\x00notused\x00detail\x00%.4c%s%.16c\x00MJ delete: %s\x00MJ collide: %s\x00-mj%06X9%02X\x00FOREIGN KEY constraint failed\x00a CHECK constraint\x00a generated column\x00an index\x00non-deterministic use of %s() in %s\x00API called with finalized prepared statement\x00API called with NULL prepared statement\x00string or blob too big\x00bind on a busy prepared statement: [%s]\x00-- \x00'%.*q'\x00zeroblob(%d)\x00x'\x00%02x\x00'\x00%s constraint failed\x00%z: %s\x00abort at %d in [%s]: %s\x00cannot store %s value in %s column %s.%s\x00cannot open savepoint - SQL statements in progress\x00no such savepoint: %s\x00cannot release savepoint - SQL statements in progress\x00cannot commit transaction - SQL statements in progress\x00cannot start a transaction within a transaction\x00cannot rollback - no transaction is active\x00cannot commit - no transaction is active\x00database schema has changed\x00index corruption\x00sqlite_master\x00SELECT*FROM\"%w\".%s WHERE %s ORDER BY rowid\x00too many levels of trigger recursion\x00cannot change %s wal mode from within a transaction\x00into\x00out of\x00database table is locked: %s\x00ValueList\x00-- %s\x00statement aborts at %d: [%s] %s\x00NOT NULL\x00UNIQUE\x00CHECK\x00FOREIGN KEY\x00cannot open value of type %s\x00null\x00real\x00integer\x00no such rowid: %lld\x00cannot open virtual table: %s\x00cannot open table without rowid: %s\x00cannot open view: %s\x00no such column: \"%s\"\x00foreign key\x00indexed\x00cannot open %s column for writing\x00sqlite_\x00sqlite_temp_master\x00sqlite_temp_schema\x00sqlite_schema\x00main\x00*\x00new\x00old\x00excluded\x00misuse of aliased aggregate %s\x00misuse of aliased window function %s\x00row value misused\x00double-quoted string literal: \"%w\"\x00coalesce\x00no such column\x00ambiguous column name\x00%s: %s.%s.%s\x00%s: %s.%s\x00%s: %s\x00partial index WHERE clauses\x00index expressions\x00CHECK constraints\x00generated columns\x00%s prohibited in %s\x00the \".\" operator\x00second argument to %#T() must be a constant between 0.0 and 1.0\x00not authorized to use function: %#T\x00non-deterministic functions\x00%#T() may not be used as a window function\x00window\x00aggregate\x00misuse of %s function %#T()\x00no such function: %#T\x00wrong 
number of arguments to function %#T()\x00FILTER may not be used with non-aggregate %#T()\x00subqueries\x00parameters\x00%r %s BY term out of range - should be between 1 and %d\x00too many terms in ORDER BY clause\x00ORDER\x00%r ORDER BY term does not match any column in the result set\x00too many terms in %s BY clause\x00HAVING clause on a non-aggregate query\x00GROUP\x00aggregate functions are not allowed in the GROUP BY clause\x00Expression tree is too large (maximum depth %d)\x00IN(...) element has %d term%s - expected %d\x00s\x000\x00too many arguments on function %T\x00unsafe use of %#T()\x00variable number must be between ?1 and ?%d\x00too many SQL variables\x00%d columns assigned %d values\x00too many columns in %s\x00true\x00false\x00_ROWID_\x00ROWID\x00OID\x00USING ROWID SEARCH ON TABLE %s FOR IN-OPERATOR\x00USING INDEX %s FOR IN-OPERATOR\x00sub-select returns %d columns - expected %d\x00REUSE LIST SUBQUERY %d\x00%sLIST SUBQUERY %d\x00CORRELATED \x00REUSE SUBQUERY %d\x00%sSCALAR SUBQUERY %d\x001\x000x\x00hex literal too big: %s%#T\x00generated column loop on \"%s\"\x00blob\x00text\x00numeric\x00flexnum\x00none\x00misuse of aggregate: %#T()\x00unknown function: %#T()\x00RAISE() may only be used within a trigger-program\x00B\x00C\x00D\x00E\x00F\x00table %s may not be altered\x00SELECT 1 FROM \"%w\".sqlite_master WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%' AND sqlite_rename_test(%Q, sql, type, name, %d, %Q, %d)=NULL \x00SELECT 1 FROM temp.sqlite_master WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%' AND sqlite_rename_test(%Q, sql, type, name, 1, %Q, %d)=NULL \x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_quotefix(%Q, sql)WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%'\x00UPDATE temp.sqlite_master SET sql = sqlite_rename_quotefix('temp', sql)WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%'\x00there is already another table or index with this name: %s\x00table\x00view %s may not be altered\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, %d) WHERE (type!='index' OR tbl_name=%Q COLLATE nocase)AND name NOT LIKE 'sqliteX_%%' ESCAPE 'X'\x00UPDATE %Q.sqlite_master SET tbl_name = %Q, name = CASE WHEN type='table' THEN %Q WHEN name LIKE 'sqliteX_autoindex%%' ESCAPE 'X' AND type='index' THEN 'sqlite_autoindex_' || %Q || substr(name,%d+18) ELSE name END WHERE tbl_name=%Q COLLATE nocase AND (type='table' OR type='index' OR type='trigger');\x00sqlite_sequence\x00UPDATE \"%w\".sqlite_sequence set name = %Q WHERE name = %Q\x00UPDATE sqlite_temp_schema SET sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, 1), tbl_name = CASE WHEN tbl_name=%Q COLLATE nocase AND sqlite_rename_test(%Q, sql, type, name, 1, 'after rename', 0) THEN %Q ELSE tbl_name END WHERE type IN ('view', 'trigger')\x00after rename\x00SELECT raise(ABORT,%Q) FROM \"%w\".\"%w\"\x00Cannot add a PRIMARY KEY column\x00Cannot add a UNIQUE column\x00Cannot add a REFERENCES column with non-NULL default value\x00Cannot add a NOT NULL column with default value NULL\x00Cannot add a column with non-constant default\x00cannot add a STORED column\x00UPDATE \"%w\".sqlite_master SET sql = printf('%%.%ds, ',sql) || %Q || substr(sql,1+length(printf('%%.%ds',sql))) WHERE type = 'table' AND name = %Q\x00SELECT CASE WHEN quick_check GLOB 'CHECK*' THEN raise(ABORT,'CHECK constraint failed') ELSE raise(ABORT,'NOT NULL constraint failed') END FROM pragma_quick_check(%Q,%Q) 
WHERE quick_check GLOB 'CHECK*' OR quick_check GLOB 'NULL*'\x00virtual tables may not be altered\x00Cannot add a column to a view\x00sqlite_altertab_%s\x00view\x00virtual table\x00cannot %s %s \"%s\"\x00drop column from\x00rename columns of\x00no such column: \"%T\"\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, %d) WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND (type != 'index' OR tbl_name = %Q)\x00UPDATE temp.sqlite_master SET sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, 1) WHERE type IN ('trigger', 'view')\x00error in %s %s%s%s: %s\x00 \x00CREATE \x00\"%w\" \x00%Q%s\x00%.*s%s\x00cannot drop %s column: \"%s\"\x00PRIMARY KEY\x00cannot drop column \"%s\": no other columns exist\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_drop_column(%d, sql, %d) WHERE (type=='table' AND tbl_name=%Q COLLATE nocase)\x00after drop column\x00sqlite_rename_column\x00sqlite_rename_table\x00sqlite_rename_test\x00sqlite_drop_column\x00sqlite_rename_quotefix\x00CREATE TABLE %Q.%s(%s)\x00DELETE FROM %Q.%s WHERE %s=%Q\x00DELETE FROM %Q.%s\x00sqlite_stat1\x00tbl,idx,stat\x00sqlite_stat4\x00tbl,idx,neq,nlt,ndlt,sample\x00sqlite_stat3\x00stat_init\x00stat_push\x00%llu\x00 %llu\x00%llu \x00stat_get\x00sqlite\\_%\x00BBB\x00idx\x00tbl\x00unordered*\x00sz=[0-9]*\x00noskipscan*\x00SELECT idx,count(*) FROM %Q.sqlite_stat4 GROUP BY idx\x00SELECT idx,neq,nlt,ndlt,sample FROM %Q.sqlite_stat4\x00SELECT tbl,idx,stat FROM %Q.sqlite_stat1\x00x\x00\x00too many attached databases - max %d\x00database %s is already in use\x00database is already attached\x00attached databases must use the same text encoding as main database\x00unable to open database: %s\x00no such database: %s\x00cannot detach database %s\x00database %s is locked\x00sqlite_detach\x00sqlite_attach\x00%s cannot use variables\x00%s %T cannot reference objects in database %s\x00authorizer malfunction\x00%s.%s\x00%s.%z\x00access to %z is prohibited\x00not authorized\x00pragma_\x00no such view\x00no such table\x00corrupt database\x00unknown database %T\x00object name reserved for internal use: %s\x00temporary table name must be unqualified\x00%s %T already exists\x00there is already an index named %s\x00sqlite_returning\x00cannot use RETURNING in a trigger\x00too many columns on %s\x00always\x00generated\x00duplicate column name: %s\x00default value of column [%s] is not constant\x00cannot use DEFAULT on a generated column\x00generated columns cannot be part of the PRIMARY KEY\x00table \"%s\" has more than one primary key\x00AUTOINCREMENT is only allowed on an INTEGER PRIMARY KEY\x00virtual tables cannot use computed columns\x00virtual\x00stored\x00error in generated column \"%s\"\x00,\x00\n \x00,\n \x00\n)\x00CREATE TABLE \x00 TEXT\x00 NUM\x00 INT\x00 REAL\x00unknown datatype for %s.%s: \"%s\"\x00missing datatype for %s.%s\x00AUTOINCREMENT not allowed on WITHOUT ROWID tables\x00PRIMARY KEY missing on table %s\x00must have at least one non-generated column\x00TABLE\x00VIEW\x00CREATE %s %.*s\x00UPDATE %Q.sqlite_master SET type='%s', name=%Q, tbl_name=%Q, rootpage=#%d, sql=%Q WHERE rowid=#%d\x00CREATE TABLE %Q.sqlite_sequence(name,seq)\x00tbl_name='%q' AND type!='trigger'\x00parameters are not allowed in views\x00view %s is circularly defined\x00corrupt schema\x00UPDATE %Q.sqlite_master SET rootpage=%d WHERE #%d AND rootpage=#%d\x00sqlite_stat%d\x00DELETE FROM %Q.sqlite_sequence WHERE name=%Q\x00DELETE FROM %Q.sqlite_master WHERE tbl_name=%Q and type!='trigger'\x00table %s may not be dropped\x00use 
DROP TABLE to delete table %s\x00use DROP VIEW to delete view %s\x00foreign key on %s should reference only one column of table %T\x00number of columns in foreign key does not match the number of columns in the referenced table\x00unknown column \"%s\" in foreign key definition\x00unsupported use of NULLS %s\x00FIRST\x00LAST\x00index\x00cannot create a TEMP index on non-TEMP table \"%s\"\x00table %s may not be indexed\x00views may not be indexed\x00virtual tables may not be indexed\x00there is already a table named %s\x00index %s already exists\x00sqlite_autoindex_%s_%d\x00expressions prohibited in PRIMARY KEY and UNIQUE constraints\x00conflicting ON CONFLICT clauses specified\x00invalid rootpage\x00CREATE%s INDEX %.*s\x00 UNIQUE\x00INSERT INTO %Q.sqlite_master VALUES('index',%Q,%Q,#%d,%Q);\x00name='%q' AND type='index'\x00no such index: %S\x00index associated with UNIQUE or PRIMARY KEY constraint cannot be dropped\x00DELETE FROM %Q.sqlite_master WHERE name=%Q AND type='index'\x00too many FROM clause terms, max: %d\x00a JOIN clause is required before %s\x00ON\x00USING\x00BEGIN\x00ROLLBACK\x00COMMIT\x00RELEASE\x00unable to open a temporary database file for storing temporary tables\x00index '%q'\x00, \x00%s.rowid\x00unable to identify the object to be reindexed\x00duplicate WITH table name: %s\x00no such collation sequence: %s\x00unsafe use of virtual table \"%s\"\x00table %s may not be modified\x00cannot modify %s because it is a view\x00rows deleted\x00integer overflow\x00%.*f\x00LIKE or GLOB pattern too complex\x00ESCAPE expression must be a single character\x00%!.20e\x00%Q\x00?000\x00MATCH\x00like\x00implies_nonnull_row\x00expr_compare\x00expr_implies_expr\x00affinity\x00soundex\x00load_extension\x00sqlite_compileoption_used\x00sqlite_compileoption_get\x00unlikely\x00likelihood\x00likely\x00sqlite_offset\x00ltrim\x00rtrim\x00trim\x00min\x00max\x00typeof\x00subtype\x00length\x00instr\x00printf\x00format\x00unicode\x00char\x00abs\x00round\x00upper\x00lower\x00hex\x00unhex\x00ifnull\x00random\x00randomblob\x00nullif\x00sqlite_version\x00sqlite_source_id\x00sqlite_log\x00quote\x00last_insert_rowid\x00changes\x00total_changes\x00replace\x00zeroblob\x00substr\x00substring\x00sum\x00total\x00avg\x00count\x00group_concat\x00glob\x00ceil\x00ceiling\x00floor\x00trunc\x00ln\x00log\x00log10\x00log2\x00exp\x00pow\x00power\x00mod\x00acos\x00asin\x00atan\x00atan2\x00cos\x00sin\x00tan\x00cosh\x00sinh\x00tanh\x00acosh\x00asinh\x00atanh\x00sqrt\x00radians\x00degrees\x00pi\x00sign\x00iif\x00foreign key mismatch - \"%w\" referencing \"%w\"\x00cannot INSERT into generated column \"%s\"\x00table %S has no column named %s\x00table %S has %d columns but %d values were supplied\x00%d values for %d columns\x00UPSERT not implemented for virtual table \"%s\"\x00cannot UPSERT a view\x00rows inserted\x00sqlite3_extension_init\x00sqlite3_\x00lib\x00_init\x00no entry point [%s] in shared library [%s]\x00error during initialization: %s\x00unable to open shared library [%.*s]\x00so\x00automatic extension loading failed: 
%s\x00seq\x00from\x00to\x00on_update\x00on_delete\x00match\x00cid\x00name\x00type\x00notnull\x00dflt_value\x00pk\x00hidden\x00schema\x00ncol\x00wr\x00strict\x00seqno\x00desc\x00coll\x00key\x00builtin\x00enc\x00narg\x00flags\x00wdth\x00hght\x00flgs\x00unique\x00origin\x00partial\x00rowid\x00fkid\x00file\x00busy\x00checkpointed\x00database\x00status\x00cache_size\x00timeout\x00analysis_limit\x00application_id\x00auto_vacuum\x00automatic_index\x00busy_timeout\x00cache_spill\x00case_sensitive_like\x00cell_size_check\x00checkpoint_fullfsync\x00collation_list\x00compile_options\x00count_changes\x00data_version\x00database_list\x00default_cache_size\x00defer_foreign_keys\x00empty_result_callbacks\x00encoding\x00foreign_key_check\x00foreign_key_list\x00foreign_keys\x00freelist_count\x00full_column_names\x00fullfsync\x00function_list\x00hard_heap_limit\x00ignore_check_constraints\x00incremental_vacuum\x00index_info\x00index_list\x00index_xinfo\x00integrity_check\x00journal_mode\x00journal_size_limit\x00legacy_alter_table\x00locking_mode\x00max_page_count\x00mmap_size\x00module_list\x00optimize\x00page_count\x00page_size\x00pragma_list\x00query_only\x00quick_check\x00read_uncommitted\x00recursive_triggers\x00reverse_unordered_selects\x00schema_version\x00secure_delete\x00short_column_names\x00shrink_memory\x00soft_heap_limit\x00synchronous\x00table_info\x00table_list\x00table_xinfo\x00temp_store\x00temp_store_directory\x00threads\x00trusted_schema\x00user_version\x00wal_autocheckpoint\x00wal_checkpoint\x00writable_schema\x00onoffalseyestruextrafull\x00exclusive\x00normal\x00full\x00incremental\x00memory\x00temporary storage cannot be changed from within a transaction\x00SET NULL\x00SET DEFAULT\x00CASCADE\x00RESTRICT\x00NO ACTION\x00delete\x00persist\x00off\x00truncate\x00wal\x00w\x00a\x00sissii\x00utf8\x00utf16le\x00utf16be\x00-%T\x00fast\x00not a writable directory\x00Safety level may not be changed inside a transaction\x00reset\x00issisii\x00issisi\x00SELECT*FROM\"%w\"\x00shadow\x00sssiii\x00iisX\x00isiX\x00c\x00u\x00isisi\x00iss\x00is\x00iissssss\x00NONE\x00siX\x00*** in database %s ***\n\x00row not in PRIMARY KEY order for %s\x00NULL value in %s.%s\x00non-%s value in %s.%s\x00NUMERIC value in %s.%s\x00C\x00TEXT value in %s.%s\x00CHECK constraint failed in %s\x00row \x00 missing from index \x00rowid not at end-of-record for row \x00 of index \x00 values differ from index \x00non-unique entry in index \x00wrong # of entries in index \x00ok\x00unsupported encoding: %s\x00restart\x00ANALYZE \"%w\".\"%w\"\x00UTF8\x00UTF-8\x00UTF-16le\x00UTF-16be\x00UTF16le\x00UTF16be\x00UTF-16\x00UTF16\x00CREATE TABLE x\x00%c\"%s\"\x00(\"%s\"\x00,arg HIDDEN\x00,schema HIDDEN\x00PRAGMA \x00%Q.\x00=%Q\x00error in %s %s after %s: %s\x00malformed database schema (%s)\x00%z - %s\x00rename\x00drop column\x00add column\x00orphan index\x00CREATE TABLE x(type text,name text,tbl_name text,rootpage int,sql text)\x00unsupported file format\x00SELECT*FROM\"%w\".%s ORDER BY rowid\x00database schema is locked: %s\x00statement too long\x00unknown join type: %T%s%T%s%T\x00naturaleftouterightfullinnercross\x00a NATURAL join may not have an ON or USING clause\x00cannot join using column %s - column not present in both tables\x00ambiguous reference to %s in USING()\x00UNION ALL\x00INTERSECT\x00EXCEPT\x00UNION\x00USE TEMP B-TREE FOR %s\x00USE TEMP B-TREE FOR %sORDER BY\x00RIGHT PART OF \x00column%d\x00%.*z:%u\x00NUM\x00cannot use window functions in recursive queries\x00recursive aggregate queries not supported\x00SETUP\x00RECURSIVE 
STEP\x00SCAN %d CONSTANT ROW%s\x00S\x00COMPOUND QUERY\x00LEFT-MOST SUBQUERY\x00%s USING TEMP B-TREE\x00all VALUES must have the same number of terms\x00SELECTs to the left and right of %s do not have the same number of result columns\x00MERGE (%s)\x00LEFT\x00RIGHT\x00no such index: %s\x00'%s' is not a function\x00no such index: \"%s\"\x00multiple references to recursive table: %s\x00circular reference: %s\x00table %s has %d values for %d columns\x00multiple recursive references: %s\x00recursive reference in a subquery: %s\x00%!S\x00too many references to \"%s\": max 65535\x00access to view \"%s\" prohibited\x00..%s\x00%s.%s.%s\x00no such table: %s\x00no tables specified\x00too many columns in result set\x00DISTINCT aggregates must have exactly one argument\x00USE TEMP B-TREE FOR %s(DISTINCT)\x00SCAN %s%s%s\x00 USING COVERING INDEX \x00target object/alias may not appear in FROM clause: %s\x00expected %d columns for '%s' but got %d\x00CO-ROUTINE %!S\x00MATERIALIZE %!S\x00DISTINCT\x00GROUP BY\x00sqlite3_get_table() called with two or more incompatible queries\x00temporary trigger may not have qualified name\x00trigger\x00cannot create triggers on virtual tables\x00trigger %T already exists\x00cannot create trigger on system table\x00cannot create %s trigger on view: %S\x00BEFORE\x00AFTER\x00cannot create INSTEAD OF trigger on table: %S\x00trigger \"%s\" may not write to shadow table \"%s\"\x00INSERT INTO %Q.sqlite_master VALUES('trigger',%Q,%Q,0,'CREATE TRIGGER %q')\x00type='trigger' AND name='%q'\x00no such trigger: %S\x00DELETE FROM %Q.sqlite_master WHERE name=%Q AND type='trigger'\x00%s RETURNING is not available on virtual tables\x00DELETE\x00UPDATE\x00RETURNING may not use \"TABLE.*\" wildcards\x00-- TRIGGER %s\x00cannot UPDATE generated column \"%s\"\x00no such column: %s\x00rows updated\x00%r \x00%sON CONFLICT clause does not match any PRIMARY KEY or UNIQUE constraint\x00CRE\x00INS\x00cannot VACUUM from within a transaction\x00cannot VACUUM - SQL statements in progress\x00non-text filename\x00ATTACH %Q AS vacuum_db\x00output file already exists\x00SELECT sql FROM \"%w\".sqlite_schema WHERE type='table'AND name<>'sqlite_sequence' AND coalesce(rootpage,1)>0\x00SELECT sql FROM \"%w\".sqlite_schema WHERE type='index'\x00SELECT'INSERT INTO vacuum_db.'||quote(name)||' SELECT*FROM\"%w\".'||quote(name)FROM vacuum_db.sqlite_schema WHERE type='table'AND coalesce(rootpage,1)>0\x00INSERT INTO vacuum_db.sqlite_schema SELECT*FROM \"%w\".sqlite_schema WHERE type IN('view','trigger') OR(type='table'AND rootpage=0)\x00CREATE VIRTUAL TABLE %T\x00UPDATE %Q.sqlite_master SET type='table', name=%Q, tbl_name=%Q, rootpage=0, sql=%Q WHERE rowid=#%d\x00name=%Q AND sql=%Q\x00vtable constructor called recursively: %s\x00vtable constructor failed: %s\x00vtable constructor did not declare schema: %s\x00no such module: %s\x00\x00 AND \x00(\x00 (\x00%s=?\x00ANY(%s)\x00>\x00<\x00%s %S\x00SEARCH\x00SCAN\x00AUTOMATIC PARTIAL COVERING INDEX\x00AUTOMATIC COVERING INDEX\x00COVERING INDEX %s\x00INDEX %s\x00 USING \x00 USING INTEGER PRIMARY KEY (%s\x00>? 
AND %s\x00%c?)\x00 VIRTUAL TABLE INDEX %d:%s\x00 LEFT-JOIN\x00BLOOM FILTER ON %S (\x00rowid=?\x00MULTI-INDEX OR\x00INDEX %d\x00RIGHT-JOIN %s\x00regexp\x00ON clause references tables to its right\x00NOCASE\x00too many arguments on %s() - max %d\x00automatic index on %s(%s)\x00auto-index\x00%s.xBestIndex malfunction\x00abbreviated query algorithm search\x00no query solution\x00at most %d tables in a join\x00SCAN CONSTANT ROW\x00second argument to nth_value must be a positive integer\x00argument of ntile must be a positive integer\x00row_number\x00dense_rank\x00rank\x00percent_rank\x00cume_dist\x00ntile\x00last_value\x00nth_value\x00first_value\x00lead\x00lag\x00no such window: %s\x00RANGE with offset PRECEDING/FOLLOWING requires one ORDER BY expression\x00FILTER clause may only be used with aggregate window functions\x00misuse of aggregate: %s()\x00unsupported frame specification\x00PARTITION clause\x00ORDER BY clause\x00frame specification\x00cannot override %s of window: %s\x00DISTINCT is not supported for window functions\x00frame starting offset must be a non-negative integer\x00frame ending offset must be a non-negative integer\x00frame starting offset must be a non-negative number\x00frame ending offset must be a non-negative number\x00%s clause should come after %s not before\x00ORDER BY\x00LIMIT\x00too many terms in compound SELECT\x00syntax error after column name \"%.*s\"\x00parser stack overflow\x00unknown table option: %.*s\x00set list\x00near \"%T\": syntax error\x00qualified table names are not allowed on INSERT, UPDATE, and DELETE statements within triggers\x00the INDEXED BY clause is not allowed on UPDATE or DELETE statements within triggers\x00the NOT INDEXED clause is not allowed on UPDATE or DELETE statements within triggers\x00incomplete input\x00unrecognized token: \"%T\"\x00%s in \"%s\"\x00create\x00temp\x00temporary\x00end\x00explain\x00unable to close due to unfinalized statements or unfinished backups\x00unknown error\x00abort due to ROLLBACK\x00another row available\x00no more rows available\x00not an error\x00SQL logic error\x00access permission denied\x00query aborted\x00database is locked\x00database table is locked\x00attempt to write a readonly database\x00interrupted\x00disk I/O error\x00database disk image is malformed\x00unknown operation\x00database or disk is full\x00unable to open database file\x00locking protocol\x00constraint failed\x00datatype mismatch\x00bad parameter or other API misuse\x00authorization denied\x00column index out of range\x00file is not a database\x00notification message\x00warning message\x00unable to delete/modify user-function due to active statements\x00unable to use function %s in the requested context\x00unknown database: %s\x00unable to delete/modify collation sequence due to active statements\x00file:\x00localhost\x00invalid uri authority: %.*s\x00vfs\x00cache\x00mode\x00no such %s mode: %s\x00%s mode not allowed: %s\x00no such vfs: %s\x00shared\x00private\x00ro\x00rw\x00rwc\x00RTRIM\x00\x00\x00\x00%s at line %d of [%.10s]\x00database corruption\x00misuse\x00cannot open file\x00no such table column: %s.%s\x00SQLITE_\x00database is deadlocked\x00array\x00object\x000123456789abcdef\x00JSON cannot hold BLOB values\x00malformed JSON\x00[0]\x00JSON path error near '%q'\x00json_%s() needs an odd number of arguments\x00$[\x00$.\x00json_object() requires an even number of arguments\x00json_object() labels must be TEXT\x00set\x00insert\x00[]\x00{}\x00CREATE TABLE x(key,value,type,atom,id,parent,fullkey,path,json HIDDEN,root 
HIDDEN)\x00.%.*s\x00[%d]\x00$\x00json\x00json_array\x00json_array_length\x00json_extract\x00->\x00->>\x00json_insert\x00json_object\x00json_patch\x00json_quote\x00json_remove\x00json_replace\x00json_set\x00json_type\x00json_valid\x00json_group_array\x00json_group_object\x00json_each\x00json_tree\x00%s_node\x00data\x00DROP TABLE '%q'.'%q_node';DROP TABLE '%q'.'%q_rowid';DROP TABLE '%q'.'%q_parent';\x00RtreeMatchArg\x00SELECT * FROM %Q.%Q\x00UNIQUE constraint failed: %s.%s\x00rtree constraint failed: %s.(%s<=%s)\x00ALTER TABLE %Q.'%q_node' RENAME TO \"%w_node\";ALTER TABLE %Q.'%q_parent' RENAME TO \"%w_parent\";ALTER TABLE %Q.'%q_rowid' RENAME TO \"%w_rowid\";\x00SELECT stat FROM %Q.sqlite_stat1 WHERE tbl = '%q_rowid'\x00node\x00CREATE TABLE \"%w\".\"%w_rowid\"(rowid INTEGER PRIMARY KEY,nodeno\x00,a%d\x00);CREATE TABLE \"%w\".\"%w_node\"(nodeno INTEGER PRIMARY KEY,data);\x00CREATE TABLE \"%w\".\"%w_parent\"(nodeno INTEGER PRIMARY KEY,parentnode);\x00INSERT INTO \"%w\".\"%w_node\"VALUES(1,zeroblob(%d))\x00INSERT INTO\"%w\".\"%w_rowid\"(rowid,nodeno)VALUES(?1,?2)ON CONFLICT(rowid)DO UPDATE SET nodeno=excluded.nodeno\x00SELECT * FROM \"%w\".\"%w_rowid\" WHERE rowid=?1\x00UPDATE \"%w\".\"%w_rowid\"SET \x00a%d=coalesce(?%d,a%d)\x00a%d=?%d\x00 WHERE rowid=?1\x00INSERT OR REPLACE INTO '%q'.'%q_node' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_node' WHERE nodeno = ?1\x00SELECT nodeno FROM '%q'.'%q_rowid' WHERE rowid = ?1\x00INSERT OR REPLACE INTO '%q'.'%q_rowid' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_rowid' WHERE rowid = ?1\x00SELECT parentnode FROM '%q'.'%q_parent' WHERE nodeno = ?1\x00INSERT OR REPLACE INTO '%q'.'%q_parent' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_parent' WHERE nodeno = ?1\x00PRAGMA %Q.page_size\x00SELECT length(data) FROM '%q'.'%q_node' WHERE nodeno = 1\x00undersize RTree blobs in \"%q_node\"\x00Wrong number of columns for an rtree table\x00Too few columns for an rtree table\x00Too many columns for an rtree table\x00Auxiliary rtree columns must be last\x00CREATE TABLE x(%.*s INT\x00,%.*s\x00);\x00,%.*s REAL\x00,%.*s INT\x00{%lld\x00 %g\x00}\x00Invalid argument to rtreedepth()\x00%z%s%z\x00SELECT data FROM %Q.'%q_node' WHERE nodeno=?\x00Node %lld missing from database\x00SELECT parentnode FROM %Q.'%q_parent' WHERE nodeno=?1\x00SELECT nodeno FROM %Q.'%q_rowid' WHERE rowid=?1\x00Mapping (%lld -> %lld) missing from %s table\x00%_rowid\x00%_parent\x00Found (%lld -> %lld) in %s table, expected (%lld -> %lld)\x00Dimension %d of cell %d on node %lld is corrupt\x00Dimension %d of cell %d on node %lld is corrupt relative to parent\x00Node %lld is too small (%d bytes)\x00Rtree depth out of range (%d)\x00Node %lld is too small for cell count of %d (%d bytes)\x00SELECT count(*) FROM %Q.'%q%s'\x00Wrong number of entries in %%%s table - expected %lld, actual %lld\x00SELECT * FROM %Q.'%q_rowid'\x00Schema corrupt or not an rtree\x00_rowid\x00_parent\x00END\x00wrong number of arguments to function rtreecheck()\x00[\x00[%!g,%!g],\x00[%!g,%!g]]\x00\x00CREATE TABLE x(_shape\x00,%s\x00rtree\x00fullscan\x00_shape does not contain a valid polygon\x00geopoly_overlap\x00geopoly_within\x00geopoly\x00geopoly_area\x00geopoly_blob\x00geopoly_json\x00geopoly_svg\x00geopoly_contains_point\x00geopoly_debug\x00geopoly_bbox\x00geopoly_xform\x00geopoly_regular\x00geopoly_ccw\x00geopoly_group_bbox\x00rtreenode\x00rtreedepth\x00rtreecheck\x00rtree_i32\x00corrupt fossil delta\x00DROP TRIGGER IF EXISTS temp.rbu_insert_tr;DROP TRIGGER IF EXISTS temp.rbu_update1_tr;DROP TRIGGER IF EXISTS temp.rbu_update2_tr;DROP TRIGGER IF 
EXISTS temp.rbu_delete_tr;\x00SELECT rbu_target_name(name, type='view') AS target, name FROM sqlite_schema WHERE type IN ('table', 'view') AND target IS NOT NULL %s ORDER BY name\x00AND rootpage!=0 AND rootpage IS NOT NULL\x00SELECT name, rootpage, sql IS NULL OR substr(8, 6)=='UNIQUE' FROM main.sqlite_schema WHERE type='index' AND tbl_name = ?\x00SELECT (sql COLLATE nocase BETWEEN 'CREATE VIRTUAL' AND 'CREATE VIRTUAM'), rootpage FROM sqlite_schema WHERE name=%Q\x00PRAGMA index_list=%Q\x00SELECT rootpage FROM sqlite_schema WHERE name = %Q\x00PRAGMA table_info=%Q\x00PRAGMA main.index_list = %Q\x00PRAGMA main.index_xinfo = %Q\x00SELECT * FROM '%q'\x00rbu_\x00rbu_rowid\x00table %q %s rbu_rowid column\x00may not have\x00requires\x00PRAGMA table_info(%Q)\x00column missing from %q: %s\x00%z%s\"%w\"\x00%z%s%s\"%w\"%s\x00SELECT max(_rowid_) FROM \"%s%w\"\x00 WHERE _rowid_ > %lld \x00 DESC\x00quote(\x00||','||\x00SELECT %s FROM \"%s%w\" ORDER BY %s LIMIT 1\x00 WHERE (%s) > (%s) \x00_rowid_\x00%z%s \"%w\" COLLATE %Q\x00%z%s \"rbu_imp_%d%w\" COLLATE %Q DESC\x00%z%s quote(\"rbu_imp_%d%w\")\x00SELECT %s FROM \"rbu_imp_%w\" ORDER BY %s LIMIT 1\x00%z%s%s\x00(%s) > (%s)\x00%z%s(%.*s) COLLATE %Q\x00%z%s\"%w\" COLLATE %Q\x00%z%s\"rbu_imp_%d%w\"%s\x00%z%s\"rbu_imp_%d%w\" %s COLLATE %Q\x00%z%s\"rbu_imp_%d%w\" IS ?\x00%z%s%s.\"%w\"\x00%z%sNULL\x00%z, %s._rowid_\x00_rowid_ = ?%d\x00%z%sc%d=?%d\x00_rowid_ = (SELECT id FROM rbu_imposter2 WHERE %z)\x00%z%s\"%w\"=?%d\x00invalid rbu_control value\x00%z%s\"%w\"=rbu_delta(\"%w\", ?%d)\x00%z%s\"%w\"=rbu_fossil_delta(\"%w\", ?%d)\x00PRIMARY KEY(\x00%z%s\"%w\"%s\x00%z)\x00SELECT name FROM sqlite_schema WHERE rootpage = ?\x00%z%sc%d %s COLLATE %Q\x00%z%sc%d%s\x00%z, id INTEGER\x00CREATE TABLE rbu_imposter2(%z, PRIMARY KEY(%z)) WITHOUT ROWID\x00PRIMARY KEY \x00%z%s\"%w\" %s %sCOLLATE %Q%s\x00 NOT NULL\x00%z, %z\x00CREATE TABLE \"rbu_imp_%w\"(%z)%s\x00 WITHOUT ROWID\x00INSERT INTO %s.'rbu_tmp_%q'(rbu_control,%s%s) VALUES(%z)\x00SELECT trim(sql) FROM sqlite_schema WHERE type='index' AND name=?\x00 LIMIT -1 OFFSET %d\x00CREATE TABLE \"rbu_imp_%w\"( %s, PRIMARY KEY( %s ) ) WITHOUT ROWID\x00INSERT INTO \"rbu_imp_%w\" VALUES(%s)\x00DELETE FROM \"rbu_imp_%w\" WHERE %s\x00SELECT %s, 0 AS rbu_control FROM '%q' %s %s %s ORDER BY %s%s\x00AND\x00WHERE\x00SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' %s ORDER BY %s%s\x00SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' %s UNION ALL SELECT %s, rbu_control FROM '%q' %s %s typeof(rbu_control)='integer' AND rbu_control!=1 ORDER BY %s%s\x00rbu_imp_\x00INSERT INTO \"%s%w\"(%s%s) VALUES(%s)\x00, _rowid_\x00DELETE FROM \"%s%w\" WHERE %s\x00, rbu_rowid\x00CREATE TABLE IF NOT EXISTS %s.'rbu_tmp_%q' AS SELECT *%s FROM '%q' WHERE 0;\x00, 0 AS rbu_rowid\x00CREATE TEMP TRIGGER rbu_delete_tr BEFORE DELETE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(3, %s);END;CREATE TEMP TRIGGER rbu_update1_tr BEFORE UPDATE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(3, %s);END;CREATE TEMP TRIGGER rbu_update2_tr AFTER UPDATE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(4, %s);END;\x00CREATE TEMP TRIGGER rbu_insert_tr AFTER INSERT ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(0, %s);END;\x00,_rowid_ \x00,rbu_rowid\x00SELECT %s,%s rbu_control%s FROM '%q'%s %s %s %s\x000 AS \x00UPDATE \"%s%w\" SET %s WHERE %s\x00SELECT k, v FROM %s.rbu_state\x00file:///%s-vacuum?modeof=%s\x00ATTACH %Q AS stat\x00CREATE TABLE IF NOT EXISTS %s.rbu_state(k INTEGER PRIMARY KEY, v)\x00cannot vacuum wal mode 
database\x00file:%s-vactmp?rbu_memory=1%s%s\x00&\x00rbu_tmp_insert\x00rbu_fossil_delta\x00rbu_target_name\x00SELECT * FROM sqlite_schema\x00rbu vfs not found\x00PRAGMA main.wal_checkpoint=restart\x00rbu_exclusive_checkpoint\x00%s-oal\x00%s-wal\x00PRAGMA schema_version\x00PRAGMA schema_version = %d\x00INSERT OR REPLACE INTO %s.rbu_state(k, v) VALUES (%d, %d), (%d, %Q), (%d, %Q), (%d, %d), (%d, %d), (%d, %lld), (%d, %lld), (%d, %lld), (%d, %lld), (%d, %Q) \x00PRAGMA main.%s\x00PRAGMA main.%s = %d\x00PRAGMA writable_schema=1\x00SELECT sql FROM sqlite_schema WHERE sql!='' AND rootpage!=0 AND name!='sqlite_sequence' ORDER BY type DESC\x00SELECT * FROM sqlite_schema WHERE rootpage=0 OR rootpage IS NULL\x00INSERT INTO sqlite_schema VALUES(?,?,?,?,?)\x00PRAGMA writable_schema=0\x00DELETE FROM %s.'rbu_tmp_%q'\x00rbu_state mismatch error\x00rbu_vfs_%d\x00SELECT count(*) FROM sqlite_schema WHERE type='index' AND tbl_name = %Q\x00rbu_index_cnt\x00SELECT 1 FROM sqlite_schema WHERE tbl_name = 'rbu_count'\x00SELECT sum(cnt * (1 + rbu_index_cnt(rbu_target_name(tbl))))FROM rbu_count\x00cannot update wal mode database\x00database modified during rbu %s\x00vacuum\x00update\x00BEGIN IMMEDIATE\x00PRAGMA journal_mode=off\x00-vactmp\x00DELETE FROM stat.rbu_state\x00rbu/zipvfs setup error\x00rbu(%s)/%z\x00rbu_memory\x00SELECT 0, 'tbl', '', 0, '', 1 UNION ALL SELECT 1, 'idx', '', 0, '', 2 UNION ALL SELECT 2, 'stat', '', 0, '', 0\x00PRAGMA '%q'.table_info('%q')\x00%z%s\"%w\".\"%w\".\"%w\"=\"%w\".\"%w\".\"%w\"\x00%z%s\"%w\".\"%w\".\"%w\" IS NOT \"%w\".\"%w\".\"%w\"\x00 OR \x00SELECT * FROM \"%w\".\"%w\" WHERE NOT EXISTS ( SELECT 1 FROM \"%w\".\"%w\" WHERE %s)\x00SELECT * FROM \"%w\".\"%w\", \"%w\".\"%w\" WHERE %s AND (%z)\x00table schemas do not match\x00SELECT tbl, ?2, stat FROM %Q.sqlite_stat1 WHERE tbl IS ?1 AND idx IS (CASE WHEN ?2=X'' THEN NULL ELSE ?2 END)\x00SELECT * FROM \x00 WHERE \x00 IS ?\x00SAVEPOINT changeset\x00RELEASE changeset\x00UPDATE main.\x00 SET \x00 = ?\x00idx IS CASE WHEN length(?4)=0 AND typeof(?4)='blob' THEN NULL ELSE ?4 END \x00DELETE FROM main.\x00 AND (?\x00AND \x00INSERT INTO main.\x00) VALUES(?\x00, ?\x00INSERT INTO main.sqlite_stat1 VALUES(?1, CASE WHEN length(?2)=0 AND typeof(?2)='blob' THEN NULL ELSE ?2 END, ?3)\x00DELETE FROM main.sqlite_stat1 WHERE tbl=?1 AND idx IS CASE WHEN length(?2)=0 AND typeof(?2)='blob' THEN NULL ELSE ?2 END AND (?4 OR stat IS ?3)\x00SAVEPOINT replace_op\x00RELEASE replace_op\x00SAVEPOINT changeset_apply\x00PRAGMA defer_foreign_keys = 1\x00sqlite3changeset_apply(): no such table: %s\x00sqlite3changeset_apply(): table %s has %d columns, expected %d or more\x00sqlite3changeset_apply(): primary key mismatch for table %s\x00PRAGMA defer_foreign_keys = 0\x00RELEASE changeset_apply\x00ROLLBACK TO changeset_apply\x00fts5: parser stack overflow\x00fts5: syntax error near \"%.*s\"\x00%z%.*s\x00wrong number of arguments to function highlight()\x00wrong number of arguments to function snippet()\x00snippet\x00highlight\x00bm25\x00prefix\x00malformed prefix=... directive\x00too many prefix indexes (max %d)\x00prefix length out of range (max 999)\x00tokenize\x00multiple tokenize=... directives\x00parse error in tokenize directive\x00content\x00multiple content=... directives\x00%Q.%Q\x00content_rowid\x00multiple content_rowid=... directives\x00columnsize\x00malformed columnsize=... directive\x00columns\x00malformed detail=... 
directive\x00unrecognized option: \"%.*s\"\x00reserved fts5 column name: %s\x00unindexed\x00unrecognized column option: %s\x00T.%Q\x00, T.%Q\x00, T.c%d\x00reserved fts5 table name: %s\x00parse error in \"%s\"\x00docsize\x00%Q.'%q_%s'\x00CREATE TABLE x(\x00%z%s%Q\x00%z, %Q HIDDEN, %s HIDDEN)\x00pgsz\x00hashsize\x00automerge\x00usermerge\x00crisismerge\x00SELECT k, v FROM %Q.'%q_config'\x00version\x00invalid fts5 file format (found %d, expected %d) - run 'rebuild'\x00unterminated string\x00fts5: syntax error near \"%.1s\"\x00OR\x00NOT\x00NEAR\x00expected integer, got \"%.*s\"\x00fts5: column queries are not supported (detail=none)\x00fts5: %s queries are not supported (detail!=full)\x00phrase\x00block\x00REPLACE INTO '%q'.'%q_data'(id, block) VALUES(?,?)\x00DELETE FROM '%q'.'%q_data' WHERE id>=? AND id<=?\x00DELETE FROM '%q'.'%q_idx' WHERE segid=?\x00PRAGMA %Q.data_version\x00SELECT pgno FROM '%q'.'%q_idx' WHERE segid=? AND term<=? ORDER BY term DESC LIMIT 1\x00INSERT INTO '%q'.'%q_idx'(segid,term,pgno) VALUES(?,?,?)\x00%s_data\x00id INTEGER PRIMARY KEY, block BLOB\x00segid, term, pgno, PRIMARY KEY(segid, term)\x00SELECT segid, term, (pgno>>1), (pgno&1) FROM %Q.'%q_idx' WHERE segid=%d ORDER BY 1, 2\x00\x00\x00\x00\x00\x00recursively defined fts5 content table\x00SELECT rowid, rank FROM %Q.%Q ORDER BY %s(\"%w\"%s%s) %s\x00DESC\x00ASC\x00reads\x00unknown special query: %.*s\x00SELECT %s\x00no such function: %s\x00parse error in rank function: %s\x00%s: table does not support scanning\x00delete-all\x00'delete-all' may only be used with a contentless or external content fts5 table\x00rebuild\x00'rebuild' may not be used with a contentless fts5 table\x00merge\x00integrity-check\x00cannot %s contentless fts5 table: %s\x00DELETE from\x00no such cursor: %lld\x00no such tokenizer: %s\x00error in tokenizer constructor\x00fts5_api_ptr\x00fts5: 2023-03-22 11:56:21 0d1fc92f94cb6b76bffe3ec34d69cffde2924203304e8ffc4155597af0c191da\x00config\x00fts5\x00fts5_source_id\x00SELECT %s FROM %s T WHERE T.%Q >= ? AND T.%Q <= ? ORDER BY T.%Q ASC\x00SELECT %s FROM %s T WHERE T.%Q <= ? AND T.%Q >= ? 
ORDER BY T.%Q DESC\x00SELECT %s FROM %s T WHERE T.%Q=?\x00INSERT INTO %Q.'%q_content' VALUES(%s)\x00REPLACE INTO %Q.'%q_content' VALUES(%s)\x00DELETE FROM %Q.'%q_content' WHERE id=?\x00REPLACE INTO %Q.'%q_docsize' VALUES(?,?)\x00DELETE FROM %Q.'%q_docsize' WHERE id=?\x00SELECT sz FROM %Q.'%q_docsize' WHERE id=?\x00REPLACE INTO %Q.'%q_config' VALUES(?,?)\x00SELECT %s FROM %s AS T\x00DROP TABLE IF EXISTS %Q.'%q_data';DROP TABLE IF EXISTS %Q.'%q_idx';DROP TABLE IF EXISTS %Q.'%q_config';\x00DROP TABLE IF EXISTS %Q.'%q_docsize';\x00DROP TABLE IF EXISTS %Q.'%q_content';\x00ALTER TABLE %Q.'%q_%s' RENAME TO '%q_%s';\x00CREATE TABLE %Q.'%q_%q'(%s)%s\x00fts5: error creating shadow table %q_%s: %s\x00id INTEGER PRIMARY KEY\x00, c%d\x00id INTEGER PRIMARY KEY, sz BLOB\x00k PRIMARY KEY, v\x00DELETE FROM %Q.'%q_data';DELETE FROM %Q.'%q_idx';\x00DELETE FROM %Q.'%q_docsize';\x00SELECT count(*) FROM %Q.'%q_%s'\x00tokenchars\x00separators\x00L* N* Co\x00categories\x00remove_diacritics\x00unicode61\x00al\x00ance\x00ence\x00er\x00ic\x00able\x00ible\x00ant\x00ement\x00ment\x00ent\x00ion\x00ou\x00ism\x00ate\x00iti\x00ous\x00ive\x00ize\x00at\x00bl\x00ble\x00iz\x00ational\x00tional\x00tion\x00enci\x00anci\x00izer\x00logi\x00bli\x00alli\x00entli\x00eli\x00e\x00ousli\x00ization\x00ation\x00ator\x00alism\x00iveness\x00fulness\x00ful\x00ousness\x00aliti\x00iviti\x00biliti\x00ical\x00ness\x00icate\x00iciti\x00ative\x00alize\x00eed\x00ee\x00ed\x00ing\x00case_sensitive\x00ascii\x00porter\x00trigram\x00col\x00row\x00instance\x00fts5vocab: unknown table type: %Q\x00CREATE TABlE vocab(term, col, doc, cnt)\x00CREATE TABlE vocab(term, doc, cnt)\x00CREATE TABlE vocab(term, doc, col, offset)\x00wrong number of vtable arguments\x00recursive definition for %s.%s\x00SELECT t.%Q FROM %Q.%Q AS t WHERE t.%Q MATCH '*id'\x00no such fts5 table: %s.%s\x00fts5vocab\x002023-03-22 11:56:21 0d1fc92f94cb6b76bffe3ec34d69cffde2924203304e8ffc4155597af0c191da\x00" var ts = (*reflect.StringHeader)(unsafe.Pointer(&ts1)).Data diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/sqlite/lib/sqlite_windows_amd64.go temporal-1.22.5/src/vendor/modernc.org/sqlite/lib/sqlite_windows_amd64.go --- temporal-1.21.5-1/src/vendor/modernc.org/sqlite/lib/sqlite_windows_amd64.go 2023-09-29 14:03:35.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/sqlite/lib/sqlite_windows_amd64.go 2024-02-23 09:46:16.000000000 +0000 @@ -1,4 +1,4 @@ -// Code generated by 'ccgo -DSQLITE_PRIVATE= -export-defines "" -export-enums "" -export-externs X -export-fields F -export-typedefs "" -ignore-unsupported-alignment -pkgname sqlite3 -volatile=sqlite3_io_error_pending,sqlite3_open_file_count,sqlite3_pager_readdb_count,sqlite3_pager_writedb_count,sqlite3_pager_writej_count,sqlite3_search_count,sqlite3_sort_count,saved_cnt,randomnessPid -o lib/sqlite_windows_amd64.go -trace-translation-units testdata/sqlite-amalgamation-3410000/sqlite3.c -full-path-comments -DNDEBUG -DHAVE_USLEEP -DLONGDOUBLE_TYPE=double -DSQLITE_CORE -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_FTS5 -DSQLITE_ENABLE_GEOPOLY -DSQLITE_ENABLE_MATH_FUNCTIONS -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_OFFSET_SQL_FUNC -DSQLITE_ENABLE_PREUPDATE_HOOK -DSQLITE_ENABLE_RBU -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_SESSION -DSQLITE_ENABLE_SNAPSHOT -DSQLITE_ENABLE_STAT4 -DSQLITE_ENABLE_UNLOCK_NOTIFY -DSQLITE_LIKE_DOESNT_MATCH_BLOBS -DSQLITE_MUTEX_APPDEF=1 -DSQLITE_MUTEX_NOOP -DSQLITE_SOUNDEX -DSQLITE_THREADSAFE=1 -DSQLITE_OS_WIN=1 -D_MSC_VER=1900', DO NOT EDIT. 
+// Code generated by 'ccgo -DSQLITE_PRIVATE= -export-defines "" -export-enums "" -export-externs X -export-fields F -export-typedefs "" -ignore-unsupported-alignment -pkgname sqlite3 -volatile=sqlite3_io_error_pending,sqlite3_open_file_count,sqlite3_pager_readdb_count,sqlite3_pager_writedb_count,sqlite3_pager_writej_count,sqlite3_search_count,sqlite3_sort_count,saved_cnt,randomnessPid -o lib/sqlite_windows_amd64.go -trace-translation-units testdata/sqlite-amalgamation-3410200/sqlite3.c -full-path-comments -DNDEBUG -DHAVE_USLEEP -DLONGDOUBLE_TYPE=double -DSQLITE_CORE -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_FTS5 -DSQLITE_ENABLE_GEOPOLY -DSQLITE_ENABLE_MATH_FUNCTIONS -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_OFFSET_SQL_FUNC -DSQLITE_ENABLE_PREUPDATE_HOOK -DSQLITE_ENABLE_RBU -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_SESSION -DSQLITE_ENABLE_SNAPSHOT -DSQLITE_ENABLE_STAT4 -DSQLITE_ENABLE_UNLOCK_NOTIFY -DSQLITE_LIKE_DOESNT_MATCH_BLOBS -DSQLITE_MUTEX_APPDEF=1 -DSQLITE_MUTEX_NOOP -DSQLITE_SOUNDEX -DSQLITE_THREADSAFE=1 -DSQLITE_OS_WIN=1 -D_MSC_VER=1900', DO NOT EDIT. package sqlite3 @@ -10492,11 +10492,11 @@ NC_OrderAgg = 0x8000000 NC_PartIdx = 0x000002 NC_SelfRef = 0x00002e + NC_Subquery = 0x000040 NC_UAggInfo = 0x000100 NC_UBaseReg = 0x000400 NC_UEList = 0x000080 NC_UUpsert = 0x000200 - NC_VarSelect = 0x000040 NDEBUG = 1 NDR_ASCII_CHAR = 0 NDR_BIG_ENDIAN = 0 @@ -15089,7 +15089,7 @@ SQLITE_SHM_UNLOCK = 1 SQLITE_SORTER_PMASZ = 250 SQLITE_SOUNDEX = 1 - SQLITE_SOURCE_ID = "2023-02-21 18:09:37 05941c2a04037fc3ed2ffae11f5d2260706f89431f463518740f72ada350866d" + SQLITE_SOURCE_ID = "2023-03-22 11:56:21 0d1fc92f94cb6b76bffe3ec34d69cffde2924203304e8ffc4155597af0c191da" SQLITE_SO_ASC = 0 SQLITE_SO_DESC = 1 SQLITE_SO_UNDEFINED = -1 @@ -15200,8 +15200,8 @@ SQLITE_UTF8 = 1 SQLITE_VDBEINT_H = 0 SQLITE_VDBE_H = 0 - SQLITE_VERSION = "3.41.0" - SQLITE_VERSION_NUMBER = 3041000 + SQLITE_VERSION = "3.41.2" + SQLITE_VERSION_NUMBER = 3041002 SQLITE_VTABRISK_High = 2 SQLITE_VTABRISK_Low = 0 SQLITE_VTABRISK_Normal = 1 @@ -24337,7 +24337,8 @@ FiIdxCur int32 FiIdxCol int32 FbMaybeNullRow U8 - F__ccgo_pad1 [3]byte + Faff U8 + F__ccgo_pad1 [2]byte FpIENext uintptr } @@ -24982,17 +24983,18 @@ // Handle type for pages. type PgHdr2 = struct { - FpPage uintptr - FpData uintptr - FpExtra uintptr - FpCache uintptr - FpDirty uintptr - FpPager uintptr - Fpgno Pgno - Fflags U16 - FnRef I16 - FpDirtyNext uintptr - FpDirtyPrev uintptr + FpPage uintptr + FpData uintptr + FpExtra uintptr + FpCache uintptr + FpDirty uintptr + FpPager uintptr + Fpgno Pgno + Fflags U16 + F__ccgo_pad1 [2]byte + FnRef I64 + FpDirtyNext uintptr + FpDirtyPrev uintptr } // Handle type for pages. 
@@ -25213,14 +25215,14 @@ FpDirty uintptr FpDirtyTail uintptr FpSynced uintptr - FnRefSum int32 + FnRefSum I64 FszCache int32 FszSpill int32 FszPage int32 FszExtra int32 FbPurgeable U8 FeCreate U8 - F__ccgo_pad1 [2]byte + F__ccgo_pad1 [6]byte FxStress uintptr FpStress uintptr FpCache uintptr @@ -26027,7 +26029,7 @@ _ = pMutex if op < 0 || op >= int32(uint64(unsafe.Sizeof([10]Sqlite3StatValueType{}))/uint64(unsafe.Sizeof(Sqlite3StatValueType(0)))) { - return Xsqlite3MisuseError(tls, 23229) + return Xsqlite3MisuseError(tls, 23233) } if statMutex[op] != 0 { pMutex = Xsqlite3Pcache1Mutex(tls) @@ -60911,7 +60913,7 @@ if dwRet == libc.Uint32(libc.Uint32FromInt32(-1)) && libc.AssignUint32(&lastErrno, (*(*func(*libc.TLS) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls)) != DWORD(0) { (*WinFile)(unsafe.Pointer(pFile)).FlastErrno = lastErrno - winLogErrorAtLine(tls, SQLITE_IOERR|int32(22)<<8, (*WinFile)(unsafe.Pointer(pFile)).FlastErrno, ts+4597, (*WinFile)(unsafe.Pointer(pFile)).FzPath, 47684) + winLogErrorAtLine(tls, SQLITE_IOERR|int32(22)<<8, (*WinFile)(unsafe.Pointer(pFile)).FlastErrno, ts+4597, (*WinFile)(unsafe.Pointer(pFile)).FzPath, 47688) return 1 } @@ -60937,7 +60939,7 @@ if rc != 0 { return SQLITE_OK } - return winLogErrorAtLine(tls, SQLITE_IOERR|int32(16)<<8, (*(*func(*libc.TLS) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls), ts+4609, (*WinFile)(unsafe.Pointer(pFile)).FzPath, 47780) + return winLogErrorAtLine(tls, SQLITE_IOERR|int32(16)<<8, (*(*func(*libc.TLS) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls), ts+4609, (*WinFile)(unsafe.Pointer(pFile)).FzPath, 47784) } func winRead(tls *libc.TLS, id uintptr, pBuf uintptr, amt int32, offset Sqlite3_int64) int32 { @@ -60971,9 +60973,9 @@ } (*WinFile)(unsafe.Pointer(pFile)).FlastErrno = *(*DWORD)(unsafe.Pointer(bp + 40)) - return winLogErrorAtLine(tls, SQLITE_IOERR|int32(1)<<8, (*WinFile)(unsafe.Pointer(pFile)).FlastErrno, ts+4618, (*WinFile)(unsafe.Pointer(pFile)).FzPath, 47848) + return winLogErrorAtLine(tls, SQLITE_IOERR|int32(1)<<8, (*WinFile)(unsafe.Pointer(pFile)).FlastErrno, ts+4618, (*WinFile)(unsafe.Pointer(pFile)).FzPath, 47852) } - winLogIoerr(tls, *(*int32)(unsafe.Pointer(bp + 36)), 47851) + winLogIoerr(tls, *(*int32)(unsafe.Pointer(bp + 36)), 47855) if *(*DWORD)(unsafe.Pointer(bp + 32)) < DWORD(amt) { libc.Xmemset(tls, pBuf+uintptr(*(*DWORD)(unsafe.Pointer(bp + 32))), 0, uint64(DWORD(amt)-*(*DWORD)(unsafe.Pointer(bp + 32)))) @@ -61029,12 +61031,12 @@ if rc != 0 { if (*WinFile)(unsafe.Pointer(pFile)).FlastErrno == DWORD(39) || (*WinFile)(unsafe.Pointer(pFile)).FlastErrno == DWORD(112) { - return winLogErrorAtLine(tls, SQLITE_FULL, (*WinFile)(unsafe.Pointer(pFile)).FlastErrno, ts+4626, (*WinFile)(unsafe.Pointer(pFile)).FzPath, 47960) + return winLogErrorAtLine(tls, SQLITE_FULL, (*WinFile)(unsafe.Pointer(pFile)).FlastErrno, ts+4626, (*WinFile)(unsafe.Pointer(pFile)).FzPath, 47964) } - return winLogErrorAtLine(tls, SQLITE_IOERR|int32(3)<<8, (*WinFile)(unsafe.Pointer(pFile)).FlastErrno, ts+4636, (*WinFile)(unsafe.Pointer(pFile)).FzPath, 47965) + return winLogErrorAtLine(tls, SQLITE_IOERR|int32(3)<<8, (*WinFile)(unsafe.Pointer(pFile)).FlastErrno, ts+4636, (*WinFile)(unsafe.Pointer(pFile)).FzPath, 47969) } else { - winLogIoerr(tls, *(*int32)(unsafe.Pointer(bp + 36)), 47968) + winLogIoerr(tls, *(*int32)(unsafe.Pointer(bp + 36)), 47972) } return SQLITE_OK @@ -61061,10 +61063,10 @@ winUnmapfile(tls, pFile) if winSeekFile(tls, pFile, nByte) != 0 { - rc = 
winLogErrorAtLine(tls, SQLITE_IOERR|int32(6)<<8, (*WinFile)(unsafe.Pointer(pFile)).FlastErrno, ts+4646, (*WinFile)(unsafe.Pointer(pFile)).FzPath, 48031) + rc = winLogErrorAtLine(tls, SQLITE_IOERR|int32(6)<<8, (*WinFile)(unsafe.Pointer(pFile)).FlastErrno, ts+4646, (*WinFile)(unsafe.Pointer(pFile)).FzPath, 48035) } else if 0 == (*(*func(*libc.TLS, HANDLE) WINBOOL)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 53*24 + 8)))(tls, (*WinFile)(unsafe.Pointer(pFile)).Fh) && libc.AssignUint32(&lastErrno, (*(*func(*libc.TLS) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls)) != DWORD(1224) { (*WinFile)(unsafe.Pointer(pFile)).FlastErrno = lastErrno - rc = winLogErrorAtLine(tls, SQLITE_IOERR|int32(6)<<8, (*WinFile)(unsafe.Pointer(pFile)).FlastErrno, ts+4659, (*WinFile)(unsafe.Pointer(pFile)).FzPath, 48036) + rc = winLogErrorAtLine(tls, SQLITE_IOERR|int32(6)<<8, (*WinFile)(unsafe.Pointer(pFile)).FlastErrno, ts+4659, (*WinFile)(unsafe.Pointer(pFile)).FzPath, 48040) } if rc == SQLITE_OK && oldMmapSize > int64(0) { @@ -61090,7 +61092,7 @@ } else { (*WinFile)(unsafe.Pointer(pFile)).FlastErrno = (*(*func(*libc.TLS) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls) - return winLogErrorAtLine(tls, SQLITE_IOERR|int32(24)<<8, (*WinFile)(unsafe.Pointer(pFile)).FlastErrno, ts+4672, (*WinFile)(unsafe.Pointer(pFile)).FzPath, 48128) + return winLogErrorAtLine(tls, SQLITE_IOERR|int32(24)<<8, (*WinFile)(unsafe.Pointer(pFile)).FlastErrno, ts+4672, (*WinFile)(unsafe.Pointer(pFile)).FzPath, 48132) } } rc = (*(*func(*libc.TLS, HANDLE) WINBOOL)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 13*24 + 8)))(tls, (*WinFile)(unsafe.Pointer(pFile)).Fh) @@ -61100,7 +61102,7 @@ } else { (*WinFile)(unsafe.Pointer(pFile)).FlastErrno = (*(*func(*libc.TLS) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls) - return winLogErrorAtLine(tls, SQLITE_IOERR|int32(4)<<8, (*WinFile)(unsafe.Pointer(pFile)).FlastErrno, ts+4681, (*WinFile)(unsafe.Pointer(pFile)).FzPath, 48143) + return winLogErrorAtLine(tls, SQLITE_IOERR|int32(4)<<8, (*WinFile)(unsafe.Pointer(pFile)).FlastErrno, ts+4681, (*WinFile)(unsafe.Pointer(pFile)).FzPath, 48147) } return int32(0) } @@ -61121,7 +61123,7 @@ if lowerBits == 0xffffffff && libc.AssignUint32(&lastErrno, (*(*func(*libc.TLS) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls)) != DWORD(0) { (*WinFile)(unsafe.Pointer(pFile)).FlastErrno = lastErrno - rc = winLogErrorAtLine(tls, SQLITE_IOERR|int32(7)<<8, (*WinFile)(unsafe.Pointer(pFile)).FlastErrno, ts+4690, (*WinFile)(unsafe.Pointer(pFile)).FzPath, 48184) + rc = winLogErrorAtLine(tls, SQLITE_IOERR|int32(7)<<8, (*WinFile)(unsafe.Pointer(pFile)).FlastErrno, ts+4690, (*WinFile)(unsafe.Pointer(pFile)).FzPath, 48188) } } @@ -61163,7 +61165,7 @@ } if res == 0 && libc.AssignUint32(&lastErrno, (*(*func(*libc.TLS) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls)) != DWORD(158) { (*WinFile)(unsafe.Pointer(pFile)).FlastErrno = lastErrno - winLogErrorAtLine(tls, SQLITE_IOERR|int32(8)<<8, (*WinFile)(unsafe.Pointer(pFile)).FlastErrno, ts+4702, (*WinFile)(unsafe.Pointer(pFile)).FzPath, 48279) + winLogErrorAtLine(tls, SQLITE_IOERR|int32(8)<<8, (*WinFile)(unsafe.Pointer(pFile)).FlastErrno, ts+4702, (*WinFile)(unsafe.Pointer(pFile)).FzPath, 48283) } return res @@ -61289,7 +61291,7 @@ if type1 >= EXCLUSIVE_LOCK { winUnlockFile(tls, pFile+16, uint32(Xsqlite3PendingByte+2), uint32(0), uint32(SHARED_SIZE), uint32(0)) if locktype == SHARED_LOCK && 
!(winGetReadLock(tls, pFile) != 0) { - rc = winLogErrorAtLine(tls, SQLITE_IOERR|int32(8)<<8, (*(*func(*libc.TLS) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls), ts+4720, (*WinFile)(unsafe.Pointer(pFile)).FzPath, 48505) + rc = winLogErrorAtLine(tls, SQLITE_IOERR|int32(8)<<8, (*(*func(*libc.TLS) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls), ts+4720, (*WinFile)(unsafe.Pointer(pFile)).FzPath, 48509) } } if type1 >= RESERVED_LOCK { @@ -61592,7 +61594,7 @@ return SQLITE_READONLY | int32(5)<<8 } else if winTruncate(tls, pShmNode+16, int64(0)) != 0 { winShmSystemLock(tls, pShmNode, WINSHM_UNLCK, (22+SQLITE_SHM_NLOCK)*4+SQLITE_SHM_NLOCK, 1) - return winLogErrorAtLine(tls, SQLITE_IOERR|int32(18)<<8, (*(*func(*libc.TLS) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls), ts+4730, (*WinShmNode)(unsafe.Pointer(pShmNode)).FzFilename, 48971) + return winLogErrorAtLine(tls, SQLITE_IOERR|int32(18)<<8, (*(*func(*libc.TLS) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls), ts+4730, (*WinShmNode)(unsafe.Pointer(pShmNode)).FzFilename, 48975) } } @@ -61699,7 +61701,7 @@ if !(rc != SQLITE_OK) { goto __13 } - rc = winLogErrorAtLine(tls, rc, (*(*func(*libc.TLS) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls), ts+4770, (*WinShmNode)(unsafe.Pointer(pShmNode)).FzFilename, 49053) + rc = winLogErrorAtLine(tls, rc, (*(*func(*libc.TLS) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls), ts+4770, (*WinShmNode)(unsafe.Pointer(pShmNode)).FzFilename, 49057) goto shm_open_err __13: ; @@ -61927,7 +61929,7 @@ if !(rc != SQLITE_OK) { goto __6 } - rc = winLogErrorAtLine(tls, SQLITE_IOERR|int32(19)<<8, (*(*func(*libc.TLS) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls), ts+4781, (*WinFile)(unsafe.Pointer(pDbFd)).FzPath, 49324) + rc = winLogErrorAtLine(tls, SQLITE_IOERR|int32(19)<<8, (*(*func(*libc.TLS) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls), ts+4781, (*WinFile)(unsafe.Pointer(pDbFd)).FzPath, 49328) goto shmpage_out __6: ; @@ -61945,7 +61947,7 @@ if !(rc != SQLITE_OK) { goto __9 } - rc = winLogErrorAtLine(tls, SQLITE_IOERR|int32(19)<<8, (*(*func(*libc.TLS) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls), ts+4792, (*WinFile)(unsafe.Pointer(pDbFd)).FzPath, 49339) + rc = winLogErrorAtLine(tls, SQLITE_IOERR|int32(19)<<8, (*(*func(*libc.TLS) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls), ts+4792, (*WinFile)(unsafe.Pointer(pDbFd)).FzPath, 49343) goto shmpage_out __9: ; @@ -61993,7 +61995,7 @@ goto __15 } (*WinShmNode)(unsafe.Pointer(pShmNode)).FlastErrno = (*(*func(*libc.TLS) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls) - rc = winLogErrorAtLine(tls, SQLITE_IOERR|int32(21)<<8, (*WinShmNode)(unsafe.Pointer(pShmNode)).FlastErrno, ts+4803, (*WinFile)(unsafe.Pointer(pDbFd)).FzPath, 49398) + rc = winLogErrorAtLine(tls, SQLITE_IOERR|int32(21)<<8, (*WinShmNode)(unsafe.Pointer(pShmNode)).FlastErrno, ts+4803, (*WinFile)(unsafe.Pointer(pDbFd)).FzPath, 49402) if !(hMap != 0) { goto __16 } @@ -62039,7 +62041,7 @@ if !((*(*func(*libc.TLS, LPCVOID) WINBOOL)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 59*24 + 8)))(tls, (*WinFile)(unsafe.Pointer(pFile)).FpMapRegion) != 0) { (*WinFile)(unsafe.Pointer(pFile)).FlastErrno = (*(*func(*libc.TLS) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls) - return 
winLogErrorAtLine(tls, SQLITE_IOERR|int32(24)<<8, (*WinFile)(unsafe.Pointer(pFile)).FlastErrno, ts+4814, (*WinFile)(unsafe.Pointer(pFile)).FzPath, 49447) + return winLogErrorAtLine(tls, SQLITE_IOERR|int32(24)<<8, (*WinFile)(unsafe.Pointer(pFile)).FlastErrno, ts+4814, (*WinFile)(unsafe.Pointer(pFile)).FzPath, 49451) } (*WinFile)(unsafe.Pointer(pFile)).FpMapRegion = uintptr(0) (*WinFile)(unsafe.Pointer(pFile)).FmmapSize = int64(0) @@ -62048,7 +62050,7 @@ if !((*(*func(*libc.TLS, HANDLE) WINBOOL)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 3*24 + 8)))(tls, (*WinFile)(unsafe.Pointer(pFile)).FhMap) != 0) { (*WinFile)(unsafe.Pointer(pFile)).FlastErrno = (*(*func(*libc.TLS) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls) - return winLogErrorAtLine(tls, SQLITE_IOERR|int32(24)<<8, (*WinFile)(unsafe.Pointer(pFile)).FlastErrno, ts+4828, (*WinFile)(unsafe.Pointer(pFile)).FzPath, 49458) + return winLogErrorAtLine(tls, SQLITE_IOERR|int32(24)<<8, (*WinFile)(unsafe.Pointer(pFile)).FlastErrno, ts+4828, (*WinFile)(unsafe.Pointer(pFile)).FzPath, 49462) } (*WinFile)(unsafe.Pointer(pFile)).FhMap = uintptr(0) } @@ -62092,7 +62094,7 @@ DWORD(*(*Sqlite3_int64)(unsafe.Pointer(bp))&int64(0xffffffff)), uintptr(0)) if (*WinFile)(unsafe.Pointer(pFd)).FhMap == uintptr(0) { (*WinFile)(unsafe.Pointer(pFd)).FlastErrno = (*(*func(*libc.TLS) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls) - rc = winLogErrorAtLine(tls, SQLITE_IOERR|int32(24)<<8, (*WinFile)(unsafe.Pointer(pFd)).FlastErrno, ts+4842, (*WinFile)(unsafe.Pointer(pFd)).FzPath, 49535) + rc = winLogErrorAtLine(tls, SQLITE_IOERR|int32(24)<<8, (*WinFile)(unsafe.Pointer(pFd)).FlastErrno, ts+4842, (*WinFile)(unsafe.Pointer(pFd)).FzPath, 49539) return SQLITE_OK } @@ -62102,7 +62104,7 @@ (*(*func(*libc.TLS, HANDLE) WINBOOL)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 3*24 + 8)))(tls, (*WinFile)(unsafe.Pointer(pFd)).FhMap) (*WinFile)(unsafe.Pointer(pFd)).FhMap = uintptr(0) (*WinFile)(unsafe.Pointer(pFd)).FlastErrno = (*(*func(*libc.TLS) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls) - rc = winLogErrorAtLine(tls, SQLITE_IOERR|int32(24)<<8, (*WinFile)(unsafe.Pointer(pFd)).FlastErrno, ts+4854, (*WinFile)(unsafe.Pointer(pFd)).FzPath, 49553) + rc = winLogErrorAtLine(tls, SQLITE_IOERR|int32(24)<<8, (*WinFile)(unsafe.Pointer(pFd)).FlastErrno, ts+4854, (*WinFile)(unsafe.Pointer(pFd)).FzPath, 49557) return SQLITE_OK } @@ -62266,7 +62268,7 @@ Xsqlite3_mutex_leave(tls, Xsqlite3MutexAlloc(tls, SQLITE_MUTEX_STATIC_VFS1)) Xsqlite3_free(tls, zBuf) - return winLogErrorAtLine(tls, SQLITE_ERROR, uint32(0), ts+4874, uintptr(0), 49855) + return winLogErrorAtLine(tls, SQLITE_ERROR, uint32(0), ts+4874, uintptr(0), 49859) } Xsqlite3_snprintf(tls, nMax, zBuf, ts+4493, libc.VaList(bp, Xsqlite3_temp_directory)) } @@ -62283,7 +62285,7 @@ Xsqlite3_free(tls, zWidePath) Xsqlite3_free(tls, zBuf) - return winLogErrorAtLine(tls, SQLITE_IOERR|int32(25)<<8, (*(*func(*libc.TLS) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls), ts+4890, uintptr(0), 49955) + return winLogErrorAtLine(tls, SQLITE_IOERR|int32(25)<<8, (*(*func(*libc.TLS) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls), ts+4890, uintptr(0), 49959) } zMulti = winUnicodeToUtf8(tls, zWidePath) if zMulti != 0 { @@ -62307,7 +62309,7 @@ if (*(*func(*libc.TLS, DWORD, LPSTR) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 31*24 + 8)))(tls, uint32(nMax), zMbcsPath) == DWORD(0) { 
Xsqlite3_free(tls, zBuf) - return winLogErrorAtLine(tls, SQLITE_IOERR|int32(25)<<8, (*(*func(*libc.TLS) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls), ts+4906, uintptr(0), 49982) + return winLogErrorAtLine(tls, SQLITE_IOERR|int32(25)<<8, (*(*func(*libc.TLS) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls), ts+4906, uintptr(0), 49986) } zUtf8 = winMbcsToUtf8(tls, zMbcsPath, (*(*func(*libc.TLS) WINBOOL)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 8)))(tls)) if zUtf8 != 0 { @@ -62323,14 +62325,14 @@ if !(winMakeEndInDirSep(tls, nDir+1, zBuf) != 0) { Xsqlite3_free(tls, zBuf) - return winLogErrorAtLine(tls, SQLITE_ERROR, uint32(0), ts+4922, uintptr(0), 50006) + return winLogErrorAtLine(tls, SQLITE_ERROR, uint32(0), ts+4922, uintptr(0), 50010) } nLen = Xsqlite3Strlen30(tls, zBuf) if nLen+nPre+17 > nBuf { Xsqlite3_free(tls, zBuf) - return winLogErrorAtLine(tls, SQLITE_ERROR, uint32(0), ts+4938, uintptr(0), 50024) + return winLogErrorAtLine(tls, SQLITE_ERROR, uint32(0), ts+4938, uintptr(0), 50028) } Xsqlite3_snprintf(tls, nBuf-16-nLen, zBuf+uintptr(nLen), ts+4866, 0) @@ -62512,7 +62514,7 @@ } } } - winLogIoerr(tls, *(*int32)(unsafe.Pointer(bp + 8)), 50313) + winLogIoerr(tls, *(*int32)(unsafe.Pointer(bp + 8)), 50317) if h == libc.UintptrFromInt64(int64(-1)) { Xsqlite3_free(tls, zConverted) @@ -62523,8 +62525,8 @@ pOutFlags) } else { (*WinFile)(unsafe.Pointer(pFile)).FlastErrno = *(*DWORD)(unsafe.Pointer(bp + 12)) - winLogErrorAtLine(tls, SQLITE_CANTOPEN, (*WinFile)(unsafe.Pointer(pFile)).FlastErrno, ts+5027, zUtf8Name, 50328) - return Xsqlite3CantopenError(tls, 50329) + winLogErrorAtLine(tls, SQLITE_CANTOPEN, (*WinFile)(unsafe.Pointer(pFile)).FlastErrno, ts+5027, zUtf8Name, 50332) + return Xsqlite3CantopenError(tls, 50333) } } @@ -62639,9 +62641,9 @@ } } if rc != 0 && rc != SQLITE_IOERR|int32(23)<<8 { - rc = winLogErrorAtLine(tls, SQLITE_IOERR|int32(10)<<8, *(*DWORD)(unsafe.Pointer(bp + 4)), ts+5040, zFilename, 50501) + rc = winLogErrorAtLine(tls, SQLITE_IOERR|int32(10)<<8, *(*DWORD)(unsafe.Pointer(bp + 4)), ts+5040, zFilename, 50505) } else { - winLogIoerr(tls, *(*int32)(unsafe.Pointer(bp)), 50503) + winLogIoerr(tls, *(*int32)(unsafe.Pointer(bp)), 50507) } Xsqlite3_free(tls, zConverted) @@ -62679,10 +62681,10 @@ attr = (*WIN32_FILE_ATTRIBUTE_DATA)(unsafe.Pointer(bp)).FdwFileAttributes } } else { - winLogIoerr(tls, *(*int32)(unsafe.Pointer(bp + 36)), 50553) + winLogIoerr(tls, *(*int32)(unsafe.Pointer(bp + 36)), 50557) if *(*DWORD)(unsafe.Pointer(bp + 40)) != DWORD(2) && *(*DWORD)(unsafe.Pointer(bp + 40)) != DWORD(3) { Xsqlite3_free(tls, zConverted) - return winLogErrorAtLine(tls, SQLITE_IOERR|int32(13)<<8, *(*DWORD)(unsafe.Pointer(bp + 40)), ts+5050, zFilename, 50556) + return winLogErrorAtLine(tls, SQLITE_IOERR|int32(13)<<8, *(*DWORD)(unsafe.Pointer(bp + 40)), ts+5050, zFilename, 50560) } else { attr = libc.Uint32(libc.Uint32FromInt32(-1)) } @@ -62760,7 +62762,7 @@ nByte = (*(*func(*libc.TLS, LPCWSTR, DWORD, LPWSTR, uintptr) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 25*24 + 8)))(tls, zConverted, uint32(0), uintptr(0), uintptr(0)) if nByte == DWORD(0) { Xsqlite3_free(tls, zConverted) - return winLogErrorAtLine(tls, SQLITE_CANTOPEN|int32(3)<<8, (*(*func(*libc.TLS) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls), ts+5067, zRelative, 50773) + return winLogErrorAtLine(tls, SQLITE_CANTOPEN|int32(3)<<8, (*(*func(*libc.TLS) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 
8)))(tls), ts+5067, zRelative, 50777) } nByte = nByte + DWORD(3) zTemp = Xsqlite3MallocZero(tls, uint64(nByte)*uint64(unsafe.Sizeof(WCHAR(0)))) @@ -62772,7 +62774,7 @@ if nByte == DWORD(0) { Xsqlite3_free(tls, zConverted) Xsqlite3_free(tls, zTemp) - return winLogErrorAtLine(tls, SQLITE_CANTOPEN|int32(3)<<8, (*(*func(*libc.TLS) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls), ts+5084, zRelative, 50786) + return winLogErrorAtLine(tls, SQLITE_CANTOPEN|int32(3)<<8, (*(*func(*libc.TLS) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls), ts+5084, zRelative, 50790) } Xsqlite3_free(tls, zConverted) zOut = winUnicodeToUtf8(tls, zTemp) @@ -62782,7 +62784,7 @@ nByte = (*(*func(*libc.TLS, LPCSTR, DWORD, LPSTR, uintptr) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 24*24 + 8)))(tls, zConverted, uint32(0), uintptr(0), uintptr(0)) if nByte == DWORD(0) { Xsqlite3_free(tls, zConverted) - return winLogErrorAtLine(tls, SQLITE_CANTOPEN|int32(3)<<8, (*(*func(*libc.TLS) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls), ts+5101, zRelative, 50799) + return winLogErrorAtLine(tls, SQLITE_CANTOPEN|int32(3)<<8, (*(*func(*libc.TLS) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls), ts+5101, zRelative, 50803) } nByte = nByte + DWORD(3) zTemp = Xsqlite3MallocZero(tls, uint64(nByte)*uint64(unsafe.Sizeof(int8(0)))) @@ -62794,7 +62796,7 @@ if nByte == DWORD(0) { Xsqlite3_free(tls, zConverted) Xsqlite3_free(tls, zTemp) - return winLogErrorAtLine(tls, SQLITE_CANTOPEN|int32(3)<<8, (*(*func(*libc.TLS) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls), ts+5118, zRelative, 50812) + return winLogErrorAtLine(tls, SQLITE_CANTOPEN|int32(3)<<8, (*(*func(*libc.TLS) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls), ts+5118, zRelative, 50816) } Xsqlite3_free(tls, zConverted) zOut = winMbcsToUtf8(tls, zTemp, (*(*func(*libc.TLS) WINBOOL)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 8)))(tls)) @@ -64424,7 +64426,7 @@ libc.Xmemset(tls, pPgHdr+32, 0, uint64(unsafe.Sizeof(PgHdr{}))-uint64(uintptr(0)+32)) (*PgHdr)(unsafe.Pointer(pPgHdr)).FpPage = pPage (*PgHdr)(unsafe.Pointer(pPgHdr)).FpData = (*Sqlite3_pcache_page)(unsafe.Pointer(pPage)).FpBuf - (*PgHdr)(unsafe.Pointer(pPgHdr)).FpExtra = pPgHdr + 1*72 + (*PgHdr)(unsafe.Pointer(pPgHdr)).FpExtra = pPgHdr + 1*80 libc.Xmemset(tls, (*PgHdr)(unsafe.Pointer(pPgHdr)).FpExtra, 0, uint64(8)) (*PgHdr)(unsafe.Pointer(pPgHdr)).FpCache = pCache (*PgHdr)(unsafe.Pointer(pPgHdr)).Fpgno = pgno @@ -64454,7 +64456,7 @@ // reference count drops to 0, then it is made eligible for recycling. 
func Xsqlite3PcacheRelease(tls *libc.TLS, p uintptr) { (*PCache)(unsafe.Pointer((*PgHdr)(unsafe.Pointer(p)).FpCache)).FnRefSum-- - if int32(libc.PreDecInt16(&(*PgHdr)(unsafe.Pointer(p)).FnRef, 1)) == 0 { + if libc.PreDecInt64(&(*PgHdr)(unsafe.Pointer(p)).FnRef, 1) == int64(0) { if int32((*PgHdr)(unsafe.Pointer(p)).Fflags)&PGHDR_CLEAN != 0 { pcacheUnpin(tls, p) } else { @@ -64505,7 +64507,7 @@ *(*U16)(unsafe.Pointer(p + 52)) &= libc.Uint16FromInt32(libc.CplInt32(PGHDR_DIRTY | PGHDR_NEED_SYNC | PGHDR_WRITEABLE)) *(*U16)(unsafe.Pointer(p + 52)) |= U16(PGHDR_CLEAN) - if int32((*PgHdr)(unsafe.Pointer(p)).FnRef) == 0 { + if (*PgHdr)(unsafe.Pointer(p)).FnRef == int64(0) { pcacheUnpin(tls, p) } } @@ -64609,8 +64611,8 @@ } func pcacheMergeDirtyList(tls *libc.TLS, pA uintptr, pB uintptr) uintptr { - bp := tls.Alloc(72) - defer tls.Free(72) + bp := tls.Alloc(80) + defer tls.Free(80) var pTail uintptr pTail = bp @@ -64688,13 +64690,13 @@ // // This is not the total number of pages referenced, but the sum of the // reference count for all pages. -func Xsqlite3PcacheRefCount(tls *libc.TLS, pCache uintptr) int32 { +func Xsqlite3PcacheRefCount(tls *libc.TLS, pCache uintptr) I64 { return (*PCache)(unsafe.Pointer(pCache)).FnRefSum } // Return the number of references to the page supplied as an argument. -func Xsqlite3PcachePageRefcount(tls *libc.TLS, p uintptr) int32 { - return int32((*PgHdr)(unsafe.Pointer(p)).FnRef) +func Xsqlite3PcachePageRefcount(tls *libc.TLS, p uintptr) I64 { + return (*PgHdr)(unsafe.Pointer(p)).FnRef } // Return the total number of pages in the cache. @@ -66988,7 +66990,7 @@ pPg = Xsqlite3PagerLookup(tls, pPager, iPg) if pPg != 0 { - if Xsqlite3PcachePageRefcount(tls, pPg) == 1 { + if Xsqlite3PcachePageRefcount(tls, pPg) == int64(1) { Xsqlite3PcacheDrop(tls, pPg) } else { rc = readDbPage(tls, pPg) @@ -67421,7 +67423,7 @@ var pageSize U32 = *(*U32)(unsafe.Pointer(pPageSize)) if (int32((*Pager)(unsafe.Pointer(pPager)).FmemDb) == 0 || (*Pager)(unsafe.Pointer(pPager)).FdbSize == Pgno(0)) && - Xsqlite3PcacheRefCount(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache) == 0 && + Xsqlite3PcacheRefCount(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache) == int64(0) && pageSize != 0 && pageSize != U32((*Pager)(unsafe.Pointer(pPager)).FpageSize) { var pNew uintptr = uintptr(0) *(*I64)(unsafe.Pointer(bp)) = int64(0) @@ -67573,9 +67575,9 @@ Xsqlite3OsUnfetch(tls, (*Pager)(unsafe.Pointer(pPager)).Ffd, I64(pgno-Pgno(1))*(*Pager)(unsafe.Pointer(pPager)).FpageSize, pData) return SQLITE_NOMEM } - (*PgHdr)(unsafe.Pointer(p)).FpExtra = p + 1*72 + (*PgHdr)(unsafe.Pointer(p)).FpExtra = p + 1*80 (*PgHdr)(unsafe.Pointer(p)).Fflags = U16(PGHDR_MMAP) - (*PgHdr)(unsafe.Pointer(p)).FnRef = int16(1) + (*PgHdr)(unsafe.Pointer(p)).FnRef = int64(1) (*PgHdr)(unsafe.Pointer(p)).FpPager = pPager } @@ -67907,7 +67909,7 @@ for rc == SQLITE_OK && pList != 0 { var pNext uintptr = (*PgHdr)(unsafe.Pointer(pList)).FpDirty - if int32((*PgHdr)(unsafe.Pointer(pList)).FnRef) == 0 { + if (*PgHdr)(unsafe.Pointer(pList)).FnRef == int64(0) { rc = pagerStress(tls, pPager, pList) } pList = pNext @@ -68057,7 +68059,7 @@ goto __12 } - rc = Xsqlite3CantopenError(tls, 60235) + rc = Xsqlite3CantopenError(tls, 60239) __12: ; if !(rc != SQLITE_OK) { @@ -68438,7 +68440,7 @@ if !(rc == SQLITE_OK && *(*int32)(unsafe.Pointer(bp + 8))&SQLITE_OPEN_READONLY != 0) { goto __10 } - rc = Xsqlite3CantopenError(tls, 60754) + rc = Xsqlite3CantopenError(tls, 60758) Xsqlite3OsClose(tls, (*Pager)(unsafe.Pointer(pPager)).Fjfd) __10: ; @@ -68544,7 +68546,7 @@ } func 
pagerUnlockIfUnused(tls *libc.TLS, pPager uintptr) { - if Xsqlite3PcacheRefCount(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache) == 0 { + if Xsqlite3PcacheRefCount(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache) == int64(0) { pagerUnlockAndRollback(tls, pPager) } } @@ -68562,7 +68564,7 @@ if !(pgno == Pgno(0)) { goto __1 } - return Xsqlite3CorruptError(tls, 60967) + return Xsqlite3CorruptError(tls, 60971) __1: ; *(*uintptr)(unsafe.Pointer(bp)) = Xsqlite3PcacheFetch(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache, pgno, 3) @@ -68601,7 +68603,7 @@ if !(pgno == (*Pager)(unsafe.Pointer(pPager)).FlckPgno) { goto __7 } - rc = Xsqlite3CorruptError(tls, 60999) + rc = Xsqlite3CorruptError(tls, 61003) goto pager_acquire_err __7: ; @@ -68678,7 +68680,7 @@ (int32((*Pager)(unsafe.Pointer(pPager)).FeState) == PAGER_READER || flags&PAGER_GET_READONLY != 0)) if pgno <= Pgno(1) && pgno == Pgno(0) { - return Xsqlite3CorruptError(tls, 61078) + return Xsqlite3CorruptError(tls, 61082) } if bMmapOk != 0 && (*Pager)(unsafe.Pointer(pPager)).FpWal != uintptr(0) { @@ -69436,7 +69438,7 @@ // Return the number of references to the specified page. func Xsqlite3PagerPageRefcount(tls *libc.TLS, pPage uintptr) int32 { - return Xsqlite3PcachePageRefcount(tls, pPage) + return int32(Xsqlite3PcachePageRefcount(tls, pPage)) } // Parameter eStat must be one of SQLITE_DBSTATUS_CACHE_HIT, _MISS, _WRITE, @@ -69679,9 +69681,9 @@ pPgOld = Xsqlite3PagerLookup(tls, pPager, pgno) if pPgOld != 0 { - if int32((*PgHdr)(unsafe.Pointer(pPgOld)).FnRef) > 1 { + if (*PgHdr)(unsafe.Pointer(pPgOld)).FnRef > int64(1) { Xsqlite3PagerUnrefNotNull(tls, pPgOld) - return Xsqlite3CorruptError(tls, 62623) + return Xsqlite3CorruptError(tls, 62627) } *(*U16)(unsafe.Pointer(pPg + 52)) |= U16(int32((*PgHdr)(unsafe.Pointer(pPgOld)).Fflags) & PGHDR_NEED_SYNC) if (*Pager)(unsafe.Pointer(pPager)).FtempFile != 0 { @@ -70438,7 +70440,7 @@ nCollide = idx for iKey = walHash(tls, iPage); *(*Ht_slot)(unsafe.Pointer((*WalHashLoc)(unsafe.Pointer(bp)).FaHash + uintptr(iKey)*2)) != 0; iKey = walNextHash(tls, iKey) { if libc.PostDecInt32(&nCollide, 1) == 0 { - return Xsqlite3CorruptError(tls, 64387) + return Xsqlite3CorruptError(tls, 64391) } } *(*U32)(unsafe.Pointer((*WalHashLoc)(unsafe.Pointer(bp)).FaPgno + uintptr(idx-1)*4)) = iPage @@ -70537,7 +70539,7 @@ if !(version != U32(WAL_MAX_VERSION)) { goto __7 } - rc = Xsqlite3CantopenError(tls, 64519) + rc = Xsqlite3CantopenError(tls, 64523) goto finished __7: ; @@ -71123,7 +71125,7 @@ goto __14 } - rc = Xsqlite3CorruptError(tls, 65333) + rc = Xsqlite3CorruptError(tls, 65337) goto __15 __14: Xsqlite3OsFileControlHint(tls, (*Wal)(unsafe.Pointer(pWal)).FpDbFd, SQLITE_FCNTL_SIZE_HINT, bp+16) @@ -71398,7 +71400,7 @@ } if badHdr == 0 && (*Wal)(unsafe.Pointer(pWal)).Fhdr.FiVersion != U32(WALINDEX_MAX_VERSION) { - rc = Xsqlite3CantopenError(tls, 65682) + rc = Xsqlite3CantopenError(tls, 65686) } if (*Wal)(unsafe.Pointer(pWal)).FbShmUnreliable != 0 { if rc != SQLITE_OK { @@ -71871,7 +71873,7 @@ iRead = iFrame } if libc.PostDecInt32(&nCollide, 1) == 0 { - return Xsqlite3CorruptError(tls, 66419) + return Xsqlite3CorruptError(tls, 66423) } iKey = walNextHash(tls, iKey) } @@ -72376,7 +72378,7 @@ if rc == SQLITE_OK { if (*Wal)(unsafe.Pointer(pWal)).Fhdr.FmxFrame != 0 && walPagesize(tls, pWal) != nBuf { - rc = Xsqlite3CorruptError(tls, 67138) + rc = Xsqlite3CorruptError(tls, 67142) } else { rc = walCheckpoint(tls, pWal, db, eMode2, xBusy2, pBusyArg, sync_flags, zBuf) } @@ -73034,7 +73036,7 @@ } Xsqlite3VdbeRecordUnpack(tls, pKeyInfo, 
int32(nKey), pKey, pIdxKey) if int32((*UnpackedRecord)(unsafe.Pointer(pIdxKey)).FnField) == 0 || int32((*UnpackedRecord)(unsafe.Pointer(pIdxKey)).FnField) > int32((*KeyInfo)(unsafe.Pointer(pKeyInfo)).FnAllField) { - rc = Xsqlite3CorruptError(tls, 69249) + rc = Xsqlite3CorruptError(tls, 69253) } else { rc = Xsqlite3BtreeIndexMoveto(tls, pCur, pIdxKey, pRes) } @@ -73171,7 +73173,7 @@ if !(key == Pgno(0)) { goto __2 } - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69430) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69434) return __2: ; @@ -73188,7 +73190,7 @@ goto __4 } - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69443) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69447) goto ptrmap_exit __4: ; @@ -73196,7 +73198,7 @@ if !(offset < 0) { goto __5 } - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69448) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69452) goto ptrmap_exit __5: ; @@ -73239,7 +73241,7 @@ offset = int32(Pgno(5) * (key - Pgno(iPtrmap) - Pgno(1))) if offset < 0 { Xsqlite3PagerUnref(tls, *(*uintptr)(unsafe.Pointer(bp))) - return Xsqlite3CorruptError(tls, 69493) + return Xsqlite3CorruptError(tls, 69497) } *(*U8)(unsafe.Pointer(pEType)) = *(*U8)(unsafe.Pointer(pPtrmap + uintptr(offset))) @@ -73249,7 +73251,7 @@ Xsqlite3PagerUnref(tls, *(*uintptr)(unsafe.Pointer(bp))) if int32(*(*U8)(unsafe.Pointer(pEType))) < 1 || int32(*(*U8)(unsafe.Pointer(pEType))) > 5 { - return Xsqlite3CorruptError(tls, 69501) + return Xsqlite3CorruptError(tls, 69505) } return SQLITE_OK } @@ -73499,7 +73501,7 @@ if U32((*CellInfo)(unsafe.Pointer(bp)).FnLocal) < (*CellInfo)(unsafe.Pointer(bp)).FnPayload { var ovfl Pgno if Uptr((*MemPage)(unsafe.Pointer(pSrc)).FaDataEnd) >= Uptr(pCell) && Uptr((*MemPage)(unsafe.Pointer(pSrc)).FaDataEnd) < Uptr(pCell+uintptr((*CellInfo)(unsafe.Pointer(bp)).FnLocal)) { - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69893) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69897) return } ovfl = Xsqlite3Get4byte(tls, pCell+uintptr(int32((*CellInfo)(unsafe.Pointer(bp)).FnSize)-4)) @@ -73546,7 +73548,7 @@ if !(iFree > usableSize-4) { goto __2 } - return Xsqlite3CorruptError(tls, 69951) + return Xsqlite3CorruptError(tls, 69955) __2: ; if !(iFree != 0) { @@ -73556,7 +73558,7 @@ if !(iFree2 > usableSize-4) { goto __4 } - return Xsqlite3CorruptError(tls, 69954) + return Xsqlite3CorruptError(tls, 69958) __4: ; if !(0 == iFree2 || int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree2)))) == 0 && int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree2+1)))) == 0) { @@ -73569,7 +73571,7 @@ if !(top >= iFree) { goto __6 } - return Xsqlite3CorruptError(tls, 69962) + return Xsqlite3CorruptError(tls, 69966) __6: ; if !(iFree2 != 0) { @@ -73578,14 +73580,14 @@ if !(iFree+sz > iFree2) { goto __9 } - return Xsqlite3CorruptError(tls, 69965) + return Xsqlite3CorruptError(tls, 69969) __9: ; sz2 = int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree2+2))))<<8 | int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree2+2) + 1))) if !(iFree2+sz2 > usableSize) { goto __10 } - return Xsqlite3CorruptError(tls, 69967) + return Xsqlite3CorruptError(tls, 69971) __10: ; libc.Xmemmove(tls, data+uintptr(iFree+sz+sz2), data+uintptr(iFree+sz), uint64(iFree2-(iFree+sz))) @@ -73595,7 +73597,7 @@ if !(iFree+sz > usableSize) { goto __11 } - return Xsqlite3CorruptError(tls, 69971) + return Xsqlite3CorruptError(tls, 69975) __11: ; __8: @@ -73659,7 +73661,7 @@ if !(pc < iCellStart || pc > iCellLast) { goto __22 } - return 
Xsqlite3CorruptError(tls, 70004) + return Xsqlite3CorruptError(tls, 70008) __22: ; size = int32((*struct { @@ -73669,7 +73671,7 @@ if !(cbrk < iCellStart || pc+size > usableSize) { goto __23 } - return Xsqlite3CorruptError(tls, 70010) + return Xsqlite3CorruptError(tls, 70014) __23: ; *(*U8)(unsafe.Pointer(pAddr1)) = U8(cbrk >> 8) @@ -73691,7 +73693,7 @@ if !(int32(*(*uint8)(unsafe.Pointer(data + uintptr(hdr+7))))+cbrk-iCellFirst != (*MemPage)(unsafe.Pointer(pPage)).FnFree) { goto __24 } - return Xsqlite3CorruptError(tls, 70024) + return Xsqlite3CorruptError(tls, 70028) __24: ; *(*uint8)(unsafe.Pointer(data + uintptr(hdr+5))) = U8(cbrk >> 8) @@ -73726,7 +73728,7 @@ *(*U8)(unsafe.Pointer(aData + uintptr(hdr+7))) += U8(int32(U8(x))) return aData + uintptr(pc) } else if x+pc > maxPC { - *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70081) + *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70085) return uintptr(0) } else { *(*U8)(unsafe.Pointer(aData + uintptr(pc+2))) = U8(x >> 8) @@ -73739,13 +73741,13 @@ pc = int32(*(*U8)(unsafe.Pointer(pTmp)))<<8 | int32(*(*U8)(unsafe.Pointer(pTmp + 1))) if pc <= iAddr { if pc != 0 { - *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70096) + *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70100) } return uintptr(0) } } if pc > maxPC+nByte-4 { - *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70103) + *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70107) } return uintptr(0) } @@ -73770,7 +73772,7 @@ if top == 0 && (*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize == U32(65536) { top = 65536 } else { - return Xsqlite3CorruptError(tls, 70152) + return Xsqlite3CorruptError(tls, 70156) } } @@ -73781,7 +73783,7 @@ *(*int32)(unsafe.Pointer(pIdx)) = libc.AssignInt32(&g2, int32((int64(pSpace)-int64(data))/1)) if g2 <= gap { - return Xsqlite3CorruptError(tls, 70170) + return Xsqlite3CorruptError(tls, 70174) } else { return SQLITE_OK } @@ -73833,22 +73835,22 @@ if int32(iFreeBlk) == 0 { break } - return Xsqlite3CorruptError(tls, 70249) + return Xsqlite3CorruptError(tls, 70253) } iPtr = iFreeBlk } if U32(iFreeBlk) > (*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize-U32(4) { - return Xsqlite3CorruptError(tls, 70254) + return Xsqlite3CorruptError(tls, 70258) } if iFreeBlk != 0 && iEnd+U32(3) >= U32(iFreeBlk) { nFrag = U8(U32(iFreeBlk) - iEnd) if iEnd > U32(iFreeBlk) { - return Xsqlite3CorruptError(tls, 70266) + return Xsqlite3CorruptError(tls, 70270) } iEnd = U32(int32(iFreeBlk) + (int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(iFreeBlk)+2))))<<8 | int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(iFreeBlk)+2) + 1))))) if iEnd > (*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize { - return Xsqlite3CorruptError(tls, 70269) + return Xsqlite3CorruptError(tls, 70273) } iSize = U16(iEnd - U32(iStart)) iFreeBlk = U16(int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFreeBlk))))<<8 | int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFreeBlk) + 1)))) @@ -73858,7 +73860,7 @@ var iPtrEnd int32 = int32(iPtr) + (int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(iPtr)+2))))<<8 | int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(iPtr)+2) + 1)))) if iPtrEnd+3 >= int32(iStart) { if iPtrEnd > int32(iStart) { - return Xsqlite3CorruptError(tls, 70282) + return Xsqlite3CorruptError(tls, 70286) } nFrag = U8(int32(nFrag) + (int32(iStart) - iPtrEnd)) iSize = U16(iEnd - U32(iPtr)) @@ -73866,7 +73868,7 @@ } } if int32(nFrag) > 
int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+7)))) { - return Xsqlite3CorruptError(tls, 70288) + return Xsqlite3CorruptError(tls, 70292) } *(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+7))) -= uint8(int32(nFrag)) } @@ -73874,10 +73876,10 @@ x = U16(int32(*(*U8)(unsafe.Pointer(pTmp)))<<8 | int32(*(*U8)(unsafe.Pointer(pTmp + 1)))) if int32(iStart) <= int32(x) { if int32(iStart) < int32(x) { - return Xsqlite3CorruptError(tls, 70297) + return Xsqlite3CorruptError(tls, 70301) } if int32(iPtr) != int32(hdr)+1 { - return Xsqlite3CorruptError(tls, 70298) + return Xsqlite3CorruptError(tls, 70302) } *(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+1))) = U8(int32(iFreeBlk) >> 8) *(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+1) + 1)) = U8(iFreeBlk) @@ -73937,7 +73939,7 @@ (*MemPage)(unsafe.Pointer(pPage)).FxParseCell = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr) }{btreeParseCellPtrIndex})) - return Xsqlite3CorruptError(tls, 70357) + return Xsqlite3CorruptError(tls, 70361) } } else { (*MemPage)(unsafe.Pointer(pPage)).FchildPtrSize = U8(4) @@ -73973,7 +73975,7 @@ (*MemPage)(unsafe.Pointer(pPage)).FxParseCell = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr) }{btreeParseCellPtrIndex})) - return Xsqlite3CorruptError(tls, 70381) + return Xsqlite3CorruptError(tls, 70385) } } return SQLITE_OK @@ -74003,11 +74005,11 @@ var next U32 var size U32 if pc < top { - return Xsqlite3CorruptError(tls, 70432) + return Xsqlite3CorruptError(tls, 70436) } for 1 != 0 { if pc > iCellLast { - return Xsqlite3CorruptError(tls, 70437) + return Xsqlite3CorruptError(tls, 70441) } next = U32(int32(*(*U8)(unsafe.Pointer(data + uintptr(pc))))<<8 | int32(*(*U8)(unsafe.Pointer(data + uintptr(pc) + 1)))) size = U32(int32(*(*U8)(unsafe.Pointer(data + uintptr(pc+2))))<<8 | int32(*(*U8)(unsafe.Pointer(data + uintptr(pc+2) + 1)))) @@ -74018,15 +74020,15 @@ pc = int32(next) } if next > U32(0) { - return Xsqlite3CorruptError(tls, 70447) + return Xsqlite3CorruptError(tls, 70451) } if U32(pc)+size > uint32(usableSize) { - return Xsqlite3CorruptError(tls, 70451) + return Xsqlite3CorruptError(tls, 70455) } } if nFree > usableSize || nFree < iCellFirst { - return Xsqlite3CorruptError(tls, 70463) + return Xsqlite3CorruptError(tls, 70467) } (*MemPage)(unsafe.Pointer(pPage)).FnFree = int32(U16(nFree - iCellFirst)) return SQLITE_OK @@ -74054,14 +74056,14 @@ pc = int32(libc.X__builtin_bswap16(tls, *(*U16)(unsafe.Pointer(data + uintptr(cellOffset+i*2))))) if pc < iCellFirst || pc > iCellLast { - return Xsqlite3CorruptError(tls, 70494) + return Xsqlite3CorruptError(tls, 70498) } sz = int32((*struct { f func(*libc.TLS, uintptr, uintptr) U16 })(unsafe.Pointer(&struct{ uintptr }{(*MemPage)(unsafe.Pointer(pPage)).FxCellSize})).f(tls, pPage, data+uintptr(pc))) if pc+sz > usableSize { - return Xsqlite3CorruptError(tls, 70499) + return Xsqlite3CorruptError(tls, 70503) } } return SQLITE_OK @@ -74075,7 +74077,7 @@ data = (*MemPage)(unsafe.Pointer(pPage)).FaData + uintptr((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset) if decodeFlags(tls, pPage, int32(*(*U8)(unsafe.Pointer(data)))) != 0 { - return Xsqlite3CorruptError(tls, 70531) + return Xsqlite3CorruptError(tls, 70535) } (*MemPage)(unsafe.Pointer(pPage)).FmaskPage = U16((*BtShared)(unsafe.Pointer(pBt)).FpageSize - U32(1)) @@ -74087,7 +74089,7 @@ (*MemPage)(unsafe.Pointer(pPage)).FnCell = U16(int32(*(*U8)(unsafe.Pointer(data + 3)))<<8 | int32(*(*U8)(unsafe.Pointer(data + 3 + 1)))) if 
U32((*MemPage)(unsafe.Pointer(pPage)).FnCell) > ((*BtShared)(unsafe.Pointer(pBt)).FpageSize-U32(8))/U32(6) { - return Xsqlite3CorruptError(tls, 70545) + return Xsqlite3CorruptError(tls, 70549) } (*MemPage)(unsafe.Pointer(pPage)).FnFree = -1 @@ -74190,7 +74192,7 @@ if !(pgno > btreePagecount(tls, pBt)) { goto __1 } - rc = Xsqlite3CorruptError(tls, 70700) + rc = Xsqlite3CorruptError(tls, 70704) goto getAndInitPage_error1 __1: ; @@ -74218,7 +74220,7 @@ if !(pCur != 0 && (int32((*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppPage)))).FnCell) < 1 || int32((*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppPage)))).FintKey) != int32((*BtCursor)(unsafe.Pointer(pCur)).FcurIntKey))) { goto __5 } - rc = Xsqlite3CorruptError(tls, 70721) + rc = Xsqlite3CorruptError(tls, 70725) goto getAndInitPage_error2 __5: ; @@ -74257,7 +74259,7 @@ if Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppPage)))).FpDbPage) > 1 { releasePage(tls, *(*uintptr)(unsafe.Pointer(ppPage))) *(*uintptr)(unsafe.Pointer(ppPage)) = uintptr(0) - return Xsqlite3CorruptError(tls, 70787) + return Xsqlite3CorruptError(tls, 70791) } (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppPage)))).FisInit = U8(0) } else { @@ -75140,7 +75142,7 @@ if !(Xsqlite3WritableSchema(tls, (*BtShared)(unsafe.Pointer(pBt)).Fdb) == 0) { goto __18 } - rc = Xsqlite3CorruptError(tls, 71722) + rc = Xsqlite3CorruptError(tls, 71726) goto page1_init_failed goto __19 __18: @@ -75555,7 +75557,7 @@ if int32(eType) == PTRMAP_OVERFLOW2 { if Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer(pPage)).FaData) != iFrom { - return Xsqlite3CorruptError(tls, 72143) + return Xsqlite3CorruptError(tls, 72147) } Xsqlite3Put4byte(tls, (*MemPage)(unsafe.Pointer(pPage)).FaData, iTo) } else { @@ -75581,7 +75583,7 @@ })(unsafe.Pointer(&struct{ uintptr }{(*MemPage)(unsafe.Pointer(pPage)).FxParseCell})).f(tls, pPage, pCell, bp) if U32((*CellInfo)(unsafe.Pointer(bp)).FnLocal) < (*CellInfo)(unsafe.Pointer(bp)).FnPayload { if pCell+uintptr((*CellInfo)(unsafe.Pointer(bp)).FnSize) > (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr((*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize) { - return Xsqlite3CorruptError(tls, 72162) + return Xsqlite3CorruptError(tls, 72166) } if iFrom == Xsqlite3Get4byte(tls, pCell+uintptr((*CellInfo)(unsafe.Pointer(bp)).FnSize)-uintptr(4)) { Xsqlite3Put4byte(tls, pCell+uintptr((*CellInfo)(unsafe.Pointer(bp)).FnSize)-uintptr(4), iTo) @@ -75590,7 +75592,7 @@ } } else { if pCell+uintptr(4) > (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr((*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize) { - return Xsqlite3CorruptError(tls, 72171) + return Xsqlite3CorruptError(tls, 72175) } if Xsqlite3Get4byte(tls, pCell) == iFrom { Xsqlite3Put4byte(tls, pCell, iTo) @@ -75601,7 +75603,7 @@ if i == nCell { if int32(eType) != PTRMAP_BTREE || Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr(int32((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset)+8)) != iFrom { - return Xsqlite3CorruptError(tls, 72183) + return Xsqlite3CorruptError(tls, 72187) } Xsqlite3Put4byte(tls, (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr(int32((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset)+8), iTo) } @@ -75617,7 +75619,7 @@ var pPager uintptr = (*BtShared)(unsafe.Pointer(pBt)).FpPager if iDbPage < Pgno(3) { - return Xsqlite3CorruptError(tls, 72218) + return Xsqlite3CorruptError(tls, 72222) } *(*int32)(unsafe.Pointer(bp)) = Xsqlite3PagerMovepage(tls, pPager, 
(*MemPage)(unsafe.Pointer(pDbPage)).FpDbPage, iFreePage, isCommit) @@ -75678,7 +75680,7 @@ return rc } if int32(*(*U8)(unsafe.Pointer(bp))) == PTRMAP_ROOTPAGE { - return Xsqlite3CorruptError(tls, 72316) + return Xsqlite3CorruptError(tls, 72320) } if int32(*(*U8)(unsafe.Pointer(bp))) == PTRMAP_FREEPAGE { @@ -75713,7 +75715,7 @@ releasePage(tls, *(*uintptr)(unsafe.Pointer(bp + 32))) if *(*Pgno)(unsafe.Pointer(bp + 40)) > dbSize { releasePage(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) - return Xsqlite3CorruptError(tls, 72368) + return Xsqlite3CorruptError(tls, 72372) } } @@ -75773,7 +75775,7 @@ var nFin Pgno = finalDbSize(tls, pBt, nOrig, nFree) if nOrig < nFin || nFree >= nOrig { - rc = Xsqlite3CorruptError(tls, 72436) + rc = Xsqlite3CorruptError(tls, 72440) } else if nFree > Pgno(0) { rc = saveAllCursors(tls, pBt, uint32(0), uintptr(0)) if rc == SQLITE_OK { @@ -75812,7 +75814,7 @@ nOrig = btreePagecount(tls, pBt) if ptrmapPageno(tls, pBt, nOrig) == nOrig || nOrig == U32(Xsqlite3PendingByte)/(*BtShared)(unsafe.Pointer(pBt)).FpageSize+U32(1) { - return Xsqlite3CorruptError(tls, 72487) + return Xsqlite3CorruptError(tls, 72491) } nFree = Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer((*BtShared)(unsafe.Pointer(pBt)).FpPage1)).FaData+36) @@ -75843,7 +75845,7 @@ } nFin = finalDbSize(tls, pBt, nOrig, nVac) if nFin > nOrig { - return Xsqlite3CorruptError(tls, 72514) + return Xsqlite3CorruptError(tls, 72518) } if nFin < nOrig { rc = saveAllCursors(tls, pBt, uint32(0), uintptr(0)) @@ -76184,7 +76186,7 @@ if iTable <= Pgno(1) { if iTable < Pgno(1) { - return Xsqlite3CorruptError(tls, 72978) + return Xsqlite3CorruptError(tls, 72982) } else if btreePagecount(tls, pBt) == Pgno(0) { iTable = Pgno(0) } @@ -76428,14 +76430,14 @@ var pBt uintptr = (*BtCursor)(unsafe.Pointer(pCur)).FpBt if int32((*BtCursor)(unsafe.Pointer(pCur)).Fix) >= int32((*MemPage)(unsafe.Pointer(pPage)).FnCell) { - return Xsqlite3CorruptError(tls, 73385) + return Xsqlite3CorruptError(tls, 73389) } getCellInfo(tls, pCur) aPayload = (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload if Uptr((int64(aPayload)-int64((*MemPage)(unsafe.Pointer(pPage)).FaData))/1) > Uptr((*BtShared)(unsafe.Pointer(pBt)).FusableSize-U32((*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal)) { - return Xsqlite3CorruptError(tls, 73400) + return Xsqlite3CorruptError(tls, 73404) } if offset < U32((*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal) { @@ -76480,7 +76482,7 @@ for *(*Pgno)(unsafe.Pointer(bp)) != 0 { if *(*Pgno)(unsafe.Pointer(bp)) > (*BtShared)(unsafe.Pointer(pBt)).FnPage { - return Xsqlite3CorruptError(tls, 73462) + return Xsqlite3CorruptError(tls, 73466) } *(*Pgno)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pCur)).FaOverflow + uintptr(iIdx)*4)) = *(*Pgno)(unsafe.Pointer(bp)) @@ -76529,7 +76531,7 @@ } if rc == SQLITE_OK && amt > U32(0) { - return Xsqlite3CorruptError(tls, 73547) + return Xsqlite3CorruptError(tls, 73551) } return rc } @@ -76609,7 +76611,7 @@ func moveToChild(tls *libc.TLS, pCur uintptr, newPgno U32) int32 { if int32((*BtCursor)(unsafe.Pointer(pCur)).FiPage) >= BTCURSOR_MAX_DEPTH-1 { - return Xsqlite3CorruptError(tls, 73684) + return Xsqlite3CorruptError(tls, 73688) } (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnSize = U16(0) *(*U8)(unsafe.Pointer(pCur + 1)) &= libc.Uint8FromInt32(libc.CplInt32(BTCF_ValidNKey | BTCF_ValidOvfl)) @@ -76700,7 +76702,7 @@ if !(int32((*MemPage)(unsafe.Pointer(pRoot)).FisInit) == 0 || libc.Bool32((*BtCursor)(unsafe.Pointer(pCur)).FpKeyInfo == uintptr(0)) != int32((*MemPage)(unsafe.Pointer(pRoot)).FintKey)) { goto __11 } - 
return Xsqlite3CorruptError(tls, 73823) + return Xsqlite3CorruptError(tls, 73827) __11: ; skip_init: @@ -76720,7 +76722,7 @@ if !((*MemPage)(unsafe.Pointer(pRoot)).Fpgno != Pgno(1)) { goto __16 } - return Xsqlite3CorruptError(tls, 73835) + return Xsqlite3CorruptError(tls, 73839) __16: ; subpage = Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer(pRoot)).FaData+uintptr(int32((*MemPage)(unsafe.Pointer(pRoot)).FhdrOffset)+8)) @@ -76930,7 +76932,7 @@ if !(pCell >= (*MemPage)(unsafe.Pointer(pPage)).FaDataEnd) { goto __21 } - return Xsqlite3CorruptError(tls, 74077) + return Xsqlite3CorruptError(tls, 74081) __21: ; goto __19 @@ -77134,7 +77136,7 @@ if !!(int32((*MemPage)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pCur)).FpPage)).FisInit) != 0) { goto __4 } - return Xsqlite3CorruptError(tls, 74273) + return Xsqlite3CorruptError(tls, 74277) __4: ; goto bypass_moveto_root @@ -77199,7 +77201,7 @@ if !(nCell < 2 || U32(nCell)/(*BtShared)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pCur)).FpBt)).FusableSize > (*BtShared)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pCur)).FpBt)).FnPage) { goto __17 } - rc = Xsqlite3CorruptError(tls, 74360) + rc = Xsqlite3CorruptError(tls, 74364) goto moveto_index_finish __17: ; @@ -77247,7 +77249,7 @@ if !((*UnpackedRecord)(unsafe.Pointer(pIdxKey)).FerrCode != 0) { goto __24 } - rc = Xsqlite3CorruptError(tls, 74392) + rc = Xsqlite3CorruptError(tls, 74396) __24: ; goto moveto_index_finish @@ -77366,7 +77368,7 @@ pPage = (*BtCursor)(unsafe.Pointer(pCur)).FpPage idx = int32(libc.PreIncUint16(&(*BtCursor)(unsafe.Pointer(pCur)).Fix, 1)) if !(int32((*MemPage)(unsafe.Pointer(pPage)).FisInit) != 0) || Xsqlite3FaultSim(tls, 412) != 0 { - return Xsqlite3CorruptError(tls, 74508) + return Xsqlite3CorruptError(tls, 74512) } if idx >= int32((*MemPage)(unsafe.Pointer(pPage)).FnCell) { @@ -77526,7 +77528,7 @@ if !(n >= mxPage) { goto __1 } - return Xsqlite3CorruptError(tls, 74688) + return Xsqlite3CorruptError(tls, 74692) __1: ; if !(n > U32(0)) { @@ -77591,7 +77593,7 @@ if !(iTrunk > mxPage || libc.PostIncUint32(&nSearch, 1) > n) { goto __16 } - rc = Xsqlite3CorruptError(tls, 74744) + rc = Xsqlite3CorruptError(tls, 74748) goto __17 __16: rc = btreeGetUnusedPage(tls, pBt, iTrunk, bp+8, 0) @@ -77627,7 +77629,7 @@ goto __22 } - rc = Xsqlite3CorruptError(tls, 74773) + rc = Xsqlite3CorruptError(tls, 74777) goto end_allocate_page goto __23 __22: @@ -77671,7 +77673,7 @@ if !(iNewTrunk > mxPage) { goto __32 } - rc = Xsqlite3CorruptError(tls, 74807) + rc = Xsqlite3CorruptError(tls, 74811) goto end_allocate_page __32: ; @@ -77783,7 +77785,7 @@ if !(iPage > mxPage || iPage < Pgno(2)) { goto __51 } - rc = Xsqlite3CorruptError(tls, 74872) + rc = Xsqlite3CorruptError(tls, 74876) goto end_allocate_page __51: ; @@ -77941,7 +77943,7 @@ if !(iPage < Pgno(2) || iPage > (*BtShared)(unsafe.Pointer(pBt)).FnPage) { goto __1 } - return Xsqlite3CorruptError(tls, 74999) + return Xsqlite3CorruptError(tls, 75003) __1: ; if !(pMemPage != 0) { @@ -77998,7 +78000,7 @@ if !(iTrunk > btreePagecount(tls, pBt)) { goto __10 } - *(*int32)(unsafe.Pointer(bp + 8)) = Xsqlite3CorruptError(tls, 75046) + *(*int32)(unsafe.Pointer(bp + 8)) = Xsqlite3CorruptError(tls, 75050) goto freepage_out __10: ; @@ -78014,7 +78016,7 @@ if !(nLeaf > (*BtShared)(unsafe.Pointer(pBt)).FusableSize/U32(4)-U32(2)) { goto __12 } - *(*int32)(unsafe.Pointer(bp + 8)) = Xsqlite3CorruptError(tls, 75057) + *(*int32)(unsafe.Pointer(bp + 8)) = Xsqlite3CorruptError(tls, 75061) goto freepage_out __12: ; @@ -78088,7 +78090,7 @@ var ovflPageSize U32 if 
pCell+uintptr((*CellInfo)(unsafe.Pointer(pInfo)).FnSize) > (*MemPage)(unsafe.Pointer(pPage)).FaDataEnd { - return Xsqlite3CorruptError(tls, 75146) + return Xsqlite3CorruptError(tls, 75150) } ovflPgno = Xsqlite3Get4byte(tls, pCell+uintptr((*CellInfo)(unsafe.Pointer(pInfo)).FnSize)-uintptr(4)) pBt = (*MemPage)(unsafe.Pointer(pPage)).FpBt @@ -78100,7 +78102,7 @@ *(*Pgno)(unsafe.Pointer(bp + 8)) = Pgno(0) *(*uintptr)(unsafe.Pointer(bp)) = uintptr(0) if ovflPgno < Pgno(2) || ovflPgno > btreePagecount(tls, pBt) { - return Xsqlite3CorruptError(tls, 75163) + return Xsqlite3CorruptError(tls, 75167) } if nOvfl != 0 { rc = getOverflowPage(tls, pBt, ovflPgno, bp, bp+8) @@ -78111,7 +78113,7 @@ if (*(*uintptr)(unsafe.Pointer(bp)) != 0 || libc.AssignPtrUintptr(bp, btreePageLookup(tls, pBt, ovflPgno)) != uintptr(0)) && Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FpDbPage) != 1 { - rc = Xsqlite3CorruptError(tls, 75183) + rc = Xsqlite3CorruptError(tls, 75187) } else { rc = freePage2(tls, pBt, *(*uintptr)(unsafe.Pointer(bp)), ovflPgno) } @@ -78276,7 +78278,7 @@ hdr = int32((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset) if pc+U32(sz) > (*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize { - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 75436) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 75440) return } rc = freeSpace(tls, pPage, uint16(pc), uint16(sz)) @@ -78555,12 +78557,12 @@ if Uptr(pCell) >= Uptr(aData+uintptr(j)) && Uptr(pCell) < Uptr(pEnd) { if Uptr(pCell+uintptr(sz)) > Uptr(pEnd) { - return Xsqlite3CorruptError(tls, 75737) + return Xsqlite3CorruptError(tls, 75741) } pCell = pTmp + uintptr((int64(pCell)-int64(aData))/1) } else if Uptr(pCell+uintptr(sz)) > Uptr(pSrcEnd) && Uptr(pCell) < Uptr(pSrcEnd) { - return Xsqlite3CorruptError(tls, 75742) + return Xsqlite3CorruptError(tls, 75746) } pData -= uintptr(sz) @@ -78568,7 +78570,7 @@ *(*U8)(unsafe.Pointer(pCellptr + 1)) = U8((int64(pData) - int64(aData)) / 1) pCellptr += uintptr(2) if pData < pCellptr { - return Xsqlite3CorruptError(tls, 75748) + return Xsqlite3CorruptError(tls, 75752) } libc.Xmemmove(tls, pData, pCell, uint64(sz)) @@ -78628,7 +78630,7 @@ if Uptr(*(*uintptr)(unsafe.Pointer((*CellArray)(unsafe.Pointer(pCArray)).FapCell + uintptr(i)*8))+uintptr(sz)) > Uptr(pEnd) && Uptr(*(*uintptr)(unsafe.Pointer((*CellArray)(unsafe.Pointer(pCArray)).FapCell + uintptr(i)*8))) < Uptr(pEnd) { - Xsqlite3CorruptError(tls, 75833) + Xsqlite3CorruptError(tls, 75837) return 1 } libc.Xmemmove(tls, pSlot, *(*uintptr)(unsafe.Pointer((*CellArray)(unsafe.Pointer(pCArray)).FapCell + uintptr(i)*8)), uint64(sz)) @@ -78717,7 +78719,7 @@ if !(nShift > nCell) { goto __2 } - return Xsqlite3CorruptError(tls, 75947) + return Xsqlite3CorruptError(tls, 75951) __2: ; libc.Xmemmove(tls, (*MemPage)(unsafe.Pointer(pPg)).FaCellIdx, (*MemPage)(unsafe.Pointer(pPg)).FaCellIdx+uintptr(nShift*2), uint64(nCell*2)) @@ -78833,7 +78835,7 @@ var pBt uintptr = (*MemPage)(unsafe.Pointer(pPage)).FpBt if int32((*MemPage)(unsafe.Pointer(pPage)).FnCell) == 0 { - return Xsqlite3CorruptError(tls, 76060) + return Xsqlite3CorruptError(tls, 76064) } *(*int32)(unsafe.Pointer(bp + 136)) = allocateBtreePage(tls, pBt, bp, bp+8, uint32(0), uint8(0)) @@ -79153,7 +79155,7 @@ if !(int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pOld)).FaData))) != int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp + 112)))).FaData)))) { goto __25 } - *(*int32)(unsafe.Pointer(bp + 172)) = 
Xsqlite3CorruptError(tls, 76481) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76485) goto balance_cleanup __25: ; @@ -79164,7 +79166,7 @@ if !(limit < int32(*(*U16)(unsafe.Pointer(pOld + 28)))) { goto __27 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76505) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76509) goto balance_cleanup __27: ; @@ -79322,7 +79324,7 @@ if !(k > NB+2) { goto __55 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76606) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76610) goto balance_cleanup __55: ; @@ -79396,7 +79398,7 @@ }()) { goto __67 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76639) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76643) goto balance_cleanup __67: ; @@ -79459,7 +79461,7 @@ }()) { goto __75 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76683) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76687) goto balance_cleanup __75: ; @@ -79487,7 +79489,7 @@ *(*int32)(unsafe.Pointer(bp + 172)) == SQLITE_OK) { goto __81 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76716) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76720) __81: ; if !(*(*int32)(unsafe.Pointer(bp + 172)) != 0) { @@ -79748,7 +79750,7 @@ if !(Uptr(pSrcEnd) >= Uptr(pCell1) && Uptr(pSrcEnd) < Uptr(pCell1+uintptr(sz2))) { goto __121 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76916) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76920) goto balance_cleanup __121: ; @@ -79940,7 +79942,7 @@ if pOther != pCur && int32((*BtCursor)(unsafe.Pointer(pOther)).FeState) == CURSOR_VALID && (*BtCursor)(unsafe.Pointer(pOther)).FpPage == (*BtCursor)(unsafe.Pointer(pCur)).FpPage { - return Xsqlite3CorruptError(tls, 77146) + return Xsqlite3CorruptError(tls, 77150) } } return SQLITE_OK @@ -79978,7 +79980,7 @@ break } } else if Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(pPage)).FpDbPage) > 1 { - rc = Xsqlite3CorruptError(tls, 77206) + rc = Xsqlite3CorruptError(tls, 77210) } else { var pParent uintptr = *(*uintptr)(unsafe.Pointer(pCur + 144 + uintptr(iPage-1)*8)) var iIdx int32 = int32(*(*U16)(unsafe.Pointer(pCur + 88 + uintptr(iPage-1)*2))) @@ -80084,7 +80086,7 @@ return rc } if Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FpDbPage) != 1 || (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FisInit != 0 { - rc = Xsqlite3CorruptError(tls, 77370) + rc = Xsqlite3CorruptError(tls, 77374) } else { if U32(iOffset)+ovflPageSize < U32(nTotal) { ovflPgno = Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FaData) @@ -80109,7 +80111,7 @@ if (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload+uintptr((*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal) > (*MemPage)(unsafe.Pointer(pPage)).FaDataEnd || (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload < (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr((*MemPage)(unsafe.Pointer(pPage)).FcellOffset) { - return Xsqlite3CorruptError(tls, 77398) + return Xsqlite3CorruptError(tls, 77402) } if int32((*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal) == nTotal { return btreeOverwriteContent(tls, pPage, (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload, pX, @@ -80179,7 +80181,7 @@ goto __3 } - return Xsqlite3CorruptError(tls, 77479) + return Xsqlite3CorruptError(tls, 77483) __3: ; __1: @@ -80292,7 +80294,7 @@ goto __21 } - 
*(*int32)(unsafe.Pointer(bp + 120)) = Xsqlite3CorruptError(tls, 77602) + *(*int32)(unsafe.Pointer(bp + 120)) = Xsqlite3CorruptError(tls, 77606) goto __22 __21: *(*int32)(unsafe.Pointer(bp + 120)) = btreeComputeFreeSpace(tls, pPage) @@ -80352,6 +80354,7 @@ __25: ; idx = int32((*BtCursor)(unsafe.Pointer(pCur)).Fix) + (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnSize = U16(0) if !(*(*int32)(unsafe.Pointer(bp)) == 0) { goto __31 } @@ -80359,7 +80362,7 @@ if !(idx >= int32((*MemPage)(unsafe.Pointer(pPage)).FnCell)) { goto __33 } - return Xsqlite3CorruptError(tls, 77640) + return Xsqlite3CorruptError(tls, 77645) __33: ; *(*int32)(unsafe.Pointer(bp + 120)) = Xsqlite3PagerWrite(tls, (*MemPage)(unsafe.Pointer(pPage)).FpDbPage) @@ -80397,13 +80400,13 @@ if !(oldCell < (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset)+uintptr(10)) { goto __39 } - return Xsqlite3CorruptError(tls, 77667) + return Xsqlite3CorruptError(tls, 77672) __39: ; if !(oldCell+uintptr(*(*int32)(unsafe.Pointer(bp + 124))) > (*MemPage)(unsafe.Pointer(pPage)).FaDataEnd) { goto __40 } - return Xsqlite3CorruptError(tls, 77670) + return Xsqlite3CorruptError(tls, 77675) __40: ; libc.Xmemcpy(tls, oldCell, newCell, uint64(*(*int32)(unsafe.Pointer(bp + 124)))) @@ -80434,7 +80437,6 @@ ; *(*int32)(unsafe.Pointer(bp + 120)) = insertCell(tls, pPage, idx, newCell, *(*int32)(unsafe.Pointer(bp + 124)), uintptr(0), uint32(0)) - (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnSize = U16(0) if !((*MemPage)(unsafe.Pointer(pPage)).FnOverflow != 0) { goto __44 } @@ -80509,7 +80511,7 @@ nIn = U32((*BtCursor)(unsafe.Pointer(pSrc)).Finfo.FnLocal) aIn = (*BtCursor)(unsafe.Pointer(pSrc)).Finfo.FpPayload if aIn+uintptr(nIn) > (*MemPage)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pSrc)).FpPage)).FaDataEnd { - return Xsqlite3CorruptError(tls, 77773) + return Xsqlite3CorruptError(tls, 77777) } nRem = (*BtCursor)(unsafe.Pointer(pSrc)).Finfo.FnPayload if nIn == nRem && nIn < U32((*MemPage)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pDest)).FpPage)).FmaxLocal) { @@ -80534,7 +80536,7 @@ if nRem > nIn { if aIn+uintptr(nIn)+uintptr(4) > (*MemPage)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pSrc)).FpPage)).FaDataEnd { - return Xsqlite3CorruptError(tls, 77798) + return Xsqlite3CorruptError(tls, 77802) } ovflIn = Xsqlite3Get4byte(tls, (*BtCursor)(unsafe.Pointer(pSrc)).Finfo.FpPayload+uintptr(nIn)) } @@ -80635,7 +80637,7 @@ return *(*int32)(unsafe.Pointer(bp + 24)) } } else { - return Xsqlite3CorruptError(tls, 77894) + return Xsqlite3CorruptError(tls, 77898) } } @@ -80643,11 +80645,11 @@ iCellIdx = int32((*BtCursor)(unsafe.Pointer(pCur)).Fix) pPage = (*BtCursor)(unsafe.Pointer(pCur)).FpPage if int32((*MemPage)(unsafe.Pointer(pPage)).FnCell) <= iCellIdx { - return Xsqlite3CorruptError(tls, 77903) + return Xsqlite3CorruptError(tls, 77907) } pCell = (*MemPage)(unsafe.Pointer(pPage)).FaData + uintptr(int32((*MemPage)(unsafe.Pointer(pPage)).FmaskPage)&int32(libc.X__builtin_bswap16(tls, *(*U16)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FaCellIdx + uintptr(2*iCellIdx)))))) if (*MemPage)(unsafe.Pointer(pPage)).FnFree < 0 && btreeComputeFreeSpace(tls, pPage) != 0 { - return Xsqlite3CorruptError(tls, 77907) + return Xsqlite3CorruptError(tls, 77911) } bPreserve = U8(libc.Bool32(int32(flags)&BTREE_SAVEPOSITION != 0)) @@ -80722,7 +80724,7 @@ } pCell = (*MemPage)(unsafe.Pointer(pLeaf)).FaData + uintptr(int32((*MemPage)(unsafe.Pointer(pLeaf)).FmaskPage)&int32(libc.X__builtin_bswap16(tls, 
*(*U16)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pLeaf)).FaCellIdx + uintptr(2*(int32((*MemPage)(unsafe.Pointer(pLeaf)).FnCell)-1))))))) if pCell < (*MemPage)(unsafe.Pointer(pLeaf)).FaData+4 { - return Xsqlite3CorruptError(tls, 77998) + return Xsqlite3CorruptError(tls, 78002) } nCell = int32((*struct { f func(*libc.TLS, uintptr, uintptr) U16 @@ -80791,7 +80793,7 @@ Xsqlite3BtreeGetMeta(tls, p, BTREE_LARGEST_ROOT_PAGE, bp) if *(*Pgno)(unsafe.Pointer(bp)) > btreePagecount(tls, pBt) { - return Xsqlite3CorruptError(tls, 78114) + return Xsqlite3CorruptError(tls, 78118) } *(*Pgno)(unsafe.Pointer(bp))++ @@ -80820,7 +80822,7 @@ } *(*int32)(unsafe.Pointer(bp + 40)) = ptrmapGet(tls, pBt, *(*Pgno)(unsafe.Pointer(bp)), bp+32, bp+36) if int32(*(*U8)(unsafe.Pointer(bp + 32))) == PTRMAP_ROOTPAGE || int32(*(*U8)(unsafe.Pointer(bp + 32))) == PTRMAP_FREEPAGE { - *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3CorruptError(tls, 78162) + *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3CorruptError(tls, 78166) } if *(*int32)(unsafe.Pointer(bp + 40)) != SQLITE_OK { releasePage(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) @@ -80896,7 +80898,7 @@ if !(pgno > btreePagecount(tls, pBt)) { goto __1 } - return Xsqlite3CorruptError(tls, 78252) + return Xsqlite3CorruptError(tls, 78256) __1: ; *(*int32)(unsafe.Pointer(bp + 32)) = getAndInitPage(tls, pBt, pgno, bp, uintptr(0), 0) @@ -80910,7 +80912,7 @@ Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FpDbPage) != 1+libc.Bool32(pgno == Pgno(1))) { goto __3 } - *(*int32)(unsafe.Pointer(bp + 32)) = Xsqlite3CorruptError(tls, 78259) + *(*int32)(unsafe.Pointer(bp + 32)) = Xsqlite3CorruptError(tls, 78263) goto cleardatabasepage_out __3: ; @@ -81044,7 +81046,7 @@ var pBt uintptr = (*Btree)(unsafe.Pointer(p)).FpBt if iTable > btreePagecount(tls, pBt) { - return Xsqlite3CorruptError(tls, 78363) + return Xsqlite3CorruptError(tls, 78367) } *(*int32)(unsafe.Pointer(bp + 12)) = Xsqlite3BtreeClearTable(tls, p, int32(iTable), uintptr(0)) @@ -83514,7 +83516,7 @@ var rc int32 (*Mem)(unsafe.Pointer(pMem)).Fflags = U16(MEM_Null) if Xsqlite3BtreeMaxRecordSize(tls, pCur) < Sqlite3_int64(offset+amt) { - return Xsqlite3CorruptError(tls, 81630) + return Xsqlite3CorruptError(tls, 81634) } if SQLITE_OK == libc.AssignInt32(&rc, Xsqlite3VdbeMemClearAndResize(tls, pMem, int32(amt+U32(1)))) { rc = Xsqlite3BtreePayload(tls, pCur, offset, amt, (*Mem)(unsafe.Pointer(pMem)).Fz) @@ -84163,7 +84165,7 @@ return Xsqlite3GetVarint32(tls, a, bp) }()) if *(*int32)(unsafe.Pointer(bp)) > nRec || iHdr >= *(*int32)(unsafe.Pointer(bp)) { - return Xsqlite3CorruptError(tls, 82270) + return Xsqlite3CorruptError(tls, 82274) } iField = *(*int32)(unsafe.Pointer(bp)) for i = 0; i <= iCol; i++ { @@ -84178,14 +84180,14 @@ }()) if iHdr > *(*int32)(unsafe.Pointer(bp)) { - return Xsqlite3CorruptError(tls, 82276) + return Xsqlite3CorruptError(tls, 82280) } szField = int32(Xsqlite3VdbeSerialTypeLen(tls, *(*U32)(unsafe.Pointer(bp + 4)))) iField = iField + szField } if iField > nRec { - return Xsqlite3CorruptError(tls, 82282) + return Xsqlite3CorruptError(tls, 82286) } if pMem == uintptr(0) { pMem = libc.AssignPtrUintptr(ppVal, Xsqlite3ValueNew(tls, db)) @@ -86489,7 +86491,7 @@ return rc } if *(*int32)(unsafe.Pointer(bp)) != 0 { - return Xsqlite3CorruptError(tls, 86058) + return Xsqlite3CorruptError(tls, 86062) } (*VdbeCursor)(unsafe.Pointer(p)).FdeferredMoveto = U8(0) (*VdbeCursor)(unsafe.Pointer(p)).FcacheStatus = U32(CACHE_STALE) @@ -87040,7 +87042,7 @@ i = 0 } if d1 > uint32(nKey1) { - 
(*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 86985)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 86989)) return 0 } @@ -87105,7 +87107,7 @@ if d1+U32((*Mem)(unsafe.Pointer(bp+8)).Fn) > uint32(nKey1) || int32((*KeyInfo)(unsafe.Pointer(libc.AssignUintptr(&pKeyInfo, (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FpKeyInfo))).FnAllField) <= i { - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87062)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87066)) return 0 } else if *(*uintptr)(unsafe.Pointer(pKeyInfo + 32 + uintptr(i)*8)) != 0 { (*Mem)(unsafe.Pointer(bp + 8)).Fenc = (*KeyInfo)(unsafe.Pointer(pKeyInfo)).Fenc @@ -87139,7 +87141,7 @@ var nStr int32 = int32((*(*U32)(unsafe.Pointer(bp + 64)) - U32(12)) / U32(2)) if d1+U32(nStr) > uint32(nKey1) { - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87092)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87096)) return 0 } else if int32((*Mem)(unsafe.Pointer(pRhs)).Fflags)&MEM_Zero != 0 { if !(isAllZero(tls, aKey1+uintptr(d1), nStr) != 0) { @@ -87189,7 +87191,7 @@ } idx1 = idx1 + U32(Xsqlite3VarintLen(tls, uint64(*(*U32)(unsafe.Pointer(bp + 64))))) if idx1 >= *(*U32)(unsafe.Pointer(bp + 4)) { - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87136)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87140)) return 0 } } @@ -87335,7 +87337,7 @@ if !(szHdr+nStr > nKey1) { goto __7 } - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87299)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87303)) return 0 __7: ; @@ -87506,7 +87508,7 @@ idx_rowid_corruption: ; Xsqlite3VdbeMemReleaseMalloc(tls, bp) - return Xsqlite3CorruptError(tls, 87457) + return Xsqlite3CorruptError(tls, 87461) } // Compare the key of the index entry that cursor pC is pointing to against @@ -87532,7 +87534,7 @@ if nCellKey <= int64(0) || nCellKey > int64(0x7fffffff) { *(*int32)(unsafe.Pointer(res)) = 0 - return Xsqlite3CorruptError(tls, 87490) + return Xsqlite3CorruptError(tls, 87494) } Xsqlite3VdbeMemInit(tls, bp, db, uint16(0)) rc = Xsqlite3VdbeMemFromBtreeZeroOffset(tls, pCur, U32(nCellKey), bp) @@ -87806,7 +87808,7 @@ var v uintptr = pStmt var db uintptr = (*Vdbe)(unsafe.Pointer(v)).Fdb if vdbeSafety(tls, v) != 0 { - return Xsqlite3MisuseError(tls, 87854) + return Xsqlite3MisuseError(tls, 87858) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) if (*Vdbe)(unsafe.Pointer(v)).FstartTime > int64(0) { @@ -88421,7 +88423,7 @@ var db uintptr if vdbeSafetyNotNull(tls, v) != 0 { - return Xsqlite3MisuseError(tls, 88544) + return Xsqlite3MisuseError(tls, 88548) } db = (*Vdbe)(unsafe.Pointer(v)).Fdb Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) @@ -88941,7 +88943,7 @@ var pVar uintptr if vdbeSafetyNotNull(tls, p) != 0 { - return Xsqlite3MisuseError(tls, 89208) + return Xsqlite3MisuseError(tls, 89212) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer((*Vdbe)(unsafe.Pointer(p)).Fdb)).Fmutex) if int32((*Vdbe)(unsafe.Pointer(p)).FeVdbeState) != VDBE_READY_STATE { @@ -88949,7 +88951,7 @@ Xsqlite3_mutex_leave(tls, (*Sqlite3)(unsafe.Pointer((*Vdbe)(unsafe.Pointer(p)).Fdb)).Fmutex) Xsqlite3_log(tls, SQLITE_MISUSE, ts+6709, libc.VaList(bp, (*Vdbe)(unsafe.Pointer(p)).FzSql)) - return Xsqlite3MisuseError(tls, 89216) 
+ return Xsqlite3MisuseError(tls, 89220) } if i >= uint32((*Vdbe)(unsafe.Pointer(p)).FnVar) { Xsqlite3Error(tls, (*Vdbe)(unsafe.Pointer(p)).Fdb, SQLITE_RANGE) @@ -89354,7 +89356,7 @@ if !(!(p != 0) || (*PreUpdate)(unsafe.Pointer(p)).Fop == SQLITE_INSERT) { goto __1 } - rc = Xsqlite3MisuseError(tls, 89707) + rc = Xsqlite3MisuseError(tls, 89711) goto preupdate_old_out __1: ; @@ -89498,7 +89500,7 @@ if !(!(p != 0) || (*PreUpdate)(unsafe.Pointer(p)).Fop == SQLITE_DELETE) { goto __1 } - rc = Xsqlite3MisuseError(tls, 89809) + rc = Xsqlite3MisuseError(tls, 89813) goto preupdate_new_out __1: ; @@ -89942,10 +89944,6 @@ } else if int32((*Mem)(unsafe.Pointer(p)).Fflags)&MEM_Real != 0 { h = h + U64(Xsqlite3VdbeIntValue(tls, p)) } else if int32((*Mem)(unsafe.Pointer(p)).Fflags)&(MEM_Str|MEM_Blob) != 0 { - h = h + U64((*Mem)(unsafe.Pointer(p)).Fn) - if int32((*Mem)(unsafe.Pointer(p)).Fflags)&MEM_Zero != 0 { - h = h + U64(*(*int32)(unsafe.Pointer(p))) - } } } return h @@ -92593,7 +92591,7 @@ goto __9 goto __425 __424: - rc = Xsqlite3CorruptError(tls, 93317) + rc = Xsqlite3CorruptError(tls, 93320) goto abort_due_to_error __425: ; @@ -94353,7 +94351,7 @@ if !((*Op)(unsafe.Pointer(pOp)).Fp2 == 0) { goto __682 } - rc = Xsqlite3CorruptError(tls, 95560) + rc = Xsqlite3CorruptError(tls, 95563) goto __683 __682: goto jump_to_p2 @@ -95131,7 +95129,7 @@ if !((*Op)(unsafe.Pointer(pOp)).Fp5 != 0 && !(Xsqlite3WritableSchema(tls, db) != 0)) { goto __770 } - rc = Xsqlite3ReportError(tls, SQLITE_CORRUPT|int32(3)<<8, 96635, ts+7218) + rc = Xsqlite3ReportError(tls, SQLITE_CORRUPT|int32(3)<<8, 96638, ts+7218) goto abort_due_to_error __770: ; @@ -95241,7 +95239,7 @@ if !(nCellKey <= int64(0) || nCellKey > int64(0x7fffffff)) { goto __781 } - rc = Xsqlite3CorruptError(tls, 96840) + rc = Xsqlite3CorruptError(tls, 96843) goto abort_due_to_error __781: ; @@ -95435,7 +95433,7 @@ goto __803 } - rc = Xsqlite3CorruptError(tls, 97092) + rc = Xsqlite3CorruptError(tls, 97095) __803: ; Xsqlite3DbFreeNN(tls, db, zSql) @@ -96802,7 +96800,7 @@ if !(rc == SQLITE_IOERR|int32(33)<<8) { goto __957 } - rc = Xsqlite3CorruptError(tls, 99031) + rc = Xsqlite3CorruptError(tls, 99034) __957: ; __956: @@ -97322,7 +97320,7 @@ var db uintptr if p == uintptr(0) { - return Xsqlite3MisuseError(tls, 99516) + return Xsqlite3MisuseError(tls, 99519) } db = (*Incrblob)(unsafe.Pointer(p)).Fdb Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) @@ -97405,7 +97403,7 @@ var db uintptr if p == uintptr(0) { - return Xsqlite3MisuseError(tls, 99616) + return Xsqlite3MisuseError(tls, 99619) } db = (*Incrblob)(unsafe.Pointer(p)).Fdb Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) @@ -100845,14 +100843,10 @@ ; Xsqlite3WalkExpr(tls, pWalker, (*Expr)(unsafe.Pointer(pExpr)).FpLeft) if 0 == Xsqlite3ExprCanBeNull(tls, (*Expr)(unsafe.Pointer(pExpr)).FpLeft) && !(int32((*Parse)(unsafe.Pointer(pParse)).FeParseMode) >= PARSE_MODE_RENAME) { - if int32((*Expr)(unsafe.Pointer(pExpr)).Fop) == TK_NOTNULL { - *(*uintptr)(unsafe.Pointer(pExpr + 8)) = ts + 8116 - *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_IsTrue) - } else { - *(*uintptr)(unsafe.Pointer(pExpr + 8)) = ts + 8121 - *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_IsFalse) - } - (*Expr)(unsafe.Pointer(pExpr)).Fop = U8(TK_TRUEFALSE) + *(*int32)(unsafe.Pointer(pExpr + 8)) = libc.Bool32(int32((*Expr)(unsafe.Pointer(pExpr)).Fop) == TK_NOTNULL) + *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_IntValue) + (*Expr)(unsafe.Pointer(pExpr)).Fop = U8(TK_INTEGER) + i = 0 p = pNC __4: @@ -100896,7 +100890,7 @@ var 
pLeft uintptr = (*Expr)(unsafe.Pointer(pExpr)).FpLeft if (*NameContext)(unsafe.Pointer(pNC)).FncFlags&(NC_IdxExpr|NC_GenCol) != 0 { - notValidImpl(tls, pParse, pNC, ts+8127, uintptr(0), pExpr) + notValidImpl(tls, pParse, pNC, ts+8116, uintptr(0), pExpr) } pRight = (*Expr)(unsafe.Pointer(pExpr)).FpRight @@ -100960,7 +100954,7 @@ (*Expr)(unsafe.Pointer(pExpr)).FiTable = exprProbability(tls, (*ExprList_item)(unsafe.Pointer(pList+8+1*32)).FpExpr) if (*Expr)(unsafe.Pointer(pExpr)).FiTable < 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+8144, libc.VaList(bp, pExpr)) + ts+8133, libc.VaList(bp, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } } else { @@ -100976,7 +100970,7 @@ var auth int32 = Xsqlite3AuthCheck(tls, pParse, SQLITE_FUNCTION, uintptr(0), (*FuncDef)(unsafe.Pointer(pDef)).FzName, uintptr(0)) if auth != SQLITE_OK { if auth == SQLITE_DENY { - Xsqlite3ErrorMsg(tls, pParse, ts+8208, + Xsqlite3ErrorMsg(tls, pParse, ts+8197, libc.VaList(bp+8, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } @@ -100990,7 +100984,7 @@ } if (*FuncDef)(unsafe.Pointer(pDef)).FfuncFlags&U32(SQLITE_FUNC_CONSTANT) == U32(0) { if (*NameContext)(unsafe.Pointer(pNC)).FncFlags&(NC_IdxExpr|NC_PartIdx|NC_GenCol) != 0 { - notValidImpl(tls, pParse, pNC, ts+8244, uintptr(0), pExpr) + notValidImpl(tls, pParse, pNC, ts+8233, uintptr(0), pExpr) } } else { @@ -101013,30 +101007,30 @@ if 0 == libc.Bool32(int32((*Parse)(unsafe.Pointer(pParse)).FeParseMode) >= PARSE_MODE_RENAME) { if pDef != 0 && (*FuncDef)(unsafe.Pointer(pDef)).FxValue == uintptr(0) && pWin != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+8272, libc.VaList(bp+16, pExpr)) + ts+8261, libc.VaList(bp+16, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } else if is_agg != 0 && (*NameContext)(unsafe.Pointer(pNC)).FncFlags&NC_AllowAgg == 0 || is_agg != 0 && (*FuncDef)(unsafe.Pointer(pDef)).FfuncFlags&U32(SQLITE_FUNC_WINDOW) != 0 && !(pWin != 0) || is_agg != 0 && pWin != 0 && (*NameContext)(unsafe.Pointer(pNC)).FncFlags&NC_AllowWin == 0 { var zType uintptr if (*FuncDef)(unsafe.Pointer(pDef)).FfuncFlags&U32(SQLITE_FUNC_WINDOW) != 0 || pWin != 0 { - zType = ts + 8315 + zType = ts + 8304 } else { - zType = ts + 8322 + zType = ts + 8311 } - Xsqlite3ErrorMsg(tls, pParse, ts+8332, libc.VaList(bp+24, zType, pExpr)) + Xsqlite3ErrorMsg(tls, pParse, ts+8321, libc.VaList(bp+24, zType, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ is_agg = 0 } else if no_such_func != 0 && int32((*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).Finit.Fbusy) == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+8360, libc.VaList(bp+40, pExpr)) + Xsqlite3ErrorMsg(tls, pParse, ts+8349, libc.VaList(bp+40, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } else if wrong_num_args != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+8382, + Xsqlite3ErrorMsg(tls, pParse, ts+8371, libc.VaList(bp+48, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } else if is_agg == 0 && (*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_WinFunc) != U32(0) { Xsqlite3ErrorMsg(tls, pParse, - ts+8426, + ts+8415, libc.VaList(bp+56, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } @@ -101108,15 +101102,15 @@ var nRef int32 = (*NameContext)(unsafe.Pointer(pNC)).FnRef if (*NameContext)(unsafe.Pointer(pNC)).FncFlags&NC_SelfRef != 0 { - notValidImpl(tls, pParse, pNC, ts+8474, pExpr, pExpr) + notValidImpl(tls, pParse, pNC, ts+8463, pExpr, pExpr) } else { Xsqlite3WalkSelect(tls, pWalker, *(*uintptr)(unsafe.Pointer(pExpr + 32))) } if nRef != (*NameContext)(unsafe.Pointer(pNC)).FnRef { *(*U32)(unsafe.Pointer(pExpr + 4)) 
|= U32(EP_VarSelect) - *(*int32)(unsafe.Pointer(pNC + 40)) |= NC_VarSelect } + *(*int32)(unsafe.Pointer(pNC + 40)) |= NC_Subquery } break @@ -101124,7 +101118,7 @@ case TK_VARIABLE: { if (*NameContext)(unsafe.Pointer(pNC)).FncFlags&(NC_IsCheck|NC_PartIdx|NC_IdxExpr|NC_GenCol) != 0 { - notValidImpl(tls, pParse, pNC, ts+8485, pExpr, pExpr) + notValidImpl(tls, pParse, pNC, ts+8474, pExpr, pExpr) } break @@ -101255,7 +101249,7 @@ defer tls.Free(24) Xsqlite3ErrorMsg(tls, pParse, - ts+8496, libc.VaList(bp, i, zType, mx)) + ts+8485, libc.VaList(bp, i, zType, mx)) Xsqlite3RecordErrorOffsetOfExpr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pError) } @@ -101275,7 +101269,7 @@ } db = (*Parse)(unsafe.Pointer(pParse)).Fdb if (*ExprList)(unsafe.Pointer(pOrderBy)).FnExpr > *(*int32)(unsafe.Pointer(db + 136 + 2*4)) { - Xsqlite3ErrorMsg(tls, pParse, ts+8552, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+8541, 0) return 1 } for i = 0; i < (*ExprList)(unsafe.Pointer(pOrderBy)).FnExpr; i++ { @@ -101310,7 +101304,7 @@ } if Xsqlite3ExprIsInteger(tls, pE, bp+8) != 0 { if *(*int32)(unsafe.Pointer(bp + 8)) <= 0 || *(*int32)(unsafe.Pointer(bp + 8)) > (*ExprList)(unsafe.Pointer(pEList)).FnExpr { - resolveOutOfRangeError(tls, pParse, ts+8586, i+1, (*ExprList)(unsafe.Pointer(pEList)).FnExpr, pE) + resolveOutOfRangeError(tls, pParse, ts+8575, i+1, (*ExprList)(unsafe.Pointer(pEList)).FnExpr, pE) return 1 } } else { @@ -101367,7 +101361,7 @@ for i = 0; i < (*ExprList)(unsafe.Pointer(pOrderBy)).FnExpr; i++ { if int32(*(*uint16)(unsafe.Pointer(pOrderBy + 8 + uintptr(i)*32 + 16 + 4))&0x4>>2) == 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+8592, libc.VaList(bp, i+1)) + ts+8581, libc.VaList(bp, i+1)) return 1 } } @@ -101395,7 +101389,7 @@ return 0 } if (*ExprList)(unsafe.Pointer(pOrderBy)).FnExpr > *(*int32)(unsafe.Pointer(db + 136 + 2*4)) { - Xsqlite3ErrorMsg(tls, pParse, ts+8653, libc.VaList(bp, zType)) + Xsqlite3ErrorMsg(tls, pParse, ts+8642, libc.VaList(bp, zType)) return 1 } pEList = (*Select)(unsafe.Pointer(pSelect)).FpEList @@ -101609,7 +101603,7 @@ *(*int32)(unsafe.Pointer(bp + 40)) |= NC_UEList if (*Select)(unsafe.Pointer(p)).FpHaving != 0 { if (*Select)(unsafe.Pointer(p)).FselFlags&U32(SF_Aggregate) == U32(0) { - Xsqlite3ErrorMsg(tls, pParse, ts+8684, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+8673, 0) return WRC_Abort } if Xsqlite3ResolveExprNames(tls, bp, (*Select)(unsafe.Pointer(p)).FpHaving) != 0 { @@ -101649,7 +101643,7 @@ if (*Select)(unsafe.Pointer(p)).FpOrderBy != uintptr(0) && isCompound <= nCompound && - resolveOrderGroupBy(tls, bp, p, (*Select)(unsafe.Pointer(p)).FpOrderBy, ts+8586) != 0 { + resolveOrderGroupBy(tls, bp, p, (*Select)(unsafe.Pointer(p)).FpOrderBy, ts+8575) != 0 { return WRC_Abort } if (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { @@ -101660,7 +101654,7 @@ if pGroupBy != 0 { var pItem uintptr - if resolveOrderGroupBy(tls, bp, p, pGroupBy, ts+8723) != 0 || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { + if resolveOrderGroupBy(tls, bp, p, pGroupBy, ts+8712) != 0 || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { return WRC_Abort } i = 0 @@ -101672,7 +101666,7 @@ { if (*Expr)(unsafe.Pointer((*ExprList_item)(unsafe.Pointer(pItem)).FpExpr)).Fflags&U32(EP_Agg) != U32(0) { Xsqlite3ErrorMsg(tls, pParse, - ts+8729, 0) + ts+8718, 0) return WRC_Abort } @@ -102536,7 +102530,7 @@ var mxHeight int32 = *(*int32)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb + 136 + 3*4)) if nHeight > mxHeight { Xsqlite3ErrorMsg(tls, pParse, - ts+8788, libc.VaList(bp, mxHeight)) + ts+8777, libc.VaList(bp, mxHeight)) rc = 
SQLITE_ERROR } return rc @@ -102785,10 +102779,10 @@ nExprElem = 1 } if nExprElem != nElem { - Xsqlite3ErrorMsg(tls, pParse, ts+8836, + Xsqlite3ErrorMsg(tls, pParse, ts+8825, libc.VaList(bp, nExprElem, func() uintptr { if nExprElem > 1 { - return ts + 8880 + return ts + 8869 } return ts + 1544 }(), nElem)) @@ -102829,7 +102823,7 @@ !(int32((*Parse)(unsafe.Pointer(pParse)).FeParseMode) >= PARSE_MODE_RENAME) { Xsqlite3ExprDeferredDelete(tls, pParse, pLeft) Xsqlite3ExprDeferredDelete(tls, pParse, pRight) - return Xsqlite3Expr(tls, db, TK_INTEGER, ts+8882) + return Xsqlite3Expr(tls, db, TK_INTEGER, ts+8871) } else { return Xsqlite3PExpr(tls, pParse, TK_AND, pLeft, pRight) } @@ -102855,7 +102849,7 @@ if pList != 0 && (*ExprList)(unsafe.Pointer(pList)).FnExpr > *(*int32)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb + 136 + 6*4)) && !(int32((*Parse)(unsafe.Pointer(pParse)).Fnested) != 0) { - Xsqlite3ErrorMsg(tls, pParse, ts+8884, libc.VaList(bp, pToken)) + Xsqlite3ErrorMsg(tls, pParse, ts+8873, libc.VaList(bp, pToken)) } *(*uintptr)(unsafe.Pointer(pNew + 32)) = pList *(*U32)(unsafe.Pointer(pNew + 4)) |= U32(EP_HasFunc) @@ -102883,7 +102877,7 @@ if (*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_FromDDL) != U32(0) { if (*FuncDef)(unsafe.Pointer(pDef)).FfuncFlags&U32(SQLITE_FUNC_DIRECT) != U32(0) || (*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).Fflags&uint64(SQLITE_TrustedSchema) == uint64(0) { - Xsqlite3ErrorMsg(tls, pParse, ts+8918, libc.VaList(bp, pExpr)) + Xsqlite3ErrorMsg(tls, pParse, ts+8907, libc.VaList(bp, pExpr)) } } } @@ -102930,7 +102924,7 @@ } if bOk == 0 || *(*I64)(unsafe.Pointer(bp + 8)) < int64(1) || *(*I64)(unsafe.Pointer(bp + 8)) > I64(*(*int32)(unsafe.Pointer(db + 136 + 9*4))) { - Xsqlite3ErrorMsg(tls, pParse, ts+8938, + Xsqlite3ErrorMsg(tls, pParse, ts+8927, libc.VaList(bp, *(*int32)(unsafe.Pointer(db + 136 + 9*4)))) Xsqlite3RecordErrorOffsetOfExpr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pExpr) return @@ -102955,7 +102949,7 @@ } (*Expr)(unsafe.Pointer(pExpr)).FiColumn = x if int32(x) > *(*int32)(unsafe.Pointer(db + 136 + 9*4)) { - Xsqlite3ErrorMsg(tls, pParse, ts+8981, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+8970, 0) Xsqlite3RecordErrorOffsetOfExpr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pExpr) } } @@ -103530,7 +103524,7 @@ if !(int32((*Expr)(unsafe.Pointer(pExpr)).Fop) != TK_SELECT && (*IdList)(unsafe.Pointer(pColumns)).FnId != libc.AssignInt32(&n, Xsqlite3ExprVectorSize(tls, pExpr))) { goto __3 } - Xsqlite3ErrorMsg(tls, pParse, ts+9004, + Xsqlite3ErrorMsg(tls, pParse, ts+8993, libc.VaList(bp, (*IdList)(unsafe.Pointer(pColumns)).FnId, n)) goto vector_append_error __3: @@ -103653,7 +103647,7 @@ var mx int32 = *(*int32)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb + 136 + 2*4)) if pEList != 0 && (*ExprList)(unsafe.Pointer(pEList)).FnExpr > mx { - Xsqlite3ErrorMsg(tls, pParse, ts+9034, libc.VaList(bp, zObject)) + Xsqlite3ErrorMsg(tls, pParse, ts+9023, libc.VaList(bp, zObject)) } } @@ -103709,10 +103703,10 @@ // "false" EP_IsFalse // anything else 0 func Xsqlite3IsTrueOrFalse(tls *libc.TLS, zIn uintptr) U32 { - if Xsqlite3StrICmp(tls, zIn, ts+8116) == 0 { + if Xsqlite3StrICmp(tls, zIn, ts+9046) == 0 { return U32(EP_IsTrue) } - if Xsqlite3StrICmp(tls, zIn, ts+8121) == 0 { + if Xsqlite3StrICmp(tls, zIn, ts+9051) == 0 { return U32(EP_IsFalse) } return U32(0) @@ -104786,7 +104780,7 @@ } if (*Select)(unsafe.Pointer(pSel)).FpLimit != 0 { var db uintptr = (*Parse)(unsafe.Pointer(pParse)).Fdb - pLimit = Xsqlite3Expr(tls, db, TK_INTEGER, ts+8882) + pLimit = 
Xsqlite3Expr(tls, db, TK_INTEGER, ts+8871) if pLimit != 0 { (*Expr)(unsafe.Pointer(pLimit)).FaffExpr = int8(SQLITE_AFF_NUMERIC) pLimit = Xsqlite3PExpr(tls, pParse, TK_NE, @@ -105224,6 +105218,7 @@ func Xsqlite3ExprCodeGeneratedColumn(tls *libc.TLS, pParse uintptr, pTab uintptr, pCol uintptr, regOut int32) { var iAddr int32 var v uintptr = (*Parse)(unsafe.Pointer(pParse)).FpVdbe + var nErr int32 = (*Parse)(unsafe.Pointer(pParse)).FnErr if (*Parse)(unsafe.Pointer(pParse)).FiSelfTab > 0 { iAddr = Xsqlite3VdbeAddOp3(tls, v, OP_IfNullRow, (*Parse)(unsafe.Pointer(pParse)).FiSelfTab-1, 0, regOut) @@ -105237,6 +105232,9 @@ if iAddr != 0 { Xsqlite3VdbeJumpHere(tls, v, iAddr) } + if (*Parse)(unsafe.Pointer(pParse)).FnErr > nErr { + (*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).FerrByteOffset = -1 + } } // Generate code to extract the value of the iCol-th column of a table. @@ -105455,6 +105453,7 @@ var p uintptr var v uintptr for p = (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr; p != 0; p = (*IndexedExpr)(unsafe.Pointer(p)).FpIENext { + var exprAff U8 var iDataCur int32 = (*IndexedExpr)(unsafe.Pointer(p)).FiDataCur if iDataCur < 0 { continue @@ -105468,6 +105467,14 @@ if Xsqlite3ExprCompare(tls, uintptr(0), pExpr, (*IndexedExpr)(unsafe.Pointer(p)).FpExpr, iDataCur) != 0 { continue } + + exprAff = U8(Xsqlite3ExprAffinity(tls, pExpr)) + if int32(exprAff) <= SQLITE_AFF_BLOB && int32((*IndexedExpr)(unsafe.Pointer(p)).Faff) != SQLITE_AFF_BLOB || + int32(exprAff) == SQLITE_AFF_TEXT && int32((*IndexedExpr)(unsafe.Pointer(p)).Faff) != SQLITE_AFF_TEXT || + int32(exprAff) >= SQLITE_AFF_NUMERIC && int32((*IndexedExpr)(unsafe.Pointer(p)).Faff) != SQLITE_AFF_NUMERIC { + continue + } + v = (*Parse)(unsafe.Pointer(pParse)).FpVdbe if (*IndexedExpr)(unsafe.Pointer(p)).FbMaybeNullRow != 0 { @@ -106241,7 +106248,7 @@ if !((*Expr)(unsafe.Pointer(pExpr)).FiTable != n1) { goto __122 } - Xsqlite3ErrorMsg(tls, pParse, ts+9004, + Xsqlite3ErrorMsg(tls, pParse, ts+8993, libc.VaList(bp+24, (*Expr)(unsafe.Pointer(pExpr)).FiTable, n1)) __122: ; @@ -106263,11 +106270,10 @@ return target __50: - if !(!((*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_Collate) != U32(0)) && - (*Expr)(unsafe.Pointer(pExpr)).FpLeft != 0 && - int32((*Expr)(unsafe.Pointer((*Expr)(unsafe.Pointer(pExpr)).FpLeft)).Fop) == TK_FUNCTION) { + if !!((*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_Collate) != U32(0)) { goto __123 } + inReg = Xsqlite3ExprCodeTarget(tls, pParse, (*Expr)(unsafe.Pointer(pExpr)).FpLeft, target) if !(inReg != target) { goto __125 @@ -106338,13 +106344,19 @@ ; __127: ; - addrINR = Xsqlite3VdbeAddOp1(tls, v, OP_IfNullRow, (*Expr)(unsafe.Pointer(pExpr)).FiTable) + addrINR = Xsqlite3VdbeAddOp3(tls, v, OP_IfNullRow, (*Expr)(unsafe.Pointer(pExpr)).FiTable, 0, target) (*Parse)(unsafe.Pointer(pParse)).FokConstFactor = U8(0) inReg = Xsqlite3ExprCodeTarget(tls, pParse, (*Expr)(unsafe.Pointer(pExpr)).FpLeft, target) (*Parse)(unsafe.Pointer(pParse)).FokConstFactor = okConstFactor + if !(inReg != target) { + goto __130 + } + Xsqlite3VdbeAddOp2(tls, v, OP_SCopy, inReg, target) + inReg = target +__130: + ; Xsqlite3VdbeJumpHere(tls, v, addrINR) - Xsqlite3VdbeChangeP3(tls, v, addrINR, inReg) goto __5 __56: @@ -106357,15 +106369,15 @@ nExpr = (*ExprList)(unsafe.Pointer(pEList)).FnExpr endLabel = Xsqlite3VdbeMakeLabel(tls, pParse) if !(libc.AssignUintptr(&pX, (*Expr)(unsafe.Pointer(pExpr)).FpLeft) != uintptr(0)) { - goto __130 + goto __131 } pDel = Xsqlite3ExprDup(tls, db1, pX, 0) if !((*Sqlite3)(unsafe.Pointer(db1)).FmallocFailed != 0) { - 
goto __131 + goto __132 } Xsqlite3ExprDelete(tls, db1, pDel) goto __5 -__131: +__132: ; exprToRegister(tls, pDel, exprCodeVector(tls, pParse, pDel, bp+40)) @@ -106375,22 +106387,22 @@ pTest = bp + 120 *(*int32)(unsafe.Pointer(bp + 40)) = 0 -__130: +__131: ; i1 = 0 -__132: +__133: if !(i1 < nExpr-1) { - goto __134 + goto __135 } if !(pX != 0) { - goto __135 + goto __136 } (*Expr)(unsafe.Pointer(bp + 120)).FpRight = (*ExprList_item)(unsafe.Pointer(aListelem + uintptr(i1)*32)).FpExpr - goto __136 -__135: - pTest = (*ExprList_item)(unsafe.Pointer(aListelem + uintptr(i1)*32)).FpExpr + goto __137 __136: + pTest = (*ExprList_item)(unsafe.Pointer(aListelem + uintptr(i1)*32)).FpExpr +__137: ; nextCase = Xsqlite3VdbeMakeLabel(tls, pParse) @@ -106399,21 +106411,21 @@ Xsqlite3ExprCode(tls, pParse, (*ExprList_item)(unsafe.Pointer(aListelem+uintptr(i1+1)*32)).FpExpr, target) Xsqlite3VdbeGoto(tls, v, endLabel) Xsqlite3VdbeResolveLabel(tls, v, nextCase) - goto __133 -__133: - i1 = i1 + 2 - goto __132 goto __134 __134: + i1 = i1 + 2 + goto __133 + goto __135 +__135: ; if !(nExpr&1 != 0) { - goto __137 + goto __138 } Xsqlite3ExprCode(tls, pParse, (*ExprList_item)(unsafe.Pointer(pEList+8+uintptr(nExpr-1)*32)).FpExpr, target) - goto __138 -__137: - Xsqlite3VdbeAddOp2(tls, v, OP_Null, 0, target) + goto __139 __138: + Xsqlite3VdbeAddOp2(tls, v, OP_Null, 0, target) +__139: ; Xsqlite3ExprDelete(tls, db1, pDel) setDoNotMergeFlagOnCopy(tls, v) @@ -106423,27 +106435,27 @@ __57: ; if !(!(int32((*Parse)(unsafe.Pointer(pParse)).FpTriggerTab) != 0) && !(int32((*Parse)(unsafe.Pointer(pParse)).Fnested) != 0)) { - goto __139 + goto __140 } Xsqlite3ErrorMsg(tls, pParse, ts+9434, 0) return 0 -__139: +__140: ; if !(int32((*Expr)(unsafe.Pointer(pExpr)).FaffExpr) == OE_Abort) { - goto __140 + goto __141 } Xsqlite3MayAbort(tls, pParse) -__140: +__141: ; if !(int32((*Expr)(unsafe.Pointer(pExpr)).FaffExpr) == OE_Ignore) { - goto __141 + goto __142 } Xsqlite3VdbeAddOp4(tls, v, OP_Halt, SQLITE_OK, OE_Ignore, 0, *(*uintptr)(unsafe.Pointer(pExpr + 8)), 0) - goto __142 -__141: + goto __143 +__142: Xsqlite3HaltConstraint(tls, pParse, func() int32 { if (*Parse)(unsafe.Pointer(pParse)).FpTriggerTab != 0 { @@ -106452,7 +106464,7 @@ return SQLITE_ERROR }(), int32((*Expr)(unsafe.Pointer(pExpr)).FaffExpr), *(*uintptr)(unsafe.Pointer(pExpr + 8)), int8(0), uint8(0)) -__142: +__143: ; goto __5 @@ -109120,7 +109132,7 @@ return SQLITE_NOMEM } if Xsqlite3_strnicmp(tls, zSql, ts+12274, 7) != 0 { - return Xsqlite3CorruptError(tls, 113494) + return Xsqlite3CorruptError(tls, 113516) } (*Sqlite3)(unsafe.Pointer(db)).Finit.FiDb = func() uint8 { if bTemp != 0 { @@ -109137,7 +109149,7 @@ } if rc == SQLITE_OK && ((*Parse)(unsafe.Pointer(p)).FpNewTable == uintptr(0) && (*Parse)(unsafe.Pointer(p)).FpNewIndex == uintptr(0) && (*Parse)(unsafe.Pointer(p)).FpNewTrigger == uintptr(0)) { - rc = Xsqlite3CorruptError(tls, 113505) + rc = Xsqlite3CorruptError(tls, 113527) } (*Sqlite3)(unsafe.Pointer(db)).Finit.FiDb = U8(0) @@ -110058,7 +110070,7 @@ goto __2 } - rc = Xsqlite3CorruptError(tls, 114441) + rc = Xsqlite3CorruptError(tls, 114463) goto drop_column_done __2: ; @@ -114422,6 +114434,12 @@ pExpr = Xsqlite3PExpr(tls, pParse, TK_UPLUS, pExpr, uintptr(0)) __11: ; + if !(pExpr != 0 && int32((*Expr)(unsafe.Pointer(pExpr)).Fop) != TK_RAISE) { + goto __12 + } + (*Expr)(unsafe.Pointer(pExpr)).FaffExpr = (*Column)(unsafe.Pointer(pCol)).Faffinity +__12: + ; Xsqlite3ColumnSetExpr(tls, pParse, pTab, pCol, pExpr) pExpr = uintptr(0) goto generated_done @@ -115586,7 +115604,7 @@ 
if Xsqlite3_strnicmp(tls, (*Table)(unsafe.Pointer(pTab)).FzName+uintptr(7), ts+14829, 4) == 0 { return 0 } - if Xsqlite3_strnicmp(tls, (*Table)(unsafe.Pointer(pTab)).FzName+uintptr(7), ts+8485, 10) == 0 { + if Xsqlite3_strnicmp(tls, (*Table)(unsafe.Pointer(pTab)).FzName+uintptr(7), ts+8474, 10) == 0 { return 0 } return 1 @@ -116832,7 +116850,7 @@ goto __101 } Xsqlite3ErrorMsg(tls, pParse, ts+15497, 0) - (*Parse)(unsafe.Pointer(pParse)).Frc = Xsqlite3CorruptError(tls, 121835) + (*Parse)(unsafe.Pointer(pParse)).Frc = Xsqlite3CorruptError(tls, 121859) goto exit_create_index __101: ; @@ -118877,7 +118895,7 @@ goto __16 __15: wcf = U16(WHERE_ONEPASS_DESIRED | WHERE_DUPLICATES_OK) - if !((*NameContext)(unsafe.Pointer(bp+16)).FncFlags&NC_VarSelect != 0) { + if !((*NameContext)(unsafe.Pointer(bp+16)).FncFlags&NC_Subquery != 0) { goto __23 } bComplex = 1 @@ -125345,7 +125363,7 @@ if !!(Xsqlite3SafetyCheckOk(tls, db) != 0) { goto __1 } - return Xsqlite3MisuseError(tls, 131895) + return Xsqlite3MisuseError(tls, 131931) __1: ; if !(zSql == uintptr(0)) { @@ -126747,7 +126765,7 @@ } else if (*FuncDef)(unsafe.Pointer(p)).FxFinalize != uintptr(0) { zType = ts + 18882 } else { - zType = ts + 8880 + zType = ts + 8869 } Xsqlite3VdbeMultiLoad(tls, v, 1, ts+18884, libc.VaList(bp, (*FuncDef)(unsafe.Pointer(p)).FzName, isBuiltin, @@ -126908,6 +126926,7 @@ var zErr2 uintptr var k3 int32 var pCheck uintptr + var jmp7 int32 var jmp6 int32 var iCol1 int32 var uniqOk int32 @@ -128266,7 +128285,7 @@ goto __224 } pMod = (*HashElem)(unsafe.Pointer(j1)).Fdata - Xsqlite3VdbeMultiLoad(tls, v, 1, ts+8880, libc.VaList(bp+272, (*Module)(unsafe.Pointer(pMod)).FzName)) + Xsqlite3VdbeMultiLoad(tls, v, 1, ts+8869, libc.VaList(bp+272, (*Module)(unsafe.Pointer(pMod)).FzName)) goto __223 __223: j1 = (*HashElem)(unsafe.Pointer(j1)).Fnext @@ -128282,7 +128301,7 @@ if !(i6 < int32(uint64(unsafe.Sizeof(aPragmaName))/uint64(unsafe.Sizeof(PragmaName{})))) { goto __227 } - Xsqlite3VdbeMultiLoad(tls, v, 1, ts+8880, libc.VaList(bp+280, aPragmaName[i6].FzName)) + Xsqlite3VdbeMultiLoad(tls, v, 1, ts+8869, libc.VaList(bp+280, aPragmaName[i6].FzName)) goto __226 __226: i6++ @@ -129087,80 +129106,94 @@ jmp4 = integrityCheckResultRow(tls, v) Xsqlite3VdbeJumpHere(tls, v, jmp21) + if !((*Table)(unsafe.Pointer(pTab9)).FtabFlags&U32(TF_WithoutRowid) == U32(0)) { + goto __352 + } + Xsqlite3VdbeAddOp2(tls, v, OP_IdxRowid, *(*int32)(unsafe.Pointer(bp + 624))+j4, 3) + jmp7 = Xsqlite3VdbeAddOp3(tls, v, OP_Eq, 3, 0, r1+int32((*Index)(unsafe.Pointer(pIdx5)).FnColumn)-1) + + Xsqlite3VdbeLoadString(tls, v, 3, + ts+19298) + Xsqlite3VdbeAddOp3(tls, v, OP_Concat, 7, 3, 3) + Xsqlite3VdbeLoadString(tls, v, 4, ts+19334) + Xsqlite3VdbeGoto(tls, v, jmp5-1) + Xsqlite3VdbeJumpHere(tls, v, jmp7) +__352: + ; label6 = 0 kk = 0 -__352: +__353: if !(kk < int32((*Index)(unsafe.Pointer(pIdx5)).FnKeyCol)) { - goto __354 + goto __355 } if !(*(*uintptr)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx5)).FazColl + uintptr(kk)*8)) == uintptr(unsafe.Pointer(&Xsqlite3StrBINARY))) { - goto __355 + goto __356 } - goto __353 -__355: + goto __354 +__356: ; if !(label6 == 0) { - goto __356 + goto __357 } label6 = Xsqlite3VdbeMakeLabel(tls, pParse) -__356: +__357: ; Xsqlite3VdbeAddOp3(tls, v, OP_Column, *(*int32)(unsafe.Pointer(bp + 624))+j4, kk, 3) Xsqlite3VdbeAddOp3(tls, v, OP_Ne, 3, label6, r1+kk) - goto __353 -__353: - kk++ - goto __352 goto __354 __354: + kk++ + goto __353 + goto __355 +__355: ; if !(label6 != 0) { - goto __357 + goto __358 } jmp6 = Xsqlite3VdbeAddOp0(tls, v, OP_Goto) 
Xsqlite3VdbeResolveLabel(tls, v, label6) Xsqlite3VdbeLoadString(tls, v, 3, ts+19272) Xsqlite3VdbeAddOp3(tls, v, OP_Concat, 7, 3, 3) - Xsqlite3VdbeLoadString(tls, v, 4, ts+19298) + Xsqlite3VdbeLoadString(tls, v, 4, ts+19345) Xsqlite3VdbeGoto(tls, v, jmp5-1) Xsqlite3VdbeJumpHere(tls, v, jmp6) -__357: +__358: ; if !(int32((*Index)(unsafe.Pointer(pIdx5)).FonError) != OE_None) { - goto __358 + goto __359 } uniqOk = Xsqlite3VdbeMakeLabel(tls, pParse) kk = 0 -__359: +__360: if !(kk < int32((*Index)(unsafe.Pointer(pIdx5)).FnKeyCol)) { - goto __361 + goto __362 } iCol1 = int32(*(*I16)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx5)).FaiColumn + uintptr(kk)*2))) if !(iCol1 >= 0 && uint32(int32(*(*uint8)(unsafe.Pointer((*Table)(unsafe.Pointer(pTab9)).FaCol + uintptr(iCol1)*24 + 8))&0xf>>0)) != 0) { - goto __362 + goto __363 } - goto __360 -__362: + goto __361 +__363: ; Xsqlite3VdbeAddOp2(tls, v, OP_IsNull, r1+kk, uniqOk) - goto __360 -__360: - kk++ - goto __359 goto __361 __361: + kk++ + goto __360 + goto __362 +__362: ; jmp61 = Xsqlite3VdbeAddOp1(tls, v, OP_Next, *(*int32)(unsafe.Pointer(bp + 624))+j4) Xsqlite3VdbeGoto(tls, v, uniqOk) Xsqlite3VdbeJumpHere(tls, v, jmp61) Xsqlite3VdbeAddOp4Int(tls, v, OP_IdxGT, *(*int32)(unsafe.Pointer(bp + 624))+j4, uniqOk, r1, int32((*Index)(unsafe.Pointer(pIdx5)).FnKeyCol)) - Xsqlite3VdbeLoadString(tls, v, 3, ts+19325) + Xsqlite3VdbeLoadString(tls, v, 3, ts+19372) Xsqlite3VdbeGoto(tls, v, jmp5) Xsqlite3VdbeResolveLabel(tls, v, uniqOk) -__358: +__359: ; Xsqlite3VdbeJumpHere(tls, v, jmp4) Xsqlite3ResolvePartIdxLabel(tls, pParse, *(*int32)(unsafe.Pointer(bp + 640))) @@ -129177,20 +129210,20 @@ Xsqlite3VdbeAddOp2(tls, v, OP_Next, *(*int32)(unsafe.Pointer(bp + 620)), loopTop) Xsqlite3VdbeJumpHere(tls, v, loopTop-1) if !!(isQuick != 0) { - goto __363 + goto __364 } - Xsqlite3VdbeLoadString(tls, v, 2, ts+19352) + Xsqlite3VdbeLoadString(tls, v, 2, ts+19399) j4 = 0 pIdx5 = (*Table)(unsafe.Pointer(pTab9)).FpIndex -__364: +__365: if !(pIdx5 != 0) { - goto __366 + goto __367 } if !(pPk1 == pIdx5) { - goto __367 + goto __368 } - goto __365 -__367: + goto __366 +__368: ; Xsqlite3VdbeAddOp2(tls, v, OP_Count, *(*int32)(unsafe.Pointer(bp + 624))+j4, 3) addr1 = Xsqlite3VdbeAddOp3(tls, v, OP_Eq, 8+j4, 0, 3) @@ -129199,21 +129232,21 @@ Xsqlite3VdbeAddOp3(tls, v, OP_Concat, 4, 2, 3) integrityCheckResultRow(tls, v) Xsqlite3VdbeJumpHere(tls, v, addr1) - goto __365 -__365: - pIdx5 = (*Index)(unsafe.Pointer(pIdx5)).FpNext - j4++ - goto __364 goto __366 __366: + pIdx5 = (*Index)(unsafe.Pointer(pIdx5)).FpNext + j4++ + goto __365 + goto __367 +__367: ; if !(pPk1 != 0) { - goto __368 + goto __369 } Xsqlite3ReleaseTempRange(tls, pParse, r2, int32((*Index)(unsafe.Pointer(pPk1)).FnKeyCol)) -__368: +__369: ; -__363: +__364: ; goto __298 __298: @@ -129231,14 +129264,14 @@ ; aOp2 = Xsqlite3VdbeAddOpList(tls, v, int32(uint64(unsafe.Sizeof(endCode))/uint64(unsafe.Sizeof(VdbeOpList{}))), uintptr(unsafe.Pointer(&endCode)), iLn5) if !(aOp2 != 0) { - goto __369 + goto __370 } (*VdbeOp)(unsafe.Pointer(aOp2)).Fp2 = 1 - *(*int32)(unsafe.Pointer(bp + 616)) (*VdbeOp)(unsafe.Pointer(aOp2 + 2*24)).Fp4type = int8(-1) - *(*uintptr)(unsafe.Pointer(aOp2 + 2*24 + 16)) = ts + 19381 + *(*uintptr)(unsafe.Pointer(aOp2 + 2*24 + 16)) = ts + 19428 (*VdbeOp)(unsafe.Pointer(aOp2 + 5*24)).Fp4type = int8(-1) *(*uintptr)(unsafe.Pointer(aOp2 + 5*24 + 16)) = Xsqlite3ErrStr(tls, SQLITE_CORRUPT) -__369: +__370: ; Xsqlite3VdbeChangeP3(tls, v, 0, Xsqlite3VdbeCurrentAddr(tls, v)-2) @@ -129246,27 +129279,27 @@ __46: if !!(zRight != 0) { - 
goto __370 + goto __371 } if !(Xsqlite3ReadSchema(tls, pParse) != 0) { - goto __372 + goto __373 } goto pragma_out -__372: +__373: ; returnSingleText(tls, v, encnames1[(*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).Fenc].FzName) - goto __371 -__370: + goto __372 +__371: if !((*Sqlite3)(unsafe.Pointer(db)).FmDbFlags&U32(DBFLAG_EncodingFixed) == U32(0)) { - goto __373 + goto __374 } pEnc = uintptr(unsafe.Pointer(&encnames1)) -__374: +__375: if !((*EncName)(unsafe.Pointer(pEnc)).FzName != 0) { - goto __376 + goto __377 } if !(0 == Xsqlite3StrICmp(tls, zRight, (*EncName)(unsafe.Pointer(pEnc)).FzName)) { - goto __377 + goto __378 } if (*EncName)(unsafe.Pointer(pEnc)).Fenc != 0 { enc = (*EncName)(unsafe.Pointer(pEnc)).Fenc @@ -129275,25 +129308,25 @@ } (*Schema)(unsafe.Pointer((*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb)).FpSchema)).Fenc = enc Xsqlite3SetTextEncoding(tls, db, enc) - goto __376 -__377: + goto __377 +__378: ; - goto __375 -__375: - pEnc += 16 - goto __374 goto __376 __376: + pEnc += 16 + goto __375 + goto __377 +__377: ; if !!(int32((*EncName)(unsafe.Pointer(pEnc)).FzName) != 0) { - goto __378 + goto __379 } - Xsqlite3ErrorMsg(tls, pParse, ts+19384, libc.VaList(bp+464, zRight)) -__378: + Xsqlite3ErrorMsg(tls, pParse, ts+19431, libc.VaList(bp+464, zRight)) +__379: ; -__373: +__374: ; -__371: +__372: ; goto __15 @@ -129301,15 +129334,15 @@ iCookie = int32((*PragmaName)(unsafe.Pointer(pPragma)).FiArg) Xsqlite3VdbeUsesBtree(tls, v, iDb) if !(zRight != 0 && int32((*PragmaName)(unsafe.Pointer(pPragma)).FmPragFlg)&PragFlg_ReadOnly == 0) { - goto __379 + goto __380 } aOp3 = Xsqlite3VdbeAddOpList(tls, v, int32(uint64(unsafe.Sizeof(setCookie))/uint64(unsafe.Sizeof(VdbeOpList{}))), uintptr(unsafe.Pointer(&setCookie)), 0) if !(0 != 0) { - goto __381 + goto __382 } goto __15 -__381: +__382: ; (*VdbeOp)(unsafe.Pointer(aOp3)).Fp1 = iDb (*VdbeOp)(unsafe.Pointer(aOp3 + 1*24)).Fp1 = iDb @@ -129317,41 +129350,41 @@ (*VdbeOp)(unsafe.Pointer(aOp3 + 1*24)).Fp3 = Xsqlite3Atoi(tls, zRight) (*VdbeOp)(unsafe.Pointer(aOp3 + 1*24)).Fp5 = U16(1) if !(iCookie == BTREE_SCHEMA_VERSION && (*Sqlite3)(unsafe.Pointer(db)).Fflags&uint64(SQLITE_Defensive) != uint64(0)) { - goto __382 + goto __383 } (*VdbeOp)(unsafe.Pointer(aOp3 + 1*24)).Fopcode = U8(OP_Noop) -__382: +__383: ; - goto __380 -__379: + goto __381 +__380: ; aOp4 = Xsqlite3VdbeAddOpList(tls, v, int32(uint64(unsafe.Sizeof(readCookie))/uint64(unsafe.Sizeof(VdbeOpList{}))), uintptr(unsafe.Pointer(&readCookie)), 0) if !(0 != 0) { - goto __383 + goto __384 } goto __15 -__383: +__384: ; (*VdbeOp)(unsafe.Pointer(aOp4)).Fp1 = iDb (*VdbeOp)(unsafe.Pointer(aOp4 + 1*24)).Fp1 = iDb (*VdbeOp)(unsafe.Pointer(aOp4 + 1*24)).Fp3 = iCookie Xsqlite3VdbeReusable(tls, v) -__380: +__381: ; goto __15 __48: i10 = 0 (*Parse)(unsafe.Pointer(pParse)).FnMem = 1 -__384: +__385: if !(libc.AssignUintptr(&zOpt, Xsqlite3_compileoption_get(tls, libc.PostIncInt32(&i10, 1))) != uintptr(0)) { - goto __385 + goto __386 } Xsqlite3VdbeLoadString(tls, v, 1, zOpt) Xsqlite3VdbeAddOp2(tls, v, OP_ResultRow, 1, 1) - goto __384 -__385: + goto __385 +__386: ; Xsqlite3VdbeReusable(tls, v) @@ -129366,31 +129399,31 @@ }() eMode2 = SQLITE_CHECKPOINT_PASSIVE if !(zRight != 0) { - goto __386 + goto __387 } if !(Xsqlite3StrICmp(tls, zRight, ts+18714) == 0) { - goto __387 + goto __388 } eMode2 = SQLITE_CHECKPOINT_FULL - goto __388 -__387: - if !(Xsqlite3StrICmp(tls, zRight, ts+19409) == 0) { - goto __389 + goto __389 +__388: + if !(Xsqlite3StrICmp(tls, zRight, ts+19456) == 0) { + 
goto __390 } eMode2 = SQLITE_CHECKPOINT_RESTART - goto __390 -__389: + goto __391 +__390: if !(Xsqlite3StrICmp(tls, zRight, ts+18867) == 0) { - goto __391 + goto __392 } eMode2 = SQLITE_CHECKPOINT_TRUNCATE -__391: +__392: ; -__390: +__391: ; -__388: +__389: ; -__386: +__387: ; (*Parse)(unsafe.Pointer(pParse)).FnMem = 3 Xsqlite3VdbeAddOp3(tls, v, OP_Checkpoint, iBt, eMode2, 1) @@ -129400,10 +129433,10 @@ __50: if !(zRight != 0) { - goto __392 + goto __393 } Xsqlite3_wal_autocheckpoint(tls, db, Xsqlite3Atoi(tls, zRight)) -__392: +__393: ; returnSingleInt(tls, v, func() int64 { @@ -129423,19 +129456,19 @@ __52: if !(zRight != 0) { - goto __393 + goto __394 } opMask = U32(Xsqlite3Atoi(tls, zRight)) if !(opMask&U32(0x02) == U32(0)) { - goto __395 + goto __396 } goto __15 -__395: +__396: ; - goto __394 -__393: - opMask = U32(0xfffe) + goto __395 __394: + opMask = U32(0xfffe) +__395: ; iTabCur = libc.PostIncInt32(&(*Parse)(unsafe.Pointer(pParse)).FnTab, 1) iDbLast = func() int32 { @@ -129444,86 +129477,86 @@ } return (*Sqlite3)(unsafe.Pointer(db)).FnDb - 1 }() -__396: +__397: if !(iDb <= iDbLast) { - goto __398 + goto __399 } if !(iDb == 1) { - goto __399 + goto __400 } - goto __397 -__399: + goto __398 +__400: ; Xsqlite3CodeVerifySchema(tls, pParse, iDb) pSchema = (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + uintptr(iDb)*32)).FpSchema k4 = (*Hash)(unsafe.Pointer(pSchema + 8)).Ffirst -__400: +__401: if !(k4 != 0) { - goto __402 + goto __403 } pTab10 = (*HashElem)(unsafe.Pointer(k4)).Fdata if !((*Table)(unsafe.Pointer(pTab10)).FtabFlags&U32(TF_StatsUsed) == U32(0)) { - goto __403 + goto __404 } - goto __401 -__403: + goto __402 +__404: ; szThreshold = LogEst(int32((*Table)(unsafe.Pointer(pTab10)).FnRowLogEst) + 46) pIdx6 = (*Table)(unsafe.Pointer(pTab10)).FpIndex -__404: +__405: if !(pIdx6 != 0) { - goto __406 + goto __407 } if !!(int32(*(*uint16)(unsafe.Pointer(pIdx6 + 100))&0x80>>7) != 0) { - goto __407 + goto __408 } szThreshold = int16(0) - goto __406 -__407: + goto __407 +__408: ; - goto __405 -__405: - pIdx6 = (*Index)(unsafe.Pointer(pIdx6)).FpNext - goto __404 goto __406 __406: + pIdx6 = (*Index)(unsafe.Pointer(pIdx6)).FpNext + goto __405 + goto __407 +__407: ; if !(szThreshold != 0) { - goto __408 + goto __409 } Xsqlite3OpenTable(tls, pParse, iTabCur, iDb, pTab10, OP_OpenRead) Xsqlite3VdbeAddOp3(tls, v, OP_IfSmaller, iTabCur, int32(U32(Xsqlite3VdbeCurrentAddr(tls, v)+2)+opMask&U32(1)), int32(szThreshold)) -__408: +__409: ; - zSubSql = Xsqlite3MPrintf(tls, db, ts+19417, + zSubSql = Xsqlite3MPrintf(tls, db, ts+19464, libc.VaList(bp+472, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName, (*Table)(unsafe.Pointer(pTab10)).FzName)) if !(opMask&U32(0x01) != 0) { - goto __409 + goto __410 } r11 = Xsqlite3GetTempReg(tls, pParse) Xsqlite3VdbeAddOp4(tls, v, OP_String8, 0, r11, 0, zSubSql, -6) Xsqlite3VdbeAddOp2(tls, v, OP_ResultRow, r11, 1) - goto __410 -__409: - Xsqlite3VdbeAddOp4(tls, v, OP_SqlExec, 0, 0, 0, zSubSql, -6) + goto __411 __410: + Xsqlite3VdbeAddOp4(tls, v, OP_SqlExec, 0, 0, 0, zSubSql, -6) +__411: ; - goto __401 -__401: - k4 = (*HashElem)(unsafe.Pointer(k4)).Fnext - goto __400 goto __402 __402: + k4 = (*HashElem)(unsafe.Pointer(k4)).Fnext + goto __401 + goto __403 +__403: ; - goto __397 -__397: - iDb++ - goto __396 goto __398 __398: + iDb++ + goto __397 + goto __399 +__399: ; Xsqlite3VdbeAddOp0(tls, v, OP_Expire) goto __15 @@ -129531,36 +129564,36 @@ __53: ; if !(zRight != 0) { - goto __411 + goto __412 } Xsqlite3_busy_timeout(tls, db, 
Xsqlite3Atoi(tls, zRight)) -__411: +__412: ; returnSingleInt(tls, v, int64((*Sqlite3)(unsafe.Pointer(db)).FbusyTimeout)) goto __15 __54: if !(zRight != 0 && Xsqlite3DecOrHexToI64(tls, zRight, bp+648) == SQLITE_OK) { - goto __412 + goto __413 } Xsqlite3_soft_heap_limit64(tls, *(*Sqlite3_int64)(unsafe.Pointer(bp + 648))) -__412: +__413: ; returnSingleInt(tls, v, Xsqlite3_soft_heap_limit64(tls, int64(-1))) goto __15 __55: if !(zRight != 0 && Xsqlite3DecOrHexToI64(tls, zRight, bp+656) == SQLITE_OK) { - goto __413 + goto __414 } iPrior = Xsqlite3_hard_heap_limit64(tls, int64(-1)) if !(*(*Sqlite3_int64)(unsafe.Pointer(bp + 656)) > int64(0) && (iPrior == int64(0) || iPrior > *(*Sqlite3_int64)(unsafe.Pointer(bp + 656)))) { - goto __414 + goto __415 } Xsqlite3_hard_heap_limit64(tls, *(*Sqlite3_int64)(unsafe.Pointer(bp + 656))) -__414: +__415: ; -__413: +__414: ; returnSingleInt(tls, v, Xsqlite3_hard_heap_limit64(tls, int64(-1))) goto __15 @@ -129569,10 +129602,10 @@ if !(zRight != 0 && Xsqlite3DecOrHexToI64(tls, zRight, bp+664) == SQLITE_OK && *(*Sqlite3_int64)(unsafe.Pointer(bp + 664)) >= int64(0)) { - goto __415 + goto __416 } Xsqlite3_limit(tls, db, SQLITE_LIMIT_WORKER_THREADS, int32(*(*Sqlite3_int64)(unsafe.Pointer(bp + 664))&int64(0x7fffffff))) -__415: +__416: ; returnSingleInt(tls, v, int64(Xsqlite3_limit(tls, db, SQLITE_LIMIT_WORKER_THREADS, -1))) goto __15 @@ -129581,10 +129614,10 @@ if !(zRight != 0 && Xsqlite3DecOrHexToI64(tls, zRight, bp+672) == SQLITE_OK && *(*Sqlite3_int64)(unsafe.Pointer(bp + 672)) >= int64(0)) { - goto __416 + goto __417 } (*Sqlite3)(unsafe.Pointer(db)).FnAnalysisLimit = int32(*(*Sqlite3_int64)(unsafe.Pointer(bp + 672)) & int64(0x7fffffff)) -__416: +__417: ; returnSingleInt(tls, v, int64((*Sqlite3)(unsafe.Pointer(db)).FnAnalysisLimit)) goto __15 @@ -129592,10 +129625,10 @@ __15: ; if !(int32((*PragmaName)(unsafe.Pointer(pPragma)).FmPragFlg)&PragFlg_NoColumns1 != 0 && zRight != 0) { - goto __417 + goto __418 } -__417: +__418: ; pragma_out: Xsqlite3DbFree(tls, db, zLeft) @@ -129647,14 +129680,14 @@ {Fopcode: U8(OP_Goto), Fp2: int8(3)}, } var encnames1 = [9]EncName{ - {FzName: ts + 19435, Fenc: U8(SQLITE_UTF8)}, - {FzName: ts + 19440, Fenc: U8(SQLITE_UTF8)}, - {FzName: ts + 19446, Fenc: U8(SQLITE_UTF16LE)}, - {FzName: ts + 19455, Fenc: U8(SQLITE_UTF16BE)}, - {FzName: ts + 19464, Fenc: U8(SQLITE_UTF16LE)}, - {FzName: ts + 19472, Fenc: U8(SQLITE_UTF16BE)}, - {FzName: ts + 19480}, - {FzName: ts + 19487}, + {FzName: ts + 19482, Fenc: U8(SQLITE_UTF8)}, + {FzName: ts + 19487, Fenc: U8(SQLITE_UTF8)}, + {FzName: ts + 19493, Fenc: U8(SQLITE_UTF16LE)}, + {FzName: ts + 19502, Fenc: U8(SQLITE_UTF16BE)}, + {FzName: ts + 19511, Fenc: U8(SQLITE_UTF16LE)}, + {FzName: ts + 19519, Fenc: U8(SQLITE_UTF16BE)}, + {FzName: ts + 19527}, + {FzName: ts + 19534}, {}, } var setCookie = [2]VdbeOpList{ @@ -129706,7 +129739,7 @@ _ = argc _ = argv Xsqlite3StrAccumInit(tls, bp+32, uintptr(0), bp+64, int32(unsafe.Sizeof([200]int8{})), 0) - Xsqlite3_str_appendall(tls, bp+32, ts+19493) + Xsqlite3_str_appendall(tls, bp+32, ts+19540) i = 0 j = int32((*PragmaName)(unsafe.Pointer(pPragma)).FiPragCName) __1: @@ -129714,7 +129747,7 @@ goto __3 } { - Xsqlite3_str_appendf(tls, bp+32, ts+19508, libc.VaList(bp, int32(cSep), pragCName[j])) + Xsqlite3_str_appendf(tls, bp+32, ts+19555, libc.VaList(bp, int32(cSep), pragCName[j])) cSep = int8(',') } @@ -129727,16 +129760,16 @@ __3: ; if i == 0 { - Xsqlite3_str_appendf(tls, bp+32, ts+19515, libc.VaList(bp+16, (*PragmaName)(unsafe.Pointer(pPragma)).FzName)) + 
Xsqlite3_str_appendf(tls, bp+32, ts+19562, libc.VaList(bp+16, (*PragmaName)(unsafe.Pointer(pPragma)).FzName)) i++ } j = 0 if int32((*PragmaName)(unsafe.Pointer(pPragma)).FmPragFlg)&PragFlg_Result1 != 0 { - Xsqlite3_str_appendall(tls, bp+32, ts+19521) + Xsqlite3_str_appendall(tls, bp+32, ts+19568) j++ } if int32((*PragmaName)(unsafe.Pointer(pPragma)).FmPragFlg)&(PragFlg_SchemaOpt|PragFlg_SchemaReq) != 0 { - Xsqlite3_str_appendall(tls, bp+32, ts+19533) + Xsqlite3_str_appendall(tls, bp+32, ts+19580) j++ } Xsqlite3_str_append(tls, bp+32, ts+6309, 1) @@ -129919,13 +129952,13 @@ __3: ; Xsqlite3StrAccumInit(tls, bp+32, uintptr(0), uintptr(0), 0, *(*int32)(unsafe.Pointer((*PragmaVtab)(unsafe.Pointer(pTab)).Fdb + 136 + 1*4))) - Xsqlite3_str_appendall(tls, bp+32, ts+19548) + Xsqlite3_str_appendall(tls, bp+32, ts+19595) if *(*uintptr)(unsafe.Pointer(pCsr + 24 + 1*8)) != 0 { - Xsqlite3_str_appendf(tls, bp+32, ts+19556, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(pCsr + 24 + 1*8)))) + Xsqlite3_str_appendf(tls, bp+32, ts+19603, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(pCsr + 24 + 1*8)))) } Xsqlite3_str_appendall(tls, bp+32, (*PragmaName)(unsafe.Pointer((*PragmaVtab)(unsafe.Pointer(pTab)).FpName)).FzName) if *(*uintptr)(unsafe.Pointer(pCsr + 24)) != 0 { - Xsqlite3_str_appendf(tls, bp+32, ts+19560, libc.VaList(bp+16, *(*uintptr)(unsafe.Pointer(pCsr + 24)))) + Xsqlite3_str_appendf(tls, bp+32, ts+19607, libc.VaList(bp+16, *(*uintptr)(unsafe.Pointer(pCsr + 24)))) } zSql = Xsqlite3StrAccumFinish(tls, bp+32) if zSql == uintptr(0) { @@ -130002,12 +130035,12 @@ } else if *(*uintptr)(unsafe.Pointer((*InitData)(unsafe.Pointer(pData)).FpzErrMsg)) != uintptr(0) { } else if (*InitData)(unsafe.Pointer(pData)).FmInitFlags&U32(INITFLAG_AlterMask) != 0 { *(*uintptr)(unsafe.Pointer((*InitData)(unsafe.Pointer(pData)).FpzErrMsg)) = Xsqlite3MPrintf(tls, db, - ts+19564, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(azObj)), *(*uintptr)(unsafe.Pointer(azObj + 1*8)), + ts+19611, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(azObj)), *(*uintptr)(unsafe.Pointer(azObj + 1*8)), azAlterType[(*InitData)(unsafe.Pointer(pData)).FmInitFlags&U32(INITFLAG_AlterMask)-U32(1)], zExtra)) (*InitData)(unsafe.Pointer(pData)).Frc = SQLITE_ERROR } else if (*Sqlite3)(unsafe.Pointer(db)).Fflags&uint64(SQLITE_WriteSchema) != 0 { - (*InitData)(unsafe.Pointer(pData)).Frc = Xsqlite3CorruptError(tls, 137196) + (*InitData)(unsafe.Pointer(pData)).Frc = Xsqlite3CorruptError(tls, 137249) } else { var z uintptr var zObj uintptr @@ -130016,19 +130049,19 @@ } else { zObj = ts + 6360 } - z = Xsqlite3MPrintf(tls, db, ts+19592, libc.VaList(bp+32, zObj)) + z = Xsqlite3MPrintf(tls, db, ts+19639, libc.VaList(bp+32, zObj)) if zExtra != 0 && *(*int8)(unsafe.Pointer(zExtra)) != 0 { - z = Xsqlite3MPrintf(tls, db, ts+19623, libc.VaList(bp+40, z, zExtra)) + z = Xsqlite3MPrintf(tls, db, ts+19670, libc.VaList(bp+40, z, zExtra)) } *(*uintptr)(unsafe.Pointer((*InitData)(unsafe.Pointer(pData)).FpzErrMsg)) = z - (*InitData)(unsafe.Pointer(pData)).Frc = Xsqlite3CorruptError(tls, 137203) + (*InitData)(unsafe.Pointer(pData)).Frc = Xsqlite3CorruptError(tls, 137256) } } var azAlterType = [3]uintptr{ - ts + 19631, - ts + 19638, - ts + 19650, + ts + 19678, + ts + 19685, + ts + 19697, } // Check to see if any sibling index (another index on the same table) @@ -130120,7 +130153,7 @@ var pIndex uintptr pIndex = Xsqlite3FindIndex(tls, db, *(*uintptr)(unsafe.Pointer(argv + 1*8)), (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName) if pIndex == uintptr(0) { - 
corruptSchema(tls, pData, argv, ts+19661) + corruptSchema(tls, pData, argv, ts+19708) } else if Xsqlite3GetUInt32(tls, *(*uintptr)(unsafe.Pointer(argv + 3*8)), pIndex+88) == 0 || (*Index)(unsafe.Pointer(pIndex)).Ftnum < Pgno(2) || (*Index)(unsafe.Pointer(pIndex)).Ftnum > (*InitData)(unsafe.Pointer(pData)).FmxPage || @@ -130168,7 +130201,7 @@ }()) *(*uintptr)(unsafe.Pointer(bp + 16 + 2*8)) = *(*uintptr)(unsafe.Pointer(bp + 16 + 1*8)) *(*uintptr)(unsafe.Pointer(bp + 16 + 3*8)) = ts + 9290 - *(*uintptr)(unsafe.Pointer(bp + 16 + 4*8)) = ts + 19674 + *(*uintptr)(unsafe.Pointer(bp + 16 + 4*8)) = ts + 19721 *(*uintptr)(unsafe.Pointer(bp + 16 + 5*8)) = uintptr(0) (*InitData)(unsafe.Pointer(bp + 64)).Fdb = db (*InitData)(unsafe.Pointer(bp + 64)).FiDb = iDb @@ -130297,7 +130330,7 @@ if !(int32((*Schema)(unsafe.Pointer((*Db)(unsafe.Pointer(pDb)).FpSchema)).Ffile_format) > SQLITE_MAX_FILE_FORMAT) { goto __19 } - Xsqlite3SetString(tls, pzErrMsg, db, ts+19746) + Xsqlite3SetString(tls, pzErrMsg, db, ts+19793) rc = SQLITE_ERROR goto initone_error_out __19: @@ -130311,7 +130344,7 @@ (*InitData)(unsafe.Pointer(bp + 64)).FmxPage = Xsqlite3BtreeLastPage(tls, (*Db)(unsafe.Pointer(pDb)).FpBt) zSql = Xsqlite3MPrintf(tls, db, - ts+19770, + ts+19817, libc.VaList(bp, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName, zSchemaTabName)) xAuth = (*Sqlite3)(unsafe.Pointer(db)).FxAuth @@ -130643,7 +130676,7 @@ goto __8 } zDb = (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + uintptr(i)*32)).FzDbSName - Xsqlite3ErrorWithMsg(tls, db, rc, ts+19804, libc.VaList(bp, zDb)) + Xsqlite3ErrorWithMsg(tls, db, rc, ts+19851, libc.VaList(bp, zDb)) goto end_prepare __8: @@ -130673,7 +130706,7 @@ if !(nBytes > mxLen) { goto __12 } - Xsqlite3ErrorWithMsg(tls, db, SQLITE_TOOBIG, ts+19834, 0) + Xsqlite3ErrorWithMsg(tls, db, SQLITE_TOOBIG, ts+19881, 0) rc = Xsqlite3ApiExit(tls, db, SQLITE_TOOBIG) goto end_prepare __12: @@ -130769,7 +130802,7 @@ *(*uintptr)(unsafe.Pointer(ppStmt)) = uintptr(0) if !(Xsqlite3SafetyCheckOk(tls, db) != 0) || zSql == uintptr(0) { - return Xsqlite3MisuseError(tls, 137995) + return Xsqlite3MisuseError(tls, 138048) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) Xsqlite3BtreeEnterAll(tls, db) @@ -130868,7 +130901,7 @@ *(*uintptr)(unsafe.Pointer(ppStmt)) = uintptr(0) if !(Xsqlite3SafetyCheckOk(tls, db) != 0) || zSql == uintptr(0) { - return Xsqlite3MisuseError(tls, 138143) + return Xsqlite3MisuseError(tls, 138196) } if nBytes >= 0 { var sz int32 @@ -131195,13 +131228,13 @@ zSp2++ } Xsqlite3ErrorMsg(tls, pParse, - ts+19853, libc.VaList(bp, pA, zSp1, pB, zSp2, pC)) + ts+19900, libc.VaList(bp, pA, zSp1, pB, zSp2, pC)) jointype = JT_INNER } return jointype } -var zKeyText = *(*[34]int8)(unsafe.Pointer(ts + 19883)) +var zKeyText = *(*[34]int8)(unsafe.Pointer(ts + 19930)) var aKeyword = [7]struct { Fi U8 FnChar U8 @@ -131376,7 +131409,7 @@ var pUsing uintptr = uintptr(0) if uint32(int32(*(*uint16)(unsafe.Pointer(pRight + 60 + 4))&0x400>>10)) != 0 || *(*uintptr)(unsafe.Pointer(pRight + 72)) != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+19917, libc.VaList(bp, 0)) + ts+19964, libc.VaList(bp, 0)) return 1 } for j = 0; j < int32((*Table)(unsafe.Pointer(pRightTab)).FnCol); j++ { @@ -131421,7 +131454,7 @@ tableAndColumnIndex(tls, pSrc, 0, i, zName, bp+24, bp+28, int32(*(*uint16)(unsafe.Pointer(pRight + 60 + 4))&0x1000>>12)) == 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+19967, libc.VaList(bp+8, zName)) + ts+20014, libc.VaList(bp+8, zName)) return 1 } pE1 = 
Xsqlite3CreateColumnExpr(tls, db, pSrc, *(*int32)(unsafe.Pointer(bp + 24)), *(*int32)(unsafe.Pointer(bp + 28))) @@ -131432,7 +131465,7 @@ int32(*(*uint16)(unsafe.Pointer(pRight + 60 + 4))&0x1000>>12)) != 0 { if int32(*(*uint16)(unsafe.Pointer(pSrc + 8 + uintptr(*(*int32)(unsafe.Pointer(bp + 24)))*104 + 60 + 4))&0x400>>10) == 0 || Xsqlite3IdListIndex(tls, *(*uintptr)(unsafe.Pointer(pSrc + 8 + uintptr(*(*int32)(unsafe.Pointer(bp + 24)))*104 + 72)), zName) < 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+20031, + Xsqlite3ErrorMsg(tls, pParse, ts+20078, libc.VaList(bp+16, zName)) break } @@ -132060,16 +132093,16 @@ var z uintptr switch id { case TK_ALL: - z = ts + 20068 + z = ts + 20115 break case TK_INTERSECT: - z = ts + 20078 + z = ts + 20125 break case TK_EXCEPT: - z = ts + 20088 + z = ts + 20135 break default: - z = ts + 20095 + z = ts + 20142 break } return z @@ -132079,7 +132112,7 @@ bp := tls.Alloc(8) defer tls.Free(8) - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+20101, libc.VaList(bp, zUsage)) + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+20148, libc.VaList(bp, zUsage)) } func generateSortTail(tls *libc.TLS, pParse uintptr, p uintptr, pSort uintptr, nColumn int32, pDest uintptr) { @@ -132105,9 +132138,9 @@ var nRefKey int32 = 0 var aOutEx uintptr = (*Select)(unsafe.Pointer(p)).FpEList + 8 - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+20124, libc.VaList(bp, func() uintptr { + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+20171, libc.VaList(bp, func() uintptr { if (*SortCtx)(unsafe.Pointer(pSort)).FnOBSat > 0 { - return ts + 20155 + return ts + 20202 } return ts + 1544 }())) @@ -132451,7 +132484,7 @@ } else { var z uintptr = (*ExprList_item)(unsafe.Pointer(pEList + 8 + uintptr(i)*32)).FzEName if z == uintptr(0) { - z = Xsqlite3MPrintf(tls, db, ts+20170, libc.VaList(bp+16, i+1)) + z = Xsqlite3MPrintf(tls, db, ts+20217, libc.VaList(bp+16, i+1)) } else { z = Xsqlite3DbStrDup(tls, db, z) } @@ -132551,7 +132584,7 @@ if zName != 0 && !(Xsqlite3IsTrueOrFalse(tls, zName) != 0) { zName = Xsqlite3DbStrDup(tls, db, zName) } else { - zName = Xsqlite3MPrintf(tls, db, ts+20170, libc.VaList(bp, i+1)) + zName = Xsqlite3MPrintf(tls, db, ts+20217, libc.VaList(bp, i+1)) } *(*U32)(unsafe.Pointer(bp + 56)) = U32(0) @@ -132567,7 +132600,7 @@ nName = j } } - zName = Xsqlite3MPrintf(tls, db, ts+20179, libc.VaList(bp+8, nName, zName, libc.PreIncUint32(&*(*U32)(unsafe.Pointer(bp + 56)), 1))) + zName = Xsqlite3MPrintf(tls, db, ts+20226, libc.VaList(bp+8, nName, zName, libc.PreIncUint32(&*(*U32)(unsafe.Pointer(bp + 56)), 1))) Xsqlite3ProgressCheck(tls, pParse) if *(*U32)(unsafe.Pointer(bp + 56)) > U32(3) { Xsqlite3_randomness(tls, int32(unsafe.Sizeof(U32(0))), bp+56) @@ -132650,8 +132683,6 @@ (*Column)(unsafe.Pointer(pCol)).Faffinity = Xsqlite3ExprAffinity(tls, p) if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) <= SQLITE_AFF_NONE { (*Column)(unsafe.Pointer(pCol)).Faffinity = aff - } else if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) >= SQLITE_AFF_NUMERIC && int32((*Expr)(unsafe.Pointer(p)).Fop) == TK_CAST { - (*Column)(unsafe.Pointer(pCol)).Faffinity = int8(SQLITE_AFF_FLEXNUM) } if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) >= SQLITE_AFF_TEXT && (*Select)(unsafe.Pointer(pSelect)).FpNext != 0 { var m int32 = 0 @@ -132666,12 +132697,15 @@ } else if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) >= SQLITE_AFF_NUMERIC && m&0x02 != 0 { (*Column)(unsafe.Pointer(pCol)).Faffinity = int8(SQLITE_AFF_BLOB) } + if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) >= SQLITE_AFF_NUMERIC && 
int32((*Expr)(unsafe.Pointer(p)).Fop) == TK_CAST { + (*Column)(unsafe.Pointer(pCol)).Faffinity = int8(SQLITE_AFF_FLEXNUM) + } } zType = columnTypeImpl(tls, bp, p, uintptr(0), uintptr(0), uintptr(0)) if zType == uintptr(0) || int32((*Column)(unsafe.Pointer(pCol)).Faffinity) != int32(Xsqlite3AffinityType(tls, zType, uintptr(0))) { if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) == SQLITE_AFF_NUMERIC || int32((*Column)(unsafe.Pointer(pCol)).Faffinity) == SQLITE_AFF_FLEXNUM { - zType = ts + 20187 + zType = ts + 20234 } else { zType = uintptr(0) for j = 1; j < SQLITE_N_STDTYPE; j++ { @@ -132887,7 +132921,7 @@ if !((*Select)(unsafe.Pointer(p)).FpWin != 0) { goto __1 } - Xsqlite3ErrorMsg(tls, pParse, ts+20191, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+20238, 0) return __1: ; @@ -132978,7 +133012,7 @@ if !((*Select)(unsafe.Pointer(pFirstRec)).FselFlags&U32(SF_Aggregate) != 0) { goto __15 } - Xsqlite3ErrorMsg(tls, pParse, ts+20240, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+20287, 0) goto end_of_recursive_query __15: ; @@ -132998,7 +133032,7 @@ ; pSetup = (*Select)(unsafe.Pointer(pFirstRec)).FpPrior (*Select)(unsafe.Pointer(pSetup)).FpNext = uintptr(0) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+20282, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+20329, 0) rc = Xsqlite3Select(tls, pParse, pSetup, bp) (*Select)(unsafe.Pointer(pSetup)).FpNext = p if !(rc != 0) { @@ -133035,7 +133069,7 @@ Xsqlite3VdbeResolveLabel(tls, v, addrCont) (*Select)(unsafe.Pointer(pFirstRec)).FpPrior = uintptr(0) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+20288, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+20335, 0) Xsqlite3Select(tls, pParse, p, bp) (*Select)(unsafe.Pointer(pFirstRec)).FpPrior = pSetup @@ -133069,11 +133103,11 @@ p = (*Select)(unsafe.Pointer(p)).FpPrior nRow = nRow + bShowAll } - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+20303, libc.VaList(bp, nRow, func() uintptr { + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+20350, libc.VaList(bp, nRow, func() uintptr { if nRow == 1 { return ts + 1544 } - return ts + 20326 + return ts + 20373 }())) for p != 0 { selectInnerLoop(tls, pParse, p, -1, uintptr(0), uintptr(0), pDest, 1, 1) @@ -133174,8 +133208,8 @@ if !((*Select)(unsafe.Pointer(pPrior)).FpPrior == uintptr(0)) { goto __8 } - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+20328, 0) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+20343, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+20375, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+20390, 0) __8: ; switch int32((*Select)(unsafe.Pointer(p)).Fop) { @@ -133222,7 +133256,7 @@ ; __15: ; - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+20068, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+20115, 0) rc = Xsqlite3Select(tls, pParse, p, bp+16) @@ -133289,7 +133323,7 @@ pLimit = (*Select)(unsafe.Pointer(p)).FpLimit (*Select)(unsafe.Pointer(p)).FpLimit = uintptr(0) (*SelectDest)(unsafe.Pointer(bp + 64)).FeDest = op - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+20362, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+20409, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) rc = Xsqlite3Select(tls, pParse, p, bp+64) @@ -133351,7 +133385,7 @@ pLimit1 = (*Select)(unsafe.Pointer(p)).FpLimit (*Select)(unsafe.Pointer(p)).FpLimit = uintptr(0) (*SelectDest)(unsafe.Pointer(bp + 104)).FiSDParm = tab2 - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+20362, libc.VaList(bp+8, Xsqlite3SelectOpName(tls, 
int32((*Select)(unsafe.Pointer(p)).Fop)))) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+20409, libc.VaList(bp+8, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) rc = Xsqlite3Select(tls, pParse, p, bp+104) @@ -133504,10 +133538,10 @@ defer tls.Free(8) if (*Select)(unsafe.Pointer(p)).FselFlags&U32(SF_Values) != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+20383, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+20430, 0) } else { Xsqlite3ErrorMsg(tls, pParse, - ts+20429, + ts+20476, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) } } @@ -133761,8 +133795,8 @@ (*Select)(unsafe.Pointer(pPrior)).FpNext = uintptr(0) (*Select)(unsafe.Pointer(pPrior)).FpOrderBy = Xsqlite3ExprListDup(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pOrderBy, 0) - Xsqlite3ResolveOrderGroupBy(tls, pParse, p, (*Select)(unsafe.Pointer(p)).FpOrderBy, ts+8586) - Xsqlite3ResolveOrderGroupBy(tls, pParse, pPrior, (*Select)(unsafe.Pointer(pPrior)).FpOrderBy, ts+8586) + Xsqlite3ResolveOrderGroupBy(tls, pParse, p, (*Select)(unsafe.Pointer(p)).FpOrderBy, ts+8575) + Xsqlite3ResolveOrderGroupBy(tls, pParse, pPrior, (*Select)(unsafe.Pointer(pPrior)).FpOrderBy, ts+8575) computeLimitRegisters(tls, pParse, p, labelEnd) if (*Select)(unsafe.Pointer(p)).FiLimit != 0 && op == TK_ALL { @@ -133789,13 +133823,13 @@ Xsqlite3SelectDestInit(tls, bp+8, SRT_Coroutine, regAddrA) Xsqlite3SelectDestInit(tls, bp+48, SRT_Coroutine, regAddrB) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+20511, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+20558, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) addrSelectA = Xsqlite3VdbeCurrentAddr(tls, v) + 1 addr1 = Xsqlite3VdbeAddOp3(tls, v, OP_InitCoroutine, regAddrA, 0, addrSelectA) (*Select)(unsafe.Pointer(pPrior)).FiLimit = regLimitA - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+20522, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+20569, 0) Xsqlite3Select(tls, pParse, pPrior, bp+8) Xsqlite3VdbeEndCoroutine(tls, v, regAddrA) Xsqlite3VdbeJumpHere(tls, v, addr1) @@ -133807,7 +133841,7 @@ savedOffset = (*Select)(unsafe.Pointer(p)).FiOffset (*Select)(unsafe.Pointer(p)).FiLimit = regLimitB (*Select)(unsafe.Pointer(p)).FiOffset = 0 - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+20527, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+20574, 0) Xsqlite3Select(tls, pParse, p, bp+48) (*Select)(unsafe.Pointer(p)).FiLimit = savedLimit (*Select)(unsafe.Pointer(p)).FiOffset = savedOffset @@ -133995,7 +134029,8 @@ Xsqlite3VectorErrorMsg(tls, (*SubstContext)(unsafe.Pointer(pSubst)).FpParse, pCopy) } else { var db uintptr = (*Parse)(unsafe.Pointer((*SubstContext)(unsafe.Pointer(pSubst)).FpParse)).Fdb - if (*SubstContext)(unsafe.Pointer(pSubst)).FisOuterJoin != 0 && int32((*Expr)(unsafe.Pointer(pCopy)).Fop) != TK_COLUMN { + if (*SubstContext)(unsafe.Pointer(pSubst)).FisOuterJoin != 0 && + (int32((*Expr)(unsafe.Pointer(pCopy)).Fop) != TK_COLUMN || (*Expr)(unsafe.Pointer(pCopy)).FiTable != (*SubstContext)(unsafe.Pointer(pSubst)).FiNewTable) { libc.Xmemset(tls, bp, 0, uint64(unsafe.Sizeof(Expr{}))) (*Expr)(unsafe.Pointer(bp)).Fop = U8(TK_IF_NULL_ROW) (*Expr)(unsafe.Pointer(bp)).FpLeft = pCopy @@ -134894,7 +134929,7 @@ for pIdx = (*Table)(unsafe.Pointer(pTab)).FpIndex; pIdx != 0 && Xsqlite3StrICmp(tls, (*Index)(unsafe.Pointer(pIdx)).FzName, zIndexedBy) != 0; pIdx = (*Index)(unsafe.Pointer(pIdx)).FpNext { } if !(pIdx != 0) { - Xsqlite3ErrorMsg(tls, pParse, ts+20533, 
libc.VaList(bp, zIndexedBy, 0)) + Xsqlite3ErrorMsg(tls, pParse, ts+20580, libc.VaList(bp, zIndexedBy, 0)) (*Parse)(unsafe.Pointer(pParse)).FcheckSchema = U8(1) return SQLITE_ERROR } @@ -134977,7 +135012,7 @@ defer tls.Free(8) if uint32(int32(*(*uint16)(unsafe.Pointer(pFrom + 60 + 4))&0x4>>2)) != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+20551, libc.VaList(bp, (*SrcItem)(unsafe.Pointer(pFrom)).FzName)) + Xsqlite3ErrorMsg(tls, pParse, ts+20598, libc.VaList(bp, (*SrcItem)(unsafe.Pointer(pFrom)).FzName)) return 1 } return 0 @@ -135106,7 +135141,7 @@ *(*U32)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pFrom)).FpSelect + 4)) |= U32(SF_CopyCte) if uint32(int32(*(*uint16)(unsafe.Pointer(pFrom + 60 + 4))&0x2>>1)) != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+20574, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(pFrom + 88)))) + Xsqlite3ErrorMsg(tls, pParse, ts+20621, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(pFrom + 88)))) return 2 } libc.SetBitFieldPtr16Uint32(pFrom+60+4, uint32(1), 8, 0x100) @@ -135129,7 +135164,7 @@ libc.SetBitFieldPtr16Uint32(pItem+60+4, uint32(1), 6, 0x40) if (*Select)(unsafe.Pointer(pRecTerm)).FselFlags&U32(SF_Recursive) != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+20594, libc.VaList(bp+16, (*Cte)(unsafe.Pointer(pCte)).FzName)) + ts+20641, libc.VaList(bp+16, (*Cte)(unsafe.Pointer(pCte)).FzName)) return 2 } *(*U32)(unsafe.Pointer(pRecTerm + 4)) |= U32(SF_Recursive) @@ -135145,7 +135180,7 @@ pRecTerm = (*Select)(unsafe.Pointer(pRecTerm)).FpPrior } - (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 20637 + (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 20684 pSavedWith = (*Parse)(unsafe.Pointer(pParse)).FpWith (*Parse)(unsafe.Pointer(pParse)).FpWith = *(*uintptr)(unsafe.Pointer(bp + 48)) if (*Select)(unsafe.Pointer(pSel)).FselFlags&U32(SF_Recursive) != 0 { @@ -135171,7 +135206,7 @@ pEList = (*Select)(unsafe.Pointer(pLeft)).FpEList if (*Cte)(unsafe.Pointer(pCte)).FpCols != 0 { if pEList != 0 && (*ExprList)(unsafe.Pointer(pEList)).FnExpr != (*ExprList)(unsafe.Pointer((*Cte)(unsafe.Pointer(pCte)).FpCols)).FnExpr { - Xsqlite3ErrorMsg(tls, pParse, ts+20660, + Xsqlite3ErrorMsg(tls, pParse, ts+20707, libc.VaList(bp+24, (*Cte)(unsafe.Pointer(pCte)).FzName, (*ExprList)(unsafe.Pointer(pEList)).FnExpr, (*ExprList)(unsafe.Pointer((*Cte)(unsafe.Pointer(pCte)).FpCols)).FnExpr)) (*Parse)(unsafe.Pointer(pParse)).FpWith = pSavedWith return 2 @@ -135182,9 +135217,9 @@ Xsqlite3ColumnsFromExprList(tls, pParse, pEList, pTab+54, pTab+8) if bMayRecursive != 0 { if (*Select)(unsafe.Pointer(pSel)).FselFlags&U32(SF_Recursive) != 0 { - (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 20698 + (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 20745 } else { - (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 20732 + (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 20779 } Xsqlite3WalkSelect(tls, pWalker, pSel) } @@ -135231,7 +135266,7 @@ if (*SrcItem)(unsafe.Pointer(pFrom)).FzAlias != 0 { (*Table)(unsafe.Pointer(pTab)).FzName = Xsqlite3DbStrDup(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, (*SrcItem)(unsafe.Pointer(pFrom)).FzAlias) } else { - (*Table)(unsafe.Pointer(pTab)).FzName = Xsqlite3MPrintf(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, ts+20770, libc.VaList(bp, pFrom)) + (*Table)(unsafe.Pointer(pTab)).FzName = Xsqlite3MPrintf(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, ts+20817, libc.VaList(bp, pFrom)) } for (*Select)(unsafe.Pointer(pSel)).FpPrior != 0 { pSel = (*Select)(unsafe.Pointer(pSel)).FpPrior @@ -135343,7 +135378,7 @@ return WRC_Abort } if (*Table)(unsafe.Pointer(pTab)).FnTabRef >= U32(0xffff) { - Xsqlite3ErrorMsg(tls, pParse, 
ts+20774, + Xsqlite3ErrorMsg(tls, pParse, ts+20821, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName)) (*SrcItem)(unsafe.Pointer(pFrom)).FpTab = uintptr(0) return WRC_Abort @@ -135362,7 +135397,7 @@ if int32((*Table)(unsafe.Pointer(pTab)).FeTabType) == TABTYP_VIEW { if (*Sqlite3)(unsafe.Pointer(db)).Fflags&uint64(SQLITE_EnableView) == uint64(0) && (*Table)(unsafe.Pointer(pTab)).FpSchema != (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+1*32)).FpSchema { - Xsqlite3ErrorMsg(tls, pParse, ts+20813, + Xsqlite3ErrorMsg(tls, pParse, ts+20860, libc.VaList(bp+8, (*Table)(unsafe.Pointer(pTab)).FzName)) } (*SrcItem)(unsafe.Pointer(pFrom)).FpSelect = Xsqlite3SelectDup(tls, db, *(*uintptr)(unsafe.Pointer(pTab + 64)), 0) @@ -135486,7 +135521,7 @@ if pNew != 0 { var pX uintptr = pNew + 8 + uintptr((*ExprList)(unsafe.Pointer(pNew)).FnExpr-1)*32 - (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3MPrintf(tls, db, ts+20844, libc.VaList(bp+24, zUName)) + (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3MPrintf(tls, db, ts+20891, libc.VaList(bp+24, zUName)) libc.SetBitFieldPtr16Uint32(pX+16+4, uint32(ENAME_TAB), 0, 0x3) libc.SetBitFieldPtr16Uint32(pX+16+4, uint32(1), 7, 0x80) } @@ -135551,7 +135586,7 @@ (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3DbStrDup(tls, db, (*ExprList_item)(unsafe.Pointer(pNestedFrom+8+uintptr(j)*32)).FzEName) } else { - (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3MPrintf(tls, db, ts+20849, + (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3MPrintf(tls, db, ts+20896, libc.VaList(bp+32, zSchemaName, zTabName, zName)) } @@ -135582,9 +135617,9 @@ ; if !(tableSeen != 0) { if zTName != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+20858, libc.VaList(bp+72, zTName)) + Xsqlite3ErrorMsg(tls, pParse, ts+20905, libc.VaList(bp+72, zTName)) } else { - Xsqlite3ErrorMsg(tls, pParse, ts+20876, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+20923, 0) } } } @@ -135594,7 +135629,7 @@ } if (*Select)(unsafe.Pointer(p)).FpEList != 0 { if (*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer(p)).FpEList)).FnExpr > *(*int32)(unsafe.Pointer(db + 136 + 2*4)) { - Xsqlite3ErrorMsg(tls, pParse, ts+20896, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+20943, 0) return WRC_Abort } if elistFlags&U32(EP_HasFunc|EP_Subquery) != U32(0) { @@ -135732,7 +135767,7 @@ (*AggInfo)(unsafe.Pointer(pAggInfo)).FnColumn = (*AggInfo)(unsafe.Pointer(pAggInfo)).FnAccumulator if int32((*AggInfo)(unsafe.Pointer(pAggInfo)).FnSortingColumn) > 0 { if (*AggInfo)(unsafe.Pointer(pAggInfo)).FnColumn == 0 { - (*AggInfo)(unsafe.Pointer(pAggInfo)).FnSortingColumn = U16(0) + (*AggInfo)(unsafe.Pointer(pAggInfo)).FnSortingColumn = U16((*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer(pSelect)).FpGroupBy)).FnExpr) } else { (*AggInfo)(unsafe.Pointer(pAggInfo)).FnSortingColumn = U16(int32((*AggInfo_col)(unsafe.Pointer((*AggInfo)(unsafe.Pointer(pAggInfo)).FaCol+uintptr((*AggInfo)(unsafe.Pointer(pAggInfo)).FnColumn-1)*24)).FiSorterColumn) + 1) } @@ -135816,13 +135851,13 @@ if *(*uintptr)(unsafe.Pointer(pE + 32)) == uintptr(0) || (*ExprList)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pE + 32)))).FnExpr != 1 { Xsqlite3ErrorMsg(tls, pParse, - ts+20927, 0) + ts+20974, 0) (*AggInfo_func)(unsafe.Pointer(pFunc)).FiDistinct = -1 } else { var pKeyInfo uintptr = Xsqlite3KeyInfoFromExprList(tls, pParse, *(*uintptr)(unsafe.Pointer(pE + 32)), 0, 0) (*AggInfo_func)(unsafe.Pointer(pFunc)).FiDistAddr = Xsqlite3VdbeAddOp4(tls, v, OP_OpenEphemeral, (*AggInfo_func)(unsafe.Pointer(pFunc)).FiDistinct, 0, 0, pKeyInfo, -8) - 
Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+20978, libc.VaList(bp, (*FuncDef)(unsafe.Pointer((*AggInfo_func)(unsafe.Pointer(pFunc)).FpFunc)).FzName)) + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+21025, libc.VaList(bp, (*FuncDef)(unsafe.Pointer((*AggInfo_func)(unsafe.Pointer(pFunc)).FpFunc)).FzName)) } } @@ -136011,11 +136046,11 @@ if int32((*Parse)(unsafe.Pointer(pParse)).Fexplain) == 2 { var bCover int32 = libc.Bool32(pIdx != uintptr(0) && ((*Table)(unsafe.Pointer(pTab)).FtabFlags&U32(TF_WithoutRowid) == U32(0) || !(int32(*(*uint16)(unsafe.Pointer(pIdx + 100))&0x3>>0) == SQLITE_IDXTYPE_PRIMARYKEY))) - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+21011, + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+21058, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName, func() uintptr { if bCover != 0 { - return ts + 21023 + return ts + 21070 } return ts + 1544 }(), @@ -136343,7 +136378,7 @@ goto __7 } Xsqlite3ErrorMsg(tls, pParse, - ts+21046, + ts+21093, libc.VaList(bp, func() uintptr { if (*SrcItem)(unsafe.Pointer(p0)).FzAlias != 0 { return (*SrcItem)(unsafe.Pointer(p0)).FzAlias @@ -136404,7 +136439,7 @@ if !(int32((*Table)(unsafe.Pointer(pTab)).FnCol) != (*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer(pSub)).FpEList)).FnExpr) { goto __15 } - Xsqlite3ErrorMsg(tls, pParse, ts+21100, + Xsqlite3ErrorMsg(tls, pParse, ts+21147, libc.VaList(bp+8, int32((*Table)(unsafe.Pointer(pTab)).FnCol), (*Table)(unsafe.Pointer(pTab)).FzName, (*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer(pSub)).FpEList)).FnExpr)) goto select_end __15: @@ -136546,7 +136581,7 @@ (*SrcItem)(unsafe.Pointer(pItem1)).FaddrFillSub = addrTop Xsqlite3SelectDestInit(tls, bp+96, SRT_Coroutine, (*SrcItem)(unsafe.Pointer(pItem1)).FregReturn) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21140, libc.VaList(bp+32, pItem1)) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21187, libc.VaList(bp+32, pItem1)) Xsqlite3Select(tls, pParse, pSub1, bp+96) (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pItem1)).FpTab)).FnRowLogEst = (*Select)(unsafe.Pointer(pSub1)).FnSelectRow libc.SetBitFieldPtr16Uint32(pItem1+60+4, uint32(1), 5, 0x20) @@ -136605,7 +136640,7 @@ ; Xsqlite3SelectDestInit(tls, bp+96, SRT_EphemTab, (*SrcItem)(unsafe.Pointer(pItem1)).FiCursor) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21155, libc.VaList(bp+40, pItem1)) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21202, libc.VaList(bp+40, pItem1)) Xsqlite3Select(tls, pParse, pSub1, bp+96) (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pItem1)).FpTab)).FnRowLogEst = (*Select)(unsafe.Pointer(pSub1)).FnSelectRow if !(onceAddr != 0) { @@ -137076,9 +137111,9 @@ explainTempTable(tls, pParse, func() uintptr { if (*DistinctCtx)(unsafe.Pointer(bp+136)).FisTnct != 0 && (*Select)(unsafe.Pointer(p)).FselFlags&U32(SF_Distinct) == U32(0) { - return ts + 21171 + return ts + 21218 } - return ts + 21180 + return ts + 21227 }()) groupBySort = 1 @@ -137429,7 +137464,7 @@ if !(int32((*DistinctCtx)(unsafe.Pointer(bp+136)).FeTnctType) == WHERE_DISTINCT_UNORDERED) { goto __146 } - explainTempTable(tls, pParse, ts+21171) + explainTempTable(tls, pParse, ts+21218) __146: ; if !((*SortCtx)(unsafe.Pointer(bp+48)).FpOrderBy != 0) { @@ -137534,7 +137569,7 @@ } Xsqlite3_free(tls, (*TabResult)(unsafe.Pointer(p)).FzErrMsg) (*TabResult)(unsafe.Pointer(p)).FzErrMsg = Xsqlite3_mprintf(tls, - ts+21189, 0) + ts+21236, 0) (*TabResult)(unsafe.Pointer(p)).Frc = SQLITE_ERROR return 1 __11: @@ -137767,7 +137802,7 @@ if !((*Token)(unsafe.Pointer(pName2)).Fn > uint32(0)) { goto __3 } - Xsqlite3ErrorMsg(tls, 
pParse, ts+21254, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+21301, 0) goto trigger_cleanup __3: ; @@ -137811,7 +137846,7 @@ goto trigger_cleanup __8: ; - Xsqlite3FixInit(tls, bp+40, pParse, iDb, ts+21300, *(*uintptr)(unsafe.Pointer(bp + 32))) + Xsqlite3FixInit(tls, bp+40, pParse, iDb, ts+21347, *(*uintptr)(unsafe.Pointer(bp + 32))) if !(Xsqlite3FixSrcList(tls, bp+40, pTableName) != 0) { goto __9 } @@ -137829,7 +137864,7 @@ if !(int32((*Table)(unsafe.Pointer(pTab)).FeTabType) == TABTYP_VTAB) { goto __11 } - Xsqlite3ErrorMsg(tls, pParse, ts+21308, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+21355, 0) goto trigger_orphan_error __11: ; @@ -137841,7 +137876,7 @@ goto trigger_cleanup __12: ; - if !(Xsqlite3CheckObjectName(tls, pParse, zName, ts+21300, (*Table)(unsafe.Pointer(pTab)).FzName) != 0) { + if !(Xsqlite3CheckObjectName(tls, pParse, zName, ts+21347, (*Table)(unsafe.Pointer(pTab)).FzName) != 0) { goto __13 } goto trigger_cleanup @@ -137856,11 +137891,12 @@ if !!(noErr != 0) { goto __16 } - Xsqlite3ErrorMsg(tls, pParse, ts+21349, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(bp + 32)))) + Xsqlite3ErrorMsg(tls, pParse, ts+21396, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(bp + 32)))) goto __17 __16: ; Xsqlite3CodeVerifySchema(tls, pParse, iDb) + __17: ; goto trigger_cleanup @@ -137871,19 +137907,19 @@ if !(Xsqlite3_strnicmp(tls, (*Table)(unsafe.Pointer(pTab)).FzName, ts+7733, 7) == 0) { goto __18 } - Xsqlite3ErrorMsg(tls, pParse, ts+21375, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+21422, 0) goto trigger_cleanup __18: ; if !(int32((*Table)(unsafe.Pointer(pTab)).FeTabType) == TABTYP_VIEW && tr_tm != TK_INSTEAD) { goto __19 } - Xsqlite3ErrorMsg(tls, pParse, ts+21413, + Xsqlite3ErrorMsg(tls, pParse, ts+21460, libc.VaList(bp+8, func() uintptr { if tr_tm == TK_BEFORE { - return ts + 21450 + return ts + 21497 } - return ts + 21457 + return ts + 21504 }(), pTableName+8)) goto trigger_orphan_error __19: @@ -137892,7 +137928,7 @@ goto __20 } Xsqlite3ErrorMsg(tls, pParse, - ts+21463, libc.VaList(bp+24, pTableName+8)) + ts+21510, libc.VaList(bp+24, pTableName+8)) goto trigger_orphan_error __20: ; @@ -138041,7 +138077,7 @@ __3: ; Xsqlite3TokenInit(tls, bp+56, (*Trigger)(unsafe.Pointer(pTrig)).FzName) - Xsqlite3FixInit(tls, bp+72, pParse, iDb, ts+21300, bp+56) + Xsqlite3FixInit(tls, bp+72, pParse, iDb, ts+21347, bp+56) if !(Xsqlite3FixTriggerStep(tls, bp+72, (*Trigger)(unsafe.Pointer(pTrig)).Fstep_list) != 0 || Xsqlite3FixExpr(tls, bp+72, (*Trigger)(unsafe.Pointer(pTrig)).FpWhen) != 0) { goto __4 @@ -138074,7 +138110,7 @@ goto __12 } Xsqlite3ErrorMsg(tls, pParse, - ts+21509, + ts+21556, libc.VaList(bp, (*Trigger)(unsafe.Pointer(pTrig)).FzName, (*TriggerStep)(unsafe.Pointer(pStep)).FzTarget)) goto triggerfinish_cleanup __12: @@ -138099,13 +138135,13 @@ z = Xsqlite3DbStrNDup(tls, db, (*Token)(unsafe.Pointer(pAll)).Fz, uint64((*Token)(unsafe.Pointer(pAll)).Fn)) Xsqlite3NestedParse(tls, pParse, - ts+21557, + ts+21604, libc.VaList(bp+16, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName, zName, (*Trigger)(unsafe.Pointer(pTrig)).Ftable, z)) Xsqlite3DbFree(tls, db, z) Xsqlite3ChangeCookie(tls, pParse, iDb) Xsqlite3VdbeAddParseSchemaOp(tls, v, iDb, - Xsqlite3MPrintf(tls, db, ts+21632, libc.VaList(bp+48, zName)), uint16(0)) + Xsqlite3MPrintf(tls, db, ts+21679, libc.VaList(bp+48, zName)), uint16(0)) __7: ; __6: @@ -138361,7 +138397,7 @@ if !!(noErr != 0) { goto __9 } - Xsqlite3ErrorMsg(tls, pParse, ts+21661, libc.VaList(bp, pName+8)) + Xsqlite3ErrorMsg(tls, pParse, ts+21708, libc.VaList(bp, 
pName+8)) goto __10 __9: Xsqlite3CodeVerifyNamedSchema(tls, pParse, zDb) @@ -138414,7 +138450,7 @@ if libc.AssignUintptr(&v, Xsqlite3GetVdbe(tls, pParse)) != uintptr(0) { Xsqlite3NestedParse(tls, pParse, - ts+21681, + ts+21728, libc.VaList(bp, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName, (*Trigger)(unsafe.Pointer(pTrigger)).FzName)) Xsqlite3ChangeCookie(tls, pParse, iDb) Xsqlite3VdbeAddOp4(tls, v, OP_DropTrigger, iDb, 0, 0, (*Trigger)(unsafe.Pointer(pTrigger)).FzName, 0) @@ -138528,12 +138564,12 @@ goto __15 } Xsqlite3ErrorMsg(tls, pParse, - ts+21743, + ts+21790, libc.VaList(bp, func() uintptr { if op == TK_DELETE { - return ts + 21791 + return ts + 21838 } - return ts + 21798 + return ts + 21845 }())) __15: ; @@ -138647,7 +138683,7 @@ if int32((*Expr)(unsafe.Pointer((*Expr)(unsafe.Pointer(pTerm)).FpRight)).Fop) != TK_ASTERISK { return 0 } - Xsqlite3ErrorMsg(tls, pParse, ts+21805, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+21852, 0) return 1 } @@ -138713,7 +138749,7 @@ } Xsqlite3ExprListDelete(tls, db, (*Select)(unsafe.Pointer(bp)).FpEList) pNew = sqlite3ExpandReturning(tls, pParse, (*Returning)(unsafe.Pointer(pReturning)).FpReturnEL, pTab) - if !(int32((*Sqlite3)(unsafe.Pointer(db)).FmallocFailed) != 0) { + if (*Parse)(unsafe.Pointer(pParse)).FnErr == 0 { libc.Xmemset(tls, bp+240, 0, uint64(unsafe.Sizeof(NameContext{}))) if (*Returning)(unsafe.Pointer(pReturning)).FnRetCol == 0 { (*Returning)(unsafe.Pointer(pReturning)).FnRetCol = (*ExprList)(unsafe.Pointer(pNew)).FnExpr @@ -138877,7 +138913,7 @@ if v != 0 { if (*Trigger)(unsafe.Pointer(pTrigger)).FzName != 0 { Xsqlite3VdbeChangeP4(tls, v, -1, - Xsqlite3MPrintf(tls, db, ts+21847, libc.VaList(bp, (*Trigger)(unsafe.Pointer(pTrigger)).FzName)), -6) + Xsqlite3MPrintf(tls, db, ts+21894, libc.VaList(bp, (*Trigger)(unsafe.Pointer(pTrigger)).FzName)), -6) } if (*Trigger)(unsafe.Pointer(pTrigger)).FpWhen != 0 { @@ -139470,7 +139506,7 @@ } Xsqlite3ErrorMsg(tls, pParse, - ts+21861, + ts+21908, libc.VaList(bp, (*Column)(unsafe.Pointer((*Table)(unsafe.Pointer(pTab)).FaCol+uintptr(j)*24)).FzCnName)) goto update_cleanup __27: @@ -139502,7 +139538,7 @@ iRowidExpr = i goto __30 __29: - Xsqlite3ErrorMsg(tls, pParse, ts+21897, libc.VaList(bp+8, (*ExprList_item)(unsafe.Pointer(pChanges+8+uintptr(i)*32)).FzEName)) + Xsqlite3ErrorMsg(tls, pParse, ts+21944, libc.VaList(bp+8, (*ExprList_item)(unsafe.Pointer(pChanges+8+uintptr(i)*32)).FzEName)) (*Parse)(unsafe.Pointer(pParse)).FcheckSchema = U8(1) goto update_cleanup __30: @@ -139828,7 +139864,12 @@ goto __77 __76: flags = WHERE_ONEPASS_DESIRED - if !(!(int32((*Parse)(unsafe.Pointer(pParse)).Fnested) != 0) && !(pTrigger != 0) && !(hasFK != 0) && !(chngKey != 0) && !(*(*int32)(unsafe.Pointer(bp + 104)) != 0)) { + if !(!(int32((*Parse)(unsafe.Pointer(pParse)).Fnested) != 0) && + !(pTrigger != 0) && + !(hasFK != 0) && + !(chngKey != 0) && + !(*(*int32)(unsafe.Pointer(bp + 104)) != 0) && + (*NameContext)(unsafe.Pointer(bp+40)).FncFlags&NC_Subquery == 0) { goto __78 } flags = flags | WHERE_ONEPASS_MULTIROW @@ -140382,7 +140423,7 @@ if !(regRowCount != 0) { goto __169 } - Xsqlite3CodeChangeCount(tls, v, regRowCount, ts+21916) + Xsqlite3CodeChangeCount(tls, v, regRowCount, ts+21963) __169: ; update_cleanup: @@ -140688,10 +140729,10 @@ if nClause == 0 && (*Upsert)(unsafe.Pointer(pUpsert)).FpNextUpsert == uintptr(0) { *(*int8)(unsafe.Pointer(bp + 216)) = int8(0) } else { - Xsqlite3_snprintf(tls, int32(unsafe.Sizeof([16]int8{})), bp+216, ts+21929, libc.VaList(bp, nClause+1)) + 
Xsqlite3_snprintf(tls, int32(unsafe.Sizeof([16]int8{})), bp+216, ts+21976, libc.VaList(bp, nClause+1)) } Xsqlite3ErrorMsg(tls, pParse, - ts+21933, libc.VaList(bp+8, bp+216)) + ts+21980, libc.VaList(bp+8, bp+216)) return SQLITE_ERROR } @@ -140814,7 +140855,7 @@ var zSubSql uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp)), 0) if zSubSql != 0 && - (libc.Xstrncmp(tls, zSubSql, ts+22006, uint64(3)) == 0 || libc.Xstrncmp(tls, zSubSql, ts+22010, uint64(3)) == 0) { + (libc.Xstrncmp(tls, zSubSql, ts+22053, uint64(3)) == 0 || libc.Xstrncmp(tls, zSubSql, ts+22057, uint64(3)) == 0) { rc = execSql(tls, db, pzErrMsg, zSubSql) if rc != SQLITE_OK { break @@ -140962,14 +141003,14 @@ if !!(int32((*Sqlite3)(unsafe.Pointer(db)).FautoCommit) != 0) { goto __1 } - Xsqlite3SetString(tls, pzErrMsg, db, ts+22014) + Xsqlite3SetString(tls, pzErrMsg, db, ts+22061) return SQLITE_ERROR __1: ; if !((*Sqlite3)(unsafe.Pointer(db)).FnVdbeActive > 1) { goto __2 } - Xsqlite3SetString(tls, pzErrMsg, db, ts+22054) + Xsqlite3SetString(tls, pzErrMsg, db, ts+22101) return SQLITE_ERROR __2: ; @@ -140980,7 +141021,7 @@ if !(Xsqlite3_value_type(tls, pOut) != SQLITE_TEXT) { goto __5 } - Xsqlite3SetString(tls, pzErrMsg, db, ts+22097) + Xsqlite3SetString(tls, pzErrMsg, db, ts+22144) return SQLITE_ERROR __5: ; @@ -141008,7 +141049,7 @@ isMemDb = Xsqlite3PagerIsMemdb(tls, Xsqlite3BtreePager(tls, pMain)) nDb = (*Sqlite3)(unsafe.Pointer(db)).FnDb - rc = execSqlF(tls, db, pzErrMsg, ts+22115, libc.VaList(bp, zOut)) + rc = execSqlF(tls, db, pzErrMsg, ts+22162, libc.VaList(bp, zOut)) (*Sqlite3)(unsafe.Pointer(db)).FopenFlags = saved_openFlags if !(rc != SQLITE_OK) { goto __6 @@ -141028,7 +141069,7 @@ goto __8 } rc = SQLITE_ERROR - Xsqlite3SetString(tls, pzErrMsg, db, ts+22138) + Xsqlite3SetString(tls, pzErrMsg, db, ts+22185) goto end_of_vacuum __8: ; @@ -141088,7 +141129,7 @@ (*Sqlite3)(unsafe.Pointer(db)).Finit.FiDb = U8(nDb) rc = execSqlF(tls, db, pzErrMsg, - ts+22165, + ts+22212, libc.VaList(bp+8, zDbMain)) if !(rc != SQLITE_OK) { goto __13 @@ -141097,7 +141138,7 @@ __13: ; rc = execSqlF(tls, db, pzErrMsg, - ts+22273, + ts+22320, libc.VaList(bp+16, zDbMain)) if !(rc != SQLITE_OK) { goto __14 @@ -141108,7 +141149,7 @@ (*Sqlite3)(unsafe.Pointer(db)).Finit.FiDb = U8(0) rc = execSqlF(tls, db, pzErrMsg, - ts+22327, + ts+22374, libc.VaList(bp+24, zDbMain)) *(*U32)(unsafe.Pointer(db + 44)) &= libc.Uint32FromInt32(libc.CplInt32(DBFLAG_Vacuum)) @@ -141119,7 +141160,7 @@ __15: ; rc = execSqlF(tls, db, pzErrMsg, - ts+22478, + ts+22525, libc.VaList(bp+32, zDbMain)) if !(rc != 0) { goto __16 @@ -141548,11 +141589,11 @@ if pEnd != 0 { (*Parse)(unsafe.Pointer(pParse)).FsNameToken.Fn = uint32(int32((int64((*Token)(unsafe.Pointer(pEnd)).Fz)-int64((*Parse)(unsafe.Pointer(pParse)).FsNameToken.Fz))/1)) + (*Token)(unsafe.Pointer(pEnd)).Fn } - zStmt = Xsqlite3MPrintf(tls, db, ts+22608, libc.VaList(bp, pParse+272)) + zStmt = Xsqlite3MPrintf(tls, db, ts+22655, libc.VaList(bp, pParse+272)) iDb = Xsqlite3SchemaToIndex(tls, db, (*Table)(unsafe.Pointer(pTab)).FpSchema) Xsqlite3NestedParse(tls, pParse, - ts+22632, + ts+22679, libc.VaList(bp+8, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName, (*Table)(unsafe.Pointer(pTab)).FzName, (*Table)(unsafe.Pointer(pTab)).FzName, @@ -141562,7 +141603,7 @@ Xsqlite3ChangeCookie(tls, pParse, iDb) Xsqlite3VdbeAddOp0(tls, v, OP_Expire) - zWhere = Xsqlite3MPrintf(tls, db, ts+22731, libc.VaList(bp+48, (*Table)(unsafe.Pointer(pTab)).FzName, zStmt)) + zWhere = Xsqlite3MPrintf(tls, db, 
ts+22778, libc.VaList(bp+48, (*Table)(unsafe.Pointer(pTab)).FzName, zStmt)) Xsqlite3VdbeAddParseSchemaOp(tls, v, iDb, zWhere, uint16(0)) Xsqlite3DbFree(tls, db, zStmt) @@ -141623,7 +141664,7 @@ for pCtx = (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx; pCtx != 0; pCtx = (*VtabCtx)(unsafe.Pointer(pCtx)).FpPrior { if (*VtabCtx)(unsafe.Pointer(pCtx)).FpTab == pTab { *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, - ts+22750, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName)) + ts+22797, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName)) return SQLITE_LOCKED } } @@ -141651,9 +141692,11 @@ (*VtabCtx)(unsafe.Pointer(bp + 32)).FpPrior = (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx (*VtabCtx)(unsafe.Pointer(bp + 32)).FbDeclared = 0 (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx = bp + 32 + (*Table)(unsafe.Pointer(pTab)).FnTabRef++ rc = (*struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, uintptr, uintptr) int32 })(unsafe.Pointer(&struct{ uintptr }{xConstruct})).f(tls, db, (*Module)(unsafe.Pointer(pMod)).FpAux, nArg, azArg, pVTable+16, bp+64) + Xsqlite3DeleteTable(tls, db, pTab) (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx = (*VtabCtx)(unsafe.Pointer(bp + 32)).FpPrior if rc == SQLITE_NOMEM { Xsqlite3OomFault(tls, db) @@ -141661,7 +141704,7 @@ if SQLITE_OK != rc { if *(*uintptr)(unsafe.Pointer(bp + 64)) == uintptr(0) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+22792, libc.VaList(bp+8, zModuleName)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+22839, libc.VaList(bp+8, zModuleName)) } else { *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+4493, libc.VaList(bp+16, *(*uintptr)(unsafe.Pointer(bp + 64)))) Xsqlite3_free(tls, *(*uintptr)(unsafe.Pointer(bp + 64))) @@ -141673,7 +141716,7 @@ (*Module)(unsafe.Pointer(pMod)).FnRefModule++ (*VTable)(unsafe.Pointer(pVTable)).FnRef = 1 if (*VtabCtx)(unsafe.Pointer(bp+32)).FbDeclared == 0 { - var zFormat uintptr = ts + 22822 + var zFormat uintptr = ts + 22869 *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, zFormat, libc.VaList(bp+24, (*Table)(unsafe.Pointer(pTab)).FzName)) Xsqlite3VtabUnlock(tls, pVTable) rc = SQLITE_ERROR @@ -141747,7 +141790,7 @@ if !(pMod != 0) { var zModule uintptr = *(*uintptr)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pTab + 64 + 8)))) - Xsqlite3ErrorMsg(tls, pParse, ts+22868, libc.VaList(bp, zModule)) + Xsqlite3ErrorMsg(tls, pParse, ts+22915, libc.VaList(bp, zModule)) rc = SQLITE_ERROR } else { *(*uintptr)(unsafe.Pointer(bp + 16)) = uintptr(0) @@ -141805,7 +141848,7 @@ pMod = Xsqlite3HashFind(tls, db+576, zMod) if pMod == uintptr(0) || (*Sqlite3_module)(unsafe.Pointer((*Module)(unsafe.Pointer(pMod)).FpModule)).FxCreate == uintptr(0) || (*Sqlite3_module)(unsafe.Pointer((*Module)(unsafe.Pointer(pMod)).FpModule)).FxDestroy == uintptr(0) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+22868, libc.VaList(bp, zMod)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+22915, libc.VaList(bp, zMod)) rc = SQLITE_ERROR } else { rc = vtabCallConstructor(tls, db, pTab, pMod, (*Sqlite3_module)(unsafe.Pointer((*Module)(unsafe.Pointer(pMod)).FpModule)).FxCreate, pzErr) @@ -141839,7 +141882,7 @@ if !(pCtx != 0) || (*VtabCtx)(unsafe.Pointer(pCtx)).FbDeclared != 0 { Xsqlite3Error(tls, db, SQLITE_MISUSE) Xsqlite3_mutex_leave(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) - return Xsqlite3MisuseError(tls, 151030) + return Xsqlite3MisuseError(tls, 151102) } pTab = (*VtabCtx)(unsafe.Pointer(pCtx)).FpTab @@ -142292,7 +142335,7 @@ 
Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) p = (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx if !(p != 0) { - rc = Xsqlite3MisuseError(tls, 151521) + rc = Xsqlite3MisuseError(tls, 151593) } else { ap = va switch op { @@ -142319,7 +142362,7 @@ fallthrough default: { - rc = Xsqlite3MisuseError(tls, 151539) + rc = Xsqlite3MisuseError(tls, 151611) break } @@ -142550,7 +142593,7 @@ func explainIndexColumnName(tls *libc.TLS, pIdx uintptr, i int32) uintptr { i = int32(*(*I16)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx)).FaiColumn + uintptr(i)*2))) if i == -2 { - return ts + 22887 + return ts + 22934 } if i == -1 { return ts + 17625 @@ -142562,11 +142605,11 @@ var i int32 if bAnd != 0 { - Xsqlite3_str_append(tls, pStr, ts+22894, 5) + Xsqlite3_str_append(tls, pStr, ts+22941, 5) } if nTerm > 1 { - Xsqlite3_str_append(tls, pStr, ts+22900, 1) + Xsqlite3_str_append(tls, pStr, ts+22947, 1) } for i = 0; i < nTerm; i++ { if i != 0 { @@ -142581,7 +142624,7 @@ Xsqlite3_str_append(tls, pStr, zOp, 1) if nTerm > 1 { - Xsqlite3_str_append(tls, pStr, ts+22900, 1) + Xsqlite3_str_append(tls, pStr, ts+22947, 1) } for i = 0; i < nTerm; i++ { if i != 0 { @@ -142607,27 +142650,27 @@ if int32(nEq) == 0 && (*WhereLoop)(unsafe.Pointer(pLoop)).FwsFlags&U32(WHERE_BTM_LIMIT|WHERE_TOP_LIMIT) == U32(0) { return } - Xsqlite3_str_append(tls, pStr, ts+22902, 2) + Xsqlite3_str_append(tls, pStr, ts+22949, 2) for i = 0; i < int32(nEq); i++ { var z uintptr = explainIndexColumnName(tls, pIndex, i) if i != 0 { - Xsqlite3_str_append(tls, pStr, ts+22894, 5) + Xsqlite3_str_append(tls, pStr, ts+22941, 5) } Xsqlite3_str_appendf(tls, pStr, func() uintptr { if i >= int32(nSkip) { - return ts + 22905 + return ts + 22952 } - return ts + 22910 + return ts + 22957 }(), libc.VaList(bp, z)) } j = i if (*WhereLoop)(unsafe.Pointer(pLoop)).FwsFlags&U32(WHERE_BTM_LIMIT) != 0 { - explainAppendTerm(tls, pStr, pIndex, int32(*(*U16)(unsafe.Pointer(pLoop + 24 + 2))), j, i, ts+22918) + explainAppendTerm(tls, pStr, pIndex, int32(*(*U16)(unsafe.Pointer(pLoop + 24 + 2))), j, i, ts+22965) i = 1 } if (*WhereLoop)(unsafe.Pointer(pLoop)).FwsFlags&U32(WHERE_TOP_LIMIT) != 0 { - explainAppendTerm(tls, pStr, pIndex, int32(*(*U16)(unsafe.Pointer(pLoop + 24 + 4))), j, i, ts+22920) + explainAppendTerm(tls, pStr, pIndex, int32(*(*U16)(unsafe.Pointer(pLoop + 24 + 4))), j, i, ts+22967) } Xsqlite3_str_append(tls, pStr, ts+6309, 1) } @@ -142670,11 +142713,11 @@ Xsqlite3StrAccumInit(tls, bp+64, db, bp+96, int32(unsafe.Sizeof([100]int8{})), SQLITE_MAX_LENGTH) (*StrAccum)(unsafe.Pointer(bp + 64)).FprintfFlags = U8(SQLITE_PRINTF_INTERNAL) - Xsqlite3_str_appendf(tls, bp+64, ts+22922, libc.VaList(bp, func() uintptr { + Xsqlite3_str_appendf(tls, bp+64, ts+22969, libc.VaList(bp, func() uintptr { if isSearch != 0 { - return ts + 22928 + return ts + 22975 } - return ts + 22935 + return ts + 22982 }(), pItem)) if flags&U32(WHERE_IPK|WHERE_VIRTUALTABLE) == U32(0) { var zFmt uintptr = uintptr(0) @@ -142687,40 +142730,40 @@ zFmt = ts + 12328 } } else if flags&U32(WHERE_PARTIALIDX) != 0 { - zFmt = ts + 22940 + zFmt = ts + 22987 } else if flags&U32(WHERE_AUTO_INDEX) != 0 { - zFmt = ts + 22973 + zFmt = ts + 23020 } else if flags&U32(WHERE_IDX_ONLY) != 0 { - zFmt = ts + 22998 + zFmt = ts + 23045 } else { - zFmt = ts + 23016 + zFmt = ts + 23063 } if zFmt != 0 { - Xsqlite3_str_append(tls, bp+64, ts+23025, 7) + Xsqlite3_str_append(tls, bp+64, ts+23072, 7) Xsqlite3_str_appendf(tls, bp+64, zFmt, libc.VaList(bp+16, (*Index)(unsafe.Pointer(pIdx)).FzName)) explainIndexRange(tls, 
bp+64, pLoop) } } else if flags&U32(WHERE_IPK) != U32(0) && flags&U32(WHERE_CONSTRAINT) != U32(0) { var cRangeOp int8 var zRowid uintptr = ts + 17625 - Xsqlite3_str_appendf(tls, bp+64, ts+23033, libc.VaList(bp+24, zRowid)) + Xsqlite3_str_appendf(tls, bp+64, ts+23080, libc.VaList(bp+24, zRowid)) if flags&U32(WHERE_COLUMN_EQ|WHERE_COLUMN_IN) != 0 { cRangeOp = int8('=') } else if flags&U32(WHERE_BOTH_LIMIT) == U32(WHERE_BOTH_LIMIT) { - Xsqlite3_str_appendf(tls, bp+64, ts+23064, libc.VaList(bp+32, zRowid)) + Xsqlite3_str_appendf(tls, bp+64, ts+23111, libc.VaList(bp+32, zRowid)) cRangeOp = int8('<') } else if flags&U32(WHERE_BTM_LIMIT) != 0 { cRangeOp = int8('>') } else { cRangeOp = int8('<') } - Xsqlite3_str_appendf(tls, bp+64, ts+23074, libc.VaList(bp+40, int32(cRangeOp))) + Xsqlite3_str_appendf(tls, bp+64, ts+23121, libc.VaList(bp+40, int32(cRangeOp))) } else if flags&U32(WHERE_VIRTUALTABLE) != U32(0) { - Xsqlite3_str_appendf(tls, bp+64, ts+23079, + Xsqlite3_str_appendf(tls, bp+64, ts+23126, libc.VaList(bp+48, *(*int32)(unsafe.Pointer(pLoop + 24)), *(*uintptr)(unsafe.Pointer(pLoop + 24 + 16)))) } if int32((*SrcItem)(unsafe.Pointer(pItem)).Ffg.Fjointype)&JT_LEFT != 0 { - Xsqlite3_str_appendf(tls, bp+64, ts+23106, 0) + Xsqlite3_str_appendf(tls, bp+64, ts+23153, 0) } zMsg = Xsqlite3StrAccumFinish(tls, bp+64) @@ -142752,22 +142795,22 @@ Xsqlite3StrAccumInit(tls, bp+24, db, bp+56, int32(unsafe.Sizeof([100]int8{})), SQLITE_MAX_LENGTH) (*StrAccum)(unsafe.Pointer(bp + 24)).FprintfFlags = U8(SQLITE_PRINTF_INTERNAL) - Xsqlite3_str_appendf(tls, bp+24, ts+23117, libc.VaList(bp, pItem)) + Xsqlite3_str_appendf(tls, bp+24, ts+23164, libc.VaList(bp, pItem)) pLoop = (*WhereLevel)(unsafe.Pointer(pLevel)).FpWLoop if (*WhereLoop)(unsafe.Pointer(pLoop)).FwsFlags&U32(WHERE_IPK) != 0 { var pTab uintptr = (*SrcItem)(unsafe.Pointer(pItem)).FpTab if int32((*Table)(unsafe.Pointer(pTab)).FiPKey) >= 0 { - Xsqlite3_str_appendf(tls, bp+24, ts+22905, libc.VaList(bp+8, (*Column)(unsafe.Pointer((*Table)(unsafe.Pointer(pTab)).FaCol+uintptr((*Table)(unsafe.Pointer(pTab)).FiPKey)*24)).FzCnName)) + Xsqlite3_str_appendf(tls, bp+24, ts+22952, libc.VaList(bp+8, (*Column)(unsafe.Pointer((*Table)(unsafe.Pointer(pTab)).FaCol+uintptr((*Table)(unsafe.Pointer(pTab)).FiPKey)*24)).FzCnName)) } else { - Xsqlite3_str_appendf(tls, bp+24, ts+23138, 0) + Xsqlite3_str_appendf(tls, bp+24, ts+23185, 0) } } else { for i = int32((*WhereLoop)(unsafe.Pointer(pLoop)).FnSkip); i < int32(*(*U16)(unsafe.Pointer(pLoop + 24))); i++ { var z uintptr = explainIndexColumnName(tls, *(*uintptr)(unsafe.Pointer(pLoop + 24 + 8)), i) if i > int32((*WhereLoop)(unsafe.Pointer(pLoop)).FnSkip) { - Xsqlite3_str_append(tls, bp+24, ts+22894, 5) + Xsqlite3_str_append(tls, bp+24, ts+22941, 5) } - Xsqlite3_str_appendf(tls, bp+24, ts+22905, libc.VaList(bp+16, z)) + Xsqlite3_str_appendf(tls, bp+24, ts+22952, libc.VaList(bp+16, z)) } } Xsqlite3_str_append(tls, bp+24, ts+6309, 1) @@ -144364,7 +144407,7 @@ ; __126: ; - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+23146, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+23193, 0) ii = 0 __135: if !(ii < (*WhereClause)(unsafe.Pointer(pOrWc)).FnTerm) { @@ -144392,7 +144435,7 @@ pOrExpr = pAndExpr __140: ; - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+23161, libc.VaList(bp, ii+1)) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+23208, libc.VaList(bp, ii+1)) pSubWInfo = Xsqlite3WhereBegin(tls, pParse, pOrTab, pOrExpr, uintptr(0), uintptr(0), uintptr(0), uint16(WHERE_OR_SUBCLAUSE), iCovCur) @@ -144910,7 +144953,7 @@ var mAll Bitmask 
= uint64(0) var k int32 - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+23170, libc.VaList(bp, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pTabItem)).FpTab)).FzName)) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+23217, libc.VaList(bp, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pTabItem)).FpTab)).FzName)) for k = 0; k < iLevel; k++ { var iIdxCur int32 @@ -145271,7 +145314,7 @@ {FzOp: ts + 17474, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_MATCH)}, {FzOp: ts + 16804, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_GLOB)}, {FzOp: ts + 16324, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_LIKE)}, - {FzOp: ts + 23184, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_REGEXP)}, + {FzOp: ts + 23231, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_REGEXP)}, } func transferJoinMarkings(tls *libc.TLS, pDerived uintptr, pBase uintptr) { @@ -145761,12 +145804,12 @@ extraRight = x - uint64(1) if prereqAll>>1 >= x { - Xsqlite3ErrorMsg(tls, pParse, ts+23191, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+23238, 0) return } } else if prereqAll>>1 >= x { if (*SrcList)(unsafe.Pointer(pSrc)).FnSrc > 0 && int32((*SrcItem)(unsafe.Pointer(pSrc+8)).Ffg.Fjointype)&JT_LTORJ != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+23191, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+23238, 0) return } *(*U32)(unsafe.Pointer(pExpr + 4)) &= libc.Uint32FromInt32(libc.CplInt32(EP_InnerON)) @@ -145845,7 +145888,7 @@ !((*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_OuterON) != U32(0)) && 0 == Xsqlite3ExprCanBeNull(tls, pLeft) { (*Expr)(unsafe.Pointer(pExpr)).Fop = U8(TK_TRUEFALSE) - *(*uintptr)(unsafe.Pointer(pExpr + 8)) = ts + 8121 + *(*uintptr)(unsafe.Pointer(pExpr + 8)) = ts + 9051 *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_IsFalse) (*WhereTerm)(unsafe.Pointer(pTerm)).FprereqAll = uint64(0) (*WhereTerm)(unsafe.Pointer(pTerm)).FeOperator = U16(0) @@ -145939,7 +145982,7 @@ } zCollSeqName = func() uintptr { if *(*int32)(unsafe.Pointer(bp + 20)) != 0 { - return ts + 23232 + return ts + 23279 } return uintptr(unsafe.Pointer(&Xsqlite3StrBINARY)) }() @@ -146315,7 +146358,7 @@ k++ } if k >= int32((*Table)(unsafe.Pointer(pTab)).FnCol) { - Xsqlite3ErrorMsg(tls, pParse, ts+23239, + Xsqlite3ErrorMsg(tls, pParse, ts+23286, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName, j)) return } @@ -146331,7 +146374,7 @@ pRhs = Xsqlite3PExpr(tls, pParse, TK_UPLUS, Xsqlite3ExprDup(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, (*ExprList_item)(unsafe.Pointer(pArgs+8+uintptr(j)*32)).FpExpr, 0), uintptr(0)) pTerm = Xsqlite3PExpr(tls, pParse, TK_EQ, pColRef, pRhs) - if int32((*SrcItem)(unsafe.Pointer(pItem)).Ffg.Fjointype)&(JT_LEFT|JT_LTORJ) != 0 { + if int32((*SrcItem)(unsafe.Pointer(pItem)).Ffg.Fjointype)&(JT_LEFT|JT_LTORJ|JT_RIGHT) != 0 { joinType = U32(EP_OuterON) } else { joinType = U32(EP_InnerON) @@ -147049,7 +147092,7 @@ goto __6 } Xsqlite3_log(tls, SQLITE_WARNING|int32(1)<<8, - ts+23275, libc.VaList(bp, (*Table)(unsafe.Pointer(pTable)).FzName, + ts+23322, libc.VaList(bp, (*Table)(unsafe.Pointer(pTable)).FzName, (*Column)(unsafe.Pointer((*Table)(unsafe.Pointer(pTable)).FaCol+uintptr(iCol)*24)).FzCnName)) sentWarning = U8(1) __6: @@ -147120,7 +147163,7 @@ __14: ; *(*uintptr)(unsafe.Pointer(pLoop + 24 + 8)) = pIdx - (*Index)(unsafe.Pointer(pIdx)).FzName = ts + 23301 + (*Index)(unsafe.Pointer(pIdx)).FzName = ts + 23348 (*Index)(unsafe.Pointer(pIdx)).FpTable = pTable n = 0 idxCols = uint64(0) @@ -147294,6 +147337,10 @@ var v uintptr = (*Parse)(unsafe.Pointer(pParse)).FpVdbe var pLoop uintptr = (*WhereLevel)(unsafe.Pointer(pLevel)).FpWLoop var iCur int32 + var saved_pIdxEpr uintptr + + saved_pIdxEpr = 
(*Parse)(unsafe.Pointer(pParse)).FpIdxEpr + (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr = uintptr(0) addrOnce = Xsqlite3VdbeAddOp0(tls, v, OP_Once) for __ccgo := true; __ccgo; __ccgo = iLevel < int32((*WhereInfo)(unsafe.Pointer(pWInfo)).FnLevel) { @@ -147337,9 +147384,7 @@ var r1 int32 = Xsqlite3GetTempRange(tls, pParse, n) var jj int32 for jj = 0; jj < n; jj++ { - var iCol int32 = int32(*(*I16)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx)).FaiColumn + uintptr(jj)*2))) - - Xsqlite3ExprCodeGetColumnOfTable(tls, v, (*Index)(unsafe.Pointer(pIdx)).FpTable, iCur, iCol, r1+jj) + Xsqlite3ExprCodeLoadIndexColumn(tls, pParse, pIdx, iCur, jj, r1+jj) } Xsqlite3VdbeAddOp4Int(tls, v, OP_FilterAdd, (*WhereLevel)(unsafe.Pointer(pLevel)).FregFilter, 0, r1, n) Xsqlite3ReleaseTempRange(tls, pParse, r1, n) @@ -147373,6 +147418,7 @@ } } Xsqlite3VdbeJumpHere(tls, v, addrOnce) + (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr = saved_pIdxEpr } func allocateIndexInfo(tls *libc.TLS, pWInfo uintptr, pWC uintptr, mUnusable Bitmask, pSrc uintptr, pmNoOmit uintptr) uintptr { @@ -147631,11 +147677,16 @@ _ = pParse + if !((*Table)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx)).FpTable)).FtabFlags&U32(TF_WithoutRowid) == U32(0)) && int32(*(*uint16)(unsafe.Pointer(pIdx + 100))&0x3>>0) == SQLITE_IDXTYPE_PRIMARYKEY { + nField = int32((*Index)(unsafe.Pointer(pIdx)).FnKeyCol) + } else { + nField = int32((*Index)(unsafe.Pointer(pIdx)).FnColumn) + } nField = func() int32 { - if int32((*UnpackedRecord)(unsafe.Pointer(pRec)).FnField) < (*Index)(unsafe.Pointer(pIdx)).FnSample { + if int32((*UnpackedRecord)(unsafe.Pointer(pRec)).FnField) < nField { return int32((*UnpackedRecord)(unsafe.Pointer(pRec)).FnField) } - return (*Index)(unsafe.Pointer(pIdx)).FnSample + return nField }() iCol = 0 iSample = (*Index)(unsafe.Pointer(pIdx)).FnSample * nField @@ -149216,7 +149267,7 @@ j >= (*WhereClause)(unsafe.Pointer(pWC)).FnTerm || *(*uintptr)(unsafe.Pointer((*WhereLoop)(unsafe.Pointer(pNew)).FaLTerm + uintptr(iTerm)*8)) != uintptr(0) || int32((*sqlite3_index_constraint)(unsafe.Pointer(pIdxCons)).Fusable) == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+23312, libc.VaList(bp, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pSrc)).FpTab)).FzName)) + Xsqlite3ErrorMsg(tls, pParse, ts+23359, libc.VaList(bp, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pSrc)).FpTab)).FzName)) return SQLITE_ERROR } @@ -149274,7 +149325,7 @@ (*WhereLoop)(unsafe.Pointer(pNew)).FnLTerm = U16(mxTerm + 1) for i = 0; i <= mxTerm; i++ { if *(*uintptr)(unsafe.Pointer((*WhereLoop)(unsafe.Pointer(pNew)).FaLTerm + uintptr(i)*8)) == uintptr(0) { - Xsqlite3ErrorMsg(tls, pParse, ts+23312, libc.VaList(bp+8, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pSrc)).FpTab)).FzName)) + Xsqlite3ErrorMsg(tls, pParse, ts+23359, libc.VaList(bp+8, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pSrc)).FpTab)).FzName)) return SQLITE_ERROR } @@ -149672,7 +149723,7 @@ mPrior = mPrior | (*WhereLoop)(unsafe.Pointer(pNew)).FmaskSelf if rc != 0 || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { if rc == SQLITE_DONE { - Xsqlite3_log(tls, SQLITE_WARNING, ts+23338, 0) + Xsqlite3_log(tls, SQLITE_WARNING, ts+23385, 0) rc = SQLITE_OK } else { goto __3 @@ -150279,7 +150330,7 @@ } if nFrom == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+23373, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+23420, 0) Xsqlite3DbFreeNN(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pSpace) return SQLITE_ERROR } @@ -150314,6 +150365,10 @@ if int32((*WherePath)(unsafe.Pointer(pFrom)).FisOrdered) == 
(*ExprList)(unsafe.Pointer((*WhereInfo)(unsafe.Pointer(pWInfo)).FpOrderBy)).FnExpr { (*WhereInfo)(unsafe.Pointer(pWInfo)).FeDistinct = U8(WHERE_DISTINCT_ORDERED) } + if (*Select)(unsafe.Pointer((*WhereInfo)(unsafe.Pointer(pWInfo)).FpSelect)).FpOrderBy != 0 && + int32((*WhereInfo)(unsafe.Pointer(pWInfo)).FnOBSat) > (*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer((*WhereInfo)(unsafe.Pointer(pWInfo)).FpSelect)).FpOrderBy)).FnExpr { + (*WhereInfo)(unsafe.Pointer(pWInfo)).FnOBSat = I8((*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer((*WhereInfo)(unsafe.Pointer(pWInfo)).FpSelect)).FpOrderBy)).FnExpr) + } } else { (*WhereInfo)(unsafe.Pointer(pWInfo)).FrevMask = (*WherePath)(unsafe.Pointer(pFrom)).FrevLoop if int32((*WhereInfo)(unsafe.Pointer(pWInfo)).FnOBSat) <= 0 { @@ -150608,6 +150663,9 @@ (*IndexedExpr)(unsafe.Pointer(p)).FiIdxCur = iIdxCur (*IndexedExpr)(unsafe.Pointer(p)).FiIdxCol = i (*IndexedExpr)(unsafe.Pointer(p)).FbMaybeNullRow = U8(bMaybeNullRow) + if Xsqlite3IndexAffinityStr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pIdx) != 0 { + (*IndexedExpr)(unsafe.Pointer(p)).Faff = U8(*(*int8)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx)).FzColAff + uintptr(i)))) + } (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr = p if (*IndexedExpr)(unsafe.Pointer(p)).FpIENext == uintptr(0) { Xsqlite3ParserAddCleanup(tls, pParse, *(*uintptr)(unsafe.Pointer(&struct { @@ -150760,7 +150818,7 @@ if !((*SrcList)(unsafe.Pointer(pTabList)).FnSrc > int32(uint64(unsafe.Sizeof(Bitmask(0)))*uint64(8))) { goto __2 } - Xsqlite3ErrorMsg(tls, pParse, ts+23391, libc.VaList(bp, int32(uint64(unsafe.Sizeof(Bitmask(0)))*uint64(8)))) + Xsqlite3ErrorMsg(tls, pParse, ts+23438, libc.VaList(bp, int32(uint64(unsafe.Sizeof(Bitmask(0)))*uint64(8)))) return uintptr(0) __2: ; @@ -150824,7 +150882,7 @@ (*WhereInfo)(unsafe.Pointer(pWInfo)).FeDistinct = U8(WHERE_DISTINCT_UNIQUE) __7: ; - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+23419, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+23466, 0) goto __5 __4: ii = 0 @@ -151706,7 +151764,7 @@ error_out: Xsqlite3_result_error(tls, - pCtx, ts+23437, -1) + pCtx, ts+23484, -1) } func nth_valueFinalizeFunc(tls *libc.TLS, pCtx uintptr) { @@ -151839,7 +151897,7 @@ (*NtileCtx)(unsafe.Pointer(p)).FnParam = Xsqlite3_value_int64(tls, *(*uintptr)(unsafe.Pointer(apArg))) if (*NtileCtx)(unsafe.Pointer(p)).FnParam <= int64(0) { Xsqlite3_result_error(tls, - pCtx, ts+23493, -1) + pCtx, ts+23540, -1) } } (*NtileCtx)(unsafe.Pointer(p)).FnTotal++ @@ -151929,17 +151987,17 @@ } } -var row_numberName = *(*[11]int8)(unsafe.Pointer(ts + 23538)) -var dense_rankName = *(*[11]int8)(unsafe.Pointer(ts + 23549)) -var rankName = *(*[5]int8)(unsafe.Pointer(ts + 23560)) -var percent_rankName = *(*[13]int8)(unsafe.Pointer(ts + 23565)) -var cume_distName = *(*[10]int8)(unsafe.Pointer(ts + 23578)) -var ntileName = *(*[6]int8)(unsafe.Pointer(ts + 23588)) -var last_valueName = *(*[11]int8)(unsafe.Pointer(ts + 23594)) -var nth_valueName = *(*[10]int8)(unsafe.Pointer(ts + 23605)) -var first_valueName = *(*[12]int8)(unsafe.Pointer(ts + 23615)) -var leadName = *(*[5]int8)(unsafe.Pointer(ts + 23627)) -var lagName = *(*[4]int8)(unsafe.Pointer(ts + 23632)) +var row_numberName = *(*[11]int8)(unsafe.Pointer(ts + 23585)) +var dense_rankName = *(*[11]int8)(unsafe.Pointer(ts + 23596)) +var rankName = *(*[5]int8)(unsafe.Pointer(ts + 23607)) +var percent_rankName = *(*[13]int8)(unsafe.Pointer(ts + 23612)) +var cume_distName = *(*[10]int8)(unsafe.Pointer(ts + 23625)) +var ntileName = *(*[6]int8)(unsafe.Pointer(ts + 23635)) +var 
last_valueName = *(*[11]int8)(unsafe.Pointer(ts + 23641)) +var nth_valueName = *(*[10]int8)(unsafe.Pointer(ts + 23652)) +var first_valueName = *(*[12]int8)(unsafe.Pointer(ts + 23662)) +var leadName = *(*[5]int8)(unsafe.Pointer(ts + 23674)) +var lagName = *(*[4]int8)(unsafe.Pointer(ts + 23679)) func noopStepFunc(tls *libc.TLS, p uintptr, n int32, a uintptr) { _ = p @@ -151985,7 +152043,7 @@ } } if p == uintptr(0) { - Xsqlite3ErrorMsg(tls, pParse, ts+23636, libc.VaList(bp, zName)) + Xsqlite3ErrorMsg(tls, pParse, ts+23683, libc.VaList(bp, zName)) } return p } @@ -152029,12 +152087,12 @@ ((*Window)(unsafe.Pointer(pWin)).FpStart != 0 || (*Window)(unsafe.Pointer(pWin)).FpEnd != 0) && ((*Window)(unsafe.Pointer(pWin)).FpOrderBy == uintptr(0) || (*ExprList)(unsafe.Pointer((*Window)(unsafe.Pointer(pWin)).FpOrderBy)).FnExpr != 1) { Xsqlite3ErrorMsg(tls, pParse, - ts+23655, 0) + ts+23702, 0) } else if (*FuncDef)(unsafe.Pointer(pFunc)).FfuncFlags&U32(SQLITE_FUNC_WINDOW) != 0 { var db uintptr = (*Parse)(unsafe.Pointer(pParse)).Fdb if (*Window)(unsafe.Pointer(pWin)).FpFilter != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+23726, 0) + ts+23773, 0) } else { *(*[8]WindowUpdate)(unsafe.Pointer(bp)) = [8]WindowUpdate{ {FzFunc: uintptr(unsafe.Pointer(&row_numberName)), FeFrmType: TK_ROWS, FeStart: TK_UNBOUNDED, FeEnd: TK_CURRENT}, @@ -152263,7 +152321,7 @@ if int32((*Expr)(unsafe.Pointer(pExpr)).Fop) == TK_AGG_FUNCTION && (*Expr)(unsafe.Pointer(pExpr)).FpAggInfo == uintptr(0) { Xsqlite3ErrorMsg(tls, (*Walker)(unsafe.Pointer(pWalker)).FpParse, - ts+23789, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(pExpr + 8)))) + ts+23836, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(pExpr + 8)))) } return WRC_Continue } @@ -152379,7 +152437,7 @@ if *(*uintptr)(unsafe.Pointer(bp + 48)) == uintptr(0) { *(*uintptr)(unsafe.Pointer(bp + 48)) = Xsqlite3ExprListAppend(tls, pParse, uintptr(0), - Xsqlite3Expr(tls, db, TK_INTEGER, ts+8882)) + Xsqlite3Expr(tls, db, TK_INTEGER, ts+8871)) } pSub = Xsqlite3SelectNew(tls, @@ -152494,7 +152552,7 @@ eStart == TK_FOLLOWING && (eEnd == TK_PRECEDING || eEnd == TK_CURRENT)) { goto __2 } - Xsqlite3ErrorMsg(tls, pParse, ts+23815, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+23862, 0) goto windowAllocErr __2: ; @@ -152559,15 +152617,15 @@ var zErr uintptr = uintptr(0) if (*Window)(unsafe.Pointer(pWin)).FpPartition != 0 { - zErr = ts + 23847 + zErr = ts + 23894 } else if (*Window)(unsafe.Pointer(pExist)).FpOrderBy != 0 && (*Window)(unsafe.Pointer(pWin)).FpOrderBy != 0 { - zErr = ts + 23864 + zErr = ts + 23911 } else if int32((*Window)(unsafe.Pointer(pExist)).FbImplicitFrame) == 0 { - zErr = ts + 23880 + zErr = ts + 23927 } if zErr != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+23900, libc.VaList(bp, zErr, (*Window)(unsafe.Pointer(pWin)).FzBase)) + ts+23947, libc.VaList(bp, zErr, (*Window)(unsafe.Pointer(pWin)).FzBase)) } else { (*Window)(unsafe.Pointer(pWin)).FpPartition = Xsqlite3ExprListDup(tls, db, (*Window)(unsafe.Pointer(pExist)).FpPartition, 0) if (*Window)(unsafe.Pointer(pExist)).FpOrderBy != 0 { @@ -152588,7 +152646,7 @@ (*Window)(unsafe.Pointer(pWin)).FpOwner = p if (*Expr)(unsafe.Pointer(p)).Fflags&U32(EP_Distinct) != 0 && int32((*Window)(unsafe.Pointer(pWin)).FeFrmType) != TK_FILTER { Xsqlite3ErrorMsg(tls, pParse, - ts+23933, 0) + ts+23980, 0) } } else { Xsqlite3WindowDelete(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pWin) @@ -152744,11 +152802,11 @@ } var azErr = [5]uintptr{ - ts + 23980, - ts + 24033, - ts + 23437, - ts + 24084, - ts + 24136, + ts + 24027, + ts + 24080, + ts + 23484, + ts + 24131, + ts + 
24183, } var aOp1 = [5]int32{OP_Ge, OP_Ge, OP_Gt, OP_Ge, OP_Ge} @@ -154143,19 +154201,19 @@ } cnt++ if (*Select)(unsafe.Pointer(pLoop)).FpOrderBy != 0 || (*Select)(unsafe.Pointer(pLoop)).FpLimit != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+24186, + Xsqlite3ErrorMsg(tls, pParse, ts+24233, libc.VaList(bp, func() uintptr { if (*Select)(unsafe.Pointer(pLoop)).FpOrderBy != uintptr(0) { - return ts + 24228 + return ts + 24275 } - return ts + 24237 + return ts + 24284 }(), Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(pNext)).Fop)))) break } } if (*Select)(unsafe.Pointer(p)).FselFlags&U32(SF_MultiValue) == U32(0) && libc.AssignInt32(&mxSelect, *(*int32)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb + 136 + 4*4))) > 0 && cnt > mxSelect { - Xsqlite3ErrorMsg(tls, pParse, ts+24243, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+24290, 0) } } @@ -154223,7 +154281,7 @@ var p uintptr = Xsqlite3ExprListAppend(tls, pParse, pPrior, uintptr(0)) if (hasCollate != 0 || sortOrder != -1) && int32((*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).Finit.Fbusy) == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+24277, + Xsqlite3ErrorMsg(tls, pParse, ts+24324, libc.VaList(bp, (*Token)(unsafe.Pointer(pIdToken)).Fn, (*Token)(unsafe.Pointer(pIdToken)).Fz)) } Xsqlite3ExprListSetName(tls, pParse, p, pIdToken, 1) @@ -155320,7 +155378,7 @@ yy_pop_parser_stack(tls, yypParser) } - Xsqlite3ErrorMsg(tls, pParse, ts+24315, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+24362, 0) (*YyParser)(unsafe.Pointer(yypParser)).FpParse = pParse } @@ -156299,7 +156357,7 @@ *(*U32)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-1)*24 + 8)) = U32(TF_WithoutRowid | TF_NoVisibleRowid) } else { *(*U32)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-1)*24 + 8)) = U32(0) - Xsqlite3ErrorMsg(tls, pParse, ts+24337, libc.VaList(bp, (*Token)(unsafe.Pointer(yymsp+8)).Fn, (*Token)(unsafe.Pointer(yymsp+8)).Fz)) + Xsqlite3ErrorMsg(tls, pParse, ts+24384, libc.VaList(bp, (*Token)(unsafe.Pointer(yymsp+8)).Fn, (*Token)(unsafe.Pointer(yymsp+8)).Fz)) } } break @@ -156309,7 +156367,7 @@ *(*U32)(unsafe.Pointer(bp + 40)) = U32(TF_Strict) } else { *(*U32)(unsafe.Pointer(bp + 40)) = U32(0) - Xsqlite3ErrorMsg(tls, pParse, ts+24337, libc.VaList(bp+16, (*Token)(unsafe.Pointer(yymsp+8)).Fn, (*Token)(unsafe.Pointer(yymsp+8)).Fz)) + Xsqlite3ErrorMsg(tls, pParse, ts+24384, libc.VaList(bp+16, (*Token)(unsafe.Pointer(yymsp+8)).Fn, (*Token)(unsafe.Pointer(yymsp+8)).Fz)) } } *(*U32)(unsafe.Pointer(yymsp + 8)) = *(*U32)(unsafe.Pointer(bp + 40)) @@ -157052,7 +157110,7 @@ case uint32(157): { Xsqlite3SrcListIndexedBy(tls, pParse, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-5)*24 + 8)), yymsp+libc.UintptrFromInt32(-4)*24+8) - Xsqlite3ExprListCheckLength(tls, pParse, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-2)*24 + 8)), ts+24364) + Xsqlite3ExprListCheckLength(tls, pParse, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-2)*24 + 8)), ts+24411) if *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-1)*24 + 8)) != 0 { var pFromClause uintptr = *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-1)*24 + 8)) if (*SrcList)(unsafe.Pointer(pFromClause)).FnSrc > 1 { @@ -157216,7 +157274,7 @@ *(*Token)(unsafe.Pointer(bp + 128)) = *(*Token)(unsafe.Pointer(yymsp + 8)) if int32((*Parse)(unsafe.Pointer(pParse)).Fnested) == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+24373, libc.VaList(bp+32, bp+128)) + Xsqlite3ErrorMsg(tls, pParse, ts+24420, libc.VaList(bp+32, bp+128)) *(*uintptr)(unsafe.Pointer(yymsp + 8)) = uintptr(0) } else { 
*(*uintptr)(unsafe.Pointer(yymsp + 8)) = Xsqlite3PExpr(tls, pParse, TK_REGISTER, uintptr(0), uintptr(0)) @@ -157433,9 +157491,9 @@ Xsqlite3ExprUnmapAndDelete(tls, pParse, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-4)*24 + 8))) *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-4)*24 + 8)) = Xsqlite3Expr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, TK_STRING, func() uintptr { if *(*int32)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-3)*24 + 8)) != 0 { - return ts + 8116 + return ts + 9046 } - return ts + 8121 + return ts + 9051 }()) if *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-4)*24 + 8)) != 0 { Xsqlite3ExprIdToTrueFalse(tls, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-4)*24 + 8))) @@ -157719,19 +157777,19 @@ { *(*Token)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-2)*24 + 8)) = *(*Token)(unsafe.Pointer(yymsp + 8)) Xsqlite3ErrorMsg(tls, pParse, - ts+24397, 0) + ts+24444, 0) } break case uint32(271): { Xsqlite3ErrorMsg(tls, pParse, - ts+24492, 0) + ts+24539, 0) } break case uint32(272): { Xsqlite3ErrorMsg(tls, pParse, - ts+24576, 0) + ts+24623, 0) } break case uint32(273): @@ -158110,9 +158168,9 @@ _ = yymajor if *(*int8)(unsafe.Pointer((*Token)(unsafe.Pointer(bp + 8)).Fz)) != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+24373, libc.VaList(bp, bp+8)) + Xsqlite3ErrorMsg(tls, pParse, ts+24420, libc.VaList(bp, bp+8)) } else { - Xsqlite3ErrorMsg(tls, pParse, ts+24661, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+24708, 0) } (*YyParser)(unsafe.Pointer(yypParser)).FpParse = pParse @@ -158880,7 +158938,7 @@ } else { (*Token)(unsafe.Pointer(bp + 2464)).Fz = zSql (*Token)(unsafe.Pointer(bp + 2464)).Fn = uint32(n) - Xsqlite3ErrorMsg(tls, pParse, ts+24678, libc.VaList(bp, bp+2464)) + Xsqlite3ErrorMsg(tls, pParse, ts+24725, libc.VaList(bp, bp+2464)) break } } @@ -158903,7 +158961,7 @@ if (*Parse)(unsafe.Pointer(pParse)).FzErrMsg == uintptr(0) { (*Parse)(unsafe.Pointer(pParse)).FzErrMsg = Xsqlite3MPrintf(tls, db, ts+4493, libc.VaList(bp+8, Xsqlite3ErrStr(tls, (*Parse)(unsafe.Pointer(pParse)).Frc))) } - Xsqlite3_log(tls, (*Parse)(unsafe.Pointer(pParse)).Frc, ts+24703, libc.VaList(bp+16, (*Parse)(unsafe.Pointer(pParse)).FzErrMsg, (*Parse)(unsafe.Pointer(pParse)).FzTail)) + Xsqlite3_log(tls, (*Parse)(unsafe.Pointer(pParse)).Frc, ts+24750, libc.VaList(bp+16, (*Parse)(unsafe.Pointer(pParse)).FzErrMsg, (*Parse)(unsafe.Pointer(pParse)).FzTail)) nErr++ } (*Parse)(unsafe.Pointer(pParse)).FzTail = zSql @@ -159076,7 +159134,7 @@ fallthrough case 'C': { - if nId == 6 && Xsqlite3_strnicmp(tls, zSql, ts+24714, 6) == 0 { + if nId == 6 && Xsqlite3_strnicmp(tls, zSql, ts+24761, 6) == 0 { token = U8(TkCREATE) } else { token = U8(TkOTHER) @@ -159089,11 +159147,11 @@ fallthrough case 'T': { - if nId == 7 && Xsqlite3_strnicmp(tls, zSql, ts+21300, 7) == 0 { + if nId == 7 && Xsqlite3_strnicmp(tls, zSql, ts+21347, 7) == 0 { token = U8(TkTRIGGER) - } else if nId == 4 && Xsqlite3_strnicmp(tls, zSql, ts+24721, 4) == 0 { + } else if nId == 4 && Xsqlite3_strnicmp(tls, zSql, ts+24768, 4) == 0 { token = U8(TkTEMP) - } else if nId == 9 && Xsqlite3_strnicmp(tls, zSql, ts+24726, 9) == 0 { + } else if nId == 9 && Xsqlite3_strnicmp(tls, zSql, ts+24773, 9) == 0 { token = U8(TkTEMP) } else { token = U8(TkOTHER) @@ -159106,9 +159164,9 @@ fallthrough case 'E': { - if nId == 3 && Xsqlite3_strnicmp(tls, zSql, ts+24736, 3) == 0 { + if nId == 3 && Xsqlite3_strnicmp(tls, zSql, ts+24783, 3) == 0 { token = U8(TkEND) - } else if nId == 7 && Xsqlite3_strnicmp(tls, zSql, ts+24740, 7) == 0 { + } else if nId == 7 && 
Xsqlite3_strnicmp(tls, zSql, ts+24787, 7) == 0 { token = U8(TkEXPLAIN) } else { token = U8(TkOTHER) @@ -159342,7 +159400,7 @@ var rc int32 = SQLITE_OK if Xsqlite3Config.FisInit != 0 { - return Xsqlite3MisuseError(tls, 174337) + return Xsqlite3MisuseError(tls, 174426) } ap = va @@ -159917,7 +159975,7 @@ return SQLITE_OK } if !(Xsqlite3SafetyCheckSickOrOk(tls, db) != 0) { - return Xsqlite3MisuseError(tls, 175111) + return Xsqlite3MisuseError(tls, 175200) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) if int32((*Sqlite3)(unsafe.Pointer(db)).FmTrace)&SQLITE_TRACE_CLOSE != 0 { @@ -159932,7 +159990,7 @@ if !(forceZombie != 0) && connectionIsBusy(tls, db) != 0 { Xsqlite3ErrorWithMsg(tls, db, SQLITE_BUSY, - ts+24748, 0) + ts+24795, 0) Xsqlite3_mutex_leave(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) return SQLITE_BUSY } @@ -160123,23 +160181,23 @@ // Return a static string that describes the kind of error specified in the // argument. func Xsqlite3ErrStr(tls *libc.TLS, rc int32) uintptr { - var zErr uintptr = ts + 24816 + var zErr uintptr = ts + 24863 switch rc { case SQLITE_ABORT | int32(2)<<8: { - zErr = ts + 24830 + zErr = ts + 24877 break } case SQLITE_ROW: { - zErr = ts + 24852 + zErr = ts + 24899 break } case SQLITE_DONE: { - zErr = ts + 24874 + zErr = ts + 24921 break } @@ -160157,35 +160215,35 @@ } var aMsg = [29]uintptr{ - ts + 24897, - ts + 24910, + ts + 24944, + ts + 24957, uintptr(0), - ts + 24926, - ts + 24951, - ts + 24965, - ts + 24984, + ts + 24973, + ts + 24998, + ts + 25012, + ts + 25031, ts + 1480, - ts + 25009, - ts + 25046, - ts + 25058, - ts + 25073, - ts + 25106, - ts + 25124, - ts + 25149, - ts + 25178, + ts + 25056, + ts + 25093, + ts + 25105, + ts + 25120, + ts + 25153, + ts + 25171, + ts + 25196, + ts + 25225, uintptr(0), ts + 7190, ts + 6686, - ts + 25195, - ts + 25213, - ts + 25231, - uintptr(0), - ts + 25265, + ts + 25242, + ts + 25260, + ts + 25278, uintptr(0), - ts + 25286, ts + 25312, - ts + 25335, - ts + 25356, + uintptr(0), + ts + 25333, + ts + 25359, + ts + 25382, + ts + 25403, } func sqliteDefaultBusyCallback(tls *libc.TLS, ptr uintptr, count int32) int32 { @@ -160306,7 +160364,7 @@ libc.Bool32(xValue == uintptr(0)) != libc.Bool32(xInverse == uintptr(0)) || (nArg < -1 || nArg > SQLITE_MAX_FUNCTION_ARG) || 255 < Xsqlite3Strlen30(tls, zFunctionName) { - return Xsqlite3MisuseError(tls, 175758) + return Xsqlite3MisuseError(tls, 175847) } extraFlags = enc & (SQLITE_DETERMINISTIC | SQLITE_DIRECTONLY | SQLITE_SUBTYPE | SQLITE_INNOCUOUS) @@ -160351,7 +160409,7 @@ if p != 0 && (*FuncDef)(unsafe.Pointer(p)).FfuncFlags&U32(SQLITE_FUNC_ENCMASK) == U32(enc) && int32((*FuncDef)(unsafe.Pointer(p)).FnArg) == nArg { if (*Sqlite3)(unsafe.Pointer(db)).FnVdbeActive != 0 { Xsqlite3ErrorWithMsg(tls, db, SQLITE_BUSY, - ts+25372, 0) + ts+25419, 0) return SQLITE_BUSY } else { @@ -160468,7 +160526,7 @@ _ = NotUsed _ = NotUsed2 zErr = Xsqlite3_mprintf(tls, - ts+25435, libc.VaList(bp, zName)) + ts+25482, libc.VaList(bp, zName)) Xsqlite3_result_error(tls, context, zErr, -1) Xsqlite3_free(tls, zErr) } @@ -160704,7 +160762,7 @@ } if iDb < 0 { rc = SQLITE_ERROR - Xsqlite3ErrorWithMsg(tls, db, SQLITE_ERROR, ts+25486, libc.VaList(bp, zDb)) + Xsqlite3ErrorWithMsg(tls, db, SQLITE_ERROR, ts+25533, libc.VaList(bp, zDb)) } else { (*Sqlite3)(unsafe.Pointer(db)).FbusyHandler.FnBusy = 0 rc = Xsqlite3Checkpoint(tls, db, iDb, eMode, pnLog, pnCkpt) @@ -160797,7 +160855,7 @@ return Xsqlite3ErrStr(tls, SQLITE_NOMEM) } if !(Xsqlite3SafetyCheckSickOrOk(tls, db) != 0) { - return 
Xsqlite3ErrStr(tls, Xsqlite3MisuseError(tls, 176503)) + return Xsqlite3ErrStr(tls, Xsqlite3MisuseError(tls, 176592)) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) if (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { @@ -160867,7 +160925,7 @@ // passed to this function, we assume a malloc() failed during sqlite3_open(). func Xsqlite3_errcode(tls *libc.TLS, db uintptr) int32 { if db != 0 && !(Xsqlite3SafetyCheckSickOrOk(tls, db) != 0) { - return Xsqlite3MisuseError(tls, 176582) + return Xsqlite3MisuseError(tls, 176671) } if !(db != 0) || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { return SQLITE_NOMEM @@ -160877,7 +160935,7 @@ func Xsqlite3_extended_errcode(tls *libc.TLS, db uintptr) int32 { if db != 0 && !(Xsqlite3SafetyCheckSickOrOk(tls, db) != 0) { - return Xsqlite3MisuseError(tls, 176591) + return Xsqlite3MisuseError(tls, 176680) } if !(db != 0) || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { return SQLITE_NOMEM @@ -160909,14 +160967,14 @@ enc2 = SQLITE_UTF16LE } if enc2 < SQLITE_UTF8 || enc2 > SQLITE_UTF16BE { - return Xsqlite3MisuseError(tls, 176639) + return Xsqlite3MisuseError(tls, 176728) } pColl = Xsqlite3FindCollSeq(tls, db, U8(enc2), zName, 0) if pColl != 0 && (*CollSeq)(unsafe.Pointer(pColl)).FxCmp != 0 { if (*Sqlite3)(unsafe.Pointer(db)).FnVdbeActive != 0 { Xsqlite3ErrorWithMsg(tls, db, SQLITE_BUSY, - ts+25507, 0) + ts+25554, 0) return SQLITE_BUSY } Xsqlite3ExpirePreparedStatements(tls, db, 0) @@ -161046,7 +161104,7 @@ if !((flags&uint32(SQLITE_OPEN_URI) != 0 || Xsqlite3Config.FbOpenUri != 0) && - nUri >= 5 && libc.Xmemcmp(tls, zUri, ts+25575, uint64(5)) == 0) { + nUri >= 5 && libc.Xmemcmp(tls, zUri, ts+25622, uint64(5)) == 0) { goto __1 } iOut = 0 @@ -161091,10 +161149,10 @@ goto __8 __9: ; - if !(iIn != 7 && (iIn != 16 || libc.Xmemcmp(tls, ts+25581, zUri+7, uint64(9)) != 0)) { + if !(iIn != 7 && (iIn != 16 || libc.Xmemcmp(tls, ts+25628, zUri+7, uint64(9)) != 0)) { goto __10 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+25591, + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+25638, libc.VaList(bp, iIn-7, zUri+7)) rc = SQLITE_ERROR goto parse_uri_out @@ -161199,7 +161257,7 @@ zVal = zOpt + uintptr(nOpt+1) nVal = Xsqlite3Strlen30(tls, zVal) - if !(nOpt == 3 && libc.Xmemcmp(tls, ts+25619, zOpt, uint64(3)) == 0) { + if !(nOpt == 3 && libc.Xmemcmp(tls, ts+25666, zOpt, uint64(3)) == 0) { goto __29 } zVfs = zVal @@ -161210,17 +161268,17 @@ mask = 0 limit = 0 - if !(nOpt == 5 && libc.Xmemcmp(tls, ts+25623, zOpt, uint64(5)) == 0) { + if !(nOpt == 5 && libc.Xmemcmp(tls, ts+25670, zOpt, uint64(5)) == 0) { goto __31 } mask = SQLITE_OPEN_SHAREDCACHE | SQLITE_OPEN_PRIVATECACHE aMode = uintptr(unsafe.Pointer(&aCacheMode)) limit = mask - zModeType = ts + 25623 + zModeType = ts + 25670 __31: ; - if !(nOpt == 4 && libc.Xmemcmp(tls, ts+25629, zOpt, uint64(4)) == 0) { + if !(nOpt == 4 && libc.Xmemcmp(tls, ts+25676, zOpt, uint64(4)) == 0) { goto __32 } @@ -161228,7 +161286,7 @@ SQLITE_OPEN_CREATE | SQLITE_OPEN_MEMORY aMode = uintptr(unsafe.Pointer(&aOpenMode)) limit = int32(uint32(mask) & flags) - zModeType = ts + 25634 + zModeType = ts + 25681 __32: ; if !(aMode != 0) { @@ -161258,7 +161316,7 @@ if !(mode == 0) { goto __38 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+25641, libc.VaList(bp+16, zModeType, zVal)) + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+25688, libc.VaList(bp+16, zModeType, zVal)) rc = SQLITE_ERROR goto parse_uri_out __38: @@ -161266,7 +161324,7 @@ if 
!(mode&libc.CplInt32(SQLITE_OPEN_MEMORY) > limit) { goto __39 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+25661, + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+25708, libc.VaList(bp+32, zModeType, zVal)) rc = SQLITE_PERM goto parse_uri_out @@ -161306,7 +161364,7 @@ if !(*(*uintptr)(unsafe.Pointer(ppVfs)) == uintptr(0)) { goto __42 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+25685, libc.VaList(bp+48, zVfs)) + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+25732, libc.VaList(bp+48, zVfs)) rc = SQLITE_ERROR __42: ; @@ -161330,14 +161388,14 @@ } var aCacheMode = [3]OpenMode{ - {Fz: ts + 25701, Fmode: SQLITE_OPEN_SHAREDCACHE}, - {Fz: ts + 25708, Fmode: SQLITE_OPEN_PRIVATECACHE}, + {Fz: ts + 25748, Fmode: SQLITE_OPEN_SHAREDCACHE}, + {Fz: ts + 25755, Fmode: SQLITE_OPEN_PRIVATECACHE}, {}, } var aOpenMode = [5]OpenMode{ - {Fz: ts + 25716, Fmode: SQLITE_OPEN_READONLY}, - {Fz: ts + 25719, Fmode: SQLITE_OPEN_READWRITE}, - {Fz: ts + 25722, Fmode: SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE}, + {Fz: ts + 25763, Fmode: SQLITE_OPEN_READONLY}, + {Fz: ts + 25766, Fmode: SQLITE_OPEN_READWRITE}, + {Fz: ts + 25769, Fmode: SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE}, {Fz: ts + 18731, Fmode: SQLITE_OPEN_MEMORY}, {}, } @@ -161484,10 +161542,10 @@ createCollation(tls, db, uintptr(unsafe.Pointer(&Xsqlite3StrBINARY)), uint8(SQLITE_UTF16LE), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr, int32, uintptr) int32 }{binCollFunc})), uintptr(0)) - createCollation(tls, db, ts+23232, uint8(SQLITE_UTF8), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + createCollation(tls, db, ts+23279, uint8(SQLITE_UTF8), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr, int32, uintptr) int32 }{nocaseCollatingFunc})), uintptr(0)) - createCollation(tls, db, ts+25726, uint8(SQLITE_UTF8), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + createCollation(tls, db, ts+25773, uint8(SQLITE_UTF8), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr, int32, uintptr) int32 }{rtrimCollFunc})), uintptr(0)) if !((*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0) { @@ -161501,7 +161559,7 @@ if !(int32(1)<<(*(*uint32)(unsafe.Pointer(bp + 8))&uint32(7))&0x46 == 0) { goto __16 } - rc = Xsqlite3MisuseError(tls, 177308) + rc = Xsqlite3MisuseError(tls, 177397) goto __17 __16: rc = Xsqlite3ParseUri(tls, zVfs, zFilename, bp+8, db, bp+16, bp+24) @@ -161554,7 +161612,7 @@ (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb)).FzDbSName = ts + 7793 (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb)).Fsafety_level = U8(SQLITE_DEFAULT_SYNCHRONOUS + 1) - (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + 1*32)).FzDbSName = ts + 24721 + (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + 1*32)).FzDbSName = ts + 24768 (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + 1*32)).Fsafety_level = U8(PAGER_SYNCHRONOUS_OFF) (*Sqlite3)(unsafe.Pointer(db)).FeOpenState = U8(SQLITE_STATE_OPEN) @@ -161659,7 +161717,7 @@ return rc } if zFilename == uintptr(0) { - zFilename = ts + 25732 + zFilename = ts + 25779 } pVal = Xsqlite3ValueNew(tls, uintptr(0)) Xsqlite3ValueSetStr(tls, pVal, -1, zFilename, uint8(SQLITE_UTF16LE), uintptr(0)) @@ -161762,21 +161820,21 @@ bp := tls.Alloc(24) defer tls.Free(24) - Xsqlite3_log(tls, iErr, ts+25735, + Xsqlite3_log(tls, iErr, ts+25782, libc.VaList(bp, zType, lineno, uintptr(20)+Xsqlite3_sourceid(tls))) return 
iErr } func Xsqlite3CorruptError(tls *libc.TLS, lineno int32) int32 { - return Xsqlite3ReportError(tls, SQLITE_CORRUPT, lineno, ts+25760) + return Xsqlite3ReportError(tls, SQLITE_CORRUPT, lineno, ts+25807) } func Xsqlite3MisuseError(tls *libc.TLS, lineno int32) int32 { - return Xsqlite3ReportError(tls, SQLITE_MISUSE, lineno, ts+25780) + return Xsqlite3ReportError(tls, SQLITE_MISUSE, lineno, ts+25827) } func Xsqlite3CantopenError(tls *libc.TLS, lineno int32) int32 { - return Xsqlite3ReportError(tls, SQLITE_CANTOPEN, lineno, ts+25787) + return Xsqlite3ReportError(tls, SQLITE_CANTOPEN, lineno, ts+25834) } // This is a convenience routine that makes sure that all thread-specific @@ -161934,7 +161992,7 @@ goto __20 } Xsqlite3DbFree(tls, db, *(*uintptr)(unsafe.Pointer(bp + 24))) - *(*uintptr)(unsafe.Pointer(bp + 24)) = Xsqlite3MPrintf(tls, db, ts+25804, libc.VaList(bp, zTableName, + *(*uintptr)(unsafe.Pointer(bp + 24)) = Xsqlite3MPrintf(tls, db, ts+25851, libc.VaList(bp, zTableName, zColumnName)) rc = SQLITE_ERROR __20: @@ -162590,7 +162648,7 @@ azCompileOpt = Xsqlite3CompileOptions(tls, bp) - if Xsqlite3_strnicmp(tls, zOptName, ts+25832, 7) == 0 { + if Xsqlite3_strnicmp(tls, zOptName, ts+25879, 7) == 0 { zOptName += uintptr(7) } n = Xsqlite3Strlen30(tls, zOptName) @@ -162708,7 +162766,7 @@ Xsqlite3ErrorWithMsg(tls, db, rc, func() uintptr { if rc != 0 { - return ts + 25840 + return ts + 25887 } return uintptr(0) }(), 0) @@ -162886,7 +162944,7 @@ type JsonParse = JsonParse1 var jsonType = [8]uintptr{ - ts + 7533, ts + 8116, ts + 8121, ts + 7543, ts + 7538, ts + 9357, ts + 25863, ts + 25869, + ts + 7533, ts + 9046, ts + 9051, ts + 7543, ts + 7538, ts + 9357, ts + 25910, ts + 25916, } func jsonZero(tls *libc.TLS, p uintptr) { @@ -163039,7 +163097,7 @@ *(*int8)(unsafe.Pointer((*JsonString)(unsafe.Pointer(p)).FzBuf + uintptr(libc.PostIncUint64(&(*JsonString)(unsafe.Pointer(p)).FnUsed, 1)))) = int8('0') *(*int8)(unsafe.Pointer((*JsonString)(unsafe.Pointer(p)).FzBuf + uintptr(libc.PostIncUint64(&(*JsonString)(unsafe.Pointer(p)).FnUsed, 1)))) = int8('0') *(*int8)(unsafe.Pointer((*JsonString)(unsafe.Pointer(p)).FzBuf + uintptr(libc.PostIncUint64(&(*JsonString)(unsafe.Pointer(p)).FnUsed, 1)))) = int8('0' + int32(c)>>4) - c = uint8(*(*int8)(unsafe.Pointer(ts + 25876 + uintptr(int32(c)&0xf)))) + c = uint8(*(*int8)(unsafe.Pointer(ts + 25923 + uintptr(int32(c)&0xf)))) __8: ; __6: @@ -163094,7 +163152,7 @@ default: { if int32((*JsonString)(unsafe.Pointer(p)).FbErr) == 0 { - Xsqlite3_result_error(tls, (*JsonString)(unsafe.Pointer(p)).FpCtx, ts+25893, -1) + Xsqlite3_result_error(tls, (*JsonString)(unsafe.Pointer(p)).FpCtx, ts+25940, -1) (*JsonString)(unsafe.Pointer(p)).FbErr = U8(2) jsonReset(tls, p) } @@ -163158,13 +163216,13 @@ } case JSON_TRUE: { - jsonAppendRaw(tls, pOut, ts+8116, uint32(4)) + jsonAppendRaw(tls, pOut, ts+9046, uint32(4)) break } case JSON_FALSE: { - jsonAppendRaw(tls, pOut, ts+8121, uint32(5)) + jsonAppendRaw(tls, pOut, ts+9051, uint32(5)) break } @@ -163714,12 +163772,12 @@ jsonParseAddNode(tls, pParse, uint32(JSON_NULL), uint32(0), uintptr(0)) return int32(i + U32(4)) } else if int32(c) == 't' && - libc.Xstrncmp(tls, z+uintptr(i), ts+8116, uint64(4)) == 0 && + libc.Xstrncmp(tls, z+uintptr(i), ts+9046, uint64(4)) == 0 && !(int32(Xsqlite3CtypeMap[uint8(*(*int8)(unsafe.Pointer(z + uintptr(i+U32(4)))))])&0x06 != 0) { jsonParseAddNode(tls, pParse, uint32(JSON_TRUE), uint32(0), uintptr(0)) return int32(i + U32(4)) } else if int32(c) == 'f' && - libc.Xstrncmp(tls, z+uintptr(i), ts+8121, 
uint64(5)) == 0 && + libc.Xstrncmp(tls, z+uintptr(i), ts+9051, uint64(5)) == 0 && !(int32(Xsqlite3CtypeMap[uint8(*(*int8)(unsafe.Pointer(z + uintptr(i+U32(5)))))])&0x06 != 0) { jsonParseAddNode(tls, pParse, uint32(JSON_FALSE), uint32(0), uintptr(0)) return int32(i + U32(5)) @@ -163820,7 +163878,7 @@ if (*JsonParse)(unsafe.Pointer(pParse)).Foom != 0 { Xsqlite3_result_error_nomem(tls, pCtx) } else { - Xsqlite3_result_error(tls, pCtx, ts+25922, -1) + Xsqlite3_result_error(tls, pCtx, ts+25969, -1) } } jsonParseReset(tls, pParse) @@ -164126,7 +164184,7 @@ } if int32(*(*int8)(unsafe.Pointer(zPath))) == '.' { jsonParseAddNode(tls, pParse, uint32(JSON_OBJECT), uint32(0), uintptr(0)) - } else if libc.Xstrncmp(tls, zPath, ts+25937, uint64(3)) == 0 { + } else if libc.Xstrncmp(tls, zPath, ts+25984, uint64(3)) == 0 { jsonParseAddNode(tls, pParse, uint32(JSON_ARRAY), uint32(0), uintptr(0)) } else { return uintptr(0) @@ -164141,7 +164199,7 @@ bp := tls.Alloc(8) defer tls.Free(8) - return Xsqlite3_mprintf(tls, ts+25941, libc.VaList(bp, zErr)) + return Xsqlite3_mprintf(tls, ts+25988, libc.VaList(bp, zErr)) } func jsonLookup(tls *libc.TLS, pParse uintptr, zPath uintptr, pApnd uintptr, pCtx uintptr) uintptr { @@ -164195,7 +164253,7 @@ bp := tls.Alloc(8) defer tls.Free(8) - var zMsg uintptr = Xsqlite3_mprintf(tls, ts+25967, + var zMsg uintptr = Xsqlite3_mprintf(tls, ts+26014, libc.VaList(bp, zFuncName)) Xsqlite3_result_error(tls, pCtx, zMsg, -1) Xsqlite3_free(tls, zMsg) @@ -164300,11 +164358,11 @@ if int32(*(*int8)(unsafe.Pointer(zPath))) != '$' { jsonInit(tls, bp, ctx) if int32(Xsqlite3CtypeMap[uint8(*(*int8)(unsafe.Pointer(zPath)))])&0x04 != 0 { - jsonAppendRaw(tls, bp, ts+26010, uint32(2)) + jsonAppendRaw(tls, bp, ts+26057, uint32(2)) jsonAppendRaw(tls, bp, zPath, uint32(int32(libc.Xstrlen(tls, zPath)))) jsonAppendRaw(tls, bp, ts+6350, uint32(2)) } else { - jsonAppendRaw(tls, bp, ts+26013, uint32(1+libc.Bool32(int32(*(*int8)(unsafe.Pointer(zPath))) != '['))) + jsonAppendRaw(tls, bp, ts+26060, uint32(1+libc.Bool32(int32(*(*int8)(unsafe.Pointer(zPath))) != '['))) jsonAppendRaw(tls, bp, zPath, uint32(int32(libc.Xstrlen(tls, zPath)))) jsonAppendChar(tls, bp, int8(0)) } @@ -164461,14 +164519,14 @@ if argc&1 != 0 { Xsqlite3_result_error(tls, ctx, - ts+26016, -1) + ts+26063, -1) return } jsonInit(tls, bp, ctx) jsonAppendChar(tls, bp, int8('{')) for i = 0; i < argc; i = i + 2 { if Xsqlite3_value_type(tls, *(*uintptr)(unsafe.Pointer(argv + uintptr(i)*8))) != SQLITE_TEXT { - Xsqlite3_result_error(tls, ctx, ts+26067, -1) + Xsqlite3_result_error(tls, ctx, ts+26114, -1) jsonReset(tls, bp) return } @@ -164638,9 +164696,9 @@ } jsonWrongNumArgs(tls, ctx, func() uintptr { if bIsSet != 0 { - return ts + 26101 + return ts + 26148 } - return ts + 26105 + return ts + 26152 }()) return __2: @@ -164773,7 +164831,7 @@ (*JsonString)(unsafe.Pointer(pStr)).FnUsed-- } } else { - Xsqlite3_result_text(tls, ctx, ts+26112, 2, uintptr(0)) + Xsqlite3_result_text(tls, ctx, ts+26159, 2, uintptr(0)) } Xsqlite3_result_subtype(tls, ctx, uint32(JSON_SUBTYPE)) } @@ -164870,7 +164928,7 @@ (*JsonString)(unsafe.Pointer(pStr)).FnUsed-- } } else { - Xsqlite3_result_text(tls, ctx, ts+26115, 2, uintptr(0)) + Xsqlite3_result_text(tls, ctx, ts+26162, 2, uintptr(0)) } Xsqlite3_result_subtype(tls, ctx, uint32(JSON_SUBTYPE)) } @@ -164914,7 +164972,7 @@ _ = argc _ = pAux rc = Xsqlite3_declare_vtab(tls, db, - ts+26118) + ts+26165) if rc == SQLITE_OK { pNew = libc.AssignPtrUintptr(ppVtab, Xsqlite3_malloc(tls, int32(unsafe.Sizeof(Sqlite3_vtab{})))) if pNew == 
uintptr(0) { @@ -165045,7 +165103,7 @@ nn = nn - 2 } } - jsonPrintf(tls, nn+2, pStr, ts+26201, libc.VaList(bp, nn, z)) + jsonPrintf(tls, nn+2, pStr, ts+26248, libc.VaList(bp, nn, z)) } func jsonEachComputePath(tls *libc.TLS, p uintptr, pStr uintptr, i U32) { @@ -165064,7 +165122,7 @@ pNode = (*JsonEachCursor)(unsafe.Pointer(p)).FsParse.FaNode + uintptr(i)*16 pUp = (*JsonEachCursor)(unsafe.Pointer(p)).FsParse.FaNode + uintptr(iUp)*16 if int32((*JsonNode)(unsafe.Pointer(pUp)).FeType) == JSON_ARRAY { - jsonPrintf(tls, 30, pStr, ts+26207, libc.VaList(bp, *(*U32)(unsafe.Pointer(pUp + 8)))) + jsonPrintf(tls, 30, pStr, ts+26254, libc.VaList(bp, *(*U32)(unsafe.Pointer(pUp + 8)))) } else { if int32((*JsonNode)(unsafe.Pointer(pNode)).FjnFlags)&JNODE_LABEL == 0 { pNode -= 16 @@ -165160,7 +165218,7 @@ jsonAppendChar(tls, bp+8, int8('$')) } if int32((*JsonEachCursor)(unsafe.Pointer(p)).FeType) == JSON_ARRAY { - jsonPrintf(tls, 30, bp+8, ts+26207, libc.VaList(bp, (*JsonEachCursor)(unsafe.Pointer(p)).FiRowid)) + jsonPrintf(tls, 30, bp+8, ts+26254, libc.VaList(bp, (*JsonEachCursor)(unsafe.Pointer(p)).FiRowid)) } else if int32((*JsonEachCursor)(unsafe.Pointer(p)).FeType) == JSON_OBJECT { jsonAppendObjectPathElement(tls, bp+8, pThis) } @@ -165184,7 +165242,7 @@ { var zRoot uintptr = (*JsonEachCursor)(unsafe.Pointer(p)).FzRoot if zRoot == uintptr(0) { - zRoot = ts + 26212 + zRoot = ts + 26259 } Xsqlite3_result_text(tls, ctx, zRoot, -1, uintptr(0)) break @@ -165306,7 +165364,7 @@ var rc int32 = SQLITE_NOMEM if int32((*JsonEachCursor)(unsafe.Pointer(p)).FsParse.Foom) == 0 { Xsqlite3_free(tls, (*Sqlite3_vtab)(unsafe.Pointer((*Sqlite3_vtab_cursor)(unsafe.Pointer(cur)).FpVtab)).FzErrMsg) - (*Sqlite3_vtab)(unsafe.Pointer((*Sqlite3_vtab_cursor)(unsafe.Pointer(cur)).FpVtab)).FzErrMsg = Xsqlite3_mprintf(tls, ts+25922, 0) + (*Sqlite3_vtab)(unsafe.Pointer((*Sqlite3_vtab_cursor)(unsafe.Pointer(cur)).FpVtab)).FzErrMsg = Xsqlite3_mprintf(tls, ts+25969, 0) if (*Sqlite3_vtab)(unsafe.Pointer((*Sqlite3_vtab_cursor)(unsafe.Pointer(cur)).FpVtab)).FzErrMsg != 0 { rc = SQLITE_ERROR } @@ -165401,25 +165459,25 @@ } var aJsonFunc = [19]FuncDef{ - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 26214}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 26219}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 26230}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 26230}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 26248}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_JSON)), FxSFunc: 0, FzName: ts + 26261}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_SQL)), FxSFunc: 0, FzName: ts + 26264}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 26268}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, 
FzName: ts + 26280}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 26292}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 26303}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 26314}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 26326}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_ISSET)), FxSFunc: 0, FzName: ts + 26339}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 26348}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 26348}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 26358}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_UTF8 | 0*SQLITE_FUNC_NEEDCOLL | SQLITE_SUBTYPE | SQLITE_UTF8 | SQLITE_DETERMINISTIC), FxSFunc: 0, FxFinalize: 0, FxValue: 0, FxInverse: 0, FzName: ts + 26369}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_UTF8 | 0*SQLITE_FUNC_NEEDCOLL | SQLITE_SUBTYPE | SQLITE_UTF8 | SQLITE_DETERMINISTIC), FxSFunc: 0, FxFinalize: 0, FxValue: 0, FxInverse: 0, FzName: ts + 26386}} + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 26261}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 26266}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 26277}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 26277}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 26295}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_JSON)), FxSFunc: 0, FzName: ts + 26308}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_SQL)), FxSFunc: 0, FzName: ts + 26311}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 26315}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 26327}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 26339}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 26350}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | 
SQLITE_UTF8), FxSFunc: 0, FzName: ts + 26361}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 26373}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_ISSET)), FxSFunc: 0, FzName: ts + 26386}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 26395}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 26395}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 26405}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_UTF8 | 0*SQLITE_FUNC_NEEDCOLL | SQLITE_SUBTYPE | SQLITE_UTF8 | SQLITE_DETERMINISTIC), FxSFunc: 0, FxFinalize: 0, FxValue: 0, FxInverse: 0, FzName: ts + 26416}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_UTF8 | 0*SQLITE_FUNC_NEEDCOLL | SQLITE_SUBTYPE | SQLITE_UTF8 | SQLITE_DETERMINISTIC), FxSFunc: 0, FxFinalize: 0, FxValue: 0, FxInverse: 0, FzName: ts + 26433}} // Register the JSON table-valued functions func Xsqlite3JsonTableFunctions(tls *libc.TLS, db uintptr) int32 { @@ -165438,8 +165496,8 @@ FzName uintptr FpModule uintptr }{ - {FzName: ts + 26404, FpModule: 0}, - {FzName: ts + 26414, FpModule: 0}, + {FzName: ts + 26451, FpModule: 0}, + {FzName: ts + 26461, FpModule: 0}, } type Rtree1 = struct { @@ -165699,11 +165757,11 @@ } } if (*Rtree)(unsafe.Pointer(pRtree)).FpNodeBlob == uintptr(0) { - var zTab uintptr = Xsqlite3_mprintf(tls, ts+26424, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) + var zTab uintptr = Xsqlite3_mprintf(tls, ts+26471, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) if zTab == uintptr(0) { return SQLITE_NOMEM } - rc = Xsqlite3_blob_open(tls, (*Rtree)(unsafe.Pointer(pRtree)).Fdb, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, zTab, ts+26432, iNode, 0, + rc = Xsqlite3_blob_open(tls, (*Rtree)(unsafe.Pointer(pRtree)).Fdb, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, zTab, ts+26479, iNode, 0, pRtree+112) Xsqlite3_free(tls, zTab) } @@ -165914,7 +165972,7 @@ var pRtree uintptr = pVtab var rc int32 var zCreate uintptr = Xsqlite3_mprintf(tls, - ts+26437, + ts+26484, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) @@ -166612,7 +166670,7 @@ var pSrc uintptr var pInfo uintptr - pSrc = Xsqlite3_value_pointer(tls, pValue, ts+26519) + pSrc = Xsqlite3_value_pointer(tls, pValue, ts+26566) if pSrc == uintptr(0) { return SQLITE_ERROR } @@ -167953,7 +168011,7 @@ var zSql uintptr var rc int32 - zSql = Xsqlite3_mprintf(tls, ts+26533, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) + zSql = Xsqlite3_mprintf(tls, ts+26580, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) if zSql != 0 { rc = Xsqlite3_prepare_v2(tls, (*Rtree)(unsafe.Pointer(pRtree)).Fdb, zSql, -1, bp+56, uintptr(0)) } else { @@ -167965,12 +168023,12 @@ if iCol == 0 { var zCol uintptr = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), 0) (*Rtree)(unsafe.Pointer(pRtree)).Fbase.FzErrMsg = 
Xsqlite3_mprintf(tls, - ts+26553, libc.VaList(bp+16, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zCol)) + ts+26600, libc.VaList(bp+16, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zCol)) } else { var zCol1 uintptr = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), iCol) var zCol2 uintptr = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), iCol+1) (*Rtree)(unsafe.Pointer(pRtree)).Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+26585, libc.VaList(bp+32, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zCol1, zCol2)) + ts+26632, libc.VaList(bp+32, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zCol1, zCol2)) } } @@ -168196,7 +168254,7 @@ var pRtree uintptr = pVtab var rc int32 = SQLITE_NOMEM var zSql uintptr = Xsqlite3_mprintf(tls, - ts+26622, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName)) + ts+26669, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName)) if zSql != 0 { nodeBlobReset(tls, pRtree) rc = Xsqlite3_exec(tls, (*Rtree)(unsafe.Pointer(pRtree)).Fdb, zSql, uintptr(0), uintptr(0), uintptr(0)) @@ -168219,7 +168277,7 @@ bp := tls.Alloc(24) defer tls.Free(24) - var zFmt uintptr = ts + 26767 + var zFmt uintptr = ts + 26814 var zSql uintptr var rc int32 @@ -168267,7 +168325,7 @@ } var azName1 = [3]uintptr{ - ts + 26823, ts + 6412, ts + 17625, + ts + 26870, ts + 6412, ts + 17625, } var rtreeModule = Sqlite3_module{ @@ -168310,19 +168368,19 @@ var p uintptr = Xsqlite3_str_new(tls, db) var ii int32 Xsqlite3_str_appendf(tls, p, - ts+26828, + ts+26875, libc.VaList(bp, zDb, zPrefix)) for ii = 0; ii < int32((*Rtree)(unsafe.Pointer(pRtree)).FnAux); ii++ { - Xsqlite3_str_appendf(tls, p, ts+26890, libc.VaList(bp+16, ii)) + Xsqlite3_str_appendf(tls, p, ts+26937, libc.VaList(bp+16, ii)) } Xsqlite3_str_appendf(tls, p, - ts+26895, + ts+26942, libc.VaList(bp+24, zDb, zPrefix)) Xsqlite3_str_appendf(tls, p, - ts+26959, + ts+27006, libc.VaList(bp+40, zDb, zPrefix)) Xsqlite3_str_appendf(tls, p, - ts+27029, + ts+27076, libc.VaList(bp+56, zDb, zPrefix, (*Rtree)(unsafe.Pointer(pRtree)).FiNodeSize)) zCreate = Xsqlite3_str_finish(tls, p) if !(zCreate != 0) { @@ -168351,7 +168409,7 @@ if i != 3 || int32((*Rtree)(unsafe.Pointer(pRtree)).FnAux) == 0 { zFormat = azSql[i] } else { - zFormat = ts + 27078 + zFormat = ts + 27125 } zSql = Xsqlite3_mprintf(tls, zFormat, libc.VaList(bp+80, zDb, zPrefix)) if zSql != 0 { @@ -168363,7 +168421,7 @@ } if (*Rtree)(unsafe.Pointer(pRtree)).FnAux != 0 { (*Rtree)(unsafe.Pointer(pRtree)).FzReadAuxSql = Xsqlite3_mprintf(tls, - ts+27186, + ts+27233, libc.VaList(bp+96, zDb, zPrefix)) if (*Rtree)(unsafe.Pointer(pRtree)).FzReadAuxSql == uintptr(0) { rc = SQLITE_NOMEM @@ -168371,18 +168429,18 @@ var p uintptr = Xsqlite3_str_new(tls, db) var ii int32 var zSql uintptr - Xsqlite3_str_appendf(tls, p, ts+27231, libc.VaList(bp+112, zDb, zPrefix)) + Xsqlite3_str_appendf(tls, p, ts+27278, libc.VaList(bp+112, zDb, zPrefix)) for ii = 0; ii < int32((*Rtree)(unsafe.Pointer(pRtree)).FnAux); ii++ { if ii != 0 { Xsqlite3_str_append(tls, p, ts+14119, 1) } if ii < int32((*Rtree)(unsafe.Pointer(pRtree)).FnAuxNotNull) { - 
Xsqlite3_str_appendf(tls, p, ts+27258, libc.VaList(bp+128, ii, ii+2, ii)) + Xsqlite3_str_appendf(tls, p, ts+27305, libc.VaList(bp+128, ii, ii+2, ii)) } else { - Xsqlite3_str_appendf(tls, p, ts+27280, libc.VaList(bp+152, ii, ii+2)) + Xsqlite3_str_appendf(tls, p, ts+27327, libc.VaList(bp+152, ii, ii+2)) } } - Xsqlite3_str_appendf(tls, p, ts+27288, 0) + Xsqlite3_str_appendf(tls, p, ts+27335, 0) zSql = Xsqlite3_str_finish(tls, p) if zSql == uintptr(0) { rc = SQLITE_NOMEM @@ -168397,14 +168455,14 @@ } var azSql = [8]uintptr{ - ts + 27304, - ts + 27357, - ts + 27402, - ts + 27454, - ts + 27508, - ts + 27553, - ts + 27611, - ts + 27666, + ts + 27351, + ts + 27404, + ts + 27449, + ts + 27501, + ts + 27555, + ts + 27600, + ts + 27658, + ts + 27713, } func getIntFromStmt(tls *libc.TLS, db uintptr, zSql uintptr, piVal uintptr) int32 { @@ -168433,7 +168491,7 @@ var zSql uintptr if isCreate != 0 { *(*int32)(unsafe.Pointer(bp + 48)) = 0 - zSql = Xsqlite3_mprintf(tls, ts+27713, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb)) + zSql = Xsqlite3_mprintf(tls, ts+27760, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb)) rc = getIntFromStmt(tls, db, zSql, bp+48) if rc == SQLITE_OK { (*Rtree)(unsafe.Pointer(pRtree)).FiNodeSize = *(*int32)(unsafe.Pointer(bp + 48)) - 64 @@ -168445,7 +168503,7 @@ } } else { zSql = Xsqlite3_mprintf(tls, - ts+27733, + ts+27780, libc.VaList(bp+16, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) rc = getIntFromStmt(tls, db, zSql, pRtree+32) if rc != SQLITE_OK { @@ -168453,7 +168511,7 @@ } else if (*Rtree)(unsafe.Pointer(pRtree)).FiNodeSize < 512-64 { rc = SQLITE_CORRUPT | int32(1)<<8 - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+27790, + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+27837, libc.VaList(bp+40, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) } } @@ -168495,10 +168553,10 @@ ii = 4 *(*[5]uintptr)(unsafe.Pointer(bp + 96)) = [5]uintptr{ uintptr(0), - ts + 27825, - ts + 27868, - ts + 27903, - ts + 27939, + ts + 27872, + ts + 27915, + ts + 27950, + ts + 27986, } if !(argc < 6 || argc > RTREE_MAX_AUX_COLUMN+3) { @@ -168529,7 +168587,7 @@ libc.Xmemcpy(tls, (*Rtree)(unsafe.Pointer(pRtree)).FzName, *(*uintptr)(unsafe.Pointer(argv + 2*8)), uint64(nName)) pSql = Xsqlite3_str_new(tls, db) - Xsqlite3_str_appendf(tls, pSql, ts+27976, + Xsqlite3_str_appendf(tls, pSql, ts+28023, libc.VaList(bp+16, rtreeTokenLength(tls, *(*uintptr)(unsafe.Pointer(argv + 3*8))), *(*uintptr)(unsafe.Pointer(argv + 3*8)))) ii = 4 __3: @@ -168541,7 +168599,7 @@ goto __6 } (*Rtree)(unsafe.Pointer(pRtree)).FnAux++ - Xsqlite3_str_appendf(tls, pSql, ts+28000, libc.VaList(bp+32, rtreeTokenLength(tls, zArg+uintptr(1)), zArg+uintptr(1))) + Xsqlite3_str_appendf(tls, pSql, ts+28047, libc.VaList(bp+32, rtreeTokenLength(tls, zArg+uintptr(1)), zArg+uintptr(1))) goto __7 __6: if !(int32((*Rtree)(unsafe.Pointer(pRtree)).FnAux) > 0) { @@ -168564,7 +168622,7 @@ goto __5 __5: ; - Xsqlite3_str_appendf(tls, pSql, ts+28006, 0) + Xsqlite3_str_appendf(tls, pSql, ts+28053, 0) zSql = Xsqlite3_str_finish(tls, pSql) if !!(zSql != 0) { goto __10 @@ -168660,7 +168718,7 @@ return rc } -var azFormat = [2]uintptr{ts + 28009, ts + 28020} +var azFormat = [2]uintptr{ts + 28056, ts + 28067} func rtreenode(tls *libc.TLS, ctx uintptr, nArg int32, apArg uintptr) { bp := tls.Alloc(1072) @@ -168700,11 +168758,11 @@ if ii > 0 { Xsqlite3_str_append(tls, pOut, ts+12272, 1) } - Xsqlite3_str_appendf(tls, pOut, ts+28030, libc.VaList(bp, 
(*RtreeCell)(unsafe.Pointer(bp+1024)).FiRowid)) + Xsqlite3_str_appendf(tls, pOut, ts+28077, libc.VaList(bp, (*RtreeCell)(unsafe.Pointer(bp+1024)).FiRowid)) for jj = 0; jj < int32((*Rtree)(unsafe.Pointer(bp+56)).FnDim2); jj++ { - Xsqlite3_str_appendf(tls, pOut, ts+28036, libc.VaList(bp+8, float64(*(*RtreeValue)(unsafe.Pointer(bp + 1024 + 8 + uintptr(jj)*4))))) + Xsqlite3_str_appendf(tls, pOut, ts+28083, libc.VaList(bp+8, float64(*(*RtreeValue)(unsafe.Pointer(bp + 1024 + 8 + uintptr(jj)*4))))) } - Xsqlite3_str_append(tls, pOut, ts+28040, 1) + Xsqlite3_str_append(tls, pOut, ts+28087, 1) } errCode = Xsqlite3_str_errcode(tls, pOut) Xsqlite3_result_text(tls, ctx, Xsqlite3_str_finish(tls, pOut), -1, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{Xsqlite3_free}))) @@ -168715,7 +168773,7 @@ _ = nArg if Xsqlite3_value_type(tls, *(*uintptr)(unsafe.Pointer(apArg))) != SQLITE_BLOB || Xsqlite3_value_bytes(tls, *(*uintptr)(unsafe.Pointer(apArg))) < 2 { - Xsqlite3_result_error(tls, ctx, ts+28042, -1) + Xsqlite3_result_error(tls, ctx, ts+28089, -1) } else { var zBlob uintptr = Xsqlite3_value_blob(tls, *(*uintptr)(unsafe.Pointer(apArg))) if zBlob != 0 { @@ -168793,7 +168851,7 @@ if z == uintptr(0) { (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc = SQLITE_NOMEM } else { - (*RtreeCheck)(unsafe.Pointer(pCheck)).FzReport = Xsqlite3_mprintf(tls, ts+28075, + (*RtreeCheck)(unsafe.Pointer(pCheck)).FzReport = Xsqlite3_mprintf(tls, ts+28122, libc.VaList(bp, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzReport, func() uintptr { if (*RtreeCheck)(unsafe.Pointer(pCheck)).FzReport != 0 { return ts + 5414 @@ -168817,7 +168875,7 @@ if (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc == SQLITE_OK && (*RtreeCheck)(unsafe.Pointer(pCheck)).FpGetNode == uintptr(0) { (*RtreeCheck)(unsafe.Pointer(pCheck)).FpGetNode = rtreeCheckPrepare(tls, pCheck, - ts+28082, + ts+28129, libc.VaList(bp, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzDb, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzTab)) } @@ -168836,7 +168894,7 @@ } rtreeCheckReset(tls, pCheck, (*RtreeCheck)(unsafe.Pointer(pCheck)).FpGetNode) if (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc == SQLITE_OK && pRet == uintptr(0) { - rtreeCheckAppendMsg(tls, pCheck, ts+28127, libc.VaList(bp+16, iNode)) + rtreeCheckAppendMsg(tls, pCheck, ts+28174, libc.VaList(bp+16, iNode)) } } @@ -168850,8 +168908,8 @@ var rc int32 var pStmt uintptr *(*[2]uintptr)(unsafe.Pointer(bp + 80)) = [2]uintptr{ - ts + 28159, - ts + 28213, + ts + 28206, + ts + 28260, } if *(*uintptr)(unsafe.Pointer(pCheck + 40 + uintptr(bLeaf)*8)) == uintptr(0) { @@ -168866,23 +168924,23 @@ Xsqlite3_bind_int64(tls, pStmt, 1, iKey) rc = Xsqlite3_step(tls, pStmt) if rc == SQLITE_DONE { - rtreeCheckAppendMsg(tls, pCheck, ts+28261, + rtreeCheckAppendMsg(tls, pCheck, ts+28308, libc.VaList(bp+16, iKey, iVal, func() uintptr { if bLeaf != 0 { - return ts + 28306 + return ts + 28353 } - return ts + 28314 + return ts + 28361 }())) } else if rc == SQLITE_ROW { var ii I64 = Xsqlite3_column_int64(tls, pStmt, 0) if ii != iVal { rtreeCheckAppendMsg(tls, pCheck, - ts+28323, + ts+28370, libc.VaList(bp+40, iKey, ii, func() uintptr { if bLeaf != 0 { - return ts + 28306 + return ts + 28353 } - return ts + 28314 + return ts + 28361 }(), iKey, iVal)) } } @@ -168906,7 +168964,7 @@ return libc.Bool32(*(*RtreeValue)(unsafe.Pointer(bp + 48)) > *(*RtreeValue)(unsafe.Pointer(bp + 52))) }() != 0 { rtreeCheckAppendMsg(tls, pCheck, - ts+28381, libc.VaList(bp, i, iCell, iNode)) + ts+28428, libc.VaList(bp, i, iCell, iNode)) } if pParent != 0 { @@ -168926,7 +168984,7 @@ return 
libc.Bool32(*(*RtreeValue)(unsafe.Pointer(bp + 52)) > *(*RtreeValue)(unsafe.Pointer(bp + 60))) }() != 0 { rtreeCheckAppendMsg(tls, pCheck, - ts+28429, libc.VaList(bp+24, i, iCell, iNode)) + ts+28476, libc.VaList(bp+24, i, iCell, iNode)) } } } @@ -168943,14 +169001,14 @@ if aNode != 0 { if *(*int32)(unsafe.Pointer(bp + 48)) < 4 { rtreeCheckAppendMsg(tls, pCheck, - ts+28496, libc.VaList(bp, iNode, *(*int32)(unsafe.Pointer(bp + 48)))) + ts+28543, libc.VaList(bp, iNode, *(*int32)(unsafe.Pointer(bp + 48)))) } else { var nCell int32 var i int32 if aParent == uintptr(0) { iDepth = readInt16(tls, aNode) if iDepth > RTREE_MAX_DEPTH { - rtreeCheckAppendMsg(tls, pCheck, ts+28530, libc.VaList(bp+16, iDepth)) + rtreeCheckAppendMsg(tls, pCheck, ts+28577, libc.VaList(bp+16, iDepth)) Xsqlite3_free(tls, aNode) return } @@ -168958,7 +169016,7 @@ nCell = readInt16(tls, aNode+2) if 4+nCell*(8+(*RtreeCheck)(unsafe.Pointer(pCheck)).FnDim*2*4) > *(*int32)(unsafe.Pointer(bp + 48)) { rtreeCheckAppendMsg(tls, pCheck, - ts+28560, + ts+28607, libc.VaList(bp+24, iNode, nCell, *(*int32)(unsafe.Pointer(bp + 48)))) } else { for i = 0; i < nCell; i++ { @@ -168987,14 +169045,14 @@ if (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc == SQLITE_OK { var pCount uintptr - pCount = rtreeCheckPrepare(tls, pCheck, ts+28615, + pCount = rtreeCheckPrepare(tls, pCheck, ts+28662, libc.VaList(bp, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzDb, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzTab, zTbl)) if pCount != 0 { if Xsqlite3_step(tls, pCount) == SQLITE_ROW { var nActual I64 = Xsqlite3_column_int64(tls, pCount, 0) if nActual != nExpect { rtreeCheckAppendMsg(tls, pCheck, - ts+28646, libc.VaList(bp+24, zTbl, nExpect, nActual)) + ts+28693, libc.VaList(bp+24, zTbl, nExpect, nActual)) } } (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc = Xsqlite3_finalize(tls, pCount) @@ -169021,7 +169079,7 @@ } if (*RtreeCheck)(unsafe.Pointer(bp+32)).Frc == SQLITE_OK { - pStmt = rtreeCheckPrepare(tls, bp+32, ts+28713, libc.VaList(bp, zDb, zTab)) + pStmt = rtreeCheckPrepare(tls, bp+32, ts+28760, libc.VaList(bp, zDb, zTab)) if pStmt != 0 { nAux = Xsqlite3_column_count(tls, pStmt) - 2 Xsqlite3_finalize(tls, pStmt) @@ -169030,12 +169088,12 @@ } } - pStmt = rtreeCheckPrepare(tls, bp+32, ts+26533, libc.VaList(bp+16, zDb, zTab)) + pStmt = rtreeCheckPrepare(tls, bp+32, ts+26580, libc.VaList(bp+16, zDb, zTab)) if pStmt != 0 { var rc int32 (*RtreeCheck)(unsafe.Pointer(bp + 32)).FnDim = (Xsqlite3_column_count(tls, pStmt) - 1 - nAux) / 2 if (*RtreeCheck)(unsafe.Pointer(bp+32)).FnDim < 1 { - rtreeCheckAppendMsg(tls, bp+32, ts+28741, 0) + rtreeCheckAppendMsg(tls, bp+32, ts+28788, 0) } else if SQLITE_ROW == Xsqlite3_step(tls, pStmt) { (*RtreeCheck)(unsafe.Pointer(bp + 32)).FbInt = libc.Bool32(Xsqlite3_column_type(tls, pStmt, 1) == SQLITE_INTEGER) } @@ -169049,8 +169107,8 @@ if (*RtreeCheck)(unsafe.Pointer(bp+32)).Frc == SQLITE_OK { rtreeCheckNode(tls, bp+32, 0, uintptr(0), int64(1)) } - rtreeCheckCount(tls, bp+32, ts+28772, int64((*RtreeCheck)(unsafe.Pointer(bp+32)).FnLeaf)) - rtreeCheckCount(tls, bp+32, ts+28779, int64((*RtreeCheck)(unsafe.Pointer(bp+32)).FnNonLeaf)) + rtreeCheckCount(tls, bp+32, ts+28819, int64((*RtreeCheck)(unsafe.Pointer(bp+32)).FnLeaf)) + rtreeCheckCount(tls, bp+32, ts+28826, int64((*RtreeCheck)(unsafe.Pointer(bp+32)).FnNonLeaf)) } Xsqlite3_finalize(tls, (*RtreeCheck)(unsafe.Pointer(bp+32)).FpGetNode) @@ -169058,7 +169116,7 @@ Xsqlite3_finalize(tls, *(*uintptr)(unsafe.Pointer(bp + 32 + 40 + 1*8))) if bEnd != 0 { - var rc int32 = Xsqlite3_exec(tls, db, ts+28787, 
uintptr(0), uintptr(0), uintptr(0)) + var rc int32 = Xsqlite3_exec(tls, db, ts+28834, uintptr(0), uintptr(0), uintptr(0)) if (*RtreeCheck)(unsafe.Pointer(bp+32)).Frc == SQLITE_OK { (*RtreeCheck)(unsafe.Pointer(bp + 32)).Frc = rc } @@ -169073,7 +169131,7 @@ if nArg != 1 && nArg != 2 { Xsqlite3_result_error(tls, ctx, - ts+28791, -1) + ts+28838, -1) } else { var rc int32 *(*uintptr)(unsafe.Pointer(bp)) = uintptr(0) @@ -169091,7 +169149,7 @@ if *(*uintptr)(unsafe.Pointer(bp)) != 0 { return *(*uintptr)(unsafe.Pointer(bp)) } - return ts + 19381 + return ts + 19428 }(), -1, libc.UintptrFromInt32(-1)) } else { Xsqlite3_result_error_code(tls, ctx, rc) @@ -169462,11 +169520,11 @@ var db uintptr = Xsqlite3_context_db_handle(tls, context) var x uintptr = Xsqlite3_str_new(tls, db) var i int32 - Xsqlite3_str_append(tls, x, ts+28842, 1) + Xsqlite3_str_append(tls, x, ts+28889, 1) for i = 0; i < (*GeoPoly)(unsafe.Pointer(p)).FnVertex; i++ { - Xsqlite3_str_appendf(tls, x, ts+28844, libc.VaList(bp, float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2)*4))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2+1)*4))))) + Xsqlite3_str_appendf(tls, x, ts+28891, libc.VaList(bp, float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2)*4))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2+1)*4))))) } - Xsqlite3_str_appendf(tls, x, ts+28855, libc.VaList(bp+16, float64(*(*GeoCoord)(unsafe.Pointer(p + 8))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + 1*4))))) + Xsqlite3_str_appendf(tls, x, ts+28902, libc.VaList(bp+16, float64(*(*GeoCoord)(unsafe.Pointer(p + 8))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + 1*4))))) Xsqlite3_result_text(tls, context, Xsqlite3_str_finish(tls, x), -1, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{Xsqlite3_free}))) Xsqlite3_free(tls, p) } @@ -169486,19 +169544,19 @@ var x uintptr = Xsqlite3_str_new(tls, db) var i int32 var cSep int8 = int8('\'') - Xsqlite3_str_appendf(tls, x, ts+28866, 0) + Xsqlite3_str_appendf(tls, x, ts+28913, 0) for i = 0; i < (*GeoPoly)(unsafe.Pointer(p)).FnVertex; i++ { - Xsqlite3_str_appendf(tls, x, ts+28884, libc.VaList(bp, int32(cSep), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2)*4))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2+1)*4))))) + Xsqlite3_str_appendf(tls, x, ts+28931, libc.VaList(bp, int32(cSep), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2)*4))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2+1)*4))))) cSep = int8(' ') } - Xsqlite3_str_appendf(tls, x, ts+28892, libc.VaList(bp+24, float64(*(*GeoCoord)(unsafe.Pointer(p + 8))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + 1*4))))) + Xsqlite3_str_appendf(tls, x, ts+28939, libc.VaList(bp+24, float64(*(*GeoCoord)(unsafe.Pointer(p + 8))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + 1*4))))) for i = 1; i < argc; i++ { var z uintptr = Xsqlite3_value_text(tls, *(*uintptr)(unsafe.Pointer(argv + uintptr(i)*8))) if z != 0 && *(*int8)(unsafe.Pointer(z)) != 0 { - Xsqlite3_str_appendf(tls, x, ts+28900, libc.VaList(bp+40, z)) + Xsqlite3_str_appendf(tls, x, ts+28947, libc.VaList(bp+40, z)) } } - Xsqlite3_str_appendf(tls, x, ts+28904, 0) + Xsqlite3_str_appendf(tls, x, ts+28951, 0) Xsqlite3_result_text(tls, context, Xsqlite3_str_finish(tls, x), -1, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{Xsqlite3_free}))) Xsqlite3_free(tls, p) } @@ -170418,7 +170476,7 @@ libc.Xmemcpy(tls, (*Rtree)(unsafe.Pointer(pRtree)).FzName, *(*uintptr)(unsafe.Pointer(argv + 2*8)), uint64(nName)) pSql = Xsqlite3_str_new(tls, db) - 
Xsqlite3_str_appendf(tls, pSql, ts+28917, 0) + Xsqlite3_str_appendf(tls, pSql, ts+28964, 0) (*Rtree)(unsafe.Pointer(pRtree)).FnAux = U8(1) (*Rtree)(unsafe.Pointer(pRtree)).FnAuxNotNull = U8(1) ii = 3 @@ -170427,7 +170485,7 @@ goto __4 } (*Rtree)(unsafe.Pointer(pRtree)).FnAux++ - Xsqlite3_str_appendf(tls, pSql, ts+28939, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(argv + uintptr(ii)*8)))) + Xsqlite3_str_appendf(tls, pSql, ts+28986, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(argv + uintptr(ii)*8)))) goto __3 __3: ii++ @@ -170435,7 +170493,7 @@ goto __4 __4: ; - Xsqlite3_str_appendf(tls, pSql, ts+28006, 0) + Xsqlite3_str_appendf(tls, pSql, ts+28053, 0) zSql = Xsqlite3_str_finish(tls, pSql) if !!(zSql != 0) { goto __5 @@ -170672,7 +170730,7 @@ } if iFuncTerm >= 0 { (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxNum = idxNum - (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxStr = ts + 28943 + (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxStr = ts + 28990 (*sqlite3_index_constraint_usage)(unsafe.Pointer((*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FaConstraintUsage + uintptr(iFuncTerm)*8)).FargvIndex = 1 (*sqlite3_index_constraint_usage)(unsafe.Pointer((*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FaConstraintUsage + uintptr(iFuncTerm)*8)).Fomit = uint8(0) (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FestimatedCost = 300.0 @@ -170680,7 +170738,7 @@ return SQLITE_OK } (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxNum = 4 - (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxStr = ts + 28949 + (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxStr = ts + 28996 (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FestimatedCost = 3000000.0 (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FestimatedRows = int64(100000) return SQLITE_OK @@ -170792,7 +170850,7 @@ if !(*(*int32)(unsafe.Pointer(bp + 48)) == SQLITE_ERROR) { goto __4 } - (*Sqlite3_vtab)(unsafe.Pointer(pVtab)).FzErrMsg = Xsqlite3_mprintf(tls, ts+28958, 0) + (*Sqlite3_vtab)(unsafe.Pointer(pVtab)).FzErrMsg = Xsqlite3_mprintf(tls, ts+29005, 0) __4: ; goto geopoly_update_end @@ -170924,14 +170982,14 @@ func geopolyFindFunction(tls *libc.TLS, pVtab uintptr, nArg int32, zName uintptr, pxFunc uintptr, ppArg uintptr) int32 { _ = pVtab _ = nArg - if Xsqlite3_stricmp(tls, zName, ts+28998) == 0 { + if Xsqlite3_stricmp(tls, zName, ts+29045) == 0 { *(*uintptr)(unsafe.Pointer(pxFunc)) = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{geopolyOverlapFunc})) *(*uintptr)(unsafe.Pointer(ppArg)) = uintptr(0) return SQLITE_INDEX_CONSTRAINT_FUNCTION } - if Xsqlite3_stricmp(tls, zName, ts+29014) == 0 { + if Xsqlite3_stricmp(tls, zName, ts+29061) == 0 { *(*uintptr)(unsafe.Pointer(pxFunc)) = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{geopolyWithinFunc})) @@ -170996,7 +171054,7 @@ uintptr(0), aAgg[i].FxStep, aAgg[i].FxFinal) } if rc == SQLITE_OK { - rc = Xsqlite3_create_module_v2(tls, db, ts+29029, uintptr(unsafe.Pointer(&geopolyModule)), uintptr(0), uintptr(0)) + rc = Xsqlite3_create_module_v2(tls, db, ts+29076, uintptr(unsafe.Pointer(&geopolyModule)), uintptr(0), uintptr(0)) } return rc } @@ -171008,25 +171066,25 @@ F__ccgo_pad1 [6]byte FzName uintptr }{ - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 29037}, - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 29050}, - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 29063}, - {FxFunc: 0, FnArg: int8(-1), FbPure: uint8(1), FzName: ts + 29076}, - {FxFunc: 0, FnArg: int8(2), FbPure: 
uint8(1), FzName: ts + 29014}, - {FxFunc: 0, FnArg: int8(3), FbPure: uint8(1), FzName: ts + 29088}, - {FxFunc: 0, FnArg: int8(2), FbPure: uint8(1), FzName: ts + 28998}, - {FxFunc: 0, FnArg: int8(1), FzName: ts + 29111}, - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 29125}, - {FxFunc: 0, FnArg: int8(7), FbPure: uint8(1), FzName: ts + 29138}, - {FxFunc: 0, FnArg: int8(4), FbPure: uint8(1), FzName: ts + 29152}, - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 29168}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 29084}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 29097}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 29110}, + {FxFunc: 0, FnArg: int8(-1), FbPure: uint8(1), FzName: ts + 29123}, + {FxFunc: 0, FnArg: int8(2), FbPure: uint8(1), FzName: ts + 29061}, + {FxFunc: 0, FnArg: int8(3), FbPure: uint8(1), FzName: ts + 29135}, + {FxFunc: 0, FnArg: int8(2), FbPure: uint8(1), FzName: ts + 29045}, + {FxFunc: 0, FnArg: int8(1), FzName: ts + 29158}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 29172}, + {FxFunc: 0, FnArg: int8(7), FbPure: uint8(1), FzName: ts + 29185}, + {FxFunc: 0, FnArg: int8(4), FbPure: uint8(1), FzName: ts + 29199}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 29215}, } var aAgg = [1]struct { FxStep uintptr FxFinal uintptr FzName uintptr }{ - {FxStep: 0, FxFinal: 0, FzName: ts + 29180}, + {FxStep: 0, FxFinal: 0, FzName: ts + 29227}, } // Register the r-tree module with database handle db. This creates the @@ -171036,26 +171094,26 @@ var utf8 int32 = SQLITE_UTF8 var rc int32 - rc = Xsqlite3_create_function(tls, db, ts+29199, 2, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + rc = Xsqlite3_create_function(tls, db, ts+29246, 2, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rtreenode})), uintptr(0), uintptr(0)) if rc == SQLITE_OK { - rc = Xsqlite3_create_function(tls, db, ts+29209, 1, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + rc = Xsqlite3_create_function(tls, db, ts+29256, 1, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rtreedepth})), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { - rc = Xsqlite3_create_function(tls, db, ts+29220, -1, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + rc = Xsqlite3_create_function(tls, db, ts+29267, -1, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rtreecheck})), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { var c uintptr = uintptr(RTREE_COORD_REAL32) - rc = Xsqlite3_create_module_v2(tls, db, ts+28943, uintptr(unsafe.Pointer(&rtreeModule)), c, uintptr(0)) + rc = Xsqlite3_create_module_v2(tls, db, ts+28990, uintptr(unsafe.Pointer(&rtreeModule)), c, uintptr(0)) } if rc == SQLITE_OK { var c uintptr = uintptr(RTREE_COORD_INT32) - rc = Xsqlite3_create_module_v2(tls, db, ts+29231, uintptr(unsafe.Pointer(&rtreeModule)), c, uintptr(0)) + rc = Xsqlite3_create_module_v2(tls, db, ts+29278, uintptr(unsafe.Pointer(&rtreeModule)), c, uintptr(0)) } if rc == SQLITE_OK { rc = sqlite3_geopoly_init(tls, db) @@ -171109,7 +171167,7 @@ Xsqlite3_result_error_nomem(tls, ctx) rtreeMatchArgFree(tls, pBlob) } else { - Xsqlite3_result_pointer(tls, ctx, pBlob, ts+26519, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{rtreeMatchArgFree}))) + Xsqlite3_result_pointer(tls, ctx, pBlob, ts+26566, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) 
}{rtreeMatchArgFree}))) } } } @@ -171436,7 +171494,7 @@ nOut = rbuDeltaOutputSize(tls, aDelta, nDelta) if nOut < 0 { - Xsqlite3_result_error(tls, context, ts+29241, -1) + Xsqlite3_result_error(tls, context, ts+29288, -1) return } @@ -171447,7 +171505,7 @@ nOut2 = rbuDeltaApply(tls, aOrig, nOrig, aDelta, nDelta, aOut) if nOut2 != nOut { Xsqlite3_free(tls, aOut) - Xsqlite3_result_error(tls, context, ts+29241, -1) + Xsqlite3_result_error(tls, context, ts+29288, -1) } else { Xsqlite3_result_blob(tls, context, aOut, nOut, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{Xsqlite3_free}))) } @@ -171548,7 +171606,7 @@ rbuObjIterClearStatements(tls, pIter) if (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx == uintptr(0) { rc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+29262, uintptr(0), uintptr(0), p+64) + ts+29309, uintptr(0), uintptr(0), p+64) } if rc == SQLITE_OK { @@ -171612,7 +171670,7 @@ Xsqlite3_result_text(tls, pCtx, zIn, -1, uintptr(0)) } } else { - if libc.Xstrlen(tls, zIn) > uint64(4) && libc.Xmemcmp(tls, ts+26432, zIn, uint64(4)) == 0 { + if libc.Xstrlen(tls, zIn) > uint64(4) && libc.Xmemcmp(tls, ts+26479, zIn, uint64(4)) == 0 { var i int32 for i = 4; int32(*(*int8)(unsafe.Pointer(zIn + uintptr(i)))) >= '0' && int32(*(*int8)(unsafe.Pointer(zIn + uintptr(i)))) <= '9'; i++ { } @@ -171633,16 +171691,16 @@ rc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, pIter, p+64, Xsqlite3_mprintf(tls, - ts+29433, libc.VaList(bp, func() uintptr { + ts+29480, libc.VaList(bp, func() uintptr { if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - return ts + 29583 + return ts + 29630 } return ts + 1544 }()))) if rc == SQLITE_OK { rc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+8, p+64, - ts+29624) + ts+29671) } (*RbuObjIter)(unsafe.Pointer(pIter)).FbCleanup = 1 @@ -171758,7 +171816,7 @@ (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+32, p+64, Xsqlite3_mprintf(tls, - ts+29749, libc.VaList(bp, zTab))) + ts+29796, libc.VaList(bp, zTab))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc != SQLITE_OK || Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 32))) != SQLITE_ROW) { goto __1 } @@ -171776,7 +171834,7 @@ *(*int32)(unsafe.Pointer(piTnum)) = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 32)), 1) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+32+1*8, p+64, - Xsqlite3_mprintf(tls, ts+29868, libc.VaList(bp+8, zTab))) + Xsqlite3_mprintf(tls, ts+29915, libc.VaList(bp+8, zTab))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc != 0) { goto __3 } @@ -171794,7 +171852,7 @@ } (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+32+2*8, p+64, Xsqlite3_mprintf(tls, - ts+29889, libc.VaList(bp+16, zIdx))) + ts+29936, libc.VaList(bp+16, zIdx))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK) { goto __7 } @@ -171817,7 +171875,7 @@ __5: ; (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+32+3*8, p+64, - Xsqlite3_mprintf(tls, ts+29940, libc.VaList(bp+24, zTab))) + Xsqlite3_mprintf(tls, ts+29987, libc.VaList(bp+24, zTab))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK) { goto __10 } @@ -171863,7 +171921,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { libc.Xmemcpy(tls, (*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed, 
(*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk, uint64(unsafe.Sizeof(U8(0)))*uint64((*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol)) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+16, p+64, - Xsqlite3_mprintf(tls, ts+29961, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) + Xsqlite3_mprintf(tls, ts+30008, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) } (*RbuObjIter)(unsafe.Pointer(pIter)).FnIndex = 0 @@ -171878,7 +171936,7 @@ libc.Xmemset(tls, (*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed, 0x01, uint64(unsafe.Sizeof(U8(0)))*uint64((*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol)) } (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+24, p+64, - Xsqlite3_mprintf(tls, ts+29989, libc.VaList(bp+8, zIdx))) + Xsqlite3_mprintf(tls, ts+30036, libc.VaList(bp+8, zIdx))) for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) { var iCid int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 24)), 1) if iCid >= 0 { @@ -171918,7 +171976,7 @@ rbuTableType(tls, p, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, pIter+72, bp+56, pIter+108) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NOTABLE { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+20858, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl)) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+20905, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl)) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc != 0 { return (*Sqlite3rbu)(unsafe.Pointer(p)).Frc @@ -171928,18 +171986,18 @@ } (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp+64, p+64, - Xsqlite3_mprintf(tls, ts+30018, libc.VaList(bp+8, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl))) + Xsqlite3_mprintf(tls, ts+30065, libc.VaList(bp+8, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl))) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { nCol = Xsqlite3_column_count(tls, *(*uintptr)(unsafe.Pointer(bp + 64))) rbuAllocateIterArrays(tls, p, pIter, nCol) } for i = 0; (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && i < nCol; i++ { var zName uintptr = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp + 64)), i) - if Xsqlite3_strnicmp(tls, ts+30037, zName, 4) != 0 { + if Xsqlite3_strnicmp(tls, ts+30084, zName, 4) != 0 { var zCopy uintptr = rbuStrndup(tls, zName, p+56) *(*int32)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FaiSrcOrder + uintptr((*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol)*4)) = (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(libc.PostIncInt32(&(*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol, 1))*8)) = zCopy - } else if 0 == Xsqlite3_stricmp(tls, ts+30042, zName) { + } else if 0 == Xsqlite3_stricmp(tls, ts+30089, zName) { bRbuRowid = 1 } } @@ -171951,18 +172009,18 @@ bRbuRowid != libc.Bool32((*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_VTAB || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE) { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, - ts+30052, libc.VaList(bp+16, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, + ts+30099, 
libc.VaList(bp+16, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, func() uintptr { if bRbuRowid != 0 { - return ts + 30081 + return ts + 30128 } - return ts + 30094 + return ts + 30141 }())) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+64, p+64, - Xsqlite3_mprintf(tls, ts+30103, libc.VaList(bp+32, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) + Xsqlite3_mprintf(tls, ts+30150, libc.VaList(bp+32, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) } for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 64))) { var zName uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 64)), 1) @@ -171976,7 +172034,7 @@ } if i == (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+30125, + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+30172, libc.VaList(bp+40, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zName)) } else { var iPk int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 64)), 5) @@ -172023,7 +172081,7 @@ var i int32 for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { var z uintptr = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) - zList = rbuMPrintf(tls, p, ts+30152, libc.VaList(bp, zList, zSep, z)) + zList = rbuMPrintf(tls, p, ts+30199, libc.VaList(bp, zList, zSep, z)) zSep = ts + 15971 } return zList @@ -172041,7 +172099,7 @@ for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { if int32(*(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(i)))) == iPk { var zCol uintptr = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) - zRet = rbuMPrintf(tls, p, ts+30161, libc.VaList(bp, zRet, zSep, zPre, zCol, zPost)) + zRet = rbuMPrintf(tls, p, ts+30208, libc.VaList(bp, zRet, zSep, zPre, zCol, zPost)) zSep = zSeparator break } @@ -172063,25 +172121,25 @@ if bRowid != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+72, p+64, Xsqlite3_mprintf(tls, - ts+30174, libc.VaList(bp, zWrite, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) + ts+30221, libc.VaList(bp, zWrite, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 72))) { var iMax Sqlite3_int64 = Xsqlite3_column_int64(tls, *(*uintptr)(unsafe.Pointer(bp + 72)), 0) - zRet = rbuMPrintf(tls, p, ts+30206, libc.VaList(bp+16, iMax)) + zRet = rbuMPrintf(tls, p, ts+30253, libc.VaList(bp+16, iMax)) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 72))) } else { - var zOrder uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+1544, ts+15971, ts+30229) - var zSelect uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+30235, ts+30242, ts+6309) + var zOrder uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+1544, ts+15971, ts+30276) + var zSelect uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+30282, ts+30289, ts+6309) var zList uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+1544, ts+15971, ts+1544) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+72, p+64, 
Xsqlite3_mprintf(tls, - ts+30250, + ts+30297, libc.VaList(bp+24, zSelect, zWrite, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zOrder))) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 72))) { var zVal uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 72)), 0) - zRet = rbuMPrintf(tls, p, ts+30292, libc.VaList(bp+56, zList, zVal)) + zRet = rbuMPrintf(tls, p, ts+30339, libc.VaList(bp+56, zList, zVal)) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 72))) } @@ -172123,7 +172181,7 @@ *(*uintptr)(unsafe.Pointer(bp + 176)) = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+176, p+64, - Xsqlite3_mprintf(tls, ts+29989, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx))) + Xsqlite3_mprintf(tls, ts+30036, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx))) __1: if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 176)))) { goto __2 @@ -172158,7 +172216,7 @@ zCol = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) goto __7 __6: - zCol = ts + 30312 + zCol = ts + 30359 __7: ; goto __5 @@ -172166,11 +172224,11 @@ zCol = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(iCid)*8)) __5: ; - zLhs = rbuMPrintf(tls, p, ts+30320, + zLhs = rbuMPrintf(tls, p, ts+30367, libc.VaList(bp+8, zLhs, zSep, zCol, zCollate)) - zOrder = rbuMPrintf(tls, p, ts+30341, + zOrder = rbuMPrintf(tls, p, ts+30388, libc.VaList(bp+40, zOrder, zSep, iCol, zCol, zCollate)) - zSelect = rbuMPrintf(tls, p, ts+30377, + zSelect = rbuMPrintf(tls, p, ts+30424, libc.VaList(bp+80, zSelect, zSep, iCol, zCol)) zSep = ts + 15971 iCol++ @@ -172190,7 +172248,7 @@ *(*uintptr)(unsafe.Pointer(bp + 184)) = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+184, p+64, - Xsqlite3_mprintf(tls, ts+30404, + Xsqlite3_mprintf(tls, ts+30451, libc.VaList(bp+112, zSelect, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zOrder))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 184)))) { goto __13 @@ -172217,7 +172275,7 @@ ; __18: ; - zVector = rbuMPrintf(tls, p, ts+30452, libc.VaList(bp+136, zVector, zSep, zQuoted)) + zVector = rbuMPrintf(tls, p, ts+30499, libc.VaList(bp+136, zVector, zSep, zQuoted)) zSep = ts + 15971 goto __15 __15: @@ -172229,7 +172287,7 @@ if !!(bFailed != 0) { goto __20 } - zRet = rbuMPrintf(tls, p, ts+30459, libc.VaList(bp+160, zLhs, zVector)) + zRet = rbuMPrintf(tls, p, ts+30506, libc.VaList(bp+160, zLhs, zVector)) __20: ; __13: @@ -172262,7 +172320,7 @@ if rc == SQLITE_OK { rc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+200, p+64, - Xsqlite3_mprintf(tls, ts+29989, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx))) + Xsqlite3_mprintf(tls, ts+30036, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx))) } for rc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 200))) { @@ -172274,7 +172332,7 @@ if iCid == -2 { var iSeq int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 200)), 0) - zRet = Xsqlite3_mprintf(tls, ts+30471, libc.VaList(bp+8, zRet, zCom, + zRet = Xsqlite3_mprintf(tls, ts+30518, libc.VaList(bp+8, zRet, zCom, 
(*RbuSpan)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FaIdxCol+uintptr(iSeq)*16)).FnSpan, (*RbuSpan)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FaIdxCol+uintptr(iSeq)*16)).FzSpan, zCollate)) zType = ts + 1544 } else { @@ -172286,37 +172344,37 @@ zCol = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) } else if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - zCol = ts + 30312 + zCol = ts + 30359 } else { - zCol = ts + 30042 + zCol = ts + 30089 } zType = ts + 1109 } else { zCol = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(iCid)*8)) zType = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblType + uintptr(iCid)*8)) } - zRet = Xsqlite3_mprintf(tls, ts+30493, libc.VaList(bp+48, zRet, zCom, zCol, zCollate)) + zRet = Xsqlite3_mprintf(tls, ts+30540, libc.VaList(bp+48, zRet, zCom, zCol, zCollate)) } if (*RbuObjIter)(unsafe.Pointer(pIter)).FbUnique == 0 || Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 200)), 5) != 0 { var zOrder uintptr = func() uintptr { if bDesc != 0 { - return ts + 30229 + return ts + 30276 } return ts + 1544 }() - zImpPK = Xsqlite3_mprintf(tls, ts+30513, + zImpPK = Xsqlite3_mprintf(tls, ts+30560, libc.VaList(bp+80, zImpPK, zCom, nBind, zCol, zOrder)) } - zImpCols = Xsqlite3_mprintf(tls, ts+30534, + zImpCols = Xsqlite3_mprintf(tls, ts+30581, libc.VaList(bp+120, zImpCols, zCom, nBind, zCol, zType, zCollate)) zWhere = Xsqlite3_mprintf(tls, - ts+30567, libc.VaList(bp+168, zWhere, zAnd, nBind, zCol)) + ts+30614, libc.VaList(bp+168, zWhere, zAnd, nBind, zCol)) if zRet == uintptr(0) || zImpPK == uintptr(0) || zImpCols == uintptr(0) || zWhere == uintptr(0) { rc = SQLITE_NOMEM } zCom = ts + 15971 - zAnd = ts + 22894 + zAnd = ts + 22941 nBind++ } @@ -172355,9 +172413,9 @@ for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { if *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed + uintptr(i))) != 0 { var zCol uintptr = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) - zList = Xsqlite3_mprintf(tls, ts+30591, libc.VaList(bp, zList, zS, zObj, zCol)) + zList = Xsqlite3_mprintf(tls, ts+30638, libc.VaList(bp, zList, zS, zObj, zCol)) } else { - zList = Xsqlite3_mprintf(tls, ts+30603, libc.VaList(bp+32, zList, zS)) + zList = Xsqlite3_mprintf(tls, ts+30650, libc.VaList(bp+32, zList, zS)) } zS = ts + 15971 if zList == uintptr(0) { @@ -172367,7 +172425,7 @@ } if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { - zList = rbuMPrintf(tls, p, ts+30612, libc.VaList(bp+48, zList, zObj)) + zList = rbuMPrintf(tls, p, ts+30659, libc.VaList(bp+48, zList, zObj)) } } return zList @@ -172379,18 +172437,18 @@ var zList uintptr = uintptr(0) if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_VTAB || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { - zList = rbuMPrintf(tls, p, ts+30627, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol+1)) + zList = rbuMPrintf(tls, p, ts+30674, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol+1)) } else if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL { var zSep uintptr = ts + 1544 var i int32 for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { if *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(i))) != 0 { - zList = rbuMPrintf(tls, p, ts+30641, libc.VaList(bp+8, zList, zSep, i, i+1)) - 
zSep = ts + 22894 + zList = rbuMPrintf(tls, p, ts+30688, libc.VaList(bp+8, zList, zSep, i, i+1)) + zSep = ts + 22941 } } zList = rbuMPrintf(tls, p, - ts+30653, libc.VaList(bp+40, zList)) + ts+30700, libc.VaList(bp+40, zList)) } else { var zSep uintptr = ts + 1544 @@ -172398,8 +172456,8 @@ for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { if *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(i))) != 0 { var zCol uintptr = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) - zList = rbuMPrintf(tls, p, ts+30703, libc.VaList(bp+48, zList, zSep, zCol, i+1)) - zSep = ts + 22894 + zList = rbuMPrintf(tls, p, ts+30750, libc.VaList(bp+48, zList, zSep, zCol, i+1)) + zSep = ts + 22941 } } } @@ -172408,7 +172466,7 @@ func rbuBadControlError(tls *libc.TLS, p uintptr) { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+30716, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+30763, 0) } func rbuObjIterGetSetlist(tls *libc.TLS, p uintptr, pIter uintptr, zMask uintptr) uintptr { @@ -172426,15 +172484,15 @@ for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { var c int8 = *(*int8)(unsafe.Pointer(zMask + uintptr(*(*int32)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FaiSrcOrder + uintptr(i)*4))))) if int32(c) == 'x' { - zList = rbuMPrintf(tls, p, ts+30703, + zList = rbuMPrintf(tls, p, ts+30750, libc.VaList(bp, zList, zSep, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)), i+1)) zSep = ts + 15971 } else if int32(c) == 'd' { - zList = rbuMPrintf(tls, p, ts+30742, + zList = rbuMPrintf(tls, p, ts+30789, libc.VaList(bp+32, zList, zSep, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)), *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)), i+1)) zSep = ts + 15971 } else if int32(c) == 'f' { - zList = rbuMPrintf(tls, p, ts+30772, + zList = rbuMPrintf(tls, p, ts+30819, libc.VaList(bp+72, zList, zSep, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)), *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)), i+1)) zSep = ts + 15971 } @@ -172471,19 +172529,19 @@ var z uintptr = uintptr(0) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - var zSep uintptr = ts + 30809 + var zSep uintptr = ts + 30856 *(*uintptr)(unsafe.Pointer(bp + 56)) = uintptr(0) *(*uintptr)(unsafe.Pointer(bp + 64)) = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+56, p+64, - Xsqlite3_mprintf(tls, ts+29961, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) + Xsqlite3_mprintf(tls, ts+30008, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 56))) { var zOrig uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), 3) if zOrig != 0 && libc.Xstrcmp(tls, zOrig, ts+17513) == 0 { var zIdx uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), 1) if zIdx != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+64, p+64, - Xsqlite3_mprintf(tls, ts+29989, libc.VaList(bp+8, zIdx))) + Xsqlite3_mprintf(tls, ts+30036, libc.VaList(bp+8, zIdx))) 
} break } @@ -172495,15 +172553,15 @@ var zCol uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 64)), 2) var zDesc uintptr if Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 64)), 3) != 0 { - zDesc = ts + 30229 + zDesc = ts + 30276 } else { zDesc = ts + 1544 } - z = rbuMPrintf(tls, p, ts+30822, libc.VaList(bp+16, z, zSep, zCol, zDesc)) + z = rbuMPrintf(tls, p, ts+30869, libc.VaList(bp+16, z, zSep, zCol, zDesc)) zSep = ts + 15971 } } - z = rbuMPrintf(tls, p, ts+30833, libc.VaList(bp+48, z)) + z = rbuMPrintf(tls, p, ts+30880, libc.VaList(bp+48, z)) rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 64))) } return z @@ -172523,7 +172581,7 @@ var zPk uintptr = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+168, p+64, - ts+30837) + ts+30884) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { Xsqlite3_bind_int(tls, *(*uintptr)(unsafe.Pointer(bp + 168)), 1, tnum) if SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 168))) { @@ -172532,7 +172590,7 @@ } if zIdx != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+176, p+64, - Xsqlite3_mprintf(tls, ts+29989, libc.VaList(bp, zIdx))) + Xsqlite3_mprintf(tls, ts+30036, libc.VaList(bp, zIdx))) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 168))) @@ -172542,23 +172600,23 @@ var iCid int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 176)), 1) var bDesc int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 176)), 3) var zCollate uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 176)), 4) - zCols = rbuMPrintf(tls, p, ts+30887, libc.VaList(bp+8, zCols, zComma, + zCols = rbuMPrintf(tls, p, ts+30934, libc.VaList(bp+8, zCols, zComma, iCid, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblType + uintptr(iCid)*8)), zCollate)) - zPk = rbuMPrintf(tls, p, ts+30909, libc.VaList(bp+48, zPk, zComma, iCid, func() uintptr { + zPk = rbuMPrintf(tls, p, ts+30956, libc.VaList(bp+48, zPk, zComma, iCid, func() uintptr { if bDesc != 0 { - return ts + 30229 + return ts + 30276 } return ts + 1544 }())) zComma = ts + 15971 } } - zCols = rbuMPrintf(tls, p, ts+30919, libc.VaList(bp+80, zCols)) + zCols = rbuMPrintf(tls, p, ts+30966, libc.VaList(bp+80, zCols)) rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 176))) Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+88, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+7793, 1, tnum)) rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+30934, + ts+30981, libc.VaList(bp+120, zCols, zPk)) Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+136, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+7793, 0, 0)) } @@ -172584,13 +172642,13 @@ (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+7793, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zCol, uintptr(0), bp+192, uintptr(0), uintptr(0), uintptr(0)) if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_IPK && *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(iCol))) != 0 { - zPk = ts + 30996 + zPk = ts + 31043 } - zSql = rbuMPrintf(tls, p, ts+31009, + zSql = rbuMPrintf(tls, p, ts+31056, libc.VaList(bp+32, zSql, zComma, zCol, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblType + uintptr(iCol)*8)), zPk, *(*uintptr)(unsafe.Pointer(bp + 192)), func() uintptr { if *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabNotNull + 
uintptr(iCol))) != 0 { - return ts + 31036 + return ts + 31083 } return ts + 1544 }())) @@ -172600,16 +172658,16 @@ if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_WITHOUT_ROWID { var zPk uintptr = rbuWithoutRowidPK(tls, p, pIter) if zPk != 0 { - zSql = rbuMPrintf(tls, p, ts+31046, libc.VaList(bp+88, zSql, zPk)) + zSql = rbuMPrintf(tls, p, ts+31093, libc.VaList(bp+88, zSql, zPk)) } } Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+104, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+7793, 1, tnum)) - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31053, + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31100, libc.VaList(bp+136, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zSql, func() uintptr { if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_WITHOUT_ROWID { - return ts + 31085 + return ts + 31132 } return ts + 1544 }())) @@ -172626,7 +172684,7 @@ if zBind != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, pIter+152, p+64, Xsqlite3_mprintf(tls, - ts+31100, + ts+31147, libc.VaList(bp, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zCollist, zRbuRowid, zBind))) } } @@ -172663,7 +172721,7 @@ if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { *(*int32)(unsafe.Pointer(bp + 8)) = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp, p+64, - ts+31157) + ts+31204) } if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { var rc2 int32 @@ -172768,7 +172826,7 @@ var zLimit uintptr = uintptr(0) if nOffset != 0 { - zLimit = Xsqlite3_mprintf(tls, ts+31223, libc.VaList(bp, nOffset)) + zLimit = Xsqlite3_mprintf(tls, ts+31270, libc.VaList(bp, nOffset)) if !(zLimit != 0) { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_NOMEM } @@ -172791,7 +172849,7 @@ Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+8, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+7793, 0, 1)) Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+40, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+7793, 1, tnum)) rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+31243, + ts+31290, libc.VaList(bp+72, zTbl, *(*uintptr)(unsafe.Pointer(bp + 600)), *(*uintptr)(unsafe.Pointer(bp + 608)))) Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+96, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+7793, 0, 0)) @@ -172799,13 +172857,13 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+136, p+64, - Xsqlite3_mprintf(tls, ts+31308, libc.VaList(bp+128, zTbl, zBind))) + Xsqlite3_mprintf(tls, ts+31355, libc.VaList(bp+128, zTbl, zBind))) } if libc.Bool32((*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0)) == 0 && (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+144, p+64, - Xsqlite3_mprintf(tls, ts+31344, libc.VaList(bp+144, zTbl, *(*uintptr)(unsafe.Pointer(bp + 616))))) + Xsqlite3_mprintf(tls, ts+31391, libc.VaList(bp+144, zTbl, *(*uintptr)(unsafe.Pointer(bp + 616))))) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { @@ -172821,7 +172879,7 @@ } zSql = Xsqlite3_mprintf(tls, - ts+31378, + ts+31425, libc.VaList(bp+160, zCollist, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zPart, @@ -172829,9 +172887,9 @@ if zStart != 0 { return func() uintptr { if zPart != 0 { - return ts + 
31439 + return ts + 31486 } - return ts + 31443 + return ts + 31490 }() } return ts + 1544 @@ -172840,20 +172898,20 @@ Xsqlite3_free(tls, zStart) } else if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { zSql = Xsqlite3_mprintf(tls, - ts+31449, + ts+31496, libc.VaList(bp+216, zCollist, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zPart, zCollist, zLimit)) } else { zSql = Xsqlite3_mprintf(tls, - ts+31510, + ts+31557, libc.VaList(bp+264, zCollist, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zPart, zCollist, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zPart, func() uintptr { if zPart != 0 { - return ts + 31439 + return ts + 31486 } - return ts + 31443 + return ts + 31490 }(), zCollist, zLimit)) } @@ -172890,16 +172948,16 @@ if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_VTAB { return ts + 1544 } - return ts + 31669 + return ts + 31716 }() if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+136, pz, Xsqlite3_mprintf(tls, - ts+31678, + ts+31725, libc.VaList(bp+344, zWrite, zTbl, zCollist, func() uintptr { if bRbuRowid != 0 { - return ts + 31714 + return ts + 31761 } return ts + 1544 }(), zBindings))) @@ -172908,32 +172966,32 @@ if libc.Bool32((*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0)) == 0 && (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+144, pz, Xsqlite3_mprintf(tls, - ts+31724, libc.VaList(bp+384, zWrite, zTbl, zWhere))) + ts+31771, libc.VaList(bp+384, zWrite, zTbl, zWhere))) } if libc.Bool32((*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0)) == 0 && (*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed != 0 { var zRbuRowid uintptr = ts + 1544 if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { - zRbuRowid = ts + 31752 + zRbuRowid = ts + 31799 } rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, - ts+31764, libc.VaList(bp+408, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, func() uintptr { + ts+31811, libc.VaList(bp+408, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, func() uintptr { if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL { - return ts + 31840 + return ts + 31887 } return ts + 1544 }(), (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl)) rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+31857, + ts+31904, libc.VaList(bp+440, zWrite, zTbl, zOldlist, zWrite, zTbl, zOldlist, zWrite, zTbl, zNewlist)) if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+32156, + ts+32203, libc.VaList(bp+512, zWrite, zTbl, zNewlist)) } @@ -172946,9 +173004,9 @@ var zOrder uintptr = uintptr(0) if bRbuRowid != 0 { if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - zRbuRowid = ts + 32255 + zRbuRowid = ts + 32302 } else { - zRbuRowid = ts + 32265 + zRbuRowid = ts + 32312 } } @@ -172961,7 +173019,7 @@ } } if bRbuRowid != 0 { - zOrder = rbuMPrintf(tls, p, ts+30312, 0) + zOrder = rbuMPrintf(tls, p, ts+30359, 0) } else { zOrder = rbuObjIterGetPkList(tls, p, pIter, ts+1544, ts+15971, ts+1544) } @@ -172970,11 +173028,11 @@ if 
(*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, pIter+128, pz, Xsqlite3_mprintf(tls, - ts+32276, + ts+32323, libc.VaList(bp+536, zCollist, func() uintptr { if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - return ts + 32324 + return ts + 32371 } return ts + 1544 }(), @@ -172987,7 +173045,7 @@ }(), func() uintptr { if zOrder != 0 { - return ts + 24228 + return ts + 24275 } return ts + 1544 }(), zOrder, @@ -173055,9 +173113,9 @@ var zPrefix uintptr = ts + 1544 if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType != RBU_PK_VTAB { - zPrefix = ts + 31669 + zPrefix = ts + 31716 } - zUpdate = Xsqlite3_mprintf(tls, ts+32330, + zUpdate = Xsqlite3_mprintf(tls, ts+32377, libc.VaList(bp, zPrefix, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zSet, zWhere)) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pUp+8, p+64, zUpdate) @@ -173116,7 +173174,7 @@ } *(*int32)(unsafe.Pointer(bp + 16)) = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp+8, p+64, - Xsqlite3_mprintf(tls, ts+32360, libc.VaList(bp, p+48))) + Xsqlite3_mprintf(tls, ts+32407, libc.VaList(bp, p+48))) for *(*int32)(unsafe.Pointer(bp + 16)) == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 8))) { switch Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 8)), 0) { case RBU_STATE_STAGE: @@ -173189,18 +173247,18 @@ Xsqlite3_file_control(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+7793, SQLITE_FCNTL_RBUCNT, p) if (*Sqlite3rbu)(unsafe.Pointer(p)).FzState == uintptr(0) { var zFile uintptr = Xsqlite3_db_filename(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+7793) - (*Sqlite3rbu)(unsafe.Pointer(p)).FzState = rbuMPrintf(tls, p, ts+32390, libc.VaList(bp, zFile, zFile)) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzState = rbuMPrintf(tls, p, ts+32437, libc.VaList(bp, zFile, zFile)) } } if (*Sqlite3rbu)(unsafe.Pointer(p)).FzState != 0 { - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+32418, libc.VaList(bp+16, (*Sqlite3rbu)(unsafe.Pointer(p)).FzState)) + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+32465, libc.VaList(bp+16, (*Sqlite3rbu)(unsafe.Pointer(p)).FzState)) libc.Xmemcpy(tls, p+48, ts+14829, uint64(4)) } else { libc.Xmemcpy(tls, p+48, ts+7793, uint64(4)) } - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+32436, libc.VaList(bp+24, p+48)) + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+32483, libc.VaList(bp+24, p+48)) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { var bOpen int32 = 0 @@ -173240,11 +173298,11 @@ return } (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32502, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32549, 0) } else { var zTarget uintptr var zExtra uintptr = uintptr(0) - if libc.Xstrlen(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu) >= uint64(5) && 0 == libc.Xmemcmp(tls, ts+25575, (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu, uint64(5)) { + if libc.Xstrlen(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu) >= uint64(5) && 0 == libc.Xmemcmp(tls, ts+25622, (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu, uint64(5)) { zExtra = (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu + 5 for *(*int8)(unsafe.Pointer(zExtra)) != 0 { if 
int32(*(*int8)(unsafe.Pointer(libc.PostIncUintptr(&zExtra, 1)))) == '?' { @@ -173256,13 +173314,13 @@ } } - zTarget = Xsqlite3_mprintf(tls, ts+32534, + zTarget = Xsqlite3_mprintf(tls, ts+32581, libc.VaList(bp+32, Xsqlite3_db_filename(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+7793), func() uintptr { if zExtra == uintptr(0) { return ts + 1544 } - return ts + 32566 + return ts + 32613 }(), func() uintptr { if zExtra == uintptr(0) { return ts + 1544 @@ -173281,21 +173339,21 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_create_function(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+32568, -1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { + ts+32615, -1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rbuTmpInsertFunc})), uintptr(0), uintptr(0)) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_create_function(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+32583, 2, SQLITE_UTF8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + ts+32630, 2, SQLITE_UTF8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rbuFossilDeltaFunc})), uintptr(0), uintptr(0)) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_create_function(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, - ts+32600, -1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { + ts+32647, -1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rbuTargetNameFunc})), uintptr(0), uintptr(0)) } @@ -173303,7 +173361,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_file_control(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+7793, SQLITE_FCNTL_RBU, p) } - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+32616, 0) + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+32663, 0) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_file_control(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+7793, SQLITE_FCNTL_RBU, p) @@ -173311,7 +173369,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_NOTFOUND { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32644, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32691, 0) } } @@ -173340,14 +173398,14 @@ if pState == uintptr(0) { (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage = 0 if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+32616, uintptr(0), uintptr(0), uintptr(0)) + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+32663, uintptr(0), uintptr(0), uintptr(0)) } } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { var rc2 int32 (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage = RBU_STAGE_CAPTURE - rc2 = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+32662, uintptr(0), uintptr(0), uintptr(0)) + rc2 = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+32709, uintptr(0), uintptr(0), uintptr(0)) if rc2 != SQLITE_NOTICE { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = rc2 } @@ -173473,7 +173531,7 @@ func rbuExclusiveCheckpoint(tls *libc.TLS, db uintptr) int32 { var zUri 
uintptr = Xsqlite3_db_filename(tls, db, uintptr(0)) - return Xsqlite3_uri_boolean(tls, zUri, ts+32697, 0) + return Xsqlite3_uri_boolean(tls, zUri, ts+32744, 0) } func rbuMoveOalFile(tls *libc.TLS, p uintptr) { @@ -173488,8 +173546,8 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { zMove = Xsqlite3_db_filename(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+7793) } - zOal = Xsqlite3_mprintf(tls, ts+32722, libc.VaList(bp, zMove)) - zWal = Xsqlite3_mprintf(tls, ts+32729, libc.VaList(bp+8, zMove)) + zOal = Xsqlite3_mprintf(tls, ts+32769, libc.VaList(bp, zMove)) + zWal = Xsqlite3_mprintf(tls, ts+32776, libc.VaList(bp+8, zMove)) if zWal == uintptr(0) || zOal == uintptr(0) { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_NOMEM @@ -173606,7 +173664,7 @@ (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx == uintptr(0) && (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_IPK && *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(i))) != 0 && Xsqlite3_column_type(tls, (*RbuObjIter)(unsafe.Pointer(pIter)).FpSelect, i) == SQLITE_NULL { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_MISMATCH - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+25213, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+25260, 0) return } @@ -173699,7 +173757,7 @@ var iCookie int32 = 1000000 (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, dbread, bp+8, p+64, - ts+32736) + ts+32783) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { if SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 8))) { iCookie = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 8)), 0) @@ -173707,7 +173765,7 @@ rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 8))) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+32758, libc.VaList(bp, iCookie+1)) + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+32805, libc.VaList(bp, iCookie+1)) } } } @@ -173728,7 +173786,7 @@ rc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp+168, p+64, Xsqlite3_mprintf(tls, - ts+32785, + ts+32832, libc.VaList(bp, p+48, RBU_STATE_STAGE, eStage, RBU_STATE_TBL, (*Sqlite3rbu)(unsafe.Pointer(p)).Fobjiter.FzTbl, @@ -173758,9 +173816,9 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { *(*uintptr)(unsafe.Pointer(bp + 24)) = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp+24, p+64, - Xsqlite3_mprintf(tls, ts+32943, libc.VaList(bp, zPragma))) + Xsqlite3_mprintf(tls, ts+32990, libc.VaList(bp, zPragma))) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) { - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+32958, + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+33005, libc.VaList(bp+8, zPragma, Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 24)), 0))) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 24))) @@ -173774,10 +173832,10 @@ *(*uintptr)(unsafe.Pointer(bp)) = uintptr(0) *(*uintptr)(unsafe.Pointer(bp + 8)) = uintptr(0) - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+32978, uintptr(0), uintptr(0), p+64) + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, 
(*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+33025, uintptr(0), uintptr(0), p+64) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp, p+64, - ts+33003) + ts+33050) } for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp))) == SQLITE_ROW { @@ -173791,12 +173849,12 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp, p+64, - ts+33111) + ts+33158) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+8, p+64, - ts+33176) + ts+33223) } for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp))) == SQLITE_ROW { @@ -173808,7 +173866,7 @@ (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_reset(tls, *(*uintptr)(unsafe.Pointer(bp + 8))) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+33220, uintptr(0), uintptr(0), p+64) + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+33267, uintptr(0), uintptr(0), p+64) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp))) @@ -173836,7 +173894,7 @@ if (*RbuObjIter)(unsafe.Pointer(pIter)).FbCleanup != 0 { if libc.Bool32((*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0)) == 0 && (*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed != 0 { rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, - ts+33245, libc.VaList(bp, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl)) + ts+33292, libc.VaList(bp, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl)) } } else { rbuObjIterPrepareAll(tls, p, pIter, 0) @@ -173958,7 +174016,7 @@ if rc == SQLITE_OK && !(int32((*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl) != 0) { rc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+33273, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+33320, 0) } if rc == SQLITE_OK { @@ -173974,7 +174032,7 @@ bp := tls.Alloc(16) defer tls.Free(16) - var zOal uintptr = rbuMPrintf(tls, p, ts+32722, libc.VaList(bp, (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget)) + var zOal uintptr = rbuMPrintf(tls, p, ts+32769, libc.VaList(bp, (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget)) if zOal != 0 { *(*uintptr)(unsafe.Pointer(bp + 8)) = uintptr(0) Xsqlite3_file_control(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+7793, SQLITE_FCNTL_VFS_POINTER, bp+8) @@ -173991,7 +174049,7 @@ defer tls.Free(76) Xsqlite3_randomness(tls, int32(unsafe.Sizeof(int32(0))), bp+8) - Xsqlite3_snprintf(tls, int32(unsafe.Sizeof([64]int8{})), bp+12, ts+33298, libc.VaList(bp, *(*int32)(unsafe.Pointer(bp + 8)))) + Xsqlite3_snprintf(tls, int32(unsafe.Sizeof([64]int8{})), bp+12, ts+33345, libc.VaList(bp, *(*int32)(unsafe.Pointer(bp + 8)))) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3rbu_create_vfs(tls, bp+12, uintptr(0)) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { var pVfs uintptr = Xsqlite3_vfs_find(tls, bp+12) @@ -174025,7 +174083,7 @@ rc = prepareFreeAndCollectError(tls, db, bp+8, bp+16, Xsqlite3_mprintf(tls, - ts+33309, libc.VaList(bp, Xsqlite3_value_text(tls, *(*uintptr)(unsafe.Pointer(apVal)))))) + ts+33356, libc.VaList(bp, Xsqlite3_value_text(tls, 
*(*uintptr)(unsafe.Pointer(apVal)))))) if rc != SQLITE_OK { Xsqlite3_result_error(tls, pCtx, *(*uintptr)(unsafe.Pointer(bp + 16)), -1) } else { @@ -174055,13 +174113,13 @@ (*Sqlite3rbu)(unsafe.Pointer(p)).FnPhaseOneStep = int64(-1) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_create_function(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, - ts+33381, 1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { + ts+33428, 1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rbuIndexCntFunc})), uintptr(0), uintptr(0)) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp, p+64, - ts+33395) + ts+33442) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { if SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp))) { @@ -174072,7 +174130,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && bExists != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp, p+64, - ts+33452) + ts+33499) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { if SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp))) { (*Sqlite3rbu)(unsafe.Pointer(p)).FnPhaseOneStep = Xsqlite3_column_int64(tls, *(*uintptr)(unsafe.Pointer(bp)), 0) @@ -174146,7 +174204,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && (*Rbu_file)(unsafe.Pointer((*Sqlite3rbu)(unsafe.Pointer(p)).FpTargetFd)).FpWalFd != 0 { if (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage == RBU_STAGE_OAL { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+33526, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+33573, 0) } else if (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage == RBU_STAGE_MOVE { (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage = RBU_STAGE_CKPT (*Sqlite3rbu)(unsafe.Pointer(p)).FnStep = 0 @@ -174164,12 +174222,12 @@ }() if (*Rbu_file)(unsafe.Pointer(pFd)).FiCookie != (*RbuState)(unsafe.Pointer(pState)).FiCookie { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_BUSY - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+33558, + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+33605, libc.VaList(bp+8, func() uintptr { if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - return ts + 33590 + return ts + 33637 } - return ts + 33597 + return ts + 33644 }())) } } @@ -174193,14 +174251,14 @@ } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, db, ts+33604, uintptr(0), uintptr(0), p+64) + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, db, ts+33651, uintptr(0), uintptr(0), p+64) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { var frc int32 = Xsqlite3_file_control(tls, db, ts+7793, SQLITE_FCNTL_ZIPVFS, uintptr(0)) if frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, - db, ts+33620, uintptr(0), uintptr(0), p+64) + db, ts+33667, uintptr(0), uintptr(0), p+64) } } @@ -174254,7 +174312,7 @@ } if zState != 0 { var n int32 = int32(libc.Xstrlen(tls, zState)) - if n >= 7 && 0 == libc.Xmemcmp(tls, ts+33644, zState+uintptr(n-7), uint64(7)) { + if n >= 7 && 0 == libc.Xmemcmp(tls, ts+33691, zState+uintptr(n-7), uint64(7)) { return rbuMisuseError(tls) } } @@ -174281,7 +174339,7 @@ var i uint32 var nErrmsg Size_t = libc.Xstrlen(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg) for i = uint32(0); 
Size_t(i) < nErrmsg-uint64(8); i++ { - if libc.Xmemcmp(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg+uintptr(i), ts+31669, uint64(8)) == 0 { + if libc.Xmemcmp(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg+uintptr(i), ts+31716, uint64(8)) == 0 { var nDel int32 = 8 for int32(*(*int8)(unsafe.Pointer((*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg + uintptr(i+uint32(nDel))))) >= '0' && int32(*(*int8)(unsafe.Pointer((*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg + uintptr(i+uint32(nDel))))) <= '9' { nDel++ @@ -174317,7 +174375,7 @@ rbuObjIterFinalize(tls, p+80) if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) && (*Sqlite3rbu)(unsafe.Pointer(p)).Frc != SQLITE_OK && (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu != 0 { - var rc2 int32 = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+33652, uintptr(0), uintptr(0), uintptr(0)) + var rc2 int32 = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+33699, uintptr(0), uintptr(0), uintptr(0)) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_DONE && rc2 != SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = rc2 } @@ -174436,12 +174494,12 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { zBegin = ts + 15860 } else { - zBegin = ts + 33604 + zBegin = ts + 33651 } rc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, zBegin, uintptr(0), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { - rc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+33604, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+33651, uintptr(0), uintptr(0), uintptr(0)) } } @@ -174787,7 +174845,7 @@ })(unsafe.Pointer(&struct{ uintptr }{xControl})).f(tls, (*Rbu_file)(unsafe.Pointer(p)).FpReal, SQLITE_FCNTL_ZIPVFS, bp+16) if rc == SQLITE_OK { rc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(pRbu)).FzErrmsg = Xsqlite3_mprintf(tls, ts+33679, 0) + (*Sqlite3rbu)(unsafe.Pointer(pRbu)).FzErrmsg = Xsqlite3_mprintf(tls, ts+33726, 0) } else if rc == SQLITE_NOTFOUND { (*Sqlite3rbu)(unsafe.Pointer(pRbu)).FpTargetFd = p (*Rbu_file)(unsafe.Pointer(p)).FpRbu = pRbu @@ -174812,7 +174870,7 @@ if rc == SQLITE_OK && op == SQLITE_FCNTL_VFSNAME { var pRbuVfs uintptr = (*Rbu_file)(unsafe.Pointer(p)).FpRbuVfs var zIn uintptr = *(*uintptr)(unsafe.Pointer(pArg)) - var zOut uintptr = Xsqlite3_mprintf(tls, ts+33702, libc.VaList(bp, (*Rbu_vfs)(unsafe.Pointer(pRbuVfs)).Fbase.FzName, zIn)) + var zOut uintptr = Xsqlite3_mprintf(tls, ts+33749, libc.VaList(bp, (*Rbu_vfs)(unsafe.Pointer(pRbuVfs)).Fbase.FzName, zIn)) *(*uintptr)(unsafe.Pointer(pArg)) = zOut if zOut == uintptr(0) { rc = SQLITE_NOMEM @@ -174972,7 +175030,7 @@ } if oflags&SQLITE_OPEN_MAIN_DB != 0 && - Xsqlite3_uri_boolean(tls, zName, ts+33713, 0) != 0 { + Xsqlite3_uri_boolean(tls, zName, ts+33760, 0) != 0 { oflags = SQLITE_OPEN_TEMP_DB | SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE | SQLITE_OPEN_EXCLUSIVE | SQLITE_OPEN_DELETEONCLOSE zOpen = uintptr(0) } @@ -175801,7 +175859,7 @@ rc = Xsqlite3_table_column_metadata(tls, db, zDb, zThis, uintptr(0), uintptr(0), uintptr(0), uintptr(0), uintptr(0), uintptr(0)) if rc == SQLITE_OK { zPragma = Xsqlite3_mprintf(tls, - ts+33724, 0) + ts+33771, 0) } else if rc == SQLITE_ERROR { zPragma = Xsqlite3_mprintf(tls, ts+1544, 0) } else { @@ -175814,7 +175872,7 @@ return rc } } else { - zPragma = Xsqlite3_mprintf(tls, ts+33845, libc.VaList(bp, zDb, zThis)) + zPragma = Xsqlite3_mprintf(tls, ts+33892, libc.VaList(bp, zDb, zThis)) } if !(zPragma != 0) { *(*uintptr)(unsafe.Pointer(pazCol)) = uintptr(0) @@ -176494,9 
+176552,9 @@ for i = 0; i < nCol; i++ { if *(*U8)(unsafe.Pointer(abPK + uintptr(i))) != 0 { - zRet = Xsqlite3_mprintf(tls, ts+33874, + zRet = Xsqlite3_mprintf(tls, ts+33921, libc.VaList(bp, zRet, zSep, zDb1, zTab, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*8)), zDb2, zTab, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*8)))) - zSep = ts + 22894 + zSep = ts + 22941 if zRet == uintptr(0) { break } @@ -176519,9 +176577,9 @@ if int32(*(*U8)(unsafe.Pointer(abPK + uintptr(i)))) == 0 { bHave = 1 zRet = Xsqlite3_mprintf(tls, - ts+33908, + ts+33955, libc.VaList(bp, zRet, zSep, zDb1, zTab, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*8)), zDb2, zTab, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*8)))) - zSep = ts + 33949 + zSep = ts + 33996 if zRet == uintptr(0) { break } @@ -176529,7 +176587,7 @@ } if bHave == 0 { - zRet = Xsqlite3_mprintf(tls, ts+8882, 0) + zRet = Xsqlite3_mprintf(tls, ts+8871, 0) } return zRet @@ -176540,7 +176598,7 @@ defer tls.Free(40) var zRet uintptr = Xsqlite3_mprintf(tls, - ts+33954, + ts+34001, libc.VaList(bp, zDb1, zTbl, zDb2, zTbl, zExpr)) return zRet } @@ -176583,7 +176641,7 @@ rc = SQLITE_NOMEM } else { var zStmt uintptr = Xsqlite3_mprintf(tls, - ts+34032, + ts+34079, libc.VaList(bp, (*Sqlite3_session)(unsafe.Pointer(pSession)).FzDb, (*SessionTable)(unsafe.Pointer(pTab)).FzName, zFrom, (*SessionTable)(unsafe.Pointer(pTab)).FzName, zExpr, zExpr2)) if zStmt == uintptr(0) { rc = SQLITE_NOMEM @@ -176710,7 +176768,7 @@ if !(pzErrMsg != 0) { goto __16 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+34085, 0) + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+34132, 0) __16: ; rc = SQLITE_SCHEMA @@ -177186,7 +177244,7 @@ if 0 == Xsqlite3_stricmp(tls, ts+12700, zTab) { zSql = Xsqlite3_mprintf(tls, - ts+34112, libc.VaList(bp, zDb)) + ts+34159, libc.VaList(bp, zDb)) if zSql == uintptr(0) { *(*int32)(unsafe.Pointer(bp + 24)) = SQLITE_NOMEM } @@ -177195,18 +177253,18 @@ var zSep uintptr = ts + 1544 *(*SessionBuffer)(unsafe.Pointer(bp + 8)) = SessionBuffer{} - sessionAppendStr(tls, bp+8, ts+34222, bp+24) + sessionAppendStr(tls, bp+8, ts+34269, bp+24) sessionAppendIdent(tls, bp+8, zDb, bp+24) sessionAppendStr(tls, bp+8, ts+1557, bp+24) sessionAppendIdent(tls, bp+8, zTab, bp+24) - sessionAppendStr(tls, bp+8, ts+34237, bp+24) + sessionAppendStr(tls, bp+8, ts+34284, bp+24) for i = 0; i < nCol; i++ { if *(*U8)(unsafe.Pointer(abPK + uintptr(i))) != 0 { sessionAppendStr(tls, bp+8, zSep, bp+24) sessionAppendIdent(tls, bp+8, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*8)), bp+24) - sessionAppendStr(tls, bp+8, ts+34245, bp+24) + sessionAppendStr(tls, bp+8, ts+34292, bp+24) sessionAppendInteger(tls, bp+8, i+1, bp+24) - zSep = ts + 22894 + zSep = ts + 22941 } } zSql = (*SessionBuffer)(unsafe.Pointer(bp + 8)).FaBuf @@ -177315,7 +177373,7 @@ if (*Sqlite3_session)(unsafe.Pointer(pSession)).Frc != 0 { return (*Sqlite3_session)(unsafe.Pointer(pSession)).Frc } - *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3_exec(tls, (*Sqlite3_session)(unsafe.Pointer(pSession)).Fdb, ts+34251, uintptr(0), uintptr(0), uintptr(0)) + *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3_exec(tls, (*Sqlite3_session)(unsafe.Pointer(pSession)).Fdb, ts+34298, uintptr(0), uintptr(0), uintptr(0)) if *(*int32)(unsafe.Pointer(bp + 40)) != SQLITE_OK { return *(*int32)(unsafe.Pointer(bp + 40)) } @@ -177407,7 +177465,7 @@ } Xsqlite3_free(tls, (*SessionBuffer)(unsafe.Pointer(bp+24)).FaBuf) - Xsqlite3_exec(tls, db, ts+34271, uintptr(0), uintptr(0), uintptr(0)) + Xsqlite3_exec(tls, db, ts+34318, 
uintptr(0), uintptr(0), uintptr(0)) Xsqlite3_mutex_leave(tls, Xsqlite3_db_mutex(tls, db)) return *(*int32)(unsafe.Pointer(bp + 40)) } @@ -177670,7 +177728,7 @@ rc = sessionInputBuffer(tls, pIn, 9) if rc == SQLITE_OK { if (*SessionInput)(unsafe.Pointer(pIn)).FiNext >= (*SessionInput)(unsafe.Pointer(pIn)).FnData { - rc = Xsqlite3CorruptError(tls, 219078) + rc = Xsqlite3CorruptError(tls, 219169) } else { eType = int32(*(*U8)(unsafe.Pointer((*SessionInput)(unsafe.Pointer(pIn)).FaData + uintptr(libc.PostIncInt32(&(*SessionInput)(unsafe.Pointer(pIn)).FiNext, 1))))) @@ -177693,7 +177751,7 @@ rc = sessionInputBuffer(tls, pIn, *(*int32)(unsafe.Pointer(bp))) if rc == SQLITE_OK { if *(*int32)(unsafe.Pointer(bp)) < 0 || *(*int32)(unsafe.Pointer(bp)) > (*SessionInput)(unsafe.Pointer(pIn)).FnData-(*SessionInput)(unsafe.Pointer(pIn)).FiNext { - rc = Xsqlite3CorruptError(tls, 219098) + rc = Xsqlite3CorruptError(tls, 219189) } else { var enc U8 = func() uint8 { if eType == SQLITE_TEXT { @@ -177735,7 +177793,7 @@ nRead = nRead + sessionVarintGet(tls, (*SessionInput)(unsafe.Pointer(pIn)).FaData+uintptr((*SessionInput)(unsafe.Pointer(pIn)).FiNext+nRead), bp) if *(*int32)(unsafe.Pointer(bp)) < 0 || *(*int32)(unsafe.Pointer(bp)) > 65536 { - rc = Xsqlite3CorruptError(tls, 219152) + rc = Xsqlite3CorruptError(tls, 219243) } else { rc = sessionInputBuffer(tls, pIn, nRead+*(*int32)(unsafe.Pointer(bp))+100) nRead = nRead + *(*int32)(unsafe.Pointer(bp)) @@ -177796,7 +177854,7 @@ (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Ftblhdr.FnBuf = 0 sessionBufferGrow(tls, p+72, int64(nByte), bp+4) } else { - *(*int32)(unsafe.Pointer(bp + 4)) = Xsqlite3CorruptError(tls, 219240) + *(*int32)(unsafe.Pointer(bp + 4)) = Xsqlite3CorruptError(tls, 219331) } } @@ -177870,13 +177928,13 @@ } if (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FzTab == uintptr(0) || (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FbPatchset != 0 && (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FbInvert != 0 { - return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219326)) + return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219417)) } (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fop = int32(op) (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FbIndirect = int32(*(*U8)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fin.FaData + uintptr(libc.PostIncInt32(&(*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fin.FiNext, 1))))) if (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fop != SQLITE_UPDATE && (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fop != SQLITE_DELETE && (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fop != SQLITE_INSERT { - return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219332)) + return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219423)) } if paRec != 0 { @@ -177938,7 +177996,7 @@ if *(*U8)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FabPK + uintptr(i))) != 0 { *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FapValue + uintptr(i)*8)) = *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FapValue + uintptr(i+(*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FnCol)*8)) if *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FapValue + uintptr(i)*8)) == uintptr(0) { - return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219376)) + return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219467)) } *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FapValue + 
uintptr(i+(*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FnCol)*8)) = uintptr(0) } @@ -178311,7 +178369,7 @@ goto __6 __11: - *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3CorruptError(tls, 219741) + *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3CorruptError(tls, 219832) goto finished_invert __6: ; @@ -178490,34 +178548,34 @@ (*SessionUpdate)(unsafe.Pointer(pUp)).FaMask = pUp + 1*24 libc.Xmemcpy(tls, (*SessionUpdate)(unsafe.Pointer(pUp)).FaMask, (*SessionApplyCtx)(unsafe.Pointer(p)).FaUpdateMask, uint64(nU32)*uint64(unsafe.Sizeof(U32(0)))) - sessionAppendStr(tls, bp, ts+34289, bp+16) + sessionAppendStr(tls, bp, ts+34336, bp+16) sessionAppendIdent(tls, bp, (*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FzTab, bp+16) - sessionAppendStr(tls, bp, ts+34302, bp+16) + sessionAppendStr(tls, bp, ts+34349, bp+16) for ii = 0; ii < (*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FnCol; ii++ { if int32(*(*U8)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FabPK + uintptr(ii)))) == 0 && *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FapValue + uintptr((*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FnCol+ii)*8)) != 0 { sessionAppendStr(tls, bp, zSep, bp+16) sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(ii)*8)), bp+16) - sessionAppendStr(tls, bp, ts+34308, bp+16) + sessionAppendStr(tls, bp, ts+34355, bp+16) sessionAppendInteger(tls, bp, ii*2+1, bp+16) zSep = ts + 15971 } } zSep = ts + 1544 - sessionAppendStr(tls, bp, ts+34237, bp+16) + sessionAppendStr(tls, bp, ts+34284, bp+16) for ii = 0; ii < (*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FnCol; ii++ { if *(*U8)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FabPK + uintptr(ii))) != 0 || bPatchset == 0 && *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FapValue + uintptr(ii)*8)) != 0 { sessionAppendStr(tls, bp, zSep, bp+16) if bStat1 != 0 && ii == 1 { sessionAppendStr(tls, bp, - ts+34313, bp+16) + ts+34360, bp+16) } else { sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(ii)*8)), bp+16) - sessionAppendStr(tls, bp, ts+34245, bp+16) + sessionAppendStr(tls, bp, ts+34292, bp+16) sessionAppendInteger(tls, bp, ii*2+2, bp+16) } - zSep = ts + 22894 + zSep = ts + 22941 } } @@ -178569,34 +178627,34 @@ *(*SessionBuffer)(unsafe.Pointer(bp)) = SessionBuffer{} var nPk int32 = 0 - sessionAppendStr(tls, bp, ts+34388, bp+16) + sessionAppendStr(tls, bp, ts+34435, bp+16) sessionAppendIdent(tls, bp, zTab, bp+16) - sessionAppendStr(tls, bp, ts+34237, bp+16) + sessionAppendStr(tls, bp, ts+34284, bp+16) for i = 0; i < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol; i++ { if *(*U8)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FabPK + uintptr(i))) != 0 { nPk++ sessionAppendStr(tls, bp, zSep, bp+16) sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(i)*8)), bp+16) - sessionAppendStr(tls, bp, ts+34308, bp+16) + sessionAppendStr(tls, bp, ts+34355, bp+16) sessionAppendInteger(tls, bp, i+1, bp+16) - zSep = ts + 22894 + zSep = ts + 22941 } } if nPk < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol { - sessionAppendStr(tls, bp, ts+34406, bp+16) + sessionAppendStr(tls, bp, ts+34453, bp+16) sessionAppendInteger(tls, bp, (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol+1, bp+16) - sessionAppendStr(tls, bp, ts+33949, bp+16) + sessionAppendStr(tls, bp, ts+33996, bp+16) zSep = ts + 1544 for i = 0; i < 
(*SessionApplyCtx)(unsafe.Pointer(p)).FnCol; i++ { if !(int32(*(*U8)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FabPK + uintptr(i)))) != 0) { sessionAppendStr(tls, bp, zSep, bp+16) sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(i)*8)), bp+16) - sessionAppendStr(tls, bp, ts+34245, bp+16) + sessionAppendStr(tls, bp, ts+34292, bp+16) sessionAppendInteger(tls, bp, i+1, bp+16) - zSep = ts + 34414 + zSep = ts + 34461 } } sessionAppendStr(tls, bp, ts+6309, bp+16) @@ -178623,9 +178681,9 @@ var i int32 *(*SessionBuffer)(unsafe.Pointer(bp)) = SessionBuffer{} - sessionAppendStr(tls, bp, ts+34419, bp+16) + sessionAppendStr(tls, bp, ts+34466, bp+16) sessionAppendIdent(tls, bp, zTab, bp+16) - sessionAppendStr(tls, bp, ts+22900, bp+16) + sessionAppendStr(tls, bp, ts+22947, bp+16) for i = 0; i < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol; i++ { if i != 0 { sessionAppendStr(tls, bp, ts+15971, bp+16) @@ -178633,9 +178691,9 @@ sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(i)*8)), bp+16) } - sessionAppendStr(tls, bp, ts+34437, bp+16) + sessionAppendStr(tls, bp, ts+34484, bp+16) for i = 1; i < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol; i++ { - sessionAppendStr(tls, bp, ts+34448, bp+16) + sessionAppendStr(tls, bp, ts+34495, bp+16) } sessionAppendStr(tls, bp, ts+6309, bp+16) @@ -178654,11 +178712,11 @@ var rc int32 = sessionSelectRow(tls, db, ts+12700, p) if rc == SQLITE_OK { rc = sessionPrepare(tls, db, p+16, - ts+34452) + ts+34499) } if rc == SQLITE_OK { rc = sessionPrepare(tls, db, p+8, - ts+34565) + ts+34612) } return rc } @@ -178686,7 +178744,7 @@ f func(*libc.TLS, uintptr, int32, uintptr) int32 })(unsafe.Pointer(&struct{ uintptr }{xValue})).f(tls, pIter, i, bp) if *(*uintptr)(unsafe.Pointer(bp)) == uintptr(0) { - rc = Xsqlite3CorruptError(tls, 220219) + rc = Xsqlite3CorruptError(tls, 220310) } else { rc = sessionBindValue(tls, pStmt, i+1, *(*uintptr)(unsafe.Pointer(bp))) } @@ -178939,7 +178997,7 @@ if *(*int32)(unsafe.Pointer(bp + 4)) != 0 { rc = sessionApplyOneOp(tls, pIter, pApply, xConflict, pCtx, uintptr(0), uintptr(0)) } else if *(*int32)(unsafe.Pointer(bp)) != 0 { - rc = Xsqlite3_exec(tls, db, ts+34709, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+34756, uintptr(0), uintptr(0), uintptr(0)) if rc == SQLITE_OK { rc = sessionBindRow(tls, pIter, *(*uintptr)(unsafe.Pointer(&struct { @@ -178955,7 +179013,7 @@ rc = sessionApplyOneOp(tls, pIter, pApply, xConflict, pCtx, uintptr(0), uintptr(0)) } if rc == SQLITE_OK { - rc = Xsqlite3_exec(tls, db, ts+34730, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+34777, uintptr(0), uintptr(0), uintptr(0)) } } } @@ -179028,10 +179086,10 @@ (*SessionApplyCtx)(unsafe.Pointer(bp + 48)).FbInvertConstraints = libc.BoolInt32(!!(flags&SQLITE_CHANGESETAPPLY_INVERT != 0)) Xsqlite3_mutex_enter(tls, Xsqlite3_db_mutex(tls, db)) if flags&SQLITE_CHANGESETAPPLY_NOSAVEPOINT == 0 { - rc = Xsqlite3_exec(tls, db, ts+34749, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+34796, uintptr(0), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { - rc = Xsqlite3_exec(tls, db, ts+34775, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+34822, uintptr(0), uintptr(0), uintptr(0)) } for rc == SQLITE_OK && SQLITE_ROW == Xsqlite3changeset_next(tls, pIter) { Xsqlite3changeset_op(tls, pIter, bp+176, bp+184, bp+188, uintptr(0)) @@ -179090,16 +179148,16 @@ if 
(*SessionApplyCtx)(unsafe.Pointer(bp+48)).FnCol == 0 { schemaMismatch = 1 Xsqlite3_log(tls, SQLITE_SCHEMA, - ts+34805, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(bp + 200)))) + ts+34852, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(bp + 200)))) } else if (*SessionApplyCtx)(unsafe.Pointer(bp+48)).FnCol < *(*int32)(unsafe.Pointer(bp + 184)) { schemaMismatch = 1 Xsqlite3_log(tls, SQLITE_SCHEMA, - ts+34849, + ts+34896, libc.VaList(bp+16, *(*uintptr)(unsafe.Pointer(bp + 200)), (*SessionApplyCtx)(unsafe.Pointer(bp+48)).FnCol, *(*int32)(unsafe.Pointer(bp + 184)))) } else if *(*int32)(unsafe.Pointer(bp + 184)) < nMinCol || libc.Xmemcmp(tls, (*SessionApplyCtx)(unsafe.Pointer(bp+48)).FabPK, *(*uintptr)(unsafe.Pointer(bp + 192)), uint64(*(*int32)(unsafe.Pointer(bp + 184)))) != 0 { schemaMismatch = 1 Xsqlite3_log(tls, SQLITE_SCHEMA, - ts+34920, libc.VaList(bp+40, *(*uintptr)(unsafe.Pointer(bp + 200)))) + ts+34967, libc.VaList(bp+40, *(*uintptr)(unsafe.Pointer(bp + 200)))) } else { (*SessionApplyCtx)(unsafe.Pointer(bp + 48)).FnCol = *(*int32)(unsafe.Pointer(bp + 184)) if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(bp + 200)), ts+12700) { @@ -179153,14 +179211,14 @@ } } } - Xsqlite3_exec(tls, db, ts+34980, uintptr(0), uintptr(0), uintptr(0)) + Xsqlite3_exec(tls, db, ts+35027, uintptr(0), uintptr(0), uintptr(0)) if flags&SQLITE_CHANGESETAPPLY_NOSAVEPOINT == 0 { if rc == SQLITE_OK { - rc = Xsqlite3_exec(tls, db, ts+35010, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+35057, uintptr(0), uintptr(0), uintptr(0)) } else { - Xsqlite3_exec(tls, db, ts+35034, uintptr(0), uintptr(0), uintptr(0)) - Xsqlite3_exec(tls, db, ts+35010, uintptr(0), uintptr(0), uintptr(0)) + Xsqlite3_exec(tls, db, ts+35081, uintptr(0), uintptr(0), uintptr(0)) + Xsqlite3_exec(tls, db, ts+35057, uintptr(0), uintptr(0), uintptr(0)) } } @@ -180408,7 +180466,7 @@ fts5yy_pop_parser_stack(tls, fts5yypParser) } - sqlite3Fts5ParseError(tls, pParse, ts+35062, 0) + sqlite3Fts5ParseError(tls, pParse, ts+35109, 0) (*Fts5yyParser)(unsafe.Pointer(fts5yypParser)).FpParse = pParse @@ -180696,7 +180754,7 @@ _ = fts5yymajor sqlite3Fts5ParseError(tls, - pParse, ts+35090, libc.VaList(bp, fts5yyminor.Fn, fts5yyminor.Fp)) + pParse, ts+35137, libc.VaList(bp, fts5yyminor.Fn, fts5yyminor.Fp)) (*Fts5yyParser)(unsafe.Pointer(fts5yypParser)).FpParse = pParse @@ -180883,7 +180941,7 @@ if n < 0 { n = int32(libc.Xstrlen(tls, z)) } - (*HighlightContext)(unsafe.Pointer(p)).FzOut = Xsqlite3_mprintf(tls, ts+35121, libc.VaList(bp, (*HighlightContext)(unsafe.Pointer(p)).FzOut, n, z)) + (*HighlightContext)(unsafe.Pointer(p)).FzOut = Xsqlite3_mprintf(tls, ts+35168, libc.VaList(bp, (*HighlightContext)(unsafe.Pointer(p)).FzOut, n, z)) if (*HighlightContext)(unsafe.Pointer(p)).FzOut == uintptr(0) { *(*int32)(unsafe.Pointer(pRc)) = SQLITE_NOMEM } @@ -180951,7 +181009,7 @@ var iCol int32 if nVal != 3 { - var zErr uintptr = ts + 35128 + var zErr uintptr = ts + 35175 Xsqlite3_result_error(tls, pCtx, zErr, -1) return } @@ -181133,7 +181191,7 @@ var nCol int32 if nVal != 5 { - var zErr uintptr = ts + 35178 + var zErr uintptr = ts + 35225 Xsqlite3_result_error(tls, pCtx, zErr, -1) return } @@ -181457,13 +181515,13 @@ defer tls.Free(96) *(*[3]Builtin)(unsafe.Pointer(bp)) = [3]Builtin{ - {FzFunc: ts + 35226, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { + {FzFunc: ts + 35273, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr, int32, uintptr) }{fts5SnippetFunction}))}, - {FzFunc: ts + 35234, FxFunc: 
*(*uintptr)(unsafe.Pointer(&struct { + {FzFunc: ts + 35281, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr, int32, uintptr) }{fts5HighlightFunction}))}, - {FzFunc: ts + 35244, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { + {FzFunc: ts + 35291, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr, int32, uintptr) }{fts5Bm25Function}))}, } @@ -182014,7 +182072,7 @@ *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_OK var nCmd int32 = int32(libc.Xstrlen(tls, zCmd)) - if Xsqlite3_strnicmp(tls, ts+35249, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+35296, zCmd, nCmd) == 0 { var nByte int32 = int32(uint64(unsafe.Sizeof(int32(0))) * uint64(FTS5_MAX_PREFIX_INDEXES)) var p uintptr var bFirst int32 = 1 @@ -182041,14 +182099,14 @@ break } if int32(*(*int8)(unsafe.Pointer(p))) < '0' || int32(*(*int8)(unsafe.Pointer(p))) > '9' { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35256, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35303, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR break } if (*Fts5Config)(unsafe.Pointer(pConfig)).FnPrefix == FTS5_MAX_PREFIX_INDEXES { *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, - ts+35287, libc.VaList(bp, FTS5_MAX_PREFIX_INDEXES)) + ts+35334, libc.VaList(bp, FTS5_MAX_PREFIX_INDEXES)) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR break } @@ -182059,7 +182117,7 @@ } if nPre <= 0 || nPre >= 1000 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35320, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35367, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR break } @@ -182072,7 +182130,7 @@ return *(*int32)(unsafe.Pointer(bp + 40)) } - if Xsqlite3_strnicmp(tls, ts+35357, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+35404, zCmd, nCmd) == 0 { var p uintptr = zArg var nArg Sqlite3_int64 = Sqlite3_int64(libc.Xstrlen(tls, zArg) + uint64(1)) var azArg uintptr = sqlite3Fts5MallocZero(tls, bp+40, int64(uint64(unsafe.Sizeof(uintptr(0)))*uint64(nArg))) @@ -182081,7 +182139,7 @@ if azArg != 0 && pSpace != 0 { if (*Fts5Config)(unsafe.Pointer(pConfig)).FpTok != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35366, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35413, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { for nArg = int64(0); p != 0 && *(*int8)(unsafe.Pointer(p)) != 0; nArg++ { @@ -182100,7 +182158,7 @@ } } if p == uintptr(0) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35399, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35446, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { *(*int32)(unsafe.Pointer(bp + 40)) = sqlite3Fts5GetTokenizer(tls, pGlobal, @@ -182115,14 +182173,14 @@ return *(*int32)(unsafe.Pointer(bp + 40)) } - if Xsqlite3_strnicmp(tls, ts+35433, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+35480, zCmd, nCmd) == 0 { if (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent != FTS5_CONTENT_NORMAL { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35441, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35488, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { if *(*int8)(unsafe.Pointer(zArg)) != 0 { (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent = FTS5_CONTENT_EXTERNAL - (*Fts5Config)(unsafe.Pointer(pConfig)).FzContent = sqlite3Fts5Mprintf(tls, bp+40, ts+35473, libc.VaList(bp+8, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, zArg)) + 
(*Fts5Config)(unsafe.Pointer(pConfig)).FzContent = sqlite3Fts5Mprintf(tls, bp+40, ts+35520, libc.VaList(bp+8, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, zArg)) } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent = FTS5_CONTENT_NONE } @@ -182130,9 +182188,9 @@ return *(*int32)(unsafe.Pointer(bp + 40)) } - if Xsqlite3_strnicmp(tls, ts+35479, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+35526, zCmd, nCmd) == 0 { if (*Fts5Config)(unsafe.Pointer(pConfig)).FzContentRowid != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35493, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35540, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FzContentRowid = sqlite3Fts5Strndup(tls, bp+40, zArg, -1) @@ -182140,9 +182198,9 @@ return *(*int32)(unsafe.Pointer(bp + 40)) } - if Xsqlite3_strnicmp(tls, ts+35531, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+35578, zCmd, nCmd) == 0 { if int32(*(*int8)(unsafe.Pointer(zArg))) != '0' && int32(*(*int8)(unsafe.Pointer(zArg))) != '1' || int32(*(*int8)(unsafe.Pointer(zArg + 1))) != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35542, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35589, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize = libc.Bool32(int32(*(*int8)(unsafe.Pointer(zArg))) == '1') @@ -182154,17 +182212,17 @@ *(*[4]Fts5Enum)(unsafe.Pointer(bp + 48)) = [4]Fts5Enum{ {FzName: ts + 9378, FeVal: FTS5_DETAIL_NONE}, {FzName: ts + 18714}, - {FzName: ts + 35577, FeVal: FTS5_DETAIL_COLUMNS}, + {FzName: ts + 35624, FeVal: FTS5_DETAIL_COLUMNS}, {}, } if libc.AssignPtrInt32(bp+40, fts5ConfigSetEnum(tls, bp+48, zArg, pConfig+92)) != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35585, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35632, 0) } return *(*int32)(unsafe.Pointer(bp + 40)) } - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35616, libc.VaList(bp+24, nCmd, zCmd)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35663, libc.VaList(bp+24, nCmd, zCmd)) return SQLITE_ERROR } @@ -182211,15 +182269,15 @@ defer tls.Free(16) var rc int32 = SQLITE_OK - if 0 == Xsqlite3_stricmp(tls, zCol, ts+23560) || + if 0 == Xsqlite3_stricmp(tls, zCol, ts+23607) || 0 == Xsqlite3_stricmp(tls, zCol, ts+17625) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35644, libc.VaList(bp, zCol)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35691, libc.VaList(bp, zCol)) rc = SQLITE_ERROR } else if zArg != 0 { - if 0 == Xsqlite3_stricmp(tls, zArg, ts+35674) { + if 0 == Xsqlite3_stricmp(tls, zArg, ts+35721) { *(*U8)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(p)).FabUnindexed + uintptr((*Fts5Config)(unsafe.Pointer(p)).FnCol))) = U8(1) } else { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35684, libc.VaList(bp+8, zArg)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35731, libc.VaList(bp+8, zArg)) rc = SQLITE_ERROR } } @@ -182236,13 +182294,13 @@ *(*int32)(unsafe.Pointer(bp + 24)) = SQLITE_OK *(*Fts5Buffer)(unsafe.Pointer(bp + 32)) = Fts5Buffer{} - sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+35715, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(p)).FzContentRowid)) + sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+35762, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(p)).FzContentRowid)) if (*Fts5Config)(unsafe.Pointer(p)).FeContent != 
FTS5_CONTENT_NONE { for i = 0; i < (*Fts5Config)(unsafe.Pointer(p)).FnCol; i++ { if (*Fts5Config)(unsafe.Pointer(p)).FeContent == FTS5_CONTENT_EXTERNAL { - sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+35720, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(p)).FazCol + uintptr(i)*8)))) + sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+35767, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(p)).FazCol + uintptr(i)*8)))) } else { - sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+35727, libc.VaList(bp+16, i)) + sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+35774, libc.VaList(bp+16, i)) } } } @@ -182280,8 +182338,8 @@ (*Fts5Config)(unsafe.Pointer(pRet)).FzName = sqlite3Fts5Strndup(tls, bp+40, *(*uintptr)(unsafe.Pointer(azArg + 2*8)), -1) (*Fts5Config)(unsafe.Pointer(pRet)).FbColumnsize = 1 (*Fts5Config)(unsafe.Pointer(pRet)).FeDetail = FTS5_DETAIL_FULL - if *(*int32)(unsafe.Pointer(bp + 40)) == SQLITE_OK && Xsqlite3_stricmp(tls, (*Fts5Config)(unsafe.Pointer(pRet)).FzName, ts+23560) == 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35735, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pRet)).FzName)) + if *(*int32)(unsafe.Pointer(bp + 40)) == SQLITE_OK && Xsqlite3_stricmp(tls, (*Fts5Config)(unsafe.Pointer(pRet)).FzName, ts+23607) == 0 { + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35782, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pRet)).FzName)) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } @@ -182313,7 +182371,7 @@ if *(*int32)(unsafe.Pointer(bp + 40)) == SQLITE_OK { if z == uintptr(0) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35764, libc.VaList(bp+8, zOrig)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35811, libc.VaList(bp+8, zOrig)) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { if bOption != 0 { @@ -182350,14 +182408,14 @@ var zTail uintptr = uintptr(0) if (*Fts5Config)(unsafe.Pointer(pRet)).FeContent == FTS5_CONTENT_NORMAL { - zTail = ts + 35433 + zTail = ts + 35480 } else if (*Fts5Config)(unsafe.Pointer(pRet)).FbColumnsize != 0 { - zTail = ts + 35784 + zTail = ts + 35831 } if zTail != 0 { (*Fts5Config)(unsafe.Pointer(pRet)).FzContent = sqlite3Fts5Mprintf(tls, - bp+40, ts+35792, libc.VaList(bp+16, (*Fts5Config)(unsafe.Pointer(pRet)).FzDb, (*Fts5Config)(unsafe.Pointer(pRet)).FzName, zTail)) + bp+40, ts+35839, libc.VaList(bp+16, (*Fts5Config)(unsafe.Pointer(pRet)).FzDb, (*Fts5Config)(unsafe.Pointer(pRet)).FzName, zTail)) } } @@ -182406,7 +182464,7 @@ *(*int32)(unsafe.Pointer(bp + 48)) = SQLITE_OK var zSql uintptr - zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+35803, 0) + zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+35850, 0) for i = 0; zSql != 0 && i < (*Fts5Config)(unsafe.Pointer(pConfig)).FnCol; i++ { var zSep uintptr = func() uintptr { if i == 0 { @@ -182414,10 +182472,10 @@ } return ts + 15971 }() - zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+35819, libc.VaList(bp, zSql, zSep, *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(pConfig)).FazCol + uintptr(i)*8)))) + zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+35866, libc.VaList(bp, zSql, zSep, *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(pConfig)).FazCol + uintptr(i)*8)))) } - zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+35826, - libc.VaList(bp+24, zSql, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, ts+23560)) + zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+35873, + libc.VaList(bp+24, zSql, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, ts+23607)) if zSql != 0 { 
*(*int32)(unsafe.Pointer(bp + 48)) = Xsqlite3_declare_vtab(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, zSql) @@ -182527,7 +182585,7 @@ var rc int32 = SQLITE_OK - if 0 == Xsqlite3_stricmp(tls, zKey, ts+35852) { + if 0 == Xsqlite3_stricmp(tls, zKey, ts+35899) { var pgsz int32 = 0 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { pgsz = Xsqlite3_value_int(tls, pVal) @@ -182537,7 +182595,7 @@ } else { (*Fts5Config)(unsafe.Pointer(pConfig)).Fpgsz = pgsz } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+35857) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+35904) { var nHashSize int32 = -1 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { nHashSize = Xsqlite3_value_int(tls, pVal) @@ -182547,7 +182605,7 @@ } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FnHashSize = nHashSize } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+35866) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+35913) { var nAutomerge int32 = -1 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { nAutomerge = Xsqlite3_value_int(tls, pVal) @@ -182560,7 +182618,7 @@ } (*Fts5Config)(unsafe.Pointer(pConfig)).FnAutomerge = nAutomerge } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+35876) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+35923) { var nUsermerge int32 = -1 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { nUsermerge = Xsqlite3_value_int(tls, pVal) @@ -182570,7 +182628,7 @@ } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FnUsermerge = nUsermerge } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+35886) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+35933) { var nCrisisMerge int32 = -1 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { nCrisisMerge = Xsqlite3_value_int(tls, pVal) @@ -182586,7 +182644,7 @@ } (*Fts5Config)(unsafe.Pointer(pConfig)).FnCrisisMerge = nCrisisMerge } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+23560) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+23607) { var zIn uintptr = Xsqlite3_value_text(tls, pVal) rc = sqlite3Fts5ConfigParseRank(tls, zIn, bp, bp+8) @@ -182609,7 +182667,7 @@ bp := tls.Alloc(52) defer tls.Free(52) - var zSelect uintptr = ts + 35898 + var zSelect uintptr = ts + 35945 var zSql uintptr *(*uintptr)(unsafe.Pointer(bp + 40)) = uintptr(0) *(*int32)(unsafe.Pointer(bp + 32)) = SQLITE_OK @@ -182631,7 +182689,7 @@ for SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 40))) { var zK uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 40)), 0) var pVal uintptr = Xsqlite3_column_value(tls, *(*uintptr)(unsafe.Pointer(bp + 40)), 1) - if 0 == Xsqlite3_stricmp(tls, zK, ts+35930) { + if 0 == Xsqlite3_stricmp(tls, zK, ts+35977) { iVersion = Xsqlite3_value_int(tls, pVal) } else { *(*int32)(unsafe.Pointer(bp + 48)) = 0 @@ -182645,7 +182703,7 @@ *(*int32)(unsafe.Pointer(bp + 32)) = SQLITE_ERROR if (*Fts5Config)(unsafe.Pointer(pConfig)).FpzErrmsg != 0 { *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(pConfig)).FpzErrmsg)) = Xsqlite3_mprintf(tls, - ts+35938, + ts+35985, libc.VaList(bp+16, iVersion, FTS5_CURRENT_VERSION)) } } @@ -182743,7 +182801,7 @@ } } if int32(*(*int8)(unsafe.Pointer(z2))) == 0 { - sqlite3Fts5ParseError(tls, pParse, ts+36003, 0) + sqlite3Fts5ParseError(tls, pParse, ts+36050, 0) return FTS5_EOF } } @@ -182756,20 +182814,20 @@ { var z2 uintptr if sqlite3Fts5IsBareword(tls, *(*int8)(unsafe.Pointer(z))) == 0 { - sqlite3Fts5ParseError(tls, pParse, ts+36023, libc.VaList(bp, z)) + sqlite3Fts5ParseError(tls, pParse, ts+36070, libc.VaList(bp, z)) return FTS5_EOF } tok = 
FTS5_STRING for z2 = z + 1; sqlite3Fts5IsBareword(tls, *(*int8)(unsafe.Pointer(z2))) != 0; z2++ { } (*Fts5Token)(unsafe.Pointer(pToken)).Fn = int32((int64(z2) - int64(z)) / 1) - if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 2 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+36054, uint64(2)) == 0 { + if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 2 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+36101, uint64(2)) == 0 { tok = FTS5_OR } - if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 3 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+36057, uint64(3)) == 0 { + if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 3 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+36104, uint64(3)) == 0 { tok = FTS5_NOT } - if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 3 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+31439, uint64(3)) == 0 { + if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 3 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+31486, uint64(3)) == 0 { tok = FTS5_AND } break @@ -184547,9 +184605,9 @@ bp := tls.Alloc(16) defer tls.Free(16) - if (*Fts5Token)(unsafe.Pointer(pTok)).Fn != 4 || libc.Xmemcmp(tls, ts+36061, (*Fts5Token)(unsafe.Pointer(pTok)).Fp, uint64(4)) != 0 { + if (*Fts5Token)(unsafe.Pointer(pTok)).Fn != 4 || libc.Xmemcmp(tls, ts+36108, (*Fts5Token)(unsafe.Pointer(pTok)).Fp, uint64(4)) != 0 { sqlite3Fts5ParseError(tls, - pParse, ts+35090, libc.VaList(bp, (*Fts5Token)(unsafe.Pointer(pTok)).Fn, (*Fts5Token)(unsafe.Pointer(pTok)).Fp)) + pParse, ts+35137, libc.VaList(bp, (*Fts5Token)(unsafe.Pointer(pTok)).Fn, (*Fts5Token)(unsafe.Pointer(pTok)).Fp)) } } @@ -184565,7 +184623,7 @@ var c int8 = *(*int8)(unsafe.Pointer((*Fts5Token)(unsafe.Pointer(p)).Fp + uintptr(i))) if int32(c) < '0' || int32(c) > '9' { sqlite3Fts5ParseError(tls, - pParse, ts+36066, libc.VaList(bp, (*Fts5Token)(unsafe.Pointer(p)).Fn, (*Fts5Token)(unsafe.Pointer(p)).Fp)) + pParse, ts+36113, libc.VaList(bp, (*Fts5Token)(unsafe.Pointer(p)).Fn, (*Fts5Token)(unsafe.Pointer(p)).Fp)) return } nNear = nNear*10 + (int32(*(*int8)(unsafe.Pointer((*Fts5Token)(unsafe.Pointer(p)).Fp + uintptr(i)))) - '0') @@ -184652,7 +184710,7 @@ } } if iCol == (*Fts5Config)(unsafe.Pointer(pConfig)).FnCol { - sqlite3Fts5ParseError(tls, pParse, ts+21897, libc.VaList(bp, z)) + sqlite3Fts5ParseError(tls, pParse, ts+21944, libc.VaList(bp, z)) } else { pRet = fts5ParseColset(tls, pParse, pColset, iCol) } @@ -184733,7 +184791,7 @@ *(*uintptr)(unsafe.Pointer(bp)) = pColset if (*Fts5Config)(unsafe.Pointer((*Fts5Parse)(unsafe.Pointer(pParse)).FpConfig)).FeDetail == FTS5_DETAIL_NONE { sqlite3Fts5ParseError(tls, pParse, - ts+36095, 0) + ts+36142, 0) } else { fts5ParseSetColset(tls, pParse, pExpr, pColset, bp) } @@ -184903,12 +184961,12 @@ (*Fts5ExprPhrase)(unsafe.Pointer(pPhrase)).FnTerm > 1 || (*Fts5ExprPhrase)(unsafe.Pointer(pPhrase)).FnTerm > 0 && (*Fts5ExprTerm)(unsafe.Pointer(pPhrase+32)).FbFirst != 0 { sqlite3Fts5ParseError(tls, pParse, - ts+36148, + ts+36195, libc.VaList(bp, func() uintptr { if (*Fts5ExprNearset)(unsafe.Pointer(pNear)).FnPhrase == 1 { - return ts + 36198 + return ts + 36245 } - return ts + 36061 + return ts + 36108 }())) Xsqlite3_free(tls, pRet) pRet = uintptr(0) @@ -185851,7 +185909,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpReader == uintptr(0) && rc == SQLITE_OK { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig rc = Xsqlite3_blob_open(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, - (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, 
(*Fts5Index)(unsafe.Pointer(p)).FzDataTbl, ts+36205, iRowid, 0, p+56) + (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl, ts+36252, iRowid, 0, p+56) } if rc == SQLITE_ERROR { @@ -185930,7 +185988,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpWriter == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig fts5IndexPrepareStmt(tls, p, p+64, Xsqlite3_mprintf(tls, - ts+36211, + ts+36258, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName))) if (*Fts5Index)(unsafe.Pointer(p)).Frc != 0 { return @@ -185955,7 +186013,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpDeleter == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig var zSql uintptr = Xsqlite3_mprintf(tls, - ts+36262, + ts+36309, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) if fts5IndexPrepareStmt(tls, p, p+72, zSql) != 0 { return @@ -185978,7 +186036,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpIdxDeleter == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig fts5IndexPrepareStmt(tls, p, p+88, Xsqlite3_mprintf(tls, - ts+36311, + ts+36358, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName))) } if (*Fts5Index)(unsafe.Pointer(p)).Frc == SQLITE_OK { @@ -186217,7 +186275,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).Frc == SQLITE_OK { if (*Fts5Index)(unsafe.Pointer(p)).FpDataVersion == uintptr(0) { (*Fts5Index)(unsafe.Pointer(p)).Frc = fts5IndexPrepareStmt(tls, p, p+112, - Xsqlite3_mprintf(tls, ts+36351, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer((*Fts5Index)(unsafe.Pointer(p)).FpConfig)).FzDb))) + Xsqlite3_mprintf(tls, ts+36398, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer((*Fts5Index)(unsafe.Pointer(p)).FpConfig)).FzDb))) if (*Fts5Index)(unsafe.Pointer(p)).Frc != 0 { return int64(0) } @@ -187416,7 +187474,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpIdxSelect == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig fts5IndexPrepareStmt(tls, p, p+96, Xsqlite3_mprintf(tls, - ts+36374, + ts+36421, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName))) } return (*Fts5Index)(unsafe.Pointer(p)).FpIdxSelect @@ -188882,7 +188940,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpIdxWriter == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig fts5IndexPrepareStmt(tls, p, p+80, Xsqlite3_mprintf(tls, - ts+36458, + ts+36505, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName))) } @@ -189964,13 +190022,13 @@ if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { (*Fts5Index)(unsafe.Pointer(p)).FpConfig = pConfig (*Fts5Index)(unsafe.Pointer(p)).FnWorkUnit = FTS5_WORK_UNIT - (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl = sqlite3Fts5Mprintf(tls, bp+8, ts+36515, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) + (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl = sqlite3Fts5Mprintf(tls, bp+8, ts+36562, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) if (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl != 0 && bCreate != 0 { *(*int32)(unsafe.Pointer(bp + 8)) = sqlite3Fts5CreateTable(tls, - pConfig, ts+26432, ts+36523, 0, pzErr) + pConfig, ts+26479, ts+36570, 0, pzErr) if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { *(*int32)(unsafe.Pointer(bp + 8)) = sqlite3Fts5CreateTable(tls, pConfig, ts+12840, - ts+36558, + ts+36605, 1, pzErr) } if 
*(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { @@ -190223,7 +190281,7 @@ sqlite3Fts5Put32(tls, bp, iNew) rc = Xsqlite3_blob_open(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl, - ts+36205, int64(FTS5_STRUCTURE_ROWID), 1, bp+8) + ts+36252, int64(FTS5_STRUCTURE_ROWID), 1, bp+8) if rc == SQLITE_OK { Xsqlite3_blob_write(tls, *(*uintptr)(unsafe.Pointer(bp + 8)), bp, 4, 0) rc = Xsqlite3_blob_close(tls, *(*uintptr)(unsafe.Pointer(bp + 8))) @@ -190337,7 +190395,7 @@ } fts5IndexPrepareStmt(tls, p, bp+24, Xsqlite3_mprintf(tls, - ts+36602, + ts+36649, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, (*Fts5StructureSegment)(unsafe.Pointer(pSeg)).FiSegid))) for (*Fts5Index)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) { @@ -190507,7 +190565,7 @@ } else { (*Fts5Buffer)(unsafe.Pointer(bp + 16)).Fn = 0 fts5SegiterPoslist(tls, p, *(*uintptr)(unsafe.Pointer(bp))+96+uintptr((*Fts5CResult)(unsafe.Pointer((*Fts5Iter)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FaFirst+1*4)).FiFirst)*120, uintptr(0), bp+16) - sqlite3Fts5BufferAppendBlob(tls, p+52, bp+16, uint32(4), ts+36688) + sqlite3Fts5BufferAppendBlob(tls, p+52, bp+16, uint32(4), ts+36735) for 0 == sqlite3Fts5PoslistNext64(tls, (*Fts5Buffer)(unsafe.Pointer(bp+16)).Fp, (*Fts5Buffer)(unsafe.Pointer(bp+16)).Fn, bp+32, bp+40) { var iCol int32 = int32(*(*I64)(unsafe.Pointer(bp + 40)) >> 32) var iTokOff int32 = int32(*(*I64)(unsafe.Pointer(bp + 40)) & int64(0x7FFFFFFF)) @@ -190778,7 +190836,7 @@ if (*Fts5Config)(unsafe.Pointer(pConfig)).FbLock != 0 { (*Fts5Table)(unsafe.Pointer(pTab)).Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+36693, 0) + ts+36740, 0) return SQLITE_ERROR } @@ -191202,7 +191260,7 @@ (*Fts5Sorter)(unsafe.Pointer(pSorter)).FnIdx = nPhrase rc = fts5PrepareStatement(tls, pSorter, pConfig, - ts+36732, + ts+36779, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zRank, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, func() uintptr { if zRankArgs != 0 { @@ -191218,9 +191276,9 @@ }(), func() uintptr { if bDesc != 0 { - return ts + 36787 + return ts + 36834 } - return ts + 36792 + return ts + 36839 }())) (*Fts5Cursor)(unsafe.Pointer(pCsr)).FpSorter = pSorter @@ -191266,12 +191324,12 @@ (*Fts5Cursor)(unsafe.Pointer(pCsr)).FePlan = FTS5_PLAN_SPECIAL - if n == 5 && 0 == Xsqlite3_strnicmp(tls, ts+36796, z, n) { + if n == 5 && 0 == Xsqlite3_strnicmp(tls, ts+36843, z, n) { (*Fts5Cursor)(unsafe.Pointer(pCsr)).FiSpecial = I64(sqlite3Fts5IndexReads(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.FpIndex)) } else if n == 2 && 0 == Xsqlite3_strnicmp(tls, ts+6409, z, n) { (*Fts5Cursor)(unsafe.Pointer(pCsr)).FiSpecial = (*Fts5Cursor)(unsafe.Pointer(pCsr)).FiCsrId } else { - (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, ts+36802, libc.VaList(bp, n, z)) + (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, ts+36849, libc.VaList(bp, n, z)) rc = SQLITE_ERROR } @@ -191302,7 +191360,7 @@ var zRankArgs uintptr = (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRankArgs if zRankArgs != 0 { - var zSql uintptr = sqlite3Fts5Mprintf(tls, bp+16, ts+36830, libc.VaList(bp, zRankArgs)) + var zSql uintptr = sqlite3Fts5Mprintf(tls, bp+16, ts+36877, libc.VaList(bp, zRankArgs)) if zSql != 0 { *(*uintptr)(unsafe.Pointer(bp + 24)) = uintptr(0) 
*(*int32)(unsafe.Pointer(bp + 16)) = Xsqlite3_prepare_v3(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, zSql, -1, @@ -191333,7 +191391,7 @@ if *(*int32)(unsafe.Pointer(bp + 16)) == SQLITE_OK { pAux = fts5FindAuxiliary(tls, pTab, zRank) if pAux == uintptr(0) { - (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, ts+36840, libc.VaList(bp+8, zRank)) + (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, ts+36887, libc.VaList(bp+8, zRank)) *(*int32)(unsafe.Pointer(bp + 16)) = SQLITE_ERROR } } @@ -191365,14 +191423,14 @@ *(*int32)(unsafe.Pointer(pCsr + 80)) |= FTS5CSR_FREE_ZRANK } else if rc == SQLITE_ERROR { (*Sqlite3_vtab)(unsafe.Pointer((*Fts5Cursor)(unsafe.Pointer(pCsr)).Fbase.FpVtab)).FzErrMsg = Xsqlite3_mprintf(tls, - ts+36861, libc.VaList(bp, z)) + ts+36908, libc.VaList(bp, z)) } } else { if (*Fts5Config)(unsafe.Pointer(pConfig)).FzRank != 0 { (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRank = (*Fts5Config)(unsafe.Pointer(pConfig)).FzRank (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRankArgs = (*Fts5Config)(unsafe.Pointer(pConfig)).FzRankArgs } else { - (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRank = ts + 35244 + (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRank = ts + 35291 (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRankArgs = uintptr(0) } } @@ -191428,7 +191486,7 @@ goto __1 } (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+36693, 0) + ts+36740, 0) return SQLITE_ERROR __1: ; @@ -191645,7 +191703,7 @@ goto __40 } *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(pConfig)).FpzErrmsg)) = Xsqlite3_mprintf(tls, - ts+36894, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) + ts+36941, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) rc = SQLITE_ERROR goto __41 __40: @@ -191790,28 +191848,28 @@ var rc int32 = SQLITE_OK *(*int32)(unsafe.Pointer(bp)) = 0 - if 0 == Xsqlite3_stricmp(tls, ts+36930, zCmd) { + if 0 == Xsqlite3_stricmp(tls, ts+36977, zCmd) { if (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NORMAL { fts5SetVtabError(tls, pTab, - ts+36941, 0) + ts+36988, 0) rc = SQLITE_ERROR } else { rc = sqlite3Fts5StorageDeleteAll(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage) } - } else if 0 == Xsqlite3_stricmp(tls, ts+37021, zCmd) { + } else if 0 == Xsqlite3_stricmp(tls, ts+37068, zCmd) { if (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NONE { fts5SetVtabError(tls, pTab, - ts+37029, 0) + ts+37076, 0) rc = SQLITE_ERROR } else { rc = sqlite3Fts5StorageRebuild(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage) } } else if 0 == Xsqlite3_stricmp(tls, ts+18313, zCmd) { rc = sqlite3Fts5StorageOptimize(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage) - } else if 0 == Xsqlite3_stricmp(tls, ts+37085, zCmd) { + } else if 0 == Xsqlite3_stricmp(tls, ts+37132, zCmd) { var nMerge int32 = Xsqlite3_value_int(tls, pVal) rc = sqlite3Fts5StorageMerge(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage, nMerge) - } else if 0 == Xsqlite3_stricmp(tls, ts+37091, zCmd) { + } else if 0 == Xsqlite3_stricmp(tls, ts+37138, zCmd) { var iArg int32 = Xsqlite3_value_int(tls, pVal) rc = sqlite3Fts5StorageIntegrity(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage, iArg) } else { @@ -191882,12 +191940,12 @@ if eType0 == SQLITE_INTEGER && fts5IsContentless(tls, pTab) != 0 { (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+37107, + ts+37154, libc.VaList(bp, func() uintptr { if nArg > 1 { - return ts + 21798 + return ts + 21845 } - 
return ts + 37144 + return ts + 37191 }(), (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) *(*int32)(unsafe.Pointer(bp + 16)) = SQLITE_ERROR } else if nArg == 1 { @@ -192517,7 +192575,7 @@ pCsr = fts5CursorFromCsrid(tls, (*Fts5Auxiliary)(unsafe.Pointer(pAux)).FpGlobal, iCsrId) if pCsr == uintptr(0) || (*Fts5Cursor)(unsafe.Pointer(pCsr)).FePlan == 0 { - var zErr uintptr = Xsqlite3_mprintf(tls, ts+37156, libc.VaList(bp, iCsrId)) + var zErr uintptr = Xsqlite3_mprintf(tls, ts+37203, libc.VaList(bp, iCsrId)) Xsqlite3_result_error(tls, context, zErr, -1) Xsqlite3_free(tls, zErr) } else { @@ -192761,7 +192819,7 @@ }()) if pMod == uintptr(0) { rc = SQLITE_ERROR - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+37177, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(azArg)))) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+37224, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(azArg)))) } else { rc = (*struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 @@ -192780,7 +192838,7 @@ (*Fts5Config)(unsafe.Pointer(pConfig)).FpTokApi = pMod + 16 if rc != SQLITE_OK { if pzErr != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+37199, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+37246, 0) } } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FePattern = sqlite3Fts5TokenizerPattern(tls, @@ -192827,7 +192885,7 @@ var ppApi uintptr _ = nArg - ppApi = Xsqlite3_value_pointer(tls, *(*uintptr)(unsafe.Pointer(apArg)), ts+37230) + ppApi = Xsqlite3_value_pointer(tls, *(*uintptr)(unsafe.Pointer(apArg)), ts+37277) if ppApi != 0 { *(*uintptr)(unsafe.Pointer(ppApi)) = pGlobal } @@ -192836,7 +192894,7 @@ func fts5SourceIdFunc(tls *libc.TLS, pCtx uintptr, nArg int32, apUnused uintptr) { _ = nArg _ = apUnused - Xsqlite3_result_text(tls, pCtx, ts+37243, -1, libc.UintptrFromInt32(-1)) + Xsqlite3_result_text(tls, pCtx, ts+37290, -1, libc.UintptrFromInt32(-1)) } func fts5ShadowName(tls *libc.TLS, zName uintptr) int32 { @@ -192850,7 +192908,7 @@ } var azName2 = [5]uintptr{ - ts + 37334, ts + 35433, ts + 26432, ts + 35784, ts + 12840, + ts + 37381, ts + 35480, ts + 26479, ts + 35831, ts + 12840, } func fts5Init(tls *libc.TLS, db uintptr) int32 { @@ -192874,7 +192932,7 @@ (*Fts5Global)(unsafe.Pointer(pGlobal)).Fapi.FxFindTokenizer = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr, uintptr) int32 }{fts5FindTokenizer})) - rc = Xsqlite3_create_module_v2(tls, db, ts+37341, uintptr(unsafe.Pointer(&fts5Mod)), p, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5ModuleDestroy}))) + rc = Xsqlite3_create_module_v2(tls, db, ts+37388, uintptr(unsafe.Pointer(&fts5Mod)), p, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5ModuleDestroy}))) if rc == SQLITE_OK { rc = sqlite3Fts5IndexInit(tls, db) } @@ -192892,13 +192950,13 @@ } if rc == SQLITE_OK { rc = Xsqlite3_create_function(tls, - db, ts+37341, 1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { + db, ts+37388, 1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{fts5Fts5Func})), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { rc = Xsqlite3_create_function(tls, - db, ts+37346, 0, + db, ts+37393, 0, SQLITE_UTF8|SQLITE_DETERMINISTIC|SQLITE_INNOCUOUS, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) @@ -192955,17 +193013,17 @@ if *(*uintptr)(unsafe.Pointer(p + 40 + uintptr(eStmt)*8)) == uintptr(0) { *(*[11]uintptr)(unsafe.Pointer(bp + 128)) = [11]uintptr{ - ts + 37361, - 
ts + 37429, - ts + 37498, - ts + 37531, - ts + 37570, - ts + 37610, - ts + 37649, - ts + 37690, - ts + 37729, - ts + 37771, - ts + 37811, + ts + 37408, + ts + 37476, + ts + 37545, + ts + 37578, + ts + 37617, + ts + 37657, + ts + 37696, + ts + 37737, + ts + 37776, + ts + 37818, + ts + 37858, } var pC uintptr = (*Fts5Storage)(unsafe.Pointer(p)).FpConfig var zSql uintptr = uintptr(0) @@ -193067,18 +193125,18 @@ defer tls.Free(80) var rc int32 = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+37834, + ts+37881, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+37938, + ts+37985, libc.VaList(bp+48, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) } if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NORMAL { rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+37976, + ts+38023, libc.VaList(bp+64, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) } return rc @@ -193090,7 +193148,7 @@ if *(*int32)(unsafe.Pointer(pRc)) == SQLITE_OK { *(*int32)(unsafe.Pointer(pRc)) = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+38014, + ts+38061, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zTail, zName, zTail)) } } @@ -193102,14 +193160,14 @@ var pConfig uintptr = (*Fts5Storage)(unsafe.Pointer(pStorage)).FpConfig *(*int32)(unsafe.Pointer(bp)) = sqlite3Fts5StorageSync(tls, pStorage) - fts5StorageRenameOne(tls, pConfig, bp, ts+26432, zName) + fts5StorageRenameOne(tls, pConfig, bp, ts+26479, zName) fts5StorageRenameOne(tls, pConfig, bp, ts+12840, zName) - fts5StorageRenameOne(tls, pConfig, bp, ts+37334, zName) + fts5StorageRenameOne(tls, pConfig, bp, ts+37381, zName) if (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { - fts5StorageRenameOne(tls, pConfig, bp, ts+35784, zName) + fts5StorageRenameOne(tls, pConfig, bp, ts+35831, zName) } if (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NORMAL { - fts5StorageRenameOne(tls, pConfig, bp, ts+35433, zName) + fts5StorageRenameOne(tls, pConfig, bp, ts+35480, zName) } return *(*int32)(unsafe.Pointer(bp)) } @@ -193121,17 +193179,17 @@ var rc int32 *(*uintptr)(unsafe.Pointer(bp + 64)) = uintptr(0) - rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, bp+64, ts+38056, + rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, bp+64, ts+38103, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zPost, zDefn, func() uintptr { if bWithout != 0 { - return ts + 31085 + return ts + 31132 } return ts + 1544 }())) if *(*uintptr)(unsafe.Pointer(bp + 64)) != 0 { *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, - ts+38086, + ts+38133, libc.VaList(bp+40, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zPost, *(*uintptr)(unsafe.Pointer(bp + 64)))) Xsqlite3_free(tls, *(*uintptr)(unsafe.Pointer(bp + 64))) } @@ -193168,27 +193226,27 @@ } else { var i int32 var iOff int32 - 
Xsqlite3_snprintf(tls, nDefn, zDefn, ts+38130, 0) + Xsqlite3_snprintf(tls, nDefn, zDefn, ts+38177, 0) iOff = int32(libc.Xstrlen(tls, zDefn)) for i = 0; i < (*Fts5Config)(unsafe.Pointer(pConfig)).FnCol; i++ { - Xsqlite3_snprintf(tls, nDefn-iOff, zDefn+uintptr(iOff), ts+38153, libc.VaList(bp, i)) + Xsqlite3_snprintf(tls, nDefn-iOff, zDefn+uintptr(iOff), ts+38200, libc.VaList(bp, i)) iOff = iOff + int32(libc.Xstrlen(tls, zDefn+uintptr(iOff))) } - rc = sqlite3Fts5CreateTable(tls, pConfig, ts+35433, zDefn, 0, pzErr) + rc = sqlite3Fts5CreateTable(tls, pConfig, ts+35480, zDefn, 0, pzErr) } Xsqlite3_free(tls, zDefn) } if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { rc = sqlite3Fts5CreateTable(tls, - pConfig, ts+35784, ts+38159, 0, pzErr) + pConfig, ts+35831, ts+38206, 0, pzErr) } if rc == SQLITE_OK { rc = sqlite3Fts5CreateTable(tls, - pConfig, ts+37334, ts+38191, 1, pzErr) + pConfig, ts+37381, ts+38238, 1, pzErr) } if rc == SQLITE_OK { - rc = sqlite3Fts5StorageConfigValue(tls, p, ts+35930, uintptr(0), FTS5_CURRENT_VERSION) + rc = sqlite3Fts5StorageConfigValue(tls, p, ts+35977, uintptr(0), FTS5_CURRENT_VERSION) } } @@ -193394,12 +193452,12 @@ (*Fts5Storage)(unsafe.Pointer(p)).FbTotalsValid = 0 rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+38208, + ts+38255, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+38258, + ts+38305, libc.VaList(bp+32, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) } @@ -193407,7 +193465,7 @@ rc = sqlite3Fts5IndexReinit(tls, (*Fts5Storage)(unsafe.Pointer(p)).FpIndex) } if rc == SQLITE_OK { - rc = sqlite3Fts5StorageConfigValue(tls, p, ts+35930, uintptr(0), FTS5_CURRENT_VERSION) + rc = sqlite3Fts5StorageConfigValue(tls, p, ts+35977, uintptr(0), FTS5_CURRENT_VERSION) } return rc } @@ -193583,7 +193641,7 @@ var zSql uintptr var rc int32 - zSql = Xsqlite3_mprintf(tls, ts+38287, + zSql = Xsqlite3_mprintf(tls, ts+38334, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zSuffix)) if zSql == uintptr(0) { rc = SQLITE_NOMEM @@ -193765,14 +193823,14 @@ if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NORMAL { *(*I64)(unsafe.Pointer(bp + 48)) = int64(0) - rc = fts5StorageCount(tls, p, ts+35433, bp+48) + rc = fts5StorageCount(tls, p, ts+35480, bp+48) if rc == SQLITE_OK && *(*I64)(unsafe.Pointer(bp + 48)) != (*Fts5Storage)(unsafe.Pointer(p)).FnTotalRow { rc = SQLITE_CORRUPT | int32(1)<<8 } } if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { *(*I64)(unsafe.Pointer(bp + 56)) = int64(0) - rc = fts5StorageCount(tls, p, ts+35784, bp+56) + rc = fts5StorageCount(tls, p, ts+35831, bp+56) if rc == SQLITE_OK && *(*I64)(unsafe.Pointer(bp + 56)) != (*Fts5Storage)(unsafe.Pointer(p)).FnTotalRow { rc = SQLITE_CORRUPT | int32(1)<<8 } @@ -193967,9 +194025,9 @@ libc.Xmemcpy(tls, p, uintptr(unsafe.Pointer(&aAsciiTokenChar)), uint64(unsafe.Sizeof(aAsciiTokenChar))) for i = 0; rc == SQLITE_OK && i < nArg; i = i + 2 { var zArg uintptr = *(*uintptr)(unsafe.Pointer(azArg + uintptr(i+1)*8)) - if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + 
uintptr(i)*8)), ts+38319) { + if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+38366) { fts5AsciiAddExceptions(tls, p, zArg, 1) - } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+38330) { + } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+38377) { fts5AsciiAddExceptions(tls, p, zArg, 0) } else { rc = SQLITE_ERROR @@ -194184,7 +194242,7 @@ } else { p = Xsqlite3_malloc(tls, int32(unsafe.Sizeof(Unicode61Tokenizer{}))) if p != 0 { - var zCat uintptr = ts + 38341 + var zCat uintptr = ts + 38388 var i int32 libc.Xmemset(tls, p, 0, uint64(unsafe.Sizeof(Unicode61Tokenizer{}))) @@ -194196,7 +194254,7 @@ } for i = 0; rc == SQLITE_OK && i < nArg; i = i + 2 { - if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+38350) { + if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+38397) { zCat = *(*uintptr)(unsafe.Pointer(azArg + uintptr(i+1)*8)) } } @@ -194207,18 +194265,18 @@ for i = 0; rc == SQLITE_OK && i < nArg; i = i + 2 { var zArg uintptr = *(*uintptr)(unsafe.Pointer(azArg + uintptr(i+1)*8)) - if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+38361) { + if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+38408) { if int32(*(*int8)(unsafe.Pointer(zArg))) != '0' && int32(*(*int8)(unsafe.Pointer(zArg))) != '1' && int32(*(*int8)(unsafe.Pointer(zArg))) != '2' || *(*int8)(unsafe.Pointer(zArg + 1)) != 0 { rc = SQLITE_ERROR } else { (*Unicode61Tokenizer)(unsafe.Pointer(p)).FeRemoveDiacritic = int32(*(*int8)(unsafe.Pointer(zArg))) - '0' } - } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+38319) { + } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+38366) { rc = fts5UnicodeAddExceptions(tls, p, zArg, 1) - } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+38330) { + } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+38377) { rc = fts5UnicodeAddExceptions(tls, p, zArg, 0) - } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+38350) { + } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+38397) { } else { rc = SQLITE_ERROR } @@ -194494,7 +194552,7 @@ var rc int32 = SQLITE_OK var pRet uintptr *(*uintptr)(unsafe.Pointer(bp)) = uintptr(0) - var zBase uintptr = ts + 38379 + var zBase uintptr = ts + 38426 if nArg > 0 { zBase = *(*uintptr)(unsafe.Pointer(azArg)) @@ -194636,7 +194694,7 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*int8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'a': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+38389, aBuf+uintptr(nBuf-2), uint64(2)) { + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+38436, aBuf+uintptr(nBuf-2), uint64(2)) { if fts5Porter_MGt1(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 } @@ -194644,11 +194702,11 @@ break case 'c': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+38392, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+38439, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+38397, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+38444, aBuf+uintptr(nBuf-4), 
uint64(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } @@ -194656,7 +194714,7 @@ break case 'e': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+38402, aBuf+uintptr(nBuf-2), uint64(2)) { + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+38449, aBuf+uintptr(nBuf-2), uint64(2)) { if fts5Porter_MGt1(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 } @@ -194664,7 +194722,7 @@ break case 'i': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+38405, aBuf+uintptr(nBuf-2), uint64(2)) { + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+38452, aBuf+uintptr(nBuf-2), uint64(2)) { if fts5Porter_MGt1(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 } @@ -194672,11 +194730,11 @@ break case 'l': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+38408, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+38455, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+38413, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+38460, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } @@ -194684,19 +194742,19 @@ break case 'n': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+38418, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+38465, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+38422, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+38469, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt1(tls, aBuf, nBuf-5) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+38428, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+38475, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } - } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+38433, aBuf+uintptr(nBuf-3), uint64(3)) { + } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+38480, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -194704,11 +194762,11 @@ break case 'o': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+38437, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+38484, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1_and_S_or_T(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } - } else if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+38441, aBuf+uintptr(nBuf-2), uint64(2)) { + } else if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+38488, aBuf+uintptr(nBuf-2), uint64(2)) { if fts5Porter_MGt1(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 } @@ -194716,7 +194774,7 @@ break case 's': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+38444, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+38491, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -194724,11 +194782,11 @@ break case 't': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+38448, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+38495, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, 
aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } - } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+38452, aBuf+uintptr(nBuf-3), uint64(3)) { + } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+38499, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -194736,7 +194794,7 @@ break case 'u': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+38456, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+38503, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -194744,7 +194802,7 @@ break case 'v': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+38460, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+38507, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -194752,7 +194810,7 @@ break case 'z': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+38464, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+38511, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -194768,24 +194826,24 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*int8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'a': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+38468, aBuf+uintptr(nBuf-2), uint64(2)) { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+38448, uint64(3)) + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+38515, aBuf+uintptr(nBuf-2), uint64(2)) { + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+38495, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 + 3 ret = 1 } break case 'b': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+38471, aBuf+uintptr(nBuf-2), uint64(2)) { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+38474, uint64(3)) + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+38518, aBuf+uintptr(nBuf-2), uint64(2)) { + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+38521, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 + 3 ret = 1 } break case 'i': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+38478, aBuf+uintptr(nBuf-2), uint64(2)) { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+38464, uint64(3)) + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+38525, aBuf+uintptr(nBuf-2), uint64(2)) { + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+38511, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 + 3 ret = 1 } @@ -194800,44 +194858,44 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*int8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'a': - if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+38481, aBuf+uintptr(nBuf-7), uint64(7)) { + if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+38528, aBuf+uintptr(nBuf-7), uint64(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+38448, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+38495, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } - } else if nBuf > 6 && 0 == libc.Xmemcmp(tls, ts+38489, aBuf+uintptr(nBuf-6), uint64(6)) { + } else if nBuf > 6 && 0 == libc.Xmemcmp(tls, ts+38536, aBuf+uintptr(nBuf-6), uint64(6)) { if fts5Porter_MGt0(tls, aBuf, nBuf-6) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-6), ts+38496, uint64(4)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-6), ts+38543, uint64(4)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 6 + 4 } } break case 'c': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+38501, aBuf+uintptr(nBuf-4), uint64(4)) { + if 
nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+38548, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+38397, uint64(4)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+38444, uint64(4)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 4 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+38506, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+38553, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+38392, uint64(4)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+38439, uint64(4)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 4 } } break case 'e': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+38511, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+38558, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+38464, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+38511, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 3 } } break case 'g': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+38516, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+38563, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+16837, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 3 @@ -194846,91 +194904,91 @@ break case 'l': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+38521, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+38568, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt0(tls, aBuf, nBuf-3) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+38474, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+38521, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 + 3 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+38525, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+38572, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+38389, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+38436, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 2 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+38530, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+38577, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+38433, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+38480, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 3 } - } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+38536, aBuf+uintptr(nBuf-3), uint64(3)) { + } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+38583, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt0(tls, aBuf, nBuf-3) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+38540, uint64(1)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+38587, uint64(1)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 + 1 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+38542, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+38589, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+38456, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+38503, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 3 } } break case 'o': - if nBuf > 7 && 0 == 
libc.Xmemcmp(tls, ts+38548, aBuf+uintptr(nBuf-7), uint64(7)) { + if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+38595, aBuf+uintptr(nBuf-7), uint64(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+38464, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+38511, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+38556, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+38603, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+38448, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+38495, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 3 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+38562, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+38609, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+38448, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+38495, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 3 } } break case 's': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+38567, aBuf+uintptr(nBuf-5), uint64(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+38614, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+38389, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+38436, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } - } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+38573, aBuf+uintptr(nBuf-7), uint64(7)) { + } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+38620, aBuf+uintptr(nBuf-7), uint64(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+38460, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+38507, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } - } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+38581, aBuf+uintptr(nBuf-7), uint64(7)) { + } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+38628, aBuf+uintptr(nBuf-7), uint64(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+38589, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+38636, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } - } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+38593, aBuf+uintptr(nBuf-7), uint64(7)) { + } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+38640, aBuf+uintptr(nBuf-7), uint64(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+38456, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+38503, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } } break case 't': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+38601, aBuf+uintptr(nBuf-5), uint64(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+38648, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+38389, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+38436, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+38607, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+38654, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+38460, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), 
ts+38507, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 3 } - } else if nBuf > 6 && 0 == libc.Xmemcmp(tls, ts+38613, aBuf+uintptr(nBuf-6), uint64(6)) { + } else if nBuf > 6 && 0 == libc.Xmemcmp(tls, ts+38660, aBuf+uintptr(nBuf-6), uint64(6)) { if fts5Porter_MGt0(tls, aBuf, nBuf-6) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-6), ts+38474, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-6), ts+38521, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 6 + 3 } } @@ -194945,16 +195003,16 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*int8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'a': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+38620, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+38667, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+38405, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+38452, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 2 } } break case 's': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+38625, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+38672, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } @@ -194962,21 +195020,21 @@ break case 't': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+38630, aBuf+uintptr(nBuf-5), uint64(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+38677, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+38405, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+38452, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+38636, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+38683, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+38405, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+38452, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } } break case 'u': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+38589, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+38636, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt0(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -194984,7 +195042,7 @@ break case 'v': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+38642, aBuf+uintptr(nBuf-5), uint64(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+38689, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 } @@ -194992,9 +195050,9 @@ break case 'z': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+38648, aBuf+uintptr(nBuf-5), uint64(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+38695, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+38389, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+38436, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } } @@ -195009,12 +195067,12 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*int8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'e': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+38654, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+38701, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt0(tls, aBuf, nBuf-3) != 0 { - libc.Xmemcpy(tls, 
aBuf+uintptr(nBuf-3), ts+38658, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+38705, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 + 2 } - } else if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+38661, aBuf+uintptr(nBuf-2), uint64(2)) { + } else if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+38708, aBuf+uintptr(nBuf-2), uint64(2)) { if fts5Porter_Vowel(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 ret = 1 @@ -195023,7 +195081,7 @@ break case 'n': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+38664, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+38711, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_Vowel(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 ret = 1 @@ -195179,7 +195237,7 @@ (*TrigramTokenizer)(unsafe.Pointer(pNew)).FbFold = 1 for i = 0; rc == SQLITE_OK && i < nArg; i = i + 2 { var zArg uintptr = *(*uintptr)(unsafe.Pointer(azArg + uintptr(i+1)*8)) - if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+38668) { + if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+38715) { if int32(*(*int8)(unsafe.Pointer(zArg))) != '0' && int32(*(*int8)(unsafe.Pointer(zArg))) != '1' || *(*int8)(unsafe.Pointer(zArg + 1)) != 0 { rc = SQLITE_ERROR } else { @@ -195359,22 +195417,22 @@ defer tls.Free(128) *(*[4]BuiltinTokenizer)(unsafe.Pointer(bp)) = [4]BuiltinTokenizer{ - {FzName: ts + 38379, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { + {FzName: ts + 38426, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 }{fts5UnicodeCreate})), FxDelete: *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5UnicodeDelete})), FxTokenize: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, int32, uintptr) int32 }{fts5UnicodeTokenize}))}}, - {FzName: ts + 38683, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { + {FzName: ts + 38730, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 }{fts5AsciiCreate})), FxDelete: *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5AsciiDelete})), FxTokenize: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, int32, uintptr) int32 }{fts5AsciiTokenize}))}}, - {FzName: ts + 38689, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { + {FzName: ts + 38736, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 }{fts5PorterCreate})), FxDelete: *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5PorterDelete})), FxTokenize: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, int32, uintptr) int32 }{fts5PorterTokenize}))}}, - {FzName: ts + 38696, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { + {FzName: ts + 38743, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 }{fts5TriCreate})), FxDelete: *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5TriDelete})), FxTokenize: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, int32, uintptr) int32 @@ -196517,14 +196575,14 @@ var zCopy uintptr = sqlite3Fts5Strndup(tls, bp+8, zType, -1) if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { 
sqlite3Fts5Dequote(tls, zCopy) - if Xsqlite3_stricmp(tls, zCopy, ts+38704) == 0 { + if Xsqlite3_stricmp(tls, zCopy, ts+38751) == 0 { *(*int32)(unsafe.Pointer(peType)) = FTS5_VOCAB_COL - } else if Xsqlite3_stricmp(tls, zCopy, ts+38708) == 0 { + } else if Xsqlite3_stricmp(tls, zCopy, ts+38755) == 0 { *(*int32)(unsafe.Pointer(peType)) = FTS5_VOCAB_ROW - } else if Xsqlite3_stricmp(tls, zCopy, ts+38712) == 0 { + } else if Xsqlite3_stricmp(tls, zCopy, ts+38759) == 0 { *(*int32)(unsafe.Pointer(peType)) = FTS5_VOCAB_INSTANCE } else { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+38721, libc.VaList(bp, zCopy)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+38768, libc.VaList(bp, zCopy)) *(*int32)(unsafe.Pointer(bp + 8)) = SQLITE_ERROR } Xsqlite3_free(tls, zCopy) @@ -196550,19 +196608,19 @@ defer tls.Free(36) *(*[3]uintptr)(unsafe.Pointer(bp + 8)) = [3]uintptr{ - ts + 38755, - ts + 38795, - ts + 38830, + ts + 38802, + ts + 38842, + ts + 38877, } var pRet uintptr = uintptr(0) *(*int32)(unsafe.Pointer(bp + 32)) = SQLITE_OK var bDb int32 - bDb = libc.Bool32(argc == 6 && libc.Xstrlen(tls, *(*uintptr)(unsafe.Pointer(argv + 1*8))) == uint64(4) && libc.Xmemcmp(tls, ts+24721, *(*uintptr)(unsafe.Pointer(argv + 1*8)), uint64(4)) == 0) + bDb = libc.Bool32(argc == 6 && libc.Xstrlen(tls, *(*uintptr)(unsafe.Pointer(argv + 1*8))) == uint64(4) && libc.Xmemcmp(tls, ts+24768, *(*uintptr)(unsafe.Pointer(argv + 1*8)), uint64(4)) == 0) if argc != 5 && bDb == 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+38873, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+38920, 0) *(*int32)(unsafe.Pointer(bp + 32)) = SQLITE_ERROR } else { var nByte int32 @@ -196695,11 +196753,11 @@ if (*Fts5VocabTable)(unsafe.Pointer(pTab)).FbBusy != 0 { (*Sqlite3_vtab)(unsafe.Pointer(pVTab)).FzErrMsg = Xsqlite3_mprintf(tls, - ts+38906, libc.VaList(bp, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) + ts+38953, libc.VaList(bp, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) return SQLITE_ERROR } zSql = sqlite3Fts5Mprintf(tls, bp+64, - ts+38937, + ts+38984, libc.VaList(bp+16, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) if zSql != 0 { *(*int32)(unsafe.Pointer(bp + 64)) = Xsqlite3_prepare_v2(tls, (*Fts5VocabTable)(unsafe.Pointer(pTab)).Fdb, zSql, -1, bp+72, uintptr(0)) @@ -196723,7 +196781,7 @@ *(*uintptr)(unsafe.Pointer(bp + 72)) = uintptr(0) if *(*int32)(unsafe.Pointer(bp + 64)) == SQLITE_OK { (*Sqlite3_vtab)(unsafe.Pointer(pVTab)).FzErrMsg = Xsqlite3_mprintf(tls, - ts+38988, libc.VaList(bp+48, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) + ts+39035, libc.VaList(bp+48, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) *(*int32)(unsafe.Pointer(bp + 64)) = SQLITE_ERROR } } else { @@ -197118,7 +197176,7 @@ func sqlite3Fts5VocabInit(tls *libc.TLS, pGlobal uintptr, db uintptr) int32 { var p uintptr = pGlobal - return Xsqlite3_create_module_v2(tls, db, ts+39014, uintptr(unsafe.Pointer(&fts5Vocab)), p, uintptr(0)) + return Xsqlite3_create_module_v2(tls, db, ts+39061, uintptr(unsafe.Pointer(&fts5Vocab)), p, uintptr(0)) } var fts5Vocab = Sqlite3_module{ @@ -197140,7 +197198,7 @@ // ************* End of stmt.c 
*********************************************** // Return the source-id for this library func Xsqlite3_sourceid(tls *libc.TLS) uintptr { - return ts + 39024 + return ts + 39071 } func init() { @@ -198207,5 +198265,5 @@ *(*func(*libc.TLS, uintptr, uintptr) uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&winVfs)) + 160)) = winNextSystemCall } -var ts1 = "3.41.0\x00ATOMIC_INTRINSICS=1\x00COMPILER=msvc-1900\x00DEFAULT_AUTOVACUUM\x00DEFAULT_CACHE_SIZE=-2000\x00DEFAULT_FILE_FORMAT=4\x00DEFAULT_JOURNAL_SIZE_LIMIT=-1\x00DEFAULT_MEMSTATUS=0\x00DEFAULT_MMAP_SIZE=0\x00DEFAULT_PAGE_SIZE=4096\x00DEFAULT_PCACHE_INITSZ=20\x00DEFAULT_RECURSIVE_TRIGGERS\x00DEFAULT_SECTOR_SIZE=4096\x00DEFAULT_SYNCHRONOUS=2\x00DEFAULT_WAL_AUTOCHECKPOINT=1000\x00DEFAULT_WAL_SYNCHRONOUS=2\x00DEFAULT_WORKER_THREADS=0\x00ENABLE_COLUMN_METADATA\x00ENABLE_FTS5\x00ENABLE_GEOPOLY\x00ENABLE_MATH_FUNCTIONS\x00ENABLE_MEMORY_MANAGEMENT\x00ENABLE_OFFSET_SQL_FUNC\x00ENABLE_PREUPDATE_HOOK\x00ENABLE_RBU\x00ENABLE_RTREE\x00ENABLE_SESSION\x00ENABLE_SNAPSHOT\x00ENABLE_STAT4\x00ENABLE_UNLOCK_NOTIFY\x00LIKE_DOESNT_MATCH_BLOBS\x00MALLOC_SOFT_LIMIT=1024\x00MAX_ATTACHED=10\x00MAX_COLUMN=2000\x00MAX_COMPOUND_SELECT=500\x00MAX_DEFAULT_PAGE_SIZE=8192\x00MAX_EXPR_DEPTH=1000\x00MAX_FUNCTION_ARG=127\x00MAX_LENGTH=1000000000\x00MAX_LIKE_PATTERN_LENGTH=50000\x00MAX_MMAP_SIZE=0x7fff0000\x00MAX_PAGE_COUNT=1073741823\x00MAX_PAGE_SIZE=65536\x00MAX_SQL_LENGTH=1000000000\x00MAX_TRIGGER_DEPTH=1000\x00MAX_VARIABLE_NUMBER=32766\x00MAX_VDBE_OP=250000000\x00MAX_WORKER_THREADS=8\x00MUTEX_NOOP\x00SOUNDEX\x00SYSTEM_MALLOC\x00TEMP_STORE=1\x00THREADSAFE=1\x00BINARY\x00ANY\x00BLOB\x00INT\x00INTEGER\x00REAL\x00TEXT\x0020b:20e\x0020c:20e\x0020e\x0040f-21a-21d\x00now\x00local time unavailable\x00second\x00minute\x00hour\x00\x00\x00day\x00\x00\x00\x00month\x00\x00year\x00\x00\x00auto\x00julianday\x00localtime\x00unixepoch\x00utc\x00weekday \x00start of \x00month\x00year\x00day\x00%02d\x00%06.3f\x00%03d\x00%.16g\x00%lld\x00%04d\x00date\x00time\x00datetime\x00strftime\x00current_time\x00current_timestamp\x00current_date\x00failed to allocate %u bytes of memory\x00failed memory resize %u to %u bytes\x00out of memory\x000123456789ABCDEF0123456789abcdef\x00-x0\x00X0\x00%\x00NaN\x00Inf\x00\x00NULL\x00(NULL)\x00.\x00(join-%u)\x00(subquery-%u)\x00thstndrd\x00922337203685477580\x00API call with %s database connection 
pointer\x00unopened\x00invalid\x00Savepoint\x00AutoCommit\x00Transaction\x00Checkpoint\x00JournalMode\x00Vacuum\x00VFilter\x00VUpdate\x00Init\x00Goto\x00Gosub\x00InitCoroutine\x00Yield\x00MustBeInt\x00Jump\x00Once\x00If\x00IfNot\x00IsType\x00Not\x00IfNullRow\x00SeekLT\x00SeekLE\x00SeekGE\x00SeekGT\x00IfNotOpen\x00IfNoHope\x00NoConflict\x00NotFound\x00Found\x00SeekRowid\x00NotExists\x00Last\x00IfSmaller\x00SorterSort\x00Sort\x00Rewind\x00SorterNext\x00Prev\x00Next\x00IdxLE\x00IdxGT\x00IdxLT\x00Or\x00And\x00IdxGE\x00RowSetRead\x00RowSetTest\x00Program\x00FkIfZero\x00IsNull\x00NotNull\x00Ne\x00Eq\x00Gt\x00Le\x00Lt\x00Ge\x00ElseEq\x00IfPos\x00IfNotZero\x00DecrJumpZero\x00IncrVacuum\x00VNext\x00Filter\x00PureFunc\x00Function\x00Return\x00EndCoroutine\x00HaltIfNull\x00Halt\x00Integer\x00Int64\x00String\x00BeginSubrtn\x00Null\x00SoftNull\x00Blob\x00Variable\x00Move\x00Copy\x00SCopy\x00IntCopy\x00FkCheck\x00ResultRow\x00CollSeq\x00AddImm\x00RealAffinity\x00Cast\x00Permutation\x00Compare\x00IsTrue\x00ZeroOrNull\x00Offset\x00Column\x00TypeCheck\x00Affinity\x00MakeRecord\x00Count\x00ReadCookie\x00SetCookie\x00ReopenIdx\x00BitAnd\x00BitOr\x00ShiftLeft\x00ShiftRight\x00Add\x00Subtract\x00Multiply\x00Divide\x00Remainder\x00Concat\x00OpenRead\x00OpenWrite\x00BitNot\x00OpenDup\x00OpenAutoindex\x00String8\x00OpenEphemeral\x00SorterOpen\x00SequenceTest\x00OpenPseudo\x00Close\x00ColumnsUsed\x00SeekScan\x00SeekHit\x00Sequence\x00NewRowid\x00Insert\x00RowCell\x00Delete\x00ResetCount\x00SorterCompare\x00SorterData\x00RowData\x00Rowid\x00NullRow\x00SeekEnd\x00IdxInsert\x00SorterInsert\x00IdxDelete\x00DeferredSeek\x00IdxRowid\x00FinishSeek\x00Destroy\x00Clear\x00ResetSorter\x00CreateBtree\x00SqlExec\x00ParseSchema\x00LoadAnalysis\x00DropTable\x00DropIndex\x00Real\x00DropTrigger\x00IntegrityCk\x00RowSetAdd\x00Param\x00FkCounter\x00MemMax\x00OffsetLimit\x00AggInverse\x00AggStep\x00AggStep1\x00AggValue\x00AggFinal\x00Expire\x00CursorLock\x00CursorUnlock\x00TableLock\x00VBegin\x00VCreate\x00VDestroy\x00VOpen\x00VInitIn\x00VColumn\x00VRename\x00Pagecount\x00MaxPgcnt\x00ClrSubtype\x00FilterAdd\x00Trace\x00CursorHint\x00ReleaseReg\x00Noop\x00Explain\x00Abortable\x00AreFileApisANSI\x00CharLowerW\x00CharUpperW\x00CloseHandle\x00CreateFileA\x00CreateFileW\x00CreateFileMappingA\x00CreateFileMappingW\x00CreateMutexW\x00DeleteFileA\x00DeleteFileW\x00FileTimeToLocalFileTime\x00FileTimeToSystemTime\x00FlushFileBuffers\x00FormatMessageA\x00FormatMessageW\x00FreeLibrary\x00GetCurrentProcessId\x00GetDiskFreeSpaceA\x00GetDiskFreeSpaceW\x00GetFileAttributesA\x00GetFileAttributesW\x00GetFileAttributesExW\x00GetFileSize\x00GetFullPathNameA\x00GetFullPathNameW\x00GetLastError\x00GetProcAddressA\x00GetSystemInfo\x00GetSystemTime\x00GetSystemTimeAsFileTime\x00GetTempPathA\x00GetTempPathW\x00GetTickCount\x00GetVersionExA\x00GetVersionExW\x00HeapAlloc\x00HeapCreate\x00HeapDestroy\x00HeapFree\x00HeapReAlloc\x00HeapSize\x00HeapValidate\x00HeapCompact\x00LoadLibraryA\x00LoadLibraryW\x00LocalFree\x00LockFile\x00LockFileEx\x00MapViewOfFile\x00MultiByteToWideChar\x00QueryPerformanceCounter\x00ReadFile\x00SetEndOfFile\x00SetFilePointer\x00Sleep\x00SystemTimeToFileTime\x00UnlockFile\x00UnlockFileEx\x00UnmapViewOfFile\x00WideCharToMultiByte\x00WriteFile\x00CreateEventExW\x00WaitForSingleObject\x00WaitForSingleObjectEx\x00SetFilePointerEx\x00GetFileInformationByHandleEx\x00MapViewOfFileFromApp\x00CreateFile2\x00LoadPackagedLibrary\x00GetTickCount64\x00GetNativeSystemInfo\x00OutputDebugStringA\x00OutputDebugStringW\x00GetProcessHeap\x00CreateFileMappi
ngFromApp\x00InterlockedCompareExchange\x00UuidCreate\x00UuidCreateSequential\x00FlushViewOfFile\x00%s\x00OsError 0x%lx (%lu)\x00os_win.c:%d: (%lu) %s(%s) - %s\x00delayed %dms for lock/sharing conflict at line %d\x00winSeekFile\x00winClose\x00winRead\x00winWrite1\x00winWrite2\x00winTruncate1\x00winTruncate2\x00winSync1\x00winSync2\x00winFileSize\x00winUnlockReadLock\x00winUnlock\x00winLockSharedMemory\x00%s-shm\x00readonly_shm\x00winOpenShm\x00winShmMap1\x00winShmMap2\x00winShmMap3\x00winUnmapfile1\x00winUnmapfile2\x00winMapfile1\x00winMapfile2\x00etilqs_\x00winGetTempname1\x00winGetTempname2\x00winGetTempname3\x00winGetTempname4\x00winGetTempname5\x00abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\x00exclusive\x00winOpen\x00psow\x00winDelete\x00winAccess\x00%s%c%s\x00winFullPathname1\x00winFullPathname2\x00winFullPathname3\x00winFullPathname4\x00win32\x00win32-longpath\x00win32-none\x00win32-longpath-none\x00memdb\x00memdb(%p,%lld)\x00PRAGMA \"%w\".page_count\x00ATTACH x AS %Q\x00recovered %d pages from %s\x00-journal\x00-wal\x00nolock\x00immutable\x00PRAGMA table_list\x00recovered %d frames from WAL file %s\x00cannot limit WAL size: %s\x00SQLite format 3\x00:memory:\x00@ \x00\n\x00invalid page number %d\x002nd reference to page %d\x00Failed to read ptrmap key=%d\x00Bad ptr map entry key=%d expected=(%d,%d) got=(%d,%d)\x00failed to get page %d\x00freelist leaf count too big on page %d\x00%s is %d but should be %d\x00size\x00overflow list length\x00Page %u: \x00unable to get the page. error code=%d\x00btreeInitPage() returns error code %d\x00free space corruption\x00On tree page %u cell %d: \x00On page %u at right child: \x00Offset %d out of range %d..%d\x00Extends off end of page\x00Rowid %lld out of order\x00Child page depth differs\x00Multiple uses for byte %u of page %u\x00Fragmentation of %d bytes reported as %d on page %u\x00Main freelist: \x00max rootpage (%d) disagrees with header (%d)\x00incremental_vacuum enabled with a max rootpage of zero\x00Page %d is never used\x00Pointer map page %d is referenced\x00unknown database %s\x00destination database is in use\x00source and destination must be distinct\x00%!.15g\x00-\x00%s%s\x00k(%d\x00B\x00,%s%s%s\x00N.\x00)\x00%.18s-%s\x00%s(%d)\x00%d\x00(blob)\x00vtab:%p\x00%c%u\x00]\x00program\x00?\x008\x0016LE\x0016BE\x00addr\x00opcode\x00p1\x00p2\x00p3\x00p4\x00p5\x00comment\x00id\x00parent\x00notused\x00detail\x00%.4c%s%.16c\x00MJ delete: %s\x00MJ collide: %s\x00-mj%06X9%02X\x00FOREIGN KEY constraint failed\x00a CHECK constraint\x00a generated column\x00an index\x00non-deterministic use of %s() in %s\x00API called with finalized prepared statement\x00API called with NULL prepared statement\x00string or blob too big\x00bind on a busy prepared statement: [%s]\x00-- \x00'%.*q'\x00zeroblob(%d)\x00x'\x00%02x\x00'\x00%s constraint failed\x00%z: %s\x00abort at %d in [%s]: %s\x00cannot store %s value in %s column %s.%s\x00cannot open savepoint - SQL statements in progress\x00no such savepoint: %s\x00cannot release savepoint - SQL statements in progress\x00cannot commit transaction - SQL statements in progress\x00cannot start a transaction within a transaction\x00cannot rollback - no transaction is active\x00cannot commit - no transaction is active\x00database schema has changed\x00index corruption\x00sqlite_master\x00SELECT*FROM\"%w\".%s WHERE %s ORDER BY rowid\x00too many levels of trigger recursion\x00cannot change %s wal mode from within a transaction\x00into\x00out of\x00database table is locked: %s\x00ValueList\x00-- 
%s\x00statement aborts at %d: [%s] %s\x00NOT NULL\x00UNIQUE\x00CHECK\x00FOREIGN KEY\x00cannot open value of type %s\x00null\x00real\x00integer\x00no such rowid: %lld\x00cannot open virtual table: %s\x00cannot open table without rowid: %s\x00cannot open view: %s\x00no such column: \"%s\"\x00foreign key\x00indexed\x00cannot open %s column for writing\x00sqlite_\x00sqlite_temp_master\x00sqlite_temp_schema\x00sqlite_schema\x00main\x00*\x00new\x00old\x00excluded\x00misuse of aliased aggregate %s\x00misuse of aliased window function %s\x00row value misused\x00double-quoted string literal: \"%w\"\x00coalesce\x00no such column\x00ambiguous column name\x00%s: %s.%s.%s\x00%s: %s.%s\x00%s: %s\x00partial index WHERE clauses\x00index expressions\x00CHECK constraints\x00generated columns\x00%s prohibited in %s\x00true\x00false\x00the \".\" operator\x00second argument to %#T() must be a constant between 0.0 and 1.0\x00not authorized to use function: %#T\x00non-deterministic functions\x00%#T() may not be used as a window function\x00window\x00aggregate\x00misuse of %s function %#T()\x00no such function: %#T\x00wrong number of arguments to function %#T()\x00FILTER may not be used with non-aggregate %#T()\x00subqueries\x00parameters\x00%r %s BY term out of range - should be between 1 and %d\x00too many terms in ORDER BY clause\x00ORDER\x00%r ORDER BY term does not match any column in the result set\x00too many terms in %s BY clause\x00HAVING clause on a non-aggregate query\x00GROUP\x00aggregate functions are not allowed in the GROUP BY clause\x00Expression tree is too large (maximum depth %d)\x00IN(...) element has %d term%s - expected %d\x00s\x000\x00too many arguments on function %T\x00unsafe use of %#T()\x00variable number must be between ?1 and ?%d\x00too many SQL variables\x00%d columns assigned %d values\x00too many columns in %s\x00_ROWID_\x00ROWID\x00OID\x00USING ROWID SEARCH ON TABLE %s FOR IN-OPERATOR\x00USING INDEX %s FOR IN-OPERATOR\x00sub-select returns %d columns - expected %d\x00REUSE LIST SUBQUERY %d\x00%sLIST SUBQUERY %d\x00CORRELATED \x00REUSE SUBQUERY %d\x00%sSCALAR SUBQUERY %d\x001\x000x\x00hex literal too big: %s%#T\x00generated column loop on \"%s\"\x00blob\x00text\x00numeric\x00flexnum\x00none\x00misuse of aggregate: %#T()\x00unknown function: %#T()\x00RAISE() may only be used within a trigger-program\x00B\x00C\x00D\x00E\x00F\x00table %s may not be altered\x00SELECT 1 FROM \"%w\".sqlite_master WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%' AND sqlite_rename_test(%Q, sql, type, name, %d, %Q, %d)=NULL \x00SELECT 1 FROM temp.sqlite_master WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%' AND sqlite_rename_test(%Q, sql, type, name, 1, %Q, %d)=NULL \x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_quotefix(%Q, sql)WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%'\x00UPDATE temp.sqlite_master SET sql = sqlite_rename_quotefix('temp', sql)WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%'\x00there is already another table or index with this name: %s\x00table\x00view %s may not be altered\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, %d) WHERE (type!='index' OR tbl_name=%Q COLLATE nocase)AND name NOT LIKE 'sqliteX_%%' ESCAPE 'X'\x00UPDATE %Q.sqlite_master SET tbl_name = %Q, name = CASE WHEN type='table' THEN %Q WHEN name LIKE 'sqliteX_autoindex%%' ESCAPE 'X' AND type='index' THEN 'sqlite_autoindex_' || %Q || 
substr(name,%d+18) ELSE name END WHERE tbl_name=%Q COLLATE nocase AND (type='table' OR type='index' OR type='trigger');\x00sqlite_sequence\x00UPDATE \"%w\".sqlite_sequence set name = %Q WHERE name = %Q\x00UPDATE sqlite_temp_schema SET sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, 1), tbl_name = CASE WHEN tbl_name=%Q COLLATE nocase AND sqlite_rename_test(%Q, sql, type, name, 1, 'after rename', 0) THEN %Q ELSE tbl_name END WHERE type IN ('view', 'trigger')\x00after rename\x00SELECT raise(ABORT,%Q) FROM \"%w\".\"%w\"\x00Cannot add a PRIMARY KEY column\x00Cannot add a UNIQUE column\x00Cannot add a REFERENCES column with non-NULL default value\x00Cannot add a NOT NULL column with default value NULL\x00Cannot add a column with non-constant default\x00cannot add a STORED column\x00UPDATE \"%w\".sqlite_master SET sql = printf('%%.%ds, ',sql) || %Q || substr(sql,1+length(printf('%%.%ds',sql))) WHERE type = 'table' AND name = %Q\x00SELECT CASE WHEN quick_check GLOB 'CHECK*' THEN raise(ABORT,'CHECK constraint failed') ELSE raise(ABORT,'NOT NULL constraint failed') END FROM pragma_quick_check(%Q,%Q) WHERE quick_check GLOB 'CHECK*' OR quick_check GLOB 'NULL*'\x00virtual tables may not be altered\x00Cannot add a column to a view\x00sqlite_altertab_%s\x00view\x00virtual table\x00cannot %s %s \"%s\"\x00drop column from\x00rename columns of\x00no such column: \"%T\"\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, %d) WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND (type != 'index' OR tbl_name = %Q)\x00UPDATE temp.sqlite_master SET sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, 1) WHERE type IN ('trigger', 'view')\x00error in %s %s%s%s: %s\x00 \x00CREATE \x00\"%w\" \x00%Q%s\x00%.*s%s\x00cannot drop %s column: \"%s\"\x00PRIMARY KEY\x00cannot drop column \"%s\": no other columns exist\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_drop_column(%d, sql, %d) WHERE (type=='table' AND tbl_name=%Q COLLATE nocase)\x00after drop column\x00sqlite_rename_column\x00sqlite_rename_table\x00sqlite_rename_test\x00sqlite_drop_column\x00sqlite_rename_quotefix\x00CREATE TABLE %Q.%s(%s)\x00DELETE FROM %Q.%s WHERE %s=%Q\x00DELETE FROM %Q.%s\x00sqlite_stat1\x00tbl,idx,stat\x00sqlite_stat4\x00tbl,idx,neq,nlt,ndlt,sample\x00sqlite_stat3\x00stat_init\x00stat_push\x00%llu\x00 %llu\x00%llu \x00stat_get\x00sqlite\\_%\x00BBB\x00idx\x00tbl\x00unordered*\x00sz=[0-9]*\x00noskipscan*\x00SELECT idx,count(*) FROM %Q.sqlite_stat4 GROUP BY idx\x00SELECT idx,neq,nlt,ndlt,sample FROM %Q.sqlite_stat4\x00SELECT tbl,idx,stat FROM %Q.sqlite_stat1\x00x\x00\x00too many attached databases - max %d\x00database %s is already in use\x00database is already attached\x00attached databases must use the same text encoding as main database\x00unable to open database: %s\x00no such database: %s\x00cannot detach database %s\x00database %s is locked\x00sqlite_detach\x00sqlite_attach\x00%s cannot use variables\x00%s %T cannot reference objects in database %s\x00authorizer malfunction\x00%s.%s\x00%s.%z\x00access to %z is prohibited\x00not authorized\x00pragma_\x00no such view\x00no such table\x00corrupt database\x00unknown database %T\x00object name reserved for internal use: %s\x00temporary table name must be unqualified\x00%s %T already exists\x00there is already an index named %s\x00sqlite_returning\x00cannot use RETURNING in a trigger\x00too many columns on %s\x00always\x00generated\x00duplicate column name: %s\x00default value of column [%s] is not constant\x00cannot use DEFAULT on a 
generated column\x00generated columns cannot be part of the PRIMARY KEY\x00table \"%s\" has more than one primary key\x00AUTOINCREMENT is only allowed on an INTEGER PRIMARY KEY\x00virtual tables cannot use computed columns\x00virtual\x00stored\x00error in generated column \"%s\"\x00,\x00\n \x00,\n \x00\n)\x00CREATE TABLE \x00 TEXT\x00 NUM\x00 INT\x00 REAL\x00unknown datatype for %s.%s: \"%s\"\x00missing datatype for %s.%s\x00AUTOINCREMENT not allowed on WITHOUT ROWID tables\x00PRIMARY KEY missing on table %s\x00must have at least one non-generated column\x00TABLE\x00VIEW\x00CREATE %s %.*s\x00UPDATE %Q.sqlite_master SET type='%s', name=%Q, tbl_name=%Q, rootpage=#%d, sql=%Q WHERE rowid=#%d\x00CREATE TABLE %Q.sqlite_sequence(name,seq)\x00tbl_name='%q' AND type!='trigger'\x00parameters are not allowed in views\x00view %s is circularly defined\x00corrupt schema\x00UPDATE %Q.sqlite_master SET rootpage=%d WHERE #%d AND rootpage=#%d\x00sqlite_stat%d\x00DELETE FROM %Q.sqlite_sequence WHERE name=%Q\x00DELETE FROM %Q.sqlite_master WHERE tbl_name=%Q and type!='trigger'\x00stat\x00table %s may not be dropped\x00use DROP TABLE to delete table %s\x00use DROP VIEW to delete view %s\x00foreign key on %s should reference only one column of table %T\x00number of columns in foreign key does not match the number of columns in the referenced table\x00unknown column \"%s\" in foreign key definition\x00unsupported use of NULLS %s\x00FIRST\x00LAST\x00index\x00cannot create a TEMP index on non-TEMP table \"%s\"\x00table %s may not be indexed\x00views may not be indexed\x00virtual tables may not be indexed\x00there is already a table named %s\x00index %s already exists\x00sqlite_autoindex_%s_%d\x00expressions prohibited in PRIMARY KEY and UNIQUE constraints\x00conflicting ON CONFLICT clauses specified\x00invalid rootpage\x00CREATE%s INDEX %.*s\x00 UNIQUE\x00INSERT INTO %Q.sqlite_master VALUES('index',%Q,%Q,#%d,%Q);\x00name='%q' AND type='index'\x00no such index: %S\x00index associated with UNIQUE or PRIMARY KEY constraint cannot be dropped\x00DELETE FROM %Q.sqlite_master WHERE name=%Q AND type='index'\x00too many FROM clause terms, max: %d\x00a JOIN clause is required before %s\x00ON\x00USING\x00BEGIN\x00ROLLBACK\x00COMMIT\x00RELEASE\x00unable to open a temporary database file for storing temporary tables\x00index '%q'\x00, \x00%s.rowid\x00unable to identify the object to be reindexed\x00duplicate WITH table name: %s\x00no such collation sequence: %s\x00unsafe use of virtual table \"%s\"\x00table %s may not be modified\x00cannot modify %s because it is a view\x00rows deleted\x00integer overflow\x00%.*f\x00LIKE or GLOB pattern too complex\x00ESCAPE expression must be a single 
character\x00%!.20e\x00%Q\x00?000\x00MATCH\x00like\x00implies_nonnull_row\x00expr_compare\x00expr_implies_expr\x00affinity\x00soundex\x00load_extension\x00sqlite_compileoption_used\x00sqlite_compileoption_get\x00unlikely\x00likelihood\x00likely\x00sqlite_offset\x00ltrim\x00rtrim\x00trim\x00min\x00max\x00typeof\x00subtype\x00length\x00instr\x00printf\x00format\x00unicode\x00char\x00abs\x00round\x00upper\x00lower\x00hex\x00unhex\x00ifnull\x00random\x00randomblob\x00nullif\x00sqlite_version\x00sqlite_source_id\x00sqlite_log\x00quote\x00last_insert_rowid\x00changes\x00total_changes\x00replace\x00zeroblob\x00substr\x00substring\x00sum\x00total\x00avg\x00count\x00group_concat\x00glob\x00ceil\x00ceiling\x00floor\x00trunc\x00ln\x00log\x00log10\x00log2\x00exp\x00pow\x00power\x00mod\x00acos\x00asin\x00atan\x00atan2\x00cos\x00sin\x00tan\x00cosh\x00sinh\x00tanh\x00acosh\x00asinh\x00atanh\x00sqrt\x00radians\x00degrees\x00pi\x00sign\x00iif\x00foreign key mismatch - \"%w\" referencing \"%w\"\x00cannot INSERT into generated column \"%s\"\x00table %S has no column named %s\x00table %S has %d columns but %d values were supplied\x00%d values for %d columns\x00UPSERT not implemented for virtual table \"%s\"\x00cannot UPSERT a view\x00rows inserted\x00sqlite3_extension_init\x00sqlite3_\x00lib\x00_init\x00no entry point [%s] in shared library [%s]\x00error during initialization: %s\x00unable to open shared library [%.*s]\x00dll\x00automatic extension loading failed: %s\x00seq\x00from\x00to\x00on_update\x00on_delete\x00match\x00cid\x00name\x00type\x00notnull\x00dflt_value\x00pk\x00hidden\x00schema\x00ncol\x00wr\x00strict\x00seqno\x00desc\x00coll\x00key\x00builtin\x00enc\x00narg\x00flags\x00wdth\x00hght\x00flgs\x00unique\x00origin\x00partial\x00rowid\x00fkid\x00file\x00busy\x00checkpointed\x00database\x00status\x00cache_size\x00timeout\x00analysis_limit\x00application_id\x00auto_vacuum\x00automatic_index\x00busy_timeout\x00cache_spill\x00case_sensitive_like\x00cell_size_check\x00checkpoint_fullfsync\x00collation_list\x00compile_options\x00count_changes\x00data_store_directory\x00data_version\x00database_list\x00default_cache_size\x00defer_foreign_keys\x00empty_result_callbacks\x00encoding\x00foreign_key_check\x00foreign_key_list\x00foreign_keys\x00freelist_count\x00full_column_names\x00fullfsync\x00function_list\x00hard_heap_limit\x00ignore_check_constraints\x00incremental_vacuum\x00index_info\x00index_list\x00index_xinfo\x00integrity_check\x00journal_mode\x00journal_size_limit\x00legacy_alter_table\x00locking_mode\x00max_page_count\x00mmap_size\x00module_list\x00optimize\x00page_count\x00page_size\x00pragma_list\x00query_only\x00quick_check\x00read_uncommitted\x00recursive_triggers\x00reverse_unordered_selects\x00schema_version\x00secure_delete\x00short_column_names\x00shrink_memory\x00soft_heap_limit\x00synchronous\x00table_info\x00table_list\x00table_xinfo\x00temp_store\x00temp_store_directory\x00threads\x00trusted_schema\x00user_version\x00wal_autocheckpoint\x00wal_checkpoint\x00writable_schema\x00onoffalseyestruextrafull\x00normal\x00full\x00incremental\x00memory\x00temporary storage cannot be changed from within a transaction\x00SET NULL\x00SET DEFAULT\x00CASCADE\x00RESTRICT\x00NO ACTION\x00delete\x00persist\x00off\x00truncate\x00wal\x00w\x00a\x00sissii\x00utf8\x00utf16le\x00utf16be\x00-%T\x00fast\x00not a writable directory\x00Safety level may not be changed inside a 
transaction\x00reset\x00issisii\x00issisi\x00SELECT*FROM\"%w\"\x00shadow\x00sssiii\x00iisX\x00isiX\x00c\x00u\x00isisi\x00iss\x00is\x00iissssss\x00NONE\x00siX\x00*** in database %s ***\n\x00row not in PRIMARY KEY order for %s\x00NULL value in %s.%s\x00non-%s value in %s.%s\x00NUMERIC value in %s.%s\x00C\x00TEXT value in %s.%s\x00CHECK constraint failed in %s\x00row \x00 missing from index \x00 values differ from index \x00non-unique entry in index \x00wrong # of entries in index \x00ok\x00unsupported encoding: %s\x00restart\x00ANALYZE \"%w\".\"%w\"\x00UTF8\x00UTF-8\x00UTF-16le\x00UTF-16be\x00UTF16le\x00UTF16be\x00UTF-16\x00UTF16\x00CREATE TABLE x\x00%c\"%s\"\x00(\"%s\"\x00,arg HIDDEN\x00,schema HIDDEN\x00PRAGMA \x00%Q.\x00=%Q\x00error in %s %s after %s: %s\x00malformed database schema (%s)\x00%z - %s\x00rename\x00drop column\x00add column\x00orphan index\x00CREATE TABLE x(type text,name text,tbl_name text,rootpage int,sql text)\x00unsupported file format\x00SELECT*FROM\"%w\".%s ORDER BY rowid\x00database schema is locked: %s\x00statement too long\x00unknown join type: %T%s%T%s%T\x00naturaleftouterightfullinnercross\x00a NATURAL join may not have an ON or USING clause\x00cannot join using column %s - column not present in both tables\x00ambiguous reference to %s in USING()\x00UNION ALL\x00INTERSECT\x00EXCEPT\x00UNION\x00USE TEMP B-TREE FOR %s\x00USE TEMP B-TREE FOR %sORDER BY\x00RIGHT PART OF \x00column%d\x00%.*z:%u\x00NUM\x00cannot use window functions in recursive queries\x00recursive aggregate queries not supported\x00SETUP\x00RECURSIVE STEP\x00SCAN %d CONSTANT ROW%s\x00S\x00COMPOUND QUERY\x00LEFT-MOST SUBQUERY\x00%s USING TEMP B-TREE\x00all VALUES must have the same number of terms\x00SELECTs to the left and right of %s do not have the same number of result columns\x00MERGE (%s)\x00LEFT\x00RIGHT\x00no such index: %s\x00'%s' is not a function\x00no such index: \"%s\"\x00multiple references to recursive table: %s\x00circular reference: %s\x00table %s has %d values for %d columns\x00multiple recursive references: %s\x00recursive reference in a subquery: %s\x00%!S\x00too many references to \"%s\": max 65535\x00access to view \"%s\" prohibited\x00..%s\x00%s.%s.%s\x00no such table: %s\x00no tables specified\x00too many columns in result set\x00DISTINCT aggregates must have exactly one argument\x00USE TEMP B-TREE FOR %s(DISTINCT)\x00SCAN %s%s%s\x00 USING COVERING INDEX \x00target object/alias may not appear in FROM clause: %s\x00expected %d columns for '%s' but got %d\x00CO-ROUTINE %!S\x00MATERIALIZE %!S\x00DISTINCT\x00GROUP BY\x00sqlite3_get_table() called with two or more incompatible queries\x00temporary trigger may not have qualified name\x00trigger\x00cannot create triggers on virtual tables\x00trigger %T already exists\x00cannot create trigger on system table\x00cannot create %s trigger on view: %S\x00BEFORE\x00AFTER\x00cannot create INSTEAD OF trigger on table: %S\x00trigger \"%s\" may not write to shadow table \"%s\"\x00INSERT INTO %Q.sqlite_master VALUES('trigger',%Q,%Q,0,'CREATE TRIGGER %q')\x00type='trigger' AND name='%q'\x00no such trigger: %S\x00DELETE FROM %Q.sqlite_master WHERE name=%Q AND type='trigger'\x00%s RETURNING is not available on virtual tables\x00DELETE\x00UPDATE\x00RETURNING may not use \"TABLE.*\" wildcards\x00-- TRIGGER %s\x00cannot UPDATE generated column \"%s\"\x00no such column: %s\x00rows updated\x00%r \x00%sON CONFLICT clause does not match any PRIMARY KEY or UNIQUE constraint\x00CRE\x00INS\x00cannot VACUUM from within a transaction\x00cannot VACUUM - SQL 
statements in progress\x00non-text filename\x00ATTACH %Q AS vacuum_db\x00output file already exists\x00SELECT sql FROM \"%w\".sqlite_schema WHERE type='table'AND name<>'sqlite_sequence' AND coalesce(rootpage,1)>0\x00SELECT sql FROM \"%w\".sqlite_schema WHERE type='index'\x00SELECT'INSERT INTO vacuum_db.'||quote(name)||' SELECT*FROM\"%w\".'||quote(name)FROM vacuum_db.sqlite_schema WHERE type='table'AND coalesce(rootpage,1)>0\x00INSERT INTO vacuum_db.sqlite_schema SELECT*FROM \"%w\".sqlite_schema WHERE type IN('view','trigger') OR(type='table'AND rootpage=0)\x00CREATE VIRTUAL TABLE %T\x00UPDATE %Q.sqlite_master SET type='table', name=%Q, tbl_name=%Q, rootpage=0, sql=%Q WHERE rowid=#%d\x00name=%Q AND sql=%Q\x00vtable constructor called recursively: %s\x00vtable constructor failed: %s\x00vtable constructor did not declare schema: %s\x00no such module: %s\x00\x00 AND \x00(\x00 (\x00%s=?\x00ANY(%s)\x00>\x00<\x00%s %S\x00SEARCH\x00SCAN\x00AUTOMATIC PARTIAL COVERING INDEX\x00AUTOMATIC COVERING INDEX\x00COVERING INDEX %s\x00INDEX %s\x00 USING \x00 USING INTEGER PRIMARY KEY (%s\x00>? AND %s\x00%c?)\x00 VIRTUAL TABLE INDEX %d:%s\x00 LEFT-JOIN\x00BLOOM FILTER ON %S (\x00rowid=?\x00MULTI-INDEX OR\x00INDEX %d\x00RIGHT-JOIN %s\x00regexp\x00ON clause references tables to its right\x00NOCASE\x00too many arguments on %s() - max %d\x00automatic index on %s(%s)\x00auto-index\x00%s.xBestIndex malfunction\x00abbreviated query algorithm search\x00no query solution\x00at most %d tables in a join\x00SCAN CONSTANT ROW\x00second argument to nth_value must be a positive integer\x00argument of ntile must be a positive integer\x00row_number\x00dense_rank\x00rank\x00percent_rank\x00cume_dist\x00ntile\x00last_value\x00nth_value\x00first_value\x00lead\x00lag\x00no such window: %s\x00RANGE with offset PRECEDING/FOLLOWING requires one ORDER BY expression\x00FILTER clause may only be used with aggregate window functions\x00misuse of aggregate: %s()\x00unsupported frame specification\x00PARTITION clause\x00ORDER BY clause\x00frame specification\x00cannot override %s of window: %s\x00DISTINCT is not supported for window functions\x00frame starting offset must be a non-negative integer\x00frame ending offset must be a non-negative integer\x00frame starting offset must be a non-negative number\x00frame ending offset must be a non-negative number\x00%s clause should come after %s not before\x00ORDER BY\x00LIMIT\x00too many terms in compound SELECT\x00syntax error after column name \"%.*s\"\x00parser stack overflow\x00unknown table option: %.*s\x00set list\x00near \"%T\": syntax error\x00qualified table names are not allowed on INSERT, UPDATE, and DELETE statements within triggers\x00the INDEXED BY clause is not allowed on UPDATE or DELETE statements within triggers\x00the NOT INDEXED clause is not allowed on UPDATE or DELETE statements within triggers\x00incomplete input\x00unrecognized token: \"%T\"\x00%s in \"%s\"\x00create\x00temp\x00temporary\x00end\x00explain\x00unable to close due to unfinalized statements or unfinished backups\x00unknown error\x00abort due to ROLLBACK\x00another row available\x00no more rows available\x00not an error\x00SQL logic error\x00access permission denied\x00query aborted\x00database is locked\x00database table is locked\x00attempt to write a readonly database\x00interrupted\x00disk I/O error\x00database disk image is malformed\x00unknown operation\x00database or disk is full\x00unable to open database file\x00locking protocol\x00constraint failed\x00datatype mismatch\x00bad parameter or other API 
misuse\x00authorization denied\x00column index out of range\x00file is not a database\x00notification message\x00warning message\x00unable to delete/modify user-function due to active statements\x00unable to use function %s in the requested context\x00unknown database: %s\x00unable to delete/modify collation sequence due to active statements\x00file:\x00localhost\x00invalid uri authority: %.*s\x00vfs\x00cache\x00mode\x00access\x00no such %s mode: %s\x00%s mode not allowed: %s\x00no such vfs: %s\x00shared\x00private\x00ro\x00rw\x00rwc\x00RTRIM\x00\x00\x00\x00%s at line %d of [%.10s]\x00database corruption\x00misuse\x00cannot open file\x00no such table column: %s.%s\x00SQLITE_\x00database is deadlocked\x00array\x00object\x000123456789abcdef\x00JSON cannot hold BLOB values\x00malformed JSON\x00[0]\x00JSON path error near '%q'\x00json_%s() needs an odd number of arguments\x00$[\x00$.\x00json_object() requires an even number of arguments\x00json_object() labels must be TEXT\x00set\x00insert\x00[]\x00{}\x00CREATE TABLE x(key,value,type,atom,id,parent,fullkey,path,json HIDDEN,root HIDDEN)\x00.%.*s\x00[%d]\x00$\x00json\x00json_array\x00json_array_length\x00json_extract\x00->\x00->>\x00json_insert\x00json_object\x00json_patch\x00json_quote\x00json_remove\x00json_replace\x00json_set\x00json_type\x00json_valid\x00json_group_array\x00json_group_object\x00json_each\x00json_tree\x00%s_node\x00data\x00DROP TABLE '%q'.'%q_node';DROP TABLE '%q'.'%q_rowid';DROP TABLE '%q'.'%q_parent';\x00RtreeMatchArg\x00SELECT * FROM %Q.%Q\x00UNIQUE constraint failed: %s.%s\x00rtree constraint failed: %s.(%s<=%s)\x00ALTER TABLE %Q.'%q_node' RENAME TO \"%w_node\";ALTER TABLE %Q.'%q_parent' RENAME TO \"%w_parent\";ALTER TABLE %Q.'%q_rowid' RENAME TO \"%w_rowid\";\x00SELECT stat FROM %Q.sqlite_stat1 WHERE tbl = '%q_rowid'\x00node\x00CREATE TABLE \"%w\".\"%w_rowid\"(rowid INTEGER PRIMARY KEY,nodeno\x00,a%d\x00);CREATE TABLE \"%w\".\"%w_node\"(nodeno INTEGER PRIMARY KEY,data);\x00CREATE TABLE \"%w\".\"%w_parent\"(nodeno INTEGER PRIMARY KEY,parentnode);\x00INSERT INTO \"%w\".\"%w_node\"VALUES(1,zeroblob(%d))\x00INSERT INTO\"%w\".\"%w_rowid\"(rowid,nodeno)VALUES(?1,?2)ON CONFLICT(rowid)DO UPDATE SET nodeno=excluded.nodeno\x00SELECT * FROM \"%w\".\"%w_rowid\" WHERE rowid=?1\x00UPDATE \"%w\".\"%w_rowid\"SET \x00a%d=coalesce(?%d,a%d)\x00a%d=?%d\x00 WHERE rowid=?1\x00INSERT OR REPLACE INTO '%q'.'%q_node' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_node' WHERE nodeno = ?1\x00SELECT nodeno FROM '%q'.'%q_rowid' WHERE rowid = ?1\x00INSERT OR REPLACE INTO '%q'.'%q_rowid' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_rowid' WHERE rowid = ?1\x00SELECT parentnode FROM '%q'.'%q_parent' WHERE nodeno = ?1\x00INSERT OR REPLACE INTO '%q'.'%q_parent' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_parent' WHERE nodeno = ?1\x00PRAGMA %Q.page_size\x00SELECT length(data) FROM '%q'.'%q_node' WHERE nodeno = 1\x00undersize RTree blobs in \"%q_node\"\x00Wrong number of columns for an rtree table\x00Too few columns for an rtree table\x00Too many columns for an rtree table\x00Auxiliary rtree columns must be last\x00CREATE TABLE x(%.*s INT\x00,%.*s\x00);\x00,%.*s REAL\x00,%.*s INT\x00{%lld\x00 %g\x00}\x00Invalid argument to rtreedepth()\x00%z%s%z\x00SELECT data FROM %Q.'%q_node' WHERE nodeno=?\x00Node %lld missing from database\x00SELECT parentnode FROM %Q.'%q_parent' WHERE nodeno=?1\x00SELECT nodeno FROM %Q.'%q_rowid' WHERE rowid=?1\x00Mapping (%lld -> %lld) missing from %s table\x00%_rowid\x00%_parent\x00Found (%lld -> %lld) in %s table, expected (%lld -> %lld)\x00Dimension 
%d of cell %d on node %lld is corrupt\x00Dimension %d of cell %d on node %lld is corrupt relative to parent\x00Node %lld is too small (%d bytes)\x00Rtree depth out of range (%d)\x00Node %lld is too small for cell count of %d (%d bytes)\x00SELECT count(*) FROM %Q.'%q%s'\x00Wrong number of entries in %%%s table - expected %lld, actual %lld\x00SELECT * FROM %Q.'%q_rowid'\x00Schema corrupt or not an rtree\x00_rowid\x00_parent\x00END\x00wrong number of arguments to function rtreecheck()\x00[\x00[%!g,%!g],\x00[%!g,%!g]]\x00\x00CREATE TABLE x(_shape\x00,%s\x00rtree\x00fullscan\x00_shape does not contain a valid polygon\x00geopoly_overlap\x00geopoly_within\x00geopoly\x00geopoly_area\x00geopoly_blob\x00geopoly_json\x00geopoly_svg\x00geopoly_contains_point\x00geopoly_debug\x00geopoly_bbox\x00geopoly_xform\x00geopoly_regular\x00geopoly_ccw\x00geopoly_group_bbox\x00rtreenode\x00rtreedepth\x00rtreecheck\x00rtree_i32\x00corrupt fossil delta\x00DROP TRIGGER IF EXISTS temp.rbu_insert_tr;DROP TRIGGER IF EXISTS temp.rbu_update1_tr;DROP TRIGGER IF EXISTS temp.rbu_update2_tr;DROP TRIGGER IF EXISTS temp.rbu_delete_tr;\x00SELECT rbu_target_name(name, type='view') AS target, name FROM sqlite_schema WHERE type IN ('table', 'view') AND target IS NOT NULL %s ORDER BY name\x00AND rootpage!=0 AND rootpage IS NOT NULL\x00SELECT name, rootpage, sql IS NULL OR substr(8, 6)=='UNIQUE' FROM main.sqlite_schema WHERE type='index' AND tbl_name = ?\x00SELECT (sql COLLATE nocase BETWEEN 'CREATE VIRTUAL' AND 'CREATE VIRTUAM'), rootpage FROM sqlite_schema WHERE name=%Q\x00PRAGMA index_list=%Q\x00SELECT rootpage FROM sqlite_schema WHERE name = %Q\x00PRAGMA table_info=%Q\x00PRAGMA main.index_list = %Q\x00PRAGMA main.index_xinfo = %Q\x00SELECT * FROM '%q'\x00rbu_\x00rbu_rowid\x00table %q %s rbu_rowid column\x00may not have\x00requires\x00PRAGMA table_info(%Q)\x00column missing from %q: %s\x00%z%s\"%w\"\x00%z%s%s\"%w\"%s\x00SELECT max(_rowid_) FROM \"%s%w\"\x00 WHERE _rowid_ > %lld \x00 DESC\x00quote(\x00||','||\x00SELECT %s FROM \"%s%w\" ORDER BY %s LIMIT 1\x00 WHERE (%s) > (%s) \x00_rowid_\x00%z%s \"%w\" COLLATE %Q\x00%z%s \"rbu_imp_%d%w\" COLLATE %Q DESC\x00%z%s quote(\"rbu_imp_%d%w\")\x00SELECT %s FROM \"rbu_imp_%w\" ORDER BY %s LIMIT 1\x00%z%s%s\x00(%s) > (%s)\x00%z%s(%.*s) COLLATE %Q\x00%z%s\"%w\" COLLATE %Q\x00%z%s\"rbu_imp_%d%w\"%s\x00%z%s\"rbu_imp_%d%w\" %s COLLATE %Q\x00%z%s\"rbu_imp_%d%w\" IS ?\x00%z%s%s.\"%w\"\x00%z%sNULL\x00%z, %s._rowid_\x00_rowid_ = ?%d\x00%z%sc%d=?%d\x00_rowid_ = (SELECT id FROM rbu_imposter2 WHERE %z)\x00%z%s\"%w\"=?%d\x00invalid rbu_control value\x00%z%s\"%w\"=rbu_delta(\"%w\", ?%d)\x00%z%s\"%w\"=rbu_fossil_delta(\"%w\", ?%d)\x00PRIMARY KEY(\x00%z%s\"%w\"%s\x00%z)\x00SELECT name FROM sqlite_schema WHERE rootpage = ?\x00%z%sc%d %s COLLATE %Q\x00%z%sc%d%s\x00%z, id INTEGER\x00CREATE TABLE rbu_imposter2(%z, PRIMARY KEY(%z)) WITHOUT ROWID\x00PRIMARY KEY \x00%z%s\"%w\" %s %sCOLLATE %Q%s\x00 NOT NULL\x00%z, %z\x00CREATE TABLE \"rbu_imp_%w\"(%z)%s\x00 WITHOUT ROWID\x00INSERT INTO %s.'rbu_tmp_%q'(rbu_control,%s%s) VALUES(%z)\x00SELECT trim(sql) FROM sqlite_schema WHERE type='index' AND name=?\x00 LIMIT -1 OFFSET %d\x00CREATE TABLE \"rbu_imp_%w\"( %s, PRIMARY KEY( %s ) ) WITHOUT ROWID\x00INSERT INTO \"rbu_imp_%w\" VALUES(%s)\x00DELETE FROM \"rbu_imp_%w\" WHERE %s\x00SELECT %s, 0 AS rbu_control FROM '%q' %s %s %s ORDER BY %s%s\x00AND\x00WHERE\x00SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' %s ORDER BY %s%s\x00SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' %s UNION ALL SELECT %s, rbu_control FROM '%q' %s %s 
typeof(rbu_control)='integer' AND rbu_control!=1 ORDER BY %s%s\x00rbu_imp_\x00INSERT INTO \"%s%w\"(%s%s) VALUES(%s)\x00, _rowid_\x00DELETE FROM \"%s%w\" WHERE %s\x00, rbu_rowid\x00CREATE TABLE IF NOT EXISTS %s.'rbu_tmp_%q' AS SELECT *%s FROM '%q' WHERE 0;\x00, 0 AS rbu_rowid\x00CREATE TEMP TRIGGER rbu_delete_tr BEFORE DELETE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(3, %s);END;CREATE TEMP TRIGGER rbu_update1_tr BEFORE UPDATE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(3, %s);END;CREATE TEMP TRIGGER rbu_update2_tr AFTER UPDATE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(4, %s);END;\x00CREATE TEMP TRIGGER rbu_insert_tr AFTER INSERT ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(0, %s);END;\x00,_rowid_ \x00,rbu_rowid\x00SELECT %s,%s rbu_control%s FROM '%q'%s %s %s %s\x000 AS \x00UPDATE \"%s%w\" SET %s WHERE %s\x00SELECT k, v FROM %s.rbu_state\x00file:///%s-vacuum?modeof=%s\x00ATTACH %Q AS stat\x00CREATE TABLE IF NOT EXISTS %s.rbu_state(k INTEGER PRIMARY KEY, v)\x00cannot vacuum wal mode database\x00file:%s-vactmp?rbu_memory=1%s%s\x00&\x00rbu_tmp_insert\x00rbu_fossil_delta\x00rbu_target_name\x00SELECT * FROM sqlite_schema\x00rbu vfs not found\x00PRAGMA main.wal_checkpoint=restart\x00rbu_exclusive_checkpoint\x00%s-oal\x00%s-wal\x00PRAGMA schema_version\x00PRAGMA schema_version = %d\x00INSERT OR REPLACE INTO %s.rbu_state(k, v) VALUES (%d, %d), (%d, %Q), (%d, %Q), (%d, %d), (%d, %d), (%d, %lld), (%d, %lld), (%d, %lld), (%d, %lld), (%d, %Q) \x00PRAGMA main.%s\x00PRAGMA main.%s = %d\x00PRAGMA writable_schema=1\x00SELECT sql FROM sqlite_schema WHERE sql!='' AND rootpage!=0 AND name!='sqlite_sequence' ORDER BY type DESC\x00SELECT * FROM sqlite_schema WHERE rootpage=0 OR rootpage IS NULL\x00INSERT INTO sqlite_schema VALUES(?,?,?,?,?)\x00PRAGMA writable_schema=0\x00DELETE FROM %s.'rbu_tmp_%q'\x00rbu_state mismatch error\x00rbu_vfs_%d\x00SELECT count(*) FROM sqlite_schema WHERE type='index' AND tbl_name = %Q\x00rbu_index_cnt\x00SELECT 1 FROM sqlite_schema WHERE tbl_name = 'rbu_count'\x00SELECT sum(cnt * (1 + rbu_index_cnt(rbu_target_name(tbl))))FROM rbu_count\x00cannot update wal mode database\x00database modified during rbu %s\x00vacuum\x00update\x00BEGIN IMMEDIATE\x00PRAGMA journal_mode=off\x00-vactmp\x00DELETE FROM stat.rbu_state\x00rbu/zipvfs setup error\x00rbu(%s)/%z\x00rbu_memory\x00SELECT 0, 'tbl', '', 0, '', 1 UNION ALL SELECT 1, 'idx', '', 0, '', 2 UNION ALL SELECT 2, 'stat', '', 0, '', 0\x00PRAGMA '%q'.table_info('%q')\x00%z%s\"%w\".\"%w\".\"%w\"=\"%w\".\"%w\".\"%w\"\x00%z%s\"%w\".\"%w\".\"%w\" IS NOT \"%w\".\"%w\".\"%w\"\x00 OR \x00SELECT * FROM \"%w\".\"%w\" WHERE NOT EXISTS ( SELECT 1 FROM \"%w\".\"%w\" WHERE %s)\x00SELECT * FROM \"%w\".\"%w\", \"%w\".\"%w\" WHERE %s AND (%z)\x00table schemas do not match\x00SELECT tbl, ?2, stat FROM %Q.sqlite_stat1 WHERE tbl IS ?1 AND idx IS (CASE WHEN ?2=X'' THEN NULL ELSE ?2 END)\x00SELECT * FROM \x00 WHERE \x00 IS ?\x00SAVEPOINT changeset\x00RELEASE changeset\x00UPDATE main.\x00 SET \x00 = ?\x00idx IS CASE WHEN length(?4)=0 AND typeof(?4)='blob' THEN NULL ELSE ?4 END \x00DELETE FROM main.\x00 AND (?\x00AND \x00INSERT INTO main.\x00) VALUES(?\x00, ?\x00INSERT INTO main.sqlite_stat1 VALUES(?1, CASE WHEN length(?2)=0 AND typeof(?2)='blob' THEN NULL ELSE ?2 END, ?3)\x00DELETE FROM main.sqlite_stat1 WHERE tbl=?1 AND idx IS CASE WHEN length(?2)=0 AND typeof(?2)='blob' THEN NULL ELSE ?2 END AND (?4 OR stat IS ?3)\x00SAVEPOINT replace_op\x00RELEASE replace_op\x00SAVEPOINT changeset_apply\x00PRAGMA defer_foreign_keys = 1\x00sqlite3changeset_apply(): no such table: 
%s\x00sqlite3changeset_apply(): table %s has %d columns, expected %d or more\x00sqlite3changeset_apply(): primary key mismatch for table %s\x00PRAGMA defer_foreign_keys = 0\x00RELEASE changeset_apply\x00ROLLBACK TO changeset_apply\x00fts5: parser stack overflow\x00fts5: syntax error near \"%.*s\"\x00%z%.*s\x00wrong number of arguments to function highlight()\x00wrong number of arguments to function snippet()\x00snippet\x00highlight\x00bm25\x00prefix\x00malformed prefix=... directive\x00too many prefix indexes (max %d)\x00prefix length out of range (max 999)\x00tokenize\x00multiple tokenize=... directives\x00parse error in tokenize directive\x00content\x00multiple content=... directives\x00%Q.%Q\x00content_rowid\x00multiple content_rowid=... directives\x00columnsize\x00malformed columnsize=... directive\x00columns\x00malformed detail=... directive\x00unrecognized option: \"%.*s\"\x00reserved fts5 column name: %s\x00unindexed\x00unrecognized column option: %s\x00T.%Q\x00, T.%Q\x00, T.c%d\x00reserved fts5 table name: %s\x00parse error in \"%s\"\x00docsize\x00%Q.'%q_%s'\x00CREATE TABLE x(\x00%z%s%Q\x00%z, %Q HIDDEN, %s HIDDEN)\x00pgsz\x00hashsize\x00automerge\x00usermerge\x00crisismerge\x00SELECT k, v FROM %Q.'%q_config'\x00version\x00invalid fts5 file format (found %d, expected %d) - run 'rebuild'\x00unterminated string\x00fts5: syntax error near \"%.1s\"\x00OR\x00NOT\x00NEAR\x00expected integer, got \"%.*s\"\x00fts5: column queries are not supported (detail=none)\x00fts5: %s queries are not supported (detail!=full)\x00phrase\x00block\x00REPLACE INTO '%q'.'%q_data'(id, block) VALUES(?,?)\x00DELETE FROM '%q'.'%q_data' WHERE id>=? AND id<=?\x00DELETE FROM '%q'.'%q_idx' WHERE segid=?\x00PRAGMA %Q.data_version\x00SELECT pgno FROM '%q'.'%q_idx' WHERE segid=? AND term<=? ORDER BY term DESC LIMIT 1\x00INSERT INTO '%q'.'%q_idx'(segid,term,pgno) VALUES(?,?,?)\x00%s_data\x00id INTEGER PRIMARY KEY, block BLOB\x00segid, term, pgno, PRIMARY KEY(segid, term)\x00SELECT segid, term, (pgno>>1), (pgno&1) FROM %Q.'%q_idx' WHERE segid=%d ORDER BY 1, 2\x00\x00\x00\x00\x00\x00recursively defined fts5 content table\x00SELECT rowid, rank FROM %Q.%Q ORDER BY %s(\"%w\"%s%s) %s\x00DESC\x00ASC\x00reads\x00unknown special query: %.*s\x00SELECT %s\x00no such function: %s\x00parse error in rank function: %s\x00%s: table does not support scanning\x00delete-all\x00'delete-all' may only be used with a contentless or external content fts5 table\x00rebuild\x00'rebuild' may not be used with a contentless fts5 table\x00merge\x00integrity-check\x00cannot %s contentless fts5 table: %s\x00DELETE from\x00no such cursor: %lld\x00no such tokenizer: %s\x00error in tokenizer constructor\x00fts5_api_ptr\x00fts5: 2023-02-21 18:09:37 05941c2a04037fc3ed2ffae11f5d2260706f89431f463518740f72ada350866d\x00config\x00fts5\x00fts5_source_id\x00SELECT %s FROM %s T WHERE T.%Q >= ? AND T.%Q <= ? ORDER BY T.%Q ASC\x00SELECT %s FROM %s T WHERE T.%Q <= ? AND T.%Q >= ? 
ORDER BY T.%Q DESC\x00SELECT %s FROM %s T WHERE T.%Q=?\x00INSERT INTO %Q.'%q_content' VALUES(%s)\x00REPLACE INTO %Q.'%q_content' VALUES(%s)\x00DELETE FROM %Q.'%q_content' WHERE id=?\x00REPLACE INTO %Q.'%q_docsize' VALUES(?,?)\x00DELETE FROM %Q.'%q_docsize' WHERE id=?\x00SELECT sz FROM %Q.'%q_docsize' WHERE id=?\x00REPLACE INTO %Q.'%q_config' VALUES(?,?)\x00SELECT %s FROM %s AS T\x00DROP TABLE IF EXISTS %Q.'%q_data';DROP TABLE IF EXISTS %Q.'%q_idx';DROP TABLE IF EXISTS %Q.'%q_config';\x00DROP TABLE IF EXISTS %Q.'%q_docsize';\x00DROP TABLE IF EXISTS %Q.'%q_content';\x00ALTER TABLE %Q.'%q_%s' RENAME TO '%q_%s';\x00CREATE TABLE %Q.'%q_%q'(%s)%s\x00fts5: error creating shadow table %q_%s: %s\x00id INTEGER PRIMARY KEY\x00, c%d\x00id INTEGER PRIMARY KEY, sz BLOB\x00k PRIMARY KEY, v\x00DELETE FROM %Q.'%q_data';DELETE FROM %Q.'%q_idx';\x00DELETE FROM %Q.'%q_docsize';\x00SELECT count(*) FROM %Q.'%q_%s'\x00tokenchars\x00separators\x00L* N* Co\x00categories\x00remove_diacritics\x00unicode61\x00al\x00ance\x00ence\x00er\x00ic\x00able\x00ible\x00ant\x00ement\x00ment\x00ent\x00ion\x00ou\x00ism\x00ate\x00iti\x00ous\x00ive\x00ize\x00at\x00bl\x00ble\x00iz\x00ational\x00tional\x00tion\x00enci\x00anci\x00izer\x00logi\x00bli\x00alli\x00entli\x00eli\x00e\x00ousli\x00ization\x00ation\x00ator\x00alism\x00iveness\x00fulness\x00ful\x00ousness\x00aliti\x00iviti\x00biliti\x00ical\x00ness\x00icate\x00iciti\x00ative\x00alize\x00eed\x00ee\x00ed\x00ing\x00case_sensitive\x00ascii\x00porter\x00trigram\x00col\x00row\x00instance\x00fts5vocab: unknown table type: %Q\x00CREATE TABlE vocab(term, col, doc, cnt)\x00CREATE TABlE vocab(term, doc, cnt)\x00CREATE TABlE vocab(term, doc, col, offset)\x00wrong number of vtable arguments\x00recursive definition for %s.%s\x00SELECT t.%Q FROM %Q.%Q AS t WHERE t.%Q MATCH '*id'\x00no such fts5 table: %s.%s\x00fts5vocab\x002023-02-21 18:09:37 05941c2a04037fc3ed2ffae11f5d2260706f89431f463518740f72ada350866d\x00" +var ts1 = "3.41.2\x00ATOMIC_INTRINSICS=1\x00COMPILER=msvc-1900\x00DEFAULT_AUTOVACUUM\x00DEFAULT_CACHE_SIZE=-2000\x00DEFAULT_FILE_FORMAT=4\x00DEFAULT_JOURNAL_SIZE_LIMIT=-1\x00DEFAULT_MEMSTATUS=0\x00DEFAULT_MMAP_SIZE=0\x00DEFAULT_PAGE_SIZE=4096\x00DEFAULT_PCACHE_INITSZ=20\x00DEFAULT_RECURSIVE_TRIGGERS\x00DEFAULT_SECTOR_SIZE=4096\x00DEFAULT_SYNCHRONOUS=2\x00DEFAULT_WAL_AUTOCHECKPOINT=1000\x00DEFAULT_WAL_SYNCHRONOUS=2\x00DEFAULT_WORKER_THREADS=0\x00ENABLE_COLUMN_METADATA\x00ENABLE_FTS5\x00ENABLE_GEOPOLY\x00ENABLE_MATH_FUNCTIONS\x00ENABLE_MEMORY_MANAGEMENT\x00ENABLE_OFFSET_SQL_FUNC\x00ENABLE_PREUPDATE_HOOK\x00ENABLE_RBU\x00ENABLE_RTREE\x00ENABLE_SESSION\x00ENABLE_SNAPSHOT\x00ENABLE_STAT4\x00ENABLE_UNLOCK_NOTIFY\x00LIKE_DOESNT_MATCH_BLOBS\x00MALLOC_SOFT_LIMIT=1024\x00MAX_ATTACHED=10\x00MAX_COLUMN=2000\x00MAX_COMPOUND_SELECT=500\x00MAX_DEFAULT_PAGE_SIZE=8192\x00MAX_EXPR_DEPTH=1000\x00MAX_FUNCTION_ARG=127\x00MAX_LENGTH=1000000000\x00MAX_LIKE_PATTERN_LENGTH=50000\x00MAX_MMAP_SIZE=0x7fff0000\x00MAX_PAGE_COUNT=1073741823\x00MAX_PAGE_SIZE=65536\x00MAX_SQL_LENGTH=1000000000\x00MAX_TRIGGER_DEPTH=1000\x00MAX_VARIABLE_NUMBER=32766\x00MAX_VDBE_OP=250000000\x00MAX_WORKER_THREADS=8\x00MUTEX_NOOP\x00SOUNDEX\x00SYSTEM_MALLOC\x00TEMP_STORE=1\x00THREADSAFE=1\x00BINARY\x00ANY\x00BLOB\x00INT\x00INTEGER\x00REAL\x00TEXT\x0020b:20e\x0020c:20e\x0020e\x0040f-21a-21d\x00now\x00local time unavailable\x00second\x00minute\x00hour\x00\x00\x00day\x00\x00\x00\x00month\x00\x00year\x00\x00\x00auto\x00julianday\x00localtime\x00unixepoch\x00utc\x00weekday \x00start of 
\x00month\x00year\x00day\x00%02d\x00%06.3f\x00%03d\x00%.16g\x00%lld\x00%04d\x00date\x00time\x00datetime\x00strftime\x00current_time\x00current_timestamp\x00current_date\x00failed to allocate %u bytes of memory\x00failed memory resize %u to %u bytes\x00out of memory\x000123456789ABCDEF0123456789abcdef\x00-x0\x00X0\x00%\x00NaN\x00Inf\x00\x00NULL\x00(NULL)\x00.\x00(join-%u)\x00(subquery-%u)\x00thstndrd\x00922337203685477580\x00API call with %s database connection pointer\x00unopened\x00invalid\x00Savepoint\x00AutoCommit\x00Transaction\x00Checkpoint\x00JournalMode\x00Vacuum\x00VFilter\x00VUpdate\x00Init\x00Goto\x00Gosub\x00InitCoroutine\x00Yield\x00MustBeInt\x00Jump\x00Once\x00If\x00IfNot\x00IsType\x00Not\x00IfNullRow\x00SeekLT\x00SeekLE\x00SeekGE\x00SeekGT\x00IfNotOpen\x00IfNoHope\x00NoConflict\x00NotFound\x00Found\x00SeekRowid\x00NotExists\x00Last\x00IfSmaller\x00SorterSort\x00Sort\x00Rewind\x00SorterNext\x00Prev\x00Next\x00IdxLE\x00IdxGT\x00IdxLT\x00Or\x00And\x00IdxGE\x00RowSetRead\x00RowSetTest\x00Program\x00FkIfZero\x00IsNull\x00NotNull\x00Ne\x00Eq\x00Gt\x00Le\x00Lt\x00Ge\x00ElseEq\x00IfPos\x00IfNotZero\x00DecrJumpZero\x00IncrVacuum\x00VNext\x00Filter\x00PureFunc\x00Function\x00Return\x00EndCoroutine\x00HaltIfNull\x00Halt\x00Integer\x00Int64\x00String\x00BeginSubrtn\x00Null\x00SoftNull\x00Blob\x00Variable\x00Move\x00Copy\x00SCopy\x00IntCopy\x00FkCheck\x00ResultRow\x00CollSeq\x00AddImm\x00RealAffinity\x00Cast\x00Permutation\x00Compare\x00IsTrue\x00ZeroOrNull\x00Offset\x00Column\x00TypeCheck\x00Affinity\x00MakeRecord\x00Count\x00ReadCookie\x00SetCookie\x00ReopenIdx\x00BitAnd\x00BitOr\x00ShiftLeft\x00ShiftRight\x00Add\x00Subtract\x00Multiply\x00Divide\x00Remainder\x00Concat\x00OpenRead\x00OpenWrite\x00BitNot\x00OpenDup\x00OpenAutoindex\x00String8\x00OpenEphemeral\x00SorterOpen\x00SequenceTest\x00OpenPseudo\x00Close\x00ColumnsUsed\x00SeekScan\x00SeekHit\x00Sequence\x00NewRowid\x00Insert\x00RowCell\x00Delete\x00ResetCount\x00SorterCompare\x00SorterData\x00RowData\x00Rowid\x00NullRow\x00SeekEnd\x00IdxInsert\x00SorterInsert\x00IdxDelete\x00DeferredSeek\x00IdxRowid\x00FinishSeek\x00Destroy\x00Clear\x00ResetSorter\x00CreateBtree\x00SqlExec\x00ParseSchema\x00LoadAnalysis\x00DropTable\x00DropIndex\x00Real\x00DropTrigger\x00IntegrityCk\x00RowSetAdd\x00Param\x00FkCounter\x00MemMax\x00OffsetLimit\x00AggInverse\x00AggStep\x00AggStep1\x00AggValue\x00AggFinal\x00Expire\x00CursorLock\x00CursorUnlock\x00TableLock\x00VBegin\x00VCreate\x00VDestroy\x00VOpen\x00VInitIn\x00VColumn\x00VRename\x00Pagecount\x00MaxPgcnt\x00ClrSubtype\x00FilterAdd\x00Trace\x00CursorHint\x00ReleaseReg\x00Noop\x00Explain\x00Abortable\x00AreFileApisANSI\x00CharLowerW\x00CharUpperW\x00CloseHandle\x00CreateFileA\x00CreateFileW\x00CreateFileMappingA\x00CreateFileMappingW\x00CreateMutexW\x00DeleteFileA\x00DeleteFileW\x00FileTimeToLocalFileTime\x00FileTimeToSystemTime\x00FlushFileBuffers\x00FormatMessageA\x00FormatMessageW\x00FreeLibrary\x00GetCurrentProcessId\x00GetDiskFreeSpaceA\x00GetDiskFreeSpaceW\x00GetFileAttributesA\x00GetFileAttributesW\x00GetFileAttributesExW\x00GetFileSize\x00GetFullPathNameA\x00GetFullPathNameW\x00GetLastError\x00GetProcAddressA\x00GetSystemInfo\x00GetSystemTime\x00GetSystemTimeAsFileTime\x00GetTempPathA\x00GetTempPathW\x00GetTickCount\x00GetVersionExA\x00GetVersionExW\x00HeapAlloc\x00HeapCreate\x00HeapDestroy\x00HeapFree\x00HeapReAlloc\x00HeapSize\x00HeapValidate\x00HeapCompact\x00LoadLibraryA\x00LoadLibraryW\x00LocalFree\x00LockFile\x00LockFileEx\x00MapViewOfFile\x00MultiByteToWideChar\x00QueryPerformanceCounter\
x00ReadFile\x00SetEndOfFile\x00SetFilePointer\x00Sleep\x00SystemTimeToFileTime\x00UnlockFile\x00UnlockFileEx\x00UnmapViewOfFile\x00WideCharToMultiByte\x00WriteFile\x00CreateEventExW\x00WaitForSingleObject\x00WaitForSingleObjectEx\x00SetFilePointerEx\x00GetFileInformationByHandleEx\x00MapViewOfFileFromApp\x00CreateFile2\x00LoadPackagedLibrary\x00GetTickCount64\x00GetNativeSystemInfo\x00OutputDebugStringA\x00OutputDebugStringW\x00GetProcessHeap\x00CreateFileMappingFromApp\x00InterlockedCompareExchange\x00UuidCreate\x00UuidCreateSequential\x00FlushViewOfFile\x00%s\x00OsError 0x%lx (%lu)\x00os_win.c:%d: (%lu) %s(%s) - %s\x00delayed %dms for lock/sharing conflict at line %d\x00winSeekFile\x00winClose\x00winRead\x00winWrite1\x00winWrite2\x00winTruncate1\x00winTruncate2\x00winSync1\x00winSync2\x00winFileSize\x00winUnlockReadLock\x00winUnlock\x00winLockSharedMemory\x00%s-shm\x00readonly_shm\x00winOpenShm\x00winShmMap1\x00winShmMap2\x00winShmMap3\x00winUnmapfile1\x00winUnmapfile2\x00winMapfile1\x00winMapfile2\x00etilqs_\x00winGetTempname1\x00winGetTempname2\x00winGetTempname3\x00winGetTempname4\x00winGetTempname5\x00abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\x00exclusive\x00winOpen\x00psow\x00winDelete\x00winAccess\x00%s%c%s\x00winFullPathname1\x00winFullPathname2\x00winFullPathname3\x00winFullPathname4\x00win32\x00win32-longpath\x00win32-none\x00win32-longpath-none\x00memdb\x00memdb(%p,%lld)\x00PRAGMA \"%w\".page_count\x00ATTACH x AS %Q\x00recovered %d pages from %s\x00-journal\x00-wal\x00nolock\x00immutable\x00PRAGMA table_list\x00recovered %d frames from WAL file %s\x00cannot limit WAL size: %s\x00SQLite format 3\x00:memory:\x00@ \x00\n\x00invalid page number %d\x002nd reference to page %d\x00Failed to read ptrmap key=%d\x00Bad ptr map entry key=%d expected=(%d,%d) got=(%d,%d)\x00failed to get page %d\x00freelist leaf count too big on page %d\x00%s is %d but should be %d\x00size\x00overflow list length\x00Page %u: \x00unable to get the page. 
error code=%d\x00btreeInitPage() returns error code %d\x00free space corruption\x00On tree page %u cell %d: \x00On page %u at right child: \x00Offset %d out of range %d..%d\x00Extends off end of page\x00Rowid %lld out of order\x00Child page depth differs\x00Multiple uses for byte %u of page %u\x00Fragmentation of %d bytes reported as %d on page %u\x00Main freelist: \x00max rootpage (%d) disagrees with header (%d)\x00incremental_vacuum enabled with a max rootpage of zero\x00Page %d is never used\x00Pointer map page %d is referenced\x00unknown database %s\x00destination database is in use\x00source and destination must be distinct\x00%!.15g\x00-\x00%s%s\x00k(%d\x00B\x00,%s%s%s\x00N.\x00)\x00%.18s-%s\x00%s(%d)\x00%d\x00(blob)\x00vtab:%p\x00%c%u\x00]\x00program\x00?\x008\x0016LE\x0016BE\x00addr\x00opcode\x00p1\x00p2\x00p3\x00p4\x00p5\x00comment\x00id\x00parent\x00notused\x00detail\x00%.4c%s%.16c\x00MJ delete: %s\x00MJ collide: %s\x00-mj%06X9%02X\x00FOREIGN KEY constraint failed\x00a CHECK constraint\x00a generated column\x00an index\x00non-deterministic use of %s() in %s\x00API called with finalized prepared statement\x00API called with NULL prepared statement\x00string or blob too big\x00bind on a busy prepared statement: [%s]\x00-- \x00'%.*q'\x00zeroblob(%d)\x00x'\x00%02x\x00'\x00%s constraint failed\x00%z: %s\x00abort at %d in [%s]: %s\x00cannot store %s value in %s column %s.%s\x00cannot open savepoint - SQL statements in progress\x00no such savepoint: %s\x00cannot release savepoint - SQL statements in progress\x00cannot commit transaction - SQL statements in progress\x00cannot start a transaction within a transaction\x00cannot rollback - no transaction is active\x00cannot commit - no transaction is active\x00database schema has changed\x00index corruption\x00sqlite_master\x00SELECT*FROM\"%w\".%s WHERE %s ORDER BY rowid\x00too many levels of trigger recursion\x00cannot change %s wal mode from within a transaction\x00into\x00out of\x00database table is locked: %s\x00ValueList\x00-- %s\x00statement aborts at %d: [%s] %s\x00NOT NULL\x00UNIQUE\x00CHECK\x00FOREIGN KEY\x00cannot open value of type %s\x00null\x00real\x00integer\x00no such rowid: %lld\x00cannot open virtual table: %s\x00cannot open table without rowid: %s\x00cannot open view: %s\x00no such column: \"%s\"\x00foreign key\x00indexed\x00cannot open %s column for writing\x00sqlite_\x00sqlite_temp_master\x00sqlite_temp_schema\x00sqlite_schema\x00main\x00*\x00new\x00old\x00excluded\x00misuse of aliased aggregate %s\x00misuse of aliased window function %s\x00row value misused\x00double-quoted string literal: \"%w\"\x00coalesce\x00no such column\x00ambiguous column name\x00%s: %s.%s.%s\x00%s: %s.%s\x00%s: %s\x00partial index WHERE clauses\x00index expressions\x00CHECK constraints\x00generated columns\x00%s prohibited in %s\x00the \".\" operator\x00second argument to %#T() must be a constant between 0.0 and 1.0\x00not authorized to use function: %#T\x00non-deterministic functions\x00%#T() may not be used as a window function\x00window\x00aggregate\x00misuse of %s function %#T()\x00no such function: %#T\x00wrong number of arguments to function %#T()\x00FILTER may not be used with non-aggregate %#T()\x00subqueries\x00parameters\x00%r %s BY term out of range - should be between 1 and %d\x00too many terms in ORDER BY clause\x00ORDER\x00%r ORDER BY term does not match any column in the result set\x00too many terms in %s BY clause\x00HAVING clause on a non-aggregate query\x00GROUP\x00aggregate functions are not allowed in the GROUP BY 
clause\x00Expression tree is too large (maximum depth %d)\x00IN(...) element has %d term%s - expected %d\x00s\x000\x00too many arguments on function %T\x00unsafe use of %#T()\x00variable number must be between ?1 and ?%d\x00too many SQL variables\x00%d columns assigned %d values\x00too many columns in %s\x00true\x00false\x00_ROWID_\x00ROWID\x00OID\x00USING ROWID SEARCH ON TABLE %s FOR IN-OPERATOR\x00USING INDEX %s FOR IN-OPERATOR\x00sub-select returns %d columns - expected %d\x00REUSE LIST SUBQUERY %d\x00%sLIST SUBQUERY %d\x00CORRELATED \x00REUSE SUBQUERY %d\x00%sSCALAR SUBQUERY %d\x001\x000x\x00hex literal too big: %s%#T\x00generated column loop on \"%s\"\x00blob\x00text\x00numeric\x00flexnum\x00none\x00misuse of aggregate: %#T()\x00unknown function: %#T()\x00RAISE() may only be used within a trigger-program\x00B\x00C\x00D\x00E\x00F\x00table %s may not be altered\x00SELECT 1 FROM \"%w\".sqlite_master WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%' AND sqlite_rename_test(%Q, sql, type, name, %d, %Q, %d)=NULL \x00SELECT 1 FROM temp.sqlite_master WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%' AND sqlite_rename_test(%Q, sql, type, name, 1, %Q, %d)=NULL \x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_quotefix(%Q, sql)WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%'\x00UPDATE temp.sqlite_master SET sql = sqlite_rename_quotefix('temp', sql)WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%'\x00there is already another table or index with this name: %s\x00table\x00view %s may not be altered\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, %d) WHERE (type!='index' OR tbl_name=%Q COLLATE nocase)AND name NOT LIKE 'sqliteX_%%' ESCAPE 'X'\x00UPDATE %Q.sqlite_master SET tbl_name = %Q, name = CASE WHEN type='table' THEN %Q WHEN name LIKE 'sqliteX_autoindex%%' ESCAPE 'X' AND type='index' THEN 'sqlite_autoindex_' || %Q || substr(name,%d+18) ELSE name END WHERE tbl_name=%Q COLLATE nocase AND (type='table' OR type='index' OR type='trigger');\x00sqlite_sequence\x00UPDATE \"%w\".sqlite_sequence set name = %Q WHERE name = %Q\x00UPDATE sqlite_temp_schema SET sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, 1), tbl_name = CASE WHEN tbl_name=%Q COLLATE nocase AND sqlite_rename_test(%Q, sql, type, name, 1, 'after rename', 0) THEN %Q ELSE tbl_name END WHERE type IN ('view', 'trigger')\x00after rename\x00SELECT raise(ABORT,%Q) FROM \"%w\".\"%w\"\x00Cannot add a PRIMARY KEY column\x00Cannot add a UNIQUE column\x00Cannot add a REFERENCES column with non-NULL default value\x00Cannot add a NOT NULL column with default value NULL\x00Cannot add a column with non-constant default\x00cannot add a STORED column\x00UPDATE \"%w\".sqlite_master SET sql = printf('%%.%ds, ',sql) || %Q || substr(sql,1+length(printf('%%.%ds',sql))) WHERE type = 'table' AND name = %Q\x00SELECT CASE WHEN quick_check GLOB 'CHECK*' THEN raise(ABORT,'CHECK constraint failed') ELSE raise(ABORT,'NOT NULL constraint failed') END FROM pragma_quick_check(%Q,%Q) WHERE quick_check GLOB 'CHECK*' OR quick_check GLOB 'NULL*'\x00virtual tables may not be altered\x00Cannot add a column to a view\x00sqlite_altertab_%s\x00view\x00virtual table\x00cannot %s %s \"%s\"\x00drop column from\x00rename columns of\x00no such column: \"%T\"\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, %d) WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND (type != 
'index' OR tbl_name = %Q)\x00UPDATE temp.sqlite_master SET sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, 1) WHERE type IN ('trigger', 'view')\x00error in %s %s%s%s: %s\x00 \x00CREATE \x00\"%w\" \x00%Q%s\x00%.*s%s\x00cannot drop %s column: \"%s\"\x00PRIMARY KEY\x00cannot drop column \"%s\": no other columns exist\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_drop_column(%d, sql, %d) WHERE (type=='table' AND tbl_name=%Q COLLATE nocase)\x00after drop column\x00sqlite_rename_column\x00sqlite_rename_table\x00sqlite_rename_test\x00sqlite_drop_column\x00sqlite_rename_quotefix\x00CREATE TABLE %Q.%s(%s)\x00DELETE FROM %Q.%s WHERE %s=%Q\x00DELETE FROM %Q.%s\x00sqlite_stat1\x00tbl,idx,stat\x00sqlite_stat4\x00tbl,idx,neq,nlt,ndlt,sample\x00sqlite_stat3\x00stat_init\x00stat_push\x00%llu\x00 %llu\x00%llu \x00stat_get\x00sqlite\\_%\x00BBB\x00idx\x00tbl\x00unordered*\x00sz=[0-9]*\x00noskipscan*\x00SELECT idx,count(*) FROM %Q.sqlite_stat4 GROUP BY idx\x00SELECT idx,neq,nlt,ndlt,sample FROM %Q.sqlite_stat4\x00SELECT tbl,idx,stat FROM %Q.sqlite_stat1\x00x\x00\x00too many attached databases - max %d\x00database %s is already in use\x00database is already attached\x00attached databases must use the same text encoding as main database\x00unable to open database: %s\x00no such database: %s\x00cannot detach database %s\x00database %s is locked\x00sqlite_detach\x00sqlite_attach\x00%s cannot use variables\x00%s %T cannot reference objects in database %s\x00authorizer malfunction\x00%s.%s\x00%s.%z\x00access to %z is prohibited\x00not authorized\x00pragma_\x00no such view\x00no such table\x00corrupt database\x00unknown database %T\x00object name reserved for internal use: %s\x00temporary table name must be unqualified\x00%s %T already exists\x00there is already an index named %s\x00sqlite_returning\x00cannot use RETURNING in a trigger\x00too many columns on %s\x00always\x00generated\x00duplicate column name: %s\x00default value of column [%s] is not constant\x00cannot use DEFAULT on a generated column\x00generated columns cannot be part of the PRIMARY KEY\x00table \"%s\" has more than one primary key\x00AUTOINCREMENT is only allowed on an INTEGER PRIMARY KEY\x00virtual tables cannot use computed columns\x00virtual\x00stored\x00error in generated column \"%s\"\x00,\x00\n \x00,\n \x00\n)\x00CREATE TABLE \x00 TEXT\x00 NUM\x00 INT\x00 REAL\x00unknown datatype for %s.%s: \"%s\"\x00missing datatype for %s.%s\x00AUTOINCREMENT not allowed on WITHOUT ROWID tables\x00PRIMARY KEY missing on table %s\x00must have at least one non-generated column\x00TABLE\x00VIEW\x00CREATE %s %.*s\x00UPDATE %Q.sqlite_master SET type='%s', name=%Q, tbl_name=%Q, rootpage=#%d, sql=%Q WHERE rowid=#%d\x00CREATE TABLE %Q.sqlite_sequence(name,seq)\x00tbl_name='%q' AND type!='trigger'\x00parameters are not allowed in views\x00view %s is circularly defined\x00corrupt schema\x00UPDATE %Q.sqlite_master SET rootpage=%d WHERE #%d AND rootpage=#%d\x00sqlite_stat%d\x00DELETE FROM %Q.sqlite_sequence WHERE name=%Q\x00DELETE FROM %Q.sqlite_master WHERE tbl_name=%Q and type!='trigger'\x00stat\x00table %s may not be dropped\x00use DROP TABLE to delete table %s\x00use DROP VIEW to delete view %s\x00foreign key on %s should reference only one column of table %T\x00number of columns in foreign key does not match the number of columns in the referenced table\x00unknown column \"%s\" in foreign key definition\x00unsupported use of NULLS %s\x00FIRST\x00LAST\x00index\x00cannot create a TEMP index on non-TEMP table \"%s\"\x00table %s may not be 
indexed\x00views may not be indexed\x00virtual tables may not be indexed\x00there is already a table named %s\x00index %s already exists\x00sqlite_autoindex_%s_%d\x00expressions prohibited in PRIMARY KEY and UNIQUE constraints\x00conflicting ON CONFLICT clauses specified\x00invalid rootpage\x00CREATE%s INDEX %.*s\x00 UNIQUE\x00INSERT INTO %Q.sqlite_master VALUES('index',%Q,%Q,#%d,%Q);\x00name='%q' AND type='index'\x00no such index: %S\x00index associated with UNIQUE or PRIMARY KEY constraint cannot be dropped\x00DELETE FROM %Q.sqlite_master WHERE name=%Q AND type='index'\x00too many FROM clause terms, max: %d\x00a JOIN clause is required before %s\x00ON\x00USING\x00BEGIN\x00ROLLBACK\x00COMMIT\x00RELEASE\x00unable to open a temporary database file for storing temporary tables\x00index '%q'\x00, \x00%s.rowid\x00unable to identify the object to be reindexed\x00duplicate WITH table name: %s\x00no such collation sequence: %s\x00unsafe use of virtual table \"%s\"\x00table %s may not be modified\x00cannot modify %s because it is a view\x00rows deleted\x00integer overflow\x00%.*f\x00LIKE or GLOB pattern too complex\x00ESCAPE expression must be a single character\x00%!.20e\x00%Q\x00?000\x00MATCH\x00like\x00implies_nonnull_row\x00expr_compare\x00expr_implies_expr\x00affinity\x00soundex\x00load_extension\x00sqlite_compileoption_used\x00sqlite_compileoption_get\x00unlikely\x00likelihood\x00likely\x00sqlite_offset\x00ltrim\x00rtrim\x00trim\x00min\x00max\x00typeof\x00subtype\x00length\x00instr\x00printf\x00format\x00unicode\x00char\x00abs\x00round\x00upper\x00lower\x00hex\x00unhex\x00ifnull\x00random\x00randomblob\x00nullif\x00sqlite_version\x00sqlite_source_id\x00sqlite_log\x00quote\x00last_insert_rowid\x00changes\x00total_changes\x00replace\x00zeroblob\x00substr\x00substring\x00sum\x00total\x00avg\x00count\x00group_concat\x00glob\x00ceil\x00ceiling\x00floor\x00trunc\x00ln\x00log\x00log10\x00log2\x00exp\x00pow\x00power\x00mod\x00acos\x00asin\x00atan\x00atan2\x00cos\x00sin\x00tan\x00cosh\x00sinh\x00tanh\x00acosh\x00asinh\x00atanh\x00sqrt\x00radians\x00degrees\x00pi\x00sign\x00iif\x00foreign key mismatch - \"%w\" referencing \"%w\"\x00cannot INSERT into generated column \"%s\"\x00table %S has no column named %s\x00table %S has %d columns but %d values were supplied\x00%d values for %d columns\x00UPSERT not implemented for virtual table \"%s\"\x00cannot UPSERT a view\x00rows inserted\x00sqlite3_extension_init\x00sqlite3_\x00lib\x00_init\x00no entry point [%s] in shared library [%s]\x00error during initialization: %s\x00unable to open shared library [%.*s]\x00dll\x00automatic extension loading failed: 
%s\x00seq\x00from\x00to\x00on_update\x00on_delete\x00match\x00cid\x00name\x00type\x00notnull\x00dflt_value\x00pk\x00hidden\x00schema\x00ncol\x00wr\x00strict\x00seqno\x00desc\x00coll\x00key\x00builtin\x00enc\x00narg\x00flags\x00wdth\x00hght\x00flgs\x00unique\x00origin\x00partial\x00rowid\x00fkid\x00file\x00busy\x00checkpointed\x00database\x00status\x00cache_size\x00timeout\x00analysis_limit\x00application_id\x00auto_vacuum\x00automatic_index\x00busy_timeout\x00cache_spill\x00case_sensitive_like\x00cell_size_check\x00checkpoint_fullfsync\x00collation_list\x00compile_options\x00count_changes\x00data_store_directory\x00data_version\x00database_list\x00default_cache_size\x00defer_foreign_keys\x00empty_result_callbacks\x00encoding\x00foreign_key_check\x00foreign_key_list\x00foreign_keys\x00freelist_count\x00full_column_names\x00fullfsync\x00function_list\x00hard_heap_limit\x00ignore_check_constraints\x00incremental_vacuum\x00index_info\x00index_list\x00index_xinfo\x00integrity_check\x00journal_mode\x00journal_size_limit\x00legacy_alter_table\x00locking_mode\x00max_page_count\x00mmap_size\x00module_list\x00optimize\x00page_count\x00page_size\x00pragma_list\x00query_only\x00quick_check\x00read_uncommitted\x00recursive_triggers\x00reverse_unordered_selects\x00schema_version\x00secure_delete\x00short_column_names\x00shrink_memory\x00soft_heap_limit\x00synchronous\x00table_info\x00table_list\x00table_xinfo\x00temp_store\x00temp_store_directory\x00threads\x00trusted_schema\x00user_version\x00wal_autocheckpoint\x00wal_checkpoint\x00writable_schema\x00onoffalseyestruextrafull\x00normal\x00full\x00incremental\x00memory\x00temporary storage cannot be changed from within a transaction\x00SET NULL\x00SET DEFAULT\x00CASCADE\x00RESTRICT\x00NO ACTION\x00delete\x00persist\x00off\x00truncate\x00wal\x00w\x00a\x00sissii\x00utf8\x00utf16le\x00utf16be\x00-%T\x00fast\x00not a writable directory\x00Safety level may not be changed inside a transaction\x00reset\x00issisii\x00issisi\x00SELECT*FROM\"%w\"\x00shadow\x00sssiii\x00iisX\x00isiX\x00c\x00u\x00isisi\x00iss\x00is\x00iissssss\x00NONE\x00siX\x00*** in database %s ***\n\x00row not in PRIMARY KEY order for %s\x00NULL value in %s.%s\x00non-%s value in %s.%s\x00NUMERIC value in %s.%s\x00C\x00TEXT value in %s.%s\x00CHECK constraint failed in %s\x00row \x00 missing from index \x00rowid not at end-of-record for row \x00 of index \x00 values differ from index \x00non-unique entry in index \x00wrong # of entries in index \x00ok\x00unsupported encoding: %s\x00restart\x00ANALYZE \"%w\".\"%w\"\x00UTF8\x00UTF-8\x00UTF-16le\x00UTF-16be\x00UTF16le\x00UTF16be\x00UTF-16\x00UTF16\x00CREATE TABLE x\x00%c\"%s\"\x00(\"%s\"\x00,arg HIDDEN\x00,schema HIDDEN\x00PRAGMA \x00%Q.\x00=%Q\x00error in %s %s after %s: %s\x00malformed database schema (%s)\x00%z - %s\x00rename\x00drop column\x00add column\x00orphan index\x00CREATE TABLE x(type text,name text,tbl_name text,rootpage int,sql text)\x00unsupported file format\x00SELECT*FROM\"%w\".%s ORDER BY rowid\x00database schema is locked: %s\x00statement too long\x00unknown join type: %T%s%T%s%T\x00naturaleftouterightfullinnercross\x00a NATURAL join may not have an ON or USING clause\x00cannot join using column %s - column not present in both tables\x00ambiguous reference to %s in USING()\x00UNION ALL\x00INTERSECT\x00EXCEPT\x00UNION\x00USE TEMP B-TREE FOR %s\x00USE TEMP B-TREE FOR %sORDER BY\x00RIGHT PART OF \x00column%d\x00%.*z:%u\x00NUM\x00cannot use window functions in recursive queries\x00recursive aggregate queries not 
supported\x00SETUP\x00RECURSIVE STEP\x00SCAN %d CONSTANT ROW%s\x00S\x00COMPOUND QUERY\x00LEFT-MOST SUBQUERY\x00%s USING TEMP B-TREE\x00all VALUES must have the same number of terms\x00SELECTs to the left and right of %s do not have the same number of result columns\x00MERGE (%s)\x00LEFT\x00RIGHT\x00no such index: %s\x00'%s' is not a function\x00no such index: \"%s\"\x00multiple references to recursive table: %s\x00circular reference: %s\x00table %s has %d values for %d columns\x00multiple recursive references: %s\x00recursive reference in a subquery: %s\x00%!S\x00too many references to \"%s\": max 65535\x00access to view \"%s\" prohibited\x00..%s\x00%s.%s.%s\x00no such table: %s\x00no tables specified\x00too many columns in result set\x00DISTINCT aggregates must have exactly one argument\x00USE TEMP B-TREE FOR %s(DISTINCT)\x00SCAN %s%s%s\x00 USING COVERING INDEX \x00target object/alias may not appear in FROM clause: %s\x00expected %d columns for '%s' but got %d\x00CO-ROUTINE %!S\x00MATERIALIZE %!S\x00DISTINCT\x00GROUP BY\x00sqlite3_get_table() called with two or more incompatible queries\x00temporary trigger may not have qualified name\x00trigger\x00cannot create triggers on virtual tables\x00trigger %T already exists\x00cannot create trigger on system table\x00cannot create %s trigger on view: %S\x00BEFORE\x00AFTER\x00cannot create INSTEAD OF trigger on table: %S\x00trigger \"%s\" may not write to shadow table \"%s\"\x00INSERT INTO %Q.sqlite_master VALUES('trigger',%Q,%Q,0,'CREATE TRIGGER %q')\x00type='trigger' AND name='%q'\x00no such trigger: %S\x00DELETE FROM %Q.sqlite_master WHERE name=%Q AND type='trigger'\x00%s RETURNING is not available on virtual tables\x00DELETE\x00UPDATE\x00RETURNING may not use \"TABLE.*\" wildcards\x00-- TRIGGER %s\x00cannot UPDATE generated column \"%s\"\x00no such column: %s\x00rows updated\x00%r \x00%sON CONFLICT clause does not match any PRIMARY KEY or UNIQUE constraint\x00CRE\x00INS\x00cannot VACUUM from within a transaction\x00cannot VACUUM - SQL statements in progress\x00non-text filename\x00ATTACH %Q AS vacuum_db\x00output file already exists\x00SELECT sql FROM \"%w\".sqlite_schema WHERE type='table'AND name<>'sqlite_sequence' AND coalesce(rootpage,1)>0\x00SELECT sql FROM \"%w\".sqlite_schema WHERE type='index'\x00SELECT'INSERT INTO vacuum_db.'||quote(name)||' SELECT*FROM\"%w\".'||quote(name)FROM vacuum_db.sqlite_schema WHERE type='table'AND coalesce(rootpage,1)>0\x00INSERT INTO vacuum_db.sqlite_schema SELECT*FROM \"%w\".sqlite_schema WHERE type IN('view','trigger') OR(type='table'AND rootpage=0)\x00CREATE VIRTUAL TABLE %T\x00UPDATE %Q.sqlite_master SET type='table', name=%Q, tbl_name=%Q, rootpage=0, sql=%Q WHERE rowid=#%d\x00name=%Q AND sql=%Q\x00vtable constructor called recursively: %s\x00vtable constructor failed: %s\x00vtable constructor did not declare schema: %s\x00no such module: %s\x00\x00 AND \x00(\x00 (\x00%s=?\x00ANY(%s)\x00>\x00<\x00%s %S\x00SEARCH\x00SCAN\x00AUTOMATIC PARTIAL COVERING INDEX\x00AUTOMATIC COVERING INDEX\x00COVERING INDEX %s\x00INDEX %s\x00 USING \x00 USING INTEGER PRIMARY KEY (%s\x00>? 
AND %s\x00%c?)\x00 VIRTUAL TABLE INDEX %d:%s\x00 LEFT-JOIN\x00BLOOM FILTER ON %S (\x00rowid=?\x00MULTI-INDEX OR\x00INDEX %d\x00RIGHT-JOIN %s\x00regexp\x00ON clause references tables to its right\x00NOCASE\x00too many arguments on %s() - max %d\x00automatic index on %s(%s)\x00auto-index\x00%s.xBestIndex malfunction\x00abbreviated query algorithm search\x00no query solution\x00at most %d tables in a join\x00SCAN CONSTANT ROW\x00second argument to nth_value must be a positive integer\x00argument of ntile must be a positive integer\x00row_number\x00dense_rank\x00rank\x00percent_rank\x00cume_dist\x00ntile\x00last_value\x00nth_value\x00first_value\x00lead\x00lag\x00no such window: %s\x00RANGE with offset PRECEDING/FOLLOWING requires one ORDER BY expression\x00FILTER clause may only be used with aggregate window functions\x00misuse of aggregate: %s()\x00unsupported frame specification\x00PARTITION clause\x00ORDER BY clause\x00frame specification\x00cannot override %s of window: %s\x00DISTINCT is not supported for window functions\x00frame starting offset must be a non-negative integer\x00frame ending offset must be a non-negative integer\x00frame starting offset must be a non-negative number\x00frame ending offset must be a non-negative number\x00%s clause should come after %s not before\x00ORDER BY\x00LIMIT\x00too many terms in compound SELECT\x00syntax error after column name \"%.*s\"\x00parser stack overflow\x00unknown table option: %.*s\x00set list\x00near \"%T\": syntax error\x00qualified table names are not allowed on INSERT, UPDATE, and DELETE statements within triggers\x00the INDEXED BY clause is not allowed on UPDATE or DELETE statements within triggers\x00the NOT INDEXED clause is not allowed on UPDATE or DELETE statements within triggers\x00incomplete input\x00unrecognized token: \"%T\"\x00%s in \"%s\"\x00create\x00temp\x00temporary\x00end\x00explain\x00unable to close due to unfinalized statements or unfinished backups\x00unknown error\x00abort due to ROLLBACK\x00another row available\x00no more rows available\x00not an error\x00SQL logic error\x00access permission denied\x00query aborted\x00database is locked\x00database table is locked\x00attempt to write a readonly database\x00interrupted\x00disk I/O error\x00database disk image is malformed\x00unknown operation\x00database or disk is full\x00unable to open database file\x00locking protocol\x00constraint failed\x00datatype mismatch\x00bad parameter or other API misuse\x00authorization denied\x00column index out of range\x00file is not a database\x00notification message\x00warning message\x00unable to delete/modify user-function due to active statements\x00unable to use function %s in the requested context\x00unknown database: %s\x00unable to delete/modify collation sequence due to active statements\x00file:\x00localhost\x00invalid uri authority: %.*s\x00vfs\x00cache\x00mode\x00access\x00no such %s mode: %s\x00%s mode not allowed: %s\x00no such vfs: %s\x00shared\x00private\x00ro\x00rw\x00rwc\x00RTRIM\x00\x00\x00\x00%s at line %d of [%.10s]\x00database corruption\x00misuse\x00cannot open file\x00no such table column: %s.%s\x00SQLITE_\x00database is deadlocked\x00array\x00object\x000123456789abcdef\x00JSON cannot hold BLOB values\x00malformed JSON\x00[0]\x00JSON path error near '%q'\x00json_%s() needs an odd number of arguments\x00$[\x00$.\x00json_object() requires an even number of arguments\x00json_object() labels must be TEXT\x00set\x00insert\x00[]\x00{}\x00CREATE TABLE x(key,value,type,atom,id,parent,fullkey,path,json HIDDEN,root 
HIDDEN)\x00.%.*s\x00[%d]\x00$\x00json\x00json_array\x00json_array_length\x00json_extract\x00->\x00->>\x00json_insert\x00json_object\x00json_patch\x00json_quote\x00json_remove\x00json_replace\x00json_set\x00json_type\x00json_valid\x00json_group_array\x00json_group_object\x00json_each\x00json_tree\x00%s_node\x00data\x00DROP TABLE '%q'.'%q_node';DROP TABLE '%q'.'%q_rowid';DROP TABLE '%q'.'%q_parent';\x00RtreeMatchArg\x00SELECT * FROM %Q.%Q\x00UNIQUE constraint failed: %s.%s\x00rtree constraint failed: %s.(%s<=%s)\x00ALTER TABLE %Q.'%q_node' RENAME TO \"%w_node\";ALTER TABLE %Q.'%q_parent' RENAME TO \"%w_parent\";ALTER TABLE %Q.'%q_rowid' RENAME TO \"%w_rowid\";\x00SELECT stat FROM %Q.sqlite_stat1 WHERE tbl = '%q_rowid'\x00node\x00CREATE TABLE \"%w\".\"%w_rowid\"(rowid INTEGER PRIMARY KEY,nodeno\x00,a%d\x00);CREATE TABLE \"%w\".\"%w_node\"(nodeno INTEGER PRIMARY KEY,data);\x00CREATE TABLE \"%w\".\"%w_parent\"(nodeno INTEGER PRIMARY KEY,parentnode);\x00INSERT INTO \"%w\".\"%w_node\"VALUES(1,zeroblob(%d))\x00INSERT INTO\"%w\".\"%w_rowid\"(rowid,nodeno)VALUES(?1,?2)ON CONFLICT(rowid)DO UPDATE SET nodeno=excluded.nodeno\x00SELECT * FROM \"%w\".\"%w_rowid\" WHERE rowid=?1\x00UPDATE \"%w\".\"%w_rowid\"SET \x00a%d=coalesce(?%d,a%d)\x00a%d=?%d\x00 WHERE rowid=?1\x00INSERT OR REPLACE INTO '%q'.'%q_node' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_node' WHERE nodeno = ?1\x00SELECT nodeno FROM '%q'.'%q_rowid' WHERE rowid = ?1\x00INSERT OR REPLACE INTO '%q'.'%q_rowid' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_rowid' WHERE rowid = ?1\x00SELECT parentnode FROM '%q'.'%q_parent' WHERE nodeno = ?1\x00INSERT OR REPLACE INTO '%q'.'%q_parent' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_parent' WHERE nodeno = ?1\x00PRAGMA %Q.page_size\x00SELECT length(data) FROM '%q'.'%q_node' WHERE nodeno = 1\x00undersize RTree blobs in \"%q_node\"\x00Wrong number of columns for an rtree table\x00Too few columns for an rtree table\x00Too many columns for an rtree table\x00Auxiliary rtree columns must be last\x00CREATE TABLE x(%.*s INT\x00,%.*s\x00);\x00,%.*s REAL\x00,%.*s INT\x00{%lld\x00 %g\x00}\x00Invalid argument to rtreedepth()\x00%z%s%z\x00SELECT data FROM %Q.'%q_node' WHERE nodeno=?\x00Node %lld missing from database\x00SELECT parentnode FROM %Q.'%q_parent' WHERE nodeno=?1\x00SELECT nodeno FROM %Q.'%q_rowid' WHERE rowid=?1\x00Mapping (%lld -> %lld) missing from %s table\x00%_rowid\x00%_parent\x00Found (%lld -> %lld) in %s table, expected (%lld -> %lld)\x00Dimension %d of cell %d on node %lld is corrupt\x00Dimension %d of cell %d on node %lld is corrupt relative to parent\x00Node %lld is too small (%d bytes)\x00Rtree depth out of range (%d)\x00Node %lld is too small for cell count of %d (%d bytes)\x00SELECT count(*) FROM %Q.'%q%s'\x00Wrong number of entries in %%%s table - expected %lld, actual %lld\x00SELECT * FROM %Q.'%q_rowid'\x00Schema corrupt or not an rtree\x00_rowid\x00_parent\x00END\x00wrong number of arguments to function rtreecheck()\x00[\x00[%!g,%!g],\x00[%!g,%!g]]\x00\x00CREATE TABLE x(_shape\x00,%s\x00rtree\x00fullscan\x00_shape does not contain a valid polygon\x00geopoly_overlap\x00geopoly_within\x00geopoly\x00geopoly_area\x00geopoly_blob\x00geopoly_json\x00geopoly_svg\x00geopoly_contains_point\x00geopoly_debug\x00geopoly_bbox\x00geopoly_xform\x00geopoly_regular\x00geopoly_ccw\x00geopoly_group_bbox\x00rtreenode\x00rtreedepth\x00rtreecheck\x00rtree_i32\x00corrupt fossil delta\x00DROP TRIGGER IF EXISTS temp.rbu_insert_tr;DROP TRIGGER IF EXISTS temp.rbu_update1_tr;DROP TRIGGER IF EXISTS temp.rbu_update2_tr;DROP TRIGGER IF 
EXISTS temp.rbu_delete_tr;\x00SELECT rbu_target_name(name, type='view') AS target, name FROM sqlite_schema WHERE type IN ('table', 'view') AND target IS NOT NULL %s ORDER BY name\x00AND rootpage!=0 AND rootpage IS NOT NULL\x00SELECT name, rootpage, sql IS NULL OR substr(8, 6)=='UNIQUE' FROM main.sqlite_schema WHERE type='index' AND tbl_name = ?\x00SELECT (sql COLLATE nocase BETWEEN 'CREATE VIRTUAL' AND 'CREATE VIRTUAM'), rootpage FROM sqlite_schema WHERE name=%Q\x00PRAGMA index_list=%Q\x00SELECT rootpage FROM sqlite_schema WHERE name = %Q\x00PRAGMA table_info=%Q\x00PRAGMA main.index_list = %Q\x00PRAGMA main.index_xinfo = %Q\x00SELECT * FROM '%q'\x00rbu_\x00rbu_rowid\x00table %q %s rbu_rowid column\x00may not have\x00requires\x00PRAGMA table_info(%Q)\x00column missing from %q: %s\x00%z%s\"%w\"\x00%z%s%s\"%w\"%s\x00SELECT max(_rowid_) FROM \"%s%w\"\x00 WHERE _rowid_ > %lld \x00 DESC\x00quote(\x00||','||\x00SELECT %s FROM \"%s%w\" ORDER BY %s LIMIT 1\x00 WHERE (%s) > (%s) \x00_rowid_\x00%z%s \"%w\" COLLATE %Q\x00%z%s \"rbu_imp_%d%w\" COLLATE %Q DESC\x00%z%s quote(\"rbu_imp_%d%w\")\x00SELECT %s FROM \"rbu_imp_%w\" ORDER BY %s LIMIT 1\x00%z%s%s\x00(%s) > (%s)\x00%z%s(%.*s) COLLATE %Q\x00%z%s\"%w\" COLLATE %Q\x00%z%s\"rbu_imp_%d%w\"%s\x00%z%s\"rbu_imp_%d%w\" %s COLLATE %Q\x00%z%s\"rbu_imp_%d%w\" IS ?\x00%z%s%s.\"%w\"\x00%z%sNULL\x00%z, %s._rowid_\x00_rowid_ = ?%d\x00%z%sc%d=?%d\x00_rowid_ = (SELECT id FROM rbu_imposter2 WHERE %z)\x00%z%s\"%w\"=?%d\x00invalid rbu_control value\x00%z%s\"%w\"=rbu_delta(\"%w\", ?%d)\x00%z%s\"%w\"=rbu_fossil_delta(\"%w\", ?%d)\x00PRIMARY KEY(\x00%z%s\"%w\"%s\x00%z)\x00SELECT name FROM sqlite_schema WHERE rootpage = ?\x00%z%sc%d %s COLLATE %Q\x00%z%sc%d%s\x00%z, id INTEGER\x00CREATE TABLE rbu_imposter2(%z, PRIMARY KEY(%z)) WITHOUT ROWID\x00PRIMARY KEY \x00%z%s\"%w\" %s %sCOLLATE %Q%s\x00 NOT NULL\x00%z, %z\x00CREATE TABLE \"rbu_imp_%w\"(%z)%s\x00 WITHOUT ROWID\x00INSERT INTO %s.'rbu_tmp_%q'(rbu_control,%s%s) VALUES(%z)\x00SELECT trim(sql) FROM sqlite_schema WHERE type='index' AND name=?\x00 LIMIT -1 OFFSET %d\x00CREATE TABLE \"rbu_imp_%w\"( %s, PRIMARY KEY( %s ) ) WITHOUT ROWID\x00INSERT INTO \"rbu_imp_%w\" VALUES(%s)\x00DELETE FROM \"rbu_imp_%w\" WHERE %s\x00SELECT %s, 0 AS rbu_control FROM '%q' %s %s %s ORDER BY %s%s\x00AND\x00WHERE\x00SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' %s ORDER BY %s%s\x00SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' %s UNION ALL SELECT %s, rbu_control FROM '%q' %s %s typeof(rbu_control)='integer' AND rbu_control!=1 ORDER BY %s%s\x00rbu_imp_\x00INSERT INTO \"%s%w\"(%s%s) VALUES(%s)\x00, _rowid_\x00DELETE FROM \"%s%w\" WHERE %s\x00, rbu_rowid\x00CREATE TABLE IF NOT EXISTS %s.'rbu_tmp_%q' AS SELECT *%s FROM '%q' WHERE 0;\x00, 0 AS rbu_rowid\x00CREATE TEMP TRIGGER rbu_delete_tr BEFORE DELETE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(3, %s);END;CREATE TEMP TRIGGER rbu_update1_tr BEFORE UPDATE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(3, %s);END;CREATE TEMP TRIGGER rbu_update2_tr AFTER UPDATE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(4, %s);END;\x00CREATE TEMP TRIGGER rbu_insert_tr AFTER INSERT ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(0, %s);END;\x00,_rowid_ \x00,rbu_rowid\x00SELECT %s,%s rbu_control%s FROM '%q'%s %s %s %s\x000 AS \x00UPDATE \"%s%w\" SET %s WHERE %s\x00SELECT k, v FROM %s.rbu_state\x00file:///%s-vacuum?modeof=%s\x00ATTACH %Q AS stat\x00CREATE TABLE IF NOT EXISTS %s.rbu_state(k INTEGER PRIMARY KEY, v)\x00cannot vacuum wal mode 
database\x00file:%s-vactmp?rbu_memory=1%s%s\x00&\x00rbu_tmp_insert\x00rbu_fossil_delta\x00rbu_target_name\x00SELECT * FROM sqlite_schema\x00rbu vfs not found\x00PRAGMA main.wal_checkpoint=restart\x00rbu_exclusive_checkpoint\x00%s-oal\x00%s-wal\x00PRAGMA schema_version\x00PRAGMA schema_version = %d\x00INSERT OR REPLACE INTO %s.rbu_state(k, v) VALUES (%d, %d), (%d, %Q), (%d, %Q), (%d, %d), (%d, %d), (%d, %lld), (%d, %lld), (%d, %lld), (%d, %lld), (%d, %Q) \x00PRAGMA main.%s\x00PRAGMA main.%s = %d\x00PRAGMA writable_schema=1\x00SELECT sql FROM sqlite_schema WHERE sql!='' AND rootpage!=0 AND name!='sqlite_sequence' ORDER BY type DESC\x00SELECT * FROM sqlite_schema WHERE rootpage=0 OR rootpage IS NULL\x00INSERT INTO sqlite_schema VALUES(?,?,?,?,?)\x00PRAGMA writable_schema=0\x00DELETE FROM %s.'rbu_tmp_%q'\x00rbu_state mismatch error\x00rbu_vfs_%d\x00SELECT count(*) FROM sqlite_schema WHERE type='index' AND tbl_name = %Q\x00rbu_index_cnt\x00SELECT 1 FROM sqlite_schema WHERE tbl_name = 'rbu_count'\x00SELECT sum(cnt * (1 + rbu_index_cnt(rbu_target_name(tbl))))FROM rbu_count\x00cannot update wal mode database\x00database modified during rbu %s\x00vacuum\x00update\x00BEGIN IMMEDIATE\x00PRAGMA journal_mode=off\x00-vactmp\x00DELETE FROM stat.rbu_state\x00rbu/zipvfs setup error\x00rbu(%s)/%z\x00rbu_memory\x00SELECT 0, 'tbl', '', 0, '', 1 UNION ALL SELECT 1, 'idx', '', 0, '', 2 UNION ALL SELECT 2, 'stat', '', 0, '', 0\x00PRAGMA '%q'.table_info('%q')\x00%z%s\"%w\".\"%w\".\"%w\"=\"%w\".\"%w\".\"%w\"\x00%z%s\"%w\".\"%w\".\"%w\" IS NOT \"%w\".\"%w\".\"%w\"\x00 OR \x00SELECT * FROM \"%w\".\"%w\" WHERE NOT EXISTS ( SELECT 1 FROM \"%w\".\"%w\" WHERE %s)\x00SELECT * FROM \"%w\".\"%w\", \"%w\".\"%w\" WHERE %s AND (%z)\x00table schemas do not match\x00SELECT tbl, ?2, stat FROM %Q.sqlite_stat1 WHERE tbl IS ?1 AND idx IS (CASE WHEN ?2=X'' THEN NULL ELSE ?2 END)\x00SELECT * FROM \x00 WHERE \x00 IS ?\x00SAVEPOINT changeset\x00RELEASE changeset\x00UPDATE main.\x00 SET \x00 = ?\x00idx IS CASE WHEN length(?4)=0 AND typeof(?4)='blob' THEN NULL ELSE ?4 END \x00DELETE FROM main.\x00 AND (?\x00AND \x00INSERT INTO main.\x00) VALUES(?\x00, ?\x00INSERT INTO main.sqlite_stat1 VALUES(?1, CASE WHEN length(?2)=0 AND typeof(?2)='blob' THEN NULL ELSE ?2 END, ?3)\x00DELETE FROM main.sqlite_stat1 WHERE tbl=?1 AND idx IS CASE WHEN length(?2)=0 AND typeof(?2)='blob' THEN NULL ELSE ?2 END AND (?4 OR stat IS ?3)\x00SAVEPOINT replace_op\x00RELEASE replace_op\x00SAVEPOINT changeset_apply\x00PRAGMA defer_foreign_keys = 1\x00sqlite3changeset_apply(): no such table: %s\x00sqlite3changeset_apply(): table %s has %d columns, expected %d or more\x00sqlite3changeset_apply(): primary key mismatch for table %s\x00PRAGMA defer_foreign_keys = 0\x00RELEASE changeset_apply\x00ROLLBACK TO changeset_apply\x00fts5: parser stack overflow\x00fts5: syntax error near \"%.*s\"\x00%z%.*s\x00wrong number of arguments to function highlight()\x00wrong number of arguments to function snippet()\x00snippet\x00highlight\x00bm25\x00prefix\x00malformed prefix=... directive\x00too many prefix indexes (max %d)\x00prefix length out of range (max 999)\x00tokenize\x00multiple tokenize=... directives\x00parse error in tokenize directive\x00content\x00multiple content=... directives\x00%Q.%Q\x00content_rowid\x00multiple content_rowid=... directives\x00columnsize\x00malformed columnsize=... directive\x00columns\x00malformed detail=... 
directive\x00unrecognized option: \"%.*s\"\x00reserved fts5 column name: %s\x00unindexed\x00unrecognized column option: %s\x00T.%Q\x00, T.%Q\x00, T.c%d\x00reserved fts5 table name: %s\x00parse error in \"%s\"\x00docsize\x00%Q.'%q_%s'\x00CREATE TABLE x(\x00%z%s%Q\x00%z, %Q HIDDEN, %s HIDDEN)\x00pgsz\x00hashsize\x00automerge\x00usermerge\x00crisismerge\x00SELECT k, v FROM %Q.'%q_config'\x00version\x00invalid fts5 file format (found %d, expected %d) - run 'rebuild'\x00unterminated string\x00fts5: syntax error near \"%.1s\"\x00OR\x00NOT\x00NEAR\x00expected integer, got \"%.*s\"\x00fts5: column queries are not supported (detail=none)\x00fts5: %s queries are not supported (detail!=full)\x00phrase\x00block\x00REPLACE INTO '%q'.'%q_data'(id, block) VALUES(?,?)\x00DELETE FROM '%q'.'%q_data' WHERE id>=? AND id<=?\x00DELETE FROM '%q'.'%q_idx' WHERE segid=?\x00PRAGMA %Q.data_version\x00SELECT pgno FROM '%q'.'%q_idx' WHERE segid=? AND term<=? ORDER BY term DESC LIMIT 1\x00INSERT INTO '%q'.'%q_idx'(segid,term,pgno) VALUES(?,?,?)\x00%s_data\x00id INTEGER PRIMARY KEY, block BLOB\x00segid, term, pgno, PRIMARY KEY(segid, term)\x00SELECT segid, term, (pgno>>1), (pgno&1) FROM %Q.'%q_idx' WHERE segid=%d ORDER BY 1, 2\x00\x00\x00\x00\x00\x00recursively defined fts5 content table\x00SELECT rowid, rank FROM %Q.%Q ORDER BY %s(\"%w\"%s%s) %s\x00DESC\x00ASC\x00reads\x00unknown special query: %.*s\x00SELECT %s\x00no such function: %s\x00parse error in rank function: %s\x00%s: table does not support scanning\x00delete-all\x00'delete-all' may only be used with a contentless or external content fts5 table\x00rebuild\x00'rebuild' may not be used with a contentless fts5 table\x00merge\x00integrity-check\x00cannot %s contentless fts5 table: %s\x00DELETE from\x00no such cursor: %lld\x00no such tokenizer: %s\x00error in tokenizer constructor\x00fts5_api_ptr\x00fts5: 2023-03-22 11:56:21 0d1fc92f94cb6b76bffe3ec34d69cffde2924203304e8ffc4155597af0c191da\x00config\x00fts5\x00fts5_source_id\x00SELECT %s FROM %s T WHERE T.%Q >= ? AND T.%Q <= ? ORDER BY T.%Q ASC\x00SELECT %s FROM %s T WHERE T.%Q <= ? AND T.%Q >= ? 
ORDER BY T.%Q DESC\x00SELECT %s FROM %s T WHERE T.%Q=?\x00INSERT INTO %Q.'%q_content' VALUES(%s)\x00REPLACE INTO %Q.'%q_content' VALUES(%s)\x00DELETE FROM %Q.'%q_content' WHERE id=?\x00REPLACE INTO %Q.'%q_docsize' VALUES(?,?)\x00DELETE FROM %Q.'%q_docsize' WHERE id=?\x00SELECT sz FROM %Q.'%q_docsize' WHERE id=?\x00REPLACE INTO %Q.'%q_config' VALUES(?,?)\x00SELECT %s FROM %s AS T\x00DROP TABLE IF EXISTS %Q.'%q_data';DROP TABLE IF EXISTS %Q.'%q_idx';DROP TABLE IF EXISTS %Q.'%q_config';\x00DROP TABLE IF EXISTS %Q.'%q_docsize';\x00DROP TABLE IF EXISTS %Q.'%q_content';\x00ALTER TABLE %Q.'%q_%s' RENAME TO '%q_%s';\x00CREATE TABLE %Q.'%q_%q'(%s)%s\x00fts5: error creating shadow table %q_%s: %s\x00id INTEGER PRIMARY KEY\x00, c%d\x00id INTEGER PRIMARY KEY, sz BLOB\x00k PRIMARY KEY, v\x00DELETE FROM %Q.'%q_data';DELETE FROM %Q.'%q_idx';\x00DELETE FROM %Q.'%q_docsize';\x00SELECT count(*) FROM %Q.'%q_%s'\x00tokenchars\x00separators\x00L* N* Co\x00categories\x00remove_diacritics\x00unicode61\x00al\x00ance\x00ence\x00er\x00ic\x00able\x00ible\x00ant\x00ement\x00ment\x00ent\x00ion\x00ou\x00ism\x00ate\x00iti\x00ous\x00ive\x00ize\x00at\x00bl\x00ble\x00iz\x00ational\x00tional\x00tion\x00enci\x00anci\x00izer\x00logi\x00bli\x00alli\x00entli\x00eli\x00e\x00ousli\x00ization\x00ation\x00ator\x00alism\x00iveness\x00fulness\x00ful\x00ousness\x00aliti\x00iviti\x00biliti\x00ical\x00ness\x00icate\x00iciti\x00ative\x00alize\x00eed\x00ee\x00ed\x00ing\x00case_sensitive\x00ascii\x00porter\x00trigram\x00col\x00row\x00instance\x00fts5vocab: unknown table type: %Q\x00CREATE TABlE vocab(term, col, doc, cnt)\x00CREATE TABlE vocab(term, doc, cnt)\x00CREATE TABlE vocab(term, doc, col, offset)\x00wrong number of vtable arguments\x00recursive definition for %s.%s\x00SELECT t.%Q FROM %Q.%Q AS t WHERE t.%Q MATCH '*id'\x00no such fts5 table: %s.%s\x00fts5vocab\x002023-03-22 11:56:21 0d1fc92f94cb6b76bffe3ec34d69cffde2924203304e8ffc4155597af0c191da\x00" var ts = (*reflect.StringHeader)(unsafe.Pointer(&ts1)).Data diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/sqlite/lib/sqlite_windows_arm64.go temporal-1.22.5/src/vendor/modernc.org/sqlite/lib/sqlite_windows_arm64.go --- temporal-1.21.5-1/src/vendor/modernc.org/sqlite/lib/sqlite_windows_arm64.go 2023-09-29 14:03:35.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/sqlite/lib/sqlite_windows_arm64.go 2024-02-23 09:46:16.000000000 +0000 @@ -1,4 +1,4 @@ -// Code generated by 'ccgo -DSQLITE_PRIVATE= -export-defines "" -export-enums "" -export-externs X -export-fields F -export-typedefs "" -ignore-unsupported-alignment -pkgname sqlite3 -volatile=sqlite3_io_error_pending,sqlite3_open_file_count,sqlite3_pager_readdb_count,sqlite3_pager_writedb_count,sqlite3_pager_writej_count,sqlite3_search_count,sqlite3_sort_count,saved_cnt,randomnessPid -o lib/sqlite_windows_amd64.go -trace-translation-units testdata/sqlite-amalgamation-3410000/sqlite3.c -full-path-comments -DNDEBUG -DHAVE_USLEEP -DLONGDOUBLE_TYPE=double -DSQLITE_CORE -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_FTS5 -DSQLITE_ENABLE_GEOPOLY -DSQLITE_ENABLE_MATH_FUNCTIONS -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_OFFSET_SQL_FUNC -DSQLITE_ENABLE_PREUPDATE_HOOK -DSQLITE_ENABLE_RBU -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_SESSION -DSQLITE_ENABLE_SNAPSHOT -DSQLITE_ENABLE_STAT4 -DSQLITE_ENABLE_UNLOCK_NOTIFY -DSQLITE_LIKE_DOESNT_MATCH_BLOBS -DSQLITE_MUTEX_APPDEF=1 -DSQLITE_MUTEX_NOOP -DSQLITE_SOUNDEX -DSQLITE_THREADSAFE=1 -DSQLITE_OS_WIN=1 -D_MSC_VER=1900', DO NOT EDIT. 
+// Code generated by 'ccgo -DSQLITE_PRIVATE= -export-defines "" -export-enums "" -export-externs X -export-fields F -export-typedefs "" -ignore-unsupported-alignment -pkgname sqlite3 -volatile=sqlite3_io_error_pending,sqlite3_open_file_count,sqlite3_pager_readdb_count,sqlite3_pager_writedb_count,sqlite3_pager_writej_count,sqlite3_search_count,sqlite3_sort_count,saved_cnt,randomnessPid -o lib/sqlite_windows_amd64.go -trace-translation-units testdata/sqlite-amalgamation-3410200/sqlite3.c -full-path-comments -DNDEBUG -DHAVE_USLEEP -DLONGDOUBLE_TYPE=double -DSQLITE_CORE -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_FTS5 -DSQLITE_ENABLE_GEOPOLY -DSQLITE_ENABLE_MATH_FUNCTIONS -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_OFFSET_SQL_FUNC -DSQLITE_ENABLE_PREUPDATE_HOOK -DSQLITE_ENABLE_RBU -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_SESSION -DSQLITE_ENABLE_SNAPSHOT -DSQLITE_ENABLE_STAT4 -DSQLITE_ENABLE_UNLOCK_NOTIFY -DSQLITE_LIKE_DOESNT_MATCH_BLOBS -DSQLITE_MUTEX_APPDEF=1 -DSQLITE_MUTEX_NOOP -DSQLITE_SOUNDEX -DSQLITE_THREADSAFE=1 -DSQLITE_OS_WIN=1 -D_MSC_VER=1900', DO NOT EDIT. package sqlite3 @@ -10492,11 +10492,11 @@ NC_OrderAgg = 0x8000000 NC_PartIdx = 0x000002 NC_SelfRef = 0x00002e + NC_Subquery = 0x000040 NC_UAggInfo = 0x000100 NC_UBaseReg = 0x000400 NC_UEList = 0x000080 NC_UUpsert = 0x000200 - NC_VarSelect = 0x000040 NDEBUG = 1 NDR_ASCII_CHAR = 0 NDR_BIG_ENDIAN = 0 @@ -15089,7 +15089,7 @@ SQLITE_SHM_UNLOCK = 1 SQLITE_SORTER_PMASZ = 250 SQLITE_SOUNDEX = 1 - SQLITE_SOURCE_ID = "2023-02-21 18:09:37 05941c2a04037fc3ed2ffae11f5d2260706f89431f463518740f72ada350866d" + SQLITE_SOURCE_ID = "2023-03-22 11:56:21 0d1fc92f94cb6b76bffe3ec34d69cffde2924203304e8ffc4155597af0c191da" SQLITE_SO_ASC = 0 SQLITE_SO_DESC = 1 SQLITE_SO_UNDEFINED = -1 @@ -15200,8 +15200,8 @@ SQLITE_UTF8 = 1 SQLITE_VDBEINT_H = 0 SQLITE_VDBE_H = 0 - SQLITE_VERSION = "3.41.0" - SQLITE_VERSION_NUMBER = 3041000 + SQLITE_VERSION = "3.41.2" + SQLITE_VERSION_NUMBER = 3041002 SQLITE_VTABRISK_High = 2 SQLITE_VTABRISK_Low = 0 SQLITE_VTABRISK_Normal = 1 @@ -24337,7 +24337,8 @@ FiIdxCur int32 FiIdxCol int32 FbMaybeNullRow U8 - F__ccgo_pad1 [3]byte + Faff U8 + F__ccgo_pad1 [2]byte FpIENext uintptr } @@ -24982,17 +24983,18 @@ // Handle type for pages. type PgHdr2 = struct { - FpPage uintptr - FpData uintptr - FpExtra uintptr - FpCache uintptr - FpDirty uintptr - FpPager uintptr - Fpgno Pgno - Fflags U16 - FnRef I16 - FpDirtyNext uintptr - FpDirtyPrev uintptr + FpPage uintptr + FpData uintptr + FpExtra uintptr + FpCache uintptr + FpDirty uintptr + FpPager uintptr + Fpgno Pgno + Fflags U16 + F__ccgo_pad1 [2]byte + FnRef I64 + FpDirtyNext uintptr + FpDirtyPrev uintptr } // Handle type for pages. 
@@ -25213,14 +25215,14 @@ FpDirty uintptr FpDirtyTail uintptr FpSynced uintptr - FnRefSum int32 + FnRefSum I64 FszCache int32 FszSpill int32 FszPage int32 FszExtra int32 FbPurgeable U8 FeCreate U8 - F__ccgo_pad1 [2]byte + F__ccgo_pad1 [6]byte FxStress uintptr FpStress uintptr FpCache uintptr @@ -26027,7 +26029,7 @@ _ = pMutex if op < 0 || op >= int32(uint64(unsafe.Sizeof([10]Sqlite3StatValueType{}))/uint64(unsafe.Sizeof(Sqlite3StatValueType(0)))) { - return Xsqlite3MisuseError(tls, 23229) + return Xsqlite3MisuseError(tls, 23233) } if statMutex[op] != 0 { pMutex = Xsqlite3Pcache1Mutex(tls) @@ -60911,7 +60913,7 @@ if dwRet == libc.Uint32(libc.Uint32FromInt32(-1)) && libc.AssignUint32(&lastErrno, (*(*func(*libc.TLS) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls)) != DWORD(0) { (*WinFile)(unsafe.Pointer(pFile)).FlastErrno = lastErrno - winLogErrorAtLine(tls, SQLITE_IOERR|int32(22)<<8, (*WinFile)(unsafe.Pointer(pFile)).FlastErrno, ts+4597, (*WinFile)(unsafe.Pointer(pFile)).FzPath, 47684) + winLogErrorAtLine(tls, SQLITE_IOERR|int32(22)<<8, (*WinFile)(unsafe.Pointer(pFile)).FlastErrno, ts+4597, (*WinFile)(unsafe.Pointer(pFile)).FzPath, 47688) return 1 } @@ -60937,7 +60939,7 @@ if rc != 0 { return SQLITE_OK } - return winLogErrorAtLine(tls, SQLITE_IOERR|int32(16)<<8, (*(*func(*libc.TLS) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls), ts+4609, (*WinFile)(unsafe.Pointer(pFile)).FzPath, 47780) + return winLogErrorAtLine(tls, SQLITE_IOERR|int32(16)<<8, (*(*func(*libc.TLS) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls), ts+4609, (*WinFile)(unsafe.Pointer(pFile)).FzPath, 47784) } func winRead(tls *libc.TLS, id uintptr, pBuf uintptr, amt int32, offset Sqlite3_int64) int32 { @@ -60971,9 +60973,9 @@ } (*WinFile)(unsafe.Pointer(pFile)).FlastErrno = *(*DWORD)(unsafe.Pointer(bp + 40)) - return winLogErrorAtLine(tls, SQLITE_IOERR|int32(1)<<8, (*WinFile)(unsafe.Pointer(pFile)).FlastErrno, ts+4618, (*WinFile)(unsafe.Pointer(pFile)).FzPath, 47848) + return winLogErrorAtLine(tls, SQLITE_IOERR|int32(1)<<8, (*WinFile)(unsafe.Pointer(pFile)).FlastErrno, ts+4618, (*WinFile)(unsafe.Pointer(pFile)).FzPath, 47852) } - winLogIoerr(tls, *(*int32)(unsafe.Pointer(bp + 36)), 47851) + winLogIoerr(tls, *(*int32)(unsafe.Pointer(bp + 36)), 47855) if *(*DWORD)(unsafe.Pointer(bp + 32)) < DWORD(amt) { libc.Xmemset(tls, pBuf+uintptr(*(*DWORD)(unsafe.Pointer(bp + 32))), 0, uint64(DWORD(amt)-*(*DWORD)(unsafe.Pointer(bp + 32)))) @@ -61029,12 +61031,12 @@ if rc != 0 { if (*WinFile)(unsafe.Pointer(pFile)).FlastErrno == DWORD(39) || (*WinFile)(unsafe.Pointer(pFile)).FlastErrno == DWORD(112) { - return winLogErrorAtLine(tls, SQLITE_FULL, (*WinFile)(unsafe.Pointer(pFile)).FlastErrno, ts+4626, (*WinFile)(unsafe.Pointer(pFile)).FzPath, 47960) + return winLogErrorAtLine(tls, SQLITE_FULL, (*WinFile)(unsafe.Pointer(pFile)).FlastErrno, ts+4626, (*WinFile)(unsafe.Pointer(pFile)).FzPath, 47964) } - return winLogErrorAtLine(tls, SQLITE_IOERR|int32(3)<<8, (*WinFile)(unsafe.Pointer(pFile)).FlastErrno, ts+4636, (*WinFile)(unsafe.Pointer(pFile)).FzPath, 47965) + return winLogErrorAtLine(tls, SQLITE_IOERR|int32(3)<<8, (*WinFile)(unsafe.Pointer(pFile)).FlastErrno, ts+4636, (*WinFile)(unsafe.Pointer(pFile)).FzPath, 47969) } else { - winLogIoerr(tls, *(*int32)(unsafe.Pointer(bp + 36)), 47968) + winLogIoerr(tls, *(*int32)(unsafe.Pointer(bp + 36)), 47972) } return SQLITE_OK @@ -61061,10 +61063,10 @@ winUnmapfile(tls, pFile) if winSeekFile(tls, pFile, nByte) != 0 { - rc = 
winLogErrorAtLine(tls, SQLITE_IOERR|int32(6)<<8, (*WinFile)(unsafe.Pointer(pFile)).FlastErrno, ts+4646, (*WinFile)(unsafe.Pointer(pFile)).FzPath, 48031) + rc = winLogErrorAtLine(tls, SQLITE_IOERR|int32(6)<<8, (*WinFile)(unsafe.Pointer(pFile)).FlastErrno, ts+4646, (*WinFile)(unsafe.Pointer(pFile)).FzPath, 48035) } else if 0 == (*(*func(*libc.TLS, HANDLE) WINBOOL)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 53*24 + 8)))(tls, (*WinFile)(unsafe.Pointer(pFile)).Fh) && libc.AssignUint32(&lastErrno, (*(*func(*libc.TLS) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls)) != DWORD(1224) { (*WinFile)(unsafe.Pointer(pFile)).FlastErrno = lastErrno - rc = winLogErrorAtLine(tls, SQLITE_IOERR|int32(6)<<8, (*WinFile)(unsafe.Pointer(pFile)).FlastErrno, ts+4659, (*WinFile)(unsafe.Pointer(pFile)).FzPath, 48036) + rc = winLogErrorAtLine(tls, SQLITE_IOERR|int32(6)<<8, (*WinFile)(unsafe.Pointer(pFile)).FlastErrno, ts+4659, (*WinFile)(unsafe.Pointer(pFile)).FzPath, 48040) } if rc == SQLITE_OK && oldMmapSize > int64(0) { @@ -61090,7 +61092,7 @@ } else { (*WinFile)(unsafe.Pointer(pFile)).FlastErrno = (*(*func(*libc.TLS) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls) - return winLogErrorAtLine(tls, SQLITE_IOERR|int32(24)<<8, (*WinFile)(unsafe.Pointer(pFile)).FlastErrno, ts+4672, (*WinFile)(unsafe.Pointer(pFile)).FzPath, 48128) + return winLogErrorAtLine(tls, SQLITE_IOERR|int32(24)<<8, (*WinFile)(unsafe.Pointer(pFile)).FlastErrno, ts+4672, (*WinFile)(unsafe.Pointer(pFile)).FzPath, 48132) } } rc = (*(*func(*libc.TLS, HANDLE) WINBOOL)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 13*24 + 8)))(tls, (*WinFile)(unsafe.Pointer(pFile)).Fh) @@ -61100,7 +61102,7 @@ } else { (*WinFile)(unsafe.Pointer(pFile)).FlastErrno = (*(*func(*libc.TLS) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls) - return winLogErrorAtLine(tls, SQLITE_IOERR|int32(4)<<8, (*WinFile)(unsafe.Pointer(pFile)).FlastErrno, ts+4681, (*WinFile)(unsafe.Pointer(pFile)).FzPath, 48143) + return winLogErrorAtLine(tls, SQLITE_IOERR|int32(4)<<8, (*WinFile)(unsafe.Pointer(pFile)).FlastErrno, ts+4681, (*WinFile)(unsafe.Pointer(pFile)).FzPath, 48147) } return int32(0) } @@ -61121,7 +61123,7 @@ if lowerBits == 0xffffffff && libc.AssignUint32(&lastErrno, (*(*func(*libc.TLS) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls)) != DWORD(0) { (*WinFile)(unsafe.Pointer(pFile)).FlastErrno = lastErrno - rc = winLogErrorAtLine(tls, SQLITE_IOERR|int32(7)<<8, (*WinFile)(unsafe.Pointer(pFile)).FlastErrno, ts+4690, (*WinFile)(unsafe.Pointer(pFile)).FzPath, 48184) + rc = winLogErrorAtLine(tls, SQLITE_IOERR|int32(7)<<8, (*WinFile)(unsafe.Pointer(pFile)).FlastErrno, ts+4690, (*WinFile)(unsafe.Pointer(pFile)).FzPath, 48188) } } @@ -61163,7 +61165,7 @@ } if res == 0 && libc.AssignUint32(&lastErrno, (*(*func(*libc.TLS) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls)) != DWORD(158) { (*WinFile)(unsafe.Pointer(pFile)).FlastErrno = lastErrno - winLogErrorAtLine(tls, SQLITE_IOERR|int32(8)<<8, (*WinFile)(unsafe.Pointer(pFile)).FlastErrno, ts+4702, (*WinFile)(unsafe.Pointer(pFile)).FzPath, 48279) + winLogErrorAtLine(tls, SQLITE_IOERR|int32(8)<<8, (*WinFile)(unsafe.Pointer(pFile)).FlastErrno, ts+4702, (*WinFile)(unsafe.Pointer(pFile)).FzPath, 48283) } return res @@ -61289,7 +61291,7 @@ if type1 >= EXCLUSIVE_LOCK { winUnlockFile(tls, pFile+16, uint32(Xsqlite3PendingByte+2), uint32(0), uint32(SHARED_SIZE), uint32(0)) if locktype == SHARED_LOCK && 
!(winGetReadLock(tls, pFile) != 0) { - rc = winLogErrorAtLine(tls, SQLITE_IOERR|int32(8)<<8, (*(*func(*libc.TLS) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls), ts+4720, (*WinFile)(unsafe.Pointer(pFile)).FzPath, 48505) + rc = winLogErrorAtLine(tls, SQLITE_IOERR|int32(8)<<8, (*(*func(*libc.TLS) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls), ts+4720, (*WinFile)(unsafe.Pointer(pFile)).FzPath, 48509) } } if type1 >= RESERVED_LOCK { @@ -61592,7 +61594,7 @@ return SQLITE_READONLY | int32(5)<<8 } else if winTruncate(tls, pShmNode+16, int64(0)) != 0 { winShmSystemLock(tls, pShmNode, WINSHM_UNLCK, (22+SQLITE_SHM_NLOCK)*4+SQLITE_SHM_NLOCK, 1) - return winLogErrorAtLine(tls, SQLITE_IOERR|int32(18)<<8, (*(*func(*libc.TLS) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls), ts+4730, (*WinShmNode)(unsafe.Pointer(pShmNode)).FzFilename, 48971) + return winLogErrorAtLine(tls, SQLITE_IOERR|int32(18)<<8, (*(*func(*libc.TLS) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls), ts+4730, (*WinShmNode)(unsafe.Pointer(pShmNode)).FzFilename, 48975) } } @@ -61699,7 +61701,7 @@ if !(rc != SQLITE_OK) { goto __13 } - rc = winLogErrorAtLine(tls, rc, (*(*func(*libc.TLS) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls), ts+4770, (*WinShmNode)(unsafe.Pointer(pShmNode)).FzFilename, 49053) + rc = winLogErrorAtLine(tls, rc, (*(*func(*libc.TLS) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls), ts+4770, (*WinShmNode)(unsafe.Pointer(pShmNode)).FzFilename, 49057) goto shm_open_err __13: ; @@ -61927,7 +61929,7 @@ if !(rc != SQLITE_OK) { goto __6 } - rc = winLogErrorAtLine(tls, SQLITE_IOERR|int32(19)<<8, (*(*func(*libc.TLS) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls), ts+4781, (*WinFile)(unsafe.Pointer(pDbFd)).FzPath, 49324) + rc = winLogErrorAtLine(tls, SQLITE_IOERR|int32(19)<<8, (*(*func(*libc.TLS) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls), ts+4781, (*WinFile)(unsafe.Pointer(pDbFd)).FzPath, 49328) goto shmpage_out __6: ; @@ -61945,7 +61947,7 @@ if !(rc != SQLITE_OK) { goto __9 } - rc = winLogErrorAtLine(tls, SQLITE_IOERR|int32(19)<<8, (*(*func(*libc.TLS) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls), ts+4792, (*WinFile)(unsafe.Pointer(pDbFd)).FzPath, 49339) + rc = winLogErrorAtLine(tls, SQLITE_IOERR|int32(19)<<8, (*(*func(*libc.TLS) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls), ts+4792, (*WinFile)(unsafe.Pointer(pDbFd)).FzPath, 49343) goto shmpage_out __9: ; @@ -61993,7 +61995,7 @@ goto __15 } (*WinShmNode)(unsafe.Pointer(pShmNode)).FlastErrno = (*(*func(*libc.TLS) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls) - rc = winLogErrorAtLine(tls, SQLITE_IOERR|int32(21)<<8, (*WinShmNode)(unsafe.Pointer(pShmNode)).FlastErrno, ts+4803, (*WinFile)(unsafe.Pointer(pDbFd)).FzPath, 49398) + rc = winLogErrorAtLine(tls, SQLITE_IOERR|int32(21)<<8, (*WinShmNode)(unsafe.Pointer(pShmNode)).FlastErrno, ts+4803, (*WinFile)(unsafe.Pointer(pDbFd)).FzPath, 49402) if !(hMap != 0) { goto __16 } @@ -62039,7 +62041,7 @@ if !((*(*func(*libc.TLS, LPCVOID) WINBOOL)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 59*24 + 8)))(tls, (*WinFile)(unsafe.Pointer(pFile)).FpMapRegion) != 0) { (*WinFile)(unsafe.Pointer(pFile)).FlastErrno = (*(*func(*libc.TLS) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls) - return 
winLogErrorAtLine(tls, SQLITE_IOERR|int32(24)<<8, (*WinFile)(unsafe.Pointer(pFile)).FlastErrno, ts+4814, (*WinFile)(unsafe.Pointer(pFile)).FzPath, 49447) + return winLogErrorAtLine(tls, SQLITE_IOERR|int32(24)<<8, (*WinFile)(unsafe.Pointer(pFile)).FlastErrno, ts+4814, (*WinFile)(unsafe.Pointer(pFile)).FzPath, 49451) } (*WinFile)(unsafe.Pointer(pFile)).FpMapRegion = uintptr(0) (*WinFile)(unsafe.Pointer(pFile)).FmmapSize = int64(0) @@ -62048,7 +62050,7 @@ if !((*(*func(*libc.TLS, HANDLE) WINBOOL)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 3*24 + 8)))(tls, (*WinFile)(unsafe.Pointer(pFile)).FhMap) != 0) { (*WinFile)(unsafe.Pointer(pFile)).FlastErrno = (*(*func(*libc.TLS) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls) - return winLogErrorAtLine(tls, SQLITE_IOERR|int32(24)<<8, (*WinFile)(unsafe.Pointer(pFile)).FlastErrno, ts+4828, (*WinFile)(unsafe.Pointer(pFile)).FzPath, 49458) + return winLogErrorAtLine(tls, SQLITE_IOERR|int32(24)<<8, (*WinFile)(unsafe.Pointer(pFile)).FlastErrno, ts+4828, (*WinFile)(unsafe.Pointer(pFile)).FzPath, 49462) } (*WinFile)(unsafe.Pointer(pFile)).FhMap = uintptr(0) } @@ -62092,7 +62094,7 @@ DWORD(*(*Sqlite3_int64)(unsafe.Pointer(bp))&int64(0xffffffff)), uintptr(0)) if (*WinFile)(unsafe.Pointer(pFd)).FhMap == uintptr(0) { (*WinFile)(unsafe.Pointer(pFd)).FlastErrno = (*(*func(*libc.TLS) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls) - rc = winLogErrorAtLine(tls, SQLITE_IOERR|int32(24)<<8, (*WinFile)(unsafe.Pointer(pFd)).FlastErrno, ts+4842, (*WinFile)(unsafe.Pointer(pFd)).FzPath, 49535) + rc = winLogErrorAtLine(tls, SQLITE_IOERR|int32(24)<<8, (*WinFile)(unsafe.Pointer(pFd)).FlastErrno, ts+4842, (*WinFile)(unsafe.Pointer(pFd)).FzPath, 49539) return SQLITE_OK } @@ -62102,7 +62104,7 @@ (*(*func(*libc.TLS, HANDLE) WINBOOL)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 3*24 + 8)))(tls, (*WinFile)(unsafe.Pointer(pFd)).FhMap) (*WinFile)(unsafe.Pointer(pFd)).FhMap = uintptr(0) (*WinFile)(unsafe.Pointer(pFd)).FlastErrno = (*(*func(*libc.TLS) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls) - rc = winLogErrorAtLine(tls, SQLITE_IOERR|int32(24)<<8, (*WinFile)(unsafe.Pointer(pFd)).FlastErrno, ts+4854, (*WinFile)(unsafe.Pointer(pFd)).FzPath, 49553) + rc = winLogErrorAtLine(tls, SQLITE_IOERR|int32(24)<<8, (*WinFile)(unsafe.Pointer(pFd)).FlastErrno, ts+4854, (*WinFile)(unsafe.Pointer(pFd)).FzPath, 49557) return SQLITE_OK } @@ -62266,7 +62268,7 @@ Xsqlite3_mutex_leave(tls, Xsqlite3MutexAlloc(tls, SQLITE_MUTEX_STATIC_VFS1)) Xsqlite3_free(tls, zBuf) - return winLogErrorAtLine(tls, SQLITE_ERROR, uint32(0), ts+4874, uintptr(0), 49855) + return winLogErrorAtLine(tls, SQLITE_ERROR, uint32(0), ts+4874, uintptr(0), 49859) } Xsqlite3_snprintf(tls, nMax, zBuf, ts+4493, libc.VaList(bp, Xsqlite3_temp_directory)) } @@ -62283,7 +62285,7 @@ Xsqlite3_free(tls, zWidePath) Xsqlite3_free(tls, zBuf) - return winLogErrorAtLine(tls, SQLITE_IOERR|int32(25)<<8, (*(*func(*libc.TLS) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls), ts+4890, uintptr(0), 49955) + return winLogErrorAtLine(tls, SQLITE_IOERR|int32(25)<<8, (*(*func(*libc.TLS) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls), ts+4890, uintptr(0), 49959) } zMulti = winUnicodeToUtf8(tls, zWidePath) if zMulti != 0 { @@ -62307,7 +62309,7 @@ if (*(*func(*libc.TLS, DWORD, LPSTR) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 31*24 + 8)))(tls, uint32(nMax), zMbcsPath) == DWORD(0) { 
Xsqlite3_free(tls, zBuf) - return winLogErrorAtLine(tls, SQLITE_IOERR|int32(25)<<8, (*(*func(*libc.TLS) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls), ts+4906, uintptr(0), 49982) + return winLogErrorAtLine(tls, SQLITE_IOERR|int32(25)<<8, (*(*func(*libc.TLS) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls), ts+4906, uintptr(0), 49986) } zUtf8 = winMbcsToUtf8(tls, zMbcsPath, (*(*func(*libc.TLS) WINBOOL)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 8)))(tls)) if zUtf8 != 0 { @@ -62323,14 +62325,14 @@ if !(winMakeEndInDirSep(tls, nDir+1, zBuf) != 0) { Xsqlite3_free(tls, zBuf) - return winLogErrorAtLine(tls, SQLITE_ERROR, uint32(0), ts+4922, uintptr(0), 50006) + return winLogErrorAtLine(tls, SQLITE_ERROR, uint32(0), ts+4922, uintptr(0), 50010) } nLen = Xsqlite3Strlen30(tls, zBuf) if nLen+nPre+17 > nBuf { Xsqlite3_free(tls, zBuf) - return winLogErrorAtLine(tls, SQLITE_ERROR, uint32(0), ts+4938, uintptr(0), 50024) + return winLogErrorAtLine(tls, SQLITE_ERROR, uint32(0), ts+4938, uintptr(0), 50028) } Xsqlite3_snprintf(tls, nBuf-16-nLen, zBuf+uintptr(nLen), ts+4866, 0) @@ -62512,7 +62514,7 @@ } } } - winLogIoerr(tls, *(*int32)(unsafe.Pointer(bp + 8)), 50313) + winLogIoerr(tls, *(*int32)(unsafe.Pointer(bp + 8)), 50317) if h == libc.UintptrFromInt64(int64(-1)) { Xsqlite3_free(tls, zConverted) @@ -62523,8 +62525,8 @@ pOutFlags) } else { (*WinFile)(unsafe.Pointer(pFile)).FlastErrno = *(*DWORD)(unsafe.Pointer(bp + 12)) - winLogErrorAtLine(tls, SQLITE_CANTOPEN, (*WinFile)(unsafe.Pointer(pFile)).FlastErrno, ts+5027, zUtf8Name, 50328) - return Xsqlite3CantopenError(tls, 50329) + winLogErrorAtLine(tls, SQLITE_CANTOPEN, (*WinFile)(unsafe.Pointer(pFile)).FlastErrno, ts+5027, zUtf8Name, 50332) + return Xsqlite3CantopenError(tls, 50333) } } @@ -62639,9 +62641,9 @@ } } if rc != 0 && rc != SQLITE_IOERR|int32(23)<<8 { - rc = winLogErrorAtLine(tls, SQLITE_IOERR|int32(10)<<8, *(*DWORD)(unsafe.Pointer(bp + 4)), ts+5040, zFilename, 50501) + rc = winLogErrorAtLine(tls, SQLITE_IOERR|int32(10)<<8, *(*DWORD)(unsafe.Pointer(bp + 4)), ts+5040, zFilename, 50505) } else { - winLogIoerr(tls, *(*int32)(unsafe.Pointer(bp)), 50503) + winLogIoerr(tls, *(*int32)(unsafe.Pointer(bp)), 50507) } Xsqlite3_free(tls, zConverted) @@ -62679,10 +62681,10 @@ attr = (*WIN32_FILE_ATTRIBUTE_DATA)(unsafe.Pointer(bp)).FdwFileAttributes } } else { - winLogIoerr(tls, *(*int32)(unsafe.Pointer(bp + 36)), 50553) + winLogIoerr(tls, *(*int32)(unsafe.Pointer(bp + 36)), 50557) if *(*DWORD)(unsafe.Pointer(bp + 40)) != DWORD(2) && *(*DWORD)(unsafe.Pointer(bp + 40)) != DWORD(3) { Xsqlite3_free(tls, zConverted) - return winLogErrorAtLine(tls, SQLITE_IOERR|int32(13)<<8, *(*DWORD)(unsafe.Pointer(bp + 40)), ts+5050, zFilename, 50556) + return winLogErrorAtLine(tls, SQLITE_IOERR|int32(13)<<8, *(*DWORD)(unsafe.Pointer(bp + 40)), ts+5050, zFilename, 50560) } else { attr = libc.Uint32(libc.Uint32FromInt32(-1)) } @@ -62760,7 +62762,7 @@ nByte = (*(*func(*libc.TLS, LPCWSTR, DWORD, LPWSTR, uintptr) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 25*24 + 8)))(tls, zConverted, uint32(0), uintptr(0), uintptr(0)) if nByte == DWORD(0) { Xsqlite3_free(tls, zConverted) - return winLogErrorAtLine(tls, SQLITE_CANTOPEN|int32(3)<<8, (*(*func(*libc.TLS) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls), ts+5067, zRelative, 50773) + return winLogErrorAtLine(tls, SQLITE_CANTOPEN|int32(3)<<8, (*(*func(*libc.TLS) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 
8)))(tls), ts+5067, zRelative, 50777) } nByte = nByte + DWORD(3) zTemp = Xsqlite3MallocZero(tls, uint64(nByte)*uint64(unsafe.Sizeof(WCHAR(0)))) @@ -62772,7 +62774,7 @@ if nByte == DWORD(0) { Xsqlite3_free(tls, zConverted) Xsqlite3_free(tls, zTemp) - return winLogErrorAtLine(tls, SQLITE_CANTOPEN|int32(3)<<8, (*(*func(*libc.TLS) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls), ts+5084, zRelative, 50786) + return winLogErrorAtLine(tls, SQLITE_CANTOPEN|int32(3)<<8, (*(*func(*libc.TLS) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls), ts+5084, zRelative, 50790) } Xsqlite3_free(tls, zConverted) zOut = winUnicodeToUtf8(tls, zTemp) @@ -62782,7 +62784,7 @@ nByte = (*(*func(*libc.TLS, LPCSTR, DWORD, LPSTR, uintptr) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 24*24 + 8)))(tls, zConverted, uint32(0), uintptr(0), uintptr(0)) if nByte == DWORD(0) { Xsqlite3_free(tls, zConverted) - return winLogErrorAtLine(tls, SQLITE_CANTOPEN|int32(3)<<8, (*(*func(*libc.TLS) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls), ts+5101, zRelative, 50799) + return winLogErrorAtLine(tls, SQLITE_CANTOPEN|int32(3)<<8, (*(*func(*libc.TLS) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls), ts+5101, zRelative, 50803) } nByte = nByte + DWORD(3) zTemp = Xsqlite3MallocZero(tls, uint64(nByte)*uint64(unsafe.Sizeof(int8(0)))) @@ -62794,7 +62796,7 @@ if nByte == DWORD(0) { Xsqlite3_free(tls, zConverted) Xsqlite3_free(tls, zTemp) - return winLogErrorAtLine(tls, SQLITE_CANTOPEN|int32(3)<<8, (*(*func(*libc.TLS) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls), ts+5118, zRelative, 50812) + return winLogErrorAtLine(tls, SQLITE_CANTOPEN|int32(3)<<8, (*(*func(*libc.TLS) DWORD)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 26*24 + 8)))(tls), ts+5118, zRelative, 50816) } Xsqlite3_free(tls, zConverted) zOut = winMbcsToUtf8(tls, zTemp, (*(*func(*libc.TLS) WINBOOL)(unsafe.Pointer(uintptr(unsafe.Pointer(&aSyscall)) + 8)))(tls)) @@ -64424,7 +64426,7 @@ libc.Xmemset(tls, pPgHdr+32, 0, uint64(unsafe.Sizeof(PgHdr{}))-uint64(uintptr(0)+32)) (*PgHdr)(unsafe.Pointer(pPgHdr)).FpPage = pPage (*PgHdr)(unsafe.Pointer(pPgHdr)).FpData = (*Sqlite3_pcache_page)(unsafe.Pointer(pPage)).FpBuf - (*PgHdr)(unsafe.Pointer(pPgHdr)).FpExtra = pPgHdr + 1*72 + (*PgHdr)(unsafe.Pointer(pPgHdr)).FpExtra = pPgHdr + 1*80 libc.Xmemset(tls, (*PgHdr)(unsafe.Pointer(pPgHdr)).FpExtra, 0, uint64(8)) (*PgHdr)(unsafe.Pointer(pPgHdr)).FpCache = pCache (*PgHdr)(unsafe.Pointer(pPgHdr)).Fpgno = pgno @@ -64454,7 +64456,7 @@ // reference count drops to 0, then it is made eligible for recycling. 
func Xsqlite3PcacheRelease(tls *libc.TLS, p uintptr) { (*PCache)(unsafe.Pointer((*PgHdr)(unsafe.Pointer(p)).FpCache)).FnRefSum-- - if int32(libc.PreDecInt16(&(*PgHdr)(unsafe.Pointer(p)).FnRef, 1)) == 0 { + if libc.PreDecInt64(&(*PgHdr)(unsafe.Pointer(p)).FnRef, 1) == int64(0) { if int32((*PgHdr)(unsafe.Pointer(p)).Fflags)&PGHDR_CLEAN != 0 { pcacheUnpin(tls, p) } else { @@ -64505,7 +64507,7 @@ *(*U16)(unsafe.Pointer(p + 52)) &= libc.Uint16FromInt32(libc.CplInt32(PGHDR_DIRTY | PGHDR_NEED_SYNC | PGHDR_WRITEABLE)) *(*U16)(unsafe.Pointer(p + 52)) |= U16(PGHDR_CLEAN) - if int32((*PgHdr)(unsafe.Pointer(p)).FnRef) == 0 { + if (*PgHdr)(unsafe.Pointer(p)).FnRef == int64(0) { pcacheUnpin(tls, p) } } @@ -64609,8 +64611,8 @@ } func pcacheMergeDirtyList(tls *libc.TLS, pA uintptr, pB uintptr) uintptr { - bp := tls.Alloc(72) - defer tls.Free(72) + bp := tls.Alloc(80) + defer tls.Free(80) var pTail uintptr pTail = bp @@ -64688,13 +64690,13 @@ // // This is not the total number of pages referenced, but the sum of the // reference count for all pages. -func Xsqlite3PcacheRefCount(tls *libc.TLS, pCache uintptr) int32 { +func Xsqlite3PcacheRefCount(tls *libc.TLS, pCache uintptr) I64 { return (*PCache)(unsafe.Pointer(pCache)).FnRefSum } // Return the number of references to the page supplied as an argument. -func Xsqlite3PcachePageRefcount(tls *libc.TLS, p uintptr) int32 { - return int32((*PgHdr)(unsafe.Pointer(p)).FnRef) +func Xsqlite3PcachePageRefcount(tls *libc.TLS, p uintptr) I64 { + return (*PgHdr)(unsafe.Pointer(p)).FnRef } // Return the total number of pages in the cache. @@ -66988,7 +66990,7 @@ pPg = Xsqlite3PagerLookup(tls, pPager, iPg) if pPg != 0 { - if Xsqlite3PcachePageRefcount(tls, pPg) == 1 { + if Xsqlite3PcachePageRefcount(tls, pPg) == int64(1) { Xsqlite3PcacheDrop(tls, pPg) } else { rc = readDbPage(tls, pPg) @@ -67421,7 +67423,7 @@ var pageSize U32 = *(*U32)(unsafe.Pointer(pPageSize)) if (int32((*Pager)(unsafe.Pointer(pPager)).FmemDb) == 0 || (*Pager)(unsafe.Pointer(pPager)).FdbSize == Pgno(0)) && - Xsqlite3PcacheRefCount(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache) == 0 && + Xsqlite3PcacheRefCount(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache) == int64(0) && pageSize != 0 && pageSize != U32((*Pager)(unsafe.Pointer(pPager)).FpageSize) { var pNew uintptr = uintptr(0) *(*I64)(unsafe.Pointer(bp)) = int64(0) @@ -67573,9 +67575,9 @@ Xsqlite3OsUnfetch(tls, (*Pager)(unsafe.Pointer(pPager)).Ffd, I64(pgno-Pgno(1))*(*Pager)(unsafe.Pointer(pPager)).FpageSize, pData) return SQLITE_NOMEM } - (*PgHdr)(unsafe.Pointer(p)).FpExtra = p + 1*72 + (*PgHdr)(unsafe.Pointer(p)).FpExtra = p + 1*80 (*PgHdr)(unsafe.Pointer(p)).Fflags = U16(PGHDR_MMAP) - (*PgHdr)(unsafe.Pointer(p)).FnRef = int16(1) + (*PgHdr)(unsafe.Pointer(p)).FnRef = int64(1) (*PgHdr)(unsafe.Pointer(p)).FpPager = pPager } @@ -67907,7 +67909,7 @@ for rc == SQLITE_OK && pList != 0 { var pNext uintptr = (*PgHdr)(unsafe.Pointer(pList)).FpDirty - if int32((*PgHdr)(unsafe.Pointer(pList)).FnRef) == 0 { + if (*PgHdr)(unsafe.Pointer(pList)).FnRef == int64(0) { rc = pagerStress(tls, pPager, pList) } pList = pNext @@ -68057,7 +68059,7 @@ goto __12 } - rc = Xsqlite3CantopenError(tls, 60235) + rc = Xsqlite3CantopenError(tls, 60239) __12: ; if !(rc != SQLITE_OK) { @@ -68438,7 +68440,7 @@ if !(rc == SQLITE_OK && *(*int32)(unsafe.Pointer(bp + 8))&SQLITE_OPEN_READONLY != 0) { goto __10 } - rc = Xsqlite3CantopenError(tls, 60754) + rc = Xsqlite3CantopenError(tls, 60758) Xsqlite3OsClose(tls, (*Pager)(unsafe.Pointer(pPager)).Fjfd) __10: ; @@ -68544,7 +68546,7 @@ } func 
pagerUnlockIfUnused(tls *libc.TLS, pPager uintptr) { - if Xsqlite3PcacheRefCount(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache) == 0 { + if Xsqlite3PcacheRefCount(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache) == int64(0) { pagerUnlockAndRollback(tls, pPager) } } @@ -68562,7 +68564,7 @@ if !(pgno == Pgno(0)) { goto __1 } - return Xsqlite3CorruptError(tls, 60967) + return Xsqlite3CorruptError(tls, 60971) __1: ; *(*uintptr)(unsafe.Pointer(bp)) = Xsqlite3PcacheFetch(tls, (*Pager)(unsafe.Pointer(pPager)).FpPCache, pgno, 3) @@ -68601,7 +68603,7 @@ if !(pgno == (*Pager)(unsafe.Pointer(pPager)).FlckPgno) { goto __7 } - rc = Xsqlite3CorruptError(tls, 60999) + rc = Xsqlite3CorruptError(tls, 61003) goto pager_acquire_err __7: ; @@ -68678,7 +68680,7 @@ (int32((*Pager)(unsafe.Pointer(pPager)).FeState) == PAGER_READER || flags&PAGER_GET_READONLY != 0)) if pgno <= Pgno(1) && pgno == Pgno(0) { - return Xsqlite3CorruptError(tls, 61078) + return Xsqlite3CorruptError(tls, 61082) } if bMmapOk != 0 && (*Pager)(unsafe.Pointer(pPager)).FpWal != uintptr(0) { @@ -69436,7 +69438,7 @@ // Return the number of references to the specified page. func Xsqlite3PagerPageRefcount(tls *libc.TLS, pPage uintptr) int32 { - return Xsqlite3PcachePageRefcount(tls, pPage) + return int32(Xsqlite3PcachePageRefcount(tls, pPage)) } // Parameter eStat must be one of SQLITE_DBSTATUS_CACHE_HIT, _MISS, _WRITE, @@ -69679,9 +69681,9 @@ pPgOld = Xsqlite3PagerLookup(tls, pPager, pgno) if pPgOld != 0 { - if int32((*PgHdr)(unsafe.Pointer(pPgOld)).FnRef) > 1 { + if (*PgHdr)(unsafe.Pointer(pPgOld)).FnRef > int64(1) { Xsqlite3PagerUnrefNotNull(tls, pPgOld) - return Xsqlite3CorruptError(tls, 62623) + return Xsqlite3CorruptError(tls, 62627) } *(*U16)(unsafe.Pointer(pPg + 52)) |= U16(int32((*PgHdr)(unsafe.Pointer(pPgOld)).Fflags) & PGHDR_NEED_SYNC) if (*Pager)(unsafe.Pointer(pPager)).FtempFile != 0 { @@ -70438,7 +70440,7 @@ nCollide = idx for iKey = walHash(tls, iPage); *(*Ht_slot)(unsafe.Pointer((*WalHashLoc)(unsafe.Pointer(bp)).FaHash + uintptr(iKey)*2)) != 0; iKey = walNextHash(tls, iKey) { if libc.PostDecInt32(&nCollide, 1) == 0 { - return Xsqlite3CorruptError(tls, 64387) + return Xsqlite3CorruptError(tls, 64391) } } *(*U32)(unsafe.Pointer((*WalHashLoc)(unsafe.Pointer(bp)).FaPgno + uintptr(idx-1)*4)) = iPage @@ -70537,7 +70539,7 @@ if !(version != U32(WAL_MAX_VERSION)) { goto __7 } - rc = Xsqlite3CantopenError(tls, 64519) + rc = Xsqlite3CantopenError(tls, 64523) goto finished __7: ; @@ -71123,7 +71125,7 @@ goto __14 } - rc = Xsqlite3CorruptError(tls, 65333) + rc = Xsqlite3CorruptError(tls, 65337) goto __15 __14: Xsqlite3OsFileControlHint(tls, (*Wal)(unsafe.Pointer(pWal)).FpDbFd, SQLITE_FCNTL_SIZE_HINT, bp+16) @@ -71398,7 +71400,7 @@ } if badHdr == 0 && (*Wal)(unsafe.Pointer(pWal)).Fhdr.FiVersion != U32(WALINDEX_MAX_VERSION) { - rc = Xsqlite3CantopenError(tls, 65682) + rc = Xsqlite3CantopenError(tls, 65686) } if (*Wal)(unsafe.Pointer(pWal)).FbShmUnreliable != 0 { if rc != SQLITE_OK { @@ -71871,7 +71873,7 @@ iRead = iFrame } if libc.PostDecInt32(&nCollide, 1) == 0 { - return Xsqlite3CorruptError(tls, 66419) + return Xsqlite3CorruptError(tls, 66423) } iKey = walNextHash(tls, iKey) } @@ -72376,7 +72378,7 @@ if rc == SQLITE_OK { if (*Wal)(unsafe.Pointer(pWal)).Fhdr.FmxFrame != 0 && walPagesize(tls, pWal) != nBuf { - rc = Xsqlite3CorruptError(tls, 67138) + rc = Xsqlite3CorruptError(tls, 67142) } else { rc = walCheckpoint(tls, pWal, db, eMode2, xBusy2, pBusyArg, sync_flags, zBuf) } @@ -73034,7 +73036,7 @@ } Xsqlite3VdbeRecordUnpack(tls, pKeyInfo, 
int32(nKey), pKey, pIdxKey) if int32((*UnpackedRecord)(unsafe.Pointer(pIdxKey)).FnField) == 0 || int32((*UnpackedRecord)(unsafe.Pointer(pIdxKey)).FnField) > int32((*KeyInfo)(unsafe.Pointer(pKeyInfo)).FnAllField) { - rc = Xsqlite3CorruptError(tls, 69249) + rc = Xsqlite3CorruptError(tls, 69253) } else { rc = Xsqlite3BtreeIndexMoveto(tls, pCur, pIdxKey, pRes) } @@ -73171,7 +73173,7 @@ if !(key == Pgno(0)) { goto __2 } - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69430) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69434) return __2: ; @@ -73188,7 +73190,7 @@ goto __4 } - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69443) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69447) goto ptrmap_exit __4: ; @@ -73196,7 +73198,7 @@ if !(offset < 0) { goto __5 } - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69448) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69452) goto ptrmap_exit __5: ; @@ -73239,7 +73241,7 @@ offset = int32(Pgno(5) * (key - Pgno(iPtrmap) - Pgno(1))) if offset < 0 { Xsqlite3PagerUnref(tls, *(*uintptr)(unsafe.Pointer(bp))) - return Xsqlite3CorruptError(tls, 69493) + return Xsqlite3CorruptError(tls, 69497) } *(*U8)(unsafe.Pointer(pEType)) = *(*U8)(unsafe.Pointer(pPtrmap + uintptr(offset))) @@ -73249,7 +73251,7 @@ Xsqlite3PagerUnref(tls, *(*uintptr)(unsafe.Pointer(bp))) if int32(*(*U8)(unsafe.Pointer(pEType))) < 1 || int32(*(*U8)(unsafe.Pointer(pEType))) > 5 { - return Xsqlite3CorruptError(tls, 69501) + return Xsqlite3CorruptError(tls, 69505) } return SQLITE_OK } @@ -73499,7 +73501,7 @@ if U32((*CellInfo)(unsafe.Pointer(bp)).FnLocal) < (*CellInfo)(unsafe.Pointer(bp)).FnPayload { var ovfl Pgno if Uptr((*MemPage)(unsafe.Pointer(pSrc)).FaDataEnd) >= Uptr(pCell) && Uptr((*MemPage)(unsafe.Pointer(pSrc)).FaDataEnd) < Uptr(pCell+uintptr((*CellInfo)(unsafe.Pointer(bp)).FnLocal)) { - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69893) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 69897) return } ovfl = Xsqlite3Get4byte(tls, pCell+uintptr(int32((*CellInfo)(unsafe.Pointer(bp)).FnSize)-4)) @@ -73546,7 +73548,7 @@ if !(iFree > usableSize-4) { goto __2 } - return Xsqlite3CorruptError(tls, 69951) + return Xsqlite3CorruptError(tls, 69955) __2: ; if !(iFree != 0) { @@ -73556,7 +73558,7 @@ if !(iFree2 > usableSize-4) { goto __4 } - return Xsqlite3CorruptError(tls, 69954) + return Xsqlite3CorruptError(tls, 69958) __4: ; if !(0 == iFree2 || int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree2)))) == 0 && int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree2+1)))) == 0) { @@ -73569,7 +73571,7 @@ if !(top >= iFree) { goto __6 } - return Xsqlite3CorruptError(tls, 69962) + return Xsqlite3CorruptError(tls, 69966) __6: ; if !(iFree2 != 0) { @@ -73578,14 +73580,14 @@ if !(iFree+sz > iFree2) { goto __9 } - return Xsqlite3CorruptError(tls, 69965) + return Xsqlite3CorruptError(tls, 69969) __9: ; sz2 = int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree2+2))))<<8 | int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree2+2) + 1))) if !(iFree2+sz2 > usableSize) { goto __10 } - return Xsqlite3CorruptError(tls, 69967) + return Xsqlite3CorruptError(tls, 69971) __10: ; libc.Xmemmove(tls, data+uintptr(iFree+sz+sz2), data+uintptr(iFree+sz), uint64(iFree2-(iFree+sz))) @@ -73595,7 +73597,7 @@ if !(iFree+sz > usableSize) { goto __11 } - return Xsqlite3CorruptError(tls, 69971) + return Xsqlite3CorruptError(tls, 69975) __11: ; __8: @@ -73659,7 +73661,7 @@ if !(pc < iCellStart || pc > iCellLast) { goto __22 } - return 
Xsqlite3CorruptError(tls, 70004) + return Xsqlite3CorruptError(tls, 70008) __22: ; size = int32((*struct { @@ -73669,7 +73671,7 @@ if !(cbrk < iCellStart || pc+size > usableSize) { goto __23 } - return Xsqlite3CorruptError(tls, 70010) + return Xsqlite3CorruptError(tls, 70014) __23: ; *(*U8)(unsafe.Pointer(pAddr1)) = U8(cbrk >> 8) @@ -73691,7 +73693,7 @@ if !(int32(*(*uint8)(unsafe.Pointer(data + uintptr(hdr+7))))+cbrk-iCellFirst != (*MemPage)(unsafe.Pointer(pPage)).FnFree) { goto __24 } - return Xsqlite3CorruptError(tls, 70024) + return Xsqlite3CorruptError(tls, 70028) __24: ; *(*uint8)(unsafe.Pointer(data + uintptr(hdr+5))) = U8(cbrk >> 8) @@ -73726,7 +73728,7 @@ *(*U8)(unsafe.Pointer(aData + uintptr(hdr+7))) += U8(int32(U8(x))) return aData + uintptr(pc) } else if x+pc > maxPC { - *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70081) + *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70085) return uintptr(0) } else { *(*U8)(unsafe.Pointer(aData + uintptr(pc+2))) = U8(x >> 8) @@ -73739,13 +73741,13 @@ pc = int32(*(*U8)(unsafe.Pointer(pTmp)))<<8 | int32(*(*U8)(unsafe.Pointer(pTmp + 1))) if pc <= iAddr { if pc != 0 { - *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70096) + *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70100) } return uintptr(0) } } if pc > maxPC+nByte-4 { - *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70103) + *(*int32)(unsafe.Pointer(pRc)) = Xsqlite3CorruptError(tls, 70107) } return uintptr(0) } @@ -73770,7 +73772,7 @@ if top == 0 && (*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize == U32(65536) { top = 65536 } else { - return Xsqlite3CorruptError(tls, 70152) + return Xsqlite3CorruptError(tls, 70156) } } @@ -73781,7 +73783,7 @@ *(*int32)(unsafe.Pointer(pIdx)) = libc.AssignInt32(&g2, int32((int64(pSpace)-int64(data))/1)) if g2 <= gap { - return Xsqlite3CorruptError(tls, 70170) + return Xsqlite3CorruptError(tls, 70174) } else { return SQLITE_OK } @@ -73833,22 +73835,22 @@ if int32(iFreeBlk) == 0 { break } - return Xsqlite3CorruptError(tls, 70249) + return Xsqlite3CorruptError(tls, 70253) } iPtr = iFreeBlk } if U32(iFreeBlk) > (*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize-U32(4) { - return Xsqlite3CorruptError(tls, 70254) + return Xsqlite3CorruptError(tls, 70258) } if iFreeBlk != 0 && iEnd+U32(3) >= U32(iFreeBlk) { nFrag = U8(U32(iFreeBlk) - iEnd) if iEnd > U32(iFreeBlk) { - return Xsqlite3CorruptError(tls, 70266) + return Xsqlite3CorruptError(tls, 70270) } iEnd = U32(int32(iFreeBlk) + (int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(iFreeBlk)+2))))<<8 | int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(iFreeBlk)+2) + 1))))) if iEnd > (*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize { - return Xsqlite3CorruptError(tls, 70269) + return Xsqlite3CorruptError(tls, 70273) } iSize = U16(iEnd - U32(iStart)) iFreeBlk = U16(int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFreeBlk))))<<8 | int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFreeBlk) + 1)))) @@ -73858,7 +73860,7 @@ var iPtrEnd int32 = int32(iPtr) + (int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(iPtr)+2))))<<8 | int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(iPtr)+2) + 1)))) if iPtrEnd+3 >= int32(iStart) { if iPtrEnd > int32(iStart) { - return Xsqlite3CorruptError(tls, 70282) + return Xsqlite3CorruptError(tls, 70286) } nFrag = U8(int32(nFrag) + (int32(iStart) - iPtrEnd)) iSize = U16(iEnd - U32(iPtr)) @@ -73866,7 +73868,7 @@ } } if int32(nFrag) > 
int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+7)))) { - return Xsqlite3CorruptError(tls, 70288) + return Xsqlite3CorruptError(tls, 70292) } *(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+7))) -= uint8(int32(nFrag)) } @@ -73874,10 +73876,10 @@ x = U16(int32(*(*U8)(unsafe.Pointer(pTmp)))<<8 | int32(*(*U8)(unsafe.Pointer(pTmp + 1)))) if int32(iStart) <= int32(x) { if int32(iStart) < int32(x) { - return Xsqlite3CorruptError(tls, 70297) + return Xsqlite3CorruptError(tls, 70301) } if int32(iPtr) != int32(hdr)+1 { - return Xsqlite3CorruptError(tls, 70298) + return Xsqlite3CorruptError(tls, 70302) } *(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+1))) = U8(int32(iFreeBlk) >> 8) *(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+1) + 1)) = U8(iFreeBlk) @@ -73937,7 +73939,7 @@ (*MemPage)(unsafe.Pointer(pPage)).FxParseCell = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr) }{btreeParseCellPtrIndex})) - return Xsqlite3CorruptError(tls, 70357) + return Xsqlite3CorruptError(tls, 70361) } } else { (*MemPage)(unsafe.Pointer(pPage)).FchildPtrSize = U8(4) @@ -73973,7 +73975,7 @@ (*MemPage)(unsafe.Pointer(pPage)).FxParseCell = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr) }{btreeParseCellPtrIndex})) - return Xsqlite3CorruptError(tls, 70381) + return Xsqlite3CorruptError(tls, 70385) } } return SQLITE_OK @@ -74003,11 +74005,11 @@ var next U32 var size U32 if pc < top { - return Xsqlite3CorruptError(tls, 70432) + return Xsqlite3CorruptError(tls, 70436) } for 1 != 0 { if pc > iCellLast { - return Xsqlite3CorruptError(tls, 70437) + return Xsqlite3CorruptError(tls, 70441) } next = U32(int32(*(*U8)(unsafe.Pointer(data + uintptr(pc))))<<8 | int32(*(*U8)(unsafe.Pointer(data + uintptr(pc) + 1)))) size = U32(int32(*(*U8)(unsafe.Pointer(data + uintptr(pc+2))))<<8 | int32(*(*U8)(unsafe.Pointer(data + uintptr(pc+2) + 1)))) @@ -74018,15 +74020,15 @@ pc = int32(next) } if next > U32(0) { - return Xsqlite3CorruptError(tls, 70447) + return Xsqlite3CorruptError(tls, 70451) } if U32(pc)+size > uint32(usableSize) { - return Xsqlite3CorruptError(tls, 70451) + return Xsqlite3CorruptError(tls, 70455) } } if nFree > usableSize || nFree < iCellFirst { - return Xsqlite3CorruptError(tls, 70463) + return Xsqlite3CorruptError(tls, 70467) } (*MemPage)(unsafe.Pointer(pPage)).FnFree = int32(U16(nFree - iCellFirst)) return SQLITE_OK @@ -74054,14 +74056,14 @@ pc = int32(libc.X__builtin_bswap16(tls, *(*U16)(unsafe.Pointer(data + uintptr(cellOffset+i*2))))) if pc < iCellFirst || pc > iCellLast { - return Xsqlite3CorruptError(tls, 70494) + return Xsqlite3CorruptError(tls, 70498) } sz = int32((*struct { f func(*libc.TLS, uintptr, uintptr) U16 })(unsafe.Pointer(&struct{ uintptr }{(*MemPage)(unsafe.Pointer(pPage)).FxCellSize})).f(tls, pPage, data+uintptr(pc))) if pc+sz > usableSize { - return Xsqlite3CorruptError(tls, 70499) + return Xsqlite3CorruptError(tls, 70503) } } return SQLITE_OK @@ -74075,7 +74077,7 @@ data = (*MemPage)(unsafe.Pointer(pPage)).FaData + uintptr((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset) if decodeFlags(tls, pPage, int32(*(*U8)(unsafe.Pointer(data)))) != 0 { - return Xsqlite3CorruptError(tls, 70531) + return Xsqlite3CorruptError(tls, 70535) } (*MemPage)(unsafe.Pointer(pPage)).FmaskPage = U16((*BtShared)(unsafe.Pointer(pBt)).FpageSize - U32(1)) @@ -74087,7 +74089,7 @@ (*MemPage)(unsafe.Pointer(pPage)).FnCell = U16(int32(*(*U8)(unsafe.Pointer(data + 3)))<<8 | int32(*(*U8)(unsafe.Pointer(data + 3 + 1)))) if 
U32((*MemPage)(unsafe.Pointer(pPage)).FnCell) > ((*BtShared)(unsafe.Pointer(pBt)).FpageSize-U32(8))/U32(6) { - return Xsqlite3CorruptError(tls, 70545) + return Xsqlite3CorruptError(tls, 70549) } (*MemPage)(unsafe.Pointer(pPage)).FnFree = -1 @@ -74190,7 +74192,7 @@ if !(pgno > btreePagecount(tls, pBt)) { goto __1 } - rc = Xsqlite3CorruptError(tls, 70700) + rc = Xsqlite3CorruptError(tls, 70704) goto getAndInitPage_error1 __1: ; @@ -74218,7 +74220,7 @@ if !(pCur != 0 && (int32((*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppPage)))).FnCell) < 1 || int32((*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppPage)))).FintKey) != int32((*BtCursor)(unsafe.Pointer(pCur)).FcurIntKey))) { goto __5 } - rc = Xsqlite3CorruptError(tls, 70721) + rc = Xsqlite3CorruptError(tls, 70725) goto getAndInitPage_error2 __5: ; @@ -74257,7 +74259,7 @@ if Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppPage)))).FpDbPage) > 1 { releasePage(tls, *(*uintptr)(unsafe.Pointer(ppPage))) *(*uintptr)(unsafe.Pointer(ppPage)) = uintptr(0) - return Xsqlite3CorruptError(tls, 70787) + return Xsqlite3CorruptError(tls, 70791) } (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppPage)))).FisInit = U8(0) } else { @@ -75140,7 +75142,7 @@ if !(Xsqlite3WritableSchema(tls, (*BtShared)(unsafe.Pointer(pBt)).Fdb) == 0) { goto __18 } - rc = Xsqlite3CorruptError(tls, 71722) + rc = Xsqlite3CorruptError(tls, 71726) goto page1_init_failed goto __19 __18: @@ -75555,7 +75557,7 @@ if int32(eType) == PTRMAP_OVERFLOW2 { if Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer(pPage)).FaData) != iFrom { - return Xsqlite3CorruptError(tls, 72143) + return Xsqlite3CorruptError(tls, 72147) } Xsqlite3Put4byte(tls, (*MemPage)(unsafe.Pointer(pPage)).FaData, iTo) } else { @@ -75581,7 +75583,7 @@ })(unsafe.Pointer(&struct{ uintptr }{(*MemPage)(unsafe.Pointer(pPage)).FxParseCell})).f(tls, pPage, pCell, bp) if U32((*CellInfo)(unsafe.Pointer(bp)).FnLocal) < (*CellInfo)(unsafe.Pointer(bp)).FnPayload { if pCell+uintptr((*CellInfo)(unsafe.Pointer(bp)).FnSize) > (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr((*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize) { - return Xsqlite3CorruptError(tls, 72162) + return Xsqlite3CorruptError(tls, 72166) } if iFrom == Xsqlite3Get4byte(tls, pCell+uintptr((*CellInfo)(unsafe.Pointer(bp)).FnSize)-uintptr(4)) { Xsqlite3Put4byte(tls, pCell+uintptr((*CellInfo)(unsafe.Pointer(bp)).FnSize)-uintptr(4), iTo) @@ -75590,7 +75592,7 @@ } } else { if pCell+uintptr(4) > (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr((*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize) { - return Xsqlite3CorruptError(tls, 72171) + return Xsqlite3CorruptError(tls, 72175) } if Xsqlite3Get4byte(tls, pCell) == iFrom { Xsqlite3Put4byte(tls, pCell, iTo) @@ -75601,7 +75603,7 @@ if i == nCell { if int32(eType) != PTRMAP_BTREE || Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr(int32((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset)+8)) != iFrom { - return Xsqlite3CorruptError(tls, 72183) + return Xsqlite3CorruptError(tls, 72187) } Xsqlite3Put4byte(tls, (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr(int32((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset)+8), iTo) } @@ -75617,7 +75619,7 @@ var pPager uintptr = (*BtShared)(unsafe.Pointer(pBt)).FpPager if iDbPage < Pgno(3) { - return Xsqlite3CorruptError(tls, 72218) + return Xsqlite3CorruptError(tls, 72222) } *(*int32)(unsafe.Pointer(bp)) = Xsqlite3PagerMovepage(tls, pPager, 
(*MemPage)(unsafe.Pointer(pDbPage)).FpDbPage, iFreePage, isCommit) @@ -75678,7 +75680,7 @@ return rc } if int32(*(*U8)(unsafe.Pointer(bp))) == PTRMAP_ROOTPAGE { - return Xsqlite3CorruptError(tls, 72316) + return Xsqlite3CorruptError(tls, 72320) } if int32(*(*U8)(unsafe.Pointer(bp))) == PTRMAP_FREEPAGE { @@ -75713,7 +75715,7 @@ releasePage(tls, *(*uintptr)(unsafe.Pointer(bp + 32))) if *(*Pgno)(unsafe.Pointer(bp + 40)) > dbSize { releasePage(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) - return Xsqlite3CorruptError(tls, 72368) + return Xsqlite3CorruptError(tls, 72372) } } @@ -75773,7 +75775,7 @@ var nFin Pgno = finalDbSize(tls, pBt, nOrig, nFree) if nOrig < nFin || nFree >= nOrig { - rc = Xsqlite3CorruptError(tls, 72436) + rc = Xsqlite3CorruptError(tls, 72440) } else if nFree > Pgno(0) { rc = saveAllCursors(tls, pBt, uint32(0), uintptr(0)) if rc == SQLITE_OK { @@ -75812,7 +75814,7 @@ nOrig = btreePagecount(tls, pBt) if ptrmapPageno(tls, pBt, nOrig) == nOrig || nOrig == U32(Xsqlite3PendingByte)/(*BtShared)(unsafe.Pointer(pBt)).FpageSize+U32(1) { - return Xsqlite3CorruptError(tls, 72487) + return Xsqlite3CorruptError(tls, 72491) } nFree = Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer((*BtShared)(unsafe.Pointer(pBt)).FpPage1)).FaData+36) @@ -75843,7 +75845,7 @@ } nFin = finalDbSize(tls, pBt, nOrig, nVac) if nFin > nOrig { - return Xsqlite3CorruptError(tls, 72514) + return Xsqlite3CorruptError(tls, 72518) } if nFin < nOrig { rc = saveAllCursors(tls, pBt, uint32(0), uintptr(0)) @@ -76184,7 +76186,7 @@ if iTable <= Pgno(1) { if iTable < Pgno(1) { - return Xsqlite3CorruptError(tls, 72978) + return Xsqlite3CorruptError(tls, 72982) } else if btreePagecount(tls, pBt) == Pgno(0) { iTable = Pgno(0) } @@ -76428,14 +76430,14 @@ var pBt uintptr = (*BtCursor)(unsafe.Pointer(pCur)).FpBt if int32((*BtCursor)(unsafe.Pointer(pCur)).Fix) >= int32((*MemPage)(unsafe.Pointer(pPage)).FnCell) { - return Xsqlite3CorruptError(tls, 73385) + return Xsqlite3CorruptError(tls, 73389) } getCellInfo(tls, pCur) aPayload = (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload if Uptr((int64(aPayload)-int64((*MemPage)(unsafe.Pointer(pPage)).FaData))/1) > Uptr((*BtShared)(unsafe.Pointer(pBt)).FusableSize-U32((*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal)) { - return Xsqlite3CorruptError(tls, 73400) + return Xsqlite3CorruptError(tls, 73404) } if offset < U32((*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal) { @@ -76480,7 +76482,7 @@ for *(*Pgno)(unsafe.Pointer(bp)) != 0 { if *(*Pgno)(unsafe.Pointer(bp)) > (*BtShared)(unsafe.Pointer(pBt)).FnPage { - return Xsqlite3CorruptError(tls, 73462) + return Xsqlite3CorruptError(tls, 73466) } *(*Pgno)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pCur)).FaOverflow + uintptr(iIdx)*4)) = *(*Pgno)(unsafe.Pointer(bp)) @@ -76529,7 +76531,7 @@ } if rc == SQLITE_OK && amt > U32(0) { - return Xsqlite3CorruptError(tls, 73547) + return Xsqlite3CorruptError(tls, 73551) } return rc } @@ -76609,7 +76611,7 @@ func moveToChild(tls *libc.TLS, pCur uintptr, newPgno U32) int32 { if int32((*BtCursor)(unsafe.Pointer(pCur)).FiPage) >= BTCURSOR_MAX_DEPTH-1 { - return Xsqlite3CorruptError(tls, 73684) + return Xsqlite3CorruptError(tls, 73688) } (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnSize = U16(0) *(*U8)(unsafe.Pointer(pCur + 1)) &= libc.Uint8FromInt32(libc.CplInt32(BTCF_ValidNKey | BTCF_ValidOvfl)) @@ -76700,7 +76702,7 @@ if !(int32((*MemPage)(unsafe.Pointer(pRoot)).FisInit) == 0 || libc.Bool32((*BtCursor)(unsafe.Pointer(pCur)).FpKeyInfo == uintptr(0)) != int32((*MemPage)(unsafe.Pointer(pRoot)).FintKey)) { goto __11 } - 
return Xsqlite3CorruptError(tls, 73823) + return Xsqlite3CorruptError(tls, 73827) __11: ; skip_init: @@ -76720,7 +76722,7 @@ if !((*MemPage)(unsafe.Pointer(pRoot)).Fpgno != Pgno(1)) { goto __16 } - return Xsqlite3CorruptError(tls, 73835) + return Xsqlite3CorruptError(tls, 73839) __16: ; subpage = Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer(pRoot)).FaData+uintptr(int32((*MemPage)(unsafe.Pointer(pRoot)).FhdrOffset)+8)) @@ -76930,7 +76932,7 @@ if !(pCell >= (*MemPage)(unsafe.Pointer(pPage)).FaDataEnd) { goto __21 } - return Xsqlite3CorruptError(tls, 74077) + return Xsqlite3CorruptError(tls, 74081) __21: ; goto __19 @@ -77134,7 +77136,7 @@ if !!(int32((*MemPage)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pCur)).FpPage)).FisInit) != 0) { goto __4 } - return Xsqlite3CorruptError(tls, 74273) + return Xsqlite3CorruptError(tls, 74277) __4: ; goto bypass_moveto_root @@ -77199,7 +77201,7 @@ if !(nCell < 2 || U32(nCell)/(*BtShared)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pCur)).FpBt)).FusableSize > (*BtShared)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pCur)).FpBt)).FnPage) { goto __17 } - rc = Xsqlite3CorruptError(tls, 74360) + rc = Xsqlite3CorruptError(tls, 74364) goto moveto_index_finish __17: ; @@ -77247,7 +77249,7 @@ if !((*UnpackedRecord)(unsafe.Pointer(pIdxKey)).FerrCode != 0) { goto __24 } - rc = Xsqlite3CorruptError(tls, 74392) + rc = Xsqlite3CorruptError(tls, 74396) __24: ; goto moveto_index_finish @@ -77366,7 +77368,7 @@ pPage = (*BtCursor)(unsafe.Pointer(pCur)).FpPage idx = int32(libc.PreIncUint16(&(*BtCursor)(unsafe.Pointer(pCur)).Fix, 1)) if !(int32((*MemPage)(unsafe.Pointer(pPage)).FisInit) != 0) || Xsqlite3FaultSim(tls, 412) != 0 { - return Xsqlite3CorruptError(tls, 74508) + return Xsqlite3CorruptError(tls, 74512) } if idx >= int32((*MemPage)(unsafe.Pointer(pPage)).FnCell) { @@ -77526,7 +77528,7 @@ if !(n >= mxPage) { goto __1 } - return Xsqlite3CorruptError(tls, 74688) + return Xsqlite3CorruptError(tls, 74692) __1: ; if !(n > U32(0)) { @@ -77591,7 +77593,7 @@ if !(iTrunk > mxPage || libc.PostIncUint32(&nSearch, 1) > n) { goto __16 } - rc = Xsqlite3CorruptError(tls, 74744) + rc = Xsqlite3CorruptError(tls, 74748) goto __17 __16: rc = btreeGetUnusedPage(tls, pBt, iTrunk, bp+8, 0) @@ -77627,7 +77629,7 @@ goto __22 } - rc = Xsqlite3CorruptError(tls, 74773) + rc = Xsqlite3CorruptError(tls, 74777) goto end_allocate_page goto __23 __22: @@ -77671,7 +77673,7 @@ if !(iNewTrunk > mxPage) { goto __32 } - rc = Xsqlite3CorruptError(tls, 74807) + rc = Xsqlite3CorruptError(tls, 74811) goto end_allocate_page __32: ; @@ -77783,7 +77785,7 @@ if !(iPage > mxPage || iPage < Pgno(2)) { goto __51 } - rc = Xsqlite3CorruptError(tls, 74872) + rc = Xsqlite3CorruptError(tls, 74876) goto end_allocate_page __51: ; @@ -77941,7 +77943,7 @@ if !(iPage < Pgno(2) || iPage > (*BtShared)(unsafe.Pointer(pBt)).FnPage) { goto __1 } - return Xsqlite3CorruptError(tls, 74999) + return Xsqlite3CorruptError(tls, 75003) __1: ; if !(pMemPage != 0) { @@ -77998,7 +78000,7 @@ if !(iTrunk > btreePagecount(tls, pBt)) { goto __10 } - *(*int32)(unsafe.Pointer(bp + 8)) = Xsqlite3CorruptError(tls, 75046) + *(*int32)(unsafe.Pointer(bp + 8)) = Xsqlite3CorruptError(tls, 75050) goto freepage_out __10: ; @@ -78014,7 +78016,7 @@ if !(nLeaf > (*BtShared)(unsafe.Pointer(pBt)).FusableSize/U32(4)-U32(2)) { goto __12 } - *(*int32)(unsafe.Pointer(bp + 8)) = Xsqlite3CorruptError(tls, 75057) + *(*int32)(unsafe.Pointer(bp + 8)) = Xsqlite3CorruptError(tls, 75061) goto freepage_out __12: ; @@ -78088,7 +78090,7 @@ var ovflPageSize U32 if 
pCell+uintptr((*CellInfo)(unsafe.Pointer(pInfo)).FnSize) > (*MemPage)(unsafe.Pointer(pPage)).FaDataEnd { - return Xsqlite3CorruptError(tls, 75146) + return Xsqlite3CorruptError(tls, 75150) } ovflPgno = Xsqlite3Get4byte(tls, pCell+uintptr((*CellInfo)(unsafe.Pointer(pInfo)).FnSize)-uintptr(4)) pBt = (*MemPage)(unsafe.Pointer(pPage)).FpBt @@ -78100,7 +78102,7 @@ *(*Pgno)(unsafe.Pointer(bp + 8)) = Pgno(0) *(*uintptr)(unsafe.Pointer(bp)) = uintptr(0) if ovflPgno < Pgno(2) || ovflPgno > btreePagecount(tls, pBt) { - return Xsqlite3CorruptError(tls, 75163) + return Xsqlite3CorruptError(tls, 75167) } if nOvfl != 0 { rc = getOverflowPage(tls, pBt, ovflPgno, bp, bp+8) @@ -78111,7 +78113,7 @@ if (*(*uintptr)(unsafe.Pointer(bp)) != 0 || libc.AssignPtrUintptr(bp, btreePageLookup(tls, pBt, ovflPgno)) != uintptr(0)) && Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FpDbPage) != 1 { - rc = Xsqlite3CorruptError(tls, 75183) + rc = Xsqlite3CorruptError(tls, 75187) } else { rc = freePage2(tls, pBt, *(*uintptr)(unsafe.Pointer(bp)), ovflPgno) } @@ -78276,7 +78278,7 @@ hdr = int32((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset) if pc+U32(sz) > (*BtShared)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize { - *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 75436) + *(*int32)(unsafe.Pointer(pRC)) = Xsqlite3CorruptError(tls, 75440) return } rc = freeSpace(tls, pPage, uint16(pc), uint16(sz)) @@ -78555,12 +78557,12 @@ if Uptr(pCell) >= Uptr(aData+uintptr(j)) && Uptr(pCell) < Uptr(pEnd) { if Uptr(pCell+uintptr(sz)) > Uptr(pEnd) { - return Xsqlite3CorruptError(tls, 75737) + return Xsqlite3CorruptError(tls, 75741) } pCell = pTmp + uintptr((int64(pCell)-int64(aData))/1) } else if Uptr(pCell+uintptr(sz)) > Uptr(pSrcEnd) && Uptr(pCell) < Uptr(pSrcEnd) { - return Xsqlite3CorruptError(tls, 75742) + return Xsqlite3CorruptError(tls, 75746) } pData -= uintptr(sz) @@ -78568,7 +78570,7 @@ *(*U8)(unsafe.Pointer(pCellptr + 1)) = U8((int64(pData) - int64(aData)) / 1) pCellptr += uintptr(2) if pData < pCellptr { - return Xsqlite3CorruptError(tls, 75748) + return Xsqlite3CorruptError(tls, 75752) } libc.Xmemmove(tls, pData, pCell, uint64(sz)) @@ -78628,7 +78630,7 @@ if Uptr(*(*uintptr)(unsafe.Pointer((*CellArray)(unsafe.Pointer(pCArray)).FapCell + uintptr(i)*8))+uintptr(sz)) > Uptr(pEnd) && Uptr(*(*uintptr)(unsafe.Pointer((*CellArray)(unsafe.Pointer(pCArray)).FapCell + uintptr(i)*8))) < Uptr(pEnd) { - Xsqlite3CorruptError(tls, 75833) + Xsqlite3CorruptError(tls, 75837) return 1 } libc.Xmemmove(tls, pSlot, *(*uintptr)(unsafe.Pointer((*CellArray)(unsafe.Pointer(pCArray)).FapCell + uintptr(i)*8)), uint64(sz)) @@ -78717,7 +78719,7 @@ if !(nShift > nCell) { goto __2 } - return Xsqlite3CorruptError(tls, 75947) + return Xsqlite3CorruptError(tls, 75951) __2: ; libc.Xmemmove(tls, (*MemPage)(unsafe.Pointer(pPg)).FaCellIdx, (*MemPage)(unsafe.Pointer(pPg)).FaCellIdx+uintptr(nShift*2), uint64(nCell*2)) @@ -78833,7 +78835,7 @@ var pBt uintptr = (*MemPage)(unsafe.Pointer(pPage)).FpBt if int32((*MemPage)(unsafe.Pointer(pPage)).FnCell) == 0 { - return Xsqlite3CorruptError(tls, 76060) + return Xsqlite3CorruptError(tls, 76064) } *(*int32)(unsafe.Pointer(bp + 136)) = allocateBtreePage(tls, pBt, bp, bp+8, uint32(0), uint8(0)) @@ -79153,7 +79155,7 @@ if !(int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pOld)).FaData))) != int32(*(*U8)(unsafe.Pointer((*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp + 112)))).FaData)))) { goto __25 } - *(*int32)(unsafe.Pointer(bp + 172)) = 
Xsqlite3CorruptError(tls, 76481) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76485) goto balance_cleanup __25: ; @@ -79164,7 +79166,7 @@ if !(limit < int32(*(*U16)(unsafe.Pointer(pOld + 28)))) { goto __27 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76505) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76509) goto balance_cleanup __27: ; @@ -79322,7 +79324,7 @@ if !(k > NB+2) { goto __55 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76606) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76610) goto balance_cleanup __55: ; @@ -79396,7 +79398,7 @@ }()) { goto __67 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76639) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76643) goto balance_cleanup __67: ; @@ -79459,7 +79461,7 @@ }()) { goto __75 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76683) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76687) goto balance_cleanup __75: ; @@ -79487,7 +79489,7 @@ *(*int32)(unsafe.Pointer(bp + 172)) == SQLITE_OK) { goto __81 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76716) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76720) __81: ; if !(*(*int32)(unsafe.Pointer(bp + 172)) != 0) { @@ -79748,7 +79750,7 @@ if !(Uptr(pSrcEnd) >= Uptr(pCell1) && Uptr(pSrcEnd) < Uptr(pCell1+uintptr(sz2))) { goto __121 } - *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76916) + *(*int32)(unsafe.Pointer(bp + 172)) = Xsqlite3CorruptError(tls, 76920) goto balance_cleanup __121: ; @@ -79940,7 +79942,7 @@ if pOther != pCur && int32((*BtCursor)(unsafe.Pointer(pOther)).FeState) == CURSOR_VALID && (*BtCursor)(unsafe.Pointer(pOther)).FpPage == (*BtCursor)(unsafe.Pointer(pCur)).FpPage { - return Xsqlite3CorruptError(tls, 77146) + return Xsqlite3CorruptError(tls, 77150) } } return SQLITE_OK @@ -79978,7 +79980,7 @@ break } } else if Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(pPage)).FpDbPage) > 1 { - rc = Xsqlite3CorruptError(tls, 77206) + rc = Xsqlite3CorruptError(tls, 77210) } else { var pParent uintptr = *(*uintptr)(unsafe.Pointer(pCur + 144 + uintptr(iPage-1)*8)) var iIdx int32 = int32(*(*U16)(unsafe.Pointer(pCur + 88 + uintptr(iPage-1)*2))) @@ -80084,7 +80086,7 @@ return rc } if Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FpDbPage) != 1 || (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FisInit != 0 { - rc = Xsqlite3CorruptError(tls, 77370) + rc = Xsqlite3CorruptError(tls, 77374) } else { if U32(iOffset)+ovflPageSize < U32(nTotal) { ovflPgno = Xsqlite3Get4byte(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FaData) @@ -80109,7 +80111,7 @@ if (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload+uintptr((*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal) > (*MemPage)(unsafe.Pointer(pPage)).FaDataEnd || (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload < (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr((*MemPage)(unsafe.Pointer(pPage)).FcellOffset) { - return Xsqlite3CorruptError(tls, 77398) + return Xsqlite3CorruptError(tls, 77402) } if int32((*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal) == nTotal { return btreeOverwriteContent(tls, pPage, (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload, pX, @@ -80179,7 +80181,7 @@ goto __3 } - return Xsqlite3CorruptError(tls, 77479) + return Xsqlite3CorruptError(tls, 77483) __3: ; __1: @@ -80292,7 +80294,7 @@ goto __21 } - 
*(*int32)(unsafe.Pointer(bp + 120)) = Xsqlite3CorruptError(tls, 77602) + *(*int32)(unsafe.Pointer(bp + 120)) = Xsqlite3CorruptError(tls, 77606) goto __22 __21: *(*int32)(unsafe.Pointer(bp + 120)) = btreeComputeFreeSpace(tls, pPage) @@ -80352,6 +80354,7 @@ __25: ; idx = int32((*BtCursor)(unsafe.Pointer(pCur)).Fix) + (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnSize = U16(0) if !(*(*int32)(unsafe.Pointer(bp)) == 0) { goto __31 } @@ -80359,7 +80362,7 @@ if !(idx >= int32((*MemPage)(unsafe.Pointer(pPage)).FnCell)) { goto __33 } - return Xsqlite3CorruptError(tls, 77640) + return Xsqlite3CorruptError(tls, 77645) __33: ; *(*int32)(unsafe.Pointer(bp + 120)) = Xsqlite3PagerWrite(tls, (*MemPage)(unsafe.Pointer(pPage)).FpDbPage) @@ -80397,13 +80400,13 @@ if !(oldCell < (*MemPage)(unsafe.Pointer(pPage)).FaData+uintptr((*MemPage)(unsafe.Pointer(pPage)).FhdrOffset)+uintptr(10)) { goto __39 } - return Xsqlite3CorruptError(tls, 77667) + return Xsqlite3CorruptError(tls, 77672) __39: ; if !(oldCell+uintptr(*(*int32)(unsafe.Pointer(bp + 124))) > (*MemPage)(unsafe.Pointer(pPage)).FaDataEnd) { goto __40 } - return Xsqlite3CorruptError(tls, 77670) + return Xsqlite3CorruptError(tls, 77675) __40: ; libc.Xmemcpy(tls, oldCell, newCell, uint64(*(*int32)(unsafe.Pointer(bp + 124)))) @@ -80434,7 +80437,6 @@ ; *(*int32)(unsafe.Pointer(bp + 120)) = insertCell(tls, pPage, idx, newCell, *(*int32)(unsafe.Pointer(bp + 124)), uintptr(0), uint32(0)) - (*BtCursor)(unsafe.Pointer(pCur)).Finfo.FnSize = U16(0) if !((*MemPage)(unsafe.Pointer(pPage)).FnOverflow != 0) { goto __44 } @@ -80509,7 +80511,7 @@ nIn = U32((*BtCursor)(unsafe.Pointer(pSrc)).Finfo.FnLocal) aIn = (*BtCursor)(unsafe.Pointer(pSrc)).Finfo.FpPayload if aIn+uintptr(nIn) > (*MemPage)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pSrc)).FpPage)).FaDataEnd { - return Xsqlite3CorruptError(tls, 77773) + return Xsqlite3CorruptError(tls, 77777) } nRem = (*BtCursor)(unsafe.Pointer(pSrc)).Finfo.FnPayload if nIn == nRem && nIn < U32((*MemPage)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pDest)).FpPage)).FmaxLocal) { @@ -80534,7 +80536,7 @@ if nRem > nIn { if aIn+uintptr(nIn)+uintptr(4) > (*MemPage)(unsafe.Pointer((*BtCursor)(unsafe.Pointer(pSrc)).FpPage)).FaDataEnd { - return Xsqlite3CorruptError(tls, 77798) + return Xsqlite3CorruptError(tls, 77802) } ovflIn = Xsqlite3Get4byte(tls, (*BtCursor)(unsafe.Pointer(pSrc)).Finfo.FpPayload+uintptr(nIn)) } @@ -80635,7 +80637,7 @@ return *(*int32)(unsafe.Pointer(bp + 24)) } } else { - return Xsqlite3CorruptError(tls, 77894) + return Xsqlite3CorruptError(tls, 77898) } } @@ -80643,11 +80645,11 @@ iCellIdx = int32((*BtCursor)(unsafe.Pointer(pCur)).Fix) pPage = (*BtCursor)(unsafe.Pointer(pCur)).FpPage if int32((*MemPage)(unsafe.Pointer(pPage)).FnCell) <= iCellIdx { - return Xsqlite3CorruptError(tls, 77903) + return Xsqlite3CorruptError(tls, 77907) } pCell = (*MemPage)(unsafe.Pointer(pPage)).FaData + uintptr(int32((*MemPage)(unsafe.Pointer(pPage)).FmaskPage)&int32(libc.X__builtin_bswap16(tls, *(*U16)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pPage)).FaCellIdx + uintptr(2*iCellIdx)))))) if (*MemPage)(unsafe.Pointer(pPage)).FnFree < 0 && btreeComputeFreeSpace(tls, pPage) != 0 { - return Xsqlite3CorruptError(tls, 77907) + return Xsqlite3CorruptError(tls, 77911) } bPreserve = U8(libc.Bool32(int32(flags)&BTREE_SAVEPOSITION != 0)) @@ -80722,7 +80724,7 @@ } pCell = (*MemPage)(unsafe.Pointer(pLeaf)).FaData + uintptr(int32((*MemPage)(unsafe.Pointer(pLeaf)).FmaskPage)&int32(libc.X__builtin_bswap16(tls, 
*(*U16)(unsafe.Pointer((*MemPage)(unsafe.Pointer(pLeaf)).FaCellIdx + uintptr(2*(int32((*MemPage)(unsafe.Pointer(pLeaf)).FnCell)-1))))))) if pCell < (*MemPage)(unsafe.Pointer(pLeaf)).FaData+4 { - return Xsqlite3CorruptError(tls, 77998) + return Xsqlite3CorruptError(tls, 78002) } nCell = int32((*struct { f func(*libc.TLS, uintptr, uintptr) U16 @@ -80791,7 +80793,7 @@ Xsqlite3BtreeGetMeta(tls, p, BTREE_LARGEST_ROOT_PAGE, bp) if *(*Pgno)(unsafe.Pointer(bp)) > btreePagecount(tls, pBt) { - return Xsqlite3CorruptError(tls, 78114) + return Xsqlite3CorruptError(tls, 78118) } *(*Pgno)(unsafe.Pointer(bp))++ @@ -80820,7 +80822,7 @@ } *(*int32)(unsafe.Pointer(bp + 40)) = ptrmapGet(tls, pBt, *(*Pgno)(unsafe.Pointer(bp)), bp+32, bp+36) if int32(*(*U8)(unsafe.Pointer(bp + 32))) == PTRMAP_ROOTPAGE || int32(*(*U8)(unsafe.Pointer(bp + 32))) == PTRMAP_FREEPAGE { - *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3CorruptError(tls, 78162) + *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3CorruptError(tls, 78166) } if *(*int32)(unsafe.Pointer(bp + 40)) != SQLITE_OK { releasePage(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) @@ -80896,7 +80898,7 @@ if !(pgno > btreePagecount(tls, pBt)) { goto __1 } - return Xsqlite3CorruptError(tls, 78252) + return Xsqlite3CorruptError(tls, 78256) __1: ; *(*int32)(unsafe.Pointer(bp + 32)) = getAndInitPage(tls, pBt, pgno, bp, uintptr(0), 0) @@ -80910,7 +80912,7 @@ Xsqlite3PagerPageRefcount(tls, (*MemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FpDbPage) != 1+libc.Bool32(pgno == Pgno(1))) { goto __3 } - *(*int32)(unsafe.Pointer(bp + 32)) = Xsqlite3CorruptError(tls, 78259) + *(*int32)(unsafe.Pointer(bp + 32)) = Xsqlite3CorruptError(tls, 78263) goto cleardatabasepage_out __3: ; @@ -81044,7 +81046,7 @@ var pBt uintptr = (*Btree)(unsafe.Pointer(p)).FpBt if iTable > btreePagecount(tls, pBt) { - return Xsqlite3CorruptError(tls, 78363) + return Xsqlite3CorruptError(tls, 78367) } *(*int32)(unsafe.Pointer(bp + 12)) = Xsqlite3BtreeClearTable(tls, p, int32(iTable), uintptr(0)) @@ -83514,7 +83516,7 @@ var rc int32 (*Mem)(unsafe.Pointer(pMem)).Fflags = U16(MEM_Null) if Xsqlite3BtreeMaxRecordSize(tls, pCur) < Sqlite3_int64(offset+amt) { - return Xsqlite3CorruptError(tls, 81630) + return Xsqlite3CorruptError(tls, 81634) } if SQLITE_OK == libc.AssignInt32(&rc, Xsqlite3VdbeMemClearAndResize(tls, pMem, int32(amt+U32(1)))) { rc = Xsqlite3BtreePayload(tls, pCur, offset, amt, (*Mem)(unsafe.Pointer(pMem)).Fz) @@ -84163,7 +84165,7 @@ return Xsqlite3GetVarint32(tls, a, bp) }()) if *(*int32)(unsafe.Pointer(bp)) > nRec || iHdr >= *(*int32)(unsafe.Pointer(bp)) { - return Xsqlite3CorruptError(tls, 82270) + return Xsqlite3CorruptError(tls, 82274) } iField = *(*int32)(unsafe.Pointer(bp)) for i = 0; i <= iCol; i++ { @@ -84178,14 +84180,14 @@ }()) if iHdr > *(*int32)(unsafe.Pointer(bp)) { - return Xsqlite3CorruptError(tls, 82276) + return Xsqlite3CorruptError(tls, 82280) } szField = int32(Xsqlite3VdbeSerialTypeLen(tls, *(*U32)(unsafe.Pointer(bp + 4)))) iField = iField + szField } if iField > nRec { - return Xsqlite3CorruptError(tls, 82282) + return Xsqlite3CorruptError(tls, 82286) } if pMem == uintptr(0) { pMem = libc.AssignPtrUintptr(ppVal, Xsqlite3ValueNew(tls, db)) @@ -86489,7 +86491,7 @@ return rc } if *(*int32)(unsafe.Pointer(bp)) != 0 { - return Xsqlite3CorruptError(tls, 86058) + return Xsqlite3CorruptError(tls, 86062) } (*VdbeCursor)(unsafe.Pointer(p)).FdeferredMoveto = U8(0) (*VdbeCursor)(unsafe.Pointer(p)).FcacheStatus = U32(CACHE_STALE) @@ -87040,7 +87042,7 @@ i = 0 } if d1 > uint32(nKey1) { - 
(*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 86985)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 86989)) return 0 } @@ -87105,7 +87107,7 @@ if d1+U32((*Mem)(unsafe.Pointer(bp+8)).Fn) > uint32(nKey1) || int32((*KeyInfo)(unsafe.Pointer(libc.AssignUintptr(&pKeyInfo, (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FpKeyInfo))).FnAllField) <= i { - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87062)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87066)) return 0 } else if *(*uintptr)(unsafe.Pointer(pKeyInfo + 32 + uintptr(i)*8)) != 0 { (*Mem)(unsafe.Pointer(bp + 8)).Fenc = (*KeyInfo)(unsafe.Pointer(pKeyInfo)).Fenc @@ -87139,7 +87141,7 @@ var nStr int32 = int32((*(*U32)(unsafe.Pointer(bp + 64)) - U32(12)) / U32(2)) if d1+U32(nStr) > uint32(nKey1) { - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87092)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87096)) return 0 } else if int32((*Mem)(unsafe.Pointer(pRhs)).Fflags)&MEM_Zero != 0 { if !(isAllZero(tls, aKey1+uintptr(d1), nStr) != 0) { @@ -87189,7 +87191,7 @@ } idx1 = idx1 + U32(Xsqlite3VarintLen(tls, uint64(*(*U32)(unsafe.Pointer(bp + 64))))) if idx1 >= *(*U32)(unsafe.Pointer(bp + 4)) { - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87136)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87140)) return 0 } } @@ -87335,7 +87337,7 @@ if !(szHdr+nStr > nKey1) { goto __7 } - (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87299)) + (*UnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = U8(Xsqlite3CorruptError(tls, 87303)) return 0 __7: ; @@ -87506,7 +87508,7 @@ idx_rowid_corruption: ; Xsqlite3VdbeMemReleaseMalloc(tls, bp) - return Xsqlite3CorruptError(tls, 87457) + return Xsqlite3CorruptError(tls, 87461) } // Compare the key of the index entry that cursor pC is pointing to against @@ -87532,7 +87534,7 @@ if nCellKey <= int64(0) || nCellKey > int64(0x7fffffff) { *(*int32)(unsafe.Pointer(res)) = 0 - return Xsqlite3CorruptError(tls, 87490) + return Xsqlite3CorruptError(tls, 87494) } Xsqlite3VdbeMemInit(tls, bp, db, uint16(0)) rc = Xsqlite3VdbeMemFromBtreeZeroOffset(tls, pCur, U32(nCellKey), bp) @@ -87806,7 +87808,7 @@ var v uintptr = pStmt var db uintptr = (*Vdbe)(unsafe.Pointer(v)).Fdb if vdbeSafety(tls, v) != 0 { - return Xsqlite3MisuseError(tls, 87854) + return Xsqlite3MisuseError(tls, 87858) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) if (*Vdbe)(unsafe.Pointer(v)).FstartTime > int64(0) { @@ -88421,7 +88423,7 @@ var db uintptr if vdbeSafetyNotNull(tls, v) != 0 { - return Xsqlite3MisuseError(tls, 88544) + return Xsqlite3MisuseError(tls, 88548) } db = (*Vdbe)(unsafe.Pointer(v)).Fdb Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) @@ -88941,7 +88943,7 @@ var pVar uintptr if vdbeSafetyNotNull(tls, p) != 0 { - return Xsqlite3MisuseError(tls, 89208) + return Xsqlite3MisuseError(tls, 89212) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer((*Vdbe)(unsafe.Pointer(p)).Fdb)).Fmutex) if int32((*Vdbe)(unsafe.Pointer(p)).FeVdbeState) != VDBE_READY_STATE { @@ -88949,7 +88951,7 @@ Xsqlite3_mutex_leave(tls, (*Sqlite3)(unsafe.Pointer((*Vdbe)(unsafe.Pointer(p)).Fdb)).Fmutex) Xsqlite3_log(tls, SQLITE_MISUSE, ts+6709, libc.VaList(bp, (*Vdbe)(unsafe.Pointer(p)).FzSql)) - return Xsqlite3MisuseError(tls, 89216) 
+ return Xsqlite3MisuseError(tls, 89220) } if i >= uint32((*Vdbe)(unsafe.Pointer(p)).FnVar) { Xsqlite3Error(tls, (*Vdbe)(unsafe.Pointer(p)).Fdb, SQLITE_RANGE) @@ -89354,7 +89356,7 @@ if !(!(p != 0) || (*PreUpdate)(unsafe.Pointer(p)).Fop == SQLITE_INSERT) { goto __1 } - rc = Xsqlite3MisuseError(tls, 89707) + rc = Xsqlite3MisuseError(tls, 89711) goto preupdate_old_out __1: ; @@ -89498,7 +89500,7 @@ if !(!(p != 0) || (*PreUpdate)(unsafe.Pointer(p)).Fop == SQLITE_DELETE) { goto __1 } - rc = Xsqlite3MisuseError(tls, 89809) + rc = Xsqlite3MisuseError(tls, 89813) goto preupdate_new_out __1: ; @@ -89942,10 +89944,6 @@ } else if int32((*Mem)(unsafe.Pointer(p)).Fflags)&MEM_Real != 0 { h = h + U64(Xsqlite3VdbeIntValue(tls, p)) } else if int32((*Mem)(unsafe.Pointer(p)).Fflags)&(MEM_Str|MEM_Blob) != 0 { - h = h + U64((*Mem)(unsafe.Pointer(p)).Fn) - if int32((*Mem)(unsafe.Pointer(p)).Fflags)&MEM_Zero != 0 { - h = h + U64(*(*int32)(unsafe.Pointer(p))) - } } } return h @@ -92593,7 +92591,7 @@ goto __9 goto __425 __424: - rc = Xsqlite3CorruptError(tls, 93317) + rc = Xsqlite3CorruptError(tls, 93320) goto abort_due_to_error __425: ; @@ -94353,7 +94351,7 @@ if !((*Op)(unsafe.Pointer(pOp)).Fp2 == 0) { goto __682 } - rc = Xsqlite3CorruptError(tls, 95560) + rc = Xsqlite3CorruptError(tls, 95563) goto __683 __682: goto jump_to_p2 @@ -95131,7 +95129,7 @@ if !((*Op)(unsafe.Pointer(pOp)).Fp5 != 0 && !(Xsqlite3WritableSchema(tls, db) != 0)) { goto __770 } - rc = Xsqlite3ReportError(tls, SQLITE_CORRUPT|int32(3)<<8, 96635, ts+7218) + rc = Xsqlite3ReportError(tls, SQLITE_CORRUPT|int32(3)<<8, 96638, ts+7218) goto abort_due_to_error __770: ; @@ -95241,7 +95239,7 @@ if !(nCellKey <= int64(0) || nCellKey > int64(0x7fffffff)) { goto __781 } - rc = Xsqlite3CorruptError(tls, 96840) + rc = Xsqlite3CorruptError(tls, 96843) goto abort_due_to_error __781: ; @@ -95435,7 +95433,7 @@ goto __803 } - rc = Xsqlite3CorruptError(tls, 97092) + rc = Xsqlite3CorruptError(tls, 97095) __803: ; Xsqlite3DbFreeNN(tls, db, zSql) @@ -96802,7 +96800,7 @@ if !(rc == SQLITE_IOERR|int32(33)<<8) { goto __957 } - rc = Xsqlite3CorruptError(tls, 99031) + rc = Xsqlite3CorruptError(tls, 99034) __957: ; __956: @@ -97322,7 +97320,7 @@ var db uintptr if p == uintptr(0) { - return Xsqlite3MisuseError(tls, 99516) + return Xsqlite3MisuseError(tls, 99519) } db = (*Incrblob)(unsafe.Pointer(p)).Fdb Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) @@ -97405,7 +97403,7 @@ var db uintptr if p == uintptr(0) { - return Xsqlite3MisuseError(tls, 99616) + return Xsqlite3MisuseError(tls, 99619) } db = (*Incrblob)(unsafe.Pointer(p)).Fdb Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) @@ -100845,14 +100843,10 @@ ; Xsqlite3WalkExpr(tls, pWalker, (*Expr)(unsafe.Pointer(pExpr)).FpLeft) if 0 == Xsqlite3ExprCanBeNull(tls, (*Expr)(unsafe.Pointer(pExpr)).FpLeft) && !(int32((*Parse)(unsafe.Pointer(pParse)).FeParseMode) >= PARSE_MODE_RENAME) { - if int32((*Expr)(unsafe.Pointer(pExpr)).Fop) == TK_NOTNULL { - *(*uintptr)(unsafe.Pointer(pExpr + 8)) = ts + 8116 - *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_IsTrue) - } else { - *(*uintptr)(unsafe.Pointer(pExpr + 8)) = ts + 8121 - *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_IsFalse) - } - (*Expr)(unsafe.Pointer(pExpr)).Fop = U8(TK_TRUEFALSE) + *(*int32)(unsafe.Pointer(pExpr + 8)) = libc.Bool32(int32((*Expr)(unsafe.Pointer(pExpr)).Fop) == TK_NOTNULL) + *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_IntValue) + (*Expr)(unsafe.Pointer(pExpr)).Fop = U8(TK_INTEGER) + i = 0 p = pNC __4: @@ -100896,7 +100890,7 @@ var 
pLeft uintptr = (*Expr)(unsafe.Pointer(pExpr)).FpLeft if (*NameContext)(unsafe.Pointer(pNC)).FncFlags&(NC_IdxExpr|NC_GenCol) != 0 { - notValidImpl(tls, pParse, pNC, ts+8127, uintptr(0), pExpr) + notValidImpl(tls, pParse, pNC, ts+8116, uintptr(0), pExpr) } pRight = (*Expr)(unsafe.Pointer(pExpr)).FpRight @@ -100960,7 +100954,7 @@ (*Expr)(unsafe.Pointer(pExpr)).FiTable = exprProbability(tls, (*ExprList_item)(unsafe.Pointer(pList+8+1*32)).FpExpr) if (*Expr)(unsafe.Pointer(pExpr)).FiTable < 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+8144, libc.VaList(bp, pExpr)) + ts+8133, libc.VaList(bp, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } } else { @@ -100976,7 +100970,7 @@ var auth int32 = Xsqlite3AuthCheck(tls, pParse, SQLITE_FUNCTION, uintptr(0), (*FuncDef)(unsafe.Pointer(pDef)).FzName, uintptr(0)) if auth != SQLITE_OK { if auth == SQLITE_DENY { - Xsqlite3ErrorMsg(tls, pParse, ts+8208, + Xsqlite3ErrorMsg(tls, pParse, ts+8197, libc.VaList(bp+8, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } @@ -100990,7 +100984,7 @@ } if (*FuncDef)(unsafe.Pointer(pDef)).FfuncFlags&U32(SQLITE_FUNC_CONSTANT) == U32(0) { if (*NameContext)(unsafe.Pointer(pNC)).FncFlags&(NC_IdxExpr|NC_PartIdx|NC_GenCol) != 0 { - notValidImpl(tls, pParse, pNC, ts+8244, uintptr(0), pExpr) + notValidImpl(tls, pParse, pNC, ts+8233, uintptr(0), pExpr) } } else { @@ -101013,30 +101007,30 @@ if 0 == libc.Bool32(int32((*Parse)(unsafe.Pointer(pParse)).FeParseMode) >= PARSE_MODE_RENAME) { if pDef != 0 && (*FuncDef)(unsafe.Pointer(pDef)).FxValue == uintptr(0) && pWin != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+8272, libc.VaList(bp+16, pExpr)) + ts+8261, libc.VaList(bp+16, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } else if is_agg != 0 && (*NameContext)(unsafe.Pointer(pNC)).FncFlags&NC_AllowAgg == 0 || is_agg != 0 && (*FuncDef)(unsafe.Pointer(pDef)).FfuncFlags&U32(SQLITE_FUNC_WINDOW) != 0 && !(pWin != 0) || is_agg != 0 && pWin != 0 && (*NameContext)(unsafe.Pointer(pNC)).FncFlags&NC_AllowWin == 0 { var zType uintptr if (*FuncDef)(unsafe.Pointer(pDef)).FfuncFlags&U32(SQLITE_FUNC_WINDOW) != 0 || pWin != 0 { - zType = ts + 8315 + zType = ts + 8304 } else { - zType = ts + 8322 + zType = ts + 8311 } - Xsqlite3ErrorMsg(tls, pParse, ts+8332, libc.VaList(bp+24, zType, pExpr)) + Xsqlite3ErrorMsg(tls, pParse, ts+8321, libc.VaList(bp+24, zType, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ is_agg = 0 } else if no_such_func != 0 && int32((*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).Finit.Fbusy) == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+8360, libc.VaList(bp+40, pExpr)) + Xsqlite3ErrorMsg(tls, pParse, ts+8349, libc.VaList(bp+40, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } else if wrong_num_args != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+8382, + Xsqlite3ErrorMsg(tls, pParse, ts+8371, libc.VaList(bp+48, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } else if is_agg == 0 && (*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_WinFunc) != U32(0) { Xsqlite3ErrorMsg(tls, pParse, - ts+8426, + ts+8415, libc.VaList(bp+56, pExpr)) (*NameContext)(unsafe.Pointer(pNC)).FnNcErr++ } @@ -101108,15 +101102,15 @@ var nRef int32 = (*NameContext)(unsafe.Pointer(pNC)).FnRef if (*NameContext)(unsafe.Pointer(pNC)).FncFlags&NC_SelfRef != 0 { - notValidImpl(tls, pParse, pNC, ts+8474, pExpr, pExpr) + notValidImpl(tls, pParse, pNC, ts+8463, pExpr, pExpr) } else { Xsqlite3WalkSelect(tls, pWalker, *(*uintptr)(unsafe.Pointer(pExpr + 32))) } if nRef != (*NameContext)(unsafe.Pointer(pNC)).FnRef { *(*U32)(unsafe.Pointer(pExpr + 4)) 
|= U32(EP_VarSelect) - *(*int32)(unsafe.Pointer(pNC + 40)) |= NC_VarSelect } + *(*int32)(unsafe.Pointer(pNC + 40)) |= NC_Subquery } break @@ -101124,7 +101118,7 @@ case TK_VARIABLE: { if (*NameContext)(unsafe.Pointer(pNC)).FncFlags&(NC_IsCheck|NC_PartIdx|NC_IdxExpr|NC_GenCol) != 0 { - notValidImpl(tls, pParse, pNC, ts+8485, pExpr, pExpr) + notValidImpl(tls, pParse, pNC, ts+8474, pExpr, pExpr) } break @@ -101255,7 +101249,7 @@ defer tls.Free(24) Xsqlite3ErrorMsg(tls, pParse, - ts+8496, libc.VaList(bp, i, zType, mx)) + ts+8485, libc.VaList(bp, i, zType, mx)) Xsqlite3RecordErrorOffsetOfExpr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pError) } @@ -101275,7 +101269,7 @@ } db = (*Parse)(unsafe.Pointer(pParse)).Fdb if (*ExprList)(unsafe.Pointer(pOrderBy)).FnExpr > *(*int32)(unsafe.Pointer(db + 136 + 2*4)) { - Xsqlite3ErrorMsg(tls, pParse, ts+8552, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+8541, 0) return 1 } for i = 0; i < (*ExprList)(unsafe.Pointer(pOrderBy)).FnExpr; i++ { @@ -101310,7 +101304,7 @@ } if Xsqlite3ExprIsInteger(tls, pE, bp+8) != 0 { if *(*int32)(unsafe.Pointer(bp + 8)) <= 0 || *(*int32)(unsafe.Pointer(bp + 8)) > (*ExprList)(unsafe.Pointer(pEList)).FnExpr { - resolveOutOfRangeError(tls, pParse, ts+8586, i+1, (*ExprList)(unsafe.Pointer(pEList)).FnExpr, pE) + resolveOutOfRangeError(tls, pParse, ts+8575, i+1, (*ExprList)(unsafe.Pointer(pEList)).FnExpr, pE) return 1 } } else { @@ -101367,7 +101361,7 @@ for i = 0; i < (*ExprList)(unsafe.Pointer(pOrderBy)).FnExpr; i++ { if int32(*(*uint16)(unsafe.Pointer(pOrderBy + 8 + uintptr(i)*32 + 16 + 4))&0x4>>2) == 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+8592, libc.VaList(bp, i+1)) + ts+8581, libc.VaList(bp, i+1)) return 1 } } @@ -101395,7 +101389,7 @@ return 0 } if (*ExprList)(unsafe.Pointer(pOrderBy)).FnExpr > *(*int32)(unsafe.Pointer(db + 136 + 2*4)) { - Xsqlite3ErrorMsg(tls, pParse, ts+8653, libc.VaList(bp, zType)) + Xsqlite3ErrorMsg(tls, pParse, ts+8642, libc.VaList(bp, zType)) return 1 } pEList = (*Select)(unsafe.Pointer(pSelect)).FpEList @@ -101609,7 +101603,7 @@ *(*int32)(unsafe.Pointer(bp + 40)) |= NC_UEList if (*Select)(unsafe.Pointer(p)).FpHaving != 0 { if (*Select)(unsafe.Pointer(p)).FselFlags&U32(SF_Aggregate) == U32(0) { - Xsqlite3ErrorMsg(tls, pParse, ts+8684, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+8673, 0) return WRC_Abort } if Xsqlite3ResolveExprNames(tls, bp, (*Select)(unsafe.Pointer(p)).FpHaving) != 0 { @@ -101649,7 +101643,7 @@ if (*Select)(unsafe.Pointer(p)).FpOrderBy != uintptr(0) && isCompound <= nCompound && - resolveOrderGroupBy(tls, bp, p, (*Select)(unsafe.Pointer(p)).FpOrderBy, ts+8586) != 0 { + resolveOrderGroupBy(tls, bp, p, (*Select)(unsafe.Pointer(p)).FpOrderBy, ts+8575) != 0 { return WRC_Abort } if (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { @@ -101660,7 +101654,7 @@ if pGroupBy != 0 { var pItem uintptr - if resolveOrderGroupBy(tls, bp, p, pGroupBy, ts+8723) != 0 || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { + if resolveOrderGroupBy(tls, bp, p, pGroupBy, ts+8712) != 0 || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { return WRC_Abort } i = 0 @@ -101672,7 +101666,7 @@ { if (*Expr)(unsafe.Pointer((*ExprList_item)(unsafe.Pointer(pItem)).FpExpr)).Fflags&U32(EP_Agg) != U32(0) { Xsqlite3ErrorMsg(tls, pParse, - ts+8729, 0) + ts+8718, 0) return WRC_Abort } @@ -102536,7 +102530,7 @@ var mxHeight int32 = *(*int32)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb + 136 + 3*4)) if nHeight > mxHeight { Xsqlite3ErrorMsg(tls, pParse, - ts+8788, libc.VaList(bp, mxHeight)) + ts+8777, libc.VaList(bp, mxHeight)) rc = 
SQLITE_ERROR } return rc @@ -102785,10 +102779,10 @@ nExprElem = 1 } if nExprElem != nElem { - Xsqlite3ErrorMsg(tls, pParse, ts+8836, + Xsqlite3ErrorMsg(tls, pParse, ts+8825, libc.VaList(bp, nExprElem, func() uintptr { if nExprElem > 1 { - return ts + 8880 + return ts + 8869 } return ts + 1544 }(), nElem)) @@ -102829,7 +102823,7 @@ !(int32((*Parse)(unsafe.Pointer(pParse)).FeParseMode) >= PARSE_MODE_RENAME) { Xsqlite3ExprDeferredDelete(tls, pParse, pLeft) Xsqlite3ExprDeferredDelete(tls, pParse, pRight) - return Xsqlite3Expr(tls, db, TK_INTEGER, ts+8882) + return Xsqlite3Expr(tls, db, TK_INTEGER, ts+8871) } else { return Xsqlite3PExpr(tls, pParse, TK_AND, pLeft, pRight) } @@ -102855,7 +102849,7 @@ if pList != 0 && (*ExprList)(unsafe.Pointer(pList)).FnExpr > *(*int32)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb + 136 + 6*4)) && !(int32((*Parse)(unsafe.Pointer(pParse)).Fnested) != 0) { - Xsqlite3ErrorMsg(tls, pParse, ts+8884, libc.VaList(bp, pToken)) + Xsqlite3ErrorMsg(tls, pParse, ts+8873, libc.VaList(bp, pToken)) } *(*uintptr)(unsafe.Pointer(pNew + 32)) = pList *(*U32)(unsafe.Pointer(pNew + 4)) |= U32(EP_HasFunc) @@ -102883,7 +102877,7 @@ if (*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_FromDDL) != U32(0) { if (*FuncDef)(unsafe.Pointer(pDef)).FfuncFlags&U32(SQLITE_FUNC_DIRECT) != U32(0) || (*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).Fflags&uint64(SQLITE_TrustedSchema) == uint64(0) { - Xsqlite3ErrorMsg(tls, pParse, ts+8918, libc.VaList(bp, pExpr)) + Xsqlite3ErrorMsg(tls, pParse, ts+8907, libc.VaList(bp, pExpr)) } } } @@ -102930,7 +102924,7 @@ } if bOk == 0 || *(*I64)(unsafe.Pointer(bp + 8)) < int64(1) || *(*I64)(unsafe.Pointer(bp + 8)) > I64(*(*int32)(unsafe.Pointer(db + 136 + 9*4))) { - Xsqlite3ErrorMsg(tls, pParse, ts+8938, + Xsqlite3ErrorMsg(tls, pParse, ts+8927, libc.VaList(bp, *(*int32)(unsafe.Pointer(db + 136 + 9*4)))) Xsqlite3RecordErrorOffsetOfExpr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pExpr) return @@ -102955,7 +102949,7 @@ } (*Expr)(unsafe.Pointer(pExpr)).FiColumn = x if int32(x) > *(*int32)(unsafe.Pointer(db + 136 + 9*4)) { - Xsqlite3ErrorMsg(tls, pParse, ts+8981, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+8970, 0) Xsqlite3RecordErrorOffsetOfExpr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pExpr) } } @@ -103530,7 +103524,7 @@ if !(int32((*Expr)(unsafe.Pointer(pExpr)).Fop) != TK_SELECT && (*IdList)(unsafe.Pointer(pColumns)).FnId != libc.AssignInt32(&n, Xsqlite3ExprVectorSize(tls, pExpr))) { goto __3 } - Xsqlite3ErrorMsg(tls, pParse, ts+9004, + Xsqlite3ErrorMsg(tls, pParse, ts+8993, libc.VaList(bp, (*IdList)(unsafe.Pointer(pColumns)).FnId, n)) goto vector_append_error __3: @@ -103653,7 +103647,7 @@ var mx int32 = *(*int32)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb + 136 + 2*4)) if pEList != 0 && (*ExprList)(unsafe.Pointer(pEList)).FnExpr > mx { - Xsqlite3ErrorMsg(tls, pParse, ts+9034, libc.VaList(bp, zObject)) + Xsqlite3ErrorMsg(tls, pParse, ts+9023, libc.VaList(bp, zObject)) } } @@ -103709,10 +103703,10 @@ // "false" EP_IsFalse // anything else 0 func Xsqlite3IsTrueOrFalse(tls *libc.TLS, zIn uintptr) U32 { - if Xsqlite3StrICmp(tls, zIn, ts+8116) == 0 { + if Xsqlite3StrICmp(tls, zIn, ts+9046) == 0 { return U32(EP_IsTrue) } - if Xsqlite3StrICmp(tls, zIn, ts+8121) == 0 { + if Xsqlite3StrICmp(tls, zIn, ts+9051) == 0 { return U32(EP_IsFalse) } return U32(0) @@ -104786,7 +104780,7 @@ } if (*Select)(unsafe.Pointer(pSel)).FpLimit != 0 { var db uintptr = (*Parse)(unsafe.Pointer(pParse)).Fdb - pLimit = Xsqlite3Expr(tls, db, TK_INTEGER, ts+8882) + pLimit = 
Xsqlite3Expr(tls, db, TK_INTEGER, ts+8871) if pLimit != 0 { (*Expr)(unsafe.Pointer(pLimit)).FaffExpr = int8(SQLITE_AFF_NUMERIC) pLimit = Xsqlite3PExpr(tls, pParse, TK_NE, @@ -105224,6 +105218,7 @@ func Xsqlite3ExprCodeGeneratedColumn(tls *libc.TLS, pParse uintptr, pTab uintptr, pCol uintptr, regOut int32) { var iAddr int32 var v uintptr = (*Parse)(unsafe.Pointer(pParse)).FpVdbe + var nErr int32 = (*Parse)(unsafe.Pointer(pParse)).FnErr if (*Parse)(unsafe.Pointer(pParse)).FiSelfTab > 0 { iAddr = Xsqlite3VdbeAddOp3(tls, v, OP_IfNullRow, (*Parse)(unsafe.Pointer(pParse)).FiSelfTab-1, 0, regOut) @@ -105237,6 +105232,9 @@ if iAddr != 0 { Xsqlite3VdbeJumpHere(tls, v, iAddr) } + if (*Parse)(unsafe.Pointer(pParse)).FnErr > nErr { + (*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).FerrByteOffset = -1 + } } // Generate code to extract the value of the iCol-th column of a table. @@ -105455,6 +105453,7 @@ var p uintptr var v uintptr for p = (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr; p != 0; p = (*IndexedExpr)(unsafe.Pointer(p)).FpIENext { + var exprAff U8 var iDataCur int32 = (*IndexedExpr)(unsafe.Pointer(p)).FiDataCur if iDataCur < 0 { continue @@ -105468,6 +105467,14 @@ if Xsqlite3ExprCompare(tls, uintptr(0), pExpr, (*IndexedExpr)(unsafe.Pointer(p)).FpExpr, iDataCur) != 0 { continue } + + exprAff = U8(Xsqlite3ExprAffinity(tls, pExpr)) + if int32(exprAff) <= SQLITE_AFF_BLOB && int32((*IndexedExpr)(unsafe.Pointer(p)).Faff) != SQLITE_AFF_BLOB || + int32(exprAff) == SQLITE_AFF_TEXT && int32((*IndexedExpr)(unsafe.Pointer(p)).Faff) != SQLITE_AFF_TEXT || + int32(exprAff) >= SQLITE_AFF_NUMERIC && int32((*IndexedExpr)(unsafe.Pointer(p)).Faff) != SQLITE_AFF_NUMERIC { + continue + } + v = (*Parse)(unsafe.Pointer(pParse)).FpVdbe if (*IndexedExpr)(unsafe.Pointer(p)).FbMaybeNullRow != 0 { @@ -106241,7 +106248,7 @@ if !((*Expr)(unsafe.Pointer(pExpr)).FiTable != n1) { goto __122 } - Xsqlite3ErrorMsg(tls, pParse, ts+9004, + Xsqlite3ErrorMsg(tls, pParse, ts+8993, libc.VaList(bp+24, (*Expr)(unsafe.Pointer(pExpr)).FiTable, n1)) __122: ; @@ -106263,11 +106270,10 @@ return target __50: - if !(!((*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_Collate) != U32(0)) && - (*Expr)(unsafe.Pointer(pExpr)).FpLeft != 0 && - int32((*Expr)(unsafe.Pointer((*Expr)(unsafe.Pointer(pExpr)).FpLeft)).Fop) == TK_FUNCTION) { + if !!((*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_Collate) != U32(0)) { goto __123 } + inReg = Xsqlite3ExprCodeTarget(tls, pParse, (*Expr)(unsafe.Pointer(pExpr)).FpLeft, target) if !(inReg != target) { goto __125 @@ -106338,13 +106344,19 @@ ; __127: ; - addrINR = Xsqlite3VdbeAddOp1(tls, v, OP_IfNullRow, (*Expr)(unsafe.Pointer(pExpr)).FiTable) + addrINR = Xsqlite3VdbeAddOp3(tls, v, OP_IfNullRow, (*Expr)(unsafe.Pointer(pExpr)).FiTable, 0, target) (*Parse)(unsafe.Pointer(pParse)).FokConstFactor = U8(0) inReg = Xsqlite3ExprCodeTarget(tls, pParse, (*Expr)(unsafe.Pointer(pExpr)).FpLeft, target) (*Parse)(unsafe.Pointer(pParse)).FokConstFactor = okConstFactor + if !(inReg != target) { + goto __130 + } + Xsqlite3VdbeAddOp2(tls, v, OP_SCopy, inReg, target) + inReg = target +__130: + ; Xsqlite3VdbeJumpHere(tls, v, addrINR) - Xsqlite3VdbeChangeP3(tls, v, addrINR, inReg) goto __5 __56: @@ -106357,15 +106369,15 @@ nExpr = (*ExprList)(unsafe.Pointer(pEList)).FnExpr endLabel = Xsqlite3VdbeMakeLabel(tls, pParse) if !(libc.AssignUintptr(&pX, (*Expr)(unsafe.Pointer(pExpr)).FpLeft) != uintptr(0)) { - goto __130 + goto __131 } pDel = Xsqlite3ExprDup(tls, db1, pX, 0) if !((*Sqlite3)(unsafe.Pointer(db1)).FmallocFailed != 0) { - 
goto __131 + goto __132 } Xsqlite3ExprDelete(tls, db1, pDel) goto __5 -__131: +__132: ; exprToRegister(tls, pDel, exprCodeVector(tls, pParse, pDel, bp+40)) @@ -106375,22 +106387,22 @@ pTest = bp + 120 *(*int32)(unsafe.Pointer(bp + 40)) = 0 -__130: +__131: ; i1 = 0 -__132: +__133: if !(i1 < nExpr-1) { - goto __134 + goto __135 } if !(pX != 0) { - goto __135 + goto __136 } (*Expr)(unsafe.Pointer(bp + 120)).FpRight = (*ExprList_item)(unsafe.Pointer(aListelem + uintptr(i1)*32)).FpExpr - goto __136 -__135: - pTest = (*ExprList_item)(unsafe.Pointer(aListelem + uintptr(i1)*32)).FpExpr + goto __137 __136: + pTest = (*ExprList_item)(unsafe.Pointer(aListelem + uintptr(i1)*32)).FpExpr +__137: ; nextCase = Xsqlite3VdbeMakeLabel(tls, pParse) @@ -106399,21 +106411,21 @@ Xsqlite3ExprCode(tls, pParse, (*ExprList_item)(unsafe.Pointer(aListelem+uintptr(i1+1)*32)).FpExpr, target) Xsqlite3VdbeGoto(tls, v, endLabel) Xsqlite3VdbeResolveLabel(tls, v, nextCase) - goto __133 -__133: - i1 = i1 + 2 - goto __132 goto __134 __134: + i1 = i1 + 2 + goto __133 + goto __135 +__135: ; if !(nExpr&1 != 0) { - goto __137 + goto __138 } Xsqlite3ExprCode(tls, pParse, (*ExprList_item)(unsafe.Pointer(pEList+8+uintptr(nExpr-1)*32)).FpExpr, target) - goto __138 -__137: - Xsqlite3VdbeAddOp2(tls, v, OP_Null, 0, target) + goto __139 __138: + Xsqlite3VdbeAddOp2(tls, v, OP_Null, 0, target) +__139: ; Xsqlite3ExprDelete(tls, db1, pDel) setDoNotMergeFlagOnCopy(tls, v) @@ -106423,27 +106435,27 @@ __57: ; if !(!(int32((*Parse)(unsafe.Pointer(pParse)).FpTriggerTab) != 0) && !(int32((*Parse)(unsafe.Pointer(pParse)).Fnested) != 0)) { - goto __139 + goto __140 } Xsqlite3ErrorMsg(tls, pParse, ts+9434, 0) return 0 -__139: +__140: ; if !(int32((*Expr)(unsafe.Pointer(pExpr)).FaffExpr) == OE_Abort) { - goto __140 + goto __141 } Xsqlite3MayAbort(tls, pParse) -__140: +__141: ; if !(int32((*Expr)(unsafe.Pointer(pExpr)).FaffExpr) == OE_Ignore) { - goto __141 + goto __142 } Xsqlite3VdbeAddOp4(tls, v, OP_Halt, SQLITE_OK, OE_Ignore, 0, *(*uintptr)(unsafe.Pointer(pExpr + 8)), 0) - goto __142 -__141: + goto __143 +__142: Xsqlite3HaltConstraint(tls, pParse, func() int32 { if (*Parse)(unsafe.Pointer(pParse)).FpTriggerTab != 0 { @@ -106452,7 +106464,7 @@ return SQLITE_ERROR }(), int32((*Expr)(unsafe.Pointer(pExpr)).FaffExpr), *(*uintptr)(unsafe.Pointer(pExpr + 8)), int8(0), uint8(0)) -__142: +__143: ; goto __5 @@ -109120,7 +109132,7 @@ return SQLITE_NOMEM } if Xsqlite3_strnicmp(tls, zSql, ts+12274, 7) != 0 { - return Xsqlite3CorruptError(tls, 113494) + return Xsqlite3CorruptError(tls, 113516) } (*Sqlite3)(unsafe.Pointer(db)).Finit.FiDb = func() uint8 { if bTemp != 0 { @@ -109137,7 +109149,7 @@ } if rc == SQLITE_OK && ((*Parse)(unsafe.Pointer(p)).FpNewTable == uintptr(0) && (*Parse)(unsafe.Pointer(p)).FpNewIndex == uintptr(0) && (*Parse)(unsafe.Pointer(p)).FpNewTrigger == uintptr(0)) { - rc = Xsqlite3CorruptError(tls, 113505) + rc = Xsqlite3CorruptError(tls, 113527) } (*Sqlite3)(unsafe.Pointer(db)).Finit.FiDb = U8(0) @@ -110058,7 +110070,7 @@ goto __2 } - rc = Xsqlite3CorruptError(tls, 114441) + rc = Xsqlite3CorruptError(tls, 114463) goto drop_column_done __2: ; @@ -114422,6 +114434,12 @@ pExpr = Xsqlite3PExpr(tls, pParse, TK_UPLUS, pExpr, uintptr(0)) __11: ; + if !(pExpr != 0 && int32((*Expr)(unsafe.Pointer(pExpr)).Fop) != TK_RAISE) { + goto __12 + } + (*Expr)(unsafe.Pointer(pExpr)).FaffExpr = (*Column)(unsafe.Pointer(pCol)).Faffinity +__12: + ; Xsqlite3ColumnSetExpr(tls, pParse, pTab, pCol, pExpr) pExpr = uintptr(0) goto generated_done @@ -115586,7 +115604,7 @@ 
if Xsqlite3_strnicmp(tls, (*Table)(unsafe.Pointer(pTab)).FzName+uintptr(7), ts+14829, 4) == 0 { return 0 } - if Xsqlite3_strnicmp(tls, (*Table)(unsafe.Pointer(pTab)).FzName+uintptr(7), ts+8485, 10) == 0 { + if Xsqlite3_strnicmp(tls, (*Table)(unsafe.Pointer(pTab)).FzName+uintptr(7), ts+8474, 10) == 0 { return 0 } return 1 @@ -116832,7 +116850,7 @@ goto __101 } Xsqlite3ErrorMsg(tls, pParse, ts+15497, 0) - (*Parse)(unsafe.Pointer(pParse)).Frc = Xsqlite3CorruptError(tls, 121835) + (*Parse)(unsafe.Pointer(pParse)).Frc = Xsqlite3CorruptError(tls, 121859) goto exit_create_index __101: ; @@ -118877,7 +118895,7 @@ goto __16 __15: wcf = U16(WHERE_ONEPASS_DESIRED | WHERE_DUPLICATES_OK) - if !((*NameContext)(unsafe.Pointer(bp+16)).FncFlags&NC_VarSelect != 0) { + if !((*NameContext)(unsafe.Pointer(bp+16)).FncFlags&NC_Subquery != 0) { goto __23 } bComplex = 1 @@ -125345,7 +125363,7 @@ if !!(Xsqlite3SafetyCheckOk(tls, db) != 0) { goto __1 } - return Xsqlite3MisuseError(tls, 131895) + return Xsqlite3MisuseError(tls, 131931) __1: ; if !(zSql == uintptr(0)) { @@ -126747,7 +126765,7 @@ } else if (*FuncDef)(unsafe.Pointer(p)).FxFinalize != uintptr(0) { zType = ts + 18882 } else { - zType = ts + 8880 + zType = ts + 8869 } Xsqlite3VdbeMultiLoad(tls, v, 1, ts+18884, libc.VaList(bp, (*FuncDef)(unsafe.Pointer(p)).FzName, isBuiltin, @@ -126908,6 +126926,7 @@ var zErr2 uintptr var k3 int32 var pCheck uintptr + var jmp7 int32 var jmp6 int32 var iCol1 int32 var uniqOk int32 @@ -128266,7 +128285,7 @@ goto __224 } pMod = (*HashElem)(unsafe.Pointer(j1)).Fdata - Xsqlite3VdbeMultiLoad(tls, v, 1, ts+8880, libc.VaList(bp+272, (*Module)(unsafe.Pointer(pMod)).FzName)) + Xsqlite3VdbeMultiLoad(tls, v, 1, ts+8869, libc.VaList(bp+272, (*Module)(unsafe.Pointer(pMod)).FzName)) goto __223 __223: j1 = (*HashElem)(unsafe.Pointer(j1)).Fnext @@ -128282,7 +128301,7 @@ if !(i6 < int32(uint64(unsafe.Sizeof(aPragmaName))/uint64(unsafe.Sizeof(PragmaName{})))) { goto __227 } - Xsqlite3VdbeMultiLoad(tls, v, 1, ts+8880, libc.VaList(bp+280, aPragmaName[i6].FzName)) + Xsqlite3VdbeMultiLoad(tls, v, 1, ts+8869, libc.VaList(bp+280, aPragmaName[i6].FzName)) goto __226 __226: i6++ @@ -129087,80 +129106,94 @@ jmp4 = integrityCheckResultRow(tls, v) Xsqlite3VdbeJumpHere(tls, v, jmp21) + if !((*Table)(unsafe.Pointer(pTab9)).FtabFlags&U32(TF_WithoutRowid) == U32(0)) { + goto __352 + } + Xsqlite3VdbeAddOp2(tls, v, OP_IdxRowid, *(*int32)(unsafe.Pointer(bp + 624))+j4, 3) + jmp7 = Xsqlite3VdbeAddOp3(tls, v, OP_Eq, 3, 0, r1+int32((*Index)(unsafe.Pointer(pIdx5)).FnColumn)-1) + + Xsqlite3VdbeLoadString(tls, v, 3, + ts+19298) + Xsqlite3VdbeAddOp3(tls, v, OP_Concat, 7, 3, 3) + Xsqlite3VdbeLoadString(tls, v, 4, ts+19334) + Xsqlite3VdbeGoto(tls, v, jmp5-1) + Xsqlite3VdbeJumpHere(tls, v, jmp7) +__352: + ; label6 = 0 kk = 0 -__352: +__353: if !(kk < int32((*Index)(unsafe.Pointer(pIdx5)).FnKeyCol)) { - goto __354 + goto __355 } if !(*(*uintptr)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx5)).FazColl + uintptr(kk)*8)) == uintptr(unsafe.Pointer(&Xsqlite3StrBINARY))) { - goto __355 + goto __356 } - goto __353 -__355: + goto __354 +__356: ; if !(label6 == 0) { - goto __356 + goto __357 } label6 = Xsqlite3VdbeMakeLabel(tls, pParse) -__356: +__357: ; Xsqlite3VdbeAddOp3(tls, v, OP_Column, *(*int32)(unsafe.Pointer(bp + 624))+j4, kk, 3) Xsqlite3VdbeAddOp3(tls, v, OP_Ne, 3, label6, r1+kk) - goto __353 -__353: - kk++ - goto __352 goto __354 __354: + kk++ + goto __353 + goto __355 +__355: ; if !(label6 != 0) { - goto __357 + goto __358 } jmp6 = Xsqlite3VdbeAddOp0(tls, v, OP_Goto) 
Xsqlite3VdbeResolveLabel(tls, v, label6) Xsqlite3VdbeLoadString(tls, v, 3, ts+19272) Xsqlite3VdbeAddOp3(tls, v, OP_Concat, 7, 3, 3) - Xsqlite3VdbeLoadString(tls, v, 4, ts+19298) + Xsqlite3VdbeLoadString(tls, v, 4, ts+19345) Xsqlite3VdbeGoto(tls, v, jmp5-1) Xsqlite3VdbeJumpHere(tls, v, jmp6) -__357: +__358: ; if !(int32((*Index)(unsafe.Pointer(pIdx5)).FonError) != OE_None) { - goto __358 + goto __359 } uniqOk = Xsqlite3VdbeMakeLabel(tls, pParse) kk = 0 -__359: +__360: if !(kk < int32((*Index)(unsafe.Pointer(pIdx5)).FnKeyCol)) { - goto __361 + goto __362 } iCol1 = int32(*(*I16)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx5)).FaiColumn + uintptr(kk)*2))) if !(iCol1 >= 0 && uint32(int32(*(*uint8)(unsafe.Pointer((*Table)(unsafe.Pointer(pTab9)).FaCol + uintptr(iCol1)*24 + 8))&0xf>>0)) != 0) { - goto __362 + goto __363 } - goto __360 -__362: + goto __361 +__363: ; Xsqlite3VdbeAddOp2(tls, v, OP_IsNull, r1+kk, uniqOk) - goto __360 -__360: - kk++ - goto __359 goto __361 __361: + kk++ + goto __360 + goto __362 +__362: ; jmp61 = Xsqlite3VdbeAddOp1(tls, v, OP_Next, *(*int32)(unsafe.Pointer(bp + 624))+j4) Xsqlite3VdbeGoto(tls, v, uniqOk) Xsqlite3VdbeJumpHere(tls, v, jmp61) Xsqlite3VdbeAddOp4Int(tls, v, OP_IdxGT, *(*int32)(unsafe.Pointer(bp + 624))+j4, uniqOk, r1, int32((*Index)(unsafe.Pointer(pIdx5)).FnKeyCol)) - Xsqlite3VdbeLoadString(tls, v, 3, ts+19325) + Xsqlite3VdbeLoadString(tls, v, 3, ts+19372) Xsqlite3VdbeGoto(tls, v, jmp5) Xsqlite3VdbeResolveLabel(tls, v, uniqOk) -__358: +__359: ; Xsqlite3VdbeJumpHere(tls, v, jmp4) Xsqlite3ResolvePartIdxLabel(tls, pParse, *(*int32)(unsafe.Pointer(bp + 640))) @@ -129177,20 +129210,20 @@ Xsqlite3VdbeAddOp2(tls, v, OP_Next, *(*int32)(unsafe.Pointer(bp + 620)), loopTop) Xsqlite3VdbeJumpHere(tls, v, loopTop-1) if !!(isQuick != 0) { - goto __363 + goto __364 } - Xsqlite3VdbeLoadString(tls, v, 2, ts+19352) + Xsqlite3VdbeLoadString(tls, v, 2, ts+19399) j4 = 0 pIdx5 = (*Table)(unsafe.Pointer(pTab9)).FpIndex -__364: +__365: if !(pIdx5 != 0) { - goto __366 + goto __367 } if !(pPk1 == pIdx5) { - goto __367 + goto __368 } - goto __365 -__367: + goto __366 +__368: ; Xsqlite3VdbeAddOp2(tls, v, OP_Count, *(*int32)(unsafe.Pointer(bp + 624))+j4, 3) addr1 = Xsqlite3VdbeAddOp3(tls, v, OP_Eq, 8+j4, 0, 3) @@ -129199,21 +129232,21 @@ Xsqlite3VdbeAddOp3(tls, v, OP_Concat, 4, 2, 3) integrityCheckResultRow(tls, v) Xsqlite3VdbeJumpHere(tls, v, addr1) - goto __365 -__365: - pIdx5 = (*Index)(unsafe.Pointer(pIdx5)).FpNext - j4++ - goto __364 goto __366 __366: + pIdx5 = (*Index)(unsafe.Pointer(pIdx5)).FpNext + j4++ + goto __365 + goto __367 +__367: ; if !(pPk1 != 0) { - goto __368 + goto __369 } Xsqlite3ReleaseTempRange(tls, pParse, r2, int32((*Index)(unsafe.Pointer(pPk1)).FnKeyCol)) -__368: +__369: ; -__363: +__364: ; goto __298 __298: @@ -129231,14 +129264,14 @@ ; aOp2 = Xsqlite3VdbeAddOpList(tls, v, int32(uint64(unsafe.Sizeof(endCode))/uint64(unsafe.Sizeof(VdbeOpList{}))), uintptr(unsafe.Pointer(&endCode)), iLn5) if !(aOp2 != 0) { - goto __369 + goto __370 } (*VdbeOp)(unsafe.Pointer(aOp2)).Fp2 = 1 - *(*int32)(unsafe.Pointer(bp + 616)) (*VdbeOp)(unsafe.Pointer(aOp2 + 2*24)).Fp4type = int8(-1) - *(*uintptr)(unsafe.Pointer(aOp2 + 2*24 + 16)) = ts + 19381 + *(*uintptr)(unsafe.Pointer(aOp2 + 2*24 + 16)) = ts + 19428 (*VdbeOp)(unsafe.Pointer(aOp2 + 5*24)).Fp4type = int8(-1) *(*uintptr)(unsafe.Pointer(aOp2 + 5*24 + 16)) = Xsqlite3ErrStr(tls, SQLITE_CORRUPT) -__369: +__370: ; Xsqlite3VdbeChangeP3(tls, v, 0, Xsqlite3VdbeCurrentAddr(tls, v)-2) @@ -129246,27 +129279,27 @@ __46: if !!(zRight != 0) { - 
goto __370 + goto __371 } if !(Xsqlite3ReadSchema(tls, pParse) != 0) { - goto __372 + goto __373 } goto pragma_out -__372: +__373: ; returnSingleText(tls, v, encnames1[(*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).Fenc].FzName) - goto __371 -__370: + goto __372 +__371: if !((*Sqlite3)(unsafe.Pointer(db)).FmDbFlags&U32(DBFLAG_EncodingFixed) == U32(0)) { - goto __373 + goto __374 } pEnc = uintptr(unsafe.Pointer(&encnames1)) -__374: +__375: if !((*EncName)(unsafe.Pointer(pEnc)).FzName != 0) { - goto __376 + goto __377 } if !(0 == Xsqlite3StrICmp(tls, zRight, (*EncName)(unsafe.Pointer(pEnc)).FzName)) { - goto __377 + goto __378 } if (*EncName)(unsafe.Pointer(pEnc)).Fenc != 0 { enc = (*EncName)(unsafe.Pointer(pEnc)).Fenc @@ -129275,25 +129308,25 @@ } (*Schema)(unsafe.Pointer((*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb)).FpSchema)).Fenc = enc Xsqlite3SetTextEncoding(tls, db, enc) - goto __376 -__377: + goto __377 +__378: ; - goto __375 -__375: - pEnc += 16 - goto __374 goto __376 __376: + pEnc += 16 + goto __375 + goto __377 +__377: ; if !!(int32((*EncName)(unsafe.Pointer(pEnc)).FzName) != 0) { - goto __378 + goto __379 } - Xsqlite3ErrorMsg(tls, pParse, ts+19384, libc.VaList(bp+464, zRight)) -__378: + Xsqlite3ErrorMsg(tls, pParse, ts+19431, libc.VaList(bp+464, zRight)) +__379: ; -__373: +__374: ; -__371: +__372: ; goto __15 @@ -129301,15 +129334,15 @@ iCookie = int32((*PragmaName)(unsafe.Pointer(pPragma)).FiArg) Xsqlite3VdbeUsesBtree(tls, v, iDb) if !(zRight != 0 && int32((*PragmaName)(unsafe.Pointer(pPragma)).FmPragFlg)&PragFlg_ReadOnly == 0) { - goto __379 + goto __380 } aOp3 = Xsqlite3VdbeAddOpList(tls, v, int32(uint64(unsafe.Sizeof(setCookie))/uint64(unsafe.Sizeof(VdbeOpList{}))), uintptr(unsafe.Pointer(&setCookie)), 0) if !(0 != 0) { - goto __381 + goto __382 } goto __15 -__381: +__382: ; (*VdbeOp)(unsafe.Pointer(aOp3)).Fp1 = iDb (*VdbeOp)(unsafe.Pointer(aOp3 + 1*24)).Fp1 = iDb @@ -129317,41 +129350,41 @@ (*VdbeOp)(unsafe.Pointer(aOp3 + 1*24)).Fp3 = Xsqlite3Atoi(tls, zRight) (*VdbeOp)(unsafe.Pointer(aOp3 + 1*24)).Fp5 = U16(1) if !(iCookie == BTREE_SCHEMA_VERSION && (*Sqlite3)(unsafe.Pointer(db)).Fflags&uint64(SQLITE_Defensive) != uint64(0)) { - goto __382 + goto __383 } (*VdbeOp)(unsafe.Pointer(aOp3 + 1*24)).Fopcode = U8(OP_Noop) -__382: +__383: ; - goto __380 -__379: + goto __381 +__380: ; aOp4 = Xsqlite3VdbeAddOpList(tls, v, int32(uint64(unsafe.Sizeof(readCookie))/uint64(unsafe.Sizeof(VdbeOpList{}))), uintptr(unsafe.Pointer(&readCookie)), 0) if !(0 != 0) { - goto __383 + goto __384 } goto __15 -__383: +__384: ; (*VdbeOp)(unsafe.Pointer(aOp4)).Fp1 = iDb (*VdbeOp)(unsafe.Pointer(aOp4 + 1*24)).Fp1 = iDb (*VdbeOp)(unsafe.Pointer(aOp4 + 1*24)).Fp3 = iCookie Xsqlite3VdbeReusable(tls, v) -__380: +__381: ; goto __15 __48: i10 = 0 (*Parse)(unsafe.Pointer(pParse)).FnMem = 1 -__384: +__385: if !(libc.AssignUintptr(&zOpt, Xsqlite3_compileoption_get(tls, libc.PostIncInt32(&i10, 1))) != uintptr(0)) { - goto __385 + goto __386 } Xsqlite3VdbeLoadString(tls, v, 1, zOpt) Xsqlite3VdbeAddOp2(tls, v, OP_ResultRow, 1, 1) - goto __384 -__385: + goto __385 +__386: ; Xsqlite3VdbeReusable(tls, v) @@ -129366,31 +129399,31 @@ }() eMode2 = SQLITE_CHECKPOINT_PASSIVE if !(zRight != 0) { - goto __386 + goto __387 } if !(Xsqlite3StrICmp(tls, zRight, ts+18714) == 0) { - goto __387 + goto __388 } eMode2 = SQLITE_CHECKPOINT_FULL - goto __388 -__387: - if !(Xsqlite3StrICmp(tls, zRight, ts+19409) == 0) { - goto __389 + goto __389 +__388: + if !(Xsqlite3StrICmp(tls, zRight, ts+19456) == 0) { + 
goto __390 } eMode2 = SQLITE_CHECKPOINT_RESTART - goto __390 -__389: + goto __391 +__390: if !(Xsqlite3StrICmp(tls, zRight, ts+18867) == 0) { - goto __391 + goto __392 } eMode2 = SQLITE_CHECKPOINT_TRUNCATE -__391: +__392: ; -__390: +__391: ; -__388: +__389: ; -__386: +__387: ; (*Parse)(unsafe.Pointer(pParse)).FnMem = 3 Xsqlite3VdbeAddOp3(tls, v, OP_Checkpoint, iBt, eMode2, 1) @@ -129400,10 +129433,10 @@ __50: if !(zRight != 0) { - goto __392 + goto __393 } Xsqlite3_wal_autocheckpoint(tls, db, Xsqlite3Atoi(tls, zRight)) -__392: +__393: ; returnSingleInt(tls, v, func() int64 { @@ -129423,19 +129456,19 @@ __52: if !(zRight != 0) { - goto __393 + goto __394 } opMask = U32(Xsqlite3Atoi(tls, zRight)) if !(opMask&U32(0x02) == U32(0)) { - goto __395 + goto __396 } goto __15 -__395: +__396: ; - goto __394 -__393: - opMask = U32(0xfffe) + goto __395 __394: + opMask = U32(0xfffe) +__395: ; iTabCur = libc.PostIncInt32(&(*Parse)(unsafe.Pointer(pParse)).FnTab, 1) iDbLast = func() int32 { @@ -129444,86 +129477,86 @@ } return (*Sqlite3)(unsafe.Pointer(db)).FnDb - 1 }() -__396: +__397: if !(iDb <= iDbLast) { - goto __398 + goto __399 } if !(iDb == 1) { - goto __399 + goto __400 } - goto __397 -__399: + goto __398 +__400: ; Xsqlite3CodeVerifySchema(tls, pParse, iDb) pSchema = (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + uintptr(iDb)*32)).FpSchema k4 = (*Hash)(unsafe.Pointer(pSchema + 8)).Ffirst -__400: +__401: if !(k4 != 0) { - goto __402 + goto __403 } pTab10 = (*HashElem)(unsafe.Pointer(k4)).Fdata if !((*Table)(unsafe.Pointer(pTab10)).FtabFlags&U32(TF_StatsUsed) == U32(0)) { - goto __403 + goto __404 } - goto __401 -__403: + goto __402 +__404: ; szThreshold = LogEst(int32((*Table)(unsafe.Pointer(pTab10)).FnRowLogEst) + 46) pIdx6 = (*Table)(unsafe.Pointer(pTab10)).FpIndex -__404: +__405: if !(pIdx6 != 0) { - goto __406 + goto __407 } if !!(int32(*(*uint16)(unsafe.Pointer(pIdx6 + 100))&0x80>>7) != 0) { - goto __407 + goto __408 } szThreshold = int16(0) - goto __406 -__407: + goto __407 +__408: ; - goto __405 -__405: - pIdx6 = (*Index)(unsafe.Pointer(pIdx6)).FpNext - goto __404 goto __406 __406: + pIdx6 = (*Index)(unsafe.Pointer(pIdx6)).FpNext + goto __405 + goto __407 +__407: ; if !(szThreshold != 0) { - goto __408 + goto __409 } Xsqlite3OpenTable(tls, pParse, iTabCur, iDb, pTab10, OP_OpenRead) Xsqlite3VdbeAddOp3(tls, v, OP_IfSmaller, iTabCur, int32(U32(Xsqlite3VdbeCurrentAddr(tls, v)+2)+opMask&U32(1)), int32(szThreshold)) -__408: +__409: ; - zSubSql = Xsqlite3MPrintf(tls, db, ts+19417, + zSubSql = Xsqlite3MPrintf(tls, db, ts+19464, libc.VaList(bp+472, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName, (*Table)(unsafe.Pointer(pTab10)).FzName)) if !(opMask&U32(0x01) != 0) { - goto __409 + goto __410 } r11 = Xsqlite3GetTempReg(tls, pParse) Xsqlite3VdbeAddOp4(tls, v, OP_String8, 0, r11, 0, zSubSql, -6) Xsqlite3VdbeAddOp2(tls, v, OP_ResultRow, r11, 1) - goto __410 -__409: - Xsqlite3VdbeAddOp4(tls, v, OP_SqlExec, 0, 0, 0, zSubSql, -6) + goto __411 __410: + Xsqlite3VdbeAddOp4(tls, v, OP_SqlExec, 0, 0, 0, zSubSql, -6) +__411: ; - goto __401 -__401: - k4 = (*HashElem)(unsafe.Pointer(k4)).Fnext - goto __400 goto __402 __402: + k4 = (*HashElem)(unsafe.Pointer(k4)).Fnext + goto __401 + goto __403 +__403: ; - goto __397 -__397: - iDb++ - goto __396 goto __398 __398: + iDb++ + goto __397 + goto __399 +__399: ; Xsqlite3VdbeAddOp0(tls, v, OP_Expire) goto __15 @@ -129531,36 +129564,36 @@ __53: ; if !(zRight != 0) { - goto __411 + goto __412 } Xsqlite3_busy_timeout(tls, db, 
Xsqlite3Atoi(tls, zRight)) -__411: +__412: ; returnSingleInt(tls, v, int64((*Sqlite3)(unsafe.Pointer(db)).FbusyTimeout)) goto __15 __54: if !(zRight != 0 && Xsqlite3DecOrHexToI64(tls, zRight, bp+648) == SQLITE_OK) { - goto __412 + goto __413 } Xsqlite3_soft_heap_limit64(tls, *(*Sqlite3_int64)(unsafe.Pointer(bp + 648))) -__412: +__413: ; returnSingleInt(tls, v, Xsqlite3_soft_heap_limit64(tls, int64(-1))) goto __15 __55: if !(zRight != 0 && Xsqlite3DecOrHexToI64(tls, zRight, bp+656) == SQLITE_OK) { - goto __413 + goto __414 } iPrior = Xsqlite3_hard_heap_limit64(tls, int64(-1)) if !(*(*Sqlite3_int64)(unsafe.Pointer(bp + 656)) > int64(0) && (iPrior == int64(0) || iPrior > *(*Sqlite3_int64)(unsafe.Pointer(bp + 656)))) { - goto __414 + goto __415 } Xsqlite3_hard_heap_limit64(tls, *(*Sqlite3_int64)(unsafe.Pointer(bp + 656))) -__414: +__415: ; -__413: +__414: ; returnSingleInt(tls, v, Xsqlite3_hard_heap_limit64(tls, int64(-1))) goto __15 @@ -129569,10 +129602,10 @@ if !(zRight != 0 && Xsqlite3DecOrHexToI64(tls, zRight, bp+664) == SQLITE_OK && *(*Sqlite3_int64)(unsafe.Pointer(bp + 664)) >= int64(0)) { - goto __415 + goto __416 } Xsqlite3_limit(tls, db, SQLITE_LIMIT_WORKER_THREADS, int32(*(*Sqlite3_int64)(unsafe.Pointer(bp + 664))&int64(0x7fffffff))) -__415: +__416: ; returnSingleInt(tls, v, int64(Xsqlite3_limit(tls, db, SQLITE_LIMIT_WORKER_THREADS, -1))) goto __15 @@ -129581,10 +129614,10 @@ if !(zRight != 0 && Xsqlite3DecOrHexToI64(tls, zRight, bp+672) == SQLITE_OK && *(*Sqlite3_int64)(unsafe.Pointer(bp + 672)) >= int64(0)) { - goto __416 + goto __417 } (*Sqlite3)(unsafe.Pointer(db)).FnAnalysisLimit = int32(*(*Sqlite3_int64)(unsafe.Pointer(bp + 672)) & int64(0x7fffffff)) -__416: +__417: ; returnSingleInt(tls, v, int64((*Sqlite3)(unsafe.Pointer(db)).FnAnalysisLimit)) goto __15 @@ -129592,10 +129625,10 @@ __15: ; if !(int32((*PragmaName)(unsafe.Pointer(pPragma)).FmPragFlg)&PragFlg_NoColumns1 != 0 && zRight != 0) { - goto __417 + goto __418 } -__417: +__418: ; pragma_out: Xsqlite3DbFree(tls, db, zLeft) @@ -129647,14 +129680,14 @@ {Fopcode: U8(OP_Goto), Fp2: int8(3)}, } var encnames1 = [9]EncName{ - {FzName: ts + 19435, Fenc: U8(SQLITE_UTF8)}, - {FzName: ts + 19440, Fenc: U8(SQLITE_UTF8)}, - {FzName: ts + 19446, Fenc: U8(SQLITE_UTF16LE)}, - {FzName: ts + 19455, Fenc: U8(SQLITE_UTF16BE)}, - {FzName: ts + 19464, Fenc: U8(SQLITE_UTF16LE)}, - {FzName: ts + 19472, Fenc: U8(SQLITE_UTF16BE)}, - {FzName: ts + 19480}, - {FzName: ts + 19487}, + {FzName: ts + 19482, Fenc: U8(SQLITE_UTF8)}, + {FzName: ts + 19487, Fenc: U8(SQLITE_UTF8)}, + {FzName: ts + 19493, Fenc: U8(SQLITE_UTF16LE)}, + {FzName: ts + 19502, Fenc: U8(SQLITE_UTF16BE)}, + {FzName: ts + 19511, Fenc: U8(SQLITE_UTF16LE)}, + {FzName: ts + 19519, Fenc: U8(SQLITE_UTF16BE)}, + {FzName: ts + 19527}, + {FzName: ts + 19534}, {}, } var setCookie = [2]VdbeOpList{ @@ -129706,7 +129739,7 @@ _ = argc _ = argv Xsqlite3StrAccumInit(tls, bp+32, uintptr(0), bp+64, int32(unsafe.Sizeof([200]int8{})), 0) - Xsqlite3_str_appendall(tls, bp+32, ts+19493) + Xsqlite3_str_appendall(tls, bp+32, ts+19540) i = 0 j = int32((*PragmaName)(unsafe.Pointer(pPragma)).FiPragCName) __1: @@ -129714,7 +129747,7 @@ goto __3 } { - Xsqlite3_str_appendf(tls, bp+32, ts+19508, libc.VaList(bp, int32(cSep), pragCName[j])) + Xsqlite3_str_appendf(tls, bp+32, ts+19555, libc.VaList(bp, int32(cSep), pragCName[j])) cSep = int8(',') } @@ -129727,16 +129760,16 @@ __3: ; if i == 0 { - Xsqlite3_str_appendf(tls, bp+32, ts+19515, libc.VaList(bp+16, (*PragmaName)(unsafe.Pointer(pPragma)).FzName)) + 
Xsqlite3_str_appendf(tls, bp+32, ts+19562, libc.VaList(bp+16, (*PragmaName)(unsafe.Pointer(pPragma)).FzName)) i++ } j = 0 if int32((*PragmaName)(unsafe.Pointer(pPragma)).FmPragFlg)&PragFlg_Result1 != 0 { - Xsqlite3_str_appendall(tls, bp+32, ts+19521) + Xsqlite3_str_appendall(tls, bp+32, ts+19568) j++ } if int32((*PragmaName)(unsafe.Pointer(pPragma)).FmPragFlg)&(PragFlg_SchemaOpt|PragFlg_SchemaReq) != 0 { - Xsqlite3_str_appendall(tls, bp+32, ts+19533) + Xsqlite3_str_appendall(tls, bp+32, ts+19580) j++ } Xsqlite3_str_append(tls, bp+32, ts+6309, 1) @@ -129919,13 +129952,13 @@ __3: ; Xsqlite3StrAccumInit(tls, bp+32, uintptr(0), uintptr(0), 0, *(*int32)(unsafe.Pointer((*PragmaVtab)(unsafe.Pointer(pTab)).Fdb + 136 + 1*4))) - Xsqlite3_str_appendall(tls, bp+32, ts+19548) + Xsqlite3_str_appendall(tls, bp+32, ts+19595) if *(*uintptr)(unsafe.Pointer(pCsr + 24 + 1*8)) != 0 { - Xsqlite3_str_appendf(tls, bp+32, ts+19556, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(pCsr + 24 + 1*8)))) + Xsqlite3_str_appendf(tls, bp+32, ts+19603, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(pCsr + 24 + 1*8)))) } Xsqlite3_str_appendall(tls, bp+32, (*PragmaName)(unsafe.Pointer((*PragmaVtab)(unsafe.Pointer(pTab)).FpName)).FzName) if *(*uintptr)(unsafe.Pointer(pCsr + 24)) != 0 { - Xsqlite3_str_appendf(tls, bp+32, ts+19560, libc.VaList(bp+16, *(*uintptr)(unsafe.Pointer(pCsr + 24)))) + Xsqlite3_str_appendf(tls, bp+32, ts+19607, libc.VaList(bp+16, *(*uintptr)(unsafe.Pointer(pCsr + 24)))) } zSql = Xsqlite3StrAccumFinish(tls, bp+32) if zSql == uintptr(0) { @@ -130002,12 +130035,12 @@ } else if *(*uintptr)(unsafe.Pointer((*InitData)(unsafe.Pointer(pData)).FpzErrMsg)) != uintptr(0) { } else if (*InitData)(unsafe.Pointer(pData)).FmInitFlags&U32(INITFLAG_AlterMask) != 0 { *(*uintptr)(unsafe.Pointer((*InitData)(unsafe.Pointer(pData)).FpzErrMsg)) = Xsqlite3MPrintf(tls, db, - ts+19564, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(azObj)), *(*uintptr)(unsafe.Pointer(azObj + 1*8)), + ts+19611, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(azObj)), *(*uintptr)(unsafe.Pointer(azObj + 1*8)), azAlterType[(*InitData)(unsafe.Pointer(pData)).FmInitFlags&U32(INITFLAG_AlterMask)-U32(1)], zExtra)) (*InitData)(unsafe.Pointer(pData)).Frc = SQLITE_ERROR } else if (*Sqlite3)(unsafe.Pointer(db)).Fflags&uint64(SQLITE_WriteSchema) != 0 { - (*InitData)(unsafe.Pointer(pData)).Frc = Xsqlite3CorruptError(tls, 137196) + (*InitData)(unsafe.Pointer(pData)).Frc = Xsqlite3CorruptError(tls, 137249) } else { var z uintptr var zObj uintptr @@ -130016,19 +130049,19 @@ } else { zObj = ts + 6360 } - z = Xsqlite3MPrintf(tls, db, ts+19592, libc.VaList(bp+32, zObj)) + z = Xsqlite3MPrintf(tls, db, ts+19639, libc.VaList(bp+32, zObj)) if zExtra != 0 && *(*int8)(unsafe.Pointer(zExtra)) != 0 { - z = Xsqlite3MPrintf(tls, db, ts+19623, libc.VaList(bp+40, z, zExtra)) + z = Xsqlite3MPrintf(tls, db, ts+19670, libc.VaList(bp+40, z, zExtra)) } *(*uintptr)(unsafe.Pointer((*InitData)(unsafe.Pointer(pData)).FpzErrMsg)) = z - (*InitData)(unsafe.Pointer(pData)).Frc = Xsqlite3CorruptError(tls, 137203) + (*InitData)(unsafe.Pointer(pData)).Frc = Xsqlite3CorruptError(tls, 137256) } } var azAlterType = [3]uintptr{ - ts + 19631, - ts + 19638, - ts + 19650, + ts + 19678, + ts + 19685, + ts + 19697, } // Check to see if any sibling index (another index on the same table) @@ -130120,7 +130153,7 @@ var pIndex uintptr pIndex = Xsqlite3FindIndex(tls, db, *(*uintptr)(unsafe.Pointer(argv + 1*8)), (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName) if pIndex == uintptr(0) { - 
corruptSchema(tls, pData, argv, ts+19661) + corruptSchema(tls, pData, argv, ts+19708) } else if Xsqlite3GetUInt32(tls, *(*uintptr)(unsafe.Pointer(argv + 3*8)), pIndex+88) == 0 || (*Index)(unsafe.Pointer(pIndex)).Ftnum < Pgno(2) || (*Index)(unsafe.Pointer(pIndex)).Ftnum > (*InitData)(unsafe.Pointer(pData)).FmxPage || @@ -130168,7 +130201,7 @@ }()) *(*uintptr)(unsafe.Pointer(bp + 16 + 2*8)) = *(*uintptr)(unsafe.Pointer(bp + 16 + 1*8)) *(*uintptr)(unsafe.Pointer(bp + 16 + 3*8)) = ts + 9290 - *(*uintptr)(unsafe.Pointer(bp + 16 + 4*8)) = ts + 19674 + *(*uintptr)(unsafe.Pointer(bp + 16 + 4*8)) = ts + 19721 *(*uintptr)(unsafe.Pointer(bp + 16 + 5*8)) = uintptr(0) (*InitData)(unsafe.Pointer(bp + 64)).Fdb = db (*InitData)(unsafe.Pointer(bp + 64)).FiDb = iDb @@ -130297,7 +130330,7 @@ if !(int32((*Schema)(unsafe.Pointer((*Db)(unsafe.Pointer(pDb)).FpSchema)).Ffile_format) > SQLITE_MAX_FILE_FORMAT) { goto __19 } - Xsqlite3SetString(tls, pzErrMsg, db, ts+19746) + Xsqlite3SetString(tls, pzErrMsg, db, ts+19793) rc = SQLITE_ERROR goto initone_error_out __19: @@ -130311,7 +130344,7 @@ (*InitData)(unsafe.Pointer(bp + 64)).FmxPage = Xsqlite3BtreeLastPage(tls, (*Db)(unsafe.Pointer(pDb)).FpBt) zSql = Xsqlite3MPrintf(tls, db, - ts+19770, + ts+19817, libc.VaList(bp, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName, zSchemaTabName)) xAuth = (*Sqlite3)(unsafe.Pointer(db)).FxAuth @@ -130643,7 +130676,7 @@ goto __8 } zDb = (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + uintptr(i)*32)).FzDbSName - Xsqlite3ErrorWithMsg(tls, db, rc, ts+19804, libc.VaList(bp, zDb)) + Xsqlite3ErrorWithMsg(tls, db, rc, ts+19851, libc.VaList(bp, zDb)) goto end_prepare __8: @@ -130673,7 +130706,7 @@ if !(nBytes > mxLen) { goto __12 } - Xsqlite3ErrorWithMsg(tls, db, SQLITE_TOOBIG, ts+19834, 0) + Xsqlite3ErrorWithMsg(tls, db, SQLITE_TOOBIG, ts+19881, 0) rc = Xsqlite3ApiExit(tls, db, SQLITE_TOOBIG) goto end_prepare __12: @@ -130769,7 +130802,7 @@ *(*uintptr)(unsafe.Pointer(ppStmt)) = uintptr(0) if !(Xsqlite3SafetyCheckOk(tls, db) != 0) || zSql == uintptr(0) { - return Xsqlite3MisuseError(tls, 137995) + return Xsqlite3MisuseError(tls, 138048) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) Xsqlite3BtreeEnterAll(tls, db) @@ -130868,7 +130901,7 @@ *(*uintptr)(unsafe.Pointer(ppStmt)) = uintptr(0) if !(Xsqlite3SafetyCheckOk(tls, db) != 0) || zSql == uintptr(0) { - return Xsqlite3MisuseError(tls, 138143) + return Xsqlite3MisuseError(tls, 138196) } if nBytes >= 0 { var sz int32 @@ -131195,13 +131228,13 @@ zSp2++ } Xsqlite3ErrorMsg(tls, pParse, - ts+19853, libc.VaList(bp, pA, zSp1, pB, zSp2, pC)) + ts+19900, libc.VaList(bp, pA, zSp1, pB, zSp2, pC)) jointype = JT_INNER } return jointype } -var zKeyText = *(*[34]int8)(unsafe.Pointer(ts + 19883)) +var zKeyText = *(*[34]int8)(unsafe.Pointer(ts + 19930)) var aKeyword = [7]struct { Fi U8 FnChar U8 @@ -131376,7 +131409,7 @@ var pUsing uintptr = uintptr(0) if uint32(int32(*(*uint16)(unsafe.Pointer(pRight + 60 + 4))&0x400>>10)) != 0 || *(*uintptr)(unsafe.Pointer(pRight + 72)) != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+19917, libc.VaList(bp, 0)) + ts+19964, libc.VaList(bp, 0)) return 1 } for j = 0; j < int32((*Table)(unsafe.Pointer(pRightTab)).FnCol); j++ { @@ -131421,7 +131454,7 @@ tableAndColumnIndex(tls, pSrc, 0, i, zName, bp+24, bp+28, int32(*(*uint16)(unsafe.Pointer(pRight + 60 + 4))&0x1000>>12)) == 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+19967, libc.VaList(bp+8, zName)) + ts+20014, libc.VaList(bp+8, zName)) return 1 } pE1 = 
Xsqlite3CreateColumnExpr(tls, db, pSrc, *(*int32)(unsafe.Pointer(bp + 24)), *(*int32)(unsafe.Pointer(bp + 28))) @@ -131432,7 +131465,7 @@ int32(*(*uint16)(unsafe.Pointer(pRight + 60 + 4))&0x1000>>12)) != 0 { if int32(*(*uint16)(unsafe.Pointer(pSrc + 8 + uintptr(*(*int32)(unsafe.Pointer(bp + 24)))*104 + 60 + 4))&0x400>>10) == 0 || Xsqlite3IdListIndex(tls, *(*uintptr)(unsafe.Pointer(pSrc + 8 + uintptr(*(*int32)(unsafe.Pointer(bp + 24)))*104 + 72)), zName) < 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+20031, + Xsqlite3ErrorMsg(tls, pParse, ts+20078, libc.VaList(bp+16, zName)) break } @@ -132060,16 +132093,16 @@ var z uintptr switch id { case TK_ALL: - z = ts + 20068 + z = ts + 20115 break case TK_INTERSECT: - z = ts + 20078 + z = ts + 20125 break case TK_EXCEPT: - z = ts + 20088 + z = ts + 20135 break default: - z = ts + 20095 + z = ts + 20142 break } return z @@ -132079,7 +132112,7 @@ bp := tls.Alloc(8) defer tls.Free(8) - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+20101, libc.VaList(bp, zUsage)) + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+20148, libc.VaList(bp, zUsage)) } func generateSortTail(tls *libc.TLS, pParse uintptr, p uintptr, pSort uintptr, nColumn int32, pDest uintptr) { @@ -132105,9 +132138,9 @@ var nRefKey int32 = 0 var aOutEx uintptr = (*Select)(unsafe.Pointer(p)).FpEList + 8 - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+20124, libc.VaList(bp, func() uintptr { + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+20171, libc.VaList(bp, func() uintptr { if (*SortCtx)(unsafe.Pointer(pSort)).FnOBSat > 0 { - return ts + 20155 + return ts + 20202 } return ts + 1544 }())) @@ -132451,7 +132484,7 @@ } else { var z uintptr = (*ExprList_item)(unsafe.Pointer(pEList + 8 + uintptr(i)*32)).FzEName if z == uintptr(0) { - z = Xsqlite3MPrintf(tls, db, ts+20170, libc.VaList(bp+16, i+1)) + z = Xsqlite3MPrintf(tls, db, ts+20217, libc.VaList(bp+16, i+1)) } else { z = Xsqlite3DbStrDup(tls, db, z) } @@ -132551,7 +132584,7 @@ if zName != 0 && !(Xsqlite3IsTrueOrFalse(tls, zName) != 0) { zName = Xsqlite3DbStrDup(tls, db, zName) } else { - zName = Xsqlite3MPrintf(tls, db, ts+20170, libc.VaList(bp, i+1)) + zName = Xsqlite3MPrintf(tls, db, ts+20217, libc.VaList(bp, i+1)) } *(*U32)(unsafe.Pointer(bp + 56)) = U32(0) @@ -132567,7 +132600,7 @@ nName = j } } - zName = Xsqlite3MPrintf(tls, db, ts+20179, libc.VaList(bp+8, nName, zName, libc.PreIncUint32(&*(*U32)(unsafe.Pointer(bp + 56)), 1))) + zName = Xsqlite3MPrintf(tls, db, ts+20226, libc.VaList(bp+8, nName, zName, libc.PreIncUint32(&*(*U32)(unsafe.Pointer(bp + 56)), 1))) Xsqlite3ProgressCheck(tls, pParse) if *(*U32)(unsafe.Pointer(bp + 56)) > U32(3) { Xsqlite3_randomness(tls, int32(unsafe.Sizeof(U32(0))), bp+56) @@ -132650,8 +132683,6 @@ (*Column)(unsafe.Pointer(pCol)).Faffinity = Xsqlite3ExprAffinity(tls, p) if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) <= SQLITE_AFF_NONE { (*Column)(unsafe.Pointer(pCol)).Faffinity = aff - } else if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) >= SQLITE_AFF_NUMERIC && int32((*Expr)(unsafe.Pointer(p)).Fop) == TK_CAST { - (*Column)(unsafe.Pointer(pCol)).Faffinity = int8(SQLITE_AFF_FLEXNUM) } if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) >= SQLITE_AFF_TEXT && (*Select)(unsafe.Pointer(pSelect)).FpNext != 0 { var m int32 = 0 @@ -132666,12 +132697,15 @@ } else if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) >= SQLITE_AFF_NUMERIC && m&0x02 != 0 { (*Column)(unsafe.Pointer(pCol)).Faffinity = int8(SQLITE_AFF_BLOB) } + if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) >= SQLITE_AFF_NUMERIC && 
int32((*Expr)(unsafe.Pointer(p)).Fop) == TK_CAST { + (*Column)(unsafe.Pointer(pCol)).Faffinity = int8(SQLITE_AFF_FLEXNUM) + } } zType = columnTypeImpl(tls, bp, p, uintptr(0), uintptr(0), uintptr(0)) if zType == uintptr(0) || int32((*Column)(unsafe.Pointer(pCol)).Faffinity) != int32(Xsqlite3AffinityType(tls, zType, uintptr(0))) { if int32((*Column)(unsafe.Pointer(pCol)).Faffinity) == SQLITE_AFF_NUMERIC || int32((*Column)(unsafe.Pointer(pCol)).Faffinity) == SQLITE_AFF_FLEXNUM { - zType = ts + 20187 + zType = ts + 20234 } else { zType = uintptr(0) for j = 1; j < SQLITE_N_STDTYPE; j++ { @@ -132887,7 +132921,7 @@ if !((*Select)(unsafe.Pointer(p)).FpWin != 0) { goto __1 } - Xsqlite3ErrorMsg(tls, pParse, ts+20191, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+20238, 0) return __1: ; @@ -132978,7 +133012,7 @@ if !((*Select)(unsafe.Pointer(pFirstRec)).FselFlags&U32(SF_Aggregate) != 0) { goto __15 } - Xsqlite3ErrorMsg(tls, pParse, ts+20240, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+20287, 0) goto end_of_recursive_query __15: ; @@ -132998,7 +133032,7 @@ ; pSetup = (*Select)(unsafe.Pointer(pFirstRec)).FpPrior (*Select)(unsafe.Pointer(pSetup)).FpNext = uintptr(0) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+20282, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+20329, 0) rc = Xsqlite3Select(tls, pParse, pSetup, bp) (*Select)(unsafe.Pointer(pSetup)).FpNext = p if !(rc != 0) { @@ -133035,7 +133069,7 @@ Xsqlite3VdbeResolveLabel(tls, v, addrCont) (*Select)(unsafe.Pointer(pFirstRec)).FpPrior = uintptr(0) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+20288, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+20335, 0) Xsqlite3Select(tls, pParse, p, bp) (*Select)(unsafe.Pointer(pFirstRec)).FpPrior = pSetup @@ -133069,11 +133103,11 @@ p = (*Select)(unsafe.Pointer(p)).FpPrior nRow = nRow + bShowAll } - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+20303, libc.VaList(bp, nRow, func() uintptr { + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+20350, libc.VaList(bp, nRow, func() uintptr { if nRow == 1 { return ts + 1544 } - return ts + 20326 + return ts + 20373 }())) for p != 0 { selectInnerLoop(tls, pParse, p, -1, uintptr(0), uintptr(0), pDest, 1, 1) @@ -133174,8 +133208,8 @@ if !((*Select)(unsafe.Pointer(pPrior)).FpPrior == uintptr(0)) { goto __8 } - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+20328, 0) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+20343, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+20375, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+20390, 0) __8: ; switch int32((*Select)(unsafe.Pointer(p)).Fop) { @@ -133222,7 +133256,7 @@ ; __15: ; - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+20068, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+20115, 0) rc = Xsqlite3Select(tls, pParse, p, bp+16) @@ -133289,7 +133323,7 @@ pLimit = (*Select)(unsafe.Pointer(p)).FpLimit (*Select)(unsafe.Pointer(p)).FpLimit = uintptr(0) (*SelectDest)(unsafe.Pointer(bp + 64)).FeDest = op - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+20362, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+20409, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) rc = Xsqlite3Select(tls, pParse, p, bp+64) @@ -133351,7 +133385,7 @@ pLimit1 = (*Select)(unsafe.Pointer(p)).FpLimit (*Select)(unsafe.Pointer(p)).FpLimit = uintptr(0) (*SelectDest)(unsafe.Pointer(bp + 104)).FiSDParm = tab2 - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+20362, libc.VaList(bp+8, Xsqlite3SelectOpName(tls, 
int32((*Select)(unsafe.Pointer(p)).Fop)))) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+20409, libc.VaList(bp+8, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) rc = Xsqlite3Select(tls, pParse, p, bp+104) @@ -133504,10 +133538,10 @@ defer tls.Free(8) if (*Select)(unsafe.Pointer(p)).FselFlags&U32(SF_Values) != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+20383, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+20430, 0) } else { Xsqlite3ErrorMsg(tls, pParse, - ts+20429, + ts+20476, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) } } @@ -133761,8 +133795,8 @@ (*Select)(unsafe.Pointer(pPrior)).FpNext = uintptr(0) (*Select)(unsafe.Pointer(pPrior)).FpOrderBy = Xsqlite3ExprListDup(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pOrderBy, 0) - Xsqlite3ResolveOrderGroupBy(tls, pParse, p, (*Select)(unsafe.Pointer(p)).FpOrderBy, ts+8586) - Xsqlite3ResolveOrderGroupBy(tls, pParse, pPrior, (*Select)(unsafe.Pointer(pPrior)).FpOrderBy, ts+8586) + Xsqlite3ResolveOrderGroupBy(tls, pParse, p, (*Select)(unsafe.Pointer(p)).FpOrderBy, ts+8575) + Xsqlite3ResolveOrderGroupBy(tls, pParse, pPrior, (*Select)(unsafe.Pointer(pPrior)).FpOrderBy, ts+8575) computeLimitRegisters(tls, pParse, p, labelEnd) if (*Select)(unsafe.Pointer(p)).FiLimit != 0 && op == TK_ALL { @@ -133789,13 +133823,13 @@ Xsqlite3SelectDestInit(tls, bp+8, SRT_Coroutine, regAddrA) Xsqlite3SelectDestInit(tls, bp+48, SRT_Coroutine, regAddrB) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+20511, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+20558, libc.VaList(bp, Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(p)).Fop)))) addrSelectA = Xsqlite3VdbeCurrentAddr(tls, v) + 1 addr1 = Xsqlite3VdbeAddOp3(tls, v, OP_InitCoroutine, regAddrA, 0, addrSelectA) (*Select)(unsafe.Pointer(pPrior)).FiLimit = regLimitA - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+20522, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+20569, 0) Xsqlite3Select(tls, pParse, pPrior, bp+8) Xsqlite3VdbeEndCoroutine(tls, v, regAddrA) Xsqlite3VdbeJumpHere(tls, v, addr1) @@ -133807,7 +133841,7 @@ savedOffset = (*Select)(unsafe.Pointer(p)).FiOffset (*Select)(unsafe.Pointer(p)).FiLimit = regLimitB (*Select)(unsafe.Pointer(p)).FiOffset = 0 - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+20527, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+20574, 0) Xsqlite3Select(tls, pParse, p, bp+48) (*Select)(unsafe.Pointer(p)).FiLimit = savedLimit (*Select)(unsafe.Pointer(p)).FiOffset = savedOffset @@ -133995,7 +134029,8 @@ Xsqlite3VectorErrorMsg(tls, (*SubstContext)(unsafe.Pointer(pSubst)).FpParse, pCopy) } else { var db uintptr = (*Parse)(unsafe.Pointer((*SubstContext)(unsafe.Pointer(pSubst)).FpParse)).Fdb - if (*SubstContext)(unsafe.Pointer(pSubst)).FisOuterJoin != 0 && int32((*Expr)(unsafe.Pointer(pCopy)).Fop) != TK_COLUMN { + if (*SubstContext)(unsafe.Pointer(pSubst)).FisOuterJoin != 0 && + (int32((*Expr)(unsafe.Pointer(pCopy)).Fop) != TK_COLUMN || (*Expr)(unsafe.Pointer(pCopy)).FiTable != (*SubstContext)(unsafe.Pointer(pSubst)).FiNewTable) { libc.Xmemset(tls, bp, 0, uint64(unsafe.Sizeof(Expr{}))) (*Expr)(unsafe.Pointer(bp)).Fop = U8(TK_IF_NULL_ROW) (*Expr)(unsafe.Pointer(bp)).FpLeft = pCopy @@ -134894,7 +134929,7 @@ for pIdx = (*Table)(unsafe.Pointer(pTab)).FpIndex; pIdx != 0 && Xsqlite3StrICmp(tls, (*Index)(unsafe.Pointer(pIdx)).FzName, zIndexedBy) != 0; pIdx = (*Index)(unsafe.Pointer(pIdx)).FpNext { } if !(pIdx != 0) { - Xsqlite3ErrorMsg(tls, pParse, ts+20533, 
libc.VaList(bp, zIndexedBy, 0)) + Xsqlite3ErrorMsg(tls, pParse, ts+20580, libc.VaList(bp, zIndexedBy, 0)) (*Parse)(unsafe.Pointer(pParse)).FcheckSchema = U8(1) return SQLITE_ERROR } @@ -134977,7 +135012,7 @@ defer tls.Free(8) if uint32(int32(*(*uint16)(unsafe.Pointer(pFrom + 60 + 4))&0x4>>2)) != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+20551, libc.VaList(bp, (*SrcItem)(unsafe.Pointer(pFrom)).FzName)) + Xsqlite3ErrorMsg(tls, pParse, ts+20598, libc.VaList(bp, (*SrcItem)(unsafe.Pointer(pFrom)).FzName)) return 1 } return 0 @@ -135106,7 +135141,7 @@ *(*U32)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pFrom)).FpSelect + 4)) |= U32(SF_CopyCte) if uint32(int32(*(*uint16)(unsafe.Pointer(pFrom + 60 + 4))&0x2>>1)) != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+20574, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(pFrom + 88)))) + Xsqlite3ErrorMsg(tls, pParse, ts+20621, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(pFrom + 88)))) return 2 } libc.SetBitFieldPtr16Uint32(pFrom+60+4, uint32(1), 8, 0x100) @@ -135129,7 +135164,7 @@ libc.SetBitFieldPtr16Uint32(pItem+60+4, uint32(1), 6, 0x40) if (*Select)(unsafe.Pointer(pRecTerm)).FselFlags&U32(SF_Recursive) != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+20594, libc.VaList(bp+16, (*Cte)(unsafe.Pointer(pCte)).FzName)) + ts+20641, libc.VaList(bp+16, (*Cte)(unsafe.Pointer(pCte)).FzName)) return 2 } *(*U32)(unsafe.Pointer(pRecTerm + 4)) |= U32(SF_Recursive) @@ -135145,7 +135180,7 @@ pRecTerm = (*Select)(unsafe.Pointer(pRecTerm)).FpPrior } - (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 20637 + (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 20684 pSavedWith = (*Parse)(unsafe.Pointer(pParse)).FpWith (*Parse)(unsafe.Pointer(pParse)).FpWith = *(*uintptr)(unsafe.Pointer(bp + 48)) if (*Select)(unsafe.Pointer(pSel)).FselFlags&U32(SF_Recursive) != 0 { @@ -135171,7 +135206,7 @@ pEList = (*Select)(unsafe.Pointer(pLeft)).FpEList if (*Cte)(unsafe.Pointer(pCte)).FpCols != 0 { if pEList != 0 && (*ExprList)(unsafe.Pointer(pEList)).FnExpr != (*ExprList)(unsafe.Pointer((*Cte)(unsafe.Pointer(pCte)).FpCols)).FnExpr { - Xsqlite3ErrorMsg(tls, pParse, ts+20660, + Xsqlite3ErrorMsg(tls, pParse, ts+20707, libc.VaList(bp+24, (*Cte)(unsafe.Pointer(pCte)).FzName, (*ExprList)(unsafe.Pointer(pEList)).FnExpr, (*ExprList)(unsafe.Pointer((*Cte)(unsafe.Pointer(pCte)).FpCols)).FnExpr)) (*Parse)(unsafe.Pointer(pParse)).FpWith = pSavedWith return 2 @@ -135182,9 +135217,9 @@ Xsqlite3ColumnsFromExprList(tls, pParse, pEList, pTab+54, pTab+8) if bMayRecursive != 0 { if (*Select)(unsafe.Pointer(pSel)).FselFlags&U32(SF_Recursive) != 0 { - (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 20698 + (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 20745 } else { - (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 20732 + (*Cte)(unsafe.Pointer(pCte)).FzCteErr = ts + 20779 } Xsqlite3WalkSelect(tls, pWalker, pSel) } @@ -135231,7 +135266,7 @@ if (*SrcItem)(unsafe.Pointer(pFrom)).FzAlias != 0 { (*Table)(unsafe.Pointer(pTab)).FzName = Xsqlite3DbStrDup(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, (*SrcItem)(unsafe.Pointer(pFrom)).FzAlias) } else { - (*Table)(unsafe.Pointer(pTab)).FzName = Xsqlite3MPrintf(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, ts+20770, libc.VaList(bp, pFrom)) + (*Table)(unsafe.Pointer(pTab)).FzName = Xsqlite3MPrintf(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, ts+20817, libc.VaList(bp, pFrom)) } for (*Select)(unsafe.Pointer(pSel)).FpPrior != 0 { pSel = (*Select)(unsafe.Pointer(pSel)).FpPrior @@ -135343,7 +135378,7 @@ return WRC_Abort } if (*Table)(unsafe.Pointer(pTab)).FnTabRef >= U32(0xffff) { - Xsqlite3ErrorMsg(tls, pParse, 
ts+20774, + Xsqlite3ErrorMsg(tls, pParse, ts+20821, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName)) (*SrcItem)(unsafe.Pointer(pFrom)).FpTab = uintptr(0) return WRC_Abort @@ -135362,7 +135397,7 @@ if int32((*Table)(unsafe.Pointer(pTab)).FeTabType) == TABTYP_VIEW { if (*Sqlite3)(unsafe.Pointer(db)).Fflags&uint64(SQLITE_EnableView) == uint64(0) && (*Table)(unsafe.Pointer(pTab)).FpSchema != (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+1*32)).FpSchema { - Xsqlite3ErrorMsg(tls, pParse, ts+20813, + Xsqlite3ErrorMsg(tls, pParse, ts+20860, libc.VaList(bp+8, (*Table)(unsafe.Pointer(pTab)).FzName)) } (*SrcItem)(unsafe.Pointer(pFrom)).FpSelect = Xsqlite3SelectDup(tls, db, *(*uintptr)(unsafe.Pointer(pTab + 64)), 0) @@ -135486,7 +135521,7 @@ if pNew != 0 { var pX uintptr = pNew + 8 + uintptr((*ExprList)(unsafe.Pointer(pNew)).FnExpr-1)*32 - (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3MPrintf(tls, db, ts+20844, libc.VaList(bp+24, zUName)) + (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3MPrintf(tls, db, ts+20891, libc.VaList(bp+24, zUName)) libc.SetBitFieldPtr16Uint32(pX+16+4, uint32(ENAME_TAB), 0, 0x3) libc.SetBitFieldPtr16Uint32(pX+16+4, uint32(1), 7, 0x80) } @@ -135551,7 +135586,7 @@ (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3DbStrDup(tls, db, (*ExprList_item)(unsafe.Pointer(pNestedFrom+8+uintptr(j)*32)).FzEName) } else { - (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3MPrintf(tls, db, ts+20849, + (*ExprList_item)(unsafe.Pointer(pX)).FzEName = Xsqlite3MPrintf(tls, db, ts+20896, libc.VaList(bp+32, zSchemaName, zTabName, zName)) } @@ -135582,9 +135617,9 @@ ; if !(tableSeen != 0) { if zTName != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+20858, libc.VaList(bp+72, zTName)) + Xsqlite3ErrorMsg(tls, pParse, ts+20905, libc.VaList(bp+72, zTName)) } else { - Xsqlite3ErrorMsg(tls, pParse, ts+20876, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+20923, 0) } } } @@ -135594,7 +135629,7 @@ } if (*Select)(unsafe.Pointer(p)).FpEList != 0 { if (*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer(p)).FpEList)).FnExpr > *(*int32)(unsafe.Pointer(db + 136 + 2*4)) { - Xsqlite3ErrorMsg(tls, pParse, ts+20896, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+20943, 0) return WRC_Abort } if elistFlags&U32(EP_HasFunc|EP_Subquery) != U32(0) { @@ -135732,7 +135767,7 @@ (*AggInfo)(unsafe.Pointer(pAggInfo)).FnColumn = (*AggInfo)(unsafe.Pointer(pAggInfo)).FnAccumulator if int32((*AggInfo)(unsafe.Pointer(pAggInfo)).FnSortingColumn) > 0 { if (*AggInfo)(unsafe.Pointer(pAggInfo)).FnColumn == 0 { - (*AggInfo)(unsafe.Pointer(pAggInfo)).FnSortingColumn = U16(0) + (*AggInfo)(unsafe.Pointer(pAggInfo)).FnSortingColumn = U16((*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer(pSelect)).FpGroupBy)).FnExpr) } else { (*AggInfo)(unsafe.Pointer(pAggInfo)).FnSortingColumn = U16(int32((*AggInfo_col)(unsafe.Pointer((*AggInfo)(unsafe.Pointer(pAggInfo)).FaCol+uintptr((*AggInfo)(unsafe.Pointer(pAggInfo)).FnColumn-1)*24)).FiSorterColumn) + 1) } @@ -135816,13 +135851,13 @@ if *(*uintptr)(unsafe.Pointer(pE + 32)) == uintptr(0) || (*ExprList)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pE + 32)))).FnExpr != 1 { Xsqlite3ErrorMsg(tls, pParse, - ts+20927, 0) + ts+20974, 0) (*AggInfo_func)(unsafe.Pointer(pFunc)).FiDistinct = -1 } else { var pKeyInfo uintptr = Xsqlite3KeyInfoFromExprList(tls, pParse, *(*uintptr)(unsafe.Pointer(pE + 32)), 0, 0) (*AggInfo_func)(unsafe.Pointer(pFunc)).FiDistAddr = Xsqlite3VdbeAddOp4(tls, v, OP_OpenEphemeral, (*AggInfo_func)(unsafe.Pointer(pFunc)).FiDistinct, 0, 0, pKeyInfo, -8) - 
Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+20978, libc.VaList(bp, (*FuncDef)(unsafe.Pointer((*AggInfo_func)(unsafe.Pointer(pFunc)).FpFunc)).FzName)) + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+21025, libc.VaList(bp, (*FuncDef)(unsafe.Pointer((*AggInfo_func)(unsafe.Pointer(pFunc)).FpFunc)).FzName)) } } @@ -136011,11 +136046,11 @@ if int32((*Parse)(unsafe.Pointer(pParse)).Fexplain) == 2 { var bCover int32 = libc.Bool32(pIdx != uintptr(0) && ((*Table)(unsafe.Pointer(pTab)).FtabFlags&U32(TF_WithoutRowid) == U32(0) || !(int32(*(*uint16)(unsafe.Pointer(pIdx + 100))&0x3>>0) == SQLITE_IDXTYPE_PRIMARYKEY))) - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+21011, + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+21058, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName, func() uintptr { if bCover != 0 { - return ts + 21023 + return ts + 21070 } return ts + 1544 }(), @@ -136343,7 +136378,7 @@ goto __7 } Xsqlite3ErrorMsg(tls, pParse, - ts+21046, + ts+21093, libc.VaList(bp, func() uintptr { if (*SrcItem)(unsafe.Pointer(p0)).FzAlias != 0 { return (*SrcItem)(unsafe.Pointer(p0)).FzAlias @@ -136404,7 +136439,7 @@ if !(int32((*Table)(unsafe.Pointer(pTab)).FnCol) != (*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer(pSub)).FpEList)).FnExpr) { goto __15 } - Xsqlite3ErrorMsg(tls, pParse, ts+21100, + Xsqlite3ErrorMsg(tls, pParse, ts+21147, libc.VaList(bp+8, int32((*Table)(unsafe.Pointer(pTab)).FnCol), (*Table)(unsafe.Pointer(pTab)).FzName, (*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer(pSub)).FpEList)).FnExpr)) goto select_end __15: @@ -136546,7 +136581,7 @@ (*SrcItem)(unsafe.Pointer(pItem1)).FaddrFillSub = addrTop Xsqlite3SelectDestInit(tls, bp+96, SRT_Coroutine, (*SrcItem)(unsafe.Pointer(pItem1)).FregReturn) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21140, libc.VaList(bp+32, pItem1)) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21187, libc.VaList(bp+32, pItem1)) Xsqlite3Select(tls, pParse, pSub1, bp+96) (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pItem1)).FpTab)).FnRowLogEst = (*Select)(unsafe.Pointer(pSub1)).FnSelectRow libc.SetBitFieldPtr16Uint32(pItem1+60+4, uint32(1), 5, 0x20) @@ -136605,7 +136640,7 @@ ; Xsqlite3SelectDestInit(tls, bp+96, SRT_EphemTab, (*SrcItem)(unsafe.Pointer(pItem1)).FiCursor) - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21155, libc.VaList(bp+40, pItem1)) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+21202, libc.VaList(bp+40, pItem1)) Xsqlite3Select(tls, pParse, pSub1, bp+96) (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pItem1)).FpTab)).FnRowLogEst = (*Select)(unsafe.Pointer(pSub1)).FnSelectRow if !(onceAddr != 0) { @@ -137076,9 +137111,9 @@ explainTempTable(tls, pParse, func() uintptr { if (*DistinctCtx)(unsafe.Pointer(bp+136)).FisTnct != 0 && (*Select)(unsafe.Pointer(p)).FselFlags&U32(SF_Distinct) == U32(0) { - return ts + 21171 + return ts + 21218 } - return ts + 21180 + return ts + 21227 }()) groupBySort = 1 @@ -137429,7 +137464,7 @@ if !(int32((*DistinctCtx)(unsafe.Pointer(bp+136)).FeTnctType) == WHERE_DISTINCT_UNORDERED) { goto __146 } - explainTempTable(tls, pParse, ts+21171) + explainTempTable(tls, pParse, ts+21218) __146: ; if !((*SortCtx)(unsafe.Pointer(bp+48)).FpOrderBy != 0) { @@ -137534,7 +137569,7 @@ } Xsqlite3_free(tls, (*TabResult)(unsafe.Pointer(p)).FzErrMsg) (*TabResult)(unsafe.Pointer(p)).FzErrMsg = Xsqlite3_mprintf(tls, - ts+21189, 0) + ts+21236, 0) (*TabResult)(unsafe.Pointer(p)).Frc = SQLITE_ERROR return 1 __11: @@ -137767,7 +137802,7 @@ if !((*Token)(unsafe.Pointer(pName2)).Fn > uint32(0)) { goto __3 } - Xsqlite3ErrorMsg(tls, 
pParse, ts+21254, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+21301, 0) goto trigger_cleanup __3: ; @@ -137811,7 +137846,7 @@ goto trigger_cleanup __8: ; - Xsqlite3FixInit(tls, bp+40, pParse, iDb, ts+21300, *(*uintptr)(unsafe.Pointer(bp + 32))) + Xsqlite3FixInit(tls, bp+40, pParse, iDb, ts+21347, *(*uintptr)(unsafe.Pointer(bp + 32))) if !(Xsqlite3FixSrcList(tls, bp+40, pTableName) != 0) { goto __9 } @@ -137829,7 +137864,7 @@ if !(int32((*Table)(unsafe.Pointer(pTab)).FeTabType) == TABTYP_VTAB) { goto __11 } - Xsqlite3ErrorMsg(tls, pParse, ts+21308, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+21355, 0) goto trigger_orphan_error __11: ; @@ -137841,7 +137876,7 @@ goto trigger_cleanup __12: ; - if !(Xsqlite3CheckObjectName(tls, pParse, zName, ts+21300, (*Table)(unsafe.Pointer(pTab)).FzName) != 0) { + if !(Xsqlite3CheckObjectName(tls, pParse, zName, ts+21347, (*Table)(unsafe.Pointer(pTab)).FzName) != 0) { goto __13 } goto trigger_cleanup @@ -137856,11 +137891,12 @@ if !!(noErr != 0) { goto __16 } - Xsqlite3ErrorMsg(tls, pParse, ts+21349, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(bp + 32)))) + Xsqlite3ErrorMsg(tls, pParse, ts+21396, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(bp + 32)))) goto __17 __16: ; Xsqlite3CodeVerifySchema(tls, pParse, iDb) + __17: ; goto trigger_cleanup @@ -137871,19 +137907,19 @@ if !(Xsqlite3_strnicmp(tls, (*Table)(unsafe.Pointer(pTab)).FzName, ts+7733, 7) == 0) { goto __18 } - Xsqlite3ErrorMsg(tls, pParse, ts+21375, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+21422, 0) goto trigger_cleanup __18: ; if !(int32((*Table)(unsafe.Pointer(pTab)).FeTabType) == TABTYP_VIEW && tr_tm != TK_INSTEAD) { goto __19 } - Xsqlite3ErrorMsg(tls, pParse, ts+21413, + Xsqlite3ErrorMsg(tls, pParse, ts+21460, libc.VaList(bp+8, func() uintptr { if tr_tm == TK_BEFORE { - return ts + 21450 + return ts + 21497 } - return ts + 21457 + return ts + 21504 }(), pTableName+8)) goto trigger_orphan_error __19: @@ -137892,7 +137928,7 @@ goto __20 } Xsqlite3ErrorMsg(tls, pParse, - ts+21463, libc.VaList(bp+24, pTableName+8)) + ts+21510, libc.VaList(bp+24, pTableName+8)) goto trigger_orphan_error __20: ; @@ -138041,7 +138077,7 @@ __3: ; Xsqlite3TokenInit(tls, bp+56, (*Trigger)(unsafe.Pointer(pTrig)).FzName) - Xsqlite3FixInit(tls, bp+72, pParse, iDb, ts+21300, bp+56) + Xsqlite3FixInit(tls, bp+72, pParse, iDb, ts+21347, bp+56) if !(Xsqlite3FixTriggerStep(tls, bp+72, (*Trigger)(unsafe.Pointer(pTrig)).Fstep_list) != 0 || Xsqlite3FixExpr(tls, bp+72, (*Trigger)(unsafe.Pointer(pTrig)).FpWhen) != 0) { goto __4 @@ -138074,7 +138110,7 @@ goto __12 } Xsqlite3ErrorMsg(tls, pParse, - ts+21509, + ts+21556, libc.VaList(bp, (*Trigger)(unsafe.Pointer(pTrig)).FzName, (*TriggerStep)(unsafe.Pointer(pStep)).FzTarget)) goto triggerfinish_cleanup __12: @@ -138099,13 +138135,13 @@ z = Xsqlite3DbStrNDup(tls, db, (*Token)(unsafe.Pointer(pAll)).Fz, uint64((*Token)(unsafe.Pointer(pAll)).Fn)) Xsqlite3NestedParse(tls, pParse, - ts+21557, + ts+21604, libc.VaList(bp+16, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName, zName, (*Trigger)(unsafe.Pointer(pTrig)).Ftable, z)) Xsqlite3DbFree(tls, db, z) Xsqlite3ChangeCookie(tls, pParse, iDb) Xsqlite3VdbeAddParseSchemaOp(tls, v, iDb, - Xsqlite3MPrintf(tls, db, ts+21632, libc.VaList(bp+48, zName)), uint16(0)) + Xsqlite3MPrintf(tls, db, ts+21679, libc.VaList(bp+48, zName)), uint16(0)) __7: ; __6: @@ -138361,7 +138397,7 @@ if !!(noErr != 0) { goto __9 } - Xsqlite3ErrorMsg(tls, pParse, ts+21661, libc.VaList(bp, pName+8)) + Xsqlite3ErrorMsg(tls, pParse, ts+21708, libc.VaList(bp, 
pName+8)) goto __10 __9: Xsqlite3CodeVerifyNamedSchema(tls, pParse, zDb) @@ -138414,7 +138450,7 @@ if libc.AssignUintptr(&v, Xsqlite3GetVdbe(tls, pParse)) != uintptr(0) { Xsqlite3NestedParse(tls, pParse, - ts+21681, + ts+21728, libc.VaList(bp, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName, (*Trigger)(unsafe.Pointer(pTrigger)).FzName)) Xsqlite3ChangeCookie(tls, pParse, iDb) Xsqlite3VdbeAddOp4(tls, v, OP_DropTrigger, iDb, 0, 0, (*Trigger)(unsafe.Pointer(pTrigger)).FzName, 0) @@ -138528,12 +138564,12 @@ goto __15 } Xsqlite3ErrorMsg(tls, pParse, - ts+21743, + ts+21790, libc.VaList(bp, func() uintptr { if op == TK_DELETE { - return ts + 21791 + return ts + 21838 } - return ts + 21798 + return ts + 21845 }())) __15: ; @@ -138647,7 +138683,7 @@ if int32((*Expr)(unsafe.Pointer((*Expr)(unsafe.Pointer(pTerm)).FpRight)).Fop) != TK_ASTERISK { return 0 } - Xsqlite3ErrorMsg(tls, pParse, ts+21805, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+21852, 0) return 1 } @@ -138713,7 +138749,7 @@ } Xsqlite3ExprListDelete(tls, db, (*Select)(unsafe.Pointer(bp)).FpEList) pNew = sqlite3ExpandReturning(tls, pParse, (*Returning)(unsafe.Pointer(pReturning)).FpReturnEL, pTab) - if !(int32((*Sqlite3)(unsafe.Pointer(db)).FmallocFailed) != 0) { + if (*Parse)(unsafe.Pointer(pParse)).FnErr == 0 { libc.Xmemset(tls, bp+240, 0, uint64(unsafe.Sizeof(NameContext{}))) if (*Returning)(unsafe.Pointer(pReturning)).FnRetCol == 0 { (*Returning)(unsafe.Pointer(pReturning)).FnRetCol = (*ExprList)(unsafe.Pointer(pNew)).FnExpr @@ -138877,7 +138913,7 @@ if v != 0 { if (*Trigger)(unsafe.Pointer(pTrigger)).FzName != 0 { Xsqlite3VdbeChangeP4(tls, v, -1, - Xsqlite3MPrintf(tls, db, ts+21847, libc.VaList(bp, (*Trigger)(unsafe.Pointer(pTrigger)).FzName)), -6) + Xsqlite3MPrintf(tls, db, ts+21894, libc.VaList(bp, (*Trigger)(unsafe.Pointer(pTrigger)).FzName)), -6) } if (*Trigger)(unsafe.Pointer(pTrigger)).FpWhen != 0 { @@ -139470,7 +139506,7 @@ } Xsqlite3ErrorMsg(tls, pParse, - ts+21861, + ts+21908, libc.VaList(bp, (*Column)(unsafe.Pointer((*Table)(unsafe.Pointer(pTab)).FaCol+uintptr(j)*24)).FzCnName)) goto update_cleanup __27: @@ -139502,7 +139538,7 @@ iRowidExpr = i goto __30 __29: - Xsqlite3ErrorMsg(tls, pParse, ts+21897, libc.VaList(bp+8, (*ExprList_item)(unsafe.Pointer(pChanges+8+uintptr(i)*32)).FzEName)) + Xsqlite3ErrorMsg(tls, pParse, ts+21944, libc.VaList(bp+8, (*ExprList_item)(unsafe.Pointer(pChanges+8+uintptr(i)*32)).FzEName)) (*Parse)(unsafe.Pointer(pParse)).FcheckSchema = U8(1) goto update_cleanup __30: @@ -139828,7 +139864,12 @@ goto __77 __76: flags = WHERE_ONEPASS_DESIRED - if !(!(int32((*Parse)(unsafe.Pointer(pParse)).Fnested) != 0) && !(pTrigger != 0) && !(hasFK != 0) && !(chngKey != 0) && !(*(*int32)(unsafe.Pointer(bp + 104)) != 0)) { + if !(!(int32((*Parse)(unsafe.Pointer(pParse)).Fnested) != 0) && + !(pTrigger != 0) && + !(hasFK != 0) && + !(chngKey != 0) && + !(*(*int32)(unsafe.Pointer(bp + 104)) != 0) && + (*NameContext)(unsafe.Pointer(bp+40)).FncFlags&NC_Subquery == 0) { goto __78 } flags = flags | WHERE_ONEPASS_MULTIROW @@ -140382,7 +140423,7 @@ if !(regRowCount != 0) { goto __169 } - Xsqlite3CodeChangeCount(tls, v, regRowCount, ts+21916) + Xsqlite3CodeChangeCount(tls, v, regRowCount, ts+21963) __169: ; update_cleanup: @@ -140688,10 +140729,10 @@ if nClause == 0 && (*Upsert)(unsafe.Pointer(pUpsert)).FpNextUpsert == uintptr(0) { *(*int8)(unsafe.Pointer(bp + 216)) = int8(0) } else { - Xsqlite3_snprintf(tls, int32(unsafe.Sizeof([16]int8{})), bp+216, ts+21929, libc.VaList(bp, nClause+1)) + 
Xsqlite3_snprintf(tls, int32(unsafe.Sizeof([16]int8{})), bp+216, ts+21976, libc.VaList(bp, nClause+1)) } Xsqlite3ErrorMsg(tls, pParse, - ts+21933, libc.VaList(bp+8, bp+216)) + ts+21980, libc.VaList(bp+8, bp+216)) return SQLITE_ERROR } @@ -140814,7 +140855,7 @@ var zSubSql uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp)), 0) if zSubSql != 0 && - (libc.Xstrncmp(tls, zSubSql, ts+22006, uint64(3)) == 0 || libc.Xstrncmp(tls, zSubSql, ts+22010, uint64(3)) == 0) { + (libc.Xstrncmp(tls, zSubSql, ts+22053, uint64(3)) == 0 || libc.Xstrncmp(tls, zSubSql, ts+22057, uint64(3)) == 0) { rc = execSql(tls, db, pzErrMsg, zSubSql) if rc != SQLITE_OK { break @@ -140962,14 +141003,14 @@ if !!(int32((*Sqlite3)(unsafe.Pointer(db)).FautoCommit) != 0) { goto __1 } - Xsqlite3SetString(tls, pzErrMsg, db, ts+22014) + Xsqlite3SetString(tls, pzErrMsg, db, ts+22061) return SQLITE_ERROR __1: ; if !((*Sqlite3)(unsafe.Pointer(db)).FnVdbeActive > 1) { goto __2 } - Xsqlite3SetString(tls, pzErrMsg, db, ts+22054) + Xsqlite3SetString(tls, pzErrMsg, db, ts+22101) return SQLITE_ERROR __2: ; @@ -140980,7 +141021,7 @@ if !(Xsqlite3_value_type(tls, pOut) != SQLITE_TEXT) { goto __5 } - Xsqlite3SetString(tls, pzErrMsg, db, ts+22097) + Xsqlite3SetString(tls, pzErrMsg, db, ts+22144) return SQLITE_ERROR __5: ; @@ -141008,7 +141049,7 @@ isMemDb = Xsqlite3PagerIsMemdb(tls, Xsqlite3BtreePager(tls, pMain)) nDb = (*Sqlite3)(unsafe.Pointer(db)).FnDb - rc = execSqlF(tls, db, pzErrMsg, ts+22115, libc.VaList(bp, zOut)) + rc = execSqlF(tls, db, pzErrMsg, ts+22162, libc.VaList(bp, zOut)) (*Sqlite3)(unsafe.Pointer(db)).FopenFlags = saved_openFlags if !(rc != SQLITE_OK) { goto __6 @@ -141028,7 +141069,7 @@ goto __8 } rc = SQLITE_ERROR - Xsqlite3SetString(tls, pzErrMsg, db, ts+22138) + Xsqlite3SetString(tls, pzErrMsg, db, ts+22185) goto end_of_vacuum __8: ; @@ -141088,7 +141129,7 @@ (*Sqlite3)(unsafe.Pointer(db)).Finit.FiDb = U8(nDb) rc = execSqlF(tls, db, pzErrMsg, - ts+22165, + ts+22212, libc.VaList(bp+8, zDbMain)) if !(rc != SQLITE_OK) { goto __13 @@ -141097,7 +141138,7 @@ __13: ; rc = execSqlF(tls, db, pzErrMsg, - ts+22273, + ts+22320, libc.VaList(bp+16, zDbMain)) if !(rc != SQLITE_OK) { goto __14 @@ -141108,7 +141149,7 @@ (*Sqlite3)(unsafe.Pointer(db)).Finit.FiDb = U8(0) rc = execSqlF(tls, db, pzErrMsg, - ts+22327, + ts+22374, libc.VaList(bp+24, zDbMain)) *(*U32)(unsafe.Pointer(db + 44)) &= libc.Uint32FromInt32(libc.CplInt32(DBFLAG_Vacuum)) @@ -141119,7 +141160,7 @@ __15: ; rc = execSqlF(tls, db, pzErrMsg, - ts+22478, + ts+22525, libc.VaList(bp+32, zDbMain)) if !(rc != 0) { goto __16 @@ -141548,11 +141589,11 @@ if pEnd != 0 { (*Parse)(unsafe.Pointer(pParse)).FsNameToken.Fn = uint32(int32((int64((*Token)(unsafe.Pointer(pEnd)).Fz)-int64((*Parse)(unsafe.Pointer(pParse)).FsNameToken.Fz))/1)) + (*Token)(unsafe.Pointer(pEnd)).Fn } - zStmt = Xsqlite3MPrintf(tls, db, ts+22608, libc.VaList(bp, pParse+272)) + zStmt = Xsqlite3MPrintf(tls, db, ts+22655, libc.VaList(bp, pParse+272)) iDb = Xsqlite3SchemaToIndex(tls, db, (*Table)(unsafe.Pointer(pTab)).FpSchema) Xsqlite3NestedParse(tls, pParse, - ts+22632, + ts+22679, libc.VaList(bp+8, (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb+uintptr(iDb)*32)).FzDbSName, (*Table)(unsafe.Pointer(pTab)).FzName, (*Table)(unsafe.Pointer(pTab)).FzName, @@ -141562,7 +141603,7 @@ Xsqlite3ChangeCookie(tls, pParse, iDb) Xsqlite3VdbeAddOp0(tls, v, OP_Expire) - zWhere = Xsqlite3MPrintf(tls, db, ts+22731, libc.VaList(bp+48, (*Table)(unsafe.Pointer(pTab)).FzName, zStmt)) + zWhere = Xsqlite3MPrintf(tls, db, 
ts+22778, libc.VaList(bp+48, (*Table)(unsafe.Pointer(pTab)).FzName, zStmt)) Xsqlite3VdbeAddParseSchemaOp(tls, v, iDb, zWhere, uint16(0)) Xsqlite3DbFree(tls, db, zStmt) @@ -141623,7 +141664,7 @@ for pCtx = (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx; pCtx != 0; pCtx = (*VtabCtx)(unsafe.Pointer(pCtx)).FpPrior { if (*VtabCtx)(unsafe.Pointer(pCtx)).FpTab == pTab { *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, - ts+22750, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName)) + ts+22797, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName)) return SQLITE_LOCKED } } @@ -141651,9 +141692,11 @@ (*VtabCtx)(unsafe.Pointer(bp + 32)).FpPrior = (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx (*VtabCtx)(unsafe.Pointer(bp + 32)).FbDeclared = 0 (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx = bp + 32 + (*Table)(unsafe.Pointer(pTab)).FnTabRef++ rc = (*struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, uintptr, uintptr) int32 })(unsafe.Pointer(&struct{ uintptr }{xConstruct})).f(tls, db, (*Module)(unsafe.Pointer(pMod)).FpAux, nArg, azArg, pVTable+16, bp+64) + Xsqlite3DeleteTable(tls, db, pTab) (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx = (*VtabCtx)(unsafe.Pointer(bp + 32)).FpPrior if rc == SQLITE_NOMEM { Xsqlite3OomFault(tls, db) @@ -141661,7 +141704,7 @@ if SQLITE_OK != rc { if *(*uintptr)(unsafe.Pointer(bp + 64)) == uintptr(0) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+22792, libc.VaList(bp+8, zModuleName)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+22839, libc.VaList(bp+8, zModuleName)) } else { *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+4493, libc.VaList(bp+16, *(*uintptr)(unsafe.Pointer(bp + 64)))) Xsqlite3_free(tls, *(*uintptr)(unsafe.Pointer(bp + 64))) @@ -141673,7 +141716,7 @@ (*Module)(unsafe.Pointer(pMod)).FnRefModule++ (*VTable)(unsafe.Pointer(pVTable)).FnRef = 1 if (*VtabCtx)(unsafe.Pointer(bp+32)).FbDeclared == 0 { - var zFormat uintptr = ts + 22822 + var zFormat uintptr = ts + 22869 *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, zFormat, libc.VaList(bp+24, (*Table)(unsafe.Pointer(pTab)).FzName)) Xsqlite3VtabUnlock(tls, pVTable) rc = SQLITE_ERROR @@ -141747,7 +141790,7 @@ if !(pMod != 0) { var zModule uintptr = *(*uintptr)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pTab + 64 + 8)))) - Xsqlite3ErrorMsg(tls, pParse, ts+22868, libc.VaList(bp, zModule)) + Xsqlite3ErrorMsg(tls, pParse, ts+22915, libc.VaList(bp, zModule)) rc = SQLITE_ERROR } else { *(*uintptr)(unsafe.Pointer(bp + 16)) = uintptr(0) @@ -141805,7 +141848,7 @@ pMod = Xsqlite3HashFind(tls, db+576, zMod) if pMod == uintptr(0) || (*Sqlite3_module)(unsafe.Pointer((*Module)(unsafe.Pointer(pMod)).FpModule)).FxCreate == uintptr(0) || (*Sqlite3_module)(unsafe.Pointer((*Module)(unsafe.Pointer(pMod)).FpModule)).FxDestroy == uintptr(0) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+22868, libc.VaList(bp, zMod)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3MPrintf(tls, db, ts+22915, libc.VaList(bp, zMod)) rc = SQLITE_ERROR } else { rc = vtabCallConstructor(tls, db, pTab, pMod, (*Sqlite3_module)(unsafe.Pointer((*Module)(unsafe.Pointer(pMod)).FpModule)).FxCreate, pzErr) @@ -141839,7 +141882,7 @@ if !(pCtx != 0) || (*VtabCtx)(unsafe.Pointer(pCtx)).FbDeclared != 0 { Xsqlite3Error(tls, db, SQLITE_MISUSE) Xsqlite3_mutex_leave(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) - return Xsqlite3MisuseError(tls, 151030) + return Xsqlite3MisuseError(tls, 151102) } pTab = (*VtabCtx)(unsafe.Pointer(pCtx)).FpTab @@ -142292,7 +142335,7 @@ 
Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) p = (*Sqlite3)(unsafe.Pointer(db)).FpVtabCtx if !(p != 0) { - rc = Xsqlite3MisuseError(tls, 151521) + rc = Xsqlite3MisuseError(tls, 151593) } else { ap = va switch op { @@ -142319,7 +142362,7 @@ fallthrough default: { - rc = Xsqlite3MisuseError(tls, 151539) + rc = Xsqlite3MisuseError(tls, 151611) break } @@ -142550,7 +142593,7 @@ func explainIndexColumnName(tls *libc.TLS, pIdx uintptr, i int32) uintptr { i = int32(*(*I16)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx)).FaiColumn + uintptr(i)*2))) if i == -2 { - return ts + 22887 + return ts + 22934 } if i == -1 { return ts + 17625 @@ -142562,11 +142605,11 @@ var i int32 if bAnd != 0 { - Xsqlite3_str_append(tls, pStr, ts+22894, 5) + Xsqlite3_str_append(tls, pStr, ts+22941, 5) } if nTerm > 1 { - Xsqlite3_str_append(tls, pStr, ts+22900, 1) + Xsqlite3_str_append(tls, pStr, ts+22947, 1) } for i = 0; i < nTerm; i++ { if i != 0 { @@ -142581,7 +142624,7 @@ Xsqlite3_str_append(tls, pStr, zOp, 1) if nTerm > 1 { - Xsqlite3_str_append(tls, pStr, ts+22900, 1) + Xsqlite3_str_append(tls, pStr, ts+22947, 1) } for i = 0; i < nTerm; i++ { if i != 0 { @@ -142607,27 +142650,27 @@ if int32(nEq) == 0 && (*WhereLoop)(unsafe.Pointer(pLoop)).FwsFlags&U32(WHERE_BTM_LIMIT|WHERE_TOP_LIMIT) == U32(0) { return } - Xsqlite3_str_append(tls, pStr, ts+22902, 2) + Xsqlite3_str_append(tls, pStr, ts+22949, 2) for i = 0; i < int32(nEq); i++ { var z uintptr = explainIndexColumnName(tls, pIndex, i) if i != 0 { - Xsqlite3_str_append(tls, pStr, ts+22894, 5) + Xsqlite3_str_append(tls, pStr, ts+22941, 5) } Xsqlite3_str_appendf(tls, pStr, func() uintptr { if i >= int32(nSkip) { - return ts + 22905 + return ts + 22952 } - return ts + 22910 + return ts + 22957 }(), libc.VaList(bp, z)) } j = i if (*WhereLoop)(unsafe.Pointer(pLoop)).FwsFlags&U32(WHERE_BTM_LIMIT) != 0 { - explainAppendTerm(tls, pStr, pIndex, int32(*(*U16)(unsafe.Pointer(pLoop + 24 + 2))), j, i, ts+22918) + explainAppendTerm(tls, pStr, pIndex, int32(*(*U16)(unsafe.Pointer(pLoop + 24 + 2))), j, i, ts+22965) i = 1 } if (*WhereLoop)(unsafe.Pointer(pLoop)).FwsFlags&U32(WHERE_TOP_LIMIT) != 0 { - explainAppendTerm(tls, pStr, pIndex, int32(*(*U16)(unsafe.Pointer(pLoop + 24 + 4))), j, i, ts+22920) + explainAppendTerm(tls, pStr, pIndex, int32(*(*U16)(unsafe.Pointer(pLoop + 24 + 4))), j, i, ts+22967) } Xsqlite3_str_append(tls, pStr, ts+6309, 1) } @@ -142670,11 +142713,11 @@ Xsqlite3StrAccumInit(tls, bp+64, db, bp+96, int32(unsafe.Sizeof([100]int8{})), SQLITE_MAX_LENGTH) (*StrAccum)(unsafe.Pointer(bp + 64)).FprintfFlags = U8(SQLITE_PRINTF_INTERNAL) - Xsqlite3_str_appendf(tls, bp+64, ts+22922, libc.VaList(bp, func() uintptr { + Xsqlite3_str_appendf(tls, bp+64, ts+22969, libc.VaList(bp, func() uintptr { if isSearch != 0 { - return ts + 22928 + return ts + 22975 } - return ts + 22935 + return ts + 22982 }(), pItem)) if flags&U32(WHERE_IPK|WHERE_VIRTUALTABLE) == U32(0) { var zFmt uintptr = uintptr(0) @@ -142687,40 +142730,40 @@ zFmt = ts + 12328 } } else if flags&U32(WHERE_PARTIALIDX) != 0 { - zFmt = ts + 22940 + zFmt = ts + 22987 } else if flags&U32(WHERE_AUTO_INDEX) != 0 { - zFmt = ts + 22973 + zFmt = ts + 23020 } else if flags&U32(WHERE_IDX_ONLY) != 0 { - zFmt = ts + 22998 + zFmt = ts + 23045 } else { - zFmt = ts + 23016 + zFmt = ts + 23063 } if zFmt != 0 { - Xsqlite3_str_append(tls, bp+64, ts+23025, 7) + Xsqlite3_str_append(tls, bp+64, ts+23072, 7) Xsqlite3_str_appendf(tls, bp+64, zFmt, libc.VaList(bp+16, (*Index)(unsafe.Pointer(pIdx)).FzName)) explainIndexRange(tls, 
bp+64, pLoop) } } else if flags&U32(WHERE_IPK) != U32(0) && flags&U32(WHERE_CONSTRAINT) != U32(0) { var cRangeOp int8 var zRowid uintptr = ts + 17625 - Xsqlite3_str_appendf(tls, bp+64, ts+23033, libc.VaList(bp+24, zRowid)) + Xsqlite3_str_appendf(tls, bp+64, ts+23080, libc.VaList(bp+24, zRowid)) if flags&U32(WHERE_COLUMN_EQ|WHERE_COLUMN_IN) != 0 { cRangeOp = int8('=') } else if flags&U32(WHERE_BOTH_LIMIT) == U32(WHERE_BOTH_LIMIT) { - Xsqlite3_str_appendf(tls, bp+64, ts+23064, libc.VaList(bp+32, zRowid)) + Xsqlite3_str_appendf(tls, bp+64, ts+23111, libc.VaList(bp+32, zRowid)) cRangeOp = int8('<') } else if flags&U32(WHERE_BTM_LIMIT) != 0 { cRangeOp = int8('>') } else { cRangeOp = int8('<') } - Xsqlite3_str_appendf(tls, bp+64, ts+23074, libc.VaList(bp+40, int32(cRangeOp))) + Xsqlite3_str_appendf(tls, bp+64, ts+23121, libc.VaList(bp+40, int32(cRangeOp))) } else if flags&U32(WHERE_VIRTUALTABLE) != U32(0) { - Xsqlite3_str_appendf(tls, bp+64, ts+23079, + Xsqlite3_str_appendf(tls, bp+64, ts+23126, libc.VaList(bp+48, *(*int32)(unsafe.Pointer(pLoop + 24)), *(*uintptr)(unsafe.Pointer(pLoop + 24 + 16)))) } if int32((*SrcItem)(unsafe.Pointer(pItem)).Ffg.Fjointype)&JT_LEFT != 0 { - Xsqlite3_str_appendf(tls, bp+64, ts+23106, 0) + Xsqlite3_str_appendf(tls, bp+64, ts+23153, 0) } zMsg = Xsqlite3StrAccumFinish(tls, bp+64) @@ -142752,22 +142795,22 @@ Xsqlite3StrAccumInit(tls, bp+24, db, bp+56, int32(unsafe.Sizeof([100]int8{})), SQLITE_MAX_LENGTH) (*StrAccum)(unsafe.Pointer(bp + 24)).FprintfFlags = U8(SQLITE_PRINTF_INTERNAL) - Xsqlite3_str_appendf(tls, bp+24, ts+23117, libc.VaList(bp, pItem)) + Xsqlite3_str_appendf(tls, bp+24, ts+23164, libc.VaList(bp, pItem)) pLoop = (*WhereLevel)(unsafe.Pointer(pLevel)).FpWLoop if (*WhereLoop)(unsafe.Pointer(pLoop)).FwsFlags&U32(WHERE_IPK) != 0 { var pTab uintptr = (*SrcItem)(unsafe.Pointer(pItem)).FpTab if int32((*Table)(unsafe.Pointer(pTab)).FiPKey) >= 0 { - Xsqlite3_str_appendf(tls, bp+24, ts+22905, libc.VaList(bp+8, (*Column)(unsafe.Pointer((*Table)(unsafe.Pointer(pTab)).FaCol+uintptr((*Table)(unsafe.Pointer(pTab)).FiPKey)*24)).FzCnName)) + Xsqlite3_str_appendf(tls, bp+24, ts+22952, libc.VaList(bp+8, (*Column)(unsafe.Pointer((*Table)(unsafe.Pointer(pTab)).FaCol+uintptr((*Table)(unsafe.Pointer(pTab)).FiPKey)*24)).FzCnName)) } else { - Xsqlite3_str_appendf(tls, bp+24, ts+23138, 0) + Xsqlite3_str_appendf(tls, bp+24, ts+23185, 0) } } else { for i = int32((*WhereLoop)(unsafe.Pointer(pLoop)).FnSkip); i < int32(*(*U16)(unsafe.Pointer(pLoop + 24))); i++ { var z uintptr = explainIndexColumnName(tls, *(*uintptr)(unsafe.Pointer(pLoop + 24 + 8)), i) if i > int32((*WhereLoop)(unsafe.Pointer(pLoop)).FnSkip) { - Xsqlite3_str_append(tls, bp+24, ts+22894, 5) + Xsqlite3_str_append(tls, bp+24, ts+22941, 5) } - Xsqlite3_str_appendf(tls, bp+24, ts+22905, libc.VaList(bp+16, z)) + Xsqlite3_str_appendf(tls, bp+24, ts+22952, libc.VaList(bp+16, z)) } } Xsqlite3_str_append(tls, bp+24, ts+6309, 1) @@ -144364,7 +144407,7 @@ ; __126: ; - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+23146, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+23193, 0) ii = 0 __135: if !(ii < (*WhereClause)(unsafe.Pointer(pOrWc)).FnTerm) { @@ -144392,7 +144435,7 @@ pOrExpr = pAndExpr __140: ; - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+23161, libc.VaList(bp, ii+1)) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+23208, libc.VaList(bp, ii+1)) pSubWInfo = Xsqlite3WhereBegin(tls, pParse, pOrTab, pOrExpr, uintptr(0), uintptr(0), uintptr(0), uint16(WHERE_OR_SUBCLAUSE), iCovCur) @@ -144910,7 +144953,7 @@ var mAll Bitmask 
= uint64(0) var k int32 - Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+23170, libc.VaList(bp, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pTabItem)).FpTab)).FzName)) + Xsqlite3VdbeExplain(tls, pParse, uint8(1), ts+23217, libc.VaList(bp, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pTabItem)).FpTab)).FzName)) for k = 0; k < iLevel; k++ { var iIdxCur int32 @@ -145271,7 +145314,7 @@ {FzOp: ts + 17474, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_MATCH)}, {FzOp: ts + 16804, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_GLOB)}, {FzOp: ts + 16324, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_LIKE)}, - {FzOp: ts + 23184, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_REGEXP)}, + {FzOp: ts + 23231, FeOp2: uint8(SQLITE_INDEX_CONSTRAINT_REGEXP)}, } func transferJoinMarkings(tls *libc.TLS, pDerived uintptr, pBase uintptr) { @@ -145761,12 +145804,12 @@ extraRight = x - uint64(1) if prereqAll>>1 >= x { - Xsqlite3ErrorMsg(tls, pParse, ts+23191, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+23238, 0) return } } else if prereqAll>>1 >= x { if (*SrcList)(unsafe.Pointer(pSrc)).FnSrc > 0 && int32((*SrcItem)(unsafe.Pointer(pSrc+8)).Ffg.Fjointype)&JT_LTORJ != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+23191, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+23238, 0) return } *(*U32)(unsafe.Pointer(pExpr + 4)) &= libc.Uint32FromInt32(libc.CplInt32(EP_InnerON)) @@ -145845,7 +145888,7 @@ !((*Expr)(unsafe.Pointer(pExpr)).Fflags&U32(EP_OuterON) != U32(0)) && 0 == Xsqlite3ExprCanBeNull(tls, pLeft) { (*Expr)(unsafe.Pointer(pExpr)).Fop = U8(TK_TRUEFALSE) - *(*uintptr)(unsafe.Pointer(pExpr + 8)) = ts + 8121 + *(*uintptr)(unsafe.Pointer(pExpr + 8)) = ts + 9051 *(*U32)(unsafe.Pointer(pExpr + 4)) |= U32(EP_IsFalse) (*WhereTerm)(unsafe.Pointer(pTerm)).FprereqAll = uint64(0) (*WhereTerm)(unsafe.Pointer(pTerm)).FeOperator = U16(0) @@ -145939,7 +145982,7 @@ } zCollSeqName = func() uintptr { if *(*int32)(unsafe.Pointer(bp + 20)) != 0 { - return ts + 23232 + return ts + 23279 } return uintptr(unsafe.Pointer(&Xsqlite3StrBINARY)) }() @@ -146315,7 +146358,7 @@ k++ } if k >= int32((*Table)(unsafe.Pointer(pTab)).FnCol) { - Xsqlite3ErrorMsg(tls, pParse, ts+23239, + Xsqlite3ErrorMsg(tls, pParse, ts+23286, libc.VaList(bp, (*Table)(unsafe.Pointer(pTab)).FzName, j)) return } @@ -146331,7 +146374,7 @@ pRhs = Xsqlite3PExpr(tls, pParse, TK_UPLUS, Xsqlite3ExprDup(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, (*ExprList_item)(unsafe.Pointer(pArgs+8+uintptr(j)*32)).FpExpr, 0), uintptr(0)) pTerm = Xsqlite3PExpr(tls, pParse, TK_EQ, pColRef, pRhs) - if int32((*SrcItem)(unsafe.Pointer(pItem)).Ffg.Fjointype)&(JT_LEFT|JT_LTORJ) != 0 { + if int32((*SrcItem)(unsafe.Pointer(pItem)).Ffg.Fjointype)&(JT_LEFT|JT_LTORJ|JT_RIGHT) != 0 { joinType = U32(EP_OuterON) } else { joinType = U32(EP_InnerON) @@ -147049,7 +147092,7 @@ goto __6 } Xsqlite3_log(tls, SQLITE_WARNING|int32(1)<<8, - ts+23275, libc.VaList(bp, (*Table)(unsafe.Pointer(pTable)).FzName, + ts+23322, libc.VaList(bp, (*Table)(unsafe.Pointer(pTable)).FzName, (*Column)(unsafe.Pointer((*Table)(unsafe.Pointer(pTable)).FaCol+uintptr(iCol)*24)).FzCnName)) sentWarning = U8(1) __6: @@ -147120,7 +147163,7 @@ __14: ; *(*uintptr)(unsafe.Pointer(pLoop + 24 + 8)) = pIdx - (*Index)(unsafe.Pointer(pIdx)).FzName = ts + 23301 + (*Index)(unsafe.Pointer(pIdx)).FzName = ts + 23348 (*Index)(unsafe.Pointer(pIdx)).FpTable = pTable n = 0 idxCols = uint64(0) @@ -147294,6 +147337,10 @@ var v uintptr = (*Parse)(unsafe.Pointer(pParse)).FpVdbe var pLoop uintptr = (*WhereLevel)(unsafe.Pointer(pLevel)).FpWLoop var iCur int32 + var saved_pIdxEpr uintptr + + saved_pIdxEpr = 
(*Parse)(unsafe.Pointer(pParse)).FpIdxEpr + (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr = uintptr(0) addrOnce = Xsqlite3VdbeAddOp0(tls, v, OP_Once) for __ccgo := true; __ccgo; __ccgo = iLevel < int32((*WhereInfo)(unsafe.Pointer(pWInfo)).FnLevel) { @@ -147337,9 +147384,7 @@ var r1 int32 = Xsqlite3GetTempRange(tls, pParse, n) var jj int32 for jj = 0; jj < n; jj++ { - var iCol int32 = int32(*(*I16)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx)).FaiColumn + uintptr(jj)*2))) - - Xsqlite3ExprCodeGetColumnOfTable(tls, v, (*Index)(unsafe.Pointer(pIdx)).FpTable, iCur, iCol, r1+jj) + Xsqlite3ExprCodeLoadIndexColumn(tls, pParse, pIdx, iCur, jj, r1+jj) } Xsqlite3VdbeAddOp4Int(tls, v, OP_FilterAdd, (*WhereLevel)(unsafe.Pointer(pLevel)).FregFilter, 0, r1, n) Xsqlite3ReleaseTempRange(tls, pParse, r1, n) @@ -147373,6 +147418,7 @@ } } Xsqlite3VdbeJumpHere(tls, v, addrOnce) + (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr = saved_pIdxEpr } func allocateIndexInfo(tls *libc.TLS, pWInfo uintptr, pWC uintptr, mUnusable Bitmask, pSrc uintptr, pmNoOmit uintptr) uintptr { @@ -147631,11 +147677,16 @@ _ = pParse + if !((*Table)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx)).FpTable)).FtabFlags&U32(TF_WithoutRowid) == U32(0)) && int32(*(*uint16)(unsafe.Pointer(pIdx + 100))&0x3>>0) == SQLITE_IDXTYPE_PRIMARYKEY { + nField = int32((*Index)(unsafe.Pointer(pIdx)).FnKeyCol) + } else { + nField = int32((*Index)(unsafe.Pointer(pIdx)).FnColumn) + } nField = func() int32 { - if int32((*UnpackedRecord)(unsafe.Pointer(pRec)).FnField) < (*Index)(unsafe.Pointer(pIdx)).FnSample { + if int32((*UnpackedRecord)(unsafe.Pointer(pRec)).FnField) < nField { return int32((*UnpackedRecord)(unsafe.Pointer(pRec)).FnField) } - return (*Index)(unsafe.Pointer(pIdx)).FnSample + return nField }() iCol = 0 iSample = (*Index)(unsafe.Pointer(pIdx)).FnSample * nField @@ -149216,7 +149267,7 @@ j >= (*WhereClause)(unsafe.Pointer(pWC)).FnTerm || *(*uintptr)(unsafe.Pointer((*WhereLoop)(unsafe.Pointer(pNew)).FaLTerm + uintptr(iTerm)*8)) != uintptr(0) || int32((*sqlite3_index_constraint)(unsafe.Pointer(pIdxCons)).Fusable) == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+23312, libc.VaList(bp, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pSrc)).FpTab)).FzName)) + Xsqlite3ErrorMsg(tls, pParse, ts+23359, libc.VaList(bp, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pSrc)).FpTab)).FzName)) return SQLITE_ERROR } @@ -149274,7 +149325,7 @@ (*WhereLoop)(unsafe.Pointer(pNew)).FnLTerm = U16(mxTerm + 1) for i = 0; i <= mxTerm; i++ { if *(*uintptr)(unsafe.Pointer((*WhereLoop)(unsafe.Pointer(pNew)).FaLTerm + uintptr(i)*8)) == uintptr(0) { - Xsqlite3ErrorMsg(tls, pParse, ts+23312, libc.VaList(bp+8, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pSrc)).FpTab)).FzName)) + Xsqlite3ErrorMsg(tls, pParse, ts+23359, libc.VaList(bp+8, (*Table)(unsafe.Pointer((*SrcItem)(unsafe.Pointer(pSrc)).FpTab)).FzName)) return SQLITE_ERROR } @@ -149672,7 +149723,7 @@ mPrior = mPrior | (*WhereLoop)(unsafe.Pointer(pNew)).FmaskSelf if rc != 0 || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { if rc == SQLITE_DONE { - Xsqlite3_log(tls, SQLITE_WARNING, ts+23338, 0) + Xsqlite3_log(tls, SQLITE_WARNING, ts+23385, 0) rc = SQLITE_OK } else { goto __3 @@ -150279,7 +150330,7 @@ } if nFrom == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+23373, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+23420, 0) Xsqlite3DbFreeNN(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pSpace) return SQLITE_ERROR } @@ -150314,6 +150365,10 @@ if int32((*WherePath)(unsafe.Pointer(pFrom)).FisOrdered) == 
(*ExprList)(unsafe.Pointer((*WhereInfo)(unsafe.Pointer(pWInfo)).FpOrderBy)).FnExpr { (*WhereInfo)(unsafe.Pointer(pWInfo)).FeDistinct = U8(WHERE_DISTINCT_ORDERED) } + if (*Select)(unsafe.Pointer((*WhereInfo)(unsafe.Pointer(pWInfo)).FpSelect)).FpOrderBy != 0 && + int32((*WhereInfo)(unsafe.Pointer(pWInfo)).FnOBSat) > (*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer((*WhereInfo)(unsafe.Pointer(pWInfo)).FpSelect)).FpOrderBy)).FnExpr { + (*WhereInfo)(unsafe.Pointer(pWInfo)).FnOBSat = I8((*ExprList)(unsafe.Pointer((*Select)(unsafe.Pointer((*WhereInfo)(unsafe.Pointer(pWInfo)).FpSelect)).FpOrderBy)).FnExpr) + } } else { (*WhereInfo)(unsafe.Pointer(pWInfo)).FrevMask = (*WherePath)(unsafe.Pointer(pFrom)).FrevLoop if int32((*WhereInfo)(unsafe.Pointer(pWInfo)).FnOBSat) <= 0 { @@ -150608,6 +150663,9 @@ (*IndexedExpr)(unsafe.Pointer(p)).FiIdxCur = iIdxCur (*IndexedExpr)(unsafe.Pointer(p)).FiIdxCol = i (*IndexedExpr)(unsafe.Pointer(p)).FbMaybeNullRow = U8(bMaybeNullRow) + if Xsqlite3IndexAffinityStr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pIdx) != 0 { + (*IndexedExpr)(unsafe.Pointer(p)).Faff = U8(*(*int8)(unsafe.Pointer((*Index)(unsafe.Pointer(pIdx)).FzColAff + uintptr(i)))) + } (*Parse)(unsafe.Pointer(pParse)).FpIdxEpr = p if (*IndexedExpr)(unsafe.Pointer(p)).FpIENext == uintptr(0) { Xsqlite3ParserAddCleanup(tls, pParse, *(*uintptr)(unsafe.Pointer(&struct { @@ -150760,7 +150818,7 @@ if !((*SrcList)(unsafe.Pointer(pTabList)).FnSrc > int32(uint64(unsafe.Sizeof(Bitmask(0)))*uint64(8))) { goto __2 } - Xsqlite3ErrorMsg(tls, pParse, ts+23391, libc.VaList(bp, int32(uint64(unsafe.Sizeof(Bitmask(0)))*uint64(8)))) + Xsqlite3ErrorMsg(tls, pParse, ts+23438, libc.VaList(bp, int32(uint64(unsafe.Sizeof(Bitmask(0)))*uint64(8)))) return uintptr(0) __2: ; @@ -150824,7 +150882,7 @@ (*WhereInfo)(unsafe.Pointer(pWInfo)).FeDistinct = U8(WHERE_DISTINCT_UNIQUE) __7: ; - Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+23419, 0) + Xsqlite3VdbeExplain(tls, pParse, uint8(0), ts+23466, 0) goto __5 __4: ii = 0 @@ -151706,7 +151764,7 @@ error_out: Xsqlite3_result_error(tls, - pCtx, ts+23437, -1) + pCtx, ts+23484, -1) } func nth_valueFinalizeFunc(tls *libc.TLS, pCtx uintptr) { @@ -151839,7 +151897,7 @@ (*NtileCtx)(unsafe.Pointer(p)).FnParam = Xsqlite3_value_int64(tls, *(*uintptr)(unsafe.Pointer(apArg))) if (*NtileCtx)(unsafe.Pointer(p)).FnParam <= int64(0) { Xsqlite3_result_error(tls, - pCtx, ts+23493, -1) + pCtx, ts+23540, -1) } } (*NtileCtx)(unsafe.Pointer(p)).FnTotal++ @@ -151929,17 +151987,17 @@ } } -var row_numberName = *(*[11]int8)(unsafe.Pointer(ts + 23538)) -var dense_rankName = *(*[11]int8)(unsafe.Pointer(ts + 23549)) -var rankName = *(*[5]int8)(unsafe.Pointer(ts + 23560)) -var percent_rankName = *(*[13]int8)(unsafe.Pointer(ts + 23565)) -var cume_distName = *(*[10]int8)(unsafe.Pointer(ts + 23578)) -var ntileName = *(*[6]int8)(unsafe.Pointer(ts + 23588)) -var last_valueName = *(*[11]int8)(unsafe.Pointer(ts + 23594)) -var nth_valueName = *(*[10]int8)(unsafe.Pointer(ts + 23605)) -var first_valueName = *(*[12]int8)(unsafe.Pointer(ts + 23615)) -var leadName = *(*[5]int8)(unsafe.Pointer(ts + 23627)) -var lagName = *(*[4]int8)(unsafe.Pointer(ts + 23632)) +var row_numberName = *(*[11]int8)(unsafe.Pointer(ts + 23585)) +var dense_rankName = *(*[11]int8)(unsafe.Pointer(ts + 23596)) +var rankName = *(*[5]int8)(unsafe.Pointer(ts + 23607)) +var percent_rankName = *(*[13]int8)(unsafe.Pointer(ts + 23612)) +var cume_distName = *(*[10]int8)(unsafe.Pointer(ts + 23625)) +var ntileName = *(*[6]int8)(unsafe.Pointer(ts + 23635)) +var 
last_valueName = *(*[11]int8)(unsafe.Pointer(ts + 23641)) +var nth_valueName = *(*[10]int8)(unsafe.Pointer(ts + 23652)) +var first_valueName = *(*[12]int8)(unsafe.Pointer(ts + 23662)) +var leadName = *(*[5]int8)(unsafe.Pointer(ts + 23674)) +var lagName = *(*[4]int8)(unsafe.Pointer(ts + 23679)) func noopStepFunc(tls *libc.TLS, p uintptr, n int32, a uintptr) { _ = p @@ -151985,7 +152043,7 @@ } } if p == uintptr(0) { - Xsqlite3ErrorMsg(tls, pParse, ts+23636, libc.VaList(bp, zName)) + Xsqlite3ErrorMsg(tls, pParse, ts+23683, libc.VaList(bp, zName)) } return p } @@ -152029,12 +152087,12 @@ ((*Window)(unsafe.Pointer(pWin)).FpStart != 0 || (*Window)(unsafe.Pointer(pWin)).FpEnd != 0) && ((*Window)(unsafe.Pointer(pWin)).FpOrderBy == uintptr(0) || (*ExprList)(unsafe.Pointer((*Window)(unsafe.Pointer(pWin)).FpOrderBy)).FnExpr != 1) { Xsqlite3ErrorMsg(tls, pParse, - ts+23655, 0) + ts+23702, 0) } else if (*FuncDef)(unsafe.Pointer(pFunc)).FfuncFlags&U32(SQLITE_FUNC_WINDOW) != 0 { var db uintptr = (*Parse)(unsafe.Pointer(pParse)).Fdb if (*Window)(unsafe.Pointer(pWin)).FpFilter != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+23726, 0) + ts+23773, 0) } else { *(*[8]WindowUpdate)(unsafe.Pointer(bp)) = [8]WindowUpdate{ {FzFunc: uintptr(unsafe.Pointer(&row_numberName)), FeFrmType: TK_ROWS, FeStart: TK_UNBOUNDED, FeEnd: TK_CURRENT}, @@ -152263,7 +152321,7 @@ if int32((*Expr)(unsafe.Pointer(pExpr)).Fop) == TK_AGG_FUNCTION && (*Expr)(unsafe.Pointer(pExpr)).FpAggInfo == uintptr(0) { Xsqlite3ErrorMsg(tls, (*Walker)(unsafe.Pointer(pWalker)).FpParse, - ts+23789, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(pExpr + 8)))) + ts+23836, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(pExpr + 8)))) } return WRC_Continue } @@ -152379,7 +152437,7 @@ if *(*uintptr)(unsafe.Pointer(bp + 48)) == uintptr(0) { *(*uintptr)(unsafe.Pointer(bp + 48)) = Xsqlite3ExprListAppend(tls, pParse, uintptr(0), - Xsqlite3Expr(tls, db, TK_INTEGER, ts+8882)) + Xsqlite3Expr(tls, db, TK_INTEGER, ts+8871)) } pSub = Xsqlite3SelectNew(tls, @@ -152494,7 +152552,7 @@ eStart == TK_FOLLOWING && (eEnd == TK_PRECEDING || eEnd == TK_CURRENT)) { goto __2 } - Xsqlite3ErrorMsg(tls, pParse, ts+23815, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+23862, 0) goto windowAllocErr __2: ; @@ -152559,15 +152617,15 @@ var zErr uintptr = uintptr(0) if (*Window)(unsafe.Pointer(pWin)).FpPartition != 0 { - zErr = ts + 23847 + zErr = ts + 23894 } else if (*Window)(unsafe.Pointer(pExist)).FpOrderBy != 0 && (*Window)(unsafe.Pointer(pWin)).FpOrderBy != 0 { - zErr = ts + 23864 + zErr = ts + 23911 } else if int32((*Window)(unsafe.Pointer(pExist)).FbImplicitFrame) == 0 { - zErr = ts + 23880 + zErr = ts + 23927 } if zErr != 0 { Xsqlite3ErrorMsg(tls, pParse, - ts+23900, libc.VaList(bp, zErr, (*Window)(unsafe.Pointer(pWin)).FzBase)) + ts+23947, libc.VaList(bp, zErr, (*Window)(unsafe.Pointer(pWin)).FzBase)) } else { (*Window)(unsafe.Pointer(pWin)).FpPartition = Xsqlite3ExprListDup(tls, db, (*Window)(unsafe.Pointer(pExist)).FpPartition, 0) if (*Window)(unsafe.Pointer(pExist)).FpOrderBy != 0 { @@ -152588,7 +152646,7 @@ (*Window)(unsafe.Pointer(pWin)).FpOwner = p if (*Expr)(unsafe.Pointer(p)).Fflags&U32(EP_Distinct) != 0 && int32((*Window)(unsafe.Pointer(pWin)).FeFrmType) != TK_FILTER { Xsqlite3ErrorMsg(tls, pParse, - ts+23933, 0) + ts+23980, 0) } } else { Xsqlite3WindowDelete(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, pWin) @@ -152744,11 +152802,11 @@ } var azErr = [5]uintptr{ - ts + 23980, - ts + 24033, - ts + 23437, - ts + 24084, - ts + 24136, + ts + 24027, + ts + 24080, + ts + 23484, + ts + 24131, + ts + 
24183, } var aOp1 = [5]int32{OP_Ge, OP_Ge, OP_Gt, OP_Ge, OP_Ge} @@ -154143,19 +154201,19 @@ } cnt++ if (*Select)(unsafe.Pointer(pLoop)).FpOrderBy != 0 || (*Select)(unsafe.Pointer(pLoop)).FpLimit != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+24186, + Xsqlite3ErrorMsg(tls, pParse, ts+24233, libc.VaList(bp, func() uintptr { if (*Select)(unsafe.Pointer(pLoop)).FpOrderBy != uintptr(0) { - return ts + 24228 + return ts + 24275 } - return ts + 24237 + return ts + 24284 }(), Xsqlite3SelectOpName(tls, int32((*Select)(unsafe.Pointer(pNext)).Fop)))) break } } if (*Select)(unsafe.Pointer(p)).FselFlags&U32(SF_MultiValue) == U32(0) && libc.AssignInt32(&mxSelect, *(*int32)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb + 136 + 4*4))) > 0 && cnt > mxSelect { - Xsqlite3ErrorMsg(tls, pParse, ts+24243, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+24290, 0) } } @@ -154223,7 +154281,7 @@ var p uintptr = Xsqlite3ExprListAppend(tls, pParse, pPrior, uintptr(0)) if (hasCollate != 0 || sortOrder != -1) && int32((*Sqlite3)(unsafe.Pointer((*Parse)(unsafe.Pointer(pParse)).Fdb)).Finit.Fbusy) == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+24277, + Xsqlite3ErrorMsg(tls, pParse, ts+24324, libc.VaList(bp, (*Token)(unsafe.Pointer(pIdToken)).Fn, (*Token)(unsafe.Pointer(pIdToken)).Fz)) } Xsqlite3ExprListSetName(tls, pParse, p, pIdToken, 1) @@ -155320,7 +155378,7 @@ yy_pop_parser_stack(tls, yypParser) } - Xsqlite3ErrorMsg(tls, pParse, ts+24315, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+24362, 0) (*YyParser)(unsafe.Pointer(yypParser)).FpParse = pParse } @@ -156299,7 +156357,7 @@ *(*U32)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-1)*24 + 8)) = U32(TF_WithoutRowid | TF_NoVisibleRowid) } else { *(*U32)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-1)*24 + 8)) = U32(0) - Xsqlite3ErrorMsg(tls, pParse, ts+24337, libc.VaList(bp, (*Token)(unsafe.Pointer(yymsp+8)).Fn, (*Token)(unsafe.Pointer(yymsp+8)).Fz)) + Xsqlite3ErrorMsg(tls, pParse, ts+24384, libc.VaList(bp, (*Token)(unsafe.Pointer(yymsp+8)).Fn, (*Token)(unsafe.Pointer(yymsp+8)).Fz)) } } break @@ -156309,7 +156367,7 @@ *(*U32)(unsafe.Pointer(bp + 40)) = U32(TF_Strict) } else { *(*U32)(unsafe.Pointer(bp + 40)) = U32(0) - Xsqlite3ErrorMsg(tls, pParse, ts+24337, libc.VaList(bp+16, (*Token)(unsafe.Pointer(yymsp+8)).Fn, (*Token)(unsafe.Pointer(yymsp+8)).Fz)) + Xsqlite3ErrorMsg(tls, pParse, ts+24384, libc.VaList(bp+16, (*Token)(unsafe.Pointer(yymsp+8)).Fn, (*Token)(unsafe.Pointer(yymsp+8)).Fz)) } } *(*U32)(unsafe.Pointer(yymsp + 8)) = *(*U32)(unsafe.Pointer(bp + 40)) @@ -157052,7 +157110,7 @@ case uint32(157): { Xsqlite3SrcListIndexedBy(tls, pParse, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-5)*24 + 8)), yymsp+libc.UintptrFromInt32(-4)*24+8) - Xsqlite3ExprListCheckLength(tls, pParse, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-2)*24 + 8)), ts+24364) + Xsqlite3ExprListCheckLength(tls, pParse, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-2)*24 + 8)), ts+24411) if *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-1)*24 + 8)) != 0 { var pFromClause uintptr = *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-1)*24 + 8)) if (*SrcList)(unsafe.Pointer(pFromClause)).FnSrc > 1 { @@ -157216,7 +157274,7 @@ *(*Token)(unsafe.Pointer(bp + 128)) = *(*Token)(unsafe.Pointer(yymsp + 8)) if int32((*Parse)(unsafe.Pointer(pParse)).Fnested) == 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+24373, libc.VaList(bp+32, bp+128)) + Xsqlite3ErrorMsg(tls, pParse, ts+24420, libc.VaList(bp+32, bp+128)) *(*uintptr)(unsafe.Pointer(yymsp + 8)) = uintptr(0) } else { 
*(*uintptr)(unsafe.Pointer(yymsp + 8)) = Xsqlite3PExpr(tls, pParse, TK_REGISTER, uintptr(0), uintptr(0)) @@ -157433,9 +157491,9 @@ Xsqlite3ExprUnmapAndDelete(tls, pParse, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-4)*24 + 8))) *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-4)*24 + 8)) = Xsqlite3Expr(tls, (*Parse)(unsafe.Pointer(pParse)).Fdb, TK_STRING, func() uintptr { if *(*int32)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-3)*24 + 8)) != 0 { - return ts + 8116 + return ts + 9046 } - return ts + 8121 + return ts + 9051 }()) if *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-4)*24 + 8)) != 0 { Xsqlite3ExprIdToTrueFalse(tls, *(*uintptr)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-4)*24 + 8))) @@ -157719,19 +157777,19 @@ { *(*Token)(unsafe.Pointer(yymsp + libc.UintptrFromInt32(-2)*24 + 8)) = *(*Token)(unsafe.Pointer(yymsp + 8)) Xsqlite3ErrorMsg(tls, pParse, - ts+24397, 0) + ts+24444, 0) } break case uint32(271): { Xsqlite3ErrorMsg(tls, pParse, - ts+24492, 0) + ts+24539, 0) } break case uint32(272): { Xsqlite3ErrorMsg(tls, pParse, - ts+24576, 0) + ts+24623, 0) } break case uint32(273): @@ -158110,9 +158168,9 @@ _ = yymajor if *(*int8)(unsafe.Pointer((*Token)(unsafe.Pointer(bp + 8)).Fz)) != 0 { - Xsqlite3ErrorMsg(tls, pParse, ts+24373, libc.VaList(bp, bp+8)) + Xsqlite3ErrorMsg(tls, pParse, ts+24420, libc.VaList(bp, bp+8)) } else { - Xsqlite3ErrorMsg(tls, pParse, ts+24661, 0) + Xsqlite3ErrorMsg(tls, pParse, ts+24708, 0) } (*YyParser)(unsafe.Pointer(yypParser)).FpParse = pParse @@ -158880,7 +158938,7 @@ } else { (*Token)(unsafe.Pointer(bp + 2464)).Fz = zSql (*Token)(unsafe.Pointer(bp + 2464)).Fn = uint32(n) - Xsqlite3ErrorMsg(tls, pParse, ts+24678, libc.VaList(bp, bp+2464)) + Xsqlite3ErrorMsg(tls, pParse, ts+24725, libc.VaList(bp, bp+2464)) break } } @@ -158903,7 +158961,7 @@ if (*Parse)(unsafe.Pointer(pParse)).FzErrMsg == uintptr(0) { (*Parse)(unsafe.Pointer(pParse)).FzErrMsg = Xsqlite3MPrintf(tls, db, ts+4493, libc.VaList(bp+8, Xsqlite3ErrStr(tls, (*Parse)(unsafe.Pointer(pParse)).Frc))) } - Xsqlite3_log(tls, (*Parse)(unsafe.Pointer(pParse)).Frc, ts+24703, libc.VaList(bp+16, (*Parse)(unsafe.Pointer(pParse)).FzErrMsg, (*Parse)(unsafe.Pointer(pParse)).FzTail)) + Xsqlite3_log(tls, (*Parse)(unsafe.Pointer(pParse)).Frc, ts+24750, libc.VaList(bp+16, (*Parse)(unsafe.Pointer(pParse)).FzErrMsg, (*Parse)(unsafe.Pointer(pParse)).FzTail)) nErr++ } (*Parse)(unsafe.Pointer(pParse)).FzTail = zSql @@ -159076,7 +159134,7 @@ fallthrough case 'C': { - if nId == 6 && Xsqlite3_strnicmp(tls, zSql, ts+24714, 6) == 0 { + if nId == 6 && Xsqlite3_strnicmp(tls, zSql, ts+24761, 6) == 0 { token = U8(TkCREATE) } else { token = U8(TkOTHER) @@ -159089,11 +159147,11 @@ fallthrough case 'T': { - if nId == 7 && Xsqlite3_strnicmp(tls, zSql, ts+21300, 7) == 0 { + if nId == 7 && Xsqlite3_strnicmp(tls, zSql, ts+21347, 7) == 0 { token = U8(TkTRIGGER) - } else if nId == 4 && Xsqlite3_strnicmp(tls, zSql, ts+24721, 4) == 0 { + } else if nId == 4 && Xsqlite3_strnicmp(tls, zSql, ts+24768, 4) == 0 { token = U8(TkTEMP) - } else if nId == 9 && Xsqlite3_strnicmp(tls, zSql, ts+24726, 9) == 0 { + } else if nId == 9 && Xsqlite3_strnicmp(tls, zSql, ts+24773, 9) == 0 { token = U8(TkTEMP) } else { token = U8(TkOTHER) @@ -159106,9 +159164,9 @@ fallthrough case 'E': { - if nId == 3 && Xsqlite3_strnicmp(tls, zSql, ts+24736, 3) == 0 { + if nId == 3 && Xsqlite3_strnicmp(tls, zSql, ts+24783, 3) == 0 { token = U8(TkEND) - } else if nId == 7 && Xsqlite3_strnicmp(tls, zSql, ts+24740, 7) == 0 { + } else if nId == 7 && 
Xsqlite3_strnicmp(tls, zSql, ts+24787, 7) == 0 { token = U8(TkEXPLAIN) } else { token = U8(TkOTHER) @@ -159342,7 +159400,7 @@ var rc int32 = SQLITE_OK if Xsqlite3Config.FisInit != 0 { - return Xsqlite3MisuseError(tls, 174337) + return Xsqlite3MisuseError(tls, 174426) } ap = va @@ -159917,7 +159975,7 @@ return SQLITE_OK } if !(Xsqlite3SafetyCheckSickOrOk(tls, db) != 0) { - return Xsqlite3MisuseError(tls, 175111) + return Xsqlite3MisuseError(tls, 175200) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) if int32((*Sqlite3)(unsafe.Pointer(db)).FmTrace)&SQLITE_TRACE_CLOSE != 0 { @@ -159932,7 +159990,7 @@ if !(forceZombie != 0) && connectionIsBusy(tls, db) != 0 { Xsqlite3ErrorWithMsg(tls, db, SQLITE_BUSY, - ts+24748, 0) + ts+24795, 0) Xsqlite3_mutex_leave(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) return SQLITE_BUSY } @@ -160123,23 +160181,23 @@ // Return a static string that describes the kind of error specified in the // argument. func Xsqlite3ErrStr(tls *libc.TLS, rc int32) uintptr { - var zErr uintptr = ts + 24816 + var zErr uintptr = ts + 24863 switch rc { case SQLITE_ABORT | int32(2)<<8: { - zErr = ts + 24830 + zErr = ts + 24877 break } case SQLITE_ROW: { - zErr = ts + 24852 + zErr = ts + 24899 break } case SQLITE_DONE: { - zErr = ts + 24874 + zErr = ts + 24921 break } @@ -160157,35 +160215,35 @@ } var aMsg = [29]uintptr{ - ts + 24897, - ts + 24910, + ts + 24944, + ts + 24957, uintptr(0), - ts + 24926, - ts + 24951, - ts + 24965, - ts + 24984, + ts + 24973, + ts + 24998, + ts + 25012, + ts + 25031, ts + 1480, - ts + 25009, - ts + 25046, - ts + 25058, - ts + 25073, - ts + 25106, - ts + 25124, - ts + 25149, - ts + 25178, + ts + 25056, + ts + 25093, + ts + 25105, + ts + 25120, + ts + 25153, + ts + 25171, + ts + 25196, + ts + 25225, uintptr(0), ts + 7190, ts + 6686, - ts + 25195, - ts + 25213, - ts + 25231, - uintptr(0), - ts + 25265, + ts + 25242, + ts + 25260, + ts + 25278, uintptr(0), - ts + 25286, ts + 25312, - ts + 25335, - ts + 25356, + uintptr(0), + ts + 25333, + ts + 25359, + ts + 25382, + ts + 25403, } func sqliteDefaultBusyCallback(tls *libc.TLS, ptr uintptr, count int32) int32 { @@ -160306,7 +160364,7 @@ libc.Bool32(xValue == uintptr(0)) != libc.Bool32(xInverse == uintptr(0)) || (nArg < -1 || nArg > SQLITE_MAX_FUNCTION_ARG) || 255 < Xsqlite3Strlen30(tls, zFunctionName) { - return Xsqlite3MisuseError(tls, 175758) + return Xsqlite3MisuseError(tls, 175847) } extraFlags = enc & (SQLITE_DETERMINISTIC | SQLITE_DIRECTONLY | SQLITE_SUBTYPE | SQLITE_INNOCUOUS) @@ -160351,7 +160409,7 @@ if p != 0 && (*FuncDef)(unsafe.Pointer(p)).FfuncFlags&U32(SQLITE_FUNC_ENCMASK) == U32(enc) && int32((*FuncDef)(unsafe.Pointer(p)).FnArg) == nArg { if (*Sqlite3)(unsafe.Pointer(db)).FnVdbeActive != 0 { Xsqlite3ErrorWithMsg(tls, db, SQLITE_BUSY, - ts+25372, 0) + ts+25419, 0) return SQLITE_BUSY } else { @@ -160468,7 +160526,7 @@ _ = NotUsed _ = NotUsed2 zErr = Xsqlite3_mprintf(tls, - ts+25435, libc.VaList(bp, zName)) + ts+25482, libc.VaList(bp, zName)) Xsqlite3_result_error(tls, context, zErr, -1) Xsqlite3_free(tls, zErr) } @@ -160704,7 +160762,7 @@ } if iDb < 0 { rc = SQLITE_ERROR - Xsqlite3ErrorWithMsg(tls, db, SQLITE_ERROR, ts+25486, libc.VaList(bp, zDb)) + Xsqlite3ErrorWithMsg(tls, db, SQLITE_ERROR, ts+25533, libc.VaList(bp, zDb)) } else { (*Sqlite3)(unsafe.Pointer(db)).FbusyHandler.FnBusy = 0 rc = Xsqlite3Checkpoint(tls, db, iDb, eMode, pnLog, pnCkpt) @@ -160797,7 +160855,7 @@ return Xsqlite3ErrStr(tls, SQLITE_NOMEM) } if !(Xsqlite3SafetyCheckSickOrOk(tls, db) != 0) { - return 
Xsqlite3ErrStr(tls, Xsqlite3MisuseError(tls, 176503)) + return Xsqlite3ErrStr(tls, Xsqlite3MisuseError(tls, 176592)) } Xsqlite3_mutex_enter(tls, (*Sqlite3)(unsafe.Pointer(db)).Fmutex) if (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { @@ -160867,7 +160925,7 @@ // passed to this function, we assume a malloc() failed during sqlite3_open(). func Xsqlite3_errcode(tls *libc.TLS, db uintptr) int32 { if db != 0 && !(Xsqlite3SafetyCheckSickOrOk(tls, db) != 0) { - return Xsqlite3MisuseError(tls, 176582) + return Xsqlite3MisuseError(tls, 176671) } if !(db != 0) || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { return SQLITE_NOMEM @@ -160877,7 +160935,7 @@ func Xsqlite3_extended_errcode(tls *libc.TLS, db uintptr) int32 { if db != 0 && !(Xsqlite3SafetyCheckSickOrOk(tls, db) != 0) { - return Xsqlite3MisuseError(tls, 176591) + return Xsqlite3MisuseError(tls, 176680) } if !(db != 0) || (*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 { return SQLITE_NOMEM @@ -160909,14 +160967,14 @@ enc2 = SQLITE_UTF16LE } if enc2 < SQLITE_UTF8 || enc2 > SQLITE_UTF16BE { - return Xsqlite3MisuseError(tls, 176639) + return Xsqlite3MisuseError(tls, 176728) } pColl = Xsqlite3FindCollSeq(tls, db, U8(enc2), zName, 0) if pColl != 0 && (*CollSeq)(unsafe.Pointer(pColl)).FxCmp != 0 { if (*Sqlite3)(unsafe.Pointer(db)).FnVdbeActive != 0 { Xsqlite3ErrorWithMsg(tls, db, SQLITE_BUSY, - ts+25507, 0) + ts+25554, 0) return SQLITE_BUSY } Xsqlite3ExpirePreparedStatements(tls, db, 0) @@ -161046,7 +161104,7 @@ if !((flags&uint32(SQLITE_OPEN_URI) != 0 || Xsqlite3Config.FbOpenUri != 0) && - nUri >= 5 && libc.Xmemcmp(tls, zUri, ts+25575, uint64(5)) == 0) { + nUri >= 5 && libc.Xmemcmp(tls, zUri, ts+25622, uint64(5)) == 0) { goto __1 } iOut = 0 @@ -161091,10 +161149,10 @@ goto __8 __9: ; - if !(iIn != 7 && (iIn != 16 || libc.Xmemcmp(tls, ts+25581, zUri+7, uint64(9)) != 0)) { + if !(iIn != 7 && (iIn != 16 || libc.Xmemcmp(tls, ts+25628, zUri+7, uint64(9)) != 0)) { goto __10 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+25591, + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+25638, libc.VaList(bp, iIn-7, zUri+7)) rc = SQLITE_ERROR goto parse_uri_out @@ -161199,7 +161257,7 @@ zVal = zOpt + uintptr(nOpt+1) nVal = Xsqlite3Strlen30(tls, zVal) - if !(nOpt == 3 && libc.Xmemcmp(tls, ts+25619, zOpt, uint64(3)) == 0) { + if !(nOpt == 3 && libc.Xmemcmp(tls, ts+25666, zOpt, uint64(3)) == 0) { goto __29 } zVfs = zVal @@ -161210,17 +161268,17 @@ mask = 0 limit = 0 - if !(nOpt == 5 && libc.Xmemcmp(tls, ts+25623, zOpt, uint64(5)) == 0) { + if !(nOpt == 5 && libc.Xmemcmp(tls, ts+25670, zOpt, uint64(5)) == 0) { goto __31 } mask = SQLITE_OPEN_SHAREDCACHE | SQLITE_OPEN_PRIVATECACHE aMode = uintptr(unsafe.Pointer(&aCacheMode)) limit = mask - zModeType = ts + 25623 + zModeType = ts + 25670 __31: ; - if !(nOpt == 4 && libc.Xmemcmp(tls, ts+25629, zOpt, uint64(4)) == 0) { + if !(nOpt == 4 && libc.Xmemcmp(tls, ts+25676, zOpt, uint64(4)) == 0) { goto __32 } @@ -161228,7 +161286,7 @@ SQLITE_OPEN_CREATE | SQLITE_OPEN_MEMORY aMode = uintptr(unsafe.Pointer(&aOpenMode)) limit = int32(uint32(mask) & flags) - zModeType = ts + 25634 + zModeType = ts + 25681 __32: ; if !(aMode != 0) { @@ -161258,7 +161316,7 @@ if !(mode == 0) { goto __38 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+25641, libc.VaList(bp+16, zModeType, zVal)) + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+25688, libc.VaList(bp+16, zModeType, zVal)) rc = SQLITE_ERROR goto parse_uri_out __38: @@ -161266,7 +161324,7 @@ if 
!(mode&libc.CplInt32(SQLITE_OPEN_MEMORY) > limit) { goto __39 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+25661, + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+25708, libc.VaList(bp+32, zModeType, zVal)) rc = SQLITE_PERM goto parse_uri_out @@ -161306,7 +161364,7 @@ if !(*(*uintptr)(unsafe.Pointer(ppVfs)) == uintptr(0)) { goto __42 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+25685, libc.VaList(bp+48, zVfs)) + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+25732, libc.VaList(bp+48, zVfs)) rc = SQLITE_ERROR __42: ; @@ -161330,14 +161388,14 @@ } var aCacheMode = [3]OpenMode{ - {Fz: ts + 25701, Fmode: SQLITE_OPEN_SHAREDCACHE}, - {Fz: ts + 25708, Fmode: SQLITE_OPEN_PRIVATECACHE}, + {Fz: ts + 25748, Fmode: SQLITE_OPEN_SHAREDCACHE}, + {Fz: ts + 25755, Fmode: SQLITE_OPEN_PRIVATECACHE}, {}, } var aOpenMode = [5]OpenMode{ - {Fz: ts + 25716, Fmode: SQLITE_OPEN_READONLY}, - {Fz: ts + 25719, Fmode: SQLITE_OPEN_READWRITE}, - {Fz: ts + 25722, Fmode: SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE}, + {Fz: ts + 25763, Fmode: SQLITE_OPEN_READONLY}, + {Fz: ts + 25766, Fmode: SQLITE_OPEN_READWRITE}, + {Fz: ts + 25769, Fmode: SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE}, {Fz: ts + 18731, Fmode: SQLITE_OPEN_MEMORY}, {}, } @@ -161484,10 +161542,10 @@ createCollation(tls, db, uintptr(unsafe.Pointer(&Xsqlite3StrBINARY)), uint8(SQLITE_UTF16LE), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr, int32, uintptr) int32 }{binCollFunc})), uintptr(0)) - createCollation(tls, db, ts+23232, uint8(SQLITE_UTF8), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + createCollation(tls, db, ts+23279, uint8(SQLITE_UTF8), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr, int32, uintptr) int32 }{nocaseCollatingFunc})), uintptr(0)) - createCollation(tls, db, ts+25726, uint8(SQLITE_UTF8), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + createCollation(tls, db, ts+25773, uint8(SQLITE_UTF8), uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr, int32, uintptr) int32 }{rtrimCollFunc})), uintptr(0)) if !((*Sqlite3)(unsafe.Pointer(db)).FmallocFailed != 0) { @@ -161501,7 +161559,7 @@ if !(int32(1)<<(*(*uint32)(unsafe.Pointer(bp + 8))&uint32(7))&0x46 == 0) { goto __16 } - rc = Xsqlite3MisuseError(tls, 177308) + rc = Xsqlite3MisuseError(tls, 177397) goto __17 __16: rc = Xsqlite3ParseUri(tls, zVfs, zFilename, bp+8, db, bp+16, bp+24) @@ -161554,7 +161612,7 @@ (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb)).FzDbSName = ts + 7793 (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb)).Fsafety_level = U8(SQLITE_DEFAULT_SYNCHRONOUS + 1) - (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + 1*32)).FzDbSName = ts + 24721 + (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + 1*32)).FzDbSName = ts + 24768 (*Db)(unsafe.Pointer((*Sqlite3)(unsafe.Pointer(db)).FaDb + 1*32)).Fsafety_level = U8(PAGER_SYNCHRONOUS_OFF) (*Sqlite3)(unsafe.Pointer(db)).FeOpenState = U8(SQLITE_STATE_OPEN) @@ -161659,7 +161717,7 @@ return rc } if zFilename == uintptr(0) { - zFilename = ts + 25732 + zFilename = ts + 25779 } pVal = Xsqlite3ValueNew(tls, uintptr(0)) Xsqlite3ValueSetStr(tls, pVal, -1, zFilename, uint8(SQLITE_UTF16LE), uintptr(0)) @@ -161762,21 +161820,21 @@ bp := tls.Alloc(24) defer tls.Free(24) - Xsqlite3_log(tls, iErr, ts+25735, + Xsqlite3_log(tls, iErr, ts+25782, libc.VaList(bp, zType, lineno, uintptr(20)+Xsqlite3_sourceid(tls))) return 
iErr } func Xsqlite3CorruptError(tls *libc.TLS, lineno int32) int32 { - return Xsqlite3ReportError(tls, SQLITE_CORRUPT, lineno, ts+25760) + return Xsqlite3ReportError(tls, SQLITE_CORRUPT, lineno, ts+25807) } func Xsqlite3MisuseError(tls *libc.TLS, lineno int32) int32 { - return Xsqlite3ReportError(tls, SQLITE_MISUSE, lineno, ts+25780) + return Xsqlite3ReportError(tls, SQLITE_MISUSE, lineno, ts+25827) } func Xsqlite3CantopenError(tls *libc.TLS, lineno int32) int32 { - return Xsqlite3ReportError(tls, SQLITE_CANTOPEN, lineno, ts+25787) + return Xsqlite3ReportError(tls, SQLITE_CANTOPEN, lineno, ts+25834) } // This is a convenience routine that makes sure that all thread-specific @@ -161934,7 +161992,7 @@ goto __20 } Xsqlite3DbFree(tls, db, *(*uintptr)(unsafe.Pointer(bp + 24))) - *(*uintptr)(unsafe.Pointer(bp + 24)) = Xsqlite3MPrintf(tls, db, ts+25804, libc.VaList(bp, zTableName, + *(*uintptr)(unsafe.Pointer(bp + 24)) = Xsqlite3MPrintf(tls, db, ts+25851, libc.VaList(bp, zTableName, zColumnName)) rc = SQLITE_ERROR __20: @@ -162590,7 +162648,7 @@ azCompileOpt = Xsqlite3CompileOptions(tls, bp) - if Xsqlite3_strnicmp(tls, zOptName, ts+25832, 7) == 0 { + if Xsqlite3_strnicmp(tls, zOptName, ts+25879, 7) == 0 { zOptName += uintptr(7) } n = Xsqlite3Strlen30(tls, zOptName) @@ -162708,7 +162766,7 @@ Xsqlite3ErrorWithMsg(tls, db, rc, func() uintptr { if rc != 0 { - return ts + 25840 + return ts + 25887 } return uintptr(0) }(), 0) @@ -162886,7 +162944,7 @@ type JsonParse = JsonParse1 var jsonType = [8]uintptr{ - ts + 7533, ts + 8116, ts + 8121, ts + 7543, ts + 7538, ts + 9357, ts + 25863, ts + 25869, + ts + 7533, ts + 9046, ts + 9051, ts + 7543, ts + 7538, ts + 9357, ts + 25910, ts + 25916, } func jsonZero(tls *libc.TLS, p uintptr) { @@ -163039,7 +163097,7 @@ *(*int8)(unsafe.Pointer((*JsonString)(unsafe.Pointer(p)).FzBuf + uintptr(libc.PostIncUint64(&(*JsonString)(unsafe.Pointer(p)).FnUsed, 1)))) = int8('0') *(*int8)(unsafe.Pointer((*JsonString)(unsafe.Pointer(p)).FzBuf + uintptr(libc.PostIncUint64(&(*JsonString)(unsafe.Pointer(p)).FnUsed, 1)))) = int8('0') *(*int8)(unsafe.Pointer((*JsonString)(unsafe.Pointer(p)).FzBuf + uintptr(libc.PostIncUint64(&(*JsonString)(unsafe.Pointer(p)).FnUsed, 1)))) = int8('0' + int32(c)>>4) - c = uint8(*(*int8)(unsafe.Pointer(ts + 25876 + uintptr(int32(c)&0xf)))) + c = uint8(*(*int8)(unsafe.Pointer(ts + 25923 + uintptr(int32(c)&0xf)))) __8: ; __6: @@ -163094,7 +163152,7 @@ default: { if int32((*JsonString)(unsafe.Pointer(p)).FbErr) == 0 { - Xsqlite3_result_error(tls, (*JsonString)(unsafe.Pointer(p)).FpCtx, ts+25893, -1) + Xsqlite3_result_error(tls, (*JsonString)(unsafe.Pointer(p)).FpCtx, ts+25940, -1) (*JsonString)(unsafe.Pointer(p)).FbErr = U8(2) jsonReset(tls, p) } @@ -163158,13 +163216,13 @@ } case JSON_TRUE: { - jsonAppendRaw(tls, pOut, ts+8116, uint32(4)) + jsonAppendRaw(tls, pOut, ts+9046, uint32(4)) break } case JSON_FALSE: { - jsonAppendRaw(tls, pOut, ts+8121, uint32(5)) + jsonAppendRaw(tls, pOut, ts+9051, uint32(5)) break } @@ -163714,12 +163772,12 @@ jsonParseAddNode(tls, pParse, uint32(JSON_NULL), uint32(0), uintptr(0)) return int32(i + U32(4)) } else if int32(c) == 't' && - libc.Xstrncmp(tls, z+uintptr(i), ts+8116, uint64(4)) == 0 && + libc.Xstrncmp(tls, z+uintptr(i), ts+9046, uint64(4)) == 0 && !(int32(Xsqlite3CtypeMap[uint8(*(*int8)(unsafe.Pointer(z + uintptr(i+U32(4)))))])&0x06 != 0) { jsonParseAddNode(tls, pParse, uint32(JSON_TRUE), uint32(0), uintptr(0)) return int32(i + U32(4)) } else if int32(c) == 'f' && - libc.Xstrncmp(tls, z+uintptr(i), ts+8121, 
uint64(5)) == 0 && + libc.Xstrncmp(tls, z+uintptr(i), ts+9051, uint64(5)) == 0 && !(int32(Xsqlite3CtypeMap[uint8(*(*int8)(unsafe.Pointer(z + uintptr(i+U32(5)))))])&0x06 != 0) { jsonParseAddNode(tls, pParse, uint32(JSON_FALSE), uint32(0), uintptr(0)) return int32(i + U32(5)) @@ -163820,7 +163878,7 @@ if (*JsonParse)(unsafe.Pointer(pParse)).Foom != 0 { Xsqlite3_result_error_nomem(tls, pCtx) } else { - Xsqlite3_result_error(tls, pCtx, ts+25922, -1) + Xsqlite3_result_error(tls, pCtx, ts+25969, -1) } } jsonParseReset(tls, pParse) @@ -164126,7 +164184,7 @@ } if int32(*(*int8)(unsafe.Pointer(zPath))) == '.' { jsonParseAddNode(tls, pParse, uint32(JSON_OBJECT), uint32(0), uintptr(0)) - } else if libc.Xstrncmp(tls, zPath, ts+25937, uint64(3)) == 0 { + } else if libc.Xstrncmp(tls, zPath, ts+25984, uint64(3)) == 0 { jsonParseAddNode(tls, pParse, uint32(JSON_ARRAY), uint32(0), uintptr(0)) } else { return uintptr(0) @@ -164141,7 +164199,7 @@ bp := tls.Alloc(8) defer tls.Free(8) - return Xsqlite3_mprintf(tls, ts+25941, libc.VaList(bp, zErr)) + return Xsqlite3_mprintf(tls, ts+25988, libc.VaList(bp, zErr)) } func jsonLookup(tls *libc.TLS, pParse uintptr, zPath uintptr, pApnd uintptr, pCtx uintptr) uintptr { @@ -164195,7 +164253,7 @@ bp := tls.Alloc(8) defer tls.Free(8) - var zMsg uintptr = Xsqlite3_mprintf(tls, ts+25967, + var zMsg uintptr = Xsqlite3_mprintf(tls, ts+26014, libc.VaList(bp, zFuncName)) Xsqlite3_result_error(tls, pCtx, zMsg, -1) Xsqlite3_free(tls, zMsg) @@ -164300,11 +164358,11 @@ if int32(*(*int8)(unsafe.Pointer(zPath))) != '$' { jsonInit(tls, bp, ctx) if int32(Xsqlite3CtypeMap[uint8(*(*int8)(unsafe.Pointer(zPath)))])&0x04 != 0 { - jsonAppendRaw(tls, bp, ts+26010, uint32(2)) + jsonAppendRaw(tls, bp, ts+26057, uint32(2)) jsonAppendRaw(tls, bp, zPath, uint32(int32(libc.Xstrlen(tls, zPath)))) jsonAppendRaw(tls, bp, ts+6350, uint32(2)) } else { - jsonAppendRaw(tls, bp, ts+26013, uint32(1+libc.Bool32(int32(*(*int8)(unsafe.Pointer(zPath))) != '['))) + jsonAppendRaw(tls, bp, ts+26060, uint32(1+libc.Bool32(int32(*(*int8)(unsafe.Pointer(zPath))) != '['))) jsonAppendRaw(tls, bp, zPath, uint32(int32(libc.Xstrlen(tls, zPath)))) jsonAppendChar(tls, bp, int8(0)) } @@ -164461,14 +164519,14 @@ if argc&1 != 0 { Xsqlite3_result_error(tls, ctx, - ts+26016, -1) + ts+26063, -1) return } jsonInit(tls, bp, ctx) jsonAppendChar(tls, bp, int8('{')) for i = 0; i < argc; i = i + 2 { if Xsqlite3_value_type(tls, *(*uintptr)(unsafe.Pointer(argv + uintptr(i)*8))) != SQLITE_TEXT { - Xsqlite3_result_error(tls, ctx, ts+26067, -1) + Xsqlite3_result_error(tls, ctx, ts+26114, -1) jsonReset(tls, bp) return } @@ -164638,9 +164696,9 @@ } jsonWrongNumArgs(tls, ctx, func() uintptr { if bIsSet != 0 { - return ts + 26101 + return ts + 26148 } - return ts + 26105 + return ts + 26152 }()) return __2: @@ -164773,7 +164831,7 @@ (*JsonString)(unsafe.Pointer(pStr)).FnUsed-- } } else { - Xsqlite3_result_text(tls, ctx, ts+26112, 2, uintptr(0)) + Xsqlite3_result_text(tls, ctx, ts+26159, 2, uintptr(0)) } Xsqlite3_result_subtype(tls, ctx, uint32(JSON_SUBTYPE)) } @@ -164870,7 +164928,7 @@ (*JsonString)(unsafe.Pointer(pStr)).FnUsed-- } } else { - Xsqlite3_result_text(tls, ctx, ts+26115, 2, uintptr(0)) + Xsqlite3_result_text(tls, ctx, ts+26162, 2, uintptr(0)) } Xsqlite3_result_subtype(tls, ctx, uint32(JSON_SUBTYPE)) } @@ -164914,7 +164972,7 @@ _ = argc _ = pAux rc = Xsqlite3_declare_vtab(tls, db, - ts+26118) + ts+26165) if rc == SQLITE_OK { pNew = libc.AssignPtrUintptr(ppVtab, Xsqlite3_malloc(tls, int32(unsafe.Sizeof(Sqlite3_vtab{})))) if pNew == 
uintptr(0) { @@ -165045,7 +165103,7 @@ nn = nn - 2 } } - jsonPrintf(tls, nn+2, pStr, ts+26201, libc.VaList(bp, nn, z)) + jsonPrintf(tls, nn+2, pStr, ts+26248, libc.VaList(bp, nn, z)) } func jsonEachComputePath(tls *libc.TLS, p uintptr, pStr uintptr, i U32) { @@ -165064,7 +165122,7 @@ pNode = (*JsonEachCursor)(unsafe.Pointer(p)).FsParse.FaNode + uintptr(i)*16 pUp = (*JsonEachCursor)(unsafe.Pointer(p)).FsParse.FaNode + uintptr(iUp)*16 if int32((*JsonNode)(unsafe.Pointer(pUp)).FeType) == JSON_ARRAY { - jsonPrintf(tls, 30, pStr, ts+26207, libc.VaList(bp, *(*U32)(unsafe.Pointer(pUp + 8)))) + jsonPrintf(tls, 30, pStr, ts+26254, libc.VaList(bp, *(*U32)(unsafe.Pointer(pUp + 8)))) } else { if int32((*JsonNode)(unsafe.Pointer(pNode)).FjnFlags)&JNODE_LABEL == 0 { pNode -= 16 @@ -165160,7 +165218,7 @@ jsonAppendChar(tls, bp+8, int8('$')) } if int32((*JsonEachCursor)(unsafe.Pointer(p)).FeType) == JSON_ARRAY { - jsonPrintf(tls, 30, bp+8, ts+26207, libc.VaList(bp, (*JsonEachCursor)(unsafe.Pointer(p)).FiRowid)) + jsonPrintf(tls, 30, bp+8, ts+26254, libc.VaList(bp, (*JsonEachCursor)(unsafe.Pointer(p)).FiRowid)) } else if int32((*JsonEachCursor)(unsafe.Pointer(p)).FeType) == JSON_OBJECT { jsonAppendObjectPathElement(tls, bp+8, pThis) } @@ -165184,7 +165242,7 @@ { var zRoot uintptr = (*JsonEachCursor)(unsafe.Pointer(p)).FzRoot if zRoot == uintptr(0) { - zRoot = ts + 26212 + zRoot = ts + 26259 } Xsqlite3_result_text(tls, ctx, zRoot, -1, uintptr(0)) break @@ -165306,7 +165364,7 @@ var rc int32 = SQLITE_NOMEM if int32((*JsonEachCursor)(unsafe.Pointer(p)).FsParse.Foom) == 0 { Xsqlite3_free(tls, (*Sqlite3_vtab)(unsafe.Pointer((*Sqlite3_vtab_cursor)(unsafe.Pointer(cur)).FpVtab)).FzErrMsg) - (*Sqlite3_vtab)(unsafe.Pointer((*Sqlite3_vtab_cursor)(unsafe.Pointer(cur)).FpVtab)).FzErrMsg = Xsqlite3_mprintf(tls, ts+25922, 0) + (*Sqlite3_vtab)(unsafe.Pointer((*Sqlite3_vtab_cursor)(unsafe.Pointer(cur)).FpVtab)).FzErrMsg = Xsqlite3_mprintf(tls, ts+25969, 0) if (*Sqlite3_vtab)(unsafe.Pointer((*Sqlite3_vtab_cursor)(unsafe.Pointer(cur)).FpVtab)).FzErrMsg != 0 { rc = SQLITE_ERROR } @@ -165401,25 +165459,25 @@ } var aJsonFunc = [19]FuncDef{ - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 26214}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 26219}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 26230}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 26230}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 26248}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_JSON)), FxSFunc: 0, FzName: ts + 26261}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_SQL)), FxSFunc: 0, FzName: ts + 26264}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 26268}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, 
FzName: ts + 26280}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 26292}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 26303}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 26314}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 26326}, - {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_ISSET)), FxSFunc: 0, FzName: ts + 26339}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 26348}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 26348}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 26358}, - {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_UTF8 | 0*SQLITE_FUNC_NEEDCOLL | SQLITE_SUBTYPE | SQLITE_UTF8 | SQLITE_DETERMINISTIC), FxSFunc: 0, FxFinalize: 0, FxValue: 0, FxInverse: 0, FzName: ts + 26369}, - {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_UTF8 | 0*SQLITE_FUNC_NEEDCOLL | SQLITE_SUBTYPE | SQLITE_UTF8 | SQLITE_DETERMINISTIC), FxSFunc: 0, FxFinalize: 0, FxValue: 0, FxInverse: 0, FzName: ts + 26386}} + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 26261}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 26266}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 26277}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 26277}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 26295}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_JSON)), FxSFunc: 0, FzName: ts + 26308}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_SQL)), FxSFunc: 0, FzName: ts + 26311}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 26315}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 26327}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 26339}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 26350}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | 
SQLITE_UTF8), FxSFunc: 0, FzName: ts + 26361}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 26373}, + {FnArg: int8(-1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FpUserData: uintptr(int64(JSON_ISSET)), FxSFunc: 0, FzName: ts + 26386}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 26395}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 26395}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_DETERMINISTIC | SQLITE_FUNC_CONSTANT | SQLITE_UTF8), FxSFunc: 0, FzName: ts + 26405}, + {FnArg: int8(1), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_UTF8 | 0*SQLITE_FUNC_NEEDCOLL | SQLITE_SUBTYPE | SQLITE_UTF8 | SQLITE_DETERMINISTIC), FxSFunc: 0, FxFinalize: 0, FxValue: 0, FxInverse: 0, FzName: ts + 26416}, + {FnArg: int8(2), FfuncFlags: U32(SQLITE_FUNC_BUILTIN | SQLITE_UTF8 | 0*SQLITE_FUNC_NEEDCOLL | SQLITE_SUBTYPE | SQLITE_UTF8 | SQLITE_DETERMINISTIC), FxSFunc: 0, FxFinalize: 0, FxValue: 0, FxInverse: 0, FzName: ts + 26433}} // Register the JSON table-valued functions func Xsqlite3JsonTableFunctions(tls *libc.TLS, db uintptr) int32 { @@ -165438,8 +165496,8 @@ FzName uintptr FpModule uintptr }{ - {FzName: ts + 26404, FpModule: 0}, - {FzName: ts + 26414, FpModule: 0}, + {FzName: ts + 26451, FpModule: 0}, + {FzName: ts + 26461, FpModule: 0}, } type Rtree1 = struct { @@ -165699,11 +165757,11 @@ } } if (*Rtree)(unsafe.Pointer(pRtree)).FpNodeBlob == uintptr(0) { - var zTab uintptr = Xsqlite3_mprintf(tls, ts+26424, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) + var zTab uintptr = Xsqlite3_mprintf(tls, ts+26471, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) if zTab == uintptr(0) { return SQLITE_NOMEM } - rc = Xsqlite3_blob_open(tls, (*Rtree)(unsafe.Pointer(pRtree)).Fdb, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, zTab, ts+26432, iNode, 0, + rc = Xsqlite3_blob_open(tls, (*Rtree)(unsafe.Pointer(pRtree)).Fdb, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, zTab, ts+26479, iNode, 0, pRtree+112) Xsqlite3_free(tls, zTab) } @@ -165914,7 +165972,7 @@ var pRtree uintptr = pVtab var rc int32 var zCreate uintptr = Xsqlite3_mprintf(tls, - ts+26437, + ts+26484, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) @@ -166612,7 +166670,7 @@ var pSrc uintptr var pInfo uintptr - pSrc = Xsqlite3_value_pointer(tls, pValue, ts+26519) + pSrc = Xsqlite3_value_pointer(tls, pValue, ts+26566) if pSrc == uintptr(0) { return SQLITE_ERROR } @@ -167953,7 +168011,7 @@ var zSql uintptr var rc int32 - zSql = Xsqlite3_mprintf(tls, ts+26533, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) + zSql = Xsqlite3_mprintf(tls, ts+26580, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) if zSql != 0 { rc = Xsqlite3_prepare_v2(tls, (*Rtree)(unsafe.Pointer(pRtree)).Fdb, zSql, -1, bp+56, uintptr(0)) } else { @@ -167965,12 +168023,12 @@ if iCol == 0 { var zCol uintptr = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), 0) (*Rtree)(unsafe.Pointer(pRtree)).Fbase.FzErrMsg = 
Xsqlite3_mprintf(tls, - ts+26553, libc.VaList(bp+16, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zCol)) + ts+26600, libc.VaList(bp+16, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zCol)) } else { var zCol1 uintptr = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), iCol) var zCol2 uintptr = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), iCol+1) (*Rtree)(unsafe.Pointer(pRtree)).Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+26585, libc.VaList(bp+32, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zCol1, zCol2)) + ts+26632, libc.VaList(bp+32, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zCol1, zCol2)) } } @@ -168196,7 +168254,7 @@ var pRtree uintptr = pVtab var rc int32 = SQLITE_NOMEM var zSql uintptr = Xsqlite3_mprintf(tls, - ts+26622, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName)) + ts+26669, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName, zNewName)) if zSql != 0 { nodeBlobReset(tls, pRtree) rc = Xsqlite3_exec(tls, (*Rtree)(unsafe.Pointer(pRtree)).Fdb, zSql, uintptr(0), uintptr(0), uintptr(0)) @@ -168219,7 +168277,7 @@ bp := tls.Alloc(24) defer tls.Free(24) - var zFmt uintptr = ts + 26767 + var zFmt uintptr = ts + 26814 var zSql uintptr var rc int32 @@ -168267,7 +168325,7 @@ } var azName1 = [3]uintptr{ - ts + 26823, ts + 6412, ts + 17625, + ts + 26870, ts + 6412, ts + 17625, } var rtreeModule = Sqlite3_module{ @@ -168310,19 +168368,19 @@ var p uintptr = Xsqlite3_str_new(tls, db) var ii int32 Xsqlite3_str_appendf(tls, p, - ts+26828, + ts+26875, libc.VaList(bp, zDb, zPrefix)) for ii = 0; ii < int32((*Rtree)(unsafe.Pointer(pRtree)).FnAux); ii++ { - Xsqlite3_str_appendf(tls, p, ts+26890, libc.VaList(bp+16, ii)) + Xsqlite3_str_appendf(tls, p, ts+26937, libc.VaList(bp+16, ii)) } Xsqlite3_str_appendf(tls, p, - ts+26895, + ts+26942, libc.VaList(bp+24, zDb, zPrefix)) Xsqlite3_str_appendf(tls, p, - ts+26959, + ts+27006, libc.VaList(bp+40, zDb, zPrefix)) Xsqlite3_str_appendf(tls, p, - ts+27029, + ts+27076, libc.VaList(bp+56, zDb, zPrefix, (*Rtree)(unsafe.Pointer(pRtree)).FiNodeSize)) zCreate = Xsqlite3_str_finish(tls, p) if !(zCreate != 0) { @@ -168351,7 +168409,7 @@ if i != 3 || int32((*Rtree)(unsafe.Pointer(pRtree)).FnAux) == 0 { zFormat = azSql[i] } else { - zFormat = ts + 27078 + zFormat = ts + 27125 } zSql = Xsqlite3_mprintf(tls, zFormat, libc.VaList(bp+80, zDb, zPrefix)) if zSql != 0 { @@ -168363,7 +168421,7 @@ } if (*Rtree)(unsafe.Pointer(pRtree)).FnAux != 0 { (*Rtree)(unsafe.Pointer(pRtree)).FzReadAuxSql = Xsqlite3_mprintf(tls, - ts+27186, + ts+27233, libc.VaList(bp+96, zDb, zPrefix)) if (*Rtree)(unsafe.Pointer(pRtree)).FzReadAuxSql == uintptr(0) { rc = SQLITE_NOMEM @@ -168371,18 +168429,18 @@ var p uintptr = Xsqlite3_str_new(tls, db) var ii int32 var zSql uintptr - Xsqlite3_str_appendf(tls, p, ts+27231, libc.VaList(bp+112, zDb, zPrefix)) + Xsqlite3_str_appendf(tls, p, ts+27278, libc.VaList(bp+112, zDb, zPrefix)) for ii = 0; ii < int32((*Rtree)(unsafe.Pointer(pRtree)).FnAux); ii++ { if ii != 0 { Xsqlite3_str_append(tls, p, ts+14119, 1) } if ii < int32((*Rtree)(unsafe.Pointer(pRtree)).FnAuxNotNull) { - 
Xsqlite3_str_appendf(tls, p, ts+27258, libc.VaList(bp+128, ii, ii+2, ii)) + Xsqlite3_str_appendf(tls, p, ts+27305, libc.VaList(bp+128, ii, ii+2, ii)) } else { - Xsqlite3_str_appendf(tls, p, ts+27280, libc.VaList(bp+152, ii, ii+2)) + Xsqlite3_str_appendf(tls, p, ts+27327, libc.VaList(bp+152, ii, ii+2)) } } - Xsqlite3_str_appendf(tls, p, ts+27288, 0) + Xsqlite3_str_appendf(tls, p, ts+27335, 0) zSql = Xsqlite3_str_finish(tls, p) if zSql == uintptr(0) { rc = SQLITE_NOMEM @@ -168397,14 +168455,14 @@ } var azSql = [8]uintptr{ - ts + 27304, - ts + 27357, - ts + 27402, - ts + 27454, - ts + 27508, - ts + 27553, - ts + 27611, - ts + 27666, + ts + 27351, + ts + 27404, + ts + 27449, + ts + 27501, + ts + 27555, + ts + 27600, + ts + 27658, + ts + 27713, } func getIntFromStmt(tls *libc.TLS, db uintptr, zSql uintptr, piVal uintptr) int32 { @@ -168433,7 +168491,7 @@ var zSql uintptr if isCreate != 0 { *(*int32)(unsafe.Pointer(bp + 48)) = 0 - zSql = Xsqlite3_mprintf(tls, ts+27713, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb)) + zSql = Xsqlite3_mprintf(tls, ts+27760, libc.VaList(bp, (*Rtree)(unsafe.Pointer(pRtree)).FzDb)) rc = getIntFromStmt(tls, db, zSql, bp+48) if rc == SQLITE_OK { (*Rtree)(unsafe.Pointer(pRtree)).FiNodeSize = *(*int32)(unsafe.Pointer(bp + 48)) - 64 @@ -168445,7 +168503,7 @@ } } else { zSql = Xsqlite3_mprintf(tls, - ts+27733, + ts+27780, libc.VaList(bp+16, (*Rtree)(unsafe.Pointer(pRtree)).FzDb, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) rc = getIntFromStmt(tls, db, zSql, pRtree+32) if rc != SQLITE_OK { @@ -168453,7 +168511,7 @@ } else if (*Rtree)(unsafe.Pointer(pRtree)).FiNodeSize < 512-64 { rc = SQLITE_CORRUPT | int32(1)<<8 - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+27790, + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+27837, libc.VaList(bp+40, (*Rtree)(unsafe.Pointer(pRtree)).FzName)) } } @@ -168495,10 +168553,10 @@ ii = 4 *(*[5]uintptr)(unsafe.Pointer(bp + 96)) = [5]uintptr{ uintptr(0), - ts + 27825, - ts + 27868, - ts + 27903, - ts + 27939, + ts + 27872, + ts + 27915, + ts + 27950, + ts + 27986, } if !(argc < 6 || argc > RTREE_MAX_AUX_COLUMN+3) { @@ -168529,7 +168587,7 @@ libc.Xmemcpy(tls, (*Rtree)(unsafe.Pointer(pRtree)).FzName, *(*uintptr)(unsafe.Pointer(argv + 2*8)), uint64(nName)) pSql = Xsqlite3_str_new(tls, db) - Xsqlite3_str_appendf(tls, pSql, ts+27976, + Xsqlite3_str_appendf(tls, pSql, ts+28023, libc.VaList(bp+16, rtreeTokenLength(tls, *(*uintptr)(unsafe.Pointer(argv + 3*8))), *(*uintptr)(unsafe.Pointer(argv + 3*8)))) ii = 4 __3: @@ -168541,7 +168599,7 @@ goto __6 } (*Rtree)(unsafe.Pointer(pRtree)).FnAux++ - Xsqlite3_str_appendf(tls, pSql, ts+28000, libc.VaList(bp+32, rtreeTokenLength(tls, zArg+uintptr(1)), zArg+uintptr(1))) + Xsqlite3_str_appendf(tls, pSql, ts+28047, libc.VaList(bp+32, rtreeTokenLength(tls, zArg+uintptr(1)), zArg+uintptr(1))) goto __7 __6: if !(int32((*Rtree)(unsafe.Pointer(pRtree)).FnAux) > 0) { @@ -168564,7 +168622,7 @@ goto __5 __5: ; - Xsqlite3_str_appendf(tls, pSql, ts+28006, 0) + Xsqlite3_str_appendf(tls, pSql, ts+28053, 0) zSql = Xsqlite3_str_finish(tls, pSql) if !!(zSql != 0) { goto __10 @@ -168660,7 +168718,7 @@ return rc } -var azFormat = [2]uintptr{ts + 28009, ts + 28020} +var azFormat = [2]uintptr{ts + 28056, ts + 28067} func rtreenode(tls *libc.TLS, ctx uintptr, nArg int32, apArg uintptr) { bp := tls.Alloc(1072) @@ -168700,11 +168758,11 @@ if ii > 0 { Xsqlite3_str_append(tls, pOut, ts+12272, 1) } - Xsqlite3_str_appendf(tls, pOut, ts+28030, libc.VaList(bp, 
(*RtreeCell)(unsafe.Pointer(bp+1024)).FiRowid)) + Xsqlite3_str_appendf(tls, pOut, ts+28077, libc.VaList(bp, (*RtreeCell)(unsafe.Pointer(bp+1024)).FiRowid)) for jj = 0; jj < int32((*Rtree)(unsafe.Pointer(bp+56)).FnDim2); jj++ { - Xsqlite3_str_appendf(tls, pOut, ts+28036, libc.VaList(bp+8, float64(*(*RtreeValue)(unsafe.Pointer(bp + 1024 + 8 + uintptr(jj)*4))))) + Xsqlite3_str_appendf(tls, pOut, ts+28083, libc.VaList(bp+8, float64(*(*RtreeValue)(unsafe.Pointer(bp + 1024 + 8 + uintptr(jj)*4))))) } - Xsqlite3_str_append(tls, pOut, ts+28040, 1) + Xsqlite3_str_append(tls, pOut, ts+28087, 1) } errCode = Xsqlite3_str_errcode(tls, pOut) Xsqlite3_result_text(tls, ctx, Xsqlite3_str_finish(tls, pOut), -1, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{Xsqlite3_free}))) @@ -168715,7 +168773,7 @@ _ = nArg if Xsqlite3_value_type(tls, *(*uintptr)(unsafe.Pointer(apArg))) != SQLITE_BLOB || Xsqlite3_value_bytes(tls, *(*uintptr)(unsafe.Pointer(apArg))) < 2 { - Xsqlite3_result_error(tls, ctx, ts+28042, -1) + Xsqlite3_result_error(tls, ctx, ts+28089, -1) } else { var zBlob uintptr = Xsqlite3_value_blob(tls, *(*uintptr)(unsafe.Pointer(apArg))) if zBlob != 0 { @@ -168793,7 +168851,7 @@ if z == uintptr(0) { (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc = SQLITE_NOMEM } else { - (*RtreeCheck)(unsafe.Pointer(pCheck)).FzReport = Xsqlite3_mprintf(tls, ts+28075, + (*RtreeCheck)(unsafe.Pointer(pCheck)).FzReport = Xsqlite3_mprintf(tls, ts+28122, libc.VaList(bp, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzReport, func() uintptr { if (*RtreeCheck)(unsafe.Pointer(pCheck)).FzReport != 0 { return ts + 5414 @@ -168817,7 +168875,7 @@ if (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc == SQLITE_OK && (*RtreeCheck)(unsafe.Pointer(pCheck)).FpGetNode == uintptr(0) { (*RtreeCheck)(unsafe.Pointer(pCheck)).FpGetNode = rtreeCheckPrepare(tls, pCheck, - ts+28082, + ts+28129, libc.VaList(bp, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzDb, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzTab)) } @@ -168836,7 +168894,7 @@ } rtreeCheckReset(tls, pCheck, (*RtreeCheck)(unsafe.Pointer(pCheck)).FpGetNode) if (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc == SQLITE_OK && pRet == uintptr(0) { - rtreeCheckAppendMsg(tls, pCheck, ts+28127, libc.VaList(bp+16, iNode)) + rtreeCheckAppendMsg(tls, pCheck, ts+28174, libc.VaList(bp+16, iNode)) } } @@ -168850,8 +168908,8 @@ var rc int32 var pStmt uintptr *(*[2]uintptr)(unsafe.Pointer(bp + 80)) = [2]uintptr{ - ts + 28159, - ts + 28213, + ts + 28206, + ts + 28260, } if *(*uintptr)(unsafe.Pointer(pCheck + 40 + uintptr(bLeaf)*8)) == uintptr(0) { @@ -168866,23 +168924,23 @@ Xsqlite3_bind_int64(tls, pStmt, 1, iKey) rc = Xsqlite3_step(tls, pStmt) if rc == SQLITE_DONE { - rtreeCheckAppendMsg(tls, pCheck, ts+28261, + rtreeCheckAppendMsg(tls, pCheck, ts+28308, libc.VaList(bp+16, iKey, iVal, func() uintptr { if bLeaf != 0 { - return ts + 28306 + return ts + 28353 } - return ts + 28314 + return ts + 28361 }())) } else if rc == SQLITE_ROW { var ii I64 = Xsqlite3_column_int64(tls, pStmt, 0) if ii != iVal { rtreeCheckAppendMsg(tls, pCheck, - ts+28323, + ts+28370, libc.VaList(bp+40, iKey, ii, func() uintptr { if bLeaf != 0 { - return ts + 28306 + return ts + 28353 } - return ts + 28314 + return ts + 28361 }(), iKey, iVal)) } } @@ -168906,7 +168964,7 @@ return libc.Bool32(*(*RtreeValue)(unsafe.Pointer(bp + 48)) > *(*RtreeValue)(unsafe.Pointer(bp + 52))) }() != 0 { rtreeCheckAppendMsg(tls, pCheck, - ts+28381, libc.VaList(bp, i, iCell, iNode)) + ts+28428, libc.VaList(bp, i, iCell, iNode)) } if pParent != 0 { @@ -168926,7 +168984,7 @@ return 
libc.Bool32(*(*RtreeValue)(unsafe.Pointer(bp + 52)) > *(*RtreeValue)(unsafe.Pointer(bp + 60))) }() != 0 { rtreeCheckAppendMsg(tls, pCheck, - ts+28429, libc.VaList(bp+24, i, iCell, iNode)) + ts+28476, libc.VaList(bp+24, i, iCell, iNode)) } } } @@ -168943,14 +169001,14 @@ if aNode != 0 { if *(*int32)(unsafe.Pointer(bp + 48)) < 4 { rtreeCheckAppendMsg(tls, pCheck, - ts+28496, libc.VaList(bp, iNode, *(*int32)(unsafe.Pointer(bp + 48)))) + ts+28543, libc.VaList(bp, iNode, *(*int32)(unsafe.Pointer(bp + 48)))) } else { var nCell int32 var i int32 if aParent == uintptr(0) { iDepth = readInt16(tls, aNode) if iDepth > RTREE_MAX_DEPTH { - rtreeCheckAppendMsg(tls, pCheck, ts+28530, libc.VaList(bp+16, iDepth)) + rtreeCheckAppendMsg(tls, pCheck, ts+28577, libc.VaList(bp+16, iDepth)) Xsqlite3_free(tls, aNode) return } @@ -168958,7 +169016,7 @@ nCell = readInt16(tls, aNode+2) if 4+nCell*(8+(*RtreeCheck)(unsafe.Pointer(pCheck)).FnDim*2*4) > *(*int32)(unsafe.Pointer(bp + 48)) { rtreeCheckAppendMsg(tls, pCheck, - ts+28560, + ts+28607, libc.VaList(bp+24, iNode, nCell, *(*int32)(unsafe.Pointer(bp + 48)))) } else { for i = 0; i < nCell; i++ { @@ -168987,14 +169045,14 @@ if (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc == SQLITE_OK { var pCount uintptr - pCount = rtreeCheckPrepare(tls, pCheck, ts+28615, + pCount = rtreeCheckPrepare(tls, pCheck, ts+28662, libc.VaList(bp, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzDb, (*RtreeCheck)(unsafe.Pointer(pCheck)).FzTab, zTbl)) if pCount != 0 { if Xsqlite3_step(tls, pCount) == SQLITE_ROW { var nActual I64 = Xsqlite3_column_int64(tls, pCount, 0) if nActual != nExpect { rtreeCheckAppendMsg(tls, pCheck, - ts+28646, libc.VaList(bp+24, zTbl, nExpect, nActual)) + ts+28693, libc.VaList(bp+24, zTbl, nExpect, nActual)) } } (*RtreeCheck)(unsafe.Pointer(pCheck)).Frc = Xsqlite3_finalize(tls, pCount) @@ -169021,7 +169079,7 @@ } if (*RtreeCheck)(unsafe.Pointer(bp+32)).Frc == SQLITE_OK { - pStmt = rtreeCheckPrepare(tls, bp+32, ts+28713, libc.VaList(bp, zDb, zTab)) + pStmt = rtreeCheckPrepare(tls, bp+32, ts+28760, libc.VaList(bp, zDb, zTab)) if pStmt != 0 { nAux = Xsqlite3_column_count(tls, pStmt) - 2 Xsqlite3_finalize(tls, pStmt) @@ -169030,12 +169088,12 @@ } } - pStmt = rtreeCheckPrepare(tls, bp+32, ts+26533, libc.VaList(bp+16, zDb, zTab)) + pStmt = rtreeCheckPrepare(tls, bp+32, ts+26580, libc.VaList(bp+16, zDb, zTab)) if pStmt != 0 { var rc int32 (*RtreeCheck)(unsafe.Pointer(bp + 32)).FnDim = (Xsqlite3_column_count(tls, pStmt) - 1 - nAux) / 2 if (*RtreeCheck)(unsafe.Pointer(bp+32)).FnDim < 1 { - rtreeCheckAppendMsg(tls, bp+32, ts+28741, 0) + rtreeCheckAppendMsg(tls, bp+32, ts+28788, 0) } else if SQLITE_ROW == Xsqlite3_step(tls, pStmt) { (*RtreeCheck)(unsafe.Pointer(bp + 32)).FbInt = libc.Bool32(Xsqlite3_column_type(tls, pStmt, 1) == SQLITE_INTEGER) } @@ -169049,8 +169107,8 @@ if (*RtreeCheck)(unsafe.Pointer(bp+32)).Frc == SQLITE_OK { rtreeCheckNode(tls, bp+32, 0, uintptr(0), int64(1)) } - rtreeCheckCount(tls, bp+32, ts+28772, int64((*RtreeCheck)(unsafe.Pointer(bp+32)).FnLeaf)) - rtreeCheckCount(tls, bp+32, ts+28779, int64((*RtreeCheck)(unsafe.Pointer(bp+32)).FnNonLeaf)) + rtreeCheckCount(tls, bp+32, ts+28819, int64((*RtreeCheck)(unsafe.Pointer(bp+32)).FnLeaf)) + rtreeCheckCount(tls, bp+32, ts+28826, int64((*RtreeCheck)(unsafe.Pointer(bp+32)).FnNonLeaf)) } Xsqlite3_finalize(tls, (*RtreeCheck)(unsafe.Pointer(bp+32)).FpGetNode) @@ -169058,7 +169116,7 @@ Xsqlite3_finalize(tls, *(*uintptr)(unsafe.Pointer(bp + 32 + 40 + 1*8))) if bEnd != 0 { - var rc int32 = Xsqlite3_exec(tls, db, ts+28787, 
uintptr(0), uintptr(0), uintptr(0)) + var rc int32 = Xsqlite3_exec(tls, db, ts+28834, uintptr(0), uintptr(0), uintptr(0)) if (*RtreeCheck)(unsafe.Pointer(bp+32)).Frc == SQLITE_OK { (*RtreeCheck)(unsafe.Pointer(bp + 32)).Frc = rc } @@ -169073,7 +169131,7 @@ if nArg != 1 && nArg != 2 { Xsqlite3_result_error(tls, ctx, - ts+28791, -1) + ts+28838, -1) } else { var rc int32 *(*uintptr)(unsafe.Pointer(bp)) = uintptr(0) @@ -169091,7 +169149,7 @@ if *(*uintptr)(unsafe.Pointer(bp)) != 0 { return *(*uintptr)(unsafe.Pointer(bp)) } - return ts + 19381 + return ts + 19428 }(), -1, libc.UintptrFromInt32(-1)) } else { Xsqlite3_result_error_code(tls, ctx, rc) @@ -169462,11 +169520,11 @@ var db uintptr = Xsqlite3_context_db_handle(tls, context) var x uintptr = Xsqlite3_str_new(tls, db) var i int32 - Xsqlite3_str_append(tls, x, ts+28842, 1) + Xsqlite3_str_append(tls, x, ts+28889, 1) for i = 0; i < (*GeoPoly)(unsafe.Pointer(p)).FnVertex; i++ { - Xsqlite3_str_appendf(tls, x, ts+28844, libc.VaList(bp, float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2)*4))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2+1)*4))))) + Xsqlite3_str_appendf(tls, x, ts+28891, libc.VaList(bp, float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2)*4))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2+1)*4))))) } - Xsqlite3_str_appendf(tls, x, ts+28855, libc.VaList(bp+16, float64(*(*GeoCoord)(unsafe.Pointer(p + 8))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + 1*4))))) + Xsqlite3_str_appendf(tls, x, ts+28902, libc.VaList(bp+16, float64(*(*GeoCoord)(unsafe.Pointer(p + 8))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + 1*4))))) Xsqlite3_result_text(tls, context, Xsqlite3_str_finish(tls, x), -1, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{Xsqlite3_free}))) Xsqlite3_free(tls, p) } @@ -169486,19 +169544,19 @@ var x uintptr = Xsqlite3_str_new(tls, db) var i int32 var cSep int8 = int8('\'') - Xsqlite3_str_appendf(tls, x, ts+28866, 0) + Xsqlite3_str_appendf(tls, x, ts+28913, 0) for i = 0; i < (*GeoPoly)(unsafe.Pointer(p)).FnVertex; i++ { - Xsqlite3_str_appendf(tls, x, ts+28884, libc.VaList(bp, int32(cSep), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2)*4))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2+1)*4))))) + Xsqlite3_str_appendf(tls, x, ts+28931, libc.VaList(bp, int32(cSep), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2)*4))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + uintptr(i*2+1)*4))))) cSep = int8(' ') } - Xsqlite3_str_appendf(tls, x, ts+28892, libc.VaList(bp+24, float64(*(*GeoCoord)(unsafe.Pointer(p + 8))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + 1*4))))) + Xsqlite3_str_appendf(tls, x, ts+28939, libc.VaList(bp+24, float64(*(*GeoCoord)(unsafe.Pointer(p + 8))), float64(*(*GeoCoord)(unsafe.Pointer(p + 8 + 1*4))))) for i = 1; i < argc; i++ { var z uintptr = Xsqlite3_value_text(tls, *(*uintptr)(unsafe.Pointer(argv + uintptr(i)*8))) if z != 0 && *(*int8)(unsafe.Pointer(z)) != 0 { - Xsqlite3_str_appendf(tls, x, ts+28900, libc.VaList(bp+40, z)) + Xsqlite3_str_appendf(tls, x, ts+28947, libc.VaList(bp+40, z)) } } - Xsqlite3_str_appendf(tls, x, ts+28904, 0) + Xsqlite3_str_appendf(tls, x, ts+28951, 0) Xsqlite3_result_text(tls, context, Xsqlite3_str_finish(tls, x), -1, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{Xsqlite3_free}))) Xsqlite3_free(tls, p) } @@ -170418,7 +170476,7 @@ libc.Xmemcpy(tls, (*Rtree)(unsafe.Pointer(pRtree)).FzName, *(*uintptr)(unsafe.Pointer(argv + 2*8)), uint64(nName)) pSql = Xsqlite3_str_new(tls, db) - 
Xsqlite3_str_appendf(tls, pSql, ts+28917, 0) + Xsqlite3_str_appendf(tls, pSql, ts+28964, 0) (*Rtree)(unsafe.Pointer(pRtree)).FnAux = U8(1) (*Rtree)(unsafe.Pointer(pRtree)).FnAuxNotNull = U8(1) ii = 3 @@ -170427,7 +170485,7 @@ goto __4 } (*Rtree)(unsafe.Pointer(pRtree)).FnAux++ - Xsqlite3_str_appendf(tls, pSql, ts+28939, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(argv + uintptr(ii)*8)))) + Xsqlite3_str_appendf(tls, pSql, ts+28986, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(argv + uintptr(ii)*8)))) goto __3 __3: ii++ @@ -170435,7 +170493,7 @@ goto __4 __4: ; - Xsqlite3_str_appendf(tls, pSql, ts+28006, 0) + Xsqlite3_str_appendf(tls, pSql, ts+28053, 0) zSql = Xsqlite3_str_finish(tls, pSql) if !!(zSql != 0) { goto __5 @@ -170672,7 +170730,7 @@ } if iFuncTerm >= 0 { (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxNum = idxNum - (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxStr = ts + 28943 + (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxStr = ts + 28990 (*sqlite3_index_constraint_usage)(unsafe.Pointer((*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FaConstraintUsage + uintptr(iFuncTerm)*8)).FargvIndex = 1 (*sqlite3_index_constraint_usage)(unsafe.Pointer((*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FaConstraintUsage + uintptr(iFuncTerm)*8)).Fomit = uint8(0) (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FestimatedCost = 300.0 @@ -170680,7 +170738,7 @@ return SQLITE_OK } (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxNum = 4 - (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxStr = ts + 28949 + (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FidxStr = ts + 28996 (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FestimatedCost = 3000000.0 (*Sqlite3_index_info)(unsafe.Pointer(pIdxInfo)).FestimatedRows = int64(100000) return SQLITE_OK @@ -170792,7 +170850,7 @@ if !(*(*int32)(unsafe.Pointer(bp + 48)) == SQLITE_ERROR) { goto __4 } - (*Sqlite3_vtab)(unsafe.Pointer(pVtab)).FzErrMsg = Xsqlite3_mprintf(tls, ts+28958, 0) + (*Sqlite3_vtab)(unsafe.Pointer(pVtab)).FzErrMsg = Xsqlite3_mprintf(tls, ts+29005, 0) __4: ; goto geopoly_update_end @@ -170924,14 +170982,14 @@ func geopolyFindFunction(tls *libc.TLS, pVtab uintptr, nArg int32, zName uintptr, pxFunc uintptr, ppArg uintptr) int32 { _ = pVtab _ = nArg - if Xsqlite3_stricmp(tls, zName, ts+28998) == 0 { + if Xsqlite3_stricmp(tls, zName, ts+29045) == 0 { *(*uintptr)(unsafe.Pointer(pxFunc)) = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{geopolyOverlapFunc})) *(*uintptr)(unsafe.Pointer(ppArg)) = uintptr(0) return SQLITE_INDEX_CONSTRAINT_FUNCTION } - if Xsqlite3_stricmp(tls, zName, ts+29014) == 0 { + if Xsqlite3_stricmp(tls, zName, ts+29061) == 0 { *(*uintptr)(unsafe.Pointer(pxFunc)) = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{geopolyWithinFunc})) @@ -170996,7 +171054,7 @@ uintptr(0), aAgg[i].FxStep, aAgg[i].FxFinal) } if rc == SQLITE_OK { - rc = Xsqlite3_create_module_v2(tls, db, ts+29029, uintptr(unsafe.Pointer(&geopolyModule)), uintptr(0), uintptr(0)) + rc = Xsqlite3_create_module_v2(tls, db, ts+29076, uintptr(unsafe.Pointer(&geopolyModule)), uintptr(0), uintptr(0)) } return rc } @@ -171008,25 +171066,25 @@ F__ccgo_pad1 [6]byte FzName uintptr }{ - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 29037}, - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 29050}, - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 29063}, - {FxFunc: 0, FnArg: int8(-1), FbPure: uint8(1), FzName: ts + 29076}, - {FxFunc: 0, FnArg: int8(2), FbPure: 
uint8(1), FzName: ts + 29014}, - {FxFunc: 0, FnArg: int8(3), FbPure: uint8(1), FzName: ts + 29088}, - {FxFunc: 0, FnArg: int8(2), FbPure: uint8(1), FzName: ts + 28998}, - {FxFunc: 0, FnArg: int8(1), FzName: ts + 29111}, - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 29125}, - {FxFunc: 0, FnArg: int8(7), FbPure: uint8(1), FzName: ts + 29138}, - {FxFunc: 0, FnArg: int8(4), FbPure: uint8(1), FzName: ts + 29152}, - {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 29168}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 29084}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 29097}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 29110}, + {FxFunc: 0, FnArg: int8(-1), FbPure: uint8(1), FzName: ts + 29123}, + {FxFunc: 0, FnArg: int8(2), FbPure: uint8(1), FzName: ts + 29061}, + {FxFunc: 0, FnArg: int8(3), FbPure: uint8(1), FzName: ts + 29135}, + {FxFunc: 0, FnArg: int8(2), FbPure: uint8(1), FzName: ts + 29045}, + {FxFunc: 0, FnArg: int8(1), FzName: ts + 29158}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 29172}, + {FxFunc: 0, FnArg: int8(7), FbPure: uint8(1), FzName: ts + 29185}, + {FxFunc: 0, FnArg: int8(4), FbPure: uint8(1), FzName: ts + 29199}, + {FxFunc: 0, FnArg: int8(1), FbPure: uint8(1), FzName: ts + 29215}, } var aAgg = [1]struct { FxStep uintptr FxFinal uintptr FzName uintptr }{ - {FxStep: 0, FxFinal: 0, FzName: ts + 29180}, + {FxStep: 0, FxFinal: 0, FzName: ts + 29227}, } // Register the r-tree module with database handle db. This creates the @@ -171036,26 +171094,26 @@ var utf8 int32 = SQLITE_UTF8 var rc int32 - rc = Xsqlite3_create_function(tls, db, ts+29199, 2, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + rc = Xsqlite3_create_function(tls, db, ts+29246, 2, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rtreenode})), uintptr(0), uintptr(0)) if rc == SQLITE_OK { - rc = Xsqlite3_create_function(tls, db, ts+29209, 1, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + rc = Xsqlite3_create_function(tls, db, ts+29256, 1, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rtreedepth})), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { - rc = Xsqlite3_create_function(tls, db, ts+29220, -1, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + rc = Xsqlite3_create_function(tls, db, ts+29267, -1, utf8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rtreecheck})), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { var c uintptr = uintptr(RTREE_COORD_REAL32) - rc = Xsqlite3_create_module_v2(tls, db, ts+28943, uintptr(unsafe.Pointer(&rtreeModule)), c, uintptr(0)) + rc = Xsqlite3_create_module_v2(tls, db, ts+28990, uintptr(unsafe.Pointer(&rtreeModule)), c, uintptr(0)) } if rc == SQLITE_OK { var c uintptr = uintptr(RTREE_COORD_INT32) - rc = Xsqlite3_create_module_v2(tls, db, ts+29231, uintptr(unsafe.Pointer(&rtreeModule)), c, uintptr(0)) + rc = Xsqlite3_create_module_v2(tls, db, ts+29278, uintptr(unsafe.Pointer(&rtreeModule)), c, uintptr(0)) } if rc == SQLITE_OK { rc = sqlite3_geopoly_init(tls, db) @@ -171109,7 +171167,7 @@ Xsqlite3_result_error_nomem(tls, ctx) rtreeMatchArgFree(tls, pBlob) } else { - Xsqlite3_result_pointer(tls, ctx, pBlob, ts+26519, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{rtreeMatchArgFree}))) + Xsqlite3_result_pointer(tls, ctx, pBlob, ts+26566, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) 
}{rtreeMatchArgFree}))) } } } @@ -171436,7 +171494,7 @@ nOut = rbuDeltaOutputSize(tls, aDelta, nDelta) if nOut < 0 { - Xsqlite3_result_error(tls, context, ts+29241, -1) + Xsqlite3_result_error(tls, context, ts+29288, -1) return } @@ -171447,7 +171505,7 @@ nOut2 = rbuDeltaApply(tls, aOrig, nOrig, aDelta, nDelta, aOut) if nOut2 != nOut { Xsqlite3_free(tls, aOut) - Xsqlite3_result_error(tls, context, ts+29241, -1) + Xsqlite3_result_error(tls, context, ts+29288, -1) } else { Xsqlite3_result_blob(tls, context, aOut, nOut, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{Xsqlite3_free}))) } @@ -171548,7 +171606,7 @@ rbuObjIterClearStatements(tls, pIter) if (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx == uintptr(0) { rc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+29262, uintptr(0), uintptr(0), p+64) + ts+29309, uintptr(0), uintptr(0), p+64) } if rc == SQLITE_OK { @@ -171612,7 +171670,7 @@ Xsqlite3_result_text(tls, pCtx, zIn, -1, uintptr(0)) } } else { - if libc.Xstrlen(tls, zIn) > uint64(4) && libc.Xmemcmp(tls, ts+26432, zIn, uint64(4)) == 0 { + if libc.Xstrlen(tls, zIn) > uint64(4) && libc.Xmemcmp(tls, ts+26479, zIn, uint64(4)) == 0 { var i int32 for i = 4; int32(*(*int8)(unsafe.Pointer(zIn + uintptr(i)))) >= '0' && int32(*(*int8)(unsafe.Pointer(zIn + uintptr(i)))) <= '9'; i++ { } @@ -171633,16 +171691,16 @@ rc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, pIter, p+64, Xsqlite3_mprintf(tls, - ts+29433, libc.VaList(bp, func() uintptr { + ts+29480, libc.VaList(bp, func() uintptr { if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - return ts + 29583 + return ts + 29630 } return ts + 1544 }()))) if rc == SQLITE_OK { rc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+8, p+64, - ts+29624) + ts+29671) } (*RbuObjIter)(unsafe.Pointer(pIter)).FbCleanup = 1 @@ -171758,7 +171816,7 @@ (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+32, p+64, Xsqlite3_mprintf(tls, - ts+29749, libc.VaList(bp, zTab))) + ts+29796, libc.VaList(bp, zTab))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc != SQLITE_OK || Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 32))) != SQLITE_ROW) { goto __1 } @@ -171776,7 +171834,7 @@ *(*int32)(unsafe.Pointer(piTnum)) = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 32)), 1) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+32+1*8, p+64, - Xsqlite3_mprintf(tls, ts+29868, libc.VaList(bp+8, zTab))) + Xsqlite3_mprintf(tls, ts+29915, libc.VaList(bp+8, zTab))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc != 0) { goto __3 } @@ -171794,7 +171852,7 @@ } (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+32+2*8, p+64, Xsqlite3_mprintf(tls, - ts+29889, libc.VaList(bp+16, zIdx))) + ts+29936, libc.VaList(bp+16, zIdx))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK) { goto __7 } @@ -171817,7 +171875,7 @@ __5: ; (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+32+3*8, p+64, - Xsqlite3_mprintf(tls, ts+29940, libc.VaList(bp+24, zTab))) + Xsqlite3_mprintf(tls, ts+29987, libc.VaList(bp+24, zTab))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK) { goto __10 } @@ -171863,7 +171921,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { libc.Xmemcpy(tls, (*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed, 
(*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk, uint64(unsafe.Sizeof(U8(0)))*uint64((*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol)) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+16, p+64, - Xsqlite3_mprintf(tls, ts+29961, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) + Xsqlite3_mprintf(tls, ts+30008, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) } (*RbuObjIter)(unsafe.Pointer(pIter)).FnIndex = 0 @@ -171878,7 +171936,7 @@ libc.Xmemset(tls, (*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed, 0x01, uint64(unsafe.Sizeof(U8(0)))*uint64((*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol)) } (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+24, p+64, - Xsqlite3_mprintf(tls, ts+29989, libc.VaList(bp+8, zIdx))) + Xsqlite3_mprintf(tls, ts+30036, libc.VaList(bp+8, zIdx))) for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) { var iCid int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 24)), 1) if iCid >= 0 { @@ -171918,7 +171976,7 @@ rbuTableType(tls, p, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, pIter+72, bp+56, pIter+108) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NOTABLE { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+20858, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl)) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+20905, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl)) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc != 0 { return (*Sqlite3rbu)(unsafe.Pointer(p)).Frc @@ -171928,18 +171986,18 @@ } (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp+64, p+64, - Xsqlite3_mprintf(tls, ts+30018, libc.VaList(bp+8, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl))) + Xsqlite3_mprintf(tls, ts+30065, libc.VaList(bp+8, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl))) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { nCol = Xsqlite3_column_count(tls, *(*uintptr)(unsafe.Pointer(bp + 64))) rbuAllocateIterArrays(tls, p, pIter, nCol) } for i = 0; (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && i < nCol; i++ { var zName uintptr = Xsqlite3_column_name(tls, *(*uintptr)(unsafe.Pointer(bp + 64)), i) - if Xsqlite3_strnicmp(tls, ts+30037, zName, 4) != 0 { + if Xsqlite3_strnicmp(tls, ts+30084, zName, 4) != 0 { var zCopy uintptr = rbuStrndup(tls, zName, p+56) *(*int32)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FaiSrcOrder + uintptr((*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol)*4)) = (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(libc.PostIncInt32(&(*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol, 1))*8)) = zCopy - } else if 0 == Xsqlite3_stricmp(tls, ts+30042, zName) { + } else if 0 == Xsqlite3_stricmp(tls, ts+30089, zName) { bRbuRowid = 1 } } @@ -171951,18 +172009,18 @@ bRbuRowid != libc.Bool32((*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_VTAB || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE) { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, - ts+30052, libc.VaList(bp+16, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, + ts+30099, 
libc.VaList(bp+16, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, func() uintptr { if bRbuRowid != 0 { - return ts + 30081 + return ts + 30128 } - return ts + 30094 + return ts + 30141 }())) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+64, p+64, - Xsqlite3_mprintf(tls, ts+30103, libc.VaList(bp+32, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) + Xsqlite3_mprintf(tls, ts+30150, libc.VaList(bp+32, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) } for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 64))) { var zName uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 64)), 1) @@ -171976,7 +172034,7 @@ } if i == (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+30125, + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+30172, libc.VaList(bp+40, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zName)) } else { var iPk int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 64)), 5) @@ -172023,7 +172081,7 @@ var i int32 for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { var z uintptr = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) - zList = rbuMPrintf(tls, p, ts+30152, libc.VaList(bp, zList, zSep, z)) + zList = rbuMPrintf(tls, p, ts+30199, libc.VaList(bp, zList, zSep, z)) zSep = ts + 15971 } return zList @@ -172041,7 +172099,7 @@ for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { if int32(*(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(i)))) == iPk { var zCol uintptr = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) - zRet = rbuMPrintf(tls, p, ts+30161, libc.VaList(bp, zRet, zSep, zPre, zCol, zPost)) + zRet = rbuMPrintf(tls, p, ts+30208, libc.VaList(bp, zRet, zSep, zPre, zCol, zPost)) zSep = zSeparator break } @@ -172063,25 +172121,25 @@ if bRowid != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+72, p+64, Xsqlite3_mprintf(tls, - ts+30174, libc.VaList(bp, zWrite, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) + ts+30221, libc.VaList(bp, zWrite, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 72))) { var iMax Sqlite3_int64 = Xsqlite3_column_int64(tls, *(*uintptr)(unsafe.Pointer(bp + 72)), 0) - zRet = rbuMPrintf(tls, p, ts+30206, libc.VaList(bp+16, iMax)) + zRet = rbuMPrintf(tls, p, ts+30253, libc.VaList(bp+16, iMax)) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 72))) } else { - var zOrder uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+1544, ts+15971, ts+30229) - var zSelect uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+30235, ts+30242, ts+6309) + var zOrder uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+1544, ts+15971, ts+30276) + var zSelect uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+30282, ts+30289, ts+6309) var zList uintptr = rbuObjIterGetPkList(tls, p, pIter, ts+1544, ts+15971, ts+1544) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+72, p+64, 
Xsqlite3_mprintf(tls, - ts+30250, + ts+30297, libc.VaList(bp+24, zSelect, zWrite, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zOrder))) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 72))) { var zVal uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 72)), 0) - zRet = rbuMPrintf(tls, p, ts+30292, libc.VaList(bp+56, zList, zVal)) + zRet = rbuMPrintf(tls, p, ts+30339, libc.VaList(bp+56, zList, zVal)) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 72))) } @@ -172123,7 +172181,7 @@ *(*uintptr)(unsafe.Pointer(bp + 176)) = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+176, p+64, - Xsqlite3_mprintf(tls, ts+29989, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx))) + Xsqlite3_mprintf(tls, ts+30036, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx))) __1: if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 176)))) { goto __2 @@ -172158,7 +172216,7 @@ zCol = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) goto __7 __6: - zCol = ts + 30312 + zCol = ts + 30359 __7: ; goto __5 @@ -172166,11 +172224,11 @@ zCol = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(iCid)*8)) __5: ; - zLhs = rbuMPrintf(tls, p, ts+30320, + zLhs = rbuMPrintf(tls, p, ts+30367, libc.VaList(bp+8, zLhs, zSep, zCol, zCollate)) - zOrder = rbuMPrintf(tls, p, ts+30341, + zOrder = rbuMPrintf(tls, p, ts+30388, libc.VaList(bp+40, zOrder, zSep, iCol, zCol, zCollate)) - zSelect = rbuMPrintf(tls, p, ts+30377, + zSelect = rbuMPrintf(tls, p, ts+30424, libc.VaList(bp+80, zSelect, zSep, iCol, zCol)) zSep = ts + 15971 iCol++ @@ -172190,7 +172248,7 @@ *(*uintptr)(unsafe.Pointer(bp + 184)) = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+184, p+64, - Xsqlite3_mprintf(tls, ts+30404, + Xsqlite3_mprintf(tls, ts+30451, libc.VaList(bp+112, zSelect, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zOrder))) if !((*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 184)))) { goto __13 @@ -172217,7 +172275,7 @@ ; __18: ; - zVector = rbuMPrintf(tls, p, ts+30452, libc.VaList(bp+136, zVector, zSep, zQuoted)) + zVector = rbuMPrintf(tls, p, ts+30499, libc.VaList(bp+136, zVector, zSep, zQuoted)) zSep = ts + 15971 goto __15 __15: @@ -172229,7 +172287,7 @@ if !!(bFailed != 0) { goto __20 } - zRet = rbuMPrintf(tls, p, ts+30459, libc.VaList(bp+160, zLhs, zVector)) + zRet = rbuMPrintf(tls, p, ts+30506, libc.VaList(bp+160, zLhs, zVector)) __20: ; __13: @@ -172262,7 +172320,7 @@ if rc == SQLITE_OK { rc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+200, p+64, - Xsqlite3_mprintf(tls, ts+29989, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx))) + Xsqlite3_mprintf(tls, ts+30036, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx))) } for rc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 200))) { @@ -172274,7 +172332,7 @@ if iCid == -2 { var iSeq int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 200)), 0) - zRet = Xsqlite3_mprintf(tls, ts+30471, libc.VaList(bp+8, zRet, zCom, + zRet = Xsqlite3_mprintf(tls, ts+30518, libc.VaList(bp+8, zRet, zCom, 
(*RbuSpan)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FaIdxCol+uintptr(iSeq)*16)).FnSpan, (*RbuSpan)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FaIdxCol+uintptr(iSeq)*16)).FzSpan, zCollate)) zType = ts + 1544 } else { @@ -172286,37 +172344,37 @@ zCol = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) } else if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - zCol = ts + 30312 + zCol = ts + 30359 } else { - zCol = ts + 30042 + zCol = ts + 30089 } zType = ts + 1109 } else { zCol = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(iCid)*8)) zType = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblType + uintptr(iCid)*8)) } - zRet = Xsqlite3_mprintf(tls, ts+30493, libc.VaList(bp+48, zRet, zCom, zCol, zCollate)) + zRet = Xsqlite3_mprintf(tls, ts+30540, libc.VaList(bp+48, zRet, zCom, zCol, zCollate)) } if (*RbuObjIter)(unsafe.Pointer(pIter)).FbUnique == 0 || Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 200)), 5) != 0 { var zOrder uintptr = func() uintptr { if bDesc != 0 { - return ts + 30229 + return ts + 30276 } return ts + 1544 }() - zImpPK = Xsqlite3_mprintf(tls, ts+30513, + zImpPK = Xsqlite3_mprintf(tls, ts+30560, libc.VaList(bp+80, zImpPK, zCom, nBind, zCol, zOrder)) } - zImpCols = Xsqlite3_mprintf(tls, ts+30534, + zImpCols = Xsqlite3_mprintf(tls, ts+30581, libc.VaList(bp+120, zImpCols, zCom, nBind, zCol, zType, zCollate)) zWhere = Xsqlite3_mprintf(tls, - ts+30567, libc.VaList(bp+168, zWhere, zAnd, nBind, zCol)) + ts+30614, libc.VaList(bp+168, zWhere, zAnd, nBind, zCol)) if zRet == uintptr(0) || zImpPK == uintptr(0) || zImpCols == uintptr(0) || zWhere == uintptr(0) { rc = SQLITE_NOMEM } zCom = ts + 15971 - zAnd = ts + 22894 + zAnd = ts + 22941 nBind++ } @@ -172355,9 +172413,9 @@ for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { if *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed + uintptr(i))) != 0 { var zCol uintptr = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) - zList = Xsqlite3_mprintf(tls, ts+30591, libc.VaList(bp, zList, zS, zObj, zCol)) + zList = Xsqlite3_mprintf(tls, ts+30638, libc.VaList(bp, zList, zS, zObj, zCol)) } else { - zList = Xsqlite3_mprintf(tls, ts+30603, libc.VaList(bp+32, zList, zS)) + zList = Xsqlite3_mprintf(tls, ts+30650, libc.VaList(bp+32, zList, zS)) } zS = ts + 15971 if zList == uintptr(0) { @@ -172367,7 +172425,7 @@ } if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { - zList = rbuMPrintf(tls, p, ts+30612, libc.VaList(bp+48, zList, zObj)) + zList = rbuMPrintf(tls, p, ts+30659, libc.VaList(bp+48, zList, zObj)) } } return zList @@ -172379,18 +172437,18 @@ var zList uintptr = uintptr(0) if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_VTAB || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { - zList = rbuMPrintf(tls, p, ts+30627, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol+1)) + zList = rbuMPrintf(tls, p, ts+30674, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol+1)) } else if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL { var zSep uintptr = ts + 1544 var i int32 for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { if *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(i))) != 0 { - zList = rbuMPrintf(tls, p, ts+30641, libc.VaList(bp+8, zList, zSep, i, i+1)) - 
zSep = ts + 22894 + zList = rbuMPrintf(tls, p, ts+30688, libc.VaList(bp+8, zList, zSep, i, i+1)) + zSep = ts + 22941 } } zList = rbuMPrintf(tls, p, - ts+30653, libc.VaList(bp+40, zList)) + ts+30700, libc.VaList(bp+40, zList)) } else { var zSep uintptr = ts + 1544 @@ -172398,8 +172456,8 @@ for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { if *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(i))) != 0 { var zCol uintptr = *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)) - zList = rbuMPrintf(tls, p, ts+30703, libc.VaList(bp+48, zList, zSep, zCol, i+1)) - zSep = ts + 22894 + zList = rbuMPrintf(tls, p, ts+30750, libc.VaList(bp+48, zList, zSep, zCol, i+1)) + zSep = ts + 22941 } } } @@ -172408,7 +172466,7 @@ func rbuBadControlError(tls *libc.TLS, p uintptr) { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+30716, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+30763, 0) } func rbuObjIterGetSetlist(tls *libc.TLS, p uintptr, pIter uintptr, zMask uintptr) uintptr { @@ -172426,15 +172484,15 @@ for i = 0; i < (*RbuObjIter)(unsafe.Pointer(pIter)).FnTblCol; i++ { var c int8 = *(*int8)(unsafe.Pointer(zMask + uintptr(*(*int32)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FaiSrcOrder + uintptr(i)*4))))) if int32(c) == 'x' { - zList = rbuMPrintf(tls, p, ts+30703, + zList = rbuMPrintf(tls, p, ts+30750, libc.VaList(bp, zList, zSep, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)), i+1)) zSep = ts + 15971 } else if int32(c) == 'd' { - zList = rbuMPrintf(tls, p, ts+30742, + zList = rbuMPrintf(tls, p, ts+30789, libc.VaList(bp+32, zList, zSep, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)), *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)), i+1)) zSep = ts + 15971 } else if int32(c) == 'f' { - zList = rbuMPrintf(tls, p, ts+30772, + zList = rbuMPrintf(tls, p, ts+30819, libc.VaList(bp+72, zList, zSep, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)), *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblCol + uintptr(i)*8)), i+1)) zSep = ts + 15971 } @@ -172471,19 +172529,19 @@ var z uintptr = uintptr(0) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - var zSep uintptr = ts + 30809 + var zSep uintptr = ts + 30856 *(*uintptr)(unsafe.Pointer(bp + 56)) = uintptr(0) *(*uintptr)(unsafe.Pointer(bp + 64)) = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+56, p+64, - Xsqlite3_mprintf(tls, ts+29961, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) + Xsqlite3_mprintf(tls, ts+30008, libc.VaList(bp, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl))) for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 56))) { var zOrig uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), 3) if zOrig != 0 && libc.Xstrcmp(tls, zOrig, ts+17513) == 0 { var zIdx uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 56)), 1) if zIdx != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+64, p+64, - Xsqlite3_mprintf(tls, ts+29989, libc.VaList(bp+8, zIdx))) + Xsqlite3_mprintf(tls, ts+30036, libc.VaList(bp+8, zIdx))) 
} break } @@ -172495,15 +172553,15 @@ var zCol uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 64)), 2) var zDesc uintptr if Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 64)), 3) != 0 { - zDesc = ts + 30229 + zDesc = ts + 30276 } else { zDesc = ts + 1544 } - z = rbuMPrintf(tls, p, ts+30822, libc.VaList(bp+16, z, zSep, zCol, zDesc)) + z = rbuMPrintf(tls, p, ts+30869, libc.VaList(bp+16, z, zSep, zCol, zDesc)) zSep = ts + 15971 } } - z = rbuMPrintf(tls, p, ts+30833, libc.VaList(bp+48, z)) + z = rbuMPrintf(tls, p, ts+30880, libc.VaList(bp+48, z)) rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 64))) } return z @@ -172523,7 +172581,7 @@ var zPk uintptr = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+168, p+64, - ts+30837) + ts+30884) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { Xsqlite3_bind_int(tls, *(*uintptr)(unsafe.Pointer(bp + 168)), 1, tnum) if SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 168))) { @@ -172532,7 +172590,7 @@ } if zIdx != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+176, p+64, - Xsqlite3_mprintf(tls, ts+29989, libc.VaList(bp, zIdx))) + Xsqlite3_mprintf(tls, ts+30036, libc.VaList(bp, zIdx))) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 168))) @@ -172542,23 +172600,23 @@ var iCid int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 176)), 1) var bDesc int32 = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 176)), 3) var zCollate uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 176)), 4) - zCols = rbuMPrintf(tls, p, ts+30887, libc.VaList(bp+8, zCols, zComma, + zCols = rbuMPrintf(tls, p, ts+30934, libc.VaList(bp+8, zCols, zComma, iCid, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblType + uintptr(iCid)*8)), zCollate)) - zPk = rbuMPrintf(tls, p, ts+30909, libc.VaList(bp+48, zPk, zComma, iCid, func() uintptr { + zPk = rbuMPrintf(tls, p, ts+30956, libc.VaList(bp+48, zPk, zComma, iCid, func() uintptr { if bDesc != 0 { - return ts + 30229 + return ts + 30276 } return ts + 1544 }())) zComma = ts + 15971 } } - zCols = rbuMPrintf(tls, p, ts+30919, libc.VaList(bp+80, zCols)) + zCols = rbuMPrintf(tls, p, ts+30966, libc.VaList(bp+80, zCols)) rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 176))) Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+88, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+7793, 1, tnum)) rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+30934, + ts+30981, libc.VaList(bp+120, zCols, zPk)) Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+136, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+7793, 0, 0)) } @@ -172584,13 +172642,13 @@ (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+7793, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zCol, uintptr(0), bp+192, uintptr(0), uintptr(0), uintptr(0)) if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_IPK && *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(iCol))) != 0 { - zPk = ts + 30996 + zPk = ts + 31043 } - zSql = rbuMPrintf(tls, p, ts+31009, + zSql = rbuMPrintf(tls, p, ts+31056, libc.VaList(bp+32, zSql, zComma, zCol, *(*uintptr)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FazTblType + uintptr(iCol)*8)), zPk, *(*uintptr)(unsafe.Pointer(bp + 192)), func() uintptr { if *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabNotNull + 
uintptr(iCol))) != 0 { - return ts + 31036 + return ts + 31083 } return ts + 1544 }())) @@ -172600,16 +172658,16 @@ if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_WITHOUT_ROWID { var zPk uintptr = rbuWithoutRowidPK(tls, p, pIter) if zPk != 0 { - zSql = rbuMPrintf(tls, p, ts+31046, libc.VaList(bp+88, zSql, zPk)) + zSql = rbuMPrintf(tls, p, ts+31093, libc.VaList(bp+88, zSql, zPk)) } } Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+104, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+7793, 1, tnum)) - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31053, + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+31100, libc.VaList(bp+136, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zSql, func() uintptr { if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_WITHOUT_ROWID { - return ts + 31085 + return ts + 31132 } return ts + 1544 }())) @@ -172626,7 +172684,7 @@ if zBind != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, pIter+152, p+64, Xsqlite3_mprintf(tls, - ts+31100, + ts+31147, libc.VaList(bp, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zCollist, zRbuRowid, zBind))) } } @@ -172663,7 +172721,7 @@ if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { *(*int32)(unsafe.Pointer(bp + 8)) = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp, p+64, - ts+31157) + ts+31204) } if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { var rc2 int32 @@ -172768,7 +172826,7 @@ var zLimit uintptr = uintptr(0) if nOffset != 0 { - zLimit = Xsqlite3_mprintf(tls, ts+31223, libc.VaList(bp, nOffset)) + zLimit = Xsqlite3_mprintf(tls, ts+31270, libc.VaList(bp, nOffset)) if !(zLimit != 0) { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_NOMEM } @@ -172791,7 +172849,7 @@ Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+8, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+7793, 0, 1)) Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+40, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+7793, 1, tnum)) rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+31243, + ts+31290, libc.VaList(bp+72, zTbl, *(*uintptr)(unsafe.Pointer(bp + 600)), *(*uintptr)(unsafe.Pointer(bp + 608)))) Xsqlite3_test_control(tls, SQLITE_TESTCTRL_IMPOSTER, libc.VaList(bp+96, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+7793, 0, 0)) @@ -172799,13 +172857,13 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+136, p+64, - Xsqlite3_mprintf(tls, ts+31308, libc.VaList(bp+128, zTbl, zBind))) + Xsqlite3_mprintf(tls, ts+31355, libc.VaList(bp+128, zTbl, zBind))) } if libc.Bool32((*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0)) == 0 && (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+144, p+64, - Xsqlite3_mprintf(tls, ts+31344, libc.VaList(bp+144, zTbl, *(*uintptr)(unsafe.Pointer(bp + 616))))) + Xsqlite3_mprintf(tls, ts+31391, libc.VaList(bp+144, zTbl, *(*uintptr)(unsafe.Pointer(bp + 616))))) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { @@ -172821,7 +172879,7 @@ } zSql = Xsqlite3_mprintf(tls, - ts+31378, + ts+31425, libc.VaList(bp+160, zCollist, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zPart, @@ -172829,9 +172887,9 @@ if zStart != 0 { return func() uintptr { if zPart != 0 { - return ts + 
31439 + return ts + 31486 } - return ts + 31443 + return ts + 31490 }() } return ts + 1544 @@ -172840,20 +172898,20 @@ Xsqlite3_free(tls, zStart) } else if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { zSql = Xsqlite3_mprintf(tls, - ts+31449, + ts+31496, libc.VaList(bp+216, zCollist, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zPart, zCollist, zLimit)) } else { zSql = Xsqlite3_mprintf(tls, - ts+31510, + ts+31557, libc.VaList(bp+264, zCollist, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zPart, zCollist, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, zPart, func() uintptr { if zPart != 0 { - return ts + 31439 + return ts + 31486 } - return ts + 31443 + return ts + 31490 }(), zCollist, zLimit)) } @@ -172890,16 +172948,16 @@ if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_VTAB { return ts + 1544 } - return ts + 31669 + return ts + 31716 }() if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+136, pz, Xsqlite3_mprintf(tls, - ts+31678, + ts+31725, libc.VaList(bp+344, zWrite, zTbl, zCollist, func() uintptr { if bRbuRowid != 0 { - return ts + 31714 + return ts + 31761 } return ts + 1544 }(), zBindings))) @@ -172908,32 +172966,32 @@ if libc.Bool32((*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0)) == 0 && (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pIter+144, pz, Xsqlite3_mprintf(tls, - ts+31724, libc.VaList(bp+384, zWrite, zTbl, zWhere))) + ts+31771, libc.VaList(bp+384, zWrite, zTbl, zWhere))) } if libc.Bool32((*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0)) == 0 && (*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed != 0 { var zRbuRowid uintptr = ts + 1544 if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { - zRbuRowid = ts + 31752 + zRbuRowid = ts + 31799 } rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, - ts+31764, libc.VaList(bp+408, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, func() uintptr { + ts+31811, libc.VaList(bp+408, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl, func() uintptr { if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL { - return ts + 31840 + return ts + 31887 } return ts + 1544 }(), (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl)) rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+31857, + ts+31904, libc.VaList(bp+440, zWrite, zTbl, zOldlist, zWrite, zTbl, zOldlist, zWrite, zTbl, zNewlist)) if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_EXTERNAL || (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_NONE { rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+32156, + ts+32203, libc.VaList(bp+512, zWrite, zTbl, zNewlist)) } @@ -172946,9 +173004,9 @@ var zOrder uintptr = uintptr(0) if bRbuRowid != 0 { if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - zRbuRowid = ts + 32255 + zRbuRowid = ts + 32302 } else { - zRbuRowid = ts + 32265 + zRbuRowid = ts + 32312 } } @@ -172961,7 +173019,7 @@ } } if bRbuRowid != 0 { - zOrder = rbuMPrintf(tls, p, ts+30312, 0) + zOrder = rbuMPrintf(tls, p, ts+30359, 0) } else { zOrder = rbuObjIterGetPkList(tls, p, pIter, ts+1544, ts+15971, ts+1544) } @@ -172970,11 +173028,11 @@ if 
(*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, pIter+128, pz, Xsqlite3_mprintf(tls, - ts+32276, + ts+32323, libc.VaList(bp+536, zCollist, func() uintptr { if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - return ts + 32324 + return ts + 32371 } return ts + 1544 }(), @@ -172987,7 +173045,7 @@ }(), func() uintptr { if zOrder != 0 { - return ts + 24228 + return ts + 24275 } return ts + 1544 }(), zOrder, @@ -173055,9 +173113,9 @@ var zPrefix uintptr = ts + 1544 if (*RbuObjIter)(unsafe.Pointer(pIter)).FeType != RBU_PK_VTAB { - zPrefix = ts + 31669 + zPrefix = ts + 31716 } - zUpdate = Xsqlite3_mprintf(tls, ts+32330, + zUpdate = Xsqlite3_mprintf(tls, ts+32377, libc.VaList(bp, zPrefix, (*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl, zSet, zWhere)) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, pUp+8, p+64, zUpdate) @@ -173116,7 +173174,7 @@ } *(*int32)(unsafe.Pointer(bp + 16)) = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp+8, p+64, - Xsqlite3_mprintf(tls, ts+32360, libc.VaList(bp, p+48))) + Xsqlite3_mprintf(tls, ts+32407, libc.VaList(bp, p+48))) for *(*int32)(unsafe.Pointer(bp + 16)) == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 8))) { switch Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 8)), 0) { case RBU_STATE_STAGE: @@ -173189,18 +173247,18 @@ Xsqlite3_file_control(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+7793, SQLITE_FCNTL_RBUCNT, p) if (*Sqlite3rbu)(unsafe.Pointer(p)).FzState == uintptr(0) { var zFile uintptr = Xsqlite3_db_filename(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+7793) - (*Sqlite3rbu)(unsafe.Pointer(p)).FzState = rbuMPrintf(tls, p, ts+32390, libc.VaList(bp, zFile, zFile)) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzState = rbuMPrintf(tls, p, ts+32437, libc.VaList(bp, zFile, zFile)) } } if (*Sqlite3rbu)(unsafe.Pointer(p)).FzState != 0 { - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+32418, libc.VaList(bp+16, (*Sqlite3rbu)(unsafe.Pointer(p)).FzState)) + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+32465, libc.VaList(bp+16, (*Sqlite3rbu)(unsafe.Pointer(p)).FzState)) libc.Xmemcpy(tls, p+48, ts+14829, uint64(4)) } else { libc.Xmemcpy(tls, p+48, ts+7793, uint64(4)) } - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+32436, libc.VaList(bp+24, p+48)) + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+32483, libc.VaList(bp+24, p+48)) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { var bOpen int32 = 0 @@ -173240,11 +173298,11 @@ return } (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32502, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32549, 0) } else { var zTarget uintptr var zExtra uintptr = uintptr(0) - if libc.Xstrlen(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu) >= uint64(5) && 0 == libc.Xmemcmp(tls, ts+25575, (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu, uint64(5)) { + if libc.Xstrlen(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu) >= uint64(5) && 0 == libc.Xmemcmp(tls, ts+25622, (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu, uint64(5)) { zExtra = (*Sqlite3rbu)(unsafe.Pointer(p)).FzRbu + 5 for *(*int8)(unsafe.Pointer(zExtra)) != 0 { if 
int32(*(*int8)(unsafe.Pointer(libc.PostIncUintptr(&zExtra, 1)))) == '?' { @@ -173256,13 +173314,13 @@ } } - zTarget = Xsqlite3_mprintf(tls, ts+32534, + zTarget = Xsqlite3_mprintf(tls, ts+32581, libc.VaList(bp+32, Xsqlite3_db_filename(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+7793), func() uintptr { if zExtra == uintptr(0) { return ts + 1544 } - return ts + 32566 + return ts + 32613 }(), func() uintptr { if zExtra == uintptr(0) { return ts + 1544 @@ -173281,21 +173339,21 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_create_function(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+32568, -1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { + ts+32615, -1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rbuTmpInsertFunc})), uintptr(0), uintptr(0)) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_create_function(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, - ts+32583, 2, SQLITE_UTF8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { + ts+32630, 2, SQLITE_UTF8, uintptr(0), *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rbuFossilDeltaFunc})), uintptr(0), uintptr(0)) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_create_function(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, - ts+32600, -1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { + ts+32647, -1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rbuTargetNameFunc})), uintptr(0), uintptr(0)) } @@ -173303,7 +173361,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_file_control(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+7793, SQLITE_FCNTL_RBU, p) } - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+32616, 0) + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+32663, 0) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_file_control(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+7793, SQLITE_FCNTL_RBU, p) @@ -173311,7 +173369,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_NOTFOUND { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32644, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+32691, 0) } } @@ -173340,14 +173398,14 @@ if pState == uintptr(0) { (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage = 0 if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+32616, uintptr(0), uintptr(0), uintptr(0)) + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+32663, uintptr(0), uintptr(0), uintptr(0)) } } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { var rc2 int32 (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage = RBU_STAGE_CAPTURE - rc2 = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+32662, uintptr(0), uintptr(0), uintptr(0)) + rc2 = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+32709, uintptr(0), uintptr(0), uintptr(0)) if rc2 != SQLITE_NOTICE { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = rc2 } @@ -173473,7 +173531,7 @@ func rbuExclusiveCheckpoint(tls *libc.TLS, db uintptr) int32 { var zUri 
uintptr = Xsqlite3_db_filename(tls, db, uintptr(0)) - return Xsqlite3_uri_boolean(tls, zUri, ts+32697, 0) + return Xsqlite3_uri_boolean(tls, zUri, ts+32744, 0) } func rbuMoveOalFile(tls *libc.TLS, p uintptr) { @@ -173488,8 +173546,8 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { zMove = Xsqlite3_db_filename(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+7793) } - zOal = Xsqlite3_mprintf(tls, ts+32722, libc.VaList(bp, zMove)) - zWal = Xsqlite3_mprintf(tls, ts+32729, libc.VaList(bp+8, zMove)) + zOal = Xsqlite3_mprintf(tls, ts+32769, libc.VaList(bp, zMove)) + zWal = Xsqlite3_mprintf(tls, ts+32776, libc.VaList(bp+8, zMove)) if zWal == uintptr(0) || zOal == uintptr(0) { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_NOMEM @@ -173606,7 +173664,7 @@ (*RbuObjIter)(unsafe.Pointer(pIter)).FzIdx == uintptr(0) && (*RbuObjIter)(unsafe.Pointer(pIter)).FeType == RBU_PK_IPK && *(*U8)(unsafe.Pointer((*RbuObjIter)(unsafe.Pointer(pIter)).FabTblPk + uintptr(i))) != 0 && Xsqlite3_column_type(tls, (*RbuObjIter)(unsafe.Pointer(pIter)).FpSelect, i) == SQLITE_NULL { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_MISMATCH - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+25213, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+25260, 0) return } @@ -173699,7 +173757,7 @@ var iCookie int32 = 1000000 (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, dbread, bp+8, p+64, - ts+32736) + ts+32783) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { if SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 8))) { iCookie = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 8)), 0) @@ -173707,7 +173765,7 @@ rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 8))) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+32758, libc.VaList(bp, iCookie+1)) + rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+32805, libc.VaList(bp, iCookie+1)) } } } @@ -173728,7 +173786,7 @@ rc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp+168, p+64, Xsqlite3_mprintf(tls, - ts+32785, + ts+32832, libc.VaList(bp, p+48, RBU_STATE_STAGE, eStage, RBU_STATE_TBL, (*Sqlite3rbu)(unsafe.Pointer(p)).Fobjiter.FzTbl, @@ -173758,9 +173816,9 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { *(*uintptr)(unsafe.Pointer(bp + 24)) = uintptr(0) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareFreeAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp+24, p+64, - Xsqlite3_mprintf(tls, ts+32943, libc.VaList(bp, zPragma))) + Xsqlite3_mprintf(tls, ts+32990, libc.VaList(bp, zPragma))) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) { - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+32958, + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+33005, libc.VaList(bp+8, zPragma, Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp + 24)), 0))) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp + 24))) @@ -173774,10 +173832,10 @@ *(*uintptr)(unsafe.Pointer(bp)) = uintptr(0) *(*uintptr)(unsafe.Pointer(bp + 8)) = uintptr(0) - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+32978, uintptr(0), uintptr(0), p+64) + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, 
(*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+33025, uintptr(0), uintptr(0), p+64) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp, p+64, - ts+33003) + ts+33050) } for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp))) == SQLITE_ROW { @@ -173791,12 +173849,12 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp, p+64, - ts+33111) + ts+33158) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, bp+8, p+64, - ts+33176) + ts+33223) } for (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp))) == SQLITE_ROW { @@ -173808,7 +173866,7 @@ (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_reset(tls, *(*uintptr)(unsafe.Pointer(bp + 8))) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+33220, uintptr(0), uintptr(0), p+64) + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+33267, uintptr(0), uintptr(0), p+64) } rbuFinalize(tls, p, *(*uintptr)(unsafe.Pointer(bp))) @@ -173836,7 +173894,7 @@ if (*RbuObjIter)(unsafe.Pointer(pIter)).FbCleanup != 0 { if libc.Bool32((*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0)) == 0 && (*RbuObjIter)(unsafe.Pointer(pIter)).FabIndexed != 0 { rbuMPrintfExec(tls, p, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, - ts+33245, libc.VaList(bp, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl)) + ts+33292, libc.VaList(bp, p+48, (*RbuObjIter)(unsafe.Pointer(pIter)).FzDataTbl)) } } else { rbuObjIterPrepareAll(tls, p, pIter, 0) @@ -173958,7 +174016,7 @@ if rc == SQLITE_OK && !(int32((*RbuObjIter)(unsafe.Pointer(pIter)).FzTbl) != 0) { rc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+33273, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+33320, 0) } if rc == SQLITE_OK { @@ -173974,7 +174032,7 @@ bp := tls.Alloc(16) defer tls.Free(16) - var zOal uintptr = rbuMPrintf(tls, p, ts+32722, libc.VaList(bp, (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget)) + var zOal uintptr = rbuMPrintf(tls, p, ts+32769, libc.VaList(bp, (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget)) if zOal != 0 { *(*uintptr)(unsafe.Pointer(bp + 8)) = uintptr(0) Xsqlite3_file_control(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+7793, SQLITE_FCNTL_VFS_POINTER, bp+8) @@ -173991,7 +174049,7 @@ defer tls.Free(76) Xsqlite3_randomness(tls, int32(unsafe.Sizeof(int32(0))), bp+8) - Xsqlite3_snprintf(tls, int32(unsafe.Sizeof([64]int8{})), bp+12, ts+33298, libc.VaList(bp, *(*int32)(unsafe.Pointer(bp + 8)))) + Xsqlite3_snprintf(tls, int32(unsafe.Sizeof([64]int8{})), bp+12, ts+33345, libc.VaList(bp, *(*int32)(unsafe.Pointer(bp + 8)))) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3rbu_create_vfs(tls, bp+12, uintptr(0)) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { var pVfs uintptr = Xsqlite3_vfs_find(tls, bp+12) @@ -174025,7 +174083,7 @@ rc = prepareFreeAndCollectError(tls, db, bp+8, bp+16, Xsqlite3_mprintf(tls, - ts+33309, libc.VaList(bp, Xsqlite3_value_text(tls, *(*uintptr)(unsafe.Pointer(apVal)))))) + ts+33356, libc.VaList(bp, Xsqlite3_value_text(tls, 
*(*uintptr)(unsafe.Pointer(apVal)))))) if rc != SQLITE_OK { Xsqlite3_result_error(tls, pCtx, *(*uintptr)(unsafe.Pointer(bp + 16)), -1) } else { @@ -174055,13 +174113,13 @@ (*Sqlite3rbu)(unsafe.Pointer(p)).FnPhaseOneStep = int64(-1) (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_create_function(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, - ts+33381, 1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { + ts+33428, 1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{rbuIndexCntFunc})), uintptr(0), uintptr(0)) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp, p+64, - ts+33395) + ts+33442) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { if SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp))) { @@ -174072,7 +174130,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && bExists != 0 { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = prepareAndCollectError(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, bp, p+64, - ts+33452) + ts+33499) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { if SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp))) { (*Sqlite3rbu)(unsafe.Pointer(p)).FnPhaseOneStep = Xsqlite3_column_int64(tls, *(*uintptr)(unsafe.Pointer(bp)), 0) @@ -174146,7 +174204,7 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK && (*Rbu_file)(unsafe.Pointer((*Sqlite3rbu)(unsafe.Pointer(p)).FpTargetFd)).FpWalFd != 0 { if (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage == RBU_STAGE_OAL { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+33526, 0) + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+33573, 0) } else if (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage == RBU_STAGE_MOVE { (*Sqlite3rbu)(unsafe.Pointer(p)).FeStage = RBU_STAGE_CKPT (*Sqlite3rbu)(unsafe.Pointer(p)).FnStep = 0 @@ -174164,12 +174222,12 @@ }() if (*Rbu_file)(unsafe.Pointer(pFd)).FiCookie != (*RbuState)(unsafe.Pointer(pState)).FiCookie { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = SQLITE_BUSY - (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+33558, + (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg = Xsqlite3_mprintf(tls, ts+33605, libc.VaList(bp+8, func() uintptr { if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { - return ts + 33590 + return ts + 33637 } - return ts + 33597 + return ts + 33644 }())) } } @@ -174193,14 +174251,14 @@ } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { - (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, db, ts+33604, uintptr(0), uintptr(0), p+64) + (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, db, ts+33651, uintptr(0), uintptr(0), p+64) } if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_OK { var frc int32 = Xsqlite3_file_control(tls, db, ts+7793, SQLITE_FCNTL_ZIPVFS, uintptr(0)) if frc == SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = Xsqlite3_exec(tls, - db, ts+33620, uintptr(0), uintptr(0), p+64) + db, ts+33667, uintptr(0), uintptr(0), p+64) } } @@ -174254,7 +174312,7 @@ } if zState != 0 { var n int32 = int32(libc.Xstrlen(tls, zState)) - if n >= 7 && 0 == libc.Xmemcmp(tls, ts+33644, zState+uintptr(n-7), uint64(7)) { + if n >= 7 && 0 == libc.Xmemcmp(tls, ts+33691, zState+uintptr(n-7), uint64(7)) { return rbuMisuseError(tls) } } @@ -174281,7 +174339,7 @@ var i uint32 var nErrmsg Size_t = libc.Xstrlen(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg) for i = uint32(0); 
Size_t(i) < nErrmsg-uint64(8); i++ { - if libc.Xmemcmp(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg+uintptr(i), ts+31669, uint64(8)) == 0 { + if libc.Xmemcmp(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg+uintptr(i), ts+31716, uint64(8)) == 0 { var nDel int32 = 8 for int32(*(*int8)(unsafe.Pointer((*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg + uintptr(i+uint32(nDel))))) >= '0' && int32(*(*int8)(unsafe.Pointer((*Sqlite3rbu)(unsafe.Pointer(p)).FzErrmsg + uintptr(i+uint32(nDel))))) <= '9' { nDel++ @@ -174317,7 +174375,7 @@ rbuObjIterFinalize(tls, p+80) if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) && (*Sqlite3rbu)(unsafe.Pointer(p)).Frc != SQLITE_OK && (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu != 0 { - var rc2 int32 = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+33652, uintptr(0), uintptr(0), uintptr(0)) + var rc2 int32 = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, ts+33699, uintptr(0), uintptr(0), uintptr(0)) if (*Sqlite3rbu)(unsafe.Pointer(p)).Frc == SQLITE_DONE && rc2 != SQLITE_OK { (*Sqlite3rbu)(unsafe.Pointer(p)).Frc = rc2 } @@ -174436,12 +174494,12 @@ if (*Sqlite3rbu)(unsafe.Pointer(p)).FzTarget == uintptr(0) { zBegin = ts + 15860 } else { - zBegin = ts + 33604 + zBegin = ts + 33651 } rc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbRbu, zBegin, uintptr(0), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { - rc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+33604, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, (*Sqlite3rbu)(unsafe.Pointer(p)).FdbMain, ts+33651, uintptr(0), uintptr(0), uintptr(0)) } } @@ -174787,7 +174845,7 @@ })(unsafe.Pointer(&struct{ uintptr }{xControl})).f(tls, (*Rbu_file)(unsafe.Pointer(p)).FpReal, SQLITE_FCNTL_ZIPVFS, bp+16) if rc == SQLITE_OK { rc = SQLITE_ERROR - (*Sqlite3rbu)(unsafe.Pointer(pRbu)).FzErrmsg = Xsqlite3_mprintf(tls, ts+33679, 0) + (*Sqlite3rbu)(unsafe.Pointer(pRbu)).FzErrmsg = Xsqlite3_mprintf(tls, ts+33726, 0) } else if rc == SQLITE_NOTFOUND { (*Sqlite3rbu)(unsafe.Pointer(pRbu)).FpTargetFd = p (*Rbu_file)(unsafe.Pointer(p)).FpRbu = pRbu @@ -174812,7 +174870,7 @@ if rc == SQLITE_OK && op == SQLITE_FCNTL_VFSNAME { var pRbuVfs uintptr = (*Rbu_file)(unsafe.Pointer(p)).FpRbuVfs var zIn uintptr = *(*uintptr)(unsafe.Pointer(pArg)) - var zOut uintptr = Xsqlite3_mprintf(tls, ts+33702, libc.VaList(bp, (*Rbu_vfs)(unsafe.Pointer(pRbuVfs)).Fbase.FzName, zIn)) + var zOut uintptr = Xsqlite3_mprintf(tls, ts+33749, libc.VaList(bp, (*Rbu_vfs)(unsafe.Pointer(pRbuVfs)).Fbase.FzName, zIn)) *(*uintptr)(unsafe.Pointer(pArg)) = zOut if zOut == uintptr(0) { rc = SQLITE_NOMEM @@ -174972,7 +175030,7 @@ } if oflags&SQLITE_OPEN_MAIN_DB != 0 && - Xsqlite3_uri_boolean(tls, zName, ts+33713, 0) != 0 { + Xsqlite3_uri_boolean(tls, zName, ts+33760, 0) != 0 { oflags = SQLITE_OPEN_TEMP_DB | SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE | SQLITE_OPEN_EXCLUSIVE | SQLITE_OPEN_DELETEONCLOSE zOpen = uintptr(0) } @@ -175801,7 +175859,7 @@ rc = Xsqlite3_table_column_metadata(tls, db, zDb, zThis, uintptr(0), uintptr(0), uintptr(0), uintptr(0), uintptr(0), uintptr(0)) if rc == SQLITE_OK { zPragma = Xsqlite3_mprintf(tls, - ts+33724, 0) + ts+33771, 0) } else if rc == SQLITE_ERROR { zPragma = Xsqlite3_mprintf(tls, ts+1544, 0) } else { @@ -175814,7 +175872,7 @@ return rc } } else { - zPragma = Xsqlite3_mprintf(tls, ts+33845, libc.VaList(bp, zDb, zThis)) + zPragma = Xsqlite3_mprintf(tls, ts+33892, libc.VaList(bp, zDb, zThis)) } if !(zPragma != 0) { *(*uintptr)(unsafe.Pointer(pazCol)) = uintptr(0) @@ -176494,9 
+176552,9 @@ for i = 0; i < nCol; i++ { if *(*U8)(unsafe.Pointer(abPK + uintptr(i))) != 0 { - zRet = Xsqlite3_mprintf(tls, ts+33874, + zRet = Xsqlite3_mprintf(tls, ts+33921, libc.VaList(bp, zRet, zSep, zDb1, zTab, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*8)), zDb2, zTab, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*8)))) - zSep = ts + 22894 + zSep = ts + 22941 if zRet == uintptr(0) { break } @@ -176519,9 +176577,9 @@ if int32(*(*U8)(unsafe.Pointer(abPK + uintptr(i)))) == 0 { bHave = 1 zRet = Xsqlite3_mprintf(tls, - ts+33908, + ts+33955, libc.VaList(bp, zRet, zSep, zDb1, zTab, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*8)), zDb2, zTab, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*8)))) - zSep = ts + 33949 + zSep = ts + 33996 if zRet == uintptr(0) { break } @@ -176529,7 +176587,7 @@ } if bHave == 0 { - zRet = Xsqlite3_mprintf(tls, ts+8882, 0) + zRet = Xsqlite3_mprintf(tls, ts+8871, 0) } return zRet @@ -176540,7 +176598,7 @@ defer tls.Free(40) var zRet uintptr = Xsqlite3_mprintf(tls, - ts+33954, + ts+34001, libc.VaList(bp, zDb1, zTbl, zDb2, zTbl, zExpr)) return zRet } @@ -176583,7 +176641,7 @@ rc = SQLITE_NOMEM } else { var zStmt uintptr = Xsqlite3_mprintf(tls, - ts+34032, + ts+34079, libc.VaList(bp, (*Sqlite3_session)(unsafe.Pointer(pSession)).FzDb, (*SessionTable)(unsafe.Pointer(pTab)).FzName, zFrom, (*SessionTable)(unsafe.Pointer(pTab)).FzName, zExpr, zExpr2)) if zStmt == uintptr(0) { rc = SQLITE_NOMEM @@ -176710,7 +176768,7 @@ if !(pzErrMsg != 0) { goto __16 } - *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+34085, 0) + *(*uintptr)(unsafe.Pointer(pzErrMsg)) = Xsqlite3_mprintf(tls, ts+34132, 0) __16: ; rc = SQLITE_SCHEMA @@ -177186,7 +177244,7 @@ if 0 == Xsqlite3_stricmp(tls, ts+12700, zTab) { zSql = Xsqlite3_mprintf(tls, - ts+34112, libc.VaList(bp, zDb)) + ts+34159, libc.VaList(bp, zDb)) if zSql == uintptr(0) { *(*int32)(unsafe.Pointer(bp + 24)) = SQLITE_NOMEM } @@ -177195,18 +177253,18 @@ var zSep uintptr = ts + 1544 *(*SessionBuffer)(unsafe.Pointer(bp + 8)) = SessionBuffer{} - sessionAppendStr(tls, bp+8, ts+34222, bp+24) + sessionAppendStr(tls, bp+8, ts+34269, bp+24) sessionAppendIdent(tls, bp+8, zDb, bp+24) sessionAppendStr(tls, bp+8, ts+1557, bp+24) sessionAppendIdent(tls, bp+8, zTab, bp+24) - sessionAppendStr(tls, bp+8, ts+34237, bp+24) + sessionAppendStr(tls, bp+8, ts+34284, bp+24) for i = 0; i < nCol; i++ { if *(*U8)(unsafe.Pointer(abPK + uintptr(i))) != 0 { sessionAppendStr(tls, bp+8, zSep, bp+24) sessionAppendIdent(tls, bp+8, *(*uintptr)(unsafe.Pointer(azCol + uintptr(i)*8)), bp+24) - sessionAppendStr(tls, bp+8, ts+34245, bp+24) + sessionAppendStr(tls, bp+8, ts+34292, bp+24) sessionAppendInteger(tls, bp+8, i+1, bp+24) - zSep = ts + 22894 + zSep = ts + 22941 } } zSql = (*SessionBuffer)(unsafe.Pointer(bp + 8)).FaBuf @@ -177315,7 +177373,7 @@ if (*Sqlite3_session)(unsafe.Pointer(pSession)).Frc != 0 { return (*Sqlite3_session)(unsafe.Pointer(pSession)).Frc } - *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3_exec(tls, (*Sqlite3_session)(unsafe.Pointer(pSession)).Fdb, ts+34251, uintptr(0), uintptr(0), uintptr(0)) + *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3_exec(tls, (*Sqlite3_session)(unsafe.Pointer(pSession)).Fdb, ts+34298, uintptr(0), uintptr(0), uintptr(0)) if *(*int32)(unsafe.Pointer(bp + 40)) != SQLITE_OK { return *(*int32)(unsafe.Pointer(bp + 40)) } @@ -177407,7 +177465,7 @@ } Xsqlite3_free(tls, (*SessionBuffer)(unsafe.Pointer(bp+24)).FaBuf) - Xsqlite3_exec(tls, db, ts+34271, uintptr(0), uintptr(0), uintptr(0)) + Xsqlite3_exec(tls, db, ts+34318, 
uintptr(0), uintptr(0), uintptr(0)) Xsqlite3_mutex_leave(tls, Xsqlite3_db_mutex(tls, db)) return *(*int32)(unsafe.Pointer(bp + 40)) } @@ -177670,7 +177728,7 @@ rc = sessionInputBuffer(tls, pIn, 9) if rc == SQLITE_OK { if (*SessionInput)(unsafe.Pointer(pIn)).FiNext >= (*SessionInput)(unsafe.Pointer(pIn)).FnData { - rc = Xsqlite3CorruptError(tls, 219078) + rc = Xsqlite3CorruptError(tls, 219169) } else { eType = int32(*(*U8)(unsafe.Pointer((*SessionInput)(unsafe.Pointer(pIn)).FaData + uintptr(libc.PostIncInt32(&(*SessionInput)(unsafe.Pointer(pIn)).FiNext, 1))))) @@ -177693,7 +177751,7 @@ rc = sessionInputBuffer(tls, pIn, *(*int32)(unsafe.Pointer(bp))) if rc == SQLITE_OK { if *(*int32)(unsafe.Pointer(bp)) < 0 || *(*int32)(unsafe.Pointer(bp)) > (*SessionInput)(unsafe.Pointer(pIn)).FnData-(*SessionInput)(unsafe.Pointer(pIn)).FiNext { - rc = Xsqlite3CorruptError(tls, 219098) + rc = Xsqlite3CorruptError(tls, 219189) } else { var enc U8 = func() uint8 { if eType == SQLITE_TEXT { @@ -177735,7 +177793,7 @@ nRead = nRead + sessionVarintGet(tls, (*SessionInput)(unsafe.Pointer(pIn)).FaData+uintptr((*SessionInput)(unsafe.Pointer(pIn)).FiNext+nRead), bp) if *(*int32)(unsafe.Pointer(bp)) < 0 || *(*int32)(unsafe.Pointer(bp)) > 65536 { - rc = Xsqlite3CorruptError(tls, 219152) + rc = Xsqlite3CorruptError(tls, 219243) } else { rc = sessionInputBuffer(tls, pIn, nRead+*(*int32)(unsafe.Pointer(bp))+100) nRead = nRead + *(*int32)(unsafe.Pointer(bp)) @@ -177796,7 +177854,7 @@ (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Ftblhdr.FnBuf = 0 sessionBufferGrow(tls, p+72, int64(nByte), bp+4) } else { - *(*int32)(unsafe.Pointer(bp + 4)) = Xsqlite3CorruptError(tls, 219240) + *(*int32)(unsafe.Pointer(bp + 4)) = Xsqlite3CorruptError(tls, 219331) } } @@ -177870,13 +177928,13 @@ } if (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FzTab == uintptr(0) || (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FbPatchset != 0 && (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FbInvert != 0 { - return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219326)) + return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219417)) } (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fop = int32(op) (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FbIndirect = int32(*(*U8)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fin.FaData + uintptr(libc.PostIncInt32(&(*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fin.FiNext, 1))))) if (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fop != SQLITE_UPDATE && (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fop != SQLITE_DELETE && (*Sqlite3_changeset_iter)(unsafe.Pointer(p)).Fop != SQLITE_INSERT { - return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219332)) + return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219423)) } if paRec != 0 { @@ -177938,7 +177996,7 @@ if *(*U8)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FabPK + uintptr(i))) != 0 { *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FapValue + uintptr(i)*8)) = *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FapValue + uintptr(i+(*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FnCol)*8)) if *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FapValue + uintptr(i)*8)) == uintptr(0) { - return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219376)) + return libc.AssignPtrInt32(p+100, Xsqlite3CorruptError(tls, 219467)) } *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FapValue + 
uintptr(i+(*Sqlite3_changeset_iter)(unsafe.Pointer(p)).FnCol)*8)) = uintptr(0) } @@ -178311,7 +178369,7 @@ goto __6 __11: - *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3CorruptError(tls, 219741) + *(*int32)(unsafe.Pointer(bp + 40)) = Xsqlite3CorruptError(tls, 219832) goto finished_invert __6: ; @@ -178490,34 +178548,34 @@ (*SessionUpdate)(unsafe.Pointer(pUp)).FaMask = pUp + 1*24 libc.Xmemcpy(tls, (*SessionUpdate)(unsafe.Pointer(pUp)).FaMask, (*SessionApplyCtx)(unsafe.Pointer(p)).FaUpdateMask, uint64(nU32)*uint64(unsafe.Sizeof(U32(0)))) - sessionAppendStr(tls, bp, ts+34289, bp+16) + sessionAppendStr(tls, bp, ts+34336, bp+16) sessionAppendIdent(tls, bp, (*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FzTab, bp+16) - sessionAppendStr(tls, bp, ts+34302, bp+16) + sessionAppendStr(tls, bp, ts+34349, bp+16) for ii = 0; ii < (*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FnCol; ii++ { if int32(*(*U8)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FabPK + uintptr(ii)))) == 0 && *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FapValue + uintptr((*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FnCol+ii)*8)) != 0 { sessionAppendStr(tls, bp, zSep, bp+16) sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(ii)*8)), bp+16) - sessionAppendStr(tls, bp, ts+34308, bp+16) + sessionAppendStr(tls, bp, ts+34355, bp+16) sessionAppendInteger(tls, bp, ii*2+1, bp+16) zSep = ts + 15971 } } zSep = ts + 1544 - sessionAppendStr(tls, bp, ts+34237, bp+16) + sessionAppendStr(tls, bp, ts+34284, bp+16) for ii = 0; ii < (*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FnCol; ii++ { if *(*U8)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FabPK + uintptr(ii))) != 0 || bPatchset == 0 && *(*uintptr)(unsafe.Pointer((*Sqlite3_changeset_iter)(unsafe.Pointer(pIter)).FapValue + uintptr(ii)*8)) != 0 { sessionAppendStr(tls, bp, zSep, bp+16) if bStat1 != 0 && ii == 1 { sessionAppendStr(tls, bp, - ts+34313, bp+16) + ts+34360, bp+16) } else { sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(ii)*8)), bp+16) - sessionAppendStr(tls, bp, ts+34245, bp+16) + sessionAppendStr(tls, bp, ts+34292, bp+16) sessionAppendInteger(tls, bp, ii*2+2, bp+16) } - zSep = ts + 22894 + zSep = ts + 22941 } } @@ -178569,34 +178627,34 @@ *(*SessionBuffer)(unsafe.Pointer(bp)) = SessionBuffer{} var nPk int32 = 0 - sessionAppendStr(tls, bp, ts+34388, bp+16) + sessionAppendStr(tls, bp, ts+34435, bp+16) sessionAppendIdent(tls, bp, zTab, bp+16) - sessionAppendStr(tls, bp, ts+34237, bp+16) + sessionAppendStr(tls, bp, ts+34284, bp+16) for i = 0; i < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol; i++ { if *(*U8)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FabPK + uintptr(i))) != 0 { nPk++ sessionAppendStr(tls, bp, zSep, bp+16) sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(i)*8)), bp+16) - sessionAppendStr(tls, bp, ts+34308, bp+16) + sessionAppendStr(tls, bp, ts+34355, bp+16) sessionAppendInteger(tls, bp, i+1, bp+16) - zSep = ts + 22894 + zSep = ts + 22941 } } if nPk < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol { - sessionAppendStr(tls, bp, ts+34406, bp+16) + sessionAppendStr(tls, bp, ts+34453, bp+16) sessionAppendInteger(tls, bp, (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol+1, bp+16) - sessionAppendStr(tls, bp, ts+33949, bp+16) + sessionAppendStr(tls, bp, ts+33996, bp+16) zSep = ts + 1544 for i = 0; i < 
(*SessionApplyCtx)(unsafe.Pointer(p)).FnCol; i++ { if !(int32(*(*U8)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FabPK + uintptr(i)))) != 0) { sessionAppendStr(tls, bp, zSep, bp+16) sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(i)*8)), bp+16) - sessionAppendStr(tls, bp, ts+34245, bp+16) + sessionAppendStr(tls, bp, ts+34292, bp+16) sessionAppendInteger(tls, bp, i+1, bp+16) - zSep = ts + 34414 + zSep = ts + 34461 } } sessionAppendStr(tls, bp, ts+6309, bp+16) @@ -178623,9 +178681,9 @@ var i int32 *(*SessionBuffer)(unsafe.Pointer(bp)) = SessionBuffer{} - sessionAppendStr(tls, bp, ts+34419, bp+16) + sessionAppendStr(tls, bp, ts+34466, bp+16) sessionAppendIdent(tls, bp, zTab, bp+16) - sessionAppendStr(tls, bp, ts+22900, bp+16) + sessionAppendStr(tls, bp, ts+22947, bp+16) for i = 0; i < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol; i++ { if i != 0 { sessionAppendStr(tls, bp, ts+15971, bp+16) @@ -178633,9 +178691,9 @@ sessionAppendIdent(tls, bp, *(*uintptr)(unsafe.Pointer((*SessionApplyCtx)(unsafe.Pointer(p)).FazCol + uintptr(i)*8)), bp+16) } - sessionAppendStr(tls, bp, ts+34437, bp+16) + sessionAppendStr(tls, bp, ts+34484, bp+16) for i = 1; i < (*SessionApplyCtx)(unsafe.Pointer(p)).FnCol; i++ { - sessionAppendStr(tls, bp, ts+34448, bp+16) + sessionAppendStr(tls, bp, ts+34495, bp+16) } sessionAppendStr(tls, bp, ts+6309, bp+16) @@ -178654,11 +178712,11 @@ var rc int32 = sessionSelectRow(tls, db, ts+12700, p) if rc == SQLITE_OK { rc = sessionPrepare(tls, db, p+16, - ts+34452) + ts+34499) } if rc == SQLITE_OK { rc = sessionPrepare(tls, db, p+8, - ts+34565) + ts+34612) } return rc } @@ -178686,7 +178744,7 @@ f func(*libc.TLS, uintptr, int32, uintptr) int32 })(unsafe.Pointer(&struct{ uintptr }{xValue})).f(tls, pIter, i, bp) if *(*uintptr)(unsafe.Pointer(bp)) == uintptr(0) { - rc = Xsqlite3CorruptError(tls, 220219) + rc = Xsqlite3CorruptError(tls, 220310) } else { rc = sessionBindValue(tls, pStmt, i+1, *(*uintptr)(unsafe.Pointer(bp))) } @@ -178939,7 +178997,7 @@ if *(*int32)(unsafe.Pointer(bp + 4)) != 0 { rc = sessionApplyOneOp(tls, pIter, pApply, xConflict, pCtx, uintptr(0), uintptr(0)) } else if *(*int32)(unsafe.Pointer(bp)) != 0 { - rc = Xsqlite3_exec(tls, db, ts+34709, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+34756, uintptr(0), uintptr(0), uintptr(0)) if rc == SQLITE_OK { rc = sessionBindRow(tls, pIter, *(*uintptr)(unsafe.Pointer(&struct { @@ -178955,7 +179013,7 @@ rc = sessionApplyOneOp(tls, pIter, pApply, xConflict, pCtx, uintptr(0), uintptr(0)) } if rc == SQLITE_OK { - rc = Xsqlite3_exec(tls, db, ts+34730, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+34777, uintptr(0), uintptr(0), uintptr(0)) } } } @@ -179028,10 +179086,10 @@ (*SessionApplyCtx)(unsafe.Pointer(bp + 48)).FbInvertConstraints = libc.BoolInt32(!!(flags&SQLITE_CHANGESETAPPLY_INVERT != 0)) Xsqlite3_mutex_enter(tls, Xsqlite3_db_mutex(tls, db)) if flags&SQLITE_CHANGESETAPPLY_NOSAVEPOINT == 0 { - rc = Xsqlite3_exec(tls, db, ts+34749, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+34796, uintptr(0), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { - rc = Xsqlite3_exec(tls, db, ts+34775, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+34822, uintptr(0), uintptr(0), uintptr(0)) } for rc == SQLITE_OK && SQLITE_ROW == Xsqlite3changeset_next(tls, pIter) { Xsqlite3changeset_op(tls, pIter, bp+176, bp+184, bp+188, uintptr(0)) @@ -179090,16 +179148,16 @@ if 
(*SessionApplyCtx)(unsafe.Pointer(bp+48)).FnCol == 0 { schemaMismatch = 1 Xsqlite3_log(tls, SQLITE_SCHEMA, - ts+34805, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(bp + 200)))) + ts+34852, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer(bp + 200)))) } else if (*SessionApplyCtx)(unsafe.Pointer(bp+48)).FnCol < *(*int32)(unsafe.Pointer(bp + 184)) { schemaMismatch = 1 Xsqlite3_log(tls, SQLITE_SCHEMA, - ts+34849, + ts+34896, libc.VaList(bp+16, *(*uintptr)(unsafe.Pointer(bp + 200)), (*SessionApplyCtx)(unsafe.Pointer(bp+48)).FnCol, *(*int32)(unsafe.Pointer(bp + 184)))) } else if *(*int32)(unsafe.Pointer(bp + 184)) < nMinCol || libc.Xmemcmp(tls, (*SessionApplyCtx)(unsafe.Pointer(bp+48)).FabPK, *(*uintptr)(unsafe.Pointer(bp + 192)), uint64(*(*int32)(unsafe.Pointer(bp + 184)))) != 0 { schemaMismatch = 1 Xsqlite3_log(tls, SQLITE_SCHEMA, - ts+34920, libc.VaList(bp+40, *(*uintptr)(unsafe.Pointer(bp + 200)))) + ts+34967, libc.VaList(bp+40, *(*uintptr)(unsafe.Pointer(bp + 200)))) } else { (*SessionApplyCtx)(unsafe.Pointer(bp + 48)).FnCol = *(*int32)(unsafe.Pointer(bp + 184)) if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(bp + 200)), ts+12700) { @@ -179153,14 +179211,14 @@ } } } - Xsqlite3_exec(tls, db, ts+34980, uintptr(0), uintptr(0), uintptr(0)) + Xsqlite3_exec(tls, db, ts+35027, uintptr(0), uintptr(0), uintptr(0)) if flags&SQLITE_CHANGESETAPPLY_NOSAVEPOINT == 0 { if rc == SQLITE_OK { - rc = Xsqlite3_exec(tls, db, ts+35010, uintptr(0), uintptr(0), uintptr(0)) + rc = Xsqlite3_exec(tls, db, ts+35057, uintptr(0), uintptr(0), uintptr(0)) } else { - Xsqlite3_exec(tls, db, ts+35034, uintptr(0), uintptr(0), uintptr(0)) - Xsqlite3_exec(tls, db, ts+35010, uintptr(0), uintptr(0), uintptr(0)) + Xsqlite3_exec(tls, db, ts+35081, uintptr(0), uintptr(0), uintptr(0)) + Xsqlite3_exec(tls, db, ts+35057, uintptr(0), uintptr(0), uintptr(0)) } } @@ -180408,7 +180466,7 @@ fts5yy_pop_parser_stack(tls, fts5yypParser) } - sqlite3Fts5ParseError(tls, pParse, ts+35062, 0) + sqlite3Fts5ParseError(tls, pParse, ts+35109, 0) (*Fts5yyParser)(unsafe.Pointer(fts5yypParser)).FpParse = pParse @@ -180696,7 +180754,7 @@ _ = fts5yymajor sqlite3Fts5ParseError(tls, - pParse, ts+35090, libc.VaList(bp, fts5yyminor.Fn, fts5yyminor.Fp)) + pParse, ts+35137, libc.VaList(bp, fts5yyminor.Fn, fts5yyminor.Fp)) (*Fts5yyParser)(unsafe.Pointer(fts5yypParser)).FpParse = pParse @@ -180883,7 +180941,7 @@ if n < 0 { n = int32(libc.Xstrlen(tls, z)) } - (*HighlightContext)(unsafe.Pointer(p)).FzOut = Xsqlite3_mprintf(tls, ts+35121, libc.VaList(bp, (*HighlightContext)(unsafe.Pointer(p)).FzOut, n, z)) + (*HighlightContext)(unsafe.Pointer(p)).FzOut = Xsqlite3_mprintf(tls, ts+35168, libc.VaList(bp, (*HighlightContext)(unsafe.Pointer(p)).FzOut, n, z)) if (*HighlightContext)(unsafe.Pointer(p)).FzOut == uintptr(0) { *(*int32)(unsafe.Pointer(pRc)) = SQLITE_NOMEM } @@ -180951,7 +181009,7 @@ var iCol int32 if nVal != 3 { - var zErr uintptr = ts + 35128 + var zErr uintptr = ts + 35175 Xsqlite3_result_error(tls, pCtx, zErr, -1) return } @@ -181133,7 +181191,7 @@ var nCol int32 if nVal != 5 { - var zErr uintptr = ts + 35178 + var zErr uintptr = ts + 35225 Xsqlite3_result_error(tls, pCtx, zErr, -1) return } @@ -181457,13 +181515,13 @@ defer tls.Free(96) *(*[3]Builtin)(unsafe.Pointer(bp)) = [3]Builtin{ - {FzFunc: ts + 35226, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { + {FzFunc: ts + 35273, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr, int32, uintptr) }{fts5SnippetFunction}))}, - {FzFunc: ts + 35234, FxFunc: 
*(*uintptr)(unsafe.Pointer(&struct { + {FzFunc: ts + 35281, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr, int32, uintptr) }{fts5HighlightFunction}))}, - {FzFunc: ts + 35244, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { + {FzFunc: ts + 35291, FxFunc: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr, int32, uintptr) }{fts5Bm25Function}))}, } @@ -182014,7 +182072,7 @@ *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_OK var nCmd int32 = int32(libc.Xstrlen(tls, zCmd)) - if Xsqlite3_strnicmp(tls, ts+35249, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+35296, zCmd, nCmd) == 0 { var nByte int32 = int32(uint64(unsafe.Sizeof(int32(0))) * uint64(FTS5_MAX_PREFIX_INDEXES)) var p uintptr var bFirst int32 = 1 @@ -182041,14 +182099,14 @@ break } if int32(*(*int8)(unsafe.Pointer(p))) < '0' || int32(*(*int8)(unsafe.Pointer(p))) > '9' { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35256, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35303, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR break } if (*Fts5Config)(unsafe.Pointer(pConfig)).FnPrefix == FTS5_MAX_PREFIX_INDEXES { *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, - ts+35287, libc.VaList(bp, FTS5_MAX_PREFIX_INDEXES)) + ts+35334, libc.VaList(bp, FTS5_MAX_PREFIX_INDEXES)) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR break } @@ -182059,7 +182117,7 @@ } if nPre <= 0 || nPre >= 1000 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35320, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35367, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR break } @@ -182072,7 +182130,7 @@ return *(*int32)(unsafe.Pointer(bp + 40)) } - if Xsqlite3_strnicmp(tls, ts+35357, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+35404, zCmd, nCmd) == 0 { var p uintptr = zArg var nArg Sqlite3_int64 = Sqlite3_int64(libc.Xstrlen(tls, zArg) + uint64(1)) var azArg uintptr = sqlite3Fts5MallocZero(tls, bp+40, int64(uint64(unsafe.Sizeof(uintptr(0)))*uint64(nArg))) @@ -182081,7 +182139,7 @@ if azArg != 0 && pSpace != 0 { if (*Fts5Config)(unsafe.Pointer(pConfig)).FpTok != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35366, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35413, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { for nArg = int64(0); p != 0 && *(*int8)(unsafe.Pointer(p)) != 0; nArg++ { @@ -182100,7 +182158,7 @@ } } if p == uintptr(0) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35399, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35446, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { *(*int32)(unsafe.Pointer(bp + 40)) = sqlite3Fts5GetTokenizer(tls, pGlobal, @@ -182115,14 +182173,14 @@ return *(*int32)(unsafe.Pointer(bp + 40)) } - if Xsqlite3_strnicmp(tls, ts+35433, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+35480, zCmd, nCmd) == 0 { if (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent != FTS5_CONTENT_NORMAL { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35441, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35488, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { if *(*int8)(unsafe.Pointer(zArg)) != 0 { (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent = FTS5_CONTENT_EXTERNAL - (*Fts5Config)(unsafe.Pointer(pConfig)).FzContent = sqlite3Fts5Mprintf(tls, bp+40, ts+35473, libc.VaList(bp+8, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, zArg)) + 
(*Fts5Config)(unsafe.Pointer(pConfig)).FzContent = sqlite3Fts5Mprintf(tls, bp+40, ts+35520, libc.VaList(bp+8, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, zArg)) } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent = FTS5_CONTENT_NONE } @@ -182130,9 +182188,9 @@ return *(*int32)(unsafe.Pointer(bp + 40)) } - if Xsqlite3_strnicmp(tls, ts+35479, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+35526, zCmd, nCmd) == 0 { if (*Fts5Config)(unsafe.Pointer(pConfig)).FzContentRowid != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35493, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35540, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FzContentRowid = sqlite3Fts5Strndup(tls, bp+40, zArg, -1) @@ -182140,9 +182198,9 @@ return *(*int32)(unsafe.Pointer(bp + 40)) } - if Xsqlite3_strnicmp(tls, ts+35531, zCmd, nCmd) == 0 { + if Xsqlite3_strnicmp(tls, ts+35578, zCmd, nCmd) == 0 { if int32(*(*int8)(unsafe.Pointer(zArg))) != '0' && int32(*(*int8)(unsafe.Pointer(zArg))) != '1' || int32(*(*int8)(unsafe.Pointer(zArg + 1))) != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35542, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35589, 0) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize = libc.Bool32(int32(*(*int8)(unsafe.Pointer(zArg))) == '1') @@ -182154,17 +182212,17 @@ *(*[4]Fts5Enum)(unsafe.Pointer(bp + 48)) = [4]Fts5Enum{ {FzName: ts + 9378, FeVal: FTS5_DETAIL_NONE}, {FzName: ts + 18714}, - {FzName: ts + 35577, FeVal: FTS5_DETAIL_COLUMNS}, + {FzName: ts + 35624, FeVal: FTS5_DETAIL_COLUMNS}, {}, } if libc.AssignPtrInt32(bp+40, fts5ConfigSetEnum(tls, bp+48, zArg, pConfig+92)) != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35585, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35632, 0) } return *(*int32)(unsafe.Pointer(bp + 40)) } - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35616, libc.VaList(bp+24, nCmd, zCmd)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35663, libc.VaList(bp+24, nCmd, zCmd)) return SQLITE_ERROR } @@ -182211,15 +182269,15 @@ defer tls.Free(16) var rc int32 = SQLITE_OK - if 0 == Xsqlite3_stricmp(tls, zCol, ts+23560) || + if 0 == Xsqlite3_stricmp(tls, zCol, ts+23607) || 0 == Xsqlite3_stricmp(tls, zCol, ts+17625) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35644, libc.VaList(bp, zCol)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35691, libc.VaList(bp, zCol)) rc = SQLITE_ERROR } else if zArg != 0 { - if 0 == Xsqlite3_stricmp(tls, zArg, ts+35674) { + if 0 == Xsqlite3_stricmp(tls, zArg, ts+35721) { *(*U8)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(p)).FabUnindexed + uintptr((*Fts5Config)(unsafe.Pointer(p)).FnCol))) = U8(1) } else { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35684, libc.VaList(bp+8, zArg)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35731, libc.VaList(bp+8, zArg)) rc = SQLITE_ERROR } } @@ -182236,13 +182294,13 @@ *(*int32)(unsafe.Pointer(bp + 24)) = SQLITE_OK *(*Fts5Buffer)(unsafe.Pointer(bp + 32)) = Fts5Buffer{} - sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+35715, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(p)).FzContentRowid)) + sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+35762, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(p)).FzContentRowid)) if (*Fts5Config)(unsafe.Pointer(p)).FeContent != 
FTS5_CONTENT_NONE { for i = 0; i < (*Fts5Config)(unsafe.Pointer(p)).FnCol; i++ { if (*Fts5Config)(unsafe.Pointer(p)).FeContent == FTS5_CONTENT_EXTERNAL { - sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+35720, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(p)).FazCol + uintptr(i)*8)))) + sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+35767, libc.VaList(bp+8, *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(p)).FazCol + uintptr(i)*8)))) } else { - sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+35727, libc.VaList(bp+16, i)) + sqlite3Fts5BufferAppendPrintf(tls, bp+24, bp+32, ts+35774, libc.VaList(bp+16, i)) } } } @@ -182280,8 +182338,8 @@ (*Fts5Config)(unsafe.Pointer(pRet)).FzName = sqlite3Fts5Strndup(tls, bp+40, *(*uintptr)(unsafe.Pointer(azArg + 2*8)), -1) (*Fts5Config)(unsafe.Pointer(pRet)).FbColumnsize = 1 (*Fts5Config)(unsafe.Pointer(pRet)).FeDetail = FTS5_DETAIL_FULL - if *(*int32)(unsafe.Pointer(bp + 40)) == SQLITE_OK && Xsqlite3_stricmp(tls, (*Fts5Config)(unsafe.Pointer(pRet)).FzName, ts+23560) == 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35735, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pRet)).FzName)) + if *(*int32)(unsafe.Pointer(bp + 40)) == SQLITE_OK && Xsqlite3_stricmp(tls, (*Fts5Config)(unsafe.Pointer(pRet)).FzName, ts+23607) == 0 { + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35782, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pRet)).FzName)) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } @@ -182313,7 +182371,7 @@ if *(*int32)(unsafe.Pointer(bp + 40)) == SQLITE_OK { if z == uintptr(0) { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35764, libc.VaList(bp+8, zOrig)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+35811, libc.VaList(bp+8, zOrig)) *(*int32)(unsafe.Pointer(bp + 40)) = SQLITE_ERROR } else { if bOption != 0 { @@ -182350,14 +182408,14 @@ var zTail uintptr = uintptr(0) if (*Fts5Config)(unsafe.Pointer(pRet)).FeContent == FTS5_CONTENT_NORMAL { - zTail = ts + 35433 + zTail = ts + 35480 } else if (*Fts5Config)(unsafe.Pointer(pRet)).FbColumnsize != 0 { - zTail = ts + 35784 + zTail = ts + 35831 } if zTail != 0 { (*Fts5Config)(unsafe.Pointer(pRet)).FzContent = sqlite3Fts5Mprintf(tls, - bp+40, ts+35792, libc.VaList(bp+16, (*Fts5Config)(unsafe.Pointer(pRet)).FzDb, (*Fts5Config)(unsafe.Pointer(pRet)).FzName, zTail)) + bp+40, ts+35839, libc.VaList(bp+16, (*Fts5Config)(unsafe.Pointer(pRet)).FzDb, (*Fts5Config)(unsafe.Pointer(pRet)).FzName, zTail)) } } @@ -182406,7 +182464,7 @@ *(*int32)(unsafe.Pointer(bp + 48)) = SQLITE_OK var zSql uintptr - zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+35803, 0) + zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+35850, 0) for i = 0; zSql != 0 && i < (*Fts5Config)(unsafe.Pointer(pConfig)).FnCol; i++ { var zSep uintptr = func() uintptr { if i == 0 { @@ -182414,10 +182472,10 @@ } return ts + 15971 }() - zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+35819, libc.VaList(bp, zSql, zSep, *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(pConfig)).FazCol + uintptr(i)*8)))) + zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+35866, libc.VaList(bp, zSql, zSep, *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(pConfig)).FazCol + uintptr(i)*8)))) } - zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+35826, - libc.VaList(bp+24, zSql, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, ts+23560)) + zSql = sqlite3Fts5Mprintf(tls, bp+48, ts+35873, + libc.VaList(bp+24, zSql, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, ts+23607)) if zSql != 0 { 
*(*int32)(unsafe.Pointer(bp + 48)) = Xsqlite3_declare_vtab(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, zSql) @@ -182527,7 +182585,7 @@ var rc int32 = SQLITE_OK - if 0 == Xsqlite3_stricmp(tls, zKey, ts+35852) { + if 0 == Xsqlite3_stricmp(tls, zKey, ts+35899) { var pgsz int32 = 0 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { pgsz = Xsqlite3_value_int(tls, pVal) @@ -182537,7 +182595,7 @@ } else { (*Fts5Config)(unsafe.Pointer(pConfig)).Fpgsz = pgsz } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+35857) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+35904) { var nHashSize int32 = -1 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { nHashSize = Xsqlite3_value_int(tls, pVal) @@ -182547,7 +182605,7 @@ } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FnHashSize = nHashSize } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+35866) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+35913) { var nAutomerge int32 = -1 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { nAutomerge = Xsqlite3_value_int(tls, pVal) @@ -182560,7 +182618,7 @@ } (*Fts5Config)(unsafe.Pointer(pConfig)).FnAutomerge = nAutomerge } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+35876) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+35923) { var nUsermerge int32 = -1 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { nUsermerge = Xsqlite3_value_int(tls, pVal) @@ -182570,7 +182628,7 @@ } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FnUsermerge = nUsermerge } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+35886) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+35933) { var nCrisisMerge int32 = -1 if SQLITE_INTEGER == Xsqlite3_value_numeric_type(tls, pVal) { nCrisisMerge = Xsqlite3_value_int(tls, pVal) @@ -182586,7 +182644,7 @@ } (*Fts5Config)(unsafe.Pointer(pConfig)).FnCrisisMerge = nCrisisMerge } - } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+23560) { + } else if 0 == Xsqlite3_stricmp(tls, zKey, ts+23607) { var zIn uintptr = Xsqlite3_value_text(tls, pVal) rc = sqlite3Fts5ConfigParseRank(tls, zIn, bp, bp+8) @@ -182609,7 +182667,7 @@ bp := tls.Alloc(52) defer tls.Free(52) - var zSelect uintptr = ts + 35898 + var zSelect uintptr = ts + 35945 var zSql uintptr *(*uintptr)(unsafe.Pointer(bp + 40)) = uintptr(0) *(*int32)(unsafe.Pointer(bp + 32)) = SQLITE_OK @@ -182631,7 +182689,7 @@ for SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 40))) { var zK uintptr = Xsqlite3_column_text(tls, *(*uintptr)(unsafe.Pointer(bp + 40)), 0) var pVal uintptr = Xsqlite3_column_value(tls, *(*uintptr)(unsafe.Pointer(bp + 40)), 1) - if 0 == Xsqlite3_stricmp(tls, zK, ts+35930) { + if 0 == Xsqlite3_stricmp(tls, zK, ts+35977) { iVersion = Xsqlite3_value_int(tls, pVal) } else { *(*int32)(unsafe.Pointer(bp + 48)) = 0 @@ -182645,7 +182703,7 @@ *(*int32)(unsafe.Pointer(bp + 32)) = SQLITE_ERROR if (*Fts5Config)(unsafe.Pointer(pConfig)).FpzErrmsg != 0 { *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(pConfig)).FpzErrmsg)) = Xsqlite3_mprintf(tls, - ts+35938, + ts+35985, libc.VaList(bp+16, iVersion, FTS5_CURRENT_VERSION)) } } @@ -182743,7 +182801,7 @@ } } if int32(*(*int8)(unsafe.Pointer(z2))) == 0 { - sqlite3Fts5ParseError(tls, pParse, ts+36003, 0) + sqlite3Fts5ParseError(tls, pParse, ts+36050, 0) return FTS5_EOF } } @@ -182756,20 +182814,20 @@ { var z2 uintptr if sqlite3Fts5IsBareword(tls, *(*int8)(unsafe.Pointer(z))) == 0 { - sqlite3Fts5ParseError(tls, pParse, ts+36023, libc.VaList(bp, z)) + sqlite3Fts5ParseError(tls, pParse, ts+36070, libc.VaList(bp, z)) return FTS5_EOF } tok = 
FTS5_STRING for z2 = z + 1; sqlite3Fts5IsBareword(tls, *(*int8)(unsafe.Pointer(z2))) != 0; z2++ { } (*Fts5Token)(unsafe.Pointer(pToken)).Fn = int32((int64(z2) - int64(z)) / 1) - if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 2 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+36054, uint64(2)) == 0 { + if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 2 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+36101, uint64(2)) == 0 { tok = FTS5_OR } - if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 3 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+36057, uint64(3)) == 0 { + if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 3 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+36104, uint64(3)) == 0 { tok = FTS5_NOT } - if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 3 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+31439, uint64(3)) == 0 { + if (*Fts5Token)(unsafe.Pointer(pToken)).Fn == 3 && libc.Xmemcmp(tls, (*Fts5Token)(unsafe.Pointer(pToken)).Fp, ts+31486, uint64(3)) == 0 { tok = FTS5_AND } break @@ -184547,9 +184605,9 @@ bp := tls.Alloc(16) defer tls.Free(16) - if (*Fts5Token)(unsafe.Pointer(pTok)).Fn != 4 || libc.Xmemcmp(tls, ts+36061, (*Fts5Token)(unsafe.Pointer(pTok)).Fp, uint64(4)) != 0 { + if (*Fts5Token)(unsafe.Pointer(pTok)).Fn != 4 || libc.Xmemcmp(tls, ts+36108, (*Fts5Token)(unsafe.Pointer(pTok)).Fp, uint64(4)) != 0 { sqlite3Fts5ParseError(tls, - pParse, ts+35090, libc.VaList(bp, (*Fts5Token)(unsafe.Pointer(pTok)).Fn, (*Fts5Token)(unsafe.Pointer(pTok)).Fp)) + pParse, ts+35137, libc.VaList(bp, (*Fts5Token)(unsafe.Pointer(pTok)).Fn, (*Fts5Token)(unsafe.Pointer(pTok)).Fp)) } } @@ -184565,7 +184623,7 @@ var c int8 = *(*int8)(unsafe.Pointer((*Fts5Token)(unsafe.Pointer(p)).Fp + uintptr(i))) if int32(c) < '0' || int32(c) > '9' { sqlite3Fts5ParseError(tls, - pParse, ts+36066, libc.VaList(bp, (*Fts5Token)(unsafe.Pointer(p)).Fn, (*Fts5Token)(unsafe.Pointer(p)).Fp)) + pParse, ts+36113, libc.VaList(bp, (*Fts5Token)(unsafe.Pointer(p)).Fn, (*Fts5Token)(unsafe.Pointer(p)).Fp)) return } nNear = nNear*10 + (int32(*(*int8)(unsafe.Pointer((*Fts5Token)(unsafe.Pointer(p)).Fp + uintptr(i)))) - '0') @@ -184652,7 +184710,7 @@ } } if iCol == (*Fts5Config)(unsafe.Pointer(pConfig)).FnCol { - sqlite3Fts5ParseError(tls, pParse, ts+21897, libc.VaList(bp, z)) + sqlite3Fts5ParseError(tls, pParse, ts+21944, libc.VaList(bp, z)) } else { pRet = fts5ParseColset(tls, pParse, pColset, iCol) } @@ -184733,7 +184791,7 @@ *(*uintptr)(unsafe.Pointer(bp)) = pColset if (*Fts5Config)(unsafe.Pointer((*Fts5Parse)(unsafe.Pointer(pParse)).FpConfig)).FeDetail == FTS5_DETAIL_NONE { sqlite3Fts5ParseError(tls, pParse, - ts+36095, 0) + ts+36142, 0) } else { fts5ParseSetColset(tls, pParse, pExpr, pColset, bp) } @@ -184903,12 +184961,12 @@ (*Fts5ExprPhrase)(unsafe.Pointer(pPhrase)).FnTerm > 1 || (*Fts5ExprPhrase)(unsafe.Pointer(pPhrase)).FnTerm > 0 && (*Fts5ExprTerm)(unsafe.Pointer(pPhrase+32)).FbFirst != 0 { sqlite3Fts5ParseError(tls, pParse, - ts+36148, + ts+36195, libc.VaList(bp, func() uintptr { if (*Fts5ExprNearset)(unsafe.Pointer(pNear)).FnPhrase == 1 { - return ts + 36198 + return ts + 36245 } - return ts + 36061 + return ts + 36108 }())) Xsqlite3_free(tls, pRet) pRet = uintptr(0) @@ -185851,7 +185909,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpReader == uintptr(0) && rc == SQLITE_OK { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig rc = Xsqlite3_blob_open(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, - (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, 
(*Fts5Index)(unsafe.Pointer(p)).FzDataTbl, ts+36205, iRowid, 0, p+56) + (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl, ts+36252, iRowid, 0, p+56) } if rc == SQLITE_ERROR { @@ -185930,7 +185988,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpWriter == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig fts5IndexPrepareStmt(tls, p, p+64, Xsqlite3_mprintf(tls, - ts+36211, + ts+36258, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName))) if (*Fts5Index)(unsafe.Pointer(p)).Frc != 0 { return @@ -185955,7 +186013,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpDeleter == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig var zSql uintptr = Xsqlite3_mprintf(tls, - ts+36262, + ts+36309, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) if fts5IndexPrepareStmt(tls, p, p+72, zSql) != 0 { return @@ -185978,7 +186036,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpIdxDeleter == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig fts5IndexPrepareStmt(tls, p, p+88, Xsqlite3_mprintf(tls, - ts+36311, + ts+36358, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName))) } if (*Fts5Index)(unsafe.Pointer(p)).Frc == SQLITE_OK { @@ -186217,7 +186275,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).Frc == SQLITE_OK { if (*Fts5Index)(unsafe.Pointer(p)).FpDataVersion == uintptr(0) { (*Fts5Index)(unsafe.Pointer(p)).Frc = fts5IndexPrepareStmt(tls, p, p+112, - Xsqlite3_mprintf(tls, ts+36351, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer((*Fts5Index)(unsafe.Pointer(p)).FpConfig)).FzDb))) + Xsqlite3_mprintf(tls, ts+36398, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer((*Fts5Index)(unsafe.Pointer(p)).FpConfig)).FzDb))) if (*Fts5Index)(unsafe.Pointer(p)).Frc != 0 { return int64(0) } @@ -187416,7 +187474,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpIdxSelect == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig fts5IndexPrepareStmt(tls, p, p+96, Xsqlite3_mprintf(tls, - ts+36374, + ts+36421, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName))) } return (*Fts5Index)(unsafe.Pointer(p)).FpIdxSelect @@ -188882,7 +188940,7 @@ if (*Fts5Index)(unsafe.Pointer(p)).FpIdxWriter == uintptr(0) { var pConfig uintptr = (*Fts5Index)(unsafe.Pointer(p)).FpConfig fts5IndexPrepareStmt(tls, p, p+80, Xsqlite3_mprintf(tls, - ts+36458, + ts+36505, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName))) } @@ -189964,13 +190022,13 @@ if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { (*Fts5Index)(unsafe.Pointer(p)).FpConfig = pConfig (*Fts5Index)(unsafe.Pointer(p)).FnWorkUnit = FTS5_WORK_UNIT - (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl = sqlite3Fts5Mprintf(tls, bp+8, ts+36515, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) + (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl = sqlite3Fts5Mprintf(tls, bp+8, ts+36562, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) if (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl != 0 && bCreate != 0 { *(*int32)(unsafe.Pointer(bp + 8)) = sqlite3Fts5CreateTable(tls, - pConfig, ts+26432, ts+36523, 0, pzErr) + pConfig, ts+26479, ts+36570, 0, pzErr) if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { *(*int32)(unsafe.Pointer(bp + 8)) = sqlite3Fts5CreateTable(tls, pConfig, ts+12840, - ts+36558, + ts+36605, 1, pzErr) } if 
*(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { @@ -190223,7 +190281,7 @@ sqlite3Fts5Put32(tls, bp, iNew) rc = Xsqlite3_blob_open(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Index)(unsafe.Pointer(p)).FzDataTbl, - ts+36205, int64(FTS5_STRUCTURE_ROWID), 1, bp+8) + ts+36252, int64(FTS5_STRUCTURE_ROWID), 1, bp+8) if rc == SQLITE_OK { Xsqlite3_blob_write(tls, *(*uintptr)(unsafe.Pointer(bp + 8)), bp, 4, 0) rc = Xsqlite3_blob_close(tls, *(*uintptr)(unsafe.Pointer(bp + 8))) @@ -190337,7 +190395,7 @@ } fts5IndexPrepareStmt(tls, p, bp+24, Xsqlite3_mprintf(tls, - ts+36602, + ts+36649, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, (*Fts5StructureSegment)(unsafe.Pointer(pSeg)).FiSegid))) for (*Fts5Index)(unsafe.Pointer(p)).Frc == SQLITE_OK && SQLITE_ROW == Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp + 24))) { @@ -190507,7 +190565,7 @@ } else { (*Fts5Buffer)(unsafe.Pointer(bp + 16)).Fn = 0 fts5SegiterPoslist(tls, p, *(*uintptr)(unsafe.Pointer(bp))+96+uintptr((*Fts5CResult)(unsafe.Pointer((*Fts5Iter)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FaFirst+1*4)).FiFirst)*120, uintptr(0), bp+16) - sqlite3Fts5BufferAppendBlob(tls, p+52, bp+16, uint32(4), ts+36688) + sqlite3Fts5BufferAppendBlob(tls, p+52, bp+16, uint32(4), ts+36735) for 0 == sqlite3Fts5PoslistNext64(tls, (*Fts5Buffer)(unsafe.Pointer(bp+16)).Fp, (*Fts5Buffer)(unsafe.Pointer(bp+16)).Fn, bp+32, bp+40) { var iCol int32 = int32(*(*I64)(unsafe.Pointer(bp + 40)) >> 32) var iTokOff int32 = int32(*(*I64)(unsafe.Pointer(bp + 40)) & int64(0x7FFFFFFF)) @@ -190778,7 +190836,7 @@ if (*Fts5Config)(unsafe.Pointer(pConfig)).FbLock != 0 { (*Fts5Table)(unsafe.Pointer(pTab)).Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+36693, 0) + ts+36740, 0) return SQLITE_ERROR } @@ -191202,7 +191260,7 @@ (*Fts5Sorter)(unsafe.Pointer(pSorter)).FnIdx = nPhrase rc = fts5PrepareStatement(tls, pSorter, pConfig, - ts+36732, + ts+36779, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zRank, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, func() uintptr { if zRankArgs != 0 { @@ -191218,9 +191276,9 @@ }(), func() uintptr { if bDesc != 0 { - return ts + 36787 + return ts + 36834 } - return ts + 36792 + return ts + 36839 }())) (*Fts5Cursor)(unsafe.Pointer(pCsr)).FpSorter = pSorter @@ -191266,12 +191324,12 @@ (*Fts5Cursor)(unsafe.Pointer(pCsr)).FePlan = FTS5_PLAN_SPECIAL - if n == 5 && 0 == Xsqlite3_strnicmp(tls, ts+36796, z, n) { + if n == 5 && 0 == Xsqlite3_strnicmp(tls, ts+36843, z, n) { (*Fts5Cursor)(unsafe.Pointer(pCsr)).FiSpecial = I64(sqlite3Fts5IndexReads(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.FpIndex)) } else if n == 2 && 0 == Xsqlite3_strnicmp(tls, ts+6409, z, n) { (*Fts5Cursor)(unsafe.Pointer(pCsr)).FiSpecial = (*Fts5Cursor)(unsafe.Pointer(pCsr)).FiCsrId } else { - (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, ts+36802, libc.VaList(bp, n, z)) + (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, ts+36849, libc.VaList(bp, n, z)) rc = SQLITE_ERROR } @@ -191302,7 +191360,7 @@ var zRankArgs uintptr = (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRankArgs if zRankArgs != 0 { - var zSql uintptr = sqlite3Fts5Mprintf(tls, bp+16, ts+36830, libc.VaList(bp, zRankArgs)) + var zSql uintptr = sqlite3Fts5Mprintf(tls, bp+16, ts+36877, libc.VaList(bp, zRankArgs)) if zSql != 0 { *(*uintptr)(unsafe.Pointer(bp + 24)) = uintptr(0) 
*(*int32)(unsafe.Pointer(bp + 16)) = Xsqlite3_prepare_v3(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, zSql, -1, @@ -191333,7 +191391,7 @@ if *(*int32)(unsafe.Pointer(bp + 16)) == SQLITE_OK { pAux = fts5FindAuxiliary(tls, pTab, zRank) if pAux == uintptr(0) { - (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, ts+36840, libc.VaList(bp+8, zRank)) + (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, ts+36887, libc.VaList(bp+8, zRank)) *(*int32)(unsafe.Pointer(bp + 16)) = SQLITE_ERROR } } @@ -191365,14 +191423,14 @@ *(*int32)(unsafe.Pointer(pCsr + 80)) |= FTS5CSR_FREE_ZRANK } else if rc == SQLITE_ERROR { (*Sqlite3_vtab)(unsafe.Pointer((*Fts5Cursor)(unsafe.Pointer(pCsr)).Fbase.FpVtab)).FzErrMsg = Xsqlite3_mprintf(tls, - ts+36861, libc.VaList(bp, z)) + ts+36908, libc.VaList(bp, z)) } } else { if (*Fts5Config)(unsafe.Pointer(pConfig)).FzRank != 0 { (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRank = (*Fts5Config)(unsafe.Pointer(pConfig)).FzRank (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRankArgs = (*Fts5Config)(unsafe.Pointer(pConfig)).FzRankArgs } else { - (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRank = ts + 35244 + (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRank = ts + 35291 (*Fts5Cursor)(unsafe.Pointer(pCsr)).FzRankArgs = uintptr(0) } } @@ -191428,7 +191486,7 @@ goto __1 } (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+36693, 0) + ts+36740, 0) return SQLITE_ERROR __1: ; @@ -191645,7 +191703,7 @@ goto __40 } *(*uintptr)(unsafe.Pointer((*Fts5Config)(unsafe.Pointer(pConfig)).FpzErrmsg)) = Xsqlite3_mprintf(tls, - ts+36894, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) + ts+36941, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) rc = SQLITE_ERROR goto __41 __40: @@ -191790,28 +191848,28 @@ var rc int32 = SQLITE_OK *(*int32)(unsafe.Pointer(bp)) = 0 - if 0 == Xsqlite3_stricmp(tls, ts+36930, zCmd) { + if 0 == Xsqlite3_stricmp(tls, ts+36977, zCmd) { if (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NORMAL { fts5SetVtabError(tls, pTab, - ts+36941, 0) + ts+36988, 0) rc = SQLITE_ERROR } else { rc = sqlite3Fts5StorageDeleteAll(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage) } - } else if 0 == Xsqlite3_stricmp(tls, ts+37021, zCmd) { + } else if 0 == Xsqlite3_stricmp(tls, ts+37068, zCmd) { if (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NONE { fts5SetVtabError(tls, pTab, - ts+37029, 0) + ts+37076, 0) rc = SQLITE_ERROR } else { rc = sqlite3Fts5StorageRebuild(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage) } } else if 0 == Xsqlite3_stricmp(tls, ts+18313, zCmd) { rc = sqlite3Fts5StorageOptimize(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage) - } else if 0 == Xsqlite3_stricmp(tls, ts+37085, zCmd) { + } else if 0 == Xsqlite3_stricmp(tls, ts+37132, zCmd) { var nMerge int32 = Xsqlite3_value_int(tls, pVal) rc = sqlite3Fts5StorageMerge(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage, nMerge) - } else if 0 == Xsqlite3_stricmp(tls, ts+37091, zCmd) { + } else if 0 == Xsqlite3_stricmp(tls, ts+37138, zCmd) { var iArg int32 = Xsqlite3_value_int(tls, pVal) rc = sqlite3Fts5StorageIntegrity(tls, (*Fts5FullTable)(unsafe.Pointer(pTab)).FpStorage, iArg) } else { @@ -191882,12 +191940,12 @@ if eType0 == SQLITE_INTEGER && fts5IsContentless(tls, pTab) != 0 { (*Fts5FullTable)(unsafe.Pointer(pTab)).Fp.Fbase.FzErrMsg = Xsqlite3_mprintf(tls, - ts+37107, + ts+37154, libc.VaList(bp, func() uintptr { if nArg > 1 { - return ts + 21798 + return ts + 21845 } - 
return ts + 37144 + return ts + 37191 }(), (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) *(*int32)(unsafe.Pointer(bp + 16)) = SQLITE_ERROR } else if nArg == 1 { @@ -192517,7 +192575,7 @@ pCsr = fts5CursorFromCsrid(tls, (*Fts5Auxiliary)(unsafe.Pointer(pAux)).FpGlobal, iCsrId) if pCsr == uintptr(0) || (*Fts5Cursor)(unsafe.Pointer(pCsr)).FePlan == 0 { - var zErr uintptr = Xsqlite3_mprintf(tls, ts+37156, libc.VaList(bp, iCsrId)) + var zErr uintptr = Xsqlite3_mprintf(tls, ts+37203, libc.VaList(bp, iCsrId)) Xsqlite3_result_error(tls, context, zErr, -1) Xsqlite3_free(tls, zErr) } else { @@ -192761,7 +192819,7 @@ }()) if pMod == uintptr(0) { rc = SQLITE_ERROR - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+37177, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(azArg)))) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+37224, libc.VaList(bp, *(*uintptr)(unsafe.Pointer(azArg)))) } else { rc = (*struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 @@ -192780,7 +192838,7 @@ (*Fts5Config)(unsafe.Pointer(pConfig)).FpTokApi = pMod + 16 if rc != SQLITE_OK { if pzErr != 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+37199, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+37246, 0) } } else { (*Fts5Config)(unsafe.Pointer(pConfig)).FePattern = sqlite3Fts5TokenizerPattern(tls, @@ -192827,7 +192885,7 @@ var ppApi uintptr _ = nArg - ppApi = Xsqlite3_value_pointer(tls, *(*uintptr)(unsafe.Pointer(apArg)), ts+37230) + ppApi = Xsqlite3_value_pointer(tls, *(*uintptr)(unsafe.Pointer(apArg)), ts+37277) if ppApi != 0 { *(*uintptr)(unsafe.Pointer(ppApi)) = pGlobal } @@ -192836,7 +192894,7 @@ func fts5SourceIdFunc(tls *libc.TLS, pCtx uintptr, nArg int32, apUnused uintptr) { _ = nArg _ = apUnused - Xsqlite3_result_text(tls, pCtx, ts+37243, -1, libc.UintptrFromInt32(-1)) + Xsqlite3_result_text(tls, pCtx, ts+37290, -1, libc.UintptrFromInt32(-1)) } func fts5ShadowName(tls *libc.TLS, zName uintptr) int32 { @@ -192850,7 +192908,7 @@ } var azName2 = [5]uintptr{ - ts + 37334, ts + 35433, ts + 26432, ts + 35784, ts + 12840, + ts + 37381, ts + 35480, ts + 26479, ts + 35831, ts + 12840, } func fts5Init(tls *libc.TLS, db uintptr) int32 { @@ -192874,7 +192932,7 @@ (*Fts5Global)(unsafe.Pointer(pGlobal)).Fapi.FxFindTokenizer = *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, uintptr, uintptr) int32 }{fts5FindTokenizer})) - rc = Xsqlite3_create_module_v2(tls, db, ts+37341, uintptr(unsafe.Pointer(&fts5Mod)), p, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5ModuleDestroy}))) + rc = Xsqlite3_create_module_v2(tls, db, ts+37388, uintptr(unsafe.Pointer(&fts5Mod)), p, *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5ModuleDestroy}))) if rc == SQLITE_OK { rc = sqlite3Fts5IndexInit(tls, db) } @@ -192892,13 +192950,13 @@ } if rc == SQLITE_OK { rc = Xsqlite3_create_function(tls, - db, ts+37341, 1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { + db, ts+37388, 1, SQLITE_UTF8, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) }{fts5Fts5Func})), uintptr(0), uintptr(0)) } if rc == SQLITE_OK { rc = Xsqlite3_create_function(tls, - db, ts+37346, 0, + db, ts+37393, 0, SQLITE_UTF8|SQLITE_DETERMINISTIC|SQLITE_INNOCUOUS, p, *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, int32, uintptr) @@ -192955,17 +193013,17 @@ if *(*uintptr)(unsafe.Pointer(p + 40 + uintptr(eStmt)*8)) == uintptr(0) { *(*[11]uintptr)(unsafe.Pointer(bp + 128)) = [11]uintptr{ - ts + 37361, - 
ts + 37429, - ts + 37498, - ts + 37531, - ts + 37570, - ts + 37610, - ts + 37649, - ts + 37690, - ts + 37729, - ts + 37771, - ts + 37811, + ts + 37408, + ts + 37476, + ts + 37545, + ts + 37578, + ts + 37617, + ts + 37657, + ts + 37696, + ts + 37737, + ts + 37776, + ts + 37818, + ts + 37858, } var pC uintptr = (*Fts5Storage)(unsafe.Pointer(p)).FpConfig var zSql uintptr = uintptr(0) @@ -193067,18 +193125,18 @@ defer tls.Free(80) var rc int32 = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+37834, + ts+37881, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+37938, + ts+37985, libc.VaList(bp+48, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) } if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NORMAL { rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+37976, + ts+38023, libc.VaList(bp+64, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) } return rc @@ -193090,7 +193148,7 @@ if *(*int32)(unsafe.Pointer(pRc)) == SQLITE_OK { *(*int32)(unsafe.Pointer(pRc)) = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+38014, + ts+38061, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zTail, zName, zTail)) } } @@ -193102,14 +193160,14 @@ var pConfig uintptr = (*Fts5Storage)(unsafe.Pointer(pStorage)).FpConfig *(*int32)(unsafe.Pointer(bp)) = sqlite3Fts5StorageSync(tls, pStorage) - fts5StorageRenameOne(tls, pConfig, bp, ts+26432, zName) + fts5StorageRenameOne(tls, pConfig, bp, ts+26479, zName) fts5StorageRenameOne(tls, pConfig, bp, ts+12840, zName) - fts5StorageRenameOne(tls, pConfig, bp, ts+37334, zName) + fts5StorageRenameOne(tls, pConfig, bp, ts+37381, zName) if (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { - fts5StorageRenameOne(tls, pConfig, bp, ts+35784, zName) + fts5StorageRenameOne(tls, pConfig, bp, ts+35831, zName) } if (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NORMAL { - fts5StorageRenameOne(tls, pConfig, bp, ts+35433, zName) + fts5StorageRenameOne(tls, pConfig, bp, ts+35480, zName) } return *(*int32)(unsafe.Pointer(bp)) } @@ -193121,17 +193179,17 @@ var rc int32 *(*uintptr)(unsafe.Pointer(bp + 64)) = uintptr(0) - rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, bp+64, ts+38056, + rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, bp+64, ts+38103, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zPost, zDefn, func() uintptr { if bWithout != 0 { - return ts + 31085 + return ts + 31132 } return ts + 1544 }())) if *(*uintptr)(unsafe.Pointer(bp + 64)) != 0 { *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, - ts+38086, + ts+38133, libc.VaList(bp+40, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zPost, *(*uintptr)(unsafe.Pointer(bp + 64)))) Xsqlite3_free(tls, *(*uintptr)(unsafe.Pointer(bp + 64))) } @@ -193168,27 +193226,27 @@ } else { var i int32 var iOff int32 - 
Xsqlite3_snprintf(tls, nDefn, zDefn, ts+38130, 0) + Xsqlite3_snprintf(tls, nDefn, zDefn, ts+38177, 0) iOff = int32(libc.Xstrlen(tls, zDefn)) for i = 0; i < (*Fts5Config)(unsafe.Pointer(pConfig)).FnCol; i++ { - Xsqlite3_snprintf(tls, nDefn-iOff, zDefn+uintptr(iOff), ts+38153, libc.VaList(bp, i)) + Xsqlite3_snprintf(tls, nDefn-iOff, zDefn+uintptr(iOff), ts+38200, libc.VaList(bp, i)) iOff = iOff + int32(libc.Xstrlen(tls, zDefn+uintptr(iOff))) } - rc = sqlite3Fts5CreateTable(tls, pConfig, ts+35433, zDefn, 0, pzErr) + rc = sqlite3Fts5CreateTable(tls, pConfig, ts+35480, zDefn, 0, pzErr) } Xsqlite3_free(tls, zDefn) } if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { rc = sqlite3Fts5CreateTable(tls, - pConfig, ts+35784, ts+38159, 0, pzErr) + pConfig, ts+35831, ts+38206, 0, pzErr) } if rc == SQLITE_OK { rc = sqlite3Fts5CreateTable(tls, - pConfig, ts+37334, ts+38191, 1, pzErr) + pConfig, ts+37381, ts+38238, 1, pzErr) } if rc == SQLITE_OK { - rc = sqlite3Fts5StorageConfigValue(tls, p, ts+35930, uintptr(0), FTS5_CURRENT_VERSION) + rc = sqlite3Fts5StorageConfigValue(tls, p, ts+35977, uintptr(0), FTS5_CURRENT_VERSION) } } @@ -193394,12 +193452,12 @@ (*Fts5Storage)(unsafe.Pointer(p)).FbTotalsValid = 0 rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+38208, + ts+38255, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { rc = fts5ExecPrintf(tls, (*Fts5Config)(unsafe.Pointer(pConfig)).Fdb, uintptr(0), - ts+38258, + ts+38305, libc.VaList(bp+32, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName)) } @@ -193407,7 +193465,7 @@ rc = sqlite3Fts5IndexReinit(tls, (*Fts5Storage)(unsafe.Pointer(p)).FpIndex) } if rc == SQLITE_OK { - rc = sqlite3Fts5StorageConfigValue(tls, p, ts+35930, uintptr(0), FTS5_CURRENT_VERSION) + rc = sqlite3Fts5StorageConfigValue(tls, p, ts+35977, uintptr(0), FTS5_CURRENT_VERSION) } return rc } @@ -193583,7 +193641,7 @@ var zSql uintptr var rc int32 - zSql = Xsqlite3_mprintf(tls, ts+38287, + zSql = Xsqlite3_mprintf(tls, ts+38334, libc.VaList(bp, (*Fts5Config)(unsafe.Pointer(pConfig)).FzDb, (*Fts5Config)(unsafe.Pointer(pConfig)).FzName, zSuffix)) if zSql == uintptr(0) { rc = SQLITE_NOMEM @@ -193765,14 +193823,14 @@ if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FeContent == FTS5_CONTENT_NORMAL { *(*I64)(unsafe.Pointer(bp + 48)) = int64(0) - rc = fts5StorageCount(tls, p, ts+35433, bp+48) + rc = fts5StorageCount(tls, p, ts+35480, bp+48) if rc == SQLITE_OK && *(*I64)(unsafe.Pointer(bp + 48)) != (*Fts5Storage)(unsafe.Pointer(p)).FnTotalRow { rc = SQLITE_CORRUPT | int32(1)<<8 } } if rc == SQLITE_OK && (*Fts5Config)(unsafe.Pointer(pConfig)).FbColumnsize != 0 { *(*I64)(unsafe.Pointer(bp + 56)) = int64(0) - rc = fts5StorageCount(tls, p, ts+35784, bp+56) + rc = fts5StorageCount(tls, p, ts+35831, bp+56) if rc == SQLITE_OK && *(*I64)(unsafe.Pointer(bp + 56)) != (*Fts5Storage)(unsafe.Pointer(p)).FnTotalRow { rc = SQLITE_CORRUPT | int32(1)<<8 } @@ -193967,9 +194025,9 @@ libc.Xmemcpy(tls, p, uintptr(unsafe.Pointer(&aAsciiTokenChar)), uint64(unsafe.Sizeof(aAsciiTokenChar))) for i = 0; rc == SQLITE_OK && i < nArg; i = i + 2 { var zArg uintptr = *(*uintptr)(unsafe.Pointer(azArg + uintptr(i+1)*8)) - if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + 
uintptr(i)*8)), ts+38319) { + if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+38366) { fts5AsciiAddExceptions(tls, p, zArg, 1) - } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+38330) { + } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+38377) { fts5AsciiAddExceptions(tls, p, zArg, 0) } else { rc = SQLITE_ERROR @@ -194184,7 +194242,7 @@ } else { p = Xsqlite3_malloc(tls, int32(unsafe.Sizeof(Unicode61Tokenizer{}))) if p != 0 { - var zCat uintptr = ts + 38341 + var zCat uintptr = ts + 38388 var i int32 libc.Xmemset(tls, p, 0, uint64(unsafe.Sizeof(Unicode61Tokenizer{}))) @@ -194196,7 +194254,7 @@ } for i = 0; rc == SQLITE_OK && i < nArg; i = i + 2 { - if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+38350) { + if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+38397) { zCat = *(*uintptr)(unsafe.Pointer(azArg + uintptr(i+1)*8)) } } @@ -194207,18 +194265,18 @@ for i = 0; rc == SQLITE_OK && i < nArg; i = i + 2 { var zArg uintptr = *(*uintptr)(unsafe.Pointer(azArg + uintptr(i+1)*8)) - if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+38361) { + if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+38408) { if int32(*(*int8)(unsafe.Pointer(zArg))) != '0' && int32(*(*int8)(unsafe.Pointer(zArg))) != '1' && int32(*(*int8)(unsafe.Pointer(zArg))) != '2' || *(*int8)(unsafe.Pointer(zArg + 1)) != 0 { rc = SQLITE_ERROR } else { (*Unicode61Tokenizer)(unsafe.Pointer(p)).FeRemoveDiacritic = int32(*(*int8)(unsafe.Pointer(zArg))) - '0' } - } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+38319) { + } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+38366) { rc = fts5UnicodeAddExceptions(tls, p, zArg, 1) - } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+38330) { + } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+38377) { rc = fts5UnicodeAddExceptions(tls, p, zArg, 0) - } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+38350) { + } else if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+38397) { } else { rc = SQLITE_ERROR } @@ -194494,7 +194552,7 @@ var rc int32 = SQLITE_OK var pRet uintptr *(*uintptr)(unsafe.Pointer(bp)) = uintptr(0) - var zBase uintptr = ts + 38379 + var zBase uintptr = ts + 38426 if nArg > 0 { zBase = *(*uintptr)(unsafe.Pointer(azArg)) @@ -194636,7 +194694,7 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*int8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'a': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+38389, aBuf+uintptr(nBuf-2), uint64(2)) { + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+38436, aBuf+uintptr(nBuf-2), uint64(2)) { if fts5Porter_MGt1(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 } @@ -194644,11 +194702,11 @@ break case 'c': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+38392, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+38439, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+38397, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+38444, aBuf+uintptr(nBuf-4), 
uint64(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } @@ -194656,7 +194714,7 @@ break case 'e': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+38402, aBuf+uintptr(nBuf-2), uint64(2)) { + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+38449, aBuf+uintptr(nBuf-2), uint64(2)) { if fts5Porter_MGt1(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 } @@ -194664,7 +194722,7 @@ break case 'i': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+38405, aBuf+uintptr(nBuf-2), uint64(2)) { + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+38452, aBuf+uintptr(nBuf-2), uint64(2)) { if fts5Porter_MGt1(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 } @@ -194672,11 +194730,11 @@ break case 'l': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+38408, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+38455, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+38413, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+38460, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } @@ -194684,19 +194742,19 @@ break case 'n': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+38418, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+38465, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+38422, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+38469, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt1(tls, aBuf, nBuf-5) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+38428, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+38475, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt1(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } - } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+38433, aBuf+uintptr(nBuf-3), uint64(3)) { + } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+38480, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -194704,11 +194762,11 @@ break case 'o': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+38437, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+38484, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1_and_S_or_T(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } - } else if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+38441, aBuf+uintptr(nBuf-2), uint64(2)) { + } else if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+38488, aBuf+uintptr(nBuf-2), uint64(2)) { if fts5Porter_MGt1(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 } @@ -194716,7 +194774,7 @@ break case 's': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+38444, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+38491, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -194724,11 +194782,11 @@ break case 't': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+38448, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+38495, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, 
aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } - } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+38452, aBuf+uintptr(nBuf-3), uint64(3)) { + } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+38499, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -194736,7 +194794,7 @@ break case 'u': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+38456, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+38503, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -194744,7 +194802,7 @@ break case 'v': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+38460, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+38507, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -194752,7 +194810,7 @@ break case 'z': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+38464, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+38511, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt1(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -194768,24 +194826,24 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*int8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'a': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+38468, aBuf+uintptr(nBuf-2), uint64(2)) { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+38448, uint64(3)) + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+38515, aBuf+uintptr(nBuf-2), uint64(2)) { + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+38495, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 + 3 ret = 1 } break case 'b': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+38471, aBuf+uintptr(nBuf-2), uint64(2)) { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+38474, uint64(3)) + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+38518, aBuf+uintptr(nBuf-2), uint64(2)) { + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+38521, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 + 3 ret = 1 } break case 'i': - if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+38478, aBuf+uintptr(nBuf-2), uint64(2)) { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+38464, uint64(3)) + if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+38525, aBuf+uintptr(nBuf-2), uint64(2)) { + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-2), ts+38511, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 + 3 ret = 1 } @@ -194800,44 +194858,44 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*int8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'a': - if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+38481, aBuf+uintptr(nBuf-7), uint64(7)) { + if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+38528, aBuf+uintptr(nBuf-7), uint64(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+38448, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+38495, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } - } else if nBuf > 6 && 0 == libc.Xmemcmp(tls, ts+38489, aBuf+uintptr(nBuf-6), uint64(6)) { + } else if nBuf > 6 && 0 == libc.Xmemcmp(tls, ts+38536, aBuf+uintptr(nBuf-6), uint64(6)) { if fts5Porter_MGt0(tls, aBuf, nBuf-6) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-6), ts+38496, uint64(4)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-6), ts+38543, uint64(4)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 6 + 4 } } break case 'c': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+38501, aBuf+uintptr(nBuf-4), uint64(4)) { + if 
nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+38548, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+38397, uint64(4)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+38444, uint64(4)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 4 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+38506, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+38553, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+38392, uint64(4)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+38439, uint64(4)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 4 } } break case 'e': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+38511, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+38558, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+38464, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+38511, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 3 } } break case 'g': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+38516, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+38563, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+16837, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 3 @@ -194846,91 +194904,91 @@ break case 'l': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+38521, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+38568, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt0(tls, aBuf, nBuf-3) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+38474, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+38521, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 + 3 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+38525, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+38572, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+38389, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+38436, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 2 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+38530, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+38577, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+38433, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+38480, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 3 } - } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+38536, aBuf+uintptr(nBuf-3), uint64(3)) { + } else if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+38583, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt0(tls, aBuf, nBuf-3) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+38540, uint64(1)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+38587, uint64(1)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 + 1 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+38542, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+38589, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+38456, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+38503, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 3 } } break case 'o': - if nBuf > 7 && 0 == 
libc.Xmemcmp(tls, ts+38548, aBuf+uintptr(nBuf-7), uint64(7)) { + if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+38595, aBuf+uintptr(nBuf-7), uint64(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+38464, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+38511, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+38556, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+38603, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+38448, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+38495, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 3 } - } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+38562, aBuf+uintptr(nBuf-4), uint64(4)) { + } else if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+38609, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+38448, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+38495, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 3 } } break case 's': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+38567, aBuf+uintptr(nBuf-5), uint64(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+38614, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+38389, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+38436, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } - } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+38573, aBuf+uintptr(nBuf-7), uint64(7)) { + } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+38620, aBuf+uintptr(nBuf-7), uint64(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+38460, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+38507, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } - } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+38581, aBuf+uintptr(nBuf-7), uint64(7)) { + } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+38628, aBuf+uintptr(nBuf-7), uint64(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+38589, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+38636, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } - } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+38593, aBuf+uintptr(nBuf-7), uint64(7)) { + } else if nBuf > 7 && 0 == libc.Xmemcmp(tls, ts+38640, aBuf+uintptr(nBuf-7), uint64(7)) { if fts5Porter_MGt0(tls, aBuf, nBuf-7) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+38456, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-7), ts+38503, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 7 + 3 } } break case 't': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+38601, aBuf+uintptr(nBuf-5), uint64(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+38648, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+38389, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+38436, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+38607, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+38654, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+38460, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), 
ts+38507, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 3 } - } else if nBuf > 6 && 0 == libc.Xmemcmp(tls, ts+38613, aBuf+uintptr(nBuf-6), uint64(6)) { + } else if nBuf > 6 && 0 == libc.Xmemcmp(tls, ts+38660, aBuf+uintptr(nBuf-6), uint64(6)) { if fts5Porter_MGt0(tls, aBuf, nBuf-6) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-6), ts+38474, uint64(3)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-6), ts+38521, uint64(3)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 6 + 3 } } @@ -194945,16 +195003,16 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*int8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'a': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+38620, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+38667, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+38405, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-4), ts+38452, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 + 2 } } break case 's': - if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+38625, aBuf+uintptr(nBuf-4), uint64(4)) { + if nBuf > 4 && 0 == libc.Xmemcmp(tls, ts+38672, aBuf+uintptr(nBuf-4), uint64(4)) { if fts5Porter_MGt0(tls, aBuf, nBuf-4) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 4 } @@ -194962,21 +195020,21 @@ break case 't': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+38630, aBuf+uintptr(nBuf-5), uint64(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+38677, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+38405, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+38452, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } - } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+38636, aBuf+uintptr(nBuf-5), uint64(5)) { + } else if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+38683, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+38405, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+38452, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } } break case 'u': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+38589, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+38636, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt0(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 } @@ -194984,7 +195042,7 @@ break case 'v': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+38642, aBuf+uintptr(nBuf-5), uint64(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+38689, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 } @@ -194992,9 +195050,9 @@ break case 'z': - if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+38648, aBuf+uintptr(nBuf-5), uint64(5)) { + if nBuf > 5 && 0 == libc.Xmemcmp(tls, ts+38695, aBuf+uintptr(nBuf-5), uint64(5)) { if fts5Porter_MGt0(tls, aBuf, nBuf-5) != 0 { - libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+38389, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-5), ts+38436, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 5 + 2 } } @@ -195009,12 +195067,12 @@ var nBuf int32 = *(*int32)(unsafe.Pointer(pnBuf)) switch int32(*(*int8)(unsafe.Pointer(aBuf + uintptr(nBuf-2)))) { case 'e': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+38654, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+38701, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_MGt0(tls, aBuf, nBuf-3) != 0 { - libc.Xmemcpy(tls, 
aBuf+uintptr(nBuf-3), ts+38658, uint64(2)) + libc.Xmemcpy(tls, aBuf+uintptr(nBuf-3), ts+38705, uint64(2)) *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 + 2 } - } else if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+38661, aBuf+uintptr(nBuf-2), uint64(2)) { + } else if nBuf > 2 && 0 == libc.Xmemcmp(tls, ts+38708, aBuf+uintptr(nBuf-2), uint64(2)) { if fts5Porter_Vowel(tls, aBuf, nBuf-2) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 2 ret = 1 @@ -195023,7 +195081,7 @@ break case 'n': - if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+38664, aBuf+uintptr(nBuf-3), uint64(3)) { + if nBuf > 3 && 0 == libc.Xmemcmp(tls, ts+38711, aBuf+uintptr(nBuf-3), uint64(3)) { if fts5Porter_Vowel(tls, aBuf, nBuf-3) != 0 { *(*int32)(unsafe.Pointer(pnBuf)) = nBuf - 3 ret = 1 @@ -195179,7 +195237,7 @@ (*TrigramTokenizer)(unsafe.Pointer(pNew)).FbFold = 1 for i = 0; rc == SQLITE_OK && i < nArg; i = i + 2 { var zArg uintptr = *(*uintptr)(unsafe.Pointer(azArg + uintptr(i+1)*8)) - if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+38668) { + if 0 == Xsqlite3_stricmp(tls, *(*uintptr)(unsafe.Pointer(azArg + uintptr(i)*8)), ts+38715) { if int32(*(*int8)(unsafe.Pointer(zArg))) != '0' && int32(*(*int8)(unsafe.Pointer(zArg))) != '1' || *(*int8)(unsafe.Pointer(zArg + 1)) != 0 { rc = SQLITE_ERROR } else { @@ -195359,22 +195417,22 @@ defer tls.Free(128) *(*[4]BuiltinTokenizer)(unsafe.Pointer(bp)) = [4]BuiltinTokenizer{ - {FzName: ts + 38379, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { + {FzName: ts + 38426, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 }{fts5UnicodeCreate})), FxDelete: *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5UnicodeDelete})), FxTokenize: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, int32, uintptr) int32 }{fts5UnicodeTokenize}))}}, - {FzName: ts + 38683, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { + {FzName: ts + 38730, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 }{fts5AsciiCreate})), FxDelete: *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5AsciiDelete})), FxTokenize: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, int32, uintptr) int32 }{fts5AsciiTokenize}))}}, - {FzName: ts + 38689, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { + {FzName: ts + 38736, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 }{fts5PorterCreate})), FxDelete: *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5PorterDelete})), FxTokenize: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, int32, uintptr) int32 }{fts5PorterTokenize}))}}, - {FzName: ts + 38696, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { + {FzName: ts + 38743, Fx: Fts5_tokenizer{FxCreate: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32 }{fts5TriCreate})), FxDelete: *(*uintptr)(unsafe.Pointer(&struct{ f func(*libc.TLS, uintptr) }{fts5TriDelete})), FxTokenize: *(*uintptr)(unsafe.Pointer(&struct { f func(*libc.TLS, uintptr, uintptr, int32, uintptr, int32, uintptr) int32 @@ -196517,14 +196575,14 @@ var zCopy uintptr = sqlite3Fts5Strndup(tls, bp+8, zType, -1) if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK { 
sqlite3Fts5Dequote(tls, zCopy) - if Xsqlite3_stricmp(tls, zCopy, ts+38704) == 0 { + if Xsqlite3_stricmp(tls, zCopy, ts+38751) == 0 { *(*int32)(unsafe.Pointer(peType)) = FTS5_VOCAB_COL - } else if Xsqlite3_stricmp(tls, zCopy, ts+38708) == 0 { + } else if Xsqlite3_stricmp(tls, zCopy, ts+38755) == 0 { *(*int32)(unsafe.Pointer(peType)) = FTS5_VOCAB_ROW - } else if Xsqlite3_stricmp(tls, zCopy, ts+38712) == 0 { + } else if Xsqlite3_stricmp(tls, zCopy, ts+38759) == 0 { *(*int32)(unsafe.Pointer(peType)) = FTS5_VOCAB_INSTANCE } else { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+38721, libc.VaList(bp, zCopy)) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+38768, libc.VaList(bp, zCopy)) *(*int32)(unsafe.Pointer(bp + 8)) = SQLITE_ERROR } Xsqlite3_free(tls, zCopy) @@ -196550,19 +196608,19 @@ defer tls.Free(36) *(*[3]uintptr)(unsafe.Pointer(bp + 8)) = [3]uintptr{ - ts + 38755, - ts + 38795, - ts + 38830, + ts + 38802, + ts + 38842, + ts + 38877, } var pRet uintptr = uintptr(0) *(*int32)(unsafe.Pointer(bp + 32)) = SQLITE_OK var bDb int32 - bDb = libc.Bool32(argc == 6 && libc.Xstrlen(tls, *(*uintptr)(unsafe.Pointer(argv + 1*8))) == uint64(4) && libc.Xmemcmp(tls, ts+24721, *(*uintptr)(unsafe.Pointer(argv + 1*8)), uint64(4)) == 0) + bDb = libc.Bool32(argc == 6 && libc.Xstrlen(tls, *(*uintptr)(unsafe.Pointer(argv + 1*8))) == uint64(4) && libc.Xmemcmp(tls, ts+24768, *(*uintptr)(unsafe.Pointer(argv + 1*8)), uint64(4)) == 0) if argc != 5 && bDb == 0 { - *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+38873, 0) + *(*uintptr)(unsafe.Pointer(pzErr)) = Xsqlite3_mprintf(tls, ts+38920, 0) *(*int32)(unsafe.Pointer(bp + 32)) = SQLITE_ERROR } else { var nByte int32 @@ -196695,11 +196753,11 @@ if (*Fts5VocabTable)(unsafe.Pointer(pTab)).FbBusy != 0 { (*Sqlite3_vtab)(unsafe.Pointer(pVTab)).FzErrMsg = Xsqlite3_mprintf(tls, - ts+38906, libc.VaList(bp, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) + ts+38953, libc.VaList(bp, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) return SQLITE_ERROR } zSql = sqlite3Fts5Mprintf(tls, bp+64, - ts+38937, + ts+38984, libc.VaList(bp+16, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) if zSql != 0 { *(*int32)(unsafe.Pointer(bp + 64)) = Xsqlite3_prepare_v2(tls, (*Fts5VocabTable)(unsafe.Pointer(pTab)).Fdb, zSql, -1, bp+72, uintptr(0)) @@ -196723,7 +196781,7 @@ *(*uintptr)(unsafe.Pointer(bp + 72)) = uintptr(0) if *(*int32)(unsafe.Pointer(bp + 64)) == SQLITE_OK { (*Sqlite3_vtab)(unsafe.Pointer(pVTab)).FzErrMsg = Xsqlite3_mprintf(tls, - ts+38988, libc.VaList(bp+48, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) + ts+39035, libc.VaList(bp+48, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Db, (*Fts5VocabTable)(unsafe.Pointer(pTab)).FzFts5Tbl)) *(*int32)(unsafe.Pointer(bp + 64)) = SQLITE_ERROR } } else { @@ -197118,7 +197176,7 @@ func sqlite3Fts5VocabInit(tls *libc.TLS, pGlobal uintptr, db uintptr) int32 { var p uintptr = pGlobal - return Xsqlite3_create_module_v2(tls, db, ts+39014, uintptr(unsafe.Pointer(&fts5Vocab)), p, uintptr(0)) + return Xsqlite3_create_module_v2(tls, db, ts+39061, uintptr(unsafe.Pointer(&fts5Vocab)), p, uintptr(0)) } var fts5Vocab = Sqlite3_module{ @@ -197140,7 +197198,7 @@ // ************* End of stmt.c 
*********************************************** // Return the source-id for this library func Xsqlite3_sourceid(tls *libc.TLS) uintptr { - return ts + 39024 + return ts + 39071 } func init() { @@ -198207,5 +198265,5 @@ *(*func(*libc.TLS, uintptr, uintptr) uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&winVfs)) + 160)) = winNextSystemCall } -var ts1 = "3.41.0\x00ATOMIC_INTRINSICS=1\x00COMPILER=msvc-1900\x00DEFAULT_AUTOVACUUM\x00DEFAULT_CACHE_SIZE=-2000\x00DEFAULT_FILE_FORMAT=4\x00DEFAULT_JOURNAL_SIZE_LIMIT=-1\x00DEFAULT_MEMSTATUS=0\x00DEFAULT_MMAP_SIZE=0\x00DEFAULT_PAGE_SIZE=4096\x00DEFAULT_PCACHE_INITSZ=20\x00DEFAULT_RECURSIVE_TRIGGERS\x00DEFAULT_SECTOR_SIZE=4096\x00DEFAULT_SYNCHRONOUS=2\x00DEFAULT_WAL_AUTOCHECKPOINT=1000\x00DEFAULT_WAL_SYNCHRONOUS=2\x00DEFAULT_WORKER_THREADS=0\x00ENABLE_COLUMN_METADATA\x00ENABLE_FTS5\x00ENABLE_GEOPOLY\x00ENABLE_MATH_FUNCTIONS\x00ENABLE_MEMORY_MANAGEMENT\x00ENABLE_OFFSET_SQL_FUNC\x00ENABLE_PREUPDATE_HOOK\x00ENABLE_RBU\x00ENABLE_RTREE\x00ENABLE_SESSION\x00ENABLE_SNAPSHOT\x00ENABLE_STAT4\x00ENABLE_UNLOCK_NOTIFY\x00LIKE_DOESNT_MATCH_BLOBS\x00MALLOC_SOFT_LIMIT=1024\x00MAX_ATTACHED=10\x00MAX_COLUMN=2000\x00MAX_COMPOUND_SELECT=500\x00MAX_DEFAULT_PAGE_SIZE=8192\x00MAX_EXPR_DEPTH=1000\x00MAX_FUNCTION_ARG=127\x00MAX_LENGTH=1000000000\x00MAX_LIKE_PATTERN_LENGTH=50000\x00MAX_MMAP_SIZE=0x7fff0000\x00MAX_PAGE_COUNT=1073741823\x00MAX_PAGE_SIZE=65536\x00MAX_SQL_LENGTH=1000000000\x00MAX_TRIGGER_DEPTH=1000\x00MAX_VARIABLE_NUMBER=32766\x00MAX_VDBE_OP=250000000\x00MAX_WORKER_THREADS=8\x00MUTEX_NOOP\x00SOUNDEX\x00SYSTEM_MALLOC\x00TEMP_STORE=1\x00THREADSAFE=1\x00BINARY\x00ANY\x00BLOB\x00INT\x00INTEGER\x00REAL\x00TEXT\x0020b:20e\x0020c:20e\x0020e\x0040f-21a-21d\x00now\x00local time unavailable\x00second\x00minute\x00hour\x00\x00\x00day\x00\x00\x00\x00month\x00\x00year\x00\x00\x00auto\x00julianday\x00localtime\x00unixepoch\x00utc\x00weekday \x00start of \x00month\x00year\x00day\x00%02d\x00%06.3f\x00%03d\x00%.16g\x00%lld\x00%04d\x00date\x00time\x00datetime\x00strftime\x00current_time\x00current_timestamp\x00current_date\x00failed to allocate %u bytes of memory\x00failed memory resize %u to %u bytes\x00out of memory\x000123456789ABCDEF0123456789abcdef\x00-x0\x00X0\x00%\x00NaN\x00Inf\x00\x00NULL\x00(NULL)\x00.\x00(join-%u)\x00(subquery-%u)\x00thstndrd\x00922337203685477580\x00API call with %s database connection 
pointer\x00unopened\x00invalid\x00Savepoint\x00AutoCommit\x00Transaction\x00Checkpoint\x00JournalMode\x00Vacuum\x00VFilter\x00VUpdate\x00Init\x00Goto\x00Gosub\x00InitCoroutine\x00Yield\x00MustBeInt\x00Jump\x00Once\x00If\x00IfNot\x00IsType\x00Not\x00IfNullRow\x00SeekLT\x00SeekLE\x00SeekGE\x00SeekGT\x00IfNotOpen\x00IfNoHope\x00NoConflict\x00NotFound\x00Found\x00SeekRowid\x00NotExists\x00Last\x00IfSmaller\x00SorterSort\x00Sort\x00Rewind\x00SorterNext\x00Prev\x00Next\x00IdxLE\x00IdxGT\x00IdxLT\x00Or\x00And\x00IdxGE\x00RowSetRead\x00RowSetTest\x00Program\x00FkIfZero\x00IsNull\x00NotNull\x00Ne\x00Eq\x00Gt\x00Le\x00Lt\x00Ge\x00ElseEq\x00IfPos\x00IfNotZero\x00DecrJumpZero\x00IncrVacuum\x00VNext\x00Filter\x00PureFunc\x00Function\x00Return\x00EndCoroutine\x00HaltIfNull\x00Halt\x00Integer\x00Int64\x00String\x00BeginSubrtn\x00Null\x00SoftNull\x00Blob\x00Variable\x00Move\x00Copy\x00SCopy\x00IntCopy\x00FkCheck\x00ResultRow\x00CollSeq\x00AddImm\x00RealAffinity\x00Cast\x00Permutation\x00Compare\x00IsTrue\x00ZeroOrNull\x00Offset\x00Column\x00TypeCheck\x00Affinity\x00MakeRecord\x00Count\x00ReadCookie\x00SetCookie\x00ReopenIdx\x00BitAnd\x00BitOr\x00ShiftLeft\x00ShiftRight\x00Add\x00Subtract\x00Multiply\x00Divide\x00Remainder\x00Concat\x00OpenRead\x00OpenWrite\x00BitNot\x00OpenDup\x00OpenAutoindex\x00String8\x00OpenEphemeral\x00SorterOpen\x00SequenceTest\x00OpenPseudo\x00Close\x00ColumnsUsed\x00SeekScan\x00SeekHit\x00Sequence\x00NewRowid\x00Insert\x00RowCell\x00Delete\x00ResetCount\x00SorterCompare\x00SorterData\x00RowData\x00Rowid\x00NullRow\x00SeekEnd\x00IdxInsert\x00SorterInsert\x00IdxDelete\x00DeferredSeek\x00IdxRowid\x00FinishSeek\x00Destroy\x00Clear\x00ResetSorter\x00CreateBtree\x00SqlExec\x00ParseSchema\x00LoadAnalysis\x00DropTable\x00DropIndex\x00Real\x00DropTrigger\x00IntegrityCk\x00RowSetAdd\x00Param\x00FkCounter\x00MemMax\x00OffsetLimit\x00AggInverse\x00AggStep\x00AggStep1\x00AggValue\x00AggFinal\x00Expire\x00CursorLock\x00CursorUnlock\x00TableLock\x00VBegin\x00VCreate\x00VDestroy\x00VOpen\x00VInitIn\x00VColumn\x00VRename\x00Pagecount\x00MaxPgcnt\x00ClrSubtype\x00FilterAdd\x00Trace\x00CursorHint\x00ReleaseReg\x00Noop\x00Explain\x00Abortable\x00AreFileApisANSI\x00CharLowerW\x00CharUpperW\x00CloseHandle\x00CreateFileA\x00CreateFileW\x00CreateFileMappingA\x00CreateFileMappingW\x00CreateMutexW\x00DeleteFileA\x00DeleteFileW\x00FileTimeToLocalFileTime\x00FileTimeToSystemTime\x00FlushFileBuffers\x00FormatMessageA\x00FormatMessageW\x00FreeLibrary\x00GetCurrentProcessId\x00GetDiskFreeSpaceA\x00GetDiskFreeSpaceW\x00GetFileAttributesA\x00GetFileAttributesW\x00GetFileAttributesExW\x00GetFileSize\x00GetFullPathNameA\x00GetFullPathNameW\x00GetLastError\x00GetProcAddressA\x00GetSystemInfo\x00GetSystemTime\x00GetSystemTimeAsFileTime\x00GetTempPathA\x00GetTempPathW\x00GetTickCount\x00GetVersionExA\x00GetVersionExW\x00HeapAlloc\x00HeapCreate\x00HeapDestroy\x00HeapFree\x00HeapReAlloc\x00HeapSize\x00HeapValidate\x00HeapCompact\x00LoadLibraryA\x00LoadLibraryW\x00LocalFree\x00LockFile\x00LockFileEx\x00MapViewOfFile\x00MultiByteToWideChar\x00QueryPerformanceCounter\x00ReadFile\x00SetEndOfFile\x00SetFilePointer\x00Sleep\x00SystemTimeToFileTime\x00UnlockFile\x00UnlockFileEx\x00UnmapViewOfFile\x00WideCharToMultiByte\x00WriteFile\x00CreateEventExW\x00WaitForSingleObject\x00WaitForSingleObjectEx\x00SetFilePointerEx\x00GetFileInformationByHandleEx\x00MapViewOfFileFromApp\x00CreateFile2\x00LoadPackagedLibrary\x00GetTickCount64\x00GetNativeSystemInfo\x00OutputDebugStringA\x00OutputDebugStringW\x00GetProcessHeap\x00CreateFileMappi
ngFromApp\x00InterlockedCompareExchange\x00UuidCreate\x00UuidCreateSequential\x00FlushViewOfFile\x00%s\x00OsError 0x%lx (%lu)\x00os_win.c:%d: (%lu) %s(%s) - %s\x00delayed %dms for lock/sharing conflict at line %d\x00winSeekFile\x00winClose\x00winRead\x00winWrite1\x00winWrite2\x00winTruncate1\x00winTruncate2\x00winSync1\x00winSync2\x00winFileSize\x00winUnlockReadLock\x00winUnlock\x00winLockSharedMemory\x00%s-shm\x00readonly_shm\x00winOpenShm\x00winShmMap1\x00winShmMap2\x00winShmMap3\x00winUnmapfile1\x00winUnmapfile2\x00winMapfile1\x00winMapfile2\x00etilqs_\x00winGetTempname1\x00winGetTempname2\x00winGetTempname3\x00winGetTempname4\x00winGetTempname5\x00abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\x00exclusive\x00winOpen\x00psow\x00winDelete\x00winAccess\x00%s%c%s\x00winFullPathname1\x00winFullPathname2\x00winFullPathname3\x00winFullPathname4\x00win32\x00win32-longpath\x00win32-none\x00win32-longpath-none\x00memdb\x00memdb(%p,%lld)\x00PRAGMA \"%w\".page_count\x00ATTACH x AS %Q\x00recovered %d pages from %s\x00-journal\x00-wal\x00nolock\x00immutable\x00PRAGMA table_list\x00recovered %d frames from WAL file %s\x00cannot limit WAL size: %s\x00SQLite format 3\x00:memory:\x00@ \x00\n\x00invalid page number %d\x002nd reference to page %d\x00Failed to read ptrmap key=%d\x00Bad ptr map entry key=%d expected=(%d,%d) got=(%d,%d)\x00failed to get page %d\x00freelist leaf count too big on page %d\x00%s is %d but should be %d\x00size\x00overflow list length\x00Page %u: \x00unable to get the page. error code=%d\x00btreeInitPage() returns error code %d\x00free space corruption\x00On tree page %u cell %d: \x00On page %u at right child: \x00Offset %d out of range %d..%d\x00Extends off end of page\x00Rowid %lld out of order\x00Child page depth differs\x00Multiple uses for byte %u of page %u\x00Fragmentation of %d bytes reported as %d on page %u\x00Main freelist: \x00max rootpage (%d) disagrees with header (%d)\x00incremental_vacuum enabled with a max rootpage of zero\x00Page %d is never used\x00Pointer map page %d is referenced\x00unknown database %s\x00destination database is in use\x00source and destination must be distinct\x00%!.15g\x00-\x00%s%s\x00k(%d\x00B\x00,%s%s%s\x00N.\x00)\x00%.18s-%s\x00%s(%d)\x00%d\x00(blob)\x00vtab:%p\x00%c%u\x00]\x00program\x00?\x008\x0016LE\x0016BE\x00addr\x00opcode\x00p1\x00p2\x00p3\x00p4\x00p5\x00comment\x00id\x00parent\x00notused\x00detail\x00%.4c%s%.16c\x00MJ delete: %s\x00MJ collide: %s\x00-mj%06X9%02X\x00FOREIGN KEY constraint failed\x00a CHECK constraint\x00a generated column\x00an index\x00non-deterministic use of %s() in %s\x00API called with finalized prepared statement\x00API called with NULL prepared statement\x00string or blob too big\x00bind on a busy prepared statement: [%s]\x00-- \x00'%.*q'\x00zeroblob(%d)\x00x'\x00%02x\x00'\x00%s constraint failed\x00%z: %s\x00abort at %d in [%s]: %s\x00cannot store %s value in %s column %s.%s\x00cannot open savepoint - SQL statements in progress\x00no such savepoint: %s\x00cannot release savepoint - SQL statements in progress\x00cannot commit transaction - SQL statements in progress\x00cannot start a transaction within a transaction\x00cannot rollback - no transaction is active\x00cannot commit - no transaction is active\x00database schema has changed\x00index corruption\x00sqlite_master\x00SELECT*FROM\"%w\".%s WHERE %s ORDER BY rowid\x00too many levels of trigger recursion\x00cannot change %s wal mode from within a transaction\x00into\x00out of\x00database table is locked: %s\x00ValueList\x00-- 
%s\x00statement aborts at %d: [%s] %s\x00NOT NULL\x00UNIQUE\x00CHECK\x00FOREIGN KEY\x00cannot open value of type %s\x00null\x00real\x00integer\x00no such rowid: %lld\x00cannot open virtual table: %s\x00cannot open table without rowid: %s\x00cannot open view: %s\x00no such column: \"%s\"\x00foreign key\x00indexed\x00cannot open %s column for writing\x00sqlite_\x00sqlite_temp_master\x00sqlite_temp_schema\x00sqlite_schema\x00main\x00*\x00new\x00old\x00excluded\x00misuse of aliased aggregate %s\x00misuse of aliased window function %s\x00row value misused\x00double-quoted string literal: \"%w\"\x00coalesce\x00no such column\x00ambiguous column name\x00%s: %s.%s.%s\x00%s: %s.%s\x00%s: %s\x00partial index WHERE clauses\x00index expressions\x00CHECK constraints\x00generated columns\x00%s prohibited in %s\x00true\x00false\x00the \".\" operator\x00second argument to %#T() must be a constant between 0.0 and 1.0\x00not authorized to use function: %#T\x00non-deterministic functions\x00%#T() may not be used as a window function\x00window\x00aggregate\x00misuse of %s function %#T()\x00no such function: %#T\x00wrong number of arguments to function %#T()\x00FILTER may not be used with non-aggregate %#T()\x00subqueries\x00parameters\x00%r %s BY term out of range - should be between 1 and %d\x00too many terms in ORDER BY clause\x00ORDER\x00%r ORDER BY term does not match any column in the result set\x00too many terms in %s BY clause\x00HAVING clause on a non-aggregate query\x00GROUP\x00aggregate functions are not allowed in the GROUP BY clause\x00Expression tree is too large (maximum depth %d)\x00IN(...) element has %d term%s - expected %d\x00s\x000\x00too many arguments on function %T\x00unsafe use of %#T()\x00variable number must be between ?1 and ?%d\x00too many SQL variables\x00%d columns assigned %d values\x00too many columns in %s\x00_ROWID_\x00ROWID\x00OID\x00USING ROWID SEARCH ON TABLE %s FOR IN-OPERATOR\x00USING INDEX %s FOR IN-OPERATOR\x00sub-select returns %d columns - expected %d\x00REUSE LIST SUBQUERY %d\x00%sLIST SUBQUERY %d\x00CORRELATED \x00REUSE SUBQUERY %d\x00%sSCALAR SUBQUERY %d\x001\x000x\x00hex literal too big: %s%#T\x00generated column loop on \"%s\"\x00blob\x00text\x00numeric\x00flexnum\x00none\x00misuse of aggregate: %#T()\x00unknown function: %#T()\x00RAISE() may only be used within a trigger-program\x00B\x00C\x00D\x00E\x00F\x00table %s may not be altered\x00SELECT 1 FROM \"%w\".sqlite_master WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%' AND sqlite_rename_test(%Q, sql, type, name, %d, %Q, %d)=NULL \x00SELECT 1 FROM temp.sqlite_master WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%' AND sqlite_rename_test(%Q, sql, type, name, 1, %Q, %d)=NULL \x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_quotefix(%Q, sql)WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%'\x00UPDATE temp.sqlite_master SET sql = sqlite_rename_quotefix('temp', sql)WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%'\x00there is already another table or index with this name: %s\x00table\x00view %s may not be altered\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, %d) WHERE (type!='index' OR tbl_name=%Q COLLATE nocase)AND name NOT LIKE 'sqliteX_%%' ESCAPE 'X'\x00UPDATE %Q.sqlite_master SET tbl_name = %Q, name = CASE WHEN type='table' THEN %Q WHEN name LIKE 'sqliteX_autoindex%%' ESCAPE 'X' AND type='index' THEN 'sqlite_autoindex_' || %Q || 
substr(name,%d+18) ELSE name END WHERE tbl_name=%Q COLLATE nocase AND (type='table' OR type='index' OR type='trigger');\x00sqlite_sequence\x00UPDATE \"%w\".sqlite_sequence set name = %Q WHERE name = %Q\x00UPDATE sqlite_temp_schema SET sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, 1), tbl_name = CASE WHEN tbl_name=%Q COLLATE nocase AND sqlite_rename_test(%Q, sql, type, name, 1, 'after rename', 0) THEN %Q ELSE tbl_name END WHERE type IN ('view', 'trigger')\x00after rename\x00SELECT raise(ABORT,%Q) FROM \"%w\".\"%w\"\x00Cannot add a PRIMARY KEY column\x00Cannot add a UNIQUE column\x00Cannot add a REFERENCES column with non-NULL default value\x00Cannot add a NOT NULL column with default value NULL\x00Cannot add a column with non-constant default\x00cannot add a STORED column\x00UPDATE \"%w\".sqlite_master SET sql = printf('%%.%ds, ',sql) || %Q || substr(sql,1+length(printf('%%.%ds',sql))) WHERE type = 'table' AND name = %Q\x00SELECT CASE WHEN quick_check GLOB 'CHECK*' THEN raise(ABORT,'CHECK constraint failed') ELSE raise(ABORT,'NOT NULL constraint failed') END FROM pragma_quick_check(%Q,%Q) WHERE quick_check GLOB 'CHECK*' OR quick_check GLOB 'NULL*'\x00virtual tables may not be altered\x00Cannot add a column to a view\x00sqlite_altertab_%s\x00view\x00virtual table\x00cannot %s %s \"%s\"\x00drop column from\x00rename columns of\x00no such column: \"%T\"\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, %d) WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND (type != 'index' OR tbl_name = %Q)\x00UPDATE temp.sqlite_master SET sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, 1) WHERE type IN ('trigger', 'view')\x00error in %s %s%s%s: %s\x00 \x00CREATE \x00\"%w\" \x00%Q%s\x00%.*s%s\x00cannot drop %s column: \"%s\"\x00PRIMARY KEY\x00cannot drop column \"%s\": no other columns exist\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_drop_column(%d, sql, %d) WHERE (type=='table' AND tbl_name=%Q COLLATE nocase)\x00after drop column\x00sqlite_rename_column\x00sqlite_rename_table\x00sqlite_rename_test\x00sqlite_drop_column\x00sqlite_rename_quotefix\x00CREATE TABLE %Q.%s(%s)\x00DELETE FROM %Q.%s WHERE %s=%Q\x00DELETE FROM %Q.%s\x00sqlite_stat1\x00tbl,idx,stat\x00sqlite_stat4\x00tbl,idx,neq,nlt,ndlt,sample\x00sqlite_stat3\x00stat_init\x00stat_push\x00%llu\x00 %llu\x00%llu \x00stat_get\x00sqlite\\_%\x00BBB\x00idx\x00tbl\x00unordered*\x00sz=[0-9]*\x00noskipscan*\x00SELECT idx,count(*) FROM %Q.sqlite_stat4 GROUP BY idx\x00SELECT idx,neq,nlt,ndlt,sample FROM %Q.sqlite_stat4\x00SELECT tbl,idx,stat FROM %Q.sqlite_stat1\x00x\x00\x00too many attached databases - max %d\x00database %s is already in use\x00database is already attached\x00attached databases must use the same text encoding as main database\x00unable to open database: %s\x00no such database: %s\x00cannot detach database %s\x00database %s is locked\x00sqlite_detach\x00sqlite_attach\x00%s cannot use variables\x00%s %T cannot reference objects in database %s\x00authorizer malfunction\x00%s.%s\x00%s.%z\x00access to %z is prohibited\x00not authorized\x00pragma_\x00no such view\x00no such table\x00corrupt database\x00unknown database %T\x00object name reserved for internal use: %s\x00temporary table name must be unqualified\x00%s %T already exists\x00there is already an index named %s\x00sqlite_returning\x00cannot use RETURNING in a trigger\x00too many columns on %s\x00always\x00generated\x00duplicate column name: %s\x00default value of column [%s] is not constant\x00cannot use DEFAULT on a 
generated column\x00generated columns cannot be part of the PRIMARY KEY\x00table \"%s\" has more than one primary key\x00AUTOINCREMENT is only allowed on an INTEGER PRIMARY KEY\x00virtual tables cannot use computed columns\x00virtual\x00stored\x00error in generated column \"%s\"\x00,\x00\n \x00,\n \x00\n)\x00CREATE TABLE \x00 TEXT\x00 NUM\x00 INT\x00 REAL\x00unknown datatype for %s.%s: \"%s\"\x00missing datatype for %s.%s\x00AUTOINCREMENT not allowed on WITHOUT ROWID tables\x00PRIMARY KEY missing on table %s\x00must have at least one non-generated column\x00TABLE\x00VIEW\x00CREATE %s %.*s\x00UPDATE %Q.sqlite_master SET type='%s', name=%Q, tbl_name=%Q, rootpage=#%d, sql=%Q WHERE rowid=#%d\x00CREATE TABLE %Q.sqlite_sequence(name,seq)\x00tbl_name='%q' AND type!='trigger'\x00parameters are not allowed in views\x00view %s is circularly defined\x00corrupt schema\x00UPDATE %Q.sqlite_master SET rootpage=%d WHERE #%d AND rootpage=#%d\x00sqlite_stat%d\x00DELETE FROM %Q.sqlite_sequence WHERE name=%Q\x00DELETE FROM %Q.sqlite_master WHERE tbl_name=%Q and type!='trigger'\x00stat\x00table %s may not be dropped\x00use DROP TABLE to delete table %s\x00use DROP VIEW to delete view %s\x00foreign key on %s should reference only one column of table %T\x00number of columns in foreign key does not match the number of columns in the referenced table\x00unknown column \"%s\" in foreign key definition\x00unsupported use of NULLS %s\x00FIRST\x00LAST\x00index\x00cannot create a TEMP index on non-TEMP table \"%s\"\x00table %s may not be indexed\x00views may not be indexed\x00virtual tables may not be indexed\x00there is already a table named %s\x00index %s already exists\x00sqlite_autoindex_%s_%d\x00expressions prohibited in PRIMARY KEY and UNIQUE constraints\x00conflicting ON CONFLICT clauses specified\x00invalid rootpage\x00CREATE%s INDEX %.*s\x00 UNIQUE\x00INSERT INTO %Q.sqlite_master VALUES('index',%Q,%Q,#%d,%Q);\x00name='%q' AND type='index'\x00no such index: %S\x00index associated with UNIQUE or PRIMARY KEY constraint cannot be dropped\x00DELETE FROM %Q.sqlite_master WHERE name=%Q AND type='index'\x00too many FROM clause terms, max: %d\x00a JOIN clause is required before %s\x00ON\x00USING\x00BEGIN\x00ROLLBACK\x00COMMIT\x00RELEASE\x00unable to open a temporary database file for storing temporary tables\x00index '%q'\x00, \x00%s.rowid\x00unable to identify the object to be reindexed\x00duplicate WITH table name: %s\x00no such collation sequence: %s\x00unsafe use of virtual table \"%s\"\x00table %s may not be modified\x00cannot modify %s because it is a view\x00rows deleted\x00integer overflow\x00%.*f\x00LIKE or GLOB pattern too complex\x00ESCAPE expression must be a single 
character\x00%!.20e\x00%Q\x00?000\x00MATCH\x00like\x00implies_nonnull_row\x00expr_compare\x00expr_implies_expr\x00affinity\x00soundex\x00load_extension\x00sqlite_compileoption_used\x00sqlite_compileoption_get\x00unlikely\x00likelihood\x00likely\x00sqlite_offset\x00ltrim\x00rtrim\x00trim\x00min\x00max\x00typeof\x00subtype\x00length\x00instr\x00printf\x00format\x00unicode\x00char\x00abs\x00round\x00upper\x00lower\x00hex\x00unhex\x00ifnull\x00random\x00randomblob\x00nullif\x00sqlite_version\x00sqlite_source_id\x00sqlite_log\x00quote\x00last_insert_rowid\x00changes\x00total_changes\x00replace\x00zeroblob\x00substr\x00substring\x00sum\x00total\x00avg\x00count\x00group_concat\x00glob\x00ceil\x00ceiling\x00floor\x00trunc\x00ln\x00log\x00log10\x00log2\x00exp\x00pow\x00power\x00mod\x00acos\x00asin\x00atan\x00atan2\x00cos\x00sin\x00tan\x00cosh\x00sinh\x00tanh\x00acosh\x00asinh\x00atanh\x00sqrt\x00radians\x00degrees\x00pi\x00sign\x00iif\x00foreign key mismatch - \"%w\" referencing \"%w\"\x00cannot INSERT into generated column \"%s\"\x00table %S has no column named %s\x00table %S has %d columns but %d values were supplied\x00%d values for %d columns\x00UPSERT not implemented for virtual table \"%s\"\x00cannot UPSERT a view\x00rows inserted\x00sqlite3_extension_init\x00sqlite3_\x00lib\x00_init\x00no entry point [%s] in shared library [%s]\x00error during initialization: %s\x00unable to open shared library [%.*s]\x00dll\x00automatic extension loading failed: %s\x00seq\x00from\x00to\x00on_update\x00on_delete\x00match\x00cid\x00name\x00type\x00notnull\x00dflt_value\x00pk\x00hidden\x00schema\x00ncol\x00wr\x00strict\x00seqno\x00desc\x00coll\x00key\x00builtin\x00enc\x00narg\x00flags\x00wdth\x00hght\x00flgs\x00unique\x00origin\x00partial\x00rowid\x00fkid\x00file\x00busy\x00checkpointed\x00database\x00status\x00cache_size\x00timeout\x00analysis_limit\x00application_id\x00auto_vacuum\x00automatic_index\x00busy_timeout\x00cache_spill\x00case_sensitive_like\x00cell_size_check\x00checkpoint_fullfsync\x00collation_list\x00compile_options\x00count_changes\x00data_store_directory\x00data_version\x00database_list\x00default_cache_size\x00defer_foreign_keys\x00empty_result_callbacks\x00encoding\x00foreign_key_check\x00foreign_key_list\x00foreign_keys\x00freelist_count\x00full_column_names\x00fullfsync\x00function_list\x00hard_heap_limit\x00ignore_check_constraints\x00incremental_vacuum\x00index_info\x00index_list\x00index_xinfo\x00integrity_check\x00journal_mode\x00journal_size_limit\x00legacy_alter_table\x00locking_mode\x00max_page_count\x00mmap_size\x00module_list\x00optimize\x00page_count\x00page_size\x00pragma_list\x00query_only\x00quick_check\x00read_uncommitted\x00recursive_triggers\x00reverse_unordered_selects\x00schema_version\x00secure_delete\x00short_column_names\x00shrink_memory\x00soft_heap_limit\x00synchronous\x00table_info\x00table_list\x00table_xinfo\x00temp_store\x00temp_store_directory\x00threads\x00trusted_schema\x00user_version\x00wal_autocheckpoint\x00wal_checkpoint\x00writable_schema\x00onoffalseyestruextrafull\x00normal\x00full\x00incremental\x00memory\x00temporary storage cannot be changed from within a transaction\x00SET NULL\x00SET DEFAULT\x00CASCADE\x00RESTRICT\x00NO ACTION\x00delete\x00persist\x00off\x00truncate\x00wal\x00w\x00a\x00sissii\x00utf8\x00utf16le\x00utf16be\x00-%T\x00fast\x00not a writable directory\x00Safety level may not be changed inside a 
transaction\x00reset\x00issisii\x00issisi\x00SELECT*FROM\"%w\"\x00shadow\x00sssiii\x00iisX\x00isiX\x00c\x00u\x00isisi\x00iss\x00is\x00iissssss\x00NONE\x00siX\x00*** in database %s ***\n\x00row not in PRIMARY KEY order for %s\x00NULL value in %s.%s\x00non-%s value in %s.%s\x00NUMERIC value in %s.%s\x00C\x00TEXT value in %s.%s\x00CHECK constraint failed in %s\x00row \x00 missing from index \x00 values differ from index \x00non-unique entry in index \x00wrong # of entries in index \x00ok\x00unsupported encoding: %s\x00restart\x00ANALYZE \"%w\".\"%w\"\x00UTF8\x00UTF-8\x00UTF-16le\x00UTF-16be\x00UTF16le\x00UTF16be\x00UTF-16\x00UTF16\x00CREATE TABLE x\x00%c\"%s\"\x00(\"%s\"\x00,arg HIDDEN\x00,schema HIDDEN\x00PRAGMA \x00%Q.\x00=%Q\x00error in %s %s after %s: %s\x00malformed database schema (%s)\x00%z - %s\x00rename\x00drop column\x00add column\x00orphan index\x00CREATE TABLE x(type text,name text,tbl_name text,rootpage int,sql text)\x00unsupported file format\x00SELECT*FROM\"%w\".%s ORDER BY rowid\x00database schema is locked: %s\x00statement too long\x00unknown join type: %T%s%T%s%T\x00naturaleftouterightfullinnercross\x00a NATURAL join may not have an ON or USING clause\x00cannot join using column %s - column not present in both tables\x00ambiguous reference to %s in USING()\x00UNION ALL\x00INTERSECT\x00EXCEPT\x00UNION\x00USE TEMP B-TREE FOR %s\x00USE TEMP B-TREE FOR %sORDER BY\x00RIGHT PART OF \x00column%d\x00%.*z:%u\x00NUM\x00cannot use window functions in recursive queries\x00recursive aggregate queries not supported\x00SETUP\x00RECURSIVE STEP\x00SCAN %d CONSTANT ROW%s\x00S\x00COMPOUND QUERY\x00LEFT-MOST SUBQUERY\x00%s USING TEMP B-TREE\x00all VALUES must have the same number of terms\x00SELECTs to the left and right of %s do not have the same number of result columns\x00MERGE (%s)\x00LEFT\x00RIGHT\x00no such index: %s\x00'%s' is not a function\x00no such index: \"%s\"\x00multiple references to recursive table: %s\x00circular reference: %s\x00table %s has %d values for %d columns\x00multiple recursive references: %s\x00recursive reference in a subquery: %s\x00%!S\x00too many references to \"%s\": max 65535\x00access to view \"%s\" prohibited\x00..%s\x00%s.%s.%s\x00no such table: %s\x00no tables specified\x00too many columns in result set\x00DISTINCT aggregates must have exactly one argument\x00USE TEMP B-TREE FOR %s(DISTINCT)\x00SCAN %s%s%s\x00 USING COVERING INDEX \x00target object/alias may not appear in FROM clause: %s\x00expected %d columns for '%s' but got %d\x00CO-ROUTINE %!S\x00MATERIALIZE %!S\x00DISTINCT\x00GROUP BY\x00sqlite3_get_table() called with two or more incompatible queries\x00temporary trigger may not have qualified name\x00trigger\x00cannot create triggers on virtual tables\x00trigger %T already exists\x00cannot create trigger on system table\x00cannot create %s trigger on view: %S\x00BEFORE\x00AFTER\x00cannot create INSTEAD OF trigger on table: %S\x00trigger \"%s\" may not write to shadow table \"%s\"\x00INSERT INTO %Q.sqlite_master VALUES('trigger',%Q,%Q,0,'CREATE TRIGGER %q')\x00type='trigger' AND name='%q'\x00no such trigger: %S\x00DELETE FROM %Q.sqlite_master WHERE name=%Q AND type='trigger'\x00%s RETURNING is not available on virtual tables\x00DELETE\x00UPDATE\x00RETURNING may not use \"TABLE.*\" wildcards\x00-- TRIGGER %s\x00cannot UPDATE generated column \"%s\"\x00no such column: %s\x00rows updated\x00%r \x00%sON CONFLICT clause does not match any PRIMARY KEY or UNIQUE constraint\x00CRE\x00INS\x00cannot VACUUM from within a transaction\x00cannot VACUUM - SQL 
statements in progress\x00non-text filename\x00ATTACH %Q AS vacuum_db\x00output file already exists\x00SELECT sql FROM \"%w\".sqlite_schema WHERE type='table'AND name<>'sqlite_sequence' AND coalesce(rootpage,1)>0\x00SELECT sql FROM \"%w\".sqlite_schema WHERE type='index'\x00SELECT'INSERT INTO vacuum_db.'||quote(name)||' SELECT*FROM\"%w\".'||quote(name)FROM vacuum_db.sqlite_schema WHERE type='table'AND coalesce(rootpage,1)>0\x00INSERT INTO vacuum_db.sqlite_schema SELECT*FROM \"%w\".sqlite_schema WHERE type IN('view','trigger') OR(type='table'AND rootpage=0)\x00CREATE VIRTUAL TABLE %T\x00UPDATE %Q.sqlite_master SET type='table', name=%Q, tbl_name=%Q, rootpage=0, sql=%Q WHERE rowid=#%d\x00name=%Q AND sql=%Q\x00vtable constructor called recursively: %s\x00vtable constructor failed: %s\x00vtable constructor did not declare schema: %s\x00no such module: %s\x00\x00 AND \x00(\x00 (\x00%s=?\x00ANY(%s)\x00>\x00<\x00%s %S\x00SEARCH\x00SCAN\x00AUTOMATIC PARTIAL COVERING INDEX\x00AUTOMATIC COVERING INDEX\x00COVERING INDEX %s\x00INDEX %s\x00 USING \x00 USING INTEGER PRIMARY KEY (%s\x00>? AND %s\x00%c?)\x00 VIRTUAL TABLE INDEX %d:%s\x00 LEFT-JOIN\x00BLOOM FILTER ON %S (\x00rowid=?\x00MULTI-INDEX OR\x00INDEX %d\x00RIGHT-JOIN %s\x00regexp\x00ON clause references tables to its right\x00NOCASE\x00too many arguments on %s() - max %d\x00automatic index on %s(%s)\x00auto-index\x00%s.xBestIndex malfunction\x00abbreviated query algorithm search\x00no query solution\x00at most %d tables in a join\x00SCAN CONSTANT ROW\x00second argument to nth_value must be a positive integer\x00argument of ntile must be a positive integer\x00row_number\x00dense_rank\x00rank\x00percent_rank\x00cume_dist\x00ntile\x00last_value\x00nth_value\x00first_value\x00lead\x00lag\x00no such window: %s\x00RANGE with offset PRECEDING/FOLLOWING requires one ORDER BY expression\x00FILTER clause may only be used with aggregate window functions\x00misuse of aggregate: %s()\x00unsupported frame specification\x00PARTITION clause\x00ORDER BY clause\x00frame specification\x00cannot override %s of window: %s\x00DISTINCT is not supported for window functions\x00frame starting offset must be a non-negative integer\x00frame ending offset must be a non-negative integer\x00frame starting offset must be a non-negative number\x00frame ending offset must be a non-negative number\x00%s clause should come after %s not before\x00ORDER BY\x00LIMIT\x00too many terms in compound SELECT\x00syntax error after column name \"%.*s\"\x00parser stack overflow\x00unknown table option: %.*s\x00set list\x00near \"%T\": syntax error\x00qualified table names are not allowed on INSERT, UPDATE, and DELETE statements within triggers\x00the INDEXED BY clause is not allowed on UPDATE or DELETE statements within triggers\x00the NOT INDEXED clause is not allowed on UPDATE or DELETE statements within triggers\x00incomplete input\x00unrecognized token: \"%T\"\x00%s in \"%s\"\x00create\x00temp\x00temporary\x00end\x00explain\x00unable to close due to unfinalized statements or unfinished backups\x00unknown error\x00abort due to ROLLBACK\x00another row available\x00no more rows available\x00not an error\x00SQL logic error\x00access permission denied\x00query aborted\x00database is locked\x00database table is locked\x00attempt to write a readonly database\x00interrupted\x00disk I/O error\x00database disk image is malformed\x00unknown operation\x00database or disk is full\x00unable to open database file\x00locking protocol\x00constraint failed\x00datatype mismatch\x00bad parameter or other API 
misuse\x00authorization denied\x00column index out of range\x00file is not a database\x00notification message\x00warning message\x00unable to delete/modify user-function due to active statements\x00unable to use function %s in the requested context\x00unknown database: %s\x00unable to delete/modify collation sequence due to active statements\x00file:\x00localhost\x00invalid uri authority: %.*s\x00vfs\x00cache\x00mode\x00access\x00no such %s mode: %s\x00%s mode not allowed: %s\x00no such vfs: %s\x00shared\x00private\x00ro\x00rw\x00rwc\x00RTRIM\x00\x00\x00\x00%s at line %d of [%.10s]\x00database corruption\x00misuse\x00cannot open file\x00no such table column: %s.%s\x00SQLITE_\x00database is deadlocked\x00array\x00object\x000123456789abcdef\x00JSON cannot hold BLOB values\x00malformed JSON\x00[0]\x00JSON path error near '%q'\x00json_%s() needs an odd number of arguments\x00$[\x00$.\x00json_object() requires an even number of arguments\x00json_object() labels must be TEXT\x00set\x00insert\x00[]\x00{}\x00CREATE TABLE x(key,value,type,atom,id,parent,fullkey,path,json HIDDEN,root HIDDEN)\x00.%.*s\x00[%d]\x00$\x00json\x00json_array\x00json_array_length\x00json_extract\x00->\x00->>\x00json_insert\x00json_object\x00json_patch\x00json_quote\x00json_remove\x00json_replace\x00json_set\x00json_type\x00json_valid\x00json_group_array\x00json_group_object\x00json_each\x00json_tree\x00%s_node\x00data\x00DROP TABLE '%q'.'%q_node';DROP TABLE '%q'.'%q_rowid';DROP TABLE '%q'.'%q_parent';\x00RtreeMatchArg\x00SELECT * FROM %Q.%Q\x00UNIQUE constraint failed: %s.%s\x00rtree constraint failed: %s.(%s<=%s)\x00ALTER TABLE %Q.'%q_node' RENAME TO \"%w_node\";ALTER TABLE %Q.'%q_parent' RENAME TO \"%w_parent\";ALTER TABLE %Q.'%q_rowid' RENAME TO \"%w_rowid\";\x00SELECT stat FROM %Q.sqlite_stat1 WHERE tbl = '%q_rowid'\x00node\x00CREATE TABLE \"%w\".\"%w_rowid\"(rowid INTEGER PRIMARY KEY,nodeno\x00,a%d\x00);CREATE TABLE \"%w\".\"%w_node\"(nodeno INTEGER PRIMARY KEY,data);\x00CREATE TABLE \"%w\".\"%w_parent\"(nodeno INTEGER PRIMARY KEY,parentnode);\x00INSERT INTO \"%w\".\"%w_node\"VALUES(1,zeroblob(%d))\x00INSERT INTO\"%w\".\"%w_rowid\"(rowid,nodeno)VALUES(?1,?2)ON CONFLICT(rowid)DO UPDATE SET nodeno=excluded.nodeno\x00SELECT * FROM \"%w\".\"%w_rowid\" WHERE rowid=?1\x00UPDATE \"%w\".\"%w_rowid\"SET \x00a%d=coalesce(?%d,a%d)\x00a%d=?%d\x00 WHERE rowid=?1\x00INSERT OR REPLACE INTO '%q'.'%q_node' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_node' WHERE nodeno = ?1\x00SELECT nodeno FROM '%q'.'%q_rowid' WHERE rowid = ?1\x00INSERT OR REPLACE INTO '%q'.'%q_rowid' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_rowid' WHERE rowid = ?1\x00SELECT parentnode FROM '%q'.'%q_parent' WHERE nodeno = ?1\x00INSERT OR REPLACE INTO '%q'.'%q_parent' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_parent' WHERE nodeno = ?1\x00PRAGMA %Q.page_size\x00SELECT length(data) FROM '%q'.'%q_node' WHERE nodeno = 1\x00undersize RTree blobs in \"%q_node\"\x00Wrong number of columns for an rtree table\x00Too few columns for an rtree table\x00Too many columns for an rtree table\x00Auxiliary rtree columns must be last\x00CREATE TABLE x(%.*s INT\x00,%.*s\x00);\x00,%.*s REAL\x00,%.*s INT\x00{%lld\x00 %g\x00}\x00Invalid argument to rtreedepth()\x00%z%s%z\x00SELECT data FROM %Q.'%q_node' WHERE nodeno=?\x00Node %lld missing from database\x00SELECT parentnode FROM %Q.'%q_parent' WHERE nodeno=?1\x00SELECT nodeno FROM %Q.'%q_rowid' WHERE rowid=?1\x00Mapping (%lld -> %lld) missing from %s table\x00%_rowid\x00%_parent\x00Found (%lld -> %lld) in %s table, expected (%lld -> %lld)\x00Dimension 
%d of cell %d on node %lld is corrupt\x00Dimension %d of cell %d on node %lld is corrupt relative to parent\x00Node %lld is too small (%d bytes)\x00Rtree depth out of range (%d)\x00Node %lld is too small for cell count of %d (%d bytes)\x00SELECT count(*) FROM %Q.'%q%s'\x00Wrong number of entries in %%%s table - expected %lld, actual %lld\x00SELECT * FROM %Q.'%q_rowid'\x00Schema corrupt or not an rtree\x00_rowid\x00_parent\x00END\x00wrong number of arguments to function rtreecheck()\x00[\x00[%!g,%!g],\x00[%!g,%!g]]\x00\x00CREATE TABLE x(_shape\x00,%s\x00rtree\x00fullscan\x00_shape does not contain a valid polygon\x00geopoly_overlap\x00geopoly_within\x00geopoly\x00geopoly_area\x00geopoly_blob\x00geopoly_json\x00geopoly_svg\x00geopoly_contains_point\x00geopoly_debug\x00geopoly_bbox\x00geopoly_xform\x00geopoly_regular\x00geopoly_ccw\x00geopoly_group_bbox\x00rtreenode\x00rtreedepth\x00rtreecheck\x00rtree_i32\x00corrupt fossil delta\x00DROP TRIGGER IF EXISTS temp.rbu_insert_tr;DROP TRIGGER IF EXISTS temp.rbu_update1_tr;DROP TRIGGER IF EXISTS temp.rbu_update2_tr;DROP TRIGGER IF EXISTS temp.rbu_delete_tr;\x00SELECT rbu_target_name(name, type='view') AS target, name FROM sqlite_schema WHERE type IN ('table', 'view') AND target IS NOT NULL %s ORDER BY name\x00AND rootpage!=0 AND rootpage IS NOT NULL\x00SELECT name, rootpage, sql IS NULL OR substr(8, 6)=='UNIQUE' FROM main.sqlite_schema WHERE type='index' AND tbl_name = ?\x00SELECT (sql COLLATE nocase BETWEEN 'CREATE VIRTUAL' AND 'CREATE VIRTUAM'), rootpage FROM sqlite_schema WHERE name=%Q\x00PRAGMA index_list=%Q\x00SELECT rootpage FROM sqlite_schema WHERE name = %Q\x00PRAGMA table_info=%Q\x00PRAGMA main.index_list = %Q\x00PRAGMA main.index_xinfo = %Q\x00SELECT * FROM '%q'\x00rbu_\x00rbu_rowid\x00table %q %s rbu_rowid column\x00may not have\x00requires\x00PRAGMA table_info(%Q)\x00column missing from %q: %s\x00%z%s\"%w\"\x00%z%s%s\"%w\"%s\x00SELECT max(_rowid_) FROM \"%s%w\"\x00 WHERE _rowid_ > %lld \x00 DESC\x00quote(\x00||','||\x00SELECT %s FROM \"%s%w\" ORDER BY %s LIMIT 1\x00 WHERE (%s) > (%s) \x00_rowid_\x00%z%s \"%w\" COLLATE %Q\x00%z%s \"rbu_imp_%d%w\" COLLATE %Q DESC\x00%z%s quote(\"rbu_imp_%d%w\")\x00SELECT %s FROM \"rbu_imp_%w\" ORDER BY %s LIMIT 1\x00%z%s%s\x00(%s) > (%s)\x00%z%s(%.*s) COLLATE %Q\x00%z%s\"%w\" COLLATE %Q\x00%z%s\"rbu_imp_%d%w\"%s\x00%z%s\"rbu_imp_%d%w\" %s COLLATE %Q\x00%z%s\"rbu_imp_%d%w\" IS ?\x00%z%s%s.\"%w\"\x00%z%sNULL\x00%z, %s._rowid_\x00_rowid_ = ?%d\x00%z%sc%d=?%d\x00_rowid_ = (SELECT id FROM rbu_imposter2 WHERE %z)\x00%z%s\"%w\"=?%d\x00invalid rbu_control value\x00%z%s\"%w\"=rbu_delta(\"%w\", ?%d)\x00%z%s\"%w\"=rbu_fossil_delta(\"%w\", ?%d)\x00PRIMARY KEY(\x00%z%s\"%w\"%s\x00%z)\x00SELECT name FROM sqlite_schema WHERE rootpage = ?\x00%z%sc%d %s COLLATE %Q\x00%z%sc%d%s\x00%z, id INTEGER\x00CREATE TABLE rbu_imposter2(%z, PRIMARY KEY(%z)) WITHOUT ROWID\x00PRIMARY KEY \x00%z%s\"%w\" %s %sCOLLATE %Q%s\x00 NOT NULL\x00%z, %z\x00CREATE TABLE \"rbu_imp_%w\"(%z)%s\x00 WITHOUT ROWID\x00INSERT INTO %s.'rbu_tmp_%q'(rbu_control,%s%s) VALUES(%z)\x00SELECT trim(sql) FROM sqlite_schema WHERE type='index' AND name=?\x00 LIMIT -1 OFFSET %d\x00CREATE TABLE \"rbu_imp_%w\"( %s, PRIMARY KEY( %s ) ) WITHOUT ROWID\x00INSERT INTO \"rbu_imp_%w\" VALUES(%s)\x00DELETE FROM \"rbu_imp_%w\" WHERE %s\x00SELECT %s, 0 AS rbu_control FROM '%q' %s %s %s ORDER BY %s%s\x00AND\x00WHERE\x00SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' %s ORDER BY %s%s\x00SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' %s UNION ALL SELECT %s, rbu_control FROM '%q' %s %s 
typeof(rbu_control)='integer' AND rbu_control!=1 ORDER BY %s%s\x00rbu_imp_\x00INSERT INTO \"%s%w\"(%s%s) VALUES(%s)\x00, _rowid_\x00DELETE FROM \"%s%w\" WHERE %s\x00, rbu_rowid\x00CREATE TABLE IF NOT EXISTS %s.'rbu_tmp_%q' AS SELECT *%s FROM '%q' WHERE 0;\x00, 0 AS rbu_rowid\x00CREATE TEMP TRIGGER rbu_delete_tr BEFORE DELETE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(3, %s);END;CREATE TEMP TRIGGER rbu_update1_tr BEFORE UPDATE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(3, %s);END;CREATE TEMP TRIGGER rbu_update2_tr AFTER UPDATE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(4, %s);END;\x00CREATE TEMP TRIGGER rbu_insert_tr AFTER INSERT ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(0, %s);END;\x00,_rowid_ \x00,rbu_rowid\x00SELECT %s,%s rbu_control%s FROM '%q'%s %s %s %s\x000 AS \x00UPDATE \"%s%w\" SET %s WHERE %s\x00SELECT k, v FROM %s.rbu_state\x00file:///%s-vacuum?modeof=%s\x00ATTACH %Q AS stat\x00CREATE TABLE IF NOT EXISTS %s.rbu_state(k INTEGER PRIMARY KEY, v)\x00cannot vacuum wal mode database\x00file:%s-vactmp?rbu_memory=1%s%s\x00&\x00rbu_tmp_insert\x00rbu_fossil_delta\x00rbu_target_name\x00SELECT * FROM sqlite_schema\x00rbu vfs not found\x00PRAGMA main.wal_checkpoint=restart\x00rbu_exclusive_checkpoint\x00%s-oal\x00%s-wal\x00PRAGMA schema_version\x00PRAGMA schema_version = %d\x00INSERT OR REPLACE INTO %s.rbu_state(k, v) VALUES (%d, %d), (%d, %Q), (%d, %Q), (%d, %d), (%d, %d), (%d, %lld), (%d, %lld), (%d, %lld), (%d, %lld), (%d, %Q) \x00PRAGMA main.%s\x00PRAGMA main.%s = %d\x00PRAGMA writable_schema=1\x00SELECT sql FROM sqlite_schema WHERE sql!='' AND rootpage!=0 AND name!='sqlite_sequence' ORDER BY type DESC\x00SELECT * FROM sqlite_schema WHERE rootpage=0 OR rootpage IS NULL\x00INSERT INTO sqlite_schema VALUES(?,?,?,?,?)\x00PRAGMA writable_schema=0\x00DELETE FROM %s.'rbu_tmp_%q'\x00rbu_state mismatch error\x00rbu_vfs_%d\x00SELECT count(*) FROM sqlite_schema WHERE type='index' AND tbl_name = %Q\x00rbu_index_cnt\x00SELECT 1 FROM sqlite_schema WHERE tbl_name = 'rbu_count'\x00SELECT sum(cnt * (1 + rbu_index_cnt(rbu_target_name(tbl))))FROM rbu_count\x00cannot update wal mode database\x00database modified during rbu %s\x00vacuum\x00update\x00BEGIN IMMEDIATE\x00PRAGMA journal_mode=off\x00-vactmp\x00DELETE FROM stat.rbu_state\x00rbu/zipvfs setup error\x00rbu(%s)/%z\x00rbu_memory\x00SELECT 0, 'tbl', '', 0, '', 1 UNION ALL SELECT 1, 'idx', '', 0, '', 2 UNION ALL SELECT 2, 'stat', '', 0, '', 0\x00PRAGMA '%q'.table_info('%q')\x00%z%s\"%w\".\"%w\".\"%w\"=\"%w\".\"%w\".\"%w\"\x00%z%s\"%w\".\"%w\".\"%w\" IS NOT \"%w\".\"%w\".\"%w\"\x00 OR \x00SELECT * FROM \"%w\".\"%w\" WHERE NOT EXISTS ( SELECT 1 FROM \"%w\".\"%w\" WHERE %s)\x00SELECT * FROM \"%w\".\"%w\", \"%w\".\"%w\" WHERE %s AND (%z)\x00table schemas do not match\x00SELECT tbl, ?2, stat FROM %Q.sqlite_stat1 WHERE tbl IS ?1 AND idx IS (CASE WHEN ?2=X'' THEN NULL ELSE ?2 END)\x00SELECT * FROM \x00 WHERE \x00 IS ?\x00SAVEPOINT changeset\x00RELEASE changeset\x00UPDATE main.\x00 SET \x00 = ?\x00idx IS CASE WHEN length(?4)=0 AND typeof(?4)='blob' THEN NULL ELSE ?4 END \x00DELETE FROM main.\x00 AND (?\x00AND \x00INSERT INTO main.\x00) VALUES(?\x00, ?\x00INSERT INTO main.sqlite_stat1 VALUES(?1, CASE WHEN length(?2)=0 AND typeof(?2)='blob' THEN NULL ELSE ?2 END, ?3)\x00DELETE FROM main.sqlite_stat1 WHERE tbl=?1 AND idx IS CASE WHEN length(?2)=0 AND typeof(?2)='blob' THEN NULL ELSE ?2 END AND (?4 OR stat IS ?3)\x00SAVEPOINT replace_op\x00RELEASE replace_op\x00SAVEPOINT changeset_apply\x00PRAGMA defer_foreign_keys = 1\x00sqlite3changeset_apply(): no such table: 
%s\x00sqlite3changeset_apply(): table %s has %d columns, expected %d or more\x00sqlite3changeset_apply(): primary key mismatch for table %s\x00PRAGMA defer_foreign_keys = 0\x00RELEASE changeset_apply\x00ROLLBACK TO changeset_apply\x00fts5: parser stack overflow\x00fts5: syntax error near \"%.*s\"\x00%z%.*s\x00wrong number of arguments to function highlight()\x00wrong number of arguments to function snippet()\x00snippet\x00highlight\x00bm25\x00prefix\x00malformed prefix=... directive\x00too many prefix indexes (max %d)\x00prefix length out of range (max 999)\x00tokenize\x00multiple tokenize=... directives\x00parse error in tokenize directive\x00content\x00multiple content=... directives\x00%Q.%Q\x00content_rowid\x00multiple content_rowid=... directives\x00columnsize\x00malformed columnsize=... directive\x00columns\x00malformed detail=... directive\x00unrecognized option: \"%.*s\"\x00reserved fts5 column name: %s\x00unindexed\x00unrecognized column option: %s\x00T.%Q\x00, T.%Q\x00, T.c%d\x00reserved fts5 table name: %s\x00parse error in \"%s\"\x00docsize\x00%Q.'%q_%s'\x00CREATE TABLE x(\x00%z%s%Q\x00%z, %Q HIDDEN, %s HIDDEN)\x00pgsz\x00hashsize\x00automerge\x00usermerge\x00crisismerge\x00SELECT k, v FROM %Q.'%q_config'\x00version\x00invalid fts5 file format (found %d, expected %d) - run 'rebuild'\x00unterminated string\x00fts5: syntax error near \"%.1s\"\x00OR\x00NOT\x00NEAR\x00expected integer, got \"%.*s\"\x00fts5: column queries are not supported (detail=none)\x00fts5: %s queries are not supported (detail!=full)\x00phrase\x00block\x00REPLACE INTO '%q'.'%q_data'(id, block) VALUES(?,?)\x00DELETE FROM '%q'.'%q_data' WHERE id>=? AND id<=?\x00DELETE FROM '%q'.'%q_idx' WHERE segid=?\x00PRAGMA %Q.data_version\x00SELECT pgno FROM '%q'.'%q_idx' WHERE segid=? AND term<=? ORDER BY term DESC LIMIT 1\x00INSERT INTO '%q'.'%q_idx'(segid,term,pgno) VALUES(?,?,?)\x00%s_data\x00id INTEGER PRIMARY KEY, block BLOB\x00segid, term, pgno, PRIMARY KEY(segid, term)\x00SELECT segid, term, (pgno>>1), (pgno&1) FROM %Q.'%q_idx' WHERE segid=%d ORDER BY 1, 2\x00\x00\x00\x00\x00\x00recursively defined fts5 content table\x00SELECT rowid, rank FROM %Q.%Q ORDER BY %s(\"%w\"%s%s) %s\x00DESC\x00ASC\x00reads\x00unknown special query: %.*s\x00SELECT %s\x00no such function: %s\x00parse error in rank function: %s\x00%s: table does not support scanning\x00delete-all\x00'delete-all' may only be used with a contentless or external content fts5 table\x00rebuild\x00'rebuild' may not be used with a contentless fts5 table\x00merge\x00integrity-check\x00cannot %s contentless fts5 table: %s\x00DELETE from\x00no such cursor: %lld\x00no such tokenizer: %s\x00error in tokenizer constructor\x00fts5_api_ptr\x00fts5: 2023-02-21 18:09:37 05941c2a04037fc3ed2ffae11f5d2260706f89431f463518740f72ada350866d\x00config\x00fts5\x00fts5_source_id\x00SELECT %s FROM %s T WHERE T.%Q >= ? AND T.%Q <= ? ORDER BY T.%Q ASC\x00SELECT %s FROM %s T WHERE T.%Q <= ? AND T.%Q >= ? 
ORDER BY T.%Q DESC\x00SELECT %s FROM %s T WHERE T.%Q=?\x00INSERT INTO %Q.'%q_content' VALUES(%s)\x00REPLACE INTO %Q.'%q_content' VALUES(%s)\x00DELETE FROM %Q.'%q_content' WHERE id=?\x00REPLACE INTO %Q.'%q_docsize' VALUES(?,?)\x00DELETE FROM %Q.'%q_docsize' WHERE id=?\x00SELECT sz FROM %Q.'%q_docsize' WHERE id=?\x00REPLACE INTO %Q.'%q_config' VALUES(?,?)\x00SELECT %s FROM %s AS T\x00DROP TABLE IF EXISTS %Q.'%q_data';DROP TABLE IF EXISTS %Q.'%q_idx';DROP TABLE IF EXISTS %Q.'%q_config';\x00DROP TABLE IF EXISTS %Q.'%q_docsize';\x00DROP TABLE IF EXISTS %Q.'%q_content';\x00ALTER TABLE %Q.'%q_%s' RENAME TO '%q_%s';\x00CREATE TABLE %Q.'%q_%q'(%s)%s\x00fts5: error creating shadow table %q_%s: %s\x00id INTEGER PRIMARY KEY\x00, c%d\x00id INTEGER PRIMARY KEY, sz BLOB\x00k PRIMARY KEY, v\x00DELETE FROM %Q.'%q_data';DELETE FROM %Q.'%q_idx';\x00DELETE FROM %Q.'%q_docsize';\x00SELECT count(*) FROM %Q.'%q_%s'\x00tokenchars\x00separators\x00L* N* Co\x00categories\x00remove_diacritics\x00unicode61\x00al\x00ance\x00ence\x00er\x00ic\x00able\x00ible\x00ant\x00ement\x00ment\x00ent\x00ion\x00ou\x00ism\x00ate\x00iti\x00ous\x00ive\x00ize\x00at\x00bl\x00ble\x00iz\x00ational\x00tional\x00tion\x00enci\x00anci\x00izer\x00logi\x00bli\x00alli\x00entli\x00eli\x00e\x00ousli\x00ization\x00ation\x00ator\x00alism\x00iveness\x00fulness\x00ful\x00ousness\x00aliti\x00iviti\x00biliti\x00ical\x00ness\x00icate\x00iciti\x00ative\x00alize\x00eed\x00ee\x00ed\x00ing\x00case_sensitive\x00ascii\x00porter\x00trigram\x00col\x00row\x00instance\x00fts5vocab: unknown table type: %Q\x00CREATE TABlE vocab(term, col, doc, cnt)\x00CREATE TABlE vocab(term, doc, cnt)\x00CREATE TABlE vocab(term, doc, col, offset)\x00wrong number of vtable arguments\x00recursive definition for %s.%s\x00SELECT t.%Q FROM %Q.%Q AS t WHERE t.%Q MATCH '*id'\x00no such fts5 table: %s.%s\x00fts5vocab\x002023-02-21 18:09:37 05941c2a04037fc3ed2ffae11f5d2260706f89431f463518740f72ada350866d\x00" +var ts1 = "3.41.2\x00ATOMIC_INTRINSICS=1\x00COMPILER=msvc-1900\x00DEFAULT_AUTOVACUUM\x00DEFAULT_CACHE_SIZE=-2000\x00DEFAULT_FILE_FORMAT=4\x00DEFAULT_JOURNAL_SIZE_LIMIT=-1\x00DEFAULT_MEMSTATUS=0\x00DEFAULT_MMAP_SIZE=0\x00DEFAULT_PAGE_SIZE=4096\x00DEFAULT_PCACHE_INITSZ=20\x00DEFAULT_RECURSIVE_TRIGGERS\x00DEFAULT_SECTOR_SIZE=4096\x00DEFAULT_SYNCHRONOUS=2\x00DEFAULT_WAL_AUTOCHECKPOINT=1000\x00DEFAULT_WAL_SYNCHRONOUS=2\x00DEFAULT_WORKER_THREADS=0\x00ENABLE_COLUMN_METADATA\x00ENABLE_FTS5\x00ENABLE_GEOPOLY\x00ENABLE_MATH_FUNCTIONS\x00ENABLE_MEMORY_MANAGEMENT\x00ENABLE_OFFSET_SQL_FUNC\x00ENABLE_PREUPDATE_HOOK\x00ENABLE_RBU\x00ENABLE_RTREE\x00ENABLE_SESSION\x00ENABLE_SNAPSHOT\x00ENABLE_STAT4\x00ENABLE_UNLOCK_NOTIFY\x00LIKE_DOESNT_MATCH_BLOBS\x00MALLOC_SOFT_LIMIT=1024\x00MAX_ATTACHED=10\x00MAX_COLUMN=2000\x00MAX_COMPOUND_SELECT=500\x00MAX_DEFAULT_PAGE_SIZE=8192\x00MAX_EXPR_DEPTH=1000\x00MAX_FUNCTION_ARG=127\x00MAX_LENGTH=1000000000\x00MAX_LIKE_PATTERN_LENGTH=50000\x00MAX_MMAP_SIZE=0x7fff0000\x00MAX_PAGE_COUNT=1073741823\x00MAX_PAGE_SIZE=65536\x00MAX_SQL_LENGTH=1000000000\x00MAX_TRIGGER_DEPTH=1000\x00MAX_VARIABLE_NUMBER=32766\x00MAX_VDBE_OP=250000000\x00MAX_WORKER_THREADS=8\x00MUTEX_NOOP\x00SOUNDEX\x00SYSTEM_MALLOC\x00TEMP_STORE=1\x00THREADSAFE=1\x00BINARY\x00ANY\x00BLOB\x00INT\x00INTEGER\x00REAL\x00TEXT\x0020b:20e\x0020c:20e\x0020e\x0040f-21a-21d\x00now\x00local time unavailable\x00second\x00minute\x00hour\x00\x00\x00day\x00\x00\x00\x00month\x00\x00year\x00\x00\x00auto\x00julianday\x00localtime\x00unixepoch\x00utc\x00weekday \x00start of 
\x00month\x00year\x00day\x00%02d\x00%06.3f\x00%03d\x00%.16g\x00%lld\x00%04d\x00date\x00time\x00datetime\x00strftime\x00current_time\x00current_timestamp\x00current_date\x00failed to allocate %u bytes of memory\x00failed memory resize %u to %u bytes\x00out of memory\x000123456789ABCDEF0123456789abcdef\x00-x0\x00X0\x00%\x00NaN\x00Inf\x00\x00NULL\x00(NULL)\x00.\x00(join-%u)\x00(subquery-%u)\x00thstndrd\x00922337203685477580\x00API call with %s database connection pointer\x00unopened\x00invalid\x00Savepoint\x00AutoCommit\x00Transaction\x00Checkpoint\x00JournalMode\x00Vacuum\x00VFilter\x00VUpdate\x00Init\x00Goto\x00Gosub\x00InitCoroutine\x00Yield\x00MustBeInt\x00Jump\x00Once\x00If\x00IfNot\x00IsType\x00Not\x00IfNullRow\x00SeekLT\x00SeekLE\x00SeekGE\x00SeekGT\x00IfNotOpen\x00IfNoHope\x00NoConflict\x00NotFound\x00Found\x00SeekRowid\x00NotExists\x00Last\x00IfSmaller\x00SorterSort\x00Sort\x00Rewind\x00SorterNext\x00Prev\x00Next\x00IdxLE\x00IdxGT\x00IdxLT\x00Or\x00And\x00IdxGE\x00RowSetRead\x00RowSetTest\x00Program\x00FkIfZero\x00IsNull\x00NotNull\x00Ne\x00Eq\x00Gt\x00Le\x00Lt\x00Ge\x00ElseEq\x00IfPos\x00IfNotZero\x00DecrJumpZero\x00IncrVacuum\x00VNext\x00Filter\x00PureFunc\x00Function\x00Return\x00EndCoroutine\x00HaltIfNull\x00Halt\x00Integer\x00Int64\x00String\x00BeginSubrtn\x00Null\x00SoftNull\x00Blob\x00Variable\x00Move\x00Copy\x00SCopy\x00IntCopy\x00FkCheck\x00ResultRow\x00CollSeq\x00AddImm\x00RealAffinity\x00Cast\x00Permutation\x00Compare\x00IsTrue\x00ZeroOrNull\x00Offset\x00Column\x00TypeCheck\x00Affinity\x00MakeRecord\x00Count\x00ReadCookie\x00SetCookie\x00ReopenIdx\x00BitAnd\x00BitOr\x00ShiftLeft\x00ShiftRight\x00Add\x00Subtract\x00Multiply\x00Divide\x00Remainder\x00Concat\x00OpenRead\x00OpenWrite\x00BitNot\x00OpenDup\x00OpenAutoindex\x00String8\x00OpenEphemeral\x00SorterOpen\x00SequenceTest\x00OpenPseudo\x00Close\x00ColumnsUsed\x00SeekScan\x00SeekHit\x00Sequence\x00NewRowid\x00Insert\x00RowCell\x00Delete\x00ResetCount\x00SorterCompare\x00SorterData\x00RowData\x00Rowid\x00NullRow\x00SeekEnd\x00IdxInsert\x00SorterInsert\x00IdxDelete\x00DeferredSeek\x00IdxRowid\x00FinishSeek\x00Destroy\x00Clear\x00ResetSorter\x00CreateBtree\x00SqlExec\x00ParseSchema\x00LoadAnalysis\x00DropTable\x00DropIndex\x00Real\x00DropTrigger\x00IntegrityCk\x00RowSetAdd\x00Param\x00FkCounter\x00MemMax\x00OffsetLimit\x00AggInverse\x00AggStep\x00AggStep1\x00AggValue\x00AggFinal\x00Expire\x00CursorLock\x00CursorUnlock\x00TableLock\x00VBegin\x00VCreate\x00VDestroy\x00VOpen\x00VInitIn\x00VColumn\x00VRename\x00Pagecount\x00MaxPgcnt\x00ClrSubtype\x00FilterAdd\x00Trace\x00CursorHint\x00ReleaseReg\x00Noop\x00Explain\x00Abortable\x00AreFileApisANSI\x00CharLowerW\x00CharUpperW\x00CloseHandle\x00CreateFileA\x00CreateFileW\x00CreateFileMappingA\x00CreateFileMappingW\x00CreateMutexW\x00DeleteFileA\x00DeleteFileW\x00FileTimeToLocalFileTime\x00FileTimeToSystemTime\x00FlushFileBuffers\x00FormatMessageA\x00FormatMessageW\x00FreeLibrary\x00GetCurrentProcessId\x00GetDiskFreeSpaceA\x00GetDiskFreeSpaceW\x00GetFileAttributesA\x00GetFileAttributesW\x00GetFileAttributesExW\x00GetFileSize\x00GetFullPathNameA\x00GetFullPathNameW\x00GetLastError\x00GetProcAddressA\x00GetSystemInfo\x00GetSystemTime\x00GetSystemTimeAsFileTime\x00GetTempPathA\x00GetTempPathW\x00GetTickCount\x00GetVersionExA\x00GetVersionExW\x00HeapAlloc\x00HeapCreate\x00HeapDestroy\x00HeapFree\x00HeapReAlloc\x00HeapSize\x00HeapValidate\x00HeapCompact\x00LoadLibraryA\x00LoadLibraryW\x00LocalFree\x00LockFile\x00LockFileEx\x00MapViewOfFile\x00MultiByteToWideChar\x00QueryPerformanceCounter\
x00ReadFile\x00SetEndOfFile\x00SetFilePointer\x00Sleep\x00SystemTimeToFileTime\x00UnlockFile\x00UnlockFileEx\x00UnmapViewOfFile\x00WideCharToMultiByte\x00WriteFile\x00CreateEventExW\x00WaitForSingleObject\x00WaitForSingleObjectEx\x00SetFilePointerEx\x00GetFileInformationByHandleEx\x00MapViewOfFileFromApp\x00CreateFile2\x00LoadPackagedLibrary\x00GetTickCount64\x00GetNativeSystemInfo\x00OutputDebugStringA\x00OutputDebugStringW\x00GetProcessHeap\x00CreateFileMappingFromApp\x00InterlockedCompareExchange\x00UuidCreate\x00UuidCreateSequential\x00FlushViewOfFile\x00%s\x00OsError 0x%lx (%lu)\x00os_win.c:%d: (%lu) %s(%s) - %s\x00delayed %dms for lock/sharing conflict at line %d\x00winSeekFile\x00winClose\x00winRead\x00winWrite1\x00winWrite2\x00winTruncate1\x00winTruncate2\x00winSync1\x00winSync2\x00winFileSize\x00winUnlockReadLock\x00winUnlock\x00winLockSharedMemory\x00%s-shm\x00readonly_shm\x00winOpenShm\x00winShmMap1\x00winShmMap2\x00winShmMap3\x00winUnmapfile1\x00winUnmapfile2\x00winMapfile1\x00winMapfile2\x00etilqs_\x00winGetTempname1\x00winGetTempname2\x00winGetTempname3\x00winGetTempname4\x00winGetTempname5\x00abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\x00exclusive\x00winOpen\x00psow\x00winDelete\x00winAccess\x00%s%c%s\x00winFullPathname1\x00winFullPathname2\x00winFullPathname3\x00winFullPathname4\x00win32\x00win32-longpath\x00win32-none\x00win32-longpath-none\x00memdb\x00memdb(%p,%lld)\x00PRAGMA \"%w\".page_count\x00ATTACH x AS %Q\x00recovered %d pages from %s\x00-journal\x00-wal\x00nolock\x00immutable\x00PRAGMA table_list\x00recovered %d frames from WAL file %s\x00cannot limit WAL size: %s\x00SQLite format 3\x00:memory:\x00@ \x00\n\x00invalid page number %d\x002nd reference to page %d\x00Failed to read ptrmap key=%d\x00Bad ptr map entry key=%d expected=(%d,%d) got=(%d,%d)\x00failed to get page %d\x00freelist leaf count too big on page %d\x00%s is %d but should be %d\x00size\x00overflow list length\x00Page %u: \x00unable to get the page. 
error code=%d\x00btreeInitPage() returns error code %d\x00free space corruption\x00On tree page %u cell %d: \x00On page %u at right child: \x00Offset %d out of range %d..%d\x00Extends off end of page\x00Rowid %lld out of order\x00Child page depth differs\x00Multiple uses for byte %u of page %u\x00Fragmentation of %d bytes reported as %d on page %u\x00Main freelist: \x00max rootpage (%d) disagrees with header (%d)\x00incremental_vacuum enabled with a max rootpage of zero\x00Page %d is never used\x00Pointer map page %d is referenced\x00unknown database %s\x00destination database is in use\x00source and destination must be distinct\x00%!.15g\x00-\x00%s%s\x00k(%d\x00B\x00,%s%s%s\x00N.\x00)\x00%.18s-%s\x00%s(%d)\x00%d\x00(blob)\x00vtab:%p\x00%c%u\x00]\x00program\x00?\x008\x0016LE\x0016BE\x00addr\x00opcode\x00p1\x00p2\x00p3\x00p4\x00p5\x00comment\x00id\x00parent\x00notused\x00detail\x00%.4c%s%.16c\x00MJ delete: %s\x00MJ collide: %s\x00-mj%06X9%02X\x00FOREIGN KEY constraint failed\x00a CHECK constraint\x00a generated column\x00an index\x00non-deterministic use of %s() in %s\x00API called with finalized prepared statement\x00API called with NULL prepared statement\x00string or blob too big\x00bind on a busy prepared statement: [%s]\x00-- \x00'%.*q'\x00zeroblob(%d)\x00x'\x00%02x\x00'\x00%s constraint failed\x00%z: %s\x00abort at %d in [%s]: %s\x00cannot store %s value in %s column %s.%s\x00cannot open savepoint - SQL statements in progress\x00no such savepoint: %s\x00cannot release savepoint - SQL statements in progress\x00cannot commit transaction - SQL statements in progress\x00cannot start a transaction within a transaction\x00cannot rollback - no transaction is active\x00cannot commit - no transaction is active\x00database schema has changed\x00index corruption\x00sqlite_master\x00SELECT*FROM\"%w\".%s WHERE %s ORDER BY rowid\x00too many levels of trigger recursion\x00cannot change %s wal mode from within a transaction\x00into\x00out of\x00database table is locked: %s\x00ValueList\x00-- %s\x00statement aborts at %d: [%s] %s\x00NOT NULL\x00UNIQUE\x00CHECK\x00FOREIGN KEY\x00cannot open value of type %s\x00null\x00real\x00integer\x00no such rowid: %lld\x00cannot open virtual table: %s\x00cannot open table without rowid: %s\x00cannot open view: %s\x00no such column: \"%s\"\x00foreign key\x00indexed\x00cannot open %s column for writing\x00sqlite_\x00sqlite_temp_master\x00sqlite_temp_schema\x00sqlite_schema\x00main\x00*\x00new\x00old\x00excluded\x00misuse of aliased aggregate %s\x00misuse of aliased window function %s\x00row value misused\x00double-quoted string literal: \"%w\"\x00coalesce\x00no such column\x00ambiguous column name\x00%s: %s.%s.%s\x00%s: %s.%s\x00%s: %s\x00partial index WHERE clauses\x00index expressions\x00CHECK constraints\x00generated columns\x00%s prohibited in %s\x00the \".\" operator\x00second argument to %#T() must be a constant between 0.0 and 1.0\x00not authorized to use function: %#T\x00non-deterministic functions\x00%#T() may not be used as a window function\x00window\x00aggregate\x00misuse of %s function %#T()\x00no such function: %#T\x00wrong number of arguments to function %#T()\x00FILTER may not be used with non-aggregate %#T()\x00subqueries\x00parameters\x00%r %s BY term out of range - should be between 1 and %d\x00too many terms in ORDER BY clause\x00ORDER\x00%r ORDER BY term does not match any column in the result set\x00too many terms in %s BY clause\x00HAVING clause on a non-aggregate query\x00GROUP\x00aggregate functions are not allowed in the GROUP BY 
clause\x00Expression tree is too large (maximum depth %d)\x00IN(...) element has %d term%s - expected %d\x00s\x000\x00too many arguments on function %T\x00unsafe use of %#T()\x00variable number must be between ?1 and ?%d\x00too many SQL variables\x00%d columns assigned %d values\x00too many columns in %s\x00true\x00false\x00_ROWID_\x00ROWID\x00OID\x00USING ROWID SEARCH ON TABLE %s FOR IN-OPERATOR\x00USING INDEX %s FOR IN-OPERATOR\x00sub-select returns %d columns - expected %d\x00REUSE LIST SUBQUERY %d\x00%sLIST SUBQUERY %d\x00CORRELATED \x00REUSE SUBQUERY %d\x00%sSCALAR SUBQUERY %d\x001\x000x\x00hex literal too big: %s%#T\x00generated column loop on \"%s\"\x00blob\x00text\x00numeric\x00flexnum\x00none\x00misuse of aggregate: %#T()\x00unknown function: %#T()\x00RAISE() may only be used within a trigger-program\x00B\x00C\x00D\x00E\x00F\x00table %s may not be altered\x00SELECT 1 FROM \"%w\".sqlite_master WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%' AND sqlite_rename_test(%Q, sql, type, name, %d, %Q, %d)=NULL \x00SELECT 1 FROM temp.sqlite_master WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%' AND sqlite_rename_test(%Q, sql, type, name, 1, %Q, %d)=NULL \x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_quotefix(%Q, sql)WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%'\x00UPDATE temp.sqlite_master SET sql = sqlite_rename_quotefix('temp', sql)WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND sql NOT LIKE 'create virtual%%'\x00there is already another table or index with this name: %s\x00table\x00view %s may not be altered\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, %d) WHERE (type!='index' OR tbl_name=%Q COLLATE nocase)AND name NOT LIKE 'sqliteX_%%' ESCAPE 'X'\x00UPDATE %Q.sqlite_master SET tbl_name = %Q, name = CASE WHEN type='table' THEN %Q WHEN name LIKE 'sqliteX_autoindex%%' ESCAPE 'X' AND type='index' THEN 'sqlite_autoindex_' || %Q || substr(name,%d+18) ELSE name END WHERE tbl_name=%Q COLLATE nocase AND (type='table' OR type='index' OR type='trigger');\x00sqlite_sequence\x00UPDATE \"%w\".sqlite_sequence set name = %Q WHERE name = %Q\x00UPDATE sqlite_temp_schema SET sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, 1), tbl_name = CASE WHEN tbl_name=%Q COLLATE nocase AND sqlite_rename_test(%Q, sql, type, name, 1, 'after rename', 0) THEN %Q ELSE tbl_name END WHERE type IN ('view', 'trigger')\x00after rename\x00SELECT raise(ABORT,%Q) FROM \"%w\".\"%w\"\x00Cannot add a PRIMARY KEY column\x00Cannot add a UNIQUE column\x00Cannot add a REFERENCES column with non-NULL default value\x00Cannot add a NOT NULL column with default value NULL\x00Cannot add a column with non-constant default\x00cannot add a STORED column\x00UPDATE \"%w\".sqlite_master SET sql = printf('%%.%ds, ',sql) || %Q || substr(sql,1+length(printf('%%.%ds',sql))) WHERE type = 'table' AND name = %Q\x00SELECT CASE WHEN quick_check GLOB 'CHECK*' THEN raise(ABORT,'CHECK constraint failed') ELSE raise(ABORT,'NOT NULL constraint failed') END FROM pragma_quick_check(%Q,%Q) WHERE quick_check GLOB 'CHECK*' OR quick_check GLOB 'NULL*'\x00virtual tables may not be altered\x00Cannot add a column to a view\x00sqlite_altertab_%s\x00view\x00virtual table\x00cannot %s %s \"%s\"\x00drop column from\x00rename columns of\x00no such column: \"%T\"\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, %d) WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' AND (type != 
'index' OR tbl_name = %Q)\x00UPDATE temp.sqlite_master SET sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, 1) WHERE type IN ('trigger', 'view')\x00error in %s %s%s%s: %s\x00 \x00CREATE \x00\"%w\" \x00%Q%s\x00%.*s%s\x00cannot drop %s column: \"%s\"\x00PRIMARY KEY\x00cannot drop column \"%s\": no other columns exist\x00UPDATE \"%w\".sqlite_master SET sql = sqlite_drop_column(%d, sql, %d) WHERE (type=='table' AND tbl_name=%Q COLLATE nocase)\x00after drop column\x00sqlite_rename_column\x00sqlite_rename_table\x00sqlite_rename_test\x00sqlite_drop_column\x00sqlite_rename_quotefix\x00CREATE TABLE %Q.%s(%s)\x00DELETE FROM %Q.%s WHERE %s=%Q\x00DELETE FROM %Q.%s\x00sqlite_stat1\x00tbl,idx,stat\x00sqlite_stat4\x00tbl,idx,neq,nlt,ndlt,sample\x00sqlite_stat3\x00stat_init\x00stat_push\x00%llu\x00 %llu\x00%llu \x00stat_get\x00sqlite\\_%\x00BBB\x00idx\x00tbl\x00unordered*\x00sz=[0-9]*\x00noskipscan*\x00SELECT idx,count(*) FROM %Q.sqlite_stat4 GROUP BY idx\x00SELECT idx,neq,nlt,ndlt,sample FROM %Q.sqlite_stat4\x00SELECT tbl,idx,stat FROM %Q.sqlite_stat1\x00x\x00\x00too many attached databases - max %d\x00database %s is already in use\x00database is already attached\x00attached databases must use the same text encoding as main database\x00unable to open database: %s\x00no such database: %s\x00cannot detach database %s\x00database %s is locked\x00sqlite_detach\x00sqlite_attach\x00%s cannot use variables\x00%s %T cannot reference objects in database %s\x00authorizer malfunction\x00%s.%s\x00%s.%z\x00access to %z is prohibited\x00not authorized\x00pragma_\x00no such view\x00no such table\x00corrupt database\x00unknown database %T\x00object name reserved for internal use: %s\x00temporary table name must be unqualified\x00%s %T already exists\x00there is already an index named %s\x00sqlite_returning\x00cannot use RETURNING in a trigger\x00too many columns on %s\x00always\x00generated\x00duplicate column name: %s\x00default value of column [%s] is not constant\x00cannot use DEFAULT on a generated column\x00generated columns cannot be part of the PRIMARY KEY\x00table \"%s\" has more than one primary key\x00AUTOINCREMENT is only allowed on an INTEGER PRIMARY KEY\x00virtual tables cannot use computed columns\x00virtual\x00stored\x00error in generated column \"%s\"\x00,\x00\n \x00,\n \x00\n)\x00CREATE TABLE \x00 TEXT\x00 NUM\x00 INT\x00 REAL\x00unknown datatype for %s.%s: \"%s\"\x00missing datatype for %s.%s\x00AUTOINCREMENT not allowed on WITHOUT ROWID tables\x00PRIMARY KEY missing on table %s\x00must have at least one non-generated column\x00TABLE\x00VIEW\x00CREATE %s %.*s\x00UPDATE %Q.sqlite_master SET type='%s', name=%Q, tbl_name=%Q, rootpage=#%d, sql=%Q WHERE rowid=#%d\x00CREATE TABLE %Q.sqlite_sequence(name,seq)\x00tbl_name='%q' AND type!='trigger'\x00parameters are not allowed in views\x00view %s is circularly defined\x00corrupt schema\x00UPDATE %Q.sqlite_master SET rootpage=%d WHERE #%d AND rootpage=#%d\x00sqlite_stat%d\x00DELETE FROM %Q.sqlite_sequence WHERE name=%Q\x00DELETE FROM %Q.sqlite_master WHERE tbl_name=%Q and type!='trigger'\x00stat\x00table %s may not be dropped\x00use DROP TABLE to delete table %s\x00use DROP VIEW to delete view %s\x00foreign key on %s should reference only one column of table %T\x00number of columns in foreign key does not match the number of columns in the referenced table\x00unknown column \"%s\" in foreign key definition\x00unsupported use of NULLS %s\x00FIRST\x00LAST\x00index\x00cannot create a TEMP index on non-TEMP table \"%s\"\x00table %s may not be 
indexed\x00views may not be indexed\x00virtual tables may not be indexed\x00there is already a table named %s\x00index %s already exists\x00sqlite_autoindex_%s_%d\x00expressions prohibited in PRIMARY KEY and UNIQUE constraints\x00conflicting ON CONFLICT clauses specified\x00invalid rootpage\x00CREATE%s INDEX %.*s\x00 UNIQUE\x00INSERT INTO %Q.sqlite_master VALUES('index',%Q,%Q,#%d,%Q);\x00name='%q' AND type='index'\x00no such index: %S\x00index associated with UNIQUE or PRIMARY KEY constraint cannot be dropped\x00DELETE FROM %Q.sqlite_master WHERE name=%Q AND type='index'\x00too many FROM clause terms, max: %d\x00a JOIN clause is required before %s\x00ON\x00USING\x00BEGIN\x00ROLLBACK\x00COMMIT\x00RELEASE\x00unable to open a temporary database file for storing temporary tables\x00index '%q'\x00, \x00%s.rowid\x00unable to identify the object to be reindexed\x00duplicate WITH table name: %s\x00no such collation sequence: %s\x00unsafe use of virtual table \"%s\"\x00table %s may not be modified\x00cannot modify %s because it is a view\x00rows deleted\x00integer overflow\x00%.*f\x00LIKE or GLOB pattern too complex\x00ESCAPE expression must be a single character\x00%!.20e\x00%Q\x00?000\x00MATCH\x00like\x00implies_nonnull_row\x00expr_compare\x00expr_implies_expr\x00affinity\x00soundex\x00load_extension\x00sqlite_compileoption_used\x00sqlite_compileoption_get\x00unlikely\x00likelihood\x00likely\x00sqlite_offset\x00ltrim\x00rtrim\x00trim\x00min\x00max\x00typeof\x00subtype\x00length\x00instr\x00printf\x00format\x00unicode\x00char\x00abs\x00round\x00upper\x00lower\x00hex\x00unhex\x00ifnull\x00random\x00randomblob\x00nullif\x00sqlite_version\x00sqlite_source_id\x00sqlite_log\x00quote\x00last_insert_rowid\x00changes\x00total_changes\x00replace\x00zeroblob\x00substr\x00substring\x00sum\x00total\x00avg\x00count\x00group_concat\x00glob\x00ceil\x00ceiling\x00floor\x00trunc\x00ln\x00log\x00log10\x00log2\x00exp\x00pow\x00power\x00mod\x00acos\x00asin\x00atan\x00atan2\x00cos\x00sin\x00tan\x00cosh\x00sinh\x00tanh\x00acosh\x00asinh\x00atanh\x00sqrt\x00radians\x00degrees\x00pi\x00sign\x00iif\x00foreign key mismatch - \"%w\" referencing \"%w\"\x00cannot INSERT into generated column \"%s\"\x00table %S has no column named %s\x00table %S has %d columns but %d values were supplied\x00%d values for %d columns\x00UPSERT not implemented for virtual table \"%s\"\x00cannot UPSERT a view\x00rows inserted\x00sqlite3_extension_init\x00sqlite3_\x00lib\x00_init\x00no entry point [%s] in shared library [%s]\x00error during initialization: %s\x00unable to open shared library [%.*s]\x00dll\x00automatic extension loading failed: 
%s\x00seq\x00from\x00to\x00on_update\x00on_delete\x00match\x00cid\x00name\x00type\x00notnull\x00dflt_value\x00pk\x00hidden\x00schema\x00ncol\x00wr\x00strict\x00seqno\x00desc\x00coll\x00key\x00builtin\x00enc\x00narg\x00flags\x00wdth\x00hght\x00flgs\x00unique\x00origin\x00partial\x00rowid\x00fkid\x00file\x00busy\x00checkpointed\x00database\x00status\x00cache_size\x00timeout\x00analysis_limit\x00application_id\x00auto_vacuum\x00automatic_index\x00busy_timeout\x00cache_spill\x00case_sensitive_like\x00cell_size_check\x00checkpoint_fullfsync\x00collation_list\x00compile_options\x00count_changes\x00data_store_directory\x00data_version\x00database_list\x00default_cache_size\x00defer_foreign_keys\x00empty_result_callbacks\x00encoding\x00foreign_key_check\x00foreign_key_list\x00foreign_keys\x00freelist_count\x00full_column_names\x00fullfsync\x00function_list\x00hard_heap_limit\x00ignore_check_constraints\x00incremental_vacuum\x00index_info\x00index_list\x00index_xinfo\x00integrity_check\x00journal_mode\x00journal_size_limit\x00legacy_alter_table\x00locking_mode\x00max_page_count\x00mmap_size\x00module_list\x00optimize\x00page_count\x00page_size\x00pragma_list\x00query_only\x00quick_check\x00read_uncommitted\x00recursive_triggers\x00reverse_unordered_selects\x00schema_version\x00secure_delete\x00short_column_names\x00shrink_memory\x00soft_heap_limit\x00synchronous\x00table_info\x00table_list\x00table_xinfo\x00temp_store\x00temp_store_directory\x00threads\x00trusted_schema\x00user_version\x00wal_autocheckpoint\x00wal_checkpoint\x00writable_schema\x00onoffalseyestruextrafull\x00normal\x00full\x00incremental\x00memory\x00temporary storage cannot be changed from within a transaction\x00SET NULL\x00SET DEFAULT\x00CASCADE\x00RESTRICT\x00NO ACTION\x00delete\x00persist\x00off\x00truncate\x00wal\x00w\x00a\x00sissii\x00utf8\x00utf16le\x00utf16be\x00-%T\x00fast\x00not a writable directory\x00Safety level may not be changed inside a transaction\x00reset\x00issisii\x00issisi\x00SELECT*FROM\"%w\"\x00shadow\x00sssiii\x00iisX\x00isiX\x00c\x00u\x00isisi\x00iss\x00is\x00iissssss\x00NONE\x00siX\x00*** in database %s ***\n\x00row not in PRIMARY KEY order for %s\x00NULL value in %s.%s\x00non-%s value in %s.%s\x00NUMERIC value in %s.%s\x00C\x00TEXT value in %s.%s\x00CHECK constraint failed in %s\x00row \x00 missing from index \x00rowid not at end-of-record for row \x00 of index \x00 values differ from index \x00non-unique entry in index \x00wrong # of entries in index \x00ok\x00unsupported encoding: %s\x00restart\x00ANALYZE \"%w\".\"%w\"\x00UTF8\x00UTF-8\x00UTF-16le\x00UTF-16be\x00UTF16le\x00UTF16be\x00UTF-16\x00UTF16\x00CREATE TABLE x\x00%c\"%s\"\x00(\"%s\"\x00,arg HIDDEN\x00,schema HIDDEN\x00PRAGMA \x00%Q.\x00=%Q\x00error in %s %s after %s: %s\x00malformed database schema (%s)\x00%z - %s\x00rename\x00drop column\x00add column\x00orphan index\x00CREATE TABLE x(type text,name text,tbl_name text,rootpage int,sql text)\x00unsupported file format\x00SELECT*FROM\"%w\".%s ORDER BY rowid\x00database schema is locked: %s\x00statement too long\x00unknown join type: %T%s%T%s%T\x00naturaleftouterightfullinnercross\x00a NATURAL join may not have an ON or USING clause\x00cannot join using column %s - column not present in both tables\x00ambiguous reference to %s in USING()\x00UNION ALL\x00INTERSECT\x00EXCEPT\x00UNION\x00USE TEMP B-TREE FOR %s\x00USE TEMP B-TREE FOR %sORDER BY\x00RIGHT PART OF \x00column%d\x00%.*z:%u\x00NUM\x00cannot use window functions in recursive queries\x00recursive aggregate queries not 
supported\x00SETUP\x00RECURSIVE STEP\x00SCAN %d CONSTANT ROW%s\x00S\x00COMPOUND QUERY\x00LEFT-MOST SUBQUERY\x00%s USING TEMP B-TREE\x00all VALUES must have the same number of terms\x00SELECTs to the left and right of %s do not have the same number of result columns\x00MERGE (%s)\x00LEFT\x00RIGHT\x00no such index: %s\x00'%s' is not a function\x00no such index: \"%s\"\x00multiple references to recursive table: %s\x00circular reference: %s\x00table %s has %d values for %d columns\x00multiple recursive references: %s\x00recursive reference in a subquery: %s\x00%!S\x00too many references to \"%s\": max 65535\x00access to view \"%s\" prohibited\x00..%s\x00%s.%s.%s\x00no such table: %s\x00no tables specified\x00too many columns in result set\x00DISTINCT aggregates must have exactly one argument\x00USE TEMP B-TREE FOR %s(DISTINCT)\x00SCAN %s%s%s\x00 USING COVERING INDEX \x00target object/alias may not appear in FROM clause: %s\x00expected %d columns for '%s' but got %d\x00CO-ROUTINE %!S\x00MATERIALIZE %!S\x00DISTINCT\x00GROUP BY\x00sqlite3_get_table() called with two or more incompatible queries\x00temporary trigger may not have qualified name\x00trigger\x00cannot create triggers on virtual tables\x00trigger %T already exists\x00cannot create trigger on system table\x00cannot create %s trigger on view: %S\x00BEFORE\x00AFTER\x00cannot create INSTEAD OF trigger on table: %S\x00trigger \"%s\" may not write to shadow table \"%s\"\x00INSERT INTO %Q.sqlite_master VALUES('trigger',%Q,%Q,0,'CREATE TRIGGER %q')\x00type='trigger' AND name='%q'\x00no such trigger: %S\x00DELETE FROM %Q.sqlite_master WHERE name=%Q AND type='trigger'\x00%s RETURNING is not available on virtual tables\x00DELETE\x00UPDATE\x00RETURNING may not use \"TABLE.*\" wildcards\x00-- TRIGGER %s\x00cannot UPDATE generated column \"%s\"\x00no such column: %s\x00rows updated\x00%r \x00%sON CONFLICT clause does not match any PRIMARY KEY or UNIQUE constraint\x00CRE\x00INS\x00cannot VACUUM from within a transaction\x00cannot VACUUM - SQL statements in progress\x00non-text filename\x00ATTACH %Q AS vacuum_db\x00output file already exists\x00SELECT sql FROM \"%w\".sqlite_schema WHERE type='table'AND name<>'sqlite_sequence' AND coalesce(rootpage,1)>0\x00SELECT sql FROM \"%w\".sqlite_schema WHERE type='index'\x00SELECT'INSERT INTO vacuum_db.'||quote(name)||' SELECT*FROM\"%w\".'||quote(name)FROM vacuum_db.sqlite_schema WHERE type='table'AND coalesce(rootpage,1)>0\x00INSERT INTO vacuum_db.sqlite_schema SELECT*FROM \"%w\".sqlite_schema WHERE type IN('view','trigger') OR(type='table'AND rootpage=0)\x00CREATE VIRTUAL TABLE %T\x00UPDATE %Q.sqlite_master SET type='table', name=%Q, tbl_name=%Q, rootpage=0, sql=%Q WHERE rowid=#%d\x00name=%Q AND sql=%Q\x00vtable constructor called recursively: %s\x00vtable constructor failed: %s\x00vtable constructor did not declare schema: %s\x00no such module: %s\x00\x00 AND \x00(\x00 (\x00%s=?\x00ANY(%s)\x00>\x00<\x00%s %S\x00SEARCH\x00SCAN\x00AUTOMATIC PARTIAL COVERING INDEX\x00AUTOMATIC COVERING INDEX\x00COVERING INDEX %s\x00INDEX %s\x00 USING \x00 USING INTEGER PRIMARY KEY (%s\x00>? 
AND %s\x00%c?)\x00 VIRTUAL TABLE INDEX %d:%s\x00 LEFT-JOIN\x00BLOOM FILTER ON %S (\x00rowid=?\x00MULTI-INDEX OR\x00INDEX %d\x00RIGHT-JOIN %s\x00regexp\x00ON clause references tables to its right\x00NOCASE\x00too many arguments on %s() - max %d\x00automatic index on %s(%s)\x00auto-index\x00%s.xBestIndex malfunction\x00abbreviated query algorithm search\x00no query solution\x00at most %d tables in a join\x00SCAN CONSTANT ROW\x00second argument to nth_value must be a positive integer\x00argument of ntile must be a positive integer\x00row_number\x00dense_rank\x00rank\x00percent_rank\x00cume_dist\x00ntile\x00last_value\x00nth_value\x00first_value\x00lead\x00lag\x00no such window: %s\x00RANGE with offset PRECEDING/FOLLOWING requires one ORDER BY expression\x00FILTER clause may only be used with aggregate window functions\x00misuse of aggregate: %s()\x00unsupported frame specification\x00PARTITION clause\x00ORDER BY clause\x00frame specification\x00cannot override %s of window: %s\x00DISTINCT is not supported for window functions\x00frame starting offset must be a non-negative integer\x00frame ending offset must be a non-negative integer\x00frame starting offset must be a non-negative number\x00frame ending offset must be a non-negative number\x00%s clause should come after %s not before\x00ORDER BY\x00LIMIT\x00too many terms in compound SELECT\x00syntax error after column name \"%.*s\"\x00parser stack overflow\x00unknown table option: %.*s\x00set list\x00near \"%T\": syntax error\x00qualified table names are not allowed on INSERT, UPDATE, and DELETE statements within triggers\x00the INDEXED BY clause is not allowed on UPDATE or DELETE statements within triggers\x00the NOT INDEXED clause is not allowed on UPDATE or DELETE statements within triggers\x00incomplete input\x00unrecognized token: \"%T\"\x00%s in \"%s\"\x00create\x00temp\x00temporary\x00end\x00explain\x00unable to close due to unfinalized statements or unfinished backups\x00unknown error\x00abort due to ROLLBACK\x00another row available\x00no more rows available\x00not an error\x00SQL logic error\x00access permission denied\x00query aborted\x00database is locked\x00database table is locked\x00attempt to write a readonly database\x00interrupted\x00disk I/O error\x00database disk image is malformed\x00unknown operation\x00database or disk is full\x00unable to open database file\x00locking protocol\x00constraint failed\x00datatype mismatch\x00bad parameter or other API misuse\x00authorization denied\x00column index out of range\x00file is not a database\x00notification message\x00warning message\x00unable to delete/modify user-function due to active statements\x00unable to use function %s in the requested context\x00unknown database: %s\x00unable to delete/modify collation sequence due to active statements\x00file:\x00localhost\x00invalid uri authority: %.*s\x00vfs\x00cache\x00mode\x00access\x00no such %s mode: %s\x00%s mode not allowed: %s\x00no such vfs: %s\x00shared\x00private\x00ro\x00rw\x00rwc\x00RTRIM\x00\x00\x00\x00%s at line %d of [%.10s]\x00database corruption\x00misuse\x00cannot open file\x00no such table column: %s.%s\x00SQLITE_\x00database is deadlocked\x00array\x00object\x000123456789abcdef\x00JSON cannot hold BLOB values\x00malformed JSON\x00[0]\x00JSON path error near '%q'\x00json_%s() needs an odd number of arguments\x00$[\x00$.\x00json_object() requires an even number of arguments\x00json_object() labels must be TEXT\x00set\x00insert\x00[]\x00{}\x00CREATE TABLE x(key,value,type,atom,id,parent,fullkey,path,json HIDDEN,root 
HIDDEN)\x00.%.*s\x00[%d]\x00$\x00json\x00json_array\x00json_array_length\x00json_extract\x00->\x00->>\x00json_insert\x00json_object\x00json_patch\x00json_quote\x00json_remove\x00json_replace\x00json_set\x00json_type\x00json_valid\x00json_group_array\x00json_group_object\x00json_each\x00json_tree\x00%s_node\x00data\x00DROP TABLE '%q'.'%q_node';DROP TABLE '%q'.'%q_rowid';DROP TABLE '%q'.'%q_parent';\x00RtreeMatchArg\x00SELECT * FROM %Q.%Q\x00UNIQUE constraint failed: %s.%s\x00rtree constraint failed: %s.(%s<=%s)\x00ALTER TABLE %Q.'%q_node' RENAME TO \"%w_node\";ALTER TABLE %Q.'%q_parent' RENAME TO \"%w_parent\";ALTER TABLE %Q.'%q_rowid' RENAME TO \"%w_rowid\";\x00SELECT stat FROM %Q.sqlite_stat1 WHERE tbl = '%q_rowid'\x00node\x00CREATE TABLE \"%w\".\"%w_rowid\"(rowid INTEGER PRIMARY KEY,nodeno\x00,a%d\x00);CREATE TABLE \"%w\".\"%w_node\"(nodeno INTEGER PRIMARY KEY,data);\x00CREATE TABLE \"%w\".\"%w_parent\"(nodeno INTEGER PRIMARY KEY,parentnode);\x00INSERT INTO \"%w\".\"%w_node\"VALUES(1,zeroblob(%d))\x00INSERT INTO\"%w\".\"%w_rowid\"(rowid,nodeno)VALUES(?1,?2)ON CONFLICT(rowid)DO UPDATE SET nodeno=excluded.nodeno\x00SELECT * FROM \"%w\".\"%w_rowid\" WHERE rowid=?1\x00UPDATE \"%w\".\"%w_rowid\"SET \x00a%d=coalesce(?%d,a%d)\x00a%d=?%d\x00 WHERE rowid=?1\x00INSERT OR REPLACE INTO '%q'.'%q_node' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_node' WHERE nodeno = ?1\x00SELECT nodeno FROM '%q'.'%q_rowid' WHERE rowid = ?1\x00INSERT OR REPLACE INTO '%q'.'%q_rowid' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_rowid' WHERE rowid = ?1\x00SELECT parentnode FROM '%q'.'%q_parent' WHERE nodeno = ?1\x00INSERT OR REPLACE INTO '%q'.'%q_parent' VALUES(?1, ?2)\x00DELETE FROM '%q'.'%q_parent' WHERE nodeno = ?1\x00PRAGMA %Q.page_size\x00SELECT length(data) FROM '%q'.'%q_node' WHERE nodeno = 1\x00undersize RTree blobs in \"%q_node\"\x00Wrong number of columns for an rtree table\x00Too few columns for an rtree table\x00Too many columns for an rtree table\x00Auxiliary rtree columns must be last\x00CREATE TABLE x(%.*s INT\x00,%.*s\x00);\x00,%.*s REAL\x00,%.*s INT\x00{%lld\x00 %g\x00}\x00Invalid argument to rtreedepth()\x00%z%s%z\x00SELECT data FROM %Q.'%q_node' WHERE nodeno=?\x00Node %lld missing from database\x00SELECT parentnode FROM %Q.'%q_parent' WHERE nodeno=?1\x00SELECT nodeno FROM %Q.'%q_rowid' WHERE rowid=?1\x00Mapping (%lld -> %lld) missing from %s table\x00%_rowid\x00%_parent\x00Found (%lld -> %lld) in %s table, expected (%lld -> %lld)\x00Dimension %d of cell %d on node %lld is corrupt\x00Dimension %d of cell %d on node %lld is corrupt relative to parent\x00Node %lld is too small (%d bytes)\x00Rtree depth out of range (%d)\x00Node %lld is too small for cell count of %d (%d bytes)\x00SELECT count(*) FROM %Q.'%q%s'\x00Wrong number of entries in %%%s table - expected %lld, actual %lld\x00SELECT * FROM %Q.'%q_rowid'\x00Schema corrupt or not an rtree\x00_rowid\x00_parent\x00END\x00wrong number of arguments to function rtreecheck()\x00[\x00[%!g,%!g],\x00[%!g,%!g]]\x00\x00CREATE TABLE x(_shape\x00,%s\x00rtree\x00fullscan\x00_shape does not contain a valid polygon\x00geopoly_overlap\x00geopoly_within\x00geopoly\x00geopoly_area\x00geopoly_blob\x00geopoly_json\x00geopoly_svg\x00geopoly_contains_point\x00geopoly_debug\x00geopoly_bbox\x00geopoly_xform\x00geopoly_regular\x00geopoly_ccw\x00geopoly_group_bbox\x00rtreenode\x00rtreedepth\x00rtreecheck\x00rtree_i32\x00corrupt fossil delta\x00DROP TRIGGER IF EXISTS temp.rbu_insert_tr;DROP TRIGGER IF EXISTS temp.rbu_update1_tr;DROP TRIGGER IF EXISTS temp.rbu_update2_tr;DROP TRIGGER IF 
EXISTS temp.rbu_delete_tr;\x00SELECT rbu_target_name(name, type='view') AS target, name FROM sqlite_schema WHERE type IN ('table', 'view') AND target IS NOT NULL %s ORDER BY name\x00AND rootpage!=0 AND rootpage IS NOT NULL\x00SELECT name, rootpage, sql IS NULL OR substr(8, 6)=='UNIQUE' FROM main.sqlite_schema WHERE type='index' AND tbl_name = ?\x00SELECT (sql COLLATE nocase BETWEEN 'CREATE VIRTUAL' AND 'CREATE VIRTUAM'), rootpage FROM sqlite_schema WHERE name=%Q\x00PRAGMA index_list=%Q\x00SELECT rootpage FROM sqlite_schema WHERE name = %Q\x00PRAGMA table_info=%Q\x00PRAGMA main.index_list = %Q\x00PRAGMA main.index_xinfo = %Q\x00SELECT * FROM '%q'\x00rbu_\x00rbu_rowid\x00table %q %s rbu_rowid column\x00may not have\x00requires\x00PRAGMA table_info(%Q)\x00column missing from %q: %s\x00%z%s\"%w\"\x00%z%s%s\"%w\"%s\x00SELECT max(_rowid_) FROM \"%s%w\"\x00 WHERE _rowid_ > %lld \x00 DESC\x00quote(\x00||','||\x00SELECT %s FROM \"%s%w\" ORDER BY %s LIMIT 1\x00 WHERE (%s) > (%s) \x00_rowid_\x00%z%s \"%w\" COLLATE %Q\x00%z%s \"rbu_imp_%d%w\" COLLATE %Q DESC\x00%z%s quote(\"rbu_imp_%d%w\")\x00SELECT %s FROM \"rbu_imp_%w\" ORDER BY %s LIMIT 1\x00%z%s%s\x00(%s) > (%s)\x00%z%s(%.*s) COLLATE %Q\x00%z%s\"%w\" COLLATE %Q\x00%z%s\"rbu_imp_%d%w\"%s\x00%z%s\"rbu_imp_%d%w\" %s COLLATE %Q\x00%z%s\"rbu_imp_%d%w\" IS ?\x00%z%s%s.\"%w\"\x00%z%sNULL\x00%z, %s._rowid_\x00_rowid_ = ?%d\x00%z%sc%d=?%d\x00_rowid_ = (SELECT id FROM rbu_imposter2 WHERE %z)\x00%z%s\"%w\"=?%d\x00invalid rbu_control value\x00%z%s\"%w\"=rbu_delta(\"%w\", ?%d)\x00%z%s\"%w\"=rbu_fossil_delta(\"%w\", ?%d)\x00PRIMARY KEY(\x00%z%s\"%w\"%s\x00%z)\x00SELECT name FROM sqlite_schema WHERE rootpage = ?\x00%z%sc%d %s COLLATE %Q\x00%z%sc%d%s\x00%z, id INTEGER\x00CREATE TABLE rbu_imposter2(%z, PRIMARY KEY(%z)) WITHOUT ROWID\x00PRIMARY KEY \x00%z%s\"%w\" %s %sCOLLATE %Q%s\x00 NOT NULL\x00%z, %z\x00CREATE TABLE \"rbu_imp_%w\"(%z)%s\x00 WITHOUT ROWID\x00INSERT INTO %s.'rbu_tmp_%q'(rbu_control,%s%s) VALUES(%z)\x00SELECT trim(sql) FROM sqlite_schema WHERE type='index' AND name=?\x00 LIMIT -1 OFFSET %d\x00CREATE TABLE \"rbu_imp_%w\"( %s, PRIMARY KEY( %s ) ) WITHOUT ROWID\x00INSERT INTO \"rbu_imp_%w\" VALUES(%s)\x00DELETE FROM \"rbu_imp_%w\" WHERE %s\x00SELECT %s, 0 AS rbu_control FROM '%q' %s %s %s ORDER BY %s%s\x00AND\x00WHERE\x00SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' %s ORDER BY %s%s\x00SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' %s UNION ALL SELECT %s, rbu_control FROM '%q' %s %s typeof(rbu_control)='integer' AND rbu_control!=1 ORDER BY %s%s\x00rbu_imp_\x00INSERT INTO \"%s%w\"(%s%s) VALUES(%s)\x00, _rowid_\x00DELETE FROM \"%s%w\" WHERE %s\x00, rbu_rowid\x00CREATE TABLE IF NOT EXISTS %s.'rbu_tmp_%q' AS SELECT *%s FROM '%q' WHERE 0;\x00, 0 AS rbu_rowid\x00CREATE TEMP TRIGGER rbu_delete_tr BEFORE DELETE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(3, %s);END;CREATE TEMP TRIGGER rbu_update1_tr BEFORE UPDATE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(3, %s);END;CREATE TEMP TRIGGER rbu_update2_tr AFTER UPDATE ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(4, %s);END;\x00CREATE TEMP TRIGGER rbu_insert_tr AFTER INSERT ON \"%s%w\" BEGIN SELECT rbu_tmp_insert(0, %s);END;\x00,_rowid_ \x00,rbu_rowid\x00SELECT %s,%s rbu_control%s FROM '%q'%s %s %s %s\x000 AS \x00UPDATE \"%s%w\" SET %s WHERE %s\x00SELECT k, v FROM %s.rbu_state\x00file:///%s-vacuum?modeof=%s\x00ATTACH %Q AS stat\x00CREATE TABLE IF NOT EXISTS %s.rbu_state(k INTEGER PRIMARY KEY, v)\x00cannot vacuum wal mode 
database\x00file:%s-vactmp?rbu_memory=1%s%s\x00&\x00rbu_tmp_insert\x00rbu_fossil_delta\x00rbu_target_name\x00SELECT * FROM sqlite_schema\x00rbu vfs not found\x00PRAGMA main.wal_checkpoint=restart\x00rbu_exclusive_checkpoint\x00%s-oal\x00%s-wal\x00PRAGMA schema_version\x00PRAGMA schema_version = %d\x00INSERT OR REPLACE INTO %s.rbu_state(k, v) VALUES (%d, %d), (%d, %Q), (%d, %Q), (%d, %d), (%d, %d), (%d, %lld), (%d, %lld), (%d, %lld), (%d, %lld), (%d, %Q) \x00PRAGMA main.%s\x00PRAGMA main.%s = %d\x00PRAGMA writable_schema=1\x00SELECT sql FROM sqlite_schema WHERE sql!='' AND rootpage!=0 AND name!='sqlite_sequence' ORDER BY type DESC\x00SELECT * FROM sqlite_schema WHERE rootpage=0 OR rootpage IS NULL\x00INSERT INTO sqlite_schema VALUES(?,?,?,?,?)\x00PRAGMA writable_schema=0\x00DELETE FROM %s.'rbu_tmp_%q'\x00rbu_state mismatch error\x00rbu_vfs_%d\x00SELECT count(*) FROM sqlite_schema WHERE type='index' AND tbl_name = %Q\x00rbu_index_cnt\x00SELECT 1 FROM sqlite_schema WHERE tbl_name = 'rbu_count'\x00SELECT sum(cnt * (1 + rbu_index_cnt(rbu_target_name(tbl))))FROM rbu_count\x00cannot update wal mode database\x00database modified during rbu %s\x00vacuum\x00update\x00BEGIN IMMEDIATE\x00PRAGMA journal_mode=off\x00-vactmp\x00DELETE FROM stat.rbu_state\x00rbu/zipvfs setup error\x00rbu(%s)/%z\x00rbu_memory\x00SELECT 0, 'tbl', '', 0, '', 1 UNION ALL SELECT 1, 'idx', '', 0, '', 2 UNION ALL SELECT 2, 'stat', '', 0, '', 0\x00PRAGMA '%q'.table_info('%q')\x00%z%s\"%w\".\"%w\".\"%w\"=\"%w\".\"%w\".\"%w\"\x00%z%s\"%w\".\"%w\".\"%w\" IS NOT \"%w\".\"%w\".\"%w\"\x00 OR \x00SELECT * FROM \"%w\".\"%w\" WHERE NOT EXISTS ( SELECT 1 FROM \"%w\".\"%w\" WHERE %s)\x00SELECT * FROM \"%w\".\"%w\", \"%w\".\"%w\" WHERE %s AND (%z)\x00table schemas do not match\x00SELECT tbl, ?2, stat FROM %Q.sqlite_stat1 WHERE tbl IS ?1 AND idx IS (CASE WHEN ?2=X'' THEN NULL ELSE ?2 END)\x00SELECT * FROM \x00 WHERE \x00 IS ?\x00SAVEPOINT changeset\x00RELEASE changeset\x00UPDATE main.\x00 SET \x00 = ?\x00idx IS CASE WHEN length(?4)=0 AND typeof(?4)='blob' THEN NULL ELSE ?4 END \x00DELETE FROM main.\x00 AND (?\x00AND \x00INSERT INTO main.\x00) VALUES(?\x00, ?\x00INSERT INTO main.sqlite_stat1 VALUES(?1, CASE WHEN length(?2)=0 AND typeof(?2)='blob' THEN NULL ELSE ?2 END, ?3)\x00DELETE FROM main.sqlite_stat1 WHERE tbl=?1 AND idx IS CASE WHEN length(?2)=0 AND typeof(?2)='blob' THEN NULL ELSE ?2 END AND (?4 OR stat IS ?3)\x00SAVEPOINT replace_op\x00RELEASE replace_op\x00SAVEPOINT changeset_apply\x00PRAGMA defer_foreign_keys = 1\x00sqlite3changeset_apply(): no such table: %s\x00sqlite3changeset_apply(): table %s has %d columns, expected %d or more\x00sqlite3changeset_apply(): primary key mismatch for table %s\x00PRAGMA defer_foreign_keys = 0\x00RELEASE changeset_apply\x00ROLLBACK TO changeset_apply\x00fts5: parser stack overflow\x00fts5: syntax error near \"%.*s\"\x00%z%.*s\x00wrong number of arguments to function highlight()\x00wrong number of arguments to function snippet()\x00snippet\x00highlight\x00bm25\x00prefix\x00malformed prefix=... directive\x00too many prefix indexes (max %d)\x00prefix length out of range (max 999)\x00tokenize\x00multiple tokenize=... directives\x00parse error in tokenize directive\x00content\x00multiple content=... directives\x00%Q.%Q\x00content_rowid\x00multiple content_rowid=... directives\x00columnsize\x00malformed columnsize=... directive\x00columns\x00malformed detail=... 
directive\x00unrecognized option: \"%.*s\"\x00reserved fts5 column name: %s\x00unindexed\x00unrecognized column option: %s\x00T.%Q\x00, T.%Q\x00, T.c%d\x00reserved fts5 table name: %s\x00parse error in \"%s\"\x00docsize\x00%Q.'%q_%s'\x00CREATE TABLE x(\x00%z%s%Q\x00%z, %Q HIDDEN, %s HIDDEN)\x00pgsz\x00hashsize\x00automerge\x00usermerge\x00crisismerge\x00SELECT k, v FROM %Q.'%q_config'\x00version\x00invalid fts5 file format (found %d, expected %d) - run 'rebuild'\x00unterminated string\x00fts5: syntax error near \"%.1s\"\x00OR\x00NOT\x00NEAR\x00expected integer, got \"%.*s\"\x00fts5: column queries are not supported (detail=none)\x00fts5: %s queries are not supported (detail!=full)\x00phrase\x00block\x00REPLACE INTO '%q'.'%q_data'(id, block) VALUES(?,?)\x00DELETE FROM '%q'.'%q_data' WHERE id>=? AND id<=?\x00DELETE FROM '%q'.'%q_idx' WHERE segid=?\x00PRAGMA %Q.data_version\x00SELECT pgno FROM '%q'.'%q_idx' WHERE segid=? AND term<=? ORDER BY term DESC LIMIT 1\x00INSERT INTO '%q'.'%q_idx'(segid,term,pgno) VALUES(?,?,?)\x00%s_data\x00id INTEGER PRIMARY KEY, block BLOB\x00segid, term, pgno, PRIMARY KEY(segid, term)\x00SELECT segid, term, (pgno>>1), (pgno&1) FROM %Q.'%q_idx' WHERE segid=%d ORDER BY 1, 2\x00\x00\x00\x00\x00\x00recursively defined fts5 content table\x00SELECT rowid, rank FROM %Q.%Q ORDER BY %s(\"%w\"%s%s) %s\x00DESC\x00ASC\x00reads\x00unknown special query: %.*s\x00SELECT %s\x00no such function: %s\x00parse error in rank function: %s\x00%s: table does not support scanning\x00delete-all\x00'delete-all' may only be used with a contentless or external content fts5 table\x00rebuild\x00'rebuild' may not be used with a contentless fts5 table\x00merge\x00integrity-check\x00cannot %s contentless fts5 table: %s\x00DELETE from\x00no such cursor: %lld\x00no such tokenizer: %s\x00error in tokenizer constructor\x00fts5_api_ptr\x00fts5: 2023-03-22 11:56:21 0d1fc92f94cb6b76bffe3ec34d69cffde2924203304e8ffc4155597af0c191da\x00config\x00fts5\x00fts5_source_id\x00SELECT %s FROM %s T WHERE T.%Q >= ? AND T.%Q <= ? ORDER BY T.%Q ASC\x00SELECT %s FROM %s T WHERE T.%Q <= ? AND T.%Q >= ? 
ORDER BY T.%Q DESC\x00SELECT %s FROM %s T WHERE T.%Q=?\x00INSERT INTO %Q.'%q_content' VALUES(%s)\x00REPLACE INTO %Q.'%q_content' VALUES(%s)\x00DELETE FROM %Q.'%q_content' WHERE id=?\x00REPLACE INTO %Q.'%q_docsize' VALUES(?,?)\x00DELETE FROM %Q.'%q_docsize' WHERE id=?\x00SELECT sz FROM %Q.'%q_docsize' WHERE id=?\x00REPLACE INTO %Q.'%q_config' VALUES(?,?)\x00SELECT %s FROM %s AS T\x00DROP TABLE IF EXISTS %Q.'%q_data';DROP TABLE IF EXISTS %Q.'%q_idx';DROP TABLE IF EXISTS %Q.'%q_config';\x00DROP TABLE IF EXISTS %Q.'%q_docsize';\x00DROP TABLE IF EXISTS %Q.'%q_content';\x00ALTER TABLE %Q.'%q_%s' RENAME TO '%q_%s';\x00CREATE TABLE %Q.'%q_%q'(%s)%s\x00fts5: error creating shadow table %q_%s: %s\x00id INTEGER PRIMARY KEY\x00, c%d\x00id INTEGER PRIMARY KEY, sz BLOB\x00k PRIMARY KEY, v\x00DELETE FROM %Q.'%q_data';DELETE FROM %Q.'%q_idx';\x00DELETE FROM %Q.'%q_docsize';\x00SELECT count(*) FROM %Q.'%q_%s'\x00tokenchars\x00separators\x00L* N* Co\x00categories\x00remove_diacritics\x00unicode61\x00al\x00ance\x00ence\x00er\x00ic\x00able\x00ible\x00ant\x00ement\x00ment\x00ent\x00ion\x00ou\x00ism\x00ate\x00iti\x00ous\x00ive\x00ize\x00at\x00bl\x00ble\x00iz\x00ational\x00tional\x00tion\x00enci\x00anci\x00izer\x00logi\x00bli\x00alli\x00entli\x00eli\x00e\x00ousli\x00ization\x00ation\x00ator\x00alism\x00iveness\x00fulness\x00ful\x00ousness\x00aliti\x00iviti\x00biliti\x00ical\x00ness\x00icate\x00iciti\x00ative\x00alize\x00eed\x00ee\x00ed\x00ing\x00case_sensitive\x00ascii\x00porter\x00trigram\x00col\x00row\x00instance\x00fts5vocab: unknown table type: %Q\x00CREATE TABlE vocab(term, col, doc, cnt)\x00CREATE TABlE vocab(term, doc, cnt)\x00CREATE TABlE vocab(term, doc, col, offset)\x00wrong number of vtable arguments\x00recursive definition for %s.%s\x00SELECT t.%Q FROM %Q.%Q AS t WHERE t.%Q MATCH '*id'\x00no such fts5 table: %s.%s\x00fts5vocab\x002023-03-22 11:56:21 0d1fc92f94cb6b76bffe3ec34d69cffde2924203304e8ffc4155597af0c191da\x00" var ts = (*reflect.StringHeader)(unsafe.Pointer(&ts1)).Data diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/sqlite/nodmesg.go temporal-1.22.5/src/vendor/modernc.org/sqlite/nodmesg.go --- temporal-1.21.5-1/src/vendor/modernc.org/sqlite/nodmesg.go 1970-01-01 00:00:00.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/sqlite/nodmesg.go 2024-02-23 09:46:16.000000000 +0000 @@ -0,0 +1,12 @@ +// Copyright 2023 The Sqlite Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !sqlite.dmesg +// +build !sqlite.dmesg + +package sqlite // import "modernc.org/sqlite" + +const dmesgs = false + +func dmesg(s string, args ...interface{}) {} diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/sqlite/sqlite.go temporal-1.22.5/src/vendor/modernc.org/sqlite/sqlite.go --- temporal-1.21.5-1/src/vendor/modernc.org/sqlite/sqlite.go 2023-09-29 14:03:35.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/sqlite/sqlite.go 2024-02-23 09:46:16.000000000 +0000 @@ -10,11 +10,14 @@ "context" "database/sql" "database/sql/driver" + "errors" "fmt" "io" "math" + "math/bits" "net/url" "reflect" + "runtime" "strconv" "strings" "sync" @@ -1319,7 +1322,12 @@ // Begin starts a transaction. // // Deprecated: Drivers should implement ConnBeginTx instead (or additionally). 
-func (c *conn) Begin() (driver.Tx, error) { +func (c *conn) Begin() (dt driver.Tx, err error) { + if dmesgs { + defer func() { + dmesg("conn %p: (driver.Tx %p, err %v)", c, dt, err) + }() + } return c.begin(context.Background(), driver.TxOptions{}) } @@ -1333,7 +1341,12 @@ // Because the sql package maintains a free pool of connections and only calls // Close when there's a surplus of idle connections, it shouldn't be necessary // for drivers to do their own connection caching. -func (c *conn) Close() error { +func (c *conn) Close() (err error) { + if dmesgs { + defer func() { + dmesg("conn %p: err %v", c, err) + }() + } c.Lock() // Defend against race with .interrupt invoked by context handling. defer c.Unlock() @@ -1362,27 +1375,106 @@ return nil } +// FunctionImpl describes an [application-defined SQL function]. If Scalar is +// set, it is treated as a scalar function; otherwise, it is treated as an +// aggregate function using MakeAggregate. +// +// [application-defined SQL function]: https://sqlite.org/appfunc.html +type FunctionImpl struct { + // NArgs is the required number of arguments that the function accepts. + // If NArgs is negative, then the function is variadic. + NArgs int32 + + // If Deterministic is true, the function must always give the same + // output when the input parameters are the same. This enables functions + // to be used in additional contexts like the WHERE clause of partial + // indexes and enables additional optimizations. + // + // See https://sqlite.org/c3ref/c_deterministic.html#sqlitedeterministic + // for more details. + Deterministic bool + + // Scalar is called when a scalar function is invoked in SQL. The + // argument Values are not valid past the return of the function. + Scalar func(ctx *FunctionContext, args []driver.Value) (driver.Value, error) + + // MakeAggregate is called at the beginning of each evaluation of an + // aggregate function. + MakeAggregate func(ctx FunctionContext) (AggregateFunction, error) +} + +// An AggregateFunction is an invocation of an aggregate or window function. See +// the documentation for [aggregate function callbacks] and [application-defined +// window functions] for an overview. +// +// [aggregate function callbacks]: https://www.sqlite.org/appfunc.html#the_aggregate_function_callbacks +// [application-defined window functions]: https://www.sqlite.org/windowfunctions.html#user_defined_aggregate_window_functions +type AggregateFunction interface { + // Step is called for each row of an aggregate function's SQL + // invocation. The argument Values are not valid past the return of the + // function. + Step(ctx *FunctionContext, rowArgs []driver.Value) error + + // WindowInverse is called to remove the oldest presently aggregated + // result of Step from the current window. The arguments are those + // passed to Step for the row being removed. The argument Values are not + // valid past the return of the function. + WindowInverse(ctx *FunctionContext, rowArgs []driver.Value) error + + // WindowValue is called to get the current value of an aggregate + // function. This is used to return the final value of the function, + // whether it is used as a window function or not. + WindowValue(ctx *FunctionContext) (driver.Value, error) + + // Final is called after all of the aggregate function's input rows have + // been stepped through. No other methods will be called on the + // AggregateFunction after calling Final. WindowValue returns the value + // from the function. 
+ Final(ctx *FunctionContext) +} + type userDefinedFunction struct { zFuncName uintptr nArg int32 eTextRep int32 - xFunc func(*libc.TLS, uintptr, int32, uintptr) + pApp uintptr + scalar bool freeOnce sync.Once } func (c *conn) createFunctionInternal(fun *userDefinedFunction) error { - if rc := sqlite3.Xsqlite3_create_function( - c.tls, - c.db, - fun.zFuncName, - fun.nArg, - fun.eTextRep, - 0, - *(*uintptr)(unsafe.Pointer(&fun.xFunc)), - 0, - 0, - ); rc != sqlite3.SQLITE_OK { + var rc int32 + + if fun.scalar { + rc = sqlite3.Xsqlite3_create_function( + c.tls, + c.db, + fun.zFuncName, + fun.nArg, + fun.eTextRep, + fun.pApp, + cFuncPointer(funcTrampoline), + 0, + 0, + ) + } else { + rc = sqlite3.Xsqlite3_create_window_function( + c.tls, + c.db, + fun.zFuncName, + fun.nArg, + fun.eTextRep, + fun.pApp, + cFuncPointer(stepTrampoline), + cFuncPointer(finalTrampoline), + cFuncPointer(valueTrampoline), + cFuncPointer(inverseTrampoline), + 0, + ) + } + + if rc != sqlite3.SQLITE_OK { return c.errstr(rc) } return nil @@ -1396,7 +1488,12 @@ // Exec may return ErrSkip. // // Deprecated: Drivers should implement ExecerContext instead. -func (c *conn) Exec(query string, args []driver.Value) (driver.Result, error) { +func (c *conn) Exec(query string, args []driver.Value) (dr driver.Result, err error) { + if dmesgs { + defer func() { + dmesg("conn %p, query %q, args %v: (driver.Result %p, err %v)", c, query, args, dr, err) + }() + } return c.exec(context.Background(), query, toNamedValues(args)) } @@ -1416,7 +1513,12 @@ } // Prepare returns a prepared statement, bound to this connection. -func (c *conn) Prepare(query string) (driver.Stmt, error) { +func (c *conn) Prepare(query string) (ds driver.Stmt, err error) { + if dmesgs { + defer func() { + dmesg("conn %p, query %q: (driver.Stmt %p, err %v)", c, query, ds, err) + }() + } return c.prepare(context.Background(), query) } @@ -1433,7 +1535,12 @@ // Query may return ErrSkip. // // Deprecated: Drivers should implement QueryerContext instead. -func (c *conn) Query(query string, args []driver.Value) (driver.Rows, error) { +func (c *conn) Query(query string, args []driver.Value) (dr driver.Rows, err error) { + if dmesgs { + defer func() { + dmesg("conn %p, query %q, args %v: (driver.Rows %p, err %v)", c, query, args, dr, err) + }() + } return c.query(context.Background(), query, toNamedValues(args)) } @@ -1491,7 +1598,12 @@ // not specify one, which SQLite maps to "deferred". More information is // available at // https://www.sqlite.org/lang_transaction.html#deferred_immediate_and_exclusive_transactions -func (d *Driver) Open(name string) (driver.Conn, error) { +func (d *Driver) Open(name string) (conn driver.Conn, err error) { + if dmesgs { + defer func() { + dmesg("name %q: (driver.Conn %p, err %v)", name, conn, err) + }() + } c, err := newConn(name) if err != nil { return nil, err @@ -1508,10 +1620,38 @@ // FunctionContext represents the context user defined functions execute in. // Fields and/or methods of this type may get addedd in the future. -type FunctionContext struct{} +type FunctionContext struct { + tls *libc.TLS + ctx uintptr +} const sqliteValPtrSize = unsafe.Sizeof(&sqlite3.Sqlite3_value{}) +// RegisterFunction registers a function named zFuncName with nArg arguments. +// Passing -1 for nArg indicates the function is variadic. 
The FunctionImpl +// determines whether the function is deterministic or not, and whether it is a +// scalar function (when Scalar is defined) or an aggregate function (when +// Scalar is not defined and MakeAggregate is defined). +// +// The new function will be available to all new connections opened after +// executing RegisterFunction. +func RegisterFunction( + zFuncName string, + impl *FunctionImpl, +) error { + return registerFunction(zFuncName, impl) +} + +// MustRegisterFunction is like RegisterFunction but panics on error. +func MustRegisterFunction( + zFuncName string, + impl *FunctionImpl, +) { + if err := RegisterFunction(zFuncName, impl); err != nil { + panic(err) + } +} + // RegisterScalarFunction registers a scalar function named zFuncName with nArg // arguments. Passing -1 for nArg indicates the function is variadic. // @@ -1521,8 +1661,13 @@ zFuncName string, nArg int32, xFunc func(ctx *FunctionContext, args []driver.Value) (driver.Value, error), -) error { - return registerScalarFunction(zFuncName, nArg, sqlite3.SQLITE_UTF8, xFunc) +) (err error) { + if dmesgs { + defer func() { + dmesg("zFuncName %q, nArg %v, xFunc %p: err %v", zFuncName, nArg, xFunc, err) + }() + } + return registerFunction(zFuncName, &FunctionImpl{NArgs: nArg, Scalar: xFunc, Deterministic: false}) } // MustRegisterScalarFunction is like RegisterScalarFunction but panics on @@ -1532,6 +1677,9 @@ nArg int32, xFunc func(ctx *FunctionContext, args []driver.Value) (driver.Value, error), ) { + if dmesgs { + dmesg("zFuncName %q, nArg %v, xFunc %p", zFuncName, nArg, xFunc) + } if err := RegisterScalarFunction(zFuncName, nArg, xFunc); err != nil { panic(err) } @@ -1544,6 +1692,9 @@ nArg int32, xFunc func(ctx *FunctionContext, args []driver.Value) (driver.Value, error), ) { + if dmesgs { + dmesg("zFuncName %q, nArg %v, xFunc %p", zFuncName, nArg, xFunc) + } if err := RegisterDeterministicScalarFunction(zFuncName, nArg, xFunc); err != nil { panic(err) } @@ -1560,15 +1711,18 @@ zFuncName string, nArg int32, xFunc func(ctx *FunctionContext, args []driver.Value) (driver.Value, error), -) error { - return registerScalarFunction(zFuncName, nArg, sqlite3.SQLITE_UTF8|sqlite3.SQLITE_DETERMINISTIC, xFunc) +) (err error) { + if dmesgs { + defer func() { + dmesg("zFuncName %q, nArg %v, xFunc %p: err %v", zFuncName, nArg, xFunc, err) + }() + } + return registerFunction(zFuncName, &FunctionImpl{NArgs: nArg, Scalar: xFunc, Deterministic: true}) } -func registerScalarFunction( +func registerFunction( zFuncName string, - nArg int32, - eTextRep int32, - xFunc func(ctx *FunctionContext, args []driver.Value) (driver.Value, error), + impl *FunctionImpl, ) error { if _, ok := d.udfs[zFuncName]; ok { @@ -1581,91 +1735,360 @@ return err } + var textrep int32 = sqlite3.SQLITE_UTF8 + + if impl.Deterministic { + textrep |= sqlite3.SQLITE_DETERMINISTIC + } + udf := &userDefinedFunction{ zFuncName: name, - nArg: nArg, - eTextRep: eTextRep, - xFunc: func(tls *libc.TLS, ctx uintptr, argc int32, argv uintptr) { - setErrorResult := func(res error) { - errmsg, cerr := libc.CString(res.Error()) - if cerr != nil { - panic(cerr) - } - defer libc.Xfree(tls, errmsg) - sqlite3.Xsqlite3_result_error(tls, ctx, errmsg, -1) - sqlite3.Xsqlite3_result_error_code(tls, ctx, sqlite3.SQLITE_ERROR) - } - - args := make([]driver.Value, argc) - for i := int32(0); i < argc; i++ { - valPtr := *(*uintptr)(unsafe.Pointer(argv + uintptr(i)*sqliteValPtrSize)) - - switch valType := sqlite3.Xsqlite3_value_type(tls, valPtr); valType { - case sqlite3.SQLITE_TEXT: - args[i] 
= libc.GoString(sqlite3.Xsqlite3_value_text(tls, valPtr)) - case sqlite3.SQLITE_INTEGER: - args[i] = sqlite3.Xsqlite3_value_int64(tls, valPtr) - case sqlite3.SQLITE_FLOAT: - args[i] = sqlite3.Xsqlite3_value_double(tls, valPtr) - case sqlite3.SQLITE_NULL: - args[i] = nil - case sqlite3.SQLITE_BLOB: - size := sqlite3.Xsqlite3_value_bytes(tls, valPtr) - blobPtr := sqlite3.Xsqlite3_value_blob(tls, valPtr) - v := make([]byte, size) - copy(v, (*libc.RawMem)(unsafe.Pointer(blobPtr))[:size:size]) - args[i] = v - default: - panic(fmt.Sprintf("unexpected argument type %q passed by sqlite", valType)) - } - } + nArg: impl.NArgs, + eTextRep: textrep, + } - res, err := xFunc(&FunctionContext{}, args) - if err != nil { - setErrorResult(err) - return - } + if impl.Scalar != nil { + xFuncs.mu.Lock() + id := xFuncs.ids.next() + xFuncs.m[id] = impl.Scalar + xFuncs.mu.Unlock() + + udf.scalar = true + udf.pApp = id + } else { + xAggregateFactories.mu.Lock() + id := xAggregateFactories.ids.next() + xAggregateFactories.m[id] = impl.MakeAggregate + xAggregateFactories.mu.Unlock() - switch resTyped := res.(type) { - case nil: - sqlite3.Xsqlite3_result_null(tls, ctx) - case int64: - sqlite3.Xsqlite3_result_int64(tls, ctx, resTyped) - case float64: - sqlite3.Xsqlite3_result_double(tls, ctx, resTyped) - case bool: - sqlite3.Xsqlite3_result_int(tls, ctx, libc.Bool32(resTyped)) - case time.Time: - sqlite3.Xsqlite3_result_int64(tls, ctx, resTyped.Unix()) - case string: - size := int32(len(resTyped)) - cstr, err := libc.CString(resTyped) - if err != nil { - panic(err) - } - defer libc.Xfree(tls, cstr) - sqlite3.Xsqlite3_result_text(tls, ctx, cstr, size, sqlite3.SQLITE_TRANSIENT) - case []byte: - size := int32(len(resTyped)) - if size == 0 { - sqlite3.Xsqlite3_result_zeroblob(tls, ctx, 0) - return - } - p := libc.Xmalloc(tls, types.Size_t(size)) - if p == 0 { - panic(fmt.Sprintf("unable to allocate space for blob: %d", size)) - } - defer libc.Xfree(tls, p) - copy((*libc.RawMem)(unsafe.Pointer(p))[:size:size], resTyped) - - sqlite3.Xsqlite3_result_blob(tls, ctx, p, size, sqlite3.SQLITE_TRANSIENT) - default: - setErrorResult(fmt.Errorf("function did not return a valid driver.Value: %T", resTyped)) - return - } - }, + udf.pApp = id } + d.udfs[zFuncName] = udf return nil } + +func origin(skip int) string { + pc, fn, fl, _ := runtime.Caller(skip) + f := runtime.FuncForPC(pc) + var fns string + if f != nil { + fns = f.Name() + if x := strings.LastIndex(fns, "."); x > 0 { + fns = fns[x+1:] + } + } + return fmt.Sprintf("%s:%d:%s", fn, fl, fns) +} + +func errorResultFunction(tls *libc.TLS, ctx uintptr) func(error) { + return func(res error) { + errmsg, cerr := libc.CString(res.Error()) + if cerr != nil { + panic(cerr) + } + defer libc.Xfree(tls, errmsg) + sqlite3.Xsqlite3_result_error(tls, ctx, errmsg, -1) + sqlite3.Xsqlite3_result_error_code(tls, ctx, sqlite3.SQLITE_ERROR) + } +} + +func functionArgs(tls *libc.TLS, argc int32, argv uintptr) []driver.Value { + args := make([]driver.Value, argc) + for i := int32(0); i < argc; i++ { + valPtr := *(*uintptr)(unsafe.Pointer(argv + uintptr(i)*sqliteValPtrSize)) + + switch valType := sqlite3.Xsqlite3_value_type(tls, valPtr); valType { + case sqlite3.SQLITE_TEXT: + args[i] = libc.GoString(sqlite3.Xsqlite3_value_text(tls, valPtr)) + case sqlite3.SQLITE_INTEGER: + args[i] = sqlite3.Xsqlite3_value_int64(tls, valPtr) + case sqlite3.SQLITE_FLOAT: + args[i] = sqlite3.Xsqlite3_value_double(tls, valPtr) + case sqlite3.SQLITE_NULL: + args[i] = nil + case sqlite3.SQLITE_BLOB: + size := 
sqlite3.Xsqlite3_value_bytes(tls, valPtr) + blobPtr := sqlite3.Xsqlite3_value_blob(tls, valPtr) + v := make([]byte, size) + copy(v, (*libc.RawMem)(unsafe.Pointer(blobPtr))[:size:size]) + args[i] = v + default: + panic(fmt.Sprintf("unexpected argument type %q passed by sqlite", valType)) + } + } + + return args +} + +func functionReturnValue(tls *libc.TLS, ctx uintptr, res driver.Value) error { + switch resTyped := res.(type) { + case nil: + sqlite3.Xsqlite3_result_null(tls, ctx) + case int64: + sqlite3.Xsqlite3_result_int64(tls, ctx, resTyped) + case float64: + sqlite3.Xsqlite3_result_double(tls, ctx, resTyped) + case bool: + sqlite3.Xsqlite3_result_int(tls, ctx, libc.Bool32(resTyped)) + case time.Time: + sqlite3.Xsqlite3_result_int64(tls, ctx, resTyped.Unix()) + case string: + size := int32(len(resTyped)) + cstr, err := libc.CString(resTyped) + if err != nil { + panic(err) + } + defer libc.Xfree(tls, cstr) + sqlite3.Xsqlite3_result_text(tls, ctx, cstr, size, sqlite3.SQLITE_TRANSIENT) + case []byte: + size := int32(len(resTyped)) + if size == 0 { + sqlite3.Xsqlite3_result_zeroblob(tls, ctx, 0) + return nil + } + p := libc.Xmalloc(tls, types.Size_t(size)) + if p == 0 { + panic(fmt.Sprintf("unable to allocate space for blob: %d", size)) + } + defer libc.Xfree(tls, p) + copy((*libc.RawMem)(unsafe.Pointer(p))[:size:size], resTyped) + + sqlite3.Xsqlite3_result_blob(tls, ctx, p, size, sqlite3.SQLITE_TRANSIENT) + default: + return fmt.Errorf("function did not return a valid driver.Value: %T", resTyped) + } + + return nil +} + +// The below is all taken from zombiezen.com/go/sqlite. Aggregate functions need +// to maintain state (for instance, the count of values seen so far). We give +// each aggregate function an ID, generated by idGen, and put that in the pApp +// argument to sqlite3_create_function. We track this on the Go side in +// xAggregateFactories. +// +// When (if) the function is called is called by a query, we call the +// MakeAggregate factory function to set it up, and track that in +// xAggregateContext, retrieving it via sqlite3_aggregate_context. +// +// We also need to ensure that, for both aggregate and scalar functions, the +// function pointer we pass to SQLite meets certain rules on the Go side, so +// that the pointer remains valid. 
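// Editorial sketch (not part of the upstream patch or of sqlite.go): the
// comment above describes how the new FunctionImpl / AggregateFunction API
// tracks registered functions by ID. Below is a minimal, hedged usage example
// of that API, assuming the exported RegisterFunction/MustRegisterFunction,
// FunctionImpl, FunctionContext and AggregateFunction symbols introduced in
// this hunk and the driver name "sqlite" registered by modernc.org/sqlite.
// The function names "double" and "sum_int64" are illustrative only.
package main

import (
	"database/sql"
	"database/sql/driver"
	"errors"
	"fmt"

	sqlite "modernc.org/sqlite"
)

// sumInt64 implements AggregateFunction: it sums INTEGER arguments and can
// also be used as a window function via WindowInverse/WindowValue.
type sumInt64 struct{ total int64 }

func (s *sumInt64) Step(ctx *sqlite.FunctionContext, args []driver.Value) error {
	v, ok := args[0].(int64)
	if !ok {
		return errors.New("sum_int64: expected INTEGER argument")
	}
	s.total += v
	return nil
}

func (s *sumInt64) WindowInverse(ctx *sqlite.FunctionContext, args []driver.Value) error {
	if v, ok := args[0].(int64); ok {
		s.total -= v
	}
	return nil
}

func (s *sumInt64) WindowValue(ctx *sqlite.FunctionContext) (driver.Value, error) {
	return s.total, nil
}

func (s *sumInt64) Final(ctx *sqlite.FunctionContext) {}

func init() {
	// Deterministic scalar function: double(x) returns 2*x.
	sqlite.MustRegisterFunction("double", &sqlite.FunctionImpl{
		NArgs:         1,
		Deterministic: true,
		Scalar: func(ctx *sqlite.FunctionContext, args []driver.Value) (driver.Value, error) {
			v, ok := args[0].(int64)
			if !ok {
				return nil, errors.New("double: expected INTEGER argument")
			}
			return v * 2, nil
		},
	})

	// Aggregate (and window) function backed by the AggregateFunction
	// interface; MakeAggregate is called per SQL invocation to create state.
	sqlite.MustRegisterFunction("sum_int64", &sqlite.FunctionImpl{
		NArgs: 1,
		MakeAggregate: func(ctx sqlite.FunctionContext) (sqlite.AggregateFunction, error) {
			return &sumInt64{}, nil
		},
	})
}

func main() {
	// Functions registered above are visible to connections opened afterwards.
	db, err := sql.Open("sqlite", ":memory:")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	var out int64
	if err := db.QueryRow(`SELECT double(21)`).Scan(&out); err != nil {
		panic(err)
	}
	fmt.Println(out) // 42
}
// End of editorial sketch; the patch hunk for sqlite.go continues below.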
+var ( + xFuncs = struct { + mu sync.RWMutex + m map[uintptr]func(*FunctionContext, []driver.Value) (driver.Value, error) + ids idGen + }{ + m: make(map[uintptr]func(*FunctionContext, []driver.Value) (driver.Value, error)), + } + + xAggregateFactories = struct { + mu sync.RWMutex + m map[uintptr]func(FunctionContext) (AggregateFunction, error) + ids idGen + }{ + m: make(map[uintptr]func(FunctionContext) (AggregateFunction, error)), + } + + xAggregateContext = struct { + mu sync.RWMutex + m map[uintptr]AggregateFunction + ids idGen + }{ + m: make(map[uintptr]AggregateFunction), + } +) + +type idGen struct { + bitset []uint64 +} + +func (gen *idGen) next() uintptr { + base := uintptr(1) + for i := 0; i < len(gen.bitset); i, base = i+1, base+64 { + b := gen.bitset[i] + if b != 1<<64-1 { + n := uintptr(bits.TrailingZeros64(^b)) + gen.bitset[i] |= 1 << n + return base + n + } + } + gen.bitset = append(gen.bitset, 1) + return base +} + +func (gen *idGen) reclaim(id uintptr) { + bit := id - 1 + gen.bitset[bit/64] &^= 1 << (bit % 64) +} + +func makeAggregate(tls *libc.TLS, ctx uintptr) (AggregateFunction, uintptr) { + goCtx := FunctionContext{tls: tls, ctx: ctx} + aggCtx := (*uintptr)(unsafe.Pointer(sqlite3.Xsqlite3_aggregate_context(tls, ctx, int32(ptrSize)))) + setErrorResult := errorResultFunction(tls, ctx) + if aggCtx == nil { + setErrorResult(errors.New("insufficient memory for aggregate")) + return nil, 0 + } + if *aggCtx != 0 { + // Already created. + xAggregateContext.mu.RLock() + f := xAggregateContext.m[*aggCtx] + xAggregateContext.mu.RUnlock() + return f, *aggCtx + } + + factoryID := sqlite3.Xsqlite3_user_data(tls, ctx) + xAggregateFactories.mu.RLock() + factory := xAggregateFactories.m[factoryID] + xAggregateFactories.mu.RUnlock() + + f, err := factory(goCtx) + if err != nil { + setErrorResult(err) + return nil, 0 + } + if f == nil { + setErrorResult(errors.New("MakeAggregate function returned nil")) + return nil, 0 + } + + xAggregateContext.mu.Lock() + *aggCtx = xAggregateContext.ids.next() + xAggregateContext.m[*aggCtx] = f + xAggregateContext.mu.Unlock() + return f, *aggCtx +} + +// cFuncPointer converts a function defined by a function declaration to a C pointer. +// The result of using cFuncPointer on closures is undefined. +func cFuncPointer[T any](f T) uintptr { + // This assumes the memory representation described in https://golang.org/s/go11func. + // + // cFuncPointer does its conversion by doing the following in order: + // 1) Create a Go struct containing a pointer to a pointer to + // the function. It is assumed that the pointer to the function will be + // stored in the read-only data section and thus will not move. + // 2) Convert the pointer to the Go struct to a pointer to uintptr through + // unsafe.Pointer. This is permitted via Rule #1 of unsafe.Pointer. + // 3) Dereference the pointer to uintptr to obtain the function value as a + // uintptr. This is safe as long as function values are passed as pointers. 
+ return *(*uintptr)(unsafe.Pointer(&struct{ f T }{f})) +} + +func funcTrampoline(tls *libc.TLS, ctx uintptr, argc int32, argv uintptr) { + id := sqlite3.Xsqlite3_user_data(tls, ctx) + xFuncs.mu.RLock() + xFunc := xFuncs.m[id] + xFuncs.mu.RUnlock() + + setErrorResult := errorResultFunction(tls, ctx) + res, err := xFunc(&FunctionContext{}, functionArgs(tls, argc, argv)) + + if err != nil { + setErrorResult(err) + return + } + + err = functionReturnValue(tls, ctx, res) + if err != nil { + setErrorResult(err) + } +} + +func stepTrampoline(tls *libc.TLS, ctx uintptr, argc int32, argv uintptr) { + impl, _ := makeAggregate(tls, ctx) + if impl == nil { + return + } + + setErrorResult := errorResultFunction(tls, ctx) + err := impl.Step(&FunctionContext{}, functionArgs(tls, argc, argv)) + if err != nil { + setErrorResult(err) + } +} + +func inverseTrampoline(tls *libc.TLS, ctx uintptr, argc int32, argv uintptr) { + impl, _ := makeAggregate(tls, ctx) + if impl == nil { + return + } + + setErrorResult := errorResultFunction(tls, ctx) + err := impl.WindowInverse(&FunctionContext{}, functionArgs(tls, argc, argv)) + if err != nil { + setErrorResult(err) + } +} + +func valueTrampoline(tls *libc.TLS, ctx uintptr) { + impl, _ := makeAggregate(tls, ctx) + if impl == nil { + return + } + + setErrorResult := errorResultFunction(tls, ctx) + res, err := impl.WindowValue(&FunctionContext{}) + if err != nil { + setErrorResult(err) + } else { + err = functionReturnValue(tls, ctx, res) + if err != nil { + setErrorResult(err) + } + } +} + +func finalTrampoline(tls *libc.TLS, ctx uintptr) { + impl, id := makeAggregate(tls, ctx) + if impl == nil { + return + } + + setErrorResult := errorResultFunction(tls, ctx) + res, err := impl.WindowValue(&FunctionContext{}) + if err != nil { + setErrorResult(err) + } else { + err = functionReturnValue(tls, ctx, res) + if err != nil { + setErrorResult(err) + } + } + impl.Final(&FunctionContext{}) + + xAggregateContext.mu.Lock() + defer xAggregateContext.mu.Unlock() + delete(xAggregateContext.m, id) + xAggregateContext.ids.reclaim(id) +} + +// int sqlite3_limit(sqlite3*, int id, int newVal); +func (c *conn) limit(id int, newVal int) int { + return int(sqlite3.Xsqlite3_limit(c.tls, c.db, int32(id), int32(newVal))) +} + +// Limit calls sqlite3_limit, see the docs at +// https://www.sqlite.org/c3ref/limit.html for details. +// +// To get a sql.Conn from a *sql.DB, use (*sql.DB).Conn(). Limits are bound to +// the particular instance of 'c', so getting a new connection only to pass it +// to Limit is possibly not useful above querying what are the various +// configured default values. 
+func Limit(c *sql.Conn, id int, newVal int) (r int, err error) { + err = c.Raw(func(driverConn any) error { + switch dc := driverConn.(type) { + case *conn: + r = dc.limit(id, newVal) + return nil + default: + return fmt.Errorf("unexpected driverConn type: %T", driverConn) + } + }) + return r, err + +} diff -Nru temporal-1.21.5-1/src/vendor/modernc.org/sqlite/sqlite_go18.go temporal-1.22.5/src/vendor/modernc.org/sqlite/sqlite_go18.go --- temporal-1.21.5-1/src/vendor/modernc.org/sqlite/sqlite_go18.go 2023-09-29 14:03:35.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/sqlite/sqlite_go18.go 2024-02-23 09:46:16.000000000 +0000 @@ -13,37 +13,72 @@ ) // Ping implements driver.Pinger -func (c *conn) Ping(ctx context.Context) error { - _, err := c.ExecContext(ctx, "select 1", nil) +func (c *conn) Ping(ctx context.Context) (err error) { + if dmesgs { + defer func() { + dmesg("conn %p, ctx %p: err %v", c, ctx, err) + }() + } + _, err = c.ExecContext(ctx, "select 1", nil) return err } // BeginTx implements driver.ConnBeginTx -func (c *conn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) { +func (c *conn) BeginTx(ctx context.Context, opts driver.TxOptions) (dt driver.Tx, err error) { + if dmesgs { + defer func() { + dmesg("conn %p, ctx %p, opts %+v: (driver.Tx %v, err %v)", c, ctx, opts, dt, err) + }() + } return c.begin(ctx, opts) } // PrepareContext implements driver.ConnPrepareContext -func (c *conn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) { +func (c *conn) PrepareContext(ctx context.Context, query string) (ds driver.Stmt, err error) { + if dmesgs { + defer func() { + dmesg("conn %p, ctx %p, query %q: (driver.Stmt %v, err %v)", c, ctx, query, ds, err) + }() + } return c.prepare(ctx, query) } // ExecContext implements driver.ExecerContext -func (c *conn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) { +func (c *conn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (dr driver.Result, err error) { + if dmesgs { + defer func() { + dmesg("conn %p, ctx %p, query %q, args %v: (driver.Result %p, err %v)", c, ctx, query, args, dr, err) + }() + } return c.exec(ctx, query, args) } // QueryContext implements driver.QueryerContext -func (c *conn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) { +func (c *conn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (dr driver.Rows, err error) { + if dmesgs { + defer func() { + dmesg("conn %p, ctx %p, query %q, args %v: (driver.Rows %p, err %v)", c, ctx, query, args, dr, err) + }() + } return c.query(ctx, query, args) } // ExecContext implements driver.StmtExecContext -func (s *stmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) { +func (s *stmt) ExecContext(ctx context.Context, args []driver.NamedValue) (dr driver.Result, err error) { + if dmesgs { + defer func() { + dmesg("stmt %p, ctx %p, args %v: (driver.Result %p, err %v)", s, ctx, args, dr, err) + }() + } return s.exec(ctx, args) } // QueryContext implements driver.StmtQueryContext -func (s *stmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) { +func (s *stmt) QueryContext(ctx context.Context, args []driver.NamedValue) (dr driver.Rows, err error) { + if dmesgs { + defer func() { + dmesg("stmt %p, ctx %p, args %v: (driver.Rows %p, err %v)", s, ctx, args, dr, err) + }() + } return s.query(ctx, args) } diff -Nru 
temporal-1.21.5-1/src/vendor/modernc.org/sqlite/unconvert.sh temporal-1.22.5/src/vendor/modernc.org/sqlite/unconvert.sh --- temporal-1.21.5-1/src/vendor/modernc.org/sqlite/unconvert.sh 2023-09-29 14:03:35.000000000 +0000 +++ temporal-1.22.5/src/vendor/modernc.org/sqlite/unconvert.sh 2024-02-23 09:46:16.000000000 +0000 @@ -1,5 +1,5 @@ set -evx until unconvert -fastmath -all ./... do - unconvert -fastmath -apply -all ./... + unconvert2 -fastmath -apply -all ./... done diff -Nru temporal-1.21.5-1/src/vendor/modules.txt temporal-1.22.5/src/vendor/modules.txt --- temporal-1.21.5-1/src/vendor/modules.txt 2023-09-29 14:03:35.000000000 +0000 +++ temporal-1.22.5/src/vendor/modules.txt 2024-02-23 09:46:16.000000000 +0000 @@ -1,29 +1,29 @@ -# cloud.google.com/go v0.110.0 +# cloud.google.com/go v0.110.8 ## explicit; go 1.19 cloud.google.com/go/internal cloud.google.com/go/internal/optional cloud.google.com/go/internal/trace cloud.google.com/go/internal/version -# cloud.google.com/go/compute v1.19.0 +# cloud.google.com/go/compute v1.23.0 ## explicit; go 1.19 cloud.google.com/go/compute/internal # cloud.google.com/go/compute/metadata v0.2.3 ## explicit; go 1.19 cloud.google.com/go/compute/metadata -# cloud.google.com/go/iam v0.13.0 +# cloud.google.com/go/iam v1.1.2 ## explicit; go 1.19 cloud.google.com/go/iam cloud.google.com/go/iam/apiv1/iampb -# cloud.google.com/go/storage v1.29.0 +# cloud.google.com/go/storage v1.30.1 ## explicit; go 1.19 cloud.google.com/go/storage cloud.google.com/go/storage/internal cloud.google.com/go/storage/internal/apiv2 cloud.google.com/go/storage/internal/apiv2/stubs -# github.com/apache/thrift v0.18.0 +# github.com/apache/thrift v0.18.1 ## explicit; go 1.19 github.com/apache/thrift/lib/go/thrift -# github.com/aws/aws-sdk-go v1.44.203 +# github.com/aws/aws-sdk-go v1.44.289 ## explicit; go 1.11 github.com/aws/aws-sdk-go/aws github.com/aws/aws-sdk-go/aws/arn @@ -75,7 +75,7 @@ github.com/aws/aws-sdk-go/service/sso/ssoiface github.com/aws/aws-sdk-go/service/sts github.com/aws/aws-sdk-go/service/sts/stsiface -# github.com/benbjohnson/clock v1.3.0 +# github.com/benbjohnson/clock v1.3.5 ## explicit; go 1.15 github.com/benbjohnson/clock # github.com/beorn7/perks v1.0.1 @@ -86,13 +86,16 @@ # github.com/blang/semver/v4 v4.0.0 ## explicit; go 1.14 github.com/blang/semver/v4 -# github.com/brianvoe/gofakeit/v6 v6.20.1 +# github.com/brianvoe/gofakeit/v6 v6.22.0 ## explicit; go 1.17 github.com/brianvoe/gofakeit/v6 github.com/brianvoe/gofakeit/v6/data # github.com/cactus/go-statsd-client/statsd v0.0.0-20200423205355-cb0885a1018c ## explicit; go 1.11 github.com/cactus/go-statsd-client/statsd +# github.com/cactus/go-statsd-client/v5 v5.0.0 +## explicit; go 1.11 +github.com/cactus/go-statsd-client/v5/statsd # github.com/cenkalti/backoff/v4 v4.2.1 ## explicit; go 1.18 github.com/cenkalti/backoff/v4 @@ -122,7 +125,7 @@ # github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a ## explicit github.com/facebookgo/clock -# github.com/fatih/color v1.14.1 +# github.com/fatih/color v1.15.0 ## explicit; go 1.17 github.com/fatih/color # github.com/go-logr/logr v1.2.4 @@ -135,7 +138,7 @@ # github.com/go-sql-driver/mysql v1.5.0 ## explicit; go 1.10 github.com/go-sql-driver/mysql -# github.com/gocql/gocql v1.4.0 +# github.com/gocql/gocql v1.5.2 ## explicit; go 1.13 github.com/gocql/gocql github.com/gocql/gocql/internal/lru @@ -155,7 +158,7 @@ # github.com/gogo/status v1.1.1 ## explicit; go 1.12 github.com/gogo/status -# github.com/golang-jwt/jwt/v4 v4.4.3 +# github.com/golang-jwt/jwt/v4 v4.5.0 ## 
explicit; go 1.16 github.com/golang-jwt/jwt/v4 # github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da @@ -166,12 +169,15 @@ github.com/golang/mock/gomock # github.com/golang/protobuf v1.5.3 ## explicit; go 1.9 +github.com/golang/protobuf/descriptor github.com/golang/protobuf/jsonpb github.com/golang/protobuf/proto +github.com/golang/protobuf/protoc-gen-go/descriptor github.com/golang/protobuf/ptypes github.com/golang/protobuf/ptypes/any github.com/golang/protobuf/ptypes/duration github.com/golang/protobuf/ptypes/timestamp +github.com/golang/protobuf/ptypes/wrappers # github.com/golang/snappy v0.0.4 ## explicit github.com/golang/snappy @@ -183,25 +189,53 @@ github.com/google/go-cmp/cmp/internal/flags github.com/google/go-cmp/cmp/internal/function github.com/google/go-cmp/cmp/internal/value +# github.com/google/s2a-go v0.1.4 +## explicit; go 1.16 +github.com/google/s2a-go +github.com/google/s2a-go/fallback +github.com/google/s2a-go/internal/authinfo +github.com/google/s2a-go/internal/handshaker +github.com/google/s2a-go/internal/handshaker/service +github.com/google/s2a-go/internal/proto/common_go_proto +github.com/google/s2a-go/internal/proto/s2a_context_go_proto +github.com/google/s2a-go/internal/proto/s2a_go_proto +github.com/google/s2a-go/internal/proto/v2/common_go_proto +github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto +github.com/google/s2a-go/internal/proto/v2/s2a_go_proto +github.com/google/s2a-go/internal/record +github.com/google/s2a-go/internal/record/internal/aeadcrypter +github.com/google/s2a-go/internal/record/internal/halfconn +github.com/google/s2a-go/internal/tokenmanager +github.com/google/s2a-go/internal/v2 +github.com/google/s2a-go/internal/v2/certverifier +github.com/google/s2a-go/internal/v2/remotesigner +github.com/google/s2a-go/internal/v2/tlsconfigstore +github.com/google/s2a-go/stream # github.com/google/uuid v1.3.0 ## explicit github.com/google/uuid -# github.com/googleapis/enterprise-certificate-proxy v0.2.3 +# github.com/googleapis/enterprise-certificate-proxy v0.2.5 ## explicit; go 1.19 github.com/googleapis/enterprise-certificate-proxy/client github.com/googleapis/enterprise-certificate-proxy/client/util -# github.com/googleapis/gax-go/v2 v2.7.1 +# github.com/googleapis/gax-go/v2 v2.12.0 ## explicit; go 1.19 github.com/googleapis/gax-go/v2 github.com/googleapis/gax-go/v2/apierror github.com/googleapis/gax-go/v2/apierror/internal/proto +github.com/googleapis/gax-go/v2/callctx github.com/googleapis/gax-go/v2/internal -# github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 +# github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 ## explicit; go 1.14 github.com/grpc-ecosystem/go-grpc-middleware/retry github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils github.com/grpc-ecosystem/go-grpc-middleware/util/metautils -# github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.2 +# github.com/grpc-ecosystem/grpc-gateway v1.16.0 +## explicit; go 1.14 +github.com/grpc-ecosystem/grpc-gateway/internal +github.com/grpc-ecosystem/grpc-gateway/runtime +github.com/grpc-ecosystem/grpc-gateway/utilities +# github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 ## explicit; go 1.17 github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule github.com/grpc-ecosystem/grpc-gateway/v2/runtime @@ -219,16 +253,13 @@ ## explicit; go 1.10 github.com/jmoiron/sqlx github.com/jmoiron/sqlx/reflectx -# github.com/jonboulle/clockwork v0.4.0 -## explicit; go 1.15 -github.com/jonboulle/clockwork # github.com/josharian/intern v1.0.0 ## explicit; go 1.5 
github.com/josharian/intern # github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 ## explicit github.com/kballard/go-shellquote -# github.com/lib/pq v1.10.7 +# github.com/lib/pq v1.10.9 ## explicit; go 1.13 github.com/lib/pq github.com/lib/pq/oid @@ -242,7 +273,7 @@ # github.com/mattn/go-colorable v0.1.13 ## explicit; go 1.15 github.com/mattn/go-colorable -# github.com/mattn/go-isatty v0.0.17 +# github.com/mattn/go-isatty v0.0.19 ## explicit; go 1.15 github.com/mattn/go-isatty # github.com/mattn/go-runewidth v0.0.14 @@ -274,7 +305,7 @@ # github.com/pmezard/go-difflib v1.0.0 ## explicit github.com/pmezard/go-difflib/difflib -# github.com/prometheus/client_golang v1.15.1 +# github.com/prometheus/client_golang v1.16.0 ## explicit; go 1.17 github.com/prometheus/client_golang/prometheus github.com/prometheus/client_golang/prometheus/internal @@ -287,7 +318,7 @@ github.com/prometheus/common/expfmt github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg github.com/prometheus/common/model -# github.com/prometheus/procfs v0.10.1 +# github.com/prometheus/procfs v0.11.0 ## explicit; go 1.19 github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs @@ -298,7 +329,7 @@ # github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec ## explicit; go 1.12 github.com/remyoudompheng/bigfft -# github.com/rivo/uniseg v0.4.3 +# github.com/rivo/uniseg v0.4.4 ## explicit; go 1.18 github.com/rivo/uniseg # github.com/robfig/cron v1.2.0 @@ -310,7 +341,7 @@ # github.com/russross/blackfriday/v2 v2.1.0 ## explicit github.com/russross/blackfriday/v2 -# github.com/sirupsen/logrus v1.9.0 +# github.com/sirupsen/logrus v1.9.3 ## explicit; go 1.13 github.com/sirupsen/logrus # github.com/stretchr/objx v0.5.0 @@ -322,7 +353,7 @@ github.com/stretchr/testify/mock github.com/stretchr/testify/require github.com/stretchr/testify/suite -# github.com/temporalio/ringpop-go v0.0.0-20220818230611-30bf23b490b2 +# github.com/temporalio/ringpop-go v0.0.0-20230606200434-b5c079f412d3 ## explicit; go 1.11 github.com/temporalio/ringpop-go github.com/temporalio/ringpop-go/discovery @@ -349,16 +380,16 @@ github.com/temporalio/tchannel-go/tos github.com/temporalio/tchannel-go/trand github.com/temporalio/tchannel-go/typed -# github.com/temporalio/tctl-kit v0.0.0-20230213052353-2342ea1e7d14 +# github.com/temporalio/tctl-kit v0.0.0-20230328153839-577f95d16fa0 ## explicit; go 1.18 github.com/temporalio/tctl-kit/pkg/color -# github.com/twmb/murmur3 v1.1.6 +# github.com/twmb/murmur3 v1.1.8 ## explicit; go 1.11 github.com/twmb/murmur3 # github.com/uber-common/bark v1.3.0 ## explicit; go 1.14 github.com/uber-common/bark -# github.com/uber-go/tally/v4 v4.1.6 +# github.com/uber-go/tally/v4 v4.1.7 ## explicit; go 1.15 github.com/uber-go/tally/v4 github.com/uber-go/tally/v4/internal/cache @@ -372,7 +403,7 @@ github.com/uber-go/tally/v4/thirdparty/github.com/apache/thrift/lib/go/thrift # github.com/uber/jaeger-client-go v2.30.0+incompatible ## explicit -# github.com/urfave/cli v1.22.12 +# github.com/urfave/cli v1.22.14 ## explicit; go 1.11 github.com/urfave/cli # github.com/urfave/cli/v2 v2.4.0 @@ -469,15 +500,15 @@ # go.opentelemetry.io/otel/trace v1.16.0 ## explicit; go 1.19 go.opentelemetry.io/otel/trace -# go.opentelemetry.io/proto/otlp v0.19.0 -## explicit; go 1.14 +# go.opentelemetry.io/proto/otlp v0.20.0 +## explicit; go 1.17 go.opentelemetry.io/proto/otlp/collector/metrics/v1 go.opentelemetry.io/proto/otlp/collector/trace/v1 go.opentelemetry.io/proto/otlp/common/v1 go.opentelemetry.io/proto/otlp/metrics/v1 
go.opentelemetry.io/proto/otlp/resource/v1 go.opentelemetry.io/proto/otlp/trace/v1 -# go.temporal.io/api v1.23.0 +# go.temporal.io/api v1.24.1-0.20231003165936-bb03061759c8 ## explicit; go 1.16 go.temporal.io/api/batch/v1 go.temporal.io/api/command/v1 @@ -487,6 +518,8 @@ go.temporal.io/api/failure/v1 go.temporal.io/api/filter/v1 go.temporal.io/api/history/v1 +go.temporal.io/api/internal/temporalgateway +go.temporal.io/api/internal/temporaljsonpb go.temporal.io/api/namespace/v1 go.temporal.io/api/operatorservice/v1 go.temporal.io/api/protocol/v1 @@ -502,8 +535,8 @@ go.temporal.io/api/workflow/v1 go.temporal.io/api/workflowservice/v1 go.temporal.io/api/workflowservicemock/v1 -# go.temporal.io/sdk v1.23.0 -## explicit; go 1.16 +# go.temporal.io/sdk v1.25.1 +## explicit; go 1.20 go.temporal.io/sdk/activity go.temporal.io/sdk/client go.temporal.io/sdk/converter @@ -526,7 +559,7 @@ # go.temporal.io/version v0.3.0 ## explicit; go 1.14 go.temporal.io/version/check -# go.uber.org/atomic v1.10.0 +# go.uber.org/atomic v1.11.0 ## explicit; go 1.18 go.uber.org/atomic # go.uber.org/automaxprocs v1.5.2 @@ -534,22 +567,22 @@ go.uber.org/automaxprocs/internal/cgroups go.uber.org/automaxprocs/internal/runtime go.uber.org/automaxprocs/maxprocs -# go.uber.org/dig v1.16.1 +# go.uber.org/dig v1.17.0 ## explicit; go 1.18 go.uber.org/dig go.uber.org/dig/internal/digerror go.uber.org/dig/internal/digreflect go.uber.org/dig/internal/dot go.uber.org/dig/internal/graph -# go.uber.org/fx v1.19.1 -## explicit; go 1.18 +# go.uber.org/fx v1.20.0 +## explicit; go 1.19 go.uber.org/fx go.uber.org/fx/fxevent go.uber.org/fx/internal/fxclock go.uber.org/fx/internal/fxlog go.uber.org/fx/internal/fxreflect go.uber.org/fx/internal/lifecycle -# go.uber.org/multierr v1.9.0 +# go.uber.org/multierr v1.11.0 ## explicit; go 1.19 go.uber.org/multierr # go.uber.org/zap v1.24.0 @@ -563,20 +596,27 @@ go.uber.org/zap/internal/ztest go.uber.org/zap/zapcore go.uber.org/zap/zaptest -# golang.org/x/crypto v0.6.0 +# golang.org/x/crypto v0.14.0 ## explicit; go 1.17 +golang.org/x/crypto/chacha20 +golang.org/x/crypto/chacha20poly1305 +golang.org/x/crypto/cryptobyte +golang.org/x/crypto/cryptobyte/asn1 golang.org/x/crypto/ed25519 +golang.org/x/crypto/hkdf +golang.org/x/crypto/internal/alias +golang.org/x/crypto/internal/poly1305 golang.org/x/crypto/pbkdf2 -# golang.org/x/exp v0.0.0-20230213192124-5e25df0256eb -## explicit; go 1.18 +# golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 +## explicit; go 1.20 golang.org/x/exp/constraints golang.org/x/exp/maps golang.org/x/exp/rand golang.org/x/exp/slices -# golang.org/x/mod v0.9.0 +# golang.org/x/mod v0.11.0 ## explicit; go 1.17 golang.org/x/mod/semver -# golang.org/x/net v0.10.0 +# golang.org/x/net v0.17.0 ## explicit; go 1.17 golang.org/x/net/bpf golang.org/x/net/context @@ -590,7 +630,7 @@ golang.org/x/net/ipv4 golang.org/x/net/ipv6 golang.org/x/net/trace -# golang.org/x/oauth2 v0.8.0 +# golang.org/x/oauth2 v0.10.0 ## explicit; go 1.17 golang.org/x/oauth2 golang.org/x/oauth2/authhandler @@ -599,17 +639,17 @@ golang.org/x/oauth2/internal golang.org/x/oauth2/jws golang.org/x/oauth2/jwt -# golang.org/x/sync v0.2.0 -## explicit +# golang.org/x/sync v0.3.0 +## explicit; go 1.17 golang.org/x/sync/semaphore -# golang.org/x/sys v0.8.0 +# golang.org/x/sys v0.13.0 ## explicit; go 1.17 +golang.org/x/sys/cpu golang.org/x/sys/execabs -golang.org/x/sys/internal/unsafeheader golang.org/x/sys/unix golang.org/x/sys/windows golang.org/x/sys/windows/registry -# golang.org/x/text v0.9.0 +# golang.org/x/text v0.13.0 ## 
explicit; go 1.17 golang.org/x/text/secure/bidirule golang.org/x/text/transform @@ -618,16 +658,16 @@ # golang.org/x/time v0.3.0 ## explicit golang.org/x/time/rate -# golang.org/x/tools v0.7.0 +# golang.org/x/tools v0.10.0 ## explicit; go 1.18 golang.org/x/tools/go/gcexportdata golang.org/x/tools/go/internal/packagesdriver golang.org/x/tools/go/packages -golang.org/x/tools/go/types/objectpath golang.org/x/tools/internal/event golang.org/x/tools/internal/event/core golang.org/x/tools/internal/event/keys golang.org/x/tools/internal/event/label +golang.org/x/tools/internal/event/tag golang.org/x/tools/internal/gcimporter golang.org/x/tools/internal/gocommand golang.org/x/tools/internal/packagesinternal @@ -639,7 +679,7 @@ ## explicit; go 1.17 golang.org/x/xerrors golang.org/x/xerrors/internal -# google.golang.org/api v0.114.0 +# google.golang.org/api v0.128.0 ## explicit; go 1.19 google.golang.org/api/googleapi google.golang.org/api/googleapi/transport @@ -671,25 +711,24 @@ google.golang.org/appengine/internal/urlfetch google.golang.org/appengine/socket google.golang.org/appengine/urlfetch -# google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc +# google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97 ## explicit; go 1.19 -google.golang.org/genproto/googleapis/iam/v1 google.golang.org/genproto/googleapis/type/date google.golang.org/genproto/googleapis/type/expr google.golang.org/genproto/internal google.golang.org/genproto/protobuf/field_mask -# google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc +# google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97 ## explicit; go 1.19 google.golang.org/genproto/googleapis/api google.golang.org/genproto/googleapis/api/annotations google.golang.org/genproto/googleapis/api/httpbody -# google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc +# google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97 ## explicit; go 1.19 google.golang.org/genproto/googleapis/rpc/code google.golang.org/genproto/googleapis/rpc/errdetails google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.55.0 -## explicit; go 1.17 +# google.golang.org/grpc v1.58.2 +## explicit; go 1.19 google.golang.org/grpc google.golang.org/grpc/attributes google.golang.org/grpc/backoff @@ -734,6 +773,7 @@ google.golang.org/grpc/internal/grpcrand google.golang.org/grpc/internal/grpcsync google.golang.org/grpc/internal/grpcutil +google.golang.org/grpc/internal/idle google.golang.org/grpc/internal/metadata google.golang.org/grpc/internal/pretty google.golang.org/grpc/internal/resolver @@ -749,16 +789,17 @@ google.golang.org/grpc/metadata google.golang.org/grpc/peer google.golang.org/grpc/reflection +google.golang.org/grpc/reflection/grpc_reflection_v1 google.golang.org/grpc/reflection/grpc_reflection_v1alpha google.golang.org/grpc/resolver google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats google.golang.org/grpc/status google.golang.org/grpc/tap -# google.golang.org/grpc/examples v0.0.0-20230216223317-abff344ead8f +# google.golang.org/grpc/examples v0.0.0-20230623203957-0b3a81eabc28 ## explicit; go 1.17 google.golang.org/grpc/examples/helloworld/helloworld -# google.golang.org/protobuf v1.30.0 +# google.golang.org/protobuf v1.31.0 ## explicit; go 1.11 google.golang.org/protobuf/encoding/protojson google.golang.org/protobuf/encoding/prototext @@ -810,16 +851,16 @@ # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 -# lukechampine.com/uint128 v1.2.0 +# 
lukechampine.com/uint128 v1.3.0 ## explicit; go 1.12 lukechampine.com/uint128 -# modernc.org/cc/v3 v3.40.0 +# modernc.org/cc/v3 v3.41.0 ## explicit; go 1.17 modernc.org/cc/v3 -# modernc.org/ccgo/v3 v3.16.13 -## explicit; go 1.17 +# modernc.org/ccgo/v3 v3.16.14 +## explicit; go 1.18 modernc.org/ccgo/v3/lib -# modernc.org/libc v1.22.3 +# modernc.org/libc v1.24.1 ## explicit; go 1.18 modernc.org/libc modernc.org/libc/errno @@ -850,13 +891,13 @@ # modernc.org/mathutil v1.5.0 ## explicit; go 1.13 modernc.org/mathutil -# modernc.org/memory v1.5.0 +# modernc.org/memory v1.6.0 ## explicit; go 1.18 modernc.org/memory # modernc.org/opt v0.1.3 ## explicit; go 1.13 modernc.org/opt -# modernc.org/sqlite v1.21.0 +# modernc.org/sqlite v1.23.1 ## explicit; go 1.18 modernc.org/sqlite modernc.org/sqlite/lib